diff --git a/.cmake-format.py b/.cmake-format.py
index a6f4af8f8a39a611df4f65eefeda884f452fc4df..0181300410770d3535a2210a8f1393e4918a9df1 100644
--- a/.cmake-format.py
+++ b/.cmake-format.py
@@ -109,5 +109,5 @@ with section("parse"):
                 "SRCS": '*',
                 "DEPS": '*',
             }
-        }
+        },
     }
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fda8a37c2ec00ac892700b12211526f56a27f2f8..b58e1b0a4380dc27e84c43ab47aa51e4b276d0dd 100755
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -28,11 +28,17 @@ repos:
         python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py|
         python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py
       )$
-- repo: https://github.com/google/yapf
-  rev: v0.32.0
+- repo: https://github.com/psf/black.git
+  rev: 22.8.0
   hooks:
-  - id: yapf
-    files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
+  - id: black
+    files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
+    # Temporary exclude, will be formatted in a separate PR
+    exclude: |
+      (?x)^(
+        python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py|
+        python/paddle/fluid/tests/unittests/dygraph_to_static/test_origin_info.py
+      )$
 - repo: https://github.com/PyCQA/flake8
   rev: 4.0.1
   hooks:
diff --git a/.style.yapf b/.style.yapf
deleted file mode 100644
index 4741fb4f3bbc6681088cf9e960321e7b857a93a8..0000000000000000000000000000000000000000
--- a/.style.yapf
+++ /dev/null
@@ -1,3 +0,0 @@
-[style]
-based_on_style = pep8
-column_limit = 80
diff --git a/cmake/copyfile.py b/cmake/copyfile.py
index 7ba4d95049dc76d1f6bd5bb67e116d5d3f4ea23b..552e8ea1a2b14ea2c6efd1ffc0f0b8583cd00aef 100644
--- a/cmake/copyfile.py
+++ b/cmake/copyfile.py
@@ -21,7 +21,7 @@ import glob
 def main():
     src = sys.argv[1]
     dst = sys.argv[2]
-    if os.path.isdir(src):  #copy directory
+    if os.path.isdir(src):  # copy directory
         pathList = os.path.split(src)
         dst = os.path.join(dst, pathList[-1])
         if not os.path.exists(dst):
@@ -31,7 +31,7 @@ def main():
             shutil.rmtree(dst)
         shutil.copytree(src, dst)
         print("overwritten copy directory: {0} --->>> {1}".format(src, dst))
-    else:  #copy file, wildcard
+    else:  # copy file, wildcard
         if not os.path.exists(dst):
             os.makedirs(dst)
         srcFiles = glob.glob(src)
diff --git a/cmake/make_resource.py b/cmake/make_resource.py
index 936aefd4e6e6921211e0eb1ada89cc47a220c28b..ad8ee179d60c29d424c12cf663a6f29ce7440335 100644
--- a/cmake/make_resource.py
+++ b/cmake/make_resource.py
@@ -20,7 +20,15 @@
 res = sys.argv[1]
 out = sys.argv[2]
 var = re.sub(r'[ .-]', '_', os.path.basename(res))
-open(out, "w").write("const unsigned char " + var + "[] = {" +
-                     ",".join(["0x%02x" % ord(c)
-                               for c in open(res).read()]) + ",0};\n" +
-                     "const unsigned " + var + "_size = sizeof(" + var + ");\n")
+open(out, "w").write(
+    "const unsigned char "
+    + var
+    + "[] = {"
+    + ",".join(["0x%02x" % ord(c) for c in open(res).read()])
+    + ",0};\n"
+    + "const unsigned "
+    + var
+    + "_size = sizeof("
+    + var
+    + ");\n"
+)
diff --git a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
index 19ea5982f5883903d02ca31a438bb1027f4f2955..5555c17f1b6c61d0ce122ebdd648d1da9d3524a2 100644
--- a/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
+++ b/paddle/fluid/eager/auto_code_generator/generate_file_structures.py
@@ -41,8 +41,9 @@ def GenerateFileStructureForFinalDygraph(eager_dir):
             os.mkdir(directory)
 
     # Empty files
-    dygraph_forward_api_h_path = os.path.join(generated_dir,
-                                              "dygraph_functions.h")
+    dygraph_forward_api_h_path = os.path.join(
+        generated_dir, 
"dygraph_functions.h" + ) empty_files = [dygraph_forward_api_h_path] empty_files.append(os.path.join(forwards_dir, "dygraph_functions.cc")) empty_files.append(os.path.join(nodes_dir, "nodes.cc")) @@ -83,32 +84,41 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count): os.mkdir(directory) # Empty files - dygraph_forward_api_h_path = os.path.join(generated_dir, - "dygraph_forward_api.h") + dygraph_forward_api_h_path = os.path.join( + generated_dir, "dygraph_forward_api.h" + ) empty_files = [dygraph_forward_api_h_path] empty_files.append(os.path.join(nodes_dir, "nodes.h")) for i in range(split_count): empty_files.append( - os.path.join(forwards_dir, - "dygraph_forward_functions" + str(i + 1) + ".cc")) - empty_files.append(os.path.join(nodes_dir, - "nodes" + str(i + 1) + ".cc")) + os.path.join( + forwards_dir, "dygraph_forward_functions" + str(i + 1) + ".cc" + ) + ) + empty_files.append( + os.path.join(nodes_dir, "nodes" + str(i + 1) + ".cc") + ) empty_files.append( - os.path.join(forwards_dir, "dygraph_forward_functions_args_info.cc")) + os.path.join(forwards_dir, "dygraph_forward_functions_args_info.cc") + ) empty_files.append( - os.path.join(forwards_dir, - "dygraph_forward_functions_args_type_info.cc")) + os.path.join( + forwards_dir, "dygraph_forward_functions_args_type_info.cc" + ) + ) empty_files.append( - os.path.join(forwards_dir, "dygraph_forward_functions_returns_info.cc")) + os.path.join(forwards_dir, "dygraph_forward_functions_returns_info.cc") + ) for path in empty_files: if not os.path.exists(path): open(path, 'a').close() # CMakeLists nodes_level_cmakelist_path = os.path.join(nodes_dir, "CMakeLists.txt") - generated_level_cmakelist_path = os.path.join(generated_dir, - "CMakeLists.txt") + generated_level_cmakelist_path = os.path.join( + generated_dir, "CMakeLists.txt" + ) forwards_level_cmakelist_path = os.path.join(forwards_dir, "CMakeLists.txt") with open(nodes_level_cmakelist_path, "w") as f: @@ -120,9 +130,11 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count): for i in range(split_count): f.write( " COMMAND ${CMAKE_COMMAND} -E copy_if_different \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/nodes/nodes" - + str(i + 1) + - ".tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/nodes/nodes" - + str(i + 1) + ".cc\"\n") + + str(i + 1) + + ".tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/nodes/nodes" + + str(i + 1) + + ".cc\"\n" + ) f.write(" DEPENDS legacy_eager_codegen\n") f.write(" VERBATIM)\n") @@ -142,9 +154,11 @@ def GenerateFileStructureForIntermediateDygraph(eager_dir, split_count): for i in range(split_count): f.write( " COMMAND ${CMAKE_COMMAND} -E copy_if_different \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions" - + str(i + 1) + - ".tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions" - + str(i + 1) + ".cc\"\n") + + str(i + 1) + + ".tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions" + + str(i + 1) + + ".cc\"\n" + ) f.write( " COMMAND ${CMAKE_COMMAND} -E copy_if_different \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions_args_info.tmp.cc\" \"${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/fluid_generated/forwards/dygraph_forward_functions_args_info.cc\"\n" ) @@ -167,7 +181,8 @@ def 
GenerateFileStructureForIntermediateDygraph(eager_dir, split_count): "${fluid_manual_functions} DEPS ${eager_deps} ${fluid_deps} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS})\n" ) f.write( - "add_dependencies(dygraph_function copy_dygraph_forward_functions)") + "add_dependencies(dygraph_function copy_dygraph_forward_functions)" + ) with open(generated_level_cmakelist_path, "w") as f: f.write("add_subdirectory(forwards)\nadd_subdirectory(nodes)") diff --git a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py index 91a16a0fdf05f31824796522530777003e910abc..587dfb571b00d5ce1c419c57b9706af01029cb69 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py +++ b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py @@ -18,20 +18,45 @@ import re ######################## ### Global Variables ### ######################## -ops_to_fill_zero_for_empty_grads = set([ - "split_grad", "split_with_num_grad", "rnn_grad", "matmul_double_grad", - "matmul_triple_grad", "sigmoid_double_grad", "sigmoid_triple_grad", - "add_double_grad", "add_triple_grad", "multiply_grad", - "multiply_double_grad", "multiply_triple_grad", "conv2d_grad_grad", - "batch_norm_double_grad", "tanh_double_grad", "tanh_triple_grad", - "subtract_double_grad", "divide_double_grad", "log_double_grad", - "elu_double_grad", "leaky_relu_double_grad", "sqrt_double_grad", - "rsqrt_double_grad", "square_double_grad", "celu_double_grad", - "pad_double_grad", "pad3d_double_grad", "squeeze_double_grad", - "unsqueeze_double_grad", "instance_norm_double_grad", "conv3d_double_grad", - "depthwise_conv2d_grad_grad", "concat_double_grad", "expand_grad", - "argsort_grad" -]) +ops_to_fill_zero_for_empty_grads = set( + [ + "split_grad", + "split_with_num_grad", + "rnn_grad", + "matmul_double_grad", + "matmul_triple_grad", + "sigmoid_double_grad", + "sigmoid_triple_grad", + "add_double_grad", + "add_triple_grad", + "multiply_grad", + "multiply_double_grad", + "multiply_triple_grad", + "conv2d_grad_grad", + "batch_norm_double_grad", + "tanh_double_grad", + "tanh_triple_grad", + "subtract_double_grad", + "divide_double_grad", + "log_double_grad", + "elu_double_grad", + "leaky_relu_double_grad", + "sqrt_double_grad", + "rsqrt_double_grad", + "square_double_grad", + "celu_double_grad", + "pad_double_grad", + "pad3d_double_grad", + "squeeze_double_grad", + "unsqueeze_double_grad", + "instance_norm_double_grad", + "conv3d_double_grad", + "depthwise_conv2d_grad_grad", + "concat_double_grad", + "expand_grad", + "argsort_grad", + ] +) # For API dispatch used at python-level # { op_name : [arg_name, ...] 
} @@ -40,22 +65,31 @@ core_ops_args_info = {} core_ops_args_type_info = {} yaml_types_mapping = { - 'int' : 'int', 'int32_t' : 'int32_t', 'int64_t' : 'int64_t', 'size_t' : 'size_t', \ - 'float' : 'float', 'double' : 'double', 'bool' : 'bool', \ - 'str' : 'std::string', \ - 'str[]' : 'std::vector', 'float[]' : 'std::vector', \ - 'Place' : 'paddle::Place', 'DataLayout' : 'phi::DataLayout', 'DataType' : 'paddle::experimental::DataType', \ - 'int64_t[]' : 'std::vector', 'int[]' : 'std::vector', - 'Tensor' : 'Tensor', - 'Tensor[]' : 'std::vector', - 'Tensor[Tensor[]]' : 'std::vector>', - 'Scalar' : 'paddle::experimental::Scalar', - 'Scalar(int)' : 'paddle::experimental::Scalar', - 'Scalar(int64_t)' : 'paddle::experimental::Scalar', - 'Scalar(float)' : 'paddle::experimental::Scalar', - 'Scalar(double)' : 'paddle::experimental::Scalar', - 'Scalar[]' : 'std::vector', - 'IntArray' : 'paddle::experimental::IntArray' + 'int': 'int', + 'int32_t': 'int32_t', + 'int64_t': 'int64_t', + 'size_t': 'size_t', + 'float': 'float', + 'double': 'double', + 'bool': 'bool', + 'str': 'std::string', + 'str[]': 'std::vector', + 'float[]': 'std::vector', + 'Place': 'paddle::Place', + 'DataLayout': 'phi::DataLayout', + 'DataType': 'paddle::experimental::DataType', + 'int64_t[]': 'std::vector', + 'int[]': 'std::vector', + 'Tensor': 'Tensor', + 'Tensor[]': 'std::vector', + 'Tensor[Tensor[]]': 'std::vector>', + 'Scalar': 'paddle::experimental::Scalar', + 'Scalar(int)': 'paddle::experimental::Scalar', + 'Scalar(int64_t)': 'paddle::experimental::Scalar', + 'Scalar(float)': 'paddle::experimental::Scalar', + 'Scalar(double)': 'paddle::experimental::Scalar', + 'Scalar[]': 'std::vector', + 'IntArray': 'paddle::experimental::IntArray', } @@ -81,7 +115,8 @@ def ReadBwdFile(filepath): if contents is not None: for content in contents: assert 'backward_op' in content.keys(), AssertMessage( - 'backward_op', content.keys()) + 'backward_op', content.keys() + ) if 'backward_op' in content.keys(): api_name = content['backward_op'] @@ -116,7 +151,8 @@ def IsPlainTensorType(string): def IsVectorTensorType(string): vector_tensor_types = [ - 'std::vector>', 'std::vector' + 'std::vector>', + 'std::vector', ] if string in vector_tensor_types: return True @@ -147,7 +183,6 @@ def RemoveConstAndReference(string): def GetGradNodeName(string): - def str2Hump(text): arr = filter(None, text.split('_')) res = '' @@ -166,7 +201,6 @@ def GetDygraphForwardFunctionName(string): def GetDygraphLogName(string): - def str2Hump(text): arr = filter(None, text.split('_')) res = '' @@ -236,10 +270,14 @@ def ParseYamlArgs(string): m = re.search(pattern, arg) arg_type = m.group(1).strip() arg_name = m.group(3).split("=")[0].strip() - default_value = m.group(3).split("=")[1].strip() if len( - m.group(3).split("=")) > 1 else None - - assert arg_type in yaml_types_mapping.keys( + default_value = ( + m.group(3).split("=")[1].strip() + if len(m.group(3).split("=")) > 1 + else None + ) + + assert ( + arg_type in yaml_types_mapping.keys() ), f"The argument type {arg_type} in yaml config is not supported in yaml_types_mapping." if arg_type in ["DataType", "DataLayout"] and default_value is not None: default_value = f"paddle::experimental::{default_value}" @@ -277,7 +315,8 @@ def ParseYamlReturns(string): else: ret_type = ret.strip() - assert ret_type in yaml_types_mapping.keys( + assert ( + ret_type in yaml_types_mapping.keys() ), f"The return type {ret_type} in yaml config is not supported in yaml_types_mapping." 
ret_type = yaml_types_mapping[ret_type] @@ -295,7 +334,9 @@ def ParseYamlForwardFromBackward(string): wspace = r'\s*' fargs = r'(.*?)' frets = r'(.*)' - pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}' + pattern = ( + fr'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}' + ) m = re.search(pattern, string) function_name = m.group(1) @@ -359,38 +400,44 @@ def ParseYamlInplaceInfo(string): ### Generator Base ### ######################## class FunctionGeneratorBase: - def __init__(self, forward_api_contents, namespace): self.forward_api_contents = forward_api_contents self.namespace = namespace - self.is_forward_only = False if 'backward' in forward_api_contents.keys( - ) else True + self.is_forward_only = ( + False if 'backward' in forward_api_contents.keys() else True + ) self.forward_api_name = "" - self.orig_forward_inputs_list = [ - ] #[ [arg_name, arg_type, orig_position], ...] - self.orig_forward_attrs_list = [ - ] #[ [attr_name, attr_type, default_value, orig_position], ...] - self.orig_forward_returns_list = [ - ] #[ [ret_name, ret_type, orig_position], ...] + self.orig_forward_inputs_list = ( + [] + ) # [ [arg_name, arg_type, orig_position], ...] + self.orig_forward_attrs_list = ( + [] + ) # [ [attr_name, attr_type, default_value, orig_position], ...] + self.orig_forward_returns_list = ( + [] + ) # [ [ret_name, ret_type, orig_position], ...] # Processed Forward Data - self.forward_inputs_position_map = { - } #{ "name" : [type, fwd_position] } - self.forward_outputs_position_map = { - } #{ "name" : [type, fwd_position] } + self.forward_inputs_position_map = ( + {} + ) # { "name" : [type, fwd_position] } + self.forward_outputs_position_map = ( + {} + ) # { "name" : [type, fwd_position] } # Special Op Attributes - self.optional_inputs = [] #[name, ...] - self.no_need_buffers = [] #[name, ...] - self.intermediate_outputs = [] #[name, ...] - self.forward_inplace_map = {} #{name : name, ...} + self.optional_inputs = [] # [name, ...] + self.no_need_buffers = [] # [name, ...] + self.intermediate_outputs = [] # [name, ...] 
+ self.forward_inplace_map = {} # {name : name, ...} def ParseForwardInplaceInfo(self): forward_api_contents = self.forward_api_contents - if 'inplace' not in forward_api_contents.keys(): return + if 'inplace' not in forward_api_contents.keys(): + return inplace_map_str = forward_api_contents['inplace'] self.forward_inplace_map = ParseYamlInplaceInfo(inplace_map_str) @@ -432,19 +479,26 @@ class FunctionGeneratorBase: forward_args_str = forward_api_contents['args'] forward_returns_str = forward_api_contents['output'] - assert 'op' in forward_api_contents.keys( + assert ( + 'op' in forward_api_contents.keys() ), "Unable to find \"op\" in forward_api_contents keys" - assert 'args' in forward_api_contents.keys( + assert ( + 'args' in forward_api_contents.keys() ), "Unable to find \"args\" in forward_api_contents keys" - assert 'output' in forward_api_contents.keys( + assert ( + 'output' in forward_api_contents.keys() ), "Unable to find \"output\" in forward_api_contents keys" # Collect Original Forward Inputs/Outputs and then perform validation checks - self.orig_forward_inputs_list, self.orig_forward_attrs_list, self.orig_forward_returns_list = ParseYamlForward( - forward_args_str, forward_returns_str) - - def DetermineForwardPositionMap(self, forward_inputs_list, - forward_returns_list): + ( + self.orig_forward_inputs_list, + self.orig_forward_attrs_list, + self.orig_forward_returns_list, + ) = ParseYamlForward(forward_args_str, forward_returns_str) + + def DetermineForwardPositionMap( + self, forward_inputs_list, forward_returns_list + ): for i in range(len(forward_inputs_list)): forward_input = forward_inputs_list[i] input_name = forward_input[0] @@ -452,13 +506,14 @@ class FunctionGeneratorBase: input_pos = forward_input[2] self.forward_inputs_position_map[input_name] = [ - input_type, input_pos + input_type, + input_pos, ] for i in range(len(forward_returns_list)): forward_return = forward_returns_list[i] if len(forward_return[0]) == 0: - if (len(forward_returns_list) == 1): + if len(forward_returns_list) == 1: return_name = "out" else: return_name = "out_{}".format(i + 1) @@ -468,12 +523,12 @@ class FunctionGeneratorBase: return_pos = forward_return[2] self.forward_outputs_position_map[return_name] = [ - return_type, return_pos + return_type, + return_pos, ] class GeneratorBase: - def __init__(self, api_yaml_path): self.namespace = "" self.api_yaml_path = api_yaml_path diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py index cf51fbb9d07ddb2d2c17610b724264032062041c..fe0b021a2e5b5a24a358b9b26b558b8ccf00dd83 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py @@ -15,12 +15,19 @@ import re import argparse import os -from codegen_utils import core_ops_returns_info, core_ops_args_info, core_ops_args_type_info +from codegen_utils import ( + core_ops_returns_info, + core_ops_args_info, + core_ops_args_type_info, +) from codegen_utils import ReadBwdFile from codegen_utils import FindForwardName, GetGradNodeName, GetSavedName from codegen_utils import IsPlainTensorType, IsVectorTensorType from codegen_utils import GetConstReference, RemoveConstAndReference -from codegen_utils import GetDygraphForwardFunctionName, GetIntermediateAPIFunctionName +from codegen_utils import ( + GetDygraphForwardFunctionName, + GetIntermediateAPIFunctionName, +) from codegen_utils import GetAutoGradMetaName, GetAutoGradMetaVectorName from 
codegen_utils import GetInplacedFunctionName from codegen_utils import ParseYamlForwardFromBackward @@ -39,7 +46,11 @@ inplace_check_blacklist = set(["assign_out_"]) # Black Ops list that's NO NEED to apply code generation black_ops_list = [ - "conv2d", "conv2d_grad", "conv2d_grad_grad", "add_n", "add_n_grad" + "conv2d", + "conv2d_grad", + "conv2d_grad_grad", + "add_n", + "add_n_grad", ] @@ -48,7 +59,8 @@ black_ops_list = [ ########### def ParseArguments(): parser = argparse.ArgumentParser( - description='Eager Code Generator Args Parser') + description='Eager Code Generator Args Parser' + ) parser.add_argument('--nodes_h_path', type=str) parser.add_argument('--nodes_cc_path', type=str) parser.add_argument('--forwards_h_path', type=str) @@ -63,54 +75,44 @@ def ParseArguments(): ######################## ## Code Gen Templates ## ######################## -SET_PLAIN_TENSOR_WRAPPER_TEMPLATE = \ -""" void SetTensorWrapper{}(const paddle::experimental::Tensor& {}) {{ +SET_PLAIN_TENSOR_WRAPPER_TEMPLATE = """ void SetTensorWrapper{}(const paddle::experimental::Tensor& {}) {{ {} = egr::TensorWrapper({}, {}); }} """ -SET_VECTOR_TENSOR_WRAPPER_TEMPLATE = \ -""" void SetTensorWrapper{}(const std::vector& {}) {{ +SET_VECTOR_TENSOR_WRAPPER_TEMPLATE = """ void SetTensorWrapper{}(const std::vector& {}) {{ for(const auto& eager_tensor : {}) {{ {}.emplace_back(egr::TensorWrapper(eager_tensor, {})); }}; }} """ -PLAIN_TENSOR_MEMBER_TEMPLATE = \ -""" egr::TensorWrapper {}; +PLAIN_TENSOR_MEMBER_TEMPLATE = """ egr::TensorWrapper {}; """ -VECTOR_TENSOR_MEMBER_TEMPLATE = \ -""" std::vector {}; +VECTOR_TENSOR_MEMBER_TEMPLATE = """ std::vector {}; """ -CLEAR_TENSOR_WRAPPER_TEMPLATE = \ -""" {}.clear(); +CLEAR_TENSOR_WRAPPER_TEMPLATE = """ {}.clear(); """ -CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE = \ -""" for (auto& tw : {}) {{ +CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE = """ for (auto& tw : {}) {{ tw.clear(); }} """ -SET_ATTR_METHOD_TEMPLATE = \ -""" void SetAttribute{}({} {}) {{ +SET_ATTR_METHOD_TEMPLATE = """ void SetAttribute{}({} {}) {{ {} = {}; }} """ -ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE = \ -""" {} {} = {}; +ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE = """ {} {} = {}; """ -ATTRIBUTE_MEMBER_TEMPLATE = \ -""" {} {}; +ATTRIBUTE_MEMBER_TEMPLATE = """ {} {}; """ -NODE_DECLARATION_TEMPLATE = \ -""" +NODE_DECLARATION_TEMPLATE = """ class {} : public egr::GradNodeBase {{ public: {}() : egr::GradNodeBase() {{}} @@ -143,8 +145,7 @@ class {} : public egr::GradNodeBase {{ {}}}; """ -GRAD_FUNCTION_TEMPLATE = \ -""" +GRAD_FUNCTION_TEMPLATE = """ paddle::small_vector, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{ VLOG(3) << \"Running AD API GRAD: \" << \"{}\"; // Fill Zero For GradIn Tensors @@ -182,8 +183,7 @@ paddle::small_vector, egr::kSlotSmallV }} """ -FORWARD_FUNCTION_TEMPLATE = \ -""" +FORWARD_FUNCTION_TEMPLATE = """ {} {}({}) {{ VLOG(3) << \"Running AD API: \" << \"{}\"; // Dygraph Record Event @@ -222,8 +222,7 @@ FORWARD_FUNCTION_TEMPLATE = \ }} """ -AFTER_LOG_PRINT_TEMPLATE = \ -""" +AFTER_LOG_PRINT_TEMPLATE = """ if(VLOG_IS_ON(4)){{ const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }} \"; {} @@ -231,8 +230,7 @@ AFTER_LOG_PRINT_TEMPLATE = \ }} """ -BEFORE_LOG_PRINT_TEMPLATE = \ -""" +BEFORE_LOG_PRINT_TEMPLATE = """ if(VLOG_IS_ON(3)){{ const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}} \"; {} @@ -240,8 +238,7 @@ BEFORE_LOG_PRINT_TEMPLATE = \ }} """ -FORWARD_ONLY_FUNCTION_TEMPLATE = \ -""" 
+FORWARD_ONLY_FUNCTION_TEMPLATE = """ {} {}({}) {{ VLOG(3) << \"Running AD API: \" << \"{}\"; // Dygraph Record Event @@ -265,8 +262,7 @@ FORWARD_ONLY_FUNCTION_TEMPLATE = \ }} """ -FORWARD_BODY_TEMPLATE = \ -""" if(require_any_grad) {{ +FORWARD_BODY_TEMPLATE = """ if(require_any_grad) {{ {} egr::EagerUtils::PassStopGradient({}); @@ -288,8 +284,7 @@ FORWARD_BODY_TEMPLATE = \ }} """ -HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE = \ -""" if(trace_backward) {{ +HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE = """ if(trace_backward) {{ {} // Node Construction {} @@ -309,15 +304,13 @@ HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE = \ }} """ -NAMESPACE_WRAPPER_TEMPLATE = \ -""" +NAMESPACE_WRAPPER_TEMPLATE = """ namespace {} {{ {} }} """ -NODE_CC_FILE_TEMPLATE = \ -""" +NODE_CC_FILE_TEMPLATE = """ #include "glog/logging.h" #include "paddle/phi/api/all.h" #include "paddle/phi/api/backward/backward_api.h" @@ -337,8 +330,7 @@ DECLARE_bool(check_nan_inf); {} """ -NODE_H_FILE_TEMPLATE = \ -""" +NODE_H_FILE_TEMPLATE = """ #pragma once #include "paddle/fluid/eager/tensor_wrapper.h" #include "paddle/fluid/eager/grad_node_info.h" @@ -347,8 +339,7 @@ NODE_H_FILE_TEMPLATE = \ {} """ -FORWARD_CC_FILE_TEMPLATE = \ -""" +FORWARD_CC_FILE_TEMPLATE = """ #include "paddle/phi/api/lib/dygraph_api.h" #include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h" #include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h" @@ -367,8 +358,7 @@ DECLARE_bool(check_nan_inf); {} """ -FORWARD_H_FILE_TEMPLATE = \ -""" +FORWARD_H_FILE_TEMPLATE = """ #pragma once #include "glog/logging.h" #include "paddle/fluid/eager/autograd_meta.h" @@ -383,8 +373,7 @@ using CPUPlace = phi::CPUPlace; {} """ -CORE_OPS_INFO_TEMPLATE = \ -""" +CORE_OPS_INFO_TEMPLATE = """ std::unordered_map> core_ops_args_info = {{ {} }}; @@ -397,28 +386,24 @@ std::unordered_map> core_ops_returns_info """ -CORE_OPS_DECLARATION_TEMPLATE = \ -""" +CORE_OPS_DECLARATION_TEMPLATE = """ extern std::unordered_map> core_ops_args_info; extern std::unordered_map> core_ops_args_type_info; extern std::unordered_map> core_ops_returns_info; """ -CHECK_INPLACE_TEMPLATE = \ -""" +CHECK_INPLACE_TEMPLATE = """ egr::EagerUtils::CheckInplace({}, {}, require_any_grad); """ -BUMP_INPLACE_VERSION_TEMPLATE = \ -""" +BUMP_INPLACE_VERSION_TEMPLATE = """ // Bump Inplace Version {}.bump_inplace_version(); VLOG(3) << \"Tensor(\" << {}.name() << \") uses Inplace Strategy.\"; """ -AMP_LOGIC_TEMPLATE = \ -""" if (egr::Controller::Instance().GetAMPLevel() != paddle::imperative::AmpLevel::O0) {{ +AMP_LOGIC_TEMPLATE = """ if (egr::Controller::Instance().GetAMPLevel() != paddle::imperative::AmpLevel::O0) {{ VLOG(5) << "Check and Prepare For AMP"; {} paddle::small_vector, egr::kSlotSmallVectorSize> amp_tensors_vector = {}; @@ -431,8 +416,7 @@ AMP_LOGIC_TEMPLATE = \ }} }} """ -LAYOUT_LOGIC_TEMPLATE=\ -""" +LAYOUT_LOGIC_TEMPLATE = """ if (egr::Controller::Instance().UseLayoutAutoTune()) {{ paddle::small_vector, egr::kSlotSmallVectorSize> tensors_vector = {}; {} @@ -445,26 +429,22 @@ LAYOUT_LOGIC_TEMPLATE=\ return {}; }} """ -CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE = \ -""" +CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE = """ paddle::optional {}_optional; if({}.initialized()) {}_optional = paddle::make_optional({}); """ -CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \ -""" +CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = """ paddle::optional {}_optional; if( {}.impl() ) {}_optional = paddle::make_optional({}); """ -CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = \ -""" 
+CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = """ paddle::optional> {}_optional; if( !{}.empty() ) {}_optional = paddle::make_optional>({}); """ -CHECK_BACKWARD_INPLACE_TEMPLATE = \ -""" +CHECK_BACKWARD_INPLACE_TEMPLATE = """ bool can_be_inplaced = false; if ({}.initialized()) {{ VLOG(10) << {}.name() << "({}) use_count: " << {}.impl().use_count(); @@ -473,15 +453,12 @@ CHECK_BACKWARD_INPLACE_TEMPLATE = \ }} }}""" -CHECK_NAN_AND_INF_TEMPLATE = \ -""" if (FLAGS_check_nan_inf) {{ egr::CheckTensorHasNanOrInf("{}", {}); }} +CHECK_NAN_AND_INF_TEMPLATE = """ if (FLAGS_check_nan_inf) {{ egr::CheckTensorHasNanOrInf("{}", {}); }} """ inplace_optional_out_type_map = { - "Tensor": - "paddle::optional&", - "std::vector": - "paddle::optional>&" + "Tensor": "paddle::optional&", + "std::vector": "paddle::optional>&", } @@ -490,13 +467,16 @@ def ExtractForwardApiNameFormInvoke(invoke_config): if api_name[-1] == '_': api_name = api_name[:-1] return re.search( - r"(?P[a-zA-Z0-9_]+)(?P_intermediate)?", - api_name).group('api_name') + r"(?P[a-zA-Z0-9_]+)(?P_intermediate)?", api_name + ).group('api_name') def IsInvokeForwardApi(api_contents, forward_api_name_list): - return 'invoke' in api_contents and ExtractForwardApiNameFormInvoke( - api_contents['invoke']) in forward_api_name_list + return ( + 'invoke' in api_contents + and ExtractForwardApiNameFormInvoke(api_contents['invoke']) + in forward_api_name_list + ) ####################### @@ -531,7 +511,8 @@ def GenerateCoreOpInfoDefinition(): op_returns_info_str = "\n".join(op_returns_info_list) core_ops_info_definition_str = CORE_OPS_INFO_TEMPLATE.format( - op_args_info_str, op_types_info_str, op_returns_info_str) + op_args_info_str, op_types_info_str, op_returns_info_str + ) return core_ops_info_definition_str @@ -540,23 +521,27 @@ def GenerateCoreOpInfoDefinition(): ## Generator Class ## ##################### class DygraphFunctionGeneratorBase(FunctionGeneratorBase): - - def __init__(self, forward_api_contents, grad_api_contents, - forward_apis_dict, namespace): + def __init__( + self, + forward_api_contents, + grad_api_contents, + forward_apis_dict, + namespace, + ): self.forward_api_contents = forward_api_contents # Members from Parent: - #self.namespace - #self.forward_api_contents - #self.forward_api_name - #self.orig_forward_inputs_list - #self.orig_forward_attrs_list - #self.orig_forward_returns_list - #self.forward_inputs_position_map - #self.forward_outputs_position_map - #self.optional_inputs - #self.no_need_buffers - #self.intermediate_outputs - #self.forward_inplace_map + # self.namespace + # self.forward_api_contents + # self.forward_api_name + # self.orig_forward_inputs_list + # self.orig_forward_attrs_list + # self.orig_forward_returns_list + # self.forward_inputs_position_map + # self.forward_outputs_position_map + # self.optional_inputs + # self.no_need_buffers + # self.intermediate_outputs + # self.forward_inplace_map FunctionGeneratorBase.__init__(self, forward_api_contents, namespace) self.forward_apis_dict = forward_apis_dict @@ -566,33 +551,43 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): self.backward_forward_str = "" self.backward_api_name = "" - self.forward_attrs_list = [ - ] #[ [attr_name, attr_type, default_value, orig_position], ...] - self.forward_inputs_list = [ - ] #[ [arg_name, arg_type, orig_position], ...] - self.forward_returns_list = [ - ] #[ [ret_name, ret_type, orig_position], ...] - - self.backward_attrs_list = [ - ] #[ [attr_name, attr_type, default_value, orig_position], ...] 
- self.backward_inputs_list = [ - ] #[ [arg_name, arg_type, orig_position], ...] - self.backward_returns_list = [ - ] #[ [ret_name, ret_type, orig_position], ...] + self.forward_attrs_list = ( + [] + ) # [ [attr_name, attr_type, default_value, orig_position], ...] + self.forward_inputs_list = ( + [] + ) # [ [arg_name, arg_type, orig_position], ...] + self.forward_returns_list = ( + [] + ) # [ [ret_name, ret_type, orig_position], ...] + + self.backward_attrs_list = ( + [] + ) # [ [attr_name, attr_type, default_value, orig_position], ...] + self.backward_inputs_list = ( + [] + ) # [ [arg_name, arg_type, orig_position], ...] + self.backward_returns_list = ( + [] + ) # [ [ret_name, ret_type, orig_position], ...] # SlotNameMatched Backward Data - self.backward_forward_inputs_map = { - } #{ "name" : [type, is_fwd_input, orig_position] ...} - self.backward_grad_inputs_map = { - } #{ "name" : [type, fwd_position, orig_position] ...} - self.backward_grad_outputs_map = { - } #{ "name" : [type, fwd_position, orig_position] ...} - - self.backward_inplace_map = {} #{name : name, ...} + self.backward_forward_inputs_map = ( + {} + ) # { "name" : [type, is_fwd_input, orig_position] ...} + self.backward_grad_inputs_map = ( + {} + ) # { "name" : [type, fwd_position, orig_position] ...} + self.backward_grad_outputs_map = ( + {} + ) # { "name" : [type, fwd_position, orig_position] ...} + + self.backward_inplace_map = {} # {name : name, ...} def ParseBackwardInplaceInfo(self): grad_api_contents = self.grad_api_contents - if 'inplace' not in grad_api_contents.keys(): return + if 'inplace' not in grad_api_contents.keys(): + return inplace_map_str = grad_api_contents['inplace'] self.backward_inplace_map = ParseYamlInplaceInfo(inplace_map_str) @@ -601,21 +596,28 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): forward_api_contents = self.forward_api_contents grad_api_contents = self.grad_api_contents - assert 'op' in forward_api_contents.keys( + assert ( + 'op' in forward_api_contents.keys() ), "Unable to find \"op\" in ops.yaml" - assert 'args' in forward_api_contents.keys( + assert ( + 'args' in forward_api_contents.keys() ), "Unable to find \"args\" in ops.yaml" - assert 'output' in forward_api_contents.keys( + assert ( + 'output' in forward_api_contents.keys() ), "Unable to find \"output\" in ops.yaml" if grad_api_contents is not None: - assert 'backward' in forward_api_contents.keys( + assert ( + 'backward' in forward_api_contents.keys() ), "Unable to find \"backward\" in ops.yaml" - assert 'args' in grad_api_contents.keys( + assert ( + 'args' in grad_api_contents.keys() ), "Unable to find \"args\" in backward.yaml" - assert 'output' in grad_api_contents.keys( + assert ( + 'output' in grad_api_contents.keys() ), "Unable to find \"output\" in backward.yaml" - assert 'forward' in grad_api_contents.keys( + assert ( + 'forward' in grad_api_contents.keys() ), "Unable to find \"forward\" in backward.yaml" def ForwardsValidationCheck(self): @@ -634,9 +636,11 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): orig_input_pos = orig_forward_inputs_list[i][2] assert forward_input_type == orig_input_type, AssertMessage( - forward_input_type, orig_input_type) + forward_input_type, orig_input_type + ) assert forward_input_pos == orig_input_pos, AssertMessage( - forward_input_pos, orig_input_pos) + forward_input_pos, orig_input_pos + ) for i in range(len(forward_attrs_list)): orig_attr_type = orig_forward_attrs_list[i][1] @@ -644,9 +648,11 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): 
forward_attr_type = forward_attrs_list[i][1] forward_attr_pos = forward_attrs_list[i][3] assert orig_attr_type == forward_attr_type, AssertMessage( - orig_attr_type, forward_attr_type) + orig_attr_type, forward_attr_type + ) assert orig_attr_pos == forward_attr_pos, AssertMessage( - orig_attr_pos, forward_attr_pos) + orig_attr_pos, forward_attr_pos + ) for i in range(len(forward_returns_list)): orig_return_type = orig_forward_returns_list[i][1] @@ -655,9 +661,11 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): forward_return_pos = forward_returns_list[i][2] assert orig_return_type == forward_return_type, AssertMessage( - orig_return_type, forward_return_type) + orig_return_type, forward_return_type + ) assert orig_return_pos == forward_return_pos, AssertMessage( - orig_return_pos, forward_return_pos) + orig_return_pos, forward_return_pos + ) # Check Order: Inputs, Attributes max_input_position = -1 @@ -666,7 +674,8 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): for _, _, _, pos in forward_attrs_list: assert pos > max_input_position, AssertMessage( - pos, max_input_position) + pos, max_input_position + ) def BackwardValidationCheck(self): backward_forward_inputs_map = self.backward_forward_inputs_map @@ -681,13 +690,15 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): max_grad_tensor_position = -1 for _, (_, _, pos) in backward_grad_inputs_map.items(): assert pos > max_fwd_input_position, AssertMessage( - pos, max_grad_tensor_position) + pos, max_grad_tensor_position + ) max_grad_tensor_position = max(max_grad_tensor_position, pos) max_attr_position = -1 for _, _, _, pos in backward_attrs_list: assert pos > max_grad_tensor_position, AssertMessage( - pos, max_grad_tensor_position) + pos, max_grad_tensor_position + ) max_attr_position = max(max_attr_position, pos) def IntermediateValidationCheck(self): @@ -699,11 +710,13 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): """ intermediate_positions = range( len(forward_returns_list) - len(intermediate_outputs), - len(forward_returns_list)) + len(forward_returns_list), + ) for ret_name, _, pos in forward_returns_list: if ret_name in intermediate_outputs: assert pos in intermediate_positions, AssertMessage( - pos, intermediate_positions) + pos, intermediate_positions + ) def CollectBackwardInfo(self): forward_api_contents = self.forward_api_contents @@ -714,20 +727,32 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): backward_args_str = grad_api_contents['args'] backward_returns_str = grad_api_contents['output'] - self.backward_inputs_list, self.backward_attrs_list, self.backward_returns_list = ParseYamlBackward( - backward_args_str, backward_returns_str) + ( + self.backward_inputs_list, + self.backward_attrs_list, + self.backward_returns_list, + ) = ParseYamlBackward(backward_args_str, backward_returns_str) def CollectForwardInfoFromBackwardContents(self): backward_forward_str = self.backward_forward_str - self.forward_inputs_list, self.forward_attrs_list, self.forward_returns_list = ParseYamlForwardFromBackward( - backward_forward_str) + ( + self.forward_inputs_list, + self.forward_attrs_list, + self.forward_returns_list, + ) = ParseYamlForwardFromBackward(backward_forward_str) def CollectForwardInfoFromYamlForward(self): - self.forward_inputs_list, self.forward_attrs_list, self.forward_returns_list = ParseYamlForwardFromBackward( - self.forward_api_contents['args'] + " -> " + - self.forward_api_contents['output']) + ( + self.forward_inputs_list, + self.forward_attrs_list, + 
self.forward_returns_list, + ) = ParseYamlForwardFromBackward( + self.forward_api_contents['args'] + + " -> " + + self.forward_api_contents['output'] + ) def SlotNameMatching(self): backward_inputs_list = self.backward_inputs_list @@ -743,35 +768,48 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): backward_fwd_name = FindForwardName(backward_input_name) if backward_fwd_name: # Grad Input - assert backward_fwd_name in forward_outputs_position_map.keys( - ), AssertMessage(backward_fwd_name, - forward_outputs_position_map.keys()) + assert ( + backward_fwd_name in forward_outputs_position_map.keys() + ), AssertMessage( + backward_fwd_name, forward_outputs_position_map.keys() + ) matched_forward_output_type = forward_outputs_position_map[ - backward_fwd_name][0] + backward_fwd_name + ][0] matched_forward_output_pos = forward_outputs_position_map[ - backward_fwd_name][1] + backward_fwd_name + ][1] self.backward_grad_inputs_map[backward_input_name] = [ - backward_input_type, matched_forward_output_pos, - backward_input_pos + backward_input_type, + matched_forward_output_pos, + backward_input_pos, ] else: # TensorWrapper Input if backward_input_name in forward_inputs_position_map.keys(): tensor_wrapper_type = forward_inputs_position_map[ - backward_input_name][0] + backward_input_name + ][0] self.backward_forward_inputs_map[backward_input_name] = [ - backward_input_type, True, backward_input_pos + backward_input_type, + True, + backward_input_pos, ] elif backward_input_name in forward_outputs_position_map.keys(): tensor_wrapper_type = forward_outputs_position_map[ - backward_input_name][0] + backward_input_name + ][0] self.backward_forward_inputs_map[backward_input_name] = [ - backward_input_type, False, backward_input_pos + backward_input_type, + False, + backward_input_pos, ] else: - assert False, f"Cannot find {backward_input_name} in forward position map" + assert ( + False + ), f"Cannot find {backward_input_name} in forward position map" for backward_output in backward_returns_list: backward_output_name = backward_output[0] @@ -779,19 +817,26 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): backward_output_pos = backward_output[2] backward_fwd_name = FindForwardName(backward_output_name) - assert backward_fwd_name is not None, f"Detected {backward_fwd_name} = None" - assert backward_fwd_name in forward_inputs_position_map.keys( - ), AssertMessage(backward_fwd_name, - forward_inputs_position_map.keys()) + assert ( + backward_fwd_name is not None + ), f"Detected {backward_fwd_name} = None" + assert ( + backward_fwd_name in forward_inputs_position_map.keys() + ), AssertMessage( + backward_fwd_name, forward_inputs_position_map.keys() + ) matched_forward_input_type = forward_inputs_position_map[ - backward_fwd_name][0] + backward_fwd_name + ][0] matched_forward_input_pos = forward_inputs_position_map[ - backward_fwd_name][1] + backward_fwd_name + ][1] self.backward_grad_outputs_map[backward_output_name] = [ - backward_output_type, matched_forward_input_pos, - backward_output_pos + backward_output_type, + matched_forward_input_pos, + backward_output_pos, ] def GetPassStopGradientArgsList(self, forward_outputs_position_map): @@ -815,7 +860,8 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): # Pass Stop Gradient Args pass_stop_gradient_args_str = self.GetPassStopGradientArgsList( - forward_outputs_position_map) + forward_outputs_position_map + ) # Node Construction num_backward_inputs = len(forward_outputs_position_map.keys()) @@ -839,7 +885,9 @@ class 
DygraphFunctionGeneratorBase(FunctionGeneratorBase): for name, _, default_val_attr, _ in backward_attrs_list: if name in forward_attrs_name_set: - set_attributes = f"{indent}grad_node->SetAttribute{name}({name});" + set_attributes = ( + f"{indent}grad_node->SetAttribute{name}({name});" + ) else: set_attributes = f"{indent}grad_node->SetAttribute{name}({default_val_attr});" set_attributes_list.append(set_attributes) @@ -849,31 +897,41 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): set_input_tensor_wrappers_list = [] set_output_tensor_wrappers_list = [] num_fwd_outputs = len(forward_outputs_position_map.keys()) - for name, (atype, is_fwd_input, - pos) in backward_forward_inputs_map.items(): - is_optional = (name in optional_inputs) + for name, ( + atype, + is_fwd_input, + pos, + ) in backward_forward_inputs_map.items(): + is_optional = name in optional_inputs if is_fwd_input: if is_optional: set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});" else: - set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});" + set_tensor_wrappers = ( + f"{indent}grad_node->SetTensorWrapper{name}({name});" + ) set_input_tensor_wrappers_list.append(set_tensor_wrappers) else: # Forwad's output as backward's input if num_fwd_outputs > 1: # Aligned with forward output position - assert name in forward_outputs_position_map.keys( + assert ( + name in forward_outputs_position_map.keys() ), AssertMessage(name, forward_outputs_position_map.keys()) if is_optional: set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});" else: - set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});" + set_tensor_wrappers = ( + f"{indent}grad_node->SetTensorWrapper{name}({name});" + ) set_output_tensor_wrappers_list.append(set_tensor_wrappers) set_input_tensor_wrappers_str = "\n".join( - set_input_tensor_wrappers_list) + set_input_tensor_wrappers_list + ) set_output_tensor_wrappers_str = "\n".join( - set_output_tensor_wrappers_list) + set_output_tensor_wrappers_list + ) # SetGradOutMeta & SetEdges grad_node_out_list = [] @@ -882,19 +940,24 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): for name, (_, pos) in forward_inputs_position_map.items(): # Has corresponding grad output has_corresponding_grad_output = False - for _, (_, corresponding_pos, - _) in backward_grad_outputs_map.items(): + for _, ( + _, + corresponding_pos, + _, + ) in backward_grad_outputs_map.items(): if pos == corresponding_pos: has_corresponding_grad_output = True if not has_corresponding_grad_output: continue grad_node_out_list.append(name) - is_optional = (name in self.optional_inputs) + is_optional = name in self.optional_inputs if is_optional: set_grad_out_meta = f"{indent}if({name}.get_ptr() != nullptr) grad_node->SetGradOutMeta(*({name}.get_ptr()), {pos});" else: - set_grad_out_meta = f"{indent}grad_node->SetGradOutMeta({name}, {pos});" + set_grad_out_meta = ( + f"{indent}grad_node->SetGradOutMeta({name}, {pos});" + ) set_grad_out_meta_list.append(set_grad_out_meta) set_grad_out_meta_str = "\n".join(set_grad_out_meta_list) @@ -915,8 +978,12 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): {indent} egr::EagerUtils::SetHistory({output_autograd_meta_name}, grad_node); {indent}}}""" - set_grad_in_meta = f"{indent}grad_node->SetGradInMeta({name}, {pos});" - set_retain_grad = f"{indent}egr::EagerUtils::CheckAndRetainGrad({name});" + set_grad_in_meta = ( + f"{indent}grad_node->SetGradInMeta({name}, {pos});" + ) + 
set_retain_grad = ( + f"{indent}egr::EagerUtils::CheckAndRetainGrad({name});" + ) set_out_rank_list.append(set_out_rank) set_history_list.append(set_history) @@ -932,18 +999,33 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): node_creation_event_str = f"{indent}paddle::platform::RecordEvent node_creation_record_event(\"{node_event_name}\", paddle::platform::TracerEventType::OperatorInner, 1);\n" if not for_backward: self.node_creation_str = FORWARD_BODY_TEMPLATE.format( - node_creation_event_str, pass_stop_gradient_args_str, - node_construction_str, set_attributes_str, - set_input_tensor_wrappers_str, set_grad_out_meta_str, - set_out_rank_str, set_history_str, set_grad_in_meta_str, - set_retain_grad_str, set_output_tensor_wrappers_str) + node_creation_event_str, + pass_stop_gradient_args_str, + node_construction_str, + set_attributes_str, + set_input_tensor_wrappers_str, + set_grad_out_meta_str, + set_out_rank_str, + set_history_str, + set_grad_in_meta_str, + set_retain_grad_str, + set_output_tensor_wrappers_str, + ) else: - self.node_creation_str = HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE.format( - node_creation_event_str, node_construction_str, - set_attributes_str, set_input_tensor_wrappers_str, - set_grad_out_meta_str, set_out_rank_str, set_history_str, - set_grad_in_meta_str, set_retain_grad_str, - set_output_tensor_wrappers_str) + self.node_creation_str = ( + HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE.format( + node_creation_event_str, + node_construction_str, + set_attributes_str, + set_input_tensor_wrappers_str, + set_grad_out_meta_str, + set_out_rank_str, + set_history_str, + set_grad_in_meta_str, + set_retain_grad_str, + set_output_tensor_wrappers_str, + ) + ) self.grad_node_out_list = grad_node_out_list @@ -988,8 +1070,9 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): ## Process Parsed Contents ## ############################# # Initialize forward_inputs_position_map, forward_outputs_position_map - self.DetermineForwardPositionMap(self.forward_inputs_list, - self.forward_returns_list) + self.DetermineForwardPositionMap( + self.forward_inputs_list, self.forward_returns_list + ) if self.grad_api_contents is not None: # Initialize backward_forward_inputs_map, backward_grad_inputs_map, backward_grad_outputs_map @@ -999,31 +1082,51 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase): class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): - - def __init__(self, forward_api_contents, grad_api_contents, - forward_apis_dict, namespace): - DygraphFunctionGeneratorBase.__init__(self, forward_api_contents, - grad_api_contents, - forward_apis_dict, namespace) + def __init__( + self, + forward_api_contents, + grad_api_contents, + forward_apis_dict, + namespace, + ): + DygraphFunctionGeneratorBase.__init__( + self, + forward_api_contents, + grad_api_contents, + forward_apis_dict, + namespace, + ) # Generated Results self.forward_definition_str = "" self.forward_declaration_str = "" - def GenerateForwardLayoutAutotune(self, forward_api_name, - amp_tensors_vector_list, - layout_tensors_vector_optional_list, - layout_autotune_list_str, - returns_type_str, returns_str, - amp_inputs_call_args_str): + def GenerateForwardLayoutAutotune( + self, + forward_api_name, + amp_tensors_vector_list, + layout_tensors_vector_optional_list, + layout_autotune_list_str, + returns_type_str, + returns_str, + amp_inputs_call_args_str, + ): intermediate_outputs = self.intermediate_outputs forward_attrs_list = self.forward_attrs_list forward_outputs_position_map = 
self.forward_outputs_position_map - num_outputs = len( - forward_outputs_position_map.keys()) - len(intermediate_outputs) + num_outputs = len(forward_outputs_position_map.keys()) - len( + intermediate_outputs + ) # for layout autotune attr lightly_sensitive_attr = [ - 'axis', 'axes', 'dim', 'dims', 'start', 'end', 'stop', 'perm' + 'axis', + 'axes', + 'dim', + 'dims', + 'start', + 'end', + 'stop', + 'perm', ] heavily_sensitive_attr = ['data_format', 'data_layout'] layout_autotune_attr = [] @@ -1037,15 +1140,17 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): heavily_flag = False for name, atype, default_val, pos in forward_attrs_list: for attr_name in lightly_sensitive_attr: - if name.find(attr_name) != -1 and (name - not in layout_autotune_attr): + if name.find(attr_name) != -1 and ( + name not in layout_autotune_attr + ): lightly_flag = True layout_autotune_attr.append(name) layout_autotune_attr_type_list.append(atype) if lightly_flag is False: for attr_name in heavily_sensitive_attr: if name.find(attr_name) != -1 and ( - name not in layout_autotune_attr): + name not in layout_autotune_attr + ): layout_autotune_attr.append(name) layout_autotune_attr_type_list.append(atype) heavily_flag = True @@ -1074,7 +1179,8 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): if num_outputs == 1: result_name = returns_str layout_autotune_outs_list.append( - f"transformer -> SetOutTensorLayout(&{returns_str});\n") + f"transformer -> SetOutTensorLayout(&{returns_str});\n" + ) else: for name, (rtype, pos) in forward_outputs_position_map.items(): if name in intermediate_outputs: @@ -1083,11 +1189,13 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): f" auto& {name} = std::get<{len(layout_tmp_result_list)}>(api_result);\n" ) layout_autotune_outs_list.append( - f" transformer -> SetOutTensorLayout(&{name});\n") + f" transformer -> SetOutTensorLayout(&{name});\n" + ) layout_tmp_result_list.append(f"{name}") - tensors_vector_list_str = "{ " + ",".join( - amp_tensors_vector_list) + " }" + tensors_vector_list_str = ( + "{ " + ",".join(amp_tensors_vector_list) + " }" + ) if len(amp_tensors_vector_list) == 0: layout_logic_str = "" @@ -1096,9 +1204,13 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): layout_logic_str = LAYOUT_LOGIC_TEMPLATE.format( tensors_vector_list_str, " ".join(layout_tensors_vector_optional_list), - " ".join(layout_autotune_attr_code_list) + " " + - layout_autotune_list_str, after_call_str, - " ".join(layout_autotune_outs_list), returns_str) + " ".join(layout_autotune_attr_code_list) + + " " + + layout_autotune_list_str, + after_call_str, + " ".join(layout_autotune_outs_list), + returns_str, + ) return layout_logic_str @@ -1106,8 +1218,11 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): namespace = self.namespace if self.forward_api_name[-1] == '_' and not is_inplaced: return - forward_api_name = GetInplacedFunctionName( - self.forward_api_name) if is_inplaced else self.forward_api_name + forward_api_name = ( + GetInplacedFunctionName(self.forward_api_name) + if is_inplaced + else self.forward_api_name + ) forward_inputs_position_map = self.forward_inputs_position_map forward_outputs_position_map = self.forward_outputs_position_map @@ -1122,7 +1237,8 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): # Get Function Args num_inputs = len(forward_attrs_list) + len( - forward_inputs_position_map.keys()) + forward_inputs_position_map.keys() + ) 
inputs_args_definition_list = ["" for i in range(num_inputs)] inputs_args_declaration_list = ["" for i in range(num_inputs)] inputs_call_list = ["" for i in range(num_inputs)] @@ -1138,10 +1254,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): for name, (ttype, pos) in forward_inputs_position_map.items(): inputs_call_list[pos] = f"{name}" amp_inputs_call_list[pos] = f"new_{name}" - is_optional = (name in optional_inputs) + is_optional = name in optional_inputs if IsPlainTensorType(ttype): if is_optional: - if self.is_forward_only and is_inplaced and forward_inplace_map and name in forward_inplace_map.keys( + if ( + self.is_forward_only + and is_inplaced + and forward_inplace_map + and name in forward_inplace_map.keys() ): arg_str = f"paddle::optional& {name}" else: @@ -1159,7 +1279,10 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): f"auto new_{name} = transformer->TransInTensor(\"{name}\", {name});\n" ) else: - if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys( + if ( + is_inplaced + and forward_inplace_map + and name in forward_inplace_map.keys() ): arg_str = f"paddle::experimental::Tensor& {name}" amp_tensors_vector_list.append(f"{{{name}}}") @@ -1178,7 +1301,11 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): else: assert IsVectorTensorType(ttype) if is_optional: - if self.is_forward_only and is_inplaced and forward_inplace_map and name in forward_inplace_map.keys( + if ( + self.is_forward_only + and is_inplaced + and forward_inplace_map + and name in forward_inplace_map.keys() ): arg_str = f"paddle::optional>& {name}" else: @@ -1193,9 +1320,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): f"auto new_{name} = transformer->TransInTensors(\"{name}\", {name});\n" ) else: - if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys( + if ( + is_inplaced + and forward_inplace_map + and name in forward_inplace_map.keys() ): - arg_str = f"std::vector& {name}" + arg_str = ( + f"std::vector& {name}" + ) else: arg_str = f"const std::vector& {name}" amp_tensors_vector_list.append(f"{name}") @@ -1215,7 +1347,8 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): amp_inputs_call_list[pos] = name if default_val is not None: inputs_args_declaration_list[ - pos] = f"{atype} {name} = {default_val}" + pos + ] = f"{atype} {name} = {default_val}" else: inputs_args_declaration_list[pos] = f"{atype} {name}" inputs_args_definition_list[pos] = f"{atype} {name}" @@ -1228,8 +1361,9 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): function_name = forward_api_name if len(intermediate_outputs) > 0: if is_inplaced: - function_name = GetIntermediateAPIFunctionName( - forward_api_name[:-1]) + '_' + function_name = ( + GetIntermediateAPIFunctionName(forward_api_name[:-1]) + '_' + ) else: function_name = GetIntermediateAPIFunctionName(function_name) @@ -1237,12 +1371,14 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): if is_inplaced and len(forward_outputs_position_map) == 1: api_out_type = "auto&" forward_call_str = f"{indent}{api_out_type} api_result = paddle::experimental::{namespace}{function_name}({inputs_call_args_str});" - num_outputs = len( - forward_outputs_position_map.keys()) - len(intermediate_outputs) + num_outputs = len(forward_outputs_position_map.keys()) - len( + intermediate_outputs + ) # Check Nan and Inf check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format( - function_name, "api_result") + 
function_name, "api_result" + ) # Get Outputs get_outputs_str = "" @@ -1250,7 +1386,9 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): if num_outputs == 1 and len(intermediate_outputs) == 0: get_outputs_str += f"{indent}auto& {name} = api_result;\n" else: - get_outputs_str += f"{indent}auto& {name} = std::get<{pos}>(api_result);\n" + get_outputs_str += ( + f"{indent}auto& {name} = std::get<{pos}>(api_result);\n" + ) # Get return type list & outputs returns_type_list = ["" for i in range(num_outputs)] @@ -1261,32 +1399,46 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): returns_list[pos] = f"{name}" if IsPlainTensorType(rtype): - if is_inplaced and forward_inplace_map and name in forward_inplace_map.values( + if ( + is_inplaced + and forward_inplace_map + and name in forward_inplace_map.values() ): ind = list(forward_inplace_map.values()).index(name) - if list(forward_inplace_map.keys() - )[ind] in self.optional_inputs: + if ( + list(forward_inplace_map.keys())[ind] + in self.optional_inputs + ): returns_type_list[pos] = inplace_optional_out_type_map[ - rtype] + rtype + ] else: returns_type_list[pos] = "paddle::experimental::Tensor&" else: returns_type_list[pos] = "paddle::experimental::Tensor" else: assert IsVectorTensorType(rtype) - if is_inplaced and forward_inplace_map and name in forward_inplace_map.values( + if ( + is_inplaced + and forward_inplace_map + and name in forward_inplace_map.values() ): ind = list(forward_inplace_map.values()).index(name) - if list(forward_inplace_map.keys() - )[ind] in self.optional_inputs: + if ( + list(forward_inplace_map.keys())[ind] + in self.optional_inputs + ): returns_type_list[pos] = inplace_optional_out_type_map[ - rtype] + rtype + ] else: returns_type_list[ - pos] = "std::vector&" + pos + ] = "std::vector&" else: returns_type_list[ - pos] = "std::vector" + pos + ] = "std::vector" if num_outputs == 1: returns_str = returns_list[0] @@ -1306,29 +1458,40 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): for name, (ttype, pos) in forward_inputs_position_map.items(): # Has corresponding grad output has_corresponding_grad_output = False - for _, (_, corresponding_pos, - _) in backward_grad_outputs_map.items(): + for _, ( + _, + corresponding_pos, + _, + ) in backward_grad_outputs_map.items(): if pos == corresponding_pos: has_corresponding_grad_output = True - if has_corresponding_grad_output or ( - name in forward_inplace_map and forward_api_name - not in inplace_check_blacklist) or self.is_forward_only: + if ( + has_corresponding_grad_output + or ( + name in forward_inplace_map + and forward_api_name not in inplace_check_blacklist + ) + or self.is_forward_only + ): input_autograd_meta_name = GetAutoGradMetaName(name) if IsPlainTensorType(ttype): input_autograd_meta = f"{indent}egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});" else: assert IsVectorTensorType(ttype) - input_autograd_meta_vec_name = GetAutoGradMetaVectorName( - name) + input_autograd_meta_vec_name = ( + GetAutoGradMetaVectorName(name) + ) input_autograd_meta = f"{indent}std::vector {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n" input_autograd_meta += f"{indent}std::vector* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};" inputs_autograd_meta_list.append(input_autograd_meta) compute_require_grad_args_list.append( - input_autograd_meta_name) + input_autograd_meta_name + ) inputs_autograd_meta_str = 
"\n".join(inputs_autograd_meta_list) compute_require_grad_args_str = ",".join( - compute_require_grad_args_list) + compute_require_grad_args_list + ) # 2. Get Output AutoGradMeta outputs_autograd_meta_list = [] @@ -1363,11 +1526,16 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): for inplace_name in forward_inplace_map.keys(): if forward_api_name not in inplace_check_blacklist: inplace_autograd_meta_name = GetAutoGradMetaName( - inplace_name) + inplace_name + ) check_inplace_str += CHECK_INPLACE_TEMPLATE.format( - inplace_name, inplace_autograd_meta_name) - bump_inplace_version_str += BUMP_INPLACE_VERSION_TEMPLATE.format( - inplace_name, inplace_name) + inplace_name, inplace_autograd_meta_name + ) + bump_inplace_version_str += ( + BUMP_INPLACE_VERSION_TEMPLATE.format( + inplace_name, inplace_name + ) + ) # Node Creation self.GenerateNodeCreationCodes() @@ -1375,36 +1543,56 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): dygraph_event_str = f"{indent}paddle::platform::RecordEvent dygraph_entrance_record_event(\"{forward_api_name} dygraph\", paddle::platform::TracerEventType::Operator, 1);\n" forward_ad_function_name = GetDygraphForwardFunctionName( - forward_api_name) + forward_api_name + ) # Forward amp logic - kernel_trans2_op_name_str = f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");" - amp_tensors_vector_list_str = "{ " + ",".join( - amp_tensors_vector_list) + " }" + kernel_trans2_op_name_str = ( + f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");" + ) + amp_tensors_vector_list_str = ( + "{ " + ",".join(amp_tensors_vector_list) + " }" + ) amp_tensors_vector_optional_list_str = " ".join( - amp_tensors_vector_optional_list) + amp_tensors_vector_optional_list + ) amp_get_dst_dtype_str = "auto amp_dst_dtype = egr::GetAmpDestDtype(op_name, amp_tensors_vector);\n" - amp_autocast_list_str = " ".join( - amp_autocast_list) + " " + " ".join( - amp_autocast_optional_list) + amp_autocast_list_str = ( + " ".join(amp_autocast_list) + + " " + + " ".join(amp_autocast_optional_list) + ) amp_inputs_call_args_str = ", ".join(amp_inputs_call_list) - amp_call_str = f"return {forward_ad_function_name}({amp_inputs_call_args_str});" + amp_call_str = ( + f"return {forward_ad_function_name}({amp_inputs_call_args_str});" + ) if is_inplaced or (forward_api_name == "cast"): amp_logic_str = "\n VLOG(5) << \" No AMP for {} because it is a inplace or cast api. 
\"; ".format( - forward_ad_function_name) + forward_ad_function_name + ) else: amp_logic_str = AMP_LOGIC_TEMPLATE.format( - kernel_trans2_op_name_str, amp_tensors_vector_list_str, - amp_tensors_vector_optional_list_str, amp_get_dst_dtype_str, - amp_autocast_list_str, amp_call_str) + kernel_trans2_op_name_str, + amp_tensors_vector_list_str, + amp_tensors_vector_optional_list_str, + amp_get_dst_dtype_str, + amp_autocast_list_str, + amp_call_str, + ) # Forward layout autotune layout_autotune_list_str = " ".join( - layout_autotune_list) + " ".join(layout_autotune_optional_list) + layout_autotune_list + ) + " ".join(layout_autotune_optional_list) layout_logic_str = self.GenerateForwardLayoutAutotune( - forward_api_name, amp_tensors_vector_list, - layout_tensors_vector_optional_list, layout_autotune_list_str, - returns_type_str, returns_str, amp_inputs_call_args_str) + forward_api_name, + amp_tensors_vector_list, + layout_tensors_vector_optional_list, + layout_autotune_list_str, + returns_type_str, + returns_str, + amp_inputs_call_args_str, + ) # For inputs outputs prepare for logging var_str = f"\n{indent} std::string input_str = \"\";" @@ -1426,23 +1614,50 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): if self.is_forward_only: if len(amp_tensors_vector_list) == 0: amp_logic_str = "\n VLOG(7) << \" No AMP for {} because it has no input. \"; ".format( - forward_ad_function_name) - self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format( - returns_type_str, forward_ad_function_name, - inputs_args_definition_str, forward_api_name, dygraph_event_str, - amp_logic_str, layout_logic_str, forward_api_name, - before_log_str, forward_call_str, get_outputs_str, - forward_api_name, log_str, returns_str) + forward_ad_function_name + ) + self.forward_definition_str += ( + FORWARD_ONLY_FUNCTION_TEMPLATE.format( + returns_type_str, + forward_ad_function_name, + inputs_args_definition_str, + forward_api_name, + dygraph_event_str, + amp_logic_str, + layout_logic_str, + forward_api_name, + before_log_str, + forward_call_str, + get_outputs_str, + forward_api_name, + log_str, + returns_str, + ) + ) else: self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format( - returns_type_str, forward_ad_function_name, - inputs_args_definition_str, forward_api_name, dygraph_event_str, - amp_logic_str, layout_logic_str, inputs_autograd_meta_str, - forward_api_name, before_log_str, forward_call_str, - check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str, - compute_require_grad_args_str, check_inplace_str, - bump_inplace_version_str, node_creation_str, forward_api_name, - log_str, returns_str) + returns_type_str, + forward_ad_function_name, + inputs_args_definition_str, + forward_api_name, + dygraph_event_str, + amp_logic_str, + layout_logic_str, + inputs_autograd_meta_str, + forward_api_name, + before_log_str, + forward_call_str, + check_nan_inf_str, + get_outputs_str, + outputs_autograd_meta_str, + compute_require_grad_args_str, + check_inplace_str, + bump_inplace_version_str, + node_creation_str, + forward_api_name, + log_str, + returns_str, + ) self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n" @@ -1451,21 +1666,27 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): forward_api_name = self.forward_api_name forward_api_contents = self.forward_api_contents - if forward_api_name != "sum" and "inplace" in forward_api_contents.keys( + if ( + forward_api_name != "sum" + and "inplace" in 
forward_api_contents.keys() ): # Function Definition and Declaration Generation self.GenerateForwardDefinitionAndDeclaration(is_inplaced=True) self.UpdateCoreOpsInformation(is_inplaced=True) def UpdateCoreOpsInformation(self, is_inplaced): - forward_api_name = GetInplacedFunctionName( - self.forward_api_name) if is_inplaced else self.forward_api_name + forward_api_name = ( + GetInplacedFunctionName(self.forward_api_name) + if is_inplaced + else self.forward_api_name + ) forward_inputs_position_map = self.forward_inputs_position_map forward_outputs_position_map = self.forward_outputs_position_map forward_attrs_list = self.forward_attrs_list - num_args = len( - forward_inputs_position_map.keys()) + len(forward_attrs_list) + num_args = len(forward_inputs_position_map.keys()) + len( + forward_attrs_list + ) num_returns = len(forward_outputs_position_map.keys()) fwd_api_name = "" + forward_api_name @@ -1503,16 +1724,21 @@ class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase): class DygraphNodeGenerator(DygraphFunctionGeneratorBase): - - def __init__(self, - forward_api_contents, - grad_api_contents, - forward_apis_dict, - namespace, - next_grad_api_contents=None): - DygraphFunctionGeneratorBase.__init__(self, forward_api_contents, - grad_api_contents, - forward_apis_dict, namespace) + def __init__( + self, + forward_api_contents, + grad_api_contents, + forward_apis_dict, + namespace, + next_grad_api_contents=None, + ): + DygraphFunctionGeneratorBase.__init__( + self, + forward_api_contents, + grad_api_contents, + forward_apis_dict, + namespace, + ) # Record name mapping from forward_var_name to grad_var_names self.to_next_grad_name_mapping = {} # {name : name} @@ -1569,8 +1795,11 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): backward_api_contents = next_grad_api_contents next_node_generator = DygraphFunctionGeneratorBase( - forward_api_contents, backward_api_contents, forward_apis_dict, - namespace) + forward_api_contents, + backward_api_contents, + forward_apis_dict, + namespace, + ) next_node_generator.run() next_node_generator.GenerateNodeCreationCodes(True) @@ -1579,7 +1808,11 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): self.RecordGrad2NextGradNameMapping(next_node_generator) if next_node_generator is not None: - return next_grad_node_creation_str, next_grad_node_out_list, next_node_generator.backward_forward_inputs_map + return ( + next_grad_node_creation_str, + next_grad_node_out_list, + next_node_generator.backward_forward_inputs_map, + ) else: return next_grad_node_creation_str, next_grad_node_out_list, None @@ -1593,30 +1826,45 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): set_tensor_wrapper_methods_str = "" tensor_wrapper_members_str = "" clear_tensor_wrapper_str = "" - for tname, (ttype, is_fwd_input, - _) in backward_forward_inputs_map.items(): + for tname, ( + ttype, + is_fwd_input, + _, + ) in backward_forward_inputs_map.items(): no_need_buffer = "true" if tname in no_need_buffers else "false" tensor_wrapper_name = GetSavedName(tname) if IsPlainTensorType(ttype): - set_tensor_wrapper_methods_str += SET_PLAIN_TENSOR_WRAPPER_TEMPLATE.format( - tname, tname, tensor_wrapper_name, tname, no_need_buffer) + set_tensor_wrapper_methods_str += ( + SET_PLAIN_TENSOR_WRAPPER_TEMPLATE.format( + tname, tname, tensor_wrapper_name, tname, no_need_buffer + ) + ) - tensor_wrapper_members_str += PLAIN_TENSOR_MEMBER_TEMPLATE.format( - tensor_wrapper_name) + tensor_wrapper_members_str += ( + 
PLAIN_TENSOR_MEMBER_TEMPLATE.format(tensor_wrapper_name) + ) - clear_tensor_wrapper_str += CLEAR_TENSOR_WRAPPER_TEMPLATE.format( - tensor_wrapper_name) + clear_tensor_wrapper_str += ( + CLEAR_TENSOR_WRAPPER_TEMPLATE.format(tensor_wrapper_name) + ) else: assert IsVectorTensorType(ttype) - set_tensor_wrapper_methods_str += SET_VECTOR_TENSOR_WRAPPER_TEMPLATE.format( - tname, tname, tname, tensor_wrapper_name, no_need_buffer) + set_tensor_wrapper_methods_str += ( + SET_VECTOR_TENSOR_WRAPPER_TEMPLATE.format( + tname, tname, tname, tensor_wrapper_name, no_need_buffer + ) + ) - tensor_wrapper_members_str += VECTOR_TENSOR_MEMBER_TEMPLATE.format( - tensor_wrapper_name) + tensor_wrapper_members_str += ( + VECTOR_TENSOR_MEMBER_TEMPLATE.format(tensor_wrapper_name) + ) - clear_tensor_wrapper_str += CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE.format( - tensor_wrapper_name) + clear_tensor_wrapper_str += ( + CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE.format( + tensor_wrapper_name + ) + ) # SetAttributes & Attribute Members set_attribute_methods_str = "" @@ -1624,27 +1872,44 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): for aname, atype, default_val, _ in backward_attrs_list: saved_attr_name = GetSavedName(aname) set_attribute_methods_str += SET_ATTR_METHOD_TEMPLATE.format( - aname, GetConstReference(atype), aname, saved_attr_name, aname) + aname, GetConstReference(atype), aname, saved_attr_name, aname + ) if default_val: - attribute_members_str += ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE.format( - RemoveConstAndReference(atype), saved_attr_name, - default_val) + attribute_members_str += ( + ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE.format( + RemoveConstAndReference(atype), + saved_attr_name, + default_val, + ) + ) else: attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format( - RemoveConstAndReference(atype), saved_attr_name) + RemoveConstAndReference(atype), saved_attr_name + ) grad_node_name = GetGradNodeName(self.backward_api_name) self.node_declaration_str = NODE_DECLARATION_TEMPLATE.format( - grad_node_name, grad_node_name, grad_node_name, grad_node_name, - grad_node_name, clear_tensor_wrapper_str, grad_node_name, - grad_node_name, set_tensor_wrapper_methods_str, - set_attribute_methods_str, tensor_wrapper_members_str, - attribute_members_str) - - def GenerateNodeDefinition(self, next_grad_node_creation_str, - next_grad_node_out_list, - backward_forward_inputs_map_next): + grad_node_name, + grad_node_name, + grad_node_name, + grad_node_name, + grad_node_name, + clear_tensor_wrapper_str, + grad_node_name, + grad_node_name, + set_tensor_wrapper_methods_str, + set_attribute_methods_str, + tensor_wrapper_members_str, + attribute_members_str, + ) + + def GenerateNodeDefinition( + self, + next_grad_node_creation_str, + next_grad_node_out_list, + backward_forward_inputs_map_next, + ): namespace = self.namespace forward_api_name = self.forward_api_name backward_api_name = self.backward_api_name @@ -1655,21 +1920,30 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): backward_inplace_map = self.backward_inplace_map indent = GetIndent(1) - is_invoke_forward_api = IsInvokeForwardApi(self.grad_api_contents, - self.forward_apis_dict) + is_invoke_forward_api = IsInvokeForwardApi( + self.grad_api_contents, self.forward_apis_dict + ) # Construct grad_api function args # Order: TensorWrappers, GradTensors, Attributes - grad_api_args_len = len(backward_forward_inputs_map.keys()) + len( - backward_grad_inputs_map.keys()) + len(backward_attrs_list) + grad_api_args_len = ( + 
len(backward_forward_inputs_map.keys()) + + len(backward_grad_inputs_map.keys()) + + len(backward_attrs_list) + ) grad_api_args = ["" for i in range(grad_api_args_len)] get_grad_in_args_list = [] # Fill Grad Ins with Zero fill_zero_str = "" if backward_api_name in ops_to_fill_zero_for_empty_grads: - fill_zero_str = f"{indent}const auto& input_metas = this->InputMeta();\n" - for name, (ttype, fwd_position, - grad_api_position) in backward_grad_inputs_map.items(): + fill_zero_str = ( + f"{indent}const auto& input_metas = this->InputMeta();\n" + ) + for name, ( + ttype, + fwd_position, + grad_api_position, + ) in backward_grad_inputs_map.items(): if name in self.optional_inputs: if IsPlainTensorType(ttype): fill_zero_str += f"{indent}egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[{fwd_position}][0], input_metas[{fwd_position}][0]);\n" @@ -1684,40 +1958,67 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): inplace_check_str = "" optional_inplace_var_name = [] # Grad Ins from TensorWrappers - for name, (backward_input_type, is_fwd_input, - grad_api_position), in backward_forward_inputs_map.items(): + for ( + name, + (backward_input_type, is_fwd_input, grad_api_position), + ) in backward_forward_inputs_map.items(): tensor_wrapper_name = GetSavedName(name) transformed_tensor_name = self.TransformToNextGradName(name) - is_optional = (name in self.optional_inputs) + is_optional = name in self.optional_inputs tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name});" if backward_inplace_map and name in backward_inplace_map.keys(): if len(next_grad_node_creation_str) > 0: - if (transformed_tensor_name - in backward_forward_inputs_map_next) and ( - backward_forward_inputs_map_next[ - transformed_tensor_name][1]): + if ( + transformed_tensor_name + in backward_forward_inputs_map_next + ) and ( + backward_forward_inputs_map_next[ + transformed_tensor_name + ][1] + ): optional_inplace_var_name.append( - transformed_tensor_name) - tensor_wrapper_intermidiate_tensor_str = f"(&this->{tensor_wrapper_name})->get_intermidiate_tensor()" + transformed_tensor_name + ) + tensor_wrapper_intermidiate_tensor_str = ( + f"(&this->{tensor_wrapper_name})->get_intermidiate_tensor()" + ) inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format( - transformed_tensor_name, transformed_tensor_name, name, - transformed_tensor_name, transformed_tensor_name, - transformed_tensor_name, transformed_tensor_name, - tensor_wrapper_intermidiate_tensor_str) + transformed_tensor_name, + transformed_tensor_name, + name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + tensor_wrapper_intermidiate_tensor_str, + ) inplace_grad_input_str = transformed_tensor_name if is_optional: if backward_input_type == "std::vector": - tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format( - transformed_tensor_name, transformed_tensor_name, - transformed_tensor_name, transformed_tensor_name) + tensor_wrapper_recover_str += ( + "\n" + + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format( + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + ) + ) else: - tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format( - transformed_tensor_name, transformed_tensor_name, - transformed_tensor_name, transformed_tensor_name) + tensor_wrapper_recover_str += ( + "\n" + + 
CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format( + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + ) + ) - grad_api_args[ - grad_api_position] = transformed_tensor_name + "_optional" + grad_api_args[grad_api_position] = ( + transformed_tensor_name + "_optional" + ) else: grad_api_args[grad_api_position] = transformed_tensor_name @@ -1725,37 +2026,57 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): get_grad_in_args_list.append(tensor_wrapper_recover_str) # Grad Ins from grads - for name, (ttype, fwd_position, - grad_api_position) in backward_grad_inputs_map.items(): + for name, ( + ttype, + fwd_position, + grad_api_position, + ) in backward_grad_inputs_map.items(): transformed_tensor_name = self.TransformToNextGradName(name) - is_optional = (name in self.optional_inputs) + is_optional = name in self.optional_inputs if IsPlainTensorType(ttype): get_tensor_str = f"{indent}auto& {transformed_tensor_name} = hooked_grads[{fwd_position}][0];" # Inplace in backward op if backward_inplace_map and name in backward_inplace_map.keys(): if len(next_grad_node_creation_str) > 0: - if (transformed_tensor_name - in backward_forward_inputs_map_next) and ( - backward_forward_inputs_map_next[ - transformed_tensor_name][1]): + if ( + transformed_tensor_name + in backward_forward_inputs_map_next + ) and ( + backward_forward_inputs_map_next[ + transformed_tensor_name + ][1] + ): optional_inplace_var_name.append( - transformed_tensor_name) + transformed_tensor_name + ) grads_tensor_str = f"grads[{fwd_position}][0]" inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format( - transformed_tensor_name, transformed_tensor_name, name, - transformed_tensor_name, transformed_tensor_name, - transformed_tensor_name, transformed_tensor_name, - grads_tensor_str) + transformed_tensor_name, + transformed_tensor_name, + name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + grads_tensor_str, + ) inplace_grad_input_str = transformed_tensor_name if is_optional: - get_tensor_str += "\n" + CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE.format( - transformed_tensor_name, transformed_tensor_name, - transformed_tensor_name, transformed_tensor_name) + get_tensor_str += ( + "\n" + + CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE.format( + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + transformed_tensor_name, + ) + ) grad_api_args[ - grad_api_position] = f"{transformed_tensor_name}_optional" + grad_api_position + ] = f"{transformed_tensor_name}_optional" else: grad_api_args[grad_api_position] = transformed_tensor_name else: @@ -1768,7 +2089,9 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): # Grad Attrs for name, _, _, grad_api_position in backward_attrs_list: saved_attribute_name = GetSavedName(name) - get_attr_str = f"{indent}auto& {name} = this->{saved_attribute_name};" + get_attr_str = ( + f"{indent}auto& {name} = this->{saved_attribute_name};" + ) grad_api_args[grad_api_position] = name get_grad_in_args_list.append(get_attr_str) @@ -1790,13 +2113,18 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): # Grad Outputs out_index = -1 out_assign_str = "" - for name, (ttype, fwd_position, - grad_api_position) in backward_grad_outputs_map.items(): + for name, ( + ttype, + fwd_position, + grad_api_position, + ) in backward_grad_outputs_map.items(): transformed_tensor_name = self.TransformToNextGradName(name) out_index = out_index + 1 if is_invoke_forward_api: if 
len(backward_grad_outputs_map) == 1: - out_assign_str += f"{indent}*api_output_{out_index} = api_output;\n" + out_assign_str += ( + f"{indent}*api_output_{out_index} = api_output;\n" + ) else: out_assign_str += f"{indent}*api_output_{out_index} = std::get<{out_index}>(api_output);\n" else: @@ -1809,7 +2137,9 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): }}""" if IsPlainTensorType(ttype): - if backward_inplace_map and name in backward_inplace_map.values( + if ( + backward_inplace_map + and name in backward_inplace_map.values() ): inplace_str = f""" if (api_output_{out_index} != nullptr && can_be_inplaced) {{ egr::EagerUtils::HandleViewBetweenInputAndOutput({inplace_grad_input_str}, api_output_{out_index}); @@ -1844,14 +2174,19 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): if is_invoke_forward_api: autograd_api_out = "auto" - if len(self.backward_inplace_map) > 0 and len( - backward_grad_outputs_map) == 1: + if ( + len(self.backward_inplace_map) > 0 + and len(backward_grad_outputs_map) == 1 + ): autograd_api_out = "auto&" - forward_api_name = self.grad_api_contents['invoke'].split( - '(')[0].strip() + forward_api_name = ( + self.grad_api_contents['invoke'].split('(')[0].strip() + ) autograd_api = self.grad_api_contents['invoke'].replace( forward_api_name, - GetDygraphForwardFunctionName(forward_api_name), 1) + GetDygraphForwardFunctionName(forward_api_name), + 1, + ) grad_function_call_str = f""" if (trace_backward) {{ {indent}{autograd_api_out} api_output = {autograd_api}; @@ -1865,7 +2200,8 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): # Check Nan and Inf check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format( - backward_api_name, "returns") + backward_api_name, "returns" + ) # Prepare for Node Creation if Necessary outputs_autograd_meta_str = "" @@ -1878,14 +2214,19 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): # TODO(jiabin): Optimize this with SetStopGradient instead of Pass Stop gradient num_fwd_outputs = len(backward_grad_outputs_map.keys()) - for name, (rtype, pos, - grad_api_position) in backward_grad_outputs_map.items(): + for name, ( + rtype, + pos, + grad_api_position, + ) in backward_grad_outputs_map.items(): transformed_tensor_name = self.TransformToNextGradName(name) output_autograd_meta_name = GetAutoGradMetaName( - transformed_tensor_name) + transformed_tensor_name + ) output_autograd_meta_vec_name = GetAutoGradMetaVectorName( - transformed_tensor_name) + transformed_tensor_name + ) if IsPlainTensorType(rtype): output_autograd_meta = f""" auto& {transformed_tensor_name} = returns[{pos}][0]; @@ -1923,15 +2264,20 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): # For inputs outputs prepare for logging var_str = f"\n{indent} std::string input_str = \"\";" var_str += f"\n{indent} std::string output_str = \"\";" - for name, (ttype, fwd_position, - grad_api_position) in backward_grad_inputs_map.items(): + for name, ( + ttype, + fwd_position, + grad_api_position, + ) in backward_grad_inputs_map.items(): new_name = self.TransformToNextGradName(name) var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";" var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));" var_str += f"\n{indent} input_str += input_{new_name}_str; " - for name, (backward_input_type, is_fwd_input, - grad_api_position), in backward_forward_inputs_map.items(): + for ( + name, + 
(backward_input_type, is_fwd_input, grad_api_position), + ) in backward_forward_inputs_map.items(): new_name = self.TransformToNextGradName(name) var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";" var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));" @@ -1939,8 +2285,11 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str) - for name, (ttype, fwd_position, - grad_api_position) in backward_grad_outputs_map.items(): + for name, ( + ttype, + fwd_position, + grad_api_position, + ) in backward_grad_outputs_map.items(): new_name = self.TransformToNextGradName(name) var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n ( {new_name} , [%s]), \";" var_str += f"\n{indent} std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));" @@ -1949,13 +2298,24 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str) self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format( - grad_node_name, self.backward_api_name, fill_zero_str, - get_grad_in_args_str, grad_function_prepare_str, - compute_require_next_grad_str, inplace_check_str, - inplace_for_grad_outs_str, self.backward_api_name, before_log_str, - grad_function_call_str, check_nan_inf_str, - outputs_autograd_meta_str, next_grad_node_creation_str, - self.backward_api_name, log_str, returns_str) + grad_node_name, + self.backward_api_name, + fill_zero_str, + get_grad_in_args_str, + grad_function_prepare_str, + compute_require_next_grad_str, + inplace_check_str, + inplace_for_grad_outs_str, + self.backward_api_name, + before_log_str, + grad_function_call_str, + check_nan_inf_str, + outputs_autograd_meta_str, + next_grad_node_creation_str, + self.backward_api_name, + log_str, + returns_str, + ) def run(self): super().run() @@ -1966,18 +2326,22 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase): ## Code Generation ## ##################### # Higher-order GradNode generation - next_grad_node_creation_str, next_grad_node_out_list, backward_forward_inputs_map = self.GenerateHigherOrderNodeCreationCode( - ) + ( + next_grad_node_creation_str, + next_grad_node_out_list, + backward_forward_inputs_map, + ) = self.GenerateHigherOrderNodeCreationCode() self.GenerateNodeDeclaration() - self.GenerateNodeDefinition(next_grad_node_creation_str, - next_grad_node_out_list, - backward_forward_inputs_map) + self.GenerateNodeDefinition( + next_grad_node_creation_str, + next_grad_node_out_list, + backward_forward_inputs_map, + ) class DygraphForwardAndNodesGenerator(GeneratorBase): - def __init__(self, api_yaml_path, backward_yaml_path): # Parent members: # self.namespace @@ -1995,8 +2359,9 @@ class DygraphForwardAndNodesGenerator(GeneratorBase): self.node_definition_str = "" def CollectIsForwardOnly(self, forward_api_contents): - self.is_forward_only = False if 'backward' in forward_api_contents.keys( - ) else True + self.is_forward_only = ( + False if 'backward' in forward_api_contents.keys() else True + ) def ParseYamlContents(self): self.ParseForwardYamlContents() @@ -2010,11 +2375,13 @@ class DygraphForwardAndNodesGenerator(GeneratorBase): def GetBackwardAPIContents(self, forward_api_contents): grad_api_dict = self.grad_api_dict - if 'backward' not in forward_api_contents.keys(): return 
None + if 'backward' not in forward_api_contents.keys(): + return None backward_api_name = forward_api_contents['backward'] assert backward_api_name in grad_api_dict.keys(), AssertMessage( - backward_api_name, grad_api_dict.keys()) + backward_api_name, grad_api_dict.keys() + ) backward_api_contents = grad_api_dict[backward_api_name] return backward_api_contents @@ -2028,7 +2395,8 @@ class DygraphForwardAndNodesGenerator(GeneratorBase): namespace = self.namespace for forward_api_contents in forward_api_list: - if forward_api_contents['op'] in black_ops_list: continue + if forward_api_contents['op'] in black_ops_list: + continue self.CollectIsForwardOnly(forward_api_contents) @@ -2036,34 +2404,50 @@ class DygraphForwardAndNodesGenerator(GeneratorBase): backward_api_contents = None else: backward_api_contents = self.GetBackwardAPIContents( - forward_api_contents) + forward_api_contents + ) # Generate Dygraph Forward Function function_generator = DygraphForwardFunctionGenerator( - forward_api_contents, backward_api_contents, forward_apis_dict, - namespace) + forward_api_contents, + backward_api_contents, + forward_apis_dict, + namespace, + ) function_generator.run() - self.forward_definition_str += function_generator.forward_definition_str + "\n" - self.forward_declaration_str += function_generator.forward_declaration_str + "\n" + self.forward_definition_str += ( + function_generator.forward_definition_str + "\n" + ) + self.forward_declaration_str += ( + function_generator.forward_declaration_str + "\n" + ) # Generate Dygraph GradNode Function while True: if backward_api_contents is None: break next_grad_api_contents = self.GetBackwardAPIContents( - backward_api_contents) + backward_api_contents + ) - node_generator = DygraphNodeGenerator(forward_api_contents, - backward_api_contents, - forward_apis_dict, - namespace, - next_grad_api_contents) + node_generator = DygraphNodeGenerator( + forward_api_contents, + backward_api_contents, + forward_apis_dict, + namespace, + next_grad_api_contents, + ) node_generator.run() - self.node_declaration_str += node_generator.node_declaration_str + "\n" - self.node_definition_str += node_generator.node_definition_str + "\n" + self.node_declaration_str += ( + node_generator.node_declaration_str + "\n" + ) + self.node_definition_str += ( + node_generator.node_definition_str + "\n" + ) - if next_grad_api_contents is None: break + if next_grad_api_contents is None: + break # Detect if there exists higher-order GradNode forward_api_contents = backward_api_contents @@ -2076,13 +2460,17 @@ class DygraphForwardAndNodesGenerator(GeneratorBase): if namespace.endswith("::"): namespace = namespace[:-2] self.forward_definition_str = NAMESPACE_WRAPPER_TEMPLATE.format( - namespace, self.forward_definition_str) + namespace, self.forward_definition_str + ) self.forward_declaration_str = NAMESPACE_WRAPPER_TEMPLATE.format( - namespace, self.forward_declaration_str) + namespace, self.forward_declaration_str + ) self.node_declaration_str = NAMESPACE_WRAPPER_TEMPLATE.format( - namespace, self.node_declaration_str) + namespace, self.node_declaration_str + ) self.node_definition_str = NAMESPACE_WRAPPER_TEMPLATE.format( - namespace, self.node_definition_str) + namespace, self.node_definition_str + ) def run(self): self.ParseYamlContents() @@ -2118,8 +2506,9 @@ def GenerateForwardCCFile(filepath, forward_definition_str): os.remove(filepath) core_ops_info_str = GenerateCoreOpInfoDefinition() - file_contents = FORWARD_CC_FILE_TEMPLATE.format(core_ops_info_str, - forward_definition_str) 
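The hunks around this point repeatedly swap yapf's hanging-indent continuation for black's parenthesized form: a call that overflows the line keeps its arguments inside one indented parenthesis block, and an overlong `+=` right-hand side is wrapped in parentheses instead of being split after the operator. A minimal, hypothetical sketch of the target form (render_forward and the parameter names are illustrative, not part of the generator):

def render_forward(template, core_ops_info_str, definitions):
    # call arguments move inside a single parenthesized, indented block
    file_contents = template.format(
        core_ops_info_str, "\n".join(definitions)
    )

    # an overlong augmented assignment wraps its right-hand side in parentheses
    summary = ""
    for name in definitions:
        summary += (
            "generated forward definition for " + name + "\n"
        )
    return file_contents, summary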
+ file_contents = FORWARD_CC_FILE_TEMPLATE.format( + core_ops_info_str, forward_definition_str + ) with open(filepath, 'a') as f: f.write(file_contents) @@ -2130,7 +2519,8 @@ def GenerateForwardHFile(filepath, forward_function_declaration_str): core_ops_info_str = GenerateCoreOpInfoDeclaration() file_contents = FORWARD_H_FILE_TEMPLATE.format( - core_ops_info_str, forward_function_declaration_str) + core_ops_info_str, forward_function_declaration_str + ) with open(filepath, 'a') as f: f.write(file_contents) @@ -2157,8 +2547,9 @@ if __name__ == "__main__": else: backward_yaml_path = None - generator = DygraphForwardAndNodesGenerator(api_yaml_path, - backward_yaml_path) + generator = DygraphForwardAndNodesGenerator( + api_yaml_path, backward_yaml_path + ) generator.run() node_declaration_str += generator.node_declaration_str + "\n" diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py index 5aac3574ba7723aa63780d6ba1ba5422ef31c9a4..dc38d3d46b293f9602ffa19bb96a4fb4178e843d 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py @@ -25,7 +25,7 @@ skipped_forward_api_names = set([]) def SkipAPIGeneration(forward_api_name): - return (forward_api_name in skipped_forward_api_names) + return forward_api_name in skipped_forward_api_names atype_to_parsing_function = { @@ -61,28 +61,25 @@ def FindParsingFunctionFromAttributeType(atype): ########################## ## Refactored Functions ## ########################## -PARSE_PYTHON_C_TENSORS_TEMPLATE = \ -" auto {} = {}(\"{}\", \"{}\", args, {}, {});\n" +PARSE_PYTHON_C_TENSORS_TEMPLATE = ( + " auto {} = {}(\"{}\", \"{}\", args, {}, {});\n" +) -PARSE_PYTHON_C_ARGS_TEMPLATE = \ -""" PyObject* {}_obj = PyTuple_GET_ITEM(args, {}); +PARSE_PYTHON_C_ARGS_TEMPLATE = """ PyObject* {}_obj = PyTuple_GET_ITEM(args, {}); {} {} = {}({}_obj, \"{}\", {}); """ -RECORD_EVENT_TEMPLATE = \ -"paddle::platform::RecordEvent {}(\"{} {}\", paddle::platform::TracerEventType::UserDefined, 1);" +RECORD_EVENT_TEMPLATE = "paddle::platform::RecordEvent {}(\"{} {}\", paddle::platform::TracerEventType::UserDefined, 1);" -RETURN_INPLACE_PYOBJECT_TEMPLATE = \ -""" +RETURN_INPLACE_PYOBJECT_TEMPLATE = """ inplace_var_idx_map[{}] = {}; """ -PYTHON_C_FUNCTION_TEMPLATE = \ -""" +PYTHON_C_FUNCTION_TEMPLATE = """ static PyObject * eager_api_{}(PyObject *self, PyObject *args, PyObject *kwargs) {{ {} PyThreadState *tstate = nullptr; @@ -115,8 +112,7 @@ static PyObject * eager_api_{}(PyObject *self, PyObject *args, PyObject *kwargs) NOAMP_DYGRAPH_FUNCTION_TEMPLATE = "decltype({}({})) out = {}({});" -FUNCTION_SET_DEVICE_TEMPLATE = \ -"""{} if (paddle::platform::is_gpu_place(place)) {{ +FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place)) {{ #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) phi::backends::gpu::SetDeviceId(place.device); VLOG(1) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device; @@ -136,16 +132,13 @@ FUNCTION_SET_DEVICE_TEMPLATE = \ }} """ -FUNCTION_NAME_TEMPLATE = \ -"{}{}{}" +FUNCTION_NAME_TEMPLATE = "{}{}{}" -PYTHON_C_FUNCTION_REG_TEMPLATE = \ -" {{\"{}{}\", (PyCFunction)(void(*)(void)) {}eager_api_{}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {} in dygraph.\"}},\n" +PYTHON_C_FUNCTION_REG_TEMPLATE = " {{\"{}{}\", (PyCFunction)(void(*)(void)) {}eager_api_{}, METH_VARARGS | METH_KEYWORDS, \"C++ interface 
function for {} in dygraph.\"}},\n" -PYTHON_C_WRAPPER_TEMPLATE = \ -""" +PYTHON_C_WRAPPER_TEMPLATE = """ #include #include "paddle/fluid/platform/enforce.h" #include "paddle/phi/api/include/strings_api.h" @@ -184,8 +177,7 @@ void BindFinalStateEagerOpFunctions(pybind11::module *module) {{ """ -CORE_OPS_INFO = \ -""" +CORE_OPS_INFO = """ static PyObject * eager_get_core_ops_args_info(PyObject *self) { PyThreadState *tstate = nullptr; try { @@ -230,15 +222,13 @@ static PyObject * eager_get_core_ops_returns_info(PyObject *self) { """ -CORE_OPS_INFO_REGISTRY = \ -""" +CORE_OPS_INFO_REGISTRY = """ {\"get_core_ops_args_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_args_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_info.\"}, {\"get_core_ops_args_type_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_args_type_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_args_type_info.\"}, {\"get_core_ops_returns_info\", (PyCFunction)(void(*)(void))eager_get_core_ops_returns_info, METH_NOARGS, \"C++ interface function for eager_get_core_ops_returns_info.\"}, """ -NAMESPACE_WRAPPER_TEMPLATE = \ -"""namespace {} {{ +NAMESPACE_WRAPPER_TEMPLATE = """namespace {} {{ {} }} """ @@ -248,21 +238,20 @@ NAMESPACE_WRAPPER_TEMPLATE = \ ## Generator Classes ## ####################### class PythonCSingleFunctionGenerator(FunctionGeneratorBase): - def __init__(self, forward_api_contents, namespace): # Members from Parent: - #self.namespace - #self.forward_api_contents - #self.forward_api_name - #self.orig_forward_inputs_list - #self.orig_forward_attrs_list - #self.orig_forward_returns_list - #self.forward_inputs_position_map - #self.forward_outputs_position_map - #self.optional_inputs - #self.no_need_buffers - #self.intermediate_outputs - #self.forward_inplace_map + # self.namespace + # self.forward_api_contents + # self.forward_api_name + # self.orig_forward_inputs_list + # self.orig_forward_attrs_list + # self.orig_forward_returns_list + # self.forward_inputs_position_map + # self.forward_outputs_position_map + # self.optional_inputs + # self.no_need_buffers + # self.intermediate_outputs + # self.forward_inplace_map FunctionGeneratorBase.__init__(self, forward_api_contents, namespace) self.is_forward_only = True @@ -273,8 +262,9 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): def CollectIsForwardOnly(self): forward_api_contents = self.forward_api_contents - self.is_forward_only = False if 'backward' in forward_api_contents.keys( - ) else True + self.is_forward_only = ( + False if 'backward' in forward_api_contents.keys() else True + ) def GeneratePythonCFunction(self): namespace = self.namespace @@ -293,25 +283,53 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): for name, (ttype, pos) in forward_inputs_position_map.items(): if forward_inplace_map and name in forward_inplace_map.keys(): inplace_args_pos_map[name] = pos - is_optional = (name in optional_inputs) + is_optional = name in optional_inputs if IsVectorTensorType(ttype): if is_optional: - get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format( - name, "GetOptionalTensorListFromArgs", forward_api_name, - name, pos, "true") + get_eager_tensor_str += ( + PARSE_PYTHON_C_TENSORS_TEMPLATE.format( + name, + "GetOptionalTensorListFromArgs", + forward_api_name, + name, + pos, + "true", + ) + ) else: - get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format( - name, "GetTensorListFromArgs", forward_api_name, name, - pos, "false") + get_eager_tensor_str += ( + 
PARSE_PYTHON_C_TENSORS_TEMPLATE.format( + name, + "GetTensorListFromArgs", + forward_api_name, + name, + pos, + "false", + ) + ) else: if is_optional: - get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format( - name, "GetOptionalTensorFromArgs", forward_api_name, - name, pos, "true") + get_eager_tensor_str += ( + PARSE_PYTHON_C_TENSORS_TEMPLATE.format( + name, + "GetOptionalTensorFromArgs", + forward_api_name, + name, + pos, + "true", + ) + ) else: - get_eager_tensor_str += PARSE_PYTHON_C_TENSORS_TEMPLATE.format( - name, "GetTensorFromArgs", forward_api_name, name, pos, - "false") + get_eager_tensor_str += ( + PARSE_PYTHON_C_TENSORS_TEMPLATE.format( + name, + "GetTensorFromArgs", + forward_api_name, + name, + pos, + "false", + ) + ) if forward_inplace_map: for name, (ttype, pos) in forward_outputs_position_map.items(): @@ -319,26 +337,40 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): inplace_returns_pos_map[name] = pos parse_attributes_str = "" - expected_place_str = " auto place = egr::Controller::Instance().GetExpectedPlace();\n" + expected_place_str = ( + " auto place = egr::Controller::Instance().GetExpectedPlace();\n" + ) # Generate Python-C Attributes Parsing Logic for name, atype, _, pos in orig_forward_attrs_list: parsing_function_name = FindParsingFunctionFromAttributeType(atype) # Used input argument place if specified from Python frontend. - if len(expected_place_str - ) != 0 and parsing_function_name == "CastPyArg2Place": + if ( + len(expected_place_str) != 0 + and parsing_function_name == "CastPyArg2Place" + ): expected_place_str = "" - assert name == "place", "Only support 'place' as template argument name in FUNCTION_SET_DEVICE_TEMPLATE." + assert ( + name == "place" + ), "Only support 'place' as template argument name in FUNCTION_SET_DEVICE_TEMPLATE." 
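The binding-generator hunks above show the two wrappings black applies most often here: a `.format()` call whose arguments no longer fit gets one argument per line with a trailing comma, and a compound `if`/`assert` condition is parenthesized with one clause per line. A small, self-contained sketch in the same style (build_parser_line and vector_inputs are illustrative names; the template string and getter names mirror the ones in the patch):

PARSE_TEMPLATE = '    auto {} = {}("{}", "{}", args, {}, {});\n'

def build_parser_line(forward_api_name, name, pos, optional_inputs, vector_inputs):
    # a compound condition is parenthesized, one clause per line, once it overflows
    if (
        name in optional_inputs
        and name not in vector_inputs
    ):
        parsing_function = "GetOptionalTensorFromArgs"
        dispensable = "true"
    else:
        parsing_function = "GetTensorFromArgs"
        dispensable = "false"
    # an exploded call keeps one argument per line with a trailing comma
    return PARSE_TEMPLATE.format(
        name,
        parsing_function,
        forward_api_name,
        name,
        pos,
        dispensable,
    )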
parse_attributes_str += PARSE_PYTHON_C_ARGS_TEMPLATE.format( - name, pos, atype, name, parsing_function_name, name, - forward_api_name, pos) + name, + pos, + atype, + name, + parsing_function_name, + name, + forward_api_name, + pos, + ) set_device_str = FUNCTION_SET_DEVICE_TEMPLATE.format(expected_place_str) # Generate Dygraph Function Call Logic - num_args = len( - forward_inputs_position_map.keys()) + len(orig_forward_attrs_list) + num_args = len(forward_inputs_position_map.keys()) + len( + orig_forward_attrs_list + ) dygraph_function_call_list = ["" for i in range(num_args)] for name, (_, pos) in forward_inputs_position_map.items(): dygraph_function_call_list[pos] = f"{name}" @@ -348,23 +380,34 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): # Generate Python-C Function Definitions fwd_function_name = FUNCTION_NAME_TEMPLATE.format( - "::", namespace, GetForwardFunctionName(forward_api_name)) + "::", namespace, GetForwardFunctionName(forward_api_name) + ) return_str = " return ToPyObject(out);" # Generate Record Event for performance profiling pythonc_record_event_str = RECORD_EVENT_TEMPLATE.format( - "pythonc_record_event", forward_api_name, "pybind_imperative_func") + "pythonc_record_event", forward_api_name, "pybind_imperative_func" + ) noamp_dygraph_function_str = NOAMP_DYGRAPH_FUNCTION_TEMPLATE.format( - fwd_function_name, dygraph_function_call_str, fwd_function_name, - dygraph_function_call_str) + fwd_function_name, + dygraph_function_call_str, + fwd_function_name, + dygraph_function_call_str, + ) # Generate Python-C Function Definetion self.python_c_function_str = PYTHON_C_FUNCTION_TEMPLATE.format( - forward_api_name, pythonc_record_event_str, forward_api_name, - get_eager_tensor_str, parse_attributes_str, set_device_str, - noamp_dygraph_function_str, return_str) + forward_api_name, + pythonc_record_event_str, + forward_api_name, + get_eager_tensor_str, + parse_attributes_str, + set_device_str, + noamp_dygraph_function_str, + return_str, + ) # Set prefix of forward_api_name to avoid conflicts prefix = self.namespace.strip("::") @@ -372,37 +415,63 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): # Generate Python-C Function Registration self.python_c_function_reg_str = PYTHON_C_FUNCTION_REG_TEMPLATE.format( - forward_api_name_prefix, forward_api_name, namespace, - forward_api_name, forward_api_name) + forward_api_name_prefix, + forward_api_name, + namespace, + forward_api_name, + forward_api_name, + ) if forward_inplace_map: inplaced_forward_api_name = GetInplacedFunctionName( - self.forward_api_name) + self.forward_api_name + ) inplaced_fwd_function_name = FUNCTION_NAME_TEMPLATE.format( - "::", namespace, - GetForwardFunctionName(inplaced_forward_api_name)) - - inplace_noamp_dygraph_function_str = NOAMP_DYGRAPH_FUNCTION_TEMPLATE.format( - inplaced_fwd_function_name, dygraph_function_call_str, - inplaced_fwd_function_name, dygraph_function_call_str) + "::", + namespace, + GetForwardFunctionName(inplaced_forward_api_name), + ) + + inplace_noamp_dygraph_function_str = ( + NOAMP_DYGRAPH_FUNCTION_TEMPLATE.format( + inplaced_fwd_function_name, + dygraph_function_call_str, + inplaced_fwd_function_name, + dygraph_function_call_str, + ) + ) return_str = " std::map inplace_var_idx_map;" for inplace_input, inplace_output in forward_inplace_map.items(): return_str += RETURN_INPLACE_PYOBJECT_TEMPLATE.format( inplace_returns_pos_map[inplace_output], - inplace_args_pos_map[inplace_input]) - return_str += " return ToPyObject(out, args, inplace_var_idx_map);" + 
inplace_args_pos_map[inplace_input], + ) + return_str += ( + " return ToPyObject(out, args, inplace_var_idx_map);" + ) # Generate Python-C Function Definetion python_c_inplace_func_str = PYTHON_C_FUNCTION_TEMPLATE.format( - inplaced_forward_api_name, pythonc_record_event_str, - inplaced_forward_api_name, get_eager_tensor_str, - parse_attributes_str, set_device_str, - inplace_noamp_dygraph_function_str, return_str) - - python_c_inplace_func_reg_str = PYTHON_C_FUNCTION_REG_TEMPLATE.format( - forward_api_name_prefix, inplaced_forward_api_name, namespace, - inplaced_forward_api_name, inplaced_forward_api_name) + inplaced_forward_api_name, + pythonc_record_event_str, + inplaced_forward_api_name, + get_eager_tensor_str, + parse_attributes_str, + set_device_str, + inplace_noamp_dygraph_function_str, + return_str, + ) + + python_c_inplace_func_reg_str = ( + PYTHON_C_FUNCTION_REG_TEMPLATE.format( + forward_api_name_prefix, + inplaced_forward_api_name, + namespace, + inplaced_forward_api_name, + inplaced_forward_api_name, + ) + ) # self.forward_api_name ending with '_' means it only has inplace api if self.forward_api_name[-1] == '_': @@ -427,11 +496,13 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): # Initialized orig_forward_inputs_list, orig_forward_returns_list, orig_forward_attrs_list self.CollectOriginalForwardInfo() - if SkipAPIGeneration(self.forward_api_name): return False + if SkipAPIGeneration(self.forward_api_name): + return False # Initialized forward_inputs_position_map, forward_outputs_position_map - self.DetermineForwardPositionMap(self.orig_forward_inputs_list, - self.orig_forward_returns_list) + self.DetermineForwardPositionMap( + self.orig_forward_inputs_list, self.orig_forward_returns_list + ) # Code Generation self.GeneratePythonCFunction() @@ -440,7 +511,6 @@ class PythonCSingleFunctionGenerator(FunctionGeneratorBase): class PythonCGenerator(GeneratorBase): - def __init__(self, path): # Parent members: # self.namespace @@ -458,12 +528,17 @@ class PythonCGenerator(GeneratorBase): forward_api_list = self.forward_api_list for forward_api_content in forward_api_list: f_generator = PythonCSingleFunctionGenerator( - forward_api_content, namespace) + forward_api_content, namespace + ) status = f_generator.run() if status == True: - self.python_c_functions_str += f_generator.python_c_function_str + "\n" - self.python_c_functions_reg_str += f_generator.python_c_function_reg_str + self.python_c_functions_str += ( + f_generator.python_c_function_str + "\n" + ) + self.python_c_functions_reg_str += ( + f_generator.python_c_function_reg_str + ) def AttachNamespace(self): namespace = self.namespace @@ -473,7 +548,8 @@ class PythonCGenerator(GeneratorBase): if namespace.endswith("::"): namespace = namespace[:-2] self.python_c_functions_str = NAMESPACE_WRAPPER_TEMPLATE.format( - namespace, python_c_functions_str) + namespace, python_c_functions_str + ) def run(self): # Infer namespace from yaml_path @@ -494,7 +570,8 @@ class PythonCGenerator(GeneratorBase): ############################ def ParseArguments(): parser = argparse.ArgumentParser( - description='Eager Code Generator Args Parser') + description='Eager Code Generator Args Parser' + ) parser.add_argument('--api_yaml_path', type=str) parser.add_argument('--output_path', type=str) @@ -508,15 +585,18 @@ def GenerateCoreOpsInfoMap(): def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str): - core_ops_infos_definition, core_ops_infos_registry = GenerateCoreOpsInfoMap( - ) + ( + core_ops_infos_definition, 
+ core_ops_infos_registry, + ) = GenerateCoreOpsInfoMap() python_c_function_str += core_ops_infos_definition python_c_function_reg_str += core_ops_infos_registry python_c_function_reg_str += " {nullptr,nullptr,0,nullptr}" - python_c_str = PYTHON_C_WRAPPER_TEMPLATE.format(python_c_function_str, - python_c_function_reg_str) + python_c_str = PYTHON_C_WRAPPER_TEMPLATE.format( + python_c_function_str, python_c_function_reg_str + ) return python_c_str @@ -538,11 +618,16 @@ if __name__ == "__main__": py_c_generator = PythonCGenerator(api_yaml_path) py_c_generator.run() - generated_python_c_functions += py_c_generator.python_c_functions_str + "\n" - generated_python_c_registration += py_c_generator.python_c_functions_reg_str + generated_python_c_functions += ( + py_c_generator.python_c_functions_str + "\n" + ) + generated_python_c_registration += ( + py_c_generator.python_c_functions_reg_str + ) - python_c_str = GeneratePythonCWrappers(generated_python_c_functions, - generated_python_c_registration) + python_c_str = GeneratePythonCWrappers( + generated_python_c_functions, generated_python_c_registration + ) output_path = args.output_path for path in [output_path]: diff --git a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py index 5cda10f873cd3eb24f7520782253ca789bd198d5..219ce72077cd594084d80d1c20e5c8822ddca183 100644 --- a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py +++ b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py @@ -94,8 +94,9 @@ def download_concat(cache_folder, zip_path): def print_processbar(done_percentage): done_filled = done_percentage * '=' empty_filled = (100 - done_percentage) * ' ' - sys.stdout.write("\r[%s%s]%d%%" % - (done_filled, empty_filled, done_percentage)) + sys.stdout.write( + "\r[%s%s]%d%%" % (done_filled, empty_filled, done_percentage) + ) sys.stdout.flush() @@ -137,7 +138,7 @@ def convert_Imagenet_tar2bin(tar_file, output_file): val_dict[name] = label for img_name in dataset.keys(): - remove_len = (len(FOLDER_NAME)) + remove_len = len(FOLDER_NAME) img_name_prim = img_name[remove_len:] label = val_dict[img_name_prim] label_int = (int)(label) @@ -156,19 +157,25 @@ def run_convert(): retry = 0 try_limit = 3 - while not (os.path.exists(output_file) - and os.path.getsize(output_file) == FULL_SIZE_BYTES): + while not ( + os.path.exists(output_file) + and os.path.getsize(output_file) == FULL_SIZE_BYTES + ): if os.path.exists(output_file): sys.stderr.write( - "\n\nThe existing binary file[{}] is broken. Start to generate new one...\n\n" - .format(output_file)) + "\n\nThe existing binary file[{}] is broken. Start to generate new one...\n\n".format( + output_file + ) + ) os.remove(output_file) if retry < try_limit: retry = retry + 1 else: raise RuntimeError( - "Can not convert the dataset to binary file with try limit {0}". - format(try_limit)) + "Can not convert the dataset to binary file with try limit {0}".format( + try_limit + ) + ) download_concat(cache_folder, zip_path) convert_Imagenet_tar2bin(zip_path, output_file) print("\nSuccess! 
The binary file can be found at {0}".format(output_file)) @@ -193,63 +200,75 @@ def convert_Imagenet_local2bin(args): if not os.path.exists(img_path): continue - #save image(float32) to file + # save image(float32) to file img = Image.open(img_path) img = process_image(img) np_img = np.array(img) - of.seek(SIZE_INT64 + - SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * idx) + of.seek( + SIZE_INT64 + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * idx + ) of.write(np_img.astype('float32').tobytes()) - #save label(int64_t) to file + # save label(int64_t) to file label_int = (int)(label) np_label = np.array(label_int) - of.seek(SIZE_INT64 + - SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * num_images + - idx * SIZE_INT64) + of.seek( + SIZE_INT64 + + SIZE_FLOAT32 * DATA_DIM * DATA_DIM * 3 * num_images + + idx * SIZE_INT64 + ) of.write(np_label.astype('int64').tobytes()) # The bin file should contain # number of images + all images data + all corresponding labels # so the file target_size should be as follows - target_size = SIZE_INT64 + num_images * 3 * args.data_dim * args.data_dim * SIZE_FLOAT32 + num_images * SIZE_INT64 - if (os.path.getsize(bin_file_path) == target_size): + target_size = ( + SIZE_INT64 + + num_images * 3 * args.data_dim * args.data_dim * SIZE_FLOAT32 + + num_images * SIZE_INT64 + ) + if os.path.getsize(bin_file_path) == target_size: print( - "Success! The user data output binary file can be found at: {0}" - .format(bin_file_path)) + "Success! The user data output binary file can be found at: {0}".format( + bin_file_path + ) + ) else: print("Conversion failed!") def main_preprocess_Imagenet(args): parser = argparse.ArgumentParser( - description= - "Convert the full Imagenet val set or local data to binary file.", + description="Convert the full Imagenet val set or local data to binary file.", usage=None, - add_help=True) + add_help=True, + ) parser.add_argument( '--local', action="store_true", - help="If used, user need to set --data_dir and then convert file") - parser.add_argument("--data_dir", - default="", - type=str, - help="Dataset root directory") + help="If used, user need to set --data_dir and then convert file", + ) + parser.add_argument( + "--data_dir", default="", type=str, help="Dataset root directory" + ) parser.add_argument( "--label_list", type=str, default="val_list.txt", - help= - "List of object labels with same sequence as denoted in the annotation file" + help="List of object labels with same sequence as denoted in the annotation file", + ) + parser.add_argument( + "--output_file", + type=str, + default="imagenet_small.bin", + help="File path of the output binary file", + ) + parser.add_argument( + "--data_dim", + type=int, + default=DATA_DIM, + help="Image preprocess with data_dim width and height", ) - parser.add_argument("--output_file", - type=str, - default="imagenet_small.bin", - help="File path of the output binary file") - parser.add_argument("--data_dim", - type=int, - default=DATA_DIM, - help="Image preprocess with data_dim width and height") args = parser.parse_args() if args.local: diff --git a/paddle/fluid/inference/tests/api/full_pascalvoc_test_preprocess.py b/paddle/fluid/inference/tests/api/full_pascalvoc_test_preprocess.py index 74739530b4724668188dbaf74cf6d43c00165230..6fc2e072e00cc4a59bf3c24b467f809f96650011 100644 --- a/paddle/fluid/inference/tests/api/full_pascalvoc_test_preprocess.py +++ b/paddle/fluid/inference/tests/api/full_pascalvoc_test_preprocess.py @@ -24,7 +24,9 @@ import hashlib import tarfile import argparse -DATA_URL = 
"http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar" +DATA_URL = ( + "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar" +) DATA_DIR = os.path.expanduser("~/.cache/paddle/dataset/pascalvoc/") TAR_FILE = "VOCtest_06-Nov-2007.tar" TAR_PATH = os.path.join(DATA_DIR, TAR_FILE) @@ -106,8 +108,9 @@ def convert_pascalvoc_local2bin(args): for object in objects: bbox_sample = [] # start from 1 - bbox_sample.append(float(label_list.index( - object.find('name').text))) + bbox_sample.append( + float(label_list.index(object.find('name').text)) + ) bbox = object.find('bndbox') difficult = float(object.find('difficult').text) bbox_sample.append(float(bbox.find('xmin').text) / im_width) @@ -118,7 +121,8 @@ def convert_pascalvoc_local2bin(args): bbox_labels.append(bbox_sample) bbox_labels = np.array(bbox_labels) - if len(bbox_labels) == 0: continue + if len(bbox_labels) == 0: + continue lbls.extend(bbox_labels[:, 0]) boxes.extend(bbox_labels[:, 1:5]) @@ -135,11 +139,17 @@ def convert_pascalvoc_local2bin(args): # number of images + all images data + an array that represent object numbers of each image # + labels of all objects in images + bboxes of all objects + difficulties of all objects # so the target size should be as follows: - target_size = SIZE_INT64 + image_nums * 3 * args.resize_h * args.resize_h * SIZE_FLOAT32 + image_nums * SIZE_INT64 + object_nums_sum * ( - SIZE_INT64 + 4 * SIZE_FLOAT32 + SIZE_INT64) - if (os.path.getsize(output_file_path) == target_size): - print("Success! \nThe local data output binary file can be found at: ", - output_file_path) + target_size = ( + SIZE_INT64 + + image_nums * 3 * args.resize_h * args.resize_h * SIZE_FLOAT32 + + image_nums * SIZE_INT64 + + object_nums_sum * (SIZE_INT64 + 4 * SIZE_FLOAT32 + SIZE_INT64) + ) + if os.path.getsize(output_file_path) == target_size: + print( + "Success! 
\nThe local data output binary file can be found at: ", + output_file_path, + ) else: print("Conversion failed!") @@ -147,8 +157,9 @@ def convert_pascalvoc_local2bin(args): def print_processbar(done_percentage): done_filled = done_percentage * '=' empty_filled = (100 - done_percentage) * ' ' - sys.stdout.write("\r[%s%s]%d%%" % - (done_filled, empty_filled, done_percentage)) + sys.stdout.write( + "\r[%s%s]%d%%" % (done_filled, empty_filled, done_percentage) + ) sys.stdout.flush() @@ -163,13 +174,30 @@ def convert_pascalvoc_tar2bin(tar_path, data_out_path): # map label to number (index) label_list = [ - "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", - "car", "cat", "chair", "cow", "diningtable", "dog", "horse", - "motorbike", "person", "pottedplant", "sheep", "sofa", "train", - "tvmonitor" + "background", + "aeroplane", + "bicycle", + "bird", + "boat", + "bottle", + "bus", + "car", + "cat", + "chair", + "cow", + "diningtable", + "dog", + "horse", + "motorbike", + "person", + "pottedplant", + "sheep", + "sofa", + "train", + "tvmonitor", ] print_processbar(0) - #read from tar file and write to bin + # read from tar file and write to bin tar = tarfile.open(tar_path, "r") f_test = tar.extractfile(TEST_LIST_KEY).read() lines = f_test.split('\n') @@ -210,8 +238,9 @@ def convert_pascalvoc_tar2bin(tar_path, data_out_path): for object in objects: bbox_sample = [] - bbox_sample.append(float(label_list.index( - object.find('name').text))) + bbox_sample.append( + float(label_list.index(object.find('name').text)) + ) bbox = object.find('bndbox') difficult = float(object.find('difficult').text) bbox_sample.append(float(bbox.find('xmin').text) / im_width) @@ -222,7 +251,8 @@ def convert_pascalvoc_tar2bin(tar_path, data_out_path): bbox_labels.append(bbox_sample) bbox_labels = np.array(bbox_labels) - if len(bbox_labels) == 0: continue + if len(bbox_labels) == 0: + continue lbls.extend(bbox_labels[:, 0]) boxes.extend(bbox_labels[:, 1:5]) difficults.extend(bbox_labels[:, -1]) @@ -258,12 +288,16 @@ def download_pascalvoc(data_url, data_dir, tar_targethash, tar_path): def run_convert(): try_limit = 2 retry = 0 - while not (os.path.exists(DATA_OUT_PATH) and os.path.getsize(DATA_OUT_PATH) - == BIN_FULLSIZE and BIN_TARGETHASH == hashlib.md5( - open(DATA_OUT_PATH, 'rb').read()).hexdigest()): + while not ( + os.path.exists(DATA_OUT_PATH) + and os.path.getsize(DATA_OUT_PATH) == BIN_FULLSIZE + and BIN_TARGETHASH + == hashlib.md5(open(DATA_OUT_PATH, 'rb').read()).hexdigest() + ): if os.path.exists(DATA_OUT_PATH): sys.stderr.write( - "The existing binary file is broken. It is being removed...\n") + "The existing binary file is broken. 
It is being removed...\n" + ) os.remove(DATA_OUT_PATH) if retry < try_limit: retry = retry + 1 @@ -275,52 +309,60 @@ def run_convert(): def main_pascalvoc_preprocess(args): parser = argparse.ArgumentParser( - description= - "Convert the full pascalvoc val set or local data to binary file.", + description="Convert the full pascalvoc val set or local data to binary file.", usage=None, - add_help=True) + add_help=True, + ) parser.add_argument( '--local', action="store_true", - help="If used, user need to set --data_dir and then convert file") - parser.add_argument("--data_dir", - default="", - type=str, - help="Dataset root directory") + help="If used, user need to set --data_dir and then convert file", + ) + parser.add_argument( + "--data_dir", default="", type=str, help="Dataset root directory" + ) parser.add_argument( "--img_annotation_list", type=str, default="test_100.txt", - help= - "A file containing the image file path and corresponding annotation file path" + help="A file containing the image file path and corresponding annotation file path", ) parser.add_argument( "--label_file", type=str, default="label_list", - help= - "List of object labels with same sequence as denoted in the annotation file" + help="List of object labels with same sequence as denoted in the annotation file", + ) + parser.add_argument( + "--output_file", + type=str, + default="pascalvoc_small.bin", + help="File path of the output binary file", + ) + parser.add_argument( + "--resize_h", + type=int, + default=RESIZE_H, + help="Image preprocess with resize_h", + ) + parser.add_argument( + "--resize_w", + type=int, + default=RESIZE_W, + help="Image prerocess with resize_w", + ) + parser.add_argument( + "--mean_value", + type=str, + default=MEAN_VALUE, + help="Image preprocess with mean_value", + ) + parser.add_argument( + "--ap_version", + type=str, + default=AP_VERSION, + help="Image preprocess with ap_version", ) - parser.add_argument("--output_file", - type=str, - default="pascalvoc_small.bin", - help="File path of the output binary file") - parser.add_argument("--resize_h", - type=int, - default=RESIZE_H, - help="Image preprocess with resize_h") - parser.add_argument("--resize_w", - type=int, - default=RESIZE_W, - help="Image prerocess with resize_w") - parser.add_argument("--mean_value", - type=str, - default=MEAN_VALUE, - help="Image preprocess with mean_value") - parser.add_argument("--ap_version", - type=str, - default=AP_VERSION, - help="Image preprocess with ap_version") args = parser.parse_args() if args.local: convert_pascalvoc_local2bin(args) diff --git a/paddle/fluid/inference/tests/api/test_detection_dataset_preprocess.py b/paddle/fluid/inference/tests/api/test_detection_dataset_preprocess.py index 57920a8b27065a66a2399d741298f1570cefa0a4..4ae32b0f77a84b998b773d0881c1088ae115852f 100644 --- a/paddle/fluid/inference/tests/api/test_detection_dataset_preprocess.py +++ b/paddle/fluid/inference/tests/api/test_detection_dataset_preprocess.py @@ -17,13 +17,13 @@ import os class Test_Preprocess(unittest.TestCase): - def test_local_convert(self): os.system("python full_pascalvoc_test_preprocess.py --choice=local") def test_online_convert(self): os.system( - "python full_pascalvoc_test_preprocess.py --choice=VOC_test_2007") + "python full_pascalvoc_test_preprocess.py --choice=VOC_test_2007" + ) if __name__ == '__main__': diff --git a/paddle/fluid/pybind/generate_file_structures.py b/paddle/fluid/pybind/generate_file_structures.py index 
869b27050a08c37fd0491615e78f581aea0dcbd1..4904f97dc2fdd989e5eed4fd71740856a5b0ee3e 100644 --- a/paddle/fluid/pybind/generate_file_structures.py +++ b/paddle/fluid/pybind/generate_file_structures.py @@ -25,7 +25,8 @@ if __name__ == "__main__": for i in range(split_count): empty_files.append( - os.path.join(pybind_dir, "op_function" + str(i + 1) + ".cc")) + os.path.join(pybind_dir, "op_function" + str(i + 1) + ".cc") + ) for path in empty_files: if not os.path.exists(path): diff --git a/paddle/infrt/tests/models/abs_model.py b/paddle/infrt/tests/models/abs_model.py index c94338be7788bf4c58b042b9275579cca01d0f48..56e1bc8fa710f4f4d672d65985fa6e3d7413092e 100644 --- a/paddle/infrt/tests/models/abs_model.py +++ b/paddle/infrt/tests/models/abs_model.py @@ -19,7 +19,6 @@ import sys class AbsNet(paddle.nn.Layer): - def __init__(self): super(AbsNet, self).__init__() @@ -32,6 +31,7 @@ if __name__ == '__main__': # build network model = AbsNet() # save inferencing format model - net = to_static(model, - input_spec=[InputSpec(shape=[None, 1, 28, 28], name='x')]) + net = to_static( + model, input_spec=[InputSpec(shape=[None, 1, 28, 28], name='x')] + ) paddle.jit.save(net, sys.argv[1]) diff --git a/paddle/infrt/tests/models/efficientnet-b4/model.py b/paddle/infrt/tests/models/efficientnet-b4/model.py index 0c6163f3df272b9895ef5834b6aa0f57fd41fdc4..1b493897e654a21015070fb10e918f081367e1da 100644 --- a/paddle/infrt/tests/models/efficientnet-b4/model.py +++ b/paddle/infrt/tests/models/efficientnet-b4/model.py @@ -20,6 +20,7 @@ import paddle import sys model = EfficientNet.from_name('efficientnet-b4') -net = to_static(model, - input_spec=[InputSpec(shape=[None, 3, 256, 256], name='x')]) +net = to_static( + model, input_spec=[InputSpec(shape=[None, 3, 256, 256], name='x')] +) paddle.jit.save(net, sys.argv[1]) diff --git a/paddle/infrt/tests/models/efficientnet-b4/net/efficientnet.py b/paddle/infrt/tests/models/efficientnet-b4/net/efficientnet.py index 75f6780484dd9f20ea7416f679e7b641afc69524..c14be25a81590f092227d1a5cb3d59341aea5f58 100644 --- a/paddle/infrt/tests/models/efficientnet-b4/net/efficientnet.py +++ b/paddle/infrt/tests/models/efficientnet-b4/net/efficientnet.py @@ -16,9 +16,15 @@ import paddle import paddle.nn as nn import paddle.nn.functional as F -from .utils import (round_filters, round_repeats, drop_connect, - get_same_padding_conv2d, get_model_params, - efficientnet_params, load_pretrained_weights) +from .utils import ( + round_filters, + round_repeats, + drop_connect, + get_same_padding_conv2d, + get_model_params, + efficientnet_params, + load_pretrained_weights, +) class MBConvBlock(nn.Layer): @@ -38,8 +44,9 @@ class MBConvBlock(nn.Layer): self._block_args = block_args self._bn_mom = global_params.batch_norm_momentum self._bn_eps = global_params.batch_norm_epsilon - self.has_se = (self._block_args.se_ratio - is not None) and (0 < self._block_args.se_ratio <= 1) + self.has_se = (self._block_args.se_ratio is not None) and ( + 0 < self._block_args.se_ratio <= 1 + ) self.id_skip = block_args.id_skip # skip connection and drop connect # Get static or dynamic convolution depending on image size @@ -47,15 +54,19 @@ class MBConvBlock(nn.Layer): # Expansion phase inp = self._block_args.input_filters # number of input channels - oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels + oup = ( + self._block_args.input_filters * self._block_args.expand_ratio + ) # number of output channels if self._block_args.expand_ratio != 1: - self._expand_conv = 
Conv2d(in_channels=inp, - out_channels=oup, - kernel_size=1, - bias_attr=False) - self._bn0 = nn.BatchNorm2D(num_features=oup, - momentum=self._bn_mom, - epsilon=self._bn_eps) + self._expand_conv = Conv2d( + in_channels=inp, + out_channels=oup, + kernel_size=1, + bias_attr=False, + ) + self._bn0 = nn.BatchNorm2D( + num_features=oup, momentum=self._bn_mom, epsilon=self._bn_eps + ) # Depthwise convolution phase k = self._block_args.kernel_size @@ -66,32 +77,40 @@ class MBConvBlock(nn.Layer): groups=oup, # groups makes it depthwise kernel_size=k, stride=s, - bias_attr=False) - self._bn1 = nn.BatchNorm2D(num_features=oup, - momentum=self._bn_mom, - epsilon=self._bn_eps) + bias_attr=False, + ) + self._bn1 = nn.BatchNorm2D( + num_features=oup, momentum=self._bn_mom, epsilon=self._bn_eps + ) # Squeeze and Excitation layer, if desired if self.has_se: num_squeezed_channels = max( 1, - int(self._block_args.input_filters * self._block_args.se_ratio)) - self._se_reduce = Conv2d(in_channels=oup, - out_channels=num_squeezed_channels, - kernel_size=1) - self._se_expand = Conv2d(in_channels=num_squeezed_channels, - out_channels=oup, - kernel_size=1) + int(self._block_args.input_filters * self._block_args.se_ratio), + ) + self._se_reduce = Conv2d( + in_channels=oup, + out_channels=num_squeezed_channels, + kernel_size=1, + ) + self._se_expand = Conv2d( + in_channels=num_squeezed_channels, + out_channels=oup, + kernel_size=1, + ) # Output phase final_oup = self._block_args.output_filters - self._project_conv = Conv2d(in_channels=oup, - out_channels=final_oup, - kernel_size=1, - bias_attr=False) - self._bn2 = nn.BatchNorm2D(num_features=final_oup, - momentum=self._bn_mom, - epsilon=self._bn_eps) + self._project_conv = Conv2d( + in_channels=oup, + out_channels=final_oup, + kernel_size=1, + bias_attr=False, + ) + self._bn2 = nn.BatchNorm2D( + num_features=final_oup, momentum=self._bn_mom, epsilon=self._bn_eps + ) self._swish = nn.Hardswish() def forward(self, inputs, drop_connect_rate=None): @@ -111,18 +130,26 @@ class MBConvBlock(nn.Layer): if self.has_se: x_squeezed = F.adaptive_avg_pool2d(x, 1) x_squeezed = self._se_expand( - self._swish(self._se_reduce(x_squeezed))) + self._swish(self._se_reduce(x_squeezed)) + ) x = F.sigmoid(x_squeezed) * x x = self._bn2(self._project_conv(x)) # Skip connection and drop connect - input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters - if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters: + input_filters, output_filters = ( + self._block_args.input_filters, + self._block_args.output_filters, + ) + if ( + self.id_skip + and self._block_args.stride == 1 + and input_filters == output_filters + ): if drop_connect_rate: - x = drop_connect(x, - prob=drop_connect_rate, - training=self.training) + x = drop_connect( + x, prob=drop_connect_rate, training=self.training + ) x = x + inputs # skip connection return x @@ -161,15 +188,14 @@ class EfficientNet(nn.Layer): # Stem in_channels = 3 # rgb out_channels = round_filters( - 32, self._global_params) # number of output channels - self._conv_stem = Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=2, - bias_attr=False) - self._bn0 = nn.BatchNorm2D(num_features=out_channels, - momentum=bn_mom, - epsilon=bn_eps) + 32, self._global_params + ) # number of output channels + self._conv_stem = Conv2d( + in_channels, out_channels, kernel_size=3, stride=2, bias_attr=False + ) + self._bn0 = nn.BatchNorm2D( + num_features=out_channels, momentum=bn_mom, 
epsilon=bn_eps + ) # Build blocks self._blocks = nn.LayerList([]) @@ -177,32 +203,37 @@ class EfficientNet(nn.Layer): # Update block input and output filters based on depth multiplier. block_args = block_args._replace( - input_filters=round_filters(block_args.input_filters, - self._global_params), - output_filters=round_filters(block_args.output_filters, - self._global_params), - num_repeat=round_repeats(block_args.num_repeat, - self._global_params)) + input_filters=round_filters( + block_args.input_filters, self._global_params + ), + output_filters=round_filters( + block_args.output_filters, self._global_params + ), + num_repeat=round_repeats( + block_args.num_repeat, self._global_params + ), + ) # The first block needs to take care of stride and filter size increase. self._blocks.append(MBConvBlock(block_args, self._global_params)) if block_args.num_repeat > 1: block_args = block_args._replace( - input_filters=block_args.output_filters, stride=1) + input_filters=block_args.output_filters, stride=1 + ) for _ in range(block_args.num_repeat - 1): - self._blocks.append(MBConvBlock(block_args, - self._global_params)) + self._blocks.append( + MBConvBlock(block_args, self._global_params) + ) # Head in_channels = block_args.output_filters # output of final block out_channels = round_filters(1280, self._global_params) - self._conv_head = Conv2d(in_channels, - out_channels, - kernel_size=1, - bias_attr=False) - self._bn1 = nn.BatchNorm2D(num_features=out_channels, - momentum=bn_mom, - epsilon=bn_eps) + self._conv_head = Conv2d( + in_channels, out_channels, kernel_size=1, bias_attr=False + ) + self._bn1 = nn.BatchNorm2D( + num_features=out_channels, momentum=bn_mom, epsilon=bn_eps + ) # Final linear layer self._avg_pooling = nn.AdaptiveAvgPool2D(1) @@ -217,7 +248,7 @@ class EfficientNet(nn.Layer): block.set_swish(memory_efficient) def extract_features(self, inputs): - """ Returns output of the final convolution layer """ + """Returns output of the final convolution layer""" # Stem x = self._swish(self._bn0(self._conv_stem(inputs))) @@ -235,7 +266,7 @@ class EfficientNet(nn.Layer): return x def forward(self, inputs): - """ Calls extract_features to extract features, applies final linear layer, and returns logits. 
""" + """Calls extract_features to extract features, applies final linear layer, and returns logits.""" bs = inputs.shape[0] # Convolution layers x = self.extract_features(inputs) @@ -250,31 +281,33 @@ class EfficientNet(nn.Layer): @classmethod def from_name(cls, model_name, override_params=None): cls._check_model_name_is_valid(model_name) - blocks_args, global_params = get_model_params(model_name, - override_params) + blocks_args, global_params = get_model_params( + model_name, override_params + ) return cls(blocks_args, global_params) @classmethod - def from_pretrained(cls, - model_name, - advprop=False, - num_classes=1000, - in_channels=3): - model = cls.from_name(model_name, - override_params={'num_classes': num_classes}) - load_pretrained_weights(model, - model_name, - load_fc=(num_classes == 1000), - advprop=advprop) + def from_pretrained( + cls, model_name, advprop=False, num_classes=1000, in_channels=3 + ): + model = cls.from_name( + model_name, override_params={'num_classes': num_classes} + ) + load_pretrained_weights( + model, model_name, load_fc=(num_classes == 1000), advprop=advprop + ) if in_channels != 3: Conv2d = get_same_padding_conv2d( - image_size=model._global_params.image_size) + image_size=model._global_params.image_size + ) out_channels = round_filters(32, model._global_params) - model._conv_stem = Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=2, - bias_attr=False) + model._conv_stem = Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=2, + bias_attr=False, + ) return model @classmethod @@ -285,8 +318,9 @@ class EfficientNet(nn.Layer): @classmethod def _check_model_name_is_valid(cls, model_name): - """ Validates model name. """ + """Validates model name.""" valid_models = ['efficientnet-b' + str(i) for i in range(9)] if model_name not in valid_models: - raise ValueError('model_name should be one of: ' + - ', '.join(valid_models)) + raise ValueError( + 'model_name should be one of: ' + ', '.join(valid_models) + ) diff --git a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py index 62caf759ad3caae1b1acb9704a76e41392f7e43a..9a060306995bba614548d29d18e72eca6d42b834 100644 --- a/paddle/infrt/tests/models/efficientnet-b4/net/utils.py +++ b/paddle/infrt/tests/models/efficientnet-b4/net/utils.py @@ -22,25 +22,44 @@ import paddle.nn as nn import paddle.nn.functional as F # Parameters for the entire model (stem, all blocks, and head) -GlobalParams = collections.namedtuple('GlobalParams', [ - 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'num_classes', - 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth', - 'drop_connect_rate', 'image_size' -]) +GlobalParams = collections.namedtuple( + 'GlobalParams', + [ + 'batch_norm_momentum', + 'batch_norm_epsilon', + 'dropout_rate', + 'num_classes', + 'width_coefficient', + 'depth_coefficient', + 'depth_divisor', + 'min_depth', + 'drop_connect_rate', + 'image_size', + ], +) # Parameters for an individual model block -BlockArgs = collections.namedtuple('BlockArgs', [ - 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', - 'expand_ratio', 'id_skip', 'stride', 'se_ratio' -]) +BlockArgs = collections.namedtuple( + 'BlockArgs', + [ + 'kernel_size', + 'num_repeat', + 'input_filters', + 'output_filters', + 'expand_ratio', + 'id_skip', + 'stride', + 'se_ratio', + ], +) # Change namedtuple defaults -GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields) -BlockArgs.__new__.__defaults__ = (None, ) 
* len(BlockArgs._fields) +GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields) +BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields) def round_filters(filters, global_params): - """ Calculate and round number of filters based on depth multiplier. """ + """Calculate and round number of filters based on depth multiplier.""" multiplier = global_params.width_coefficient if not multiplier: return filters @@ -48,15 +67,16 @@ def round_filters(filters, global_params): min_depth = global_params.min_depth filters *= multiplier min_depth = min_depth or divisor - new_filters = max(min_depth, - int(filters + divisor / 2) // divisor * divisor) + new_filters = max( + min_depth, int(filters + divisor / 2) // divisor * divisor + ) if new_filters < 0.9 * filters: # prevent rounding by more than 10% new_filters += divisor return int(new_filters) def round_repeats(repeats, global_params): - """ Round number of filters based on depth multiplier. """ + """Round number of filters based on depth multiplier.""" multiplier = global_params.depth_coefficient if not multiplier: return repeats @@ -76,8 +96,8 @@ def drop_connect(inputs, prob, training): def get_same_padding_conv2d(image_size=None): - """ Chooses static padding if you have specified an image size, and dynamic padding otherwise. - Static padding is necessary for ONNX exporting of models. """ + """Chooses static padding if you have specified an image size, and dynamic padding otherwise. + Static padding is necessary for ONNX exporting of models.""" if image_size is None: return Conv2dDynamicSamePadding else: @@ -85,26 +105,31 @@ def get_same_padding_conv2d(image_size=None): class Conv2dDynamicSamePadding(nn.Conv2D): - """ 2D Convolutions like TensorFlow, for a dynamic image size """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - dilation=1, - groups=1, - bias_attr=None): - super().__init__(in_channels, - out_channels, - kernel_size, - stride, - 0, - dilation, - groups, - bias_attr=bias_attr) - self.stride = self._stride if len( - self._stride) == 2 else [self._stride[0]] * 2 + """2D Convolutions like TensorFlow, for a dynamic image size""" + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + dilation=1, + groups=1, + bias_attr=None, + ): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride, + 0, + dilation, + groups, + bias_attr=bias_attr, + ) + self.stride = ( + self._stride if len(self._stride) == 2 else [self._stride[0]] * 2 + ) def forward(self, x): ih, iw = x.shape[-2:] @@ -112,65 +137,84 @@ class Conv2dDynamicSamePadding(nn.Conv2D): sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max( - (oh - 1) * self.stride[0] + (kh - 1) * self._dilation[0] + 1 - ih, - 0) + (oh - 1) * self.stride[0] + (kh - 1) * self._dilation[0] + 1 - ih, 0 + ) pad_w = max( - (ow - 1) * self.stride[1] + (kw - 1) * self._dilation[1] + 1 - iw, - 0) + (ow - 1) * self.stride[1] + (kw - 1) * self._dilation[1] + 1 - iw, 0 + ) if pad_h > 0 or pad_w > 0: - x = F.pad(x, [ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 - ]) - return F.conv2d(x, self.weight, self.bias, self.stride, self._padding, - self._dilation, self._groups) + x = F.pad( + x, + [ + pad_w // 2, + pad_w - pad_w // 2, + pad_h // 2, + pad_h - pad_h // 2, + ], + ) + return F.conv2d( + x, + self.weight, + self.bias, + self.stride, + self._padding, + self._dilation, + self._groups, + ) class Conv2dStaticSamePadding(nn.Conv2D): - """ 2D Convolutions like 
TensorFlow, for a fixed image size""" - - def __init__(self, - in_channels, - out_channels, - kernel_size, - image_size=None, - **kwargs): + """2D Convolutions like TensorFlow, for a fixed image size""" + + def __init__( + self, in_channels, out_channels, kernel_size, image_size=None, **kwargs + ): if 'stride' in kwargs and isinstance(kwargs['stride'], list): kwargs['stride'] = kwargs['stride'][0] super().__init__(in_channels, out_channels, kernel_size, **kwargs) - self.stride = self._stride if len( - self._stride) == 2 else [self._stride[0]] * 2 + self.stride = ( + self._stride if len(self._stride) == 2 else [self._stride[0]] * 2 + ) # Calculate padding based on image size and save it assert image_size is not None - ih, iw = image_size if type(image_size) == list else [ - image_size, image_size - ] + ih, iw = ( + image_size if type(image_size) == list else [image_size, image_size] + ) kh, kw = self.weight.shape[-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max( - (oh - 1) * self.stride[0] + (kh - 1) * self._dilation[0] + 1 - ih, - 0) + (oh - 1) * self.stride[0] + (kh - 1) * self._dilation[0] + 1 - ih, 0 + ) pad_w = max( - (ow - 1) * self.stride[1] + (kw - 1) * self._dilation[1] + 1 - iw, - 0) + (ow - 1) * self.stride[1] + (kw - 1) * self._dilation[1] + 1 - iw, 0 + ) if pad_h > 0 or pad_w > 0: - self.static_padding = nn.Pad2D([ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 - ]) + self.static_padding = nn.Pad2D( + [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] + ) else: self.static_padding = Identity() def forward(self, x): x = self.static_padding(x) - x = F.conv2d(x, self.weight, self.bias, self.stride, self._padding, - self._dilation, self._groups) + x = F.conv2d( + x, + self.weight, + self.bias, + self.stride, + self._padding, + self._dilation, + self._groups, + ) return x class Identity(nn.Layer): - - def __init__(self, ): + def __init__( + self, + ): super().__init__() def forward(self, x): @@ -178,7 +222,7 @@ class Identity(nn.Layer): def efficientnet_params(model_name): - """ Map EfficientNet model name to parameter coefficients. """ + """Map EfficientNet model name to parameter coefficients.""" params_dict = { # Coefficients: width,depth,resolution,dropout 'efficientnet-b0': (1.0, 1.0, 224, 0.2), @@ -196,11 +240,11 @@ def efficientnet_params(model_name): class BlockDecoder(object): - """ Block Decoder for readability, straight from the official TensorFlow repository """ + """Block Decoder for readability, straight from the official TensorFlow repository""" @staticmethod def _decode_block_string(block_string): - """ Gets a block through a string notation of arguments. 
""" + """Gets a block through a string notation of arguments.""" assert isinstance(block_string, str) ops = block_string.split('_') @@ -212,8 +256,9 @@ class BlockDecoder(object): options[key] = value # Check stride - assert (('s' in options and len(options['s']) == 1) or - (len(options['s']) == 2 and options['s'][0] == options['s'][1])) + assert ('s' in options and len(options['s']) == 1) or ( + len(options['s']) == 2 and options['s'][0] == options['s'][1] + ) return BlockArgs( kernel_size=int(options['k']), @@ -223,7 +268,8 @@ class BlockDecoder(object): expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, - stride=[int(options['s'][0])]) + stride=[int(options['s'][0])], + ) @staticmethod def _encode_block_string(block): @@ -234,7 +280,7 @@ class BlockDecoder(object): 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, - 'o%d' % block.output_filters + 'o%d' % block.output_filters, ] if 0 < block.se_ratio <= 1: args.append('se%s' % block.se_ratio) @@ -270,13 +316,15 @@ class BlockDecoder(object): return block_strings -def efficientnet(width_coefficient=None, - depth_coefficient=None, - dropout_rate=0.2, - drop_connect_rate=0.2, - image_size=None, - num_classes=1000): - """ Get block arguments according to parameter and coefficients. """ +def efficientnet( + width_coefficient=None, + depth_coefficient=None, + dropout_rate=0.2, + drop_connect_rate=0.2, + image_size=None, + num_classes=1000, +): + """Get block arguments according to parameter and coefficients.""" blocks_args = [ 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', @@ -305,67 +353,51 @@ def efficientnet(width_coefficient=None, def get_model_params(model_name, override_params): - """ Get the block args and global params for a given model """ + """Get the block args and global params for a given model""" if model_name.startswith('efficientnet'): w, d, s, p = efficientnet_params(model_name) - blocks_args, global_params = efficientnet(width_coefficient=w, - depth_coefficient=d, - dropout_rate=p, - image_size=s) + blocks_args, global_params = efficientnet( + width_coefficient=w, + depth_coefficient=d, + dropout_rate=p, + image_size=s, + ) else: - raise NotImplementedError('model name is not pre-defined: %s' % - model_name) + raise NotImplementedError( + 'model name is not pre-defined: %s' % model_name + ) if override_params: global_params = global_params._replace(**override_params) return blocks_args, global_params url_map = { - 'efficientnet-b0': - '/home/aistudio/data/weights/efficientnet-b0-355c32eb.pdparams', - 'efficientnet-b1': - '/home/aistudio/data/weights/efficientnet-b1-f1951068.pdparams', - 'efficientnet-b2': - '/home/aistudio/data/weights/efficientnet-b2-8bb594d6.pdparams', - 'efficientnet-b3': - '/home/aistudio/data/weights/efficientnet-b3-5fb5a3c3.pdparams', - 'efficientnet-b4': - '/home/aistudio/data/weights/efficientnet-b4-6ed6700e.pdparams', - 'efficientnet-b5': - '/home/aistudio/data/weights/efficientnet-b5-b6417697.pdparams', - 'efficientnet-b6': - '/home/aistudio/data/weights/efficientnet-b6-c76e70fd.pdparams', - 'efficientnet-b7': - '/home/aistudio/data/weights/efficientnet-b7-dcc49843.pdparams', + 'efficientnet-b0': '/home/aistudio/data/weights/efficientnet-b0-355c32eb.pdparams', + 'efficientnet-b1': '/home/aistudio/data/weights/efficientnet-b1-f1951068.pdparams', + 'efficientnet-b2': '/home/aistudio/data/weights/efficientnet-b2-8bb594d6.pdparams', + 'efficientnet-b3': 
'/home/aistudio/data/weights/efficientnet-b3-5fb5a3c3.pdparams', + 'efficientnet-b4': '/home/aistudio/data/weights/efficientnet-b4-6ed6700e.pdparams', + 'efficientnet-b5': '/home/aistudio/data/weights/efficientnet-b5-b6417697.pdparams', + 'efficientnet-b6': '/home/aistudio/data/weights/efficientnet-b6-c76e70fd.pdparams', + 'efficientnet-b7': '/home/aistudio/data/weights/efficientnet-b7-dcc49843.pdparams', } url_map_advprop = { - 'efficientnet-b0': - '/home/aistudio/data/weights/adv-efficientnet-b0-b64d5a18.pdparams', - 'efficientnet-b1': - '/home/aistudio/data/weights/adv-efficientnet-b1-0f3ce85a.pdparams', - 'efficientnet-b2': - '/home/aistudio/data/weights/adv-efficientnet-b2-6e9d97e5.pdparams', - 'efficientnet-b3': - '/home/aistudio/data/weights/adv-efficientnet-b3-cdd7c0f4.pdparams', - 'efficientnet-b4': - '/home/aistudio/data/weights/adv-efficientnet-b4-44fb3a87.pdparams', - 'efficientnet-b5': - '/home/aistudio/data/weights/adv-efficientnet-b5-86493f6b.pdparams', - 'efficientnet-b6': - '/home/aistudio/data/weights/adv-efficientnet-b6-ac80338e.pdparams', - 'efficientnet-b7': - '/home/aistudio/data/weights/adv-efficientnet-b7-4652b6dd.pdparams', - 'efficientnet-b8': - '/home/aistudio/data/weights/adv-efficientnet-b8-22a8fe65.pdparams', + 'efficientnet-b0': '/home/aistudio/data/weights/adv-efficientnet-b0-b64d5a18.pdparams', + 'efficientnet-b1': '/home/aistudio/data/weights/adv-efficientnet-b1-0f3ce85a.pdparams', + 'efficientnet-b2': '/home/aistudio/data/weights/adv-efficientnet-b2-6e9d97e5.pdparams', + 'efficientnet-b3': '/home/aistudio/data/weights/adv-efficientnet-b3-cdd7c0f4.pdparams', + 'efficientnet-b4': '/home/aistudio/data/weights/adv-efficientnet-b4-44fb3a87.pdparams', + 'efficientnet-b5': '/home/aistudio/data/weights/adv-efficientnet-b5-86493f6b.pdparams', + 'efficientnet-b6': '/home/aistudio/data/weights/adv-efficientnet-b6-ac80338e.pdparams', + 'efficientnet-b7': '/home/aistudio/data/weights/adv-efficientnet-b7-4652b6dd.pdparams', + 'efficientnet-b8': '/home/aistudio/data/weights/adv-efficientnet-b8-22a8fe65.pdparams', } -def load_pretrained_weights(model, - model_name, - weights_path=None, - load_fc=True, - advprop=False): +def load_pretrained_weights( + model, model_name, weights_path=None, load_fc=True, advprop=False +): """Loads pretrained weights from weights path or download using url. Args: model (Module): The whole model of efficientnet. 
diff --git a/paddle/infrt/tests/models/linear.py b/paddle/infrt/tests/models/linear.py index 1a6c6f095c704b2ff37bbb78b4f444a720a2dfa7..71dc4e0d1c9015d7b8b2c958f31eb3c9423b6ec4 100644 --- a/paddle/infrt/tests/models/linear.py +++ b/paddle/infrt/tests/models/linear.py @@ -28,13 +28,12 @@ CLASS_NUM = 10 # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -42,7 +41,6 @@ class RandomDataset(paddle.io.Dataset): class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -71,11 +69,9 @@ adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) # create data loader dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) -loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) +loader = paddle.io.DataLoader( + dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, num_workers=2 +) # train train(layer, loader, loss_fn, adam) diff --git a/paddle/infrt/tests/models/resnet50_model.py b/paddle/infrt/tests/models/resnet50_model.py index af45de0c8a609058c627e737b3cf6cc529ae0b8a..ff84c3988abcbac5d41f4f6c1ca175051d8f450f 100644 --- a/paddle/infrt/tests/models/resnet50_model.py +++ b/paddle/infrt/tests/models/resnet50_model.py @@ -19,6 +19,7 @@ from paddle.static import InputSpec import sys model = resnet50(True) -net = to_static(model, - input_spec=[InputSpec(shape=[None, 3, 256, 256], name='x')]) +net = to_static( + model, input_spec=[InputSpec(shape=[None, 3, 256, 256], name='x')] +) paddle.jit.save(net, sys.argv[1]) diff --git a/paddle/phi/api/yaml/generator/api_base.py b/paddle/phi/api/yaml/generator/api_base.py index 534158eb31cbe54c21111bd286de1ecc45dbc237..e67023d2faf71526eb08d72cb8c0b8c5c863c970 100644 --- a/paddle/phi/api/yaml/generator/api_base.py +++ b/paddle/phi/api/yaml/generator/api_base.py @@ -20,7 +20,6 @@ PREFIX_META_TENSOR_NAME = 'meta_' class BaseAPI(object): - def __init__(self, api_item_yaml): self.api = self.get_api_name(api_item_yaml) @@ -34,8 +33,12 @@ class BaseAPI(object): # names : [], list of output names # types : [], list of output types # out_size_expr : [], expression for getting size of vector - self.inputs, self.attrs, self.outputs, self.optional_vars = self.parse_args( - self.api, api_item_yaml) + ( + self.inputs, + self.attrs, + self.outputs, + self.optional_vars, + ) = self.parse_args(self.api, api_item_yaml) self.is_base_api = True if 'invoke' in api_item_yaml: @@ -44,7 +47,8 @@ class BaseAPI(object): else: if 'infer_meta' in api_item_yaml: self.infer_meta = self.parse_infer_meta( - api_item_yaml['infer_meta']) + api_item_yaml['infer_meta'] + ) self.kernel = self.parse_kernel(api_item_yaml['kernel']) self.data_transform = self.parse_data_transform(api_item_yaml) self.inplace_map, self.view_map = {}, {} @@ -52,18 +56,16 @@ class BaseAPI(object): self.gene_input_func = { "const Tensor&": { "dense": self.gene_dense_input, - "selected_rows": self.gene_selected_rows_input + "selected_rows": self.gene_selected_rows_input, }, "const paddle::optional&": { "dense": self.gene_dense_input, - "selected_rows": self.gene_selected_rows_input - }, - "const std::vector&": { - "dense": self.gene_vec_dense_input 
+ "selected_rows": self.gene_selected_rows_input, }, + "const std::vector&": {"dense": self.gene_vec_dense_input}, "const paddle::optional>&": { "dense": self.gene_optional_vec_dense_input - } + }, } def get_api_name(self, api_item_yaml): @@ -75,21 +77,19 @@ class BaseAPI(object): def get_input_tensor_args(self, inplace_flag=False): input_args = [] inplace_type_map = { - "const Tensor&": - "Tensor&", - "const paddle::optional&": - "paddle::optional&", - "const std::vector&": - "std::vector&", - "const paddle::optional>&": - "paddle::optional>&" + "const Tensor&": "Tensor&", + "const paddle::optional&": "paddle::optional&", + "const std::vector&": "std::vector&", + "const paddle::optional>&": "paddle::optional>&", } for name in self.inputs['names']: name = name.split('@')[0] if inplace_flag and name in self.inplace_map.values(): input_args.append( - inplace_type_map[self.inputs['input_info'][name]] + ' ' + - name) + inplace_type_map[self.inputs['input_info'][name]] + + ' ' + + name + ) else: input_args.append(self.inputs['input_info'][name] + ' ' + name) return input_args @@ -100,8 +100,9 @@ class BaseAPI(object): default_value = '' if self.attrs['attr_info'][name][1] is not None: default_value = ' = ' + self.attrs['attr_info'][name][1] - declare_args.append(self.attrs['attr_info'][name][0] + ' ' + name + - default_value) + declare_args.append( + self.attrs['attr_info'][name][0] + ' ' + name + default_value + ) return ", ".join(declare_args) @@ -118,28 +119,35 @@ class BaseAPI(object): optional_vars = [ item.strip() for item in api_item_yaml['optional'].split(',') ] - inputs, attrs = self.parse_input_and_attr(api_name, - api_item_yaml['args'], - optional_vars) + inputs, attrs = self.parse_input_and_attr( + api_name, api_item_yaml['args'], optional_vars + ) output_type_list, output_names, out_size_expr = self.parse_output( - api_name, api_item_yaml['output']) - return inputs, attrs, { - 'names': output_names, - 'types': output_type_list, - 'out_size_expr': out_size_expr - }, optional_vars + api_name, api_item_yaml['output'] + ) + return ( + inputs, + attrs, + { + 'names': output_names, + 'types': output_type_list, + 'out_size_expr': out_size_expr, + }, + optional_vars, + ) def parse_input_and_attr(self, api_name, args_config, optional_vars=[]): inputs = {'names': [], 'input_info': {}} attrs = {'names': [], 'attr_info': {}} args_str = args_config.strip() - assert args_str.startswith('(') and args_str.endswith(')'), \ - f"Args declaration should start with '(' and end with ')', please check the args of {api_name} in yaml." + assert args_str.startswith('(') and args_str.endswith( + ')' + ), f"Args declaration should start with '(' and end with ')', please check the args of {api_name} in yaml." args_str = args_str[1:-1] args_list = args_str.split(',') input_types_map = { 'Tensor': 'const Tensor&', - 'Tensor[]': 'const std::vector&' + 'Tensor[]': 'const std::vector&', } attr_types_map = { 'IntArray': 'const IntArray&', @@ -177,7 +185,7 @@ class BaseAPI(object): 'bool': 'paddle::optional', 'Place': 'paddle::optional', 'DataLayout': 'paddle::optional', - 'DataType': 'paddle::optional' + 'DataType': 'paddle::optional', } for item in args_list: @@ -188,10 +196,12 @@ class BaseAPI(object): for in_type_symbol, in_type in input_types_map.items(): if type_and_name[0] == in_type_symbol: input_name = type_and_name[1].strip() - assert len(input_name) > 0, \ - f"The input tensor name should not be empty. Please check the args of {api_name} in yaml." 
- assert len(attrs['names']) == 0, \ - f"The input Tensor should appear before attributes. please check the position of {api_name}:input({input_name}) in yaml" + assert ( + len(input_name) > 0 + ), f"The input tensor name should not be empty. Please check the args of {api_name} in yaml." + assert ( + len(attrs['names']) == 0 + ), f"The input Tensor should appear before attributes. please check the position of {api_name}:input({input_name}) in yaml" if input_name in optional_vars: in_type = optional_types_trans[in_type_symbol] @@ -206,9 +216,10 @@ class BaseAPI(object): # match the attribute for attr_type_symbol, attr_type in attr_types_map.items(): if type_and_name[0] == attr_type_symbol: - attr_name = item[len(attr_type_symbol):].strip() - assert len(attr_name) > 0, \ - f"The attribute name should not be empty. Please check the args of {api_name} in yaml." + attr_name = item[len(attr_type_symbol) :].strip() + assert ( + len(attr_name) > 0 + ), f"The attribute name should not be empty. Please check the args of {api_name} in yaml." default_value = None if '=' in attr_name: attr_infos = attr_name.split('=') @@ -218,7 +229,9 @@ class BaseAPI(object): if attr_name in optional_vars: attr_type = optional_types_trans[attr_type_symbol] - default_value_str = "" if default_value is None else '=' + default_value + default_value_str = ( + "" if default_value is None else '=' + default_value + ) attrs['names'].append(attr_name) attrs['attr_info'][attr_name] = (attr_type, default_value) break @@ -226,25 +239,34 @@ class BaseAPI(object): return inputs, attrs def parse_output(self, api_name, output_config): - def parse_output_item(output_item): output_type_map = { 'Tensor': 'Tensor', - 'Tensor[]': 'std::vector' + 'Tensor[]': 'std::vector', } result = re.search( r"(?P[a-zA-Z0-9_[\]]+)\s*(?P\([a-zA-Z0-9_@]+\))?\s*(?P\{[^\}]+\})?", - output_item) - assert result is not None, f"{api_name} : the output config parse error." + output_item, + ) + assert ( + result is not None + ), f"{api_name} : the output config parse error." out_type = result.group('out_type') - assert out_type in output_type_map, \ - f"{api_name} : Output type error: the output type only support Tensor and Tensor[], \ + assert ( + out_type in output_type_map + ), f"{api_name} : Output type error: the output type only support Tensor and Tensor[], \ but now is {out_type}." 
- out_name = 'out' if result.group('name') is None else result.group( - 'name')[1:-1] - out_size_expr = None if result.group( - 'expr') is None else result.group('expr')[1:-1] + out_name = ( + 'out' + if result.group('name') is None + else result.group('name')[1:-1] + ) + out_size_expr = ( + None + if result.group('expr') is None + else result.group('expr')[1:-1] + ) return output_type_map[out_type], out_name, out_size_expr temp_list = output_config.split(',') @@ -286,7 +308,7 @@ class BaseAPI(object): 'layout': None, 'data_type': None, 'use_gpudnn': 'false', - 'dispatch': {} + 'dispatch': {}, } if 'backend' in kernel_config and len(kernel_config['backend']) > 0: kernel['backend'] = kernel_config['backend'] @@ -301,7 +323,8 @@ class BaseAPI(object): if isinstance(kernel['use_gpudnn'], bool): kernel['use_gpudnn'] = str(kernel['use_gpudnn']).lower() kernel_funcs = re.compile(r'([a-zA-Z0-9_]+)\s*({[^}]+})?').findall( - kernel_config['func']) + kernel_config['func'] + ) def parse_kernel_in_out_type(in_out_str): if len(in_out_str) == 0: @@ -313,11 +336,17 @@ class BaseAPI(object): # check the tensor type for item in inputs: assert item in [ - 'dense', 'selected_rows', 'sparse_coo', 'sparse_csr' + 'dense', + 'selected_rows', + 'sparse_coo', + 'sparse_csr', ], f"{self.api} : Invalid input tensor type ('{item}'), here we only support 'dense', 'selected_rows', 'sparse_coo' and 'sparse_csr'." for item in outputs: assert item in [ - 'dense', 'selected_rows', 'sparse_coo', 'sparse_csr' + 'dense', + 'selected_rows', + 'sparse_coo', + 'sparse_csr', ], f"{self.api} : Invalid output tensor type ('{item}'), here we only support 'dense', 'selected_rows', 'sparse_coo' and 'sparse_csr'." return (inputs, outputs) @@ -325,7 +354,8 @@ class BaseAPI(object): for func_item in kernel_funcs: kernel['func'].append(func_item[0]) kernel['dispatch'][func_item[0]] = parse_kernel_in_out_type( - func_item[1]) + func_item[1] + ) return kernel @@ -334,10 +364,12 @@ class BaseAPI(object): if 'data_transform' in api_item_yaml: if 'skip_transform' in api_item_yaml['data_transform']: data_transform['skip_transform'] = api_item_yaml[ - 'data_transform']['skip_transform'] + 'data_transform' + ]['skip_transform'] if 'support_trans_dtype' in api_item_yaml['data_transform']: data_transform['support_trans_dtype'] = api_item_yaml[ - 'data_transform']['support_trans_dtype'] + 'data_transform' + ]['support_trans_dtype'] return data_transform @@ -356,9 +388,12 @@ PADDLE_API {self.get_return_type()} {api_func_name}({self.get_declare_args()}); if self.is_base_api and len(self.inplace_map) > 0: if api_func_name[-1] != '_': api_func_name += '_' - api_declaration = api_declaration + f""" + api_declaration = ( + api_declaration + + f""" PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_declare_args(inplace_flag=True)}); """ + ) return api_declaration @@ -368,11 +403,13 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d if self.kernel['backend'] is not None: if '>' in self.kernel['backend']: vars_list = self.kernel['backend'].split('>') - assert len( - vars_list - ) == 2, f"{self.api} api: The number of params to set backend with '>' only allows 2, but received {len(vars_list)}." - assert (vars_list[0].strip() in self.attrs['names']) and (self.attrs['attr_info'][vars_list[0].strip()][0] == 'const Place&'), \ - f"{self.api} api: When use '>' to set kernel backend, the first param should be a attribute with Place type." 
+ assert ( + len(vars_list) == 2 + ), f"{self.api} api: The number of params to set backend with '>' only allows 2, but received {len(vars_list)}." + assert (vars_list[0].strip() in self.attrs['names']) and ( + self.attrs['attr_info'][vars_list[0].strip()][0] + == 'const Place&' + ), f"{self.api} api: When use '>' to set kernel backend, the first param should be a attribute with Place type." backend_select_code = f""" kernel_backend = ParseBackendWithInputOrder({vars_list[0].strip()}, {vars_list[1].strip()}); """ @@ -404,16 +441,19 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d attr_data_type_count = 0 for attr_name in attrs['names']: if attrs['attr_info'][attr_name][0] == 'const Place&': - assert kernel['backend'] is not None, \ - f"{api} api: When there is a parameter with 'Place' type in attributes, you must set backend of kernel manually." + assert ( + kernel['backend'] is not None + ), f"{api} api: When there is a parameter with 'Place' type in attributes, you must set backend of kernel manually." attr_backend_count = attr_backend_count + 1 if attrs['attr_info'][attr_name][0] == 'DataLayout': - assert kernel['layout'] is not None, \ - f"{api} api: When there is a parameter with 'DataLayout' type in attributes, you must set layout of kernel manually." + assert ( + kernel['layout'] is not None + ), f"{api} api: When there is a parameter with 'DataLayout' type in attributes, you must set layout of kernel manually." attr_layout_count = attr_layout_count + 1 if attrs['attr_info'][attr_name][0] == 'DataType': - assert kernel['data_type'] is not None, \ - f"{api} api: When there is a parameter with 'DataType' type in attributes, you must set data_type of kernel manually." + assert ( + kernel['data_type'] is not None + ), f"{api} api: When there is a parameter with 'DataType' type in attributes, you must set data_type of kernel manually." attr_data_type_count = attr_data_type_count + 1 # preprocess kernel configures @@ -422,48 +462,67 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d if kernel['layout'] is not None: if '>' in kernel['layout']: vars_list = kernel['layout'].split('>') - assert len( - vars_list - ) == 2, f"{api} api: The number of params to set layout with '>' only allows 2, but received {len(vars_list)}." - assert vars_list[0].strip() in attrs['names'] and attrs['attr_info'][vars_list[0].strip()][0] == 'DataLayout', \ - f"{api} api: When use '>' to set kernel layout, the first param should be a attribute with DataLayout type." - kernel_select_code = kernel_select_code + f""" + assert ( + len(vars_list) == 2 + ), f"{api} api: The number of params to set layout with '>' only allows 2, but received {len(vars_list)}." + assert ( + vars_list[0].strip() in attrs['names'] + and attrs['attr_info'][vars_list[0].strip()][0] + == 'DataLayout' + ), f"{api} api: When use '>' to set kernel layout, the first param should be a attribute with DataLayout type." + kernel_select_code = ( + kernel_select_code + + f""" kernel_layout = ParseLayoutWithInputOrder({vars_list[0].strip()}, {vars_list[1].strip()}); """ + ) else: vars_list = kernel['layout'].split(',') - assert len( - vars_list - ) == 1, f"{api} api: The number of params to set layout must be 1, but received {len(vars_list)}." - kernel_select_code = kernel_select_code + f""" + assert ( + len(vars_list) == 1 + ), f"{api} api: The number of params to set layout must be 1, but received {len(vars_list)}." 
+ kernel_select_code = ( + kernel_select_code + + f""" kernel_layout = ParseLayout({vars_list[0].strip()}); """ + ) if kernel['data_type'] is not None: if '>' in kernel['data_type']: vars_list = kernel['data_type'].split('>') - assert len( - vars_list - ) == 2, f"{api} api: The number of params to set data_type with '>' only allows 2, but received {len(vars_list)}." - assert vars_list[0].strip() in attrs['names'] and attrs['attr_info'][vars_list[0].strip()][0] == 'DataType', \ - f"{api} api: When use '>' to set kernel data_type, the first param should be a attribute with DataType type." - kernel_select_code = kernel_select_code + f""" + assert ( + len(vars_list) == 2 + ), f"{api} api: The number of params to set data_type with '>' only allows 2, but received {len(vars_list)}." + assert ( + vars_list[0].strip() in attrs['names'] + and attrs['attr_info'][vars_list[0].strip()][0] + == 'DataType' + ), f"{api} api: When use '>' to set kernel data_type, the first param should be a attribute with DataType type." + kernel_select_code = ( + kernel_select_code + + f""" kernel_data_type = ParseDataTypeWithInputOrder({vars_list[0].strip()}, {vars_list[1].strip()}); """ + ) else: vars_list = kernel['data_type'].split(',') - assert len( - vars_list - ) == 1, f"{api} api: The number of params to set data_type only allows 1, but received {len(vars_list)}." - kernel_select_code = kernel_select_code + f""" + assert ( + len(vars_list) == 1 + ), f"{api} api: The number of params to set data_type only allows 1, but received {len(vars_list)}." + kernel_select_code = ( + kernel_select_code + + f""" kernel_data_type = ParseDataType({vars_list[0].strip()}); """ + ) if len(input_names) == 0: - assert attr_backend_count > 0 and attr_data_type_count > 0, \ - f"{api} api: When there is no input tensor, the args must have 'Place' and 'DataType'." + assert ( + attr_backend_count > 0 and attr_data_type_count > 0 + ), f"{api} api: When there is no input tensor, the args must have 'Place' and 'DataType'." 
kernel_select_args = "" for input_name in input_names: @@ -475,7 +534,9 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_select_code = kernel_key_item_init + kernel_select_code if len(input_names) > 0: - kernel_select_code = kernel_select_code + f""" + kernel_select_code = ( + kernel_select_code + + f""" if (kernel_backend == Backend::UNDEFINED || kernel_layout == DataLayout::UNDEFINED || kernel_data_type == DataType::UNDEFINED ) {{ @@ -491,6 +552,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_data_type = kernel_key.dtype(); }} }}""" + ) return kernel_select_code @@ -499,37 +561,62 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d attr_names = self.attrs['names'] infer_meta = self.infer_meta - infer_meta_params = infer_meta['param'] if infer_meta[ - 'param'] is not None else input_names + attr_names + infer_meta_params = ( + infer_meta['param'] + if infer_meta['param'] is not None + else input_names + attr_names + ) # generate meta tensors meta_tensor_code = "" param_code = "" for param in infer_meta_params: if param in input_names: if self.inputs['input_info'][param] == "const Tensor&": - param_code = param_code + "MakeMetaTensor(*" + PREFIX_TENSOR_NAME + param + "), " - elif self.inputs['input_info'][ - param] == "const std::vector&": - meta_tensor_code = meta_tensor_code + f""" + param_code = ( + param_code + + "MakeMetaTensor(*" + + PREFIX_TENSOR_NAME + + param + + "), " + ) + elif ( + self.inputs['input_info'][param] + == "const std::vector&" + ): + meta_tensor_code = ( + meta_tensor_code + + f""" {code_indent} auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param}); {code_indent} std::vector {param}_metas({param}_meta_vec.size()); {code_indent} for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{ {code_indent} {param}_metas[i] = &{param}_meta_vec[i]; {code_indent} }} """ + ) param_code = param_code + param + "_metas, " - elif self.inputs['input_info'][ - param] == "const paddle::optional>&": - meta_tensor_code = meta_tensor_code + f""" + elif ( + self.inputs['input_info'][param] + == "const paddle::optional>&" + ): + meta_tensor_code = ( + meta_tensor_code + + f""" {code_indent} auto {param}_meta_vec = MakeMetaTensor({PREFIX_TENSOR_NAME}{param}); {code_indent} paddle::optional> {param}_metas({param}_meta_vec.size()); {code_indent} for (size_t i = 0; i < {param}_meta_vec.size(); ++i) {{ {code_indent} {param}_metas->at(i) = &{param}_meta_vec[i]; {code_indent} }} """ + ) param_code = param_code + param + "_metas, " elif param in self.optional_vars: - param_code = param_code + "MakeMetaTensor(" + PREFIX_TENSOR_NAME + param + "), " + param_code = ( + param_code + + "MakeMetaTensor(" + + PREFIX_TENSOR_NAME + + param + + "), " + ) else: raise ValueError( f"{self.api} : Param of infer_meta error : {self.inputs['input_info'][param]} type is not supported." 
@@ -545,22 +632,37 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d for i, out_name in enumerate(kernel_output_names): if self.outputs['types'][i] == 'std::vector': - meta_tensor_code = meta_tensor_code + f""" + meta_tensor_code = ( + meta_tensor_code + + f""" {code_indent} auto {out_name}_{PREFIX_META_TENSOR_NAME}vec = MakeMetaTensor({out_name}); {code_indent} std::vector {out_name}_metas({out_name}_{PREFIX_META_TENSOR_NAME}vec.size()); {code_indent} for (size_t i = 0; i < {out_name}_{PREFIX_META_TENSOR_NAME}vec.size(); ++i) {{ {code_indent} {out_name}_metas[i] = {out_name}[i] ? &{out_name}_{PREFIX_META_TENSOR_NAME}vec[i] : nullptr; {code_indent} }}""" + ) param_code = param_code + out_name + '_metas, ' else: - meta_tensor_code = meta_tensor_code + code_indent + " phi::MetaTensor " + out_name.replace( - 'kernel_', - PREFIX_META_TENSOR_NAME) + "(" + out_name + ");\n" + meta_tensor_code = ( + meta_tensor_code + + code_indent + + " phi::MetaTensor " + + out_name.replace('kernel_', PREFIX_META_TENSOR_NAME) + + "(" + + out_name + + ");\n" + ) if len(kernel_output_names) == 1: - param_code = param_code + f"&{out_name.replace('kernel_', PREFIX_META_TENSOR_NAME)}, " + param_code = ( + param_code + + f"&{out_name.replace('kernel_', PREFIX_META_TENSOR_NAME)}, " + ) else: - param_code = param_code + f"{out_name} ? &{out_name.replace('kernel_', PREFIX_META_TENSOR_NAME)} : nullptr, " + param_code = ( + param_code + + f"{out_name} ? &{out_name.replace('kernel_', PREFIX_META_TENSOR_NAME)} : nullptr, " + ) param_code = param_code[:-2] return f"""{meta_tensor_code} @@ -575,10 +677,9 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d trans_flag = "{false, true}" return trans_flag - def gene_dense_input(self, - input_name, - input_name_tensor_map, - code_indent=''): + def gene_dense_input( + self, input_name, input_name_tensor_map, code_indent='' + ): input_tensor_code = "" trans_flag = self.gene_trans_flag(input_name) input_names = self.inputs['names'] @@ -588,15 +689,18 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_param = input_names + attr_names input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}", False)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}", False) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});""" + ) return input_tensor_code - def gene_selected_rows_input(self, - input_name, - input_name_tensor_map, - code_indent=''): + def gene_selected_rows_input( + self, input_name, input_name_tensor_map, code_indent='' + ): input_tensor_code = "" trans_flag = self.gene_trans_flag(input_name) input_names = self.inputs['names'] @@ -606,16 +710,19 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_param = input_names + attr_names input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}", False)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}", False) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = TensorToSelectedRows({input_name}); """ + ) return input_tensor_code - def gene_optional_vec_dense_input(self, - input_name, - input_name_tensor_map, - code_indent=''): + def gene_optional_vec_dense_input( + self, 
input_name, input_name_tensor_map, code_indent='' + ): input_tensor_code = "" trans_flag = self.gene_trans_flag(input_name) input_names = self.inputs['names'] @@ -625,13 +732,20 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_param = input_names + attr_names if input_name in self.inplace_map.values(): input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}", True)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}", True) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} paddle::optional> {PREFIX_TENSOR_NAME}{input_name} = TensorToConstDenseTensorPtr({input_name});""" + ) else: input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag}); {code_indent} paddle::optional> {PREFIX_TENSOR_NAME}{input_name}; {code_indent} if ({PREFIX_TENSOR_NAME}{input_name}_vec){{ @@ -640,12 +754,12 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d {code_indent} {PREFIX_TENSOR_NAME}{input_name}->at(i) = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i); {code_indent} }} {code_indent} }}""" + ) return input_tensor_code - def gene_vec_dense_input(self, - input_name, - input_name_tensor_map, - code_indent=''): + def gene_vec_dense_input( + self, input_name, input_name_tensor_map, code_indent='' + ): input_tensor_code = "" trans_flag = self.gene_trans_flag(input_name) input_names = self.inputs['names'] @@ -656,18 +770,26 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d if input_name in self.inplace_map.values(): input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}", True)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}", True) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} std::vector {PREFIX_TENSOR_NAME}{input_name} = TensorToConstDenseTensorPtr({input_name});""" + ) else: input_name_tensor_map[input_name].append( - (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True)) - input_tensor_code = input_tensor_code + f""" + (f"{PREFIX_TENSOR_NAME}{input_name}_vec", True) + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag}); {code_indent} std::vector {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size()); {code_indent} for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{ {code_indent} {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i); {code_indent} }}""" + ) return input_tensor_code def gene_input(self, kernel_tensor_type=None, code_indent=''): @@ -683,55 +805,62 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d if input_name in kernel_param: # input is dense tensor api_tensor_type = self.inputs['input_info'][input_name] - phi_tensor_type = 'dense' if kernel_tensor_type is None else kernel_tensor_type[ - 0][kernel_param.index(input_name)] + phi_tensor_type = ( + 'dense' + if kernel_tensor_type is None + else kernel_tensor_type[0][kernel_param.index(input_name)] + ) if 
api_tensor_type in self.gene_input_func.keys(): input_tensor_code += self.gene_input_func[api_tensor_type][ - phi_tensor_type](input_name, input_name_tensor_map, - code_indent) + phi_tensor_type + ](input_name, input_name_tensor_map, code_indent) else: # do nothing pass else: if input_name in self.infer_meta['param']: if input_name in self.optional_vars: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} paddle::optional {PREFIX_TENSOR_NAME}{input_name} = {input_name} ? paddle::optional(*{input_name}->impl()) : paddle::none;""" + ) else: - if self.inputs['input_info'][ - input_name] == "const std::vector&": - input_tensor_code = input_tensor_code + f""" + if ( + self.inputs['input_info'][input_name] + == "const std::vector&" + ): + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_uq_ptr = TensorToDenseTensor({input_name}); {code_indent} const auto& {PREFIX_TENSOR_NAME}{input_name} = *{PREFIX_TENSOR_NAME}{input_name}_uq_ptr;""" + ) else: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = {input_name}.impl();""" + ) return input_name_tensor_map, input_tensor_code def get_kernel_args(self, kernel_tensor_type=None, code_indent=''): dense_input_trans_map = { - 'const Tensor&': - 'const phi::DenseTensor&', - 'const std::vector&': - 'const std::vector&', - 'const paddle::optional': - 'paddle::optional', - 'const paddle::optional&': - 'const paddle::optional&', - 'const paddle::optional>&': - 'const paddle::optional>&' + 'const Tensor&': 'const phi::DenseTensor&', + 'const std::vector&': 'const std::vector&', + 'const paddle::optional': 'paddle::optional', + 'const paddle::optional&': 'const paddle::optional&', + 'const paddle::optional>&': 'const paddle::optional>&', } dense_out_trans_map = { 'Tensor': 'phi::DenseTensor*', - 'std::vector': 'std::vector&' + 'std::vector': 'std::vector&', } sr_input_trans_map = { - 'const Tensor&': - 'const phi::SelectedRows&', - 'const paddle::optional&': - 'const paddle::optional&' + 'const Tensor&': 'const phi::SelectedRows&', + 'const paddle::optional&': 'const paddle::optional&', } sr_out_trans_map = {'Tensor': 'phi::SelectedRows*'} input_names = self.inputs['names'] @@ -744,10 +873,14 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d kernel_param = input_names + attr_names input_name_tensor_map, input_tensor_code = self.gene_input( - kernel_tensor_type, code_indent) + kernel_tensor_type, code_indent + ) - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} if(platform::RecordOpInfoSupplement::IsEnabled()){{""" + ) single_tensor_names = [] list_tensor_names = [] for input_name, input_tensors in input_name_tensor_map.items(): @@ -760,54 +893,96 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d else: list_tensor_names.append(input_name) if not single_tensor_names: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} std::vector>> input_shapes;""" + ) else: for input_name in single_tensor_names: if input_name in self.optional_vars: input_tensors = input_name_tensor_map[input_name] - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} std::vector {input_name}_record_shapes;""" + ) for input_tensor, _ in 
input_tensors: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} if({input_tensor}){{ {code_indent} {input_name}_record_shapes.push_back((*{input_tensor}).dims()); {code_indent} }}""" + ) - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} std::vector>> input_shapes{{""" + ) for input_name in single_tensor_names[:-1]: if input_name in self.optional_vars: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} {{"{input_name}", {input_name}_record_shapes}},""" + ) else: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} {{"{input_name}", {{""" + ) input_tensors = input_name_tensor_map[input_name] for input_tensor, _ in input_tensors[:-1]: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} (*{input_tensor}).dims(),""" - input_tensor_code = input_tensor_code + f""" + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} (*{input_tensors[-1][0]}).dims()}}}},""" + ) if single_tensor_names[-1] in self.optional_vars: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} {{"{single_tensor_names[-1]}", {code_indent} {single_tensor_names[-1]}_record_shapes}}}};""" + ) else: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} {{"{single_tensor_names[-1]}", {{""" + ) input_tensors = input_name_tensor_map[single_tensor_names[-1]] for input_tensor, _ in input_tensors[:-1]: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} (*{input_tensor}).dims(),""" - input_tensor_code = input_tensor_code + f""" + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} (*{input_tensors[-1][0]}).dims()}}}}}};""" + ) if list_tensor_names: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} std::vector ddims_vec;""" + ) for input_name in list_tensor_names: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} ddims_vec.clear();""" + ) for input_tensor, is_vector in input_name_tensor_map[input_name]: if is_vector: input_tensor_truncate = input_tensor[:-4] @@ -815,29 +990,44 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d input_tensor_truncate = input_tensor if input_name in self.optional_vars: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} if ({input_tensor_truncate}){{ {code_indent} ddims_vec.reserve({input_tensor_truncate}->size()); {code_indent} for (size_t i = 0; i < {input_tensor_truncate}->size(); ++i) {{ {code_indent} ddims_vec.emplace_back((*{input_tensor_truncate}->at(i)).dims()); {code_indent} }} {code_indent} }}""" + ) else: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} ddims_vec.reserve({input_tensor_truncate}.size()); {code_indent} for (size_t i = 0; i < {input_tensor_truncate}.size(); ++i) {{ {code_indent} ddims_vec.emplace_back((*{input_tensor_truncate}[i]).dims()); {code_indent} }}""" + ) else: - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" 
ddims_vec.emplace_back((*{input_tensor}).dims()); {code_indent} """ - input_tensor_code = input_tensor_code + f""" + ) + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} input_shapes.emplace_back("{input_name}", ddims_vec);""" + ) - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} platform::RecordOpInfoSupplement("{self.api}", input_shapes); {code_indent} }}""" + ) kernel_args = ["*dev_ctx"] for param in kernel_param: if param in input_names: @@ -846,20 +1036,27 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d else: if self.inputs['input_info'][param] == "const Tensor&": kernel_args.append("*" + PREFIX_TENSOR_NAME + param) - elif self.inputs['input_info'][ - param] == "const std::vector&": + elif ( + self.inputs['input_info'][param] + == "const std::vector&" + ): kernel_args.append(PREFIX_TENSOR_NAME + param) else: # do nothing pass # input is dense tensor - if kernel_tensor_type is None or kernel_tensor_type[0][ - kernel_param.index(param)] == 'dense': + if ( + kernel_tensor_type is None + or kernel_tensor_type[0][kernel_param.index(param)] + == 'dense' + ): kernel_args_type_list.append( - dense_input_trans_map[input_infos[param]]) + dense_input_trans_map[input_infos[param]] + ) else: # input is selected_rows kernel_args_type_list.append( - sr_input_trans_map[input_infos[param]]) + sr_input_trans_map[input_infos[param]] + ) elif param in attr_names: # set attr for kernel_context if 'IntArray' in self.attrs['attr_info'][param][0]: @@ -867,14 +1064,16 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d param = 'phi::IntArray(' + param + ')' elif 'vector' in self.attrs['attr_info'][param][0]: kernel_args_type_list.append( - 'const std::vector&') + 'const std::vector&' + ) param = param elif 'Scalar' in self.attrs['attr_info'][param][0]: kernel_args_type_list.append('const phi::Scalar&') param = 'phi::Scalar(' + param + ')' else: kernel_args_type_list.append( - self.attrs['attr_info'][param][0]) + self.attrs['attr_info'][param][0] + ) kernel_args.append(param) elif isinstance(param, bool): kernel_args.append(str(param).lower()) @@ -883,7 +1082,10 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d for i, out_type in enumerate(self.outputs['types']): # output is dense tensor - if kernel_tensor_type is None or kernel_tensor_type[1][i] == 'dense': + if ( + kernel_tensor_type is None + or kernel_tensor_type[1][i] == 'dense' + ): kernel_args_type_list.append(dense_out_trans_map[out_type]) else: # output is selected_rows kernel_args_type_list.append(sr_out_trans_map[out_type]) @@ -897,28 +1099,36 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d return "return api_output;" # Override by child class - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): return None, None, None def gen_kernel_code(self, kernel_name, code_indent, inplace_flag=False): kernel_dispatch = self.kernel['dispatch'][kernel_name] input_tensors, kernel_args, kernel_signature = self.get_kernel_args( - kernel_dispatch, code_indent) + kernel_dispatch, code_indent + ) out_tensor_type_list = kernel_dispatch[1] if kernel_dispatch else None outputs_args, kernel_output_names, output_create = self.gene_output( - self.outputs['types'], out_tensor_type_list, 
code_indent, - inplace_flag) + self.outputs['types'], + out_tensor_type_list, + code_indent, + inplace_flag, + ) fallback_kernel_output_trans = "" for kernel_out in outputs_args: - fallback_kernel_output_trans += (f""" + fallback_kernel_output_trans += f""" {code_indent} TransDataBackend({kernel_out}, kernel_backend, {kernel_out});""" - ) - cudnn_args = '' if self.kernel[ - 'use_gpudnn'] == 'false' else ', ' + self.kernel['use_gpudnn'] + cudnn_args = ( + '' + if self.kernel['use_gpudnn'] == 'false' + else ', ' + self.kernel['use_gpudnn'] + ) return f""" {code_indent} VLOG(6) << "{self.api} API kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]"; {code_indent} auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError( @@ -952,8 +1162,9 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d {code_indent} {self.gene_return_code()}""" def get_condition_code(self, kernel_name): - assert self.kernel['dispatch'][kernel_name], \ - f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'scale' in ops.yaml." + assert self.kernel['dispatch'][ + kernel_name + ], f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'scale' in ops.yaml." input_types = self.kernel['dispatch'][kernel_name][0] condition_list = [] for i, in_type in enumerate(input_types): @@ -964,7 +1175,8 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d ) else: condition_list.append( - f"{self.inputs['names'][i]}.is_dense_tensor()") + f"{self.inputs['names'][i]}.is_dense_tensor()" + ) else: if self.inputs['names'][i] in self.optional_vars: condition_list.append( @@ -972,7 +1184,8 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d ) else: condition_list.append( - f"{self.inputs['names'][i]}.is_selected_rows()") + f"{self.inputs['names'][i]}.is_selected_rows()" + ) return " && ".join(condition_list) def gene_dispatch_code(self, kernel_name, inplace_flag=False): @@ -995,18 +1208,25 @@ PADDLE_API {self.get_return_type(inplace_flag)} {api_func_name}({self.get_define kernel_dispatch_code = '' for kernel_name in self.kernel['func']: kernel_dispatch_code += self.gene_dispatch_code( - kernel_name, inplace_flag) - return api_code + f""" + kernel_name, inplace_flag + ) + return ( + api_code + + f""" {kernel_dispatch_code} PADDLE_THROW(phi::errors::Unimplemented( "The kernel of ({self.api}) for input tensors is unimplemented, please check the type of input tensors.")); }} """ + ) else: - return api_code + self.gen_kernel_code(self.kernel['func'][0], '', - inplace_flag) + """ + return ( + api_code + + self.gen_kernel_code(self.kernel['func'][0], '', inplace_flag) + + """ } """ + ) def gene_invoke_code(self, invoke_code, params_code): return f""" @@ -1034,8 +1254,9 @@ PADDLE_API {self.get_return_type()} {self.api}({params_code}) {{ return matched_str[0:-1] + '_val' + matched_str[-1] invoke_code = re.sub(pattern, adjust_name, self.invoke) - params_code = re.sub(pattern, adjust_name, - self.get_define_args()) + params_code = re.sub( + pattern, adjust_name, self.get_define_args() + ) else: invoke_code = self.invoke params_code = self.get_define_args() diff --git a/paddle/phi/api/yaml/generator/api_gen.py b/paddle/phi/api/yaml/generator/api_gen.py index 9cfc54a4c58d5e30eda06b8296a1aa38f73455e3..4b8a06dc219922d35c926b3d652325e5c8b9d71e 100644 --- a/paddle/phi/api/yaml/generator/api_gen.py +++ 
b/paddle/phi/api/yaml/generator/api_gen.py @@ -20,23 +20,24 @@ from api_base import BaseAPI, PREFIX_TENSOR_NAME inplace_out_type_map = { "Tensor": "Tensor&", - "std::vector<Tensor>": "std::vector<Tensor>&" + "std::vector<Tensor>": "std::vector<Tensor>&", } inplace_optional_out_type_map = { "Tensor": "paddle::optional<Tensor>&", - "std::vector<Tensor>": "paddle::optional<std::vector<Tensor>>&" + "std::vector<Tensor>": "paddle::optional<std::vector<Tensor>>&", } class ForwardAPI(BaseAPI): - def __init__(self, api_item_yaml): super(ForwardAPI, self).__init__(api_item_yaml) self.is_dygraph_api, self.intermediate_outs = self.parse_intermediate( - api_item_yaml) + api_item_yaml + ) self.inplace_map, self.view_map = self.parse_inplace_and_view( - api_item_yaml) + api_item_yaml + ) def get_api_func_name(self): if self.is_dygraph_api: @@ -47,17 +48,26 @@ class ForwardAPI(BaseAPI): def gene_input(self, kernel_tensor_type=None, code_indent=''): kernel_param = self.kernel['param'] input_name_tensor_map, input_tensor_code = super().gene_input( - kernel_tensor_type, code_indent) + kernel_tensor_type, code_indent + ) # generate the input that is in view list for i, input_name in enumerate(self.inputs['names']): - if input_name in self.view_map.values( - ) and input_name not in input_name_tensor_map.keys(): - if kernel_tensor_type is None or kernel_tensor_type[0][ - kernel_param.index(input_name)] == 'dense': + if ( + input_name in self.view_map.values() + and input_name not in input_name_tensor_map.keys() + ): + if ( + kernel_tensor_type is None + or kernel_tensor_type[0][kernel_param.index(input_name)] + == 'dense' + ): trans_flag = self.gene_trans_flag(input_name) - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt(0), {trans_flag});""" + ) else: # do nothing pass @@ -87,10 +97,12 @@ class ForwardAPI(BaseAPI): result = re.search(r"(?P<in>\w+)\s*->\s*(?P<out>\w+)", item) in_val = result.group('in') out_val = result.group('out') - assert in_val in self.inputs['names'], \ - f"{self.api} : {mode} input error: the input var name('{in_val}') is not found in the input args of {self.api}." - assert out_val in self.outputs['names'], \ - f"{self.api} : {mode} output error: the output var name('{out_val}') is not found in the output args of {self.api}." + assert ( + in_val in self.inputs['names'] + ), f"{self.api} : {mode} input error: the input var name('{in_val}') is not found in the input args of {self.api}." + assert ( + out_val in self.outputs['names'] + ), f"{self.api} : {mode} output error: the output var name('{out_val}') is not found in the output args of {self.api}." 
if mode == 'inplace': inplace_map[out_val] = in_val @@ -106,7 +118,8 @@ class ForwardAPI(BaseAPI): if inplace_flag and out_name in self.inplace_map: if self.inplace_map[out_name] in self.optional_vars: out_type_list.append( - inplace_optional_out_type_map[out_type]) + inplace_optional_out_type_map[out_type] + ) else: out_type_list.append(inplace_out_type_map[out_type]) else: @@ -124,7 +137,8 @@ class ForwardAPI(BaseAPI): if inplace_flag and out_name in self.inplace_map: if self.inplace_map[out_name] in self.optional_vars: out_type_list.append( - inplace_optional_out_type_map[out_type]) + inplace_optional_out_type_map[out_type] + ) else: out_type_list.append(inplace_out_type_map[out_type]) elif self.is_dygraph_api or out_name not in self.intermediate_outs: @@ -151,11 +165,13 @@ class ForwardAPI(BaseAPI): ] return 'return std::make_tuple(' + ", ".join(selected_code) + ');' - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): kernel_output = [] output_names = [] output_create = "" @@ -164,29 +180,48 @@ class ForwardAPI(BaseAPI): if len(out_dtype_list) == 1: kernel_output.append('kernel_out') output_names.append('kernel_out') - inplace_assign = " = " + self.inplace_map[ - self.outputs['names'][0]] if inplace_flag and self.outputs[ - 'names'][0] in self.inplace_map else "" + inplace_assign = ( + " = " + self.inplace_map[self.outputs['names'][0]] + if inplace_flag and self.outputs['names'][0] in self.inplace_map + else "" + ) output_create = f""" {code_indent} {return_type} api_output{inplace_assign};""" - set_out_func = 'SetKernelOutput' if out_tensor_type_list is None or out_tensor_type_list[ - 0] == 'dense' else 'SetSelectedRowsKernelOutput' + set_out_func = ( + 'SetKernelOutput' + if out_tensor_type_list is None + or out_tensor_type_list[0] == 'dense' + else 'SetSelectedRowsKernelOutput' + ) if return_type == 'std::vector': - assert self.outputs['out_size_expr'][0] is not None, \ - f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." - output_create = output_create + f""" + assert ( + self.outputs['out_size_expr'][0] is not None + ), f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." 
+ output_create = ( + output_create + + f""" {code_indent} auto kernel_out = {set_out_func}({self.outputs['out_size_expr'][0]}, &api_output);""" + ) else: - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} auto kernel_out = {set_out_func}(&api_output);""" - - if not inplace_flag and self.view_map is not None and self.outputs[ - 'names'][0] in self.view_map: - output_create = output_create + f""" + ) + + if ( + not inplace_flag + and self.view_map is not None + and self.outputs['names'][0] in self.view_map + ): + output_create = ( + output_create + + f""" {code_indent} kernel_out->ShareBufferWith(*{PREFIX_TENSOR_NAME}{self.view_map[self.outputs['names'][0]]}); {code_indent} kernel_out->ShareInplaceVersionCounterWith(*{PREFIX_TENSOR_NAME}{self.view_map[self.outputs['names'][0]]}); {code_indent} VLOG(3) << "Perform View between Output and Input Tensor, share allocation and inplace version.";""" + ) elif len(out_dtype_list) > 1: output_create = f""" @@ -206,47 +241,74 @@ class ForwardAPI(BaseAPI): for i in range(len(out_dtype_list)): kernel_output.append(f'kernel_out_{i}') output_names.append(f'kernel_out_{i}') - set_out_func = 'SetKernelOutput' if out_tensor_type_list is None or out_tensor_type_list[ - i] == 'dense' else 'SetSelectedRowsKernelOutput' + set_out_func = ( + 'SetKernelOutput' + if out_tensor_type_list is None + or out_tensor_type_list[i] == 'dense' + else 'SetSelectedRowsKernelOutput' + ) get_out_code = f"&std::get<{i}>(api_output)" - if self.outputs['names'][ - i] in self.inplace_map and self.inplace_map[ - self.outputs['names'][i]] in self.optional_vars: + if ( + self.outputs['names'][i] in self.inplace_map + and self.inplace_map[self.outputs['names'][i]] + in self.optional_vars + ): get_out_code = f"std::get<{i}>(api_output).get_ptr()" if out_dtype_list[i] == 'std::vector': - assert self.outputs['out_size_expr'][i] is not None, \ - f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." + assert ( + self.outputs['out_size_expr'][i] is not None + ), f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." 
# Special case for inplace vector and inplace optional if self.outputs['names'][i] in self.inplace_map: set_out_func = "SetInplaceVectorKernelOutput" - if self.inplace_map[self.outputs['names'] - [i]] in self.optional_vars: - set_out_func = "SetInplaceOptionalVectorKernelOutput" + if ( + self.inplace_map[self.outputs['names'][i]] + in self.optional_vars + ): + set_out_func = ( + "SetInplaceOptionalVectorKernelOutput" + ) get_out_code = f"std::get<{i}>(api_output)" - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} auto kernel_out_{i} = {set_out_func}({self.outputs['out_size_expr'][i]}, {get_out_code});""" + ) else: - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} auto kernel_out_{i} = {set_out_func}({get_out_code});""" + ) - if not inplace_flag and self.view_map is not None and self.outputs[ - 'names'][i] in self.view_map: + if ( + not inplace_flag + and self.view_map is not None + and self.outputs['names'][i] in self.view_map + ): if out_dtype_list[i] == 'Tensor': - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} kernel_out_{i}->ShareBufferWith(*{PREFIX_TENSOR_NAME}{self.view_map[self.outputs['names'][i]]}); {code_indent} kernel_out_{i}->ShareInplaceVersionCounterWith(*{PREFIX_TENSOR_NAME}{self.view_map[self.outputs['names'][i]]}); {code_indent} VLOG(3) << "Perform View between Output and Input Tensor, share allocation and inplace version.";""" + ) else: raise ValueError( - "{} : Output error: only support Tensor type when use view in yaml. But get {}" - .format(self.api, out_dtype_list[i])) + "{} : Output error: only support Tensor type when use view in yaml. But get {}".format( + self.api, out_dtype_list[i] + ) + ) else: raise ValueError( "{} : Output error: the output should not be empty.".format( - self.api)) + self.api + ) + ) return kernel_output, output_names, output_create @@ -288,15 +350,18 @@ DECLARE_bool(conv2d_disable_cudnn); def api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { -""", """ +""", + """ } // namespace experimental } // namespace paddle -""") +""", + ) def generate_api(api_yaml_path, header_file_path, source_file_path): @@ -338,19 +403,26 @@ def generate_api(api_yaml_path, header_file_path, source_file_path): def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ API files') - parser.add_argument('--api_yaml_path', - help='path to api yaml file', - nargs='+', - default='paddle/phi/api/yaml/ops.yaml') - - parser.add_argument('--api_header_path', - help='output of generated api header code file', - default='paddle/phi/api/include/api.h') - - parser.add_argument('--api_source_path', - help='output of generated api source code file', - default='paddle/phi/api/lib/api.cc') + description='Generate PaddlePaddle C++ API files' + ) + parser.add_argument( + '--api_yaml_path', + help='path to api yaml file', + nargs='+', + default='paddle/phi/api/yaml/ops.yaml', + ) + + parser.add_argument( + '--api_header_path', + help='output of generated api header code file', + default='paddle/phi/api/include/api.h', + ) + + parser.add_argument( + '--api_source_path', + help='output of generated api source code file', + default='paddle/phi/api/lib/api.cc', + ) options = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/backward_api_gen.py b/paddle/phi/api/yaml/generator/backward_api_gen.py index 
16052e819377482e8bbe1ef9109e8f2616f521f9..f06af0ead4b63a0f217417e7ce74406c231fb6da 100644 --- a/paddle/phi/api/yaml/generator/backward_api_gen.py +++ b/paddle/phi/api/yaml/generator/backward_api_gen.py @@ -20,7 +20,6 @@ from api_base import BaseAPI class BackwardAPI(BaseAPI): - def __init__(self, backward_item_yaml): super(BackwardAPI, self).__init__(backward_item_yaml) self.check_args(backward_item_yaml['forward']) @@ -33,12 +32,18 @@ class BackwardAPI(BaseAPI): # api_name (const Tensor& input, ... , int attr, ...) -> Tensor(out) result = re.search( r"(?P<op>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)", - forward_config) + forward_config, + ) api = result.group('op') - _, outputs, _, = self.parse_output(self.api, result.group('outputs')) + ( + _, + outputs, + _, + ) = self.parse_output(self.api, result.group('outputs')) outputs = [item.split('@')[0] for item in outputs] - fw_inputs, fw_attrs = self.parse_input_and_attr(api, - result.group('args')) + fw_inputs, fw_attrs = self.parse_input_and_attr( + api, result.group('args') + ) return api, fw_inputs, fw_attrs, outputs @@ -54,27 +59,34 @@ class BackwardAPI(BaseAPI): def check_args(self, forward_config): # parse the forward and backward config _, fw_inputs, fw_attrs, fw_outputs = self.parse_forward_config( - forward_config) + forward_config + ) # check the inputs of backward for input in self.inputs['names']: if input not in fw_inputs['names'] and input not in fw_outputs: if input.endswith('_grad'): original_name = input[:-5] - assert original_name in fw_outputs, \ - f"{self.api} : Input Tensor error: the input tensor({input}) of backward should be an input or output or grad of output in forward api. \ + assert ( + original_name in fw_outputs + ), f"{self.api} : Input Tensor error: the input tensor({input}) of backward should be an input or output or grad of output in forward api. \ Please check the forward of {self.api} in yaml." # check the attributes of backward for attr in self.attrs['names']: - assert (attr in fw_attrs['names'] and self.attrs['attr_info'][attr][0] == fw_attrs['attr_info'][attr][0]) or \ - self.attrs['attr_info'][attr][1] is not None, \ - f"{self.api} : Attribute error: The attribute({attr}) of backward isn't consistent with forward api or doesn't have default value. \ + assert ( + attr in fw_attrs['names'] + and self.attrs['attr_info'][attr][0] + == fw_attrs['attr_info'][attr][0] + ) or self.attrs['attr_info'][attr][ + 1 + ] is not None, f"{self.api} : Attribute error: The attribute({attr}) of backward isn't consistent with forward api or doesn't have default value. \ Please check the args of {self.api} in yaml." # check the output of backward - assert len(self.outputs['types']) <= len(fw_inputs['names']), \ - f"{self.api} : Output error: The number of outputs should be less then the number of inputs of forward api. \ + assert len(self.outputs['types']) <= len( + fw_inputs['names'] + ), f"{self.api} : Output error: The number of outputs should be less then the number of inputs of forward api. \ Please check the output of {self.api} in yaml." 
def get_declare_args(self, inplace_flag=False): @@ -83,13 +95,16 @@ class BackwardAPI(BaseAPI): def get_define_args(self, inplace_flag=False): out_type_map = { 'Tensor': 'Tensor*', - 'std::vector': 'std::vector' + 'std::vector': 'std::vector', } intputs_and_attrs = super(BackwardAPI, self).get_define_args() outs = [] for i, name in enumerate(self.outputs['names']): - outs.append(out_type_map[self.outputs['types'][i]] + ' ' + - name.split('@')[0]) + outs.append( + out_type_map[self.outputs['types'][i]] + + ' ' + + name.split('@')[0] + ) result = intputs_and_attrs + ', ' + ", ".join(outs) return result @@ -100,7 +115,8 @@ class BackwardAPI(BaseAPI): if not self.is_base_api: invoke_func_name = self.invoke.split('(')[0] if (not invoke_func_name.endswith("_grad")) and ( - not invoke_func_name.endswith('_impl')): + not invoke_func_name.endswith('_impl') + ): return "" api_func_name = self.get_api_func_name() api_declaration = f""" @@ -124,11 +140,13 @@ PADDLE_API void {api_func_name}({self.get_declare_args()}); def get_return_type(self, inplace_flag=False): return 'void' - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): kernel_output = [] output_names = [] output_create = "" @@ -136,60 +154,101 @@ PADDLE_API void {api_func_name}({self.get_declare_args()}); if len(out_dtype_list) == 1: kernel_output.append('kernel_out') output_names.append('kernel_out') - inplace_assign = " = " + self.inplace_map[self.outputs['names'][ - 0]] if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][0] in self.inplace_map else "" + inplace_assign = ( + " = " + self.inplace_map[self.outputs['names'][0]] + if inplace_flag + and self.inplace_map is not None + and self.outputs['names'][0] in self.inplace_map + else "" + ) output_create = "" - set_out_func = 'SetKernelOutput' if out_tensor_type_list is None or out_tensor_type_list[ - 0] == 'dense' else 'SetSelectedRowsKernelOutput' + set_out_func = ( + 'SetKernelOutput' + if out_tensor_type_list is None + or out_tensor_type_list[0] == 'dense' + else 'SetSelectedRowsKernelOutput' + ) if out_dtype_list[0] == 'std::vector': - assert self.outputs['out_size_expr'] is not None, \ - f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." - output_create = output_create + f""" + assert ( + self.outputs['out_size_expr'] is not None + ), f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." 
+ output_create = ( + output_create + + f""" {code_indent} auto kernel_out = {set_out_func}(&{self.outputs['names'][0]});""" + ) else: - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} auto kernel_out = {set_out_func}({self.outputs['names'][0]});""" + ) elif len(out_dtype_list) > 1: output_create = "" for i, out_type_item in enumerate(out_dtype_list): kernel_output.append(f'kernel_out_{i}') output_names.append(f'kernel_out_{i}') - set_out_func = 'SetKernelOutput' if out_tensor_type_list is None or out_tensor_type_list[ - i] == 'dense' else 'SetSelectedRowsKernelOutput' + set_out_func = ( + 'SetKernelOutput' + if out_tensor_type_list is None + or out_tensor_type_list[i] == 'dense' + else 'SetSelectedRowsKernelOutput' + ) if out_type_item == 'Tensor': - if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][i] in self.inplace_map: - output_create = output_create + f""" + if ( + inplace_flag + and self.inplace_map is not None + and self.outputs['names'][i] in self.inplace_map + ): + output_create = ( + output_create + + f""" {code_indent} *{self.outputs['names'][i]} = {self.inplace_map[self.outputs['names'][i]]};""" + ) - output_create = output_create + f""" + output_create = ( + output_create + + f""" {code_indent} auto kernel_out_{i} = {set_out_func}({self.outputs['names'][i]});""" + ) else: - if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][i] in self.inplace_map: - output_create = output_create + f""" + if ( + inplace_flag + and self.inplace_map is not None + and self.outputs['names'][i] in self.inplace_map + ): + output_create = ( + output_create + + f""" {code_indent} *{self.outputs['names'][i]} = {self.inplace_map[self.outputs['names'][i]]};""" - - assert self.outputs['out_size_expr'][i] is not None, \ - f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." - output_create = output_create + f""" + ) + + assert ( + self.outputs['out_size_expr'][i] is not None + ), f"{self.api}: The out size expr : '{{expr}}' should be set when output has Tensor[]. You can refer 'split' api." 
+ output_create = ( + output_create + + f""" {code_indent} auto kernel_out_{i} = {set_out_func}(&{self.outputs['names'][i]});""" + ) else: raise ValueError( "{} : Output error: the output should not be empty.".format( - self.api)) + self.api + ) + ) return kernel_output, output_names, output_create def gene_invoke_code(self, invoke_code, params_code): invoke_func_name = invoke_code.split('(')[0].strip() if invoke_func_name.endswith('_grad') or invoke_func_name.endswith( - '_impl'): + '_impl' + ): return f""" PADDLE_API {self.get_return_type()} {self.api}({params_code}) {{ {invoke_code}; @@ -235,19 +294,23 @@ DECLARE_bool(conv2d_disable_cudnn); def backward_api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { -""", """ +""", + """ } // namespace experimental } // namespace paddle -""") +""", + ) -def generate_backward_api(backward_yaml_path, header_file_path, - source_file_path): +def generate_backward_api( + backward_yaml_path, header_file_path, source_file_path +): bw_apis = [] for each_api_yaml in backward_yaml_path: @@ -283,18 +346,25 @@ def generate_backward_api(backward_yaml_path, header_file_path, def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ backward API files') - parser.add_argument('--backward_yaml_path', - help='path to backward yaml file', - nargs='+', - default='paddle/phi/api/yaml/backward.yaml') - parser.add_argument('--backward_header_path', - help='output of generated backward header code file', - default='paddle/phi/api/backward/backward_api.h') - - parser.add_argument('--backward_source_path', - help='output of generated backward source code file', - default='paddle/phi/api/lib/backward_api.cc') + description='Generate PaddlePaddle C++ backward API files' + ) + parser.add_argument( + '--backward_yaml_path', + help='path to backward yaml file', + nargs='+', + default='paddle/phi/api/yaml/backward.yaml', + ) + parser.add_argument( + '--backward_header_path', + help='output of generated backward header code file', + default='paddle/phi/api/backward/backward_api.h', + ) + + parser.add_argument( + '--backward_source_path', + help='output of generated backward source code file', + default='paddle/phi/api/lib/backward_api.cc', + ) options = parser.parse_args() @@ -302,8 +372,9 @@ def main(): header_file_path = options.backward_header_path source_file_path = options.backward_source_path - generate_backward_api(backward_yaml_path, header_file_path, - source_file_path) + generate_backward_api( + backward_yaml_path, header_file_path, source_file_path + ) if __name__ == '__main__': diff --git a/paddle/phi/api/yaml/generator/cross_validate.py b/paddle/phi/api/yaml/generator/cross_validate.py index 63e94eab12a7ebfde5144157ba169135338e98e6..f42ff8680a4a4bd0ab63d9a5a49880977cb960ae 100644 --- a/paddle/phi/api/yaml/generator/cross_validate.py +++ b/paddle/phi/api/yaml/generator/cross_validate.py @@ -34,17 +34,22 @@ def main(forward_api_yaml_paths, backward_api_yaml_paths): if __name__ == "__main__": current_dir = Path(__file__).parent / "temp" parser = argparse.ArgumentParser( - description="Parse api yaml into canonical format.") - parser.add_argument('--forward_yaml_paths', - type=str, - nargs='+', - default=str(current_dir / "api.parsed.yaml"), - help="forward api yaml file.") - parser.add_argument('--backward_yaml_paths', - type=str, - nargs='+', - default=str(current_dir / "backward_api.parsed.yaml"), - help="backward api yaml file.") + description="Parse api yaml into canonical format." 
+ ) + parser.add_argument( + '--forward_yaml_paths', + type=str, + nargs='+', + default=str(current_dir / "api.parsed.yaml"), + help="forward api yaml file.", + ) + parser.add_argument( + '--backward_yaml_paths', + type=str, + nargs='+', + default=str(current_dir / "backward_api.parsed.yaml"), + help="backward api yaml file.", + ) args = parser.parse_args() main(args.forward_yaml_paths, args.backward_yaml_paths) diff --git a/paddle/phi/api/yaml/generator/filters.py b/paddle/phi/api/yaml/generator/filters.py index 30af0dee34dabd43e4c7a04557dc5cb1311f6467..9654f8a1991e798d6f66a7fa43ac9b975e64fbed 100644 --- a/paddle/phi/api/yaml/generator/filters.py +++ b/paddle/phi/api/yaml/generator/filters.py @@ -15,12 +15,20 @@ import itertools import re -from type_mapping import (input_types_map, optional_input_types_map, - attr_types_map, opmaker_attr_types_map, - output_type_map) -from type_mapping import (dense_input_types_map, dense_optional_input_types_map, - dense_output_types_map, sr_output_types_map, - phi_attr_types_map) +from type_mapping import ( + input_types_map, + optional_input_types_map, + attr_types_map, + opmaker_attr_types_map, + output_type_map, +) +from type_mapping import ( + dense_input_types_map, + dense_optional_input_types_map, + dense_output_types_map, + sr_output_types_map, + phi_attr_types_map, +) def quote(s): @@ -104,7 +112,7 @@ def to_input_name(s): is more common. """ match = re.match(r"(d\d*)(\w+)", s) - assert (match.group(1) != ""), "it should be a grad style name." + assert match.group(1) != "", "it should be a grad style name." return match.group(2) @@ -117,9 +125,10 @@ def cartesian_prod_attrs(attrs): items.append((name, "{}Tensor".format(name))) elif type_name == "IntArray": items.append( - (name, "{}Tensor".format(name), "{}TensorList".format(name))) + (name, "{}Tensor".format(name), "{}TensorList".format(name)) + ) else: - items.append((name, )) + items.append((name,)) _combinations = itertools.product(*items) combinations = [] diff --git a/paddle/phi/api/yaml/generator/generate_op.py b/paddle/phi/api/yaml/generator/generate_op.py index b78fbecd3fbb7096cfa96d94c10a07842290e82c..a7efe85398045142b3cbfc23a5adff9ae8625744 100644 --- a/paddle/phi/api/yaml/generator/generate_op.py +++ b/paddle/phi/api/yaml/generator/generate_op.py @@ -19,18 +19,32 @@ from pathlib import Path import yaml from jinja2 import Environment, FileSystemLoader, StrictUndefined -from filters import to_op_attr_type, to_opmaker_name, to_opmaker_name_cstr, to_pascal_case -from tests import is_base_api, is_vec, is_scalar, is_initializer_list, supports_inplace, supports_no_need_buffer +from filters import ( + to_op_attr_type, + to_opmaker_name, + to_opmaker_name_cstr, + to_pascal_case, +) +from tests import ( + is_base_api, + is_vec, + is_scalar, + is_initializer_list, + supports_inplace, + supports_no_need_buffer, +) from filters import to_input_name, cartesian_prod_mapping from parse_utils import to_named_dict file_loader = FileSystemLoader(Path(__file__).parent / "templates") -env = Environment(loader=file_loader, - keep_trailing_newline=True, - trim_blocks=True, - lstrip_blocks=True, - undefined=StrictUndefined, - extensions=['jinja2.ext.do']) +env = Environment( + loader=file_loader, + keep_trailing_newline=True, + trim_blocks=True, + lstrip_blocks=True, + undefined=StrictUndefined, + extensions=['jinja2.ext.do'], +) env.filters["to_op_attr_type"] = to_op_attr_type env.filters["to_opmaker_name"] = to_opmaker_name env.filters["to_pascal_case"] = to_pascal_case @@ -54,7 +68,6 @@ def 
restruct_io(api): # replace name of op and params for OpMaker def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict): - def get_api_and_op_name(api_item): names = api_item.split('(') if len(names) == 1: @@ -74,7 +87,8 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict): forward_api_item['op_name'] = op_name if 'backward' in api_args and has_backward: bw_api_name, bw_op_name = get_api_and_op_name( - api_args['backward'].split(',')[0]) + api_args['backward'].split(',')[0] + ) forward_api_item['backward'] = bw_op_name backward_api_item['op_name'] = bw_op_name @@ -100,8 +114,10 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict): ] if forward_api_item['kernel']['data_type']: forward_api_item['kernel']['data_type']['candidates'] = [ - args_map[param] if param in args_map else param for param in - forward_api_item['kernel']['data_type']['candidates'] + args_map[param] if param in args_map else param + for param in forward_api_item['kernel']['data_type'][ + 'candidates' + ] ] if forward_api_item['kernel']['backend']: forward_api_item['kernel']['backend']['candidates'] = [ @@ -128,25 +144,32 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict): for args_item in backward_api_item['inputs']: if args_item['name'] in args_map: args_item['name'] = args_map[args_item['name']] - elif args_item['name'].endswith( - '_grad') and args_item['name'][:-5] in args_map: - args_map[args_item['name']] = args_map[args_item['name'] - [:-5]] + '_grad' + elif ( + args_item['name'].endswith('_grad') + and args_item['name'][:-5] in args_map + ): + args_map[args_item['name']] = ( + args_map[args_item['name'][:-5]] + '_grad' + ) args_item['name'] = args_map[args_item['name']] for args_item in backward_api_item['attrs']: if args_item['name'] in args_map: args_item['name'] = args_map[args_item['name']] for args_item in backward_api_item['outputs']: - if args_item['name'].endswith( - '_grad') and args_item['name'][:-5] in args_map: - args_map[args_item['name']] = args_map[args_item['name'] - [:-5]] + '_grad' + if ( + args_item['name'].endswith('_grad') + and args_item['name'][:-5] in args_map + ): + args_map[args_item['name']] = ( + args_map[args_item['name'][:-5]] + '_grad' + ) args_item['name'] = args_map[args_item['name']] if 'invoke' in backward_api_item: backward_api_item['invoke']['args'] = [ args_map[param.strip()] - if param.strip() in args_map else param.strip() + if param.strip() in args_map + else param.strip() for param in backward_api_item['invoke']['args'].split(',') ] continue @@ -161,18 +184,24 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict): ] if backward_api_item['kernel']['data_type']: backward_api_item['kernel']['data_type']['candidates'] = [ - args_map[param] if param in args_map else param for param in - backward_api_item['kernel']['data_type']['candidates'] + args_map[param] if param in args_map else param + for param in backward_api_item['kernel']['data_type'][ + 'candidates' + ] ] if backward_api_item['kernel']['backend']: backward_api_item['kernel']['backend']['candidates'] = [ - args_map[param] if param in args_map else param for param in - backward_api_item['kernel']['backend']['candidates'] + args_map[param] if param in args_map else param + for param in backward_api_item['kernel']['backend'][ + 'candidates' + ] ] if backward_api_item['kernel']['layout']: backward_api_item['kernel']['layout']['candidates'] = [ - args_map[param] if param in args_map else param for param in - 
backward_api_item['kernel']['layout']['candidates'] + args_map[param] if param in args_map else param + for param in backward_api_item['kernel']['layout'][ + 'candidates' + ] ] if backward_api_item['no_need_buffer']: backward_api_item['no_need_buffer'] = [ @@ -193,36 +222,43 @@ def process_invoke_op(forward_api_dict, backward_api_dict): bw_api['invoke']['attrs'] = [] bw_api['invoke']['outputs'] = [] for input_item in reuse_op['inputs']: - bw_api['invoke']['inputs'].append({ - 'name': - input_item['name'], - 'value': - args_list[args_index] - }) + bw_api['invoke']['inputs'].append( + { + 'name': input_item['name'], + 'value': args_list[args_index], + } + ) args_index = args_index + 1 for attr in reuse_op['attrs']: if args_index < len(args_list): - attr_value = f"this->GetAttr(\"{args_list[args_index]}\")" if args_list[ - args_index] in bw_api['attr_dict'] else args_list[ - args_index] - bw_api['invoke']['attrs'].append({ - 'name': attr['name'], - 'value': attr_value - }) + attr_value = ( + f"this->GetAttr(\"{args_list[args_index]}\")" + if args_list[args_index] in bw_api['attr_dict'] + else args_list[args_index] + ) + bw_api['invoke']['attrs'].append( + {'name': attr['name'], 'value': attr_value} + ) args_index = args_index + 1 else: break for idx, output_item in enumerate(reuse_op['outputs']): - bw_api['invoke']['outputs'].append({ - 'name': - output_item['name'], - 'value': - bw_api['outputs'][idx]['name'] - }) - - -def main(ops_yaml_path, backward_yaml_path, op_compat_yaml_path, - op_version_yaml_path, output_op_path, output_arg_map_path): + bw_api['invoke']['outputs'].append( + { + 'name': output_item['name'], + 'value': bw_api['outputs'][idx]['name'], + } + ) + + +def main( + ops_yaml_path, + backward_yaml_path, + op_compat_yaml_path, + op_version_yaml_path, + output_op_path, + output_arg_map_path, +): with open(ops_yaml_path, "rt") as f: apis = yaml.safe_load(f) apis = [restruct_io(api) for api in apis] @@ -273,9 +309,9 @@ def main(ops_yaml_path, backward_yaml_path, op_compat_yaml_path, op_template = env.get_template('op.c.j2') with open(output_op_path, "wt") as f: - msg = op_template.render(apis=apis, - backward_apis=backward_apis, - api_dict=api_dict) + msg = op_template.render( + apis=apis, backward_apis=backward_apis, api_dict=api_dict + ) f.write(msg) ks_template = env.get_template('ks.c.j2') @@ -286,28 +322,35 @@ def main(ops_yaml_path, backward_yaml_path, op_compat_yaml_path, if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Generate operator file from api yaml.") - parser.add_argument('--ops_yaml_path', - type=str, - help="parsed ops yaml file.") - parser.add_argument('--backward_yaml_path', - type=str, - help="parsed backward ops yaml file.") - parser.add_argument('--op_compat_yaml_path', - type=str, - help="ops args compat yaml file.") - parser.add_argument('--op_version_yaml_path', - type=str, - help="ops version yaml file.") - parser.add_argument("--output_op_path", - type=str, - help="path to save generated operators.") + description="Generate operator file from api yaml." + ) + parser.add_argument( + '--ops_yaml_path', type=str, help="parsed ops yaml file." + ) + parser.add_argument( + '--backward_yaml_path', type=str, help="parsed backward ops yaml file." + ) + parser.add_argument( + '--op_compat_yaml_path', type=str, help="ops args compat yaml file." + ) + parser.add_argument( + '--op_version_yaml_path', type=str, help="ops version yaml file." + ) + parser.add_argument( + "--output_op_path", type=str, help="path to save generated operators." 
+ ) parser.add_argument( "--output_arg_map_path", type=str, - help="path to save generated argument mapping functions.") + help="path to save generated argument mapping functions.", + ) args = parser.parse_args() - main(args.ops_yaml_path, args.backward_yaml_path, args.op_compat_yaml_path, - args.op_version_yaml_path, args.output_op_path, - args.output_arg_map_path) + main( + args.ops_yaml_path, + args.backward_yaml_path, + args.op_compat_yaml_path, + args.op_version_yaml_path, + args.output_op_path, + args.output_arg_map_path, + ) diff --git a/paddle/phi/api/yaml/generator/generate_sparse_op.py b/paddle/phi/api/yaml/generator/generate_sparse_op.py index e4d7f44856bfdfd93f7401b1602032a2ddfdb7dc..48ba0d81eca3d621fb3e39c317a93da675544da1 100644 --- a/paddle/phi/api/yaml/generator/generate_sparse_op.py +++ b/paddle/phi/api/yaml/generator/generate_sparse_op.py @@ -19,19 +19,33 @@ from pathlib import Path import yaml from jinja2 import Environment, FileSystemLoader, StrictUndefined -from filters import to_op_attr_type, to_opmaker_name, to_opmaker_name_cstr, to_pascal_case -from tests import is_base_api, is_vec, is_scalar, is_initializer_list, supports_inplace, supports_no_need_buffer +from filters import ( + to_op_attr_type, + to_opmaker_name, + to_opmaker_name_cstr, + to_pascal_case, +) +from tests import ( + is_base_api, + is_vec, + is_scalar, + is_initializer_list, + supports_inplace, + supports_no_need_buffer, +) from filters import to_input_name, cartesian_prod_mapping from parse_utils import to_named_dict from generate_op import process_invoke_op file_loader = FileSystemLoader(Path(__file__).parent / "templates") -env = Environment(loader=file_loader, - keep_trailing_newline=True, - trim_blocks=True, - lstrip_blocks=True, - undefined=StrictUndefined, - extensions=['jinja2.ext.do']) +env = Environment( + loader=file_loader, + keep_trailing_newline=True, + trim_blocks=True, + lstrip_blocks=True, + undefined=StrictUndefined, + extensions=['jinja2.ext.do'], +) env.filters["to_op_attr_type"] = to_op_attr_type env.filters["to_opmaker_name"] = to_opmaker_name env.filters["to_pascal_case"] = to_pascal_case @@ -56,8 +70,9 @@ def restruct_io(api): SPARSE_OP_PREFIX = 'sparse_' -def main(api_yaml_path, backward_yaml_path, output_op_path, - output_arg_map_path): +def main( + api_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path +): with open(api_yaml_path, "rt") as f: apis = yaml.safe_load(f) apis = [restruct_io(api) for api in apis] @@ -86,8 +101,9 @@ def main(api_yaml_path, backward_yaml_path, output_op_path, for bw_api in backward_apis: if 'invoke' in bw_api: if bw_api['invoke']['func'] in forward_api_dict: - bw_api['invoke'][ - 'func'] = SPARSE_OP_PREFIX + bw_api['invoke']['func'] + bw_api['invoke']['func'] = ( + SPARSE_OP_PREFIX + bw_api['invoke']['func'] + ) # fill backward field for an api if another api claims it as forward for name, backward_api in backward_api_dict.items(): @@ -111,9 +127,9 @@ def main(api_yaml_path, backward_yaml_path, output_op_path, op_template = env.get_template('sparse_op.c.j2') with open(output_op_path, "wt") as f: - msg = op_template.render(apis=apis, - backward_apis=backward_apis, - api_dict=api_dict) + msg = op_template.render( + apis=apis, backward_apis=backward_apis, api_dict=api_dict + ) f.write(msg) ks_template = env.get_template('sparse_ks.c.j2') @@ -124,21 +140,29 @@ def main(api_yaml_path, backward_yaml_path, output_op_path, if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Generate operator file from api yaml.") - 
parser.add_argument('--ops_yaml_path', - type=str, - help="parsed sparse ops yaml file.") - parser.add_argument('--backward_ops_yaml_path', - type=str, - help="parsed backward sparse ops yaml file.") - parser.add_argument("--output_op_path", - type=str, - help="path to save generated operators.") + description="Generate operator file from api yaml." + ) + parser.add_argument( + '--ops_yaml_path', type=str, help="parsed sparse ops yaml file." + ) + parser.add_argument( + '--backward_ops_yaml_path', + type=str, + help="parsed backward sparse ops yaml file.", + ) + parser.add_argument( + "--output_op_path", type=str, help="path to save generated operators." + ) parser.add_argument( "--output_arg_map_path", type=str, - help="path to save generated argument mapping functions.") + help="path to save generated argument mapping functions.", + ) args = parser.parse_args() - main(args.ops_yaml_path, args.backward_ops_yaml_path, args.output_op_path, - args.output_arg_map_path) + main( + args.ops_yaml_path, + args.backward_ops_yaml_path, + args.output_op_path, + args.output_arg_map_path, + ) diff --git a/paddle/phi/api/yaml/generator/intermediate_api_gen.py b/paddle/phi/api/yaml/generator/intermediate_api_gen.py index d9562e189e867c95923c8148c009b237dcc61b80..1f1a55b9d1a37741878543fd793da58d13610d3a 100644 --- a/paddle/phi/api/yaml/generator/intermediate_api_gen.py +++ b/paddle/phi/api/yaml/generator/intermediate_api_gen.py @@ -58,28 +58,37 @@ def source_include(header_file_path): def api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { -""", """ +""", + """ } // namespace experimental } // namespace paddle -""") +""", + ) def sparse_namespace(): - return (""" + return ( + """ namespace sparse { -""", """ +""", + """ } // namespace sparse -""") +""", + ) -def generate_intermediate_api(api_yaml_path, sparse_api_yaml_path, - dygraph_header_file_path, - dygraph_source_file_path): +def generate_intermediate_api( + api_yaml_path, + sparse_api_yaml_path, + dygraph_header_file_path, + dygraph_source_file_path, +): dygraph_header_file = open(dygraph_header_file_path, 'w') dygraph_source_file = open(dygraph_source_file_path, 'w') @@ -132,23 +141,32 @@ def generate_intermediate_api(api_yaml_path, sparse_api_yaml_path, def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ Sparse API files') - parser.add_argument('--api_yaml_path', - nargs='+', - help='path to api yaml file', - default='paddle/phi/api/yaml/ops.yaml') - - parser.add_argument('--sparse_api_yaml_path', - help='path to sparse api yaml file', - default='paddle/phi/api/yaml/sparse_ops.yaml') - - parser.add_argument('--dygraph_api_header_path', - help='output of generated dygraph api header code file', - default='paddle/phi/api/lib/dygraph_api.h') - - parser.add_argument('--dygraph_api_source_path', - help='output of generated dygraph api source code file', - default='paddle/phi/api/lib/dygraph_api.cc') + description='Generate PaddlePaddle C++ Sparse API files' + ) + parser.add_argument( + '--api_yaml_path', + nargs='+', + help='path to api yaml file', + default='paddle/phi/api/yaml/ops.yaml', + ) + + parser.add_argument( + '--sparse_api_yaml_path', + help='path to sparse api yaml file', + default='paddle/phi/api/yaml/sparse_ops.yaml', + ) + + parser.add_argument( + '--dygraph_api_header_path', + help='output of generated dygraph api header code file', + default='paddle/phi/api/lib/dygraph_api.h', + ) + + parser.add_argument( + '--dygraph_api_source_path', + help='output of 
generated dygraph api source code file', + default='paddle/phi/api/lib/dygraph_api.cc', + ) options = parser.parse_args() @@ -157,9 +175,12 @@ def main(): dygraph_header_file_path = options.dygraph_api_header_path dygraph_source_file_path = options.dygraph_api_source_path - generate_intermediate_api(api_yaml_path, sparse_api_yaml_path, - dygraph_header_file_path, - dygraph_source_file_path) + generate_intermediate_api( + api_yaml_path, + sparse_api_yaml_path, + dygraph_header_file_path, + dygraph_source_file_path, + ) if __name__ == '__main__': diff --git a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py index 85f1d1657557eae08efd4cfaac311b3d31279f55..77596b9f2d46b9833744a7c1a9d73cbc076bb79a 100644 --- a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py +++ b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py @@ -52,16 +52,20 @@ ATTR_TYPE_STRING_MAP = { 'int64_t[]': 'std::vector<int64_t>', 'float[]': 'std::vector<float>', 'double[]': 'std::vector<double>', - 'str[]': 'std::vector<std::string>' + 'str[]': 'std::vector<std::string>', } def parse_attr(attr_str): result = re.search( r"(?P<attr_type>[a-zA-Z0-9_[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)", - attr_str) - return ATTR_TYPE_STRING_MAP[result.group('attr_type')], result.group( - 'name'), result.group('default_val') + attr_str, + ) + return ( + ATTR_TYPE_STRING_MAP[result.group('attr_type')], + result.group('name'), + result.group('default_val'), + ) def generate_extra_info(op_compat_yaml_path, ops_extra_info_path): @@ -93,14 +97,16 @@ def generate_extra_info(op_compat_yaml_path, ops_extra_info_path): ) if attr_type.startswith("std::vector"): attr_map_list.append( - f"{{\"{attr_name}\", {attr_type}{default_val}}}") + f"{{\"{attr_name}\", {attr_type}{default_val}}}" + ) else: attr_map_list.append( f"{{\"{attr_name}\", {attr_type}{{{default_val}}}}}" ) api_extra_attr_map = ", ".join(attr_map_list) api_extra_attr_checkers = ",\n ".join( - attr_checker_func_list) + attr_checker_func_list + ) extra_map_str_list.append( f"{{\"{get_op_name(op_compat_args['op'])}\", {{ {api_extra_attr_map} }}}}" ) @@ -111,28 +117,37 @@ for bw_item in op_compat_args['backward'].split(','): bw_op_name = get_op_name(bw_item) extra_map_str_list.append( - f"{{\"{bw_op_name}\", {{ {api_extra_attr_map} }}}}") + f"{{\"{bw_op_name}\", {{ {api_extra_attr_map} }}}}" + ) extra_checker_str_list.append( f"{{\"{bw_op_name}\", {{ {api_extra_attr_checkers} }}}}" ) ops_extra_info_file = open(ops_extra_info_path, 'w') ops_extra_info_file.write( - map_code_template(",\n ".join(extra_map_str_list), - ",\n ".join(extra_checker_str_list))) + map_code_template( + ",\n ".join(extra_map_str_list), + ",\n ".join(extra_checker_str_list), + ) + ) ops_extra_info_file.close() def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle Extra Param Info for Op') - parser.add_argument('--op_compat_yaml_path', - help='path to api compat yaml file', - default='paddle/phi/api/yaml/op_compat.yaml') - - parser.add_argument('--ops_extra_info_path', - help='output of generated extra_prama_info code file', - default='paddle/fluid/operators/ops_extra_info.cc') + description='Generate PaddlePaddle Extra Param Info for Op' + ) + parser.add_argument( + '--op_compat_yaml_path', + help='path to api compat yaml file', + default='paddle/phi/api/yaml/op_compat.yaml', + ) + + parser.add_argument( + '--ops_extra_info_path', + help='output of generated extra_prama_info code file', 
default='paddle/fluid/operators/ops_extra_info.cc', + ) options = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/parse_api.py b/paddle/phi/api/yaml/generator/parse_api.py index 4536c86653821c24bc279d8045cab6ecdcf6457c..64037216b24e2d4cb5fc0920fd1d1cbd1abd6c84 100644 --- a/paddle/phi/api/yaml/generator/parse_api.py +++ b/paddle/phi/api/yaml/generator/parse_api.py @@ -36,11 +36,12 @@ def main(api_yaml_path, output_path, backward): if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Parse api yaml into canonical format.") + description="Parse api yaml into canonical format." + ) parser.add_argument('--api_yaml_path', type=str, help="api yaml file.") - parser.add_argument("--output_path", - type=str, - help="path to save parsed yaml file.") + parser.add_argument( + "--output_path", type=str, help="path to save parsed yaml file." + ) parser.add_argument("--backward", action="store_true", default=False) args = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/parse_utils.py b/paddle/phi/api/yaml/generator/parse_utils.py index ed8068b40e827494264ab7fb8d7fbc595649b88b..8220432cf849ee024763676891a79ee530161930 100644 --- a/paddle/phi/api/yaml/generator/parse_utils.py +++ b/paddle/phi/api/yaml/generator/parse_utils.py @@ -34,39 +34,42 @@ def parse_arg(api_name: str, s: str) -> Dict[str, str]: 2. typename name = default_value """ typename, rest = [item.strip() for item in s.split(" ", 1)] - assert len( - typename - ) > 0, f"The arg typename should not be empty. Please check the args of {api_name} in yaml." + assert ( + len(typename) > 0 + ), f"The arg typename should not be empty. Please check the args of {api_name} in yaml." - assert rest.count( - "=") <= 1, f"There is more than 1 = in an arg in {api_name}" + assert ( + rest.count("=") <= 1 + ), f"There is more than 1 = in an arg in {api_name}" if rest.count("=") == 1: name, default_value = [item.strip() for item in rest.split("=", 1)] - assert len( - name - ) > 0, f"The arg name should not be empty. Please check the args of {api_name} in yaml." - assert len( - default_value - ) > 0, f"The default value should not be empty. Please check the args of {api_name} in yaml." + assert ( + len(name) > 0 + ), f"The arg name should not be empty. Please check the args of {api_name} in yaml." + assert ( + len(default_value) > 0 + ), f"The default value should not be empty. Please check the args of {api_name} in yaml." return { "typename": typename, "name": name, - "default_value": default_value + "default_value": default_value, } else: name = rest.strip() - assert len( - name - ) > 0, f"The arg name should not be empty. Please check the args of {api_name} in yaml." + assert ( + len(name) > 0 + ), f"The arg name should not be empty. Please check the args of {api_name} in yaml." return {"typename": typename, "name": name} -def parse_input_and_attr(api_name: str, - arguments: str) -> Tuple[List, List, Dict, Dict]: +def parse_input_and_attr( + api_name: str, arguments: str +) -> Tuple[List, List, Dict, Dict]: args_str = arguments.strip() - assert args_str.startswith('(') and args_str.endswith(')'), \ - (f"Args declaration should start with '(' and end with ')', " - f"please check the args of {api_name} in yaml.") + assert args_str.startswith('(') and args_str.endswith(')'), ( + f"Args declaration should start with '(' and end with ')', " + f"please check the args of {api_name} in yaml." 
+ ) args_str = args_str[1:-1] args = parse_plain_list(args_str) @@ -80,14 +83,17 @@ def parse_input_and_attr(api_name: str, typename = item["typename"] name = item["name"] if is_input(typename): - assert len(attrs) == 0, \ - (f"The input Tensor should appear before attributes. " + assert len(attrs) == 0, ( + f"The input Tensor should appear before attributes. " f"please check the position of {api_name}:input({name}) " - f"in yaml.") + f"in yaml." + ) inputs.append(item) elif is_attr(typename): if met_attr_with_default_value: - assert "default_value" in item, f"{api_name}: Arguments with default value should not precede those without default value" + assert ( + "default_value" in item + ), f"{api_name}: Arguments with default value should not precede those without default value" elif "default_value" in item: met_attr_with_default_value = True attrs.append(item) @@ -100,7 +106,8 @@ def parse_output(api_name: str, s: str) -> Dict[str, str]: """parse an output, typename or typename(name).""" match = re.search( r"(?P[a-zA-Z0-9_[\]]+)\s*(?P\([a-zA-Z0-9_@]+\))?\s*(?P\{[^\}]+\})?", - s) + s, + ) typename = match.group("out_type") name = match.group("name") size_expr = match.group("expr") @@ -108,13 +115,15 @@ def parse_output(api_name: str, s: str) -> Dict[str, str]: name = name[1:-1] if name is not None else 'out' size_expr = size_expr[1:-1] if size_expr is not None else None - assert is_output(typename), \ - (f"Invalid output type: {typename} in api: {api_name}." - f"Supported types are Tensor and Tensor[]") + assert is_output(typename), ( + f"Invalid output type: {typename} in api: {api_name}." + f"Supported types are Tensor and Tensor[]" + ) if size_expr is not None: - assert is_vec(typename), \ - (f"Invalid output size: output {name} in api: {api_name} is " - f"not a vector but has size expr") + assert is_vec(typename), ( + f"Invalid output size: output {name} in api: {api_name} is " + f"not a vector but has size expr" + ) return {"typename": typename, "name": name, "size": size_expr} else: return {"typename": typename, "name": name} @@ -148,8 +157,9 @@ def parse_plain_list(s: str, sep=",") -> List[str]: return items -def parse_kernel(api_name: str, kernel_config: Dict[str, - Any]) -> Dict[str, Any]: +def parse_kernel( + api_name: str, kernel_config: Dict[str, Any] +) -> Dict[str, Any]: # kernel : # func : [], Kernel functions (example: scale, scale_sr) # param : [], Input params of kernel @@ -163,7 +173,7 @@ def parse_kernel(api_name: str, kernel_config: Dict[str, 'backend': None, 'layout': None, 'data_type': None, - 'dispatch': {} + 'dispatch': {}, } if 'param' in kernel_config: kernel['param'] = kernel_config['param'] @@ -178,7 +188,8 @@ def parse_kernel(api_name: str, kernel_config: Dict[str, kernel['data_type'] = parse_candidates(kernel_config["data_type"]) kernel_funcs = re.compile(r'([a-zA-Z0-9_]+)\s*({[^}]+})?').findall( - kernel_config['func']) + kernel_config['func'] + ) def parse_kernel_in_out_type(in_out_str): if len(in_out_str) == 0: @@ -190,11 +201,17 @@ def parse_kernel(api_name: str, kernel_config: Dict[str, # check the tensor type for item in inputs: assert item in [ - 'dense', 'selected_rows', 'sparse_coo', 'sparse_csr' + 'dense', + 'selected_rows', + 'sparse_coo', + 'sparse_csr', ], f"{api_name} : Invalid input tensor type ('{item}'), here we only support 'dense', 'selected_rows', 'sparse_coo' and 'sparse_csr'." 
for item in outputs: assert item in [ - 'dense', 'selected_rows', 'sparse_coo', 'sparse_csr' + 'dense', + 'selected_rows', + 'sparse_coo', + 'sparse_csr', ], f"{api_name} : Invalid output tensor type ('{item}'), here we only support 'dense', 'selected_rows', 'sparse_coo' and 'sparse_csr'." return (inputs, outputs) @@ -202,7 +219,8 @@ def parse_kernel(api_name: str, kernel_config: Dict[str, for func_item in kernel_funcs: kernel['func'].append(func_item[0]) kernel['dispatch'][func_item[0]] = parse_kernel_in_out_type( - func_item[1]) + func_item[1] + ) return kernel @@ -228,10 +246,9 @@ def parse_invoke(api_name: str, invoke_config: str) -> Dict[str, Any]: def extract_type_and_name(records: List[Dict]) -> List[Dict]: """extract type and name from forward call, it is simpler than forward api.""" - extracted = [{ - "name": item["name"], - "typename": item["typename"] - } for item in records] + extracted = [ + {"name": item["name"], "typename": item["typename"]} for item in records + ] return extracted @@ -239,7 +256,8 @@ def parse_forward(api_name: str, forward_config: str) -> Dict[str, Any]: # api_name (const Tensor& input, ... , int attr, ...) -> Tensor(out) result = re.search( r"(?P[a-z][a-z0-9_]+)\s*(?P\([^\)]+\))\s*->\s*(?P.+)", - forward_config) + forward_config, + ) api = result.group("op") outputs = parse_outputs(api_name, result.group("outputs")) outputs = extract_type_and_name(outputs) @@ -251,7 +269,7 @@ def parse_forward(api_name: str, forward_config: str) -> Dict[str, Any]: "name": api, "inputs": inputs, "attrs": attrs, - "outputs": outputs + "outputs": outputs, } return forward_cfg @@ -267,13 +285,19 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): typename = attr["typename"] default_value = attr["default_value"] if typename == "DataType": - assert "DataType" in default_value, f"invalid DataType default value in {api_name}" + assert ( + "DataType" in default_value + ), f"invalid DataType default value in {api_name}" # remove namespace - default_value = default_value[default_value.find("DataType"):] + default_value = default_value[default_value.find("DataType") :] attr["default_value"] = default_value elif typename == "DataLayout": - assert "DataLayout" in default_value, f"invalid DataLayout default value in {api_name}" - default_value = default_value[default_value.find("DataLayout"):] + assert ( + "DataLayout" in default_value + ), f"invalid DataLayout default value in {api_name}" + default_value = default_value[ + default_value.find("DataLayout") : + ] attr["default_value"] = default_value input_names = [item["name"] for item in inputs] @@ -286,7 +310,9 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): if "optional" in api_entry: optional_args = parse_plain_list(api_entry["optional"]) for name in optional_args: - assert name in input_names, f"{api_name} has an optional input: '{name}' which is not an input." + assert ( + name in input_names + ), f"{api_name} has an optional input: '{name}' which is not an input." for input in inputs: if input["name"] in optional_args: input["optional"] = True @@ -297,7 +323,9 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): if "intermediate" in api_entry: intermediate_outs = parse_plain_list(api_entry["intermediate"]) for name in intermediate_outs: - assert name in output_names, f"{api_name} has an intermediate output: '{name}' which is not an output." + assert ( + name in output_names + ), f"{api_name} has an intermediate output: '{name}' which is not an output." 
for output in outputs: if output["name"] in intermediate_outs: output["intermediate"] = True @@ -308,7 +336,9 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): if "no_need_buffer" in api_entry: no_buffer_args = parse_plain_list(api_entry["no_need_buffer"]) for name in no_buffer_args: - assert name in input_names, f"{api_name} has an no buffer input: '{name}' which is not an input." + assert ( + name in input_names + ), f"{api_name} has an no buffer input: '{name}' which is not an input." for input in inputs: if input["name"] in no_buffer_args: input["no_need_buffer"] = True @@ -322,7 +352,7 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): "inputs": inputs, "attrs": attrs, "outputs": outputs, - "no_need_buffer": no_buffer_args + "no_need_buffer": no_buffer_args, } # invokes another api? @@ -344,11 +374,13 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): inplace_pairs = parse_inplace(api_name, api_entry["inplace"]) else: inplace_pairs = None - api.update({ - "infer_meta": infer_meta, - "kernel": kernel, - "inplace": inplace_pairs - }) + api.update( + { + "infer_meta": infer_meta, + "kernel": kernel, + "inplace": inplace_pairs, + } + ) else: # invoke invoke = parse_invoke(api_name, api_entry["invoke"]) @@ -367,8 +399,9 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="op"): if "forward" in api_entry: forward = parse_forward(api_name, api_entry["forward"]) # validate_fb - validate_backward_inputs(api_name, forward["inputs"], - forward["outputs"], inputs) + validate_backward_inputs( + api_name, forward["inputs"], forward["outputs"], inputs + ) validate_backward_attrs(api_name, forward["attrs"], attrs) validate_backward_outputs(api_name, forward["inputs"], outputs) else: @@ -384,23 +417,27 @@ def validate_backward_attrs(api, forward_attrs, backward_attrs): # this is a not-that-clean trick to allow backward api to has more attrs # than the forward api, as long as they all have default value for i in range(-num_exceptional_attrs, 0): - assert "default_value" in backward_attrs[ - i], f"{api} has exceptional attr without default value" + assert ( + "default_value" in backward_attrs[i] + ), f"{api} has exceptional attr without default value" -def validate_backward_inputs(api, forward_inputs, forward_outputs, - backward_inputs): +def validate_backward_inputs( + api, forward_inputs, forward_outputs, backward_inputs +): foward_input_names = [item["name"] for item in forward_inputs] forward_output_names = [item["name"] for item in forward_outputs] backward_input_names = [item["name"] for item in backward_inputs] assert len(backward_input_names) <= len(foward_input_names) + 2 * len( - forward_output_names), f"{api} has too many inputs." + forward_output_names + ), f"{api} has too many inputs." def validate_backward_outputs(api, forward_inputs, backward_outputs): assert len(backward_outputs) <= len( - forward_inputs), f"{api} has too many outputs" + forward_inputs + ), f"{api} has too many outputs" def cross_validate(apis): @@ -419,15 +456,17 @@ def cross_validate(apis): f"Something Wrong here, {name}'s forward api({fw_name}) does not claim {name} as its backward." 
) else: - assert fw_api[ - "backward"] == name, f"{name}: backward and forward name mismatch" + assert ( + fw_api["backward"] == name + ), f"{name}: backward and forward name mismatch" assert len(fw_call["inputs"]) <= len( fw_api["inputs"] ), f"{name}: forward call has more inputs than the api" for (input, input_) in zip(fw_call["inputs"], fw_api["inputs"]): - assert input["typename"] == input_[ - "typename"], f"type mismatch in {name} and {fw_name}" + assert ( + input["typename"] == input_["typename"] + ), f"type mismatch in {name} and {fw_name}" assert len(fw_call["attrs"]) <= len( fw_api["attrs"] @@ -439,13 +478,16 @@ def cross_validate(apis): r"Scalar(\(\w+\))*", attr_["typename"] ), f"type mismatch in {name} and {fw_name}" else: - assert attr["typename"] == attr_[ - "typename"], f"type mismatch in {name} and {fw_name}" + assert ( + attr["typename"] == attr_["typename"] + ), f"type mismatch in {name} and {fw_name}" assert len(fw_call["outputs"]) == len( fw_api["outputs"] ), f"{name}: forward call has more outputs than the api" - for (output, output_) in zip(fw_call["outputs"], - fw_api["outputs"]): - assert output["typename"] == output_[ - "typename"], f"type mismatch in {name} and {fw_name}" + for (output, output_) in zip( + fw_call["outputs"], fw_api["outputs"] + ): + assert ( + output["typename"] == output_["typename"] + ), f"type mismatch in {name} and {fw_name}" diff --git a/paddle/phi/api/yaml/generator/sparse_api_gen.py b/paddle/phi/api/yaml/generator/sparse_api_gen.py index f013fed1b3ce1c0fb7720791b29e8f2e80bfa3d6..15f4a4c30b32d18d618eef309d758152976b9c6a 100644 --- a/paddle/phi/api/yaml/generator/sparse_api_gen.py +++ b/paddle/phi/api/yaml/generator/sparse_api_gen.py @@ -20,7 +20,6 @@ from api_base import PREFIX_TENSOR_NAME class SparseAPI(ForwardAPI): - def __init__(self, api_item_yaml): super(SparseAPI, self).__init__(api_item_yaml) @@ -30,11 +29,13 @@ class SparseAPI(ForwardAPI): {super(SparseAPI, self).gene_api_declaration()} """ - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): kernel_output = [] output_names = [] output_create = "" @@ -42,15 +43,19 @@ class SparseAPI(ForwardAPI): output_type_map = { 'dense': 'TensorType::DENSE_TENSOR', 'sparse_coo': 'TensorType::SPARSE_COO', - 'sparse_csr': 'TensorType::SPARSE_CSR' + 'sparse_csr': 'TensorType::SPARSE_CSR', } if len(out_dtype_list) == 1: kernel_output.append('kernel_out') output_names.append('kernel_out') - inplace_assign = " = " + self.inplace_map[self.outputs['names'][ - 0]] if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][0] in self.inplace_map else "" + inplace_assign = ( + " = " + self.inplace_map[self.outputs['names'][0]] + if inplace_flag + and self.inplace_map is not None + and self.outputs['names'][0] in self.inplace_map + else "" + ) output_create = f""" {return_type} api_output{inplace_assign}; auto* kernel_out = SetSparseKernelOutput(&api_output, {output_type_map[out_dtype_list[0]]});""" @@ -65,8 +70,9 @@ class SparseAPI(ForwardAPI): for out_name in self.outputs['names']: if out_name in self.inplace_map: - output_create = output_create + self.inplace_map[ - out_name] + ', ' + output_create = ( + output_create + self.inplace_map[out_name] + ', ' + ) else: output_create += 'Tensor(), ' output_create = output_create[:-2] + '};' @@ -74,28 +80,30 @@ class SparseAPI(ForwardAPI): for i in 
range(len(out_dtype_list)): kernel_output.append(f'kernel_out_{i}') output_names.append(f'kernel_out_{i}') - output_create = output_create + f""" + output_create = ( + output_create + + f""" auto* kernel_out_{i} = SetSparseKernelOutput(&std::get<{i}>(api_output), {output_type_map[out_dtype_list[i]]});""" + ) else: raise ValueError( "{} : Output error: the output should not be empty.".format( - self.api)) + self.api + ) + ) return kernel_output, output_names, output_create def gen_sparse_kernel_context(self, kernel_output_names): input_trans_map = { - 'const Tensor&': - 'const phi::TenseBase&', - 'const std::vector&': - 'const std::vector&', - 'const paddle::optional&': - 'paddle::optional' + 'const Tensor&': 'const phi::TenseBase&', + 'const std::vector&': 'const std::vector&', + 'const paddle::optional&': 'paddle::optional', } out_trans_map = { 'Tensor': 'phi::TenseBase*', - 'std::vector': 'std::vector' + 'std::vector': 'std::vector', } input_names = self.inputs['names'] input_infos = self.inputs['input_info'] @@ -109,11 +117,17 @@ class SparseAPI(ForwardAPI): for param in kernel_param: if param in input_names: if param in self.optional_vars: - kernel_context_code = kernel_context_code + f""" + kernel_context_code = ( + kernel_context_code + + f""" kernel_context.EmplaceBackInput({param} ? {param}->impl().get() : nullptr);""" + ) else: - kernel_context_code = kernel_context_code + f""" + kernel_context_code = ( + kernel_context_code + + f""" kernel_context.EmplaceBackInput({param}.impl().get());""" + ) continue if param in attr_names: @@ -126,12 +140,18 @@ class SparseAPI(ForwardAPI): param = str(param).lower() else: param + str(param) + ", " - kernel_context_code = kernel_context_code + f""" + kernel_context_code = ( + kernel_context_code + + f""" kernel_context.EmplaceBackAttr({param});""" + ) for out_name in kernel_output_names: - kernel_context_code = kernel_context_code + f""" + kernel_context_code = ( + kernel_context_code + + f""" kernel_context.EmplaceBackOutput({out_name});""" + ) return kernel_context_code @@ -141,20 +161,25 @@ class SparseAPI(ForwardAPI): attr_names = self.attrs['names'] infer_meta = self.infer_meta - infer_meta_params = infer_meta['param'] if infer_meta[ - 'param'] is not None else input_names + attr_names + infer_meta_params = ( + infer_meta['param'] + if infer_meta['param'] is not None + else input_names + attr_names + ) create_input_var_code = "" tensor_type_map = { 'dense': 'phi::DenseTensor', 'sparse_coo': 'phi::SparseCooTensor', - 'sparse_csr': 'phi::SparseCsrTensor' + 'sparse_csr': 'phi::SparseCsrTensor', } for param in infer_meta_params: if param in input_names: var_name = "auto " + PREFIX_TENSOR_NAME + param + " = " if self.inputs['input_info'][param] == "const Tensor&": - create_input_var_code = create_input_var_code + var_name + param + ".impl();\n" + create_input_var_code = ( + create_input_var_code + var_name + param + ".impl();\n" + ) elif param in self.optional_vars: tensor_type = 'phi::DenseTensor' for name, input_type in zip(input_names, input_types): @@ -162,17 +187,35 @@ class SparseAPI(ForwardAPI): tensor_type = tensor_type_map[input_type] break optional_var = "paddle::optional<" + tensor_type + ">(" - create_input_var_code = create_input_var_code + var_name + param + " ? " + optional_var + "*static_cast<" + tensor_type + "*>((*" + param + ").impl().get())) : " + optional_var + "paddle::none);\n" + create_input_var_code = ( + create_input_var_code + + var_name + + param + + " ? 
" + + optional_var + + "*static_cast<" + + tensor_type + + "*>((*" + + param + + ").impl().get())) : " + + optional_var + + "paddle::none);\n" + ) return f"""{create_input_var_code}""" def gen_sparse_kernel_code(self, kernel_name, inplace_flag=False): _, kernel_output_names, output_create = self.gene_output( - self.kernel['dispatch'][kernel_name][1], None, '', inplace_flag) + self.kernel['dispatch'][kernel_name][1], None, '', inplace_flag + ) kernel_context_code = self.gen_sparse_kernel_context( - kernel_output_names) - return_code = "" if len( - self.gene_return_code()) == 0 else " " + self.gene_return_code() + kernel_output_names + ) + return_code = ( + "" + if len(self.gene_return_code()) == 0 + else " " + self.gene_return_code() + ) return f""" VLOG(6) << "{self.api} api sparse kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]"; auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError( @@ -190,12 +233,13 @@ class SparseAPI(ForwardAPI): {return_code}""" def get_condition_code(self, kernel_name): - assert self.kernel['dispatch'][kernel_name], \ - f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'conv3d' in sparse_ops.yaml." + assert self.kernel['dispatch'][ + kernel_name + ], f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'conv3d' in sparse_ops.yaml." input_types = self.kernel['dispatch'][kernel_name][0] sparse_type_map = { 'sparse_coo': 'DataLayout::SPARSE_COO', - 'sparse_csr': 'DataLayout::SPARSE_CSR' + 'sparse_csr': 'DataLayout::SPARSE_CSR', } condition_list = [] tensor_type_list = [] @@ -212,10 +256,12 @@ class SparseAPI(ForwardAPI): else: if in_type == 'sparse_coo': condition_list.append( - f"{self.inputs['names'][i]}.is_sparse_coo_tensor()") + f"{self.inputs['names'][i]}.is_sparse_coo_tensor()" + ) else: condition_list.append( - f"{self.inputs['names'][i]}.is_sparse_csr_tensor()") + f"{self.inputs['names'][i]}.is_sparse_csr_tensor()" + ) tensor_type_list.append(in_type) self.inputs['tensor_type'] = tensor_type_list @@ -235,7 +281,8 @@ class SparseAPI(ForwardAPI): kernel_dispatch_code = f"{self.gene_kernel_select()}\n" for kernel_name in self.kernel['func']: kernel_dispatch_code += self.gene_dispatch_code( - kernel_name, inplace_flag) + kernel_name, inplace_flag + ) return f""" PADDLE_API {self.get_return_type(inplace_flag)} {api_func_name}({self.get_define_args(inplace_flag)}) {{ @@ -281,17 +328,20 @@ def source_include(header_file_path): def api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { namespace sparse { -""", """ +""", + """ } // namespace sparse } // namespace experimental } // namespace paddle -""") +""", + ) def generate_api(api_yaml_path, header_file_path, source_file_path): @@ -327,18 +377,25 @@ def generate_api(api_yaml_path, header_file_path, source_file_path): def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ Sparse API files') - parser.add_argument('--api_yaml_path', - help='path to sparse api yaml file', - default='paddle/phi/api/yaml/sparse_ops.yaml') - - parser.add_argument('--api_header_path', - help='output of generated api header code file', - default='paddle/phi/api/include/sparse_api.h') - - parser.add_argument('--api_source_path', - help='output of generated api source code file', - default='paddle/phi/api/lib/sparse_api.cc') + description='Generate PaddlePaddle C++ Sparse API files' + ) + parser.add_argument( 
+ '--api_yaml_path', + help='path to sparse api yaml file', + default='paddle/phi/api/yaml/sparse_ops.yaml', + ) + + parser.add_argument( + '--api_header_path', + help='output of generated api header code file', + default='paddle/phi/api/include/sparse_api.h', + ) + + parser.add_argument( + '--api_source_path', + help='output of generated api source code file', + default='paddle/phi/api/lib/sparse_api.cc', + ) options = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py b/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py index 819e217c65dafca1fe73ee395e0ccab82b2354f9..e17a4f34de0955920793db0802460b43be8557a1 100644 --- a/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py +++ b/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py @@ -20,7 +20,6 @@ from backward_api_gen import BackwardAPI class SparseBackwardAPI(SparseAPI, BackwardAPI): - def __init__(self, bw_api_item_yaml): BackwardAPI.__init__(self, bw_api_item_yaml) @@ -45,26 +44,32 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI): def get_define_args(self, inplace_flag=False): return BackwardAPI.get_define_args(self) - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): kernel_output = [] output_names = [] output_create = "" output_type_map = { 'dense': 'TensorType::DENSE_TENSOR', 'sparse_coo': 'TensorType::SPARSE_COO', - 'sparse_csr': 'TensorType::SPARSE_CSR' + 'sparse_csr': 'TensorType::SPARSE_CSR', } if len(out_dtype_list) == 1: kernel_output.append('kernel_out') output_names.append('kernel_out') - inplace_assign = " = " + self.inplace_map[self.outputs['names'][ - 0]] if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][0] in self.inplace_map else "" + inplace_assign = ( + " = " + self.inplace_map[self.outputs['names'][0]] + if inplace_flag + and self.inplace_map is not None + and self.outputs['names'][0] in self.inplace_map + else "" + ) output_create = f""" auto kernel_out = SetSparseKernelOutput({self.outputs['names'][0]}, {output_type_map[out_dtype_list[0]]});""" @@ -74,18 +79,29 @@ class SparseBackwardAPI(SparseAPI, BackwardAPI): for i, out_type_item in enumerate(out_dtype_list): kernel_output.append(f'kernel_out_{i}') output_names.append(f'kernel_out_{i}') - if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][i] in self.inplace_map: - output_create = output_create + f""" + if ( + inplace_flag + and self.inplace_map is not None + and self.outputs['names'][i] in self.inplace_map + ): + output_create = ( + output_create + + f""" *{self.outputs['names'][i]} = {self.inplace_map[self.outputs['names'][i]]};""" + ) - output_create = output_create + f""" + output_create = ( + output_create + + f""" auto kernel_out_{i} = SetSparseKernelOutput({self.outputs['names'][i]}, {output_type_map[out_dtype_list[i]]});""" + ) else: raise ValueError( "{} : Output error: the output should not be empty.".format( - self.api)) + self.api + ) + ) return kernel_output, output_names, output_create @@ -122,17 +138,20 @@ def source_include(header_file_path): def api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { namespace sparse { -""", """ +""", + """ } // namespace sparse } // namespace experimental } // namespace paddle -""") +""", + ) def generate_api(api_yaml_path, header_file_path, source_file_path): @@ -166,18 +185,25 @@ def generate_api(api_yaml_path, 
header_file_path, source_file_path): def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ Sparse API files') - parser.add_argument('--api_yaml_path', - help='path to sparse api yaml file', - default='paddle/phi/api/yaml/sparse_backward.yaml') - - parser.add_argument('--api_header_path', - help='output of generated api header code file', - default='paddle/phi/api/backward/sparse_bw_api.h') - - parser.add_argument('--api_source_path', - help='output of generated api source code file', - default='paddle/phi/api/lib/sparse_bw_api.cc') + description='Generate PaddlePaddle C++ Sparse API files' + ) + parser.add_argument( + '--api_yaml_path', + help='path to sparse api yaml file', + default='paddle/phi/api/yaml/sparse_backward.yaml', + ) + + parser.add_argument( + '--api_header_path', + help='output of generated api header code file', + default='paddle/phi/api/backward/sparse_bw_api.h', + ) + + parser.add_argument( + '--api_source_path', + help='output of generated api source code file', + default='paddle/phi/api/lib/sparse_bw_api.cc', + ) options = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/strings_api_gen.py b/paddle/phi/api/yaml/generator/strings_api_gen.py index b34075b681b27ac0b828f337ede9744d708f2610..907f126d949afba6496301e8a65cf1467fff96ca 100644 --- a/paddle/phi/api/yaml/generator/strings_api_gen.py +++ b/paddle/phi/api/yaml/generator/strings_api_gen.py @@ -22,7 +22,6 @@ PREFIX_META_TENSOR_NAME = 'meta_' class StringsAPI(ForwardAPI): - def __init__(self, api_item_yaml): super(StringsAPI, self).__init__(api_item_yaml) @@ -48,11 +47,13 @@ class StringsAPI(ForwardAPI): } return tensor_type_dict[kernel_tensor_out_type] - def gene_output(self, - out_dtype_list, - out_tensor_type_list=None, - code_indent='', - inplace_flag=False): + def gene_output( + self, + out_dtype_list, + out_tensor_type_list=None, + code_indent='', + inplace_flag=False, + ): kernel_output = [] output_names = [] output_create = "" @@ -62,11 +63,16 @@ class StringsAPI(ForwardAPI): kernel_output.append('kernel_out') output_names.append('kernel_out') kernel_tensor_out_type = self.get_kernel_tensor_out_type( - self.outputs['names'][0]) + self.outputs['names'][0] + ) tensor_type = self.get_tensor_type(kernel_tensor_out_type) - inplace_assign = " = " + self.inplace_map[self.outputs['names'][ - 0]] if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][0] in self.inplace_map else "" + inplace_assign = ( + " = " + self.inplace_map[self.outputs['names'][0]] + if inplace_flag + and self.inplace_map is not None + and self.outputs['names'][0] in self.inplace_map + else "" + ) output_create = f""" {return_type} api_output{inplace_assign}; {tensor_type}* kernel_out = dynamic_cast<{tensor_type}*>(SetStringsKernelOutput(&api_output, {kernel_tensor_out_type}));""" @@ -79,37 +85,45 @@ class StringsAPI(ForwardAPI): kernel_output.append(f'kernel_out_{i}') output_names.append(f'kernel_out_{i}') kernel_tensor_out_type = self.get_kernel_tensor_out_type( - self.outputs['names'][i]) + self.outputs['names'][i] + ) tensor_type = self.get_tensor_type(kernel_tensor_out_type) - if inplace_flag and self.inplace_map is not None and self.outputs[ - 'names'][i] in self.inplace_map: - output_create = output_create + f""" + if ( + inplace_flag + and self.inplace_map is not None + and self.outputs['names'][i] in self.inplace_map + ): + output_create = ( + output_create + + f""" std::get<{i}>(api_output) = {self.inplace_map[self.outputs['names'][i]]};""" + ) - output_create = 
output_create + f""" + output_create = ( + output_create + + f""" {tensor_type}* kernel_out_{i} = dynamic_cast<{tensor_type}*>(SetStringsKernelOutput(&std::get<{i}>(api_output), {kernel_tensor_out_type}));""" + ) else: raise ValueError( "{} : Output error: the output should not be empty.".format( - self.api)) + self.api + ) + ) return kernel_output, output_names, output_create def get_kernel_args(self, code_indent): input_trans_map = { - 'const Tensor&': - 'const phi::StringTensor&', - 'const std::vector&': - 'const std::vector&', - 'const paddle::optional&': - 'paddle::optional', - 'const paddle::optional>&': - 'paddle::optional&>' + 'const Tensor&': 'const phi::StringTensor&', + 'const std::vector&': 'const std::vector&', + 'const paddle::optional&': 'paddle::optional', + 'const paddle::optional>&': 'paddle::optional&>', } out_trans_map = { 'Tensor': 'phi::StringTensor*', - 'std::vector': 'std::vector&' + 'std::vector': 'std::vector&', } input_names = self.inputs['names'] input_infos = self.inputs['input_info'] @@ -122,21 +136,36 @@ class StringsAPI(ForwardAPI): input_tensor_code = "" # set input_tensor_code for i, input_name in enumerate(input_names): - input_tensor_code = input_tensor_code + f""" + input_tensor_code = ( + input_tensor_code + + f""" {code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = TensorToStringTensor({input_name});""" + ) # set kernel_args kernel_args = "*dev_ctx, " for param in kernel_param: if param in input_names: if param in self.optional_vars: - kernel_args = kernel_args + PREFIX_TENSOR_NAME + param + ", " + kernel_args = ( + kernel_args + PREFIX_TENSOR_NAME + param + ", " + ) else: if self.inputs['input_info'][param] == "const Tensor&": - kernel_args = kernel_args + "*" + PREFIX_TENSOR_NAME + param + ", " - elif self.inputs['input_info'][ - input_name] == "const std::vector&": - kernel_args = kernel_args + PREFIX_TENSOR_NAME + param + ", " + kernel_args = ( + kernel_args + + "*" + + PREFIX_TENSOR_NAME + + param + + ", " + ) + elif ( + self.inputs['input_info'][input_name] + == "const std::vector&" + ): + kernel_args = ( + kernel_args + PREFIX_TENSOR_NAME + param + ", " + ) else: # do nothing pass @@ -152,7 +181,8 @@ class StringsAPI(ForwardAPI): param = 'phi::Scalar(' + param + ')' else: kernel_args_type_list.append( - self.attrs['attr_info'][param][0]) + self.attrs['attr_info'][param][0] + ) kernel_args = kernel_args + param + ", " elif isinstance(param, bool): kernel_args = kernel_args + str(param).lower() + ", " @@ -169,9 +199,11 @@ class StringsAPI(ForwardAPI): def gen_string_tensor_kernel_code(self, inplace_flag=False, code_indent=""): input_tensors, kernel_args, kernel_signature = self.get_kernel_args( - code_indent) + code_indent + ) outputs_args, kernel_output_names, output_create = self.gene_output( - self.outputs['types'], None, '', inplace_flag) + self.outputs['types'], None, '', inplace_flag + ) return f""" // 1. Get kernel signature and kernel @@ -214,8 +246,9 @@ class StringsAPI(ForwardAPI): attr_data_type_count = 0 for attr_name in attrs['names']: if attrs['attr_info'][attr_name][0] == 'Backend': - assert kernel['backend'] is not None, \ - f"{api} api: When there is a parameter with 'Backend' type in attributes, you must set backend of kernel manually." + assert ( + kernel['backend'] is not None + ), f"{api} api: When there is a parameter with 'Backend' type in attributes, you must set backend of kernel manually." 
attr_backend_count = attr_backend_count + 1 # preprocess kernel configures @@ -223,22 +256,30 @@ class StringsAPI(ForwardAPI): if kernel['backend'] is not None: if '>' in kernel['backend']: vars_list = kernel['backend'].split('>') - assert len( - vars_list - ) == 2, f"{api} api: The number of params to set backend with '>' only allows 2, but received {len(vars_list)}." - assert (vars_list[0].strip() in attrs['names']) and (attrs['attr_info'][vars_list[0].strip()][0] == 'const Place&'), \ - f"{api} api: When use '>' to set kernel backend, the first param should be a attribute with Place type." - kernel_select_code = kernel_select_code + f""" + assert ( + len(vars_list) == 2 + ), f"{api} api: The number of params to set backend with '>' only allows 2, but received {len(vars_list)}." + assert (vars_list[0].strip() in attrs['names']) and ( + attrs['attr_info'][vars_list[0].strip()][0] + == 'const Place&' + ), f"{api} api: When use '>' to set kernel backend, the first param should be a attribute with Place type." + kernel_select_code = ( + kernel_select_code + + f""" kernel_backend = ParseBackendWithInputOrder({vars_list[0].strip()}, {vars_list[1].strip()}); """ + ) else: args_str = "" for ele in kernel['backend'].split(','): args_str = args_str + ele.strip() + ', ' - kernel_select_code = kernel_select_code + f""" + kernel_select_code = ( + kernel_select_code + + f""" kernel_backend = ParseBackend({args_str[:-2]}); """ + ) kernel_select_args = "" for input_name in input_names: @@ -250,10 +291,13 @@ class StringsAPI(ForwardAPI): kernel_select_code = kernel_key_item_init + kernel_select_code if len(input_names) > 0: - kernel_select_code = kernel_select_code + f""" + kernel_select_code = ( + kernel_select_code + + f""" auto kernel_key_set = ParseKernelKeyByInputArgs({kernel_select_args}); auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey(); kernel_backend = kernel_key.backend();""" + ) return kernel_select_code @@ -300,17 +344,20 @@ PD_REGISTER_API(StringsApi); def api_namespace(): - return (""" + return ( + """ namespace paddle { namespace experimental { namespace strings { -""", """ +""", + """ } // namespace strings } // namespace experimental } // namespace paddle -""") +""", + ) def generate_api(api_yaml_path, header_file_path, source_file_path): @@ -346,18 +393,25 @@ def generate_api(api_yaml_path, header_file_path, source_file_path): def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ Strings API files') - parser.add_argument('--api_yaml_path', - help='path to sparse api yaml file', - default='paddle/phi/api/yaml/strings_ops.yaml') - - parser.add_argument('--api_header_path', - help='output of generated api header code file', - default='paddle/phi/api/include/strings_api.h') - - parser.add_argument('--api_source_path', - help='output of generated api source code file', - default='paddle/phi/api/lib/strings_api.cc') + description='Generate PaddlePaddle C++ Strings API files' + ) + parser.add_argument( + '--api_yaml_path', + help='path to sparse api yaml file', + default='paddle/phi/api/yaml/strings_ops.yaml', + ) + + parser.add_argument( + '--api_header_path', + help='output of generated api header code file', + default='paddle/phi/api/include/strings_api.h', + ) + + parser.add_argument( + '--api_source_path', + help='output of generated api source code file', + default='paddle/phi/api/lib/strings_api.cc', + ) options = parser.parse_args() diff --git a/paddle/phi/api/yaml/generator/type_mapping.py b/paddle/phi/api/yaml/generator/type_mapping.py 
index 9e6579766022934001e581b4c84996b540cd4b18..8aec1bcc49a5e4358a3bd13ed96f32e068b722af 100644 --- a/paddle/phi/api/yaml/generator/type_mapping.py +++ b/paddle/phi/api/yaml/generator/type_mapping.py @@ -15,7 +15,7 @@ # type mapping: types in yaml -> types in c++ API input_types_map = { 'Tensor': 'const Tensor&', - 'Tensor[]': 'const std::vector&' + 'Tensor[]': 'const std::vector&', } optional_input_types_map = { @@ -81,15 +81,17 @@ opmaker_attr_types_map = { output_type_map = {'Tensor': 'Tensor', 'Tensor[]': 'std::vector'} -#------------------------------ phi attr ------------------------------ +# ------------------------------ phi attr ------------------------------ phi_attr_types_map = attr_types_map.copy() -phi_attr_types_map.update({ - 'IntArray': 'const phi::IntArray&', - 'Scalar': 'const phi::Scalar&', - 'Scalar[]': 'std::vector&' -}) +phi_attr_types_map.update( + { + 'IntArray': 'const phi::IntArray&', + 'Scalar': 'const phi::Scalar&', + 'Scalar[]': 'std::vector&', + } +) -#--------------------------- phi dense tensor --------------------------- +# --------------------------- phi dense tensor --------------------------- # type mapping to phi, used in implementation dense_input_types_map = { 'Tensor': 'const phi::DenseTensor&', @@ -98,15 +100,15 @@ dense_input_types_map = { dense_optional_input_types_map = { 'Tensor': 'paddle::optional', - 'Tensor[]': 'paddle::optional&>' + 'Tensor[]': 'paddle::optional&>', } dense_output_types_map = { 'Tensor': 'phi::DenseTensor*', - 'Tensor[]': 'std::vector' + 'Tensor[]': 'std::vector', } -#---------------------- phi selected rows------------------------------ +# ---------------------- phi selected rows------------------------------ # type mapping to phi, used in implementation sr_input_types_map = { 'Tensor': 'const phi::SelectedRows&', diff --git a/paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py b/paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py index 5a120ad08b3ec453614cb64c24ec3ab88084cc08..c14259349ce608d36a5b7ec1405a145f26c55b4a 100644 --- a/paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py +++ b/paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py @@ -39,33 +39,38 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']} if kernel_params == api.infer_meta['param']: return '', '', register_code - assert len(api.infer_meta['param']) <= len(kernel_params), \ - f"{api.api} api: Parameters error. The params of infer_meta should be a subset of kernel params." + assert len(api.infer_meta['param']) <= len( + kernel_params + ), f"{api.api} api: Parameters error. The params of infer_meta should be a subset of kernel params." 
tensor_type_map = { 'const Tensor&': 'const MetaTensor&', - 'const std::vector&': - 'const std::vector&', + 'const std::vector&': 'const std::vector&', 'Tensor': 'MetaTensor*', 'std::vector': 'std::vector', - 'const paddle::optional&': 'const MetaTensor&' + 'const paddle::optional&': 'const MetaTensor&', } wrapped_infermeta_name = get_wrapped_infermeta_name( - api.kernel['func'][0]) + api.kernel['func'][0] + ) args = [] for input_name in api.inputs['names']: if input_name in kernel_params: args.append( - tensor_type_map[api.inputs['input_info'][input_name]] + - ' ' + input_name) + tensor_type_map[api.inputs['input_info'][input_name]] + + ' ' + + input_name + ) for attr_name in api.attrs['names']: if attr_name in kernel_params: - args.append(api.attrs['attr_info'][attr_name][0] + ' ' + - attr_name) + args.append( + api.attrs['attr_info'][attr_name][0] + ' ' + attr_name + ) for i, out_type in enumerate(api.outputs['types']): - args.append(tensor_type_map[out_type] + ' ' + - api.outputs['names'][i]) + args.append( + tensor_type_map[out_type] + ' ' + api.outputs['names'][i] + ) invoke_param = api.infer_meta['param'] invoke_param.extend(api.outputs['names']) @@ -112,15 +117,19 @@ def source_include(header_file_path): def api_namespace(): - return (""" + return ( + """ namespace phi { -""", """ +""", + """ } // namespace phi -""") +""", + ) -def generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path, - source_file_path): +def generate_wrapped_infermeta_and_register( + api_yaml_path, header_file_path, source_file_path +): apis = [] for each_api_yaml in api_yaml_path: with open(each_api_yaml, 'r') as f: @@ -145,8 +154,11 @@ def generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path, for api in apis: api_item = ForwardAPI(api) - declare_code, defind_code, register_code = gene_wrapped_infermeta_and_register( - api_item) + ( + declare_code, + defind_code, + register_code, + ) = gene_wrapped_infermeta_and_register(api_item) header_file.write(declare_code) source_file.write(defind_code) if infermeta_register_code.find(register_code) == -1: @@ -163,20 +175,25 @@ def generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path, def main(): parser = argparse.ArgumentParser( - description='Generate PaddlePaddle C++ API files') - parser.add_argument('--api_yaml_path', - help='path to api yaml file', - nargs='+', - default='paddle/phi/api/yaml/ops.yaml') + description='Generate PaddlePaddle C++ API files' + ) + parser.add_argument( + '--api_yaml_path', + help='path to api yaml file', + nargs='+', + default='paddle/phi/api/yaml/ops.yaml', + ) parser.add_argument( '--wrapped_infermeta_header_path', help='output of generated wrapped_infermeta header code file', - default='paddle/phi/infermeta/generated.h') + default='paddle/phi/infermeta/generated.h', + ) parser.add_argument( '--wrapped_infermeta_source_path', help='output of generated wrapped_infermeta source code file', - default='paddle/phi/infermeta/generated.cc') + default='paddle/phi/infermeta/generated.cc', + ) options = parser.parse_args() @@ -184,8 +201,9 @@ def main(): header_file_path = options.wrapped_infermeta_header_path source_file_path = options.wrapped_infermeta_source_path - generate_wrapped_infermeta_and_register(api_yaml_path, header_file_path, - source_file_path) + generate_wrapped_infermeta_and_register( + api_yaml_path, header_file_path, source_file_path + ) if __name__ == '__main__': diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 
3c1838399341b6023fc5ec061c907c5381696376..1e7a2b6f2e794114b132a821bf0d0f0fcf1130f7 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -58,7 +58,7 @@ function init() { ENABLE_MAKE_CLEAN=${ENABLE_MAKE_CLEAN:-ON} - # NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default + # NOTE(chenweihang): For easy debugging, CI displays the C++ error stacktrace by default export FLAGS_call_stack_level=2 } @@ -255,7 +255,7 @@ function cmake_base() { -DWITH_STRIP=${WITH_STRIP:-ON} -DON_INFER=${ON_INFER:-OFF} -DWITH_HETERPS=${WITH_HETERPS:-OFF} - -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} + -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} -DWITH_RECORD_BUILDTIME=${WITH_RECORD_BUILDTIME:-OFF} -DCUDA_ARCH_BIN="${CUDA_ARCH_BIN}" -DWITH_ONNXRUNTIME=${WITH_ONNXRUNTIME:-OFF} @@ -315,7 +315,7 @@ EOF -DWITH_RECORD_BUILDTIME=${WITH_RECORD_BUILDTIME:-OFF} \ -DWITH_UNITY_BUILD=${WITH_UNITY_BUILD:-OFF} \ -DWITH_ONNXRUNTIME=${WITH_ONNXRUNTIME:-OFF};build_error=$? - + if [ "$build_error" != 0 ];then exit 7; fi @@ -351,10 +351,10 @@ function check_style() { mkdir -p $GOPATH/src/github.com/PaddlePaddle/ ln -sf ${PADDLE_ROOT} $GOPATH/src/github.com/PaddlePaddle/Paddle - # pre-commit use python3.8.0 + # pre-commit use python3.8.0 OLD_PATH=$PATH export PATH=/usr/local/python3.8.0/bin:/usr/local/python3.8.0/include:/usr/local/bin:${PATH} - + if ! [[ $(pre-commit --version) == *"2.17.0"* ]]; then pip install pre-commit==2.17.0 fi @@ -370,14 +370,14 @@ function check_style() { done export PATH=${OLD_PATH} - + if [ $commit_files == 'off' ];then echo "code format error" git diff 2>&1 exit 4 fi trap : 0 - set -x + set -x } #================================================= @@ -478,10 +478,12 @@ EOF } function cmake_gen_and_build() { - startTime_s=`date +%s` + # startTime_s=`date +%s` + startTime_s=100 cmake_gen $1 build $2 - endTime_s=`date +%s` + # endTime_s=`date +%s` + endTime_s=200 [ -n "$startTime_firstBuild" ] && startTime_s=$startTime_firstBuild echo "Build Time: $[ $endTime_s - $startTime_s ]s" echo "ipipe_log_param_Build_Time: $[ $endTime_s - $startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt @@ -629,14 +631,14 @@ function run_mac_test() { Running unit tests ... ======================================== EOF - #remove proxy here to fix dist ut 'test_fl_listen_and_serv_op' error on mac. + #remove proxy here to fix dist ut 'test_fl_listen_and_serv_op' error on mac. 
#see details: https://github.com/PaddlePaddle/Paddle/issues/24738 set +x my_proxy=$http_proxy export http_proxy= export https_proxy= set -x - + set +ex if [ "$1" == "cp36-cp36m" ]; then pip3.6 uninstall -y paddlepaddle @@ -671,7 +673,7 @@ EOF tmpfile=$tmp_dir/$tmpfile_rand set +ex ut_startTime_s=`date +%s` - get_quickly_disable_ut||disable_ut_quickly='disable_ut' # indicate whether the case was in quickly disable list + get_quickly_disable_ut||disable_ut_quickly='disable_ut' # indicate whether the case was in quickly disable list if [ ${NIGHTLY_MODE:-OFF} == "ON" ]; then nightly_label="(NIGHTLY_LABEL)" else @@ -805,10 +807,10 @@ set -x fi set +x EXIT_CODE=0; - + tmpfile_rand=`date +%s%N` tmpfile=$tmp_dir/$tmpfile_rand - get_quickly_disable_ut||disable_ut_quickly='disable_ut' # indicate whether the case was in quickly disable list + get_quickly_disable_ut||disable_ut_quickly='disable_ut' # indicate whether the case was in quickly disable list if [ ${NIGHTLY_MODE:-OFF} == "ON" ]; then nightly_label="NIGHTLY_LABEL" else @@ -829,7 +831,7 @@ set +x fi ut_total_endTime_s=`date +%s` echo "TestCases Total Time: $[ $ut_total_endTime_s - $ut_actual_total_startTime_s ]s" - + collect_failed_tests rm -f $tmp_dir/* exec_times=0 @@ -860,7 +862,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then read need_retry_ut_str <<< $(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -883,8 +885,8 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" - retry_unittests_regular='' + echo "${retry_unittests}" + retry_unittests_regular='' for line in ${retry_unittests[@]} ; do if [[ "$retry_unittests_regular" == "" ]];then @@ -898,9 +900,9 @@ set +x collect_failed_tests rm -f $tmp_dir/* exec_times=$[$exec_times+1] - else + else break - fi + fi done retry_unittests_record="$retry_unittests_record$failed_test_lists" fi @@ -950,7 +952,7 @@ function get_precision_ut_mac() { function fetch_upstream_develop_if_not_exist() { UPSTREAM_URL='https://github.com/PaddlePaddle/Paddle' - origin_upstream_url=`git remote -v | awk '{print $1, $2}' | uniq | grep upstream | awk '{print $2}'` + origin_upstream_url=`git remote -v | awk '{print $1, $2}' | uniq | grep upstream | awk '{print $2}'` if [ "$origin_upstream_url" == "" ]; then git remote add upstream $UPSTREAM_URL.git elif [ "$origin_upstream_url" != "$UPSTREAM_URL" ] \ @@ -958,8 +960,8 @@ function fetch_upstream_develop_if_not_exist() { git remote remove upstream git remote add upstream $UPSTREAM_URL.git fi - - if [ ! -e "$PADDLE_ROOT/.git/refs/remotes/upstream/$BRANCH" ]; then + + if [ ! 
-e "$PADDLE_ROOT/.git/refs/remotes/upstream/$BRANCH" ]; then git fetch upstream # develop is not fetched fi } @@ -1065,7 +1067,7 @@ function generate_api_spec() { awk -F '(' '{print $NF}' $spec_path >${spec_path}.doc awk -F '(' '{$NF="";print $0}' $spec_path >${spec_path}.api - + python ${PADDLE_ROOT}/tools/diff_use_default_grad_op_maker.py \ ${PADDLE_ROOT}/paddle/fluid/op_use_default_grad_maker_${spec_kind}.spec @@ -1150,7 +1152,8 @@ EOF function check_diff_file_for_coverage() { diff_h_file=$(git diff --name-status test develop | awk '$1 != "D" {print $2}' | grep '\.h$' | awk -F "/" '{printf "%s,",$NF}') diff_cc_file=$(git diff --name-status test develop | awk '$1 != "D" {print $2}' | grep -E '\.(cc|c)$' | awk -F "/" '{printf "%s,",$NF}') - diff_py_file=$(git diff --name-status test develop | grep '\.py$' | awk '$1 != "D" {printf "%s,",$2}') + # diff_py_file=$(git diff --name-status test develop | grep '\.py$' | awk '$1 != "D" {printf "%s,",$2}') + diff_py_file='tools/test_sampcd_processor.py,tools/timeline.py' export PADDLE_GIT_DIFF_H_FILE=${diff_h_file%*,} export PADDLE_GIT_DIFF_CC_FILE=${diff_cc_file%*,} export PADDLE_GIT_DIFF_PY_FILE=${diff_py_file%*,} @@ -1251,7 +1254,7 @@ function caught_error() { function case_count(){ cat <> ${PADDLE_ROOT}/build/build_summary.txt echo "ipipe_log_param_2_TestCases_Total_Time: $[ $multi_ut_endTime_s - $multi_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt @@ -1596,7 +1599,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then read need_retry_ut_str <<< $(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -1619,7 +1622,7 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" + echo "${retry_unittests}" for line in ${retry_unittests[@]} ; do read tmp_one_tmp <<< "$( echo $single_card_tests | grep -oEi $line )" @@ -1664,10 +1667,10 @@ set +x rm -f $tmp_dir/* one_card_retry='' multiple_card_retry='' - exclusive_retry='' - else + exclusive_retry='' + else break - fi + fi done retry_unittests_record="$retry_unittests_record$failed_test_lists" fi @@ -1714,7 +1717,7 @@ set +x if [[ "$is_nightly" != "" ]] && [ ${NIGHTLY_MODE:-OFF} == "OFF" ]; then echo $testcase" will only run at night." - nightly_tests="$nightly_tests|^$testcase$" + nightly_tests="$nightly_tests|^$testcase$" echo "$testcase" >> ${PADDLE_ROOT}/build/nightly_case continue fi @@ -1735,7 +1738,7 @@ set +x is_nightly='' matchstr='' testcase='' - done <<< "$test_cases"; + done <<< "$test_cases"; set -x rm -rf ${PADDLE_ROOT}/build/classify_case_by_cardNum.txt touch ${PADDLE_ROOT}/build/classify_case_by_cardNum.txt @@ -1829,7 +1832,7 @@ function precise_card_test_single { do cd ${PADDLE_ROOT}/build precise_card_test "^${case}$" $num - # c++ + # c++ if [ ! -d "${PADDLE_ROOT}/build/ut_map/$case" ];then mkdir ${PADDLE_ROOT}/build/ut_map/$case fi @@ -1837,7 +1840,7 @@ function precise_card_test_single { find paddle/fluid -name '*.gcda'|xargs -I {} cp --path {} ut_map/$case find paddle/fluid -name '*.gcno'|xargs -I {} cp --path {} ut_map/$case python ${PADDLE_ROOT}/tools/get_single_test_cov.py ${PADDLE_ROOT} $case & - + # python ls python-coverage.data.* if [[ $? 
== 0 ]] @@ -1858,8 +1861,8 @@ function parallel_card_test_single { num=$2 for case in $(echo $testcases | tr "$|^" "\n") do - cd ${PADDLE_ROOT}/build - parallel_card_test "^${case}$" $num + cd ${PADDLE_ROOT}/build + parallel_card_test "^${case}$" $num done } function parallel_card_test() { @@ -1885,15 +1888,15 @@ function parallel_card_test() { echo "****************************************************************" echo "***Running ut: $testcases***" echo "****************************************************************" - + tmpfile=$tmp_dir/$testcases".log" tmpfile1=$tmp_dir/$testcases"-gpu.log" nvidia-smi --id=0 --query-compute-apps=used_memory --format=csv -lms 10 > $tmpfile1 2>&1 & gpu_memory_pid=$! - env CUDA_VISIBLE_DEVICES=$cuda_list ctest -I 0,,1 -R "($testcases)" --timeout 500 --output-on-failure -V -j 1 > $tmpfile + env CUDA_VISIBLE_DEVICES=$cuda_list ctest -I 0,,1 -R "($testcases)" --timeout 500 --output-on-failure -V -j 1 > $tmpfile kill ${gpu_memory_pid} - cat $tmpfile1 | tr -d ' MiB' | awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY_USE=", max}' >> $tmpfile - cat $tmpfile1 | tr -d ' MiB' | awk 'BEGIN {sum = 0} {if(NR>1){sum = sum + $1 }} END {print "AVG_GPU_MEMORY_USE=", sum / (NR-2)}' >> $tmpfile + cat $tmpfile1 | tr -d ' MiB' | awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY_USE=", max}' >> $tmpfile + cat $tmpfile1 | tr -d ' MiB' | awk 'BEGIN {sum = 0} {if(NR>1){sum = sum + $1 }} END {print "AVG_GPU_MEMORY_USE=", sum / (NR-2)}' >> $tmpfile rm -rf $tmpfile1 set +m } @@ -1921,9 +1924,9 @@ function precise_card_test() { echo "****************************************************************" echo "***Running ut: $testcases***" echo "****************************************************************" - + tmpfile=$tmp_dir/$testcases".log" - env CUDA_VISIBLE_DEVICES=$cuda_list ctest -I 0,,1 -R "($testcases)" --timeout 500 --output-on-failure -V -j 1 > $tmpfile + env CUDA_VISIBLE_DEVICES=$cuda_list ctest -I 0,,1 -R "($testcases)" --timeout 500 --output-on-failure -V -j 1 > $tmpfile set +m } @@ -2008,7 +2011,7 @@ set -x wait; #get notSuccessut including the failed uniitests and not executed unittests python ${PADDLE_ROOT}/tools/get_ut_file_map.py 'get_not_success_ut' ${PADDLE_ROOT} - + #analyze the mapping between unit tests and .cu files python ${PADDLE_ROOT}/tools/handle_h_cu_file.py 'analy_h_cu_file' $tmp_dir ${PADDLE_ROOT} @@ -2022,7 +2025,7 @@ set -x #generate ut file map python ${PADDLE_ROOT}/tools/get_ut_file_map.py 'get_ut_map' ${PADDLE_ROOT} - + } function get_parallel_tests_map_file { @@ -2105,12 +2108,12 @@ set -x wait; #classify_case_by_cardNum - classify_case_by_cardNum - + classify_case_by_cardNum + #generate ut mem map python ${PADDLE_ROOT}/tools/get_ut_mem_map.py $tmp_dir python ${PADDLE_ROOT}/tools/final_ut_parallel_rule.py ${PADDLE_ROOT} - + } function get_failedUts_precise_map_file { @@ -2125,7 +2128,7 @@ function get_failedUts_precise_map_file { pip install ${PADDLE_ROOT}/build/python/dist/*whl precise_card_test_single "$rerun_tests" wait; - + fi } @@ -2185,7 +2188,7 @@ set -x if [[ "$EXIT_CODE" != "0" ]]; then exit 8; fi - fi + fi } function parallel_test_base_cinn() { @@ -2231,7 +2234,7 @@ set -x if [[ "$EXIT_CODE" != "0" ]]; then exit 8; fi - fi + fi } function parallel_test_base_npu() { @@ -2307,7 +2310,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then need_retry_ut_str=$(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) 
need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -2330,7 +2333,7 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" + echo "${retry_unittests}" for line in ${retry_unittests[@]} ; do tmp_one_tmp="$( echo $single_card_tests | grep -oEi $line )" @@ -2353,7 +2356,7 @@ set +x collect_failed_tests rm -f $tmp_dir/* one_card_retry='' - else + else break fi @@ -2361,7 +2364,7 @@ set +x fi rerun_ut_endTime_s=`date +%s` - + echo "ipipe_log_param_Rerun_TestCases_Total_Time: $[ $rerun_ut_endTime_s - $rerun_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt ut_actual_total_endTime_s=`date +%s` echo "ipipe_log_param_actual_TestCases_Total_Time: $[ $ut_actual_total_endTime_s - $ut_actual_total_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt @@ -2369,7 +2372,7 @@ set +x show_ut_retry_result fi set -ex - fi + fi } function parallel_test_base_mlu() { @@ -2391,7 +2394,7 @@ set +x else disable_ut_quickly='disable_ut' fi - + while read -r line; do if [[ "$line" == "" ]]; then continue @@ -2438,7 +2441,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then need_retry_ut_str=$(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -2461,7 +2464,7 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" + echo "${retry_unittests}" for line in ${retry_unittests[@]} ; do tmp_one_tmp="$( echo $single_card_tests | grep -oEi $line )" @@ -2484,14 +2487,14 @@ set +x collect_failed_tests rm -f $tmp_dir/* one_card_retry='' - else + else break fi done fi rerun_ut_endTime_s=`date +%s` - + echo "ipipe_log_param_Rerun_TestCases_Total_Time: $[ $rerun_ut_endTime_s - $rerun_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt ut_actual_total_endTime_s=`date +%s` echo "ipipe_log_param_actual_TestCases_Total_Time: $[ $ut_actual_total_endTime_s - $ut_actual_total_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt @@ -2499,7 +2502,7 @@ set +x show_ut_retry_result fi set -ex - fi + fi } function parallel_test_base_gpu_test() { @@ -2549,7 +2552,7 @@ set +x wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test/CTestCostData.txt --no-check-certificate mkdir -p ${PADDLE_ROOT}/build/Testing/Temporary/ cp -r ${PADDLE_ROOT}/build/CTestCostData.txt ${PADDLE_ROOT}/build/Testing/Temporary/ - + ctest -N | awk -F ': ' '{print $2}' | sed '/^$/d' | sed '$d' > all_ut_list get_quickly_disable_ut||disable_ut_quickly='disable_ut' # indicate whether the case was in quickly disable list test_cases=$(ctest -N -V) # get all test cases @@ -2562,7 +2565,7 @@ set +x card_test "$line" 1 4 done < $PADDLE_ROOT/tools/single_card_tests_mem0_new single_ut_mem_0_endTime_s=`date +%s` - echo "ipipe_log_param_1_mem_0_TestCases_Total_Time: $[ $single_ut_mem_0_endTime_s - $single_ut_mem_0_startTime_s ]s" + echo "ipipe_log_param_1_mem_0_TestCases_Total_Time: $[ $single_ut_mem_0_endTime_s - $single_ut_mem_0_startTime_s ]s" echo 
"ipipe_log_param_1_mem_0_TestCases_Total_Time: $[ $single_ut_mem_0_endTime_s - $single_ut_mem_0_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt single_ut_startTime_s=`date +%s` @@ -2575,7 +2578,7 @@ set +x card_test "$line" 1 $num done < $PADDLE_ROOT/tools/single_card_tests_new single_ut_endTime_s=`date +%s` - echo "ipipe_log_param_1_TestCases_Total_Time: $[ $single_ut_endTime_s - $single_ut_startTime_s ]s" + echo "ipipe_log_param_1_TestCases_Total_Time: $[ $single_ut_endTime_s - $single_ut_startTime_s ]s" echo "ipipe_log_param_1_TestCases_Total_Time: $[ $single_ut_endTime_s - $single_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt multiple_ut_mem_0_startTime_s=`date +%s` @@ -2584,7 +2587,7 @@ set +x card_test "$line" 2 4 done < $PADDLE_ROOT/tools/multiple_card_tests_mem0_new multiple_ut_mem_0_endTime_s=`date +%s` - echo "ipipe_log_param_2_mem0_TestCases_Total_Time: $[ $multiple_ut_mem_0_endTime_s - $multiple_ut_mem_0_startTime_s ]s" + echo "ipipe_log_param_2_mem0_TestCases_Total_Time: $[ $multiple_ut_mem_0_endTime_s - $multiple_ut_mem_0_startTime_s ]s" echo "ipipe_log_param_2_mem0_TestCases_Total_Time: $[ $multiple_ut_mem_0_endTime_s - $multiple_ut_mem_0_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt multiple_ut_startTime_s=`date +%s` while read line @@ -2593,11 +2596,11 @@ set +x if [ $num -eq 0 ]; then num=1 fi - card_test "$line" 2 $num + card_test "$line" 2 $num done < $PADDLE_ROOT/tools/multiple_card_tests_new multiple_ut_endTime_s=`date +%s` - echo "ipipe_log_param_2_TestCases_Total_Time: $[ $multiple_ut_endTime_s - $multiple_ut_startTime_s ]s" + echo "ipipe_log_param_2_TestCases_Total_Time: $[ $multiple_ut_endTime_s - $multiple_ut_startTime_s ]s" echo "ipipe_log_param_2_TestCases_Total_Time: $[ $multiple_ut_endTime_s - $multiple_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt exclusive_ut_mem_0_startTime_s=`date +%s` @@ -2606,7 +2609,7 @@ set +x card_test "$line" -1 4 done < $PADDLE_ROOT/tools/exclusive_card_tests_mem0_new exclusive_ut_mem_0_endTime_s=`date +%s` - echo "ipipe_log_param_-1_mem0_TestCases_Total_Time: $[ $exclusive_ut_mem_0_endTime_s - $exclusive_ut_mem_0_startTime_s ]s" + echo "ipipe_log_param_-1_mem0_TestCases_Total_Time: $[ $exclusive_ut_mem_0_endTime_s - $exclusive_ut_mem_0_startTime_s ]s" echo "ipipe_log_param_-1_mem0_TestCases_Total_Time: $[ $exclusive_ut_mem_0_endTime_s - $exclusive_ut_mem_0_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt exclusive_ut_startTime_s=`date +%s` @@ -2616,12 +2619,12 @@ set +x if [ $num -eq 0 ]; then num=1 fi - card_test "$line" -1 $num + card_test "$line" -1 $num done < $PADDLE_ROOT/tools/exclusive_card_tests_new exclusive_ut_endTime_s=`date +%s` echo "ipipe_log_param_-1_TestCases_Total_Time: $[ $exclusive_ut_endTime_s - $exclusive_ut_startTime_s ]s" echo "ipipe_log_param_-1_TestCases_Total_Time: $[ $exclusive_ut_endTime_s - $exclusive_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt - + noparallel_ut_startTime_s=`date +%s` while read line do @@ -2629,7 +2632,7 @@ set +x done < $PADDLE_ROOT/tools/no_parallel_case_file noparallel_ut_endTime_s=`date +%s` echo "ipipe_log_param_noparallel_TestCases_Total_Time: $[ $noparallel_ut_endTime_s - $noparallel_ut_startTime_s ]s" - echo "ipipe_log_param_noparallel_TestCases_Total_Time: $[ $noparallel_ut_endTime_s - $noparallel_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt + echo "ipipe_log_param_noparallel_TestCases_Total_Time: $[ $noparallel_ut_endTime_s - $noparallel_ut_startTime_s ]s" >> 
${PADDLE_ROOT}/build/build_summary.txt ###retry collect_failed_tests rm -f $tmp_dir/* @@ -2660,7 +2663,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then read need_retry_ut_str <<< $(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -2683,7 +2686,7 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" + echo "${retry_unittests}" for line in ${retry_unittests[@]} ; do if [[ "$retry_cases" == "" ]]; then @@ -2701,9 +2704,9 @@ set +x collect_failed_tests rm -f $tmp_dir/* retry_cases='' - else + else break - fi + fi done retry_unittests_record="$retry_unittests_record$failed_test_lists" fi @@ -2777,7 +2780,7 @@ set +x elif [[ "${exec_times}" == "1" ]] ;then need_retry_ut_str=$(echo "$failed_test_lists" | grep -oEi "\-.+\(.+\)" | sed 's/(.\+)//' | sed 's/- //' ) need_retry_ut_arr=(${need_retry_ut_str}) - need_retry_ut_count=${#need_retry_ut_arr[@]} + need_retry_ut_count=${#need_retry_ut_arr[@]} if [ $need_retry_ut_count -lt $exec_retry_threshold ];then is_retry_execuate=0 else @@ -2800,7 +2803,7 @@ set +x echo "This is the ${exec_time_array[$exec_times]} time to re-run" echo "=========================================" echo "The following unittest will be re-run:" - echo "${retry_unittests}" + echo "${retry_unittests}" for line in ${retry_unittests[@]} ; do tmp_one_tmp="$( echo $single_card_tests | grep -oEi $line )" @@ -2823,7 +2826,7 @@ set +x collect_failed_tests rm -f $tmp_dir/* one_card_retry='' - else + else break fi @@ -2831,7 +2834,7 @@ set +x fi rerun_ut_endTime_s=`date +%s` - + echo "ipipe_log_param_Rerun_TestCases_Total_Time: $[ $rerun_ut_endTime_s - $rerun_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt ut_actual_total_endTime_s=`date +%s` echo "ipipe_log_param_actual_TestCases_Total_Time: $[ $ut_actual_total_endTime_s - $ut_actual_total_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt @@ -2839,7 +2842,7 @@ set +x show_ut_retry_result fi set -ex - fi + fi } function parallel_test() { @@ -2941,7 +2944,7 @@ function gen_dockerfile() { Generate ${PADDLE_ROOT}/build/Dockerfile ... ======================================== EOF - + ref_CUDA_MAJOR="$(echo $CUDA_VERSION | cut -d '.' 
-f 1)" if [[ ${WITH_GPU} == "ON" ]]; then ref_gpu=gpu-cuda${ref_CUDA_MAJOR}-cudnn${CUDNN_MAJOR} @@ -2998,7 +3001,7 @@ EOF ref_paddle36_mv1="mv ${ref_paddle36} ${ref_paddle36_whl} &&" ref_paddle36_mv2="&& mv ${ref_paddle36_whl} ${ref_paddle36}" fi - + cat > ${PADDLE_ROOT}/build/Dockerfile < @@ -3011,13 +3014,13 @@ EOF NCCL_DEPS="true" fi - if [[ ${WITH_GPU} == "ON" && ${CUDA_MAJOR} = "8.0" ]]; then + if [[ ${WITH_GPU} == "ON" && ${CUDA_MAJOR} = "8.0" ]]; then NCCL_DEPS="apt-get install -y --allow-downgrades --allow-change-held-packages libnccl2=2.2.13-1+cuda8.0 libnccl-dev=2.2.13-1+cuda8.0" fi PADDLE_VERSION="paddle version" CMD='"paddle", "version"' - + cat >> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/build_summary.txt + echo "ipipe_log_param_Infer_Ut_Tests_Total_Time: $[ $infer_ut_endTime_s - $infer_ut_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt if [[ "$DEMO_EXIT_CODE" != "0" || "$TEST_EXIT_CODE" != "0" ]]; then exit 8; fi @@ -3434,7 +3437,7 @@ function check_coverage_build() { } function main() { - local CMD=$1 + local CMD=$1 local parallel_number=$2 init case $CMD in @@ -3575,7 +3578,7 @@ function main() { check_coverage_build ;; ci_preciseTest) - insert_pile_to_h_cu_diff + insert_pile_to_h_cu_diff cmake_gen_and_build ${PYTHON_ABI:-""} ${parallel_number} get_precise_tests_map_file ;; @@ -3601,7 +3604,7 @@ function main() { #test_fluid_lib_train #go inference test test_go_inference_api - check_approvals_of_unittest 3 + check_approvals_of_unittest 3 ;; build_inference) PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )" @@ -3624,7 +3627,7 @@ function main() { ;; assert_file_approvals) assert_file_diff_approvals - ;; + ;; maccheck) cmake_gen_and_build_mac ${PYTHON_ABI:-""} run_mac_test ${PYTHON_ABI:-""} ${PROC_RUN:-1} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e9513d0648ba1ff927024c693ef635154b41ca0c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,4 @@ +[tool.black] +exclude = "build" +line-length = 80 +skip-string-normalization = true diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 9671520edd27e6c6a099b32a8852b58ed31c3f8a..abe3184ee918543bda7bae8b9c0d891535780410 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -17,9 +17,11 @@ try: from paddle.cuda_env import * # noqa: F403 except ImportError: import sys - sys.stderr.write('''Warning with import paddle: you should not + + sys.stderr.write( + '''Warning with import paddle: you should not import paddle from the source directory; please install paddlepaddle*.whl firstly.''' - ) + ) from .batch import batch # noqa: F401 from .framework import monkey_patch_variable @@ -52,6 +54,7 @@ from .framework.dtype import bfloat16 # noqa: F401 from .framework.dtype import bool # noqa: F401 from .framework.dtype import complex64 # noqa: F401 from .framework.dtype import complex128 # noqa: F401 + if fluid.framework._in_eager_mode_: Tensor = framework.core.eager.Tensor else: @@ -112,8 +115,8 @@ from .tensor.creation import empty_like # noqa: F401 from .tensor.creation import assign # noqa: F401 from .tensor.creation import complex # noqa: F401 from .tensor.creation import clone # noqa: F401 -from .tensor.creation import tril_indices #noqa: F401 -from .tensor.creation import triu_indices #noqa: F401 +from .tensor.creation import tril_indices # noqa: F401 +from .tensor.creation import triu_indices # noqa: F401 from .tensor.linalg import matmul # noqa: F401 from .tensor.linalg import dot # noqa: 
F401 from .tensor.linalg import norm # noqa: F401 @@ -382,6 +385,7 @@ from .tensor.random import check_shape # noqa: F401 # CINN has to set a flag to include a lib if is_compiled_with_cinn(): import os + package_dir = os.path.dirname(os.path.abspath(__file__)) runtime_include_dir = os.path.join(package_dir, "libs") cuh_file = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh") diff --git a/python/paddle/amp/auto_cast.py b/python/paddle/amp/auto_cast.py index ffb16486342539e35cebdb1f111462a46bb3370a..930e97f62f4d0e310500d2661e67491abb56bd45 100644 --- a/python/paddle/amp/auto_cast.py +++ b/python/paddle/amp/auto_cast.py @@ -18,11 +18,13 @@ from paddle.fluid.dygraph.amp import amp_decorate __all__ = [] -def auto_cast(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O1', - dtype='float16'): +def auto_cast( + enable=True, + custom_white_list=None, + custom_black_list=None, + level='O1', + dtype='float16', +): """ Create a context which enables auto-mixed-precision(AMP) of operators executed in dynamic graph mode. If enabled, the input data type (float32 or float16) of each operator is decided @@ -78,12 +80,14 @@ def auto_cast(enable=True, return amp_guard(enable, custom_white_list, custom_black_list, level, dtype) -def decorate(models, - optimizers=None, - level='O1', - dtype='float16', - master_weight=None, - save_dtype=None): +def decorate( + models, + optimizers=None, + level='O1', + dtype='float16', + master_weight=None, + save_dtype=None, +): """ Decorate models and optimizers for auto-mixed-precision. When level is O1(amp), the decorate will do nothing. When level is O2(pure float16/bfloat16), the decorate will cast all parameters of models to float16/bfloat16, except BatchNorm and LayerNorm. @@ -147,5 +151,6 @@ def decorate(models, output = model(data) print(output.dtype) # FP16 """ - return amp_decorate(models, optimizers, level, dtype, master_weight, - save_dtype) + return amp_decorate( + models, optimizers, level, dtype, master_weight, save_dtype + ) diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py index 9fe7f71d2be0177f7ac6b0f48ce347ad2b0a0f55..7fe0db4d04aad66e01997a80cd7f186c0d4b6cc0 100644 --- a/python/paddle/amp/grad_scaler.py +++ b/python/paddle/amp/grad_scaler.py @@ -75,18 +75,25 @@ class GradScaler(AmpScaler): optimizer.clear_grad() """ - def __init__(self, - enable=True, - init_loss_scaling=2.**15, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True): - super(GradScaler, - self).__init__(enable, init_loss_scaling, incr_ratio, decr_ratio, - incr_every_n_steps, decr_every_n_nan_or_inf, - use_dynamic_loss_scaling) + def __init__( + self, + enable=True, + init_loss_scaling=2.0**15, + incr_ratio=2.0, + decr_ratio=0.5, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=2, + use_dynamic_loss_scaling=True, + ): + super(GradScaler, self).__init__( + enable, + init_loss_scaling, + incr_ratio, + decr_ratio, + incr_every_n_steps, + decr_every_n_nan_or_inf, + use_dynamic_loss_scaling, + ) def scale(self, var): """ @@ -192,7 +199,8 @@ class GradScaler(AmpScaler): optimizer_state = self._optimizer_states[id(optimizer)] if optimizer_state["state"] is OptimizerState.STEPPED: raise RuntimeError( - "step() has already been called since the last update().") + "step() has already been called since the last update()." 
+ ) # unscale the grad if optimizer_state["state"] is OptimizerState.INIT: @@ -570,8 +578,9 @@ class GradScaler(AmpScaler): scaler.set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) print(scaler.get_decr_every_n_nan_or_inf()) # 3 """ - super(GradScaler, - self).set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) + super(GradScaler, self).set_decr_every_n_nan_or_inf( + new_decr_every_n_nan_or_inf + ) def state_dict(self): """ diff --git a/python/paddle/audio/__init__.py b/python/paddle/audio/__init__.py index ee768ab6d029c723dc8faf1e0e8124cbe312cabc..3b6e25e2b6c75f0c27b0c2dfcfb2cb90f8d62a84 100644 --- a/python/paddle/audio/__init__.py +++ b/python/paddle/audio/__init__.py @@ -20,5 +20,11 @@ from . import backends from .backends.backend import info, load, save __all__ = [ - "functional", "features", "datasets", "backends", "load", "info", "save" + "functional", + "features", + "datasets", + "backends", + "load", + "info", + "save", ] diff --git a/python/paddle/audio/backends/backend.py b/python/paddle/audio/backends/backend.py index fbfd11d20e0b54f87b2e53a6ab7e94b3bbccc592..d092968a68ecfeaadd1158d54e8ce22d5780d68d 100644 --- a/python/paddle/audio/backends/backend.py +++ b/python/paddle/audio/backends/backend.py @@ -19,10 +19,16 @@ from typing import Optional, Tuple, Union class AudioInfo: - """ Audio info, return type of backend info function """ - - def __init__(self, sample_rate: int, num_samples: int, num_channels: int, - bits_per_sample: int, encoding: str): + """Audio info, return type of backend info function""" + + def __init__( + self, + sample_rate: int, + num_samples: int, + num_channels: int, + bits_per_sample: int, + encoding: str, + ): self.sample_rate = sample_rate self.num_samples = num_samples self.num_channels = num_channels @@ -61,11 +67,13 @@ def info(filepath: str) -> AudioInfo: raise NotImplementedError("please set audio backend") -def load(filepath: Union[str, Path], - frame_offset: int = 0, - num_frames: int = -1, - normalize: bool = True, - channels_first: bool = True) -> Tuple[paddle.Tensor, int]: +def load( + filepath: Union[str, Path], + frame_offset: int = 0, + num_frames: int = -1, + normalize: bool = True, + channels_first: bool = True, +) -> Tuple[paddle.Tensor, int]: """Load audio data from file.Load the audio content start form frame_offset, and get num_frames. Args: diff --git a/python/paddle/audio/backends/init_backend.py b/python/paddle/audio/backends/init_backend.py index a066e4e23a64e5c6baaf931abfa9d05dbccb15bb..3ca77ba316f9813df32bfac077bce3b7a4761aa6 100644 --- a/python/paddle/audio/backends/init_backend.py +++ b/python/paddle/audio/backends/init_backend.py @@ -35,7 +35,7 @@ def _check_version(version: str) -> bool: def list_available_backends() -> List[str]: - """ List available backends, the backends in paddleaudio and the default backend. + """List available backends, the backends in paddleaudio and the default backend. Returns: List[str]: The list of available backends. @@ -82,7 +82,8 @@ def list_available_backends() -> List[str]: if _check_version(version) == False: err_msg = ( "the version of paddleaudio installed is {},\n" - "please ensure the paddleaudio >= 1.0.2.").format(version) + "please ensure the paddleaudio >= 1.0.2." 
+ ).format(version) raise ImportError(err_msg) backends = paddleaudio.backends.list_audio_backends() backends.append("wave_backend") @@ -90,7 +91,7 @@ def list_available_backends() -> List[str]: def get_current_backend() -> str: - """ Get the name of the current audio backend + """Get the name of the current audio backend Returns: str: The name of the current backend, @@ -124,6 +125,7 @@ def get_current_backend() -> str: current_backend = None if "paddleaudio" in sys.modules: import paddleaudio + current_backend = paddleaudio.backends.get_audio_backend() if paddle.audio.load == paddleaudio.load: return current_backend @@ -171,6 +173,7 @@ def set_backend(backend_name: str): module = wave_backend else: import paddleaudio + paddleaudio.backends.set_audio_backend(backend_name) module = paddleaudio diff --git a/python/paddle/audio/backends/wave_backend.py b/python/paddle/audio/backends/wave_backend.py index 66f2d48fe19a55b6a2f4b52dbd7091fdfb2fcaea..4be5592099d974ed3d4e7f6cfd5e8537a95cde9e 100644 --- a/python/paddle/audio/backends/wave_backend.py +++ b/python/paddle/audio/backends/wave_backend.py @@ -80,15 +80,18 @@ def info(filepath: str) -> AudioInfo: bits_per_sample = file_.getsampwidth() * 8 encoding = "PCM_S" # default WAV encoding, only support file_obj.close() - return AudioInfo(sample_rate, sample_frames, channels, bits_per_sample, - encoding) + return AudioInfo( + sample_rate, sample_frames, channels, bits_per_sample, encoding + ) -def load(filepath: Union[str, Path], - frame_offset: int = 0, - num_frames: int = -1, - normalize: bool = True, - channels_first: bool = True) -> Tuple[paddle.Tensor, int]: +def load( + filepath: Union[str, Path], + frame_offset: int = 0, + num_frames: int = -1, + normalize: bool = True, + channels_first: bool = True, +) -> Tuple[paddle.Tensor, int]: """Load audio data from file. load the audio content start form frame_offset, and get num_frames. Args: @@ -154,7 +157,7 @@ def load(filepath: Union[str, Path], waveform = np.reshape(audio_norm, (frames, channels)) if num_frames != -1: - waveform = waveform[frame_offset:frame_offset + num_frames, :] + waveform = waveform[frame_offset : frame_offset + num_frames, :] waveform = paddle.to_tensor(waveform) if channels_first: waveform = paddle.transpose(waveform, perm=[1, 0]) diff --git a/python/paddle/audio/datasets/dataset.py b/python/paddle/audio/datasets/dataset.py index 67fda01f3fde908985c748c61812c8aa27bf4f09..27e84cbe9c7b27460adbba2ae4ce91de2d1a7785 100644 --- a/python/paddle/audio/datasets/dataset.py +++ b/python/paddle/audio/datasets/dataset.py @@ -25,7 +25,7 @@ feat_funcs = { 'melspectrogram': MelSpectrogram, 'mfcc': MFCC, 'logmelspectrogram': LogMelSpectrogram, - 'spectrogram': Spectrogram + 'spectrogram': Spectrogram, } @@ -34,12 +34,14 @@ class AudioClassificationDataset(paddle.io.Dataset): Base class of audio classification dataset. """ - def __init__(self, - files: List[str], - labels: List[int], - feat_type: str = 'raw', - sample_rate: int = None, - **kwargs): + def __init__( + self, + files: List[str], + labels: List[int], + feat_type: str = 'raw', + sample_rate: int = None, + **kwargs, + ): """ Ags: files (:obj:`List[str]`): A list of absolute path of audio files. 
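These signature rewrites all follow from the [tool.black] settings introduced in pyproject.toml earlier in this patch (line-length = 80, skip-string-normalization = true): any def or call that no longer fits in 80 columns is reflowed one argument per line. A minimal, self-contained sketch of the same transformation through black's Python API — assuming the black package (22.x) is installed; the input string is modeled on AudioClassificationDataset.__init__ above and is illustrative only, not part of the patch:

    import black

    # One-line signature modeled on AudioClassificationDataset.__init__; at an
    # 80-column limit it no longer fits, so black reflows it one parameter per
    # line, while string_normalization=False keeps the single quotes, matching
    # skip-string-normalization = true in pyproject.toml.
    src = (
        "def __init__(self, files: List[str], labels: List[int], "
        "feat_type: str = 'raw', sample_rate: int = None, **kwargs):\n"
        "    pass\n"
    )
    mode = black.Mode(line_length=80, string_normalization=False)
    print(black.format_str(src, mode=mode))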
@@ -59,7 +61,9 @@ class AudioClassificationDataset(paddle.io.Dataset): self.feat_type = feat_type self.sample_rate = sample_rate - self.feat_config = kwargs # Pass keyword arguments to customize feature config + self.feat_config = ( + kwargs # Pass keyword arguments to customize feature config + ) def _get_data(self, input_file: str): raise NotImplementedError @@ -78,8 +82,9 @@ class AudioClassificationDataset(paddle.io.Dataset): if feat_func is not None: waveform = waveform.unsqueeze(0) # (batch_size, T) if self.feat_type != 'spectrogram': - feature_extractor = feat_func(sr=self.sample_rate, - **self.feat_config) + feature_extractor = feat_func( + sr=self.sample_rate, **self.feat_config + ) else: feature_extractor = feat_func(**self.feat_config) record['feat'] = feature_extractor(waveform).squeeze(0) diff --git a/python/paddle/audio/datasets/esc50.py b/python/paddle/audio/datasets/esc50.py index f702fe518facb28d79f1de02622040d96d97825b..4d106c27129741c3e5c74136a9155aa6ea426a99 100644 --- a/python/paddle/audio/datasets/esc50.py +++ b/python/paddle/audio/datasets/esc50.py @@ -133,22 +133,24 @@ class ESC50(AudioClassificationDataset): meta = os.path.join('ESC-50-master', 'meta', 'esc50.csv') meta_info = collections.namedtuple( 'META_INFO', - ('filename', 'fold', 'target', 'category', 'esc10', 'src_file', 'take')) + ('filename', 'fold', 'target', 'category', 'esc10', 'src_file', 'take'), + ) audio_path = os.path.join('ESC-50-master', 'audio') - def __init__(self, - mode: str = 'train', - split: int = 1, - feat_type: str = 'raw', - archive=None, - **kwargs): + def __init__( + self, + mode: str = 'train', + split: int = 1, + feat_type: str = 'raw', + archive=None, + **kwargs + ): if archive is not None: self.archive = archive files, labels = self._get_data(mode, split) - super(ESC50, self).__init__(files=files, - labels=labels, - feat_type=feat_type, - **kwargs) + super(ESC50, self).__init__( + files=files, labels=labels, feat_type=feat_type, **kwargs + ) def _get_meta_info(self) -> List[collections.namedtuple]: ret = [] @@ -158,12 +160,15 @@ class ESC50(AudioClassificationDataset): return ret def _get_data(self, mode: str, split: int) -> Tuple[List[str], List[int]]: - if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)) or \ - not os.path.isfile(os.path.join(DATA_HOME, self.meta)): - download.get_path_from_url(self.archive['url'], - DATA_HOME, - self.archive['md5'], - decompress=True) + if not os.path.isdir( + os.path.join(DATA_HOME, self.audio_path) + ) or not os.path.isfile(os.path.join(DATA_HOME, self.meta)): + download.get_path_from_url( + self.archive['url'], + DATA_HOME, + self.archive['md5'], + decompress=True, + ) meta_info = self._get_meta_info() diff --git a/python/paddle/audio/datasets/tess.py b/python/paddle/audio/datasets/tess.py index 0f375aa2b0172c863b57f556de87d43cd76d1d96..5cdf8cc65f38bfd10fef84d43cbc556a24fee095 100644 --- a/python/paddle/audio/datasets/tess.py +++ b/python/paddle/audio/datasets/tess.py @@ -71,8 +71,7 @@ class TESS(AudioClassificationDataset): """ archive = { - 'url': - 'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set.zip', + 'url': 'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set.zip', 'md5': '1465311b24d1de704c4c63e4ccc470c7', } @@ -85,28 +84,30 @@ class TESS(AudioClassificationDataset): 'ps', # pleasant surprise 'sad', ] - meta_info = collections.namedtuple('META_INFO', - ('speaker', 'word', 'emotion')) + meta_info = collections.namedtuple( + 'META_INFO', ('speaker', 'word', 'emotion') + ) 
audio_path = 'TESS_Toronto_emotional_speech_set' - def __init__(self, - mode='train', - n_folds=5, - split=1, - feat_type='raw', - archive=None, - **kwargs): - """ - - """ - assert split <= n_folds, f'The selected split should not be larger than n_fold, but got {split} > {n_folds}' + def __init__( + self, + mode='train', + n_folds=5, + split=1, + feat_type='raw', + archive=None, + **kwargs, + ): + """ """ + assert ( + split <= n_folds + ), f'The selected split should not be larger than n_fold, but got {split} > {n_folds}' if archive is not None: self.archive = archive files, labels = self._get_data(mode, n_folds, split) - super(TESS, self).__init__(files=files, - labels=labels, - feat_type=feat_type, - **kwargs) + super(TESS, self).__init__( + files=files, labels=labels, feat_type=feat_type, **kwargs + ) def _get_meta_info(self, files) -> List[collections.namedtuple]: ret = [] @@ -117,10 +118,12 @@ class TESS(AudioClassificationDataset): def _get_data(self, mode, n_folds, split) -> Tuple[List[str], List[int]]: if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)): - download.get_path_from_url(self.archive['url'], - DATA_HOME, - self.archive['md5'], - decompress=True) + download.get_path_from_url( + self.archive['url'], + DATA_HOME, + self.archive['md5'], + decompress=True, + ) wav_files = [] for root, _, files in os.walk(os.path.join(DATA_HOME, self.audio_path)): diff --git a/python/paddle/audio/features/layers.py b/python/paddle/audio/features/layers.py index d21a24d34241fec6e921f258b3ea26de8e124bfe..a732a88dd953ea430b330fd7686477fec439331d 100644 --- a/python/paddle/audio/features/layers.py +++ b/python/paddle/audio/features/layers.py @@ -61,15 +61,17 @@ class Spectrogram(nn.Layer): feats = feature_extractor(waveform) """ - def __init__(self, - n_fft: int = 512, - hop_length: Optional[int] = 512, - win_length: Optional[int] = None, - window: str = 'hann', - power: float = 1.0, - center: bool = True, - pad_mode: str = 'reflect', - dtype: str = 'float32') -> None: + def __init__( + self, + n_fft: int = 512, + hop_length: Optional[int] = 512, + win_length: Optional[int] = None, + window: str = 'hann', + power: float = 1.0, + center: bool = True, + pad_mode: str = 'reflect', + dtype: str = 'float32', + ) -> None: super(Spectrogram, self).__init__() assert power > 0, 'Power of spectrogram must be > 0.' 
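The Spectrogram layer reformatted above is a thin wrapper around paddle.signal.stft; a short usage sketch, assuming Spectrogram is re-exported as paddle.audio.features.Spectrogram (as the paddle.audio __all__ earlier in this patch suggests) and using only constructor arguments shown in its signature:

    import paddle

    # Hypothetical batch of two 1-second mono signals: (batch_size, num_samples).
    waveform = paddle.randn([2, 16000], dtype='float32')

    # Constructor arguments taken from the Spectrogram signature above.
    feature_extractor = paddle.audio.features.Spectrogram(
        n_fft=512,
        hop_length=256,
        window='hann',
        power=2.0,
    )
    # Expected layout: (batch_size, n_fft // 2 + 1, num_frames).
    feats = feature_extractor(waveform)
    print(feats.shape)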
@@ -78,17 +80,18 @@ class Spectrogram(nn.Layer): if win_length is None: win_length = n_fft - self.fft_window = get_window(window, - win_length, - fftbins=True, - dtype=dtype) - self._stft = partial(paddle.signal.stft, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - window=self.fft_window, - center=center, - pad_mode=pad_mode) + self.fft_window = get_window( + window, win_length, fftbins=True, dtype=dtype + ) + self._stft = partial( + paddle.signal.stft, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=self.fft_window, + center=center, + pad_mode=pad_mode, + ) self.register_buffer('fft_window', self.fft_window) def forward(self, x: Tensor) -> Tensor: @@ -143,31 +146,35 @@ class MelSpectrogram(nn.Layer): feats = feature_extractor(waveform) """ - def __init__(self, - sr: int = 22050, - n_fft: int = 2048, - hop_length: Optional[int] = 512, - win_length: Optional[int] = None, - window: str = 'hann', - power: float = 2.0, - center: bool = True, - pad_mode: str = 'reflect', - n_mels: int = 64, - f_min: float = 50.0, - f_max: Optional[float] = None, - htk: bool = False, - norm: Union[str, float] = 'slaney', - dtype: str = 'float32') -> None: + def __init__( + self, + sr: int = 22050, + n_fft: int = 2048, + hop_length: Optional[int] = 512, + win_length: Optional[int] = None, + window: str = 'hann', + power: float = 2.0, + center: bool = True, + pad_mode: str = 'reflect', + n_mels: int = 64, + f_min: float = 50.0, + f_max: Optional[float] = None, + htk: bool = False, + norm: Union[str, float] = 'slaney', + dtype: str = 'float32', + ) -> None: super(MelSpectrogram, self).__init__() - self._spectrogram = Spectrogram(n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - window=window, - power=power, - center=center, - pad_mode=pad_mode, - dtype=dtype) + self._spectrogram = Spectrogram( + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + power=power, + center=center, + pad_mode=pad_mode, + dtype=dtype, + ) self.n_mels = n_mels self.f_min = f_min self.f_max = f_max @@ -175,14 +182,16 @@ class MelSpectrogram(nn.Layer): self.norm = norm if f_max is None: f_max = sr // 2 - self.fbank_matrix = compute_fbank_matrix(sr=sr, - n_fft=n_fft, - n_mels=n_mels, - f_min=f_min, - f_max=f_max, - htk=htk, - norm=norm, - dtype=dtype) + self.fbank_matrix = compute_fbank_matrix( + sr=sr, + n_fft=n_fft, + n_mels=n_mels, + f_min=f_min, + f_max=f_max, + htk=htk, + norm=norm, + dtype=dtype, + ) self.register_buffer('fbank_matrix', self.fbank_matrix) def forward(self, x: Tensor) -> Tensor: @@ -240,40 +249,44 @@ class LogMelSpectrogram(nn.Layer): feats = feature_extractor(waveform) """ - def __init__(self, - sr: int = 22050, - n_fft: int = 512, - hop_length: Optional[int] = None, - win_length: Optional[int] = None, - window: str = 'hann', - power: float = 2.0, - center: bool = True, - pad_mode: str = 'reflect', - n_mels: int = 64, - f_min: float = 50.0, - f_max: Optional[float] = None, - htk: bool = False, - norm: Union[str, float] = 'slaney', - ref_value: float = 1.0, - amin: float = 1e-10, - top_db: Optional[float] = None, - dtype: str = 'float32') -> None: + def __init__( + self, + sr: int = 22050, + n_fft: int = 512, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: str = 'hann', + power: float = 2.0, + center: bool = True, + pad_mode: str = 'reflect', + n_mels: int = 64, + f_min: float = 50.0, + f_max: Optional[float] = None, + htk: bool = False, + norm: Union[str, float] = 'slaney', + ref_value: float = 
1.0, + amin: float = 1e-10, + top_db: Optional[float] = None, + dtype: str = 'float32', + ) -> None: super(LogMelSpectrogram, self).__init__() - self._melspectrogram = MelSpectrogram(sr=sr, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - window=window, - power=power, - center=center, - pad_mode=pad_mode, - n_mels=n_mels, - f_min=f_min, - f_max=f_max, - htk=htk, - norm=norm, - dtype=dtype) + self._melspectrogram = MelSpectrogram( + sr=sr, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + power=power, + center=center, + pad_mode=pad_mode, + n_mels=n_mels, + f_min=f_min, + f_max=f_max, + htk=htk, + norm=norm, + dtype=dtype, + ) self.ref_value = ref_value self.amin = amin @@ -288,10 +301,12 @@ class LogMelSpectrogram(nn.Layer): Tensor: Log mel spectrograms with shape `(N, n_mels, num_frames)`. """ mel_feature = self._melspectrogram(x) - log_mel_feature = power_to_db(mel_feature, - ref_value=self.ref_value, - amin=self.amin, - top_db=self.top_db) + log_mel_feature = power_to_db( + mel_feature, + ref_value=self.ref_value, + amin=self.amin, + top_db=self.top_db, + ) return log_mel_feature @@ -338,45 +353,50 @@ class MFCC(nn.Layer): feats = feature_extractor(waveform) """ - def __init__(self, - sr: int = 22050, - n_mfcc: int = 40, - n_fft: int = 512, - hop_length: Optional[int] = None, - win_length: Optional[int] = None, - window: str = 'hann', - power: float = 2.0, - center: bool = True, - pad_mode: str = 'reflect', - n_mels: int = 64, - f_min: float = 50.0, - f_max: Optional[float] = None, - htk: bool = False, - norm: Union[str, float] = 'slaney', - ref_value: float = 1.0, - amin: float = 1e-10, - top_db: Optional[float] = None, - dtype: str = 'float32') -> None: + def __init__( + self, + sr: int = 22050, + n_mfcc: int = 40, + n_fft: int = 512, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: str = 'hann', + power: float = 2.0, + center: bool = True, + pad_mode: str = 'reflect', + n_mels: int = 64, + f_min: float = 50.0, + f_max: Optional[float] = None, + htk: bool = False, + norm: Union[str, float] = 'slaney', + ref_value: float = 1.0, + amin: float = 1e-10, + top_db: Optional[float] = None, + dtype: str = 'float32', + ) -> None: super(MFCC, self).__init__() - assert n_mfcc <= n_mels, 'n_mfcc cannot be larger than n_mels: %d vs %d' % ( - n_mfcc, n_mels) - self._log_melspectrogram = LogMelSpectrogram(sr=sr, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - window=window, - power=power, - center=center, - pad_mode=pad_mode, - n_mels=n_mels, - f_min=f_min, - f_max=f_max, - htk=htk, - norm=norm, - ref_value=ref_value, - amin=amin, - top_db=top_db, - dtype=dtype) + assert ( + n_mfcc <= n_mels + ), 'n_mfcc cannot be larger than n_mels: %d vs %d' % (n_mfcc, n_mels) + self._log_melspectrogram = LogMelSpectrogram( + sr=sr, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + power=power, + center=center, + pad_mode=pad_mode, + n_mels=n_mels, + f_min=f_min, + f_max=f_max, + htk=htk, + norm=norm, + ref_value=ref_value, + amin=amin, + top_db=top_db, + dtype=dtype, + ) self.dct_matrix = create_dct(n_mfcc=n_mfcc, n_mels=n_mels, dtype=dtype) self.register_buffer('dct_matrix', self.dct_matrix) @@ -389,6 +409,9 @@ class MFCC(nn.Layer): Tensor: Mel frequency cepstral coefficients with shape `(N, n_mfcc, num_frames)`. 
""" log_mel_feature = self._log_melspectrogram(x) - mfcc = paddle.matmul(log_mel_feature.transpose( - (0, 2, 1)), self.dct_matrix).transpose((0, 2, 1)) # (B, n_mels, L) + mfcc = paddle.matmul( + log_mel_feature.transpose((0, 2, 1)), self.dct_matrix + ).transpose( + (0, 2, 1) + ) # (B, n_mels, L) return mfcc diff --git a/python/paddle/audio/functional/functional.py b/python/paddle/audio/functional/functional.py index 69feab436675795ba319a00bc1932db0ceeb68f9..d8fb24361530778ee0f328a42401589e27df6ca4 100644 --- a/python/paddle/audio/functional/functional.py +++ b/python/paddle/audio/functional/functional.py @@ -20,8 +20,9 @@ import paddle from paddle import Tensor -def hz_to_mel(freq: Union[Tensor, float], - htk: bool = False) -> Union[Tensor, float]: +def hz_to_mel( + freq: Union[Tensor, float], htk: bool = False +) -> Union[Tensor, float]: """Convert Hz to Mels. Args: @@ -61,11 +62,13 @@ def hz_to_mel(freq: Union[Tensor, float], logstep = math.log(6.4) / 27.0 # step size for log region if isinstance(freq, Tensor): - target = min_log_mel + paddle.log( - freq / min_log_hz + 1e-10) / logstep # prevent nan with 1e-10 + target = ( + min_log_mel + paddle.log(freq / min_log_hz + 1e-10) / logstep + ) # prevent nan with 1e-10 mask = (freq > min_log_hz).astype(freq.dtype) mels = target * mask + mels * ( - 1 - mask) # will replace by masked_fill OP in future + 1 - mask + ) # will replace by masked_fill OP in future else: if freq >= min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz + 1e-10) / logstep @@ -73,8 +76,9 @@ def hz_to_mel(freq: Union[Tensor, float], return mels -def mel_to_hz(mel: Union[float, Tensor], - htk: bool = False) -> Union[float, Tensor]: +def mel_to_hz( + mel: Union[float, Tensor], htk: bool = False +) -> Union[float, Tensor]: """Convert mel bin numbers to frequencies. Args: @@ -96,7 +100,7 @@ def mel_to_hz(mel: Union[float, Tensor], """ if htk: - return 700.0 * (10.0**(mel / 2595.0) - 1.0) + return 700.0 * (10.0 ** (mel / 2595.0) - 1.0) f_min = 0.0 f_sp = 200.0 / 3 @@ -109,18 +113,21 @@ def mel_to_hz(mel: Union[float, Tensor], target = min_log_hz * paddle.exp(logstep * (mel - min_log_mel)) mask = (mel > min_log_mel).astype(mel.dtype) freqs = target * mask + freqs * ( - 1 - mask) # will replace by masked_fill OP in future + 1 - mask + ) # will replace by masked_fill OP in future else: if mel >= min_log_mel: freqs = min_log_hz * math.exp(logstep * (mel - min_log_mel)) return freqs -def mel_frequencies(n_mels: int = 64, - f_min: float = 0.0, - f_max: float = 11025.0, - htk: bool = False, - dtype: str = 'float32') -> Tensor: +def mel_frequencies( + n_mels: int = 64, + f_min: float = 0.0, + f_max: float = 11025.0, + htk: bool = False, + dtype: str = 'float32', +) -> Tensor: """Compute mel frequencies. Args: @@ -177,14 +184,16 @@ def fft_frequencies(sr: int, n_fft: int, dtype: str = 'float32') -> Tensor: return paddle.linspace(0, float(sr) / 2, int(1 + n_fft // 2), dtype=dtype) -def compute_fbank_matrix(sr: int, - n_fft: int, - n_mels: int = 64, - f_min: float = 0.0, - f_max: Optional[float] = None, - htk: bool = False, - norm: Union[str, float] = 'slaney', - dtype: str = 'float32') -> Tensor: +def compute_fbank_matrix( + sr: int, + n_fft: int, + n_mels: int = 64, + f_min: float = 0.0, + f_max: Optional[float] = None, + htk: bool = False, + norm: Union[str, float] = 'slaney', + dtype: str = 'float32', +) -> Tensor: """Compute fbank matrix. 
Args: @@ -220,15 +229,13 @@ def compute_fbank_matrix(sr: int, fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft, dtype=dtype) # 'Center freqs' of mel bands - uniformly spaced between limits - mel_f = mel_frequencies(n_mels + 2, - f_min=f_min, - f_max=f_max, - htk=htk, - dtype=dtype) + mel_f = mel_frequencies( + n_mels + 2, f_min=f_min, f_max=f_max, htk=htk, dtype=dtype + ) - fdiff = mel_f[1:] - mel_f[:-1] #np.diff(mel_f) + fdiff = mel_f[1:] - mel_f[:-1] # np.diff(mel_f) ramps = mel_f.unsqueeze(1) - fftfreqs.unsqueeze(0) - #ramps = np.subtract.outer(mel_f, fftfreqs) + # ramps = np.subtract.outer(mel_f, fftfreqs) for i in range(n_mels): # lower and upper slopes for all bins @@ -236,12 +243,13 @@ def compute_fbank_matrix(sr: int, upper = ramps[i + 2] / fdiff[i + 1] # .. then intersect them with each other and zero - weights[i] = paddle.maximum(paddle.zeros_like(lower), - paddle.minimum(lower, upper)) + weights[i] = paddle.maximum( + paddle.zeros_like(lower), paddle.minimum(lower, upper) + ) # Slaney-style mel is scaled to be approx constant energy per channel if norm == 'slaney': - enorm = 2.0 / (mel_f[2:n_mels + 2] - mel_f[:n_mels]) + enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels]) weights *= enorm.unsqueeze(1) elif isinstance(norm, int) or isinstance(norm, float): weights = paddle.nn.functional.normalize(weights, p=norm, axis=-1) @@ -249,10 +257,12 @@ def compute_fbank_matrix(sr: int, return weights -def power_to_db(spect: Tensor, - ref_value: float = 1.0, - amin: float = 1e-10, - top_db: Optional[float] = 80.0) -> Tensor: +def power_to_db( + spect: Tensor, + ref_value: float = 1.0, + amin: float = 1e-10, + top_db: Optional[float] = 80.0, +) -> Tensor: """Convert a power spectrogram (amplitude squared) to decibel (dB) units. The function computes the scaling `10 * log10(x / ref)` in a numerically stable way. Args: @@ -291,10 +301,12 @@ def power_to_db(spect: Tensor, return log_spec -def create_dct(n_mfcc: int, - n_mels: int, - norm: Optional[str] = 'ortho', - dtype: str = 'float32') -> Tensor: +def create_dct( + n_mfcc: int, + n_mels: int, + norm: Optional[str] = 'ortho', + dtype: str = 'float32', +) -> Tensor: """Create a discrete cosine transform(DCT) matrix. Args: @@ -316,8 +328,9 @@ def create_dct(n_mfcc: int, """ n = paddle.arange(n_mels, dtype=dtype) k = paddle.arange(n_mfcc, dtype=dtype).unsqueeze(1) - dct = paddle.cos(math.pi / float(n_mels) * (n + 0.5) * - k) # size (n_mfcc, n_mels) + dct = paddle.cos( + math.pi / float(n_mels) * (n + 0.5) * k + ) # size (n_mfcc, n_mels) if norm is None: dct *= 2.0 else: diff --git a/python/paddle/audio/functional/window.py b/python/paddle/audio/functional/window.py index 17ccdce9ef663223ba778d26ebbc8df0a83fb02e..844e2fc26335f98b75ff5d24dfd5b446fe13ed39 100644 --- a/python/paddle/audio/functional/window.py +++ b/python/paddle/audio/functional/window.py @@ -31,7 +31,7 @@ def _acosh(x: Union[Tensor, float]) -> Tensor: def _extend(M: int, sym: bool) -> bool: - """Extend window by 1 sample if needed for DFT-even symmetry. """ + """Extend window by 1 sample if needed for DFT-even symmetry.""" if not sym: return M + 1, True else: @@ -39,7 +39,7 @@ def _extend(M: int, sym: bool) -> bool: def _len_guards(M: int) -> bool: - """Handle small or incorrect window lengths. 
""" + """Handle small or incorrect window lengths.""" if int(M) != M or M < 0: raise ValueError('Window length M must be a non-negative integer') @@ -47,101 +47,99 @@ def _len_guards(M: int) -> bool: def _truncate(w: Tensor, needed: bool) -> Tensor: - """Truncate window by 1 sample if needed for DFT-even symmetry. """ + """Truncate window by 1 sample if needed for DFT-even symmetry.""" if needed: return w[:-1] else: return w -def _general_gaussian(M: int, - p, - sig, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _general_gaussian( + M: int, p, sig, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a window with a generalized Gaussian shape. This function is consistent with scipy.signal.windows.general_gaussian(). """ if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) n = paddle.arange(0, M, dtype=dtype) - (M - 1.0) / 2.0 - w = paddle.exp(-0.5 * paddle.abs(n / sig)**(2 * p)) + w = paddle.exp(-0.5 * paddle.abs(n / sig) ** (2 * p)) return _truncate(w, needs_trunc) -def _general_cosine(M: int, - a: float, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _general_cosine( + M: int, a: float, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a generic weighted sum of cosine terms window. This function is consistent with scipy.signal.windows.general_cosine(). """ if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) fac = paddle.linspace(-math.pi, math.pi, M, dtype=dtype) - w = paddle.zeros((M, ), dtype=dtype) + w = paddle.zeros((M,), dtype=dtype) for k in range(len(a)): w += a[k] * paddle.cos(k * fac) return _truncate(w, needs_trunc) -def _general_hamming(M: int, - alpha: float, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _general_hamming( + M: int, alpha: float, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a generalized Hamming window. This function is consistent with scipy.signal.windows.general_hamming() """ - return _general_cosine(M, [alpha, 1. - alpha], sym, dtype=dtype) + return _general_cosine(M, [alpha, 1.0 - alpha], sym, dtype=dtype) -def _taylor(M: int, - nbar=4, - sll=30, - norm=True, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _taylor( + M: int, nbar=4, sll=30, norm=True, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a Taylor window. The Taylor window taper function approximates the Dolph-Chebyshev window's constant sidelobe level for a parameterized number of near-in sidelobes. """ if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) # Original text uses a negative sidelobe level parameter and then negates # it in the calculation of B. To keep consistent with other methods we # assume the sidelobe level parameter to be positive. 
- B = 10**(sll / 20) + B = 10 ** (sll / 20) A = _acosh(B) / math.pi - s2 = nbar**2 / (A**2 + (nbar - 0.5)**2) + s2 = nbar**2 / (A**2 + (nbar - 0.5) ** 2) ma = paddle.arange(1, nbar, dtype=dtype) - Fm = paddle.empty((nbar - 1, ), dtype=dtype) + Fm = paddle.empty((nbar - 1,), dtype=dtype) signs = paddle.empty_like(ma) signs[::2] = 1 signs[1::2] = -1 m2 = ma * ma for mi in range(len(ma)): - numer = signs[mi] * paddle.prod(1 - m2[mi] / s2 / (A**2 + - (ma - 0.5)**2)) + numer = signs[mi] * paddle.prod( + 1 - m2[mi] / s2 / (A**2 + (ma - 0.5) ** 2) + ) if mi == 0: - denom = 2 * paddle.prod(1 - m2[mi] / m2[mi + 1:]) + denom = 2 * paddle.prod(1 - m2[mi] / m2[mi + 1 :]) elif mi == len(ma) - 1: denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi]) else: - denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi]) * paddle.prod( - 1 - m2[mi] / m2[mi + 1:]) + denom = ( + 2 + * paddle.prod(1 - m2[mi] / m2[:mi]) + * paddle.prod(1 - m2[mi] / m2[mi + 1 :]) + ) Fm[mi] = numer / denom def W(n): return 1 + 2 * paddle.matmul( Fm.unsqueeze(0), - paddle.cos(2 * math.pi * ma.unsqueeze(1) * (n - M / 2. + 0.5) / M)) + paddle.cos(2 * math.pi * ma.unsqueeze(1) * (n - M / 2.0 + 0.5) / M), + ) w = W(paddle.arange(0, M, dtype=dtype)) @@ -169,18 +167,17 @@ def _hann(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor: return _general_hamming(M, 0.5, sym, dtype=dtype) -def _tukey(M: int, - alpha=0.5, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _tukey( + M: int, alpha=0.5, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a Tukey window. The Tukey window is also known as a tapered cosine window. """ if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) if alpha <= 0: - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) elif alpha >= 1.0: return hann(M, sym=sym) @@ -188,57 +185,55 @@ def _tukey(M: int, n = paddle.arange(0, M, dtype=dtype) width = int(alpha * (M - 1) / 2.0) - n1 = n[0:width + 1] - n2 = n[width + 1:M - width - 1] - n3 = n[M - width - 1:] + n1 = n[0 : width + 1] + n2 = n[width + 1 : M - width - 1] + n3 = n[M - width - 1 :] w1 = 0.5 * (1 + paddle.cos(math.pi * (-1 + 2.0 * n1 / alpha / (M - 1)))) w2 = paddle.ones(n2.shape, dtype=dtype) - w3 = 0.5 * (1 + paddle.cos(math.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha / - (M - 1)))) + w3 = 0.5 * ( + 1 + + paddle.cos(math.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha / (M - 1))) + ) w = paddle.concat([w1, w2, w3]) return _truncate(w, needs_trunc) -def _kaiser(M: int, - beta: float, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _kaiser( + M: int, beta: float, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a Kaiser window. The Kaiser window is a taper formed by using a Bessel function. """ raise NotImplementedError() -def _gaussian(M: int, - std: float, - sym: bool = True, - dtype: str = 'float64') -> Tensor: +def _gaussian( + M: int, std: float, sym: bool = True, dtype: str = 'float64' +) -> Tensor: """Compute a Gaussian window. The Gaussian widows has a Gaussian shape defined by the standard deviation(std). 
""" if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) n = paddle.arange(0, M, dtype=dtype) - (M - 1.0) / 2.0 sig2 = 2 * std * std - w = paddle.exp(-n**2 / sig2) + w = paddle.exp(-(n**2) / sig2) return _truncate(w, needs_trunc) -def _exponential(M: int, - center=None, - tau=1., - sym: bool = True, - dtype: str = 'float64') -> Tensor: - """Compute an exponential (or Poisson) window. """ +def _exponential( + M: int, center=None, tau=1.0, sym: bool = True, dtype: str = 'float64' +) -> Tensor: + """Compute an exponential (or Poisson) window.""" if sym and center is not None: raise ValueError("If sym==True, center must be None.") if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) if center is None: @@ -251,10 +246,9 @@ def _exponential(M: int, def _triang(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor: - """Compute a triangular window. - """ + """Compute a triangular window.""" if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) n = paddle.arange(1, (M + 1) // 2 + 1, dtype=dtype) @@ -273,12 +267,13 @@ def _bohman(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor: The Bohman window is the autocorrelation of a cosine window. """ if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) fac = paddle.abs(paddle.linspace(-1, 1, M, dtype=dtype)[1:-1]) w = (1 - fac) * paddle.cos(math.pi * fac) + 1.0 / math.pi * paddle.sin( - math.pi * fac) + math.pi * fac + ) w = _cat([0, w, 0], dtype) return _truncate(w, needs_trunc) @@ -295,20 +290,21 @@ def _blackman(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor: def _cosine(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor: - """Compute a window with a simple cosine shape. - """ + """Compute a window with a simple cosine shape.""" if _len_guards(M): - return paddle.ones((M, ), dtype=dtype) + return paddle.ones((M,), dtype=dtype) M, needs_trunc = _extend(M, sym) - w = paddle.sin(math.pi / M * (paddle.arange(0, M, dtype=dtype) + .5)) + w = paddle.sin(math.pi / M * (paddle.arange(0, M, dtype=dtype) + 0.5)) return _truncate(w, needs_trunc) -def get_window(window: Union[str, Tuple[str, float]], - win_length: int, - fftbins: bool = True, - dtype: str = 'float64') -> Tensor: +def get_window( + window: Union[str, Tuple[str, float]], + win_length: int, + fftbins: bool = True, + dtype: str = 'float64', +) -> Tensor: """Return a window of a given length and type. Args: @@ -340,19 +336,22 @@ def get_window(window: Union[str, Tuple[str, float]], args = window[1:] elif isinstance(window, str): if window in ['gaussian', 'exponential']: - raise ValueError("The '" + window + "' window needs one or " - "more parameters -- pass a tuple.") + raise ValueError( + "The '" + window + "' window needs one or " + "more parameters -- pass a tuple." + ) else: winstr = window else: - raise ValueError("%s as window type is not supported." % - str(type(window))) + raise ValueError( + "%s as window type is not supported." 
% str(type(window)) + ) try: winfunc = eval('_' + winstr) except NameError as e: raise ValueError("Unknown window type.") from e - params = (win_length, ) + args + params = (win_length,) + args kwargs = {'sym': sym} return winfunc(*params, dtype=dtype, **kwargs) diff --git a/python/paddle/audio/utils/error.py b/python/paddle/audio/utils/error.py index ab239a24970ad7d54ac5973c0b48866ce5e0ca03..244340b99b5ec45d2f3cea8b36e5d889955568e0 100644 --- a/python/paddle/audio/utils/error.py +++ b/python/paddle/audio/utils/error.py @@ -17,4 +17,5 @@ __all__ = ['ParameterError'] class ParameterError(Exception): """Exception class for Parameter checking""" + pass diff --git a/python/paddle/autograd/__init__.py b/python/paddle/autograd/__init__.py index 70fc9647cd4898b94d9d415a618c3e700ccbc0ea..19742cf6afe6cd18a2608fbc4a090badf2038065 100644 --- a/python/paddle/autograd/__init__.py +++ b/python/paddle/autograd/__init__.py @@ -18,6 +18,7 @@ from ..framework import is_grad_enabled, set_grad_enabled # noqa: F401 from . import backward_mode # noqa: F401 from .backward_mode import backward # noqa: F401 from ..fluid.framework import _in_eager_mode_ + if _in_eager_mode_: from .py_layer import EagerPyLayer as PyLayer # noqa: F401 from .py_layer import EagerPyLayerContext as PyLayerContext # noqa: F401 diff --git a/python/paddle/autograd/backward_mode.py b/python/paddle/autograd/backward_mode.py index 83c84a01afb2784dd5d095042a88804ea695d8f0..9f673e62e21c5b39158ff9c177243162590a816f 100644 --- a/python/paddle/autograd/backward_mode.py +++ b/python/paddle/autograd/backward_mode.py @@ -83,15 +83,13 @@ def backward(tensors, grad_tensors=None, retain_graph=False): assert len(in_out_list) > 0, "{} connot be empyt".format(name) for each_var in in_out_list: assert isinstance( - each_var, - (paddle.Tensor, core.eager.Tensor - )), "Elements of {} must be paddle.Tensor".format(name) + each_var, (paddle.Tensor, core.eager.Tensor) + ), "Elements of {} must be paddle.Tensor".format(name) return in_out_list else: assert isinstance( - in_out_list, - (paddle.Tensor, core.eager.Tensor - )), "{} must be Tensor or list of Tensor".format(name) + in_out_list, (paddle.Tensor, core.eager.Tensor) + ), "{} must be Tensor or list of Tensor".format(name) return [in_out_list] tensors = check_tensors(tensors, "tensors") @@ -117,12 +115,14 @@ def backward(tensors, grad_tensors=None, retain_graph=False): if len(grad_tensors) > 0: assert len(tensors) == len( - grad_tensors), "The length of grad_tensors must be equal to tensors" + grad_tensors + ), "The length of grad_tensors must be equal to tensors" assert isinstance(retain_graph, bool), "retain_graph must be True or False" if framework._in_eager_mode_: core.eager.run_backward(tensors, grad_tensors, retain_graph) else: - core.dygraph_run_backward(tensors, grad_tensors, retain_graph, - framework._dygraph_tracer()) + core.dygraph_run_backward( + tensors, grad_tensors, retain_graph, framework._dygraph_tracer() + ) diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py index ee7c2e36f73a1029be09ddfd72fd480490a787da..254aabb04b3cb136ae624c70916112a449cbfaf1 100644 --- a/python/paddle/autograd/py_layer.py +++ b/python/paddle/autograd/py_layer.py @@ -124,9 +124,7 @@ class LegacyPyLayerContext(object): def with_mateclass(meta, *bases): - class impl(meta): - def __new__(cls, name, temp_bases, attrs): return meta(name, bases, attrs) @@ -134,7 +132,6 @@ def with_mateclass(meta, *bases): class CPyLayer(object): - @classmethod @dygraph_only def apply(cls, *args, **kwargs): @@ 
-182,12 +179,14 @@ class CPyLayer(object): class PyLayerBackward(LegacyPyLayerContext): - def backward(self, *args, **kwargs): with paddle.fluid.dygraph.guard(): with paddle.fluid.dygraph.no_grad(): - if self._amp_state and 'enable' in self._amp_state and self._amp_state[ - 'enable']: + if ( + self._amp_state + and 'enable' in self._amp_state + and self._amp_state['enable'] + ): with auto_cast(**args[0]._amp_state): return self._forward_cls.backward(*args, **kwargs) else: @@ -197,10 +196,10 @@ class PyLayerBackward(LegacyPyLayerContext): class LayerMeta(type): - def __init__(cls, name, bases, attrs): - cls._backward_function = type(name + '_backward', (PyLayerBackward, ), - {"_forward_cls": cls}) + cls._backward_function = type( + name + '_backward', (PyLayerBackward,), {"_forward_cls": cls} + ) return super(LayerMeta, cls).__init__(name, bases, attrs) @@ -292,7 +291,8 @@ class LegacyPyLayer(with_mateclass(LayerMeta, CPyLayer)): return grad """ raise NotImplementedError( - "You must implement the forward function for PyLayer.") + "You must implement the forward function for PyLayer." + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -332,11 +332,11 @@ class LegacyPyLayer(with_mateclass(LayerMeta, CPyLayer)): """ raise NotImplementedError( - "You must implement the backward function for PyLayer.") + "You must implement the backward function for PyLayer." + ) class EagerPyLayerContext(object): - def save_for_backward(self, *tensors): """ Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors. @@ -542,25 +542,22 @@ class EagerPyLayerContext(object): class EagerPyLayerBackward(core.eager.PyLayer, EagerPyLayerContext): - def backward(self, *args): return self._forward_cls.backward(self, *args) class EagerPyLayerMeta(type): - def __init__(cls, name, bases, attrs): - cls._backward_function = type(name + '_backward', - (EagerPyLayerBackward, ), - {"_forward_cls": cls}) + cls._backward_function = type( + name + '_backward', (EagerPyLayerBackward,), {"_forward_cls": cls} + ) return super(EagerPyLayerMeta, cls).__init__(name, bases, attrs) class EagerPyLayer( - with_mateclass(EagerPyLayerMeta, core.eager.PyLayer, - EagerPyLayerContext)): - + with_mateclass(EagerPyLayerMeta, core.eager.PyLayer, EagerPyLayerContext) +): @staticmethod def forward(ctx, *args, **kwargs): """ @@ -597,7 +594,8 @@ class EagerPyLayer( return grad """ raise NotImplementedError( - "You must implement the forward function for PyLayer.") + "You must implement the forward function for PyLayer." + ) @staticmethod def backward(ctx, *args): @@ -637,11 +635,11 @@ class EagerPyLayer( """ raise NotImplementedError( - "You must implement the backward function for PyLayer.") + "You must implement the backward function for PyLayer." + ) def once_differentiable(backward): - def wrapper(ctx, *args): with paddle.fluid.dygraph.no_grad(): outputs = backward(ctx, *args) diff --git a/python/paddle/autograd/saved_tensors_hooks.py b/python/paddle/autograd/saved_tensors_hooks.py index ff5fecf5ccd5a0fef2185b865ed42ccb7ec7af69..8906dd98b49916296edb72e404e8dcc765557cc4 100644 --- a/python/paddle/autograd/saved_tensors_hooks.py +++ b/python/paddle/autograd/saved_tensors_hooks.py @@ -17,7 +17,7 @@ from paddle.fluid import core __all__ = [] -class saved_tensors_hooks(): +class saved_tensors_hooks: """ Dynamic graph, registers a pair of pack / unpack hooks for saved tensors. 
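The PyLayer machinery reformatted above expects user subclasses to supply a forward/backward pair and to pass tensors between them with ctx.save_for_backward and ctx.saved_tensor. A minimal custom-op sketch built only on those methods (illustrative, not part of this patch):

    import paddle
    from paddle.autograd import PyLayer


    class CusTanh(PyLayer):
        @staticmethod
        def forward(ctx, x):
            y = paddle.tanh(x)
            # Stash y so backward can recover it via ctx.saved_tensor().
            ctx.save_for_backward(y)
            return y

        @staticmethod
        def backward(ctx, dy):
            (y,) = ctx.saved_tensor()
            # d/dx tanh(x) = 1 - tanh(x)^2
            return dy * (1 - paddle.square(y))


    x = paddle.randn([4], dtype='float32')
    x.stop_gradient = False
    out = CusTanh.apply(x)
    out.sum().backward()
    print(x.grad)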
@@ -104,8 +104,9 @@ class saved_tensors_hooks(): self.unpack_hook = unpack_hook def __enter__(self): - core.eager.register_saved_tensors_hooks(self.pack_hook, - self.unpack_hook) + core.eager.register_saved_tensors_hooks( + self.pack_hook, self.unpack_hook + ) def __exit__(self, *args): core.eager.reset_saved_tensors_hooks() diff --git a/python/paddle/batch.py b/python/paddle/batch.py index b87ddc7485882ac14f55d6abff787d484929465a..13ba5a00c0c0c67cbd4f996a88a11e34b51e4900 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -66,7 +66,9 @@ def batch(reader, batch_size, drop_last=False): # Batch size check batch_size = int(batch_size) if batch_size <= 0: - raise ValueError("batch_size should be a positive integeral value, " - "but got batch_size={}".format(batch_size)) + raise ValueError( + "batch_size should be a positive integeral value, " + "but got batch_size={}".format(batch_size) + ) return batch_reader diff --git a/python/paddle/callbacks.py b/python/paddle/callbacks.py index 46f69aae1bbfa48d018f039e36f5f8ddd62ec465..b2da04c83df86170b6607332807150cb69d3cb9d 100644 --- a/python/paddle/callbacks.py +++ b/python/paddle/callbacks.py @@ -20,7 +20,12 @@ from .hapi.callbacks import LRScheduler # noqa: F401 from .hapi.callbacks import EarlyStopping # noqa: F401 from .hapi.callbacks import ReduceLROnPlateau # noqa: F401 -__all__ = [ #noqa - 'Callback', 'ProgBarLogger', 'ModelCheckpoint', 'VisualDL', 'LRScheduler', - 'EarlyStopping', 'ReduceLROnPlateau' +__all__ = [ # noqa + 'Callback', + 'ProgBarLogger', + 'ModelCheckpoint', + 'VisualDL', + 'LRScheduler', + 'EarlyStopping', + 'ReduceLROnPlateau', ] diff --git a/python/paddle/check_import_scipy.py b/python/paddle/check_import_scipy.py index 9bbd061cb3771f4f9ca7406b06436dd12def27ab..473a4301813cdfd95f310960a715fd96a055489e 100644 --- a/python/paddle/check_import_scipy.py +++ b/python/paddle/check_import_scipy.py @@ -20,10 +20,10 @@ def check_import_scipy(OsName): import scipy.io as scio # noqa: F401 except ImportError as e: print_info = str(e) - if (len(print_info) > 0): + if len(print_info) > 0: if 'DLL load failed' in print_info: raise ImportError( - print_info + - "\nplease download Visual C++ Redistributable from https://support.microsoft.com/en-us/topic/the-latest-supported-visual-c-downloads-2647da03-1eea-4433-9aff-95f26a218cc0" + print_info + + "\nplease download Visual C++ Redistributable from https://support.microsoft.com/en-us/topic/the-latest-supported-visual-c-downloads-2647da03-1eea-4433-9aff-95f26a218cc0" ) return diff --git a/python/paddle/common_ops_import.py b/python/paddle/common_ops_import.py index 00ef853b995d9ca0764d255c491a6bafb3b3e442..47bce838936d442e4237fc5f04b093520e457646 100644 --- a/python/paddle/common_ops_import.py +++ b/python/paddle/common_ops_import.py @@ -13,13 +13,29 @@ # limitations under the License. 
from paddle.fluid.layer_helper import LayerHelper # noqa: F401 from paddle.fluid.param_attr import ParamAttr # noqa: F401 -from paddle.fluid.framework import convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, in_dygraph_mode, _in_legacy_dygraph # noqa: F401 -from paddle.fluid.framework import device_guard, default_main_program, dygraph_only, _dygraph_tracer # noqa: F401 +from paddle.fluid.framework import ( # noqa: F401 + convert_np_dtype_to_dtype_, + _non_static_mode, + _varbase_creator, + in_dygraph_mode, + _in_legacy_dygraph, +) +from paddle.fluid.framework import ( # noqa: F401 + device_guard, + default_main_program, + dygraph_only, + _dygraph_tracer, +) from paddle.fluid.framework import OpProtoHolder, Variable # noqa: F401 from paddle.fluid.initializer import Constant # noqa: F401 from paddle.fluid.core import VarDesc # noqa: F401 from paddle.fluid import core, dygraph_utils # noqa: F401 -from paddle.fluid.data_feeder import check_type, check_dtype, check_variable_and_dtype, convert_dtype # noqa: F401 +from paddle.fluid.data_feeder import ( # noqa: F401 + check_type, + check_dtype, + check_variable_and_dtype, + convert_dtype, +) from paddle.fluid.layers import fill_constant, utils, scale # noqa: F401 from paddle.tensor.layer_function_generator import templatedoc # noqa: F401 import paddle.fluid as fluid # noqa: F401 diff --git a/python/paddle/cost_model/cost_model.py b/python/paddle/cost_model/cost_model.py index a59ff31a683a43c3f5d9d34ae4ed8e54c0ab4735..8797868287ba0eee76a0814c4818fc22aec5e5fc 100644 --- a/python/paddle/cost_model/cost_model.py +++ b/python/paddle/cost_model/cost_model.py @@ -20,8 +20,7 @@ import os from paddle.fluid import core -class CostModel(): - +class CostModel: def __init__(self): pass @@ -30,11 +29,12 @@ class CostModel(): main_program = static.Program() startup_program = static.Program() - with static.program_guard(main_program=main_program, - startup_program=startup_program): - data = paddle.static.data(name='X', - shape=[None, 1], - dtype='float32') + with static.program_guard( + main_program=main_program, startup_program=startup_program + ): + data = paddle.static.data( + name='X', shape=[None, 1], dtype='float32' + ) hidden = paddle.static.nn.fc(data, 10) loss = paddle.mean(hidden) paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) @@ -43,11 +43,13 @@ class CostModel(): return startup_program, main_program - def profile_measure(self, - startup_program, - main_program, - device='gpu', - fetch_cost_list=['time']): + def profile_measure( + self, + startup_program, + main_program, + device='gpu', + fetch_cost_list=['time'], + ): place = paddle.set_device('gpu') x = np.random.random(size=(10, 1)).astype('float32') @@ -61,8 +63,9 @@ class CostModel(): cost_data = cost_model.ProfileMeasure(device) def static_cost_data(self): - static_cost_data_path = os.path.join(os.path.dirname(__file__), - "static_op_benchmark.json") + static_cost_data_path = os.path.join( + os.path.dirname(__file__), "static_op_benchmark.json" + ) with open(static_cost_data_path, 'r') as load_f: load_dict = json.load(load_f) self._static_cost_data = load_dict @@ -79,7 +82,7 @@ class CostModel(): op_cost = {} for op_data in self._static_cost_data: if (op_data["op"] == op_name) and (dtype in op_data["config"]): - if (forward): + if forward: op_cost["op_time"] = op_data["paddle_gpu_time"] else: op_cost["op_time"] = op_data["paddle_gpu_time_backward"] diff --git a/python/paddle/dataset/cifar.py b/python/paddle/dataset/cifar.py index 
0a05ecde4738ac6ef0ffc0c9d4d8ad4f9dde0bf9..80fd43eb52f71bab5395871f68ae8e97b625e7de 100644 --- a/python/paddle/dataset/cifar.py +++ b/python/paddle/dataset/cifar.py @@ -43,7 +43,6 @@ CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85' def reader_creator(filename, sub_name, cycle=False): - def read_batch(batch): data = batch[b'data'] labels = batch.get(b'labels', batch.get(b'fine_labels', None)) @@ -54,8 +53,11 @@ def reader_creator(filename, sub_name, cycle=False): def reader(): while True: with tarfile.open(filename, mode='r') as f: - names = (each_item.name for each_item in f - if sub_name in each_item.name) + names = ( + each_item.name + for each_item in f + if sub_name in each_item.name + ) for name in names: batch = pickle.load(f.extractfile(name), encoding='bytes') @@ -72,7 +74,8 @@ def reader_creator(filename, sub_name, cycle=False): since="2.0.0", update_to="paddle.vision.datasets.Cifar100", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train100(): """ CIFAR-100 training set creator. @@ -85,14 +88,16 @@ def train100(): """ return reader_creator( paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5), - 'train') + 'train', + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Cifar100", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test100(): """ CIFAR-100 test set creator. @@ -105,14 +110,16 @@ def test100(): """ return reader_creator( paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5), - 'test') + 'test', + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Cifar10", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train10(cycle=False): """ CIFAR-10 training set creator. @@ -125,17 +132,19 @@ def train10(cycle=False): :return: Training reader creator :rtype: callable """ - return reader_creator(paddle.dataset.common.download( - CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'data_batch', - cycle=cycle) + return reader_creator( + paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'data_batch', + cycle=cycle, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Cifar10", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test10(cycle=False): """ CIFAR-10 test set creator. @@ -148,17 +157,19 @@ def test10(cycle=False): :return: Test reader creator. 
:rtype: callable """ - return reader_creator(paddle.dataset.common.download( - CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'test_batch', - cycle=cycle) + return reader_creator( + paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'test_batch', + cycle=cycle, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Cifar10", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5) paddle.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5) diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py index 97e986c70068a8a24f392ced0e8ad2866b0d6624..157cdeb91fec72fd40ad04c025cafd94b2325f93 100644 --- a/python/paddle/dataset/common.py +++ b/python/paddle/dataset/common.py @@ -74,8 +74,8 @@ def download(url, module_name, md5sum, save_name=None): os.makedirs(dirname) filename = os.path.join( - dirname, - url.split('/')[-1] if save_name is None else save_name) + dirname, url.split('/')[-1] if save_name is None else save_name + ) if os.path.exists(filename) and md5file(filename) == md5sum: return filename @@ -90,9 +90,12 @@ def download(url, module_name, md5sum, save_name=None): else: raise RuntimeError( "Cannot download {0} within retry limit {1}".format( - url, retry_limit)) - sys.stderr.write("Cache file %s not found, downloading %s \n" % - (filename, url)) + url, retry_limit + ) + ) + sys.stderr.write( + "Cache file %s not found, downloading %s \n" % (filename, url) + ) sys.stderr.write("Begin to download\n") try: r = requests.get(url, stream=True) @@ -108,8 +111,9 @@ def download(url, module_name, md5sum, save_name=None): total_iter = total_length / chunk_size + 1 log_interval = total_iter // 20 if total_iter > 20 else 1 log_index = 0 - bar = paddle.hapi.progressbar.ProgressBar(total_iter, - name='item') + bar = paddle.hapi.progressbar.ProgressBar( + total_iter, name='item' + ) for data in r.iter_content(chunk_size=chunk_size): f.write(data) log_index += 1 @@ -127,12 +131,15 @@ def download(url, module_name, md5sum, save_name=None): def fetch_all(): for module_name in [ - x for x in dir(paddle.dataset) if not x.startswith("__") + x for x in dir(paddle.dataset) if not x.startswith("__") ]: if "fetch" in dir( - importlib.import_module("paddle.dataset.%s" % module_name)): - getattr(importlib.import_module("paddle.dataset.%s" % module_name), - "fetch")() + importlib.import_module("paddle.dataset.%s" % module_name) + ): + getattr( + importlib.import_module("paddle.dataset.%s" % module_name), + "fetch", + )() def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump): @@ -173,10 +180,9 @@ def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump): dumper(lines, f) -def cluster_files_reader(files_pattern, - trainer_count, - trainer_id, - loader=pickle.load): +def cluster_files_reader( + files_pattern, trainer_count, trainer_id, loader=pickle.load +): """ Create a reader that yield element from the given files, select a file set according trainer count and trainer_id @@ -216,4 +222,5 @@ def _check_exists_and_download(path, url, md5, module_name, download=True): return paddle.dataset.common.download(url, module_name, md5) else: raise ValueError( - '{} not exists and auto download disabled'.format(path)) + '{} not exists and auto download disabled'.format(path) + ) diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py index 
9333d7350e8c63a6b57e1a75572a2ede13873eeb..22038594f60c909b39140ae5ac30a8e8a684baba 100644 --- a/python/paddle/dataset/conll05.py +++ b/python/paddle/dataset/conll05.py @@ -83,7 +83,8 @@ def corpus_reader(data_path, words_name, props_name): wf = tf.extractfile(words_name) pf = tf.extractfile(props_name) with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile( - fileobj=pf) as props_file: + fileobj=pf + ) as props_file: sentences = [] labels = [] one_seg = [] @@ -116,16 +117,17 @@ def corpus_reader(data_path, words_name, props_name): lbl_seq.append('I-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') != -1: - cur_tag = l[1:l.find('*')] + cur_tag = l[1 : l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') == -1: - cur_tag = l[1:l.find('*')] + cur_tag = l[1 : l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = True else: - raise RuntimeError('Unexpected label: %s' % - l) + raise RuntimeError( + 'Unexpected label: %s' % l + ) yield sentences, verb_list[i], lbl_seq @@ -143,11 +145,9 @@ def corpus_reader(data_path, words_name, props_name): return reader -def reader_creator(corpus_reader, - word_dict=None, - predicate_dict=None, - label_dict=None): - +def reader_creator( + corpus_reader, word_dict=None, predicate_dict=None, label_dict=None +): def reader(): for sentence, predicate, labels in corpus_reader(): @@ -193,8 +193,7 @@ def reader_creator(corpus_reader, pred_idx = [predicate_dict.get(predicate)] * sen_len label_idx = [label_dict.get(w) for w in labels] - yield word_idx, ctx_n2_idx, ctx_n1_idx, \ - ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx + yield word_idx, ctx_n2_idx, ctx_n1_idx, ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx return reader @@ -203,17 +202,21 @@ def reader_creator(corpus_reader, since="2.0.0", update_to="paddle.text.datasets.Conll05st", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def get_dict(): """ Get the word, verb and label dictionary of Wikipedia corpus. """ word_dict = load_dict( - paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)) + paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5) + ) verb_dict = load_dict( - paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) + paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5) + ) label_dict = load_label_dict( - paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) + paddle.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5) + ) return word_dict, verb_dict, label_dict @@ -221,7 +224,8 @@ def get_dict(): since="2.0.0", update_to="paddle.text.datasets.Conll05st", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def get_embedding(): """ Get the trained word vector based on Wikipedia corpus. @@ -233,7 +237,8 @@ def get_embedding(): since="2.0.0", update_to="paddle.text.datasets.Conll05st", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(): """ Conll05 test set creator. 
@@ -250,7 +255,8 @@ def test(): reader = corpus_reader( paddle.dataset.common.download(DATA_URL, 'conll05st', DATA_MD5), words_name='conll05st-release/test.wsj/words/test.wsj.words.gz', - props_name='conll05st-release/test.wsj/props/test.wsj.props.gz') + props_name='conll05st-release/test.wsj/props/test.wsj.props.gz', + ) return reader_creator(reader, word_dict, verb_dict, label_dict) @@ -258,7 +264,8 @@ def test(): since="2.0.0", update_to="paddle.text.datasets.Conll05st", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5) paddle.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5) diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py index 42c0e7081d58efe7e45c750cd55f55335995d3ad..41ffc488aa1ac3dc5234475d251dd445fb6d1055 100644 --- a/python/paddle/dataset/flowers.py +++ b/python/paddle/dataset/flowers.py @@ -63,11 +63,9 @@ def default_mapper(is_train, sample): ''' img, label = sample img = load_image_bytes(img) - img = simple_transform(img, - 256, - 224, - is_train, - mean=[103.94, 116.78, 123.68]) + img = simple_transform( + img, 256, 224, is_train, mean=[103.94, 116.78, 123.68] + ) return img.flatten().astype('float32'), label @@ -75,14 +73,16 @@ train_mapper = functools.partial(default_mapper, True) test_mapper = functools.partial(default_mapper, False) -def reader_creator(data_file, - label_file, - setid_file, - dataset_name, - mapper, - buffered_size=1024, - use_xmap=True, - cycle=False): +def reader_creator( + data_file, + label_file, + setid_file, + dataset_name, + mapper, + buffered_size=1024, + use_xmap=True, + cycle=False, +): ''' 1. read images from tar file and merge images into batch files in 102flowers.tgz_batch/ @@ -138,7 +138,8 @@ def reader_creator(data_file, since="2.0.0", update_to="paddle.vision.datasets.Flowers", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(mapper=train_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers training set reader. @@ -157,21 +158,24 @@ def train(mapper=train_mapper, buffered_size=1024, use_xmap=True, cycle=False): :return: train data reader :rtype: callable ''' - return reader_creator(download(DATA_URL, 'flowers', DATA_MD5), - download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), - TRAIN_FLAG, - mapper, - buffered_size, - use_xmap, - cycle=cycle) + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), + TRAIN_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Flowers", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(mapper=test_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers test set reader. 
@@ -190,21 +194,24 @@ def test(mapper=test_mapper, buffered_size=1024, use_xmap=True, cycle=False): :return: test data reader :rtype: callable ''' - return reader_creator(download(DATA_URL, 'flowers', DATA_MD5), - download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), - TEST_FLAG, - mapper, - buffered_size, - use_xmap, - cycle=cycle) + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), + TEST_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.Flowers", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True): ''' Create flowers validation set reader. @@ -221,10 +228,15 @@ def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True): :return: test data reader :rtype: callable ''' - return reader_creator(download(DATA_URL, 'flowers', DATA_MD5), - download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), VALID_FLAG, - mapper, buffered_size, use_xmap) + return reader_creator( + download(DATA_URL, 'flowers', DATA_MD5), + download(LABEL_URL, 'flowers', LABEL_MD5), + download(SETID_URL, 'flowers', SETID_MD5), + VALID_FLAG, + mapper, + buffered_size, + use_xmap, + ) def fetch(): diff --git a/python/paddle/dataset/image.py b/python/paddle/dataset/image.py index 98ad9299c1b08d4de88c94e829897013541a8363..c8d5124f0feaceafaa7a6a8d0b385e462d4f8cbb 100644 --- a/python/paddle/dataset/image.py +++ b/python/paddle/dataset/image.py @@ -31,6 +31,7 @@ the image layout as follows. """ import numpy as np + # FIXME(minqiyang): this is an ugly fix for the numpy bug reported here # https://github.com/numpy/numpy/issues/12497 import subprocess @@ -42,9 +43,11 @@ interpreter = sys.executable # will be the C++ execubable on Windows if sys.platform == 'win32' and 'python.exe' not in interpreter: interpreter = sys.exec_prefix + os.sep + 'python.exe' -import_cv2_proc = subprocess.Popen([interpreter, "-c", "import cv2"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) +import_cv2_proc = subprocess.Popen( + [interpreter, "-c", "import cv2"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, +) out, err = import_cv2_proc.communicate() retcode = import_cv2_proc.poll() if retcode != 0: @@ -65,6 +68,7 @@ __all__ = [] def _check_cv2(): if cv2 is None: import sys + sys.stderr.write( '''Warning with paddle image module: opencv-python should be imported, or paddle image module could NOT work; please install opencv-python first.''' @@ -74,10 +78,9 @@ def _check_cv2(): return True -def batch_images_from_tar(data_file, - dataset_name, - img2label, - num_per_batch=1024): +def batch_images_from_tar( + data_file, dataset_name, img2label, num_per_batch=1024 +): """ Read images from tar file and batch them into batch file. 
@@ -115,9 +118,11 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump(output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) + pickle.dump( + output, + open('%s/batch_%d' % (out_path, file_id), 'wb'), + protocol=2, + ) file_id += 1 data = [] labels = [] @@ -125,9 +130,9 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - pickle.dump(output, - open('%s/batch_%d' % (out_path, file_id), 'wb'), - protocol=2) + pickle.dump( + output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2 + ) with open(meta_file, 'a') as meta: for file in os.listdir(out_path): @@ -321,12 +326,9 @@ def left_right_flip(im, is_color=True): return im[:, ::-1] -def simple_transform(im, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): +def simple_transform( + im, resize_size, crop_size, is_train, is_color=True, mean=None +): """ Simply data argumentation for training. These operations include resizing, croping and flipping. @@ -377,12 +379,9 @@ def simple_transform(im, return im -def load_and_transform(filename, - resize_size, - crop_size, - is_train, - is_color=True, - mean=None): +def load_and_transform( + filename, resize_size, crop_size, is_train, is_color=True, mean=None +): """ Load image from the input file `filename` and transform image for data argumentation. Please refer to the `simple_transform` interface diff --git a/python/paddle/dataset/imdb.py b/python/paddle/dataset/imdb.py index bed7f03090bf98186d9a20f0ef3aa1ae485036b0..e95a9e6df0066396b3696a4dd347a678493c0787 100644 --- a/python/paddle/dataset/imdb.py +++ b/python/paddle/dataset/imdb.py @@ -29,7 +29,7 @@ import string __all__ = [] -#URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz' +# URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz' URL = 'https://dataset.bj.bcebos.com/imdb%2FaclImdb_v1.tar.gz' MD5 = '7c2ac02c03563afcf9b574c7e56c153a' @@ -49,7 +49,8 @@ def tokenize(pattern): if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate( - None, string.punctuation.encode('latin-1')).lower().split() + None, string.punctuation.encode('latin-1') + ).lower().split() tf = tarf.next() @@ -77,7 +78,8 @@ def build_dict(pattern, cutoff): since="2.0.0", update_to="paddle.text.datasets.Imdb", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def reader_creator(pos_pattern, neg_pattern, word_idx): UNK = word_idx[''] INS = [] @@ -100,7 +102,8 @@ def reader_creator(pos_pattern, neg_pattern, word_idx): since="2.0.0", update_to="paddle.text.datasets.Imdb", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(word_idx): """ IMDB training set creator. 
@@ -113,15 +116,19 @@ def train(word_idx): :return: Training reader creator :rtype: callable """ - return reader_creator(re.compile(r"aclImdb/train/pos/.*\.txt$"), - re.compile(r"aclImdb/train/neg/.*\.txt$"), word_idx) + return reader_creator( + re.compile(r"aclImdb/train/pos/.*\.txt$"), + re.compile(r"aclImdb/train/neg/.*\.txt$"), + word_idx, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.Imdb", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(word_idx): """ IMDB test set creator. @@ -134,15 +141,19 @@ def test(word_idx): :return: Test reader creator :rtype: callable """ - return reader_creator(re.compile(r"aclImdb/test/pos/.*\.txt$"), - re.compile(r"aclImdb/test/neg/.*\.txt$"), word_idx) + return reader_creator( + re.compile(r"aclImdb/test/pos/.*\.txt$"), + re.compile(r"aclImdb/test/neg/.*\.txt$"), + word_idx, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.Imdb", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def word_dict(): """ Build a word dictionary from the corpus. @@ -151,13 +162,15 @@ def word_dict(): :rtype: dict """ return build_dict( - re.compile(r"aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150) + re.compile(r"aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150 + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.Imdb", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(URL, 'imdb', MD5) diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py index b94513ec13afee2b3d42516c9c9899a17fd818ff..4630d88e21a7cb8873df699c982d36e24c3a72bf 100644 --- a/python/paddle/dataset/imikolov.py +++ b/python/paddle/dataset/imikolov.py @@ -26,7 +26,7 @@ import tarfile __all__ = [] -#URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' +# URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' URL = 'https://dataset.bj.bcebos.com/imikolov%2Fsimple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' @@ -57,9 +57,10 @@ def build_dict(min_word_freq=50): train_filename = './simple-examples/data/ptb.train.txt' test_filename = './simple-examples/data/ptb.valid.txt' with tarfile.open( - paddle.dataset.common.download(paddle.dataset.imikolov.URL, - 'imikolov', - paddle.dataset.imikolov.MD5)) as tf: + paddle.dataset.common.download( + paddle.dataset.imikolov.URL, 'imikolov', paddle.dataset.imikolov.MD5 + ) + ) as tf: trainf = tf.extractfile(train_filename) testf = tf.extractfile(test_filename) word_freq = word_count(testf, word_count(trainf)) @@ -78,12 +79,14 @@ def build_dict(min_word_freq=50): def reader_creator(filename, word_idx, n, data_type): - def reader(): with tarfile.open( - paddle.dataset.common.download( - paddle.dataset.imikolov.URL, 'imikolov', - paddle.dataset.imikolov.MD5)) as tf: + paddle.dataset.common.download( + paddle.dataset.imikolov.URL, + 'imikolov', + paddle.dataset.imikolov.MD5, + ) + ) as tf: f = tf.extractfile(filename) UNK = word_idx[''] @@ -94,13 +97,14 @@ def reader_creator(filename, word_idx, n, data_type): if len(l) >= n: l = [word_idx.get(w, UNK) for w in l] for i in range(n, len(l) + 1): - yield tuple(l[i - n:i]) + yield tuple(l[i - n : i]) elif DataType.SEQ == data_type: l = 
l.strip().split() l = [word_idx.get(w, UNK) for w in l] src_seq = [word_idx['']] + l trg_seq = l + [word_idx['']] - if n > 0 and len(src_seq) > n: continue + if n > 0 and len(src_seq) > n: + continue yield src_seq, trg_seq else: assert False, 'Unknow data type' @@ -112,7 +116,8 @@ def reader_creator(filename, word_idx, n, data_type): since="2.0.0", update_to="paddle.text.datasets.Imikolov", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(word_idx, n, data_type=DataType.NGRAM): """ imikolov training set creator. @@ -129,15 +134,17 @@ def train(word_idx, n, data_type=DataType.NGRAM): :return: Training reader creator :rtype: callable """ - return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n, - data_type) + return reader_creator( + './simple-examples/data/ptb.train.txt', word_idx, n, data_type + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.Imikolov", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(word_idx, n, data_type=DataType.NGRAM): """ imikolov test set creator. @@ -154,14 +161,16 @@ def test(word_idx, n, data_type=DataType.NGRAM): :return: Test reader creator :rtype: callable """ - return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n, - data_type) + return reader_creator( + './simple-examples/data/ptb.valid.txt', word_idx, n, data_type + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.Imikolov", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(URL, "imikolov", MD5) diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py index 12f03dd837d4571f81d69a6ea5b479f3603ebcf3..ec1cb9855cfe42321540713d8ef99fe1ba7b99b0 100644 --- a/python/paddle/dataset/mnist.py +++ b/python/paddle/dataset/mnist.py @@ -38,7 +38,6 @@ TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432' def reader_creator(image_filename, label_filename, buffer_size): - def reader(): with gzip.GzipFile(image_filename, 'rb') as image_file: img_buf = image_file.read() @@ -53,14 +52,16 @@ def reader_creator(image_filename, label_filename, buffer_size): # image file : 16B magic_byte_img = '>IIII' magic_img, image_num, rows, cols = struct.unpack_from( - magic_byte_img, img_buf, offset_img) + magic_byte_img, img_buf, offset_img + ) offset_img += struct.calcsize(magic_byte_img) offset_lab = 0 # label file : 8B magic_byte_lab = '>II' magic_lab, label_num = struct.unpack_from( - magic_byte_lab, lab_buf, offset_lab) + magic_byte_lab, lab_buf, offset_lab + ) offset_lab += struct.calcsize(magic_byte_lab) while True: @@ -72,11 +73,12 @@ def reader_creator(image_filename, label_filename, buffer_size): step_label += buffer_size fmt_images = '>' + str(buffer_size * rows * cols) + 'B' - images_temp = struct.unpack_from(fmt_images, img_buf, - offset_img) + images_temp = struct.unpack_from( + fmt_images, img_buf, offset_img + ) images = numpy.reshape( - images_temp, - (buffer_size, rows * cols)).astype('float32') + images_temp, (buffer_size, rows * cols) + ).astype('float32') offset_img += struct.calcsize(fmt_images) images = images / 255.0 @@ -93,7 +95,8 @@ def reader_creator(image_filename, label_filename, buffer_size): since="2.0.0", 
update_to="paddle.vision.datasets.MNIST", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(): """ MNIST training set creator. @@ -105,17 +108,22 @@ def train(): :rtype: callable """ return reader_creator( - paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', - TRAIN_IMAGE_MD5), - paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', - TRAIN_LABEL_MD5), 100) + paddle.dataset.common.download( + TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5 + ), + paddle.dataset.common.download( + TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5 + ), + 100, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.MNIST", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(): """ MNIST test set creator. @@ -129,14 +137,16 @@ def test(): return reader_creator( paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5), paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5), - 100) + 100, + ) @deprecated( since="2.0.0", update_to="paddle.vision.datasets.MNIST", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5) paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py index 513e5d2a5d65e87f93c8707c70e700b78c14d398..fd57ad8edf758ffab672296507177d337815fc75 100644 --- a/python/paddle/dataset/movielens.py +++ b/python/paddle/dataset/movielens.py @@ -33,7 +33,7 @@ __all__ = [] age_table = [1, 18, 25, 35, 45, 50, 56] -#URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip' +# URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip' URL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip' MD5 = 'c4d9eecfca2ab87c1945afe126590906' @@ -53,13 +53,17 @@ class MovieInfo(object): Get information from a movie. 
""" return [ - self.index, [CATEGORIES_DICT[c] for c in self.categories], - [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()] + self.index, + [CATEGORIES_DICT[c] for c in self.categories], + [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()], ] def __str__(self): return "" % ( - self.index, self.title, self.categories) + self.index, + self.title, + self.categories, + ) def __repr__(self): return self.__str__() @@ -84,8 +88,11 @@ class UserInfo(object): def __str__(self): return "" % ( - self.index, "M" if self.is_male else "F", age_table[self.age], - self.job_id) + self.index, + "M" if self.is_male else "F", + age_table[self.age], + self.job_id, + ) def __repr__(self): return str(self) @@ -117,7 +124,8 @@ def __initialize_meta_info__(): categories_set.add(c) title = pattern.match(title).group(1) MOVIE_INFO[int(movie_id)] = MovieInfo( - index=movie_id, categories=categories, title=title) + index=movie_id, categories=categories, title=title + ) for w in title.split(): title_word_set.add(w.lower()) @@ -137,10 +145,9 @@ def __initialize_meta_info__(): for line in user_file: line = line.decode(encoding='latin') uid, gender, age, job, _ = line.strip().split("::") - USER_INFO[int(uid)] = UserInfo(index=uid, - gender=gender, - age=age, - job_id=job) + USER_INFO[int(uid)] = UserInfo( + index=uid, gender=gender, age=age, job_id=job + ) return fn @@ -166,7 +173,8 @@ def __reader__(rand_seed=0, test_ratio=0.1, is_test=False): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def __reader_creator__(**kwargs): return lambda: __reader__(**kwargs) @@ -179,7 +187,8 @@ test = functools.partial(__reader_creator__, is_test=True) since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def get_movie_title_dict(): """ Get movie title dictionary. @@ -199,7 +208,8 @@ def __max_index_info__(a, b): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def max_movie_id(): """ Get the maximum value of movie id. @@ -212,7 +222,8 @@ def max_movie_id(): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def max_user_id(): """ Get the maximum value of user id. @@ -232,21 +243,24 @@ def __max_job_id_impl__(a, b): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def max_job_id(): """ Get the maximum value of job id. 
""" __initialize_meta_info__() - return functools.reduce(__max_job_id_impl__, - list(USER_INFO.values())).job_id + return functools.reduce( + __max_job_id_impl__, list(USER_INFO.values()) + ).job_id @deprecated( since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def movie_categories(): """ Get movie categories dictionary. @@ -259,7 +273,8 @@ def movie_categories(): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def user_info(): """ Get user info dictionary. @@ -272,7 +287,8 @@ def user_info(): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def movie_info(): """ Get movie info dictionary. @@ -294,7 +310,8 @@ def unittest(): since="2.0.0", update_to="paddle.text.datasets.Movielens", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(URL, "movielens", MD5) diff --git a/python/paddle/dataset/tests/cifar_test.py b/python/paddle/dataset/tests/cifar_test.py index f7bdc47d9aac664b84cb41dcf44538a8d1fd280f..6f25975dd8e0f13d58ea4687e2e78807b667203e 100644 --- a/python/paddle/dataset/tests/cifar_test.py +++ b/python/paddle/dataset/tests/cifar_test.py @@ -19,7 +19,6 @@ __all__ = [] class TestCIFAR(unittest.TestCase): - def check_reader(self, reader): sum = 0 label = 0 @@ -32,25 +31,29 @@ class TestCIFAR(unittest.TestCase): def test_test10(self): instances, max_label_value = self.check_reader( - paddle.dataset.cifar.test10()) + paddle.dataset.cifar.test10() + ) self.assertEqual(instances, 10000) self.assertEqual(max_label_value, 9) def test_train10(self): instances, max_label_value = self.check_reader( - paddle.dataset.cifar.train10()) + paddle.dataset.cifar.train10() + ) self.assertEqual(instances, 50000) self.assertEqual(max_label_value, 9) def test_test100(self): instances, max_label_value = self.check_reader( - paddle.dataset.cifar.test100()) + paddle.dataset.cifar.test100() + ) self.assertEqual(instances, 10000) self.assertEqual(max_label_value, 99) def test_train100(self): instances, max_label_value = self.check_reader( - paddle.dataset.cifar.train100()) + paddle.dataset.cifar.train100() + ) self.assertEqual(instances, 50000) self.assertEqual(max_label_value, 99) diff --git a/python/paddle/dataset/tests/flowers_test.py b/python/paddle/dataset/tests/flowers_test.py index 31a49edf9751fbabb7778d3dea112587f458bfff..55a27b0ce8fca6d556dc2a465f3ad289184966d9 100644 --- a/python/paddle/dataset/tests/flowers_test.py +++ b/python/paddle/dataset/tests/flowers_test.py @@ -19,7 +19,6 @@ __all__ = [] class TestFlowers(unittest.TestCase): - def check_reader(self, reader): sum = 0 label = 0 @@ -33,19 +32,22 @@ class TestFlowers(unittest.TestCase): def test_train(self): instances, max_label_value = self.check_reader( - paddle.dataset.flowers.train()) + paddle.dataset.flowers.train() + ) self.assertEqual(instances, 6149) self.assertEqual(max_label_value, 102) def test_test(self): instances, max_label_value = self.check_reader( - 
paddle.dataset.flowers.test()) + paddle.dataset.flowers.test() + ) self.assertEqual(instances, 1020) self.assertEqual(max_label_value, 102) def test_valid(self): instances, max_label_value = self.check_reader( - paddle.dataset.flowers.valid()) + paddle.dataset.flowers.valid() + ) self.assertEqual(instances, 1020) self.assertEqual(max_label_value, 102) diff --git a/python/paddle/dataset/tests/imikolov_test.py b/python/paddle/dataset/tests/imikolov_test.py index f7f025eb54076fd827a7711b8532208b8da602d4..f8d8b182e2d77d5631752f5f85cc749058458c05 100644 --- a/python/paddle/dataset/tests/imikolov_test.py +++ b/python/paddle/dataset/tests/imikolov_test.py @@ -21,7 +21,6 @@ __all__ = [] class TestMikolov(unittest.TestCase): - def check_reader(self, reader, n): for l in reader(): self.assertEqual(len(l), n) @@ -30,16 +29,18 @@ class TestMikolov(unittest.TestCase): n = 5 self.check_reader(paddle.dataset.imikolov.train(WORD_DICT, n), n) - first_line = 'aer banknote berlitz calloway centrust cluett fromstein '\ - 'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts '\ + first_line = ( + 'aer banknote berlitz calloway centrust cluett fromstein ' + 'gitano guterman hydro-quebec ipo kia memotec mlx nahb punts ' 'rake regatta rubens sim snack-food ssangyong swapo wachter' + ) first_line = [ WORD_DICT.get(ch, WORD_DICT['']) for ch in first_line.split(' ') ] for l in paddle.dataset.imikolov.train( - WORD_DICT, n=-1, - data_type=paddle.dataset.imikolov.DataType.SEQ)(): + WORD_DICT, n=-1, data_type=paddle.dataset.imikolov.DataType.SEQ + )(): read_line = l[0][1:] break self.assertEqual(first_line, read_line) @@ -48,15 +49,17 @@ class TestMikolov(unittest.TestCase): n = 5 self.check_reader(paddle.dataset.imikolov.test(WORD_DICT, n), n) - first_line = 'consumers may want to move their telephones a little '\ - 'closer to the tv set' + first_line = ( + 'consumers may want to move their telephones a little ' + 'closer to the tv set' + ) first_line = [ WORD_DICT.get(ch, WORD_DICT['']) for ch in first_line.split(' ') ] for l in paddle.dataset.imikolov.test( - WORD_DICT, n=-1, - data_type=paddle.dataset.imikolov.DataType.SEQ)(): + WORD_DICT, n=-1, data_type=paddle.dataset.imikolov.DataType.SEQ + )(): read_line = l[0][1:] break self.assertEqual(first_line, read_line) diff --git a/python/paddle/dataset/tests/mnist_test.py b/python/paddle/dataset/tests/mnist_test.py index d2ebacc8c73f061b8801ce47a028d14ddccebf23..978fa714f726799c02d54464df756c5bce19a9f2 100644 --- a/python/paddle/dataset/tests/mnist_test.py +++ b/python/paddle/dataset/tests/mnist_test.py @@ -19,7 +19,6 @@ __all__ = [] class TestMNIST(unittest.TestCase): - def check_reader(self, reader): sum = 0 label = 0 @@ -32,13 +31,15 @@ class TestMNIST(unittest.TestCase): def test_train(self): instances, max_label_value = self.check_reader( - paddle.dataset.mnist.train()) + paddle.dataset.mnist.train() + ) self.assertEqual(instances, 60000) self.assertEqual(max_label_value, 9) def test_test(self): instances, max_label_value = self.check_reader( - paddle.dataset.mnist.test()) + paddle.dataset.mnist.test() + ) self.assertEqual(instances, 10000) self.assertEqual(max_label_value, 9) diff --git a/python/paddle/dataset/tests/test_image.py b/python/paddle/dataset/tests/test_image.py index 61979b865fda37c2b0fd7118f95e039796d6f527..953e70c41865d3c2043b3a1bb612066932b22e75 100644 --- a/python/paddle/dataset/tests/test_image.py +++ b/python/paddle/dataset/tests/test_image.py @@ -32,7 +32,7 @@ class Image(unittest.TestCase): """ def test_resize_flip_chw(self): - """ 
resize """ + """resize""" imgdir = sys.argv[0].replace('test_image.py', 'cat.jpg') images = image.load_image(imgdir) images = image.resize_short(images, 256) diff --git a/python/paddle/dataset/tests/voc2012_test.py b/python/paddle/dataset/tests/voc2012_test.py index 43f96cd9377ae3b876aa6b1b793621fba52bfd4f..7db2f5abd385f13f77eb55d8f5973012c4ecb707 100644 --- a/python/paddle/dataset/tests/voc2012_test.py +++ b/python/paddle/dataset/tests/voc2012_test.py @@ -19,7 +19,6 @@ __all__ = [] class TestVOC(unittest.TestCase): - def check_reader(self, reader): sum = 0 label = 0 diff --git a/python/paddle/dataset/tests/wmt16_test.py b/python/paddle/dataset/tests/wmt16_test.py index 1b0b44c974f6a022ac87da352703689608bbc1f5..58f405b26e306b8de26d2f4b0481ae36334e5698 100644 --- a/python/paddle/dataset/tests/wmt16_test.py +++ b/python/paddle/dataset/tests/wmt16_test.py @@ -19,7 +19,6 @@ __all__ = [] class TestWMT16(unittest.TestCase): - def checkout_one_sample(self, sample): # train data has 3 field: source language word indices, # target language word indices, and target next word indices. @@ -37,23 +36,30 @@ class TestWMT16(unittest.TestCase): def test_train(self): for idx, sample in enumerate( - paddle.dataset.wmt16.train(src_dict_size=100000, - trg_dict_size=100000)()): - if idx >= 10: break + paddle.dataset.wmt16.train( + src_dict_size=100000, trg_dict_size=100000 + )() + ): + if idx >= 10: + break self.checkout_one_sample(sample) def test_test(self): for idx, sample in enumerate( - paddle.dataset.wmt16.test(src_dict_size=1000, - trg_dict_size=1000)()): - if idx >= 10: break + paddle.dataset.wmt16.test(src_dict_size=1000, trg_dict_size=1000)() + ): + if idx >= 10: + break self.checkout_one_sample(sample) def test_val(self): for idx, sample in enumerate( - paddle.dataset.wmt16.validation(src_dict_size=1000, - trg_dict_size=1000)()): - if idx >= 10: break + paddle.dataset.wmt16.validation( + src_dict_size=1000, trg_dict_size=1000 + )() + ): + if idx >= 10: + break self.checkout_one_sample(sample) def test_get_dict(self): diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py index 66c6ab3c47989d963a3163d60f55b4c497d536fa..6c085920e2e053bfcb01c1387eaee2ba6366c421 100644 --- a/python/paddle/dataset/uci_housing.py +++ b/python/paddle/dataset/uci_housing.py @@ -31,8 +31,19 @@ __all__ = [] URL = 'http://paddlemodels.bj.bcebos.com/uci_housing/housing.data' MD5 = 'd4accdce7a25600298819f8e28e8d593' feature_names = [ - 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', - 'PTRATIO', 'B', 'LSTAT' + 'CRIM', + 'ZN', + 'INDUS', + 'CHAS', + 'NOX', + 'RM', + 'AGE', + 'DIS', + 'RAD', + 'TAX', + 'PTRATIO', + 'B', + 'LSTAT', ] UCI_TRAIN_DATA = None @@ -44,14 +55,15 @@ FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b' def feature_range(maximums, minimums): import matplotlib + matplotlib.use('Agg') import matplotlib.pyplot as plt + fig, ax = plt.subplots() feature_num = len(maximums) - ax.bar(list(range(feature_num)), - maximums - minimums, - color='r', - align='center') + ax.bar( + list(range(feature_num)), maximums - minimums, color='r', align='center' + ) ax.set_title('feature scale') plt.xticks(list(range(feature_num)), feature_names) plt.xlim([-1, feature_num]) @@ -70,10 +82,13 @@ def load_data(filename, feature_num=14, ratio=0.8): data = np.fromfile(filename, sep=' ') data = data.reshape(data.shape[0] // feature_num, feature_num) - maximums, minimums, avgs = data.max(axis=0), data.min( - axis=0), data.sum(axis=0) / data.shape[0] + maximums, minimums, avgs = ( + 
data.max(axis=0), + data.min(axis=0), + data.sum(axis=0) / data.shape[0], + ) # if you want to print the distribution of input data, you could use function of feature_range - #feature_range(maximums[:-1], minimums[:-1]) + # feature_range(maximums[:-1], minimums[:-1]) for i in range(feature_num - 1): data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) offset = int(data.shape[0] * ratio) @@ -85,7 +100,8 @@ def load_data(filename, feature_num=14, ratio=0.8): since="2.0.0", update_to="paddle.text.datasets.UCIHousing", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(): """ UCI_HOUSING training set creator. @@ -110,7 +126,8 @@ def train(): since="2.0.0", update_to="paddle.text.datasets.UCIHousing", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(): """ UCI_HOUSING test set creator. @@ -132,10 +149,9 @@ def test(): def fluid_model(): - parameter_tar = paddle.dataset.common.download(FLUID_URL_MODEL, - 'uci_housing', - FLUID_MD5_MODEL, - 'fit_a_line.fluid.tar') + parameter_tar = paddle.dataset.common.download( + FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar' + ) tar = tarfile.TarFile(parameter_tar, mode='r') dirpath = tempfile.mkdtemp() @@ -148,7 +164,8 @@ def fluid_model(): since="2.0.0", update_to="paddle.text.datasets.UCIHousing", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def predict_reader(): """ It returns just one tuple data to do inference. @@ -158,13 +175,14 @@ def predict_reader(): """ global UCI_TEST_DATA load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5)) - return (UCI_TEST_DATA[0][:-1], ) + return (UCI_TEST_DATA[0][:-1],) @deprecated( since="2.0.0", update_to="paddle.text.datasets.UCIHousing", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(URL, 'uci_housing', MD5) diff --git a/python/paddle/dataset/voc2012.py b/python/paddle/dataset/voc2012.py index a949872fb0bababb301fa7ef9a5ee39babaf9607..2a80eab1e84b906cf6b30d20905f752ad444dc81 100644 --- a/python/paddle/dataset/voc2012.py +++ b/python/paddle/dataset/voc2012.py @@ -68,7 +68,8 @@ def reader_creator(filename, sub_name): since="2.0.0", update_to="paddle.vision.datasets.VOC2012", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(): """ Create a train dataset reader containing 2913 images in HWC order. @@ -80,7 +81,8 @@ def train(): since="2.0.0", update_to="paddle.vision.datasets.VOC2012", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(): """ Create a test dataset reader containing 1464 images in HWC order. 
@@ -92,7 +94,8 @@ def test(): since="2.0.0", update_to="paddle.vision.datasets.VOC2012", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def val(): """ Create a val dataset reader containing 1449 images in HWC order. diff --git a/python/paddle/dataset/wmt14.py b/python/paddle/dataset/wmt14.py index 33ad05c36d0192389b6197749deeb00746ad5130..6d7c2639d2fa4600a3444c79c9bec8be06a97495 100644 --- a/python/paddle/dataset/wmt14.py +++ b/python/paddle/dataset/wmt14.py @@ -27,12 +27,14 @@ import paddle.utils.deprecated as deprecated __all__ = [] -URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' - 'cslm_joint_paper/data/dev+test.tgz') +URL_DEV_TEST = ( + 'http://www-lium.univ-lemans.fr/~schwenk/' + 'cslm_joint_paper/data/dev+test.tgz' +) MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' # this is a small set of data for test. The original data is too large and # will be add later. -URL_TRAIN = ('http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz') +URL_TRAIN = 'http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz' MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' # BLEU of this trained model is 26.92 URL_MODEL = 'http://paddlemodels.bj.bcebos.com/wmt%2Fwmt14.tgz' @@ -45,7 +47,6 @@ UNK_IDX = 2 def __read_to_dict(tar_file, dict_size): - def __to_dict(fd, size): out_dict = dict() for line_count, line in enumerate(fd): @@ -57,13 +58,15 @@ def __read_to_dict(tar_file, dict_size): with tarfile.open(tar_file, mode='r') as f: names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith("src.dict") ] assert len(names) == 1 src_dict = __to_dict(f.extractfile(names[0]), dict_size) names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith("trg.dict") ] assert len(names) == 1 @@ -72,12 +75,12 @@ def __read_to_dict(tar_file, dict_size): def reader_creator(tar_file, file_name, dict_size): - def reader(): src_dict, trg_dict = __read_to_dict(tar_file, dict_size) with tarfile.open(tar_file, mode='r') as f: names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith(file_name) ] for name in names: @@ -112,7 +115,8 @@ def reader_creator(tar_file, file_name, dict_size): since="2.0.0", update_to="paddle.text.datasets.WMT14", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(dict_size): """ WMT14 training set creator. @@ -126,14 +130,17 @@ def train(dict_size): """ return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), - 'train/train', dict_size) + 'train/train', + dict_size, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT14", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(dict_size): """ WMT14 test set creator. 
@@ -147,25 +154,31 @@ def test(dict_size): """ return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), - 'test/test', dict_size) + 'test/test', + dict_size, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT14", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def gen(dict_size): return reader_creator( paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN), - 'gen/gen', dict_size) + 'gen/gen', + dict_size, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT14", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def get_dict(dict_size, reverse=True): # if reverse = False, return dict = {'a':'001', 'b':'002', ...} # else reverse = true, return dict = {'001':'a', '002':'b', ...} @@ -181,7 +194,8 @@ def get_dict(dict_size, reverse=True): since="2.0.0", update_to="paddle.text.datasets.WMT14", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) paddle.dataset.common.download(URL_MODEL, 'wmt14', MD5_MODEL) diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py index f5a97172a7d82aa37a78589164e3782f7f3464d5..01b13cd1368476545ef8422a505018d7c97410b0 100644 --- a/python/paddle/dataset/wmt16.py +++ b/python/paddle/dataset/wmt16.py @@ -37,7 +37,7 @@ import paddle.utils.deprecated as deprecated __all__ = [] -DATA_URL = ("http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz") +DATA_URL = "http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz" DATA_MD5 = "0c38be43600334966403524a40dcd81e" TOTAL_EN_WORDS = 11250 @@ -54,7 +54,8 @@ def __build_dict(tar_file, dict_size, save_path, lang): for line in f.extractfile("wmt16/train"): line = line.decode() line_split = line.strip().split("\t") - if len(line_split) != 2: continue + if len(line_split) != 2: + continue sen = line_split[0] if lang == "en" else line_split[1] for w in sen.split(): word_dict[w] += 1 @@ -62,17 +63,21 @@ def __build_dict(tar_file, dict_size, save_path, lang): with open(save_path, "wb") as fout: fout.write(("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode()) for idx, word in enumerate( - sorted(word_dict.items(), key=lambda x: x[1], reverse=True)): - if idx + 3 == dict_size: break + sorted(word_dict.items(), key=lambda x: x[1], reverse=True) + ): + if idx + 3 == dict_size: + break fout.write(word[0].encode()) fout.write(b'\n') def __load_dict(tar_file, dict_size, lang, reverse=False): - dict_path = os.path.join(paddle.dataset.common.DATA_HOME, - "wmt16/%s_%d.dict" % (lang, dict_size)) - if not os.path.exists(dict_path) or (len(open(dict_path, "rb").readlines()) - != dict_size): + dict_path = os.path.join( + paddle.dataset.common.DATA_HOME, "wmt16/%s_%d.dict" % (lang, dict_size) + ) + if not os.path.exists(dict_path) or ( + len(open(dict_path, "rb").readlines()) != dict_size + ): __build_dict(tar_file, dict_size, dict_path, lang) word_dict = {} @@ -87,18 +92,20 @@ def __load_dict(tar_file, dict_size, lang, reverse=False): def __get_dict_size(src_dict_size, trg_dict_size, src_lang): src_dict_size = min( - src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else TOTAL_DE_WORDS)) + src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else 
TOTAL_DE_WORDS) + ) trg_dict_size = min( - trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else TOTAL_EN_WORDS)) + trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else TOTAL_EN_WORDS) + ) return src_dict_size, trg_dict_size def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang): - def reader(): src_dict = __load_dict(tar_file, src_dict_size, src_lang) - trg_dict = __load_dict(tar_file, trg_dict_size, - ("de" if src_lang == "en" else "en")) + trg_dict = __load_dict( + tar_file, trg_dict_size, ("de" if src_lang == "en" else "en") + ) # the index for start mark, end mark, and unk are the same in source # language and target language. Here uses the source language @@ -117,9 +124,11 @@ def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang): if len(line_split) != 2: continue src_words = line_split[src_col].split() - src_ids = [start_id - ] + [src_dict.get(w, unk_id) - for w in src_words] + [end_id] + src_ids = ( + [start_id] + + [src_dict.get(w, unk_id) for w in src_words] + + [end_id] + ) trg_words = line_split[trg_col].split() trg_ids = [trg_dict.get(w, unk_id) for w in trg_words] @@ -136,7 +145,8 @@ def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang): since="2.0.0", update_to="paddle.text.datasets.WMT16", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def train(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 train set reader. @@ -172,24 +182,31 @@ def train(src_dict_size, trg_dict_size, src_lang="en"): """ if src_lang not in ["en", "de"]: - raise ValueError("An error language type. Only support: " - "en (for English); de(for Germany).") - src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, - src_lang) - - return reader_creator(tar_file=paddle.dataset.common.download( - DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), - file_name="wmt16/train", - src_dict_size=src_dict_size, - trg_dict_size=trg_dict_size, - src_lang=src_lang) + raise ValueError( + "An error language type. Only support: " + "en (for English); de(for Germany)." + ) + src_dict_size, trg_dict_size = __get_dict_size( + src_dict_size, trg_dict_size, src_lang + ) + + return reader_creator( + tar_file=paddle.dataset.common.download( + DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz" + ), + file_name="wmt16/train", + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT16", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def test(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 test set reader. @@ -224,25 +241,32 @@ def test(src_dict_size, trg_dict_size, src_lang="en"): """ if src_lang not in ["en", "de"]: - raise ValueError("An error language type. " - "Only support: en (for English); de(for Germany).") - - src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, - src_lang) - - return reader_creator(tar_file=paddle.dataset.common.download( - DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), - file_name="wmt16/test", - src_dict_size=src_dict_size, - trg_dict_size=trg_dict_size, - src_lang=src_lang) + raise ValueError( + "An error language type. " + "Only support: en (for English); de(for Germany)." 
+ ) + + src_dict_size, trg_dict_size = __get_dict_size( + src_dict_size, trg_dict_size, src_lang + ) + + return reader_creator( + tar_file=paddle.dataset.common.download( + DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz" + ), + file_name="wmt16/test", + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT16", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def validation(src_dict_size, trg_dict_size, src_lang="en"): """ WMT16 validation set reader. @@ -276,24 +300,31 @@ def validation(src_dict_size, trg_dict_size, src_lang="en"): callable: The validation reader. """ if src_lang not in ["en", "de"]: - raise ValueError("An error language type. " - "Only support: en (for English); de(for Germany).") - src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size, - src_lang) - - return reader_creator(tar_file=paddle.dataset.common.download( - DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz"), - file_name="wmt16/val", - src_dict_size=src_dict_size, - trg_dict_size=trg_dict_size, - src_lang=src_lang) + raise ValueError( + "An error language type. " + "Only support: en (for English); de(for Germany)." + ) + src_dict_size, trg_dict_size = __get_dict_size( + src_dict_size, trg_dict_size, src_lang + ) + + return reader_creator( + tar_file=paddle.dataset.common.download( + DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz" + ), + file_name="wmt16/val", + src_dict_size=src_dict_size, + trg_dict_size=trg_dict_size, + src_lang=src_lang, + ) @deprecated( since="2.0.0", update_to="paddle.text.datasets.WMT16", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def get_dict(lang, dict_size, reverse=False): """ return the word dictionary for the specified language. @@ -312,11 +343,14 @@ def get_dict(lang, dict_size, reverse=False): dict: The word dictionary for the specific language. """ - if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS) - else: dict_size = min(dict_size, TOTAL_DE_WORDS) + if lang == "en": + dict_size = min(dict_size, TOTAL_EN_WORDS) + else: + dict_size = min(dict_size, TOTAL_DE_WORDS) - dict_path = os.path.join(paddle.dataset.common.DATA_HOME, - "wmt16/%s_%d.dict" % (lang, dict_size)) + dict_path = os.path.join( + paddle.dataset.common.DATA_HOME, "wmt16/%s_%d.dict" % (lang, dict_size) + ) assert os.path.exists(dict_path), "Word dictionary does not exist. " "Please invoke paddle.dataset.wmt16.train/test/validation first " "to build the dictionary." @@ -328,9 +362,10 @@ def get_dict(lang, dict_size, reverse=False): since="2.0.0", update_to="paddle.text.datasets.WMT16", level=1, - reason="Please use new dataset API which supports paddle.io.DataLoader") + reason="Please use new dataset API which supports paddle.io.DataLoader", +) def fetch(): - """download the entire dataset. 
- """ - paddle.v4.dataset.common.download(DATA_URL, "wmt16", DATA_MD5, - "wmt16.tar.gz") + """download the entire dataset.""" + paddle.v4.dataset.common.download( + DATA_URL, "wmt16", DATA_MD5, "wmt16.tar.gz" + ) diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py index e9894309999a47b6a5839cfee8f494404e5991e2..6e14fd504784e2244c5647f9352c068e6c58350a 100644 --- a/python/paddle/device/__init__.py +++ b/python/paddle/device/__init__.py @@ -199,39 +199,49 @@ def _convert_to_place(device): place = core.CPUPlace() elif lower_device == 'gpu': if not core.is_compiled_with_cuda(): - raise ValueError("The device should not be 'gpu', " - "since PaddlePaddle is not compiled with CUDA") + raise ValueError( + "The device should not be 'gpu', " + "since PaddlePaddle is not compiled with CUDA" + ) place = core.CUDAPlace(ParallelEnv().dev_id) elif lower_device == 'xpu': if not core.is_compiled_with_xpu(): - raise ValueError("The device should not be 'xpu', " - "since PaddlePaddle is not compiled with XPU") + raise ValueError( + "The device should not be 'xpu', " + "since PaddlePaddle is not compiled with XPU" + ) selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",") device_id = int(selected_xpus[0]) place = core.XPUPlace(device_id) elif lower_device == 'npu': if not core.is_compiled_with_npu(): - raise ValueError("The device should not be 'npu', " - "since PaddlePaddle is not compiled with NPU") + raise ValueError( + "The device should not be 'npu', " + "since PaddlePaddle is not compiled with NPU" + ) selected_npus = os.getenv("FLAGS_selected_npus", "0").split(",") device_id = int(selected_npus[0]) place = core.NPUPlace(device_id) elif lower_device == 'ipu': if not core.is_compiled_with_ipu(): raise ValueError( - "The device should not be 'ipu', " \ - "since PaddlePaddle is not compiled with IPU") + "The device should not be 'ipu', " + "since PaddlePaddle is not compiled with IPU" + ) place = core.IPUPlace() elif lower_device == 'mlu': if not core.is_compiled_with_mlu(): - raise ValueError("The device should not be 'mlu', " - "since PaddlePaddle is not compiled with MLU") + raise ValueError( + "The device should not be 'mlu', " + "since PaddlePaddle is not compiled with MLU" + ) selected_mlus = os.getenv("FLAGS_selected_mlus", "0").split(",") device_id = int(selected_mlus[0]) place = core.MLUPlace(device_id) elif device in core.get_all_custom_device_type(): - selected_devices = os.getenv("FLAGS_selected_{}s".format(device), - "0").split(",") + selected_devices = os.getenv( + "FLAGS_selected_{}s".format(device), "0" + ).split(",") device_id = int(selected_devices[0]) place = core.CustomPlace(device, device_id) else: @@ -239,7 +249,12 @@ def _convert_to_place(device): avaliable_xpu_device = re.match(r'xpu:\d+', lower_device) avaliable_npu_device = re.match(r'npu:\d+', lower_device) avaliable_mlu_device = re.match(r'mlu:\d+', lower_device) - if not avaliable_gpu_device and not avaliable_xpu_device and not avaliable_npu_device and not avaliable_mlu_device: + if ( + not avaliable_gpu_device + and not avaliable_xpu_device + and not avaliable_npu_device + and not avaliable_mlu_device + ): device_info_list = device.split(':', 1) device_type = device_info_list[0] if device_type in core.get_all_custom_device_type(): @@ -248,15 +263,20 @@ def _convert_to_place(device): place = core.CustomPlace(device_type, device_id) else: raise ValueError( - "The device must be a string which is like 'cpu', {}". 
- format(', '.join("'{}', '{}:x'".format(x, x) - for x in ['gpu', 'xpu', 'npu', 'mlu'] + - core.get_all_custom_device_type()))) + "The device must be a string which is like 'cpu', {}".format( + ', '.join( + "'{}', '{}:x'".format(x, x) + for x in ['gpu', 'xpu', 'npu', 'mlu'] + + core.get_all_custom_device_type() + ) + ) + ) if avaliable_gpu_device: if not core.is_compiled_with_cuda(): raise ValueError( "The device should not be {}, since PaddlePaddle is " - "not compiled with CUDA".format(avaliable_gpu_device)) + "not compiled with CUDA".format(avaliable_gpu_device) + ) device_info_list = device.split(':', 1) device_id = device_info_list[1] device_id = int(device_id) @@ -265,7 +285,8 @@ def _convert_to_place(device): if not core.is_compiled_with_xpu(): raise ValueError( "The device should not be {}, since PaddlePaddle is " - "not compiled with XPU".format(avaliable_xpu_device)) + "not compiled with XPU".format(avaliable_xpu_device) + ) device_info_list = device.split(':', 1) device_id = device_info_list[1] device_id = int(device_id) @@ -274,7 +295,8 @@ def _convert_to_place(device): if not core.is_compiled_with_npu(): raise ValueError( "The device should not be {}, since PaddlePaddle is " - "not compiled with NPU".format(avaliable_npu_device)) + "not compiled with NPU".format(avaliable_npu_device) + ) device_info_list = device.split(':', 1) device_id = device_info_list[1] device_id = int(device_id) @@ -283,7 +305,8 @@ def _convert_to_place(device): if not core.is_compiled_with_mlu(): raise ValueError( "The device should not be {}, since PaddlePaddle is " - "not compiled with mlu".format(avaliable_mlu_device)) + "not compiled with mlu".format(avaliable_mlu_device) + ) device_info_list = device.split(':', 1) device_id = device_info_list[1] device_id = int(device_id) diff --git a/python/paddle/device/cuda/__init__.py b/python/paddle/device/cuda/__init__.py index fa0f194969bc8e19517d9bf5c3c20f7ed001f160..316f9de61226543a7a9b8b50b92fbbff64f7b253 100644 --- a/python/paddle/device/cuda/__init__.py +++ b/python/paddle/device/cuda/__init__.py @@ -124,8 +124,11 @@ def device_count(): ''' - num_gpus = core.get_cuda_device_count() if hasattr( - core, 'get_cuda_device_count') else 0 + num_gpus = ( + core.get_cuda_device_count() + if hasattr(core, 'get_cuda_device_count') + else 0 + ) return num_gpus @@ -165,7 +168,7 @@ def extract_cuda_device_id(device, op_name): Return: int: The id of the given device. If device is None, return the id of current device. ''' - if (device is None): + if device is None: return core.get_cuda_current_device_id() if isinstance(device, int): @@ -178,15 +181,19 @@ def extract_cuda_device_id(device, op_name): else: raise ValueError( "The current string {} is not expected. Because {} only support string which is like 'gpu:x'. " - "Please input appropriate string again!".format( - device, op_name)) + "Please input appropriate string again!".format(device, op_name) + ) else: raise ValueError( "The device type {} is not expected. Because {} only support int, str or paddle.CUDAPlace. " - "Please input appropriate device again!".format(device, op_name)) + "Please input appropriate device again!".format(device, op_name) + ) - assert device_id >= 0, f"The device id must be not less than 0, but got id = {device_id}." - assert device_id < device_count( + assert ( + device_id >= 0 + ), f"The device id must be not less than 0, but got id = {device_id}." 
+ assert ( + device_id < device_count() ), f"The device id {device_id} exceeds gpu card number {device_count()}" return device_id @@ -424,7 +431,8 @@ def get_device_properties(device=None): raise ValueError( "The API paddle.device.cuda.get_device_properties is not supported in " "CPU-only PaddlePaddle. Please reinstall PaddlePaddle with GPU support " - "to call this API.") + "to call this API." + ) if device is not None: if isinstance(device, int): @@ -438,12 +446,14 @@ def get_device_properties(device=None): raise ValueError( "The current string {} is not expected. Because paddle.device." "cuda.get_device_properties only support string which is like 'gpu:x'. " - "Please input appropriate string again!".format(device)) + "Please input appropriate string again!".format(device) + ) else: raise ValueError( "The device type {} is not expected. Because paddle.device.cuda." "get_device_properties only support int, str or paddle.CUDAPlace. " - "Please input appropriate device again!".format(device)) + "Please input appropriate device again!".format(device) + ) else: device_id = -1 diff --git a/python/paddle/device/cuda/graphs.py b/python/paddle/device/cuda/graphs.py index 44ce2c5eea7d04f10ce445197ea7c626891b8323..ea9962f22997cf939136ecfc40d44ddd23d5579b 100644 --- a/python/paddle/device/cuda/graphs.py +++ b/python/paddle/device/cuda/graphs.py @@ -16,7 +16,11 @@ import os import paddle from paddle.fluid import core from paddle.fluid.layers.utils import _hash_with_id -from paddle.fluid.core import is_compiled_with_cuda, is_compiled_with_rocm, CUDAPlace +from paddle.fluid.core import ( + is_compiled_with_cuda, + is_compiled_with_rocm, + CUDAPlace, +) import warnings if is_compiled_with_cuda() and not is_compiled_with_rocm(): @@ -24,6 +28,7 @@ if is_compiled_with_cuda() and not is_compiled_with_rocm(): def is_cuda_graph_supported(): return True + else: CoreCUDAGraph = None @@ -36,9 +41,10 @@ cuda_graph_id = 0 class CUDAGraph: - def __init__(self, place=None, mode="thread_local"): - assert CoreCUDAGraph is not None, "CUDA Graph is only supported on PaddlePaddle compiled with NVIDIA GPU." + assert ( + CoreCUDAGraph is not None + ), "CUDA Graph is only supported on PaddlePaddle compiled with NVIDIA GPU." self._graph = None if place is None: @@ -65,7 +71,8 @@ class CUDAGraph: dirname = dirname.name os.makedirs(name=dirname, exist_ok=True) assert os.path.isdir( - dirname), "The dirname {} should be a directory".format(dirname) + dirname + ), "The dirname {} should be a directory".format(dirname) if flags is None: flags = 2047 # only all information. 
It can be any integer inside [1, 2048) self._graph.print_to_dot_files(dirname, flags) @@ -76,6 +83,7 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"): if not paddle.in_dynamic_mode(): # static mode from paddle.fluid.framework import _cuda_graph_guard + global cuda_graph_id graph_id = str(cuda_graph_id) cuda_graph_id += 1 @@ -86,13 +94,15 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"): else: raise ValueError( "memory_pool should be one of default or new under static mode, but got", - memory_pool) + memory_pool, + ) return _cuda_graph_guard( - mode + ';' + str(memory_pool_id) + ';' + - graph_id)(lambda *args, **kwargs: function(*args, **kwargs)) + mode + ';' + str(memory_pool_id) + ';' + graph_id + )(lambda *args, **kwargs: function(*args, **kwargs)) from paddle.jit import to_static from paddle.nn import Layer + new_function = to_static(function) if isinstance(function, Layer): mock_func = new_function.forward @@ -105,7 +115,9 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"): mock_func._cuda_graph_pool_id = CoreCUDAGraph.gen_new_memory_pool_id() else: if isinstance(memory_pool, Layer): - mock_func._cuda_graph_pool_id = memory_pool.forward._cuda_graph_pool_id + mock_func._cuda_graph_pool_id = ( + memory_pool.forward._cuda_graph_pool_id + ) else: mock_func._cuda_graph_pool_id = memory_pool._cuda_graph_pool_id return new_function @@ -224,16 +236,21 @@ def get_cuda_graph_sections(program): for idx, op in enumerate(block.ops): if op.type == 'conditional_block' or op.type == 'while': - assert op._cuda_graph_attr is None, "Cuda graph not support conditional block op and while op." + assert ( + op._cuda_graph_attr is None + ), "Cuda graph not support conditional block op and while op." 
if op.has_attr('is_test') and op.attr('is_test'): is_test = True # find cuda graph sections if op._cuda_graph_attr is not None: - assert isinstance(op._cuda_graph_attr, - str), "cuda_graph_attr should be a str" + assert isinstance( + op._cuda_graph_attr, str + ), "cuda_graph_attr should be a str" cuda_graph_attrs = op._cuda_graph_attr.split(';') - assert len(cuda_graph_attrs) == 3, "cuda graph attr should have three fields: " \ - "cuda graph mode, cuda graph memory pool id, cuda graph id" + assert len(cuda_graph_attrs) == 3, ( + "cuda graph attr should have three fields: " + "cuda graph mode, cuda graph memory pool id, cuda graph id" + ) local_cuda_graph_id = int(cuda_graph_attrs[2]) if local_cuda_graph_id == current_cuda_graph_id: if len(internal_section) > 0: @@ -241,12 +258,16 @@ def get_cuda_graph_sections(program): internal_idx ), "len of internal section should be equal with len of internal idx" for internal_op in internal_section: - loss_related = (int(internal_op.attr(op_role_attr_name)) - == loss_op_role) or int( - (internal_op.attr(op_role_attr_name) - ) == loss_grad_op_role) - sub_block_related = (op.type == 'conditional_block' - or op.type == 'while') + loss_related = ( + int(internal_op.attr(op_role_attr_name)) + == loss_op_role + ) or int( + (internal_op.attr(op_role_attr_name)) + == loss_grad_op_role + ) + sub_block_related = ( + op.type == 'conditional_block' or op.type == 'while' + ) if loss_related or sub_block_related: # If loss_related is True # The internal section contains loss related ops, @@ -260,8 +281,9 @@ def get_cuda_graph_sections(program): internal_section = [] internal_idx = [] # Beside clear the internal section, a new cuda graph section should be recorded - assert len(current_section) == len(current_idx), \ - "num of section's op is not equal with the idx" + assert len(current_section) == len( + current_idx + ), "num of section's op is not equal with the idx" if len(current_section) > 0: # store previous section cuda_graph_sections.append(current_section) @@ -282,7 +304,9 @@ def get_cuda_graph_sections(program): # internal ops and idx belong to no section, just clear it internal_section = [] internal_idx = [] - current_cuda_graph_id = local_cuda_graph_id # start record a new section + current_cuda_graph_id = ( + local_cuda_graph_id # start record a new section + ) assert len(current_section) == len( current_idx ), "num of section's op is not equal with num of idx" @@ -299,7 +323,8 @@ def get_cuda_graph_sections(program): # handle the last section assert len(current_section) == len( - current_idx), "num of section's op is not equal with num of idx" + current_idx + ), "num of section's op is not equal with num of idx" if len(current_section) > 0: # store previous section cuda_graph_sections.append(current_section) @@ -308,9 +333,15 @@ def get_cuda_graph_sections(program): return cuda_graph_sections, sections_idx, is_test -def replace_cuda_graph_section(ins_and_outs, section_program, section_idx, - origin_program, cuda_graph_section, order, - is_test): +def replace_cuda_graph_section( + ins_and_outs, + section_program, + section_idx, + origin_program, + cuda_graph_section, + order, + is_test, +): """ Use section_program and ins_and_outs to initialize a run_program_op, and replace the section_idx marks ops in the origin program. 
@@ -344,8 +375,9 @@ def replace_cuda_graph_section(ins_and_outs, section_program, section_idx, memory_pool_id = int(attrs[1]) break - assert mode is not None and memory_pool_id is not None, \ - "mode and memory pool id should be specified in cuda graph attr" + assert ( + mode is not None and memory_pool_id is not None + ), "mode and memory pool id should be specified in cuda graph attr" cuda_graph_var = origin_block.create_var( name="cuda_graph_" + str(order), @@ -372,7 +404,7 @@ def replace_cuda_graph_section(ins_and_outs, section_program, section_idx, outputs={ 'Out': outs, 'OutScope': out_scope_var, - 'CUDAGraph': cuda_graph_var + 'CUDAGraph': cuda_graph_var, }, attrs={ 'global_block': section_program.global_block(), @@ -386,7 +418,8 @@ def replace_cuda_graph_section(ins_and_outs, section_program, section_idx, 'use_interpretorcore': False, 'forward_global_block': section_program.global_block(), 'backward_global_block': section_program.global_block(), - }) + }, + ) def cuda_graph_transform(program): @@ -408,9 +441,11 @@ def cuda_graph_transform(program): # A cuda graph section contains all ops marked with same cuda graph id and # some ops inserted by some optimizers (amp, sharding for example) between ops with same id. cuda_graph_sections, sections_idx, is_test = get_cuda_graph_sections( - program) - assert len(cuda_graph_sections) == len(sections_idx), \ - "num of cuda graph sections is not equal with num of idx sections" + program + ) + assert len(cuda_graph_sections) == len( + sections_idx + ), "num of cuda graph sections is not equal with num of idx sections" # step 2: construct new program for each section and find inputs and outputs of each section. # The inputs are variables generated outside the section but will be used by this section. @@ -420,23 +455,27 @@ def cuda_graph_transform(program): for i in range(len(cuda_graph_sections)): # creating new program for current section section_program, ins_outs = construct_program_and_find_ins_outs( - cuda_graph_sections[i], program, sections_idx[i]) + cuda_graph_sections[i], program, sections_idx[i] + ) ins_and_outs.append(ins_outs) section_programs.append(section_program) - assert len(section_programs) == len(cuda_graph_sections), \ - "the num of cuda graph sections should be equal with the num of new program" + assert len(section_programs) == len( + cuda_graph_sections + ), "the num of cuda graph sections should be equal with the num of new program" # step 3: replace the ops in original program with run_program_op. # Will remove all ops in the section from origin program, and use run_program_op to replace them. for i in reversed(range(len(cuda_graph_sections))): # carry out the replacement in reversed order, to keep the previous idx intact - replace_cuda_graph_section(ins_and_outs[i], - section_programs[i], - sections_idx[i], - program, - cuda_graph_sections[i], - order=i, - is_test=is_test) + replace_cuda_graph_section( + ins_and_outs[i], + section_programs[i], + sections_idx[i], + program, + cuda_graph_sections[i], + order=i, + is_test=is_test, + ) # NOTE: user should hold these program, for now just return these program back to caller return section_programs diff --git a/python/paddle/distributed/__init__.py b/python/paddle/distributed/__init__.py index 13c305b58da7342fee55c9cd2f7858322761d9e9..2bce21be7cc6165f1809f117d1beb148fe8bc06e 100644 --- a/python/paddle/distributed/__init__.py +++ b/python/paddle/distributed/__init__.py @@ -72,12 +72,42 @@ from .sharding import save_group_sharded_model # noqa: F401 from . 
import rpc __all__ = [ # noqa - "spawn", "launch", "scatter", "broadcast", "ParallelEnv", "new_group", - "init_parallel_env", "gloo_init_parallel_env", "gloo_barrier", - "gloo_release", "QueueDataset", "split", "CountFilterEntry", - "ShowClickEntry", "get_world_size", "get_group", "all_gather", - "all_gather_object", "InMemoryDataset", "barrier", "all_reduce", "alltoall", - "alltoall_single", "send", "reduce", "recv", "ReduceOp", "wait", "get_rank", - "ProbabilityEntry", "ParallelMode", "is_initialized", - "destroy_process_group", "isend", "irecv", "reduce_scatter", "rpc", "stream" + "spawn", + "launch", + "scatter", + "broadcast", + "ParallelEnv", + "new_group", + "init_parallel_env", + "gloo_init_parallel_env", + "gloo_barrier", + "gloo_release", + "QueueDataset", + "split", + "CountFilterEntry", + "ShowClickEntry", + "get_world_size", + "get_group", + "all_gather", + "all_gather_object", + "InMemoryDataset", + "barrier", + "all_reduce", + "alltoall", + "alltoall_single", + "send", + "reduce", + "recv", + "ReduceOp", + "wait", + "get_rank", + "ProbabilityEntry", + "ParallelMode", + "is_initialized", + "destroy_process_group", + "isend", + "irecv", + "reduce_scatter", + "rpc", + "stream", ] diff --git a/python/paddle/distributed/auto_parallel/callbacks.py b/python/paddle/distributed/auto_parallel/callbacks.py index 17ce5bd71b8168608f19af4a0f1b9860856c6091..26371cc4ccb91ad94b58384113df68ebbc6ff83a 100644 --- a/python/paddle/distributed/auto_parallel/callbacks.py +++ b/python/paddle/distributed/auto_parallel/callbacks.py @@ -16,22 +16,30 @@ import os import time import paddle -from paddle.hapi.callbacks import ProgBarLogger, ModelCheckpoint, LRScheduler, CallbackList, Callback +from paddle.hapi.callbacks import ( + ProgBarLogger, + ModelCheckpoint, + LRScheduler, + CallbackList, + Callback, +) from .interface import CollectionNames, get_collection -def config_callbacks(callbacks=None, - engine=None, - batch_size=None, - epochs=None, - steps=None, - log_freq=2, - verbose=2, - save_freq=1, - save_dir=None, - metrics=None, - acc_step=1, - mode='train'): +def config_callbacks( + callbacks=None, + engine=None, + batch_size=None, + epochs=None, + steps=None, + log_freq=2, + verbose=2, + save_freq=1, + save_dir=None, + metrics=None, + acc_step=1, + mode='train', +): cbks = callbacks or [] cbks = cbks if isinstance(cbks, (list, tuple)) else [cbks] @@ -74,7 +82,6 @@ def config_callbacks(callbacks=None, class ProgBarLoggerAuto(ProgBarLogger): - def __init__(self, log_freq=1, verbose=2): super(ProgBarLoggerAuto, self).__init__(log_freq, verbose) @@ -110,16 +117,22 @@ class ProgBarLoggerAuto(ProgBarLogger): cnt = timer['count'] if timer['count'] > 0 else 1.0 samples = timer['samples'] if timer['samples'] > 0 else 1.0 values.append( - ('avg_reader_cost', "%.5f sec" % (timer['data_time'] / cnt))) + ('avg_reader_cost', "%.5f sec" % (timer['data_time'] / cnt)) + ) values.append( - ('avg_batch_cost', "%.5f sec" % (timer['batch_time'] / cnt))) + ('avg_batch_cost', "%.5f sec" % (timer['batch_time'] / cnt)) + ) values.append( - ('ips', "%.5f samples/sec" % - (samples / (timer['data_time'] + timer['batch_time'])))) + ( + 'ips', + "%.5f samples/sec" + % (samples / (timer['data_time'] + timer['batch_time'])), + ) + ) timer['count'] = 0 timer['samples'] = 0 - timer['data_time'] = 0. - timer['batch_time'] = 0. 
+ timer['data_time'] = 0.0 + timer['batch_time'] = 0.0 progbar.update(steps, values) @@ -130,7 +143,8 @@ class ProgBarLoggerAuto(ProgBarLogger): self.evaled_samples += samples self._eval_timer['batch_time'] += ( - time.time() - self._eval_timer['batch_data_end_time']) + time.time() - self._eval_timer['batch_data_end_time'] + ) self._eval_timer['count'] += 1 samples = self.params['batch_size'] self._eval_timer['samples'] += samples @@ -143,7 +157,6 @@ class ProgBarLoggerAuto(ProgBarLogger): class LRSchedulerAuto(LRScheduler): - def __init__(self, by_step=True, by_epoch=False): super(LRSchedulerAuto, self).__init__(by_step, by_epoch) @@ -156,15 +169,18 @@ class LRSchedulerAuto(LRScheduler): self.train_step += 1 if self.by_step and self.train_step % self.acc_step == 0: - if self.model._optimizer and \ - hasattr(self.model._optimizer, '_learning_rate') and \ - isinstance(self.model._optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): + if ( + self.model._optimizer + and hasattr(self.model._optimizer, '_learning_rate') + and isinstance( + self.model._optimizer._learning_rate, + paddle.optimizer.lr.LRScheduler, + ) + ): self.model._optimizer._learning_rate.step() class History(Callback): - def __init__(self): self.history = {} @@ -181,7 +197,6 @@ class History(Callback): class Profiler(Callback): - def __init__(self, *args, **kwargs): self.prof = paddle.profiler.Profiler(*args, **kwargs) @@ -197,8 +212,11 @@ class Profiler(Callback): def on_train_batch_end(self, step, logs=None): self.train_step += 1 self.prof.step(num_samples=self.batch_size) - print("step {}:{}".format(self.train_step, - self.prof.step_info(unit='samples'))) + print( + "step {}:{}".format( + self.train_step, self.prof.step_info(unit='samples') + ) + ) def on_train_end(self, logs=None): self.prof.stop() @@ -206,7 +224,6 @@ class Profiler(Callback): class ModelCheckpointAuto(ModelCheckpoint): - def __init__(self, *args, **kwargs): super(ModelCheckpointAuto, self).__init__(*args, **kwargs) diff --git a/python/paddle/distributed/auto_parallel/cluster.py b/python/paddle/distributed/auto_parallel/cluster.py index e17f83eb419072f23350db6c0788769bbc1a5ebd..ca66a770e3c4cffb6077c7c4b7d55f54ee0e9758 100644 --- a/python/paddle/distributed/auto_parallel/cluster.py +++ b/python/paddle/distributed/auto_parallel/cluster.py @@ -128,8 +128,15 @@ class Device: def __str__(self): str = "" str += "global_id: {}, local_id: {}, machine_id: {}, type: {}, model: {}, dp_flops: {}, sp_flops: {}, memory: {}".format( - self.global_id, self.local_id, self.machine.id, self.type.name, - self.model, self.dp_gflops, self.sp_gflops, self.memory) + self.global_id, + self.local_id, + self.machine.id, + self.type.name, + self.model, + self.dp_gflops, + self.sp_gflops, + self.memory, + ) return str def __repr__(self): @@ -202,8 +209,12 @@ class Link: def __str__(self): str = "" str += "source_global_id: {}, target_global_id: {}, type: {}, bandwidth: {}, latency: {}".format( - self.source.global_id, self.target.global_id, self.type, - self.bandwidth, self.latency) + self.source.global_id, + self.target.global_id, + self.type, + self.bandwidth, + self.latency, + ) return str def __repr__(self): @@ -211,7 +222,6 @@ class Link: class Machine: - def __init__(self, id): self._id = id self._hostname = None @@ -292,7 +302,6 @@ class Machine: class AlphaLatency: - def __init__(self, alpha_latency): assert isinstance(alpha_latency, dict) self._base = alpha_latency.get("base", None) @@ -304,12 +313,15 @@ class AlphaLatency: self._switch = float(self._switch) except: 
raise TypeError("The switch latency must be float") - self._base_ring = self._base.get( - "ring", None) if self._base is not None else None - self._base_tree = self._base.get( - "tree", None) if self._base is not None else None - self._base_inter = self._base.get( - "inter", None) if self._base is not None else None + self._base_ring = ( + self._base.get("ring", None) if self._base is not None else None + ) + self._base_tree = ( + self._base.get("tree", None) if self._base is not None else None + ) + self._base_inter = ( + self._base.get("inter", None) if self._base is not None else None + ) if self._base_ring is not None: try: self._base_ring = float(self._base_ring) @@ -416,19 +428,21 @@ class Cluster: # which have the same number accelerators. self._num_devices_per_machine = None - def gen_default_config_cluster(self, - gpu_model="V100", - cpu_model="6271C", - node_count=1, - device_count=1, - gpu_memory=32, - cpu_memory=503, - inter_bandwidth=24, - intra_bandwidth=235, - gpu_dp_gflops=7800, - gpu_sp_gflops=15700, - cpu_dp_gflops=75, - cpu_sp_gflops=150): + def gen_default_config_cluster( + self, + gpu_model="V100", + cpu_model="6271C", + node_count=1, + device_count=1, + gpu_memory=32, + cpu_memory=503, + inter_bandwidth=24, + intra_bandwidth=235, + gpu_dp_gflops=7800, + gpu_sp_gflops=15700, + cpu_dp_gflops=75, + cpu_sp_gflops=150, + ): """Generate cluster by default config.""" gpu_models = ["V100", "A100", "H100", "A2", "A10", "A16", "A30", "A40"] xpu_models = ["XPU"] @@ -602,25 +616,31 @@ class Cluster: prev_machine = self._machines[machine.id - 1] offset = prev_machine._non_accelerator_cumulative_count for global_id in machine.devices: - if machine.devices[ - global_id].type not in Device.NON_ACCELERATOR_TYPE: + if ( + machine.devices[global_id].type + not in Device.NON_ACCELERATOR_TYPE + ): rank_id = global_id - offset self._rank_to_device_id[rank_id] = global_id self._device_id_to_rank[global_id] = rank_id - machine._non_accelerator_cumulative_count = len( - machine.devices) - len( - machine.accelerators - ) + prev_machine._non_accelerator_cumulative_count + machine._non_accelerator_cumulative_count = ( + len(machine.devices) + - len(machine.accelerators) + + prev_machine._non_accelerator_cumulative_count + ) else: for global_id in machine.devices: - if machine.devices[ - global_id].type not in Device.NON_ACCELERATOR_TYPE: + if ( + machine.devices[global_id].type + not in Device.NON_ACCELERATOR_TYPE + ): rank_id = global_id self._rank_to_device_id[rank_id] = global_id self._device_id_to_rank[global_id] = rank_id machine.accelerators[global_id] = machine.devices[global_id] machine._non_accelerator_cumulative_count = len( - machine.devices) - len(machine.accelerators) + machine.devices + ) - len(machine.accelerators) @property def alpha_latency(self): @@ -696,7 +716,8 @@ class Cluster: if "alpha_latency" in cluster_info: self._alpha_latency = AlphaLatency( - cluster_info.get("alpha_latency")) + cluster_info.get("alpha_latency") + ) else: self._alpha_latecy = None @@ -732,7 +753,7 @@ class Cluster: else: bandwidth = link.bandwidth - if bandwidth == 0.: + if bandwidth == 0.0: beta = 0 else: beta = 1 / (bandwidth * (convert_base**3 / 10**6)) @@ -813,13 +834,16 @@ def get_default_cluster(): global_device_count = int(global_device_count) assert global_device_count % local_device_count == 0 node_count = int(global_device_count) // local_device_count - print("Node Count: ", - node_count, - "Local Device Size: ", - local_device_count, - "World size: ", - paddle.distributed.get_world_size(), 
- flush=True) - cluster.gen_default_config_cluster(node_count=node_count, - device_count=local_device_count) + print( + "Node Count: ", + node_count, + "Local Device Size: ", + local_device_count, + "World size: ", + paddle.distributed.get_world_size(), + flush=True, + ) + cluster.gen_default_config_cluster( + node_count=node_count, device_count=local_device_count + ) return cluster diff --git a/python/paddle/distributed/auto_parallel/cluster_v2.py b/python/paddle/distributed/auto_parallel/cluster_v2.py index 866ab338ea264e583a87f2ee1efd9680b2a25ecc..debcb078f6eb444809c4c621ca8dd413902ab216 100644 --- a/python/paddle/distributed/auto_parallel/cluster_v2.py +++ b/python/paddle/distributed/auto_parallel/cluster_v2.py @@ -75,10 +75,10 @@ class DeviceMesh(core.DeviceMesh): def __init__(self, name, mesh, dim_names=None): self._name = name - if not isinstance(mesh, list) and \ - not isinstance(mesh, np.ndarray): + if not isinstance(mesh, list) and not isinstance(mesh, np.ndarray): raise ValueError( - 'The mesh must be an instance of list or np.ndarray.') + 'The mesh must be an instance of list or np.ndarray.' + ) if isinstance(mesh, list): mesh = np.array(mesh) @@ -87,24 +87,29 @@ class DeviceMesh(core.DeviceMesh): self._shape = list(self._mesh.shape) self._device_ids = self._mesh.flatten().tolist() - assert all(isinstance(p, int) for p in self._device_ids), \ - ("All elements of the mesh be integer") - assert min( - self._device_ids) >= 0, ('All elements of the mesh must be >= 0.') + assert all( + isinstance(p, int) for p in self._device_ids + ), "All elements of the mesh be integer" + assert ( + min(self._device_ids) >= 0 + ), 'All elements of the mesh must be >= 0.' unique_device_ids = set(self._device_ids) assert len(unique_device_ids) == len( - self._device_ids), ('All elements of the mesh must be unique.') + self._device_ids + ), 'All elements of the mesh must be unique.' if dim_names is not None: - assert len(dim_names) == len(self._shape), \ - ("The length of dims_names must be same as the shape of the mesh.") + assert len(dim_names) == len( + self._shape + ), "The length of dims_names must be same as the shape of the mesh." 
self._dim_names = dim_names else: self._dim_names = ["d" + str(i) for i in range(len(self._shape))] # Follow the requirement for using pybind11 - core.DeviceMesh.__init__(self, self._name, self._shape, - self._device_ids, self._dim_names) + core.DeviceMesh.__init__( + self, self._name, self._shape, self._device_ids, self._dim_names + ) @property def mesh(self): diff --git a/python/paddle/distributed/auto_parallel/completion.py b/python/paddle/distributed/auto_parallel/completion.py index db387ef2bb2160cbf18031e427e8871e02543d74..ba879e89f8dec18dd45d38a58b971413d3eda738 100644 --- a/python/paddle/distributed/auto_parallel/completion.py +++ b/python/paddle/distributed/auto_parallel/completion.py @@ -55,7 +55,8 @@ def compute_compatible_process_mesh(process_mesh_list): compatible_result = None for process_mesh in process_mesh_list: compatible, compatible_result = _compute_compatible_process_mesh_two( - compatible_result, process_mesh) + compatible_result, process_mesh + ) if not compatible: return None return copy.deepcopy(compatible_result) @@ -78,7 +79,8 @@ def compute_compatible_dim_mapping(dim_mapping_list): compatible_result = -1 for mapping in dim_mapping_list: compatible, compatible_result = _compute_compatible_dim_mapping_two( - compatible_result, mapping) + compatible_result, mapping + ) if not compatible: return None return compatible_result @@ -86,7 +88,7 @@ def compute_compatible_dim_mapping(dim_mapping_list): def compute_compatible_dims_mapping(dims_mapping_list): """Compute the compatible dims mapping given a list of dims mapping. - Each of dims mapping is also a list. + Each of dims mapping is also a list. """ if not dims_mapping_list: return None @@ -99,7 +101,8 @@ def compute_compatible_dims_mapping(dims_mapping_list): compatible_result = [] for dim_mappings in zip(*dims_mapping_list): compatible_dim_mapping = compute_compatible_dim_mapping( - list(dim_mappings)) + list(dim_mappings) + ) if compatible_dim_mapping is None: return None compatible_result.append(compatible_dim_mapping) @@ -125,7 +128,8 @@ def _validate_dims_mapping(dims_mapping, process_mesh): return False for i in range(len(dims_mapping)): if dims_mapping[i] < -1 or dims_mapping[i] >= len( - process_mesh.topology): + process_mesh.topology + ): return False for i in range(len(process_mesh.topology)): if dims_mapping.count(i) > 1: @@ -134,7 +138,6 @@ def _validate_dims_mapping(dims_mapping, process_mesh): class Completer: - def __init__(self, dist_context): assert dist_context is not None self._dist_context = dist_context @@ -146,12 +149,15 @@ class Completer: return False tensor_desc = tensor_node.var() # Skip reader tensor - if tensor_desc.type() == core.VarDesc.VarType.READER \ - or tensor_desc.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or tensor_desc.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + tensor_desc.type() == core.VarDesc.VarType.READER + or tensor_desc.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or tensor_desc.type == core.VarDesc.VarType.STEP_SCOPES + ): return False tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) + tensor_node + ) assert tensor_dist_attr is not None if tensor_dist_attr.is_annotated("dims_mapping"): return False @@ -160,48 +166,74 @@ class Completer: dims_mapping_list = [] for pred_op_node in tensor_node.inputs: if pred_op_node.op() is not None: - if pred_op_node.op().type() == "create_py_reader" \ - or pred_op_node.op().type() == "create_double_buffer_reader" \ - or pred_op_node.op().type() == "read": + if ( + 
pred_op_node.op().type() == "create_py_reader" + or pred_op_node.op().type() + == "create_double_buffer_reader" + or pred_op_node.op().type() == "read" + ): continue - op_dist_attr = self._dist_context.get_op_dist_attr_for_graph( - pred_op_node) - if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh: + op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_graph( + pred_op_node + ) + ) + if ( + op_dist_attr.process_mesh + == tensor_dist_attr.process_mesh + ): op_dims_mapping = op_dist_attr.get_output_dims_mapping( - tensor_desc.name()) + tensor_desc.name() + ) dims_mapping_list.append(op_dims_mapping) dims_mapping_list.append(tensor_dims_mapping) compatible_dims_mapping = compute_compatible_dims_mapping( - dims_mapping_list) - if not _validate_dims_mapping(compatible_dims_mapping, - tensor_dist_attr.process_mesh): + dims_mapping_list + ) + if not _validate_dims_mapping( + compatible_dims_mapping, tensor_dist_attr.process_mesh + ): return False - if (compatible_dims_mapping is not None) and \ - (compatible_dims_mapping != tensor_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != tensor_dims_mapping + ): tensor_dist_attr.dims_mapping = compatible_dims_mapping changed = True else: dims_mapping_list = [] for succ_op_node in tensor_node.outputs: if succ_op_node.op() is not None: - if succ_op_node.op().type() == "create_py_reader" \ - or succ_op_node.op().type() == "create_double_buffer_reader" \ - or succ_op_node.op().type() == "read": + if ( + succ_op_node.op().type() == "create_py_reader" + or succ_op_node.op().type() + == "create_double_buffer_reader" + or succ_op_node.op().type() == "read" + ): continue - op_dist_attr = self._dist_context.get_op_dist_attr_for_graph( - succ_op_node) - if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh: + op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_graph( + succ_op_node + ) + ) + if ( + op_dist_attr.process_mesh + == tensor_dist_attr.process_mesh + ): op_dims_mapping = op_dist_attr.get_input_dims_mapping( - tensor_desc.name()) + tensor_desc.name() + ) dims_mapping_list.append(op_dims_mapping) dims_mapping_list.append(tensor_dims_mapping) compatible_dims_mapping = compute_compatible_dims_mapping( - dims_mapping_list) - if not _validate_dims_mapping(compatible_dims_mapping, - tensor_dist_attr.process_mesh): + dims_mapping_list + ) + if not _validate_dims_mapping( + compatible_dims_mapping, tensor_dist_attr.process_mesh + ): return False - if (compatible_dims_mapping is not None) and \ - (compatible_dims_mapping != tensor_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != tensor_dims_mapping + ): tensor_dist_attr.dims_mapping = compatible_dims_mapping changed = True return changed @@ -212,10 +244,12 @@ class Completer: return False # Skip reader op op_desc = op_node.op() - if op_desc.type() == "create_py_reader" \ - or op_desc.type() == "create_double_buffer_reader" \ - or op_desc.type() == "while" \ - or op_desc.type() == "read": + if ( + op_desc.type() == "create_py_reader" + or op_desc.type() == "create_double_buffer_reader" + or op_desc.type() == "while" + or op_desc.type() == "read" + ): return False dist_op = self._dist_context.get_dist_op_for_graph(op_node) op_dist_attr = dist_op.dist_attr @@ -227,28 +261,42 @@ class Completer: continue tensor_desc = tensor_node.var() if op_dist_attr.is_annotated_input_dims_mapping( - tensor_desc.name()): + tensor_desc.name() + ): continue - tensor_dist_attr = 
self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) - if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh: + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) + if ( + op_dist_attr.process_mesh + == tensor_dist_attr.process_mesh + ): tensor_dims_mapping = tensor_dist_attr.dims_mapping op_dims_mapping = op_dist_attr.get_input_dims_mapping( - tensor_desc.name()) - compatible_dims_mapping = compute_compatible_dims_mapping( - [op_dims_mapping, tensor_dims_mapping]) + tensor_desc.name() + ) + compatible_dims_mapping = ( + compute_compatible_dims_mapping( + [op_dims_mapping, tensor_dims_mapping] + ) + ) if not _validate_dims_mapping( - compatible_dims_mapping, - op_dist_attr.process_mesh): + compatible_dims_mapping, op_dist_attr.process_mesh + ): continue - if (compatible_dims_mapping is not None) and \ - (compatible_dims_mapping != op_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != op_dims_mapping + ): op_dist_attr.set_input_dims_mapping( - tensor_desc.name(), compatible_dims_mapping) + tensor_desc.name(), compatible_dims_mapping + ) changed = True # Find the most compatible implemenetations from the distributed operator - op_dist_impls = find_compatible_distributed_operator_impls(dist_op, - fwd=True) + op_dist_impls = find_compatible_distributed_operator_impls( + dist_op, fwd=True + ) if op_dist_impls is not None: not_compatible = True backup_op_dist_attr = copy.deepcopy(op_dist_attr) @@ -257,8 +305,10 @@ class Completer: dim_changed = op_dist_impl.update_dims_mapping(dist_op) if dim_changed: changed = True - if op_dist_impl.is_auto_compatible(dist_op) \ - and dist_op.validate_dist_attr(): + if ( + op_dist_impl.is_auto_compatible(dist_op) + and dist_op.validate_dist_attr() + ): if op_dist_impl.type == "elementwise": op_dist_attr.impl_type = "default" else: @@ -283,28 +333,42 @@ class Completer: continue tensor_desc = tensor_node.var() if op_dist_attr.is_annotated_output_dims_mapping( - tensor_desc.name()): + tensor_desc.name() + ): continue - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) - if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh: + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) + if ( + op_dist_attr.process_mesh + == tensor_dist_attr.process_mesh + ): tensor_dims_mapping = tensor_dist_attr.dims_mapping op_dims_mapping = op_dist_attr.get_output_dims_mapping( - tensor_desc.name()) - compatible_dims_mapping = compute_compatible_dims_mapping( - [op_dims_mapping, tensor_dims_mapping]) + tensor_desc.name() + ) + compatible_dims_mapping = ( + compute_compatible_dims_mapping( + [op_dims_mapping, tensor_dims_mapping] + ) + ) if not _validate_dims_mapping( - compatible_dims_mapping, - op_dist_attr.process_mesh): + compatible_dims_mapping, op_dist_attr.process_mesh + ): continue - if (compatible_dims_mapping is not None) and \ - (compatible_dims_mapping != op_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != op_dims_mapping + ): op_dist_attr.set_output_dims_mapping( - tensor_desc.name(), compatible_dims_mapping) + tensor_desc.name(), compatible_dims_mapping + ) changed = True # Find the most compatible implemenetations from the distributed operator op_dist_impls = find_compatible_distributed_operator_impls( - dist_op, fwd=False) + dist_op, fwd=False + ) if op_dist_impls is not None: not_compatible = True backup_op_dist_attr = 
copy.deepcopy(op_dist_attr) @@ -313,8 +377,10 @@ class Completer: dim_changed = op_dist_impl.update_dims_mapping(dist_op) if dim_changed: changed = True - if op_dist_impl.is_auto_compatible(dist_op) \ - and dist_op.validate_dist_attr(): + if ( + op_dist_impl.is_auto_compatible(dist_op) + and dist_op.validate_dist_attr() + ): if op_dist_impl.type == "elementwise": op_dist_attr.impl_type = "default" else: @@ -338,24 +404,33 @@ class Completer: changed = False for parent_node, child_node in self._node_pairs_between_graphs: parent_node_dist_attr = self._dist_context.get_dist_attr_for_graph( - parent_node) + parent_node + ) child_node_dist_attr = self._dist_context.get_dist_attr_for_graph( - child_node) - if parent_node_dist_attr.process_mesh != child_node_dist_attr.process_mesh: + child_node + ) + if ( + parent_node_dist_attr.process_mesh + != child_node_dist_attr.process_mesh + ): continue parent_node_dims_mapping = parent_node_dist_attr.dims_mapping child_node_dims_mapping = child_node_dist_attr.dims_mapping compatible_dims_mapping = compute_compatible_dims_mapping( - [parent_node_dims_mapping, child_node_dims_mapping]) - if not _validate_dims_mapping(compatible_dims_mapping, - parent_node_dist_attr.process_mesh): + [parent_node_dims_mapping, child_node_dims_mapping] + ) + if not _validate_dims_mapping( + compatible_dims_mapping, parent_node_dist_attr.process_mesh + ): return False - if (compatible_dims_mapping is not None) \ - and (compatible_dims_mapping != parent_node_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != parent_node_dims_mapping + ): parent_node_dist_attr.dims_mapping = compatible_dims_mapping changed = True - if (compatible_dims_mapping is not None) \ - and (compatible_dims_mapping != child_node_dims_mapping): + if (compatible_dims_mapping is not None) and ( + compatible_dims_mapping != child_node_dims_mapping + ): child_node_dist_attr.dims_mapping = compatible_dims_mapping changed = True return changed @@ -365,11 +440,15 @@ class Completer: op_nodes = self._dist_context._serial_ordered_op_nodes # NOTE: this list may be changed if Paddle changes the existing rules. 
related_reader_ops = [ - "create_py_reader", "create_double_buffer_reader", "read" + "create_py_reader", + "create_double_buffer_reader", + "read", ] for op_node in op_nodes: - if op_node.op() is not None \ - and op_node.op().type() in related_reader_ops: + if ( + op_node.op() is not None + and op_node.op().type() in related_reader_ops + ): continue op_dist_attr = self._dist_context.get_dist_attr_for_graph(op_node) for tensor_node in op_node.outputs: @@ -377,11 +456,18 @@ class Completer: if tensor_node.var().type() == core.VarDesc.VarType.READER: continue tensor_desc = tensor_node.var() - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) - if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh: + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) + if ( + op_dist_attr.process_mesh + == tensor_dist_attr.process_mesh + ): op_dims_mapping = op_dist_attr.get_output_dims_mapping( - tensor_desc.name()) + tensor_desc.name() + ) tensor_dist_attr.dims_mapping = op_dims_mapping def _update_dims_mapping(self): @@ -390,17 +476,22 @@ class Completer: while not reach_fix_point: changed = False for is_fwd in [True, False]: - all_nodes = self._dist_context.serial_ordered_nodes \ - if is_fwd else reversed(self._dist_context.serial_ordered_nodes) + all_nodes = ( + self._dist_context.serial_ordered_nodes + if is_fwd + else reversed(self._dist_context.serial_ordered_nodes) + ) for node in all_nodes: if node.is_var() and node.var() is not None: tensor_changed = self._update_tensor_node_dims_mapping( - node, fwd=is_fwd) + node, fwd=is_fwd + ) if tensor_changed: changed = True if node.is_op() and node.op() is not None: op_changed = self._update_op_node_dims_mapping( - node, fwd=is_fwd) + node, fwd=is_fwd + ) if op_changed: changed = True graph_changed = self._update_dims_mapping_between_graphs() @@ -419,12 +510,16 @@ class Completer: if not op_dist_attr.is_annotated("process_mesh"): process_mesh = op_dist_attr.process_mesh nearest_op_dis_attr = self._dist_context.get_dist_attr_for_graph( - nearest_op_node) + nearest_op_node + ) nearest_process_mesh = nearest_op_dis_attr.process_mesh compatible_process_mesh = compute_compatible_process_mesh( - [process_mesh, nearest_process_mesh]) - if compatible_process_mesh is not None \ - and process_mesh != compatible_process_mesh: + [process_mesh, nearest_process_mesh] + ) + if ( + compatible_process_mesh is not None + and process_mesh != compatible_process_mesh + ): op_dist_attr.process_mesh = compatible_process_mesh # Skip the process_mesh setting of inputs and outputs of while_op if op_dist_attr.op_type == "while": @@ -432,43 +527,60 @@ class Completer: # Set the process mesh of the op node's leaf-inputs for tensor_node in op_node.inputs: if tensor_node.is_var() and tensor_node.var() is not None: - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) if tensor_dist_attr.is_annotated("process_mesh"): continue # Skip the non-leaf var node if len(tensor_node.inputs) != 0: continue compatible_process_mesh = compute_compatible_process_mesh( - [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh]) - if compatible_process_mesh is not None \ - and tensor_dist_attr.process_mesh != compatible_process_mesh: + [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh] + ) + if ( + compatible_process_mesh is not None + and tensor_dist_attr.process_mesh != 
compatible_process_mesh + ): tensor_dist_attr.process_mesh = compatible_process_mesh # Set the process mesh of the op node's outputs for tensor_node in op_node.outputs: if tensor_node.is_var() and tensor_node.var() is not None: - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) if tensor_dist_attr.is_annotated("process_mesh"): continue compatible_process_mesh = compute_compatible_process_mesh( - [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh]) - if compatible_process_mesh is not None \ - and tensor_dist_attr.process_mesh != compatible_process_mesh: + [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh] + ) + if ( + compatible_process_mesh is not None + and tensor_dist_attr.process_mesh != compatible_process_mesh + ): tensor_dist_attr.process_mesh = compatible_process_mesh def _update_process_mesh_for_specials(self): - def _find_nearest_tensor_node_before(nodes, idx, var_name): for node in reversed(nodes[:idx]): - if node.is_var() and node.var() is not None \ - and node.var().name() == var_name: + if ( + node.is_var() + and node.var() is not None + and node.var().name() == var_name + ): return node def _find_nearest_tensor_node_after(nodes, idx, var_name): - for node in nodes[idx + 1:]: - if node.is_var() and node.var() is not None \ - and node.var().name() == var_name: + for node in nodes[idx + 1 :]: + if ( + node.is_var() + and node.var() is not None + and node.var().name() == var_name + ): return node def _find_nodes_related_to_cond(source_node): @@ -486,28 +598,42 @@ class Completer: neighbors = cur.inputs + cur.outputs for node in neighbors: if node.is_var() and node.var() is not None: - if node.var().type() != core.VarDesc.VarType.READER \ - and len(node.var().shape()) == 1: + if ( + node.var().type() != core.VarDesc.VarType.READER + and len(node.var().shape()) == 1 + ): frontier.append(node) related_nodes.append(node) if node.is_op() and node.op() is not None: flag = True - if node.op().type() == "create_py_reader" \ - or node.op().type() == "create_double_buffer_reader" \ - or node.op().type() == "read": + if ( + node.op().type() == "create_py_reader" + or node.op().type() == "create_double_buffer_reader" + or node.op().type() == "read" + ): flag = False for tensor_node in node.inputs: - if tensor_node.is_var() and tensor_node.var( - ) is not None: - if tensor_node.var().type() in __not_shape_var_type__ \ - or len(tensor_node.var().shape()) != 1: + if ( + tensor_node.is_var() + and tensor_node.var() is not None + ): + if ( + tensor_node.var().type() + in __not_shape_var_type__ + or len(tensor_node.var().shape()) != 1 + ): flag = False break for tensor_node in node.outputs: - if tensor_node.is_var() and tensor_node.var( - ) is not None: - if tensor_node.var().type() in __not_shape_var_type__ \ - or len(tensor_node.var().shape()) != 1: + if ( + tensor_node.is_var() + and tensor_node.var() is not None + ): + if ( + tensor_node.var().type() + in __not_shape_var_type__ + or len(tensor_node.var().shape()) != 1 + ): flag = False break if flag: @@ -532,27 +658,32 @@ class Completer: dims_mapping = dist_attr.get_output_dims_mapping(arg_name) for _ in dims_mapping: new_dims_mapping.append(-1) - dist_attr.set_output_dims_mapping(arg_name, - new_dims_mapping) + dist_attr.set_output_dims_mapping( + arg_name, new_dims_mapping + ) # Amend the process meshes related to while_op for while_op_node, while_op_node_idx in self._while_op_nodes.values(): 
sub_graph_id = while_op_node.op()._block_attr_id("sub_block") sub_graph = self._dist_context.serial_graph.get_sub_graph( - sub_graph_id) + sub_graph_id + ) sub_graph_nodes = list(sub_graph.all_nodes()) while_dist_op = self._dist_context.get_dist_op_for_graph( - while_op_node) + while_op_node + ) while_op_dist_attr = while_dist_op.dist_attr # Step 1: set the process mesh of while_op to the merged process mesh of its subblock merged_process_mesh = while_op_dist_attr.process_mesh for node in sub_graph_nodes: - if (node.is_var() and node.var() is not None) \ - or (node.is_op() and node.op() is not None): + if (node.is_var() and node.var() is not None) or ( + node.is_op() and node.op() is not None + ): dist_attr = self._dist_context.get_dist_attr_for_graph(node) merged_process_mesh = merge_process_mesh_two( - merged_process_mesh, dist_attr.process_mesh) + merged_process_mesh, dist_attr.process_mesh + ) while_op_dist_attr.process_mesh = merged_process_mesh _make_dims_mapping_replicate(while_op_dist_attr) @@ -562,97 +693,143 @@ class Completer: cond_tensor_name = while_op_node.op().input("Condition")[0] cond_tensor_node = None for node in while_op_node.inputs: - if node.is_var() and node.var() is not None \ - and node.var().name() == cond_tensor_name: + if ( + node.is_var() + and node.var() is not None + and node.var().name() == cond_tensor_name + ): cond_tensor_node = node cond_tensor_related_nodes.append(cond_tensor_node) break cond_tensor_related_nodes.extend( - _find_nodes_related_to_cond(cond_tensor_node)) + _find_nodes_related_to_cond(cond_tensor_node) + ) # Step 2.2: Find related nodes of cond var in the subgraph of while_op cond_tensor_node = None for node in reversed(sub_graph_nodes): - if node.is_var() and node.var() is not None \ - and node.var().name() == cond_tensor_name \ - and len(node.outputs) == 0: + if ( + node.is_var() + and node.var() is not None + and node.var().name() == cond_tensor_name + and len(node.outputs) == 0 + ): cond_tensor_node = node break cond_tensor_related_nodes.extend( - _find_nodes_related_to_cond(cond_tensor_node)) + _find_nodes_related_to_cond(cond_tensor_node) + ) # Step 2.3: Add the StepScops output of while_op stepscopes_tensor_name = while_op_node.op().output("StepScopes")[0] stepscopes_tensor_node = None for output_node in while_op_node.outputs: - if output_node.is_var() and output_node.var() is not None \ - and output_node.var().name() == stepscopes_tensor_name: + if ( + output_node.is_var() + and output_node.var() is not None + and output_node.var().name() == stepscopes_tensor_name + ): stepscopes_tensor_node = output_node cond_tensor_related_nodes.append(stepscopes_tensor_node) # Step 2.4: Set the process meshes of all nodes related to cond var to the process mesh of while op for node in cond_tensor_related_nodes: tensor_dist_attr = self._dist_context.get_dist_attr_for_graph( - node) + node + ) tensor_dist_attr.process_mesh = merged_process_mesh _make_dims_mapping_replicate(tensor_dist_attr) # Step 3: set the process meshes of the inputs in while_op to the process meshes of the outside input nodes while_op_inputs_dist_attrs = while_op_dist_attr.inputs_dist_attrs - for tensor_name, tensor_dist_attr in while_op_inputs_dist_attrs.items( - ): + for ( + tensor_name, + tensor_dist_attr, + ) in while_op_inputs_dist_attrs.items(): nearest_tensor_node = _find_nearest_tensor_node_before( - self._dist_context.serial_ordered_nodes, while_op_node_idx, - tensor_name) - nearest_tensor_dist_attr = self._dist_context.get_dist_attr_for_graph( - nearest_tensor_node) - 
tensor_dist_attr.process_mesh = nearest_tensor_dist_attr.process_mesh + self._dist_context.serial_ordered_nodes, + while_op_node_idx, + tensor_name, + ) + nearest_tensor_dist_attr = ( + self._dist_context.get_dist_attr_for_graph( + nearest_tensor_node + ) + ) + tensor_dist_attr.process_mesh = ( + nearest_tensor_dist_attr.process_mesh + ) # Step 4: set the process meshes of the outputs in while_op to the process meshes of the outside output nodes while_op_outputs_dist_attrs = while_op_dist_attr.outputs_dist_attrs - for tensor_name, tensor_dist_attr in while_op_outputs_dist_attrs.items( - ): + for ( + tensor_name, + tensor_dist_attr, + ) in while_op_outputs_dist_attrs.items(): nearest_tensor_node = _find_nearest_tensor_node_before( - self._dist_context.serial_ordered_nodes, while_op_node_idx, - tensor_name) + self._dist_context.serial_ordered_nodes, + while_op_node_idx, + tensor_name, + ) if nearest_tensor_node is None: nearest_tensor_node = _find_nearest_tensor_node_after( self._dist_context.serial_ordered_nodes, - while_op_node_idx, tensor_name) - nearest_tensor_dist_attr = self._dist_context.get_dist_attr_for_graph( - nearest_tensor_node) - tensor_dist_attr.process_mesh = nearest_tensor_dist_attr.process_mesh + while_op_node_idx, + tensor_name, + ) + nearest_tensor_dist_attr = ( + self._dist_context.get_dist_attr_for_graph( + nearest_tensor_node + ) + ) + tensor_dist_attr.process_mesh = ( + nearest_tensor_dist_attr.process_mesh + ) # Amend the process meshes related to array for array_node_list in self._array_nodes.values(): merged_process_mesh = None for array_node in array_node_list: dist_attr = self._dist_context.get_dist_attr_for_graph( - array_node) + array_node + ) merged_process_mesh = merge_process_mesh_two( - merged_process_mesh, dist_attr.process_mesh) + merged_process_mesh, dist_attr.process_mesh + ) for array_node in array_node_list: dist_attr = self._dist_context.get_dist_attr_for_graph( - array_node) + array_node + ) dist_attr.process_mesh = merged_process_mesh _make_dims_mapping_replicate(dist_attr) def _update_process_mesh_between_graphs(self): for parent_node, child_node in self._node_pairs_between_graphs: parent_node_dist_attr = self._dist_context.get_dist_attr_for_graph( - parent_node) + parent_node + ) child_node_dist_attr = self._dist_context.get_dist_attr_for_graph( - child_node) - parent_node_dist_attr.process_mesh = child_node_dist_attr.process_mesh - compatible_process_mesh = compute_compatible_process_mesh([ - parent_node_dist_attr.process_mesh, + child_node + ) + parent_node_dist_attr.process_mesh = ( child_node_dist_attr.process_mesh - ]) - if compatible_process_mesh is not None \ - and parent_node_dist_attr.process_mesh != compatible_process_mesh: + ) + compatible_process_mesh = compute_compatible_process_mesh( + [ + parent_node_dist_attr.process_mesh, + child_node_dist_attr.process_mesh, + ] + ) + if ( + compatible_process_mesh is not None + and parent_node_dist_attr.process_mesh + != compatible_process_mesh + ): parent_node_dist_attr.process_mesh = compatible_process_mesh - if compatible_process_mesh is not None \ - and child_node_dist_attr.process_mesh != compatible_process_mesh: + if ( + compatible_process_mesh is not None + and child_node_dist_attr.process_mesh != compatible_process_mesh + ): child_node_dist_attr.process_mesh = compatible_process_mesh def _update_process_mesh(self): @@ -661,8 +838,9 @@ class Completer: # Step 1: Set the annotated process meshes from tensors to the first ops using them ordered_tensor_nodes = 
self._dist_context._serial_ordered_tensor_nodes for tensor_node in ordered_tensor_nodes: - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph(tensor_node) + ) if not tensor_dist_attr.is_annotated("process_mesh"): continue first_op_node = None @@ -680,13 +858,18 @@ class Completer: if first_op_node is None: continue op_dist_attr = self._dist_context.get_dist_attr_for_graph( - first_op_node) + first_op_node + ) if op_dist_attr is not None and not op_dist_attr.is_annotated( - "process_mesh"): + "process_mesh" + ): compatible_process_mesh = compute_compatible_process_mesh( - [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh]) - if compatible_process_mesh is not None \ - and op_dist_attr.process_mesh != compatible_process_mesh: + [tensor_dist_attr.process_mesh, op_dist_attr.process_mesh] + ) + if ( + compatible_process_mesh is not None + and op_dist_attr.process_mesh != compatible_process_mesh + ): op_dist_attr.process_mesh = compatible_process_mesh # Step 2: set the process meshes of ops with the nearest op before them @@ -694,8 +877,10 @@ class Completer: idx_of_first_op_node_has_process_mesh = -1 for idx, op_node in enumerate(ordered_op_nodes): op_dist_attr = self._dist_context.get_dist_attr_for_graph(op_node) - if op_dist_attr.process_mesh is not None \ - and idx_of_first_op_node_has_process_mesh == -1: + if ( + op_dist_attr.process_mesh is not None + and idx_of_first_op_node_has_process_mesh == -1 + ): idx_of_first_op_node_has_process_mesh = idx # Reuse the following method to set the related tensors for same op node self._update_process_mesh_by_nearest(op_node, op_node) @@ -703,17 +888,20 @@ class Completer: if idx_of_first_op_node_has_process_mesh + 1 > len(ordered_op_nodes): return None for idx, op_node in enumerate( - ordered_op_nodes[idx_of_first_op_node_has_process_mesh + 1:]): + ordered_op_nodes[idx_of_first_op_node_has_process_mesh + 1 :] + ): original_idx = idx_of_first_op_node_has_process_mesh + idx + 1 nearest_op_node = ordered_op_nodes[original_idx - 1] nearest_op_dist_attr = self._dist_context.get_dist_attr_for_graph( - nearest_op_node) + nearest_op_node + ) op_dist_attr = self._dist_context.get_dist_attr_for_graph(op_node) assert nearest_op_dist_attr.process_mesh is not None self._update_process_mesh_by_nearest(op_node, nearest_op_node) # Step 2.3: set the process meshes of ops by the nearest op node before the first op node nearest_op_node = ordered_op_nodes[ - idx_of_first_op_node_has_process_mesh] + idx_of_first_op_node_has_process_mesh + ] for op_node in ordered_op_nodes[:idx_of_first_op_node_has_process_mesh]: self._update_process_mesh_by_nearest(op_node, nearest_op_node) @@ -750,21 +938,31 @@ class Completer: if node.is_var() and node.var() is not None: if node.node.graph_id() != 0: for before_node in reversed(all_nodes[:idx]): - if before_node.is_var() and before_node.var() is not None \ - and before_node.node.graph_id() == node.node.graph_id() - 1 \ - and before_node.var().name() == node.var().name(): + if ( + before_node.is_var() + and before_node.var() is not None + and before_node.node.graph_id() + == node.node.graph_id() - 1 + and before_node.var().name() == node.var().name() + ): self._node_pairs_between_graphs.append( - (before_node, node)) - for after_node in all_nodes[idx + 1:]: - if after_node.is_var() and after_node.var() is not None \ - and after_node.node.graph_id() == node.node.graph_id() - 1 \ - and after_node.var().name() == 
node.var().name(): + (before_node, node) + ) + for after_node in all_nodes[idx + 1 :]: + if ( + after_node.is_var() + and after_node.var() is not None + and after_node.node.graph_id() + == node.node.graph_id() - 1 + and after_node.var().name() == node.var().name() + ): self._node_pairs_between_graphs.append( - (after_node, node)) + (after_node, node) + ) self._has_prepared = True def complete_forward_annotation(self, serial_main_program=None): - """ Complete annotation for the partial annotated serial_main_program. + """Complete annotation for the partial annotated serial_main_program. Arguments: serial_main_program: partial annotated serial_main_program. Returns:e @@ -818,8 +1016,9 @@ class Completer: # TODO: we must ensure the world process group contains all ranks ranks = get_world_process_group().ranks process_mesh = ProcessMesh(ranks) - for dist_tensor in self._dist_context._dist_tensors_for_program.values( - ): + for ( + dist_tensor + ) in self._dist_context._dist_tensors_for_program.values(): serial_tensor = dist_tensor.serial_tensor tensor_dist_attr = dist_tensor.dist_attr tensor_dist_attr.process_mesh = process_mesh @@ -838,27 +1037,35 @@ class Completer: if not serial_tensor.is_parameter: if arg_name not in input_xshape_arg_names: old_dims_mapping = op_dist_attr.get_input_dims_mapping( - arg_name) + arg_name + ) if len(old_dims_mapping) > 0: new_dims_mapping = [0] + [ -1 for _ in range(len(old_dims_mapping) - 1) ] op_dist_attr.set_input_dims_mapping( - arg_name, new_dims_mapping) + arg_name, new_dims_mapping + ) else: old_dims_mapping = op_dist_attr.get_input_dims_mapping( - arg_name) + arg_name + ) if len(old_dims_mapping) > 1: new_dims_mapping = [-1, 0] + [ -1 for _ in range(len(old_dims_mapping) - 2) ] op_dist_attr.set_input_dims_mapping( - arg_name, new_dims_mapping) + arg_name, new_dims_mapping + ) # Set tensor's dims_mapping by the op's - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - serial_tensor) - tensor_dist_attr.dims_mapping = op_dist_attr.get_input_dims_mapping( - arg_name) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + serial_tensor + ) + ) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_input_dims_mapping(arg_name) + ) output_xshape_arg_names = [] if "XShape" in op_desc.output_names(): output_xshape_arg_names = op_desc.output("XShape") @@ -867,37 +1074,48 @@ class Completer: if not serial_tensor.is_parameter: if arg_name not in output_xshape_arg_names: old_dims_mapping = op_dist_attr.get_output_dims_mapping( - arg_name) + arg_name + ) if len(old_dims_mapping) > 0: new_dims_mapping = [0] + [ -1 for _ in range(len(old_dims_mapping) - 1) ] op_dist_attr.set_output_dims_mapping( - arg_name, new_dims_mapping) + arg_name, new_dims_mapping + ) else: old_dims_mapping = op_dist_attr.get_output_dims_mapping( - arg_name) + arg_name + ) if len(old_dims_mapping) > 1: new_dims_mapping = [-1, 0] + [ -1 for _ in range(len(old_dims_mapping) - 2) ] op_dist_attr.set_output_dims_mapping( - arg_name, new_dims_mapping) + arg_name, new_dims_mapping + ) # Set tensor's dims_mapping by the op's - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - serial_tensor) - tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping( - arg_name) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + serial_tensor + ) + ) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_output_dims_mapping(arg_name) + ) op_dist_impls = find_compatible_distributed_operator_impls( - dist_op, 
partial=False) + dist_op, partial=False + ) if op_dist_impls is not None: not_compatible = True backup_op_dist_attr = copy.deepcopy(op_dist_attr) for op_dist_impl in op_dist_impls: op_dist_impl.update_dims_mapping(dist_op) - if op_dist_impl.is_auto_compatible(dist_op) \ - and dist_op.validate_dist_attr(): + if ( + op_dist_impl.is_auto_compatible(dist_op) + and dist_op.validate_dist_attr() + ): op_dist_attr.impl_type = op_dist_impl.type op_dist_attr.impl_idx = op_dist_impl.idx not_compatible = False @@ -939,24 +1157,36 @@ class Completer: # Use the first op to set the tensor dist attr if tensor_name in has_set_dist_attr: continue - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) - tensor_dist_attr.process_mesh = op_dist_attr.process_mesh - tensor_dist_attr.dims_mapping = op_dist_attr.get_input_dims_mapping( - tensor_name) if tensor.is_parameter else [ - -1 for i in tensor_desc.shape() - ] + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) + tensor_dist_attr.process_mesh = ( + op_dist_attr.process_mesh + ) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_input_dims_mapping(tensor_name) + if tensor.is_parameter + else [-1 for i in tensor_desc.shape()] + ) has_set_dist_attr.add(tensor_name) for tensor_node in node.outputs: if tensor_node.is_var() and tensor_node.var() is not None: tensor_name = tensor_node.var().name() if tensor_name in has_set_dist_attr: continue - tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph( - tensor_node) - tensor_dist_attr.process_mesh = op_dist_attr.process_mesh - tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping( - tensor_name) + tensor_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_graph( + tensor_node + ) + ) + tensor_dist_attr.process_mesh = ( + op_dist_attr.process_mesh + ) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_output_dims_mapping(tensor_name) + ) has_set_dist_attr.add(tensor_name) self._update_process_mesh_for_specials() @@ -1007,79 +1237,108 @@ class Completer: for idx in range(0, len(ops)): op = ops[idx] if int(op.attr('op_role')) == int( - core.op_proto_and_checker_maker.OpRole.Forward): + core.op_proto_and_checker_maker.OpRole.Forward + ): continue if int(op.attr('op_role')) == int( - core.op_proto_and_checker_maker.OpRole.Backward) and int( - ops[idx - 1].attr('op_role')) == int( - core.op_proto_and_checker_maker.OpRole.Forward): + core.op_proto_and_checker_maker.OpRole.Backward + ) and int(ops[idx - 1].attr('op_role')) == int( + core.op_proto_and_checker_maker.OpRole.Forward + ): appended_grad_times += 1 if int(op.attr('op_role')) == int( - int(core.op_proto_and_checker_maker.OpRole.Backward) - | int(core.op_proto_and_checker_maker.OpRole.Loss)): + int(core.op_proto_and_checker_maker.OpRole.Backward) + | int(core.op_proto_and_checker_maker.OpRole.Loss) + ): assert op.type == "fill_constant" break # complete the annotation of grad op (xxx_grad op or sum op) # xxx_grad op will have a corresponding forward op in grad_op_id_to_op_id grad_op = ops[idx] - if grad_op.desc.original_id( - ) in dist_op_context.grad_op_id_to_op_id: + if ( + grad_op.desc.original_id() + in dist_op_context.grad_op_id_to_op_id + ): # TODO support the case where one forward op corresponding to multiple xxx_grad op forward_op = _get_op_by_id( - ops, dist_op_context.grad_op_id_to_op_id[ - grad_op.desc.original_id()]) + ops, + dist_op_context.grad_op_id_to_op_id[ + grad_op.desc.original_id() + ], + ) assert forward_op is not None - 
fwd_op_dist_attr = self._dist_context.get_op_dist_attr_for_program( - forward_op) + fwd_op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_program(forward_op) + ) fwd_op_process_mesh = fwd_op_dist_attr.process_mesh grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = fwd_op_process_mesh for input_name in grad_op.input_arg_names: - if input_name not in forward_op.input_arg_names and input_name not in forward_op.output_arg_names: + if ( + input_name not in forward_op.input_arg_names + and input_name not in forward_op.output_arg_names + ): if input_name in grad_var_to_var[appended_grad_times]: fwd_name = grad_var_to_var[appended_grad_times][ - input_name] - ref_dims_mapping = fwd_op_dist_attr.get_output_dims_mapping( - fwd_name) + input_name + ] + ref_dims_mapping = ( + fwd_op_dist_attr.get_output_dims_mapping( + fwd_name + ) + ) else: input_var = vars[input_name] ref_dims_mapping = self._dist_context.get_tensor_dist_attr_for_program( - input_var).dims_mapping + input_var + ).dims_mapping else: if fwd_op_dist_attr.get_input_dims_mapping(input_name): - ref_dims_mapping = fwd_op_dist_attr.get_input_dims_mapping( - input_name) + ref_dims_mapping = ( + fwd_op_dist_attr.get_input_dims_mapping( + input_name + ) + ) else: - ref_dims_mapping = fwd_op_dist_attr.get_output_dims_mapping( - input_name) - assert ref_dims_mapping is not None, "[{}] 's dims mapping is NONE".format( - input_name) + ref_dims_mapping = ( + fwd_op_dist_attr.get_output_dims_mapping( + input_name + ) + ) + assert ( + ref_dims_mapping is not None + ), "[{}] 's dims mapping is NONE".format(input_name) grad_op_dist_attr.set_input_dims_mapping( - input_name, ref_dims_mapping) + input_name, ref_dims_mapping + ) for output_name in grad_op.output_arg_names: assert output_name in grad_var_to_var[appended_grad_times] fwd_name = grad_var_to_var[appended_grad_times][output_name] ref_dims_mapping = fwd_op_dist_attr.get_input_dims_mapping( - fwd_name) + fwd_name + ) # var output_var = vars[output_name] tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.dims_mapping = ref_dims_mapping tensor_dist_attr.process_mesh = fwd_op_process_mesh self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr.set_output_dims_mapping( - output_name, ref_dims_mapping) + output_name, ref_dims_mapping + ) self._dist_context.set_op_dist_attr_for_program( - grad_op, grad_op_dist_attr) + grad_op, grad_op_dist_attr + ) # grad ops that have not a corresponding mapping in grad_op_id_to_op_id else: @@ -1087,14 +1346,20 @@ class Completer: if grad_op.type == 'sum': assert all(map(_is_grad_var_name, grad_op.input_arg_names)) output_name = grad_op.output_arg_names[0] - assert output_name in grad_var_to_var[appended_grad_times], \ - "sum op's output '{}' has no corresponding var".format( - output_name) + assert ( + output_name in grad_var_to_var[appended_grad_times] + ), "sum op's output '{}' has no corresponding var".format( + output_name + ) ref_fwd_var_name = grad_var_to_var[appended_grad_times][ - output_name] + output_name + ] ref_fwd_var = vars[ref_fwd_var_name] - ref_fwd_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - ref_fwd_var) + ref_fwd_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + ref_fwd_var + ) + ) ref_fwd_dims_mapping = ref_fwd_dist_attr.dims_mapping ref_fwd_process_mesh = ref_fwd_dist_attr.process_mesh # output @@ -1103,21 +1368,27 @@ class Completer: tensor_dist_attr.process_mesh 
= ref_fwd_process_mesh output_var = vars[output_name] self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = ref_fwd_process_mesh for var_name in grad_op.input_arg_names: grad_op_dist_attr.set_input_dims_mapping( - var_name, ref_fwd_dims_mapping) + var_name, ref_fwd_dims_mapping + ) grad_op_dist_attr.set_output_dims_mapping( - output_name, ref_fwd_dims_mapping) + output_name, ref_fwd_dims_mapping + ) elif grad_op.type == 'fill_any_like': ref_var_name = grad_op.input_arg_names[0] ref_var = vars[ref_var_name] - ref_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - ref_var) + ref_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + ref_var + ) + ) ref_dims_mapping = ref_dist_attr.dims_mapping ref_process_mesh = ref_dist_attr.process_mesh # output @@ -1127,24 +1398,29 @@ class Completer: output_var_name = grad_op.output_arg_names[0] output_var = vars[output_var_name] self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = ref_process_mesh grad_op_dist_attr.set_input_dims_mapping( - ref_var_name, ref_dims_mapping) + ref_var_name, ref_dims_mapping + ) grad_op_dist_attr.set_output_dims_mapping( - output_var_name, ref_dims_mapping) + output_var_name, ref_dims_mapping + ) elif grad_op.type in ['shape', 'fill_constant']: continue else: - raise ValueError("got unexpect op [{}]".format( - str(grad_op.type))) + raise ValueError( + "got unexpect op [{}]".format(str(grad_op.type)) + ) self._dist_context.set_op_dist_attr_for_program( - grad_op, grad_op_dist_attr) + grad_op, grad_op_dist_attr + ) def complete_backward_annotation(self, serial_main_program=None): """Complete the annotation of vars and ops in the backward phase for parallel program.""" @@ -1161,9 +1437,9 @@ class Completer: def _get_forward_varname_from_grad_varname(grad_var_name): assert _is_grad_var_name( - grad_var_name), "[{}] is not a grad varnme.".format( - grad_var_name) - return grad_var_name[:grad_var_name.find("@GRAD")] + grad_var_name + ), "[{}] is not a grad varnme.".format(grad_var_name) + return grad_var_name[: grad_var_name.find("@GRAD")] def _get_op_by_id(ops, id): for op in ops: @@ -1174,160 +1450,216 @@ class Completer: first_backward_op_idx = -1 for idx, op in enumerate(serial_main_program.global_block().ops): if int(op.attr('op_role')) == int( - int(core.op_proto_and_checker_maker.OpRole.Backward) - | int(core.op_proto_and_checker_maker.OpRole.Loss)): + int(core.op_proto_and_checker_maker.OpRole.Backward) + | int(core.op_proto_and_checker_maker.OpRole.Loss) + ): assert op.type == "fill_constant" first_backward_op_idx = idx break - assert first_backward_op_idx >= 0, "No backward procedure found in this program." + assert ( + first_backward_op_idx >= 0 + ), "No backward procedure found in this program." 
ops = list(serial_main_program.global_block().ops) vars = serial_main_program.global_block().vars dist_op_context = self._dist_context.dist_op_context - grad_var_to_var = dist_op_context.grad_var_to_var[len( - dist_op_context.grad_var_to_var)] + grad_var_to_var = dist_op_context.grad_var_to_var[ + len(dist_op_context.grad_var_to_var) + ] for idx in range(first_backward_op_idx, len(ops)): # complete the initial grad loss op if idx == first_backward_op_idx: assert ops[idx].type == "fill_constant" - assert len( - ops[idx].input_arg_names - ) == 0, "first backward op should has only ONE output, but got [{}]".format( - len(ops[idx].input_arg_names)) - assert len( - ops[idx].output_arg_names - ) == 1, "first backward op should has only ONE output, but got [{}]".format( - len(ops[idx].output_arg_names)) + assert ( + len(ops[idx].input_arg_names) == 0 + ), "first backward op should has only ONE output, but got [{}]".format( + len(ops[idx].input_arg_names) + ) + assert ( + len(ops[idx].output_arg_names) == 1 + ), "first backward op should has only ONE output, but got [{}]".format( + len(ops[idx].output_arg_names) + ) grad_var = vars[ops[idx].output_arg_names[0]] forward_var_name = _get_forward_varname_from_grad_varname( - grad_var.name) + grad_var.name + ) forward_var = vars[forward_var_name] # TODO complete other attribte for grad var tensor_dist_attr = TensorDistributedAttribute() - process_mesh = self._dist_context.get_tensor_dist_attr_for_program( - forward_var).process_mesh - dims_mapping = self._dist_context.get_tensor_dist_attr_for_program( - forward_var).dims_mapping + process_mesh = ( + self._dist_context.get_tensor_dist_attr_for_program( + forward_var + ).process_mesh + ) + dims_mapping = ( + self._dist_context.get_tensor_dist_attr_for_program( + forward_var + ).dims_mapping + ) tensor_dist_attr.dims_mapping = dims_mapping tensor_dist_attr.process_mesh = process_mesh self._dist_context.set_tensor_dist_attr_for_program( - grad_var, tensor_dist_attr) + grad_var, tensor_dist_attr + ) op_dist_attr = OperatorDistributedAttribute() op_dist_attr.process_mesh = process_mesh - op_dist_attr.set_output_dims_mapping(grad_var.name, - dims_mapping) + op_dist_attr.set_output_dims_mapping( + grad_var.name, dims_mapping + ) self._dist_context.set_op_dist_attr_for_program( - ops[idx], op_dist_attr) + ops[idx], op_dist_attr + ) continue # complete the annotation of grad op (xxx_grad op or sum op) # xxx_grad op will have a corresponding forward op in grad_op_id_to_op_id grad_op = ops[idx] - if grad_op.desc.original_id( - ) in dist_op_context.grad_op_id_to_op_id: + if ( + grad_op.desc.original_id() + in dist_op_context.grad_op_id_to_op_id + ): # TODO support the case where one forward op corresponding to multiple xxx_grad op forward_op = _get_op_by_id( ops[:first_backward_op_idx], dist_op_context.grad_op_id_to_op_id[ - grad_op.desc.original_id()]) + grad_op.desc.original_id() + ], + ) assert forward_op is not None if grad_op.type == "concat" and forward_op.type == "split": - forward_op_dist_attr = self._dist_context.get_op_dist_attr_for_program( - forward_op) + forward_op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_program( + forward_op + ) + ) output_var = vars[grad_op.desc.output('Out')[0]] split_input_var_name = forward_op.input("X")[0] - ref_dims_mapping = forward_op_dist_attr.get_input_dims_mapping( - split_input_var_name) + ref_dims_mapping = ( + forward_op_dist_attr.get_input_dims_mapping( + split_input_var_name + ) + ) ref_mesh = forward_op_dist_attr.process_mesh grad_op_dist_attr = 
OperatorDistributedAttribute() for input_name in grad_op.input_arg_names: grad_op_dist_attr.set_input_dims_mapping( - input_name, ref_dims_mapping) + input_name, ref_dims_mapping + ) output_var_dist_attr = TensorDistributedAttribute() output_var_dist_attr.dims_mapping = ref_dims_mapping output_var_dist_attr.process_mesh = ref_mesh self._dist_context.set_tensor_dist_attr_for_program( - output_var, output_var_dist_attr) + output_var, output_var_dist_attr + ) grad_op_dist_attr.set_output_dims_mapping( - output_var.name, ref_dims_mapping) + output_var.name, ref_dims_mapping + ) grad_op_dist_attr.process_mesh = ref_mesh self._dist_context.set_op_dist_attr_for_program( - grad_op, grad_op_dist_attr) + grad_op, grad_op_dist_attr + ) grad_op_dist_attr.impl_type = fwd_op_dist_attr.impl_type grad_op_dist_attr.impl_idx = fwd_op_dist_attr.impl_idx continue - fwd_op_dist_attr = self._dist_context.get_op_dist_attr_for_program( - forward_op) + fwd_op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_program(forward_op) + ) fwd_op_process_mesh = fwd_op_dist_attr.process_mesh grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = fwd_op_process_mesh for input_name in grad_op.input_arg_names: - if input_name not in forward_op.input_arg_names and input_name not in forward_op.output_arg_names: + if ( + input_name not in forward_op.input_arg_names + and input_name not in forward_op.output_arg_names + ): if input_name in grad_var_to_var: fwd_name = grad_var_to_var[input_name] - ref_dims_mapping = fwd_op_dist_attr.get_output_dims_mapping( - fwd_name) + ref_dims_mapping = ( + fwd_op_dist_attr.get_output_dims_mapping( + fwd_name + ) + ) else: input_var = vars[input_name] ref_dims_mapping = self._dist_context.get_tensor_dist_attr_for_program( - input_var).dims_mapping + input_var + ).dims_mapping else: if fwd_op_dist_attr.get_input_dims_mapping(input_name): - ref_dims_mapping = fwd_op_dist_attr.get_input_dims_mapping( - input_name) + ref_dims_mapping = ( + fwd_op_dist_attr.get_input_dims_mapping( + input_name + ) + ) else: - ref_dims_mapping = fwd_op_dist_attr.get_output_dims_mapping( - input_name) - assert ref_dims_mapping is not None, "[{}] 's dims mapping is NONE".format( - input_name) + ref_dims_mapping = ( + fwd_op_dist_attr.get_output_dims_mapping( + input_name + ) + ) + assert ( + ref_dims_mapping is not None + ), "[{}] 's dims mapping is NONE".format(input_name) grad_op_dist_attr.set_input_dims_mapping( - input_name, ref_dims_mapping) + input_name, ref_dims_mapping + ) for output_name in grad_op.output_arg_names: assert output_name in grad_var_to_var fwd_name = grad_var_to_var[output_name] ref_dims_mapping = fwd_op_dist_attr.get_input_dims_mapping( - fwd_name) + fwd_name + ) # var output_var = vars[output_name] tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.dims_mapping = ref_dims_mapping tensor_dist_attr.process_mesh = fwd_op_process_mesh self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr.set_output_dims_mapping( - output_name, ref_dims_mapping) + output_name, ref_dims_mapping + ) grad_op_dist_attr.impl_type = fwd_op_dist_attr.impl_type grad_op_dist_attr.impl_idx = fwd_op_dist_attr.impl_idx self._dist_context.set_op_dist_attr_for_program( - grad_op, grad_op_dist_attr) + grad_op, grad_op_dist_attr + ) # grad ops that have not a corresponding mapping in grad_op_id_to_op_id else: if grad_op.type == 'sum': assert all(map(_is_grad_var_name, grad_op.input_arg_names)) 
output_name = grad_op.output_arg_names[0] - assert output_name in grad_var_to_var, "sum op's output '{}' has no corresponding var".format( - output_name) + assert ( + output_name in grad_var_to_var + ), "sum op's output '{}' has no corresponding var".format( + output_name + ) ref_fwd_var_name = grad_var_to_var[output_name] ref_fwd_var = vars[ref_fwd_var_name] - ref_fwd_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - ref_fwd_var) + ref_fwd_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + ref_fwd_var + ) + ) ref_fwd_dims_mapping = ref_fwd_dist_attr.dims_mapping ref_fwd_process_mesh = ref_fwd_dist_attr.process_mesh @@ -1337,24 +1669,30 @@ class Completer: tensor_dist_attr.process_mesh = ref_fwd_process_mesh output_var = vars[output_name] self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = ref_fwd_process_mesh for var_name in grad_op.input_arg_names: grad_op_dist_attr.set_input_dims_mapping( - var_name, ref_fwd_dims_mapping) + var_name, ref_fwd_dims_mapping + ) grad_op_dist_attr.set_output_dims_mapping( - output_name, ref_fwd_dims_mapping) + output_name, ref_fwd_dims_mapping + ) grad_op_dist_attr.impl_type = "default" grad_op_dist_attr.impl_idx = 0 elif grad_op.type == 'fill_any_like': ref_var_name = grad_op.input_arg_names[0] ref_var = vars[ref_var_name] - ref_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - ref_var) + ref_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + ref_var + ) + ) ref_dims_mapping = ref_dist_attr.dims_mapping ref_process_mesh = ref_dist_attr.process_mesh # output @@ -1364,27 +1702,35 @@ class Completer: output_var_name = grad_op.output_arg_names[0] output_var = vars[output_var_name] self._dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) # op grad_op_dist_attr = OperatorDistributedAttribute() grad_op_dist_attr.process_mesh = ref_process_mesh grad_op_dist_attr.set_input_dims_mapping( - ref_var_name, ref_dims_mapping) + ref_var_name, ref_dims_mapping + ) grad_op_dist_attr.set_output_dims_mapping( - output_var_name, ref_dims_mapping) + output_var_name, ref_dims_mapping + ) else: - raise ValueError("got unexpect op [{}]".format( - str(grad_op.type))) + raise ValueError( + "got unexpect op [{}]".format(str(grad_op.type)) + ) self._dist_context.set_op_dist_attr_for_program( - grad_op, grad_op_dist_attr) + grad_op, grad_op_dist_attr + ) def complete_update_annotation(self, serial_main_program): """Complete the annotation of vars and ops in the update phase for parallel program.""" # Copy the dist tensors and dist ops annotated by users from the default context # global mesh - from paddle.distributed.auto_parallel.process_group import get_world_process_group + from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, + ) + world_ranks = get_world_process_group().ranks # Notice: serial_main_program is actually a dist_main_program of current rank, @@ -1403,17 +1749,22 @@ class Completer: if int(op.attr('op_role')) == int(OpRole.Optimize): if is_gradient_clip_op(op): if op.type in [ - "sum", "sqrt", "fill_constant", "elementwise_max", - "elementwise_div" + "sum", + "sqrt", + "fill_constant", + "elementwise_max", + "elementwise_div", ]: op_dist_attr = OperatorDistributedAttribute() op_dist_attr.process_mesh = world_ranks for in_name in op.input_arg_names: 
in_var = vars[in_name] in_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - in_var) + in_var + ) op_dist_attr.set_input_dist_attr( - in_name, in_dist_attr) + in_name, in_dist_attr + ) for out_name in op.output_arg_names: out_var = vars[out_name] out_dist_attr = TensorDistributedAttribute() @@ -1422,22 +1773,30 @@ class Completer: -1 for _ in range(len(out_var.shape)) ] self._dist_context.set_tensor_dist_attr_for_program( - out_var, out_dist_attr) + out_var, out_dist_attr + ) op_dist_attr.set_output_dist_attr( - out_name, out_dist_attr) + out_name, out_dist_attr + ) else: in_var = vars[op.input("X")[0]] - in_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - in_var) + in_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + in_var + ) + ) assert in_dist_attr is not None ref_process_mesh = in_dist_attr.process_mesh ref_dims_mapping = in_dist_attr.dims_mapping - if op.type == "cast" and \ - ops[idx + 1].type == "elementwise_mul": + if ( + op.type == "cast" + and ops[idx + 1].type == "elementwise_mul" + ): ref_var = vars[ops[idx + 1].input("X")[0]] ref_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - ref_var) + ref_var + ) assert ref_dist_attr is not None ref_process_mesh = ref_dist_attr.process_mesh @@ -1447,51 +1806,72 @@ class Completer: if out_var.shape == in_var.shape: out_dist_attr.dims_mapping = ref_dims_mapping else: - assert len( - out_var.shape) == 1 and out_var.shape[0] == 1 + assert ( + len(out_var.shape) == 1 + and out_var.shape[0] == 1 + ) out_dist_attr.dims_mapping = [-1] self._dist_context.set_tensor_dist_attr_for_program( - out_var, out_dist_attr) + out_var, out_dist_attr + ) op_dist_attr = OperatorDistributedAttribute() op_dist_attr.process_mesh = ref_process_mesh op_dist_attr.set_input_dist_attr( - in_var.name, in_dist_attr) + in_var.name, in_dist_attr + ) op_dist_attr.set_output_dist_attr( - out_var.name, out_dist_attr) + out_var.name, out_dist_attr + ) self._dist_context.set_op_dist_attr_for_program( - op, op_dist_attr) + op, op_dist_attr + ) if "Grad" in op.input_names and "Param" in ops[idx].input_names: - assert len( - op.input("Param")) == 1, "Only support one-to-one now." - assert len( - op.input("Grad")) == 1, "Only support one-to-one now." + assert ( + len(op.input("Param")) == 1 + ), "Only support one-to-one now." + assert ( + len(op.input("Grad")) == 1 + ), "Only support one-to-one now." 
param = vars[op.input("Param")[0]] grad_var = vars[op.input("Grad")[0]] - param_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - param) + param_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + param + ) + ) assert param_dist_attr is not None - ref_process_mesh = self._dist_context.get_tensor_dist_attr_for_program( - param).process_mesh + ref_process_mesh = ( + self._dist_context.get_tensor_dist_attr_for_program( + param + ).process_mesh + ) assert ref_process_mesh is not None - ref_dims_mapping = self._dist_context.get_tensor_dist_attr_for_program( - param).dims_mapping + ref_dims_mapping = ( + self._dist_context.get_tensor_dist_attr_for_program( + param + ).dims_mapping + ) assert ref_dims_mapping is not None op_dist_attr = OperatorDistributedAttribute() op_dist_attr.process_mesh = ref_process_mesh - op_dist_attr.set_input_dims_mapping(grad_var.name, - ref_dims_mapping) - op_dist_attr.set_input_dims_mapping(param.name, - ref_dims_mapping) + op_dist_attr.set_input_dims_mapping( + grad_var.name, ref_dims_mapping + ) + op_dist_attr.set_input_dims_mapping( + param.name, ref_dims_mapping + ) op_dist_attr.set_output_dims_mapping( - param.name, ref_dims_mapping) + param.name, ref_dims_mapping + ) learning_var = vars[op.input("LearningRate")[0]] op_dist_attr.set_input_dims_mapping(learning_var.name, [-1]) op_dist_attr.set_output_dims_mapping( - learning_var.name, [-1]) + learning_var.name, [-1] + ) if not learning_rate_completed: learning_rate_completed = True @@ -1499,18 +1879,19 @@ class Completer: var_dist_attr.process_mesh = world_ranks var_dist_attr.dims_mapping = [-1] self._dist_context.set_tensor_dist_attr_for_program( - learning_var, var_dist_attr) + learning_var, var_dist_attr + ) for input_name in op.desc.input_names(): if input_name in [ - 'Param', - 'Grad', - 'LearningRate', - "SkipUpdate", - "Beta1Tensor", - "Beta2Tensor", - "EpsilonTensor", + 'Param', + 'Grad', + 'LearningRate', + "SkipUpdate", + "Beta1Tensor", + "Beta2Tensor", + "EpsilonTensor", ]: continue if len(op.desc.input(input_name)) == 0: @@ -1523,22 +1904,28 @@ class Completer: if "Beta1Pow" in input_name or "Beta2Pow" in input_name: input_var_attr.dims_mapping = [-1] op_dist_attr.set_input_dims_mapping( - input_var.name, [-1]) + input_var.name, [-1] + ) op_dist_attr.set_output_dims_mapping( - input_var.name, [-1]) + input_var.name, [-1] + ) else: input_var_attr.dims_mapping = ref_dims_mapping op_dist_attr.set_input_dims_mapping( - input_var.name, ref_dims_mapping) + input_var.name, ref_dims_mapping + ) op_dist_attr.set_output_dims_mapping( - input_var.name, ref_dims_mapping) + input_var.name, ref_dims_mapping + ) input_var_attr.process_mesh = ref_process_mesh self._dist_context.set_tensor_dist_attr_for_program( - input_var, input_var_attr) + input_var, input_var_attr + ) self._dist_context.set_op_dist_attr_for_program( - op, op_dist_attr) + op, op_dist_attr + ) continue def complete_prim_annotation(self, serial_main_program=None): @@ -1574,14 +1961,18 @@ class Completer: def _init_global_mesh_for_program(self): # Copy the dist tensors and dist ops annotated by users from the default context # global mesh - from paddle.distributed.auto_parallel.process_group import get_world_process_group + from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, + ) + world_ranks = get_world_process_group().ranks for block in self._dist_context._serial_main_program.blocks: for tensor in block.vars.values(): # Copy the distributed tensors in the default context 
dist_tensor = self._dist_context.get_dist_tensor_for_program( - tensor) + tensor + ) assert dist_tensor is not None dist_tensor.dist_attr.process_mesh = world_ranks for op in block.ops: @@ -1592,7 +1983,8 @@ class Completer: # Find the most compatible implemenetations from the distributed operator op_dist_impls = find_compatible_distributed_operator_impls( - dist_op, fwd=True) + dist_op, fwd=True + ) if op_dist_impls is not None: backup_op_dist_attr = copy.deepcopy(dist_op.dist_attr) for op_dist_impl in op_dist_impls: diff --git a/python/paddle/distributed/auto_parallel/converter.py b/python/paddle/distributed/auto_parallel/converter.py index 21cc2a22368f515d2f30192464f311377df933ab..d0fae414b1981d291c8b31f99dfb232e289b31e0 100644 --- a/python/paddle/distributed/auto_parallel/converter.py +++ b/python/paddle/distributed/auto_parallel/converter.py @@ -45,30 +45,42 @@ class Converter(object): def _check_tensor_dict(self, tensors_dict): if not tensors_dict: - raise ValueError("'tensors_dict' is None, " - "the tensors to be converted cannot be None.") + raise ValueError( + "'tensors_dict' is None, " + "the tensors to be converted cannot be None." + ) if not isinstance(tensors_dict, dict): raise TypeError( - "The type of 'tensors_dict' should be 'dict', but got '{}'.". - format(str(type(tensors_dict)))) + "The type of 'tensors_dict' should be 'dict', but got '{}'.".format( + str(type(tensors_dict)) + ) + ) return tensors_dict def _check_pre_strategy(self, pre_strategy): if not pre_strategy: - raise ValueError("'pre_strategy' is None, " - "there are not tensors in pre process.") + raise ValueError( + "'pre_strategy' is None, " + "there are not tensors in pre process." + ) if not isinstance(pre_strategy, dict): - raise TypeError("The type of 'pre_strategy' should be 'dict', " - "but got '{}'.".format(str(type(pre_strategy)))) + raise TypeError( + "The type of 'pre_strategy' should be 'dict', " + "but got '{}'.".format(str(type(pre_strategy))) + ) return pre_strategy def _check_cur_strategy(self, cur_strategy): if not cur_strategy: - warnings.warn("'cur_strategy' is None, " - "there are not tensors in cur process") + warnings.warn( + "'cur_strategy' is None, " + "there are not tensors in cur process" + ) if not isinstance(cur_strategy, dict): - raise TypeError("The type of 'cur_strategy' should be 'dict', " - "but got '{}'.".format(str(type(cur_strategy)))) + raise TypeError( + "The type of 'cur_strategy' should be 'dict', " + "but got '{}'.".format(str(type(cur_strategy))) + ) return cur_strategy def convert(self, strict=True): @@ -131,42 +143,60 @@ class Converter(object): cur_dist_attr = self._cur_strategy[tensor_name] try: tensors_dict[tensor_name] = Converter.merge_and_slice( - tensor_list, pre_dist_attr, cur_dist_attr) + tensor_list, pre_dist_attr, cur_dist_attr + ) except ValueError as err: raise ValueError( - "Fail to convert tensor '{}'. ".format(str(tensor_name)) + - str(err)) + "Fail to convert tensor '{}'. 
".format(str(tensor_name)) + + str(err) + ) for tensor_name in self._pre_strategy: if tensor_name not in self._cur_strategy: tensor_not_in_cur.append(tensor_name) if not strict: - tensors_dict, tensor_match_with_pre, tensor_match_with_cur = self.convert_with_prefix_match( - tensors_dict, tensor_not_in_pre, tensor_not_in_cur) + ( + tensors_dict, + tensor_match_with_pre, + tensor_match_with_cur, + ) = self.convert_with_prefix_match( + tensors_dict, tensor_not_in_pre, tensor_not_in_cur + ) else: - tensors_dict, tensor_match_with_pre, tensor_match_with_cur = tensors_dict, [], [] + tensors_dict, tensor_match_with_pre, tensor_match_with_cur = ( + tensors_dict, + [], + [], + ) tensor_not_in_pre = set(tensor_not_in_pre) - set(tensor_match_with_pre) tensor_not_in_cur = set(tensor_not_in_cur) - set(tensor_match_with_cur) if tensor_not_in_pre: warnings.warn( "tensors [{}] are not found in last training strategy.".format( - str(tensor_not_in_pre))) + str(tensor_not_in_pre) + ) + ) if tensor_not_in_cur: warnings.warn( - "tensors [{}] are not found in current training strategy.". - format(str(tensor_not_in_cur))) + "tensors [{}] are not found in current training strategy.".format( + str(tensor_not_in_cur) + ) + ) if tensor_not_in_ckpt: warnings.warn( "tensors [{}] are found in pre_strategy, but are not found" - "in checkpoint files, please check your checkpoint files.". - format(str(tensor_not_in_ckpt))) + "in checkpoint files, please check your checkpoint files.".format( + str(tensor_not_in_ckpt) + ) + ) return tensors_dict - def convert_with_prefix_match(self, tensors_dict, tensor_not_in_pre, - tensor_not_in_cur): + def convert_with_prefix_match( + self, tensors_dict, tensor_not_in_pre, tensor_not_in_cur + ): # the name which in cur_process and can match with pre_process tensor_match_with_pre = [] # the name which in pre_process and can match with cur_process @@ -174,7 +204,7 @@ class Converter(object): for cur_name in tensor_not_in_pre: prefix_name = cur_name while prefix_name.find("_") != -1: - prefix_name = prefix_name[:prefix_name.rfind("_")] + prefix_name = prefix_name[: prefix_name.rfind("_")] for pre_name in tensor_not_in_cur: if prefix_name in pre_name: # 'cur_name' of cur_process can match with 'pre_name' of pre_process @@ -185,14 +215,20 @@ class Converter(object): cur_dist_attr = self._cur_strategy[cur_name] try: tensors_dict[cur_name] = Converter.merge_and_slice( - pre_tensor_list, pre_dist_attr, cur_dist_attr) + pre_tensor_list, pre_dist_attr, cur_dist_attr + ) except ValueError as err: raise ValueError( "Fail to convert tensor '{}' by '{}'. 
".format( - str(cur_name), str(pre_name)) + str(err)) + str(cur_name), str(pre_name) + ) + + str(err) + ) self._logger.info( "tensor [{}] is matched with tensor [{}]".format( - cur_name, pre_name)) + cur_name, pre_name + ) + ) tensor_match_with_pre.append(cur_name) tensor_match_with_cur.append(pre_name) break @@ -221,8 +257,9 @@ class Converter(object): cur_dims_mapping = cur_dist_attr["dims_mapping"] if len(set(pre_dims_mapping)) > 1 or -1 not in pre_dims_mapping: # merge tensor - tensor = Converter.merge_with_dist_attr(tensor_list, - pre_dist_attr) + tensor = Converter.merge_with_dist_attr( + tensor_list, pre_dist_attr + ) else: # skip merge tensor tensor = tensor_list[0] @@ -235,7 +272,7 @@ class Converter(object): @staticmethod def merge_with_dist_attr(tensor_list, dist_attr): - """ Merge tensor with distributed attribute """ + """Merge tensor with distributed attribute""" from .reshard import Resharder dims_mapping = dist_attr["dims_mapping"] @@ -243,44 +280,62 @@ class Converter(object): process_group = dist_attr["process_group"] # get the complete shape of the tensor complete_shape = Resharder.compute_complete_shape( - tensor_list[0].shape, process_shape, dims_mapping) + tensor_list[0].shape, process_shape, dims_mapping + ) # merge the tensor with dist_attr partition_tensor_list = [] merged_partiton = [] for process in process_group: partition_index = Resharder.compute_partition_index( - process, complete_shape, dims_mapping, process_shape, - process_group) + process, + complete_shape, + dims_mapping, + process_shape, + process_group, + ) index = process_group.index(process) if partition_index not in merged_partiton: merged_partiton.append(partition_index) - Converter.merge(partition_tensor_list, tensor_list[index], - partition_index, complete_shape) + Converter.merge( + partition_tensor_list, + tensor_list[index], + partition_index, + complete_shape, + ) if len(partition_tensor_list) != 1: - raise ValueError("Fail to merge tensor with dist_attr '{}'.".format( - str(dist_attr))) + raise ValueError( + "Fail to merge tensor with dist_attr '{}'.".format( + str(dist_attr) + ) + ) complete_tensor = partition_tensor_list[0][0] return complete_tensor @staticmethod def slice_with_dist_attr(tensor, dist_attr): - """ Slice tensor with distributed attribute """ + """Slice tensor with distributed attribute""" dims_mapping = dist_attr["dims_mapping"] process_shape = dist_attr["process_shape"] process_group = dist_attr["process_group"] # slice the tensor with dist_attr partition_index_list = Converter._get_split_indices( - tensor.shape, dims_mapping, process_shape, process_group) - sliced_tensor_list = Converter.split(tensor, partition_index_list, - len(partition_index_list)) + tensor.shape, dims_mapping, process_shape, process_group + ) + sliced_tensor_list = Converter.split( + tensor, partition_index_list, len(partition_index_list) + ) # get the current tensor's index in sliced_tensor_list rank_id = paddle.distributed.get_rank() sliced_tensor_index = Converter._get_sliced_index( - rank_id, tensor.shape, dims_mapping, process_shape, process_group) + rank_id, tensor.shape, dims_mapping, process_shape, process_group + ) if sliced_tensor_index not in range(len(sliced_tensor_list)): - raise ValueError("Fail to slice tensor with dist_attr '{}'.".format( - str(dist_attr))) + raise ValueError( + "Fail to slice tensor with dist_attr '{}'.".format( + str(dist_attr) + ) + ) sliced_tensor = sliced_tensor_list[sliced_tensor_index] return sliced_tensor @@ -319,21 +374,32 @@ class Converter(object): else: i = 
0 while i < len(partition_tensor_list): - concat_axis, first_order, new_partition = Resharder.compute_concat_info( - partition_tensor_list[i][1], partition_index) + ( + concat_axis, + first_order, + new_partition, + ) = Resharder.compute_concat_info( + partition_tensor_list[i][1], partition_index + ) if concat_axis != -1: if first_order == 0: new_tensor = np.concatenate( (partition_tensor_list[i][0], tensor), - axis=concat_axis) + axis=concat_axis, + ) else: new_tensor = np.concatenate( (tensor, partition_tensor_list[i][0]), - axis=concat_axis) + axis=concat_axis, + ) partition_tensor_list.pop(i) - Converter.merge(partition_tensor_list, new_tensor, - new_partition, complete_shape) + Converter.merge( + partition_tensor_list, + new_tensor, + new_partition, + complete_shape, + ) break i += 1 @@ -361,19 +427,21 @@ class Converter(object): """ sliced_tensor_list = [] axis = len(complete_tensor.shape) - length - sliced_tensor = np.split(complete_tensor, - partition_index_list[axis], - axis=axis) + sliced_tensor = np.split( + complete_tensor, partition_index_list[axis], axis=axis + ) if length == 1: return sliced_tensor for tensor in sliced_tensor: sliced_tensor_list.extend( - Converter.split(tensor, partition_index_list, length - 1)) + Converter.split(tensor, partition_index_list, length - 1) + ) return sliced_tensor_list @staticmethod - def _get_split_indices(complete_shape, dims_mapping, process_shape, - process_group): + def _get_split_indices( + complete_shape, dims_mapping, process_shape, process_group + ): """ Get split indices of every dimension. @@ -398,22 +466,31 @@ class Converter(object): split_indices_list = [] for process in process_group: partition_index = Resharder.compute_partition_index( - process, complete_shape, dims_mapping, process_shape, - process_group) + process, + complete_shape, + dims_mapping, + process_shape, + process_group, + ) if split_indices_list: for dim in range(len(partition_index)): split_indices_list[dim].extend(partition_index[dim]) else: split_indices_list = partition_index split_indices_list = list( - map(lambda x, y: list(set(x) - set([y]) - set([0])), - split_indices_list, complete_shape)) + map( + lambda x, y: list(set(x) - set([y]) - set([0])), + split_indices_list, + complete_shape, + ) + ) split_indices_list = [sorted(x) for x in split_indices_list] return split_indices_list @staticmethod - def _get_sliced_index(rank_id, complete_shape, dims_mapping, process_shape, - process_group): + def _get_sliced_index( + rank_id, complete_shape, dims_mapping, process_shape, process_group + ): """ Get sliced_tensor's index of current rank in all sliced tensors list. 
@@ -442,7 +519,8 @@ class Converter(object): from .reshard import Resharder partition_index = Resharder.compute_partition_index( - rank_id, complete_shape, dims_mapping, process_shape, process_group) + rank_id, complete_shape, dims_mapping, process_shape, process_group + ) sliced_index = 0 for i, shape in enumerate(complete_shape): if dims_mapping[i] == -1: diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/cost/base_cost.py index 5ac81052c7625febc2fcb9de68114a244462d8d6..2ce98a0a0511e2ceb494953837f438a600f1cf81 100644 --- a/python/paddle/distributed/auto_parallel/cost/base_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/base_cost.py @@ -25,8 +25,12 @@ from ..utils import _get_idx_in_axis from ..dist_tensor import DistributedTensor COMM_OP_TYPE = [ - "send_v2", "recv_v2", "c_broadcast", "c_allgather", "c_allreduce_sum", - "c_identity" + "send_v2", + "recv_v2", + "c_broadcast", + "c_allgather", + "c_allreduce_sum", + "c_identity", ] NON_COMP_TYPE = ["while"] + COMM_OP_TYPE _g_op_cost_factory = {} @@ -93,8 +97,9 @@ def build_comp_desc_from_dist_op(dist_op, dist_context): var_name_list = op.input(input_name) var_desc = [] for var_name in var_name_list: - var = get_var_with_recursion(var_name, op.block, - op.block.program) + var = get_var_with_recursion( + var_name, op.block, op.block.program + ) # Use op input_dims_mapping dims_mapping = dist_attr.get_input_dims_mapping(var_name) global_sizes = var.shape @@ -102,18 +107,34 @@ def build_comp_desc_from_dist_op(dist_op, dist_context): shard_sizes = None topology = process_mesh.topology shape = DistributedTensor.get_local_sizes( - global_sizes, dims_mapping, topology, processes, process, - shard_sizes) + global_sizes, + dims_mapping, + topology, + processes, + process, + shard_sizes, + ) var_desc.append((var.dtype, shape)) # For special op such as embedding and its grad op - if op.type == "c_embedding" or op.type == "lookup_table_v2" or op.type == "c_embedding_grad" or op.type == "lookup_table_v2_grad": + if ( + op.type == "c_embedding" + or op.type == "lookup_table_v2" + or op.type == "c_embedding_grad" + or op.type == "lookup_table_v2_grad" + ): if input_name == "W": - embedding_row_dim_mapping = dist_attr.get_input_dims_mapping( - op.input(input_name)[0])[0] + embedding_row_dim_mapping = ( + dist_attr.get_input_dims_mapping( + op.input(input_name)[0] + )[0] + ) relative_idx = _get_idx_in_axis( - processes, dist_attr.process_mesh.topology, - embedding_row_dim_mapping, process) + processes, + dist_attr.process_mesh.topology, + embedding_row_dim_mapping, + process, + ) per_part_size = shape[0] relative_idx = relative_idx * per_part_size desc["attrs"]["start_index"] = relative_idx @@ -126,8 +147,9 @@ def build_comp_desc_from_dist_op(dist_op, dist_context): var_desc = [] for var_name in var_name_list: # Use op output_dims_mapping - var = get_var_with_recursion(var_name, op.block, - op.block.program) + var = get_var_with_recursion( + var_name, op.block, op.block.program + ) dist_attr = dist_op.dist_attr dims_mapping = dist_attr.get_output_dims_mapping(var_name) process_mesh = dist_attr.process_mesh @@ -136,8 +158,13 @@ def build_comp_desc_from_dist_op(dist_op, dist_context): processes = process_mesh.processes topology = process_mesh.topology shape = DistributedTensor.get_local_sizes( - global_sizes, dims_mapping, topology, processes, process, - shard_sizes) + global_sizes, + dims_mapping, + topology, + processes, + process, + shard_sizes, + ) var_desc.append((var.dtype, shape)) 
# For special op such as fill_constant_batch_size_like @@ -150,8 +177,9 @@ def build_comp_desc_from_dist_op(dist_op, dist_context): # Modify target shape for idx, axis in enumerate(dims_mapping): if axis >= 0: - shape_list[idx] = shape_list[ - idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) desc["attrs"]["shape"] = shape_list output_desc[out_name] = var_desc @@ -209,13 +237,15 @@ def build_comp_desc_str_for_predict(desc): return parse_result -def build_comm_desc_from_dist_op(op_type, - dist_op, - ctx, - var_names, - attrs=None, - parallel_axis=None, - group_ranks=None): +def build_comm_desc_from_dist_op( + op_type, + dist_op, + ctx, + var_names, + attrs=None, + parallel_axis=None, + group_ranks=None, +): """Build descriptions of communication op distributed on the processes.""" from ..reshard import get_var_with_recursion @@ -256,19 +286,26 @@ def build_comm_desc_from_dist_op(op_type, has_found = True break assert has_found - var = get_var_with_recursion(var_name, serial_op.block, - serial_op.block.program) + var = get_var_with_recursion( + var_name, serial_op.block, serial_op.block.program + ) - dims_mapping = dist_attr.get_input_dims_mapping( - var_name - ) if var_name in dist_op.serial_op.input_arg_names else dist_attr.get_output_dims_mapping( - var_name) + dims_mapping = ( + dist_attr.get_input_dims_mapping(var_name) + if var_name in dist_op.serial_op.input_arg_names + else dist_attr.get_output_dims_mapping(var_name) + ) global_sizes = var.shape shard_sizes = None topology = process_mesh.topology shape = DistributedTensor.get_local_sizes( - global_sizes, dims_mapping, topology, processes, process, - shard_sizes) + global_sizes, + dims_mapping, + topology, + processes, + process, + shard_sizes, + ) input_list.append((var.dtype, shape)) # NOTE: The input_name of comm ops used usually is X. @@ -278,9 +315,12 @@ def build_comm_desc_from_dist_op(op_type, if parallel_axis is not None: process_mesh_shape = process_mesh.topology process_mesh_group = process_mesh.processes - comm_group_ranks = _get_comm_group(process_mesh_group, - process_mesh_shape, - parallel_axis, rank_id) + comm_group_ranks = _get_comm_group( + process_mesh_group, + process_mesh_shape, + parallel_axis, + rank_id, + ) elif group_ranks is not None: comm_group_ranks = group_ranks else: @@ -322,8 +362,9 @@ def build_comm_costs_from_descs(op_cost_class, ctx, processes, descs, cluster): group_ranks = desc["group_ranks"] if group_ranks not in group_ranks_list: group_ranks_list.append(group_ranks) - comm_op_cost = op_cost_class(op_desc=desc, - comm_context=comm_context) + comm_op_cost = op_cost_class( + op_desc=desc, comm_context=comm_context + ) comm_op_cost_list.append(comm_op_cost) return comm_op_cost_list @@ -336,8 +377,9 @@ def build_comp_costs_from_descs(op_cost_class, ctx, processes, descs, cluster): return costs -def build_dp_costs(result, dist_op, ctx, var_names, attrs, parallel_axis, - cluster): +def build_dp_costs( + result, dist_op, ctx, var_names, attrs, parallel_axis, cluster +): """DP cost contains a allreduce_sum op cost and a scale op cost""" # The costs will be appended in the given result. 
from ..reshard import get_var_with_recursion @@ -370,10 +412,15 @@ def build_dp_costs(result, dist_op, ctx, var_names, attrs, parallel_axis, ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_cost_list = build_comm_costs_from_descs( - _g_op_cost_factory["c_allreduce_sum"], ctx, processes, - c_allreduce_sum_descs, cluster) + _g_op_cost_factory["c_allreduce_sum"], + ctx, + processes, + c_allreduce_sum_descs, + cluster, + ) result.append(comm_cost_list) # The scale op just on the group_ranks @@ -386,24 +433,33 @@ def build_dp_costs(result, dist_op, ctx, var_names, attrs, parallel_axis, desc = {} desc["op"] = op_type desc["inputs"] = {} - dims_mapping = dist_attr.get_input_dims_mapping( - var_name) if dist_attr.get_input_dims_mapping( - var_name - ) is not None else dist_attr.get_output_dims_mapping(var_name) - var = get_var_with_recursion(var_name, dist_op.serial_op.block, - dist_op.serial_op.block.program) + dims_mapping = ( + dist_attr.get_input_dims_mapping(var_name) + if dist_attr.get_input_dims_mapping(var_name) is not None + else dist_attr.get_output_dims_mapping(var_name) + ) + var = get_var_with_recursion( + var_name, + dist_op.serial_op.block, + dist_op.serial_op.block.program, + ) global_sizes = var.shape shard_sizes = None topology = process_mesh.topology - shape = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + shape = DistributedTensor.get_local_sizes( + global_sizes, + dims_mapping, + topology, + processes, + rank, + shard_sizes, + ) desc["inputs"]["X"] = [(var.dtype, shape)] attrs = {"scale": 1.0 / dp_degree} desc["attrs"] = attrs - scale_op_cost = _g_op_cost_factory["scale"](op_desc=desc, - cluster=cluster) + scale_op_cost = _g_op_cost_factory["scale"]( + op_desc=desc, cluster=cluster + ) scale_costs[rank] = scale_op_cost result.append(scale_costs) @@ -442,7 +498,7 @@ class CommContext: if alpha_latency is None: # set default self.base_ring = 8.4 - self.base_tree = 0. + self.base_tree = 0.0 # self.base_inter_ring = 9.6 # self.base_inter_tree = 28 # NVL in default @@ -457,7 +513,7 @@ class CommContext: self.base_ring = base_ring if base_ring is not None else 8.4 base_tree = alpha_latency.base_tree - self.base_tree = base_tree if base_tree is not None else 0. + self.base_tree = base_tree if base_tree is not None else 0.0 intra_ring = alpha_latency.intra_ring if intra_ring == LinkType.NVL: @@ -521,10 +577,16 @@ class CommContext: for i in range(len(ranks)): for j in range(i + 1, len(ranks)): forward_order_beta = self.cluster.get_beta( - ranks[i], ranks[j]) + ranks[i], ranks[j] + ) backward_order_beta = self.cluster.get_beta( - ranks[j], ranks[i]) - beta = forward_order_beta if forward_order_beta > backward_order_beta else backward_order_beta + ranks[j], ranks[i] + ) + beta = ( + forward_order_beta + if forward_order_beta > backward_order_beta + else backward_order_beta + ) if max_beta == None: max_beta = beta else: @@ -547,7 +609,6 @@ class CommContext: class Cost: - def __init__(self, time=0, memory=0, flops=0): self.time = time self.memory = memory @@ -557,14 +618,14 @@ class Cost: assert val >= 0, "Time must be greater than or equal to 0." def _check_memory(self, val): - assert isinstance( - val, - int) and val >= 0, "Memory must be int and greater than equal to 0." + assert ( + isinstance(val, int) and val >= 0 + ), "Memory must be int and greater than equal to 0." 
def _check_flops(self, val): - assert isinstance( - val, - int) and val >= 0, "FLOPs must be int and greater than equal to 0." + assert ( + isinstance(val, int) and val >= 0 + ), "FLOPs must be int and greater than equal to 0." @property def time(self): @@ -598,7 +659,7 @@ class Cost: time = self.time + rhs.time memory = self.memory + rhs.memory flops = self.flops + rhs.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 return Cost(time, memory, flops) def __sub__(self, rhs): @@ -606,12 +667,11 @@ class Cost: time = self.time - rhs.time memory = self.memory - rhs.memory flops = self.flops - rhs.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 return Cost(time, memory, flops) class OpCost: - def __init__(self, op=None, op_desc=None): self._op = op self._op_desc = op_desc @@ -666,12 +726,12 @@ class OpCost: time = self.cost.time + rhs.cost.time memory = self.cost.memory + rhs.cost.memory flops = self.cost.flops + rhs.cost.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 elif isinstance(rhs, Cost): time = self.time + rhs.time memory = self.memory + rhs.memory flops = self.flops + rhs.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 return Cost(time, memory, flops) def __sub__(self, rhs): @@ -683,12 +743,12 @@ class OpCost: time = self.cost.time - rhs.cost.time memory = self.cost.memory - rhs.cost.memory flops = self.cost.flops - rhs.cost.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 elif isinstance(rhs, Cost): time = self.time - rhs.time memory = self.memory - rhs.memory flops = self.flops - rhs.flops - assert (time >= 0 and memory >= 0 and flops >= 0) + assert time >= 0 and memory >= 0 and flops >= 0 return Cost(time, memory, flops) @@ -721,8 +781,9 @@ class CommOpCost(OpCost): vars = self.op.block.vars # NOTE: The tensor communicated input_name is "X" in default. Otherwise, this function should be overrided var_name = self.op.input("X")[0] - var = get_var_with_recursion(var_name, self.op.block, - self.program) + var = get_var_with_recursion( + var_name, self.op.block, self.program + ) dtype = var.dtype shape = var.shape elif self.op_desc is not None: @@ -756,7 +817,8 @@ class CommOpCost(OpCost): if self._machine_count is None: cluster = self._comm_context.cluster self._machine_count = cluster.get_involved_machine_count( - self.group_ranks) + self.group_ranks + ) return self._machine_count @property @@ -775,8 +837,10 @@ class CommOpCost(OpCost): process_group = get_process_group(ring_id) if process_group is None: raise ValueError( - "There not exists process group whose ring_id is {}.". 
- format(ring_id)) + "There not exists process group whose ring_id is {}.".format( + ring_id + ) + ) self._group_ranks = process_group.ranks return self._group_ranks @@ -786,7 +850,9 @@ class CommOpCost(OpCost): if cls.OP_TYPE not in COMM_OP_TYPE: raise TypeError( "Please Check op type in {}, but got {}.".format( - COMM_OP_TYPE, cls.OP_TYPE)) + COMM_OP_TYPE, cls.OP_TYPE + ) + ) class CompOpCost(OpCost): @@ -804,7 +870,9 @@ class CompOpCost(OpCost): if cls.OP_TYPE in NON_COMP_TYPE: raise TypeError( "Please Check op type not in {}, but got {}.".format( - NON_COMP_TYPE, cls.OP_TYPE)) + NON_COMP_TYPE, cls.OP_TYPE + ) + ) def register_op_cost(cls): @@ -821,12 +889,12 @@ def register_op_cost(cls): def calc_time_by_modeling(op=None, desc=None, cluster=None): op_type = op.type if op is not None else desc["op"] if op_type in COMM_OP_TYPE: - op_cost = _g_op_cost_factory[op_type](op=op, - op_desc=desc, - comm_context=CommContext(cluster)) + op_cost = _g_op_cost_factory[op_type]( + op=op, op_desc=desc, comm_context=CommContext(cluster) + ) elif op_type not in NON_COMP_TYPE: - op_cost = _g_op_cost_factory[op_type](op=op, - op_desc=desc, - cluster=cluster) + op_cost = _g_op_cost_factory[op_type]( + op=op, op_desc=desc, cluster=cluster + ) time = op_cost.calc_time() return time diff --git a/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py b/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py index b81df4dbe8656045ad096f5f31cbfccff7b6f5b6..2e8e347bd8ec62c1e69017ea2e8ad9355486ac01 100644 --- a/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py @@ -22,9 +22,9 @@ class AllreduceSumOpCost(CommOpCost): OP_TYPE = "c_allreduce_sum" def __init__(self, op=None, op_desc=None, comm_context=None): - super(AllreduceSumOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(AllreduceSumOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): # use tree if cross machine and use ring if in a single machine @@ -39,22 +39,41 @@ class AllreduceSumOpCost(CommOpCost): def calc_time_ring(self): alpha = self.comm_context.base_ring - alpha += 2 * (self.rank_count - - self.machine_count) * self.comm_context.intra_ring - alpha += 2 * (self.machine_count - 1) * ( - self.comm_context.inter_ring + self.hops * self.comm_context.switch) + alpha += ( + 2 + * (self.rank_count - self.machine_count) + * self.comm_context.intra_ring + ) + alpha += ( + 2 + * (self.machine_count - 1) + * ( + self.comm_context.inter_ring + + self.hops * self.comm_context.switch + ) + ) beta = self.comm_context.get_max_beta(self.group_ranks) - time = alpha + 2 * (self.rank_count - - 1) / self.rank_count * self.comm_count * beta + time = ( + alpha + + 2 + * (self.rank_count - 1) + / self.rank_count + * self.comm_count + * beta + ) return time def calc_time_tree(self): alpha = self.comm_context.base_tree - alpha += 2 * (self.rank_count / self.machine_count - - 1) * self.comm_context.intra_tree + alpha += ( + 2 + * (self.rank_count / self.machine_count - 1) + * self.comm_context.intra_tree + ) alpha += math.log2(self.machine_count) * ( - self.comm_context.inter_tree + self.hops * self.comm_context.switch) + self.comm_context.inter_tree + self.hops * self.comm_context.switch + ) beta = self.comm_context.get_max_beta(self.group_ranks) time = alpha + 2 * self.comm_count * beta @@ -67,9 +86,9 @@ class AllgatherOpCost(CommOpCost): OP_TYPE = "c_allgather" def __init__(self, op=None, op_desc=None, 
comm_context=None): - super(AllgatherOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(AllgatherOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): time = self.calc_time_ring() @@ -77,13 +96,17 @@ class AllgatherOpCost(CommOpCost): def calc_time_ring(self): alpha = self.comm_context.base_ring - alpha += (self.rank_count - - self.machine_count) * self.comm_context.intra_ring + alpha += ( + self.rank_count - self.machine_count + ) * self.comm_context.intra_ring alpha += (self.machine_count - 1) * ( - self.comm_context.inter_ring + self.hops * self.comm_context.switch) + self.comm_context.inter_ring + self.hops * self.comm_context.switch + ) beta = self.comm_context.get_max_beta(self.group_ranks) - time = alpha + (self.rank_count - - 1) / self.rank_count * self.comm_count * beta + time = ( + alpha + + (self.rank_count - 1) / self.rank_count * self.comm_count * beta + ) return time @@ -92,9 +115,9 @@ class BroadcastOpCost(CommOpCost): OP_TYPE = "c_broadcast" def __init__(self, op=None, op_desc=None, comm_context=None): - super(BroadcastOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(BroadcastOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): time = self.calc_time_ring() @@ -103,7 +126,10 @@ class BroadcastOpCost(CommOpCost): def calc_time_ring(self): alpha = self.comm_context.base_ring if self.machine_count > 1: - alpha += self.comm_context.inter_ring + self.hops * self.comm_context.switch + alpha += ( + self.comm_context.inter_ring + + self.hops * self.comm_context.switch + ) else: alpha += self.comm_context.intra_ring beta = self.comm_context.get_max_beta(self.group_ranks) @@ -117,9 +143,9 @@ class IdentityOpCost(CommOpCost): OP_TYPE = "c_identity" def __init__(self, op=None, op_desc=None, comm_context=None): - super(IdentityOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(IdentityOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): return 0 @@ -130,14 +156,17 @@ class RecvOpCost(CommOpCost): OP_TYPE = "recv_v2" def __init__(self, op=None, op_desc=None, comm_context=None): - super(RecvOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(RecvOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): alpha = self.comm_context.base_ring if self.machine_count > 1: - alpha += self.comm_context.inter_ring + self.hops * self.comm_context.switch + alpha += ( + self.comm_context.inter_ring + + self.hops * self.comm_context.switch + ) else: alpha += self.comm_context.intra_ring beta = self.comm_context.get_max_beta(self.group_ranks) @@ -150,14 +179,17 @@ class SendOpCost(CommOpCost): OP_TYPE = "send_v2" def __init__(self, op=None, op_desc=None, comm_context=None): - super(SendOpCost, self).__init__(op=op, - op_desc=op_desc, - comm_context=comm_context) + super(SendOpCost, self).__init__( + op=op, op_desc=op_desc, comm_context=comm_context + ) def calc_time(self): alpha = self.comm_context.base_ring if self.machine_count > 1: - alpha += self.comm_context.inter_ring + self.hops * self.comm_context.switch + alpha += ( + self.comm_context.inter_ring + + self.hops * self.comm_context.switch + ) else: alpha += self.comm_context.intra_ring beta = self.comm_context.get_max_beta(self.group_ranks) diff --git a/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py 
b/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py index 938a9465701fae5963ee32bc071316bd8bb0aef8..51c3d6069a6912937906677d37fc50d813532f18 100644 --- a/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py @@ -20,9 +20,9 @@ class AdamOpCost(CompOpCost): OP_TYPE = "adam" def __init__(self, op=None, op_desc=None, cluster=None): - super(AdamOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(AdamOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -39,9 +39,9 @@ class AssignOpCost(CompOpCost): OP_TYPE = "assign" def __init__(self, op=None, op_desc=None, cluster=None): - super(AssignOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(AssignOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -58,9 +58,9 @@ class AssignValueOpCost(CompOpCost): OP_TYPE = "assign_value" def __init__(self, op=None, op_desc=None, cluster=None): - super(AssignValueOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(AssignValueOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -77,9 +77,9 @@ class BeamSearchOpCost(CompOpCost): OP_TYPE = "beam_search" def __init__(self, op=None, op_desc=None, cluster=None): - super(BeamSearchOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(BeamSearchOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -96,9 +96,9 @@ class BeamSearchDecodeOpCost(CompOpCost): OP_TYPE = "beam_search_decode" def __init__(self, op=None, op_desc=None, cluster=None): - super(BeamSearchDecodeOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(BeamSearchDecodeOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -115,9 +115,9 @@ class CastOpCost(CompOpCost): OP_TYPE = "cast" def __init__(self, op=None, op_desc=None, cluster=None): - super(CastOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(CastOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -134,9 +134,9 @@ class ConcatOpCost(CompOpCost): OP_TYPE = "concat" def __init__(self, op=None, op_desc=None, cluster=None): - super(ConcatOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ConcatOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -153,9 +153,9 @@ class DropoutOpCost(CompOpCost): OP_TYPE = "dropout" def __init__(self, op=None, op_desc=None, cluster=None): - super(DropoutOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(DropoutOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def 
calc_flops(self): @@ -172,9 +172,9 @@ class DropoutGradOpCost(CompOpCost): OP_TYPE = "dropout_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(DropoutGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(DropoutGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -191,9 +191,9 @@ class ElementwiseAddOpCost(CompOpCost): OP_TYPE = "elementwise_add" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseAddOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseAddOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -210,9 +210,9 @@ class ElementwiseAddGradOpCost(CompOpCost): OP_TYPE = "elementwise_add_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseAddGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseAddGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -229,9 +229,9 @@ class ElementwiseDivOpCost(CompOpCost): OP_TYPE = "elementwise_div" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseDivOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseDivOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -248,9 +248,9 @@ class ElementwiseDivGradOpCost(CompOpCost): OP_TYPE = "elementwise_div_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseDivGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseDivGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -267,9 +267,9 @@ class ElementwiseMulOpCost(CompOpCost): OP_TYPE = "elementwise_mul" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseMulOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseMulOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -286,9 +286,9 @@ class ElementwiseMulGradOpCost(CompOpCost): OP_TYPE = "elementwise_mul_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseMulGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseMulGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -305,9 +305,9 @@ class ElementwiseSubOpCost(CompOpCost): OP_TYPE = "elementwise_sub" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseSubOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseSubOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -324,9 +324,9 @@ class 
ElementwiseSubGradOpCost(CompOpCost): OP_TYPE = "elementwise_sub_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ElementwiseSubGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ElementwiseSubGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -343,9 +343,9 @@ class EmbeddingOpCost(CompOpCost): OP_TYPE = "c_embedding" def __init__(self, op=None, op_desc=None, cluster=None): - super(EmbeddingOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(EmbeddingOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -362,9 +362,9 @@ class EmbeddingGradOpCost(CompOpCost): OP_TYPE = "c_embedding_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(EmbeddingGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(EmbeddingGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -381,9 +381,9 @@ class FillConstantOpCost(CompOpCost): OP_TYPE = "fill_constant" def __init__(self, op=None, op_desc=None, cluster=None): - super(FillConstantOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(FillConstantOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -400,9 +400,9 @@ class FillConstantBatchSizeLikeOpCost(CompOpCost): OP_TYPE = "fill_constant_batch_size_like" def __init__(self, op=None, op_desc=None, cluster=None): - super(FillConstantBatchSizeLikeOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(FillConstantBatchSizeLikeOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -419,8 +419,9 @@ class FusedSoftmaxMaskUpperTriangleOpCost(CompOpCost): OP_TYPE = "fused_softmax_mask_upper_triangle" def __init__(self, op=None, op_desc=None, cluster=None): - super(FusedSoftmaxMaskUpperTriangleOpCost, - self).__init__(op=op, op_desc=op_desc, cluster=cluster) + super(FusedSoftmaxMaskUpperTriangleOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -437,8 +438,9 @@ class FusedSoftmaxMaskUpperTriangleGradOpCost(CompOpCost): OP_TYPE = "fused_softmax_mask_upper_triangle_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(FusedSoftmaxMaskUpperTriangleGradOpCost, - self).__init__(op=op, op_desc=op_desc, cluster=cluster) + super(FusedSoftmaxMaskUpperTriangleGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -455,9 +457,9 @@ class GatherOpCost(CompOpCost): OP_TYPE = "gather" def __init__(self, op=None, op_desc=None, cluster=None): - super(GatherOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(GatherOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be 
overrided def calc_flops(self): @@ -474,9 +476,9 @@ class GeluOpCost(CompOpCost): OP_TYPE = "gelu" def __init__(self, op=None, op_desc=None, cluster=None): - super(GeluOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(GeluOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -493,9 +495,9 @@ class GeluGradOpCost(CompOpCost): OP_TYPE = "gelu_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(GeluGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(GeluGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -512,9 +514,9 @@ class GreaterEqualOpCost(CompOpCost): OP_TYPE = "greater_equal" def __init__(self, op=None, op_desc=None, cluster=None): - super(GreaterEqualOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(GreaterEqualOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -531,9 +533,9 @@ class IncrementOpCost(CompOpCost): OP_TYPE = "increment" def __init__(self, op=None, op_desc=None, cluster=None): - super(IncrementOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(IncrementOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -546,9 +548,9 @@ class IsEmptyOpCost(CompOpCost): OP_TYPE = "is_empty" def __init__(self, op=None, op_desc=None, cluster=None): - super(IsEmptyOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(IsEmptyOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -561,9 +563,9 @@ class LayerNormOpCost(CompOpCost): OP_TYPE = "layer_norm" def __init__(self, op=None, op_desc=None, cluster=None): - super(LayerNormOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LayerNormOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -580,9 +582,9 @@ class LayerNormGradOpCost(CompOpCost): OP_TYPE = "layer_norm_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(LayerNormGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LayerNormGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -599,9 +601,9 @@ class LessThanOpCost(CompOpCost): OP_TYPE = "less_than" def __init__(self, op=None, op_desc=None, cluster=None): - super(LessThanOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LessThanOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -618,9 +620,9 @@ class LogicalNotOpCost(CompOpCost): OP_TYPE = "logical_not" def __init__(self, op=None, op_desc=None, cluster=None): - super(LogicalNotOpCost, self).__init__(op=op, - op_desc=op_desc, - 
cluster=cluster) + super(LogicalNotOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -637,9 +639,9 @@ class LogicalAndOpCost(CompOpCost): OP_TYPE = "logical_and" def __init__(self, op=None, op_desc=None, cluster=None): - super(LogicalAndOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LogicalAndOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -656,9 +658,9 @@ class LodResetOpCost(CompOpCost): OP_TYPE = "lod_reset" def __init__(self, op=None, op_desc=None, cluster=None): - super(LodResetOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LodResetOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -692,9 +694,9 @@ class LookupTableV2OpCost(CompOpCost): OP_TYPE = "lookup_table_v2" def __init__(self, op=None, op_desc=None, cluster=None): - super(LookupTableV2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LookupTableV2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -711,9 +713,9 @@ class LookupTableV2GradOpCost(CompOpCost): OP_TYPE = "lookup_table_v2_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(LookupTableV2GradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(LookupTableV2GradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -730,9 +732,9 @@ class MatmulOpCost(CompOpCost): OP_TYPE = "matmul" def __init__(self, op=None, op_desc=None, cluster=None): - super(MatmulOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MatmulOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -749,9 +751,9 @@ class MatmulGradOpCost(CompOpCost): OP_TYPE = "matmul_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(MatmulGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MatmulGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -768,9 +770,9 @@ class MatmulV2OpCost(CompOpCost): OP_TYPE = "matmul_v2" def __init__(self, op=None, op_desc=None, cluster=None): - super(MatmulV2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MatmulV2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -787,9 +789,9 @@ class MatmulV2GradOpCost(CompOpCost): OP_TYPE = "matmul_v2_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(MatmulV2GradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MatmulV2GradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided 
def calc_flops(self): @@ -806,9 +808,9 @@ class MemcpyOpCost(CompOpCost): OP_TYPE = "memcpy" def __init__(self, op=None, op_desc=None, cluster=None): - super(MemcpyOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MemcpyOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -842,9 +844,9 @@ class MulGradOpCost(CompOpCost): OP_TYPE = "mul_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(MulGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(MulGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -861,9 +863,9 @@ class OneHotOpCost(CompOpCost): OP_TYPE = "one_hot" def __init__(self, op=None, op_desc=None, cluster=None): - super(OneHotOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(OneHotOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -880,9 +882,9 @@ class ReadFromArrayOpCost(CompOpCost): OP_TYPE = "read_from_array" def __init__(self, op=None, op_desc=None, cluster=None): - super(ReadFromArrayOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ReadFromArrayOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -899,9 +901,9 @@ class ReduceSumOpCost(CompOpCost): OP_TYPE = "reduce_sum" def __init__(self, op=None, op_desc=None, cluster=None): - super(ReduceSumOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ReduceSumOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -918,9 +920,9 @@ class ReduceSumGradOpCost(CompOpCost): OP_TYPE = "reduce_sum_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ReduceSumGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ReduceSumGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -937,9 +939,9 @@ class Reshape2OpCost(CompOpCost): OP_TYPE = "reshape2" def __init__(self, op=None, op_desc=None, cluster=None): - super(Reshape2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Reshape2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -956,9 +958,9 @@ class Reshape2GradOpCost(CompOpCost): OP_TYPE = "reshape2_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(Reshape2GradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Reshape2GradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -975,9 +977,9 @@ class ReduceMeanOpCost(CompOpCost): OP_TYPE = "reduce_mean" def __init__(self, op=None, op_desc=None, cluster=None): - super(ReduceMeanOpCost, self).__init__(op=op, - op_desc=op_desc, - 
cluster=cluster) + super(ReduceMeanOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -994,9 +996,9 @@ class ReduceMeanGradOpCost(CompOpCost): OP_TYPE = "reduce_mean_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(ReduceMeanGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ReduceMeanGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1013,9 +1015,9 @@ class SamplingIdOpCost(CompOpCost): OP_TYPE = "sampling_id" def __init__(self, op=None, op_desc=None, cluster=None): - super(SamplingIdOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SamplingIdOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1032,9 +1034,9 @@ class ScaleOpCost(CompOpCost): OP_TYPE = "scale" def __init__(self, op=None, op_desc=None, cluster=None): - super(ScaleOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(ScaleOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1051,9 +1053,9 @@ class SliceOpCost(CompOpCost): OP_TYPE = "slice" def __init__(self, op=None, op_desc=None, cluster=None): - super(SliceOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SliceOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1070,9 +1072,9 @@ class SoftmaxOpCost(CompOpCost): OP_TYPE = "softmax" def __init__(self, op=None, op_desc=None, cluster=None): - super(SoftmaxOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SoftmaxOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1089,9 +1091,9 @@ class SoftmaxGradOpCost(CompOpCost): OP_TYPE = "softmax_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(SoftmaxGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SoftmaxGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1108,9 +1110,9 @@ class SoftmaxWithCrossEntropyOpCost(CompOpCost): OP_TYPE = "softmax_with_cross_entropy" def __init__(self, op=None, op_desc=None, cluster=None): - super(SoftmaxWithCrossEntropyOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SoftmaxWithCrossEntropyOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1127,9 +1129,9 @@ class SoftmaxWithCrossEntropyGradOpCost(CompOpCost): OP_TYPE = "softmax_with_cross_entropy_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(SoftmaxWithCrossEntropyGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SoftmaxWithCrossEntropyGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) 
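# Illustrative sketch (standalone mock, not Paddle's implementation): each
# CompOpCost subclass in this file only pins an OP_TYPE and forwards
# op/op_desc/cluster to the base class; register_op_cost, defined in
# base_cost.py, makes the class reachable through _g_op_cost_factory so that
# calc_time_by_modeling can dispatch on the operator type. A minimal stand-in
# for that registration pattern:
_FACTORY_SKETCH = {}

def _register_sketch(cls):
    _FACTORY_SKETCH[cls.OP_TYPE] = cls
    return cls

@_register_sketch
class _MatmulV2CostSketch:
    OP_TYPE = "matmul_v2"

    def calc_time(self):
        return 0.0  # a concrete model would override this with a real estimate

assert isinstance(_FACTORY_SKETCH["matmul_v2"](), _MatmulV2CostSketch)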
# For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1146,9 +1148,9 @@ class SplitOpCost(CompOpCost): OP_TYPE = "split" def __init__(self, op=None, op_desc=None, cluster=None): - super(SplitOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SplitOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1165,9 +1167,9 @@ class Squeeze2OpCost(CompOpCost): OP_TYPE = "squeeze2" def __init__(self, op=None, op_desc=None, cluster=None): - super(Squeeze2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Squeeze2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1184,9 +1186,9 @@ class SquareOpCost(CompOpCost): OP_TYPE = "square" def __init__(self, op=None, op_desc=None, cluster=None): - super(SquareOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SquareOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1203,9 +1205,9 @@ class SquareGradOpCost(CompOpCost): OP_TYPE = "square_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(SquareGradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(SquareGradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1239,9 +1241,9 @@ class TopKOpCost(CompOpCost): OP_TYPE = "top_k" def __init__(self, op=None, op_desc=None, cluster=None): - super(TopKOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(TopKOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1258,9 +1260,9 @@ class Transpose2OpCost(CompOpCost): OP_TYPE = "transpose2" def __init__(self, op=None, op_desc=None, cluster=None): - super(Transpose2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Transpose2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1277,9 +1279,9 @@ class Transpose2GradOpCost(CompOpCost): OP_TYPE = "transpose2_grad" def __init__(self, op=None, op_desc=None, cluster=None): - super(Transpose2GradOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Transpose2GradOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1296,9 +1298,9 @@ class Unsqueeze2OpCost(CompOpCost): OP_TYPE = "unsqueeze2" def __init__(self, op=None, op_desc=None, cluster=None): - super(Unsqueeze2OpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(Unsqueeze2OpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): @@ -1315,9 +1317,9 @@ class WriteToArrayOpCost(CompOpCost): OP_TYPE = "write_to_array" def __init__(self, op=None, op_desc=None, cluster=None): 
- super(WriteToArrayOpCost, self).__init__(op=op, - op_desc=op_desc, - cluster=cluster) + super(WriteToArrayOpCost, self).__init__( + op=op, op_desc=op_desc, cluster=cluster + ) # For a concrete COMP OP, the calc_time and calc_flops function need to be overrided def calc_flops(self): diff --git a/python/paddle/distributed/auto_parallel/cost/estimate_cost.py b/python/paddle/distributed/auto_parallel/cost/estimate_cost.py index fba9d7919c406acd655f0840cb5eac75fb984617..cac8bf9f277e445c9f0604e5d456d518627f5fb3 100644 --- a/python/paddle/distributed/auto_parallel/cost/estimate_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/estimate_cost.py @@ -26,12 +26,9 @@ from ..dist_tensor import DistributedTensor class CostEstimator: _sepical_op_type = ["fused_attention", "fused_feedforward"] - def __init__(self, - program, - cluster, - mode="modeling", - rank=None, - loop_count=10): + def __init__( + self, program, cluster, mode="modeling", rank=None, loop_count=10 + ): self._program = program self._cluster = cluster self._check_mode(mode) @@ -40,7 +37,8 @@ class CostEstimator: self._loop_count = loop_count self._global_cost = Cost() self._local_cost_mapping = {} - self._detailed_cost = OrderedDict( + self._detailed_cost = ( + OrderedDict() ) # {`op_id`: {"reshard": [], "dist_op": [], "local_cost": local_cost}}} self._bubble_time_mapping = {} self._ordered_ops = [] @@ -105,7 +103,8 @@ class CostEstimator: def _check_mode(self, mode): if mode not in ["modeling", "profiling"]: raise ValueError( - "Just support modeling and profiling, but got {}".format(mode)) + "Just support modeling and profiling, but got {}".format(mode) + ) def _is_special_var_name(self, var_name): special_var_name = ["lod_tensor_blocking_queue_0"] @@ -115,6 +114,7 @@ class CostEstimator: def _estimate_core(self, dist_context, resharder, block): from ..reshard import get_var_with_recursion + ops = block.ops loop_count = None if block.desc.id != self.program.global_block().desc.id: @@ -131,8 +131,9 @@ class CostEstimator: if int(op.attr('op_role')) == int(OpRole.Optimize): continue if op.type in [ - "create_py_reader", "create_double_buffer_reader", - "read" + "create_py_reader", + "create_double_buffer_reader", + "read", ]: continue @@ -171,14 +172,16 @@ class CostEstimator: max_time = rank_cost.time for rank in group_ranks: - self.local_cost( - rank).time = max_time + cost.time + self.local_cost(rank).time = ( + max_time + cost.time + ) if rank not in self._bubble_time_mapping: self._bubble_time_mapping[rank] = 0 self._bubble_time_mapping[rank] += ( - max_time - cost_time[rank]) + max_time - cost_time[rank] + ) for rank in local_comp_cost: for comp_cost in local_comp_cost[rank]: @@ -190,15 +193,19 @@ class CostEstimator: processes = op_dist_attr.process_mesh.processes container = get_distributed_operator_impl_container( - op_dist_attr.impl_type) + op_dist_attr.impl_type + ) dist_impl = container.impls[op_dist_attr.impl_idx] - dist_op_cost = dist_impl.calc_cost(op.attr('op_role'), dist_op, - dist_context, self.cluster) + dist_op_cost = dist_impl.calc_cost( + op.attr('op_role'), dist_op, dist_context, self.cluster + ) detail["dist_op_cost"] = dist_op_cost if dist_op_cost is None: - assert dist_op.serial_op.type in CostEstimator._sepical_op_type + assert ( + dist_op.serial_op.type in CostEstimator._sepical_op_type + ) continue for item in dist_op_cost: if isinstance(item, list): @@ -216,12 +223,14 @@ class CostEstimator: if max_time < rank_cost.time: max_time = rank_cost.time for rank in group_ranks: - self.local_cost( - 
rank).time = max_time + comm_op_cost.time + self.local_cost(rank).time = ( + max_time + comm_op_cost.time + ) if rank not in self._bubble_time_mapping: self._bubble_time_mapping[rank] = 0 self._bubble_time_mapping[rank] += ( - max_time - cost_time[rank]) + max_time - cost_time[rank] + ) elif isinstance(item, dict): # Op just one for rank in processes: @@ -246,8 +255,11 @@ class CostEstimator: dtype_factor = 8 elif dtype == paddle.float32 or dtype == paddle.int32: dtype_factor = 4 - elif dtype == paddle.float16 or dtype == paddle.bfloat16 \ - or dtype == paddle.int16: + elif ( + dtype == paddle.float16 + or dtype == paddle.bfloat16 + or dtype == paddle.int16 + ): dtype_factor = 2 elif dtype == paddle.int8 or dtype == paddle.uint8: dtype_factor = 1 @@ -269,8 +281,9 @@ class CostEstimator: memories = {} self.max_memories = {} - var_info = { - } # var_name: [[process_mesh, dims_mapping], [id]], [[process_mesh, dims_mapping], [id]]} + var_info = ( + {} + ) # var_name: [[process_mesh, dims_mapping], [id]], [[process_mesh, dims_mapping], [id]]} for block in self.program.blocks: for op in block.ops: @@ -279,18 +292,22 @@ class CostEstimator: for op_id, op in self._ordered_ops: if op.type in [ - "create_py_reader", "create_double_buffer_reader", "read" + "create_py_reader", + "create_double_buffer_reader", + "read", ]: continue dist_op = dist_context.get_dist_op_for_program(op) process_mesh = dist_op.dist_attr.process_mesh for var_name in op.input_arg_names: input_dims_mapping = dist_op.dist_attr.get_input_dims_mapping( - var_name) + var_name + ) if var_name not in var_info: var_info[var_name] = {} - key = _convert_pm_and_dm_to_str(process_mesh, - input_dims_mapping) + key = _convert_pm_and_dm_to_str( + process_mesh, input_dims_mapping + ) if key not in var_info[var_name]: var_info[var_name][key] = {} # It is even partition now @@ -299,21 +316,27 @@ class CostEstimator: global_sizes = var.shape dtype = var.dtype sizes = DistributedTensor.get_local_sizes( - global_sizes, input_dims_mapping, process_mesh.topology, - process_mesh.processes) + global_sizes, + input_dims_mapping, + process_mesh.topology, + process_mesh.processes, + ) var_info[var_name][key]["memory"] = self._calculate_bytes( - sizes, dtype) + sizes, dtype + ) if "position" not in var_info[var_name][key]: var_info[var_name][key]["position"] = [] var_info[var_name][key]["position"].append(op_id) for var_name in op.output_arg_names: output_dims_mapping = dist_op.dist_attr.get_output_dims_mapping( - var_name) + var_name + ) if var_name not in var_info: var_info[var_name] = {} - key = _convert_pm_and_dm_to_str(process_mesh, - output_dims_mapping) + key = _convert_pm_and_dm_to_str( + process_mesh, output_dims_mapping + ) if key not in var_info[var_name]: var_info[var_name][key] = {} if "memory" not in var_info[var_name][key]: @@ -321,10 +344,14 @@ class CostEstimator: global_sizes = var.shape dtype = var.dtype sizes = DistributedTensor.get_local_sizes( - global_sizes, output_dims_mapping, - process_mesh.topology, process_mesh.processes) + global_sizes, + output_dims_mapping, + process_mesh.topology, + process_mesh.processes, + ) var_info[var_name][key]["memory"] = self._calculate_bytes( - sizes, dtype) + sizes, dtype + ) if "position" not in var_info[var_name][key]: var_info[var_name][key]["position"] = [] var_info[var_name][key]["position"].append(op_id) @@ -332,7 +359,9 @@ class CostEstimator: has_used_vars = set() for op_id, op in self._ordered_ops: if op.type in [ - "create_py_reader", "create_double_buffer_reader", "read" + 
"create_py_reader", + "create_double_buffer_reader", + "read", ]: continue can_free_memories = {} @@ -341,9 +370,11 @@ class CostEstimator: process_mesh = dist_op.dist_attr.process_mesh for var_name in op.input_arg_names: input_dims_mapping = dist_op.dist_attr.get_input_dims_mapping( - var_name) - key = _convert_pm_and_dm_to_str(process_mesh, - input_dims_mapping) + var_name + ) + key = _convert_pm_and_dm_to_str( + process_mesh, input_dims_mapping + ) has_used_var = var_name + key var = dist_op.get_serial_input(var_name) # Not used @@ -363,13 +394,16 @@ class CostEstimator: if process not in can_free_memories: can_free_memories[process] = 0 can_free_memories[process] += var_info[ - var_name][key]["memory"] + var_name + ][key]["memory"] for var_name in op.output_arg_names: output_dims_mapping = dist_op.dist_attr.get_output_dims_mapping( - var_name) - key = _convert_pm_and_dm_to_str(process_mesh, - output_dims_mapping) + var_name + ) + key = _convert_pm_and_dm_to_str( + process_mesh, output_dims_mapping + ) has_used_var = var_name + key var = dist_op.get_serial_output(var_name) # Not used @@ -389,7 +423,8 @@ class CostEstimator: if process not in can_free_memories: can_free_memories[process] = 0 can_free_memories[process] += var_info[ - var_name][key]["memory"] + var_name + ][key]["memory"] # Calc peak memory for process in memories: @@ -413,8 +448,12 @@ class CostEstimator: def estimate(self, dist_context, resharder=None): self.prepare() from ..reshard import Resharder - resharder = Resharder(self.program, None, self.rank, dist_context, - []) if resharder is None else resharder + + resharder = ( + Resharder(self.program, None, self.rank, dist_context, []) + if resharder is None + else resharder + ) block = self.program.global_block() self._estimate_core(dist_context, resharder, block) @@ -446,7 +485,7 @@ class CostEstimator: memories = [ int(item // 1e6) for item in list(self.max_memories.values()) ] - for memory in (memories + header): + for memory in memories + header: if len(str(memory)) > max_len: max_len = len(str(memory)) max_len += 4 # for pretty print of center @@ -476,7 +515,7 @@ class CostEstimator: max_len = 0 header = ["Execution Time(ms)", "Max Memory(MiB)"] vals = [round(self.global_cost.time, 3), int(self.max_memory // 1e6)] - for memory in (vals + header): + for memory in vals + header: if len(str(memory)) > max_len: max_len = len(str(memory)) max_len += 4 # for pretty print of center @@ -506,16 +545,27 @@ class CostEstimator: def get_cost_from_engine(engine, mode): from ..utils import to_list + # Construct cost estimator by original main program - serial_main_prog = engine._serial_main_progs[mode].clone( - ) if mode in engine._serial_main_progs else engine._orig_main_prog.clone() + serial_main_prog = ( + engine._serial_main_progs[mode].clone() + if mode in engine._serial_main_progs + else engine._orig_main_prog.clone() + ) - serial_startup_prog = engine._serial_startup_progs[mode].clone( - ) if mode in engine._serial_startup_progs else engine._orig_startup_prog.clone( + serial_startup_prog = ( + engine._serial_startup_progs[mode].clone() + if mode in engine._serial_startup_progs + else engine._orig_startup_prog.clone() + ) + losses = ( + to_list(engine._loss) + if ( + not isinstance(engine._loss, paddle.nn.Layer) + and not callable(engine._loss) + ) + else engine._losses ) - losses = to_list( - engine._loss) if (not isinstance(engine._loss, paddle.nn.Layer) - and not callable(engine._loss)) else engine._losses if mode in engine._dist_contexts: dist_context = 
engine._dist_contexts[mode] @@ -523,33 +573,45 @@ def get_cost_from_engine(engine, mode): else: from ..completion import Completer from ..dist_context import DistributedContext - dist_context = DistributedContext(serial_main_prog, serial_startup_prog, - engine._optimizer, losses, {}, - {"loss": losses}, engine._cluster, - engine._strategy) + + dist_context = DistributedContext( + serial_main_prog, + serial_startup_prog, + engine._optimizer, + losses, + {}, + {"loss": losses}, + engine._cluster, + engine._strategy, + ) completer = Completer(dist_context) completer.complete_forward_annotation() dist_context.block_state.parse_forward_blocks( - dist_context.serial_main_program) + dist_context.serial_main_program + ) if mode == "eval" or mode == "predict": cost_estimator = CostEstimator(serial_main_prog, engine._cluster) elif mode == "train": from ..parallelizer_v2 import Parallelizer + # Get serial main program with backward serial_optimizer = engine._optimizer parallelizer = Parallelizer(mode, completer, dist_context) # Generate backward loss_name = dist_context.serial_loss.name serial_loss = serial_main_prog.global_block()._var_recursive(loss_name) - params_grads = parallelizer._generate_backward(serial_main_prog, - serial_startup_prog, - serial_loss) + params_grads = parallelizer._generate_backward( + serial_main_prog, serial_startup_prog, serial_loss + ) # Generate optimizer optimizer_ops = parallelizer._generate_optimizer( - serial_main_prog, serial_startup_prog, serial_optimizer, - params_grads) + serial_main_prog, + serial_startup_prog, + serial_optimizer, + params_grads, + ) cost_estimator = CostEstimator(serial_main_prog, engine._cluster) # Estimate global_cost and max memory diff --git a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py b/python/paddle/distributed/auto_parallel/cost/tensor_cost.py index 9741020da65127b2d9198674d9271774cbb5ccba..0303e29749f9ab9786153cb84e0df5c92891e074 100644 --- a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/tensor_cost.py @@ -22,7 +22,6 @@ from .base_cost import Cost class TensorCost: - def __init__(self, tensor=None, dist_tensor=None, shape=None, dtype=None): self._check_args(tensor, dist_tensor, shape, dtype) self._tensor = tensor @@ -49,31 +48,35 @@ class TensorCost: def _check_args(self, tensor, dist_tensor, shape, dtype): if tensor is not None: - assert (shape is None and dist_tensor is None and dtype is None) + assert shape is None and dist_tensor is None and dtype is None if not isinstance(tensor, Variable): raise TypeError( "Please check tensor type is Variable, but got {}".format( - type(tensor))) + type(tensor) + ) + ) elif dist_tensor is not None: - assert (tensor is None and shape is None) + assert tensor is None and shape is None if not isinstance(dist_tensor, DistributedTensor): raise TypeError( - "Please check dist_tensor type is DistributedTensor, but got {}" - .format(type(dist_tensor))) + "Please check dist_tensor type is DistributedTensor, but got {}".format( + type(dist_tensor) + ) + ) elif shape is not None: - assert (tensor is None and dist_tensor is None - and dtype is not None) + assert tensor is None and dist_tensor is None and dtype is not None if not isinstance(shape, (list, set)): raise TypeError( "Please check shape type is list or set, but got {}".format( - type(shape))) + type(shape) + ) + ) elif dtype is not None: - assert (tensor is None and dist_tensor is None - and shape is not None) + assert tensor is None and dist_tensor is None and shape 
is not None @property def cost(self): diff --git a/python/paddle/distributed/auto_parallel/cost_model.py b/python/paddle/distributed/auto_parallel/cost_model.py index cdcc19e27d792cdd9c6533c1b18c75883dcf509f..dfff44d0bde756fe95bd208f228d43a5a52beea1 100644 --- a/python/paddle/distributed/auto_parallel/cost_model.py +++ b/python/paddle/distributed/auto_parallel/cost_model.py @@ -36,7 +36,6 @@ class CostNodeType(Enum): class Cost(object): - def __init__(self): self.runtime = None self.static_mem = None @@ -51,7 +50,6 @@ class CostModelMode(Enum): class CostNode(object): - def __init__(self, node, node_type, id=None): self.id = id self.node = node @@ -72,7 +70,6 @@ class CostNode(object): class MergedOpsCostNode(CostNode): - def __init__(self, node_type, id=None, base_node_list=None, is_bwd=False): super(MergedOpsCostNode, self).__init__(None, node_type, id) self.node_list = base_node_list @@ -80,13 +77,9 @@ class MergedOpsCostNode(CostNode): class CommOpCostNode(CostNode): - - def __init__(self, - node, - node_type, - id=None, - comm_node_list=None, - is_bwd=False): + def __init__( + self, node, node_type, id=None, comm_node_list=None, is_bwd=False + ): super(CommOpCostNode, self).__init__(node, node_type, id) self.node_list = comm_node_list self.ranks = [] @@ -108,8 +101,9 @@ class CommOpCostNode(CostNode): comm_volumn = np.prod(self.input_shape) * 4 if 'allreduce' in self.comm_type: - self._cost = comm_volumn / (BANDWIDTH * num_ranks / - (2 * (num_ranks - 1))) + self._cost = comm_volumn / ( + BANDWIDTH * num_ranks / (2 * (num_ranks - 1)) + ) elif 'gather' in self.comm_type: self._cost = comm_volumn / (BANDWIDTH * num_ranks / (num_ranks - 1)) elif 'broadcast' in self.comm_type: @@ -121,14 +115,15 @@ class CommOpCostNode(CostNode): class TensorCostNode(CostNode): - - def __init__(self, - node, - node_type, - id=None, - base_node_list=None, - batch_size=None, - shared_node_id=None): + def __init__( + self, + node, + node_type, + id=None, + base_node_list=None, + batch_size=None, + shared_node_id=None, + ): super(TensorCostNode, self).__init__(node, node_type, id) if node.name == "create_py_reader_0" or node.name == "double_buffer_0": self.shape = [2, 2] @@ -163,7 +158,6 @@ class TensorCostNode(CostNode): class CompOpCostNode(CostNode): - def __init__(self, node, node_type, id=None, is_bwd=False, is_optim=False): super(CompOpCostNode, self).__init__(node, node_type, id) self.is_bwd = is_bwd @@ -179,7 +173,6 @@ class CompOpCostNode(CostNode): class PipeEvent(object): - def __init__(self, stage_id, event_name, duration, start_time=-1): self.stage_id = stage_id self.name = event_name @@ -189,15 +182,16 @@ class PipeEvent(object): class CostModel(object): - - def __init__(self, - mode=CostModelMode.BENCHMARKING, - cluster=None, - batch_size=1, - microbatch_num=1, - opcall_overhead=0, - standalone_cost_data=None, - pipeline_config=None): + def __init__( + self, + mode=CostModelMode.BENCHMARKING, + cluster=None, + batch_size=1, + microbatch_num=1, + opcall_overhead=0, + standalone_cost_data=None, + pipeline_config=None, + ): self.mode = mode # parameters @@ -229,17 +223,20 @@ class CostModel(object): self.optim_time = [] def _parse_sub_program(self, program, nodes, graph, cost_data, sub_idx): - assert len( - program.blocks) == 1, "Program more than 1 block not supported." + assert ( + len(program.blocks) == 1 + ), "Program more than 1 block not supported." 
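# Illustrative sketch (standalone): CommOpCostNode.init_comm_cost above prices a
# collective by its float32 data volume (np.prod(shape) * 4 bytes) divided by an
# effective bandwidth that depends on the traffic pattern, e.g. ring allreduce
# moves 2 * (n - 1) / n of the volume per rank. _BANDWIDTH_SKETCH is an assumed
# placeholder, not the module's constant, and only the branches visible in this
# diff are reproduced.
import numpy as np

_BANDWIDTH_SKETCH = 32e9  # bytes per second, assumed

def _comm_time_sketch(comm_type, input_shape, num_ranks):
    volume = np.prod(input_shape) * 4
    if 'allreduce' in comm_type:
        return volume / (_BANDWIDTH_SKETCH * num_ranks / (2 * (num_ranks - 1)))
    if 'gather' in comm_type:
        return volume / (_BANDWIDTH_SKETCH * num_ranks / (num_ranks - 1))
    return 0.0  # other comm types are handled by separate branches in the source

print(_comm_time_sketch('c_allreduce_sum', [4096, 4096], num_ranks=8))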
block = program.blocks[0] var_id = "lod_tensor_blocking_queue_0" new_var = program.global_block().create_var( name=var_id, dtype=paddle.float32, - type=core.VarDesc.VarType.LOD_TENSOR) - nodes[var_id] = TensorCostNode(new_var, CostNodeType.VARIABLE, - "lod_tensor_blocking_queue_0") + type=core.VarDesc.VarType.LOD_TENSOR, + ) + nodes[var_id] = TensorCostNode( + new_var, CostNodeType.VARIABLE, "lod_tensor_blocking_queue_0" + ) for var in block.vars.values(): var_id = var.name # if var.name == "create_py_reader_0" or var.name == "double_buffer_0": @@ -249,13 +246,17 @@ class CostModel(object): for op in block.ops: op_id = op.type + "_" + str(op.idx) - if op.type.startswith('c_') or op.type.startswith( - 'send') or op.type.startswith('recv'): + if ( + op.type.startswith('c_') + or op.type.startswith('send') + or op.type.startswith('recv') + ): is_bwd = False - if op.type.startswith( - 'c_' - ) and op.type != "c_sync_calc_stream" and not op.type.startswith( - 'c_embedding'): + if ( + op.type.startswith('c_') + and op.type != "c_sync_calc_stream" + and not op.type.startswith('c_embedding') + ): ring_id = op.attr('ring_id') if ring_id not in self.ring2rank: self.ring2rank[ring_id] = set() @@ -265,14 +266,17 @@ class CostModel(object): is_bwd = '@GRAD' in op.output('Out')[0] elif op.type.startswith('send'): is_bwd = '@GRAD' in op.input('X')[0] - op_node = CommOpCostNode(op, CostNodeType.COMMUNICATION, op_id, - is_bwd) + op_node = CommOpCostNode( + op, CostNodeType.COMMUNICATION, op_id, is_bwd + ) else: - is_bwd = (int(op.attr('op_role')) == int( - OpRole.Backward)) or "@GRAD" in op.input_arg_names + is_bwd = ( + int(op.attr('op_role')) == int(OpRole.Backward) + ) or "@GRAD" in op.input_arg_names is_optim = 'LearningRate' in op.input_names - op_node = CompOpCostNode(op, CostNodeType.COMPUTATION, op_id, - is_bwd, is_optim) + op_node = CompOpCostNode( + op, CostNodeType.COMPUTATION, op_id, is_bwd, is_optim + ) op_node.init_comp_cost(cost_data) nodes[op_id] = op_node @@ -310,17 +314,20 @@ class CostModel(object): for pred_id in graph[node_id][PRED]: pred = nodes[pred_id] if pred.type == CostNodeType.COMPUTATION and ( - pred_id in graph[node_id][SUCC]): + pred_id in graph[node_id][SUCC] + ): graph[pred_id][SUCC].remove(node_id) graph[node_id][PRED].remove(pred_id) write_op_cnt += 1 new_var_id = node_id + '_write_{}'.format(write_op_cnt) - new_var = TensorCostNode(node.node, - CostNodeType.VARIABLE, - new_var_id, - shared_node_id=node_id) + new_var = TensorCostNode( + node.node, + CostNodeType.VARIABLE, + new_var_id, + shared_node_id=node_id, + ) graph[new_var_id] = [[], []] graph[pred_id][SUCC].append(new_var_id) @@ -346,23 +353,31 @@ class CostModel(object): self.op_graph.append({}) self.runtime_graph.append({}) self._parse_sub_program( - sub_prog, self.nodes[sub_idx], self.origin_graph[sub_idx], - self.cost_data[0 if self.rank2pp is None else self. 
- rank2pp[sub_idx]], sub_idx) + sub_prog, + self.nodes[sub_idx], + self.origin_graph[sub_idx], + self.cost_data[ + 0 if self.rank2pp is None else self.rank2pp[sub_idx] + ], + sub_idx, + ) return self.nodes def _find_succ_op(self, node_id, sub_idx=0): succ_ops_id = [] for succ_id in self.origin_graph[sub_idx][node_id][SUCC]: succ = self.nodes[sub_idx][succ_id] - if succ.type == CostNodeType.COMMUNICATION or \ - succ.type == CostNodeType.COMPUTATION: + if ( + succ.type == CostNodeType.COMMUNICATION + or succ.type == CostNodeType.COMPUTATION + ): succ_ops_id.append(succ_id) elif succ.type == CostNodeType.VARIABLE: succ_ops_id = succ_ops_id + self._find_succ_op(succ_id, sub_idx) else: raise NotImplementedError( - 'This type of node not supported yet:{}'.format(succ.type)) + 'This type of node not supported yet:{}'.format(succ.type) + ) return succ_ops_id def build_op_graph(self): @@ -392,9 +407,11 @@ class CostModel(object): for sub_idx in range(self.total_rank): for node_id, edges in self.op_graph[sub_idx].items(): node = self.nodes[sub_idx][node_id] - if node_id.startswith('c_') and not node.id.startswith( - "c_sync_calc_stream") and not node.id.startswith( - 'c_embedding'): + if ( + node_id.startswith('c_') + and not node.id.startswith("c_sync_calc_stream") + and not node.id.startswith('c_embedding') + ): ring_id = node.node.attr('ring_id') node.set_ranks(list(self.ring2rank[ring_id])) node.init_comm_cost(self.cluster) @@ -420,13 +437,17 @@ class CostModel(object): else: raise NotImplementedError( 'This type of merging is not supported:{}'.format( - merge_type)) + merge_type + ) + ) merged_node_id = 'merged_' + str(len(nodes)) is_bwd = to_merge_node_list[0].is_bwd - merged_node = MergedOpsCostNode(CostNodeType.MERGED, - id=merged_node_id, - base_node_list=nodes_list, - is_bwd=is_bwd) + merged_node = MergedOpsCostNode( + CostNodeType.MERGED, + id=merged_node_id, + base_node_list=nodes_list, + is_bwd=is_bwd, + ) merged_node.cost = node_cost return merged_node_id, merged_node @@ -440,12 +461,12 @@ class CostModel(object): ''' cnt = 0 for sub_idx in range(self.total_rank): - cnt += self._merge_linear(self.nodes[sub_idx], - self.runtime_graph[sub_idx], - is_bwd=False) - cnt += self._merge_linear(self.nodes[sub_idx], - self.runtime_graph[sub_idx], - is_bwd=True) + cnt += self._merge_linear( + self.nodes[sub_idx], self.runtime_graph[sub_idx], is_bwd=False + ) + cnt += self._merge_linear( + self.nodes[sub_idx], self.runtime_graph[sub_idx], is_bwd=True + ) return cnt def merge_branch(self): @@ -461,12 +482,12 @@ class CostModel(object): ''' cnt = 0 for sub_idx in range(self.total_rank): - cnt += self._merge_branch(self.nodes[sub_idx], - self.runtime_graph[sub_idx], - is_bwd=False) - cnt += self._merge_branch(self.nodes[sub_idx], - self.runtime_graph[sub_idx], - is_bwd=True) + cnt += self._merge_branch( + self.nodes[sub_idx], self.runtime_graph[sub_idx], is_bwd=False + ) + cnt += self._merge_branch( + self.nodes[sub_idx], self.runtime_graph[sub_idx], is_bwd=True + ) return cnt def _merge_linear(self, nodes, runtime_graph, is_bwd=False): @@ -484,7 +505,8 @@ class CostModel(object): pred_id = edges[PRED][0] pred = nodes[pred_id] merged_node_id, merged_node = self._merge_node( - [node, pred], merge_type='linear', nodes=nodes) + [node, pred], merge_type='linear', nodes=nodes + ) nodes[merged_node_id] = merged_node runtime_graph[merged_node_id] = [[], []] @@ -492,7 +514,8 @@ class CostModel(object): succ = None try: runtime_graph[merged_node_id][SUCC] = copy.deepcopy( - edges[SUCC]) + edges[SUCC] + ) if 
len(runtime_graph[pred_id][SUCC]) > 1: # predecessor has more than 1 successor @@ -501,7 +524,8 @@ class CostModel(object): succ.remove(node_id) runtime_graph[merged_node_id][SUCC] += succ runtime_graph[merged_node_id][PRED] = runtime_graph[ - pred_id][PRED] + pred_id + ][PRED] except: pass try: @@ -567,16 +591,20 @@ class CostModel(object): to_merge = True try: - if len(edges[SUCC]) < 1 or len( - runtime_graph[edges[SUCC][0]][SUCC]) < 1: + if ( + len(edges[SUCC]) < 1 + or len(runtime_graph[edges[SUCC][0]][SUCC]) < 1 + ): continue except: continue end_node_id = runtime_graph[edges[SUCC][0]][SUCC][0] for i in succ_nodes_id: try: - if len(runtime_graph[i][SUCC]) != 1 or \ - runtime_graph[i][SUCC][0] != end_node_id: + if ( + len(runtime_graph[i][SUCC]) != 1 + or runtime_graph[i][SUCC][0] != end_node_id + ): to_merge = False # if branches has different end node, we don't merge them break except: @@ -584,7 +612,8 @@ class CostModel(object): if to_merge and len(succ_nodes_id) > 1: to_merge_node_list = [nodes[i] for i in succ_nodes_id] merged_node_id, merged_node = self._merge_node( - to_merge_node_list, merge_type='branch', nodes=nodes) + to_merge_node_list, merge_type='branch', nodes=nodes + ) nodes[merged_node_id] = merged_node runtime_graph[merged_node_id] = [[], []] @@ -605,7 +634,6 @@ class CostModel(object): return reduct_cnt def get_runtime_cost(self): - def get_node_cost(node): node_cost = node.cost + self.opcall_overhead if isinstance(node, MergedOpsCostNode): @@ -635,7 +663,8 @@ class CostModel(object): top_list = [] for sub_idx in range(self.total_rank): static_mem, cur_mem, top_mem = self._simulate_mem( - self.nodes[sub_idx], self.origin_graph[sub_idx]) + self.nodes[sub_idx], self.origin_graph[sub_idx] + ) static_list.append(static_mem) top_list.append(top_mem) return static_list, top_list @@ -670,8 +699,9 @@ class CostModel(object): static_mem += size cur_mem += size edges = sim_graph[node_id] - if not (node.type == CostNodeType.VARIABLE - and node.node.persistable): + if not ( + node.type == CostNodeType.VARIABLE and node.node.persistable + ): for succ_id in edges[SUCC]: sim_graph[succ_id][PRED].remove(node_id) if len(sim_graph[succ_id][PRED]) == 0: @@ -680,8 +710,10 @@ class CostModel(object): pred = nodes if pred.type == CostNodeType.VARIABLE: sim_graph[pred_id][SUCC].remove(node_id) - if len(sim_graph[pred_id] - [SUCC]) == 0 and not pred.node.persistable: + if ( + len(sim_graph[pred_id][SUCC]) == 0 + and not pred.node.persistable + ): cur_mem -= pred.get_size() return static_mem, cur_mem, top_mem @@ -713,16 +745,22 @@ class CostModel(object): event_list.append(e) if stid != stage_num - 1: q.put( - PipeEvent(stid + 1, - 'fwd', - self.fwd_time[stid + 1], - start_time=e.e_time)) + PipeEvent( + stid + 1, + 'fwd', + self.fwd_time[stid + 1], + start_time=e.e_time, + ) + ) else: q.put( - PipeEvent(stid, - 'bwd', - self.bwd_time[stid], - start_time=e.e_time)) + PipeEvent( + stid, + 'bwd', + self.bwd_time[stid], + start_time=e.e_time, + ) + ) fwd_cnt[stid] -= 1 global_time[stid] = e.e_time else: @@ -733,18 +771,24 @@ class CostModel(object): event_list.append(e) if stid != 0: q.put( - PipeEvent(stid - 1, - 'bwd', - self.bwd_time[stid - 1], - start_time=e.e_time)) + PipeEvent( + stid - 1, + 'bwd', + self.bwd_time[stid - 1], + start_time=e.e_time, + ) + ) fwd_cnt[stid] += 1 bwd_cnt[stid] -= 1 if bwd_cnt[stid] == 0: q.put( - PipeEvent(stid, - 'optim', - self.optim_time[stid], - start_time=e.e_time)) + PipeEvent( + stid, + 'optim', + self.optim_time[stid], + start_time=e.e_time, + ) + ) 
global_time[stid] = e.e_time elif e.name == 'optim': e.s_time = max(global_time[stid], e.s_time) @@ -754,7 +798,9 @@ class CostModel(object): else: raise NotImplementedError( 'This type of pipe event is not supported yet.{}'.format( - e.name)) + e.name + ) + ) for t in global_time: total_time = max(total_time, t) @@ -784,8 +830,13 @@ class CostModel(object): self.build_runtime_graph() -def estimate_cost(distributed_program, cluster, pipeline_config, - standalone_cost_data, batch_size): +def estimate_cost( + distributed_program, + cluster, + pipeline_config, + standalone_cost_data, + batch_size, +): """ Estimated cost from distributed program, cluster model and distributed settings. @@ -798,10 +849,12 @@ def estimate_cost(distributed_program, cluster, pipeline_config, """ # the following line is left for now, cluster model will be involved in the future assert cluster is None, "For now, cluster remains None" - cm_ctx = CostModel(cluster=cluster, - batch_size=batch_size, - standalone_cost_data=standalone_cost_data, - pipeline_config=pipeline_config) + cm_ctx = CostModel( + cluster=cluster, + batch_size=batch_size, + standalone_cost_data=standalone_cost_data, + pipeline_config=pipeline_config, + ) cm_ctx.init(distributed_program) cost = cm_ctx.get_cost() return cost diff --git a/python/paddle/distributed/auto_parallel/dist_attribute.py b/python/paddle/distributed/auto_parallel/dist_attribute.py index 04af4ad77e5a154b9925926aa867ee183ef466ee..19aaa07f24df66e965e8e720414e100e581c818d 100644 --- a/python/paddle/distributed/auto_parallel/dist_attribute.py +++ b/python/paddle/distributed/auto_parallel/dist_attribute.py @@ -17,11 +17,17 @@ from paddle.fluid.framework import Variable from .process_mesh import ProcessMesh _g_tensor_dist_attr_field_keys = [ - "process_mesh", "dims_mapping", "shard_sizes", "device_placement" + "process_mesh", + "dims_mapping", + "shard_sizes", + "device_placement", ] _g_op_dist_attr_field_keys = [ - "process_mesh", "impl_type", "impl_idx", "is_recompute" + "process_mesh", + "impl_type", + "impl_idx", + "is_recompute", ] _g_op_input_suffix = "@input" @@ -50,7 +56,6 @@ def append_op_output_suffix(name): class TensorDistributedAttribute: - def __init__(self): # The process mesh of distributed operator attribute must is the same as # the process meshes of all input and output distributed attributed @@ -67,8 +72,9 @@ class TensorDistributedAttribute: @process_mesh.setter def process_mesh(self, process_mesh): if process_mesh is not None: - assert isinstance(process_mesh, (list, ProcessMesh)), \ - "The type of process_mesh must be list or ProcessMesh." + assert isinstance( + process_mesh, (list, ProcessMesh) + ), "The type of process_mesh must be list or ProcessMesh." if isinstance(process_mesh, list): process_mesh = ProcessMesh(process_mesh) self._process_mesh = copy.deepcopy(process_mesh) @@ -80,12 +86,15 @@ class TensorDistributedAttribute: @dims_mapping.setter def dims_mapping(self, dims_mapping): if dims_mapping is not None: - assert isinstance(dims_mapping, list), \ - "The type of dims_mapping must be list." - assert all(isinstance(x, int) for x in dims_mapping), \ - ("All elements of dims_mapping must be integer") - assert all(x >= -1 for x in dims_mapping), \ - ("All elements of dims_mapping must be greater than or equal to -1.") + assert isinstance( + dims_mapping, list + ), "The type of dims_mapping must be list." 
+ assert all( + isinstance(x, int) for x in dims_mapping + ), "All elements of dims_mapping must be integer" + assert all( + x >= -1 for x in dims_mapping + ), "All elements of dims_mapping must be greater than or equal to -1." self._dims_mapping = copy.deepcopy(dims_mapping) @property @@ -109,37 +118,44 @@ class TensorDistributedAttribute: def init(self, dist_attr): if dist_attr is None: return - assert isinstance(dist_attr, (dict, TensorDistributedAttribute)), \ - "The type of dist_attr must be dict or TensorDistributedAttribute." + assert isinstance( + dist_attr, (dict, TensorDistributedAttribute) + ), "The type of dist_attr must be dict or TensorDistributedAttribute." if isinstance(dist_attr, dict): for key, value in dist_attr.items(): if key in get_tensor_dist_attr_field_keys(): field_property = TensorDistributedAttribute.__dict__.get( - key, None) + key, None + ) if field_property: field_property.fset(self, value) else: assert False, "No setter for {} in args {}.".format( - key, dist_attr) + key, dist_attr + ) elif isinstance(dist_attr, TensorDistributedAttribute): for key in get_tensor_dist_attr_field_keys(): field_property = TensorDistributedAttribute.__dict__.get( - key, None) + key, None + ) if field_property: field_property.fset(self, field_property.fget(dist_attr)) else: assert False, "No setter for {} in args {}.".format( - key, dist_attr) + key, dist_attr + ) self._is_annotated = copy.deepcopy(dist_attr._is_annotated) def reset(self, skip_dist_attr_field_names=None): - if skip_dist_attr_field_names is None or \ - (skip_dist_attr_field_names is not None \ - and "process_mesh" not in skip_dist_attr_field_names): + if skip_dist_attr_field_names is None or ( + skip_dist_attr_field_names is not None + and "process_mesh" not in skip_dist_attr_field_names + ): self._process_mesh = None - if skip_dist_attr_field_names is None or \ - (skip_dist_attr_field_names is not None \ - and "dims_mapping" not in skip_dist_attr_field_names): + if skip_dist_attr_field_names is None or ( + skip_dist_attr_field_names is not None + and "dims_mapping" not in skip_dist_attr_field_names + ): for i, _ in enumerate(self._dims_mapping): self._dims_mapping[i] = -1 self._is_annotated = {} @@ -160,8 +176,9 @@ class TensorDistributedAttribute: def mark_annotated_as(self, dist_attr): if dist_attr is None: return - assert isinstance(dist_attr, (dict, TensorDistributedAttribute)), \ - "The type of dist_attr must be dict or TensorDistributedAttribute." + assert isinstance( + dist_attr, (dict, TensorDistributedAttribute) + ), "The type of dist_attr must be dict or TensorDistributedAttribute." 
if isinstance(dist_attr, dict): for key in dist_attr.keys(): if key in get_tensor_dist_attr_field_keys(): @@ -189,21 +206,22 @@ class TensorDistributedAttribute: annotated_str = "annotated" else: annotated_str = "non-annotated" - str += "\n\t\tprocess_mesh ({}): {},".format(annotated_str, - self.process_mesh) + str += "\n\t\tprocess_mesh ({}): {},".format( + annotated_str, self.process_mesh + ) if self.is_annotated("dims_mapping"): annotated_str = "annotated" else: annotated_str = "non-annotated" - str += "\n\t\tdims_mapping ({}): {}".format(annotated_str, - self.dims_mapping) + str += "\n\t\tdims_mapping ({}): {}".format( + annotated_str, self.dims_mapping + ) str += "\n\t}" return str class OperatorDistributedAttribute: - def __init__(self): self._process_mesh = None self._op_type = None @@ -221,8 +239,9 @@ class OperatorDistributedAttribute: @process_mesh.setter def process_mesh(self, process_mesh): if process_mesh is not None: - assert isinstance(process_mesh, (list, ProcessMesh)), \ - "The type of process_mesh must be list or ProcessMesh." + assert isinstance( + process_mesh, (list, ProcessMesh) + ), "The type of process_mesh must be list or ProcessMesh." if isinstance(process_mesh, list): process_mesh = ProcessMesh(process_mesh) self._process_mesh = copy.deepcopy(process_mesh) @@ -337,8 +356,9 @@ class OperatorDistributedAttribute: def init(self, dist_attr): if dist_attr is None: return - assert isinstance(dist_attr, (dict, OperatorDistributedAttribute)), \ - "The type of dist_attr must be dict or OperatorDistributedAttribute." + assert isinstance( + dist_attr, (dict, OperatorDistributedAttribute) + ), "The type of dist_attr must be dict or OperatorDistributedAttribute." if isinstance(dist_attr, dict): for key, value in dist_attr.items(): if isinstance(key, Variable): @@ -350,31 +370,41 @@ class OperatorDistributedAttribute: self.set_output_dist_attr(key.name, tensor_dist_attr) else: if key in get_op_dist_attr_field_keys(): - field_property = OperatorDistributedAttribute.__dict__.get( - key, None) + field_property = ( + OperatorDistributedAttribute.__dict__.get(key, None) + ) if field_property: field_property.fset(self, value) else: assert False, "No setter for {} in args {}.".format( - key, dist_attr) + key, dist_attr + ) elif isinstance(dist_attr, OperatorDistributedAttribute): - for tensor_name, tensor_dist_attr in dist_attr.inputs_dist_attrs.items( - ): + for ( + tensor_name, + tensor_dist_attr, + ) in dist_attr.inputs_dist_attrs.items(): self.set_input_dist_attr( - tensor_name, dist_attr.get_input_dist_attr(tensor_name)) - for tensor_name, tensor_dist_attr in dist_attr.outputs_dist_attrs.items( - ): + tensor_name, dist_attr.get_input_dist_attr(tensor_name) + ) + for ( + tensor_name, + tensor_dist_attr, + ) in dist_attr.outputs_dist_attrs.items(): self.set_output_dist_attr( - tensor_name, dist_attr.get_output_dist_attr(tensor_name)) + tensor_name, dist_attr.get_output_dist_attr(tensor_name) + ) self._is_annotated = copy.deepcopy(dist_attr._is_annotated) for key in get_op_dist_attr_field_keys(): field_property = OperatorDistributedAttribute.__dict__.get( - key, None) + key, None + ) if field_property: field_property.fset(self, field_property.fget(dist_attr)) else: assert False, "No setter for {} in args {}.".format( - key, dist_attr) + key, dist_attr + ) # Make sure proscess_meshes in dist op be same if self.op_type == "while": return None @@ -390,8 +420,9 @@ class OperatorDistributedAttribute: if shared_process_mesh is None: shared_process_mesh = process_mesh else: - assert 
process_mesh == shared_process_mesh, \ - "ProcessMeshes in DistributedOperator must be the same." + assert ( + process_mesh == shared_process_mesh + ), "ProcessMeshes in DistributedOperator must be the same." self.process_mesh = shared_process_mesh def reset(self, skip_dist_attr_field_names=None): @@ -399,9 +430,10 @@ class OperatorDistributedAttribute: tensor_dist_attr.reset(skip_dist_attr_field_names) for tensor_dist_attr in self.outputs_dist_attrs.values(): tensor_dist_attr.reset(skip_dist_attr_field_names) - if skip_dist_attr_field_names is None or \ - (skip_dist_attr_field_names is not None \ - and "process_mesh" not in skip_dist_attr_field_names): + if skip_dist_attr_field_names is None or ( + skip_dist_attr_field_names is not None + and "process_mesh" not in skip_dist_attr_field_names + ): self._process_mesh = None self.impl_type = "default" self.impl_idx = 0 @@ -428,8 +460,9 @@ class OperatorDistributedAttribute: def mark_annotated_as(self, dist_attr): if dist_attr is None: return - assert isinstance(dist_attr, (dict, OperatorDistributedAttribute)), \ - "The type of dist_attr must be dict or OperatorDistributedAttribute." + assert isinstance( + dist_attr, (dict, OperatorDistributedAttribute) + ), "The type of dist_attr must be dict or OperatorDistributedAttribute." if isinstance(dist_attr, dict): for key, value in dist_attr.items(): if isinstance(key, Variable): @@ -458,15 +491,19 @@ class OperatorDistributedAttribute: self._is_annotated = copy.deepcopy(dist_attr._is_annotated) if self.is_annotated("process_mesh"): process_mesh_annotated = True - for tensor_name, tensor_dist_attr in dist_attr.inputs_dist_attrs.items( - ): + for ( + tensor_name, + tensor_dist_attr, + ) in dist_attr.inputs_dist_attrs.items(): input_dist_attr = self.get_input_dist_attr(tensor_name) if input_dist_attr is not None: input_dist_attr.mark_annotated_as(tensor_dist_attr) if input_dist_attr.is_annotated("process_mesh"): process_mesh_annotated = True - for tensor_name, tensor_dist_attr in dist_attr.outputs_dist_attrs.items( - ): + for ( + tensor_name, + tensor_dist_attr, + ) in dist_attr.outputs_dist_attrs.items(): output_dist_attr = self.get_output_dist_attr(tensor_name) if output_dist_attr is not None: output_dist_attr.mark_annotated_as(tensor_dist_attr) @@ -523,8 +560,9 @@ class OperatorDistributedAttribute: annotated_str = "annotated" else: annotated_str = "non-annotated" - str += "\n\t\tprocess_mesh ({}): {},".format(annotated_str, - self.process_mesh) + str += "\n\t\tprocess_mesh ({}): {},".format( + annotated_str, self.process_mesh + ) for arg_name, tensor_dist_attr in self.inputs_dist_attrs.items(): str += "\n\t\t{}'s (input): {},".format(arg_name, tensor_dist_attr) diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/dist_context.py index 008c4b987074e5a8e5342495b7aa0b38cbeb8c76..12c224ac27fa5c79b88ff842350d5321318fb7a1 100644 --- a/python/paddle/distributed/auto_parallel/dist_context.py +++ b/python/paddle/distributed/auto_parallel/dist_context.py @@ -50,15 +50,17 @@ class DistributedContext: One auto-parallel run should use its own DistributedContext to avoid interfering other run. 
""" - def __init__(self, - serial_main_prog=None, - serial_startup_prog=None, - serial_optimizer=None, - serial_loss=None, - feed_vars={}, - fetch_vars={}, - cluster=None, - strategy=None): + def __init__( + self, + serial_main_prog=None, + serial_startup_prog=None, + serial_optimizer=None, + serial_loss=None, + feed_vars={}, + fetch_vars={}, + cluster=None, + strategy=None, + ): # Data members related to original programs (unchanged) self._original_serial_main_program = serial_main_prog self._original_serial_startup_program = serial_startup_prog @@ -107,7 +109,7 @@ class DistributedContext: # self._tensor_id_to_tensor_node_ids = {} self._is_initialized = False - #TODO: need a better way to remove the following flag + # TODO: need a better way to remove the following flag self._need_copy_dist_attr_to_graph = False self._backup_pass_context_stack = [] self._backup_block_state_stack = [] @@ -189,7 +191,8 @@ class DistributedContext: @property def has_annotation(self): return len(self._dist_tensors_for_program) or len( - self._dist_ops_for_program) + self._dist_ops_for_program + ) @property def gradient_scale(self): @@ -209,18 +212,23 @@ class DistributedContext: def _backup_serial_info(self, mode): self._backup_serial_main_program_stack.append( - self._serial_main_program.clone()) + self._serial_main_program.clone() + ) self._backup_serial_startup_program_stack.append( - self._serial_startup_program.clone()) - self._backup_pass_context_stack.append(copy.deepcopy( - self._pass_context)) + self._serial_startup_program.clone() + ) + self._backup_pass_context_stack.append( + copy.deepcopy(self._pass_context) + ) self._backup_block_state_stack.append(copy.deepcopy(self._block_state)) def _backup_dist_info(self, mode): self._backup_dist_tensors_for_program_stack.append( - copy.deepcopy(self._dist_tensors_for_program)) + copy.deepcopy(self._dist_tensors_for_program) + ) self._backup_dist_ops_for_program_stack.append( - copy.deepcopy(self._dist_ops_for_program)) + copy.deepcopy(self._dist_ops_for_program) + ) def _backup(self, serial=True, serial_mode=None, dist=True, dist_mode=None): # Use this function carefully @@ -237,7 +245,8 @@ class DistributedContext: block_idx = loss.block.idx var_name = loss.name var = self._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) self._serial_loss = var elif len(self._original_serial_loss) == 0: self._serial_loss = [] @@ -247,7 +256,8 @@ class DistributedContext: block_idx = self._original_serial_loss.block.idx var_name = self._original_serial_loss.name var = self._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) self._serial_loss = var def _restore_serial_feed_vars(self): @@ -257,7 +267,8 @@ class DistributedContext: block_idx = var.block.idx var_name = var.name var = self._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) new_var_list.append(var) self._serial_feed_vars[key] = new_var_list @@ -272,7 +283,8 @@ class DistributedContext: block_idx = var.block.idx var_name = var.name var = self._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) new_inner_var_list.append(var) new_var_list.append(new_inner_var_list) else: @@ -280,22 +292,27 @@ class DistributedContext: block_idx = var.block.idx var_name = var.name var = self._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) 
new_var_list.append(var) self._serial_fetch_vars[key] = new_var_list def _restore_serial_info(self, mode="to_backup"): if mode == "to_backup": - self._serial_main_program = self._backup_serial_main_program_stack.pop( + self._serial_main_program = ( + self._backup_serial_main_program_stack.pop() ) - self._serial_startup_program = self._backup_serial_startup_program_stack.pop( + self._serial_startup_program = ( + self._backup_serial_startup_program_stack.pop() ) elif mode == "to_original": assert self._original_serial_main_program is not None assert self._original_serial_startup_program is not None - self._serial_main_program = self._original_serial_main_program.clone( + self._serial_main_program = ( + self._original_serial_main_program.clone() ) - self._serial_startup_program = self._original_serial_startup_program.clone( + self._serial_startup_program = ( + self._original_serial_startup_program.clone() ) self._restore_serial_loss() @@ -307,21 +324,27 @@ class DistributedContext: def _restore_dist_info(self, mode="to_backup"): if mode == "to_backup": - self._dist_tensors_for_program = self._backup_dist_tensors_for_program_stack.pop( + self._dist_tensors_for_program = ( + self._backup_dist_tensors_for_program_stack.pop() ) - self._dist_ops_for_program = self._backup_dist_ops_for_program_stack.pop( + self._dist_ops_for_program = ( + self._backup_dist_ops_for_program_stack.pop() ) elif mode == "to_original": assert self._original_dist_tensors_for_program assert self._original_dist_ops_for_program self._dist_tensors_for_program = copy.deepcopy( - self._original_dist_tensors_for_program) + self._original_dist_tensors_for_program + ) self._dist_ops_for_program = copy.deepcopy( - self._original_dist_ops_for_program) + self._original_dist_ops_for_program + ) elif mode == "to_default": new_tensors_ids = [] - for tensor_id, dist_tensor in self._dist_tensors_for_program.items( - ): + for ( + tensor_id, + dist_tensor, + ) in self._dist_tensors_for_program.items(): if tensor_id in self._tensors_ids: dist_tensor.dist_attr.reset() else: @@ -338,8 +361,10 @@ class DistributedContext: self._dist_ops_for_program.pop(op_id) else: new_tensors_ids = [] - for tensor_id, dist_tensor in self._dist_tensors_for_program.items( - ): + for ( + tensor_id, + dist_tensor, + ) in self._dist_tensors_for_program.items(): new_tensors_ids.append(tensor_id) for tensor_id in new_tensors_ids: self._dist_tensors_for_program.pop(tensor_id) @@ -354,11 +379,13 @@ class DistributedContext: self._need_copy_dist_attr_to_graph = True self._process_meshes = [] - def _restore(self, - serial=True, - serial_mode="to_backup", - dist=True, - dist_mode="to_backup"): + def _restore( + self, + serial=True, + serial_mode="to_backup", + dist=True, + dist_mode="to_backup", + ): # Use this function carefully if serial: self._restore_serial_info(serial_mode) @@ -369,11 +396,13 @@ class DistributedContext: if not self._is_initialized: if not self._serial_main_program: if self._original_serial_main_program: - self._serial_main_program = self._original_serial_main_program.clone( + self._serial_main_program = ( + self._original_serial_main_program.clone() ) if not self._serial_startup_program: if self._original_serial_startup_program: - self._serial_startup_program = self._original_serial_startup_program.clone( + self._serial_startup_program = ( + self._original_serial_startup_program.clone() ) if not self._serial_loss: self._restore_serial_loss() @@ -387,9 +416,11 @@ class DistributedContext: self._init_dist_attr_for_program() # Backup the original 
distributed information for later restore self._original_dist_tensors_for_program = copy.deepcopy( - self._dist_tensors_for_program) + self._dist_tensors_for_program + ) self._original_dist_ops_for_program = copy.deepcopy( - self._dist_ops_for_program) + self._dist_ops_for_program + ) self._tensors_ids = list(self._dist_tensors_for_program.keys()) self._ops_ids = list(self._dist_ops_for_program.keys()) self._is_initialized = True @@ -397,7 +428,8 @@ class DistributedContext: if with_graph: set_flags({"FLAGS_convert_all_blocks": True}) self._serial_graph = framework.IrGraph( - core.Graph(self._serial_main_program.desc)) + core.Graph(self._serial_main_program.desc) + ) self._init_dist_attr_for_graph() self._need_copy_dist_attr_to_graph = False @@ -405,8 +437,9 @@ class DistributedContext: self.copy_dist_attr_from_program_to_graph() def add_process_mesh(self, process_mesh): - assert isinstance(process_mesh, ProcessMesh), \ - 'The type of dim_mapping must be ProcessMesh.' + assert isinstance( + process_mesh, ProcessMesh + ), 'The type of dim_mapping must be ProcessMesh.' if process_mesh not in self.process_meshes: self._process_meshes.append(process_mesh) @@ -428,7 +461,8 @@ class DistributedContext: else: serial_tensor_id = serial_tensor.desc.original_id() dist_tensor = self._dist_tensors_for_program.get( - serial_tensor_id, None) + serial_tensor_id, None + ) if dist_tensor: return dist_tensor else: @@ -468,7 +502,8 @@ class DistributedContext: else: serial_tensor_id = serial_tensor.desc.original_id() dist_tensor = self._dist_tensors_for_program.get( - serial_tensor_id, None) + serial_tensor_id, None + ) if dist_tensor: return dist_tensor.dist_attr else: @@ -487,8 +522,9 @@ class DistributedContext: def get_tensor_dist_attr_for_graph(self, serial_tensor_node): serial_tensor_node_id = _node_id(serial_tensor_node) - dist_tensor = self._dist_tensors_for_graph.get(serial_tensor_node_id, - None) + dist_tensor = self._dist_tensors_for_graph.get( + serial_tensor_node_id, None + ) if dist_tensor: return dist_tensor.dist_attr else: @@ -530,7 +566,8 @@ class DistributedContext: if serial_node.is_var() and serial_node.var() is not None: serial_tensor_node_id = _node_id(serial_node) dist_tensor = self._dist_tensors_for_graph.get( - serial_tensor_node_id, None) + serial_tensor_node_id, None + ) if dist_tensor: return dist_tensor.dist_attr else: @@ -557,7 +594,8 @@ class DistributedContext: for tensor in block.vars.values(): # Copy the distributed tensors in the default context default_dist_tensor = default_ctx.get_dist_tensor_for_program( - tensor) + tensor + ) if default_dist_tensor and default_ctx is not self: self.add_dist_tensor_for_program(default_dist_tensor) current_dist_tensor = self.get_dist_tensor_for_program(tensor) @@ -574,12 +612,13 @@ class DistributedContext: dist_op = DistributedOperator(op) self.add_dist_op_for_program(dist_op) self._original_dist_tensors_for_program = copy.deepcopy( - self._dist_tensors_for_program) + self._dist_tensors_for_program + ) self._original_dist_ops_for_program = copy.deepcopy( - self._dist_ops_for_program) + self._dist_ops_for_program + ) def _order_nodes_by_program_order(self): - def _contains(nodes, target_node): for node in nodes: if _node_id(node) == _node_id(target_node): @@ -598,11 +637,14 @@ class DistributedContext: if node.is_op() and node.op() is not None: serial_ordered_op_nodes.append(node) serial_ordered_tensor_nodes.sort( - key=lambda node: node.node.original_desc_id()) + key=lambda node: node.node.original_desc_id() + ) 
serial_ordered_op_nodes.sort( - key=lambda node: node.node.original_desc_id()) + key=lambda node: node.node.original_desc_id() + ) num_nodes_before = len(serial_ordered_tensor_nodes) + len( - serial_ordered_op_nodes) + serial_ordered_op_nodes + ) new_serial_ordered_tensor_nodes = [] new_serial_ordered_op_nodes = [] @@ -610,9 +652,11 @@ class DistributedContext: for op_node in serial_ordered_op_nodes: tensor_nodes = [] for tensor_node in op_node.inputs: - if tensor_node.is_var() \ - and tensor_node.var() is not None \ - and not _contains(new_serial_ordered_nodes, tensor_node): + if ( + tensor_node.is_var() + and tensor_node.var() is not None + and not _contains(new_serial_ordered_nodes, tensor_node) + ): tensor_nodes.append(tensor_node) new_serial_ordered_tensor_nodes.append(tensor_node) tensor_nodes.sort(key=lambda node: node.node.original_desc_id()) @@ -621,23 +665,27 @@ class DistributedContext: new_serial_ordered_op_nodes.append(op_node) tensor_nodes = [] for tensor_node in op_node.outputs: - if tensor_node.is_var() \ - and tensor_node.var() is not None \ - and not _contains(new_serial_ordered_nodes, tensor_node): + if ( + tensor_node.is_var() + and tensor_node.var() is not None + and not _contains(new_serial_ordered_nodes, tensor_node) + ): tensor_nodes.append(tensor_node) new_serial_ordered_tensor_nodes.append(tensor_node) tensor_nodes.sort(key=lambda node: node.node.original_desc_id()) new_serial_ordered_nodes.extend(tensor_nodes) new_serial_ordered_tensor_nodes.sort( - key=lambda node: node.node.original_desc_id()) + key=lambda node: node.node.original_desc_id() + ) new_serial_ordered_op_nodes.sort( - key=lambda node: node.node.original_desc_id()) + key=lambda node: node.node.original_desc_id() + ) self._serial_ordered_tensor_nodes = new_serial_ordered_tensor_nodes self._serial_ordered_op_nodes = new_serial_ordered_op_nodes self._serial_ordered_nodes = new_serial_ordered_nodes assert len(self._serial_ordered_nodes) == len( - self._serial_ordered_tensor_nodes) + len( - self._serial_ordered_op_nodes) + self._serial_ordered_tensor_nodes + ) + len(self._serial_ordered_op_nodes) self._serial_orphan_tensor_nodes = [] for tensor_node in serial_ordered_tensor_nodes: if not _contains(self._serial_ordered_tensor_nodes, tensor_node): @@ -654,34 +702,49 @@ class DistributedContext: if node.is_var() and node.var() is not None: dist_tensor = None tensor_id = node.node.original_desc_id() - for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items( - ): - if tensor_id == cur_tensor_id \ - or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id(): + for ( + cur_tensor_id, + cur_dist_tensor, + ) in self._dist_tensors_for_program.items(): + if ( + tensor_id == cur_tensor_id + or tensor_id + == cur_dist_tensor.serial_tensor.desc.original_id() + ): dist_tensor = cur_dist_tensor - self._node_id_to_tensor_id[_node_id( - node)] = cur_tensor_id - assert dist_tensor is not None, \ - "Tensor must have a distributed tensor after the initialization for program." + self._node_id_to_tensor_id[ + _node_id(node) + ] = cur_tensor_id + assert ( + dist_tensor is not None + ), "Tensor must have a distributed tensor after the initialization for program." 
serial_tensor_node_id = _node_id(node) - new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor, - dist_tensor.dist_attr) + new_dist_tensor = DistributedTensor( + dist_tensor.serial_tensor, dist_tensor.dist_attr + ) self._dist_tensors_for_graph[ - serial_tensor_node_id] = new_dist_tensor + serial_tensor_node_id + ] = new_dist_tensor if node.is_op() and node.op() is not None: dist_op = None op_id = node.node.original_desc_id() - for cur_op_id, cur_dist_op in self._dist_ops_for_program.items( - ): - if op_id == cur_op_id \ - or op_id == cur_dist_op.serial_op.desc.original_id(): + for ( + cur_op_id, + cur_dist_op, + ) in self._dist_ops_for_program.items(): + if ( + op_id == cur_op_id + or op_id == cur_dist_op.serial_op.desc.original_id() + ): dist_op = cur_dist_op self._node_id_to_op_id[_node_id(node)] = cur_op_id - assert dist_op is not None, \ - "Operator must have a distributed operator after the initialization for program." + assert ( + dist_op is not None + ), "Operator must have a distributed operator after the initialization for program." serial_op_node_id = _node_id(node) - new_dist_op = DistributedOperator(dist_op.serial_op, - dist_op.dist_attr) + new_dist_op = DistributedOperator( + dist_op.serial_op, dist_op.dist_attr + ) self._dist_ops_for_graph[serial_op_node_id] = new_dist_op def clear_dist_info_for_program(self): @@ -697,36 +760,51 @@ class DistributedContext: if node.is_var() and node.var() is not None: dist_tensor = None tensor_id = node.node.original_desc_id() - for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items( - ): - if tensor_id == cur_tensor_id \ - or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id(): + for ( + cur_tensor_id, + cur_dist_tensor, + ) in self._dist_tensors_for_program.items(): + if ( + tensor_id == cur_tensor_id + or tensor_id + == cur_dist_tensor.serial_tensor.desc.original_id() + ): dist_tensor = cur_dist_tensor - assert dist_tensor is not None, \ - "Tensor must have a distributed tensor after the initialization for program." + assert ( + dist_tensor is not None + ), "Tensor must have a distributed tensor after the initialization for program." serial_tensor_node_id = _node_id(node) - new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor, - dist_tensor.dist_attr) + new_dist_tensor = DistributedTensor( + dist_tensor.serial_tensor, dist_tensor.dist_attr + ) self._dist_tensors_for_graph[ - serial_tensor_node_id] = new_dist_tensor + serial_tensor_node_id + ] = new_dist_tensor if node.is_op() and node.op() is not None: dist_op = None op_id = node.node.original_desc_id() - for cur_op_id, cur_dist_op in self._dist_ops_for_program.items( - ): - if op_id == cur_op_id \ - or op_id == cur_dist_op.serial_op.desc.original_id(): + for ( + cur_op_id, + cur_dist_op, + ) in self._dist_ops_for_program.items(): + if ( + op_id == cur_op_id + or op_id == cur_dist_op.serial_op.desc.original_id() + ): dist_op = cur_dist_op - assert dist_op is not None, \ - "Operator must have a distributed operator after the initialization for program." + assert ( + dist_op is not None + ), "Operator must have a distributed operator after the initialization for program." serial_op_node_id = _node_id(node) - new_dist_op = DistributedOperator(dist_op.serial_op, - dist_op.dist_attr) + new_dist_op = DistributedOperator( + dist_op.serial_op, dist_op.dist_attr + ) self._dist_ops_for_graph[serial_op_node_id] = new_dist_op def copy_dist_attr_from_graph_to_program(self): - assert self._is_initialized, \ - "Both program and graph must be initialized." 
+ assert ( + self._is_initialized + ), "Both program and graph must be initialized." updated_tensors = {} # all_nodes = self._serial_graph.all_nodes() all_nodes = self._serial_ordered_nodes @@ -736,11 +814,15 @@ class DistributedContext: updated = updated_tensors.get(tensor_id, False) # If a var has multiples var nodes in graph, only use the first one for now if not updated: - tensor_dist_attr_for_graph = self.get_tensor_dist_attr_for_graph( - node) + tensor_dist_attr_for_graph = ( + self.get_tensor_dist_attr_for_graph(node) + ) dist_tensor_for_program = self._dist_tensors_for_program[ - tensor_id] - dist_tensor_for_program.dist_attr = tensor_dist_attr_for_graph + tensor_id + ] + dist_tensor_for_program.dist_attr = ( + tensor_dist_attr_for_graph + ) updated_tensors[tensor_id] = True if node.is_op() and node.op() is not None: op_id = self._node_id_to_op_id[_node_id(node)] @@ -752,22 +834,26 @@ class DistributedContext: for orphan_node in self._serial_orphan_tensor_nodes: serial_tensor_id = orphan_node.var().id() dist_tensor = self._dist_tensors_for_program.get( - serial_tensor_id, None) + serial_tensor_id, None + ) if dist_tensor: dist_tensor.dist_attr.process_mesh = self._process_meshes[0] else: serial_tensor_id = orphan_node.var().original_id() dist_tensor = self._dist_tensors_for_program.get( - serial_tensor_id, None) + serial_tensor_id, None + ) dist_tensor.dist_attr.process_mesh = self._process_meshes[0] def amend_dist_attr_for_program(self): for dist_tensor in self._dist_tensors_for_program.values(): serial_tensor = dist_tensor.serial_tensor dist_attr = dist_tensor.dist_attr - if serial_tensor.type == core.VarDesc.VarType.READER \ - or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + serial_tensor.type == core.VarDesc.VarType.READER + or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = serial_tensor.shape @@ -777,8 +863,11 @@ class DistributedContext: # If the dimension of tensor is less than the sharding dimension of process mesh, # we just amend the dimension mapping to -1. (Is this really OK?) for i in range(len(tensor_shape)): - if dims_mapping[i] != -1 and tensor_shape[i] > 0 \ - and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]: + if ( + dims_mapping[i] != -1 + and tensor_shape[i] > 0 + and process_mesh_shape[dims_mapping[i]] > tensor_shape[i] + ): dims_mapping[i] = -1 if dims_mapping[i] != -1 and len(process_mesh_processes) == 1: dims_mapping[i] = -1 @@ -792,9 +881,13 @@ class DistributedContext: if dist_op.get_serial_input(arg_name) is None: tensor_shape = [] else: - if dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.READER \ - or dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or dist_op.serial_op.type == "create_py_reader": + if ( + dist_op.get_serial_input(arg_name).type + == core.VarDesc.VarType.READER + or dist_op.get_serial_input(arg_name).type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or dist_op.serial_op.type == "create_py_reader" + ): tensor_shape = [] else: tensor_shape = dist_op.get_serial_input(arg_name).shape @@ -802,16 +895,27 @@ class DistributedContext: # If the dimension of tensor is less than the sharding dimension of process mesh, # we just amend the dimension mapping to -1. (Is this really OK?) 
for i in range(len(tensor_shape)): - if dims_mapping[i] != -1 and tensor_shape[i] > 0 \ - and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]: + if ( + dims_mapping[i] != -1 + and tensor_shape[i] > 0 + and process_mesh_shape[dims_mapping[i]] + > tensor_shape[i] + ): dims_mapping[i] = -1 - if dims_mapping[i] != -1 and len( - process_mesh_processes) == 1: + if ( + dims_mapping[i] != -1 + and len(process_mesh_processes) == 1 + ): dims_mapping[i] = -1 for arg_name in serial_op.output_arg_names: - if dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.READER \ - or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.STEP_SCOPES: + if ( + dist_op.get_serial_output(arg_name).type + == core.VarDesc.VarType.READER + or dist_op.get_serial_output(arg_name).type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or dist_op.get_serial_output(arg_name).type + == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = dist_op.get_serial_output(arg_name).shape @@ -819,11 +923,17 @@ class DistributedContext: # If the dimension of tensor is less than the sharding dimension of process mesh, # we just amend the dimension mapping to -1. (Is this really OK?) for i in range(len(tensor_shape)): - if dims_mapping[i] != -1 and tensor_shape[i] > 0 \ - and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]: + if ( + dims_mapping[i] != -1 + and tensor_shape[i] > 0 + and process_mesh_shape[dims_mapping[i]] + > tensor_shape[i] + ): dims_mapping[i] = -1 - if dims_mapping[i] != -1 and len( - process_mesh_processes) == 1: + if ( + dims_mapping[i] != -1 + and len(process_mesh_processes) == 1 + ): dims_mapping[i] = -1 if len(process_mesh_processes) == 1: dist_op.dist_attr.impl_type = "default" @@ -831,30 +941,44 @@ class DistributedContext: def validate_dist_attr_for_program(self): if not self._is_initialized: - assert False, \ - "Program must be initialized before validating its distributed attributes" + assert ( + False + ), "Program must be initialized before validating its distributed attributes" for block in self.serial_main_program.blocks: for tensor in block.vars.values(): dist_tensor = self.get_dist_tensor_for_program(tensor) - assert dist_tensor is not None, \ - "Tensor {} does not have a distributed attribute.".format( - dist_tensor.serial_tensor.name) - if (dist_tensor - is not None) and (not dist_tensor.validate_dist_attr()): - assert False, "Tensor {} (id: {}, original_id: {}) has a wrong distributed attributes {}.".format( + assert ( + dist_tensor is not None + ), "Tensor {} does not have a distributed attribute.".format( + dist_tensor.serial_tensor.name + ) + if (dist_tensor is not None) and ( + not dist_tensor.validate_dist_attr() + ): + assert ( + False + ), "Tensor {} (id: {}, original_id: {}) has a wrong distributed attributes {}.".format( dist_tensor.serial_tensor.name, dist_tensor.serial_tensor.desc.id(), dist_tensor.serial_tensor.desc.original_id(), - dist_tensor.dist_attr) + dist_tensor.dist_attr, + ) for op in block.ops: dist_op = self.get_dist_op_for_program(op) - assert dist_op is not None, \ - "Operator {} does not have a distributed attribute.".format( - dist_op.serial_op.type) + assert ( + dist_op is not None + ), "Operator {} does not have a distributed attribute.".format( + dist_op.serial_op.type + ) if (dist_op is not None) and (not dist_op.validate_dist_attr()): - assert False, "Operator {} (id: {}, original_id: {}) has a wrong distributed attributes {} 
.".format( - dist_op.serial_op.type, dist_op.serial_op.desc.id(), - dist_op.serial_op.desc.original_id(), dist_op.dist_attr) + assert ( + False + ), "Operator {} (id: {}, original_id: {}) has a wrong distributed attributes {} .".format( + dist_op.serial_op.type, + dist_op.serial_op.desc.id(), + dist_op.serial_op.desc.original_id(), + dist_op.dist_attr, + ) return True def __deepcopy__(self, memo): @@ -863,15 +987,27 @@ class DistributedContext: memo[id(self)] = result for k, v in self.__dict__.items(): if k in [ - "_original_serial_main_program", "_original_serial_startup_program", \ - "_serial_main_program", "_serial_startup_program", "_serial_graph", \ - "_dist_main_programs", "_dist_startup_programs", \ - "_serial_ordered_nodes", "_serial_ordered_tensor_nodes", \ - "_serial_ordered_op_nodes", "_original_serial_loss", \ - "_original_serial_feed_vars", "_original_serial_fetch_vars", \ - "_serial_loss", "_serial_feed_vars", "_serial_fetch_vars", "_serial_optimizer", \ - "_backup_serial_main_program_stack", "_backup_serial_startup_program_stack", \ - "_pass_context"]: + "_original_serial_main_program", + "_original_serial_startup_program", + "_serial_main_program", + "_serial_startup_program", + "_serial_graph", + "_dist_main_programs", + "_dist_startup_programs", + "_serial_ordered_nodes", + "_serial_ordered_tensor_nodes", + "_serial_ordered_op_nodes", + "_original_serial_loss", + "_original_serial_feed_vars", + "_original_serial_fetch_vars", + "_serial_loss", + "_serial_feed_vars", + "_serial_fetch_vars", + "_serial_optimizer", + "_backup_serial_main_program_stack", + "_backup_serial_startup_program_stack", + "_pass_context", + ]: setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) @@ -913,8 +1049,12 @@ class DistributedOperatorContext: memo[id(self)] = result for k, v in self.__dict__.items(): if k in [ - "_dst_main_program", "_dst_startup_program", "_cur_src_op", - "_work_block", "_main_block", "_startup_block" + "_dst_main_program", + "_dst_startup_program", + "_cur_src_op", + "_work_block", + "_main_block", + "_startup_block", ]: setattr(result, k, v) else: @@ -994,7 +1134,6 @@ class DistributedOperatorContext: class BlockState(object): - def __init__(self): self.nblock = 0 self.forward_indices = [] @@ -1011,8 +1150,11 @@ class BlockState(object): for idx, block in enumerate(program.blocks): assert idx == block.idx, "index doesn't match" - assert block.forward_block_idx == -1, "forward_block_idx of forward block [{}] is not [{}]".format( - idx, block.forward_block_idx) + assert ( + block.forward_block_idx == -1 + ), "forward_block_idx of forward block [{}] is not [{}]".format( + idx, block.forward_block_idx + ) self.forward_indices.append(idx) self.nblock += 1 @@ -1021,7 +1163,8 @@ class BlockState(object): def parse_backward_blocks(self, program): assert 0 in self.forward_indices, "forward block idx are{}".format( - self.forward_indices) + self.forward_indices + ) self.backward_to_forward_index_map[0] = 0 for idx, block in enumerate(program.blocks): diff --git a/python/paddle/distributed/auto_parallel/dist_loader.py b/python/paddle/distributed/auto_parallel/dist_loader.py index 9eb62b1b74e8a74e6f9a7ea04be022ecfbdd3a0d..f16c55a011c7c4410f9ff3f92f274dfffec41ab6 100644 --- a/python/paddle/distributed/auto_parallel/dist_loader.py +++ b/python/paddle/distributed/auto_parallel/dist_loader.py @@ -17,12 +17,18 @@ import numpy as np import paddle from paddle.io import BatchSampler, IterableDataset -from paddle.fluid.dataloader.batch_sampler import _InfiniteIterableSampler, 
DistributedBatchSampler -from paddle.fluid.dataloader.dataloader_iter import _DatasetKind, default_collate_fn, default_convert_fn +from paddle.fluid.dataloader.batch_sampler import ( + _InfiniteIterableSampler, + DistributedBatchSampler, +) +from paddle.fluid.dataloader.dataloader_iter import ( + _DatasetKind, + default_collate_fn, + default_convert_fn, +) class DistributedDataLoaderBase(metaclass=abc.ABCMeta): - @abc.abstractmethod def __iter__(self): raise NotImplementedError @@ -33,24 +39,25 @@ class DistributedDataLoaderBase(metaclass=abc.ABCMeta): class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): - - def __init__(self, - dataset, - feed_list=None, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=False, - use_multiprocess=False, - drop_last=True, - places=None, - batch_size=1, - epochs=1, - steps_per_epoch=None, - collate_fn=None, - split_data=True, - data_parallel_world_size=[], - data_parallel_rank=[]): + def __init__( + self, + dataset, + feed_list=None, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=False, + use_multiprocess=False, + drop_last=True, + places=None, + batch_size=1, + epochs=1, + steps_per_epoch=None, + collate_fn=None, + split_data=True, + data_parallel_world_size=[], + data_parallel_rank=[], + ): self.dataset = dataset self.feed_list = feed_list self.capacity = capacity @@ -80,12 +87,15 @@ class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): else: if isinstance(dataset, IterableDataset): self.batch_sampler = _InfiniteIterableSampler( - dataset, batch_size) + dataset, batch_size + ) else: - self.batch_sampler = BatchSampler(dataset, - batch_size=batch_size, - shuffle=False, - drop_last=drop_last) + self.batch_sampler = BatchSampler( + dataset, + batch_size=batch_size, + shuffle=False, + drop_last=drop_last, + ) self.auto_collate_batch = self.batch_sampler is not None self.sampler_iter = iter(self.index_sampler) @@ -96,8 +106,12 @@ class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): self.collate_fn = collate_fn or default_convert_fn self.dataset_fetcher = _DatasetKind.create_fetcher( - self.dataset_kind, self.dataset, self.auto_collate_batch, - self.collate_fn, self.drop_last) + self.dataset_kind, + self.dataset, + self.auto_collate_batch, + self.collate_fn, + self.drop_last, + ) self._steps = self._infer_steps() self._inner_dataloader = self._create_inner_dataloader() @@ -146,18 +160,21 @@ class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): return _InfiniteIterableSampler(self.dataset, 1) def _create_inner_dataloader(self): - def data_generator(): while True: try: indices = next(self.sampler_iter) batch = self.dataset_fetcher.fetch(indices) - if batch is None: break + if batch is None: + break except StopIteration: self.dataset_fetcher = _DatasetKind.create_fetcher( - self.dataset_kind, self.dataset, - self.auto_collate_batch, self.collate_fn, - self.drop_last) + self.dataset_kind, + self.dataset, + self.auto_collate_batch, + self.collate_fn, + self.drop_last, + ) break partial_data = [] @@ -168,11 +185,16 @@ class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): continue batch_size = array.shape[0] - assert batch_size % self.dp_world_sizes[i] == 0, \ - "batch_size [{}] is not divisible by dp_world_size [{}]".format(str(batch_size), str(self.dp_world_sizes[i])) + assert ( + batch_size % self.dp_world_sizes[i] == 0 + ), "batch_size [{}] is not divisible by dp_world_size [{}]".format( + str(batch_size), str(self.dp_world_sizes[i]) + ) 
partial_data.append( - np.split(array, - self.dp_world_sizes[i])[self.dp_ranks[i]]) + np.split(array, self.dp_world_sizes[i])[ + self.dp_ranks[i] + ] + ) yield partial_data @@ -184,33 +206,35 @@ class DistributedDataLoaderFromGenerator(DistributedDataLoaderBase): iterable=False, return_list=self.return_list, use_multiprocess=self.use_multiprocess, - drop_last=self.drop_last) + drop_last=self.drop_last, + ) dataloader.set_batch_generator(data_generator, self.places) return dataloader class DistributedDataLoader(DistributedDataLoaderBase): - - def __init__(self, - dataset, - feed_list=None, - places=None, - return_list=True, - batch_size=1, - shuffle=False, - drop_last=False, - collate_fn=None, - num_workers=0, - use_buffer_reader=True, - use_shared_memory=True, - timeout=0, - worker_init_fn=None, - epochs=1, - steps_per_epoch=None, - split_data=True, - data_parallel_world_size=[], - data_parallel_rank=[]): + def __init__( + self, + dataset, + feed_list=None, + places=None, + return_list=True, + batch_size=1, + shuffle=False, + drop_last=False, + collate_fn=None, + num_workers=0, + use_buffer_reader=True, + use_shared_memory=True, + timeout=0, + worker_init_fn=None, + epochs=1, + steps_per_epoch=None, + split_data=True, + data_parallel_world_size=[], + data_parallel_rank=[], + ): self.dataset = dataset self.feed_list = feed_list self.return_list = return_list @@ -231,8 +255,13 @@ class DistributedDataLoader(DistributedDataLoaderBase): self.split_data = split_data # TODO: rank info self.batch_sampler = DistributedBatchSampler( - self.dataset, self.batch_size, self.dp_world_sizes[0], - self.dp_ranks[0], self.shuffle, self.drop_last) + self.dataset, + self.batch_size, + self.dp_world_sizes[0], + self.dp_ranks[0], + self.shuffle, + self.drop_last, + ) self._inner_dataloader = self._create_inner_dataloader() def __iter__(self): @@ -253,7 +282,8 @@ class DistributedDataLoader(DistributedDataLoaderBase): use_buffer_reader=self.use_buffer_reader, use_shared_memory=self.use_shared_memory, timeout=self.timeout, - worker_init_fn=self.worker_init_fn) + worker_init_fn=self.worker_init_fn, + ) self.data = (x for x in dataloader) return dataloader diff --git a/python/paddle/distributed/auto_parallel/dist_op.py b/python/paddle/distributed/auto_parallel/dist_op.py index 004436458b13afcd7656d4fc1e4c5f7777452e31..41b4696174a5a6b168ba2b233a7ee18100888b81 100644 --- a/python/paddle/distributed/auto_parallel/dist_op.py +++ b/python/paddle/distributed/auto_parallel/dist_op.py @@ -23,7 +23,6 @@ from .utils import convert_to_shard_spec, verify_shard_spec class DistributedOperator: - def __init__(self, serial_op, dist_attr=None): self._serial_op = serial_op self._serial_inputs = {} @@ -74,28 +73,34 @@ class DistributedOperator: if tensor is None: tensor_shape = [] else: - if tensor.type == core.VarDesc.VarType.READER \ - or tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + tensor.type == core.VarDesc.VarType.READER + or tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): tensor_shape = [] else: tensor_shape = tensor.shape if self._dist_attr.get_input_dims_mapping(tensor_name) is None: tensor_dims_mapping = [-1 for _ in range(len(tensor_shape))] - self._dist_attr.set_input_dims_mapping(tensor_name, - tensor_dims_mapping) + self._dist_attr.set_input_dims_mapping( + tensor_name, tensor_dims_mapping + ) for tensor_name in self._serial_op.output_arg_names: tensor = self._serial_op.block._var_recursive(tensor_name) - if tensor.type == core.VarDesc.VarType.READER \ - or tensor.type == 
core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + tensor.type == core.VarDesc.VarType.READER + or tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = tensor.shape self._serial_outputs[tensor_name] = tensor if self._dist_attr.get_output_dims_mapping(tensor_name) is None: tensor_dims_mapping = [-1 for _ in range(len(tensor_shape))] - self._dist_attr.set_output_dims_mapping(tensor_name, - tensor_dims_mapping) + self._dist_attr.set_output_dims_mapping( + tensor_name, tensor_dims_mapping + ) if self._dist_attr.op_type is None: self._dist_attr.op_type = self.serial_op.type if self._dist_attr.impl_type is None: @@ -113,8 +118,10 @@ class DistributedOperator: new_dist_attr = {} for key, value in dist_attr.items(): if isinstance(key, Variable): - if key.name in self._serial_op.input_arg_names \ - or key.name in self._serial_op.output_arg_names: + if ( + key.name in self._serial_op.input_arg_names + or key.name in self._serial_op.output_arg_names + ): new_dist_attr[key] = value else: new_dist_attr[key] = value @@ -125,13 +132,15 @@ class DistributedOperator: for tensor_name in self._serial_op.input_arg_names: tensor_dist_attr = dist_attr.get_input_dist_attr(tensor_name) if tensor_dist_attr: - new_dist_attr.set_input_dist_attr(tensor_name, - tensor_dist_attr) + new_dist_attr.set_input_dist_attr( + tensor_name, tensor_dist_attr + ) for tensor_name in self._serial_op.output_arg_names: tensor_dist_attr = dist_attr.get_output_dist_attr(tensor_name) if tensor_dist_attr: - new_dist_attr.set_output_dist_attr(tensor_name, - tensor_dist_attr) + new_dist_attr.set_output_dist_attr( + tensor_name, tensor_dist_attr + ) else: assert False, "Cannot recognize the {} parameter.".format(dist_attr) return new_dist_attr @@ -142,8 +151,10 @@ class DistributedOperator: for name in self.serial_op.input_arg_names: input_dist_attr = self.dist_attr.get_input_dist_attr(name) dims_mapping = input_dist_attr.dims_mapping - if self.get_serial_input( - name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + self.get_serial_input(name).type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): shape = [] else: shape = self.get_serial_input(name).shape @@ -151,7 +162,8 @@ class DistributedOperator: return False for i in range(len(dims_mapping)): if dims_mapping[i] < -1 or dims_mapping[i] >= len( - self.dist_attr.process_mesh.topology): + self.dist_attr.process_mesh.topology + ): return False for i in range(len(self.dist_attr.process_mesh.topology)): if dims_mapping.count(i) > 1: @@ -162,8 +174,12 @@ class DistributedOperator: for name in self.serial_op.output_arg_names: output_dist_attr = self.dist_attr.get_output_dist_attr(name) dims_mapping = output_dist_attr.dims_mapping - if self.get_serial_output(name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY\ - or self.get_serial_output(name).type == core.VarDesc.VarType.STEP_SCOPES: + if ( + self.get_serial_output(name).type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or self.get_serial_output(name).type + == core.VarDesc.VarType.STEP_SCOPES + ): shape = [] else: shape = self.get_serial_output(name).shape @@ -171,7 +187,8 @@ class DistributedOperator: return False for i in range(len(dims_mapping)): if dims_mapping[i] < -1 or dims_mapping[i] >= len( - self.dist_attr.process_mesh.topology): + self.dist_attr.process_mesh.topology + ): return False for i in range(len(self.dist_attr.process_mesh.topology)): if dims_mapping.count(i) > 1: @@ 
-181,8 +198,9 @@ class DistributedOperator: return True def __str__(self): - str = "{{op type: {}, op id: {}".format(self.serial_op.desc.type(), - self.serial_op.desc.id()) + str = "{{op type: {}, op id: {}".format( + self.serial_op.desc.type(), self.serial_op.desc.id() + ) # str += ", {}".format(self.dist_attr) # return str @@ -191,8 +209,9 @@ class DistributedOperator: annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", process_mesh ({}): {}".format(annotated_str, - self.dist_attr.process_mesh) + str += ", process_mesh ({}): {}".format( + annotated_str, self.dist_attr.process_mesh + ) for arg_name in self.serial_op.desc.input_arg_names(): dims_mapping = self.dist_attr.get_input_dims_mapping(arg_name) @@ -208,7 +227,8 @@ class DistributedOperator: else: is_parameter_str = "non-parameter" str += ", {}'s dims_mapping (input, {}, {}): {}".format( - arg_name, annotated_str, is_parameter_str, dims_mapping) + arg_name, annotated_str, is_parameter_str, dims_mapping + ) for arg_name in self.serial_op.desc.output_arg_names(): dims_mapping = self.dist_attr.get_output_dims_mapping(arg_name) @@ -224,12 +244,14 @@ class DistributedOperator: else: is_parameter_str = "non-parameter" str += ", {}'s dims_mapping (output, {}, {}): {}".format( - arg_name, annotated_str, is_parameter_str, dims_mapping) + arg_name, annotated_str, is_parameter_str, dims_mapping + ) str += ", pipeline stage: {}".format(None) str += ", dist_impl idx: {} , dist_impl type {} }}".format( - self.dist_attr._impl_idx, self.dist_attr._impl_type) + self.dist_attr._impl_idx, self.dist_attr._impl_type + ) return str @@ -238,7 +260,11 @@ class DistributedOperator: result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): - if k == "_serial_op" or k == "_serial_inputs" or k == "_serial_outputs": + if ( + k == "_serial_op" + or k == "_serial_inputs" + or k == "_serial_outputs" + ): setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) @@ -246,9 +272,9 @@ class DistributedOperator: class DistributedOperatorHelper: - - def __init__(self, serial_op, process_mesh, in_dims_mappings, - out_dims_mappings): + def __init__( + self, serial_op, process_mesh, in_dims_mappings, out_dims_mappings + ): self._serial_op = serial_op self._process_mesh = process_mesh self._in_dims_mappings = in_dims_mappings @@ -258,8 +284,11 @@ class DistributedOperatorHelper: tensor_to_dims_mapping = {} index = 0 if self._in_dims_mappings: - assert len(args) + len(kwargs) == len(self._in_dims_mappings), \ - "The length of dims_mapping {} does not matching the length output {}.".format(len(self._in_dims_mappings), len(args) + len(kwargs)) + assert len(args) + len(kwargs) == len( + self._in_dims_mappings + ), "The length of dims_mapping {} does not match the length of the output {}.".format( + len(self._in_dims_mappings), len(args) + len(kwargs) + ) for arg in args: if isinstance(arg, Variable) and self._in_dims_mappings: tensor_to_dims_mapping[arg.name] = self._in_dims_mappings[index] @@ -283,13 +312,17 @@ class DistributedOperatorHelper: raise ValueError("Unrecognized outpout.") if self._out_dims_mappings: - assert len(new_output) == len(self._out_dims_mappings), \ - "The length of dims_mapping {} does not matching the length output {}.".format(len(self._out_dims_mappings), len(new_output)) + assert len(new_output) == len( + self._out_dims_mappings + ), "The length of dims_mapping {} does not match the length of the output {}.".format( + len(self._out_dims_mappings), len(new_output) + ) for i, item in
enumerate(new_output): if isinstance(item, Variable) and self._out_dims_mappings: tensor_to_dims_mapping[item.name] = self._out_dims_mappings[i] from .dist_context import get_default_distributed_context + default_dist_ctx = get_default_distributed_context() for idx in range(op_size, new_op_size): op = cur_block.ops[idx] @@ -298,48 +331,62 @@ class DistributedOperatorHelper: if name in tensor_to_dims_mapping.keys(): tensor = dist_op.get_serial_input(name) tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr( - name) + name + ) dims_mapping = tensor_to_dims_mapping[name] if tensor is None: tensor_shape = [] else: - if tensor.type == core.VarDesc.VarType.READER \ - or tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + tensor.type == core.VarDesc.VarType.READER + or tensor.type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = tensor.shape if dims_mapping is not None: dims_mapping = tensor_to_dims_mapping[name] shard_spec = convert_to_shard_spec( - dims_mapping, self._process_mesh) - assert verify_shard_spec(shard_spec, tensor_shape, self._process_mesh), \ - "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( - name, shard_spec, tensor_shape, self._process_mesh) + dims_mapping, self._process_mesh + ) + assert verify_shard_spec( + shard_spec, tensor_shape, self._process_mesh + ), "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( + name, shard_spec, tensor_shape, self._process_mesh + ) tensor_dist_attr.dims_mapping = dims_mapping tensor_dist_attr.mark_annotated("dims_mapping") for name in dist_op.serial_op.output_arg_names: if name in tensor_to_dims_mapping.keys(): tensor = dist_op.get_serial_output(name) tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr( - name) + name + ) dims_mapping = tensor_to_dims_mapping[name] if tensor is None: tensor_shape = [] else: - if tensor.type == core.VarDesc.VarType.READER \ - or tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + tensor.type == core.VarDesc.VarType.READER + or tensor.type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = tensor.shape if dims_mapping is not None: dims_mapping = tensor_to_dims_mapping[name] shard_spec = convert_to_shard_spec( - dims_mapping, self._process_mesh) - assert verify_shard_spec(shard_spec, tensor_shape, self._process_mesh), \ - "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( - name, shard_spec, tensor_shape, self._process_mesh) + dims_mapping, self._process_mesh + ) + assert verify_shard_spec( + shard_spec, tensor_shape, self._process_mesh + ), "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( + name, shard_spec, tensor_shape, self._process_mesh + ) tensor_dist_attr.dims_mapping = dims_mapping tensor_dist_attr.mark_annotated("dims_mapping") dist_op.dist_attr.process_mesh = self._process_mesh diff --git a/python/paddle/distributed/auto_parallel/dist_saver.py b/python/paddle/distributed/auto_parallel/dist_saver.py index a885bf7592ba9dd281dd189d2e099b711574b0aa..f2766bff602e38124e386d09ca57b09b6d10cf9e 100644 --- a/python/paddle/distributed/auto_parallel/dist_saver.py +++ b/python/paddle/distributed/auto_parallel/dist_saver.py @@ -50,16 
+50,13 @@ def _process_path(path): class DistributedSaver: - def __init__(self): self._logger = get_logger(logging.INFO) def save(self, path, serial_program, dist_main_program, dist_context): - def _save_state(program, path, mode="param"): state = { - k: np.array(v) - for k, v in program.state_dict(mode).items() + k: np.array(v) for k, v in program.state_dict(mode).items() } with open(path, "wb") as f: pickle.dump(state, f) @@ -105,8 +102,9 @@ class DistributedSaver: def _load_file(filename, dirname, suffix="pdparams"): file_list = [] for file in os.listdir(dirname): - if check_filename('{}(.*)_dist(.*).{}'.format(filename, suffix), - file): + if check_filename( + '{}(.*)_dist(.*).{}'.format(filename, suffix), file + ): file_list.append(os.path.join(dirname, file)) file_list.sort() return file_list @@ -134,14 +132,16 @@ class DistributedSaver: # load path.pdparam and path.pdopt param_state_dict = _load_state(filename, dirname) - opt_state_dict = _load_state(filename, dirname, - "pdopt") if load_optimizer else {} + opt_state_dict = ( + _load_state(filename, dirname, "pdopt") if load_optimizer else {} + ) state_dict = dict(param_state_dict, **opt_state_dict) # load path.pdattr dist_attr_file_list = _load_file(filename, dirname, "pdattr") self._logger.info( - "Load distributed attribute file: {}".format(dist_attr_file_list)) + "Load distributed attribute file: {}".format(dist_attr_file_list) + ) dist_attr = {} for dist_attr_file in dist_attr_file_list: with open(dist_attr_file, 'rb') as f: @@ -206,11 +206,13 @@ class DistributedSaver: # NOTE: `paddle.static.save_inference_model` does not support subblock. dist_filename = filename + "_dist" + str(rank_id) dist_path = os.path.join(dirname, dist_filename) - paddle.static.save_inference_model(dist_path, - dist_feed_vars, - dist_fetch_vars, - exe, - program=dist_main_prog) + paddle.static.save_inference_model( + dist_path, + dist_feed_vars, + dist_fetch_vars, + exe, + program=dist_main_prog, + ) def _save_rank_mapping(self, dirname): path = os.path.join(dirname, 'rank_mapping.csv') diff --git a/python/paddle/distributed/auto_parallel/dist_tensor.py b/python/paddle/distributed/auto_parallel/dist_tensor.py index e07269fab25f98f611bce8deee3b63ba84282305..88c754f06f3d8a6dfc3a578fab5560cd13d356c2 100644 --- a/python/paddle/distributed/auto_parallel/dist_tensor.py +++ b/python/paddle/distributed/auto_parallel/dist_tensor.py @@ -33,32 +33,45 @@ class DistributedTensor: """ @staticmethod - def _validate_sizes_and_dist_attr(sizes, - dims_mapping, - topology, - processes, - rank=None, - shard_sizes=None): - if not (isinstance(sizes, (list, tuple)) - and all(map(lambda x: isinstance(x, int) and x >= 0, sizes))): + def _validate_sizes_and_dist_attr( + sizes, dims_mapping, topology, processes, rank=None, shard_sizes=None + ): + if not ( + isinstance(sizes, (list, tuple)) + and all(map(lambda x: isinstance(x, int) and x >= 0, sizes)) + ): raise ValueError( - "The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}" - .format(sizes)) - if not (isinstance(dims_mapping, (list, tuple)) and all( - map(lambda x: isinstance(x, int) and x >= -1, dims_mapping))): + "The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format( + sizes + ) + ) + if not ( + isinstance(dims_mapping, (list, tuple)) + and all(map(lambda x: isinstance(x, int) and x >= -1, dims_mapping)) + ): raise ValueError( - "The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}" - 
.format(dims_mapping)) - if not (isinstance(processes, (list, tuple)) and all( - map(lambda x: isinstance(x, int) and x >= 0, processes))): + "The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format( + dims_mapping + ) + ) + if not ( + isinstance(processes, (list, tuple)) + and all(map(lambda x: isinstance(x, int) and x >= 0, processes)) + ): raise ValueError( - "The processes must be list or tuple and item in processes must be integer, but got {}" - .format(processes)) - if not (isinstance(topology, (list, tuple)) - and all(map(lambda x: isinstance(x, int) and x > 0, topology))): + "The processes must be list or tuple and item in processes must be integer, but got {}".format( + processes + ) + ) + if not ( + isinstance(topology, (list, tuple)) + and all(map(lambda x: isinstance(x, int) and x > 0, topology)) + ): raise ValueError( - "The topology must be list or tuple and item in topology must be non-negative integer, but got {}" - .format(topology)) + "The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format( + topology + ) + ) if rank is not None and not (isinstance(rank, int) and rank >= 0): raise ValueError("The rank must >= 0, but got {}".format(rank)) @@ -67,16 +80,17 @@ class DistributedTensor: raise ValueError("Only support even sharding now.") @staticmethod - def get_local_sizes(global_sizes, - dims_mapping, - topology, - processes, - rank=None, - shard_sizes=None): - DistributedTensor._validate_sizes_and_dist_attr(global_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + def get_local_sizes( + global_sizes, + dims_mapping, + topology, + processes, + rank=None, + shard_sizes=None, + ): + DistributedTensor._validate_sizes_and_dist_attr( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) local_sizes = [] # for even sharding, the local sizes of every rank are equal @@ -92,16 +106,12 @@ class DistributedTensor: return local_sizes @staticmethod - def get_local_offsets(global_sizes, - dims_mapping, - topology, - processes, - rank, - shard_sizes=None): - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + def get_local_offsets( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes=None + ): + local_sizes = DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) local_offsets = [] rank_relatvie = processes.index(rank) coordinate = _linear_idx2coordinate(topology, rank_relatvie) @@ -110,21 +120,23 @@ class DistributedTensor: if dims_mapping[i] == -1: local_offsets.append(0) else: - local_offsets.append(coordinate[dims_mapping[i]] * - local_sizes[i]) + local_offsets.append( + coordinate[dims_mapping[i]] * local_sizes[i] + ) return local_offsets @staticmethod - def get_global_sizes(local_sizes, - dims_mapping, - topology, - processes, - rank=None, - shard_sizes=None): - DistributedTensor._validate_sizes_and_dist_attr(local_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + def get_global_sizes( + local_sizes, + dims_mapping, + topology, + processes, + rank=None, + shard_sizes=None, + ): + DistributedTensor._validate_sizes_and_dist_attr( + local_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) global_sizes = [] for idx, item in enumerate(local_sizes): if dims_mapping[idx] == -1: @@ -134,25 +146,24 @@ class DistributedTensor: return global_sizes @staticmethod - def get_local_shard(global_sizes, - 
dims_mapping, - topology, - processes, - rank, - shard_sizes=None): + def get_local_shard( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes=None + ): local_offsets = DistributedTensor.get_local_offsets( - global_sizes, dims_mapping, topology, processes, rank, shard_sizes) - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + global_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) + local_sizes = DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) assert len(local_sizes) == len( local_offsets ), "The length of local_sizes must be equal to local_offsets, but got {} and {}.".format( - len(local_sizes), len(local_offsets)) + len(local_sizes), len(local_offsets) + ) local_end_offsets = list( - map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes))) + map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes)) + ) local_shard = list(zip(local_offsets, local_end_offsets)) return local_shard @@ -167,7 +178,11 @@ class DistributedTensor: self._local_tensor_map = {} from .dist_context import get_default_distributed_context - self._dist_context = dist_context if dist_context is not None else get_default_distributed_context( + + self._dist_context = ( + dist_context + if dist_context is not None + else get_default_distributed_context() ) # TODO: Add Automatically to dist_context after initialized and it will be adapted in the future. # self._dist_context.add_dist_tensor_for_program(self) @@ -193,9 +208,12 @@ class DistributedTensor: def _init_default_dist_attr(self): if self._dist_attr.dims_mapping is None: - if self.serial_tensor.type == core.VarDesc.VarType.READER \ - or self.serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or self.serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + self.serial_tensor.type == core.VarDesc.VarType.READER + or self.serial_tensor.type + == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or self.serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = self._serial_tensor.shape @@ -203,17 +221,21 @@ class DistributedTensor: self._dist_attr.dims_mapping = tensor_dims_mapping def validate_dist_attr(self): - if self.serial_tensor.type == core.VarDesc.VarType.READER \ - or self.serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or self.serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + self.serial_tensor.type == core.VarDesc.VarType.READER + or self.serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or self.serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): return True tensor_shape = self.serial_tensor.shape if len(tensor_shape) != len(self.dist_attr.dims_mapping): return False for i in range(len(self.dist_attr.dims_mapping)): if self.dist_attr.dims_mapping[ - i] < -1 or self.dist_attr.dims_mapping[i] >= len( - self.dist_attr.process_mesh.topology): + i + ] < -1 or self.dist_attr.dims_mapping[i] >= len( + self.dist_attr.process_mesh.topology + ): return False for i in range(len(self.dist_attr.process_mesh.topology)): if self.dist_attr.dims_mapping.count(i) > 1: @@ -228,10 +250,9 @@ class DistributedTensor: shard_sizes = self.dist_attr.shard_sizes processes = self.dist_attr.process_mesh.processes topology = self.dist_attr.process_mesh.topology - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes, rank, - shard_sizes) + local_sizes = 
DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes, rank, shard_sizes + ) return local_sizes @@ -247,8 +268,13 @@ class DistributedTensor: processes = self.dist_attr.process_mesh.processes topology = self.dist_attr.process_mesh.topology local_offsets = DistributedTensor.get_local_offsets( - global_sizes, dims_mapping, topology, processes, rank, - shard_sizes) + global_sizes, + dims_mapping, + topology, + processes, + rank, + shard_sizes, + ) self._local_offsets_map[rank] = local_offsets return local_offsets @@ -268,8 +294,13 @@ class DistributedTensor: processes = self.dist_attr.process_mesh.processes topology = self.dist_attr.process_mesh.topology local_shard = DistributedTensor.get_local_shard( - global_sizes, dims_mapping, topology, processes, rank, - shard_sizes) + global_sizes, + dims_mapping, + topology, + processes, + rank, + shard_sizes, + ) self._local_shard_map[rank] = local_shard return local_shard @@ -324,8 +355,9 @@ class DistributedTensor: if rank is not None and not (isinstance(rank, int) and rank >= 0): raise ValueError("The rank must >= 0, but got {}".format(rank)) if block is not None and not isinstance(block, Block): - raise TypeError("The block must be Block, but got {}.".format( - type(block))) + raise TypeError( + "The block must be Block, but got {}.".format(type(block)) + ) rank = paddle.distributed.get_rank() if rank is None else rank if block is None: @@ -350,8 +382,9 @@ class DistributedTensor: def local_tensor(self, rank=None): rank = paddle.distributed.get_rank() if rank is None else rank - assert rank in self._local_tensor_map, "The rank {} local tensor has not been created.".format( - rank) + assert ( + rank in self._local_tensor_map + ), "The rank {} local tensor has not been created.".format(rank) return self._local_tensor_map[rank] def __deepcopy__(self, memo): @@ -367,7 +400,8 @@ class DistributedTensor: def __str__(self): str = "{{tensor name: {}, tensor id: {}".format( - self.serial_tensor.desc.name(), self.serial_tensor.desc.id()) + self.serial_tensor.desc.name(), self.serial_tensor.desc.id() + ) # str += ", {}".format(self.dist_attr) # return str @@ -376,8 +410,9 @@ class DistributedTensor: annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", process_mesh ({}): {}".format(annotated_str, - self.dist_attr.process_mesh) + str += ", process_mesh ({}): {}".format( + annotated_str, self.dist_attr.process_mesh + ) str += ", is_parameter: {}".format(self.serial_tensor.is_parameter) @@ -385,8 +420,9 @@ class DistributedTensor: annotated_str = "annotated" else: annotated_str = "non-annotated" - str += ", dims_mapping ({}): {}".format(annotated_str, - self.dist_attr.dims_mapping) + str += ", dims_mapping ({}): {}".format( + annotated_str, self.dist_attr.dims_mapping + ) if self.dist_attr.is_annotated("shard_mask"): annotated_str = "annotated" diff --git a/python/paddle/distributed/auto_parallel/engine.py b/python/paddle/distributed/auto_parallel/engine.py index 1d6bce931d28efbf5077b85460f9a4f11871fdf2..1581beb2a511ce39a29b0d76bb3406380f146486 100644 --- a/python/paddle/distributed/auto_parallel/engine.py +++ b/python/paddle/distributed/auto_parallel/engine.py @@ -41,7 +41,10 @@ from .planner_v2 import Planner from .parallelizer_v2 import Parallelizer from .dist_op import DistributedOperator from .dist_saver import DistributedSaver -from .dist_loader import DistributedDataLoaderFromGenerator, DistributedDataLoader +from .dist_loader import ( + DistributedDataLoaderFromGenerator, + DistributedDataLoader, 
+) from .utils import to_list, get_dist_attr, get_lr from .process_group import new_process_group, get_all_process_groups from .dist_context import DistributedContext, get_default_distributed_context @@ -115,16 +118,21 @@ class Engine: """ - def __init__(self, - model=None, - loss=None, - optimizer=None, - metrics=None, - cluster=None, - strategy=None): - - if model and not isinstance(model, - paddle.nn.Layer) and not callable(model): + def __init__( + self, + model=None, + loss=None, + optimizer=None, + metrics=None, + cluster=None, + strategy=None, + ): + + if ( + model + and not isinstance(model, paddle.nn.Layer) + and not callable(model) + ): raise TypeError( "'model must be sub classes of `paddle.nn.Layer` or any callable function." ) @@ -132,18 +140,20 @@ class Engine: self._loss = loss if optimizer and not isinstance( - optimizer, - (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)): + optimizer, + (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer), + ): raise TypeError( "'optimizer' must be object of class `paddle.optimizer.Optimizer`" - " or `paddle.fluid.optimizer.Optimizer`.") + " or `paddle.fluid.optimizer.Optimizer`." + ) self._optimizer = self._validate_opt(optimizer) metrics = metrics or [] for metric in to_list(metrics): - assert isinstance(metric, Metric), \ - "{} is not sub class of Metric".format( - metric.__class__.__name__) + assert isinstance( + metric, Metric + ), "{} is not sub class of Metric".format(metric.__class__.__name__) self._metrics = to_list(metrics) if cluster and not isinstance(cluster, Cluster): @@ -159,8 +169,9 @@ class Engine: self._strategy = strategy or Strategy() if os.getenv("POD_NAME"): - print("Distribute training by paddle.distributed.launch", - flush=True) + print( + "Distribute training by paddle.distributed.launch", flush=True + ) fleet.init(is_collective=True) self._executor = None @@ -185,7 +196,7 @@ class Engine: self._has_prepared_reader = { "train": False, "eval": False, - "predict": False + "predict": False, } self._inputs_spec = [] self._labels_spec = [] @@ -220,8 +231,10 @@ class Engine: labels = sample[split:] else: raise ValueError( - "Data should be a Dataset or IterableDatset, but received {}.". 
- format(type(data).__name__)) + "Data should be a Dataset or IterableDataset, but received {}.".format( + type(data).__name__ + ) + ) inputs = to_list(inputs) labels = to_list(labels) @@ -264,34 +277,40 @@ class Engine: labels_spec = self._validate_spec(labels_spec) return inputs_spec, labels_spec - def _prepare_data_tensor(self, - inputs_spec, - labels_spec, - inputs=None, - labels=None): + def _prepare_data_tensor( + self, inputs_spec, labels_spec, inputs=None, labels=None + ): if _non_static_mode() or self._dygraph_mode: return None, None inputs_spec = inputs_spec if inputs_spec else [] labels_spec = labels_spec if labels_spec else [] if inputs_spec: - assert isinstance(inputs_spec, list), \ - "inputs should be list, but received {}".format(type(inputs_spec)) + assert isinstance( + inputs_spec, list + ), "inputs should be list, but received {}".format( + type(inputs_spec) + ) if inputs is None: inputs = [s._create_feed_layer() for s in inputs_spec] else: - assert isinstance(inputs, list), \ - "inputs should be list, but received {}".format(type(inputs)) + assert isinstance( + inputs, list + ), "inputs should be list, but received {}".format(type(inputs)) for input_spec, input in zip(inputs_spec, inputs): if input_spec.shape != input.shape: input.desc.set_shape(input_spec.shape) if labels_spec: - assert isinstance(labels_spec, list), \ - "labels should be list, but received {}".format(type(labels_spec)) + assert isinstance( + labels_spec, list + ), "labels should be list, but received {}".format( + type(labels_spec) + ) if labels is None: labels = [s._create_feed_layer() for s in labels_spec] else: - assert isinstance(labels, list), \ - "labels should be list, but received {}".format(type(labels)) + assert isinstance( + labels, list + ), "labels should be list, but received {}".format(type(labels)) for label_spec, label in zip(labels_spec, labels): if label_spec.shape != label.shape: label.desc.set_shape(label_spec.shape) @@ -304,7 +323,9 @@ class Engine: # NOTE: this list may be changed if Paddle changes the existing rules.
related_reader_ops = [ - "create_py_reader", "create_double_buffer_reader", "read" + "create_py_reader", + "create_double_buffer_reader", + "read", ] # remove the first three ops if multiple run fit/evaluate/predict if dist_main_block.ops[0].type == 'create_py_reader': @@ -322,9 +343,9 @@ class Engine: for idx in reversed(reader_op_indices): new_op_desc = dist_main_block.desc._prepend_op() new_op_desc.copy_from(dist_main_block.ops[idx].desc) - new_op = Operator(dist_main_block, - new_op_desc, - type=new_op_desc.type()) + new_op = Operator( + dist_main_block, new_op_desc, type=new_op_desc.type() + ) new_reader_ops.append(new_op) dist_op = DistributedOperator(new_op) dist_context.add_dist_op_for_program(dist_op) @@ -355,16 +376,22 @@ class Engine: else: raise ValueError("Unsupported data {}".format(data)) if user_feeds is not None: - assert isinstance(user_feeds, dict), \ - "user_feeds must be a dict, but receive {}".format(type(user_feeds).__name__) + assert isinstance( + user_feeds, dict + ), "user_feeds must be a dict, but receive {}".format( + type(user_feeds).__name__ + ) for name, data in user_feeds.items(): feeds[name] = data return feeds def _prepare_fetch(self, user_fetches, mode): if user_fetches is not None: - assert isinstance(user_fetches, list), \ - "user_fetches must be a list, but receive {}".format(type(user_fetches).__name__) + assert isinstance( + user_fetches, list + ), "user_fetches must be a list, but receive {}".format( + type(user_fetches).__name__ + ) fetch_names = [] fetch_indices = [] @@ -396,14 +423,16 @@ class Engine: _process_fetch_group("fetches", var_list) return fetch_names, fetch_indices - def _prepare_logger(self, - outs, - epoch=None, - step=None, - lr=None, - fetch_names=None, - fetch_indices=None, - mode=None): + def _prepare_logger( + self, + outs, + epoch=None, + step=None, + lr=None, + fetch_names=None, + fetch_indices=None, + mode=None, + ): logs = {} if epoch is not None: logs["epoch"] = epoch @@ -470,9 +499,9 @@ class Engine: inputs_spec = self._inputs_spec labels_spec = self._labels_spec if self._labels_spec else [] - self.program_helper = ProgramHelper(self._model, self._loss, - self._metrics, inputs_spec, - labels_spec) + self.program_helper = ProgramHelper( + self._model, self._loss, self._metrics, inputs_spec, labels_spec + ) # build forward main program self.program_helper.build_program(mode) @@ -505,8 +534,9 @@ class Engine: serial_main_prog = self._orig_main_prog.clone() serial_startup_prog = self._orig_startup_prog.clone() if not self._skip_build: - with static.program_guard(serial_main_prog, serial_startup_prog), \ - utils.unique_name.guard(): + with static.program_guard( + serial_main_prog, serial_startup_prog + ), utils.unique_name.guard(): outputs = to_list(self._model(*inputs)) if mode != "predict" and self._loss: losses = to_list(self._loss(*(outputs + labels))) @@ -515,7 +545,8 @@ class Engine: if mode != "predict" and (outputs or labels): for metric in self._metrics: metrics.append( - to_list(metric.compute(*(outputs + labels)))) + to_list(metric.compute(*(outputs + labels))) + ) else: losses = to_list(self._loss) self.losses = losses @@ -532,7 +563,7 @@ class Engine: fetch_vars = { "outputs": flatten(outputs), "loss": losses, - "metrics": metrics + "metrics": metrics, } if mode != "train": @@ -540,8 +571,15 @@ class Engine: self._set_recompute_ckpts() self._dist_contexts[mode] = DistributedContext( - serial_main_prog, serial_startup_prog, self._optimizer, losses, - feed_vars, fetch_vars, self._cluster, self._strategy) + 
serial_main_prog, + serial_startup_prog, + self._optimizer, + losses, + feed_vars, + fetch_vars, + self._cluster, + self._strategy, + ) self._dist_contexts[mode].gradient_scale = self._strategy.gradient_scale def _optimization_tuning(self, mode, dataset, batch_size): @@ -558,20 +596,24 @@ class Engine: dataset.dp_rank = self._dp_ranks from .tuner.optimization_tuner import OptimizationTuner - self._optimization_tuner = OptimizationTuner(self._tuning.to_dict(), - self._dist_contexts[mode], - dataset, - self._inputs_spec, - self._labels_spec, - batch_size=batch_size, - rank=self._cur_rank) + + self._optimization_tuner = OptimizationTuner( + self._tuning.to_dict(), + self._dist_contexts[mode], + dataset, + self._inputs_spec, + self._labels_spec, + batch_size=batch_size, + rank=self._cur_rank, + ) self._optimization_tuner.tune() if self._tuning.run_after_tuning: # update the strategy self._dist_contexts[ - mode]._strategy = self._optimization_tuner.get_best_config() + mode + ]._strategy = self._optimization_tuner.get_best_config() def _plan(self, mode): if self._planned_mode is None: @@ -596,7 +638,8 @@ class Engine: self._dp_ranks = [] for feed_var in feed_list: dp_world_size, dp_rank = self._get_input_split_info( - feed_var, self._dist_contexts[mode]) + feed_var, self._dist_contexts[mode] + ) self._dp_world_sizes.append(dp_world_size) self._dp_ranks.append(dp_rank) @@ -604,8 +647,9 @@ class Engine: # Parallelize program based on the planner's results # For now, the completer has to be passed to the planner, # because we may use it to complete the annotation of the backwarkward and update. - parallelizer = Parallelizer(mode, self._planners[mode].completer, - self._dist_contexts[mode]) + parallelizer = Parallelizer( + mode, self._planners[mode].completer, self._dist_contexts[mode] + ) if not all_ranks: parallelizer.parallel(self._cur_rank) else: @@ -623,22 +667,30 @@ class Engine: for ib, block in enumerate(origin_main_prog.blocks): for iop, op in enumerate(block.ops): ref_op = ref_blocks[ib].ops[iop] - assert op.type == ref_op.type, \ - "'{}' mode op '{}' is different with '{}' op '{}'. ".format(mode, op.type, ref_mode, ref_op.type) - ref_op_dist_attr = ref_dist_context.get_op_dist_attr_for_program( - ref_op) + assert ( + op.type == ref_op.type + ), "'{}' mode op '{}' is different with '{}' op '{}'. 
".format( + mode, op.type, ref_mode, ref_op.type + ) + ref_op_dist_attr = ( + ref_dist_context.get_op_dist_attr_for_program(ref_op) + ) dist_context.set_op_dist_attr_for_program(op, ref_op_dist_attr) def _initialize(self, mode): # Get the current content from the distributed context self._serial_main_progs[mode] = self._dist_contexts[ - mode].serial_main_program + mode + ].serial_main_program self._serial_startup_progs[mode] = self._dist_contexts[ - mode].serial_startup_program + mode + ].serial_startup_program self._dist_main_progs[mode] = self._dist_contexts[ - mode].dist_main_programs + mode + ].dist_main_programs self._dist_startup_progs[mode] = self._dist_contexts[ - mode].dist_startup_programs + mode + ].dist_startup_programs self._feed_vars[mode] = self._dist_contexts[mode].serial_feed_vars self._fetch_vars[mode] = self._dist_contexts[mode].serial_fetch_vars self._optimizer = self._dist_contexts[mode]._serial_optimizer @@ -685,30 +737,33 @@ class Engine: self._executor.run(prune_startup_prog) if hasattr(self, "_state_dict") and hasattr(self, "_dist_attr"): - self._set_state_dict(mode, self._strict, self._state_dict, - self._dist_attr) + self._set_state_dict( + mode, self._strict, self._state_dict, self._dist_attr + ) if self._strategy.reinit: self._logger.info("NOTE: parameters will be re-initialized.") dist_startup_prog = self._dist_startup_progs[mode][self._cur_rank] self._executor.run(dist_startup_prog) - def fit(self, - train_data, - train_sample_split=None, - batch_size=1, - epochs=1, - steps_per_epoch=None, - log_freq=10, - save_dir=None, - save_freq=1, - valid_data=None, - valid_sample_split=None, - valid_freq=1, - valid_steps=None, - collate_fn=None, - callbacks=None, - verbose=2): + def fit( + self, + train_data, + train_sample_split=None, + batch_size=1, + epochs=1, + steps_per_epoch=None, + log_freq=10, + save_dir=None, + save_freq=1, + valid_data=None, + valid_sample_split=None, + valid_freq=1, + valid_steps=None, + collate_fn=None, + callbacks=None, + verbose=2, + ): """ Trains the model for a fixed number of epochs. If `valid_data` is set, evaluation will be done at the end of each epoch. @@ -777,16 +832,19 @@ class Engine: """ self._mode = 'train' self._inputs_spec, self._labels_spec = self._prepare_data_spec( - train_data, train_sample_split, batch_size) + train_data, train_sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) if not self._has_prepared[self._mode]: self._prepare_program(self._mode) else: self._switch_mode(self._mode) - assert self._mode in self._dist_main_progs, \ - "train model is not ready, please call `engine._prepare_program('train')` first." + assert ( + self._mode in self._dist_main_progs + ), "train model is not ready, please call `engine._prepare_program('train')` first." 
train_dataloader = self._prepare_dataloader_from_generator( dataset=train_data, @@ -795,7 +853,8 @@ class Engine: batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, - collate_fn=collate_fn) + collate_fn=collate_fn, + ) fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode) @@ -824,21 +883,35 @@ class Engine: self.main_program, fetch_list=fetch_names, use_program_cache=self._strategy.use_cache, - return_numpy=self._strategy.return_numpy) + return_numpy=self._strategy.return_numpy, + ) except core.EOFException: break lr = get_lr(self._optimizer) - logs = self._prepare_logger(outs, epoch, step, lr, fetch_names, - fetch_indices, self._mode) + logs = self._prepare_logger( + outs, + epoch, + step, + lr, + fetch_names, + fetch_indices, + self._mode, + ) cbks.on_batch_end('train', step, logs) if valid_data and (epoch + 1) % valid_freq == 0: - val_logs = self.evaluate(valid_data, valid_sample_split, - batch_size, valid_steps, log_freq, - collate_fn, callbacks, verbose) + val_logs = self.evaluate( + valid_data, + valid_sample_split, + batch_size, + valid_steps, + log_freq, + collate_fn, + callbacks, + verbose, + ) val_logs = { - "val_" + name: val - for name, val in val_logs.items() + "val_" + name: val for name, val in val_logs.items() } logs.update(val_logs) self._switch_mode("train") @@ -850,15 +923,17 @@ class Engine: cbks.on_end('train', logs) return self.history - def evaluate(self, - valid_data, - valid_sample_split=None, - batch_size=1, - steps=None, - log_freq=10, - collate_fn=None, - callbacks=None, - verbose=2): + def evaluate( + self, + valid_data, + valid_sample_split=None, + batch_size=1, + steps=None, + log_freq=10, + collate_fn=None, + callbacks=None, + verbose=2, + ): """ Evaluate the loss and metrics of the model on evaluation data. @@ -907,23 +982,27 @@ class Engine: """ self._mode = 'eval' self._inputs_spec, self._labels_spec = self._prepare_data_spec( - valid_data, valid_sample_split, batch_size) + valid_data, valid_sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) if not self._has_prepared[self._mode]: self._prepare_program(self._mode) else: self._switch_mode(self._mode) - assert self._mode in self._dist_main_progs, \ - "eval model is not ready, please call `engine._prepare_program('eval')` first." + assert ( + self._mode in self._dist_main_progs + ), "eval model is not ready, please call `engine._prepare_program('eval')` first." 
valid_dataloader = self._prepare_dataloader_from_generator( dataset=valid_data, capacity=70, iterable=False, batch_size=batch_size, steps_per_epoch=steps, - collate_fn=collate_fn) + collate_fn=collate_fn, + ) fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode) @@ -937,10 +1016,9 @@ class Engine: ) eval_steps = valid_dataloader._steps - cbks.on_begin('eval', { - 'steps': eval_steps, - 'metrics': self._metrics_name() - }) + cbks.on_begin( + 'eval', {'steps': eval_steps, 'metrics': self._metrics_name()} + ) logs = {} for step, _ in enumerate(valid_dataloader): cbks.on_batch_begin('eval', step, logs) @@ -949,24 +1027,28 @@ class Engine: self.main_program, fetch_list=fetch_names, use_program_cache=self._strategy.use_cache, - return_numpy=self._strategy.return_numpy) + return_numpy=self._strategy.return_numpy, + ) except core.EOFException: break - logs = self._prepare_logger(outs, None, step, None, fetch_names, - fetch_indices, self._mode) + logs = self._prepare_logger( + outs, None, step, None, fetch_names, fetch_indices, self._mode + ) cbks.on_batch_end('eval', step, logs) cbks.on_end('eval', logs) self._reset_metrics() return logs - def predict(self, - test_data, - test_sample_split=None, - batch_size=1, - steps=None, - collate_fn=None, - callbacks=None, - verbose=2): + def predict( + self, + test_data, + test_sample_split=None, + batch_size=1, + steps=None, + collate_fn=None, + callbacks=None, + verbose=2, + ): """ Compute the output predictions on testing data. @@ -1012,16 +1094,19 @@ class Engine: """ self._mode = 'predict' self._inputs_spec, self._labels_spec = self._prepare_data_spec( - test_data, test_sample_split, batch_size) + test_data, test_sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) if not self._has_prepared[self._mode]: self._prepare_program(self._mode) else: self._switch_mode(self._mode) - assert self._mode in self._dist_main_progs, \ - "predict model is not ready, please call `engine._prepare_program('predict')` first." + assert ( + self._mode in self._dist_main_progs + ), "predict model is not ready, please call `engine._prepare_program('predict')` first." 
test_dataloader = self._prepare_dataloader_from_generator( dataset=test_data, @@ -1029,7 +1114,8 @@ class Engine: iterable=False, batch_size=batch_size, steps_per_epoch=steps, - collate_fn=collate_fn) + collate_fn=collate_fn, + ) fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode) @@ -1045,37 +1131,43 @@ class Engine: self.main_program, fetch_list=fetch_names, use_program_cache=self._strategy.use_cache, - return_numpy=self._strategy.return_numpy) + return_numpy=self._strategy.return_numpy, + ) except core.EOFException: break - logs = self._prepare_logger(outs, None, step, None, fetch_names, - fetch_indices, self._mode) + logs = self._prepare_logger( + outs, None, step, None, fetch_names, fetch_indices, self._mode + ) cbks.on_batch_end('predict', step, logs) outputs.append(list(logs["outputs"].values())) cbks.on_end('predict', logs) return outputs - def dataloader(self, - dataset, - batch_size=1, - shuffle=False, - drop_last=False, - collate_fn=None, - num_workers=0, - use_buffer_reader=True, - use_shared_memory=True, - timeout=0, - worker_init_fn=None, - epochs=1, - steps_per_epoch=None, - sample_split=1, - mode=None): + def dataloader( + self, + dataset, + batch_size=1, + shuffle=False, + drop_last=False, + collate_fn=None, + num_workers=0, + use_buffer_reader=True, + use_shared_memory=True, + timeout=0, + worker_init_fn=None, + epochs=1, + steps_per_epoch=None, + sample_split=1, + mode=None, + ): if mode is not None: self.to_mode(mode) self._inputs_spec, self._labels_spec = self._prepare_data_spec( - dataset, sample_split, batch_size) + dataset, sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) if not self._has_prepared[self._mode]: self._prepare_program(self._mode) else: @@ -1093,28 +1185,33 @@ class Engine: timeout=timeout, worker_init_fn=worker_init_fn, epochs=epochs, - steps_per_epoch=steps_per_epoch) + steps_per_epoch=steps_per_epoch, + ) return dataloader - def dataloader_from_generator(self, - dataset, - capacity=70, - use_double_buffer=True, - iterable=True, - use_multiprocess=False, - drop_last=True, - batch_size=1, - epochs=1, - steps_per_epoch=None, - collate_fn=None, - sample_split=1, - mode=None): + def dataloader_from_generator( + self, + dataset, + capacity=70, + use_double_buffer=True, + iterable=True, + use_multiprocess=False, + drop_last=True, + batch_size=1, + epochs=1, + steps_per_epoch=None, + collate_fn=None, + sample_split=1, + mode=None, + ): if mode is not None: self.to_mode(mode) self._inputs_spec, self._labels_spec = self._prepare_data_spec( - dataset, sample_split, batch_size) + dataset, sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) if not self._has_prepared[self._mode]: self._prepare_program(self._mode) else: @@ -1130,17 +1227,20 @@ class Engine: batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, - collate_fn=collate_fn) + collate_fn=collate_fn, + ) return dataloader - def prepare(self, - inputs_spec=None, - labels_spec=None, - inputs=None, - labels=None, - main_program=None, - startup_program=None, - mode=None): + def prepare( + self, + inputs_spec=None, + labels_spec=None, + inputs=None, + labels=None, + main_program=None, + startup_program=None, + mode=None, + ): if mode is not None: self.to_mode(mode) if inputs or labels: @@ -1148,7 +1248,8 @@ class Engine: self._inputs_spec = 
inputs_spec self._labels_spec = labels_spec self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec, inputs, labels) + self._inputs_spec, self._labels_spec, inputs, labels + ) self._orig_main_prog = main_program if self._orig_main_prog is None: self._orig_main_prog = static.default_main_program() @@ -1164,7 +1265,8 @@ class Engine: self._labels_spec = labels_spec self._outside_dataloader = True self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) self._orig_main_prog = main_program if self._orig_main_prog is None: self._orig_main_prog = static.default_main_program() @@ -1176,44 +1278,55 @@ class Engine: else: self._switch_mode(self._mode) else: - assert self._inputs_spec and self._labels_spec, \ - "Please call the dataloader(...) before calling prepare(...)" + assert ( + self._inputs_spec and self._labels_spec + ), "Please call the dataloader(...) before calling prepare(...)" def run(self, data=None, feed=None, fetch_list=None, mode=None): if mode is not None: self.to_mode(mode) feed_dict = self._prepare_feed(data, feed, self._mode) fetch_names, fetch_indices = self._prepare_fetch(fetch_list, self._mode) - if self._outside_dataloader and not self._has_prepared_reader[ - self._mode]: + if ( + self._outside_dataloader + and not self._has_prepared_reader[self._mode] + ): self._prepare_reader() - outs = self._executor.run(self.main_program, - feed=feed_dict, - fetch_list=fetch_names, - use_program_cache=self._strategy.use_cache, - return_numpy=self._strategy.return_numpy) - logs = self._prepare_logger(outs, None, None, None, fetch_names, - fetch_indices, self._mode) + outs = self._executor.run( + self.main_program, + feed=feed_dict, + fetch_list=fetch_names, + use_program_cache=self._strategy.use_cache, + return_numpy=self._strategy.return_numpy, + ) + logs = self._prepare_logger( + outs, None, None, None, fetch_names, fetch_indices, self._mode + ) return logs - def _prepare_dataloader(self, - dataset, - return_list=True, - batch_size=1, - shuffle=False, - drop_last=False, - collate_fn=None, - num_workers=0, - use_buffer_reader=True, - use_shared_memory=True, - timeout=0, - worker_init_fn=None, - epochs=1, - steps_per_epoch=None): + def _prepare_dataloader( + self, + dataset, + return_list=True, + batch_size=1, + shuffle=False, + drop_last=False, + collate_fn=None, + num_workers=0, + use_buffer_reader=True, + use_shared_memory=True, + timeout=0, + worker_init_fn=None, + epochs=1, + steps_per_epoch=None, + ): if self._strategy.gradient_merge and batch_size is not None: - assert batch_size % self._k_steps == 0, \ - "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format(batch_size, self._k_steps) + assert ( + batch_size % self._k_steps == 0 + ), "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format( + batch_size, self._k_steps + ) batch_size //= self._k_steps dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank] @@ -1257,26 +1370,32 @@ class Engine: steps_per_epoch=steps_per_epoch, split_data=self._strategy.split_data, data_parallel_world_size=self._dp_world_sizes, - data_parallel_rank=self._dp_ranks) + data_parallel_rank=self._dp_ranks, + ) return dataloader - def _prepare_dataloader_from_generator(self, - dataset, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=False, - use_multiprocess=False, - drop_last=True, - batch_size=1, - epochs=1, - steps_per_epoch=None, - collate_fn=None): + def 
_prepare_dataloader_from_generator( + self, + dataset, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=False, + use_multiprocess=False, + drop_last=True, + batch_size=1, + epochs=1, + steps_per_epoch=None, + collate_fn=None, + ): if self._strategy.gradient_merge and batch_size is not None: - assert batch_size % self._k_steps == 0, \ - "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format(batch_size, self._k_steps) + assert ( + batch_size % self._k_steps == 0 + ), "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format( + batch_size, self._k_steps + ) batch_size //= self._k_steps dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank] @@ -1317,16 +1436,19 @@ class Engine: collate_fn=collate_fn, split_data=self._strategy.split_data, data_parallel_world_size=self._dp_world_sizes, - data_parallel_rank=self._dp_ranks) + data_parallel_rank=self._dp_ranks, + ) self._prepare_reader() return dataloader def _tune(self, tune_data, tune_sample_split=None, batch_size=1): self._mode = 'train' self._inputs_spec, self._labels_spec = self._prepare_data_spec( - tune_data, tune_sample_split, batch_size) + tune_data, tune_sample_split, batch_size + ) self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) self._optimization_tuning(self._mode, tune_data, batch_size) def _validate_spec(self, specs): @@ -1337,12 +1459,17 @@ class Engine: assert isinstance(spec, InputSpec) if spec.name is None: raise ValueError( - "Requires Input[{}].name != None, but receive `None` with {}." - .format(i, spec)) + "Requires Input[{}].name != None, but receive `None` with {}.".format( + i, spec + ) + ) if self._k_steps > 1: shape = list(spec.shape) - assert shape[0] % self._k_steps == 0, \ - "Requires batch_size[{}] to be divisible by k_steps[{}].".format(spec.shape[0], self._k_steps) + assert ( + shape[0] % self._k_steps == 0 + ), "Requires batch_size[{}] to be divisible by k_steps[{}].".format( + spec.shape[0], self._k_steps + ) shape[0] //= self._k_steps spec.shape = shape return specs @@ -1360,16 +1487,20 @@ class Engine: dims_mapping = tensor_dist_attr.dims_mapping if self._cur_rank not in process_mesh.processes: - rank_id = _get_corresponding_rank(dist_context, process_mesh, - self._cur_rank) + rank_id = _get_corresponding_rank( + dist_context, process_mesh, self._cur_rank + ) else: rank_id = self._cur_rank batch_size_axis = dims_mapping[0] if batch_size_axis > -1 and process_mesh.topology[batch_size_axis] > 1: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, - batch_size_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh.processes, + process_mesh.topology, + batch_size_axis, + rank_id, + ) return len(group_ranks), group_ranks.index(rank_id) return 1, 0 @@ -1382,10 +1513,12 @@ class Engine: # extract ckpts by specific model if isinstance(self._model, paddle.nn.Layer): - if hasattr(self._model, - "gpt") and self._model.__class__.__name__ in [ - 'GPTForPretraining', 'GPTForPretrainingAuto' - ]: + if hasattr( + self._model, "gpt" + ) and self._model.__class__.__name__ in [ + 'GPTForPretraining', + 'GPTForPretrainingAuto', + ]: exact_ckpts = self._model.gpt.checkpoints else: exact_ckpts = recompute.checkpoints @@ -1397,7 +1530,7 @@ class Engine: recompute.checkpoints = exact_ckpts[:] logs = { 'Model Class': self._model.__class__.__name__, - 'Applied Recompute ckpts': exact_ckpts + 'Applied Recompute ckpts': exact_ckpts, } 
self._logger.info(logs) @@ -1422,8 +1555,11 @@ class Engine: self._optimizer = self._dist_contexts[mode]._serial_optimizer def to_mode(self, mode): - assert mode in ["train", "eval", "predict"], \ - "mode {} should be one of ['train', 'eval', 'predict']".format(mode) + assert mode in [ + "train", + "eval", + "predict", + ], "mode {} should be one of ['train', 'eval', 'predict']".format(mode) self._mode = mode def _set_state_dict(self, mode, strict, state_dict, dist_attr): @@ -1484,20 +1620,24 @@ class Engine: serial_program = self._serial_main_progs[self._mode] dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank] dist_context = self._dist_contexts[self._mode] - self._saver.save(path, - serial_program=serial_program, - dist_main_program=dist_main_prog, - dist_context=dist_context) + self._saver.save( + path, + serial_program=serial_program, + dist_main_program=dist_main_prog, + dist_context=dist_context, + ) else: assert "predict" in self._dist_main_progs feed_vars = self._feed_vars["predict"]['inputs'] fetch_vars = self._fetch_vars["predict"]['outputs'] dist_main_prog = self._dist_main_progs["predict"][self._cur_rank] - self._saver.save_inference_model(path, - feed_vars, - fetch_vars, - self._executor, - program=dist_main_prog) + self._saver.save_inference_model( + path, + feed_vars, + fetch_vars, + self._executor, + program=dist_main_prog, + ) def load(self, path, strict=True, load_optimizer=True): """ @@ -1547,7 +1687,8 @@ class Engine: """ self._strict = strict self._state_dict, self._dist_attr = self._saver.load( - path, load_optimizer) + path, load_optimizer + ) return self._state_dict, self._dist_attr def cost(self, inputs_spec=None, labels_spec=None, mode="train"): @@ -1577,14 +1718,18 @@ class Engine: # Check mode accepted_modes = ["train", "predict", "eval"] if mode not in accepted_modes: - raise ValueError("The mode {} is not in accepted modes {}".format( - mode, accepted_modes)) + raise ValueError( + "The mode {} is not in accepted modes {}".format( + mode, accepted_modes + ) + ) self.to_mode(mode) if inputs_spec is not None: self._inputs_spec, self._labels_spec = inputs_spec, labels_spec self._inputs, self._labels = self._prepare_data_tensor( - self._inputs_spec, self._labels_spec) + self._inputs_spec, self._labels_spec + ) self._build(mode) self._plan(mode) else: diff --git a/python/paddle/distributed/auto_parallel/graph.py b/python/paddle/distributed/auto_parallel/graph.py index de6505071abfe1501b72bbc4591c684b77fe1a4e..d671d95aa9f4ada3f3f753e7e1ad4fa0510d7305 100644 --- a/python/paddle/distributed/auto_parallel/graph.py +++ b/python/paddle/distributed/auto_parallel/graph.py @@ -14,7 +14,6 @@ class Node: - def __init__(self, id, **attrs): # Each node must has a unique id self._id = id @@ -48,7 +47,6 @@ class Node: class Edge: - def __init__(self, src_id, tgt_id, **attrs): # The id of source node in an Edge self._src_id = src_id @@ -85,12 +83,12 @@ class Edge: def __str__(self): str = "" str += "(src_id: {}, tgt_id: {}, attrs: {})".format( - self.src_id, self.tgt_id, self._attrs) + self.src_id, self.tgt_id, self._attrs + ) return str class Graph: - def __init__(self, **attrs): # _nodes is dict for storing the nodes of the graph. # The key of this dict is the node id. 
diff --git a/python/paddle/distributed/auto_parallel/helper.py b/python/paddle/distributed/auto_parallel/helper.py index 3173f51e249ab358715eb00bd417e838a70373e7..f73668a2ba95c1f04828a9ef19ce58a990b90b05 100644 --- a/python/paddle/distributed/auto_parallel/helper.py +++ b/python/paddle/distributed/auto_parallel/helper.py @@ -20,7 +20,9 @@ from paddle.jit import to_static, not_to_static from paddle.fluid.framework import Parameter from paddle.fluid.framework import program_guard from paddle.fluid.executor import global_scope -from paddle.fluid.dygraph.dygraph_to_static.program_translator import StaticFunction +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + StaticFunction, +) from .utils import to_list from .utils import get_logger @@ -174,7 +176,6 @@ class ProxyLayer(Layer): class BuildInfo: - def __init__(self): self.clear() @@ -224,8 +225,9 @@ class ProgramHelper(object): # skip if we has already built program. if self.build_info.has_cache(mode, True): self._logger.info( - "Already build program with mode = %s, use cached program." % - mode) + "Already build program with mode = %s, use cached program." + % mode + ) return self._logger.info("start to build program for mode = %s." % mode) @@ -249,21 +251,24 @@ class ProgramHelper(object): self.lazy_init = True return for param in self.concrete_program.parameters: - Parameter(name=param.name, - desc=param, - type=param.type, - shape=param.shape, - dtype=param.dtype, - stop_gradient=param.stop_gradient, - block=self.startup_program.global_block()) + Parameter( + name=param.name, + desc=param, + type=param.type, + shape=param.shape, + dtype=param.dtype, + stop_gradient=param.stop_gradient, + block=self.startup_program.global_block(), + ) def apply_optimizer(self, optimizer): """ Append backward and generate optimizer operations. """ self._verify_optimizer(optimizer) - self._logger.info("start to apply optimizer: %s ", - type(optimizer).__name__) + self._logger.info( + "start to apply optimizer: %s ", type(optimizer).__name__ + ) # clear optimizer parameters original_params = optimizer._parameter_list optimizer._parameter_list = None @@ -276,13 +281,17 @@ class ProgramHelper(object): def _verify_optimizer(self, optimizer): assert optimizer is not None - assert hasattr(optimizer, - "minimize"), "Optimizer must have minimize() method." - assert self.proxy_layer.mode == 'train', "Required mode == 'train', but received '%s'" % self.proxy_layer.mode - assert len( - self.loss_vars - ) == 1, "Required len(loss_vars) == 1, but received len(loss_vars) = %s" % len( - self.loss_vars) + assert hasattr( + optimizer, "minimize" + ), "Optimizer must have minimize() method." + assert self.proxy_layer.mode == 'train', ( + "Required mode == 'train', but received '%s'" + % self.proxy_layer.mode + ) + assert len(self.loss_vars) == 1, ( + "Required len(loss_vars) == 1, but received len(loss_vars) = %s" + % len(self.loss_vars) + ) def to(self, mode): """ @@ -291,7 +300,8 @@ class ProgramHelper(object): assert mode in ['train', 'eval', 'predict'] func = getattr(self.proxy_layer, '_' + mode) assert isinstance( - func, StaticFunction), "Please call build_program(mode) firstly." + func, StaticFunction + ), "Please call build_program(mode) firstly." self.proxy_layer.set_mode(mode) def static_func(self): @@ -299,7 +309,9 @@ class ProgramHelper(object): Return StaticFunction instance with underly target mode. 
""" assert self.proxy_layer.mode in [ - 'train', 'eval', 'predict' + 'train', + 'eval', + 'predict', ], "Please call build_program(mode) firstly." func_name = '_' + self.proxy_layer.mode return getattr(self.proxy_layer, func_name) @@ -317,13 +329,14 @@ class ProgramHelper(object): dist_attr = { "dims_mapping": var_dist_attr.dims_mapping, "process_shape": var_dist_attr.process_mesh.topology, - "process_group": var_dist_attr.process_mesh.processes + "process_group": var_dist_attr.process_mesh.processes, } # slice param_value with dist_attr # share sliced_param_value with param_tensor in global_scope param_tensor = global_scope().var(param.name).get_tensor() sliced_param = Converter.slice_with_dist_attr( - param.numpy(), dist_attr) + param.numpy(), dist_attr + ) param_tensor.set(sliced_param, place) @property diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py index 0abdc0ea76893c37190613df9d84b2754274eea9..124f622d40f049f224d3405379b76d625e664f33 100644 --- a/python/paddle/distributed/auto_parallel/interface.py +++ b/python/paddle/distributed/auto_parallel/interface.py @@ -63,29 +63,39 @@ def shard_tensor(x, process_mesh=None, shard_spec=None): """ if process_mesh is not None: - assert isinstance(process_mesh, ProcessMesh), \ - "Argument process_mesh {} is not an instance of ProcessMesh".format(process_mesh) + assert isinstance( + process_mesh, ProcessMesh + ), "Argument process_mesh {} is not an instance of ProcessMesh".format( + process_mesh + ) else: process_mesh = get_current_process_mesh() - assert process_mesh is not None, \ - "Specify the process mesh argument or use ProcessMesh context manager first." - assert isinstance(shard_spec, list), \ - "Argument shard_spec {} is not an instance of list".format(shard_spec) + assert ( + process_mesh is not None + ), "Specify the process mesh argument or use ProcessMesh context manager first." 
+ assert isinstance( + shard_spec, list + ), "Argument shard_spec {} is not an instance of list".format(shard_spec) dist_tensor = DistributedTensor(x) serial_tensor = dist_tensor.serial_tensor dist_tensor.dist_attr.process_mesh = process_mesh - if serial_tensor.type == core.VarDesc.VarType.READER \ - or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES: + if ( + serial_tensor.type == core.VarDesc.VarType.READER + or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES + ): tensor_shape = [] else: tensor_shape = serial_tensor.shape if shard_spec is not None: - assert verify_shard_spec(shard_spec, tensor_shape, process_mesh), \ - "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( - serial_tensor.name, shard_spec, tensor_shape, process_mesh) + assert verify_shard_spec( + shard_spec, tensor_shape, process_mesh + ), "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format( + serial_tensor.name, shard_spec, tensor_shape, process_mesh + ) dist_tensor.dist_attr.dims_mapping = convert_to_dims_mapping( - shard_spec, process_mesh) + shard_spec, process_mesh + ) if process_mesh is not None: dist_tensor.dist_attr.mark_annotated("process_mesh") if shard_spec is not None: @@ -140,41 +150,54 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None): """ if process_mesh is not None: - assert isinstance(process_mesh, ProcessMesh), \ - "Argument process_mesh {} is not an instance of ProcessMesh".format(process_mesh) + assert isinstance( + process_mesh, ProcessMesh + ), "Argument process_mesh {} is not an instance of ProcessMesh".format( + process_mesh + ) else: process_mesh = get_current_process_mesh() - assert process_mesh is not None, \ - "Specify the process mesh argument or use ProcessMesh context manager first." + assert ( + process_mesh is not None + ), "Specify the process mesh argument or use ProcessMesh context manager first." 
in_dims_mappings = [] if in_shard_specs is not None: - assert all((isinstance(shard_spec, list) or shard_spec is None) for shard_spec in in_shard_specs), \ - "in_shard_spec {} is not a list of list or None".format(in_shard_specs) + assert all( + (isinstance(shard_spec, list) or shard_spec is None) + for shard_spec in in_shard_specs + ), "in_shard_spec {} is not a list of list or None".format( + in_shard_specs + ) for shard_spec in in_shard_specs: if shard_spec is not None: in_dims_mappings.append( - convert_to_dims_mapping(shard_spec, process_mesh)) + convert_to_dims_mapping(shard_spec, process_mesh) + ) else: in_dims_mappings.append(None) out_dims_mappings = [] if out_shard_specs is not None: - assert all((isinstance(shard_spec, list) or shard_spec is None) for shard_spec in out_shard_specs), \ - "out_shard_spec {} is not a list of list or None".format(out_shard_specs) + assert all( + (isinstance(shard_spec, list) or shard_spec is None) + for shard_spec in out_shard_specs + ), "out_shard_spec {} is not a list of list or None".format( + out_shard_specs + ) for shard_spec in out_shard_specs: if shard_spec is not None: out_dims_mappings.append( - convert_to_dims_mapping(shard_spec, process_mesh)) + convert_to_dims_mapping(shard_spec, process_mesh) + ) else: out_dims_mappings.append(None) - op = DistributedOperatorHelper(op, process_mesh, in_dims_mappings, - out_dims_mappings) + op = DistributedOperatorHelper( + op, process_mesh, in_dims_mappings, out_dims_mappings + ) return op def recompute(op): - class RecomputeOperator: - def __init__(self, op): self._op = op @@ -215,11 +238,13 @@ def add_to_collection(collection_name, value, name=None): _g_collections[collection_name] = [] if name is not None: for _, v in _g_collections[collection_name]: - if v == value: return + if v == value: + return _g_collections[collection_name].append((name, value)) else: for _, v in _g_collections[collection_name]: - if v == value: return + if v == value: + return _g_collections[collection_name].append((None, value)) diff --git a/python/paddle/distributed/auto_parallel/mapper.py b/python/paddle/distributed/auto_parallel/mapper.py index f8c0792c580f099d7652d1b6814ad96ba4fb058c..d7b599aaa1c44173e97800f8035db769f49e3fb3 100644 --- a/python/paddle/distributed/auto_parallel/mapper.py +++ b/python/paddle/distributed/auto_parallel/mapper.py @@ -24,9 +24,16 @@ from .process_group import get_process_group def is_collective_comm_op(op): comm_list = [ - "c_allreduce_sum", "c_allreduce_min", "c_allreduce_max", - "c_allreduce_prod", "c_reduce_sum", "c_reduce_min", "c_reduce_max", - "c_reduce_prod", "c_broadcast", "c_allgather" + "c_allreduce_sum", + "c_allreduce_min", + "c_allreduce_max", + "c_allreduce_prod", + "c_reduce_sum", + "c_reduce_min", + "c_reduce_max", + "c_reduce_prod", + "c_broadcast", + "c_allgather", ] if op.type in comm_list: return True @@ -127,7 +134,8 @@ def analyze_comm_requirements_from_op(op, rank, g_process_group_map): if comm_volume is not None: comm_requirements_to_ranks[tgt_rank] = {} comm_requirements_to_ranks[tgt_rank][ - "comm_volume"] = comm_volume + "comm_volume" + ] = comm_volume elif is_p2p_comm_op(op): tgt_rank = op.attr("peer") comm_volume = get_comm_volume(op, rank, tgt_rank) @@ -149,28 +157,33 @@ def analyze_requirements_for_program(src_info, rank): for block in program.blocks: for op in block.ops: cur_comm_requirements_to_ranks = analyze_comm_requirements_from_op( - op, rank, g_process_group_map) + op, rank, g_process_group_map + ) for tgt_rank, link_info in 
cur_comm_requirements_to_ranks.items(): if tgt_rank in comm_requirements_to_ranks: comm_requirements_to_ranks[tgt_rank][ - "comm_volume"] += link_info["comm_volume"] + "comm_volume" + ] += link_info["comm_volume"] else: comm_requirements_to_ranks[tgt_rank] = {} comm_requirements_to_ranks[tgt_rank][ - "comm_volume"] = link_info["comm_volume"] + "comm_volume" + ] = link_info["comm_volume"] return resource_requirements, comm_requirements_to_ranks def build_process_graph(distributed_program): graph = Graph() for src_rank, src_info in distributed_program.items(): - resource_requirements, comm_requirements_to_ranks = analyze_requirements_for_program( - src_info, src_rank) + ( + resource_requirements, + comm_requirements_to_ranks, + ) = analyze_requirements_for_program(src_info, src_rank) graph.add_node(src_rank, resource_requirements=resource_requirements) for tgt_rank, comm_requirements in comm_requirements_to_ranks.items(): - graph.add_edge(src_rank, - tgt_rank, - comm_requirements=comm_requirements) + graph.add_edge( + src_rank, tgt_rank, comm_requirements=comm_requirements + ) return graph @@ -185,14 +198,17 @@ def build_cluster_graph(cluster): for machine in cluster.machines.values(): for device in machine.devices.values(): graph.add_node(device.global_id, device=device) - if cuda_visible_devices and device.local_id not in cuda_visible_devices: + if ( + cuda_visible_devices + and device.local_id not in cuda_visible_devices + ): graph.nodes[device.global_id]["occupied"] = True else: graph.nodes[device.global_id]["occupied"] = False for link in machine.links.values(): - graph.add_edge(link.source.global_id, - link.target.global_id, - link=link) + graph.add_edge( + link.source.global_id, link.target.global_id, link=link + ) return graph @@ -222,7 +238,8 @@ def mapping(distributed_program, cluster): queue = deque() root_rank_node = select_unvisited_rank_node( - list(process_graph.nodes.values())) + list(process_graph.nodes.values()) + ) while root_rank_node is not None: queue.append(root_rank_node) while queue: @@ -232,48 +249,61 @@ def mapping(distributed_program, cluster): device_type = cur_rank_node["resource_requirements"]["device_type"] cur_device_node = None for device_node in cluster_graph.nodes.values(): - if (device_node["device"].type - == device_type) and (not device_node["occupied"]): + if (device_node["device"].type == device_type) and ( + not device_node["occupied"] + ): device_node["occupied"] = True cur_rank_node["visited"] = True cur_rank_node["device"] = device_node["device"] cur_device_node = device_node break - assert cur_device_node, "Cannot find a device to satisfy the requirement." + assert ( + cur_device_node + ), "Cannot find a device to satisfy the requirement." 
nbr_rank_edges = [] for nbr_rank_node_id, nbr_rank_edge in process_graph.adjs[ - cur_rank_node.id].items(): - assert nbr_rank_edge.src_id == cur_rank_node.id and nbr_rank_edge.tgt_id == nbr_rank_node_id + cur_rank_node.id + ].items(): + assert ( + nbr_rank_edge.src_id == cur_rank_node.id + and nbr_rank_edge.tgt_id == nbr_rank_node_id + ) queue.append(process_graph.nodes[nbr_rank_node_id]) nbr_rank_edges.append(nbr_rank_edge) nbr_rank_edges.sort(key=sort_by_comm_volume) nbr_device_edges = [] for nbr_device_edge in cluster_graph.adjs[ - cur_device_node.id].values(): + cur_device_node.id + ].values(): nbr_device_edges.append(nbr_device_edge) nbr_device_edges.sort(key=sort_by_comm_bandwidth) for nbr_rank_edge in nbr_rank_edges: - src_rank_node = process_graph.nodes[ - nbr_rank_edge.src_id]["visited"] + src_rank_node = process_graph.nodes[nbr_rank_edge.src_id][ + "visited" + ] if src_rank_node: continue device_type = src_rank_node["resource_requirements"][ - "device_type"] + "device_type" + ] nbr_rank_node = process_graph.nodes[nbr_rank_edge.tgt_id] for nbr_device_edge in nbr_device_edges: nbr_device_node = cluster_graph.nodes[ - nbr_device_edge.tgt_id] + nbr_device_edge.tgt_id + ] if (nbr_device_node["device"].type == device_type) and ( - not nbr_device_node["occupied"]): + not nbr_device_node["occupied"] + ): nbr_device_node["occupied"] = True nbr_rank_node["visited"] = True nbr_rank_node["device"] = nbr_device_node["device"] break root_rank_node = select_unvisited_rank_node( - list(process_graph.nodes.values())) + list(process_graph.nodes.values()) + ) rank_mapping = {} for rank, rank_node in process_graph.nodes.items(): diff --git a/python/paddle/distributed/auto_parallel/operators/common.py b/python/paddle/distributed/auto_parallel/operators/common.py index 7bd51d3f98671a9be6b920b3084f7648b3a445bc..af9c53a88ea869cb99e0327f8b83df681c7727f9 100644 --- a/python/paddle/distributed/auto_parallel/operators/common.py +++ b/python/paddle/distributed/auto_parallel/operators/common.py @@ -21,16 +21,22 @@ from ..process_group import new_process_group _g_distributed_operator_impl_containers = {} _g_elementwise_ops = [ - "elementwise", "gelu", "dropout", "cast", "gather", "concat", - "fused_softmax_mask_upper_triangle" + "elementwise", + "gelu", + "dropout", + "cast", + "gather", + "concat", + "fused_softmax_mask_upper_triangle", ] BACKWARD_ONLY_DIST_OPS = {'check_finite_and_unscale', 'update_loss_scaling'} -class ParallelMode(): +class ParallelMode: """ the parallel mode for communication or auxiliary operator """ + DataParallel = "auto_parallel/data_parallel" ModelParallel = "auto_parallel/model_parallel" PipelineParalel = "auto_parallel/pipeline_paralel" @@ -46,7 +52,6 @@ def is_elementwise_op(op_type): class DistributedOperatorImplContainer: - def __init__(self, op_type): self._type = op_type self._impls = [] @@ -64,8 +69,9 @@ class DistributedOperatorImplContainer: return self._impls def register_impl(self, dist_impl): - assert self.type == dist_impl.type, \ - "Op type of container must be same as that of the implementation." + assert ( + self.type == dist_impl.type + ), "Op type of container must be same as that of the implementation." 
impl_idx = len(self.impls) dist_impl.idx = impl_idx self._impls.append(dist_impl) @@ -96,7 +102,6 @@ class DistributedOperatorImplContainer: class DistributedOperatorImpl(abc.ABC): - def __init__(self, name): self._name = name self._type = None @@ -181,54 +186,69 @@ def find_compatible_distributed_operator_impls(dist_op, fwd=True, partial=True): op_type = dist_op.serial_op.type dist_op_impl_container = get_distributed_operator_impl_container(op_type) dist_op_eltwise_impl_container = get_distributed_operator_impl_container( - "elementwise") + "elementwise" + ) dist_op_default_impl_container = get_distributed_operator_impl_container( - "default") + "default" + ) compatible_impls = [] if partial: if fwd: # First, find impls in the corresponding container if dist_op_impl_container: compatible_impls.extend( - dist_op_impl_container.get_input_compatible_impls(dist_op)) + dist_op_impl_container.get_input_compatible_impls(dist_op) + ) # Second, find impls in the elementwise container if dist_op_eltwise_impl_container and is_elementwise_op(op_type): compatible_impls.extend( dist_op_eltwise_impl_container.get_input_compatible_impls( - dist_op)) + dist_op + ) + ) # Third, find impls in the default container if dist_op_default_impl_container: compatible_impls.extend( dist_op_default_impl_container.get_input_compatible_impls( - dist_op)) + dist_op + ) + ) else: # First, find impls in the corresponding container if dist_op_impl_container: compatible_impls.extend( - dist_op_impl_container.get_output_compatible_impls(dist_op)) + dist_op_impl_container.get_output_compatible_impls(dist_op) + ) # Second, find impls in the elementwise container if dist_op_eltwise_impl_container and is_elementwise_op(op_type): compatible_impls.extend( dist_op_eltwise_impl_container.get_output_compatible_impls( - dist_op)) + dist_op + ) + ) # Third, find impls in the default container if dist_op_default_impl_container: compatible_impls.extend( dist_op_default_impl_container.get_output_compatible_impls( - dist_op)) + dist_op + ) + ) else: # First, find impls in the corresponding container if dist_op_impl_container: compatible_impls.extend( - dist_op_impl_container.get_compatible_impls(dist_op)) + dist_op_impl_container.get_compatible_impls(dist_op) + ) # Second, find impls in the elementwise container if dist_op_eltwise_impl_container and is_elementwise_op(op_type): compatible_impls.extend( - dist_op_eltwise_impl_container.get_compatible_impls(dist_op)) + dist_op_eltwise_impl_container.get_compatible_impls(dist_op) + ) # Third, find impls in the default container if dist_op_default_impl_container: compatible_impls.extend( - dist_op_default_impl_container.get_compatible_impls(dist_op)) + dist_op_default_impl_container.get_compatible_impls(dist_op) + ) if compatible_impls: # For now, just return the first compatible impl @@ -241,11 +261,11 @@ def find_compatible_distributed_operator_impls(dist_op, fwd=True, partial=True): def is_parameter_related(varname, block): if ".subprog_" in varname: - varname = varname[:varname.index(".subprog_")] + varname = varname[: varname.index(".subprog_")] if ".cast_fp" in varname: - varname = varname[:varname.index(".cast_fp")] + varname = varname[: varname.index(".cast_fp")] if ".quantized" in varname: - varname = varname[:varname.index(".quantized")] + varname = varname[: varname.index(".quantized")] assert block.has_var(varname) var = block.var(varname) return var.is_parameter @@ -277,8 +297,9 @@ def infer_shape(block, src_var, src_var_dist_attr, op_input_dist_attr): return exact_shape -def 
set_comm_op_dist_attr_for_program(new_op, process_mesh, tensor_dist_attr, - ctx): +def set_comm_op_dist_attr_for_program( + new_op, process_mesh, tensor_dist_attr, ctx +): assert process_mesh is not None assert tensor_dist_attr is not None @@ -303,9 +324,11 @@ def naive_copy_op_dist_attr_for_program(new_op, ref_op, ctx): assert len(new_op.input(input_name)) == 1 ref_tensor_dist_attr = ref_dist_attr.get_input_dist_attr( - ref_op.input(input_name)[0]) + ref_op.input(input_name)[0] + ) new_op_dist_attr.set_input_dist_attr( - new_op.input(input_name)[0], ref_tensor_dist_attr) + new_op.input(input_name)[0], ref_tensor_dist_attr + ) for output_name in ref_op.output_names: assert output_name in new_op.output_names @@ -313,9 +336,11 @@ def naive_copy_op_dist_attr_for_program(new_op, ref_op, ctx): assert len(new_op.output(output_name)) == 1 ref_tensor_dist_attr = ref_dist_attr.get_output_dist_attr( - ref_op.output(output_name)[0]) + ref_op.output(output_name)[0] + ) new_op_dist_attr.set_output_dist_attr( - new_op.output(output_name)[0], ref_tensor_dist_attr) + new_op.output(output_name)[0], ref_tensor_dist_attr + ) ctx.set_op_dist_attr_for_program(new_op, new_op_dist_attr) @@ -348,9 +373,12 @@ def get_data_parallel_group(dist_ctx, op, act_grad_names, rank): batch_size_axis = var_dim_mapping[0] if len(var_dim_mapping) > 0 else -1 if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, - batch_size_axis, rank) + group_ranks = _get_comm_group( + process_mesh.processes, + process_mesh.topology, + batch_size_axis, + rank, + ) dp_group = new_process_group(group_ranks) break @@ -377,33 +405,39 @@ def sync_and_scale_gradients(dist_ctx, op, dp_group, allreduce_var_names): for var_name in allreduce_var_names: added_ops = [] grad_var = main_block.var(var_name) - allreduce_op = main_block.append_op(type='c_allreduce_sum', - inputs={'X': [grad_var]}, - outputs={'Out': [grad_var]}, - attrs={ - 'ring_id': dp_group.id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Backward - }) - allreduce_op._set_attr('op_namescope', - str('/') + ParallelMode.DataParallel) + allreduce_op = main_block.append_op( + type='c_allreduce_sum', + inputs={'X': [grad_var]}, + outputs={'Out': [grad_var]}, + attrs={ + 'ring_id': dp_group.id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Backward, + }, + ) + allreduce_op._set_attr( + 'op_namescope', str('/') + ParallelMode.DataParallel + ) added_ops.append(allreduce_op) if dist_ctx.gradient_scale: - scale_op = main_block.append_op(type='scale', - inputs={'X': grad_var}, - outputs={'Out': grad_var}, - attrs={ - 'scale': 1.0 / dp_degree, - OP_ROLE_KEY: OpRole.Backward - }) - scale_op._set_attr('op_namescope', - str('/') + ParallelMode.DataParallel) + scale_op = main_block.append_op( + type='scale', + inputs={'X': grad_var}, + outputs={'Out': grad_var}, + attrs={'scale': 1.0 / dp_degree, OP_ROLE_KEY: OpRole.Backward}, + ) + scale_op._set_attr( + 'op_namescope', str('/') + ParallelMode.DataParallel + ) added_ops.append(scale_op) dims_mapping = op_dist_attr.get_output_dims_mapping(grad_var.name) - assert dims_mapping is not None, "Unexception: dims_mapping of output [{}] of op [{}] is None".format( - grad_var.name, op_dist_attr.op_type) + assert ( + dims_mapping is not None + ), "Unexception: dims_mapping of output [{}] of op [{}] is None".format( + grad_var.name, op_dist_attr.op_type + ) # NOTE auxiliary op's dist attr should follow dist_op not dist_tensor for new_op in added_ops: new_op_attr = 
OperatorDistributedAttribute() @@ -413,8 +447,9 @@ def sync_and_scale_gradients(dist_ctx, op, dp_group, allreduce_var_names): dist_ctx.set_op_dist_attr_for_program(new_op, new_op_attr) -def gradient_synchronization(dist_ctx, op, act_grad_names, out_grad_names, - rank): +def gradient_synchronization( + dist_ctx, op, act_grad_names, out_grad_names, rank +): """ conduct the allreudce and scaling(dp size)for gradients of model parameters for operator in data parallelism. @@ -430,8 +465,11 @@ def gradient_synchronization(dist_ctx, op, act_grad_names, out_grad_names, if not is_in_backward_phase(dist_ctx): return - if is_optimize_op(op) or len(act_grad_names) == 0 or len( - out_grad_names) == 0: + if ( + is_optimize_op(op) + or len(act_grad_names) == 0 + or len(out_grad_names) == 0 + ): return dp_group = get_data_parallel_group(dist_ctx, op, act_grad_names, rank) @@ -443,13 +481,19 @@ def gradient_synchronization(dist_ctx, op, act_grad_names, out_grad_names, def is_data_parallel_scale_op(op): - return op.type == "scale" and op.desc.has_attr("op_namescope") \ - and ParallelMode.DataParallel in op.desc.attr("op_namescope") + return ( + op.type == "scale" + and op.desc.has_attr("op_namescope") + and ParallelMode.DataParallel in op.desc.attr("op_namescope") + ) def is_data_parallel_reduce_op(op): - return op.type in ["c_reduce_sum", "c_allreduce_sum"] and op.desc.has_attr("op_namescope") \ - and ParallelMode.DataParallel in op.desc.attr("op_namescope") + return ( + op.type in ["c_reduce_sum", "c_allreduce_sum"] + and op.desc.has_attr("op_namescope") + and ParallelMode.DataParallel in op.desc.attr("op_namescope") + ) def is_in_backward_phase(dist_ctx): diff --git a/python/paddle/distributed/auto_parallel/operators/dist_assign.py b/python/paddle/distributed/auto_parallel/operators/dist_assign.py index 96923f461a73d3bbf3f83b502035cb1ea831f721..15382977aabb77e7b5dbc4188dda938e45212a7d 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_assign.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_assign.py @@ -21,7 +21,6 @@ from ..utils import compute_compatible_and_update_dim_mapping class DistributedAssign(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedAssign, self).__init__(op_type) @@ -30,7 +29,6 @@ register_distributed_operator_impl_container(DistributedAssign("assign")) class DistributedAssignImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedAssignImpl, self).__init__(name) self._forward_implemented = True @@ -43,8 +41,9 @@ class DistributedAssignImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -70,7 +69,8 @@ class DistributedAssignImpl(DistributedOperatorImpl): for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True diff --git a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py b/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py index 72a4eda103dbda9e0d8e6ac3c9f4f786371662fb..c99cfe21f72790a4c8e638374952fe247f936b21 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py +++ 
b/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py @@ -22,23 +22,24 @@ from ..utils import set_var_dist_attr from ..utils import set_dist_op_desc_original_id from ..process_group import new_process_group from ..dist_attribute import OperatorDistributedAttribute -from paddle.distributed.auto_parallel.process_group import get_world_process_group +from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, +) world_process_group = get_world_process_group() class DistributedCheckFiniteAndUnscale(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedCheckFiniteAndUnscale, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedCheckFiniteAndUnscale("check_finite_and_unscale")) + DistributedCheckFiniteAndUnscale("check_finite_and_unscale") +) class DistributedCheckFiniteAndUnscaleImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedCheckFiniteAndUnscaleImpl, self).__init__(name) self._name = name @@ -80,8 +81,11 @@ class DistributedCheckFiniteAndUnscaleImpl(DistributedOperatorImpl): backward_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id dist_attr = ctx.get_op_dist_attr_for_program(backward_op) - assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(backward_op)) + assert ( + dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format( + str(backward_op) + ) assert rank_id in dist_attr.process_mesh.processes @@ -89,25 +93,33 @@ class DistributedCheckFiniteAndUnscaleImpl(DistributedOperatorImpl): assert 'Scale' in kwargs, "input [{}] is not given".format('Scale') assert 'Out' in kwargs, "input [{}] is not given".format('Out') assert 'FoundInfinite' in kwargs, "output [{}] is not given".format( - 'FoundInfinite') + 'FoundInfinite' + ) - assert len( + assert ( + len(kwargs['Scale']) == 1 + ), "check_finite_and_unscale input Scale take 1 variable but got {}".format( kwargs['Scale'] - ) == 1, "check_finite_and_unscale input Scale take 1 variable but got {}".format( - kwargs['Scale']) - assert len( + ) + assert ( + len(kwargs['FoundInfinite']) == 1 + ), "check_finite_and_unscale input FoundInfinite take 1 variable but got {}".format( kwargs['FoundInfinite'] - ) == 1, "check_finite_and_unscale input FoundInfinite take 1 variable but got {}".format( - kwargs['FoundInfinite']) + ) assert len(kwargs['X']) == len( kwargs['Out'] ), "check_finite_and_unscale got [{}] X and [{}] Out, which are supposed to be equal".format( - len(kwargs['X']), len(kwargs['Out'])) + len(kwargs['X']), len(kwargs['Out']) + ) filter_vars = [] for varname in kwargs['X']: - if rank_id in ctx.get_tensor_dist_attr_for_program( - main_block.var(varname)).process_mesh.processes: + if ( + rank_id + in ctx.get_tensor_dist_attr_for_program( + main_block.var(varname) + ).process_mesh.processes + ): filter_vars.append(varname) # replicate op in dist program @@ -121,55 +133,70 @@ class DistributedCheckFiniteAndUnscaleImpl(DistributedOperatorImpl): group = new_process_group(world_process_group.ranks) inf_var = main_block.var(kwargs['FoundInfinite'][0]) - inf_var_int32 = main_block.create_var(name=inf_var.name + "@cast_int32", - shape=inf_var.shape, - dtype=core.VarDesc.VarType.INT32) + inf_var_int32 = main_block.create_var( + name=inf_var.name + "@cast_int32", + shape=inf_var.shape, + dtype=core.VarDesc.VarType.INT32, + ) set_var_dist_attr( - ctx, inf_var_int32, + ctx, + inf_var_int32, 
ctx.get_tensor_dist_attr_for_program(inf_var).dims_mapping, - ctx.get_tensor_dist_attr_for_program(inf_var).process_mesh) - cast_op1 = main_block.append_op(type='cast', - inputs={'X': inf_var}, - outputs={'Out': inf_var_int32}, - attrs={ - "in_dtype": inf_var.dtype, - "out_dtype": inf_var_int32.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) - allreduce_op = main_block.append_op(type='c_allreduce_max', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var_int32}, - attrs={ - 'ring_id': group.id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Optimize - }) - cast_op2 = main_block.append_op(type='cast', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var}, - attrs={ - "in_dtype": inf_var_int32.dtype, - "out_dtype": inf_var.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + ctx.get_tensor_dist_attr_for_program(inf_var).process_mesh, + ) + cast_op1 = main_block.append_op( + type='cast', + inputs={'X': inf_var}, + outputs={'Out': inf_var_int32}, + attrs={ + "in_dtype": inf_var.dtype, + "out_dtype": inf_var_int32.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + allreduce_op = main_block.append_op( + type='c_allreduce_max', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var_int32}, + attrs={ + 'ring_id': group.id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + cast_op2 = main_block.append_op( + type='cast', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var}, + attrs={ + "in_dtype": inf_var_int32.dtype, + "out_dtype": inf_var.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) for op in [cast_op1, allreduce_op, cast_op2]: new_op_dist_attr = OperatorDistributedAttribute() for varname in op.input_arg_names: var_dist_attr = ctx.get_tensor_dist_attr_for_program( - main_block.var(varname)) + main_block.var(varname) + ) assert var_dist_attr is not None new_op_dist_attr.set_input_dims_mapping( - varname, var_dist_attr.dims_mapping) + varname, var_dist_attr.dims_mapping + ) for varname in op.output_arg_names: var_dist_attr = ctx.get_tensor_dist_attr_for_program( - main_block.var(varname)) + main_block.var(varname) + ) new_op_dist_attr.set_output_dims_mapping( - varname, var_dist_attr.dims_mapping) + varname, var_dist_attr.dims_mapping + ) new_op_dist_attr.process_mesh = var_dist_attr.process_mesh ctx.set_op_dist_attr_for_program(op, new_op_dist_attr) register_distributed_operator_impl( "check_finite_and_unscale", - DistributedCheckFiniteAndUnscaleImpl("check_finite_and_unscale")) + DistributedCheckFiniteAndUnscaleImpl("check_finite_and_unscale"), +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_default.py b/python/paddle/distributed/auto_parallel/operators/dist_default.py index 9c1e8b0487180cf04cc7133d02a70b0cd3e59151..36b9b8b2ee45a8752cf2d0cdd73444ac9544223b 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_default.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_default.py @@ -38,35 +38,41 @@ def prim_operator_data_parallel_functor(ctx, src_op): var_name = src_op.output_arg_names[0] if var_name in ctx.grads_params: - assert var_name not in ctx.synced_gradient, "in primtive mode, grad is already {} synced".format( - var_name) + assert ( + var_name not in ctx.synced_gradient + ), "in primtive mode, grad is already {} synced".format(var_name) ctx.synced_gradient.add(var_name) sync_group = new_process_group(ctx.data_parallel_group) - allreduce_op = main_block.append_op(type='c_allreduce_sum', - inputs={'X': [var_name]}, - outputs={'Out': [var_name]}, - attrs={ - 'ring_id': sync_group.id, - 'use_calc_stream': True, - 
OP_ROLE_KEY: OpRole.Backward - }) + allreduce_op = main_block.append_op( + type='c_allreduce_sum', + inputs={'X': [var_name]}, + outputs={'Out': [var_name]}, + attrs={ + 'ring_id': sync_group.id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Backward, + }, + ) param = ctx.grads_params[var_name] startup_block = dist_op_context.startup_block - new_op = startup_block.append_op(type='c_broadcast', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'ring_id': sync_group.id, - 'root': 0, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) + new_op = startup_block.append_op( + type='c_broadcast', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'ring_id': sync_group.id, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) grad_var = main_block.var(var_name) dims_mapping = ctx.get_tensor_dist_attr_for_program( - grad_var).dims_mapping + grad_var + ).dims_mapping dist_attr = ctx.get_op_dist_attr_for_program(src_op) process_mesh = dist_attr.process_mesh op_attr = OperatorDistributedAttribute() @@ -79,7 +85,6 @@ def prim_operator_data_parallel_functor(ctx, src_op): class DistributedDefault(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedDefault, self).__init__(op_type) @@ -89,7 +94,6 @@ register_distributed_operator_impl_container(DistributedDefault("default")) # Replicated Default class DistributedDefaultImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedDefaultImpl0, self).__init__(name) self._forward_implemented = True @@ -107,13 +111,14 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(_g_op_cost_factory[op_type], - ctx, processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -121,16 +126,17 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes backward_op = dist_op.serial_op op_type = backward_op.type - cost_mapping = build_comp_costs_from_descs(_g_op_cost_factory[op_type], - ctx, processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) main_block = backward_op.block @@ -139,7 +145,8 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and not is_parameter_related( - varname, main_block): + varname, main_block + ): var_dim_mapping = dist_attr.get_input_dims_mapping(varname) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] @@ -151,16 +158,25 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): for input_name in 
backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and is_parameter_related( - varname, main_block): + varname, main_block + ): var_dim_mapping = dist_attr.get_input_dims_mapping( - varname) + varname + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res def is_input_compatible(self, dist_op): @@ -304,8 +320,10 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): batch_dim_mappings.append(dims_mapping[1]) # Check batch dim mapping compatibility - if not all(batch_dim_mappings[0] == dim_mapping - for dim_mapping in batch_dim_mappings): + if not all( + batch_dim_mappings[0] == dim_mapping + for dim_mapping in batch_dim_mappings + ): return False return True @@ -342,7 +360,8 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): for arg_name in op_desc.output_arg_names(): if op_desc.type() == 'fill_any_like': input_tensor = dist_op.get_serial_input( - op_desc.input_arg_names()[0]) + op_desc.input_arg_names()[0] + ) if input_tensor.is_parameter: continue serial_tensor = dist_op.get_serial_output(arg_name) @@ -359,7 +378,8 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): return changed compatible_dim_mapping = compute_compatible_dim_mapping( - batch_dim_mappings) + batch_dim_mappings + ) if compatible_dim_mapping is None: return False @@ -369,19 +389,24 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): continue dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name) if arg_name not in input_xshape_arg_names: - if len(dims_mapping) >= 1 and \ - compatible_dim_mapping != dims_mapping[0]: + if ( + len(dims_mapping) >= 1 + and compatible_dim_mapping != dims_mapping[0] + ): dims_mapping[0] = compatible_dim_mapping changed = True else: - if len(dims_mapping) >= 2 and \ - compatible_dim_mapping != dims_mapping[1]: + if ( + len(dims_mapping) >= 2 + and compatible_dim_mapping != dims_mapping[1] + ): dims_mapping[1] = compatible_dim_mapping changed = True for arg_name in op_desc.output_arg_names(): if op_desc.type() == 'fill_any_like': input_tensor = dist_op.get_serial_input( - op_desc.input_arg_names()[0]) + op_desc.input_arg_names()[0] + ) if input_tensor.is_parameter: continue if op_desc.type() in ["shape", "slice"]: @@ -391,13 +416,17 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): continue dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name) if arg_name not in output_xshape_arg_names: - if len(dims_mapping - ) >= 1 and compatible_dim_mapping != dims_mapping[0]: + if ( + len(dims_mapping) >= 1 + and compatible_dim_mapping != dims_mapping[0] + ): dims_mapping[0] = compatible_dim_mapping changed = True else: - if len(dims_mapping - ) >= 2 and compatible_dim_mapping != dims_mapping[1]: + if ( + len(dims_mapping) >= 2 + and compatible_dim_mapping != dims_mapping[1] + ): dims_mapping[1] = compatible_dim_mapping changed = True @@ -414,17 +443,20 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not 
match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) # replicate op in dist program dist_op_desc = main_block.append_op(type='nop').desc @@ -437,6 +469,7 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): # data parallel synchronization for primtive operators from paddle.incubate.autograd import prim_enabled + if prim_enabled(): assert is_prim_op(src_op) prim_operator_data_parallel_functor(ctx, src_op) @@ -447,9 +480,11 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): return for varname in dist_op_desc.input_arg_names(): - if startup_block.has_var(varname) and startup_block.var( - varname - ).is_parameter and varname not in dist_op_context.already_init_sync_vars: + if ( + startup_block.has_var(varname) + and startup_block.var(varname).is_parameter + and varname not in dist_op_context.already_init_sync_vars + ): dist_op_context.already_init_sync_vars.add(varname) param = startup_block.var(varname) param_dist_attr = ctx.get_tensor_dist_attr_for_program(param) @@ -458,38 +493,41 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, process_mesh, rank_id + ) # NOTE all not splited axis should be presented in mesh for axis, size in enumerate(process_mesh.topology): if size <= 1 or axis in dims_mapping: pass else: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, - axis, rank_id) + group_ranks = _get_comm_group( + process_mesh.processes, + process_mesh.topology, + axis, + rank_id, + ) sync_group = new_process_group(group_ranks) - new_op = startup_block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': - sync_group.id, - 'root': - 0, - 'use_calc_stream': - True, - OP_ROLE_KEY: - OpRole.Forward - }) + new_op = startup_block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': sync_group.id, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) # set distributed attribute op_attr = OperatorDistributedAttribute() op_attr.process_mesh = process_mesh - op_attr.set_output_dims_mapping(param.name, - dims_mapping) + op_attr.set_output_dims_mapping( + param.name, dims_mapping + ) op_attr.set_input_dims_mapping(param.name, dims_mapping) ctx.set_op_dist_attr_for_program(new_op, op_attr) @@ -501,24 +539,30 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): main_block = dist_op_context.work_block backward_op = dist_op_context.cur_src_op dist_attr = ctx.get_op_dist_attr_for_program(backward_op) - assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(backward_op)) + assert ( + dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format( + str(backward_op) + ) rank_id = dist_op_context.rank_id # check validation of inputs / outputs for input_name in backward_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( backward_op.desc.input(input_name) ), 
"number of tensor for input [{}] is not match".format(input_name) for output_name in backward_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( backward_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) # replicate op in dist program dist_op_desc = main_block.append_op(type='nop').desc @@ -535,7 +579,8 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and not is_parameter_related( - varname, main_block): + varname, main_block + ): act_grad_names.append(varname) out_grad_names = [] @@ -548,9 +593,11 @@ class DistributedDefaultImpl0(DistributedOperatorImpl): if is_parameter_related(fwd_name, main_block): out_grad_names.append(varname) - gradient_synchronization(ctx, backward_op, act_grad_names, - out_grad_names, rank_id) + gradient_synchronization( + ctx, backward_op, act_grad_names, out_grad_names, rank_id + ) register_distributed_operator_impl( - "default", DistributedDefaultImpl0("replicate_parallel")) + "default", DistributedDefaultImpl0("replicate_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py b/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py index 1dc163c0af44b1186cf0a1c108b77975605fe6a9..c9f5eb80515552e84cc837fd41e21491d0f8bc88 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py @@ -27,18 +27,17 @@ from ..cost import build_comp_costs_from_descs class DistributedElementwise(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedElementwise, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedElementwise("elementwise")) + DistributedElementwise("elementwise") +) # Replicated Elementwise class DistributedElementwiseImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedElementwiseImpl0, self).__init__(name) self._forward_implemented = False @@ -56,13 +55,14 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(_g_op_cost_factory[op_type], - ctx, processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -70,16 +70,17 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes backward_op = dist_op.serial_op op_type = backward_op.type - cost_mapping = build_comp_costs_from_descs(_g_op_cost_factory[op_type], - ctx, processes, desc_mapping, - cluster) + cost_mapping = 
build_comp_costs_from_descs( + _g_op_cost_factory[op_type], ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) main_block = backward_op.block @@ -88,7 +89,8 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and not is_parameter_related( - varname, main_block): + varname, main_block + ): var_dim_mapping = dist_attr.get_input_dims_mapping(varname) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] @@ -100,16 +102,25 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and is_parameter_related( - varname, main_block): + varname, main_block + ): var_dim_mapping = dist_attr.get_input_dims_mapping( - varname) + varname + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res def is_input_compatible(self, dist_op): @@ -189,8 +200,9 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): for dims_mapping in dims_mapping_list: if idx < len(dims_mapping): dim_mappings.append(dims_mapping[-(idx + 1)]) - if not all(dim_mappings[0] == dim_mapping - for dim_mapping in dim_mappings): + if not all( + dim_mappings[0] == dim_mapping for dim_mapping in dim_mappings + ): return False return True @@ -216,10 +228,13 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): -1 for _ in range(input_max_dims_mapping_len) ] for i in range(input_dims_mapping_lens[arg_name]): - new_idx = (input_max_dims_mapping_len - - input_dims_mapping_lens[arg_name]) + i + new_idx = ( + input_max_dims_mapping_len + - input_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[new_idx] = input_dims_mapping_dict[ - arg_name][i] + arg_name + ][i] dims_mapping_list.append(new_dims_mapping) else: dims_mapping_list.append(input_dims_mapping_dict[arg_name]) @@ -240,10 +255,13 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): -1 for _ in range(output_max_dims_mapping_len) ] for i in range(output_dims_mapping_lens[arg_name]): - new_idx = (output_max_dims_mapping_len - - output_dims_mapping_lens[arg_name]) + i + new_idx = ( + output_max_dims_mapping_len + - output_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[new_idx] = output_dims_mapping_dict[ - arg_name][i] + arg_name + ][i] dims_mapping_list.append(new_dims_mapping) else: dims_mapping_list.append(output_dims_mapping_dict[arg_name]) @@ -251,7 +269,8 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): assert input_max_dims_mapping_len == output_max_dims_mapping_len max_dims_mapping_len = input_max_dims_mapping_len compatible_dims_mapping = compute_compatible_dims_mapping( - dims_mapping_list) + dims_mapping_list + ) if compatible_dims_mapping is None: return False @@ -261,17 +280,20 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): -1 for _ in range(input_dims_mapping_lens[arg_name]) ] for i in range(input_dims_mapping_lens[arg_name]): - new_idx = (max_dims_mapping_len - - input_dims_mapping_lens[arg_name]) + i + new_idx = ( + max_dims_mapping_len - input_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[i] = 
compatible_dims_mapping[new_idx] if new_dims_mapping != input_dims_mapping_dict[arg_name]: - op_dist_attr.set_input_dims_mapping(arg_name, - new_dims_mapping) + op_dist_attr.set_input_dims_mapping( + arg_name, new_dims_mapping + ) changed = True else: if compatible_dims_mapping != input_dims_mapping_dict[arg_name]: op_dist_attr.set_input_dims_mapping( - arg_name, compatible_dims_mapping) + arg_name, compatible_dims_mapping + ) changed = True for arg_name in output_arg_names: @@ -280,17 +302,24 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): -1 for _ in range(output_dims_mapping_lens[arg_name]) ] for i in range(output_dims_mapping_lens[arg_name]): - new_idx = (max_dims_mapping_len - - output_dims_mapping_lens[arg_name]) + i + new_idx = ( + max_dims_mapping_len + - output_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[i] = compatible_dims_mapping[new_idx] if new_dims_mapping != output_dims_mapping_dict[arg_name]: op_dist_attr.set_output_dims_mapping( - arg_name, new_dims_mapping) + arg_name, new_dims_mapping + ) changed = True else: - if compatible_dims_mapping != output_dims_mapping_dict[arg_name]: + if ( + compatible_dims_mapping + != output_dims_mapping_dict[arg_name] + ): op_dist_attr.set_output_dims_mapping( - arg_name, compatible_dims_mapping) + arg_name, compatible_dims_mapping + ) changed = True return changed @@ -305,4 +334,5 @@ class DistributedElementwiseImpl0(DistributedOperatorImpl): register_distributed_operator_impl( - "elementwise", DistributedElementwiseImpl0("replicate_parallel")) + "elementwise", DistributedElementwiseImpl0("replicate_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/operators/dist_embedding.py index 513dffb5fca10666c6111817520759f1b61ea842..fa76d9aff3253c7addc0c43dc1ff395b332abeb4 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_embedding.py @@ -17,7 +17,11 @@ from .common import DistributedOperatorImplContainer from .common import DistributedOperatorImpl from .common import register_distributed_operator_impl_container from .common import gradient_synchronization -from .common import naive_copy_op_dist_attr_for_program, register_distributed_operator_impl, set_comm_op_dist_attr_for_program +from .common import ( + naive_copy_op_dist_attr_for_program, + register_distributed_operator_impl, + set_comm_op_dist_attr_for_program, +) from ..utils import is_dim_shard from ..utils import is_dim_replicate from ..utils import compute_compatible_and_update_dim_mapping @@ -26,33 +30,48 @@ from paddle.fluid import core, unique_name from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole from ..process_group import new_process_group -from ..utils import _get_comm_group, _get_idx_in_axis, _get_corresponding_rank, set_var_dist_attr +from ..utils import ( + _get_comm_group, + _get_idx_in_axis, + _get_corresponding_rank, + set_var_dist_attr, +) from ..cost import build_comp_desc_from_dist_op, build_comm_desc_from_dist_op -from ..cost import build_comm_costs_from_descs, build_comp_costs_from_descs, build_dp_costs +from ..cost import ( + build_comm_costs_from_descs, + build_comp_costs_from_descs, + build_dp_costs, +) from ..cost import EmbeddingOpCost, EmbeddingGradOpCost -from paddle.distributed.auto_parallel.cost.comm_op_cost import AllreduceSumOpCost, IdentityOpCost +from 
paddle.distributed.auto_parallel.cost.comm_op_cost import ( + AllreduceSumOpCost, + IdentityOpCost, +) class DistributedEmbedding(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedEmbedding, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedEmbedding("lookup_table_v2")) + DistributedEmbedding("lookup_table_v2") +) register_distributed_operator_impl_container( - DistributedEmbedding("c_embedding")) + DistributedEmbedding("c_embedding") +) register_distributed_operator_impl_container( - DistributedEmbedding("lookup_table")) + DistributedEmbedding("lookup_table") +) def adopt_lookup_table_v1(ctx, main_block, src_op, Ids_var): - assert len( - Ids_var.shape - ) == 3, "input Ids to lookup_table should have 3 dimensions but got [{}] with shape [{}]".format( - Ids_var.name, Ids_var.shape) + assert ( + len(Ids_var.shape) == 3 + ), "input Ids to lookup_table should have 3 dimensions but got [{}] with shape [{}]".format( + Ids_var.name, Ids_var.shape + ) if not Ids_var.stop_gradient: raise NotImplementedError( 'Requiring the gradient of Ids of lookup_table(v1)dist op is not currently supported. Please open an issue with details on your use case so that we can prioritize adding this (for instance, adversarial training for language model).' @@ -60,59 +79,72 @@ def adopt_lookup_table_v1(ctx, main_block, src_op, Ids_var): target_shape = list(Ids_var.shape[:-1]) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["dist_reshape", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["dist_reshape", 'tmp']) + ), dtype=Ids_var.dtype, shape=target_shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) target_shape = [0] + list(Ids_var.shape[:-1]) xshape_var = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["dist_Xshape", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["dist_Xshape", 'tmp']) + ), dtype=Ids_var.dtype, shape=target_shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) # TODO use inplace reshape for memory saving - reshape_op = main_block.append_op(type='reshape2', - inputs={'X': [Ids_var]}, - outputs={ - 'Out': [intermediate_var_0], - 'XShape': [xshape_var] - }, - attrs={ - "shape": [0, -1], - }) + reshape_op = main_block.append_op( + type='reshape2', + inputs={'X': [Ids_var]}, + outputs={'Out': [intermediate_var_0], 'XShape': [xshape_var]}, + attrs={ + "shape": [0, -1], + }, + ) # set dist attr op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) Ids_var_dist_attr = op_dist_attr.get_input_dist_attr(Ids_var.name) assert Ids_var_dist_attr is not None intermediate_var_0_dist_attr = set_var_dist_attr( - ctx, intermediate_var_0, Ids_var_dist_attr.dims_mapping, - Ids_var_dist_attr.process_mesh) - set_var_dist_attr(ctx, xshape_var, - [-1] + list(Ids_var_dist_attr.dims_mapping), - Ids_var_dist_attr.process_mesh) + ctx, + intermediate_var_0, + Ids_var_dist_attr.dims_mapping, + Ids_var_dist_attr.process_mesh, + ) + set_var_dist_attr( + ctx, + xshape_var, + [-1] + list(Ids_var_dist_attr.dims_mapping), + Ids_var_dist_attr.process_mesh, + ) op_dist_attr.del_input_dist_attr(Ids_var.name) - op_dist_attr.set_input_dist_attr(intermediate_var_0.name, - intermediate_var_0_dist_attr) + op_dist_attr.set_input_dist_attr( + intermediate_var_0.name, intermediate_var_0_dist_attr + ) new_op_dist_attr = 
OperatorDistributedAttribute() new_op_dist_attr.process_mesh = Ids_var_dist_attr.process_mesh new_op_dist_attr.impl_type = "default" new_op_dist_attr.impl_idx = 0 - new_op_dist_attr.set_input_dims_mapping(Ids_var.name, - Ids_var_dist_attr.dims_mapping) - new_op_dist_attr.set_output_dims_mapping(intermediate_var_0.name, - Ids_var_dist_attr.dims_mapping) + new_op_dist_attr.set_input_dims_mapping( + Ids_var.name, Ids_var_dist_attr.dims_mapping + ) + new_op_dist_attr.set_output_dims_mapping( + intermediate_var_0.name, Ids_var_dist_attr.dims_mapping + ) new_op_dist_attr.set_output_dims_mapping( - xshape_var.name, [-1] + list(Ids_var_dist_attr.dims_mapping)) + xshape_var.name, [-1] + list(Ids_var_dist_attr.dims_mapping) + ) ctx.set_op_dist_attr_for_program(reshape_op, new_op_dist_attr) return intermediate_var_0 @@ -120,7 +152,6 @@ def adopt_lookup_table_v1(ctx, main_block, src_op, Ids_var): # RowParallel class DistributedEmbeddingImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedEmbeddingImpl, self).__init__(name) self._forward_implemented = True @@ -138,17 +169,19 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes # embedding need start_index - cost_mapping = build_comp_costs_from_descs(EmbeddingOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + EmbeddingOpCost, ctx, processes, desc_mapping, cluster + ) serial_op = dist_op.serial_op parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("W")[0])[0] + serial_op.input("W")[0] + )[0] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.output("Out") c_allreduce_sum_desc_mapping = build_comm_desc_from_dist_op( @@ -157,11 +190,16 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, c_allreduce_sum_desc_mapping, - cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res_cost = [cost_mapping, comm_op_cost_list] @@ -175,7 +213,8 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): dist_attr = dist_op.dist_attr embedding_row_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("W")[0])[0] + backward_op.input("W")[0] + )[0] parallel_axis = embedding_row_dim_mapping attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = [backward_op.input("Out@GRAD")[0]] @@ -185,33 +224,38 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res.append(comm_op_cost_list) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) - cost_mapping = build_comp_costs_from_descs(EmbeddingGradOpCost, ctx, - processes, desc_mapping, - cluster) + desc_mapping = build_comp_desc_from_dist_op( + 
dist_op=dist_op, dist_context=ctx + ) + cost_mapping = build_comp_costs_from_descs( + EmbeddingGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Ids")[0]) + backward_op.input("Ids")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1: parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('W@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res @@ -223,7 +267,8 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): ids_dims_mapping = op_dist_attr.get_input_dims_mapping(ids_name) w_dims_mapping = op_dist_attr.get_input_dims_mapping(w_name) if is_dim_replicate(w_dims_mapping[-2]) or is_dim_shard( - w_dims_mapping[-1]): + w_dims_mapping[-1] + ): return False # Other dimensions must be replicate except the batch dimension for mapping in ids_dims_mapping[1:]: @@ -243,8 +288,9 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -256,7 +302,7 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): ids_dims_mapping = op_dist_attr.get_input_dims_mapping(ids_name) w_dims_mapping = op_dist_attr.get_input_dims_mapping(w_name) - if ids_dims_mapping != out_dims_mapping[:len(ids_dims_mapping)]: + if ids_dims_mapping != out_dims_mapping[: len(ids_dims_mapping)]: return False return True @@ -274,12 +320,14 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): for i in range(len(ids_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [ids_dims_mapping, out_dims_mapping], [i, i]) + [ids_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True dim_changed = compute_compatible_and_update_dim_mapping( - [w_dims_mapping, out_dims_mapping], [-1, -1]) + [w_dims_mapping, out_dims_mapping], [-1, -1] + ) if dim_changed: changed = True @@ -297,26 +345,30 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # check validation of inputs / outputs assert 'Ids' in kwargs, "input [{}] is not given".format('Ids') assert 'W' in kwargs, "input [{}] is not given".format('W') assert 'Out' in kwargs, "output [{}] is not given".format('Out') - assert len( + assert ( + len(kwargs['Ids']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( kwargs['Ids'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['Ids']) - assert len( + ) + assert ( + len(kwargs['W']) == 1 + ), "row_parallel_embedding input W take 1 variable but got {}".format( kwargs['W'] - ) == 1, "row_parallel_embedding input W take 1 variable but got {}".format( - kwargs['W']) - assert len( + ) + assert ( + len(kwargs['Out']) == 1 + ), 
"row_parallel_embedding output Out take 1 variable but got {}".format( kwargs['Out'] - ) == 1, "row_parallel_embedding output Out take 1 variable but got {}".format( - kwargs['Out']) + ) Ids_var = main_block.var(kwargs['Ids'][0]) Weight_var = main_block._var_recursive(kwargs['W'][0]) @@ -328,70 +380,85 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): # got dist attribute info embedding_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[0] - assert embedding_row_dim_mapping >= 0, "row_parallel_embedding's row should be divided by a specific mesh axis, but got [{}]".format( - embedding_row_dim_mapping) + Weight_var.name + )[0] + assert ( + embedding_row_dim_mapping >= 0 + ), "row_parallel_embedding's row should be divided by a specific mesh axis, but got [{}]".format( + embedding_row_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in process_mesh_group: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # A generalized method to caculate embedding offset using cartisian product - relative_idx = _get_idx_in_axis(process_mesh_group, process_mesh_shape, - embedding_row_dim_mapping, rank_id) + relative_idx = _get_idx_in_axis( + process_mesh_group, + process_mesh_shape, + embedding_row_dim_mapping, + rank_id, + ) per_part_size = Weight_var.shape[0] relative_idx = relative_idx * per_part_size # TODO caculate ring id parallel_axis = embedding_row_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # append op - check_variable_and_dtype(Ids_var, 'input', ['int32', 'int64'], - 'c_embedding') + check_variable_and_dtype( + Ids_var, 'input', ['int32', 'int64'], 'c_embedding' + ) # infer new var shape with op dist attr out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var) assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_embedding", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_embedding", 'tmp']) + ), dtype=Weight_var.dtype, shape=Out_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=Out_var.stop_gradient) + stop_gradient=Out_var.stop_gradient, + ) # set intermediate_var_0's dist_attr with Out_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_var_dist_attr + ) check_variable_and_dtype( - Out_var, 'tensor', + Out_var, + 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'c_allreduce_sum') + 'c_allreduce_sum', + ) c_embedding_op = main_block.append_op( type='c_embedding', - inputs={ - 'Ids': [Ids_var], - 'W': [Weight_var] - }, + inputs={'Ids': [Ids_var], 'W': [Weight_var]}, outputs={'Out': 
[intermediate_var_0]}, attrs={ "start_index": relative_idx, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if intermediate_var_0.shape != ref_shape: intermediate_var_0.desc.set_shape(ref_shape) @@ -404,8 +471,9 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if Out_var.shape != ref_shape: Out_var.desc.set_shape(ref_shape) @@ -418,15 +486,19 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): for input_varname in c_embedding_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - embedding_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + embedding_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) output_varname = c_embedding_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - embedding_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + embedding_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(c_embedding_op, embedding_op_dist_attr) # allreduce @@ -438,16 +510,20 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var) assert tensor_dist_attr is not None - allreduce_op_dist_attr.set_input_dist_attr(input_varname, - tensor_dist_attr) + allreduce_op_dist_attr.set_input_dist_attr( + input_varname, tensor_dist_attr + ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - allreduce_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) - ctx.set_op_dist_attr_for_program(c_allreduce_sum_op, - allreduce_op_dist_attr) + op_dist_attr + ) + allreduce_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) + ctx.set_op_dist_attr_for_program( + c_allreduce_sum_op, allreduce_op_dist_attr + ) # param initialization sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: @@ -464,20 +540,25 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): if size <= 1 or axis in dim_mapping: pass else: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, axis, - rank_id) + group_ranks = _get_comm_group( + process_mesh.processes, + process_mesh.topology, + axis, + rank_id, + ) sync_group = new_process_group(group_ranks) - startup_block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': sync_group.id, - 'root': 0, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) + startup_block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': sync_group.id, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -488,35 +569,43 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): backward_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id dist_attr = 
ctx.get_op_dist_attr_for_program(backward_op) - assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(backward_op)) + assert ( + dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format( + str(backward_op) + ) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, dist_attr.process_mesh, rank_id + ) assert 'Ids' in kwargs, "input [{}] is not given".format('Ids') assert 'W' in kwargs, "input [{}] is not given".format('W') assert 'Out@GRAD' in kwargs, "input [{}] is not given".format('Out') assert 'W@GRAD' in kwargs, "output [{}] is not given".format('W@GRAD') - assert len( + assert ( + len(kwargs['Ids']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( kwargs['Ids'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['Ids']) - assert len( + ) + assert ( + len(kwargs['W']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( kwargs['W'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['W']) - assert len( - kwargs['Out@GRAD'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['Out']) - assert len( + ) + assert ( + len(kwargs['Out@GRAD']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( + kwargs['Out'] + ) + assert ( + len(kwargs['W@GRAD']) == 1 + ), "row_parallel_embedding output Ids take 1 variable but got {}".format( kwargs['W@GRAD'] - ) == 1, "row_parallel_embedding output Ids take 1 variable but got {}".format( - kwargs['W@GRAD']) + ) Ids_var = main_block.var(kwargs['Ids'][0]) Weight_var = main_block.var(kwargs['W'][0]) @@ -524,39 +613,57 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): Weight_grad = main_block.var(kwargs['W@GRAD'][0]) embedding_row_dim_mapping = dist_attr.get_input_dims_mapping( - Weight_var.name)[0] - assert embedding_row_dim_mapping >= 0, "row_parallel_embedding's row should be divided by a specific mesh axis, but got [{}]".format( - embedding_row_dim_mapping) + Weight_var.name + )[0] + assert ( + embedding_row_dim_mapping >= 0 + ), "row_parallel_embedding's row should be divided by a specific mesh axis, but got [{}]".format( + embedding_row_dim_mapping + ) process_mesh_shape = dist_attr.process_mesh.topology process_mesh_group = dist_attr.process_mesh.processes # A generalized method to caculate embedding offset using cartisian product - relative_idx = _get_idx_in_axis(process_mesh_group, process_mesh_shape, - embedding_row_dim_mapping, rank_id) + relative_idx = _get_idx_in_axis( + process_mesh_group, + process_mesh_shape, + embedding_row_dim_mapping, + rank_id, + ) per_part_size = Weight_var.shape[0] relative_idx = relative_idx * per_part_size check_variable_and_dtype( - Out_grad, 'tensor', - ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity') + Out_grad, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_identity', + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_embedding", '@tmp_0@GRAD'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_embedding", '@tmp_0@GRAD']) + ), dtype=Out_grad.dtype, shape=Out_grad.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - 
stop_gradient=Out_grad.stop_gradient) + stop_gradient=Out_grad.stop_gradient, + ) # copy X_var's dist_attr to intermediate_var_0's dist_attr out_grad_dist_attr = dist_attr.get_input_dist_attr(Out_grad.name) assert out_grad_dist_attr is not None - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_grad_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_grad_dist_attr + ) - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - embedding_row_dim_mapping, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, + process_mesh_shape, + embedding_row_dim_mapping, + rank_id, + ) group = new_process_group(group_ranks) c_identity_op = main_block.append_op( @@ -568,41 +675,54 @@ class DistributedEmbeddingImpl(DistributedOperatorImpl): 'use_calc_stream': True, 'use_model_parallel': True, OP_ROLE_KEY: OpRole.Backward, - }) - check_variable_and_dtype(intermediate_var_0, 'x', - ['float16', 'float32', 'float64'], 'linear') - check_dtype(intermediate_var_0.dtype, 'dtype', - ['float16', 'float32', 'float64'], 'linear') + }, + ) + check_variable_and_dtype( + intermediate_var_0, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + intermediate_var_0.dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'linear', + ) - set_comm_op_dist_attr_for_program(c_identity_op, dist_attr.process_mesh, - out_grad_dist_attr, ctx) + set_comm_op_dist_attr_for_program( + c_identity_op, dist_attr.process_mesh, out_grad_dist_attr, ctx + ) c_embedding_grad_op_desc = main_block.append_op(type='nop').desc c_embedding_grad_op_desc.set_type("c_embedding_grad") c_embedding_grad_op_desc.set_input('Ids', [Ids_var.name]) c_embedding_grad_op_desc.set_input('W', [Weight_var.name]) - c_embedding_grad_op_desc.set_input('Out@GRAD', - [intermediate_var_0.name]) + c_embedding_grad_op_desc.set_input( + 'Out@GRAD', [intermediate_var_0.name] + ) c_embedding_grad_op_desc.set_output('W@GRAD', [Weight_grad.name]) c_embedding_grad_op_desc._set_attr('start_index', relative_idx) c_embedding_grad_op_desc._set_attr(OP_ROLE_KEY, OpRole.Backward) c_embedding_grad_op = main_block.ops[-1] assert c_embedding_grad_op.type == "c_embedding_grad" - naive_copy_op_dist_attr_for_program(c_embedding_grad_op, backward_op, - ctx) + naive_copy_op_dist_attr_for_program( + c_embedding_grad_op, backward_op, ctx + ) # data parallel gradient synchronization act_grad_names = [Ids_var.name] out_grad_names = [kwargs['W@GRAD'][0]] - gradient_synchronization(ctx, backward_op, act_grad_names, - out_grad_names, rank_id) + gradient_synchronization( + ctx, backward_op, act_grad_names, out_grad_names, rank_id + ) -register_distributed_operator_impl("lookup_table_v2", - DistributedEmbeddingImpl("row_parallel")) -register_distributed_operator_impl("c_embedding", - DistributedEmbeddingImpl("row_parallel")) -register_distributed_operator_impl("lookup_table", - DistributedEmbeddingImpl("row_parallel")) +register_distributed_operator_impl( + "lookup_table_v2", DistributedEmbeddingImpl("row_parallel") +) +register_distributed_operator_impl( + "c_embedding", DistributedEmbeddingImpl("row_parallel") +) +register_distributed_operator_impl( + "lookup_table", DistributedEmbeddingImpl("row_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py b/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py index a6b48165fa9d656464c0e21df173f1481ac1e1ab..8e3fc06756c5d14436cf7aa48690594ab98cc06c 100644 --- 
a/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py @@ -25,17 +25,16 @@ from ..cost import build_comp_costs_from_descs class DistributedFillConstantBatchSizeLike(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedFillConstantBatchSizeLike, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedFillConstantBatchSizeLike("fill_constant_batch_size_like")) + DistributedFillConstantBatchSizeLike("fill_constant_batch_size_like") +) class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedFillConstantBatchSizeLikeImpl0, self).__init__(name) self._forward_implemented = True @@ -45,7 +44,8 @@ class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): cost = None if int(op_role) == int(OpRole.Backward): raise ValueError( - "The fill_constant_batch_size_like has no grad op.") + "The fill_constant_batch_size_like has no grad op." + ) else: cost = self.calc_fwd_cost(dist_op, ctx, cluster) assert cost is not None @@ -53,13 +53,18 @@ class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes op_type = dist_op.serial_op.type cost_mapping = build_comp_costs_from_descs( - FillConstantBatchSizeLikeOpCost, ctx, processes, desc_mapping, - cluster) + FillConstantBatchSizeLikeOpCost, + ctx, + processes, + desc_mapping, + cluster, + ) res_cost = [cost_mapping] return res_cost @@ -81,8 +86,9 @@ class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc op_dist_attr = dist_op.dist_attr @@ -105,7 +111,8 @@ class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): # only the batch size dimemsion of input and output are relative. 
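In the update_dims_mapping hunk that follows, only the batch dimension (index 0) of the input and output is tied together, via compute_compatible_and_update_dim_mapping. Roughly, two per-dimension entries can be reconciled when they already match or when one of them is -1 (replicated). A minimal, self-contained sketch of that rule, with a hypothetical reconcile() standing in for the Paddle helper:

# Sketch of dims-mapping reconciliation. Convention assumed: -1 means
# "replicated", a non-negative value is the mesh axis the dim is sharded on.
# reconcile() is a simplified, hypothetical stand-in for
# compute_compatible_and_update_dim_mapping, not Paddle's implementation.
def compatible(d0, d1):
    # Two entries are compatible if they match or one of them is -1.
    if d0 == d1:
        return d0
    if d0 == -1:
        return d1
    if d1 == -1:
        return d0
    return None  # conflicting shard axes


def reconcile(mappings, positions):
    # Make mappings[i][positions[i]] agree across all mappings; report changes.
    target = -1
    for m, p in zip(mappings, positions):
        target = compatible(target, m[p])
        if target is None:
            return False
    changed = False
    for m, p in zip(mappings, positions):
        if m[p] != target:
            m[p] = target
            changed = True
    return changed


x_dims_mapping = [0, -1]     # batch dim sharded on mesh axis 0
out_dims_mapping = [-1, -1]  # output not sharded yet
print(reconcile([x_dims_mapping, out_dims_mapping], [0, 0]))  # True
print(out_dims_mapping)      # [0, -1]: the output batch dim now follows the input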
dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [0, 0]) + [x_dims_mapping, out_dims_mapping], [0, 0] + ) if dim_changed: changed = True @@ -143,4 +150,5 @@ class DistributedFillConstantBatchSizeLikeImpl0(DistributedOperatorImpl): register_distributed_operator_impl( "fill_constant_batch_size_like", - DistributedFillConstantBatchSizeLikeImpl0("fill_by_shape")) + DistributedFillConstantBatchSizeLikeImpl0("fill_by_shape"), +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py b/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py index 0c8d8d7b76844a8a756a07fee39dc5a7ba014d12..783a9ce5a18382313284ab16cde94089033c665a 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py @@ -24,17 +24,16 @@ from ..process_group import new_process_group class DistributedFusedAttention(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedFusedAttention, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedFusedAttention("fused_attention")) + DistributedFusedAttention("fused_attention") +) class DistributedFusedAttentionImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedFusedAttentionImpl, self).__init__(name) self._forward_implemented = True @@ -60,10 +59,12 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): if is_dim_shard(mapping): return False if len(qkv_w_dims_mapping) != 4 or is_dim_replicate( - qkv_w_dims_mapping[head_axis]): + qkv_w_dims_mapping[head_axis] + ): return False if len(qkv_bias_dims_mapping) != 3 or is_dim_replicate( - qkv_bias_dims_mapping[head_axis]): + qkv_bias_dims_mapping[head_axis] + ): return False if is_dim_replicate(out_w_dims_mapping[0]): return False @@ -71,10 +72,13 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): return False replicated_dims = [ - qkv_w_dims_mapping[0], qkv_w_dims_mapping[-2], - qkv_w_dims_mapping[-1], qkv_bias_dims_mapping[0], - qkv_bias_dims_mapping[-1], out_w_dims_mapping[-1], - out_bias_dims_mapping[-1] + qkv_w_dims_mapping[0], + qkv_w_dims_mapping[-2], + qkv_w_dims_mapping[-1], + qkv_bias_dims_mapping[0], + qkv_bias_dims_mapping[-1], + out_w_dims_mapping[-1], + out_bias_dims_mapping[-1], ] for mapping in replicated_dims: if is_dim_shard(mapping): @@ -100,8 +104,9 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -128,7 +133,8 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -145,22 +151,28 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # infer logic 
comm presentation head_axis = 1 qkv_w = src_op.input('QKVW')[0] - qkv_w_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - qkv_w)[head_axis] - assert qkv_w_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - qkv_w_col_dim_mapping) + qkv_w_col_dim_mapping = op_dist_attr.get_input_dims_mapping(qkv_w)[ + head_axis + ] + assert ( + qkv_w_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + qkv_w_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = qkv_w_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # insert op @@ -181,20 +193,25 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # infer logic comm presentation out_w = src_op.input('OutLinearW')[0] out_w_col_dim_mapping = op_dist_attr.get_input_dims_mapping(out_w)[-1] - assert out_w_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - out_w_col_dim_mapping) + assert ( + out_w_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + out_w_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = out_w_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # insert op @@ -207,4 +224,5 @@ class DistributedFusedAttentionImpl(DistributedOperatorImpl): register_distributed_operator_impl( - "fused_attention", DistributedFusedAttentionImpl("tensor_parallel")) + "fused_attention", DistributedFusedAttentionImpl("tensor_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py b/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py index a9b29c2054f9baa9ba2a0da3eda328cfd1a67c89..9cb435d4aa85e04a70e7d12ddcfc660f33e98c28 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py @@ -24,17 +24,16 @@ from ..process_group import new_process_group class DistributedFusedFeedForward(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedFusedFeedForward, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedFusedFeedForward("fused_feedforward")) + DistributedFusedFeedForward("fused_feedforward") +) class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedFusedFeedForwardImpl, self).__init__(name) self._forward_implemented = True @@ -51,24 +50,30 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): x_dims_mapping = 
op_dist_attr.get_input_dims_mapping(x_name) linear1_weight_dims_mapping = op_dist_attr.get_input_dims_mapping( - linear1_weight) + linear1_weight + ) linear1_bias_dims_mapping = op_dist_attr.get_input_dims_mapping( - linear1_bias) + linear1_bias + ) linear2_weight_dims_mapping = op_dist_attr.get_input_dims_mapping( - linear2_weight) + linear2_weight + ) linear2_bias_dims_mapping = op_dist_attr.get_input_dims_mapping( - linear2_bias) + linear2_bias + ) for mapping in x_dims_mapping[1:-1]: if is_dim_shard(mapping): return False if is_dim_shard(linear1_weight_dims_mapping[-2]) or is_dim_replicate( - linear1_weight_dims_mapping[-1]): + linear1_weight_dims_mapping[-1] + ): return False if is_dim_replicate(linear1_bias_dims_mapping[-1]): return False if is_dim_replicate(linear2_weight_dims_mapping[-2]) or is_dim_shard( - linear2_weight_dims_mapping[-1]): + linear2_weight_dims_mapping[-1] + ): return False if is_dim_shard(linear2_bias_dims_mapping[-1]): return False @@ -91,8 +96,9 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -119,7 +125,8 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -136,21 +143,27 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # infer logic comm presentation linear1_weight = src_op.input('Linear1Weight')[0] linear1_weight_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - linear1_weight)[-1] - assert linear1_weight_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - linear1_weight_col_dim_mapping) + linear1_weight + )[-1] + assert ( + linear1_weight_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + linear1_weight_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = linear1_weight_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # insert op @@ -172,21 +185,27 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # infer logic comm presentation linear2_weight = src_op.input('Linear2Weight')[0] linear2_weight_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - 
linear2_weight)[-1] - assert linear2_weight_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - linear2_weight_col_dim_mapping) + linear2_weight + )[-1] + assert ( + linear2_weight_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + linear2_weight_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = linear2_weight_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # insert op @@ -199,4 +218,5 @@ class DistributedFusedFeedForwardImpl(DistributedOperatorImpl): register_distributed_operator_impl( - "fused_feedforward", DistributedFusedFeedForwardImpl("tensor_parallel")) + "fused_feedforward", DistributedFusedFeedForwardImpl("tensor_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/operators/dist_matmul.py index fd7852e469977af4e5791f19eb98920379e7e7d5..b13bbf740c791d563880b0bbe292b48c1c1263a1 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_matmul.py @@ -34,20 +34,31 @@ from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole from ..process_group import new_process_group from ..utils import _get_comm_group, _get_corresponding_rank from .dist_default import DistributedDefaultImpl0 -from ..cost import build_comp_desc_from_dist_op, build_comm_desc_from_dist_op, build_dp_costs +from ..cost import ( + build_comp_desc_from_dist_op, + build_comm_desc_from_dist_op, + build_dp_costs, +) from ..cost import build_comm_costs_from_descs, build_comp_costs_from_descs from ..cost import MatmulV2OpCost, MatmulOpCost, MulOpCost from ..cost import MatmulV2GradOpCost, MatmulGradOpCost, MulGradOpCost -from paddle.distributed.auto_parallel.cost.comm_op_cost import AllreduceSumOpCost, IdentityOpCost +from paddle.distributed.auto_parallel.cost.comm_op_cost import ( + AllreduceSumOpCost, + IdentityOpCost, +) def trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping): if trans_x: - x_dims_mapping[-1], x_dims_mapping[-2] = x_dims_mapping[ - -2], x_dims_mapping[-1] + x_dims_mapping[-1], x_dims_mapping[-2] = ( + x_dims_mapping[-2], + x_dims_mapping[-1], + ) if trans_y: - y_dims_mapping[-1], y_dims_mapping[-2] = y_dims_mapping[ - -2], y_dims_mapping[-1] + y_dims_mapping[-1], y_dims_mapping[-2] = ( + y_dims_mapping[-2], + y_dims_mapping[-1], + ) def copy_op_with_new_input_output(ctx, block, src_op, **kwargs): @@ -120,13 +131,17 @@ def _update_dims_mapping_for_matmul(dist_op): for i in range(new_out_dims_mapping_len - 2): broadcast_out_dims_mapping.append(out_dims_mapping[i]) - compatible_dims_mapping = compute_compatible_dims_mapping([ - broadcast_x_dims_mapping, broadcast_y_dims_mapping, - broadcast_out_dims_mapping - ]) + compatible_dims_mapping = compute_compatible_dims_mapping( + [ + broadcast_x_dims_mapping, + broadcast_y_dims_mapping, + broadcast_out_dims_mapping, + ] + ) if compatible_dims_mapping is None: - trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, - y_dims_mapping) + trans_x_y_dims_mapping( + trans_x, trans_y, x_dims_mapping, y_dims_mapping + ) return False for i 
in range(new_x_dims_mapping_len - 2): @@ -149,17 +164,20 @@ def _update_dims_mapping_for_matmul(dist_op): # The following which uses negative index can be work # when len(out_dims_mapping) > 2 and len(out_dims_mapping) <=2 dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, y_dims_mapping], [-1, -2]) + [x_dims_mapping, y_dims_mapping], [-1, -2] + ) if dim_changed: changed = True dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [-2, -2]) + [x_dims_mapping, out_dims_mapping], [-2, -2] + ) if dim_changed: changed = True dim_changed = compute_compatible_and_update_dim_mapping( - [y_dims_mapping, out_dims_mapping], [-1, -1]) + [y_dims_mapping, out_dims_mapping], [-1, -1] + ) if dim_changed: changed = True @@ -199,7 +217,8 @@ def _is_auto_compatible_for_matmul(dist_op): x_dims_mapping = copy.deepcopy(op_dist_attr.get_input_dims_mapping(x_name)) y_dims_mapping = copy.deepcopy(op_dist_attr.get_input_dims_mapping(y_name)) out_dims_mapping = copy.deepcopy( - op_dist_attr.get_output_dims_mapping(out_name)) + op_dist_attr.get_output_dims_mapping(out_name) + ) x_dims_mapping_len = len(x_dims_mapping) y_dims_mapping_len = len(y_dims_mapping) out_dims_mapping_len = len(out_dims_mapping) @@ -231,22 +250,23 @@ def _is_auto_compatible_for_matmul(dist_op): for i in range(out_dims_mapping_len - 2): broadcast_out_dims_mapping.append(out_dims_mapping[i]) - is_same = ((broadcast_x_dims_mapping == broadcast_y_dims_mapping) - and (broadcast_x_dims_mapping == broadcast_out_dims_mapping)) + is_same = (broadcast_x_dims_mapping == broadcast_y_dims_mapping) and ( + broadcast_x_dims_mapping == broadcast_out_dims_mapping + ) if not is_same: return False # The following which uses negative index can be work # when len(out_dims_mapping) > 2 and len(out_dims_mapping) <=2 - is_same = (x_dims_mapping[-1] == y_dims_mapping[-2]) + is_same = x_dims_mapping[-1] == y_dims_mapping[-2] if not is_same: return False - is_same = (x_dims_mapping[-2] == out_dims_mapping[-2]) + is_same = x_dims_mapping[-2] == out_dims_mapping[-2] if not is_same: return False - is_same = (y_dims_mapping[-1] == out_dims_mapping[-1]) + is_same = y_dims_mapping[-1] == out_dims_mapping[-1] if not is_same: return False @@ -262,8 +282,9 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): backward_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id dist_attr = ctx.get_op_dist_attr_for_program(backward_op) - assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(backward_op)) + assert ( + dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(backward_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in dist_attr.process_mesh.processes: @@ -274,22 +295,26 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): assert 'Out@GRAD' in kwargs, "input [{}] is not given".format('Out@GRAD') assert 'Y@GRAD' in kwargs, "output [{}] is not given".format('Y@GRAD') assert 'X@GRAD' in kwargs, "output [{}] is not given".format('X@GRAD') - assert len( + assert ( + len(kwargs['Y']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( kwargs['Y'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['Y']) - assert len( + ) + assert ( + len(kwargs['X']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( kwargs['X'] - ) == 1, "row_parallel_embedding input 
Ids take 1 variable but got {}".format( - kwargs['X']) - assert len( - kwargs['Out@GRAD'] - ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format( - kwargs['Out']) - assert len( + ) + assert ( + len(kwargs['Out@GRAD']) == 1 + ), "row_parallel_embedding input Ids take 1 variable but got {}".format( + kwargs['Out'] + ) + assert ( + len(kwargs['Y@GRAD']) == 1 + ), "row_parallel_embedding output Ids take 1 variable but got {}".format( kwargs['Y@GRAD'] - ) == 1, "row_parallel_embedding output Ids take 1 variable but got {}".format( - kwargs['Y@GRAD']) + ) X_var = main_block.var(kwargs['X'][0]) Y_var = main_block._var_recursive(kwargs['Y'][0]) @@ -299,7 +324,8 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): assert not is_parameter_related( X_var.name, main_block ), "left operand(X) [{}] of dist matmul should not be parameter".format( - X_var.name) + X_var.name + ) X_var_dims_mapping = dist_attr.get_input_dims_mapping(X_var.name) Y_var_dim_mapping = dist_attr.get_input_dims_mapping(Y_var.name) @@ -336,28 +362,34 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): parallel_axis = Y_var_dim_mapping[0] check_variable_and_dtype( - Out_grad, 'tensor', + Out_grad, + 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - '_c_identity') + '_c_identity', + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_identity", 'tmp'])) + "@GRAD", + name=unique_name.generate_with_ignorable_key( + ".".join(["c_identity", 'tmp']) + ) + + "@GRAD", dtype=Out_grad.dtype, shape=Out_grad.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=Out_grad.stop_gradient) + stop_gradient=Out_grad.stop_gradient, + ) # copy X_var's dist_attr to intermediate_var_0's dist_attr out_grad_dist_attr = dist_attr.get_input_dist_attr(Out_grad.name) assert out_grad_dist_attr is not None - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_grad_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_grad_dist_attr + ) - group_ranks = _get_comm_group(process_mesh_group, - process_mesh_shape, parallel_axis, - rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) c_identity_op = main_block.append_op( type='c_identity', @@ -368,20 +400,29 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): 'use_calc_stream': True, 'use_model_parallel': True, OP_ROLE_KEY: OpRole.Backward, - }) - check_variable_and_dtype(intermediate_var_0, 'x', - ['float16', 'float32', 'float64'], - 'linear') - check_dtype(intermediate_var_0.dtype, 'dtype', - ['float16', 'float32', 'float64'], 'linear') - set_comm_op_dist_attr_for_program(c_identity_op, - dist_attr.process_mesh, - out_grad_dist_attr, ctx) + }, + ) + check_variable_and_dtype( + intermediate_var_0, + 'x', + ['float16', 'float32', 'float64'], + 'linear', + ) + check_dtype( + intermediate_var_0.dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'linear', + ) + set_comm_op_dist_attr_for_program( + c_identity_op, dist_attr.process_mesh, out_grad_dist_attr, ctx + ) new_kwargs = copy.deepcopy(kwargs) new_kwargs['Out@GRAD'] = [intermediate_var_0.name] matmul_op_desc = copy_op_with_new_input_output( - ctx, main_block, backward_op, **new_kwargs) + ctx, main_block, backward_op, **new_kwargs + ) else: # col parallel: matmul + allreduce assert Y_var_dim_mapping[0] < 0 @@ -394,28 +435,36 @@ def 
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs): assert len(kwargs['X@GRAD']) == 1 X_grad = main_block.var(kwargs['X@GRAD'][0]) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_identity", 'tmp'])) + "@GRAD", + name=unique_name.generate_with_ignorable_key( + ".".join(["c_identity", 'tmp']) + ) + + "@GRAD", dtype=X_grad.dtype, shape=X_grad.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_grad.stop_gradient) + stop_gradient=X_grad.stop_gradient, + ) X_grad_dist_attr = dist_attr.get_output_dist_attr(X_grad.name) assert X_grad_dist_attr is not None - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - X_grad_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, X_grad_dist_attr + ) new_kwargs['X@GRAD'] = [intermediate_var_0.name] matmul_op_desc = copy_op_with_new_input_output( - ctx, main_block, backward_op, **new_kwargs) + ctx, main_block, backward_op, **new_kwargs + ) # NOTE (JZ-LIANG) trick to skip one allreduce if left operand has not grad if has_x_grad: - group_ranks = _get_comm_group(process_mesh_group, - process_mesh_shape, parallel_axis, - rank_id) + group_ranks = _get_comm_group( + process_mesh_group, + process_mesh_shape, + parallel_axis, + rank_id, + ) group = new_process_group(group_ranks) c_allreduce_sum_op = main_block.append_op( type='c_allreduce_sum', @@ -425,15 +474,20 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: OpRole.Backward - }) - set_comm_op_dist_attr_for_program(c_allreduce_sum_op, - dist_attr.process_mesh, - X_grad_dist_attr, ctx) + OP_ROLE_KEY: OpRole.Backward, + }, + ) + set_comm_op_dist_attr_for_program( + c_allreduce_sum_op, + dist_attr.process_mesh, + X_grad_dist_attr, + ctx, + ) else: # replicate - matmul_op_desc = copy_op_with_new_input_output(ctx, main_block, - backward_op, **kwargs) + matmul_op_desc = copy_op_with_new_input_output( + ctx, main_block, backward_op, **kwargs + ) # data parallel gradient synchronization act_grad_names = [X_var.name] @@ -445,8 +499,9 @@ def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs): if trans_x: trans_x_y_dims_mapping(True, False, X_var_dims_mapping, None) - gradient_synchronization(ctx, backward_op, act_grad_names, out_grad_names, - rank_id) + gradient_synchronization( + ctx, backward_op, act_grad_names, out_grad_names, rank_id + ) if trans_x: trans_x_y_dims_mapping(True, False, X_var_dims_mapping, None) @@ -469,23 +524,25 @@ def _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, rank_id): if size <= 1 or axis in dim_mapping: pass else: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, axis, rank_id) + group_ranks = _get_comm_group( + process_mesh.processes, process_mesh.topology, axis, rank_id + ) sync_group = new_process_group(group_ranks) - startup_block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': sync_group.id, - 'root': 0, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) + startup_block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': sync_group.id, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) class DistributedMatmul(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedMatmul, self).__init__(op_type) @@ -495,7 +552,6 @@ 
register_distributed_operator_impl_container(DistributedMatmul("matmul")) # ColumnParallel class DistributedMatmulImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulImpl0, self).__init__(name) self._forward_implemented = True @@ -518,7 +574,8 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) # col parallel: matmul + allreduce assert Y_var_dim_mapping[0] < 0 parallel_axis = Y_var_dim_mapping[1] @@ -528,13 +585,14 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): assert len(backward_op.output("X@GRAD")) == 1 # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulGradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # calc comm op cost @@ -547,40 +605,52 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, - c_allreduce_sum_desc_mapping, cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res.append(comm_op_cost_list) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulOpCost, ctx, processes, desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-1] + serial_op.input("Y")[0] + )[-1] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.input("X") c_identity_desc_mapping = build_comm_desc_from_dist_op( @@ -589,10 +659,12 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, 
c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res_cost = [comm_op_cost_list, cost_mapping] return res_cost @@ -603,16 +675,19 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): x_name = op_desc.input('X')[0] y_name = op_desc.input('Y')[0] x_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(x_name)) + op_dist_attr.get_input_dims_mapping(x_name) + ) y_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(y_name)) + op_dist_attr.get_input_dims_mapping(y_name) + ) trans_x = op_desc.attr('transpose_X') trans_y = op_desc.attr('transpose_Y') trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping) if is_dim_shard(x_dims_mapping[-1]): return False if is_dim_shard(y_dims_mapping[-2]) or is_dim_replicate( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False for mapping in x_dims_mapping[1:-1]: if is_dim_shard(mapping): @@ -632,8 +707,9 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): return False @@ -658,28 +734,33 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block.var(kwargs['Y'][0]) @@ -689,18 +770,24 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-1] + Weight_var.name + )[-1] if trans_y: matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-2] - assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_col_dim_mapping) + Weight_var.name + )[-2] + assert ( + matmul_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes 
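The forward path below reads the column axis of the weight's dims_mapping as parallel_axis and asks _get_comm_group for the ranks that share every mesh coordinate with rank_id except the one on that axis; new_process_group then builds the model-parallel group from those ranks. A simplified, self-contained sketch of that lookup (illustrative only; the real helper may differ in details):

# Illustrative stand-in for deriving a communication group from a process
# mesh: the group along `axis` is every rank whose mesh coordinates equal
# rank_id's coordinates on all other axes. Not the actual _get_comm_group.
import itertools


def comm_group(processes, topology, axis, rank_id):
    coords = list(itertools.product(*[range(n) for n in topology]))
    rank_coord = coords[processes.index(rank_id)]
    return [
        proc
        for proc, coord in zip(processes, coords)
        if all(c == r for i, (c, r) in enumerate(zip(coord, rank_coord)) if i != axis)
    ]


# A 2 x 4 mesh over ranks 0..7: tensor-parallel groups run along axis 1.
procs, topo = list(range(8)), [2, 4]
print(comm_group(procs, topo, axis=1, rank_id=5))  # [4, 5, 6, 7]
print(comm_group(procs, topo, axis=0, rank_id=5))  # [1, 5]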
parallel_axis = matmul_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # infer new var shape with op dist attr @@ -708,31 +795,39 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): assert x_tensor_dist_attr is not None identity_var_dist_attr = op_dist_attr.get_input_dist_attr(X_var.name) assert identity_var_dist_attr is not None - ref_shape_x = infer_shape(main_block, X_var, x_tensor_dist_attr, - identity_var_dist_attr) + ref_shape_x = infer_shape( + main_block, X_var, x_tensor_dist_attr, identity_var_dist_attr + ) # infer out var shape with op dist attr out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var) assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape_out = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape_out = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_identity", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_identity", 'tmp']) + ), dtype=X_var.dtype, shape=X_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_var.stop_gradient) + stop_gradient=X_var.stop_gradient, + ) # set intermediate_var_0's dist_attr with X_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - identity_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, identity_var_dist_attr + ) check_variable_and_dtype( - X_var, 'tensor', - ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity') + X_var, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_identity', + ) c_identity_op = main_block.append_op( type='c_identity', @@ -742,26 +837,31 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if intermediate_var_0.shape != ref_shape_x: intermediate_var_0.desc.set_shape(ref_shape_x) - check_variable_and_dtype(intermediate_var_0, 'x', - ['float16', 'float32', 'float64'], 'linear') - check_dtype(intermediate_var_0.dtype, 'dtype', - ['float16', 'float32', 'float64'], 'linear') + check_variable_and_dtype( + intermediate_var_0, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + intermediate_var_0.dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'linear', + ) attrs = { 'transpose_X': trans_x, 'transpose_Y': trans_y, 'alpha': 1, - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]} - matmul_op = main_block.append_op(type='matmul', - inputs=inputs, - outputs={'Out': Out_var}, - attrs=attrs) + matmul_op = main_block.append_op( + type='matmul', inputs=inputs, outputs={'Out': Out_var}, attrs=attrs + ) if Out_var.shape != ref_shape_out: Out_var.desc.set_shape(ref_shape_out) @@ -775,13 +875,16 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): input_varname = c_identity_op.desc.input_arg_names()[0] input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr 
is {}".format( - op_dist_attr) - identity_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + identity_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) # output output_varname = c_identity_op.desc.output_arg_names()[0] - identity_op_dist_attr.set_output_dist_attr(output_varname, - input_dist_attr) + identity_op_dist_attr.set_output_dist_attr( + output_varname, input_dist_attr + ) # set op dist attr ctx.set_op_dist_attr_for_program(c_identity_op, identity_op_dist_attr) @@ -794,31 +897,39 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): for input_varname in matmul_op.desc.input_arg_names(): if input_varname in src_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr( - input_varname) + input_varname + ) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmul_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + matmul_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) else: input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program( - input_var) - matmul_op_dist_attr.set_input_dist_attr(input_varname, - tensor_dist_attr) + input_var + ) + matmul_op_dist_attr.set_input_dist_attr( + input_varname, tensor_dist_attr + ) # output output_varname = matmul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmul_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmul_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) # set op dist attr ctx.set_op_dist_attr_for_program(matmul_op, matmul_op_dist_attr) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -827,7 +938,6 @@ class DistributedMatmulImpl0(DistributedOperatorImpl): # RowParallel class DistributedMatmulImpl1(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulImpl1, self).__init__(name) self._forward_implemented = True @@ -850,7 +960,8 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) assert Y_var_dim_mapping[1] < 0 parallel_axis = Y_var_dim_mapping[0] @@ -863,50 +974,60 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res.append(comm_op_cost_list) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) - cost_mapping = build_comp_costs_from_descs(MatmulGradOpCost, ctx, - processes, desc_mapping, - cluster) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) + cost_mapping = build_comp_costs_from_descs( + MatmulGradOpCost, ctx, processes, desc_mapping, cluster + ) 
res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulOpCost, ctx, processes, desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-2] + serial_op.input("Y")[0] + )[-2] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.output("Out") @@ -916,11 +1037,16 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, c_allreduce_sum_desc_mapping, - cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res_cost = [cost_mapping, comm_op_cost_list] @@ -932,16 +1058,19 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): x_name = op_desc.input('X')[0] y_name = op_desc.input('Y')[0] x_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(x_name)) + op_dist_attr.get_input_dims_mapping(x_name) + ) y_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(y_name)) + op_dist_attr.get_input_dims_mapping(y_name) + ) trans_x = op_desc.attr('transpose_X') trans_y = op_desc.attr('transpose_Y') trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping) if is_dim_replicate(x_dims_mapping[-1]): return False if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False # Other dimensions must be replicate except the batch dimension for mapping in x_dims_mapping[1:-1]: @@ -963,8 +1092,9 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): return False @@ -989,28 +1119,33 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert 
( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block.var(kwargs['Y'][0]) @@ -1020,29 +1155,37 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-2] + Weight_var.name + )[-2] if trans_y: matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-1] - assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_row_dim_mapping) + Weight_var.name + )[-1] + assert ( + matmul_row_dim_mapping >= 0 + ), "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_row_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = matmul_row_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) - check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'], - 'linear') - check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], - 'linear') + check_variable_and_dtype( + X_var, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear' + ) attrs = { 'transpose_X': trans_x, 'transpose_Y': trans_y, 'alpha': 1, - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': X_var, 'Y': Weight_var} @@ -1051,27 +1194,33 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_allreduce_sum", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_allreduce_sum", 'tmp']) + ), shape=Out_var.shape, dtype=Out_var.dtype, type=Out_var.type, lod_level=Out_var.lod_level, persistable=False, is_data=False, - need_check_feed=Out_var.desc.need_check_feed()) + 
need_check_feed=Out_var.desc.need_check_feed(), + ) # set intermediate_var_0's dist_attr with Out_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_var_dist_attr + ) - matmul_op = main_block.append_op(type='matmul', - inputs=inputs, - outputs={'Out': intermediate_var_0}, - attrs=attrs) + matmul_op = main_block.append_op( + type='matmul', + inputs=inputs, + outputs={'Out': intermediate_var_0}, + attrs=attrs, + ) if intermediate_var_0.shape != ref_shape: intermediate_var_0.desc.set_shape(ref_shape) @@ -1083,8 +1232,9 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if Out_var.shape != ref_shape: Out_var.desc.set_shape(ref_shape) @@ -1097,15 +1247,19 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): for input_varname in matmul_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmul_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + matmul_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) output_varname = matmul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmul_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmul_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(matmul_op, matmul_op_dist_attr) # allreduce @@ -1117,21 +1271,26 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var) assert tensor_dist_attr is not None - allreduce_op_dist_attr.set_input_dist_attr(input_varname, - tensor_dist_attr) + allreduce_op_dist_attr.set_input_dist_attr( + input_varname, tensor_dist_attr + ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - allreduce_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) - ctx.set_op_dist_attr_for_program(c_allreduce_sum_op, - allreduce_op_dist_attr) + op_dist_attr + ) + allreduce_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) + ctx.set_op_dist_attr_for_program( + c_allreduce_sum_op, allreduce_op_dist_attr + ) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -1140,7 +1299,6 @@ class DistributedMatmulImpl1(DistributedOperatorImpl): # ReplicateParallel class DistributedMatmulImpl2(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulImpl2, self).__init__(name) @@ -1161,38 +1319,45 @@ class DistributedMatmulImpl2(DistributedOperatorImpl): vars = main_block.vars # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = 
build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulGradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulOpCost, ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -1208,13 +1373,15 @@ class DistributedMatmulImpl2(DistributedOperatorImpl): if is_dim_shard(x_dims_mapping[-1]): return False if is_valid_list_index(x_dims_mapping, -2) and is_dim_shard( - x_dims_mapping[-2]): + x_dims_mapping[-2] + ): return False if is_dim_shard(y_dims_mapping[-1]): return False if is_valid_list_index(y_dims_mapping, -2) and is_dim_shard( - y_dims_mapping[-2]): + y_dims_mapping[-2] + ): return False return True @@ -1228,14 +1395,16 @@ class DistributedMatmulImpl2(DistributedOperatorImpl): if is_dim_shard(out_dims_mapping[-1]): return False if is_valid_list_index(out_dims_mapping, -2) and is_dim_shard( - out_dims_mapping[-2]): + out_dims_mapping[-2] + ): return False return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): @@ -1259,16 +1428,18 @@ class DistributedMatmulImpl2(DistributedOperatorImpl): _right_operand_parameter_matmul_backward(ctx, *args, **kwargs) -register_distributed_operator_impl("matmul", - DistributedMatmulImpl0("column_parallel")) -register_distributed_operator_impl("matmul", - DistributedMatmulImpl1("row_parallel")) -register_distributed_operator_impl("matmul", - DistributedMatmulImpl2("replicate_parallel")) +register_distributed_operator_impl( + "matmul", DistributedMatmulImpl0("column_parallel") +) +register_distributed_operator_impl( + "matmul", DistributedMatmulImpl1("row_parallel") +) +register_distributed_operator_impl( + "matmul", DistributedMatmulImpl2("replicate_parallel") +) class DistributedMatmulV2(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedMatmulV2, self).__init__(op_type) @@ -1278,7 +1449,6 @@ 
register_distributed_operator_impl_container(DistributedMatmulV2("matmul_v2")) # ColumnParallel class DistributedMatmulV2Impl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulV2Impl0, self).__init__(name) self._forward_implemented = True @@ -1301,7 +1471,8 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes # col parallel: matmul + allreduce @@ -1315,12 +1486,13 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): assert len(backward_op.output("X@GRAD")) == 1 # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) - cost_mapping = build_comp_costs_from_descs(MatmulV2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulV2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # calc comm op cost @@ -1333,45 +1505,55 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, - c_allreduce_sum_desc_mapping, cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res.append(comm_op_cost_list) # need gradient allreduce process_mesh = dist_attr.process_mesh var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost # TODO: trans shape if trans_x or trans_y is True - comp_desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + comp_desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - comp_cost_mapping = build_comp_costs_from_descs(MatmulV2OpCost, ctx, - processes, - comp_desc_mapping, - cluster) + comp_cost_mapping = build_comp_costs_from_descs( + MatmulV2OpCost, ctx, processes, comp_desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-1] + serial_op.input("Y")[0] + )[-1] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.input("X") @@ -1381,9 +1563,11 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, 
ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res_cost = [comm_op_cost_list, comp_cost_mapping] return res_cost @@ -1394,16 +1578,19 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): x_name = op_desc.input('X')[0] y_name = op_desc.input('Y')[0] x_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(x_name)) + op_dist_attr.get_input_dims_mapping(x_name) + ) y_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(y_name)) + op_dist_attr.get_input_dims_mapping(y_name) + ) trans_x = op_desc.attr('trans_x') trans_y = op_desc.attr('trans_y') trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping) if is_dim_shard(x_dims_mapping[-1]): return False if is_dim_shard(y_dims_mapping[-2]) or is_dim_replicate( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False for mapping in x_dims_mapping[1:-1]: if is_dim_shard(mapping): @@ -1423,8 +1610,9 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): return False @@ -1449,28 +1637,33 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -1480,18 +1673,24 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-1] + Weight_var.name + )[-1] if trans_y: matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-2] - assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_col_dim_mapping) + Weight_var.name + )[-2] + assert ( + matmul_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = 
op_dist_attr.process_mesh.processes parallel_axis = matmul_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # infer new var shape with op dist attr @@ -1499,31 +1698,39 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): assert x_tensor_dist_attr is not None identity_var_dist_attr = op_dist_attr.get_input_dist_attr(X_var.name) assert identity_var_dist_attr is not None - ref_shape_x = infer_shape(main_block, X_var, x_tensor_dist_attr, - identity_var_dist_attr) + ref_shape_x = infer_shape( + main_block, X_var, x_tensor_dist_attr, identity_var_dist_attr + ) # infer out var shape with op dist attr out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var) assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape_out = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape_out = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_identity", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_identity", 'tmp']) + ), dtype=X_var.dtype, shape=X_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_var.stop_gradient) + stop_gradient=X_var.stop_gradient, + ) # set intermediate_var_0's dist_attr with X_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - identity_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, identity_var_dist_attr + ) check_variable_and_dtype( - X_var, 'tensor', - ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity') + X_var, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_identity', + ) c_identity_op = main_block.append_op( type='c_identity', inputs={'X': [X_var]}, @@ -1533,24 +1740,32 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): 'use_calc_stream': True, 'use_model_parallel': True, OP_ROLE_KEY: src_op.attr('op_role'), - }) + }, + ) if intermediate_var_0.shape != ref_shape_x: intermediate_var_0.desc.set_shape(ref_shape_x) - check_variable_and_dtype(intermediate_var_0, 'x', - ['float16', 'float32', 'float64'], 'linear') - check_dtype(intermediate_var_0.dtype, 'dtype', - ['float16', 'float32', 'float64'], 'linear') + check_variable_and_dtype( + intermediate_var_0, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + intermediate_var_0.dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'linear', + ) attrs = { 'trans_x': trans_x, 'trans_y': trans_y, - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]} - matmul_v2_op = main_block.append_op(type='matmul_v2', - inputs=inputs, - outputs={'Out': Out_var}, - attrs=attrs) + matmul_v2_op = main_block.append_op( + type='matmul_v2', + inputs=inputs, + outputs={'Out': Out_var}, + attrs=attrs, + ) if Out_var.shape != ref_shape_out: Out_var.desc.set_shape(ref_shape_out) @@ -1564,13 +1779,16 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): input_varname = c_identity_op.desc.input_arg_names()[0] input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, 
"dist_attr is {}".format( - op_dist_attr) - identity_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + identity_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) # output output_varname = c_identity_op.desc.output_arg_names()[0] - identity_op_dist_attr.set_output_dist_attr(output_varname, - input_dist_attr) + identity_op_dist_attr.set_output_dist_attr( + output_varname, input_dist_attr + ) ctx.set_op_dist_attr_for_program(c_identity_op, identity_op_dist_attr) # matmulv2 @@ -1581,29 +1799,37 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): for input_varname in matmul_v2_op.desc.input_arg_names(): if input_varname in src_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr( - input_varname) + input_varname + ) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) + op_dist_attr + ) matmulv2_op_dist_attr.set_input_dist_attr( - input_varname, input_dist_attr) + input_varname, input_dist_attr + ) else: input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program( - input_var) + input_var + ) matmulv2_op_dist_attr.set_input_dist_attr( - input_varname, tensor_dist_attr) + input_varname, tensor_dist_attr + ) for output_varname in matmul_v2_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(matmul_v2_op, matmulv2_op_dist_attr) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -1612,7 +1838,6 @@ class DistributedMatmulV2Impl0(DistributedOperatorImpl): # RowParallel class DistributedMatmulV2Impl1(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulV2Impl1, self).__init__(name) self._forward_implemented = True @@ -1635,7 +1860,8 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) assert Y_var_dim_mapping[1] < 0 parallel_axis = Y_var_dim_mapping[0] @@ -1650,50 +1876,59 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res.append(comm_op_cost_list) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) - cost_mapping = build_comp_costs_from_descs(MatmulV2GradOpCost, ctx, - processes, desc_mapping, - cluster) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) + cost_mapping = build_comp_costs_from_descs( + MatmulV2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce process_mesh = dist_attr.process_mesh var_dim_mapping = dist_attr.get_input_dims_mapping( - 
backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulV2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulV2OpCost, ctx, processes, desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-2] + serial_op.input("Y")[0] + )[-2] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.output("Out") @@ -1703,11 +1938,16 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, c_allreduce_sum_desc_mapping, - cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res_cost = [cost_mapping, comm_op_cost_list] return res_cost @@ -1718,16 +1958,19 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): x_name = op_desc.input('X')[0] y_name = op_desc.input('Y')[0] x_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(x_name)) + op_dist_attr.get_input_dims_mapping(x_name) + ) y_dims_mapping = copy.deepcopy( - op_dist_attr.get_input_dims_mapping(y_name)) + op_dist_attr.get_input_dims_mapping(y_name) + ) trans_x = op_desc.attr('trans_x') trans_y = op_desc.attr('trans_y') trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping) if is_dim_replicate(x_dims_mapping[-1]): return False if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False # Other dimensions must be replicate except the batch dimension for mapping in x_dims_mapping[1:-1]: @@ -1749,8 +1992,9 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): return False @@ -1775,28 +2019,33 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute 
!".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -1806,28 +2055,36 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-2] + Weight_var.name + )[-2] if trans_y: matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-1] - assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_row_dim_mapping) + Weight_var.name + )[-1] + assert ( + matmul_row_dim_mapping >= 0 + ), "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_row_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = matmul_row_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) - check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'], - 'linear') - check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], - 'linear') + check_variable_and_dtype( + X_var, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear' + ) attrs = { 'trans_x': trans_x, 'trans_y': trans_y, - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': X_var, 'Y': Weight_var} @@ -1836,27 +2093,33 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_allreduce_sum", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_allreduce_sum", 'tmp']) + ), shape=Out_var.shape, dtype=Out_var.dtype, type=Out_var.type, lod_level=Out_var.lod_level, persistable=False, is_data=False, - need_check_feed=Out_var.desc.need_check_feed()) + need_check_feed=Out_var.desc.need_check_feed(), + ) # set intermediate_var_0's dist_attr with Out_var's dist_attr - 
ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_var_dist_attr + ) - matmul_v2_op = main_block.append_op(type='matmul_v2', - inputs=inputs, - outputs={'Out': intermediate_var_0}, - attrs=attrs) + matmul_v2_op = main_block.append_op( + type='matmul_v2', + inputs=inputs, + outputs={'Out': intermediate_var_0}, + attrs=attrs, + ) if intermediate_var_0.shape != ref_shape: intermediate_var_0.desc.set_shape(ref_shape) @@ -1868,8 +2131,9 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if Out_var.shape != ref_shape: Out_var.desc.set_shape(ref_shape) @@ -1882,15 +2146,19 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): for input_varname in matmul_v2_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) output_varname = matmul_v2_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(matmul_v2_op, matmulv2_op_dist_attr) # allreduce @@ -1902,21 +2170,26 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var) assert tensor_dist_attr is not None - allreduce_op_dist_attr.set_input_dist_attr(input_varname, - tensor_dist_attr) + allreduce_op_dist_attr.set_input_dist_attr( + input_varname, tensor_dist_attr + ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - allreduce_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) - ctx.set_op_dist_attr_for_program(c_allreduce_sum_op, - allreduce_op_dist_attr) + op_dist_attr + ) + allreduce_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) + ctx.set_op_dist_attr_for_program( + c_allreduce_sum_op, allreduce_op_dist_attr + ) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -1925,7 +2198,6 @@ class DistributedMatmulV2Impl1(DistributedOperatorImpl): # ReplicateParallel class DistributedMatmulV2Impl2(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMatmulV2Impl2, self).__init__(name) @@ -1947,38 +2219,44 @@ class DistributedMatmulV2Impl2(DistributedOperatorImpl): process_mesh = dist_attr.process_mesh # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) 
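Another rewrite repeated around this point is the multi-clause gradient-allreduce test: the old layout broke the condition inside a subscript continuation, while the new one wraps the whole test in parentheses with one clause per line. A self-contained illustration with placeholder values (looks_like_parameter stands in for is_parameter_related, and the concrete numbers are made up):

# Stand-in values; in the real code these come from the op's dist_attr and
# the process mesh topology.
batch_size_axis = 0
mesh_shape = [2, 1]


def looks_like_parameter(name):
    # Placeholder for is_parameter_related(var_name, block).
    return name.endswith(".w_0")


# Old layout:
#   if batch_size_axis > -1 and mesh_shape[
#           batch_size_axis] > 1 and looks_like_parameter("fc.w_0"):
# New layout: the whole condition sits in one parenthesized block.
if (
    batch_size_axis > -1
    and mesh_shape[batch_size_axis] > 1
    and looks_like_parameter("fc.w_0")
):
    print("parameter gradient needs a data-parallel allreduce")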
processes = process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulV2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulV2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MatmulV2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MatmulV2OpCost, ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] @@ -1995,13 +2273,15 @@ class DistributedMatmulV2Impl2(DistributedOperatorImpl): if is_dim_shard(x_dims_mapping[-1]): return False if is_valid_list_index(x_dims_mapping, -2) and is_dim_shard( - x_dims_mapping[-2]): + x_dims_mapping[-2] + ): return False if is_dim_shard(y_dims_mapping[-1]): return False if is_valid_list_index(y_dims_mapping, -2) and is_dim_shard( - y_dims_mapping[-2]): + y_dims_mapping[-2] + ): return False return True @@ -2016,14 +2296,16 @@ class DistributedMatmulV2Impl2(DistributedOperatorImpl): if is_dim_shard(out_dims_mapping[-1]): return False if is_valid_list_index(out_dims_mapping, -2) and is_dim_shard( - out_dims_mapping[-2]): + out_dims_mapping[-2] + ): return False return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): @@ -2047,16 +2329,18 @@ class DistributedMatmulV2Impl2(DistributedOperatorImpl): _right_operand_parameter_matmul_backward(ctx, *args, **kwargs) -register_distributed_operator_impl("matmul_v2", - DistributedMatmulV2Impl0("column_parallel")) -register_distributed_operator_impl("matmul_v2", - DistributedMatmulV2Impl1("row_parallel")) register_distributed_operator_impl( - "matmul_v2", DistributedMatmulV2Impl2("replicate_parallel")) + "matmul_v2", DistributedMatmulV2Impl0("column_parallel") +) +register_distributed_operator_impl( + "matmul_v2", DistributedMatmulV2Impl1("row_parallel") +) +register_distributed_operator_impl( + "matmul_v2", DistributedMatmulV2Impl2("replicate_parallel") +) class DistributedMul(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedMul, self).__init__(op_type) @@ -2066,7 +2350,6 @@ register_distributed_operator_impl_container(DistributedMul("mul")) # ColumnParallel class 
DistributedMulImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMulImpl0, self).__init__(name) self._forward_implemented = True @@ -2089,7 +2372,8 @@ class DistributedMulImpl0(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) # col parallel: matmul + allreduce assert Y_var_dim_mapping[0] < 0 parallel_axis = Y_var_dim_mapping[1] @@ -2099,13 +2383,14 @@ class DistributedMulImpl0(DistributedOperatorImpl): assert len(backward_op.output("X@GRAD")) == 1 # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MulGradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MulGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # calc comm op cost @@ -2118,40 +2403,52 @@ class DistributedMulImpl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, - c_allreduce_sum_desc_mapping, cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res.append(comm_op_cost_list) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MulOpCost, ctx, processes, desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-1] + serial_op.input("Y")[0] + )[-1] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.input("X") c_identity_desc_mapping = build_comm_desc_from_dist_op( @@ -2160,10 +2457,12 @@ class DistributedMulImpl0(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res_cost = 
[comm_op_cost_list, cost_mapping] return res_cost @@ -2178,7 +2477,8 @@ class DistributedMulImpl0(DistributedOperatorImpl): if is_dim_shard(x_dims_mapping[-1]): return False if is_dim_shard(y_dims_mapping[-2]) or is_dim_replicate( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False for mapping in x_dims_mapping[1:-1]: if is_dim_shard(mapping): @@ -2198,8 +2498,9 @@ class DistributedMulImpl0(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): @@ -2226,28 +2527,33 @@ class DistributedMulImpl0(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -2255,15 +2561,20 @@ class DistributedMulImpl0(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-1] - assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_col_dim_mapping) + Weight_var.name + )[-1] + assert ( + matmul_col_dim_mapping >= 0 + ), "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_col_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = matmul_col_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) # infer new var shape with op dist attr @@ -2271,31 +2582,39 @@ class DistributedMulImpl0(DistributedOperatorImpl): assert x_tensor_dist_attr is not None identity_var_dist_attr = op_dist_attr.get_input_dist_attr(X_var.name) assert identity_var_dist_attr is not None - ref_shape_x = infer_shape(main_block, X_var, x_tensor_dist_attr, - identity_var_dist_attr) + ref_shape_x = infer_shape( + main_block, X_var, x_tensor_dist_attr, 
identity_var_dist_attr + ) # infer out var shape with op dist attr out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var) assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape_out = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape_out = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_identity", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_identity", 'tmp']) + ), dtype=X_var.dtype, shape=X_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_var.stop_gradient) + stop_gradient=X_var.stop_gradient, + ) # set intermediate_var_0's dist_attr with X_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - identity_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, identity_var_dist_attr + ) check_variable_and_dtype( - X_var, 'tensor', - ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity') + X_var, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_identity', + ) c_identity_op = main_block.append_op( type='c_identity', inputs={'X': [X_var]}, @@ -2304,20 +2623,26 @@ class DistributedMulImpl0(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if intermediate_var_0.shape != ref_shape_x: intermediate_var_0.desc.set_shape(ref_shape_x) - check_variable_and_dtype(intermediate_var_0, 'x', - ['float16', 'float32', 'float64'], 'linear') - check_dtype(intermediate_var_0.dtype, 'dtype', - ['float16', 'float32', 'float64'], 'linear') + check_variable_and_dtype( + intermediate_var_0, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + intermediate_var_0.dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'linear', + ) # attrs = {'trans_x': False, 'trans_y': False} attrs = { "x_num_col_dims": src_op.desc.attr("x_num_col_dims"), "y_num_col_dims": src_op.desc.attr("y_num_col_dims"), - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': intermediate_var_0, 'Y': Weight_var} @@ -2331,16 +2656,15 @@ class DistributedMulImpl0(DistributedOperatorImpl): inputs_original_shape[var_name] = var.shape input_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(var) input_var_dist_attr = op_dist_attr.get_input_dist_attr(var.name) - input_ref_shape = infer_shape(main_block, var, - input_tensor_dist_attr, - input_var_dist_attr) + input_ref_shape = infer_shape( + main_block, var, input_tensor_dist_attr, input_var_dist_attr + ) inputs_ref_shape[var_name] = input_ref_shape var.desc.set_shape(input_ref_shape) - mul_op = main_block.append_op(type='mul', - inputs=inputs, - outputs={'Out': Out_var}, - attrs=attrs) + mul_op = main_block.append_op( + type='mul', inputs=inputs, outputs={'Out': Out_var}, attrs=attrs + ) if Out_var.shape != ref_shape_out: Out_var.desc.set_shape(ref_shape_out) @@ -2359,13 +2683,16 @@ class DistributedMulImpl0(DistributedOperatorImpl): input_varname = c_identity_op.desc.input_arg_names()[0] input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - 
identity_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + identity_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) # output output_varname = c_identity_op.desc.output_arg_names()[0] - identity_op_dist_attr.set_output_dist_attr(output_varname, - input_dist_attr) + identity_op_dist_attr.set_output_dist_attr( + output_varname, input_dist_attr + ) ctx.set_op_dist_attr_for_program(c_identity_op, identity_op_dist_attr) # matmulv2 @@ -2376,29 +2703,37 @@ class DistributedMulImpl0(DistributedOperatorImpl): for input_varname in mul_op.desc.input_arg_names(): if input_varname in src_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr( - input_varname) + input_varname + ) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) + op_dist_attr + ) matmulv2_op_dist_attr.set_input_dist_attr( - input_varname, input_dist_attr) + input_varname, input_dist_attr + ) else: input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program( - input_var) + input_var + ) matmulv2_op_dist_attr.set_input_dist_attr( - input_varname, tensor_dist_attr) + input_varname, tensor_dist_attr + ) for output_varname in mul_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(mul_op, matmulv2_op_dist_attr) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -2407,7 +2742,6 @@ class DistributedMulImpl0(DistributedOperatorImpl): # RowParallel class DistributedMulImpl1(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMulImpl1, self).__init__(name) self._forward_implemented = True @@ -2431,7 +2765,8 @@ class DistributedMulImpl1(DistributedOperatorImpl): main_block = backward_op.block vars = main_block.vars Y_var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("Y")[0]) + backward_op.input("Y")[0] + ) assert Y_var_dim_mapping[1] < 0 parallel_axis = Y_var_dim_mapping[0] @@ -2444,49 +2779,59 @@ class DistributedMulImpl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) processes = process_mesh.processes comm_op_cost_list = build_comm_costs_from_descs( - IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster) + IdentityOpCost, ctx, processes, c_identity_desc_mapping, cluster + ) res.append(comm_op_cost_list) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) - cost_mapping = build_comp_costs_from_descs(MulGradOpCost, ctx, - processes, desc_mapping, - cluster) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) + cost_mapping = build_comp_costs_from_descs( + MulGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology 
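Although every hunk here is formatting-only, it may help to recall what the surrounding classes compute: the Impl0 variants are column-parallel (c_identity on the input, local matmul, column-sharded output) and the Impl1 variants are row-parallel (local matmul on sharded operands followed by c_allreduce_sum). A purely illustrative numpy sketch of those two schemes, independent of the Paddle APIs above:

# Conceptual numpy sketch (not Paddle API) of the two sharding schemes.
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((4, 8))   # activations
W = rng.standard_normal((8, 6))   # weight parameter, sharded across 2 ranks

# Column parallel: X is replicated (the c_identity), each rank holds half of
# W's columns, and the per-rank outputs are concatenated.
W_cols = np.split(W, 2, axis=1)
out_col = np.concatenate([X @ w for w in W_cols], axis=1)

# Row parallel: each rank holds half of W's rows and the matching slice of X;
# the partial products are summed, which is what c_allreduce_sum does.
W_rows = np.split(W, 2, axis=0)
X_cols = np.split(X, 2, axis=1)
out_row = sum(x @ w for x, w in zip(X_cols, W_rows))

assert np.allclose(out_col, X @ W)
assert np.allclose(out_row, X @ W)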
batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MulOpCost, ctx, processes, desc_mapping, cluster + ) # calc comm op cost serial_op = dist_op.serial_op vars = serial_op.block.vars parallel_axis = dist_op.dist_attr.get_input_dims_mapping( - serial_op.input("Y")[0])[-2] + serial_op.input("Y")[0] + )[-2] attrs = {"use_calc_stream": True, "use_model_parallel": True} var_names = serial_op.output("Out") @@ -2496,12 +2841,17 @@ class DistributedMulImpl1(DistributedOperatorImpl): ctx, var_names, attrs=attrs, - parallel_axis=parallel_axis) + parallel_axis=parallel_axis, + ) # print("dist_matmul.py dist_op: ", dist_op) comm_op_cost_list = build_comm_costs_from_descs( - AllreduceSumOpCost, ctx, processes, c_allreduce_sum_desc_mapping, - cluster) + AllreduceSumOpCost, + ctx, + processes, + c_allreduce_sum_desc_mapping, + cluster, + ) res_cost = [cost_mapping, comm_op_cost_list] @@ -2517,7 +2867,8 @@ class DistributedMulImpl1(DistributedOperatorImpl): if is_dim_replicate(x_dims_mapping[-1]): return False if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard( - y_dims_mapping[-1]): + y_dims_mapping[-1] + ): return False # Other dimensions must be replicate except the batch dimension for mapping in x_dims_mapping[1:-1]: @@ -2539,8 +2890,9 @@ class DistributedMulImpl1(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): @@ -2567,28 +2919,33 @@ class DistributedMulImpl1(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( 
src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Weight_var = main_block._var_recursive(kwargs['Y'][0]) @@ -2596,26 +2953,33 @@ class DistributedMulImpl1(DistributedOperatorImpl): # TODO infer logic comm presentation matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping( - Weight_var.name)[-2] - assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( - matmul_row_dim_mapping) + Weight_var.name + )[-2] + assert ( + matmul_row_dim_mapping >= 0 + ), "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format( + matmul_row_dim_mapping + ) process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes parallel_axis = matmul_row_dim_mapping - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - parallel_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, parallel_axis, rank_id + ) group = new_process_group(group_ranks) - check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'], - 'linear') - check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], - 'linear') + check_variable_and_dtype( + X_var, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear' + ) # attrs = {'trans_x': False, 'trans_y': False} attrs = { "x_num_col_dims": src_op.desc.attr("x_num_col_dims"), "y_num_col_dims": src_op.desc.attr("y_num_col_dims"), - OP_ROLE_KEY: src_op.attr('op_role') + OP_ROLE_KEY: src_op.attr('op_role'), } inputs = {'X': X_var, 'Y': Weight_var} @@ -2624,22 +2988,26 @@ class DistributedMulImpl1(DistributedOperatorImpl): assert out_tensor_dist_attr is not None out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert out_var_dist_attr is not None - ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr, - out_var_dist_attr) + ref_shape = infer_shape( + main_block, Out_var, out_tensor_dist_attr, out_var_dist_attr + ) intermediate_var_0 = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ["c_allreduce_sum", 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(["c_allreduce_sum", 'tmp']) + ), shape=Out_var.shape, dtype=Out_var.dtype, type=Out_var.type, lod_level=Out_var.lod_level, persistable=False, is_data=False, - need_check_feed=Out_var.desc.need_check_feed()) + need_check_feed=Out_var.desc.need_check_feed(), + ) # set intermediate_var_0's dist_attr with Out_var's dist_attr - ctx.set_tensor_dist_attr_for_program(intermediate_var_0, - out_var_dist_attr) + ctx.set_tensor_dist_attr_for_program( + intermediate_var_0, out_var_dist_attr + ) inputs_ref_shape = {} inputs_original_shape = {} @@ -2648,16 +3016,18 @@ class DistributedMulImpl1(DistributedOperatorImpl): inputs_original_shape[var_name] = var.shape input_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(var) input_var_dist_attr = op_dist_attr.get_input_dist_attr(var.name) - input_ref_shape = infer_shape(main_block, var, - input_tensor_dist_attr, - input_var_dist_attr) + 
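Nearly every hunk in this file is the same mechanical change: call sites that yapf wrapped with aligned continuation lines are re-wrapped in black's style, with a hanging indent and the closing parenthesis on its own line, whenever the call no longer fits on one line. The toy example below (invented names) shows the two forms side by side; behaviour is identical:

    def some_function(a, b, c):    # stand-in call target, illustration only
        return (a, b, c)

    # yapf-style aligned continuation, the form being removed in these hunks
    result_old = some_function(1, 2,
                               3)

    # black-style hanging indent with the closing parenthesis on its own line,
    # the form black emits when a call does not fit within the line limit
    result_new = some_function(
        1, 2, 3
    )

    assert result_old == result_new    # formatting only; semantics unchanged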
input_ref_shape = infer_shape( + main_block, var, input_tensor_dist_attr, input_var_dist_attr + ) inputs_ref_shape[var_name] = input_ref_shape var.desc.set_shape(input_ref_shape) - mul_op = main_block.append_op(type='mul', - inputs=inputs, - outputs={'Out': intermediate_var_0}, - attrs=attrs) + mul_op = main_block.append_op( + type='mul', + inputs=inputs, + outputs={'Out': intermediate_var_0}, + attrs=attrs, + ) if intermediate_var_0.shape != ref_shape: intermediate_var_0.desc.set_shape(ref_shape) @@ -2675,8 +3045,9 @@ class DistributedMulImpl1(DistributedOperatorImpl): 'ring_id': group.id, 'use_calc_stream': True, 'use_model_parallel': True, - OP_ROLE_KEY: src_op.attr('op_role') - }) + OP_ROLE_KEY: src_op.attr('op_role'), + }, + ) if Out_var.shape != ref_shape: Out_var.desc.set_shape(ref_shape) @@ -2690,15 +3061,19 @@ class DistributedMulImpl1(DistributedOperatorImpl): for input_varname in mul_op.desc.input_arg_names(): input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname) assert input_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_input_dist_attr(input_varname, - input_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_input_dist_attr( + input_varname, input_dist_attr + ) output_varname = mul_op.desc.output_arg_names()[0] output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - matmulv2_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) + op_dist_attr + ) + matmulv2_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) ctx.set_op_dist_attr_for_program(mul_op, matmulv2_op_dist_attr) # allreduce @@ -2710,21 +3085,26 @@ class DistributedMulImpl1(DistributedOperatorImpl): input_var = main_block.var(input_varname) tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var) assert tensor_dist_attr is not None - allreduce_op_dist_attr.set_input_dist_attr(input_varname, - tensor_dist_attr) + allreduce_op_dist_attr.set_input_dist_attr( + input_varname, tensor_dist_attr + ) for output_varname in c_allreduce_sum_op.desc.output_arg_names(): output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname) assert output_dist_attr is not None, "dist_attr is {}".format( - op_dist_attr) - allreduce_op_dist_attr.set_output_dist_attr(output_varname, - output_dist_attr) - ctx.set_op_dist_attr_for_program(c_allreduce_sum_op, - allreduce_op_dist_attr) + op_dist_attr + ) + allreduce_op_dist_attr.set_output_dist_attr( + output_varname, output_dist_attr + ) + ctx.set_op_dist_attr_for_program( + c_allreduce_sum_op, allreduce_op_dist_attr + ) # init param sync if Weight_var.is_parameter and not op_dist_attr.is_recompute: - _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, - rank_id) + _init_param_sync( + Weight_var, dist_op_context, startup_block, ctx, rank_id + ) @staticmethod def backward(ctx, *args, **kwargs): @@ -2733,7 +3113,6 @@ class DistributedMulImpl1(DistributedOperatorImpl): # ReplicateParallel class DistributedMulImpl2(DistributedOperatorImpl): - def __init__(self, name): super(DistributedMulImpl2, self).__init__(name) @@ -2754,38 +3133,45 @@ class DistributedMulImpl2(DistributedOperatorImpl): vars = main_block.vars # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) process_mesh = dist_attr.process_mesh processes = process_mesh.processes - 
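The row-parallel branch above works the other way around: the weight is sharded along its rows and the input along its last dimension, so the local `mul` writes a partial result of full output shape into `intermediate_var_0`, and the appended `c_allreduce_sum` adds those partials across the model-parallel group. A numpy sketch of that identity (illustrative shapes and names only):

    # Illustration only -- not part of the patch.
    import numpy as np

    np.random.seed(0)
    X = np.random.rand(4, 6)              # input, sharded along its last dim
    Y = np.random.rand(6, 8)              # weight, sharded along its rows
    num_ranks = 2
    X_shards = np.split(X, num_ranks, axis=1)
    Y_shards = np.split(Y, num_ranks, axis=0)

    # each rank produces a partial result with the full output shape ...
    partials = [X_k @ Y_k for X_k, Y_k in zip(X_shards, Y_shards)]

    # ... and an allreduce(sum) over ranks recovers the serial matmul
    assert np.allclose(sum(partials), X @ Y)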
cost_mapping = build_comp_costs_from_descs(MulGradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + MulGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) # need gradient allreduce var_dim_mapping = dist_attr.get_input_dims_mapping( - backward_op.input("X")[0]) + backward_op.input("X")[0] + ) mesh_shape = process_mesh.topology batch_size_axis = var_dim_mapping[0] - if batch_size_axis > -1 and mesh_shape[ - batch_size_axis] > 1 and is_parameter_related( - backward_op.input("Y")[0], main_block): + if ( + batch_size_axis > -1 + and mesh_shape[batch_size_axis] > 1 + and is_parameter_related(backward_op.input("Y")[0], main_block) + ): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [backward_op.output('Y@GRAD')[0]] - build_dp_costs(res, dist_op, ctx, var_names, attrs, parallel_axis, - cluster) + build_dp_costs( + res, dist_op, ctx, var_names, attrs, parallel_axis, cluster + ) return res def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(MulOpCost, ctx, processes, - desc_mapping, cluster) + cost_mapping = build_comp_costs_from_descs( + MulOpCost, ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -2801,12 +3187,14 @@ class DistributedMulImpl2(DistributedOperatorImpl): if is_dim_shard(x_dims_mapping[-1]): return False if is_valid_list_index(x_dims_mapping, -2) and is_dim_shard( - x_dims_mapping[-2]): + x_dims_mapping[-2] + ): return False if is_dim_shard(y_dims_mapping[-1]): return False if is_valid_list_index(y_dims_mapping, -2) and is_dim_shard( - y_dims_mapping[-2]): + y_dims_mapping[-2] + ): return False return True @@ -2821,14 +3209,16 @@ class DistributedMulImpl2(DistributedOperatorImpl): if is_dim_shard(out_dims_mapping[-1]): return False if is_valid_list_index(out_dims_mapping, -2) and is_dim_shard( - out_dims_mapping[-2]): + out_dims_mapping[-2] + ): return False return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False if not _is_auto_compatible_for_matmul(dist_op): @@ -2852,8 +3242,10 @@ class DistributedMulImpl2(DistributedOperatorImpl): _right_operand_parameter_matmul_backward(ctx, *args, **kwargs) -register_distributed_operator_impl("mul", - DistributedMulImpl0("column_parallel")) +register_distributed_operator_impl( + "mul", DistributedMulImpl0("column_parallel") +) register_distributed_operator_impl("mul", DistributedMulImpl1("row_parallel")) -register_distributed_operator_impl("mul", - DistributedMulImpl2("replicate_parallel")) +register_distributed_operator_impl( + "mul", DistributedMulImpl2("replicate_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py b/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py index c56f121430a73d51816f6d75d44e9c4989d13efe..68d2351e1abe944c407484a3260a4a3cb82e3b24 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py @@ -20,8 +20,15 @@ from .common import 
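The three registrations above attach the column-parallel, row-parallel and replicate implementations to the same `mul` op type; which one is used is decided later by the compatibility checks each impl defines. A simplified stand-in for that registry pattern (this is not Paddle's actual implementation, just the shape of the idea):

    # Illustration only -- not part of the patch.
    _DIST_OP_IMPLS = {}

    def register_impl(op_type, impl):
        _DIST_OP_IMPLS.setdefault(op_type, []).append(impl)

    class ColumnParallel:      # placeholders for DistributedMulImpl0/1/2
        name = "column_parallel"

    class RowParallel:
        name = "row_parallel"

    class ReplicateParallel:
        name = "replicate_parallel"

    for impl in (ColumnParallel(), RowParallel(), ReplicateParallel()):
        register_impl("mul", impl)

    print([impl.name for impl in _DIST_OP_IMPLS["mul"]])
    # ['column_parallel', 'row_parallel', 'replicate_parallel']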
register_distributed_operator_impl_container from .common import register_distributed_operator_impl from ..process_group import new_process_group from ..utils import is_dim_shard, is_dim_replicate, _get_corresponding_rank -from ..utils import compute_compatible_dim_mapping, set_dist_op_desc_original_id, _get_comm_group -from ..dist_attribute import TensorDistributedAttribute, OperatorDistributedAttribute +from ..utils import ( + compute_compatible_dim_mapping, + set_dist_op_desc_original_id, + _get_comm_group, +) +from ..dist_attribute import ( + TensorDistributedAttribute, + OperatorDistributedAttribute, +) from paddle.fluid import core from paddle.fluid.framework import Operator @@ -29,7 +36,6 @@ from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype class DistributedPNorm(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedPNorm, self).__init__(op_type) @@ -39,7 +45,6 @@ register_distributed_operator_impl_container(DistributedPNorm("p_norm")) # Row Parallel class DistributedPNormImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedPNormImpl, self).__init__(name) self._forward_implemented = True @@ -62,15 +67,18 @@ class DistributedPNormImpl(DistributedOperatorImpl): return True def is_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)) or \ - (not self.is_compatible(dist_op)): + if ( + (not self.is_input_compatible(dist_op)) + or (not self.is_output_compatible(dist_op)) + or (not self.is_compatible(dist_op)) + ): return False return True @@ -90,20 +98,25 @@ class DistributedPNormImpl(DistributedOperatorImpl): batch_dim_mappings.append(dims_mapping[0]) compatible_dim_mapping = compute_compatible_dim_mapping( - batch_dim_mappings) + batch_dim_mappings + ) if compatible_dim_mapping is None: return False for arg_name in op_desc.input_arg_names(): dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name) - if len(dims_mapping - ) >= 1 and compatible_dim_mapping != dims_mapping[0]: + if ( + len(dims_mapping) >= 1 + and compatible_dim_mapping != dims_mapping[0] + ): dims_mapping[0] = compatible_dim_mapping changed = True for arg_name in op_desc.output_arg_names(): dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name) - if len(dims_mapping - ) >= 1 and compatible_dim_mapping != dims_mapping[0]: + if ( + len(dims_mapping) >= 1 + and compatible_dim_mapping != dims_mapping[0] + ): dims_mapping[0] = compatible_dim_mapping changed = True @@ -122,21 +135,25 @@ class DistributedPNormImpl(DistributedOperatorImpl): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) if rank_id not in op_dist_attr.process_mesh.processes: - rank_id = 
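`update_dims_mapping` above collects the first (batch) dimension mapping of every input and output and asks `compute_compatible_dim_mapping` for a value they can all agree on before writing it back. A simplified reading of what "compatible" means here, where -1 stands for replicated and a non-negative value names the mesh axis that shards the dimension (my own reduced version, not the real utility):

    # Illustration only -- not part of the patch.
    def compatible_dim_mapping(mappings):
        sharded = {m for m in mappings if m >= 0}
        if not sharded:
            return -1              # everyone replicated: stay replicated
        if len(sharded) == 1:
            return sharded.pop()   # one agreed mesh axis: propagate it
        return None                # conflicting shardings: incompatible

    print(compatible_dim_mapping([-1, 0, -1]))   # -> 0, sharding is propagated
    print(compatible_dim_mapping([0, 1]))        # -> None, cannot reconcile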
_get_corresponding_rank(ctx, op_dist_attr.process_mesh, - rank_id) + rank_id = _get_corresponding_rank( + ctx, op_dist_attr.process_mesh, rank_id + ) X_var = main_block.var(kwargs['X'][0]) in_dims_mapping = op_dist_attr.get_input_dims_mapping(X_var.name) @@ -145,14 +162,17 @@ class DistributedPNormImpl(DistributedOperatorImpl): break process_mesh_shape = op_dist_attr.process_mesh.topology process_mesh_group = op_dist_attr.process_mesh.processes - group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape, - axis, rank_id) + group_ranks = _get_comm_group( + process_mesh_group, process_mesh_shape, axis, rank_id + ) group = new_process_group(group_ranks) - check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'], - 'norm') - check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], - 'norm') + check_variable_and_dtype( + X_var, 'x', ['float16', 'float32', 'float64'], 'norm' + ) + check_dtype( + X_var.dtype, 'dtype', ['float16', 'float32', 'float64'], 'norm' + ) # 2. insert c_allgather op # create c_allgather output var @@ -162,32 +182,37 @@ class DistributedPNormImpl(DistributedOperatorImpl): shape=X_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_var.stop_gradient) + stop_gradient=X_var.stop_gradient, + ) # set allgather_out tensor dist_attr allgather_out_dist_attr = TensorDistributedAttribute() allgather_out_dist_attr.process_mesh = op_dist_attr.process_mesh allgather_out_dist_attr.dims_mapping = [ -1 for i in range(len(allgather_out.shape)) ] - ctx.set_tensor_dist_attr_for_program(allgather_out, - allgather_out_dist_attr) - c_allgather_op = main_block.append_op(type='c_allgather', - inputs={'X': [X_var]}, - outputs={'Out': [allgather_out]}, - attrs={ - 'ring_id': group.id, - 'use_calc_stream': True, - 'nranks': group.nranks, - 'op_role': - src_op.attr('op_role') - }) + ctx.set_tensor_dist_attr_for_program( + allgather_out, allgather_out_dist_attr + ) + c_allgather_op = main_block.append_op( + type='c_allgather', + inputs={'X': [X_var]}, + outputs={'Out': [allgather_out]}, + attrs={ + 'ring_id': group.id, + 'use_calc_stream': True, + 'nranks': group.nranks, + 'op_role': src_op.attr('op_role'), + }, + ) # set c_allgather op dist_attr allgather_op_dist_attr = OperatorDistributedAttribute() allgather_op_dist_attr.process_mesh = op_dist_attr.process_mesh - allgather_op_dist_attr.set_input_dims_mapping(X_var.name, - in_dims_mapping) + allgather_op_dist_attr.set_input_dims_mapping( + X_var.name, in_dims_mapping + ) allgather_op_dist_attr.set_output_dims_mapping( - allgather_out.name, allgather_out_dist_attr.dims_mapping) + allgather_out.name, allgather_out_dist_attr.dims_mapping + ) ctx.set_op_dist_attr_for_program(c_allgather_op, allgather_op_dist_attr) # 3. 
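The forward rewrite above gathers the sharded input with `c_allgather` before the unchanged `p_norm`, so every rank computes the norm of the full tensor rather than of its local shard. A numpy illustration with made-up shard sizes:

    # Illustration only -- not part of the patch.
    import numpy as np

    np.random.seed(0)
    X = np.random.rand(8, 3)                    # full input, sharded on axis 0
    shards = np.split(X, 4, axis=0)             # what each of 4 ranks holds

    gathered = np.concatenate(shards, axis=0)   # what c_allgather produces
    assert np.array_equal(gathered, X)          # every rank sees the full input

    # so the p_norm that follows (here a plain 2-norm) matches the serial op
    assert np.isclose(np.linalg.norm(gathered), np.linalg.norm(X))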
copy p_norm op desc and reset input name @@ -203,7 +228,8 @@ class DistributedPNormImpl(DistributedOperatorImpl): dist_op_desc.set_output(output_name, kwargs[output_name]) pnorm_op = Operator(main_block, dist_op_desc) op_dist_attr.set_input_dims_mapping( - allgather_out.name, allgather_out_dist_attr.dims_mapping) + allgather_out.name, allgather_out_dist_attr.dims_mapping + ) ctx.set_op_dist_attr_for_program(pnorm_op, op_dist_attr) @staticmethod @@ -219,17 +245,20 @@ class DistributedPNormImpl(DistributedOperatorImpl): # check validation of inputs / outputs for input_name in backward_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( backward_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in backward_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( backward_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) X_grad_var = main_block.var(kwargs['X@GRAD'][0]) @@ -244,7 +273,8 @@ class DistributedPNormImpl(DistributedOperatorImpl): shape=new_X_var.shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=X_grad_var.stop_gradient) + stop_gradient=X_grad_var.stop_gradient, + ) new_kwargs['X@GRAD'] = [new_X_grad.name] new_X_var_dist_attr = ctx.get_tensor_dist_attr_for_program(new_X_var) ctx.set_tensor_dist_attr_for_program(new_X_grad, new_X_var_dist_attr) @@ -258,10 +288,12 @@ class DistributedPNormImpl(DistributedOperatorImpl): for output_name in backward_op.desc.output_names(): dist_op_desc.set_output(output_name, new_kwargs[output_name]) p_norm_grad_op = Operator(main_block, dist_op_desc) - op_dist_attr.set_input_dims_mapping(new_X_var.name, - new_X_var_dist_attr.dims_mapping) - op_dist_attr.set_output_dims_mapping(new_X_grad.name, - new_X_var_dist_attr.dims_mapping) + op_dist_attr.set_input_dims_mapping( + new_X_var.name, new_X_var_dist_attr.dims_mapping + ) + op_dist_attr.set_output_dims_mapping( + new_X_grad.name, new_X_var_dist_attr.dims_mapping + ) ctx.set_op_dist_attr_for_program(p_norm_grad_op, op_dist_attr) # 2. 
insert slice op @@ -271,8 +303,12 @@ class DistributedPNormImpl(DistributedOperatorImpl): from ..reshard import Resharder partition_idx = Resharder.compute_partition_index( - rank_id, new_X_grad.shape, dims_mapping, process_mesh_shape, - process_mesh_group) + rank_id, + new_X_grad.shape, + dims_mapping, + process_mesh_shape, + process_mesh_group, + ) slice_starts = [] slice_ends = [] slices_axes = [] @@ -287,22 +323,28 @@ class DistributedPNormImpl(DistributedOperatorImpl): "starts": slice_starts, "ends": slice_ends, "infer_flags": infer_flags, - "op_role": backward_op.attr('op_role') + "op_role": backward_op.attr('op_role'), } - slice_op = main_block.append_op(type='slice', - inputs={'Input': [new_X_grad]}, - outputs={'Out': [X_grad_var]}, - attrs=attrs) + slice_op = main_block.append_op( + type='slice', + inputs={'Input': [new_X_grad]}, + outputs={'Out': [X_grad_var]}, + attrs=attrs, + ) X_grad_var_dims_mapping = op_dist_attr.get_output_dims_mapping( - X_grad_var.name) + X_grad_var.name + ) slice_op_dist_attr = OperatorDistributedAttribute() slice_op_dist_attr.process_mesh = op_dist_attr.process_mesh slice_op_dist_attr.set_input_dims_mapping( - new_X_grad.name, new_X_var_dist_attr.dims_mapping) - slice_op_dist_attr.set_output_dims_mapping(X_grad_var.name, - X_grad_var_dims_mapping) + new_X_grad.name, new_X_var_dist_attr.dims_mapping + ) + slice_op_dist_attr.set_output_dims_mapping( + X_grad_var.name, X_grad_var_dims_mapping + ) ctx.set_op_dist_attr_for_program(slice_op, slice_op_dist_attr) -register_distributed_operator_impl("p_norm", - DistributedPNormImpl("row_parallel")) +register_distributed_operator_impl( + "p_norm", DistributedPNormImpl("row_parallel") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py index 77372257f4f75f0f1d0810a89e5e42aef9a7222b..d007aeda423f564db86dd331c53c01520a676b39 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py @@ -23,18 +23,17 @@ from ..process_group import new_process_group class DistributedReduceSumPrimtive(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedReduceSumPrimtive, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedReduceSumPrimtive("reduce_sum_p")) + DistributedReduceSumPrimtive("reduce_sum_p") +) # Batch Dimension ReduceSum Primitive class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedReduceSumPrimtiveImpl0, self).__init__(name) self._forward_implemented = True @@ -56,7 +55,7 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): output_name = outputs[0] output_var = dist_op.serial_op.block.var(output_name) - if output_var.shape != (1, ): + if output_var.shape != (1,): return False return True @@ -66,7 +65,8 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): op_dist_attr = dist_op.dist_attr return self.is_input_compatible(dist_op) and self.is_output_compatible( - dist_op) + dist_op + ) def update_dims_mapping(self, dist_op): changed = False @@ -85,17 +85,20 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), 
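In the backward pass the gathered gradient has the full shape, so the slice op inserted above cuts it back down to the partition this rank owns, using starts/ends derived from `Resharder.compute_partition_index`. A deliberately reduced sketch for the even, one-axis case (the real helper handles arbitrary meshes and mappings; the function name below is mine):

    # Illustration only -- not part of the patch.
    def local_slice(rank, world_size, full_dim):
        """Return the [start, end) range of `full_dim` owned by `rank` when the
        axis is split evenly across `world_size` processes."""
        per_rank = full_dim // world_size
        return rank * per_rank, (rank + 1) * per_rank

    full_grad_rows = 8
    for rank in range(4):
        # these pairs become the `starts` / `ends` attrs of the slice op
        print(rank, local_slice(rank, 4, full_grad_rows))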
"number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) # replicate op in dist program dist_op_desc = main_block.append_op(type='nop').desc @@ -109,14 +112,16 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): # batch dimension synchronization var_name = src_op.output_arg_names[0] sync_group = new_process_group(ctx.data_parallel_group) - allreduce_op = main_block.append_op(type='c_allreduce_sum', - inputs={'X': [var_name]}, - outputs={'Out': [var_name]}, - attrs={ - 'ring_id': sync_group.id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) + allreduce_op = main_block.append_op( + type='c_allreduce_sum', + inputs={'X': [var_name]}, + outputs={'Out': [var_name]}, + attrs={ + 'ring_id': sync_group.id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) # dist attr var = main_block.var(var_name) @@ -124,19 +129,24 @@ class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) new_op_attr = OperatorDistributedAttribute() new_op_attr.process_mesh = op_dist_attr.process_mesh - new_op_attr.set_output_dims_mapping(var.name, - tensor_dist_attr.dims_mapping) - new_op_attr.set_input_dims_mapping(var.name, - tensor_dist_attr.dims_mapping) + new_op_attr.set_output_dims_mapping( + var.name, tensor_dist_attr.dims_mapping + ) + new_op_attr.set_input_dims_mapping( + var.name, tensor_dist_attr.dims_mapping + ) ctx.set_op_dist_attr_for_program(allreduce_op, new_op_attr) @staticmethod def backward(ctx, *args, **kwargs): raise RuntimeError( - "primitive operator does NOT have backward function, op type: {}". 
- format(str(op.type))) + "primitive operator does NOT have backward function, op type: {}".format( + str(op.type) + ) + ) register_distributed_operator_impl( "reduce_sum_p", - DistributedReduceSumPrimtiveImpl0("batch_dimension_reduce_sum_p")) + DistributedReduceSumPrimtiveImpl0("batch_dimension_reduce_sum_p"), +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py index e9f32b80ca13c0e6deac2b927d73b04090a74b88..67b32dbecae856695deeeac12db866c92a1b2b1f 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_reshape.py @@ -27,7 +27,6 @@ from paddle.distributed.fleet.meta_optimizers.common import OpRole class DistributedReshape2(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedReshape2, self).__init__(op_type) @@ -36,7 +35,6 @@ register_distributed_operator_impl_container(DistributedReshape2("reshape2")) class DistributedReshapeImpl0(DistributedOperatorImpl): - def __init__(self, name): super(DistributedReshapeImpl0, self).__init__(name) self._forward_implemented = True @@ -66,19 +64,21 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): for idx, axis in enumerate(dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_attr.process_mesh.processes for key in desc_mapping: desc_mapping[key]["shape"] = shape_list - cost_mapping = build_comp_costs_from_descs(Reshape2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2OpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) return res @@ -86,16 +86,17 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(Reshape2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) backward_op = dist_op.serial_op @@ -105,7 +106,8 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and is_parameter_related( - varname, main_block): + varname, main_block + ): # NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op var_dim_mapping = dist_attr.get_input_dims_mapping(varname) @@ -115,8 +117,15 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + 
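The batch-dimension `reduce_sum_p` implementation above keeps the local reduction and appends a `c_allreduce_sum` over the data-parallel group, because each rank only reduced its own batch shard. A numpy check of that reasoning (shard counts invented for the example):

    # Illustration only -- not part of the patch.
    import numpy as np

    np.random.seed(0)
    full_batch = np.random.rand(8, 5)
    local_batches = np.split(full_batch, 4, axis=0)   # 4 data-parallel ranks

    local_sums = [b.sum() for b in local_batches]     # local reduce_sum_p
    global_sum = sum(local_sums)                      # c_allreduce_sum

    assert np.isclose(global_sum, full_batch.sum())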
attrs, + parallel_axis, + cluster, + ) return res @@ -150,8 +159,9 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -160,7 +170,8 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): out_name = op_desc.output('Out')[0] x_shape_name = op_desc.output('XShape')[0] x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) @@ -186,11 +197,13 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -210,23 +223,27 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Out_var = main_block.var(kwargs['Out'][0]) @@ -247,8 +264,9 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): for idx, axis in enumerate(dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # create op new_op_desc = main_block.append_op(type='nop').desc @@ -267,7 +285,6 @@ class DistributedReshapeImpl0(DistributedOperatorImpl): class DistributedReshapeImpl1(DistributedOperatorImpl): - def __init__(self, name): super(DistributedReshapeImpl1, self).__init__(name) self._forward_implemented = True @@ -297,19 +314,21 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): for idx, axis in enumerate(dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = 
dist_attr.process_mesh.processes for key in desc_mapping: desc_mapping[key]["shape"] = shape_list - cost_mapping = build_comp_costs_from_descs(Reshape2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2OpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) return res @@ -317,16 +336,17 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(Reshape2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) backward_op = dist_op.serial_op @@ -336,7 +356,8 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and not is_parameter_related( - varname, main_block): + varname, main_block + ): # NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op var_dim_mapping = dist_attr.get_input_dims_mapping(varname) @@ -346,8 +367,15 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res @@ -381,8 +409,9 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -393,7 +422,8 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) if is_dim_shard(x_dims_mapping[-1]): return False @@ -420,11 +450,13 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) for i in range(len(out_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -444,23 +476,27 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): src_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute 
!".format(str(src_op)) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Out_var = main_block.var(kwargs['Out'][0]) @@ -481,8 +517,9 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): for idx, axis in enumerate(dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # create op new_op_desc = main_block.append_op(type='nop').desc @@ -501,7 +538,6 @@ class DistributedReshapeImpl1(DistributedOperatorImpl): class DistributedReshapeImpl2(DistributedOperatorImpl): - def __init__(self, name): super(DistributedReshapeImpl2, self).__init__(name) self._forward_implemented = True @@ -531,19 +567,21 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): for idx, axis in enumerate(dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_attr.process_mesh.processes for key in desc_mapping: desc_mapping[key]["shape"] = shape_list - cost_mapping = build_comp_costs_from_descs(Reshape2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2OpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) return res @@ -551,16 +589,17 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(Reshape2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Reshape2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) backward_op = dist_op.serial_op @@ -570,7 +609,8 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and not is_parameter_related( - varname, main_block): + varname, main_block + ): # NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op var_dim_mapping = dist_attr.get_input_dims_mapping(varname) @@ -580,8 +620,15 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + 
"@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res @@ -612,8 +659,9 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -624,7 +672,8 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) for idx, item in enumerate(x_dims_mapping[:-1]): if out_dims_mapping[idx] != item: @@ -648,11 +697,13 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) for i in range(len(out_dims_mapping) - 1): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -671,23 +722,27 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): main_block = dist_op_context.work_block src_op = dist_op_context.cur_src_op op_dist_attr = ctx.get_op_dist_attr_for_program(src_op) - assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(src_op)) + assert ( + op_dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format(str(src_op)) # check validation of inputs / outputs for input_name in src_op.desc.input_names(): assert input_name in kwargs, "input [{}] is not given".format( - input_name) + input_name + ) assert len(kwargs[input_name]) == len( src_op.desc.input(input_name) ), "number of tensor for input [{}] is not match".format(input_name) for output_name in src_op.desc.output_names(): assert output_name in kwargs, "input [{}] is not given".format( - output_name) + output_name + ) assert len(kwargs[output_name]) == len( src_op.desc.output(output_name) ), "number of tensor for input [{}] is not match".format( - output_name) + output_name + ) X_var = main_block.var(kwargs['X'][0]) Out_var = main_block.var(kwargs['Out'][0]) @@ -708,8 +763,9 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): for idx, axis in enumerate(out_dim_mapping): if axis >= 0: if len(shape_list) > idx: - shape_list[ - idx] = shape_list[idx] // process_mesh_shape[axis] + shape_list[idx] = ( + shape_list[idx] // process_mesh_shape[axis] + ) # create op new_op_desc = main_block.append_op(type='nop').desc @@ -727,9 +783,12 @@ class DistributedReshapeImpl2(DistributedOperatorImpl): DistributedDefaultImpl0.backward(ctx, *args, **kwargs) -register_distributed_operator_impl("reshape2", - DistributedReshapeImpl0("add_one_dim_back")) register_distributed_operator_impl( - "reshape2", DistributedReshapeImpl1("remove_one_dim_back")) -register_distributed_operator_impl("reshape2", - DistributedReshapeImpl2("same_dim_shape")) + "reshape2", DistributedReshapeImpl0("add_one_dim_back") +) +register_distributed_operator_impl( + "reshape2", DistributedReshapeImpl1("remove_one_dim_back") +) 
+register_distributed_operator_impl( + "reshape2", DistributedReshapeImpl2("same_dim_shape") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_shape.py b/python/paddle/distributed/auto_parallel/operators/dist_shape.py index 313f296ab96246061056cb5d2da801bea610add0..47254cd99854adc0ca4a6258e661e3aedf6aec56 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_shape.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_shape.py @@ -21,7 +21,6 @@ from ..utils import is_dim_shard class DistributedShape(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedShape, self).__init__(op_type) @@ -30,7 +29,6 @@ register_distributed_operator_impl_container(DistributedShape("shape")) class DistributedShapeImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedShapeImpl, self).__init__(name) self._forward_implemented = True @@ -52,8 +50,9 @@ class DistributedShapeImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False return True diff --git a/python/paddle/distributed/auto_parallel/operators/dist_slice.py b/python/paddle/distributed/auto_parallel/operators/dist_slice.py index 1c9b0e482a13c5d15206adb3c1e9b7411ba541d8..82562c8b075644f32bd13e6ce51e4698b0a16597 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_slice.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_slice.py @@ -22,7 +22,6 @@ from .dist_default import DistributedDefaultImpl0 class DistributedSlice(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedSlice, self).__init__(op_type) @@ -31,7 +30,6 @@ register_distributed_operator_impl_container(DistributedSlice("slice")) class DistributedSliceImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedSliceImpl, self).__init__(name) self._forward_implemented = True @@ -75,8 +73,9 @@ class DistributedSliceImpl(DistributedOperatorImpl): return True def is_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -87,7 +86,8 @@ class DistributedSliceImpl(DistributedOperatorImpl): in_dims_mapping = op_dist_attr.get_input_dims_mapping(in_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) if len(in_dims_mapping) - len(decrease_axis) != 0 and len( - out_dims_mapping) != len(in_dims_mapping) - len(decrease_axis): + out_dims_mapping + ) != len(in_dims_mapping) - len(decrease_axis): return False new_out_dims_mapping = [] @@ -102,9 +102,11 @@ class DistributedSliceImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)) or \ - (not self.is_compatible(dist_op)): + if ( + (not self.is_input_compatible(dist_op)) + or (not self.is_output_compatible(dist_op)) + or (not self.is_compatible(dist_op)) + ): return False return True @@ -135,7 +137,8 @@ class DistributedSliceImpl(DistributedOperatorImpl): assert len(ref_dims_mapping) == len(out_dims_mapping) for i in range(len(out_dims_mapping)): compatible_dim_mapping = compute_compatible_dim_mapping( - [out_dims_mapping[i], 
ref_dims_mapping[i]]) + [out_dims_mapping[i], ref_dims_mapping[i]] + ) if compatible_dim_mapping is None: continue if ref_dims_mapping[i] != compatible_dim_mapping: @@ -156,5 +159,6 @@ class DistributedSliceImpl(DistributedOperatorImpl): DistributedDefaultImpl0.backward(ctx, *args, **kwargs) -register_distributed_operator_impl("slice", - DistributedSliceImpl("decrease_in_axis")) +register_distributed_operator_impl( + "slice", DistributedSliceImpl("decrease_in_axis") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_softmax.py b/python/paddle/distributed/auto_parallel/operators/dist_softmax.py index 8d85c2c19fcca3540d0909abc2c642d9fa591b94..6a980f724639bc3e135cf2502cb662cd0fc109a1 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_softmax.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_softmax.py @@ -27,7 +27,6 @@ from paddle.distributed.fleet.meta_optimizers.common import OpRole class DistributedSoftmax(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedSoftmax, self).__init__(op_type) @@ -36,7 +35,6 @@ register_distributed_operator_impl_container(DistributedSoftmax("softmax")) class DistributedSoftmaxImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedSoftmaxImpl, self).__init__(name) self._forward_implemented = False @@ -53,12 +51,13 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes - cost_mapping = build_comp_costs_from_descs(SoftmaxOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + SoftmaxOpCost, ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -66,14 +65,15 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes - cost_mapping = build_comp_costs_from_descs(SoftmaxGradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + SoftmaxGradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) backward_op = dist_op.serial_op @@ -83,7 +83,8 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and is_parameter_related( - varname, main_block): + varname, main_block + ): # NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op var_dim_mapping = dist_attr.get_input_dims_mapping(varname) @@ -93,8 +94,15 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res @@ -129,8 +137,9 @@ class 
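The slice compatibility check above compares the input and output dims mappings after accounting for `decrease_axis`: axes removed by the slice should simply disappear from the mapping, and everything that remains must match. A plausible reconstruction of that rule as a standalone helper (the loop that builds `new_out_dims_mapping` is not fully visible in the hunk, so treat this as a hedged reading, not the exact code):

    # Illustration only -- not part of the patch.
    def expected_out_dims_mapping(in_dims_mapping, decrease_axis):
        dropped = set(decrease_axis)
        return [m for i, m in enumerate(in_dims_mapping) if i not in dropped]

    # input sharded on axis 0, axis 1 decreased away by the slice
    print(expected_out_dims_mapping([0, -1, -1], decrease_axis=[1]))  # -> [0, -1]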
DistributedSoftmaxImpl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -159,7 +168,8 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -175,4 +185,5 @@ class DistributedSoftmaxImpl(DistributedOperatorImpl): register_distributed_operator_impl( - "softmax", DistributedSoftmaxImpl("replicate_last_axis")) + "softmax", DistributedSoftmaxImpl("replicate_last_axis") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_split.py b/python/paddle/distributed/auto_parallel/operators/dist_split.py index cf02e00a337648a6866022971b206205bbb98abf..8411d6d83dd6e8ff8eabe4d24c378ca6d9499435 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_split.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_split.py @@ -22,7 +22,6 @@ from .dist_default import DistributedDefaultImpl0 class DistributedSplit(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedSplit, self).__init__(op_type) @@ -31,7 +30,6 @@ register_distributed_operator_impl_container(DistributedSplit("split")) class DistributedSplitImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedSplitImpl, self).__init__(name) self._forward_implemented = True @@ -62,8 +60,9 @@ class DistributedSplitImpl(DistributedOperatorImpl): return True def is_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -91,16 +90,19 @@ class DistributedSplitImpl(DistributedOperatorImpl): out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) for i in range(len(x_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [x_dims_mapping, out_dims_mapping], [i, i]) + [x_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True return changed def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)) or \ - (not self.is_compatible(dist_op)): + if ( + (not self.is_input_compatible(dist_op)) + or (not self.is_output_compatible(dist_op)) + or (not self.is_compatible(dist_op)) + ): return False return True @@ -114,5 +116,6 @@ class DistributedSplitImpl(DistributedOperatorImpl): DistributedDefaultImpl0.backward(ctx, *args, **kwargs) -register_distributed_operator_impl("split", - DistributedSplitImpl("replicate_in_axis")) +register_distributed_operator_impl( + "split", DistributedSplitImpl("replicate_in_axis") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_transpose.py b/python/paddle/distributed/auto_parallel/operators/dist_transpose.py index 96220bf0d50fb7316e7baf50eaa3d74a6da4ac06..6bacb8bace40902e52ec89d46d5b5cbf3c446b3e 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_transpose.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_transpose.py @@ -26,17 +26,16 @@ from paddle.distributed.fleet.meta_optimizers.common import 
OpRole class DistributedTranspose2(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedTranspose2, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedTranspose2("transpose2")) + DistributedTranspose2("transpose2") +) class DistributedTranspose2Impl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedTranspose2Impl, self).__init__(name) self._forward_implemented = False @@ -49,8 +48,9 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): return True def is_auto_compatible(self, dist_op): - if (not self.is_input_compatible(dist_op)) or \ - (not self.is_output_compatible(dist_op)): + if (not self.is_input_compatible(dist_op)) or ( + not self.is_output_compatible(dist_op) + ): return False op_desc = dist_op.serial_op.desc @@ -60,7 +60,8 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): out_name = op_desc.output('Out')[0] x_shape_name = op_desc.output('XShape')[0] x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) new_dims_mapping = [-1 for i in range(len(x_dims_mapping))] @@ -91,7 +92,8 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name) out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name) x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping( - x_shape_name) + x_shape_name + ) perm = op_desc.attr('axis') assert len(x_dims_mapping) == len(perm) @@ -102,7 +104,8 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): for i in range(len(out_dims_mapping)): dim_changed = compute_compatible_and_update_dim_mapping( - [new_dims_mapping, out_dims_mapping], [i, i]) + [new_dims_mapping, out_dims_mapping], [i, i] + ) if dim_changed: changed = True @@ -127,13 +130,14 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): def calc_fwd_cost(self, dist_op, ctx, cluster): # calc comp op cost - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) processes = dist_op.dist_attr.process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(Transpose2OpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Transpose2OpCost, ctx, processes, desc_mapping, cluster + ) res_cost = [cost_mapping] return res_cost @@ -141,15 +145,16 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): def calc_bwd_cost(self, dist_op, ctx, cluster): # calc comp op cost res = [] - desc_mapping = build_comp_desc_from_dist_op(dist_op=dist_op, - dist_context=ctx) + desc_mapping = build_comp_desc_from_dist_op( + dist_op=dist_op, dist_context=ctx + ) dist_attr = dist_op.dist_attr process_mesh = dist_attr.process_mesh processes = process_mesh.processes op_type = dist_op.serial_op.type - cost_mapping = build_comp_costs_from_descs(Transpose2GradOpCost, ctx, - processes, desc_mapping, - cluster) + cost_mapping = build_comp_costs_from_descs( + Transpose2GradOpCost, ctx, processes, desc_mapping, cluster + ) res.append(cost_mapping) backward_op = dist_op.serial_op @@ -159,7 +164,8 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): for input_name in backward_op.desc.input_names(): for varname in backward_op.desc.input(input_name): if "@GRAD" not in varname and 
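For `transpose2`, the compatibility and update logic above relies on the fact that transposing a tensor permutes its dims mapping exactly as it permutes the axes: output axis `i` carries the sharding of input axis `perm[i]`. In miniature (helper name is mine):

    # Illustration only -- not part of the patch.
    def permute_dims_mapping(x_dims_mapping, perm):
        return [x_dims_mapping[p] for p in perm]

    # axis 0 sharded on mesh dim 0; after transpose with perm=[1, 0]
    # the sharding moves to output axis 1
    print(permute_dims_mapping([0, -1], perm=[1, 0]))   # -> [-1, 0]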
is_parameter_related( - varname, main_block): + varname, main_block + ): # NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op var_dim_mapping = dist_attr.get_input_dims_mapping(varname) @@ -169,8 +175,15 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): parallel_axis = batch_size_axis attrs = {"use_calc_stream": True} var_names = [varname + "@GRAD"] - build_dp_costs(res, dist_op, ctx, var_names, attrs, - parallel_axis, cluster) + build_dp_costs( + res, + dist_op, + ctx, + var_names, + attrs, + parallel_axis, + cluster, + ) return res @staticmethod @@ -183,4 +196,5 @@ class DistributedTranspose2Impl(DistributedOperatorImpl): register_distributed_operator_impl( - "transpose2", DistributedTranspose2Impl("same_mapping_transpose")) + "transpose2", DistributedTranspose2Impl("same_mapping_transpose") +) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py b/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py index cbbcaef5ee47f667935e90e647dd667fa8200b1d..389473660799c770611a4e7a3b8177214e7f93c4 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py @@ -20,17 +20,16 @@ from ..utils import set_dist_op_desc_original_id class DistributedUpdateLossScaling(DistributedOperatorImplContainer): - def __init__(self, op_type): super(DistributedUpdateLossScaling, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedUpdateLossScaling("update_loss_scaling")) + DistributedUpdateLossScaling("update_loss_scaling") +) class DistributedUpdateLossScalingImpl(DistributedOperatorImpl): - def __init__(self, name): super(DistributedUpdateLossScalingImpl, self).__init__(name) self._name = name @@ -60,7 +59,8 @@ class DistributedUpdateLossScalingImpl(DistributedOperatorImpl): @staticmethod def forward(ctx, *args, **kwargs): raise RuntimeError( - "DistributedUpdateLossScalingImpl's forward should not be called !") + "DistributedUpdateLossScalingImpl's forward should not be called !" 
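`DistributedUpdateLossScalingImpl.backward`, continued in the hunk that follows, funnels every input and output slot through the same two checks: the key must be present in `kwargs`, and each scalar slot must hold exactly one variable name. A compact stand-in for that pattern (`require_single` is a made-up helper, not Paddle API):

    # Illustration only -- not part of the patch.
    def require_single(kwargs, key, kind="input"):
        assert key in kwargs, "{} [{}] is not given".format(kind, key)
        assert len(kwargs[key]) == 1, (
            "update_loss_scaling {} {} take 1 variable but got {}".format(
                kind, key, kwargs[key]
            )
        )
        return kwargs[key][0]

    kwargs = {"FoundInfinite": ["found_inf_0"], "PrevLossScaling": ["loss_scaling_0"]}
    print(require_single(kwargs, "FoundInfinite"))   # -> 'found_inf_0'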
+ ) @staticmethod def backward(ctx, *args, **kwargs): @@ -71,59 +71,89 @@ class DistributedUpdateLossScalingImpl(DistributedOperatorImpl): backward_op = dist_op_context.cur_src_op rank_id = dist_op_context.rank_id dist_attr = ctx.get_op_dist_attr_for_program(backward_op) - assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format( - str(backward_op)) + assert ( + dist_attr is not None + ), "backward op [{}] don't have dist attribute !".format( + str(backward_op) + ) assert rank_id in dist_attr.process_mesh.processes assert 'X' in kwargs, "input [{}] is not given".format('X') assert 'FoundInfinite' in kwargs, "input [{}] is not given".format( - 'FoundInfinite') + 'FoundInfinite' + ) assert 'PrevLossScaling' in kwargs, "input [{}] is not given".format( - 'PrevLossScaling') + 'PrevLossScaling' + ) assert 'InGoodSteps' in kwargs, "input [{}] is not given".format( - 'InGoodSteps') + 'InGoodSteps' + ) assert 'InBadSteps' in kwargs, "input [{}] is not given".format( - 'InBadSteps') + 'InBadSteps' + ) assert 'Out' in kwargs, "output [{}] is not given".format('Out') assert 'LossScaling' in kwargs, "output [{}] is not given".format( - 'LossScaling') + 'LossScaling' + ) assert 'OutGoodSteps' in kwargs, "output [{}] is not given".format( - 'OutGoodSteps') + 'OutGoodSteps' + ) assert 'OutBadSteps' in kwargs, "output [{}] is not given".format( - 'OutBadSteps') - - assert len(kwargs['FoundInfinite']) == 1, \ - "update_loss_scaling input FoundInfinite take 1 variable but got {}".format( - kwargs['FoundInfinite']) - assert len(kwargs['PrevLossScaling']) == 1, \ - "update_loss_scaling input PrevLossScaling take 1 variable but got {}".format( - kwargs['PrevLossScaling']) - assert len(kwargs['InGoodSteps']) == 1, \ - "update_loss_scaling input InGoodSteps take 1 variable but got {}".format( - kwargs['InGoodSteps']) - assert len(kwargs['InBadSteps']) == 1, \ - "update_loss_scaling input InBadSteps take 1 variable but got {}".format( - kwargs['InBadSteps']) - assert len(kwargs['LossScaling']) == 1, \ - "update_loss_scaling output LossScaling take 1 variable but got {}".format( - kwargs['LossScaling']) - assert len(kwargs['OutGoodSteps']) == 1, \ - "update_loss_scaling output OutGoodSteps take 1 variable but got {}".format( - kwargs['OutGoodSteps']) - assert len(kwargs['OutBadSteps']) == 1, \ - "update_loss_scaling output OutBadSteps take 1 variable but got {}".format( - kwargs['OutBadSteps']) - - assert len(kwargs['X']) == len(kwargs['Out']), \ - "update_loss_scaling got [{}] X and [{}] Out, which are supposed to be equal".format( - len(kwargs['X']), len(kwargs['Out'])) + 'OutBadSteps' + ) + + assert ( + len(kwargs['FoundInfinite']) == 1 + ), "update_loss_scaling input FoundInfinite take 1 variable but got {}".format( + kwargs['FoundInfinite'] + ) + assert ( + len(kwargs['PrevLossScaling']) == 1 + ), "update_loss_scaling input PrevLossScaling take 1 variable but got {}".format( + kwargs['PrevLossScaling'] + ) + assert ( + len(kwargs['InGoodSteps']) == 1 + ), "update_loss_scaling input InGoodSteps take 1 variable but got {}".format( + kwargs['InGoodSteps'] + ) + assert ( + len(kwargs['InBadSteps']) == 1 + ), "update_loss_scaling input InBadSteps take 1 variable but got {}".format( + kwargs['InBadSteps'] + ) + assert ( + len(kwargs['LossScaling']) == 1 + ), "update_loss_scaling output LossScaling take 1 variable but got {}".format( + kwargs['LossScaling'] + ) + assert ( + len(kwargs['OutGoodSteps']) == 1 + ), "update_loss_scaling output OutGoodSteps take 1 variable but got {}".format( + 
kwargs['OutGoodSteps'] + ) + assert ( + len(kwargs['OutBadSteps']) == 1 + ), "update_loss_scaling output OutBadSteps take 1 variable but got {}".format( + kwargs['OutBadSteps'] + ) + + assert len(kwargs['X']) == len( + kwargs['Out'] + ), "update_loss_scaling got [{}] X and [{}] Out, which are supposed to be equal".format( + len(kwargs['X']), len(kwargs['Out']) + ) filter_vars = [] for varname in kwargs['X']: - if rank_id in ctx.get_tensor_dist_attr_for_program( - main_block.var(varname)).process_mesh.processes: + if ( + rank_id + in ctx.get_tensor_dist_attr_for_program( + main_block.var(varname) + ).process_mesh.processes + ): filter_vars.append(varname) # replicate op in dist program @@ -136,4 +166,5 @@ class DistributedUpdateLossScalingImpl(DistributedOperatorImpl): register_distributed_operator_impl( "update_loss_scaling", - DistributedUpdateLossScalingImpl("update_loss_scaling")) + DistributedUpdateLossScalingImpl("update_loss_scaling"), +) diff --git a/python/paddle/distributed/auto_parallel/parallelizer.py b/python/paddle/distributed/auto_parallel/parallelizer.py index b25d1d8b33bf7844493a1049c8c503abb3ecb43a..aad15642f1b3e1f6472121a1252631d8334f8fa2 100644 --- a/python/paddle/distributed/auto_parallel/parallelizer.py +++ b/python/paddle/distributed/auto_parallel/parallelizer.py @@ -79,8 +79,12 @@ class AutoParallelizer: self._pass_context = PassContext() self._need_rank_mapping = os.getenv("PADDLE_NEED_RANK_MAPPING") - self._need_rank_mapping = True if self._need_rank_mapping and \ - self._need_rank_mapping.lower() == 'true' else False + self._need_rank_mapping = ( + True + if self._need_rank_mapping + and self._need_rank_mapping.lower() == 'true' + else False + ) # self._pass_context = None def _remove_distributed_attrs(self, main_program): @@ -93,8 +97,9 @@ class AutoParallelizer: if suffix in attr_name: op._remove_attr(attr_name) - def _apply_pre_optimization_passes(self, main_program, startup_program, - loss, params_grads, no_grad_set): + def _apply_pre_optimization_passes( + self, main_program, startup_program, loss, params_grads, no_grad_set + ): # apply amp pass if self._dist_strategy.amp: config = copy.deepcopy(self._dist_strategy.amp_configs) @@ -104,13 +109,15 @@ class AutoParallelizer: if config["use_pure_fp16"]: config["base_opt"] = self._optimizer auto_parallel_fp16_pass = new_pass("auto_parallel_fp16", config) - auto_parallel_fp16_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_fp16_pass.apply( + [main_program], [startup_program], self._pass_context + ) loss = auto_parallel_fp16_pass.get_loss() else: auto_parallel_amp_pass = new_pass("auto_parallel_amp", config) - auto_parallel_amp_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_amp_pass.apply( + [main_program], [startup_program], self._pass_context + ) loss = auto_parallel_amp_pass.get_loss() # apply recompute pass @@ -119,14 +126,22 @@ class AutoParallelizer: config["dist_context"] = self._dist_context config["no_grad_set"] = copy.deepcopy(no_grad_set) config["loss"] = loss - auto_parallel_recompute_pass = new_pass("auto_parallel_recompute", - config) - auto_parallel_recompute_pass.apply([main_program], - [startup_program], - self._pass_context) - - def _generate_backward(self, main_program, startup_program, loss, - parameter_list, no_grad_set, callbacks): + auto_parallel_recompute_pass = new_pass( + "auto_parallel_recompute", config + ) + auto_parallel_recompute_pass.apply( + [main_program], [startup_program], self._pass_context + ) + + def 
_generate_backward( + self, + main_program, + startup_program, + loss, + parameter_list, + no_grad_set, + callbacks, + ): with program_guard(main_program, startup_program): params_grads = append_backward( @@ -134,7 +149,8 @@ class AutoParallelizer: parameter_list, no_grad_set, callbacks, - distop_context=self._dist_context.dist_op_context) + distop_context=self._dist_context.dist_op_context, + ) self._completer = Completer(self._dist_context) self._completer.complete_backward_annotation(main_program) self._dist_context.block_state.parse_backward_blocks(main_program) @@ -153,18 +169,21 @@ class AutoParallelizer: return optimize_ops - def _apply_post_optimization_passes(self, main_program, startup_program, - rank, params_grads): + def _apply_post_optimization_passes( + self, main_program, startup_program, rank, params_grads + ): if self._dist_strategy.sharding: config = copy.deepcopy(self._dist_strategy.sharding_configs) config["dist_context"] = self._dist_context config["params_grads"] = params_grads config["global_rank"] = rank - auto_parallel_sharding_pass = new_pass("auto_parallel_sharding", - config) - auto_parallel_sharding_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_sharding_pass = new_pass( + "auto_parallel_sharding", config + ) + auto_parallel_sharding_pass.apply( + [main_program], [startup_program], self._pass_context + ) params_grads = self._pass_context.get_attr("params_grads") config = copy.deepcopy(self._dist_strategy.sharding_configs) @@ -172,18 +191,20 @@ class AutoParallelizer: config["params_grads"] = params_grads config["rank_id"] = rank auto_parallel_clip_pass = new_pass("auto_parallel_grad_clip", config) - auto_parallel_clip_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_clip_pass.apply( + [main_program], [startup_program], self._pass_context + ) if self._dist_strategy.gradient_merge: config = copy.deepcopy(self._dist_strategy.gradient_merge_configs) config["dist_context"] = self._dist_context config["params_grads"] = params_grads auto_parallel_gradient_merge_pass = new_pass( - "auto_parallel_gradient_merge_pass", config) - auto_parallel_gradient_merge_pass.apply([main_program], - [startup_program], - self._pass_context) + "auto_parallel_gradient_merge_pass", config + ) + auto_parallel_gradient_merge_pass.apply( + [main_program], [startup_program], self._pass_context + ) def _get_dist_program(self, rank, dist_context=None, relaunch_phase=False): completed_main_program = None @@ -197,8 +218,9 @@ class AutoParallelizer: self._dist_context = DistributedContext() _logger.info("Start annotation dist attr.") self._completer = Completer(self._dist_context) - completed_main_program = self._completer.complete_forward_annotation( - serial_main_program) + completed_main_program = ( + self._completer.complete_forward_annotation(serial_main_program) + ) else: completed_main_program = serial_main_program self._dist_context = copy.deepcopy(dist_context) @@ -208,34 +230,54 @@ class AutoParallelizer: # serial backward pass params_grads = self._generate_backward( - completed_main_program, serial_startup_program, serial_loss, - self._parameter_list, self._no_grad_set, self._callbacks) + completed_main_program, + serial_startup_program, + serial_loss, + self._parameter_list, + self._no_grad_set, + self._callbacks, + ) # serial forward pass - self._apply_pre_optimization_passes(completed_main_program, - serial_startup_program, serial_loss, - params_grads, self._no_grad_set) + self._apply_pre_optimization_passes( + 
completed_main_program, + serial_startup_program, + serial_loss, + params_grads, + self._no_grad_set, + ) # Logical partition partitioner = Partitioner(self._dist_context, rank) - dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition( - completed_main_program, serial_startup_program, params_grads) + ( + dist_main_prog, + dist_startup_prog, + dist_params_grads, + ) = partitioner.partition( + completed_main_program, serial_startup_program, params_grads + ) # TODO refactor the placement of optimizer # generate optimize program - dist_optimize_ops = self._apply_optimize(dist_main_prog, - dist_startup_prog, - dist_params_grads) + dist_optimize_ops = self._apply_optimize( + dist_main_prog, dist_startup_prog, dist_params_grads + ) set_grad_var_shape(dist_main_prog, self._dist_context) make_data_unshard(dist_main_prog, dist_startup_prog, self._dist_context) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank, - self._dist_context, dist_params_grads) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank, + self._dist_context, + dist_params_grads, + ) resharder.reshard() - self._apply_post_optimization_passes(dist_main_prog, dist_startup_prog, - rank, dist_params_grads) + self._apply_post_optimization_passes( + dist_main_prog, dist_startup_prog, rank, dist_params_grads + ) g_process_group_map = None if not relaunch_phase: g_process_group_map = copy.deepcopy(_g_process_group_map) @@ -243,14 +285,22 @@ class AutoParallelizer: _g_process_group_map[0] = ProcessGroup(0, []) for process_mesh in self._dist_context._process_meshes: _g_process_group_map[0].add_ranks(process_mesh.processes) - return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map - - def parallelize(self, - loss, - startup_program, - parameter_list=None, - no_grad_set=None, - callbacks=None): + return ( + dist_optimize_ops, + dist_params_grads, + dist_startup_prog, + dist_main_prog, + g_process_group_map, + ) + + def parallelize( + self, + loss, + startup_program, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): assert startup_program is not None self._loss = loss self._startup_program = startup_program @@ -261,25 +311,27 @@ class AutoParallelizer: if self._enable_auto_mapping and self._need_rank_mapping: # Do the mapping pass before parallelization - assert self._cluster is not None, \ - "The cluster must not be none when using auto mapping." + assert ( + self._cluster is not None + ), "The cluster must not be none when using auto mapping." 
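For reference, the hunks above only reflow the per-rank pipeline that AutoParallelizer._get_dist_program already runs; behavior is unchanged. A minimal sketch of that ordering, with no-op stand-ins for the real passes (the stub names below are placeholders for illustration, not Paddle APIs):

def _noop(*args, **kwargs):
    # Placeholder standing in for a real pass; returns its inputs unchanged.
    return args

generate_backward = apply_pre_optimization = partition_for_rank = _noop
apply_optimize = reshard = apply_post_optimization = _noop

def get_dist_program_sketch(rank, serial_main, serial_startup, loss):
    # 1. append the serial backward pass and collect (param, grad) pairs
    params_grads = generate_backward(serial_main, serial_startup, loss)
    # 2. pre-optimization passes: amp / pure fp16, recompute
    apply_pre_optimization(serial_main, serial_startup, loss, params_grads)
    # 3. logical partition of the annotated program for this rank
    dist_main, dist_startup = partition_for_rank(serial_main, serial_startup)
    # 4. generate the optimizer ops on the partitioned program
    apply_optimize(dist_main, dist_startup, params_grads)
    # 5. reshard: insert communication where dims mappings disagree
    reshard(dist_main, dist_startup, rank, params_grads)
    # 6. post-optimization passes: sharding, grad clip, gradient merge
    apply_post_optimization(dist_main, dist_startup, rank, params_grads)
    return dist_main, dist_startup

get_dist_program_sketch(0, "main", "startup", "loss")  # runs end to end with the stubs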
dist_programs = {} world_process_group = get_world_process_group() dist_context = None # auto search if self._dist_strategy.auto_search: logging.info("Start searching dist attr.") - serial_program_info = SerialProgramInfo(self._main_program, - self._startup_program, - self._loss, - self._optimizer, - self._cluster) - planner = Planner(serial_program_info, - self, - algorithm_config={ - "name": "mcmc", - "max_search_times": 5 - }) + serial_program_info = SerialProgramInfo( + self._main_program, + self._startup_program, + self._loss, + self._optimizer, + self._cluster, + ) + planner = Planner( + serial_program_info, + self, + algorithm_config={"name": "mcmc", "max_search_times": 5}, + ) dist_context, _ = planner.search() logging.info("End searching dist attr.") @@ -288,31 +340,42 @@ class AutoParallelizer: logging.info("Start serialize searched dist attr") cwd = pathlib.Path().resolve() searched_dist_context_path = os.path.join( - cwd, f"searched_dist_context_{time.time()}.pkl") + cwd, f"searched_dist_context_{time.time()}.pkl" + ) saved_dist_context = {} ops_dist_attr = {} tensors_dist_attr = {} for key, dist_op in dist_context._dist_ops_for_program.items(): ops_dist_attr[key] = dist_op.dist_attr - for key, dist_tensor in dist_context._dist_tensors_for_program.items( - ): + for ( + key, + dist_tensor, + ) in dist_context._dist_tensors_for_program.items(): tensors_dist_attr[key] = dist_tensor.dist_attr saved_dist_context["ops_dist_attr"] = ops_dist_attr saved_dist_context["tensors_dist_attr"] = tensors_dist_attr saved_dist_context[ - "process_meshes"] = dist_context._process_meshes - with open(searched_dist_context_path, - "wb") as dist_context_file: + "process_meshes" + ] = dist_context._process_meshes + with open( + searched_dist_context_path, "wb" + ) as dist_context_file: pickle.dump(saved_dist_context, dist_context_file) os.environ[ - 'PADDLE_SEARCHED_DIST_CONTEXT_PATH'] = searched_dist_context_path + 'PADDLE_SEARCHED_DIST_CONTEXT_PATH' + ] = searched_dist_context_path logging.info( f"End serialize searched dist attr to {searched_dist_context_path}" ) for rank in world_process_group.ranks: - dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map = self._get_dist_program( - rank, dist_context) + ( + dist_optimize_ops, + dist_params_grads, + dist_startup_prog, + dist_main_prog, + g_process_group_map, + ) = self._get_dist_program(rank, dist_context) dist_programs[rank] = [dist_main_prog, g_process_group_map] # Do the mapping between the distributed program graph and the cluster graph @@ -324,27 +387,42 @@ class AutoParallelizer: json.dump(rank_mapping, rank_mapping_file) enable_elastic = os.getenv("PADDLE_ENABLE_ELASTIC") - enable_elastic = True if enable_elastic and enable_elastic.lower( - ) == 'true' else False + enable_elastic = ( + True + if enable_elastic and enable_elastic.lower() == 'true' + else False + ) if enable_elastic: print("Auto mapping finished, now do elastic re-launch") - sys.exit(paddle.distributed.fleet.elastic.manager. 
- ELASTIC_AUTO_PARALLEL_EXIT_CODE) + sys.exit( + paddle.distributed.fleet.elastic.manager.ELASTIC_AUTO_PARALLEL_EXIT_CODE + ) original_cmd_args = os.getenv("PADDLE_ORIGINAL_CMD_ARGS") rank_mapping_args = " ".join( - ["--rank_mapping_path", self._rank_mapping_path]) + ["--rank_mapping_path", self._rank_mapping_path] + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", "--branch", "-p"] else: coverage_args = [] - new_cmd_args = "-m paddle.distributed.fleet.launch" + " " + rank_mapping_args + " " + original_cmd_args - new_cmd = [sys.executable, "-u" - ] + coverage_args + shlex.split(new_cmd_args) + new_cmd_args = ( + "-m paddle.distributed.fleet.launch" + + " " + + rank_mapping_args + + " " + + original_cmd_args + ) + new_cmd = ( + [sys.executable, "-u"] + + coverage_args + + shlex.split(new_cmd_args) + ) new_process = subprocess.Popen(new_cmd) new_process.wait() - assert new_process.returncode == 0, \ - "Launch failed with rank mapping" + assert ( + new_process.returncode == 0 + ), "Launch failed with rank mapping" print("Successfully do the second launch for auto mapping!") sys.exit(0) else: @@ -352,27 +430,32 @@ class AutoParallelizer: rank = paddle.distributed.get_rank() dist_context = None searched_dist_context_path = os.getenv( - "PADDLE_SEARCHED_DIST_CONTEXT_PATH", None) + "PADDLE_SEARCHED_DIST_CONTEXT_PATH", None + ) if searched_dist_context_path is not None: - with open(searched_dist_context_path, - "rb") as dist_context_file: + with open( + searched_dist_context_path, "rb" + ) as dist_context_file: saved_dist_context = pickle.load(dist_context_file) dist_context = DistributedContext() for op in self._main_program.global_block().ops: dist_attr = saved_dist_context["ops_dist_attr"][ - op.desc.id()] + op.desc.id() + ] dist_op = DistributedOperator(op, dist_attr) dist_context.add_dist_op_for_program(dist_op) vars = self._main_program.global_block().vars for var in vars.values(): dist_attr = saved_dist_context["tensors_dist_attr"][ - var.desc.id()] + var.desc.id() + ] dist_tensor = DistributedTensor(var, dist_attr) dist_context.add_dist_tensor_for_program(dist_tensor) dist_context._process_meshes = saved_dist_context[ - "process_meshes"] + "process_meshes" + ] else: if self._dist_strategy.auto_search: @@ -381,13 +464,16 @@ class AutoParallelizer: self._startup_program, self._loss, self._optimizer, - cluster=self._cluster) - planner = Planner(serial_program_info, - self, - algorithm_config={ - "name": "mcmc", - "max_search_times": 5 - }) + cluster=self._cluster, + ) + planner = Planner( + serial_program_info, + self, + algorithm_config={ + "name": "mcmc", + "max_search_times": 5, + }, + ) dist_context, _ = planner.search() # rebuild g_process_group @@ -395,8 +481,13 @@ class AutoParallelizer: pg0 = get_process_group(0) for process_mesh in dist_context._process_meshes: pg0.add_ranks(process_mesh.processes) - dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, _ = self._get_dist_program( - rank, dist_context, relaunch_phase=True) + ( + dist_optimize_ops, + dist_params_grads, + dist_startup_prog, + dist_main_prog, + _, + ) = self._get_dist_program(rank, dist_context, relaunch_phase=True) # NOTE: This is a trick to fix hang in pipeline mode when dist context is searched by planner if self._dist_strategy.auto_search: @@ -424,14 +515,25 @@ class AutoParallelizer: # with inference. 
self._remove_distributed_attrs(dist_main_prog) - return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog + return ( + dist_optimize_ops, + dist_params_grads, + dist_startup_prog, + dist_main_prog, + ) def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): - if k == "_main_program" or k == "_startup_program" or k == "_dist_context" or k == "_fleet" or k == "_loss": + if ( + k == "_main_program" + or k == "_startup_program" + or k == "_dist_context" + or k == "_fleet" + or k == "_loss" + ): setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) diff --git a/python/paddle/distributed/auto_parallel/parallelizer_v2.py b/python/paddle/distributed/auto_parallel/parallelizer_v2.py index d6bb5efc8eb915d0f5e662cad2abf983f9da0895..7a7ee603e9c61cef708df6fdf2d33028948af9e4 100644 --- a/python/paddle/distributed/auto_parallel/parallelizer_v2.py +++ b/python/paddle/distributed/auto_parallel/parallelizer_v2.py @@ -29,7 +29,6 @@ from ..utils.log_utils import get_logger class Parallelizer: - def __init__(self, mode, completer, dist_context): self._mode = mode self._completer = completer @@ -54,73 +53,123 @@ class Parallelizer: if self._mode == "train" and serial_optimizer: # Generate backward serial_loss = self._dist_context.serial_loss - params_grads = self._generate_backward(serial_main_program, - serial_startup_program, - serial_loss) + params_grads = self._generate_backward( + serial_main_program, serial_startup_program, serial_loss + ) # Apply pre optimization passes time0 = time.time() - serial_main_program, serial_startup_program, params_grads = self._apply_pre_optimization( - serial_main_program, serial_startup_program, serial_loss, - serial_optimizer, params_grads) + ( + serial_main_program, + serial_startup_program, + params_grads, + ) = self._apply_pre_optimization( + serial_main_program, + serial_startup_program, + serial_loss, + serial_optimizer, + params_grads, + ) self._logger.debug( - "within parallel apply_pre_optimization time: {}, mode {}". 
- format(time.time() - time0, self._mode)) + "within parallel apply_pre_optimization time: {}, mode {}".format( + time.time() - time0, self._mode + ) + ) # Do logical partition time0 = time.time() partitioner = Partitioner(self._dist_context, rank) - dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition( - serial_main_program, serial_startup_program, params_grads) + ( + dist_main_prog, + dist_startup_prog, + dist_params_grads, + ) = partitioner.partition( + serial_main_program, serial_startup_program, params_grads + ) self._logger.debug( "within parallel partitioner time: {}, mode {}".format( - time.time() - time0, self._mode)) + time.time() - time0, self._mode + ) + ) # Generate optimizer time0 = time.time() - self._generate_optimizer(dist_main_prog, dist_startup_prog, - serial_optimizer, dist_params_grads) + self._generate_optimizer( + dist_main_prog, + dist_startup_prog, + serial_optimizer, + dist_params_grads, + ) self._logger.debug( "within parallel optimizer time: {}, mode {}".format( - time.time() - time0, self._mode)) + time.time() - time0, self._mode + ) + ) # Do reshard process time0 = time.time() set_grad_var_shape(dist_main_prog, self._dist_context) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank, - self._dist_context, dist_params_grads) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank, + self._dist_context, + dist_params_grads, + ) resharder.reshard() self._logger.debug( "within parallel reshard time: {}, mode {}".format( - time.time() - time0, self._mode)) + time.time() - time0, self._mode + ) + ) # Apply post optimization passes time0 = time.time() - self._apply_post_optimization(dist_main_prog, dist_startup_prog, - rank, dist_params_grads) + self._apply_post_optimization( + dist_main_prog, dist_startup_prog, rank, dist_params_grads + ) self._logger.debug( - "within parallel apply_post_optimization time: {}, mode {}". - format(time.time() - time0, self._mode)) + "within parallel apply_post_optimization time: {}, mode {}".format( + time.time() - time0, self._mode + ) + ) else: # Apply pre optimization passes time0 = time.time() - self._apply_pre_optimization(serial_main_program, - serial_startup_program, None, None, - None) + self._apply_pre_optimization( + serial_main_program, serial_startup_program, None, None, None + ) self._logger.debug( - "within parallel apply_pre_optimization time: {}, mode {}". 
- format(time.time() - time0, self._mode)) + "within parallel apply_pre_optimization time: {}, mode {}".format( + time.time() - time0, self._mode + ) + ) # Do logical partition time0 = time.time() partitioner = Partitioner(self._dist_context, rank) - dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition( - serial_main_program, serial_startup_program, []) + ( + dist_main_prog, + dist_startup_prog, + dist_params_grads, + ) = partitioner.partition( + serial_main_program, serial_startup_program, [] + ) # Do reshard process self._logger.debug( "within parallel partitioner time: {}, mode {}".format( - time.time() - time0, self._mode)) + time.time() - time0, self._mode + ) + ) time0 = time.time() - resharder = Resharder(dist_main_prog, dist_startup_prog, rank, - self._dist_context, [], 1) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank, + self._dist_context, + [], + 1, + ) resharder.reshard() self._logger.debug( "within parallel reshard time: {}, mode {}".format( - time.time() - time0, self._mode)) + time.time() - time0, self._mode + ) + ) # Clone program for test if self._mode != 'train': dist_main_prog = dist_main_prog.clone(for_test=True) @@ -133,13 +182,15 @@ class Parallelizer: def _generate_backward(self, main_program, startup_program, loss): with program_guard(main_program, startup_program): params_grads = append_backward( - loss, distop_context=self._dist_context.dist_op_context) + loss, distop_context=self._dist_context.dist_op_context + ) self._completer.complete_backward_annotation(main_program) self._dist_context.block_state.parse_backward_blocks(main_program) return params_grads - def _generate_optimizer(self, main_program, startup_program, optimizer, - params_grads): + def _generate_optimizer( + self, main_program, startup_program, optimizer, params_grads + ): # NOTE: `apply_gradients` will add an Accumulator for a parameter only once, # but optimizer will be called repeatedly in re-launch, so optimizer need to be copied. 
optimizer = copy.deepcopy(optimizer) @@ -150,8 +201,9 @@ class Parallelizer: self._completer.complete_update_annotation(main_program) return optimizer_ops - def _apply_pre_optimization(self, main_program, startup_program, loss, - optimizer, params_grads): + def _apply_pre_optimization( + self, main_program, startup_program, loss, optimizer, params_grads + ): if self._strategy is None: return @@ -162,10 +214,11 @@ class Parallelizer: config["dist_context"] = self._dist_context config["params_grads"] = params_grads auto_parallel_quantization_pass = new_pass( - "auto_parallel_quantization", config) - auto_parallel_quantization_pass.apply([main_program], - [startup_program], - self._pass_context) + "auto_parallel_quantization", config + ) + auto_parallel_quantization_pass.apply( + [main_program], [startup_program], self._pass_context + ) main_program = self._pass_context.get_attr("main_program") startup_program = self._pass_context.get_attr("startup_program") params_grads = self._pass_context.get_attr("params_grads") @@ -176,18 +229,22 @@ class Parallelizer: config["dist_context"] = self._dist_context config["params_grads"] = params_grads config["loss"] = loss - config["input_data"] = self._dist_context.serial_feed_vars["inputs"] \ + config["input_data"] = ( + self._dist_context.serial_feed_vars["inputs"] + self._dist_context.serial_feed_vars["labels"] + ) if config["use_pure_fp16"]: config["base_opt"] = optimizer auto_parallel_fp16_pass = new_pass("auto_parallel_fp16", config) - auto_parallel_fp16_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_fp16_pass.apply( + [main_program], [startup_program], self._pass_context + ) loss = auto_parallel_fp16_pass.get_loss() else: auto_parallel_amp_pass = new_pass("auto_parallel_amp", config) - auto_parallel_amp_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_amp_pass.apply( + [main_program], [startup_program], self._pass_context + ) loss = auto_parallel_amp_pass.get_loss() # apply recompute pass @@ -197,16 +254,18 @@ class Parallelizer: config["dist_context"] = self._dist_context config["no_grad_set"] = None config["loss"] = loss - auto_parallel_recompute_pass = new_pass("auto_parallel_recompute", - config) - auto_parallel_recompute_pass.apply([main_program], - [startup_program], - self._pass_context) + auto_parallel_recompute_pass = new_pass( + "auto_parallel_recompute", config + ) + auto_parallel_recompute_pass.apply( + [main_program], [startup_program], self._pass_context + ) return main_program, startup_program, params_grads - def _apply_post_optimization(self, main_program, startup_program, rank, - params_grads): + def _apply_post_optimization( + self, main_program, startup_program, rank, params_grads + ): if self._strategy is None: return @@ -223,10 +282,12 @@ class Parallelizer: config["dist_context"] = self._dist_context config["params_grads"] = params_grads config["global_rank"] = rank - auto_parallel_sharding_pass = new_pass("auto_parallel_sharding", - config) - auto_parallel_sharding_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_sharding_pass = new_pass( + "auto_parallel_sharding", config + ) + auto_parallel_sharding_pass.apply( + [main_program], [startup_program], self._pass_context + ) params_grads = self._pass_context.get_attr("params_grads") # GradClip is train-only optimization @@ -235,10 +296,12 @@ class Parallelizer: config["dist_context"] = self._dist_context config["params_grads"] = params_grads config["rank_id"] = rank - 
auto_parallel_clip_pass = new_pass("auto_parallel_grad_clip", - config) - auto_parallel_clip_pass.apply([main_program], [startup_program], - self._pass_context) + auto_parallel_clip_pass = new_pass( + "auto_parallel_grad_clip", config + ) + auto_parallel_clip_pass.apply( + [main_program], [startup_program], self._pass_context + ) # gradient_merge is then train-only optimization if self._mode == "train" and self._strategy.gradient_merge.enable: @@ -246,7 +309,8 @@ class Parallelizer: config["dist_context"] = self._dist_context config["params_grads"] = params_grads auto_parallel_gradient_merge_pass = new_pass( - "auto_parallel_gradient_merge_pass", config) - auto_parallel_gradient_merge_pass.apply([main_program], - [startup_program], - self._pass_context) + "auto_parallel_gradient_merge_pass", config + ) + auto_parallel_gradient_merge_pass.apply( + [main_program], [startup_program], self._pass_context + ) diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/partitioner.py index 32917627672d504976166fe41894dfa8982cb55b..2ba339813243789ff89290bf45fc134739844fa8 100644 --- a/python/paddle/distributed/auto_parallel/partitioner.py +++ b/python/paddle/distributed/auto_parallel/partitioner.py @@ -17,7 +17,9 @@ import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid import core from paddle.fluid.framework import Parameter, Program -from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container +from paddle.distributed.auto_parallel.operators.common import ( + get_distributed_operator_impl_container, +) from paddle.distributed.auto_parallel.dist_context import DistributedContext from .dist_attribute import OperatorDistributedAttribute from .utils import is_backward_op, is_forward_op, is_loss_op, is_optimize_op @@ -25,7 +27,8 @@ from .operators.common import BACKWARD_ONLY_DIST_OPS __varname_not_in_block__ = ["lod_tensor_blocking_queue"] __not_shape_var_type__ = [ - core.VarDesc.VarType.READER, core.VarDesc.VarType.STEP_SCOPES + core.VarDesc.VarType.READER, + core.VarDesc.VarType.STEP_SCOPES, ] @@ -51,25 +54,29 @@ class Partitioner(object): """ if not isinstance(dist_context, DistributedContext): raise TypeError( - "dist_context be paddle.fluid.DistributedContext, got %s here" % - type(dist_context)) + "dist_context be paddle.fluid.DistributedContext, got %s here" + % type(dist_context) + ) self._dist_context = dist_context self._rank_id = rank_id self._serial2dist_varname_mapping = {} self._dist_varname_suffix = "" - def partition(self, serial_main_program, serial_startup_program, - params_grads): + def partition( + self, serial_main_program, serial_startup_program, params_grads + ): if not isinstance(serial_main_program, (Program)): raise TypeError( - "main_program be paddle.fluid.framework.program, got %s here" % - type(serial_main_program)) + "main_program be paddle.fluid.framework.program, got %s here" + % type(serial_main_program) + ) # check if shard annotated serial program valid if not self._is_valid_annotated_program(serial_main_program): raise RuntimeError( - "Not all vars or ops are annotated in main program !") + "Not all vars or ops are annotated in main program !" 
+ ) # init distop helper dist_op_context = self._dist_context.dist_op_context @@ -81,22 +88,31 @@ class Partitioner(object): partitioned_startup_prog = None else: partitioned_startup_prog = self.partition_startup_program( - serial_main_program, serial_startup_program) + serial_main_program, serial_startup_program + ) dist_op_context.dst_startup_program = partitioned_startup_prog # partition main program - partitioned_main_prog, partitioned_params_grads = self.partition_main_program( - serial_main_program, params_grads) + ( + partitioned_main_prog, + partitioned_params_grads, + ) = self.partition_main_program(serial_main_program, params_grads) - return partitioned_main_prog, partitioned_startup_prog, partitioned_params_grads + return ( + partitioned_main_prog, + partitioned_startup_prog, + partitioned_params_grads, + ) - def partition_startup_program(self, serial_main_program, - serial_startup_program): + def partition_startup_program( + self, serial_main_program, serial_startup_program + ): if not isinstance(serial_startup_program, (Program)): raise TypeError( - "dist_context be paddle.fluid.framework.program, got %s here" % - type(serial_startup_program)) + "dist_context be paddle.fluid.framework.program, got %s here" + % type(serial_startup_program) + ) partitioned_startup_prog = fluid.Program() ref_block = serial_main_program.global_block() @@ -109,27 +125,33 @@ class Partitioner(object): assert var.persistable new_name = var.name + self._dist_varname_suffix temp_varname_map[var.name] = new_name - target_shape = _partition_var(self._dist_context, ref_block, - target_block, var.name, new_name) + target_shape = _partition_var( + self._dist_context, ref_block, target_block, var.name, new_name + ) var2shape[new_name] = target_shape # ops for op in serial_startup_program.global_block().ops: # TODO if var not belong to this rank, should be filtered output_vars = op.desc.output_arg_names() - assert len( - output_vars - ) == 1, "initializer should output only ONE variable, but got [{}]".format( - str(op.desc)) - assert temp_varname_map[output_vars[ - 0]] in var2shape, "try to initialize [{}] which is not a persistable var".format( - output_vars[0]) + assert ( + len(output_vars) == 1 + ), "initializer should output only ONE variable, but got [{}]".format( + str(op.desc) + ) + assert ( + temp_varname_map[output_vars[0]] in var2shape + ), "try to initialize [{}] which is not a persistable var".format( + output_vars[0] + ) new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op.desc) - new_op_desc._rename_output(output_vars[0], - temp_varname_map[output_vars[0]]) - new_op_desc._set_attr("shape", - var2shape[temp_varname_map[output_vars[0]]]) + new_op_desc._rename_output( + output_vars[0], temp_varname_map[output_vars[0]] + ) + new_op_desc._set_attr( + "shape", var2shape[temp_varname_map[output_vars[0]]] + ) target_block._sync_with_cpp() # set distribute atrribute @@ -137,14 +159,17 @@ class Partitioner(object): assert new_op.type == new_op_desc.type() assert new_op.desc == new_op_desc output_var = target_block.var(output_vars[0]) - output_var_attr = self._dist_context.get_tensor_dist_attr_for_program( - output_var) + output_var_attr = ( + self._dist_context.get_tensor_dist_attr_for_program(output_var) + ) op_attr = OperatorDistributedAttribute() op_attr.process_mesh = output_var_attr.process_mesh - op_attr.set_output_dims_mapping(output_var.name, - output_var_attr.dims_mapping) - op_attr.set_input_dims_mapping(output_var.name, - output_var_attr.dims_mapping) + 
op_attr.set_output_dims_mapping( + output_var.name, output_var_attr.dims_mapping + ) + op_attr.set_input_dims_mapping( + output_var.name, output_var_attr.dims_mapping + ) self._dist_context.set_op_dist_attr_for_program(new_op, op_attr) return partitioned_startup_prog @@ -166,7 +191,8 @@ class Partitioner(object): target_block = partitioned_main_prog.blocks[0] else: target_block = partitioned_main_prog._create_block( - parent_idx=ref_block.parent_idx) + parent_idx=ref_block.parent_idx + ) assert ref_block.idx == target_block.idx target_block._set_forward_block_idx(ref_block.forward_block_idx) dist_op_context.work_block = target_block @@ -181,8 +207,9 @@ class Partitioner(object): for attr_name in op.all_attrs(): if op.attr_type(attr_name) == core.AttrType.BLOCK: relative_id = op._block_attr_id(attr_name) - op._set_attr(attr_name, - partitioned_main_prog.block(relative_id)) + op._set_attr( + attr_name, partitioned_main_prog.block(relative_id) + ) partitioned_params_and_grads = [] for p, g in params_and_grads: @@ -193,7 +220,8 @@ class Partitioner(object): else: assert g.name in self._serial2dist_varname_mapping dist_g = self._get_dist_var_by_serial_var( - g, partitioned_main_prog) + g, partitioned_main_prog + ) partitioned_params_and_grads.append((dist_p, dist_g)) return partitioned_main_prog, partitioned_params_and_grads @@ -217,71 +245,112 @@ class Partitioner(object): for idx in range(len(serial_ops)): if idx <= last_fwd_op_idx: forward_op_id2forward_op[ - serial_ops[idx].desc.original_id()] = serial_ops[idx] + serial_ops[idx].desc.original_id() + ] = serial_ops[idx] # partiiton appended_grad_times = 0 for idx, op in enumerate(serial_ops): op_dist_attr = self._dist_context.get_op_dist_attr_for_program(op) - if is_backward_op(op) and (is_forward_op(serial_ops[idx - 1]) - or is_loss_op(serial_ops[idx - 1])): + if is_backward_op(op) and ( + is_forward_op(serial_ops[idx - 1]) + or is_loss_op(serial_ops[idx - 1]) + ): if not op_dist_attr.is_recompute: appended_grad_times += 1 # partititon input variables for serial_input_varname in op.desc.input_arg_names(): - if serial_input_varname not in self._serial2dist_varname_mapping: - new_varname = serial_input_varname + self._dist_varname_suffix + if ( + serial_input_varname + not in self._serial2dist_varname_mapping + ): + new_varname = ( + serial_input_varname + self._dist_varname_suffix + ) if ref_block.has_var(serial_input_varname): - _partition_var(self._dist_context, ref_block, - target_block, serial_input_varname, - new_varname) + _partition_var( + self._dist_context, + ref_block, + target_block, + serial_input_varname, + new_varname, + ) else: for varname_not_in_block in __varname_not_in_block__: - assert varname_not_in_block in serial_input_varname, \ - "{} is not found".format(serial_input_varname) + assert ( + varname_not_in_block in serial_input_varname + ), "{} is not found".format(serial_input_varname) self._serial2dist_varname_mapping[ - serial_input_varname] = new_varname + serial_input_varname + ] = new_varname # partition output vars for serial_output_varname in op.desc.output_arg_names(): - if serial_output_varname not in self._serial2dist_varname_mapping: - new_varname = serial_output_varname + self._dist_varname_suffix - _partition_var(self._dist_context, ref_block, target_block, - serial_output_varname, new_varname) + if ( + serial_output_varname + not in self._serial2dist_varname_mapping + ): + new_varname = ( + serial_output_varname + self._dist_varname_suffix + ) + _partition_var( + self._dist_context, + ref_block, + 
target_block, + serial_output_varname, + new_varname, + ) self._serial2dist_varname_mapping[ - serial_output_varname] = new_varname + serial_output_varname + ] = new_varname # partition op if is_forward_op(op) or op_dist_attr.is_recompute: kinputs, koutputs = dist_op_context.prepare_context(op) dist_op_forward_impl = _get_dist_op_forward_implement( - op, self._dist_context) - dist_op_forward_impl.forward(self._dist_context, **kinputs, - **koutputs) + op, self._dist_context + ) + dist_op_forward_impl.forward( + self._dist_context, **kinputs, **koutputs + ) elif is_backward_op(op): kinputs, koutputs = dist_op_context.prepare_context(op) dist_op_backward_impl = _get_dist_op_backward_implement( - op, self._dist_context, forward_op_id2forward_op) - grad_var_to_var = self._dist_context.dist_op_context.grad_var_to_var[ - appended_grad_times] + op, self._dist_context, forward_op_id2forward_op + ) + grad_var_to_var = ( + self._dist_context.dist_op_context.grad_var_to_var[ + appended_grad_times + ] + ) dist_op_backward_impl.backward( - self._dist_context, **kinputs, **koutputs, - **{"grad_var_to_var": grad_var_to_var}) + self._dist_context, + **kinputs, + **koutputs, + **{"grad_var_to_var": grad_var_to_var} + ) elif is_optimize_op(op): # NOTE: BACKWARD_ONLY_DIST_OPS's op_role must 2 because of 1F1B PASS kinputs, koutputs = dist_op_context.prepare_context(op) dist_op_opt_impl = _get_dist_op_backward_implement( - op, self._dist_context, forward_op_id2forward_op) - dist_op_opt_impl.backward(self._dist_context, **kinputs, - **koutputs, **{"grad_var_to_var": {}}) + op, self._dist_context, forward_op_id2forward_op + ) + dist_op_opt_impl.backward( + self._dist_context, + **kinputs, + **koutputs, + **{"grad_var_to_var": {}} + ) else: raise NotImplementedError( - "partitioner only support forward and backward, optimize ops, but got {}" - .format(str(op))) + "partitioner only support forward and backward, optimize ops, but got {}".format( + str(op) + ) + ) def _is_valid_annotated_program(self, program): @@ -293,13 +362,16 @@ class Partitioner(object): ] var_dist_attrs = [ self._dist_context.get_tensor_dist_attr_for_program(var) - for var in vars_ if (var.type not in __not_shape_var_type__) + for var in vars_ + if (var.type not in __not_shape_var_type__) ] - all_ops_annotated = all(dist_attr is not None - for dist_attr in op_dist_attrs) - all_vars_annotated = all(dist_attr is not None - for dist_attr in var_dist_attrs) + all_ops_annotated = all( + dist_attr is not None for dist_attr in op_dist_attrs + ) + all_vars_annotated = all( + dist_attr is not None for dist_attr in var_dist_attrs + ) return all_ops_annotated and all_vars_annotated @@ -323,22 +395,26 @@ def _get_dist_shape(var, dist_attr): assert len(var_shape) == len( mapping ), "variable shape [{}] and dim_mapping [{}] is NOT match !".format( - var_shape, mapping) + var_shape, mapping + ) new_shape = [] for idx in range(len(var_shape)): if var_shape[idx] == -1 or mapping[idx] == -1: new_shape.append(var_shape[idx]) else: - assert var_shape[idx] % mesh[mapping[ - idx]] == 0, "un-event partition: var_shape[idx]=[{}], mesh[{}]".format( - var_shape[idx], mesh[mapping[idx]]) + assert ( + var_shape[idx] % mesh[mapping[idx]] == 0 + ), "un-event partition: var_shape[idx]=[{}], mesh[{}]".format( + var_shape[idx], mesh[mapping[idx]] + ) new_shape.append(var_shape[idx] // mesh[mapping[idx]]) return new_shape -def _partition_parameter(dist_context, src_var, dst_block, dst_varname, - dst_shape): +def _partition_parameter( + dist_context, src_var, dst_block, 
dst_varname, dst_shape +): # NOTE hack to copied Parameter # not initialized parameter, need to initialize it copied_kwargs = {} @@ -348,39 +424,45 @@ def _partition_parameter(dist_context, src_var, dst_block, dst_varname, copied_kwargs['do_model_average'] = src_var.do_model_average copied_kwargs['need_clip'] = src_var.need_clip - param = Parameter(block=dst_block, - type=src_var.type, - name=dst_varname, - shape=dst_shape, - dtype=src_var.dtype, - lod_level=src_var.lod_level, - error_clip=src_var.error_clip, - stop_gradient=src_var.stop_gradient, - is_data=src_var.is_data, - belong_to_optimizer=src_var.belong_to_optimizer, - **copied_kwargs) + param = Parameter( + block=dst_block, + type=src_var.type, + name=dst_varname, + shape=dst_shape, + dtype=src_var.dtype, + lod_level=src_var.lod_level, + error_clip=src_var.error_clip, + stop_gradient=src_var.stop_gradient, + is_data=src_var.is_data, + belong_to_optimizer=src_var.belong_to_optimizer, + **copied_kwargs + ) return param -def _partition_intermediate_var(dist_context, src_var, dst_block, dst_varname, - dst_shape): - var = dst_block.create_var(type=src_var.type, - name=dst_varname, - shape=dst_shape, - dtype=src_var.dtype, - lod_level=src_var.lod_level, - persistable=src_var.persistable, - error_clip=src_var.error_clip, - stop_gradient=src_var.stop_gradient, - is_data=src_var.is_data, - belong_to_optimizer=src_var.belong_to_optimizer) +def _partition_intermediate_var( + dist_context, src_var, dst_block, dst_varname, dst_shape +): + var = dst_block.create_var( + type=src_var.type, + name=dst_varname, + shape=dst_shape, + dtype=src_var.dtype, + lod_level=src_var.lod_level, + persistable=src_var.persistable, + error_clip=src_var.error_clip, + stop_gradient=src_var.stop_gradient, + is_data=src_var.is_data, + belong_to_optimizer=src_var.belong_to_optimizer, + ) return var -def _partition_var(dist_context, src_block, dst_block, src_varname, - dst_varname): +def _partition_var( + dist_context, src_block, dst_block, src_varname, dst_varname +): """ partition include: split + replicate """ @@ -388,44 +470,53 @@ def _partition_var(dist_context, src_block, dst_block, src_varname, if src_var.type in __not_shape_var_type__: persist = getattr(src_var, 'persistable', False) - new_var = dst_block.create_var(type=src_var.type, - name=dst_varname, - persistable=persist, - stop_gradient=True) + new_var = dst_block.create_var( + type=src_var.type, + name=dst_varname, + persistable=persist, + stop_gradient=True, + ) target_shape = None else: dist_attr = dist_context.get_tensor_dist_attr_for_program(src_var) target_shape = _get_dist_shape(src_var, dist_attr) if isinstance(src_var, Parameter): - new_var = _partition_parameter(dist_context, src_var, dst_block, - dst_varname, target_shape) + new_var = _partition_parameter( + dist_context, src_var, dst_block, dst_varname, target_shape + ) else: - new_var = _partition_intermediate_var(dist_context, src_var, - dst_block, dst_varname, - target_shape) + new_var = _partition_intermediate_var( + dist_context, src_var, dst_block, dst_varname, target_shape + ) dist_attr = copy.deepcopy( - dist_context.get_tensor_dist_attr_for_program(src_var)) + dist_context.get_tensor_dist_attr_for_program(src_var) + ) assert dist_attr is not None dist_context.set_tensor_dist_attr_for_program(new_var, dist_attr) return target_shape -def _get_dist_op_backward_implement(backward_op, dist_context, - forward_op_id2forward_op): +def _get_dist_op_backward_implement( + backward_op, dist_context, forward_op_id2forward_op +): dist_op_context = 
dist_context.dist_op_context if backward_op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: forward_op_id = dist_op_context.grad_op_id_to_op_id[ - backward_op.desc.original_id()] + backward_op.desc.original_id() + ] forward_op = forward_op_id2forward_op[forward_op_id] forward_op_dist_attr = dist_context.get_op_dist_attr_for_program( - forward_op) + forward_op + ) dist_op_impl_container = get_distributed_operator_impl_container( - forward_op_dist_attr.impl_type) + forward_op_dist_attr.impl_type + ) dist_op_impl = dist_op_impl_container.get_impl( - forward_op_dist_attr.impl_idx) + forward_op_dist_attr.impl_idx + ) return dist_op_impl # # NOTE trick for dist ops that only have backward implement @@ -433,7 +524,8 @@ def _get_dist_op_backward_implement(backward_op, dist_context, op_dist_attr = dist_context.get_op_dist_attr_for_program(backward_op) assert op_dist_attr.impl_idx >= 0 dist_op_impl = get_distributed_operator_impl_container( - op_dist_attr.impl_type).get_impl(op_dist_attr.impl_idx) + op_dist_attr.impl_type + ).get_impl(op_dist_attr.impl_idx) return dist_op_impl dist_op = get_distributed_operator_impl_container("default") @@ -443,6 +535,7 @@ def _get_dist_op_backward_implement(backward_op, dist_context, def _get_dist_op_forward_implement(forward_op, dist_context): dist_attr = dist_context.get_op_dist_attr_for_program(forward_op) dist_op_impl_container = get_distributed_operator_impl_container( - dist_attr.impl_type) + dist_attr.impl_type + ) dist_op_impl = dist_op_impl_container.get_impl(dist_attr.impl_idx) return dist_op_impl diff --git a/python/paddle/distributed/auto_parallel/planner.py b/python/paddle/distributed/auto_parallel/planner.py index d01fe50c0d41c970aa9a15b0e9a6b5d1fe38321d..df7fd63a6ed3d3a6fa1a50dff1e97319baa928b1 100755 --- a/python/paddle/distributed/auto_parallel/planner.py +++ b/python/paddle/distributed/auto_parallel/planner.py @@ -32,7 +32,10 @@ from .utils import update_op_dims_mapping_by_default_dist_impl from .utils import update_op_dims_mapping_by_elementwise_like_dist_impl from .utils import get_all_distributed_main_program from .dist_context import DistributedContext, DistributedOperatorContext -from .dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute +from .dist_attribute import ( + OperatorDistributedAttribute, + TensorDistributedAttribute, +) paddle.seed(123) random.seed(123) @@ -40,18 +43,19 @@ np.random.seed(123) class PlanFilter: - @staticmethod - def check_dims_mapping_for_tensor(process_mesh_topology, tensor_shape, - dims_mapping): + def check_dims_mapping_for_tensor( + process_mesh_topology, tensor_shape, dims_mapping + ): valid = True assert len(tensor_shape) == len(dims_mapping) for idx, dim_mapping in enumerate(dims_mapping): if dim_mapping != -1: - if tensor_shape[idx] % process_mesh_topology[ - dim_mapping] != 0 or dims_mapping.count( - dim_mapping) > 1: + if ( + tensor_shape[idx] % process_mesh_topology[dim_mapping] != 0 + or dims_mapping.count(dim_mapping) > 1 + ): valid = False if dim_mapping != -1 and process_mesh_topology[0] == 1: valid = False @@ -65,7 +69,8 @@ class PlanFilter: for var_name in op.input_arg_names: dims_mapping = op_dist_attr.get_input_dims_mapping(var_name) if not PlanFilter.check_dims_mapping_for_tensor( - process_mesh.topology, vars[var_name].shape, dims_mapping): + process_mesh.topology, vars[var_name].shape, dims_mapping + ): return False if vars[var_name].is_data and len(dims_mapping) > 1: for dim in dims_mapping[1:]: @@ -75,7 +80,8 @@ class PlanFilter: for var_name in 
op.output_arg_names: dims_mapping = op_dist_attr.get_output_dims_mapping(var_name) if not PlanFilter.check_dims_mapping_for_tensor( - process_mesh.topology, vars[var_name].shape, dims_mapping): + process_mesh.topology, vars[var_name].shape, dims_mapping + ): return False return True @@ -83,7 +89,11 @@ class PlanFilter: @staticmethod def check_dims_mapping_for_special_op(op, op_dist_attr, vars): # NOTE: Those ops has some partition limits, and will be solved when corresponding dist op implemented in the future. - if op.type == "elementwise_add" or op.type == 'layer_norm' or op.type == "softmax_with_cross_entropy": + if ( + op.type == "elementwise_add" + or op.type == 'layer_norm' + or op.type == "softmax_with_cross_entropy" + ): for name in op.input_arg_names: for item in op_dist_attr.get_input_dims_mapping(name): if item != -1: @@ -104,20 +114,25 @@ class PlanFilter: class PlanSpace: not_enum_ops = ["create_py_reader", "create_double_buffer_reader", "read"] special_vars = [ - "lod_tensor_blocking_queue_0", "create_py_reader_0", "double_buffer_0" + "lod_tensor_blocking_queue_0", + "create_py_reader_0", + "double_buffer_0", ] @staticmethod - def _enum_dims_mapping(process_mesh_topology, visited, path, depth, res, - tensor_shape): + def _enum_dims_mapping( + process_mesh_topology, visited, path, depth, res, tensor_shape + ): """Enumerate dims mapping of tensor by the given process_mesh_topology""" nums = list(range(-1, len(process_mesh_topology))) if depth == len(tensor_shape): valid = True for idx, item in enumerate(path): if item != -1: - if tensor_shape[idx] % process_mesh_topology[ - item] != 0 or path.count(item) > 1: + if ( + tensor_shape[idx] % process_mesh_topology[item] != 0 + or path.count(item) > 1 + ): valid = False if valid: res.append(copy.deepcopy(path)) @@ -128,15 +143,23 @@ class PlanSpace: if i != 0: visited[i] = True path.append(nums[i]) - PlanSpace._enum_dims_mapping(process_mesh_topology, visited, - path, depth + 1, res, tensor_shape) + PlanSpace._enum_dims_mapping( + process_mesh_topology, + visited, + path, + depth + 1, + res, + tensor_shape, + ) visited[i] = False path.pop() @staticmethod def enum_process_mesh_topology(processes): """Enumerate all process meshes with the given processes.""" - assert processes >= 1, "The processes must be number and greater than 0." + assert ( + processes >= 1 + ), "The processes must be number and greater than 0." 
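The enum_process_mesh_topology hunk above starts by collecting divisors of the process count; the general idea is to enumerate candidate mesh shapes whose sizes multiply to the number of processes. A minimal illustrative sketch of that idea only (not the exact Paddle enumeration):

def enum_mesh_shapes(processes):
    # Candidate mesh shapes whose sizes multiply to `processes`.
    assert processes >= 1, "The processes must be a number greater than 0."
    shapes = [[processes]]  # 1-D mesh
    for d in range(2, processes + 1):  # divisors, as in the hunk above
        if processes % d == 0:
            shapes.append([d, processes // d])  # 2-D meshes from divisor pairs
    return shapes

print(enum_mesh_shapes(8))  # [[8], [2, 4], [4, 2], [8, 1]]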
# compute divisors divisors = [] for i in range(1, processes + 1): @@ -182,7 +205,8 @@ class PlanSpace: dims_mapping_dict = OrderedDict() op_valid_dist_attrs = [] dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + op.type + ) # enumerate all valid dims mapping of tensor when process mesh given for var_name in chain(op.input_arg_names, op.output_arg_names): @@ -193,15 +217,22 @@ class PlanSpace: depth = 0 path = [] dims_mapping_list = [] - PlanSpace._enum_dims_mapping(process_mesh.topology, visited, path, - depth, dims_mapping_list, - vars[var_name].shape) + PlanSpace._enum_dims_mapping( + process_mesh.topology, + visited, + path, + depth, + dims_mapping_list, + vars[var_name].shape, + ) dims_mapping_dict[var_name] = copy.deepcopy(dims_mapping_list) # compose dims mapping composed_dims_mapping_list = list( product( - *[dims_mapping_dict[key] for key in dims_mapping_dict.keys()])) + *[dims_mapping_dict[key] for key in dims_mapping_dict.keys()] + ) + ) for composed_dims_mapping in composed_dims_mapping_list: op_dist_attr = OperatorDistributedAttribute() op_dist_attr.process_mesh = process_mesh @@ -209,15 +240,19 @@ class PlanSpace: for idx, dims_mapping in enumerate(composed_dims_mapping): if var_names[idx] in op.input_arg_names: - op_dist_attr.set_input_dims_mapping(var_names[idx], - dims_mapping) + op_dist_attr.set_input_dims_mapping( + var_names[idx], dims_mapping + ) elif var_names[idx] in op.output_arg_names: op_dist_attr.set_output_dims_mapping( - var_names[idx], dims_mapping) + var_names[idx], dims_mapping + ) else: raise ValueError( - "The {varname} is not input or output of op {op}.". - format(varname='var_names[idx]', op='op')) + "The {varname} is not input or output of op {op}.".format( + varname='var_names[idx]', op='op' + ) + ) dist_op = DistributedOperator(op, op_dist_attr) if dist_op_impl_container is None: @@ -226,14 +261,16 @@ class PlanSpace: valid = True try: changed = update_op_dims_mapping_by_elementwise_like_dist_impl( - dist_op) + dist_op + ) except Exception as e: valid = False if valid and not changed: if PlanFilter.check_dims_mapping_for_op( - op, dist_op.dist_attr, vars + op, dist_op.dist_attr, vars ) and PlanFilter.check_dims_mapping_for_special_op( - op, dist_op.dist_attr, vars): + op, dist_op.dist_attr, vars + ): dist_op.dist_attr.impl_type = "elementwise" dist_op.dist_attr.impl_idx = 0 op_valid_dist_attrs.append(dist_op.dist_attr) @@ -243,14 +280,16 @@ class PlanSpace: valid = True try: changed = update_op_dims_mapping_by_default_dist_impl( - dist_op) + dist_op + ) except Exception as e: valid = False if valid and not changed: if PlanFilter.check_dims_mapping_for_op( - op, dist_op.dist_attr, vars + op, dist_op.dist_attr, vars ) and PlanFilter.check_dims_mapping_for_special_op( - op, dist_op.dist_attr, vars): + op, dist_op.dist_attr, vars + ): dist_op.dist_attr.impl_type = "default" dist_op.dist_attr.impl_idx = 0 op_valid_dist_attrs.append(dist_op.dist_attr) @@ -261,7 +300,8 @@ class PlanSpace: for idx, impl in enumerate(impls): if impl.is_auto_compatible(dist_op): if PlanFilter.check_dims_mapping_for_op( - op, dist_op.dist_attr, vars): + op, dist_op.dist_attr, vars + ): dist_op.dist_attr.impl_type = dist_op.serial_op.type dist_op.dist_attr.impl_idx = idx op_valid_dist_attrs.append(dist_op.dist_attr) @@ -273,10 +313,12 @@ class PlanSpace: dist_op = DistributedOperator(op, op_dist_attr) for var_name in op.input_arg_names: op_dist_attr.set_input_dims_mapping( - vars[var_name], [-1 for i in vars[var_name].shape]) + vars[var_name], [-1 for 
i in vars[var_name].shape] + ) for var_name in op.output_arg_names: op_dist_attr.set_output_dims_mapping( - vars[var_name], [-1 for i in vars[var_name].shape]) + vars[var_name], [-1 for i in vars[var_name].shape] + ) dist_op.dist_attr.impl_type = "default" dist_op.dist_attr.impl_idx = 0 op_valid_dist_attrs.append(dist_op.dist_attr) @@ -284,9 +326,9 @@ class PlanSpace: return op_valid_dist_attrs @staticmethod - def enum_valid_dist_attr_for_program(program, - process_mesh_topology, - is_pipeline=False): + def enum_valid_dist_attr_for_program( + program, process_mesh_topology, is_pipeline=False + ): """Enumerate valid distributed attributes for all ops in program.""" valid_dist_attr_dict = OrderedDict() ops = program.global_block().ops @@ -304,16 +346,31 @@ class PlanSpace: if len(process_mesh_topology) > 1: process_mesh_shape = process_mesh_topology[:-1] per_process_mesh_group = processes // pipeline_stages - pipeline_process_meshes = [auto.ProcessMesh(mesh=np.array(global_group[i*per_process_mesh_group: \ - (i+1)*per_process_mesh_group]).reshape(process_mesh_shape).tolist()) for i in range(pipeline_stages)] + pipeline_process_meshes = [ + auto.ProcessMesh( + mesh=np.array( + global_group[ + i + * per_process_mesh_group : (i + 1) + * per_process_mesh_group + ] + ) + .reshape(process_mesh_shape) + .tolist() + ) + for i in range(pipeline_stages) + ] elif len(process_mesh_topology) == 1: pipeline_process_meshes = [ auto.ProcessMesh(mesh=[i]) for i in range(pipeline_stages) ] else: if len(process_mesh_topology) > 1: - global_process_mesh = auto.ProcessMesh(mesh=np.array( - global_group).reshape(process_mesh_topology).tolist()) + global_process_mesh = auto.ProcessMesh( + mesh=np.array(global_group) + .reshape(process_mesh_topology) + .tolist() + ) else: global_process_mesh = auto.ProcessMesh(mesh=global_group) @@ -323,8 +380,11 @@ class PlanSpace: op_process_mesh = global_process_mesh pipeline_stage = -1 if pipeline_process_meshes is not None: - pipeline_stage = idx // op_count_per_stage if idx // op_count_per_stage < len( - pipeline_process_meshes) else idx // op_count_per_stage - 1 + pipeline_stage = ( + idx // op_count_per_stage + if idx // op_count_per_stage < len(pipeline_process_meshes) + else idx // op_count_per_stage - 1 + ) if pipeline_stage >= len(pipeline_process_meshes): pipeline_stage = len(pipeline_process_meshes) - 1 op_process_mesh = pipeline_process_meshes[pipeline_stage] @@ -338,7 +398,8 @@ class PlanSpace: else: dims_mapping = [-1 for i in vars[var_name].shape] op_dist_attr.set_input_dims_mapping( - var_name, dims_mapping) + var_name, dims_mapping + ) for var_name in op.output_arg_names: if var_name in PlanSpace.special_vars: @@ -346,24 +407,31 @@ class PlanSpace: else: dims_mapping = [-1 for i in vars[var_name].shape] op_dist_attr.set_output_dims_mapping( - var_name, dims_mapping) + var_name, dims_mapping + ) op_valid_dist_attrs = [op_dist_attr] pipeline_stage = 0 if pipeline_stage != -1 else pipeline_stage else: op_valid_dist_attrs = PlanSpace._enum_valid_dist_attr_for_op( - program, op, op_process_mesh) + program, op, op_process_mesh + ) - assert op_valid_dist_attrs is not None, "Enumerate {} valid distributed attribute failed.".format( - op) + assert ( + op_valid_dist_attrs is not None + ), "Enumerate {} valid distributed attribute failed.".format(op) valid_dist_attr_dict[op.desc.id()] = [ - op_valid_dist_attrs, pipeline_stage + op_valid_dist_attrs, + pipeline_stage, ] - return valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh + return ( + 
valid_dist_attr_dict, + pipeline_process_meshes, + global_process_mesh, + ) class SearchAlgorithm: - def __init__(self, name): self._name = name @@ -376,7 +444,6 @@ class SearchAlgorithm: class MCMC(SearchAlgorithm): - def __init__(self, serial_program_info, parallelizer, max_search_times=5): super(MCMC, self).__init__("mcmc") self._serial_program_info = serial_program_info @@ -395,32 +462,49 @@ class MCMC(SearchAlgorithm): def max_search_times(self): return self._max_search_times - def make_special_op_unshard(self, op, ops, vars, dist_context, - valid_dist_attr_dict): + def make_special_op_unshard( + self, op, ops, vars, dist_context, valid_dist_attr_dict + ): if op.type == "softmax_with_cross_entropy": for var_name in op.input_arg_names: dims_mapping = dist_context.get_op_dist_attr_for_program( - op).get_input_dims_mapping(var_name) - if dims_mapping != dist_context.get_tensor_dist_attr_for_program( - vars[var_name]).dims_mapping: + op + ).get_input_dims_mapping(var_name) + if ( + dims_mapping + != dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ).dims_mapping + ): has_changed = False for search_op in ops: if var_name in search_op.output_arg_names: op_dist_attr_list = valid_dist_attr_dict[ - search_op.desc.id()][0] + search_op.desc.id() + ][0] for op_dist_attr in op_dist_attr_list: - if op_dist_attr.get_output_dims_mapping( - var_name) == dims_mapping: + if ( + op_dist_attr.get_output_dims_mapping( + var_name + ) + == dims_mapping + ): dist_context.set_op_dist_attr_for_program( - search_op, op_dist_attr) + search_op, op_dist_attr + ) for name in search_op.output_arg_names: - tensor_dist_attr = TensorDistributedAttribute( + tensor_dist_attr = ( + TensorDistributedAttribute() + ) + tensor_dist_attr.process_mesh = ( + op_dist_attr.process_mesh ) - tensor_dist_attr.process_mesh = op_dist_attr.process_mesh tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping( - name) + name + ) dist_context.set_tensor_dist_attr_for_program( - vars[name], tensor_dist_attr) + vars[name], tensor_dist_attr + ) has_changed = True break if has_changed: @@ -430,8 +514,13 @@ class MCMC(SearchAlgorithm): "Change softmax_with_cross_entropy dist attr failed" ) - def init_program(self, valid_dist_attr_dict, program, - pipeline_process_meshes, global_process_mesh): + def init_program( + self, + valid_dist_attr_dict, + program, + pipeline_process_meshes, + global_process_mesh, + ): ops = program.global_block().ops vars = program.global_block().vars new_dist_context = DistributedContext() @@ -439,32 +528,44 @@ class MCMC(SearchAlgorithm): for op in ops: op_valid_dist_attr_list = valid_dist_attr_dict[op.desc.id()][0] random_op_dist_attr = np.random.randint( - len(op_valid_dist_attr_list)) + len(op_valid_dist_attr_list) + ) init_op_dist_attr = op_valid_dist_attr_list[random_op_dist_attr] new_dist_context.set_op_dist_attr_for_program(op, init_op_dist_attr) for var_name in op.input_arg_names: if var_name == "lod_tensor_blocking_queue_0": continue - if new_dist_context.get_tensor_dist_attr_for_program( - vars[var_name]) is None: + if ( + new_dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ) + is None + ): tensor_dist_attr = TensorDistributedAttribute() - tensor_dist_attr.process_mesh = init_op_dist_attr.process_mesh - tensor_dist_attr.dims_mapping = init_op_dist_attr.get_input_dims_mapping( - var_name) + tensor_dist_attr.process_mesh = ( + init_op_dist_attr.process_mesh + ) + tensor_dist_attr.dims_mapping = ( + init_op_dist_attr.get_input_dims_mapping(var_name) + ) 
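                    # Editor's note (added comment, not in the original patch):
                    # tensors that have no annotation yet inherit both the
                    # process mesh and the input dims mapping of the randomly
                    # sampled op dist attr, so the MCMC starting point is
                    # consistent between every op and its operands.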
new_dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) + vars[var_name], tensor_dist_attr + ) for var_name in op.output_arg_names: tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.process_mesh = init_op_dist_attr.process_mesh - tensor_dist_attr.dims_mapping = init_op_dist_attr.get_output_dims_mapping( - var_name) + tensor_dist_attr.dims_mapping = ( + init_op_dist_attr.get_output_dims_mapping(var_name) + ) new_dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) + vars[var_name], tensor_dist_attr + ) # NOTE: this is a temporary solution to make softmax_with_cross_entropy unshard - self.make_special_op_unshard(op, ops, vars, new_dist_context, - valid_dist_attr_dict) + self.make_special_op_unshard( + op, ops, vars, new_dist_context, valid_dist_attr_dict + ) # add process meshes to distributed context if global_process_mesh is not None: @@ -475,16 +576,19 @@ class MCMC(SearchAlgorithm): return new_dist_context - def estimate_searched_strategy_cost(self, - dist_context, - pipeline_process_meshes=None): + def estimate_searched_strategy_cost( + self, dist_context, pipeline_process_meshes=None + ): cost = None # get all distributed programs all_dist_main_program = get_all_distributed_main_program( - self.serial_program_info, dist_context, self.parallelizer) - pipeline_config = [ - process_mesh.processes for process_mesh in pipeline_process_meshes - ] if pipeline_process_meshes is not None else None + self.serial_program_info, dist_context, self.parallelizer + ) + pipeline_config = ( + [process_mesh.processes for process_mesh in pipeline_process_meshes] + if pipeline_process_meshes is not None + else None + ) microbatch_size = 1 for program in all_dist_main_program: searched_batch_size = False @@ -497,14 +601,17 @@ class MCMC(SearchAlgorithm): break from .utils import get_standalone_cost_data + standalone_cost_data = get_standalone_cost_data(all_dist_main_program) # cost model does not support cluster argument - cost = estimate_cost(all_dist_main_program, - cluster=None, - pipeline_config=pipeline_config, - standalone_cost_data=standalone_cost_data, - batch_size=microbatch_size) + cost = estimate_cost( + all_dist_main_program, + cluster=None, + pipeline_config=pipeline_config, + standalone_cost_data=standalone_cost_data, + batch_size=microbatch_size, + ) return cost @@ -514,10 +621,12 @@ class MCMC(SearchAlgorithm): process_mesh = op_dist_attr.process_mesh tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.process_mesh = process_mesh - tensor_dist_attr.dims_mapping = op_dist_attr.get_output_dims_mapping( - var_name) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_output_dims_mapping(var_name) + ) dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) + vars[var_name], tensor_dist_attr + ) # set input tensor distributed attribute if input is data or parameter for var_name in op.input_arg_names: @@ -525,27 +634,34 @@ class MCMC(SearchAlgorithm): process_mesh = op_dist_attr.process_mesh tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.process_mesh = process_mesh - tensor_dist_attr.dims_mapping = op_dist_attr.get_input_dims_mapping( - var_name) + tensor_dist_attr.dims_mapping = ( + op_dist_attr.get_input_dims_mapping(var_name) + ) dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) + vars[var_name], tensor_dist_attr + ) def change_process_mesh(self, op, changed_process_mesh, vars, dist_context): 
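        # Editor's note (added comment, not in the original patch): this
        # re-pins the selected op, its output tensors, and its parameter/data
        # input tensors to `changed_process_mesh`; the MCMC search calls it
        # whenever an op is moved to a neighbouring pipeline stage.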
dist_context.get_op_dist_attr_for_program( - op).process_mesh = changed_process_mesh + op + ).process_mesh = changed_process_mesh for var_name in op.output_arg_names: dist_context.get_tensor_dist_attr_for_program( - vars[var_name]).process_mesh = changed_process_mesh + vars[var_name] + ).process_mesh = changed_process_mesh for var_name in op.input_arg_names: if vars[var_name].is_parameter or vars[var_name].is_data: dist_context.get_tensor_dist_attr_for_program( - vars[var_name]).process_mesh = changed_process_mesh - - def search_once(self, - program, - valid_dist_attr_dict, - dist_context, - pipeline_process_meshes=None): + vars[var_name] + ).process_mesh = changed_process_mesh + + def search_once( + self, + program, + valid_dist_attr_dict, + dist_context, + pipeline_process_meshes=None, + ): raw_ops = program.global_block().ops ops = [] for op in raw_ops: @@ -561,9 +677,11 @@ class MCMC(SearchAlgorithm): op_valid_dist_attr_list = valid_dist_attr_dict[selected_op.desc.id()][0] pipeline_stage = valid_dist_attr_dict[selected_op.desc.id()][1] random_selected_dist_attr_idx = np.random.randint( - len(op_valid_dist_attr_list)) + len(op_valid_dist_attr_list) + ) selected_op_dist_attr = copy.deepcopy( - op_valid_dist_attr_list[random_selected_dist_attr_idx]) + op_valid_dist_attr_list[random_selected_dist_attr_idx] + ) start_idx = ops[0].desc.id() if pipeline_stage > -1: @@ -574,142 +692,215 @@ class MCMC(SearchAlgorithm): if changed_mode == 0: # not change the process mesh, just change dims mapping new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) - self.set_tensor_dist_attr(selected_op, selected_op_dist_attr, - vars, new_dist_context) + selected_op, selected_op_dist_attr + ) + self.set_tensor_dist_attr( + selected_op, selected_op_dist_attr, vars, new_dist_context + ) elif changed_mode == 1: changed_stage = pipeline_stage - 1 - if changed_stage == -1 or random_selected_op_idx == len(ops) - 1 or \ - (random_selected_op_idx + 1 == len(ops) - 1 and new_valid_dist_attr_dict[ops[random_selected_op_idx + 1].desc.id()][1] == pipeline_stage + 1 ): + if ( + changed_stage == -1 + or random_selected_op_idx == len(ops) - 1 + or ( + random_selected_op_idx + 1 == len(ops) - 1 + and new_valid_dist_attr_dict[ + ops[random_selected_op_idx + 1].desc.id() + ][1] + == pipeline_stage + 1 + ) + ): new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) - self.set_tensor_dist_attr(selected_op, - selected_op_dist_attr, vars, - new_dist_context) + selected_op, selected_op_dist_attr + ) + self.set_tensor_dist_attr( + selected_op, + selected_op_dist_attr, + vars, + new_dist_context, + ) else: selected_op_process_mesh = pipeline_process_meshes[ - pipeline_stage] + pipeline_stage + ] next_op_id = ops[random_selected_op_idx + 1].desc.id() - if new_valid_dist_attr_dict[next_op_id][ - 1] == pipeline_stage + 1 and random_selected_op_idx + 1 != len( - ops) - 1: + if ( + new_valid_dist_attr_dict[next_op_id][1] + == pipeline_stage + 1 + and random_selected_op_idx + 1 != len(ops) - 1 + ): new_valid_dist_attr_dict[next_op_id][1] = pipeline_stage for op_dist_attr in new_valid_dist_attr_dict[ - next_op_id][0]: + next_op_id + ][0]: op_dist_attr.process_mesh = selected_op_process_mesh # set next op dist attr in the discontext and output/input tensor process mesh self.change_process_mesh( ops[random_selected_op_idx + 1], - selected_op_process_mesh, vars, new_dist_context) + selected_op_process_mesh, + vars, + new_dist_context, + ) # change the selected op stage and output dist 
attr - new_valid_dist_attr_dict[ - selected_op.desc.id()][1] = changed_stage + new_valid_dist_attr_dict[selected_op.desc.id()][ + 1 + ] = changed_stage new_process_mesh = pipeline_process_meshes[changed_stage] selected_op_dist_attr.process_mesh = new_process_mesh for op_dist_attr in new_valid_dist_attr_dict[ - selected_op.desc.id()][0]: + selected_op.desc.id() + ][0]: op_dist_attr.process_mesh = new_process_mesh new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) + selected_op, selected_op_dist_attr + ) - self.set_tensor_dist_attr(selected_op, - selected_op_dist_attr, vars, - new_dist_context) + self.set_tensor_dist_attr( + selected_op, + selected_op_dist_attr, + vars, + new_dist_context, + ) # change the pre op stage for idx in range(random_selected_op_idx - 1, -1, -1): stage = new_valid_dist_attr_dict[ops[idx].desc.id()][1] valid_dist_attr_list = new_valid_dist_attr_dict[ - ops[idx].desc.id()][0] + ops[idx].desc.id() + ][0] new_process_mesh = pipeline_process_meshes[ - changed_stage] + changed_stage + ] if stage == changed_stage + 1: - new_valid_dist_attr_dict[ - ops[idx].desc.id()][1] = changed_stage + new_valid_dist_attr_dict[ops[idx].desc.id()][ + 1 + ] = changed_stage for op_dist_attr in valid_dist_attr_list: op_dist_attr.process_mesh = new_process_mesh new_dist_context.get_op_dist_attr_for_program( - ops[idx]).process_mesh = new_process_mesh + ops[idx] + ).process_mesh = new_process_mesh # change process mesh of the output and input tensor - self.change_process_mesh(ops[idx], new_process_mesh, - vars, new_dist_context) + self.change_process_mesh( + ops[idx], + new_process_mesh, + vars, + new_dist_context, + ) else: break else: changed_stage = pipeline_stage + 1 - if changed_stage == len( - pipeline_process_meshes) or random_selected_op_idx == 0 or \ - (new_valid_dist_attr_dict[ops[random_selected_op_idx - 1].desc.id()][1] == pipeline_stage - 1 and (random_selected_op_idx == 1)): + if ( + changed_stage == len(pipeline_process_meshes) + or random_selected_op_idx == 0 + or ( + new_valid_dist_attr_dict[ + ops[random_selected_op_idx - 1].desc.id() + ][1] + == pipeline_stage - 1 + and (random_selected_op_idx == 1) + ) + ): new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) - self.set_tensor_dist_attr(selected_op, - selected_op_dist_attr, vars, - new_dist_context) + selected_op, selected_op_dist_attr + ) + self.set_tensor_dist_attr( + selected_op, + selected_op_dist_attr, + vars, + new_dist_context, + ) else: selected_op_process_mesh = pipeline_process_meshes[ - pipeline_stage] + pipeline_stage + ] pre_op_id = ops[random_selected_op_idx - 1].desc.id() - if new_valid_dist_attr_dict[pre_op_id][ - 1] == pipeline_stage - 1 and random_selected_op_idx != 1: + if ( + new_valid_dist_attr_dict[pre_op_id][1] + == pipeline_stage - 1 + and random_selected_op_idx != 1 + ): new_valid_dist_attr_dict[pre_op_id][1] = pipeline_stage for op_dist_attr in new_valid_dist_attr_dict[pre_op_id][ - 0]: + 0 + ]: op_dist_attr.process_mesh = selected_op_process_mesh # set pre op dist attr in the discontext and output tensor process mesh self.change_process_mesh( ops[random_selected_op_idx - 1], - selected_op_process_mesh, vars, new_dist_context) + selected_op_process_mesh, + vars, + new_dist_context, + ) # change the selected op stage and output tensor dist attr - new_valid_dist_attr_dict[ - selected_op.desc.id()][1] = changed_stage + new_valid_dist_attr_dict[selected_op.desc.id()][ + 1 + ] = changed_stage new_process_mesh = 
pipeline_process_meshes[changed_stage] selected_op_dist_attr.process_mesh = new_process_mesh for op_dist_attr in new_valid_dist_attr_dict[ - selected_op.desc.id()][0]: + selected_op.desc.id() + ][0]: op_dist_attr.process_mesh = new_process_mesh new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) - self.set_tensor_dist_attr(selected_op, - selected_op_dist_attr, vars, - new_dist_context) + selected_op, selected_op_dist_attr + ) + self.set_tensor_dist_attr( + selected_op, + selected_op_dist_attr, + vars, + new_dist_context, + ) # change the next op stage for idx in range(random_selected_op_idx + 1, len(ops)): stage = new_valid_dist_attr_dict[ops[idx].desc.id()][1] valid_dist_attr_list = new_valid_dist_attr_dict[ - ops[idx].desc.id()][0] + ops[idx].desc.id() + ][0] new_process_mesh = pipeline_process_meshes[ - changed_stage] + changed_stage + ] if stage == changed_stage - 1: - new_valid_dist_attr_dict[ - ops[idx].desc.id()][1] = changed_stage + new_valid_dist_attr_dict[ops[idx].desc.id()][ + 1 + ] = changed_stage for op_dist_attr in valid_dist_attr_list: op_dist_attr.process_mesh = new_process_mesh new_dist_context.get_op_dist_attr_for_program( - ops[idx]).process_mesh = new_process_mesh + ops[idx] + ).process_mesh = new_process_mesh # change the output tensor dist attr - self.change_process_mesh(ops[idx], new_process_mesh, - vars, new_dist_context) + self.change_process_mesh( + ops[idx], + new_process_mesh, + vars, + new_dist_context, + ) else: break else: new_dist_context.set_op_dist_attr_for_program( - selected_op, selected_op_dist_attr) - self.set_tensor_dist_attr(selected_op, selected_op_dist_attr, vars, - new_dist_context) + selected_op, selected_op_dist_attr + ) + self.set_tensor_dist_attr( + selected_op, selected_op_dist_attr, vars, new_dist_context + ) for op in ops: # make softmax_with_cross_entropy unshard if op.type == "softmax_with_cross_entropy": - self.make_special_op_unshard(op, ops, vars, new_dist_context, - valid_dist_attr_dict) + self.make_special_op_unshard( + op, ops, vars, new_dist_context, valid_dist_attr_dict + ) break if new_valid_dist_attr_dict is None: @@ -717,22 +908,29 @@ class MCMC(SearchAlgorithm): else: return new_valid_dist_attr_dict, new_dist_context - def _search_core(self, - valid_dist_attr_dict, - init_dist_context, - pipeline_process_meshes=None): + def _search_core( + self, + valid_dist_attr_dict, + init_dist_context, + pipeline_process_meshes=None, + ): times = 0 best_dist_context = init_dist_context cost = self.estimate_searched_strategy_cost( - init_dist_context, pipeline_process_meshes).runtime + init_dist_context, pipeline_process_meshes + ).runtime min_cost = cost while times < self.max_search_times: times += 1 new_dist_context = self.search_once( - self.serial_program_info.train_program, valid_dist_attr_dict, - best_dist_context, pipeline_process_meshes)[1] + self.serial_program_info.train_program, + valid_dist_attr_dict, + best_dist_context, + pipeline_process_meshes, + )[1] cur_cost = self.estimate_searched_strategy_cost( - new_dist_context, pipeline_process_meshes).runtime + new_dist_context, pipeline_process_meshes + ).runtime if (min_cost - cur_cost) > 0: best_dist_context = copy.deepcopy(new_dist_context) min_cost = cur_cost @@ -744,35 +942,57 @@ class MCMC(SearchAlgorithm): start_time = time.time() train_program = self.serial_program_info.train_program cluster = self.serial_program_info.cluster - processes = paddle.distributed.get_world_size( - ) if cluster is None else len(cluster.get_all_devices("GPU")) + 
processes = ( + paddle.distributed.get_world_size() + if cluster is None + else len(cluster.get_all_devices("GPU")) + ) assert processes > 0, "Get process failed." process_mesh_topology_list = PlanSpace.enum_process_mesh_topology( - processes) + processes + ) searched_dist_context = None min_cost = None searched_pipeline_dist_context = None pipeline_min_cost = None for process_mesh_topology in process_mesh_topology_list: - print("MCMC search: search process mesh {} with pipeline mode.". - format(process_mesh_topology)) - valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program( - train_program, process_mesh_topology, True) - init_dist_context = self.init_program(valid_dist_attr_dict, - train_program, - pipeline_process_meshes, - global_process_mesh) + print( + "MCMC search: search process mesh {} with pipeline mode.".format( + process_mesh_topology + ) + ) + ( + valid_dist_attr_dict, + pipeline_process_meshes, + global_process_mesh, + ) = PlanSpace.enum_valid_dist_attr_for_program( + train_program, process_mesh_topology, True + ) + init_dist_context = self.init_program( + valid_dist_attr_dict, + train_program, + pipeline_process_meshes, + global_process_mesh, + ) best_dist_context, cost = self._search_core( - valid_dist_attr_dict, init_dist_context, - pipeline_process_meshes) + valid_dist_attr_dict, init_dist_context, pipeline_process_meshes + ) print( - "MCMC search: the min cost is {} in the process mesh {} with pipeline mode." - .format(cost, process_mesh_topology)) + "MCMC search: the min cost is {} in the process mesh {} with pipeline mode.".format( + cost, process_mesh_topology + ) + ) best_dist_context._dist_op_context = DistributedOperatorContext() - pipeline_min_cost = cost if pipeline_min_cost is None else pipeline_min_cost - searched_pipeline_dist_context = best_dist_context if searched_pipeline_dist_context is None else searched_pipeline_dist_context + pipeline_min_cost = ( + cost if pipeline_min_cost is None else pipeline_min_cost + ) + searched_pipeline_dist_context = ( + best_dist_context + if searched_pipeline_dist_context is None + else searched_pipeline_dist_context + ) if pipeline_min_cost > cost: searched_pipeline_dist_context = best_dist_context pipeline_min_cost = cost @@ -783,23 +1003,41 @@ class MCMC(SearchAlgorithm): # if process_mesh_topology shape is 3, include pipeline mode by default if len(process_mesh_topology) == 3: continue - print("MCMC search: search process mesh {} without pipeline mode.". - format(process_mesh_topology)) - valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program( - train_program, process_mesh_topology, False) - init_dist_context = self.init_program(valid_dist_attr_dict, - train_program, - pipeline_process_meshes, - global_process_mesh) + print( + "MCMC search: search process mesh {} without pipeline mode.".format( + process_mesh_topology + ) + ) + ( + valid_dist_attr_dict, + pipeline_process_meshes, + global_process_mesh, + ) = PlanSpace.enum_valid_dist_attr_for_program( + train_program, process_mesh_topology, False + ) + init_dist_context = self.init_program( + valid_dist_attr_dict, + train_program, + pipeline_process_meshes, + global_process_mesh, + ) best_dist_context, cost = self._search_core( - valid_dist_attr_dict, init_dist_context, - pipeline_process_meshes) + valid_dist_attr_dict, init_dist_context, pipeline_process_meshes + ) print( - "MCMC search: the min cost is {} in the process mesh {} without pipeline mode." 
- .format(cost, process_mesh_topology)) + "MCMC search: the min cost is {} in the process mesh {} without pipeline mode.".format( + cost, process_mesh_topology + ) + ) best_dist_context._dist_op_context = DistributedOperatorContext() - non_pipeline_min_cost = cost if non_pipeline_min_cost is None else non_pipeline_min_cost - searched_non_pipeline_dist_context = best_dist_context if searched_non_pipeline_dist_context is None else searched_non_pipeline_dist_context + non_pipeline_min_cost = ( + cost if non_pipeline_min_cost is None else non_pipeline_min_cost + ) + searched_non_pipeline_dist_context = ( + best_dist_context + if searched_non_pipeline_dist_context is None + else searched_non_pipeline_dist_context + ) if non_pipeline_min_cost > cost: searched_non_pipeline_dist_context = best_dist_context non_pipeline_min_cost = cost @@ -820,22 +1058,23 @@ class MCMC(SearchAlgorithm): pg0.add_ranks(process_mesh.processes) end_time = time.time() print( - "End MCMC searching: the min cost is {} and the search time is {}s." - .format(min_cost, end_time - start_time)) + "End MCMC searching: the min cost is {} and the search time is {}s.".format( + min_cost, end_time - start_time + ) + ) return searched_dist_context, min_cost class Planner: - - def __init__(self, - serial_program_info, - parallelizer, - algorithm_config=None): + def __init__( + self, serial_program_info, parallelizer, algorithm_config=None + ): self._serial_program_info = serial_program_info self._parallelizer = parallelizer self._algorithm_config = algorithm_config self._algorithm_searcher = self.create_algorithm_searcher( - algorithm_config) + algorithm_config + ) @property def serial_program_info(self): @@ -861,13 +1100,19 @@ class Planner: if name == "mcmc": # NOTE: Only GPU clusters are supported now. max_search_times = algorithm_config.get("max_search_times", None) - algorithm_searcher = MCMC( - self.serial_program_info, self.parallelizer, - max_search_times) if max_search_times is not None else MCMC( - self.serial_program_info, self.parallelizer) + algorithm_searcher = ( + MCMC( + self.serial_program_info, + self.parallelizer, + max_search_times, + ) + if max_search_times is not None + else MCMC(self.serial_program_info, self.parallelizer) + ) else: raise NotImplementedError( - "Other search algorithms have not been supported now.") + "Other search algorithms have not been supported now." 
+ ) return algorithm_searcher diff --git a/python/paddle/distributed/auto_parallel/planner_v2.py b/python/paddle/distributed/auto_parallel/planner_v2.py index 8e2c0c4617b0f8d83bfcd906f4903011bd944dd3..0f9792911dddc3177af003de24f557aa8ee3977e 100755 --- a/python/paddle/distributed/auto_parallel/planner_v2.py +++ b/python/paddle/distributed/auto_parallel/planner_v2.py @@ -18,7 +18,6 @@ from .tuner.parallel_tuner import ParallelTuner class Planner: - def __init__(self, mode, dist_context): self._mode = mode self._dist_context = dist_context @@ -39,8 +38,9 @@ class Planner: self._strategy = dist_context.strategy # set parallel tuner for auto search if self._strategy.auto_mode == "full": - self._parallel_tuner = ParallelTuner(self._dist_context, - mode=self._mode) + self._parallel_tuner = ParallelTuner( + self._dist_context, mode=self._mode + ) @property def completer(self): @@ -53,4 +53,5 @@ class Planner: self._completer.complete_forward_annotation() # parse forward sub block self._dist_context.block_state.parse_forward_blocks( - self._dist_context.serial_main_program) + self._dist_context.serial_main_program + ) diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index ba63a71643466bdd1e424889962aae1bc1b4d514..9883f116f4eea98216cd87737c9a1177e8e33688 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -31,10 +31,11 @@ def get_all_process_groups(): def get_process_group(group_id, g_process_group_map=None): global _g_process_group_map - return _g_process_group_map.get( - group_id, - None) if g_process_group_map is None else g_process_group_map.get( - group_id, None) + return ( + _g_process_group_map.get(group_id, None) + if g_process_group_map is None + else g_process_group_map.get(group_id, None) + ) def get_world_process_group(): @@ -75,10 +76,11 @@ def new_process_group(ranks, group_id=None): # the instantiation process in a more general way. In the future, the process group may # handle the communication implementation choice. class ProcessGroup: - def __init__(self, group_id, ranks): if group_id == 0 and get_process_group(0) is not None: - assert group_id != 0, "Process group id 0 is reserved for all ranks." + assert ( + group_id != 0 + ), "Process group id 0 is reserved for all ranks." 
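        # Editor's note (added comment, not in the original patch): id 0 is
        # the implicit world group; the ranks of every new group are also
        # added to group 0 (see the comment just below), which is why
        # user-created groups must not reuse id 0.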
self._group_id = group_id self._ranks = sorted(ranks) # Add the current ranks into group 0 @@ -103,8 +105,9 @@ class ProcessGroup: if set(new_ranks) <= set(self.ranks): return else: - assert self.is_instantiate() == False, \ - "Cannot add new ranks after instantiating the process group" + assert ( + self.is_instantiate() == False + ), "Cannot add new ranks after instantiating the process group" self._ranks.extend(new_ranks) self._ranks = sorted(list(set(self.ranks))) @@ -112,8 +115,9 @@ class ProcessGroup: if global_rank in self.ranks: return self.ranks.index(global_rank) else: - assert False, \ - "Rank {} doesn't belong to this group".format(global_rank) + assert False, "Rank {} doesn't belong to this group".format( + global_rank + ) def is_instantiate(self): return self._is_instantiate @@ -137,22 +141,27 @@ class ProcessGroup: if core.is_compiled_with_cuda(): place = core.CUDAPlace(genv.device_id) - core.NCCLParallelContext(strategy, - place).init_with_ring_id(ring_id) + core.NCCLParallelContext(strategy, place).init_with_ring_id( + ring_id + ) else: - assert False, ("No CUDA device found") + assert False, "No CUDA device found" # TODO(shenliang03): This is a temporary solution to solve the problem of # hang caused by cross-creation of new_group paddle.disable_static() - paddle.set_device('gpu:%d' % - paddle.distributed.ParallelEnv().dev_id) - tmp = paddle.to_tensor( - [1], dtype="int32") if _non_static_mode() else fill_constant( - [0], dtype="int32", value="1") + paddle.set_device( + 'gpu:%d' % paddle.distributed.ParallelEnv().dev_id + ) + tmp = ( + paddle.to_tensor([1], dtype="int32") + if _non_static_mode() + else fill_constant([0], dtype="int32", value="1") + ) # use legacy ops - _legacy_C_ops.c_allreduce_sum_(tmp, 'use_calc_stream', True, - 'ring_id', self.id) + _legacy_C_ops.c_allreduce_sum_( + tmp, 'use_calc_stream', True, 'ring_id', self.id + ) _legacy_C_ops.c_sync_calc_stream(tmp, tmp) paddle.enable_static() @@ -173,7 +182,8 @@ class ProcessGroup: def __str__(self): string = "id: {}, nranks: {}, ranks: {}.".format( - self.id, self.nranks, ", ".join(map(str, self.ranks))) + self.id, self.nranks, ", ".join(map(str, self.ranks)) + ) return string def __hash__(self): diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py index afe9f13e3ab43a4f845a1d708588ab03b0e39557..9d8a44b92d2e2b964c0c1f76faff865b211c0785 100644 --- a/python/paddle/distributed/auto_parallel/process_mesh.py +++ b/python/paddle/distributed/auto_parallel/process_mesh.py @@ -68,10 +68,10 @@ class ProcessMesh(object): assert process_ids is not None mesh = np.array(process_ids).reshape(shape) - if not isinstance(mesh, list) and \ - not isinstance(mesh, np.ndarray): + if not isinstance(mesh, list) and not isinstance(mesh, np.ndarray): raise ValueError( - 'The mesh must be an instance of list or np.ndarray.') + 'The mesh must be an instance of list or np.ndarray.' + ) if isinstance(mesh, list): mesh = np.array(mesh) @@ -79,30 +79,37 @@ class ProcessMesh(object): self._shape = list(self._mesh.shape) self._process_ids = self._mesh.flatten().tolist() - assert all(isinstance(p, int) for p in self._process_ids), \ - ("All elements of the mesh must be integer") - assert min( - self._process_ids) >= 0, ('All elements of the mesh must be >= 0.') + assert all( + isinstance(p, int) for p in self._process_ids + ), "All elements of the mesh must be integer" + assert ( + min(self._process_ids) >= 0 + ), 'All elements of the mesh must be >= 0.' 
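        # Editor's note (added comment, not in the original patch): together
        # these checks require an integer mesh with non-negative, pairwise
        # distinct ids, e.g. mesh=[[2, 4], [5, 0]] gives shape [2, 2],
        # process_ids [2, 4, 5, 0] and default dim_names ['d0', 'd1'].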
unique_process_ids = set(self._process_ids) assert len(unique_process_ids) == len( - self._process_ids), ('All elements of the mesh must be unique.') + self._process_ids + ), 'All elements of the mesh must be unique.' if dim_names is not None: - assert len(dim_names) == len(self._shape), \ - ("The length of dims_names must be same as the shape of the mesh.") + assert len(dim_names) == len( + self._shape + ), "The length of dims_names must be same as the shape of the mesh." self._dim_names = copy.deepcopy(dim_names) else: self._dim_names = ["d" + str(i) for i in range(len(self._shape))] unique_dim_names = set(self._dim_names) - assert len(unique_dim_names) == len(self._dim_names), ( - 'All dim_names {} must be unique.'.format(dim_names)) + assert len(unique_dim_names) == len( + self._dim_names + ), 'All dim_names {} must be unique.'.format(dim_names) # Store all process meshes from .dist_context import get_default_distributed_context + default_dist_cxt = get_default_distributed_context() default_dist_cxt.add_process_mesh(self) # Add new processes to process group 0 from .process_group import get_process_group + pg0 = get_process_group(0) pg0.add_ranks(self.processes) @@ -183,20 +190,24 @@ class ProcessMesh(object): def __exit__(self, exc_type, exc_value, exc_traceback): from .dist_tensor import DistributedTensor from .dist_op import DistributedOperator + default_prog = paddle.fluid.default_main_program() cur_block = default_prog.current_block() new_var_names = list(cur_block.vars.keys()) new_op_size = len(cur_block.ops) from .dist_context import get_default_distributed_context + default_dist_ctx = get_default_distributed_context() for name in new_var_names: if name not in self._old_var_names: tensor = cur_block.vars[name] dist_tensor = default_dist_ctx.get_dist_tensor_for_program( - tensor) + tensor + ) if dist_tensor is None: - dist_tensor = DistributedTensor(cur_block.vars[name], - {"process_mesh": self}) + dist_tensor = DistributedTensor( + cur_block.vars[name], {"process_mesh": self} + ) dist_tensor.dist_attr.mark_annotated("process_mesh") default_dist_ctx.add_dist_tensor_for_program(dist_tensor) else: @@ -229,5 +240,6 @@ class ProcessMesh(object): def __str__(self): str = "shape {}, process_ids {}, dim_nams {}".format( - self.shape, self.process_ids, self.dim_names) + self.shape, self.process_ids, self.dim_names + ) return str diff --git a/python/paddle/distributed/auto_parallel/process_mesh_v2.py b/python/paddle/distributed/auto_parallel/process_mesh_v2.py index bc0de1748f124c06559d6f6847cc64ee74b16731..0b427c9d16187343c74633ae8c5db27458194fa0 100644 --- a/python/paddle/distributed/auto_parallel/process_mesh_v2.py +++ b/python/paddle/distributed/auto_parallel/process_mesh_v2.py @@ -44,10 +44,10 @@ class ProcessMesh(core.ProcessMesh): """ def __init__(self, mesh, dim_names=None): - if not isinstance(mesh, list) and \ - not isinstance(mesh, np.ndarray): + if not isinstance(mesh, list) and not isinstance(mesh, np.ndarray): raise ValueError( - 'The mesh must be an instance of list or np.ndarray.') + 'The mesh must be an instance of list or np.ndarray.' 
+ ) if isinstance(mesh, list): mesh = np.array(mesh) @@ -56,24 +56,29 @@ class ProcessMesh(core.ProcessMesh): self._shape = list(self._mesh.shape) self._process_ids = self._mesh.flatten().tolist() - assert all(isinstance(p, int) for p in self._process_ids), \ - ("All elements of the mesh must be integer") - assert min( - self._process_ids) >= 0, ('All elements of the mesh must be >= 0.') + assert all( + isinstance(p, int) for p in self._process_ids + ), "All elements of the mesh must be integer" + assert ( + min(self._process_ids) >= 0 + ), 'All elements of the mesh must be >= 0.' unique_process_ids = set(self._process_ids) assert len(unique_process_ids) == len( - self._process_ids), ('All elements of the mesh must be unique.') + self._process_ids + ), 'All elements of the mesh must be unique.' if dim_names is not None: - assert len(dim_names) == len(self._shape), \ - ("The length of dims_names must be same as the shape of the mesh.") + assert len(dim_names) == len( + self._shape + ), "The length of dims_names must be same as the shape of the mesh." self._dim_names = dim_names else: self._dim_names = ["d" + str(i) for i in range(len(self._shape))] # Follow the requirement for using pybind11 - core.ProcessMesh.__init__(self, self._shape, self._process_ids, - self._dim_names) + core.ProcessMesh.__init__( + self, self._shape, self._process_ids, self._dim_names + ) @property def mesh(self): @@ -107,15 +112,20 @@ def compute_compatible_process_mesh(process_meshes): compatible_result = None for process_mesh in process_meshes: - compatible, compatible_result = _compute_compatible_of_two_process_meshes( - compatible_result, process_mesh) + ( + compatible, + compatible_result, + ) = _compute_compatible_of_two_process_meshes( + compatible_result, process_mesh + ) if not compatible: return None if compatible_result.empty(): return None if isinstance(compatible_result, core.ProcessMesh): mesh = np.array(compatible_result.process_ids).reshape( - compatible_result.shape) + compatible_result.shape + ) return ProcessMesh(mesh, compatible_result.dim_names) elif isinstance(compatible_result, ProcessMesh): return ProcessMesh(compatible_result.mesh, compatible_result.dim_names) diff --git a/python/paddle/distributed/auto_parallel/reshard.py b/python/paddle/distributed/auto_parallel/reshard.py index 46057ad97c4e80f255d22946d499d186d99334d7..44521c05994ddb3bd38680ab15dcb33bf27c82d9 100644 --- a/python/paddle/distributed/auto_parallel/reshard.py +++ b/python/paddle/distributed/auto_parallel/reshard.py @@ -32,7 +32,11 @@ from .utils import is_gradient_clip_op # NOTE: If op in _g_special_ops or _g_gradient_clip_ops, it will not be resharded. 
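
# Editor's note: the helper below is an illustrative sketch only and is NOT
# part of the original patch. Under the stated assumptions it mimics the
# partition-index arithmetic that Resharder.compute_partition_index performs
# later in this file: a dim that is not sharded (dims_mapping[i] == -1) keeps
# its full extent, while a sharded dim is cut into
# process_shape[dims_mapping[i]] equal slices (assuming the extent divides
# evenly) and the slice owned by `process` is picked from its row-major index
# inside the process mesh. The name _sketch_partition_index is hypothetical.
def _sketch_partition_index(
    process, complete_shape, dims_mapping, process_shape, process_group
):
    # row-major index of `process` inside the mesh laid out over process_group
    relative = process_group.index(process)
    process_index = []
    remaining = len(process_group)
    for dim in process_shape:
        remaining //= dim
        process_index.append(relative // remaining)
        relative %= remaining
    # per-dim [start, end) owned by this process
    partition_index = []
    for i, extent in enumerate(complete_shape):
        if dims_mapping[i] == -1:
            partition_index.append([0, extent])
        else:
            per_shard = extent // process_shape[dims_mapping[i]]
            start = process_index[dims_mapping[i]] * per_shard
            partition_index.append([start, start + per_shard])
    return partition_index

# For example, with complete_shape=[8, 8], dims_mapping=[0, -1],
# process_shape=[2] and process_group=[0, 1], process 1 owns rows [4, 8)
# and all columns [0, 8).
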
_g_special_ops = ['check_finite_and_unscale', 'update_loss_scaling'] _g_gradient_clip_ops = [ - "sum", "sqrt", "fill_constant", "elementwise_max", "elementwise_div" + "sum", + "sqrt", + "fill_constant", + "elementwise_max", + "elementwise_div", ] _g_subblock_ops = ["while", "conditional_block"] @@ -264,20 +268,25 @@ class Inserter: def insert_cast_op(block, idx, tensor, op_role, tensor_type): # to avoid name conflict with framework new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join(["cast@RESHARD", 'tmp'])) - out = block.create_var(name=new_var_name, - dtype=tensor_type, - type=tensor.type, - lod_level=tensor.lod_level) - cast_op = block._insert_op(idx, - type='cast', - inputs={'X': [tensor]}, - outputs={'Out': [out]}, - attrs={ - 'in_dtype': tensor.dtype, - 'out_dtype': out.dtype, - 'op_role': op_role - }) + ".".join(["cast@RESHARD", 'tmp']) + ) + out = block.create_var( + name=new_var_name, + dtype=tensor_type, + type=tensor.type, + lod_level=tensor.lod_level, + ) + cast_op = block._insert_op( + idx, + type='cast', + inputs={'X': [tensor]}, + outputs={'Out': [out]}, + attrs={ + 'in_dtype': tensor.dtype, + 'out_dtype': out.dtype, + 'op_role': op_role, + }, + ) cast_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @@ -287,16 +296,18 @@ class Inserter: op_type = 'send_v2' # use pair comm group process_group = new_process_group([src, dst]) - send_op = block._insert_op(idx, - type=op_type, - inputs={'X': [tensor]}, - attrs={ - 'ring_id': process_group.id, - 'peer': process_group.ranks.index(dst), - 'use_calc_stream': True, - 'op_role': op_role, - 'dynamic_shape': True - }) + send_op = block._insert_op( + idx, + type=op_type, + inputs={'X': [tensor]}, + attrs={ + 'ring_id': process_group.id, + 'peer': process_group.ranks.index(dst), + 'use_calc_stream': True, + 'op_role': op_role, + 'dynamic_shape': True, + }, + ) send_op._set_attr('op_namescope', "/auto_parallel/reshard") @staticmethod @@ -305,19 +316,21 @@ class Inserter: op_type = 'recv_v2' # use pair group process_group = new_process_group([src, dst]) - recv_op = block._insert_op(idx, - type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [tensor]}, - attrs={ - 'ring_id': process_group.id, - 'peer': process_group.ranks.index(src), - 'out_shape': tensor.shape, - 'dtype': tensor.dtype, - 'use_calc_stream': True, - 'op_role': op_role, - 'dynamic_shape': True - }) + recv_op = block._insert_op( + idx, + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [tensor]}, + attrs={ + 'ring_id': process_group.id, + 'peer': process_group.ranks.index(src), + 'out_shape': tensor.shape, + 'dtype': tensor.dtype, + 'use_calc_stream': True, + 'op_role': op_role, + 'dynamic_shape': True, + }, + ) recv_op._set_attr('op_namescope', "/auto_parallel/reshard") @staticmethod @@ -325,21 +338,23 @@ class Inserter: """Insert reset_lod op into block at the given index.""" new_var_name = paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join(["reset_lod@RESHARD", 'tmp'])) - reset_lod_out = block.create_var(name=new_var_name, - shape=X.shape, - type=X.type, - dtype=X.dtype, - lod_level=X.lod_level) - - reset_op = block._insert_op(idx, - type="lod_reset", - inputs={ - 'X': X, - 'Y': Y - }, - outputs={'Out': reset_lod_out}, - attrs={'op_role': op_role}) + ".".join(["reset_lod@RESHARD", 'tmp']) + ) + reset_lod_out = block.create_var( + name=new_var_name, + shape=X.shape, + type=X.type, + dtype=X.dtype, + lod_level=X.lod_level, + ) + + reset_op = block._insert_op( + idx, + type="lod_reset", + inputs={'X': X, 
'Y': Y}, + outputs={'Out': reset_lod_out}, + attrs={'op_role': op_role}, + ) reset_op._set_attr('op_namescope', "/auto_parallel/reshard") return reset_lod_out @@ -355,24 +370,29 @@ class Inserter: with paddle.static.program_guard(block.program): out = block.create_var( name=paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join([helper.name, 'tmp'])), + ".".join([helper.name, 'tmp']) + ), dtype=tensors[0].dtype, shape=None, lod_level=tensors[0].lod_level, type=tensors[0].type, persistable=False, - stop_gradient=False) - concat_op = block._insert_op(idx, - type='concat', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + stop_gradient=False, + ) + concat_op = block._insert_op( + idx, + type='concat', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + ) concat_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @staticmethod - def insert_slice_op(block, idx, tensor, starts, ends, axes, new_var_name, - op_role): + def insert_slice_op( + block, idx, tensor, starts, ends, axes, new_var_name, op_role + ): """Insert slice op into block at the given block.""" # This is a hack to insert split op to get slice tensor # 1. [128, 128] => [64, 128]: split @@ -387,19 +407,19 @@ class Inserter: # use assign if len(diff_dims) == 0: - out = block.create_var(name=new_var_name, - dtype=tensor.dtype, - type=tensor.type, - shape=slice_shape, - lod_level=tensor.lod_level) + out = block.create_var( + name=new_var_name, + dtype=tensor.dtype, + type=tensor.type, + shape=slice_shape, + lod_level=tensor.lod_level, + ) inputs = {'X': [tensor]} outputs = {"Out": [out]} attrs = {"in_place": False} - slice_op = block._insert_op(idx, - type="assign", - inputs=inputs, - outputs=outputs, - attrs=attrs) + slice_op = block._insert_op( + idx, type="assign", inputs=inputs, outputs=outputs, attrs=attrs + ) slice_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @@ -420,23 +440,27 @@ class Inserter: new_shape.append(item // num_or_sections) with paddle.static.program_guard(block.program): outs = [ - block.create_var(name=paddle.fluid.unique_name. 
- generate_with_ignorable_key(".".join( - ['split@RESHARD', 'tmp'])), - dtype=tensor.dtype, - shape=None, - type=tensor.type, - persistable=False, - lod_level=tensor.lod_level, - stop_gradient=False) + block.create_var( + name=paddle.fluid.unique_name.generate_with_ignorable_key( + ".".join(['split@RESHARD', 'tmp']) + ), + dtype=tensor.dtype, + shape=None, + type=tensor.type, + persistable=False, + lod_level=tensor.lod_level, + stop_gradient=False, + ) for i in range(num_or_sections) ] out = outs[cur_idx] - split_op = block._insert_op(idx, - type="split", - inputs=inputs, - outputs={'Out': outs}, - attrs=attrs) + split_op = block._insert_op( + idx, + type="split", + inputs=inputs, + outputs={'Out': outs}, + attrs=attrs, + ) split_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @@ -449,17 +473,21 @@ class Inserter: "starts": starts, "ends": ends, "infer_flags": infer_flags, - 'op_role': op_role + 'op_role': op_role, } - out = block.create_var(name=new_var_name, - dtype=tensor.dtype, - type=tensor.type, - lod_level=tensor.lod_level) - slice_op = block._insert_op(idx, - type="slice", - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + out = block.create_var( + name=new_var_name, + dtype=tensor.dtype, + type=tensor.type, + lod_level=tensor.lod_level, + ) + slice_op = block._insert_op( + idx, + type="slice", + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + ) slice_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @@ -480,19 +508,20 @@ class Inserter: outs = [ block.create_var( name=paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join([helper.name, 'tmp'])), + ".".join([helper.name, 'tmp']) + ), dtype=tensor.dtype, shape=None, lod_level=tensor.lod_level, type=tensor.type, persistable=False, - stop_gradient=False) for i in range(num_or_sections) + stop_gradient=False, + ) + for i in range(num_or_sections) ] - split_op = block._insert_op(idx, - type="split", - inputs=inputs, - outputs={'Out': outs}, - attrs=attrs) + split_op = block._insert_op( + idx, type="split", inputs=inputs, outputs={'Out': outs}, attrs=attrs + ) split_op._set_attr('op_namescope', "/auto_parallel/reshard") return outs @@ -505,27 +534,30 @@ class Inserter: with paddle.static.program_guard(block.program): out = block.create_var( name=paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join([helper.name, 'tmp'])), + ".".join([helper.name, 'tmp']) + ), dtype=paddle.int64, shape=None, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) inputs = {} attrs = {'force_cpu': False} attrs['str_value'] = str(int("1")) attrs['value'] = int("1") attrs['dtype'] = out.dtype attrs['op_role'] = op_role - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=[0], - op_type='fill_constant') - fillconstant_op = block._insert_op(idx, - type='fill_constant', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=[0], op_type='fill_constant' + ) + fillconstant_op = block._insert_op( + idx, + type='fill_constant', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + ) out.stop_gradient = True fillconstant_op._set_attr('op_namescope', "/auto_parallel/reshard") return out @@ -541,7 +573,8 @@ class Inserter: if not group.is_instantiate(): # insert fill_constant op fill_constant_out = Inserter.insert_fill_constant_op( - block, idx, op_role) + block, idx, op_role + ) fill_constant_out.stop_gradient = True # insert c_allreduce_sum op @@ 
-553,8 +586,9 @@ class Inserter: attrs={ 'ring_id': 0, 'use_calc_stream': True, - 'op_role': op_role - }) + 'op_role': op_role, + }, + ) allreduce_op._set_attr('op_namescope', "/auto_parallel/reshard") # insert c_sync_calc_stream op sync_calc_op = block._insert_op( @@ -562,7 +596,8 @@ class Inserter: type="c_sync_calc_stream", inputs={'X': [fill_constant_out]}, outputs={'Out': [fill_constant_out]}, - attrs={'op_role': op_role}) + attrs={'op_role': op_role}, + ) sync_calc_op._set_attr('op_namescope', "/auto_parallel/reshard") idx_offset = 3 @@ -573,37 +608,42 @@ class Inserter: with paddle.static.program_guard(block.program): allgather_out = block.create_var( name=paddle.fluid.unique_name.generate_with_ignorable_key( - ".".join([helper.name, 'tmp'])), + ".".join([helper.name, 'tmp']) + ), dtype=tensor.dtype, shape=None, lod_level=tensor.lod_level, type=tensor.type, persistable=False, - stop_gradient=False) - allgather_op = block._insert_op(idx + idx_offset, - type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [allgather_out]}, - attrs={ - 'ring_id': group.id, - 'use_calc_stream': True, - 'nranks': group.nranks, - 'op_role': op_role - }) + stop_gradient=False, + ) + allgather_op = block._insert_op( + idx + idx_offset, + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [allgather_out]}, + attrs={ + 'ring_id': group.id, + 'use_calc_stream': True, + 'nranks': group.nranks, + 'op_role': op_role, + }, + ) allgather_op._set_attr('op_namescope', "/auto_parallel/reshard") idx_offset += 1 # insert split op - split_out = Inserter.insert_split_op(block, idx + idx_offset, - allgather_out, group.nranks, - op_role) + split_out = Inserter.insert_split_op( + block, idx + idx_offset, allgather_out, group.nranks, op_role + ) idx_offset += 1 tensor_list.extend(split_out) return tensor_list, idx_offset @staticmethod - def concat_partitions_with_op(partition_tensor_list, tensor, - partition_index, block, idx, op_role): + def concat_partitions_with_op( + partition_tensor_list, tensor, partition_index, block, idx, op_role + ): """Concat the tensors and insert concat op.""" if not partition_tensor_list: partition_tensor_list.append((tensor, partition_index)) @@ -611,18 +651,42 @@ class Inserter: i = 0 has_concat = False while i < len(partition_tensor_list): - concat_axis, first_order, new_partition = Resharder.compute_concat_info( - partition_tensor_list[i][1], partition_index) + ( + concat_axis, + first_order, + new_partition, + ) = Resharder.compute_concat_info( + partition_tensor_list[i][1], partition_index + ) if concat_axis != -1: has_concat = True - _ = Inserter.insert_concat_op(block, idx[0], [partition_tensor_list[i][0], tensor], concat_axis, op_role) \ - if first_order == 0 else \ - Inserter.insert_concat_op(block, idx[0], [tensor, partition_tensor_list[i][0]], concat_axis, op_role) + _ = ( + Inserter.insert_concat_op( + block, + idx[0], + [partition_tensor_list[i][0], tensor], + concat_axis, + op_role, + ) + if first_order == 0 + else Inserter.insert_concat_op( + block, + idx[0], + [tensor, partition_tensor_list[i][0]], + concat_axis, + op_role, + ) + ) partition_tensor_list.pop(i) idx[0] += 1 - Inserter.concat_partitions_with_op(partition_tensor_list, _, - new_partition, block, - idx, op_role) + Inserter.concat_partitions_with_op( + partition_tensor_list, + _, + new_partition, + block, + idx, + op_role, + ) break i += 1 if not has_concat: @@ -636,7 +700,9 @@ class Remover: def remove_no_need_ops(auto_parallel_main_prog, dist_context, rank_id): """Remove no need ops in the main program""" 
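        # Editor's note (added comment, not in the original patch): an op is
        # removed from this rank's program when rank_id is not contained in
        # the op's process mesh, unless its type is one of the reader ops in
        # not_remove_op_ref below; the c_sync_comm_stream branch keeps only
        # the inputs whose process mesh contains this rank and drops the op
        # entirely when none remain.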
not_remove_op_ref = [ - "create_py_reader", "create_double_buffer_reader", "read" + "create_py_reader", + "create_double_buffer_reader", + "read", ] # NOTE: The nested sub block is not be supported now. @@ -660,7 +726,9 @@ class Remover: for var_name in op.output_arg_names: dim_list.extend( get_var_with_recursion( - var_name, block, auto_parallel_main_prog).shape) + var_name, block, auto_parallel_main_prog + ).shape + ) for i in range(idx, -1, -1): if ops[i].type == "create_py_reader": ops[i]._set_attr("shape_concat", dim_list) @@ -671,10 +739,13 @@ class Remover: if op.type == "c_sync_comm_stream": need_save = [] for var_name in op.input_arg_names: - process_mesh = dist_context.get_tensor_dist_attr_for_program( - get_var_with_recursion( - var_name, block, - auto_parallel_main_prog)).process_mesh + process_mesh = ( + dist_context.get_tensor_dist_attr_for_program( + get_var_with_recursion( + var_name, block, auto_parallel_main_prog + ) + ).process_mesh + ) if rank_id in process_mesh.processes: need_save.append(var_name) if not need_save: @@ -690,15 +761,19 @@ class Remover: op_dist_attr = dist_context.get_op_dist_attr_for_program(op) if op_dist_attr is not None: op_process_mesh = op_dist_attr.process_mesh - if rank_id not in op_process_mesh.processes and op.type not in not_remove_op_ref: + if ( + rank_id not in op_process_mesh.processes + and op.type not in not_remove_op_ref + ): remove_op_idx.append(idx) for idx in remove_op_idx[::-1]: block._remove_op(idx) @staticmethod - def remove_no_need_vars(auto_parallel_main_prog, dist_params_grads, - feed_var_names): + def remove_no_need_vars( + auto_parallel_main_prog, dist_params_grads, feed_var_names + ): """Remove no need vars in the main program""" for block_idx, block in enumerate(auto_parallel_main_prog.blocks): remove_vars = set() @@ -721,7 +796,10 @@ class Remover: param_grad_map = {} for op in ops: if int(op.attr('op_role')) == int(OpRole.Optimize): - if "Param" in op.input_names and "Grad" in op.input_names: + if ( + "Param" in op.input_names + and "Grad" in op.input_names + ): param_name = op.input("Param")[0] grad_name = op.input("Grad")[0] param_grad_map[param_name] = grad_name @@ -740,7 +818,9 @@ class Remover: grad_name = dist_params_grads[idx][1].name if grad_name != param_grad_map[param_name]: dist_params_grads[idx] = ( - vars[param_name], vars[param_grad_map[param_name]]) + vars[param_name], + vars[param_grad_map[param_name]], + ) idx += 1 for var in remove_vars: @@ -749,23 +829,28 @@ class Remover: block._remove_var(var) @staticmethod - def remove_no_need_in_main(auto_parallel_main_prog, dist_context, rank_id, - dist_params_grads): + def remove_no_need_in_main( + auto_parallel_main_prog, dist_context, rank_id, dist_params_grads + ): """Remove no need vars and ops in the main program.""" - Remover.remove_no_need_ops(auto_parallel_main_prog, dist_context, - rank_id) - Resharder.change_while_op_input_and_output(auto_parallel_main_prog, - dist_context) + Remover.remove_no_need_ops( + auto_parallel_main_prog, dist_context, rank_id + ) + Resharder.change_while_op_input_and_output( + auto_parallel_main_prog, dist_context + ) # 'feed_var_names' cannot be removed from auto_parallel_main_prog feed_var_names = [] for var in sum(list(dist_context.serial_feed_vars.values()), []): feed_var_names.append(var.name) - Remover.remove_no_need_vars(auto_parallel_main_prog, dist_params_grads, - feed_var_names) + Remover.remove_no_need_vars( + auto_parallel_main_prog, dist_params_grads, feed_var_names + ) @staticmethod - def 
remove_no_need_in_startup(auto_parallel_main_prog, - auto_parallel_startup_prog): + def remove_no_need_in_startup( + auto_parallel_main_prog, auto_parallel_startup_prog + ): """Remove no need vars and ops in the startup program.""" main_input_vars = set() main_ops = auto_parallel_main_prog.global_block().ops @@ -850,28 +935,43 @@ class Resharder: dist_params_grads (list): The list contains the tuple of param and grad. batch_size (int): The batch size. Default: None. """ + while_block_info = {} - def __init__(self, - auto_parallel_main_prog, - auto_parallel_startup_prog, - rank_id, - dist_context, - dist_params_grads, - batch_size=None): - assert isinstance(auto_parallel_main_prog, Program), "The type of auto_parallel_main_prog should be Program, " \ - "but got {}.".format(type(auto_parallel_main_prog)) + def __init__( + self, + auto_parallel_main_prog, + auto_parallel_startup_prog, + rank_id, + dist_context, + dist_params_grads, + batch_size=None, + ): + assert isinstance(auto_parallel_main_prog, Program), ( + "The type of auto_parallel_main_prog should be Program, " + "but got {}.".format(type(auto_parallel_main_prog)) + ) if auto_parallel_startup_prog is not None: - assert isinstance(auto_parallel_main_prog, Program), "The type of auto_parallel_startup_prog should be Program or None, " \ - "but got {}.".format(type(auto_parallel_startup_prog)) - assert isinstance(rank_id, int), "The type of rank_id should be int, " \ - "but got {}.".format(type(rank_id)) - assert isinstance(dist_context, DistributedContext), "The type of dist_context should be DistributedContext, " \ - "but got {}.".format(type(dist_context)) + assert isinstance(auto_parallel_main_prog, Program), ( + "The type of auto_parallel_startup_prog should be Program or None, " + "but got {}.".format(type(auto_parallel_startup_prog)) + ) + assert isinstance( + rank_id, int + ), "The type of rank_id should be int, " "but got {}.".format( + type(rank_id) + ) + assert isinstance(dist_context, DistributedContext), ( + "The type of dist_context should be DistributedContext, " + "but got {}.".format(type(dist_context)) + ) if batch_size is not None: - assert isinstance(batch_size, int), "The type of batch_size should be int, " \ - "but got {}.".format(type(batch_size)) + assert isinstance( + batch_size, int + ), "The type of batch_size should be int, " "but got {}.".format( + type(batch_size) + ) self._auto_parallel_main_prog = auto_parallel_main_prog self._auto_parallel_startup_prog = auto_parallel_startup_prog @@ -943,29 +1043,37 @@ class Resharder: for i in range(len(process_shape)): idx = relative_process // (product // process_shape[i]) product = product // process_shape[i] - relative_process = relative_process - relative_process // product * product + relative_process = ( + relative_process - relative_process // product * product + ) process_index.append(idx) return process_index @staticmethod - def compute_partition_index(process, complete_shape, dims_mapping, - process_shape, process_group): + def compute_partition_index( + process, complete_shape, dims_mapping, process_shape, process_group + ): """Compute the partition index in complete tensor.""" partition_shape = Resharder.compute_partition_shape( - complete_shape, dims_mapping, process_shape) - process_index = Resharder.compute_process_index(process, process_group, - process_shape) + complete_shape, dims_mapping, process_shape + ) + process_index = Resharder.compute_process_index( + process, process_group, process_shape + ) partition_index = [] for i in 
range(len(complete_shape)): if dims_mapping[i] == -1: partition_index.append([0, partition_shape[i]]) else: - partition_index.append([ - process_index[dims_mapping[i]] * partition_shape[i], - (process_index[dims_mapping[i]] + 1) * partition_shape[i] - ]) + partition_index.append( + [ + process_index[dims_mapping[i]] * partition_shape[i], + (process_index[dims_mapping[i]] + 1) + * partition_shape[i], + ] + ) return partition_index @@ -980,12 +1088,16 @@ class Resharder: for idx, item in enumerate(partition_index_x): if item != partition_index_y[idx]: differ_count += 1 - if item[1] == partition_index_y[idx][ - 0] and item[0] < partition_index_y[idx][1]: + if ( + item[1] == partition_index_y[idx][0] + and item[0] < partition_index_y[idx][1] + ): concat_axis = idx new_partition.append([item[0], partition_index_y[idx][1]]) - elif item[0] == partition_index_y[idx][ - 1] and item[1] > partition_index_y[idx][0]: + elif ( + item[0] == partition_index_y[idx][1] + and item[1] > partition_index_y[idx][0] + ): first_order = 1 concat_axis = idx new_partition.append([partition_index_y[idx][0], item[1]]) @@ -1018,12 +1130,14 @@ class Resharder: has_concat = False while i < len(partition_index_list): concat_axis, _, new_partition = Resharder.compute_concat_info( - partition_index_list[i], partition_index) + partition_index_list[i], partition_index + ) if concat_axis != -1: has_concat = True partition_index_list.pop(i) - Resharder.concat_partitions(partition_index_list, - new_partition) + Resharder.concat_partitions( + partition_index_list, new_partition + ) break i += 1 if not has_concat: @@ -1035,7 +1149,8 @@ class Resharder: for sub_block_idx in Resharder.while_block_info: sub_block = auto_parallel_main_prog.blocks[sub_block_idx] parent_while_op_id = Resharder.while_block_info[sub_block_idx][ - "op_id"] + "op_id" + ] parent_block = auto_parallel_main_prog.blocks[sub_block.parent_idx] sub_block_op_inputs = set() @@ -1043,10 +1158,12 @@ class Resharder: for op in sub_block.ops: # skip the input and output of operators inserted in the reshard phase dist_op = dist_context.get_dist_op_for_program(op) - if dist_op or (op.type == "slice" and not dist_op) or ( - op.type == "split" - and not dist_op) or (op.type == "assign" - and not dist_op): + if ( + dist_op + or (op.type == "slice" and not dist_op) + or (op.type == "split" and not dist_op) + or (op.type == "assign" and not dist_op) + ): for var_name in op.output_arg_names: if var_name not in sub_block_op_outputs: sub_block_op_outputs.append(var_name) @@ -1077,8 +1194,9 @@ class Resharder: for var_name in while_op.output("Out"): for output_name in sub_block_op_outputs[::-1]: if output_name.find(var_name) != -1 and ( - len(var_name) == len(output_name) - or "@RESHARD" in output_name): + len(var_name) == len(output_name) + or "@RESHARD" in output_name + ): if output_name not in new_Out: new_Out.append(output_name) assert new_Out @@ -1087,8 +1205,9 @@ class Resharder: def is_overlapped(self, shape_x, shape_y): """Judge whether two partitions intersect on the specified dimension.""" overlapped = False - if (shape_y[0] <= shape_x[0] < shape_y[1]) or (shape_x[0] <= shape_y[0] - < shape_x[1]): + if (shape_y[0] <= shape_x[0] < shape_y[1]) or ( + shape_x[0] <= shape_y[0] < shape_x[1] + ): overlapped = True return overlapped @@ -1116,8 +1235,9 @@ class Resharder: # the dims mapping of condition tensor should be replicative for var_name in input_cond: - var = get_var_with_recursion(var_name, sub_block, - self.auto_parallel_main_prog) + var = get_var_with_recursion( + 
var_name, sub_block, self.auto_parallel_main_prog + ) dist_tensor = self.dist_context.get_dist_tensor_for_program(var) tensor_dist_attr = dist_tensor.dist_attr var_dims_mapping = tensor_dist_attr.dims_mapping @@ -1140,13 +1260,22 @@ class Resharder: if op_input: op_input_dims_mapping = dist_attr[1] if all( - map(lambda x: x, [ - tensor_dims_mapping, tensor_process_mesh, - op_input_dims_mapping, op_process_mesh - ])): + map( + lambda x: x, + [ + tensor_dims_mapping, + tensor_process_mesh, + op_input_dims_mapping, + op_process_mesh, + ], + ) + ): # judge whether need reshard by dims_mapping if tensor_dims_mapping != op_input_dims_mapping: - if tensor_process_mesh not in self.dist_context.process_meshes: + if ( + tensor_process_mesh + not in self.dist_context.process_meshes + ): # assert whether -1 when union. for item in tensor_dims_mapping: if item != -1: @@ -1170,10 +1299,16 @@ class Resharder: else: op_output_dims_mapping = dist_attr[1] if all( - map(lambda x: x, [ - tensor_dims_mapping, tensor_process_mesh, - op_output_dims_mapping, op_process_mesh - ])): + map( + lambda x: x, + [ + tensor_dims_mapping, + tensor_process_mesh, + op_output_dims_mapping, + op_process_mesh, + ], + ) + ): if tensor_dims_mapping != op_output_dims_mapping: raise ValueError( "It is not supported that tensor dims mapping is different from op output dims mapping." @@ -1190,10 +1325,9 @@ class Resharder: op_process_mesh = dist_op.dist_attr.process_mesh for process_mesh in self.dist_context.process_meshes: - if set(process_mesh.processes) & (set( - op_process_mesh.processes)) and len( - process_mesh.processes) < len( - op_process_mesh.processes): + if set(process_mesh.processes) & ( + set(op_process_mesh.processes) + ) and len(process_mesh.processes) < len(op_process_mesh.processes): process_meshes.append(process_mesh) # it means the process mesh is not a union when process meshes is null @@ -1235,34 +1369,47 @@ class Resharder: new_shape[0] = self.batch_size source_tensor.desc.set_shape(new_shape) - complete_shape = Resharder.compute_complete_shape( - source_tensor.shape, source_process_shape, - source_dims_mapping) if not serial else source_tensor.shape + complete_shape = ( + Resharder.compute_complete_shape( + source_tensor.shape, source_process_shape, source_dims_mapping + ) + if not serial + else source_tensor.shape + ) op_desc_seq = {} # TODO: if the target process group has the same process with source process group - if set(target_process_group).intersection(set( - source_process_group)) and set(target_process_group).difference( - set(source_process_group)): + if set(target_process_group).intersection( + set(source_process_group) + ) and set(target_process_group).difference(set(source_process_group)): pass elif target_process_group != source_process_group: partition_process_mapping_list = [] for source_process in source_process_group: # get partition index of source process - source_partition_index = Resharder.compute_partition_index(source_process, complete_shape, source_dims_mapping, \ - source_process_shape, source_process_group) + source_partition_index = Resharder.compute_partition_index( + source_process, + complete_shape, + source_dims_mapping, + source_process_shape, + source_process_group, + ) if not partition_process_mapping_list: # the item in partition_process_mapping_list is source_partition_index, which processes and whether has been used partition_process_mapping_list.append( - [source_partition_index, [source_process], [False]]) + [source_partition_index, [source_process], [False]] + ) else: 
partition_list = list( - [item[0] for item in partition_process_mapping_list]) + [item[0] for item in partition_process_mapping_list] + ) process_list = list( - [item[1] for item in partition_process_mapping_list]) + [item[1] for item in partition_process_mapping_list] + ) has_used = list( - [item[2] for item in partition_process_mapping_list]) + [item[2] for item in partition_process_mapping_list] + ) if partition_list.count(source_partition_index) == 1: index = partition_list.index(source_partition_index) @@ -1270,32 +1417,52 @@ class Resharder: has_used[index].append(False) else: partition_process_mapping_list.append( - [source_partition_index, [source_process], [False]]) + [source_partition_index, [source_process], [False]] + ) for target_process in target_process_group: # has_sent means the source_partition_index has been sent to target_process has_sent = [] target_partition_index = Resharder.compute_partition_index( - target_process, complete_shape, target_dims_mapping, - target_process_shape, target_process_group) + target_process, + complete_shape, + target_dims_mapping, + target_process_shape, + target_process_group, + ) partition_index_list = [] all_partition_index_list = [] for source_process in source_process_group: source_partition_index = Resharder.compute_partition_index( - source_process, complete_shape, source_dims_mapping, - source_process_shape, source_process_group) + source_process, + complete_shape, + source_dims_mapping, + source_process_shape, + source_process_group, + ) to_send_process = None - if all(_ for _ in list(map(self.is_overlapped, source_partition_index, target_partition_index))) \ - and source_partition_index not in has_sent: - idx = list([ - item[0] for item in partition_process_mapping_list - ]).index(source_partition_index) - has_used = list([ - item[2] for item in partition_process_mapping_list - ])[idx] - process_list = list([ - item[1] for item in partition_process_mapping_list - ])[idx] + if ( + all( + _ + for _ in list( + map( + self.is_overlapped, + source_partition_index, + target_partition_index, + ) + ) + ) + and source_partition_index not in has_sent + ): + idx = list( + [item[0] for item in partition_process_mapping_list] + ).index(source_partition_index) + has_used = list( + [item[2] for item in partition_process_mapping_list] + )[idx] + process_list = list( + [item[1] for item in partition_process_mapping_list] + )[idx] i = 0 while i < len(has_used): if not has_used[i]: @@ -1308,7 +1475,9 @@ class Resharder: has_used = list(map(lambda x: False, has_used)) to_send_process = process_list[0] has_used[0] = True - assert to_send_process is not None, "Failed to find the send process." + assert ( + to_send_process is not None + ), "Failed to find the send process." 
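The send planning above leans on the partition-index helpers (compute_process_index / compute_partition_index): a rank is first decomposed into a coordinate on the process mesh, and each tensor dimension then either keeps its full extent (dims_mapping of -1) or takes the contiguous block owned by that coordinate. A minimal standalone sketch of the same arithmetic, using an equivalent row-major decomposition and hypothetical shapes (not taken from the diff):

from functools import reduce
from operator import mul


def process_index(rank, process_group, process_shape):
    # Decompose the rank's position in the group into a mesh coordinate
    # (row-major order), equivalent to compute_process_index above.
    relative = process_group.index(rank)
    product = reduce(mul, process_shape)
    coord = []
    for dim in process_shape:
        product //= dim
        coord.append(relative // product)
        relative %= product
    return coord


def partition_index(rank, complete_shape, dims_mapping, process_shape, process_group):
    # Unmapped dims (-1) keep the full extent; mapped dims take the block owned
    # by the rank's mesh coordinate, as in compute_partition_index above.
    coord = process_index(rank, process_group, process_shape)
    index = []
    for i, size in enumerate(complete_shape):
        if dims_mapping[i] == -1:
            index.append([0, size])
        else:
            block = size // process_shape[dims_mapping[i]]
            start = coord[dims_mapping[i]] * block
            index.append([start, start + block])
    return index


# An 8x6 tensor sharded along dim 0 over a 2x2 mesh of ranks [0, 1, 2, 3]:
# rank 2 sits at mesh coordinate (1, 0), so it owns rows 4..8 and all 6 columns.
print(partition_index(2, [8, 6], [0, -1], [2, 2], [0, 1, 2, 3]))  # [[4, 8], [0, 6]]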
if to_send_process not in op_desc_seq.keys(): op_desc_seq[to_send_process] = [] @@ -1317,25 +1486,30 @@ class Resharder: all_partition_index_list.append(source_partition_index) # append send and recv op desc - is_bool = ( - dist_tensor.serial_tensor.dtype == paddle.bool) - send_op_desc = SendOpDesc(source_partition_index, - to_send_process, - target_process, - is_bool=is_bool) - recv_op_desc = RecvOpDesc(source_partition_index, - to_send_process, - target_process, - is_bool=is_bool) + is_bool = dist_tensor.serial_tensor.dtype == paddle.bool + send_op_desc = SendOpDesc( + source_partition_index, + to_send_process, + target_process, + is_bool=is_bool, + ) + recv_op_desc = RecvOpDesc( + source_partition_index, + to_send_process, + target_process, + is_bool=is_bool, + ) op_desc_seq[to_send_process].append(send_op_desc) op_desc_seq[target_process].append(recv_op_desc) has_sent.append(source_partition_index) - Resharder.concat_partitions(partition_index_list, - source_partition_index) + Resharder.concat_partitions( + partition_index_list, source_partition_index + ) # append concat op desc op_desc_seq[target_process].append( - ConcatOpDesc(all_partition_index_list)) + ConcatOpDesc(all_partition_index_list) + ) # append slice op desc slice_starts = [] @@ -1345,17 +1519,21 @@ class Resharder: to_slice_tensor_shape = [] for idx, item in enumerate(concatenated_partition_index): - slice_starts.append(target_partition_index[idx][0] - - item[0]) + slice_starts.append( + target_partition_index[idx][0] - item[0] + ) slice_ends.append(target_partition_index[idx][1] - item[0]) slices_axes.append(idx) to_slice_tensor_shape.append(item[1] - item[0]) op_desc_seq[target_process].append( - SliceOpDesc(slice_starts, - slice_ends, - slices_axes, - shape=to_slice_tensor_shape)) + SliceOpDesc( + slice_starts, + slice_ends, + slices_axes, + shape=to_slice_tensor_shape, + ) + ) # in the same process group, it will use allgahther and slice op. 
else: @@ -1365,16 +1543,26 @@ class Resharder: process_index = [] for source_process in source_process_group: source_partition_index = Resharder.compute_partition_index( - source_process, complete_shape, source_dims_mapping, - source_process_shape, source_process_group) + source_process, + complete_shape, + source_dims_mapping, + source_process_shape, + source_process_group, + ) if source_partition_index not in partition_index_list: partition_index_list.append(source_partition_index) - process_index.append([[ - source_process, - ], source_partition_index]) + process_index.append( + [ + [ + source_process, + ], + source_partition_index, + ] + ) else: - process_index[partition_index_list.index( - source_partition_index)][0].append(source_process) + process_index[ + partition_index_list.index(source_partition_index) + ][0].append(source_process) for i in range(len(process_index[0][0])): group = [] @@ -1388,28 +1576,50 @@ class Resharder: slice_ends = [] slices_axes = [] target_partition_index = Resharder.compute_partition_index( - process, complete_shape, target_dims_mapping, - target_process_shape, target_process_group) + process, + complete_shape, + target_dims_mapping, + target_process_shape, + target_process_group, + ) for idx, item in enumerate(target_partition_index): slice_starts.append(item[0]) slice_ends.append(item[1]) slices_axes.append(idx) to_slice_tensor_shape = dist_tensor.global_sizes() - slice_op_desc = SliceOpDesc(starts=slice_starts, - ends=slice_ends, - axes=slices_axes, - shape=to_slice_tensor_shape) - allgather_shape = None if not serial else dist_tensor.local_sizes( - rank=process) - op_desc_seq[process] = [AllGatherOpDesc(group=group, shape=allgather_shape, is_bool=(source_tensor.dtype == paddle.bool)), - ConcatOpDesc(partition_index_list=all_partition_index_list), slice_op_desc] \ - if len(group) > 1 else [slice_op_desc] + slice_op_desc = SliceOpDesc( + starts=slice_starts, + ends=slice_ends, + axes=slices_axes, + shape=to_slice_tensor_shape, + ) + allgather_shape = ( + None + if not serial + else dist_tensor.local_sizes(rank=process) + ) + op_desc_seq[process] = ( + [ + AllGatherOpDesc( + group=group, + shape=allgather_shape, + is_bool=(source_tensor.dtype == paddle.bool), + ), + ConcatOpDesc( + partition_index_list=all_partition_index_list + ), + slice_op_desc, + ] + if len(group) > 1 + else [slice_op_desc] + ) return op_desc_seq - def parse_op_desc(self, block, op_desc_seq, var_name, reshard_op, - dist_attr): + def parse_op_desc( + self, block, op_desc_seq, var_name, reshard_op, dist_attr + ): """Parse op desc sequence and insert op in the block""" tensor_list = [] partition_tensor_list = [] @@ -1422,55 +1632,84 @@ class Resharder: if op.desc.id == reshard_op.desc.id: idx = index break - assert idx is not None, "The op for reshard cannot be found in the rank {} program.".format( - self.rank_id) + assert ( + idx is not None + ), "The op for reshard cannot be found in the rank {} program.".format( + self.rank_id + ) matched_op = block.ops[idx] - source_tensor = get_var_with_recursion(var_name, block, - self.auto_parallel_main_prog) + source_tensor = get_var_with_recursion( + var_name, block, self.auto_parallel_main_prog + ) for op_desc in op_desc_list: if isinstance(op_desc, AllGatherOpDesc): # noqa: F401 if var_name not in self.has_allgather.keys(): self.has_allgather[var_name] = [] - if not self.has_allgather[var_name] or op_desc.group not in list( - map(lambda x: x[0], self.has_allgather[var_name])): + if not self.has_allgather[ + var_name + ] or op_desc.group not 
in list( + map(lambda x: x[0], self.has_allgather[var_name]) + ): if op_desc.is_bool: # for bool data allgather, cast to int64 -> allgather -> cast bool out_cast = Inserter.insert_cast_op( - block, idx, source_tensor, - reshard_op.attr('op_role'), paddle.int64) + block, + idx, + source_tensor, + reshard_op.attr('op_role'), + paddle.int64, + ) tensor_list, idx_offset = Inserter.insert_allgather_op( - block, idx + 1, out_cast, op_desc.group, - reshard_op.attr('op_role')) + block, + idx + 1, + out_cast, + op_desc.group, + reshard_op.attr('op_role'), + ) idx += idx_offset tensor_name_list = [] for var in tensor_list: out_cast = Inserter.insert_cast_op( - block, idx, var, reshard_op.attr('op_role'), - paddle.bool) + block, + idx, + var, + reshard_op.attr('op_role'), + paddle.bool, + ) tensor_name_list.append(out_cast.name) idx += 1 self.has_allgather[var_name].append( - [op_desc.group, tensor_name_list]) + [op_desc.group, tensor_name_list] + ) else: tensor_list, idx_offset = Inserter.insert_allgather_op( - block, idx, source_tensor, op_desc.group, - reshard_op.attr('op_role')) + block, + idx, + source_tensor, + op_desc.group, + reshard_op.attr('op_role'), + ) idx += idx_offset tensor_name_list = [var.name for var in tensor_list] self.has_allgather[var_name].append( - [op_desc.group, tensor_name_list]) + [op_desc.group, tensor_name_list] + ) else: for item in self.has_allgather[var_name]: if op_desc.group == item[0]: tensor_list = [ get_var_with_recursion( - var_name, block, - self.auto_parallel_main_prog) + var_name, + block, + self.auto_parallel_main_prog, + ) for var_name in item[1] ] break - assert tensor_list, "The result of parsing allgather op should not be None." + assert ( + tensor_list + ), "The result of parsing allgather op should not be None." 
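For orientation, the op_desc_seq built in find_op_desc_seq and consumed by parse_op_desc is just a per-rank plan: each process id maps to an ordered list of send/recv/concat/slice steps. A sketch of what such a plan can look like, with plain dicts standing in for the SendOpDesc/RecvOpDesc/ConcatOpDesc/SliceOpDesc objects and hypothetical ranks and shapes; the slice bounds follow the slice_starts/slice_ends arithmetic above (target bounds relative to the concatenated block):

# Source ranks 0 and 1 each own half of an 8x6 tensor (rows 0..4 and 4..8).
# A hypothetical target rank 2 wants rows 2..6, which overlaps both halves, so it
# receives both, concatenates them into a block covering rows 0..8, and slices [2, 6).
op_plan = {
    0: [{"op": "send", "partition": [[0, 4], [0, 6]], "dst": 2}],
    1: [{"op": "send", "partition": [[4, 8], [0, 6]], "dst": 2}],
    2: [
        {"op": "recv", "partition": [[0, 4], [0, 6]], "src": 0},
        {"op": "recv", "partition": [[4, 8], [0, 6]], "src": 1},
        {"op": "concat", "partitions": [[[0, 4], [0, 6]], [[4, 8], [0, 6]]]},
        # starts = target start - concatenated start, ends likewise: [2-0, 0-0] and [6-0, 6-0]
        {"op": "slice", "starts": [2, 0], "ends": [6, 6], "axes": [0, 1]},
    ],
}

for rank, steps in op_plan.items():
    print(rank, [step["op"] for step in steps])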
elif isinstance(op_desc, SendOpDesc): if var_name not in self.has_sent.keys(): @@ -1478,16 +1717,30 @@ class Resharder: if op_desc.dst not in self.has_sent[var_name]: if op_desc.is_bool: out_cast = Inserter.insert_cast_op( - block, idx, source_tensor, - reshard_op.attr('op_role'), paddle.int64) - Inserter.insert_send_op(block, idx + 1, out_cast, - op_desc.src, op_desc.dst, - reshard_op.attr('op_role')) + block, + idx, + source_tensor, + reshard_op.attr('op_role'), + paddle.int64, + ) + Inserter.insert_send_op( + block, + idx + 1, + out_cast, + op_desc.src, + op_desc.dst, + reshard_op.attr('op_role'), + ) idx += 2 else: - Inserter.insert_send_op(block, idx, source_tensor, - op_desc.src, op_desc.dst, - reshard_op.attr('op_role')) + Inserter.insert_send_op( + block, + idx, + source_tensor, + op_desc.src, + op_desc.dst, + reshard_op.attr('op_role'), + ) idx += 1 self.has_sent[var_name].append(op_desc.dst) @@ -1506,13 +1759,23 @@ class Resharder: shape=shape, lod_level=source_tensor.lod_level, dtype=paddle.int64, - type=source_tensor.type) - Inserter.insert_recv_op(block, idx, recv_tensor, - op_desc.src, op_desc.dst, - reshard_op.attr('op_role')) + type=source_tensor.type, + ) + Inserter.insert_recv_op( + block, + idx, + recv_tensor, + op_desc.src, + op_desc.dst, + reshard_op.attr('op_role'), + ) out_cast = Inserter.insert_cast_op( - block, idx + 1, recv_tensor, - reshard_op.attr('op_role'), paddle.bool) + block, + idx + 1, + recv_tensor, + reshard_op.attr('op_role'), + paddle.bool, + ) tensor_list.append(out_cast) idx += 2 self.has_recv[var_name][op_desc.src] = out_cast @@ -1522,26 +1785,45 @@ class Resharder: shape=shape, lod_level=source_tensor.lod_level, dtype=source_tensor.dtype, - type=source_tensor.type) - Inserter.insert_recv_op(block, idx, recv_tensor, - op_desc.src, op_desc.dst, - reshard_op.attr('op_role')) + type=source_tensor.type, + ) + Inserter.insert_recv_op( + block, + idx, + recv_tensor, + op_desc.src, + op_desc.dst, + reshard_op.attr('op_role'), + ) # for lod tensor, need reset lod after received if recv_tensor.lod_level != 0: set_lod = False # use data lod to reset tensor lod - for tmp_block in self.auto_parallel_main_prog.blocks: + for ( + tmp_block + ) in self.auto_parallel_main_prog.blocks: for tmp_var_name in tmp_block.vars: tmp_var = tmp_block.vars[tmp_var_name] - if tmp_var.is_data and tmp_var.lod_level == recv_tensor.lod_level: - reset_lod_out = Inserter.insert_reset_lod_op( - block, idx + 1, recv_tensor, - tmp_var, reshard_op.attr('op_role')) + if ( + tmp_var.is_data + and tmp_var.lod_level + == recv_tensor.lod_level + ): + reset_lod_out = ( + Inserter.insert_reset_lod_op( + block, + idx + 1, + recv_tensor, + tmp_var, + reshard_op.attr('op_role'), + ) + ) tensor_list.append(reset_lod_out) idx += 2 self.has_recv[var_name][ - op_desc.src] = reset_lod_out + op_desc.src + ] = reset_lod_out set_lod = True break if set_lod: @@ -1559,16 +1841,24 @@ class Resharder: idx_list = [idx] for index, tensor in enumerate(tensor_list): Inserter.concat_partitions_with_op( - partition_tensor_list, tensor, - partition_index_list[index], block, idx_list, - reshard_op.attr('op_role')) + partition_tensor_list, + tensor, + partition_index_list[index], + block, + idx_list, + reshard_op.attr('op_role'), + ) idx = idx_list[0] elif isinstance(op_desc, SliceOpDesc): - assert len( - partition_tensor_list) == 1 or not partition_tensor_list - to_slice_tensor = partition_tensor_list[0][0] if len( - partition_tensor_list) == 1 else source_tensor + assert ( + len(partition_tensor_list) == 1 or not 
partition_tensor_list + ) + to_slice_tensor = ( + partition_tensor_list[0][0] + if len(partition_tensor_list) == 1 + else source_tensor + ) new_name = unique_name.generate(var_name + "@RESHARD") target_tensor = Inserter.insert_slice_op( block, @@ -1578,7 +1868,8 @@ class Resharder: ends=op_desc.ends, axes=op_desc.axes, new_var_name=new_name, - op_role=reshard_op.attr('op_role')) + op_role=reshard_op.attr('op_role'), + ) process_mesh = dist_attr[0] dims_mapping = dist_attr[1] @@ -1587,83 +1878,119 @@ class Resharder: tensor_attr.dims_mapping = dims_mapping tensor_attr.process_mesh = process_mesh self.dist_context.set_tensor_dist_attr_for_program( - target_tensor, tensor_attr) + target_tensor, tensor_attr + ) if matched_op.type == "while": # var_reshard_mapping means the while op input need be changed to - if "var_reshard_mapping" not in Resharder.while_block_info[ - op.attr("sub_block").id].keys(): - Resharder.while_block_info[op.attr( - "sub_block").id]["var_reshard_mapping"] = {} - if var_name not in Resharder.while_block_info[op.attr( - "sub_block").id]["var_reshard_mapping"].keys(): + if ( + "var_reshard_mapping" + not in Resharder.while_block_info[ + op.attr("sub_block").id + ].keys() + ): Resharder.while_block_info[op.attr("sub_block").id][ - "var_reshard_mapping"][var_name] = [] + "var_reshard_mapping" + ] = {} + if ( + var_name + not in Resharder.while_block_info[ + op.attr("sub_block").id + ]["var_reshard_mapping"].keys() + ): + Resharder.while_block_info[op.attr("sub_block").id][ + "var_reshard_mapping" + ][var_name] = [] Resharder.while_block_info[op.attr("sub_block").id][ - "var_reshard_mapping"][var_name].append( - [dist_attr, target_tensor.name]) + "var_reshard_mapping" + ][var_name].append([dist_attr, target_tensor.name]) # rename op input name according to new name for op in block.ops: # just for while op while_op_X_append = [] for name in op.input_arg_names: - op_dist_attr = self.dist_context.get_op_dist_attr_for_program( - op) + op_dist_attr = ( + self.dist_context.get_op_dist_attr_for_program(op) + ) if name == var_name and op_dist_attr is not None: if op.desc.id() == matched_op.desc.id(): if matched_op.type == "while": old_name = name new_name = target_tensor.name assert old_name != new_name - op_input_dist_attr = op_dist_attr.get_input_dist_attr( - old_name) + op_input_dist_attr = ( + op_dist_attr.get_input_dist_attr( + old_name + ) + ) op_dist_attr.set_input_dist_attr( - new_name, op_input_dist_attr) + new_name, op_input_dist_attr + ) op_dist_attr.set_input_dims_mapping( - new_name, dims_mapping) - if old_name in op_dist_attr._inputs_dist_attrs: + new_name, dims_mapping + ) + if ( + old_name + in op_dist_attr._inputs_dist_attrs + ): op_dist_attr.del_input_dist_attr( - old_name) + old_name + ) while_op_X_append.append(new_name) continue else: op.desc._rename_input( - name, target_tensor.name) + name, target_tensor.name + ) old_name = name new_name = target_tensor.name assert old_name != new_name - op_input_dist_attr = op_dist_attr.get_input_dist_attr( - old_name) + op_input_dist_attr = ( + op_dist_attr.get_input_dist_attr( + old_name + ) + ) op_dist_attr.set_input_dist_attr( - new_name, op_input_dist_attr) + new_name, op_input_dist_attr + ) op_dist_attr.set_input_dims_mapping( - new_name, dims_mapping) + new_name, dims_mapping + ) op_dist_attr.del_input_dist_attr(old_name) continue op_process_mesh = op_dist_attr.process_mesh - op_input_dims_mapping = op_dist_attr.get_input_dims_mapping( - var_name) + op_input_dims_mapping = ( + 
op_dist_attr.get_input_dims_mapping(var_name) + ) # NOTE: For op whose process mesh is a union, its input will not be renamed by other op reshard result now which means that it will have more reshard operation. - if op_process_mesh == process_mesh and op_input_dims_mapping == dims_mapping: + if ( + op_process_mesh == process_mesh + and op_input_dims_mapping == dims_mapping + ): op.desc._rename_input(name, target_tensor.name) old_name = name new_name = target_tensor.name assert old_name != new_name - op_input_dist_attr = op_dist_attr.get_input_dist_attr( - old_name) + op_input_dist_attr = ( + op_dist_attr.get_input_dist_attr(old_name) + ) op_dist_attr.set_input_dist_attr( - new_name, op_input_dist_attr) + new_name, op_input_dist_attr + ) op_dist_attr.set_input_dims_mapping( - new_name, dims_mapping) + new_name, dims_mapping + ) op_dist_attr.del_input_dist_attr(old_name) # for while op, the input X should reset if while_op_X_append: proto = OpProtoHolder.instance().get_op_proto(op.type) - op.desc.set_input(proto.inputs[0].name, - op.input("X") + while_op_X_append) + op.desc.set_input( + proto.inputs[0].name, + op.input("X") + while_op_X_append, + ) def _get_subblock_input_attrs(self, op, var_name): # NOTE: Multi while loop is not supported @@ -1681,11 +2008,14 @@ class Resharder: if name == var_name: process_mesh = dist_attr.process_mesh input_dims_mapping = dist_attr.get_input_dims_mapping( - var_name) + var_name + ) has_exist = False for input_attr in input_attrs: - if process_mesh == input_attr[ - 0] and input_dims_mapping == input_attr[1]: + if ( + process_mesh == input_attr[0] + and input_dims_mapping == input_attr[1] + ): has_exist = True break if not has_exist: @@ -1698,10 +2028,9 @@ class Resharder: dist_attr = dist_op.dist_attr op_process_mesh = dist_attr.process_mesh for process_mesh in self.dist_context.process_meshes: - if set(process_mesh.processes) & (set( - op_process_mesh.processes)) and len( - process_mesh.processes) < len( - op_process_mesh.processes): + if set(process_mesh.processes) & ( + set(op_process_mesh.processes) + ) and len(process_mesh.processes) < len(op_process_mesh.processes): process_meshes.append(process_mesh) # it means that the process mesh is not a union when process meshes is none @@ -1737,7 +2066,8 @@ class Resharder: for process in process_mesh.processes: processes.add(process) for idx, process_mesh in enumerate( - self.dist_context.process_meshes): + self.dist_context.process_meshes + ): if len(set(process_mesh.processes)) == len(processes): global_process_mesh_idx = idx break @@ -1757,7 +2087,8 @@ class Resharder: def _change_subblock_op_input_and_output(self, block_idx, block): if "var_reshard_mapping" in Resharder.while_block_info[block_idx]: var_reshard_mapping = Resharder.while_block_info[block_idx][ - "var_reshard_mapping"] + "var_reshard_mapping" + ] for op in block.ops: for var_name in op.input_arg_names: if var_name in var_reshard_mapping: @@ -1766,9 +2097,11 @@ class Resharder: dist_attr = dist_op.dist_attr target_name = None for item in var_reshard_mapping[var_name]: - if dist_attr.process_mesh == item[0][ - 0] and dist_attr.get_input_dims_mapping( - var_name) == item[0][1]: + if ( + dist_attr.process_mesh == item[0][0] + and dist_attr.get_input_dims_mapping(var_name) + == item[0][1] + ): target_name = item[1] break if target_name is None: @@ -1776,15 +2109,18 @@ class Resharder: else: op.desc._rename_input(var_name, target_name) dist_op = self.dist_context.get_dist_op_for_program( - op) + op + ) op_dist_attr = dist_op.dist_attr old_name = 
var_name new_name = target_name assert old_name != new_name - op_input_dist_attr = op_dist_attr.get_input_dist_attr( - old_name) + op_input_dist_attr = ( + op_dist_attr.get_input_dist_attr(old_name) + ) op_dist_attr.set_input_dist_attr( - new_name, op_input_dist_attr) + new_name, op_input_dist_attr + ) op_dist_attr.del_input_dist_attr(old_name) # the outputs also need to be renamed when the output name is the same with input name in inplace op @@ -1804,9 +2140,11 @@ class Resharder: new_name = target_name assert old_name != new_name op_output_dist_attr = op_dist_attr.get_output_dist_attr( - old_name) + old_name + ) op_dist_attr.set_output_dist_attr( - new_name, op_output_dist_attr) + new_name, op_output_dist_attr + ) op_dist_attr.del_output_dist_attr(old_name) def _reshard_input(self, block): @@ -1821,18 +2159,22 @@ class Resharder: dist_op = self.dist_context.get_dist_op_for_program(op) if dist_op is not None: - op_input_dist_attrs = [ - ] # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)] + op_input_dist_attrs = ( + [] + ) # [(op_process_mesh, op_input_dims_mapping), (op_process_mesh, op_input_dims_mapping)] if op.type in _g_subblock_ops: if not self.is_condition_replicative(op): raise ValueError( "Please check the condition due to the dims mapping is not replicative." ) - if op.attr( - "sub_block").id not in Resharder.while_block_info: + if ( + op.attr("sub_block").id + not in Resharder.while_block_info + ): Resharder.while_block_info[op.attr("sub_block").id] = {} - Resharder.while_block_info[op.attr( - "sub_block").id]["op_id"] = op.desc.id() + Resharder.while_block_info[op.attr("sub_block").id][ + "op_id" + ] = op.desc.id() if op.type == "while": # condition var process mesh is the same with op and dims_mapping is replicative, so it do not need reshard @@ -1849,17 +2191,24 @@ class Resharder: # skip lod_tensor_blocking_queue_? 
name if "lod_tensor_blocking_queue" in var_name: continue - var = get_var_with_recursion(var_name, block, - self.auto_parallel_main_prog) + var = get_var_with_recursion( + var_name, block, self.auto_parallel_main_prog + ) dist_tensor = self.dist_context.get_dist_tensor_for_program( - var) + var + ) # judge whether union tensor dims_mapping all -1 is_union_process_mesh_tensor = False - if dist_tensor.dist_attr.process_mesh not in self.dist_context.process_meshes and self.dist_context.process_meshes: + if ( + dist_tensor.dist_attr.process_mesh + not in self.dist_context.process_meshes + and self.dist_context.process_meshes + ): is_union_process_mesh_tensor = True assert dist_tensor.dist_attr.dims_mapping.count( - -1) == len(dist_tensor.dist_attr.dims_mapping) + -1 + ) == len(dist_tensor.dist_attr.dims_mapping) op_input_attrs = self.get_op_input_attrs(op, var_name) for input_attr in op_input_attrs: @@ -1869,18 +2218,23 @@ class Resharder: if is_union_process_mesh_tensor: # if op process mesh is subset of union tensor process mesh, need no reshard if set(input_attr[0].processes) <= set( - dist_tensor.dist_attr.process_mesh.processes + dist_tensor.dist_attr.process_mesh.processes ): continue if dist_tensor is not None and self.need_reshard( - dist_tensor, input_attr): + dist_tensor, input_attr + ): reshard_op_desc = self.find_op_desc_seq( - dist_tensor, input_attr) - self.parse_op_desc(block, reshard_op_desc, var_name, - op, input_attr) + dist_tensor, input_attr + ) + self.parse_op_desc( + block, reshard_op_desc, var_name, op, input_attr + ) cur_op_count = len(block.ops) - idx_offset = idx_offset + cur_op_count - pre_op_count + idx_offset = ( + idx_offset + cur_op_count - pre_op_count + ) pre_op_count = cur_op_count idx = idx + idx_offset + 1 else: @@ -1895,34 +2249,43 @@ class Resharder: shape=var.shape, lod_level=var.lod_level, dtype=paddle.int64, - type=var.type) - Inserter.insert_recv_op(block, idx + 1, - recv_cast_out, send_rank, recv_rank, - op.attr('op_role')) + type=var.type, + ) + Inserter.insert_recv_op( + block, + idx + 1, + recv_cast_out, + send_rank, + recv_rank, + op.attr('op_role'), + ) reset_lod_out = None if var.lod_level != 0: set_lod = False for tmp_block in self.auto_parallel_main_prog.blocks: for tmp_var_name in tmp_block.vars: tmp_var = tmp_block.vars[tmp_var_name] - if tmp_var.is_data and tmp_var.lod_level == var.lod_level: + if ( + tmp_var.is_data + and tmp_var.lod_level == var.lod_level + ): reset_lod_out = block.create_var( - name=unique_name.generate(var.name + - "@RESETLOD"), + name=unique_name.generate( + var.name + "@RESETLOD" + ), shape=recv_cast_out.shape, type=recv_cast_out.type, dtype=recv_cast_out.dtype, - lod_level=recv_cast_out.lod_level) + lod_level=recv_cast_out.lod_level, + ) idx += 1 block._insert_op( idx, type="lod_reset", - inputs={ - 'X': recv_cast_out, - 'Y': tmp_var - }, + inputs={'X': recv_cast_out, 'Y': tmp_var}, outputs={'Out': reset_lod_out}, - attrs={'op_role': op.attr("op_role")}) + attrs={'op_role': op.attr("op_role")}, + ) set_lod = True break if set_lod: @@ -1930,18 +2293,21 @@ class Resharder: assert set_lod is True # cast int64 to bool - block._insert_op(idx + 2, - type='cast', - inputs={ - 'X': [recv_cast_out] if - reset_lod_out is None else [reset_lod_out] - }, - outputs={'Out': [var]}, - attrs={ - 'in_dtype': recv_cast_out.dtype, - 'out_dtype': var.dtype, - 'op_role': op.attr('op_role') - }) + block._insert_op( + idx + 2, + type='cast', + inputs={ + 'X': [recv_cast_out] + if reset_lod_out is None + else [reset_lod_out] + }, + 
outputs={'Out': [var]}, + attrs={ + 'in_dtype': recv_cast_out.dtype, + 'out_dtype': var.dtype, + 'op_role': op.attr('op_role'), + }, + ) else: if var.lod_level != 0: recv_out = block.create_var( @@ -1949,50 +2315,75 @@ class Resharder: shape=var.shape, lod_level=var.lod_level, dtype=var.int64, - type=var.type) - Inserter.insert_recv_op(block, idx + 1, recv_out, send_rank, - recv_rank, op.attr('op_role')) + type=var.type, + ) + Inserter.insert_recv_op( + block, + idx + 1, + recv_out, + send_rank, + recv_rank, + op.attr('op_role'), + ) set_lod = False for tmp_block in self.auto_parallel_main_prog.blocks: for tmp_var_name in tmp_block.vars: tmp_var = tmp_block.vars[tmp_var_name] - if tmp_var.is_data and tmp_var.lod_level == var.lod_level: + if ( + tmp_var.is_data + and tmp_var.lod_level == var.lod_level + ): idx += 1 block._insert_op( idx, type="lod_reset", - inputs={ - 'X': recv_out, - 'Y': tmp_var - }, + inputs={'X': recv_out, 'Y': tmp_var}, outputs={'Out': var}, - attrs={'op_role': op.attr("op_role")}) + attrs={'op_role': op.attr("op_role")}, + ) set_lod = True break if set_lod: break assert set_lod is True else: - Inserter.insert_recv_op(block, idx + 1, var, send_rank, - recv_rank, op.attr('op_role')) + Inserter.insert_recv_op( + block, + idx + 1, + var, + send_rank, + recv_rank, + op.attr('op_role'), + ) def _handle_send(self, block, idx, var, op, send_rank, recv_rank): if var.dtype == paddle.bool: - cast_out = Inserter.insert_cast_op(block, idx + 1, var, - op.attr('op_role'), paddle.int64) - Inserter.insert_send_op(block, idx + 2, cast_out, send_rank, - recv_rank, op.attr('op_role')) + cast_out = Inserter.insert_cast_op( + block, idx + 1, var, op.attr('op_role'), paddle.int64 + ) + Inserter.insert_send_op( + block, + idx + 2, + cast_out, + send_rank, + recv_rank, + op.attr('op_role'), + ) else: - Inserter.insert_send_op(block, idx + 1, var, send_rank, recv_rank, - op.attr('op_role')) + Inserter.insert_send_op( + block, idx + 1, var, send_rank, recv_rank, op.attr('op_role') + ) def _reshard_output(self, block): # insert send and recv op if output process mesh is different from tensor process mesh idx = 0 # skip reader and ops whose process mesh is union skip_ops = [ - "create_py_reader", "create_double_buffer_reader", "read", - "write_to_array", "read_from_array" + "create_py_reader", + "create_double_buffer_reader", + "read", + "write_to_array", + "read_from_array", ] global _g_special_ops skip_ops += _g_special_ops @@ -2004,59 +2395,86 @@ class Resharder: if dist_op is not None and op.type not in skip_ops: idx_offset = 0 for var_name in op.output_arg_names: - var = get_var_with_recursion(var_name, block, - self.auto_parallel_main_prog) + var = get_var_with_recursion( + var_name, block, self.auto_parallel_main_prog + ) dist_tensor = self.dist_context.get_dist_tensor_for_program( - var) + var + ) tensor_process_mesh = dist_tensor.dist_attr.process_mesh output_attr = [ dist_op.dist_attr.process_mesh, - dist_op.dist_attr.get_output_dims_mapping(var_name) + dist_op.dist_attr.get_output_dims_mapping(var_name), ] if dist_tensor is not None and self.need_reshard( - dist_tensor, output_attr, False): + dist_tensor, output_attr, False + ): tensor_processes = set( - tensor_process_mesh.processes) - ( - set(tensor_process_mesh.processes) - & set(output_attr[0].processes)) + tensor_process_mesh.processes + ) - ( + set(tensor_process_mesh.processes) + & set(output_attr[0].processes) + ) if tensor_processes: if len(tensor_processes) != len( - output_attr[0].processes): + output_attr[0].processes + ): 
if dist_tensor.dist_attr.dims_mapping.count( - -1) != len( - dist_tensor.dist_attr.dims_mapping - ) or output_attr[1].count(-1) != len( - output_attr[1]): + -1 + ) != len( + dist_tensor.dist_attr.dims_mapping + ) or output_attr[ + 1 + ].count( + -1 + ) != len( + output_attr[1] + ): raise ValueError( - "The dims_mapping must be -1") + "The dims_mapping must be -1" + ) else: for index, tensor_process in enumerate( - tensor_processes): + tensor_processes + ): recv_rank = tensor_process actual_index = index if index >= len( - output_attr[0].processes): + output_attr[0].processes + ): actual_index = ( - index - - len(output_attr[0].processes) + index + - len(output_attr[0].processes) ) % len(output_attr[0].processes) item = output_attr[0].processes[ - actual_index] + actual_index + ] if recv_rank == item: continue if self.rank_id == item: # if send bool data, cast then send self._handle_send( - block, idx, var, op, item, - recv_rank) + block, + idx, + var, + op, + item, + recv_rank, + ) if self.rank_id == recv_rank: # if recv bool data, recv then cast self._hadnle_recv( - block, idx, var, op, item, - recv_rank) + block, + idx, + var, + op, + item, + recv_rank, + ) else: for index, tensor_process in enumerate( - tensor_processes): + tensor_processes + ): recv_rank = tensor_process item = output_attr[0].processes[index] if recv_rank == item: @@ -2064,16 +2482,18 @@ class Resharder: if self.rank_id == item: # if send bool data, cast then send self._handle_send( - block, idx, var, op, item, - recv_rank) + block, idx, var, op, item, recv_rank + ) if self.rank_id == recv_rank: # if recv bool data, recv then cast self._hadnle_recv( - block, idx, var, op, item, - recv_rank) + block, idx, var, op, item, recv_rank + ) cur_op_count = len(block.ops) - idx_offset = idx_offset + cur_op_count - pre_op_count + idx_offset = ( + idx_offset + cur_op_count - pre_op_count + ) pre_op_count = cur_op_count idx = idx + idx_offset + 1 @@ -2095,13 +2515,17 @@ class Resharder: self._reshard_output(block) # remove no need vars and ops in the main program - Remover.remove_no_need_in_main(self.auto_parallel_main_prog, - self.dist_context, self.rank_id, - self.dist_params_grads) + Remover.remove_no_need_in_main( + self.auto_parallel_main_prog, + self.dist_context, + self.rank_id, + self.dist_params_grads, + ) # remove no need vars and ops in the startip program - Remover.remove_no_need_in_startup(self.auto_parallel_main_prog, - self.auto_parallel_startup_prog) + Remover.remove_no_need_in_startup( + self.auto_parallel_main_prog, self.auto_parallel_startup_prog + ) # reset some variable when remove operation ended Resharder.while_block_info = {} @@ -2119,47 +2543,68 @@ class Resharder: return reshard_op_cost else: dist_tensor = self.dist_context.get_dist_tensor_for_program( - tensor) + tensor + ) # simplified processing: ignore union process mesh and output reshard dist_op = self.dist_context.get_dist_op_for_program(op) dims_mapping = dist_op.dist_attr.get_input_dims_mapping( - tensor.name) + tensor.name + ) process_mesh = dist_op.dist_attr.process_mesh dist_attr = [process_mesh, dims_mapping] if dist_tensor is not None and self.need_reshard( - dist_tensor, dist_attr): + dist_tensor, dist_attr + ): if tensor_name not in self._has_resharded: self._has_resharded[tensor_name] = [dist_op] else: for item in self._has_resharded[tensor_name]: item_dist_attr = item.dist_attr - item_dims_mapping = item_dist_attr.get_input_dims_mapping( - tensor_name) + item_dims_mapping = ( + item_dist_attr.get_input_dims_mapping( + tensor_name + ) + ) 
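In _reshard_output above, when a tensor lives on more processes than the op's output mesh, each extra receiver is paired with a producer round-robin via the actual_index wrap-around. A standalone sketch of just that pairing (the rank lists are hypothetical; the real loop additionally skips self-pairs and only inserts send/recv ops on the current rank):

def pair_receivers_with_producers(receivers, producers):
    # Mirrors the actual_index arithmetic above: once receivers outnumber
    # producers, producers are reused modulo len(producers).
    pairs = []
    for index, recv_rank in enumerate(receivers):
        actual_index = index
        if index >= len(producers):
            actual_index = (index - len(producers)) % len(producers)
        pairs.append((producers[actual_index], recv_rank))
    return pairs


# Producers [0, 1] feed receivers [4, 5, 6, 7]: 0->4, 1->5, then wrap to 0->6, 1->7.
print(pair_receivers_with_producers([4, 5, 6, 7], [0, 1]))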
item_process_mesh = item_dist_attr.process_mesh - if dims_mapping == item_dims_mapping and item_process_mesh == process_mesh: + if ( + dims_mapping == item_dims_mapping + and item_process_mesh == process_mesh + ): return reshard_op_cost self._has_resharded[tensor_name].append(dist_op) - reshard_op_desc = self.find_op_desc_seq(dist_tensor, - dist_attr, - serial=True) + reshard_op_desc = self.find_op_desc_seq( + dist_tensor, dist_attr, serial=True + ) dtype = dist_tensor.serial_tensor.dtype reshard_op_cost = self.parse_op_desc_for_cost( - reshard_op_desc, dtype, cluster) + reshard_op_desc, dtype, cluster + ) return reshard_op_cost - def _concat_partitions_for_cost(self, partition_tensor_list, - partition_index, dtype, rank_id, - local_rank_comp_cost, cluster): + def _concat_partitions_for_cost( + self, + partition_tensor_list, + partition_index, + dtype, + rank_id, + local_rank_comp_cost, + cluster, + ): if not partition_tensor_list: partition_tensor_list.append(partition_index) else: i = 0 has_concat = False while i < len(partition_tensor_list): - concat_axis, first_order, new_partition = Resharder.compute_concat_info( - partition_tensor_list[i], partition_index) + ( + concat_axis, + first_order, + new_partition, + ) = Resharder.compute_concat_info( + partition_tensor_list[i], partition_index + ) if concat_axis != -1: has_concat = True concat_desc = {} @@ -2167,31 +2612,38 @@ class Resharder: concat_desc["attrs"] = {"axis": concat_axis} if first_order == 0: concat_desc["inputs"] = { - "X": [(dtype, partition_tensor_list[i]), - (dtype, partition_index)] + "X": [ + (dtype, partition_tensor_list[i]), + (dtype, partition_index), + ] } else: concat_desc["inputs"] = { - "X": [(dtype, partition_index), - (dtype, partition_tensor_list[i])] + "X": [ + (dtype, partition_index), + (dtype, partition_tensor_list[i]), + ] } partition_tensor_list.pop(i) if rank_id not in local_rank_comp_cost: local_rank_comp_cost[rank_id] = [] local_rank_comp_cost[rank_id].append( - ConcatOpCost(op_desc=concat_desc, cluster=cluster)) - self._concat_partitions_for_cost(partition_tensor_list, - new_partition, dtype, - rank_id, - local_rank_comp_cost, - cluster) + ConcatOpCost(op_desc=concat_desc, cluster=cluster) + ) + self._concat_partitions_for_cost( + partition_tensor_list, + new_partition, + dtype, + rank_id, + local_rank_comp_cost, + cluster, + ) break i += 1 if not has_concat: partition_tensor_list.append(partition_index) def parse_op_desc_for_cost(self, reshard_op_desc, dtype, cluster): - def _get_idx(comm_ranks, group_ranks): res, is_the_same = None, False idx = 0 @@ -2222,28 +2674,41 @@ class Resharder: if isinstance(op_desc, SendOpDesc): group_ranks = [key, op_desc.dst] shape = op_desc.shape - send_desc = build_comm_desc("send_v2", group_ranks, dtype, - shape) + send_desc = build_comm_desc( + "send_v2", group_ranks, dtype, shape + ) idx, is_the_same = _get_idx(comm_ranks, group_ranks) if idx is None: - comm_costs.append([ - (group_ranks, - SendOpCost(op_desc=send_desc, - comm_context=comm_context)) - ]) + comm_costs.append( + [ + ( + group_ranks, + SendOpCost( + op_desc=send_desc, + comm_context=comm_context, + ), + ) + ] + ) comm_ranks.append(set(group_ranks)) else: if not is_the_same: comm_costs[idx].append( - (group_ranks, - SendOpCost(op_desc=send_desc, - comm_context=comm_context))) + ( + group_ranks, + SendOpCost( + op_desc=send_desc, + comm_context=comm_context, + ), + ) + ) elif isinstance(op_desc, AllGatherOpDesc): # NOTE: fill_const and other unnecessary op is not calculated because those cost is very 
small group_ranks = op_desc.group shape = op_desc.shape - allgather_desc = build_comm_desc("c_allgather", group_ranks, - dtype, shape) + allgather_desc = build_comm_desc( + "c_allgather", group_ranks, dtype, shape + ) split_inputs_shape = [] for idx, dim in enumerate(shape): if idx == 0: @@ -2252,18 +2717,29 @@ class Resharder: split_inputs_shape.append(dim) idx, is_the_same = _get_idx(comm_ranks, group_ranks) if idx is None: - comm_costs.append([ - (group_ranks, - AllgatherOpCost(op_desc=allgather_desc, - comm_context=comm_context)) - ]) + comm_costs.append( + [ + ( + group_ranks, + AllgatherOpCost( + op_desc=allgather_desc, + comm_context=comm_context, + ), + ) + ] + ) comm_ranks.append(set(group_ranks)) else: if not is_the_same: comm_costs[idx].append( - (group_ranks, - AllgatherOpCost(op_desc=allgather_desc, - comm_context=comm_context))) + ( + group_ranks, + AllgatherOpCost( + op_desc=allgather_desc, + comm_context=comm_context, + ), + ) + ) # calc the split op cost if key not in local_rank_comp_cost: local_rank_comp_cost[key] = [] @@ -2274,19 +2750,27 @@ class Resharder: } split_desc["attrs"] = {"num": len(group_ranks), "axis": 0} local_rank_comp_cost[key].append( - SplitOpCost(op_desc=split_desc, cluster=cluster)) + SplitOpCost(op_desc=split_desc, cluster=cluster) + ) elif isinstance(op_desc, ConcatOpDesc): partition_index_list = op_desc._partition_index_list for idx, partion_idex in enumerate(partition_index_list): self._concat_partitions_for_cost( - partition_tensor_list, partion_idex, dtype, key, - local_rank_comp_cost, cluster) + partition_tensor_list, + partion_idex, + dtype, + key, + local_rank_comp_cost, + cluster, + ) elif isinstance(op_desc, SliceOpDesc): if key not in local_rank_comp_cost: local_rank_comp_cost[key] = [] - assert len( - partition_tensor_list) == 1 or not partition_tensor_list + assert ( + len(partition_tensor_list) == 1 + or not partition_tensor_list + ) to_slice_tensor_shape = [] if len(partition_tensor_list) == 1: for item in partition_tensor_list[0]: @@ -2300,13 +2784,14 @@ class Resharder: "axes": op_desc.axes, "starts": op_desc.starts, "ends": op_desc.ends, - "infer_flags": infer_flags + "infer_flags": infer_flags, } slice_desc["inputs"] = { "Input": [(dtype, to_slice_tensor_shape)] } local_rank_comp_cost[key].append( - SliceOpCost(op_desc=slice_desc, cluster=cluster)) + SliceOpCost(op_desc=slice_desc, cluster=cluster) + ) res = (comm_costs, local_rank_comp_cost) diff --git a/python/paddle/distributed/auto_parallel/strategy.py b/python/paddle/distributed/auto_parallel/strategy.py index a504bbfed29b4b1cf809eae58eb32e991527ab7d..2440472badad3e60e82e0a0af70792232387af62 100644 --- a/python/paddle/distributed/auto_parallel/strategy.py +++ b/python/paddle/distributed/auto_parallel/strategy.py @@ -17,7 +17,6 @@ from . import constants class BaseConfig(object): - def __init__(self, category, config_dict=None): self._category = category self._config_dict = None @@ -27,7 +26,9 @@ class BaseConfig(object): else: raise ValueError( "Expected a dictionary. 
But received: {}".format( - config_dict)) + config_dict + ) + ) # Initialize attributes by the default config config = constants.get_category_default_config(self._category) for field, default_value in config.items(): @@ -73,49 +74,42 @@ class BaseConfig(object): class RecomputeConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.RECOMPUTE super(RecomputeConfig, self).__init__(category, config_dict) class AMPConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.AMP super(AMPConfig, self).__init__(category, config_dict) class ShardingConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.SHARDING super(ShardingConfig, self).__init__(category, config_dict) class GradientMergeConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.GRADIENT_MERGE super(GradientMergeConfig, self).__init__(category, config_dict) class QATConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.QAT super(QATConfig, self).__init__(category, config_dict) class TuningConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.TUNING super(TuningConfig, self).__init__(category, config_dict) class DatasetConfig(BaseConfig): - def __init__(self, config_dict=None): category = constants.DATASET super(DatasetConfig, self).__init__(category, config_dict) @@ -161,7 +155,8 @@ class Strategy(BaseConfig): # self._config_dict = yaml.load(yaml_file, Loader=yaml.Loader) else: raise ValueError( - "Expected a dictionary. But received: {}".format(config)) + "Expected a dictionary. But received: {}".format(config) + ) else: self._config_dict = {} diff --git a/python/paddle/distributed/auto_parallel/tuner/algorithms.py b/python/paddle/distributed/auto_parallel/tuner/algorithms.py index f892a7838fe7a53b0af45f761ec013bab4637861..efc3358ebe41aa6e62c3d46956c1ffa493dcf311 100644 --- a/python/paddle/distributed/auto_parallel/tuner/algorithms.py +++ b/python/paddle/distributed/auto_parallel/tuner/algorithms.py @@ -31,6 +31,7 @@ class AlgorithmBase(ABC): In another word, the key "algorithm" for this class is the search space pruning rules specific for the given optimization scenario. 
""" + _REGISTERED_ALGORITHMS = {} name = None @@ -88,7 +89,6 @@ class AlgorithmBase(ABC): def register_algor(name): - def impl(cls): AlgorithmBase._register(name, cls) cls.name = name @@ -121,7 +121,8 @@ class ShardingStageAlgorithm(AlgorithmBase): assert set(stage_range).issubset( set([0, 1, 2, 3]) ), "Sharding Stage should belong into range within 0 - 3 but got {}.".format( - stage_range) + stage_range + ) stage_range.sort(reverse=True) else: stage_range = list(range(self._max_stage + 1)).sort(reverse=True) diff --git a/python/paddle/distributed/auto_parallel/tuner/config.py b/python/paddle/distributed/auto_parallel/tuner/config.py index 9073013127ab1e84f5a60c30b97d53165057fcb9..4ed6340eccb6f25e6c5e6c0921e517c7adc4d336 100644 --- a/python/paddle/distributed/auto_parallel/tuner/config.py +++ b/python/paddle/distributed/auto_parallel/tuner/config.py @@ -109,8 +109,10 @@ class TuningConfig(object): self._project_dir = project_dir for p in _tuning_supported_passes: - if getattr(self._dist_strategy, p) and _get_pass_config( - self._dist_strategy, p).enable_tuning: + if ( + getattr(self._dist_strategy, p) + and _get_pass_config(self._dist_strategy, p).enable_tuning + ): # TODO distinguish different args of each passes self._tuning_passes_name.add(p) diff --git a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py b/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py index 518e4fda1187d271b3fccd10e5f5d8bae8e8fc34..a5cdbc7f95a912fd84a31c0c7040ea74b167a513 100644 --- a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py +++ b/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py @@ -34,7 +34,10 @@ from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.reshard import Resharder from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.process_group import clear_all_process_groups, get_all_process_groups +from paddle.distributed.auto_parallel.process_group import ( + clear_all_process_groups, + get_all_process_groups, +) from paddle.distributed.auto_parallel.utils import debug_program from paddle.distributed.auto_parallel.utils import set_grad_var_shape @@ -82,9 +85,8 @@ def parse_process_groups(): def get_metric(results): assert isinstance( - results, - dict), "results should be type of dictionary, but got {}.".format( - type(results)) + results, dict + ), "results should be type of dictionary, but got {}.".format(type(results)) if 'Throughtput' in results and isinstance(results['Throughtput'], float): return float(results['Throughtput']) else: @@ -108,20 +110,25 @@ def _copy_context(ref_dist_context): clear_all_process_groups() new_dist_context = DistributedContext() - new_dist_context._serial_main_program = ref_dist_context.serial_main_program.clone( - for_test=False) - new_dist_context._serial_startup_program = ref_dist_context.serial_startup_program.clone( - for_test=False) + new_dist_context._serial_main_program = ( + ref_dist_context.serial_main_program.clone(for_test=False) + ) + new_dist_context._serial_startup_program = ( + ref_dist_context.serial_startup_program.clone(for_test=False) + ) # mapping variable into new dist context if getattr(ref_dist_context, '_params_grads', None): new_dist_context._params_grads = _get_new_params_grads( new_dist_context.serial_main_program, ref_dist_context.serial_main_program, - ref_dist_context._params_grads) + 
ref_dist_context._params_grads, + ) new_dist_context._serial_loss = _get_new_loss( new_dist_context.serial_main_program, - ref_dist_context.serial_main_program, ref_dist_context.serial_loss) + ref_dist_context.serial_main_program, + ref_dist_context.serial_loss, + ) for key, var_list in ref_dist_context._serial_feed_vars.items(): new_var_list = [] @@ -129,7 +136,8 @@ def _copy_context(ref_dist_context): block_idx = var.block.idx var_name = var.name var = new_dist_context._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) new_var_list.append(var) new_dist_context._serial_feed_vars[key] = new_var_list @@ -143,7 +151,8 @@ def _copy_context(ref_dist_context): block_idx = var.block.idx var_name = var.name var = new_dist_context._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) new_inner_var_list.append(var) new_var_list.append(new_inner_var_list) else: @@ -151,21 +160,26 @@ def _copy_context(ref_dist_context): block_idx = var.block.idx var_name = var.name var = new_dist_context._serial_main_program.blocks[ - block_idx]._var_recursive(var_name) + block_idx + ]._var_recursive(var_name) new_var_list.append(var) new_dist_context._serial_fetch_vars[key] = new_var_list # copy information in forward and backward new_dist_context._serial_optimizer = copy.deepcopy( - ref_dist_context.serial_optimizer) + ref_dist_context.serial_optimizer + ) new_dist_context._dist_tensors_for_program = copy.deepcopy( - ref_dist_context._dist_tensors_for_program) + ref_dist_context._dist_tensors_for_program + ) new_dist_context._dist_ops_for_program = copy.deepcopy( - ref_dist_context._dist_ops_for_program) + ref_dist_context._dist_ops_for_program + ) for pm in ref_dist_context.process_meshes: new_dist_context.add_process_mesh(pm) new_dist_context._dist_op_context = copy.deepcopy( - ref_dist_context._dist_op_context) + ref_dist_context._dist_op_context + ) new_dist_context._block_state = copy.deepcopy(ref_dist_context.block_state) return new_dist_context @@ -229,28 +243,39 @@ class OptimizationTuner: def _build_programs_without_optimization(self): serial_main_program = self._baseline_dist_context.serial_main_program - serial_startup_program = self._baseline_dist_context.serial_startup_program + serial_startup_program = ( + self._baseline_dist_context.serial_startup_program + ) serial_loss = self._baseline_dist_context.serial_loss with program_guard(serial_main_program, serial_startup_program): params_grads = append_backward( serial_loss, - distop_context=self._baseline_dist_context.dist_op_context) + distop_context=self._baseline_dist_context.dist_op_context, + ) self._baseline_completer.complete_backward_annotation( - serial_main_program) + serial_main_program + ) self._baseline_dist_context.block_state.parse_backward_blocks( - serial_main_program) + serial_main_program + ) self._baseline_dist_context._params_grads = params_grads if self._config.verbose: baseline_dir = os.path.join(self.project_dir, "baseline") if not os.path.exists(baseline_dir): pathlib.Path(baseline_dir).mkdir(parents=True, exist_ok=True) - debug_program(self._baseline_dist_context._serial_main_program, - baseline_dir, "main") - debug_program(self._baseline_dist_context._serial_startup_program, - baseline_dir, "startup") + debug_program( + self._baseline_dist_context._serial_main_program, + baseline_dir, + "main", + ) + debug_program( + self._baseline_dist_context._serial_startup_program, + baseline_dir, + "startup", + ) def 
_select_tuning_algorithm(self): @@ -275,18 +300,22 @@ class OptimizationTuner: # TODO AMP Pass should not use loss var config["loss"] = dist_context.serial_loss - config["input_data"] = self._baseline_dist_context.serial_feed_vars["inputs"] \ + config["input_data"] = ( + self._baseline_dist_context.serial_feed_vars["inputs"] + self._baseline_dist_context.serial_feed_vars["labels"] + ) if config["use_pure_fp16"]: config["base_opt"] = dist_context.serial_optimizer auto_parallel_fp16_pass = new_pass("auto_parallel_fp16", config) - auto_parallel_fp16_pass.apply([main_program], [startup_program], - pass_context) + auto_parallel_fp16_pass.apply( + [main_program], [startup_program], pass_context + ) dist_context.serial_loss = auto_parallel_fp16_pass.get_loss() else: auto_parallel_amp_pass = new_pass("auto_parallel_amp", config) - auto_parallel_amp_pass.apply([main_program], [startup_program], - pass_context) + auto_parallel_amp_pass.apply( + [main_program], [startup_program], pass_context + ) dist_context.serial_loss = auto_parallel_amp_pass.get_loss() if new_strategy.recompute.enable: @@ -294,27 +323,40 @@ class OptimizationTuner: config["dist_context"] = dist_context config["no_grad_set"] = None config["loss"] = dist_context.serial_loss - auto_parallel_recompute_pass = new_pass("auto_parallel_recompute", - config) - auto_parallel_recompute_pass.apply([main_program], - [startup_program], pass_context) + auto_parallel_recompute_pass = new_pass( + "auto_parallel_recompute", config + ) + auto_parallel_recompute_pass.apply( + [main_program], [startup_program], pass_context + ) # Do logical partition partitioner = Partitioner(dist_context, self.rank) - dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition( - main_program, startup_program, dist_context._params_grads) + ( + dist_main_prog, + dist_startup_prog, + dist_params_grads, + ) = partitioner.partition( + main_program, startup_program, dist_context._params_grads + ) # Generate optimizer # FIXME should be remove from apply pass after pass support optimizers with program_guard(dist_main_prog, dist_startup_prog): optimizer_ops = dist_context.serial_optimizer.apply_gradients( - dist_params_grads) + dist_params_grads + ) completer.complete_update_annotation(dist_main_prog) # Do reshard process set_grad_var_shape(dist_main_prog, dist_context) - resharder = Resharder(dist_main_prog, dist_startup_prog, self.rank, - dist_context, dist_params_grads) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + self.rank, + dist_context, + dist_params_grads, + ) resharder.reshard() if new_strategy.sharding.enable: @@ -322,21 +364,27 @@ class OptimizationTuner: config["dist_context"] = dist_context config["params_grads"] = dist_params_grads config["global_rank"] = self.rank - auto_parallel_sharding_pass = new_pass("auto_parallel_sharding", - config) - auto_parallel_sharding_pass.apply([dist_main_prog], - [dist_startup_prog], pass_context) + auto_parallel_sharding_pass = new_pass( + "auto_parallel_sharding", config + ) + auto_parallel_sharding_pass.apply( + [dist_main_prog], [dist_startup_prog], pass_context + ) if new_strategy.gradient_merge.enable: config = copy.deepcopy(new_strategy.gradient_merge.to_dict()) config["dist_context"] = dist_context config["params_grads"] = dist_params_grads auto_parallel_gradient_merge_pass = new_pass( - "auto_parallel_gradient_merge_pass", config) - auto_parallel_gradient_merge_pass.apply([dist_main_prog], - [dist_startup_prog], - pass_context) - trial.main_program, trial.startup_program = 
dist_main_prog, dist_startup_prog + "auto_parallel_gradient_merge_pass", config + ) + auto_parallel_gradient_merge_pass.apply( + [dist_main_prog], [dist_startup_prog], pass_context + ) + trial.main_program, trial.startup_program = ( + dist_main_prog, + dist_startup_prog, + ) return trial def _get_profile_context(self, trial, result_path): @@ -344,16 +392,18 @@ class OptimizationTuner: profile_ctx = {} profile_ctx['distributed_env'] = copy.deepcopy( - paddle.distributed.ParallelEnv()) + paddle.distributed.ParallelEnv() + ) profile_ctx['group_map'] = parse_process_groups() profile_ctx[ - "loss_var_name"] = self._baseline_dist_context.serial_loss.name + "loss_var_name" + ] = self._baseline_dist_context.serial_loss.name profile_ctx[ - "main_program_decs"] = trial.main_program.desc.serialize_to_string( - ) + "main_program_decs" + ] = trial.main_program.desc.serialize_to_string() profile_ctx[ - "startup_program_decs"] = trial.startup_program.desc.serialize_to_string( - ) + "startup_program_decs" + ] = trial.startup_program.desc.serialize_to_string() self._dataset.batch_size = self._batch_size self._dataset.input_names = self._get_input_names() @@ -375,19 +425,25 @@ class OptimizationTuner: else: coverage_args = [] - profile_args = " ".join([ - "--rank", - str(self.rank), - "--device_id", - str(self.device_id), - "--ctx_filename", - ctx_path, - "--profile_start_step", - str(self._config.profile_start_step), - "--profile_end_step", - str(self._config.profile_end_step), - ]) - cmd_args = "-m paddle.distributed.auto_parallel.tuner.profiler" + " " + profile_args + profile_args = " ".join( + [ + "--rank", + str(self.rank), + "--device_id", + str(self.device_id), + "--ctx_filename", + ctx_path, + "--profile_start_step", + str(self._config.profile_start_step), + "--profile_end_step", + str(self._config.profile_end_step), + ] + ) + cmd_args = ( + "-m paddle.distributed.auto_parallel.tuner.profiler" + + " " + + profile_args + ) cmd = [sys.executable, "-u"] + coverage_args + shlex.split(cmd_args) parent_env = copy.copy(os.environ.copy()) @@ -400,10 +456,11 @@ class OptimizationTuner: # TODO if any rank hang or fail, kill all processes self._logger.debug("Executing cmd:\n{} .".format(" ".join(cmd))) # new_process = subprocess.Popen(cmd, env=new_env) - with open(os.path.join(trial_dir, "stdout.log" + str(self.rank)), - "wb") as out, open( - os.path.join(trial_dir, "stderr.log" + str(self.rank)), - "wb") as err: + with open( + os.path.join(trial_dir, "stdout.log" + str(self.rank)), "wb" + ) as out, open( + os.path.join(trial_dir, "stderr.log" + str(self.rank)), "wb" + ) as err: result = subprocess.Popen(cmd, stdout=out, stderr=err, env=new_env) result.wait() out.flush() @@ -455,13 +512,18 @@ class OptimizationTuner: elif self._config.mode == "COSTMODEL": raise NotImplementedError( - "COSTMODEL mode for optimization tuning is not supported yet!") + "COSTMODEL mode for optimization tuning is not supported yet!" + ) else: - raise NotImplementedError("invalid evaluation mode: {}".format( - self._config.mode)) + raise NotImplementedError( + "invalid evaluation mode: {}".format(self._config.mode) + ) - self._logger.info("Trial {} evaluation finish with {}.".format( - trial.name, parse_results(results))) + self._logger.info( + "Trial {} evaluation finish with {}.".format( + trial.name, parse_results(results) + ) + ) return results def _update(self, i, trial, results): @@ -496,11 +558,12 @@ class OptimizationTuner: Tuning Result Summary Run total {} trials with {} min. 
The best trial is: [{}], whose configuration is following: - """.format(len(self._finished_trials), - (time.time() - self._tuning_start_time) / 60, - best_trial.name) - summary_ += "\n" + best_trial.summary() + "\n"\ - + """.format( + len(self._finished_trials), + (time.time() - self._tuning_start_time) / 60, + best_trial.name, + ) + summary_ += "\n" + best_trial.summary() + "\n" self._logger.info(summary_) with open(os.path.join(self.project_dir, "summary.txt"), "w+") as fw: for line in summary_.split("\n"): @@ -532,7 +595,8 @@ The best trial is: [{}], whose configuration is following: self._tuning_start_time = time.time() self._algorithm.collect_model_info( self._baseline_dist_context.serial_main_program, - self._baseline_dist_context.serial_startup_program) + self._baseline_dist_context.serial_startup_program, + ) # main search loop i = 0 @@ -554,10 +618,15 @@ The best trial is: [{}], whose configuration is following: # early stop i += 1 - if self._config.early_stop and self._config.early_stop <= i - self._best_iter: + if ( + self._config.early_stop + and self._config.early_stop <= i - self._best_iter + ): self._logger.info( - "Early stop the Tuning since there is no better trial found within [{}] trials" - .format(self._config.early_stop)) + "Early stop the Tuning since there is no better trial found within [{}] trials".format( + self._config.early_stop + ) + ) break # step5: summary the best config and return diff --git a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py index 24ee382f7f75aa0f3e7c7e87fbade0fa7a958167..0ed03defbed8686fac753f593e573ca0ea001690 100644 --- a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py +++ b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py @@ -33,15 +33,16 @@ from .tunable_variable import Boolean, IntRange class ParallelTuner: - - def __init__(self, - dist_context, - mode="train", - max_trials=25, - tuner_id=None, - seed=None, - logger=None, - loop_count=10): + def __init__( + self, + dist_context, + mode="train", + max_trials=25, + tuner_id=None, + seed=None, + logger=None, + loop_count=10, + ): self._loop_count = loop_count self._estimator = None self._dist_context = dist_context @@ -49,7 +50,8 @@ class ParallelTuner: self._mode = mode self._cluster = self._dist_context.cluster self._num_machines = self._cluster.get_num_machines() - self._num_devices_per_machine = self._cluster.get_num_devices_per_machine( + self._num_devices_per_machine = ( + self._cluster.get_num_devices_per_machine() ) self._space = TunableSpace() self._objective = "time" @@ -58,15 +60,17 @@ class ParallelTuner: self._tuner_id = tuner_id self._seed = seed if seed is not None else 9999 - print("seed", - self._seed, - "mode", - self._mode, - "num_machies", - self._num_machines, - "num_devices_per_machine", - self._num_devices_per_machine, - flush=True) + print( + "seed", + self._seed, + "mode", + self._mode, + "num_machies", + self._num_machines, + "num_devices_per_machine", + self._num_devices_per_machine, + flush=True, + ) self._seed_state = self._seed self._logger = logger self._max_collisions = 3 @@ -89,8 +93,12 @@ class ParallelTuner: self._cached_candidates_info = defaultdict(list) self._special_ops = [ - "create_py_reader", "create_double_buffer_reader", "read", "while", - "read_from_array", "write_to_array" + "create_py_reader", + "create_double_buffer_reader", + "read", + "while", + "read_from_array", + "write_to_array", ] # Each parallel strategy has two elements. 
The First one is for distributed tensors, @@ -100,44 +108,62 @@ class ParallelTuner: self._completer = Completer(self._dist_context) - self._parallelizer = Parallelizer(self._mode, self._completer, - self._dist_context) + self._parallelizer = Parallelizer( + self._mode, self._completer, self._dist_context + ) - def _generate_combination(self, - elements, - target, - idx, - partial_candidate, - candidates, - num_candidates=None): + def _generate_combination( + self, + elements, + target, + idx, + partial_candidate, + candidates, + num_candidates=None, + ): if target == 0: candidates.append(copy.deepcopy(partial_candidate)) return - if target < 0 or idx == len(elements) \ - or len(candidates) > num_candidates: + if ( + target < 0 + or idx == len(elements) + or len(candidates) > num_candidates + ): return # Use partial_candidate.append(elements[idx]) - self._generate_combination(elements, target - elements[idx], idx, - partial_candidate, candidates, - num_candidates) + self._generate_combination( + elements, + target - elements[idx], + idx, + partial_candidate, + candidates, + num_candidates, + ) # Not use partial_candidate.pop() - self._generate_combination(elements, target, idx + 1, partial_candidate, - candidates, num_candidates) - - def _permute_combination(self, - combination, - target, - check, - partial_candidate, - candidates, - num_candidates=None, - skip_prob=None): - if num_candidates is not None \ - and len(candidates) == num_candidates: + self._generate_combination( + elements, + target, + idx + 1, + partial_candidate, + candidates, + num_candidates, + ) + + def _permute_combination( + self, + combination, + target, + check, + partial_candidate, + candidates, + num_candidates=None, + skip_prob=None, + ): + if num_candidates is not None and len(candidates) == num_candidates: return if len(partial_candidate) == len(combination): @@ -149,13 +175,22 @@ class ParallelTuner: continue if self._rng.choice([True, False], p=[skip_prob, 1 - skip_prob]): continue - if i > 0 and combination[i] == combination[i - 1] \ - and check[i -1] == 0: + if ( + i > 0 + and combination[i] == combination[i - 1] + and check[i - 1] == 0 + ): continue check[i] = 1 - self._permute_combination(combination, target, check, - partial_candidate + [combination[i]], - candidates, num_candidates, skip_prob) + self._permute_combination( + combination, + target, + check, + partial_candidate + [combination[i]], + candidates, + num_candidates, + skip_prob, + ) check[i] = 0 def _partition_number(self, target): @@ -166,8 +201,14 @@ class ParallelTuner: seed_candidates = [] num_seed_candidates = 1000 partial_results = [] - self._generate_combination(elements, target, 0, partial_results, - seed_candidates, num_seed_candidates) + self._generate_combination( + elements, + target, + 0, + partial_results, + seed_candidates, + num_seed_candidates, + ) candidates = [] for seed_candidate in seed_candidates: @@ -178,10 +219,16 @@ class ParallelTuner: if target <= 8: skip_prob = 0.0 else: - skip_prob = (len(seed_candidate) / target) - self._permute_combination(seed_candidate, target, check, [], - cur_candidates, num_cur_candidates, - skip_prob) + skip_prob = len(seed_candidate) / target + self._permute_combination( + seed_candidate, + target, + check, + [], + cur_candidates, + num_cur_candidates, + skip_prob, + ) candidates.extend(cur_candidates) return candidates @@ -190,8 +237,9 @@ class ParallelTuner: intra_node_partitions = self._partition_number(num_devices_per_machine) return inter_node_partitions, intra_node_partitions - def 
_generate_process_mesh_list(self, inter_node_partition, - intra_node_partition): + def _generate_process_mesh_list( + self, inter_node_partition, intra_node_partition + ): process_mesh_list = [] start_row = 0 start_col = 0 @@ -200,8 +248,9 @@ class ParallelTuner: for n in intra_node_partition: process_mesh = [] for p in range(m): - start = (start_row + - p) * self._num_devices_per_machine + start_col + start = ( + start_row + p + ) * self._num_devices_per_machine + start_col tmp = [] for q in range(n): tmp.append(start + q) @@ -211,8 +260,9 @@ class ParallelTuner: start_row += m return process_mesh_list - def _generate_dims_mapping_candidates_helper(self, dims_mapping, dims_list, - start, visited, candidates): + def _generate_dims_mapping_candidates_helper( + self, dims_mapping, dims_list, start, visited, candidates + ): if start == len(dims_mapping) or all(visited): candidates.append(copy.deepcopy(dims_mapping)) return @@ -222,15 +272,17 @@ class ParallelTuner: dims_mapping[start] = dim visited[idx] = True self._generate_dims_mapping_candidates_helper( - dims_mapping, dims_list, start + 1, visited, candidates) + dims_mapping, dims_list, start + 1, visited, candidates + ) visited[idx] = False dims_mapping[start] = -1 - self._generate_dims_mapping_candidates_helper(dims_mapping, dims_list, - start + 1, visited, - candidates) + self._generate_dims_mapping_candidates_helper( + dims_mapping, dims_list, start + 1, visited, candidates + ) - def _generate_dims_mapping_candidates(self, dims_mapping_len, - process_mesh_len): + def _generate_dims_mapping_candidates( + self, dims_mapping_len, process_mesh_len + ): assert dims_mapping_len >= 1 and process_mesh_len >= 1 key = (dims_mapping_len, process_mesh_len) if key in self._cached_dims_mapping_candidates: @@ -239,8 +291,9 @@ class ParallelTuner: dims_mapping = [-1 for i in range(dims_mapping_len)] dims_list = [i for i in range(process_mesh_len)] visited = [False for i in range(process_mesh_len)] - self._generate_dims_mapping_candidates_helper(dims_mapping, dims_list, - 0, visited, candidates) + self._generate_dims_mapping_candidates_helper( + dims_mapping, dims_list, 0, visited, candidates + ) self._cached_dims_mapping_candidates[key] = candidates return candidates @@ -257,12 +310,14 @@ class ParallelTuner: key.append(input_name) for input_arg_name in serial_op.input(input_name): key.append( - len(op_dist_attr.get_input_dims_mapping(input_arg_name))) + len(op_dist_attr.get_input_dims_mapping(input_arg_name)) + ) for output_name in serial_op.output_names: key.append(output_name) for output_arg_name in serial_op.output(output_name): key.append( - len(op_dist_attr.get_output_dims_mapping(output_arg_name))) + len(op_dist_attr.get_output_dims_mapping(output_arg_name)) + ) key = tuple(key) if key in self._cached_candidates_info: @@ -274,18 +329,26 @@ class ParallelTuner: i = 0 for input_name in serial_op.input_names: for input_arg_name in serial_op.input(input_name): - cached_dims_mapping = cached_dist_attr.get_input_dims_mapping( - cached_input_arg_names[i]) + cached_dims_mapping = ( + cached_dist_attr.get_input_dims_mapping( + cached_input_arg_names[i] + ) + ) new_op_dist_attr.set_input_dims_mapping( - input_arg_name, cached_dims_mapping) + input_arg_name, cached_dims_mapping + ) i += 1 i = 0 for output_name in serial_op.output_names: for output_arg_name in serial_op.output(output_name): - cached_dims_mapping = cached_dist_attr.get_output_dims_mapping( - cached_output_arg_names[i]) + cached_dims_mapping = ( + cached_dist_attr.get_output_dims_mapping( + 
cached_output_arg_names[i] + ) + ) new_op_dist_attr.set_output_dims_mapping( - output_arg_name, cached_dims_mapping) + output_arg_name, cached_dims_mapping + ) i += 1 cached_dist_attr_candidates.append(new_op_dist_attr) return cached_dist_attr_candidates @@ -315,22 +378,28 @@ class ParallelTuner: input_names.append(tensor_name) if dims_mapping_len < 1: dims_mapping_generated.append( - [copy.deepcopy(original_dims_mapping)]) + [copy.deepcopy(original_dims_mapping)] + ) else: dims_mapping_generated.append( self._generate_dims_mapping_candidates( - dims_mapping_len, process_mesh_len)) + dims_mapping_len, process_mesh_len + ) + ) input_dims_mapping_candidates = [] for dims_mapping_list in itertools.product(*dims_mapping_generated): dims_mapping_list = list(dims_mapping_list) assert len(dims_mapping_list) == len(input_names) for i, dims_mapping in enumerate(dims_mapping_list): - new_op_dist_attr.set_input_dims_mapping(input_names[i], - dims_mapping) - new_dist_op = DistributedOperator(dist_op.serial_op, - new_op_dist_attr) + new_op_dist_attr.set_input_dims_mapping( + input_names[i], dims_mapping + ) + new_dist_op = DistributedOperator( + dist_op.serial_op, new_op_dist_attr + ) dist_op_impls = find_compatible_distributed_operator_impls( - new_dist_op, fwd=True) + new_dist_op, fwd=True + ) if dist_op_impls is not None: input_dims_mapping_candidates.append(dims_mapping_list) @@ -344,42 +413,58 @@ class ParallelTuner: output_names.append(tensor_name) if dims_mapping_len < 1: dims_mapping_generated.append( - [copy.deepcopy(original_dims_mapping)]) + [copy.deepcopy(original_dims_mapping)] + ) else: dims_mapping_generated.append( self._generate_dims_mapping_candidates( - dims_mapping_len, process_mesh_len)) + dims_mapping_len, process_mesh_len + ) + ) output_dims_mapping_candidates = [] for dims_mapping_list in itertools.product(*dims_mapping_generated): dims_mapping_list = list(dims_mapping_list) assert len(dims_mapping_list) == len(output_names) for i, dims_mapping in enumerate(dims_mapping_list): new_op_dist_attr.set_output_dims_mapping( - output_names[i], dims_mapping) - new_dist_op = DistributedOperator(dist_op.serial_op, - new_op_dist_attr) + output_names[i], dims_mapping + ) + new_dist_op = DistributedOperator( + dist_op.serial_op, new_op_dist_attr + ) dist_op_impls = find_compatible_distributed_operator_impls( - new_dist_op, fwd=False) + new_dist_op, fwd=False + ) if dist_op_impls is not None: output_dims_mapping_candidates.append(dims_mapping_list) if not input_dims_mapping_candidates and output_dims_mapping_candidates: - inout_dims_mapping_generated = [[[[-2]]], - output_dims_mapping_candidates] - elif input_dims_mapping_candidates and not output_dims_mapping_candidates: inout_dims_mapping_generated = [ - input_dims_mapping_candidates, [[[-2]]] + [[[-2]]], + output_dims_mapping_candidates, ] - elif not input_dims_mapping_candidates and not output_dims_mapping_candidates: + elif ( + input_dims_mapping_candidates and not output_dims_mapping_candidates + ): + inout_dims_mapping_generated = [ + input_dims_mapping_candidates, + [[[-2]]], + ] + elif ( + not input_dims_mapping_candidates + and not output_dims_mapping_candidates + ): inout_dims_mapping_generated = [[[[-2]]], [[[-2]]]] else: inout_dims_mapping_generated = [ - input_dims_mapping_candidates, output_dims_mapping_candidates + input_dims_mapping_candidates, + output_dims_mapping_candidates, ] # Find valid dims_mapping generated for both inputs and outputs cached_dist_attr_candidates = [] for inout_dims_mapping_list in itertools.product( - 
*inout_dims_mapping_generated): + *inout_dims_mapping_generated + ): assert len(inout_dims_mapping_list) == 2 if input_dims_mapping_candidates: assert len(inout_dims_mapping_list[0]) == len(input_names) @@ -389,35 +474,45 @@ class ParallelTuner: for i, dims_mapping in enumerate(inout_dims_mapping_list[0]): if dims_mapping != [-2]: new_op_dist_attr.set_input_dims_mapping( - input_names[i], dims_mapping) + input_names[i], dims_mapping + ) # set the dims_mappings for outputs for i, dims_mapping in enumerate(inout_dims_mapping_list[1]): if dims_mapping != [-2]: new_op_dist_attr.set_output_dims_mapping( - output_names[i], dims_mapping) - new_dist_op = DistributedOperator(dist_op.serial_op, - new_op_dist_attr) + output_names[i], dims_mapping + ) + new_dist_op = DistributedOperator( + dist_op.serial_op, new_op_dist_attr + ) dist_op_impls = find_compatible_distributed_operator_impls( - new_dist_op, partial=False) + new_dist_op, partial=False + ) if dist_op_impls is None: continue for dist_op_impl in dist_op_impls: new_op_dist_attr.impl_type = dist_op_impl.type new_op_dist_attr.impl_idx = dist_op_impl.idx cached_dist_attr_candidates.append( - copy.deepcopy(new_op_dist_attr)) + copy.deepcopy(new_op_dist_attr) + ) self._cached_candidates_info[key].append(cached_dist_attr_candidates) return self._cached_candidates_info[key][2] def construct_space(self): inter_node_partitions, intra_node_partitions = self._partition_devices( - self._num_machines, self._num_devices_per_machine) - self._space.choice("inter_node_partitions", - inter_node_partitions, - default=inter_node_partitions[0]) - self._space.choice("intra_node_partitions", - intra_node_partitions, - default=intra_node_partitions[0]) + self._num_machines, self._num_devices_per_machine + ) + self._space.choice( + "inter_node_partitions", + inter_node_partitions, + default=inter_node_partitions[0], + ) + self._space.choice( + "intra_node_partitions", + intra_node_partitions, + default=intra_node_partitions[0], + ) dist_ops = self._dist_context._dist_ops_for_program for op_id, dist_op in dist_ops.items(): @@ -433,17 +528,22 @@ class ParallelTuner: if op_type in self._exclude_op_types: del self._concerned_dist_ops[op_id] - print("Number of the concered dist ops", - len(self._concerned_dist_ops), - flush=True) + print( + "Number of the concered dist ops", + len(self._concerned_dist_ops), + flush=True, + ) search_space = 1 for op_id, dist_op in self._concerned_dist_ops.items(): op_dist_attr_candidates = self._generate_dist_attr_candidates( - op_id, dist_op) + op_id, dist_op + ) search_space *= len(op_dist_attr_candidates) - self._space.choice(str(op_id), - op_dist_attr_candidates, - default=op_dist_attr_candidates[0]) + self._space.choice( + str(op_id), + op_dist_attr_candidates, + default=op_dist_attr_candidates[0], + ) def _compute_values_hash(self, values): keys = sorted(values.keys()) @@ -522,10 +622,10 @@ class ParallelTuner: for _ in pipeline_starts[1:-1]: directions.append(Boolean("direction")) sizes.append( - IntRange("size", - start=0, - stop=half_ops_per_stage, - endpoint=True)) + IntRange( + "size", start=0, stop=half_ops_per_stage, endpoint=True + ) + ) for i, start in enumerate(pipeline_starts[1:-1]): direction = directions[i].random(self._seed) size = sizes[i].random(self._seed) @@ -538,19 +638,23 @@ class ParallelTuner: # Don't change the last start new_pipeline_starts.append(pipeline_starts[-1]) # Validate the new starts - print("Adjusted pipeline starts", - new_pipeline_starts, - half_ops_per_stage, - pipeline_starts, - flush=True) + 
print( + "Adjusted pipeline starts", + new_pipeline_starts, + half_ops_per_stage, + pipeline_starts, + flush=True, + ) for i, new_start in enumerate(new_pipeline_starts[1:]): assert new_start > new_pipeline_starts[i] return new_pipeline_starts else: - print("Non-adjusted pipeline starts", - pipeline_starts, - half_ops_per_stage, - flush=True) + print( + "Non-adjusted pipeline starts", + pipeline_starts, + half_ops_per_stage, + flush=True, + ) return pipeline_starts def _apply_pipeline_partition(self, process_mesh_list): @@ -620,12 +724,12 @@ class ParallelTuner: # if dim_mapping != -1 \ # and (tensor_shape[i] % process_shape[dim_mapping] != 0 \ # or dynamic_dims[i] == 1): - if dim_mapping != -1 \ - and (tensor_shape[i] % process_shape[dim_mapping] != 0): + if dim_mapping != -1 and ( + tensor_shape[i] % process_shape[dim_mapping] != 0 + ): dims_mapping[i] = -1 # it is a fix-bug - if dim_mapping != -1 \ - and process_shape[dim_mapping] == 1: + if dim_mapping != -1 and process_shape[dim_mapping] == 1: dims_mapping[i] = -1 for arg_name in dist_attr.outputs_dist_attrs.keys(): @@ -651,20 +755,22 @@ class ParallelTuner: else: continue for i, dim_mapping in enumerate(dims_mapping): - if dim_mapping != -1 \ - and (tensor_shape[i] % process_shape[dim_mapping] != 0): + if dim_mapping != -1 and ( + tensor_shape[i] % process_shape[dim_mapping] != 0 + ): dims_mapping[i] = -1 # it is a fix-bug - if dim_mapping != -1 \ - and process_shape[dim_mapping] == 1: + if dim_mapping != -1 and process_shape[dim_mapping] == 1: dims_mapping[i] = -1 dist_op_impls = find_compatible_distributed_operator_impls( - dist_op, partial=False) + dist_op, partial=False + ) serial_op_type = dist_op.serial_op.type if dist_op_impls is not None and ( - serial_op_type != "fused_softmax_mask_upper_triangle" - or self._check_fused_softmax_mask_upper_triangle(dist_op)): + serial_op_type != "fused_softmax_mask_upper_triangle" + or self._check_fused_softmax_mask_upper_triangle(dist_op) + ): dist_op.dist_attr.impl_type = dist_op_impls[0].type dist_op.dist_attr.impl_idx = dist_op_impls[0].idx else: @@ -684,14 +790,20 @@ class ParallelTuner: """The last_but_one dim shoule be equal to last dim.""" input_name = dist_op.serial_op.input_arg_names[0] input_dims_mapping = dist_op.dist_attr.get_input_dims_mapping( - input_name) + input_name + ) topology = dist_op.dist_attr.process_mesh.topology input_tensor = dist_op.get_serial_input(input_name) - last_but_one_dim = input_tensor.shape[-2] // topology[ - input_dims_mapping[-2]] if input_dims_mapping[ - -2] != -1 else input_tensor.shape[-2] - last_dim = input_tensor.shape[-1] // topology[input_dims_mapping[ - -1]] if input_dims_mapping[-1] != -1 else input_tensor.shape[-1] + last_but_one_dim = ( + input_tensor.shape[-2] // topology[input_dims_mapping[-2]] + if input_dims_mapping[-2] != -1 + else input_tensor.shape[-2] + ) + last_dim = ( + input_tensor.shape[-1] // topology[input_dims_mapping[-1]] + if input_dims_mapping[-1] != -1 + else input_tensor.shape[-1] + ) if last_but_one_dim == last_dim: return True return False @@ -709,30 +821,37 @@ class ParallelTuner: inter_node_partition = trial.space.values["inter_node_partitions"] intra_node_partition = trial.space.values["intra_node_partitions"] process_mesh_list = self._generate_process_mesh_list( - inter_node_partition, intra_node_partition) + inter_node_partition, intra_node_partition + ) print("\tprocess_mesh list", process_mesh_list, flush=True) op_id_to_process_mesh = self._apply_pipeline_partition( - process_mesh_list) + process_mesh_list + ) if 
op_id_to_process_mesh is None: print("Operators are less than pipeline stages", flush=True) return results op_id_to_dist_attr = {} for name, value in trial.space.values.items(): - if name != "inter_node_partitions" \ - and name !="intra_node_partitions": + if ( + name != "inter_node_partitions" + and name != "intra_node_partitions" + ): op_id_to_dist_attr[int(name)] = value end_time = time.time() cur_sample_time = end_time - start_time - self._sample_time = (num_prev_trials * self._sample_time + - cur_sample_time) / self._num_trials - print("\tsample_time", - num_prev_trials, - self._num_trials, - self._sample_time, - cur_sample_time, - flush=True) + self._sample_time = ( + num_prev_trials * self._sample_time + cur_sample_time + ) / self._num_trials + print( + "\tsample_time", + num_prev_trials, + self._num_trials, + self._sample_time, + cur_sample_time, + flush=True, + ) assert len(op_id_to_process_mesh) == len(op_id_to_dist_attr) @@ -740,42 +859,52 @@ class ParallelTuner: for op_id, process_mesh in op_id_to_process_mesh.items(): dist_op = self._dist_context._dist_ops_for_program[op_id] dist_op.dist_attr = copy.deepcopy(op_id_to_dist_attr[op_id]) - assert dist_op.dist_attr.impl_type == op_id_to_dist_attr[ - op_id].impl_type - assert dist_op.dist_attr.impl_idx == op_id_to_dist_attr[ - op_id].impl_idx + assert ( + dist_op.dist_attr.impl_type + == op_id_to_dist_attr[op_id].impl_type + ) + assert ( + dist_op.dist_attr.impl_idx == op_id_to_dist_attr[op_id].impl_idx + ) dist_op.dist_attr.process_mesh = process_mesh self._amend_dist_attr() self._completer._complete_tensor_dist_attr_by_op() self._dist_context.block_state.parse_forward_blocks( - self._dist_context.serial_main_program) + self._dist_context.serial_main_program + ) end_time = time.time() cur_complete_time = end_time - start_time - self._complete_time = (num_prev_trials * self._complete_time + - cur_complete_time) / self._num_trials - print("\tcomplete_time", - num_prev_trials, - self._num_trials, - self._complete_time, - cur_complete_time, - flush=True) + self._complete_time = ( + num_prev_trials * self._complete_time + cur_complete_time + ) / self._num_trials + print( + "\tcomplete_time", + num_prev_trials, + self._num_trials, + self._complete_time, + cur_complete_time, + flush=True, + ) start_time = time.time() estimate_time = self._estimate_trial() end_time = time.time() cur_estimate_time = end_time - start_time - self._estimate_time = (num_prev_trials * self._estimate_time + - cur_estimate_time) / self._num_trials - print("\testimate_time", - num_prev_trials, - self._num_trials, - self._estimate_time, - cur_estimate_time, - estimate_time, - flush=True) + self._estimate_time = ( + num_prev_trials * self._estimate_time + cur_estimate_time + ) / self._num_trials + print( + "\testimate_time", + num_prev_trials, + self._num_trials, + self._estimate_time, + cur_estimate_time, + estimate_time, + flush=True, + ) results = {"estimate_time": estimate_time} return results @@ -791,12 +920,14 @@ class ParallelTuner: self._estimator = CostEstimator( self._dist_context.serial_main_program, self._cluster, - loop_count=self._loop_count) + loop_count=self._loop_count, + ) elif self._mode == "predict": self._estimator = CostEstimator( self._dist_context.serial_main_program, self._cluster, - loop_count=self._loop_count) + loop_count=self._loop_count, + ) elif self._mode == "train": # get serial main program with backward serial_main_program = self._dist_context.serial_main_program @@ -806,18 +937,23 @@ class ParallelTuner: # Generate backward 
serial_loss = self._dist_context.serial_fetch_vars["loss"][0] params_grads = self._parallelizer._generate_backward( - serial_main_program, serial_startup_program, serial_loss) + serial_main_program, serial_startup_program, serial_loss + ) # Generate optimizer optimizer_ops = self._parallelizer._generate_optimizer( - serial_main_program, serial_startup_program, serial_optimizer, - params_grads) - self._estimator = CostEstimator(serial_main_program, - self._cluster, - loop_count=self._loop_count) + serial_main_program, + serial_startup_program, + serial_optimizer, + params_grads, + ) + self._estimator = CostEstimator( + serial_main_program, self._cluster, loop_count=self._loop_count + ) max_memory = self._estimator._estimate_max_memory_by_dist_op( - self._dist_context) + self._dist_context + ) print("\tmax_memory", "{:,}".format(max_memory), flush=True) # The max memory must be less than 80% 32GB (hard code) if max_memory > 32 * 0.8 * 1024 * 1024 * 1024: @@ -829,18 +965,23 @@ class ParallelTuner: def _store_init_parallel_strategy(self): # If there is no annotation information, use the dp as the initial parallel strategy. # TODO: we should need a better way to set up the initial parallel strategy. - if not self._dist_context.has_annotation \ - or not self._dist_context.process_meshes: + if ( + not self._dist_context.has_annotation + or not self._dist_context.process_meshes + ): ranks = self._num_machines * self._num_devices_per_machine tensor_node = self._dist_context._serial_ordered_tensor_nodes[0] tensor_node_id = _node_id(tensor_node) tensor = self._dist_context._dist_tensors_for_graph[ - tensor_node_id].serial_tensor + tensor_node_id + ].serial_tensor tensor_dist_attr = self._dist_context._dist_tensors_for_graph[ - tensor_node_id].dist_attr + tensor_node_id + ].dist_attr tensor_dist_attr.process_mesh = ProcessMesh(list(range(ranks))) self._dist_context._process_meshes.append( - tensor_dist_attr.process_mesh) + tensor_dist_attr.process_mesh + ) tensor_dist_attr.dims_mapping = [0] + [ -1 for _ in range(len(tensor.shape) - 1) ] @@ -851,23 +992,30 @@ class ParallelTuner: # Do the sharding propagation self._completer.complete_forward_annotation() self._dist_context.block_state.parse_forward_blocks( - self._dist_context.serial_main_program) + self._dist_context.serial_main_program + ) # Backup the intital parallel strategy self._init_parallel_strategy[0] = copy.deepcopy( - self._dist_context._dist_tensors_for_program) + self._dist_context._dist_tensors_for_program + ) self._init_parallel_strategy[1] = copy.deepcopy( - self._dist_context._dist_ops_for_program) + self._dist_context._dist_ops_for_program + ) self._init_parallel_strategy[2] = copy.deepcopy( - self._dist_context.process_meshes) + self._dist_context.process_meshes + ) # Initialize the best parallel strategy to the initial one self._best_parallel_strategy[0] = copy.deepcopy( - self._dist_context._dist_tensors_for_program) + self._dist_context._dist_tensors_for_program + ) self._best_parallel_strategy[1] = copy.deepcopy( - self._dist_context._dist_ops_for_program) + self._dist_context._dist_ops_for_program + ) self._best_parallel_strategy[2] = copy.deepcopy( - self._dist_context._process_meshes) + self._dist_context._process_meshes + ) def _store_best_parallel_strategy(self): # Swap the best and the current parallel strategy @@ -876,9 +1024,11 @@ class ParallelTuner: tmp[1] = self._best_parallel_strategy[1] tmp[2] = self._best_parallel_strategy[2] self._best_parallel_strategy[ - 0] = self._dist_context._dist_tensors_for_program + 0 + ] 
= self._dist_context._dist_tensors_for_program self._best_parallel_strategy[ - 1] = self._dist_context._dist_ops_for_program + 1 + ] = self._dist_context._dist_ops_for_program self._best_parallel_strategy[2] = self._dist_context._process_meshes self._dist_context._dist_tensors_for_program = tmp[0] self._dist_context._dist_ops_for_program = tmp[1] @@ -894,19 +1044,23 @@ class ParallelTuner: # We have to restore the distributed context, because the estimation of one trail need to # generate the backward and update parts. Since we will do the tuning process, # here we only need to reset all distributed information to the default one. - self._dist_context._restore(serial=True, - serial_mode="to_backup", - dist=True, - dist_mode="to_default") + self._dist_context._restore( + serial=True, + serial_mode="to_backup", + dist=True, + dist_mode="to_default", + ) best_time = init_time start_time = time.time() self.construct_space() end_time = time.time() - print("construct_space time", - self._num_trials, - end_time - start_time, - flush=True) + print( + "construct_space time", + self._num_trials, + end_time - start_time, + flush=True, + ) create_trial_time = 0.0 eval_trial_time = 0.0 self._sample_time = 0.0 @@ -921,14 +1075,17 @@ class ParallelTuner: num_prev_trials = self._num_trials - 1 end_time = time.time() cur_create_trial_time = end_time - start_time - create_trial_time = (num_prev_trials * create_trial_time + - cur_create_trial_time) / self._num_trials - print("create_trial time", - num_prev_trials, - self._num_trials, - create_trial_time, - cur_create_trial_time, - flush=True) + create_trial_time = ( + num_prev_trials * create_trial_time + cur_create_trial_time + ) / self._num_trials + print( + "create_trial time", + num_prev_trials, + self._num_trials, + create_trial_time, + cur_create_trial_time, + flush=True, + ) if trial.status == TrialStatus.STOPPED: break # We need to backup the distributed context, because the evaluation of one trail will @@ -940,15 +1097,18 @@ class ParallelTuner: results = self._eval_trial(trial) end_time = time.time() cur_eval_trial_time = end_time - start_time - eval_trial_time = (num_prev_trials * eval_trial_time + - cur_eval_trial_time) / self._num_trials - print("eval_trial time", - num_prev_trials, - self._num_trials, - eval_trial_time, - cur_eval_trial_time, - "\n", - flush=True) + eval_trial_time = ( + num_prev_trials * eval_trial_time + cur_eval_trial_time + ) / self._num_trials + print( + "eval_trial time", + num_prev_trials, + self._num_trials, + eval_trial_time, + cur_eval_trial_time, + "\n", + flush=True, + ) cur_time = results["estimate_time"] if cur_time < best_time: @@ -956,13 +1116,17 @@ class ParallelTuner: self._store_best_parallel_strategy() best_time = cur_time # We need to restore the distributed context and reset the distributed information to the default. 
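The trial loop above maintains running averages of the per-trial timings (sample, complete, estimate, create, eval) with the same update in each place; a minimal standalone sketch of that update rule, with illustrative names not taken from the patch:

def update_running_average(prev_avg, num_prev, cur_value):
    # Mean over num_prev + 1 observations, given the mean of the first num_prev.
    return (num_prev * prev_avg + cur_value) / (num_prev + 1)

# Example: averaging per-trial evaluation times across three trials.
avg = 0.0
for n, t in enumerate([1.2, 0.8, 1.0]):
    avg = update_running_average(avg, n, t)
assert abs(avg - 1.0) < 1e-9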
- self._dist_context._restore(serial=True, - serial_mode="to_backup", - dist=True, - dist_mode="to_default") + self._dist_context._restore( + serial=True, + serial_mode="to_backup", + dist=True, + dist_mode="to_default", + ) # Select the best parallel strategy - self._dist_context._dist_tensors_for_program = self._best_parallel_strategy[ - 0] + self._dist_context._dist_tensors_for_program = ( + self._best_parallel_strategy[0] + ) self._dist_context._dist_ops_for_program = self._best_parallel_strategy[ - 1] + 1 + ] self._dist_context._process_meshes = self._best_parallel_strategy[2] diff --git a/python/paddle/distributed/auto_parallel/tuner/profiler.py b/python/paddle/distributed/auto_parallel/tuner/profiler.py index 4b2655028bf7f07b3accbdde0098f2e57ef6719d..1aeafbea76410a33fa02a222ec7c5766393f891b 100644 --- a/python/paddle/distributed/auto_parallel/tuner/profiler.py +++ b/python/paddle/distributed/auto_parallel/tuner/profiler.py @@ -22,8 +22,13 @@ import time import paddle from paddle.fluid.framework import Program, _current_expected_place from paddle.fluid.framework import Operator -from paddle.distributed.auto_parallel.process_group import get_all_process_groups, new_process_group -from paddle.distributed.auto_parallel.dist_loader import DistributedDataLoaderFromGenerator +from paddle.distributed.auto_parallel.process_group import ( + get_all_process_groups, + new_process_group, +) +from paddle.distributed.auto_parallel.dist_loader import ( + DistributedDataLoaderFromGenerator, +) from paddle.distributed.collective import _get_global_env paddle.enable_static() @@ -44,25 +49,32 @@ def parse_args(): "--profile_start_step", default=10, type=int, - help="integer indicates the warmup step before starting profile.") - parser.add_argument("--profile_end_step", - default=30, - type=int, - help="integer indicates at the end step of profile.") - parser.add_argument("--rank", - type=int, - required=True, - help="the rank id of the this process.") - parser.add_argument("--device_id", - type=int, - required=True, - help="the device id of the this process.") + help="integer indicates the warmup step before starting profile.", + ) + parser.add_argument( + "--profile_end_step", + default=30, + type=int, + help="integer indicates at the end step of profile.", + ) + parser.add_argument( + "--rank", + type=int, + required=True, + help="the rank id of the this process.", + ) + parser.add_argument( + "--device_id", + type=int, + required=True, + help="the device id of the this process.", + ) parser.add_argument( "--ctx_filename", type=str, required=True, - help= - "the filename to the profile context file saved by optimizaiton tuner") + help="the filename to the profile context file saved by optimizaiton tuner", + ) args = parser.parse_args() @@ -109,11 +121,9 @@ def get_cpp_error_type(error): return error_type -def create_dataloader(main_program, - startup_program, - profile_ctx, - epochs=1, - steps_per_epoch=None): +def create_dataloader( + main_program, startup_program, profile_ctx, epochs=1, steps_per_epoch=None +): dataset = profile_ctx["dataset"] main_block = main_program.global_block() @@ -141,7 +151,8 @@ def create_dataloader(main_program, epochs=epochs, steps_per_epoch=steps_per_epoch, data_parallel_world_size=dataset.dp_world_size, - data_parallel_rank=dataset.dp_rank) + data_parallel_rank=dataset.dp_rank, + ) # move read op from the end of program to the start of program new_op_size = len(main_block.ops) @@ -162,8 +173,12 @@ def init_comm(profile_ctx): dist_env = profile_ctx['distributed_env'] 
genv = _get_global_env() genv = dist_env - print("current process rank: {}, device_id: {}, ip: {}.", genv.rank, - genv.device_id, genv.current_endpoint) + print( + "current process rank: {}, device_id: {}, ip: {}.", + genv.rank, + genv.device_id, + genv.current_endpoint, + ) # init nccl comm group_map = profile_ctx['group_map'] @@ -201,8 +216,9 @@ def profiler(args): """ # load ctx if not os.path.isfile(args.ctx_filename): - raise ValueError("There is no profile context named {}.".format( - args.ctx_filename)) + raise ValueError( + "There is no profile context named {}.".format(args.ctx_filename) + ) with open(args.ctx_filename, 'rb') as f: profile_ctx = pickle.load(f, encoding='latin1') @@ -240,8 +256,9 @@ def profiler(args): print("step: %d, loss_print: %f" % (eval_step, loss[0])) eval_step += 1 - avg_tput = 1.0 * (args.profile_end_step - - args.profile_start_step) / duration + avg_tput = ( + 1.0 * (args.profile_end_step - args.profile_start_step) / duration + ) result_dict = { "Throughtput": avg_tput, diff --git a/python/paddle/distributed/auto_parallel/tuner/recorder.py b/python/paddle/distributed/auto_parallel/tuner/recorder.py index 4d088379d8b3b749cd875b18291fb5c743fc0e33..2c838cfb1492164fbb0e27f6085238f8271dfdcb 100644 --- a/python/paddle/distributed/auto_parallel/tuner/recorder.py +++ b/python/paddle/distributed/auto_parallel/tuner/recorder.py @@ -71,7 +71,9 @@ class MetricRecords(object): if direction not in {"min", "max"}: raise ValueError( "direction should be one of {{min, max}}, but got: {}.".format( - direction)) + direction + ) + ) self._direction = direction self._records = {} diff --git a/python/paddle/distributed/auto_parallel/tuner/storable.py b/python/paddle/distributed/auto_parallel/tuner/storable.py index 18a0669d62286b104cc2119bac8e15817e986725..fb03070ad099a7b122f514c3c56386a35f3cbe92 100644 --- a/python/paddle/distributed/auto_parallel/tuner/storable.py +++ b/python/paddle/distributed/auto_parallel/tuner/storable.py @@ -19,7 +19,6 @@ import json class Storable(object): - def get_state(self): raise NotImplementedError diff --git a/python/paddle/distributed/auto_parallel/tuner/trial.py b/python/paddle/distributed/auto_parallel/tuner/trial.py index 2c8963322a3d63a130488a6d83db25f8fe7a2ce9..b152e407f5f18313d3397e52c469dbe6e0d45d06 100644 --- a/python/paddle/distributed/auto_parallel/tuner/trial.py +++ b/python/paddle/distributed/auto_parallel/tuner/trial.py @@ -32,11 +32,9 @@ class TrialStatus: class Trial(Storable): - - def __init__(self, - tunable_space, - trial_id=None, - status=TrialStatus.RUNNING): + def __init__( + self, tunable_space, trial_id=None, status=TrialStatus.RUNNING + ): self._id = _generate_trial_id() if trial_id is None else trial_id self._space = tunable_space self._recorder = MetricsRecorder() @@ -115,13 +113,14 @@ class Trial(Storable): class OptimizationTunerTrial(Trial): - - def __init__(self, - config, - name, - changed_configs, - trial_id=None, - status=TrialStatus.RUNNING): + def __init__( + self, + config, + name, + changed_configs, + trial_id=None, + status=TrialStatus.RUNNING, + ): super(OptimizationTunerTrial, self).__init__(config, trial_id, status) self._name = name self._changed_configs = changed_configs @@ -140,7 +139,8 @@ class OptimizationTunerTrial(Trial): h1_format = " " + "|{{:^{}s}}|\n".format(length) h2_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format( - max_k, " " * spacing, max_v) + max_k, " " * spacing, max_v + ) border = " +" + "".join(["="] * length) + "+" line = " +" + "".join(["-"] * length) + "+" @@ -158,7 +158,8 @@ 
class OptimizationTunerTrial(Trial): keys = my_configs.to_dict().keys() for key in keys: draws += h2_format.format( - key, str(my_configs.to_dict().get(key, None))) + key, str(my_configs.to_dict().get(key, None)) + ) result_res = draws + border return result_res diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py index 2009f1d911cbf2802ed819fd3a7ce69b613fdc03..e3e503401b45371d9372aec6b6ff4ee9d42940df 100644 --- a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py +++ b/python/paddle/distributed/auto_parallel/tuner/tunable_space.py @@ -104,29 +104,24 @@ class TunableSpace(object): return self._retrieve(tv) def int_range(self, name, start, stop, step=1, default=None): - tv = IntRange(name=name, - start=start, - stop=stop, - step=step, - default=default) + tv = IntRange( + name=name, start=start, stop=stop, step=step, default=default + ) return self._retrieve(tv) def float_range(self, name, start, stop, step=None, default=None): - tv = FloatRange(name=name, - start=start, - stop=stop, - step=step, - default=default) + tv = FloatRange( + name=name, start=start, stop=stop, step=step, default=default + ) return self._retrieve(tv) def get_state(self): return { - "variables": [{ - "class_name": v.__class__.__name__, - "state": v.get_state() - } for v in self._variables.values()], - "values": - dict((k, v) for (k, v) in self.values.items()) + "variables": [ + {"class_name": v.__class__.__name__, "state": v.get_state()} + for v in self._variables.values() + ], + "values": dict((k, v) for (k, v) in self.values.items()), } @classmethod @@ -146,11 +141,16 @@ def _deserialize_tunable_variable(state): if isinstance(state, classes): return state - if (not isinstance(state, dict) or "class_name" not in state - or "state" not in state): + if ( + not isinstance(state, dict) + or "class_name" not in state + or "state" not in state + ): raise ValueError( - "Expect state to be a python dict containing class_name and state as keys, but found {}" - .format(state)) + "Expect state to be a python dict containing class_name and state as keys, but found {}".format( + state + ) + ) cls_name = state["class_name"] cls = cls_name_to_cls[cls_name] diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py index 31dd07aad374c335ae109016e82beaf699483368..7b09f3a3b9a271c75e3478bf5b09d643c1c6e680 100644 --- a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py +++ b/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py @@ -50,7 +50,9 @@ class Fixed(TunableVariable): if not isinstance(default, (str, int, float, bool)): raise ValueError( "Fixed must be an str, int, float or bool, but found {}".format( - default)) + default + ) + ) self._default = default def random(self, seed=None): @@ -69,7 +71,8 @@ class Boolean(TunableVariable): super(Boolean, self).__init__(name=name, default=default) if default not in {True, False}: raise ValueError( - "default must be a Python boolean, but got {}".format(default)) + "default must be a Python boolean, but got {}".format(default) + ) def random(self, seed=None): rng = np.random.default_rng(seed) @@ -77,19 +80,21 @@ class Boolean(TunableVariable): def __repr__(self): return 'Boolean(name: "{}", default: {})'.format( - self.name, self.default) + self.name, self.default + ) class Choice(TunableVariable): - def __init__(self, name, values, default=None): super(Choice, 
self).__init__(name=name, default=default) types = set(type(v) for v in values) if len(types) > 1: raise TypeError( - "Choice can contain only one type of value, but found values: {} with types: {}." - .format(str(values), str(types))) + "Choice can contain only one type of value, but found values: {} with types: {}.".format( + str(values), str(types) + ) + ) self._is_unknown_type = False if isinstance(values[0], str): @@ -115,8 +120,10 @@ class Choice(TunableVariable): if default is not None and default not in values: raise ValueError( - "The default value should be one of the choices {}, but found {}" - .format(values, default)) + "The default value should be one of the choices {}, but found {}".format( + values, default + ) + ) self._default = default @property @@ -142,7 +149,8 @@ class Choice(TunableVariable): def __repr__(self): return 'Choice(name: "{}", values: {}, default: {})'.format( - self.name, self.values, self.default) + self.name, self.values, self.default + ) class IntRange(TunableVariable): @@ -187,13 +195,15 @@ class IntRange(TunableVariable): def _check_int(self, val): int_val = int(val) if int_val != val: - raise ValueError("Expects val is an int, but found: {}.".format( - str(val))) + raise ValueError( + "Expects val is an int, but found: {}.".format(str(val)) + ) return int_val def __repr__(self): return "IntRange(name: {}, start: {}, stop: {}, step: {}, default: {})".format( - self.name, self.start, self.stop, self.step, self.default) + self.name, self.start, self.stop, self.step, self.default + ) class FloatRange(TunableVariable): @@ -201,13 +211,9 @@ class FloatRange(TunableVariable): Float range. """ - def __init__(self, - name, - start, - stop, - step=None, - default=None, - endpoint=False): + def __init__( + self, name, start, stop, step=None, default=None, endpoint=False + ): super(FloatRange, self).__init__(name=name, default=default) self.stop = float(stop) self.start = float(start) @@ -246,5 +252,10 @@ class FloatRange(TunableVariable): def __repr__(self): return "FloatRange(name: {}, start: {}, stop: {}, step: {}, default: {}, endpoint: {})".format( - self.name, self.start, self.stop, self.step, self.default, - self.endpoint) + self.name, + self.start, + self.stop, + self.step, + self.default, + self.endpoint, + ) diff --git a/python/paddle/distributed/auto_parallel/utils.py b/python/paddle/distributed/auto_parallel/utils.py index cf6f506f8c5632c5c308d8b96ceee90765cca8bc..8b7ec647f6e765099f831cf48a40d42f2590592a 100644 --- a/python/paddle/distributed/auto_parallel/utils.py +++ b/python/paddle/distributed/auto_parallel/utils.py @@ -23,12 +23,18 @@ from functools import reduce import paddle.fluid.core as core from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.distributed.auto_parallel.process_group import get_all_process_groups +from paddle.distributed.auto_parallel.process_group import ( + get_all_process_groups, +) from paddle.fluid.io import is_parameter, is_belong_to_optimizer -from paddle.distributed.auto_parallel.dist_attribute import TensorDistributedAttribute, OperatorDistributedAttribute +from paddle.distributed.auto_parallel.dist_attribute import ( + TensorDistributedAttribute, + OperatorDistributedAttribute, +) __not_shape_var_type__ = [ - core.VarDesc.VarType.READER, core.VarDesc.VarType.STEP_SCOPES + core.VarDesc.VarType.READER, + core.VarDesc.VarType.STEP_SCOPES, ] @@ -39,7 +45,8 @@ def get_logger(log_level, name="auto_parallel"): logger.setLevel(log_level) log_handler = logging.StreamHandler() log_format = 
logging.Formatter( - '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s') + '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s' + ) log_handler.setFormatter(log_format) logger.addHandler(log_handler) return logger @@ -114,8 +121,11 @@ def verify_shard_spec(shard_spec, tensor_shape, process_mesh): if not verify_dims_mapping(dims_mapping, process_mesh): return False for i in range(len(tensor_shape)): - if dims_mapping[i] != -1 and tensor_shape[i] > 0 \ - and tensor_shape[i] % process_mesh.shape[dims_mapping[i]] != 0: + if ( + dims_mapping[i] != -1 + and tensor_shape[i] > 0 + and tensor_shape[i] % process_mesh.shape[dims_mapping[i]] != 0 + ): return False return True @@ -141,14 +151,17 @@ def compute_compatible_dims_mapping(dims_mapping_list): return None length = len(dims_mapping_list[0]) for dims_mapping in dims_mapping_list: - assert dims_mapping is not None, \ - "Dims mapping must not be None for compatible computation" - assert len(dims_mapping) == length, \ - "The length of dims_mapping in list must be same for compatible computation." + assert ( + dims_mapping is not None + ), "Dims mapping must not be None for compatible computation" + assert ( + len(dims_mapping) == length + ), "The length of dims_mapping in list must be same for compatible computation." compatible_result = [] for dim_mappings in zip(*dims_mapping_list): compatible_dim_mapping = compute_compatible_dim_mapping( - list(dim_mappings)) + list(dim_mappings) + ) if compatible_dim_mapping is None: return None compatible_result.append(compatible_dim_mapping) @@ -161,7 +174,10 @@ def compute_compatible_process_mesh(process_mesh_list): return compatible_process_mesh for process_mesh in process_mesh_list: if process_mesh is not None: - if compatible_process_mesh is None or compatible_process_mesh == process_mesh: + if ( + compatible_process_mesh is None + or compatible_process_mesh == process_mesh + ): compatible_process_mesh = process_mesh else: return None @@ -201,15 +217,18 @@ def remove_distributed_attr_suffix(name): def check_distributed_attr_for_program(program, dist_context=None): from .dist_context import get_default_distributed_context + if dist_context is None: dist_context = get_default_distributed_context() - assert dist_context.is_initialized_for_program(), \ - "Distributed attributes must be initialized before check." + assert ( + dist_context.is_initialized_for_program() + ), "Distributed attributes must be initialized before check." 
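The shard-spec verification above reduces to a per-dimension divisibility rule; a simplified standalone sketch of that rule, with hypothetical names and no Paddle dependency:

def dims_mapping_is_valid(tensor_shape, dims_mapping, mesh_shape):
    # A tensor dim mapped to mesh axis d must split evenly across mesh_shape[d];
    # a mapping of -1 means the dim is replicated and is skipped.
    for size, mapped in zip(tensor_shape, dims_mapping):
        if mapped != -1 and size > 0 and size % mesh_shape[mapped] != 0:
            return False
    return True

assert dims_mapping_is_valid([8, 1024], [0, -1], [4, 2])      # 8 splits evenly over 4 ranks
assert not dims_mapping_is_valid([6, 1024], [0, -1], [4, 2])  # 6 does not split over 4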
for block in program.blocks: for tensor in block.vars.values(): dist_tensor = dist_context.get_dist_tensor_for_graph(tensor) tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program( - tensor) + tensor + ) if (tensor_dist_attr is not None) and (not dist_tensor.is_valid()): return False for op in block.ops: @@ -229,6 +248,7 @@ def print_program_with_dist_attr(program, dist_context=None): lock.acquire() from .dist_context import get_default_distributed_context from .dist_context import set_default_distributed_context + if dist_context is None: dist_context = get_default_distributed_context() print(program, flush=True) @@ -256,7 +276,8 @@ def _get_comm_group(processes, shape, axis, rank): # NOTE _linear_idx2coordinate assume processes mesh start with 0 and continuous # tricks to support processes mesh when it is not start with 0 or continuous assert rank in processes, "rank [{}] is NOT in processes group {}".format( - rank, processes) + rank, processes + ) rank_relatvie = processes.index(rank) coordinate = _linear_idx2coordinate(shape, rank_relatvie) coordinates_in_group = [coordinate[:] for i in range(shape[axis])] @@ -325,14 +346,19 @@ def _coordinate2linear_idx(mesh_shape, coordinate): assert len(mesh_shape) == len( coordinate ), "coordinate should have the same size as mesh shape, but got shape: {}, coordinate: {}".format( - mesh_shape, coordinate) + mesh_shape, coordinate + ) for i in range(len(mesh_shape)): - assert coordinate[ - i] >= 0, "index in dimension [{}] is least than zero. coordinate: {}".format( - i, coordinate) - assert coordinate[i] < mesh_shape[ - i], "index beyond extent in dimension [{}]. shape: {}, coordinate: {}".format( - i, mesh_shape, coordinate) + assert ( + coordinate[i] >= 0 + ), "index in dimension [{}] is least than zero. coordinate: {}".format( + i, coordinate + ) + assert ( + coordinate[i] < mesh_shape[i] + ), "index beyond extent in dimension [{}]. shape: {}, coordinate: {}".format( + i, mesh_shape, coordinate + ) base = mesh_shape[-1] linear_idx = coordinate[-1] @@ -365,11 +391,13 @@ def _linear_idx2coordinate(mesh_shape, linear_idx): """ assert linear_idx >= 0, "linear index [{}] is least than zero".format( - linear_idx) + linear_idx + ) assert linear_idx < np.prod( mesh_shape ), "linear index beyond the extent of mesh shape. 
shape: {}, linear index: {}".format( - mesh_shape, linear_idx) + mesh_shape, linear_idx + ) base = 1 coordinate = [-1] * len(mesh_shape) @@ -392,15 +420,17 @@ def _get_corresponding_rank(dist_context, target_mesh, rank): coordinate = None for mesh in dist_context.process_meshes: if rank in mesh.processes and mesh.topology == target_mesh.topology: - coordinate = _linear_idx2coordinate(mesh.topology, - mesh.processes.index(rank)) + coordinate = _linear_idx2coordinate( + mesh.topology, mesh.processes.index(rank) + ) break # assert coordinate is not None, "could NOT found rank [{}] in any registered mesh".format( # rank) if coordinate is not None: - return target_mesh.processes[_coordinate2linear_idx( - mesh.topology, coordinate)] + return target_mesh.processes[ + _coordinate2linear_idx(mesh.topology, coordinate) + ] else: return target_mesh.processes[0] @@ -412,7 +442,8 @@ def _get_unshard_dist_shape(var, dist_attr): assert len(var_shape) == len( mapping ), "variable shape [{}] and dim_mapping [{}] is NOT match !".format( - var_shape, mapping) + var_shape, mapping + ) new_shape = [] for idx in range(len(var_shape)): if var_shape[idx] == -1 or mapping[idx] == -1: @@ -425,13 +456,15 @@ def _get_unshard_dist_shape(var, dist_attr): def make_data_unshard(dist_main_prog, dist_startup_prog, dist_context=None): from .dist_context import get_default_distributed_context + if dist_context is None: dist_context = get_default_distributed_context() for var in dist_main_prog.list_vars(): if var.is_data: tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program( - var) + var + ) inverse_shape = _get_unshard_dist_shape(var, tensor_dist_attr) var.desc.set_shape(inverse_shape) dim_mapping = tensor_dist_attr.dims_mapping @@ -441,62 +474,76 @@ def make_data_unshard(dist_main_prog, dist_startup_prog, dist_context=None): def _update_addition_info(addition_info): - """ Update default addition_info with inputs """ + """Update default addition_info with inputs""" add_info = {"epoch": 0, "batch": 0, "batch_size": 0} if not addition_info: return add_info elif not isinstance(addition_info, dict): - raise TypeError("The type of 'addition_info' should be 'dict', " - "but got '{}'.".format(str(type(addition_info)))) + raise TypeError( + "The type of 'addition_info' should be 'dict', " + "but got '{}'.".format(str(type(addition_info))) + ) else: for item, value in addition_info.items(): if item not in ["epoch", "batch", "batch_size"]: raise ValueError( "The key of 'addition_info' should be one of the " "['epoch', 'batch', 'batch_size'], but got '{}'.".format( - str(item))) + str(item) + ) + ) if not isinstance(value, int): raise ValueError( "The value of 'addition_info' should be 'int', " - "but got '{}'.".format(str(type(value)))) + "but got '{}'.".format(str(type(value))) + ) add_info[item] = value return add_info def _check_valid_path(file_path): - """ Validity check of input file path """ + """Validity check of input file path""" if not file_path: return file_path elif isinstance(file_path, list): for file in file_path: if not isinstance(file, str): - raise TypeError("The type of file path should be 'str', " - "but got '{}'.".format(str(type(file)))) + raise TypeError( + "The type of file path should be 'str', " + "but got '{}'.".format(str(type(file))) + ) if not os.path.exists(file): raise ValueError( - "The file path '{}' does not exist.".format(file)) + "The file path '{}' does not exist.".format(file) + ) return file_path else: - raise TypeError("The type of file path should be 'list', " - "but got 
'{}'.".format(str(type(file_path)))) + raise TypeError( + "The type of file path should be 'list', " + "but got '{}'.".format(str(type(file_path))) + ) def _check_param_dict(param_dict): if not param_dict: raise ValueError("'param_dict' cannot be None.") elif not isinstance(param_dict, dict): - raise TypeError("The type of 'param_dict' should be 'dict', " - "but got '{}'.".format(str(type(param_dict)))) + raise TypeError( + "The type of 'param_dict' should be 'dict', " + "but got '{}'.".format(str(type(param_dict))) + ) else: for name, value in param_dict.items(): if not isinstance(name, str): raise TypeError( "The type of key of 'param_dict' should be 'str', " - "but got '{}'.".format(str(type(name)))) + "but got '{}'.".format(str(type(name))) + ) if not isinstance(value, paddle.fluid.LoDTensor): raise TypeError( "The type of value of 'param_dict' should be 'LoDTensor', " - "but got '{}'.".format(str(type(value)))) + "but got '{}'.".format(str(type(value))) + ) return param_dict @@ -504,33 +551,40 @@ def _check_dist_attr(dist_attr): if not dist_attr: return dist_attr elif not isinstance(dist_attr, dict): - raise TypeError("The type of 'dist_attr' should be 'dict', " - "but got '{}'.".format(str(type(dist_attr)))) + raise TypeError( + "The type of 'dist_attr' should be 'dict', " + "but got '{}'.".format(str(type(dist_attr))) + ) else: for name, value in dist_attr.items(): if not isinstance(name, str): raise TypeError( "The type of param name of 'dist_attr' should be 'str', " - "but got '{}'.".format(str(type(name)))) + "but got '{}'.".format(str(type(name))) + ) if not isinstance(value, dict): raise TypeError( "The type of distributed attribute should be 'dict', " - "but got '{}'".format(str(type(value)))) + "but got '{}'".format(str(type(value))) + ) attr = ['process_shape', 'process_group', 'dims_mapping'] if list(value.keys()) != attr: raise ValueError( "The key of distributed attribute should be " "'['process_shape', 'process_group', 'dims_mapping']', " - "but got {}.".format(str(value.keys()))) + "but got {}.".format(str(value.keys())) + ) return dist_attr -def save_distributed_checkpoint(program, - checkpoint_path, - dist_attr_path, - addition_info=None, - is_integrated=False, - dist_context=None): +def save_distributed_checkpoint( + program, + checkpoint_path, + dist_attr_path, + addition_info=None, + is_integrated=False, + dist_context=None, +): """ Save model parameter state, optimzer state, distributed attribute and additional information of each rank. @@ -569,7 +623,8 @@ def save_distributed_checkpoint(program, else: # TODO: integrate param before save raise NotImplementedError( - "Integrating parameter has not been implemented.") + "Integrating parameter has not been implemented." + ) def load_distributed_checkpoint(checkpoint_path, dist_attr_path): @@ -597,10 +652,10 @@ def load_distributed_checkpoint(checkpoint_path, dist_attr_path): './dist_attr_rank1.pdattr'] param_dict, dist_attr, add_info = load_distributed_checkpoint(ckpt_path, dist_attr_path) """ - assert _check_valid_path(checkpoint_path), \ - "'checkpoint_path' cannot be None." - assert _check_valid_path(dist_attr_path), \ - "'dist_attr_path' cannot be None." + assert _check_valid_path( + checkpoint_path + ), "'checkpoint_path' cannot be None." + assert _check_valid_path(dist_attr_path), "'dist_attr_path' cannot be None." 
state_dict_info = _load_distributed_state_dict(checkpoint_path) dist_attr = _load_distributed_attribute(dist_attr_path) @@ -609,10 +664,9 @@ def load_distributed_checkpoint(checkpoint_path, dist_attr_path): return param_dict, dist_attr, addition_info -def load_checkpoint_into_program(checkpoint_path, - dist_attr_path, - program, - dist_context=None): +def load_checkpoint_into_program( + checkpoint_path, dist_attr_path, program, dist_context=None +): """ Load parameter, optimizer, distributed attribute and addition_info into model. @@ -641,10 +695,10 @@ def load_checkpoint_into_program(checkpoint_path, from .dist_context import get_default_distributed_context assert isinstance(program, paddle.fluid.framework.Program) - assert _check_valid_path(checkpoint_path), \ - "'checkpoint_path' cannot be None." - assert _check_valid_path(dist_attr_path), \ - "'dist_attr_path' cannot be None." + assert _check_valid_path( + checkpoint_path + ), "'checkpoint_path' cannot be None." + assert _check_valid_path(dist_attr_path), "'dist_attr_path' cannot be None." if dist_context is None: dist_context = get_default_distributed_context() all_state_dict_info = _load_distributed_state_dict(checkpoint_path) @@ -652,9 +706,9 @@ def load_checkpoint_into_program(checkpoint_path, all_cur_dist_attr = get_dist_attr(program, dist_context) all_param_dict = all_state_dict_info["model"] addition_info = all_state_dict_info["addition_info"] - sliced_param_dict = merge_and_slice_parameter(all_param_dict, - all_pre_dist_attr, - all_cur_dist_attr) + sliced_param_dict = merge_and_slice_parameter( + all_param_dict, all_pre_dist_attr, all_cur_dist_attr + ) load_parameter_into_program(sliced_param_dict, program) return addition_info @@ -676,28 +730,31 @@ def load_parameter_into_program(param_dict, program): def _save_distributed_attribute(program, dist_attr_path, dist_context): - """ Save distributed attribute of all parameters """ + """Save distributed attribute of all parameters""" # TODO: just save a complete distributed attribute file rank_id = paddle.distributed.get_rank() - dist_attr_name = os.path.join(dist_attr_path, - "dist_attr_rank{}.pdattr".format(rank_id)) + dist_attr_name = os.path.join( + dist_attr_path, "dist_attr_rank{}.pdattr".format(rank_id) + ) dist_attr_dict = { "model": get_dist_attr(program, dist_context), - "world_size": paddle.distributed.get_world_size() + "world_size": paddle.distributed.get_world_size(), } paddle.save(dist_attr_dict, dist_attr_name) logging.info( - "Already saved distributed attribute to '{}'.".format(dist_attr_path)) + "Already saved distributed attribute to '{}'.".format(dist_attr_path) + ) def _load_distributed_attribute(dist_attr_path): - """ Load parameters' distributed attribute from dist_attr_path """ + """Load parameters' distributed attribute from dist_attr_path""" total_dist_attr = {} for dist_attr_file in dist_attr_path: dist_attr = paddle.load(dist_attr_file) pre_world_size = dist_attr["world_size"] - assert pre_world_size == len(dist_attr_path), \ - "The number of 'dist_attr_path' must be equal to the last training world size." + assert pre_world_size == len( + dist_attr_path + ), "The number of 'dist_attr_path' must be equal to the last training world size." 
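The docstrings above describe per-rank file lists; a minimal usage sketch of the load path follows. The two-rank paths mirror the docstring example, and the import location is an assumption (the module being reformatted here).

# Hedged sketch, not part of the patch. Assumes a job that previously ran
# with 2 ranks and saved one .pdmodel / .pdattr pair per rank.
from paddle.distributed.auto_parallel.utils import load_distributed_checkpoint

ckpt_path = ["./model_state_rank0.pdmodel", "./model_state_rank1.pdmodel"]
dist_attr_path = ["./dist_attr_rank0.pdattr", "./dist_attr_rank1.pdattr"]

# Returns the merged parameters, the saved distributed attributes and the
# addition_info dict that was passed to save_distributed_checkpoint.
param_dict, dist_attr, add_info = load_distributed_checkpoint(
    ckpt_path, dist_attr_path
)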
for name, attr in dist_attr["model"].items(): if name not in total_dist_attr: total_dist_attr[name] = attr @@ -706,27 +763,29 @@ def _load_distributed_attribute(dist_attr_path): def _save_distributed_state_dict(program, addition_info, checkpoint_path): - """ Save parameters' state_dict """ + """Save parameters' state_dict""" rank = paddle.distributed.get_rank() - ckpt_file_name = os.path.join(checkpoint_path, - "model_state_rank{}.pdmodel".format(rank)) + ckpt_file_name = os.path.join( + checkpoint_path, "model_state_rank{}.pdmodel".format(rank) + ) state_dict = { "model": program.state_dict(), "world_size": paddle.distributed.get_world_size(), - "addition_info": addition_info + "addition_info": addition_info, } paddle.save(state_dict, ckpt_file_name) logging.info("Already saved model to '{}'.".format(checkpoint_path)) def _load_distributed_state_dict(checkpoint_path): - """ Load parameters' state_dict from checkpoint_path """ + """Load parameters' state_dict from checkpoint_path""" all_state_dict = {} for idx, ckpt_file in enumerate(checkpoint_path): state_dict_info = paddle.load(ckpt_file, return_numpy=True) pre_world_size = state_dict_info["world_size"] - assert pre_world_size == len(checkpoint_path), \ - "The number of 'checkpoint_path' must be equal to the last training world size." + assert pre_world_size == len( + checkpoint_path + ), "The number of 'checkpoint_path' must be equal to the last training world size." if idx == 0: addition_info = state_dict_info["addition_info"] for name, value in state_dict_info["model"].items(): @@ -737,7 +796,7 @@ def _load_distributed_state_dict(checkpoint_path): all_state_dict_info = { "model": all_state_dict, - "addition_info": addition_info + "addition_info": addition_info, } return all_state_dict_info @@ -758,13 +817,14 @@ def get_dist_attr(program, dist_context=None): for var in program.list_vars(): if is_parameter(var) or is_belong_to_optimizer(var): tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program( - var) + var + ) process_mesh = tensor_dist_attr.process_mesh dims_mapping = tensor_dist_attr.dims_mapping dist_attr[var.name] = { "process_shape": process_mesh.topology, "process_group": process_mesh.processes, - "dims_mapping": dims_mapping + "dims_mapping": dims_mapping, } return dist_attr @@ -782,19 +842,26 @@ def merge_and_slice_parameter(dist_param_dict, pre_dist_attr, cur_dist_attr): dist_param_dict(dict): parameters' value of current rank. """ assert _check_dist_attr(pre_dist_attr), "'pre_dist_attr' cannot be None." - assert isinstance(dist_param_dict, dict), \ - "The type of 'dist_param_dict' should be 'dict', but got {}.".format( - str(type(dist_param_dict))) + assert isinstance( + dist_param_dict, dict + ), "The type of 'dist_param_dict' should be 'dict', but got {}.".format( + str(type(dist_param_dict)) + ) for name, value in dist_param_dict.items(): if not isinstance(name, str): - raise TypeError("The key of 'dist_param_dict' is parameter's name, " - "and its type should be 'str', but got {}.".format( - str(type(name)))) + raise TypeError( + "The key of 'dist_param_dict' is parameter's name, " + "and its type should be 'str', but got {}.".format( + str(type(name)) + ) + ) if not isinstance(value, list) or not all( - isinstance(v, np.ndarray) for v in value): + isinstance(v, np.ndarray) for v in value + ): raise TypeError( "The value of 'dist_param_dict' is parameter's value of all ranks, " - "and its type should be 'list(numpy.ndarray)'.") + "and its type should be 'list(numpy.ndarray)'." 
+ ) if cur_dist_attr is None: return {} @@ -822,7 +889,8 @@ def merge_and_slice_parameter(dist_param_dict, pre_dist_attr, cur_dist_attr): cur_dims_mapping = cur_attr["dims_mapping"] if len(set(pre_dims_mapping)) > 1 or -1 not in pre_dims_mapping: complete_param = _merge_parameter_with_dist_attr( - pre_param, pre_attr) + pre_param, pre_attr + ) dist_param_dict[var_name] = complete_param else: complete_param = pre_param[0] @@ -830,7 +898,8 @@ def merge_and_slice_parameter(dist_param_dict, pre_dist_attr, cur_dist_attr): if len(set(cur_dims_mapping)) > 1 or -1 not in cur_dims_mapping: sliced_param = _slice_parameter_with_dist_attr( - complete_param, cur_attr) + complete_param, cur_attr + ) dist_param_dict[var_name] = sliced_param for var_name in pre_dist_attr: @@ -841,67 +910,81 @@ def merge_and_slice_parameter(dist_param_dict, pre_dist_attr, cur_dist_attr): if param_not_in_pre: warnings.warn( "Parameters '{}' are not found in last training process.".format( - str(param_not_in_pre))) + str(param_not_in_pre) + ) + ) if param_not_in_cur: warnings.warn( "Parameters '{}' are not found in current training process.".format( - str(param_not_in_cur))) + str(param_not_in_cur) + ) + ) return dist_param_dict def _merge_parameter_with_dist_attr(param_list, dist_attr): - """ Merge parameter with distributed attribute """ + """Merge parameter with distributed attribute""" from .reshard import Resharder dims_mapping = dist_attr["dims_mapping"] process_shape = dist_attr["process_shape"] process_group = dist_attr["process_group"] # get the complete shape of the parameter - complete_shape = Resharder.compute_complete_shape(param_list[0].shape, - process_shape, - dims_mapping) + complete_shape = Resharder.compute_complete_shape( + param_list[0].shape, process_shape, dims_mapping + ) # merge the parameter with dist_attr partition_param_list = [] merged_partiton = [] for process in process_group: partition_index = Resharder.compute_partition_index( - process, complete_shape, dims_mapping, process_shape, process_group) + process, complete_shape, dims_mapping, process_shape, process_group + ) index = process_group.index(process) if partition_index not in merged_partiton: merged_partiton.append(partition_index) - _merge_parameter(partition_param_list, param_list[index], - partition_index, complete_shape) + _merge_parameter( + partition_param_list, + param_list[index], + partition_index, + complete_shape, + ) - assert len(partition_param_list) == 1 or not partition_param_list, \ - "Fail to merge parameter" + assert ( + len(partition_param_list) == 1 or not partition_param_list + ), "Fail to merge parameter" complete_param = partition_param_list[0][0] return complete_param def _slice_parameter_with_dist_attr(param, dist_attr): - """ Slice parameter with distributed attribute """ - param = np.array(param) if isinstance(param, - paddle.fluid.LoDTensor) else param + """Slice parameter with distributed attribute""" + param = ( + np.array(param) if isinstance(param, paddle.fluid.LoDTensor) else param + ) dims_mapping = dist_attr["dims_mapping"] process_shape = dist_attr["process_shape"] process_group = dist_attr["process_group"] # slice the parameter with dist_attr - partition_index_list = _get_split_indices(param.shape, dims_mapping, - process_shape, process_group) - sliced_param_list = _slice_parameter(param, partition_index_list, - len(partition_index_list)) + partition_index_list = _get_split_indices( + param.shape, dims_mapping, process_shape, process_group + ) + sliced_param_list = _slice_parameter( + param, 
partition_index_list, len(partition_index_list) + ) # get the current parameter's index in sliced_param_list rank_id = paddle.distributed.get_rank() - sliced_param_index = _get_sliced_param_index(rank_id, param.shape, - dims_mapping, process_shape, - process_group) + sliced_param_index = _get_sliced_param_index( + rank_id, param.shape, dims_mapping, process_shape, process_group + ) sliced_param = sliced_param_list[sliced_param_index] return sliced_param -def _merge_parameter(partition_param_list, param, partition_index, - complete_shape): +def _merge_parameter( + partition_param_list, param, partition_index, complete_shape +): """ Merge partitial parameters to a complete one. @@ -935,19 +1018,30 @@ def _merge_parameter(partition_param_list, param, partition_index, else: i = 0 while i < len(partition_param_list): - concat_axis, first_order, new_partition = Resharder.compute_concat_info( - partition_param_list[i][1], partition_index) + ( + concat_axis, + first_order, + new_partition, + ) = Resharder.compute_concat_info( + partition_param_list[i][1], partition_index + ) if concat_axis != -1: if first_order == 0: new_param = np.concatenate( - (partition_param_list[i][0], param), axis=concat_axis) + (partition_param_list[i][0], param), axis=concat_axis + ) else: new_param = np.concatenate( - (param, partition_param_list[i][0]), axis=concat_axis) + (param, partition_param_list[i][0]), axis=concat_axis + ) partition_param_list.pop(i) - _merge_parameter(partition_param_list, new_param, new_partition, - complete_shape) + _merge_parameter( + partition_param_list, + new_param, + new_partition, + complete_shape, + ) break i += 1 @@ -975,19 +1069,21 @@ def _slice_parameter(complete_param, partition_index_list, length): """ sliced_param_list = [] axis = len(complete_param.shape) - length - sliced_param = np.split(complete_param, - partition_index_list[axis], - axis=axis) + sliced_param = np.split( + complete_param, partition_index_list[axis], axis=axis + ) if length == 1: return sliced_param for param in sliced_param: sliced_param_list.extend( - _slice_parameter(param, partition_index_list, length - 1)) + _slice_parameter(param, partition_index_list, length - 1) + ) return sliced_param_list -def _get_sliced_param_index(rank, complete_shape, dims_mapping, process_shape, - process_group): +def _get_sliced_param_index( + rank, complete_shape, dims_mapping, process_shape, process_group +): """ Get sliced_param's index of current rank in all sliced parameters list. @@ -1015,10 +1111,9 @@ def _get_sliced_param_index(rank, complete_shape, dims_mapping, process_shape, """ from .reshard import Resharder - partition_index = Resharder.compute_partition_index(rank, complete_shape, - dims_mapping, - process_shape, - process_group) + partition_index = Resharder.compute_partition_index( + rank, complete_shape, dims_mapping, process_shape, process_group + ) sliced_param_index = 0 for i, shape in enumerate(complete_shape): if dims_mapping[i] == -1: @@ -1033,8 +1128,9 @@ def _get_sliced_param_index(rank, complete_shape, dims_mapping, process_shape, return sliced_param_index -def _get_split_indices(complete_shape, dims_mapping, process_shape, - process_group): +def _get_split_indices( + complete_shape, dims_mapping, process_shape, process_group +): """ Get split indices of every dimension. 
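A self-contained numpy sketch of the split-and-pick idea implemented by _slice_parameter and _get_sliced_param_index above; the shape and split points are invented for illustration.

import numpy as np

# Hypothetical 4x6 "complete" parameter.
complete_param = np.arange(24).reshape(4, 6)

# Split indices per dimension, in the format produced by _get_split_indices:
# cut dim 0 at row 2, leave dim 1 whole.
partition_index_list = [[2], []]

# np.split with an empty index list returns the array as a single piece,
# so recursing dimension by dimension enumerates every shard.
row_blocks = np.split(complete_param, partition_index_list[0], axis=0)
shards = [
    piece
    for block in row_blocks
    for piece in np.split(block, partition_index_list[1], axis=1)
]
# shards[0] is rows 0-1 and shards[1] is rows 2-3; each rank then selects
# its own entry via the index computed by _get_sliced_param_index.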
@@ -1059,15 +1155,20 @@ def _get_split_indices(complete_shape, dims_mapping, process_shape, split_indices_list = [] for process in process_group: partition_index = Resharder.compute_partition_index( - process, complete_shape, dims_mapping, process_shape, process_group) + process, complete_shape, dims_mapping, process_shape, process_group + ) if split_indices_list: for dim in range(len(partition_index)): split_indices_list[dim].extend(partition_index[dim]) else: split_indices_list = partition_index split_indices_list = list( - map(lambda x, y: list(set(x) - set([y]) - set([0])), split_indices_list, - complete_shape)) + map( + lambda x, y: list(set(x) - set([y]) - set([0])), + split_indices_list, + complete_shape, + ) + ) split_indices_list = [sorted(x) for x in split_indices_list] return split_indices_list @@ -1086,8 +1187,10 @@ def set_grad_var_shape(program, dist_context): if int(op.attr('op_role')) != int(OpRole.Backward): continue - if int(block.ops[idx-1].attr('op_role')) == int(OpRole.Forward) or \ - int(block.ops[idx-1].attr('op_role')) == 257: + if ( + int(block.ops[idx - 1].attr('op_role')) == int(OpRole.Forward) + or int(block.ops[idx - 1].attr('op_role')) == 257 + ): appended_grad_times += 1 if op.type in ["check_finite_and_unscale", "update_loss_scaling"]: @@ -1105,61 +1208,102 @@ def set_grad_var_shape(program, dist_context): continue if var_name in grad_var_to_var[appended_grad_times]: forward_var_name = grad_var_to_var[appended_grad_times][ - var_name] + var_name + ] else: - forward_var_name = var_name[:var_name.find("@GRAD")] + forward_var_name = var_name[: var_name.find("@GRAD")] if op.type in [ - "c_allreduce_sum", "c_identity", "scale", "cast", - "fill_any_like" + "c_allreduce_sum", + "c_identity", + "scale", + "cast", + "fill_any_like", ]: forward_var_name = op.input_arg_names[0] - elif op.type == "matmul_v2_grad" or op.type == "matmul_grad" or op.type == "mul_grad": + elif ( + op.type == "matmul_v2_grad" + or op.type == "matmul_grad" + or op.type == "mul_grad" + ): forward_var_name = None for output_name in op.output_names: if var_name in op.output(output_name): assert "@GRAD" in output_name - input_name = output_name[:output_name.find("@GRAD")] + input_name = output_name[: output_name.find("@GRAD")] assert len(op.input(input_name)) == 1 forward_var_name = op.input(input_name)[0] assert forward_var_name is not None need_set_shape_list = [ - "reshape2_grad", "softmax_with_cross_entropy_grad", - "transpose2_grad", "softmax_grad", "cross_entropy_grad2", - "dropout_grad", "tanh_grad", "slice", "assign", - "matmul_v2_triple_grad", "elementwise_add_triple_grad", - "fill_constant", "sqrt_grad", + "reshape2_grad", + "softmax_with_cross_entropy_grad", + "transpose2_grad", + "softmax_grad", + "cross_entropy_grad2", + "dropout_grad", + "tanh_grad", + "slice", + "assign", + "matmul_v2_triple_grad", + "elementwise_add_triple_grad", + "fill_constant", + "sqrt_grad", "fused_softmax_mask_upper_triangle_grad", - "flatten_contiguous_range_grad", "relu_grad" + "flatten_contiguous_range_grad", + "relu_grad", ] forward_list = [ - "reshape2", "softmax_with_cross_entropy", "transpose2", - "softmax", "cross_entropy2", "dropout", "tanh", - ["slice_grad", "c_allgather"], "assign", "matmul_v2_grad_grad", - "elementwise_add_grad_grad", "shape", "sqrt", - "fused_softmax_mask_upper_triangle", "flatten_contiguous_range", - "relu" + "reshape2", + "softmax_with_cross_entropy", + "transpose2", + "softmax", + "cross_entropy2", + "dropout", + "tanh", + ["slice_grad", "c_allgather"], + "assign", + 
"matmul_v2_grad_grad", + "elementwise_add_grad_grad", + "shape", + "sqrt", + "fused_softmax_mask_upper_triangle", + "flatten_contiguous_range", + "relu", ] if op.type in need_set_shape_list: for forward_op in block.ops: idx = need_set_shape_list.index(op.type) forward_op_name = forward_list[idx] - if forward_op.type in forward_op_name and forward_var_name in forward_op.input_arg_names: - op_dist_attr = dist_context.get_op_dist_attr_for_program( - forward_op) + if ( + forward_op.type in forward_op_name + and forward_var_name in forward_op.input_arg_names + ): + op_dist_attr = ( + dist_context.get_op_dist_attr_for_program( + forward_op + ) + ) break forward_input_dist_attr = op_dist_attr.get_input_dist_attr( - forward_var_name) - assert forward_input_dist_attr is not None, f"{forward_var_name, str(op)}" + forward_var_name + ) + assert ( + forward_input_dist_attr is not None + ), f"{forward_var_name, str(op)}" forward_var = vars[forward_var_name] - forward_var_dist_attr = dist_context.get_tensor_dist_attr_for_program( - forward_var) + forward_var_dist_attr = ( + dist_context.get_tensor_dist_attr_for_program(forward_var) + ) assert forward_var_dist_attr is not None grad_var = vars[var_name] - ref_shape = infer_shape(block, forward_var, forward_var_dist_attr, - forward_input_dist_attr) + ref_shape = infer_shape( + block, + forward_var, + forward_var_dist_attr, + forward_input_dist_attr, + ) if list(grad_var.shape) != ref_shape: grad_var.desc.set_shape(ref_shape) @@ -1171,28 +1315,33 @@ OpRole = core.op_proto_and_checker_maker.OpRole def is_forward_op(op): op_role = int(op.attr('op_role')) - return OP_ROLE_KEY in op.attr_names and (op_role == int(OpRole.Forward) - or op_role == int(OpRole.Loss)) + return OP_ROLE_KEY in op.attr_names and ( + op_role == int(OpRole.Forward) or op_role == int(OpRole.Loss) + ) def is_backward_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Backward) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) & int(OpRole.Backward) def is_optimize_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Optimize) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) & int(OpRole.Optimize) def is_lr_sched_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Optimize.LRSched) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) & int(OpRole.Optimize.LRSched) def is_loss_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) == (int(OpRole.Forward) | int(OpRole.Loss)) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) == (int(OpRole.Forward) | int(OpRole.Loss)) def is_loss_grad_op(op): @@ -1203,8 +1352,9 @@ def is_loss_grad_op(op): def is_gradient_clip_op(op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/gradient_clip") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/gradient_clip") def is_prim_op(op): @@ -1215,8 +1365,9 @@ def get_loss_op(block): loss_ops = [] for op in block.ops: if is_loss_op(op): - assert len(op.desc.output_arg_names() - ) == 1, "loss op should only output loss var" + assert ( + len(op.desc.output_arg_names()) == 1 + ), "loss op should only output loss var" loss_ops.append(op) assert len(loss_ops) == 1, "num of loss op is not equal to one" @@ -1236,7 +1387,8 @@ def 
set_var_dist_attr(dist_context, var, dims_mapping, process_mesh, **kwargs): def naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, process_mesh, ref_mapping, ctx): + new_op, process_mesh, ref_mapping, ctx +): assert process_mesh is not None assert ref_mapping is not None @@ -1270,9 +1422,11 @@ def update_op_dims_mapping_by_default_dist_impl(dist_op): dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name) if len(dims_mapping) > 1: for idx, mapping in enumerate(dims_mapping[1:]): - assert mapping == -1, \ - "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\ - .format(op_desc.type(), idx, mapping) + assert ( + mapping == -1 + ), "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part.".format( + op_desc.type(), idx, mapping + ) batch_dim_mappings.append(dims_mapping[0]) for arg_name in op_desc.output_arg_names(): serial_tensor = dist_op.get_serial_output(arg_name) @@ -1282,23 +1436,31 @@ def update_op_dims_mapping_by_default_dist_impl(dist_op): if arg_name not in xshape_arg_names: if len(dims_mapping) > 1: for idx, mapping in enumerate(dims_mapping[1:]): - assert mapping == -1, \ - "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\ - .format(op_desc.type(), idx, mapping) + assert ( + mapping == -1 + ), "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part.".format( + op_desc.type(), idx, mapping + ) batch_dim_mappings.append(dims_mapping[0]) else: - assert dims_mapping[0] == -1, \ - "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension 0 is sharded by {} part."\ - .format(op_desc.type(), mapping) + assert ( + dims_mapping[0] == -1 + ), "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension 0 is sharded by {} part.".format( + op_desc.type(), mapping + ) if len(dims_mapping) > 2: for idx, mapping in enumerate(dims_mapping[2:]): - assert mapping == -1, \ - "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension {} is sharded by {} part."\ - .format(op_desc.type(), idx, mapping) + assert ( + mapping == -1 + ), "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension {} is sharded by {} part.".format( + op_desc.type(), idx, mapping + ) batch_dim_mappings.append(dims_mapping[1]) compatible_dim_mapping = compute_compatible_dim_mapping(batch_dim_mappings) - assert compatible_dim_mapping is not None, "There is no compatible dim mapping." + assert ( + compatible_dim_mapping is not None + ), "There is no compatible dim mapping." 
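To make the batch-dimension assertions above concrete, here are mappings the default implementation accepts and rejects; the tensor shapes are hypothetical.

# Hypothetical activation of shape [batch, seq, hidden] on a 1-D mesh.
ok_dims_mapping = [0, -1, -1]   # only dim 0 (batch) is sharded: passes
bad_dims_mapping = [-1, 1, -1]  # a non-batch dim is sharded: assertion fires

# For an XShape output the leading dim is a dummy, so the batch sits at
# index 1 and the accepted pattern shifts right by one position.
ok_xshape_dims_mapping = [-1, 0, -1, -1]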
for arg_name in op_desc.input_arg_names(): serial_tensor = dist_op.get_serial_input(arg_name) if serial_tensor.is_parameter: @@ -1344,8 +1506,9 @@ def update_op_dims_mapping_by_elementwise_like_dist_impl(dist_op): if input_dims_mapping_lens[arg_name] < max_dims_mapping_len: new_dims_mapping = [-1 for _ in range(max_dims_mapping_len)] for i in range(input_dims_mapping_lens[arg_name]): - new_idx = (max_dims_mapping_len - - input_dims_mapping_lens[arg_name]) + i + new_idx = ( + max_dims_mapping_len - input_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[new_idx] = input_dims_mapping_dict[arg_name][i] dims_mapping_list.append(new_dims_mapping) else: @@ -1357,7 +1520,9 @@ def update_op_dims_mapping_by_elementwise_like_dist_impl(dist_op): dims_mapping_list.append(dims_mapping) compatible_dims_mapping = compute_compatible_dims_mapping(dims_mapping_list) - assert compatible_dims_mapping is not None, "There is no compatible dim mapping." + assert ( + compatible_dims_mapping is not None + ), "There is no compatible dim mapping." for arg_name in input_arg_names: if input_dims_mapping_lens[arg_name] < max_dims_mapping_len: @@ -1365,55 +1530,64 @@ def update_op_dims_mapping_by_elementwise_like_dist_impl(dist_op): -1 for _ in range(input_dims_mapping_lens[arg_name]) ] for i in range(input_dims_mapping_lens[arg_name]): - new_idx = (max_dims_mapping_len - - input_dims_mapping_lens[arg_name]) + i + new_idx = ( + max_dims_mapping_len - input_dims_mapping_lens[arg_name] + ) + i new_dims_mapping[i] = compatible_dims_mapping[new_idx] if new_dims_mapping != input_dims_mapping_dict[arg_name]: op_dist_attr.set_input_dims_mapping(arg_name, new_dims_mapping) changed = True else: if compatible_dims_mapping != input_dims_mapping_dict[arg_name]: - op_dist_attr.set_input_dims_mapping(arg_name, - compatible_dims_mapping) + op_dist_attr.set_input_dims_mapping( + arg_name, compatible_dims_mapping + ) changed = True for arg_name in output_arg_names: dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name) if compatible_dims_mapping != dims_mapping: - op_dist_attr.set_output_dims_mapping(arg_name, - compatible_dims_mapping) + op_dist_attr.set_output_dims_mapping( + arg_name, compatible_dims_mapping + ) changed = True return changed -def get_all_distributed_main_program(serial_program_info, dist_context, - parallelizer): +def get_all_distributed_main_program( + serial_program_info, dist_context, parallelizer +): "Get all distributed main programs by dist_context." 
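The new_idx arithmetic above right-aligns shorter mappings before comparing them, in the style of numpy broadcasting; a tiny sketch with invented mappings:

# Two inputs of an elementwise-like op with ranks 3 and 1 (hypothetical).
a_mapping = [0, -1, -1]
b_mapping = [-1]
max_len = max(len(a_mapping), len(b_mapping))

# Left-pad the shorter mapping with -1 so trailing dimensions line up,
# mirroring new_idx = (max_dims_mapping_len - input_len) + i above.
aligned_b = [-1] * max_len
for i, m in enumerate(b_mapping):
    aligned_b[(max_len - len(b_mapping)) + i] = m
# aligned_b == [-1, -1, -1]; the compatible mapping is then computed
# element-wise over [a_mapping, aligned_b].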
from .dist_context import DistributedOperatorContext + cluster = serial_program_info.cluster copied_parallelizer = copy.deepcopy(parallelizer) all_dist_main_program = [] - ranks = paddle.distributed.get_world_size() if cluster is None else len( - cluster.get_all_devices("GPU")) + ranks = ( + paddle.distributed.get_world_size() + if cluster is None + else len(cluster.get_all_devices("GPU")) + ) for rank_id in range(ranks): used_dist_context = copy.deepcopy(dist_context) used_dist_context._dist_op_context = DistributedOperatorContext() - _, _, dist_startup_program, dist_main_program, _ = copied_parallelizer._get_dist_program( - rank_id, used_dist_context) + ( + _, + _, + dist_startup_program, + dist_main_program, + _, + ) = copied_parallelizer._get_dist_program(rank_id, used_dist_context) all_dist_main_program.append(dist_main_program) return all_dist_main_program class SerialProgramInfo: - - def __init__(self, - train_program, - satrtup_program, - loss, - optimizer, - cluster=None): + def __init__( + self, train_program, satrtup_program, loss, optimizer, cluster=None + ): self._train_program = train_program self._startup_program = satrtup_program self._loss = loss @@ -1442,7 +1616,6 @@ class SerialProgramInfo: def get_standalone_cost_data(distributed_programs): - def _compute_runtime(op_cost, op, vars): runtime = 0 try: @@ -1455,32 +1628,47 @@ def get_standalone_cost_data(distributed_programs): parsed_info = op_config.split("\n") variable = "(Variable)" for info in parsed_info: - variable = "(Variable)" if "(Variable)" in info else "(list" + variable = ( + "(Variable)" if "(Variable)" in info else "(list" + ) if variable in info: - arg_name_lower = info[:info.find(variable) - 1] + arg_name_lower = info[: info.find(variable) - 1] shape_left_boundary = info.find("[") shape_right_boundary = info.find("]") - assert shape_left_boundary > 0 and shape_right_boundary > 0 and shape_right_boundary > shape_left_boundary, "Get shape failed." - shape = info[shape_left_boundary + - 1:shape_right_boundary].split(",") + assert ( + shape_left_boundary > 0 + and shape_right_boundary > 0 + and shape_right_boundary > shape_left_boundary + ), "Get shape failed." + shape = info[ + shape_left_boundary + 1 : shape_right_boundary + ].split(",") shape = list(map(lambda x: int(x.strip()), shape)) dtype_factor = 1 total_static_input_size += reduce(lambda x, y: x * y, shape) if op.type == "c_embedding": - arg_name_lower = "w" if arg_name_lower == "weight" else "ids" + arg_name_lower = ( + "w" if arg_name_lower == "weight" else "ids" + ) for arg_name in op.input_names: if arg_name.lower() == arg_name_lower: for var_name in op.input(arg_name): var = vars[var_name] total_actual_input_size += reduce( - lambda x, y: x * y, var.shape) + lambda x, y: x * y, var.shape + ) break - assert total_static_input_size > 0 and total_actual_input_size > 0, "Get input size failed." + assert ( + total_static_input_size > 0 and total_actual_input_size > 0 + ), "Get input size failed." 
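The input sizes accumulated here feed a simple proportional correction of the profiled op time, applied just below; a worked example with made-up numbers:

# Hypothetical numbers, for illustration only.
profiled_runtime_ms = 0.40              # time the cost model recorded for this op type
total_static_input_size = 1024 * 1024   # input volume the profile was taken with
total_actual_input_size = 2048 * 1024   # input volume of the op in this program

actual_runtime_ms = (
    total_actual_input_size / total_static_input_size * profiled_runtime_ms
)
# -> 0.80 ms: the estimate assumes runtime scales linearly with input volume.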
- actual_runtime = total_actual_input_size / total_static_input_size * runtime + actual_runtime = ( + total_actual_input_size / total_static_input_size * runtime + ) return actual_runtime import paddle.cost_model as cm + cost_model = cm.CostModel() cost_model.static_cost_data() DEFAULT_MULTIPLE = 2 @@ -1491,13 +1679,16 @@ def get_standalone_cost_data(distributed_programs): "reshape2": "reshape", "unsqueeze2": "unsqueeze", "reduce_sum": "sum", - "elementwise_div": "divide" + "elementwise_div": "divide", } standalone_cost_data = [] # skip ops not_enum_ops = [ - "create_py_reader", "create_double_buffer_reader", "read", "assign" + "create_py_reader", + "create_double_buffer_reader", + "read", + "assign", ] for distributed_program in distributed_programs: cost_data = {} @@ -1507,26 +1698,33 @@ def get_standalone_cost_data(distributed_programs): if op.type in not_enum_ops: cost_data[op.desc.id()] = runtime continue - dtype = str(vars[op.input_arg_names[0]].dtype - ) if op.input_arg_names else "float32" + dtype = ( + str(vars[op.input_arg_names[0]].dtype) + if op.input_arg_names + else "float32" + ) if int(op.attr('op_role')) == int(OpRole.Backward): if "_grad" in op.type: forward_op_name = op.type[:-5] if forward_op_name in OP_NAME_MAPPING.keys(): forward_op_name = OP_NAME_MAPPING[forward_op_name] - op_cost = cost_model.get_static_op_time(forward_op_name, - forward=False, - dtype=dtype) + op_cost = cost_model.get_static_op_time( + forward_op_name, forward=False, dtype=dtype + ) if op_cost: runtime = _compute_runtime(op_cost, op, vars) else: - op_cost = cost_model.get_static_op_time(forward_op_name, - dtype=dtype) + op_cost = cost_model.get_static_op_time( + forward_op_name, dtype=dtype + ) if op_cost: runtime = 2 * _compute_runtime(op_cost, op, vars) elif int(op.attr('op_role')) == int(OpRole.Forward): - op_name = OP_NAME_MAPPING[ - op.type] if op.type in OP_NAME_MAPPING.keys() else op.type + op_name = ( + OP_NAME_MAPPING[op.type] + if op.type in OP_NAME_MAPPING.keys() + else op.type + ) op_cost = cost_model.get_static_op_time(op_name) if op_cost: runtime = _compute_runtime(op_cost, op, vars) @@ -1565,7 +1763,8 @@ def to_list(value): def debug_program(program, path, name): filename = os.path.join( - path, name + '_program' + ".%d" % (paddle.distributed.get_rank())) + path, name + '_program' + ".%d" % (paddle.distributed.get_rank()) + ) with open(filename, 'w') as f: f.write(str(program)) @@ -1599,9 +1798,11 @@ def get_lr(optimizer): return optimizer._learning_rate() else: raise TypeError( - "'optimizer' must be object of class `paddle.optimizer.Optimizer`" \ - " or `paddle.fluid.optimizer.Optimizer`, but got {}.".format(type(optimizer)) + "'optimizer' must be object of class `paddle.optimizer.Optimizer`" + " or `paddle.fluid.optimizer.Optimizer`, but got {}.".format( + type(optimizer) ) + ) def initialize_pg_in_full_mode(all_process_groups, cur_rank): @@ -1631,21 +1832,28 @@ def initialize_pg_in_full_mode(all_process_groups, cur_rank): if is_send: recv_rank = process_group.ranks[1] recv_rank_ip, recv_rank_port = genv.trainer_endpoints[ - recv_rank].split(":") + recv_rank + ].split(":") connect_port = int(recv_rank_port) + magic_num - client_socket = socket.socket(socket.AF_INET, - socket.SOCK_STREAM) + client_socket = socket.socket( + socket.AF_INET, socket.SOCK_STREAM + ) client_socket.connect((recv_rank_ip, connect_port)) client_socket.send(str(cur_rank).encode('utf-8')) rank = client_socket.recv(buff_size).decode('utf-8') rank = int(rank) if rank != recv_rank: raise ValueError( - "Please 
check comm pair, the recv rank should be {} but got {}." - .format(recv_rank, rank)) + "Please check comm pair, the recv rank should be {} but got {}.".format( + recv_rank, rank + ) + ) else: - print("It is able to instantiate {} as sender now.".format( - process_group.ranks)) + print( + "It is able to instantiate {} as sender now.".format( + process_group.ranks + ) + ) client_socket.close() else: send_rank = process_group.ranks[0] @@ -1657,10 +1865,14 @@ def initialize_pg_in_full_mode(all_process_groups, cur_rank): has_recv_by_socket.append(rank) else: client_sockets[send_rank].send( - str(cur_rank).encode("utf-8")) + str(cur_rank).encode("utf-8") + ) client_sockets[send_rank].close() - print("It is able to instantiate {} as recver now.". - format(process_group.ranks)) + print( + "It is able to instantiate {} as recver now.".format( + process_group.ranks + ) + ) break process_group.instantiate() server_socket.close() diff --git a/python/paddle/distributed/cloud_utils.py b/python/paddle/distributed/cloud_utils.py index b186ff64baf55565c4a8ccc8923b4ec3a1a097a5..2e1d85205a538c9338ff1bfbd458d5aaf81825af 100644 --- a/python/paddle/distributed/cloud_utils.py +++ b/python/paddle/distributed/cloud_utils.py @@ -13,7 +13,11 @@ # limitations under the License. import os -from paddle.distributed.utils.launch_utils import get_cluster, get_gpus, get_cluster_from_args +from paddle.distributed.utils.launch_utils import ( + get_cluster, + get_gpus, + get_cluster_from_args, +) from paddle.distributed.utils.launch_utils import logger __all__ = [] @@ -23,7 +27,7 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices): """ args_node_ips:string, args_node_ip:string, args_port: int, selected_devices:list """ - #you can automatically get ip info while using paddlecloud multi nodes mode. + # you can automatically get ip info while using paddlecloud multi nodes mode. node_ips = os.getenv("PADDLE_TRAINERS") assert node_ips is not None, "PADDLE_TRAINERS should not be None" @@ -41,16 +45,23 @@ def get_cloud_cluster(args_node_ips, args_node_ip, args_port, selected_devices): node_rank = int(node_rank) if node_ip != "127.0.0.1" and node_ip != args_node_ip: - logger.warning("Please NOTE: When using paddlecloud, node_ip is \ + logger.warning( + "Please NOTE: When using paddlecloud, node_ip is \ automatically got from POD_IP. 
Your input node_ip: {} doesn't equals to \ -node_ip: {} from paddlecloud environment.".format(args_node_ip, node_ip)) +node_ip: {} from paddlecloud environment.".format( + args_node_ip, node_ip + ) + ) if args_node_ips != "127.0.0.1" and args_node_ips != ",".join(node_ips): logger.warning( "Please NOTE: When using paddlecloud, cluster_node_ips is \ automatically got from PADDLE_TRAINERS(multi nodes) or POD_IP(single node).\ Your input cluster_node_ips: {} doesn't equals to IPs: {} from \ -paddlecloud environment.".format(args_node_ips, node_ips)) +paddlecloud environment.".format( + args_node_ips, node_ips + ) + ) # DISTRIBUTED_TRAINER_ENDPOINTS: new environment since paddlecloud 1.8.4 # e.g: DISTRIBUTED_TRAINER_ENDPOINTS="ip1:port1,ip1:port2,ip1:port3,ip1:port4,ip2:port5,ip2:port6,ip2:port7,ip2:port8" @@ -61,10 +72,13 @@ paddlecloud environment.".format(args_node_ips, node_ips)) try: paddle_port = int(os.getenv("PADDLE_PORT", "")) - if paddle_ports_num >= len( - selected_devices) and paddle_port != args_port: + if ( + paddle_ports_num >= len(selected_devices) + and paddle_port != args_port + ): logger.warning( - "Use Cloud specified port:{}.".format(paddle_port)) + "Use Cloud specified port:{}.".format(paddle_port) + ) started_port = paddle_port except Exception as e: @@ -84,15 +98,21 @@ paddlecloud environment.".format(args_node_ips, node_ips)) assert num_nodes * paddle_ports_num == len(trainer_endpoints_ori) for i in range(num_nodes): trainer_endpoints.append( - trainer_endpoints_ori[i * paddle_ports_num:(i + 1) * - paddle_ports_num]) + trainer_endpoints_ori[ + i * paddle_ports_num : (i + 1) * paddle_ports_num + ] + ) - logger.debug("parsed from args: node_ips:{} \ + logger.debug( + "parsed from args: node_ips:{} \ node_ip:{} node_rank:{} trainer_endpoints:{}".format( - node_ips, node_ip, node_rank, trainer_endpoints)) + node_ips, node_ip, node_rank, trainer_endpoints + ) + ) - cluster, pod = get_cluster(node_ips, node_ip, trainer_endpoints, - selected_devices) + cluster, pod = get_cluster( + node_ips, node_ip, trainer_endpoints, selected_devices + ) return cluster, cluster.pods[node_rank] @@ -104,15 +124,22 @@ def get_cluster_and_pod(args): # parse arguments, used for cloud-single-machine and local selected_devices = get_gpus(args.selected_devices) trainers_num = _get_trainers_num() - logger.debug("parsed from args trainerss_num:{} selected_devices:{}".format( - trainers_num, selected_devices)) + logger.debug( + "parsed from args trainerss_num:{} selected_devices:{}".format( + trainers_num, selected_devices + ) + ) cluster = None pod = None if args.use_paddlecloud and trainers_num != 1: - cluster, pod = get_cloud_cluster(args.cluster_node_ips, args.node_ip, - args.started_port, selected_devices) + cluster, pod = get_cloud_cluster( + args.cluster_node_ips, + args.node_ip, + args.started_port, + selected_devices, + ) logger.info("get cluster from cloud:{}".format(cluster)) else: cluster, pod = get_cluster_from_args(args, selected_devices) diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py index fa1b3e00d47607ab8eabcfa71b54b5d2a259a4ab..77258f7036cbd9a163310fabd8b1abd35c76db37 100644 --- a/python/paddle/distributed/collective.py +++ b/python/paddle/distributed/collective.py @@ -92,8 +92,9 @@ def _get_group_map(): global _group_map if _global_env_gid not in _group_map: genv = _get_global_env() - _group_map[_global_env_gid] = Group(genv.rank, 0, - list(range(genv.world_size))) + _group_map[_global_env_gid] = Group( + genv.rank, 0, 
list(range(genv.world_size)) + ) return _group_map @@ -108,8 +109,10 @@ def _get_group_map_by_name(): def _get_default_group(): global _group_map_by_name - assert is_initialized(), ("Call paddle.distributed.init_parallel_env first " - "to initialize the distributed environment.") + assert is_initialized(), ( + "Call paddle.distributed.init_parallel_env first " + "to initialize the distributed environment." + ) return _get_group_map_by_name()[_default_group_name] @@ -165,21 +168,23 @@ def get_group(id=0): return gm[id] if id in gm else None -def _new_process_group_impl(backend, - store, - rank, - world_size, - group_name, - pg_options, - group_id=0, - src_rank=None, - dst_rank=None): +def _new_process_group_impl( + backend, + store, + rank, + world_size, + group_name, + pg_options, + group_id=0, + src_rank=None, + dst_rank=None, +): pg = None genv = _get_global_env() if backend != 'heter': assert src_rank is None and dst_rank is None, ( - "src_rank and dst_rank " - "can only be set for heter backend.") + "src_rank and dst_rank " "can only be set for heter backend." + ) assert backend in _valid_backend_list, "Unsupported backend: %s." % backend if backend == "gloo": place = core.CPUPlace() @@ -208,24 +213,27 @@ def _new_process_group_impl(backend, switch_ep = os.getenv("CLUSTER_SWITCH", None) assert switch_ep, "please set the CLUSTER_SWITCH variable." cluster_size_cumsum = np.cumsum(cluster_size) - cluster_offset = 0 if cluster_id == 0 else cluster_size_cumsum[ - cluster_id - 1] + cluster_offset = ( + 0 if cluster_id == 0 else cluster_size_cumsum[cluster_id - 1] + ) global_rank = cluster_offset + rank global_world_size = cluster_size_cumsum[-1] global_rank, global_world_size = _get_global_config(backend, rank) - pg = core.ProcessGroupHeter(store, - rank=global_rank, - world_size=global_world_size, - place=place, - gid=group_id, - local_rank=rank, - local_size=world_size, - gloo_rank=cluster_id, - gloo_size=len(cluster_size), - with_switch=True, - switch_endpoint=switch_ep, - src_rank=src_rank, - dst_rank=dst_rank) + pg = core.ProcessGroupHeter( + store, + rank=global_rank, + world_size=global_world_size, + place=place, + gid=group_id, + local_rank=rank, + local_size=world_size, + gloo_rank=cluster_id, + gloo_size=len(cluster_size), + with_switch=True, + switch_endpoint=switch_ep, + src_rank=src_rank, + dst_rank=dst_rank, + ) return pg @@ -271,10 +279,12 @@ def barrier(group=None): if not isinstance(ring_id, int): raise ValueError("The type of 'group' for barrier must be int.") helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [temp]}, - outputs={'Out': [temp]}, - attrs={'ring_id': ring_id}) + helper.append_op( + type=op_type, + inputs={'X': [temp]}, + outputs={'Out': [temp]}, + attrs={'ring_id': ring_id}, + ) # _custom_gid provides a way for users to @@ -296,7 +306,7 @@ def _barrier_by_tcp_store(group_name, store, timeout): return barrier_prefix = "Barrier/" + group_name + "/" - is_master = (global_rank == 0) + is_master = global_rank == 0 def _check_keys_ready(wait_keys): start_time = time.time() @@ -309,9 +319,12 @@ def _barrier_by_tcp_store(group_name, store, timeout): "Keys {} are not ready sinck rank {} is waiting them." "Two reason may cause this error:\n 1. The create process group api should be called by all ranks.\n" " 2. 
Try to increase the waiting time.\n".format( - group_name, wait_keys, global_rank)) + group_name, wait_keys, global_rank + ) + ) wait_keys = list( - filter(lambda key: int(store.get(key)) != 1, wait_keys)) + filter(lambda key: int(store.get(key)) != 1, wait_keys) + ) # all the workers set their exiting key and exit # the master will wait for all workers' exiting key, ensure to exit in the end @@ -363,22 +376,25 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): ranks = global_ranks assert len(ranks) <= len(global_ranks), ( "Size of new group must be less than or " - "equal to that of the default global group.") + "equal to that of the default global group." + ) size = len(ranks) ranks = sorted(ranks) if backend == 'heter' or (size > 1 and global_rank in ranks): rank = 0 if backend == 'heter' else ranks.index(global_rank) src_rank = ranks[0] if backend == 'heter' else None dst_rank = ranks[1] if backend == 'heter' else None - pg = _new_process_group_impl(backend, - _default_store, - rank, - size, - group_name, - pg_options=None, - group_id=gid, - src_rank=src_rank, - dst_rank=dst_rank) + pg = _new_process_group_impl( + backend, + _default_store, + rank, + size, + group_name, + pg_options=None, + group_id=gid, + src_rank=src_rank, + dst_rank=dst_rank, + ) else: rank = -1 pg = None @@ -386,7 +402,7 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): _group_map_by_name[group_name] = group _group_map[gid] = group _group_map_backend[group] = backend - #TODO: The method below is a new method for group management, will replace the previous + # TODO: The method below is a new method for group management, will replace the previous # three in the future. _add_new_group(group) @@ -402,7 +418,7 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): if not backend: backend = 'nccl' - assert backend == 'nccl', ("backend other than nccl is not supported yet") + assert backend == 'nccl', "backend other than nccl is not supported yet" genv = _get_global_env() global_rank = genv.rank @@ -431,30 +447,36 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): if core.is_compiled_with_cuda(): place = core.CUDAPlace(genv.device_id) - core.NCCLParallelContext(strategy, - place).init_with_ring_id(ring_id) + core.NCCLParallelContext(strategy, place).init_with_ring_id( + ring_id + ) elif core.is_compiled_with_npu(): place = core.NPUPlace(genv.device_id) - core.HCCLParallelContext(strategy, - place).init_with_ring_id(ring_id) + core.HCCLParallelContext(strategy, place).init_with_ring_id( + ring_id + ) elif core.is_compiled_with_mlu(): place = core.MLUPlace(genv.device_id) - core.CNCLParallelContext(strategy, - place).init_with_ring_id(ring_id) + core.CNCLParallelContext(strategy, place).init_with_ring_id( + ring_id + ) elif core.is_compiled_with_xpu(): place = core.XPUPlace(genv.device_id) - core.BKCLParallelContext(strategy, - place).init_with_ring_id(ring_id) + core.BKCLParallelContext(strategy, place).init_with_ring_id( + ring_id + ) else: - assert False, ("no cuda device found") + assert False, "no cuda device found" else: return gp # TODO(shenliang03): This is a temporary solution to solve the problem of # hang caused by cross-creation of new_group - tmp = paddle.to_tensor( - [1], dtype="int32") if _non_static_mode() else fill_constant( - [0], dtype="int32", value="1") + tmp = ( + paddle.to_tensor([1], dtype="int32") + if _non_static_mode() + else fill_constant([0], dtype="int32", value="1") + ) paddle.distributed.all_reduce(tmp, sync_op=True) 
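For context while reviewing the reformatted new_group above, a hedged sketch of how the public API is typically driven; the two-GPU launch and the chosen ranks are assumptions, not taken from this patch.

# Hedged usage sketch; run under a multi-process launch, e.g.
#   python -m paddle.distributed.launch --gpus=0,1 demo.py
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
# Build a subgroup from global ranks 0 and 1 of the default group.
group = dist.new_group(ranks=[0, 1])

x = paddle.to_tensor([dist.get_rank()], dtype="int32")
# Collectives accept the subgroup through their `group` argument.
dist.all_reduce(x, group=group)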
paddle.distributed.wait(tmp) return gp @@ -586,8 +608,9 @@ def _sync_calc_stream(tensor): def _sync_comm_stream(tensor, ring_id=0): if _non_static_mode(): - return _legacy_C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id', - ring_id) + return _legacy_C_ops.c_sync_comm_stream( + [tensor], [tensor], 'ring_id', ring_id + ) op_type = 'c_sync_comm_stream' @@ -648,7 +671,7 @@ def broadcast(tensor, src, group=None, sync_op=True): if in_dygraph_mode(): group = _get_default_group() if group is None else group gsrc = group.get_group_rank(src) - assert gsrc >= 0, ("src rank out of group, need global rank") + assert gsrc >= 0, "src rank out of group, need global rank" task = group.process_group.broadcast(tensor, gsrc) if sync_op: task.wait() @@ -659,28 +682,48 @@ def broadcast(tensor, src, group=None, sync_op=True): use_calc_stream = sync_op ring_id = ring_id = 0 if group is None else group.id gsrc = src if group is None else group.get_group_rank(src) - assert gsrc >= 0, ("src rank out of group, need global rank") + assert gsrc >= 0, "src rank out of group, need global rank" if _non_static_mode(): - return _legacy_C_ops.c_broadcast(tensor, tensor, 'root', gsrc, - 'use_calc_stream', use_calc_stream, - 'ring_id', ring_id) + return _legacy_C_ops.c_broadcast( + tensor, + tensor, + 'root', + gsrc, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + ) op_type = 'c_broadcast' - check_variable_and_dtype(tensor, 'tensor', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8', - 'bool' - ], 'broadcast') + check_variable_and_dtype( + tensor, + 'tensor', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'uint8', + 'bool', + ], + 'broadcast', + ) helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [tensor]}, - attrs={ - 'root': gsrc, - 'use_calc_stream': use_calc_stream, - 'ring_id': ring_id, - }) + helper.append_op( + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [tensor]}, + attrs={ + 'root': gsrc, + 'use_calc_stream': use_calc_stream, + 'ring_id': ring_id, + }, + ) def reduce(tensor, dst, op=ReduceOp.SUM, group=None, sync_op=True): @@ -730,7 +773,7 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, sync_op=True): op_type = _get_reduce_op(op, "reduce") group = _get_default_group() if group is None else group gdst = group.get_group_rank(dst) - assert gdst >= 0, ("dst rank out of group, need global rank") + assert gdst >= 0, "dst rank out of group, need global rank" task = group.process_group.reduce(tensor, gdst, op_type) if sync_op: task.wait() @@ -741,34 +784,72 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, sync_op=True): use_calc_stream = sync_op ring_id = 0 if group is None else group.id gdst = dst if group is None else group.get_group_rank(dst) - assert gdst >= 0, ("dst rank out of group, need global rank") + assert gdst >= 0, "dst rank out of group, need global rank" if _non_static_mode(): if op == ReduceOp.SUM: - return _legacy_C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'root_id', gdst) + return _legacy_C_ops.c_reduce_sum( + tensor, + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'root_id', + gdst, + ) elif op == ReduceOp.MAX: - return _legacy_C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'root_id', gdst) + return _legacy_C_ops.c_reduce_max( + tensor, + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 
'root_id', + gdst, + ) elif op == ReduceOp.MIN: - return _legacy_C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'root_id', gdst) + return _legacy_C_ops.c_reduce_min( + tensor, + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'root_id', + gdst, + ) elif op == ReduceOp.PROD: - return _legacy_C_ops.c_reduce_prod(tensor, tensor, - 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'root_id', gdst) + return _legacy_C_ops.c_reduce_prod( + tensor, + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'root_id', + gdst, + ) else: raise ValueError("Unknown parameter: {}.".format(op)) op_type = 'c_reduce' - check_variable_and_dtype(tensor, 'tensor', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8', - 'bool' - ], 'reduce') + check_variable_and_dtype( + tensor, + 'tensor', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'uint8', + 'bool', + ], + 'reduce', + ) if op == ReduceOp.SUM: op_type = 'c_reduce_sum' @@ -780,14 +861,16 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, sync_op=True): op_type = 'c_reduce_prod' helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [tensor]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - 'root_id': gdst, - }) + helper.append_op( + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [tensor]}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + 'root_id': gdst, + }, + ) def all_gather(tensor_list, tensor, group=None, sync_op=True): @@ -840,8 +923,9 @@ def all_gather(tensor_list, tensor, group=None, sync_op=True): list_of_complex.append(paddle.as_complex(tensor)) return list_of_complex - is_input_complex = (tensor.dtype == paddle.complex64 - or tensor.dtype == paddle.complex128) + is_input_complex = ( + tensor.dtype == paddle.complex64 or tensor.dtype == paddle.complex128 + ) if is_input_complex: tensor = paddle.as_real(tensor) @@ -868,33 +952,68 @@ def all_gather(tensor_list, tensor, group=None, sync_op=True): nranks = _get_global_group().nranks if group is None else group.nranks if _non_static_mode(): - out = _legacy_C_ops.c_allgather(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', ring_id, - 'nranks', nranks) + out = _legacy_C_ops.c_allgather( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'nranks', + nranks, + ) else: op_type = 'c_allgather' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=tensor.dtype) if not isinstance(tensor_list, list): - raise ValueError("The type of 'tensor_list' for all_gather " - "should be list.") + raise ValueError( + "The type of 'tensor_list' for all_gather " "should be list." 
+ ) for elem in tensor_list: - check_variable_and_dtype(elem, 'tensor_list', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'bool', - 'int8', 'uint8', 'complex64', 'complex128' - ], 'all_gather') - check_variable_and_dtype(tensor, 'tensor', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'int8', - 'uint8', 'complex64', 'complex128' - ], 'all_gather') - helper.append_op(type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [out]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - 'nranks': nranks - }) + check_variable_and_dtype( + elem, + 'tensor_list', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'bool', + 'int8', + 'uint8', + 'complex64', + 'complex128', + ], + 'all_gather', + ) + check_variable_and_dtype( + tensor, + 'tensor', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'bool', + 'int8', + 'uint8', + 'complex64', + 'complex128', + ], + 'all_gather', + ) + helper.append_op( + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [out]}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + 'nranks': nranks, + }, + ) list_of_tensor = paddle.split(out, nranks, 0) if is_input_complex: @@ -950,7 +1069,8 @@ def all_gather_object(object_list, obj, group=None): print(object_list) # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs) """ - assert in_dygraph_mode( + assert ( + in_dygraph_mode() ), "all_gather_object doesn't support static graph mode." tensor, len_of_tensor = _convert_object_to_tensor(obj) @@ -971,7 +1091,8 @@ def all_gather_object(object_list, obj, group=None): all_gather(tensor_list, input_tensor, group) for i, tensor in enumerate(tensor_list): object_list.append( - _convert_tensor_to_object(tensor, list_len_of_tensor[i])) + _convert_tensor_to_object(tensor, list_len_of_tensor[i]) + ) def scatter(tensor, tensor_list=None, src=0, group=None, sync_op=True): @@ -1033,7 +1154,7 @@ def scatter(tensor, tensor_list=None, src=0, group=None, sync_op=True): gsrc = src if group is None else group.get_group_rank(src) rank = _get_global_group().rank if group is None else group.rank nranks = _get_global_group().nranks if group is None else group.nranks - assert gsrc >= 0, ("src rank out of group, need global rank") + assert gsrc >= 0, "src rank out of group, need global rank" if rank != gsrc: tensor_list = [] @@ -1050,24 +1171,46 @@ def scatter(tensor, tensor_list=None, src=0, group=None, sync_op=True): use_calc_stream = sync_op if _non_static_mode(): - return _legacy_C_ops.c_scatter(temp, tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', ring_id, - 'nranks', nranks, 'root', gsrc) + return _legacy_C_ops.c_scatter( + temp, + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'nranks', + nranks, + 'root', + gsrc, + ) op_type = 'c_scatter' - check_variable_and_dtype(tensor, 'tensor', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8', - 'bool' - ], 'scatter') + check_variable_and_dtype( + tensor, + 'tensor', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'uint8', + 'bool', + ], + 'scatter', + ) helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [temp]}, - outputs={'Out': [tensor]}, - attrs={ - 'ring_id': ring_id, - 'root': gsrc, - 'use_calc_stream': use_calc_stream, - 'nranks': nranks, - }) + helper.append_op( + type=op_type, + inputs={'X': [temp]}, + outputs={'Out': [tensor]}, + attrs={ + 'ring_id': ring_id, + 'root': gsrc, + 'use_calc_stream': use_calc_stream, + 
'nranks': nranks, + }, + ) def alltoall(in_tensor_list, out_tensor_list, group=None, sync_op=True): @@ -1119,7 +1262,7 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, sync_op=True): if in_dygraph_mode(): group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" else: ring_id = 0 if group is None else group.id @@ -1140,44 +1283,56 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, sync_op=True): use_calc_stream = sync_op if _non_static_mode(): - out = _legacy_C_ops.alltoall(temp, 'use_calc_stream', use_calc_stream, - 'ring_id', ring_id) + out = _legacy_C_ops.alltoall( + temp, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id + ) else: op_type = 'alltoall' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference( - dtype=in_tensor_list[0].dtype) + dtype=in_tensor_list[0].dtype + ) if not isinstance(in_tensor_list, list): - raise ValueError("The type of 'in_tensor_list' for all_to_all " - "should be list.") + raise ValueError( + "The type of 'in_tensor_list' for all_to_all " "should be list." + ) for elem in in_tensor_list: check_variable_and_dtype( - elem, 'in_tensor_list', + elem, + 'in_tensor_list', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'all_to_all') + 'all_to_all', + ) if not isinstance(out_tensor_list, list): - raise ValueError("The type of 'out_tensor_list' for all_to_all " - "should be list.") + raise ValueError( + "The type of 'out_tensor_list' for all_to_all " + "should be list." + ) if len(out_tensor_list) != 0: - raise ValueError("The 'out_tensor_list' for all_to_all " - "must be an empty list.") - helper.append_op(type=op_type, - inputs={'X': [temp]}, - outputs={'Out': [out]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - }) + raise ValueError( + "The 'out_tensor_list' for all_to_all " "must be an empty list." + ) + helper.append_op( + type=op_type, + inputs={'X': [temp]}, + outputs={'Out': [out]}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + }, + ) out_tensor_list.extend(paddle.split(out, nranks, 0)) -def alltoall_single(in_tensor, - out_tensor, - in_split_sizes=None, - out_split_sizes=None, - group=None, - sync_op=True): +def alltoall_single( + in_tensor, + out_tensor, + in_split_sizes=None, + out_split_sizes=None, + group=None, + sync_op=True, +): """ Scatter a single input tensor to all participators and gather the received tensors in out_tensor. 
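A hedged two-rank sketch of the alltoall call reformatted above; the tensor contents and the launch setup are invented. Each rank contributes one tensor per peer and receives one tensor from each.

# Hedged sketch; assumes a 2-rank job started via paddle.distributed.launch.
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
rank = dist.get_rank()

# Rank r sends data1 to rank 0 and data2 to rank 1.
data1 = paddle.full([2, 2], float(2 * rank))
data2 = paddle.full([2, 2], float(2 * rank + 1))

out_tensor_list = []  # filled by the call
dist.alltoall([data1, data2], out_tensor_list)
# Afterwards, rank r holds the r-th tensor contributed by every rank.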
@@ -1250,13 +1405,14 @@ def alltoall_single(in_tensor, group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" in_split_sizes = [] if in_split_sizes is None else in_split_sizes out_split_sizes = [] if out_split_sizes is None else out_split_sizes - task = group.process_group.alltoall_single(in_tensor, out_tensor, - in_split_sizes, out_split_sizes) + task = group.process_group.alltoall_single( + in_tensor, out_tensor, in_split_sizes, out_split_sizes + ) if sync_op: task.wait() return @@ -1305,7 +1461,7 @@ def send(tensor, dst=0, group=None, sync_op=True): if in_dygraph_mode(): group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" task = group.process_group.send(tensor, dst) if sync_op: task.wait() @@ -1317,21 +1473,33 @@ def send(tensor, dst=0, group=None, sync_op=True): ring_id = 0 if group is None else group.id if _non_static_mode(): - return _legacy_C_ops.send_v2(tensor, 'use_calc_stream', use_calc_stream, - 'ring_id', ring_id, 'peer', dst) + return _legacy_C_ops.send_v2( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'peer', + dst, + ) op_type = 'send_v2' check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'send') + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'send', + ) helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [tensor]}, - attrs={ - 'ring_id': ring_id, - 'peer': dst, - 'use_calc_stream': use_calc_stream, - }) + helper.append_op( + type=op_type, + inputs={'X': [tensor]}, + attrs={ + 'ring_id': ring_id, + 'peer': dst, + 'use_calc_stream': use_calc_stream, + }, + ) def recv(tensor, src=0, group=None, sync_op=True): @@ -1372,7 +1540,7 @@ def recv(tensor, src=0, group=None, sync_op=True): if in_dygraph_mode(): group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" task = group.process_group.recv(tensor, src) if sync_op: task.wait() @@ -1384,37 +1552,58 @@ def recv(tensor, src=0, group=None, sync_op=True): ring_id = 0 if group is None else group.id if _non_static_mode(): - return _legacy_C_ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream, - 'ring_id', ring_id, 'peer', src, 'dtype', - tensor.dtype, 'out_shape', tensor.shape) + return _legacy_C_ops.recv_v2( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'peer', + src, + 'dtype', + tensor.dtype, + 'out_shape', + tensor.shape, + ) op_type = 'recv_v2' check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'recv') + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'recv', + ) helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - outputs={'Out': [tensor]}, - attrs={ - 'ring_id': ring_id, - 'peer': src, - 'out_shape': tensor.shape, - 'dtype': tensor.dtype, - 'use_calc_stream': use_calc_stream, - }) + helper.append_op( + type=op_type, + outputs={'Out': [tensor]}, + attrs={ + 'ring_id': ring_id, + 'peer': src, + 'out_shape': tensor.shape, + 'dtype': tensor.dtype, + 
'use_calc_stream': use_calc_stream, + }, + ) def _check_single_tensor(tensor, tensor_name): if not isinstance(tensor, (core.eager.Tensor, paddle.Tensor)): - raise RuntimeError("Invalid function argument. Expected parameter {}" - "to be of type paddle.Tensor, but it's {}".format( - tensor_name, type(tensor))) + raise RuntimeError( + "Invalid function argument. Expected parameter {}" + "to be of type paddle.Tensor, but it's {}".format( + tensor_name, type(tensor) + ) + ) def _check_tensor_list(tensor_list, tensor_name): - if not isinstance(tensor_list, list) or \ - not all(isinstance(t, (core.eager.Tensor, paddle.Tensor)) for t in tensor_list): - raise RuntimeError("Invalid function argument. Expected parameter {}" - "to be of type paddle.Tensor".format(tensor_name)) + if not isinstance(tensor_list, list) or not all( + isinstance(t, (core.eager.Tensor, paddle.Tensor)) for t in tensor_list + ): + raise RuntimeError( + "Invalid function argument. Expected parameter {}" + "to be of type paddle.Tensor".format(tensor_name) + ) def isend(tensor, dst, group=None): @@ -1459,9 +1648,9 @@ def isend(tensor, dst, group=None): if in_dygraph_mode(): group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" group_dst_rank = group.get_group_rank(dst) - assert group_dst_rank >= 0, ("dst rank out of group, need global rank") + assert group_dst_rank >= 0, "dst rank out of group, need global rank" return group.process_group.send(tensor, group_dst_rank) else: raise RuntimeError("Only support eager dygraph mode.") @@ -1508,9 +1697,9 @@ def irecv(tensor, src=None, group=None): if in_dygraph_mode(): group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" group_src_rank = group.get_group_rank(src) - assert group_src_rank >= 0, ("src rank out of group, need global rank") + assert group_src_rank >= 0, "src rank out of group, need global rank" return group.process_group.recv(tensor, group_src_rank) else: raise RuntimeError("Only support eager dygraph mode.") @@ -1536,9 +1725,11 @@ class P2POp(object): def __init__(self, op, tensor, peer, group=None): if op not in [isend, irecv]: - raise RuntimeError("Invalid ``op`` function. Expected ``op`` " - "to be of type ``paddle.distributed.isend`` or " - "``paddle.distributed.irecv``.") + raise RuntimeError( + "Invalid ``op`` function. Expected ``op`` " + "to be of type ``paddle.distributed.isend`` or " + "``paddle.distributed.irecv``." + ) _check_single_tensor(tensor, "tensor") self.op = op @@ -1564,13 +1755,17 @@ def _check_p2p_op_list(p2p_op_list): all ops use the same backend. """ if not isinstance(p2p_op_list, list) or not all( - isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list): - raise RuntimeError("Invalid ``p2p_op_list``. Each op is expected to " - "to be of type ``paddle.distributed.P2POp``.") + isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list + ): + raise RuntimeError( + "Invalid ``p2p_op_list``. Each op is expected to " + "to be of type ``paddle.distributed.P2POp``." 
+ ) backend = _group_map_backend[p2p_op_list[0].group] - if not all(backend == _group_map_backend[p2p_op.group] - for p2p_op in p2p_op_list): + if not all( + backend == _group_map_backend[p2p_op.group] for p2p_op in p2p_op_list + ): raise RuntimeError("All groups need to use the same backend.") @@ -1647,11 +1842,9 @@ def batch_isend_irecv(p2p_op_list): raise RuntimeError("Don't support static graph mode currently.") -def reduce_scatter(tensor, - tensor_list, - op=ReduceOp.SUM, - group=None, - sync_op=True): +def reduce_scatter( + tensor, tensor_list, op=ReduceOp.SUM, group=None, sync_op=True +): """ Reduces, then scatters a list of tensors to all processes in a group @@ -1702,7 +1895,7 @@ def reduce_scatter(tensor, op_type = _get_reduce_op(op, "reduce_scatter") group = _get_default_group() if group is None else group backend = _group_map_backend[group] - assert backend != 'gloo', ("backend gloo is not supported yet") + assert backend != 'gloo', "backend gloo is not supported yet" temp = paddle.concat(tensor_list, axis=0) task = group.process_group._reduce_scatter_base(tensor, temp, op_type) @@ -1715,11 +1908,9 @@ def reduce_scatter(tensor, raise RuntimeError("Don't support static graph mode currently.") -def _reduce_scatter_base(output, - input, - op=ReduceOp.SUM, - group=None, - sync_op=True): +def _reduce_scatter_base( + output, input, op=ReduceOp.SUM, group=None, sync_op=True +): """ Reduces, then scatters a flattened tensor to all processes in a group. diff --git a/python/paddle/distributed/communication/all_reduce.py b/python/paddle/distributed/communication/all_reduce.py index 737e0cbbfb56c070687fa18b9df30df0cec890d0..7a09d779e8fe72340edb4b6eba468a78b7947a28 100644 --- a/python/paddle/distributed/communication/all_reduce.py +++ b/python/paddle/distributed/communication/all_reduce.py @@ -58,30 +58,28 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, sync_op=True): # [[5, 7, 9], [5, 7, 9]] (2 GPUs) """ if not framework._in_legacy_dygraph(): - return stream.all_reduce(tensor, - op=op, - group=group, - sync_op=sync_op, - use_calc_stream=False) + return stream.all_reduce( + tensor, op=op, group=group, sync_op=sync_op, use_calc_stream=False + ) # code below will be removed after we remove the old dygraph use_calc_stream = sync_op ring_id = 0 if group is None else group.id if op == ReduceOp.SUM: - return paddle._legacy_C_ops.c_allreduce_sum_(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id) + return paddle._legacy_C_ops.c_allreduce_sum_( + tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id + ) elif op == ReduceOp.MAX: - return paddle._legacy_C_ops.c_allreduce_max_(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id) + return paddle._legacy_C_ops.c_allreduce_max_( + tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id + ) elif op == ReduceOp.MIN: - return paddle._legacy_C_ops.c_allreduce_min_(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id) + return paddle._legacy_C_ops.c_allreduce_min_( + tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id + ) elif op == ReduceOp.PROD: - return paddle._legacy_C_ops.c_allreduce_prod_(tensor, 'use_calc_stream', - use_calc_stream, - 'ring_id', ring_id) + return paddle._legacy_C_ops.c_allreduce_prod_( + tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id + ) else: raise ValueError("Unknown parameter: {}.".format(op)) diff --git a/python/paddle/distributed/communication/group.py b/python/paddle/distributed/communication/group.py index 
6b4e545b245d1e987fedcd62e3331a96354930d6..60f1264fe8088cf05edbe2ee8a541b99216d9430 100644 --- a/python/paddle/distributed/communication/group.py +++ b/python/paddle/distributed/communication/group.py @@ -13,7 +13,7 @@ # limitations under the License. -class Group(): +class Group: """ The abstract representation of group. """ @@ -69,14 +69,15 @@ class Group(): def __repr__(self): debug_str = "rank: {}, nranks: {}, id: {}, ranks: ".format( - self.rank, self.nranks, self.id) + self.rank, self.nranks, self.id + ) debug_str += ", ".join(map(str, self.ranks)) debug_str += "; name: " debug_str += self.name if self.name else "None" return debug_str -class _GroupManager(): +class _GroupManager: global_group_id = 0 group_map_by_id = {} @@ -89,6 +90,7 @@ def _get_global_group(): def _add_new_group(group): if group.id in _GroupManager.group_map_by_id: - raise RuntimeError("The group with id {} already exist.".format( - group.id)) + raise RuntimeError( + "The group with id {} already exist.".format(group.id) + ) _GroupManager.group_map_by_id[group.id] = group diff --git a/python/paddle/distributed/communication/reduce.py b/python/paddle/distributed/communication/reduce.py index 5caa5bebedfd8115f34252c3f490b49c32131778..8628e83b626ef161520bc114bc3e2bb3acf15a6e 100644 --- a/python/paddle/distributed/communication/reduce.py +++ b/python/paddle/distributed/communication/reduce.py @@ -46,6 +46,7 @@ class ReduceOp: print(data) # [[5, 7, 9], [5, 7, 9]] (2 GPUs) """ + SUM = 0 MAX = 1 MIN = 2 diff --git a/python/paddle/distributed/communication/stream/__init__.py b/python/paddle/distributed/communication/stream/__init__.py index 43952ce5541a339f817e67237db5fc0a099baca3..63ec858557b3b360db736304c22b8026dd007352 100644 --- a/python/paddle/distributed/communication/stream/__init__.py +++ b/python/paddle/distributed/communication/stream/__init__.py @@ -24,6 +24,14 @@ from .scatter import scatter from .send import send __all__ = [ - "all_gather", "all_reduce", "alltoall", "alltoall_single", "broadcast", - "reduce", "reduce_scatter", "recv", "scatter", "send" + "all_gather", + "all_reduce", + "alltoall", + "alltoall_single", + "broadcast", + "reduce", + "reduce_scatter", + "recv", + "scatter", + "send", ] diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py index 9eb961cda171d42bb38c84ae16067dd65e1e5e69..cdd7f98554b3a3db9d8f3dd4a5f228aa6984507b 100644 --- a/python/paddle/distributed/communication/stream/all_gather.py +++ b/python/paddle/distributed/communication/stream/all_gather.py @@ -27,33 +27,39 @@ def _check_tensor_shape(tensor, shape, nranks=1): def _check_tensor_list_shape(tensor_list, shape, nranks=1): if len(tensor_list) != nranks: raise RuntimeError( - 'The tensor_list for all_gather is not correctly-sized.') + 'The tensor_list for all_gather is not correctly-sized.' + ) for tensor in tensor_list: if tensor.shape != shape: raise RuntimeError( - 'The tensor_list for all_gather is not correctly-sized.') + 'The tensor_list for all_gather is not correctly-sized.' 
+ ) -def _all_gather_into_tensor_in_dygraph(out_tensor, in_tensor, group, sync_op, - use_calc_stream): +def _all_gather_into_tensor_in_dygraph( + out_tensor, in_tensor, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group _check_tensor_shape(out_tensor, in_tensor.shape, group.nranks) if use_calc_stream: return group.process_group.allgather_into_tensor_on_calc_stream( - in_tensor, out_tensor) + in_tensor, out_tensor + ) - task = group.process_group.allgather_into_tensor(in_tensor, out_tensor, - sync_op) + task = group.process_group.allgather_into_tensor( + in_tensor, out_tensor, sync_op + ) if sync_op: task.wait() return task -def _all_gather_in_dygraph(tensor_list, tensor, group, sync_op, - use_calc_stream): +def _all_gather_in_dygraph( + tensor_list, tensor, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group if len(tensor_list) == 0: @@ -71,11 +77,13 @@ def _all_gather_in_dygraph(tensor_list, tensor, group, sync_op, return task -def all_gather(tensor_or_tensor_list, - tensor, - group=None, - sync_op=True, - use_calc_stream=False): +def all_gather( + tensor_or_tensor_list, + tensor, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Gather tensors across devices to a correctly-sized tensor or a tensor list. @@ -122,16 +130,18 @@ def all_gather(tensor_or_tensor_list, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if framework.in_dygraph_mode(): if paddle.is_tensor(tensor_or_tensor_list): - return _all_gather_into_tensor_in_dygraph(tensor_or_tensor_list, - tensor, group, sync_op, - use_calc_stream) + return _all_gather_into_tensor_in_dygraph( + tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream + ) else: - return _all_gather_in_dygraph(tensor_or_tensor_list, tensor, group, - sync_op, use_calc_stream) + return _all_gather_in_dygraph( + tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream + ) raise RuntimeError( "paddle.distributed.stream.all_gather is only supported in dygraph mode now." 
diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py index 0ba161a078ab89e82631aedd623d7934f160d3df..97eda9d0f9b4429a3fb46c1b8ef0c0f615fe12a2 100644 --- a/python/paddle/distributed/communication/stream/all_reduce.py +++ b/python/paddle/distributed/communication/stream/all_reduce.py @@ -34,10 +34,21 @@ def _all_reduce_in_dygraph(tensor, op, group, sync_op, use_calc_stream): def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream): - data_feeder.check_variable_and_dtype(tensor, 'tensor', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8', - 'bool' - ], 'all_reduce') + data_feeder.check_variable_and_dtype( + tensor, + 'tensor', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'uint8', + 'bool', + ], + 'all_reduce', + ) op_type = _get_reduce_op(op, "all_reduce") ring_id = 0 if group is None else group.id @@ -48,22 +59,19 @@ def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream): # TODO: Support task and use task.wait in static mode # Use use_calc_stream rather than sync_op helper = layer_helper.LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [tensor]}, - outputs={'Out': [tensor]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': sync_op - }) + helper.append_op( + type=op_type, + inputs={'X': [tensor]}, + outputs={'Out': [tensor]}, + attrs={'ring_id': ring_id, 'use_calc_stream': sync_op}, + ) return None -def all_reduce(tensor, - op=ReduceOp.SUM, - group=None, - sync_op=True, - use_calc_stream=False): +def all_reduce( + tensor, op=ReduceOp.SUM, group=None, sync_op=True, use_calc_stream=False +): """ Perform specific reduction (for example, sum, max) on inputs across devices. @@ -106,11 +114,14 @@ def all_reduce(tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if framework.in_dygraph_mode(): - return _all_reduce_in_dygraph(tensor, op, group, sync_op, - use_calc_stream) + return _all_reduce_in_dygraph( + tensor, op, group, sync_op, use_calc_stream + ) else: - return _all_reduce_in_static_mode(tensor, op, group, sync_op, - use_calc_stream) + return _all_reduce_in_static_mode( + tensor, op, group, sync_op, use_calc_stream + ) diff --git a/python/paddle/distributed/communication/stream/alltoall.py b/python/paddle/distributed/communication/stream/alltoall.py index b216906d0456888285405f170b48a090b03a61a9..5faf1062631254c111143793dea026b0d12a6d33 100644 --- a/python/paddle/distributed/communication/stream/alltoall.py +++ b/python/paddle/distributed/communication/stream/alltoall.py @@ -25,22 +25,26 @@ def _check_tensor_shape(tensor, shape, nranks=1): def _check_tensor_list_shape(tensor_list, shape, nranks=1): if len(tensor_list) != nranks: raise RuntimeError( - 'The tensor_list for alltoall is not correctly-sized.') + 'The tensor_list for alltoall is not correctly-sized.' + ) for tensor in tensor_list: if tensor.shape != shape: raise RuntimeError( - 'The tensor_list for alltoall is not correctly-sized.') + 'The tensor_list for alltoall is not correctly-sized.' 
+ ) -def _alltoall_tensor_in_dygraph(out_tensor, in_tensor, group, sync_op, - use_calc_stream): +def _alltoall_tensor_in_dygraph( + out_tensor, in_tensor, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group _check_tensor_shape(out_tensor, in_tensor.shape, group.nranks) if use_calc_stream: return group.process_group.alltoall_tensor_on_calc_stream( - in_tensor, out_tensor) + in_tensor, out_tensor + ) task = group.process_group.alltoall_tensor(in_tensor, out_tensor, sync_op) if sync_op: @@ -49,8 +53,9 @@ def _alltoall_tensor_in_dygraph(out_tensor, in_tensor, group, sync_op, return task -def _alltoall_in_dygraph(out_tensor_list, in_tensor_list, group, sync_op, - use_calc_stream): +def _alltoall_in_dygraph( + out_tensor_list, in_tensor_list, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group if len(in_tensor_list) == 0: @@ -61,26 +66,31 @@ def _alltoall_in_dygraph(out_tensor_list, in_tensor_list, group, sync_op, paddle.empty_like(tensor) for tensor in in_tensor_list ] else: - _check_tensor_list_shape(out_tensor_list, in_tensor_list[0].shape, - group.nranks) + _check_tensor_list_shape( + out_tensor_list, in_tensor_list[0].shape, group.nranks + ) if use_calc_stream: return group.process_group.alltoall_on_calc_stream( - in_tensor_list, out_tensor_list) + in_tensor_list, out_tensor_list + ) - task = group.process_group.alltoall(in_tensor_list, out_tensor_list, - sync_op) + task = group.process_group.alltoall( + in_tensor_list, out_tensor_list, sync_op + ) if sync_op: task.wait() return task -def alltoall(out_tensor_or_tensor_list, - in_tensor_or_tensor_list, - group=None, - sync_op=True, - use_calc_stream=False): +def alltoall( + out_tensor_or_tensor_list, + in_tensor_or_tensor_list, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Scatter a tensor (or a tensor list) across devices and gather outputs to another tensor (or a tensor list, respectively). @@ -130,7 +140,8 @@ def alltoall(out_tensor_or_tensor_list, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if out_tensor_or_tensor_list is None: raise RuntimeError("The output should be specified.") @@ -141,16 +152,25 @@ def alltoall(out_tensor_or_tensor_list, out_is_tensor = paddle.is_tensor(out_tensor_or_tensor_list) in_is_tensor = paddle.is_tensor(in_tensor_or_tensor_list) if out_is_tensor and in_is_tensor: - return _alltoall_tensor_in_dygraph(out_tensor_or_tensor_list, - in_tensor_or_tensor_list, group, - sync_op, use_calc_stream) + return _alltoall_tensor_in_dygraph( + out_tensor_or_tensor_list, + in_tensor_or_tensor_list, + group, + sync_op, + use_calc_stream, + ) elif not out_is_tensor and not in_is_tensor: - return _alltoall_in_dygraph(out_tensor_or_tensor_list, - in_tensor_or_tensor_list, group, - sync_op, use_calc_stream) + return _alltoall_in_dygraph( + out_tensor_or_tensor_list, + in_tensor_or_tensor_list, + group, + sync_op, + use_calc_stream, + ) else: raise RuntimeError( - "The output and input should be both tensor or tensor list.") + "The output and input should be both tensor or tensor list." + ) raise RuntimeError( "paddle.distributed.stream.alltoall is only supported in dygraph mode now." 
diff --git a/python/paddle/distributed/communication/stream/alltoall_single.py b/python/paddle/distributed/communication/stream/alltoall_single.py index b2187cc06e343984ae11d005a0ffb5bc27bb8d6f..75eb2890326366f79edbd65ef4af659cc5c340e3 100644 --- a/python/paddle/distributed/communication/stream/alltoall_single.py +++ b/python/paddle/distributed/communication/stream/alltoall_single.py @@ -16,9 +16,15 @@ import paddle.fluid.framework as framework from paddle.distributed import collective -def _alltoall_single_in_dygraph(out_tensor, in_tensor, out_split_sizes, - in_split_sizes, group, sync_op, - use_calc_stream): +def _alltoall_single_in_dygraph( + out_tensor, + in_tensor, + out_split_sizes, + in_split_sizes, + group, + sync_op, + use_calc_stream, +): group = collective._get_default_group() if group is None else group if out_split_sizes is None: @@ -28,24 +34,27 @@ def _alltoall_single_in_dygraph(out_tensor, in_tensor, out_split_sizes, if use_calc_stream: return group.process_group.alltoall_single_on_calc_stream( - in_tensor, out_tensor, in_split_sizes, out_split_sizes) + in_tensor, out_tensor, in_split_sizes, out_split_sizes + ) - task = group.process_group.alltoall_single(in_tensor, out_tensor, - in_split_sizes, out_split_sizes, - sync_op) + task = group.process_group.alltoall_single( + in_tensor, out_tensor, in_split_sizes, out_split_sizes, sync_op + ) if sync_op: task.wait() return task -def alltoall_single(out_tensor, - in_tensor, - out_split_sizes=None, - in_split_sizes=None, - group=None, - sync_op=True, - use_calc_stream=False): +def alltoall_single( + out_tensor, + in_tensor, + out_split_sizes=None, + in_split_sizes=None, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Split and Scatter the splitted input tensor to the out tensor across devices. @@ -116,12 +125,19 @@ def alltoall_single(out_tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if framework.in_dygraph_mode(): - return _alltoall_single_in_dygraph(out_tensor, in_tensor, - out_split_sizes, in_split_sizes, - group, sync_op, use_calc_stream) + return _alltoall_single_in_dygraph( + out_tensor, + in_tensor, + out_split_sizes, + in_split_sizes, + group, + sync_op, + use_calc_stream, + ) raise RuntimeError( "paddle.distributed.stream.alltoall_single is only supported in dygraph mode now." diff --git a/python/paddle/distributed/communication/stream/broadcast.py b/python/paddle/distributed/communication/stream/broadcast.py index 06bde316937a9d92325969324c249225991d10e7..3672b02811e6ed9f64f79a0e258f81257f84ab00 100644 --- a/python/paddle/distributed/communication/stream/broadcast.py +++ b/python/paddle/distributed/communication/stream/broadcast.py @@ -72,11 +72,13 @@ def broadcast(tensor, src=0, group=None, sync_op=True, use_calc_stream=False): if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be True in sync op behavior.") + "use_calc_stream can only be True in sync op behavior." + ) if framework.in_dygraph_mode(): - return _broadcast_in_dygraph(tensor, src, group, sync_op, - use_calc_stream) + return _broadcast_in_dygraph( + tensor, src, group, sync_op, use_calc_stream + ) raise RuntimeError( "paddle.distributed.stream.broadcast is only supported in dygraph mode now." 
diff --git a/python/paddle/distributed/communication/stream/recv.py b/python/paddle/distributed/communication/stream/recv.py index 25a8173788473aa79f9f32ddae9945d69156fb80..2658379a412a7a08f53b4c631d2df25b8348c3f8 100644 --- a/python/paddle/distributed/communication/stream/recv.py +++ b/python/paddle/distributed/communication/stream/recv.py @@ -73,10 +73,12 @@ def recv(tensor, src=0, group=None, sync_op=True, use_calc_stream=False): if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be True in sync op behavior.") + "use_calc_stream can only be True in sync op behavior." + ) if framework.in_dygraph_mode(): return _recv_in_dygraph(tensor, src, group, sync_op, use_calc_stream) raise RuntimeError( - "paddle.distributed.stream.recv is only supported in dygraph mode now.") + "paddle.distributed.stream.recv is only supported in dygraph mode now." + ) diff --git a/python/paddle/distributed/communication/stream/reduce.py b/python/paddle/distributed/communication/stream/reduce.py index b0f7f5c884743d3bd1332e9679c45ff52c868b77..d7f0fc6b2b77850481fc4e1b6c485d7f7bc891a3 100644 --- a/python/paddle/distributed/communication/stream/reduce.py +++ b/python/paddle/distributed/communication/stream/reduce.py @@ -30,12 +30,14 @@ def _reduce_in_dygraph(tensor, dst, op, group, sync_op, use_calc_stream): return task -def reduce(tensor, - dst=0, - op=ReduceOp.SUM, - group=None, - sync_op=True, - use_calc_stream=False): +def reduce( + tensor, + dst=0, + op=ReduceOp.SUM, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Perform specific reduction (for example, sum, max) on a tensor across devices and send to the destintion device. @@ -82,11 +84,13 @@ def reduce(tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if framework.in_dygraph_mode(): - return _reduce_in_dygraph(tensor, dst, op, group, sync_op, - use_calc_stream) + return _reduce_in_dygraph( + tensor, dst, op, group, sync_op, use_calc_stream + ) raise RuntimeError( "paddle.distributed.stream.reduce is only supported in dygraph mode now." diff --git a/python/paddle/distributed/communication/stream/reduce_scatter.py b/python/paddle/distributed/communication/stream/reduce_scatter.py index 68ba8e18e404c656b302cac8fbdecf68b7ca9119..ef375fa5be412f950f51e66297163828913e3ef2 100644 --- a/python/paddle/distributed/communication/stream/reduce_scatter.py +++ b/python/paddle/distributed/communication/stream/reduce_scatter.py @@ -23,26 +23,31 @@ def _check_tensor_shape(tensor, shape, nranks=1): expect_shape[0] //= nranks if list(tensor.shape) != expect_shape: raise RuntimeError( - "The in_tensor for reduce_scatter is not correctly-sized.") + "The in_tensor for reduce_scatter is not correctly-sized." + ) def _check_tensor_list_shape(tensor_list, shape, nranks=1): if len(tensor_list) != nranks: raise RuntimeError( - "The tensor_list for reduce_scatter is not correctly-sized.") + "The tensor_list for reduce_scatter is not correctly-sized." + ) for tensor in tensor_list: if tensor.shape != shape: raise RuntimeError( - "The tensor_list for reduce_scatter is not correctly-sized.") - - -def _reduce_scatter_tensor_in_dygraph(out_tensor, - in_tensor, - op, - group, - sync_op, - use_calc_stream, - caller="reduce_scatter"): + "The tensor_list for reduce_scatter is not correctly-sized." 
+ ) + + +def _reduce_scatter_tensor_in_dygraph( + out_tensor, + in_tensor, + op, + group, + sync_op, + use_calc_stream, + caller="reduce_scatter", +): op_type = _get_reduce_op(op, caller) group = _get_global_group() if group is None else group @@ -50,18 +55,21 @@ def _reduce_scatter_tensor_in_dygraph(out_tensor, if use_calc_stream: return group.process_group.reduce_scatter_tensor_on_calc_stream( - in_tensor, out_tensor, op_type) + in_tensor, out_tensor, op_type + ) - task = group.process_group.reduce_scatter_tensor(in_tensor, out_tensor, - op_type, sync_op) + task = group.process_group.reduce_scatter_tensor( + in_tensor, out_tensor, op_type, sync_op + ) if sync_op: task.wait() return task -def _reduce_scatter_in_dygraph(tensor, tensor_list, op, group, sync_op, - use_calc_stream): +def _reduce_scatter_in_dygraph( + tensor, tensor_list, op, group, sync_op, use_calc_stream +): op_type = _get_reduce_op(op, "reduce_scatter") group = _get_global_group() if group is None else group @@ -69,22 +77,26 @@ def _reduce_scatter_in_dygraph(tensor, tensor_list, op, group, sync_op, if use_calc_stream: return group.process_group.reduce_scatter_on_calc_stream( - tensor_list, tensor, op_type) + tensor_list, tensor, op_type + ) - task = group.process_group.reduce_scatter(tensor_list, tensor, op_type, - sync_op) + task = group.process_group.reduce_scatter( + tensor_list, tensor, op_type, sync_op + ) if sync_op: task.wait() return task -def reduce_scatter(tensor, - tensor_or_tensor_list, - op=ReduceOp.SUM, - group=None, - sync_op=True, - use_calc_stream=False): +def reduce_scatter( + tensor, + tensor_or_tensor_list, + op=ReduceOp.SUM, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Reduce, then scatter a tensor (or a tensor list) across devices. @@ -132,29 +144,42 @@ def reduce_scatter(tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." + ) if framework.in_dygraph_mode(): if paddle.is_tensor(tensor_or_tensor_list): - return _reduce_scatter_tensor_in_dygraph(tensor, - tensor_or_tensor_list, op, - group, sync_op, - use_calc_stream) + return _reduce_scatter_tensor_in_dygraph( + tensor, + tensor_or_tensor_list, + op, + group, + sync_op, + use_calc_stream, + ) else: - return _reduce_scatter_in_dygraph(tensor, tensor_or_tensor_list, op, - group, sync_op, use_calc_stream) + return _reduce_scatter_in_dygraph( + tensor, + tensor_or_tensor_list, + op, + group, + sync_op, + use_calc_stream, + ) raise RuntimeError( "paddle.distributed.stream.reduce_scatter is only supported in dygraph mode now." ) -def _reduce_scatter_base(out_tensor, - in_tensor, - op=ReduceOp.SUM, - group=None, - sync_op=True, - use_calc_stream=False): +def _reduce_scatter_base( + out_tensor, + in_tensor, + op=ReduceOp.SUM, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Reduce, then scatter a flattened tensor across devices. @@ -202,13 +227,19 @@ def _reduce_scatter_base(out_tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." 
+ ) if framework.in_dygraph_mode(): - return _reduce_scatter_tensor_in_dygraph(out_tensor, in_tensor, op, - group, sync_op, - use_calc_stream, - "_reduce_scatter_base") + return _reduce_scatter_tensor_in_dygraph( + out_tensor, + in_tensor, + op, + group, + sync_op, + use_calc_stream, + "_reduce_scatter_base", + ) raise RuntimeError( "paddle.distributed.stream._reduce_scatter_base is only supported in dygraph mode now." diff --git a/python/paddle/distributed/communication/stream/scatter.py b/python/paddle/distributed/communication/stream/scatter.py index 9b0fcaf4b086ad73d45974c51b56275c2a4448b0..199885c1fde91936e9f28c166a1ca38eeeb49c39 100644 --- a/python/paddle/distributed/communication/stream/scatter.py +++ b/python/paddle/distributed/communication/stream/scatter.py @@ -28,15 +28,18 @@ def _check_tensor_shape(tensor, shape, nranks=1): def _check_tensor_list_shape(tensor_list, shape, nranks=1): if len(tensor_list) != nranks: raise RuntimeError( - "The tensor_list for scatter is not correctly-sized.") + "The tensor_list for scatter is not correctly-sized." + ) for tensor in tensor_list: if tensor.shape != shape: raise RuntimeError( - "The tensor_list for scatter is not correctly-sized.") + "The tensor_list for scatter is not correctly-sized." + ) -def _scatter_tensor_in_dygraph(out_tensor, in_tensor, src, group, sync_op, - use_calc_stream): +def _scatter_tensor_in_dygraph( + out_tensor, in_tensor, src, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group src_rank = group.get_group_rank(src) @@ -50,18 +53,21 @@ def _scatter_tensor_in_dygraph(out_tensor, in_tensor, src, group, sync_op, if use_calc_stream: return group.process_group.scatter_tensor_on_calc_stream( - in_tensor, out_tensor, src) + in_tensor, out_tensor, src + ) - task = group.process_group.scatter_tensor(in_tensor, out_tensor, src, - sync_op) + task = group.process_group.scatter_tensor( + in_tensor, out_tensor, src, sync_op + ) if sync_op: task.wait() return task -def _scatter_in_dygraph(tensor, tensor_list, src, group, sync_op, - use_calc_stream): +def _scatter_in_dygraph( + tensor, tensor_list, src, group, sync_op, use_calc_stream +): group = collective._get_default_group() if group is None else group src_rank = group.get_group_rank(src) @@ -73,14 +79,16 @@ def _scatter_in_dygraph(tensor, tensor_list, src, group, sync_op, if rank == src_rank: if len(tensor_list) == 0: raise RuntimeError( - "The tensor_list should not be empty on src rank.") + "The tensor_list should not be empty on src rank." + ) _check_tensor_list_shape(tensor_list, tensor.shape, nranks) else: tensor_list = [tensor for _ in range(nranks)] if use_calc_stream: return group.process_group.scatter_on_calc_stream( - tensor_list, tensor, src) + tensor_list, tensor, src + ) task = group.process_group.scatter(tensor_list, tensor, src, sync_op) if sync_op: @@ -89,12 +97,14 @@ def _scatter_in_dygraph(tensor, tensor_list, src, group, sync_op, return task -def scatter(tensor, - tensor_or_tensor_list=None, - src=0, - group=None, - sync_op=True, - use_calc_stream=False): +def scatter( + tensor, + tensor_or_tensor_list=None, + src=0, + group=None, + sync_op=True, + use_calc_stream=False, +): """ Scatter a tensor (or a tensor list) across devices. @@ -143,19 +153,31 @@ def scatter(tensor, if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be true in sync op behavior.") + "use_calc_stream can only be true in sync op behavior." 
+ ) if tensor_or_tensor_list is None: raise RuntimeError("The input should be specified.") if framework.in_dygraph_mode(): if paddle.is_tensor(tensor_or_tensor_list): - return _scatter_tensor_in_dygraph(tensor, tensor_or_tensor_list, - src, group, sync_op, - use_calc_stream) + return _scatter_tensor_in_dygraph( + tensor, + tensor_or_tensor_list, + src, + group, + sync_op, + use_calc_stream, + ) else: - return _scatter_in_dygraph(tensor, tensor_or_tensor_list, src, - group, sync_op, use_calc_stream) + return _scatter_in_dygraph( + tensor, + tensor_or_tensor_list, + src, + group, + sync_op, + use_calc_stream, + ) raise RuntimeError( "paddle.distributed.stream.scatter is only supported in dygraph mode now." diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py index 41ec2c0141b1227933a4df5c523455f2e02a8e9d..206a8cfaa6f01a993fd32d085469556034da8cbd 100644 --- a/python/paddle/distributed/communication/stream/send.py +++ b/python/paddle/distributed/communication/stream/send.py @@ -73,10 +73,12 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False): if not sync_op and use_calc_stream: raise RuntimeError( - "use_calc_stream can only be True in sync op behavior.") + "use_calc_stream can only be True in sync op behavior." + ) if framework.in_dygraph_mode(): return _send_in_dygraph(tensor, dst, group, sync_op, use_calc_stream) raise RuntimeError( - "paddle.distributed.stream.send is only supported in dygraph mode now.") + "paddle.distributed.stream.send is only supported in dygraph mode now." + ) diff --git a/python/paddle/distributed/elastic.py b/python/paddle/distributed/elastic.py index b13eb07ee28c0c0b4f9697571b5e22b98c504e75..d1fd9a790f2d46cce11c15937a638191e72c8c95 100644 --- a/python/paddle/distributed/elastic.py +++ b/python/paddle/distributed/elastic.py @@ -17,7 +17,6 @@ import os class Command(object): - def __init__(self, server, name): import etcd3 @@ -47,14 +46,15 @@ class Command(object): if __name__ == '__main__': parser = argparse.ArgumentParser(description='Elastic Command') - parser.add_argument("--elastic_server", - type=str, - help="etcd server host:port") + parser.add_argument( + "--elastic_server", type=str, help="etcd server host:port" + ) parser.add_argument("--job_id", type=str, help="job unique id") parser.add_argument( "--np", type=str, - help="job pod/node number, need to be 'MIN' or 'MIN:MAX' format") + help="job pod/node number, need to be 'MIN' or 'MIN:MAX' format", + ) parser.add_argument("action", type=str, help="action to take") args = parser.parse_args() diff --git a/python/paddle/distributed/entry_attr.py b/python/paddle/distributed/entry_attr.py index a118314778186a835088f853040d9b94bf5d7a61..f5043ac084cd6f8d5036e38987b21f68abc36e0c 100644 --- a/python/paddle/distributed/entry_attr.py +++ b/python/paddle/distributed/entry_attr.py @@ -124,11 +124,13 @@ class CountFilterEntry(EntryAttr): if not isinstance(count_filter, int): raise ValueError( - "count_filter must be a valid integer greater than 0") + "count_filter must be a valid integer greater than 0" + ) if count_filter < 0: raise ValueError( - "count_filter must be a valid integer greater or equal than 0") + "count_filter must be a valid integer greater or equal than 0" + ) self._name = "count_filter_entry" self._count_filter = count_filter diff --git a/python/paddle/distributed/fleet/__init__.py b/python/paddle/distributed/fleet/__init__.py index 
b75d84edf29b289be02c31a71def6d6b6652eabe..aebedefeaafcb73c420724379c994dec02ac0df7 100755 --- a/python/paddle/distributed/fleet/__init__.py +++ b/python/paddle/distributed/fleet/__init__.py @@ -24,7 +24,9 @@ from .dataset import QueueDataset # noqa: F401 from .dataset import FileInstantDataset # noqa: F401 from .dataset import BoxPSDataset # noqa: F401 from .data_generator.data_generator import MultiSlotDataGenerator # noqa: F401 -from .data_generator.data_generator import MultiSlotStringDataGenerator # noqa: F401 +from .data_generator.data_generator import ( + MultiSlotStringDataGenerator, +) # noqa: F401 from . import metrics # noqa: F401 from .base.topology import CommunicateTopology from .base.topology import HybridCommunicateGroup # noqa: F401 @@ -34,11 +36,17 @@ from .optimizer import distributed_optimizer from .scaler import distributed_scaler from .utils import log_util -__all__ = [ #noqa - "CommunicateTopology", "UtilBase", "HybridCommunicateGroup", - "MultiSlotStringDataGenerator", "UserDefinedRoleMaker", - "DistributedStrategy", "Role", "MultiSlotDataGenerator", - "PaddleCloudRoleMaker", "Fleet" +__all__ = [ # noqa + "CommunicateTopology", + "UtilBase", + "HybridCommunicateGroup", + "MultiSlotStringDataGenerator", + "UserDefinedRoleMaker", + "DistributedStrategy", + "Role", + "MultiSlotDataGenerator", + "PaddleCloudRoleMaker", + "Fleet", ] fleet = Fleet() diff --git a/python/paddle/distributed/fleet/ascend_utils.py b/python/paddle/distributed/fleet/ascend_utils.py index 6ff31555a1222a9f9176c64fca619d8c070137f6..ee10cd78a5ed072556eabaf7f76c8abe4424eefa 100644 --- a/python/paddle/distributed/fleet/ascend_utils.py +++ b/python/paddle/distributed/fleet/ascend_utils.py @@ -14,7 +14,11 @@ import os import json -from paddle.distributed.fleet.launch_utils import DeviceMode, get_cluster, get_host_name_ip +from paddle.distributed.fleet.launch_utils import ( + DeviceMode, + get_cluster, + get_host_name_ip, +) __all__ = [] @@ -80,17 +84,18 @@ def _get_ascend_rankfile(rank_table_file_path): assert nodes is not None, "DLS_TASK_NUMBER didn't set!" 
for node in range(int(nodes)): node_ip = os.getenv("VC_CUSTOM{}_HOSTS".format(node), None) - assert node_ip is not None, "VC_CUSTOM{}_HOSTS didn't set!".format( - node) + assert ( + node_ip is not None + ), "VC_CUSTOM{}_HOSTS didn't set!".format(node) node_ips.append(node_ip) return node_ips, device_count node_ips.append(server['server_id']) return node_ips, device_count -def get_cloud_cluster(rank_table_file=None, - device_mode=DeviceMode.ASCEND_NPU, - start_port=6070): +def get_cloud_cluster( + rank_table_file=None, device_mode=DeviceMode.ASCEND_NPU, start_port=6070 +): """ Args: rank_table_file: string, ascend npu rank file path @@ -110,8 +115,12 @@ def get_cloud_cluster(rank_table_file=None, else: _, node_ip = get_host_name_ip() - assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \ - % (node_ip, node_ips) + assert ( + node_ip in node_ips + ), "Can't find your local ip {%s} in node_ips: {%s}" % ( + node_ip, + node_ips, + ) else: # single trainer (single ascend card) node_ips = ["127.0.0.1"] @@ -127,5 +136,6 @@ def get_cloud_cluster(rank_table_file=None, for ip in node_ips: trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) - return get_cluster(node_ips, node_ip, trainer_endpoints, device_mode, - devices_per_proc) + return get_cluster( + node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc + ) diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py index c54df36cec63794ee0f6b57050d1d82aff1fe31c..c32b1f2d68c59383db82bd908ca9ef507b0664fc 100755 --- a/python/paddle/distributed/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -26,7 +26,6 @@ non_auto_func_called = True def __non_auto_func_called__(func): - def __impl__(*args, **kwargs): global non_auto_func_called non_auto_func_called = False @@ -129,7 +128,8 @@ class DistributedStrategy(object): key = 'FLAGS_cudnn_batchnorm_spatial_persistent' if _global_flags().is_public(key): self.strategy.cudnn_batchnorm_spatial_persistent = bool( - _global_flags()[key]) + _global_flags()[key] + ) key = 'FLAGS_conv_workspace_size_limit' if _global_flags().is_public(key): self.strategy.conv_workspace_size_limit = int(_global_flags()[key]) @@ -144,8 +144,9 @@ class DistributedStrategy(object): def __setattr__(self, key, value): if self.__lock_attr and not hasattr(self, key): - raise TypeError("%s is not a attribute of %s" % - (key, self.__class__.__name__)) + raise TypeError( + "%s is not a attribute of %s" % (key, self.__class__.__name__) + ) object.__setattr__(self, key, value) def save_to_prototxt(self, output): @@ -180,7 +181,8 @@ class DistributedStrategy(object): """ with open(pb_file, 'r') as f: self.strategy = google.protobuf.text_format.Merge( - str(f.read()), self.strategy) + str(f.read()), self.strategy + ) @property def execution_strategy(self): @@ -203,8 +205,11 @@ class DistributedStrategy(object): execution_strategy = paddle.fluid.ExecutionStrategy() fields = self.strategy.execution_strategy.DESCRIPTOR.fields for f in fields: - setattr(execution_strategy, f.name, - getattr(self.strategy.execution_strategy, f.name)) + setattr( + execution_strategy, + f.name, + getattr(self.strategy.execution_strategy, f.name), + ) return execution_strategy @execution_strategy.setter @@ -212,8 +217,11 @@ class DistributedStrategy(object): def execution_strategy(self, strategy): fields = self.strategy.execution_strategy.DESCRIPTOR.fields for f in fields: - 
setattr(self.strategy.execution_strategy, f.name, - getattr(strategy, f.name)) + setattr( + self.strategy.execution_strategy, + f.name, + getattr(strategy, f.name), + ) @property def build_strategy(self): @@ -261,8 +269,9 @@ class DistributedStrategy(object): value = ReduceStrategyFleet(value) setattr(self.strategy.build_strategy, f.name, value) elif f.label == 3: # repeated field - getattr(self.strategy.build_strategy, - f.name).extend(getattr(strategy, f.name)) + getattr(self.strategy.build_strategy, f.name).extend( + getattr(strategy, f.name) + ) @property def gradient_scale_configs(self): @@ -282,8 +291,11 @@ class DistributedStrategy(object): @gradient_scale_configs.setter @is_strict_auto def gradient_scale_configs(self, config): - check_configs_key(self.strategy.gradient_scale_configs, config, - 'gradient_scale_configs') + check_configs_key( + self.strategy.gradient_scale_configs, + config, + 'gradient_scale_configs', + ) assign_configs_value(self.strategy.gradient_scale_configs, config) @property @@ -318,8 +330,10 @@ class DistributedStrategy(object): self.a_sync_configs = {"k_steps": 0} else: raise ValueError( - "The type of `flag` is invalid, expected type is bool, but received {}" - .format(type(flag))) + "The type of `flag` is invalid, expected type is bool, but received {}".format( + type(flag) + ) + ) @property def a_sync_configs(self): @@ -365,8 +379,9 @@ class DistributedStrategy(object): @a_sync_configs.setter @is_strict_auto def a_sync_configs(self, configs): - check_configs_key(self.strategy.a_sync_configs, configs, - "a_sync_configs") + check_configs_key( + self.strategy.a_sync_configs, configs, "a_sync_configs" + ) assign_configs_value(self.strategy.a_sync_configs, configs) @property @@ -430,14 +445,17 @@ class DistributedStrategy(object): self.strategy.adam_d2sum = flag else: raise ValueError( - "The type of `flag` is invalid, expected type is bool, but received {}" - .format(type(flag))) + "The type of `flag` is invalid, expected type is bool, but received {}".format( + type(flag) + ) + ) @trainer_desc_configs.setter @is_strict_auto def trainer_desc_configs(self, configs): - check_configs_key(self.strategy.trainer_desc_configs, configs, - "trainer_desc_configs") + check_configs_key( + self.strategy.trainer_desc_configs, configs, "trainer_desc_configs" + ) assign_configs_value(self.strategy.trainer_desc_configs, configs) @property @@ -465,8 +483,9 @@ class DistributedStrategy(object): @fs_client_param.setter @is_strict_auto def fs_client_param(self, configs): - check_configs_key(self.strategy.fs_client_param, configs, - "fs_client_param") + check_configs_key( + self.strategy.fs_client_param, configs, "fs_client_param" + ) assign_configs_value(self.strategy.fs_client_param, configs) @property @@ -477,6 +496,7 @@ class DistributedStrategy(object): @is_strict_auto def sparse_table_configs(self, configs): from google.protobuf.descriptor import FieldDescriptor + table_param = self.strategy.downpour_table_param def set_table_config(msg, config_name, configs, index=0): @@ -493,8 +513,9 @@ class DistributedStrategy(object): data = getattr(msg, field.name).add() set_table_config(data, name, configs, i) else: - set_table_config(getattr(msg, field.name), name, - configs) + set_table_config( + getattr(msg, field.name), name, configs + ) else: # print("not message:", name) if name not in configs: @@ -513,132 +534,204 @@ class DistributedStrategy(object): for table_name in configs: table_data = table_param.add() table_data.table_name = table_name - set_table_config(table_data, 
"table_parameters." + table_name, - configs[table_name]) + set_table_config( + table_data, + "table_parameters." + table_name, + configs[table_name], + ) @sparse_table_configs.setter def fleet_desc_configs(self, configs): - support_sparse_key_list = ['sparse_table_class', 'sparse_compress_in_save', 'sparse_shard_num', \ - 'sparse_accessor_class', 'sparse_learning_rate', 'sparse_initial_g2sum', 'sparse_initial_range', \ - 'sparse_weight_bounds', 'sparse_fea_dim', 'sparse_embedx_dim', 'sparse_embedx_threshold', 'sparse_nonclk_coeff', \ - 'sparse_click_coeff', 'sparse_base_threshold', 'sparse_delta_threshold', 'sparse_delta_keep_days', \ - 'sparse_delete_after_unseen_days', 'sparse_show_click_decay_rate', 'sparse_delete_threshold', \ - 'sparse_converter', 'sparse_deconverter', 'sparse_enable_cache', 'sparse_cache_rate', \ - 'sparse_cache_file_num', 'sparse_beta1_decay_rate', 'sparse_beta2_decay_rate', \ - 'sparse_ada_epsilon', 'sparse_optimizer', 'sparse_ssd_unseenday_threshold', - 'embed_sparse_optimizer', 'embed_sparse_learning_rate', 'embed_sparse_weight_bounds', \ - 'embed_sparse_initial_range', 'embed_sparse_initial_g2sum', 'embed_sparse_beta1_decay_rate', \ - 'embed_sparse_beta2_decay_rate', 'embedx_sparse_optimizer', 'embedx_sparse_learning_rate', \ - 'embedx_sparse_weight_bounds', 'embedx_sparse_initial_range', 'embedx_sparse_initial_g2sum', \ - 'embedx_sparse_beta1_decay_rate', 'embedx_sparse_beta2_decay_rate', 'feature_learning_rate', 'nodeid_slot'] + support_sparse_key_list = [ + 'sparse_table_class', + 'sparse_compress_in_save', + 'sparse_shard_num', + 'sparse_accessor_class', + 'sparse_learning_rate', + 'sparse_initial_g2sum', + 'sparse_initial_range', + 'sparse_weight_bounds', + 'sparse_fea_dim', + 'sparse_embedx_dim', + 'sparse_embedx_threshold', + 'sparse_nonclk_coeff', + 'sparse_click_coeff', + 'sparse_base_threshold', + 'sparse_delta_threshold', + 'sparse_delta_keep_days', + 'sparse_delete_after_unseen_days', + 'sparse_show_click_decay_rate', + 'sparse_delete_threshold', + 'sparse_converter', + 'sparse_deconverter', + 'sparse_enable_cache', + 'sparse_cache_rate', + 'sparse_cache_file_num', + 'sparse_beta1_decay_rate', + 'sparse_beta2_decay_rate', + 'sparse_ada_epsilon', + 'sparse_optimizer', + 'sparse_ssd_unseenday_threshold', + 'embed_sparse_optimizer', + 'embed_sparse_learning_rate', + 'embed_sparse_weight_bounds', + 'embed_sparse_initial_range', + 'embed_sparse_initial_g2sum', + 'embed_sparse_beta1_decay_rate', + 'embed_sparse_beta2_decay_rate', + 'embedx_sparse_optimizer', + 'embedx_sparse_learning_rate', + 'embedx_sparse_weight_bounds', + 'embedx_sparse_initial_range', + 'embedx_sparse_initial_g2sum', + 'embedx_sparse_beta1_decay_rate', + 'embedx_sparse_beta2_decay_rate', + 'feature_learning_rate', + 'nodeid_slot', + ] support_sparse_table_class = ['DownpourSparseTable'] support_sparse_accessor_class = [ - 'DownpourSparseValueAccessor', 'DownpourCtrAccessor', - 'DownpourCtrDoubleAccessor', 'DownpourUnitAccessor', - 'DownpourDoubleUnitAccessor', 'DownpourCtrDymfAccessor' + 'DownpourSparseValueAccessor', + 'DownpourCtrAccessor', + 'DownpourCtrDoubleAccessor', + 'DownpourUnitAccessor', + 'DownpourDoubleUnitAccessor', + 'DownpourCtrDymfAccessor', ] table_param = self.strategy.downpour_table_param def add_graph_config(graph, strategy): - graph.feature_learning_rate = strategy.get('feature_learning_rate', - 0.05) + graph.feature_learning_rate = strategy.get( + 'feature_learning_rate', 0.05 + ) graph.nodeid_slot = strategy.get('nodeid_slot', 9008) def 
sparse_optimizer_config(sgd, strategy, prefix): - optimizer_name = strategy.get(prefix + "sparse_optimizer", - "adagrad") + optimizer_name = strategy.get( + prefix + "sparse_optimizer", "adagrad" + ) sgd.name = optimizer_name if optimizer_name == "naive": sgd.name = "SparseNaiveSGDRule" sgd.naive.learning_rate = strategy.get( - prefix + 'sparse_learning_rate', 0.05) + prefix + 'sparse_learning_rate', 0.05 + ) sgd.naive.initial_range = strategy.get( - prefix + 'sparse_initial_range', 1e-4) - bounds = strategy.get(prefix + 'sparse_weight_bounds', - [-10, 10]) + prefix + 'sparse_initial_range', 1e-4 + ) + bounds = strategy.get( + prefix + 'sparse_weight_bounds', [-10, 10] + ) sgd.naive.weight_bounds.extend(bounds) elif optimizer_name == "adagrad": sgd.name = 'SparseAdaGradSGDRule' sgd.adagrad.learning_rate = strategy.get( - prefix + 'sparse_learning_rate', 0.05) + prefix + 'sparse_learning_rate', 0.05 + ) sgd.adagrad.initial_range = strategy.get( - prefix + 'sparse_initial_range', 1e-4) + prefix + 'sparse_initial_range', 1e-4 + ) if prefix == "embed_": sgd.adagrad.initial_range = 0 sgd.adagrad.initial_g2sum = strategy.get( - prefix + 'sparse_initial_g2sum', 3) - bounds = strategy.get(prefix + 'sparse_weight_bounds', - [-10, 10]) + prefix + 'sparse_initial_g2sum', 3 + ) + bounds = strategy.get( + prefix + 'sparse_weight_bounds', [-10, 10] + ) sgd.adagrad.weight_bounds.extend(bounds) elif optimizer_name == "std_adagrad": sgd.name = 'StdAdaGradSGDRule' sgd.adagrad.learning_rate = strategy.get( - prefix + 'sparse_learning_rate', 0.05) + prefix + 'sparse_learning_rate', 0.05 + ) sgd.adagrad.initial_range = strategy.get( - prefix + 'sparse_initial_range', 1e-4) + prefix + 'sparse_initial_range', 1e-4 + ) if prefix == "embed_": sgd.adagrad.initial_range = 0 sgd.adagrad.initial_g2sum = strategy.get( - prefix + 'sparse_initial_g2sum', 3) - bounds = strategy.get(prefix + 'sparse_weight_bounds', - [-10, 10]) + prefix + 'sparse_initial_g2sum', 3 + ) + bounds = strategy.get( + prefix + 'sparse_weight_bounds', [-10, 10] + ) sgd.adagrad.weight_bounds.extend(bounds) elif optimizer_name == "adam": sgd.name = 'SparseAdamSGDRule' sgd.adam.learning_rate = strategy.get( - prefix + 'sparse_learning_rate', 0.001) + prefix + 'sparse_learning_rate', 0.001 + ) sgd.adam.initial_range = strategy.get( - prefix + 'sparse_initial_range', 1e-4) + prefix + 'sparse_initial_range', 1e-4 + ) sgd.adam.beta1_decay_rate = strategy.get( - prefix + 'sparse_beta1_decay_rate', 0.9) + prefix + 'sparse_beta1_decay_rate', 0.9 + ) sgd.adam.beta2_decay_rate = strategy.get( - prefix + 'sparse_beta2_decay_rate', 0.999) + prefix + 'sparse_beta2_decay_rate', 0.999 + ) sgd.adam.ada_epsilon = strategy.get( - prefix + 'sparse_ada_epsilon', 1e-8) - bounds = strategy.get(prefix + 'sparse_weight_bounds', - [-10, 10]) + prefix + 'sparse_ada_epsilon', 1e-8 + ) + bounds = strategy.get( + prefix + 'sparse_weight_bounds', [-10, 10] + ) sgd.adam.weight_bounds.extend(bounds) elif optimizer_name == "shared_adam": sgd.name = 'SparseSharedAdamSGDRule' sgd.adam.learning_rate = strategy.get( - prefix + 'sparse_learning_rate', 0.001) + prefix + 'sparse_learning_rate', 0.001 + ) sgd.adam.initial_range = strategy.get( - prefix + 'sparse_initial_range', 1e-4) + prefix + 'sparse_initial_range', 1e-4 + ) sgd.adam.beta1_decay_rate = strategy.get( - prefix + 'sparse_beta1_decay_rate', 0.9) + prefix + 'sparse_beta1_decay_rate', 0.9 + ) sgd.adam.beta2_decay_rate = strategy.get( - prefix + 'sparse_beta2_decay_rate', 0.999) + prefix + 'sparse_beta2_decay_rate', 0.999 + ) 
sgd.adam.ada_epsilon = strategy.get( - prefix + 'sparse_ada_epsilon', 1e-8) - bounds = strategy.get(prefix + 'sparse_weight_bounds', - [-10, 10]) + prefix + 'sparse_ada_epsilon', 1e-8 + ) + bounds = strategy.get( + prefix + 'sparse_weight_bounds', [-10, 10] + ) sgd.adam.weight_bounds.extend(bounds) def set_sparse_table_config(table_data, config): for key in config: if key not in support_sparse_key_list: raise ValueError("strategy key '%s' not support" % (key)) - table_class = config.get("sparse_table_class", - "DownpourSparseTable") + table_class = config.get( + "sparse_table_class", "DownpourSparseTable" + ) if table_class not in support_sparse_table_class: raise ValueError( "support sparse_table_class: ['DownpourSparseTable'], but actual %s" - % (table_class)) + % (table_class) + ) table_data.table_class = 'MemorySparseTable' table_data.shard_num = config.get('sparse_shard_num', 1000) table_data.enable_sparse_table_cache = config.get( - 'sparse_enable_cache', True) + 'sparse_enable_cache', True + ) table_data.sparse_table_cache_rate = config.get( - 'sparse_cache_rate', 0.00055) + 'sparse_cache_rate', 0.00055 + ) table_data.sparse_table_cache_file_num = config.get( - 'sparse_cache_file_num', 16) + 'sparse_cache_file_num', 16 + ) - accessor_class = config.get("sparse_accessor_class", - "DownpourCtrAccessor") + accessor_class = config.get( + "sparse_accessor_class", "DownpourCtrAccessor" + ) if accessor_class not in support_sparse_accessor_class: raise ValueError( "support sparse_accessor_class: ['DownpourSparseValueAccessor', 'DownpourCtrAccessor', 'DownpourCtrDoubleAccessor', 'DownpourUnitAccessor', 'DownpourDoubleUnitAccessor'], but actual %s" - % (accessor_class)) + % (accessor_class) + ) if accessor_class.find("Double") >= 0: table_data.accessor.accessor_class = 'CtrDoubleAccessor' @@ -653,7 +746,8 @@ class DistributedStrategy(object): table_data.accessor.embedx_dim = config.get('sparse_embedx_dim', 8) table_data.accessor.fea_dim = table_data.accessor.embedx_dim + 3 table_data.accessor.embedx_threshold = config.get( - 'sparse_embedx_threshold', 10) + 'sparse_embedx_threshold', 10 + ) if accessor_class == 'DownpourUnitAccessor': table_data.accessor.ctr_accessor_param.show_scale = False @@ -661,23 +755,32 @@ class DistributedStrategy(object): table_data.accessor.ctr_accessor_param.show_scale = True table_data.accessor.ctr_accessor_param.nonclk_coeff = config.get( - 'sparse_nonclk_coeff', 0.1) + 'sparse_nonclk_coeff', 0.1 + ) table_data.accessor.ctr_accessor_param.click_coeff = config.get( - 'sparse_click_coeff', 1) + 'sparse_click_coeff', 1 + ) table_data.accessor.ctr_accessor_param.base_threshold = config.get( - 'sparse_base_threshold', 1.5) + 'sparse_base_threshold', 1.5 + ) table_data.accessor.ctr_accessor_param.delta_threshold = config.get( - 'sparse_delta_threshold', 0.25) + 'sparse_delta_threshold', 0.25 + ) table_data.accessor.ctr_accessor_param.delta_keep_days = config.get( - 'sparse_delta_keep_days', 16) - table_data.accessor.ctr_accessor_param.show_click_decay_rate = config.get( - 'sparse_show_click_decay_rate', 0.98) - table_data.accessor.ctr_accessor_param.delete_threshold = config.get( - 'sparse_delete_threshold', 0.8) - table_data.accessor.ctr_accessor_param.delete_after_unseen_days = config.get( - 'sparse_delete_after_unseen_days', 30) - table_data.accessor.ctr_accessor_param.ssd_unseenday_threshold = config.get( - 'sparse_ssd_unseenday_threshold', 1) + 'sparse_delta_keep_days', 16 + ) + table_data.accessor.ctr_accessor_param.show_click_decay_rate = ( + 
config.get('sparse_show_click_decay_rate', 0.98) + ) + table_data.accessor.ctr_accessor_param.delete_threshold = ( + config.get('sparse_delete_threshold', 0.8) + ) + table_data.accessor.ctr_accessor_param.delete_after_unseen_days = ( + config.get('sparse_delete_after_unseen_days', 30) + ) + table_data.accessor.ctr_accessor_param.ssd_unseenday_threshold = ( + config.get('sparse_ssd_unseenday_threshold', 1) + ) converter = config.get('sparse_converter', "") deconverter = config.get('sparse_deconverter', "") @@ -691,23 +794,33 @@ class DistributedStrategy(object): save_data2.converter = converter save_data2.deconverter = deconverter - if accessor_class == 'DownpourCtrAccessor' or accessor_class == 'DownpourCtrDoubleAccessor': - sparse_optimizer_config(table_data.accessor.embed_sgd_param, - config, '') - sparse_optimizer_config(table_data.accessor.embedx_sgd_param, - config, '') + if ( + accessor_class == 'DownpourCtrAccessor' + or accessor_class == 'DownpourCtrDoubleAccessor' + ): + sparse_optimizer_config( + table_data.accessor.embed_sgd_param, config, '' + ) + sparse_optimizer_config( + table_data.accessor.embedx_sgd_param, config, '' + ) else: - sparse_optimizer_config(table_data.accessor.embed_sgd_param, - config, 'embed_') - sparse_optimizer_config(table_data.accessor.embedx_sgd_param, - config, 'embedx_') + sparse_optimizer_config( + table_data.accessor.embed_sgd_param, config, 'embed_' + ) + sparse_optimizer_config( + table_data.accessor.embedx_sgd_param, config, 'embedx_' + ) add_graph_config(table_data.accessor.graph_sgd_param, config) if not configs: print("fleet desc config is empty") else: for table_name in configs: - if table_name == 'dense_table' or table_name == 'datanorm_table': + if ( + table_name == 'dense_table' + or table_name == 'datanorm_table' + ): continue if type(configs[table_name]) != dict: continue @@ -1136,8 +1249,9 @@ class DistributedStrategy(object): @recompute_configs.setter @is_strict_auto def recompute_configs(self, configs): - check_configs_key(self.strategy.recompute_configs, configs, - "checkpoint_configs") + check_configs_key( + self.strategy.recompute_configs, configs, "checkpoint_configs" + ) assign_configs_value(self.strategy.recompute_configs, configs) @property @@ -1230,8 +1344,9 @@ class DistributedStrategy(object): @sharding_configs.setter @is_strict_auto def sharding_configs(self, configs): - check_configs_key(self.strategy.sharding_configs, configs, - "sharding_configs") + check_configs_key( + self.strategy.sharding_configs, configs, "sharding_configs" + ) assign_configs_value(self.strategy.sharding_configs, configs) @property @@ -1412,8 +1527,9 @@ class DistributedStrategy(object): @pipeline_configs.setter @is_strict_auto def pipeline_configs(self, configs): - check_configs_key(self.strategy.pipeline_configs, configs, - "pipeline_configs") + check_configs_key( + self.strategy.pipeline_configs, configs, "pipeline_configs" + ) assign_configs_value(self.strategy.pipeline_configs, configs) @property @@ -1467,8 +1583,11 @@ class DistributedStrategy(object): @tensor_parallel_configs.setter @is_strict_auto def tensor_parallel_configs(self, configs): - check_configs_key(self.strategy.tensor_parallel_configs, configs, - "tensor_parallel_configs") + check_configs_key( + self.strategy.tensor_parallel_configs, + configs, + "tensor_parallel_configs", + ) assign_configs_value(self.strategy.tensor_parallel_configs, configs) @property @@ -1501,8 +1620,9 @@ class DistributedStrategy(object): @hybrid_configs.setter def hybrid_configs(self, configs): - 
check_configs_key(self.strategy.hybrid_configs, configs, - "hybrid_configs") + check_configs_key( + self.strategy.hybrid_configs, configs, "hybrid_configs" + ) assign_configs_value(self.strategy.hybrid_configs, configs) @property @@ -1558,8 +1678,9 @@ class DistributedStrategy(object): @localsgd_configs.setter @is_strict_auto def localsgd_configs(self, configs): - check_configs_key(self.strategy.localsgd_configs, configs, - "localsgd_configs") + check_configs_key( + self.strategy.localsgd_configs, configs, "localsgd_configs" + ) assign_configs_value(self.strategy.localsgd_configs, configs) @property @@ -1617,8 +1738,11 @@ class DistributedStrategy(object): @adaptive_localsgd_configs.setter @is_strict_auto def adaptive_localsgd_configs(self, configs): - check_configs_key(self.strategy.adaptive_localsgd_configs, configs, - "adaptive_localsgd_configs") + check_configs_key( + self.strategy.adaptive_localsgd_configs, + configs, + "adaptive_localsgd_configs", + ) assign_configs_value(self.strategy.adaptive_localsgd_configs, configs) @property @@ -1762,8 +1886,9 @@ class DistributedStrategy(object): @gradient_merge_configs.setter @is_strict_auto def gradient_merge_configs(self, configs): - check_configs_key(self.strategy.gradient_merge_configs, configs, - "gradient_configs") + check_configs_key( + self.strategy.gradient_merge_configs, configs, "gradient_configs" + ) assign_configs_value(self.strategy.gradient_merge_configs, configs) @property @@ -2243,7 +2368,8 @@ class DistributedStrategy(object): h1_format = " " + "|{{:^{}s}}|\n".format(length) h2_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format( - max_k, " " * spacing, max_v) + max_k, " " * spacing, max_v + ) border = " +" + "".join(["="] * length) + "+" line = " +" + "".join(["-"] * length) + "+" @@ -2268,37 +2394,48 @@ class DistributedStrategy(object): if getattr(self.strategy, f.name): draws += border + "\n" draws += h1_format.format( - "{}=True <-> {}_configs".format(f.name, f.name)) + "{}=True <-> {}_configs".format(f.name, f.name) + ) draws += line + "\n" - my_configs = getattr(self.strategy, - f.name + "_configs") + my_configs = getattr( + self.strategy, f.name + "_configs" + ) config_fields = my_configs.DESCRIPTOR.fields for ff in config_fields: if isinstance( - getattr(my_configs, - ff.name), google.protobuf.pyext. 
- _message.RepeatedScalarContainer): + getattr(my_configs, ff.name), + google.protobuf.pyext._message.RepeatedScalarContainer, + ): values = getattr(my_configs, ff.name) for i, v in enumerate(values): if i == 0: draws += h2_format.format( - ff.name, str(v)) + ff.name, str(v) + ) else: draws += h2_format.format( - "", str(v)) + "", str(v) + ) else: draws += h2_format.format( ff.name, - str(getattr(my_configs, ff.name))) + str(getattr(my_configs, ff.name)), + ) else: env_draws += h2_format.format( - f.name, str(getattr(self.strategy, f.name))) + f.name, str(getattr(self.strategy, f.name)) + ) else: env_draws += h2_format.format( - f.name, str(getattr(self.strategy, f.name))) - - result_res = draws + border + "\n" + h1_format.format( - "Environment Flags, Communication Flags") + f.name, str(getattr(self.strategy, f.name)) + ) + + result_res = ( + draws + + border + + "\n" + + h1_format.format("Environment Flags, Communication Flags") + ) result_res += env_draws build_strategy_str = border + "\n" @@ -2308,7 +2445,8 @@ class DistributedStrategy(object): fields = self.strategy.build_strategy.DESCRIPTOR.fields for f in fields: build_strategy_str += h2_format.format( - f.name, str(getattr(self.strategy.build_strategy, f.name))) + f.name, str(getattr(self.strategy.build_strategy, f.name)) + ) build_strategy_str += border + "\n" execution_strategy_str = h1_format.format("Execution Strategy") @@ -2317,7 +2455,8 @@ class DistributedStrategy(object): fields = self.strategy.execution_strategy.DESCRIPTOR.fields for f in fields: execution_strategy_str += h2_format.format( - f.name, str(getattr(self.strategy.execution_strategy, f.name))) + f.name, str(getattr(self.strategy.execution_strategy, f.name)) + ) execution_strategy_str += border + "\n" result_res += build_strategy_str + execution_strategy_str diff --git a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py index c2e55cfc38d63f65ba42e7d9ec375b0aff40d426..380a9b8c177af3115b61dda74efc1089611af975 100755 --- a/python/paddle/distributed/fleet/base/meta_optimizer_factory.py +++ b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py @@ -17,7 +17,8 @@ from ..meta_optimizers import * # noqa: F401, F403 __all__ = [] meta_optimizer_names = list( - filter(lambda name: name.endswith("Optimizer"), dir())) + filter(lambda name: name.endswith("Optimizer"), dir()) +) # Because HybridParallelOptimizer is dygraph optimizer, it # should be removed @@ -26,7 +27,6 @@ meta_optimizer_names.remove("HeterParallelOptimizer") class MetaOptimizerFactory(object): - def __init__(self): pass diff --git a/python/paddle/distributed/fleet/base/orthogonal_strategy.py b/python/paddle/distributed/fleet/base/orthogonal_strategy.py index d2ba6e4461b2444a113c37da6d69fee0304e9132..d0fec2cfdb2d574a45b2e2c6c72118892f558b60 100644 --- a/python/paddle/distributed/fleet/base/orthogonal_strategy.py +++ b/python/paddle/distributed/fleet/base/orthogonal_strategy.py @@ -19,7 +19,7 @@ import paddle.distributed as dist from paddle.distributed.fleet.base.strategy_group import StrategyGroupBase -class OrthogonalStrategy(): +class OrthogonalStrategy: """ A hybrid of multiple distributed strategies. Strategies need to be orthogonal, means the ranks are organized like a square if there are two strategies, a cube if there aree three strategies, etc. 
@@ -57,8 +57,9 @@ class OrthogonalStrategy(): strategy[0] for strategy in list_of_strategy ] self._list_of_degree = [strategy[1] for strategy in list_of_strategy] - self._coordinate = collections.namedtuple('Coordinate', - self._list_of_strategy_name) + self._coordinate = collections.namedtuple( + 'Coordinate', self._list_of_strategy_name + ) self._check_valid_strategy() ranges = [range(degree) for degree in self._list_of_degree] @@ -66,14 +67,16 @@ class OrthogonalStrategy(): self._coordinate(*coord) for coord in itertools.product(*ranges) ] self._coord_to_rank_dict = dict( - zip(list_of_coord, range(len(list_of_coord)))) + zip(list_of_coord, range(len(list_of_coord))) + ) for idx, strategy in enumerate(list_of_strategy): strategy_name = strategy[0] self._name_to_degree_dict[strategy_name] = strategy[1] self._rank_list_dict[strategy_name] = self._calc_rank_list(idx) self._name_to_group_dict[strategy_name] = strategy[2]( - self._rank_list_dict[strategy_name]) + self._rank_list_dict[strategy_name] + ) self._name_to_fused_group_dict = {} self._create_fused_group() @@ -88,8 +91,9 @@ class OrthogonalStrategy(): Returns: An instance of specific strategy group. """ - assert name in self._list_of_strategy_name, "Strategy group {} is not created.".format( - name) + assert ( + name in self._list_of_strategy_name + ), "Strategy group {} is not created.".format(name) return self._name_to_group_dict[name] def fused_strategy_group(self, name): @@ -102,8 +106,9 @@ class OrthogonalStrategy(): Returns: (StrategyGroupBase): An instance of strategy group. """ - assert name in self._name_to_fused_group_dict, "Fused strategy group {} is not created.".format( - name) + assert ( + name in self._name_to_fused_group_dict + ), "Fused strategy group {} is not created.".format(name) return self._name_to_fused_group_dict[name] def rank_in_strategy(self, name): @@ -116,33 +121,42 @@ class OrthogonalStrategy(): Returns: (Integer): Local rank in specific strategy. 
""" - assert name in self._list_of_strategy_name, "Strategy group {} is not created.".format( - name) + assert ( + name in self._list_of_strategy_name + ), "Strategy group {} is not created.".format(name) return self._name_to_group_dict[name].group.rank def _check_valid_strategy(self): assert len(self._list_of_strategy_name) == len( set(self._list_of_strategy_name) ), "Defined duplicated strategies: {}".format(list_of_strategy) - num_of_ranks = functools.reduce(lambda x, y: x * y, - self._list_of_degree) - assert num_of_ranks == dist.get_world_size( + num_of_ranks = functools.reduce( + lambda x, y: x * y, self._list_of_degree + ) + assert ( + num_of_ranks == dist.get_world_size() ), "There are total {} ranks, but need {} ranks in this strategy.".format( - dist.get_world_size(), num_of_ranks) + dist.get_world_size(), num_of_ranks + ) for fused_strategy in self._fused_strategy_dict.values(): for strategy in fused_strategy: - assert strategy in self._list_of_strategy_name, "Can not fuse strategy {} without defined previous.".format( - strategy) + assert ( + strategy in self._list_of_strategy_name + ), "Can not fuse strategy {} without defined previous.".format( + strategy + ) def _create_fused_group(self): for name in self._fused_strategy_dict: fused_strategy = self._fused_strategy_dict[name] non_fused_strategy = list( - set(self._list_of_strategy_name).difference(fused_strategy)) + set(self._list_of_strategy_name).difference(fused_strategy) + ) non_fused_ranges = [] for strategy in non_fused_strategy: non_fused_ranges.append( - range(self._name_to_degree_dict[strategy])) + range(self._name_to_degree_dict[strategy]) + ) fused_ranges = [] for strategy in fused_strategy: fused_ranges.append(range(self._name_to_degree_dict[strategy])) @@ -156,8 +170,9 @@ class OrthogonalStrategy(): for fused_ranks in itertools.product(*fused_ranges): for i, fused_rank in enumerate(fused_ranks): coord_dict[fused_strategy[i]] = fused_rank - ranks.append(self._coord_to_rank_dict[self._coordinate( - **coord_dict)]) + ranks.append( + self._coord_to_rank_dict[self._coordinate(**coord_dict)] + ) rank_list.append(ranks) self._name_to_fused_group_dict[name] = StrategyGroupBase(rank_list) @@ -175,7 +190,8 @@ class OrthogonalStrategy(): coord_list = list(coord) coord_list.insert(strategy_axis, val) ranks.append( - self._coord_to_rank_dict[self._coordinate(*coord_list)]) + self._coord_to_rank_dict[self._coordinate(*coord_list)] + ) rank_list.append(ranks) return rank_list diff --git a/python/paddle/distributed/fleet/base/private_helper_function.py b/python/paddle/distributed/fleet/base/private_helper_function.py index 2745f398152a4bdab4bbb9549ca811185cc55a17..b67146065899a1bbf72ea045ea20e34a176029db 100644 --- a/python/paddle/distributed/fleet/base/private_helper_function.py +++ b/python/paddle/distributed/fleet/base/private_helper_function.py @@ -39,8 +39,9 @@ def wait_server_ready(endpoints): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): @@ -52,8 +53,9 @@ def wait_server_ready(endpoints): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + str(not_ready_endpoints) + - "\n") + sys.stderr.write( + "not ready endpoints:" + 
str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index a0b39c3b7a3729e006ece3e770a3c80cc85bc5c3..28dace611a092391556b12e68a28775a55b20ffb 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -20,7 +20,9 @@ from multiprocessing import Process, Manager import paddle import paddle.fluid as fluid -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) __all__ = [] @@ -49,10 +51,13 @@ class Gloo(object): self._nodes_comm = None self._comm_world = ["worker", "server", "all"] - self._err_init = "gloo is not initialized, will not communicator with other nodes" + self._err_init = ( + "gloo is not initialized, will not communicator with other nodes" + ) self._err_type = "gloo initialized error, please check arguments" self._err_world = "argument error, comm_world must in {}".format( - self._comm_world) + self._comm_world + ) self._is_initialized = False self._init_timeout_seconds = 3600 @@ -67,14 +72,16 @@ class Gloo(object): self._server_num = -1 self._need_init_all = False - def init(self, - rendezvous, - role, - role_id, - worker_num, - server_num, - need_init_all=False, - kwargs=None): + def init( + self, + rendezvous, + role, + role_id, + worker_num, + server_num, + need_init_all=False, + kwargs=None, + ): self._rendezvous = rendezvous self._role = role @@ -110,8 +117,9 @@ class Gloo(object): if not ip or not port: raise ValueError(self._err_type) - http_server = self._init_http(ip, port, self._prefix, - start_http_server, http_server_d) + http_server = self._init_http( + ip, port, self._prefix, start_http_server, http_server_d + ) else: raise ValueError(self._err_type) @@ -119,15 +127,15 @@ class Gloo(object): self._http_server = http_server def _init_fs(self, fs_path, prefix): - def init(rank, nodes, role): gloo = fluid.core.Gloo() gloo.set_rank(rank) gloo.set_size(nodes) gloo.set_prefix(prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store(os.path.join(fs_path, role), "", "") gloo.init() return gloo @@ -147,15 +155,15 @@ class Gloo(object): self._nodes_comm = gloo def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix): - def init(rank, nodes, role): gloo = fluid.core.Gloo() gloo.set_rank(rank) gloo.set_size(nodes) gloo.set_prefix(prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi) gloo.init() return gloo @@ -175,15 +183,17 @@ class Gloo(object): self._nodes_comm = gloo def _init_http(self, ip, port, prefix, start_http_server, http_server_d): - def __start_kv_server(http_server_d, size_d): print("start http_server: {}, {}".format(port, size_d)) from paddle.distributed.fleet.utils.http_server import KVServer + http_server = KVServer(port, size_d) http_server.start() wait_seconds = 5 - while http_server_d.get("running", - False) or not http_server.should_stop(): + while ( + http_server_d.get("running", False) + or not http_server.should_stop() + ): 
time.sleep(wait_seconds) http_server.stop() @@ -196,8 +206,9 @@ class Gloo(object): http_server_d["running"] = True # child process for http server - _http_server = Process(target=__start_kv_server, - args=(http_server_d, size_d)) + _http_server = Process( + target=__start_kv_server, args=(http_server_d, size_d) + ) _http_server.daemon = True # set running status to True # start child process @@ -210,8 +221,9 @@ class Gloo(object): gloo.set_size(nodes) gloo.set_prefix(prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_http_store(ip, port, 'worker') ep = ":".join([ip, str(port)]) wait_server_ready([ep]) @@ -285,8 +297,12 @@ class Gloo(object): gateway = None if len(item) > gateway_idx: gateway = item[gateway_idx] - if gateway and gateway != '*' and gateway != "0.0.0.0" and len( - item) > iface_idx: + if ( + gateway + and gateway != '*' + and gateway != "0.0.0.0" + and len(item) > iface_idx + ): return item[iface_idx] return "lo" @@ -294,8 +310,9 @@ class Gloo(object): """ get default physical interface """ - res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split( - "\n") + res = ( + os.popen("ip -f inet addr | awk NR%3==1").read().strip().split("\n") + ) for item in res: if "BROADCAST" in item: return item.split(":")[1].strip() @@ -470,8 +487,11 @@ class RoleMakerBase(object): def to_string(self): return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format( - self._role, self._current_id, self._worker_endpoints, - self._server_endpoints) + self._role, + self._current_id, + self._worker_endpoints, + self._server_endpoints, + ) def _all_gather(self, input, comm_world="worker"): print("warning: RoleMakerBase does not have all gather worker.") @@ -493,13 +513,13 @@ class RoleMakerBase(object): """ print("warning: RoleMakerBase does not have barrier worker.") - #def _is_heter_worker(self): + # def _is_heter_worker(self): # """ # Return is_heter_worker() of current process # """ # raise NotImplementedError("Please implement this method in child class") - #def _heter_worker_num(self): + # def _heter_worker_num(self): # """ # Get current total heter-worker number. 
# @@ -508,14 +528,14 @@ class RoleMakerBase(object): # """ # raise NotImplementedError("Please implement this method in child class") - #def _get_heter_worker_endpoints(self): + # def _get_heter_worker_endpoints(self): # """ # Returns: # string: all heter_trainers'endpoints # """ # raise NotImplementedError("Please implement this method in child class") - #def _get_heter_worker_endpoint(self): + # def _get_heter_worker_endpoint(self): # """ # Returns: # int: corresponding heter_trainer's endpoint @@ -524,7 +544,6 @@ class RoleMakerBase(object): class PaddleCloudRoleMaker(RoleMakerBase): - def __init__(self, is_collective=False, **kwargs): super(PaddleCloudRoleMaker, self).__init__() self._is_collective = is_collective @@ -578,24 +597,24 @@ class PaddleCloudRoleMaker(RoleMakerBase): def _get_stage_id(self): """ - return stage id of current heter worker - """ + return stage id of current heter worker + """ if not self._role_is_generated: self._generate_role() return self._stage_id def _get_stage_trainers(self): """ - return trainer num of all stages - """ + return trainer num of all stages + """ if not self._role_is_generated: self._generate_role() return self._stage_trainers def _get_num_stage(self): """ - return stage num - """ + return stage num + """ if not self._role_is_generated: self._generate_role() return self._stage_num @@ -667,8 +686,11 @@ class PaddleCloudRoleMaker(RoleMakerBase): """ if not self._role_is_generated: self._generate_role() - return len(self._get_pserver_endpoints() - ) if self._get_pserver_endpoints() is not None else 0 + return ( + len(self._get_pserver_endpoints()) + if self._get_pserver_endpoints() is not None + else 0 + ) def _node_num(self): """ @@ -712,7 +734,9 @@ class PaddleCloudRoleMaker(RoleMakerBase): def _get_trainer_endpoint(self): if not self._role_is_generated: self._generate_role() - assert self._role == Role.WORKER, "get_trainer_endpoint should be called by trainer" + assert ( + self._role == Role.WORKER + ), "get_trainer_endpoint should be called by trainer" return self._cur_endpoint def _get_heter_worker_endpoints(self): @@ -722,7 +746,9 @@ class PaddleCloudRoleMaker(RoleMakerBase): """ if not self._role_is_generated: self._generate_role() - assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized" + assert ( + self._heter_trainer_endpoints != [] + ), "Heter Worker Endpoints Not initialized" return self._heter_trainer_endpoints def _get_heter_worker_endpoint(self): @@ -732,7 +758,9 @@ class PaddleCloudRoleMaker(RoleMakerBase): """ if not self._role_is_generated: self._generate_role() - assert self._role == Role.HETER_WORKER, "_get_heter_worker_endpoint should be invoked by heter worker" + assert ( + self._role == Role.HETER_WORKER + ), "_get_heter_worker_endpoint should be invoked by heter worker" return self._cur_endpoint def _get_pserver_endpoints(self): @@ -755,7 +783,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): if not self._role_is_generated: self._generate_role() assert self._role in ( - Role.WORKER, Role.HETER_WORKER + Role.WORKER, + Role.HETER_WORKER, ), "_get_previous_trainers should be invoked by trainer or heter worker" return self._previous_heter_trainer_endpoints @@ -766,7 +795,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): if not self._role_is_generated: self._generate_role() assert self._role in ( - Role.WORKER, Role.HETER_WORKER + Role.WORKER, + Role.HETER_WORKER, ), "_get_next_trainers should be invoked by trainer or heter worker" return self._next_heter_trainer_endpoints @@ -820,8 +850,9 @@ class 
PaddleCloudRoleMaker(RoleMakerBase): else: self._worker_endpoints = [] - self._coordinator_endpoints = os.getenv("PADDLE_COORDINATOR_ENDPOINTS", - "") + self._coordinator_endpoints = os.getenv( + "PADDLE_COORDINATOR_ENDPOINTS", "" + ) if self._coordinator_endpoints == "": print("fl-ps > coordinator address is null!") else: @@ -838,22 +869,31 @@ class PaddleCloudRoleMaker(RoleMakerBase): training_role = os.getenv("TRAINING_ROLE", None) if training_role == None: raise ValueError( - "Can not find TRAINING_ROLE, please check your environment.") + "Can not find TRAINING_ROLE, please check your environment." + ) if training_role not in [ - "TRAINER", "PSERVER", "HETER_TRAINER", "COORDINATOR" + "TRAINER", + "PSERVER", + "HETER_TRAINER", + "COORDINATOR", ]: raise ValueError( - "TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER or COORDINATOR, but get {}, please check your environment." - .format(training_role)) + "TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER or COORDINATOR, but get {}, please check your environment.".format( + training_role + ) + ) # For Heter Parameter Server env setting next_heter_trainer_eplist = os.getenv( - "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST", "") + "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST", "" + ) previous_heter_trainer_eplist = os.getenv( - "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST", "") + "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST", "" + ) all_heter_trainer_eplist = os.getenv( - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST", "") + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST", "" + ) if all_heter_trainer_eplist != "": self._heter_trainer_endpoints = all_heter_trainer_eplist.split(",") @@ -863,11 +903,13 @@ class PaddleCloudRoleMaker(RoleMakerBase): if previous_heter_trainer_eplist == "": assert training_role in ( "TRAINER", - "PSERVER"), "training_role should be trainer or pserver" + "PSERVER", + ), "training_role should be trainer or pserver" else: try: - self._previous_heter_trainer_endpoints = previous_heter_trainer_eplist.split( - ",") + self._previous_heter_trainer_endpoints = ( + previous_heter_trainer_eplist.split(",") + ) except: raise ValueError( "Can not Find PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ." @@ -875,12 +917,14 @@ class PaddleCloudRoleMaker(RoleMakerBase): if next_heter_trainer_eplist == "": assert training_role in ( - "HETER_TRAINER", "PSERVER" + "HETER_TRAINER", + "PSERVER", ), "training_role should be heter trainer or pserver" else: try: - self._next_heter_trainer_endpoints = next_heter_trainer_eplist.split( - ",") + self._next_heter_trainer_endpoints = ( + next_heter_trainer_eplist.split(",") + ) except: raise ValueError( "Can not Find PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ." @@ -902,7 +946,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._stage_id = os.getenv("STAGE_ID", None) if self._stage_id == None: raise ValueError( - "Can not find STAGE_ID, please check your environment.") + "Can not find STAGE_ID, please check your environment." + ) self._stage_id = int(self._stage_id) self._stage_num = os.getenv("STAGE_NUM", None) if self._stage_num == None: @@ -910,8 +955,9 @@ class PaddleCloudRoleMaker(RoleMakerBase): "Can not find STAGE_NUM, please check your environment." 
) self._stage_num = int(self._stage_num) - self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", - None) + self._stage_trainers = os.getenv( + "PADDLE_STAGE_TRAINERS_NUM", None + ) if self._stage_trainers == None: raise ValueError( "Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment." @@ -920,11 +966,13 @@ class PaddleCloudRoleMaker(RoleMakerBase): cur_port = os.getenv("PADDLE_PORT", None) if cur_port == None: raise ValueError( - "Can not find PADDLE_PORT, please check your environment.") + "Can not find PADDLE_PORT, please check your environment." + ) cur_ip = os.getenv("POD_IP", None) if cur_ip == None: raise ValueError( - "Can not find POD_IP, please check your environment.") + "Can not find POD_IP, please check your environment." + ) curr_endpoint = ":".join([cur_ip, cur_port]) self._cur_endpoint = curr_endpoint elif training_role == "COORDINATOR": @@ -936,11 +984,13 @@ class PaddleCloudRoleMaker(RoleMakerBase): cur_port = os.getenv("PADDLE_PORT", None) if cur_port == None: raise ValueError( - "Can not find PADDLE_PORT, please check your environment.") + "Can not find PADDLE_PORT, please check your environment." + ) cur_ip = os.getenv("POD_IP", None) if cur_ip == None: raise ValueError( - "Can not find POD_IP, please check your environment.") + "Can not find POD_IP, please check your environment." + ) curr_endpoint = ":".join([cur_ip, cur_port]) self._cur_endpoint = curr_endpoint current_id = self._server_endpoints.index(self._cur_endpoint) @@ -949,12 +999,14 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._stage_id = os.getenv("STAGE_ID", None) if self._stage_id == None: raise ValueError( - "Can not find STAGE_ID, please check your environment.") + "Can not find STAGE_ID, please check your environment." + ) self._stage_id = int(self._stage_id) self._stage_num = os.getenv("STAGE_NUM", None) if self._stage_num == None: raise ValueError( - "Can not find STAGE_NUM, please check your environment.") + "Can not find STAGE_NUM, please check your environment." + ) self._stage_num = int(self._stage_num) self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", None) @@ -964,47 +1016,57 @@ class PaddleCloudRoleMaker(RoleMakerBase): ) self._stage_trainers = eval(self._stage_trainers) - self._heter_trainer_device_type = os.getenv("HETER_DEVICE_TYPE", - None) + self._heter_trainer_device_type = os.getenv( + "HETER_DEVICE_TYPE", None + ) if self._heter_trainer_device_type == None: raise ValueError( "Can not find HETER_DEVICE_TYPE, please check your environment." ) assert self._heter_trainer_device_type in ( - "cpu", "gpu", - "xpu"), "HETER_DEVICE_TYPE should be cpu,gpu or xpu" + "cpu", + "gpu", + "xpu", + ), "HETER_DEVICE_TYPE should be cpu,gpu or xpu" if self._heter_trainer_device_type == "gpu": heter_device_id = os.getenv("FLAGS_selected_gpus", "0") self._heter_trainer_device = ":".join( - (self._heter_trainer_device_type, heter_device_id)) + (self._heter_trainer_device_type, heter_device_id) + ) if self._heter_trainer_device == "xpu": heter_device_id = os.getenv("FLAGS_selected_xpus", "0") self._heter_trainer_device = ":".join( - (self._heter_trainer_device_type, heter_device_id)) + (self._heter_trainer_device_type, heter_device_id) + ) cur_port = os.getenv("PADDLE_PORT", None) if cur_port == None: raise ValueError( - "Can not find PADDLE_PORT, please check your environment.") + "Can not find PADDLE_PORT, please check your environment." 
+ ) cur_ip = os.getenv("POD_IP", None) if cur_ip == None: raise ValueError( - "Can not find POD_IP, please check your environment.") + "Can not find POD_IP, please check your environment." + ) curr_endpoint = ":".join([cur_ip, cur_port]) self._cur_endpoint = curr_endpoint - current_id = all_heter_trainer_eplist.split(",").index( - curr_endpoint) + trainers_num + current_id = ( + all_heter_trainer_eplist.split(",").index(curr_endpoint) + + trainers_num + ) self._trainers_num = trainers_num self._role = role self._current_id = current_id self._nodes_num = len( - set([x.split(':')[0] for x in self._worker_endpoints])) + set([x.split(':')[0] for x in self._worker_endpoints]) + ) def _collective_env(self): self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") - assert (self._training_role == "TRAINER") + assert self._training_role == "TRAINER" self._role = Role.WORKER self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS") self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT") @@ -1016,7 +1078,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._worker_endpoints = self._worker_endpoints.split(",") self._trainers_num = len(self._worker_endpoints) self._nodes_num = len( - set([x.split(':')[0] for x in self._worker_endpoints])) + set([x.split(':')[0] for x in self._worker_endpoints]) + ) self._local_rank = os.getenv("PADDLE_RANK_IN_NODE") self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS") self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS") @@ -1031,7 +1094,9 @@ class PaddleCloudRoleMaker(RoleMakerBase): rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0")) prefix = os.getenv("SYS_JOB_ID", "") if rendezvous_type not in [ - Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE + Gloo.RENDEZVOUS.HDFS, + Gloo.RENDEZVOUS.HTTP, + Gloo.RENDEZVOUS.FILE, ]: raise ValueError(self._gloo._err_type) @@ -1081,16 +1146,21 @@ class PaddleCloudRoleMaker(RoleMakerBase): type = "HTTP" else: type = "FILE" - print("Gloo init with {}: need_init_all: {}, args: {}".format( - type, need_init_all, kwargs)) - - self._gloo.init(rendezvous=rendezvous_type, - role=self._role, - role_id=self._role_id(), - worker_num=self._worker_num(), - server_num=self._server_num(), - need_init_all=need_init_all, - kwargs=kwargs) + print( + "Gloo init with {}: need_init_all: {}, args: {}".format( + type, need_init_all, kwargs + ) + ) + + self._gloo.init( + rendezvous=rendezvous_type, + role=self._role, + role_id=self._role_id(), + worker_num=self._worker_num(), + server_num=self._server_num(), + need_init_all=need_init_all, + kwargs=kwargs, + ) if rendezvous_type == Gloo.RENDEZVOUS.HTTP: http_server_d['running'] = False @@ -1110,11 +1180,10 @@ class PaddleCloudRoleMaker(RoleMakerBase): class UserDefinedRoleMaker(PaddleCloudRoleMaker): - def __init__(self, is_collective=False, init_gloo=False, **kwargs): - super(UserDefinedRoleMaker, self).__init__(is_collective=is_collective, - init_gloo=init_gloo, - **kwargs) + super(UserDefinedRoleMaker, self).__init__( + is_collective=is_collective, init_gloo=init_gloo, **kwargs + ) self._init_gloo = init_gloo def _user_defined_ps_env(self): @@ -1123,19 +1192,22 @@ class UserDefinedRoleMaker(PaddleCloudRoleMaker): self._trainers_num = self._kwargs.get("worker_num", 0) if self._trainers_num == 0: - assert (len(self._worker_endpoints) > 0) + assert len(self._worker_endpoints) > 0 self._trainers_num = len(self._worker_endpoints) self._role = self._kwargs.get("role") self._current_id = 
self._kwargs.get("current_id") - if self._role == Role.WORKER and len( - self._worker_endpoints) > self._current_id: + if ( + self._role == Role.WORKER + and len(self._worker_endpoints) > self._current_id + ): self._cur_endpoint = self._worker_endpoints[self._current_id] elif self._role == Role.SERVER: self._cur_endpoint = self._server_endpoints[self._current_id] self._nodes_num = len( - set([x.split(':')[0] for x in self._worker_endpoints])) + set([x.split(':')[0] for x in self._worker_endpoints]) + ) def _user_defined_collective_env(self): self._worker_endpoints = self._kwargs.get("worker_endpoints") @@ -1143,7 +1215,8 @@ class UserDefinedRoleMaker(PaddleCloudRoleMaker): self._trainers_num = len(self._worker_endpoints) self._training_role = Role.WORKER self._nodes_num = len( - set([x.split(':')[0] for x in self._worker_endpoints])) + set([x.split(':')[0] for x in self._worker_endpoints]) + ) def _generate_role(self): """ diff --git a/python/paddle/distributed/fleet/base/runtime_factory.py b/python/paddle/distributed/fleet/base/runtime_factory.py index 3fa7dbb285c4e03ab0fe96dc3d02a99f8050ad53..e9006409674a724727098c5ef04af016e741d376 100644 --- a/python/paddle/distributed/fleet/base/runtime_factory.py +++ b/python/paddle/distributed/fleet/base/runtime_factory.py @@ -18,7 +18,6 @@ __all__ = [] class RuntimeFactory(object): - def __init__(self): pass diff --git a/python/paddle/distributed/fleet/base/strategy_compiler.py b/python/paddle/distributed/fleet/base/strategy_compiler.py index fa9dc1e0cdc14b938c0bf16e2ea754c6b8f103c5..14db852f7708df7e1f489a47586fabb73f6c9018 100644 --- a/python/paddle/distributed/fleet/base/strategy_compiler.py +++ b/python/paddle/distributed/fleet/base/strategy_compiler.py @@ -107,7 +107,6 @@ def maximum_path_len_algo(optimizer_list): class StrategyCompilerBase(object): - def __init__(self): pass @@ -143,6 +142,7 @@ class StrategyCompiler(StrategyCompilerBase): def _get_valid_strategy(self, dist_strategy, can_not_apply_optimizer_list): import copy + valid_strategy = copy.deepcopy(dist_strategy) invalid_optimizers = [] for candidate in self._meta_optimizer_candidates: @@ -171,9 +171,15 @@ class StrategyCompiler(StrategyCompilerBase): we will remove grad fusion and sync batch-norm """ - def generate_optimizer(self, loss, role_maker, optimizer, - user_defined_strategy, meta_optimizer_list, - graph_optimizer_list): + def generate_optimizer( + self, + loss, + role_maker, + optimizer, + user_defined_strategy, + meta_optimizer_list, + graph_optimizer_list, + ): self._user_defined_strategy = user_defined_strategy self._meta_optimizer_candidates = meta_optimizer_list self._graph_optimizer_candidates = graph_optimizer_list @@ -190,12 +196,19 @@ class StrategyCompiler(StrategyCompilerBase): # and graph_optimizer, the corresponding distributed strategy # should be updated. 
- self._meta_optimizers = [] if meta_optimizers is None else meta_optimizers - self._graph_optimizers = [] if graph_optimizers is None else graph_optimizers + self._meta_optimizers = ( + [] if meta_optimizers is None else meta_optimizers + ) + self._graph_optimizers = ( + [] if graph_optimizers is None else graph_optimizers + ) - return_meta = None if meta_optimizers == None else meta_optimizers[0] - return_graph = None if graph_optimizers == None else graph_optimizers[ - 0] + return_meta = ( + None if meta_optimizers == None else meta_optimizers[0] + ) + return_graph = ( + None if graph_optimizers == None else graph_optimizers[0] + ) if meta_optimizers == None or graph_optimizers == None: return return_meta, return_graph @@ -205,7 +218,10 @@ class StrategyCompiler(StrategyCompilerBase): need_graph_opt = True for graph_opt in graph_optimizers: for program_opt in meta_optimizers: - if graph_opt.__class__.__name__ in program_opt.meta_optimizers_black_list: + if ( + graph_opt.__class__.__name__ + in program_opt.meta_optimizers_black_list + ): need_graph_opt = False if not need_graph_opt: return_graph = None diff --git a/python/paddle/distributed/fleet/base/strategy_group.py b/python/paddle/distributed/fleet/base/strategy_group.py index 94ab04b6c4e9391e8570135e934b1a5f70609480..5b0ac3cfe0e40e7b8a6513ef367f1737e30e7bd8 100644 --- a/python/paddle/distributed/fleet/base/strategy_group.py +++ b/python/paddle/distributed/fleet/base/strategy_group.py @@ -15,7 +15,7 @@ import paddle.distributed as dist -class StrategyGroupBase(): +class StrategyGroupBase: """ The base class of communication group with distributed strategy. @@ -39,7 +39,8 @@ class StrategyGroupBase(): """ def __init__(self, list_of_ranks): - assert dist.is_initialized( + assert ( + dist.is_initialized() ), "The global communication group need to be initialized." assert len(list_of_ranks), "The list_of_ranks can not be empty." 
self._rank = dist.get_rank() @@ -57,8 +58,9 @@ class StrategyGroupBase(): world_size_list = [] for ranks in self._list_of_ranks: world_size_list.append(len(ranks)) - is_value = all(world_size == world_size_list[0] - for world_size in world_size_list) + is_value = all( + world_size == world_size_list[0] for world_size in world_size_list + ) return world_size_list[0] if is_value else world_size_list @property @@ -77,10 +79,11 @@ class StrategyGroupBase(): group = dist.new_group(ranks=ranks) if self._rank in ranks: list_of_group.append(group) - assert len( - list_of_group - ) > 0, "Rank {} does not belong to the list_of_ranks {}.".format( - self._rank, self._list_of_ranks) + assert ( + len(list_of_group) > 0 + ), "Rank {} does not belong to the list_of_ranks {}.".format( + self._rank, self._list_of_ranks + ) return list_of_group if len(list_of_group) > 1 else list_of_group[0] @@ -99,8 +102,8 @@ class DPGroup(StrategyGroupBase): def __init__(self, list_of_ranks): super(DPGroup, self).__init__(list_of_ranks) assert not isinstance( - self.group, list), "Rank {} belongs to multi dp groups".format( - self._rank) + self.group, list + ), "Rank {} belongs to multi dp groups".format(self._rank) class MPGroup(StrategyGroupBase): @@ -118,8 +121,8 @@ class MPGroup(StrategyGroupBase): def __init__(self, list_of_ranks): super(MPGroup, self).__init__(list_of_ranks) assert not isinstance( - self.group, list), "Rank {} belongs to multi mp groups".format( - self._rank) + self.group, list + ), "Rank {} belongs to multi mp groups".format(self._rank) class ShardingGroup(StrategyGroupBase): @@ -137,8 +140,8 @@ class ShardingGroup(StrategyGroupBase): def __init__(self, list_of_ranks): super(ShardingGroup, self).__init__(list_of_ranks) assert not isinstance( - self.group, - list), "Rank {} belongs to multi sharding groups".format(self._rank) + self.group, list + ), "Rank {} belongs to multi sharding groups".format(self._rank) class PPGroup(StrategyGroupBase): @@ -156,8 +159,8 @@ class PPGroup(StrategyGroupBase): def __init__(self, list_of_ranks): super(PPGroup, self).__init__(list_of_ranks) assert not isinstance( - self.group, list), "Rank {} belongs to multi pp groups".format( - self._rank) + self.group, list + ), "Rank {} belongs to multi pp groups".format(self._rank) self._send_next_group = None self._send_prev_group = None @@ -197,7 +200,12 @@ class PPGroup(StrategyGroupBase): Returns: Four subgroups including send/recv to/from prev/next. 
""" - return self._send_next_group, self._send_prev_group, self._recv_next_group, self._recv_prev_group + return ( + self._send_next_group, + self._send_prev_group, + self._recv_next_group, + self._recv_prev_group, + ) def _create_p2p_group(self): degree = self.world_size @@ -223,5 +231,11 @@ class PPGroup(StrategyGroupBase): elif self._rank == prev_rank: self._recv_next_group = prev_group - assert self._send_next_group and self._send_prev_group and self._recv_next_group and self._recv_prev_group,\ - "Error occurs while creating p2p group for rank {}.".format(self._rank) + assert ( + self._send_next_group + and self._send_prev_group + and self._recv_next_group + and self._recv_prev_group + ), "Error occurs while creating p2p group for rank {}.".format( + self._rank + ) diff --git a/python/paddle/distributed/fleet/base/topology.py b/python/paddle/distributed/fleet/base/topology.py index d679894d3e208ebe10be376a6570d50b74d146ed..6fa1521d58c46427d7ae8691656b5f935a4d49f5 100644 --- a/python/paddle/distributed/fleet/base/topology.py +++ b/python/paddle/distributed/fleet/base/topology.py @@ -40,6 +40,7 @@ class ParallelMode(object): print(parallel_mode.DATA_PARALLEL) # 0 """ + DATA_PARALLEL = 0 TENSOR_PARALLEL = 1 PIPELINE_PARALLEL = 2 @@ -47,14 +48,16 @@ class ParallelMode(object): class CommunicateTopology(object): - - def __init__(self, - hybrid_group_names=["data", "pipe", "sharding", "model"], - dims=[1, 1, 1, 1]): + def __init__( + self, + hybrid_group_names=["data", "pipe", "sharding", "model"], + dims=[1, 1, 1, 1], + ): self._parallel_names = hybrid_group_names self._dims = dims - self.coordinate = collections.namedtuple('Coordinate', - self._parallel_names) + self.coordinate = collections.namedtuple( + 'Coordinate', self._parallel_names + ) self._world_size = reduce(lambda x, y: x * y, self._dims) ranges = [range(d) for d in self._dims] @@ -62,7 +65,8 @@ class CommunicateTopology(object): self._coord2rank = dict(zip(all_coordinate, range(len(all_coordinate)))) self._rank2coord = dict( - zip(self._coord2rank.values(), self._coord2rank.keys())) + zip(self._coord2rank.values(), self._coord2rank.keys()) + ) def get_hybrid_group_names(self): return self._parallel_names @@ -87,7 +91,8 @@ class CommunicateTopology(object): def get_axis_list(self, axis_name, index): axis = self._parallel_names.index(axis_name) ranks = [ - self._coord2rank[coord] for coord in self._coord2rank.keys() + self._coord2rank[coord] + for coord in self._coord2rank.keys() if coord[axis] == index ] ranks.sort() @@ -129,7 +134,6 @@ class CommunicateTopology(object): class HybridCommunicateGroup(object): - def __init__(self, topology): self.nranks = paddle.distributed.get_world_size() self.global_rank = paddle.distributed.get_rank() @@ -145,10 +149,16 @@ class HybridCommunicateGroup(object): self._sharding_parallel_id = self._get_sharding_parallel_id() self.stage_id = self._get_pipe_parallel_id() - assert self._check_vaild_topo( - ), "Here is an unreasonable topogy setting. world_size: {}, but" \ - "mp_num: {}, sharding_num: {}, pp_num: {}, dp_num: {}".format(self.nranks, - self._mp_degree, self._sharding_degree, self._pp_degree, self._dp_degree) + assert self._check_vaild_topo(), ( + "Here is an unreasonable topogy setting. 
world_size: {}, but" + "mp_num: {}, sharding_num: {}, pp_num: {}, dp_num: {}".format( + self.nranks, + self._mp_degree, + self._sharding_degree, + self._pp_degree, + self._dp_degree, + ) + ) # create comm group for data parallel self._dp_group, self._dp_comm_group = self._set_comm_group("data") @@ -161,26 +171,43 @@ class HybridCommunicateGroup(object): # create comm group for sharding parallel self._sharding_group, self._sharding_comm_group = self._set_comm_group( - "sharding") + "sharding" + ) # create global group for check inf_nan / clip global norm self._check_group, self._check_comm_group = self._set_check_group( - "data") + "data" + ) # create p2p group - self.is_first_stage = (self.stage_id == 0) - self.is_last_stage = (self.stage_id == (self._pp_degree - 1)) + self.is_first_stage = self.stage_id == 0 + self.is_last_stage = self.stage_id == (self._pp_degree - 1) # create p2p_groups if self._pp_degree > 1: self._set_p2p_group() - debug_str = "HybridParallelInfo: rank_id: %d, mp_degree: %d, " \ - "sharding_degree: %d, pp_degree: %d, dp_degree: %d" % (self.global_rank, self._mp_degree, - self._sharding_degree, self._pp_degree, self._dp_degree) - debug_str += ", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s" % ( - self._mp_group, self._sharding_group, self._pp_group, - self._dp_group, self._check_group) + debug_str = ( + "HybridParallelInfo: rank_id: %d, mp_degree: %d, " + "sharding_degree: %d, pp_degree: %d, dp_degree: %d" + % ( + self.global_rank, + self._mp_degree, + self._sharding_degree, + self._pp_degree, + self._dp_degree, + ) + ) + debug_str += ( + ", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s" + % ( + self._mp_group, + self._sharding_group, + self._pp_group, + self._dp_group, + self._check_group, + ) + ) logger.info(debug_str) global _HYBRID_PARALLEL_GROUP @@ -192,7 +219,12 @@ class HybridCommunicateGroup(object): # adding its parallel logic within that parallelism # when use sharding alone, it should have its own parallelism for its parallel logic # TODO modify 3 others parallel to support sharding - if self._mp_degree == 1 and self._pp_degree == 1 and self._dp_degree == 1 and self._sharding_degree > 1: + if ( + self._mp_degree == 1 + and self._pp_degree == 1 + and self._dp_degree == 1 + and self._sharding_degree > 1 + ): return ParallelMode.SHARDING_PARALLEL elif self._mp_degree == 1 and self._pp_degree == 1: return ParallelMode.DATA_PARALLEL @@ -203,7 +235,13 @@ class HybridCommunicateGroup(object): return ParallelMode.PIPELINE_PARALLEL def _check_vaild_topo(self): - return self._dp_degree * self._mp_degree * self._pp_degree * self._sharding_degree == self.nranks + return ( + self._dp_degree + * self._mp_degree + * self._pp_degree + * self._sharding_degree + == self.nranks + ) def _set_comm_group(self, parallel_method="data"): parallel_group = [] @@ -265,14 +303,16 @@ class HybridCommunicateGroup(object): self.prev_rank = prev_rank next_group = paddle.distributed.new_group( - ranks=[curr_rank, next_rank]) + ranks=[curr_rank, next_rank] + ) if self.global_rank == curr_rank: self.send_next_group = next_group elif self.global_rank == next_rank: self.recv_prev_group = next_group prev_group = paddle.distributed.new_group( - ranks=[prev_rank, curr_rank]) + ranks=[prev_rank, curr_rank] + ) if self.global_rank == curr_rank: self.send_prev_group = prev_group @@ -336,7 +376,12 @@ class HybridCommunicateGroup(object): return self._pp_comm_group def get_p2p_groups(self): - return self.send_next_group, 
self.send_prev_group, self.recv_next_group, self.recv_prev_group + return ( + self.send_next_group, + self.send_prev_group, + self.recv_next_group, + self.recv_prev_group, + ) # sharding parallel message: def _get_sharding_parallel_id(self): @@ -360,23 +405,25 @@ class HybridCommunicateGroup(object): return self._check_comm_group def get_rank_from_stage(self, stage_id, **kwargs): - return self._topo.get_rank_from_stage(self.global_rank, - pipe=stage_id, - **kwargs) + return self._topo.get_rank_from_stage( + self.global_rank, pipe=stage_id, **kwargs + ) class _CommunicateGroup(object): - """ tmp for static """ + """tmp for static""" def __init__(self): global _HYBRID_PARALLEL_GROUP _HYBRID_PARALLEL_GROUP = self self.groups = dict() - def set_comm_group(self, group_name, group_rank, group_size, ring_id, - group_ranks): - group = paddle.distributed.collective.Group(group_rank, ring_id, - group_ranks) + def set_comm_group( + self, group_name, group_rank, group_size, ring_id, group_ranks + ): + group = paddle.distributed.collective.Group( + group_rank, ring_id, group_ranks + ) self.groups[group_name] = group def get_group(self, group_name): diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py index 95d287811e6cbe2dd28da8144742eac8e99e4730..5af1acb9413586ca5fd8ce1d07858f6f6eee30cb 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -32,7 +32,6 @@ __all__ = [] class UtilFactory(object): - def _create_util(self, context=None): util = UtilBase() if context is not None and "valid_strategy" in context: @@ -43,7 +42,6 @@ class UtilFactory(object): class UtilBase(object): - def __init__(self): self.role_maker = None self.dist_strategy = None @@ -221,7 +219,7 @@ class UtilBase(object): trainer_files = [[]] * trainers begin = 0 for i in range(trainers): - trainer_files[i] = files[begin:begin + blocks[i]] + trainer_files[i] = files[begin : begin + blocks[i]] begin += blocks[i] return trainer_files[trainer_id] @@ -279,7 +277,7 @@ class UtilBase(object): trainer_files = [[]] * trainers begin = 0 for i in range(trainers): - trainer_files[i] = files[begin:begin + blocks[i]] + trainer_files[i] = files[begin : begin + blocks[i]] begin += blocks[i] return trainer_files[trainer_id] @@ -323,7 +321,6 @@ class UtilBase(object): f.write(program.desc.serialize_to_string()) def _load_program(self, path, is_text): - def load_program_binary(path): """load program from binary string file""" with open(path, "rb") as f: @@ -347,8 +344,9 @@ class UtilBase(object): def _program_type_trans(self, prog_dir, prog_fn, is_text): prog = self._load_program(os.path.join(prog_dir, prog_fn), is_text) prog_out_fn = prog_fn + ".bin" if is_text else prog_fn + ".pbtxt" - self._save_program(prog, os.path.join(prog_dir, prog_out_fn), - 1 - is_text) + self._save_program( + prog, os.path.join(prog_dir, prog_out_fn), 1 - is_text + ) return prog_out_fn def _visualize_graphviz(self, program, output_dir, output_filename): @@ -357,29 +355,37 @@ class UtilBase(object): pdf_path = os.path.join(output_dir, output_filename + '.pdf') debugger.draw_block_graphviz(block, path=dot_path) cmd = ["dot", "-Tpdf", dot_path, "-o", pdf_path] - p = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) p.wait() def _proto_check(self, config): - train_prog = 
self._load_program(config.train_prog_path, - config.is_text_train_program) - pruned_prog = self._load_program(config.pruned_prog_path, - config.is_text_pruned_program) + train_prog = self._load_program( + config.train_prog_path, config.is_text_train_program + ) + pruned_prog = self._load_program( + config.pruned_prog_path, config.is_text_pruned_program + ) is_match = True - pruned_vars = [(v.name, v) for v in pruned_prog.list_vars() - if fluid.io.is_persistable(v)] + pruned_vars = [ + (v.name, v) + for v in pruned_prog.list_vars() + if fluid.io.is_persistable(v) + ] pruned_vars = OrderedDict(pruned_vars) pruned_vars_name = [name for name in pruned_vars] print("persistable vars in pruned program: {}".format(pruned_vars_name)) # feed and fetch op is added in pruned program when pruning, not need to be found in train program feed_fetch_type_list = [ - core.VarDesc.VarType.FEED_MINIBATCH, core.VarDesc.VarType.FETCH_LIST + core.VarDesc.VarType.FEED_MINIBATCH, + core.VarDesc.VarType.FETCH_LIST, ] for var_name in pruned_vars: @@ -392,21 +398,28 @@ class UtilBase(object): except ValueError as e: print( "Not find variable '%s' in train program. please check pruning." - % var_name) + % var_name + ) is_match = False continue - if var.shape != train_prog_var.shape or var.dtype != train_prog_var.dtype: + if ( + var.shape != train_prog_var.shape + or var.dtype != train_prog_var.dtype + ): print( - "variable: {} not match. in pruned program shape: {} dtype:{}, in train program shape: {} dtype: {}" - .format(var_name, var.shape, var.dtype, - train_prog_var.shape, train_prog_var.dtype)) + "variable: {} not match. in pruned program shape: {} dtype:{}, in train program shape: {} dtype: {}".format( + var_name, + var.shape, + var.dtype, + train_prog_var.shape, + train_prog_var.dtype, + ) + ) is_match = False return is_match def _params_check(self, config): - def feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist): - def reader(batch_size, fn, dim): data = [] if isinstance(dim, list) or isinstance(dim, tuple): @@ -437,39 +450,57 @@ class UtilBase(object): prog = self._load_program( os.path.join(config.dump_model_dir, config.dump_program_filename), - config.is_text_dump_program) + config.is_text_dump_program, + ) if config.is_text_dump_program: model_filename = self._program_type_trans( - config.dump_model_dir, config.dump_program_filename, - config.is_text_dump_program) + config.dump_model_dir, + config.dump_program_filename, + config.is_text_dump_program, + ) saved_params = [ v for v in prog.list_vars() if fluid.io.is_persistable(v) ] - print("persistable vars in dump program: {}".format( - [v.name for v in saved_params])) + print( + "persistable vars in dump program: {}".format( + [v.name for v in saved_params] + ) + ) def check_not_expected_ops(prog, not_expected_op_types): op_types_set = set() for op in prog.global_block().ops: - if op.type in not_expected_op_types and op.type not in op_types_set: + if ( + op.type in not_expected_op_types + and op.type not in op_types_set + ): op_types_set.add(op.type) return op_types_set not_expected_op_types = check_not_expected_ops(prog, ["lookup_table"]) if len(not_expected_op_types) > 0: print( - "find op type '{}' in program, please check if your program is pruned correctly !" 
- .format(list(not_expected_op_types))) + "find op type '{}' in program, please check if your program is pruned correctly !".format( + list(not_expected_op_types) + ) + ) return False place = fluid.CPUPlace() exe = fluid.Executor(place) scope = fluid.core.Scope() with fluid.scope_guard(scope): - inference_program, feed_target_names, fetch_targets = \ - fluid.io.load_inference_model(config.dump_model_dir, exe, model_filename=model_filename, - params_filename=config.save_params_filename) + ( + inference_program, + feed_target_names, + fetch_targets, + ) = fluid.io.load_inference_model( + config.dump_model_dir, + exe, + model_filename=model_filename, + params_filename=config.save_params_filename, + ) # check program vars and saved vars shape orig_para_shape = { @@ -480,13 +511,17 @@ class UtilBase(object): var_temp = fluid.global_scope().find_var(each_var.name) assert var_temp != None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape - assert each_var.name in orig_para_shape, each_var.name + "MUST in var list" + assert each_var.name in orig_para_shape, ( + each_var.name + "MUST in var list" + ) orig_shape = orig_para_shape.get(each_var.name) if new_shape != orig_shape: raise RuntimeError( "Shape not matching: the Program requires a parameter with a shape of ({}), " - "while the loaded parameter (namely [ {} ]) has a shape of ({})." - .format(orig_shape, each_var.name, new_shape)) + "while the loaded parameter (namely [ {} ]) has a shape of ({}).".format( + orig_shape, each_var.name, new_shape + ) + ) # check feed/fetch vars in program and config feed_config = config.feed_config @@ -498,10 +533,15 @@ class UtilBase(object): print("warning! no fetch targets in program.") fetch_list = fetch_targets feed_name_list = feed_target_names - if feed_config.feeded_vars_names is not None and feed_target_names != feed_config.feeded_vars_names: + if ( + feed_config.feeded_vars_names is not None + and feed_target_names != feed_config.feeded_vars_names + ): print( - "warning! feed vars in program and config are diff: feed in program: {}. feed in config {}." - .format(feed_target_names, feed_config.feeded_vars_names)) + "warning! feed vars in program and config are diff: feed in program: {}. feed in config {}.".format( + feed_target_names, feed_config.feeded_vars_names + ) + ) feed_name_list = feed_config.feeded_vars_names # remove feed op in inference_program. new feed op will be added in exe.run global_block = inference_program.global_block() @@ -512,10 +552,15 @@ class UtilBase(object): need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) - if fetch_config.fetch_vars_names is not None and fetch_targets_names != fetch_config.fetch_vars_names: + if ( + fetch_config.fetch_vars_names is not None + and fetch_targets_names != fetch_config.fetch_vars_names + ): print( - "warning! fetch vars in program and config are diff: fetch in program: {}. fetch in config {}." - .format(fetch_targets_names, fetch_config.fetch_vars_names)) + "warning! fetch vars in program and config are diff: fetch in program: {}. 
fetch in config {}.".format( + fetch_targets_names, fetch_config.fetch_vars_names + ) + ) fetch_list = [ inference_program.global_block().var(i) for i in fetch_config.fetch_vars_names @@ -535,75 +580,105 @@ class UtilBase(object): # try dump fetch_targets feed_tensors = [] - assert len(feed_config.feeded_vars_names) == len( - feed_config.feeded_vars_dims) == len( - feed_config.feeded_vars_types) + assert ( + len(feed_config.feeded_vars_names) + == len(feed_config.feeded_vars_dims) + == len(feed_config.feeded_vars_types) + ) # check program vars and feed tensor shape in config for i in range(len(feed_config.feeded_vars_names)): var = inference_program.global_block().var( - feed_config.feeded_vars_names[i]) - if not isinstance(feed_config.feeded_vars_dims[i], - (list, tuple)): - tensor_shape = (feed_config.feeded_vars_dims[i], ) + feed_config.feeded_vars_names[i] + ) + if not isinstance( + feed_config.feeded_vars_dims[i], (list, tuple) + ): + tensor_shape = (feed_config.feeded_vars_dims[i],) else: tensor_shape = tuple(feed_config.feeded_vars_dims[i]) feed_config.feeded_vars_dims[i] = tensor_shape var_shape = var.shape[1:] if tensor_shape != var_shape: raise RuntimeError( - "feed variable '{}' shape not match. infer program shape: {}. feed tensor shape: {}" - .format(feed_config.feeded_vars_names[i], var_shape, - tensor_shape)) + "feed variable '{}' shape not match. infer program shape: {}. feed tensor shape: {}".format( + feed_config.feeded_vars_names[i], + var_shape, + tensor_shape, + ) + ) if not feed_config.feeded_vars_filelist: print("generate random feed vars.") for i in range(len(feed_config.feeded_vars_names)): var = inference_program.global_block().var( - feed_config.feeded_vars_names[i]) + feed_config.feeded_vars_names[i] + ) # create fake feed tensor. if lod_level > 1, should create_lod_tensor() if var.lod_level == 0: feed_tensors.append( - np.array(np.random.random( - tuple([config.batch_size] + - list(feed_config.feeded_vars_dims[i]))), - dtype=feed_config.feeded_vars_types[i])) + np.array( + np.random.random( + tuple( + [config.batch_size] + + list(feed_config.feeded_vars_dims[i]) + ) + ), + dtype=feed_config.feeded_vars_types[i], + ) + ) elif var.lod_level == 1: - t = np.array(np.random.random( - tuple([config.batch_size] + - list(feed_config.feeded_vars_dims[i]))), - dtype=feed_config.feeded_vars_types[i]) + t = np.array( + np.random.random( + tuple( + [config.batch_size] + + list(feed_config.feeded_vars_dims[i]) + ) + ), + dtype=feed_config.feeded_vars_types[i], + ) feed_tensors.append( - fluid.create_lod_tensor(t, - [[1] * config.batch_size], - place)) + fluid.create_lod_tensor( + t, [[1] * config.batch_size], place + ) + ) else: raise RuntimeError( "vars with lod_level >= 2 is not supported now in this infer program check tool." 
) - results = exe.run(inference_program, - feed={ - name: feed_tensors[i] - for i, name in enumerate(feed_name_list) - }, - fetch_list=fetch_list, - return_numpy=return_numpy) + results = exe.run( + inference_program, + feed={ + name: feed_tensors[i] + for i, name in enumerate(feed_name_list) + }, + fetch_list=fetch_list, + return_numpy=return_numpy, + ) else: - print("load feed vars from files: {}.".format( - feed_config.feeded_vars_filelist)) + print( + "load feed vars from files: {}.".format( + feed_config.feeded_vars_filelist + ) + ) feed_vars = [ inference_program.global_block().var( - feed_config.feeded_vars_names[i]) + feed_config.feeded_vars_names[i] + ) for i in range(len(feed_config.feeded_vars_names)) ] feeder = fluid.DataFeeder(feed_list=feed_vars, place=place) - batch_feed = feed_gen(config.batch_size, - feed_config.feeded_vars_dims, - feed_config.feeded_vars_filelist) + batch_feed = feed_gen( + config.batch_size, + feed_config.feeded_vars_dims, + feed_config.feeded_vars_filelist, + ) slots = [batch_feed] - results = exe.run(inference_program, - feed=feeder.feed(slots), - fetch_list=fetch_list, - return_numpy=return_numpy) + results = exe.run( + inference_program, + feed=feeder.feed(slots), + fetch_list=fetch_list, + return_numpy=return_numpy, + ) for i, v in enumerate(fetch_list): print("fetch_targets name: %s" % v.name) print("fetch_targets: {}".format(results[i])) diff --git a/python/paddle/distributed/fleet/cloud_utils.py b/python/paddle/distributed/fleet/cloud_utils.py index cfd8a9ff4e2e776dfdb9e098e1d7c3040f569f78..cf74f8446705f0c5560b364ba5e8343484be1a7b 100644 --- a/python/paddle/distributed/fleet/cloud_utils.py +++ b/python/paddle/distributed/fleet/cloud_utils.py @@ -18,14 +18,13 @@ from paddle.distributed.fleet.launch_utils import get_cluster, logger __all__ = [] -def get_cloud_cluster(args_node_ips, - device_mode, - devices_per_proc, - args_port=6170): +def get_cloud_cluster( + args_node_ips, device_mode, devices_per_proc, args_port=6170 +): """ args_node_ips:string, device_mode:DeviceMode(Int), device_per_proc:list, args_port: int """ - #you can automatically get ip info while using paddlecloud multi nodes mode. + # you can automatically get ip info while using paddlecloud multi nodes mode. 
node_ips = os.getenv("PADDLE_TRAINERS") assert node_ips is not None, "PADDLE_TRAINERS should not be None" @@ -47,7 +46,10 @@ def get_cloud_cluster(args_node_ips, "Please NOTE: When using paddlecloud, cluster_node_ips is \ automatically got from PADDLE_TRAINERS(multi nodes) or POD_IP(single node).\ Your input cluster_node_ips: {} doesn't equals to IPs: {} from \ -paddlecloud environment.".format(args_node_ips, node_ips)) +paddlecloud environment.".format( + args_node_ips, node_ips + ) + ) # DISTRIBUTED_TRAINER_ENDPOINTS: new environment since paddlecloud 1.8.4 # e.g: DISTRIBUTED_TRAINER_ENDPOINTS="ip1:port1,ip1:port2,ip1:port3,ip1:port4,ip2:port5,ip2:port6,ip2:port7,ip2:port8" @@ -58,10 +60,13 @@ paddlecloud environment.".format(args_node_ips, node_ips)) try: paddle_port = int(os.getenv("PADDLE_PORT", "")) - if paddle_ports_num >= len( - devices_per_proc) and paddle_port != args_port: + if ( + paddle_ports_num >= len(devices_per_proc) + and paddle_port != args_port + ): logger.warning( - "Use Cloud specified port:{}.".format(paddle_port)) + "Use Cloud specified port:{}.".format(paddle_port) + ) started_port = paddle_port except Exception as e: @@ -81,15 +86,21 @@ paddlecloud environment.".format(args_node_ips, node_ips)) assert num_nodes * paddle_ports_num == len(trainer_endpoints_ori) for i in range(num_nodes): trainer_endpoints.append( - trainer_endpoints_ori[i * paddle_ports_num:(i + 1) * - paddle_ports_num]) + trainer_endpoints_ori[ + i * paddle_ports_num : (i + 1) * paddle_ports_num + ] + ) - logger.debug("parsed from args: node_ips:{} \ + logger.debug( + "parsed from args: node_ips:{} \ node_ip:{} node_rank:{} trainer_endpoints:{}".format( - node_ips, node_ip, node_rank, trainer_endpoints)) + node_ips, node_ip, node_rank, trainer_endpoints + ) + ) - cluster, pod = get_cluster(node_ips, node_ip, trainer_endpoints, - device_mode, devices_per_proc) + cluster, pod = get_cluster( + node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc + ) return cluster, cluster.pods[node_rank] @@ -98,7 +109,12 @@ def use_paddlecloud(): node_ip = os.getenv("POD_IP") node_rank = os.getenv("PADDLE_TRAINER_ID") paddle_ports_num = os.getenv("TRAINER_PORTS_NUM") - if node_ips is None or node_ip is None or node_rank is None or paddle_ports_num is None: + if ( + node_ips is None + or node_ip is None + or node_rank is None + or paddle_ports_num is None + ): return False else: return True diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py index d43c376bb0c01ceddccdac14e38f76d51a121c74..9e9ec7b61bc4648d5415f38ae4c836dba11d2941 100644 --- a/python/paddle/distributed/fleet/data_generator/data_generator.py +++ b/python/paddle/distributed/fleet/data_generator/data_generator.py @@ -147,7 +147,8 @@ class DataGenerator(object): Return a string data that can be read directly by the datafeed. ''' raise NotImplementedError( - "pls use MultiSlotDataGenerator or PairWiseDataGenerator") + "pls use MultiSlotDataGenerator or PairWiseDataGenerator" + ) def generate_sample(self, line): ''' @@ -187,8 +188,9 @@ class DataGenerator(object): ''' raise NotImplementedError( - "Please rewrite this function to return a list or tuple: " + - "[(name, [feasign, ...]), ...] or ((name, [feasign, ...]), ...)") + "Please rewrite this function to return a list or tuple: " + + "[(name, [feasign, ...]), ...] 
or ((name, [feasign, ...]), ...)" + ) def generate_batch(self, samples): ''' @@ -236,7 +238,6 @@ class DataGenerator(object): # add more generalized DataGenerator that can adapt user-defined slot # for example, [(name, float_list), (name, str_list), (name, int_list)] class MultiSlotStringDataGenerator(DataGenerator): - def _gen_str(self, line): ''' Further processing the output of the process() function rewritten by @@ -267,7 +268,8 @@ class MultiSlotStringDataGenerator(DataGenerator): if not isinstance(line, list) and not isinstance(line, tuple): raise ValueError( "the output of process() must be in list or tuple type" - "Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]") + "Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]" + ) output = "" for index, item in enumerate(line): name, elements = item @@ -281,7 +283,6 @@ class MultiSlotStringDataGenerator(DataGenerator): class MultiSlotDataGenerator(DataGenerator): - def _gen_str(self, line): ''' Further processing the output of the process() function rewritten by @@ -316,7 +317,8 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(line, list) and not isinstance(line, tuple): raise ValueError( "the output of process() must be in list or tuple type" - "Example: [('words', [1926, 08, 17]), ('label', [1])]") + "Example: [('words', [1926, 08, 17]), ('label', [1])]" + ) output = "" if self._proto_info is None: @@ -326,8 +328,9 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(name, str): raise ValueError("name%s must be in str type" % type(name)) if not isinstance(elements, list): - raise ValueError("elements%s must be in list type" % - type(elements)) + raise ValueError( + "elements%s must be in list type" % type(elements) + ) if not elements: raise ValueError( "the elements of each field can not be empty, you need padding it in process()." @@ -340,10 +343,12 @@ class MultiSlotDataGenerator(DataGenerator): if isinstance(elem, float): self._proto_info[-1] = (name, "float") elif not isinstance(elem, int) and not isinstance( - elem, long): + elem, long + ): raise ValueError( - "the type of element%s must be in int or float" % - type(elem)) + "the type of element%s must be in int or float" + % type(elem) + ) output += " " + str(elem) else: if len(line) != len(self._proto_info): @@ -355,8 +360,9 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(name, str): raise ValueError("name%s must be in str type" % type(name)) if not isinstance(elements, list): - raise ValueError("elements%s must be in list type" % - type(elements)) + raise ValueError( + "elements%s must be in list type" % type(elements) + ) if not elements: raise ValueError( "the elements of each field can not be empty, you need padding it in process()." @@ -364,7 +370,8 @@ class MultiSlotDataGenerator(DataGenerator): if name != self._proto_info[index][0]: raise ValueError( "the field name of two given line are not match: require<%s>, get<%s>." 
- % (self._proto_info[index][0], name)) + % (self._proto_info[index][0], name) + ) if output: output += " " output += str(len(elements)) @@ -373,9 +380,11 @@ class MultiSlotDataGenerator(DataGenerator): if isinstance(elem, float): self._proto_info[index] = (name, "float") elif not isinstance(elem, int) and not isinstance( - elem, long): + elem, long + ): raise ValueError( "the type of element%s must be in int or float" - % type(elem)) + % type(elem) + ) output += " " + str(elem) return output + "\n" diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index 58e265c8347239ebb5a80a5dd34da5672219facf..b74f700391cf5c5a4aead52c4c1d433aecdd3ddc 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -21,10 +21,10 @@ __all__ = [] class DatasetBase(object): - """ Base dataset class. """ + """Base dataset class.""" def __init__(self): - """ Init. """ + """Init.""" # define class name here # to decide whether we need create in memory instance self.proto_desc = data_feed_pb2.DataFeedDesc() @@ -35,15 +35,17 @@ class DatasetBase(object): self.use_ps_gpu = False self.psgpu = None - def init(self, - batch_size=1, - thread_num=1, - use_var=[], - pipe_command="cat", - input_type=0, - fs_name="", - fs_ugi="", - download_cmd="cat"): + def init( + self, + batch_size=1, + thread_num=1, + use_var=[], + pipe_command="cat", + input_type=0, + fs_name="", + fs_ugi="", + download_cmd="cat", + ): """ should be called only once in user's python scripts to initialize setings of dataset instance. Normally, it is called by InMemoryDataset or QueueDataset. @@ -271,8 +273,9 @@ class DatasetBase(object): def _dynamic_adjust_after_train(self): pass - def _check_use_var_with_data_generator(self, var_list, data_generator_class, - test_file): + def _check_use_var_with_data_generator( + self, var_list, data_generator_class, test_file + ): """ Var consistency insepection of use_var_list and data_generator data. @@ -304,32 +307,38 @@ class DatasetBase(object): if var_len != data_gen_len: raise ValueError( "var length mismatch error: var_list = %s vs data_generator = %s" - % (var_len, data_gen_len)) + % (var_len, data_gen_len) + ) for i, ele in enumerate(user_parsed_line): if len(ele[1]) == 0: raise ValueError( "var length error: var %s's length in data_generator is 0" - % ele[0]) + % ele[0] + ) if var_list[ - i].dtype == core.VarDesc.VarType.FP32 and not all( - isinstance(ele, float) for ele in ele[1]): + i + ].dtype == core.VarDesc.VarType.FP32 and not all( + isinstance(ele, float) for ele in ele[1] + ): raise TypeError( "var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-float value, which is %s \n" "Please check if order of var_list and data_generator are aligned. \n" "Please check if var's type in data_generator is correct." - % (ele[0], "float", ele[1])) + % (ele[0], "float", ele[1]) + ) - if (var_list[i].dtype == core.VarDesc.VarType.INT64 - or var_list[i].dtype - == core.VarDesc.VarType.INT32) and not all( - isinstance(ele, int) for ele in ele[1]): + if ( + var_list[i].dtype == core.VarDesc.VarType.INT64 + or var_list[i].dtype == core.VarDesc.VarType.INT32 + ) and not all(isinstance(ele, int) for ele in ele[1]): raise TypeError( "var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-int value, which is %s \n" "Please check if order of var_list and data_generator are aligned. 
\n" "Please check if var's type in data_generator is correct." - % (ele[0], "int", ele[1])) + % (ele[0], "int", ele[1]) + ) else: break @@ -353,7 +362,7 @@ class InMemoryDataset(DatasetBase): """ def __init__(self): - """ Init. """ + """Init.""" super(InMemoryDataset, self).__init__() self.proto_desc.name = "MultiSlotInMemoryDataFeed" self.fleet_send_batch_size = None @@ -588,14 +597,16 @@ class InMemoryDataset(DatasetBase): data_feed_type = "MultiSlotInMemoryDataFeed" self._set_feed_type(data_feed_type) - super(InMemoryDataset, self).init(batch_size=batch_size, - thread_num=thread_num, - use_var=use_var, - pipe_command=pipe_command, - input_type=input_type, - fs_name=fs_name, - fs_ugi=fs_ugi, - download_cmd=download_cmd) + super(InMemoryDataset, self).init( + batch_size=batch_size, + thread_num=thread_num, + use_var=use_var, + pipe_command=pipe_command, + input_type=input_type, + fs_name=fs_name, + fs_ugi=fs_ugi, + download_cmd=download_cmd, + ) if kwargs.get("queue_num", -1) > 0: queue_num = kwargs.get("queue_num", -1) @@ -606,7 +617,7 @@ class InMemoryDataset(DatasetBase): Set data_feed_desc """ self.proto_desc.name = data_feed_type - if (self.proto_desc.name == "SlotRecordInMemoryDataFeed"): + if self.proto_desc.name == "SlotRecordInMemoryDataFeed": self.dataset = core.Dataset("SlotRecordDataset") def _prepare_to_run(self): @@ -779,11 +790,12 @@ class InMemoryDataset(DatasetBase): self.gen_uni_feasigns = generate_uni_feasigns self.local_shard_num = shard_num - def _generate_local_tables_unlock(self, table_id, fea_dim, read_thread_num, - consume_thread_num, shard_num): - self.dataset.generate_local_tables_unlock(table_id, fea_dim, - read_thread_num, - consume_thread_num, shard_num) + def _generate_local_tables_unlock( + self, table_id, fea_dim, read_thread_num, consume_thread_num, shard_num + ): + self.dataset.generate_local_tables_unlock( + table_id, fea_dim, read_thread_num, consume_thread_num, shard_num + ) def set_date(self, date): """ @@ -821,11 +833,25 @@ class InMemoryDataset(DatasetBase): if self.use_ps_gpu and core._is_compiled_with_heterps(): self.psgpu.set_date(year, month, day) - def tdm_sample(self, tree_name, tree_path, tdm_layer_counts, - start_sample_layer, with_hierachy, seed, id_slot): - self.dataset.tdm_sample(tree_name, tree_path, tdm_layer_counts, - start_sample_layer, with_hierachy, seed, - id_slot) + def tdm_sample( + self, + tree_name, + tree_path, + tdm_layer_counts, + start_sample_layer, + with_hierachy, + seed, + id_slot, + ): + self.dataset.tdm_sample( + tree_name, + tree_path, + tdm_layer_counts, + start_sample_layer, + with_hierachy, + seed, + id_slot, + ) def load_into_memory(self, is_shuffle=False): """ @@ -1112,12 +1138,14 @@ class InMemoryDataset(DatasetBase): """ import numpy as np + local_data_size = self.dataset.get_memory_data_size() local_data_size = np.array([local_data_size]) if fleet is not None: global_data_size = local_data_size * 0 - fleet._role_maker.all_reduce_worker(local_data_size, - global_data_size) + fleet._role_maker.all_reduce_worker( + local_data_size, global_data_size + ) return global_data_size[0] return local_data_size[0] @@ -1166,12 +1194,14 @@ class InMemoryDataset(DatasetBase): """ import numpy as np + local_data_size = self.dataset.get_shuffle_data_size() local_data_size = np.array([local_data_size]) if fleet is not None: global_data_size = local_data_size * 0 - fleet._role_maker.all_reduce_worker(local_data_size, - global_data_size) + fleet._role_maker.all_reduce_worker( + local_data_size, global_data_size + ) return 
global_data_size[0] return local_data_size[0] diff --git a/python/paddle/distributed/fleet/dataset/index_dataset.py b/python/paddle/distributed/fleet/dataset/index_dataset.py index 8b5a9c5a45bf6ce218960f58e10489f8e703e9ca..1dda709fffd58450046138067980470f06ca3a46 100644 --- a/python/paddle/distributed/fleet/dataset/index_dataset.py +++ b/python/paddle/distributed/fleet/dataset/index_dataset.py @@ -17,13 +17,11 @@ __all__ = [] class Index(object): - def __init__(self, name): self._name = name class TreeIndex(Index): - def __init__(self, name, path): super(TreeIndex, self).__init__(name) self._wrapper = core.IndexWrapper() @@ -67,7 +65,7 @@ class TreeIndex(Index): def get_travel_path(self, child, ancestor): res = [] - while (child > ancestor): + while child > ancestor: res.append(child) child = int((child - 1) / self._branch) return res @@ -76,17 +74,18 @@ class TreeIndex(Index): codes = self.get_ancestor_codes(ids, level) return dict(zip(ids, codes)) - def init_layerwise_sampler(self, - layer_sample_counts, - start_sample_layer=1, - seed=0): + def init_layerwise_sampler( + self, layer_sample_counts, start_sample_layer=1, seed=0 + ): assert self._layerwise_sampler is None self._layerwise_sampler = core.IndexSampler("by_layerwise", self._name) - self._layerwise_sampler.init_layerwise_conf(layer_sample_counts, - start_sample_layer, seed) + self._layerwise_sampler.init_layerwise_conf( + layer_sample_counts, start_sample_layer, seed + ) def layerwise_sample(self, user_input, index_input, with_hierarchy=False): if self._layerwise_sampler is None: raise ValueError("please init layerwise_sampler first.") - return self._layerwise_sampler.sample(user_input, index_input, - with_hierarchy) + return self._layerwise_sampler.sample( + user_input, index_input, with_hierarchy + ) diff --git a/python/paddle/distributed/fleet/elastic/__init__.py b/python/paddle/distributed/fleet/elastic/__init__.py index b80a66c6f01d06a467999112e43a6be5c2c8ec51..447bedb21d59270e25bf15f4e9c8fee25c44521b 100644 --- a/python/paddle/distributed/fleet/elastic/__init__.py +++ b/python/paddle/distributed/fleet/elastic/__init__.py @@ -25,12 +25,12 @@ from paddle.distributed.fleet.launch_utils import DistributeMode def enable_elastic(args, distribute_mode): - #elastic_level = os.getenv('PADDLE_ELASTIC_FAULT_TOLERANC_LEVEL') - #if not elastic_level and (elastic_level != ElasticLevel.FAULT_TOLERANCE and + # elastic_level = os.getenv('PADDLE_ELASTIC_FAULT_TOLERANC_LEVEL') + # if not elastic_level and (elastic_level != ElasticLevel.FAULT_TOLERANCE and # elastic_level != ElasticLevel.ELASTIC): # return False - #if distribute_mode != DistributeMode.COLLECTIVE: + # if distribute_mode != DistributeMode.COLLECTIVE: # return False if not args.elastic_server and not os.getenv('PADDLE_ELASTIC_SERVER'): @@ -50,6 +50,7 @@ def launch_elastic(args, distribute_mode): server = args.elastic_server or os.getenv('PADDLE_ELASTIC_SERVER') srv, port = server.split(':') import etcd3 + etcd_client = etcd3.client(host=srv, port=port) elastic = ElasticManager(args, etcd_client) diff --git a/python/paddle/distributed/fleet/elastic/collective.py b/python/paddle/distributed/fleet/elastic/collective.py index aa5c962b99467e89832831171851f5b63a972fcc..bdb8a6c577094e257d8a54b521aeb750a29dcad2 100644 --- a/python/paddle/distributed/fleet/elastic/collective.py +++ b/python/paddle/distributed/fleet/elastic/collective.py @@ -17,12 +17,15 @@ import os import shutil import paddle -from paddle.distributed.fleet.launch_utils import logger, pull_worker_log, start_local_trainers 
+from paddle.distributed.fleet.launch_utils import ( + logger, + pull_worker_log, + start_local_trainers, +) from paddle.distributed.fleet.elastic.manager import LauncherInterface class CollectiveLauncher(LauncherInterface): - def __init__(self, args): self.args = args self.procs = [] @@ -33,7 +36,8 @@ class CollectiveLauncher(LauncherInterface): self.tmp_dir = tempfile.mkdtemp() cluster, pod = paddle.distributed.fleet.launch.get_cluster_info(args) global_envs = paddle.distributed.fleet.launch.get_global_envs( - args, self.tmp_dir) + args, self.tmp_dir + ) self.procs = start_local_trainers( cluster, @@ -41,7 +45,8 @@ class CollectiveLauncher(LauncherInterface): training_script=args.training_script, training_script_args=args.training_script_args, log_dir=args.log_dir, - envs=global_envs) + envs=global_envs, + ) for idx, proc in enumerate(self.procs): logger.info("launch proc_id:{} idx:{}".format(proc.proc.pid, idx)) diff --git a/python/paddle/distributed/fleet/elastic/manager.py b/python/paddle/distributed/fleet/elastic/manager.py index 84cf95742e309e34ed3ea430ae305a52b14d90f1..9397f6d10d4a693a0874dab3ecb977d4477dc534 100644 --- a/python/paddle/distributed/fleet/elastic/manager.py +++ b/python/paddle/distributed/fleet/elastic/manager.py @@ -54,7 +54,6 @@ class ElasticStatus: class LauncherInterface(object): - def __init__(self, args): self.args = args self.procs = [] @@ -67,8 +66,9 @@ class LauncherInterface(object): os.killpg(os.getpgid(p.proc.pid), signal.SIGTERM) if p.log_fn: p.log_fn.close() - logger.info("terminate process group gid:{}".format( - p.proc.pid)) + logger.info( + "terminate process group gid:{}".format(p.proc.pid) + ) time.sleep(1) for p in self.procs: @@ -105,8 +105,10 @@ class LauncherInterface(object): return ret logger.error("ABORT!!! ABORT!!! ABORT!!!") logger.error( - "ERROR rank {} error with exit code {}, check log for detail." 
- .format(p.rank, ret)) + "ERROR rank {} error with exit code {}, check log for detail.".format( + p.rank, ret + ) + ) result = ret if not alive and result is None: return 0 @@ -124,7 +126,6 @@ class LauncherInterface(object): class ElasticManager(object): - def __init__(self, args, etcd_client): self.args = args @@ -137,11 +138,14 @@ class ElasticManager(object): self.host = host if host else self._get_host() - (self.device_mode, - self.devices_per_proc) = launch_utils.get_device_proc_info(args) + ( + self.device_mode, + self.devices_per_proc, + ) = launch_utils.get_device_proc_info(args) self.elastic_timeout = int( - os.getenv('PADDLE_ELASTIC_TIMEOUT', ELASTIC_TIMEOUT)) + os.getenv('PADDLE_ELASTIC_TIMEOUT', ELASTIC_TIMEOUT) + ) elastic_ttl = int(os.getenv('PADDLE_ELASTIC_TTL', ELASTIC_TTL)) self.start_port = None @@ -158,7 +162,8 @@ class ElasticManager(object): self.np = len(node_ips) self.start_port = int(os.getenv("FLAGS_START_PORT", "6170")) self.dist_endpoints = self._host_to_endpoints( - node_ips, self.devices_per_proc, self.start_port) + node_ips, self.devices_per_proc, self.start_port + ) self.trainer_endpoints_list = [ "%s:%d" % (ip, self.start_port) for ip in node_ips ] @@ -172,10 +177,12 @@ class ElasticManager(object): # auto correct the value of elastic_level # 1: Fault tolerant, 2: Elastic self.elastic_level = int( - os.getenv('PADDLE_ELASTIC_FAULT_TOLERANC_LEVEL', - ElasticLevel.FAULT_TOLERANCE)) - if self.min_np == self.max_np or \ - (self.min_np > 0 and self.max_np == 0): + os.getenv( + 'PADDLE_ELASTIC_FAULT_TOLERANC_LEVEL', + ElasticLevel.FAULT_TOLERANCE, + ) + ) + if self.min_np == self.max_np or (self.min_np > 0 and self.max_np == 0): self.elastic_level = ElasticLevel.FAULT_TOLERANCE logger.info('start job with ElasticLevel.FAULT_TOLERANCE') if self.min_np > 0 and self.max_np > self.min_np: @@ -183,12 +190,15 @@ class ElasticManager(object): logger.info('start job with ElasticLevel.ELASTIC') # compatible with kuberntes service discovery - if not server and os.getenv( - 'PADDLE_ELASTIC_ETCD_SERVICE_HOST') and os.getenv( - 'PADDLE_ELASTIC_ETCD_SERVICE_PORT'): + if ( + not server + and os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_HOST') + and os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_PORT') + ): server = '{}:{}'.format( os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_HOST'), - os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_PORT')) + os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_PORT'), + ) logger.debug('init with server {} host {}'.format(server, host)) @@ -202,8 +212,10 @@ class ElasticManager(object): if not server or ':' not in server or not name or not self.np: logger.info( - 'Elastic is not enabled with server {} name {} and np {}'. 
- format(server, name, self.np)) + 'Elastic is not enabled with server {} name {} and np {}'.format( + server, name, self.np + ) + ) self.enable = False return else: @@ -218,9 +230,11 @@ class ElasticManager(object): self.endpoints_path = self.prefix + '/endpoints' node_tag = ''.join( - random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6)) - self.host_path = '{}/{}{}'.format(self.node_prefix, node_tag, - time.time()) + random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6) + ) + self.host_path = '{}/{}{}'.format( + self.node_prefix, node_tag, time.time() + ) ''' 0 group mode, be aware of healthy status of other workers 1 decouple mode, check own status only @@ -241,7 +255,8 @@ class ElasticManager(object): self.elastic_startup_time = None host_watch = self.etcd.add_watch_prefix_callback( - self.node_prefix, host_call_back) + self.node_prefix, host_call_back + ) host_lease = self.etcd.lease(elastic_ttl) # register etcd lease heartbeat @@ -260,51 +275,61 @@ class ElasticManager(object): ) if self.curr_host not in hosts: logger.info( - f"[lease_heartbeat] register host={self.curr_host}") - self.etcd.put(self.host_path, - self.curr_host.encode('latin-1'), - lease=host_lease) + f"[lease_heartbeat] register host={self.curr_host}" + ) + self.etcd.put( + self.host_path, + self.curr_host.encode('latin-1'), + lease=host_lease, + ) except Exception as e: logger.error( "[lease_heartbeat] internal error:{} {}".format( - e, traceback.format_exc())) + e, traceback.format_exc() + ) + ) break time.sleep(elastic_ttl / 3) - keepalived_thread = threading.Thread(name='lease_heartbeat', - target=lease_heartbeat, - daemon=True) + keepalived_thread = threading.Thread( + name='lease_heartbeat', target=lease_heartbeat, daemon=True + ) keepalived_thread.start() - self.etcd.put(self.host_path, - self.curr_host.encode('latin-1'), - lease=host_lease) + self.etcd.put( + self.host_path, self.curr_host.encode('latin-1'), lease=host_lease + ) # endpoints handle DISTRIBUTED_TRAINER_ENDPOINTS and PADDLE_TRAINERS self.etcd.put( self.endpoints_path, - '{}|{}'.format(self.dist_endpoints, - self.trainers).encode('latin-1')) + '{}|{}'.format(self.dist_endpoints, self.trainers).encode( + 'latin-1' + ), + ) def endpoints_call_back(event): if not self.dist_endpoints: return edps = six.ensure_str(self.etcd.get(self.endpoints_path)[0] or '') self.dist_endpoints, self.trainers = edps.split('|') - logger.info("set DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( - self.dist_endpoints)) + logger.info( + "set DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( + self.dist_endpoints + ) + ) logger.info("set PADDLE_TRAINERS {} ".format(self.trainers)) - endpoints_watch = self.etcd.add_watch_callback(self.endpoints_path, - endpoints_call_back) + endpoints_watch = self.etcd.add_watch_callback( + self.endpoints_path, endpoints_call_back + ) self.watches = [host_watch, endpoints_watch] self.launcher = None - def _host_to_endpoints(self, - ip_port_list: list, - devices_per_proc: list, - start_port: int = 6170) -> str: + def _host_to_endpoints( + self, ip_port_list: list, devices_per_proc: list, start_port: int = 6170 + ) -> str: endpoint_list = [] for ip_port in ip_port_list: endpoints = ip_port.split(":") @@ -347,11 +372,13 @@ class ElasticManager(object): return logger.info("execute pre_hook...") current_env = copy.copy(os.environ.copy()) - out, err = subprocess.Popen(self.args.elastic_pre_hook, - env=current_env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True).communicate() + out, err = subprocess.Popen( + 
self.args.elastic_pre_hook, + env=current_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ).communicate() if err: logger.warning("pre_hook exec failed") else: @@ -377,7 +404,8 @@ class ElasticManager(object): max_np = min_np if min_np > max_np else max_np else: raise ValueError( - f'the np={np} needs to be in "MIN" or "MIN:MAX" format') + f'the np={np} needs to be in "MIN" or "MIN:MAX" format' + ) return min_np, max_np @@ -436,8 +464,10 @@ class ElasticManager(object): return False def _update_endpoint(self, endpoints, hosts): - self.etcd.put(self.endpoints_path, - '{}|{}'.format(endpoints, hosts).encode('latin-1')) + self.etcd.put( + self.endpoints_path, + '{}|{}'.format(endpoints, hosts).encode('latin-1'), + ) def _update_fault_tolrance(self): rank = int(os.getenv('PADDLE_TRAINER_ID', -1)) @@ -447,8 +477,11 @@ class ElasticManager(object): if self.curr_host in self.dist_endpoints: os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'] = self.dist_endpoints os.environ['PADDLE_TRAINERS'] = self.trainers - logger.info("update env DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( - self.dist_endpoints)) + logger.info( + "update env DISTRIBUTED_TRAINER_ENDPOINTS {} ".format( + self.dist_endpoints + ) + ) logger.info("update env PADDLE_TRAINERS {} ".format(self.trainers)) return @@ -476,9 +509,11 @@ class ElasticManager(object): host_endpoints.append(curr_host_port) os.environ['PADDLE_TRAINER_ID'] = '{}'.format( - host_endpoints.index(self.curr_host)) + host_endpoints.index(self.curr_host) + ) hosts = ','.join( - [host_port.split(":")[0] for host_port in host_endpoints]) + [host_port.split(":")[0] for host_port in host_endpoints] + ) self.args.ips = hosts os.environ['PADDLE_TRAINERS'] = hosts self.np = len(host_endpoints) @@ -521,12 +556,14 @@ class ElasticManager(object): ip_list = [ip_port.split(":")[0] for ip_port in sorted_endpoints] hosts = ','.join(ip_list) - new_endpoints = self._host_to_endpoints(sorted_endpoints, - self.devices_per_proc) + new_endpoints = self._host_to_endpoints( + sorted_endpoints, self.devices_per_proc + ) self.args.ips = hosts os.environ['PADDLE_TRAINER_ID'] = '{}'.format( - sorted_endpoints.index(self.curr_host)) + sorted_endpoints.index(self.curr_host) + ) os.environ['PADDLE_TRAINERS'] = hosts self.np = len(sorted_endpoints) os.environ['PADDLE_TRAINER_ENDPOINTS'] = ','.join(sorted_endpoints) @@ -560,8 +597,9 @@ class ElasticManager(object): logger.info('ready with hosts {}'.format(self.hosts)) self._update_hosts() return - logger.info('not ready for np {} with hosts {}'.format( - self.np, self.hosts)) + logger.info( + 'not ready for np {} with hosts {}'.format(self.np, self.hosts) + ) idx += 1 time.sleep(2) return diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index 7a6af30f912832bb417fc436b41ee397bb005461..db3aae28a201875c172699d197be7be4d3d47da5 100644 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -55,12 +55,12 @@ def apply_ir_passes(main_program, startup_program, config): ) build_strategy.fuse_all_optimizer_ops = False - return apply_build_strategy(main_program, startup_program, build_strategy, - pass_attrs) + return apply_build_strategy( + main_program, startup_program, build_strategy, pass_attrs + ) def _inited_runtime_handler_(func): - def __impl__(*args, **kwargs): cls = args[0] @@ -73,15 +73,17 @@ def _inited_runtime_handler_(func): def _is_non_distributed_check_(func): - def __impl__(*args, **kwargs): cls = args[0] - if cls._role_maker is not None 
and cls._role_maker._is_non_distributed( - ) is True: + if ( + cls._role_maker is not None + and cls._role_maker._is_non_distributed() is True + ): logger.warning( - "%s() function doesn't work when use non_distributed fleet." % - (func.__name__)) + "%s() function doesn't work when use non_distributed fleet." + % (func.__name__) + ) return return func(*args, **kwargs) @@ -161,11 +163,13 @@ class Fleet(object): self._context = {} self.user_defined_optimizer = paddle.optimizer.Optimizer(0.0) - def init(self, - role_maker=None, - is_collective=False, - strategy=None, - log_level="INFO"): + def init( + self, + role_maker=None, + is_collective=False, + strategy=None, + log_level="INFO", + ): """ Initialize role_maker in Fleet. @@ -240,22 +244,28 @@ class Fleet(object): if isinstance(is_collective, bool): self._is_collective = is_collective self._role_maker = PaddleCloudRoleMaker( - is_collective=self._is_collective) + is_collective=self._is_collective + ) else: raise ValueError( - "`is_collective` should be instance of `bool`, but got {}". - format(type(is_collective))) + "`is_collective` should be instance of `bool`, but got {}".format( + type(is_collective) + ) + ) else: if isinstance(role_maker, RoleMakerBase): self._role_maker = role_maker self._is_collective = role_maker._is_collective else: raise ValueError( - "`role_maker` should be subclass of `RoleMakerBase`, but got {}" - .format(type(role_maker))) + "`role_maker` should be subclass of `RoleMakerBase`, but got {}".format( + type(role_maker) + ) + ) self._role_maker._generate_role() import paddle.distributed.fleet as fleet + fleet.util._set_role_maker(self._role_maker) self.strategy_compiler = StrategyCompiler() @@ -276,17 +286,20 @@ class Fleet(object): return if parallel_helper._is_parallel_ctx_initialized(): logger.warning( - "The dygraph parallel environment has been initialized.") + "The dygraph parallel environment has been initialized." + ) else: # FLAGS_nccl_nrings is used for dynamic graph multi-stream communication if "FLAGS_nccl_nrings" in os.environ: logger.warning( "You have set the environment variable FLAGS_nccl_nrings " "outside the program, so the nccl_comm_num in " - "DistributedStrategy will not take effect here.") + "DistributedStrategy will not take effect here." 
+ ) else: os.environ["FLAGS_nccl_nrings"] = str( - self._user_defined_strategy.nccl_comm_num) + self._user_defined_strategy.nccl_comm_num + ) paddle.distributed.init_parallel_env() # hybrid parallel not support for npu/xpu @@ -308,17 +321,24 @@ class Fleet(object): global_ring_id = 3 if use_sharding else 0 global_ranks = list(range(global_world_size)) - if tp._HYBRID_PARALLEL_GROUP is None: tp._CommunicateGroup() + if tp._HYBRID_PARALLEL_GROUP is None: + tp._CommunicateGroup() cg = tp._HYBRID_PARALLEL_GROUP self._hcg = cg - cg.set_comm_group('global', global_rank, global_world_size, - global_ring_id, global_ranks) + cg.set_comm_group( + 'global', + global_rank, + global_world_size, + global_ring_id, + global_ranks, + ) use_tensor_parallel = self._user_defined_strategy.tensor_parallel use_mp = use_sharding or use_tensor_parallel # hybrid group - if use_mp is False: return + if use_mp is False: + return mp_degree_sharding = 1 mp_degree_tensor_parallel = 1 @@ -327,14 +347,21 @@ class Fleet(object): mp_degree_sharding = int(sharding_configs['mp_degree']) if use_tensor_parallel: - tensor_parallel_configs = self._user_defined_strategy.tensor_parallel_configs + tensor_parallel_configs = ( + self._user_defined_strategy.tensor_parallel_configs + ) mp_degree_tensor_parallel = int( - tensor_parallel_configs['tensor_parallel_degree']) + tensor_parallel_configs['tensor_parallel_degree'] + ) if use_sharding and use_tensor_parallel: assert mp_degree_sharding == mp_degree_tensor_parallel - mp_degree = mp_degree_sharding if use_sharding else mp_degree_tensor_parallel + mp_degree = ( + mp_degree_sharding + if use_sharding + else mp_degree_tensor_parallel + ) if mp_degree > 1: assert global_world_size % mp_degree == 0 @@ -343,16 +370,17 @@ class Fleet(object): mp_rank = global_rank % mp_degree mp_group_id = global_rank // mp_degree mp_group_ranks = [ - idx for idx in global_ranks + idx + for idx in global_ranks if idx // mp_degree == mp_group_id ] - cg.set_comm_group('model', mp_rank, mp_degree, mp_ring_id, - mp_group_ranks) + cg.set_comm_group( + 'model', mp_rank, mp_degree, mp_ring_id, mp_group_ranks + ) return self def _init_hybrid_parallel_env(self): - """initialize the hybrid environment - """ + """initialize the hybrid environment""" self.hybrid_configs = self._user_defined_strategy.hybrid_configs self.dp_degree = self.hybrid_configs["dp_degree"] self.mp_degree = self.hybrid_configs["mp_degree"] @@ -361,7 +389,9 @@ class Fleet(object): assert self.mp_degree >= 0, "mp_degree should be greater or equal to 0" assert self.pp_degree >= 0, "pp_degree should be greater or equal to 0" - assert self.sharding_degree >= 0, "sharding_degree should be greater or equal to 0" + assert ( + self.sharding_degree >= 0 + ), "sharding_degree should be greater or equal to 0" self.mp_degree = max(self.mp_degree, 1) self.pp_degree = max(self.pp_degree, 1) @@ -375,14 +405,19 @@ class Fleet(object): self._topology = tp.CommunicateTopology( hybrid_group_names=["data", "pipe", "sharding", "model"], dims=[ - self.dp_degree, self.pp_degree, self.sharding_degree, - self.mp_degree - ]) + self.dp_degree, + self.pp_degree, + self.sharding_degree, + self.mp_degree, + ], + ) self._hcg = tp.HybridCommunicateGroup(self._topology) if self.mp_degree > 1: - tensor_parallel_configs = self._user_defined_strategy.tensor_parallel_configs + tensor_parallel_configs = ( + self._user_defined_strategy.tensor_parallel_configs + ) tensor_init_seed = tensor_parallel_configs["tensor_init_seed"] if tensor_init_seed == -1: model_parallel_random_seed() @@ 
-822,29 +857,29 @@ class Fleet(object): for name in fetch_var_names ] - self._runtime_handle._save_inference_model(executor, dirname, - feeded_var_names, - fetch_vars, None, True, - 0) + self._runtime_handle._save_inference_model( + executor, dirname, feeded_var_names, fetch_vars, None, True, 0 + ) else: increment_mode = 0 if "mode" in configs: increment_mode = int(configs["mode"]) - self._runtime_handle._save_persistables(executor, - dirname, - main_program=None, - mode=increment_mode) + self._runtime_handle._save_persistables( + executor, dirname, main_program=None, mode=increment_mode + ) @is_non_distributed_check @inited_runtime_handler - def save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True, - mode=0): + def save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + mode=0, + ): """ save inference model for inference. @@ -865,10 +900,15 @@ class Fleet(object): """ - self._runtime_handle._save_inference_model(executor, dirname, - feeded_var_names, - target_vars, main_program, - export_for_deployment, mode) + self._runtime_handle._save_inference_model( + executor, + dirname, + feeded_var_names, + target_vars, + main_program, + export_for_deployment, + mode, + ) @is_non_distributed_check @inited_runtime_handler @@ -913,8 +953,9 @@ class Fleet(object): fleet.save_persistables(exe, "dirname", paddle.static.default_main_program()) """ - self._runtime_handle._save_persistables(executor, dirname, main_program, - mode) + self._runtime_handle._save_persistables( + executor, dirname, main_program, mode + ) @is_non_distributed_check @inited_runtime_handler @@ -953,12 +994,9 @@ class Fleet(object): @is_non_distributed_check @inited_runtime_handler - def save_dense_params(self, - executor, - dirname, - scope, - program, - var_names=None): + def save_dense_params( + self, executor, dirname, scope, program, var_names=None + ): """ save fleet one table from path @@ -982,8 +1020,9 @@ class Fleet(object): fleet.save_dense_params(exe, "path", scope=paddle.static.global_scope(), program=paddle.static.default_main_program()) """ - self._runtime_handle._save_dense_params(executor, dirname, scope, - program, var_names) + self._runtime_handle._save_dense_params( + executor, dirname, scope, program, var_names + ) def shrink(self, threshold=None): self._runtime_handle._shrink(threshold) @@ -1027,7 +1066,8 @@ class Fleet(object): "in fleet.init(). The strategy here is only for compatibility. " "If the strategy in fleet.distributed_optimizer() is " "not None, then it will overwrite the DistributedStrategy in fleet.init(), " - "which will take effect in distributed training.") + "which will take effect in distributed training." + ) self._user_defined_strategy = copy.deepcopy(strategy) self._context = {} @@ -1046,21 +1086,19 @@ class Fleet(object): if hasattr(self.user_defined_optimizer, 'amp_init'): amp_optimizer = self.user_defined_optimizer - assert amp_optimizer is not None, \ - "amp_init can only be used when the amp(auto mixed precision) strategy is turned on." + assert ( + amp_optimizer is not None + ), "amp_init can only be used when the amp(auto mixed precision) strategy is turned on." return amp_optimizer def get_loss_scaling(self): - """Return the real-time loss scaling factor. 
- """ + """Return the real-time loss scaling factor.""" amp_optimizer = self._get_amp_optimizer() return amp_optimizer.get_loss_scaling() - def amp_init(self, - place, - scope=None, - test_program=None, - use_fp16_test=False): + def amp_init( + self, place, scope=None, test_program=None, use_fp16_test=False + ): """ Init the amp training, such as cast fp32 parameters to fp16 type. @@ -1145,11 +1183,9 @@ class Fleet(object): else: return self._context["applied_graph_list"] - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): """ Add distributed operations to minimize ``loss`` by updating ``parameter_list``. @@ -1201,23 +1237,27 @@ class Fleet(object): """ if not isinstance(loss, list): - return self._minimize_impl(loss, startup_program, parameter_list, - no_grad_set) + return self._minimize_impl( + loss, startup_program, parameter_list, no_grad_set + ) else: - if paddle.fluid.framework._non_static_mode( - ) or self._role_maker._is_non_distributed() or self._is_collective: + if ( + paddle.fluid.framework._non_static_mode() + or self._role_maker._is_non_distributed() + or self._is_collective + ): raise ValueError("loss can be list only in PS mode") - return self._minimize_losses_impl(loss, startup_program, - parameter_list, no_grad_set) - - def _minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + return self._minimize_losses_impl( + loss, startup_program, parameter_list, no_grad_set + ) + + def _minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): context = {} context["user_defined_strategy"] = copy.deepcopy( - self._user_defined_strategy) + self._user_defined_strategy + ) if paddle.fluid.framework._non_static_mode(): # imitate target optimizer retrieval target_opt = self.user_defined_optimizer @@ -1230,49 +1270,62 @@ class Fleet(object): if not hasattr(self.origin_main_program, "distributed_info_"): setattr(self.origin_main_program, "distributed_info_", dict()) self.origin_main_program.distributed_info_[ - "dp_degree"] = self._user_defined_strategy.sharding_configs[ - "dp_degree"] + "dp_degree" + ] = self._user_defined_strategy.sharding_configs["dp_degree"] self.origin_main_program.distributed_info_[ - "mp_degree"] = self._user_defined_strategy.sharding_configs[ - "mp_degree"] + "mp_degree" + ] = self._user_defined_strategy.sharding_configs["mp_degree"] self.origin_main_program.distributed_info_[ - "pp_degree"] = self._user_defined_strategy.sharding_configs[ - "pp_degree"] + "pp_degree" + ] = self._user_defined_strategy.sharding_configs["pp_degree"] self.origin_main_program.distributed_info_[ - "sharding_degree"] = self._user_defined_strategy.sharding_configs[ - "sharding_degree"] + "sharding_degree" + ] = self._user_defined_strategy.sharding_configs["sharding_degree"] context["origin_main_program"] = self.origin_main_program context["origin_main_programs"] = [self.origin_main_program] context["loss"] = loss if startup_program == None: - self.origin_startup_program = \ + self.origin_startup_program = ( paddle.static.default_startup_program().clone(for_test=False) + ) startup_program = paddle.static.default_startup_program() else: - self.origin_startup_program = \ - startup_program.clone(for_test=False) + self.origin_startup_program = startup_program.clone(for_test=False) context["origin_startup_program"] = startup_program context["origin_startup_programs"] = 
[startup_program] context["role_maker"] = self._role_maker # Use the auto-parallel's routines instead - if self._user_defined_strategy.semi_auto or self._user_defined_strategy.auto_search: + if ( + self._user_defined_strategy.semi_auto + or self._user_defined_strategy.auto_search + ): from ..auto_parallel.parallelizer import AutoParallelizer + auto_parallelizer = AutoParallelizer(self) - optimize_ops, params_grads, dist_startup_prog, dist_main_prog = auto_parallelizer.parallelize( - loss, startup_program, parameter_list, no_grad_set) + ( + optimize_ops, + params_grads, + dist_startup_prog, + dist_main_prog, + ) = auto_parallelizer.parallelize( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads, dist_startup_prog, dist_main_prog # compile time - distributed_optimizer_list = \ + distributed_optimizer_list = ( MetaOptimizerFactory()._get_valid_meta_optimizers( - self.user_defined_optimizer) + self.user_defined_optimizer + ) + ) context["user_defined_strategy"] = copy.deepcopy( - self._user_defined_strategy) + self._user_defined_strategy + ) copy_user_defined_strategy = copy.deepcopy(self._user_defined_strategy) # trigger the auto-parallel in very strict condition @@ -1290,9 +1343,12 @@ class Fleet(object): can_not_apply_optimizer_list = [] # recall meta optimizers for ranking for opt in distributed_optimizer_list: - opt._set_basic_info(loss, self._role_maker, - self.user_defined_optimizer, - copy_user_defined_strategy) + opt._set_basic_info( + loss, + self._role_maker, + self.user_defined_optimizer, + copy_user_defined_strategy, + ) if opt._can_apply() and not opt._is_graph_out(): valid_optimizer_list.append(opt) elif opt._can_apply() and opt._is_graph_out(): @@ -1300,19 +1356,27 @@ class Fleet(object): else: can_not_apply_optimizer_list.append(opt) # combine recalled meta optimizers to be a valid meta optimizer - meta_optimizer, graph_optimizer = \ - self.strategy_compiler.generate_optimizer( - loss, self._role_maker, self.user_defined_optimizer, - copy_user_defined_strategy, valid_optimizer_list, - valid_graph_optimizer_list) + ( + meta_optimizer, + graph_optimizer, + ) = self.strategy_compiler.generate_optimizer( + loss, + self._role_maker, + self.user_defined_optimizer, + copy_user_defined_strategy, + valid_optimizer_list, + valid_graph_optimizer_list, + ) valid_strategy = self.strategy_compiler._get_valid_strategy( - copy_user_defined_strategy, can_not_apply_optimizer_list) + copy_user_defined_strategy, can_not_apply_optimizer_list + ) context["valid_strategy"] = copy.deepcopy(valid_strategy) logger.debug("valid_strategy: " + str(context["valid_strategy"])) - logger.debug("user_defined_strategy: " + - str(context["user_defined_strategy"])) + logger.debug( + "user_defined_strategy: " + str(context["user_defined_strategy"]) + ) applied_meta_list = self.strategy_compiler._get_applied_meta_list() applied_graph_list = self.strategy_compiler._get_applied_graph_list() @@ -1333,41 +1397,48 @@ class Fleet(object): self._runtime_handle = RuntimeFactory()._create_runtime(context) compiled_program = compiler.CompiledProgram( - self.origin_main_program).with_data_parallel( - loss_name=loss.name, share_vars_from=None) + self.origin_main_program + ).with_data_parallel(loss_name=loss.name, share_vars_from=None) loss.block.program._graph = compiled_program - return self.user_defined_optimizer.minimize(loss, - startup_program, - parameter_list, - no_grad_set=no_grad_set) + return self.user_defined_optimizer.minimize( + loss, startup_program, parameter_list, 
no_grad_set=no_grad_set + ) if meta_optimizer: - logger.debug("before minimize program id: " + - str(id(loss.block.program))) + logger.debug( + "before minimize program id: " + str(id(loss.block.program)) + ) optimize_ops, params_grads = meta_optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set=no_grad_set) - logger.debug("after minimize program id: " + - str(id(loss.block.program))) + loss, startup_program, parameter_list, no_grad_set=no_grad_set + ) + logger.debug( + "after minimize program id: " + str(id(loss.block.program)) + ) default_program = paddle.static.default_main_program() logger.debug("default program id: " + str(id(default_program))) if id(default_program) != id(loss.block.program): paddle.fluid.framework.switch_main_program(loss.block.program) - logger.debug("default program id after switch: " + - str(id(default_program))) + logger.debug( + "default program id after switch: " + str(id(default_program)) + ) else: optimize_ops, params_grads = self.user_defined_optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set=no_grad_set) + loss, startup_program, parameter_list, no_grad_set=no_grad_set + ) context["program_optimize_ops"] = optimize_ops context["program_params_grads"] = params_grads if graph_optimizer: - logger.debug("before graph minimize program id: " + - str(id(loss.block.program))) + logger.debug( + "before graph minimize program id: " + + str(id(loss.block.program)) + ) optimize_ops, params_grads = graph_optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set=no_grad_set) + loss, startup_program, parameter_list, no_grad_set=no_grad_set + ) # since we do not encourage users to use graph operations # if a graph optimizer takes effect, mostly # optimizers_ops and params_grads are None @@ -1382,8 +1453,10 @@ class Fleet(object): opt_info = {} if program._fleet_opt is None else program._fleet_opt opt_info["mpi_size"] = self.worker_num() opt_info["mpi_rank"] = self.worker_index() - for k, v in self._user_defined_strategy.trainer_desc_configs.items( - ): + for ( + k, + v, + ) in self._user_defined_strategy.trainer_desc_configs.items(): if v or k not in opt_info: opt_info[k] = v program._fleet_opt = opt_info @@ -1392,15 +1465,18 @@ class Fleet(object): self._runtime_handle = RuntimeFactory()._create_runtime(context) import paddle.distributed.fleet as fleet + fleet.util._set_strategy(context["valid_strategy"]) return optimize_ops, params_grads - def _minimize_losses_impl(self, - losses, - startup_programs=None, - parameter_list=None, - no_grad_set=None): + def _minimize_losses_impl( + self, + losses, + startup_programs=None, + parameter_list=None, + no_grad_set=None, + ): context = {} # cache original feed forward program @@ -1416,7 +1492,8 @@ class Fleet(object): startup_programs = [paddle.static.default_startup_program()] else: raise ValueError( - "startup_program can't be None when loss is list.") + "startup_program can't be None when loss is list." 
+ ) self.origin_startup_program = startup_programs[0].clone(for_test=False) context["origin_startup_program"] = startup_programs[0] context["origin_startup_programs"] = [] @@ -1426,7 +1503,8 @@ class Fleet(object): context["role_maker"] = self._role_maker context["user_defined_strategy"] = copy.deepcopy( - self._user_defined_strategy) + self._user_defined_strategy + ) context["valid_strategy"] = copy.deepcopy(self._user_defined_strategy) @@ -1439,12 +1517,17 @@ class Fleet(object): params_grads = [] from .meta_optimizers import ParameterServerOptimizer + ps_optimizer = ParameterServerOptimizer(self.user_defined_optimizer) - ps_optimizer._set_basic_info(losses, self._role_maker, - self.user_defined_optimizer, - self._user_defined_strategy) + ps_optimizer._set_basic_info( + losses, + self._role_maker, + self.user_defined_optimizer, + self._user_defined_strategy, + ) optimize_ops, params_grads = ps_optimizer.minimize_losses_impl( - losses, startup_programs, parameter_list, no_grad_set=no_grad_set) + losses, startup_programs, parameter_list, no_grad_set=no_grad_set + ) # default_program = paddle.static.default_main_program() @@ -1459,18 +1542,24 @@ class Fleet(object): opt_info = {} if program._fleet_opt is None else program._fleet_opt opt_info["mpi_size"] = self.worker_num() opt_info["mpi_rank"] = self.worker_index() - for k, v in self._user_defined_strategy.trainer_desc_configs.items( - ): + for ( + k, + v, + ) in self._user_defined_strategy.trainer_desc_configs.items(): if v or k not in opt_info: opt_info[k] = v program._fleet_opt = opt_info - logger.debug("fleet base opt info: " + str(id(program)) + - str(program._fleet_opt)) + logger.debug( + "fleet base opt info: " + + str(id(program)) + + str(program._fleet_opt) + ) if self._runtime_handle is None: self._runtime_handle = RuntimeFactory()._create_runtime(context) import paddle.distributed.fleet as fleet + fleet.util._set_strategy(context["valid_strategy"]) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/fleet_executor_utils.py b/python/paddle/distributed/fleet/fleet_executor_utils.py index 48ef34f4603ca91d26b385f5c7ddc588cb3f6263..4eb4108d5d59504d0acac8f5c78e417e896e3bc8 100644 --- a/python/paddle/distributed/fleet/fleet_executor_utils.py +++ b/python/paddle/distributed/fleet/fleet_executor_utils.py @@ -22,16 +22,18 @@ class TaskNode: Python side TaskNode, connection to the c++ side TaskNode """ - def __init__(self, - rank, - max_run_times, - max_slot_times, - role=None, - node_type=None, - task_id=0, - ops=None, - program=None, - lazy_initialize=False): + def __init__( + self, + rank, + max_run_times, + max_slot_times, + role=None, + node_type=None, + task_id=0, + ops=None, + program=None, + lazy_initialize=False, + ): """ :param rank (int): Current rank of the task node. :param max_run_times (int): The max run times of the task node. @@ -43,10 +45,12 @@ class TaskNode: :param program (Program): An instance of Program to init the task node. :param lazy_initialize (bool): In user-defined task, the program may change adding feed/fetch op. As efficient consideration, the task node will have the C++ object later. """ - assert ((ops is not None) ^ (program is not None)), \ - "Should provide only one of ops or program to task node." - assert (not ((ops is not None) and lazy_initialize)), \ - "Lazy initialization doesn't support with ops list" + assert (ops is not None) ^ ( + program is not None + ), "Should provide only one of ops or program to task node." 
+ assert not ( + (ops is not None) and lazy_initialize + ), "Lazy initialization doesn't support with ops list" self.id = int(task_id) self.rank = rank self.max_run_times = max_run_times @@ -61,20 +65,28 @@ class TaskNode: self.downstreams = [] if not lazy_initialize: if ops is not None: - assert role is not None and task_id is not None, \ - "If init task node with ops, should provide `role` and `task_id`." - self.node = core.TaskNode(role, ops, rank, task_id, - max_run_times, max_slot_times) + assert ( + role is not None and task_id is not None + ), "If init task node with ops, should provide `role` and `task_id`." + self.node = core.TaskNode( + role, ops, rank, task_id, max_run_times, max_slot_times + ) else: - self.node = core.TaskNode(program.desc, rank, self.id, - max_run_times, max_slot_times) + self.node = core.TaskNode( + program.desc, rank, self.id, max_run_times, max_slot_times + ) if self.node_type: self.node.set_type(self.node_type) def task_node(self): if self.lazy_initialize: - self.node = core.TaskNode(self.program.desc, self.rank, self.id, - self.max_run_times, self.max_slot_times) + self.node = core.TaskNode( + self.program.desc, + self.rank, + self.id, + self.max_run_times, + self.max_slot_times, + ) if self.node_type: self.node.set_type(self.node_type) if self.run_pre_steps: @@ -89,12 +101,15 @@ class TaskNode: return self.node def set_program(self, program): - assert self.lazy_initialize, \ - "Inside program is unchangable for immediate initialized task node. Set the lazy_initialize to be true if the inside program need to be update. Remember to do all your change before eval node.task_node()." + assert ( + self.lazy_initialize + ), "Inside program is unchangable for immediate initialized task node. Set the lazy_initialize to be true if the inside program need to be update. Remember to do all your change before eval node.task_node()." self.program = program def get_program(self): - assert self.program is not None, "The task node is not initialized using program" + assert ( + self.program is not None + ), "The task node is not initialized using program" return self.program def set_run_pre_steps(self, steps): @@ -142,10 +157,16 @@ class CoordSys: :param coord: The coord to be tested :return: False if valid, True if invalid. 
""" - return coord['mp_idx'] < 0 or coord['mp_idx'] >= self.mp_degree or \ - coord['sharding_idx'] < 0 or coord['sharding_idx'] >= self.sharding_degree or \ - coord['pp_idx'] < 0 or coord['pp_idx'] >= self.pp_degree or \ - coord['dp_idx'] < 0 or coord['dp_idx'] >= self.dp_degree + return ( + coord['mp_idx'] < 0 + or coord['mp_idx'] >= self.mp_degree + or coord['sharding_idx'] < 0 + or coord['sharding_idx'] >= self.sharding_degree + or coord['pp_idx'] < 0 + or coord['pp_idx'] >= self.pp_degree + or coord['dp_idx'] < 0 + or coord['dp_idx'] >= self.dp_degree + ) def coord_to_rank(self, coord): """ @@ -155,9 +176,15 @@ class CoordSys: """ if self._invalide_coord(coord): return -1 - return int(coord['dp_idx'] * self.pp_degree * self.sharding_degree * self.mp_degree + \ - coord['pp_idx'] * self.sharding_degree * self.mp_degree + \ - coord['sharding_idx'] * self.mp_degree + coord['mp_idx']) + return int( + coord['dp_idx'] + * self.pp_degree + * self.sharding_degree + * self.mp_degree + + coord['pp_idx'] * self.sharding_degree * self.mp_degree + + coord['sharding_idx'] * self.mp_degree + + coord['mp_idx'] + ) def rank_to_coord(self, rank): """ @@ -176,17 +203,14 @@ class CoordSys: 'mp_idx': int(mp_idx), 'sharding_idx': int(sharding_idx), 'pp_idx': int(pp_idx), - 'dp_idx': int(dp_idx) + 'dp_idx': int(dp_idx), } class FleetExecutorUtils: - - def __init__(self, - dist_strategy=None, - rank=None, - nrank=None, - max_run_times=None): + def __init__( + self, dist_strategy=None, rank=None, nrank=None, max_run_times=None + ): self.dist_strategy = dist_strategy self.rank = rank self.nrank = nrank @@ -206,12 +230,14 @@ class FleetExecutorUtils: return op_role == int(OpRole.Optimize.LRSched) def is_forward_op(self, op_role): - return (op_role == int(OpRole.Forward)) or \ - (op_role == (int(OpRole.Forward) | int(OpRole.Loss))) + return (op_role == int(OpRole.Forward)) or ( + op_role == (int(OpRole.Forward) | int(OpRole.Loss)) + ) def is_backward_op(self, op_role): - return (op_role == int(OpRole.Backward)) or \ - (op_role == (int(OpRole.Backward) | int(OpRole.Loss))) + return (op_role == int(OpRole.Backward)) or ( + op_role == (int(OpRole.Backward) | int(OpRole.Loss)) + ) def split_program_to_op_list(self, program): op_list_map = {"lr": [], "fwd": [], "bwd": [], "opt": []} @@ -233,17 +259,19 @@ class FleetExecutorUtils: return op_list_map def convert_op_list_to_program(self, op_list, complete_program): - #TODO(liyurui): Complete this convert logic + # TODO(liyurui): Complete this convert logic program_map = { "lr": Program(), "fwd": Program(), "bwd": Program(), - "opt": Program() + "opt": Program(), } return program_map def build_1f1b_dependency(self, task_node_map): - assert not self.is_auto_parallel, "Handly add dependency should not be invoked in auto parallel mode" + assert ( + not self.is_auto_parallel + ), "Handly add dependency should not be invoked in auto parallel mode" # Generated the dependency based on this graph: # lr(1:m) -> forward -> backward -> (m:1)optimize # ↑ ↓ @@ -253,8 +281,9 @@ class FleetExecutorUtils: # add dependency intra stage cur_start_id = self.rank * self.num_of_functionality - pp_buff_size = int(self.dist_strategy['pp_degree'] - - self.coord['pp_idx']) + pp_buff_size = int( + self.dist_strategy['pp_degree'] - self.coord['pp_idx'] + ) task_node_map["lr"].add_downstream_task(cur_start_id + 1) task_node_map["fwd"].add_upstream_task(cur_start_id) task_node_map["fwd"].add_downstream_task(cur_start_id + 2, pp_buff_size) @@ -267,8 +296,8 @@ class FleetExecutorUtils: 
downstream_coord['pp_idx'] = downstream_coord['pp_idx'] + 1 pp_upstream = self.coord_sys.coord_to_rank(upstream_coord) pp_downstream = self.coord_sys.coord_to_rank(downstream_coord) - first_stage = (pp_upstream == -1) - last_stage = (pp_downstream == -1) + first_stage = pp_upstream == -1 + last_stage = pp_downstream == -1 prev_pp_start_id = pp_upstream * self.num_of_functionality next_pp_start_id = pp_downstream * self.num_of_functionality if not first_stage: @@ -282,31 +311,39 @@ class FleetExecutorUtils: def construct_task_nodes_1f1b(self, program_map): max_slot_times = int(self.max_run_times - self.coord['pp_idx']) cur_start_id = int(self.rank * self.num_of_functionality) - lr_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - program=program_map["lr"], - task_id=cur_start_id) - fwd_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - program=program_map["fwd"], - task_id=cur_start_id + 1) - bwd_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - program=program_map["bwd"], - task_id=cur_start_id + 2) - opt_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - program=program_map["opt"], - task_id=cur_start_id + 3) + lr_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + program=program_map["lr"], + task_id=cur_start_id, + ) + fwd_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + program=program_map["fwd"], + task_id=cur_start_id + 1, + ) + bwd_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + program=program_map["bwd"], + task_id=cur_start_id + 2, + ) + opt_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + program=program_map["opt"], + task_id=cur_start_id + 3, + ) return { "lr": lr_task_node, "fwd": fwd_task_node, "bwd": bwd_task_node, - "opt": opt_task_node + "opt": opt_task_node, } def task_id_to_rank(self): @@ -319,51 +356,61 @@ class FleetExecutorUtils: def construct_task_nodes_1f1b_op_list(self, op_list_map): max_slot_times = int(self.max_run_times - self.coord['pp_idx']) cur_start_id = int(self.rank * self.num_of_functionality) - lr_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - role=int(OpRole.Optimize.LRSched), - ops=op_list_map["lr"], - task_id=cur_start_id, - node_type="Amplifier") + lr_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + role=int(OpRole.Optimize.LRSched), + ops=op_list_map["lr"], + task_id=cur_start_id, + node_type="Amplifier", + ) lr_task_node.set_run_pre_steps(self.max_run_times) - fwd_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - role=int(OpRole.Forward), - ops=op_list_map["fwd"], - task_id=cur_start_id + 1, - node_type="Compute") - bwd_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - role=int(OpRole.Backward), - ops=op_list_map["bwd"], - task_id=cur_start_id + 2, - node_type="Compute") - opt_task_node = TaskNode(rank=self.rank, - max_run_times=self.max_run_times, - max_slot_times=max_slot_times, - role=int(OpRole.Optimize), - ops=op_list_map["opt"], - 
task_id=cur_start_id + 3, - node_type="Amplifier") + fwd_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + role=int(OpRole.Forward), + ops=op_list_map["fwd"], + task_id=cur_start_id + 1, + node_type="Compute", + ) + bwd_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + role=int(OpRole.Backward), + ops=op_list_map["bwd"], + task_id=cur_start_id + 2, + node_type="Compute", + ) + opt_task_node = TaskNode( + rank=self.rank, + max_run_times=self.max_run_times, + max_slot_times=max_slot_times, + role=int(OpRole.Optimize), + ops=op_list_map["opt"], + task_id=cur_start_id + 3, + node_type="Amplifier", + ) opt_task_node.set_run_pre_steps(self.max_run_times) opt_task_node.set_run_at_offset(self.max_run_times - 1) return { "lr": lr_task_node, "fwd": fwd_task_node, "bwd": bwd_task_node, - "opt": opt_task_node + "opt": opt_task_node, } -def run1f1b(program, - rank, - max_run_times, - dist_opt, - nrank, - with_standalone_executor=False): +def run1f1b( + program, + rank, + max_run_times, + dist_opt, + nrank, + with_standalone_executor=False, +): """ Split the program to support 1f1b pipeline scheduler. This funct will split the program based on the op_role. @@ -380,24 +427,29 @@ def run1f1b(program, task_id_to_rank (dict): task nodes' ids to it's corresponding rank """ print("fleet executor will use python side 1f1b scheduler.") - fleet_executor_utils = FleetExecutorUtils(dist_strategy=dist_opt, - rank=rank, - nrank=nrank, - max_run_times=max_run_times) + fleet_executor_utils = FleetExecutorUtils( + dist_strategy=dist_opt, + rank=rank, + nrank=nrank, + max_run_times=max_run_times, + ) op_list_map = fleet_executor_utils.split_program_to_op_list(program) task_node_map = None if with_standalone_executor: program_map = fleet_executor_utils.convert_op_list_to_program( - op_list_map, program) + op_list_map, program + ) task_node_map = fleet_executor_utils.construct_task_nodes_1f1b( - program_map) + program_map + ) else: op_desc_list_map = {"lr": [], "fwd": [], "bwd": [], "opt": []} for key in op_list_map: for op in op_list_map[key]: op_desc_list_map[key].append(op.desc) task_node_map = fleet_executor_utils.construct_task_nodes_1f1b_op_list( - op_desc_list_map) + op_desc_list_map + ) task_node_map = fleet_executor_utils.build_1f1b_dependency(task_node_map) task_id_to_rank = fleet_executor_utils.task_id_to_rank() task_node_list = [task_node_map[key].task_node() for key in task_node_map] @@ -414,10 +466,12 @@ def origin(program, rank): task_id_to_rank (dict): a fake dict, since there is no upstream or downstream, this dict won't be used """ print("fleet executor will use python side origin scheduler.") - task_node = TaskNode(program=program, - rank=rank, - node_type="Compute", - max_run_times=1, - max_slot_times=1) + task_node = TaskNode( + program=program, + rank=rank, + node_type="Compute", + max_run_times=1, + max_slot_times=1, + ) task_id_to_rank = {task_node.task_id(): rank} return [task_node.task_node()], task_id_to_rank diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index a50c08cbaf96a60ac4e95822f433a05fdf4183e0..ca301c52a7f252a0e300845a03950433980935a3 100755 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -65,10 +65,21 @@ from argparse import ArgumentParser, REMAINDER import paddle.fluid as fluid from paddle.distributed.fleet import launch_utils from 
paddle.distributed.fleet.launch_utils import ( - get_host_name_ip, find_free_ports, logger, get_cluster, DeviceMode, - start_local_trainers, direct_start, watch_local_trainers, - terminate_local_procs, DistributeMode, ParameterServerLauncher, get_logger, - check_backend, block_windows_and_macos) + get_host_name_ip, + find_free_ports, + logger, + get_cluster, + DeviceMode, + start_local_trainers, + direct_start, + watch_local_trainers, + terminate_local_procs, + DistributeMode, + ParameterServerLauncher, + get_logger, + check_backend, + block_windows_and_macos, +) from paddle.distributed.fleet import cloud_utils from paddle.distributed.fleet import ascend_utils @@ -92,33 +103,38 @@ def _parse_args(): parser = ArgumentParser( description='''start paddle training using multi-process mode. see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2- -''') +''' + ) base_group = parser.add_argument_group("Base Parameters") base_group.add_argument( "--log_dir", type=str, default="log", - help="The path for each process's log. Default --log_dir=log/") + help="The path for each process's log. Default --log_dir=log/", + ) base_group.add_argument( "--backend", type=str, default=os.environ.get('PADDLE_DISTRI_BACKEND', 'auto'), help="Specifize the backend, can be gloo|nccl|bkcl|auto|hccl|heter. " - "Default value is auto which perfers nccl or bkcl.") + "Default value is auto which perfers nccl or bkcl.", + ) base_group.add_argument( "--nproc_per_node", type=int, default=None, help="The number of processes to launch on a node." "In gpu training, it should be less or equal to the gpus number of you system(or you set by --gpus). And so each process can" - " bound to one or average number of gpus.") + " bound to one or average number of gpus.", + ) base_group.add_argument( "--run_mode", type=str, default=None, - help="run mode of job, can be:collective/ps/ps-heter") + help="run mode of job, can be:collective/ps/ps-heter", + ) if fluid.core.is_compiled_with_cuda(): base_group.add_argument( @@ -127,7 +143,7 @@ see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/tra default=None, help="It's for gpu training." "For example:" - "--gpus=\"0,1,2,3\" will launch four training processes each bound to one gpu." + "--gpus=\"0,1,2,3\" will launch four training processes each bound to one gpu.", ) base_group.add_argument("--selected_gpus", dest="gpus") @@ -137,7 +153,7 @@ see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/tra type=str, default=None, help="It's for xpu training. For example: " - "--xpus=\"0,1,2,3\" will launch four training processes each bound to one xpu." + "--xpus=\"0,1,2,3\" will launch four training processes each bound to one xpu.", ) base_group.add_argument("--selected_xpus", dest="xpus") @@ -147,7 +163,7 @@ see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/tra type=str, default=None, help="It's for xpu training. For example: " - "--npus=\"0,1,2,3\" will launch four training processes each bound to one npu." + "--npus=\"0,1,2,3\" will launch four training processes each bound to one npu.", ) base_group.add_argument("--selected_npus", dest="npus") @@ -157,16 +173,18 @@ see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/tra type=str, default=None, help="It's for mlu training. For example: " - "--mlus=\"0,1,2,3\" will launch four training processes each bound to one mlu." 
+ "--mlus=\"0,1,2,3\" will launch four training processes each bound to one mlu.", ) base_group.add_argument("--selected_mlus", dest="mlus") - base_group.add_argument("training_script", - type=str, - help="The full path to the single GPU training " - "program/script to be launched in parallel, " - "followed by all the arguments for the " - "training script") + base_group.add_argument( + "training_script", + type=str, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script", + ) base_group.add_argument('training_script_args', nargs=REMAINDER) @@ -177,79 +195,86 @@ see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/tra "--ips", type=str, default="127.0.0.1", - help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..") + help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..", + ) collective_group.add_argument( "--cluster_topo_path", type=str, default=None, help="A json format file will be stored in this path which is used" - "to represent the cluster topology information for auto parallel.") + "to represent the cluster topology information for auto parallel.", + ) collective_group.add_argument( "--rank_mapping_path", type=str, default=None, help="A json format file will be stored in this path which is used" - "to map processes to machines for auto parallel.") + "to map processes to machines for auto parallel.", + ) collective_group.add_argument( "--enable_auto_mapping", type=bool, default=False, - help="Set true to enable the lazy launch for auto-parallel scenario.") + help="Set true to enable the lazy launch for auto-parallel scenario.", + ) ps_group = parser.add_argument_group("Parameter-Server Parameters") # for parameter server - ps_group.add_argument("--servers", - type=str, - default="", - help="User defined servers ip:port") - ps_group.add_argument("--workers", - type=str, - default="", - help="User defined workers ip:port") - ps_group.add_argument("--coordinators", - type=str, - default="", - help="User defined coordinators ip:port") + ps_group.add_argument( + "--servers", type=str, default="", help="User defined servers ip:port" + ) + ps_group.add_argument( + "--workers", type=str, default="", help="User defined workers ip:port" + ) + ps_group.add_argument( + "--coordinators", + type=str, + default="", + help="User defined coordinators ip:port", + ) ps_group.add_argument( "--heter_workers", type=str, default="", - help="User defined heter workers in each stage ip1:port1;ip2:port2") + help="User defined heter workers in each stage ip1:port1;ip2:port2", + ) ps_group.add_argument( "--heter_devices", type=str, default="", - help="User defined heter devices in each stage cpu;gpu;cpu") + help="User defined heter devices in each stage cpu;gpu;cpu", + ) ps_group.add_argument("--worker_num", type=int, help="number of workers") - ps_group.add_argument("--coordinator_num", - type=int, - help="number of coordinators") + ps_group.add_argument( + "--coordinator_num", type=int, help="number of coordinators" + ) ps_group.add_argument("--server_num", type=int, help="number of servers") - ps_group.add_argument("--heter_worker_num", - type=str, - help="number of heter_workers in each stage 1;2;3") + ps_group.add_argument( + "--heter_worker_num", + type=str, + help="number of heter_workers in each stage 1;2;3", + ) ps_group.add_argument("--http_port", type=int, help="Gloo http Port") # parameter elastic mode elastic_group = 
parser.add_argument_group("Elastic Parameters") - elastic_group.add_argument("--elastic_server", - type=str, - help="etcd server host:port") - elastic_group.add_argument("--elastic_pre_hook", - type=str, - help="elastic pre_hook shell cmd") + elastic_group.add_argument( + "--elastic_server", type=str, help="etcd server host:port" + ) + elastic_group.add_argument( + "--elastic_pre_hook", type=str, help="elastic pre_hook shell cmd" + ) elastic_group.add_argument("--job_id", type=str, help="job unique id") elastic_group.add_argument("--np", type=int, help="job pod/node number") elastic_group.add_argument("--scale", type=int, default=0, help="scale np") - elastic_group.add_argument("--host", - type=str, - help="bind host, default to POD_IP env") - elastic_group.add_argument("--force", - type=bool, - default=False, - help="update np force") + elastic_group.add_argument( + "--host", type=str, help="bind host, default to POD_IP env" + ) + elastic_group.add_argument( + "--force", type=bool, default=False, help="update np force" + ) known_args, _ = parser.parse_known_args() return known_args @@ -265,16 +290,23 @@ def get_cluster_from_args(args, device_mode, devices_per_proc): else: _, node_ip = get_host_name_ip() - assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \ - % (node_ip, node_ips) + assert ( + node_ip in node_ips + ), "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips) node_rank = node_ips.index(node_ip) - logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format( - node_ips, node_ip, node_rank)) + logger.debug( + "parsed from args: node_ips:{} node_ip:{} node_rank:{}".format( + node_ips, node_ip, node_rank + ) + ) free_ports = None - if not cloud_utils.use_paddlecloud() and len( - node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None: + if ( + not cloud_utils.use_paddlecloud() + and len(node_ips) <= 1 + and os.environ.get('FLAGS_START_PORT') is None + ): free_ports = find_free_ports(len(devices_per_proc)) if free_ports is not None: free_ports = list(free_ports) @@ -291,17 +323,21 @@ def get_cluster_from_args(args, device_mode, devices_per_proc): trainer_endpoints = [] for ip in node_ips: trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) - return get_cluster(node_ips, node_ip, trainer_endpoints, device_mode, - devices_per_proc) + return get_cluster( + node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc + ) def cpuonly_check(args): if args.ips and len(args.ips.split(',')) > 1: raise RuntimeError( "CPUONLY launch only support single trainer, that is len(ips)=1, but got %s." 
- % args.ips) + % args.ips + ) if args.run_mode: - assert args.run_mode == 'cpuonly', "CPUONLY launch only support run mode is CPUONLY" + assert ( + args.run_mode == 'cpuonly' + ), "CPUONLY launch only support run mode is CPUONLY" if args.servers: raise RuntimeError("CPUONLY launch can't have --servers as arguments.") return True @@ -309,15 +345,20 @@ def cpuonly_check(args): def get_cluster_info(args): # parse arguments, used for cloud-single-machine and local - if args.backend == 'gloo': cpuonly_check(args) + if args.backend == 'gloo': + cpuonly_check(args) if args.enable_auto_mapping: (device_mode, devices_per_proc) = (DeviceMode.GPU, []) else: - (device_mode, - devices_per_proc) = launch_utils.get_device_proc_info(args) + (device_mode, devices_per_proc) = launch_utils.get_device_proc_info( + args + ) trainers_num = cloud_utils.get_trainers_num() - logger.debug("parsed from args trainerss_num:{} mode:{} devices:{}".format( - trainers_num, device_mode, devices_per_proc)) + logger.debug( + "parsed from args trainerss_num:{} mode:{} devices:{}".format( + trainers_num, device_mode, devices_per_proc + ) + ) cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") @@ -329,52 +370,69 @@ def get_cluster_info(args): start_port = os.environ.get('FLAGS_START_PORT') # auto mapping between processes and devices for auto-parallel if args.enable_auto_mapping == True: - assert args.cluster_topo_path is not None, \ - "The cluster topology must be provied when enabling auto mapping." + assert ( + args.cluster_topo_path is not None + ), "The cluster topology must be provied when enabling auto mapping." rank_mapping_path = args.rank_mapping_path or os.getenv( - "PADDLE_RANK_MAPPING_PATH") + "PADDLE_RANK_MAPPING_PATH" + ) if not rank_mapping_path: os.environ["PADDLE_NEED_RANK_MAPPING"] = str(True) os.environ["PADDLE_ENABLE_ELASTIC"] = str( - enable_elastic(args, device_mode)) + enable_elastic(args, device_mode) + ) cwd = pathlib.Path().resolve() - rank_mapping_path = os.path.join(cwd, - "auto_parallel_rank_mapping.json") + rank_mapping_path = os.path.join( + cwd, "auto_parallel_rank_mapping.json" + ) os.environ["PADDLE_RANK_MAPPING_PATH"] = str(rank_mapping_path) original_args = sys.argv[1:] os.environ["PADDLE_ORIGINAL_CMD_ARGS"] = " ".join(original_args) os.environ["PADDLE_CLUSTER_TOPO_PATH"] = str(args.cluster_topo_path) os.environ["PADDLE_ENABLE_AUTO_MAPPING"] = str( - args.enable_auto_mapping) - cluster, pod = launch_utils.get_mapped_cluster_from_args_without_rank_mapping( - args, device_mode) + args.enable_auto_mapping + ) + ( + cluster, + pod, + ) = launch_utils.get_mapped_cluster_from_args_without_rank_mapping( + args, device_mode + ) else: os.environ["PADDLE_NEED_RANK_MAPPING"] = str(False) os.environ["PADDLE_ENABLE_ELASTIC"] = str( - enable_elastic(args, device_mode)) + enable_elastic(args, device_mode) + ) os.environ["PADDLE_CLUSTER_TOPO_PATH"] = str(args.cluster_topo_path) os.environ["PADDLE_RANK_MAPPING_PATH"] = str(rank_mapping_path) os.environ["PADDLE_ENABLE_AUTO_MAPPING"] = str( - args.enable_auto_mapping) - cluster, pod = launch_utils.get_mapped_cluster_from_args_with_rank_mapping( - args, device_mode) + args.enable_auto_mapping + ) + ( + cluster, + pod, + ) = launch_utils.get_mapped_cluster_from_args_with_rank_mapping( + args, device_mode + ) elif cloud_utils.use_paddlecloud() and trainers_num != 1: - cluster, pod = cloud_utils.get_cloud_cluster(args.ips, device_mode, - devices_per_proc, - start_port) + cluster, pod = cloud_utils.get_cloud_cluster( + args.ips, device_mode, devices_per_proc, 
start_port + ) logger.debug("get cluster from cloud:{}".format(cluster)) elif device_mode == DeviceMode.ASCEND_NPU: # for ascend - cluster, pod = ascend_utils.get_cloud_cluster(rank_table_file=os.getenv( - "RANK_TABLE_FILE", None), - device_mode=device_mode, - start_port=start_port) + cluster, pod = ascend_utils.get_cloud_cluster( + rank_table_file=os.getenv("RANK_TABLE_FILE", None), + device_mode=device_mode, + start_port=start_port, + ) else: # trainers_num = 1 or not use paddlecloud ips="a,b" - cluster, pod = get_cluster_from_args(args, device_mode, - devices_per_proc) + cluster, pod = get_cluster_from_args( + args, device_mode, devices_per_proc + ) logger.debug("get cluster from args:{}".format(cluster)) return cluster, pod @@ -394,12 +452,14 @@ def launch_collective(args): cluster, pod = get_cluster_info(args) global_envs = get_global_envs(args, tmp_dir) - procs = start_local_trainers(cluster, - pod, - training_script=args.training_script, - training_script_args=args.training_script_args, - log_dir=args.log_dir, - envs=global_envs) + procs = start_local_trainers( + cluster, + pod, + training_script=args.training_script, + training_script_args=args.training_script_args, + log_dir=args.log_dir, + envs=global_envs, + ) for idx, proc in enumerate(procs): print("launch proc_id:{} idx:{}".format(proc.proc.pid, idx)) @@ -431,7 +491,7 @@ def launch_ps(args, distribute_mode): if cloud_flag and distribute_mode == DistributeMode.PS: direct_start(args) return - #elif cloud_flag and distribute_mode == DistributeMode.PS_HETER: + # elif cloud_flag and distribute_mode == DistributeMode.PS_HETER: # cloud_ps_heter_env_set(args) # args.workers = os.getenv("PADDLE_TRAINER_ENDPOINTS") # args.servers = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST") @@ -443,7 +503,8 @@ def launch_ps(args, distribute_mode): def infer_backend(args): - if args.backend != "auto": return + if args.backend != "auto": + return if fluid.core.is_compiled_with_cuda(): args.backend = 'nccl' elif fluid.core.is_compiled_with_npu(): @@ -469,8 +530,14 @@ def which_distributed_mode(args): return DistributeMode.PS_HETER ps_args = [ - '--worker_num', '--server_num', '--heter_worker_num', '--servers', - '--workers', '--heter_workers', '--heter_devices', '--http_port' + '--worker_num', + '--server_num', + '--heter_worker_num', + '--servers', + '--workers', + '--heter_workers', + '--heter_devices', + '--http_port', ] collective_args = ['--ips'] @@ -482,7 +549,8 @@ def which_distributed_mode(args): ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1]) ] has_collective_args = [ - co_arg for co_arg in collective_args + co_arg + for co_arg in collective_args if co_arg in " ".join(sys.argv[1:-1]) ] @@ -504,8 +572,10 @@ def which_distributed_mode(args): if len(has_ps_args) > 0: logger.info( - "Run parameter-sever mode. pserver arguments:{}, accelerators count:{}" - .format(has_ps_args, accelerators)) + "Run parameter-sever mode. pserver arguments:{}, accelerators count:{}".format( + has_ps_args, accelerators + ) + ) has_ps_heter_args = list(set(has_ps_args) & set(ps_heter_args)) has_coordinator_args = list(set(has_ps_args) & set(coordinator_args)) if len(has_ps_heter_args) > 0: @@ -515,23 +585,29 @@ def which_distributed_mode(args): elif len(has_collective_args) > 0: logger.info( "Run collective mode. 
gpu arguments:{}, cuda count:{}".format( - has_collective_args, accelerators)) + has_collective_args, accelerators + ) + ) return DistributeMode.COLLECTIVE else: - if not fluid.core.is_compiled_with_cuda( - ) and not fluid.core.is_compiled_with_xpu( - ) and not fluid.core.is_compiled_with_mlu(): + if ( + not fluid.core.is_compiled_with_cuda() + and not fluid.core.is_compiled_with_xpu() + and not fluid.core.is_compiled_with_mlu() + ): if args.servers: logger.warning( "Not found distinct arguments and not compiled with cuda or xpu or npu or mlu. " - "But found args.servers not empty, default use ps mode") + "But found args.servers not empty, default use ps mode" + ) return DistributeMode.PS else: return DistributeMode.COLLECTIVE else: logger.warning( "Not found distinct arguments and compiled with cuda or xpu or npu or mlu. " - "Default use collective mode") + "Default use collective mode" + ) return DistributeMode.COLLECTIVE @@ -716,19 +792,23 @@ def launch(): if args.backend == 'auto': distribute_mode = which_distributed_mode( - args) # which_distributed_mode must modify args.backend + args + ) # which_distributed_mode must modify args.backend else: - assert args.run_mode == 'collective' or args.run_mode == None, "When backend is not 'auto', run mode must be collective" + assert ( + args.run_mode == 'collective' or args.run_mode == None + ), "When backend is not 'auto', run mode must be collective" check_backend(args.backend) distribute_mode = DistributeMode.COLLECTIVE - #assert args.backend in ['gloo', 'nccl', 'bkcl', 'cncl', 'heter', 'unknown'] + # assert args.backend in ['gloo', 'nccl', 'bkcl', 'cncl', 'heter', 'unknown'] if args.backend == 'gloo': logger.warning("launch start with CPUONLY mode") block_windows_and_macos( - args.backend) # raise error when using gloo on windows or macos + args.backend + ) # raise error when using gloo on windows or macos if enable_elastic(args, distribute_mode): launch_elastic(args, distribute_mode) diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index d29741cbef605ed5a95963acde137ad0c36dc7c5..4ec2aa07787cb470aedc282fe208e9c728846aa1 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -35,19 +35,21 @@ logger = logging.getLogger("root") logger.propagate = False -class DistributeMode(): +class DistributeMode: """ There are various mode for fleetrun, each of them is designed for different model. 
""" + COLLECTIVE = 0 PS = 1 PS_HETER = 2 -class DeviceMode(): +class DeviceMode: """ Training devices type """ + UNKNOWN = -1 CPU = 0 GPU = 1 @@ -59,7 +61,6 @@ class DeviceMode(): class Cluster(object): - def __init__(self, hdfs): self.job_server = None self.pods = [] @@ -68,8 +69,11 @@ class Cluster(object): def __str__(self): return "job_server:{} pods:{} job_stage_flag:{} hdfs:{}".format( - self.job_server, [str(pod) for pod in self.pods], - self.job_stage_flag, self.hdfs) + self.job_server, + [str(pod) for pod in self.pods], + self.job_stage_flag, + self.hdfs, + ) def __eq__(self, cluster): if len(self.pods) != len(cluster.pods): @@ -115,8 +119,9 @@ class Cluster(object): r = [] for pod in self.pods: ep = "{}:{}".format(pod.addr, pod.port) - assert pod.port != None and pod.addr != None, "{} not a valid endpoint".format( - ep) + assert ( + pod.port != None and pod.addr != None + ), "{} not a valid endpoint".format(ep) r.append(ep) return r @@ -129,7 +134,6 @@ class Cluster(object): class JobServer(object): - def __init__(self): self.endpoint = None @@ -144,7 +148,6 @@ class JobServer(object): class Trainer(object): - def __init__(self): self.accelerators = [] self.endpoint = None @@ -153,14 +156,14 @@ class Trainer(object): def __str__(self): return "accelerator:{} endpoint:{} rank:{}".format( - self.accelerators, self.endpoint, self.rank) + self.accelerators, self.endpoint, self.rank + ) def __eq__(self, t): if len(self.accelerators) != len(t.accelerators): return False - if self.endpoint != t.endpoint or \ - self.rank != t.rank: + if self.endpoint != t.endpoint or self.rank != t.rank: return False for a, b in zip(self.accelerators, t.accelerators): @@ -177,7 +180,6 @@ class Trainer(object): class Pod(object): - def __init__(self): self.rank = None self.id = None @@ -194,29 +196,39 @@ class Pod(object): def __str__(self): return "rank:{} id:{} addr:{} port:{} visible_accelerator:{} trainers:{} servers:{} \ workers:{} heter_workers:{} coordinators:{}".format( - self.rank, self.id, self.addr, self.port, self.accelerators, - [str(t) for t in self.trainers], [str(s) for s in self.servers], - [str(w) - for w in self.workers], [str(h) for h in self.heter_workers], - [str(c) for c in self.coordinators]) + self.rank, + self.id, + self.addr, + self.port, + self.accelerators, + [str(t) for t in self.trainers], + [str(s) for s in self.servers], + [str(w) for w in self.workers], + [str(h) for h in self.heter_workers], + [str(c) for c in self.coordinators], + ) def __eq__(self, pod): - if self.rank != pod.rank or \ - self.id != pod.id or \ - self.addr != pod.addr or \ - self.port != pod.port: + if ( + self.rank != pod.rank + or self.id != pod.id + or self.addr != pod.addr + or self.port != pod.port + ): logger.debug("pod {} != {}".format(self, pod)) return False if len(self.trainers) != len(pod.trainers): - logger.debug("trainers {} != {}".format(self.trainers, - pod.trainers)) + logger.debug( + "trainers {} != {}".format(self.trainers, pod.trainers) + ) return False for i in range(len(self.trainers)): if self.trainers[i] != pod.trainers[i]: - logger.debug("trainer {} != {}".format(self.trainers[i], - pod.trainers[i])) + logger.debug( + "trainer {} != {}".format(self.trainers[i], pod.trainers[i]) + ) return False if len(self.servers) != len(pod.servers): @@ -225,8 +237,9 @@ class Pod(object): for i in range(len(self.servers)): if self.servers[i] != pod.servers[i]: - logger.debug("servers {} != {}".format(self.servers[i], - pod.servers[i])) + logger.debug( + "servers {} != 
{}".format(self.servers[i], pod.servers[i]) + ) return False if len(self.workers) != len(pod.workers): @@ -235,8 +248,9 @@ class Pod(object): for i in range(len(self.workers)): if self.workers[i] != pod.workers[i]: - logger.debug("workers {} != {}".format(self.workers[i], - pod.workers[i])) + logger.debug( + "workers {} != {}".format(self.workers[i], pod.workers[i]) + ) return False return True @@ -267,15 +281,17 @@ def get_logger(log_level=20, name="root"): log_handler = logging.StreamHandler() log_format = logging.Formatter( - '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s') + '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s' + ) log_handler.setFormatter(log_format) logger.addHandler(log_handler) return logger -def get_cluster(node_ips, node_ip, trainer_endpoints, device_mode, - devices_per_proc): +def get_cluster( + node_ips, node_ip, trainer_endpoints, device_mode, devices_per_proc +): assert type(trainer_endpoints) is list, "trainer_endpoints must be list" cluster = Cluster(hdfs=None) trainer_rank = 0 @@ -292,7 +308,11 @@ def get_cluster(node_ips, node_ip, trainer_endpoints, device_mode, ), "current trainer_endpoints size should be greater equal than acclerators size." for i in range(len(devices_per_proc)): trainer = Trainer() - if device_mode == DeviceMode.GPU or device_mode == DeviceMode.ASCEND_NPU or device_mode == DeviceMode.MLU: + if ( + device_mode == DeviceMode.GPU + or device_mode == DeviceMode.ASCEND_NPU + or device_mode == DeviceMode.MLU + ): if isinstance(devices_per_proc[i], (list, tuple)): trainer.accelerators.extend(devices_per_proc[i]) pod.accelerators.extend(devices_per_proc[i]) @@ -371,21 +391,23 @@ def add_arguments(argname, type, default, help, argparser, **kwargs): args = parser.parse_args() """ type = strtobool if type == bool else type - argparser.add_argument("--" + argname, - default=default, - type=type, - help=help + ' Default: %(default)s.', - **kwargs) + argparser.add_argument( + "--" + argname, + default=default, + type=type, + help=help + ' Default: %(default)s.', + **kwargs + ) def find_free_ports(num): - def __free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: # Note(wangxi): Close the connection with a TCP RST instead # of a TCP FIN, to avoid time_wait state. 
- s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, - struct.pack('ii', 1, 0)) + s.setsockopt( + socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0) + ) s.bind(('', 0)) return s.getsockname()[1] @@ -429,7 +451,8 @@ def pretty_print_envs(envs, header=None): max_k = max(max_k, len(k)) h_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format( - max_k, " " * spacing, max_v) + max_k, " " * spacing, max_v + ) l_format = " " + "|{{:>{}s}}{{}}{{:^{}s}}|\n".format(max_k, max_v) length = max_k + max_v + spacing @@ -461,7 +484,6 @@ def pretty_print_envs(envs, header=None): class TrainerProc(object): - def __init__(self): self.proc = None self.log_fn = None @@ -483,12 +505,9 @@ def run_with_coverage(*args): return _run_with_coverage -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - log_dir=None, - envs=None): +def start_local_trainers( + cluster, pod, training_script, training_script_args, log_dir=None, envs=None +): if envs is None: current_env = copy.copy(os.environ.copy()) @@ -507,74 +526,87 @@ def start_local_trainers(cluster, procs = [] for idx, t in enumerate(pod.trainers): proc_env = { - "PADDLE_TRAINER_ID": - "%d" % t.rank, - "PADDLE_CURRENT_ENDPOINT": - "%s" % t.endpoint, - "PADDLE_TRAINERS_NUM": - "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": - ",".join(cluster.trainers_endpoints()), - "PADDLE_RANK_IN_NODE": - str(idx), - "PADDLE_LOCAL_DEVICE_IDS": - ",".join([str(acc) for acc in t.accelerators]), - "PADDLE_WORLD_DEVICE_IDS": - ",".join(res), + "PADDLE_TRAINER_ID": "%d" % t.rank, + "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint, + "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), + "PADDLE_RANK_IN_NODE": str(idx), + "PADDLE_LOCAL_DEVICE_IDS": ",".join( + [str(acc) for acc in t.accelerators] + ), + "PADDLE_WORLD_DEVICE_IDS": ",".join(res), } # The following three environnement variables are used for auto mapping if current_env.get("PADDLE_CLUSTER_TOPO_PATH", None) is not None: proc_env["PADDLE_CLUSTER_TOPO_PATH"] = current_env[ - "PADDLE_CLUSTER_TOPO_PATH"] + "PADDLE_CLUSTER_TOPO_PATH" + ] if current_env.get("PADDLE_RANK_MAPPING_PATH", None) is not None: proc_env["PADDLE_RANK_MAPPING_PATH"] = current_env[ - "PADDLE_RANK_MAPPING_PATH"] + "PADDLE_RANK_MAPPING_PATH" + ] if current_env.get("PADDLE_ENABLE_AUTO_MAPPING", None) is not None: proc_env["PADDLE_ENABLE_AUTO_MAPPING"] = current_env[ - "PADDLE_ENABLE_AUTO_MAPPING"] + "PADDLE_ENABLE_AUTO_MAPPING" + ] if len(t.accelerators) > 0 and pod.device_mode == DeviceMode.GPU: proc_env["FLAGS_selected_gpus"] = "%s" % ",".join( - [str(g) for g in t.accelerators]) + [str(g) for g in t.accelerators] + ) - elif len(t.accelerators - ) > 0 and pod.device_mode == DeviceMode.ASCEND_NPU: + elif ( + len(t.accelerators) > 0 and pod.device_mode == DeviceMode.ASCEND_NPU + ): proc_env["FLAGS_selected_npus"] = "%s" % ",".join( - [str(g) for g in t.accelerators]) + [str(g) for g in t.accelerators] + ) elif len(t.accelerators) > 0 and pod.device_mode == DeviceMode.MLU: proc_env["FLAGS_selected_mlus"] = "%s" % ",".join( - [str(g) for g in t.accelerators]) + [str(g) for g in t.accelerators] + ) if len(t.accelerators) > 0: proc_env["FLAGS_selected_accelerators"] = "%s" % ",".join( - [str(g) for g in t.accelerators]) + [str(g) for g in t.accelerators] + ) # to do: same code style in future if fluid.core.is_compiled_with_xpu() and len(t.accelerators) > 0: proc_env["FLAGS_selected_xpus"] = "%s" % ",".join( - [str(g) for g in t.accelerators]) + 
[str(g) for g in t.accelerators] + ) current_env.update(proc_env) coverage_args = [] - if run_with_coverage() or os.environ.get("WITH_COVERAGE", - "OFF") == "ON": + if ( + run_with_coverage() + or os.environ.get("WITH_COVERAGE", "OFF") == "ON" + ): coverage_args = ["-m", "coverage", "run", "--branch", "-p"] - cmd = [sys.executable, "-u"] + coverage_args + [training_script - ] + training_script_args + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [training_script] + + training_script_args + ) logger.debug("start trainer proc{} env:{}".format(cmd, current_env)) if idx == 0: - logger.info("Local start {} processes. First process distributed " - "environment info (Only For Debug): {}".format( - len(pod.trainers), - pretty_print_envs(proc_env, - ("Distributed Envs", "Value")))) + logger.info( + "Local start {} processes. First process distributed " + "environment info (Only For Debug): {}".format( + len(pod.trainers), + pretty_print_envs(proc_env, ("Distributed Envs", "Value")), + ) + ) logger.info( "details about PADDLE_TRAINER_ENDPOINTS can be found in " "{}/endpoints.log, and detail running logs maybe found in " - "{}/workerlog.0".format(log_dir, log_dir)) + "{}/workerlog.0".format(log_dir, log_dir) + ) fn = None pre_fn = None if os.name == 'nt' else os.setsid if log_dir is not None: @@ -584,16 +616,17 @@ def start_local_trainers(cluster, with open("%s/endpoints.log" % log_dir, "w") as f: f.write("PADDLE_TRAINER_ENDPOINTS: \n") f.write("\n".join(cluster.trainers_endpoints())) - if current_env.get("PADDLE_ENABLE_AUTO_MAPPING") is not None \ - and current_env.get("PADDLE_NEED_RANK_MAPPING").lower() == "true": + if ( + current_env.get("PADDLE_ENABLE_AUTO_MAPPING") is not None + and current_env.get("PADDLE_NEED_RANK_MAPPING").lower() + == "true" + ): fn = open("%s/prelaunchlog.%d" % (log_dir, idx), "a") else: fn = open("%s/workerlog.%d" % (log_dir, idx), "a") - proc = subprocess.Popen(cmd, - env=current_env, - stdout=fn, - stderr=fn, - preexec_fn=pre_fn) + proc = subprocess.Popen( + cmd, env=current_env, stdout=fn, stderr=fn, preexec_fn=pre_fn + ) else: proc = subprocess.Popen(cmd, env=current_env, preexec_fn=pre_fn) @@ -620,8 +653,9 @@ def pull_worker_log(tp): except UnicodeEncodeError: sys.stdout.write( 'UnicodeEncodeError occurs at this line. ' - 'Please refer to the original log file "%s"\n' % - tp.log_fn.name) + 'Please refer to the original log file "%s"\n' + % tp.log_fn.name + ) tp.log_offset = fin.tell() @@ -652,14 +686,18 @@ def watch_local_trainers(procs, nranks): return except SystemExit: logger.error( - "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log." - .format(nranks, error_rank)) + "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".format( + nranks, error_rank + ) + ) terminate_local_procs(procs) raise except: logger.error( - "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log." - .format(nranks, error_rank)) + "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. 
Please check its log.".format( + nranks, error_rank + ) + ) terminate_local_procs(procs) return @@ -680,17 +718,22 @@ def get_gpus(gpus): # therefore gpus=0,1,2,3 cuda_visible_devices_list = cuda_visible_devices.split(',') for x in gpus.split(','): - assert x in cuda_visible_devices_list, "Can't find "\ - "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\ + assert x in cuda_visible_devices_list, ( + "Can't find " + "your gpus %s in CUDA_VISIBLE_DEVICES[%s]." % (x, cuda_visible_devices) + ) res_gpus = [ cuda_visible_devices_list.index(x.strip()) for x in gpus.split(',') ] - logger.info("Change selected_gpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "CUDA_VISIBLE_DEVICES:{}".format( - gpus, res_gpus, cuda_visible_devices_list)) + logger.info( + "Change selected_gpus into reletive values. --ips:{} " + "will change into relative_ips:{} according to your " + "CUDA_VISIBLE_DEVICES:{}".format( + gpus, res_gpus, cuda_visible_devices_list + ) + ) return res_gpus @@ -709,17 +752,23 @@ def get_xpus(xpus): # therefore xpus=0,1,2,3 xpu_visible_devices_list = xpu_visible_devices.split(',') for x in xpus.split(','): - assert x in xpu_visible_devices_list, "Can't find "\ - "your xpus %s in XPU_VISIBLE_DEVICES[%s]."\ - % (x, xpu_visible_devices) + assert ( + x in xpu_visible_devices_list + ), "Can't find " "your xpus %s in XPU_VISIBLE_DEVICES[%s]." % ( + x, + xpu_visible_devices, + ) res_xpus = [ xpu_visible_devices_list.index(x.strip()) for x in xpus.split(',') ] - logger.info("Change selected_xpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "XPU_VISIBLE_DEVICES:{}".format( - xpus, res_xpus, xpu_visible_devices_list)) + logger.info( + "Change selected_xpus into reletive values. --ips:{} " + "will change into relative_ips:{} according to your " + "XPU_VISIBLE_DEVICES:{}".format( + xpus, res_xpus, xpu_visible_devices_list + ) + ) return res_xpus @@ -738,17 +787,22 @@ def get_npus(npus): # therefore npus=0,1,2,3 npu_visible_devices_list = npu_visible_devices.split(',') for x in npus.split(','): - assert x in npu_visible_devices_list, "Can't find "\ - "your npus %s in ASCEND_VISIBLE_DEVICES[%s]."\ + assert x in npu_visible_devices_list, ( + "Can't find " + "your npus %s in ASCEND_VISIBLE_DEVICES[%s]." % (x, npu_visible_devices) + ) res_npus = [ npu_visible_devices_list.index(x.strip()) for x in npus.split(',') ] - logger.info("Change selected_npus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "ASCEND_VISIBLE_DEVICES:{}".format( - npus, res_npus, npu_visible_devices_list)) + logger.info( + "Change selected_npus into reletive values. --ips:{} " + "will change into relative_ips:{} according to your " + "ASCEND_VISIBLE_DEVICES:{}".format( + npus, res_npus, npu_visible_devices_list + ) + ) return res_npus @@ -767,33 +821,45 @@ def get_mlus(mlus): # therefore mlus=0,1,2,3 mlu_visible_devices_list = mlu_visible_devices.split(',') for x in mlus.split(','): - assert x in mlu_visible_devices_list, "Can't find "\ - "your mlus %s in MLU_VISIBLE_DEVICES[%s]."\ - % (x, mlu_visible_devices) + assert ( + x in mlu_visible_devices_list + ), "Can't find " "your mlus %s in MLU_VISIBLE_DEVICES[%s]." % ( + x, + mlu_visible_devices, + ) res_mlus = [ mlu_visible_devices_list.index(x.strip()) for x in mlus.split(',') ] - logger.info("Change selected_mlus into reletive values. 
--ips:{} " - "will change into relative_ips:{} according to your " - "MLU_VISIBLE_DEVICES:{}".format( - mlus, res_mlus, mlu_visible_devices_list)) + logger.info( + "Change selected_mlus into reletive values. --ips:{} " + "will change into relative_ips:{} according to your " + "MLU_VISIBLE_DEVICES:{}".format( + mlus, res_mlus, mlu_visible_devices_list + ) + ) return res_mlus def get_device_mode(backend): if backend == 'heter': - if fluid.core.is_compiled_with_cuda() and \ - fluid.core.get_cuda_device_count() > 0: + if ( + fluid.core.is_compiled_with_cuda() + and fluid.core.get_cuda_device_count() > 0 + ): print("launch train in heter mode with GPU device.") return DeviceMode.GPU - if fluid.core.is_compiled_with_xpu() and \ - fluid.core.get_xpu_device_count() > 0: + if ( + fluid.core.is_compiled_with_xpu() + and fluid.core.get_xpu_device_count() > 0 + ): print("launch train in heter mode with XPU device.") return DeviceMode.XPU - if fluid.core.is_compiled_with_npu() and \ - fluid.core.get_npu_device_count() > 0: + if ( + fluid.core.is_compiled_with_npu() + and fluid.core.get_npu_device_count() > 0 + ): print("launch train in heter mode with NPU device.") return DeviceMode.ASCEND_NPU @@ -801,8 +867,7 @@ def get_device_mode(backend): print("launch train in ascend npu mode!") return DeviceMode.ASCEND_NPU - if backend == 'nccl' and \ - fluid.core.get_cuda_device_count() > 0: + if backend == 'nccl' and fluid.core.get_cuda_device_count() > 0: print("launch train in GPU mode!") return DeviceMode.GPU @@ -830,62 +895,80 @@ def get_device_proc_info(args): if device_mode == DeviceMode.GPU: gpus = get_gpus(args.gpus) if args.nproc_per_node is not None: - assert (len(gpus) % int(args.nproc_per_node)) ==0, \ - "gpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(gpus), args.nproc_per_node) + assert ( + len(gpus) % int(args.nproc_per_node) + ) == 0, "gpus' number:{} mod args.nproc_per_node:{} must == 0".format( + len(gpus), args.nproc_per_node + ) n = int(len(gpus) / int(args.nproc_per_node)) - devices_per_proc = [gpus[i:i + n] for i in range(0, len(gpus), n)] + devices_per_proc = [gpus[i : i + n] for i in range(0, len(gpus), n)] else: devices_per_proc = gpus elif device_mode == DeviceMode.ASCEND_NPU: npus = get_npus(args.npus) if args.nproc_per_node is not None: - assert (len(npus) % int(args.nproc_per_node)) ==0, \ - "npus' number:{} mod args.nproc_per_node:{} must == 0".format(len(npus), args.nproc_per_node) + assert ( + len(npus) % int(args.nproc_per_node) + ) == 0, "npus' number:{} mod args.nproc_per_node:{} must == 0".format( + len(npus), args.nproc_per_node + ) n = int(len(npus) / int(args.nproc_per_node)) - devices_per_proc = [npus[i:i + n] for i in range(0, len(npus), n)] + devices_per_proc = [npus[i : i + n] for i in range(0, len(npus), n)] else: devices_per_proc = npus elif device_mode == DeviceMode.XPU: xpus = get_xpus(args.xpus) if args.nproc_per_node is not None: - assert (len(xpus) % int(args.nproc_per_node)) == 0, \ - "xpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(xpus), args.nproc_per_node) + assert ( + len(xpus) % int(args.nproc_per_node) + ) == 0, "xpus' number:{} mod args.nproc_per_node:{} must == 0".format( + len(xpus), args.nproc_per_node + ) n = int(len(xpus) / int(args.nproc_per_node)) - devices_per_proc = [xpus[i:i + n] for i in range(0, len(xpus), n)] + devices_per_proc = [xpus[i : i + n] for i in range(0, len(xpus), n)] else: devices_per_proc = xpus elif device_mode == DeviceMode.MLU: mlus = get_mlus(args.mlus) if args.nproc_per_node is not None: - 
assert (len(mlus) % int(args.nproc_per_node)) ==0, \ - "mlus' number:{} mod args.nproc_per_node:{} must == 0".format(len(mlus), args.nproc_per_node) + assert ( + len(mlus) % int(args.nproc_per_node) + ) == 0, "mlus' number:{} mod args.nproc_per_node:{} must == 0".format( + len(mlus), args.nproc_per_node + ) n = int(len(mlus) / int(args.nproc_per_node)) - devices_per_proc = [mlus[i:i + n] for i in range(0, len(mlus), n)] + devices_per_proc = [mlus[i : i + n] for i in range(0, len(mlus), n)] else: devices_per_proc = mlus elif device_mode == DeviceMode.CPU: if hasattr(args, "paddle_cpuonly") and args.nproc_per_node is None: - #NOTE (xiongkun03) set it to cpu core number + # NOTE (xiongkun03) set it to cpu core number args.nproc_per_node = multiprocessing.cpu_count() if args.nproc_per_node is None: devices_per_proc = [0] else: devices_per_proc = [x for x in range(0, args.nproc_per_node)] else: - assert False, "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format( - device_mode) + assert ( + False + ), "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format( + device_mode + ) return (device_mode, devices_per_proc) def direct_start(args): # run ps-cpu mode on paddlecloud, using given envs - cmd = [sys.executable, "-u", args.training_script] + \ - args.training_script_args + cmd = [ + sys.executable, + "-u", + args.training_script, + ] + args.training_script_args proc = subprocess.Popen(cmd) proc.wait() return @@ -907,7 +990,7 @@ def get_custom_endpoints(origin_endpoints, offset=0): return paddle_user_define_endpoints -#def cloud_ps_heter_env_set(args): +# def cloud_ps_heter_env_set(args): # environs = {} # # paddle_trainer_endpoints = os.getenv("TRAINER_IP_PORT_LIST", "") @@ -947,12 +1030,13 @@ def get_custom_endpoints(origin_endpoints, offset=0): # pretty_print_envs(environs))) -def get_mapped_cluster_without_rank_mapping(node_ips, node_ip, - trainer_endpoints, device_mode, - node_ranks): +def get_mapped_cluster_without_rank_mapping( + node_ips, node_ip, trainer_endpoints, device_mode, node_ranks +): assert type(trainer_endpoints) is list, "trainer_endpoints must be list" - assert device_mode == DeviceMode.GPU, \ - "Only support get mapped cluster for gpu now." + assert ( + device_mode == DeviceMode.GPU + ), "Only support get mapped cluster for gpu now." cluster = Cluster(hdfs=None) for node_rank, ip in enumerate(node_ips): pod = Pod() @@ -976,8 +1060,9 @@ def get_mapped_cluster_without_rank_mapping(node_ips, node_ip, def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode): - assert device_mode == DeviceMode.GPU, \ - "Only support get mapped cluster for gpu now." + assert ( + device_mode == DeviceMode.GPU + ), "Only support get mapped cluster for gpu now." gpus_num = fluid.core.get_cuda_device_count() # parse ip-ranks json file @@ -999,17 +1084,21 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode): else: _, node_ip = get_host_name_ip() - assert node_ip in node_ips, \ - "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips) + assert ( + node_ip in node_ips + ), "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips) node_rank = node_ips.index(node_ip) - assert len(node_ranks) == len(node_ips), \ - "ranks length should be equal to ips length." + assert len(node_ranks) == len( + node_ips + ), "ranks length should be equal to ips length." 
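The device-splitting branches above (gpus, npus, xpus, mlus) all apply the same chunking rule: the visible device list must divide evenly by --nproc_per_node, and each process receives one contiguous slice. A toy illustration of that rule; the device ids and process count are invented:

    def split_devices(devices, nproc_per_node):
        # Mirrors the per-device-type chunking: even split into contiguous slices.
        assert len(devices) % nproc_per_node == 0, (
            "devices' number:{} mod nproc_per_node:{} must == 0".format(
                len(devices), nproc_per_node
            )
        )
        n = len(devices) // nproc_per_node
        return [devices[i : i + n] for i in range(0, len(devices), n)]

    print(split_devices([0, 1, 2, 3, 4, 5, 6, 7], 4))  # [[0, 1], [2, 3], [4, 5], [6, 7]]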
- logger.debug("parsed from args: node_ips:{} node_ip:{} " - "node_rank:{} node_ranks:{}".format(node_ips, node_ip, - node_rank, - node_ranks[node_rank])) + logger.debug( + "parsed from args: node_ips:{} node_ip:{} " + "node_rank:{} node_ranks:{}".format( + node_ips, node_ip, node_rank, node_ranks[node_rank] + ) + ) # NOTE: there are different number of global mapped ranks on each node. free_ports = [] @@ -1019,30 +1108,40 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode): if os.environ.get('PADDLE_PORT') is not None: start_port = int(os.getenv("PADDLE_PORT", "")) free_ports = [ - x for x in range(start_port, start_port + - len(node_ranks[node_rank])) + x + for x in range( + start_port, start_port + len(node_ranks[node_rank]) + ) ] elif os.environ.get('FLAGS_START_PORT') is not None: start_port = int(os.environ.get('FLAGS_START_PORT')) free_ports = [ - x for x in range(start_port, start_port + - len(node_ranks[node_rank])) + x + for x in range( + start_port, start_port + len(node_ranks[node_rank]) + ) ] else: free_ports = find_free_ports(len(node_ranks[node_rank])) trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) - return get_mapped_cluster_without_rank_mapping(node_ips, node_ip, - trainer_endpoints, - device_mode, node_ranks) + return get_mapped_cluster_without_rank_mapping( + node_ips, node_ip, trainer_endpoints, device_mode, node_ranks + ) -def get_mapped_cluster_with_rank_mapping(node_ips, node_ip, trainer_endpoints, - device_mode, node_ranks, - node_rank_mappings): +def get_mapped_cluster_with_rank_mapping( + node_ips, + node_ip, + trainer_endpoints, + device_mode, + node_ranks, + node_rank_mappings, +): assert type(trainer_endpoints) is list, "trainer_endpoints must be list" - assert device_mode == DeviceMode.GPU, \ - "Only support get mapped cluster for gpu now." + assert ( + device_mode == DeviceMode.GPU + ), "Only support get mapped cluster for gpu now." def get_relative_gpu_id(gpu_id): cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") @@ -1052,8 +1151,10 @@ def get_mapped_cluster_with_rank_mapping(node_ips, node_ip, trainer_endpoints, cuda_visible_devices_list = cuda_visible_devices.split(',') relative_id = cuda_visible_devices_list.index(str(gpu_id)) logger.info( - "Change gpu id from {} to {} based on CUDA_VISIBLE_DEVICES {}". 
- format(gpu_id, relative_id, cuda_visible_devices_list)) + "Change gpu id from {} to {} based on CUDA_VISIBLE_DEVICES {}".format( + gpu_id, relative_id, cuda_visible_devices_list + ) + ) return relative_id cluster = Cluster(hdfs=None) @@ -1069,12 +1170,15 @@ def get_mapped_cluster_with_rank_mapping(node_ips, node_ip, trainer_endpoints, cur_node_rank_mapping = node_rank_mappings[node_rank] for i in range(len(ranks_per_node)): trainer = Trainer() - local_device_ids = cur_node_rank_mapping["ranks"][str( - ranks_per_node[i])] - assert len(local_device_ids) == 1, \ - "Only support one process to one device mapping" - trainer.accelerators.append(get_relative_gpu_id( - local_device_ids[0])) + local_device_ids = cur_node_rank_mapping["ranks"][ + str(ranks_per_node[i]) + ] + assert ( + len(local_device_ids) == 1 + ), "Only support one process to one device mapping" + trainer.accelerators.append( + get_relative_gpu_id(local_device_ids[0]) + ) trainer.endpoint = "%s" % (cur_node_endpoints[i]) trainer.rank = ranks_per_node[i] pod.trainers.append(trainer) @@ -1085,13 +1189,15 @@ def get_mapped_cluster_with_rank_mapping(node_ips, node_ip, trainer_endpoints, def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): - assert device_mode == DeviceMode.GPU, \ - "Only support get mapped cluster for gpu now." + assert ( + device_mode == DeviceMode.GPU + ), "Only support get mapped cluster for gpu now." gpus_num = fluid.core.get_cuda_device_count() # parse ip-ranks json file rank_mapping_path = args.rank_mapping_path or os.getenv( - "PADDLE_RANK_MAPPING_PATH") + "PADDLE_RANK_MAPPING_PATH" + ) rank_mapping = None with open(rank_mapping_path, "r") as json_file: rank_mapping = json.load(json_file) @@ -1118,19 +1224,24 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): else: _, node_ip = get_host_name_ip() - assert node_ip in node_ips, \ - "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips) + assert ( + node_ip in node_ips + ), "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips) node_rank = node_ips.index(node_ip) - assert len(node_ranks[node_rank]) <= gpus_num, \ - "number of ranks mapped to one node should not exceed the avaiable ones." - assert len(node_ranks) == len(node_ips), \ - "ranks length should be equal to ips length." - - logger.debug("parsed from args: node_ips:{} node_ip:{} " - "node_rank:{} node_ranks:{}".format(node_ips, node_ip, - node_rank, - node_ranks[node_rank])) + assert ( + len(node_ranks[node_rank]) <= gpus_num + ), "number of ranks mapped to one node should not exceed the avaiable ones." + assert len(node_ranks) == len( + node_ips + ), "ranks length should be equal to ips length." + + logger.debug( + "parsed from args: node_ips:{} node_ip:{} " + "node_rank:{} node_ranks:{}".format( + node_ips, node_ip, node_rank, node_ranks[node_rank] + ) + ) # NOTE: there are different number of global mapped ranks on each node. 
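get_relative_gpu_id above, like the get_gpus/get_xpus/get_npus/get_mlus helpers earlier in this patch, maps an absolute device id to its index within the *_VISIBLE_DEVICES list. A small sketch of that translation with invented device ids (the real helpers also log the remapping):

    def to_relative_ids(requested, visible_devices):
        # visible_devices mimics a CUDA_VISIBLE_DEVICES-style comma-separated string.
        visible = visible_devices.split(',')
        for dev in requested:
            assert str(dev) in visible, (
                "Can't find device %s in visible devices [%s]"
                % (dev, visible_devices)
            )
        return [visible.index(str(dev)) for dev in requested]

    # With CUDA_VISIBLE_DEVICES="4,5,6,7", physical device 6 becomes local id 2.
    print(to_relative_ids([6, 7], "4,5,6,7"))  # [2, 3]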
free_ports = [] @@ -1140,26 +1251,34 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): if os.environ.get('PADDLE_PORT') is not None: start_port = int(os.getenv("PADDLE_PORT", "")) free_ports = [ - x for x in range(start_port, start_port + - len(node_ranks[node_rank])) + x + for x in range( + start_port, start_port + len(node_ranks[node_rank]) + ) ] elif os.environ.get('FLAGS_START_PORT') is not None: start_port = int(os.environ.get('FLAGS_START_PORT')) free_ports = [ - x for x in range(start_port, start_port + - len(node_ranks[node_rank])) + x + for x in range( + start_port, start_port + len(node_ranks[node_rank]) + ) ] else: free_ports = find_free_ports(len(node_ranks[node_rank])) trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) - return get_mapped_cluster_with_rank_mapping(node_ips, node_ip, - trainer_endpoints, device_mode, - node_ranks, node_rank_mappings) + return get_mapped_cluster_with_rank_mapping( + node_ips, + node_ip, + trainer_endpoints, + device_mode, + node_ranks, + node_rank_mappings, + ) class ParameterServerLauncher(object): - def __init__(self, args, distribute_mode): self.args = args self.distribute_mode = distribute_mode @@ -1200,17 +1319,21 @@ class ParameterServerLauncher(object): if args.server_num: self.server_num = args.server_num if args.servers: - assert len( - args.servers.split(",") - ) == self.server_num, "The server_num and servers doesn't match. Expect servers endpoints num epual to server_num, but received servers enpoint num: {} and server_num {}".format( - len(args.servers.split(",")), self.server_num) + assert ( + len(args.servers.split(",")) == self.server_num + ), "The server_num and servers doesn't match. Expect servers endpoints num epual to server_num, but received servers enpoint num: {} and server_num {}".format( + len(args.servers.split(",")), self.server_num + ) self.server_endpoints = args.servers else: ports = get_ports(self.server_num, 0) self.server_endpoints = ",".join( - ["127.0.0.1:" + str(x) for x in ports]) + ["127.0.0.1:" + str(x) for x in ports] + ) else: - assert args.servers != "", "The setting of Parameter-Server must has server_num or servers." + assert ( + args.servers != "" + ), "The setting of Parameter-Server must has server_num or servers." self.server_endpoints = args.servers self.server_num = len(self.server_endpoints.split(",")) @@ -1218,18 +1341,22 @@ class ParameterServerLauncher(object): if args.worker_num: self.worker_num = args.worker_num if args.workers: - assert len( - args.workers.split(",") - ) == self.worker_num, "The worker_num and workers doesn't match. Expect workers endpoints num epual to worker_num, but received workers enpoint num: {} and worker_num {}".format( - len(args.workers.split(",")), self.worker_num) + assert ( + len(args.workers.split(",")) == self.worker_num + ), "The worker_num and workers doesn't match. Expect workers endpoints num epual to worker_num, but received workers enpoint num: {} and worker_num {}".format( + len(args.workers.split(",")), self.worker_num + ) self.worker_endpoints = args.workers else: ports = get_ports(self.worker_num, self.server_num) self.worker_endpoints = ",".join( - ["127.0.0.1:" + str(x) for x in ports]) + ["127.0.0.1:" + str(x) for x in ports] + ) else: - assert args.workers != "", "The setting of Parameter-Server must has worker_num or workers." + assert ( + args.workers != "" + ), "The setting of Parameter-Server must has worker_num or workers." 
worker_endpoints_ips = [ x.strip().split(":")[0] for x in args.workers.split(",") ] @@ -1243,13 +1370,20 @@ class ParameterServerLauncher(object): start_port = 6170 worker_endpoints_port = range( start_port + self.server_num, - start_port + self.server_num + self.worker_num, 1) + start_port + self.server_num + self.worker_num, + 1, + ) # create endpoints str worker_endpoints = [] for i in range(self.worker_num): - worker_endpoints.append(":".join( - (worker_endpoints_ips[i], - str(worker_endpoints_port[i])))) + worker_endpoints.append( + ":".join( + ( + worker_endpoints_ips[i], + str(worker_endpoints_port[i]), + ) + ) + ) self.worker_endpoints = ",".join(worker_endpoints) else: self.worker_endpoints = args.workers @@ -1259,21 +1393,25 @@ class ParameterServerLauncher(object): self.with_coordinator = True self.coordinator_num = args.coordinator_num if args.coordinators: - assert len( - args.coordinators.split(",") - ) == self.coordinator_num, "The coordinator_num and coordinators doesn't match. Expect coordinators endpoints num epual to coordinator_num, but received coordinator enpoint num: {} and coordinator_num {}".format( - len(args.coordinators.split(",")), self.coordinator_num) + assert ( + len(args.coordinators.split(",")) == self.coordinator_num + ), "The coordinator_num and coordinators doesn't match. Expect coordinators endpoints num epual to coordinator_num, but received coordinator enpoint num: {} and coordinator_num {}".format( + len(args.coordinators.split(",")), self.coordinator_num + ) self.coordinator_endpoints = args.coordinators else: ports = get_ports(self.coordinator_num, 1) self.coordinator_endpoints = ",".join( - ["127.0.0.1:" + str(x) for x in ports]) + ["127.0.0.1:" + str(x) for x in ports] + ) print(">>> use default coordinator addr(only one process)") # get heter worker envs if self.distribute_mode == DistributeMode.PS_HETER: - assert args.heter_devices != "", "The setting of Parameter-Server heter mode must has heter_devices." + assert ( + args.heter_devices != "" + ), "The setting of Parameter-Server heter mode must has heter_devices." self.stage_device_map[1] = "cpu" # for cpu trainer heter_devices_list = args.heter_devices.split(";") for i in range(len(heter_devices_list)): @@ -1292,19 +1430,22 @@ class ParameterServerLauncher(object): self.stage_heter_trainer_num ), "The stage_num and heter_workers doesn't match. Expect heter_workers endpoints stage num epual to heter_worker_num stage, but received heter_workers enpoint stage num: {} and heter_worker_num stage {}".format( len(args.heter_workers.split(";")), - len(self.stage_heter_trainer_num)) + len(self.stage_heter_trainer_num), + ) heter_worker_endpoints_list = args.heter_workers.split(";") self.heter_worker_endpoints = "" for i in range(len(self.stage_heter_trainer_num)): if self.heter_worker_endpoints != "": self.heter_worker_endpoints += "," heter_worker_endpoints = heter_worker_endpoints_list[ - i].split(",") - assert len( - heter_worker_endpoints - ) == self.stage_heter_trainer_num[ - i], "The heter trainer num in stage {} is not equal in args.heter_worker_num and args.heter_workers".format( - i) + i + ].split(",") + assert ( + len(heter_worker_endpoints) + == self.stage_heter_trainer_num[i] + ), "The heter trainer num in stage {} is not equal in args.heter_worker_num and args.heter_workers".format( + i + ) heter_worker_endpoints_ips = [ x.strip().split(":")[0] @@ -1319,20 +1460,28 @@ class ParameterServerLauncher(object): # if no port value in heter_worker_endpoint, will set default port values. 
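When heter worker endpoints are supplied as bare IPs, the block that follows fills in default ports offset past the ports already reserved for servers, workers, and earlier heter workers, then joins the results as "ip:port" pairs. Below is a small sketch of that completion step under the same assumption; the port allocator is passed in as a parameter because get_ports belongs to this launcher module and only its call sites are visible in the patch.

from typing import Callable, List


def complete_endpoints(
    ips: List[str],
    occupied: int,
    get_ports: Callable[[int, int], List[int]],
) -> str:
    # Pair each bare IP with a default port allocated after `occupied`
    # ports have already been handed out, then join as a comma list.
    ports = get_ports(len(ips), occupied)
    return ",".join("%s:%d" % (ip, port) for ip, port in zip(ips, ports))


For example, with the 6170 default base used earlier in this class, complete_endpoints(["10.0.0.2", "10.0.0.3"], 4, lambda n, off: list(range(6170 + off, 6170 + off + n))) returns "10.0.0.2:6174,10.0.0.3:6175".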
heter_worker_endpoints_port = get_ports( len(heter_worker_endpoints_ips), - self.worker_num + self.server_num + - self.heter_worker_num) + self.worker_num + + self.server_num + + self.heter_worker_num, + ) new_heter_worker_endpoints = [] for j in range(len(heter_worker_endpoints_ips)): - new_heter_worker_endpoints.append(":".join( - (heter_worker_endpoints_ips[j], - str(heter_worker_endpoints_port[j])))) + new_heter_worker_endpoints.append( + ":".join( + ( + heter_worker_endpoints_ips[j], + str(heter_worker_endpoints_port[j]), + ) + ) + ) ip_port_list = ",".join(new_heter_worker_endpoints) else: ip_port_list = ",".join(heter_worker_endpoints) self.stage_heter_map[i + 2] = ip_port_list - self.stage_list.extend([i + 2] * - len(ip_port_list.split(','))) + self.stage_list.extend( + [i + 2] * len(ip_port_list.split(',')) + ) self.heter_worker_num += self.stage_heter_trainer_num[i] self.heter_worker_endpoints += ip_port_list @@ -1340,27 +1489,36 @@ class ParameterServerLauncher(object): for i in range(len(self.stage_heter_trainer_num)): heter_trainer_num = self.stage_heter_trainer_num[i] ports = get_ports( - heter_trainer_num, self.server_num + - self.worker_num + self.heter_worker_num) + heter_trainer_num, + self.server_num + + self.worker_num + + self.heter_worker_num, + ) ip_port_list = ",".join( - ["127.0.0.1:" + str(x) for x in ports]) + ["127.0.0.1:" + str(x) for x in ports] + ) self.stage_heter_map[i + 2] = ip_port_list - self.stage_list.extend([i + 2] * - len(ip_port_list.split(','))) + self.stage_list.extend( + [i + 2] * len(ip_port_list.split(',')) + ) self.heter_worker_num += heter_trainer_num if self.heter_worker_endpoints != "": self.heter_worker_endpoints += "," self.heter_worker_endpoints += ip_port_list else: - assert args.heter_workers != "", "The setting of Parameter-Server heter mode must has heter_worker_num or heter_workers." + assert ( + args.heter_workers != "" + ), "The setting of Parameter-Server heter mode must has heter_worker_num or heter_workers." self.stage_heter_trainer_num = [] heter_worker_endpoints_list = args.heter_workers.split(";") self.heter_worker_endpoints = "" for i in range(len(heter_worker_endpoints_list)): heter_worker_endpoints = heter_worker_endpoints_list[ - i].split(",") + i + ].split(",") self.stage_heter_trainer_num.append( - len(heter_worker_endpoints)) + len(heter_worker_endpoints) + ) heter_worker_endpoints_ips = [ x.strip().split(":")[0] for x in heter_worker_endpoints ] @@ -1371,29 +1529,39 @@ class ParameterServerLauncher(object): if 1 in heter_worker_endpoints_len: # if no port value in heter_worker_endpoint, will set default port values. 
heter_worker_endpoints_port = get_ports( - len(heter_worker_endpoints_ips), self.worker_num + - self.server_num + self.heter_worker_num) + len(heter_worker_endpoints_ips), + self.worker_num + + self.server_num + + self.heter_worker_num, + ) new_heter_worker_endpoints = [] for j in range(len(heter_worker_endpoints_ips)): - new_heter_worker_endpoints.append(":".join( - (heter_worker_endpoints_ips[j], - str(heter_worker_endpoints_port[j])))) + new_heter_worker_endpoints.append( + ":".join( + ( + heter_worker_endpoints_ips[j], + str(heter_worker_endpoints_port[j]), + ) + ) + ) ip_port_list = ",".join(new_heter_worker_endpoints) else: ip_port_list = ",".join(heter_worker_endpoints) self.stage_heter_map[i + 2] = ip_port_list - self.stage_list.extend([i + 2] * - len(ip_port_list.split(','))) + self.stage_list.extend( + [i + 2] * len(ip_port_list.split(',')) + ) self.heter_worker_num += self.stage_heter_trainer_num[-1] if self.heter_worker_endpoints != "": self.heter_worker_endpoints += "," self.heter_worker_endpoints += ip_port_list - self.stage_trainer_num = [self.worker_num - ] + self.stage_heter_trainer_num + self.stage_trainer_num = [ + self.worker_num + ] + self.stage_heter_trainer_num self.stage_num = len(self.stage_trainer_num) # get http_port @@ -1401,7 +1569,8 @@ class ParameterServerLauncher(object): http_port = [args.http_port] else: http_port = get_ports( - 1, self.server_num + self.worker_num + self.heter_worker_num) + 1, self.server_num + self.worker_num + self.heter_worker_num + ) http_ip = self.server_endpoints.split(",")[0].split(":")[0] self.http_port = http_ip + ":" + str(http_port[0]) @@ -1461,13 +1630,17 @@ class ParameterServerLauncher(object): else: self.current_node_ip = pod_ip if not self.distribute_mode == DistributeMode.PS_HETER: - assert self.current_node_ip in self.node_ips, "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" \ - % (self.current_node_ip, self.node_ips) + assert self.current_node_ip in self.node_ips, ( + "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" + % (self.current_node_ip, self.node_ips) + ) if self.current_node_ip in self.node_ips: self.node_rank = self.node_ips.index(self.current_node_ip) logger.debug( - "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}". 
- format(self.node_ips, self.current_node_ip, self.node_rank)) + "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}".format( + self.node_ips, self.current_node_ip, self.node_rank + ) + ) def start_ps(self): if self.current_node_ip not in self.node_ips: @@ -1484,16 +1657,20 @@ class ParameterServerLauncher(object): for i in range(len(self.server_endpoints_ips)): if ip == self.server_endpoints_ips[i]: server = Trainer() - server.endpoint = "%s:%s" % (ip, - self.server_endpoints_port[i]) + server.endpoint = "%s:%s" % ( + ip, + self.server_endpoints_port[i], + ) server.rank = server_rank server_rank += 1 pod.servers.append(server) for j in range(len(self.worker_endpoints_ips)): if ip == self.worker_endpoints_ips[j]: worker = Trainer() - worker.endpoint = "%s:%s" % (ip, - self.worker_endpoints_port[j]) + worker.endpoint = "%s:%s" % ( + ip, + self.worker_endpoints_port[j], + ) worker.rank = worker_rank worker.stage = 1 worker_rank += 1 @@ -1502,7 +1679,9 @@ class ParameterServerLauncher(object): if ip == self.coordinator_endpoints_ips[m]: coordinator = Trainer() coordinator.endpoint = "%s:%s" % ( - ip, self.coordinator_endpoints_port[m]) + ip, + self.coordinator_endpoints_port[m], + ) coordinator.rank = coordinator_rank coordinator.stage = 1 coordinator_rank += 1 @@ -1512,7 +1691,9 @@ class ParameterServerLauncher(object): if ip == self.heter_worker_endpoints_ips[k]: heter_worker = Trainer() heter_worker.endpoint = "%s:%s" % ( - ip, self.heter_worker_endpoints_port[k]) + ip, + self.heter_worker_endpoints_port[k], + ) heter_worker.rank = heter_worker_rank heter_worker.stage = self.stage_list[k] heter_worker_rank += 1 @@ -1528,19 +1709,19 @@ class ParameterServerLauncher(object): "worker": [], "coordinator": [], "server": [], - "heter_worker": [] + "heter_worker": [], } self.cmds = { "worker": [], "coordinator": [], "server": [], - "heter_worker": [] + "heter_worker": [], } self.log_fns = { "worker": [], "coordinator": [], "server": [], - "heter_worker": [] + "heter_worker": [], } self.start_pod_server(self.args, pod) @@ -1551,9 +1732,13 @@ class ParameterServerLauncher(object): self.start_pod_heter_worker(self.args, pod) logger.info( - "Please check servers, workers, coordinator and heter_worker logs in {}/workerlog.*, {}/serverlog.* , {}/coordinatorlog.*, and {}/heterlog.*" - .format(self.args.log_dir, self.args.log_dir, self.args.log_dir, - self.args.log_dir)) + "Please check servers, workers, coordinator and heter_worker logs in {}/workerlog.*, {}/serverlog.* , {}/coordinatorlog.*, and {}/heterlog.*".format( + self.args.log_dir, + self.args.log_dir, + self.args.log_dir, + self.args.log_dir, + ) + ) # 4. 
wait for finish training if len(self.procs["worker"]) > 0: @@ -1609,8 +1794,7 @@ class ParameterServerLauncher(object): "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints, "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints, "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints, - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": - self.heter_worker_endpoints, + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints, "PADDLE_PORT": cur_server.endpoint.split(":")[1], "TRAINING_ROLE": "PSERVER", "PADDLE_TRAINERS_NUM": str(self.worker_num), @@ -1618,7 +1802,7 @@ class ParameterServerLauncher(object): "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")), "PADDLE_GLOO_RENDEZVOUS": "3", "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir, - "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } else: proc_env = { @@ -1632,12 +1816,15 @@ class ParameterServerLauncher(object): "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")), "PADDLE_GLOO_RENDEZVOUS": "3", "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir, - "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } current_env.update(proc_env) - cmd = [sys.executable, "-u", args.training_script - ] + args.training_script_args + cmd = [ + sys.executable, + "-u", + args.training_script, + ] + args.training_script_args self.cmds["server"].append(cmd) if idx == 0: @@ -1645,17 +1832,19 @@ class ParameterServerLauncher(object): "Local server start {} processes. First process distributed " "environment info (Only For Debug): {}".format( len(pod.servers), - pretty_print_envs(proc_env, - ("Distributed Envs", "Value")))) + pretty_print_envs( + proc_env, ("Distributed Envs", "Value") + ), + ) + ) if args.log_dir is not None: os.system("mkdir -p {}".format(args.log_dir)) fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w") self.log_fns["server"].append(fn) - proc = subprocess.Popen(cmd, - env=current_env, - stdout=fn, - stderr=fn) + proc = subprocess.Popen( + cmd, env=current_env, stdout=fn, stderr=fn + ) else: proc = subprocess.Popen(cmd, env=current_env) @@ -1685,56 +1874,38 @@ class ParameterServerLauncher(object): device_list = [str(x) for x in range(0, heter_device_num)] for idx, cur_worker in enumerate(pod.workers): - device_id = "0" if heter_device_num == 0 else str( - device_list[(idx) % heter_device_num]) + device_id = ( + "0" + if heter_device_num == 0 + else str(device_list[(idx) % heter_device_num]) + ) if self.distribute_mode == DistributeMode.PS_HETER: proc_env = { - "PADDLE_PSERVERS_IP_PORT_LIST": - self.server_endpoints, - "PADDLE_TRAINER_ENDPOINTS": - self.worker_endpoints, - "PADDLE_TRAINERS_NUM": - str(self.worker_num), - "PADDLE_COORDINATOR_ENDPOINTS": - self.coordinator_endpoints, - "PADDLE_STAGE_TRAINERS_NUM": - str(self.stage_trainer_num), - "STAGE_ID": - "1", - "STAGE_NUM": - str(self.stage_num), - "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": - "", - "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": - self.stage_heter_map[2], - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": - self.heter_worker_endpoints, - "HETER_DEVICE_TYPE": - self.stage_device_map[1], - "TRAINING_ROLE": - "TRAINER", - "POD_IP": - cur_worker.endpoint.split(":")[0], - "PADDLE_PORT": - cur_worker.endpoint.split(":")[1], - "PADDLE_TRAINER_ID": - str(cur_worker.rank), - "PADDLE_WITH_GLOO": - str(os.getenv("PADDLE_WITH_GLOO", "0")), - "PADDLE_GLOO_RENDEZVOUS": - "3", - "PADDLE_GLOO_FS_PATH": - self.gloo_rendezvous_dir, - "FLAGS_selected_gpus": - "0", - "FLAGS_selected_xpus": - "0", - 
"CUDA_VISIBLE_DEVICES": - device_id, - "XPU_VISIBLE_DEVICES": - device_id, - "PADDLE_GLOO_HTTP_ENDPOINT": - self.http_port + "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints, + "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints, + "PADDLE_TRAINERS_NUM": str(self.worker_num), + "PADDLE_COORDINATOR_ENDPOINTS": self.coordinator_endpoints, + "PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num), + "STAGE_ID": "1", + "STAGE_NUM": str(self.stage_num), + "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": "", + "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[ + 2 + ], + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints, + "HETER_DEVICE_TYPE": self.stage_device_map[1], + "TRAINING_ROLE": "TRAINER", + "POD_IP": cur_worker.endpoint.split(":")[0], + "PADDLE_PORT": cur_worker.endpoint.split(":")[1], + "PADDLE_TRAINER_ID": str(cur_worker.rank), + "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")), + "PADDLE_GLOO_RENDEZVOUS": "3", + "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir, + "FLAGS_selected_gpus": "0", + "FLAGS_selected_xpus": "0", + "CUDA_VISIBLE_DEVICES": device_id, + "XPU_VISIBLE_DEVICES": device_id, + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } else: proc_env = { @@ -1753,12 +1924,15 @@ class ParameterServerLauncher(object): "FLAGS_selected_xpus": "0", "CUDA_VISIBLE_DEVICES": device_id, "XPU_VISIBLE_DEVICES": device_id, - "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } current_env.update(proc_env) - cmd = [sys.executable, "-u", args.training_script - ] + args.training_script_args + cmd = [ + sys.executable, + "-u", + args.training_script, + ] + args.training_script_args self.cmds["worker"].append(cmd) if idx == 0: @@ -1766,17 +1940,19 @@ class ParameterServerLauncher(object): "Local worker start {} processes. First process distributed " "environment info (Only For Debug): {}".format( len(pod.workers), - pretty_print_envs(proc_env, - ("Distributed Envs", "Value")))) + pretty_print_envs( + proc_env, ("Distributed Envs", "Value") + ), + ) + ) if args.log_dir is not None: os.system("mkdir -p {}".format(args.log_dir)) fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w") self.log_fns["worker"].append(fn) - proc = subprocess.Popen(cmd, - env=current_env, - stdout=fn, - stderr=fn) + proc = subprocess.Popen( + cmd, env=current_env, stdout=fn, stderr=fn + ) else: proc = subprocess.Popen(cmd, env=current_env) @@ -1816,12 +1992,15 @@ class ParameterServerLauncher(object): "FLAGS_selected_xpus": "0", "CUDA_VISIBLE_DEVICES": device_id, "XPU_VISIBLE_DEVICES": device_id, - "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } current_env.update(proc_env) - cmd = [sys.executable, "-u", args.training_script - ] + args.training_script_args + cmd = [ + sys.executable, + "-u", + args.training_script, + ] + args.training_script_args self.cmds["coordinator"].append(cmd) if idx == 0: @@ -1829,17 +2008,19 @@ class ParameterServerLauncher(object): "Local coordinator start {} processes. 
First process distributed " "environment info (Only For Debug): {}".format( len(pod.coordinators), - pretty_print_envs(proc_env, - ("Distributed Envs", "Value")))) + pretty_print_envs( + proc_env, ("Distributed Envs", "Value") + ), + ) + ) if args.log_dir is not None: os.system("mkdir -p {}".format(args.log_dir)) fn = open("%s/coordinator.%d" % (args.log_dir, idx), "w") self.log_fns["coordinator"].append(fn) - proc = subprocess.Popen(cmd, - env=current_env, - stdout=fn, - stderr=fn) + proc = subprocess.Popen( + cmd, env=current_env, stdout=fn, stderr=fn + ) else: proc = subprocess.Popen(cmd, env=current_env) @@ -1869,58 +2050,48 @@ class ParameterServerLauncher(object): device_list = [str(x) for x in range(0, heter_device_num)] for idx, cur_heter_worker in enumerate(pod.heter_workers): - device_id = "0" if heter_device_num == 0 else str( - device_list[(idx) % heter_device_num]) + device_id = ( + "0" + if heter_device_num == 0 + else str(device_list[(idx) % heter_device_num]) + ) stage_id = cur_heter_worker.stage proc_env = { - "PADDLE_PSERVERS_IP_PORT_LIST": - self.server_endpoints, - "PADDLE_TRAINER_ENDPOINTS": - self.worker_endpoints, - "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": - self.stage_heter_map[stage_id + 1] - if stage_id <= self.stage_num - 1 else "", - "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": - self.stage_heter_map[stage_id - 1], - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": - self.heter_worker_endpoints, - "HETER_DEVICE_TYPE": - self.stage_device_map[stage_id], - "STAGE_ID": - str(stage_id), - "STAGE_NUM": - str(self.stage_num), - "PADDLE_PORT": - cur_heter_worker.endpoint.split(":")[1], - "TRAINING_ROLE": - "HETER_TRAINER", - "PADDLE_TRAINERS_NUM": - str(self.worker_num), - "PADDLE_STAGE_TRAINERS_NUM": - str(self.stage_trainer_num), - "POD_IP": - cur_heter_worker.endpoint.split(":")[0], - "PADDLE_WITH_GLOO": - str(os.getenv("PADDLE_WITH_GLOO", "0")), - "PADDLE_GLOO_RENDEZVOUS": - "3", - "PADDLE_GLOO_FS_PATH": - self.gloo_rendezvous_dir, - "FLAGS_selected_gpus": - "0", - "FLAGS_selected_xpus": - "0", - "CUDA_VISIBLE_DEVICES": - device_id, - "XPU_VISIBLE_DEVICES": - device_id, - "PADDLE_GLOO_HTTP_ENDPOINT": - self.http_port + "PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints, + "PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints, + "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[ + stage_id + 1 + ] + if stage_id <= self.stage_num - 1 + else "", + "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": self.stage_heter_map[ + stage_id - 1 + ], + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST": self.heter_worker_endpoints, + "HETER_DEVICE_TYPE": self.stage_device_map[stage_id], + "STAGE_ID": str(stage_id), + "STAGE_NUM": str(self.stage_num), + "PADDLE_PORT": cur_heter_worker.endpoint.split(":")[1], + "TRAINING_ROLE": "HETER_TRAINER", + "PADDLE_TRAINERS_NUM": str(self.worker_num), + "PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num), + "POD_IP": cur_heter_worker.endpoint.split(":")[0], + "PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")), + "PADDLE_GLOO_RENDEZVOUS": "3", + "PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir, + "FLAGS_selected_gpus": "0", + "FLAGS_selected_xpus": "0", + "CUDA_VISIBLE_DEVICES": device_id, + "XPU_VISIBLE_DEVICES": device_id, + "PADDLE_GLOO_HTTP_ENDPOINT": self.http_port, } current_env.update(proc_env) - cmd = [sys.executable, "-u", args.training_script - ] + args.training_script_args + cmd = [ + sys.executable, + "-u", + args.training_script, + ] + args.training_script_args self.cmds["heter_worker"].append(cmd) if idx == 0: @@ -1928,17 
+2099,19 @@ class ParameterServerLauncher(object): "Local heter_worker start {} processes. First process distributed " "environment info (Only For Debug): {}".format( len(pod.heter_workers), - pretty_print_envs(proc_env, - ("Distributed Envs", "Value")))) + pretty_print_envs( + proc_env, ("Distributed Envs", "Value") + ), + ) + ) if args.log_dir is not None: os.system("mkdir -p {}".format(args.log_dir)) fn = open("%s/heterlog.%d" % (args.log_dir, idx), "w") self.log_fns["heter_worker"].append(fn) - proc = subprocess.Popen(cmd, - env=current_env, - stdout=fn, - stderr=fn) + proc = subprocess.Popen( + cmd, env=current_env, stdout=fn, stderr=fn + ) else: proc = subprocess.Popen(cmd, env=current_env) @@ -1955,13 +2128,21 @@ class ParameterServerLauncher(object): def check_backend(backend): if backend not in [ - 'nccl', 'gloo', 'bkcl', 'cncl', 'auto', 'hccl', 'heter', 'xccl' + 'nccl', + 'gloo', + 'bkcl', + 'cncl', + 'auto', + 'hccl', + 'heter', + 'xccl', ]: raise ValueError( "paddle.distributed initialize error, " "backend argument can only be one of " "'nccl', 'gloo', 'bkcl', 'auto', 'hccl', 'heter', 'xccl' " - "but got %s" % backend) + "but got %s" % backend + ) if backend == 'nccl' and not fluid.core.is_compiled_with_cuda(): raise ValueError( @@ -1989,7 +2170,8 @@ def check_backend(backend): def block_windows_and_macos(backend): - if backend != 'gloo': return + if backend != 'gloo': + return if utils.OS_NAME.startswith('darwin'): # MACOS , block raise ValueError( "You are going to using gloo on macos, but currently is not supported" diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py index 673f9b0f8a7ab2b717987f74e610e66881aafbb5..7ce317f8a38b0ca387ed5c207326c6ef80128137 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py @@ -86,27 +86,38 @@ class VocabParallelEmbedding(Layer): return x """ - def __init__(self, - num_embeddings, - embedding_dim, - weight_attr=None, - mp_group=None, - name=None): + def __init__( + self, + num_embeddings, + embedding_dim, + weight_attr=None, + mp_group=None, + name=None, + ): super(VocabParallelEmbedding, self).__init__() - self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group( - ) if mp_group is None else mp_group - self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size( - ) if mp_group is None else mp_group.nranks - self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank( - ) if mp_group is None else mp_group.rank + self.model_parallel_group = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group() + if mp_group is None + else mp_group + ) + self.world_size = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size() + if mp_group is None + else mp_group.nranks + ) + self.rank = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank() + if mp_group is None + else mp_group.rank + ) self.origin_num_embeddings = num_embeddings - self.is_mp = (self.world_size > 1) + self.is_mp = self.world_size > 1 - assert num_embeddings % self.world_size == 0, ( - "The length of the vocabulary must be divisible by the parallelism degree of MP" - ) + assert ( + num_embeddings % self.world_size == 0 + ), "The length of the vocabulary must be divisible by the parallelism degree of MP" per_part_size = num_embeddings // self.world_size @@ -118,15 +129,19 @@ class VocabParallelEmbedding(Layer): if self.is_mp and paddle.in_dynamic_mode(): with get_rng_state_tracker().rng_state(): 
- self.weight = self.create_parameter(attr=self._weight_attr, - shape=self._size, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._weight_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False, + ) else: - self.weight = self.create_parameter(attr=self._weight_attr, - shape=self._size, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._weight_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False, + ) self.weight.is_distributed = True if self.is_mp else False @@ -136,17 +151,22 @@ class VocabParallelEmbedding(Layer): self.weight, x, start_index=self.vocab_start_index, - name=self._name) - output = mp_ops._mp_allreduce(output_parallel, - group=self.model_parallel_group, - use_calc_stream=True, - use_model_parallel=True) + name=self._name, + ) + output = mp_ops._mp_allreduce( + output_parallel, + group=self.model_parallel_group, + use_calc_stream=True, + use_model_parallel=True, + ) else: - output = F.embedding(x, - weight=self.weight, - padding_idx=None, - sparse=False, - name=self._name) + output = F.embedding( + x, + weight=self.weight, + padding_idx=None, + sparse=False, + name=self._name, + ) return output @@ -200,29 +220,39 @@ class ColumnParallelLinear(Layer): return x """ - def __init__(self, - in_features, - out_features, - weight_attr=None, - has_bias=None, - gather_output=True, - fuse_matmul_bias=False, - mp_group=None, - name=None): + def __init__( + self, + in_features, + out_features, + weight_attr=None, + has_bias=None, + gather_output=True, + fuse_matmul_bias=False, + mp_group=None, + name=None, + ): super(ColumnParallelLinear, self).__init__() - self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group( - ) if mp_group is None else mp_group - self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size( - ) if mp_group is None else mp_group.nranks + self.model_parallel_group = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group() + if mp_group is None + else mp_group + ) + self.world_size = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size() + if mp_group is None + else mp_group.nranks + ) self._name = name - self.is_mp = (self.world_size > 1) + self.is_mp = self.world_size > 1 self.gather_output = gather_output assert out_features % self.world_size == 0, ( "Number of column of the weight for linear ({}) must be" " divisible by model parallel size ({})".format( - out_features, self.world_size)) + out_features, self.world_size + ) + ) self.output_size_per_partition = out_features // self.world_size self._weight_attr = weight_attr @@ -234,13 +264,15 @@ class ColumnParallelLinear(Layer): shape=[in_features, self.output_size_per_partition], attr=self._weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) else: self.weight = self.create_parameter( shape=[in_features, self.output_size_per_partition], attr=self._weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) self.weight.is_distributed = True if self.is_mp else False @@ -250,7 +282,8 @@ class ColumnParallelLinear(Layer): shape=[self.output_size_per_partition], attr=paddle.nn.initializer.Constant(value=0.0), dtype=self._dtype, - is_bias=True) + is_bias=True, + ) self.bias.is_distributed = True if self.is_mp else False else: self.bias = None @@ -263,26 +296,29 @@ class ColumnParallelLinear(Layer): "You set fuse_matmul_bias=True in ColumnParallelLinear, " "however, the paddle you are using not support this operation. 
" "Please set fuse_matmul_bias=False or use paddle compiled " - "with cuda 11.6 or higher.") + "with cuda 11.6 or higher." + ) from paddle.incubate.nn.functional import fused_linear + self.linear = fused_linear def forward(self, x): # use inner api to process identity if self.is_mp: - input_parallel = mp_ops._c_identity(x, - group=self.model_parallel_group) + input_parallel = mp_ops._c_identity( + x, group=self.model_parallel_group + ) else: input_parallel = x - output_parallel = self.linear(input_parallel, - self.weight, - self.bias, - name=self._name) + output_parallel = self.linear( + input_parallel, self.weight, self.bias, name=self._name + ) if self.gather_output and self.is_mp: - output = mp_ops._c_concat(output_parallel, - group=self.model_parallel_group) + output = mp_ops._c_concat( + output_parallel, group=self.model_parallel_group + ) else: output = output_parallel return output @@ -338,15 +374,17 @@ class RowParallelLinear(Layer): return x """ - def __init__(self, - in_features, - out_features, - weight_attr=None, - has_bias=True, - input_is_parallel=False, - fuse_matmul_bias=False, - mp_group=None, - name=None): + def __init__( + self, + in_features, + out_features, + weight_attr=None, + has_bias=True, + input_is_parallel=False, + fuse_matmul_bias=False, + mp_group=None, + name=None, + ): super(RowParallelLinear, self).__init__() self.in_features = in_features @@ -356,18 +394,29 @@ class RowParallelLinear(Layer): self._dtype = self._helper.get_default_dtype() self._name = name - self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group( - ) if mp_group is None else mp_group - self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size( - ) if mp_group is None else mp_group.nranks - self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank( - ) if mp_group is None else mp_group.rank + self.model_parallel_group = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group() + if mp_group is None + else mp_group + ) + self.world_size = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size() + if mp_group is None + else mp_group.nranks + ) + self.rank = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank() + if mp_group is None + else mp_group.rank + ) - self.is_mp = (self.world_size > 1) + self.is_mp = self.world_size > 1 assert in_features % self.world_size == 0, ( "Number of row of the weight for linear ({}) must be" " divisible by model parallel size ({})".format( - in_features, self.world_size)) + in_features, self.world_size + ) + ) self.input_size_per_partition = in_features // self.world_size @@ -377,13 +426,15 @@ class RowParallelLinear(Layer): shape=[self.input_size_per_partition, self.out_features], attr=self._weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) else: self.weight = self.create_parameter( shape=[self.input_size_per_partition, self.out_features], attr=self._weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) self.weight.is_distributed = True if self.is_mp else False @@ -392,7 +443,8 @@ class RowParallelLinear(Layer): shape=[self.out_features], attr=paddle.nn.initializer.Constant(value=0.0), dtype=self._dtype, - is_bias=True) + is_bias=True, + ) else: self.bias = None @@ -404,8 +456,10 @@ class RowParallelLinear(Layer): "You set fuse_matmul_bias=True in RowParallelLinear, " "however, the paddle you are using not support this operation. " "Please set fuse_matmul_bias=False or use paddle compiled " - "with cuda 11.6 or higher.") + "with cuda 11.6 or higher." 
+ ) from paddle.incubate.nn.functional import fused_linear + self.linear = fused_linear def forward(self, x): @@ -416,19 +470,20 @@ class RowParallelLinear(Layer): input_parallel = mp_ops._c_split(x, group=self.model_parallel_group) if self.is_mp: - output_parallel = self.linear(input_parallel, - self.weight, - name=self._name) - output_ = mp_ops._mp_allreduce(output_parallel, - group=self.model_parallel_group, - use_calc_stream=True, - use_model_parallel=True) + output_parallel = self.linear( + input_parallel, self.weight, name=self._name + ) + output_ = mp_ops._mp_allreduce( + output_parallel, + group=self.model_parallel_group, + use_calc_stream=True, + use_model_parallel=True, + ) output = output_ + self.bias if self.bias is not None else output_ else: - output = self.linear(input_parallel, - self.weight, - self.bias, - name=self._name) + output = self.linear( + input_parallel, self.weight, self.bias, name=self._name + ) return output @@ -451,14 +506,24 @@ class ParallelCrossEntropy(Layer): def __init__(self, mp_group=None, name=None): super(ParallelCrossEntropy, self).__init__() self.name = name - self.model_parallel_group = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group( - ) if mp_group is None else mp_group - self.world_size = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size( - ) if mp_group is None else mp_group.nranks - self.rank = tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank( - ) if mp_group is None else mp_group.rank + self.model_parallel_group = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group() + if mp_group is None + else mp_group + ) + self.world_size = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size() + if mp_group is None + else mp_group.nranks + ) + self.rank = ( + tp._HYBRID_PARALLEL_GROUP.get_model_parallel_rank() + if mp_group is None + else mp_group.rank + ) def forward(self, input, label): loss = mp_ops._c_softmax_with_cross_entropy( - input, label, group=self.model_parallel_group) + input, label, group=self.model_parallel_group + ) return loss diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index 30c2a7ea3c4745dd475d0ba231c25eb093c58ab3..b1627d5a3b79c017e791649a30e7015b661fdbb4 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -47,12 +47,17 @@ def _c_identity(tensor, group=None): from paddle.autograd import PyLayer class c_identity_eager(PyLayer): - @staticmethod def forward(ctx, tensor): - return _legacy_C_ops.c_identity(tensor, 'use_calc_stream', True, - 'ring_id', group.id, - 'use_model_parallel', True) + return _legacy_C_ops.c_identity( + tensor, + 'use_calc_stream', + True, + 'ring_id', + group.id, + 'use_model_parallel', + True, + ) @staticmethod def backward(ctx, dy): @@ -63,25 +68,36 @@ def _c_identity(tensor, group=None): return c_identity_eager.apply(tensor) elif _in_legacy_dygraph(): - return _legacy_C_ops.c_identity(tensor, 'use_calc_stream', True, - 'ring_id', ring_id, - 'use_model_parallel', True) + return _legacy_C_ops.c_identity( + tensor, + 'use_calc_stream', + True, + 'ring_id', + ring_id, + 'use_model_parallel', + True, + ) op_type = 'c_identity' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=tensor.dtype) check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - '_c_identity') - - helper.append_op(type=op_type, - inputs={'X': tensor}, - outputs={'Out': out}, - attrs={ - 
'ring_id': ring_id, - 'use_calc_stream': True, - 'use_model_parallel': True, - }) + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_identity', + ) + + helper.append_op( + type=op_type, + inputs={'X': tensor}, + outputs={'Out': out}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + 'use_model_parallel': True, + }, + ) return out @@ -107,29 +123,43 @@ def _c_concat(tensor, group=None): nranks = group.nranks if _non_static_mode(): - return _legacy_C_ops.c_concat(tensor, 'ring_id', ring_id, - 'use_calc_stream', True, 'rank', rank, - 'nranks', nranks, 'use_model_parallel', - True) + return _legacy_C_ops.c_concat( + tensor, + 'ring_id', + ring_id, + 'use_calc_stream', + True, + 'rank', + rank, + 'nranks', + nranks, + 'use_model_parallel', + True, + ) op_type = 'c_concat' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=tensor.dtype) check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - '_c_concat') - - helper.append_op(type=op_type, - inputs={'X': tensor}, - outputs={'Out': out}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - 'use_model_parallel': True, - 'nranks': nranks, - 'rank': rank - }) + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_concat', + ) + + helper.append_op( + type=op_type, + inputs={'X': tensor}, + outputs={'Out': out}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + 'use_model_parallel': True, + 'nranks': nranks, + 'rank': rank, + }, + ) return out @@ -152,42 +182,61 @@ def _c_split(tensor, group=None): global_rank = collective._get_global_env().rank rank = global_rank if group is None else group.get_group_rank(global_rank) - nranks = collective._get_global_env( - ).world_size if group is None else group.nranks + nranks = ( + collective._get_global_env().world_size + if group is None + else group.nranks + ) if _non_static_mode(): - return _legacy_C_ops.c_split(tensor, 'use_calc_stream', True, 'ring_id', - ring_id, 'rank', rank, 'nranks', nranks, - 'use_model_parallel', True) + return _legacy_C_ops.c_split( + tensor, + 'use_calc_stream', + True, + 'ring_id', + ring_id, + 'rank', + rank, + 'nranks', + nranks, + 'use_model_parallel', + True, + ) op_type = 'c_split' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=tensor.dtype) check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - '_c_split') - - helper.append_op(type=op_type, - inputs={'X': tensor}, - outputs={'Out': out}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - 'rank': rank, - 'nranks': nranks, - 'use_model_parallel': True, - }) + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + '_c_split', + ) + + helper.append_op( + type=op_type, + inputs={'X': tensor}, + outputs={'Out': out}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + 'rank': rank, + 'nranks': nranks, + 'use_model_parallel': True, + }, + ) return out -def _mp_allreduce(tensor, - op=ReduceOp.SUM, - group=None, - use_calc_stream=True, - use_model_parallel=True): - """[it is same as allreduce above, but it supports model parallel. And it support inplace startegy] - """ +def _mp_allreduce( + tensor, + op=ReduceOp.SUM, + group=None, + use_calc_stream=True, + use_model_parallel=True, +): + """[it is same as allreduce above, but it supports model parallel. 
And it support inplace startegy]""" if group is not None and not group.is_member(): return @@ -198,38 +247,57 @@ def _mp_allreduce(tensor, from paddle.autograd import PyLayer class mp_allreduce_eager(PyLayer): - @staticmethod - def forward(ctx, tensor, group, use_calc_stream, - use_model_parallel): + def forward( + ctx, tensor, group, use_calc_stream, use_model_parallel + ): ctx.ring_id = group.id if use_calc_stream: op_type = collective._get_reduce_op(op, "_mp_allreduce") group.process_group.allreduce_on_calc_stream( - tensor, op_type) + tensor, op_type + ) return tensor else: return _legacy_C_ops.c_allreduce_sum_( - tensor, 'use_calc_stream', use_calc_stream, 'ring_id', - ring_id, "use_model_parallel", use_model_parallel) + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + "use_model_parallel", + use_model_parallel, + ) @staticmethod def backward(ctx, dy): - return _legacy_C_ops.c_identity(dy, 'use_calc_stream', True, - 'ring_id', ctx.ring_id, - 'use_model_parallel', True) - - return mp_allreduce_eager.apply(tensor, group, use_calc_stream, - use_model_parallel) + return _legacy_C_ops.c_identity( + dy, + 'use_calc_stream', + True, + 'ring_id', + ctx.ring_id, + 'use_model_parallel', + True, + ) + + return mp_allreduce_eager.apply( + tensor, group, use_calc_stream, use_model_parallel + ) ring_id = 0 if group is None else group.id if _in_legacy_dygraph(): if op == ReduceOp.SUM: - return _legacy_C_ops.c_allreduce_sum_(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, "use_model_parallel", - use_model_parallel) + return _legacy_C_ops.c_allreduce_sum_( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + "use_model_parallel", + use_model_parallel, + ) else: raise ValueError("Unknown parameter: {}.".format(op)) @@ -238,17 +306,22 @@ def _mp_allreduce(tensor, out = helper.create_variable_for_type_inference(dtype=tensor.dtype) check_variable_and_dtype( - tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'], - op_type) - - helper.append_op(type=op_type, - inputs={'X': tensor}, - outputs={'Out': out}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - 'use_model_parallel': use_model_parallel, - }) + tensor, + 'tensor', + ['float16', 'float32', 'float64', 'int32', 'int64'], + op_type, + ) + + helper.append_op( + type=op_type, + inputs={'X': tensor}, + outputs={'Out': out}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + 'use_model_parallel': use_model_parallel, + }, + ) return out @@ -267,21 +340,21 @@ def _c_lookup_table(table, index, start_index=0, name=None): Tensor. 
""" if _non_static_mode(): - return _legacy_C_ops.c_embedding(table, index, "start_index", - start_index) + return _legacy_C_ops.c_embedding( + table, index, "start_index", start_index + ) op_type = 'c_embedding' helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='table') check_variable_and_dtype(index, 'input', ['int32', 'int64'], op_type) tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='c_embedding', - inputs={ - 'Ids': index, - 'W': table - }, - outputs={'Out': tmp}, - attrs={"start_index": start_index}) + helper.append_op( + type='c_embedding', + inputs={'Ids': index, 'W': table}, + outputs={'Out': tmp}, + attrs={"start_index": start_index}, + ) return tmp @@ -290,63 +363,75 @@ class _Linear(layers.Layer): Linear """ - def __init__(self, - in_features, - out_features, - weight_attr=None, - bias_attr=None, - name=None): + def __init__( + self, + in_features, + out_features, + weight_attr=None, + bias_attr=None, + name=None, + ): super(_Linear, self).__init__() self._dtype = self._helper.get_default_dtype() self._weight_attr = weight_attr self._bias_attr = bias_attr - self.weight = self.create_parameter(shape=[in_features, out_features], - attr=self._weight_attr, - dtype=self._dtype, - is_bias=False) - self.bias = self.create_parameter(shape=[out_features], - attr=self._bias_attr, - dtype=self._dtype, - is_bias=True) + self.weight = self.create_parameter( + shape=[in_features, out_features], + attr=self._weight_attr, + dtype=self._dtype, + is_bias=False, + ) + self.bias = self.create_parameter( + shape=[out_features], + attr=self._bias_attr, + dtype=self._dtype, + is_bias=True, + ) self.name = name def forward(self, input): - out = _linear(x=input, - weight=self.weight, - bias=self.bias, - name=self.name) + out = _linear( + x=input, weight=self.weight, bias=self.bias, name=self.name + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' return 'in_features={}, out_features={}, dtype={}{}'.format( - self.weight.shape[0], self.weight.shape[1], self._dtype, name_str) + self.weight.shape[0], self.weight.shape[1], self._dtype, name_str + ) -def _c_softmax_with_cross_entropy(logits, - label, - group=None, - return_softmax=False): +def _c_softmax_with_cross_entropy( + logits, label, group=None, return_softmax=False +): if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id global_rank = collective._get_global_env().rank rank = global_rank if group is None else group.get_group_rank(global_rank) - nranks = collective._get_global_env( - ).world_size if group is None else group.nranks + nranks = ( + collective._get_global_env().world_size + if group is None + else group.nranks + ) input_dims = len(list(logits.shape)) label_dims = len(list(label.shape)) if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( 'Expected nput_dims - 1 = label_dims or input_dims == label_dims\ - (got nput_dims{}, label_dims{})'.format(input_dims, label_dims)) + (got nput_dims{}, label_dims{})'.format( + input_dims, label_dims + ) + ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) if _non_static_mode(): softmax, loss = _legacy_C_ops.c_softmax_with_cross_entropy( - logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks) + logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks + ) if not return_softmax: return loss else: @@ -360,16 +445,12 @@ def _c_softmax_with_cross_entropy(logits, helper = 
LayerHelper('c_softmax_with_cross_entropy', **locals()) softmax = helper.create_variable_for_type_inference(dtype=logits.dtype) loss = helper.create_variable_for_type_inference(dtype=logits.dtype) - helper.append_op(type='c_softmax_with_cross_entropy', - inputs={ - 'Logits': logits, - 'Label': label - }, - outputs={ - 'Softmax': softmax, - 'Loss': loss - }, - attrs=attrs) + helper.append_op( + type='c_softmax_with_cross_entropy', + inputs={'Logits': logits, 'Label': label}, + outputs={'Softmax': softmax, 'Loss': loss}, + attrs=attrs, + ) if return_softmax: return loss, softmax @@ -383,19 +464,30 @@ def _linear(x, weight, bias=None, name=None): """ if _non_static_mode(): pre_bias = _varbase_creator(dtype=x.dtype) - _legacy_C_ops.matmul(x, weight, pre_bias, 'transpose_X', False, - 'transpose_Y', False, "alpha", 1) - return dygraph_utils._append_bias_in_dygraph(pre_bias, - bias, - axis=len(x.shape) - 1) + _legacy_C_ops.matmul( + x, + weight, + pre_bias, + 'transpose_X', + False, + 'transpose_Y', + False, + "alpha", + 1, + ) + return dygraph_utils._append_bias_in_dygraph( + pre_bias, bias, axis=len(x.shape) - 1 + ) else: helper = LayerHelper('linear', **locals()) dtype = x.dtype - assert len( - x.shape) < 4, "X latitude is not supported greater than 3 now." + assert ( + len(x.shape) < 4 + ), "X latitude is not supported greater than 3 now." - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'linear') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'linear' + ) check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear') inputs = {'X': [x], 'Y': [weight]} @@ -405,19 +497,17 @@ def _linear(x, weight, bias=None, name=None): 'alpha': 1, } tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='matmul_v2', - inputs=inputs, - outputs={'Out': tmp}, - attrs=attrs) + helper.append_op( + type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs + ) if bias is not None: res = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_add', - inputs={ - 'X': [tmp], - 'Y': [bias] - }, - outputs={'Out': [res]}, - attrs={'axis': len(x.shape) - 1}) + helper.append_op( + type='elementwise_add', + inputs={'X': [tmp], 'Y': [bias]}, + outputs={'Out': [res]}, + attrs={'axis': len(x.shape) - 1}, + ) else: res = tmp return res @@ -436,18 +526,20 @@ def _set_var_distributed(var): main_block._find_var_recursive(var.name).is_distributed = True -def _parallel_linear(x, - num_rows, - num_cols, - axis, - param_attr, - bias_attr, - gather_out, - inner_rank, - nranks, - split_tensor, - name, - group=None): +def _parallel_linear( + x, + num_rows, + num_cols, + axis, + param_attr, + bias_attr, + gather_out, + inner_rank, + nranks, + split_tensor, + name, + group=None, +): """ Parallel Linear @@ -466,21 +558,25 @@ def _parallel_linear(x, else: x = _c_identity(x, group=group) - linear = paddle.nn.Linear(num_rows, - num_cols, - weight_attr=param_attr, - bias_attr=bias_attr, - name=name) + linear = paddle.nn.Linear( + num_rows, + num_cols, + weight_attr=param_attr, + bias_attr=bias_attr, + name=name, + ) # NOTE: npu linear function use matmul_v2 but linear use matmul - linear_function = _linear if core.is_compiled_with_npu()\ - else paddle.nn.functional.linear + linear_function = ( + _linear if core.is_compiled_with_npu() else paddle.nn.functional.linear + ) linear_out = linear_function( x, linear.weight, # NOTE(wangxi): row split, bias need add after allreduce None if axis == 0 else linear.bias, - linear.name) + 
linear.name, + ) _set_var_distributed(linear.weight) # set is_distributed for splited bias @@ -489,7 +585,8 @@ def _parallel_linear(x, if axis == 1 and linear._bias_attr != False: _set_var_distributed(linear.bias) - if not gather_out: return linear_out + if not gather_out: + return linear_out out_shape = list(linear_out.shape) out_shape[0] *= 1 if axis == 0 else nranks @@ -501,40 +598,47 @@ def _parallel_linear(x, lod_level=linear_out.lod_level, persistable=False, is_data=False, - need_check_feed=linear_out.desc.need_check_feed()) + need_check_feed=linear_out.desc.need_check_feed(), + ) if axis == 0: - main_block.append_op(type='c_allreduce_sum', - inputs={'X': linear_out}, - outputs={'Out': out}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - 'use_model_parallel': True - }) + main_block.append_op( + type='c_allreduce_sum', + inputs={'X': linear_out}, + outputs={'Out': out}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + 'use_model_parallel': True, + }, + ) if linear.bias is not None: out = out + linear.bias else: - main_block.append_op(type='c_concat', - inputs={'X': linear_out}, - outputs={'Out': out}, - attrs={ - 'rank': inner_rank, - 'ring_id': ring_id, - 'nranks': nranks, - 'use_calc_stream': True, - 'use_model_parallel': True - }) + main_block.append_op( + type='c_concat', + inputs={'X': linear_out}, + outputs={'Out': out}, + attrs={ + 'rank': inner_rank, + 'ring_id': ring_id, + 'nranks': nranks, + 'use_calc_stream': True, + 'use_model_parallel': True, + }, + ) return out -def _parallel_embedding(x, - per_part_embeddings, - origin_size, - param_attr, - inner_rank, - num_partitions, - name, - group=None): +def _parallel_embedding( + x, + per_part_embeddings, + origin_size, + param_attr, + inner_rank, + num_partitions, + name, + group=None, +): """ Parallel Embedding """ @@ -551,43 +655,43 @@ def _parallel_embedding(x, dtype = helper.get_default_dtype() size = [per_part_size, origin_size[1]] - weight = helper.create_parameter(attr=param_attr, - shape=size, - dtype=dtype, - is_bias=False) + weight = helper.create_parameter( + attr=param_attr, shape=size, dtype=dtype, is_bias=False + ) if num_partitions == 1: - return paddle.nn.functional.embedding(x, - weight=weight, - padding_idx=None, - sparse=False, - name=name) + return paddle.nn.functional.embedding( + x, weight=weight, padding_idx=None, sparse=False, name=name + ) startup_block = paddle.static.default_startup_program().global_block() main_block = paddle.static.default_main_program().global_block() startup_block.vars[weight.name].is_distributed = True main_block.vars[weight.name].is_distributed = True - output_parallel = _c_lookup_table(weight, - x, - start_index=vocab_start_index, - name=name) - out = _mp_allreduce(output_parallel, - group=group, - use_calc_stream=True, - use_model_parallel=True) + output_parallel = _c_lookup_table( + weight, x, start_index=vocab_start_index, name=name + ) + out = _mp_allreduce( + output_parallel, + group=group, + use_calc_stream=True, + use_model_parallel=True, + ) return out -def split(x, - size, - operation, - axis=0, - num_partitions=1, - gather_out=True, - weight_attr=None, - bias_attr=None, - name=None): +def split( + x, + size, + operation, + axis=0, + num_partitions=1, + gather_out=True, + weight_attr=None, + bias_attr=None, + name=None, +): """ Split the weight of the specified operation into multiple devices @@ -706,14 +810,16 @@ def split(x, num_partitions=2) """ - assert isinstance( - size, - (list, tuple)), ("The type of size for " - 
"paddle.distributed.split must be list or tuple.") - assert len(size) == 2, ("Number of elements in size of " - "paddle.distributed.split must be two.") - assert isinstance(operation, str), ("The type of operation for " - "paddle.distributed.split must be str.") + assert isinstance(size, (list, tuple)), ( + "The type of size for " + "paddle.distributed.split must be list or tuple." + ) + assert len(size) == 2, ( + "Number of elements in size of " "paddle.distributed.split must be two." + ) + assert isinstance(operation, str), ( + "The type of operation for " "paddle.distributed.split must be str." + ) supported_operations = [ 'linear', 'embedding', @@ -721,16 +827,22 @@ def split(x, assert operation in supported_operations, ( "The operation for " "paddle.distributed.split must be one of {}.".format( - supported_operations)) + supported_operations + ) + ) if _non_static_mode(): raise ValueError( "paddle.distributed.split cannot be used in dynamic " "graph mode, plese use ParallelEmbedding, ParallelRowLinear, " - "ParallelColumnLinear instead.") + "ParallelColumnLinear instead." + ) else: from paddle.distributed.fleet import fleet - assert fleet._role_maker, ("To use paddle.distributed.split, " - "you must call fleet.init() firstly.") + + assert fleet._role_maker, ( + "To use paddle.distributed.split, " + "you must call fleet.init() firstly." + ) rank = fleet.worker_index() nranks = fleet.worker_num() @@ -738,21 +850,28 @@ def split(x, inner_rank = rank % num_partitions if operation == "embedding": - assert axis == 0, ("We only support to split the weight of embedding " - "along the first axis now.") - assert size[0] % num_partitions == 0, \ - "The length of the vocabulary must be divisible by num_partitions " \ - "but received vocabulary={} num_partitions={}".format(size[0], num_partitions) + assert axis == 0, ( + "We only support to split the weight of embedding " + "along the first axis now." 
+ ) + assert size[0] % num_partitions == 0, ( + "The length of the vocabulary must be divisible by num_partitions " + "but received vocabulary={} num_partitions={}".format( + size[0], num_partitions + ) + ) per_part_size = size[0] // num_partitions - emb_out = _parallel_embedding(x, - per_part_size, - size, - weight_attr, - inner_rank, - num_partitions, - name, - group=None) + emb_out = _parallel_embedding( + x, + per_part_size, + size, + weight_attr, + inner_rank, + num_partitions, + name, + group=None, + ) return emb_out else: should_split = False @@ -760,32 +879,41 @@ def split(x, assert size[0] % num_partitions == 0, ( "Number of rows of the weight for linear ({}) must be" " divisible by num_partitions ({})".format( - size[0], num_partitions)) + size[0], num_partitions + ) + ) per_part_size = size[0] // num_partitions linear_size = (per_part_size, size[1]) - if x.shape[-1] == size[0]: should_split = True + if x.shape[-1] == size[0]: + should_split = True elif axis == 1: assert size[1] % num_partitions == 0, ( "Number of column of the weight for linear ({}) must be" " divisible by num_partitions ({})".format( - size[1], num_partitions)) + size[1], num_partitions + ) + ) per_part_size = size[1] // num_partitions linear_size = (size[0], per_part_size) else: - raise ValueError("The value of axis must be 0 or 1, but the value " - "given is {}.".format(axis)) - - linear_out = _parallel_linear(x, - linear_size[0], - linear_size[1], - axis, - weight_attr, - bias_attr, - gather_out, - inner_rank, - num_partitions, - should_split, - name=name, - group=None) + raise ValueError( + "The value of axis must be 0 or 1, but the value " + "given is {}.".format(axis) + ) + + linear_out = _parallel_linear( + x, + linear_size[0], + linear_size[1], + axis, + weight_attr, + bias_attr, + gather_out, + inner_rank, + num_partitions, + should_split, + name=name, + group=None, + ) return linear_out diff --git a/python/paddle/distributed/fleet/layers/mpu/random.py b/python/paddle/distributed/fleet/layers/mpu/random.py index ff082695cb7eed74e3d12bc0c76855d79b5ffba3..17442c1938a1d3264da80e3a05a19c5c41ed63d7 100644 --- a/python/paddle/distributed/fleet/layers/mpu/random.py +++ b/python/paddle/distributed/fleet/layers/mpu/random.py @@ -85,6 +85,7 @@ def get_rng_state_tracker(): def model_parallel_random_seed(seed=None): import paddle.distributed.fleet as fleet + hcg = fleet.get_hybrid_communicate_group() rank = hcg.get_model_parallel_rank() @@ -105,23 +106,23 @@ def determinate_seed(rng_name): helper = LayerHelper('seed', **locals()) out = helper.create_variable_for_type_inference(dtype=paddle.int32) # set force_cpu to reduce sync copy from CPU->GPU->CPU, and reduce pipeline hang - helper.append_op(type='seed', - outputs={'Out': out}, - attrs={ - 'deterministic': True, - 'rng_name': rng_name, - 'force_cpu': True - }) + helper.append_op( + type='seed', + outputs={'Out': out}, + attrs={'deterministic': True, 'rng_name': rng_name, 'force_cpu': True}, + ) return out -def dropout(x, - p=0.5, - axis=None, - rng_name=None, - training=True, - mode="upscale_in_train", - name=None): +def dropout( + x, + p=0.5, + axis=None, + rng_name=None, + training=True, + mode="upscale_in_train", + name=None, +): """ Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaption during training. 
The dropout operator randomly sets the @@ -191,53 +192,66 @@ def dropout(x, raise TypeError("p argument should be a number(int|float) or Variable") # fast return for p == 0 - if isinstance(p, (int, float)) and p == 0: return x + if isinstance(p, (int, float)) and p == 0: + return x assert 0 <= p <= 1, ValueError("p argument should between 0 and 1") - assert mode in ('downscale_in_infer', 'upscale_in_train'), \ - ValueError( - "mode argument should be 'downscale_in_infer' or 'upscale_in_train'") + assert mode in ('downscale_in_infer', 'upscale_in_train'), ValueError( + "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" + ) - assert axis is None, \ - TypeError("unsupport axis when using random seed generator") + assert axis is None, TypeError( + "unsupport axis when using random seed generator" + ) - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer # dygraph using tracker, doesn't need determinate seed if _non_static_mode(): - out, mask = _legacy_C_ops.dropout(x, 'dropout_prob', p, 'is_test', - not training, 'fix_seed', False, - 'seed', 0, 'dropout_implementation', - mode) + out, mask = _legacy_C_ops.dropout( + x, + 'dropout_prob', + p, + 'is_test', + not training, + 'fix_seed', + False, + 'seed', + 0, + 'dropout_implementation', + mode, + ) return out seed = determinate_seed(rng_name) if isinstance(p, Variable) and not p.shape != [1]: raise TypeError( - "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}" - .format(p.shape)) + "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}".format( + p.shape + ) + ) helper = LayerHelper('dropout', **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'dropout') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'dropout' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) mask = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) - - helper.append_op(type='dropout', - inputs={ - 'X': [x], - 'Seed': seed - }, - outputs={ - 'Out': [out], - 'Mask': [mask] - }, - attrs={ - 'dropout_prob': p, - 'is_test': not training, - 'dropout_implementation': mode, - }) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) + + helper.append_op( + type='dropout', + inputs={'X': [x], 'Seed': seed}, + outputs={'Out': [out], 'Mask': [mask]}, + attrs={ + 'dropout_prob': p, + 'is_test': not training, + 'dropout_implementation': mode, + }, + ) return out diff --git a/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py index 78a53ccdba55e62a70661b714f553ba6869ff42a..f9e8b6b8e21d51e19de8da572dc639d8aedda680 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py @@ -18,7 +18,6 @@ __all__ = [] class AMPOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(AMPOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -32,11 +31,12 @@ class AMPOptimizer(MetaOptimizerBase): ] self.meta_optimizers_black_list = ["DGCOptimizer"] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(AMPOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, 
role_maker, user_defined_optimizer, user_defined_strategy + ): + super(AMPOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _init_wrapped_opt(self): if self.wrapped_opt is not None: @@ -48,14 +48,21 @@ class AMPOptimizer(MetaOptimizerBase): custom_black_list = set(config['custom_black_list']) custom_black_varnames = set(config['custom_black_varnames']) amp_lists = mixed_precision.AutoMixedPrecisionLists( - custom_white_list, custom_black_list, custom_black_varnames) + custom_white_list, custom_black_list, custom_black_varnames + ) self.wrapped_opt = mixed_precision.decorate( - self.inner_opt, amp_lists, config['init_loss_scaling'], - config['incr_every_n_steps'], config['decr_every_n_nan_or_inf'], - config['incr_ratio'], config['decr_ratio'], - config['use_dynamic_loss_scaling'], config['use_pure_fp16'], - config['use_fp16_guard']) + self.inner_opt, + amp_lists, + config['init_loss_scaling'], + config['incr_every_n_steps'], + config['decr_every_n_nan_or_inf'], + config['incr_ratio'], + config['decr_ratio'], + config['use_dynamic_loss_scaling'], + config['use_pure_fp16'], + config['use_fp16_guard'], + ) # if worker_num > 1, all cards will communication with each other, # add is_distributed to optimize amp, overlap communication and @@ -87,46 +94,46 @@ class AMPOptimizer(MetaOptimizerBase): "decr_every_n_nan_or_inf": 2, "incr_ratio": 2.0, "decr_ratio": 0.8, - "use_dynamic_loss_scaling": True + "use_dynamic_loss_scaling": True, } - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): # maybe inner_opt of other meta optimizer self._init_wrapped_opt() - return self.wrapped_opt.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + return self.wrapped_opt.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_gradients(self, params_grads): return self.wrapped_opt.apply_gradients(params_grads=params_grads) def apply_optimize(self, loss, startup_program, params_grads): - return self.wrapped_opt.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) - - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + return self.wrapped_opt.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) + + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self._init_wrapped_opt() - optimize_ops, params_grads = \ - self.wrapped_opt.minimize(loss, startup_program, - parameter_list, no_grad_set) + optimize_ops, params_grads = self.wrapped_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads - def amp_init(self, - place, - scope=None, - test_program=None, - use_fp16_test=False): - return self.wrapped_opt.amp_init(place, scope, test_program, - use_fp16_test) + def amp_init( + self, place, scope=None, test_program=None, use_fp16_test=False + ): + return self.wrapped_opt.amp_init( + place, scope, test_program, use_fp16_test + ) def get_loss_scaling(self): return self.wrapped_opt.get_loss_scaling() diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py index 
c78c855a285521ace29a5942c0ee5634f0c89a9b..ee3f96e243fe2a1ccd88879ff25aa6017ca23a34 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py @@ -25,7 +25,6 @@ __all__ = [] class AscendIRParser(object): - def __init__(self, auto_dp=False, world_rank_size=1): self.graph_idx = 0 self.hcom_endpoints = {} @@ -39,24 +38,33 @@ class AscendIRParser(object): for id, var in enumerate(input_varlist): if var.is_data: # input data ge_input = core.GEOperatorFactory.create_operator( - var.name, "Data").set_attr_int32("index", id) + var.name, "Data" + ).set_attr_int32("index", id) ret_map[var.name] = ge_input ge_in_operator.append(ge_input) else: # param, learning ... ge_input = core.GEOperatorFactory.create_operator( - var.name, "Variable") + var.name, "Variable" + ) ge_input.update_output_desc( "y", - core.GETensorDesc(core.GEShape(var.shape), - core.GEFormat.FORMAT_ND, - core.GEDataType.DT_FLOAT)) + core.GETensorDesc( + core.GEShape(var.shape), + core.GEFormat.FORMAT_ND, + core.GEDataType.DT_FLOAT, + ), + ) ret_map[var.name] = ge_input return ge_in_operator, ret_map def _endpoint_to_world_rank_id(self, endpoint): world_endpoints = fleet.worker_endpoints() - assert endpoint in world_endpoints, "endpoint (%s) not in worker_endpoints (%s) " % ( - endpoint, fleet.world_device_ids()) + assert ( + endpoint in world_endpoints + ), "endpoint (%s) not in worker_endpoints (%s) " % ( + endpoint, + fleet.world_device_ids(), + ) return world_endpoints.index(endpoint) def parse_op(self, op): @@ -72,13 +80,16 @@ class AscendIRParser(object): self.hcom_endpoints[nccl_id] = other_endpoints[:] self.hcom_endpoints[nccl_id].insert(rank, endpoint) - print("nccl_id (%s) registered endpoints %s" % - (nccl_id, self.hcom_endpoints[nccl_id])) + print( + "nccl_id (%s) registered endpoints %s" + % (nccl_id, self.hcom_endpoints[nccl_id]) + ) elif op.type == 'c_comm_init': nccl_id = op.input_arg_names[0] nranks = op.attr("nranks") - assert nranks == len(self.hcom_endpoints[nccl_id] - ), "nranks doesn't match endpoint count" + assert nranks == len( + self.hcom_endpoints[nccl_id] + ), "nranks doesn't match endpoint count" rank = op.attr("rank") ring_id = op.attr("ring_id") @@ -88,24 +99,29 @@ class AscendIRParser(object): for endpoint in self.hcom_endpoints[nccl_id] ] self.groups_to_create.append( - HcomGroupConfig(name=group_name, - nranks=nranks, - rank_ids=global_rank_ids)) - print("append to create group: %s, with rank_ids: %s" % - (group_name, global_rank_ids)) + HcomGroupConfig( + name=group_name, nranks=nranks, rank_ids=global_rank_ids + ) + ) + print( + "append to create group: %s, with rank_ids: %s" + % (group_name, global_rank_ids) + ) elif op.type in ascend_parser.registerd_op: op_parser = self.parser_factory.create_parse( - ascend_parser.registerd_op[op.type]) + ascend_parser.registerd_op[op.type] + ) op_parser.apply(op) else: - assert False, "Op[%s] has not been registered, so we have to skip it" % ( - op.type) - - def _parse_program(self, - graph_name, - program, - input_varlist=[], - fetch_list=[]): + assert ( + False + ), "Op[%s] has not been registered, so we have to skip it" % ( + op.type + ) + + def _parse_program( + self, graph_name, program, input_varlist=[], fetch_list=[] + ): begin_graph_idx = self.graph_idx ge_in_operator = [] ge_out_operator = [] @@ -121,7 +137,8 @@ class AscendIRParser(object): ge_in_operator, self.var2geop = self._construct_input_map(input_varlist) self.parser_factory = 
ascend_parser.AscendParserFactory( - graph, self.var2geop) + graph, self.var2geop + ) for i, curop in list(enumerate(block.ops)): self.parse_op(curop) @@ -150,28 +167,36 @@ class AscendIRParser(object): input_varlist = [var for var in input_varlist if var.is_data] - block.append_op(type="ascend_trigger", - inputs={"FeedList": input_varlist}, - outputs={"FetchList": fetch_list}, - attrs={'graph_idx': self.graph_idx}) + block.append_op( + type="ascend_trigger", + inputs={"FeedList": input_varlist}, + outputs={"FetchList": fetch_list}, + attrs={'graph_idx': self.graph_idx}, + ) self.graph_idx += 1 return graph - def parse_program(self, startup_program, main_program, input_varlist, - fetch_list): + def parse_program( + self, startup_program, main_program, input_varlist, fetch_list + ): startup_graph = self._parse_program("startup", startup_program) - main_graph = self._parse_program("main", main_program, input_varlist, - fetch_list) + main_graph = self._parse_program( + "main", main_program, input_varlist, fetch_list + ) if self._auto_dp and self._world_rank_size > 1: - assert len(self.groups_to_create - ) == 0, "can't parse program under auto_dp mode" + assert ( + len(self.groups_to_create) == 0 + ), "can't parse program under auto_dp mode" from paddle.distributed import fleet + self.groups_to_create.append( - HcomGroupConfig(name="hcom_group_0", - nranks=fleet.world_size(), - rank_ids=[x - for x in range(fleet.world_size())])) + HcomGroupConfig( + name="hcom_group_0", + nranks=fleet.world_size(), + rank_ids=[x for x in range(fleet.world_size())], + ) + ) return startup_graph, main_graph @@ -179,7 +204,6 @@ class AscendIRParser(object): # AscendOptimizer is a wrapper for basic optimizer now # We will make it part of fleet meta_optimizer in the future class AscendOptimizer(Optimizer): - def __init__(self, optimizer, fetch_list=[]): self.inner_opt = optimizer self.fetch_list = fetch_list @@ -209,28 +233,34 @@ class AscendOptimizer(Optimizer): ret_list.append(var) return ret_list - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - auto_dp=False, - rank_table_file=None, - precision_mode="must_keep_origin_dtype"): + def minimize( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + auto_dp=False, + rank_table_file=None, + precision_mode="must_keep_origin_dtype", + ): minimized = None if self.inner_opt: - minimized = self.inner_opt.minimize(loss, - startup_program=startup_program) + minimized = self.inner_opt.minimize( + loss, startup_program=startup_program + ) self.ascend_instance = core.AscendInstance() from paddle.distributed import fleet + if auto_dp and fleet.world_size() > 1: from paddle.fluid.transpiler import ascend_transpiler - t = ascend_transpiler.AscendTranspiler(startup_program, - loss.block.program) + + t = ascend_transpiler.AscendTranspiler( + startup_program, loss.block.program + ) t.transpile() - #print(loss.block.program) + # print(loss.block.program) # Config about Graph Engine can be found in https://support.huaweicloud.com/ config = { @@ -251,17 +281,21 @@ class AscendOptimizer(Optimizer): self.ascend_instance.init_global_resources() main_block = loss.block - self.parser = AscendIRParser(auto_dp=auto_dp, - world_rank_size=fleet.world_size()) + self.parser = AscendIRParser( + auto_dp=auto_dp, world_rank_size=fleet.world_size() + ) input_varlist = self._get_input_varlist(main_block.program) startup_graph, main_graph = self.parser.parse_program( - startup_program, main_block.program, input_varlist, 
self.fetch_list) + startup_program, main_block.program, input_varlist, self.fetch_list + ) for cfg in self.parser.groups_to_create: - print("create group (%s), nranks: %d, rank_ids: %s" % - (cfg.name, cfg.nranks, cfg.rank_ids)) + print( + "create group (%s), nranks: %d, rank_ids: %s" + % (cfg.name, cfg.nranks, cfg.rank_ids) + ) hccl.create_group(cfg.name, cfg.nranks, cfg.rank_ids) self.ascend_instance.add_ascend_subgraph(0, startup_graph) diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py index e09d8ec8bf77cf5dfa7f90c4a1030da189047bc0..52447952e55a3f6aeeb0fac6ada848fc6090e075 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py @@ -56,7 +56,7 @@ registerd_op = { ## forwards "slice": "SliceParser", "top_k": "TopkParser", "accuracy": "AccuracyParser", - #"increment": "IncrementParser", + # "increment": "IncrementParser", "lookup_table": "LookupTableParser", "truncated_gaussian_random": "TruncatedNormalParser", "c_allgather": "AllGatherParser", @@ -71,7 +71,6 @@ registerd_op = { ## forwards "equal": "EqualParser", "expand": "ExpandParser", "squeeze2": "SqueezeParser", - ## backwords "matmul_grad": "MatMulGradParser", "mul_grad": "MulGradParser", @@ -94,17 +93,15 @@ registerd_op = { ## forwards "gather_grad": "GatherGradParser", "transpose2_grad": "TransposeGradParser", "layer_norm_grad": "LayerNormGradParser", - ## opt "sgd": "SGDParser", - #"adam": "AdamParser", + # "adam": "AdamParser", } global_cnt = -1 global_input_cnt = -1 class AscendHelper(object): - def __init__(self): self.dtype2ge_map = { 0: core.GEDataType.DT_BOOL, @@ -113,7 +110,7 @@ class AscendHelper(object): 3: core.GEDataType.DT_INT64, 4: core.GEDataType.DT_FLOAT16, 5: core.GEDataType.DT_FLOAT, - 6: core.GEDataType.DT_DOUBLE + 6: core.GEDataType.DT_DOUBLE, } self.dtype2np_map = { 0: "bool", @@ -122,23 +119,24 @@ class AscendHelper(object): 3: "int64", 4: "float16", 5: "float32", - 6: "float64" + 6: "float64", } self.dtype2paddle_inv_map = {"VarType.FP32": 0, "VarType.FP16": 1} def dtype2ge(self, dtype): assert dtype in self.dtype2ge_map, "dtype[%d] is not supported %d" % ( - dtype) + dtype + ) return self.dtype2ge_map[dtype] def dtype2np(self, index): assert index in self.dtype2np_map, "index[%d] is not supported %d" % ( - index) + index + ) return self.dtype2np_map[index] class AscendParserFactory(object): - def __init__(self, graph, var2geop): self.graph = graph self.var2geop = var2geop @@ -152,7 +150,6 @@ class AscendParserFactory(object): class AscendParserBase(object): - def __init__(self, graph, var2geop): self.graph = graph self.var2geop = var2geop @@ -161,35 +158,42 @@ class AscendParserBase(object): def _get_ge_input(self, input_var_name): assert input_var_name in self.var2geop, "var %s not created before" % ( - input_var_name) + input_var_name + ) return self.var2geop[input_var_name] def update_output(self, geop_list, index_list): output_num = len(self.op.output_names) - assert output_num == len( - index_list - ), "Parser[%s]'s output number[%d] is not equal to parameters number[%d]" % ( - self.parser_name, len(index_list), output_num) + assert output_num == len(index_list), ( + "Parser[%s]'s output number[%d] is not equal to parameters number[%d]" + % (self.parser_name, len(index_list), output_num) + ) for output_id in range(output_num): arguments = self.op.output(self.op.output_names[output_id]) if 
len(arguments) > 0: - assert len(arguments) == len( - index_list[output_id] - ), "Parser[%s]'s %dth argument number[%d] is not equal to paddle's number[%d]" % ( - self.parser_name, output_id, len( - index_list[output_id]), len(arguments)) + assert len(arguments) == len(index_list[output_id]), ( + "Parser[%s]'s %dth argument number[%d] is not equal to paddle's number[%d]" + % ( + self.parser_name, + output_id, + len(index_list[output_id]), + len(arguments), + ) + ) for i in range(len(arguments)): self.var2geop[arguments[i]] = geop_list[ - index_list[output_id][i]] + index_list[output_id][i] + ] for geop in geop_list: self.graph.add_op(geop) def apply(self, op): self.op = op - assert self.op.type == self.parser_name, "op [%s] != parser_name[%s]" % ( - self.op.type, self.parser_name) - #print("begin to parse op %s" % (self.parser_name)) + assert ( + self.op.type == self.parser_name + ), "op [%s] != parser_name[%s]" % (self.op.type, self.parser_name) + # print("begin to parse op %s" % (self.parser_name)) geop_list, index_list = self._apply() self.update_output(geop_list, index_list) @@ -205,33 +209,43 @@ class AscendParserBase(object): return name def _create_ge_tensor(self, shape, dtype, value): - tensor_desc = core.GETensorDesc(core.GEShape(shape), - core.GEFormat.FORMAT_ND, - self.ascend_helper.dtype2ge(dtype)) + tensor_desc = core.GETensorDesc( + core.GEShape(shape), + core.GEFormat.FORMAT_ND, + self.ascend_helper.dtype2ge(dtype), + ) tensor = core.GETensor(tensor_desc) - data = (value * np.ones( - (shape))).reshape(shape).astype(self.ascend_helper.dtype2np(dtype)) + data = ( + (value * np.ones((shape))) + .reshape(shape) + .astype(self.ascend_helper.dtype2np(dtype)) + ) buf = data.tobytes() data_8 = np.frombuffer(buf, dtype=np.uint8) tensor.set_data(data_8) return tensor def _get_ge_tensor(self, shape, dtype, value_list): - tensor_desc = core.GETensorDesc(core.GEShape(shape), - core.GEFormat.FORMAT_ND, - self.ascend_helper.dtype2ge(dtype)) + tensor_desc = core.GETensorDesc( + core.GEShape(shape), + core.GEFormat.FORMAT_ND, + self.ascend_helper.dtype2ge(dtype), + ) tensor = core.GETensor(tensor_desc) - data = np.array(value_list).reshape(shape).astype( - self.ascend_helper.dtype2np(dtype)) + data = ( + np.array(value_list) + .reshape(shape) + .astype(self.ascend_helper.dtype2np(dtype)) + ) buf = data.tobytes() data_8 = np.frombuffer(buf, dtype=np.uint8) tensor.set_data(data_8) tensor_const = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) return tensor_const @@ -242,21 +256,28 @@ class AscendParserBase(object): type = core.GEDataType.DT_FLOAT var = core.GEOperatorFactory.create_operator( - "variable" + self._accumulated_op_id(), "Variable") + "variable" + self._accumulated_op_id(), "Variable" + ) var.update_output_desc( "y", - core.GETensorDesc(core.GEShape(shape), core.GEFormat.FORMAT_ND, - type)) - assign = core.GEOperatorFactory.create_operator( - "assign" + self._accumulated_op_id(), - "Assign").set_input("value", tensor).set_input("ref", var) + core.GETensorDesc( + core.GEShape(shape), core.GEFormat.FORMAT_ND, type + ), + ) + assign = ( + core.GEOperatorFactory.create_operator( + "assign" + self._accumulated_op_id(), "Assign" + ) + .set_input("value", tensor) + .set_input("ref", var) + ) return assign def _create_shape_tensor(self): - tensor_desc = core.GETensorDesc(core.GEShape([2]), - core.GEFormat.FORMAT_ND, - 
core.GEDataType.DT_INT32) + tensor_desc = core.GETensorDesc( + core.GEShape([2]), core.GEFormat.FORMAT_ND, core.GEDataType.DT_INT32 + ) tensor = core.GETensor(tensor_desc) data = np.ones((2)).astype("int32").reshape([2]) @@ -268,16 +289,19 @@ class AscendParserBase(object): def _get_GEtensor_shape(self, tensor): tensor_shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Shape").set_input("x", tensor) - tensor_shape = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", tensor_shape).set_attr_int32("dst_type", 0) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", tensor) + tensor_shape = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", tensor_shape) + .set_attr_int32("dst_type", 0) + ) return tensor_shape class AddParser(AscendParserBase): - def __init__(self, graph, var2geop): super(AddParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_add" @@ -285,14 +309,17 @@ class AddParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - add = core.GEOperatorFactory.create_operator( - "add" + self._accumulated_op_id(), - "Add").set_input("x1", x).set_input("x2", y) + add = ( + core.GEOperatorFactory.create_operator( + "add" + self._accumulated_op_id(), "Add" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [add], [[0]] class DotSubParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotSubParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_sub" @@ -300,14 +327,17 @@ class DotSubParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - sub = core.GEOperatorFactory.create_operator( - "sub" + self._accumulated_op_id(), - "Sub").set_input("x1", x).set_input("x2", y) + sub = ( + core.GEOperatorFactory.create_operator( + "sub" + self._accumulated_op_id(), "Sub" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [sub], [[0]] class DotMulParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotMulParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_mul" @@ -315,14 +345,17 @@ class DotMulParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - mul = core.GEOperatorFactory.create_operator( - "dotmul" + self._accumulated_op_id(), - "Mul").set_input("x1", x).set_input("x2", y) + mul = ( + core.GEOperatorFactory.create_operator( + "dotmul" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [mul], [[0]] class DotDivParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotDivParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_div" @@ -330,14 +363,17 @@ class DotDivParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - div = core.GEOperatorFactory.create_operator( - "dotdiv" + self._accumulated_op_id(), - "Div").set_input("x1", x).set_input("x2", y) + div = ( + core.GEOperatorFactory.create_operator( + "dotdiv" + self._accumulated_op_id(), "Div" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [div], [[0]] class DotPowParser(AscendParserBase): - def __init__(self, graph, var2geop): 
super(DotPowParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_pow" @@ -345,14 +381,17 @@ class DotPowParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - pow = core.GEOperatorFactory.create_operator( - "dotpow" + self._accumulated_op_id(), - "Pow").set_input("x1", x).set_input("x2", y) + pow = ( + core.GEOperatorFactory.create_operator( + "dotpow" + self._accumulated_op_id(), "Pow" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [pow], [[0]] class LessParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LessParser, self).__init__(graph, var2geop) self.parser_name = "less_than" @@ -360,14 +399,17 @@ class LessParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - less_than = core.GEOperatorFactory.create_operator( - "less_than" + self._accumulated_op_id(), - "Less").set_input("x1", x).set_input("x2", y) + less_than = ( + core.GEOperatorFactory.create_operator( + "less_than" + self._accumulated_op_id(), "Less" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [less_than], [[0]] class MaxParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MaxParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_max" @@ -375,14 +417,17 @@ class MaxParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - max_out = core.GEOperatorFactory.create_operator( - "max" + self._accumulated_op_id(), - "Maximum").set_input("x1", x).set_input("x2", y) + max_out = ( + core.GEOperatorFactory.create_operator( + "max" + self._accumulated_op_id(), "Maximum" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [max_out], [[0]] class MinParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MinParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_min" @@ -390,15 +435,18 @@ class MinParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - min_out = core.GEOperatorFactory.create_operator( - "min" + self._accumulated_op_id(), - "Minimum").set_input("x1", x).set_input("x2", y) + min_out = ( + core.GEOperatorFactory.create_operator( + "min" + self._accumulated_op_id(), "Minimum" + ) + .set_input("x1", x) + .set_input("x2", y) + ) return [min_out], [[0]] ## cal class LogParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LogParser, self).__init__(graph, var2geop) self.parser_name = "log" @@ -406,12 +454,12 @@ class LogParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) log = core.GEOperatorFactory.create_operator( - "log" + self._accumulated_op_id(), "Log").set_input("x", x) + "log" + self._accumulated_op_id(), "Log" + ).set_input("x", x) return [log], [[0]] class SqrtParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SqrtParser, self).__init__(graph, var2geop) self.parser_name = "sqrt" @@ -419,12 +467,12 @@ class SqrtParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) sqrt = core.GEOperatorFactory.create_operator( - "sqrt" + self._accumulated_op_id(), "Sqrt").set_input("x", x) + "sqrt" + self._accumulated_op_id(), "Sqrt" + ).set_input("x", x) return [sqrt], [[0]] class PowParser(AscendParserBase): - def 
__init__(self, graph, var2geop): super(PowParser, self).__init__(graph, var2geop) self.parser_name = "pow" @@ -432,17 +480,19 @@ class PowParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) factor = self.op.attr("factor") - pow_value = core.GEOperatorFactory.create_operator( - "pow" + self._accumulated_op_id(), - "Power").set_input("x", x).set_attr_float( - "power", - factor).set_attr_float("scale", - 1.0).set_attr_float("shift", 0.0) + pow_value = ( + core.GEOperatorFactory.create_operator( + "pow" + self._accumulated_op_id(), "Power" + ) + .set_input("x", x) + .set_attr_float("power", factor) + .set_attr_float("scale", 1.0) + .set_attr_float("shift", 0.0) + ) return [pow_value], [[0]] class SquareParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SquareParser, self).__init__(graph, var2geop) self.parser_name = "square" @@ -450,12 +500,12 @@ class SquareParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) square = core.GEOperatorFactory.create_operator( - "square" + self._accumulated_op_id(), "Square").set_input("x", x) + "square" + self._accumulated_op_id(), "Square" + ).set_input("x", x) return [square], [[0]] class SumParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SumParser, self).__init__(graph, var2geop) self.parser_name = "sum" @@ -466,19 +516,26 @@ class SumParser(AscendParserBase): assert False, "the size of input list must large or equal 2" x = self._get_ge_input(self.op.input_arg_names[0]) y = self._get_ge_input(self.op.input_arg_names[1]) - sum = core.GEOperatorFactory.create_operator( - "sum" + self._accumulated_op_id(), - "Add").set_input("x1", x).set_input("x2", y) + sum = ( + core.GEOperatorFactory.create_operator( + "sum" + self._accumulated_op_id(), "Add" + ) + .set_input("x1", x) + .set_input("x2", y) + ) for i in range(2, len_list): y = self._get_ge_input(self.op.input_arg_names[i]) - sum = core.GEOperatorFactory.create_operator( - "sum" + self._accumulated_op_id(), - "Add").set_input("x1", sum).set_input("x2", y) + sum = ( + core.GEOperatorFactory.create_operator( + "sum" + self._accumulated_op_id(), "Add" + ) + .set_input("x1", sum) + .set_input("x2", y) + ) return [sum], [[0]] class LogicalNotParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LogicalNotParser, self).__init__(graph, var2geop) self.parser_name = "logical_not" @@ -486,28 +543,30 @@ class LogicalNotParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) logical_not = core.GEOperatorFactory.create_operator( - "logical_not" + self._accumulated_op_id(), - "LogicalNot").set_input("x", x) + "logical_not" + self._accumulated_op_id(), "LogicalNot" + ).set_input("x", x) return [logical_not], [[0]] class MeanParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MeanParser, self).__init__(graph, var2geop) self.parser_name = "mean" def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) - mean = core.GEOperatorFactory.create_operator( - "mean" + self._accumulated_op_id(), "ReduceMeanD").set_input( - "x", x).set_attr_bool("keep_dims", - False).set_attr_vec_int32("axes", []) + mean = ( + core.GEOperatorFactory.create_operator( + "mean" + self._accumulated_op_id(), "ReduceMeanD" + ) + .set_input("x", x) + .set_attr_bool("keep_dims", False) + .set_attr_vec_int32("axes", []) + ) return [mean], [[0]] class ReduceSumParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReduceSumParser, 
self).__init__(graph, var2geop) self.parser_name = "reduce_sum" @@ -520,14 +579,18 @@ class ReduceSumParser(AscendParserBase): x_shape = self.op.block.var(self.op.input_arg_names[0]).shape if reduce_all: axes = list(range(len(x_shape))) - reduce_sum = core.GEOperatorFactory.create_operator( - "reduce_sum" + self._accumulated_op_id(), - "ReduceSumD").set_input("x", x, 0).set_attr_vec_int32( - "axes", axes).set_attr_bool("keep_dims", keep_dims) + reduce_sum = ( + core.GEOperatorFactory.create_operator( + "reduce_sum" + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", x, 0) + .set_attr_vec_int32("axes", axes) + .set_attr_bool("keep_dims", keep_dims) + ) return [reduce_sum], [[0]] -#class IncrementParser(AscendParserBase): +# class IncrementParser(AscendParserBase): # def __init__(self, graph, var2geop): # super(IncrementParser, self).__init__(graph, var2geop) # self.parser_name = "increment" @@ -544,7 +607,6 @@ class ReduceSumParser(AscendParserBase): ## matrix cal class MatMulParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MatMulParser, self).__init__(graph, var2geop) self.parser_name = "matmul" @@ -559,24 +621,31 @@ class MatMulParser(AscendParserBase): x2_shape = self.op.block.var(self.op.input_arg_names[1]).shape if len(x1_shape) > 2: - matmul = core.GEOperatorFactory.create_operator( - "matmul" + self._accumulated_op_id(), "BatchMatMul").set_input( - "x1", x).set_input("x2", y).set_attr_bool( - "adj_x1", - transpose_x).set_attr_bool("adj_x2", transpose_y) + matmul = ( + core.GEOperatorFactory.create_operator( + "matmul" + self._accumulated_op_id(), "BatchMatMul" + ) + .set_input("x1", x) + .set_input("x2", y) + .set_attr_bool("adj_x1", transpose_x) + .set_attr_bool("adj_x2", transpose_y) + ) elif len(x1_shape) == 2: - matmul = core.GEOperatorFactory.create_operator( - "matmul" + self._accumulated_op_id(), - "MatMul").set_input("x1", x).set_input("x2", y).set_attr_bool( - "transpose_x1", - transpose_x).set_attr_bool("transpose_x2", transpose_y) + matmul = ( + core.GEOperatorFactory.create_operator( + "matmul" + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", x) + .set_input("x2", y) + .set_attr_bool("transpose_x1", transpose_x) + .set_attr_bool("transpose_x2", transpose_y) + ) else: assert False, "not support" return [matmul], [[0]] class MulParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MulParser, self).__init__(graph, var2geop) self.parser_name = "mul" @@ -591,48 +660,72 @@ class MulParser(AscendParserBase): if x_num_col_dims == 1 and y_num_col_dims == 1: if len(shape_x1) == 2 and len(shape_x2) == 2: - matmul = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "MatMul").set_input("x1", x).set_input("x2", y) + matmul = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", x) + .set_input("x2", y) + ) elif len(shape_x1) == 3 and len(shape_x2) == 2: flatten_x1 = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), - "Flatten").set_input("x", x) - matmul = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "MatMul").set_input("x1", flatten_x1, - 0).set_input("x2", y, 0) + "flatten" + self._accumulated_op_id(), "Flatten" + ).set_input("x", x) + matmul = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", flatten_x1, 0) + .set_input("x2", y, 0) + ) else: assert False, "not support" else: if len(shape_x1) == 3 and 
len(shape_x2) == 2: assert x_num_col_dims == 2, "only support 2" - flatten_x1 = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), - "FlattenV2").set_input("x", x).set_attr_int32( - "axis", 0).set_attr_int32("end_axis", 1) - matmul_m = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "MatMul").set_input("x1", flatten_x1, - 0).set_input("x2", y, 0) - matmul_transpose = core.GEOperatorFactory.create_operator( - "transpose" + self._accumulated_op_id(), - "TransposeD").set_input("x", matmul_m).set_attr_vec_int32( - "perm", [1, 0]) + flatten_x1 = ( + core.GEOperatorFactory.create_operator( + "flatten" + self._accumulated_op_id(), "FlattenV2" + ) + .set_input("x", x) + .set_attr_int32("axis", 0) + .set_attr_int32("end_axis", 1) + ) + matmul_m = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", flatten_x1, 0) + .set_input("x2", y, 0) + ) + matmul_transpose = ( + core.GEOperatorFactory.create_operator( + "transpose" + self._accumulated_op_id(), "TransposeD" + ) + .set_input("x", matmul_m) + .set_attr_vec_int32("perm", [1, 0]) + ) tensor = self._create_ge_tensor( - [3], 2, [shape_x2[1], shape_x1[0], shape_x1[1]]) + [3], 2, [shape_x2[1], shape_x1[0], shape_x1[1]] + ) const_shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) - reshape_matmul = core.GEOperatorFactory.create_operator( - "reshape" + self._accumulated_op_id(), - "Reshape").set_input("x", matmul_transpose).set_input( - "shape", const_shape).set_attr_int32("axis", 0) - matmul = core.GEOperatorFactory.create_operator( - "transpose" + self._accumulated_op_id(), - "TransposeD").set_input("x", - reshape_matmul).set_attr_vec_int32( - "perm", [1, 2, 0]) + "shape" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + reshape_matmul = ( + core.GEOperatorFactory.create_operator( + "reshape" + self._accumulated_op_id(), "Reshape" + ) + .set_input("x", matmul_transpose) + .set_input("shape", const_shape) + .set_attr_int32("axis", 0) + ) + matmul = ( + core.GEOperatorFactory.create_operator( + "transpose" + self._accumulated_op_id(), "TransposeD" + ) + .set_input("x", reshape_matmul) + .set_attr_vec_int32("perm", [1, 2, 0]) + ) else: assert False, "not support" @@ -640,7 +733,6 @@ class MulParser(AscendParserBase): class LayerNormParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LayerNormParser, self).__init__(graph, var2geop) self.parser_name = "layer_norm" @@ -654,44 +746,65 @@ class LayerNormParser(AscendParserBase): x_dtype = self.op.block.var(self.op.input_arg_names[2]).dtype shape_tensor = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), "Shape").set_input("x", x) - scale_expand = core.GEOperatorFactory.create_operator( - "broadcast_to_d" + self._accumulated_op_id(), - "BroadcastTo").set_input("x", - scale).set_input("shape", shape_tensor) - bias_expand = core.GEOperatorFactory.create_operator( - "broadcast_to_d" + self._accumulated_op_id(), - "BroadcastTo").set_input("x", - bias).set_input("shape", shape_tensor) - layer_norm = core.GEOperatorFactory.create_operator( - "layer_norm" + self._accumulated_op_id(), - "LayerNorm").set_input("x", x).set_input( - "gamma", - scale_expand).set_input("beta", bias_expand).set_attr_int32( - "begin_norm_axis", begin_norm_axis).set_attr_int32( - "begin_params_axis", - begin_norm_axis).set_attr_float("epsilon", epsilon) - - cast_dtype = 0 if 
self.ascend_helper.dtype2paddle_inv_map[str( - x_dtype)] == 0 else 1 - y = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", layer_norm, - 0).set_attr_int32("dst_type", cast_dtype) - mean = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", layer_norm, - 1).set_attr_int32("dst_type", cast_dtype) - variance = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", layer_norm, - 2).set_attr_int32("dst_type", cast_dtype) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", x) + scale_expand = ( + core.GEOperatorFactory.create_operator( + "broadcast_to_d" + self._accumulated_op_id(), "BroadcastTo" + ) + .set_input("x", scale) + .set_input("shape", shape_tensor) + ) + bias_expand = ( + core.GEOperatorFactory.create_operator( + "broadcast_to_d" + self._accumulated_op_id(), "BroadcastTo" + ) + .set_input("x", bias) + .set_input("shape", shape_tensor) + ) + layer_norm = ( + core.GEOperatorFactory.create_operator( + "layer_norm" + self._accumulated_op_id(), "LayerNorm" + ) + .set_input("x", x) + .set_input("gamma", scale_expand) + .set_input("beta", bias_expand) + .set_attr_int32("begin_norm_axis", begin_norm_axis) + .set_attr_int32("begin_params_axis", begin_norm_axis) + .set_attr_float("epsilon", epsilon) + ) + + cast_dtype = ( + 0 + if self.ascend_helper.dtype2paddle_inv_map[str(x_dtype)] == 0 + else 1 + ) + y = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", layer_norm, 0) + .set_attr_int32("dst_type", cast_dtype) + ) + mean = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", layer_norm, 1) + .set_attr_int32("dst_type", cast_dtype) + ) + variance = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", layer_norm, 2) + .set_attr_int32("dst_type", cast_dtype) + ) return [y, mean, variance], [[1], [2], [0]] ## activate function class ReluParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReluParser, self).__init__(graph, var2geop) self.parser_name = "relu" @@ -699,12 +812,12 @@ class ReluParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) relu = core.GEOperatorFactory.create_operator( - "relu" + self._accumulated_op_id(), "Relu").set_input("x", x) + "relu" + self._accumulated_op_id(), "Relu" + ).set_input("x", x) return [relu], [[0]] class GeluParser(AscendParserBase): - def __init__(self, graph, var2geop): super(GeluParser, self).__init__(graph, var2geop) self.parser_name = "gelu" @@ -712,12 +825,12 @@ class GeluParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) gelu = core.GEOperatorFactory.create_operator( - "gelu" + self._accumulated_op_id(), "Gelu").set_input("x", x) + "gelu" + self._accumulated_op_id(), "Gelu" + ).set_input("x", x) return [gelu], [[0]] class TanhParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TanhParser, self).__init__(graph, var2geop) self.parser_name = "tanh" @@ -725,13 +838,13 @@ class TanhParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) tanh = core.GEOperatorFactory.create_operator( - "tanh" + self._accumulated_op_id(), "Tanh").set_input("x", x) + "tanh" + self._accumulated_op_id(), "Tanh" + ).set_input("x", x) return [tanh], [[0]] ## loss function class 
SoftmaxWithCrossEntropyParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SoftmaxWithCrossEntropyParser, self).__init__(graph, var2geop) self.parser_name = "softmax_with_cross_entropy" @@ -742,48 +855,65 @@ class SoftmaxWithCrossEntropyParser(AscendParserBase): cls_num = self.op.block.var(self.op.input_arg_names[1]).shape[1] softmax = core.GEOperatorFactory.create_operator( - "softmax" + self._accumulated_op_id(), - "SoftmaxV2").set_input("x", logits) - label = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", label).set_attr_int32("dst_type", 3) + "softmax" + self._accumulated_op_id(), "SoftmaxV2" + ).set_input("x", logits) + label = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", label) + .set_attr_int32("dst_type", 3) + ) tensoron = self._create_ge_tensor([1], 5, 1) on = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensoron) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensoron) tensoroff = self._create_ge_tensor([1], 5, 0) off = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensoroff) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensoroff) self._mark_as_input(on) self._mark_as_input(off) - onehot = core.GEOperatorFactory.create_operator( - "onehot" + self._accumulated_op_id(), - "OneHotD").set_input("x", - label).set_input("on_value", on).set_input( - "off_value", - off).set_attr_int32("depth", cls_num) + onehot = ( + core.GEOperatorFactory.create_operator( + "onehot" + self._accumulated_op_id(), "OneHotD" + ) + .set_input("x", label) + .set_input("on_value", on) + .set_input("off_value", off) + .set_attr_int32("depth", cls_num) + ) squeeze = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "Squeeze").set_input("x", onehot) - - loss_all = core.GEOperatorFactory.create_operator( - "loss" + self._accumulated_op_id(), - "SoftmaxCrossEntropyWithLogits").set_input("features", - logits).set_input( - "labels", squeeze) - loss = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", loss_all, 0).set_attr_int32("dst_type", 0) - loss_expand = core.GEOperatorFactory.create_operator( - "unsqueeze" + self._accumulated_op_id(), - "Unsqueeze").set_input("x", loss).set_attr_vec_int32("axes", [1]) + "mul" + self._accumulated_op_id(), "Squeeze" + ).set_input("x", onehot) + + loss_all = ( + core.GEOperatorFactory.create_operator( + "loss" + self._accumulated_op_id(), + "SoftmaxCrossEntropyWithLogits", + ) + .set_input("features", logits) + .set_input("labels", squeeze) + ) + loss = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", loss_all, 0) + .set_attr_int32("dst_type", 0) + ) + loss_expand = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self._accumulated_op_id(), "Unsqueeze" + ) + .set_input("x", loss) + .set_attr_vec_int32("axes", [1]) + ) return [label, softmax, loss_expand], [[2], [1]] class SoftMaxParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SoftMaxParser, self).__init__(graph, var2geop) self.parser_name = "softmax" @@ -792,16 +922,18 @@ class SoftMaxParser(AscendParserBase): logits = self._get_ge_input(self.op.input_arg_names[0]) axes = self.op.attr("axis") - softmax = 
core.GEOperatorFactory.create_operator( - "softmax" + self._accumulated_op_id(), - "SoftmaxV2").set_input("x", - logits).set_attr_vec_int32("axes", [axes]) + softmax = ( + core.GEOperatorFactory.create_operator( + "softmax" + self._accumulated_op_id(), "SoftmaxV2" + ) + .set_input("x", logits) + .set_attr_vec_int32("axes", [axes]) + ) return [softmax], [[0]] ## general class ShapeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ShapeParser, self).__init__(graph, var2geop) self.parser_name = "shape" @@ -809,12 +941,12 @@ class ShapeParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), "Shape").set_input("x", x) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", x) return [shape], [[0]] class FillConstantParser(AscendParserBase): - def __init__(self, graph, var2geop): super(FillConstantParser, self).__init__(graph, var2geop) self.parser_name = "fill_constant" @@ -826,27 +958,35 @@ class FillConstantParser(AscendParserBase): tensor = self._create_ge_tensor(shape, dtype, value) const = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) self._mark_as_input(const) if self.op.block.var(self.op.output('Out')[0]).persistable: - #print("%s is Persistable in fill_constant" % + # print("%s is Persistable in fill_constant" % # (self.op.output('Out')[0])) var = core.GEOperatorFactory.create_operator( - self.op.output('Out')[0], "Variable") + self.op.output('Out')[0], "Variable" + ) var.update_output_desc( "y", - core.GETensorDesc(core.GEShape(shape), core.GEFormat.FORMAT_ND, - core.GEDataType.DT_FLOAT)) - assign = core.GEOperatorFactory.create_operator( - "assign" + self._accumulated_op_id(), - "Assign").set_input("value", const).set_input("ref", var) + core.GETensorDesc( + core.GEShape(shape), + core.GEFormat.FORMAT_ND, + core.GEDataType.DT_FLOAT, + ), + ) + assign = ( + core.GEOperatorFactory.create_operator( + "assign" + self._accumulated_op_id(), "Assign" + ) + .set_input("value", const) + .set_input("ref", var) + ) return [const], [[0]] return [const], [[0]] class TruncatedNormalParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TruncatedNormalParser, self).__init__(graph, var2geop) self.parser_name = "truncated_gaussian_random" @@ -860,24 +1000,24 @@ class TruncatedNormalParser(AscendParserBase): tensor1 = self._create_ge_tensor([len(shape)], 2, shape) shape_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor1) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor1) tensor2 = self._create_ge_tensor([1], dtype, mean) mean_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor2) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor2) tensor3 = self._create_ge_tensor([1], dtype, std) std_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor3) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor3) tensor4 = self._create_ge_tensor([1], dtype, mean - 2 * std) min_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - 
"Const").set_attr_tensor("value", tensor4) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor4) tensor5 = self._create_ge_tensor([1], dtype, mean + 2 * std) max_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor5) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor5) self._mark_as_input(shape_tensor) self._mark_as_input(mean_tensor) @@ -885,34 +1025,50 @@ class TruncatedNormalParser(AscendParserBase): self._mark_as_input(min_tensor) self._mark_as_input(max_tensor) - truncated_normal = core.GEOperatorFactory.create_operator( - "truncated_normal" + self._accumulated_op_id(), - "ParameterizedTruncatedNormal").set_input( - "shape", - shape_tensor).set_input("means", mean_tensor).set_input( - "stdevs", - std_tensor).set_input("min", min_tensor).set_input( - "max", max_tensor).set_attr_int32("seed", 0) + truncated_normal = ( + core.GEOperatorFactory.create_operator( + "truncated_normal" + self._accumulated_op_id(), + "ParameterizedTruncatedNormal", + ) + .set_input("shape", shape_tensor) + .set_input("means", mean_tensor) + .set_input("stdevs", std_tensor) + .set_input("min", min_tensor) + .set_input("max", max_tensor) + .set_attr_int32("seed", 0) + ) ## wirte the output of truncatedNormal from startup_program to main_program if self.op.block.var(self.op.output('Out')[0]).persistable: - #print("%s is Persistable in truncated_normal" % + # print("%s is Persistable in truncated_normal" % # (self.op.output('Out')[0])) var = core.GEOperatorFactory.create_operator( - self.op.output('Out')[0], "Variable") + self.op.output('Out')[0], "Variable" + ) var.update_output_desc( "y", - core.GETensorDesc(core.GEShape(shape), core.GEFormat.FORMAT_ND, - core.GEDataType.DT_FLOAT)) - assign = core.GEOperatorFactory.create_operator( - "assign" + self._accumulated_op_id(), - "Assign").set_input("value", - truncated_normal).set_input("ref", var) + core.GETensorDesc( + core.GEShape(shape), + core.GEFormat.FORMAT_ND, + core.GEDataType.DT_FLOAT, + ), + ) + assign = ( + core.GEOperatorFactory.create_operator( + "assign" + self._accumulated_op_id(), "Assign" + ) + .set_input("value", truncated_normal) + .set_input("ref", var) + ) return [ - shape_tensor, mean_tensor, std_tensor, min_tensor, max_tensor, - truncated_normal + shape_tensor, + mean_tensor, + std_tensor, + min_tensor, + max_tensor, + truncated_normal, ], [[-1]] - #else: + # else: # print( # "self.op.output('Out')[0] is not persistable in truncated_noraml" # ) @@ -920,7 +1076,6 @@ class TruncatedNormalParser(AscendParserBase): class GatherParser(AscendParserBase): - def __init__(self, graph, var2geop): super(GatherParser, self).__init__(graph, var2geop) self.parser_name = "gather" @@ -930,16 +1085,18 @@ class GatherParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[1]) clo = self.op.block.var(self.op.input_arg_names[1]).shape[-1] - gather = core.GEOperatorFactory.create_operator( - "gather" + self._accumulated_op_id(), - "Gather").set_input("x", x).set_input("indices", - index).set_attr_bool( - "validate_indices", True) + gather = ( + core.GEOperatorFactory.create_operator( + "gather" + self._accumulated_op_id(), "Gather" + ) + .set_input("x", x) + .set_input("indices", index) + .set_attr_bool("validate_indices", True) + ) return [gather], [[0]] class ScatterParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ScatterParser, self).__init__(graph, var2geop) self.parser_name = 
"scatter" @@ -952,25 +1109,35 @@ class ScatterParser(AscendParserBase): index_shape = self.op.block.var(self.op.input_arg_names[0]).shape if len(index_shape) == 1: - index = core.GEOperatorFactory.create_operator( - "unsqueeze" + self.getid(), - "Unsqueeze").set_input("x", - index).set_attr_vec_int32("axes", [1]) + index = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self.getid(), "Unsqueeze" + ) + .set_input("x", index) + .set_attr_vec_int32("axes", [1]) + ) if not overwrite: - scatter_value = core.GEOperatorFactory.create_operator( - "scatter" + self._accumulated_op_id(), - "TensorScatterAdd").set_input("x", x).set_input( - "indices", index).set_input("updates", updates) + scatter_value = ( + core.GEOperatorFactory.create_operator( + "scatter" + self._accumulated_op_id(), "TensorScatterAdd" + ) + .set_input("x", x) + .set_input("indices", index) + .set_input("updates", updates) + ) else: - scatter_value = core.GEOperatorFactory.create_operator( - "scatter" + self._accumulated_op_id(), - "TensorScatterUpdate").set_input("x", x).set_input( - "indices", index).set_input("updates", updates) + scatter_value = ( + core.GEOperatorFactory.create_operator( + "scatter" + self._accumulated_op_id(), "TensorScatterUpdate" + ) + .set_input("x", x) + .set_input("indices", index) + .set_input("updates", updates) + ) return [x, index, updates, scatter_value], [[-1]] class CastParser(AscendParserBase): - def __init__(self, graph, var2geop): super(CastParser, self).__init__(graph, var2geop) self.parser_name = "cast" @@ -978,14 +1145,17 @@ class CastParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) dtype = self.op.attr("out_dtype") - cast = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x).set_attr_int32("dst_type", dtype) + cast = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x) + .set_attr_int32("dst_type", dtype) + ) return [cast], [[0]] class AssignParser(AscendParserBase): - def __init__(self, graph, var2geop): super(AssignParser, self).__init__(graph, var2geop) self.parser_name = "assign" @@ -993,14 +1163,17 @@ class AssignParser(AscendParserBase): def _apply(self): const = self._get_ge_input(self.op.input_arg_names[0]) var = self._get_ge_input(self.op.input_arg_names[1]) - assign = core.GEOperatorFactory.create_operator( - "assign" + self._accumulated_op_id(), - "Assign").set_input("value", const).set_input("ref", var) + assign = ( + core.GEOperatorFactory.create_operator( + "assign" + self._accumulated_op_id(), "Assign" + ) + .set_input("value", const) + .set_input("ref", var) + ) return [assign], [[0]] class ScaleParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ScaleParser, self).__init__(graph, var2geop) self.parser_name = "scale" @@ -1012,27 +1185,36 @@ class ScaleParser(AscendParserBase): bias_after_scale = self.op.attr("bias_after_scale") if bias_after_scale: - scale_value = core.GEOperatorFactory.create_operator( - "scale" + self._accumulated_op_id(), - "Power").set_input("x", x).set_attr_float( - "power", - 1.0).set_attr_float("scale", - scale).set_attr_float("shift", bias) + scale_value = ( + core.GEOperatorFactory.create_operator( + "scale" + self._accumulated_op_id(), "Power" + ) + .set_input("x", x) + .set_attr_float("power", 1.0) + .set_attr_float("scale", scale) + .set_attr_float("shift", bias) + ) else: - x_add_bias = core.GEOperatorFactory.create_operator( - "adds" + 
self._accumulated_op_id(), - "Adds").set_input("x", x).set_attr_float("value", bias) - scale_value = core.GEOperatorFactory.create_operator( - "scale" + self._accumulated_op_id(), - "Power").set_input("x", x_add_bias).set_attr_float( - "power", - 1.0).set_attr_float("scale", - scale).set_attr_float("shift", 0.0) + x_add_bias = ( + core.GEOperatorFactory.create_operator( + "adds" + self._accumulated_op_id(), "Adds" + ) + .set_input("x", x) + .set_attr_float("value", bias) + ) + scale_value = ( + core.GEOperatorFactory.create_operator( + "scale" + self._accumulated_op_id(), "Power" + ) + .set_input("x", x_add_bias) + .set_attr_float("power", 1.0) + .set_attr_float("scale", scale) + .set_attr_float("shift", 0.0) + ) return [scale_value], [[0]] class SliceParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SliceParser, self).__init__(graph, var2geop) self.parser_name = "slice" @@ -1058,18 +1240,22 @@ class SliceParser(AscendParserBase): cnt += 1 size = [ends_cor[i] - starts_cor[i] for i in range(len(axes_cor))] - assert len(axes_cor) == len(starts_cor) == len( - ends_cor), "the three fields must have same size" - slice_value = core.GEOperatorFactory.create_operator( - "slice" + self._accumulated_op_id(), - "SliceD").set_input("x", x).set_attr_vec_int32( - "offsets", starts_cor).set_attr_vec_int32("size", size) + assert ( + len(axes_cor) == len(starts_cor) == len(ends_cor) + ), "the three fields must have same size" + slice_value = ( + core.GEOperatorFactory.create_operator( + "slice" + self._accumulated_op_id(), "SliceD" + ) + .set_input("x", x) + .set_attr_vec_int32("offsets", starts_cor) + .set_attr_vec_int32("size", size) + ) return [slice_value], [[0]] class ReshapeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReshapeParser, self).__init__(graph, var2geop) self.parser_name = "reshape2" @@ -1092,21 +1278,24 @@ class ReshapeParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[0]) tensor = self._create_ge_tensor([len(shape)], 2, shape) const_shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) - reshape = core.GEOperatorFactory.create_operator( - "reshape" + self._accumulated_op_id(), - "Reshape").set_input("x", x).set_input("shape", - const_shape).set_attr_int32( - "axis", 0) + "shape" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + reshape = ( + core.GEOperatorFactory.create_operator( + "reshape" + self._accumulated_op_id(), "Reshape" + ) + .set_input("x", x) + .set_input("shape", const_shape) + .set_attr_int32("axis", 0) + ) x_shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), "Shape").set_input("x", x) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", x) return [x_shape, reshape], [[1], [0]] class TransposeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TransposeParser, self).__init__(graph, var2geop) self.parser_name = "transpose2" @@ -1114,17 +1303,21 @@ class TransposeParser(AscendParserBase): def _apply(self): x = self._get_ge_input(self.op.input_arg_names[0]) perm = self.op.attr("axis") - transpose = core.GEOperatorFactory.create_operator( - "transpose" + self._accumulated_op_id(), - "TransposeD").set_input("x", x).set_attr_vec_int32("perm", perm) + transpose = ( + core.GEOperatorFactory.create_operator( + "transpose" + self._accumulated_op_id(), "TransposeD" + ) + .set_input("x", x) + .set_attr_vec_int32("perm", perm) + ) x_shape = 
core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), "Shape").set_input("x", x) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", x) return [x_shape, transpose], [[1], [0]] class AccuracyParser(AscendParserBase): - def __init__(self, graph, var2geop): super(AccuracyParser, self).__init__(graph, var2geop) self.parser_name = "accuracy" @@ -1134,42 +1327,73 @@ class AccuracyParser(AscendParserBase): label = self._get_ge_input(self.op.input_arg_names[1]) logits = self._get_ge_input(self.op.input_arg_names[2]) - pred = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", pred).set_attr_int32("dst_type", 3) - label = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", label).set_attr_int32("dst_type", 3) - equal = core.GEOperatorFactory.create_operator( - "equal" + self._accumulated_op_id(), - "Equal").set_input("x1", pred).set_input("x2", label) - cast = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", equal).set_attr_int32("dst_type", 0) - acc = core.GEOperatorFactory.create_operator( - "mean" + self._accumulated_op_id(), "ReduceMeanD").set_input( - "x", cast).set_attr_bool("keep_dims", - False).set_attr_vec_int32("axes", []) - correct = core.GEOperatorFactory.create_operator( - "sum" + self._accumulated_op_id(), "ReduceSumD").set_input( - "x", cast).set_attr_bool("keep_dims", - False).set_attr_vec_int32("axes", []) - ones_tensor = core.GEOperatorFactory.create_operator( - "oneslike" + self._accumulated_op_id(), - "OnesLike").set_input("x", label) + pred = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", pred) + .set_attr_int32("dst_type", 3) + ) + label = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", label) + .set_attr_int32("dst_type", 3) + ) + equal = ( + core.GEOperatorFactory.create_operator( + "equal" + self._accumulated_op_id(), "Equal" + ) + .set_input("x1", pred) + .set_input("x2", label) + ) + cast = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", equal) + .set_attr_int32("dst_type", 0) + ) + acc = ( + core.GEOperatorFactory.create_operator( + "mean" + self._accumulated_op_id(), "ReduceMeanD" + ) + .set_input("x", cast) + .set_attr_bool("keep_dims", False) + .set_attr_vec_int32("axes", []) + ) + correct = ( + core.GEOperatorFactory.create_operator( + "sum" + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", cast) + .set_attr_bool("keep_dims", False) + .set_attr_vec_int32("axes", []) + ) ones_tensor = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", ones_tensor).set_attr_int32("dst_type", 0) - total = core.GEOperatorFactory.create_operator( - "sum" + self._accumulated_op_id(), - "ReduceSumD").set_input("x", ones_tensor).set_attr_bool( - "keep_dims", False).set_attr_vec_int32("axes", []) + "oneslike" + self._accumulated_op_id(), "OnesLike" + ).set_input("x", label) + ones_tensor = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", ones_tensor) + .set_attr_int32("dst_type", 0) + ) + total = ( + core.GEOperatorFactory.create_operator( + "sum" + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", ones_tensor) + .set_attr_bool("keep_dims", False) + .set_attr_vec_int32("axes", 
[]) + ) return [acc, correct, total], [[0], [1], [2]] class TopkParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TopkParser, self).__init__(graph, var2geop) self.parser_name = "top_k" @@ -1180,25 +1404,40 @@ class TopkParser(AscendParserBase): tensor = self._create_ge_tensor([1], 2, k) const_k = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) - cast_x = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x).set_attr_int32("dst_type", 1) - topk = core.GEOperatorFactory.create_operator( - "topk" + self._accumulated_op_id(), - "TopK").set_input("x", cast_x).set_input("k", const_k) - value = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", topk, 0).set_attr_int32("dst_type", 0) - index = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", topk, 1).set_attr_int32("dst_type", 0) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + cast_x = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x) + .set_attr_int32("dst_type", 1) + ) + topk = ( + core.GEOperatorFactory.create_operator( + "topk" + self._accumulated_op_id(), "TopK" + ) + .set_input("x", cast_x) + .set_input("k", const_k) + ) + value = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", topk, 0) + .set_attr_int32("dst_type", 0) + ) + index = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", topk, 1) + .set_attr_int32("dst_type", 0) + ) return [value, index], [[1], [0]] class LookupTableParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LookupTableParser, self).__init__(graph, var2geop) self.parser_name = "lookup_table" @@ -1207,17 +1446,24 @@ class LookupTableParser(AscendParserBase): ids = self._get_ge_input(self.op.input_arg_names[0]) w = self._get_ge_input(self.op.input_arg_names[1]) - ids_squeeze = core.GEOperatorFactory.create_operator( - "squeeze" + self._accumulated_op_id(), - "Squeeze").set_input("x", ids).set_attr_vec_int32("axes", [-1]) - out = core.GEOperatorFactory.create_operator( - "lookup" + self._accumulated_op_id(), - "Gather").set_input("x", w).set_input("indices", ids_squeeze) + ids_squeeze = ( + core.GEOperatorFactory.create_operator( + "squeeze" + self._accumulated_op_id(), "Squeeze" + ) + .set_input("x", ids) + .set_attr_vec_int32("axes", [-1]) + ) + out = ( + core.GEOperatorFactory.create_operator( + "lookup" + self._accumulated_op_id(), "Gather" + ) + .set_input("x", w) + .set_input("indices", ids_squeeze) + ) return [out], [[0]] class StackParser(AscendParserBase): - def __init__(self, graph, var2geop): super(StackParser, self).__init__(graph, var2geop) self.parser_name = "stack" @@ -1226,30 +1472,37 @@ class StackParser(AscendParserBase): tiles = len(self.op.input_arg_names) data_x_lst = [] for index in range(tiles): - data_x_lst.append(self._get_ge_input( - self.op.input_arg_names[index])) + data_x_lst.append( + self._get_ge_input(self.op.input_arg_names[index]) + ) axis = self.op.attr("axis") data_x = data_x_lst[0] tensor = self._create_ge_tensor([1], 2, axis) tensor_axis = core.GEOperatorFactory.create_operator( - "axis" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) - expand = 
core.GEOperatorFactory.create_operator( - "expand" + self._accumulated_op_id(), - "ExpandDims").set_input("x", data_x).set_input("axis", tensor_axis) - - stack = core.GEOperatorFactory.create_operator( - "stack" + self._accumulated_op_id(), "TileWithAxis").set_input( - "x", - expand).set_attr_int32("axis", - axis).set_attr_int32("tiles", tiles) + "axis" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + expand = ( + core.GEOperatorFactory.create_operator( + "expand" + self._accumulated_op_id(), "ExpandDims" + ) + .set_input("x", data_x) + .set_input("axis", tensor_axis) + ) + + stack = ( + core.GEOperatorFactory.create_operator( + "stack" + self._accumulated_op_id(), "TileWithAxis" + ) + .set_input("x", expand) + .set_attr_int32("axis", axis) + .set_attr_int32("tiles", tiles) + ) return [stack], [[0]] class UnSqueezeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(UnSqueezeParser, self).__init__(graph, var2geop) self.parser_name = "unsqueeze2" @@ -1258,18 +1511,21 @@ class UnSqueezeParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[0]) axes = self.op.attr('axes') - output = core.GEOperatorFactory.create_operator( - "unsqueeze" + self._accumulated_op_id(), - "Unsqueeze").set_input("x", x).set_attr_vec_int32("axes", axes) + output = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self._accumulated_op_id(), "Unsqueeze" + ) + .set_input("x", x) + .set_attr_vec_int32("axes", axes) + ) shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Shape").set_input("x", output) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", output) return [shape, output], [[1], [0]] ## parallel class AllGatherParser(AscendParserBase): - def __init__(self, graph, var2geop): super(AllGatherParser, self).__init__(graph, var2geop) self.parser_name = "c_allgather" @@ -1279,15 +1535,18 @@ class AllGatherParser(AscendParserBase): rank_size = self.op.attr("rank_size") group = self.op.attr("group") - allgather = core.GEOperatorFactory.create_operator( - "allgather" + self._accumulated_op_id(), - "HcomAllGather").set_input("x", x).set_attr_int32( - "rank_size", rank_size).set_attr_string("group", group) + allgather = ( + core.GEOperatorFactory.create_operator( + "allgather" + self._accumulated_op_id(), "HcomAllGather" + ) + .set_input("x", x) + .set_attr_int32("rank_size", rank_size) + .set_attr_string("group", group) + ) return [allgather], [[0]] class AllReduceParser(AscendParserBase): - def __init__(self, graph, var2geop, reduction): super(AllReduceParser, self).__init__(graph, var2geop) self.parser_name = "c_allreduce_" + reduction @@ -1298,13 +1557,17 @@ class AllReduceParser(AscendParserBase): reduction = self.reduction ring_id = self.op.attr("ring_id") group = "hcom_group_" + str(ring_id) - fusion = None #self.op.attr("fusion") - fusion_id = None #self.op.attr("fusion_id") - - allreduce = core.GEOperatorFactory.create_operator( - "allreduce" + self._accumulated_op_id(), - "HcomAllReduce").set_input("x", x).set_attr_string( - "reduction", reduction).set_attr_string("group", group) + fusion = None # self.op.attr("fusion") + fusion_id = None # self.op.attr("fusion_id") + + allreduce = ( + core.GEOperatorFactory.create_operator( + "allreduce" + self._accumulated_op_id(), "HcomAllReduce" + ) + .set_input("x", x) + .set_attr_string("reduction", reduction) + .set_attr_string("group", group) + ) if fusion is not None: allreduce.set_attr_int32("fusion", fusion) @@ -1314,19 +1577,16 @@ class 
AllReduceParser(AscendParserBase): class AllReduceSumParser(AllReduceParser): - def __init__(self, graph, var2geop): super(AllReduceSumParser, self).__init__(graph, var2geop, 'sum') class AllReduceMaxParser(AllReduceParser): - def __init__(self, graph, var2geop): super(AllReduceMaxParser, self).__init__(graph, var2geop, 'max') class BroadcastParser(AscendParserBase): - def __init__(self, graph, var2geop): super(BroadcastParser, self).__init__(graph, var2geop) self.parser_name = "c_broadcast" @@ -1336,15 +1596,18 @@ class BroadcastParser(AscendParserBase): root_rank = self.op.attr("root_rank") group = self.op.attr("group") - broadcast = core.GEOperatorFactory.create_operator( - "broadcast" + self._accumulated_op_id(), - "HcomBroadcast").set_input("x", x).set_attr_int32( - "root_rank", root_rank).set_attr_string("group", group) + broadcast = ( + core.GEOperatorFactory.create_operator( + "broadcast" + self._accumulated_op_id(), "HcomBroadcast" + ) + .set_input("x", x) + .set_attr_int32("root_rank", root_rank) + .set_attr_string("group", group) + ) return [broadcast], [[0]] class ReduceScatterParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReduceScatterParser, self).__init__(graph, var2geop) self.parser_name = "c_reduce_scatter" @@ -1355,17 +1618,19 @@ class ReduceScatterParser(AscendParserBase): group = self.op.attr("group") rank_size = self.op.attr("rank_size") - reduce_scatter = core.GEOperatorFactory.create_operator( - "reducescatter" + self._accumulated_op_id(), - "HcomReduceScatter").set_input("x", x).set_attr_string( - "reduction", - reduction).set_attr_string("group", group).set_attr_int32( - "rank_size", rank_size) + reduce_scatter = ( + core.GEOperatorFactory.create_operator( + "reducescatter" + self._accumulated_op_id(), "HcomReduceScatter" + ) + .set_input("x", x) + .set_attr_string("reduction", reduction) + .set_attr_string("group", group) + .set_attr_int32("rank_size", rank_size) + ) return [reduce_scatter], [[0]] class SendParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SendParser, self).__init__(graph, var2geop) self.parser_name = "c_send" @@ -1376,15 +1641,19 @@ class SendParser(AscendParserBase): dest_rank = self.op.attr("dest_rank") group = self.op.attr("group") - send = core.GEOperatorFactory.create_operator( - "send" + self._accumulated_op_id(), "HcomSend").set_input( - "x", x).set_attr_int32("sr_tag", sr_tag).set_attr_int32( - "dest_rank", dest_rank).set_attr_string("group", group) + send = ( + core.GEOperatorFactory.create_operator( + "send" + self._accumulated_op_id(), "HcomSend" + ) + .set_input("x", x) + .set_attr_int32("sr_tag", sr_tag) + .set_attr_int32("dest_rank", dest_rank) + .set_attr_string("group", group) + ) return [send], [[0]] class ReceiveParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReceiveParser, self).__init__(graph, var2geop) self.parser_name = "c_receive" @@ -1397,19 +1666,21 @@ class ReceiveParser(AscendParserBase): shape = self.op.attr("shape") dtype = self.op.attr("dtype") - receive = core.GEOperatorFactory.create_operator( - "receive" + self._accumulated_op_id(), - "HcomReceive").set_input("x", x).set_attr_int32( - "sr_tag", - sr_tag).set_attr_int32("src_rank", src_rank).set_attr_string( - "group", group).set_attr_vec_int32("shape", - shape).set_attr_int32( - "dtype", dtype) + receive = ( + core.GEOperatorFactory.create_operator( + "receive" + self._accumulated_op_id(), "HcomReceive" + ) + .set_input("x", x) + .set_attr_int32("sr_tag", sr_tag) + .set_attr_int32("src_rank", 
src_rank) + .set_attr_string("group", group) + .set_attr_vec_int32("shape", shape) + .set_attr_int32("dtype", dtype) + ) return [receive], [[0]] class RangeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(RangeParser, self).__init__(graph, var2geop) self.parser_name = "range" @@ -1420,17 +1691,19 @@ class RangeParser(AscendParserBase): end = self._get_ge_input(self.op.input_arg_names[1]) delta = self._get_ge_input(self.op.input_arg_names[2]) - ge_range = core.GEOperatorFactory.create_operator( - "range" + self._accumulated_op_id(), "Range")\ - .set_input("start", end)\ - .set_input("limit", start) \ - .set_input("delta", delta) + ge_range = ( + core.GEOperatorFactory.create_operator( + "range" + self._accumulated_op_id(), "Range" + ) + .set_input("start", end) + .set_input("limit", start) + .set_input("delta", delta) + ) return [ge_range], [[0]] class UniformRandomParser(AscendParserBase): - def __init__(self, graph, var2geop): super(UniformRandomParser, self).__init__(graph, var2geop) self.parser_name = "uniform_random" @@ -1442,35 +1715,42 @@ class UniformRandomParser(AscendParserBase): max_v = self.op.attr("max") seed = self.op.attr("seed") dtype = self.op.attr("dtype") - assert max_v > min_v, "assert max_v > min_v, but received " + \ - "as max_v={}, min_v={} ".format(max_v, min_v) + assert max_v > min_v, ( + "assert max_v > min_v, but received " + + "as max_v={}, min_v={} ".format(max_v, min_v) + ) tensor1 = self._create_ge_tensor([len(shape)], 2, shape) shape_tensor = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor1) - - ge_ur = core.GEOperatorFactory.create_operator( - "uniform_random" + self._accumulated_op_id(), "RandomUniform")\ - .set_input("shape", shape_tensor)\ - .set_attr_dtype("dtype", self.ascend_helper.dtype2ge(dtype)) \ - .set_attr_int32("seed", seed)\ + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor1) + + ge_ur = ( + core.GEOperatorFactory.create_operator( + "uniform_random" + self._accumulated_op_id(), "RandomUniform" + ) + .set_input("shape", shape_tensor) + .set_attr_dtype("dtype", self.ascend_helper.dtype2ge(dtype)) + .set_attr_int32("seed", seed) .set_attr_int32("seed2", seed) + ) scale = max_v - min_v - scale_value = core.GEOperatorFactory.create_operator( - "scale" + self._accumulated_op_id(), - "Power").set_input("x", ge_ur).set_attr_float( - "power", - 1.0).set_attr_float("scale", - scale).set_attr_float("shift", min_v) + scale_value = ( + core.GEOperatorFactory.create_operator( + "scale" + self._accumulated_op_id(), "Power" + ) + .set_input("x", ge_ur) + .set_attr_float("power", 1.0) + .set_attr_float("scale", scale) + .set_attr_float("shift", min_v) + ) return [scale_value], [[0]] class EqualParser(AscendParserBase): - def __init__(self, graph, var2geop): super(EqualParser, self).__init__(graph, var2geop) self.parser_name = "equal" @@ -1478,15 +1758,17 @@ class EqualParser(AscendParserBase): def _apply(self): data_x1 = self._get_ge_input(self.op.input_arg_names[0]) data_x2 = self._get_ge_input(self.op.input_arg_names[1]) - equal = core.GEOperatorFactory.create_operator("equal" \ - + self._accumulated_op_id(), "Equal")\ - .set_input("x1", data_x1)\ - .set_input("x2", data_x2) + equal = ( + core.GEOperatorFactory.create_operator( + "equal" + self._accumulated_op_id(), "Equal" + ) + .set_input("x1", data_x1) + .set_input("x2", data_x2) + ) return [equal], [[0]] class ExpandParser(AscendParserBase): - def __init__(self, graph, 
var2geop): super(ExpandParser, self).__init__(graph, var2geop) self.parser_name = "expand" @@ -1496,19 +1778,21 @@ class ExpandParser(AscendParserBase): expand_times = self.op.attr('expand_times') tensor = self._create_ge_tensor([len(expand_times)], 2, expand_times) - expand_tensor = core.GEOperatorFactory.\ - create_operator("const" + self._accumulated_op_id(), "Const")\ - .set_attr_tensor("value", tensor) - - assign = core.GEOperatorFactory\ - .create_operator("tile" + self._accumulated_op_id(), "Tile")\ - .set_input("x", data_x1_shape)\ - .set_input("multiples", expand_tensor) + expand_tensor = core.GEOperatorFactory.create_operator( + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + + assign = ( + core.GEOperatorFactory.create_operator( + "tile" + self._accumulated_op_id(), "Tile" + ) + .set_input("x", data_x1_shape) + .set_input("multiples", expand_tensor) + ) return [assign], [[0]] class SqueezeParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SqueezeParser, self).__init__(graph, var2geop) self.parser_name = "squeeze2" @@ -1517,26 +1801,28 @@ class SqueezeParser(AscendParserBase): tensor = self._get_ge_input(self.op.input_arg_names[0]) axes = self.op.attr("axes") - data_squeezed = core.GEOperatorFactory\ - .create_operator("squeeze" + self._accumulated_op_id(), "Squeeze")\ - .set_input("x", tensor)\ - .set_attr_vec_int32("axes", axes) + data_squeezed = ( + core.GEOperatorFactory.create_operator( + "squeeze" + self._accumulated_op_id(), "Squeeze" + ) + .set_input("x", tensor) + .set_attr_vec_int32("axes", axes) + ) shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Shape").set_input("x", data_squeezed) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", data_squeezed) return [shape, data_squeezed], [[1], [0]] -#****************************************************************# -#*************************** *************************# -#*************************** *************************# -#*************************** GradParser *************************# -#*************************** *************************# -#*************************** *************************# -#****************************************************************# +# ****************************************************************# +# *************************** *************************# +# *************************** *************************# +# *************************** GradParser *************************# +# *************************** *************************# +# *************************** *************************# +# ****************************************************************# ## grad class ReduceSumGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReduceSumGradParser, self).__init__(graph, var2geop) self.parser_name = "reduce_sum_grad" @@ -1546,24 +1832,27 @@ class ReduceSumGradParser(AscendParserBase): input = self._get_ge_input(self.op.input_arg_names[1]) shape_tensor = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Shape").set_input("x", input, 0) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", input, 0) tensoron = self._create_ge_tensor([1], 2, -1) const = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensoron) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensoron) self._mark_as_input(const) - reduce_sum = 
core.GEOperatorFactory.create_operator( - "broadcast_to_d" + self._accumulated_op_id(), - "BroadcastTo").set_input("x", x).set_input("shape", shape_tensor) - #reduce_sum = core.GEOperatorFactory.create_operator("expand" + self._accumulated_op_id(), "ExpandDims").set_input("x", reduce_sum).set_input("axis", const) + reduce_sum = ( + core.GEOperatorFactory.create_operator( + "broadcast_to_d" + self._accumulated_op_id(), "BroadcastTo" + ) + .set_input("x", x) + .set_input("shape", shape_tensor) + ) + # reduce_sum = core.GEOperatorFactory.create_operator("expand" + self._accumulated_op_id(), "ExpandDims").set_input("x", reduce_sum).set_input("axis", const) return [reduce_sum], [[0]] class MatMulGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MatMulGradParser, self).__init__(graph, var2geop) self.parser_name = "matmul_grad" @@ -1581,63 +1870,91 @@ class MatMulGradParser(AscendParserBase): if len(x_shape) > 2: if transpose_y: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "BatchMatMul").set_input("x1", out_grad).set_input( - "x2", - y).set_attr_bool("adj_x1", - False).set_attr_bool("adj_x2", False) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "BatchMatMul").set_input("x1", out_grad).set_input( - "x2", - x).set_attr_bool("adj_x1", - True).set_attr_bool("adj_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "BatchMatMul", + ) + .set_input("x1", out_grad) + .set_input("x2", y) + .set_attr_bool("adj_x1", False) + .set_attr_bool("adj_x2", False) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "BatchMatMul", + ) + .set_input("x1", out_grad) + .set_input("x2", x) + .set_attr_bool("adj_x1", True) + .set_attr_bool("adj_x2", False) + ) else: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "BatchMatMul").set_input("x1", out_grad).set_input( - "x2", - y).set_attr_bool("adj_x1", - False).set_attr_bool("adj_x2", True) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "BatchMatMul").set_input( - "x1", x).set_input("x2", out_grad).set_attr_bool( - "adj_x1", True).set_attr_bool("adj_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "BatchMatMul", + ) + .set_input("x1", out_grad) + .set_input("x2", y) + .set_attr_bool("adj_x1", False) + .set_attr_bool("adj_x2", True) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "BatchMatMul", + ) + .set_input("x1", x) + .set_input("x2", out_grad) + .set_attr_bool("adj_x1", True) + .set_attr_bool("adj_x2", False) + ) else: if transpose_y: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", out_grad).set_input( - "x2", y).set_attr_bool("transpose_x1", - False).set_attr_bool( - "transpose_x2", False) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", out_grad).set_input( - "x2", x).set_attr_bool("transpose_x1", - True).set_attr_bool( - "transpose_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", out_grad) + .set_input("x2", y) + 
.set_attr_bool("transpose_x1", False) + .set_attr_bool("transpose_x2", False) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", out_grad) + .set_input("x2", x) + .set_attr_bool("transpose_x1", True) + .set_attr_bool("transpose_x2", False) + ) else: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", out_grad).set_input( - "x2", y).set_attr_bool("transpose_x1", - False).set_attr_bool( - "transpose_x2", True) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", x).set_input( - "x2", out_grad).set_attr_bool("transpose_x1", - True).set_attr_bool( - "transpose_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", out_grad) + .set_input("x2", y) + .set_attr_bool("transpose_x1", False) + .set_attr_bool("transpose_x2", True) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", x) + .set_input("x2", out_grad) + .set_attr_bool("transpose_x1", True) + .set_attr_bool("transpose_x2", False) + ) return [x_grad, y_grad], [[0], [1]] class MulGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MulGradParser, self).__init__(graph, var2geop) self.parser_name = "mul_grad" @@ -1655,78 +1972,114 @@ class MulGradParser(AscendParserBase): if x_num_col_dims == 1 and y_num_col_dims == 1: if len(shape_x) == 2 and len(shape_y) == 2: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", out_grad).set_input( - "x2", y).set_attr_bool("transpose_x1", - False).set_attr_bool( - "transpose_x2", True) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", x).set_input( - "x2", out_grad).set_attr_bool("transpose_x1", - True).set_attr_bool( - "transpose_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", out_grad) + .set_input("x2", y) + .set_attr_bool("transpose_x1", False) + .set_attr_bool("transpose_x2", True) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", x) + .set_input("x2", out_grad) + .set_attr_bool("transpose_x1", True) + .set_attr_bool("transpose_x2", False) + ) elif len(shape_x) == 3 and len(shape_y) == 2: flatten_x = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), - "Flatten").set_input("x", x) - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", out_grad).set_input( - "x2", y).set_attr_bool("transpose_x1", - False).set_attr_bool( - "transpose_x2", True) + "flatten" + self._accumulated_op_id(), "Flatten" + ).set_input("x", x) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", out_grad) + .set_input("x2", y) + .set_attr_bool("transpose_x1", False) + .set_attr_bool("transpose_x2", True) + ) if len(shape_out_grad) == 2: - x_grad = core.GEOperatorFactory.create_operator( - "unsqueeze" + self._accumulated_op_id(), - "Unsqueeze").set_input("x", x_grad).set_attr_vec_int32( - 
"axes", [1]) - - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", flatten_x).set_input( - "x2", out_grad).set_attr_bool("transpose_x1", - True).set_attr_bool( - "transpose_x2", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self._accumulated_op_id(), "Unsqueeze" + ) + .set_input("x", x_grad) + .set_attr_vec_int32("axes", [1]) + ) + + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", flatten_x) + .set_input("x2", out_grad) + .set_attr_bool("transpose_x1", True) + .set_attr_bool("transpose_x2", False) + ) else: if len(shape_x) == 3 and len(shape_y) == 2: assert x_num_col_dims == 2, "only support 2" - flatten_x = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), - "FlattenV2").set_input("x", x).set_attr_int32( - "axis", 0).set_attr_int32("end_axis", 1) - flatten_out_grad = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), - "FlattenV2").set_input("x", out_grad).set_attr_int32( - "axis", 0).set_attr_int32("end_axis", 1) - - y_unsqueeze = core.GEOperatorFactory.create_operator( - "unsqueeze" + self._accumulated_op_id(), - "Unsqueeze").set_input("x", - y).set_attr_vec_int32("axes", [0]) - y_stack = core.GEOperatorFactory.create_operator( - "stack" + self._accumulated_op_id(), - "TileWithAxis").set_input("x", y_unsqueeze).set_attr_int32( - "axis", 0).set_attr_int32("tiles", shape_out_grad[0]) - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "BatchMatMul").set_input("x1", out_grad).set_input( - "x2", y_stack).set_attr_bool("adj_x1", - False).set_attr_bool( - "adj_x2", True) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "MatMul").set_input("x1", flatten_x).set_input( - "x2", flatten_out_grad).set_attr_bool( - "transpose_x1", - True).set_attr_bool("transpose_x2", False) + flatten_x = ( + core.GEOperatorFactory.create_operator( + "flatten" + self._accumulated_op_id(), "FlattenV2" + ) + .set_input("x", x) + .set_attr_int32("axis", 0) + .set_attr_int32("end_axis", 1) + ) + flatten_out_grad = ( + core.GEOperatorFactory.create_operator( + "flatten" + self._accumulated_op_id(), "FlattenV2" + ) + .set_input("x", out_grad) + .set_attr_int32("axis", 0) + .set_attr_int32("end_axis", 1) + ) + + y_unsqueeze = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self._accumulated_op_id(), "Unsqueeze" + ) + .set_input("x", y) + .set_attr_vec_int32("axes", [0]) + ) + y_stack = ( + core.GEOperatorFactory.create_operator( + "stack" + self._accumulated_op_id(), "TileWithAxis" + ) + .set_input("x", y_unsqueeze) + .set_attr_int32("axis", 0) + .set_attr_int32("tiles", shape_out_grad[0]) + ) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "BatchMatMul", + ) + .set_input("x1", out_grad) + .set_input("x2", y_stack) + .set_attr_bool("adj_x1", False) + .set_attr_bool("adj_x2", True) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "MatMul" + ) + .set_input("x1", flatten_x) + .set_input("x2", flatten_out_grad) + .set_attr_bool("transpose_x1", True) + .set_attr_bool("transpose_x2", False) + ) return [x_grad, y_grad], [[0], [1]] class ReluGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReluGradParser, self).__init__(graph, 
var2geop) self.parser_name = "relu_grad" @@ -1734,15 +2087,17 @@ class ReluGradParser(AscendParserBase): def _apply(self): out = self._get_ge_input(self.op.input_arg_names[0]) out_grad = self._get_ge_input(self.op.input_arg_names[1]) - relu_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "ReluGrad").set_input("gradients", - out_grad).set_input("features", out) + relu_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "ReluGrad" + ) + .set_input("gradients", out_grad) + .set_input("features", out) + ) return [relu_grad], [[0]] class SoftmaxWithCrossEntropyGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SoftmaxWithCrossEntropyGradParser, self).__init__(graph, var2geop) self.parser_name = "softmax_with_cross_entropy_grad" @@ -1759,39 +2114,53 @@ class SoftmaxWithCrossEntropyGradParser(AscendParserBase): tensoron = self._create_ge_tensor([1], 5, 1) on = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensoron) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensoron) tensoroff = self._create_ge_tensor([1], 5, 0) off = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensoroff) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensoroff) self._mark_as_input(on) self._mark_as_input(off) - label = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", label).set_attr_int32("dst_type", 3) - onehot = core.GEOperatorFactory.create_operator( - "onehot" + self._accumulated_op_id(), - "OneHotD").set_input("x", - label).set_input("on_value", on).set_input( - "off_value", - off).set_attr_int32("depth", cls_num) + label = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", label) + .set_attr_int32("dst_type", 3) + ) + onehot = ( + core.GEOperatorFactory.create_operator( + "onehot" + self._accumulated_op_id(), "OneHotD" + ) + .set_input("x", label) + .set_input("on_value", on) + .set_input("off_value", off) + .set_attr_int32("depth", cls_num) + ) squeeze = core.GEOperatorFactory.create_operator( - "suqeeze" + self._accumulated_op_id(), - "Squeeze").set_input("x", onehot) - sub = core.GEOperatorFactory.create_operator( - "sub" + self._accumulated_op_id(), - "Sub").set_input("x1", softmax).set_input("x2", squeeze) - grad = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "Mul").set_input("x1", loss_grad).set_input("x2", sub) + "suqeeze" + self._accumulated_op_id(), "Squeeze" + ).set_input("x", onehot) + sub = ( + core.GEOperatorFactory.create_operator( + "sub" + self._accumulated_op_id(), "Sub" + ) + .set_input("x1", softmax) + .set_input("x2", squeeze) + ) + grad = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", loss_grad) + .set_input("x2", sub) + ) return [on, off, label, onehot, grad], [[-1]] class DotMulGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotMulGradParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_mul_grad" @@ -1801,18 +2170,25 @@ class DotMulGradParser(AscendParserBase): out_1 = self._get_ge_input(self.op.input_arg_names[1]) out_2 = self._get_ge_input(self.op.input_arg_names[2]) - x_grad = core.GEOperatorFactory.create_operator( - 
self.parser_name + self._accumulated_op_id(), - "Mul").set_input("x1", out_grad).set_input("x2", out_2) - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "Mul").set_input("x1", out_1).set_input("x2", out_grad) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", out_grad) + .set_input("x2", out_2) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", out_1) + .set_input("x2", out_grad) + ) return [x_grad, y_grad], [[0], [1]] class DotAddGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotAddGradParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_add_grad" @@ -1828,36 +2204,53 @@ class DotAddGradParser(AscendParserBase): x_grad = out_grad cur_time_x = len(out_grad_shape) - len(out_1_shape) for i in range(cur_time_x): - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "ReduceSumD").set_input("x", x_grad).set_attr_vec_int32( - "axes", [0]).set_attr_bool("keep_dims", False) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", x_grad) + .set_attr_vec_int32("axes", [0]) + .set_attr_bool("keep_dims", False) + ) for axis, size in enumerate(out_1_shape): if size == 1: - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "ReduceSumD").set_input("x", x_grad).set_attr_vec_int32( - "axes", [axis]).set_attr_bool("keep_dims", True) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "ReduceSumD", + ) + .set_input("x", x_grad) + .set_attr_vec_int32("axes", [axis]) + .set_attr_bool("keep_dims", True) + ) y_grad = out_grad cur_time_y = len(out_grad_shape) - len(out_2_shape) for i in range(cur_time_y): - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "ReduceSumD").set_input("x", y_grad).set_attr_vec_int32( - "axes", [0]).set_attr_bool("keep_dims", False) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", y_grad) + .set_attr_vec_int32("axes", [0]) + .set_attr_bool("keep_dims", False) + ) for axis, size in enumerate(out_2_shape): if size == 1: - y_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "ReduceSumD").set_input("x", y_grad).set_attr_vec_int32( - "axes", [axis]).set_attr_bool("keep_dims", True) + y_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), + "ReduceSumD", + ) + .set_input("x", y_grad) + .set_attr_vec_int32("axes", [axis]) + .set_attr_bool("keep_dims", True) + ) return [x_grad, y_grad], [[0], [1]] class DotDivGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(DotDivGradParser, self).__init__(graph, var2geop) self.parser_name = "elementwise_div_grad" @@ -1868,41 +2261,68 @@ class DotDivGradParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[2]) y = self._get_ge_input(self.op.input_arg_names[3]) - y_power = core.GEOperatorFactory.create_operator( - "power" + self._accumulated_op_id(), - "Power").set_input("x", y).set_attr_float("power", -1) + y_power = ( + core.GEOperatorFactory.create_operator( + "power" + self._accumulated_op_id(), 
"Power" + ) + .set_input("x", y) + .set_attr_float("power", -1) + ) tensor_zeros = core.GEOperatorFactory.create_operator( - "zeroslike" + self._accumulated_op_id(), - "ZerosLike").set_input("x", x) - x_zero = core.GEOperatorFactory.create_operator( - "equal" + self._accumulated_op_id(), - "Equal").set_input("x1", x).set_input("x2", tensor_zeros) + "zeroslike" + self._accumulated_op_id(), "ZerosLike" + ).set_input("x", x) + x_zero = ( + core.GEOperatorFactory.create_operator( + "equal" + self._accumulated_op_id(), "Equal" + ) + .set_input("x1", x) + .set_input("x2", tensor_zeros) + ) x_nozero = core.GEOperatorFactory.create_operator( - "logical_not" + self._accumulated_op_id(), - "LogicalNot").set_input("x", x_zero) - x_nozero_f = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x_nozero).set_attr_int32("dst_type", 0) - x_grad_w = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "Mul").set_input("x1", x_nozero_f).set_input("x2", y_power) - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "Mul").set_input("x1", x_grad_w).set_input("x2", out_grad) - - y_grad_w = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "Mul").set_input("x1", out).set_input("x2", y_power) - y_grad = core.GEOperatorFactory.create_operator( - "mul" + self._accumulated_op_id(), - "Mul").set_input("x1", y_grad_w).set_input("x2", out_grad) + "logical_not" + self._accumulated_op_id(), "LogicalNot" + ).set_input("x", x_zero) + x_nozero_f = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x_nozero) + .set_attr_int32("dst_type", 0) + ) + x_grad_w = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", x_nozero_f) + .set_input("x2", y_power) + ) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", x_grad_w) + .set_input("x2", out_grad) + ) + + y_grad_w = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", out) + .set_input("x2", y_power) + ) + y_grad = ( + core.GEOperatorFactory.create_operator( + "mul" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", y_grad_w) + .set_input("x2", out_grad) + ) return [x_grad, y_grad], [[0], [1]] class SoftmaxGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SoftmaxGradParser, self).__init__(graph, var2geop) self.parser_name = "softmax_grad" @@ -1911,15 +2331,17 @@ class SoftmaxGradParser(AscendParserBase): out = self._get_ge_input(self.op.input_arg_names[0]) out_grad = self._get_ge_input(self.op.input_arg_names[1]) - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "SoftmaxGrad").set_input("softmax", - out).set_input("grad_softmax", out_grad) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "SoftmaxGrad" + ) + .set_input("softmax", out) + .set_input("grad_softmax", out_grad) + ) return [x_grad], [[0]] class ReshapeGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(ReshapeGradParser, self).__init__(graph, var2geop) self.parser_name = "reshape2_grad" @@ -1931,20 +2353,24 @@ class ReshapeGradParser(AscendParserBase): if x_shape_list[0] == 0: x_shape_delzero = x_shape_list[1:] - tensor = 
self._create_ge_tensor([len(x_shape_delzero)], 2, - x_shape_delzero) + tensor = self._create_ge_tensor( + [len(x_shape_delzero)], 2, x_shape_delzero + ) const_shape = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", tensor) - x_grad = core.GEOperatorFactory.create_operator( - "reshape" + self._accumulated_op_id(), - "Reshape").set_input("x", out_grad).set_input("shape", const_shape) + "shape" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", tensor) + x_grad = ( + core.GEOperatorFactory.create_operator( + "reshape" + self._accumulated_op_id(), "Reshape" + ) + .set_input("x", out_grad) + .set_input("shape", const_shape) + ) return [x_grad], [[0]] class GatherGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(GatherGradParser, self).__init__(graph, var2geop) self.parser_name = "gather_grad" @@ -1959,24 +2385,30 @@ class GatherGradParser(AscendParserBase): x_shape = self.op.block.var(self.op.input_arg_names[2]).shape if len(index_shape) == 1: - index = core.GEOperatorFactory.create_operator( - "unsqueeze" + self._accumulated_op_id(), - "Unsqueeze").set_input("x", - index).set_attr_vec_int32("axes", [1]) + index = ( + core.GEOperatorFactory.create_operator( + "unsqueeze" + self._accumulated_op_id(), "Unsqueeze" + ) + .set_input("x", index) + .set_attr_vec_int32("axes", [1]) + ) tensor_zeros = core.GEOperatorFactory.create_operator( - "zeroslike" + self._accumulated_op_id(), - "ZerosLike").set_input("x", x) - x_grad = core.GEOperatorFactory.create_operator( - "scatter" + self._accumulated_op_id(), - "TensorScatterUpdate").set_input("x", tensor_zeros).set_input( - "indices", index).set_input("updates", out_grad) + "zeroslike" + self._accumulated_op_id(), "ZerosLike" + ).set_input("x", x) + x_grad = ( + core.GEOperatorFactory.create_operator( + "scatter" + self._accumulated_op_id(), "TensorScatterUpdate" + ) + .set_input("x", tensor_zeros) + .set_input("indices", index) + .set_input("updates", out_grad) + ) return [tensor_zeros, x_grad], [[-1]] class TransposeGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TransposeGradParser, self).__init__(graph, var2geop) self.parser_name = "transpose2_grad" @@ -1990,16 +2422,18 @@ class TransposeGradParser(AscendParserBase): out_grad_shape = self.op.block.var(self.op.input_arg_names[0]).shape assert list(map(lambda x: out_grad_shape[x], perm)) == list(x_shape) - x_grad = core.GEOperatorFactory.create_operator( - "transpose" + self._accumulated_op_id(), - "TransposeD").set_input("x", - out_grad).set_attr_vec_int32("perm", perm) + x_grad = ( + core.GEOperatorFactory.create_operator( + "transpose" + self._accumulated_op_id(), "TransposeD" + ) + .set_input("x", out_grad) + .set_attr_vec_int32("perm", perm) + ) return [x_grad], [[0]] class LayerNormGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LayerNormGradParser, self).__init__(graph, var2geop) self.parser_name = "layer_norm_grad" @@ -2013,33 +2447,48 @@ class LayerNormGradParser(AscendParserBase): out_grad = self._get_ge_input(self.op.input_arg_names[5]) x_dtype = self.op.block.var(self.op.input_arg_names[4]).dtype - x_grad = core.GEOperatorFactory.create_operator( - self.parser_name + self._accumulated_op_id(), - "LayerNormGrad").set_input("dy", out_grad).set_input( - "x", x).set_input("variance", - variance).set_input("mean", mean).set_input( - "gamma", scale) - - cast_dtype = 0 if self.ascend_helper.dtype2paddle_inv_map[str( - x_dtype)] == 0 else 1 - 
out_x_grad = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x_grad, - 0).set_attr_int32("dst_type", cast_dtype) - out_scale_grad = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x_grad, - 1).set_attr_int32("dst_type", cast_dtype) - out_bias_grad = core.GEOperatorFactory.create_operator( - "cast" + self._accumulated_op_id(), - "Cast").set_input("x", x_grad, - 2).set_attr_int32("dst_type", cast_dtype) + x_grad = ( + core.GEOperatorFactory.create_operator( + self.parser_name + self._accumulated_op_id(), "LayerNormGrad" + ) + .set_input("dy", out_grad) + .set_input("x", x) + .set_input("variance", variance) + .set_input("mean", mean) + .set_input("gamma", scale) + ) + + cast_dtype = ( + 0 + if self.ascend_helper.dtype2paddle_inv_map[str(x_dtype)] == 0 + else 1 + ) + out_x_grad = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x_grad, 0) + .set_attr_int32("dst_type", cast_dtype) + ) + out_scale_grad = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x_grad, 1) + .set_attr_int32("dst_type", cast_dtype) + ) + out_bias_grad = ( + core.GEOperatorFactory.create_operator( + "cast" + self._accumulated_op_id(), "Cast" + ) + .set_input("x", x_grad, 2) + .set_attr_int32("dst_type", cast_dtype) + ) return [out_x_grad, out_scale_grad, out_bias_grad], [[2], [1], [0]] class TanhGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(TanhGradParser, self).__init__(graph, var2geop) self.parser_name = 'tanh_grad' @@ -2047,15 +2496,18 @@ class TanhGradParser(AscendParserBase): def _apply(self): y = self._get_ge_input(self.op.input_arg_names[0]) out_grad = self._get_ge_input(self.op.input_arg_names[1]) - tanh_grad = core.GEOperatorFactory.create_operator( - "tanh_grad" + self._accumulated_op_id(), - "TanhGrad").set_input("y", y).set_input("dy", out_grad) + tanh_grad = ( + core.GEOperatorFactory.create_operator( + "tanh_grad" + self._accumulated_op_id(), "TanhGrad" + ) + .set_input("y", y) + .set_input("dy", out_grad) + ) return [tanh_grad], [[0]] class LogGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LogGradParser, self).__init__(graph, var2geop) self.parser_name = 'log_grad' @@ -2063,14 +2515,17 @@ class LogGradParser(AscendParserBase): def _apply(self): grad = self._get_ge_input(self.op.input_arg_names[0]) input = self._get_ge_input(self.op.input_arg_names[1]) - log_grad = core.GEOperatorFactory.create_operator( - "log_grad" + self._accumulated_op_id(), - "DivNoNan").set_input("x1", grad).set_input("x2", input) + log_grad = ( + core.GEOperatorFactory.create_operator( + "log_grad" + self._accumulated_op_id(), "DivNoNan" + ) + .set_input("x1", grad) + .set_input("x2", input) + ) return [log_grad], [[0]] class SqrtGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SqrtGradParser, self).__init__(graph, var2geop) self.parser_name = "sqrt_grad" @@ -2078,14 +2533,17 @@ class SqrtGradParser(AscendParserBase): def _apply(self): y = self._get_ge_input(self.op.input_arg_names[0]) out_grad = self._get_ge_input(self.op.input_arg_names[1]) - sqrt_grad = core.GEOperatorFactory.create_operator( - "sqrt_grad" + self._accumulated_op_id(), - "SqrtGrad").set_input("y", y).set_input("dy", out_grad) + sqrt_grad = ( + core.GEOperatorFactory.create_operator( + "sqrt_grad" + self._accumulated_op_id(), "SqrtGrad" + ) + 
.set_input("y", y) + .set_input("dy", out_grad) + ) return [sqrt_grad] class PowGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(PowGradParser, self).__init__(graph, var2geop) self.parser_name = "pow_grad" @@ -2097,31 +2555,46 @@ class PowGradParser(AscendParserBase): shape_tensor = self._create_shape_tensor() shape_tensor = core.GEOperatorFactory.create_operator( - "shape" + self._accumulated_op_id(), "Shape").set_input("x", x) + "shape" + self._accumulated_op_id(), "Shape" + ).set_input("x", x) factor_scale = self._create_ge_tensor([1], 5, factor) factor_scale = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", factor_scale) - factor_tensor = core.GEOperatorFactory.create_operator( - "broadcast_to_d" + self._accumulated_op_id(), - "BroadcastTo").set_input("x", factor_scale).set_input( - "shape", shape_tensor) - - x_power = core.GEOperatorFactory.create_operator( - "x_power" + self._accumulated_op_id(), - "Power").set_input("x", x).set_attr_float("power", factor - 1) - x_power_mul_factor = core.GEOperatorFactory.create_operator( - "x_power_mul_factor" + self._accumulated_op_id(), - "Mul").set_input("x1", x).set_input("x2", factor_tensor) - x_power_mul_factor_grad = core.GEOperatorFactory.create_operator( - "x_power_mul_factor_grad" + self._accumulated_op_id(), - "Mul").set_input("x1", x_power_mul_factor).set_input("x2", grad) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", factor_scale) + factor_tensor = ( + core.GEOperatorFactory.create_operator( + "broadcast_to_d" + self._accumulated_op_id(), "BroadcastTo" + ) + .set_input("x", factor_scale) + .set_input("shape", shape_tensor) + ) + + x_power = ( + core.GEOperatorFactory.create_operator( + "x_power" + self._accumulated_op_id(), "Power" + ) + .set_input("x", x) + .set_attr_float("power", factor - 1) + ) + x_power_mul_factor = ( + core.GEOperatorFactory.create_operator( + "x_power_mul_factor" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", x) + .set_input("x2", factor_tensor) + ) + x_power_mul_factor_grad = ( + core.GEOperatorFactory.create_operator( + "x_power_mul_factor_grad" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", x_power_mul_factor) + .set_input("x2", grad) + ) return [x_power_mul_factor_grad], [[0]] class GeluGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(GeluGradParser, self).__init__(graph, var2geop) self.parser_name = "gelu_grad" @@ -2131,17 +2604,21 @@ class GeluGradParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[1]) y = core.GEOperatorFactory.create_operator( - "gelu" + self._accumulated_op_id(), "Gelu").set_input("x", x) - gelu_grad = core.GEOperatorFactory.create_operator( - "gelu_grad" + self._accumulated_op_id(), - "GeluGrad").set_input("x", x).set_input("dy", - grad).set_input("y", y) + "gelu" + self._accumulated_op_id(), "Gelu" + ).set_input("x", x) + gelu_grad = ( + core.GEOperatorFactory.create_operator( + "gelu_grad" + self._accumulated_op_id(), "GeluGrad" + ) + .set_input("x", x) + .set_input("dy", grad) + .set_input("y", y) + ) return [gelu_grad], [[0]] class MeanGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(MeanGradParser, self).__init__(graph, var2geop) self.parser_name = "mean_grad" @@ -2151,25 +2628,36 @@ class MeanGradParser(AscendParserBase): x = self._get_ge_input(self.op.input_arg_names[1]) ones_tensor = core.GEOperatorFactory.create_operator( - "one_tensor" + 
self._accumulated_op_id(), - "OnesLike").set_input("x", x) - sum = core.GEOperatorFactory.create_operator( - "mean" + self._accumulated_op_id(), - "ReduceSumD").set_input("x", ones_tensor).set_attr_bool( - "keep_dims", False).set_attr_vec_int32("axes", []) - mean = core.GEOperatorFactory.create_operator( - "x_power" + self._accumulated_op_id(), - "Power").set_input("x", sum).set_attr_float("power", -1) - - mean_grad = core.GEOperatorFactory.create_operator( - "mean_grad" + self._accumulated_op_id(), - "Mul").set_input("x1", mean).set_input("x2", grad) + "one_tensor" + self._accumulated_op_id(), "OnesLike" + ).set_input("x", x) + sum = ( + core.GEOperatorFactory.create_operator( + "mean" + self._accumulated_op_id(), "ReduceSumD" + ) + .set_input("x", ones_tensor) + .set_attr_bool("keep_dims", False) + .set_attr_vec_int32("axes", []) + ) + mean = ( + core.GEOperatorFactory.create_operator( + "x_power" + self._accumulated_op_id(), "Power" + ) + .set_input("x", sum) + .set_attr_float("power", -1) + ) + + mean_grad = ( + core.GEOperatorFactory.create_operator( + "mean_grad" + self._accumulated_op_id(), "Mul" + ) + .set_input("x1", mean) + .set_input("x2", grad) + ) return [mean_grad], [[0]] class SliceGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SliceGradParser, self).__init__(graph, var2geop) self.parser_name = "slice_grad" @@ -2200,15 +2688,18 @@ class SliceGradParser(AscendParserBase): starts_cor[0] = 0 ends_cor[0] = 0 paddings = [[s, e] for (s, e) in zip(starts_cor, ends_cor)] - slice_value = core.GEOperatorFactory.create_operator( - "slice_grad" + self._accumulated_op_id(), "PadD").set_input( - "x", grad).set_attr_vec_vec_int64("paddings", paddings) + slice_value = ( + core.GEOperatorFactory.create_operator( + "slice_grad" + self._accumulated_op_id(), "PadD" + ) + .set_input("x", grad) + .set_attr_vec_vec_int64("paddings", paddings) + ) return [slice_value], [[0]] class LookUpTableGradParser(AscendParserBase): - def __init__(self, graph, var2geop): super(LookUpTableGradParser, self).__init__(graph, var2geop) self.parser_name = "lookup_table_grad" @@ -2222,28 +2713,39 @@ class LookUpTableGradParser(AscendParserBase): shape_grad = self.op.block.var(self.op.input_arg_names[1]).shape shape_embedding = self.op.block.var(self.op.input_arg_names[2]).shape - ids_flatten = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), "FlattenV2").set_input( - "x", ids).set_attr_int32("axis", - 0).set_attr_int32("end_axis", 1) - grad_flatten = core.GEOperatorFactory.create_operator( - "flatten" + self._accumulated_op_id(), "FlattenV2").set_input( - "x", grad).set_attr_int32("axis", - 0).set_attr_int32("end_axis", 1) + ids_flatten = ( + core.GEOperatorFactory.create_operator( + "flatten" + self._accumulated_op_id(), "FlattenV2" + ) + .set_input("x", ids) + .set_attr_int32("axis", 0) + .set_attr_int32("end_axis", 1) + ) + grad_flatten = ( + core.GEOperatorFactory.create_operator( + "flatten" + self._accumulated_op_id(), "FlattenV2" + ) + .set_input("x", grad) + .set_attr_int32("axis", 0) + .set_attr_int32("end_axis", 1) + ) tensor_zeros = core.GEOperatorFactory.create_operator( - "zeroslike" + self._accumulated_op_id(), - "ZerosLike").set_input("x", embedding) - embedding_grad = core.GEOperatorFactory.create_operator( - "scatteradd" + self._accumulated_op_id(), - "TensorScatterAdd").set_input("x", tensor_zeros).set_input( - "indices", ids_flatten).set_input("updates", grad_flatten) + "zeroslike" + self._accumulated_op_id(), "ZerosLike" + 
).set_input("x", embedding) + embedding_grad = ( + core.GEOperatorFactory.create_operator( + "scatteradd" + self._accumulated_op_id(), "TensorScatterAdd" + ) + .set_input("x", tensor_zeros) + .set_input("indices", ids_flatten) + .set_input("updates", grad_flatten) + ) return [embedding_grad], [[0]] class SGDParser(AscendParserBase): - def __init__(self, graph, var2geop): super(SGDParser, self).__init__(graph, var2geop) self.parser_name = "sgd" @@ -2252,15 +2754,18 @@ class SGDParser(AscendParserBase): grad = self._get_ge_input(self.op.input_arg_names[0]) lr = self._get_ge_input(self.op.input_arg_names[1]) param = self._get_ge_input(self.op.input_arg_names[2]) - sgd = core.GEOperatorFactory.create_operator( - "momentum" + self._accumulated_op_id(), - "ApplyGradientDescent").set_input("var", param).set_input( - "alpha", lr).set_input("delta", grad) + sgd = ( + core.GEOperatorFactory.create_operator( + "momentum" + self._accumulated_op_id(), "ApplyGradientDescent" + ) + .set_input("var", param) + .set_input("alpha", lr) + .set_input("delta", grad) + ) return [sgd], [[0]] class AdamParser(AscendParserBase): - def __init__(self, graph, var2geop): super(AdamParser, self).__init__(graph, var2geop) self.parser_name = "adam" @@ -2278,26 +2783,29 @@ class AdamParser(AscendParserBase): epsilon = self.op.attr('epsilon') beta1 = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", - self._create_ge_tensor([1], 5, beta1)) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", self._create_ge_tensor([1], 5, beta1)) beta2 = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", - self._create_ge_tensor([1], 5, beta2)) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", self._create_ge_tensor([1], 5, beta2)) epsilon = core.GEOperatorFactory.create_operator( - "const" + self._accumulated_op_id(), - "Const").set_attr_tensor("value", - self._create_ge_tensor([1], 5, epsilon)) - - adam = core.GEOperatorFactory.create_operator( - "adam" + self._accumulated_op_id(), - "ApplyAdam").set_input("var", param).set_input( - "m", moment1).set_input("v", moment2).set_input( - "beta1_power", beta1_power).set_input( - "beta2_power", - beta2_power).set_input("lr", lr).set_input( - "beta1", beta1).set_input("beta2", beta2).set_input( - "epsilon", epsilon).set_input("grad", grad) + "const" + self._accumulated_op_id(), "Const" + ).set_attr_tensor("value", self._create_ge_tensor([1], 5, epsilon)) + + adam = ( + core.GEOperatorFactory.create_operator( + "adam" + self._accumulated_op_id(), "ApplyAdam" + ) + .set_input("var", param) + .set_input("m", moment1) + .set_input("v", moment2) + .set_input("beta1_power", beta1_power) + .set_input("beta2_power", beta2_power) + .set_input("lr", lr) + .set_input("beta1", beta1) + .set_input("beta2", beta2) + .set_input("epsilon", epsilon) + .set_input("grad", grad) + ) return [adam], [[0]] diff --git a/python/paddle/distributed/fleet/meta_optimizers/asp_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/asp_optimizer.py index 2047c3172c26092ea9e214670861741806eee789..59a75d1b054cde943190966fe5bb8bb6950b324d 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/asp_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/asp_optimizer.py @@ -19,23 +19,26 @@ __all__ = [] class ASPOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(ASPOptimizer, self).__init__(optimizer) self.inner_opt 
= optimizer # we do not allow meta optimizer to be inner optimizer currently self.meta_optimizers_white_list = [ - "AMPOptimizer", "LarsOptimizer", "LambOptimizer", - "GraphExecutionOptimizer", "RecomputeOptimizer", - "GradientMergeOptimizer" + "AMPOptimizer", + "LarsOptimizer", + "LambOptimizer", + "GraphExecutionOptimizer", + "RecomputeOptimizer", + "GradientMergeOptimizer", ] self.meta_optimizers_black_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(ASPOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(ASPOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _can_apply(self): if not self.role_maker._is_collective: @@ -52,17 +55,16 @@ class ASPOptimizer(MetaOptimizerBase): def _enable_strategy(self, dist_strategy, context): dist_strategy.asp = True - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): optimize_ops, params_grads = ASPHelper._minimize( self.inner_opt, loss, startup_program=startup_program, parameter_list=parameter_list, - no_grad_set=no_grad_set) + no_grad_set=no_grad_set, + ) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/meta_optimizers/common.py b/python/paddle/distributed/fleet/meta_optimizers/common.py index ed04188de20f0e4c81e237e084e9e8ee64b00e88..7a3c89f1e9dea2b584f56f8461b2f8624d47d0cf 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/common.py +++ b/python/paddle/distributed/fleet/meta_optimizers/common.py @@ -27,8 +27,11 @@ OP_ROLE_VAR_KEY = core.op_proto_and_checker_maker.kOpRoleVarAttrName() def is_update_op(op): - return 'Param' in op.input_names and 'Grad' in op.input_names and \ - "LearningRate" in op.input_names + return ( + 'Param' in op.input_names + and 'Grad' in op.input_names + and "LearningRate" in op.input_names + ) def is_loss_grad_op(op): @@ -39,17 +42,18 @@ def is_loss_grad_op(op): def is_backward_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Backward) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) & int(OpRole.Backward) def is_optimizer_op(op): - return OP_ROLE_KEY in op.attr_names and \ - int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Optimize) + return OP_ROLE_KEY in op.attr_names and int( + op.all_attrs()[OP_ROLE_KEY] + ) & int(OpRole.Optimize) class CollectiveHelper(object): - def __init__(self, role_maker, nrings=1, wait_port=True): self.nrings = nrings self.wait_port = wait_port @@ -63,21 +67,27 @@ class CollectiveHelper(object): endpoints = self.role_maker._get_trainer_endpoints() current_endpoint = endpoints[self.role_maker._worker_index()] for ring_id in range(self.nrings): - self._init_communicator(self.startup_program, - current_endpoint, endpoints, - self.role_maker._worker_index(), ring_id, - self.wait_port) + self._init_communicator( + self.startup_program, + current_endpoint, + endpoints, + self.role_maker._worker_index(), + ring_id, + self.wait_port, + ) self._broadcast_params() - def _init_communicator(self, - program, - current_endpoint, - endpoints, - rank, - ring_id, - wait_port, - global_ring_id=None, - sync=True): + def _init_communicator( + self, + program, + current_endpoint, 
+ endpoints, + rank, + ring_id, + wait_port, + global_ring_id=None, + sync=True, + ): # if current_endpoint is None, it means just for sync, # no group is created. if current_endpoint: @@ -89,32 +99,40 @@ class CollectiveHelper(object): wait_server_ready(other_endpoints) def _add_sync_by_allreduce(block): - sync_var = block.create_var(name=unique_name.generate('sync_var'), - dtype=core.VarDesc.VarType.INT32, - persistable=False, - stop_gradient=True) - block.append_op(type='fill_constant', - inputs={}, - outputs={'Out': [sync_var]}, - attrs={ - 'shape': [1], - 'dtype': sync_var.dtype, - 'value': 1, - 'force_cpu': False, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_allreduce_sum', - inputs={'X': [sync_var]}, - outputs={'Out': [sync_var]}, - attrs={ - 'ring_id': global_ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_sync_calc_stream', - inputs={'X': sync_var}, - outputs={'Out': sync_var}, - attrs={OP_ROLE_KEY: OpRole.Forward}) + sync_var = block.create_var( + name=unique_name.generate('sync_var'), + dtype=core.VarDesc.VarType.INT32, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [sync_var]}, + attrs={ + 'shape': [1], + 'dtype': sync_var.dtype, + 'value': 1, + 'force_cpu': False, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_allreduce_sum', + inputs={'X': [sync_var]}, + outputs={'Out': [sync_var]}, + attrs={ + 'ring_id': global_ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_sync_calc_stream', + inputs={'X': sync_var}, + outputs={'Out': sync_var}, + attrs={OP_ROLE_KEY: OpRole.Forward}, + ) block = program.global_block() if current_endpoint is None: @@ -123,79 +141,93 @@ class CollectiveHelper(object): _add_sync_by_allreduce(block) return - comm_id_var = block.create_var(name=unique_name.generate('comm_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) + comm_id_var = block.create_var( + name=unique_name.generate('comm_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) if core.is_compiled_with_cuda(): - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_comm_init', - inputs={'X': comm_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': comm_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) elif core.is_compiled_with_xpu(): - block.append_op(type='c_gen_bkcl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_comm_init', - inputs={'X': comm_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_gen_bkcl_id', + inputs={}, + 
outputs={'Out': comm_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': comm_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) elif core.is_compiled_with_npu(): - block.append_op(type='c_gen_hccl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_comm_init_hccl', - inputs={'X': comm_id_var}, - outputs={}, - attrs={ - 'rank': rank, - 'ring_id': ring_id, - 'device_id': - int(os.getenv("FLAGS_selected_npus")), - 'rank_ids': nranks, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_gen_hccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_comm_init_hccl', + inputs={'X': comm_id_var}, + outputs={}, + attrs={ + 'rank': rank, + 'ring_id': ring_id, + 'device_id': int(os.getenv("FLAGS_selected_npus")), + 'rank_ids': nranks, + OP_ROLE_KEY: OpRole.Forward, + }, + ) else: raise ValueError( "comm_id must be generated in paddlepaddle-xpu or paddlepaddle-xpu." ) - if sync: _add_sync_by_allreduce(block) + if sync: + _add_sync_by_allreduce(block) def _wait(self, current_endpoint, endpoints): - assert (self.wait_port) + assert self.wait_port other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) wait_server_ready(other_endpoints) @@ -208,20 +240,21 @@ class CollectiveHelper(object): continue ring_id = (ring_id + 1) % self.nrings - block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': 0, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': 0, + OP_ROLE_KEY: OpRole.Forward, + }, + ) for ring_id in range(self.nrings): - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Forward}, + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py index d25cf9680236f2ba6272f9ee3d3bbbad2978fef1..15090f7826544b81428345a451ec770086a8d005 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py @@ -19,7 +19,6 @@ __all__ = [] class DGCOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(DGCOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -28,11 +27,12 @@ class DGCOptimizer(MetaOptimizerBase): self.meta_optimizers_white_list = [] self.meta_optimizers_black_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(DGCOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, 
user_defined_strategy + ): + super(DGCOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _init_dgc_opt(self): if self.dgc_opt is not None: @@ -62,7 +62,8 @@ class DGCOptimizer(MetaOptimizerBase): num_trainers=self.role_maker._worker_num(), regularization=opt.regularization, grad_clip=opt._grad_clip, - name=opt._name) + name=opt._name, + ) def _can_apply(self): if not self.role_maker._is_collective: @@ -88,15 +89,18 @@ class DGCOptimizer(MetaOptimizerBase): dist_strategy.dgc = True dist_strategy.dgc_configs = {"rampup_begin_step": 0, "rampup_step": 1} - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): self._init_dgc_opt() - return self.dgc_opt.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + return self.dgc_opt.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_gradients(self, params_grads): self._init_dgc_opt() @@ -104,17 +108,15 @@ class DGCOptimizer(MetaOptimizerBase): def apply_optimize(self, loss, startup_program, params_grads): self._init_dgc_opt() - return self.dgc_opt.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) - - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + return self.dgc_opt.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) + + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self._init_dgc_opt() - optimize_ops, params_grads = \ - self.dgc_opt.minimize(loss, startup_program, - parameter_list, no_grad_set) + optimize_ops, params_grads = self.dgc_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py index 958ebeb517d615b45e9cc374385e582e3eabebc3..7e5f6983867e19a9a4cd0ff3f9313f135fe93423 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py @@ -42,17 +42,26 @@ class DygraphShardingOptimizer(object): # 3. dynamic trainable params, which is the case bewteen pretraining and finetuning # 4. option to choose fuse comm (more GPU MEM need) or un-fuse comm - def __init__(self, hcg, user_defined_strategy, params, - inner_optimizer_class, **inner_optimizer_kargs): + def __init__( + self, + hcg, + user_defined_strategy, + params, + inner_optimizer_class, + **inner_optimizer_kargs + ): if not isinstance(params, list): raise TypeError( "`parameters` argument given to the DygraphShardingOptimizer should be " - "an iterable of paddle Tensors, but got argument type is `{}`.". 
- format(type(params))) + "an iterable of paddle Tensors, but got argument type is `{}`.".format( + type(params) + ) + ) self._parameter_list = params self._reference_is_trainable_params = list( - map(_is_trainable, self._parameter_list)) + map(_is_trainable, self._parameter_list) + ) self._inner_optimizer_class = inner_optimizer_class self._inner_optimizer_kargs = inner_optimizer_kargs @@ -102,8 +111,11 @@ class DygraphShardingOptimizer(object): rank = sizes.index(min(sizes)) mapping[rank].append(param) numel = reduce(lambda x, y: x * y, param.shape) - assert numel > 0, "param [{}] should larger than 0, but it is [{}]".format( - param.name, numel) + assert ( + numel > 0 + ), "param [{}] should larger than 0, but it is [{}]".format( + param.name, numel + ) sizes[rank] += numel return mapping @@ -127,7 +139,8 @@ class DygraphShardingOptimizer(object): # update related ops: clip, regular, opt self._inner_optimizer = self._inner_optimizer_class( parameters=self._rank2params[self._sharding_rank], - **self._inner_optimizer_kargs) + **self._inner_optimizer_kargs + ) def _sharding_sync_parameters(self): """ @@ -146,7 +159,8 @@ class DygraphShardingOptimizer(object): # instead of the relative logic rank id within group src=self._hcg.get_sharding_parallel_group().ranks[rank], group=self._hcg.get_sharding_parallel_group(), - sync_op=True) + sync_op=True, + ) def _update_trainable(self): """ @@ -154,21 +168,23 @@ class DygraphShardingOptimizer(object): """ raise NotImplementedError - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): # NOTE in dygraph mode, the only different between step and minimize is that minimize # allow user to customize the parameters for updating on each step input_param_names = set([param.name for param in parameters]) parameters = list( - filter(lambda x: x.name in input_param_names, - self._rank2params[self._sharding_rank])) - result = self._inner_optimizer.minimize(loss, startup_program, - parameters, no_grad_set) + filter( + lambda x: x.name in input_param_names, + self._rank2params[self._sharding_rank], + ) + ) + result = self._inner_optimizer.minimize( + loss, startup_program, parameters, no_grad_set + ) # sync parameters across sharding ranks self._sharding_sync_parameters() @@ -187,7 +203,9 @@ class DygraphShardingOptimizer(object): # TODO is it a good way to make _grad_clip a property @property def _grad_clip(self): - assert self._inner_optimizer is not None, "inner opt of sharding is not initiliazed." + assert ( + self._inner_optimizer is not None + ), "inner opt of sharding is not initiliazed." 
return self._inner_optimizer._grad_clip def __getattr__(self, item): diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/heter_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/heter_parallel_optimizer.py index cda8c9e30cefa779f93ff807ed423dd70f777075..a2a65d995ad7c65cd3b51dd6cd067b0dd27d1396 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/heter_parallel_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/heter_parallel_optimizer.py @@ -20,7 +20,8 @@ __all__ = [] def _obtain_optimizer_parameters_list(optimizer): if getattr(optimizer, '_param_groups', None) and isinstance( - optimizer._param_groups[0], dict): + optimizer._param_groups[0], dict + ): parameters_list = [] for group in optimizer._param_groups: for param in group['params']: @@ -47,19 +48,19 @@ class HeterParallelOptimizer: self._inner_opt.step() @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): # minimize does not support parameters in the form of param_group, # so no need use _obtain_optimizer_parameters_list - parameter_list = parameters if parameters \ - else self._inner_opt._parameter_list + parameter_list = ( + parameters if parameters else self._inner_opt._parameter_list + ) - return self._inner_opt.minimize(loss, startup_program, parameter_list, - no_grad_set) + return self._inner_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) def __getattr__(self, item): return getattr(self._inner_opt, item) diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py index bd762a202f3711c652cdfafa033625d276cbe7ee..d768411dea5fec22b5482f3c9a09f55db8c0f64e 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py @@ -21,12 +21,12 @@ __all__ = [] class HybridParallelGradScaler: - def __init__(self, scaler, hcg): self._scaler = scaler self._hcg = hcg self._use_dp_mode = ( - self._hcg.get_parallel_mode() == ParallelMode.DATA_PARALLEL) + self._hcg.get_parallel_mode() == ParallelMode.DATA_PARALLEL + ) def scale(self, var): return self._scaler.scale(var) @@ -56,20 +56,22 @@ class HybridParallelGradScaler: if not self._enable: return param_grads = [ - param._grad_ivar() for param in optimizer._parameter_list + param._grad_ivar() + for param in optimizer._parameter_list if param._grad_ivar() is not None ] - _legacy_C_ops.check_finite_and_unscale(param_grads, self._scale, - param_grads, self._found_inf) + _legacy_C_ops.check_finite_and_unscale( + param_grads, self._scale, param_grads, self._found_inf + ) # allreduce_max found_inf in check_group if not self._use_dp_mode: self._found_inf = paddle.cast(self._found_inf, dtype="int32") # TODO(shenliang03) Since the minimize call in the optimizer is # after the gradscaler, check_finite needs to synchronize global # information. 
In the future, we should use check_group - paddle.distributed.all_reduce(self._found_inf, - op=paddle.distributed.ReduceOp.MAX, - group=None) + paddle.distributed.all_reduce( + self._found_inf, op=paddle.distributed.ReduceOp.MAX, group=None + ) self._found_inf = paddle.cast(self._found_inf, dtype="bool") def __getattr__(self, item): diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py index 2cc43abee92aac503030156facab2c5ff49c795c..33922b7f35d9c470fe15e7b5ddf9b7d1908aa99c 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_optimizer.py @@ -14,7 +14,10 @@ import paddle from paddle.fluid.clip import ClipGradByGlobalNorm -from ...utils.hybrid_parallel_util import fused_allreduce_gradients, sharding_reduce_gradients +from ...utils.hybrid_parallel_util import ( + fused_allreduce_gradients, + sharding_reduce_gradients, +) from ...base.topology import ParallelMode from paddle.fluid.dygraph import base as imperative_base from paddle.fluid import framework @@ -27,7 +30,8 @@ __all__ = [] def _obtain_optimizer_parameters_list(optimizer): if getattr(optimizer, '_param_groups', None) and isinstance( - optimizer._param_groups[0], dict): + optimizer._param_groups[0], dict + ): parameters_list = [] for group in optimizer._param_groups: for param in group['params']: @@ -39,7 +43,6 @@ def _obtain_optimizer_parameters_list(optimizer): class HybridParallelClipGrad: - def __init__(self, clip, hcg): self._clip = clip self._hcg = hcg @@ -65,7 +68,8 @@ class HybridParallelClipGrad: not_shared_enable = (not hasattr(p, 'is_firstly_shared')) or ( hasattr(p, 'is_firstly_shared') - and getattr(p, 'is_firstly_shared', True)) + and getattr(p, 'is_firstly_shared', True) + ) if not_shared_enable: if p.is_distributed: @@ -81,69 +85,83 @@ class HybridParallelClipGrad: # global norm of distributed FP16 params_and_grads if len(sum_square_dist_fp16) == 0: - global_norm_dist_fp16 = paddle.to_tensor([0.], dtype=paddle.float32) + global_norm_dist_fp16 = paddle.to_tensor( + [0.0], dtype=paddle.float32 + ) else: global_norm_dist_fp16 = layers.concat(sum_square_dist_fp16) global_norm_dist_fp16 = layers.reduce_sum(global_norm_dist_fp16) - global_norm_dist_fp16 = paddle.cast(global_norm_dist_fp16, - dtype=paddle.float32) + global_norm_dist_fp16 = paddle.cast( + global_norm_dist_fp16, dtype=paddle.float32 + ) # global norm of non-distributed FP16 params_and_grads if len(sum_square_not_dist_fp16) == 0: - global_norm_not_dist_fp16 = paddle.to_tensor([0.], - dtype=paddle.float32) + global_norm_not_dist_fp16 = paddle.to_tensor( + [0.0], dtype=paddle.float32 + ) else: global_norm_not_dist_fp16 = layers.concat(sum_square_not_dist_fp16) global_norm_not_dist_fp16 = layers.reduce_sum( - global_norm_not_dist_fp16) - global_norm_not_dist_fp16 = paddle.cast(global_norm_not_dist_fp16, - dtype=paddle.float32) + global_norm_not_dist_fp16 + ) + global_norm_not_dist_fp16 = paddle.cast( + global_norm_not_dist_fp16, dtype=paddle.float32 + ) # global norm of distributed FP32 params_and_grads - global_norm_dist_fp32 = layers.concat(sum_square_dist_fp32) if len( - sum_square_dist_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_norm_dist_fp32 = ( + layers.concat(sum_square_dist_fp32) + if len(sum_square_dist_fp32) != 0 + else 
paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_norm_dist_fp32 = layers.reduce_sum(global_norm_dist_fp32) # global norm of non-distributed FP32 params_and_grads - global_norm_not_dist_fp32 = layers.concat( - sum_square_not_dist_fp32 - ) if len(sum_square_not_dist_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_norm_not_dist_fp32 = ( + layers.concat(sum_square_not_dist_fp32) + if len(sum_square_not_dist_fp32) != 0 + else paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_norm_not_dist_fp32 = layers.reduce_sum(global_norm_not_dist_fp32) global_norm_var_dist = global_norm_dist_fp16 + global_norm_dist_fp32 - global_norm_var_not_dist = global_norm_not_dist_fp16 + global_norm_not_dist_fp32 + global_norm_var_not_dist = ( + global_norm_not_dist_fp16 + global_norm_not_dist_fp32 + ) # add all reduce to get global norm of distributed params_and_grads if self._hcg.get_model_parallel_world_size() > 1: paddle.distributed.all_reduce( - global_norm_var_dist, - group=self._hcg.get_check_parallel_group()) + global_norm_var_dist, group=self._hcg.get_check_parallel_group() + ) # add all reduce to get global norm of non-distributed params_and_grads in groups of pp if self._hcg.get_pipe_parallel_world_size() > 1: paddle.distributed.all_reduce( global_norm_var_not_dist, - group=self._hcg.get_pipe_parallel_group()) + group=self._hcg.get_pipe_parallel_group(), + ) # In Sharding mode, param and grad is mapping different rank in optimizer. # ClipGradByGlobalNorm need allreduce to get globol norm if self._hcg.get_sharding_parallel_world_size() > 1: paddle.distributed.all_reduce( global_norm_var_not_dist, - group=self._hcg.get_sharding_parallel_group()) - - global_norm_var_fp32 = layers.sqrt(global_norm_var_dist + - global_norm_var_not_dist) - - max_global_norm = layers.fill_constant(shape=[1], - dtype=global_norm_var_fp32.dtype, - value=self.clip_norm) - clip_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=global_norm_var_fp32, - y=max_global_norm)) + group=self._hcg.get_sharding_parallel_group(), + ) + + global_norm_var_fp32 = layers.sqrt( + global_norm_var_dist + global_norm_var_not_dist + ) + + max_global_norm = layers.fill_constant( + shape=[1], dtype=global_norm_var_fp32.dtype, value=self.clip_norm + ) + clip_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max(x=global_norm_var_fp32, y=max_global_norm), + ) clip_var_fp16 = paddle.cast(clip_var, paddle.float16) for p, g in params_grads: if g is None: @@ -173,35 +191,43 @@ class HybridParallelOptimizer: self._hcg = hcg self._use_dp_mode = ( - self._hcg.get_parallel_mode() == ParallelMode.DATA_PARALLEL) + self._hcg.get_parallel_mode() == ParallelMode.DATA_PARALLEL + ) - self._need_dp = (self._hcg.get_data_parallel_world_size() > 1) + self._need_dp = self._hcg.get_data_parallel_world_size() > 1 # NOTE(shenliang03): Because of the pure DataParallel mode, the gradient synchronization # is achieved through reducer, so there is no need to call fuse_allreduce in optimizer. 
self._dp_enable = not self._use_dp_mode and self._need_dp - self._sharding_enable = (self._hcg.get_sharding_parallel_world_size() > - 1) + self._sharding_enable = self._hcg.get_sharding_parallel_world_size() > 1 - if isinstance(self._inner_opt._grad_clip, - ClipGradByGlobalNorm) and not self._use_dp_mode: - logger.warning("While using ClipGradByGlobalNorm in TensorParallel, PipelineParallel " \ - "or Sharding, the grad clip of original optimizer will be changed.") + if ( + isinstance(self._inner_opt._grad_clip, ClipGradByGlobalNorm) + and not self._use_dp_mode + ): + logger.warning( + "While using ClipGradByGlobalNorm in TensorParallel, PipelineParallel " + "or Sharding, the grad clip of original optimizer will be changed." + ) if self._sharding_enable: # change sharding inner_optimizer's _grad_clip - self._inner_opt._inner_optimizer._grad_clip = HybridParallelClipGrad( - self._inner_opt._grad_clip, hcg) + self._inner_opt._inner_optimizer._grad_clip = ( + HybridParallelClipGrad(self._inner_opt._grad_clip, hcg) + ) else: self._inner_opt._grad_clip = HybridParallelClipGrad( - self._inner_opt._grad_clip, hcg) + self._inner_opt._grad_clip, hcg + ) if self._inner_opt._parameter_list and isinstance( - self._inner_opt._parameter_list[0], dict): + self._inner_opt._parameter_list[0], dict + ): for item in self._inner_opt._param_groups: if "grad_clip" in item.keys(): item["grad_clip"] = HybridParallelClipGrad( - self._inner_opt._grad_clip, hcg) + self._inner_opt._grad_clip, hcg + ) @imperative_base.no_grad @framework.dygraph_only @@ -216,16 +242,15 @@ class HybridParallelOptimizer: self._inner_opt.step() @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): # minimize does not support parameters in the form of param_group, # so no need use _obtain_optimizer_parameters_list - parameter_list = parameters if parameters \ - else self._inner_opt._parameter_list + parameter_list = ( + parameters if parameters else self._inner_opt._parameter_list + ) # Here sharding should use global parameter list if self._sharding_enable: @@ -234,8 +259,9 @@ class HybridParallelOptimizer: if self._dp_enable: fused_allreduce_gradients(list(parameter_list), self._hcg) - return self._inner_opt.minimize(loss, startup_program, parameter_list, - no_grad_set) + return self._inner_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) def __getattr__(self, item): return getattr(self._inner_opt, item) diff --git a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/sharding_optimizer_stage2.py b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/sharding_optimizer_stage2.py index 3468ec7a3a7b88c45449b4fd0380baf5f04a1a45..ed1435209540c153be8b8baf61185e8389a2c920 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/sharding_optimizer_stage2.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/sharding_optimizer_stage2.py @@ -30,10 +30,19 @@ import paddle from paddle.fluid import core from paddle.optimizer import Optimizer from paddle.fluid.clip import ClipGradByGlobalNorm -from paddle.distributed.collective import _get_global_group, new_group, broadcast, wait +from paddle.distributed.collective import ( + _get_global_group, + new_group, + broadcast, + wait, +) from ...utils.internal_storage import ParamStorage, GradStorage -from ...meta_parallel.sharding.sharding_utils import Type, 
device_guard, ShardingClipGrad +from ...meta_parallel.sharding.sharding_utils import ( + Type, + device_guard, + ShardingClipGrad, +) # CUDA alignment 256 bytes, cpu alignment 4096 bytes alignment = {"gpu": 256, "cpu": 4096} @@ -61,19 +70,22 @@ class ShardingOptimizerStage2(Optimizer): # 4. Support offload function. # 5. Support the establishment of independent communication groups. # 6. Broadcast_fp16 is not supported now. - def __init__(self, - params, - optim, - group=None, - offload=False, - device="gpu", - pertrain_sync_models=True, - **kw): + def __init__( + self, + params, + optim, + group=None, + offload=False, + device="gpu", + pertrain_sync_models=True, + **kw + ): super().__init__(optim._learning_rate, params, kw) # Segmentation information - self._dtype_rank_params = OrderedDict( + self._dtype_rank_params = ( + OrderedDict() ) # {dtype:[param1,param2]} device, rank, params self._param2rank = {} self.__segment_params = [] @@ -84,17 +96,26 @@ class ShardingOptimizerStage2(Optimizer): self._optim_defaults = kw self._optim = optim - assert hasattr(self._optim, "_master_weights" - ), "Must use optimizer with _master_weights attribute" + assert hasattr( + self._optim, "_master_weights" + ), "Must use optimizer with _master_weights attribute" self._local_params = params self._default_device = device - self._pfp16 = len( - list( - filter(lambda x: x.trainable and x.dtype == Type.fp16.value, - self._local_params))) > 0 + self._pfp16 = ( + len( + list( + filter( + lambda x: x.trainable and x.dtype == Type.fp16.value, + self._local_params, + ) + ) + ) + > 0 + ) - self.group = new_group( - _get_global_group().ranks) if group is None else group + self.group = ( + new_group(_get_global_group().ranks) if group is None else group + ) self.world_size = self.group.nranks self.rank = self.group.rank @@ -110,19 +131,24 @@ class ShardingOptimizerStage2(Optimizer): logging.warning( "While using ClipGradByGlobalNorm in ShardingOptimizer, the grad clip of original optimizer will be changed." 
) - self._optim._grad_clip = ShardingClipGrad(self._optim._grad_clip, - paddle.get_device(), - self.group) + self._optim._grad_clip = ShardingClipGrad( + self._optim._grad_clip, paddle.get_device(), self.group + ) if self._optim._parameter_list and isinstance( - self._optim._parameter_list[0], dict): + self._optim._parameter_list[0], dict + ): for item in self._optim._param_groups: if "grad_clip" in item.keys(): item["grad_clip"] = ShardingClipGrad( - self._optim._grad_clip, paddle.get_device(), - self.group) + self._optim._grad_clip, + paddle.get_device(), + self.group, + ) if offload: - assert self._pfp16, "Only support offload strategy while using \'Adam\', \'AdamW\' and \'Momentum\' optimizer with AMP/Pure FP16" + assert ( + self._pfp16 + ), "Only support offload strategy while using \'Adam\', \'AdamW\' and \'Momentum\' optimizer with AMP/Pure FP16" self.offload = offload # Using for offload self.offload_device = "cpu" @@ -143,10 +169,9 @@ class ShardingOptimizerStage2(Optimizer): """ for p in self._local_params: - broadcast(p, - src=self._global_root_rank, - group=self.group, - sync_op=True) + broadcast( + p, src=self._global_root_rank, group=self.group, sync_op=True + ) # Multi stream operation will be supported later wait(tensor=p, group=self.group, use_calc_stream=True) @@ -159,16 +184,17 @@ class ShardingOptimizerStage2(Optimizer): name=param.name, value=param.cast(dtype=Type.fp32.value).numpy(), place=core.CPUPlace(), - stop_gradient=param.stop_gradient) + stop_gradient=param.stop_gradient, + ) else: for param in trainable_params: if param.dtype == Type.fp16.value: self._optim._master_weights[param.name] = paddle.cast( - param, Type.fp32.value) + param, Type.fp32.value + ) def _update_opt_status(self): - """Update optimizer status and parameter storage information, and special functions to be developed. 
- """ + """Update optimizer status and parameter storage information, and special functions to be developed.""" # func 1 self._integration_params() @@ -222,8 +248,9 @@ class ShardingOptimizerStage2(Optimizer): self._dtype_rank_params[param.dtype] = [ [] for _ in range(self.world_size) ] - self._dtype_rank_params[param.dtype][self.param2rank[ - param.name]].append(param) + self._dtype_rank_params[param.dtype][ + self.param2rank[param.name] + ].append(param) # Sort per rank params by size for dtype in self._dtype_rank_params.keys(): @@ -243,7 +270,8 @@ class ShardingOptimizerStage2(Optimizer): if dtype not in self._rank_buffer_size.keys(): self._rank_buffer_size[dtype] = {} for dst_rank, per_rank_params in enumerate( - self.dtype_rank_params[dtype]): + self.dtype_rank_params[dtype] + ): if dst_rank not in self._rank_buffer_size[dtype].keys(): self._rank_buffer_size[dtype][dst_rank] = 0 for param in per_rank_params: @@ -251,11 +279,15 @@ class ShardingOptimizerStage2(Optimizer): continue size = np.prod(param.shape) * align[dtype] remaining = size % alignment[self._default_device] - ali = 0 if remaining == 0 else alignment[ - self._default_device] - remaining + ali = ( + 0 + if remaining == 0 + else alignment[self._default_device] - remaining + ) align_ = ali // align[dtype] - self._rank_buffer_size[dtype][dst_rank] += np.prod( - param.shape) + align_ + self._rank_buffer_size[dtype][dst_rank] += ( + np.prod(param.shape) + align_ + ) self._param2align[param.name] = align_ return self._rank_buffer_size @@ -274,23 +306,27 @@ class ShardingOptimizerStage2(Optimizer): # Merge all the trainable params in a single InternalStorage trainable_params = list( - filter(lambda x: x.trainable, params)) + filter(lambda x: x.trainable, params) + ) if self._pfp16 and dst_rank == self.rank: self._generate_master_params(trainable_params) if trainable_params: param_storage = ParamStorage( size=self.rank_buffer_size[dtype][dst_rank], dtype=dtype, - device=self._default_device) + device=self._default_device, + ) - param_storage.add_rank_params(trainable_params, - self._param2align) + param_storage.add_rank_params( + trainable_params, self._param2align + ) self.param_storages[dtype][dst_rank] = param_storage # Clear the InternalStorage keys which are not in use anymore dtype_in_use = list(self.dtype_rank_params.keys()) dtype_to_pop = list( - filter(lambda x: x not in dtype_in_use, self.param_storages.keys())) + filter(lambda x: x not in dtype_in_use, self.param_storages.keys()) + ) for d in dtype_to_pop: self.param_storages.pop(d) @@ -300,8 +336,11 @@ class ShardingOptimizerStage2(Optimizer): for param in cpu_master_params: size = np.prod(param.shape) * align[Type.fp32.value] remaining = size % alignment[self.offload_device] - ali = 0 if remaining == 0 else alignment[ - self.offload_device] - remaining + ali = ( + 0 + if remaining == 0 + else alignment[self.offload_device] - remaining + ) align_ = ali // align[Type.fp32.value] self.offload_buffer_size += np.prod(param.shape) + align_ self.offload_param2align[param.name] = align_ @@ -311,9 +350,11 @@ class ShardingOptimizerStage2(Optimizer): self.offload_params = ParamStorage( size=self.offload_buffer_size, dtype=Type.fp32.value, - device=self.offload_device) + device=self.offload_device, + ) self.offload_params.add_rank_params( - cpu_master_params, self.offload_param2align, False) + cpu_master_params, self.offload_param2align, False + ) self.offload_params.buffer.stop_gradient = False self.offload_grads = GradStorage( @@ -322,14 +363,16 @@ class 
ShardingOptimizerStage2(Optimizer): device=self.offload_device, destination=self.rank, parm2align=self.offload_param2align, - convert_cpu=True) + convert_cpu=True, + ) for p in cpu_master_params: self.offload_grads.add_grad( - p, self.offload_param2align[p.name]) + p, self.offload_param2align[p.name] + ) self._optim._master_weights[ - self.offload_params.buffer. - name] = self.offload_params.buffer + self.offload_params.buffer.name + ] = self.offload_params.buffer def _offload_acc_grad(self, param_name, grad_fp32_cpu): """accumulate grads with offload strategy""" @@ -337,12 +380,14 @@ class ShardingOptimizerStage2(Optimizer): if param_name in self._master_params.keys(): if self._master_params[param_name].grad is None: self._master_params[param_name]._copy_gradient_from( - grad_fp32_cpu) + grad_fp32_cpu + ) else: self._master_params[param_name].grad.add_(grad_fp32_cpu) self.offload_params.buffer._copy_gradient_from( - self.offload_grads.buffer) + self.offload_grads.buffer + ) def _offload_scale_grad(self, scale_size): """scale grads with offload strategy""" @@ -362,7 +407,7 @@ class ShardingOptimizerStage2(Optimizer): if self.offload: params_list = [self.offload_params.buffer] - #TODO(Baibaifan): Offload will support param_groups later + # TODO(Baibaifan): Offload will support param_groups later if not isinstance(self._optim._param_groups[0], dict): self._optim._parameter_list = params_list self._optim._param_groups = params_list @@ -376,8 +421,10 @@ class ShardingOptimizerStage2(Optimizer): for param in self._local_params: if param.name in self._master_params.keys(): param.set_value( - self._master_params[param.name].cuda(dev_id).cast( - dtype=param.dtype)) + self._master_params[param.name] + .cuda(dev_id) + .cast(dtype=param.dtype) + ) else: self._optim.step() @@ -386,7 +433,8 @@ class ShardingOptimizerStage2(Optimizer): def minimize(self): raise RuntimeError( - "optimizer.minimize() not support now, please use optimizer.step()") + "optimizer.minimize() not support now, please use optimizer.step()" + ) def set_state_dict(self, state_dict): self._optim.set_state_dict(state_dict) @@ -408,12 +456,16 @@ class ShardingOptimizerStage2(Optimizer): # Exchange all the shards with the other ranks for dtype_per_rank in self.param_storages.values(): for dst_rank, internal_storage in dtype_per_rank.items(): - broadcast(tensor=internal_storage.buffer, - src=self.group.ranks[dst_rank], - group=self.group, - sync_op=True) + broadcast( + tensor=internal_storage.buffer, + src=self.group.ranks[dst_rank], + group=self.group, + sync_op=True, + ) # Multi stream operation will be supported later - wait(tensor=internal_storage.buffer, - group=self.group, - use_calc_stream=True) + wait( + tensor=internal_storage.buffer, + group=self.group, + use_calc_stream=True, + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py index 93857461b26d26133abe67008ed086129e7a8571..3834f85bffaebe09a7c00a8987798ccb5a6ae17b 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py @@ -18,7 +18,6 @@ __all__ = [] class FP16AllReduceOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(FP16AllReduceOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -34,11 +33,12 @@ class FP16AllReduceOptimizer(MetaOptimizerBase): ] self.meta_optimizers_black_list = ["DGCOptimizer"] - def 
_set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(FP16AllReduceOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(FP16AllReduceOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _can_apply(self): if not self.role_maker._is_collective: @@ -84,28 +84,30 @@ class FP16AllReduceOptimizer(MetaOptimizerBase): else: op._remove_attr(op_maker.kOpRoleVarAttrName()) - new_grad = block.create_var(name=unique_name.generate(grad.name + - ".cast_fp16"), - dtype=core.VarDesc.VarType.FP16, - persistable=False, - stop_gradient=True) + new_grad = block.create_var( + name=unique_name.generate(grad.name + ".cast_fp16"), + dtype=core.VarDesc.VarType.FP16, + persistable=False, + stop_gradient=True, + ) with block.program._backward_role_guard(): - cast_op = block.append_op(type="cast", - inputs={"X": grad}, - outputs={"Out": new_grad}, - attrs={ - "in_dtype": - core.VarDesc.VarType.FP32, - "out_dtype": - core.VarDesc.VarType.FP16 - }, - stop_gradient=True) + cast_op = block.append_op( + type="cast", + inputs={"X": grad}, + outputs={"Out": new_grad}, + attrs={ + "in_dtype": core.VarDesc.VarType.FP32, + "out_dtype": core.VarDesc.VarType.FP16, + }, + stop_gradient=True, + ) backward = op_maker.OpRole.Backward cast_op._set_attr(op_maker.kOpRoleAttrName(), backward) - cast_op._set_attr(op_maker.kOpRoleVarAttrName(), - [param.name, new_grad.name]) + cast_op._set_attr( + op_maker.kOpRoleVarAttrName(), [param.name, new_grad.name] + ) new_grad.op = cast_op new_param_and_grads.append((param, new_grad, True)) @@ -122,30 +124,32 @@ class FP16AllReduceOptimizer(MetaOptimizerBase): continue block = grad.block - new_grad = block.create_var(name=unique_name.generate(grad.name + - ".cast_fp32"), - dtype=core.VarDesc.VarType.FP32, - persistable=False, - stop_gradient=True) + new_grad = block.create_var( + name=unique_name.generate(grad.name + ".cast_fp32"), + dtype=core.VarDesc.VarType.FP32, + persistable=False, + stop_gradient=True, + ) with block.program._optimized_guard( - [param, grad]), framework.name_scope('fp16_allreduce'): - cast_op = block.append_op(type="cast", - inputs={"X": grad}, - outputs={"Out": new_grad}, - attrs={ - "in_dtype": - core.VarDesc.VarType.FP16, - "out_dtype": - core.VarDesc.VarType.FP32 - }, - stop_gradient=True) + [param, grad] + ), framework.name_scope('fp16_allreduce'): + cast_op = block.append_op( + type="cast", + inputs={"X": grad}, + outputs={"Out": new_grad}, + attrs={ + "in_dtype": core.VarDesc.VarType.FP16, + "out_dtype": core.VarDesc.VarType.FP32, + }, + stop_gradient=True, + ) ret_param_and_grads.append((param, new_grad)) return ret_param_and_grads def apply_optimize(self, loss, startup_program, params_grads): new_params_grads = self.fp16_compression(params_grads) - return self.inner_opt.apply_optimize(loss, - startup_program=startup_program, - params_grads=new_params_grads) + return self.inner_opt.apply_optimize( + loss, startup_program=startup_program, params_grads=new_params_grads + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py index 10175f8936a70baad1c588e164e7d0e174b48b0b..eb97122587f364202102abca88b7bfee11d305ac 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py +++ 
b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py @@ -18,7 +18,6 @@ __all__ = [] class GradientMergeOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(GradientMergeOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -32,26 +31,30 @@ class GradientMergeOptimizer(MetaOptimizerBase): ] self.meta_optimizers_black_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(GradientMergeOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(GradientMergeOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _init_wrapped_opt(self): config = self.user_defined_strategy.gradient_merge_configs self.wrapped_opt = GM(self.inner_opt) self.wrapped_opt._set_k_steps( - self.user_defined_strategy.gradient_merge_configs["k_steps"]) + self.user_defined_strategy.gradient_merge_configs["k_steps"] + ) self.wrapped_opt._set_avg( - self.user_defined_strategy.gradient_merge_configs["avg"]) + self.user_defined_strategy.gradient_merge_configs["avg"] + ) def _can_apply(self): if not self.role_maker._is_collective: return False - can_apply = (self.user_defined_strategy.gradient_merge == True) and \ - self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1 + can_apply = ( + self.user_defined_strategy.gradient_merge == True + ) and self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1 return can_apply def _disable_strategy(self, dist_strategy): @@ -62,13 +65,11 @@ class GradientMergeOptimizer(MetaOptimizerBase): # we currently do not support auto-enable GradientMerge return - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self._init_wrapped_opt() - optimize_ops, params_grads = \ - self.wrapped_opt.minimize(loss, startup_program, - parameter_list, no_grad_set) + optimize_ops, params_grads = self.wrapped_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py index a5b0856a66ff4c0892abcc303b6d30af8c9bef8c..dd2ccfc7ff7d26e2a47bf24bd2de9899cff98f2f 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py @@ -24,7 +24,6 @@ __all__ = [] class GraphExecutionOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(GraphExecutionOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -45,12 +44,14 @@ class GraphExecutionOptimizer(MetaOptimizerBase): return False return not self.user_defined_strategy.without_graph_optimization - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): pass # should fix the variable @@ -69,75 +70,77 @@ class GraphExecutionOptimizer(MetaOptimizerBase): if trainer_id == 0 and not paddle.is_compiled_with_npu(): wait_server_ready(other_trainers) - if build_strategy.reduce_strategy == 
BuildStrategy.ReduceStrategy._NoReduce: + if ( + build_strategy.reduce_strategy + == BuildStrategy.ReduceStrategy._NoReduce + ): return if core.is_compiled_with_cuda(): comm_id_var = startup_program.global_block().create_var( - name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) + name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW + ) for i in range(1, build_strategy.nccl_comm_num): startup_program.global_block().create_var( name="NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) if build_strategy.use_hierarchical_allreduce: for i in range(0, build_strategy.nccl_comm_num): startup_program.global_block().create_var( name="Hierarchical_inter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().create_var( name="Hierarchical_exter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().append_op( type="gen_nccl_id", inputs={}, outputs={"NCCLID": comm_id_var}, attrs={ - "trainers": - trainer_endpoints, - "trainer_id": - trainer_id, - "nccl_comm_num": - build_strategy.nccl_comm_num, - "use_hierarchical_allreduce": - build_strategy.use_hierarchical_allreduce, - "hierarchical_allreduce_inter_ranks": - build_strategy.hierarchical_allreduce_inter_nranks - }) + "trainers": trainer_endpoints, + "trainer_id": trainer_id, + "nccl_comm_num": build_strategy.nccl_comm_num, + "use_hierarchical_allreduce": build_strategy.use_hierarchical_allreduce, + "hierarchical_allreduce_inter_ranks": build_strategy.hierarchical_allreduce_inter_nranks, + }, + ) elif core.is_compiled_with_xpu(): comm_id_var = startup_program.global_block().create_var( - name="BKCLID", persistable=True, type=core.VarDesc.VarType.RAW) + name="BKCLID", persistable=True, type=core.VarDesc.VarType.RAW + ) - #NOTE(liuyuhui) Baidu Kunlun Communication Library(BKCL) currently do not support multi machines. - assert build_strategy.bkcl_comm_num == 1, \ - "Baidu Kunlun Communication Library(BKCL) currently do not support multi machines." + # NOTE(liuyuhui) Baidu Kunlun Communication Library(BKCL) currently do not support multi machines. + assert ( + build_strategy.bkcl_comm_num == 1 + ), "Baidu Kunlun Communication Library(BKCL) currently do not support multi machines." for i in range(1, build_strategy.bkcl_comm_num): startup_program.global_block().create_var( name="BKCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().append_op( type="gen_bkcl_id", inputs={}, outputs={"BKCLID": comm_id_var}, attrs={ - "trainers": - trainer_endpoints, - "trainer_id": - trainer_id, - "nccl_comm_num": - build_strategy.nccl_comm_num, - "use_hierarchical_allreduce": - build_strategy.use_hierarchical_allreduce, - "hierarchical_allreduce_inter_ranks": - build_strategy.hierarchical_allreduce_inter_nranks - }) + "trainers": trainer_endpoints, + "trainer_id": trainer_id, + "nccl_comm_num": build_strategy.nccl_comm_num, + "use_hierarchical_allreduce": build_strategy.use_hierarchical_allreduce, + "hierarchical_allreduce_inter_ranks": build_strategy.hierarchical_allreduce_inter_nranks, + }, + ) else: raise ValueError( "comm_id must be generated in paddlepaddle-xpu or paddlepaddle-gpu." 
@@ -147,27 +150,32 @@ class GraphExecutionOptimizer(MetaOptimizerBase): dist_strategy = self.user_defined_strategy local_build_strategy = dist_strategy.build_strategy - local_build_strategy.use_hierarchical_allreduce = \ + local_build_strategy.use_hierarchical_allreduce = ( dist_strategy.use_hierarchical_allreduce - local_build_strategy.hierarchical_allreduce_inter_nranks = \ + ) + local_build_strategy.hierarchical_allreduce_inter_nranks = ( dist_strategy.hierarchical_allreduce_inter_nranks - local_build_strategy.sync_batch_norm = \ - dist_strategy.sync_batch_norm - local_build_strategy.fuse_all_reduce_ops = \ + ) + local_build_strategy.sync_batch_norm = dist_strategy.sync_batch_norm + local_build_strategy.fuse_all_reduce_ops = ( dist_strategy.fuse_all_reduce_ops - local_build_strategy.nccl_comm_num = \ - dist_strategy.nccl_comm_num + ) + local_build_strategy.nccl_comm_num = dist_strategy.nccl_comm_num - gradient_scale_configs = self.user_defined_strategy.gradient_scale_configs + gradient_scale_configs = ( + self.user_defined_strategy.gradient_scale_configs + ) scale_strategys = { 'avg': BuildStrategy.GradientScaleStrategy.CoeffNumDevice, 'sum': BuildStrategy.GradientScaleStrategy.One, 'customized': BuildStrategy.GradientScaleStrategy.Customized, } - assert gradient_scale_configs['scale_strategy'] in scale_strategys, \ - "gradient_scale_configs.scale_strategy must be 'avg', 'sum' or 'customized'" - local_build_strategy.gradient_scale_strategy = \ - scale_strategys[gradient_scale_configs['scale_strategy']] + assert ( + gradient_scale_configs['scale_strategy'] in scale_strategys + ), "gradient_scale_configs.scale_strategy must be 'avg', 'sum' or 'customized'" + local_build_strategy.gradient_scale_strategy = scale_strategys[ + gradient_scale_configs['scale_strategy'] + ] if self.user_defined_strategy.recompute == True: logging.warn( @@ -180,7 +188,9 @@ class GraphExecutionOptimizer(MetaOptimizerBase): node_num = self.role_maker._node_num() if self.role_maker._is_collective: - assert worker_num >= 1, "nccl2 worker_num must >= 1, now:{}" % worker_num + assert worker_num >= 1, ( + "nccl2 worker_num must >= 1, now:{}" % worker_num + ) if worker_num <= 1: # local mode @@ -198,8 +208,8 @@ class GraphExecutionOptimizer(MetaOptimizerBase): sync_allreduce = dist_strategy.sync_nccl_allreduce if sync_allreduce: exe_strategy.num_threads = max( - local_build_strategy.nccl_comm_num + 1, - exe_strategy.num_threads) + local_build_strategy.nccl_comm_num + 1, exe_strategy.num_threads + ) if local_build_strategy.nccl_comm_num > 1: logging.warn( "nccl_comm_num > 1, you may need to set sync_nccl_allreduce=False to ensure that different nccl comms can overlap" @@ -217,17 +227,23 @@ class GraphExecutionOptimizer(MetaOptimizerBase): # NOTE. 
         main_program._nccl_comm_num = local_build_strategy.nccl_comm_num
-        main_program._use_hierarchical_allreduce = local_build_strategy.use_hierarchical_allreduce
-        main_program._hierarchical_allreduce_inter_nranks = local_build_strategy.hierarchical_allreduce_inter_nranks
+        main_program._use_hierarchical_allreduce = (
+            local_build_strategy.use_hierarchical_allreduce
+        )
+        main_program._hierarchical_allreduce_inter_nranks = (
+            local_build_strategy.hierarchical_allreduce_inter_nranks
+        )
         # TODO(guru4elephant): should be an independent optimizer
         if worker_num > 1:
-            self._setup_nccl_op(startup_program, main_program,
-                                local_build_strategy)
+            self._setup_nccl_op(
+                startup_program, main_program, local_build_strategy
+            )
         local_build_strategy.num_trainers = self.role_maker._worker_num()
         local_build_strategy.trainer_id = self.role_maker._worker_index()
-        local_build_strategy.trainers_endpoints = self.role_maker._get_trainer_endpoints(
+        local_build_strategy.trainers_endpoints = (
+            self.role_maker._get_trainer_endpoints()
         )
         local_build_strategy.enable_backward_optimizer_op_deps = True
@@ -237,7 +253,8 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
             loss_name=loss.name,
             build_strategy=local_build_strategy,
             exec_strategy=exe_strategy,
-            share_vars_from=None)
+            share_vars_from=None,
+        )
         return self._compiled_program
@@ -249,15 +266,14 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
         # by default, graph execution strategy is enabled
         return
-    def minimize(self,
-                 loss,
-                 startup_program=None,
-                 parameter_list=None,
-                 no_grad_set=None):
+    def minimize(
+        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
+    ):
         if startup_program == None:
             startup_program = paddle.static.default_startup_program()
-        compiled_program = self._try_to_compile(startup_program,
-                                                loss.block.program, loss)
+        compiled_program = self._try_to_compile(
+            startup_program, loss.block.program, loss
+        )
         loss.block.program._graph = compiled_program
         # just return self.optimizer_ops and self.param_grads
diff --git a/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py
index 3dc5bed03aeac8fbf70419476e6844352e79a478..22f225469c2fcef12e707101959f5c5bca85bc8b 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py
@@ -20,7 +20,6 @@ __all__ = []
 class LambOptimizer(MetaOptimizerBase):
-
     def __init__(self, optimizer):
         super(LambOptimizer, self).__init__(optimizer)
         self.inner_opt = optimizer
@@ -29,11 +28,12 @@ class LambOptimizer(MetaOptimizerBase):
         self.meta_optimizers_white_list = ["GraphExecutionOptimizer"]
         self.meta_optimizers_black_list = []
-    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
-                        user_defined_strategy):
-        super(LambOptimizer,
-              self)._set_basic_info(loss, role_maker, user_defined_optimizer,
-                                    user_defined_strategy)
+    def _set_basic_info(
+        self, loss, role_maker, user_defined_optimizer, user_defined_strategy
+    ):
+        super(LambOptimizer, self)._set_basic_info(
+            loss, role_maker, user_defined_optimizer, user_defined_strategy
+        )
         opt = self.inner_opt
         if not isinstance(opt, AdamOptimizer):
@@ -63,7 +63,8 @@ class LambOptimizer(MetaOptimizerBase):
             regularization=opt.regularization,
             grad_clip=opt._grad_clip,
             exclude_from_weight_decay_fn=_exclude_from_weight_decay_fn,
-            name=opt._name)
+            name=opt._name,
+        )
     def _can_apply(self):
         if not self.role_maker._is_collective:
@@ -72,8 +73,10 @@ class LambOptimizer(MetaOptimizerBase):
         if self.user_defined_strategy.lamb:
             if not isinstance(self.inner_opt, AdamOptimizer):
                 logging.warn(
-                    "lamb need the inner optimizer to be AdamOptimizer optimizer but got {}."
-                    .format(self.inner_opt.type))
+                    "lamb need the inner optimizer to be AdamOptimizer optimizer but got {}.".format(
+                        self.inner_opt.type
+                    )
+                )
                 return False
             return True
         return False
@@ -86,33 +89,34 @@ class LambOptimizer(MetaOptimizerBase):
         dist_strategy.lamb = True
         dist_strategy.lamb_configs = {
             "lamb_weight_decay": 0.01,
-            "exclude_from_weight_decay": []
+            "exclude_from_weight_decay": [],
         }
-    def backward(self,
-                 loss,
-                 startup_program=None,
-                 parameter_list=None,
-                 no_grad_set=None,
-                 callbacks=None):
-        return self.lamb_opt.backward(loss, startup_program, parameter_list,
-                                      no_grad_set, callbacks)
+    def backward(
+        self,
+        loss,
+        startup_program=None,
+        parameter_list=None,
+        no_grad_set=None,
+        callbacks=None,
+    ):
+        return self.lamb_opt.backward(
+            loss, startup_program, parameter_list, no_grad_set, callbacks
+        )
     # the following function will be used by AMP if both LARS and AMP are turn on together.
     def apply_gradients(self, params_grads):
         return self.lamb_opt.apply_gradients(params_grads=params_grads)
     def apply_optimize(self, loss, startup_program, params_grads):
-        return self.lamb_opt.apply_optimize(loss,
-                                            startup_program=startup_program,
-                                            params_grads=params_grads)
-
-    def minimize_impl(self,
-                      loss,
-                      startup_program=None,
-                      parameter_list=None,
-                      no_grad_set=None):
-        optimize_ops, params_grads = \
-            self.lamb_opt.minimize(loss, startup_program,
-                                   parameter_list, no_grad_set)
+        return self.lamb_opt.apply_optimize(
+            loss, startup_program=startup_program, params_grads=params_grads
+        )
+
+    def minimize_impl(
+        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
+    ):
+        optimize_ops, params_grads = self.lamb_opt.minimize(
+            loss, startup_program, parameter_list, no_grad_set
+        )
         return optimize_ops, params_grads
diff --git a/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py
index 44f8fe473e2f982d04ace6b2cc22cc41b47c4e1a..e584783581e4afde0539fd8b46b577d5162d29c1 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py
@@ -19,7 +19,6 @@ __all__ = []
 class LarsOptimizer(MetaOptimizerBase):
-
     def __init__(self, optimizer):
         super(LarsOptimizer, self).__init__(optimizer)
         self.inner_opt = optimizer
@@ -28,11 +27,12 @@ class LarsOptimizer(MetaOptimizerBase):
         self.meta_optimizers_white_list = ["GraphExecutionOptimizer"]
         self.meta_optimizers_black_list = []
-    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
-                        user_defined_strategy):
-        super(LarsOptimizer,
-              self)._set_basic_info(loss, role_maker, user_defined_optimizer,
-                                    user_defined_strategy)
+    def _set_basic_info(
+        self, loss, role_maker, user_defined_optimizer, user_defined_strategy
+    ):
+        super(LarsOptimizer, self)._set_basic_info(
+            loss, role_maker, user_defined_optimizer, user_defined_strategy
+        )
         opt = self.inner_opt
         if not isinstance(opt, Momentum):
@@ -50,7 +50,8 @@ class LarsOptimizer(MetaOptimizerBase):
             grad_clip=opt._grad_clip,
             name=opt._name,
             exclude_from_weight_decay=configs['exclude_from_weight_decay'],
-            epsilon=configs['epsilon'])
+            epsilon=configs['epsilon'],
+        )
     def _can_apply(self):
         if not self.role_maker._is_collective:
@@ -59,8 +60,10 @@ class LarsOptimizer(MetaOptimizerBase):
         if self.user_defined_strategy.lars:
             if not isinstance(self.inner_opt, Momentum):
                 logging.warn(
-                    "lars need the inner optimizer to be Momentum optimizer but got {}."
-                    .format(self.inner_opt.type))
+                    "lars need the inner optimizer to be Momentum optimizer but got {}.".format(
+                        self.inner_opt.type
+                    )
+                )
                 return False
             return True
         return False
@@ -76,30 +79,31 @@ class LarsOptimizer(MetaOptimizerBase):
             "lars_weight_decay": 0.0005,
         }
-    def backward(self,
-                 loss,
-                 startup_program=None,
-                 parameter_list=None,
-                 no_grad_set=None,
-                 callbacks=None):
-        return self.lars_opt.backward(loss, startup_program, parameter_list,
-                                      no_grad_set, callbacks)
+    def backward(
+        self,
+        loss,
+        startup_program=None,
+        parameter_list=None,
+        no_grad_set=None,
+        callbacks=None,
+    ):
+        return self.lars_opt.backward(
+            loss, startup_program, parameter_list, no_grad_set, callbacks
+        )
     # the following function will be used by AMP if both LARS and AMP are turn on together.
     def apply_gradients(self, params_grads):
         return self.lars_opt.apply_gradients(params_grads=params_grads)
     def apply_optimize(self, loss, startup_program, params_grads):
-        return self.lars_opt.apply_optimize(loss,
-                                            startup_program=startup_program,
-                                            params_grads=params_grads)
-
-    def minimize_impl(self,
-                      loss,
-                      startup_program=None,
-                      parameter_list=None,
-                      no_grad_set=None):
-        optimize_ops, params_grads = \
-            self.lars_opt.minimize(loss, startup_program,
-                                   parameter_list, no_grad_set)
+        return self.lars_opt.apply_optimize(
+            loss, startup_program=startup_program, params_grads=params_grads
+        )
+
+    def minimize_impl(
+        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
+    ):
+        optimize_ops, params_grads = self.lars_opt.minimize(
+            loss, startup_program, parameter_list, no_grad_set
+        )
         return optimize_ops, params_grads
diff --git a/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
index 3431ad7e9145c757beadee292483576089bbd9ea..aba540fe7e783ff8b739a4667c8d896f2b8459a5 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py
@@ -22,7 +22,6 @@ __all__ = []
 class LocalSGDOptimizer(MetaOptimizerBase):
-
     def __init__(self, optimizer):
         super(LocalSGDOptimizer, self).__init__(optimizer)
         self.inner_opt = optimizer
@@ -43,10 +42,12 @@ class LocalSGDOptimizer(MetaOptimizerBase):
         if self.role_maker._worker_num() <= 1:
             return False
-        return isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum) \
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum) \
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD) \
+        return (
+            isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum)
+            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
+            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
             or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
+        )
     def _disable_strategy(self, dist_strategy):
         dist_strategy.localsgd = False
@@ -69,11 +70,13 @@ class LocalSGDOptimizer(MetaOptimizerBase):
         p2s = []
         for param in non_dist_params:
-            snapshot = block.create_var(name=self.snapshot_name(param.name),
-                                        shape=param.shape,
-                                        persistable=True,
-                                        stop_gradient=True,
-                                        dtype=param.dtype)
+            snapshot = block.create_var(
+                name=self.snapshot_name(param.name),
+                shape=param.shape,
+                persistable=True,
+                stop_gradient=True,
+                dtype=param.dtype,
+            )
             p2s.append([param, snapshot])
         return p2s
@@ -82,17 
+85,17 @@ class LocalSGDOptimizer(MetaOptimizerBase): for param, snapshot in param2snapshot: layers.assign(param, snapshot) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - minimized = self.inner_opt.minimize(loss, - startup_program=startup_program) + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + minimized = self.inner_opt.minimize( + loss, startup_program=startup_program + ) k_steps_value = self.user_defined_strategy.localsgd_configs['k_steps'] begin_step_value = self.user_defined_strategy.localsgd_configs[ - 'begin_step'] + 'begin_step' + ] if startup_program is None: startup_program = default_startup_program() @@ -107,78 +110,90 @@ class LocalSGDOptimizer(MetaOptimizerBase): p2s = self.create_snapshot_vars(main_block.program) with program_guard(main_block.program, startup_program): step = layers.autoincreased_step_counter(begin=1) - k_steps = layers.create_global_var(name="k_steps", - shape=[1], - value=k_steps_value, - dtype='int64', - persistable=True) - - begin_step = layers.create_global_var(name="begin_step", - shape=[1], - value=begin_step_value, - dtype='int64', - persistable=True) - - last_step = layers.create_global_var(name="last_step", - shape=[1], - value=begin_step_value, - dtype='int64', - persistable=True) + k_steps = layers.create_global_var( + name="k_steps", + shape=[1], + value=k_steps_value, + dtype='int64', + persistable=True, + ) + + begin_step = layers.create_global_var( + name="begin_step", + shape=[1], + value=begin_step_value, + dtype='int64', + persistable=True, + ) + + last_step = layers.create_global_var( + name="last_step", + shape=[1], + value=begin_step_value, + dtype='int64', + persistable=True, + ) def communicate(): sub_block = default_main_program().current_block() ring_id = -1 for param, snapshot in p2s: - sub_block.append_op(type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] - }, - outputs={'Out': [param]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) - sub_block.append_op(type='c_sync_calc_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) + sub_block.append_op( + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) + sub_block.append_op( + type='c_sync_calc_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) ring_id = (ring_id + 1) % self.nrings - sub_block.append_op(type='c_allreduce_sum', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Optimize - }) + sub_block.append_op( + type='c_allreduce_sum', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) for ring_id in range(self.nrings): - sub_block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Optimize - }) + sub_block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) for param, snapshot in p2s: - sub_block.append_op(type='scale', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'scale': - 1.0 / self.role_maker._worker_num(), - OP_ROLE_KEY: - OpRole.Optimize - }) - sub_block.append_op(type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] 
- }, - outputs={'Out': [param]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) - sub_block.append_op(type='assign', - inputs={'X': [param]}, - outputs={'Out': [snapshot]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) + sub_block.append_op( + type='scale', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'scale': 1.0 / self.role_maker._worker_num(), + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + sub_block.append_op( + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) + sub_block.append_op( + type='assign', + inputs={'X': [param]}, + outputs={'Out': [snapshot]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) layers.assign(step, last_step) def begin_localsgd(): @@ -189,13 +204,13 @@ class LocalSGDOptimizer(MetaOptimizerBase): class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(AdaptiveLocalSGDOptimizer, self).__init__(optimizer) self.inner_opt = optimizer self.meta_optimizers_white_list = ['AMPOptimizer'] self.meta_optimizers_black_list = [ - "GraphExecutionOptimizer", "LocalSGDOptimizer" + "GraphExecutionOptimizer", + "LocalSGDOptimizer", ] self.snapshot_key = '@SNAPSHOT' @@ -209,10 +224,12 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): if self.role_maker._worker_num() <= 1: return False - return isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum) \ - or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum) \ - or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD) \ + return ( + isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum) + or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum) + or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD) or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD) + ) def _disable_strategy(self, dist_strategy): dist_strategy.adaptive_localsgd = False @@ -222,7 +239,7 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): dist_strategy.adaptive_localsgd = True dist_strategy.adaptive_localsgd_configs = { "init_k_steps": 1, - "begin_step": 1 + "begin_step": 1, } def snapshot_name(self, param_name): @@ -238,11 +255,13 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): p2s = [] for param in non_dist_params: - snapshot = block.create_var(name=self.snapshot_name(param.name), - shape=param.shape, - persistable=True, - stop_gradient=True, - dtype=param.dtype) + snapshot = block.create_var( + name=self.snapshot_name(param.name), + shape=param.shape, + persistable=True, + stop_gradient=True, + dtype=param.dtype, + ) p2s.append([param, snapshot]) return p2s @@ -252,40 +271,46 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): layers.assign(param, snapshot) def _generate_avg_loss(self, program_block, loss, avg_loss): - program_block.append_op(type='c_allreduce_sum', - inputs={'X': [loss]}, - outputs={'Out': [avg_loss]}, - attrs={ - 'ring_id': 0, - OP_ROLE_KEY: OpRole.Optimize, - 'use_calc_stream': True - }) - program_block.append_op(type='c_sync_calc_stream', - inputs={'X': [avg_loss]}, - outputs={'Out': [avg_loss]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) - - program_block.append_op(type='scale', - inputs={'X': [avg_loss]}, - outputs={'Out': [avg_loss]}, - attrs={ - 'scale': - 1.0 / self.role_maker._worker_num(), - OP_ROLE_KEY: OpRole.Optimize - }) - - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - minimized = self.inner_opt.minimize(loss, - startup_program=startup_program) + program_block.append_op( + type='c_allreduce_sum', + 
inputs={'X': [loss]}, + outputs={'Out': [avg_loss]}, + attrs={ + 'ring_id': 0, + OP_ROLE_KEY: OpRole.Optimize, + 'use_calc_stream': True, + }, + ) + program_block.append_op( + type='c_sync_calc_stream', + inputs={'X': [avg_loss]}, + outputs={'Out': [avg_loss]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) + + program_block.append_op( + type='scale', + inputs={'X': [avg_loss]}, + outputs={'Out': [avg_loss]}, + attrs={ + 'scale': 1.0 / self.role_maker._worker_num(), + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + minimized = self.inner_opt.minimize( + loss, startup_program=startup_program + ) init_k_steps = self.user_defined_strategy.adaptive_localsgd_configs[ - 'init_k_steps'] + 'init_k_steps' + ] begin_step_value = self.user_defined_strategy.adaptive_localsgd_configs[ - 'begin_step'] + 'begin_step' + ] if startup_program is None: startup_program = default_startup_program() @@ -301,41 +326,53 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): with program_guard(main_block.program, startup_program): step = layers.autoincreased_step_counter(begin=1) - k_steps = layers.create_global_var(name="k_steps", - shape=[1], - value=int(init_k_steps), - dtype='int64', - persistable=True) - - begin_step = layers.create_global_var(name="begin_step", - shape=[1], - value=int(begin_step_value), - dtype='int64', - persistable=True) - - last_step = layers.create_global_var(name="last_step", - shape=[1], - value=int(0), - dtype='int64', - persistable=True) - - avg_loss = layers.create_global_var(name="avg_loss", - shape=[1], - value=float(0), - dtype=loss.dtype, - persistable=True) - - lr_0 = layers.create_global_var(name="lr_0", - shape=[1], - value=float(0), - dtype='float32', - persistable=True) - - loss_0 = layers.create_global_var(name="loss_0", - shape=[1], - value=float(0), - dtype='float32', - persistable=True) + k_steps = layers.create_global_var( + name="k_steps", + shape=[1], + value=int(init_k_steps), + dtype='int64', + persistable=True, + ) + + begin_step = layers.create_global_var( + name="begin_step", + shape=[1], + value=int(begin_step_value), + dtype='int64', + persistable=True, + ) + + last_step = layers.create_global_var( + name="last_step", + shape=[1], + value=int(0), + dtype='int64', + persistable=True, + ) + + avg_loss = layers.create_global_var( + name="avg_loss", + shape=[1], + value=float(0), + dtype=loss.dtype, + persistable=True, + ) + + lr_0 = layers.create_global_var( + name="lr_0", + shape=[1], + value=float(0), + dtype='float32', + persistable=True, + ) + + loss_0 = layers.create_global_var( + name="loss_0", + shape=[1], + value=float(0), + dtype='float32', + persistable=True, + ) global_lr = self.inner_opt._global_learning_rate() @@ -350,75 +387,90 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase): sub_block = default_main_program().current_block() ring_id = -1 for param, snapshot in p2s: - sub_block.append_op(type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] - }, - outputs={'Out': [param]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) - sub_block.append_op(type='c_sync_calc_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) + sub_block.append_op( + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) + sub_block.append_op( + type='c_sync_calc_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={OP_ROLE_KEY: 
OpRole.Optimize}, + ) ring_id = (ring_id + 1) % self.nrings - sub_block.append_op(type='c_allreduce_sum', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Optimize - }) + sub_block.append_op( + type='c_allreduce_sum', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) for ring_id in range(self.nrings): - sub_block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Optimize - }) + sub_block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) for param, snapshot in p2s: - sub_block.append_op(type='scale', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'scale': - 1.0 / self.role_maker._worker_num(), - OP_ROLE_KEY: - OpRole.Optimize - }) - sub_block.append_op(type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] - }, - outputs={'Out': [param]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) - sub_block.append_op(type='assign', - inputs={'X': [param]}, - outputs={'Out': [snapshot]}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) + sub_block.append_op( + type='scale', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'scale': 1.0 / self.role_maker._worker_num(), + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + sub_block.append_op( + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) + sub_block.append_op( + type='assign', + inputs={'X': [param]}, + outputs={'Out': [snapshot]}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) layers.assign(step, last_step) def communicate_avg_loss(): communicate() self._generate_avg_loss(main_block, loss, avg_loss) - next_local_steps = layers.cast(layers.ceil( - layers.sqrt(lr_0 * avg_loss / (global_lr * loss_0) * - float(init_k_steps))), - dtype='int64') - max_local_steps = layers.fill_constant(shape=[1], - dtype='int64', - value=16) - min_local_steps = layers.fill_constant(shape=[1], - dtype='int64', - value=1) + next_local_steps = layers.cast( + layers.ceil( + layers.sqrt( + lr_0 + * avg_loss + / (global_lr * loss_0) + * float(init_k_steps) + ) + ), + dtype='int64', + ) + max_local_steps = layers.fill_constant( + shape=[1], dtype='int64', value=16 + ) + min_local_steps = layers.fill_constant( + shape=[1], dtype='int64', value=1 + ) next_local_steps = layers.elementwise_min( - next_local_steps, max_local_steps) + next_local_steps, max_local_steps + ) next_local_steps = layers.elementwise_max( - next_local_steps, min_local_steps) + next_local_steps, min_local_steps + ) layers.assign(next_local_steps, k_steps) def begin_localsgd(): diff --git a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py index 35e11221b6f638b3e2d78ec79d7b9f71477534f6..87085a322c30370f7e67868e07423d09980d7de8 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py +++ b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py @@ -18,7 +18,6 @@ __all__ = [] class MetaOptimizerBase(Optimizer): - def __init__(self, optimizer): self.inner_opt = optimizer self._learning_rate = self.inner_opt._learning_rate @@ -26,8 +25,9 @@ class MetaOptimizerBase(Optimizer): self.meta_optimizers_white_list = [] self.meta_optimizers_black_list = 
[] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): self.loss = loss self.role_maker = role_maker self.user_defined_optimizer = user_defined_optimizer @@ -50,52 +50,57 @@ class MetaOptimizerBase(Optimizer): def _disable_strategy(self, dist_strategy): raise NotImplementedError( "you should implement disable strategy in {}".format( - type(self).__name__)) + type(self).__name__ + ) + ) def _enable_strategy(self, dist_strategy, context=None): raise NotImplementedError( "you should implement enable strategy in {}".format( - type(self).__name__)) + type(self).__name__ + ) + ) def apply_gradients(self, params_grads): return self.inner_opt.apply_gradients(params_grads=params_grads) - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): - return self.inner_opt.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): + return self.inner_opt.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_optimize(self, loss, startup_program, params_grads): - return self.inner_opt.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) - - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) - - optimize_ops = self.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + return self.inner_opt.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) + + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) + + optimize_ops = self.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - optimize_ops, params_grads = self.minimize_impl(loss, startup_program, - parameter_list, - no_grad_set) + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + optimize_ops, params_grads = self.minimize_impl( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py index 42784113822c39aa2ebaabff373844bb5c603f1a..f94215aad09381e0d69d06688ed306506946585d 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_graph_optimizer.py @@ -18,7 +18,6 @@ __all__ = [] class ParameterServerGraphOptimizer(ParameterServerOptimizer): - def __init__(self, optimizer): super(ParameterServerGraphOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -59,18 +58,18 @@ class ParameterServerGraphOptimizer(ParameterServerOptimizer): self._compiled_program = compiler.CompiledProgram(main_program) - 
self._compiled_program.with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - exec_strategy=exec_strategy, - share_vars_from=None) + self._compiled_program.with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + share_vars_from=None, + ) return self._compiled_program - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): program = loss.block.program compiled_program = self._try_to_compile(program, loss) program._graph = compiled_program diff --git a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py index c04215d45656c98444f7e8d4a3f2a01ca6a4e169..d18c5d1fdcb7ac0fa161e96ad8934f186a27fae6 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py @@ -24,23 +24,24 @@ __all__ = [] class ParameterServerOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(ParameterServerOptimizer, self).__init__(optimizer) self.inner_opt = optimizer # we do not allow meta optimizer to be inner optimizer currently self.meta_optimizers_white_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(ParameterServerOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(ParameterServerOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) - #self.micro_batch_size = user_defined_strategy.pipeline_configs[ + # self.micro_batch_size = user_defined_strategy.pipeline_configs[ # 'micro_batch_size'] self.num_microbatches = user_defined_strategy.pipeline_configs[ - 'accumulate_steps'] + 'accumulate_steps' + ] def _is_graph_out(self): return False @@ -66,11 +67,13 @@ class ParameterServerOptimizer(MetaOptimizerBase): 'trainer_id': trainer_id, 'num_trainers': num_trainers, 'current_endpoint': current_endpoint, - 'trainer_endpoints': trainer_endpoints + 'trainer_endpoints': trainer_endpoints, } def _get_distributed_strategy(self): - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, + ) k_steps = self.user_defined_strategy.a_sync_configs["k_steps"] strategy = None @@ -90,7 +93,9 @@ class ParameterServerOptimizer(MetaOptimizerBase): return strategy def _build_trainer_programs(self, compiled_config): - from paddle.fluid.incubate.fleet.parameter_server.ir import trainer_pass as worker + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + trainer_pass as worker, + ) _main = compiled_config.origin_main_program.clone() _startup = compiled_config.origin_startup_program.clone() @@ -98,49 +103,66 @@ class ParameterServerOptimizer(MetaOptimizerBase): use_ps_gpu = self.user_defined_strategy.a_sync_configs["use_ps_gpu"] if not compiled_config.is_geo_mode(): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _add_lr_decay_table_pass + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _add_lr_decay_table_pass, + 
) + _add_lr_decay_table_pass( - _main, compiled_config, - self.user_defined_strategy.a_sync_configs["lr_decay_steps"]) + _main, + compiled_config, + self.user_defined_strategy.a_sync_configs["lr_decay_steps"], + ) # for main program - _main = worker.distributed_ops_pass(_main, compiled_config, - use_ps_gpu) + _main = worker.distributed_ops_pass( + _main, compiled_config, use_ps_gpu + ) if not use_ps_gpu: _main = worker.delete_optimizer_pass(_main, compiled_config) _main = worker.append_send_ops_pass(_main, compiled_config) _startup = worker.delete_extra_optimizes_pass( - _startup, compiled_config) + _startup, compiled_config + ) # for startup program _startup = worker.fake_init_ops_pass(_startup, compiled_config) if use_ps_gpu: _main = worker.ps_gpu_pass(_main) - from paddle.fluid.transpiler.collective import SingleProcessMultiThread + from paddle.fluid.transpiler.collective import ( + SingleProcessMultiThread, + ) + t = SingleProcessMultiThread() env = self.get_dist_env() - t.transpile(startup_program=_startup, - main_program=_main, - rank=env["trainer_id"], - endpoints=env["trainer_endpoints"], - current_endpoint=env['current_endpoint'], - wait_port=False) + t.transpile( + startup_program=_startup, + main_program=_main, + rank=env["trainer_id"], + endpoints=env["trainer_endpoints"], + current_endpoint=env['current_endpoint'], + wait_port=False, + ) compiled_config.set_origin_ps_main_program(_main) compiled_config.set_origin_ps_startup_program(_startup) # for heter program if self.role_maker._is_heter_parameter_server_mode: - from paddle.fluid.incubate.fleet.parameter_server.ir import heter_trainer_pass as heter_worker + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + heter_trainer_pass as heter_worker, + ) + if self.role_maker._is_heter_worker(): # for heter worker stage_id = self.role_maker._get_stage_id() device = self.role_maker._heter_device_type().lower() _main = heter_worker.split_heter_worker_ops_pass( - _main, compiled_config, stage_id, device) + _main, compiled_config, stage_id, device + ) else: # for default worker _main = heter_worker.split_trainer_ops_pass( - _main, compiled_config) + _main, compiled_config + ) else: _main = worker.append_send_ops_pass(_main, compiled_config) _startup = _startup @@ -148,7 +170,8 @@ class ParameterServerOptimizer(MetaOptimizerBase): compiled_config.set_origin_ps_startup_program(_startup) launch_barrier = self.user_defined_strategy.a_sync_configs[ - "launch_barrier"] + "launch_barrier" + ] launch_barrier_flag = int(os.getenv("FLAGS_LAUNCH_BARRIER", "1")) if launch_barrier and launch_barrier_flag: # for trainer wait server ready @@ -165,11 +188,16 @@ class ParameterServerOptimizer(MetaOptimizerBase): _main = fluid.Program() _startup = fluid.Program() - from paddle.fluid.incubate.fleet.parameter_server.ir import pserver_pass as server + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + pserver_pass as server, + ) if not compiled_config.is_geo_mode(): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_optimize_ops, + ) + is_sgd_adam = False main_program = compiled_config.get_origin_main_program() @@ -178,11 +206,16 @@ class ParameterServerOptimizer(MetaOptimizerBase): if len(ops) == 0: return _main, _startup - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _add_lr_decay_table_pass + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _add_lr_decay_table_pass, + ) + 
lr_decay_steps = self.user_defined_strategy.a_sync_configs[ - "lr_decay_steps"] - _add_lr_decay_table_pass(main_program, compiled_config, - lr_decay_steps) + "lr_decay_steps" + ] + _add_lr_decay_table_pass( + main_program, compiled_config, lr_decay_steps + ) for op in ops: if op.type in ["sgd", "adam"]: @@ -195,37 +228,44 @@ class ParameterServerOptimizer(MetaOptimizerBase): _main = server.add_listen_and_serv_pass(_main, compiled_config) _main = server.add_rpc_global_flags_pass(_main, compiled_config) _main = server.add_optimizer_pass(_main, compiled_config) - _main = server.large_scale_sparse_pass(_main, _main, - compiled_config, False) + _main = server.large_scale_sparse_pass( + _main, _main, compiled_config, False + ) _startup = server.build_pserver_startup_program_pass( - _startup, _main, compiled_config) - _startup = server.large_scale_sparse_pass(_startup, _main, - compiled_config, True) + _startup, _main, compiled_config + ) + _startup = server.large_scale_sparse_pass( + _startup, _main, compiled_config, True + ) if not compiled_config.is_sync_mode(): _main = server.delete_unused_in_main_pass( - _main, compiled_config) + _main, compiled_config + ) _startup = server.delete_unused_in_startup_pass( - _startup, _main, compiled_config) + _startup, _main, compiled_config + ) else: _main = server.add_listen_and_serv_pass(_main, compiled_config) _main = server.add_rpc_global_flags_pass(_main, compiled_config) _main = server.add_geo_optimizer_pass(_main, compiled_config) _startup = server.build_pserver_startup_program_pass( - _startup, _main, compiled_config) + _startup, _main, compiled_config + ) _startup = server.delete_unused_in_startup_pass( - _startup, _main, compiled_config) + _startup, _main, compiled_config + ) return _main, _startup def _can_apply_geo(self, dist_strategy, program): - def get_sys_free_mem(): plat = platform.system() if platform.system() == "Darwin": - vm = subprocess.Popen(['vm_stat'], - stdout=subprocess.PIPE).communicate()[0] + vm = subprocess.Popen( + ['vm_stat'], stdout=subprocess.PIPE + ).communicate()[0] # Process vm_stat vmLines = vm.split('\n') sep = re.compile(r':[\s]+') @@ -233,8 +273,9 @@ class ParameterServerOptimizer(MetaOptimizerBase): for row in range(1, len(vmLines) - 2): rowText = vmLines[row].strip() rowElements = sep.split(rowText) - vmStats[(rowElements[0])] = int( - rowElements[1].strip(r'\.')) * 4096 + vmStats[(rowElements[0])] = ( + int(rowElements[1].strip(r'\.')) * 4096 + ) return vmStats["Pages free"] elif platform.system() == "Linux": mems = {} @@ -246,22 +287,27 @@ class ParameterServerOptimizer(MetaOptimizerBase): return free else: raise ValueError( - "%s platform is unsupported is parameter server optimizer" % - (platform.system())) + "%s platform is unsupported is parameter server optimizer" + % (platform.system()) + ) if not isinstance(self.inner_opt, fluid.optimizer.SGDOptimizer): return False free = get_sys_free_mem() - from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + vars_metatools, + ) processed_var_names = set(["@EMPTY@"]) param_memory_size = 0 for varname in program.global_block().vars: var = program.global_block().vars[varname] - if not var.persistable or var.desc.type( - ) != core.VarDesc.VarType.LOD_TENSOR: + if ( + not var.persistable + or var.desc.type() != core.VarDesc.VarType.LOD_TENSOR + ): continue param = vars_metatools.create_var_struct(var) param_memory_size += param.m_size @@ -287,15 +333,18 @@ class 
ParameterServerOptimizer(MetaOptimizerBase): if x < 0: if neg_dim_count >= 1: raise ValueError( - "Var %s has more than one negative dim." % - (var_name)) + "Var %s has more than one negative dim." + % (var_name) + ) neg_dim_count += 1 - data_count *= (-x) + data_count *= -x else: data_count *= x program_tmp_vars[var_name] = ( - data_count, neg_dim_count, - vars_metatools.dtype_to_size[var.dtype]) + data_count, + neg_dim_count, + vars_metatools.dtype_to_size[var.dtype], + ) for varname in program_tmp_vars: data_count, neg_dim_count, type_size = program_tmp_vars[varname] @@ -309,27 +358,32 @@ class ParameterServerOptimizer(MetaOptimizerBase): else: return False - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - self.inner_opt.minimize(loss, startup_program, parameter_list, - no_grad_set) + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + self.inner_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) strategy = self._get_distributed_strategy() _origin_main_program = loss.block.program _origin_startup_program = startup_program - from paddle.fluid.incubate.fleet.parameter_server.ir import public as public - - compiled_config = public.CompileTimeStrategy(_origin_main_program, - _origin_startup_program, - strategy, self.role_maker) + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + public as public, + ) + + compiled_config = public.CompileTimeStrategy( + _origin_main_program, + _origin_startup_program, + strategy, + self.role_maker, + ) compiled_config.strategy = strategy if self.role_maker._is_worker() or self.role_maker._is_heter_worker(): main_program, startup_program = self._build_trainer_programs( - compiled_config) + compiled_config + ) if self.role_maker._is_heter_parameter_server_mode: _origin_startup_program._heter_pipeline_opt = { "startup_program": startup_program, @@ -340,12 +394,12 @@ class ParameterServerOptimizer(MetaOptimizerBase): loss.block.program._heter_pipeline_opt = { "trainer": "HeterPipelineTrainer", "device_worker": "HeterSection", - "trainers": self.role_maker._get_stage_trainers( - ), ## trainer num in each stage + "trainers": self.role_maker._get_stage_trainers(), ## trainer num in each stage "trainer_id": int(self.role_maker._role_id()), "pipeline_stage": int(self.role_maker._get_stage_id()) - 1, - "num_pipeline_stages": - int(self.role_maker._get_num_stage()), + "num_pipeline_stages": int( + self.role_maker._get_num_stage() + ), "section_program": main_program, "num_microbatches": self.num_microbatches, "heter_place": self.role_maker._heter_device(), @@ -356,13 +410,14 @@ class ParameterServerOptimizer(MetaOptimizerBase): elif self.role_maker._is_server(): main_program, startup_program = self._build_pserver_programs( - compiled_config) + compiled_config + ) loss.block.program = main_program fluid.framework.switch_startup_program(startup_program) return None, None def _disable_strategy(self, dist_strategy): - #if self.role_maker._is_heter_parameter_server_mode: + # if self.role_maker._is_heter_parameter_server_mode: # dist_strategy.pipeline = False # dist_strategy.pipeline_configs = { # "micro_batch_size": 1, @@ -374,7 +429,7 @@ class ParameterServerOptimizer(MetaOptimizerBase): dist_strategy.a_sync_configs = a_sync_configs def _enable_strategy(self, dist_strategy, context): - #if self.role_maker._is_heter_parameter_server_mode: + # if self.role_maker._is_heter_parameter_server_mode: # dist_strategy.pipeline = True # 
dist_strategy.pipeline_configs = { # "micro_batch_size": 1, @@ -387,8 +442,9 @@ class ParameterServerOptimizer(MetaOptimizerBase): dist_strategy.a_sync = True a_sync_configs = dist_strategy.a_sync_configs - is_geo = self._can_apply_geo(dist_strategy, - context["origin_main_program"]) + is_geo = self._can_apply_geo( + dist_strategy, context["origin_main_program"] + ) if is_geo: a_sync_configs["k_steps"] = 800 diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py index 690ccdfea5f0cfb5cca126971f05532577b81ef3..dfb8fe5b22438128af5489d135c10b6e643f8700 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py @@ -14,13 +14,19 @@ import paddle.fluid as fluid from paddle.fluid.optimizer import PipelineOptimizer as PO from .meta_optimizer_base import MetaOptimizerBase -from .common import CollectiveHelper, OP_ROLE_KEY, OP_ROLE_VAR_KEY, OpRole, is_backward_op, is_loss_grad_op +from .common import ( + CollectiveHelper, + OP_ROLE_KEY, + OP_ROLE_VAR_KEY, + OpRole, + is_backward_op, + is_loss_grad_op, +) __all__ = [] class PipelineOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(PipelineOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -35,17 +41,21 @@ class PipelineOptimizer(MetaOptimizerBase): self.dp_ring_id = 2 self.start_pipeline_ring_id = 20 # Just a magic number - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(PipelineOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(PipelineOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) self.micro_batch_size = user_defined_strategy.pipeline_configs[ - 'micro_batch_size'] + 'micro_batch_size' + ] self.num_microbatches = user_defined_strategy.pipeline_configs[ - 'accumulate_steps'] + 'accumulate_steps' + ] self.schedule_mode = user_defined_strategy.pipeline_configs[ - 'schedule_mode'] + 'schedule_mode' + ] self.use_sharding = user_defined_strategy.sharding def _can_apply(self): @@ -83,23 +93,25 @@ class PipelineOptimizer(MetaOptimizerBase): if param.is_distributed: continue - block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': 0, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': 0, + OP_ROLE_KEY: OpRole.Forward, + }, + ) - if not param: return # no parameter on this device - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + if not param: + return # no parameter on this device + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Forward}, + ) def _get_process_group_info(self): # global ring info @@ -121,12 +133,16 @@ class PipelineOptimizer(MetaOptimizerBase): self._get_process_group_info() collective_helper = CollectiveHelper(self.role_maker, wait_port=False) # Create global ring for all gpus (ring_id = 0) - collective_helper._init_communicator(self.startup_program, - 
self.current_endpoint, - self.global_endpoints, - self.global_rank, - self.global_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self.startup_program, + self.current_endpoint, + self.global_endpoints, + self.global_rank, + self.global_ring_id, + True, + self.global_ring_id, + True, + ) # Create pipeline rings if self.inner_parallelism > 1: pipeline_id = self.rank // self.inner_parallelism @@ -139,41 +155,62 @@ class PipelineOptimizer(MetaOptimizerBase): second_node = pair[1] + start_index if self.rank != first_node and self.rank != second_node: collective_helper._init_communicator( - self.startup_program, None, None, None, None, False, - self.global_ring_id, True) + self.startup_program, + None, + None, + None, + None, + False, + self.global_ring_id, + True, + ) continue pipeline_endpoints = [ - self.endpoints[first_node], self.endpoints[second_node] + self.endpoints[first_node], + self.endpoints[second_node], ] pipeline_rank = 0 if self.rank == first_node else 1 pipeline_nranks = 2 - collective_helper._init_communicator(self.startup_program, - self.current_endpoint, - pipeline_endpoints, - pipeline_rank, ring_id, - False, self.global_ring_id, - True) + collective_helper._init_communicator( + self.startup_program, + self.current_endpoint, + pipeline_endpoints, + pipeline_rank, + ring_id, + False, + self.global_ring_id, + True, + ) # Create dp rings if self.pipeline_num > 1: collective_helper._init_communicator( - self.startup_program, self.current_endpoint, self.dp_endpoints, - self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True) + self.startup_program, + self.current_endpoint, + self.dp_endpoints, + self.dp_rank, + self.dp_ring_id, + True, + self.global_ring_id, + True, + ) self._broadcast_params(self.dp_ring_id) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self.endpoints = self.role_maker._get_trainer_endpoints() self.current_endpoint = self.endpoints[self.role_maker._worker_index()] self.rank = self.role_maker._worker_index() self.nranks = self.role_maker._worker_num() - self.wrapped_opt = PO(self.inner_opt, - num_microbatches=self.num_microbatches) - orig_startup_program = startup_program if startup_program else fluid.default_startup_program( + self.wrapped_opt = PO( + self.inner_opt, num_microbatches=self.num_microbatches + ) + orig_startup_program = ( + startup_program + if startup_program + else fluid.default_startup_program() ) block = loss.block program = block.program @@ -187,10 +224,18 @@ class PipelineOptimizer(MetaOptimizerBase): program._pipeline_opt['use_sharding'] = False program._pipeline_opt['mp_degree'] = 1 program._pipeline_opt['mp_rank'] = 0 - optimize_ops, params_grads, prog_list, pp_pair, ring_map = self.wrapped_opt.minimize( - loss, startup_program, parameter_list, no_grad_set) + ( + optimize_ops, + params_grads, + prog_list, + pp_pair, + ring_map, + ) = self.wrapped_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) self.startup_program = orig_startup_program._pipeline_opt[ - 'startup_program'] + 'startup_program' + ] self.inner_parallelism = program._pipeline_opt['inner_parallelism'] assert self.nranks % self.inner_parallelism == 0 assert prog_list @@ -217,18 +262,21 @@ class PipelineOptimizer(MetaOptimizerBase): for idx, op in reversed(list(enumerate(block.ops))): if is_loss_grad_op(op): loss_grad_var = block.vars[op.output_arg_names[0]] - 
block._insert_op(idx + 1, - type='scale', - inputs={'X': loss_grad_var}, - outputs={'Out': loss_grad_var}, - attrs={ - 'scale': 1.0 / pipeline_num, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx + 1, + type='scale', + inputs={'X': loss_grad_var}, + outputs={'Out': loss_grad_var}, + attrs={ + 'scale': 1.0 / pipeline_num, + OP_ROLE_KEY: OpRole.Backward, + }, + ) def _insert_allreduce_ops(self, ring_id): - block = self.main_program._pipeline_opt['section_program'].global_block( - ) + block = self.main_program._pipeline_opt[ + 'section_program' + ].global_block() origin_block = self.main_program.global_block() grad = None processed_param_name = set() @@ -237,9 +285,9 @@ class PipelineOptimizer(MetaOptimizerBase): if is_backward_op(op) and not first_optimize_op_idx: first_optimize_op_idx = idx + 1 # no optimize phase - if first_optimize_op_idx == len(block.ops): return - if is_backward_op(op) and \ - OP_ROLE_VAR_KEY in op.attr_names: + if first_optimize_op_idx == len(block.ops): + return + if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names: op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY] if len(op_role_var) == 0: continue @@ -248,21 +296,25 @@ class PipelineOptimizer(MetaOptimizerBase): for i in range(0, len(op_role_var), 2): param_name = op_role_var[i] param = block.vars[op_role_var[i]] - if param_name in processed_param_name: continue + if param_name in processed_param_name: + continue processed_param_name.add(param_name) grad_name = op_role_var[i + 1] - if 'MERGED' not in grad_name: grad_name += '@MERGED' + if 'MERGED' not in grad_name: + grad_name += '@MERGED' grad = block.vars[grad_name] origin_param = origin_block.vars[op_role_var[i]] if origin_param.is_distributed: continue - block._insert_op(first_optimize_op_idx + offset, - type='c_allreduce_sum', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Optimize - }) + block._insert_op( + first_optimize_op_idx + offset, + type='c_allreduce_sum', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py index 523a6741102937a7de11b89b6b6e827e022f7a16..000ef98bcaec459299d063282a62a76068047c29 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py @@ -19,24 +19,31 @@ import subprocess import re import os import platform -from paddle.distributed.ps.utils.public import TrainerRuntimeConfig, build_var_distributed, dtype_to_size, get_dist_env, get_var_mem_size, logger +from paddle.distributed.ps.utils.public import ( + TrainerRuntimeConfig, + build_var_distributed, + dtype_to_size, + get_dist_env, + get_var_mem_size, + logger, +) from paddle.distributed.passes import PassContext from paddle.distributed.ps.utils.ps_factory import PsProgramBuilderFactory class ParameterServerOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(ParameterServerOptimizer, self).__init__(optimizer) self.inner_opt = optimizer # we do not allow meta optimizer to be inner optimizer currently self.meta_optimizers_white_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(ParameterServerOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) 
+ def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(ParameterServerOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _set_origin_programs(self, losses): self.origin_main_programs = [] @@ -66,31 +73,40 @@ class ParameterServerOptimizer(MetaOptimizerBase): logger.info("ps_mode: {}".format(attrs['ps_mode'])) attrs['role_maker'] = self.role_maker attrs[ - 'is_heter_ps_mode'] = self.role_maker._is_heter_parameter_server_mode + 'is_heter_ps_mode' + ] = self.role_maker._is_heter_parameter_server_mode attrs['is_worker'] = self.role_maker._is_worker() attrs['is_server'] = self.role_maker._is_server() attrs['is_heter_worker'] = self.role_maker._is_heter_worker() - logger.info("this process is heter? {}".format( - attrs['is_heter_worker'])) + logger.info( + "this process is heter? {}".format(attrs['is_heter_worker']) + ) attrs['use_ps_gpu'] = self.user_defined_strategy.a_sync_configs[ - "use_ps_gpu"] + "use_ps_gpu" + ] attrs['lr_decay_steps'] = self.user_defined_strategy.a_sync_configs[ - "lr_decay_steps"] + "lr_decay_steps" + ] # FL attrs['local_sparse'] = attrs[ - "user_defined_strategy"].trainer_desc_configs["local_sparse"] + "user_defined_strategy" + ].trainer_desc_configs["local_sparse"] attrs['remote_sparse'] = attrs[ - "user_defined_strategy"].trainer_desc_configs["remote_sparse"] + "user_defined_strategy" + ].trainer_desc_configs["remote_sparse"] attrs['is_fl_ps_mode'] = self.user_defined_strategy.is_fl_ps_mode attrs[ - 'with_coordinator'] = self.user_defined_strategy.is_with_coordinator + 'with_coordinator' + ] = self.user_defined_strategy.is_with_coordinator attrs['k_steps'] = self.user_defined_strategy.a_sync_configs["k_steps"] attrs['launch_barrier'] = self.user_defined_strategy.a_sync_configs[ - "launch_barrier"] + "launch_barrier" + ] attrs['launch_barrier_flag'] = int( - os.getenv("FLAGS_LAUNCH_BARRIER", "1")) + os.getenv("FLAGS_LAUNCH_BARRIER", "1") + ) build_var_distributed(attrs) @@ -111,31 +127,32 @@ class ParameterServerOptimizer(MetaOptimizerBase): k_steps = self.user_defined_strategy.a_sync_configs["k_steps"] return True if k_steps >= 0 else False - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - self.inner_opt.minimize(loss, startup_program, parameter_list, - no_grad_set) + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + self.inner_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) if startup_program == None: startup_program = paddle.static.default_startup_program() - -# print("program after inner optimizer minimize:", -# str(loss.block.program)) + # print("program after inner optimizer minimize:", + # str(loss.block.program)) self._set_origin_programs([loss]) self._init_ps_pass_context(loss, startup_program) ps_builder = PsProgramBuilderFactory()._create_ps_program_builder( - self.pass_ctx) + self.pass_ctx + ) ps_builder._build_programs() return None, None - def minimize_losses_impl(self, - losses, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_losses_impl( + self, + losses, + startup_program=None, + parameter_list=None, + no_grad_set=None, + ): if parameter_list is None: parameter_list = [None] * len(losses) for idx, loss in enumerate(losses): @@ -148,18 +165,19 @@ class ParameterServerOptimizer(MetaOptimizerBase): startup_prog = startup_program[idx] self._init_ps_pass_context(loss, startup_prog) 
ps_builder = PsProgramBuilderFactory()._create_ps_program_builder( - self.pass_ctx) + self.pass_ctx + ) ps_builder._build_programs() startup_program[idx] = self.pass_ctx._attrs['cloned_startup'] return None, None def _can_apply_geo(self, program): - def get_sys_free_mem(): plat = platform.system() if platform.system() == "Darwin": - vm = subprocess.Popen(['vm_stat'], - stdout=subprocess.PIPE).communicate()[0] + vm = subprocess.Popen( + ['vm_stat'], stdout=subprocess.PIPE + ).communicate()[0] # Process vm_stat vmLines = vm.split('\n') sep = re.compile(r':[\s]+') @@ -167,8 +185,9 @@ class ParameterServerOptimizer(MetaOptimizerBase): for row in range(1, len(vmLines) - 2): rowText = vmLines[row].strip() rowElements = sep.split(rowText) - vmStats[(rowElements[0])] = int( - rowElements[1].strip(r'\.')) * 4096 + vmStats[(rowElements[0])] = ( + int(rowElements[1].strip(r'\.')) * 4096 + ) return vmStats["Pages free"] elif platform.system() == "Linux": mems = {} @@ -180,8 +199,9 @@ class ParameterServerOptimizer(MetaOptimizerBase): return free else: raise ValueError( - "%s platform is unsupported is parameter server optimizer" % - (platform.system())) + "%s platform is unsupported is parameter server optimizer" + % (platform.system()) + ) if not isinstance(self.inner_opt, fluid.optimizer.SGDOptimizer): return False @@ -191,8 +211,10 @@ class ParameterServerOptimizer(MetaOptimizerBase): param_memory_size = 0 for varname in program.global_block().vars: var = program.global_block().vars[varname] - if not var.persistable or var.desc.type( - ) != core.VarDesc.VarType.LOD_TENSOR: + if ( + not var.persistable + or var.desc.type() != core.VarDesc.VarType.LOD_TENSOR + ): continue param_memory_size += get_var_mem_size(var) processed_var_names.add(varname) @@ -217,14 +239,18 @@ class ParameterServerOptimizer(MetaOptimizerBase): if x < 0: if neg_dim_count >= 1: raise ValueError( - "Var %s has more than one negative dim." % - (var_name)) + "Var %s has more than one negative dim." 
+ % (var_name) + ) neg_dim_count += 1 - data_count *= (-x) + data_count *= -x else: data_count *= x - program_tmp_vars[var_name] = (data_count, neg_dim_count, - dtype_to_size[var.dtype]) + program_tmp_vars[var_name] = ( + data_count, + neg_dim_count, + dtype_to_size[var.dtype], + ) for varname in program_tmp_vars: data_count, neg_dim_count, type_size = program_tmp_vars[varname] diff --git a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py index 9c7f213105edae68846b4256c204c5dbe195fa73..6ae89a9754ebdef406df51d3414918c71a0d6854 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py @@ -14,11 +14,18 @@ import paddle.fluid as fluid from paddle.fluid import core, unique_name from .meta_optimizer_base import MetaOptimizerBase -from .common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY, CollectiveHelper, is_loss_grad_op, is_backward_op, is_optimizer_op +from .common import ( + OpRole, + OP_ROLE_KEY, + OP_ROLE_VAR_KEY, + CollectiveHelper, + is_loss_grad_op, + is_backward_op, + is_optimizer_op, +) class RawProgramOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(RawProgramOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -36,16 +43,23 @@ class RawProgramOptimizer(MetaOptimizerBase): ] self.global_ring_id = 0 - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(RawProgramOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) - self.without_graph_optimization = user_defined_strategy.without_graph_optimization + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(RawProgramOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) + self.without_graph_optimization = ( + user_defined_strategy.without_graph_optimization + ) self.fuse_all_reduce_ops = user_defined_strategy.fuse_all_reduce_ops if self.fuse_all_reduce_ops: - self.fuse_grad_size_in_num = user_defined_strategy.fuse_grad_size_in_num - self.calc_comm_same_stream = user_defined_strategy._calc_comm_same_stream + self.fuse_grad_size_in_num = ( + user_defined_strategy.fuse_grad_size_in_num + ) + self.calc_comm_same_stream = ( + user_defined_strategy._calc_comm_same_stream + ) def _can_apply(self): if not self.role_maker._is_collective: @@ -68,23 +82,25 @@ class RawProgramOptimizer(MetaOptimizerBase): if param.is_distributed: continue - block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': 0, - OP_ROLE_KEY: OpRole.Forward - }) - - if not param: return # no parameter on this device - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': 0, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + + if not param: + return # no parameter on this device + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Forward}, + ) def _get_process_group_info(self): # global ring info @@ -96,19 +112,21 @@ class RawProgramOptimizer(MetaOptimizerBase): 
self._get_process_group_info() collective_helper = CollectiveHelper(self.role_maker, wait_port=False) # Create global ring for all gpus (ring_id = 0) - collective_helper._init_communicator(self.startup_program, - self.current_endpoint, - self.global_endpoints, - self.global_rank, - self.global_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self.startup_program, + self.current_endpoint, + self.global_endpoints, + self.global_rank, + self.global_ring_id, + True, + self.global_ring_id, + True, + ) self._broadcast_params(self.global_ring_id) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self.endpoints = self.role_maker._get_trainer_endpoints() self.current_endpoint = self.endpoints[self.role_maker._worker_index()] self.rank = self.role_maker._worker_index() @@ -122,7 +140,8 @@ class RawProgramOptimizer(MetaOptimizerBase): self.main_program = program optimize_ops, params_grads = self.inner_opt.minimize( - loss, startup_program, parameter_list, no_grad_set) + loss, startup_program, parameter_list, no_grad_set + ) if self.nranks == 1: return optimize_ops, params_grads self._init_process_group() @@ -147,7 +166,9 @@ class RawProgramOptimizer(MetaOptimizerBase): if gm_cond_var_name is None: return None - cond_op = None # false_fn of gm is None, so we should only find one block + cond_op = ( + None # false_fn of gm is None, so we should only find one block + ) for op in self.main_program.global_block().ops: if op.type != 'conditional_block' or 'Cond' not in op.input_names: continue @@ -173,8 +194,7 @@ class RawProgramOptimizer(MetaOptimizerBase): param_vars = [] grad_vars = [] for op in block.ops: - if is_backward_op(op) and \ - OP_ROLE_VAR_KEY in op.attr_names: + if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names: op_role_var = op.attr(OP_ROLE_VAR_KEY) assert len(op_role_var) % 2 == 0 for i in range(0, len(op_role_var), 2): @@ -188,35 +208,41 @@ class RawProgramOptimizer(MetaOptimizerBase): if not grad_vars: return - gm_block._insert_op(first_optimize_op_idx, - type="c_sync_calc_stream", - inputs={'X': grad_vars[0]}, - outputs={'Out': grad_vars[0]}, - attrs={OP_ROLE_KEY: OpRole.Backward}) + gm_block._insert_op( + first_optimize_op_idx, + type="c_sync_calc_stream", + inputs={'X': grad_vars[0]}, + outputs={'Out': grad_vars[0]}, + attrs={OP_ROLE_KEY: OpRole.Backward}, + ) insert_op_num = 1 ring_id = self.global_ring_id # NOTE: can perform fuse allreduce inside the loop in the future for i, (p, g) in enumerate(zip(param_vars, grad_vars)): - gm_block._insert_op(first_optimize_op_idx + insert_op_num, - type="c_allreduce_sum", - inputs={'X': g}, - outputs={'Out': g}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward, - }) + gm_block._insert_op( + first_optimize_op_idx + insert_op_num, + type="c_allreduce_sum", + inputs={'X': g}, + outputs={'Out': g}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Backward, + }, + ) insert_op_num += 1 - gm_block._insert_op(first_optimize_op_idx + insert_op_num, - type="c_sync_comm_stream", - inputs={'X': grad_vars}, - outputs={'Out': grad_vars}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward, - }) + gm_block._insert_op( + first_optimize_op_idx + insert_op_num, + type="c_sync_comm_stream", + inputs={'X': grad_vars}, + outputs={'Out': grad_vars}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Backward, + }, + ) def 
_transpile_main_program(self, loss): self._insert_loss_grad_ops(loss) @@ -240,14 +266,16 @@ class RawProgramOptimizer(MetaOptimizerBase): for idx, op in reversed(list(enumerate(block.ops))): if is_loss_grad_op(op): loss_grad_var = block.vars[op.output_arg_names[0]] - block._insert_op(idx + 1, - type='scale', - inputs={'X': loss_grad_var}, - outputs={'Out': loss_grad_var}, - attrs={ - 'scale': 1.0 / self.nranks, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx + 1, + type='scale', + inputs={'X': loss_grad_var}, + outputs={'Out': loss_grad_var}, + attrs={ + 'scale': 1.0 / self.nranks, + OP_ROLE_KEY: OpRole.Backward, + }, + ) def _insert_allreduce_ops(self): block = self.main_program.global_block() @@ -255,8 +283,7 @@ class RawProgramOptimizer(MetaOptimizerBase): grad = None grad_vars = [] for idx, op in reversed(list(enumerate(block.ops))): - if is_backward_op(op) and \ - OP_ROLE_VAR_KEY in op.attr_names: + if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names: op_role_var = op.attr(OP_ROLE_VAR_KEY) if len(op_role_var) == 0: continue @@ -271,36 +298,39 @@ class RawProgramOptimizer(MetaOptimizerBase): continue grad_vars.append(grad) - block._insert_op(idx + offset, - type='c_sync_calc_stream', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - OP_ROLE_KEY: OpRole.Backward, - }) + block._insert_op( + idx + offset, + type='c_sync_calc_stream', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + OP_ROLE_KEY: OpRole.Backward, + }, + ) offset += 1 - block._insert_op(idx + offset, - type='c_allreduce_sum', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx + offset, + type='c_allreduce_sum', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Backward, + }, + ) if grad is None: return for idx, op in enumerate(block.ops): if is_optimizer_op(op): - block._insert_op(idx, - type='c_sync_comm_stream', - inputs={'X': grad_vars}, - outputs={'Out': grad_vars}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx, + type='c_sync_comm_stream', + inputs={'X': grad_vars}, + outputs={'Out': grad_vars}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Backward}, + ) break # This function helps reduce the number of allreduce by integrating op, which can save communication time. 
@@ -318,16 +348,16 @@ class RawProgramOptimizer(MetaOptimizerBase): # find all grad params for idx, op in enumerate(block.ops): - if first_backward_idx == -1 and \ - is_backward_op(op): + if first_backward_idx == -1 and is_backward_op(op): first_backward_idx = idx - if is_backward_op(op) and \ - OP_ROLE_VAR_KEY in op.attr_names: + if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names: op_role_var = op.attr(OP_ROLE_VAR_KEY) if len(op_role_var) == 0: continue - assert len(op_role_var) % 2 == 0, "vars need to be one param var followed by one grad var, " \ - "but got odd number of vars" + assert len(op_role_var) % 2 == 0, ( + "vars need to be one param var followed by one grad var, " + "but got odd number of vars" + ) for i in range(0, len(op_role_var), 2): param_name = op_role_var[i] param = block.var(param_name) @@ -338,7 +368,8 @@ class RawProgramOptimizer(MetaOptimizerBase): param_grads.append((param, grad)) outputs_name_to_idx = self.__get_ouputs_name_to_idx( - first_backward_idx, block) + first_backward_idx, block + ) # structure of grad_param_segments is # [([grad0, grad1], [param0, param1]), ([grad2, grad3], [param2, param3])] @@ -348,9 +379,11 @@ class RawProgramOptimizer(MetaOptimizerBase): last_dtype = None # split the grad based on dtype and fused size for param, grad in param_grads: - if len(grad_param_segments) == 0 \ - or len(grad_param_segments[-1][0]) == self.fuse_grad_size_in_num \ - or grad.dtype != last_dtype: + if ( + len(grad_param_segments) == 0 + or len(grad_param_segments[-1][0]) == self.fuse_grad_size_in_num + or grad.dtype != last_dtype + ): grad_param_segments.append(([grad], [param])) last_dtype = grad.dtype else: @@ -366,34 +399,40 @@ class RawProgramOptimizer(MetaOptimizerBase): # not to use reversed since needs the absolute index value grad_segment, param_segment = grad_param_segments[i] # insert coalesce tensor - fused_var = block.create_var(name=unique_name.generate( - 'FusedOutput_{}'.format(grad_segment[0].name)), - dtype=grad_segment[0].dtype, - persistable=False, - stop_gradient=True) + fused_var = block.create_var( + name=unique_name.generate( + 'FusedOutput_{}'.format(grad_segment[0].name) + ), + dtype=grad_segment[0].dtype, + persistable=False, + stop_gradient=True, + ) fused_vars[i] = fused_var after_idx = outputs_name_to_idx[grad_segment[-1]][1] - block._insert_op_without_sync(after_idx + 1, - type='c_allreduce_sum', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': - self.calc_comm_same_stream, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op_without_sync( + after_idx + 1, + type='c_allreduce_sum', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': self.calc_comm_same_stream, + OP_ROLE_KEY: OpRole.Backward, + }, + ) if not self.calc_comm_same_stream: block._insert_op_without_sync( after_idx + 1, type='c_sync_calc_stream', inputs={'X': fused_var}, outputs={'Out': fused_var}, - attrs={OP_ROLE_KEY: OpRole.Backward}) + attrs={OP_ROLE_KEY: OpRole.Backward}, + ) # update the outputs_name_to_idx after insertion of sync/allreduce ops outputs_name_to_idx = self.__get_ouputs_name_to_idx( - first_backward_idx, block) + first_backward_idx, block + ) # the before_idx is not guaranteed sorted, therefore we have to find the # topology to insert the coalesce ops pos_for_coalesce = {} @@ -407,25 +446,26 @@ class RawProgramOptimizer(MetaOptimizerBase): pos_for_coalesce[i] = before_idx # insert the coalesce op based on the sorted before_idx 
- pos_for_coalesce = sorted(pos_for_coalesce.items(), - key=lambda kv: (kv[1], kv[0]), - reverse=True) + pos_for_coalesce = sorted( + pos_for_coalesce.items(), + key=lambda kv: (kv[1], kv[0]), + reverse=True, + ) for i, before_idx in pos_for_coalesce: grad_segment, param_segment = grad_param_segments[i] fused_var = fused_vars[i] - block._insert_op_without_sync(before_idx, - type="coalesce_tensor", - inputs={"Input": param_segment}, - outputs={ - "Output": grad_segment, - "FusedOutput": fused_var - }, - attrs={ - "copy_data": False, - "use_align": True, - "dtype": grad_segment[0].dtype, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op_without_sync( + before_idx, + type="coalesce_tensor", + inputs={"Input": param_segment}, + outputs={"Output": grad_segment, "FusedOutput": fused_var}, + attrs={ + "copy_data": False, + "use_align": True, + "dtype": grad_segment[0].dtype, + OP_ROLE_KEY: OpRole.Backward, + }, + ) if self.calc_comm_same_stream: block._sync_with_cpp() @@ -434,14 +474,13 @@ class RawProgramOptimizer(MetaOptimizerBase): # insert the sync comm op for idx, op in enumerate(block.ops): if is_optimizer_op(op): - block._insert_op_without_sync(idx, - type='c_sync_comm_stream', - inputs={'X': fused_vars}, - outputs={'Out': fused_vars}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op_without_sync( + idx, + type='c_sync_comm_stream', + inputs={'X': fused_vars}, + outputs={'Out': fused_vars}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Backward}, + ) break block._sync_with_cpp() @@ -465,6 +504,8 @@ class RawProgramOptimizer(MetaOptimizerBase): # the first idx and the last ids are identical outputs_name_to_idx[var] = (idx, idx) else: - outputs_name_to_idx[var] = (outputs_name_to_idx[var][0], - idx) + outputs_name_to_idx[var] = ( + outputs_name_to_idx[var][0], + idx, + ) return outputs_name_to_idx diff --git a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py index c9054c793f491c5b11331b6efcca905a22ad14a5..524c3a123abc08062b49d509750996193b9bd9e2 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py @@ -18,7 +18,6 @@ __all__ = [] class RecomputeOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(RecomputeOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -32,11 +31,12 @@ class RecomputeOptimizer(MetaOptimizerBase): ] self.meta_optimizers_black_list = [] - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(RecomputeOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(RecomputeOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) def _init_wrapped_opt(self): if self.wrapped_opt is not None: @@ -56,8 +56,10 @@ class RecomputeOptimizer(MetaOptimizerBase): return False if self.user_defined_strategy.recompute == True: - if len(self.user_defined_strategy.recompute_configs["checkpoints"] - ) == 0: + if ( + len(self.user_defined_strategy.recompute_configs["checkpoints"]) + == 0 + ): return False else: return True @@ -70,32 +72,33 @@ class RecomputeOptimizer(MetaOptimizerBase): # we do not support automatically recompute checkpoints currently return - def backward(self, - 
loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): # maybe inner_opt of other meta optimizer self._init_wrapped_opt() - return self.wrapped_opt.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + return self.wrapped_opt.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_gradients(self, params_grads): return self.wrapped_opt.apply_gradients(params_grads=params_grads) def apply_optimize(self, loss, startup_program, params_grads): - return self.wrapped_opt.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + return self.wrapped_opt.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self._init_wrapped_opt() - optimize_ops, params_grads = \ - self.wrapped_opt.minimize(loss, startup_program, - parameter_list, no_grad_set) + optimize_ops, params_grads = self.wrapped_opt.minimize( + loss, startup_program, parameter_list, no_grad_set + ) return optimize_ops, params_grads diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py index 126c7d1ca04bad653b9070a1ede9bf80c889dd67..e4db252cf7c95132660555ea93c389fecb2e67b7 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.distributed.fleet.meta_optimizers.common import is_optimizer_op, OP_ROLE_KEY, OpRole +from paddle.distributed.fleet.meta_optimizers.common import ( + is_optimizer_op, + OP_ROLE_KEY, + OpRole, +) from paddle.fluid import core @@ -20,7 +24,6 @@ __all__ = [] class FP16Utils(object): - def __init__(self): pass @@ -30,16 +33,20 @@ class FP16Utils(object): return False if is_optimizer_op(op): return False - assert (len(op.desc.input_arg_names()) == 1) - assert (len(op.desc.output_arg_names()) == 1) - input_name, output_name = op.desc.input_arg_names( - )[0], op.desc.output_arg_names()[0] + assert len(op.desc.input_arg_names()) == 1 + assert len(op.desc.output_arg_names()) == 1 + input_name, output_name = ( + op.desc.input_arg_names()[0], + op.desc.output_arg_names()[0], + ) if input_name not in params: return False input_var = block.var(input_name) output_var = block.var(output_name) - if input_var.dtype != core.VarDesc.VarType.FP32 or \ - output_var.dtype != core.VarDesc.VarType.FP16: + if ( + input_var.dtype != core.VarDesc.VarType.FP32 + or output_var.dtype != core.VarDesc.VarType.FP16 + ): return False return True @@ -49,14 +56,18 @@ class FP16Utils(object): return False if not is_optimizer_op(op): return False - assert (len(op.desc.input_arg_names()) == 1) - assert (len(op.desc.output_arg_names()) == 1) - input_name, output_name = op.desc.input_arg_names( - )[0], op.desc.output_arg_names()[0] + assert len(op.desc.input_arg_names()) == 1 + assert len(op.desc.output_arg_names()) == 1 + input_name, output_name = ( + op.desc.input_arg_names()[0], + op.desc.output_arg_names()[0], + ) input_var = block.var(input_name) output_var = block.var(output_name) - if input_var.dtype != core.VarDesc.VarType.FP16 or \ - output_var.dtype != core.VarDesc.VarType.FP32: + if ( + input_var.dtype != core.VarDesc.VarType.FP16 + or output_var.dtype != core.VarDesc.VarType.FP32 + ): return False return True @@ -64,7 +75,8 @@ class FP16Utils(object): def remove_cast_op(block, params, segment, offset): inserted_op_num = 0 for op_idx in reversed( - range(offset + segment._start_idx, offset + segment._end_idx)): + range(offset + segment._start_idx, offset + segment._end_idx) + ): op = block.ops[op_idx] if FP16Utils.is_fp16_cast_op(block, op, params): block._remove_op(op_idx, sync=False) @@ -84,13 +96,16 @@ class FP16Utils(object): continue output_name = op.desc.output_arg_names()[0] # TODO (JZ-LIANG) revise this for uniform mixed parallelism - param_name = output_name.strip( - "@GRAD@MERGED" - ) if "@MERGED" in output_name else output_name.strip("@GRAD") + param_name = ( + output_name.strip("@GRAD@MERGED") + if "@MERGED" in output_name + else output_name.strip("@GRAD") + ) if param_name not in shard.global_params: raise ValueError( "Output 'X' of cast_op must be a grad of" - "model param, but {} is not a grad".format(output_name)) + "model param, but {} is not a grad".format(output_name) + ) if output_name in reduced_grads_to_param: continue if shard.has_param(param_name): @@ -117,7 +132,8 @@ class FP16Utils(object): if param_name not in shard.global_params: raise ValueError( "Input 'X' of check_finite_and_unscale must" - "be grads, but {} is not a grad".format(input_name)) + "be grads, but {} is not a grad".format(input_name) + ) if shard.has_param(param_name): reversed_x.append(input_name) reversed_x_paramname.append(param_name) @@ -127,55 +143,73 @@ class FP16Utils(object): # the grad checking should take the all and only param in the current shard to_check_param = set(reversed_x_paramname) 
should_check_param = set(shard.global_params).intersection( - set([param for param, worker_idx in shard.global_param2device.items() \ - if worker_idx == shard.worker_idx])) - assert to_check_param == should_check_param, "amp \ + set( + [ + param + for param, worker_idx in shard.global_param2device.items() + if worker_idx == shard.worker_idx + ] + ) + ) + assert ( + to_check_param == should_check_param + ), "amp \ check_finite_and_unscale checking miss [{}] and got unexpected [{}]".format( should_check_param - to_check_param, - to_check_param - should_check_param) + to_check_param - should_check_param, + ) if update_loss_scaling_op_idx == -1: return inf_var = block.var(inf_var_name) - inf_var_int32 = block.create_var(name=inf_var_name + "@cast_int32", - shape=inf_var.shape, - dtype=core.VarDesc.VarType.INT32) - - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='cast', - inputs={'X': inf_var}, - outputs={'Out': inf_var_int32}, - attrs={ - "in_dtype": inf_var.dtype, - "out_dtype": inf_var_int32.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + inf_var_int32 = block.create_var( + name=inf_var_name + "@cast_int32", + shape=inf_var.shape, + dtype=core.VarDesc.VarType.INT32, + ) + + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='cast', + inputs={'X': inf_var}, + outputs={'Out': inf_var_int32}, + attrs={ + "in_dtype": inf_var.dtype, + "out_dtype": inf_var_int32.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 # allreduce(mp)->allreduce(sharding)->allreduce(pp) for ring_id in ring_ids: - if ring_id == -1: continue + if ring_id == -1: + continue # this allreduce communication should not overlap with calc - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='c_allreduce_max', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var_int32}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Optimize - }) + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='c_allreduce_max', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var_int32}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='cast', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var}, - attrs={ - "in_dtype": inf_var_int32.dtype, - "out_dtype": inf_var.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='cast', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var}, + attrs={ + "in_dtype": inf_var_int32.dtype, + "out_dtype": inf_var.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 block._sync_with_cpp() @@ -197,42 +231,51 @@ class FP16Utils(object): # 1. inf_var_int32 = allreduce_max(inf_var_int32) # 3. 
inf_var = cast(inf_var_int32) inf_var = block.var(inf_var_name) - inf_var_int32 = block.create_var(name=inf_var_name + "@cast_int32", - shape=inf_var.shape, - dtype=core.VarDesc.VarType.INT32) - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='cast', - inputs={'X': inf_var}, - outputs={'Out': inf_var_int32}, - attrs={ - "in_dtype": inf_var.dtype, - "out_dtype": inf_var_int32.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + inf_var_int32 = block.create_var( + name=inf_var_name + "@cast_int32", + shape=inf_var.shape, + dtype=core.VarDesc.VarType.INT32, + ) + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='cast', + inputs={'X': inf_var}, + outputs={'Out': inf_var_int32}, + attrs={ + "in_dtype": inf_var.dtype, + "out_dtype": inf_var_int32.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 # allreduce(mp)->allreduce(pp) for ring_id in ring_ids: - if ring_id == -1: continue - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='c_allreduce_max', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var_int32}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Optimize - }) + if ring_id == -1: + continue + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='c_allreduce_max', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var_int32}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 - block._insert_op_without_sync(update_loss_scaling_op_idx, - type='cast', - inputs={'X': inf_var_int32}, - outputs={'Out': inf_var}, - attrs={ - "in_dtype": inf_var_int32.dtype, - "out_dtype": inf_var.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + block._insert_op_without_sync( + update_loss_scaling_op_idx, + type='cast', + inputs={'X': inf_var_int32}, + outputs={'Out': inf_var}, + attrs={ + "in_dtype": inf_var_int32.dtype, + "out_dtype": inf_var.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) update_loss_scaling_op_idx += 1 block._sync_with_cpp() diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py index 03d955842f5fc5e38b49e4aa7639cdb9c49954b3..563757d35f4057a5b2f1b444fe09677388ebda56 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py @@ -18,13 +18,13 @@ __all__ = [] class GradientClipHelper(object): - def __init__(self, mp_ring_id): self.mp_ring_id = mp_ring_id def _is_gradient_clip_op(self, op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/gradient_clip") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/gradient_clip") def prune_gradient_clip(self, block, shard, ring_ids): """ @@ -51,8 +51,9 @@ class GradientClipHelper(object): param_name = input_name.strip("@GRAD@MERGED") else: param_name = input_name.strip("@GRAD") - if shard.is_param(param_name) and \ - not shard.has_param(param_name): + if shard.is_param(param_name) and not shard.has_param( + param_name + ): deperate_op = True elif shard.is_param(param_name): reversed_x_paramname.append(param_name) @@ -84,7 +85,7 @@ class GradientClipHelper(object): reversed_inputs.append(input_name) op.desc.set_input("X", reversed_inputs) - assert (len(op.desc.output_arg_names()) == 1) + assert len(op.desc.output_arg_names()) == 1 
sum_res = op.desc.output_arg_names()[0] # NOTE(wangxi): If we have 2 param, but sharding is 4, @@ -96,26 +97,25 @@ class GradientClipHelper(object): namescope = op.attr("op_namescope") block._remove_op(idx, sync=False) - op = block._insert_op_without_sync(idx, - type='fill_constant', - inputs={}, - outputs={'Out': sum_res}, - attrs={ - 'shape': - sum_var.shape, - 'dtype': - sum_var.dtype, - 'value': - 0.0, - OP_ROLE_KEY: - OpRole.Optimize - }) + op = block._insert_op_without_sync( + idx, + type='fill_constant', + inputs={}, + outputs={'Out': sum_res}, + attrs={ + 'shape': sum_var.shape, + 'dtype': sum_var.dtype, + 'value': 0.0, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) op._set_attr('op_namescope', namescope) # allreduce(mp)->allreduce(sharding)->allreduce(pp) idx_offset = 1 for ring_id in ring_ids: - if ring_id == -1: continue + if ring_id == -1: + continue # this allreduce should not overlap with calc and should be scheduled in calc stream block._insert_op_without_sync( idx + idx_offset, @@ -127,18 +127,28 @@ class GradientClipHelper(object): 'op_namescope': "/gradient_clip_model_parallelism", 'use_calc_stream': True, OP_ROLE_KEY: OpRole.Optimize, - }) + }, + ) idx_offset += 1 # the grad sum here should take the all and only param in the current shard to_check_param = set(reversed_x_paramname) - should_check_param = set(shard.global_params).intersection(set( - [param for param, worker_idx in shard.global_param2device.items() \ - if worker_idx == shard.worker_idx])) - assert to_check_param == should_check_param, "amp check_finite_and_unscale \ + should_check_param = set(shard.global_params).intersection( + set( + [ + param + for param, worker_idx in shard.global_param2device.items() + if worker_idx == shard.worker_idx + ] + ) + ) + assert ( + to_check_param == should_check_param + ), "amp check_finite_and_unscale \ checking miss [{}] and got unexpected [{}]".format( should_check_param - to_check_param, - to_check_param - should_check_param) + to_check_param - should_check_param, + ) for var_name in deperated_vars: block._remove_var(var_name, sync=False) @@ -178,8 +188,12 @@ class GradientClipHelper(object): # by global norm. Those vars either doesn't have is_distributed attr # or the is_distributed attr has been set as False. # Therefore, we prune those duplicated vars for grad clip. 
- if mp_rank >= 1 and (not (hasattr(input_var, 'is_distributed') - and input_var.is_distributed)): + if mp_rank >= 1 and ( + not ( + hasattr(input_var, 'is_distributed') + and input_var.is_distributed + ) + ): removed_op_idx.add(idx) for output_name in op.output_arg_names: removed_tmp_var.add(output_name) @@ -222,8 +236,9 @@ class GradientClipHelper(object): 'shape': sum_rst_var.shape, 'dtype': sum_rst_var.dtype, 'value': 0.0, - OP_ROLE_KEY: OpRole.Optimize - }) + OP_ROLE_KEY: OpRole.Optimize, + }, + ) fill_constant_op._set_attr('op_namescope', namescope) self._insert_allreduce(block, ring_ids, idx, sum_rst_var) break @@ -245,4 +260,5 @@ class GradientClipHelper(object): 'op_namescope': "/gradient_clip_model_parallelism", 'use_calc_stream': True, OP_ROLE_KEY: OpRole.Optimize, - }) + }, + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py index 968709717d1b0781ef92b0e9df005a771c2202eb..ac10bb42383d1598c51485972bc1e723f907ed48 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py @@ -56,21 +56,25 @@ class OffloadHelper(object): def _insert_cast_op(self, block, idx, src_name, dst_name): src_var = block.var(src_name) if not block.has_var(dst_name): - block.create_var(name=dst_name, - shape=src_var.shape, - dtype=core.VarDesc.VarType.FP16, - persistable=True) + block.create_var( + name=dst_name, + shape=src_var.shape, + dtype=core.VarDesc.VarType.FP16, + persistable=True, + ) dst_var = block.var(dst_name) assert dst_var.dtype == core.VarDesc.VarType.FP16 - block._insert_op_without_sync(idx, - type='cast', - inputs={'X': src_var}, - outputs={'Out': dst_var}, - attrs={ - 'in_dtype': src_var.dtype, - 'out_dtype': dst_var.dtype, - OP_ROLE_KEY: OpRole.Optimize - }) + block._insert_op_without_sync( + idx, + type='cast', + inputs={'X': src_var}, + outputs={'Out': dst_var}, + attrs={ + 'in_dtype': src_var.dtype, + 'out_dtype': dst_var.dtype, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) def _insert_broadcast_op(self, block, idx, param_name): rings = [] @@ -86,36 +90,42 @@ class OffloadHelper(object): # the insert op order is: mp, dp for ring in rings: - block._insert_op_without_sync(idx, - type="c_broadcast", - inputs={'X': param_name}, - outputs={'Out': param_name}, - attrs={ - 'ring_id': ring, - 'root': 0, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward, - }) + block._insert_op_without_sync( + idx, + type="c_broadcast", + inputs={'X': param_name}, + outputs={'Out': param_name}, + attrs={ + 'ring_id': ring, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) def _insert_memcpy_op(self, block, idx, src_name, dst_name, dst_place_type): src_var = block.var(src_name) dst_var = block.var(dst_name) - block._insert_op_without_sync(idx, - type='memcpy', - inputs={'X': src_var}, - outputs={'Out': dst_var}, - attrs={ - 'dst_place_type': dst_place_type, - OP_ROLE_KEY: OpRole.Optimize, - }) + block._insert_op_without_sync( + idx, + type='memcpy', + inputs={'X': src_var}, + outputs={'Out': dst_var}, + attrs={ + 'dst_place_type': dst_place_type, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) def _insert_fetch_op(self, block, idx, src_name, dst_name): - self._insert_memcpy_op(block, idx, src_name, dst_name, - OffloadHelper.cuda_place_type) + self._insert_memcpy_op( + block, idx, src_name, dst_name, OffloadHelper.cuda_place_type + ) def _insert_offload_op(self, 
block, idx, src_name, dst_name): - self._insert_memcpy_op(block, idx, src_name, dst_name, - OffloadHelper.cuda_pinned_place_type) + self._insert_memcpy_op( + block, idx, src_name, dst_name, OffloadHelper.cuda_pinned_place_type + ) def _get_offload_var_name(self, name): return unique_name.generate(name + '@offload') @@ -124,10 +134,12 @@ class OffloadHelper(object): for block in blocks: var = block.var(var_name) var.persistable = False - offload_var = block.create_var(name=offload_var_name, - shape=var.shape, - dtype=var.dtype, - persistable=True) + offload_var = block.create_var( + name=offload_var_name, + shape=var.shape, + dtype=var.dtype, + persistable=True, + ) def offload_fp32param(self, block, startup_block, offload=True): """ @@ -188,13 +200,15 @@ class OffloadHelper(object): if 'subprog' not in output_name: assert output_name == input_name + '.cast_fp16' - assert input_name not in param_to_fp16, \ - "There must be only one cast op from fp32 param to fp16 param." + assert ( + input_name not in param_to_fp16 + ), "There must be only one cast op from fp32 param to fp16 param." param_to_fp16[input_name] = output_name else: # fp16-->recompute_var - assert input_name in param_to_fp16, \ - "param must first be cast to fp16" + assert ( + input_name in param_to_fp16 + ), "param must first be cast to fp16" fp16_param = param_to_fp16[input_name] fp16_param_to_recompute[fp16_param] = output_name recompute_to_fp16[output_name] = fp16_param @@ -205,24 +219,28 @@ class OffloadHelper(object): for idx, op in reversed(list(enumerate(block.ops))): if is_update_op(op): param = op.desc.input("Param")[0] - if param not in param_to_idx: continue + if param not in param_to_idx: + continue # step3.1: create offload_var offload_var_name = self._get_offload_var_name(param) param_name_to_offload_name[param] = offload_var_name if offload: - self._create_offload_var(param, offload_var_name, - [block, startup_block]) + self._create_offload_var( + param, offload_var_name, [block, startup_block] + ) # step3.2: insert cast op and offload op - self._insert_offload_op(block, idx + 1, param, - offload_var_name) + self._insert_offload_op( + block, idx + 1, param, offload_var_name + ) assert param in param_to_fp16 fp16_param_name = param_to_fp16[param] fp16_param_var = block.var(fp16_param_name) fp16_param_var.persistable = True - self._insert_cast_op(block, idx + 1, param, - param_to_fp16[param]) + self._insert_cast_op( + block, idx + 1, param, param_to_fp16[param] + ) if offload: # step3.3: insert fetch op @@ -242,8 +260,9 @@ class OffloadHelper(object): op._rename_input(input_name, recompute_to_fp16[input_name]) for output_name in op.desc.output_arg_names(): if output_name in recompute_to_fp16: - op._rename_output(output_name, - recompute_to_fp16[output_name]) + op._rename_output( + output_name, recompute_to_fp16[output_name] + ) # step4: remove recompute_param for name in recompute_to_fp16.keys(): @@ -262,14 +281,23 @@ class OffloadHelper(object): var_name = out_name if offload: offload_var_name = param_name_to_offload_name[var_name] - self._insert_offload_op(startup_block, insert_idx, - var_name, offload_var_name) - self._insert_cast_op(startup_block, insert_idx, var_name, - param_to_fp16[var_name]) + self._insert_offload_op( + startup_block, + insert_idx, + var_name, + offload_var_name, + ) + self._insert_cast_op( + startup_block, + insert_idx, + var_name, + param_to_fp16[var_name], + ) # NOTE(wangxi): cast and offload should insert after broadcast param. 
# the insert op order is: {mp, dp}broadcast, cast, offload - self._insert_broadcast_op(startup_block, insert_idx, - var_name) + self._insert_broadcast_op( + startup_block, insert_idx, var_name + ) visited_vars.add(out_name) @@ -322,14 +350,16 @@ class OffloadHelper(object): offload_var_name = self._get_offload_var_name(var_name) vars_name_to_offload_name[var_name] = offload_var_name - self._create_offload_var(var_name, offload_var_name, - [block, startup_block]) + self._create_offload_var( + var_name, offload_var_name, [block, startup_block] + ) # step2: insert offload op for var_name in vars_name: offload_var_name = vars_name_to_offload_name[var_name] - self._insert_offload_op(block, idx + 1, var_name, - offload_var_name) + self._insert_offload_op( + block, idx + 1, var_name, offload_var_name + ) # step3: insert fetch op for var_name in vars_name: @@ -347,18 +377,17 @@ class OffloadHelper(object): var_name = out_name offload_var_name = vars_name_to_offload_name[var_name] # insert offload op after var is generated - self._insert_offload_op(startup_block, idx + 1, var_name, - offload_var_name) + self._insert_offload_op( + startup_block, idx + 1, var_name, offload_var_name + ) visited_vars.add(out_name) block._sync_with_cpp() startup_block._sync_with_cpp() - def opt_sharding_cast_fp32param(self, - block, - startup_block, - params, - offload=False): + def opt_sharding_cast_fp32param( + self, block, startup_block, params, offload=False + ): """ (p_fp16) = cast(p) (p_fp16_recompute) = cast(p) @@ -420,13 +449,15 @@ class OffloadHelper(object): if 'subprog' not in output_name: assert output_name == input_name + '.cast_fp16' - assert input_name not in param_to_fp16, \ - "There must be only one cast op from fp32 param to fp16 param." + assert ( + input_name not in param_to_fp16 + ), "There must be only one cast op from fp32 param to fp16 param." 
param_to_fp16[input_name] = output_name else: # fp16-->recompute_var - assert input_name in param_to_fp16, \ - "param must first be cast to fp16" + assert ( + input_name in param_to_fp16 + ), "param must first be cast to fp16" fp16_param = param_to_fp16[input_name] fp16_param_to_recompute[fp16_param] = output_name recompute_to_fp16[output_name] = fp16_param @@ -443,19 +474,22 @@ class OffloadHelper(object): offload_var_name = self._get_offload_var_name(param) param_name_to_offload_name[param] = offload_var_name if offload: - self._create_offload_var(param, offload_var_name, - [block, startup_block]) + self._create_offload_var( + param, offload_var_name, [block, startup_block] + ) # step3.2: insert cast op and offload op - self._insert_offload_op(block, idx + 1, param, - offload_var_name) + self._insert_offload_op( + block, idx + 1, param, offload_var_name + ) assert param in param_to_fp16 fp16_param_name = param_to_fp16[param] fp16_param_var = block.var(fp16_param_name) fp16_param_var.persistable = True - self._insert_cast_op(block, idx + 1, param, - param_to_fp16[param]) + self._insert_cast_op( + block, idx + 1, param, param_to_fp16[param] + ) if offload: # step3.3: insert fetch op @@ -476,8 +510,9 @@ class OffloadHelper(object): op._rename_input(input_name, recompute_to_fp16[input_name]) for output_name in op.desc.output_arg_names(): if output_name in recompute_to_fp16: - op._rename_output(output_name, - recompute_to_fp16[output_name]) + op._rename_output( + output_name, recompute_to_fp16[output_name] + ) # step4: remove recompute_param for name in recompute_to_fp16.keys(): @@ -508,22 +543,31 @@ class OffloadHelper(object): insert_idx = len(startup_block.ops) for idx, op in reversed(list(enumerate(startup_block.ops))): for out_name in op.output_arg_names: - if out_name in visited_vars: continue + if out_name in visited_vars: + continue if out_name in param_to_fp16: var_name = out_name if offload: self._insert_offload_op( - startup_block, idx + 1, var_name, - param_name_to_offload_name[var_name]) - - self._insert_cast_op(startup_block, insert_idx, var_name, - param_to_fp16[var_name]) + startup_block, + idx + 1, + var_name, + param_name_to_offload_name[var_name], + ) + + self._insert_cast_op( + startup_block, + insert_idx, + var_name, + param_to_fp16[var_name], + ) # NOTE(wangxi): cast and offload should insert after broadcast param. 
# the insert op order is: {mp, dp}broadcast, cast, offload - self._insert_broadcast_op(startup_block, insert_idx, - var_name) + self._insert_broadcast_op( + startup_block, insert_idx, var_name + ) if var_name not in local_params: param = startup_block.var(out_name) diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py index adbc00f25deb68b43dcc64558d72fef4019bccbb..895fd2f7acb2712afef26b12cb07661a18869a3d 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/prune.py @@ -16,7 +16,6 @@ __all__ = [] class ProgramDeps(object): - def __init__(self, block, start_vars, end_vars): self._block = block # vars where to start to build the deps @@ -45,7 +44,9 @@ class ProgramDeps(object): else: return None - def _build_deps(self, ): + def _build_deps( + self, + ): for var_name in self._start_vars: self._var_to_use_op[var_name] = [] @@ -53,8 +54,9 @@ class ProgramDeps(object): for idx, op in enumerate(self._block.ops): if op.type in [ - "c_allreduce_sum", "c_sync_comm_stream", - "c_calc_comm_stream" + "c_allreduce_sum", + "c_sync_comm_stream", + "c_calc_comm_stream", ]: continue input_vars = op.desc.input_arg_names() @@ -77,11 +79,13 @@ class ProgramDeps(object): self._var_to_generate_op[output_name].append(idx) if op.type == "conditional_block": # subblock - assert (op.desc.has_attr("sub_block")) + assert op.desc.has_attr("sub_block") subblock_idx = op.desc.attr("sub_block").id subblock_deps = ProgramDeps( self._block.program.block(subblock_idx), - op.desc.input_arg_names(), op.desc.output_arg_names()) + op.desc.input_arg_names(), + op.desc.output_arg_names(), + ) self._sub_block_deps[subblock_idx] = subblock_deps subblock_deps._father_block_deps = self @@ -93,17 +97,24 @@ class ProgramDeps(object): raise ValueError( "op_idx: {} is not in self._var_to_use_op[{}], " "self._var_to_use_op[{}] is {}".format( - op_idx, var_name, var_name, - self._var_to_use_op[var_name])) + op_idx, + var_name, + var_name, + self._var_to_use_op[var_name], + ) + ) self._var_to_use_op[var_name].remove(op_idx) # update _should_removed_var if var_name in self._start_vars: self._should_removed_var.discard(var_name) - elif self._var_to_use_op[ - var_name] == []: # no more deps of this var + elif ( + self._var_to_use_op[var_name] == [] + ): # no more deps of this var self._should_removed_var.add(var_name) - elif self._var_to_generate_op[var_name][-1] >= self._var_to_use_op[ - var_name][-1]: + elif ( + self._var_to_generate_op[var_name][-1] + >= self._var_to_use_op[var_name][-1] + ): # there are circle in the graph self._should_removed_var.add(var_name) else: # input_name should not be deleted @@ -111,11 +122,13 @@ class ProgramDeps(object): def crop_output_var_from_op(self, op_idx, var_name): if var_name in self._var_to_generate_op: - assert (op_idx in self._var_to_generate_op[var_name]) + assert op_idx in self._var_to_generate_op[var_name] self._var_to_generate_op[var_name].remove(op_idx) if self._block.has_var(var_name): - if var_name not in self._var_to_generate_op or self._var_to_generate_op[ - var_name] == []: + if ( + var_name not in self._var_to_generate_op + or self._var_to_generate_op[var_name] == [] + ): self._block._remove_var(var_name, sync=False) def remove_op(self, op_idx, reserved_vars=None): diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py 
index e6490d62a5d59fb1600047557ba5e02a9552a1c6..d33d04098d051a73aff63bcd268d14382d761bc7 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py @@ -15,14 +15,17 @@ import re from paddle.distributed.fleet.meta_optimizers.common import is_optimizer_op from paddle.distributed.fleet.meta_optimizers.sharding.utils import get_var_size -from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import FP16Utils +from paddle.distributed.fleet.meta_optimizers.sharding.fp16_helper import ( + FP16Utils, +) __all__ = [] class Shard(object): - - def __init__(self, ): + def __init__( + self, + ): self.global_params = set([]) self.worker_idx = -1 self.worker_num = -1 @@ -37,19 +40,25 @@ class Shard(object): self.worker_num = worker_num # global_param2device contains fp32 params and fp16 params # device2global_params only contains fp32 params - self.global_param2device, self.device2global_params \ - = self._split_params(params_grads, worker_idx, worker_num) + ( + self.global_param2device, + self.device2global_params, + ) = self._split_params(params_grads, worker_idx, worker_num) def has_param(self, var_name): - return var_name in self.global_param2device and \ - self._var_device_id(var_name) == self.worker_idx + return ( + var_name in self.global_param2device + and self._var_device_id(var_name) == self.worker_idx + ) def has_opt_var(self, var_name): return self._var_device_id(var_name) == self.worker_idx def has_var(self, var_name): - return self._var_device_id(var_name) == -1 or \ - self._var_device_id(var_name) == self.worker_idx + return ( + self._var_device_id(var_name) == -1 + or self._var_device_id(var_name) == self.worker_idx + ) def _split_params(self, params_grads, worker_idx, worker_num): param2device = {} @@ -74,8 +83,11 @@ class Shard(object): if var_name in self.global_param2device: return self.global_param2device[var_name] for suffix in [ - "_moment1_0", "_moment2_0", "_beta1_pow_acc_0", - "_beta2_pow_acc_0", "_velocity_0" + "_moment1_0", + "_moment2_0", + "_beta1_pow_acc_0", + "_beta2_pow_acc_0", + "_velocity_0", ]: base_name = re.sub(suffix, '', var_name) if base_name in self.global_param2device: @@ -105,7 +117,8 @@ class Shard(object): fp16_to_fp32[output_name] = input_name param_usage[input_name] -= 1 self.global_param2device[output_name] = self.global_param2device[ - input_name] + input_name + ] for param, usage in param_usage.items(): if usage > 0: @@ -122,8 +135,11 @@ class Shard(object): if var_name in self.global_params: return True for suffix in [ - "_moment1_0", "_moment2_0", "_beta1_pow_acc_0", - "_beta2_pow_acc_0", "_velocity_0" + "_moment1_0", + "_moment2_0", + "_beta1_pow_acc_0", + "_beta2_pow_acc_0", + "_velocity_0", ]: base_name = re.sub(suffix, '', var_name) if base_name in self.global_params: @@ -140,7 +156,6 @@ class Shard(object): class ProgramSegment(object): - def __init__(self, block): self._block = block self._allreduce_vars = [] diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index b5f72aac893e0daca831a9af62019b41a604616c..1ec9457854dce7b43d974763050ae93046e98c4f 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -14,7 +14,11 @@ import paddle from paddle.fluid import core, unique_name from functools import reduce -from paddle.distributed.fleet.meta_optimizers.common 
import is_loss_grad_op, is_backward_op, is_optimizer_op +from paddle.distributed.fleet.meta_optimizers.common import ( + is_loss_grad_op, + is_backward_op, + is_optimizer_op, +) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole import re @@ -42,7 +46,10 @@ def check_broadcast(block): "var_name areadly exist: {}" "the old pos is {}, the new pos is {}".format( var_name, - broadcast_vars[var_name]["broadcast_pos"], idx)) + broadcast_vars[var_name]["broadcast_pos"], + idx, + ) + ) broadcast_vars[var_name] = { "fill_constant_pos": -1, "broadcast_pos": idx, @@ -69,17 +76,21 @@ def check_broadcast(block): var_name = op.desc.input_arg_names()[0] if "@BroadCast" in var_name: if broadcast_vars[var_name]["fill_constant_pos"] != -1: - assert (last_sync_calc_op_idx != -1) - assert (broadcast_vars[var_name]["fill_constant_pos"] < - last_sync_calc_op_idx) - assert (last_sync_calc_op_idx < idx) + assert last_sync_calc_op_idx != -1 + assert ( + broadcast_vars[var_name]["fill_constant_pos"] + < last_sync_calc_op_idx + ) + assert last_sync_calc_op_idx < idx continue for input_name in op.desc.input_arg_names(): if input_name in broadcast_vars: - assert (broadcast_vars[input_name]["broadcast_pos"] != -1) - assert (broadcast_vars[input_name]["broadcast_pos"] < - last_sync_comm_op_idx) - assert (last_sync_comm_op_idx < idx) + assert broadcast_vars[input_name]["broadcast_pos"] != -1 + assert ( + broadcast_vars[input_name]["broadcast_pos"] + < last_sync_comm_op_idx + ) + assert last_sync_comm_op_idx < idx return @@ -135,8 +146,10 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): if var_name in vars_status and vars_status[var_name] == 0: vars_status[var_name] = 1 for var_name in dp_grads_status: - if var_name in dp_grads_status and dp_grads_status[ - var_name] == 0: + if ( + var_name in dp_grads_status + and dp_grads_status[var_name] == 0 + ): dp_grads_status[var_name] = 1 # check sharding allreduce and reduce but skip megatron allreduce elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": @@ -144,7 +157,9 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): var_name = op.desc.input_arg_names()[0] ring_id = op.desc.attr("ring_id") if ring_id == sharding_ring_id: - assert op.type == "c_reduce_sum", "Grad in Sharding group should be reduce rather than allreduce" + assert ( + op.type == "c_reduce_sum" + ), "Grad in Sharding group should be reduce rather than allreduce" if var_name in vars_status: _status = vars_status[var_name] else: @@ -152,12 +167,15 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): if _status == -1: raise ValueError( "{} is not generated, but you are" - "trying to all-reduce it".format(var_name)) + "trying to all-reduce it".format(var_name) + ) if _status == 0: - raise ValueError("There should be a sync_calc op " - "after generate Var: {} and before the" - "c_allreduce_sum op".format(var_name)) - assert (_status == 1) + raise ValueError( + "There should be a sync_calc op " + "after generate Var: {} and before the" + "c_allreduce_sum op".format(var_name) + ) + assert _status == 1 if var_name in vars_status: vars_status[var_name] = 2 else: @@ -193,29 +211,37 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): if vars_status[input_name] != 3: raise ValueError( "There should be a sync_comm op " - "after allreduce the Var: {}".format(input_name)) + "after allreduce the Var: {}".format(input_name) + ) raise ValueError( - "The reduce output grad [{}] should NOT be be used 
in Non-root rank." - .format(input_name)) + "The reduce output grad [{}] should NOT be be used in Non-root rank.".format( + input_name + ) + ) if input_name in dp_grads_status: if dp_ring_id == -1: if dp_grads_status[input_name] != 3: raise ValueError( "There should be a sync_comm op " - "after allreduce the Var: {}".format( - input_name)) + "after allreduce the Var: {}".format(input_name) + ) else: if dp_grads_status[input_name] != 5: raise ValueError( "The grad in shard should be allreduce and sync" - "twice before usage {}".format(input_name)) + "twice before usage {}".format(input_name) + ) for output_name in op.desc.output_arg_names(): - if output_name in vars_status and \ - vars_status[output_name] == -1: + if ( + output_name in vars_status + and vars_status[output_name] == -1 + ): vars_status[output_name] = 0 - if output_name in dp_grads_status and \ - dp_grads_status[output_name] == -1: + if ( + output_name in dp_grads_status + and dp_grads_status[output_name] == -1 + ): dp_grads_status[output_name] = 0 # check sharding with amp @@ -234,9 +260,9 @@ def get_valid_op_role(block, insert_idx): return OpRole.Forward or OpRole.Backward """ op_role = block.ops[insert_idx].attr('op_role') - if (insert_idx >= len(block.ops)) or (op_role in [ - int(OpRole.Backward), int(OpRole.Optimize) - ]): + if (insert_idx >= len(block.ops)) or ( + op_role in [int(OpRole.Backward), int(OpRole.Optimize)] + ): return OpRole.Backward if op_role in [int(OpRole.Forward), int(OpRole.Loss)]: return OpRole.Forward @@ -249,11 +275,13 @@ def insert_sync_calc_op(block, insert_idx, calc_dep_vars): _insert_sync_calc_op """ op_role = get_valid_op_role(block, insert_idx) - block._insert_op_without_sync(insert_idx, - type='c_sync_calc_stream', - inputs={'X': calc_dep_vars}, - outputs={'Out': calc_dep_vars}, - attrs={OP_ROLE_KEY: op_role}) + block._insert_op_without_sync( + insert_idx, + type='c_sync_calc_stream', + inputs={'X': calc_dep_vars}, + outputs={'Out': calc_dep_vars}, + attrs={OP_ROLE_KEY: op_role}, + ) return @@ -262,14 +290,13 @@ def insert_sync_comm_op(block, insert_idx, ring_id, comm_dep_vars): insert sync_comm_op for single var """ op_role = get_valid_op_role(block, insert_idx) - block._insert_op_without_sync(insert_idx, - type='c_sync_comm_stream', - inputs={'X': comm_dep_vars}, - outputs={'Out': comm_dep_vars}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_sync_comm_stream', + inputs={'X': comm_dep_vars}, + outputs={'Out': comm_dep_vars}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: op_role}, + ) return 1 @@ -282,14 +309,13 @@ def insert_sync_comm_ops(block, insert_idx, ring_id, comm_dep_vars): return 0 op_role = get_valid_op_role(block, insert_idx) - block._insert_op_without_sync(insert_idx, - type='c_sync_comm_stream', - inputs={'X': comm_dep_vars}, - outputs={'Out': comm_dep_vars}, - attrs={ - 'ring_id': int(ring_id), - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_sync_comm_stream', + inputs={'X': comm_dep_vars}, + outputs={'Out': comm_dep_vars}, + attrs={'ring_id': int(ring_id), OP_ROLE_KEY: op_role}, + ) return 1 @@ -300,15 +326,17 @@ def insert_fill_constant_ops(block, insert_idx, fill_constant_vars): op_role = get_valid_op_role(block, insert_idx) for broadcast_name in fill_constant_vars: broadcast_var = block.var(broadcast_name) - block._insert_op_without_sync(insert_idx, - type="fill_constant", - outputs={"Out": broadcast_var.name}, - attrs={ - "shape": broadcast_var.shape, - "dtype": 
broadcast_var.dtype, - "value": 0.0, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type="fill_constant", + outputs={"Out": broadcast_var.name}, + attrs={ + "shape": broadcast_var.shape, + "dtype": broadcast_var.dtype, + "value": 0.0, + OP_ROLE_KEY: op_role, + }, + ) return @@ -318,58 +346,69 @@ def insert_cast_ops(block, insert_idx, cast_ops): """ op_role = get_valid_op_role(block, insert_idx) for fp16_name, fp32_name in cast_ops.items(): - block._insert_op_without_sync(insert_idx, - type="cast", - inputs={"X": fp32_name}, - outputs={"Out": fp16_name}, - attrs={ - "in_dtype": core.VarDesc.VarType.FP32, - "out_dtype": - core.VarDesc.VarType.FP16, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type="cast", + inputs={"X": fp32_name}, + outputs={"Out": fp16_name}, + attrs={ + "in_dtype": core.VarDesc.VarType.FP32, + "out_dtype": core.VarDesc.VarType.FP16, + OP_ROLE_KEY: op_role, + }, + ) return -def insert_allreduce_ops(block, - insert_idx, - ring_id, - allreduce_vars, - op_role=OpRole.Backward, - use_calc_stream=False, - user_defined_strategy=None): +def insert_allreduce_ops( + block, + insert_idx, + ring_id, + allreduce_vars, + op_role=OpRole.Backward, + use_calc_stream=False, + user_defined_strategy=None, +): """ _add_allreduce_ops """ if len(allreduce_vars) == 0: return - if user_defined_strategy and \ - user_defined_strategy.fuse_all_reduce_ops and \ - not user_defined_strategy.fuse_grad_merge: + if ( + user_defined_strategy + and user_defined_strategy.fuse_all_reduce_ops + and not user_defined_strategy.fuse_grad_merge + ): # If fuse_grad_merge is enable, the grad vars have already been fused during # gradient merge pass, therefore, those vars are not need to be fused here - insert_fused_allreduce_ops(block, insert_idx, ring_id, allreduce_vars, - op_role, use_calc_stream, - user_defined_strategy.fuse_grad_size_in_MB) + insert_fused_allreduce_ops( + block, + insert_idx, + ring_id, + allreduce_vars, + op_role, + use_calc_stream, + user_defined_strategy.fuse_grad_size_in_MB, + ) else: for var in allreduce_vars: - block._insert_op_without_sync(insert_idx, - type='c_allreduce_sum', - inputs={'X': var}, - outputs={'Out': var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': - use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_allreduce_sum', + inputs={'X': var}, + outputs={'Out': var}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) return class FuseHelper(object): - @staticmethod def sort_vars_by_dtype(block, vars_name): fp32_vars = [] @@ -389,17 +428,19 @@ class FuseHelper(object): return fp32_vars @staticmethod - def get_fused_groups(block, vars_name, fuse_size=32.): - """ coalesce tensor, get fused group """ + def get_fused_groups(block, vars_name, fuse_size=32.0): + """coalesce tensor, get fused group""" groups = [] - cur_size = 0. 
+ cur_size = 0.0 last_dtype = None for var_name in vars_name: real_var = block.var(var_name) var_size = get_var_size(real_var) - if cur_size + var_size > fuse_size \ - or len(groups) == 0 \ - or real_var.dtype != last_dtype: + if ( + cur_size + var_size > fuse_size + or len(groups) == 0 + or real_var.dtype != last_dtype + ): groups.append([real_var]) cur_size = var_size last_dtype = real_var.dtype @@ -409,11 +450,9 @@ class FuseHelper(object): return groups @staticmethod - def insert_coalesce_tensor(block, - index, - groups, - op_role=OpRole.Backward, - prefix="Output"): + def insert_coalesce_tensor( + block, index, groups, op_role=OpRole.Backward, prefix="Output" + ): fused_vars = [] insert_num = 0 for group in groups: @@ -423,247 +462,305 @@ class FuseHelper(object): fused_vars.append(group[0]) continue - fused_var = block.create_var(name=unique_name.generate( - 'Fused{}_{}'.format(prefix, group[0].name)), - dtype=group[0].dtype, - persistable=False, - stop_gradient=True) + fused_var = block.create_var( + name=unique_name.generate( + 'Fused{}_{}'.format(prefix, group[0].name) + ), + dtype=group[0].dtype, + persistable=False, + stop_gradient=True, + ) fused_vars.append(fused_var) - block._insert_op_without_sync(index, - type="coalesce_tensor", - inputs={"Input": group}, - outputs={ - "Output": group, - "FusedOutput": fused_var - }, - attrs={ - "copy_data": True, - "use_align": True, - "dtype": group[0].dtype, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + index, + type="coalesce_tensor", + inputs={"Input": group}, + outputs={"Output": group, "FusedOutput": fused_var}, + attrs={ + "copy_data": True, + "use_align": True, + "dtype": group[0].dtype, + OP_ROLE_KEY: op_role, + }, + ) insert_num += 1 return fused_vars, insert_num -def insert_fused_allreduce_ops(block, - insert_idx, - ring_id, - allreduce_vars, - op_role=OpRole.Backward, - use_calc_stream=False, - fuse_grad_size_in_MB=32): - groups = FuseHelper.get_fused_groups(block, allreduce_vars, - fuse_grad_size_in_MB) +def insert_fused_allreduce_ops( + block, + insert_idx, + ring_id, + allreduce_vars, + op_role=OpRole.Backward, + use_calc_stream=False, + fuse_grad_size_in_MB=32, +): + groups = FuseHelper.get_fused_groups( + block, allreduce_vars, fuse_grad_size_in_MB + ) - fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(block, - insert_idx, - groups, - op_role, - prefix="Grad") + fused_vars, insert_num = FuseHelper.insert_coalesce_tensor( + block, insert_idx, groups, op_role, prefix="Grad" + ) for fused_var in fused_vars: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_allreduce_sum', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_allreduce_sum', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) if not use_calc_stream: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_sync_calc_stream', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={OP_ROLE_KEY: op_role}) - - -def insert_fused_reduce_ops(block, - insert_idx, - ring_id, - reduce_vars, - shard, - op_role=OpRole.Backward, - use_calc_stream=False, - rank=None, - fuse_grad_size=32): + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_sync_calc_stream', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + 
attrs={OP_ROLE_KEY: op_role}, + ) + + +def insert_fused_reduce_ops( + block, + insert_idx, + ring_id, + reduce_vars, + shard, + op_role=OpRole.Backward, + use_calc_stream=False, + rank=None, + fuse_grad_size=32, +): nranks = shard.worker_num device_to_vars = [[] for _ in range(nranks)] for var in reduce_vars: root_id = get_grad_device(var, shard) - assert 0 <= root_id < nranks, "root_id should >=0 and < nranks, " \ - "but now nranks={}, the root_id of var={} is {}"\ - .format(nranks, var, root_id) + assert 0 <= root_id < nranks, ( + "root_id should >=0 and < nranks, " + "but now nranks={}, the root_id of var={} is {}".format( + nranks, var, root_id + ) + ) device_to_vars[root_id].append(var) for root_id, vars_name in enumerate(device_to_vars): groups = FuseHelper.get_fused_groups(block, vars_name, fuse_grad_size) fused_vars, insert_num = FuseHelper.insert_coalesce_tensor( - block, insert_idx, groups, op_role, prefix="Grad") + block, insert_idx, groups, op_role, prefix="Grad" + ) for fused_var in fused_vars: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_reduce_sum', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'root_id': root_id, - 'use_calc_stream': - use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_reduce_sum', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'root_id': root_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) if not use_calc_stream: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_sync_calc_stream', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={OP_ROLE_KEY: op_role}) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_sync_calc_stream', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={OP_ROLE_KEY: op_role}, + ) return [] if rank is None else device_to_vars[rank] -def insert_reduce_ops(block, - insert_idx, - ring_id, - reduce_vars, - shard, - op_role=OpRole.Backward, - use_calc_stream=False, - rank=None, - strategy=None): +def insert_reduce_ops( + block, + insert_idx, + ring_id, + reduce_vars, + shard, + op_role=OpRole.Backward, + use_calc_stream=False, + rank=None, + strategy=None, +): """ _add_reduce_ops """ - if strategy and strategy.fuse_all_reduce_ops and \ - not strategy.fuse_grad_merge: - return insert_fused_reduce_ops(block, insert_idx, ring_id, reduce_vars, - shard, op_role, use_calc_stream, rank, - strategy.fuse_grad_size_in_MB) + if ( + strategy + and strategy.fuse_all_reduce_ops + and not strategy.fuse_grad_merge + ): + return insert_fused_reduce_ops( + block, + insert_idx, + ring_id, + reduce_vars, + shard, + op_role, + use_calc_stream, + rank, + strategy.fuse_grad_size_in_MB, + ) grad_in_this_device = [] for var in reduce_vars: grad_var = var - if strategy and strategy.fuse_all_reduce_ops and \ - strategy.fuse_grad_merge: + if ( + strategy + and strategy.fuse_all_reduce_ops + and strategy.fuse_grad_merge + ): # TODO(wangxi): if support fp16_allreduce, need be # 'FusedMergedGrad.cast_fp16._' grad_var = var.replace('FusedMergedGrad_', '') root_id = get_grad_device(grad_var, shard) - assert root_id >= 0, "root id should be a positive int, but now root id is {}".format( - root_id) + assert ( + root_id >= 0 + ), "root id should be a positive int, but now root id is {}".format( + root_id + ) if rank is not None and rank == root_id: grad_in_this_device.append(var) - 
block._insert_op_without_sync(insert_idx, - type='c_reduce_sum', - inputs={'X': var}, - outputs={'Out': var}, - attrs={ - 'ring_id': ring_id, - 'root_id': root_id, - 'use_calc_stream': use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_reduce_sum', + inputs={'X': var}, + outputs={'Out': var}, + attrs={ + 'ring_id': ring_id, + 'root_id': root_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) return grad_in_this_device -def insert_fused_broadcast_param_ops(block, - insert_idx, - ring_id, - params, - shard, - op_role=OpRole.Optimize, - use_calc_stream=False, - rank=None, - fuse_size=32): +def insert_fused_broadcast_param_ops( + block, + insert_idx, + ring_id, + params, + shard, + op_role=OpRole.Optimize, + use_calc_stream=False, + rank=None, + fuse_size=32, +): nranks = shard.worker_num device_to_vars = [[] for _ in range(nranks)] for var in params: root_id = shard.device(var) - assert 0 <= root_id < nranks, "root_id should >=0 and < nranks, " \ - "but now nranks={}, the root_id of var={} is {}"\ - .format(nranks, var, root_id) + assert 0 <= root_id < nranks, ( + "root_id should >=0 and < nranks, " + "but now nranks={}, the root_id of var={} is {}".format( + nranks, var, root_id + ) + ) device_to_vars[root_id].append(var) for root_id, vars_name in enumerate(device_to_vars): groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size) fused_vars, insert_num = FuseHelper.insert_coalesce_tensor( - block, insert_idx, groups, op_role, prefix="Param") + block, insert_idx, groups, op_role, prefix="Param" + ) for fused_var in fused_vars: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_broadcast', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'root': root_id, - 'use_calc_stream': - use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_broadcast', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'root': root_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) if not use_calc_stream: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_sync_calc_stream', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={OP_ROLE_KEY: op_role}) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_sync_calc_stream', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={OP_ROLE_KEY: op_role}, + ) return [] if rank is None else device_to_vars[rank] -def insert_broadcast_param_ops(block, - insert_idx, - ring_id, - params, - shard, - op_role=OpRole.Optimize, - use_calc_stream=False, - rank=None, - strategy=None): +def insert_broadcast_param_ops( + block, + insert_idx, + ring_id, + params, + shard, + op_role=OpRole.Optimize, + use_calc_stream=False, + rank=None, + strategy=None, +): """ add broadcast param ops """ if strategy and strategy.fuse_all_reduce_ops: # TODO(wangxi): put fused var in startup_program, only need exec once - return insert_fused_broadcast_param_ops(block, insert_idx, ring_id, - params, shard, op_role, - use_calc_stream, rank, - strategy.fuse_grad_size_in_MB) + return insert_fused_broadcast_param_ops( + block, + insert_idx, + ring_id, + params, + shard, + op_role, + use_calc_stream, + rank, + strategy.fuse_grad_size_in_MB, + ) param_in_this_device = [] for param in params: root_id = shard.device(param) - assert root_id >= 0, "root id should be a positive int, but now root id is 
{}".format( - root_id) + assert ( + root_id >= 0 + ), "root id should be a positive int, but now root id is {}".format( + root_id + ) if rank is not None and rank == root_id: param_in_this_device.append(param) - block._insert_op_without_sync(insert_idx, - type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': root_id, - 'use_calc_stream': use_calc_stream, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': root_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) return param_in_this_device -def fuse_opt_broadcast_param_ops(block, - ring_id, - shard, - op_role=OpRole.Optimize, - strategy=None): +def fuse_opt_broadcast_param_ops( + block, ring_id, shard, op_role=OpRole.Optimize, strategy=None +): """ fuse optimizer sharding broadcast param ops """ @@ -689,26 +786,30 @@ def fuse_opt_broadcast_param_ops(block, groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size) fused_vars, insert_num = FuseHelper.insert_coalesce_tensor( - block, insert_idx, groups, op_role, prefix="Param") + block, insert_idx, groups, op_role, prefix="Param" + ) for fused_var in fused_vars: - block._insert_op_without_sync(insert_idx + insert_num, - type='c_broadcast', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'root': root_id, - 'use_calc_stream': True, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx + insert_num, + type='c_broadcast', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'root': root_id, + 'use_calc_stream': True, + OP_ROLE_KEY: op_role, + }, + ) block._sync_with_cpp() def get_grad_device(grad_name, shard): assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format( - grad_name) + grad_name + ) base_name = None # NOTE: mind the traversal order possible_suffixes = [ @@ -725,8 +826,9 @@ def get_grad_device(grad_name, shard): base_name = re.sub(suffix, '', grad_name) break - assert base_name in shard.global_param2device, "[{}] should be a param variable.".format( - base_name) + assert ( + base_name in shard.global_param2device + ), "[{}] should be a param variable.".format(base_name) return shard.global_param2device[base_name] @@ -760,15 +862,17 @@ def insert_broadcast_ops(block, insert_idx, ring_id, broadcast2root): """ op_role = get_valid_op_role(block, insert_idx) for broadcast_name, root_device in broadcast2root: - block._insert_op_without_sync(insert_idx, - type='c_broadcast', - inputs={'X': broadcast_name}, - outputs={'Out': broadcast_name}, - attrs={ - 'ring_id': ring_id, - 'root': root_device, - OP_ROLE_KEY: op_role - }) + block._insert_op_without_sync( + insert_idx, + type='c_broadcast', + inputs={'X': broadcast_name}, + outputs={'Out': broadcast_name}, + attrs={ + 'ring_id': ring_id, + 'root': root_device, + OP_ROLE_KEY: op_role, + }, + ) return @@ -793,8 +897,12 @@ def get_var_size(param): var size in MB """ assert -1 not in param.shape - return reduce(lambda x, y: x * y, - param.shape) * DtypeToSize[param.dtype] / 1024.0 / 1024.0 + return ( + reduce(lambda x, y: x * y, param.shape) + * DtypeToSize[param.dtype] + / 1024.0 + / 1024.0 + ) def insert_scale_loss_grad_ops(block, scale=1.0): @@ -804,9 +912,10 @@ def insert_scale_loss_grad_ops(block, scale=1.0): ''' for idx, op in reversed(list(enumerate(block.ops))): if is_loss_grad_op(op): - assert op.type == 
'fill_constant', \ - "loss_grad_op must be fill_constant op, " \ + assert op.type == 'fill_constant', ( + "loss_grad_op must be fill_constant op, " "but this op is {}".format(op.type) + ) assert op.has_attr('value') loss_scale = float(op.attr('value')) loss_scale = loss_scale / scale @@ -825,8 +934,9 @@ def comm_analyse(main_program): if op.type == "c_broadcast": var_name = op.desc.input_arg_names()[0] # convert MB to KB - broadcast_vars[var_name] = get_var_size( - block.var(var_name)) * 1024.0 + broadcast_vars[var_name] = ( + get_var_size(block.var(var_name)) * 1024.0 + ) elif op.type == "c_allreduce_sum": var_name = op.desc.input_arg_names()[0] reduce_vars[var_name] = get_var_size(block.var(var_name)) * 1024.0 @@ -836,14 +946,14 @@ def comm_analyse(main_program): for k, v in broadcast_vars.items(): print("broadcast: {}: {} KB".format(k, v)) - if (int(v / gap) in varsize_count): + if int(v / gap) in varsize_count: varsize_count[int(v / gap)] += 1 else: varsize_count[int(v / gap)] = 1 for k, v in reduce_vars.items(): print("allreduce: {}: {} KB".format(k, v)) - if (int(v / gap) in varsize_count): + if int(v / gap) in varsize_count: varsize_count[int(v / gap)] += 1 else: varsize_count[int(v / gap)] = 1 @@ -852,8 +962,9 @@ def comm_analyse(main_program): sorted_varsize = sorted(varsize_count.items(), key=lambda x: x[0]) for varsize, count in sorted_varsize: print("NCCL size {}~{} KB: {}".format(varsize, varsize + 1, count)) - f.write("NCCL size {}~{} KB: {}\n".format(varsize, varsize + 1, - count)) + f.write( + "NCCL size {}~{} KB: {}\n".format(varsize, varsize + 1, count) + ) def add_sync_comm(program, sharding_ring_id): @@ -863,7 +974,7 @@ def add_sync_comm(program, sharding_ring_id): add the sync_comm op for the test prog. """ - #NOTE (liangjianzhong): only support one comm stream by now, use more than one + # NOTE (liangjianzhong): only support one comm stream by now, use more than one # comm streams will cause error. should be revise in future. 
assert sharding_ring_id >= 0, "sharding_ring_id should larger than zero" @@ -877,15 +988,15 @@ def add_sync_comm(program, sharding_ring_id): for input_name in op.desc.input_arg_names(): not_sync_vars.remove(input_name) if not_sync_vars: - block.append_op(type='c_sync_comm_stream', - inputs={'X': list(not_sync_vars)}, - outputs={'Out': list(not_sync_vars)}, - attrs={ - 'ring_id': - sharding_ring_id, - 'op_role': - core.op_proto_and_checker_maker.OpRole.Forward - }) + block.append_op( + type='c_sync_comm_stream', + inputs={'X': list(not_sync_vars)}, + outputs={'Out': list(not_sync_vars)}, + attrs={ + 'ring_id': sharding_ring_id, + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, + }, + ) return @@ -905,8 +1016,14 @@ def save_persistables(exe, dirname, main_program, filename=None): # support EMA optimizer with '_ema_0', # support offload with '@offload_0' and '.cast_fp16' checks = [ - "_moment1_0", "_moment2_0", "_beta1_pow_acc_0", "_beta2_pow_acc_0", - "_velocity_0", "_ema_0", "@offload_0", ".cast_fp16" + "_moment1_0", + "_moment2_0", + "_beta1_pow_acc_0", + "_beta2_pow_acc_0", + "_velocity_0", + "_ema_0", + "@offload_0", + ".cast_fp16", ] for check in checks: if var.name.endswith(check) and var.persistable: @@ -919,24 +1036,27 @@ def save_persistables(exe, dirname, main_program, filename=None): return var.name.endswith("@GradiantMerge") def is_trainable(var): - return isinstance(var, - paddle.fluid.framework.Parameter) and var.trainable + return ( + isinstance(var, paddle.fluid.framework.Parameter) and var.trainable + ) def sharding_predicate(var): - return is_trainable(var) or is_opt_vars(var) or is_gradient_merge_vars( - var) + return ( + is_trainable(var) or is_opt_vars(var) or is_gradient_merge_vars(var) + ) if int(os.environ.get('PADDLE_TRAINER_ID', 0)) == 0: - paddle.fluid.io.save_persistables(exe, - dirname, - main_program=main_program, - filename=None) + paddle.fluid.io.save_persistables( + exe, dirname, main_program=main_program, filename=None + ) else: - paddle.fluid.io.save_vars(exe, - dirname, - main_program=main_program, - predicate=sharding_predicate, - filename=None) + paddle.fluid.io.save_vars( + exe, + dirname, + main_program=main_program, + predicate=sharding_predicate, + filename=None, + ) return @@ -944,22 +1064,28 @@ def save_persistables(exe, dirname, main_program, filename=None): def append_naive_sync(block, sync_var, ring_id): # NOTE (JZ-LIANG) update this to use barrier sync for more elegent logic # sync within global - block.append_op(type="fill_constant", - outputs={"Out": sync_var}, - attrs={ - "shape": sync_var.shape, - "dtype": sync_var.dtype, - "value": int(1), - }) - block.append_op(type='c_allreduce_sum', - inputs={'X': sync_var}, - outputs={'Out': sync_var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) - block.append_op(type='c_sync_calc_stream', - inputs={'X': [sync_var]}, - outputs={'Out': [sync_var]}, - attrs={OP_ROLE_KEY: OpRole.Forward}) + block.append_op( + type="fill_constant", + outputs={"Out": sync_var}, + attrs={ + "shape": sync_var.shape, + "dtype": sync_var.dtype, + "value": int(1), + }, + ) + block.append_op( + type='c_allreduce_sum', + inputs={'X': sync_var}, + outputs={'Out': sync_var}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_sync_calc_stream', + inputs={'X': [sync_var]}, + outputs={'Out': [sync_var]}, + attrs={OP_ROLE_KEY: OpRole.Forward}, + ) diff --git 
a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py index 42c52af44311c50b63e8341224e961cff481b646..3d5d8aa2a3851bc1ece59ae8443a5f260d91e3b7 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/weight_decay_helper.py @@ -18,13 +18,13 @@ __all__ = [] class WeightDecayHelper(object): - def __init__(self): pass def _is_weight_decay_op(self, op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/regularization") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/regularization") def prune_weight_decay(self, block, shard): for idx, op in reversed(list(enumerate(block.ops))): @@ -33,7 +33,8 @@ class WeightDecayHelper(object): if OP_ROLE_VAR_KEY not in op.attr_names: raise ValueError( "The Weight Dacay op should hold op_role_var attribute" - "but the {} op does not hold op_role_var".format(op.type)) + "but the {} op does not hold op_role_var".format(op.type) + ) op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY] if not shard.has_param(op_role_var[0]): block._remove_op(idx, sync=False) diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py index 073de83d19acbd8f502ba09faf73298469393f2c..da0a08aefa109ecdd44a295acbdb940b57bfe46e 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py @@ -94,7 +94,7 @@ class ShardingOptimizer(MetaOptimizerBase): dist_strategy.sharding_configs = {"segment_broadcast_MB": 32} def _get_sharding_segment_strategy(self): - """ get + """get self._sharding_segment_strategy 1. if by_size: self._broadcast_MB 2. if by_anchors: self._sharding_segment_anchors @@ -107,21 +107,26 @@ class ShardingOptimizer(MetaOptimizerBase): if segment_strategy == "segment_broadcast_MB": self._broadcast_MB = sharding_configs["segment_broadcast_MB"] - assert self._broadcast_MB > 0, "segment size should larger than zero !" + assert ( + self._broadcast_MB > 0 + ), "segment size should larger than zero !" elif segment_strategy == "segment_anchors": self._sharding_segment_anchors = sharding_configs["segment_anchors"] - assert len(self._sharding_segment_anchors - ) > 0, "you should set the sharding segment anchors !" + assert ( + len(self._sharding_segment_anchors) > 0 + ), "you should set the sharding segment anchors !" 
self._backward_remain_anchors = self._sharding_segment_anchors[:] self._forward_remain_anchors = [] else: raise NotImplementedError( "the sharding segment strategy [{}] is not implemented".format( - str(segment_strategy))) + str(segment_strategy) + ) + ) self._sharding_segment_strategy = segment_strategy def _get_hybrid_degree(self): - """ get + """get self.hybrid_dp self.sharding_degree self.mp_degree @@ -145,21 +150,32 @@ class ShardingOptimizer(MetaOptimizerBase): assert strategy.pipeline is True if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None): - assert pp_degree == 2, ("For manually set pipeline, only " - "pp_degree = 2 is supported.") - assert global_world_size == mp_degree * sharding_degree * dp_degree, \ - "global work size [{}], mp_degree [{}], sharding_degree [{}], dp_degree [{}].".format( - global_world_size, mp_degree, sharding_degree, dp_degree) + assert pp_degree == 2, ( + "For manually set pipeline, only " "pp_degree = 2 is supported." + ) + assert ( + global_world_size == mp_degree * sharding_degree * dp_degree + ), "global work size [{}], mp_degree [{}], sharding_degree [{}], dp_degree [{}].".format( + global_world_size, mp_degree, sharding_degree, dp_degree + ) else: - assert global_world_size == mp_degree * sharding_degree * pp_degree * dp_degree, \ - "global work size [{}], mp_degree [{}], sharding_degree [{}], pp_degree [{}], dp_degree [{}].".format( - global_world_size, mp_degree, sharding_degree, pp_degree, dp_degree) + assert ( + global_world_size + == mp_degree * sharding_degree * pp_degree * dp_degree + ), "global work size [{}], mp_degree [{}], sharding_degree [{}], pp_degree [{}], dp_degree [{}].".format( + global_world_size, + mp_degree, + sharding_degree, + pp_degree, + dp_degree, + ) # FIXME (JZ-LIANG) deprecated hybrid_dp if sharding_configs["hybrid_dp"]: logger.warning( "[hybrid_dp] API setting is deprecated. Now when " - "dp_degree >= 2, its will be in hybrid dp mode automatically") + "dp_degree >= 2, its will be in hybrid dp mode automatically" + ) assert dp_degree >= 1 self.hybrid_dp = True if dp_degree > 1 else False @@ -169,7 +185,7 @@ class ShardingOptimizer(MetaOptimizerBase): self.dp_degree = dp_degree def _get_hybrid_dp_mode(self): - """ get + """get self.hybrid_dp_mode = 'pp_hybrid_dp' or 'sharding_hybrid_dp' self.gradient_merge_mode = 'pp_gm' or 'sharding_gm' self._gradient_merge_acc_step @@ -193,9 +209,10 @@ class ShardingOptimizer(MetaOptimizerBase): if self.pp_degree > 1: dp_mode = "pp_hybrid_dp" else: - assert self.sharding_degree > 1, \ - "by now we only support five kind of hybrid dp: sharding_hybrid_dp, " \ + assert self.sharding_degree > 1, ( + "by now we only support five kind of hybrid dp: sharding_hybrid_dp, " "mp_sharding_hybrid_dp, pp_hybrid_dp, mp_sharding_pp_hybrid_dp, sharding_pp_hybrid_dp." + ) dp_mode = "sharding_hybrid_dp" # gradient merge @@ -208,23 +225,33 @@ class ShardingOptimizer(MetaOptimizerBase): gm_mode = "pp_gm" gm_acc_step = strategy.pipeline_configs['accumulate_steps'] gradient_scale_configs = strategy.gradient_scale_configs - assert gradient_scale_configs['scale_strategy'] == 'avg', \ - 'For pipeline mode, the ' 'gradient scale mode should ' \ - 'be "avg", but got {}'.format(gradient_scale_configs['scale_strategy']) + assert gradient_scale_configs['scale_strategy'] == 'avg', ( + 'For pipeline mode, the ' + 'gradient scale mode should ' + 'be "avg", but got {}'.format( + gradient_scale_configs['scale_strategy'] + ) + ) # Note (Yuang Liu): this avg_loss flag determines where to do the average op for grad merge. 
# If True, will do sum firstly for gradient merge, then do scale by gm_acc_step. # If False, will scale loss by gm_acc_step first, then do sum for gradient merge. self.scale_gradient = gradient_scale_configs['scale_gradient'] if gm_acc_step > 1: - logger.info("Gradient merge in [{}], acc step = [{}]".format( - gm_mode, gm_acc_step)) + logger.info( + "Gradient merge in [{}], acc step = [{}]".format( + gm_mode, gm_acc_step + ) + ) optimizer_sharding = False # TODO(wangxi): need support dp_as_opt_sharding with sharding # need support without pp in future - if self.sharding_degree == 1 and self.dp_degree > 1 \ - and sharding_configs['_dp_as_optimizer_sharding'] \ - and self.pp_degree > 1: + if ( + self.sharding_degree == 1 + and self.dp_degree > 1 + and sharding_configs['_dp_as_optimizer_sharding'] + and self.pp_degree > 1 + ): optimizer_sharding = True self.hybrid_dp_mode = dp_mode @@ -234,19 +261,23 @@ class ShardingOptimizer(MetaOptimizerBase): # this feature is design for ascend, and should NOT be used in GPU training self.pp_allreduce_in_optimize = sharding_configs[ - "pp_allreduce_in_optimize"] + "pp_allreduce_in_optimize" + ] - def _inner_opt_minimize(self, loss, startup_program, parameter_list, - no_grad_set): + def _inner_opt_minimize( + self, loss, startup_program, parameter_list, no_grad_set + ): pipeline_configs = self.user_defined_strategy.pipeline_configs if self.inner_opt is None: raise ValueError( - "self.inner_opt of ShardingOptimizer should not be None.") + "self.inner_opt of ShardingOptimizer should not be None." + ) if self.pp_degree > 1: pp_optimizer = fluid.optimizer.PipelineOptimizer( - self.inner_opt, self._gradient_merge_acc_step) + self.inner_opt, self._gradient_merge_acc_step + ) self._pp_optimizer = pp_optimizer global_rank = self.role_maker._worker_index() @@ -263,17 +294,25 @@ class ShardingOptimizer(MetaOptimizerBase): 'global_ring_id': 3, 'mp_degree': self.mp_degree, 'mp_rank': global_rank % self.mp_degree, - 'scale_gradient': self.scale_gradient + 'scale_gradient': self.scale_gradient, } main_program = loss.block.program main_program._pipeline_opt = pipeline_opt - optimize_ops, params_grads, program_list, self.pipeline_pair, self.pp_ring_map = pp_optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set) + ( + optimize_ops, + params_grads, + program_list, + self.pipeline_pair, + self.pp_ring_map, + ) = pp_optimizer.minimize( + loss, startup_program, parameter_list, no_grad_set + ) assert self.pp_degree == len(program_list) else: optimize_ops, params_grads = self.inner_opt.minimize( - loss, startup_program, parameter_list, no_grad_set) + loss, startup_program, parameter_list, no_grad_set + ) if startup_program is None: startup_program = default_startup_program() @@ -282,8 +321,9 @@ class ShardingOptimizer(MetaOptimizerBase): startup_program = startup_program._pipeline_opt['startup_program'] print("pp_rank:", self.pp_rank) if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None): - main_program = program_list[int( - os.getenv("PADDLE_MANUAL_PIPELINE_STAGE"))] + main_program = program_list[ + int(os.getenv("PADDLE_MANUAL_PIPELINE_STAGE")) + ] else: main_program = program_list[self.pp_rank] with open("main_%d" % self.role_maker._worker_index(), 'w') as f: @@ -309,14 +349,16 @@ class ShardingOptimizer(MetaOptimizerBase): return optimize_ops, params_grads def _apply_sharding_pass(self, params_grads): - if self.sharding_degree == 1: return + if self.sharding_degree == 1: + return main_block = self._main_program.global_block() startup_block = 
self._startup_program.global_block() # step1: build shard - self._build_shard(params_grads, self.sharding_rank, - self.sharding_degree) + self._build_shard( + params_grads, self.sharding_rank, self.sharding_degree + ) # step2: split_program self._split_program(main_block) @@ -328,13 +370,16 @@ class ShardingOptimizer(MetaOptimizerBase): # step4: remove unneeded ops and vars from block self._prune_main_program( - main_block, self._shard, - [self.mp_ring_id, self.sharding_ring_id, self.pp_ring_id]) + main_block, + self._shard, + [self.mp_ring_id, self.sharding_ring_id, self.pp_ring_id], + ) self._prune_startup_program(startup_block, self._shard) def _apply_opt_sharding_pass(self, params_grads): - """ outer dp as optimizer sharding """ - if self._optimizer_sharding is False: return + """outer dp as optimizer sharding""" + if self._optimizer_sharding is False: + return main_block = self._main_program.global_block() startup_block = self._startup_program.global_block() @@ -348,12 +393,15 @@ class ShardingOptimizer(MetaOptimizerBase): # step4: remove unneeded ops and vars from block self._prune_main_program( - main_block, self._shard, - [self.mp_ring_id, self.pp_ring_id, self.dp_ring_id]) + main_block, + self._shard, + [self.mp_ring_id, self.pp_ring_id, self.dp_ring_id], + ) self._prune_startup_program(startup_block, self._shard) def _insert_allreduce_for_pp(self, params_grads): - if self.pp_degree == 1: return + if self.pp_degree == 1: + return strategy = self.user_defined_strategy sharding_configs = strategy.sharding_configs @@ -373,10 +421,12 @@ class ShardingOptimizer(MetaOptimizerBase): main_block._remove_op(idx) for idx, op in reversed(list(enumerate(main_block.ops))): - if op.type != 'cast': continue + if op.type != 'cast': + continue in_name = op.input_arg_names[0] - if in_name not in self._params: continue - #if self._shard.has_param(param_name): continue + if in_name not in self._params: + continue + # if self._shard.has_param(param_name): continue if in_name not in main_block.vars: main_block._remove_op(idx) @@ -386,7 +436,8 @@ class ShardingOptimizer(MetaOptimizerBase): shard = self._shard if self._optimizer_sharding else None accumulated_grad_names = self._pp_optimizer._accumulate_gradients( - main_block, strategy=strategy, shard=shard) + main_block, strategy=strategy, shard=shard + ) len_of_ops = len(main_block.ops) if self.scale_gradient: @@ -394,8 +445,9 @@ class ShardingOptimizer(MetaOptimizerBase): first_optimize_op_index = get_first_optimize_op_idx(main_block) if self.pp_allreduce_in_optimize: - logger.info("Pipeline Persistable grad is {}".format( - accumulated_grad_names)) + logger.info( + "Pipeline Persistable grad is {}".format(accumulated_grad_names) + ) # FIXME(wangxi): accumulated_grad get from pipeline is not # include sharding's param@BroadCast grad when # pp_allreduce_in_optimize @@ -407,10 +459,11 @@ class ShardingOptimizer(MetaOptimizerBase): self._shard, core.op_proto_and_checker_maker.OpRole.Optimize, use_calc_stream=True, - rank=self.sharding_rank) + rank=self.sharding_rank, + ) logger.info("PP-Sharding grad is {}".format(accumulated_grad_names)) - first_optimize_op_index += (len(main_block.ops) - len_of_ops) + first_optimize_op_index += len(main_block.ops) - len_of_ops len_of_ops = len(main_block.ops) if self._optimizer_sharding: @@ -423,10 +476,12 @@ class ShardingOptimizer(MetaOptimizerBase): OpRole.Optimize, use_calc_stream=True, rank=self.dp_rank, - strategy=strategy) + strategy=strategy, + ) logger.info( - "Optimizer grad in this rank 
{}".format(accumulated_grad_names)) - first_optimize_op_index += (len(main_block.ops) - len_of_ops) + "Optimizer grad in this rank {}".format(accumulated_grad_names) + ) + first_optimize_op_index += len(main_block.ops) - len_of_ops len_of_ops = len(main_block.ops) # NOTE(wangxi): we fused after optimize_cast @@ -434,14 +489,17 @@ class ShardingOptimizer(MetaOptimizerBase): optimizer_param = utils.insert_broadcast_param_ops( main_block, len_of_ops, - self.dp_ring_id, [x[0].name for x in params_grads], + self.dp_ring_id, + [x[0].name for x in params_grads], self._shard, OpRole.Optimize, use_calc_stream=True, rank=self.dp_rank, - strategy=None if optimize_cast else strategy) + strategy=None if optimize_cast else strategy, + ) logger.info( - "Optimizer param in this rank {}".format(optimizer_param)) + "Optimizer param in this rank {}".format(optimizer_param) + ) if not strategy.fuse_grad_merge and not optimize_cast: assert len(accumulated_grad_names) == len(optimizer_param) elif self.hybrid_dp and self.hybrid_dp_mode == "pp_hybrid_dp": @@ -452,15 +510,20 @@ class ShardingOptimizer(MetaOptimizerBase): accumulated_grad_names, core.op_proto_and_checker_maker.OpRole.Optimize, use_calc_stream=True, - user_defined_strategy=strategy) - first_optimize_op_index += (len(main_block.ops) - len_of_ops) + user_defined_strategy=strategy, + ) + first_optimize_op_index += len(main_block.ops) - len_of_ops len_of_ops = len(main_block.ops) # FIXME(wangxi): if fp16_allreduce, put cast fp16->fp32 to there? def _avg_grad_merge_after_sum(self, main_block, accumulated_grad_names): - if self.user_defined_strategy.amp and \ - self.user_defined_strategy.amp_configs['use_dynamic_loss_scaling']: + if ( + self.user_defined_strategy.amp + and self.user_defined_strategy.amp_configs[ + 'use_dynamic_loss_scaling' + ] + ): # For AMP, if using dynamic loss scaling the avg # operation can be simple done by modify the LossScaling op. for idx, op in enumerate(main_block.ops): @@ -471,7 +534,8 @@ class ShardingOptimizer(MetaOptimizerBase): loss_scale_tmp_var = main_block.create_var( name=loss_scale_tmp_var_name, shape=loss_scaling_var.shape, - dtype=loss_scaling_var.dtype) + dtype=loss_scaling_var.dtype, + ) main_block._insert_op_without_sync( idx, type='scale', @@ -481,8 +545,9 @@ class ShardingOptimizer(MetaOptimizerBase): 'scale': self._gradient_merge_acc_step, 'bias': 0.0, 'bias_after_scale': False, - OP_ROLE_KEY: OpRole.Optimize - }) + OP_ROLE_KEY: OpRole.Optimize, + }, + ) op._rename_input(loss_scale_name, loss_scale_tmp_var_name) break else: @@ -493,7 +558,9 @@ class ShardingOptimizer(MetaOptimizerBase): if is_optimizer_op(op) and op.type != 'c_sync_comm_stream': tmp_first_opt_idx = idx break - assert tmp_first_opt_idx is not None, 'Occurs some errors, no optimize ops' + assert ( + tmp_first_opt_idx is not None + ), 'Occurs some errors, no optimize ops' for grad in accumulated_grad_names: main_block._insert_op_without_sync( tmp_first_opt_idx, @@ -504,14 +571,17 @@ class ShardingOptimizer(MetaOptimizerBase): 'scale': 1.0 / self._gradient_merge_acc_step, 'bias': 0.0, 'bias_after_scale': False, - OP_ROLE_KEY: OpRole.Optimize - }) + OP_ROLE_KEY: OpRole.Optimize, + }, + ) def _adapt_amp_clip_without_sharding(self): # if not use sharding, adapt amp/clip, for remain parallelism. 
# cast --> amp --> clip --> opt - if self.sharding_degree > 1: return - if self._optimizer_sharding: return + if self.sharding_degree > 1: + return + if self._optimizer_sharding: + return main_block = self._main_program.global_block() startup_block = self._startup_program.global_block() @@ -525,9 +595,9 @@ class ShardingOptimizer(MetaOptimizerBase): FP16Utils.sync_amp_check_nan_inf(main_block, rings) gradientclip_helper = GradientClipHelper(None) - gradientclip_helper.sync_global_norm(main_block, - [self.mp_ring_id, self.pp_ring_id], - self.mp_rank) + gradientclip_helper.sync_global_norm( + main_block, [self.mp_ring_id, self.pp_ring_id], self.mp_rank + ) def _insert_loss_grad_scale_op(self): main_block = self._main_program.global_block() @@ -548,8 +618,9 @@ class ShardingOptimizer(MetaOptimizerBase): mp_ring_id = self.mp_ring_id if self.mp_degree > 1 else None dp_ring_id = self.dp_ring_id if self.dp_degree > 1 else None - offload_helper = OffloadHelper(mp_ring_id=mp_ring_id, - dp_ring_id=dp_ring_id) + offload_helper = OffloadHelper( + mp_ring_id=mp_ring_id, dp_ring_id=dp_ring_id + ) # optimize offload should be enable while gradient merge is enable and # acc_step is quite large (e.g. >> 100). Since its memcpy could not be @@ -565,32 +636,32 @@ class ShardingOptimizer(MetaOptimizerBase): # will take more memory, but will be faster. Trade space for time. if self._optimizer_sharding: offload_helper.opt_sharding_cast_fp32param( - main_block, startup_block, - [x[0].name for x in params_grads]) + main_block, startup_block, [x[0].name for x in params_grads] + ) # NOTE(wangxi): fused after optimize_cast - utils.fuse_opt_broadcast_param_ops(main_block, - dp_ring_id, - self._shard, - strategy=strategy) + utils.fuse_opt_broadcast_param_ops( + main_block, dp_ring_id, self._shard, strategy=strategy + ) else: offload_helper.cast_fp32param_in_optimize( - main_block, startup_block) + main_block, startup_block + ) def _dump_program_for_debug(self): main_block = self._main_program.global_block() startup_block = self._startup_program.global_block() - with open("start_sharding_%d" % self.role_maker._worker_index(), - 'w') as f: + with open( + "start_sharding_%d" % self.role_maker._worker_index(), 'w' + ) as f: f.writelines(str(startup_block.program)) - with open("main_sharding_%d" % self.role_maker._worker_index(), - 'w') as f: + with open( + "main_sharding_%d" % self.role_maker._worker_index(), 'w' + ) as f: f.writelines(str(main_block.program)) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): # TODO: (JZ-LIANG) support multiple comm in future # self._nrings = self.user_defined_strategy.nccl_comm_num self._nrings_sharding = 1 @@ -605,7 +676,8 @@ class ShardingOptimizer(MetaOptimizerBase): # inner optimize minimize optimize_ops, params_grads = self._inner_opt_minimize( - loss, startup_program, parameter_list, no_grad_set) + loss, startup_program, parameter_list, no_grad_set + ) self._init_comm() @@ -654,13 +726,15 @@ class ShardingOptimizer(MetaOptimizerBase): ] pp_rank = 0 if self.pp_rank == pair[0] else 1 if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None) is None: - self._collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - pp_group_endpoints, - pp_rank, - ring_id, - False, - sync=False) + self._collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + pp_group_endpoints, + pp_rank, + 
ring_id, + False, + sync=False, + ) def _init_npu_pipeline_comm(self, startup_block): # NOTE(wangxi): some bug with hccl, must set pp_degree be even number @@ -678,15 +752,22 @@ class ShardingOptimizer(MetaOptimizerBase): my_pair.append(pair) # for example: self.pp_rank=2, self.pp_degree=4 - send_to_next_pair = (self.pp_rank, (self.pp_rank + 1) % self.pp_degree - ) # 2->3 + send_to_next_pair = ( + self.pp_rank, + (self.pp_rank + 1) % self.pp_degree, + ) # 2->3 recv_from_next_pair = ( - (self.pp_rank + 1) % self.pp_degree, self.pp_rank) # 3->2 + (self.pp_rank + 1) % self.pp_degree, + self.pp_rank, + ) # 3->2 recv_from_prev_pair = ( - (self.pp_rank - 1 + self.pp_degree) % self.pp_degree, self.pp_rank + (self.pp_rank - 1 + self.pp_degree) % self.pp_degree, + self.pp_rank, ) # 1->2 - send_to_prev_pair = (self.pp_rank, (self.pp_rank - 1 + self.pp_degree) % - self.pp_degree) # 2->1 + send_to_prev_pair = ( + self.pp_rank, + (self.pp_rank - 1 + self.pp_degree) % self.pp_degree, + ) # 2->1 even = (self.pp_rank % 2) == 0 @@ -695,54 +776,66 @@ class ShardingOptimizer(MetaOptimizerBase): ring_id = self.pp_ring_map[pair[0] * 1000 + pair[1]] self._init_pair_comm(pair, ring_id) my_pair.remove(pair) - logger.info("pair0(even->odd): pp pair:{}, ring_id: {}".format( - pair, ring_id)) + logger.info( + "pair0(even->odd): pp pair:{}, ring_id: {}".format(pair, ring_id) + ) # 2. even recv from next, odd send to prev, 1->0, 3->2 pair = recv_from_next_pair if even else send_to_prev_pair ring_id = self.pp_ring_map[pair[0] * 1000 + pair[1]] self._init_pair_comm(pair, ring_id) my_pair.remove(pair) - logger.info("pair1(even<-odd): pp pair:{}, ring_id: {}".format( - pair, ring_id)) + logger.info( + "pair1(even<-odd): pp pair:{}, ring_id: {}".format(pair, ring_id) + ) # if pp_degree is 2, only need pair(0->1, 1->0) if self.pp_degree > 2: # 3. odd send to next, even recv from prev, 1->2, 3->0 pair = send_to_next_pair if not even else recv_from_prev_pair - ring_id = self.pp_ring_map.get(pair[0] * 1000 + pair[1], - max_ring_id + - 1) # 3->0 not in pp_ring_map + ring_id = self.pp_ring_map.get( + pair[0] * 1000 + pair[1], max_ring_id + 1 + ) # 3->0 not in pp_ring_map self._init_pair_comm(pair, ring_id) if self.pp_rank != 0 and self.pp_rank != self.pp_degree - 1: my_pair.remove(pair) - logger.info("pair2(odd->even): pp pair:{}, ring_id: {}".format( - pair, ring_id)) + logger.info( + "pair2(odd->even): pp pair:{}, ring_id: {}".format( + pair, ring_id + ) + ) # 4. 
odd recv from next, even send to prev, 2->1, 0->3 pair = recv_from_next_pair if not even else send_to_prev_pair - ring_id = self.pp_ring_map.get(pair[0] * 1000 + pair[1], - max_ring_id + - 2) # 0->3 not in pp_ring_map + ring_id = self.pp_ring_map.get( + pair[0] * 1000 + pair[1], max_ring_id + 2 + ) # 0->3 not in pp_ring_map self._init_pair_comm(pair, ring_id) if self.pp_rank != 0 and self.pp_rank != self.pp_degree - 1: my_pair.remove(pair) - logger.info("pair3(odd<-even): pp pair:{}, ring_id: {}".format( - pair, ring_id)) + logger.info( + "pair3(odd<-even): pp pair:{}, ring_id: {}".format( + pair, ring_id + ) + ) - assert len(my_pair) == 0, "Current pipeline does not support cross stage communication, " \ - "please check unexpected pair {}".format(my_pair) + assert len(my_pair) == 0, ( + "Current pipeline does not support cross stage communication, " + "please check unexpected pair {}".format(my_pair) + ) def _init_pipeline_comm(self, startup_block): # TODO (JZ-LIANG) to unify pp_rank_ and pp_rank if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None) is None: - self._collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - self.pp_group_endpoints, - self.pp_rank, - self.pp_ring_id, - False, - sync=False) + self._collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + self.pp_group_endpoints, + self.pp_rank, + self.pp_ring_id, + False, + sync=False, + ) if core.is_compiled_with_npu(): self._init_npu_pipeline_comm(startup_block) @@ -762,13 +855,15 @@ class ShardingOptimizer(MetaOptimizerBase): # mp ring if self.mp_degree > 1: - self._collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - self.mp_group_endpoints, - self.mp_rank, - self.mp_ring_id, - False, - sync=False) + self._collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + self.mp_group_endpoints, + self.mp_rank, + self.mp_ring_id, + False, + sync=False, + ) # sharding ring if self.sharding_degree > 1: @@ -779,7 +874,8 @@ class ShardingOptimizer(MetaOptimizerBase): self.sharding_rank, self.sharding_ring_id, False, - sync=False) + sync=False, + ) # pp ring if self.pp_degree > 1: @@ -787,13 +883,15 @@ class ShardingOptimizer(MetaOptimizerBase): # pure dp ring if self.dp_degree > 1: - self._collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - self.dp_group_endpoints, - self.dp_rank, - self.dp_ring_id, - False, - sync=False) + self._collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + self.dp_group_endpoints, + self.dp_rank, + self.dp_ring_id, + False, + sync=False, + ) startup_block._sync_with_cpp() @@ -804,9 +902,12 @@ class ShardingOptimizer(MetaOptimizerBase): # step 3: get broadcast vars self._broadcast_vars = self._shard.find_broadcast_params( - self._main_program.global_block()) + self._main_program.global_block() + ) - def _wait(self, ): + def _wait( + self, + ): endpoints = self.global_endpoints[:] current_endpoint = endpoints[self.global_rank] if self.global_rank == 0: @@ -831,7 +932,7 @@ class ShardingOptimizer(MetaOptimizerBase): segment._end_idx = last_backward_op_idx for op_idx in reversed(range(last_backward_op_idx)): op = block.ops[op_idx] - assert (int(op.attr('op_role')) != int(OpRole.Optimize)) + assert int(op.attr('op_role')) != int(OpRole.Optimize) if self._sharding_segment_strategy == "segment_broadcast_MB": if segment._param_mem >= self._broadcast_MB: segment = self.collect_segment(segment, op_idx, block) @@ -845,21 
+946,27 @@ class ShardingOptimizer(MetaOptimizerBase): if ".cast_fp16@GRAD" not in input_name: continue else: - input_name = input_name[:input_name. - find(".cast_fp16@GRAD")] + input_name = input_name[ + : input_name.find(".cast_fp16@GRAD") + ] if input_name in self._backward_remain_anchors: segment = self.collect_segment( - segment, op_idx, block) - assert input_name not in self._forward_remain_anchors, "segment anchor [{}] met twice !".format( - input_name) + segment, op_idx, block + ) + assert ( + input_name not in self._forward_remain_anchors + ), "segment anchor [{}] met twice !".format( + input_name + ) self._backward_remain_anchors.remove(input_name) self._forward_remain_anchors.append(input_name) elif int(op.attr('op_role')) == int(OpRole.Forward): for output_name in op.desc.output_arg_names(): if output_name in self._forward_remain_anchors: segment = self.collect_segment( - segment, op_idx, block) + segment, op_idx, block + ) self._forward_remain_anchors.remove(output_name) # find broadcast vars @@ -875,47 +982,49 @@ class ShardingOptimizer(MetaOptimizerBase): if self._shard.has_param(input_name): broadcast_var_name = input_name else: - broadcast_var_name = unique_name.generate(input_name + - "@BroadCast") + broadcast_var_name = unique_name.generate( + input_name + "@BroadCast" + ) segment._fill_constant_vars.append(broadcast_var_name) # (JZ-LIANG) should use Param base name ? broadcast_var_base_name = input_name if "subprog" in broadcast_var_base_name: # remove suffix - broadcast_var_base_name = broadcast_var_base_name[: - broadcast_var_base_name - .find( - ".subprog" - )] + broadcast_var_base_name = broadcast_var_base_name[ + : broadcast_var_base_name.find(".subprog") + ] - var2broadcast_time[ - broadcast_var_base_name] = var2broadcast_time.get( - broadcast_var_base_name, 0) + 1 + var2broadcast_time[broadcast_var_base_name] = ( + var2broadcast_time.get(broadcast_var_base_name, 0) + 1 + ) segment._param2broadcast[input_name] = broadcast_var_name segment._broadcast_vars.append( - (broadcast_var_name, self._shard.device(input_name))) + (broadcast_var_name, self._shard.device(input_name)) + ) segment._param_mem += get_var_size( - self._main_program.global_block().var(input_name)) + self._main_program.global_block().var(input_name) + ) # find reduce vars if self.pp_degree > 1 and self.pp_allreduce_in_optimize: # place pipeline gradient allreduce in optimize pass else: - if is_backward_op(op) and \ - OP_ROLE_VAR_KEY in op.attr_names: + if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names: op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY] if len(op_role_var) != 0: assert len(op_role_var) % 2 == 0 for i in range(0, len(op_role_var), 2): - param, reduced_grad = op_role_var[i], op_role_var[i - + - 1] + param, reduced_grad = ( + op_role_var[i], + op_role_var[i + 1], + ) segment._allreduce_vars.append(reduced_grad) - assert (reduced_grad - not in self._reduced_grads_to_param) + assert ( + reduced_grad not in self._reduced_grads_to_param + ) self._reduced_grads_to_param[reduced_grad] = param # find cast op @@ -930,29 +1039,40 @@ class ShardingOptimizer(MetaOptimizerBase): self._segments.insert(0, segment) if self._sharding_segment_strategy == "segment_anchors": - assert len( - self._forward_remain_anchors) == 0, "remain anchors {}".format( - self._forward_remain_anchors) - assert len( - self._backward_remain_anchors) == 0, "remain anchors {}".format( - self._backward_remain_anchors) + assert ( + len(self._forward_remain_anchors) == 0 + ), "remain anchors 
{}".format(self._forward_remain_anchors) + assert ( + len(self._backward_remain_anchors) == 0 + ), "remain anchors {}".format(self._backward_remain_anchors) if self._verbose: - for varname in sorted(var2broadcast_time, - key=var2broadcast_time.get, - reverse=True): - logger.info("Sharding broadcast: [{}] times [{}]".format( - var2broadcast_time[varname], varname)) + for varname in sorted( + var2broadcast_time, key=var2broadcast_time.get, reverse=True + ): + logger.info( + "Sharding broadcast: [{}] times [{}]".format( + var2broadcast_time[varname], varname + ) + ) for idx_ in range(len(self._segments)): logger.info("segment [{}] :".format(idx_)) - logger.info("start op: [{}] [{}]".format( - block.ops[self._segments[idx_]._start_idx].desc.type(), - block.ops[self._segments[idx_]. - _start_idx].desc.input_arg_names())) - logger.info("end op: [{}] [{}]".format( - block.ops[self._segments[idx_]._end_idx].desc.type(), - block.ops[ - self._segments[idx_]._end_idx].desc.input_arg_names())) + logger.info( + "start op: [{}] [{}]".format( + block.ops[self._segments[idx_]._start_idx].desc.type(), + block.ops[ + self._segments[idx_]._start_idx + ].desc.input_arg_names(), + ) + ) + logger.info( + "end op: [{}] [{}]".format( + block.ops[self._segments[idx_]._end_idx].desc.type(), + block.ops[ + self._segments[idx_]._end_idx + ].desc.input_arg_names(), + ) + ) return def _prune_main_program(self, block, shard, rings): @@ -985,17 +1105,18 @@ class ShardingOptimizer(MetaOptimizerBase): input_names = op.desc.input_arg_names() output_names = op.desc.output_arg_names() # FIXME(wangxi): need use grads, pipeline grad is @GRAD@MERGE - if op.type == "c_allreduce_sum" and \ - op.attr('use_model_parallel') is False: - assert (len(output_names) == 1) + if ( + op.type == "c_allreduce_sum" + and op.attr('use_model_parallel') is False + ): + assert len(output_names) == 1 output_name = output_names[0] reduced_grads.append(output_name) # prune optimizer state and param pruned_opti_vars = [] for var_name in list(block.vars.keys()): - if shard.is_opti_var(var_name) and \ - not shard.has_opt_var(var_name): + if shard.is_opti_var(var_name) and not shard.has_opt_var(var_name): pruned_opti_vars.append(var_name) program_deps = ProgramDeps(block, reduced_grads, pruned_opti_vars) @@ -1006,17 +1127,17 @@ class ShardingOptimizer(MetaOptimizerBase): # Prune for idx, op in reversed(list(enumerate(block.ops))): if op.type in [ - "c_allreduce_sum", - "c_sync_comm_stream", - "c_calc_comm_stream", - "c_gen_nccl_id", - "c_comm_init", - 'send_v2', - 'recv_v2', + "c_allreduce_sum", + "c_sync_comm_stream", + "c_calc_comm_stream", + "c_gen_nccl_id", + "c_comm_init", + 'send_v2', + 'recv_v2', ]: pass elif op.type == "conditional_block": - assert (op.desc.has_attr("sub_block")) + assert op.desc.has_attr("sub_block") subblock_idx = op.desc.attr("sub_block").id subblock_deps = program_deps.get_sub_block_deps(subblock_idx) # only prune amp subblock @@ -1032,7 +1153,8 @@ class ShardingOptimizer(MetaOptimizerBase): reversed_output_vars.append(output_name) # prune for sub_op_idx, _ in reversed( - list(enumerate(subblock_deps._block.ops))): + list(enumerate(subblock_deps._block.ops)) + ): if subblock_deps.should_remove_op(sub_op_idx): subblock_deps.remove_op(sub_op_idx) reversed_input_vars = [] @@ -1048,7 +1170,9 @@ class ShardingOptimizer(MetaOptimizerBase): # _should_removed_var: opt state not cur shard if program_deps.should_remove_op(idx): # NOTE(wangxi): need reserve all param in optimizer_sharding - reserved_vars = self._params if 
self._optimizer_sharding else None + reserved_vars = ( + self._params if self._optimizer_sharding else None + ) program_deps.remove_op(idx, reserved_vars) # NOTE (JZ-LIANG) revise and unify logic here @@ -1059,7 +1183,8 @@ class ShardingOptimizer(MetaOptimizerBase): # remove inputs that not on this card reserved_x = [] for var_name in op.desc.input("X"): - if block.has_var(var_name): reserved_x.append(var_name) + if block.has_var(var_name): + reserved_x.append(var_name) op.desc.set_input('X', reserved_x) block._sync_with_cpp() return @@ -1082,175 +1207,280 @@ class ShardingOptimizer(MetaOptimizerBase): # NOTE (JZ-LIANG) revise and unify logic here # fix the _end_idx for segments[-1] if pp is used. new_end_idx = self._segments[-1]._end_idx - for idx in range(self._segments[-1]._end_idx - 1, - self._segments[-1]._start_idx - 1, -1): + for idx in range( + self._segments[-1]._end_idx - 1, + self._segments[-1]._start_idx - 1, + -1, + ): op = block.ops[idx] if op.type == "fill_constant" or op.type == "sum": - if "MERGED" in op.output_arg_names[0]: new_end_idx = idx + 1 + if "MERGED" in op.output_arg_names[0]: + new_end_idx = idx + 1 elif op.type == "cast": - if "@TMP" in op.output_arg_names[0]: new_end_idx = idx + 1 + if "@TMP" in op.output_arg_names[0]: + new_end_idx = idx + 1 self._segments[-1]._end_idx = new_end_idx if self._segments[-1]._allreduce_vars: shard_allredue_vars = self._shard.filter_grads( - self._segments[-1]._allreduce_vars) - if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1: - if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len( - shard_allredue_vars) >= 1: - insert_sync_comm_ops(block, self._segments[-1]._end_idx, - self.dp_ring_id, shard_allredue_vars) + self._segments[-1]._allreduce_vars + ) + if ( + self.gradient_merge_mode != "sharding_gm" + or self._gradient_merge_acc_step <= 1 + ): + if ( + self.hybrid_dp + and self.hybrid_dp_mode == "sharding_hybrid_dp" + and len(shard_allredue_vars) >= 1 + ): + insert_sync_comm_ops( + block, + self._segments[-1]._end_idx, + self.dp_ring_id, + shard_allredue_vars, + ) insert_allreduce_ops( block, self._segments[-1]._end_idx, self.dp_ring_id, shard_allredue_vars, - user_defined_strategy=self.user_defined_strategy) + user_defined_strategy=self.user_defined_strategy, + ) # gradient merge - elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1: + elif ( + self.gradient_merge_mode == "sharding_gm" + and self._gradient_merge_acc_step > 1 + ): self.create_persistable_gradients_and_insert_merge_ops( - block, self._startup_program.global_block(), - self._segments[-1]._end_idx, shard_allredue_vars, - self._shard) - - insert_sync_comm_ops(block, self._segments[-1]._end_idx, - self.sharding_ring_id, - self._segments[-1]._allreduce_vars) + block, + self._startup_program.global_block(), + self._segments[-1]._end_idx, + shard_allredue_vars, + self._shard, + ) + + insert_sync_comm_ops( + block, + self._segments[-1]._end_idx, + self.sharding_ring_id, + self._segments[-1]._allreduce_vars, + ) # allreduce --> reduce - insert_reduce_ops(block, - self._segments[-1]._end_idx, - self.sharding_ring_id, - self._segments[-1]._allreduce_vars, - self._shard, - op_role=OpRole.Backward, - use_calc_stream=False) + insert_reduce_ops( + block, + self._segments[-1]._end_idx, + self.sharding_ring_id, + self._segments[-1]._allreduce_vars, + self._shard, + op_role=OpRole.Backward, + use_calc_stream=False, + ) for idx, segment in reversed(list(enumerate(self._segments))): - allreduce_vars 
= self._segments[ - idx - 1]._allreduce_vars if idx > 0 else [] - broadcast_vars = self._segments[ - idx + - 1]._broadcast_vars if idx < len(self._segments) - 1 else [] - fill_constant_vars = self._segments[ - idx + - 2]._fill_constant_vars if idx < len(self._segments) - 2 else [] - cast_ops = self._segments[ - idx + 2]._cast_ops if idx < len(self._segments) - 2 else {} + allreduce_vars = ( + self._segments[idx - 1]._allreduce_vars if idx > 0 else [] + ) + broadcast_vars = ( + self._segments[idx + 1]._broadcast_vars + if idx < len(self._segments) - 1 + else [] + ) + fill_constant_vars = ( + self._segments[idx + 2]._fill_constant_vars + if idx < len(self._segments) - 2 + else [] + ) + cast_ops = ( + self._segments[idx + 2]._cast_ops + if idx < len(self._segments) - 2 + else {} + ) for op_idx in reversed(range(segment._start_idx, segment._end_idx)): op = block.ops[op_idx] for input_name in op.desc.input_arg_names(): - if input_name in segment._param2broadcast and \ - input_name != segment._param2broadcast[input_name]: - op._rename_input(input_name, - segment._param2broadcast[input_name]) + if ( + input_name in segment._param2broadcast + and input_name != segment._param2broadcast[input_name] + ): + op._rename_input( + input_name, segment._param2broadcast[input_name] + ) for param_name, broadcast_name in segment._param2broadcast.items(): if param_name != broadcast_name: block.create_var( name=broadcast_name, - shape=self._main_program.global_block().var( - param_name).shape, - dtype=self._main_program.global_block().var( - param_name).dtype, - persistable=False) + shape=self._main_program.global_block() + .var(param_name) + .shape, + dtype=self._main_program.global_block() + .var(param_name) + .dtype, + persistable=False, + ) # step1: remove cast ops block._sync_with_cpp() segment._end_idx += FP16Utils.remove_cast_op( - block, self._params, segment, 0) + block, self._params, segment, 0 + ) # step2: add Sync ops shard_allredue_vars = self._shard.filter_grads(allreduce_vars) - if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1: - if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len( - shard_allredue_vars) >= 1: - insert_sync_comm_ops(block, segment._end_idx, - self.dp_ring_id, shard_allredue_vars) + if ( + self.gradient_merge_mode != "sharding_gm" + or self._gradient_merge_acc_step <= 1 + ): + if ( + self.hybrid_dp + and self.hybrid_dp_mode == "sharding_hybrid_dp" + and len(shard_allredue_vars) >= 1 + ): + insert_sync_comm_ops( + block, + segment._end_idx, + self.dp_ring_id, + shard_allredue_vars, + ) broad_cast_vars = [x[0] for x in broadcast_vars] if len(broad_cast_vars) > 0: - insert_sync_comm_ops(block, segment._end_idx, - self.sharding_ring_id, - broad_cast_vars) + insert_sync_comm_ops( + block, + segment._end_idx, + self.sharding_ring_id, + broad_cast_vars, + ) else: comm_dep_vars = allreduce_vars + [ x[0] for x in broadcast_vars ] if len(comm_dep_vars) > 0: - insert_sync_comm_ops(block, segment._end_idx, - self.sharding_ring_id, - comm_dep_vars) + insert_sync_comm_ops( + block, + segment._end_idx, + self.sharding_ring_id, + comm_dep_vars, + ) # gradient merge - elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1: + elif ( + self.gradient_merge_mode == "sharding_gm" + and self._gradient_merge_acc_step > 1 + ): broad_cast_vars = [x[0] for x in broadcast_vars] if len(broad_cast_vars) > 0: - insert_sync_comm_ops(block, segment._end_idx, - self.sharding_ring_id, broad_cast_vars) + insert_sync_comm_ops( + 
block, + segment._end_idx, + self.sharding_ring_id, + broad_cast_vars, + ) - calc_dep_vars = fill_constant_vars + [ - k for k, v in cast_ops.items() - ] + self._segments[idx]._allreduce_vars + calc_dep_vars = ( + fill_constant_vars + + [k for k, v in cast_ops.items()] + + self._segments[idx]._allreduce_vars + ) if len(calc_dep_vars) > 0: - insert_sync_calc_op(block, segment._end_idx, - [calc_dep_vars[-1]]) + insert_sync_calc_op( + block, segment._end_idx, [calc_dep_vars[-1]] + ) # step3: insert `fill_constant` ops - insert_fill_constant_ops(block, segment._end_idx, - fill_constant_vars) + insert_fill_constant_ops( + block, segment._end_idx, fill_constant_vars + ) # step4: add `cast` ops insert_cast_ops(block, segment._end_idx, cast_ops) # step5: add broadcast ops # gradient merge - if self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1: + if ( + self.gradient_merge_mode == "sharding_gm" + and self._gradient_merge_acc_step > 1 + ): self.create_persistable_gradients_and_insert_merge_ops( - block, self._startup_program.global_block(), - segment._start_idx, shard_allredue_vars, self._shard) + block, + self._startup_program.global_block(), + segment._start_idx, + shard_allredue_vars, + self._shard, + ) - insert_broadcast_ops(block, segment._start_idx, - self.sharding_ring_id, broadcast_vars) + insert_broadcast_ops( + block, segment._start_idx, self.sharding_ring_id, broadcast_vars + ) # step6: add all_reduce ops # dp - if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1: - if self.hybrid_dp and self.hybrid_dp_mode == "sharding_hybrid_dp" and len( - shard_allredue_vars) >= 1: + if ( + self.gradient_merge_mode != "sharding_gm" + or self._gradient_merge_acc_step <= 1 + ): + if ( + self.hybrid_dp + and self.hybrid_dp_mode == "sharding_hybrid_dp" + and len(shard_allredue_vars) >= 1 + ): insert_allreduce_ops( block, segment._start_idx, self.dp_ring_id, shard_allredue_vars, - user_defined_strategy=self.user_defined_strategy) - insert_sync_comm_ops(block, segment._start_idx, - self.sharding_ring_id, allreduce_vars) + user_defined_strategy=self.user_defined_strategy, + ) + insert_sync_comm_ops( + block, + segment._start_idx, + self.sharding_ring_id, + allreduce_vars, + ) # gradient merge - elif self.gradient_merge_mode == "sharding_gm" and self._gradient_merge_acc_step > 1: - insert_sync_comm_ops(block, segment._start_idx, - self.sharding_ring_id, allreduce_vars) + elif ( + self.gradient_merge_mode == "sharding_gm" + and self._gradient_merge_acc_step > 1 + ): + insert_sync_comm_ops( + block, + segment._start_idx, + self.sharding_ring_id, + allreduce_vars, + ) # sharding # allreduce --> reduce # TODO temp change if len(allreduce_vars) > 0: - insert_reduce_ops(block, - segment._start_idx, - self.sharding_ring_id, - allreduce_vars, - self._shard, - op_role=OpRole.Backward, - use_calc_stream=False) + insert_reduce_ops( + block, + segment._start_idx, + self.sharding_ring_id, + allreduce_vars, + self._shard, + op_role=OpRole.Backward, + use_calc_stream=False, + ) block._sync_with_cpp() if self._segments[0]._broadcast_vars: broadcast_vars = [x[0] for x in self._segments[0]._broadcast_vars] - insert_sync_comm_ops(block, self._segments[0]._start_idx, - self.sharding_ring_id, broadcast_vars) - insert_broadcast_ops(block, self._segments[0]._start_idx, - self.sharding_ring_id, - self._segments[0]._broadcast_vars) + insert_sync_comm_ops( + block, + self._segments[0]._start_idx, + self.sharding_ring_id, + broadcast_vars, + ) + insert_broadcast_ops( + block, + 
self._segments[0]._start_idx, + self.sharding_ring_id, + self._segments[0]._broadcast_vars, + ) fill_constant_vars = [] for x in self._segments[:2]: @@ -1264,12 +1494,14 @@ class ShardingOptimizer(MetaOptimizerBase): calc_deps_vars = fill_constant_vars + [k for k, v in cast_ops.items()] if fill_constant_vars or cast_ops: - insert_sync_calc_op(block, self._segments[0]._start_idx, - [calc_deps_vars[-1]]) + insert_sync_calc_op( + block, self._segments[0]._start_idx, [calc_deps_vars[-1]] + ) if fill_constant_vars: - insert_fill_constant_ops(block, self._segments[0]._start_idx, - fill_constant_vars) + insert_fill_constant_ops( + block, self._segments[0]._start_idx, fill_constant_vars + ) if cast_ops: insert_cast_ops(block, self._segments[0]._start_idx, cast_ops) @@ -1283,7 +1515,7 @@ class ShardingOptimizer(MetaOptimizerBase): continue if self._optimizer_sharding and shard.is_param(output_name): continue - #TODO why do we remove op, when only one var is removed + # TODO why do we remove op, when only one var is removed block._remove_op(idx, sync=False) break @@ -1312,16 +1544,29 @@ class ShardingOptimizer(MetaOptimizerBase): self.global_rank = self.role_maker._worker_index() self.global_endpoints = self.role_maker._get_trainer_endpoints() self.current_endpoint = self.global_endpoints[self.global_rank] - self._collective_helper = CollectiveHelper(self.role_maker, - nrings=self._nrings_sharding) - assert self.global_word_size % self.mp_degree == 0, \ - "global_word_size: {} should be divisible to the mp_degree: {}".format(self.global_word_size, self.mp_degree) - assert self.global_word_size % self.sharding_degree == 0, \ - "global_word_size: {} should be divisible to the sharding_degree: {}".format(self.global_word_size, self.sharding_degree) - assert self.global_word_size % self.pp_degree == 0, \ - "global_word_size: {} should be divisible to the pp_degree: {}".format(self.global_word_size, self.pp_degree) - assert self.global_word_size % self.dp_degree == 0, \ - "global_word_size: {} should be divisible to the dp_degree: {}".format(self.global_word_size, self.dp_degree) + self._collective_helper = CollectiveHelper( + self.role_maker, nrings=self._nrings_sharding + ) + assert ( + self.global_word_size % self.mp_degree == 0 + ), "global_word_size: {} should be divisible to the mp_degree: {}".format( + self.global_word_size, self.mp_degree + ) + assert ( + self.global_word_size % self.sharding_degree == 0 + ), "global_word_size: {} should be divisible to the sharding_degree: {}".format( + self.global_word_size, self.sharding_degree + ) + assert ( + self.global_word_size % self.pp_degree == 0 + ), "global_word_size: {} should be divisible to the pp_degree: {}".format( + self.global_word_size, self.pp_degree + ) + assert ( + self.global_word_size % self.dp_degree == 0 + ), "global_word_size: {} should be divisible to the dp_degree: {}".format( + self.global_word_size, self.dp_degree + ) # mp group if self.mp_degree > 1: @@ -1329,14 +1574,16 @@ class ShardingOptimizer(MetaOptimizerBase): self.mp_rank = self.global_rank % self.mp_degree self.mp_group_id = self.global_rank // self.mp_degree self.mp_group_endpoints = [ - ep for idx, ep in enumerate(self.global_endpoints) + ep + for idx, ep in enumerate(self.global_endpoints) if idx // self.mp_degree == self.mp_group_id ] assert self.current_endpoint in self.mp_group_endpoints - assert len( - self.mp_group_endpoints - ) == self.mp_degree, "num of mp worker in group is [{}], but mp group size is [{}]".format( - len(self.mp_group_endpoints), 
self.mp_degree) + assert ( + len(self.mp_group_endpoints) == self.mp_degree + ), "num of mp worker in group is [{}], but mp group size is [{}]".format( + len(self.mp_group_endpoints), self.mp_degree + ) else: self.mp_degree = 1 self.mp_ring_id = -1 @@ -1347,23 +1594,28 @@ class ShardingOptimizer(MetaOptimizerBase): # sharding if self.sharding_degree > 1: self.sharding_ring_id = 1 - self.sharding_rank = (self.global_rank // - self.mp_degree) % self.sharding_degree - self.sharding_group_id = self.global_rank // (self.mp_degree * - self.sharding_degree) + self.sharding_rank = ( + self.global_rank // self.mp_degree + ) % self.sharding_degree + self.sharding_group_id = self.global_rank // ( + self.mp_degree * self.sharding_degree + ) # mp + sharding + ... if self.mp_degree > 1: self.sharding_group_endpoints = [ - ep for idx, ep in enumerate(self.global_endpoints) - if (idx // (self.mp_degree * self.sharding_degree)) == self. - sharding_group_id and idx % self.mp_degree == self.mp_rank + ep + for idx, ep in enumerate(self.global_endpoints) + if (idx // (self.mp_degree * self.sharding_degree)) + == self.sharding_group_id + and idx % self.mp_degree == self.mp_rank ] # sharding + ... else: self.sharding_group_endpoints = [ - ep for idx, ep in enumerate(self.global_endpoints) - if (idx // (self.mp_degree * self.sharding_degree) - ) == self.sharding_group_id + ep + for idx, ep in enumerate(self.global_endpoints) + if (idx // (self.mp_degree * self.sharding_degree)) + == self.sharding_group_id ] assert self.current_endpoint in self.sharding_group_endpoints else: @@ -1378,20 +1630,28 @@ class ShardingOptimizer(MetaOptimizerBase): self.pp_pair_ring_id = 20 # pipeline global ring_id set to 4 for sharding0, mp1, dp2, global3 self.pp_ring_id = 4 - self.pp_rank = self.global_rank // (self.sharding_degree * - self.mp_degree) % self.pp_degree + self.pp_rank = ( + self.global_rank + // (self.sharding_degree * self.mp_degree) + % self.pp_degree + ) # (NOTE): Already adjust for (outter-pure) dp self.pp_group_id = self.global_rank // ( - self.mp_degree * self.sharding_degree * self.pp_degree) + self.mp_degree * self.sharding_degree * self.pp_degree + ) pp_first_stage_idx = self.global_rank % ( - self.sharding_degree * self.mp_degree) + self.pp_group_id * ( - self.mp_degree * self.sharding_degree * self.pp_degree) + self.sharding_degree * self.mp_degree + ) + self.pp_group_id * ( + self.mp_degree * self.sharding_degree * self.pp_degree + ) pp_stage_offset = self.sharding_degree * self.mp_degree self.pp_group_endpoints = [] for i in range(self.pp_degree): self.pp_group_endpoints.append( - self.global_endpoints[pp_first_stage_idx + - pp_stage_offset * i]) + self.global_endpoints[ + pp_first_stage_idx + pp_stage_offset * i + ] + ) assert self.current_endpoint in self.pp_group_endpoints else: self.pp_ring_id = -1 @@ -1407,29 +1667,48 @@ class ShardingOptimizer(MetaOptimizerBase): # sharding-hybrid-dp as one senario of outter-pure-dp local_pp_degree = self.pp_degree if os.getenv("PADDLE_MANUAL_PIPELINE_STAGE", None): - assert self.pp_degree == 2, ("For manually set pipeline, only " - "pp_degree = 2 is supported.") - assert self.global_word_size == self.mp_degree * self.sharding_degree * self.dp_degree, \ - "global work size [{}], mp_degree [{}], sharding_degree [{}], dp_degree [{}].".format( - self.global_word_size, self.mp_degree, self.sharding_degree, self.dp_degree) + assert self.pp_degree == 2, ( + "For manually set pipeline, only " "pp_degree = 2 is supported." 
+ ) + assert ( + self.global_word_size + == self.mp_degree * self.sharding_degree * self.dp_degree + ), "global work size [{}], mp_degree [{}], sharding_degree [{}], dp_degree [{}].".format( + self.global_word_size, + self.mp_degree, + self.sharding_degree, + self.dp_degree, + ) local_pp_degree = 1 else: - assert self.global_word_size == self.mp_degree * self.sharding_degree * self.pp_degree * self.dp_degree, "mp_degree: [{}], sharding_degree: [{}], pp_degree: [{}], dp_degree: [{}]; BUT global nrank: [{}]".format( - self.mp_degree, self.sharding_degree, self.pp_degree, - self.dp_degree, self.global_word_size) + assert ( + self.global_word_size + == self.mp_degree + * self.sharding_degree + * self.pp_degree + * self.dp_degree + ), "mp_degree: [{}], sharding_degree: [{}], pp_degree: [{}], dp_degree: [{}]; BUT global nrank: [{}]".format( + self.mp_degree, + self.sharding_degree, + self.pp_degree, + self.dp_degree, + self.global_word_size, + ) if self.dp_degree > 1: self.dp_ring_id = 2 self.dp_rank = self.global_rank // ( - self.sharding_degree * self.mp_degree * local_pp_degree) + self.sharding_degree * self.mp_degree * local_pp_degree + ) dp_first_rank_idx = self.global_rank % ( - self.sharding_degree * self.mp_degree * local_pp_degree) - dp_offset = (self.sharding_degree * self.mp_degree * - local_pp_degree) + self.sharding_degree * self.mp_degree * local_pp_degree + ) + dp_offset = self.sharding_degree * self.mp_degree * local_pp_degree self.dp_group_endpoints = [] for i in range(self.dp_degree): self.dp_group_endpoints.append( - self.global_endpoints[dp_first_rank_idx + dp_offset * i]) + self.global_endpoints[dp_first_rank_idx + dp_offset * i] + ) assert self.current_endpoint in self.dp_group_endpoints logger.info("Hybrid DP mode turn on !") else: @@ -1458,8 +1737,9 @@ class ShardingOptimizer(MetaOptimizerBase): logger.info("sharding group size: {}".format(self.sharding_degree)) logger.info("sharding rank: {}".format(self.sharding_rank)) logger.info("sharding group id: {}".format(self.sharding_group_id)) - logger.info("sharding group endpoints: {}".format( - self.sharding_group_endpoints)) + logger.info( + "sharding group endpoints: {}".format(self.sharding_group_endpoints) + ) logger.info("sharding ring id: {}".format(self.sharding_ring_id)) logger.info("#####" * 6) @@ -1472,15 +1752,15 @@ class ShardingOptimizer(MetaOptimizerBase): logger.info("pure dp group size: {}".format(self.dp_degree)) logger.info("pure dp rank: {}".format(self.dp_rank)) - logger.info("pure dp group endpoints: {}".format( - self.dp_group_endpoints)) + logger.info( + "pure dp group endpoints: {}".format(self.dp_group_endpoints) + ) logger.info("pure dp ring id: {}".format(self.dp_ring_id)) logger.info("#####" * 6) return def _recreate_not_persist_param_as_var(self): - def recreate_not_persist_param_as_var(program): block = program.global_block() params = block.all_parameters() @@ -1504,14 +1784,16 @@ class ShardingOptimizer(MetaOptimizerBase): is_distributed = param.is_distributed block._remove_var(name, sync=False) - var = block.create_var(name=name, - shape=shape, - dtype=dtype, - type=type, - lod_level=lod_level, - stop_gradient=stop_gradient, - trainable=trainable, - persistable=False) + var = block.create_var( + name=name, + shape=shape, + dtype=dtype, + type=type, + lod_level=lod_level, + stop_gradient=stop_gradient, + trainable=trainable, + persistable=False, + ) if have_dist_attr: var.is_distributed = is_distributed @@ -1547,7 +1829,8 @@ class ShardingOptimizer(MetaOptimizerBase): 
broadcast_params.add(op.desc.output_arg_names()[0]) for param in params_name: - if param in broadcast_params: continue + if param in broadcast_params: + continue rings = [] # need sync not distributed param in mp group @@ -1557,30 +1840,37 @@ class ShardingOptimizer(MetaOptimizerBase): rings.append(self.dp_ring_id) for ring in rings: - startup_block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring, - 'root': 0, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Forward - }) + startup_block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring, + 'root': 0, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Forward, + }, + ) startup_block._sync_with_cpp() # sharding gradient merge def create_persistable_gradients_and_insert_merge_ops( - self, main_block, startup_block, insert_idx, grad_names, shard): + self, main_block, startup_block, insert_idx, grad_names, shard + ): for grad_name in grad_names: - assert get_grad_device( - grad_name, shard - ) == shard.worker_idx, "try to merge gradient not belong to current shard: [{}]".format( - grad_name) + assert ( + get_grad_device(grad_name, shard) == shard.worker_idx + ), "try to merge gradient not belong to current shard: [{}]".format( + grad_name + ) persistable_grad_name = grad_name + '@GradiantMerge' - assert grad_name not in self._grad2merged_grad, "grad [{}] already in grad2merged_grad, maybe you meet sharing weight case !".format( - grad_name) + assert ( + grad_name not in self._grad2merged_grad + ), "grad [{}] already in grad2merged_grad, maybe you meet sharing weight case !".format( + grad_name + ) self._grad2merged_grad[grad_name] = persistable_grad_name grad_var = main_block.var(grad_name) # create var @@ -1588,36 +1878,38 @@ class ShardingOptimizer(MetaOptimizerBase): name=persistable_grad_name, shape=grad_var.shape, dtype=grad_var.dtype, - persistable=True) + persistable=True, + ) startup_gradient_merge_var = startup_block.create_var( name=persistable_grad_name, shape=grad_var.shape, dtype=grad_var.dtype, - persistable=True) + persistable=True, + ) # merge gradient main_block._insert_op_without_sync( insert_idx, type="elementwise_add", - inputs={ - 'X': grad_name, - 'Y': gradient_merge_var - }, + inputs={'X': grad_name, 'Y': gradient_merge_var}, outputs={'Out': gradient_merge_var}, attrs={ 'axis': -1, 'use_mkldnn': False, - OP_ROLE_KEY: OpRole.Backward - }) + OP_ROLE_KEY: OpRole.Backward, + }, + ) # startup initialization - startup_block.append_op(type="fill_constant", - outputs={"Out": startup_gradient_merge_var}, - attrs={ - "shape": grad_var.shape, - "dtype": grad_var.dtype, - "value": float(0), - }) + startup_block.append_op( + type="fill_constant", + outputs={"Out": startup_gradient_merge_var}, + attrs={ + "shape": grad_var.shape, + "dtype": grad_var.dtype, + "value": float(0), + }, + ) main_block._sync_with_cpp() startup_block._sync_with_cpp() @@ -1630,14 +1922,17 @@ class ShardingOptimizer(MetaOptimizerBase): value=int(self._gradient_merge_acc_step), dtype='int32', persistable=True, - force_cpu=True) + force_cpu=True, + ) - zero_var = layers.create_global_var(name="gradient_merge_zero", - shape=[1], - value=int(0), - dtype='int32', - persistable=True, - force_cpu=True) + zero_var = layers.create_global_var( + name="gradient_merge_zero", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + force_cpu=True, + ) # Add step var & cond var current_step_var = layers.create_global_var( @@ -1646,42 +1941,40 @@ 
class ShardingOptimizer(MetaOptimizerBase): value=int(0), dtype='int32', persistable=True, - force_cpu=True) + force_cpu=True, + ) - cond_var = main_block.create_var(name="gradient_merge_cond", - shape=[1], - dtype='bool') + cond_var = main_block.create_var( + name="gradient_merge_cond", shape=[1], dtype='bool' + ) with device_guard("cpu"): # step_var = (step_var + 1) % k_step - main_block.append_op(type='increment', - inputs={'X': [current_step_var]}, - outputs={'Out': [current_step_var]}, - attrs={ - 'step': float(1), - OP_ROLE_KEY: OpRole.Optimize - }) - - main_block.append_op(type='elementwise_mod', - inputs={ - 'X': current_step_var, - 'Y': acc_step_var - }, - outputs={'Out': current_step_var}, - attrs={ - 'axis': -1, - OP_ROLE_KEY: OpRole.Optimize, - 'use_mkldnn': False - }) + main_block.append_op( + type='increment', + inputs={'X': [current_step_var]}, + outputs={'Out': [current_step_var]}, + attrs={'step': float(1), OP_ROLE_KEY: OpRole.Optimize}, + ) + + main_block.append_op( + type='elementwise_mod', + inputs={'X': current_step_var, 'Y': acc_step_var}, + outputs={'Out': current_step_var}, + attrs={ + 'axis': -1, + OP_ROLE_KEY: OpRole.Optimize, + 'use_mkldnn': False, + }, + ) # cond_var = (step_var == 0) - main_block.append_op(type='equal', - inputs={ - 'X': current_step_var, - 'Y': zero_var - }, - outputs={'Out': cond_var}, - attrs={OP_ROLE_KEY: OpRole.Optimize}) + main_block.append_op( + type='equal', + inputs={'X': current_step_var, 'Y': zero_var}, + outputs={'Out': cond_var}, + attrs={OP_ROLE_KEY: OpRole.Optimize}, + ) # paddle.static.Print(current_step_var, message="in FWBW last conditional") return cond_var @@ -1708,35 +2001,37 @@ class ShardingOptimizer(MetaOptimizerBase): # allreduce grad@gradientmerge if self.hybrid_dp: - assert self.dp_ring_id >= 0, "dp_ring_id should larger than 0 when in sharding&DP mode" + assert ( + self.dp_ring_id >= 0 + ), "dp_ring_id should larger than 0 when in sharding&DP mode" for grad, merged_grad in self._grad2merged_grad.items(): merged_grad_var = main_block.var(merged_grad) - cur_block.append_op(type='c_allreduce_sum', - inputs={'X': merged_grad_var}, - outputs={'Out': merged_grad_var}, - attrs={ - 'ring_id': self.dp_ring_id, - 'use_calc_stream': True, - OP_ROLE_KEY: OpRole.Optimize - }) + cur_block.append_op( + type='c_allreduce_sum', + inputs={'X': merged_grad_var}, + outputs={'Out': merged_grad_var}, + attrs={ + 'ring_id': self.dp_ring_id, + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) # grad@gradientmerge / acc_step for grad, merged_grad in self._grad2merged_grad.items(): # grad /= k_steps merged_grad_var = main_block.var(merged_grad) - cur_block.append_op(type='scale', - inputs={'X': merged_grad_var}, - outputs={'Out': merged_grad_var}, - attrs={ - 'scale': - 1.0 / float(self._gradient_merge_acc_step), - 'bias': - 0.0, - 'bias_after_scale': - False, - OP_ROLE_KEY: - OpRole.Optimize - }) + cur_block.append_op( + type='scale', + inputs={'X': merged_grad_var}, + outputs={'Out': merged_grad_var}, + attrs={ + 'scale': 1.0 / float(self._gradient_merge_acc_step), + 'bias': 0.0, + 'bias_after_scale': False, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) # re-create optimize ops already_moved_var_names = [] @@ -1747,15 +2042,19 @@ class ShardingOptimizer(MetaOptimizerBase): for input_name in new_op_desc.input_arg_names(): if input_name in self._grad2merged_grad: new_op_desc._rename_input( - input_name, self._grad2merged_grad[input_name]) + input_name, self._grad2merged_grad[input_name] + ) for output_name in 
new_op_desc.output_arg_names(): if output_name in self._grad2merged_grad: new_op_desc._rename_output( - output_name, self._grad2merged_grad[output_name]) + output_name, self._grad2merged_grad[output_name] + ) # move non temp optimize vars from block0 to cond block - if output_name not in already_moved_var_names and output_name not in self._grad2merged_grad.keys( + if ( + output_name not in already_moved_var_names + and output_name not in self._grad2merged_grad.keys() ): var_ = self._main_program.global_block().var(output_name) if not var_.persistable: @@ -1764,11 +2063,14 @@ class ShardingOptimizer(MetaOptimizerBase): shape_ = var_.shape type_ = var_.dtype self._main_program.global_block()._remove_var( - var_.name, sync=False) - self.cond_block.create_var(name=name_, - shape=shape_, - dtype=type_, - persistable=False) + var_.name, sync=False + ) + self.cond_block.create_var( + name=name_, + shape=shape_, + dtype=type_, + persistable=False, + ) already_moved_var_names.append(name_) self._main_program.global_block()._sync_with_cpp() @@ -1777,14 +2079,16 @@ class ShardingOptimizer(MetaOptimizerBase): # fill zero to grad@gradientmerge for grad, merged_grad in self._grad2merged_grad.items(): merged_grad_var = main_block.var(merged_grad) - cur_block.append_op(type='fill_constant', - outputs={'Out': merged_grad_var}, - attrs={ - "shape": merged_grad_var.shape, - "dtype": merged_grad_var.dtype, - "value": float(0), - OP_ROLE_KEY: OpRole.Optimize - }) + cur_block.append_op( + type='fill_constant', + outputs={'Out': merged_grad_var}, + attrs={ + "shape": merged_grad_var.shape, + "dtype": merged_grad_var.dtype, + "value": float(0), + OP_ROLE_KEY: OpRole.Optimize, + }, + ) # lr_var = main_block.var("gradient_merge_current_step") # paddle.static.Print(lr_var, message="in OPTIMIZE last conditional") @@ -1796,7 +2100,10 @@ class ShardingOptimizer(MetaOptimizerBase): create cond block """ - if self.gradient_merge_mode != "sharding_gm" or self._gradient_merge_acc_step <= 1: + if ( + self.gradient_merge_mode != "sharding_gm" + or self._gradient_merge_acc_step <= 1 + ): return main_block = self._main_program.global_block() @@ -1815,7 +2122,8 @@ class ShardingOptimizer(MetaOptimizerBase): main_block._remove_op(op_idx, sync=False) tmp_copy_block._sync_with_cpp() self.original_optimize_ops_desc = list( - reversed(self.original_optimize_ops_desc)) + reversed(self.original_optimize_ops_desc) + ) # back to block 0 self._main_program._rollback() @@ -1832,18 +2140,17 @@ class ShardingOptimizer(MetaOptimizerBase): # cond op step_scope = self._main_program.global_block().create_var( - type=core.VarDesc.VarType.STEP_SCOPES) + type=core.VarDesc.VarType.STEP_SCOPES + ) conditional_block_op = self._main_program.global_block().append_op( type='conditional_block', inputs={ 'Cond': cond, 'Input': [], }, - outputs={ - 'Out': [], - 'Scope': [step_scope] - }, + outputs={'Out': [], 'Scope': [step_scope]}, attrs={ 'sub_block': cond_block, 'is_scalar_condition': True, - }) + }, + ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py index 5f01552c71bdecb48b4f6324bee732a8179c42b7..0cd86ad08bde29aa3f05c590045d8a4e1dd719e2 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py @@ -13,13 +13,20 @@ import paddle.fluid as fluid from .meta_optimizer_base import MetaOptimizerBase -from .common import 
CollectiveHelper, OP_ROLE_KEY, OP_ROLE_VAR_KEY, OpRole, is_backward_op, is_loss_grad_op, is_optimizer_op +from .common import ( + CollectiveHelper, + OP_ROLE_KEY, + OP_ROLE_VAR_KEY, + OpRole, + is_backward_op, + is_loss_grad_op, + is_optimizer_op, +) __all__ = [] class TensorParallelOptimizer(MetaOptimizerBase): - def __init__(self, optimizer): super(TensorParallelOptimizer, self).__init__(optimizer) self.inner_opt = optimizer @@ -36,13 +43,15 @@ class TensorParallelOptimizer(MetaOptimizerBase): self.global_ring_id = 1 self.dp_ring_id = 2 - def _set_basic_info(self, loss, role_maker, user_defined_optimizer, - user_defined_strategy): - super(TensorParallelOptimizer, - self)._set_basic_info(loss, role_maker, user_defined_optimizer, - user_defined_strategy) + def _set_basic_info( + self, loss, role_maker, user_defined_optimizer, user_defined_strategy + ): + super(TensorParallelOptimizer, self)._set_basic_info( + loss, role_maker, user_defined_optimizer, user_defined_strategy + ) self.mp_degree = user_defined_strategy.tensor_parallel_configs[ - 'tensor_parallel_degree'] + 'tensor_parallel_degree' + ] def _can_apply(self): if not self.role_maker._is_collective: @@ -69,23 +78,25 @@ class TensorParallelOptimizer(MetaOptimizerBase): if param.is_distributed and mp_mode: continue - block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': 0, - OP_ROLE_KEY: OpRole.Forward - }) - - if not param: return # no parameter on this device - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward - }) + block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': 0, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + + if not param: + return # no parameter on this device + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Forward}, + ) def _get_process_group_info(self): # global ring info @@ -98,7 +109,8 @@ class TensorParallelOptimizer(MetaOptimizerBase): self.mp_nranks = self.mp_degree mp_group = self.rank // self.mp_degree self.mp_endpoints = [ - self.endpoints[i] for i in range(self.global_nranks) + self.endpoints[i] + for i in range(self.global_nranks) if i // self.mp_degree == mp_group ] @@ -117,33 +129,47 @@ class TensorParallelOptimizer(MetaOptimizerBase): collective_helper = CollectiveHelper(self.role_maker, wait_port=False) # Create global ring for all gpus - collective_helper._init_communicator(self.startup_program, - self.current_endpoint, - self.global_endpoints, - self.global_rank, - self.global_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self.startup_program, + self.current_endpoint, + self.global_endpoints, + self.global_rank, + self.global_ring_id, + True, + self.global_ring_id, + True, + ) # Create model parallel ring for all gpus - collective_helper._init_communicator(self.startup_program, - self.current_endpoint, - self.mp_endpoints, self.mp_rank, - self.mp_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self.startup_program, + self.current_endpoint, + self.mp_endpoints, + self.mp_rank, + self.mp_ring_id, + True, + self.global_ring_id, + True, + ) self._broadcast_params(self.mp_ring_id, mp_mode=True) # Create dp rings if self.nranks > self.mp_degree: collective_helper._init_communicator( - 
self.startup_program, self.current_endpoint, self.dp_endpoints, - self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True) + self.startup_program, + self.current_endpoint, + self.dp_endpoints, + self.dp_rank, + self.dp_ring_id, + True, + self.global_ring_id, + True, + ) self._broadcast_params(self.dp_ring_id, mp_mode=False) - def minimize_impl(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize_impl( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): self.endpoints = self.role_maker._get_trainer_endpoints() self.current_endpoint = self.endpoints[self.role_maker._worker_index()] self.startup_program = startup_program @@ -151,7 +177,8 @@ class TensorParallelOptimizer(MetaOptimizerBase): self.startup_program = fluid.default_startup_program() optimize_ops, params_grads = self.inner_opt.minimize( - loss, self.startup_program, parameter_list, no_grad_set) + loss, self.startup_program, parameter_list, no_grad_set + ) self.main_program = loss.block.program self.nranks = len(self.endpoints) @@ -180,14 +207,16 @@ class TensorParallelOptimizer(MetaOptimizerBase): for idx, op in reversed(list(enumerate(block.ops))): if is_loss_grad_op(op): loss_grad_var = block.vars[op.output_arg_names[0]] - block._insert_op(idx + 1, - type='scale', - inputs={'X': loss_grad_var}, - outputs={'Out': loss_grad_var}, - attrs={ - 'scale': 1.0 / dp_degree, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx + 1, + type='scale', + inputs={'X': loss_grad_var}, + outputs={'Out': loss_grad_var}, + attrs={ + 'scale': 1.0 / dp_degree, + OP_ROLE_KEY: OpRole.Backward, + }, + ) break def _insert_allreduce_ops(self, loss, ring_id): @@ -205,33 +234,36 @@ class TensorParallelOptimizer(MetaOptimizerBase): grad = block.vars[op_role_var[i + 1]] if offset == idx: offset += 1 - block._insert_op(offset, - type='c_sync_calc_stream', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={OP_ROLE_KEY: OpRole.Backward}) + block._insert_op( + offset, + type='c_sync_calc_stream', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={OP_ROLE_KEY: OpRole.Backward}, + ) offset += 1 - block._insert_op(offset, - type='c_allreduce_sum', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + offset, + type='c_allreduce_sum', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Backward, + }, + ) if grad is None: return for idx, op in list(enumerate(block.ops)): if is_optimizer_op(op): - block._insert_op(idx, - type='c_sync_comm_stream', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op( + idx, + type='c_sync_comm_stream', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Backward}, + ) break diff --git a/python/paddle/distributed/fleet/meta_parallel/meta_parallel_base.py b/python/paddle/distributed/fleet/meta_parallel/meta_parallel_base.py index f5b8660bd88d40ac35a46f4341dfecb211d2f0ad..9386893728ab6a3673dc6ca3fd936c0a0f53d90f 100644 --- a/python/paddle/distributed/fleet/meta_parallel/meta_parallel_base.py +++ b/python/paddle/distributed/fleet/meta_parallel/meta_parallel_base.py @@ -18,10 +18,10 @@ __all__ = [] class MetaParallelBase(Layer): - def __init__(self, layers, hcg, strategy): - super(MetaParallelBase, - self).__init__(layers.full_name() + "_meta_parallel_base") + super(MetaParallelBase, 
self).__init__( + layers.full_name() + "_meta_parallel_base" + ) self._layers = layers self._hcg = hcg self._strategy = strategy diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index f1a812391801b48730b448a6a6c82da8a3af9fc1..fec5005627b88a0027c6fc43ca7e5949fc75cd01 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -54,7 +54,6 @@ __all__ = [] class LayerDesc(object): - def __init__(self, layer_func, *inputs, **kwargs): self.layer_func = layer_func self.inputs = inputs @@ -62,25 +61,28 @@ class LayerDesc(object): if not issubclass(layer_func, Layer): raise TypeError( - "The input(layer_func) should be a derived class of Layer.") + "The input(layer_func) should be a derived class of Layer." + ) def build_layer(self): return self.layer_func(*self.inputs, **self.kwargs) def __repr__(self): - return layer_to_str(self.layer_func.__name__, *self.inputs, - **self.kwargs) + return layer_to_str( + self.layer_func.__name__, *self.inputs, **self.kwargs + ) class SharedLayerDesc(LayerDesc): - - def __init__(self, - key, - layer_func, - forward_func=None, - shared_weight_attr='weight', - *inputs, - **kwargs): + def __init__( + self, + key, + layer_func, + forward_func=None, + shared_weight_attr='weight', + *inputs, + **kwargs + ): super(SharedLayerDesc, self).__init__(layer_func, *inputs, **kwargs) self.layer_name = key self.forward_func = forward_func @@ -88,12 +90,13 @@ class SharedLayerDesc(LayerDesc): class SegmentLayers(object): - - def __init__(self, - layers_desc, - num_parts, - method="uniform", - num_virtual_pipeline_stage=None): + def __init__( + self, + layers_desc, + num_parts, + method="uniform", + num_virtual_pipeline_stage=None, + ): self._layers_desc = layers_desc self.method = method self.num_parts = num_parts @@ -101,7 +104,9 @@ class SegmentLayers(object): self.num_virtual_pipeline_stage = num_virtual_pipeline_stage if self.num_virtual_pipeline_stage is not None: self.total_parts = num_parts * self.num_virtual_pipeline_stage - assert self.num_items >= self.num_parts, "layer number should be greater than number of segments" + assert ( + self.num_items >= self.num_parts + ), "layer number should be greater than number of segments" def do_segment(self): if self.method == "uniform": @@ -115,12 +120,17 @@ class SegmentLayers(object): for idx in weight_idxs: weights[idx] = 1 - actual_num_parts = self.num_parts if self.num_virtual_pipeline_stage is None else self.total_parts - - assert sum( - weights - ) % actual_num_parts == 0, "number of layers ({}) should be divided by part number({})".format( - sum(weights), actual_num_parts) + actual_num_parts = ( + self.num_parts + if self.num_virtual_pipeline_stage is None + else self.total_parts + ) + + assert ( + sum(weights) % actual_num_parts == 0 + ), "number of layers ({}) should be divided by part number({})".format( + sum(weights), actual_num_parts + ) part_size = sum(weights) // actual_num_parts result = [0 for _ in range(actual_num_parts + 1)] @@ -153,8 +163,9 @@ class SegmentLayers(object): if regex.search(name): weight_idxs.append(idx) - assert len( - weight_idxs) > 0, "weight_idxs' length should be greater than 0" + assert ( + len(weight_idxs) > 0 + ), "weight_idxs' length should be greater than 0" return weight_idxs def uniform(self, num_items, num_parts): @@ -167,7 +178,6 @@ class SegmentLayers(object): 
class PipelineLayerChunk(Layer): - def __init__(self): super(PipelineLayerChunk, self).__init__() self.run_function = [] @@ -189,7 +199,8 @@ class PipelineLayerChunk(Layer): # behavior under recompute circumstance. raise PermissionError( "The forward function of PipelineLayerChunk cannot be called directly. " - "Please call forward function of PipelineLayer.") + "Please call forward function of PipelineLayer." + ) class PipelineLayer(Layer): @@ -271,32 +282,41 @@ class PipelineLayer(Layer): """ - def __init__(self, - layers, - num_stages=None, - topology=None, - loss_fn=None, - seg_method="uniform", - recompute_interval=0, - recompute_ctx=None, - num_virtual_pipeline_stages=None): + def __init__( + self, + layers, + num_stages=None, + topology=None, + loss_fn=None, + seg_method="uniform", + recompute_interval=0, + recompute_ctx=None, + num_virtual_pipeline_stages=None, + ): super(PipelineLayer, self).__init__() if num_stages is None and topology is None: raise ValueError("should provide num_stages or topology") if num_virtual_pipeline_stages: - assert isinstance(num_virtual_pipeline_stages, int), \ - "virtual_pipeline_stage should be None or an int" + assert isinstance( + num_virtual_pipeline_stages, int + ), "virtual_pipeline_stage should be None or an int" if num_virtual_pipeline_stages > 1: logger.info( "set num_virtual_pipeline_stages > 1 means using interleave scheduler instead of 1f1b scheduler" ) - assert isinstance(seg_method, str), \ - "seg_method should be a str for interleave scheduler" - assert seg_method.startswith('layer:'), \ - "seg_method shoud be start with layer: for interleave scheduler" - - self._num_virtual_pipeline_stages = 1 if num_virtual_pipeline_stages is None else num_virtual_pipeline_stages + assert isinstance( + seg_method, str + ), "seg_method should be a str for interleave scheduler" + assert seg_method.startswith( + 'layer:' + ), "seg_method shoud be start with layer: for interleave scheduler" + + self._num_virtual_pipeline_stages = ( + 1 + if num_virtual_pipeline_stages is None + else num_virtual_pipeline_stages + ) # lazy import import paddle.distributed as dist @@ -310,13 +330,17 @@ class PipelineLayer(Layer): self.recompute_ctx = recompute_ctx if recompute_interval > 0: - assert recompute_ctx is not None, "recompute_ctx must be not None for recompute." + assert ( + recompute_ctx is not None + ), "recompute_ctx must be not None for recompute." offload = recompute_ctx.get('offload', False) partition = recompute_ctx.get('partition', False) logger.info( - "Start Recompute for PipeLineParallel. recompute_offload: {}, recompute_partition: {}" - .format(offload, partition)) + "Start Recompute for PipeLineParallel. 
recompute_offload: {}, recompute_partition: {}".format( + offload, partition + ) + ) world_size = dist.get_world_size() self.global_rank = dist.get_rank() @@ -325,22 +349,28 @@ class PipelineLayer(Layer): self._stage_id = self._topo.get_coord(self.global_rank).pipe self._num_stages = self._topo.get_dim_size("pipe") if num_stages: - assert self._num_stages == num_stages, "num_stages should be equal to be %d" % ( - self._num_stages) + assert ( + self._num_stages == num_stages + ), "num_stages should be equal to be %d" % (self._num_stages) else: # construct default topology if world_size % num_stages != 0: raise ValueError( "should provide correct num_stages({}) " "which can be divided by world_size({})".format( - num_stages, world_size)) + num_stages, world_size + ) + ) dp_num = world_size // num_stages - self._topo = fleet.CommunicateTopology(["data", "pipe", "model"], - [dp_num, num_stages, 1]) + self._topo = fleet.CommunicateTopology( + ["data", "pipe", "model"], [dp_num, num_stages, 1] + ) self._stage_id = self._topo.get_coord(self.global_rank).pipe self._num_stages = self._topo.get_dim_size("pipe") - self._total_stages_with_virtual_stages = self._num_stages * self._num_virtual_pipeline_stages + self._total_stages_with_virtual_stages = ( + self._num_stages * self._num_virtual_pipeline_stages + ) # initialize segment self._layers_desc = list(self.layers) @@ -378,16 +408,22 @@ class PipelineLayer(Layer): start_idx = virtual_pp_rank * self._num_stages for stage in range(self._num_stages): # stage mark the real pp stage - if self.segment_parts[start_idx + - stage] <= layer_idx < self.segment_parts[ - start_idx + stage + 1]: + if ( + self.segment_parts[start_idx + stage] + <= layer_idx + < self.segment_parts[start_idx + stage + 1] + ): return stage def get_num_virtual_stages(self): return self._num_virtual_pipeline_stages def get_model_chunks(self): - return None if self._num_virtual_pipeline_stages == 1 else self._model_chunks + return ( + None + if self._num_virtual_pipeline_stages == 1 + else self._model_chunks + ) def _construct_shared_comm(self): shared_comm = {} @@ -395,17 +431,21 @@ class PipelineLayer(Layer): return layers_desc = self._layers_desc - shared_layer_names = set(s.layer_name for s in layers_desc - if isinstance(s, SharedLayerDesc)) + shared_layer_names = set( + s.layer_name for s in layers_desc if isinstance(s, SharedLayerDesc) + ) for key in shared_layer_names: shared_layers = [] for idx, layer in enumerate(layers_desc): - if isinstance(layer, - SharedLayerDesc) and layer.layer_name == key: + if ( + isinstance(layer, SharedLayerDesc) + and layer.layer_name == key + ): shared_layers.append(idx) shared_stages = set( - self.get_stage_from_index(idx) for idx in shared_layers) + self.get_stage_from_index(idx) for idx in shared_layers + ) self._dp_degree = self._topo.get_dim('data') self._mp_degree = self._topo.get_dim('model') self._sharding_degree = self._topo.get_dim('sharding') @@ -422,7 +462,9 @@ class PipelineLayer(Layer): pipe=s, data=dp, sharding=sharding, - model=mp)) + model=mp, + ) + ) group = paddle.distributed.new_group(ranks=shared_ranks) if self.global_rank in shared_ranks: @@ -431,8 +473,9 @@ class PipelineLayer(Layer): shared_comm[key] = { 'ranks': shared_ranks, 'group': group, - 'weight_attr': - self.shared_weight_attrs[key], + 'weight_attr': self.shared_weight_attrs[ + key + ], 'layer': self.shared_layers[key], } return shared_comm @@ -440,10 +483,11 @@ class PipelineLayer(Layer): def _synchronize_shared_weights(self): for key, comm in 
self.shared_comm.items(): with paddle.framework.no_grad(): - paddle.distributed.broadcast(getattr(comm['layer'], - comm['weight_attr']), - src=min(comm['ranks']), - group=comm['group']) + paddle.distributed.broadcast( + getattr(comm['layer'], comm['weight_attr']), + src=min(comm['ranks']), + group=comm['group'], + ) for param in comm['layer'].parameters(): if self.global_rank != min(comm['ranks']): @@ -455,8 +499,9 @@ class PipelineLayer(Layer): # need use trace_op to allreduce weight if in_dygraph_mode(): with paddle.framework.no_grad(): - paddle.distributed.all_reduce(param.grad, - group=comm['group']) + paddle.distributed.all_reduce( + param.grad, group=comm['group'] + ) else: with paddle.framework.no_grad(): paddle.fluid.framework._dygraph_tracer().trace_op( @@ -465,8 +510,9 @@ class PipelineLayer(Layer): outputs={'Out': param._grad_ivar()}, attrs={ 'ring_id': comm['group'].id, - 'use_calc_stream': True - }) + 'use_calc_stream': True, + }, + ) def _segment_network_for_interleave(self, seg_method): logger.info("start segment network for interleave scheduler") @@ -474,14 +520,20 @@ class PipelineLayer(Layer): self._layers_desc, num_parts=self._num_stages, method=seg_method, - num_virtual_pipeline_stage=self._num_virtual_pipeline_stages) + num_virtual_pipeline_stage=self._num_virtual_pipeline_stages, + ) self.segment_parts = seg.do_segment() - logger.info("segment result:" + - ", ".join(str(arg) for arg in self.segment_parts)) + logger.info( + "segment result:" + + ", ".join(str(arg) for arg in self.segment_parts) + ) - for i in range(self._stage_id, self._total_stages_with_virtual_stages, - self._num_stages): + for i in range( + self._stage_id, + self._total_stages_with_virtual_stages, + self._num_stages, + ): # If there are 2 real pp stages and 2 virtual pp stages, and the model has 8 layers. # Layers [0, 1], [4, 5] will be assigned to the first real pp stage. # Layers [2, 3], [6, 7] will be assigned to the second real pp stage. 
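(Illustrative aside, not part of the patch: the comment above describes how interleave assigns layers to real pipeline stages. A minimal standalone sketch of that mapping, assuming uniform segmentation of 8 layers over 2 real stages x 2 virtual stages; all names below are hypothetical, not Paddle APIs:)

    num_layers, num_stages, num_virtual = 8, 2, 2
    total_parts = num_stages * num_virtual            # 4 virtual parts
    part_size = num_layers // total_parts             # 2 layers per part
    segment_parts = [i * part_size for i in range(total_parts + 1)]  # [0, 2, 4, 6, 8]

    for stage in range(num_stages):
        # a real stage owns virtual parts stage, stage + num_stages, ...
        owned = [
            list(range(segment_parts[p], segment_parts[p + 1]))
            for p in range(stage, total_parts, num_stages)
        ]
        print("real pp stage", stage, "-> layers", owned)
    # real pp stage 0 -> layers [[0, 1], [4, 5]]
    # real pp stage 1 -> layers [[2, 3], [6, 7]]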
@@ -497,13 +549,15 @@ class PipelineLayer(Layer): def _segment_network(self, seg_method): logger.info("start segment network..") - seg = SegmentLayers(self._layers_desc, - num_parts=self._num_stages, - method=seg_method) + seg = SegmentLayers( + self._layers_desc, num_parts=self._num_stages, method=seg_method + ) self.segment_parts = seg.do_segment() - logger.info("segment result:" + - ", ".join(str(arg) for arg in self.segment_parts)) + logger.info( + "segment result:" + + ", ".join(str(arg) for arg in self.segment_parts) + ) self._start_pos = self.segment_parts[self._stage_id] self._end_pos = self.segment_parts[self._stage_id + 1] @@ -511,22 +565,30 @@ class PipelineLayer(Layer): def _print_segmentation_for_debug(self): # print information for debug - for stage in range(self._num_stages * - self._num_virtual_pipeline_stages): + for stage in range( + self._num_stages * self._num_virtual_pipeline_stages + ): start = self.segment_parts[stage] end = self.segment_parts[stage + 1] - logger.info("stage={}, global_rank={} ,layer_number={}".format( - stage, self.global_rank, end - start)) + logger.info( + "stage={}, global_rank={} ,layer_number={}".format( + stage, self.global_rank, end - start + ) + ) for index, layer in enumerate(self._layers_desc[start:end]): logger.info("{}: {}".format(index + start, str(layer))) if self._num_virtual_pipeline_stages > 1: for stage in range(self._num_stages): - stage_to_virtual_stage_info = "stage {} contains virtual stages: ".format( - stage) - for i in range(stage, self._total_stages_with_virtual_stages, - self._num_stages): + stage_to_virtual_stage_info = ( + "stage {} contains virtual stages: ".format(stage) + ) + for i in range( + stage, + self._total_stages_with_virtual_stages, + self._num_stages, + ): stage_to_virtual_stage_info += " {},".format(i) logger.info(stage_to_virtual_stage_info) @@ -572,9 +634,11 @@ class PipelineLayer(Layer): if layer.layer_name not in self.shared_layers: self.shared_layers[layer.layer_name] = layer.build_layer() self.shared_weight_attrs[ - layer.layer_name] = layer.shared_weight_attr + layer.layer_name + ] = layer.shared_weight_attr for param in self.shared_layers[ - layer.layer_name].parameters(): + layer.layer_name + ].parameters(): setattr(param, "is_firstly_shared", True) if layer.forward_func is None: @@ -582,8 +646,11 @@ class PipelineLayer(Layer): else: run_function.append( - partial(layer.forward_func, - self.shared_layers[layer.layer_name])) + partial( + layer.forward_func, + self.shared_layers[layer.layer_name], + ) + ) elif isinstance(layer, LayerDesc): model = layer.build_layer() @@ -612,11 +679,15 @@ class PipelineLayer(Layer): def forward(self, input, chunk_id=None): if chunk_id is not None: assert isinstance(chunk_id, int), "chunk_id should be an int" - assert self._num_virtual_pipeline_stages > 1, \ - "chunk_id is only valid when using virtual pipeline stage" - assert chunk_id < len(self._model_chunks), \ - "The virtual pipeline only has {} chunks, " \ - "but received chunk_id {}.".format(len(self._model_chunks), chunk_id) + assert ( + self._num_virtual_pipeline_stages > 1 + ), "chunk_id is only valid when using virtual pipeline stage" + assert chunk_id < len(self._model_chunks), ( + "The virtual pipeline only has {} chunks, " + "but received chunk_id {}.".format( + len(self._model_chunks), chunk_id + ) + ) # Get the target model chunk. model_chunk = self._model_chunks[chunk_id] # Update the self.run_function to the target run functions. 
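(Illustrative aside, not part of the patch: with virtual stages, forward(input, chunk_id) swaps self.run_function to the selected model chunk, and the next hunk then executes the slice run_function[start_idx:end_idx]. A minimal stand-in for that sequential dispatch, assuming the chunk's callables are simply applied in order; hypothetical code, not the Paddle implementation:)

    def forward_function(run_function, start_idx, end_idx):
        # apply the selected chunk's callables one after another,
        # feeding each output into the next callable
        def _run(x):
            for fn in run_function[start_idx:end_idx]:
                x = fn(x)
            return x
        return _run

    chunk = [lambda x: 2 * x, lambda x: x + 1]
    assert forward_function(chunk, 0, 2)(3) == 7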
@@ -634,20 +705,25 @@ class PipelineLayer(Layer): funcs = self.run_function[start_idx:end_idx] if not isinstance(input, tuple): - input = (input, ) + input = (input,) if self._need_recompute(funcs, input): input = recompute_hybrid( self.recompute_ctx, - self.forward_function(start_idx, end_idx), *input) + self.forward_function(start_idx, end_idx), + *input + ) else: input = self.forward_function(start_idx, end_idx)(*input) return input def _need_recompute(self, funcs, inputs): - if not any(input_.stop_gradient == False - for input_ in inputs if isinstance(input_, paddle.Tensor)): + if not any( + input_.stop_gradient == False + for input_ in inputs + if isinstance(input_, paddle.Tensor) + ): return False params = [f.parameters() for f in funcs if isinstance(f, Layer)] @@ -671,11 +747,18 @@ class PipelineLayer(Layer): if self._num_virtual_pipeline_stages > 1: # add virtual pipeline info to the save path assert local_chunk_id is not None - virtual_pipeline_stage_message = "-virtual_pp_stage_{:0>2d}".format( - local_chunk_id) - layer_save_path = os.path.join(ckpt_dir, - 'layer_{:0>2d}'.format(idx)) - layer_save_path = layer_save_path + virtual_pipeline_stage_message + rank_message + '-model_states.pdparams' + virtual_pipeline_stage_message = ( + "-virtual_pp_stage_{:0>2d}".format(local_chunk_id) + ) + layer_save_path = os.path.join( + ckpt_dir, 'layer_{:0>2d}'.format(idx) + ) + layer_save_path = ( + layer_save_path + + virtual_pipeline_stage_message + + rank_message + + '-model_states.pdparams' + ) return layer_save_path def _save_model(run_functions, local_chunk_id=None): @@ -698,7 +781,8 @@ class PipelineLayer(Layer): def set_state_dir(self, path): assert os.path.exists( - path), "{} not found, please check the path".format(path) + path + ), "{} not found, please check the path".format(path) def _load_model(run_functions, local_chunk_id=None): for idx, layer in enumerate(run_functions): @@ -712,21 +796,26 @@ class PipelineLayer(Layer): pos_offset = self._start_poss[local_chunk_id] layer_idx = idx + pos_offset layer_save_path = os.path.join( - path, 'layer_{0:0>2d}'.format(layer_idx)) + path, 'layer_{0:0>2d}'.format(layer_idx) + ) if self._num_virtual_pipeline_stages > 1: # add virtual pipeline info to the path assert local_chunk_id is not None - layer_save_path = layer_save_path + "-virtual_pp_stage_{:0>2d}".format( - local_chunk_id) - model_files = glob.glob(layer_save_path + - "*model_states.pdparams") + layer_save_path = ( + layer_save_path + + "-virtual_pp_stage_{:0>2d}".format(local_chunk_id) + ) + model_files = glob.glob( + layer_save_path + "*model_states.pdparams" + ) model_files.sort() mp_rank = self._topo.get_coord(self.global_rank).model mp_world_size = self._topo.get_dim('model') num_files = len(model_files) - load_param_path = model_files[mp_rank * num_files // - mp_world_size] + load_param_path = model_files[ + mp_rank * num_files // mp_world_size + ] model_state_dict = paddle.load(load_param_path) layer.set_state_dict(model_state_dict) diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py index 937122e8a7b33010e96aae6f3f4367f54b7ca2a6..89a3d619218b837b3add3253069f853d82cd356d 100755 --- a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py @@ -29,27 +29,31 @@ __all__ = [] class PipelineParallel(MetaParallelBase): - def __init__(self, layers, hcg, strategy): if not isinstance(layers, 
PipelineLayer): raise TypeError( - "The Layer should be a derived class of PipelineLayer.") + "The Layer should be a derived class of PipelineLayer." + ) super(PipelineParallel, self).__init__(layers, hcg, strategy) self.use_data_parallel = self._hcg.get_data_parallel_world_size() > 1 self.use_model_parallel = self._hcg.get_model_parallel_world_size() > 1 - self.use_sharding_parallel = self._hcg.get_sharding_parallel_world_size( - ) > 1 + self.use_sharding_parallel = ( + self._hcg.get_sharding_parallel_world_size() > 1 + ) self.total_loss = None self.micro_batch_size = self._strategy.pipeline_configs[ - 'micro_batch_size'] + 'micro_batch_size' + ] self.accumulate_steps = self._strategy.pipeline_configs[ - 'accumulate_steps'] + 'accumulate_steps' + ] # If sent tensor are not the same from different hosts, # they shouldn't been sent partially and then concated as a whole tensor. self._enable_partial_send_recv = self._strategy.pipeline_configs[ - 'enable_partial_send_recv'] + 'enable_partial_send_recv' + ] self._using_cache = self._strategy.pipeline_configs['p2p_cache_shape'] self.num_stages = self._hcg.get_pipe_parallel_world_size() @@ -61,16 +65,20 @@ class PipelineParallel(MetaParallelBase): self._real_pp_world_size = self.num_stages self._real_pp_rank = self.stage_id - p2p.initialize_p2p_groups(hcg, self._using_cache, - self._enable_partial_send_recv) + p2p.initialize_p2p_groups( + hcg, self._using_cache, self._enable_partial_send_recv + ) self.global_rank = self._hcg.get_global_rank() self.micro_batch_id = 0 self._compute_loss = True - logger.info("Pipeline Info -- num_stages: {}, stage_id: {}".format( - self.num_stages, self.stage_id)) + logger.info( + "Pipeline Info -- num_stages: {}, stage_id: {}".format( + self.num_stages, self.stage_id + ) + ) if self.use_model_parallel: logger.info("start broadcast mp parameters") @@ -122,7 +130,7 @@ class PipelineParallel(MetaParallelBase): # store data id for micro_batch self.micro_batch_id = 0 - startup_steps = (self.num_stages - self.stage_id - 1) + startup_steps = self.num_stages - self.stage_id - 1 startup_steps = min(startup_steps, self.accumulate_steps) steady_steps = self.accumulate_steps - startup_steps @@ -142,39 +150,46 @@ class PipelineParallel(MetaParallelBase): input_tensor = p2p.recv_forward(self.is_pipeline_first_stage()) for i in range(steady_steps): - last_iter = (i == (steady_steps - 1)) + last_iter = i == (steady_steps - 1) output_tensor = self._forward_step(input_tensor) output_tensor_grad = p2p.send_forward_recv_backward( - output_tensor, self.is_pipeline_last_stage()) + output_tensor, self.is_pipeline_last_stage() + ) input_buffers.append(input_tensor) output_buffers.append(output_tensor) input_tensor, output_tensor = input_buffers.pop( - 0), output_buffers.pop(0) + 0 + ), output_buffers.pop(0) - input_tensor_grad = self._backward_step(input_tensor, output_tensor, - output_tensor_grad) + input_tensor_grad = self._backward_step( + input_tensor, output_tensor, output_tensor_grad + ) if last_iter: input_tensor = None - p2p.send_backward(input_tensor_grad, - self.is_pipeline_first_stage()) + p2p.send_backward( + input_tensor_grad, self.is_pipeline_first_stage() + ) else: input_tensor = p2p.send_backward_recv_forward( - input_tensor_grad, self.is_pipeline_first_stage()) + input_tensor_grad, self.is_pipeline_first_stage() + ) for i in range(startup_steps): input_tensor = input_buffers.pop(0) output_tensor = output_buffers.pop(0) output_tensor_grad = p2p.recv_backward( - self.is_pipeline_last_stage()) + 
self.is_pipeline_last_stage() + ) - input_tensor_grad = self._backward_step(input_tensor, output_tensor, - output_tensor_grad) + input_tensor_grad = self._backward_step( + input_tensor, output_tensor, output_tensor_grad + ) p2p.send_backward(input_tensor_grad, self.is_pipeline_first_stage()) self._layers.allreduce_shared_weight_gradients() @@ -186,17 +201,20 @@ class PipelineParallel(MetaParallelBase): # reset the virtual pp rank for each run self.set_virtual_pipeline_rank(0) - assert isinstance(optimizer, HybridParallelOptimizer), ( - 'optimizer should be HybridParallelOptimizer subclass.') + assert isinstance( + optimizer, HybridParallelOptimizer + ), 'optimizer should be HybridParallelOptimizer subclass.' - assert fluid.framework._dygraph_tracer()._has_grad, ( - 'Please enable the generation of gradients.') + assert ( + fluid.framework._dygraph_tracer()._has_grad + ), 'Please enable the generation of gradients.' if self.is_pipeline_first_stage( - ignore_virtual=True) or self.is_pipeline_last_stage( - ignore_virtual=True): - assert data is not None, ( - "For the first and the last stage, the data must be set.") + ignore_virtual=True + ) or self.is_pipeline_last_stage(ignore_virtual=True): + assert ( + data is not None + ), "For the first and the last stage, the data must be set." else: data = None @@ -233,7 +251,7 @@ class PipelineParallel(MetaParallelBase): # store total loss of entire batch self.total_loss = None - startup_steps = (self.num_stages - self.stage_id - 1) + startup_steps = self.num_stages - self.stage_id - 1 startup_steps = min(startup_steps, self.accumulate_steps) steady_steps = self.accumulate_steps - startup_steps @@ -253,7 +271,7 @@ class PipelineParallel(MetaParallelBase): input_tensor = p2p.recv_forward(self.is_pipeline_first_stage()) for i in range(steady_steps): - last_iter = (i == (steady_steps - 1)) + last_iter = i == (steady_steps - 1) output_tensor = self._forward_step(input_tensor) p2p.send_forward(output_tensor, self.is_pipeline_last_stage()) @@ -282,13 +300,14 @@ class PipelineParallel(MetaParallelBase): if self.is_pipeline_last_stage(): # train calculate loss for train if self._compute_loss: - assert self._layers._loss_fn is not None, "loss function should exist to compute loss" + assert ( + self._layers._loss_fn is not None + ), "loss function should exist to compute loss" labels = self._load_micro_batch(self.micro_batch_id) output_tensor = self._layers._loss_fn(output_tensor, labels) assert isinstance( - output_tensor, - (paddle.Tensor, core.eager.Tensor - )), "Currently, loss_fn should obtain Paddle.Tensor dtype" + output_tensor, (paddle.Tensor, core.eager.Tensor) + ), "Currently, loss_fn should obtain Paddle.Tensor dtype" with paddle.amp.auto_cast(enable=False): if self.accumulate_steps > 1: @@ -318,16 +337,20 @@ class PipelineParallel(MetaParallelBase): assert len(outputs) == len(output_tensor_grad) paddle.autograd.backward( tensors=outputs, - grad_tensors=[t for t in output_tensor_grad]) + grad_tensors=[t for t in output_tensor_grad], + ) else: - paddle.autograd.backward(tensors=[output_tensor], - grad_tensors=[output_tensor_grad]) + paddle.autograd.backward( + tensors=[output_tensor], + grad_tensors=[output_tensor_grad], + ) input_tensor_grad = None if input_tensor is not None: if isinstance(input_tensor, tuple): input_tensor_grad = tuple( - [t.grad for t in input_tensor if not t.stop_gradient]) + [t.grad for t in input_tensor if not t.stop_gradient] + ) else: input_tensor_grad = input_tensor.grad return input_tensor_grad @@ -341,31 +364,39 @@ class 
PipelineParallel(MetaParallelBase): if self.is_pipeline_first_stage(): assert len(inputs) == 2, "length of input should be 2" if isinstance(inputs[0], tuple): - assert len( - inputs[0] - ) > 1, "If you use tuple for input data, it should have at least two inputs." + assert ( + len(inputs[0]) > 1 + ), "If you use tuple for input data, it should have at least two inputs." batch_size = inputs[0][0].shape[0] - assert self.micro_batch_size * self.accumulate_steps == batch_size, ( + assert ( + self.micro_batch_size * self.accumulate_steps == batch_size + ), ( "batch_size needs to be divisible by micro_batch_size. Currently, " "batch_size = %d, micro_batch_size = %d, accumulate_steps = %d." - % - (batch_size, self.micro_batch_size, self.accumulate_steps)) + % (batch_size, self.micro_batch_size, self.accumulate_steps) + ) data = [input[begin:end, :].detach() for input in inputs[0]] return tuple(data) else: batch_size = inputs[0].shape[0] - assert self.micro_batch_size * self.accumulate_steps == batch_size + assert ( + self.micro_batch_size * self.accumulate_steps == batch_size + ) return inputs[0][begin:end, :].detach() elif self.is_pipeline_last_stage(): assert len(inputs) == 2, "length of input should be 2" if isinstance(inputs[1], tuple): batch_size = inputs[1][0].shape[0] - assert self.micro_batch_size * self.accumulate_steps == batch_size + assert ( + self.micro_batch_size * self.accumulate_steps == batch_size + ) data = [input[begin:end, :].detach() for input in inputs[1]] return tuple(data) else: batch_size = inputs[1].shape[0] - assert self.micro_batch_size * self.accumulate_steps == batch_size + assert ( + self.micro_batch_size * self.accumulate_steps == batch_size + ) return inputs[1][begin:end, :].detach() else: # No data input is required for other stages @@ -375,34 +406,40 @@ class PipelineParallel(MetaParallelBase): # Since the last backward run in interleave will set the virtual rank to 0, # here we need to check last stage ignoring virtual stage. 
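Note (not part of the patch): `_load_micro_batch` above enforces `micro_batch_size * accumulate_steps == batch_size` and then slices one micro-batch out of the mini-batch. A minimal sketch of that contract on plain Python lists; deriving `begin`/`end` from a `micro_batch_id` argument is my assumption here, the real method gets them from the class state:

```python
# Illustrative sketch only: the divisibility check and slicing pattern of
# _load_micro_batch, on a plain list instead of paddle tensors.

def load_micro_batch(batch, micro_batch_size, accumulate_steps, micro_batch_id):
    batch_size = len(batch)
    assert micro_batch_size * accumulate_steps == batch_size, (
        "batch_size needs to be divisible by micro_batch_size. Currently, "
        "batch_size = %d, micro_batch_size = %d, accumulate_steps = %d."
        % (batch_size, micro_batch_size, accumulate_steps)
    )
    begin = micro_batch_id * micro_batch_size   # assumed index arithmetic
    end = begin + micro_batch_size
    return batch[begin:end]

if __name__ == "__main__":
    batch = list(range(8))                      # stand-in for 8 samples
    print(load_micro_batch(batch, 2, 4, 0))     # [0, 1]
    print(load_micro_batch(batch, 2, 4, 3))     # [6, 7]
```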
if self.is_pipeline_last_stage(ignore_virtual=True): - assert self.total_loss is not None, "train_batch() in last stage should obtain vaild loss" + assert ( + self.total_loss is not None + ), "train_batch() in last stage should obtain vaild loss" loss = self.total_loss.detach() - is_fp32 = paddle.to_tensor( - 1) if loss.dtype == paddle.float32 else paddle.to_tensor(0) - paddle.distributed.broadcast(is_fp32, - src=self.global_rank, - sync_op=True, - group=self.pp_group) - paddle.distributed.broadcast(loss, - src=self.global_rank, - sync_op=True, - group=self.pp_group) + is_fp32 = ( + paddle.to_tensor(1) + if loss.dtype == paddle.float32 + else paddle.to_tensor(0) + ) + paddle.distributed.broadcast( + is_fp32, src=self.global_rank, sync_op=True, group=self.pp_group + ) + paddle.distributed.broadcast( + loss, src=self.global_rank, sync_op=True, group=self.pp_group + ) else: is_fp32 = paddle.to_tensor(1) paddle.distributed.broadcast( is_fp32, src=self._hcg.get_rank_from_stage(self.num_stages - 1), sync_op=True, - group=self.pp_group) - loss = paddle.zeros(shape=[ - 1 - ], dtype="float32") if is_fp32.numpy()[0] else paddle.zeros( - shape=[1], dtype="float16") + group=self.pp_group, + ) + loss = ( + paddle.zeros(shape=[1], dtype="float32") + if is_fp32.numpy()[0] + else paddle.zeros(shape=[1], dtype="float16") + ) paddle.distributed.broadcast( loss, src=self._hcg.get_rank_from_stage(self.num_stages - 1), sync_op=True, - group=self.pp_group) + group=self.pp_group, + ) return loss def _optimizer_step(self): @@ -421,11 +458,12 @@ class PipelineParallelWithInterleave(PipelineParallel): # pipeline parallel with interleave scheduler def __init__(self, layers, hcg, strategy): - super(PipelineParallelWithInterleave, self).__init__(layers=layers, - hcg=hcg, - strategy=strategy) + super(PipelineParallelWithInterleave, self).__init__( + layers=layers, hcg=hcg, strategy=strategy + ) assert layers.get_num_virtual_stages() > 1 - assert framework.in_dygraph_mode( + assert ( + framework.in_dygraph_mode() ), "virtual pipeline stage with interleave only support eager dygraph mode" # setup for interleave scheduler self.num_model_chunks = layers.get_num_virtual_stages() @@ -436,11 +474,12 @@ class PipelineParallelWithInterleave(PipelineParallel): self._virtual_pp_rank = 0 def _get_virtual_pp_rank(self, micro_step, forward): - virtual_pp_stage = micro_step % (self.num_stages * - self.num_model_chunks) + virtual_pp_stage = micro_step % ( + self.num_stages * self.num_model_chunks + ) virtual_pp_stage = virtual_pp_stage // self.num_stages if not forward: - virtual_pp_stage = (self.num_model_chunks - virtual_pp_stage - 1) + virtual_pp_stage = self.num_model_chunks - virtual_pp_stage - 1 return virtual_pp_stage def _forward_step_helper(self, micro_step): @@ -455,7 +494,8 @@ class PipelineParallelWithInterleave(PipelineParallel): if self.is_pipeline_first_stage(): if len(self.input_tensors[virtual_pp_rank]) == len( - self.output_tensors[virtual_pp_rank]): + self.output_tensors[virtual_pp_rank] + ): self.input_tensors[virtual_pp_rank].append(None) input_tensor = self.input_tensors[virtual_pp_rank][-1] output_tensor = self._forward_step(input_tensor, virtual_pp_rank) @@ -484,21 +524,22 @@ class PipelineParallelWithInterleave(PipelineParallel): input_tensor = self.input_tensors[virtual_pp_rank].pop(0) output_tensor = self.output_tensors[virtual_pp_rank].pop(0) output_tensor_grad = self.output_tensor_grads[virtual_pp_rank].pop(0) - input_tensor_grad = self._backward_step(input_tensor, output_tensor, - output_tensor_grad) + 
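Note (not part of the patch): `_get_virtual_pp_rank`, reformatted above, is the core of the interleave scheduler, so a standalone restatement may help when reading the later hunks. It maps a micro-step index to the model chunk (virtual stage) that runs it, with backward steps walking the chunks in reverse:

```python
# Illustrative sketch only: the micro-step -> virtual-stage mapping used by
# the interleave scheduler, mirroring _get_virtual_pp_rank.

def get_virtual_pp_rank(micro_step, num_stages, num_model_chunks, forward):
    virtual_pp_stage = micro_step % (num_stages * num_model_chunks)
    virtual_pp_stage = virtual_pp_stage // num_stages
    if not forward:
        # backward visits the model chunks in reverse order
        virtual_pp_stage = num_model_chunks - virtual_pp_stage - 1
    return virtual_pp_stage

if __name__ == "__main__":
    # Toy config: 2 pipeline stages, 2 model chunks per stage.
    for step in range(8):
        fwd = get_virtual_pp_rank(step, 2, 2, forward=True)
        bwd = get_virtual_pp_rank(step, 2, 2, forward=False)
        print(f"micro_step={step}: forward chunk {fwd}, backward chunk {bwd}")
```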
input_tensor_grad = self._backward_step( + input_tensor, output_tensor, output_tensor_grad + ) return input_tensor_grad - def interleave_pipeline(self, - data, - scaler, - forward_only=False, - compute_loss=True): + def interleave_pipeline( + self, data, scaler, forward_only=False, compute_loss=True + ): # use interleave scheduling strategy. # this strategy is inspired by: # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/schedules.py if not compute_loss: - assert not forward_only, "compute_loss can only be set to False when forward_only is set to True" + assert ( + not forward_only + ), "compute_loss can only be set to False when forward_only is set to True" # init some attributes for this batch run self.scaler = scaler @@ -530,15 +571,17 @@ class PipelineParallelWithInterleave(PipelineParallel): self.set_virtual_pipeline_rank(0) self.input_tensors[0].append( - p2p.recv_forward(self.is_pipeline_first_stage(), sync_recv=False)) + p2p.recv_forward(self.is_pipeline_first_stage(), sync_recv=False) + ) # run startup steps for micro_step in range(startup_steps): output_tensor = self._forward_step_helper(micro_step) # determine whether recv forward tensor or not - next_virtual_pp_rank = self._get_virtual_pp_rank(micro_step + 1, - forward=True) + next_virtual_pp_rank = self._get_virtual_pp_rank( + micro_step + 1, forward=True + ) recv_prev = True if self.is_pipeline_first_stage(ignore_virtual=True): if next_virtual_pp_rank == 0: @@ -552,24 +595,33 @@ class PipelineParallelWithInterleave(PipelineParallel): if self.is_pipeline_last_stage(): output_tensor = None - if micro_step == (startup_steps - - 1) and not forward_only and not all_startup_steps: + if ( + micro_step == (startup_steps - 1) + and not forward_only + and not all_startup_steps + ): input_tensor_grad = None recv_next = True if self.is_pipeline_last_stage(ignore_virtual=True): recv_next = False # the last startup step needs on four direction comm to set up for steady 1f1b - input_tensor, output_tensor_grad = p2p.send_forward_backward_recv_forward_backward( + ( + input_tensor, + output_tensor_grad, + ) = p2p.send_forward_backward_recv_forward_backward( output_tensor, input_tensor_grad, recv_prev=recv_prev, - recv_next=recv_next) - self.output_tensor_grads[self.num_model_chunks - - 1].append(output_tensor_grad) + recv_next=recv_next, + ) + self.output_tensor_grads[self.num_model_chunks - 1].append( + output_tensor_grad + ) else: input_tensor = p2p.send_forward_recv_forward( - output_tensor, recv_prev=recv_prev) + output_tensor, recv_prev=recv_prev + ) self.input_tensors[next_virtual_pp_rank].append(input_tensor) # run 1f1b steady steps @@ -581,7 +633,8 @@ class PipelineParallelWithInterleave(PipelineParallel): # backward backward_micro_step_id = micro_step input_tensor_grad = self._backward_step_helper( - backward_micro_step_id) + backward_micro_step_id + ) # four directions comm # send output tensor to downstream @@ -591,14 +644,16 @@ class PipelineParallelWithInterleave(PipelineParallel): # last stage doesn't send rst to downstream forward_virtual_pp_rank = self._get_virtual_pp_rank( - forward_micro_step_id, forward=True) + forward_micro_step_id, forward=True + ) self.set_virtual_pipeline_rank(forward_virtual_pp_rank) if self.is_pipeline_last_stage(): output_tensor = None # first stage doesn't send grad to upstream backward_virtual_pp_rank = self._get_virtual_pp_rank( - backward_micro_step_id, forward=False) + backward_micro_step_id, forward=False + ) self.set_virtual_pipeline_rank(backward_virtual_pp_rank) if 
self.is_pipeline_first_stage(): input_tensor_grad = None @@ -607,14 +662,16 @@ class PipelineParallelWithInterleave(PipelineParallel): recv_prev = True if self.is_pipeline_first_stage(ignore_virtual=True): next_forward_virtual_pp_rank = self._get_virtual_pp_rank( - forward_micro_step_id - (self.num_stages - 1), forward=True) + forward_micro_step_id - (self.num_stages - 1), forward=True + ) if next_forward_virtual_pp_rank == (self.num_model_chunks - 1): # first pp stage and first virtual stage recv_prev = False next_forward_virtual_pp_rank += 1 else: next_forward_virtual_pp_rank = self._get_virtual_pp_rank( - forward_micro_step_id + 1, forward=True) + forward_micro_step_id + 1, forward=True + ) # last iteration doesn't need recv from upstream if micro_step == (steady_steps - 1): @@ -625,53 +682,67 @@ class PipelineParallelWithInterleave(PipelineParallel): if self.is_pipeline_last_stage(ignore_virtual=True): next_backward_virtual_pp_rank = self._get_virtual_pp_rank( backward_micro_step_id - (self.num_stages - 1), - forward=False) + forward=False, + ) if next_backward_virtual_pp_rank == 0: # last pp stage and last virtual stage recv_next = False next_backward_virtual_pp_rank -= 1 else: next_backward_virtual_pp_rank = self._get_virtual_pp_rank( - backward_micro_step_id + 1, forward=False) + backward_micro_step_id + 1, forward=False + ) - input_tensor, output_tensor_grad = p2p.send_forward_backward_recv_forward_backward( + ( + input_tensor, + output_tensor_grad, + ) = p2p.send_forward_backward_recv_forward_backward( output_tensor, input_tensor_grad, recv_prev=recv_prev, - recv_next=recv_next) + recv_next=recv_next, + ) if recv_prev: self.input_tensors[next_forward_virtual_pp_rank].append( - input_tensor) + input_tensor + ) if recv_next: self.output_tensor_grads[next_backward_virtual_pp_rank].append( - output_tensor_grad) + output_tensor_grad + ) # remaining backward steps if not forward_only: if all_startup_steps: self.output_tensor_grads[self.num_model_chunks - 1].append( - p2p.recv_backward(self.is_pipeline_last_stage(), - sync_recv=False)) + p2p.recv_backward( + self.is_pipeline_last_stage(), sync_recv=False + ) + ) for micro_step in range(steady_steps, num_steps): # cooldown loop input_tensor_grad = self._backward_step_helper(micro_step) next_backward_virtual_pp_rank = self._get_virtual_pp_rank( - micro_step + 1, forward=False) + micro_step + 1, forward=False + ) recv_next = True if self.is_pipeline_last_stage(ignore_virtual=True): - if next_backward_virtual_pp_rank == (self.num_model_chunks - - 1): + if next_backward_virtual_pp_rank == ( + self.num_model_chunks - 1 + ): recv_next = False if micro_step == (num_steps - 1): recv_next = False self.output_tensor_grads[next_backward_virtual_pp_rank].append( - p2p.send_backward_recv_backward(input_tensor_grad, - recv_next=recv_next)) + p2p.send_backward_recv_backward( + input_tensor_grad, recv_next=recv_next + ) + ) self._layers.allreduce_shared_weight_gradients() diff --git a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py index ecd3dc7ab9136f9c3e66ae572f0ef5a180b50f26..e97cf3c02302f8a298dbe5f294c5fa0cfcf79dad 100644 --- a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py +++ b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py @@ -30,12 +30,23 @@ def initialize_p2p_groups(hcg, use_cache=True, enable_partial_send_recv=True): _hcg = hcg _use_cache = use_cache _enable_partial_send_recv = 
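Note (not part of the patch): the cooldown loop reformatted above decides per step whether to keep receiving gradients from downstream. A sketch of that `recv_next` decision under stated assumptions: `is_last_stage` stands in for `is_pipeline_last_stage(ignore_virtual=True)`, and the virtual-rank arithmetic is inlined from `_get_virtual_pp_rank(micro_step + 1, forward=False)`:

```python
# Illustrative sketch only: the recv_next conditions of the cooldown loop.

def cooldown_recv_next(micro_step, num_steps, num_stages, num_model_chunks,
                       is_last_stage):
    next_backward_vpp = ((micro_step + 1) % (num_stages * num_model_chunks)
                         ) // num_stages
    next_backward_vpp = num_model_chunks - next_backward_vpp - 1
    recv_next = True
    if is_last_stage and next_backward_vpp == (num_model_chunks - 1):
        recv_next = False         # last pp stage, last virtual chunk
    if micro_step == (num_steps - 1):
        recv_next = False         # final step never receives
    return recv_next

if __name__ == "__main__":
    for step in range(4, 8):      # toy cooldown range
        print(step, cooldown_recv_next(step, 8, 2, 2, is_last_stage=True))
```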
enable_partial_send_recv - send_next_group, send_prev_group, recv_next_group, recv_prev_group = _hcg.get_p2p_groups( + ( + send_next_group, + send_prev_group, + recv_next_group, + recv_prev_group, + ) = _hcg.get_p2p_groups() + + debug_str = ( + "P2pInfo: send_next_group: %s, send_prev_group: %s, " + "recv_next_group: %s, recv_prev_group: %s" + % ( + repr(send_next_group), + repr(send_prev_group), + repr(recv_next_group), + repr(recv_prev_group), + ) ) - - debug_str = "P2pInfo: send_next_group: %s, send_prev_group: %s, " \ - "recv_next_group: %s, recv_prev_group: %s" % (repr(send_next_group), - repr(send_prev_group),repr(recv_next_group), repr(recv_prev_group)) logger.info(debug_str) @@ -150,9 +161,11 @@ class SendRecvMeta: self.send_dtype_message = paddle_2_number(tensor.dtype) elif isinstance(tensor, tuple): self.send_shape_message = tuple( - [d.shape for d in tensor if not d.stop_gradient]) + [d.shape for d in tensor if not d.stop_gradient] + ) self.send_dtype_message = tuple( - [paddle_2_number(d.dtype) for d in tensor]) + [paddle_2_number(d.dtype) for d in tensor] + ) _send_recv_meta = SendRecvMeta() @@ -166,84 +179,117 @@ def _is_valid_send_recv_partial(tensor, mp_degree): return mp_degree > 1 and tensor_numel % mp_degree == 0 -def _partial_send_op(tensor, group, use_calc_stream, ring_id, dst, nranks, - rank_id): +def _partial_send_op( + tensor, group, use_calc_stream, ring_id, dst, nranks, rank_id +): dst_rank_in_group = dst if group is None else group.get_group_rank(dst) if _in_legacy_dygraph(): - return _legacy_C_ops.partial_send(tensor.detach(), 'use_calc_stream', - use_calc_stream, 'ring_id', ring_id, - 'peer', dst_rank_in_group, 'num', - nranks, 'id', rank_id) + return _legacy_C_ops.partial_send( + tensor.detach(), + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'peer', + dst_rank_in_group, + 'num', + nranks, + 'id', + rank_id, + ) elif in_dygraph_mode(): - group = paddle.distributed.collective._get_default_group( - ) if group is None else group - comm_op = group.process_group.send_partial_on_calc_stream \ - if use_calc_stream else group.process_group.send_partial + group = ( + paddle.distributed.collective._get_default_group() + if group is None + else group + ) + comm_op = ( + group.process_group.send_partial_on_calc_stream + if use_calc_stream + else group.process_group.send_partial + ) return comm_op(tensor, dst_rank_in_group, nranks, rank_id) -def send_partial(tensor, - dst=0, - nranks=1, - rank_id=0, - group=None, - use_calc_stream=True): +def send_partial( + tensor, dst=0, nranks=1, rank_id=0, group=None, use_calc_stream=True +): # dst: local rank in group if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id - dst_rank = _hcg._get_p2p_next_rank( - ) if dst == 1 else _hcg._get_p2p_prev_rank() + dst_rank = ( + _hcg._get_p2p_next_rank() if dst == 1 else _hcg._get_p2p_prev_rank() + ) if _is_valid_send_recv_partial(tensor, nranks): - return _partial_send_op(tensor, group, use_calc_stream, ring_id, - dst_rank, nranks, rank_id) + return _partial_send_op( + tensor, group, use_calc_stream, ring_id, dst_rank, nranks, rank_id + ) else: if _in_legacy_dygraph(): - send_op = lambda x, dst, group: \ - paddle.distributed.send(x, dst, group, use_calc_stream) + send_op = lambda x, dst, group: paddle.distributed.send( + x, dst, group, use_calc_stream + ) elif in_dygraph_mode(): send_op = paddle.distributed.isend return send_op(tensor.detach(), dst=dst_rank, group=group) -def _partial_recv_op(tensor, group, use_calc_stream, 
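Note (not part of the patch): `_is_valid_send_recv_partial`, visible in the hunk above, gates the whole partial-send/recv path. A minimal sketch of the rule, with toy numbers:

```python
# Illustrative sketch only: when p2p falls back from a partial (1/mp_degree)
# transfer to a full send/recv, mirroring _is_valid_send_recv_partial.

def is_valid_send_recv_partial(tensor_numel, mp_degree):
    # Partial transfer only applies with tensor parallelism enabled and
    # only when the flattened tensor splits evenly across mp ranks.
    return mp_degree > 1 and tensor_numel % mp_degree == 0

if __name__ == "__main__":
    print(is_valid_send_recv_partial(1024, 4))  # True: splits into 4 chunks
    print(is_valid_send_recv_partial(1026, 4))  # False: full send/recv instead
    print(is_valid_send_recv_partial(1024, 1))  # False: no tensor parallelism
```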
ring_id, src, nranks, - rank_id): +def _partial_recv_op( + tensor, group, use_calc_stream, ring_id, src, nranks, rank_id +): src_rank_in_group = src if group is None else group.get_group_rank(src) if _in_legacy_dygraph(): assert use_calc_stream - return _legacy_C_ops.partial_recv(tensor.detach(), 'use_calc_stream', - use_calc_stream, 'ring_id', ring_id, - 'peer', src_rank_in_group, 'num', - nranks, 'id', rank_id, 'dtype', - tensor.dtype, 'out_shape', - tensor.shape) + return _legacy_C_ops.partial_recv( + tensor.detach(), + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'peer', + src_rank_in_group, + 'num', + nranks, + 'id', + rank_id, + 'dtype', + tensor.dtype, + 'out_shape', + tensor.shape, + ) elif in_dygraph_mode(): - group = paddle.distributed.collective._get_default_group( - ) if group is None else group - comm_op = group.process_group.recv_partial_on_calc_stream \ - if use_calc_stream else group.process_group.recv_partial + group = ( + paddle.distributed.collective._get_default_group() + if group is None + else group + ) + comm_op = ( + group.process_group.recv_partial_on_calc_stream + if use_calc_stream + else group.process_group.recv_partial + ) return comm_op(tensor, src_rank_in_group, nranks, rank_id) -def recv_partial(tensor, - src=0, - nranks=1, - rank_id=0, - group=None, - use_calc_stream=True): +def recv_partial( + tensor, src=0, nranks=1, rank_id=0, group=None, use_calc_stream=True +): # src: local rank in group if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id - src_rank = _hcg._get_p2p_prev_rank( - ) if src == 0 else _hcg._get_p2p_next_rank() + src_rank = ( + _hcg._get_p2p_prev_rank() if src == 0 else _hcg._get_p2p_next_rank() + ) if _is_valid_send_recv_partial(tensor, nranks): - return _partial_recv_op(tensor, group, use_calc_stream, ring_id, - src_rank, nranks, rank_id) + return _partial_recv_op( + tensor, group, use_calc_stream, ring_id, src_rank, nranks, rank_id + ) else: if _in_legacy_dygraph() or use_calc_stream: recv_op = paddle.distributed.recv @@ -252,42 +298,52 @@ def recv_partial(tensor, return recv_op(tensor.detach(), src=src_rank, group=group) -def _partial_allgather_op(tensor, group, use_calc_stream, ring_id, nranks, - rank_id): +def _partial_allgather_op( + tensor, group, use_calc_stream, ring_id, nranks, rank_id +): if _in_legacy_dygraph(): - return _legacy_C_ops.partial_allgather_(tensor.detach(), - 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'nranks', nranks, - 'rank', rank_id) + return _legacy_C_ops.partial_allgather_( + tensor.detach(), + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'nranks', + nranks, + 'rank', + rank_id, + ) elif in_dygraph_mode(): - group = paddle.distributed.collective._get_default_group( - ) if group is None else group - comm_op = group.process_group.all_gather_partial_on_calc_stream \ - if use_calc_stream else group.process_group.all_gather_partial + group = ( + paddle.distributed.collective._get_default_group() + if group is None + else group + ) + comm_op = ( + group.process_group.all_gather_partial_on_calc_stream + if use_calc_stream + else group.process_group.all_gather_partial + ) return comm_op(tensor, tensor, nranks, rank_id) -def allgather_partial(tensor, - nranks=1, - rank_id=0, - group=None, - use_calc_stream=True): +def allgather_partial( + tensor, nranks=1, rank_id=0, group=None, use_calc_stream=True +): if not _is_valid_send_recv_partial(tensor, nranks): return tensor if group is not None and not group.is_member(): 
return ring_id = 0 if group is None else group.id - return _partial_allgather_op(tensor, group, use_calc_stream, ring_id, - nranks, rank_id) + return _partial_allgather_op( + tensor, group, use_calc_stream, ring_id, nranks, rank_id + ) -def _p2p_helper(tensor_send_next, - tensor_send_prev, - recv_prev, - recv_next, - sync_recv=True): +def _p2p_helper( + tensor_send_next, tensor_send_prev, recv_prev, recv_next, sync_recv=True +): global _hcg tensor_recv_prev = None @@ -310,15 +366,17 @@ def _p2p_helper(tensor_send_next, if isinstance(recv_shape_msg, tuple): tensor_recv_prev = [] for idx, shape in enumerate(recv_shape_msg): - tmp = paddle.empty(shape=shape, - dtype=number_2_dtype(recv_dtype_msg[idx])) + tmp = paddle.empty( + shape=shape, dtype=number_2_dtype(recv_dtype_msg[idx]) + ) tmp.stop_gradient = recv_stop_gradient[idx] tensor_recv_prev.append(tmp) tensor_recv_prev = tuple(tensor_recv_prev) else: tensor_recv_prev = paddle.empty( - shape=recv_shape_msg, dtype=number_2_dtype(recv_dtype_msg)) + shape=recv_shape_msg, dtype=number_2_dtype(recv_dtype_msg) + ) tensor_recv_prev.stop_gradient = recv_stop_gradient if recv_next: @@ -326,12 +384,15 @@ def _p2p_helper(tensor_send_next, tensor_recv_next = [] for idx, shape in enumerate(send_shape_msg): tensor_recv_next.append( - paddle.empty(shape=shape, - dtype=number_2_dtype(send_dtype_msg[idx]))) + paddle.empty( + shape=shape, dtype=number_2_dtype(send_dtype_msg[idx]) + ) + ) tensor_recv_next = tuple(tensor_recv_next) else: tensor_recv_next = paddle.empty( - shape=send_shape_msg, dtype=number_2_dtype(send_dtype_msg)) + shape=send_shape_msg, dtype=number_2_dtype(send_dtype_msg) + ) # TODO(Yuang Liu): use batch_isend_irecv replace all these comm ops tasks = [] @@ -340,51 +401,63 @@ def _p2p_helper(tensor_send_next, if isinstance(tensor_send_prev, tuple): for d in tensor_send_prev: paddle.distributed.wait(d, use_calc_stream=True) - send_partial(d, - dst=0, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.send_prev_group, - use_calc_stream=False) + send_partial( + d, + dst=0, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.send_prev_group, + use_calc_stream=False, + ) else: paddle.distributed.wait(tensor_send_prev, use_calc_stream=True) - send_partial(tensor_send_prev, - dst=0, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.send_prev_group, - use_calc_stream=False) + send_partial( + tensor_send_prev, + dst=0, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.send_prev_group, + use_calc_stream=False, + ) if tensor_recv_prev is not None: if isinstance(tensor_recv_prev, tuple): for d in tensor_recv_prev: - task = recv_partial(d, - src=0, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.recv_prev_group, - use_calc_stream=sync_recv) + task = recv_partial( + d, + src=0, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.recv_prev_group, + use_calc_stream=sync_recv, + ) if sync_recv: - allgather_partial(d, - nranks=mp_degree, - rank_id=mp_rank, - group=mp_group, - use_calc_stream=True) + allgather_partial( + d, + nranks=mp_degree, + rank_id=mp_rank, + group=mp_group, + use_calc_stream=True, + ) else: tasks.append(task) else: - task = recv_partial(tensor_recv_prev, - src=0, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.recv_prev_group, - use_calc_stream=sync_recv) + task = recv_partial( + tensor_recv_prev, + src=0, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.recv_prev_group, + use_calc_stream=sync_recv, + ) if sync_recv: - allgather_partial(tensor_recv_prev, - nranks=mp_degree, - rank_id=mp_rank, - group=mp_group, - 
use_calc_stream=True) + allgather_partial( + tensor_recv_prev, + nranks=mp_degree, + rank_id=mp_rank, + group=mp_group, + use_calc_stream=True, + ) else: tasks.append(task) @@ -392,52 +465,64 @@ def _p2p_helper(tensor_send_next, if isinstance(tensor_send_next, tuple): for d in tensor_send_next: paddle.distributed.wait(d, use_calc_stream=True) - send_partial(d, - dst=1, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.send_next_group, - use_calc_stream=False) + send_partial( + d, + dst=1, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.send_next_group, + use_calc_stream=False, + ) else: paddle.distributed.wait(tensor_send_next, use_calc_stream=True) - send_partial(tensor_send_next, - dst=1, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.send_next_group, - use_calc_stream=False) + send_partial( + tensor_send_next, + dst=1, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.send_next_group, + use_calc_stream=False, + ) if tensor_recv_next is not None: if isinstance(tensor_recv_next, tuple): for d in tensor_recv_next: - task = recv_partial(d, - src=1, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.recv_next_group, - use_calc_stream=sync_recv) + task = recv_partial( + d, + src=1, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.recv_next_group, + use_calc_stream=sync_recv, + ) if sync_recv: - allgather_partial(d, - nranks=mp_degree, - rank_id=mp_rank, - group=mp_group, - use_calc_stream=True) + allgather_partial( + d, + nranks=mp_degree, + rank_id=mp_rank, + group=mp_group, + use_calc_stream=True, + ) else: tasks.append(task) else: - task = recv_partial(tensor_recv_next, - src=1, - nranks=mp_degree, - rank_id=mp_rank, - group=_hcg.recv_next_group, - use_calc_stream=sync_recv) + task = recv_partial( + tensor_recv_next, + src=1, + nranks=mp_degree, + rank_id=mp_rank, + group=_hcg.recv_next_group, + use_calc_stream=sync_recv, + ) if sync_recv: - allgather_partial(tensor_recv_next, - nranks=mp_degree, - rank_id=mp_rank, - group=mp_group, - use_calc_stream=True) + allgather_partial( + tensor_recv_next, + nranks=mp_degree, + rank_id=mp_rank, + group=mp_group, + use_calc_stream=True, + ) else: tasks.append(task) @@ -463,11 +548,13 @@ def _p2p_helper(tensor_send_next, tensors_for_all_gather.append(tensor_recv_next) for tensor in tensors_for_all_gather: - allgather_partial(tensor, - nranks=mp_degree, - rank_id=mp_rank, - group=mp_group, - use_calc_stream=True) + allgather_partial( + tensor, + nranks=mp_degree, + rank_id=mp_rank, + group=mp_group, + use_calc_stream=True, + ) return tensor_recv_prev, tensor_recv_next @@ -480,11 +567,13 @@ def recv_forward(pp_first_stage, sync_recv=True): _send_recv_meta.recv_meta(_hcg.recv_prev_group) _send_recv_meta.has_recv_meta = _use_cache - input_tensor, _ = _p2p_helper(tensor_send_next=None, - tensor_send_prev=None, - recv_prev=True, - recv_next=False, - sync_recv=sync_recv) + input_tensor, _ = _p2p_helper( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=True, + recv_next=False, + sync_recv=sync_recv, + ) return input_tensor @@ -492,11 +581,13 @@ def recv_backward(pp_last_stage, sync_recv=True): if pp_last_stage: output_tensor_grad = None else: - _, output_tensor_grad = _p2p_helper(tensor_send_next=None, - tensor_send_prev=None, - recv_prev=False, - recv_next=True, - sync_recv=sync_recv) + _, output_tensor_grad = _p2p_helper( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=False, + recv_next=True, + sync_recv=sync_recv, + ) return output_tensor_grad @@ -507,28 +598,34 @@ def send_forward(output_tensor, pp_last_stage): 
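Note (not part of the patch): as I read the hunks above, each `recv_partial` is paired with an `allgather_partial` because every mp rank only transfers its own 1/mp_degree slice, and the all-gather over the mp group reassembles the full tensor on the receiving side. A communication-free sketch of that split/gather round trip on plain lists:

```python
# Illustrative sketch only, no real communication: why a partial recv is
# followed by an all-gather across the mp group.

def split_for_partial_send(flat_tensor, mp_degree):
    chunk = len(flat_tensor) // mp_degree          # requires even divisibility
    return [flat_tensor[i * chunk:(i + 1) * chunk] for i in range(mp_degree)]

def allgather(chunks):
    out = []
    for c in chunks:                               # concatenate rank slices in order
        out.extend(c)
    return out

if __name__ == "__main__":
    full = list(range(8))
    chunks = split_for_partial_send(full, mp_degree=4)
    # rank r only sends/receives chunks[r]; the all-gather restores `full`
    assert allgather(chunks) == full
```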
_send_recv_meta.send_meta(output_tensor, _hcg.send_next_group) _send_recv_meta.has_send_meta = _use_cache - _p2p_helper(tensor_send_next=output_tensor, - tensor_send_prev=None, - recv_prev=False, - recv_next=False) + _p2p_helper( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=False, + ) def send_backward(input_tensor_grad, pp_first_stage): if not pp_first_stage: - _p2p_helper(tensor_send_next=None, - tensor_send_prev=input_tensor_grad, - recv_prev=False, - recv_next=False) + _p2p_helper( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=False, + ) def send_forward_recv_backward(output_tensor, pp_last_stage): if pp_last_stage: output_tensor_grad = None else: - _, output_tensor_grad = _p2p_helper(tensor_send_next=output_tensor, - tensor_send_prev=None, - recv_prev=False, - recv_next=True) + _, output_tensor_grad = _p2p_helper( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=True, + ) return output_tensor_grad @@ -536,16 +633,18 @@ def send_backward_recv_forward(input_tensor_grad, pp_first_stage): if pp_first_stage: input_tensor = None else: - input_tensor, _ = _p2p_helper(tensor_send_next=None, - tensor_send_prev=input_tensor_grad, - recv_prev=True, - recv_next=False) + input_tensor, _ = _p2p_helper( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=True, + recv_next=False, + ) return input_tensor -def send_forward_backward_recv_forward_backward(output_tensor, - input_tensor_grad, recv_prev, - recv_next): +def send_forward_backward_recv_forward_backward( + output_tensor, input_tensor_grad, recv_prev, recv_next +): # always have to send dytpe info to downstream if not _send_recv_meta.has_send_meta: _send_recv_meta.set_send_message(output_tensor) @@ -559,7 +658,8 @@ def send_forward_backward_recv_forward_backward(output_tensor, tensor_send_prev=input_tensor_grad, recv_prev=recv_prev, recv_next=recv_next, - sync_recv=False) + sync_recv=False, + ) return input_tensor, output_tensor_grad @@ -573,19 +673,23 @@ def send_forward_recv_forward(output_tensor, recv_prev): _send_recv_meta.recv_meta(_hcg.recv_prev_group) _send_recv_meta.has_recv_meta = _use_cache - input_tensor, _ = _p2p_helper(tensor_send_next=output_tensor, - tensor_send_prev=None, - recv_prev=recv_prev, - recv_next=False, - sync_recv=False) + input_tensor, _ = _p2p_helper( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=recv_prev, + recv_next=False, + sync_recv=False, + ) return input_tensor def send_backward_recv_backward(input_tensor_grad, recv_next): - _, output_tensor_grad = _p2p_helper(tensor_send_next=None, - tensor_send_prev=input_tensor_grad, - recv_prev=False, - recv_next=recv_next, - sync_recv=False) + _, output_tensor_grad = _p2p_helper( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=recv_next, + sync_recv=False, + ) return output_tensor_grad diff --git a/python/paddle/distributed/fleet/meta_parallel/pp_utils/utils.py b/python/paddle/distributed/fleet/meta_parallel/pp_utils/utils.py index 9f5d868a1570dc3a389ec25caa8c1f8b8b5c0f26..e33f9d4689cec7b5a5a16c66a347a8ed45b68080 100644 --- a/python/paddle/distributed/fleet/meta_parallel/pp_utils/utils.py +++ b/python/paddle/distributed/fleet/meta_parallel/pp_utils/utils.py @@ -28,7 +28,7 @@ PADDLE_TO_NUMBER = { paddle.float32: 1, paddle.float64: 2, paddle.int32: 3, - paddle.int64: 4 + paddle.int64: 4, } NUMBER_TO_DTYPE = { @@ -36,7 +36,7 @@ NUMBER_TO_DTYPE = { 1: 
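Note (not part of the patch): the wrapper functions reformatted above all reduce to `_p2p_helper` with different flag combinations. A small summary, read off the hunks, of the fixed-flag wrappers (the two wrappers whose recv flag is supplied by the caller, `send_forward_recv_forward` and `send_backward_recv_backward`, are omitted):

```python
# Illustrative sketch only: wrapper -> _p2p_helper arguments
# (send_next, send_prev, recv_prev, recv_next), as read from the diff.

P2P_WRAPPER_FLAGS = {
    #                              send_next  send_prev  recv_prev  recv_next
    "recv_forward":                (False,    False,     True,      False),
    "recv_backward":               (False,    False,     False,     True),
    "send_forward":                (True,     False,     False,     False),
    "send_backward":               (False,    True,      False,     False),
    "send_forward_recv_backward":  (True,     False,     False,     True),
    "send_backward_recv_forward":  (False,    True,      True,      False),
}

if __name__ == "__main__":
    for name, flags in P2P_WRAPPER_FLAGS.items():
        print(f"{name:30s} -> {flags}")
```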
"float32", 2: "float64", 3: "int32", - 4: "int64" + 4: "int64", } @@ -88,7 +88,17 @@ def _all_gather(tensor, group=None, use_calc_stream=True): if group is not None and not group.is_member(): return ring_id = 0 if group is None else group.id - nranks = paddle.distributed.collective._get_global_group( - ).nranks if group is None else group.nranks - return _legacy_C_ops.c_allgather(tensor, 'use_calc_stream', use_calc_stream, - 'ring_id', ring_id, 'nranks', nranks) + nranks = ( + paddle.distributed.collective._get_global_group().nranks + if group is None + else group.nranks + ) + return _legacy_C_ops.c_allgather( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'nranks', + nranks, + ) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py index 798137564904003fa8c4fd75698928615e624612..2668b5808996d3598cc1e8f6abb2b27867145bc7 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py @@ -31,7 +31,11 @@ import paddle from paddle.fluid import core from paddle.optimizer import Optimizer from paddle.fluid.clip import ClipGradByGlobalNorm -from paddle.distributed.collective import _get_global_group, broadcast, new_group +from paddle.distributed.collective import ( + _get_global_group, + broadcast, + new_group, +) from .group_sharded_storage import ParamStorage, GradStorage from .group_sharded_utils import Type, device_guard, GroupShardedClipGrad @@ -63,21 +67,24 @@ class GroupShardedOptimizerStage2(Optimizer): # 4. Support offload function. # 5. Support the establishment of independent communication groups. # 6. Broadcast_fp16 is not supported now. 
- def __init__(self, - params, - optim, - group=None, - offload=False, - device="gpu", - pertrain_sync_models=True, - dp_group=None, - **kw): + def __init__( + self, + params, + optim, + group=None, + offload=False, + device="gpu", + pertrain_sync_models=True, + dp_group=None, + **kw + ): super().__init__(learning_rate=optim._learning_rate, parameters=params) assert core.is_compiled_with_cuda(), "Only GPU is supported now" # Segmentation information - self._dtype_rank_params = OrderedDict( + self._dtype_rank_params = ( + OrderedDict() ) # {dtype:[param1,param2]} device, rank, params self._param2rank = {} self.__segment_params = [] @@ -92,8 +99,9 @@ class GroupShardedOptimizerStage2(Optimizer): # record the last task used for comm overlap for sharding stage 2 self._comm_task = None - assert hasattr(self._optim, "_master_weights" - ), "Must use optimizer with _master_weights attribute" + assert hasattr( + self._optim, "_master_weights" + ), "Must use optimizer with _master_weights attribute" # Support parameter group and parameter list self._local_params = [] @@ -104,10 +112,17 @@ class GroupShardedOptimizerStage2(Optimizer): self._local_params.extend(list(params)) self._default_device = device - self._pfp16 = len( - list( - filter(lambda x: x.trainable and x.dtype == Type.fp16.value, - self._local_params))) > 0 + self._pfp16 = ( + len( + list( + filter( + lambda x: x.trainable and x.dtype == Type.fp16.value, + self._local_params, + ) + ) + ) + > 0 + ) self._broadcast_overlap = False self._forward_pre_hook_remove_helper = [] @@ -116,12 +131,14 @@ class GroupShardedOptimizerStage2(Optimizer): # Have to sort the params to make sure all params are in the forward using order. self._broadcast_order_params = sorted( self.local_params, - key=lambda x: int(x.name.split('.')[0].split('_')[-1])) + key=lambda x: int(x.name.split('.')[0].split('_')[-1]), + ) except ValueError: self._broadcast_order_params = None - self._group = new_group( - _get_global_group().ranks) if group is None else group + self._group = ( + new_group(_get_global_group().ranks) if group is None else group + ) # only support to combine stage2 and dp hybrid parallel now. 
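Note (not part of the patch): `_broadcast_order_params` above is sorted with `int(x.name.split('.')[0].split('_')[-1])`, which assumes parameter names shaped like `column_linear_32.w_0`. A sketch of that sort key, with hypothetical parameter names chosen only for illustration:

```python
# Illustrative sketch only: the sort key used for _broadcast_order_params.

def broadcast_order_key(param_name):
    # "column_linear_32.w_0" -> "column_linear_32" -> "32" -> 32
    return int(param_name.split('.')[0].split('_')[-1])

if __name__ == "__main__":
    names = ["column_linear_32.w_0", "row_linear_3.b_0", "embedding_10.w_0"]
    print(sorted(names, key=broadcast_order_key))
    # ['row_linear_3.b_0', 'embedding_10.w_0', 'column_linear_32.w_0']
```

A name that does not match the pattern raises ValueError during the sort; the code above catches it and later warns and falls back to `self._local_params`, which is why the warning about the `.+_[0-9]+\..+` naming pattern exists.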
self._dp_group = dp_group @@ -141,15 +158,19 @@ class GroupShardedOptimizerStage2(Optimizer): ) self._optim._grad_clip = GroupShardedClipGrad( - self._optim._grad_clip, paddle.get_device(), self._group) + self._optim._grad_clip, paddle.get_device(), self._group + ) if self._optim._parameter_list and isinstance( - self._optim._parameter_list[0], dict): + self._optim._parameter_list[0], dict + ): for item in self._optim._param_groups: if "grad_clip" in item.keys(): item["grad_clip"] = self._optim._grad_clip if offload: - assert self._pfp16, "Only support offload strategy while using \'Adam\', \'AdamW\' and \'Momentum\' optimizer with AMP/Pure FP16" + assert ( + self._pfp16 + ), "Only support offload strategy while using \'Adam\', \'AdamW\' and \'Momentum\' optimizer with AMP/Pure FP16" self.offload = offload # Using for offload self.offload_device = "cpu" @@ -171,16 +192,17 @@ class GroupShardedOptimizerStage2(Optimizer): """ for p in self._local_params: - broadcast(p, - src=self._global_root_rank, - group=self._group, - sync_op=True) + broadcast( + p, src=self._global_root_rank, group=self._group, sync_op=True + ) if self._dp_group: - broadcast(p, - src=self._dp_group.ranks[0], - group=self._dp_group, - sync_op=True) + broadcast( + p, + src=self._dp_group.ranks[0], + group=self._dp_group, + sync_op=True, + ) def _update_task(self, task): if self._reduce_overlap: @@ -194,15 +216,15 @@ class GroupShardedOptimizerStage2(Optimizer): # Enable gradients' reduces overlap with backward calculation. self._reduce_overlap = reduce_overlap - def _set_broadcast_overlap(self, - broadcast_overlap, - layers=None, - num_groups=None): + def _set_broadcast_overlap( + self, broadcast_overlap, layers=None, num_groups=None + ): # Enable post optimizer broadcasts overlap with the forward calculation of next batch. self._broadcast_overlap = broadcast_overlap if self._broadcast_overlap: - assert layers is not None, \ - "To enable broadcast overlap forward, please pass the module to the function." + assert ( + layers is not None + ), "To enable broadcast overlap forward, please pass the module to the function." self._layers = layers warnings.warn( "Setting overlap broadcast means the `paddle.device.cuda.synchronize()` " @@ -212,7 +234,8 @@ class GroupShardedOptimizerStage2(Optimizer): # Params' names should be like column_linear_32.w_0 patter to get the best performance. warnings.warn( r"The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, " - "overlap broadcast may harm the performance.") + "overlap broadcast may harm the performance." 
+ ) self._broadcast_order_params = self._local_params if num_groups is None or num_groups > len(self._broadcast_order_params): @@ -222,9 +245,9 @@ class GroupShardedOptimizerStage2(Optimizer): ) num_groups = 1 - assert isinstance( - num_groups, - int) and num_groups > 0, "num_groups should be a positive integer" + assert ( + isinstance(num_groups, int) and num_groups > 0 + ), "num_groups should be a positive integer" self._number_of_broadcast_groups = num_groups self._broadcast_groups = [ @@ -244,7 +267,8 @@ class GroupShardedOptimizerStage2(Optimizer): name=param.name, value=param.cast(dtype=Type.fp32.value).numpy(), place=core.CPUPlace(), - stop_gradient=param.stop_gradient) + stop_gradient=param.stop_gradient, + ) else: for param in trainable_params: if param.dtype == Type.fp16.value: @@ -253,8 +277,7 @@ class GroupShardedOptimizerStage2(Optimizer): self._optim._master_weights[param.name] = master_tensor def _update_opt_status(self): - """Update optimizer status and parameter storage information, and special functions to be developed. - """ + """Update optimizer status and parameter storage information, and special functions to be developed.""" # func 1 self._integration_params() @@ -302,14 +325,16 @@ class GroupShardedOptimizerStage2(Optimizer): if len(self._dtype_rank_params) == 0: # Assign the parameters of each rank according to the type trainable_params = list( - filter(lambda x: x.trainable, self._local_params)) + filter(lambda x: x.trainable, self._local_params) + ) for param in trainable_params: if param.dtype not in self._dtype_rank_params.keys(): self._dtype_rank_params[param.dtype] = [ [] for _ in range(self.world_size) ] - self._dtype_rank_params[param.dtype][self.param2rank[ - param.name]].append(param) + self._dtype_rank_params[param.dtype][ + self.param2rank[param.name] + ].append(param) # Sort per rank params by size for dtype in self._dtype_rank_params.keys(): @@ -329,7 +354,8 @@ class GroupShardedOptimizerStage2(Optimizer): if dtype not in self._rank_buffer_size.keys(): self._rank_buffer_size[dtype] = {} for dst_rank, per_rank_params in enumerate( - self.dtype_rank_params[dtype]): + self.dtype_rank_params[dtype] + ): if dst_rank not in self._rank_buffer_size[dtype].keys(): self._rank_buffer_size[dtype][dst_rank] = 0 for param in per_rank_params: @@ -337,11 +363,15 @@ class GroupShardedOptimizerStage2(Optimizer): continue size = param._numel() * align[dtype] remaining = size % alignment[self._default_device] - ali = 0 if remaining == 0 else alignment[ - self._default_device] - remaining + ali = ( + 0 + if remaining == 0 + else alignment[self._default_device] - remaining + ) align_ = ali // align[dtype] - self._rank_buffer_size[dtype][dst_rank] += param._numel( - ) + align_ + self._rank_buffer_size[dtype][dst_rank] += ( + param._numel() + align_ + ) self._param2align[param.name] = align_ return self._rank_buffer_size @@ -360,23 +390,27 @@ class GroupShardedOptimizerStage2(Optimizer): # Merge all the trainable params in a single InternalStorage trainable_params = list( - filter(lambda x: x.trainable, params)) + filter(lambda x: x.trainable, params) + ) if self._pfp16 and dst_rank == self._rank: self._generate_master_params(trainable_params) if trainable_params: param_storage = ParamStorage( size=self.rank_buffer_size[dtype][dst_rank], dtype=dtype, - device=self._default_device) + device=self._default_device, + ) - param_storage.add_rank_params(trainable_params, - self._param2align) + param_storage.add_rank_params( + trainable_params, self._param2align + ) 
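Note (not part of the patch): `_rank_buffer_size` above pads every parameter so its byte size lands on the device alignment boundary. A standalone sketch of that arithmetic; `elem_bytes` stands in for `align[dtype]` (bytes per element) and `alignment` for `alignment[self._default_device]`, and the concrete numbers below are toy assumptions:

```python
# Illustrative sketch only: per-parameter padding as computed in
# _rank_buffer_size, expressed in elements.

def padded_numel(numel, elem_bytes, alignment):
    size = numel * elem_bytes                  # parameter size in bytes
    remaining = size % alignment
    pad_bytes = 0 if remaining == 0 else alignment - remaining
    pad_elems = pad_bytes // elem_bytes        # padding converted back to elements
    return numel + pad_elems

if __name__ == "__main__":
    # e.g. a 2-byte dtype with an assumed 256-byte alignment
    print(padded_numel(1000, 2, 256))  # 2000 B -> pad 48 B -> 1024 elements
    print(padded_numel(1024, 2, 256))  # already aligned -> 1024 elements
```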
self.param_storages[dtype][dst_rank] = param_storage # Clear the InternalStorage keys which are not in use anymore dtype_in_use = list(self.dtype_rank_params.keys()) dtype_to_pop = list( - filter(lambda x: x not in dtype_in_use, self.param_storages.keys())) + filter(lambda x: x not in dtype_in_use, self.param_storages.keys()) + ) for d in dtype_to_pop: self.param_storages.pop(d) @@ -386,8 +420,11 @@ class GroupShardedOptimizerStage2(Optimizer): for param in cpu_master_params: size = param._numel() * align[Type.fp32.value] remaining = size % alignment[self.offload_device] - ali = 0 if remaining == 0 else alignment[ - self.offload_device] - remaining + ali = ( + 0 + if remaining == 0 + else alignment[self.offload_device] - remaining + ) align_ = ali // align[Type.fp32.value] self.offload_buffer_size += param._numel() + align_ self.offload_param2align[param.name] = align_ @@ -397,10 +434,12 @@ class GroupShardedOptimizerStage2(Optimizer): self.offload_params = ParamStorage( size=self.offload_buffer_size, dtype=Type.fp32.value, - device=self.offload_device) + device=self.offload_device, + ) self.offload_params.buffer.name = "offload_buffer" self.offload_params.add_rank_params( - cpu_master_params, self.offload_param2align, False) + cpu_master_params, self.offload_param2align, False + ) self.offload_params.buffer.stop_gradient = False self.offload_grads = GradStorage( @@ -409,14 +448,16 @@ class GroupShardedOptimizerStage2(Optimizer): device=self.offload_device, destination=self._rank, parm2align=self.offload_param2align, - convert_cpu=True) + convert_cpu=True, + ) for p in cpu_master_params: self.offload_grads.add_grad( - p, self.offload_param2align[p.name]) + p, self.offload_param2align[p.name] + ) self._optim._master_weights[ - self.offload_params.buffer. 
- name] = self.offload_params.buffer + self.offload_params.buffer.name + ] = self.offload_params.buffer def _offload_acc_grad(self, param_name, grad_fp32_cpu): """accumulate grads with offload strategy""" @@ -424,12 +465,14 @@ class GroupShardedOptimizerStage2(Optimizer): if param_name in self._master_params.keys(): if self._master_params[param_name].grad is None: self._master_params[param_name]._copy_gradient_from( - grad_fp32_cpu) + grad_fp32_cpu + ) else: self._master_params[param_name].grad.add_(grad_fp32_cpu) self.offload_params.buffer._copy_gradient_from( - self.offload_grads.buffer) + self.offload_grads.buffer + ) def _offload_scale_grad(self, scale_size): """scale grads with offload strategy""" @@ -456,7 +499,7 @@ class GroupShardedOptimizerStage2(Optimizer): if self.offload: params_list = [self.offload_params.buffer] - #TODO(Baibaifan): Offload will support param_groups later + # TODO(Baibaifan): Offload will support param_groups later if not isinstance(self._optim._param_groups[0], dict): self._optim._parameter_list = params_list self._optim._param_groups = params_list @@ -468,8 +511,11 @@ class GroupShardedOptimizerStage2(Optimizer): for param in self._local_params: if param.name in self._master_params.keys(): - param.set_value(self._master_params[param.name].cuda( - self.dev_id).cast(dtype=param.dtype)) + param.set_value( + self._master_params[param.name] + .cuda(self.dev_id) + .cast(dtype=param.dtype) + ) else: self._optim.step() @@ -478,7 +524,8 @@ class GroupShardedOptimizerStage2(Optimizer): def minimize(self): raise RuntimeError( - "optimizer.minimize() not support now, please use optimizer.step()") + "optimizer.minimize() not support now, please use optimizer.step()" + ) def set_state_dict(self, state_dict): self._optim.set_state_dict(state_dict) @@ -501,10 +548,12 @@ class GroupShardedOptimizerStage2(Optimizer): else: for dtype_per_rank in self.param_storages.values(): for dst_rank, internal_storage in dtype_per_rank.items(): - broadcast(tensor=internal_storage.buffer, - src=self._group.ranks[dst_rank], - group=self._group, - sync_op=True) + broadcast( + tensor=internal_storage.buffer, + src=self._group.ranks[dst_rank], + group=self._group, + sync_op=True, + ) def _forward_pre_hook_function(self, tasks): # Since the layers will call pre hook by `forward_pre_hook(self, inputs)`, @@ -527,10 +576,12 @@ class GroupShardedOptimizerStage2(Optimizer): if x.trainable: group = self._broadcast_groups[group_idx] group_idx = (group_idx + 1) % self._number_of_broadcast_groups - task = broadcast(tensor=x, - src=group.ranks[self._param2rank[x.name]], - group=group, - sync_op=False) + task = broadcast( + tensor=x, + src=group.ranks[self._param2rank[x.name]], + group=group, + sync_op=False, + ) assert x.name not in param2task param2task[x.name] = task @@ -544,4 +595,6 @@ class GroupShardedOptimizerStage2(Optimizer): tasks.append(param2task[param.name]) self._forward_pre_hook_remove_helper.append( layer.register_forward_pre_hook( - self._forward_pre_hook_function(tasks))) + self._forward_pre_hook_function(tasks) + ) + ) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py index 6c0716f7bbb31917c9f70be8049667facfda2b9c..044111cc3490a8b7e52b011e5bb2bce3515d4eea 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage2.py @@ -58,38 +58,50 @@ class 
GroupShardedStage2(nn.Layer): # 5. Support the establishment of independent communication groups. def __init__( - self, - layer, - sharding_optimizer, - group=None, - sync_buffers=False, - buffer_max_size=2**23, #8MB - auto_refresh_trainable=True, - device="gpu", - dp_group=None): + self, + layer, + sharding_optimizer, + group=None, + sync_buffers=False, + buffer_max_size=2**23, # 8MB + auto_refresh_trainable=True, + device="gpu", + dp_group=None, + ): super().__init__() # training options self._layer = layer - self._sharding_optimizers = [ - sharding_optimizer - ] if not isinstance(sharding_optimizer, list) else sharding_optimizer + self._sharding_optimizers = ( + [sharding_optimizer] + if not isinstance(sharding_optimizer, list) + else sharding_optimizer + ) assert all( list( - map(lambda opt: isinstance(opt, GroupShardedOptimizerStage2), - self._sharding_optimizers)) + map( + lambda opt: isinstance(opt, GroupShardedOptimizerStage2), + self._sharding_optimizers, + ) + ) ), "Please use GroupShardedOptimizerStage2 optimizer" self._sync_buffers = sync_buffers self._auto_refresh_trainable = auto_refresh_trainable # Communication related attributes - self._group = collective.new_group( - collective._get_global_group().ranks) if group is None else group + self._group = ( + collective.new_group(collective._get_global_group().ranks) + if group is None + else group + ) self._world_size_scaling = 1.0 / self._group.nranks - assert self._group.nranks > 1, "Training must be distributed, ranks must be greater than 1" + assert ( + self._group.nranks > 1 + ), "Training must be distributed, ranks must be greater than 1" self._rank = self._group.rank self._global_root_rank = self._group.ranks[ - 0] # picking ranks index 0 as the reference + 0 + ] # picking ranks index 0 as the reference self._default_device = device self._dp_group = dp_group @@ -106,15 +118,17 @@ class GroupShardedStage2(nn.Layer): self._trainable_param2rank = {} self._trainable_param2align = {} self._trainable_params = list( - filter(lambda x: x.trainable, self._all_params)) + filter(lambda x: x.trainable, self._all_params) + ) self._trainable_mask = list(map(_trainable, self._trainable_params)) self._param_grads = [] # Set grad storage size & Display param sizes and model sizes model_size = sum([p._numel() for p in self._layer.parameters()]) assert buffer_max_size >= 0, "buffer_max_size must be GE than 0." 
- self._buffer_max_size = self._rank_buffer_size(buffer_max_size, - model_size) + self._buffer_max_size = self._rank_buffer_size( + buffer_max_size, model_size + ) self._use_grad_storage = buffer_max_size > 0 self._grad_storages = {} # {dtype: {rank: GradStorage}} self._has_grad_storage = [] @@ -123,11 +137,12 @@ class GroupShardedStage2(nn.Layer): # Offload # TODO(haohongxiang): Now it's not be supported for multi-optimizers using Offload strategy self._offload_optims = list( - filter(lambda optim: optim.offload, self._sharding_optimizers)) + filter(lambda optim: optim.offload, self._sharding_optimizers) + ) if len(self._offload_optims) > 0: - assert len( - self._sharding_optimizers - ) == 1, "Only support offload strategy for single optimizer" + assert ( + len(self._sharding_optimizers) == 1 + ), "Only support offload strategy for single optimizer" self._offload = len(self._offload_optims) > 0 self._offload_device = "cpu" @@ -170,17 +185,21 @@ class GroupShardedStage2(nn.Layer): return fw def set_state_dict(self, state_dict, use_structured_name=True): - self._layer.set_state_dict(state_dict, - use_structured_name=use_structured_name) - - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): + self._layer.set_state_dict( + state_dict, use_structured_name=use_structured_name + ) + + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): return self._layer.state_dict( destination=destination, include_sublayers=include_sublayers, - structured_name_prefix=structured_name_prefix) + structured_name_prefix=structured_name_prefix, + ) def _clear_gradients(self): """ @@ -188,8 +207,10 @@ class GroupShardedStage2(nn.Layer): """ # Release grad storages for dtype in self._grad_storages.keys(): - if not self._offload and self._rank in self._grad_storages[ - dtype].keys(): + if ( + not self._offload + and self._rank in self._grad_storages[dtype].keys() + ): self._grad_storages[dtype][self._rank].buffer.zero_() # Release grads of params @@ -213,10 +234,13 @@ class GroupShardedStage2(nn.Layer): # Scale grad storages for dtype in self._grad_storages.keys(): - if not self._offload and self._rank in self._grad_storages[ - dtype].keys(): + if ( + not self._offload + and self._rank in self._grad_storages[dtype].keys() + ): self._grad_storages[dtype][self._rank].buffer.scale_( - scale=scale_factor) + scale=scale_factor + ) # Scale grads of params with paddle.no_grad(): @@ -246,7 +270,9 @@ class GroupShardedStage2(nn.Layer): Synchronously or asynchronously convert the data type of the layer, the device is not supported now. """ assert isinstance(device, str), "Device must be type str" - assert device == self._default_device, "New devices are not supported, because of the optimizer state is not sync" + assert ( + device == self._default_device + ), "New devices are not supported, because of the optimizer state is not sync" self._layer.to(device=device, dtype=dtype, blocking=blocking) @@ -254,14 +280,15 @@ class GroupShardedStage2(nn.Layer): self._fresh_trainable() def _fresh_trainable(self): - """ Whether to update training parameters. 
""" + """Whether to update training parameters.""" # Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance) if reduce(lambda x, y: x or y, self._grad_reduced, False): logging.warning("Grads waiting to be reduced.") self._trainable_params = list( - filter(lambda x: x.trainable, self._all_params)) + filter(lambda x: x.trainable, self._all_params) + ) self._trainable_params.sort(key=lambda x: x._numel()) self._trainable_param2rank = {} @@ -271,14 +298,19 @@ class GroupShardedStage2(nn.Layer): optim._update_opt_status() # Get the parameters split by the optimizer according to rank - for per_rank_params in optim.dtype_rank_params.values( + for ( + per_rank_params + ) in ( + optim.dtype_rank_params.values() ): # all the params from all ranks for params in per_rank_params: for param in filter(lambda x: x.trainable, params): self._trainable_param2rank[ - param.name] = optim.param2rank[param.name] + param.name + ] = optim.param2rank[param.name] self._trainable_param2align[ - param.name] = optim._param2align[param.name] + param.name + ] = optim._param2align[param.name] # Create grad_storage self._setup_use_grad_storage() @@ -292,16 +324,17 @@ class GroupShardedStage2(nn.Layer): """ for buffer in self._layer.buffers(include_sublayers=True): - collective.broadcast(buffer, - self._global_root_rank, - self._group, - sync_op=True) + collective.broadcast( + buffer, self._global_root_rank, self._group, sync_op=True + ) if self._dp_group and self._dp_group.nranks > 1: - collective.broadcast(buffer, - self._dp_group.ranks[0], - self._dp_group, - sync_op=True) + collective.broadcast( + buffer, + self._dp_group.ranks[0], + self._dp_group, + sync_op=True, + ) def __getattr__(self, name): """Forward missing attributes to wrapped layer.""" @@ -327,9 +360,9 @@ class GroupShardedStage2(nn.Layer): # model._set_reduce_overlap(True) self._reduce_overlap = reduce_overlap if self._reduce_overlap: - assert len( - self._sharding_optimizers - ) == 1, "Only support comm overlap strategy for single optimizer" + assert ( + len(self._sharding_optimizers) == 1 + ), "Only support comm overlap strategy for single optimizer" self._sharding_optimizers[0]._set_reduce_overlap(reduce_overlap) def _get_reduce_fn(self, index, param, dst_rank): @@ -345,7 +378,9 @@ class GroupShardedStage2(nn.Layer): def reduce(*_): # Skip gradient reduction, do not change status information if self._grad_reduced[index]: - assert param.grad is not None, "Parameter gradient cannot be None" + assert ( + param.grad is not None + ), "Parameter gradient cannot be None" # Change reduce information self._grad_reduced[index] = False @@ -356,26 +391,35 @@ class GroupShardedStage2(nn.Layer): param.clear_gradient(False) elif self._offload: tmp_grad = param.grad.cast( - dtype=Type.fp32.value).cpu() + dtype=Type.fp32.value + ).cpu() self._sharding_optimizers[0]._offload_acc_grad( - param.name, tmp_grad) + param.name, tmp_grad + ) del tmp_grad param.clear_gradient(False) # Synchronize the reduce parameter gradient asynchronize self._sharding_optimizers[0]._update_task( - collective.reduce(tensor=param.grad, - dst=self._group.ranks[dst_rank], - group=self._group, - sync_op=not self._reduce_overlap)) + collective.reduce( + tensor=param.grad, + dst=self._group.ranks[dst_rank], + group=self._group, + sync_op=not self._reduce_overlap, + ) + ) if self._dp_group and self._dp_group.nranks > 1: - assert not self._reduce_overlap, 'dp + stage2 hybrid parallel only Synchronize due to the new communication lib.' 
- #TODO(wuhuachao):after the new communication lib upgrading, overlapping the comm of dp + stage2. - collective.all_reduce(tensor=param.grad, - group=self._dp_group, - sync_op=True) + assert ( + not self._reduce_overlap + ), 'dp + stage2 hybrid parallel only Synchronize due to the new communication lib.' + # TODO(wuhuachao):after the new communication lib upgrading, overlapping the comm of dp + stage2. + collective.all_reduce( + tensor=param.grad, + group=self._dp_group, + sync_op=True, + ) # Clear the task flow and trigger callback to clear the redundant gradient # self._clear_task_flow() @@ -388,7 +432,9 @@ class GroupShardedStage2(nn.Layer): def reduce(*_): # Skip gradient reduction, do not change status information if self._grad_reduced[index]: - assert param.grad is not None, "Parameter gradient cannot be None" + assert ( + param.grad is not None + ), "Parameter gradient cannot be None" # Change reduce information self._grad_reduced[index] = False @@ -410,9 +456,11 @@ class GroupShardedStage2(nn.Layer): for p in grad_storage._params: with device_guard(): tmp_grad = p.grad.cast( - dtype=Type.fp32.value) + dtype=Type.fp32.value + ) self._sharding_optimizers[ - 0]._offload_acc_grad(p.name, tmp_grad) + 0 + ]._offload_acc_grad(p.name, tmp_grad) p.clear_gradient(False) grad_storage._device = self._default_device grad_storage.buffer._clear_data() @@ -425,14 +473,20 @@ class GroupShardedStage2(nn.Layer): tensor=grad_storage.buffer, dst=self._group.ranks[grad_storage.destination], group=self._group, - sync_op=not self._reduce_overlap)) + sync_op=not self._reduce_overlap, + ) + ) if self._dp_group and self._dp_group.nranks > 1: - assert not self._reduce_overlap, 'dp + stage2 hybrid parallel only Synchronize due to the new communication lib.' - #TODO(wuhuachao):after the new communication lib upgrading, overlapping the comm of dp + stage2. - collective.all_reduce(tensor=grad_storage.buffer, - group=self._dp_group, - sync_op=True) + assert ( + not self._reduce_overlap + ), 'dp + stage2 hybrid parallel only Synchronize due to the new communication lib.' + # TODO(wuhuachao):after the new communication lib upgrading, overlapping the comm of dp + stage2. 
+ collective.all_reduce( + tensor=grad_storage.buffer, + group=self._dp_group, + sync_op=True, + ) cleanup() @@ -460,7 +514,8 @@ class GroupShardedStage2(nn.Layer): reduce_function = self._get_reduce_fn(index, param, dst_rank) self._bw_hooks.append( - param._register_backward_hook(reduce_function)) + param._register_backward_hook(reduce_function) + ) def _setup_use_grad_storage(self): """ @@ -483,25 +538,32 @@ class GroupShardedStage2(nn.Layer): dtype=param.dtype, device=self._default_device, destination=dst_rank, - parm2align=self._trainable_param2align) + parm2align=self._trainable_param2align, + ) # Criteria to decide whether this parameter is to be put in GradStorage if self._grad_storages[param.dtype][dst_rank].can_add_grad_view( - param, self._trainable_param2align[param.name]): + param, self._trainable_param2align[param.name] + ): self._grad_storages[param.dtype][dst_rank].add_grad( - param, self._trainable_param2align[param.name]) + param, self._trainable_param2align[param.name] + ) self._has_grad_storage[index] = True else: self._param_grads.append(param.name) print( - "Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, " - .format(param.name, param.shape, - self._trainable_param2align[param.name], - self._grad_storages[param.dtype][dst_rank]._fill)) + "Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, ".format( + param.name, + param.shape, + self._trainable_param2align[param.name], + self._grad_storages[param.dtype][dst_rank]._fill, + ) + ) for dtype in self._grad_storages.keys(): self._grad_storage_list.extend( - list(self._grad_storages[dtype].values())) + list(self._grad_storages[dtype].values()) + ) # def _clear_task_flow(self): # """Try to consume the previous tasks.""" @@ -553,21 +615,27 @@ class GroupShardedStage2(nn.Layer): if Type.fp16.value in rank_buffer_size.keys(): # FP16 GradStorage and model size logger_.info( - "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======" - .format(rank_buffer_size[Type.fp16.value] / 2**19, - model_size / 2**19)) + "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".format( + rank_buffer_size[Type.fp16.value] / 2**19, + model_size / 2**19, + ) + ) if Type.bf16.value in rank_buffer_size.keys(): # FP16 GradStorage and model size logger_.info( - "====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======" - .format(rank_buffer_size[Type.bf16.value] / 2**19, - model_size / 2**19)) + "====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".format( + rank_buffer_size[Type.bf16.value] / 2**19, + model_size / 2**19, + ) + ) if Type.fp32.value in rank_buffer_size.keys(): # FP32 GradStorage and model size logger_.info( - "====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======" - .format(rank_buffer_size[Type.fp32.value] / 2**18, - model_size / 2**18)) + "====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".format( + rank_buffer_size[Type.fp32.value] / 2**18, + model_size / 2**18, + ) + ) return rank_buffer_size def _redefine_opt_step(self): diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py index 00ce653e1df7d16d2fc198b4215142acb70d6cc5..e80ea0531f359cd8cb6245b17e9efba6e52ea886 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py 
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py @@ -72,16 +72,18 @@ class GroupShardedStage3(nn.Layer): # 3. Support offload function. # 4. Support the establishment of independent communication groups. - def __init__(self, - layer, - optimizer, - group=None, - sync_buffers=False, - device="gpu", - segment_size=2**20, - pertrain_sync_models=True, - offload=False, - sync_comm=False): + def __init__( + self, + layer, + optimizer, + group=None, + sync_buffers=False, + device="gpu", + segment_size=2**20, + pertrain_sync_models=True, + offload=False, + sync_comm=False, + ): super().__init__() # Default configs @@ -96,27 +98,40 @@ class GroupShardedStage3(nn.Layer): self._segment_size = segment_size global DEV - DEV = "cpu" if paddle.get_device() == "cpu" else paddle.get_device( - ).split(":")[0] + DEV = ( + "cpu" + if paddle.get_device() == "cpu" + else paddle.get_device().split(":")[0] + ) global DEV_ID - DEV_ID = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + DEV_ID = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) global param2dtype param2dtype = dict() # Communication group establishment - self._group = collective.new_group( - collective._get_global_group().ranks) if group is None else group + self._group = ( + collective.new_group(collective._get_global_group().ranks) + if group is None + else group + ) self._world_size_scaling = 1.0 / self._group.nranks - assert self._group.nranks > 1, "Training must be distributed, ranks must be greater than 1." + assert ( + self._group.nranks > 1 + ), "Training must be distributed, ranks must be greater than 1." self._rank = self._group.rank self._global_root_rank = self._group.ranks[ - 0] # picking ranks index 0 as the reference + 0 + ] # picking ranks index 0 as the reference # Parameter segmentation for global ranks # After flatten -> self._param2buffer_size, self._param2buffer, self._trainable_params self._param2buffer_size = dict() # {param.name: size} - self._param2buffer = dict( + self._param2buffer = ( + dict() ) # {param.name: [(start0, end0),(start1, end1), ...]} self._trainable_params = dict() # {id(layer): [trainable_params]} self._unslice_params = set() # param's numel <= segment_size @@ -124,9 +139,11 @@ class GroupShardedStage3(nn.Layer): self._grad_storages = dict() # {param.dtype: GradStorage} assert not isinstance( - optimizer, list), "Multiple optimizers are not supported now." - self._optim = _OptimizerWrapper(optimizer, self._offload, self._group, - self._update_params_slice) + optimizer, list + ), "Multiple optimizers are not supported now." + self._optim = _OptimizerWrapper( + optimizer, self._offload, self._group, self._update_params_slice + ) self._ori_parameter_list = self._optim._parameter_list self._ori_param_groups = self._optim._param_groups @@ -136,9 +153,11 @@ class GroupShardedStage3(nn.Layer): "While using ClipGradByGlobalNorm in GroupShardedStage3, the grad clip of original optimizer will be changed." 
) self._optim._grad_clip = GroupShardedClipGrad( - self._optim._grad_clip, paddle.get_device(), self._group) + self._optim._grad_clip, paddle.get_device(), self._group + ) if self._optim._parameter_list and isinstance( - self._optim._parameter_list[0], dict): + self._optim._parameter_list[0], dict + ): for item in self._optim._param_groups: if "grad_clip" in item.keys(): item["grad_clip"] = self._optim._grad_clip @@ -177,22 +196,24 @@ class GroupShardedStage3(nn.Layer): """ for p in self._layer.parameters(): - collective.broadcast(p, - src=self._global_root_rank, - group=self._group, - sync_op=True) + collective.broadcast( + p, src=self._global_root_rank, group=self._group, sync_op=True + ) def _clear_gradients(self): assert len(self._trainable_params.keys()) > 0 current_layer_params = self._layer.parameters(include_sublayers=True) # 1.Handle param's slice trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) for param in trainable_params: - assert hasattr(param, "fw_storage" - ), "Find {} don't have fw_storage attribute.".format( - param.name) + assert hasattr( + param, "fw_storage" + ), "Find {} don't have fw_storage attribute.".format(param.name) param.fw_storage.clear_gradient(False) param.bw_storage._clear() @@ -206,8 +227,10 @@ class GroupShardedStage3(nn.Layer): param.clear_gradient(False) tmp_var = param.cuda(DEV_ID) - if tmp_var.dtype == Type.fp32.value and param2dtype[ - param.name] == Type.fp16.value: + if ( + tmp_var.dtype == Type.fp32.value + and param2dtype[param.name] == Type.fp16.value + ): tmp_var = paddle.cast(tmp_var, Type.fp16.value) tmp_var._share_buffer_to(param) del tmp_var @@ -222,9 +245,11 @@ class GroupShardedStage3(nn.Layer): if not isinstance(self._optim._param_groups[0], dict): slice_params = [param.fw_storage for param in update_list] self._optim._parameter_list = slice_params + list( - self._unslice_params) + self._unslice_params + ) self._optim._param_groups = slice_params + list( - self._unslice_params) + self._unslice_params + ) else: for param_group in self._optim._param_groups: p_group = [] @@ -250,17 +275,21 @@ class GroupShardedStage3(nn.Layer): return fw def set_state_dict(self, state_dict, use_structured_name=True): - self._layer.set_state_dict(state_dict, - use_structured_name=use_structured_name) - - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): + self._layer.set_state_dict( + state_dict, use_structured_name=use_structured_name + ) + + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): return self._layer.state_dict( destination=destination, include_sublayers=include_sublayers, - structured_name_prefix=structured_name_prefix) + structured_name_prefix=structured_name_prefix, + ) def _handle_unslice_params(self): buffer_size = dict() @@ -287,9 +316,11 @@ class GroupShardedStage3(nn.Layer): dtype=param.dtype, device=self._default_device, destination=self._rank, - parm2align=self._unslice_params2align) + parm2align=self._unslice_params2align, + ) self._grad_storages[param.dtype].add_grad( - param, self._unslice_params2align[param.name]) + param, self._unslice_params2align[param.name] + ) def _segment_rank_params(self, layer, name="last_layer"): """ @@ -329,8 +360,11 @@ class GroupShardedStage3(nn.Layer): align_ = self._param2align(param) offset = align_ + param._numel() - buffer_size = 
offset if offset % self._group.nranks == 0 else offset + self._group.nranks - ( - offset % self._group.nranks) + buffer_size = ( + offset + if offset % self._group.nranks == 0 + else offset + self._group.nranks - (offset % self._group.nranks) + ) self._param2buffer_size[param.name] = buffer_size # 2.Combination param buffer @@ -339,7 +373,8 @@ class GroupShardedStage3(nn.Layer): for rank_ in range(self._group.nranks): self._param2buffer[param.name].append( - (rank_ * pre_buffer, (rank_ + 1) * pre_buffer)) + (rank_ * pre_buffer, (rank_ + 1) * pre_buffer) + ) # Record param's dtype param2dtype[param.name] = param.dtype @@ -351,10 +386,11 @@ class GroupShardedStage3(nn.Layer): This is a function to simplify the handling of parameter InternalStorages. """ assert isinstance(buffer_size, int) - value = np.zeros( - buffer_size, - dtype=np.float16) if Type.fp16.value == param.dtype else np.zeros( - buffer_size, dtype=np.float32) + value = ( + np.zeros(buffer_size, dtype=np.float16) + if Type.fp16.value == param.dtype + else np.zeros(buffer_size, dtype=np.float32) + ) buffer = core.eager.Tensor(value=value, place=core.CPUPlace()) param_shape = param.shape @@ -376,16 +412,19 @@ class GroupShardedStage3(nn.Layer): if self._offload: with device_guard(): tmp_tensor = buffer._slice(start, end) - param.fw_storage = core.eager.Tensor(value=tmp_tensor, - place=core.CPUPlace(), - name="slice@" + param.name) + param.fw_storage = core.eager.Tensor( + value=tmp_tensor, + place=core.CPUPlace(), + name="slice@" + param.name, + ) with device_guard(): - param.master_weight = paddle.cast(param.fw_storage, - Type.fp32.value) + param.master_weight = paddle.cast( + param.fw_storage, Type.fp32.value + ) else: - param.fw_storage = core.eager.Tensor(value=buffer._slice( - start, end), - name="slice@" + param.name) + param.fw_storage = core.eager.Tensor( + value=buffer._slice(start, end), name="slice@" + param.name + ) param.status = "part" # Updata optimizer master weights @@ -414,20 +453,32 @@ class GroupShardedStage3(nn.Layer): self._register_forward_hooks(sub_layer) def _register_forward_all_hooks(self, sub_layer, task_flow): - def _forward_pre_hook(layer, inputs): - return ForwardPreHooks(layer, self._order_tracer, - self._trainable_params, - self._param2buffer_size, self._group, - self._sync_comm, self._offload, task_flow) + return ForwardPreHooks( + layer, + self._order_tracer, + self._trainable_params, + self._param2buffer_size, + self._group, + self._sync_comm, + self._offload, + task_flow, + ) def _forward_post_hook(layer, inputs, outputs): - return ForwardPostHooks.apply(outputs, layer, self._order_tracer, - self._trainable_params, - self._param2buffer, - self._param2buffer_size, self._rank, - self._group, self._sync_comm, - self._offload, task_flow) + return ForwardPostHooks.apply( + outputs, + layer, + self._order_tracer, + self._trainable_params, + self._param2buffer, + self._param2buffer_size, + self._rank, + self._group, + self._sync_comm, + self._offload, + task_flow, + ) # register previous forward hooks sub_layer.register_forward_pre_hook(_forward_pre_hook) @@ -442,10 +493,9 @@ class GroupShardedStage3(nn.Layer): """ for buffer in self._layer.buffers(include_sublayers=True): - collective.broadcast(buffer, - self._global_root_rank, - self._group, - sync_op=True) + collective.broadcast( + buffer, self._global_root_rank, self._group, sync_op=True + ) def __getattr__(self, name): """Forward missing attributes to wrapped layer.""" @@ -462,14 +512,16 @@ class GroupShardedStage3(nn.Layer): assert 
len(self._trainable_params.keys()) > 0 current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) # 1.Handle param's slice for param in trainable_params: assert hasattr( - param, - "fw_storage"), "Find {} don't have fw_storage attribute".format( - param.name) + param, "fw_storage" + ), "Find {} don't have fw_storage attribute".format(param.name) # Gradient average if self._offload: with device_guard(): @@ -507,16 +559,21 @@ class GroupShardedStage3(nn.Layer): assert len(self._trainable_params.keys()) > 0 current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) - t_flow = _allgather_buffer(trainable_params, - self._group, - param2buffer_size=self._param2buffer_size, - use_calc_stream=True, - task_flow=TaskFlow(), - sync_wait=True, - offload=self._offload, - convert2cpu=convert2cpu) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) + t_flow = _allgather_buffer( + trainable_params, + self._group, + param2buffer_size=self._param2buffer_size, + use_calc_stream=True, + task_flow=TaskFlow(), + sync_wait=True, + offload=self._offload, + convert2cpu=convert2cpu, + ) if convert2cpu: for param in trainable_params: t_flow.full_param[param.name][0]._share_buffer_to(param) @@ -527,15 +584,17 @@ class GroupShardedStage3(nn.Layer): def _register_backward_hooks(self): current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) for param in trainable_params: allreduce_function = self._get_allreduce_fn(param) param._register_backward_hook(allreduce_function) def _get_allreduce_fn(self, param): - @paddle.autograd.no_grad() def allreduce_(*_): if param.name in self._task_flow.full_grad.keys(): @@ -545,21 +604,25 @@ class GroupShardedStage3(nn.Layer): start, end = self._param2buffer[param.name][self._rank] if param.bw_storage is None: - param.bw_storage = full_grad._slice(start, - end).detach().clone() + param.bw_storage = ( + full_grad._slice(start, end).detach().clone() + ) if self._offload: param.bw_storage = _device2cpu(param.bw_storage, True) else: if self._offload: cpu_grad = _device2cpu( - full_grad._slice(start, end).detach().clone(), True) + full_grad._slice(start, end).detach().clone(), True + ) with device_guard(): param.bw_storage = paddle.add( - param.bw_storage, cpu_grad) + param.bw_storage, cpu_grad + ) else: param.bw_storage = paddle.add( param.bw_storage, - full_grad._slice(start, end).detach().clone()) + full_grad._slice(start, end).detach().clone(), + ) param.clear_gradient(False) del self._task_flow.full_grad[param.name] @@ -568,8 +631,12 @@ class GroupShardedStage3(nn.Layer): param.use_count = 0 param._clear_data() start, end = self._param2buffer[param.name][self._rank] - param.fw_storage = self._task_flow.full_param[ - param.name][0]._slice(start, end).detach().clone() + param.fw_storage = ( + self._task_flow.full_param[param.name][0] + ._slice(start, end) + .detach() + .clone() + ) param.status = "part" del self._task_flow.full_param[param.name] @@ -583,8 +650,9 @@ 
class GroupShardedStage3(nn.Layer): # CUDA alignment 256 bytes size = param._numel() * align[param.dtype] remaining = size % alignment[self._default_device] - ali = 0 if remaining == 0 else alignment[ - self._default_device] - remaining + ali = ( + 0 if remaining == 0 else alignment[self._default_device] - remaining + ) align_ = ali // align[param.dtype] return align_ @@ -618,8 +686,16 @@ class GroupShardedStage3(nn.Layer): self._optim.clear_grad = MethodType(_opt_clear, self._optim) -def ForwardPreHooks(layer, order_tracer, trainable_params, param2buffer_size, - group, sync_comm, offload, task_flow): +def ForwardPreHooks( + layer, + order_tracer, + trainable_params, + param2buffer_size, + group, + sync_comm, + offload, + task_flow, +): # Record layer's id layer_id = id(layer) @@ -634,35 +710,55 @@ def ForwardPreHooks(layer, order_tracer, trainable_params, param2buffer_size, # Whether to use calc stream task_flow.use_calc[layer_id] = use_calc # wait current layer params - _wait_layer(trainable_params[layer_id], task_flow, group, - param2buffer_size, use_calc, offload) - - if layer_id == order_tracer["layer"][-1]: return + _wait_layer( + trainable_params[layer_id], + task_flow, + group, + param2buffer_size, + use_calc, + offload, + ) + + if layer_id == order_tracer["layer"][-1]: + return order_ = order_tracer[layer_id] layer_id = order_tracer["layer"][order_ + 1] - _allgather_buffer(trainable_params[layer_id], - group, - param2buffer_size=param2buffer_size, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_id], + group, + param2buffer_size=param2buffer_size, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) return class ForwardPostHooks(PyLayer): - @staticmethod - def forward(ctx, inputs, layer, order_tracer, trainable_params, - param2buffer, param2buffer_size, rank, group, sync_comm, - offload, task_flow): + def forward( + ctx, + inputs, + layer, + order_tracer, + trainable_params, + param2buffer, + param2buffer_size, + rank, + group, + sync_comm, + offload, + task_flow, + ): layer_id = id(layer) # release current layer full params - _release_param(trainable_params[layer_id], param2buffer, rank, - task_flow, offload) + _release_param( + trainable_params[layer_id], param2buffer, rank, task_flow, offload + ) if layer_id not in order_tracer.keys(): order_ = order_tracer["order"] @@ -670,7 +766,7 @@ class ForwardPostHooks(PyLayer): order_tracer["order"] += 1 order_tracer["layer"].append(layer_id) - #Record fw info + # Record fw info ctx.order_tracer = order_tracer ctx.task_flow = task_flow ctx.group = group @@ -698,32 +794,43 @@ class ForwardPostHooks(PyLayer): # Allgather params synchronization if sync_comm: use_calc, sync_wait = True, True - _allgather_buffer(trainable_params[layer_id], - group, - param2buffer_size=param2buffer_size, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_id], + group, + param2buffer_size=param2buffer_size, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) else: - _wait_layer(trainable_params[layer_id], task_flow, group, - param2buffer_size, use_calc, offload) + _wait_layer( + trainable_params[layer_id], + task_flow, + group, + param2buffer_size, + use_calc, + offload, + ) # Create params's grad - _create_params_grad(trainable_params[layer_id], param2buffer_size, - task_flow) + 
_create_params_grad( + trainable_params[layer_id], param2buffer_size, task_flow + ) # Whether to use calc stream task_flow.use_calc[layer_id] = use_calc if layer_id != order_tracer["layer"][0] and not sync_comm: layer_next_id = order_tracer["layer"][order_tracer[layer_id] - 1] - _allgather_buffer(trainable_params[layer_next_id], - group, - param2buffer_size=param2buffer_size, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_next_id], + group, + param2buffer_size=param2buffer_size, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) return args @@ -733,22 +840,22 @@ class TaskFlow: Task flows, one way linked list for task acquisition. """ - def __init__(self, - full_param=dict(), - full_grad=dict(), - use_calc=dict(), - callback=None): + def __init__( + self, + full_param=dict(), + full_grad=dict(), + use_calc=dict(), + callback=None, + ): self.full_param = full_param self.full_grad = full_grad self.use_calc = use_calc self.callback = callback -def _release_param(trainable_params, - param2buffer, - rank, - task_flow, - offload=False): +def _release_param( + trainable_params, param2buffer, rank, task_flow, offload=False +): for param in trainable_params: # async communicate share weight not clear param.use_count -= 1 @@ -757,8 +864,12 @@ def _release_param(trainable_params, if param.name in task_flow.full_param.keys(): start, end = param2buffer[param.name][rank] with paddle.amp.auto_cast(enable=False): - param.fw_storage = task_flow.full_param[ - param.name][0]._slice(start, end).detach().clone() + param.fw_storage = ( + task_flow.full_param[param.name][0] + ._slice(start, end) + .detach() + .clone() + ) param.status = "part" del task_flow.full_param[param.name] @@ -767,12 +878,14 @@ def _release_param(trainable_params, return -def _wait_layer(trainable_params, - task_flow, - group, - param2buffer_size, - use_calc_stream, - offload=False): +def _wait_layer( + trainable_params, + task_flow, + group, + param2buffer_size, + use_calc_stream, + offload=False, +): for param in trainable_params: if param.status == "all": @@ -787,25 +900,29 @@ def _wait_layer(trainable_params, param.status = "all" param.use_count += 1 else: - _allgather_buffer(trainable_params, - group, - param2buffer_size=param2buffer_size, - use_calc_stream=True, - task_flow=task_flow, - sync_wait=True, - offload=offload) + _allgather_buffer( + trainable_params, + group, + param2buffer_size=param2buffer_size, + use_calc_stream=True, + task_flow=task_flow, + sync_wait=True, + offload=offload, + ) break return task_flow -def _allgather_buffer(trainable_params, - group, - param2buffer_size, - use_calc_stream, - task_flow, - sync_wait=False, - offload=False, - convert2cpu=False): +def _allgather_buffer( + trainable_params, + group, + param2buffer_size, + use_calc_stream, + task_flow, + sync_wait=False, + offload=False, + convert2cpu=False, +): for param in trainable_params: if param.status == "all": @@ -846,8 +963,9 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow): if param.name in task_flow.full_grad.keys(): continue assert isinstance(param2buffer_size[param.name], int) - temp_grad = paddle.zeros([param2buffer_size[param.name]], - dtype=param.dtype) + temp_grad = paddle.zeros( + [param2buffer_size[param.name]], dtype=param.dtype + ) temp_tensor = temp_grad._slice(0, param._numel()) temp_tensor.get_tensor()._set_dims(param.shape) param._copy_gradient_from(temp_tensor) @@ 
-875,13 +993,14 @@ def _UnsliceParam(param): def _VarBaseWrapper(param): varbase = param.fw_storage - tmp_param = EagerParamBase(shape=varbase.shape, - dtype=varbase.dtype, - name="slice@" + param.name) + tmp_param = EagerParamBase( + shape=varbase.shape, dtype=varbase.dtype, name="slice@" + param.name + ) varbase._share_buffer_to(tmp_param) tmp_param.regularizer = param.regularizer tmp_param.optimize_attr['learning_rate'] = param.optimize_attr[ - 'learning_rate'] + 'learning_rate' + ] varbase._clear() return tmp_param @@ -906,14 +1025,17 @@ def _device2cpu(trans_param, convert_dtype=False): def _cpu2device(param): tmp_p = param.fw_storage.cuda(DEV_ID) - if tmp_p.dtype == Type.fp32.value and param2dtype[ - param.name] == Type.fp16.value: + if ( + tmp_p.dtype == Type.fp32.value + and param2dtype[param.name] == Type.fp16.value + ): tmp_p = paddle.cast(tmp_p, Type.fp16.value) return tmp_p def _current_layer_params(layer): - return layer.parameters( - include_sublayers=False) + list(layer.extra_parameters) if hasattr( - layer, "extra_parameters") else layer.parameters( - include_sublayers=False) + return ( + layer.parameters(include_sublayers=False) + list(layer.extra_parameters) + if hasattr(layer, "extra_parameters") + else layer.parameters(include_sublayers=False) + ) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py index 66e9617f00efd22140a1f306cabe406996969418..72e9ebfcb7d8859909c48228f202b663ec3ddac6 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_storage.py @@ -46,30 +46,40 @@ class InternalStorage: # The flatten tensor size = [size] if isinstance(size, int) else size if convert_cpu: - value = np.zeros( - size, - dtype=np.float16) if Type.fp16.value == dtype else np.zeros( - size, dtype=np.float32) + value = ( + np.zeros(size, dtype=np.float16) + if Type.fp16.value == dtype + else np.zeros(size, dtype=np.float32) + ) self.buffer = core.eager.Tensor(value=value, place=core.CPUPlace()) if dtype == Type.bf16.value: self.buffer = paddle.cast(self.buffer, dtype=paddle.bfloat16) else: self.buffer = paddle.zeros(size, dtype=dtype) - self.dev_id = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + self.dev_id = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) def to(self, device, dtype=None, keep_alignment=True): """ Move the underlying buffer """ - assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it" - assert (dtype == Type.fp32.value - or Type.fp16.value), "Conversion type is not supported now" + assert ( + self.buffer is not None + ), "Cannot move a collapsed bucket, please rebuild it" + assert ( + dtype == Type.fp32.value or Type.fp16.value + ), "Conversion type is not supported now" if self._device != device: - tmp_buffer = self.buffer.cuda( - self.dev_id) if device == "gpu" else self.buffer.cpu() + tmp_buffer = ( + self.buffer.cuda(self.dev_id) + if device == "gpu" + else self.buffer.cpu() + ) for param in self._params: param.clear_gradient(False) @@ -107,17 +117,18 @@ class ParamStorage(InternalStorage): Add new parameters to the InternalStorage. Params becomes a view of this InternalStorage buffer. 
""" - assert all([ - id(param) not in self._param_ids for param in trainable_params - ]), "The same param cannot be checked in twice" + assert all( + [id(param) not in self._param_ids for param in trainable_params] + ), "The same param cannot be checked in twice" assert self.buffer is not None self.param2align = param2align cpu_param_shape = list() for param in trainable_params: - p_shape = self._add_param_as_view(param, param2align[param.name], - convert_gpu) + p_shape = self._add_param_as_view( + param, param2align[param.name], convert_gpu + ) cpu_param_shape.append(p_shape) if convert_gpu: @@ -127,8 +138,9 @@ class ParamStorage(InternalStorage): self._fill = 0 for idx, param in enumerate(trainable_params): - self._convert_buffer(param, cpu_param_shape[idx], - param2align[param.name]) + self._convert_buffer( + param, cpu_param_shape[idx], param2align[param.name] + ) self._params.append(param) self._param_ids.append(id(param)) @@ -138,7 +150,8 @@ class ParamStorage(InternalStorage): assert ( param.dtype == self.buffer.dtype ), "Different types for the InternalStorage and the param, cannot proceed: {} - {}".format( - param.dtype, self.buffer.dtype) + param.dtype, self.buffer.dtype + ) var_end = self._fill + param._numel() offset = var_end + align @@ -199,13 +212,9 @@ class GradStorage(InternalStorage): This is a basic class to simplify the handling of gradient InternalStorages """ - def __init__(self, - size, - dtype, - device, - destination, - parm2align, - convert_cpu=False): + def __init__( + self, size, dtype, device, destination, parm2align, convert_cpu=False + ): if isinstance(size, np.int64): size = size.tolist() super().__init__(size, dtype, device, convert_cpu) @@ -219,21 +228,21 @@ class GradStorage(InternalStorage): self.sent = False def reset_checked_in(self): - """ Reset the counter of the parameter grads which have been checked in - """ + """Reset the counter of the parameter grads which have been checked in""" self.params_checked_in = 0 self.sent = False @property def all_checked_in(self): - """ Judge all the expected gradient check-in happened """ + """Judge all the expected gradient check-in happened""" return len(self._params) == self.params_checked_in def can_add_grad_view(self, param, align): - """ Is there enough InternalStorage to add this parameter gradient, and whether this param have already checked in. - """ - return self._fill + param._numel() + align <= self._max_size and id( - param) not in self._param_ids + """Is there enough InternalStorage to add this parameter gradient, and whether this param have already checked in.""" + return ( + self._fill + param._numel() + align <= self._max_size + and id(param) not in self._param_ids + ) def to(self, device, dtype=None, keep_alignment=True): """ @@ -253,9 +262,9 @@ class GradStorage(InternalStorage): Add a new parameter gradient to the InternalStorage. Param.grad becomes a view of this InternalStorage buffer. 
""" - assert id( - param - ) not in self._param_ids, "The same gradients cannot be checked in twice" + assert ( + id(param) not in self._param_ids + ), "The same gradients cannot be checked in twice" self._add_grad_as_view(param, align) self._params.append(param) @@ -302,8 +311,9 @@ class GradStorage(InternalStorage): @paddle.autograd.no_grad() def _add_grad_as_view(self, param, align): - assert param._numel( - ) > 0, "Cannot add a gradient to a released InternalStorage, please rebuild" + assert ( + param._numel() > 0 + ), "Cannot add a gradient to a released InternalStorage, please rebuild" assert param.dtype == self.buffer.dtype grad_end = self._fill + param._numel() diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py index 0981b2cab76fd43a969a901de302a4d6dc68ffd9..4107bd83db9363704232b9d9c169dd1a63ad6af4 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py @@ -39,13 +39,13 @@ class Type(Enum): """ Type of trainable parameters """ + fp16 = paddle.float16 bf16 = paddle.bfloat16 fp32 = paddle.float32 class GroupShardedClipGrad: - def __init__(self, clip, device, group): self._clip = clip self._device = device @@ -66,49 +66,64 @@ class GroupShardedClipGrad: merge_grad = g if g.type == core.VarDesc.VarType.SELECTED_ROWS: merge_grad = layers.get_tensor_from_selected_rows( - layers.merge_selected_rows(g)) + layers.merge_selected_rows(g) + ) square = layers.square(merge_grad) sum_square = layers.reduce_sum(square) if p.dtype == paddle.float16: - if p_slice: sum_square_fp16.append(sum_square) - else: unslice_params_fp16.append(sum_square) + if p_slice: + sum_square_fp16.append(sum_square) + else: + unslice_params_fp16.append(sum_square) elif p.dtype == paddle.float32: - if p_slice: sum_square_fp32.append(sum_square) - else: unslice_params_fp32.append(sum_square) + if p_slice: + sum_square_fp32.append(sum_square) + else: + unslice_params_fp32.append(sum_square) # global norm of non-distributed FP16 params_and_grads if len(sum_square_fp16) == 0: - global_norm_fp16 = paddle.to_tensor([0.], dtype=paddle.float32) + global_norm_fp16 = paddle.to_tensor([0.0], dtype=paddle.float32) else: global_norm_fp16 = layers.concat(sum_square_fp16) global_norm_fp16 = layers.reduce_sum(global_norm_fp16) - global_norm_fp16 = paddle.cast(global_norm_fp16, - dtype=paddle.float32) + global_norm_fp16 = paddle.cast( + global_norm_fp16, dtype=paddle.float32 + ) # global norm of non-distributed FP16 params_and_grads for unslice parameters if len(unslice_params_fp16) == 0: - global_unslice_fp16 = paddle.to_tensor([0.], dtype=paddle.float32) + global_unslice_fp16 = paddle.to_tensor([0.0], dtype=paddle.float32) else: global_unslice_fp16 = layers.concat(unslice_params_fp16) global_unslice_fp16 = layers.reduce_sum(global_unslice_fp16) - global_unslice_fp16 = paddle.cast(global_unslice_fp16, - dtype=paddle.float32) + global_unslice_fp16 = paddle.cast( + global_unslice_fp16, dtype=paddle.float32 + ) # global norm of non-distributed FP32 params_and_grads - global_norm_fp32 = layers.concat( - sum_square_fp32) if len(sum_square_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_norm_fp32 = ( + layers.concat(sum_square_fp32) + if len(sum_square_fp32) != 0 + else paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_norm_fp32 = layers.reduce_sum(global_norm_fp32) # global 
norm of non-distributed FP32 params_and_grads for unslice parameters - global_unslice_fp32 = layers.concat(unslice_params_fp32) if len( - unslice_params_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_unslice_fp32 = ( + layers.concat(unslice_params_fp32) + if len(unslice_params_fp32) != 0 + else paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_unslice_fp32 = layers.reduce_sum(global_unslice_fp32) global_unslice_var = global_unslice_fp16 + global_unslice_fp32 - global_norm_var = global_norm_fp16 + global_norm_fp32 + 1.0 / self._group.nranks * global_unslice_var + global_norm_var = ( + global_norm_fp16 + + global_norm_fp32 + + 1.0 / self._group.nranks * global_unslice_var + ) # add all reduce to get global norm of distributed params_and_grads dev_id = int(self._device.split(":")[1]) @@ -119,14 +134,14 @@ class GroupShardedClipGrad: paddle.distributed.all_reduce(global_norm_var, group=self._group) global_norm_var = layers.sqrt(global_norm_var) - max_global_norm = layers.fill_constant(shape=[1], - dtype=global_norm_var.dtype, - value=self.clip_norm) - - clip_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=global_norm_var, - y=max_global_norm)) + max_global_norm = layers.fill_constant( + shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm + ) + + clip_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max(x=global_norm_var, y=max_global_norm), + ) clip_var_fp16 = paddle.cast(clip_var, paddle.float16) for p, g in params_grads: @@ -165,7 +180,6 @@ def device_guard(dev_id=0, device="cpu"): @dygraph_only def GroupShardedScaler(scaler): - def unscale_method(self, optimizer): if not self._enable: return @@ -177,14 +191,16 @@ def GroupShardedScaler(scaler): optimizer.update_scaler = True if getattr(optimizer._optim, '_param_groups', None) and isinstance( - optimizer._optim._param_groups[0], dict): + optimizer._optim._param_groups[0], dict + ): for group in optimizer._optim._param_groups: for param in group['params']: if param.grad is not None: param_grads.append(param.grad) if param.grad.dtype in [ - core.VarDesc.VarType.FP16, paddle.float16 + core.VarDesc.VarType.FP16, + paddle.float16, ]: param_grads_fp16.append(param.grad) else: @@ -194,7 +210,8 @@ def GroupShardedScaler(scaler): if param.grad is not None: param_grads.append(param.grad) if param.grad.dtype in [ - core.VarDesc.VarType.FP16, paddle.float16 + core.VarDesc.VarType.FP16, + paddle.float16, ]: param_grads_fp16.append(param.grad) else: @@ -204,27 +221,34 @@ def GroupShardedScaler(scaler): temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_)) device = "cpu" if optimizer.offload else "gpu" - dev_id = 0 if device == "cpu" else int( - paddle.get_device().split(":")[1]) + dev_id = ( + 0 if device == "cpu" else int(paddle.get_device().split(":")[1]) + ) with device_guard(dev_id, device): if len(param_grads_fp16): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp16, - self._scale, - param_grads_fp16, - temp_found_inf_fp16) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp16, + self._scale, + param_grads_fp16, + temp_found_inf_fp16, + ) if len(param_grads_fp32): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp32, - self._scale, - param_grads_fp32, - temp_found_inf_fp32) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp32, + self._scale, + param_grads_fp32, + temp_found_inf_fp32, + ) self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0 is_found_inf = paddle.to_tensor([self._found_inf], 
dtype="int32") - paddle.distributed.all_reduce(is_found_inf, - op=paddle.distributed.ReduceOp.MAX, - group=optimizer._group) + paddle.distributed.all_reduce( + is_found_inf, + op=paddle.distributed.ReduceOp.MAX, + group=optimizer._group, + ) self._found_inf = is_found_inf.numpy()[0] scaler._unscale = MethodType(unscale_method, scaler) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py index 5933d11037eafef3d1a71a8722f10206c19ad977..63ce53f01c43998fda705e8886ea1b9e7494e887 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py @@ -35,7 +35,9 @@ from paddle.distributed import collective as dist from paddle.distributed.collective import _get_global_group from ...utils.internal_storage import GradStorage -from ...meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 +from ...meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) from .sharding_utils import Taskflow, Type @@ -59,42 +61,55 @@ class ShardingStage2(nn.Layer): # 5. Support the establishment of independent communication groups. def __init__( - self, - layer, - sharding_optimizer, - group=None, - sync_buffers=False, - buffer_max_size=2**23, #8MB - auto_refresh_trainable=True, - device="gpu"): + self, + layer, + sharding_optimizer, + group=None, + sync_buffers=False, + buffer_max_size=2**23, # 8MB + auto_refresh_trainable=True, + device="gpu", + ): super().__init__() # training options self._layer = layer - self._sharding_optimizers = [ - sharding_optimizer - ] if not isinstance(sharding_optimizer, list) else sharding_optimizer + self._sharding_optimizers = ( + [sharding_optimizer] + if not isinstance(sharding_optimizer, list) + else sharding_optimizer + ) assert all( list( - map(lambda opt: isinstance(opt, ShardingOptimizerStage2), - self._sharding_optimizers)) + map( + lambda opt: isinstance(opt, ShardingOptimizerStage2), + self._sharding_optimizers, + ) + ) ), "Please use ShardingOptimizerStage2 optimizer" self._sync_buffers = sync_buffers self._auto_refresh_trainable = auto_refresh_trainable # Communication related attributes - self._group = dist.new_group( - _get_global_group().ranks) if group is None else group + self._group = ( + dist.new_group(_get_global_group().ranks) + if group is None + else group + ) self._world_size_scaling = 1.0 / self._group.nranks - assert self._group.nranks > 1, "Training must be distributed, ranks must be greater than 1" + assert ( + self._group.nranks > 1 + ), "Training must be distributed, ranks must be greater than 1" self._rank = self._group.rank self._global_root_rank = self._group.ranks[ - 0] # picking rank 0 as the reference + 0 + ] # picking rank 0 as the reference self._default_device = device # Global statistical parameters self._all_params = list( - chain(*[optim.local_params for optim in self._sharding_optimizers])) + chain(*[optim.local_params for optim in self._sharding_optimizers]) + ) self._trainable_params = [] self._grad_reduced = [] self._trainable_param2rank = {} @@ -103,11 +118,13 @@ class ShardingStage2(nn.Layer): self._param_grads = [] # Set grad storage size & Display param sizes and model sizes - model_size = sum([np.prod(p.shape) - for p in self._layer.parameters()]).item() + model_size = sum( + [np.prod(p.shape) for p in self._layer.parameters()] + ).item() assert 
buffer_max_size >= 0, "buffer_max_size must be GE than 0." - self._buffer_max_size = self._rank_buffer_size(buffer_max_size, - model_size) + self._buffer_max_size = self._rank_buffer_size( + buffer_max_size, model_size + ) self._use_grad_storage = buffer_max_size > 0 self._grad_storages = {} # {dtype: {rank: GradStorage}} self._has_grad_storage = [] @@ -116,11 +133,12 @@ class ShardingStage2(nn.Layer): # Offload # TODO(haohongxiang): Now it's not be supported for multi-optimizers using Offload strategy self._offload_optims = list( - filter(lambda optim: optim.offload, self._sharding_optimizers)) + filter(lambda optim: optim.offload, self._sharding_optimizers) + ) if len(self._offload_optims) > 0: - assert len( - self._sharding_optimizers - ) == 1, "Only support offload strategy for single optimizer" + assert ( + len(self._sharding_optimizers) == 1 + ), "Only support offload strategy for single optimizer" self._offload = self._sharding_optimizers[0].offload self._offload_device = "cpu" @@ -163,16 +181,19 @@ class ShardingStage2(nn.Layer): return fw def set_state_dict(self, state_dict, use_structured_name=True): - self._layer.set_state_dict(state_dict, - use_structured_name=use_structured_name) - - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): - return self._layer.state_dict(destination=None, - include_sublayers=True, - structured_name_prefix="") + self._layer.set_state_dict( + state_dict, use_structured_name=use_structured_name + ) + + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): + return self._layer.state_dict( + destination=None, include_sublayers=True, structured_name_prefix="" + ) def _clear_gradients(self): """ @@ -180,8 +201,10 @@ class ShardingStage2(nn.Layer): """ # Release grad storages for dtype in self._grad_storages.keys(): - if not self._offload and self._rank in self._grad_storages[ - dtype].keys(): + if ( + not self._offload + and self._rank in self._grad_storages[dtype].keys() + ): self._grad_storages[dtype][self._rank].buffer.zero_() # Release grads of params @@ -199,10 +222,13 @@ class ShardingStage2(nn.Layer): """ # Scale grad storages for dtype in self._grad_storages.keys(): - if not self._offload and self._rank in self._grad_storages[ - dtype].keys(): + if ( + not self._offload + and self._rank in self._grad_storages[dtype].keys() + ): self._grad_storages[dtype][self._rank].buffer.scale_( - scale=self._world_size_scaling) + scale=self._world_size_scaling + ) # Scale grads of params for param in self._trainable_params: @@ -213,7 +239,8 @@ class ShardingStage2(nn.Layer): # Scale grads of master params with offload strategy if self._offload: self._sharding_optimizers[0]._offload_scale_grad( - self._world_size_scaling) + self._world_size_scaling + ) def _init_internal_storage(self, needs_fresh): """ @@ -232,7 +259,9 @@ class ShardingStage2(nn.Layer): Synchronously or asynchronously convert the data type of the layer, the device is not supported now. """ assert isinstance(device, str), "Device must be type str" - assert device == self._default_device, "New devices are not supported, because of the optimizer state is not sync" + assert ( + device == self._default_device + ), "New devices are not supported, because of the optimizer state is not sync" self._layer.to(device=device, dtype=dtype, blocking=blocking) @@ -240,14 +269,15 @@ class ShardingStage2(nn.Layer): self._fresh_trainable() def _fresh_trainable(self): - """ Whether to update training parameters. 
""" + """Whether to update training parameters.""" # Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance) if reduce(lambda x, y: x or y, self._grad_reduced, False): logging.warning("Grads waiting to be reduced.") self._trainable_params = list( - filter(lambda x: x.trainable, self._all_params)) + filter(lambda x: x.trainable, self._all_params) + ) self._trainable_params.sort(key=lambda x: np.prod(x.shape)) self._trainable_param2rank = {} @@ -257,14 +287,19 @@ class ShardingStage2(nn.Layer): optim.update_opt_status() # Get the parameters split by the optimizer according to rank - for per_rank_params in optim.dtype_rank_params.values( + for ( + per_rank_params + ) in ( + optim.dtype_rank_params.values() ): # all the params from all ranks for params in per_rank_params: for param in filter(lambda x: x.trainable, params): self._trainable_param2rank[ - param.name] = optim.param2rank[param.name] + param.name + ] = optim.param2rank[param.name] self._trainable_param2align[ - param.name] = optim._param2align[param.name] + param.name + ] = optim._param2align[param.name] self._setup_use_grad_storage() @@ -278,10 +313,9 @@ class ShardingStage2(nn.Layer): """ for buffer in self._layer.buffers(include_sublayers=True): - dist.broadcast(buffer, - self._global_root_rank, - self._group, - sync_op=True) + dist.broadcast( + buffer, self._global_root_rank, self._group, sync_op=True + ) # Multi stream operation will be supported later dist.wait(tensor=buffer, group=self._group, use_calc_stream=True) @@ -315,7 +349,9 @@ class ShardingStage2(nn.Layer): def reduce(*_): # Skip gradient reduction, do not change status information if self._grad_reduced[index]: - assert param.grad is not None, "Parameter gradient cannot be None" + assert ( + param.grad is not None + ), "Parameter gradient cannot be None" # Change reduce information self._grad_reduced[index] = False @@ -327,22 +363,29 @@ class ShardingStage2(nn.Layer): elif self._offload: self._sharding_optimizers[0]._offload_acc_grad( param.name, - param.grad.cast(dtype=Type.fp32.value).cpu()) + param.grad.cast(dtype=Type.fp32.value).cpu(), + ) param.clear_gradient(False) # Synchronize the reduce parameter gradient self._tasks_flow.append( - Taskflow(task=dist.reduce( - tensor=param.grad, - dst=self._group.ranks[dst_rank], - group=self._group, - sync_op=True), - callback=cleanup)) + Taskflow( + task=dist.reduce( + tensor=param.grad, + dst=self._group.ranks[dst_rank], + group=self._group, + sync_op=True, + ), + callback=cleanup, + ) + ) # Multi stream operation will be supported later - dist.wait(tensor=param.grad, - group=self._group, - use_calc_stream=True) + dist.wait( + tensor=param.grad, + group=self._group, + use_calc_stream=True, + ) # Clear the task flow and trigger callback to clear the redundant gradient self._clear_task_flow() @@ -353,7 +396,9 @@ class ShardingStage2(nn.Layer): def reduce(*_): # Skip gradient reduction, do not change status information if self._grad_reduced[index]: - assert param.grad is not None, "Parameter gradient cannot be None" + assert ( + param.grad is not None + ), "Parameter gradient cannot be None" # Change reduce information self._grad_reduced[index] = False @@ -370,35 +415,43 @@ class ShardingStage2(nn.Layer): p.clear_gradient(False) p._gradient_set_empty(False) - grad_storage.buffer.value().get_tensor()._clear( - ) + grad_storage.buffer.value().get_tensor()._clear() elif self._offload: grad_storage.to(device=self._offload_device) for p in grad_storage._params: 
self._sharding_optimizers[ - 0]._offload_acc_grad( - p.name, - p.grad.cast(dtype=Type.fp32.value)) + 0 + ]._offload_acc_grad( + p.name, + p.grad.cast(dtype=Type.fp32.value), + ) p.clear_gradient(False) p._gradient_set_empty(False) grad_storage._device = self._default_device - grad_storage.buffer.value().get_tensor()._clear( - ) + grad_storage.buffer.value().get_tensor()._clear() # Reduce the bucket grad_storage.sent = True self._tasks_flow.append( - Taskflow(task=dist.reduce( - tensor=grad_storage.buffer, - dst=self._group.ranks[grad_storage.destination], - group=self._group, - sync_op=True), - callback=cleanup)) + Taskflow( + task=dist.reduce( + tensor=grad_storage.buffer, + dst=self._group.ranks[ + grad_storage.destination + ], + group=self._group, + sync_op=True, + ), + callback=cleanup, + ) + ) # Multi stream operation will be supported later - dist.wait(tensor=grad_storage.buffer, - group=self._group, - use_calc_stream=True) + dist.wait( + tensor=grad_storage.buffer, + group=self._group, + use_calc_stream=True, + ) # Clear the task flow and trigger callback to clear the redundant gradient self._clear_task_flow() @@ -424,7 +477,8 @@ class ShardingStage2(nn.Layer): reduce_function = self._get_reduce_fn(index, param, dst_rank) self._bw_hooks.append( - param._register_backward_hook(reduce_function)) + param._register_backward_hook(reduce_function) + ) def _setup_use_grad_storage(self): """ @@ -447,27 +501,36 @@ class ShardingStage2(nn.Layer): dtype=param.dtype, device=self._default_device, destination=dst_rank, - parm2align=self._trainable_param2align) + parm2align=self._trainable_param2align, + ) # Criteria to decide whether this parameter is to be put in GradStorage if self._grad_storages[param.dtype][dst_rank].can_add_grad_view( - param, self._trainable_param2align[param.name]): + param, self._trainable_param2align[param.name] + ): self._grad_storages[param.dtype][dst_rank].add_grad( - param, self._trainable_param2align[param.name]) + param, self._trainable_param2align[param.name] + ) self._has_grad_storage[index] = True else: self._param_grads.append(param.name) print( - "Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, " - .format(param.name, param.shape, - self._trainable_param2align[param.name], - self._grad_storages[param.dtype][dst_rank]._fill)) + "Can not add param: {}, param's shape: {}, param align: {}, grad_storages fill: {}, ".format( + param.name, + param.shape, + self._trainable_param2align[param.name], + self._grad_storages[param.dtype][dst_rank]._fill, + ) + ) self._grad_storage_list = list( - chain(*[ - self._grad_storages[dtype].values() - for dtype in self._grad_storages.keys() - ])) + chain( + *[ + self._grad_storages[dtype].values() + for dtype in self._grad_storages.keys() + ] + ) + ) def _clear_task_flow(self): """Try to consume the previous tasks.""" @@ -518,15 +581,19 @@ class ShardingStage2(nn.Layer): if Type.fp16.value in rank_buffer_size.keys(): # FP16 GradStorage and model size print( - "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======" - .format(rank_buffer_size[Type.fp16.value] / 2**19, - model_size / 2**19)) + "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".format( + rank_buffer_size[Type.fp16.value] / 2**19, + model_size / 2**19, + ) + ) if Type.fp32.value in rank_buffer_size.keys(): # FP32 GradStorage and model size print( - "====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======" - 
.format(rank_buffer_size[Type.fp32.value] / 2**18, - model_size / 2**18)) + "====== FP32 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======".format( + rank_buffer_size[Type.fp32.value] / 2**18, + model_size / 2**18, + ) + ) return rank_buffer_size def _redefine_opt_step(self): diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage3.py index 02e701e8990db16774e7b310da30a183c4c0459b..deae0cddd291c01db52942a3c03c25135f1e0ba2 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage3.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage3.py @@ -59,16 +59,18 @@ class ShardingStage3(nn.Layer): # 3. Support offload function. # 4. Support the establishment of independent communication groups. - def __init__(self, - layer, - optimizer, - group=None, - sync_buffers=False, - device="gpu", - segment_size=2**15, - pertrain_sync_models=True, - offload=False, - sync_comm=False): + def __init__( + self, + layer, + optimizer, + group=None, + sync_buffers=False, + device="gpu", + segment_size=2**15, + pertrain_sync_models=True, + offload=False, + sync_comm=False, + ): super().__init__() # Default configs @@ -83,28 +85,41 @@ class ShardingStage3(nn.Layer): self._segment_size = segment_size global DEV - DEV = "cpu" if paddle.get_device() == "cpu" else paddle.get_device( - ).split(":")[0] + DEV = ( + "cpu" + if paddle.get_device() == "cpu" + else paddle.get_device().split(":")[0] + ) global DEV_ID - DEV_ID = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + DEV_ID = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) global param2dtype param2dtype = dict() # Communication group establishment - self._group = dist.new_group( - _get_global_group().ranks) if group is None else group + self._group = ( + dist.new_group(_get_global_group().ranks) + if group is None + else group + ) self._world_size_scaling = 1.0 / self._group.nranks - assert self._group.nranks > 1, "Training must be distributed, ranks must be greater than 1." + assert ( + self._group.nranks > 1 + ), "Training must be distributed, ranks must be greater than 1." self._rank = self._group.rank self._global_root_rank = self._group.ranks[ - 0] # picking rank 0 as the reference + 0 + ] # picking rank 0 as the reference self._global_ranks = self._group.ranks # Parameter segmentation for global ranks # After flatten -> self._param2buffer_size, self._param2buffer, self._trainable_params self._param2buffer_size = dict() # {param.name: size} - self._param2buffer = dict( + self._param2buffer = ( + dict() ) # {param.name: [(start0, end0),(start1, end1), ...]} self._trainable_params = dict() # {id(layer): [trainable_params]} self._unslice_params = set() # param's numel <= segment_size @@ -112,9 +127,11 @@ class ShardingStage3(nn.Layer): self._grad_storages = dict() # {param.dtype: GradStorage} assert not isinstance( - optimizer, list), "Multiple optimizers are not supported now." - self._optim = _OptimizerWrapper(optimizer, self._offload, self._group, - self._update_params_slice) + optimizer, list + ), "Multiple optimizers are not supported now." 
+ self._optim = _OptimizerWrapper( + optimizer, self._offload, self._group, self._update_params_slice + ) self._ori_parameter_list = self._optim._parameter_list self._ori_param_groups = self._optim._param_groups @@ -123,9 +140,9 @@ class ShardingStage3(nn.Layer): logging.warning( "While using ClipGradByGlobalNorm in ShardingStage3, the grad clip of original optimizer will be changed." ) - self._optim._grad_clip = ShardingClipGrad(self._optim._grad_clip, - paddle.get_device(), - self._group) + self._optim._grad_clip = ShardingClipGrad( + self._optim._grad_clip, paddle.get_device(), self._group + ) # Synchronous all ranks models if pertrain_sync_models: @@ -161,10 +178,9 @@ class ShardingStage3(nn.Layer): """ for p in self._layer.parameters(): - dist.broadcast(p, - src=self._global_root_rank, - group=self._group, - sync_op=True) + dist.broadcast( + p, src=self._global_root_rank, group=self._group, sync_op=True + ) # Multi stream operation will be supported later dist.wait(tensor=p, group=self._group, use_calc_stream=True) @@ -174,12 +190,15 @@ class ShardingStage3(nn.Layer): current_layer_params = self._layer.parameters(include_sublayers=True) # 1.Handle param's slice trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) for param in trainable_params: - assert hasattr(param, "fw_storage" - ), "Find {} don't have fw_storage attribute.".format( - param.name) + assert hasattr( + param, "fw_storage" + ), "Find {} don't have fw_storage attribute.".format(param.name) param.fw_storage.clear_gradient(False) param.fw_storage._gradient_set_empty(False) @@ -195,8 +214,10 @@ class ShardingStage3(nn.Layer): param._gradient_set_empty(False) tmp_var = param.cuda(DEV_ID) - if tmp_var.dtype == Type.fp32.value and param2dtype[ - param.name] == Type.fp16.value: + if ( + tmp_var.dtype == Type.fp32.value + and param2dtype[param.name] == Type.fp16.value + ): tmp_var = paddle.cast(tmp_var, Type.fp16.value) tmp_var._share_buffer_to(param) tmp_var._clear() @@ -211,9 +232,11 @@ class ShardingStage3(nn.Layer): if not isinstance(self._optim._param_groups[0], dict): slice_params = [param.fw_storage for param in update_list] self._optim._parameter_list = slice_params + list( - self._unslice_params) + self._unslice_params + ) self._optim._param_groups = slice_params + list( - self._unslice_params) + self._unslice_params + ) else: for param_group in self._optim._param_groups: p_group = [] @@ -239,16 +262,19 @@ class ShardingStage3(nn.Layer): return fw def set_state_dict(self, state_dict, use_structured_name=True): - self._layer.set_state_dict(state_dict, - use_structured_name=use_structured_name) - - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): - return self._layer.state_dict(destination=None, - include_sublayers=True, - structured_name_prefix="") + self._layer.set_state_dict( + state_dict, use_structured_name=use_structured_name + ) + + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): + return self._layer.state_dict( + destination=None, include_sublayers=True, structured_name_prefix="" + ) def _handle_unslice_params(self): buffer_size = dict() @@ -258,7 +284,8 @@ class ShardingStage3(nn.Layer): # Updata optimizer master weights if param.dtype == Type.fp16.value and not self._offload: self._optim._master_weights[param.name] = paddle.cast( - param, 
Type.fp32.value) + param, Type.fp32.value + ) if self._offload: param.master_weight = paddle.cast(param, Type.fp32.value).cpu() param2dtype[param.name] = param.dtype @@ -274,9 +301,11 @@ class ShardingStage3(nn.Layer): dtype=param.dtype, device=self._default_device, destination=self._rank, - parm2align=self._unslice_params2align) + parm2align=self._unslice_params2align, + ) self._grad_storages[param.dtype].add_grad( - param, self._unslice_params2align[param.name]) + param, self._unslice_params2align[param.name] + ) def _segment_rank_params(self, layer, name="last_layer"): """ @@ -316,8 +345,11 @@ class ShardingStage3(nn.Layer): align_ = self._param2align(param) offset = align_ + param._numel() - buffer_size = offset if offset % self._group.nranks == 0 else offset + self._group.nranks - ( - offset % self._group.nranks) + buffer_size = ( + offset + if offset % self._group.nranks == 0 + else offset + self._group.nranks - (offset % self._group.nranks) + ) self._param2buffer_size[param.name] = buffer_size # 2.Combination param buffer @@ -326,7 +358,8 @@ class ShardingStage3(nn.Layer): for rank_ in range(self._group.nranks): self._param2buffer[param.name].append( - (rank_ * pre_buffer, (rank_ + 1) * pre_buffer)) + (rank_ * pre_buffer, (rank_ + 1) * pre_buffer) + ) # Record param's dtype param2dtype[param.name] = param.dtype @@ -339,10 +372,11 @@ class ShardingStage3(nn.Layer): This is a function to simplify the handling of parameter InternalStorages. """ assert isinstance(buffer_size, int) - value = np.zeros( - buffer_size, - dtype=np.float16) if Type.fp16.value == param.dtype else np.zeros( - buffer_size, dtype=np.float32) + value = ( + np.zeros(buffer_size, dtype=np.float16) + if Type.fp16.value == param.dtype + else np.zeros(buffer_size, dtype=np.float32) + ) buffer = core.VarBase(value=value, place=core.CPUPlace()) param_shape = param.shape @@ -353,30 +387,37 @@ class ShardingStage3(nn.Layer): start, end = self._param2buffer[param.name][self._rank] # Copy the current param value - tmp_var = core.VarBase(tensor=buffer._slice(0, param._numel()), - place=core.CPUPlace()) + tmp_var = core.VarBase( + tensor=buffer._slice(0, param._numel()), place=core.CPUPlace() + ) param_cpu = param.cpu() - tmp_var.value().get_tensor().set(param_cpu.value().get_tensor(), - core.CPUPlace()) + tmp_var.value().get_tensor().set( + param_cpu.value().get_tensor(), core.CPUPlace() + ) param.value().get_tensor()._set_dims(param_shape) # Current rank param_storage if self._offload: - param.fw_storage = core.VarBase(buffer._slice(start, end), - core.CPUPlace(), - "slice@" + param.name) + param.fw_storage = core.VarBase( + buffer._slice(start, end), + core.CPUPlace(), + "slice@" + param.name, + ) with device_guard(device="cpu"): - param.master_weight = paddle.cast(param.fw_storage, - Type.fp32.value) + param.master_weight = paddle.cast( + param.fw_storage, Type.fp32.value + ) else: - param.fw_storage = core.VarBase(buffer._slice(start, end), - "slice@" + param.name) + param.fw_storage = core.VarBase( + buffer._slice(start, end), "slice@" + param.name + ) param.status = "part" # Updata optimizer master weights if param.dtype == Type.fp16.value and not self._offload: self._optim._master_weights[param.fw_storage.name] = paddle.cast( - param.fw_storage, Type.fp32.value) + param.fw_storage, Type.fp32.value + ) param._clear() def _register_forward_hooks(self, layer): @@ -398,20 +439,33 @@ class ShardingStage3(nn.Layer): self._register_forward_hooks(sub_layer) def _register_forward_all_hooks(self, sub_layer, task_flow): - def 
_forward_pre_hook(layer, inputs): - return ForwardPreHooks(layer, self._order_tracer, - self._trainable_params, self._param2buffer, - self._rank, self._group, self._sync_comm, - self._offload, task_flow) + return ForwardPreHooks( + layer, + self._order_tracer, + self._trainable_params, + self._param2buffer, + self._rank, + self._group, + self._sync_comm, + self._offload, + task_flow, + ) def _forward_post_hook(layer, inputs, outputs): - return ForwardPostHooks.apply(outputs, layer, self._order_tracer, - self._trainable_params, - self._param2buffer, - self._param2buffer_size, self._rank, - self._group, self._sync_comm, - self._offload, task_flow) + return ForwardPostHooks.apply( + outputs, + layer, + self._order_tracer, + self._trainable_params, + self._param2buffer, + self._param2buffer_size, + self._rank, + self._group, + self._sync_comm, + self._offload, + task_flow, + ) # register previous forward hooks sub_layer.register_forward_pre_hook(_forward_pre_hook) @@ -426,10 +480,9 @@ class ShardingStage3(nn.Layer): """ for buffer in self._layer.buffers(include_sublayers=True): - dist.broadcast(buffer, - self._global_root_rank, - self._group, - sync_op=True) + dist.broadcast( + buffer, self._global_root_rank, self._group, sync_op=True + ) # Multi stream operation will be supported later dist.wait(tensor=buffer, group=self._group, use_calc_stream=True) @@ -448,14 +501,16 @@ class ShardingStage3(nn.Layer): assert len(self._trainable_params.keys()) > 0 current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) # 1.Handle param's slice for param in trainable_params: assert hasattr( - param, - "fw_storage"), "Find {} don't have fw_storage attribute".format( - param.name) + param, "fw_storage" + ), "Find {} don't have fw_storage attribute".format(param.name) # Gradient average if self._offload: with device_guard(device="cpu"): @@ -470,12 +525,14 @@ class ShardingStage3(nn.Layer): # 2.Handle unslice param for grad_storage in self._grad_storages.values(): grad_storage.buffer.scale_(scale=self._world_size_scaling) - dist.all_reduce(tensor=grad_storage.buffer, - group=self._group, - sync_op=True) - dist.wait(tensor=grad_storage.buffer, - group=self._group, - use_calc_stream=True) + dist.all_reduce( + tensor=grad_storage.buffer, group=self._group, sync_op=True + ) + dist.wait( + tensor=grad_storage.buffer, + group=self._group, + use_calc_stream=True, + ) if self._offload: for param in list(self._unslice_params): @@ -500,15 +557,20 @@ class ShardingStage3(nn.Layer): assert len(self._trainable_params.keys()) > 0 current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) - t_flow = _allgather_buffer(trainable_params, - self._group, - use_calc_stream=True, - task_flow=TaskFlow(), - sync_wait=True, - offload=self._offload, - convert2cpu=convert2cpu) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) + t_flow = _allgather_buffer( + trainable_params, + self._group, + use_calc_stream=True, + task_flow=TaskFlow(), + sync_wait=True, + offload=self._offload, + convert2cpu=convert2cpu, + ) if convert2cpu: for param in trainable_params: t_flow.full_param[param.name]._share_buffer_to(param) @@ -519,49 +581,60 @@ class 
ShardingStage3(nn.Layer): def _register_backward_hooks(self): current_layer_params = self._layer.parameters(include_sublayers=True) trainable_params = list( - filter(lambda p: p.trainable and p not in self._unslice_params, - current_layer_params)) + filter( + lambda p: p.trainable and p not in self._unslice_params, + current_layer_params, + ) + ) for param in trainable_params: allreduce_function = self._get_allreduce_fn(param) param._register_backward_hook(allreduce_function) def _get_allreduce_fn(self, param): - @paddle.autograd.no_grad() def allreduce_(*_): if param.name in self._task_flow.full_grad.keys(): full_grad = self._task_flow.full_grad[param.name] # Only support sync allreduce current rank's layer now - dist.all_reduce(tensor=full_grad, - group=self._group, - sync_op=True) - dist.wait(tensor=full_grad, - group=self._group, - use_calc_stream=True) + dist.all_reduce( + tensor=full_grad, group=self._group, sync_op=True + ) + dist.wait( + tensor=full_grad, group=self._group, use_calc_stream=True + ) start, end = self._param2buffer[param.name][self._rank] if param.bw_storage is None: - param.bw_storage = core.VarBase(full_grad._slice( - start, end)).detach().clone() + param.bw_storage = ( + core.VarBase(full_grad._slice(start, end)) + .detach() + .clone() + ) if self._offload: param.bw_storage = _device2cpu(param.bw_storage, True) else: if self._offload: cpu_grad = _device2cpu( - core.VarBase(full_grad._slice( - start, end)).detach().clone(), True) + core.VarBase(full_grad._slice(start, end)) + .detach() + .clone(), + True, + ) with device_guard(device="cpu"): param.bw_storage = paddle.add( - param.bw_storage, cpu_grad) + param.bw_storage, cpu_grad + ) else: # param.bw_storage.add_( # core.VarBase(full_grad._slice(start, end)) # .detach().clone()) param.bw_storage = paddle.add( param.bw_storage, - core.VarBase(full_grad._slice( - start, end)).detach().clone()) + core.VarBase(full_grad._slice(start, end)) + .detach() + .clone(), + ) param.clear_gradient(False) param._gradient_set_empty(False) tmp_var = self._task_flow.full_grad.pop(param.name) @@ -572,10 +645,16 @@ class ShardingStage3(nn.Layer): param.use_count = 0 param._clear() start, end = self._param2buffer[param.name][self._rank] - param.fw_storage = core.VarBase( - self._task_flow.full_param[param.name]._slice( - start, end), - param.name + "@slice").detach().clone() + param.fw_storage = ( + core.VarBase( + self._task_flow.full_param[param.name]._slice( + start, end + ), + param.name + "@slice", + ) + .detach() + .clone() + ) param.status = "part" tmp_var = self._task_flow.full_param.pop(param.name) tmp_var._clear() @@ -590,8 +669,9 @@ class ShardingStage3(nn.Layer): # CUDA alignment 256 bytes size = param._numel() * align[param.dtype] remaining = size % alignment[self._default_device] - ali = 0 if remaining == 0 else alignment[ - self._default_device] - remaining + ali = ( + 0 if remaining == 0 else alignment[self._default_device] - remaining + ) align_ = ali // align[param.dtype] return align_ @@ -625,8 +705,17 @@ class ShardingStage3(nn.Layer): self._optim.clear_grad = MethodType(_opt_clear, self._optim) -def ForwardPreHooks(layer, order_tracer, trainable_params, param2buffer, rank, - group, sync_comm, offload, task_flow): +def ForwardPreHooks( + layer, + order_tracer, + trainable_params, + param2buffer, + rank, + group, + sync_comm, + offload, + task_flow, +): # Record layer's id layer_id = id(layer) @@ -641,34 +730,49 @@ def ForwardPreHooks(layer, order_tracer, trainable_params, param2buffer, rank, # Whether to use calc 
stream task_flow.use_calc[layer_id] = use_calc # wait current layer params - _wait_layer(trainable_params[layer_id], task_flow, group, use_calc, - offload) + _wait_layer( + trainable_params[layer_id], task_flow, group, use_calc, offload + ) - if layer_id == order_tracer["layer"][-1]: return + if layer_id == order_tracer["layer"][-1]: + return order_ = order_tracer[layer_id] layer_id = order_tracer["layer"][order_ + 1] - _allgather_buffer(trainable_params[layer_id], - group, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_id], + group, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) return class ForwardPostHooks(PyLayer): - @staticmethod - def forward(ctx, inputs, layer, order_tracer, trainable_params, - param2buffer, param2buffer_size, rank, group, sync_comm, - offload, task_flow): + def forward( + ctx, + inputs, + layer, + order_tracer, + trainable_params, + param2buffer, + param2buffer_size, + rank, + group, + sync_comm, + offload, + task_flow, + ): layer_id = id(layer) # release current layer full params - _release_param(trainable_params[layer_id], param2buffer, rank, - task_flow, offload) + _release_param( + trainable_params[layer_id], param2buffer, rank, task_flow, offload + ) if layer_id not in order_tracer.keys(): order_ = order_tracer["order"] @@ -676,7 +780,7 @@ class ForwardPostHooks(PyLayer): order_tracer["order"] += 1 order_tracer["layer"].append(layer_id) - #Record bw info + # Record bw info ctx.order_tracer = order_tracer ctx.task_flow = task_flow ctx.group = group @@ -705,30 +809,36 @@ class ForwardPostHooks(PyLayer): # Allgather params synchronization if sync_comm: use_calc, sync_wait = True, True - _allgather_buffer(trainable_params[layer_id], - group, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_id], + group, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) else: - _wait_layer(trainable_params[layer_id], task_flow, group, use_calc, - offload) + _wait_layer( + trainable_params[layer_id], task_flow, group, use_calc, offload + ) # Create params's grad - _create_params_grad(trainable_params[layer_id], param2buffer_size, - task_flow) + _create_params_grad( + trainable_params[layer_id], param2buffer_size, task_flow + ) # Whether to use calc stream task_flow.use_calc[layer_id] = use_calc if layer_id != order_tracer["layer"][0] and not sync_comm: layer_next_id = order_tracer["layer"][order_tracer[layer_id] - 1] - _allgather_buffer(trainable_params[layer_next_id], - group, - use_calc_stream=use_calc, - task_flow=task_flow, - sync_wait=sync_wait, - offload=offload) + _allgather_buffer( + trainable_params[layer_next_id], + group, + use_calc_stream=use_calc, + task_flow=task_flow, + sync_wait=sync_wait, + offload=offload, + ) return args @@ -738,22 +848,22 @@ class TaskFlow: Task flows, one way linked list for task acquisition. 
""" - def __init__(self, - full_param=dict(), - full_grad=dict(), - use_calc=dict(), - callback=None): + def __init__( + self, + full_param=dict(), + full_grad=dict(), + use_calc=dict(), + callback=None, + ): self.full_param = full_param self.full_grad = full_grad self.use_calc = use_calc self.callback = callback -def _release_param(trainable_params, - param2buffer, - rank, - task_flow, - offload=False): +def _release_param( + trainable_params, param2buffer, rank, task_flow, offload=False +): for param in trainable_params: # async communicate share weight not clear param.use_count -= 1 @@ -762,9 +872,14 @@ def _release_param(trainable_params, if param.name in task_flow.full_param.keys(): start, end = param2buffer[param.name][rank] with paddle.amp.auto_cast(enable=False): - param.fw_storage = core.VarBase( - task_flow.full_param[param.name]._slice(start, end), - param.name + "@slice").detach().clone() + param.fw_storage = ( + core.VarBase( + task_flow.full_param[param.name]._slice(start, end), + param.name + "@slice", + ) + .detach() + .clone() + ) param.status = "part" tmp_var = task_flow.full_param.pop(param.name) tmp_var._clear() @@ -774,11 +889,9 @@ def _release_param(trainable_params, return -def _wait_layer(trainable_params, - task_flow, - group, - use_calc_stream, - offload=False): +def _wait_layer( + trainable_params, task_flow, group, use_calc_stream, offload=False +): paddle.device.cuda.synchronize() for param in trainable_params: if param.status == "all": @@ -786,30 +899,35 @@ def _wait_layer(trainable_params, continue if param.name in task_flow.full_param.keys(): full_param = task_flow.full_param[param.name] - core.VarBase(full_param._slice( - 0, param._numel()))._share_buffer_to(param) + core.VarBase(full_param._slice(0, param._numel()))._share_buffer_to( + param + ) param.fw_storage._clear() param.fw_storage = None param.status = "all" param.use_count += 1 else: - _allgather_buffer(trainable_params, - group, - use_calc_stream=True, - task_flow=task_flow, - sync_wait=True, - offload=offload) + _allgather_buffer( + trainable_params, + group, + use_calc_stream=True, + task_flow=task_flow, + sync_wait=True, + offload=offload, + ) break return task_flow -def _allgather_buffer(trainable_params, - group, - use_calc_stream, - task_flow, - sync_wait=False, - offload=False, - convert2cpu=False): +def _allgather_buffer( + trainable_params, + group, + use_calc_stream, + task_flow, + sync_wait=False, + offload=False, + convert2cpu=False, +): for param in trainable_params: if param.status == "all": @@ -820,18 +938,21 @@ def _allgather_buffer(trainable_params, param.fw_storage = _cpu2device(param) with paddle.amp.auto_cast(enable=False): - full_param = _all_gather(param.fw_storage, - group, - use_calc_stream=use_calc_stream) + full_param = _all_gather( + param.fw_storage, group, use_calc_stream=use_calc_stream + ) # Allgather current layer in the 1st step synchronously if sync_wait: with paddle.amp.auto_cast(enable=False): - dist.wait(tensor=full_param, - group=group, - use_calc_stream=use_calc_stream) - core.VarBase(full_param._slice( - 0, param._numel()))._share_buffer_to(param) + dist.wait( + tensor=full_param, + group=group, + use_calc_stream=use_calc_stream, + ) + core.VarBase(full_param._slice(0, param._numel()))._share_buffer_to( + param + ) param.fw_storage._clear() param.fw_storage = None param.status = "all" @@ -855,10 +976,12 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow): if param.name in task_flow.full_grad.keys(): continue assert 
isinstance(param2buffer_size[param.name], int) - temp_grad = paddle.zeros([param2buffer_size[param.name]], - dtype=param.dtype) + temp_grad = paddle.zeros( + [param2buffer_size[param.name]], dtype=param.dtype + ) param._copy_gradient_from( - core.VarBase(temp_grad._slice(0, param._numel()))) + core.VarBase(temp_grad._slice(0, param._numel())) + ) task_flow.full_grad[param.name] = temp_grad return task_flow @@ -882,13 +1005,14 @@ def _UnsliceParam(param): def _VarBaseWrapper(param): varbase = param.fw_storage - tmp_param = ParamBase(shape=varbase.shape, - dtype=varbase.dtype, - name="slice@" + param.name) + tmp_param = ParamBase( + shape=varbase.shape, dtype=varbase.dtype, name="slice@" + param.name + ) varbase._share_buffer_to(tmp_param) tmp_param.regularizer = param.regularizer tmp_param.optimize_attr['learning_rate'] = param.optimize_attr[ - 'learning_rate'] + 'learning_rate' + ] varbase._clear() return tmp_param @@ -913,14 +1037,17 @@ def _device2cpu(trans_param, convert_dtype=False): def _cpu2device(param): tmp_p = param.fw_storage.cuda(DEV_ID) - if tmp_p.dtype == Type.fp32.value and param2dtype[ - param.name] == Type.fp16.value: + if ( + tmp_p.dtype == Type.fp32.value + and param2dtype[param.name] == Type.fp16.value + ): tmp_p = paddle.cast(tmp_p, Type.fp16.value) return tmp_p def _current_layer_params(layer): - return layer.parameters( - include_sublayers=False) + list(layer.extra_parameters) if hasattr( - layer, "extra_parameters") else layer.parameters( - include_sublayers=False) + return ( + layer.parameters(include_sublayers=False) + list(layer.extra_parameters) + if hasattr(layer, "extra_parameters") + else layer.parameters(include_sublayers=False) + ) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py index 0e7725e3e21f875ea1defa657e93198cd72e324b..07cf159c3e66fd2ce8f4d4aae45bcc9c4e543433 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py @@ -40,13 +40,13 @@ class Type(Enum): """ Type of trainable parameters """ + fp16 = paddle.float16 bf16 = paddle.bfloat16 fp32 = paddle.float32 class ShardingClipGrad: - def __init__(self, clip, device, group): self._clip = clip self._device = device @@ -67,49 +67,64 @@ class ShardingClipGrad: merge_grad = g if g.type == core.VarDesc.VarType.SELECTED_ROWS: merge_grad = layers.get_tensor_from_selected_rows( - layers.merge_selected_rows(g)) + layers.merge_selected_rows(g) + ) square = layers.square(merge_grad) sum_square = layers.reduce_sum(square) if p.dtype == paddle.float16: - if p_slice: sum_square_fp16.append(sum_square) - else: unslice_params_fp16.append(sum_square) + if p_slice: + sum_square_fp16.append(sum_square) + else: + unslice_params_fp16.append(sum_square) elif p.dtype == paddle.float32: - if p_slice: sum_square_fp32.append(sum_square) - else: unslice_params_fp32.append(sum_square) + if p_slice: + sum_square_fp32.append(sum_square) + else: + unslice_params_fp32.append(sum_square) # global norm of non-distributed FP16 params_and_grads if len(sum_square_fp16) == 0: - global_norm_fp16 = paddle.to_tensor([0.], dtype=paddle.float32) + global_norm_fp16 = paddle.to_tensor([0.0], dtype=paddle.float32) else: global_norm_fp16 = layers.concat(sum_square_fp16) global_norm_fp16 = layers.reduce_sum(global_norm_fp16) - global_norm_fp16 = paddle.cast(global_norm_fp16, - dtype=paddle.float32) + global_norm_fp16 = paddle.cast( + 
global_norm_fp16, dtype=paddle.float32 + ) # global norm of non-distributed FP16 params_and_grads for unslice parameter if len(unslice_params_fp16) == 0: - global_unslice_fp16 = paddle.to_tensor([0.], dtype=paddle.float32) + global_unslice_fp16 = paddle.to_tensor([0.0], dtype=paddle.float32) else: global_unslice_fp16 = layers.concat(unslice_params_fp16) global_unslice_fp16 = layers.reduce_sum(global_unslice_fp16) - global_unslice_fp16 = paddle.cast(global_unslice_fp16, - dtype=paddle.float32) + global_unslice_fp16 = paddle.cast( + global_unslice_fp16, dtype=paddle.float32 + ) # global norm of non-distributed FP32 params_and_grads - global_norm_fp32 = layers.concat( - sum_square_fp32) if len(sum_square_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_norm_fp32 = ( + layers.concat(sum_square_fp32) + if len(sum_square_fp32) != 0 + else paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_norm_fp32 = layers.reduce_sum(global_norm_fp32) # global norm of non-distributed FP32 params_and_grads for unslice parameter - global_unslice_fp32 = layers.concat(unslice_params_fp32) if len( - unslice_params_fp32) != 0 else paddle.to_tensor( - [0.], dtype=paddle.float32) + global_unslice_fp32 = ( + layers.concat(unslice_params_fp32) + if len(unslice_params_fp32) != 0 + else paddle.to_tensor([0.0], dtype=paddle.float32) + ) global_unslice_fp32 = layers.reduce_sum(global_unslice_fp32) global_unslice_var = global_unslice_fp16 + global_unslice_fp32 - global_norm_var = global_norm_fp16 + global_norm_fp32 + 1.0 / self._group.nranks * global_unslice_var + global_norm_var = ( + global_norm_fp16 + + global_norm_fp32 + + 1.0 / self._group.nranks * global_unslice_var + ) # add all reduce to get global norm of distributed params_and_grads dev_id = int(self._device.split(":")[1]) @@ -117,14 +132,14 @@ class ShardingClipGrad: paddle.distributed.all_reduce(global_norm_var, group=self._group) global_norm_var = layers.sqrt(global_norm_var) - max_global_norm = layers.fill_constant(shape=[1], - dtype=global_norm_var.dtype, - value=self.clip_norm) - - clip_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=global_norm_var, - y=max_global_norm)) + max_global_norm = layers.fill_constant( + shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm + ) + + clip_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max(x=global_norm_var, y=max_global_norm), + ) clip_var_fp16 = paddle.cast(clip_var, paddle.float16) for p, g in params_grads: @@ -163,7 +178,6 @@ def device_guard(dev_id=0, device="cpu"): @dygraph_only def ShardingScaler(scaler): - def unscale_method(self, optimizer): if not self._enable: return @@ -175,14 +189,16 @@ def ShardingScaler(scaler): optimizer.update_scaler = True if getattr(optimizer._optim, '_param_groups', None) and isinstance( - optimizer._optim._param_groups[0], dict): + optimizer._optim._param_groups[0], dict + ): for group in optimizer._optim._param_groups: for param in group['params']: if param._grad_ivar() is not None: param_grads.append(param._grad_ivar()) if param._grad_ivar().dtype in [ - core.VarDesc.VarType.FP16, paddle.float16 + core.VarDesc.VarType.FP16, + paddle.float16, ]: param_grads_fp16.append(param._grad_ivar()) else: @@ -192,7 +208,8 @@ def ShardingScaler(scaler): if param.grad is not None: param_grads.append(param.grad) if param.grad.dtype in [ - core.VarDesc.VarType.FP16, paddle.float16 + core.VarDesc.VarType.FP16, + paddle.float16, ]: param_grads_fp16.append(param.grad) else: @@ -202,27 +219,34 @@ def 
ShardingScaler(scaler): temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_)) device = "cpu" if optimizer.offload else "gpu" - dev_id = 0 if device == "cpu" else int( - paddle.get_device().split(":")[1]) + dev_id = ( + 0 if device == "cpu" else int(paddle.get_device().split(":")[1]) + ) with device_guard(dev_id, device): if len(param_grads_fp16): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp16, - self._scale, - param_grads_fp16, - temp_found_inf_fp16) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp16, + self._scale, + param_grads_fp16, + temp_found_inf_fp16, + ) if len(param_grads_fp32): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp32, - self._scale, - param_grads_fp32, - temp_found_inf_fp32) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp32, + self._scale, + param_grads_fp32, + temp_found_inf_fp32, + ) self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0 is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32") - paddle.distributed.all_reduce(is_found_inf, - op=paddle.distributed.ReduceOp.MAX, - group=optimizer.group) + paddle.distributed.all_reduce( + is_found_inf, + op=paddle.distributed.ReduceOp.MAX, + group=optimizer.group, + ) self._found_inf = is_found_inf.numpy()[0] scaler._unscale = MethodType(unscale_method, scaler) diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding_parallel.py b/python/paddle/distributed/fleet/meta_parallel/sharding_parallel.py index fc3f195f7dea59b7b5c6b53c411e0762c31d7e8f..65fd317bd7623304f8ebc2c4804bc1812918668c 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding_parallel.py @@ -20,7 +20,6 @@ __all__ = [] class ShardingParallel(MetaParallelBase): - def __init__(self, layers, hcg, **kwargs): super(ShardingParallel, self).__init__(layers, hcg, **kwargs) diff --git a/python/paddle/distributed/fleet/meta_parallel/tensor_parallel.py b/python/paddle/distributed/fleet/meta_parallel/tensor_parallel.py index 2e2072e9a3e2e7bf9034cb95713234b615395553..5a7d0ae6c770e152160a929f78ef437a96168f45 100755 --- a/python/paddle/distributed/fleet/meta_parallel/tensor_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/tensor_parallel.py @@ -15,14 +15,16 @@ from .meta_parallel_base import MetaParallelBase from ..utils.hybrid_parallel_util import broadcast_dp_parameters from ..utils.hybrid_parallel_util import broadcast_input_data -from ..utils.hybrid_parallel_util import broadcast_mp_parameters, broadcast_sharding_parameters +from ..utils.hybrid_parallel_util import ( + broadcast_mp_parameters, + broadcast_sharding_parameters, +) from ..utils.log_util import logger __all__ = [] class TensorParallel(MetaParallelBase): - def __init__(self, layers, hcg, **kwargs): super(TensorParallel, self).__init__(layers, hcg, **kwargs) diff --git a/python/paddle/distributed/fleet/metrics/metric.py b/python/paddle/distributed/fleet/metrics/metric.py index 6aa3793cd65d5029b1f2aae8b615603ac058d3ce..34c1c2968c8206692d840bdd10948ff116c6a620 100644 --- a/python/paddle/distributed/fleet/metrics/metric.py +++ b/python/paddle/distributed/fleet/metrics/metric.py @@ -257,7 +257,8 @@ def mae(abserr, total_ins_num, scope=None, util=None): abserr = np.array(scope.find_var(abserr).get_tensor()) if isinstance(total_ins_num, Variable): total_ins_num = np.array( - scope.find_var(total_ins_num.name).get_tensor()) + scope.find_var(total_ins_num.name).get_tensor() + ) elif isinstance(total_ins_num, str): total_ins_num = 
np.array(scope.find_var(total_ins_num).get_tensor()) @@ -306,7 +307,8 @@ def rmse(sqrerr, total_ins_num, scope=None, util=None): sqrerr = np.array(scope.find_var(sqrerr).get_tensor()) if isinstance(total_ins_num, Variable): total_ins_num = np.array( - scope.find_var(total_ins_num.name).get_tensor()) + scope.find_var(total_ins_num.name).get_tensor() + ) elif isinstance(total_ins_num, str): total_ins_num = np.array(scope.find_var(total_ins_num).get_tensor()) old_metric_shape = np.array(sqrerr.shape) @@ -355,7 +357,8 @@ def mse(sqrerr, total_ins_num, scope=None, util=None): sqrerr = np.array(scope.find_var(sqrerr).get_tensor()) if isinstance(total_ins_num, Variable): total_ins_num = np.array( - scope.find_var(total_ins_num.name).get_tensor()) + scope.find_var(total_ins_num.name).get_tensor() + ) elif isinstance(total_ins_num, str): total_ins_num = np.array(scope.find_var(total_ins_num).get_tensor()) old_metric_shape = np.array(sqrerr.shape) diff --git a/python/paddle/distributed/fleet/model.py b/python/paddle/distributed/fleet/model.py index 632e016f3d350c525c4e0dffd9d0936add478d2b..21e6d07ad55e329ee6439cb0f04c650a924c0bf3 100644 --- a/python/paddle/distributed/fleet/model.py +++ b/python/paddle/distributed/fleet/model.py @@ -15,7 +15,12 @@ import paddle from .base.topology import ParallelMode from .meta_parallel import TensorParallel -from .meta_parallel import PipelineParallel, ShardingParallel, PipelineParallelWithInterleave, PipelineLayer +from .meta_parallel import ( + PipelineParallel, + ShardingParallel, + PipelineParallelWithInterleave, + PipelineLayer, +) from paddle.fluid.dygraph.varbase_patch_methods import _grad_scalar from paddle.distributed import fleet @@ -89,19 +94,23 @@ def distributed_model(model): amp_enable = True amp_level = "O2" if strategy.amp_configs['use_pure_fp16'] else "O1" if amp_level.upper() == "O2": - model = paddle.amp.decorate(models=model, - optimizers=None, - level="O2", - master_weight=None, - save_dtype=None) + model = paddle.amp.decorate( + models=model, + optimizers=None, + level="O2", + master_weight=None, + save_dtype=None, + ) init_loss_scaling = strategy.amp_configs['init_loss_scaling'] incr_ratio = strategy.amp_configs['incr_ratio'] decr_ratio = strategy.amp_configs['decr_ratio'] incr_every_n_steps = strategy.amp_configs['incr_every_n_steps'] decr_every_n_nan_or_inf = strategy.amp_configs[ - 'decr_every_n_nan_or_inf'] + 'decr_every_n_nan_or_inf' + ] use_dynamic_loss_scaling = strategy.amp_configs[ - 'use_dynamic_loss_scaling'] + 'use_dynamic_loss_scaling' + ] global _grad_scalar _grad_scalar = paddle.amp.GradScaler( @@ -110,14 +119,16 @@ def distributed_model(model): decr_ratio=decr_ratio, incr_every_n_steps=incr_every_n_steps, decr_every_n_nan_or_inf=decr_every_n_nan_or_inf, - use_dynamic_loss_scaling=use_dynamic_loss_scaling) + use_dynamic_loss_scaling=use_dynamic_loss_scaling, + ) if strategy.heter_ccl_mode == True: distributed_model = paddle.DataParallel( model, comm_buffer_size=strategy.fuse_grad_size_in_MB, last_comm_buffer_size=strategy.last_comm_group_size_MB, - find_unused_parameters=strategy.find_unused_parameters) + find_unused_parameters=strategy.find_unused_parameters, + ) return distributed_model if fleet_env._hcg.get_parallel_mode() == ParallelMode.SHARDING_PARALLEL: @@ -127,8 +138,13 @@ def distributed_model(model): # NOTE (JZ-LIANG) init parameters broadcast within sharding group # normally it should be done inside DataParallel if fleet_env.sharding_degree > 1: - from paddle.distributed.fleet.utils.hybrid_parallel_util import 
broadcast_sharding_parameters - assert fleet_env.sharding_degree == fleet_env._hcg.get_sharding_parallel_world_size( + from paddle.distributed.fleet.utils.hybrid_parallel_util import ( + broadcast_sharding_parameters, + ) + + assert ( + fleet_env.sharding_degree + == fleet_env._hcg.get_sharding_parallel_world_size() ) broadcast_sharding_parameters(model, fleet_env._hcg) model = paddle.DataParallel( @@ -136,7 +152,8 @@ def distributed_model(model): comm_buffer_size=strategy.fuse_grad_size_in_MB, last_comm_buffer_size=strategy.last_comm_group_size_MB, find_unused_parameters=strategy.find_unused_parameters, - group=fleet_env._hcg.get_data_parallel_group()) + group=fleet_env._hcg.get_data_parallel_group(), + ) elif fleet_env._hcg.get_parallel_mode() == ParallelMode.TENSOR_PARALLEL: model = TensorParallel(model, fleet_env._hcg, strategy=strategy) elif fleet_env._hcg.get_parallel_mode() == ParallelMode.PIPELINE_PARALLEL: @@ -148,8 +165,8 @@ def distributed_model(model): model = PipelineParallel(model, fleet_env._hcg, strategy=strategy) else: # interleave pipeline - model = PipelineParallelWithInterleave(model, - fleet_env._hcg, - strategy=strategy) + model = PipelineParallelWithInterleave( + model, fleet_env._hcg, strategy=strategy + ) return model diff --git a/python/paddle/distributed/fleet/optimizer.py b/python/paddle/distributed/fleet/optimizer.py index 42567465c551be98fa689f2a03671a45ac93ca09..37a3a896f6b05664f110668524535f2af583eca6 100644 --- a/python/paddle/distributed/fleet/optimizer.py +++ b/python/paddle/distributed/fleet/optimizer.py @@ -21,27 +21,27 @@ from .utils.log_util import logger def _dygraph_distributed_optimizer(optimizer, strategy=None): """ - Optimizer for distributed training. - For the distributed training, this method would rebuild a new instance of DistributedOptimizer. - Which has basic Optimizer function and special features for distributed training. - Args: - optimizer(Optimizer): The executor to run for init server. - strategy(DistributedStrategy): Extra properties for distributed optimizer. - It is recommended to use DistributedStrategy in fleet.init(). The strategy - here is for compatibility. If the strategy in fleet.distributed_optimizer() - is not None, then it will overwrite the DistributedStrategy in fleet.init(), - which will take effect in distributed training. - Returns: - Fleet: instance of fleet. - Examples: - .. code-block:: python - import paddle - import paddle.distributed.fleet as fleet - fleet.init(is_collective=True) - strategy = fleet.DistributedStrategy() - optimizer = paddle.optimizer.SGD(learning_rate=0.001) - optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) - """ + Optimizer for distributed training. + For the distributed training, this method would rebuild a new instance of DistributedOptimizer. + Which has basic Optimizer function and special features for distributed training. + Args: + optimizer(Optimizer): The executor to run for init server. + strategy(DistributedStrategy): Extra properties for distributed optimizer. + It is recommended to use DistributedStrategy in fleet.init(). The strategy + here is for compatibility. If the strategy in fleet.distributed_optimizer() + is not None, then it will overwrite the DistributedStrategy in fleet.init(), + which will take effect in distributed training. + Returns: + Fleet: instance of fleet. + Examples: + .. 
code-block:: python + import paddle + import paddle.distributed.fleet as fleet + fleet.init(is_collective=True) + strategy = fleet.DistributedStrategy() + optimizer = paddle.optimizer.SGD(learning_rate=0.001) + optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) + """ fleet_env = fleet.fleet fleet_env.user_defined_optimizer = optimizer @@ -52,18 +52,21 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None): "in fleet_env.init(). The strategy here is only for compatibility. " "If the strategy in fleet_env.distributed_optimizer() is " "not None, then it will overwrite the DistributedStrategy in fleet_env.init(), " - "which will take effect in distributed training.") + "which will take effect in distributed training." + ) fleet_env._user_defined_strategy = copy.deepcopy(strategy) fleet_env._context = {} if fleet_env.worker_num() > 1: if fleet_env._user_defined_strategy.heter_ccl_mode == False: - return HybridParallelOptimizer(optimizer, fleet_env._hcg, - fleet_env._user_defined_strategy) + return HybridParallelOptimizer( + optimizer, fleet_env._hcg, fleet_env._user_defined_strategy + ) else: - return HeterParallelOptimizer(optimizer, - fleet_env._user_defined_strategy) + return HeterParallelOptimizer( + optimizer, fleet_env._user_defined_strategy + ) else: return optimizer diff --git a/python/paddle/distributed/fleet/recompute/recompute.py b/python/paddle/distributed/fleet/recompute/recompute.py index 03b0c9d73f06b2b99d3b02b02fbb2e38fbdf7efc..2657c60f02c9a7a8b8c9f189ef1230d355f76818 100755 --- a/python/paddle/distributed/fleet/recompute/recompute.py +++ b/python/paddle/distributed/fleet/recompute/recompute.py @@ -40,16 +40,23 @@ def detach_variable(inputs): def check_recompute_necessary(inputs): - if not any(input_.stop_gradient == False for input_ in inputs - if isinstance(input_, (core.eager.Tensor, paddle.Tensor))): + if not any( + input_.stop_gradient == False + for input_ in inputs + if isinstance(input_, (core.eager.Tensor, paddle.Tensor)) + ): logger.warning( "[Recompute]: None of the inputs to current recompute block need grad, " - "therefore there is NO need to recompute this block in backward !") + "therefore there is NO need to recompute this block in backward !" + ) @contextlib.contextmanager def swith_rng_state_tracker(rng_state, tracker): - from paddle.distributed.fleet.meta_parallel.parallel_layers.random import get_rng_state_tracker + from paddle.distributed.fleet.meta_parallel.parallel_layers.random import ( + get_rng_state_tracker, + ) + orig_cuda_rng_state = paddle.get_cuda_rng_state() orig_cuda_rng_tracker = get_rng_state_tracker().get_states_tracker() @@ -63,10 +70,11 @@ def swith_rng_state_tracker(rng_state, tracker): class LegacyRecomputeFunction(LegacyPyLayer): - @staticmethod def forward(ctx, run_function, preserve_rng_state, *args): - from paddle.distributed.fleet.meta_parallel.parallel_layers.random import get_rng_state_tracker + from paddle.distributed.fleet.meta_parallel.parallel_layers.random import ( + get_rng_state_tracker, + ) # store for recomputing ctx.run_function = run_function @@ -95,30 +103,37 @@ class LegacyRecomputeFunction(LegacyPyLayer): cur_device = paddle.get_device() if 'gpu:' not in cur_device: raise RuntimeError( - "Recompute with RNG perserve is not support current device: {}." 
- .format(cur_device)) + "Recompute with RNG perserve is not support current device: {}.".format( + cur_device + ) + ) ctx.fw_cuda_rng_state = paddle.get_cuda_rng_state() - ctx.fwd_cuda_rng_state_tracker = get_rng_state_tracker( - ).get_states_tracker() + ctx.fwd_cuda_rng_state_tracker = ( + get_rng_state_tracker().get_states_tracker() + ) # TODO support AMP tracer = framework._dygraph_tracer() - ctx.is_fw_autocast = False if tracer._amp_level == core.AmpLevel.O0 else True + ctx.is_fw_autocast = ( + False if tracer._amp_level == core.AmpLevel.O0 else True + ) if tracer._amp_level == core.AmpLevel.O2: ctx.amp_level = 'O2' elif tracer._amp_level in (core.AmpLevel.O1, core.AmpLevel.O0): ctx.amp_level = 'O1' else: - raise ValueError("unsupported amp level: {}".format( - tracer._amp_level)) + raise ValueError( + "unsupported amp level: {}".format(tracer._amp_level) + ) if tracer._amp_dtype == 'float16': ctx.amp_dtype = 'float16' elif tracer._amp_dtype in ('bfloat16', 'float32'): ctx.amp_dtype = 'bfloat16' else: - raise ValueError("unsupported amp dtype: {}".format( - tracer._amp_dtype)) + raise ValueError( + "unsupported amp dtype: {}".format(tracer._amp_dtype) + ) ctx.amp_white_list, ctx.amp_black_list = tracer._get_amp_op_list() @@ -145,27 +160,31 @@ class LegacyRecomputeFunction(LegacyPyLayer): # NOTE support AMP # need restore auto_cast state as well as w/b list if ctx.preserve_rng_state: - with swith_rng_state_tracker(ctx.fw_cuda_rng_state, - ctx.fwd_cuda_rng_state_tracker): + with swith_rng_state_tracker( + ctx.fw_cuda_rng_state, ctx.fwd_cuda_rng_state_tracker + ): with paddle.amp.auto_cast( - enable=ctx.is_fw_autocast, - custom_white_list=ctx.amp_white_list, - custom_black_list=ctx.amp_black_list, - level=ctx.amp_level, - dtype=ctx.amp_dtype): + enable=ctx.is_fw_autocast, + custom_white_list=ctx.amp_white_list, + custom_black_list=ctx.amp_black_list, + level=ctx.amp_level, + dtype=ctx.amp_dtype, + ): detached_inputs = detach_variable(tuple(inputs)) outputs = ctx.run_function(*detached_inputs) else: - with paddle.amp.auto_cast(enable=ctx.is_fw_autocast, - custom_white_list=ctx.amp_white_list, - custom_black_list=ctx.amp_black_list, - level=ctx.amp_level, - dtype=ctx.amp_dtype): + with paddle.amp.auto_cast( + enable=ctx.is_fw_autocast, + custom_white_list=ctx.amp_white_list, + custom_black_list=ctx.amp_black_list, + level=ctx.amp_level, + dtype=ctx.amp_dtype, + ): detached_inputs = detach_variable(tuple(inputs)) outputs = ctx.run_function(*detached_inputs) if isinstance(outputs, core.VarBase): - outputs = (outputs, ) + outputs = (outputs,) assert len(outputs) == len(args) # run backward() with only tensor that requires grad @@ -176,8 +195,10 @@ class LegacyRecomputeFunction(LegacyPyLayer): # the following backward_inputs_with_grad is used to avoid this case. 
backward_inputs_with_grad = [] for i in range(len(outputs)): - if isinstance(outputs[i], - core.VarBase) and not outputs[i].stop_gradient: + if ( + isinstance(outputs[i], core.VarBase) + and not outputs[i].stop_gradient + ): forward_outputs_with_grad.append(outputs[i]) backward_inputs_with_grad.append(args[i]) @@ -188,19 +209,24 @@ class LegacyRecomputeFunction(LegacyPyLayer): # actually backward with paddle.amp.auto_cast(enable=False): - paddle.autograd.backward(forward_outputs_with_grad, - backward_inputs_with_grad) + paddle.autograd.backward( + forward_outputs_with_grad, backward_inputs_with_grad + ) - grads = list(inp._grad_ivar() for inp in detached_inputs - if isinstance(inp, core.VarBase)) + grads = list( + inp._grad_ivar() + for inp in detached_inputs + if isinstance(inp, core.VarBase) + ) return grads class RecomputeFunction(PyLayer): - @staticmethod def forward(ctx, run_function, preserve_rng_state, *args, **kwargs): - from paddle.distributed.fleet.meta_parallel.parallel_layers.random import get_rng_state_tracker + from paddle.distributed.fleet.meta_parallel.parallel_layers.random import ( + get_rng_state_tracker, + ) # store for recomputing ctx.run_function = run_function @@ -230,30 +256,37 @@ class RecomputeFunction(PyLayer): cur_device = paddle.get_device() if 'gpu:' not in cur_device: raise RuntimeError( - "Recompute with RNG perserve is not support current device: {}." - .format(cur_device)) + "Recompute with RNG perserve is not support current device: {}.".format( + cur_device + ) + ) ctx.fw_cuda_rng_state = paddle.get_cuda_rng_state() - ctx.fwd_cuda_rng_state_tracker = get_rng_state_tracker( - ).get_states_tracker() + ctx.fwd_cuda_rng_state_tracker = ( + get_rng_state_tracker().get_states_tracker() + ) # TODO support AMP tracer = framework._dygraph_tracer() - ctx.is_fw_autocast = False if tracer._amp_level == core.AmpLevel.O0 else True + ctx.is_fw_autocast = ( + False if tracer._amp_level == core.AmpLevel.O0 else True + ) if tracer._amp_level == core.AmpLevel.O2: ctx.amp_level = 'O2' elif tracer._amp_level in (core.AmpLevel.O1, core.AmpLevel.O0): ctx.amp_level = 'O1' else: - raise ValueError("unsupported amp level: {}".format( - tracer._amp_level)) + raise ValueError( + "unsupported amp level: {}".format(tracer._amp_level) + ) if tracer._amp_dtype == 'float16': ctx.amp_dtype = 'float16' elif tracer._amp_dtype in ('bfloat16', 'float32'): ctx.amp_dtype = 'bfloat16' else: - raise ValueError("unsupported amp dtype: {}".format( - tracer._amp_dtype)) + raise ValueError( + "unsupported amp dtype: {}".format(tracer._amp_dtype) + ) ctx.amp_white_list, ctx.amp_black_list = tracer._get_amp_op_list() @@ -280,28 +313,33 @@ class RecomputeFunction(PyLayer): # NOTE support AMP # need restore auto_cast state as well as w/b list if ctx.preserve_rng_state: - with swith_rng_state_tracker(ctx.fw_cuda_rng_state, - ctx.fwd_cuda_rng_state_tracker): + with swith_rng_state_tracker( + ctx.fw_cuda_rng_state, ctx.fwd_cuda_rng_state_tracker + ): with paddle.amp.auto_cast( - enable=ctx.is_fw_autocast, - custom_white_list=ctx.amp_white_list, - custom_black_list=ctx.amp_black_list, - level=ctx.amp_level, - dtype=ctx.amp_dtype): + enable=ctx.is_fw_autocast, + custom_white_list=ctx.amp_white_list, + custom_black_list=ctx.amp_black_list, + level=ctx.amp_level, + dtype=ctx.amp_dtype, + ): detached_inputs = detach_variable(tuple(inputs)) - outputs = ctx.run_function(*detached_inputs, - **ctx.kwargs) + outputs = ctx.run_function( + *detached_inputs, **ctx.kwargs + ) else: - with 
paddle.amp.auto_cast(enable=ctx.is_fw_autocast, - custom_white_list=ctx.amp_white_list, - custom_black_list=ctx.amp_black_list, - level=ctx.amp_level, - dtype=ctx.amp_dtype): + with paddle.amp.auto_cast( + enable=ctx.is_fw_autocast, + custom_white_list=ctx.amp_white_list, + custom_black_list=ctx.amp_black_list, + level=ctx.amp_level, + dtype=ctx.amp_dtype, + ): detached_inputs = detach_variable(tuple(inputs)) outputs = ctx.run_function(*detached_inputs, **ctx.kwargs) if isinstance(outputs, (core.VarBase, core.eager.Tensor)): - outputs = (outputs, ) + outputs = (outputs,) assert len(outputs) == len(args) # run backward() with only tensor that requires grad @@ -312,10 +350,10 @@ class RecomputeFunction(PyLayer): # the following backward_inputs_with_grad is used to avoid this case. backward_inputs_with_grad = [] for i in range(len(outputs)): - if isinstance( - outputs[i], - (core.VarBase, - core.eager.Tensor)) and not outputs[i].stop_gradient: + if ( + isinstance(outputs[i], (core.VarBase, core.eager.Tensor)) + and not outputs[i].stop_gradient + ): forward_outputs_with_grad.append(outputs[i]) backward_inputs_with_grad.append(args[i]) @@ -326,17 +364,22 @@ class RecomputeFunction(PyLayer): # actually backward with paddle.amp.auto_cast(enable=False): - paddle.autograd.backward(forward_outputs_with_grad, - backward_inputs_with_grad) + paddle.autograd.backward( + forward_outputs_with_grad, backward_inputs_with_grad + ) if in_dygraph_mode(): grads = tuple( - inp._grad_ivar() for inp in detached_inputs - if isinstance(inp, (core.VarBase, core.eager.Tensor))) + inp._grad_ivar() + for inp in detached_inputs + if isinstance(inp, (core.VarBase, core.eager.Tensor)) + ) else: grads = list( - inp._grad_ivar() for inp in detached_inputs - if isinstance(inp, (core.VarBase, core.eager.Tensor))) + inp._grad_ivar() + for inp in detached_inputs + if isinstance(inp, (core.VarBase, core.eager.Tensor)) + ) return grads @@ -494,7 +537,6 @@ def recompute_sequential(ctx, functions, *args, **kwargs): preserve_rng_state = ctx.get('preserve_rng_state', True) def _run_func(begin, end, funcs): - def do_run(input): for i in range(begin, end + 1): input = funcs[i](input) @@ -510,8 +552,10 @@ def recompute_sequential(ctx, functions, *args, **kwargs): end = -1 for begin in range(0, segment_size * (segments - 1), segment_size): end = begin + segment_size - 1 - args = recompute(_run_func(begin, end, functions), - *args, - preserve_rng_state=preserve_rng_state, - **kwargs) + args = recompute( + _run_func(begin, end, functions), + *args, + preserve_rng_state=preserve_rng_state, + **kwargs + ) return _run_func(end + 1, len(functions) - 1, functions)(args) diff --git a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py index 9c006c1e044f0ca9c1f455b7321fa05a150163a5..7f0d22726e59635f70e7e688bec530ddf2ee7954 100644 --- a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py +++ b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py @@ -17,7 +17,11 @@ from paddle.fluid import core from paddle.autograd import PyLayer from paddle.fluid import framework from ..meta_parallel.parallel_layers.random import get_rng_state_tracker -from .recompute import check_recompute_necessary, detach_variable, swith_rng_state_tracker +from .recompute import ( + check_recompute_necessary, + detach_variable, + swith_rng_state_tracker, +) from ..meta_parallel.pp_utils import utils __all__ = [] @@ -32,8 +36,11 @@ def _split_activation(tensor, mp_group): tensor_numel = 
paddle.numel(tensor) assert tensor_numel != 0, "can't recompute zero element" - assert tensor_numel % mp_degree == 0, "The capacity of the activation ({}) cannot be divisible by mp_degree({})".format( - tensor_numel, mp_degree) + assert ( + tensor_numel % mp_degree == 0 + ), "The capacity of the activation ({}) cannot be divisible by mp_degree({})".format( + tensor_numel, mp_degree + ) # use inplace operation to save memory data = tensor.flatten_() @@ -69,8 +76,16 @@ class _HPRecomputeFunction(PyLayer): """ @staticmethod - def forward(ctx, run_function, all_outputs, mp_group, offload, partition, - *args, **kwargs): + def forward( + ctx, + run_function, + all_outputs, + mp_group, + offload, + partition, + *args, + **kwargs + ): check_recompute_necessary(args) # store for recomputing @@ -80,8 +95,9 @@ class _HPRecomputeFunction(PyLayer): # store the rng states ctx.fwd_cuda_rng_state = paddle.get_cuda_rng_state() - ctx.fwd_cuda_rng_state_tracker = get_rng_state_tracker( - ).get_states_tracker() + ctx.fwd_cuda_rng_state_tracker = ( + get_rng_state_tracker().get_states_tracker() + ) # save config info ctx.mp_group = mp_group @@ -95,20 +111,25 @@ class _HPRecomputeFunction(PyLayer): tensor_inputs = [] cur_device = paddle.get_device() - assert 'gpu:' in paddle.get_device( + assert ( + 'gpu:' in paddle.get_device() ), "Recompute with RNG is not support current device: {}.".format( - cur_device) + cur_device + ) # TODO support AMP tracer = framework._dygraph_tracer() - ctx.is_fw_autocast = False if tracer._amp_level == core.AmpLevel.O0 else True + ctx.is_fw_autocast = ( + False if tracer._amp_level == core.AmpLevel.O0 else True + ) if tracer._amp_level == core.AmpLevel.O2: ctx.amp_level = 'O2' elif tracer._amp_level in (core.AmpLevel.O1, core.AmpLevel.O0): ctx.amp_level = 'O1' else: - raise ValueError("unsupported amp level: {}".format( - tracer._amp_level)) + raise ValueError( + "unsupported amp level: {}".format(tracer._amp_level) + ) ctx.amp_white_list, ctx.amp_black_list = tracer._get_amp_op_list() with paddle.no_grad(): @@ -119,8 +140,9 @@ class _HPRecomputeFunction(PyLayer): state = arg.stop_gradient if partition: ctx.tensor_shapes.append(arg.shape) - partition = _split_activation(arg.detach(), - mp_group).clone() + partition = _split_activation( + arg.detach(), mp_group + ).clone() # TODO(shenliang03) not use calculate stream to D2H to speed arg = partition.cpu() if offload else partition else: @@ -154,38 +176,44 @@ class _HPRecomputeFunction(PyLayer): for i, idx in enumerate(tensor_indices): if ctx.partition: state = tensors[i].stop_gradient - tensors[i] = _merge_activation( - tensors[i], - ctx.mp_group).detach().reshape_(tensor_shapes[i]) + tensors[i] = ( + _merge_activation(tensors[i], ctx.mp_group) + .detach() + .reshape_(tensor_shapes[i]) + ) tensors[i].stop_gradient = state - inputs[idx] = tensors[i].cuda( - device_id) if ctx.offload else tensors[i] + inputs[idx] = ( + tensors[i].cuda(device_id) if ctx.offload else tensors[i] + ) tracer = framework._dygraph_tracer() tracer._has_grad = True # need restore auto_cast state as well as w/b list - with swith_rng_state_tracker(ctx.fwd_cuda_rng_state, - ctx.fwd_cuda_rng_state_tracker): - with paddle.amp.auto_cast(enable=ctx.is_fw_autocast, - custom_white_list=ctx.amp_white_list, - custom_black_list=ctx.amp_black_list, - level=ctx.amp_level): + with swith_rng_state_tracker( + ctx.fwd_cuda_rng_state, ctx.fwd_cuda_rng_state_tracker + ): + with paddle.amp.auto_cast( + enable=ctx.is_fw_autocast, + custom_white_list=ctx.amp_white_list, + 
custom_black_list=ctx.amp_black_list, + level=ctx.amp_level, + ): detached_inputs = detach_variable(tuple(inputs)) outputs = ctx.run_function(*detached_inputs, **ctx.kwargs) if isinstance(outputs, (core.VarBase, core.eager.Tensor)): - outputs = (outputs, ) + outputs = (outputs,) assert len(outputs) == len(args) forward_outputs_with_grad = [] backward_inputs = [] for i in range(len(outputs)): - if isinstance( - outputs[i], - (core.VarBase, - core.eager.Tensor)) and not outputs[i].stop_gradient: + if ( + isinstance(outputs[i], (core.VarBase, core.eager.Tensor)) + and not outputs[i].stop_gradient + ): forward_outputs_with_grad.append(outputs[i]) backward_inputs.append(args[i]) @@ -196,8 +224,11 @@ class _HPRecomputeFunction(PyLayer): # actually backward paddle.autograd.backward(forward_outputs_with_grad, backward_inputs) - grads = tuple(inp._grad_ivar() for inp in detached_inputs - if isinstance(inp, (core.VarBase, core.eager.Tensor))) + grads = tuple( + inp._grad_ivar() + for inp in detached_inputs + if isinstance(inp, (core.VarBase, core.eager.Tensor)) + ) return grads @@ -226,14 +257,17 @@ def recompute_hybrid(ctx, function, *args, **kwargs): """ mp_group = ctx.get('mp_group', None) - assert mp_group is not None, "ctx must contains mp_group and mp_group can not be None." + assert ( + mp_group is not None + ), "ctx must contains mp_group and mp_group can not be None." offload = ctx.get('offload', False) partition = ctx.get('partition', False) all_outputs = [] - _HPRecomputeFunction.apply(function, all_outputs, mp_group, offload, - partition, *args, **kwargs) + _HPRecomputeFunction.apply( + function, all_outputs, mp_group, offload, partition, *args, **kwargs + ) if len(all_outputs) == 1: return all_outputs[0] diff --git a/python/paddle/distributed/fleet/runtime/collective_runtime.py b/python/paddle/distributed/fleet/runtime/collective_runtime.py index 5b66bf79398acc94bc1750e6177f22377adf85b7..7ccd0ecf7d000ebab8f93fc5022ad789f9b11ebf 100644 --- a/python/paddle/distributed/fleet/runtime/collective_runtime.py +++ b/python/paddle/distributed/fleet/runtime/collective_runtime.py @@ -19,28 +19,32 @@ __all__ = [] class CollectiveRuntime(RuntimeBase): - def __init__(self): super(CollectiveRuntime, self).__init__() def _init_worker(self): logging.warn( - "You should not call 'init_worker' method for collective mode.") + "You should not call 'init_worker' method for collective mode." + ) def _run_worker(self): logging.warn( - "You should not call 'run_worker' method for collective mode.") + "You should not call 'run_worker' method for collective mode." + ) def _init_server(self, *args, **kwargs): logging.warn( - "You should not call 'init_server' method for collective mode.") + "You should not call 'init_server' method for collective mode." + ) def _run_server(self): logging.warn( - "You should not call 'run_server' method for collective mode.") + "You should not call 'run_server' method for collective mode." + ) def _stop_worker(self): logging.warn( - "You should not call 'stop_worker' method for collective mode.") + "You should not call 'stop_worker' method for collective mode." 
+ ) # save inference model should be added here diff --git a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py index 062a6d5abf5846c1c1cb76fa4617f2cbc4cd3290..e2c5e5da29ea45539f8f6a74b332c6b93a7551c6 100644 --- a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py +++ b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py @@ -30,7 +30,6 @@ __all__ = [] class ParameterServerRuntime(RuntimeBase): - def __init__(self): super(ParameterServerRuntime, self).__init__() self._communicator = None @@ -46,7 +45,9 @@ class ParameterServerRuntime(RuntimeBase): def _get_distributed_strategy(self): strategy = None - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, + ) dist_strategy = self.context["valid_strategy"] k_steps = dist_strategy.a_sync_configs["k_steps"] @@ -66,19 +67,21 @@ class ParameterServerRuntime(RuntimeBase): return strategy def build_compiled_startegy(self): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy - - compiled_config = CompileTimeStrategy(self.origin_main_program, - self.origin_main_program, - self.async_strategy, - self.role_maker) + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + CompileTimeStrategy, + ) + + compiled_config = CompileTimeStrategy( + self.origin_main_program, + self.origin_main_program, + self.async_strategy, + self.role_maker, + ) return compiled_config - def _load_sparse_params(self, - executor, - dirname, - varnames, - main_program=None): + def _load_sparse_params( + self, executor, dirname, varnames, main_program=None + ): assert vars != None check_vars = [] load_prog = Program() @@ -88,12 +91,15 @@ class ParameterServerRuntime(RuntimeBase): return var.name in varnames load_vars = list( - filter(_in_varnames, - fluid.default_main_program().list_vars())) + filter(_in_varnames, fluid.default_main_program().list_vars()) + ) if main_program is None: main_program = self.origin_main_program - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_varname_parts, + ) + for each_var in load_vars: assert isinstance(each_var, Variable) @@ -104,29 +110,31 @@ class ParameterServerRuntime(RuntimeBase): if not os.path.exists(var_path): raise ValueError( "SelectedRows var {} can not find at {}".format( - new_var.name, var_path)) + new_var.name, var_path + ) + ) if os.path.isfile(var_path): - load_block.append_op(type='sparse_tensor_load', - inputs={}, - outputs={'Out': [new_var]}, - attrs={ - 'file_path': - os.path.join(dirname, origin_varname), - 'node_index': - self.role_maker._server_index(), - 'node_num': - self.role_maker._server_num(), - 'shape': - each_var.shape - }) + load_block.append_op( + type='sparse_tensor_load', + inputs={}, + outputs={'Out': [new_var]}, + attrs={ + 'file_path': os.path.join(dirname, origin_varname), + 'node_index': self.role_maker._server_index(), + 'node_num': self.role_maker._server_num(), + 'shape': each_var.shape, + }, + ) check_vars.append(each_var) executor.run(load_prog) def _load_distributed_params(self, dirname, varnames): from paddle.fluid.communicator import LargeScaleKV - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts + 
from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_varname_parts, + ) scale_kv = LargeScaleKV() for varname in varnames: @@ -136,12 +144,13 @@ class ParameterServerRuntime(RuntimeBase): @staticmethod def __exclude_vars(exclude_var_names=[]): - def is_valid(var): if var.name in exclude_var_names: return False - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_varname_parts, + ) origin_varname, _, _ = _get_varname_parts(var.name) if origin_varname.endswith("@GRAD"): @@ -150,25 +159,29 @@ class ParameterServerRuntime(RuntimeBase): if origin_varname == "learning_rate_0": return False - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable return is_valid def _init_worker(self): - def sync_strategy_envs(): kwargs = {} kwargs[ - "pserver_endpoints"] = self.role_maker._get_pserver_endpoints() + "pserver_endpoints" + ] = self.role_maker._get_pserver_endpoints() kwargs["trainer_id"] = self.role_maker._worker_index() return kwargs def geo_strategy_envs(): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, + ) def get_sparse_attrs(): opt_init_map = {} @@ -176,13 +189,17 @@ class ParameterServerRuntime(RuntimeBase): opt_init_map["fill_constant"] = ["value"] opt_init_map["uniform_random"] = ["seed", "min", "max"] opt_init_map["truncated_gaussian_random"] = [ - "seed", "mean", "std" + "seed", + "mean", + "std", ] - dist_varnames = get_sparse_tablenames(self.origin_main_program, - True) + dist_varnames = get_sparse_tablenames( + self.origin_main_program, True + ) sparse_varnames = get_sparse_tablenames( - self.origin_main_program, False) + self.origin_main_program, False + ) if len(dist_varnames) != 0: raise ValueError( @@ -191,15 +208,18 @@ class ParameterServerRuntime(RuntimeBase): init_attrs = [] for value_name in sparse_varnames: - value_var = self.origin_main_program.global_block( - ).vars[value_name] + value_var = self.origin_main_program.global_block().vars[ + value_name + ] value_attr = [ value_name, - ",".join([str(dim) for dim in value_var.shape]) + ",".join([str(dim) for dim in value_var.shape]), ] for op in self.origin_startup_program.global_block().ops: - if op.type in opt_init_map.keys( - ) and value_name == op.output("Out")[0]: + if ( + op.type in opt_init_map.keys() + and value_name == op.output("Out")[0] + ): init_attr = [op.type] for attr in opt_init_map[op.type]: init_attr.append(str(op.attr(attr))) @@ -213,10 +233,15 @@ class ParameterServerRuntime(RuntimeBase): kwargs["sparse_attrs"] = get_sparse_attrs() return kwargs - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops, _has_global_step + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_lr_ops, + _has_global_step, + ) - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \ - SyncStrategy, GeoStrategy + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + SyncStrategy, + GeoStrategy, + ) 
trainer_config = self.async_strategy.get_trainer_runtime_config() print(trainer_config) @@ -228,7 +253,9 @@ class ParameterServerRuntime(RuntimeBase): wait_server_ready(self.role_maker._get_pserver_endpoints()) # for ps-heter mode, wait heter worker ready - if self.role_maker._is_heter_parameter_server_mode and self.role_maker._is_worker( + if ( + self.role_maker._is_heter_parameter_server_mode + and self.role_maker._is_worker() ): wait_server_ready(self.role_maker._get_heter_worker_endpoints()) @@ -252,15 +279,18 @@ class ParameterServerRuntime(RuntimeBase): if self.compiled_strategy.is_geo_mode(): recv_ctx = self.compiled_strategy.get_communicator_recv_context( - recv_type=4) + recv_type=4 + ) else: recv_ctx = self.compiled_strategy.get_communicator_recv_context( - recv_type=1) + recv_type=1 + ) from paddle.fluid.communicator import Communicator + self._communicator = Communicator( - trainer_config.mode, kwargs, - trainer_config.get_communicator_flags()) + trainer_config.mode, kwargs, trainer_config.get_communicator_flags() + ) self._communicator.init_with_ctx(send_ctx, recv_ctx) if not self._communicator.is_running(): @@ -271,21 +301,30 @@ class ParameterServerRuntime(RuntimeBase): def _get_executor(self): executor = fluid.Executor(fluid.CPUPlace()) if self.role_maker._is_heter_parameter_server_mode: - heter_worker_device_guard = self.context[ - "valid_strategy"].a_sync_configs[ - "heter_worker_device_guard"].upper() + heter_worker_device_guard = ( + self.context["valid_strategy"] + .a_sync_configs["heter_worker_device_guard"] + .upper() + ) if heter_worker_device_guard not in ["GPU", "XPU", "CPU"]: - raise ValueError("Heter Worker Not Support Device {}".format( - heter_worker_device_guard)) + raise ValueError( + "Heter Worker Not Support Device {}".format( + heter_worker_device_guard + ) + ) if self.role_maker._is_heter_worker(): if heter_worker_device_guard == "GPU": executor = Executor( fluid.CUDAPlace( - int(os.getenv("FLAGS_selected_gpus", "0")))) + int(os.getenv("FLAGS_selected_gpus", "0")) + ) + ) elif heter_worker_device_guard == "XPU": executor = Executor( fluid.XPUPlace( - int(os.getenv("FLAGS_selected_xpus", "0")))) + int(os.getenv("FLAGS_selected_xpus", "0")) + ) + ) return executor def _init_server(self, *args, **kwargs): @@ -297,8 +336,10 @@ class ParameterServerRuntime(RuntimeBase): model_dirname = None executor = self._get_executor() - if self.role_maker._is_heter_worker( - ) and self.context["valid_strategy"].a_sync_configs["launch_barrier"]: + if ( + self.role_maker._is_heter_worker() + and self.context["valid_strategy"].a_sync_configs["launch_barrier"] + ): # for heter trainer wait server ready wait_server_ready(self.role_maker._get_pserver_endpoints()) executor.run(fluid.default_startup_program()) @@ -310,26 +351,35 @@ class ParameterServerRuntime(RuntimeBase): sparse_varnames = self.compiled_strategy.get_sparse_varname_on_ps(False) sparse_related_optimize_varnames = [] for var_name in sparse_varnames: - sparse_related_optimize_varnames += self.compiled_strategy.get_optimize_varname_on_ps( - var_name) + sparse_related_optimize_varnames += ( + self.compiled_strategy.get_optimize_varname_on_ps(var_name) + ) sparse_related_optimize_varnames = list( - set(sparse_related_optimize_varnames)) + set(sparse_related_optimize_varnames) + ) distribtued_varnames = self.compiled_strategy.get_sparse_varname_on_ps( - True) + True + ) distributed_related_optimize_varnames = [] for var_name in distribtued_varnames: - distributed_related_optimize_varnames += 
self.compiled_strategy.get_optimize_varname_on_ps( - var_name) + distributed_related_optimize_varnames += ( + self.compiled_strategy.get_optimize_varname_on_ps(var_name) + ) distributed_related_optimize_varnames = list( - set(distributed_related_optimize_varnames)) + set(distributed_related_optimize_varnames) + ) remaining_vars = list( filter( ParameterServerRuntime.__exclude_vars( - sparse_varnames + distribtued_varnames + - sparse_related_optimize_varnames + - distributed_related_optimize_varnames), - fluid.default_main_program().list_vars())) + sparse_varnames + + distribtued_varnames + + sparse_related_optimize_varnames + + distributed_related_optimize_varnames + ), + fluid.default_main_program().list_vars(), + ) + ) if not model_dirname: return @@ -338,21 +388,26 @@ class ParameterServerRuntime(RuntimeBase): raise ValueError("There is no directory named '%s'", model_dirname) # load dense - fluid.io.load_vars(executor, - main_program=fluid.default_main_program(), - dirname=model_dirname, - vars=remaining_vars) + fluid.io.load_vars( + executor, + main_program=fluid.default_main_program(), + dirname=model_dirname, + vars=remaining_vars, + ) # load sparse - self._load_sparse_params(executor=executor, - dirname=model_dirname, - varnames=sparse_varnames + - sparse_related_optimize_varnames) + self._load_sparse_params( + executor=executor, + dirname=model_dirname, + varnames=sparse_varnames + sparse_related_optimize_varnames, + ) # load large scale - self._load_distributed_params(dirname=model_dirname, - varnames=distribtued_varnames + - distributed_related_optimize_varnames) + self._load_distributed_params( + dirname=model_dirname, + varnames=distribtued_varnames + + distributed_related_optimize_varnames, + ) def _run_server(self): executor = self._get_executor() @@ -365,8 +420,15 @@ class ParameterServerRuntime(RuntimeBase): def _get_optimizer_status(self, op, param_name): supported_opts = [ - "sgd", "adam", "adagrad", "adamax", "momentum", "lars_momentum", - "rmsprop", "decayed_adagrad", "ftrl" + "sgd", + "adam", + "adagrad", + "adamax", + "momentum", + "lars_momentum", + "rmsprop", + "decayed_adagrad", + "ftrl", ] reshaped_val_map = {} @@ -377,7 +439,9 @@ class ParameterServerRuntime(RuntimeBase): reshaped_val_map["momentum"] = ["velocity_0"] reshaped_val_map["lars_momentum"] = ["velocity_0"] reshaped_val_map["rmsprop"] = [ - "momentum_0", "mean_square_0", "mean_grad_0" + "momentum_0", + "mean_square_0", + "mean_grad_0", ] reshaped_val_map["decayed_adagrad"] = ["moment_0"] reshaped_val_map["ftrl"] = ["squared_0", "linear_0"] @@ -388,8 +452,10 @@ class ParameterServerRuntime(RuntimeBase): if op not in supported_opts: raise ValueError( - "fleet can not support optimizer: {}, only this can be supported: {}" - .format(op, supported_opts)) + "fleet can not support optimizer: {}, only this can be supported: {}".format( + op, supported_opts + ) + ) reshaped_names = [ param_name + "_" + val for val in reshaped_val_map[op] @@ -404,12 +470,17 @@ class ParameterServerRuntime(RuntimeBase): return reshaped_names, origin_names def _get_optimizer_op(self, param_name): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_optimize_ops, + ) opts = _get_optimize_ops(self.origin_main_program) for op in opts: - if "Param" in op.input_names and \ - "LearningRate" in op.input_names and op.input("Param")[0] == param_name: + if ( + "Param" in op.input_names + and "LearningRate" in op.input_names + and 
op.input("Param")[0] == param_name + ): return op def _save_dense_params(self, executor, dirname, context, main_program): @@ -428,27 +499,24 @@ class ParameterServerRuntime(RuntimeBase): optimizer = self._get_optimizer_op(varname) reshaped_varnames, origin_varnames = self._get_optimizer_status( - optimizer.type, varname) + optimizer.type, varname + ) for var_name in [varname] + reshaped_varnames + origin_varnames: var = self.origin_main_program.global_block().vars[var_name] - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self.role_maker._worker_index(), - "shape": - var.shape, - "slice_shapes": - [",".join([str(i) for i in var.shape])], - "slice_varnames": [var.name], - "remote_varnames": [var.name], - "is_sparse": - False, - "endpoints": - var_ctx.split_endpoints(), - "file_path": - os.path.join(dirname, var.name) - }) + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker._worker_index(), + "shape": var.shape, + "slice_shapes": [",".join([str(i) for i in var.shape])], + "slice_varnames": [var.name], + "remote_varnames": [var.name], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints(), + "file_path": os.path.join(dirname, var.name), + }, + ) executor.run(prog) return local_vars @@ -467,7 +535,8 @@ class ParameterServerRuntime(RuntimeBase): optimizer = self._get_optimizer_op(varname) reshaped_varnames, origin_varnames = self._get_optimizer_status( - optimizer.type, varname) + optimizer.type, varname + ) var = self.origin_main_program.global_block().vars[varname] slice_shapes = [] @@ -476,37 +545,34 @@ class ParameterServerRuntime(RuntimeBase): for section in var_ctx.sections(): slice_shapes.append(str(section) + dims1) - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self.role_maker._worker_index(), - "shape": - var.shape, - "slice_shapes": - slice_shapes, - "slice_varnames": - var_ctx.split_varnames(), - "remote_varnames": - var_ctx.split_varnames(), - "is_sparse": - True, - "endpoints": - var_ctx.split_endpoints(), - "pserver_num": - len(self.role_maker._get_pserver_endpoints()), - "file_path": - os.path.join(dirname, var.name) - }) + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker._worker_index(), + "shape": var.shape, + "slice_shapes": slice_shapes, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "is_sparse": True, + "endpoints": var_ctx.split_endpoints(), + "pserver_num": len( + self.role_maker._get_pserver_endpoints() + ), + "file_path": os.path.join(dirname, var.name), + }, + ) for reshaped_varname in reshaped_varnames: - var = self.origin_main_program.global_block( - ).vars[reshaped_varname] + var = self.origin_main_program.global_block().vars[ + reshaped_varname + ] slice_varnames = [] remote_varnames = [] for i in range(len(var_ctx.split_varnames())): - slice_varnames.append("{}.block{}".format( - reshaped_varname, i)) + slice_varnames.append( + "{}.block{}".format(reshaped_varname, i) + ) remote_varnames.append(reshaped_varname) block.append_op( @@ -519,32 +585,31 @@ class ParameterServerRuntime(RuntimeBase): "remote_varnames": remote_varnames, "is_sparse": True, "endpoints": var_ctx.split_endpoints(), - "pserver_num": - len(self.role_maker._get_pserver_endpoints()), - "file_path": os.path.join(dirname, var.name) - }) + "pserver_num": len( + self.role_maker._get_pserver_endpoints() + ), + "file_path": os.path.join(dirname, var.name), + }, + ) for origin_varname in origin_varnames: - var = self.origin_main_program.global_block( - 
).vars[origin_varname] - - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self.role_maker._worker_index(), - "shape": - var.shape, - "slice_shapes": - [",".join([str(i) for i in var.shape])], - "slice_varnames": [origin_varname], - "remote_varnames": [origin_varname], - "is_sparse": - False, - "endpoints": - var_ctx.split_endpoints()[:1], - "file_path": - os.path.join(dirname, var.name) - }) + var = self.origin_main_program.global_block().vars[ + origin_varname + ] + + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker._worker_index(), + "shape": var.shape, + "slice_shapes": [",".join([str(i) for i in var.shape])], + "slice_varnames": [origin_varname], + "remote_varnames": [origin_varname], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints()[:1], + "file_path": os.path.join(dirname, var.name), + }, + ) executor.run(prog) return context.keys() @@ -553,58 +618,71 @@ class ParameterServerRuntime(RuntimeBase): block = prog.global_block() for name, var_ctx in context.items(): - block.append_op(type='checkpoint_notify', - attrs={ - "varname": name, - "mode": mode, - "slice_varnames": var_ctx.split_varnames(), - "remote_varnames": var_ctx.split_varnames(), - "endpoints": var_ctx.split_endpoints(), - "dirname": dirname - }) + block.append_op( + type='checkpoint_notify', + attrs={ + "varname": name, + "mode": mode, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "endpoints": var_ctx.split_endpoints(), + "dirname": dirname, + }, + ) executor.run(prog) return context.keys() - def _save_distributed_persistables(self, executor, dirname, main_program, - mode): + def _save_distributed_persistables( + self, executor, dirname, main_program, mode + ): dense_ctx = self.compiled_strategy.get_communicator_recv_context( - recv_type=1, use_origin_program=True) + recv_type=1, use_origin_program=True + ) sparse_ctx = self.compiled_strategy.get_communicator_recv_context( - recv_type=2, use_origin_program=True) + recv_type=2, use_origin_program=True + ) distributed_ctx = self.compiled_strategy.get_communicator_recv_context( - recv_type=3, use_origin_program=True) + recv_type=3, use_origin_program=True + ) - recv_dense_varnames = self._save_dense_params(executor, dirname, - dense_ctx, main_program) + recv_dense_varnames = self._save_dense_params( + executor, dirname, dense_ctx, main_program + ) - recv_sparse_varnames = self._save_sparse_params(executor, dirname, - sparse_ctx, - main_program) + recv_sparse_varnames = self._save_sparse_params( + executor, dirname, sparse_ctx, main_program + ) recv_distributed_varnames = self._save_distributed_params( - executor, dirname, distributed_ctx, mode) + executor, dirname, distributed_ctx, mode + ) - saved_varnames = recv_dense_varnames + list( - recv_sparse_varnames) + list(recv_distributed_varnames) + saved_varnames = ( + recv_dense_varnames + + list(recv_sparse_varnames) + + list(recv_distributed_varnames) + ) remaining_vars = list( - filter(ParameterServerRuntime.__exclude_vars(saved_varnames), - main_program.list_vars())) - - fluid.io.save_vars(executor, - main_program=main_program, - dirname=dirname, - vars=remaining_vars) - - def _ps_inference_save_persistables(self, - executor, - dirname, - main_program=None, - mode=0, - **kwargs): + filter( + ParameterServerRuntime.__exclude_vars(saved_varnames), + main_program.list_vars(), + ) + ) + + fluid.io.save_vars( + executor, + main_program=main_program, + dirname=dirname, + vars=remaining_vars, + ) + + def 
_ps_inference_save_persistables( + self, executor, dirname, main_program=None, mode=0, **kwargs + ): """ This function filters out all variables with `persistable==True` from the give `main_program` and then saves these variables to the folder `dirname` @@ -634,16 +712,19 @@ class ParameterServerRuntime(RuntimeBase): "in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed" ) - self._save_distributed_persistables(executor, dirname, main_program, - mode) - - def _ps_inference_save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True): + self._save_distributed_persistables( + executor, dirname, main_program, mode + ) + + def _ps_inference_save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + ): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` by the `executor`. @@ -664,14 +745,28 @@ class ParameterServerRuntime(RuntimeBase): raise TypeError( "in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed" ) - fluid.io.save_inference_model(dirname, feeded_var_names, - target_vars, executor, main_program, - None, None, export_for_deployment) + fluid.io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + main_program, + None, + None, + export_for_deployment, + ) else: - fluid.io.save_inference_model(dirname, feeded_var_names, - target_vars, executor, - self.origin_main_program, None, None, - export_for_deployment, True) + fluid.io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + self.origin_main_program, + None, + None, + export_for_deployment, + True, + ) model_basename = "__model__" model_filename = os.path.join(dirname, model_basename) @@ -681,10 +776,9 @@ class ParameterServerRuntime(RuntimeBase): program = Program.parse_from_string(program_desc_str) program._copy_dist_param_info_from(fluid.default_main_program()) - self._ps_inference_save_persistables(executor, - dirname, - program, - mode=0) + self._ps_inference_save_persistables( + executor, dirname, program, mode=0 + ) def _save_inference_model(self, *args, **kwargs): self._ps_inference_save_inference_model(*args, **kwargs) diff --git a/python/paddle/distributed/fleet/runtime/runtime_base.py b/python/paddle/distributed/fleet/runtime/runtime_base.py index 38bb31ce3fc1d66e66e0200c2c4c6488592cb935..2e8bacfbc3b1ded58e63e8d9e93764a0c0090b91 100644 --- a/python/paddle/distributed/fleet/runtime/runtime_base.py +++ b/python/paddle/distributed/fleet/runtime/runtime_base.py @@ -16,7 +16,6 @@ __all__ = [] class RuntimeBase(object): - def __init__(self): pass diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index e9765c9e2e60676c9dddff50529290bd979a47de..5a0be9a1e018f6c317a2e5dcf2d7eb508427a8cd 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -35,8 +35,12 @@ PSERVER_SAVE_SUFFIX = ".shard" def parse_table_class(varname, o_main_program): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op - from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + 
is_distributed_sparse_op, + ) + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + is_sparse_op, + ) for op in o_main_program.global_block().ops: if not is_distributed_sparse_op(op) and not is_sparse_op(op): @@ -44,7 +48,11 @@ def parse_table_class(varname, o_main_program): param_name = op.input("W")[0] - if param_name == varname and op.type == "lookup_table" or op.type == "lookup_table_v2": + if ( + param_name == varname + and op.type == "lookup_table" + or op.type == "lookup_table_v2" + ): if op.has_attr('table_class') and op.attr("table_class") != "none": return op.attr('table_class') else: @@ -90,7 +98,10 @@ def get_default_accessor_proto(accessor, varname, o_main_program): for sgd_param in [accessor.embed_sgd_param, accessor.embedx_sgd_param]: if not sgd_param.HasField("name"): sgd_param.name = "SparseAdaGradSGDRule" - if sgd_param.name == "SparseAdaGradSGDRule" or sgd_param.name == "StdAdaGradSGDRule": + if ( + sgd_param.name == "SparseAdaGradSGDRule" + or sgd_param.name == "StdAdaGradSGDRule" + ): if not sgd_param.adagrad.HasField("learning_rate"): sgd_param.adagrad.learning_rate = 0.05 if not sgd_param.adagrad.HasField("initial_g2sum"): @@ -130,17 +141,20 @@ def check_embedding_dim(accessor, varname, o_main_program): fea_dim = accessor.fea_dim if fea_dim != embedding_dim: raise ValueError( - "The fea_dim is wrong, it will be sparse_embedding_dim: {}, but got {}" - .format(embedding_dim, fea_dim)) + "The fea_dim is wrong, it will be sparse_embedding_dim: {}, but got {}".format( + embedding_dim, fea_dim + ) + ) embedx_dim = accessor.embedx_dim if embedx_dim != embedding_dim - 3: raise ValueError( - "The embedx_dim is wrong, it will be sparse_embedding_dim - 3: {}, but got {}" - .format(embedding_dim - 3, embedx_dim)) + "The embedx_dim is wrong, it will be sparse_embedding_dim - 3: {}, but got {}".format( + embedding_dim - 3, embedx_dim + ) + ) class Accessor: - def __init__(self): self.accessor_class = "" self.optimizer = None @@ -157,12 +171,12 @@ class Accessor: attrs += "\n" if self.optimizer is not None: attrs += self.optimizer.to_string(indent) - return accessor_str.format(conv_indent(indent), attrs, - conv_indent(indent)) + return accessor_str.format( + conv_indent(indent), attrs, conv_indent(indent) + ) class CommonAccessor: - def __init__(self): self.accessor_class = "" self.table_name = None @@ -183,26 +197,45 @@ class CommonAccessor: def define_optimize_map(self): opt_input_map = {} opt_input_map["sgd"] = [("Param", None), ("LearningRate", 1)] - opt_input_map["adam"] = [("Param", None), ("Moment1", None), - ("Moment2", None), ("Beta1Pow", 1), - ("Beta2Pow", 1), ("LearningRate", 1)] - opt_input_map["adam_d2sum"] = [("Param", None), ("D2Sum", None), - ("G2Sum", None), ("Moment", None), - ("MomentDecayRate", 1), - ("AdaDecayRate", 1), ("AdaEpsilon", 1), - ("LearningRate", 1)] + opt_input_map["adam"] = [ + ("Param", None), + ("Moment1", None), + ("Moment2", None), + ("Beta1Pow", 1), + ("Beta2Pow", 1), + ("LearningRate", 1), + ] + opt_input_map["adam_d2sum"] = [ + ("Param", None), + ("D2Sum", None), + ("G2Sum", None), + ("Moment", None), + ("MomentDecayRate", 1), + ("AdaDecayRate", 1), + ("AdaEpsilon", 1), + ("LearningRate", 1), + ] opt_input_map["sum"] = [("Param", None)] - opt_input_map["naive_adagrad"] = [("Param", None), ("G2Sum", 1), - ("LearningRate", 1)] + opt_input_map["naive_adagrad"] = [ + ("Param", None), + ("G2Sum", 1), + ("LearningRate", 1), + ] opt_attr_map = {} opt_attr_map["sgd"] = [] opt_attr_map["sum"] = [] opt_attr_map["naive_adagrad"] = [] 
- opt_attr_map["adam"] = [("beta1", "f"), ("beta2", "f"), - ("epsilon", "f")] - opt_attr_map["adam_d2sum"] = [("beta1", "f"), ("beta2", "f"), - ("epsilon", "f")] + opt_attr_map["adam"] = [ + ("beta1", "f"), + ("beta2", "f"), + ("epsilon", "f"), + ] + opt_attr_map["adam_d2sum"] = [ + ("beta1", "f"), + ("beta2", "f"), + ("epsilon", "f"), + ] opt_init_map = {} opt_init_map["gaussian_random"] = ["seed", "mean", "std"] @@ -215,8 +248,12 @@ class CommonAccessor: self.opt_init_map = opt_init_map def parse_entry(self, varname, o_main_program): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op - from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + is_distributed_sparse_op, + ) + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + is_sparse_op, + ) for op in o_main_program.global_block().ops: if not is_distributed_sparse_op(op) and not is_sparse_op(op): @@ -250,8 +287,10 @@ class CommonAccessor: origin_var_name = value_name for op in o_startup_program.global_block().ops: - if op.type in self.opt_init_map.keys( - ) and origin_var_name == op.output("Out")[0]: + if ( + op.type in self.opt_init_map.keys() + and origin_var_name == op.output("Out")[0] + ): init_attr = [op.type] for attr in self.opt_init_map[op.type]: init_attr.append(str(op.attr(attr))) @@ -259,9 +298,19 @@ class CommonAccessor: break return attr_str - def parse_by_optimizer(self, grad_name, is_sparse, size, single_dim, - compiled_strategy, adam_d2sum): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops + def parse_by_optimizer( + self, + grad_name, + is_sparse, + size, + single_dim, + compiled_strategy, + adam_d2sum, + ): + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_optimize_ops, + ) + param_name = compiled_strategy.grad_name_to_param_name[grad_name] main_program, startup_program = compiled_strategy.get_origin_programs() pserver_id = compiled_strategy.get_role_id() @@ -270,8 +319,9 @@ class CommonAccessor: oop = None for op in optimizer_ops: - if ("Param" in op.input_names) and (op.input("Param")[0] - == param_name): + if ("Param" in op.input_names) and ( + op.input("Param")[0] == param_name + ): oop = op break @@ -311,7 +361,7 @@ class CommonAccessor: for (formal_name, shape) in param_varnames: params.append(formal_name) if self.accessor_class == "adam_d2sum": - #for dims + # for dims if shape is None: if is_sparse: shape = single_dim @@ -319,18 +369,24 @@ class CommonAccessor: shape = self.get_shard(size, pserver_num, pserver_id) dims.append(shape) - #for initializers + # for initializers if formal_name == "Param" or formal_name == "LearningRate": - param = main_program.global_block().vars[oop.input( - formal_name)[0]] - #TODO: for dense learning_rate, can be different from sparse lr - if formal_name == "LearningRate" and param.name != "learning_rate_0": + param = main_program.global_block().vars[ + oop.input(formal_name)[0] + ] + # TODO: for dense learning_rate, can be different from sparse lr + if ( + formal_name == "LearningRate" + and param.name != "learning_rate_0" + ): warnings.warn("will support decay soon") - param = main_program.global_block( - ).vars["learning_rate_0"] + param = main_program.global_block().vars[ + "learning_rate_0" + ] initializer = self.get_initializer_attr( - param.name, startup_program) + param.name, startup_program + ) elif formal_name == "MomentDecayRate": initializer = 
"fill_constant&0.99" elif formal_name == "AdaDecayRate": @@ -346,23 +402,30 @@ class CommonAccessor: initializer = "fill_constant&0" initializers.append(initializer) else: - param = main_program.global_block().vars[oop.input( - formal_name)[0]] - if formal_name == "LearningRate" and param.name != "learning_rate_0": + param = main_program.global_block().vars[ + oop.input(formal_name)[0] + ] + if ( + formal_name == "LearningRate" + and param.name != "learning_rate_0" + ): warnings.warn("will support decay soon") - param = main_program.global_block( - ).vars["learning_rate_0"] + param = main_program.global_block().vars[ + "learning_rate_0" + ] if shape is None: if is_sparse: shape = single_dim else: - shape = self.get_shard(size, pserver_num, - pserver_id) + shape = self.get_shard( + size, pserver_num, pserver_id + ) dims.append(shape) initializer = self.get_initializer_attr( - param.name, startup_program) + param.name, startup_program + ) initializers.append(initializer) for (attr_varname, type_) in attr_varnames: @@ -401,12 +464,12 @@ class CommonAccessor: attrs += "initializers: \"{}\" ".format(initializer) attrs += "\n" - return accessor_str.format(conv_indent(indent), attrs, - conv_indent(indent)) + return accessor_str.format( + conv_indent(indent), attrs, conv_indent(indent) + ) class Tensor: - def __init__(self): self.main_program_id = None self.startup_program_id = None @@ -422,14 +485,15 @@ class Tensor: attrs += "startup_program_id: {} ".format(str(self.startup_program_id)) attrs += "main_program_id: {} ".format(str(self.main_program_id)) attrs += "tensor_table_class: \"{}\" ".format( - str(self.tensor_table_class)) + str(self.tensor_table_class) + ) attrs += "\n" - return program_str.format(conv_indent(indent), attrs, - conv_indent(indent)) + return program_str.format( + conv_indent(indent), attrs, conv_indent(indent) + ) class Table: - def __init__(self): self.id = -1 self.table_class = None @@ -458,9 +522,9 @@ class Table: if self.accessor_proto is not None: accessor_str = "{}accessor {{{}\n{}}}" - accessor_str = accessor_str.format(conv_indent(indent), - self.accessor_proto, - conv_indent(indent)) + accessor_str = accessor_str.format( + conv_indent(indent), self.accessor_proto, conv_indent(indent) + ) attrs += accessor_str + "\n" elif self.accessor is not None: attrs += self.accessor.to_string(indent) @@ -478,7 +542,6 @@ class Table: class Service: - def __init__(self): self.server_class = "BrpcPsServer" self.client_class = "BrpcPsClient" @@ -496,12 +559,12 @@ class Service: attrs += "start_server_port: {} ".format(self.start_server_port) attrs += "server_thread_num: {} ".format(self.server_thread_num) - return service_str.format(conv_indent(indent), attrs, - conv_indent(indent)) + return service_str.format( + conv_indent(indent), attrs, conv_indent(indent) + ) class DownpourServer: - def __init__(self): self.service = None self.tables = [] @@ -526,12 +589,12 @@ class DownpourServer: for table in self.tables: table_strs += "\n" table_strs += table.to_string(indent) - return server_str.format(conv_indent(indent), table_strs, - conv_indent(indent)) + return server_str.format( + conv_indent(indent), table_strs, conv_indent(indent) + ) class Server: - def __init__(self): self.servers = [] @@ -552,7 +615,6 @@ class Server: class DownpourWorker: - def __init__(self): self.tables = [] @@ -569,12 +631,12 @@ class DownpourWorker: table_strs += "\n" table_strs += table.to_string(indent) - return worker_str.format(conv_indent(indent), table_strs, - conv_indent(indent)) + return 
worker_str.format( + conv_indent(indent), table_strs, conv_indent(indent) + ) class Worker: - def __init__(self): self.workers = [] @@ -595,7 +657,6 @@ class Worker: class fsClient: - def __init__(self, proto): self.proto = proto self.uri = proto.uri @@ -605,6 +666,7 @@ class fsClient: def to_string(self): from google.protobuf import text_format + proto_txt = text_format.MessageToString(self.proto) if proto_txt: fs_str = "fs_client_param {{\n{}}}" @@ -614,7 +676,6 @@ class fsClient: class TheOnePSRuntime(RuntimeBase): - def __init__(self): super(TheOnePSRuntime, self).__init__() self._communicator = None @@ -634,8 +695,9 @@ class TheOnePSRuntime(RuntimeBase): def _get_distributed_strategy(self): strategy = None - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \ - StrategyFactory + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, + ) dist_strategy = self.context["valid_strategy"] k_steps = dist_strategy.a_sync_configs["k_steps"] @@ -657,19 +719,24 @@ class TheOnePSRuntime(RuntimeBase): return strategy def build_compiled_startegy(self): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy - - compiled_config = CompileTimeStrategy(self.origin_main_program, - self.origin_main_program, - self.async_strategy, - self.role_maker) + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + CompileTimeStrategy, + ) + + compiled_config = CompileTimeStrategy( + self.origin_main_program, + self.origin_main_program, + self.async_strategy, + self.role_maker, + ) if self.async_strategy.use_ps_gpu: compiled_config.use_ps_gpu = True return compiled_config def _init_worker(self): - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \ - SyncStrategy + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + SyncStrategy, + ) is_sync = self.compiled_strategy.is_sync_mode() worker = self._get_fleet_proto(is_server=False, is_sync=is_sync) @@ -690,7 +757,8 @@ class TheOnePSRuntime(RuntimeBase): def sync_strategy_envs(): kwargs = {} kwargs[ - "pserver_endpoints"] = self.role_maker._get_pserver_endpoints() + "pserver_endpoints" + ] = self.role_maker._get_pserver_endpoints() kwargs["trainer_id"] = self.role_maker._worker_index() return kwargs @@ -712,11 +780,13 @@ class TheOnePSRuntime(RuntimeBase): string_hosts.append(pshost.serialize_to_string()) dense_map = self.compiled_strategy.get_the_one_recv_context( - split_dense_table=self.role_maker._is_heter_parameter_server_mode) + split_dense_table=self.role_maker._is_heter_parameter_server_mode + ) send_ctx = self.compiled_strategy.get_the_one_send_context( split_dense_table=self.role_maker._is_heter_parameter_server_mode, use_origin_program=self.role_maker._is_heter_parameter_server_mode, - ep_list=endpoints) + ep_list=endpoints, + ) trainer_config = self.async_strategy.get_trainer_runtime_config() debug = bool(int(os.getenv("PSERVER_DEBUG", "0"))) @@ -732,7 +802,7 @@ class TheOnePSRuntime(RuntimeBase): kwargs['need_global_step'] = "0" kwargs["trainer_id"] = self.role_maker._role_id() kwargs["trainers"] = self.role_maker._worker_num() - #if self.role_maker._is_heter_worker(): + # if self.role_maker._is_heter_worker(): # kwargs["trainer_id"] += kwargs["trainers"] for table in server.servers[0].tables: @@ -745,13 +815,16 @@ class TheOnePSRuntime(RuntimeBase): kwargs.update(sync_kwargs) from 
paddle.fluid.communicator import Communicator, HeterClient + self._communicator = Communicator( - trainer_config.mode, kwargs, - trainer_config.get_communicator_flags()) - self._communicator.init_with_ctx(send_ctx, dense_map, proto_txt, - string_hosts, fluid.global_scope()) + trainer_config.mode, kwargs, trainer_config.get_communicator_flags() + ) + self._communicator.init_with_ctx( + send_ctx, dense_map, proto_txt, string_hosts, fluid.global_scope() + ) import paddle.distributed.fleet as fleet + fleet.util.barrier() info = self._communicator.get_client_info() if isinstance(info, list) and len(info) > 0: @@ -774,11 +847,14 @@ class TheOnePSRuntime(RuntimeBase): is_test = bool(int(os.getenv("TEST_MODE", "0"))) - if self.role_maker._is_first_worker( - ) and self.role_maker._is_heter_parameter_server_mode: + if ( + self.role_maker._is_first_worker() + and self.role_maker._is_heter_parameter_server_mode + ): # for ps-heter mode load all parameters on first_worker init_params = self.compiled_strategy.get_the_one_recv_context( - split_dense_table=True, use_origin_program=True) + split_dense_table=True, use_origin_program=True + ) else: init_params = dense_map @@ -798,8 +874,10 @@ class TheOnePSRuntime(RuntimeBase): if launch_barrier and launch_barrier_flag: # for trainer wait server ready wait_server_ready(self.role_maker._get_pserver_endpoints()) - if self.role_maker._is_heter_parameter_server_mode and self.role_maker._get_next_trainers( - ) != []: + if ( + self.role_maker._is_heter_parameter_server_mode + and self.role_maker._get_next_trainers() != [] + ): wait_server_ready(self.role_maker._get_next_trainers()) if self.role_maker._is_heter_parameter_server_mode: previous_trainers = [] @@ -808,14 +886,13 @@ class TheOnePSRuntime(RuntimeBase): next_trainers = [] if self.role_maker._get_next_trainers() != []: next_trainers = self.role_maker._get_next_trainers() - self._heter_client = HeterClient(next_trainers, - previous_trainers, - self.role_maker._role_id()) - - def _push_sparse_param(self, - var_name, - table_id=-1, - scope=fluid.global_scope()): + self._heter_client = HeterClient( + next_trainers, previous_trainers, self.role_maker._role_id() + ) + + def _push_sparse_param( + self, var_name, table_id=-1, scope=fluid.global_scope() + ): self._communicator.push_sparse_param(var_name, table_id, scope) def _get_executor(self): @@ -825,20 +902,23 @@ class TheOnePSRuntime(RuntimeBase): heter_device_type = self.role_maker._heter_device_type().upper() if heter_device_type not in ["GPU", "XPU", "CPU"]: raise ValueError( - "Heter Worker Not Support Device {}".format( - device_type)) + "Heter Worker Not Support Device {}".format(device_type) + ) if heter_device_type == "GPU": executor = Executor( fluid.CUDAPlace( - int(os.getenv("FLAGS_selected_gpus", "0")))) + int(os.getenv("FLAGS_selected_gpus", "0")) + ) + ) elif heter_device_type == "XPU": executor = Executor( fluid.XPUPlace( - int(os.getenv("FLAGS_selected_xpus", "0")))) + int(os.getenv("FLAGS_selected_xpus", "0")) + ) + ) return executor def _get_fleet_proto(self, is_server, is_sync, **kwargs): - def _build_merge_accessor(ctx): accessor = Accessor() accessor.accessor_class = "CommMergeAccessor" @@ -872,7 +952,8 @@ class TheOnePSRuntime(RuntimeBase): trainer_num = self.compiled_strategy.get_trainers() if self.role_maker._is_heter_parameter_server_mode: trainer_num += len( - self.role_maker._get_heter_worker_endpoints()) + self.role_maker._get_heter_worker_endpoints() + ) common.trainer_num = trainer_num common.attrs = "" common.dims = [] @@ -918,27 
+999,32 @@ class TheOnePSRuntime(RuntimeBase): for table_name in tensor_table_dict: if tensor_table_dict[table_name]["startup_program"] != None: tensor_table_dict[table_name][ - "startup_program_id"] = program_idx + "startup_program_id" + ] = program_idx self._server_sub_program.append( - tensor_table_dict[table_name]["startup_program"].desc) + tensor_table_dict[table_name]["startup_program"].desc + ) program_idx += 1 if tensor_table_dict[table_name]["main_program"] != None: tensor_table_dict[table_name][ - "main_program_id"] = program_idx + "main_program_id" + ] = program_idx self._server_sub_program.append( - tensor_table_dict[table_name]["main_program"].desc) + tensor_table_dict[table_name]["main_program"].desc + ) program_idx += 1 # Todo: Hard code for lr_decay table apply table id - new_table = _build_tensor_table(len(tables), - tensor_table_dict[table_name]) + new_table = _build_tensor_table( + len(tables), tensor_table_dict[table_name] + ) tables.append(new_table) return tables def _get_tables(): send_ctx = self.compiled_strategy.get_the_one_send_context( use_origin_program=True, - split_dense_table=self.role_maker. - _is_heter_parameter_server_mode) + split_dense_table=self.role_maker._is_heter_parameter_server_mode, + ) tables = [] for idx, (name, ctx) in enumerate(send_ctx.items()): @@ -953,14 +1039,18 @@ class TheOnePSRuntime(RuntimeBase): table.type = "PS_SPARSE_TABLE" table.shard_num = 256 - common.table_name = self.compiled_strategy.grad_name_to_param_name[ - ctx.origin_varnames()[0]] + common.table_name = ( + self.compiled_strategy.grad_name_to_param_name[ + ctx.origin_varnames()[0] + ] + ) if self.compiled_strategy.is_geo_mode(): table.table_class = "MemorySparseGeoTable" else: all_table_proto = self.context[ - "user_defined_strategy"].sparse_table_configs + "user_defined_strategy" + ].sparse_table_configs table_proto = all_table_proto.add() for proto in all_table_proto: if proto.table_name == common.table_name: @@ -970,11 +1060,13 @@ class TheOnePSRuntime(RuntimeBase): table.table_class = table_proto.table_class else: table.table_class = parse_table_class( - common.table_name, self.origin_main_program) + common.table_name, self.origin_main_program + ) if table.table_class != 'MemorySparseTable': table.table_class = 'MemorySparseTable' warnings.warn( - "The PS mode must use MemorySparseTable.") + "The PS mode must use MemorySparseTable." + ) if table_proto.HasField("shard_num"): table.shard_num = table_proto.shard_num @@ -988,15 +1080,21 @@ class TheOnePSRuntime(RuntimeBase): warnings.warn( "The accessor of sparse table is not set, use default value." 
) - get_default_accessor_proto(table_proto.accessor, - common.table_name, - self.origin_main_program) - check_embedding_dim(table_proto.accessor, - common.table_name, - self.origin_main_program) + get_default_accessor_proto( + table_proto.accessor, + common.table_name, + self.origin_main_program, + ) + check_embedding_dim( + table_proto.accessor, + common.table_name, + self.origin_main_program, + ) from google.protobuf import text_format + table.accessor_proto = text_format.MessageToString( - table_proto.accessor) + table_proto.accessor + ) else: table.type = "PS_DENSE_TABLE" table.table_class = "MemoryDenseTable" @@ -1005,14 +1103,18 @@ class TheOnePSRuntime(RuntimeBase): adam_d2sum = self.context["user_defined_strategy"].adam_d2sum common.parse_by_optimizer( - ctx.origin_varnames()[0], ctx.is_sparse(), + ctx.origin_varnames()[0], + ctx.is_sparse(), ctx.sections()[0], ctx.sections()[1] if ctx.is_sparse() else 1, - self.compiled_strategy, adam_d2sum) + self.compiled_strategy, + adam_d2sum, + ) if ctx.is_sparse(): - common.parse_entry(common.table_name, - self.origin_main_program) + common.parse_entry( + common.table_name, self.origin_main_program + ) if is_sync: common.sync = "true" @@ -1071,7 +1173,8 @@ class TheOnePSRuntime(RuntimeBase): server = self._get_fleet_proto(is_server=True, is_sync=is_sync) proto_txt = str(server) fs_client = fsClient( - self.context["user_defined_strategy"].fs_client_param) + self.context["user_defined_strategy"].fs_client_param + ) proto_txt = proto_txt + "\n" + fs_client.to_string() debug = bool(int(os.getenv("PSERVER_DEBUG", "0"))) @@ -1085,10 +1188,13 @@ class TheOnePSRuntime(RuntimeBase): string_hosts.append(pshost.serialize_to_string()) self._server = fluid.core.DistFleetWrapper() - self._server.init_server(proto_txt, string_hosts, role_id, trainers, - self._server_sub_program) + self._server.init_server( + proto_txt, string_hosts, role_id, trainers, self._server_sub_program + ) - from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, + ) dist_varnames = get_sparse_tablenames(self.origin_main_program, True) sparse_varnames = get_sparse_tablenames(self.origin_main_program, False) @@ -1101,8 +1207,10 @@ class TheOnePSRuntime(RuntimeBase): for var_name in var_names: if var_name not in distributed_varnames: raise ValueError( - "fleet.init server can only load sparse variables in {}" - .format(distributed_varnames)) + "fleet.init server can only load sparse variables in {}".format( + distributed_varnames + ) + ) load_varnames = var_names if dirname is None or not load_varnames: @@ -1132,19 +1240,22 @@ class TheOnePSRuntime(RuntimeBase): def _stop_worker(self): self._communicator.stop() if self.role_maker._is_heter_parameter_server_mode: - assert self._heter_client != None, "heter client should not be None in heterps mode" + assert ( + self._heter_client != None + ), "heter client should not be None in heterps mode" self._heter_client.stop() - #executor = self._get_executor() - #executor.close() + # executor = self._get_executor() + # executor.close() @staticmethod def __exclude_vars(exclude_var_names=[]): - def is_valid(var): if var.name in exclude_var_names: return False - from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_varname_parts, + ) origin_varname, _, _ = _get_varname_parts(var.name) if 
origin_varname.endswith("@GRAD"): @@ -1153,9 +1264,11 @@ class TheOnePSRuntime(RuntimeBase): if origin_varname == "learning_rate_0": return False - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -1168,11 +1281,16 @@ class TheOnePSRuntime(RuntimeBase): model_path = os.path.join(dirname, "dnn_plugin") return model_path - def _save_sparse_params(self, executor, dirname, context, main_program, - mode): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames + def _save_sparse_params( + self, executor, dirname, context, main_program, mode + ): + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, + ) + distributed_varnames = get_sparse_tablenames( - self.compiled_strategy.origin_main_program, True) + self.compiled_strategy.origin_main_program, True + ) values = [] model_path = self._get_inference_model_path(dirname) for id, names in context.items(): @@ -1188,23 +1306,24 @@ class TheOnePSRuntime(RuntimeBase): # self._worker.save_all_model(dirname, mode) return values - def _save_distributed_persistables(self, - executor, - dirname, - main_program, - mode=0): + def _save_distributed_persistables( + self, executor, dirname, main_program, mode=0 + ): denses = self.compiled_strategy.get_the_one_recv_context( is_dense=True, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) + use_origin_program=True, + ) sparses = self.compiled_strategy.get_the_one_recv_context( is_dense=False, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) + use_origin_program=True, + ) - sparse_varnames = self._save_sparse_params(executor, dirname, sparses, - main_program, mode) + sparse_varnames = self._save_sparse_params( + executor, dirname, sparses, main_program, mode + ) recv_dense_varnames = [] for id, names in denses.items(): @@ -1214,24 +1333,25 @@ class TheOnePSRuntime(RuntimeBase): saved_varnames = sparse_varnames remaining_vars = list( - filter(TheOnePSRuntime.__exclude_vars(saved_varnames), - main_program.list_vars())) + filter( + TheOnePSRuntime.__exclude_vars(saved_varnames), + main_program.list_vars(), + ) + ) import paddle + for var in remaining_vars: # if var.name not in recv_dense_varnames: # continue tensor = var.get_value() - paddle.save(tensor, - os.path.join(dirname, var.name), - use_binary_format=True) - - def _ps_inference_save_persistables(self, - executor, - dirname, - main_program=None, - mode=0, - **kwargs): + paddle.save( + tensor, os.path.join(dirname, var.name), use_binary_format=True + ) + + def _ps_inference_save_persistables( + self, executor, dirname, main_program=None, mode=0, **kwargs + ): """ This function filters out all variables with `persistable==True` from the give `main_program` and then saves these variables to the folder `dirname` @@ -1250,7 +1370,8 @@ class TheOnePSRuntime(RuntimeBase): if not isinstance(executor, Executor): raise TypeError( - "in fleet.save() function, executor must be as Executor type") + "in fleet.save() function, executor must be as Executor type" + ) if main_program is None: main_program = self.compiled_strategy.get_origin_ps_main_program() @@ -1265,14 +1386,16 @@ 
class TheOnePSRuntime(RuntimeBase): # mode) self._worker.save_all_model(dirname, mode) - def _ps_inference_save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True, - mode=0): + def _ps_inference_save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + mode=0, + ): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` by the `executor`. @@ -1285,10 +1408,14 @@ class TheOnePSRuntime(RuntimeBase): if not isinstance(executor, Executor): raise TypeError( - "in fleet.save() function, executor must be as Executor type") + "in fleet.save() function, executor must be as Executor type" + ) import paddle - program = self.origin_main_program if main_program is None else main_program + + program = ( + self.origin_main_program if main_program is None else main_program + ) if isinstance(program, CompiledProgram): raise TypeError( @@ -1299,8 +1426,9 @@ class TheOnePSRuntime(RuntimeBase): program.global_block().var(name) for name in feeded_var_names ] - infer_program = paddle.static.normalize_program(program, feed_vars, - target_vars) + infer_program = paddle.static.normalize_program( + program, feed_vars, target_vars + ) infer_program._copy_dist_param_info_from(program) @@ -1312,29 +1440,38 @@ class TheOnePSRuntime(RuntimeBase): sparses = self.compiled_strategy.get_the_one_recv_context( is_dense=False, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) - sparse_names = self._save_sparse_params(executor, dirname, sparses, - main_program, mode) + use_origin_program=True, + ) + sparse_names = self._save_sparse_params( + executor, dirname, sparses, main_program, mode + ) denses = self.compiled_strategy.get_the_one_recv_context( is_dense=True, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) + use_origin_program=True, + ) # TODO(zhaocaibei123): for GEO: should call GeoCommunicator::RecvDense self._communicator.pull_dense(denses) generate_vars = self.context[ - "user_defined_strategy"].trainer_desc_configs["stat_var_names"] + "user_defined_strategy" + ].trainer_desc_configs["stat_var_names"] generate_vars = [var for var in generate_vars] remaining_vars = list( - filter(TheOnePSRuntime.__exclude_vars(sparse_names), - infer_program.list_vars())) + filter( + TheOnePSRuntime.__exclude_vars(sparse_names), + infer_program.list_vars(), + ) + ) for var in remaining_vars: tensor = var.get_value() - paddle.save(tensor, - os.path.join(model_path, var.name), - use_binary_format=True) + paddle.save( + tensor, + os.path.join(model_path, var.name), + use_binary_format=True, + ) def _save_inference_model(self, *args, **kwargs): self._ps_inference_save_inference_model(*args, **kwargs) @@ -1343,9 +1480,13 @@ class TheOnePSRuntime(RuntimeBase): self._ps_inference_save_persistables(*args, **kwargs) def _load_sparse_params(self, dirname, context, main_program, mode): - from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames + from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, + ) + distributed_varnames = get_sparse_tablenames( - self.compiled_strategy.origin_main_program, True) + self.compiled_strategy.origin_main_program, True + ) values = [] for id, names in context.items(): if names[0] not in distributed_varnames: @@ 
-1356,10 +1497,9 @@ class TheOnePSRuntime(RuntimeBase): values.extend(names) return values - def _ps_inference_load_inference_model(self, - dirname, - mode=0, - main_program=None): + def _ps_inference_load_inference_model( + self, dirname, mode=0, main_program=None + ): if main_program is None: main_program = self.compiled_strategy.get_origin_ps_main_program() @@ -1371,14 +1511,17 @@ class TheOnePSRuntime(RuntimeBase): denses = self.compiled_strategy.get_the_one_recv_context( is_dense=True, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) + use_origin_program=True, + ) sparses = self.compiled_strategy.get_the_one_recv_context( is_dense=False, split_dense_table=self.role_maker._is_heter_parameter_server_mode, - use_origin_program=True) + use_origin_program=True, + ) - sparse_varnames = self._load_sparse_params(dirname, sparses, - main_program, mode) + sparse_varnames = self._load_sparse_params( + dirname, sparses, main_program, mode + ) recv_dense_varnames = [] for id, names in denses.items(): @@ -1387,14 +1530,18 @@ class TheOnePSRuntime(RuntimeBase): loaded_varnames = sparse_varnames remaining_vars = list( - filter(TheOnePSRuntime.__exclude_vars(loaded_varnames), - main_program.list_vars())) + filter( + TheOnePSRuntime.__exclude_vars(loaded_varnames), + main_program.list_vars(), + ) + ) if dirname.startswith("afs:") or dirname.startswith("hdfs:"): model_path = "./dnn_plugin" else: model_path = os.path.join(dirname, "dnn_plugin") import paddle + for var in remaining_vars: if var.name not in recv_dense_varnames: continue @@ -1421,13 +1568,14 @@ class TheOnePSRuntime(RuntimeBase): else: threshold = 0 import paddle.distributed.fleet as fleet + fleet.util.barrier() if self.role_maker._is_first_worker(): sparses = self.compiled_strategy.get_the_one_recv_context( is_dense=False, - split_dense_table=self.role_maker. 
- _is_heter_parameter_server_mode, - use_origin_program=True) + split_dense_table=self.role_maker._is_heter_parameter_server_mode, + use_origin_program=True, + ) for id, names in sparses.items(): self._worker.shrink_sparse_table(id, threshold) diff --git a/python/paddle/distributed/fleet/scaler.py b/python/paddle/distributed/fleet/scaler.py index 583c2819d8df0b2b14ee7b8b81d93d06e22e9aef..60bc82016802c0f183fad59e43662dde7b21960b 100644 --- a/python/paddle/distributed/fleet/scaler.py +++ b/python/paddle/distributed/fleet/scaler.py @@ -23,12 +23,12 @@ from paddle import _legacy_C_ops def distributed_scaler(scaler): - def unscale_method(self, optimizer): if not self._enable: return if getattr(optimizer, '_param_groups', None) and isinstance( - optimizer._param_groups[0], dict): + optimizer._param_groups[0], dict + ): param_grads = [] param_grads_fp16 = [] param_grads_fp32 = [] @@ -36,38 +36,47 @@ def distributed_scaler(scaler): for param in group['params']: if param._grad_ivar() is not None: param_grads.append(param._grad_ivar()) - if param._grad_ivar( - ).dtype == core.VarDesc.VarType.FP16: + if ( + param._grad_ivar().dtype + == core.VarDesc.VarType.FP16 + ): param_grads_fp16.append(param._grad_ivar()) else: param_grads_fp32.append(param._grad_ivar()) else: param_grads = [ - param._grad_ivar() for param in optimizer._parameter_list + param._grad_ivar() + for param in optimizer._parameter_list if param._grad_ivar() is not None ] param_grads_fp16 = [ - param._grad_ivar() for param in optimizer._parameter_list - if (param._grad_ivar() is not None) and ( - param._grad_ivar().dtype == core.VarDesc.VarType.FP16) + param._grad_ivar() + for param in optimizer._parameter_list + if (param._grad_ivar() is not None) + and (param._grad_ivar().dtype == core.VarDesc.VarType.FP16) ] param_grads_fp32 = [ - param._grad_ivar() for param in optimizer._parameter_list - if (param._grad_ivar() is not None) and ( - param._grad_ivar().dtype == core.VarDesc.VarType.FP32) + param._grad_ivar() + for param in optimizer._parameter_list + if (param._grad_ivar() is not None) + and (param._grad_ivar().dtype == core.VarDesc.VarType.FP32) ] temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_)) temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_)) if len(param_grads_fp16): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp16, - self._scale, - param_grads_fp16, - temp_found_inf_fp16) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp16, + self._scale, + param_grads_fp16, + temp_found_inf_fp16, + ) if len(param_grads_fp32): - _legacy_C_ops.check_finite_and_unscale(param_grads_fp32, - self._scale, - param_grads_fp32, - temp_found_inf_fp32) + _legacy_C_ops.check_finite_and_unscale( + param_grads_fp32, + self._scale, + param_grads_fp32, + temp_found_inf_fp32, + ) self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0 is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32") @@ -75,9 +84,9 @@ def distributed_scaler(scaler): # TODO(shenliang03) Since dp allreduce in the optimizer is # after the gradscaler, check_finite needs to synchronize global # information. In the future, we should use check_group to speed. 
- paddle.distributed.all_reduce(is_found_inf, - op=paddle.distributed.ReduceOp.MAX, - group=None) + paddle.distributed.all_reduce( + is_found_inf, op=paddle.distributed.ReduceOp.MAX, group=None + ) self._found_inf = is_found_inf.numpy()[0] # Only data_parallel doesn't need to modify scaler diff --git a/python/paddle/distributed/fleet/utils/__init__.py b/python/paddle/distributed/fleet/utils/__init__.py index 93fc890d05af5b6a7e3bcfa51be0e115271e929c..340370e5e55ea301bbeeb5254bb122aee5e15460 100644 --- a/python/paddle/distributed/fleet/utils/__init__.py +++ b/python/paddle/distributed/fleet/utils/__init__.py @@ -22,14 +22,14 @@ import paddle from . import log_util # noqa: F401 from . import hybrid_parallel_util # noqa: F401 -__all__ = [ #noqa - "LocalFS", "recompute", "DistributedInfer", "HDFSClient" -] +__all__ = ["LocalFS", "recompute", "DistributedInfer", "HDFSClient"] # noqa -@deprecated(since="2.4.0", - update_to="paddle.distributed.fleet.recompute", - level=1, - reason="Please use new recompute API(fleet.recompute) ") +@deprecated( + since="2.4.0", + update_to="paddle.distributed.fleet.recompute", + level=1, + reason="Please use new recompute API(fleet.recompute) ", +) def recompute(function, *args, **kwargs): return fleet.recompute.recompute(function, *args, **kwargs) diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py index cf0a6475d0e57c1679f36f93d41d10707b468a2b..9daddcdb8568ab5603634747be880ddc79015c69 100644 --- a/python/paddle/distributed/fleet/utils/fs.py +++ b/python/paddle/distributed/fleet/utils/fs.py @@ -47,7 +47,6 @@ class FSShellCmdAborted(ExecuteError): class FS(object): - @abc.abstractmethod def ls_dir(self, fs_path): raise NotImplementedError @@ -171,7 +170,8 @@ class LocalFS(FS): client.delete("test_mkdirs") """ assert not os.path.isfile(fs_path), "{} is already a file".format( - fs_path) + fs_path + ) os.system("mkdir -p {}".format(fs_path)) def rename(self, fs_src_path, fs_dst_path): @@ -379,9 +379,7 @@ class LocalFS(FS): def _handle_errors(max_time_out=None): - def decorator(f): - @functools.wraps(f) def handler(*args, **kwargs): o = args[0] @@ -400,16 +398,20 @@ def _handle_errors(max_time_out=None): # important: only ExecuteError need to retry except ExecuteError as e: if time.time() - start >= time_out: - raise FSTimeOut("args:{} timeout:{}".format( - args, - time.time() - start)) + raise FSTimeOut( + "args:{} timeout:{}".format( + args, time.time() - start + ) + ) time.sleep(inter) if time.time() - last_print_time > 30: - print("hadoop operator timeout:args:{} timeout:{}".format( - args, - time.time() - start)) + print( + "hadoop operator timeout:args:{} timeout:{}".format( + args, time.time() - start + ) + ) last_print_time = time.time() return handler @@ -443,11 +445,12 @@ class HDFSClient(FS): """ def __init__( - self, - hadoop_home, - configs, - time_out=5 * 60 * 1000, # ms - sleep_inter=1000): # ms + self, + hadoop_home, + configs, + time_out=5 * 60 * 1000, # ms + sleep_inter=1000, + ): # ms self.pre_commands = [] hadoop_bin = '%s/bin/hadoop' % hadoop_home self.pre_commands.append(hadoop_bin) @@ -463,7 +466,8 @@ class HDFSClient(FS): self._sleep_inter = sleep_inter self._base_cmd = " ".join(self.pre_commands) self._bd_err_re = re.compile( - r'\s?responseErrorMsg\s?\:.*, errorCode\:\s?[0-9]+, path\:') + r'\s?responseErrorMsg\s?\:.*, errorCode\:\s?[0-9]+, path\:' + ) def _run_cmd(self, cmd, redirect_stderr=False, retry_times=5): exe_cmd = "{} -{}".format(self._base_cmd, cmd) @@ -771,8 +775,9 @@ class 
HDFSClient(FS): procs = [] for i in range(multi_processes): process_datas = self._split_files(all_files, i, multi_processes) - p = multiprocessing.Process(target=__subprocess_upload, - args=(fs_path, process_datas)) + p = multiprocessing.Process( + target=__subprocess_upload, args=(fs_path, process_datas) + ) procs.append(p) p.start() @@ -841,8 +846,9 @@ class HDFSClient(FS): procs = [] for i in range(multi_processes): process_datas = self._split_files(all_files, i, multi_processes) - p = multiprocessing.Process(target=__subprocess_download, - args=(local_path, process_datas)) + p = multiprocessing.Process( + target=__subprocess_download, args=(local_path, process_datas) + ) procs.append(p) p.start() @@ -938,7 +944,8 @@ class HDFSClient(FS): if test_exists: if not self.is_exist(fs_src_path): raise FSFileNotExistsError( - "{} is not exists".format(fs_src_path)) + "{} is not exists".format(fs_src_path) + ) if self.is_exist(fs_dst_path): raise FSFileExistsError("{} exists already".format(fs_dst_path)) @@ -954,8 +961,7 @@ class HDFSClient(FS): if ret != 0: raise ExecuteError(cmd) except Exception as e: - if not self.is_exist(fs_src_path) and \ - self.is_exist(fs_dst_path): + if not self.is_exist(fs_src_path) and self.is_exist(fs_dst_path): return raise e @@ -1103,7 +1109,7 @@ class HDFSClient(FS): trainer_files = [[]] * trainers begin = 0 for i in range(trainers): - trainer_files[i] = files[begin:begin + blocks[i]] + trainer_files[i] = files[begin : begin + blocks[i]] begin += blocks[i] return trainer_files[trainer_id] @@ -1121,13 +1127,15 @@ class HDFSClient(FS): file_list = [] - #concat filelist can speed up 'hadoop ls' + # concat filelist can speed up 'hadoop ls' str_concat = "" for path in path_list: str_concat += path + " " - cmd = "ls " + str_concat + " | awk '{if ($8 != \"\") {print $5\" \"$8 }}'" + cmd = ( + "ls " + str_concat + " | awk '{if ($8 != \"\") {print $5\" \"$8 }}'" + ) ret, lines = self._run_cmd(cmd) - if (len(lines) == 0): + if len(lines) == 0: logger.warning("list_files empty, path[%s]" % path_list) return [] for line in lines: @@ -1155,10 +1163,7 @@ class AFSClient(FS): client.ls_dir("hdfs:/test_hdfs_client") """ - def __init__( - self, - time_out=5 * 60 * 1000, # ms - sleep_inter=1000): # ms + def __init__(self, time_out=5 * 60 * 1000, sleep_inter=1000): # ms # ms self._fs = core.AfsWrapper() self._time_out = time_out @@ -1392,8 +1397,9 @@ class AFSClient(FS): procs = [] for i in range(multi_processes): process_datas = self._split_files(all_files, i, multi_processes) - p = multiprocessing.Process(target=__subprocess_download, - args=(local_path, process_datas)) + p = multiprocessing.Process( + target=__subprocess_download, args=(local_path, process_datas) + ) procs.append(p) p.start() @@ -1448,7 +1454,8 @@ class AFSClient(FS): if test_exists: if not self.is_exist(fs_src_path): raise FSFileNotExistsError( - "{} is not exists".format(fs_src_path)) + "{} is not exists".format(fs_src_path) + ) if self.is_exist(fs_dst_path): raise FSFileExistsError("{} exists already".format(fs_dst_path)) @@ -1552,7 +1559,7 @@ class AFSClient(FS): trainer_files = [[]] * trainers begin = 0 for i in range(trainers): - trainer_files[i] = files[begin:begin + blocks[i]] + trainer_files[i] = files[begin : begin + blocks[i]] begin += blocks[i] return trainer_files[trainer_id] diff --git a/python/paddle/distributed/fleet/utils/http_server.py b/python/paddle/distributed/fleet/utils/http_server.py index a1251c46f3c897afd4cd15cceb7ef21e347c7271..1dd78136d98093c7ee85f0a63d5d9347723d9442 100644 --- 
a/python/paddle/distributed/fleet/utils/http_server.py +++ b/python/paddle/distributed/fleet/utils/http_server.py @@ -35,9 +35,9 @@ def get_logger(name, level, fmt): return logger -_http_server_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_http_server_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): @@ -167,7 +167,8 @@ class KVServer: start server until user calls stop to let it quit. """ self.listen_thread = threading.Thread( - target=lambda: self.http_server.serve_forever()) + target=lambda: self.http_server.serve_forever() + ) self.listen_thread.start() def stop(self): diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index fd81c5b11f4a02929d2729fd6306355a6b3ac243..0c5bff02ed820d83957a8c89176afbb4cc554311 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -181,15 +181,17 @@ class HybridParallelInferenceHelper(object): print(res) """ - def __init__(self, - startup_program, - main_program, - num_mp=1, - num_pp=1, - micro_batch_size=1, - beam_size=1, - init_comm=True, - role_maker=None): + def __init__( + self, + startup_program, + main_program, + num_mp=1, + num_pp=1, + micro_batch_size=1, + beam_size=1, + init_comm=True, + role_maker=None, + ): assert isinstance(startup_program, Program) assert isinstance(main_program, Program) @@ -225,7 +227,8 @@ class HybridParallelInferenceHelper(object): if role_maker is None: self.role_maker = fleet.base.role_maker.PaddleCloudRoleMaker( - is_collective=True) + is_collective=True + ) else: if isinstance(role_maker, fleet.base.role_maker.RoleMakerBase): assert role_maker._is_collective == True @@ -249,7 +252,8 @@ class HybridParallelInferenceHelper(object): self.global_nranks = self.nranks arr = np.arange(0, self.num_pp * self.num_mp).reshape( - [self.num_pp, self.num_mp]) + [self.num_pp, self.num_mp] + ) ipp, imp = np.where(arr == self.rank) ipp = ipp[0] imp = imp[0] @@ -269,31 +273,43 @@ class HybridParallelInferenceHelper(object): num_pp = len(dev_ids) num_pp = max(1, num_pp) assert num_pp == self.num_pp, 'num_pp: {}, self.num_pp: {}'.format( - num_pp, self.num_pp) + num_pp, self.num_pp + ) collective_helper = fleet.meta_optimizers.common.CollectiveHelper( - self.role_maker, wait_port=False) + self.role_maker, wait_port=False + ) # Create global rings - collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - self.global_endpoints, - self.global_rank, - self.global_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + self.global_endpoints, + self.global_rank, + self.global_ring_id, + True, + self.global_ring_id, + True, + ) # Create mp rings if self.num_mp > 1: mp_endpoints = [self.endpoints[mp_idx] for mp_idx in self.mp_group] mp_rank = [ - idx for idx, mp_idx in enumerate(self.mp_group) + idx + for idx, mp_idx in enumerate(self.mp_group) if mp_idx == self.rank ][0] - collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - mp_endpoints, mp_rank, - self.mp_ring_id, True, - self.global_ring_id, True) + collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + mp_endpoints, + mp_rank, + self.mp_ring_id, + True, + 
self.global_ring_id, + True, + ) # Create pipeline rings if self.num_pp > 1: @@ -305,20 +321,32 @@ class HybridParallelInferenceHelper(object): second_node = self.pp_group[pair[1]] if self.rank != first_node and self.rank != second_node: collective_helper._init_communicator( - self._startup_program, None, None, None, None, False, - self.global_ring_id, True) + self._startup_program, + None, + None, + None, + None, + False, + self.global_ring_id, + True, + ) continue pipeline_endpoints = [ - self.endpoints[first_node], self.endpoints[second_node] + self.endpoints[first_node], + self.endpoints[second_node], ] pipeline_rank = 0 if self.rank == first_node else 1 - collective_helper._init_communicator(self._startup_program, - self.current_endpoint, - pipeline_endpoints, - pipeline_rank, ring_id, - False, self.global_ring_id, - True) + collective_helper._init_communicator( + self._startup_program, + self.current_endpoint, + pipeline_endpoints, + pipeline_rank, + ring_id, + False, + self.global_ring_id, + True, + ) def _get_input_output_info(self, block): ''' @@ -345,7 +373,10 @@ class HybridParallelInferenceHelper(object): for each_block in self._main_program.blocks: for op in each_block.ops: for var_name in op.input_arg_names: - if var_name not in params or var_name in self._param_device_map: + if ( + var_name not in params + or var_name in self._param_device_map + ): continue device = op.attr(self._op_device_key) @@ -374,7 +405,8 @@ class HybridParallelInferenceHelper(object): if op.type == "while": sub_block_id = int(op.attr('sub_block').id) sub_used_var_names = self._split_program( - program, stage, sub_block_id) + program, stage, sub_block_id + ) used_var_names.update(sub_used_var_names) @@ -409,25 +441,24 @@ class HybridParallelInferenceHelper(object): return used_var_names - -# def _find_post_op(self, index, var_name): -# """ -# Find the post op that has variable named var_name as input. -# """ -# # bugfix for uniform hybrid parallelism -# if '.cast_fp32' in var_name: -# var_name = var_name.replace('.cast_fp32', '') -# if '.cast_fp16' in var_name: -# var_name = var_name.replace('.cast_fp16', '') - -# post_ops = self._input_var_to_op[var_name] -# if post_ops == None: return None -# result_op = None -# for post_op, post_idx in reversed(post_ops): -# if post_idx > index: -# result_op = post_op -# break -# return result_op + # def _find_post_op(self, index, var_name): + # """ + # Find the post op that has variable named var_name as input. + # """ + # # bugfix for uniform hybrid parallelism + # if '.cast_fp32' in var_name: + # var_name = var_name.replace('.cast_fp32', '') + # if '.cast_fp16' in var_name: + # var_name = var_name.replace('.cast_fp16', '') + + # post_ops = self._input_var_to_op[var_name] + # if post_ops == None: return None + # result_op = None + # for post_op, post_idx in reversed(post_ops): + # if post_idx > index: + # result_op = post_op + # break + # return result_op def _find_prev_op(self, index, var_name): """ @@ -435,7 +466,8 @@ class HybridParallelInferenceHelper(object): variable named var_name. 
""" prev_ops = self._output_var_to_op[var_name] - if prev_ops == None: return None + if prev_ops == None: + return None result_op = None for prev_op, prev_idx in reversed(prev_ops): if prev_idx < index: @@ -481,26 +513,31 @@ class HybridParallelInferenceHelper(object): pre_stage_id = None for op in block.ops: - assert op.has_attr(self._op_role_key), ("{} has no {} set .".format( - op.type, self._op_role_key)) + assert op.has_attr(self._op_role_key), "{} has no {} set .".format( + op.type, self._op_role_key + ) op_role = op.attr(self._op_role_key) - assert op_role == int(self._op_role.Forward), ( - "Only forward is supported for inference.") + assert op_role == int( + self._op_role.Forward + ), "Only forward is supported for inference." if not op._has_kernel(op.type): assert op.type in [ - "while", "conditional_block" - ], ("The only supported op without kernel is while.") + "while", + "conditional_block", + ], "The only supported op without kernel is while." sub_block_id = op.attr('sub_block').id sub_block = block.program.block(sub_block_id) self._check_validation(sub_block) - assert op.has_attr( - self._op_device_key), ("{} has no {} set.".format( - op.type, self._op_device_key)) + assert op.has_attr(self._op_device_key), "{} has no {} set.".format( + op.type, self._op_device_key + ) device = op.attr(self._op_device_key) - assert device, ("{} has no {} set.".format(op.type, - self._op_device_key)) - if device.split(':')[1] == "all": continue + assert device, "{} has no {} set.".format( + op.type, self._op_device_key + ) + if device.split(':')[1] == "all": + continue dev_type = device.split(':')[0] assert dev_type == self._device @@ -522,14 +559,17 @@ class HybridParallelInferenceHelper(object): for index, op in enumerate(list(block.ops)): cur_device = op.attr(self._op_device_key) - if cur_device.split(':')[-1] == "all": continue + if cur_device.split(':')[-1] == "all": + continue for var_name in op.input_arg_names: if not block.has_var(var_name) and block._find_var_recursive( - var_name): + var_name + ): continue var = block.var(var_name) # skip data var - if var.is_data: continue + if var.is_data: + continue prev_device = None generate_ops = self._output_var_to_op.get(var_name) if generate_ops is None: @@ -540,21 +580,24 @@ class HybridParallelInferenceHelper(object): prev_op = self._find_prev_op(index, var_name) if not prev_device: - prev_device = prev_op.attr(self._op_device_key) \ - if prev_op else None + prev_device = ( + prev_op.attr(self._op_device_key) if prev_op else None + ) if prev_device is None or prev_device.split(":")[-1] == "all": continue - if prev_device == cur_device: continue + if prev_device == cur_device: + continue if var_name not in input_var_to_device: input_var_to_device[var_name] = [] if (cur_device, prev_device) in input_var_to_device[var_name]: continue - assert self._device == cur_device.split( - ':')[0], "More than one device type found." + assert ( + self._device == cur_device.split(':')[0] + ), "More than one device type found." 
device_type = cur_device.split(':')[0] + ':' def _insert_send_recv(cur_id, prev_id): @@ -568,7 +611,8 @@ class HybridParallelInferenceHelper(object): _insert_send_recv(cur_id - 1, prev_id) _insert_send_recv(cur_id, cur_id - 1) input_var_to_device[var_name].append( - (cur_dev, prev_dev)) + (cur_dev, prev_dev) + ) return assert cur_id - prev_id == 1 @@ -577,7 +621,10 @@ class HybridParallelInferenceHelper(object): op_role = op.attr(self._op_role_key) var = block.vars[var_name] pair = (prev_id, cur_id) - if is_while_block and pair not in self._pipeline_pair_in_while: + if ( + is_while_block + and pair not in self._pipeline_pair_in_while + ): self._pipeline_pair_in_while.append(pair) # 1000 is just a magic number @@ -599,14 +646,16 @@ class HybridParallelInferenceHelper(object): self._op_role_key: op_role, 'use_calc_stream': True, 'peer': 1, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) extra_index_info['index'] += 1 var_shape = list(var.shape) if var_shape[0] < 0: if is_while_block: - var_shape[ - 0] = self.micro_batch_size * self.beam_size + var_shape[0] = ( + self.micro_batch_size * self.beam_size + ) else: var_shape[0] = self.micro_batch_size @@ -621,17 +670,24 @@ class HybridParallelInferenceHelper(object): self._op_role_key: op_role, 'use_calc_stream': True, 'peer': 0, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) extra_index_info['index'] += 1 - _insert_send_recv(int(cur_device.split(':')[1]), - int(prev_device.split(':')[1])) + _insert_send_recv( + int(cur_device.split(':')[1]), + int(prev_device.split(':')[1]), + ) block._sync_with_cpp() def _insert_sendrecv_ops_in_while_block( - self, block, sync_in_while_lastpp2firstpp_var_names, - sync_in_while_var_names, stage): + self, + block, + sync_in_while_lastpp2firstpp_var_names, + sync_in_while_var_names, + stage, + ): dev_ids = [] for pair in self._pipeline_pair_in_while: prev_id, cur_id = pair @@ -646,13 +702,16 @@ class HybridParallelInferenceHelper(object): first_id = min(dev_ids) last_id = max(dev_ids) - assert len(block.ops) > 2, "It must have more than 2 ops in while sub block, " \ - "layers.assign(layers.cast(cond_int, dtype='bool'), cond) must at end of while block, " \ + assert len(block.ops) > 2, ( + "It must have more than 2 ops in while sub block, " + "layers.assign(layers.cast(cond_int, dtype='bool'), cond) must at end of while block, " "because nccl cannot send bool dtype var" + ) index = len(block.ops) - 2 for prev_id in dev_ids: - if prev_id == cur_id: continue + if prev_id == cur_id: + continue assert cur_id > prev_id pair = (prev_id, cur_id) @@ -667,7 +726,10 @@ class HybridParallelInferenceHelper(object): ring_id = self._pp_ring_map[pair_key] if cur_id == last_id and prev_id == first_id: - var_names = sync_in_while_lastpp2firstpp_var_names + sync_in_while_var_names + var_names = ( + sync_in_while_lastpp2firstpp_var_names + + sync_in_while_var_names + ) else: var_names = sync_in_while_var_names @@ -679,19 +741,24 @@ class HybridParallelInferenceHelper(object): type='send_v2', inputs={'X': var}, attrs={ - self._op_device_key: - self._device + ':' + str(cur_id), + self._op_device_key: self._device + + ':' + + str(cur_id), self._op_role_key: int(self._op_role.Forward), 'use_calc_stream': True, 'peer': 0, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) else: var_shape = list(var.shape) print(var_name) if len(var.shape) > 0: - var_shape[0] = self.micro_batch_size if var_shape[ - 0] < 0 else var_shape[0] + var_shape[0] = ( + self.micro_batch_size + if var_shape[0] < 0 + else var_shape[0] + ) 
block._insert_op_without_sync( index=index, type='recv_v2', @@ -699,13 +766,15 @@ class HybridParallelInferenceHelper(object): attrs={ 'out_shape': var_shape, 'dtype': var.dtype, - self._op_device_key: - self._device + ':' + str(prev_id), + self._op_device_key: self._device + + ':' + + str(prev_id), self._op_role_key: int(self._op_role.Forward), 'use_calc_stream': True, 'peer': 1, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) index += 1 block._sync_with_cpp() @@ -721,13 +790,16 @@ class HybridParallelInferenceHelper(object): if op.type == 'while': sub_block_id = op.attr('sub_block').id num_while += 1 - if sub_block_id: return op, self._main_program.block(sub_block_id) + if sub_block_id: + return op, self._main_program.block(sub_block_id) return None, None - def gen_infer_program(self, - sync_in_while_lastpp2firstpp_var_names=None, - sync_in_while_var_names=None, - debug=False): + def gen_infer_program( + self, + sync_in_while_lastpp2firstpp_var_names=None, + sync_in_while_var_names=None, + debug=False, + ): """ Generate inference program. Params: @@ -764,15 +836,19 @@ class HybridParallelInferenceHelper(object): while_op, while_block = self._get_while_block() if while_block: out_var_to_op, in_var_to_op = self._get_input_output_info( - while_block) + while_block + ) self._output_var_to_op = out_var_to_op self._input_var_to_op = in_var_to_op self._insert_sendrecv_ops_for_boundaries(while_block, True) self._insert_sendrecv_ops_in_while_block( - while_block, sync_in_while_lastpp2firstpp_var_names, - sync_in_while_var_names, self._stage) + while_block, + sync_in_while_lastpp2firstpp_var_names, + sync_in_while_var_names, + self._stage, + ) # step3: split programs self._split_program(self._startup_program, self._stage, 0) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py index 93b9ce4ef799ca8637f4a3369c4aab0867d2fc39..fec3e455f8ab239eea64381fecd39847cb4e022a 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py @@ -15,7 +15,11 @@ from paddle import framework import paddle from paddle.fluid import core -from paddle.fluid.dygraph.parallel import _split_tensors, sync_params_buffers, build_groups +from paddle.fluid.dygraph.parallel import ( + _split_tensors, + sync_params_buffers, + build_groups, +) from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph from .log_util import logger @@ -30,7 +34,8 @@ def _apply_collective_grads(parameters, comm_group): for param in parameters: if param.trainable and (param._grad_ivar() is not None): g_var = param._grad_ivar() - assert not g_var._is_sparse( + assert ( + not g_var._is_sparse() ), "Now, it doesn't support sparse parameters" grad_vars.append(g_var) assert g_var not in grad_var_set @@ -38,19 +43,20 @@ def _apply_collective_grads(parameters, comm_group): coalesced_grads_and_vars = build_groups(grad_vars, 128 * 1024 * 1024) - nranks = paddle.distributed.get_world_size( - ) if comm_group is None else comm_group.nranks + nranks = ( + paddle.distributed.get_world_size() + if comm_group is None + else comm_group.nranks + ) for coalesced_grad, _, _ in coalesced_grads_and_vars: # need to div nranks div_factor = paddle.to_tensor(nranks, dtype=coalesced_grad.dtype) paddle.fluid.framework._dygraph_tracer().trace_op( type="elementwise_div", - inputs={ - 'X': coalesced_grad, - 'Y': div_factor - }, + inputs={'X': coalesced_grad, 'Y': div_factor}, outputs={'Out': 
coalesced_grad}, - attrs={'axis': -1}) + attrs={'axis': -1}, + ) paddle.distributed.all_reduce(coalesced_grad, group=comm_group) _split_tensors(coalesced_grads_and_vars) @@ -63,7 +69,8 @@ def _apply_collective_grads_eager(parameters, comm_group): for param in parameters: if param.trainable and (param._grad_ivar() is not None): g_var = param._grad_ivar() - assert not g_var.is_sparse( + assert ( + not g_var.is_sparse() ), "Now, it doesn't support sparse parameters" grad_vars.append(g_var) assert g_var not in grad_var_set @@ -71,8 +78,11 @@ def _apply_collective_grads_eager(parameters, comm_group): coalesced_grads_and_vars = build_groups(grad_vars, 128 * 1024 * 1024) - nranks = paddle.distributed.get_world_size( - ) if comm_group is None else comm_group.nranks + nranks = ( + paddle.distributed.get_world_size() + if comm_group is None + else comm_group.nranks + ) for coalesced_grad, _, _ in coalesced_grads_and_vars: # need to div nranks coalesced_grad.scale_(1.0 / nranks) @@ -87,20 +97,18 @@ def _broadcast_data_help(data, shape, dtype, hcg): mp_rank = hcg.get_model_parallel_rank() shape_gpu = paddle.to_tensor(shape, dtype="int32") - paddle.distributed.broadcast(shape_gpu, - src=src_rank, - group=model_parallel_group, - sync_op=True) + paddle.distributed.broadcast( + shape_gpu, src=src_rank, group=model_parallel_group, sync_op=True + ) if mp_rank != 0: input_data = paddle.zeros(shape_gpu, dtype=dtype) else: input_data = data - paddle.distributed.broadcast(input_data, - src=src_rank, - group=model_parallel_group, - sync_op=True) + paddle.distributed.broadcast( + input_data, src=src_rank, group=model_parallel_group, sync_op=True + ) if mp_rank != 0: if in_dygraph_mode(): @@ -109,7 +117,8 @@ def _broadcast_data_help(data, shape, dtype, hcg): else: data.value().get_tensor()._clear() data.value().get_tensor()._share_data_with( - input_data.value().get_tensor()) + input_data.value().get_tensor() + ) def broadcast_input_data(hcg, *inputs, **kwargs): @@ -117,8 +126,11 @@ def broadcast_input_data(hcg, *inputs, **kwargs): for v in inputs: if isinstance(v, (core.VarBase, core.eager.Tensor)): with framework.no_grad(): - if "gpu" in cur_device and in_dygraph_mode() \ - and not v.place.is_gpu_place(): + if ( + "gpu" in cur_device + and in_dygraph_mode() + and not v.place.is_gpu_place() + ): v_gpu = v.cuda(int(cur_device.split(":")[1])) v._clear_data() v_gpu._share_buffer_to(v) @@ -129,8 +141,11 @@ def broadcast_input_data(hcg, *inputs, **kwargs): for k, v in kwargs.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): with framework.no_grad(): - if "gpu" in cur_device and in_dygraph_mode() \ - and not v.place.is_gpu_place(): + if ( + "gpu" in cur_device + and in_dygraph_mode() + and not v.place.is_gpu_place() + ): v_gpu = v.cuda(int(cur_device.split(":")[1])) v._clear_data() v_gpu._share_buffer_to(v) @@ -144,26 +159,27 @@ def broadcast_input_data(hcg, *inputs, **kwargs): def broadcast_mp_parameters(model, hcg): model_parallel_group = hcg.get_model_parallel_group() src_rank = hcg.get_model_parallel_group_src_rank() - sync_params_buffers(model, - model_parallel_group, - src_rank, - is_model_parallel=True) + sync_params_buffers( + model, model_parallel_group, src_rank, is_model_parallel=True + ) def broadcast_dp_parameters(model, hcg): data_parallel_group = hcg.get_data_parallel_group() src_rank = hcg.get_data_parallel_group_src_rank() - sync_params_buffers(model, - data_parallel_group, - src_rank, - is_model_parallel=False) + sync_params_buffers( + model, data_parallel_group, src_rank, 
is_model_parallel=False + ) def fused_allreduce_gradients(parameter_list, hcg): data_parallel_group = None if hcg is None else hcg.get_data_parallel_group() logger.debug("dp start fuse allreduce gradients") - apply_func = _apply_collective_grads_eager if in_dygraph_mode( - ) else _apply_collective_grads + apply_func = ( + _apply_collective_grads_eager + if in_dygraph_mode() + else _apply_collective_grads + ) with framework.no_grad(): apply_func(parameter_list, data_parallel_group) @@ -182,7 +198,8 @@ def sharding_reduce_gradients(parameter_list, hcg): paddle.distributed.all_reduce( param.grad, group=hcg.get_sharding_parallel_group(), - sync_op=True) + sync_op=True, + ) elif _in_legacy_dygraph(): g_var = param._grad_ivar() @@ -195,20 +212,20 @@ def sharding_reduce_gradients(parameter_list, hcg): outputs={'Out': g_var}, attrs={ 'ring_id': hcg.get_sharding_parallel_group().id, - 'use_calc_stream': True - }) + 'use_calc_stream': True, + }, + ) # grad / sharding_rank - div_factor = paddle.to_tensor(sharding_nrank, - dtype=g_var.dtype) + div_factor = paddle.to_tensor( + sharding_nrank, dtype=g_var.dtype + ) paddle.fluid.framework._dygraph_tracer().trace_op( type="elementwise_div", - inputs={ - 'X': g_var, - 'Y': div_factor - }, + inputs={'X': g_var, 'Y': div_factor}, outputs={'Out': g_var}, - attrs={'axis': -1}) + attrs={'axis': -1}, + ) def broadcast_sharding_parameters(model, hcg): @@ -216,7 +233,6 @@ def broadcast_sharding_parameters(model, hcg): logger.debug("sharding start init parameters sync") sharding_parallel_group = hcg.get_sharding_parallel_group() src_rank = hcg.get_sharding_parallel_group_src_rank() - sync_params_buffers(model, - sharding_parallel_group, - src_rank, - is_model_parallel=False) + sync_params_buffers( + model, sharding_parallel_group, src_rank, is_model_parallel=False + ) diff --git a/python/paddle/distributed/fleet/utils/internal_storage.py b/python/paddle/distributed/fleet/utils/internal_storage.py index 89011a6fe1c17df9a7979b66d413ebd3a77fe942..e0f2ef0a04640d9de39412cd0ff7f3682cb22bab 100644 --- a/python/paddle/distributed/fleet/utils/internal_storage.py +++ b/python/paddle/distributed/fleet/utils/internal_storage.py @@ -47,10 +47,11 @@ class InternalStorage: # The actual flat tensor size = [size] if isinstance(size, int) else size if convert_cpu: - value = np.zeros( - size, - dtype=np.float16) if Type.fp16.value == dtype else np.zeros( - size, dtype=np.float32) + value = ( + np.zeros(size, dtype=np.float16) + if Type.fp16.value == dtype + else np.zeros(size, dtype=np.float32) + ) self.buffer = core.VarBase(value=value, place=core.CPUPlace()) else: self.buffer = paddle.zeros(size, dtype=dtype) @@ -59,16 +60,25 @@ class InternalStorage: """ Move the underlying buffer """ - assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it" - assert (dtype == Type.fp32.value - or Type.fp16.value), "Conversion type is not supported now" + assert ( + self.buffer is not None + ), "Cannot move a collapsed bucket, please rebuild it" + assert ( + dtype == Type.fp32.value or Type.fp16.value + ), "Conversion type is not supported now" - dev_id = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + dev_id = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) if self._device != device: - tmp_buffer = self.buffer.cuda( - dev_id) if device == "gpu" else self.buffer.cpu() + tmp_buffer = ( + self.buffer.cuda(dev_id) + if device == "gpu" + else self.buffer.cpu() + ) for param in self._params: 
param.clear_gradient(False) param._gradient_set_empty(False) @@ -106,17 +116,18 @@ class ParamStorage(InternalStorage): Add new parameters to the InternalStorage. Params becomes a view of this InternalStorage buffer. """ - assert all([ - id(param) not in self._param_ids for param in trainable_params - ]), "The same param cannot be checked in twice" + assert all( + [id(param) not in self._param_ids for param in trainable_params] + ), "The same param cannot be checked in twice" assert self.buffer is not None self.param2align = param2align cpu_param_shape = list() for param in trainable_params: - p_shape = self._add_param_as_view(param, param2align[param.name], - convert_gpu) + p_shape = self._add_param_as_view( + param, param2align[param.name], convert_gpu + ) cpu_param_shape.append(p_shape) if convert_gpu: @@ -127,8 +138,9 @@ class ParamStorage(InternalStorage): self._fill = 0 for idx, param in enumerate(trainable_params): - self._convert_buffer(param, cpu_param_shape[idx], - param2align[param.name]) + self._convert_buffer( + param, cpu_param_shape[idx], param2align[param.name] + ) self._params.append(param) self._param_ids.append(id(param)) @@ -138,7 +150,8 @@ class ParamStorage(InternalStorage): assert ( param.dtype == self.buffer.dtype ), "Different types for the InternalStorage and the param, cannot proceed: {} - {}".format( - param.dtype, self.buffer.dtype) + param.dtype, self.buffer.dtype + ) var_end = self._fill + np.prod(param.shape) offset = var_end + align @@ -152,11 +165,15 @@ class ParamStorage(InternalStorage): param.stop_gradient = origin_state # Copy the current param value - dev_id = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + dev_id = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) with device_guard(dev_id, "cpu"): tmp_var = core.VarBase( - tensor=self.buffer._slice(self._fill, var_end)) + tensor=self.buffer._slice(self._fill, var_end) + ) if convert_gpu: param_cpu = param.cpu() param.value().get_tensor()._clear() @@ -199,13 +216,9 @@ class GradStorage(InternalStorage): This is a basic class to simplify the handling of gradient InternalStorages """ - def __init__(self, - size, - dtype, - device, - destination, - parm2align, - convert_cpu=False): + def __init__( + self, size, dtype, device, destination, parm2align, convert_cpu=False + ): if isinstance(size, np.int64): size = size.tolist() super().__init__(size, dtype, device, convert_cpu) @@ -219,22 +232,21 @@ class GradStorage(InternalStorage): self.sent = False def reset_checked_in(self): - """ Reset the counter of the parameter grads which have been checked in - """ + """Reset the counter of the parameter grads which have been checked in""" self.params_checked_in = 0 self.sent = False @property def all_checked_in(self): - """ Judge all the expected gradient check-in happened """ + """Judge all the expected gradient check-in happened""" return len(self._params) == self.params_checked_in def can_add_grad_view(self, param, align): - """ Is there enough InternalStorage to add this parameter gradient, and whether this param have already checked in. 
- """ - return self._fill + np.prod( - param.shape) + align <= self._max_size and id( - param) not in self._param_ids + """Is there enough InternalStorage to add this parameter gradient, and whether this param have already checked in.""" + return ( + self._fill + np.prod(param.shape) + align <= self._max_size + and id(param) not in self._param_ids + ) def to(self, device, dtype=None, keep_alignment=True): """ @@ -254,9 +266,9 @@ class GradStorage(InternalStorage): Add a new parameter gradient to the InternalStorage. Param.grad becomes a view of this InternalStorage buffer. """ - assert id( - param - ) not in self._param_ids, "The same gradients cannot be checked in twice" + assert ( + id(param) not in self._param_ids + ), "The same gradients cannot be checked in twice" self._add_grad_as_view(param, align) self._params.append(param) @@ -304,9 +316,9 @@ class GradStorage(InternalStorage): @fluid.dygraph.no_grad def _add_grad_as_view(self, param, align): - assert np.prod( - self.buffer.shape - ) > 0, "Cannot add a gradient to a released InternalStorage, please rebuild" + assert ( + np.prod(self.buffer.shape) > 0 + ), "Cannot add a gradient to a released InternalStorage, please rebuild" assert param.dtype == self.buffer.dtype grad_end = self._fill + np.prod(param.shape) @@ -314,8 +326,11 @@ class GradStorage(InternalStorage): assert offset <= np.prod(self.buffer.shape) # Copy the current grad value to InternalStorage - dev_id = 0 if paddle.get_device() == "cpu" else int( - paddle.get_device().split(":")[1]) + dev_id = ( + 0 + if paddle.get_device() == "cpu" + else int(paddle.get_device().split(":")[1]) + ) if self._device == "cpu": with device_guard(dev_id, self._device): tmp_var = core.VarBase(self.buffer._slice(self._fill, grad_end)) diff --git a/python/paddle/distributed/fleet/utils/log_util.py b/python/paddle/distributed/fleet/utils/log_util.py index 34b1caa863cb25dd3fbf15e915af61202cd84f00..07bcbea4b4109090dd7af26847695e1b11ea90b8 100644 --- a/python/paddle/distributed/fleet/utils/log_util.py +++ b/python/paddle/distributed/fleet/utils/log_util.py @@ -67,7 +67,8 @@ def layer_to_str(base, *args, **kwargs): if kwargs: name += ", " if kwargs: - name += ", ".join("{}={}".format(key, str(value)) - for key, value in kwargs.items()) + name += ", ".join( + "{}={}".format(key, str(value)) for key, value in kwargs.items() + ) name += ")" return name diff --git a/python/paddle/distributed/fleet/utils/ps_util.py b/python/paddle/distributed/fleet/utils/ps_util.py index fbf713bed936345fc0a6da9adeb7d0f0b727e257..9b079d64bb5307c155d787ac13832bccb64a0ddc 100644 --- a/python/paddle/distributed/fleet/utils/ps_util.py +++ b/python/paddle/distributed/fleet/utils/ps_util.py @@ -29,21 +29,21 @@ class DistributedInfer: if main_program: self.origin_main_program = main_program.clone() else: - self.origin_main_program = paddle.static.default_main_program( - ).clone() + self.origin_main_program = ( + paddle.static.default_main_program().clone() + ) if startup_program: self.origin_startup_program = startup_program else: - self.origin_startup_program = paddle.static.default_startup_program( + self.origin_startup_program = ( + paddle.static.default_startup_program() ) self.sparse_table_maps = None - def init_distributed_infer_env(self, - exe, - loss, - role_maker=None, - dirname=None): + def init_distributed_infer_env( + self, exe, loss, role_maker=None, dirname=None + ): import paddle.distributed.fleet as fleet if fleet.fleet._runtime_handle is None: @@ -52,10 +52,12 @@ class DistributedInfer: fake_optimizer = 
paddle.optimizer.SGD() strategy = fleet.DistributedStrategy() strategy.a_sync = True - optimizer = fleet.distributed_optimizer(fake_optimizer, - strategy=strategy) - optimizer.minimize(loss, - startup_program=self.origin_startup_program) + optimizer = fleet.distributed_optimizer( + fake_optimizer, strategy=strategy + ) + optimizer.minimize( + loss, startup_program=self.origin_startup_program + ) if fleet.is_server(): fleet.init_server(dirname=dirname) @@ -88,36 +90,45 @@ class DistributedInfer: if dirname is not None and exe is not None: all_persist_vars = [ - v for v in self.origin_main_program.list_vars() + v + for v in self.origin_main_program.list_vars() if paddle.static.io.is_persistable(v) ] - dense_persist_vars = [(v.name, v) for v in all_persist_vars - if v.name not in sparse_table_maps] + dense_persist_vars = [ + (v.name, v) + for v in all_persist_vars + if v.name not in sparse_table_maps + ] need_load_vars = [ - v[1] for v in dense_persist_vars + v[1] + for v in dense_persist_vars if os.path.isfile(os.path.join(dirname, v[0])) ] - paddle.static.load_vars(exe, - dirname, - main_program=self.origin_main_program, - vars=need_load_vars) + paddle.static.load_vars( + exe, + dirname, + main_program=self.origin_main_program, + vars=need_load_vars, + ) def get_dist_infer_program(self): varname2tables = self._get_sparse_table_map() - convert_program = self._convert_program(self.origin_main_program, - varname2tables) + convert_program = self._convert_program( + self.origin_main_program, varname2tables + ) return convert_program def _convert_program(self, main_program, varname2tables): - def distributed_ops_pass(program): SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"} def _get_pull_sparse_ops(_program): pull_sparse_ops = {} for op in _program.global_block().ops: - if op.type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + op.type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] ops = pull_sparse_ops.get(param_name, []) ops.append(op) @@ -125,7 +136,6 @@ class DistributedInfer: return pull_sparse_ops def _pull_sparse_fuse(_program, pull_sparse_ops): - def dag_check_up_and_reorder(program, inputs, outputs): global_block = program.global_block() min_output_index = len(global_block.ops) @@ -151,7 +161,8 @@ class DistributedInfer: if out_var.name in ins: output_indexes[idx] = 1 min_output_index = min( - min_output_index, idx) + min_output_index, idx + ) for i in range(len(global_block.ops)): if input_indexes[i] == 1 and output_indexes[i] == 1: @@ -162,8 +173,9 @@ class DistributedInfer: if min_output_index < max_input_index: move_ops = [] - for i in range(min_output_index + 1, - len(input_indexes)): + for i in range( + min_output_index + 1, len(input_indexes) + ): if input_indexes[i] == 1: move_ops.append((global_block.ops[i], i)) for i, op in enumerate(move_ops): @@ -179,8 +191,9 @@ class DistributedInfer: for k in range(0, len(op.input_names)): ins = op.input(op.input_names[k]) op_inputs.append(ins) - for j in range(pos - 1, min_output_index - 1, - -1): + for j in range( + pos - 1, min_output_index - 1, -1 + ): op1 = global_block.ops[j] if op1 in visited: continue @@ -209,27 +222,35 @@ class DistributedInfer: queue.sort() for index in queue: desc = global_block.desc._insert_op( - min_output_index) + min_output_index + ) desc.copy_from(global_block.ops[index].desc) global_block.desc._remove_op( - index + 1, index + 2) + index + 1, index + 2 + ) 
global_block.ops[index].desc = desc insert_op = global_block.ops.pop(index) input_state = input_indexes.pop(index) output_state = output_indexes.pop(index) - global_block.ops.insert(min_output_index, - insert_op) - input_indexes.insert(min_output_index, - input_state) - output_indexes.insert(min_output_index, - output_state) + global_block.ops.insert( + min_output_index, insert_op + ) + input_indexes.insert( + min_output_index, input_state + ) + output_indexes.insert( + min_output_index, output_state + ) min_output_index = min_output_index + 1 assert global_block.desc.op_size() == len( - global_block.ops) + global_block.ops + ) for i in range(len(global_block.ops)): - assert global_block.desc.op( - i) == global_block.ops[i].desc + assert ( + global_block.desc.op(i) + == global_block.ops[i].desc + ) for param, ops in pull_sparse_ops.items(): all_ops = program.global_block().ops @@ -243,8 +264,10 @@ class DistributedInfer: if w.name not in varname2tables.keys(): raise ValueError( - "can not find variable {}, please check your configuration" - .format(w.name)) + "can not find variable {}, please check your configuration".format( + w.name + ) + ) table_id = varname2tables[w.name] @@ -264,8 +287,9 @@ class DistributedInfer: program.global_block()._remove_op(idx) inputs_idxs = [-1] * len(inputs) - outputs_idxs = [len(program.global_block().ops) + 1 - ] * len(outputs) + outputs_idxs = [len(program.global_block().ops) + 1] * len( + outputs + ) for idx, op in enumerate(program.global_block().ops): for i in range(0, len(op.output_names)): @@ -273,13 +297,15 @@ class DistributedInfer: for in_id, in_var in enumerate(inputs): if in_var.name in outs: inputs_idxs[in_id] = max( - idx, inputs_idxs[in_id]) + idx, inputs_idxs[in_id] + ) for i in range(0, len(op.input_names)): ins = op.input(op.input_names[i]) for out_id, out_var in enumerate(outputs): if out_var.name in ins: outputs_idxs[out_id] = min( - idx, outputs_idxs[out_id]) + idx, outputs_idxs[out_id] + ) if min(outputs_idxs) - max(inputs_idxs) >= 1: distributed_idx = max(inputs_idxs) + 1 @@ -287,18 +313,16 @@ class DistributedInfer: program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": inputs, - 'W': w - }, + inputs={"Ids": inputs, 'W': w}, outputs={"Outputs": outputs}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "is_test": True, - "lookup_table_version": op_type - }) + "lookup_table_version": op_type, + }, + ) else: raise ValueError( "something wrong with Fleet, submit a issue is recommended" diff --git a/python/paddle/distributed/launch/context/__init__.py b/python/paddle/distributed/launch/context/__init__.py index 9936b534edfcd9486991065b1b50e1bce9dcc6f0..037fc0efbc51be8c732b757d9570b869f0c11125 100644 --- a/python/paddle/distributed/launch/context/__init__.py +++ b/python/paddle/distributed/launch/context/__init__.py @@ -22,7 +22,6 @@ import logging class Context(object): - def __init__(self, enable_plugin=True): self.args, self.unknown_args = parse_args() self.envs = fetch_envs() @@ -54,8 +53,9 @@ class Context(object): return False if len(self.unknown_args) > 0: - self.logger.warning("Compatible mode enable with args {}".format( - self.unknown_args)) + self.logger.warning( + "Compatible mode enable with args {}".format(self.unknown_args) + ) return True return False @@ -75,7 +75,8 @@ class Context(object): logger = logging.getLogger("LAUNCH") logger.setLevel(self.args.log_level.upper() or level) formatter = logging.Formatter( - 
fmt='%(name)s %(levelname)s %(asctime)s %(message)s') + fmt='%(name)s %(levelname)s %(asctime)s %(message)s' + ) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) diff --git a/python/paddle/distributed/launch/context/args_envs.py b/python/paddle/distributed/launch/context/args_envs.py index 104ad1b789f54b2959b34acfe5bef5be91f52944..2013ba6a3d7baa170494b1151bf08c07bbe10688 100644 --- a/python/paddle/distributed/launch/context/args_envs.py +++ b/python/paddle/distributed/launch/context/args_envs.py @@ -52,126 +52,135 @@ def parse_args(): base_group = parser.add_argument_group("Base Parameters") - base_group.add_argument("--master", - type=str, - default=None, - help="the master/rendezvous server, ip:port") - - base_group.add_argument("--legacy", - type=bool, - default=False, - help="use legacy launch") - - base_group.add_argument("--rank", - type=int, - default=-1, - help="the node rank") - - base_group.add_argument("--log_level", - type=str, - default="INFO", - help="log level. Default INFO") - - base_group.add_argument("--nnodes", - type=str, - default="1", - help="the number of nodes, i.e. pod/node number") - - base_group.add_argument("--nproc_per_node", - type=int, - default=None, - help="the number of processes in a pod") + base_group.add_argument( + "--master", + type=str, + default=None, + help="the master/rendezvous server, ip:port", + ) + + base_group.add_argument( + "--legacy", type=bool, default=False, help="use legacy launch" + ) + + base_group.add_argument( + "--rank", type=int, default=-1, help="the node rank" + ) + + base_group.add_argument( + "--log_level", type=str, default="INFO", help="log level. Default INFO" + ) + + base_group.add_argument( + "--nnodes", + type=str, + default="1", + help="the number of nodes, i.e. pod/node number", + ) + + base_group.add_argument( + "--nproc_per_node", + type=int, + default=None, + help="the number of processes in a pod", + ) base_group.add_argument( "--log_dir", type=str, default="log", - help="the path for each process's log. Default ./log") - base_group.add_argument("--run_mode", - type=str, - default=None, - help="run mode of the job, collective/ps/ps-heter") - - base_group.add_argument("--job_id", - type=str, - default="default", - help="unique id of the job. Default default") - - base_group.add_argument("--devices", - "--gpus", - "--npus", - "--xpus", - type=str, - default=None, - help="accelerate devices. as --gpus,npus,xpus") + help="the path for each process's log. Default ./log", + ) + base_group.add_argument( + "--run_mode", + type=str, + default=None, + help="run mode of the job, collective/ps/ps-heter", + ) + + base_group.add_argument( + "--job_id", + type=str, + default="default", + help="unique id of the job. Default default", + ) + + base_group.add_argument( + "--devices", + "--gpus", + "--npus", + "--xpus", + type=str, + default=None, + help="accelerate devices. as --gpus,npus,xpus", + ) base_group.add_argument("--host", type=str, default=None, help="host ip") - base_group.add_argument("--ips", - type=str, - default=None, - help="nodes ips, e.g. 10.10.1.1,10.10.1.2") + base_group.add_argument( + "--ips", + type=str, + default=None, + help="nodes ips, e.g. 
10.10.1.1,10.10.1.2", + ) - base_group.add_argument("--start_port", - type=int, - default=6070, - help="fix port start with") + base_group.add_argument( + "--start_port", type=int, default=6070, help="fix port start with" + ) - base_group.add_argument("training_script", - type=str, - help="the full path of py script," - "followed by arguments for the " - "training script") + base_group.add_argument( + "training_script", + type=str, + help="the full path of py script," + "followed by arguments for the " + "training script", + ) base_group.add_argument('training_script_args', nargs=REMAINDER) ps_group = parser.add_argument_group("Parameter-Server Parameters") # for parameter server - ps_group.add_argument("--servers", - type=str, - default='', - help="servers endpoints full list") - ps_group.add_argument("--trainers", - type=str, - default='', - help="trainers endpoints full list") - - ps_group.add_argument("--trainer_num", - type=int, - default=None, - help="number of trainers") - ps_group.add_argument("--server_num", - type=int, - default=None, - help="number of servers") - ps_group.add_argument("--gloo_port", - type=int, - default=6767, - help="gloo http port") - ps_group.add_argument("--with_gloo", - type=str, - default="1", - help="use gloo or not") + ps_group.add_argument( + "--servers", type=str, default='', help="servers endpoints full list" + ) + ps_group.add_argument( + "--trainers", type=str, default='', help="trainers endpoints full list" + ) + + ps_group.add_argument( + "--trainer_num", type=int, default=None, help="number of trainers" + ) + ps_group.add_argument( + "--server_num", type=int, default=None, help="number of servers" + ) + ps_group.add_argument( + "--gloo_port", type=int, default=6767, help="gloo http port" + ) + ps_group.add_argument( + "--with_gloo", type=str, default="1", help="use gloo or not" + ) # parameter elastic mode elastic_group = parser.add_argument_group("Elastic Parameters") - elastic_group.add_argument("--max_restart", - type=int, - default=3, - help="the times can restart. Default 3") + elastic_group.add_argument( + "--max_restart", + type=int, + default=3, + help="the times can restart. 
Default 3", + ) elastic_group.add_argument( "--elastic_level", type=int, default=-1, - help= - "elastic level: -1 disable, 0 failed exit, peers hold, 1 internal restart" + help="elastic level: -1 disable, 0 failed exit, peers hold, 1 internal restart", ) elastic_group.add_argument( "--elastic_timeout", type=int, default=30, - help="seconds to wait before elastic job begin to train") + help="seconds to wait before elastic job begin to train", + ) return parser.parse_known_args() diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py index 14997df24590f80d920f2eb0f85fcd89558e05b6..f05bc1b776869675915464da95ad483798c89543 100644 --- a/python/paddle/distributed/launch/context/device.py +++ b/python/paddle/distributed/launch/context/device.py @@ -28,7 +28,6 @@ class DeviceType: class Device(object): - def __init__(self, dtype=None, memory="", labels=""): self._dtype = dtype self._memory = memory @@ -102,7 +101,8 @@ class Device(object): if 'PADDLE_XCCL_BACKEND' in os.environ: dev._dtype = DeviceType.CUSTOM_DEVICE visible_devices_str = '{}_VISIBLE_DEVICES'.format( - os.getenv('PADDLE_XCCL_BACKEND').upper()) + os.getenv('PADDLE_XCCL_BACKEND').upper() + ) if visible_devices_str in os.environ: visible_devices = os.getenv(visible_devices_str) elif 'CUDA_VISIBLE_DEVICES' in os.environ: @@ -127,7 +127,6 @@ class Device(object): @classmethod def detect_device(self): - def get_custom_devices_count(device_type): all_custom_devices = get_available_custom_device() all_custom_devices = [ @@ -144,7 +143,8 @@ class Device(object): dev._dtype = DeviceType.CUSTOM_DEVICE num = get_custom_devices_count(custom_device_type) visible_devices_str = '{}_VISIBLE_DEVICES'.format( - custom_device_type.upper()) + custom_device_type.upper() + ) if visible_devices_str in os.environ: visible_devices = os.getenv(visible_devices_str) elif fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/distributed/launch/context/event.py b/python/paddle/distributed/launch/context/event.py index cb39e1529fc82e18e12d2c58dd1dfee035da6b4d..3859edb83f29c1320ff90a91253d56d1f0a04a2a 100644 --- a/python/paddle/distributed/launch/context/event.py +++ b/python/paddle/distributed/launch/context/event.py @@ -14,7 +14,6 @@ class Event(object): - def __init__(self, kind="status", message="", fatal=False): self.kind = kind self.message = message diff --git a/python/paddle/distributed/launch/context/node.py b/python/paddle/distributed/launch/context/node.py index e81c91e1bca9f1ff5f5a1e808b71f1d1b355fa99..b8c1a2a14f5ce64e1eb05464012404d75a50b8d3 100644 --- a/python/paddle/distributed/launch/context/node.py +++ b/python/paddle/distributed/launch/context/node.py @@ -22,7 +22,6 @@ from contextlib import closing class Node(object): - def __init__(self): # self.device = Device.detect_device() self.device = Device.parse_device() @@ -54,8 +53,9 @@ class Node(object): def _get_free_port(self, port=0): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, - struct.pack('ii', 1, 0)) + s.setsockopt( + socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0) + ) try: s.bind(('', port)) return s.getsockname()[1] @@ -81,8 +81,8 @@ class Node(object): @classmethod def is_server_ready(self, ip, port): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: - #sock.settimeout(0.01) - #sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + # sock.settimeout(0.01) + # sock.setsockopt(socket.SOL_SOCKET, 
socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) result = sock.connect_ex((ip, int(port))) diff --git a/python/paddle/distributed/launch/context/resource.py b/python/paddle/distributed/launch/context/resource.py index d523c3c5cdfe8cd4377d7c50aec75d2bd83856ab..b7baf6fd7b621c647c6708843c10fc24a32e1b5e 100644 --- a/python/paddle/distributed/launch/context/resource.py +++ b/python/paddle/distributed/launch/context/resource.py @@ -14,6 +14,5 @@ class Resource(object): - def __init__(self): self.devices = [] diff --git a/python/paddle/distributed/launch/controllers/collective.py b/python/paddle/distributed/launch/controllers/collective.py index dd0a3cc34aa3ad8be3d9c67ce00264aa646ef34d..be38b4057f0d7874ddd5444375a366f70e0029a9 100644 --- a/python/paddle/distributed/launch/controllers/collective.py +++ b/python/paddle/distributed/launch/controllers/collective.py @@ -19,7 +19,6 @@ import json class CollectiveController(Controller): - @classmethod def enable(cls, ctx): # collective is the default mode @@ -31,7 +30,11 @@ class CollectiveController(Controller): return False def build_pod(self): - if self.ctx.args.master is None and self.ctx.args.start_port and self.ctx.args.ips: + if ( + self.ctx.args.master is None + and self.ctx.args.start_port + and self.ctx.args.ips + ): self._build_pod_with_args() else: self._build_pod_with_master() @@ -48,15 +51,18 @@ class CollectiveController(Controller): self.ctx.logger.debug("job endpoints: {}".format(job_endpoints)) - rank_offset = ips.index( - self.ctx.node.ip - ) * self.pod.replicas if self.ctx.node.ip in ips else 0 + rank_offset = ( + ips.index(self.ctx.node.ip) * self.pod.replicas + if self.ctx.node.ip in ips + else 0 + ) self.save_pod_log(job_endpoints) selected_dev_key = self.ctx.node.device.get_selected_device_key() selected_dev_list = self.ctx.node.device.get_selected_devices( - self.ctx.args.devices) + self.ctx.args.devices + ) for i in range(self.pod.replicas): e = { @@ -101,19 +107,24 @@ class CollectiveController(Controller): for p in self.ctx.node.get_free_ports(self.pod.replicas) ] - data = json.dumps({ - 'name': self.pod.name, - 'rank': self.pod.rank, - 'replicas': self.pod.replicas, - 'dtype': self.ctx.node.device.dtype, - 'candidate': '{}:{}'.format(self.ctx.node.ip, port), - 'endpoints': ",".join(endpoints), - }) - - peer_list, rank = self.master.sync_peers('/{}/info'.format(self.job.id), - self.pod.name, data, - self.job.replicas, - self.pod.rank) + data = json.dumps( + { + 'name': self.pod.name, + 'rank': self.pod.rank, + 'replicas': self.pod.replicas, + 'dtype': self.ctx.node.device.dtype, + 'candidate': '{}:{}'.format(self.ctx.node.ip, port), + 'endpoints': ",".join(endpoints), + } + ) + + peer_list, rank = self.master.sync_peers( + '/{}/info'.format(self.job.id), + self.pod.name, + data, + self.job.replicas, + self.pod.rank, + ) self.pod.rank = rank if len(peer_list) < 1: @@ -136,7 +147,8 @@ class CollectiveController(Controller): self.pod.reset() selected_dev_key = self.ctx.node.device.get_selected_device_key() selected_dev_list = self.ctx.node.device.get_selected_devices( - self.ctx.args.devices) + self.ctx.args.devices + ) for i in range(self.pod.replicas): e = { "PADDLE_MASTER": collective_master, @@ -170,7 +182,6 @@ class CollectiveController(Controller): class CollectiveElasticController(CollectiveController): - @classmethod def enable(cls, ctx): if ctx.args.master and ctx.args.master.startswith("etcd://"): @@ -200,9 +211,9 @@ class 
CollectiveElasticController(CollectiveController): self.ctx.logger.info("Waiting peer ready...") - ok, replicas = self.master.wait_peer_ready(self.job.replicas_min, - self.job.replicas_max, - timeout) + ok, replicas = self.master.wait_peer_ready( + self.job.replicas_min, self.job.replicas_max, timeout + ) if ok: self.job.replicas = replicas else: diff --git a/python/paddle/distributed/launch/controllers/controller.py b/python/paddle/distributed/launch/controllers/controller.py index 21b9ce2d5c80c544bcfb8ae880fa695f36955e5e..9ff18d5b5d511f88ea45a443d0a8f070ea61caea 100644 --- a/python/paddle/distributed/launch/controllers/controller.py +++ b/python/paddle/distributed/launch/controllers/controller.py @@ -32,7 +32,6 @@ class ControleMode: class ControllerBase(object): - def __init__(self, ctx): signal.signal(signal.SIGTERM, self.signal_handler) signal.signal(signal.SIGABRT, self.signal_handler) @@ -43,9 +42,11 @@ class ControllerBase(object): self.watcher = Watcher(self.ctx) - self.job = Job(nnodes=self.ctx.args.nnodes, - mode=self.ctx.args.run_mode, - jid=self.ctx.args.job_id) + self.job = Job( + nnodes=self.ctx.args.nnodes, + mode=self.ctx.args.run_mode, + jid=self.ctx.args.job_id, + ) self.pod = Pod() self.ctx.set_envs({"POD_NAME": self.pod.name}) @@ -74,14 +75,14 @@ class ControllerBase(object): ''' watch self and peer status, return true to exit ''' - #TODO(kuizhiqing) unify ctx.status and master status + # TODO(kuizhiqing) unify ctx.status and master status self.ctx.logger.info("Watching {}".format(self.pod)) while not self.ctx.status.is_done(): status = self.pod.watch(timeout=2) - #if self.ctx.continous_log(): + # if self.ctx.continous_log(): # default to print log self.pod.logs() @@ -120,8 +121,10 @@ class ControllerBase(object): return False # peer failure - if self.ctx.status.is_restarting( - ) and self.master.get_status() != self.ctx.status.COMPLETED: + if ( + self.ctx.status.is_restarting() + and self.master.get_status() != self.ctx.status.COMPLETED + ): self.pod.stop(timeout=30) return False @@ -190,12 +193,9 @@ class Controller(ControllerBase): err = os.path.join(self.ctx.args.log_dir, err) return out, (err or out) - def new_container(self, - entrypoint=None, - envs={}, - use_ctx_env=True, - out=None, - err=None): + def new_container( + self, entrypoint=None, envs={}, use_ctx_env=True, out=None, err=None + ): c = Container( entrypoint=(entrypoint or self._get_entrypoint()), env=(self.ctx.get_envs() if use_ctx_env else {}), @@ -204,18 +204,19 @@ class Controller(ControllerBase): c.update_env(envs) return c - def add_container(self, - container=None, - entrypoint=None, - envs={}, - log_file=None, - is_init=False): + def add_container( + self, + container=None, + entrypoint=None, + envs={}, + log_file=None, + is_init=False, + ): if not container: - container = self.new_container(entrypoint=entrypoint, - envs=envs, - out=log_file, - err=log_file) + container = self.new_container( + entrypoint=entrypoint, envs=envs, out=log_file, err=log_file + ) if is_init: self.pod.add_init_container(container) @@ -241,8 +242,10 @@ class Controller(ControllerBase): if not self.ctx.args.log_dir: return - f = os.path.join(self.ctx.args.log_dir, - '{}.{}.log'.format(self.job.id, self.pod.name)) + f = os.path.join( + self.ctx.args.log_dir, + '{}.{}.log'.format(self.job.id, self.pod.name), + ) try: os.makedirs(os.path.dirname(f), exist_ok=True) with open(f, 'a+') as fd: diff --git a/python/paddle/distributed/launch/controllers/ipu_controller.py 
b/python/paddle/distributed/launch/controllers/ipu_controller.py index 92dc2960ab6240aa1c690f8d0de0b85c04aec952..ea342e5ebe03aaa7acbdf82f131ec5aed2eca8e6 100644 --- a/python/paddle/distributed/launch/controllers/ipu_controller.py +++ b/python/paddle/distributed/launch/controllers/ipu_controller.py @@ -21,7 +21,6 @@ from paddle.distributed.launch.job.container import Container class IPUController(CollectiveController): - @classmethod def enable(cls, ctx): if ctx.args.training_script == "ipu": @@ -33,26 +32,31 @@ class IPUController(CollectiveController): def parse_ipu_args(self, args_list): parser = argparse.ArgumentParser() - parser.add_argument("--hosts", - type=str, - help="The hosts for IPU distributed training.") - parser.add_argument("--nproc_per_host", - type=int, - help="The number of processes launched per host.") - parser.add_argument("--ipus_per_replica", - type=int, - help="The number of IPUs requested per replica.") - parser.add_argument("--ipu_partition", - type=str, - help="The partition name of IPU devices.") - parser.add_argument("--vipu_server", - type=str, - help="The ip of the IPU device manager.") + parser.add_argument( + "--hosts", type=str, help="The hosts for IPU distributed training." + ) + parser.add_argument( + "--nproc_per_host", + type=int, + help="The number of processes launched per host.", + ) + parser.add_argument( + "--ipus_per_replica", + type=int, + help="The number of IPUs requested per replica.", + ) + parser.add_argument( + "--ipu_partition", + type=str, + help="The partition name of IPU devices.", + ) + parser.add_argument( + "--vipu_server", type=str, help="The ip of the IPU device manager." + ) parser.add_argument( "training_script", type=str, - help= - "The full path to the IPU distributed training program/script to be launched in parallel. e.g., ``training.py``." + help="The full path to the IPU distributed training program/script to be launched in parallel.
e.g., ``training.py``.", ) parser.add_argument('training_script_args', nargs=argparse.REMAINDER) return parser.parse_args(args_list) @@ -64,19 +68,27 @@ class IPUController(CollectiveController): num_ipus = int(self.ctx.args.devices) # The number of replicas for data parallel - assert (num_ipus % poprun_args.ipus_per_replica) == 0, \ - "The number of IPUs:{} mod the number of IPUs per replica:{} must == 0".format(num_ipus, poprun_args.ipus_per_replica) + assert ( + num_ipus % poprun_args.ipus_per_replica + ) == 0, "The number of IPUs:{} mod the number of IPUs per replica:{} must == 0".format( + num_ipus, poprun_args.ipus_per_replica + ) num_replicas = num_ipus // poprun_args.ipus_per_replica self.ctx.logger.info( - "The number of total replicas is {}.".format(num_replicas)) + "The number of total replicas is {}.".format(num_replicas) + ) # The number of processes num_nodes = len(poprun_args.hosts.split(',')) num_procs = num_nodes * poprun_args.nproc_per_host self.ctx.logger.info( - "The number of total processes is {}.".format(num_procs)) - assert (num_replicas % num_procs) == 0, \ - "The number of replicas:{} mod the number of processes:{} must == 0".format(num_replicas, num_procs) + "The number of total processes is {}.".format(num_procs) + ) + assert ( + num_replicas % num_procs + ) == 0, "The number of replicas:{} mod the number of processes:{} must == 0".format( + num_replicas, num_procs + ) # hosts and endpoints hosts = poprun_args.hosts.replace(' ', '').split(',') @@ -87,26 +99,36 @@ class IPUController(CollectiveController): poprun_command.append('--num-instances={}'.format(num_procs)) poprun_command.append('--num-replicas={}'.format(num_replicas)) - poprun_command.append('--ipus-per-replica={}'.format( - poprun_args.ipus_per_replica)) + poprun_command.append( + '--ipus-per-replica={}'.format(poprun_args.ipus_per_replica) + ) poprun_command.append('--host={}'.format(','.join(hosts))) - poprun_command.append('--vipu-partition={}'.format( - poprun_args.ipu_partition)) - poprun_command.append('--vipu-server-host={}'.format( - poprun_args.vipu_server)) + poprun_command.append( + '--vipu-partition={}'.format(poprun_args.ipu_partition) + ) + poprun_command.append( + '--vipu-server-host={}'.format(poprun_args.vipu_server) + ) - poprun_command.extend([ - '--update-partition=no', '--vipu-server-timeout=120', - '--print-topology=yes', '--numa-aware=yes' - ]) + poprun_command.extend( + [ + '--update-partition=no', + '--vipu-server-timeout=120', + '--print-topology=yes', + '--numa-aware=yes', + ] + ) # global envs global_envs = '--mpi-local-args=\'' log_level = os.getenv('POPART_LOG_LEVEL', None) if log_level: global_envs += '-x POPART_LOG_LEVEL={} '.format(log_level) - global_envs += '-x PADDLE_TRAINERS_NUM={} -x PADDLE_TRAINER_ENDPOINTS={}'.format( - num_procs, ','.join(endpoints)) + global_envs += ( + '-x PADDLE_TRAINERS_NUM={} -x PADDLE_TRAINER_ENDPOINTS={}'.format( + num_procs, ','.join(endpoints) + ) + ) global_envs += '\'' poprun_command.append(global_envs) @@ -115,8 +137,10 @@ class IPUController(CollectiveController): cur_endpoint = endpoints[idx // poprun_args.nproc_per_host] rank_in_node = idx % poprun_args.nproc_per_host poprun_command.append( - '--instance-mpi-local-args={}:\"-x PADDLE_TRAINER_ID={} -x PADDLE_CURRENT_ENDPOINT={} -x PADDLE_RANK_IN_NODE={}\"' - .format(idx, idx, cur_endpoint, rank_in_node)) + '--instance-mpi-local-args={}:\"-x PADDLE_TRAINER_ID={} -x PADDLE_CURRENT_ENDPOINT={} -x PADDLE_RANK_IN_NODE={}\"'.format( + idx, idx, cur_endpoint, rank_in_node + ) + ) # 
executor poprun_command.append(sys.executable) @@ -142,12 +166,9 @@ class IPUController(CollectiveController): entrypoint = [" ".join(entrypoint)] return entrypoint - def new_container(self, - entrypoint=None, - envs={}, - use_ctx_env=True, - out=None, - err=None): + def new_container( + self, entrypoint=None, envs={}, use_ctx_env=True, out=None, err=None + ): c = Container( entrypoint=(entrypoint or self._get_entrypoint()), env=(self.ctx.get_envs() if use_ctx_env else {}), diff --git a/python/paddle/distributed/launch/controllers/master.py b/python/paddle/distributed/launch/controllers/master.py index da240ceffa9f3ae0aaed1af486f859d60aa03cfd..128852e092cfbe3347cba7a613a1b32aadbf7964 100644 --- a/python/paddle/distributed/launch/controllers/master.py +++ b/python/paddle/distributed/launch/controllers/master.py @@ -64,7 +64,6 @@ class Master(object): class HTTPMaster(Master): - def lazy_init(self): if self.initialized: return @@ -83,7 +82,8 @@ class HTTPMaster(Master): break except Exception as e: self.ctx.logger.warning( - "start master failed {}".format(e)) + "start master failed {}".format(e) + ) time.sleep(0.1) continue else: @@ -94,7 +94,9 @@ class HTTPMaster(Master): print("Copy the following command to other nodes to run.") cmd = [ - sys.executable.split('/')[-1], "-m", "paddle.distributed.launch" + sys.executable.split('/')[-1], + "-m", + "paddle.distributed.launch", ] cmd.extend(["--master", self.endpoint]) cmd.extend(sys.argv[1:]) @@ -104,7 +106,8 @@ class HTTPMaster(Master): if int(self.ctx.args.rank) >= 0: self.ctx.logger.warning( - "--rank set in the command may not compatible in auto mode") + "--rank set in the command may not compatible in auto mode" + ) if '127.0.0.1' in self.endpoint: self.endpoint = self.endpoint.replace('127.0.0.1', self.ctx.node.ip) @@ -173,7 +176,6 @@ class HTTPMaster(Master): class ETCDMaster(Master): - def __init__(self, ctx): super().__init__(ctx) @@ -225,7 +227,8 @@ class ETCDMaster(Master): ii = int(six.ensure_str(k.key).split('/')[-1]) if ii < 0: self.ctx.logger.error( - "rank {} error in sync".format(ii)) + "rank {} error in sync".format(ii) + ) ret[ii] = six.ensure_str(v) return ret, rank else: @@ -241,7 +244,7 @@ class ETCDMaster(Master): lease = self.client.lease(ttl) - #self.client.delete_prefix(self.job_prefix) + # self.client.delete_prefix(self.job_prefix) beat_path = "{}/{}".format(self.heartbeat_prefix, pod_id) self.client.put(beat_path, pod_id.encode('latin-1'), lease=lease) @@ -250,16 +253,17 @@ class ETCDMaster(Master): self.ctx.status.restart() beat_watch = self.client.add_watch_prefix_callback( - self.heartbeat_prefix, _beat_watch) + self.heartbeat_prefix, _beat_watch + ) def _heartbeat(): while not self.ctx.status.is_done(): try: lease.refresh() if pod_id not in self.fetch_peer_alive(): - self.client.put(beat_path, - pod_id.encode('latin-1'), - lease=lease) + self.client.put( + beat_path, pod_id.encode('latin-1'), lease=lease + ) self.ctx.logger.debug("Heartbeat register again") except Exception as e: self.ctx.logger.error("Heartbeat error {}".format(e)) @@ -267,9 +271,9 @@ class ETCDMaster(Master): self.ctx.logger.debug("Heartbeat done") self.client.cancel_watch(beat_watch) - self.beat_thread = threading.Thread(name='heartbeat', - target=_heartbeat, - daemon=True) + self.beat_thread = threading.Thread( + name='heartbeat', target=_heartbeat, daemon=True + ) self.beat_thread.start() def fetch_peer_alive(self): @@ -311,7 +315,8 @@ class ETCDMaster(Master): assert self.client.put( self.job_prefix, status.encode('latin-1'), - 
lease=self.client.lease(600)), "set status failed {}".format(status) + lease=self.client.lease(600), + ), "set status failed {}".format(status) def get_status(self): return six.ensure_str(self.client.get(self.job_prefix)[0] or '') @@ -320,4 +325,4 @@ class ETCDMaster(Master): if hasattr(self, 'beat_thread'): self.ctx.status.done() # daemon thread - #self.beat_thread.join() + # self.beat_thread.join() diff --git a/python/paddle/distributed/launch/controllers/ps.py b/python/paddle/distributed/launch/controllers/ps.py index f785311a525402c49008658f73a39e69502a1e30..b78ab34d490962bef1e1904294910186ea3c034f 100644 --- a/python/paddle/distributed/launch/controllers/ps.py +++ b/python/paddle/distributed/launch/controllers/ps.py @@ -19,12 +19,15 @@ import os, shutil class PSController(Controller): - @classmethod def enable(cls, ctx): - if ctx.args.run_mode == ControleMode.PS or ctx.args.server_num or len( - ctx.args.servers) > 0 or ctx.args.trainer_num or len( - ctx.args.trainers) > 0: + if ( + ctx.args.run_mode == ControleMode.PS + or ctx.args.server_num + or len(ctx.args.servers) > 0 + or ctx.args.trainer_num + or len(ctx.args.trainers) > 0 + ): ctx.logger.debug("{} enabled".format(cls.__name__)) ctx.args.run_mode = ControleMode.PS return True @@ -59,6 +62,7 @@ class PSController(Controller): self.save_pod_log([server_endpoints, trainer_endpoints]) import tempfile + gloo_rendezvous_dir = tempfile.mkdtemp() if os.path.exists(gloo_rendezvous_dir): shutil.rmtree(gloo_rendezvous_dir) @@ -70,7 +74,7 @@ class PSController(Controller): "PADDLE_GLOO_RENDEZVOUS": "3", "PADDLE_GLOO_FS_PATH": gloo_rendezvous_dir, "PADDLE_GLOO_HTTP_ENDPOINT": gloo_http, - "PADDLE_WITH_GLOO": self.ctx.args.with_gloo + "PADDLE_WITH_GLOO": self.ctx.args.with_gloo, } for i in range(server_num): @@ -124,19 +128,24 @@ class PSController(Controller): for p in self.ctx.node.get_free_ports(trainer_num) ] - data = json.dumps({ - 'name': self.pod.name, - 'rank': self.pod.rank, - 'servers': servers, - 'trainers': trainers, - 'dtype': self.ctx.node.device.dtype, - 'gloo_port': self.ctx.node.get_free_port(), - }) + data = json.dumps( + { + 'name': self.pod.name, + 'rank': self.pod.rank, + 'servers': servers, + 'trainers': trainers, + 'dtype': self.ctx.node.device.dtype, + 'gloo_port': self.ctx.node.get_free_port(), + } + ) - peer_list, rank = self.master.sync_peers('/{}/info'.format(self.job.id), - self.pod.name, data, - self.job.replicas, - self.pod.rank) + peer_list, rank = self.master.sync_peers( + '/{}/info'.format(self.job.id), + self.pod.name, + data, + self.job.replicas, + self.pod.rank, + ) self.ctx.logger.debug("sync peers done {}".format(peer_list)) @@ -146,17 +155,19 @@ class PSController(Controller): server_endpoints = [j for i in peer_list for j in i['servers']] trainer_endpoints = [j for i in peer_list for j in i['trainers']] - #rank_offset = sum([i['replicas'] for i in peer_list[:rank]]) + # rank_offset = sum([i['replicas'] for i in peer_list[:rank]]) server_rank_offset = sum([len(i['servers']) for i in peer_list[:rank]]) trainer_rank_offset = sum( - [len(i['trainers']) for i in peer_list[:rank]]) + [len(i['trainers']) for i in peer_list[:rank]] + ) self.pod.rank = rank self.pod.replicas = server_num + trainer_num import tempfile + gloo_rendezvous_dir = tempfile.mkdtemp() if os.path.exists(gloo_rendezvous_dir): shutil.rmtree(gloo_rendezvous_dir) @@ -168,27 +179,21 @@ class PSController(Controller): "PADDLE_GLOO_RENDEZVOUS": "3", "PADDLE_GLOO_FS_PATH": gloo_rendezvous_dir, "PADDLE_GLOO_HTTP_ENDPOINT": gloo_http, - 
"PADDLE_WITH_GLOO": self.ctx.args.with_gloo + "PADDLE_WITH_GLOO": self.ctx.args.with_gloo, } for i in range(server_num): e = { - "PADDLE_NNODES": - "{}".format(self.job.replicas), - "PADDLE_PSERVERS_IP_PORT_LIST": - ",".join(server_endpoints), - "PADDLE_TRAINER_ENDPOINTS": - ",".join(trainer_endpoints), - "PADDLE_PORT": - server_endpoints[i + server_rank_offset].split(":")[1], - "PADDLE_ROLE": - "PSERVER", - "TRAINING_ROLE": - "PSERVER", - "PADDLE_TRAINERS_NUM": - "{}".format(len(trainer_endpoints)), - "POD_IP": - self.ctx.node.ip, + "PADDLE_NNODES": "{}".format(self.job.replicas), + "PADDLE_PSERVERS_IP_PORT_LIST": ",".join(server_endpoints), + "PADDLE_TRAINER_ENDPOINTS": ",".join(trainer_endpoints), + "PADDLE_PORT": server_endpoints[i + server_rank_offset].split( + ":" + )[1], + "PADDLE_ROLE": "PSERVER", + "TRAINING_ROLE": "PSERVER", + "PADDLE_TRAINERS_NUM": "{}".format(len(trainer_endpoints)), + "POD_IP": self.ctx.node.ip, } e.update(_gloo_envs) log_file = "serverlog.{}".format(i) @@ -196,24 +201,17 @@ class PSController(Controller): for i in range(trainer_num): e = { - "PADDLE_NNODES": - "{}".format(self.job.replicas), - "PADDLE_PSERVERS_IP_PORT_LIST": - ",".join(server_endpoints), - "PADDLE_TRAINER_ENDPOINTS": - ",".join(trainer_endpoints), - "PADDLE_PORT": - trainer_endpoints[i + trainer_rank_offset].split(":")[1], - "PADDLE_ROLE": - "TRAINER", - "TRAINING_ROLE": - "TRAINER", - "PADDLE_TRAINER_ID": - "{}".format(i + trainer_rank_offset), - "PADDLE_TRAINERS_NUM": - "{}".format(len(trainer_endpoints)), - "POD_IP": - self.ctx.node.ip, + "PADDLE_NNODES": "{}".format(self.job.replicas), + "PADDLE_PSERVERS_IP_PORT_LIST": ",".join(server_endpoints), + "PADDLE_TRAINER_ENDPOINTS": ",".join(trainer_endpoints), + "PADDLE_PORT": trainer_endpoints[i + trainer_rank_offset].split( + ":" + )[1], + "PADDLE_ROLE": "TRAINER", + "TRAINING_ROLE": "TRAINER", + "PADDLE_TRAINER_ID": "{}".format(i + trainer_rank_offset), + "PADDLE_TRAINERS_NUM": "{}".format(len(trainer_endpoints)), + "POD_IP": self.ctx.node.ip, } e.update(_gloo_envs) log_file = "workerlog.{}".format(i) diff --git a/python/paddle/distributed/launch/controllers/rpc.py b/python/paddle/distributed/launch/controllers/rpc.py index d68c389da9e03712226bce81209efe46a0b127a8..0d3c314ec7788036a78fa384a423067674d709b4 100644 --- a/python/paddle/distributed/launch/controllers/rpc.py +++ b/python/paddle/distributed/launch/controllers/rpc.py @@ -18,7 +18,6 @@ import json class RpcController(Controller): - @classmethod def enable(cls, ctx): if ctx.args.run_mode == ControleMode.RPC: @@ -28,8 +27,9 @@ class RpcController(Controller): return False def build_pod(self): - assert (self.ctx.args.master - is not None), "Master is None, Please set master address!" + assert ( + self.ctx.args.master is not None + ), "Master is None, Please set master address!" 
self._build_pod_with_master() def _build_pod_with_master(self): @@ -47,14 +47,16 @@ class RpcController(Controller): for p in self.ctx.node.get_free_ports(self.pod.replicas) ] - data = json.dumps({ - "name": self.pod.name, - "rank": self.pod.rank, - "replicas": self.pod.replicas, - "dtype": self.ctx.node.device.dtype, - "candidate": "{}:{}".format(self.ctx.node.ip, port), - "endpoints": ",".join(endpoints), - }) + data = json.dumps( + { + "name": self.pod.name, + "rank": self.pod.rank, + "replicas": self.pod.replicas, + "dtype": self.ctx.node.device.dtype, + "candidate": "{}:{}".format(self.ctx.node.ip, port), + "endpoints": ",".join(endpoints), + } + ) peer_list, rank = self.master.sync_peers( "/{}/info".format(self.job.id), self.pod.name, diff --git a/python/paddle/distributed/launch/controllers/watcher.py b/python/paddle/distributed/launch/controllers/watcher.py index 4b8e346e7908fc9ece77c72a3cba1608369b6be7..a9c1f509666875bcac3f43fb455f6d97b49af6fa 100644 --- a/python/paddle/distributed/launch/controllers/watcher.py +++ b/python/paddle/distributed/launch/controllers/watcher.py @@ -20,7 +20,6 @@ from threading import Thread class Watcher(object): - def __init__(self, ctx): self.ctx = ctx @@ -31,8 +30,9 @@ class Watcher(object): # gpu log file self.gpus = self.ctx.args.devices or self.ctx.node.device.labels if len(self.gpus) > 0: - fn = os.path.join(self.ctx.args.log_dir, - "{}.gpu.log".format(self.ctx.args.job_id)) + fn = os.path.join( + self.ctx.args.log_dir, "{}.gpu.log".format(self.ctx.args.job_id) + ) os.makedirs(os.path.dirname(fn), exist_ok=True) self.gpu_fd = open(fn, 'w') else: diff --git a/python/paddle/distributed/launch/job/container.py b/python/paddle/distributed/launch/job/container.py index 55223bacdd8489863dacf5960d4f273f5d9fa198..6eb313ea579657abed1dff9d79df433b8d8c3247 100644 --- a/python/paddle/distributed/launch/job/container.py +++ b/python/paddle/distributed/launch/job/container.py @@ -89,7 +89,8 @@ class Container(object): def _valide_env(self): for k, v in self._env.items(): assert isinstance(k, str) and isinstance( - v, str), 'env {}:{} must be str'.format(k, v) + v, str + ), 'env {}:{} must be str'.format(k, v) def _get_fd(self, pth): if not pth: @@ -120,11 +121,13 @@ class Container(object): self._log_handler.seek(0, 2) self._log_start_offset = self._log_handler.tell() - self._proc = ProcessContext(self._entrypoint, - env=self._env, - out=self._stdout, - err=self._stderr, - shell=self._shell) + self._proc = ProcessContext( + self._entrypoint, + env=self._env, + out=self._stdout, + err=self._stderr, + shell=self._shell, + ) self._proc.start() @@ -159,13 +162,15 @@ class Container(object): return Status.FAILED def __str__(self): - return 'Container rank {} status {} cmd {} code {} log {} \nenv {}'.format( - self._rank, - self.status, - self._entrypoint, - self.exit_code, - self.errfile, - self._env, + return ( + 'Container rank {} status {} cmd {} code {} log {} \nenv {}'.format( + self._rank, + self.status, + self._entrypoint, + self.exit_code, + self.errfile, + self._env, + ) ) def logs(self, fn=None, offset=0, whence=1, limit=1000): diff --git a/python/paddle/distributed/launch/job/job.py b/python/paddle/distributed/launch/job/job.py index 4bad1209c1859e8ab53b1bb77facb989e9536bd3..f5c805e31bf63d00d24b459b18ccc952df5970e3 100644 --- a/python/paddle/distributed/launch/job/job.py +++ b/python/paddle/distributed/launch/job/job.py @@ -20,7 +20,6 @@ class JobMode: class Job(object): - def __init__(self, jid='default', mode=JobMode.COLLECTIVE, nnodes="1"): self._mode 
= mode self._id = jid @@ -34,8 +33,13 @@ class Job(object): def __str__(self): return "Job: {}, mode {}, replicas {}[{}:{}], elastic {}".format( - self.id, self.mode, self._replicas, self._replicas_min, - self._replicas_max, self.elastic) + self.id, + self.mode, + self._replicas, + self._replicas_min, + self._replicas_max, + self.elastic, + ) @property def mode(self): @@ -76,6 +80,9 @@ class Job(object): self._elastic = True else: self._replicas = int(np) - self._replicas_min, self._replicas_max = self._replicas, self._replicas + self._replicas_min, self._replicas_max = ( + self._replicas, + self._replicas, + ) self._elastic = False diff --git a/python/paddle/distributed/launch/job/pod.py b/python/paddle/distributed/launch/job/pod.py index 960a52aa3f3687fc27de1d1563d2a31d0cb80098..b65aad6e0fcb43a869d5038489db927e13261216 100644 --- a/python/paddle/distributed/launch/job/pod.py +++ b/python/paddle/distributed/launch/job/pod.py @@ -21,17 +21,17 @@ import time class PodSepc(object): - def __init__(self): self._name = ''.join( - random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6)) + random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6) + ) # by controller self._init_containers: List[Container] = [] self._containers: List[Container] = [] - #self.resource: Resource = None - #self.status: Status = None + # self.resource: Resource = None + # self.status: Status = None self._rank = -1 self._init_timeout = None @@ -41,14 +41,13 @@ class PodSepc(object): class Pod(PodSepc): - def __init__(self): super().__init__() def __str__(self): - return "Pod: {}, replicas {}, status {}".format(self.name, - self.replicas, - self.status) + return "Pod: {}, replicas {}, status {}".format( + self.name, self.replicas, self.status + ) def failed_container(self): cs = [] @@ -183,11 +182,13 @@ class Pod(PodSepc): else: self._containers[idx].tail() - def watch(self, - all_list=[Status.COMPLETED], - any_list=[Status.FAILED], - interval=1, - timeout=-1): + def watch( + self, + all_list=[Status.COMPLETED], + any_list=[Status.FAILED], + interval=1, + timeout=-1, + ): ''' watch return if any container status in any_list or all container status in all_list diff --git a/python/paddle/distributed/launch/main.py b/python/paddle/distributed/launch/main.py index a6a0c973208fd3abaffafdbec395a4ef7e108b89..c960239f733193913db301b4d60a1db090181b48 100644 --- a/python/paddle/distributed/launch/main.py +++ b/python/paddle/distributed/launch/main.py @@ -290,6 +290,7 @@ def launch(): # legacy mode from paddle.distributed.fleet import launch + launch.launch() else: diff --git a/python/paddle/distributed/launch/plugins/__init__.py b/python/paddle/distributed/launch/plugins/__init__.py index 91a2dee09a3b890568934a35cf3bd90c7e6839b7..2d7632d642cf2030dfba4204635a01a3c4dec363 100644 --- a/python/paddle/distributed/launch/plugins/__init__.py +++ b/python/paddle/distributed/launch/plugins/__init__.py @@ -27,7 +27,7 @@ def log(ctx): def process_args(ctx): # reset device by args - #argdev = ctx.args.gpus or ctx.args.xpus or ctx.args.npus + # argdev = ctx.args.gpus or ctx.args.xpus or ctx.args.npus argdev = ctx.args.devices if argdev: for d in argdev.split(','): @@ -49,7 +49,8 @@ def collective_compatible(ctx): ctx.args.master = eps[0] if ':' in eps[0] else '{}:6768'.format(eps[0]) ctx.args.nnodes = len(hosts) ctx.logger.info( - 'args reset by env PADDLE_TRAINER_ENDPOINTS\n{}'.format(eps)) + 'args reset by env PADDLE_TRAINER_ENDPOINTS\n{}'.format(eps) + ) if 'DISTRIBUTED_TRAINER_ENDPOINTS' in ctx.envs: eps = 
ctx.envs['DISTRIBUTED_TRAINER_ENDPOINTS'].split(',') @@ -57,7 +58,8 @@ def collective_compatible(ctx): ctx.args.master = eps[0] ctx.args.nnodes = len(hosts) ctx.logger.info( - 'args reset by env DISTRIBUTED_TRAINER_ENDPOINTS\n{}'.format(eps)) + 'args reset by env DISTRIBUTED_TRAINER_ENDPOINTS\n{}'.format(eps) + ) def rewrite_host_ip(ctx): @@ -72,9 +74,13 @@ def test_mode(ctx): if int(ctx.args.nnodes) < 2: ctx.args.nnodes = 2 ctx.args.training_script = '{}/test.py'.format( - os.path.dirname(__file__)) + os.path.dirname(__file__) + ) enabled_plugins = [ - test_mode, collective_compatible, rewrite_host_ip, process_args + test_mode, + collective_compatible, + rewrite_host_ip, + process_args, ] diff --git a/python/paddle/distributed/launch/plugins/test.py b/python/paddle/distributed/launch/plugins/test.py index ae4c111351ea36dd551602a34c545b1c12c174a2..3dd7ab886e61bfd16f2d6db6b7ca03d155d165e9 100644 --- a/python/paddle/distributed/launch/plugins/test.py +++ b/python/paddle/distributed/launch/plugins/test.py @@ -31,13 +31,12 @@ class_dim = 102 # define a random dataset class RandomDataset(Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([3, 224, 224]).astype('float32') - label = np.random.randint(0, class_dim - 1, (1, )).astype('int64') + label = np.random.randint(0, class_dim - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -49,7 +48,8 @@ def optimizer_setting(parameter_list=None): learning_rate=base_lr, momentum=momentum_rate, weight_decay=paddle.regularizer.L2Decay(l2_decay), - parameters=parameter_list) + parameters=parameter_list, + ) return optimizer @@ -62,11 +62,13 @@ def train_resnet(): resnet = fleet.distributed_model(resnet) dataset = RandomDataset(batch_num * batch_size) - train_loader = DataLoader(dataset, - batch_size=batch_size, - shuffle=True, - drop_last=True, - num_workers=2) + train_loader = DataLoader( + dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=2, + ) print("Distributed training start...") for eop in range(epoch): @@ -86,14 +88,17 @@ def train_resnet(): optimizer.step() resnet.clear_gradients() - print("[Epoch %d, batch %d] loss: %.5f, acc1: %.5f, acc5: %.5f" % - (eop, batch_id, avg_loss, acc_top1, acc_top5)) + print( + "[Epoch %d, batch %d] loss: %.5f, acc1: %.5f, acc5: %.5f" + % (eop, batch_id, avg_loss, acc_top1, acc_top5) + ) print("Distributed training completed") if __name__ == '__main__': import os + nnodes = os.getenv('PADDLE_NNODES') cn = os.getenv('PADDLE_LOCAL_SIZE') print(f"Prepare distributed training with {nnodes} nodes {cn} cards") diff --git a/python/paddle/distributed/launch/utils/kv_client.py b/python/paddle/distributed/launch/utils/kv_client.py index a66ca800c58c22a289d0a36c0076a50ec433f66d..8ed46053de0d32eaa6b432254b455594b133d850 100644 --- a/python/paddle/distributed/launch/utils/kv_client.py +++ b/python/paddle/distributed/launch/utils/kv_client.py @@ -17,10 +17,12 @@ import time class KVClient(object): - def __init__(self, endpoint='localhost:2379'): - self.endpoint = endpoint if endpoint.startswith( - "http://") else "http://{}".format(endpoint) + self.endpoint = ( + endpoint + if endpoint.startswith("http://") + else "http://{}".format(endpoint) + ) def put(self, key, value): key = key if key.startswith('/') else "/{}".format(key) diff --git a/python/paddle/distributed/launch/utils/kv_server.py b/python/paddle/distributed/launch/utils/kv_server.py index 
ddf5685c988b7de9541899183b1b1cce161e88eb..7c8807d3a16b4903dc8a292fb2e2528756c728d3 100644 --- a/python/paddle/distributed/launch/utils/kv_server.py +++ b/python/paddle/distributed/launch/utils/kv_server.py @@ -22,7 +22,6 @@ import json class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): - def do_GET(self): with self.server.kv_lock: ret = {} @@ -69,7 +68,6 @@ class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): class KVServer(HTTPServer, object): - def __init__(self, port): super(KVServer, self).__init__(('', port), KVHandler) self.kv_lock = threading.Lock() @@ -90,8 +88,7 @@ class KVServer(HTTPServer, object): self.stopped = True -class PKVServer(): - +class PKVServer: def __init__(self, port): self._server = KVServer(port) @@ -114,11 +111,11 @@ class PKVServer(): if __name__ == '__main__': - #kv = PKVServer(8090) + # kv = PKVServer(8090) kv = KVServer(8090) kv.start() import time - #print("serve at 8090 for 600 s") + # print("serve at 8090 for 600 s") time.sleep(600) diff --git a/python/paddle/distributed/launch/utils/nvsmi.py b/python/paddle/distributed/launch/utils/nvsmi.py index 785704be3ff742f8e9acebb82fd571414561fb79..d1a14f11666d2521438d84b8132c830e39ef1270 100644 --- a/python/paddle/distributed/launch/utils/nvsmi.py +++ b/python/paddle/distributed/launch/utils/nvsmi.py @@ -19,7 +19,6 @@ import shutil class Info(object): - def __repr__(self): return str(self.__dict__) @@ -76,20 +75,28 @@ def query_smi(query=None, query_type="gpu", index=None, dtype=None): def get_gpu_info(index=None): q = "index,uuid,driver_version,name,gpu_serial,display_active,display_mode".split( - ",") + "," + ) d = [int, str, str, str, str, str, str] - index = index if index is None or isinstance( - index, list) else str(index).split(",") + index = ( + index + if index is None or isinstance(index, list) + else str(index).split(",") + ) return query_smi(q, index=index, dtype=d) def get_gpu_util(index=None): q = "index,utilization.gpu,memory.total,memory.used,memory.free,timestamp".split( - ",") + "," + ) d = [int, int, int, int, int, str] - index = index if index is None or isinstance( - index, list) else str(index).split(",") + index = ( + index + if index is None or isinstance(index, list) + else str(index).split(",") + ) return query_smi(q, index=index, dtype=d) @@ -97,8 +104,11 @@ def get_gpu_util(index=None): def get_gpu_process(index=None): q = "pid,process_name,gpu_uuid,gpu_name,used_memory".split(",") d = [int, str, str, str, int] - index = index if index is None or isinstance( - index, list) else str(index).split(",") + index = ( + index + if index is None or isinstance(index, list) + else str(index).split(",") + ) return query_smi(q, index=index, query_type="compute", dtype=d) diff --git a/python/paddle/distributed/launch/utils/process_context.py b/python/paddle/distributed/launch/utils/process_context.py index 5d8505aa66eb38fc326892d1e23bc9f212fd40c4..682a857f2ee6c3bfbe2e1f89f68fb13da4598852 100644 --- a/python/paddle/distributed/launch/utils/process_context.py +++ b/python/paddle/distributed/launch/utils/process_context.py @@ -17,15 +17,16 @@ import os, sys, signal, time class ProcessContext(object): - - def __init__(self, - cmd, - env=os.environ, - out=sys.stdout, - err=sys.stderr, - group=True, - preexec_fn=None, - shell=False): + def __init__( + self, + cmd, + env=os.environ, + out=sys.stdout, + err=sys.stderr, + group=True, + preexec_fn=None, + shell=False, + ): self._cmd = cmd self._env = env self._preexec_fn = preexec_fn @@ -38,12 +39,14 @@ class ProcessContext(object): def 
_start(self): pre_fn = os.setsid if self._group else None - self._proc = subprocess.Popen(self._cmd, - env=self._env, - stdout=self._stdout, - stderr=self._stderr, - preexec_fn=self._preexec_fn or pre_fn, - shell=self._shell) + self._proc = subprocess.Popen( + self._cmd, + env=self._env, + stdout=self._stdout, + stderr=self._stderr, + preexec_fn=self._preexec_fn or pre_fn, + shell=self._shell, + ) def _close_std(self): try: diff --git a/python/paddle/distributed/metric/metrics.py b/python/paddle/distributed/metric/metrics.py index 9f8573183b37d3f3bd5fe5a0773099566032dc62..64f62d85251abba622f1cc7b2bcf70ee443c5bfe 100644 --- a/python/paddle/distributed/metric/metrics.py +++ b/python/paddle/distributed/metric/metrics.py @@ -22,15 +22,17 @@ logger = get_logger(logging.INFO, name="metrics") # read metric config from yaml and init MetricMsg in fleet_wrapper -def init_metric(metric_ptr, - metric_yaml_path, - cmatch_rank_var="", - mask_var="", - uid_var="", - phase=-1, - cmatch_rank_group="", - ignore_rank=False, - bucket_size=1000000): +def init_metric( + metric_ptr, + metric_yaml_path, + cmatch_rank_var="", + mask_var="", + uid_var="", + phase=-1, + cmatch_rank_group="", + ignore_rank=False, + bucket_size=1000000, +): yaml_fobj = open(metric_yaml_path) if sys.version.startswith('2.7.13'): content = yaml.load(yaml_fobj) @@ -49,53 +51,102 @@ def init_metric(metric_ptr, phase = 1 if is_join else 0 if metric_runner['method'] == 'AucCalculator': - metric_ptr.init_metric(metric_runner['method'], - metric_runner['name'], - metric_runner['label'], - metric_runner['target'], cmatch_rank_var, - mask_var, uid_var, phase, cmatch_rank_group, - ignore_rank, bucket_size) + metric_ptr.init_metric( + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + cmatch_rank_var, + mask_var, + uid_var, + phase, + cmatch_rank_group, + ignore_rank, + bucket_size, + ) elif metric_runner['method'] == 'MultiTaskAucCalculator': metric_ptr.init_metric( - metric_runner['method'], metric_runner['name'], - metric_runner['label'], metric_runner['target'], - metric_runner['cmatch_var'], mask_var, uid_var, phase, - metric_runner['cmatch_group'], ignore_rank, bucket_size) + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + metric_runner['cmatch_var'], + mask_var, + uid_var, + phase, + metric_runner['cmatch_group'], + ignore_rank, + bucket_size, + ) elif metric_runner['method'] == 'CmatchRankAucCalculator': metric_ptr.init_metric( - metric_runner['method'], metric_runner['name'], - metric_runner['label'], metric_runner['target'], - metric_runner['cmatch_var'], mask_var, uid_var, phase, - metric_runner['cmatch_group'], metric_runner['ignore_rank'], - bucket_size) + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + metric_runner['cmatch_var'], + mask_var, + uid_var, + phase, + metric_runner['cmatch_group'], + metric_runner['ignore_rank'], + bucket_size, + ) elif metric_runner['method'] == 'MaskAucCalculator': - metric_ptr.init_metric(metric_runner['method'], - metric_runner['name'], - metric_runner['label'], - metric_runner['target'], cmatch_rank_var, - metric_runner['mask'], uid_var, phase, - cmatch_rank_group, ignore_rank, bucket_size) + metric_ptr.init_metric( + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + cmatch_rank_var, + metric_runner['mask'], + uid_var, + phase, + cmatch_rank_group, + ignore_rank, + bucket_size, + 
) elif metric_runner['method'] == 'CmatchRankMaskAucCalculator': metric_ptr.init_metric( - metric_runner['method'], metric_runner['name'], - metric_runner['label'], metric_runner['target'], - metric_runner['cmatch_var'], metric_runner['mask'], uid_var, - phase, metric_runner['cmatch_group'], - metric_runner['ignore_rank'], bucket_size) + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + metric_runner['cmatch_var'], + metric_runner['mask'], + uid_var, + phase, + metric_runner['cmatch_group'], + metric_runner['ignore_rank'], + bucket_size, + ) elif metric_runner['method'] == 'WuAucCalculator': - metric_ptr.init_metric(metric_runner['method'], - metric_runner['name'], - metric_runner['label'], - metric_runner['target'], cmatch_rank_var, - mask_var, metric_runner['uid'], phase, - cmatch_rank_group, ignore_rank, bucket_size) + metric_ptr.init_metric( + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + cmatch_rank_var, + mask_var, + metric_runner['uid'], + phase, + cmatch_rank_group, + ignore_rank, + bucket_size, + ) else: - metric_ptr.init_metric(metric_runner['method'], - metric_runner['name'], - metric_runner['label'], - metric_runner['target'], cmatch_rank_var, - mask_var, phase, cmatch_rank_group, - ignore_rank, bucket_size) + metric_ptr.init_metric( + metric_runner['method'], + metric_runner['name'], + metric_runner['label'], + metric_runner['target'], + cmatch_rank_var, + mask_var, + phase, + cmatch_rank_group, + ignore_rank, + bucket_size, + ) def print_metric(metric_ptr, name): @@ -104,14 +155,27 @@ def print_metric(metric_ptr, name): """ if name.find("wuauc") != -1: metric = metric_ptr.get_wuauc_metric_msg(name) - monitor_msg = "%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f "\ - % (name, metric[0], metric[1], metric[4], metric[5]) + monitor_msg = ( + "%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f " + % (name, metric[0], metric[1], metric[4], metric[5]) + ) else: metric = metric_ptr.get_metric_msg(name) - monitor_msg = "%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f "\ - "Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f"\ - % (name, metric[0], metric[1], metric[2], metric[3], metric[4], - metric[5], metric[6], metric[7]) + monitor_msg = ( + "%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f " + "Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f" + % ( + name, + metric[0], + metric[1], + metric[2], + metric[3], + metric[4], + metric[5], + metric[6], + metric[7], + ) + ) # logger.info(monitor_msg) return monitor_msg diff --git a/python/paddle/distributed/models/moe/utils.py b/python/paddle/distributed/models/moe/utils.py index 4c6ac5034498284241abc92cbdbb43c3d9b606f6..2011e558701e76adaa51c744b95f467015b90e67 100644 --- a/python/paddle/distributed/models/moe/utils.py +++ b/python/paddle/distributed/models/moe/utils.py @@ -51,10 +51,12 @@ def _number_count(numbers, upper_range): helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=numbers.dtype) - helper.append_op(type=op_type, - inputs={'numbers': numbers}, - outputs={'Out': out}, - attrs={'upper_range': upper_range}) + helper.append_op( + type=op_type, + inputs={'numbers': numbers}, + outputs={'Out': out}, + attrs={'upper_range': upper_range}, + ) return out @@ -98,29 +100,31 @@ def _assign_pos(x, cum_count): helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=cum_count.dtype) - 
helper.append_op(type=op_type, - inputs={ - 'X': [x], - 'cum_count': [cum_count], - "eff_num_len": [cum_count[-1]] - }, - outputs={'Out': [out]}) + helper.append_op( + type=op_type, + inputs={ + 'X': [x], + 'cum_count': [cum_count], + "eff_num_len": [cum_count[-1]], + }, + outputs={'Out': [out]}, + ) return out def _random_routing(topk_idx, topk_value, prob, topk=2): r""" - random routing topk gate idx - ``` - out = topk_idx - for i in len(topk_idx): - if topk * value[i][topk-1] < prob[i]: - out[i][topk-1] = -1 - ``` - Args: - topk_idx: gate idx, shape=(N, topk) - topk_value: values, shape = topk_idx.shape - prob: random prob, shape=(topk_idx.shape[0],) + random routing topk gate idx + ``` + out = topk_idx + for i in len(topk_idx): + if topk * value[i][topk-1] < prob[i]: + out[i][topk-1] = -1 + ``` + Args: + topk_idx: gate idx, shape=(N, topk) + topk_value: values, shape = topk_idx.shape + prob: random prob, shape=(topk_idx.shape[0],) """ if topk == 2: if in_dygraph_mode(): @@ -155,25 +159,27 @@ def _limit_by_capacity(expert_count, capacity, n_worker): print(out) # the result: [1, 2, 2, 4, 3, 3] """ if in_dygraph_mode(): - return _legacy_C_ops.limit_by_capacity(expert_count, capacity, - 'n_worker', n_worker) + return _legacy_C_ops.limit_by_capacity( + expert_count, capacity, 'n_worker', n_worker + ) elif _in_legacy_dygraph(): - return core.ops.limit_by_capacity(expert_count, capacity, 'n_worker', - n_worker) + return core.ops.limit_by_capacity( + expert_count, capacity, 'n_worker', n_worker + ) else: op_type = 'limit_by_capacity' helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference( - dtype=expert_count.dtype) - - helper.append_op(type=op_type, - inputs={ - 'expert_count': expert_count, - 'capacity': capacity - }, - outputs={'Out': out}, - attrs={'n_worker': n_worker}) + dtype=expert_count.dtype + ) + + helper.append_op( + type=op_type, + inputs={'expert_count': expert_count, 'capacity': capacity}, + outputs={'Out': out}, + attrs={'n_worker': n_worker}, + ) return out @@ -202,30 +208,35 @@ def _prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker): [1, 3, 3, 3, -1, 2, 1, 1]) """ if in_dygraph_mode(): - return _legacy_C_ops.prune_gate_by_capacity(gate_idx, expert_count, - "n_expert", n_expert, - "n_worker", n_worker) + return _legacy_C_ops.prune_gate_by_capacity( + gate_idx, expert_count, "n_expert", n_expert, "n_worker", n_worker + ) elif _in_legacy_dygraph(): - return core.ops.prune_gate_by_capacity(gate_idx, expert_count, - "n_expert", n_expert, "n_worker", - n_worker) - check_variable_and_dtype(gate_idx, 'GateIdx', ['int32', 'int64'], - 'paddle.distributed.utils.prune_gate_by_capacity') - check_variable_and_dtype(expert_count, 'ExpertCount', ['int32', 'int64'], - 'paddle.distributed.utils.prune_gate_by_capacity') + return core.ops.prune_gate_by_capacity( + gate_idx, expert_count, "n_expert", n_expert, "n_worker", n_worker + ) + check_variable_and_dtype( + gate_idx, + 'GateIdx', + ['int32', 'int64'], + 'paddle.distributed.utils.prune_gate_by_capacity', + ) + check_variable_and_dtype( + expert_count, + 'ExpertCount', + ['int32', 'int64'], + 'paddle.distributed.utils.prune_gate_by_capacity', + ) helper = LayerHelper('prune_gate_by_capacity', **locals()) new_gate_idx = helper.create_variable_for_type_inference( - dtype=gate_idx.dtype) - helper.append_op(type='prune_gate_by_capacity', - inputs={ - 'GateIdx': gate_idx, - "ExpertCount": expert_count - }, - outputs={'NewGateIdx': new_gate_idx}, - attrs={ - "n_expert": n_expert, - "n_worker": 
n_worker - }) + dtype=gate_idx.dtype + ) + helper.append_op( + type='prune_gate_by_capacity', + inputs={'GateIdx': gate_idx, "ExpertCount": expert_count}, + outputs={'NewGateIdx': new_gate_idx}, + attrs={"n_expert": n_expert, "n_worker": n_worker}, + ) return new_gate_idx diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py index 49cb60349f7af491070e0bc6a14c36367b3dc6c1..2a74e6bb2cff7770d34601e444df93273dd37289 100644 --- a/python/paddle/distributed/parallel.py +++ b/python/paddle/distributed/parallel.py @@ -26,7 +26,9 @@ from paddle.fluid.framework import _set_expected_place from paddle.fluid.dygraph import parallel_helper from paddle.distributed.fleet.launch_utils import check_backend from paddle.fluid.dygraph.parallel import ParallelEnv -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401 +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) # noqa: F401 from paddle.distributed.collective import _set_group_map from paddle.distributed.collective import _set_group_map_by_name from paddle.distributed.collective import _get_group_map_by_name @@ -57,6 +59,7 @@ def _get_global_parallel_env(): def _start_kv_server(port, http_server_d, size): from paddle.distributed.fleet.utils.http_server import KVServer + http_server = KVServer(int(port), size=size) http_server.start() wait_seconds = 3 @@ -67,10 +70,15 @@ def _start_kv_server(port, http_server_d, size): def _is_cpuonly(backend): check_backend(backend) - if (backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl'] and - (core.is_compiled_with_cuda() or core.is_compiled_with_xpu() - or core.is_compiled_with_npu() - or core.is_compiled_with_mlu())) or backend == 'xccl': + if ( + backend in ['auto', 'nccl', 'bkcl', 'hccl', 'heter', 'cncl'] + and ( + core.is_compiled_with_cuda() + or core.is_compiled_with_xpu() + or core.is_compiled_with_npu() + or core.is_compiled_with_mlu() + ) + ) or backend == 'xccl': # passes 'auto' and can use cuda or xpu, use the default logics. so return False return False @@ -81,9 +89,10 @@ def _is_cpuonly(backend): def _check_var_exists(var_name): var = os.environ.get(var_name, None) if var is None: - raise ValueError("paddle.distributed initialize error, " - "environment variable %s is needed, but not set." % - var_name) + raise ValueError( + "paddle.distributed initialize error, " + "environment variable %s is needed, but not set." % var_name + ) def init_parallel_env(): @@ -161,15 +170,21 @@ def init_parallel_env(): backend = os.environ.get('PADDLE_DISTRI_BACKEND', 'auto') is_cpu_only = _is_cpuonly(backend) # 1. 
gpu xpu check, must be gpu or xpu, - if not (is_cpu_only or core.is_compiled_with_cuda() - or core.is_compiled_with_xpu() or core.is_compiled_with_npu() - or core.is_compiled_with_mlu()): + if not ( + is_cpu_only + or core.is_compiled_with_cuda() + or core.is_compiled_with_xpu() + or core.is_compiled_with_npu() + or core.is_compiled_with_mlu() + ): raise NotImplementedError( - "If you want to use CPU-only version, please use 'gloo' as backend") + "If you want to use CPU-only version, please use 'gloo' as backend" + ) if backend == "xccl": FLAGS_selected_custom_devices = 'FLAGS_selected_{}s'.format( - parallel_env.device_type) + parallel_env.device_type + ) _check_var_exists(FLAGS_selected_custom_devices) else: if not is_cpu_only and core.is_compiled_with_cuda(): @@ -197,8 +212,9 @@ def init_parallel_env(): # they need to call a function to change default place, # here just set correctly place to users if backend == "xccl": - place = core.CustomPlace(parallel_env.device_type, - parallel_env.device_id) + place = core.CustomPlace( + parallel_env.device_type, parallel_env.device_id + ) elif is_cpu_only: place = core.CPUPlace() elif core.is_compiled_with_cuda(): @@ -222,11 +238,15 @@ def init_parallel_env(): assert rank >= 0 and world_size > rank and world_size > 1, ( "rank must be non-negative and world_size must be the " "maximum rank plus one. Moreover, at least two processes are " - "required to create a process group.") + "required to create a process group." + ) master_addr = os.getenv("MASTER_ADDR", None) master_port = os.getenv("MASTER_PORT", None) - endpoints = ":".join([master_addr, master_port - ]) if master_addr and master_port else None + endpoints = ( + ":".join([master_addr, master_port]) + if master_addr and master_port + else None + ) if endpoints is None: endpoints = os.getenv("PADDLE_MASTER", None) if endpoints is None: @@ -235,23 +255,28 @@ def init_parallel_env(): "The environment variable 'MASTER_ADDR' and 'MASTER_PORT' " "must be specified, for example 'export MASTER_ADDR=127.0.0.1' " "and 'export MASTER_PORT=54612'. Or you can start your training " - "with paddle.distributed.run module.") + "with paddle.distributed.run module."
+ ) master_addr, master_port = endpoints.split(":") master_port = int(master_port) is_master = rank == 0 stop_check_timeout = int(os.getenv("FLAGS_stop_check_timeout", "900")) - default_store = core.TCPStore(master_addr, - master_port, - is_master, - world_size, - timeout=stop_check_timeout) + default_store = core.TCPStore( + master_addr, + master_port, + is_master, + world_size, + timeout=stop_check_timeout, + ) _set_default_store(default_store) - pg = _new_process_group_impl(backend, - default_store, - rank, - world_size, - _default_group_name, - pg_options=None) + pg = _new_process_group_impl( + backend, + default_store, + rank, + world_size, + _default_group_name, + pg_options=None, + ) ranks = list(range(world_size)) group = Group(rank, 0, ranks, pg=pg, name=_default_group_name) _set_group_map_by_name(_default_group_name, group) @@ -277,8 +302,10 @@ def init_parallel_env(): size = {'_worker': parallel_env.world_size} if backend == "heter": size = {'_worker': len(node_num)} - http_server = Process(target=_start_kv_server, - args=(int(ep_rank_0[1]), http_server_d, size)) + http_server = Process( + target=_start_kv_server, + args=(int(ep_rank_0[1]), http_server_d, size), + ) http_server.daemon = True http_server_d["running"] = True http_server.start() @@ -296,22 +323,28 @@ def init_parallel_env(): # init nccl or hccl or bkcl or heter context if is_cpu_only: parallel_helper._set_parallel_ctx( - core.GLOOParallelContext(strategy, place)) - elif (backend == "heter"): + core.GLOOParallelContext(strategy, place) + ) + elif backend == "heter": parallel_helper._set_parallel_ctx( - core.HeterParallelContext(strategy, parallel_env.device_id)) + core.HeterParallelContext(strategy, parallel_env.device_id) + ) elif core.is_compiled_with_cuda(): parallel_helper._set_parallel_ctx( - core.NCCLParallelContext(strategy, place)) + core.NCCLParallelContext(strategy, place) + ) elif core.is_compiled_with_xpu(): parallel_helper._set_parallel_ctx( - core.BKCLParallelContext(strategy, place)) + core.BKCLParallelContext(strategy, place) + ) elif core.is_compiled_with_npu(): parallel_helper._set_parallel_ctx( - core.HCCLParallelContext(strategy, place)) + core.HCCLParallelContext(strategy, place) + ) elif core.is_compiled_with_mlu(): parallel_helper._set_parallel_ctx( - core.CNCLParallelContext(strategy, place)) + core.CNCLParallelContext(strategy, place) + ) if backend != "heter": other_endpoints = strategy.trainer_endpoints[:] diff --git a/python/paddle/distributed/parallel_with_gloo.py b/python/paddle/distributed/parallel_with_gloo.py index a5630239948c07684af358518afec9f83460f5c2..eeb91332070b45ee5a638e0356312295aa0e5211 100755 --- a/python/paddle/distributed/parallel_with_gloo.py +++ b/python/paddle/distributed/parallel_with_gloo.py @@ -17,7 +17,9 @@ from multiprocessing import Process, Manager # deprecated module import from paddle.fluid import core -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) __all__ = [] @@ -26,6 +28,7 @@ _global_gloo_ctx = None def _start_kv_server(port, http_server_d, size): from paddle.distributed.fleet.utils.http_server import KVServer + http_server = KVServer(int(port), size=size) http_server.start() wait_seconds = 3 @@ -89,8 +92,9 @@ def gloo_init_parallel_env(rank_id, rank_num, server_endpoint): test_gloo_init_with_multiprocess(2) """ - assert (rank_num < 2) is False, \ - "rank_num should greater than or equal to 2 for parallel environment 
initialzation." + assert ( + rank_num < 2 + ) is False, "rank_num should greater than or equal to 2 for parallel environment initialzation." # init gloo context manager = Manager() @@ -100,9 +104,10 @@ def gloo_init_parallel_env(rank_id, rank_num, server_endpoint): if rank_id == 0: # The scope for worker used by http server is '_worker' size = {'_worker': rank_num} - http_server_proc = Process(target=_start_kv_server, - args=(int(server_endpoint.split(":")[1]), - http_server_status, size)) + http_server_proc = Process( + target=_start_kv_server, + args=(int(server_endpoint.split(":")[1]), http_server_status, size), + ) http_server_proc.daemon = True http_server_status["running"] = True http_server_proc.start() diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index dc51cced37d92fb0c24e2e6d3dacd87632f2c090..3305982f5055c564d35a0a7eb8d5ca4702e3f079 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -18,25 +18,48 @@ from paddle.fluid import unique_name from .pass_base import PassBase, register_pass from paddle.distributed.fleet.meta_optimizers.common import OpRole from paddle.fluid.data_feeder import check_variable_and_dtype, check_type -from paddle.distributed.auto_parallel.utils import get_loss_op, set_var_dist_attr -from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping -from paddle.distributed.auto_parallel.process_group import get_world_process_group -from paddle.fluid.contrib.mixed_precision.fp16_utils import AutoMixedPrecisionLists -from paddle.fluid.contrib.mixed_precision.fp16_utils import _keep_fp32_input, _keep_fp32_output, find_op_index -from paddle.fluid.contrib.mixed_precision.fp16_utils import _valid_types, find_true_post_op, find_true_prev_op -from paddle.fluid.contrib.mixed_precision.fp16_utils import _is_in_black_varnames, _dtype_to_str, _rename_arg -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute +from paddle.distributed.auto_parallel.utils import ( + get_loss_op, + set_var_dist_attr, +) +from paddle.distributed.auto_parallel.utils import ( + naive_set_dist_op_attr_for_program_by_mesh_and_mapping, +) +from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + AutoMixedPrecisionLists, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _keep_fp32_input, + _keep_fp32_output, + find_op_index, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _valid_types, + find_true_post_op, + find_true_prev_op, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _is_in_black_varnames, + _dtype_to_str, + _rename_arg, +) +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) from ..auto_parallel.utils import is_forward_op, is_backward_op, is_loss_op world_process_group = get_world_process_group() class AMPState(object): - def __init__(self, block): self._block = block - self._op_fp16_dict = { - } # op_id --> True/False. 'True' means that the current op is in fp16 mode. + self._op_fp16_dict = ( + {} + ) # op_id --> True/False. 'True' means that the current op is in fp16 mode. 
self._var_name_dict = {} # fwd_op_id --> {old_name: cast_name} self.is_train = False @@ -55,7 +78,8 @@ class AMPState(object): elif int(op.attr('op_role')) == int(OpRole.Backward): if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: fwd_op_id = dist_op_context.grad_op_id_to_op_id[ - op.desc.original_id()] + op.desc.original_id() + ] if self._is_fp16_op(fwd_op_id) == True: self._op_fp16_dict[op.desc.original_id()] = True elif self._is_fp16_op(fwd_op_id) == False: @@ -78,7 +102,8 @@ class AMPState(object): if op.type == 'create_py_reader' or op.type == 'read': continue if amp_lists.black_varnames is not None and _is_in_black_varnames( - op, amp_lists): + op, amp_lists + ): self._op_fp16_dict[op.desc.original_id()] = False continue if op.type in amp_lists.black_list: @@ -98,17 +123,24 @@ class AMPState(object): continue elif in_var.op is op: prev_op = find_true_prev_op( - ops, op, in_var_name) + ops, op, in_var_name + ) if prev_op is None: continue else: prev_op = in_var.op # if it's one of inputs - if self._is_fp16_op(prev_op.desc.original_id()) == False or \ - prev_op.type in amp_lists.black_list: + if ( + self._is_fp16_op(prev_op.desc.original_id()) + == False + or prev_op.type in amp_lists.black_list + ): is_black_op = True - elif self._is_fp16_op(prev_op.desc.original_id()) == True or \ - prev_op.type in amp_lists.white_list: + elif ( + self._is_fp16_op(prev_op.desc.original_id()) + == True + or prev_op.type in amp_lists.white_list + ): is_white_op = True if is_black_op: self._op_fp16_dict[op.desc.original_id()] = False @@ -131,19 +163,28 @@ class AMPState(object): break if self._is_fp16_op(op.desc.original_id()) == False: num_cast_ops = self._insert_cast_op_forward( - op, idx, core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32, dist_context) + op, + idx, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + dist_context, + ) elif self._is_fp16_op(op.desc.original_id()) == True: num_cast_ops = self._insert_cast_op_forward( - op, idx, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16, dist_context) + op, + idx, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, + dist_context, + ) else: pass idx += num_cast_ops + 1 self._block._sync_with_cpp() - def _insert_cast_op_forward(self, op, idx, src_dtype, dst_dtype, - dist_context): + def _insert_cast_op_forward( + self, op, idx, src_dtype, dst_dtype, dist_context + ): """ only for forward cast modified from paddle.fluid.contrib.mixed_precision @@ -152,38 +193,45 @@ class AMPState(object): var_name_dict = {} for in_name in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input( - op, in_name): + op, in_name + ): continue for in_var_name in op.input(in_name): in_var = self._block._find_var_recursive(in_var_name) if in_var.type not in _valid_types or in_var.dtype == dst_dtype: continue if in_var.dtype == src_dtype: - cast_name = in_var.name + '.cast_' + _dtype_to_str( - dst_dtype) + cast_name = ( + in_var.name + '.cast_' + _dtype_to_str(dst_dtype) + ) out_var = self._block.vars.get(cast_name) var_name_dict[in_var.name] = cast_name consume_op_attr = dist_context.get_op_dist_attr_for_program( - op) + op + ) assert consume_op_attr is not None if out_var is None or out_var.dtype != dst_dtype: # NOTE we make the cast op and var's dist attr as the op that consume the # cast var instead of the op which generates the var in_var_dist_attr = consume_op_attr.get_input_dist_attr( - in_var.name) + in_var.name + ) assert in_var_dist_attr is not None ref_mesh = in_var_dist_attr.process_mesh ref_mapping = 
in_var_dist_attr.dims_mapping consume_op_attr.set_input_dist_attr( - cast_name, in_var_dist_attr) + cast_name, in_var_dist_attr + ) out_var = self._block.create_var( name=cast_name, dtype=dst_dtype, persistable=False, - stop_gradient=in_var.stop_gradient) - set_var_dist_attr(dist_context, out_var, ref_mapping, - ref_mesh) + stop_gradient=in_var.stop_gradient, + ) + set_var_dist_attr( + dist_context, out_var, ref_mapping, ref_mesh + ) cast_op = self._block._insert_op_without_sync( idx, @@ -193,22 +241,29 @@ class AMPState(object): attrs={ "in_dtype": in_var.dtype, "out_dtype": out_var.dtype, - }) + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_op, ref_mesh, ref_mapping, dist_context) + cast_op, ref_mesh, ref_mapping, dist_context + ) num_cast_ops += 1 else: in_var_dist_attr = consume_op_attr.get_input_dist_attr( - in_var.name) + in_var.name + ) consume_op_attr.set_input_dist_attr( - cast_name, in_var_dist_attr) + cast_name, in_var_dist_attr + ) _rename_arg(op, in_var.name, cast_name) else: if op.has_attr('in_dtype'): op._set_attr('in_dtype', dst_dtype) self._var_name_dict[op.desc.original_id()] = var_name_dict - if src_dtype == core.VarDesc.VarType.FP32 and dst_dtype == core.VarDesc.VarType.FP16: + if ( + src_dtype == core.VarDesc.VarType.FP32 + and dst_dtype == core.VarDesc.VarType.FP16 + ): for out_name in op.output_names: if _keep_fp32_output(op, out_name): continue @@ -238,8 +293,9 @@ class AMPState(object): # NOTE: the map in `grad_var_to_var` may be changed when the var is casted, # which will affect the dist_op to insert allreduce_sum op. op_dist_attr = dist_context.get_op_dist_attr_for_program(grad_op) - if is_backward_op(grad_op) and (is_forward_op(ops[idx - 1]) - or is_loss_op(ops[idx - 1])): + if is_backward_op(grad_op) and ( + is_forward_op(ops[idx - 1]) or is_loss_op(ops[idx - 1]) + ): if not op_dist_attr.is_recompute: appended_grad_times += 1 @@ -248,14 +304,22 @@ class AMPState(object): if grad_op_orig_id in dist_op_context.grad_op_id_to_op_id: if self._is_fp16_op(grad_op_orig_id) == False: # fp32 num_cast_ops = self._insert_cast_op_backward( - grad_op, idx, core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32, dist_context, - appended_grad_times) + grad_op, + idx, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + dist_context, + appended_grad_times, + ) elif self._is_fp16_op(grad_op_orig_id) == True: # fp16 num_cast_ops = self._insert_cast_op_backward( - grad_op, idx, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16, dist_context, - appended_grad_times) + grad_op, + idx, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, + dist_context, + appended_grad_times, + ) elif grad_op.type == "sum": in_var_name = grad_op.desc.input_arg_names()[0] src_dtype = self._block.var(in_var_name).dtype @@ -270,15 +334,24 @@ class AMPState(object): else: raise ValueError( "'{}' op is not supported in the complete amp pass.".format( - grad_op.type)) + grad_op.type + ) + ) idx += num_cast_ops + 1 self._block._sync_with_cpp() _update_backward_cast_ops(params_grads, dist_context) - def _insert_cast_op_backward(self, grad_op, idx, src_dtype, dst_dtype, - dist_context, appended_grad_times): - """ only for backward cast """ + def _insert_cast_op_backward( + self, + grad_op, + idx, + src_dtype, + dst_dtype, + dist_context, + appended_grad_times, + ): + """only for backward cast""" def _keep_fp32_input(op, in_name): op_type = op.type @@ -299,7 +372,8 @@ class AMPState(object): for in_name in grad_op.input_names: if src_dtype == core.VarDesc.VarType.FP32 
and _keep_fp32_input( - grad_op, in_name): + grad_op, in_name + ): for in_var_name in grad_op.input(in_name): in_var = self._block._find_var_recursive(in_var_name) assert in_var.dtype == core.VarDesc.VarType.FP32 @@ -309,24 +383,34 @@ class AMPState(object): in_var = self._block._find_var_recursive(in_var_name) if in_var.dtype == src_dtype: consume_op_attr = dist_context.get_op_dist_attr_for_program( - grad_op) + grad_op + ) if in_var_name in self._var_name_dict[fwd_op_id]: # NOTE: if in_var of consume grad_op has been casted before, # it should be renamed and reset dist_attr. cast_name = self._var_name_dict[fwd_op_id][in_var_name] grad_op.desc._rename_input(in_var_name, cast_name) in_var_dist_attr = consume_op_attr.get_input_dist_attr( - in_var_name) + in_var_name + ) consume_op_attr.set_input_dist_attr( - cast_name, in_var_dist_attr) + cast_name, in_var_dist_attr + ) else: - assert in_var.dtype == dst_dtype, "op [{}] expect input [{}] to be dtype [{}] BUT got [{}]. {}".format( - grad_op.type, in_name, dst_dtype, in_var.dtype, - str(grad_op)) + assert ( + in_var.dtype == dst_dtype + ), "op [{}] expect input [{}] to be dtype [{}] BUT got [{}]. {}".format( + grad_op.type, + in_name, + dst_dtype, + in_var.dtype, + str(grad_op), + ) for out_name in grad_op.output_names: if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_output( - grad_op, out_name): + grad_op, out_name + ): for out_var_name in grad_op.output(out_name): out_var = self._block._find_var_recursive(out_var_name) assert out_var.dtype == core.VarDesc.VarType.FP32 @@ -334,7 +418,7 @@ class AMPState(object): for out_var_name in grad_op.output(out_name): out_var = self._block._find_var_recursive(out_var_name) - out_var_name_prefix = out_var_name[:out_var_name.find("@")] + out_var_name_prefix = out_var_name[: out_var_name.find("@")] fwd_var = self._block._find_var_recursive(out_var_name_prefix) # NOTE: the out_var's dtype of consume grad_op should equal to the fwd_var's dtype if out_var.dtype != fwd_var.dtype: @@ -345,34 +429,45 @@ class AMPState(object): # NOTE: if out_var of consume grad_op has been casted before, # it should be renamed and reset dist_attr, then we insert cast op to # convert the cast_var to original dtype - consume_op_attr = dist_context.get_op_dist_attr_for_program( - grad_op) + consume_op_attr = ( + dist_context.get_op_dist_attr_for_program(grad_op) + ) fwd_cast_name = self._var_name_dict[fwd_op_id][ - out_var_name_prefix] + out_var_name_prefix + ] suffix = "" if "@RENAME" in out_var_name: - suffix = out_var_name[out_var_name.find("@RENAME"):] + suffix = out_var_name[ + out_var_name.find("@RENAME") : + ] cast_name = fwd_cast_name + "@GRAD" + suffix cast_var = self._block.vars.get(cast_name) if cast_var is None or cast_var.dtype != dst_dtype: grad_op.desc._rename_output(out_var_name, cast_name) - out_var_dist_attr = consume_op_attr.get_output_dist_attr( - out_var_name) + out_var_dist_attr = ( + consume_op_attr.get_output_dist_attr( + out_var_name + ) + ) ref_mesh = out_var_dist_attr.process_mesh ref_mapping = out_var_dist_attr.dims_mapping consume_op_attr.set_output_dist_attr( - cast_name, out_var_dist_attr) + cast_name, out_var_dist_attr + ) assert ref_mapping is not None cast_var = self._block.create_var( name=cast_name, shape=out_var.shape, dtype=dst_dtype, persistable=False, - stop_gradient=out_var.stop_gradient) - set_var_dist_attr(dist_context, cast_var, - ref_mapping, ref_mesh) + stop_gradient=out_var.stop_gradient, + ) + set_var_dist_attr( + dist_context, cast_var, ref_mapping, ref_mesh + ) 
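# ----------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the hunks above
# only re-wrap AMPState._insert_cast_op_forward / _insert_cast_op_backward for
# black, and the bookkeeping they reformat is easy to lose in diff form.  The
# standalone toy code below mimics that bookkeeping with plain dicts instead of
# Paddle blocks, ops and dist attrs; every name in it (ToyOp,
# insert_forward_casts, ...) is hypothetical and exists only for illustration.
from dataclasses import dataclass, field


@dataclass
class ToyOp:
    type: str
    inputs: dict = field(default_factory=dict)   # slot name -> list of input var names
    outputs: dict = field(default_factory=dict)  # slot name -> list of output var names


def insert_forward_casts(ops, var_dtypes, fp16_op_ids, src="fp32", dst="fp16"):
    """Return (new_ops, var_name_dict); var_name_dict maps each rewritten op id
    to {original_input_name: cast_name}, mirroring what AMPState._var_name_dict
    records so the backward pass can rename the matching grad inputs later."""
    new_ops, var_name_dict = [], {}
    for op_id, op in enumerate(ops):
        renames = {}
        if op_id in fp16_op_ids:
            for slot, names in op.inputs.items():
                for name in names:
                    if var_dtypes.get(name) != src:
                        continue
                    cast_name = name + ".cast_" + dst
                    if cast_name not in var_dtypes:  # reuse an already created cast var
                        var_dtypes[cast_name] = dst
                        new_ops.append(
                            ToyOp("cast", {"X": [name]}, {"Out": [cast_name]})
                        )
                    renames[name] = cast_name
            # the consuming op now reads the cast variables instead of the originals
            op.inputs = {
                slot: [renames.get(n, n) for n in names]
                for slot, names in op.inputs.items()
            }
        var_name_dict[op_id] = renames
        new_ops.append(op)
    return new_ops, var_name_dict


if __name__ == "__main__":
    ops = [ToyOp("matmul", {"X": ["x"], "Y": ["w"]}, {"Out": ["y"]})]
    dtypes = {"x": "fp32", "w": "fp32", "y": "fp32"}
    new_ops, mapping = insert_forward_casts(ops, dtypes, fp16_op_ids={0})
    print([o.type for o in new_ops])  # ['cast', 'cast', 'matmul']
    print(mapping)                    # {0: {'x': 'x.cast_fp16', 'w': 'w.cast_fp16'}}
# ----------------------------------------------------------------------------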
dist_op_context.grad_var_to_var[ - appended_grad_times][cast_name] = fwd_cast_name + appended_grad_times + ][cast_name] = fwd_cast_name cast_op = self._block._insert_op( idx + 1, @@ -382,13 +477,15 @@ class AMPState(object): attrs={ "in_dtype": cast_var.dtype, "out_dtype": out_var.dtype, - "op_role": OpRole.Backward - }) + "op_role": OpRole.Backward, + }, + ) cast_op._remove_attr("op_role_var") cast_op._remove_attr("op_namescope") cast_op._remove_attr("with_quant_attr") naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_op, ref_mesh, ref_mapping, dist_context) + cast_op, ref_mesh, ref_mapping, dist_context + ) num_cast_ops += 1 else: assert out_var.dtype == dst_dtype @@ -409,15 +506,18 @@ def _update_backward_cast_ops(params_grads, dist_context): for p, g in params_grads: op = g.op if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast': - if int(op.attr('op_role')) == int( - OpRole.Backward) and op.has_attr('op_role_var'): + if int(op.attr('op_role')) == int(OpRole.Backward) and op.has_attr( + 'op_role_var' + ): op._remove_attr("op_role_var") post_ops = find_true_post_op(main_block.ops, op, g.name) if post_ops: - raise ValueError("The cast op {0}'s output should not be" - "used by a non-optimize op, however, it" - "is used by {1}".format(op, post_ops[0])) + raise ValueError( + "The cast op {0}'s output should not be" + "used by a non-optimize op, however, it" + "is used by {1}".format(op, post_ops[0]) + ) if op == main_block.ops[-1]: continue @@ -425,23 +525,29 @@ def _update_backward_cast_ops(params_grads, dist_context): # add new op in the python and cpp at the same time new_op_desc = main_block.desc.append_op() new_op_desc.copy_from(op.desc) - new_op = paddle.fluid.framework.Operator(block=main_block, - desc=new_op_desc, - type=None, - inputs=None, - outputs=None, - attrs=None) + new_op = paddle.fluid.framework.Operator( + block=main_block, + desc=new_op_desc, + type=None, + inputs=None, + outputs=None, + attrs=None, + ) main_block.ops.append(new_op) # dist attr param_dist_attr = dist_context.get_tensor_dist_attr_for_program(p) output_dist_attr = dist_context.get_tensor_dist_attr_for_program( - main_block.var(op.output_arg_names[0])) + main_block.var(op.output_arg_names[0]) + ) assert param_dist_attr is not None assert output_dist_attr is not None naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, param_dist_attr.process_mesh, - param_dist_attr.dims_mapping, dist_context) + new_op, + param_dist_attr.process_mesh, + param_dist_attr.dims_mapping, + dist_context, + ) output_dist_attr.process_mesh = param_dist_attr.process_mesh output_dist_attr.dims_mapping = param_dist_attr.dims_mapping @@ -462,26 +568,34 @@ def _check_and_update_gradient(params_grads, loss_scaling, dist_context): grads = [g for _, g in params_grads] check_type(grads, 'x', (tuple, list), 'check_finite_and_unscale') for e in grads: - check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'], - 'check_finite_and_unscale') + check_variable_and_dtype( + e, + "x", + ['float16', 'float32', 'float64'], + 'check_finite_and_unscale', + ) found_inf = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ['find_infinite_scale', 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join(['find_infinite_scale', 'tmp']) + ), shape=[1], dtype='bool', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) set_var_dist_attr(dist_context, found_inf, [-1], world_process_group.ranks) inputs = {'X': grads, 
'Scale': loss_scaling} outputs = {'Out': grads, 'FoundInfinite': found_inf} attrs = {'op_role': OpRole.Optimize} - new_op = main_block.append_op(type='check_finite_and_unscale', - inputs=inputs, - outputs=outputs, - attrs=attrs) + new_op = main_block.append_op( + type='check_finite_and_unscale', + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) new_op_dist_attr = OperatorDistributedAttribute() new_op_dist_attr.process_mesh = world_process_group.ranks @@ -491,17 +605,18 @@ def _check_and_update_gradient(params_grads, loss_scaling, dist_context): for g in grads: g_dist_attr = dist_context.get_tensor_dist_attr_for_program(g) assert g_dist_attr is not None - new_op_dist_attr.set_input_dims_mapping(g.name, - g_dist_attr.dims_mapping) - new_op_dist_attr.set_output_dims_mapping(g.name, - g_dist_attr.dims_mapping) + new_op_dist_attr.set_input_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) + new_op_dist_attr.set_output_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr) return grads, found_inf @register_pass("auto_parallel_amp") class AMPPass(PassBase): - def __init__(self): super(AMPPass, self).__init__() self.set_attr("loss", None) @@ -552,7 +667,8 @@ class AMPPass(PassBase): amp_lists = AutoMixedPrecisionLists( set(self.get_attr("custom_white_list")), set(self.get_attr("custom_black_list")), - set(self.get_attr("custom_black_varnames"))) + set(self.get_attr("custom_black_varnames")), + ) with paddle.static.program_guard(main_program, startup_program): amp_state = AMPState(main_program.global_block()) @@ -566,10 +682,13 @@ class AMPPass(PassBase): self._init_amp_var() self._scale_loss() - if self.get_attr("use_dynamic_loss_scaling" - ) or self.get_attr("init_loss_scaling") != 1.0: + if ( + self.get_attr("use_dynamic_loss_scaling") + or self.get_attr("init_loss_scaling") != 1.0 + ): grads, found_inf = _check_and_update_gradient( - params_grads, self._loss_scaling, self.dist_context) + params_grads, self._loss_scaling, self.dist_context + ) if self.get_attr("use_dynamic_loss_scaling"): self._update_loss_scaling(grads, found_inf) @@ -580,9 +699,14 @@ class AMPPass(PassBase): shape=[1], value=self.get_attr("init_loss_scaling"), dtype='float32', - persistable=True) - set_var_dist_attr(self.dist_context, self._loss_scaling, [-1], - world_process_group.ranks) + persistable=True, + ) + set_var_dist_attr( + self.dist_context, + self._loss_scaling, + [-1], + world_process_group.ranks, + ) if self.get_attr("use_dynamic_loss_scaling"): self._num_good_steps = paddle.static.create_global_var( @@ -590,18 +714,28 @@ class AMPPass(PassBase): shape=[1], value=0, dtype='int32', - persistable=True) - set_var_dist_attr(self.dist_context, self._num_good_steps, [-1], - world_process_group.ranks) + persistable=True, + ) + set_var_dist_attr( + self.dist_context, + self._num_good_steps, + [-1], + world_process_group.ranks, + ) self._num_bad_steps = paddle.static.create_global_var( name=unique_name.generate("num_bad_steps"), shape=[1], value=0, dtype='int32', - persistable=True) - set_var_dist_attr(self.dist_context, self._num_bad_steps, [-1], - world_process_group.ranks) + persistable=True, + ) + set_var_dist_attr( + self.dist_context, + self._num_bad_steps, + [-1], + world_process_group.ranks, + ) def _scale_loss(self): @@ -613,18 +747,22 @@ class AMPPass(PassBase): assert loss is not None loss_op = loss.op loss_op_dist_attr = self.dist_context.get_op_dist_attr_for_program( - loss_op) + loss_op + ) if loss.dtype != core.VarDesc.VarType.FP32: 
tmp_name = unique_name.generate(loss.name + ".cast_fp32") - cast_loss = main_block.create_var(name=tmp_name, - dtype=core.VarDesc.VarType.FP32) + cast_loss = main_block.create_var( + name=tmp_name, dtype=core.VarDesc.VarType.FP32 + ) loss_dist_attr = self.dist_context.get_tensor_dist_attr_for_program( - loss) + loss + ) ref_mesh = loss_op_dist_attr.process_mesh self.dist_context.set_tensor_dist_attr_for_program( - cast_loss, loss_dist_attr) + cast_loss, loss_dist_attr + ) # forward loss_op_idx = find_op_index(main_block.desc, loss_op.desc) @@ -637,22 +775,28 @@ class AMPPass(PassBase): "in_dtype": loss.dtype, "out_dtype": core.VarDesc.VarType.FP32, 'op_role': loss_op.all_attrs()[OP_ROLE_KEY], - }) + }, + ) - loss_op._set_attr(OP_ROLE_KEY, - core.op_proto_and_checker_maker.OpRole.Forward) + loss_op._set_attr( + OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Forward + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_op, ref_mesh, [-1], self.dist_context) + cast_op, ref_mesh, [-1], self.dist_context + ) # backward first_backward_op = main_block.ops[loss_op_idx + 2] - assert first_backward_op.type == "fill_constant" and int( - first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257 + assert ( + first_backward_op.type == "fill_constant" + and int(first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257 + ) cast_loss_grad = main_block.create_var( name=unique_name.generate(tmp_name + "@GRAD"), shape=loss.shape, dtype=core.VarDesc.VarType.FP32, - persistable=loss.persistable) + persistable=loss.persistable, + ) set_var_dist_attr(self.dist_context, cast_loss_grad, [-1], ref_mesh) pre_grad_name = first_backward_op.output_arg_names[0] @@ -666,14 +810,18 @@ class AMPPass(PassBase): "in_dtype": core.VarDesc.VarType.FP32, "out_dtype": core.VarDesc.VarType.FP16, 'op_role': core.op_proto_and_checker_maker.OpRole.Backward, - }) + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_grad_op, ref_mesh, [-1], self.dist_context) + cast_grad_op, ref_mesh, [-1], self.dist_context + ) loss_op = cast_op loss = cast_loss - if self.get_attr("use_dynamic_loss_scaling" - ) or self.get_attr("init_loss_scaling") != 1.0: + if ( + self.get_attr("use_dynamic_loss_scaling") + or self.get_attr("init_loss_scaling") != 1.0 + ): loss_op_idx = find_op_index(main_block.desc, loss_op.desc) @@ -683,63 +831,76 @@ class AMPPass(PassBase): name=unique_name.generate("scaled_loss"), shape=loss.shape, dtype=loss.dtype, - persistable=loss.persistable) - set_var_dist_attr(self.dist_context, self._scaled_loss, [-1], - ref_mesh) + persistable=loss.persistable, + ) + set_var_dist_attr( + self.dist_context, self._scaled_loss, [-1], ref_mesh + ) elementwise_mul_op = main_block._insert_op( loss_op_idx + 1, type='elementwise_mul', - inputs={ - 'X': [loss], - 'Y': [self._loss_scaling] - }, + inputs={'X': [loss], 'Y': [self._loss_scaling]}, outputs={'Out': [self._scaled_loss]}, attrs={ 'op_role': loss_op.all_attrs()[OP_ROLE_KEY], - }) - loss_op._set_attr(OP_ROLE_KEY, - core.op_proto_and_checker_maker.OpRole.Forward) + }, + ) + loss_op._set_attr( + OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Forward + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - elementwise_mul_op, ref_mesh, [-1], self.dist_context) + elementwise_mul_op, ref_mesh, [-1], self.dist_context + ) # backward first_backward_op = main_block.ops[loss_op_idx + 2] - assert first_backward_op.type == "fill_constant" and int( - first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257 + assert ( + first_backward_op.type == "fill_constant" + and 
int(first_backward_op.all_attrs()[OP_ROLE_KEY]) == 257 + ) self._scaled_loss_grad = main_block.create_var( name=unique_name.generate("scaled_loss") + "@GRAD", shape=loss.shape, dtype=loss.dtype, - persistable=loss.persistable) - set_var_dist_attr(self.dist_context, self._scaled_loss_grad, [-1], - ref_mesh) + persistable=loss.persistable, + ) + set_var_dist_attr( + self.dist_context, self._scaled_loss_grad, [-1], ref_mesh + ) pre_grad_name = first_backward_op.output_arg_names[0] - first_backward_op._rename_output(pre_grad_name, - self._scaled_loss_grad.name) + first_backward_op._rename_output( + pre_grad_name, self._scaled_loss_grad.name + ) # FIXME(JZ-LIANG) a trick to insert backward op main_block._sync_with_cpp() elementwise_mul_grad_op_desc = main_block.desc._insert_op( - loss_op_idx + 3) + loss_op_idx + 3 + ) elementwise_mul_grad_op_desc.set_type("elementwise_mul_grad") elementwise_mul_grad_op_desc.set_input( - 'Out@GRAD', [self._scaled_loss_grad.name]) + 'Out@GRAD', [self._scaled_loss_grad.name] + ) elementwise_mul_grad_op_desc.set_input('X', [loss.name]) - elementwise_mul_grad_op_desc.set_input('Y', - [self._loss_scaling.name]) + elementwise_mul_grad_op_desc.set_input( + 'Y', [self._loss_scaling.name] + ) elementwise_mul_grad_op_desc.set_output('X@GRAD', [pre_grad_name]) elementwise_mul_grad_op_desc.set_output('Y@GRAD', []) elementwise_mul_grad_op_desc._set_attr( - OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Backward) + OP_ROLE_KEY, core.op_proto_and_checker_maker.OpRole.Backward + ) elementwise_mul_grad_op_desc._set_attr('axis', -1) elementwise_mul_grad_op = paddle.fluid.framework.Operator( - main_block, elementwise_mul_grad_op_desc) + main_block, elementwise_mul_grad_op_desc + ) main_block.ops.insert(loss_op_idx + 3, elementwise_mul_grad_op) main_block._sync_with_cpp() elementwise_mul_grad_op = main_block.ops[loss_op_idx + 3] assert elementwise_mul_grad_op.type == "elementwise_mul_grad" naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - elementwise_mul_grad_op, ref_mesh, [-1], self.dist_context) + elementwise_mul_grad_op, ref_mesh, [-1], self.dist_context + ) else: self._scaled_loss = loss @@ -751,31 +912,39 @@ class AMPPass(PassBase): main_block = paddle.static.default_main_program().global_block() main_block._sync_with_cpp() - check_variable_and_dtype(self._loss_scaling, "prev_loss_scaling", - ['float32', 'float64'], "update_loss_scaling") + check_variable_and_dtype( + self._loss_scaling, + "prev_loss_scaling", + ['float32', 'float64'], + "update_loss_scaling", + ) check_type(grads, 'x', (tuple, list), 'update_loss_scaling') for e in grads: - check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'], - 'update_loss_scaling') + check_variable_and_dtype( + e, "x", ['float16', 'float32', 'float64'], 'update_loss_scaling' + ) if e.dtype == core.VarDesc.VarType.FP16: - assert self._loss_scaling.dtype == core.VarDesc.VarType.FP32, \ - "The dtype of prev_loss_scaling should be float32 when the dtype of x is float16." + assert ( + self._loss_scaling.dtype == core.VarDesc.VarType.FP32 + ), "The dtype of prev_loss_scaling should be float32 when the dtype of x is float16." else: - assert self._loss_scaling.dtype == e.dtype, "The dtype of prev_loss_scaling should be equal to the dtype of x." + assert ( + self._loss_scaling.dtype == e.dtype + ), "The dtype of prev_loss_scaling should be equal to the dtype of x." 
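# ----------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the surrounding
# hunks wire the gradients, the found_inf flag, the loss-scaling variable and
# the good/bad step counters into Paddle's update_loss_scaling op, which is
# appended just below.  The helper here is a plain-Python approximation of the
# conventional dynamic loss-scaling rule those inputs suggest; the exact
# semantics live in the op itself, and the keyword-argument defaults are made
# up for the example (only incr_ratio / decr_ratio and the step counters are
# visible in the patch).
def update_loss_scaling_py(
    scale,
    good_steps,
    bad_steps,
    found_inf,
    incr_every_n_steps=1000,
    decr_every_n_nan_or_inf=2,
    incr_ratio=2.0,
    decr_ratio=0.5,
):
    """Return the updated (scale, good_steps, bad_steps) triple."""
    if found_inf:
        # an overflow resets the good-step streak and may shrink the scale
        good_steps = 0
        bad_steps += 1
        if bad_steps >= decr_every_n_nan_or_inf:
            scale *= decr_ratio
            bad_steps = 0
    else:
        # a clean step resets the bad-step streak and may grow the scale
        bad_steps = 0
        good_steps += 1
        if good_steps >= incr_every_n_steps:
            scale *= incr_ratio
            good_steps = 0
    return scale, good_steps, bad_steps


if __name__ == "__main__":
    state = (32768.0, 0, 0)
    for overflow in [False, False, True, True, False]:
        state = update_loss_scaling_py(*state, found_inf=overflow)
    print(state)  # (16384.0, 1, 0): two consecutive overflows halve the scale once
# ----------------------------------------------------------------------------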
inputs = { 'X': grads, 'FoundInfinite': found_inf, 'PrevLossScaling': self._loss_scaling, 'InGoodSteps': self._num_good_steps, - 'InBadSteps': self._num_bad_steps + 'InBadSteps': self._num_bad_steps, } outputs = { 'Out': grads, 'LossScaling': self._loss_scaling, 'OutGoodSteps': self._num_good_steps, - 'OutBadSteps': self._num_bad_steps + 'OutBadSteps': self._num_bad_steps, } attrs = { @@ -784,13 +953,15 @@ class AMPPass(PassBase): 'incr_ratio': self.get_attr("incr_ratio"), 'decr_ratio': self.get_attr("decr_ratio"), 'stop_update': self.get_attr("stop_update"), - 'op_role': OpRole.Optimize + 'op_role': OpRole.Optimize, } - new_op = main_block.append_op(type='update_loss_scaling', - inputs=inputs, - outputs=outputs, - attrs=attrs) + new_op = main_block.append_op( + type='update_loss_scaling', + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) new_op_dist_attr = OperatorDistributedAttribute() new_op_dist_attr.process_mesh = world_process_group.ranks @@ -800,10 +971,12 @@ class AMPPass(PassBase): for g in grads: g_dist_attr = self.dist_context.get_tensor_dist_attr_for_program(g) assert g_dist_attr is not None - new_op_dist_attr.set_input_dims_mapping(g.name, - g_dist_attr.dims_mapping) - new_op_dist_attr.set_output_dims_mapping(g.name, - g_dist_attr.dims_mapping) + new_op_dist_attr.set_input_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) + new_op_dist_attr.set_output_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) self.dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr) main_block._sync_with_cpp() diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py index 8470aa510996128119c2561313d49490897d74e8..fc938d6523030431ad6b43b8234eb4426ebe8508 100644 --- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py +++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py @@ -19,14 +19,25 @@ import paddle from paddle.fluid import unique_name from paddle.fluid.framework import default_main_program from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from paddle.distributed.auto_parallel.operators.common import is_data_parallel_scale_op, is_data_parallel_reduce_op -from paddle.distributed.auto_parallel.utils import find_higher_order_backward_op, is_loss_grad_op, is_optimize_op, ring_id_to_process_group +from paddle.distributed.auto_parallel.operators.common import ( + is_data_parallel_scale_op, + is_data_parallel_reduce_op, +) +from paddle.distributed.auto_parallel.utils import ( + find_higher_order_backward_op, + is_loss_grad_op, + is_optimize_op, + ring_id_to_process_group, +) from .pass_base import PassBase, PassType, register_pass # add new optimizers supporting rescale_grad here __rescale_grad_supported_opts__ = [ - 'lars_momentum', 'sparse_momentum', 'dgc_momentum', 'momentum', - 'merge_momentum' + 'lars_momentum', + 'sparse_momentum', + 'dgc_momentum', + 'momentum', + 'merge_momentum', ] # a heuristic number @@ -62,8 +73,9 @@ class DataParallelOptimizationPass(PassBase): def _check_self(self): if self.get_attr("dist_context") is None: return False - if (not isinstance(self.get_attr("global_rank"), - int)) or self.get_attr("global_rank") < 0: + if (not isinstance(self.get_attr("global_rank"), int)) or self.get_attr( + "global_rank" + ) < 0: return False return True @@ -140,8 +152,11 @@ class DataParallelOptimizationPass(PassBase): ), "Unexception: comm op [{}] has NOT ring 
id.".format(str(op)) group = ring_id_to_process_group(op.attr("ring_id")) - assert group is not None, "Unexception: data parallel group of [{}] from op [{}] is None".format( - grad_name, str(op)) + assert ( + group is not None + ), "Unexception: data parallel group of [{}] from op [{}] is None".format( + grad_name, str(op) + ) self._grad_name_to_group_map[grad_name] = group @@ -156,18 +171,21 @@ class DataParallelOptimizationPass(PassBase): # TODO support multiple optimizers in on network in future. # here we assume that the optimizer is unique in network. - elif is_optimize_op( - op) and op.type in __rescale_grad_supported_opts__: + elif ( + is_optimize_op(op) + and op.type in __rescale_grad_supported_opts__ + ): self._support_rescale_grad = True not_synchronized_grads = [] for grad_name in scaled_grads: if grad_name not in self._grad_name_to_group_map: not_synchronized_grads.append(grad_name) - assert len( + assert ( + len(not_synchronized_grads) == 0 + ), "Unexception: gradients [{}] is scaled BUT NOT synchronized.".format( not_synchronized_grads - ) == 0, "Unexception: gradients [{}] is scaled BUT NOT synchronized.".format( - not_synchronized_grads) + ) def is_data_parallel_applied(self): return len(self._group_to_grad_name_map) > 0 @@ -175,14 +193,21 @@ class DataParallelOptimizationPass(PassBase): def _could_be_prune(self): return self.dist_context.gradient_scale and ( - self._support_rescale_grad or self._all_dp_groups_same_degree()) + self._support_rescale_grad or self._all_dp_groups_same_degree() + ) def _all_dp_groups_same_degree(self): - return len( - set([ - len(group.ranks) - for group in self._group_to_grad_name_map.keys() - ])) == 1 + return ( + len( + set( + [ + len(group.ranks) + for group in self._group_to_grad_name_map.keys() + ] + ) + ) + == 1 + ) def _scale_backward_initial_grad(self): @@ -191,9 +216,10 @@ class DataParallelOptimizationPass(PassBase): for idx, op in reversed(list(enumerate(block.ops))): if is_loss_grad_op(op): - assert op.type == 'fill_constant', \ - "loss_grad_op must be fill_constant op, " \ + assert op.type == 'fill_constant', ( + "loss_grad_op must be fill_constant op, " "but this op is {}".format(op.type) + ) assert op.has_attr('value') loss_scale = float(op.attr('value')) loss_scale = loss_scale / dp_degree @@ -215,28 +241,35 @@ class DataParallelOptimizationPass(PassBase): scaled_grads = set() for idx, op in reversed(list(enumerate(block.ops))): - if is_optimize_op( - op) and op.type in __rescale_grad_supported_opts__: + if ( + is_optimize_op(op) + and op.type in __rescale_grad_supported_opts__ + ): assert op.has_attr( 'rescale_grad' ), "Unexception: op [{}] is supported to have [rescale_grad] attribute.".format( - str(op)) - assert len( - op.input("Grad") - ) == 1, "Unexception: op [{}] is supported to have only one input grad var.".format( - str(op)) + str(op) + ) + assert ( + len(op.input("Grad")) == 1 + ), "Unexception: op [{}] is supported to have only one input grad var.".format( + str(op) + ) grad_name = op.input("Grad")[0] dp_degree = len( - list(self._grad_name_to_group_map[grad_name].ranks)) + list(self._grad_name_to_group_map[grad_name].ranks) + ) scaled_grads.add(grad_name) rescale_grad = float(op.attr('rescale_grad')) / dp_degree op._set_attr('rescale_grad', rescale_grad) - assert scaled_grads == set(self._grad_name_to_group_map.keys( - )), "Unexception: gradients [{}] are unscaled.".format( - set(self._grad_name_to_group_map.keys()) - scaled_grads) + assert scaled_grads == set( + self._grad_name_to_group_map.keys() + ), 
"Unexception: gradients [{}] are unscaled.".format( + set(self._grad_name_to_group_map.keys()) - scaled_grads + ) def _could_be_overlap(self): # NOTE current different nccl comm will use different cuda stream @@ -266,14 +299,13 @@ class DataParallelOptimizationPass(PassBase): op._set_attr('use_calc_stream', False) ring_id = op.attr("ring_id") - block._insert_op_without_sync(idx, - type='c_wait_compute', - inputs={'X': []}, - outputs={'Out': []}, - attrs={ - 'op_role': OpRole.Backward, - 'ring_id': ring_id - }) + block._insert_op_without_sync( + idx, + type='c_wait_compute', + inputs={'X': []}, + outputs={'Out': []}, + attrs={'op_role': OpRole.Backward, 'ring_id': ring_id}, + ) block._sync_with_cpp() @@ -307,8 +339,10 @@ class DataParallelOptimizationPass(PassBase): # other ops that might use communicating grad else: for input_var_name in op.input_arg_names: - for ring_id, unsync_grad_names in ring_id_to_un_sync_grad_map.items( - ): + for ( + ring_id, + unsync_grad_names, + ) in ring_id_to_un_sync_grad_map.items(): if input_var_name in unsync_grad_names: # need to sync before op_i if i in op_idx_to_sync_ring_id_map: @@ -328,14 +362,13 @@ class DataParallelOptimizationPass(PassBase): for i in sorted(indices, reverse=True): for ring_id in op_idx_to_sync_ring_id_map[i]: - block._insert_op_without_sync(i, - type='c_wait_comm', - inputs={'X': []}, - outputs={'Out': []}, - attrs={ - 'op_role': OpRole.Backward, - 'ring_id': ring_id - }) + block._insert_op_without_sync( + i, + type='c_wait_comm', + inputs={'X': []}, + outputs={'Out': []}, + attrs={'op_role': OpRole.Backward, 'ring_id': ring_id}, + ) def _could_be_fuse(self): # TODO support gradient fuse higher order gradient. @@ -423,36 +456,49 @@ class DataParallelOptimizationPass(PassBase): for i, group in enumerate(grad_groups[::-1]): # create coalecse tensor - group.coalesce_var = block.create_var(name=unique_name.generate( - 'coalecse_grad_{}'.format(i)), - dtype=group.dtype, - persistable=False, - stop_gradient=True) + group.coalesce_var = block.create_var( + name=unique_name.generate('coalecse_grad_{}'.format(i)), + dtype=group.dtype, + persistable=False, + stop_gradient=True, + ) # update allreduce & scale op if group.scale_op_idx != -1: scale_op = block.ops[group.scale_op_idx] - assert scale_op.type == 'scale', "should found scale op but found {}".format( - str(scale_op)) - scale_op._rename_input(scale_op.input_arg_names[0], - group.coalesce_var.name) - scale_op._rename_output(scale_op.output_arg_names[0], - group.coalesce_var.name) + assert ( + scale_op.type == 'scale' + ), "should found scale op but found {}".format(str(scale_op)) + scale_op._rename_input( + scale_op.input_arg_names[0], group.coalesce_var.name + ) + scale_op._rename_output( + scale_op.output_arg_names[0], group.coalesce_var.name + ) allreduce_op = block.ops[group.allreduce_op_idx] - assert allreduce_op.type == 'c_allreduce_sum', "should found c_allreduce_sum op but found {}".format( - str(allreduce_op)) - allreduce_op._rename_input(allreduce_op.input_arg_names[0], - group.coalesce_var.name) - allreduce_op._rename_output(allreduce_op.output_arg_names[0], - group.coalesce_var.name) + assert ( + allreduce_op.type == 'c_allreduce_sum' + ), "should found c_allreduce_sum op but found {}".format( + str(allreduce_op) + ) + allreduce_op._rename_input( + allreduce_op.input_arg_names[0], group.coalesce_var.name + ) + allreduce_op._rename_output( + allreduce_op.output_arg_names[0], group.coalesce_var.name + ) # remvoe un-used op - remove_op_indices = group.remove_wait_op_indices + 
group.remove_allreduce_op_indices + group.remove_scale_op_indices + remove_op_indices = ( + group.remove_wait_op_indices + + group.remove_allreduce_op_indices + + group.remove_scale_op_indices + ) for idx in sorted(remove_op_indices, reverse=True): - assert block.ops[ - idx].type in remove_op_types, "Unexception: try to remove op {}".format( - str(op)) + assert ( + block.ops[idx].type in remove_op_types + ), "Unexception: try to remove op {}".format(str(op)) block._remove_op(idx) # insert coalecse op @@ -464,22 +510,23 @@ class DataParallelOptimizationPass(PassBase): concated_ranks.append(len(shape)) grad_names = [grad.name for grad in group.gradients] - block._insert_op_without_sync(group.coalesce_op_idx, - type="coalesce_tensor", - inputs={"Input": grad_names}, - outputs={ - "Output": grad_names, - "FusedOutput": group.coalesce_var - }, - attrs={ - "copy_data": False, - "use_align": True, - "dtype": group.dtype, - "concated_shapes": - concated_shapes, - "concated_ranks": concated_ranks, - OP_ROLE_KEY: OpRole.Backward - }) + block._insert_op_without_sync( + group.coalesce_op_idx, + type="coalesce_tensor", + inputs={"Input": grad_names}, + outputs={ + "Output": grad_names, + "FusedOutput": group.coalesce_var, + }, + attrs={ + "copy_data": False, + "use_align": True, + "dtype": group.dtype, + "concated_shapes": concated_shapes, + "concated_ranks": concated_ranks, + OP_ROLE_KEY: OpRole.Backward, + }, + ) block._sync_with_cpp() # TODO update dist attr @@ -487,6 +534,7 @@ class DataParallelOptimizationPass(PassBase): def summary(self, grad_groups=[]): # TODO: add logger module import logging + self._logger = logging.getLogger() self._logger.propagate = False if not self._logger.handlers: @@ -500,26 +548,31 @@ class DataParallelOptimizationPass(PassBase): if len(grad_groups) > 0: self._logger.info( - "origin {} allreduce ops are fused into {} coalecse allreduce ops." 
- .format(len(self._grad_name_to_group_map.keys()), - len(grad_groups))) + "origin {} allreduce ops are fused into {} coalecse allreduce ops.".format( + len(self._grad_name_to_group_map.keys()), len(grad_groups) + ) + ) self._logger.info("gradient fusing group are following: ") fused_grads = set() for i, group in enumerate(grad_groups): self._logger.info( "coalecse gradient [{}] is composed by: {}".format( - i, [grad.name for grad in group.gradients])) + i, [grad.name for grad in group.gradients] + ) + ) fused_grads.update([grad.name for grad in group.gradients]) - individual_grads = set( - self._grad_name_to_group_map.keys()) - set(fused_grads) + individual_grads = set(self._grad_name_to_group_map.keys()) - set( + fused_grads + ) self._logger.info( "the following [{}] gradients are not fused: ".format( - len(individual_grads))) + len(individual_grads) + ) + ) self._logger.info("individual gradient {}".format(individual_grads)) class GradientsGroup(object): - def __init__(self, ops, max_group_size): self.max_group_size = max_group_size self.ops = ops @@ -575,8 +628,11 @@ class GradientsGroup(object): grad_op_idx -= 1 grad_op = self.ops[grad_op_idx] - assert grad_var.name in grad_op.output_arg_names, "grad [{}] should be output of {}".format( - grad_var.name, str(grad_op)) + assert ( + grad_var.name in grad_op.output_arg_names + ), "grad [{}] should be output of {}".format( + grad_var.name, str(grad_op) + ) self.coalesce_op_idx = grad_op_idx def finalize(self): diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index 35a4831af3acef07cc43b2a93868df76bb8e8aba..cf1b2d45290bd4143e219e789cc3f24ee209b62a 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -20,12 +20,31 @@ from paddle.fluid.framework import default_main_program, default_startup_program from paddle.fluid import unique_name from .pass_base import register_pass from paddle.fluid.data_feeder import check_variable_and_dtype, check_type -from paddle.distributed.auto_parallel.utils import set_var_dist_attr, naive_set_dist_op_attr_for_program_by_mesh_and_mapping -from paddle.distributed.auto_parallel.process_group import get_world_process_group -from paddle.fluid.contrib.mixed_precision.fp16_utils import AutoMixedPrecisionLists -from paddle.fluid.contrib.mixed_precision.fp16_utils import _keep_layer_norm_scale_bias_to_fp32, _need_keep_fp32, _valid_types, _dtype_to_str -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute -from paddle.distributed.auto_parallel.utils import is_forward_op, is_backward_op, OP_ROLE_KEY, OpRole +from paddle.distributed.auto_parallel.utils import ( + set_var_dist_attr, + naive_set_dist_op_attr_for_program_by_mesh_and_mapping, +) +from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + AutoMixedPrecisionLists, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _keep_layer_norm_scale_bias_to_fp32, + _need_keep_fp32, + _valid_types, + _dtype_to_str, +) +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) +from paddle.distributed.auto_parallel.utils import ( + is_forward_op, + is_backward_op, + OP_ROLE_KEY, + OpRole, +) from .auto_parallel_amp import AMPPass world_process_group = get_world_process_group() @@ -39,11 +58,15 @@ __amp_skip_ops__ = [ def 
set_op_dtype_to_fp16(op): - if op.has_attr('in_dtype') and op.attr( - 'in_dtype') == core.VarDesc.VarType.FP32: + if ( + op.has_attr('in_dtype') + and op.attr('in_dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('in_dtype', core.VarDesc.VarType.FP16) - if op.has_attr('out_dtype') and op.attr( - 'out_dtype') == core.VarDesc.VarType.FP32: + if ( + op.has_attr('out_dtype') + and op.attr('out_dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('out_dtype', core.VarDesc.VarType.FP16) if op.has_attr('dtype') and op.attr('dtype') == core.VarDesc.VarType.FP32: op._set_attr('dtype', core.VarDesc.VarType.FP16) @@ -63,7 +86,12 @@ def _keep_fp32_input(op, in_name): return in_name not in {'X', 'FilterX', 'Z', 'FilterZ'} if op_type in ['fused_attention', 'fused_feedforward']: return in_name in { - 'LnScale', 'LnBias', 'Ln2Scale', 'Ln2Bias', "Ln1Scale", "Ln1Bias" + 'LnScale', + 'LnBias', + 'Ln2Scale', + 'Ln2Bias', + "Ln1Scale", + "Ln1Bias", } # backward if op_type in ['batch_norm_grad']: @@ -83,8 +111,12 @@ def _keep_fp32_output(op, out_name): return out_name not in {'Y', 'ConvX', 'ConvZ'} if op_type in ['fused_attention', 'fused_feedforward']: return out_name in { - 'LnMean', 'LnVariance', 'Ln2Mean', 'Ln2Variance', 'Ln1Mean', - 'Ln1Variance' + 'LnMean', + 'LnVariance', + 'Ln2Mean', + 'Ln2Variance', + 'Ln1Mean', + 'Ln1Variance', } # backward if op_type in ['layer_norm_grad']: @@ -95,24 +127,28 @@ def _keep_fp32_output(op, out_name): class FP16State(object): - - def __init__(self, - program, - amp_list, - dist_context, - use_fp16_guard, - input_data_var_names=None): + def __init__( + self, + program, + amp_list, + dist_context, + use_fp16_guard, + input_data_var_names=None, + ): self.program = program self.amp_list = amp_list self.use_fp16_guard = use_fp16_guard self.dist_context = dist_context - self.grad_op_to_op_map = self.dist_context.dist_op_context.grad_op_id_to_op_id + self.grad_op_to_op_map = ( + self.dist_context.dist_op_context.grad_op_id_to_op_id + ) if input_data_var_names: self.input_data_var_names = input_data_var_names else: self.input_data_var_names = [] - self._op_fp16_dict = { - } # op_id --> True/False. 'True' means that the op is should run in fp16 mode. + self._op_fp16_dict = ( + {} + ) # op_id --> True/False. 'True' means that the op is should run in fp16 mode. 
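# ----------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): FP16State above
# keeps a single dict, _op_fp16_dict (original op id -> True/False), classifies
# forward ops against the amp list's unsupported ops and, in the hunks that
# follow, copies each forward decision onto the matching grad op through
# grad_op_id_to_op_id.  The toy functions below reproduce just that flow with
# plain dicts; mark_forward_ops / propagate_to_backward are invented names.
def mark_forward_ops(forward_ops, unsupported_types, skip_types=frozenset()):
    """forward_ops: {op_id: op_type}.  Returns {op_id: bool}, True == run in fp16."""
    decisions = {}
    for op_id, op_type in forward_ops.items():
        if op_type in skip_types:  # stand-in for __amp_skip_ops__: leave untouched
            continue
        decisions[op_id] = op_type not in unsupported_types
    return decisions


def propagate_to_backward(decisions, grad_op_to_op_map):
    """grad_op_to_op_map: {grad_op_id: forward_op_id}; every grad op simply
    inherits the dtype decision of the forward op it differentiates."""
    for grad_id, fwd_id in grad_op_to_op_map.items():
        assert fwd_id in decisions, "forward op %s was never classified" % fwd_id
        decisions[grad_id] = decisions[fwd_id]
    return decisions


if __name__ == "__main__":
    fwd = {1: "matmul_v2", 2: "softmax_with_cross_entropy", 3: "while"}
    d = mark_forward_ops(
        fwd,
        unsupported_types={"softmax_with_cross_entropy"},  # hypothetical list
        skip_types={"while"},                               # hypothetical skip set
    )
    d = propagate_to_backward(d, {11: 1, 12: 2})
    print(d)  # {1: True, 2: False, 11: True, 12: False}
# ----------------------------------------------------------------------------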
# a trick to determine leaf tensor node in program {varname: generator_op_id} self.forward_non_leaf_tensors = {} # record the cast ops that are inserted for a forward @@ -156,8 +192,9 @@ class FP16State(object): if op.type == "assign" and "array_" in op.input_arg_names[0]: self._op_fp16_dict[op.desc.original_id()] = False return - if _need_keep_fp32(op, self.amp_list.unsupported_list, - self.use_fp16_guard): + if _need_keep_fp32( + op, self.amp_list.unsupported_list, self.use_fp16_guard + ): self._op_fp16_dict[op.desc.original_id()] = False else: self._op_fp16_dict[op.desc.original_id()] = True @@ -170,8 +207,9 @@ class FP16State(object): if op.desc.original_id() in self.grad_op_to_op_map: fwd_op_id = self.grad_op_to_op_map[op.desc.original_id()] assert fwd_op_id in self._op_fp16_dict, "{}".format(str(op)) - self._op_fp16_dict[ - op.desc.original_id()] = self._op_fp16_dict[fwd_op_id] + self._op_fp16_dict[op.desc.original_id()] = self._op_fp16_dict[ + fwd_op_id + ] if int(op.attr('op_role')) == 257: self.is_train = True @@ -197,13 +235,18 @@ class FP16State(object): for op in block.ops: if is_forward_op(op): # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - if self._is_fp16_op(op.desc.original_id()) == True \ - or op.type == "cast": + if ( + self._is_fp16_op(op.desc.original_id()) == True + or op.type == "cast" + ): for in_name in op.input_names: if _keep_fp32_input(op, in_name): continue for in_var_name in op.input(in_name): - if in_var_name not in self.forward_non_leaf_tensors and in_var_name not in self.input_data_var_names: + if ( + in_var_name not in self.forward_non_leaf_tensors + and in_var_name not in self.input_data_var_names + ): self.set_var_to_fp16(in_var_name, block) for out_name in op.output_names: if _keep_fp32_output(op, out_name): @@ -249,22 +292,42 @@ class FP16State(object): elif is_forward_op(op): if self._is_fp16_op(op.desc.original_id()) == False: num_cast_ops = self._insert_forward_cast_ops( - op, idx, block, core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32, self.dist_context) + op, + idx, + block, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + self.dist_context, + ) elif self._is_fp16_op(op.desc.original_id()) == True: num_cast_ops = self._insert_forward_cast_ops( - op, idx, block, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16, self.dist_context) + op, + idx, + block, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, + self.dist_context, + ) elif is_backward_op(op): if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: if self._is_fp16_op(op.desc.original_id()) == False: num_cast_ops = self._insert_backward_cast_ops( - op, idx, block, core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32, self.dist_context) + op, + idx, + block, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + self.dist_context, + ) elif self._is_fp16_op(op.desc.original_id()) == True: num_cast_ops = self._insert_backward_cast_ops( - op, idx, block, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16, self.dist_context) + op, + idx, + block, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, + self.dist_context, + ) elif op.type == "sum": # all inputs dtype of sum should be equal and output dtype should follow input out_var_name = op.output_arg_names[0] @@ -272,41 +335,51 @@ class FP16State(object): out_var = block.var(out_var_name) in_var = block._find_var_recursive(in_var_name) for in_var_name in op.input_arg_names: - assert in_var.dtype == block.var( - in_var_name).dtype, "{}, {}, {}".format( - in_var, 
block.var(in_var_name), str(op)) + assert ( + in_var.dtype == block.var(in_var_name).dtype + ), "{}, {}, {}".format( + in_var, block.var(in_var_name), str(op) + ) out_var.desc.set_dtype(in_var.dtype) idx += num_cast_ops + 1 block._sync_with_cpp() - def _insert_forward_cast_ops(self, op, idx, block, src_dtype, dst_dtype, - dist_context): + def _insert_forward_cast_ops( + self, op, idx, block, src_dtype, dst_dtype, dist_context + ): num_cast_ops = 0 for in_name in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input( - op, in_name): + op, in_name + ): continue consume_op_attr = dist_context.get_op_dist_attr_for_program(op) assert consume_op_attr is not None for in_var_name in op.input(in_name): in_var = block._find_var_recursive(in_var_name) - if in_var is None or in_var.type not in _valid_types or in_var.dtype == dst_dtype: + if ( + in_var is None + or in_var.type not in _valid_types + or in_var.dtype == dst_dtype + ): continue if in_var.dtype == src_dtype: - cast_name = in_var.name + '.cast_' + _dtype_to_str( - dst_dtype) + cast_name = ( + in_var.name + '.cast_' + _dtype_to_str(dst_dtype) + ) cast_var = block.vars.get(cast_name) self.forward_input_cast_ops[op.desc.original_id()] += [ (cast_name, in_var.name, dst_dtype, src_dtype, in_name) ] in_var_dist_attr = consume_op_attr.get_input_dist_attr( - in_var.name) + in_var.name + ) assert in_var_dist_attr is not None # truly insert cast op if cast_var is None or cast_var.dtype != dst_dtype: @@ -320,9 +393,11 @@ class FP16State(object): name=cast_name, dtype=dst_dtype, persistable=False, - stop_gradient=in_var.stop_gradient) - set_var_dist_attr(dist_context, cast_var, ref_mapping, - ref_mesh) + stop_gradient=in_var.stop_gradient, + ) + set_var_dist_attr( + dist_context, cast_var, ref_mapping, ref_mesh + ) cast_op = block._insert_op_without_sync( idx, @@ -332,23 +407,27 @@ class FP16State(object): attrs={ "in_dtype": in_var.dtype, "out_dtype": cast_var.dtype, - OP_ROLE_KEY: OpRole.Forward - }) + OP_ROLE_KEY: OpRole.Forward, + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_op, ref_mesh, ref_mapping, dist_context) + cast_op, ref_mesh, ref_mapping, dist_context + ) num_cast_ops += 1 op._rename_input(in_var.name, cast_name) - consume_op_attr.set_input_dist_attr(cast_name, - in_var_dist_attr) + consume_op_attr.set_input_dist_attr( + cast_name, in_var_dist_attr + ) if op.has_attr('out_dtype') and op.attr('out_dtype') != -1: assert op.attr('out_dtype') == dst_dtype return num_cast_ops - def _insert_backward_cast_ops(self, op, idx, block, src_dtype, dst_dtype, - dist_context): + def _insert_backward_cast_ops( + self, op, idx, block, src_dtype, dst_dtype, dist_context + ): num_cast_ops = 0 op_id = op.desc.id() @@ -364,10 +443,16 @@ class FP16State(object): if _keep_fp32_output(op, out_var.name): continue assert out_var.dtype == dst_dtype, "{}, {}".format( - str(out_var), dst_dtype) + str(out_var), dst_dtype + ) - for cast_name, src_name, dst_dtype, src_dtype, slot_name in self.forward_input_cast_ops[ - forward_op_id]: + for ( + cast_name, + src_name, + dst_dtype, + src_dtype, + slot_name, + ) in self.forward_input_cast_ops[forward_op_id]: # some forward output is not need by backward computation, e.g. logit in softmax_with_cross_entropy if slot_name not in op.input_names: @@ -375,8 +460,8 @@ class FP16State(object): # rename input assert src_name in op.input( - slot_name), "var: {} not in op's {}. {}".format( - src_name, slot_name, str(op)) + slot_name + ), "var: {} not in op's {}. 
{}".format(src_name, slot_name, str(op)) src_var_dist_attr = grad_op_attr.get_input_dist_attr(src_name) assert src_var_dist_attr is not None op._rename_input(src_name, cast_name) @@ -384,15 +469,16 @@ class FP16State(object): # create cast grad grad_slot_name = slot_name + "@GRAD" - assert grad_slot_name in op.output_names, "[{}], Current Op: {}".format( - grad_slot_name, str(op)) + assert ( + grad_slot_name in op.output_names + ), "[{}], Current Op: {}".format(grad_slot_name, str(op)) # some forward input maybe stop_gradient=True, e.g. input_mask if len(op.output(grad_slot_name)) == 0: continue - assert len( - op.output(grad_slot_name)) == 1, "[{}], Current Op: {}".format( - grad_slot_name, str(op)) + assert ( + len(op.output(grad_slot_name)) == 1 + ), "[{}], Current Op: {}".format(grad_slot_name, str(op)) grad_name = op.output(grad_slot_name)[0] grad = block.var(grad_name) grad_dist_attr = grad_op_attr.get_output_dist_attr(grad_name) @@ -401,15 +487,18 @@ class FP16State(object): ref_mapping = grad_dist_attr.dims_mapping cast_grad = block.create_var( - name=unique_name.generate_with_ignorable_key("".join( - [cast_name, '@GRAD'])), + name=unique_name.generate_with_ignorable_key( + "".join([cast_name, '@GRAD']) + ), dtype=dst_dtype, shape=grad.shape, type=grad.type, persistable=grad.persistable, - stop_gradient=grad.stop_gradient) + stop_gradient=grad.stop_gradient, + ) dist_context.set_tensor_dist_attr_for_program( - cast_grad, grad_dist_attr) + cast_grad, grad_dist_attr + ) op._rename_output(grad_name, cast_grad.name) grad_op_attr.set_output_dist_attr(cast_grad.name, grad_dist_attr) @@ -422,12 +511,14 @@ class FP16State(object): attrs={ "in_dtype": dst_dtype, "out_dtype": src_dtype, - OP_ROLE_KEY: OpRole.Backward - }) + OP_ROLE_KEY: OpRole.Backward, + }, + ) grad.desc.set_dtype(src_dtype) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - cast_op, ref_mesh, ref_mapping, dist_context) + cast_op, ref_mesh, ref_mapping, dist_context + ) num_cast_ops += 1 return num_cast_ops @@ -440,26 +531,34 @@ def _check_and_update_gradient(grads, loss_scaling, name, dist_context): check_type(grads, 'x', (tuple, list), 'check_finite_and_unscale') for e in grads: - check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'], - 'check_finite_and_unscale') + check_variable_and_dtype( + e, + "x", + ['float16', 'float32', 'float64'], + 'check_finite_and_unscale', + ) found_inf = main_block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - ['find_infinite_scale', name])), + name=unique_name.generate_with_ignorable_key( + ".".join(['find_infinite_scale', name]) + ), shape=[1], dtype='bool', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) set_var_dist_attr(dist_context, found_inf, [-1], world_process_group.ranks) inputs = {'X': grads, 'Scale': loss_scaling} outputs = {'Out': grads, 'FoundInfinite': found_inf} attrs = {'op_role': OpRole.Optimize} - new_op = main_block.append_op(type='check_finite_and_unscale', - inputs=inputs, - outputs=outputs, - attrs=attrs) + new_op = main_block.append_op( + type='check_finite_and_unscale', + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) new_op_dist_attr = OperatorDistributedAttribute() new_op_dist_attr.process_mesh = world_process_group.ranks @@ -469,10 +568,12 @@ def _check_and_update_gradient(grads, loss_scaling, name, dist_context): for g in grads: g_dist_attr = dist_context.get_tensor_dist_attr_for_program(g) assert g_dist_attr is not None - 
new_op_dist_attr.set_input_dims_mapping(g.name, - g_dist_attr.dims_mapping) - new_op_dist_attr.set_output_dims_mapping(g.name, - g_dist_attr.dims_mapping) + new_op_dist_attr.set_input_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) + new_op_dist_attr.set_output_dims_mapping( + g.name, g_dist_attr.dims_mapping + ) dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr) return grads, found_inf @@ -481,8 +582,9 @@ def _split_grads(params_grads): grads = [g for _, g in params_grads] fp32_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP32] fp16_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP16] - assert len(fp32_grads) + len(fp16_grads) == len(grads), \ - "Data types of all grads must be either fp16 or fp32." + assert len(fp32_grads) + len(fp16_grads) == len( + grads + ), "Data types of all grads must be either fp16 or fp32." return grads, fp32_grads, fp16_grads @@ -494,37 +596,45 @@ def _set_op_dist_attr_with_ranks(new_op, ranks, block, dist_context): var = block.var(var_name) var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var) assert var_dist_attr is not None - new_op_dist_attr.set_input_dims_mapping(var_name, - var_dist_attr.dims_mapping) + new_op_dist_attr.set_input_dims_mapping( + var_name, var_dist_attr.dims_mapping + ) for var_name in new_op.output_arg_names: var = block.var(var_name) var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var) assert var_dist_attr is not None - new_op_dist_attr.set_output_dims_mapping(var_name, - var_dist_attr.dims_mapping) + new_op_dist_attr.set_output_dims_mapping( + var_name, var_dist_attr.dims_mapping + ) dist_context.set_op_dist_attr_for_program(new_op, new_op_dist_attr) def _get_memcopy_idx(block, found_inf_var): # use reduce_any op for check_nan_inf as the anchor for now for idx, op in enumerate(block.ops): - if op.type == 'reduce_any' and op.output_arg_names[ - 0] == found_inf_var.name: + if ( + op.type == 'reduce_any' + and op.output_arg_names[0] == found_inf_var.name + ): return idx + 1 raise RuntimeError( - "not found the correct location for memcopy for found_inf_var.") + "not found the correct location for memcopy for found_inf_var." 
+ ) def _insert_memcopy(block, idx, src_var, dist_context, direction="D2H"): src_name = src_var.name - output_var = block.create_var(name=unique_name.generate_with_ignorable_key( - src_name.join(['memcopy_'])), - dtype=src_var.dtype, - shape=src_var.shape, - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=src_var.stop_gradient) + output_var = block.create_var( + name=unique_name.generate_with_ignorable_key( + src_name.join(['memcopy_']) + ), + dtype=src_var.dtype, + shape=src_var.shape, + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=src_var.stop_gradient, + ) set_var_dist_attr(dist_context, output_var, [-1], world_process_group.ranks) @@ -535,16 +645,20 @@ def _insert_memcopy(block, idx, src_var, dist_context, direction="D2H"): dst_place_type = 1 else: raise NotImplementedError( - "direction [{}] is not supported yet.".format(direction)) + "direction [{}] is not supported yet.".format(direction) + ) attrs = {'dst_place_type': dst_place_type} - new_op = block._insert_op_without_sync(index=idx, - type='memcpy', - inputs={'X': [src_var]}, - outputs={'Out': [output_var]}, - attrs=attrs) - _set_op_dist_attr_with_ranks(new_op, world_process_group.ranks, block, - dist_context) + new_op = block._insert_op_without_sync( + index=idx, + type='memcpy', + inputs={'X': [src_var]}, + outputs={'Out': [output_var]}, + attrs=attrs, + ) + _set_op_dist_attr_with_ranks( + new_op, world_process_group.ranks, block, dist_context + ) block._sync_with_cpp() return output_var @@ -572,19 +686,21 @@ def cast_startup_program(): for op in startup_program.global_block().ops: if is_initialization_op(op): output_name = op.output_arg_names[0] - if param_to_dtype.get(output_name, - None) == core.VarDesc.VarType.FP16: + if ( + param_to_dtype.get(output_name, None) + == core.VarDesc.VarType.FP16 + ): assert op.has_attr( 'dtype' ), "initialization op is supported to has dtype attribute but got {}.".format( - str(op)) + str(op) + ) if op.attr('dtype') == core.VarDesc.VarType.FP32: op._set_attr('dtype', core.VarDesc.VarType.FP16) @register_pass("auto_parallel_fp16") class FP16Pass(AMPPass): - def __init__(self): super(FP16Pass, self).__init__() @@ -597,16 +713,22 @@ class FP16Pass(AMPPass): amp_list = AutoMixedPrecisionLists( set(self.get_attr("custom_white_list")), - set(self.get_attr("custom_black_list")), None) + set(self.get_attr("custom_black_list")), + None, + ) # NOTE don't not change input data dtype, since it is controled by dataloader # and which is out of control of FP16 Pass input_data_var_names = [var.name for var in self.get_attr("input_data")] with paddle.static.program_guard(main_program, startup_program): - fp16_state = FP16State(main_program, amp_list, self.dist_context, - self.get_attr("use_fp16_guard"), - input_data_var_names) + fp16_state = FP16State( + main_program, + amp_list, + self.dist_context, + self.get_attr("use_fp16_guard"), + input_data_var_names, + ) is_train = fp16_state._build_state() cast_startup_program() @@ -619,41 +741,63 @@ class FP16Pass(AMPPass): grads, fp32_grads, fp16_grads = _split_grads(params_grads) - if self.get_attr("use_dynamic_loss_scaling" - ) or self.get_attr("init_loss_scaling") != 1.0: + if ( + self.get_attr("use_dynamic_loss_scaling") + or self.get_attr("init_loss_scaling") != 1.0 + ): found_infs = [] if fp32_grads: with main_program._optimized_guard([]): _, found_inf_fp32 = _check_and_update_gradient( - fp32_grads, self._loss_scaling, "@fp32", - self.dist_context) + fp32_grads, + self._loss_scaling, + "@fp32", + 
self.dist_context, + ) found_infs.append(found_inf_fp32) if fp16_grads: with main_program._optimized_guard([]): _, found_inf_fp16 = _check_and_update_gradient( - fp16_grads, self._loss_scaling, "@fp16", - self.dist_context) + fp16_grads, + self._loss_scaling, + "@fp16", + self.dist_context, + ) found_infs.append(found_inf_fp16) with main_program._optimized_guard([]): block = main_program.global_block() all_infs = paddle.fluid.layers.concat(found_infs) - set_var_dist_attr(self.dist_context, all_infs, [-1], - world_process_group.ranks) + set_var_dist_attr( + self.dist_context, + all_infs, + [-1], + world_process_group.ranks, + ) new_op = block.ops[-1] assert new_op.type == "concat" - _set_op_dist_attr_with_ranks(new_op, - world_process_group.ranks, - block, self.dist_context) + _set_op_dist_attr_with_ranks( + new_op, + world_process_group.ranks, + block, + self.dist_context, + ) found_inf = paddle.fluid.layers.reduce_any(all_infs) - set_var_dist_attr(self.dist_context, found_inf, [-1], - world_process_group.ranks) + set_var_dist_attr( + self.dist_context, + found_inf, + [-1], + world_process_group.ranks, + ) new_op = block.ops[-1] assert new_op.type == "reduce_any" - _set_op_dist_attr_with_ranks(new_op, - world_process_group.ranks, - block, self.dist_context) + _set_op_dist_attr_with_ranks( + new_op, + world_process_group.ranks, + block, + self.dist_context, + ) if self.get_attr("use_dynamic_loss_scaling"): with main_program._optimized_guard([]): @@ -668,14 +812,15 @@ class FP16Pass(AMPPass): if self.get_attr("use_optimizer_fp16"): base_opt._multi_precision = False if isinstance( - base_opt, - (paddle.fluid.optimizer.Adam, paddle.optimizer.AdamW)): + base_opt, (paddle.fluid.optimizer.Adam, paddle.optimizer.AdamW) + ): with main_program._optimized_guard([]): # found_inf = paddle.tensor.creation._memcpy( # found_inf, paddle.CPUPlace()) insert_idx = _get_memcopy_idx(block, found_inf) - found_inf = _insert_memcopy(block, insert_idx, found_inf, - self.dist_context) + found_inf = _insert_memcopy( + block, insert_idx, found_inf, self.dist_context + ) base_opt._set_auxiliary_var('found_inf', found_inf.name) elif hasattr(base_opt, "_set_auxiliary_var"): base_opt._set_auxiliary_var('found_inf', found_inf.name) diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py index 5108992ae55feac11dd6ebad29dc7eba9d1d4e2e..ebb44bb0f126463925bd1c23dc571d4ffd707d72 100644 --- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py +++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py @@ -20,8 +20,17 @@ import paddle from .pass_base import PassBase, register_pass from ..auto_parallel.reshard import Resharder from ..auto_parallel.process_group import get_world_process_group -from ..auto_parallel.utils import is_gradient_clip_op, is_optimize_op, OP_ROLE_KEY, OpRole, _get_comm_group -from ..auto_parallel.dist_attribute import TensorDistributedAttribute, OperatorDistributedAttribute +from ..auto_parallel.utils import ( + is_gradient_clip_op, + is_optimize_op, + OP_ROLE_KEY, + OpRole, + _get_comm_group, +) +from ..auto_parallel.dist_attribute import ( + TensorDistributedAttribute, + OperatorDistributedAttribute, +) def _get_params_grads(block): @@ -52,7 +61,8 @@ def _get_dpmp_topology(origin_topology, sharding_group): """ sharding_axis = 1 dp_sharding_topology = [ - origin_topology[0] // sharding_group.nranks, sharding_group.nranks + origin_topology[0] // sharding_group.nranks, + sharding_group.nranks, ] if 
dp_sharding_topology[0] == 1: sharding_axis = 0 @@ -108,22 +118,24 @@ def _get_dpmp_process_mesh(rank_id, topology, processes, sharding_group): return dpmp_topology, list(dpmp_processes_in_sharding) -def _is_about_global_norm(rank_id, tensor_shape, topology, processes, - dims_mapping, sharding_group): +def _is_about_global_norm( + rank_id, tensor_shape, topology, processes, dims_mapping, sharding_group +): # get current process_mesh where the parameter exist. dpmp_topology, dpmp_processes = _get_dpmp_process_mesh( - rank_id, topology, processes, sharding_group) + rank_id, topology, processes, sharding_group + ) - complete_shape = Resharder.compute_complete_shape(tensor_shape, - dpmp_topology, - dims_mapping) + complete_shape = Resharder.compute_complete_shape( + tensor_shape, dpmp_topology, dims_mapping + ) complete_partitions = [] complete_param_ranks = [] for process in dpmp_processes: partition_index = Resharder.compute_partition_index( - process, complete_shape, dims_mapping, dpmp_topology, - dpmp_processes) + process, complete_shape, dims_mapping, dpmp_topology, dpmp_processes + ) if partition_index not in complete_partitions: complete_partitions.append(partition_index) complete_param_ranks.append(process) @@ -132,7 +144,6 @@ def _is_about_global_norm(rank_id, tensor_shape, topology, processes, class ClipHelper(object): - def __init__(self, params_grads, rank_id, block, dist_context): params, _ = zip(*params_grads) self.params = list(params) @@ -154,9 +165,14 @@ class ClipHelper(object): topology = dist_attr.process_mesh.topology processes = dist_attr.process_mesh.processes dims_mapping = dist_attr.dims_mapping - return _is_about_global_norm(self.rank_id, param.shape, topology, - processes, dims_mapping, - self.sharding_group) + return _is_about_global_norm( + self.rank_id, + param.shape, + topology, + processes, + dims_mapping, + self.sharding_group, + ) def _get_dist_attr(self, name): var = self.block.vars[name] @@ -181,7 +197,8 @@ class ClipHelper(object): in_dist_attr.process_mesh = self.world_ranks in_dist_attr.dims_mapping = [-1] self.dist_context.set_tensor_dist_attr_for_program( - in_var, in_dist_attr) + in_var, in_dist_attr + ) op_dist_attr.set_input_dist_attr(in_name, in_dist_attr) for out_name in op.output_arg_names: out_var = self.block.vars[out_name] @@ -189,7 +206,8 @@ class ClipHelper(object): out_dist_attr.process_mesh = self.world_ranks out_dist_attr.dims_mapping = [-1] self.dist_context.set_tensor_dist_attr_for_program( - out_var, out_dist_attr) + out_var, out_dist_attr + ) op_dist_attr.set_output_dist_attr(out_name, out_dist_attr) self.dist_context.set_op_dist_attr_for_program(op, op_dist_attr) @@ -228,14 +246,18 @@ class ClipGradByGloblNormPass(PassBase): dist_params_grads = self.get_attr("params_grads", None) # dist_params_grads = _get_params_grads(block) - self.clip_helper = ClipHelper(dist_params_grads, rank_id, block, - dist_context) + self.clip_helper = ClipHelper( + dist_params_grads, rank_id, block, dist_context + ) self._remove_no_need_ops_vars(block) def _remove_no_need_ops_vars(self, block): removed_op_out_type = [ - 'clip_by_norm', 'squared_l2_norm', 'square', 'reduce_sum' + 'clip_by_norm', + 'squared_l2_norm', + 'square', + 'reduce_sum', ] removed_op_idx = set() @@ -248,12 +270,14 @@ class ClipGradByGloblNormPass(PassBase): input_name = op.input("X")[0] if input_name.find("@GRAD") != -1: #'clip_by_norm', 'squared_l2_norm', 'square' - param_name = input_name[:input_name.find("@GRAD")] + param_name = input_name[: input_name.find("@GRAD")] is_local = 
self.clip_helper._is_local_param(param_name) is_calculate = self.clip_helper._is_calcuate_norm( - param_name) - if not is_local or (not is_calculate - and op.type != 'clip_by_norm'): + param_name + ) + if not is_local or ( + not is_calculate and op.type != 'clip_by_norm' + ): removed_op_idx.add(idx) removed_tmp_var.update(set(op.output_arg_names)) else: @@ -265,20 +289,23 @@ class ClipGradByGloblNormPass(PassBase): elif op.type == 'elementwise_mul': input_name = op.input("X")[0] if input_name.find("@GRAD") != -1: - param_name = input_name[:input_name.find("@GRAD")] + param_name = input_name[: input_name.find("@GRAD")] is_local = self.clip_helper._is_local_param(param_name) if not is_local: removed_op_idx.add(idx) if block.ops[idx - 1].type == 'cast': removed_op_idx.add(idx - 1) removed_tmp_var.update( - set(block.ops[idx - 1].output_arg_names)) + set(block.ops[idx - 1].output_arg_names) + ) elif op.type == 'sum': reserved_vars = [] for input_name in op.input_arg_names: - if input_name not in removed_tmp_var and \ - self.clip_helper._is_local_var(input_name): + if ( + input_name not in removed_tmp_var + and self.clip_helper._is_local_var(input_name) + ): reserved_vars.append(input_name) if not reserved_vars: removed_op_idx.add(idx) @@ -286,7 +313,8 @@ class ClipGradByGloblNormPass(PassBase): if block.ops[idx + 1].type == 'cast': removed_op_idx.add(idx + 1) removed_tmp_var.update( - set(block.ops[idx + 1].output_arg_names)) + set(block.ops[idx + 1].output_arg_names) + ) else: op.desc.set_input("X", reserved_vars) @@ -320,10 +348,12 @@ class ClipGradByGloblNormPass(PassBase): 'dtype': input_var.dtype, 'value': 0, 'force_cpu': False, - OP_ROLE_KEY: OpRole.Optimize - }) - fill_constant_op._set_attr('op_namescope', - "/gradient_clip_pass") + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + fill_constant_op._set_attr( + 'op_namescope', "/gradient_clip_pass" + ) offset += 1 self.clip_helper._init_dist_attr(fill_constant_op) @@ -336,9 +366,11 @@ class ClipGradByGloblNormPass(PassBase): 'ring_id': 0, 'use_calc_stream': True, OP_ROLE_KEY: OpRole.Optimize, - }) - allreduce_op._set_attr('op_namescope', - "/gradient_clip_pass") + }, + ) + allreduce_op._set_attr( + 'op_namescope', "/gradient_clip_pass" + ) self.clip_helper._init_dist_attr(allreduce_op) for varname in removed_tmp_var: diff --git a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py index 2c0af10e35d9899dc67c38c857f3a88924efa5be..282f909987d4160e946c1c8c7242c3737872a7ae 100644 --- a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py +++ b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py @@ -19,9 +19,18 @@ from paddle.framework import core from paddle.fluid import layers from paddle.fluid.framework import device_guard from .pass_base import PassBase, PassType, register_pass -from paddle.distributed.auto_parallel.utils import set_var_dist_attr, is_optimize_op, OpRole, OP_ROLE_KEY -from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping -from paddle.distributed.auto_parallel.process_group import get_world_process_group +from paddle.distributed.auto_parallel.utils import ( + set_var_dist_attr, + is_optimize_op, + OpRole, + OP_ROLE_KEY, +) +from paddle.distributed.auto_parallel.utils import ( + naive_set_dist_op_attr_for_program_by_mesh_and_mapping, +) +from paddle.distributed.auto_parallel.process_group import ( + get_world_process_group, +) world_process_group = get_world_process_group() @@ 
-56,79 +65,87 @@ def _remove_and_get_optimizer_op(main_program, dist_context): def _get_gm_cond_var(main_program, k_steps, dist_context): main_block = main_program.global_block() # Add const var - k_step_var = layers.create_global_var(name="gradient_merge_k", - shape=[1], - value=int(k_steps), - dtype='int32', - persistable=True, - force_cpu=True) + k_step_var = layers.create_global_var( + name="gradient_merge_k", + shape=[1], + value=int(k_steps), + dtype='int32', + persistable=True, + force_cpu=True, + ) set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks) - zero_var = layers.create_global_var(name="gradient_merge_zero", - shape=[1], - value=int(0), - dtype='int32', - persistable=True, - force_cpu=True) + zero_var = layers.create_global_var( + name="gradient_merge_zero", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + force_cpu=True, + ) set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks) # Add step var & cond var - step_var = layers.create_global_var(name="gradient_merge_step", - shape=[1], - value=int(0), - dtype='int32', - persistable=True, - force_cpu=True) + step_var = layers.create_global_var( + name="gradient_merge_step", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + force_cpu=True, + ) set_var_dist_attr(dist_context, step_var, [-1], world_process_group.ranks) - cond_var = main_block.create_var(name="gradient_merge_cond", - shape=[1], - dtype='bool') + cond_var = main_block.create_var( + name="gradient_merge_cond", shape=[1], dtype='bool' + ) set_var_dist_attr(dist_context, cond_var, [-1], world_process_group.ranks) with device_guard("cpu"): # step_var += 1 - increment_op = main_block.append_op(type='increment', - inputs={'X': [step_var]}, - outputs={'Out': [step_var]}, - attrs={ - 'step': float(1.0), - OP_ROLE_KEY: OpRole.Backward - }) + increment_op = main_block.append_op( + type='increment', + inputs={'X': [step_var]}, + outputs={'Out': [step_var]}, + attrs={'step': float(1.0), OP_ROLE_KEY: OpRole.Backward}, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - increment_op, world_process_group.ranks, [-1], dist_context) + increment_op, world_process_group.ranks, [-1], dist_context + ) # step_var %= k_step - elementwise_mod_op = main_block.append_op(type='elementwise_mod', - inputs={ - 'X': step_var, - 'Y': k_step_var - }, - outputs={'Out': step_var}, - attrs={ - 'axis': -1, - 'use_mkldnn': False, - OP_ROLE_KEY: - OpRole.Backward - }) + elementwise_mod_op = main_block.append_op( + type='elementwise_mod', + inputs={'X': step_var, 'Y': k_step_var}, + outputs={'Out': step_var}, + attrs={ + 'axis': -1, + 'use_mkldnn': False, + OP_ROLE_KEY: OpRole.Backward, + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - elementwise_mod_op, world_process_group.ranks, [-1], dist_context) + elementwise_mod_op, world_process_group.ranks, [-1], dist_context + ) # cond_var = (step_var == 0) - equal_op = main_block.append_op(type='equal', - inputs={ - 'X': step_var, - 'Y': zero_var - }, - outputs={'Out': cond_var}, - attrs={OP_ROLE_KEY: OpRole.Backward}) + equal_op = main_block.append_op( + type='equal', + inputs={'X': step_var, 'Y': zero_var}, + outputs={'Out': cond_var}, + attrs={OP_ROLE_KEY: OpRole.Backward}, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - equal_op, world_process_group.ranks, [-1], dist_context) + equal_op, world_process_group.ranks, [-1], dist_context + ) return cond_var def _append_gradient_merge_backward_op( - main_program, startup_program, params_grads: 
List[Tuple[Any, Any]], - dist_context) -> Tuple[List[Tuple[Any, Any]], Dict[str, Any]]: + main_program, + startup_program, + params_grads: List[Tuple[Any, Any]], + dist_context, +) -> Tuple[List[Tuple[Any, Any]], Dict[str, Any]]: main_block = main_program.global_block() startup_block = startup_program.global_block() @@ -146,57 +163,66 @@ def _append_gradient_merge_backward_op( for param, grad in params_grads: param_name = param.name param_var = main_block.var(param_name) - assert (param_var is not None) + assert param_var is not None ref_dist_attr = dist_context.get_tensor_dist_attr_for_program(param_var) assert ref_dist_attr is not None - gradient_merge_var = main_block.create_var(name=param_name + - "@GRAD@GradientMerge", - shape=param_var.shape, - dtype=param_var.dtype, - persistable=True) + gradient_merge_var = main_block.create_var( + name=param_name + "@GRAD@GradientMerge", + shape=param_var.shape, + dtype=param_var.dtype, + persistable=True, + ) ref_process_mesh = ref_dist_attr.process_mesh ref_dims_mapping = ref_dist_attr.dims_mapping - set_var_dist_attr(dist_context, gradient_merge_var, ref_dims_mapping, - ref_process_mesh) + set_var_dist_attr( + dist_context, gradient_merge_var, ref_dims_mapping, ref_process_mesh + ) startup_gradient_merge_var = startup_block.create_var( name=param_name + "@GRAD@GradientMerge", shape=param_var.shape, dtype=param_var.dtype, - persistable=True) - startup_block.append_op(type="fill_constant", - outputs={"Out": startup_gradient_merge_var}, - attrs={ - "shape": param_var.shape, - "dtype": param_var.dtype, - "value": float(0), - }) + persistable=True, + ) + startup_block.append_op( + type="fill_constant", + outputs={"Out": startup_gradient_merge_var}, + attrs={ + "shape": param_var.shape, + "dtype": param_var.dtype, + "value": float(0), + }, + ) # grad_merge += grad - new_grad_op = main_block.append_op(type="elementwise_add", - inputs={ - 'X': grad, - 'Y': gradient_merge_var - }, - outputs={'Out': gradient_merge_var}, - attrs={ - 'axis': -1, - 'use_mkldnn': False, - OP_ROLE_KEY: OpRole.Backward - }) + new_grad_op = main_block.append_op( + type="elementwise_add", + inputs={'X': grad, 'Y': gradient_merge_var}, + outputs={'Out': gradient_merge_var}, + attrs={ + 'axis': -1, + 'use_mkldnn': False, + OP_ROLE_KEY: OpRole.Backward, + }, + ) new_params_to_grads.append([param, gradient_merge_var]) grad_to_gradient_merge[grad.name] = gradient_merge_var.name naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_grad_op, ref_process_mesh, ref_dims_mapping, dist_context) + new_grad_op, ref_process_mesh, ref_dims_mapping, dist_context + ) return new_params_to_grads, grad_to_gradient_merge def _create_cond_block_and_update_optimizer( - main_program, cond_var, new_params_to_grads: List[Tuple[Any, Any]], - grad_to_gradient_merge: Dict[str, str], optimize_ops_desc: List[Any], - k_steps, avg): - + main_program, + cond_var, + new_params_to_grads: List[Tuple[Any, Any]], + grad_to_gradient_merge: Dict[str, str], + optimize_ops_desc: List[Any], + k_steps, + avg, +): def true_apply_gradient(): cur_block_idx = main_program.current_block_idx cur_block = main_program.current_block() @@ -207,14 +233,16 @@ def _create_cond_block_and_update_optimizer( if avg: for param, new_grad in new_params_to_grads: # grad /= k_steps - cur_block.append_op(type='scale', - inputs={'X': new_grad}, - outputs={'Out': new_grad}, - attrs={ - 'scale': 1.0 / k_steps, - 'bias': 0.0, - 'bias_after_scale': False - }) + cur_block.append_op( + type='scale', + inputs={'X': new_grad}, + outputs={'Out': 
new_grad}, + attrs={ + 'scale': 1.0 / k_steps, + 'bias': 0.0, + 'bias_after_scale': False, + }, + ) new_grad.op._set_attr(OP_ROLE_KEY, OpRole.Optimize) # append optimizer ops @@ -222,16 +250,18 @@ def _create_cond_block_and_update_optimizer( new_op_desc = cur_block.desc.append_op() new_op_desc.copy_from(op_desc) - #update input/output + # update input/output for input_name in new_op_desc.input_arg_names(): if input_name in grad_to_gradient_merge: new_op_desc._rename_input( - input_name, grad_to_gradient_merge[input_name]) + input_name, grad_to_gradient_merge[input_name] + ) for output_name in new_op_desc.output_arg_names(): if output_name in grad_to_gradient_merge: new_op_desc._rename_output( - output_name, grad_to_gradient_merge[output_name]) + output_name, grad_to_gradient_merge[output_name] + ) # remove op_role_var if new_op_desc.has_attr(op_maker.kOpRoleVarAttrName()): @@ -249,10 +279,12 @@ def _create_cond_block_and_update_optimizer( # clear gradient_merge_vars for param, new_grad in new_params_to_grads: - layers.fill_constant(shape=new_grad.shape, - dtype=new_grad.dtype, - value=0.0, - out=new_grad) + layers.fill_constant( + shape=new_grad.shape, + dtype=new_grad.dtype, + value=0.0, + out=new_grad, + ) new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize) layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None) @@ -260,8 +292,9 @@ def _create_cond_block_and_update_optimizer( cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize) -def parse_program(main_program, startup_program, params_grads, k_steps, avg, - dist_context): +def parse_program( + main_program, startup_program, params_grads, k_steps, avg, dist_context +): # 1 remove optimizer_op from main_program optimize_ops_desc = _remove_and_get_optimizer_op(main_program, dist_context) @@ -269,22 +302,30 @@ def parse_program(main_program, startup_program, params_grads, k_steps, avg, main_program._rollback() # 2 append gradient merge backward op to main_program - new_params_to_grads, grad_to_gradient_merge = _append_gradient_merge_backward_op( - main_program, startup_program, params_grads, dist_context) + ( + new_params_to_grads, + grad_to_gradient_merge, + ) = _append_gradient_merge_backward_op( + main_program, startup_program, params_grads, dist_context + ) # 3 create gradient_merge_cond cond_var = _get_gm_cond_var(main_program, k_steps, dist_context) # 4 create ConditionalBlock and append gradient merge optimizer ops - _create_cond_block_and_update_optimizer(main_program, cond_var, - new_params_to_grads, - grad_to_gradient_merge, - optimize_ops_desc, k_steps, avg) + _create_cond_block_and_update_optimizer( + main_program, + cond_var, + new_params_to_grads, + grad_to_gradient_merge, + optimize_ops_desc, + k_steps, + avg, + ) @register_pass("auto_parallel_gradient_merge_pass") class GradientMergePass(PassBase): - def __init__(self): super(GradientMergePass, self).__init__() self.set_attr("k_steps", -1) @@ -307,7 +348,13 @@ class GradientMergePass(PassBase): dist_context = self.get_attr("dist_context") params_grads = self.get_attr("params_grads") with paddle.static.program_guard(main_program, startup_program): - parse_program(main_program, startup_program, params_grads, k_steps, - avg, dist_context) + parse_program( + main_program, + startup_program, + params_grads, + k_steps, + avg, + dist_context, + ) main_program._sync_with_cpp() diff --git a/python/paddle/distributed/passes/auto_parallel_quantization.py b/python/paddle/distributed/passes/auto_parallel_quantization.py index 
c0ac93d83939dedd99f1a6720771367e175d0947..893b9d0d9e5cfb05b551281be7110d1f4bf9215a 100644 --- a/python/paddle/distributed/passes/auto_parallel_quantization.py +++ b/python/paddle/distributed/passes/auto_parallel_quantization.py @@ -20,7 +20,10 @@ from paddle.fluid.contrib.slim.quantization import utils from paddle.fluid.contrib.slim.quantization import QuantizationTransformPassV2 from paddle.fluid.contrib.slim.quantization import AddQuantDequantPassV2 from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, + TensorDistributedAttribute, +) from .pass_base import PassBase, register_pass @@ -34,7 +37,6 @@ def _node_id(node): @register_pass("auto_parallel_quantization") class QuantizationPass(PassBase): - def __init__(self): super(QuantizationPass, self).__init__() self.set_attr("dist_context", None) @@ -61,14 +63,19 @@ class QuantizationPass(PassBase): place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) # 1. Program convert to Graph, and this pass is only for train mode - main_graph = framework.IrGraph(core.Graph(main_program.desc), - for_test=False) + main_graph = framework.IrGraph( + core.Graph(main_program.desc), for_test=False + ) # 2. Prepare inputs transform_pass_ops = [] quant_dequant_ops = [] quantize_op_types = [ - 'conv2d', 'depthwise_conv2d', 'mul', 'matmul', 'matmul_v2' + 'conv2d', + 'depthwise_conv2d', + 'mul', + 'matmul', + 'matmul_v2', ] for op_type in quantize_op_types: if op_type in TRANSFORM_PASS_OP_TYPES: @@ -76,8 +83,11 @@ class QuantizationPass(PassBase): elif op_type in QUANT_DEQUANT_PASS_OP_TYPES: quant_dequant_ops.append(op_type) - weight_quantize_type = "channel_wise_abs_max" if self.get_attr( - 'channel_wise_abs_max') else "abs_max" + weight_quantize_type = ( + "channel_wise_abs_max" + if self.get_attr('channel_wise_abs_max') + else "abs_max" + ) # 3. Add quant op for ops which have parameters transform_pass = QuantizationTransformPassV2( @@ -94,7 +104,8 @@ class QuantizationPass(PassBase): weight_preprocess_func=None, act_preprocess_func=None, optimizer_func=None, - executor=None) + executor=None, + ) transform_pass.apply(main_graph) # 4. Add quant op for ops which don't have parameter @@ -103,12 +114,14 @@ class QuantizationPass(PassBase): place=place, quant_bits=self.get_attr('activation_bits'), skip_pattern=self.get_attr('not_quant_pattern'), - quantizable_op_type=quant_dequant_ops) + quantizable_op_type=quant_dequant_ops, + ) quant_dequant_pass.apply(main_graph) # 5. Gather quantitative information for the output - out_scale_training_pass = OutScaleForTrainingPass(scope=scope, - place=place) + out_scale_training_pass = OutScaleForTrainingPass( + scope=scope, place=place + ) out_scale_training_pass.apply(main_graph) # 6. 
Convert Graph back to Program @@ -132,34 +145,43 @@ class QuantizationPass(PassBase): for ip, quant_op in enumerate(block.ops): quant_op_dist_attr = OperatorDistributedAttribute() - if "quantize" in quant_op.type or \ - quant_op.type == "moving_average_abs_max_scale": + if ( + "quantize" in quant_op.type + or quant_op.type == "moving_average_abs_max_scale" + ): input_name = quant_op.desc.input('X')[0] if "quantize" in input_name: - input_name = input_name[:input_name.index(".quantized")] + input_name = input_name[ + : input_name.index(".quantized") + ] if quant_op.type == "moving_average_abs_max_scale": consume_op = main_program.blocks[ib].vars[input_name].op else: - consume_op = main_program.blocks[ib].ops[ip - - qat_offset] + consume_op = main_program.blocks[ib].ops[ + ip - qat_offset + ] consume_op_dist_attr = dist_context.get_dist_op_for_program( - consume_op).dist_attr + consume_op + ).dist_attr ref_process_mesh = consume_op_dist_attr.process_mesh if input_name in consume_op_dist_attr.outputs_dist_attrs: - consume_input_dist_attr = consume_op_dist_attr.outputs_dist_attrs[ - input_name] + consume_input_dist_attr = ( + consume_op_dist_attr.outputs_dist_attrs[input_name] + ) else: - consume_input_dist_attr = consume_op_dist_attr.inputs_dist_attrs[ - input_name] + consume_input_dist_attr = ( + consume_op_dist_attr.inputs_dist_attrs[input_name] + ) quant_op_dist_attr.impl_idx = 0 quant_op_dist_attr.impl_type = "default" quant_op_dist_attr.process_mesh = ref_process_mesh quant_op_dist_attr.set_input_dist_attr( - quant_op.desc.input('X')[0], consume_input_dist_attr) + quant_op.desc.input('X')[0], consume_input_dist_attr + ) for slot_name in quant_op.desc.input_names(): if slot_name == "X": @@ -170,26 +192,32 @@ class QuantizationPass(PassBase): tensor_dist_attr.process_mesh = ref_process_mesh tensor_dist_attr.dims_mapping = [-1] dist_context.set_tensor_dist_attr_for_program( - input_var, tensor_dist_attr) + input_var, tensor_dist_attr + ) quant_op_dist_attr.set_input_dist_attr( - in_name, tensor_dist_attr) + in_name, tensor_dist_attr + ) for slot_name in quant_op.desc.output_names(): output_name = quant_op.desc.output(slot_name)[0] output_var = block.vars[output_name] if slot_name == "Y": dist_context.set_tensor_dist_attr_for_program( - output_var, consume_input_dist_attr) + output_var, consume_input_dist_attr + ) quant_op_dist_attr.set_output_dist_attr( - output_name, consume_input_dist_attr) + output_name, consume_input_dist_attr + ) else: tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.process_mesh = ref_process_mesh tensor_dist_attr.dims_mapping = [-1] dist_context.set_tensor_dist_attr_for_program( - output_var, tensor_dist_attr) + output_var, tensor_dist_attr + ) quant_op_dist_attr.set_output_dist_attr( - output_name, tensor_dist_attr) + output_name, tensor_dist_attr + ) quant_op._set_attr("op_device", "") qat_offset += 1 @@ -199,59 +227,86 @@ class QuantizationPass(PassBase): origin_op = main_program.blocks[ib].ops[ip - qat_offset] quant_op.desc.set_original_id(origin_op.desc.original_id()) dist_origin_op = dist_context.get_dist_op_for_program( - origin_op) - assert dist_origin_op is not None, "origin op must have dist attr." + origin_op + ) + assert ( + dist_origin_op is not None + ), "origin op must have dist attr." 
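# ---------------------------------------------------------------------------
# Illustrative sketch only -- not taken from the patch above. The hunk here has
# each inserted quantize op inherit the distributed attributes (process mesh,
# impl choice, per-tensor dims_mapping) of the original op it replaces, with
# renamed ".quantized" tensors keeping their source tensor's mapping. The toy
# dictionaries below mimic that bookkeeping in plain Python; every name is a
# made-up placeholder, not Paddle's real OperatorDistributedAttribute API.
origin_op_attr = {
    "impl_idx": 1,
    "impl_type": "matmul_v2",
    "process_mesh": [[0, 1], [2, 3]],
    "inputs": {"x": [-1, 0], "w": [0, -1]},
    "outputs": {"y": [-1, -1]},
}

quant_op_attr = {
    # the quant op reuses the implementation choice and mesh of the origin op
    "impl_idx": origin_op_attr["impl_idx"],
    "impl_type": origin_op_attr["impl_type"],
    "process_mesh": origin_op_attr["process_mesh"],
    # renamed tensors keep the dims_mapping of the tensor they were cloned from
    "inputs": {
        name + ".quantized": mapping
        for name, mapping in origin_op_attr["inputs"].items()
    },
    "outputs": {
        name + ".quantized": mapping
        for name, mapping in origin_op_attr["outputs"].items()
    },
}

assert quant_op_attr["process_mesh"] == origin_op_attr["process_mesh"]
assert quant_op_attr["inputs"]["x.quantized"] == origin_op_attr["inputs"]["x"]
# ---------------------------------------------------------------------------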
origin_op_dist_attr = dist_origin_op.dist_attr quant_op_dist_attr.impl_idx = origin_op_dist_attr.impl_idx quant_op_dist_attr.impl_type = origin_op_dist_attr.impl_type - quant_op_dist_attr.process_mesh = origin_op_dist_attr.process_mesh + quant_op_dist_attr.process_mesh = ( + origin_op_dist_attr.process_mesh + ) for idx, input_name in enumerate(quant_op.input_arg_names): origin_input_name = origin_op.input_arg_names[idx] - origin_input_dist_attr = origin_op_dist_attr.inputs_dist_attrs[ - origin_input_name] + origin_input_dist_attr = ( + origin_op_dist_attr.inputs_dist_attrs[ + origin_input_name + ] + ) quant_op_dist_attr.set_input_dist_attr( - input_name, origin_input_dist_attr) + input_name, origin_input_dist_attr + ) if input_name not in main_program.blocks[ib].vars: origin_input_var = main_program.blocks[ib].vars[ - origin_input_name] - origin_in_tensor_dist_attr = dist_context.get_dist_tensor_for_program( - origin_input_var).dist_attr + origin_input_name + ] + origin_in_tensor_dist_attr = ( + dist_context.get_dist_tensor_for_program( + origin_input_var + ).dist_attr + ) quant_input_var = block.vars[input_name] dist_context.set_tensor_dist_attr_for_program( - quant_input_var, origin_in_tensor_dist_attr) + quant_input_var, origin_in_tensor_dist_attr + ) for idx, output_name in enumerate( - quant_op.output_arg_names): + quant_op.output_arg_names + ): origin_output_name = origin_op.output_arg_names[idx] - origin_output_dist_attr = origin_op_dist_attr.outputs_dist_attrs[ - origin_output_name] + origin_output_dist_attr = ( + origin_op_dist_attr.outputs_dist_attrs[ + origin_output_name + ] + ) quant_op_dist_attr.set_output_dist_attr( - output_name, origin_output_dist_attr) + output_name, origin_output_dist_attr + ) if output_name not in main_program.blocks[ib].vars: origin_output_var = main_program.blocks[ib].vars[ - origin_output_name] - origin_out_tensor_dist_attr = dist_context.get_dist_tensor_for_program( - origin_output_var).dist_attr + origin_output_name + ] + origin_out_tensor_dist_attr = ( + dist_context.get_dist_tensor_for_program( + origin_output_var + ).dist_attr + ) quant_output_var = block.vars[output_name] dist_context.set_tensor_dist_attr_for_program( - quant_output_var, origin_out_tensor_dist_attr) + quant_output_var, origin_out_tensor_dist_attr + ) dist_context.set_op_dist_attr_for_program( - quant_op, quant_op_dist_attr) + quant_op, quant_op_dist_attr + ) # recover vars' dist_attr for name, dst_var in block.vars.items(): if name in main_program.blocks[ib].vars: src_var = main_program.blocks[ib].vars[name] dist_tensor = dist_context.get_dist_tensor_for_program( - src_var) + src_var + ) if not dist_tensor: continue dist_context.set_tensor_dist_attr_for_program( - dst_var, dist_tensor.dist_attr) + dst_var, dist_tensor.dist_attr + ) context.set_attr("main_program", quant_program) context.set_attr("startup_program", startup_program) diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py b/python/paddle/distributed/passes/auto_parallel_recompute.py index 81bda0d3d39557d37c6a43b52dde9b338fbfa14e..26568fbe727553d4d864d0e15b703330c1b48b39 100644 --- a/python/paddle/distributed/passes/auto_parallel_recompute.py +++ b/python/paddle/distributed/passes/auto_parallel_recompute.py @@ -20,13 +20,20 @@ from paddle.fluid import framework as framework from paddle.fluid.framework import Variable from paddle.fluid.backward import _append_grad_suffix_, _get_no_grad_set_name from paddle.fluid.backward import ProgramStats, _rename_arg_, _find_op_path_ -from 
paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute -from paddle.distributed.auto_parallel.utils import get_loss_op, set_var_dist_attr, set_dist_op_desc_original_id -from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) +from paddle.distributed.auto_parallel.utils import ( + get_loss_op, + set_var_dist_attr, + set_dist_op_desc_original_id, +) +from paddle.distributed.auto_parallel.utils import ( + naive_set_dist_op_attr_for_program_by_mesh_and_mapping, +) class RecomputeState(ProgramStats): - def __init__(self, block, ops): super(RecomputeState, self).__init__(block=block, ops=ops) self._block = block @@ -52,7 +59,7 @@ class RecomputeState(ProgramStats): self.var_op_deps[name]["var_as_output_ops"] = [i] def get_recompute_segments(self, checkpoints): - """ get recompute segments from checkpoints """ + """get recompute segments from checkpoints""" segments = [] start_idx = -1 pre_segment_end_idx = -1 @@ -67,27 +74,37 @@ class RecomputeState(ProgramStats): segments.append([0, max(op_idx_list) + 1]) else: flag, min_idx, max_idx = self.is_subgraph( - [checkpoints[start_idx]], [checkpoints[start_idx + 1]]) + [checkpoints[start_idx]], [checkpoints[start_idx + 1]] + ) if flag: min_idx = self._update_segment_start( - min_idx, pre_segment_end_idx) + min_idx, pre_segment_end_idx + ) segments.append([min_idx, max_idx + 1]) else: logging.info( "Could not recompute op range [{}] - [{}] ".format( - min_idx, max_idx + 1)) + min_idx, max_idx + 1 + ) + ) start_idx += 1 for i, (idx1, idx2) in enumerate(segments): logging.info("recompute segment[{}]".format(i)) - logging.info("segment start op: [{}]: [{}] [{}]".format( - self._ops[idx1].desc.type(), - self._ops[idx1].desc.input_arg_names(), - self._ops[idx1].desc.output_arg_names())) - logging.info("segment end op: [{}]: [{}] [{}]".format( - self._ops[idx2 - 1].desc.type(), - self._ops[idx2 - 1].desc.input_arg_names(), - self._ops[idx2 - 1].desc.output_arg_names())) + logging.info( + "segment start op: [{}]: [{}] [{}]".format( + self._ops[idx1].desc.type(), + self._ops[idx1].desc.input_arg_names(), + self._ops[idx1].desc.output_arg_names(), + ) + ) + logging.info( + "segment end op: [{}]: [{}] [{}]".format( + self._ops[idx2 - 1].desc.type(), + self._ops[idx2 - 1].desc.input_arg_names(), + self._ops[idx2 - 1].desc.output_arg_names(), + ) + ) return segments @@ -115,44 +132,49 @@ class RecomputeState(ProgramStats): cur_op_dist_attr = dist_context.get_op_dist_attr_for_program(cur_op) # insert seed op to guarantee that two dropout op have the same outputs op_unique_name = unique_name.generate("seed") - var_unique_name = unique_name.generate_with_ignorable_key(".".join( - [op_unique_name, 'tmp'])) + var_unique_name = unique_name.generate_with_ignorable_key( + ".".join([op_unique_name, 'tmp']) + ) seed_var = self._block.create_var( name=var_unique_name, dtype='int32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) # set new seed_var's dist_attr ref_dims_mapping = [-1] ref_process_mesh = cur_op_dist_attr.process_mesh - seed_var_dist_attr = set_var_dist_attr(dist_context, seed_var, - ref_dims_mapping, - ref_process_mesh) - - seed = 0 if cur_op.attr("fix_seed") is False else int( - cur_op.attr("seed")) + seed_var_dist_attr = set_var_dist_attr( + dist_context, seed_var, ref_dims_mapping, ref_process_mesh + ) + + seed = ( + 0 + if 
cur_op.attr("fix_seed") is False + else int(cur_op.attr("seed")) + ) seed_op = self._block._insert_op_without_sync( index=cur_op.idx, type="seed", inputs={}, outputs={"Out": seed_var}, - attrs={ - "seed": seed, - "force_cpu": True - }) + attrs={"seed": seed, "force_cpu": True}, + ) # set new seed op's dist_attr naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - seed_op, ref_process_mesh, ref_dims_mapping, dist_context) + seed_op, ref_process_mesh, ref_dims_mapping, dist_context + ) # modify dropout op's desc self._ops.insert(op_idx, seed_op) cur_op.desc.set_input("Seed", [var_unique_name]) cur_op._remove_attr("fix_seed") cur_op._remove_attr("seed") - cur_op_dist_attr.set_input_dist_attr(seed_var.name, - seed_var_dist_attr) + cur_op_dist_attr.set_input_dist_attr( + seed_var.name, seed_var_dist_attr + ) op_idx += 2 self._block._sync_with_cpp() @@ -166,7 +188,7 @@ def _find_op_index(block, cur_op): def _get_stop_gradients(program, no_grad_set): - """ get no grad var """ + """get no grad var""" if no_grad_set is None: no_grad_set = set() else: @@ -183,8 +205,9 @@ def _get_stop_gradients(program, no_grad_set): return no_grad_set_name -def _add_needed_descs_to_block(descs, block, main_block, in_memory_vars, - dist_context): +def _add_needed_descs_to_block( + descs, block, main_block, in_memory_vars, dist_context +): """ Get the recomputed ops which will insert the backward part """ @@ -215,7 +238,6 @@ def _add_needed_descs_to_block(descs, block, main_block, in_memory_vars, @register_pass("auto_parallel_recompute") class RecomputePass(PassBase): - def __init__(self): super(RecomputePass, self).__init__() self.set_attr("checkpoints", None) @@ -259,12 +281,15 @@ class RecomputePass(PassBase): vars_should_be_hold = [] for segment in segments: vars_should_be_hold.extend( - rc_state.get_out_of_subgraph_vars(segment[0], segment[1])) + rc_state.get_out_of_subgraph_vars(segment[0], segment[1]) + ) cross_vars = set(vars_should_be_hold) - set(checkpoints) logging.info( "found [{}] vars which cross recompute segment: [{}]," "better checkpoints might be set to reduce those vars".format( - len(cross_vars), cross_vars)) + len(cross_vars), cross_vars + ) + ) vars_should_be_hold.extend(rc_state.get_reserved_vars()) vars_should_be_hold.extend(rc_state.get_input_nodes()) vars_should_be_hold = list(set(vars_should_be_hold)) @@ -275,14 +300,15 @@ class RecomputePass(PassBase): ckpt_ops_dict = {} buffer_block = main_block.program._create_block() for i, segment in enumerate(segments[::-1]): - fwd_ops = op_path[segment[0]:segment[1]] + fwd_ops = op_path[segment[0] : segment[1]] var_suffix = ".subprog_%d" % i for op in fwd_ops: input_and_output_names = [] input_and_output_names.extend(op.desc.input_arg_names()) input_and_output_names.extend(op.desc.output_arg_names()) - cur_op_dist_attr = self._dist_context.get_op_dist_attr_for_program( - op) + cur_op_dist_attr = ( + self._dist_context.get_op_dist_attr_for_program(op) + ) assert cur_op_dist_attr is not None for name in input_and_output_names: if main_block.var(name).persistable or name in checkpoints: @@ -292,11 +318,13 @@ class RecomputePass(PassBase): if name not in var_name_dict: ref_process_mesh = cur_op_dist_attr.process_mesh if name in op.desc.input_arg_names(): - ref_dims_mapping = cur_op_dist_attr.get_input_dims_mapping( - name) + ref_dims_mapping = ( + cur_op_dist_attr.get_input_dims_mapping(name) + ) else: - ref_dims_mapping = cur_op_dist_attr.get_output_dims_mapping( - name) + ref_dims_mapping = ( + cur_op_dist_attr.get_output_dims_mapping(name) + ) # 
record recomputed var's old_name and new_name (old_name.subprog_XXX) # create new var with new name var_name_dict[name] = name + var_suffix @@ -307,15 +335,23 @@ class RecomputePass(PassBase): dtype=ref_var.dtype, type=ref_var.type, persistable=ref_var.persistable, - stop_gradient=ref_var.stop_gradient) + stop_gradient=ref_var.stop_gradient, + ) # set new recomputed var's dist attr - set_var_dist_attr(self._dist_context, rc_var, - ref_dims_mapping, ref_process_mesh) + set_var_dist_attr( + self._dist_context, + rc_var, + ref_dims_mapping, + ref_process_mesh, + ) # get recomputed segment's descs - segment_descs = _add_needed_descs_to_block(fwd_ops, buffer_block, - main_block, - vars_in_memory, - self._dist_context) + segment_descs = _add_needed_descs_to_block( + fwd_ops, + buffer_block, + main_block, + vars_in_memory, + self._dist_context, + ) # rename recomputed ops' input and output var name for key in var_name_dict: _rename_arg_(segment_descs, key, var_name_dict[key]) @@ -343,7 +379,10 @@ class RecomputePass(PassBase): # rename grad op's var_name which is not in 'vars_in_memory' for key in var_name_dict: - if key not in grad_op.input_arg_names + grad_op.output_arg_names: + if ( + key + not in grad_op.input_arg_names + grad_op.output_arg_names + ): continue self.reset_op_dist_attr(grad_op, var_name_dict) _rename_arg_([grad_op.desc], key, var_name_dict[key]) @@ -358,17 +397,20 @@ class RecomputePass(PassBase): idx -= 1 segment_descs = ckpt_ops_dict[fwd_op_id][1] for _, op_desc in reversed(list(enumerate(segment_descs))): - rc_op = main_block._insert_op_without_sync(idx, - type='nop') + rc_op = main_block._insert_op_without_sync( + idx, type='nop' + ) rc_desc = rc_op.desc rc_desc.copy_from(op_desc) rc_desc.set_original_id(rc_desc.id()) # set recomputed ops' dist attr fwd_op_dist_attr = self._dist_context.get_op_dist_attr_for_program_with_id( - op_desc.original_id()) + op_desc.original_id() + ) assert fwd_op_dist_attr is not None - self.set_op_dist_attr(rc_op, fwd_op_dist_attr, - var_name_dict) + self.set_op_dist_attr( + rc_op, fwd_op_dist_attr, var_name_dict + ) ckpt_ops_dict[fwd_op_id][0] = False @@ -380,13 +422,15 @@ class RecomputePass(PassBase): for input in op.desc.input_arg_names(): if input in var_name_dict.keys(): in_dist_attr = op_dist_attr.get_input_dist_attr(input) - op_dist_attr.set_input_dist_attr(var_name_dict[input], - in_dist_attr) + op_dist_attr.set_input_dist_attr( + var_name_dict[input], in_dist_attr + ) for output in op.desc.output_arg_names(): if output in var_name_dict.keys(): out_dist_attr = op_dist_attr.get_output_dist_attr(output) - op_dist_attr.set_output_dist_attr(var_name_dict[output], - out_dist_attr) + op_dist_attr.set_output_dist_attr( + var_name_dict[output], out_dist_attr + ) def set_op_dist_attr(self, op, old_dist_attr, var_name_dict): new_dist_attr = OperatorDistributedAttribute() @@ -397,16 +441,18 @@ class RecomputePass(PassBase): for input in old_dist_attr.inputs_dist_attrs.keys(): if input in var_name_dict.keys(): in_dist_attr = old_dist_attr.inputs_dist_attrs[input] - new_dist_attr.set_input_dist_attr(var_name_dict[input], - in_dist_attr) + new_dist_attr.set_input_dist_attr( + var_name_dict[input], in_dist_attr + ) else: in_dist_attr = old_dist_attr.inputs_dist_attrs[input] new_dist_attr.set_input_dist_attr(input, in_dist_attr) for output in old_dist_attr.outputs_dist_attrs.keys(): if output in var_name_dict.keys(): out_dist_attr = old_dist_attr.outputs_dist_attrs[output] - new_dist_attr.set_output_dist_attr(var_name_dict[output], - out_dist_attr) + 
new_dist_attr.set_output_dist_attr( + var_name_dict[output], out_dist_attr + ) else: out_dist_attr = old_dist_attr.outputs_dist_attrs[output] new_dist_attr.set_output_dist_attr(output, out_dist_attr) diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py index 171188618efe2b5920a8c7d78694be42b0c686c9..51262506598662c9c24daa5da59ce22350ad8cab 100644 --- a/python/paddle/distributed/passes/auto_parallel_sharding.py +++ b/python/paddle/distributed/passes/auto_parallel_sharding.py @@ -17,27 +17,51 @@ from functools import reduce from paddle.framework import core from paddle.fluid import unique_name from .pass_base import PassBase, register_pass -from paddle.distributed.fleet.meta_optimizers.common import is_backward_op, is_optimizer_op +from paddle.distributed.fleet.meta_optimizers.common import ( + is_backward_op, + is_optimizer_op, +) from paddle.distributed.auto_parallel.process_group import new_process_group -from paddle.distributed.auto_parallel.operators.common import is_parameter_related, is_data_parallel_reduce_op -from paddle.distributed.auto_parallel.utils import _get_comm_group, naive_set_dist_op_attr_for_program_by_mesh_and_mapping, set_var_dist_attr +from paddle.distributed.auto_parallel.operators.common import ( + is_parameter_related, + is_data_parallel_reduce_op, +) +from paddle.distributed.auto_parallel.utils import ( + _get_comm_group, + naive_set_dist_op_attr_for_program_by_mesh_and_mapping, + set_var_dist_attr, +) OpRole = core.op_proto_and_checker_maker.OpRole OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName() _skip_ops = [ - 'create_py_reader', 'create_double_buffer_reader', 'read', 'slice', 'split', - 'assign', "send_v2" + 'create_py_reader', + 'create_double_buffer_reader', + 'read', + 'slice', + 'split', + 'assign', + "send_v2", ] # update here to support new optimizers _supported_optimizer_type = [ - "adam", "adamax", "adamw", "decayed_adagrad", "momentum", "dgc_momentum", - "lars_momentum", "merged_momentum", "lamb", "sgd" + "adam", + "adamax", + "adamw", + "decayed_adagrad", + "momentum", + "dgc_momentum", + "lars_momentum", + "merged_momentum", + "lamb", + "sgd", ] def _is_reshard_op(op): - return op.desc.has_attr("op_namescope") and \ - "/auto_parallel/reshard" in op.desc.attr('op_namescope') + return op.desc.has_attr( + "op_namescope" + ) and "/auto_parallel/reshard" in op.desc.attr('op_namescope') # NOTE we add the "auto_parallel" prefix to the pass in order to @@ -46,7 +70,6 @@ def _is_reshard_op(op): # should use dist op instead of custom comm op @register_pass("auto_parallel_sharding") class ShardingPass(PassBase): - def __init__(self): super(ShardingPass, self).__init__() self.set_attr("dist_context", None) @@ -69,19 +92,22 @@ class ShardingPass(PassBase): if self.get_attr("stage") not in [1, 2, 3]: return False if self.get_attr("sharding_degree") is not None: - if (not isinstance(self.get_attr("sharding_degree"), int)) \ - or self.get_attr("sharding_degree") <= 1: + if ( + not isinstance(self.get_attr("sharding_degree"), int) + ) or self.get_attr("sharding_degree") <= 1: return False elif self.get_attr("degree") is not None: - if (not isinstance(self.get_attr("degree"), int)) \ - or self.get_attr("degree") <= 1: + if (not isinstance(self.get_attr("degree"), int)) or self.get_attr( + "degree" + ) <= 1: return False else: return False if len(self.get_attr("params_grads")) <= 0: return False - if (not isinstance(self.get_attr("global_rank"), - int)) or 
self.get_attr("global_rank") < 0: + if (not isinstance(self.get_attr("global_rank"), int)) or self.get_attr( + "global_rank" + ) < 0: return False return True @@ -92,12 +118,15 @@ class ShardingPass(PassBase): def _apply_single_impl(self, main_program, startup_program, context): self._dist_context = self.get_attr("dist_context") self.sharding_world_size = int( - self.get_attr("sharding_degree") or self.get_attr("degree")) + self.get_attr("sharding_degree") or self.get_attr("degree") + ) self.stage = int(self.get_attr("stage")) self.global_rank = int(self.get_attr("global_rank")) params_grads = self.get_attr("params_grads") - main_block, startup_block = main_program.global_block( - ), startup_program.global_block() + main_block, startup_block = ( + main_program.global_block(), + startup_program.global_block(), + ) self._build_sharding_groups(main_block, params_grads) self._shard_optimizer(main_block, startup_block, params_grads, context) @@ -119,7 +148,8 @@ class ShardingPass(PassBase): if _is_reshard_op(op): continue group = _inference_data_parallel_group_for_operator( - self.global_rank, op, self._dist_context) + self.global_rank, op, self._dist_context + ) if group is not None: self.dp_groups.add(group) @@ -127,32 +157,45 @@ class ShardingPass(PassBase): # genetated by auto search if len(self.dp_groups) != 1: raise NotImplementedError( - "So far Only and Exactly one data parallel group in network are supported, but got [{}] different data parallel groups" - .format(len(self.dp_groups))) + "So far Only and Exactly one data parallel group in network are supported, but got [{}] different data parallel groups".format( + len(self.dp_groups) + ) + ) def _build_sharding_infos(self, params_grads): for dp_group in self.dp_groups: - assert dp_group.nranks >= self.sharding_world_size, "sharding world size [{}] should not larger than dp world size [{}]".format( - self.sharding_world_size, dp_group.nranks) - assert dp_group.nranks % self.sharding_world_size == 0, "sharding world size [{}] should be divisible by dp world size [{}]".format( - self.sharding_world_size, dp_group.nranks) - assert self.global_rank in dp_group.ranks, "current ranks [{}] does NOT belong to the data parallel group [{}]".format( - self.global_rank, dp_group.ranks) - assert len( - params_grads - ) >= self.sharding_world_size, "number of parameters [{}] is not enough to be shard among [{}] ranks".format( - len(params_grads), self.sharding_world_size) + assert ( + dp_group.nranks >= self.sharding_world_size + ), "sharding world size [{}] should not larger than dp world size [{}]".format( + self.sharding_world_size, dp_group.nranks + ) + assert ( + dp_group.nranks % self.sharding_world_size == 0 + ), "sharding world size [{}] should be divisible by dp world size [{}]".format( + self.sharding_world_size, dp_group.nranks + ) + assert ( + self.global_rank in dp_group.ranks + ), "current ranks [{}] does NOT belong to the data parallel group [{}]".format( + self.global_rank, dp_group.ranks + ) + assert ( + len(params_grads) >= self.sharding_world_size + ), "number of parameters [{}] is not enough to be shard among [{}] ranks".format( + len(params_grads), self.sharding_world_size + ) # sharding hybrid data parallel: partial sharding param within if dp_group.nranks > self.sharding_world_size: self.partial_sharding = True - assert len( - self.dp_groups - ) == 1, "hybrid sharding and data parallelism are supported only when there is excatly one data parallel group in the network" + assert ( + len(self.dp_groups) == 1 + ), "hybrid sharding 
and data parallelism are supported only when there is excatly one data parallel group in the network" outer_dp_group, sharding_group = _get_dp_and_sharding_groups( - dp_group.ranks, self.sharding_world_size, self.global_rank) + dp_group.ranks, self.sharding_world_size, self.global_rank + ) sharding_group = new_process_group(sharding_group) self.outer_dp_group = new_process_group(outer_dp_group) else: @@ -160,14 +203,16 @@ class ShardingPass(PassBase): self._dist_context._sharding_group = sharding_group # TODO(JZ-LIANG) when support multiple dp groups in future, should group param and bind them to corresponding dp group - sharding_info = ShardingInfo(sharding_group, self.global_rank, - params_grads) + sharding_info = ShardingInfo( + sharding_group, self.global_rank, params_grads + ) self.sharding_infos.append(sharding_info) for param in sharding_info.params: self.varname_to_sharding_info[param.name] = sharding_info - def _shard_optimizer(self, main_block, startup_block, params_grads, - pass_context): + def _shard_optimizer( + self, main_block, startup_block, params_grads, pass_context + ): """ sharding all optimizer related ops and vars, include: gradient clip ops & vars @@ -189,7 +234,7 @@ class ShardingPass(PassBase): # shard amp related param_grad cast if _is_param_grad_fp32_cast_op(main_block, op): output_name = op.output_arg_names[0] - param_name = output_name[:output_name.find("@")] + param_name = output_name[: output_name.find("@")] if not self._is_parameter_in_local_shard(param_name): main_block._remove_op(idx, sync=False) main_block._remove_var(output_name, sync=False) @@ -198,7 +243,7 @@ class ShardingPass(PassBase): elif op.type in ["check_finite_and_unscale", "update_loss_scaling"]: reversed_x = [] for input_name in op.desc.input('X'): - param_name = input_name[:input_name.find("@")] + param_name = input_name[: input_name.find("@")] if self._is_parameter_in_local_shard(param_name): reversed_x.append(input_name) @@ -223,7 +268,8 @@ class ShardingPass(PassBase): "dtype": out_var.dtype, "value": 0, OP_ROLE_KEY: op_role, - }) + }, + ) else: main_block._remove_op(idx, sync=False) @@ -245,7 +291,7 @@ class ShardingPass(PassBase): if op.type in removed_op_type: input_name = op.input("X")[0] - param_name = input_name[:input_name.find("@GRAD")] + param_name = input_name[: input_name.find("@GRAD")] if not self._is_parameter_in_local_shard(param_name): removed_op_idx.add(idx) if op.type in ['squared_l2_norm', 'clip_by_norm']: @@ -283,9 +329,13 @@ class ShardingPass(PassBase): 'op_namescope': "/gradient_clip_model_parallelism", 'use_calc_stream': True, OP_ROLE_KEY: OpRole.Optimize, - }) - dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - main_block.var(sum_op_output)) + }, + ) + dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + main_block.var(sum_op_output) + ) + ) # assert dist_attr is not None # naive_set_dist_op_attr_for_program_by_mesh_and_mapping( # new_op, dist_attr.process_mesh, dist_attr.dims_mapping, @@ -304,7 +354,8 @@ class ShardingPass(PassBase): continue else: raise NotImplementedError( - "weight decay is NOT supported by now") + "weight decay is NOT supported by now" + ) main_block._sync_with_cpp() def _shard_optimizer_ops_and_states(self, main_block, startup_block): @@ -319,18 +370,24 @@ class ShardingPass(PassBase): assert len(op.input("Param")) == 1 param_name = op.input("Param")[0] if not self._is_parameter_in_local_shard(param_name): - should_removed_optimizer_states.extend([ - varname for varname in op.output_arg_names - if varname 
!= param_name - ]) + should_removed_optimizer_states.extend( + [ + varname + for varname in op.output_arg_names + if varname != param_name + ] + ) main_block._remove_op(idx, sync=False) else: self.shared_params_grads.append( - self._get_param_grad(param_name)) + self._get_param_grad(param_name) + ) for idx, op in reversed(list(enumerate(startup_block.ops))): - if len(op.output_arg_names) == 1 and op.output_arg_names[ - 0] in should_removed_optimizer_states: + if ( + len(op.output_arg_names) == 1 + and op.output_arg_names[0] in should_removed_optimizer_states + ): startup_block._remove_op(idx, sync=False) for varname in should_removed_optimizer_states: @@ -352,26 +409,27 @@ class ShardingPass(PassBase): assert main_block.has_var(param.name) assert startup_block.has_var(param.name) - new_op = main_block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': - sharding_info.group.id, - 'root': - sharding_info.get_var_rank( - param.name), - 'use_calc_stream': - True, - OP_ROLE_KEY: - OpRole.Optimize - }) - param_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - param) + new_op = main_block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': sharding_info.group.id, + 'root': sharding_info.get_var_rank(param.name), + 'use_calc_stream': True, + OP_ROLE_KEY: OpRole.Optimize, + }, + ) + param_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program(param) + ) assert param_dist_attr is not None naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, param_dist_attr.process_mesh, - param_dist_attr.dims_mapping, self._dist_context) + new_op, + param_dist_attr.process_mesh, + param_dist_attr.dims_mapping, + self._dist_context, + ) main_block._sync_with_cpp() def _is_parameter_in_local_shard(self, param_name): @@ -397,12 +455,18 @@ class ShardingPass(PassBase): input_name = op.input_arg_names[0] base_name = _get_base_name_from_grad_name(input_name) sharding_info = self.varname_to_sharding_info[base_name] - _insert_reduce_op(main_block, idx, input_name, - sharding_info.group.id, - sharding_info.get_var_rank(base_name), - self._dist_context) - if not self.partial_sharding or not sharding_info.is_in_local_shard( - base_name): + _insert_reduce_op( + main_block, + idx, + input_name, + sharding_info.group.id, + sharding_info.get_var_rank(base_name), + self._dist_context, + ) + if ( + not self.partial_sharding + or not sharding_info.is_in_local_shard(base_name) + ): main_block._remove_op(idx + 1, sync=False) else: op._set_attr("ring_id", self.outer_dp_group.id) @@ -427,12 +491,17 @@ class ShardingPass(PassBase): dp_ring_ids = [group.id for group in self.dp_groups] for sharding_info in self.sharding_infos: - need_broadcast_vars, param_usage = sharding_info.get_broadcast_vars_and_param_usage( - main_block) + ( + need_broadcast_vars, + param_usage, + ) = sharding_info.get_broadcast_vars_and_param_usage(main_block) not_used_param_nane = [] for param_name in param_usage: - if param_usage[param_name] == 0 and sharding_info.get_var_rank( - param_name) != sharding_info.local_rank: + if ( + param_usage[param_name] == 0 + and sharding_info.get_var_rank(param_name) + != sharding_info.local_rank + ): not_used_param_nane.append(param_name) for idx, op in reversed(list(enumerate(main_block.ops))): @@ -442,8 +511,9 @@ class ShardingPass(PassBase): for input_name in op.desc.input_arg_names(): # NOTE hack for embedding op when AMP 02-3 # paddle amp force embedding (lookup table) to be run on 
fp32 - if _is_param_fp16_cast_op(main_block, op, - sharding_info.param_names): + if _is_param_fp16_cast_op( + main_block, op, sharding_info.param_names + ): continue if input_name not in need_broadcast_vars: continue @@ -451,28 +521,39 @@ class ShardingPass(PassBase): if root_rank == sharding_info.local_rank: broadcast_varname = input_name else: - broadcast_varname = unique_name.generate(input_name + - "@BroadCast") + broadcast_varname = unique_name.generate( + input_name + "@BroadCast" + ) input_var = main_block.var(input_name) - new_var = main_block.create_var(name=broadcast_varname, - shape=input_var.shape, - dtype=input_var.dtype, - persistable=False) - ref_dist_attr = self._dist_context.get_tensor_dist_attr_for_program( - input_var) + new_var = main_block.create_var( + name=broadcast_varname, + shape=input_var.shape, + dtype=input_var.dtype, + persistable=False, + ) + ref_dist_attr = ( + self._dist_context.get_tensor_dist_attr_for_program( + input_var + ) + ) out_var_dist_attr = set_var_dist_attr( - self._dist_context, new_var, + self._dist_context, + new_var, ref_dist_attr.dims_mapping, - ref_dist_attr.process_mesh) + ref_dist_attr.process_mesh, + ) op._rename_input(input_name, broadcast_varname) - _insert_init_and_broadcast_op(main_block, idx, - broadcast_varname, - sharding_info.local_rank, - root_rank, - sharding_info.group.id, - op.attr('op_role'), - self._dist_context) + _insert_init_and_broadcast_op( + main_block, + idx, + broadcast_varname, + sharding_info.local_rank, + root_rank, + sharding_info.group.id, + op.attr('op_role'), + self._dist_context, + ) for idx, op in reversed(list(enumerate(main_block.ops))): if op.type != "cast": @@ -487,22 +568,33 @@ class ShardingPass(PassBase): assert len(op.output_arg_names) == 1 output_name = op.output_arg_names[0] - if op.type == "c_broadcast" and op.attr( - "ring_id") in dp_ring_ids: - if self.outer_dp_group and sharding_info.get_var_rank( - output_name) == sharding_info.local_rank: + if ( + op.type == "c_broadcast" + and op.attr("ring_id") in dp_ring_ids + ): + if ( + self.outer_dp_group + and sharding_info.get_var_rank(output_name) + == sharding_info.local_rank + ): op._set_attr("ring_id", self.outer_dp_group.id) else: startup_block._remove_op(idx, sync=False) continue - if op.type != "c_broadcast" and output_name in param_usage and sharding_info.get_var_rank( - output_name) != sharding_info.local_rank: + if ( + op.type != "c_broadcast" + and output_name in param_usage + and sharding_info.get_var_rank(output_name) + != sharding_info.local_rank + ): startup_block._remove_op(idx, sync=False) for param_name in param_usage: - if sharding_info.get_var_rank( - param_name) != sharding_info.local_rank: + if ( + sharding_info.get_var_rank(param_name) + != sharding_info.local_rank + ): main_block._remove_var(param_name, sync=False) startup_block._remove_var(param_name, sync=False) @@ -510,28 +602,42 @@ class ShardingPass(PassBase): startup_block._sync_with_cpp() -def _insert_init_and_broadcast_op(block, insert_idx, varname, local_rank, - root_rank, ring_id, op_role, dist_context): +def _insert_init_and_broadcast_op( + block, + insert_idx, + varname, + local_rank, + root_rank, + ring_id, + op_role, + dist_context, +): """ empty op for initialization """ broadcast_var = block.var(varname) broadcast_var_dist_attr = dist_context.get_tensor_dist_attr_for_program( - broadcast_var) - - new_op = block._insert_op_without_sync(insert_idx, - type='c_broadcast', - inputs={'X': varname}, - outputs={'Out': varname}, - attrs={ - 'ring_id': ring_id, - 
'root': root_rank, - 'use_calc_stream': True, - OP_ROLE_KEY: op_role - }) + broadcast_var + ) + + new_op = block._insert_op_without_sync( + insert_idx, + type='c_broadcast', + inputs={'X': varname}, + outputs={'Out': varname}, + attrs={ + 'ring_id': ring_id, + 'root': root_rank, + 'use_calc_stream': True, + OP_ROLE_KEY: op_role, + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, broadcast_var_dist_attr.process_mesh, - broadcast_var_dist_attr.dims_mapping, dist_context) + new_op, + broadcast_var_dist_attr.process_mesh, + broadcast_var_dist_attr.dims_mapping, + dist_context, + ) if local_rank != root_rank: new_op = block._insert_op_without_sync( @@ -541,40 +647,50 @@ def _insert_init_and_broadcast_op(block, insert_idx, varname, local_rank, attrs={ "shape": broadcast_var.shape, "dtype": broadcast_var.dtype, - OP_ROLE_KEY: op_role - }) + OP_ROLE_KEY: op_role, + }, + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, broadcast_var_dist_attr.process_mesh, - broadcast_var_dist_attr.dims_mapping, dist_context) + new_op, + broadcast_var_dist_attr.process_mesh, + broadcast_var_dist_attr.dims_mapping, + dist_context, + ) return -def _insert_reduce_op(block, - insert_idx, - reduce_var, - ring_id, - root_id, - dist_context, - op_role=OpRole.Backward, - use_calc_stream=True): - assert root_id >= 0, "root id should be a positive int, but now root id is {}".format( - root_id) - new_op = block._insert_op_without_sync(insert_idx, - type='c_reduce_sum', - inputs={'X': [reduce_var]}, - outputs={'Out': [reduce_var]}, - attrs={ - 'ring_id': ring_id, - 'root_id': root_id, - 'use_calc_stream': - use_calc_stream, - OP_ROLE_KEY: op_role - }) +def _insert_reduce_op( + block, + insert_idx, + reduce_var, + ring_id, + root_id, + dist_context, + op_role=OpRole.Backward, + use_calc_stream=True, +): + assert ( + root_id >= 0 + ), "root id should be a positive int, but now root id is {}".format(root_id) + new_op = block._insert_op_without_sync( + insert_idx, + type='c_reduce_sum', + inputs={'X': [reduce_var]}, + outputs={'Out': [reduce_var]}, + attrs={ + 'ring_id': ring_id, + 'root_id': root_id, + 'use_calc_stream': use_calc_stream, + OP_ROLE_KEY: op_role, + }, + ) dist_attr = dist_context.get_tensor_dist_attr_for_program( - block.var(reduce_var)) + block.var(reduce_var) + ) naive_set_dist_op_attr_for_program_by_mesh_and_mapping( - new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context) + new_op, dist_attr.process_mesh, dist_attr.dims_mapping, dist_context + ) def _get_dp_and_sharding_groups(origin_group, sharding_group_size, rank): @@ -589,23 +705,26 @@ def _get_dp_and_sharding_groups(origin_group, sharding_group_size, rank): def _is_gradient_clip_op(op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/gradient_clip") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/gradient_clip") def _is_weight_decay_op(op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/regularization") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/regularization") def _is_param_grad_fp32_cast_op(block, op): if not is_backward_op(op): return False - if not _is_desired_cast_op(block, op, core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32): + if not _is_desired_cast_op( + block, op, core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32 + ): return False output_name = op.desc.output_arg_names()[0] - base_name = 
output_name[:output_name.find("@")] + base_name = output_name[: output_name.find("@")] if not block.has_var(base_name): return False return block.var(base_name).is_parameter @@ -623,19 +742,20 @@ def _is_param_fp16_cast_op(block, op, params): return True -def _is_desired_cast_op(block, - op, - src_var_type=core.VarDesc.VarType.FP32, - dst_var_type=core.VarDesc.VarType.FP16): +def _is_desired_cast_op( + block, + op, + src_var_type=core.VarDesc.VarType.FP32, + dst_var_type=core.VarDesc.VarType.FP16, +): if op.type != "cast": return False - assert (len(op.desc.input_arg_names()) == 1) - assert (len(op.desc.output_arg_names()) == 1) + assert len(op.desc.input_arg_names()) == 1 + assert len(op.desc.output_arg_names()) == 1 input_var = block.var(op.desc.input_arg_names()[0]) output_var = block.var(op.desc.output_arg_names()[0]) - if input_var.dtype != src_var_type or \ - output_var.dtype != dst_var_type: + if input_var.dtype != src_var_type or output_var.dtype != dst_var_type: return False return True @@ -644,9 +764,9 @@ def _is_desired_cast_op(block, def _get_base_name_from_grad_name(grad_name): base_name = None if ".cast_fp16@GRAD" in grad_name: - base_name = grad_name[:grad_name.find(".cast_fp16@GRAD")] + base_name = grad_name[: grad_name.find(".cast_fp16@GRAD")] elif "@GRAD" in grad_name: - base_name = grad_name[:grad_name.find("@GRAD")] + base_name = grad_name[: grad_name.find("@GRAD")] return base_name @@ -696,9 +816,12 @@ def _inference_data_parallel_group_for_operator(rank_id, op, dist_context): # TODO(JZ-LIANG) replace with specific batch size dimension batch_size_axis = input_dim_mapping[0] if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, - batch_size_axis, rank_id) + group_ranks = _get_comm_group( + process_mesh.processes, + process_mesh.topology, + batch_size_axis, + rank_id, + ) dp_group = new_process_group(group_ranks) break @@ -717,20 +840,23 @@ def shard_parameters(params, group_size): rank = sizes.index(min(sizes)) mapping[rank].append(param) numel = reduce(lambda x, y: x * y, param.shape) - assert numel > 0, "param [{}] should larger than 0, but it is [{}]".format( - param.name, numel) + assert ( + numel > 0 + ), "param [{}] should larger than 0, but it is [{}]".format( + param.name, numel + ) sizes[rank] += numel return mapping class ShardingInfo(object): - def __init__(self, group, rank, params_grads): self.group = group self.params_grads = dict([(p.name, (p, g)) for p, g in params_grads]) - assert len(self.params_grads) == len(set( - self.params_grads)), "found duplicated param in params_grads" + assert len(self.params_grads) == len( + set(self.params_grads) + ), "found duplicated param in params_grads" self.params = [p for p, _ in params_grads] self.param_names = [p.name for p in self.params] @@ -796,7 +922,8 @@ class ShardingInfo(object): def get_param_grad(self, param_name): if not self.is_in_local_shard(param_name): raise ValueError( - "param[{}] not in current rank.".format(param_name)) + "param[{}] not in current rank.".format(param_name) + ) if param_name not in self.params_grads: raise ValueError('param[{}] not in params_grads'.format(param_name)) return self.params_grads.get(param_name, None) diff --git a/python/paddle/distributed/passes/cpp_pass.py b/python/paddle/distributed/passes/cpp_pass.py index c729a919c1a33719855af19ba11d0563bea28226..17d54186dadb84f4f68f71732d3da72c54edd135 100644 --- a/python/paddle/distributed/passes/cpp_pass.py +++ 
b/python/paddle/distributed/passes/cpp_pass.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import core, _apply_pass as _apply_cpp_pass @register_pass("fuse_elewise_add_act") class FuseElementwiseAddActPass(CPPPassWrapper): - def __init__(self): super(FuseElementwiseAddActPass, self).__init__() @@ -33,7 +32,6 @@ class FuseElementwiseAddActPass(CPPPassWrapper): @register_pass("fuse_bn_act") class FuseBatchNormActPass(CPPPassWrapper): - def __init__(self): super(FuseBatchNormActPass, self).__init__() @@ -47,7 +45,6 @@ class FuseBatchNormActPass(CPPPassWrapper): @register_pass("fuse_bn_add_act") class FuseBatchNormAddActPass(CPPPassWrapper): - def __init__(self): super(FuseBatchNormAddActPass, self).__init__() @@ -61,7 +58,6 @@ class FuseBatchNormAddActPass(CPPPassWrapper): @register_pass("fuse_relu_depthwise_conv") class FuseReluDepthwiseConvPass(CPPPassWrapper): - def __init__(self): super(FuseReluDepthwiseConvPass, self).__init__() @@ -75,14 +71,15 @@ class FuseReluDepthwiseConvPass(CPPPassWrapper): @register_pass("fuse_optimizer") class FuseOptimizerPass(CPPPassWrapper): - def __init__(self): super(FuseOptimizerPass, self).__init__() @property def cpp_name(self): return [ - "fuse_adam_op_pass", "fuse_sgd_op_pass", "fuse_momentum_op_pass" + "fuse_adam_op_pass", + "fuse_sgd_op_pass", + "fuse_momentum_op_pass", ] def _type(self): @@ -91,7 +88,6 @@ class FuseOptimizerPass(CPPPassWrapper): @register_pass("inplace_addto_op") class InplaceAddtoOpPass(CPPPassWrapper): - def __init__(self): super(InplaceAddtoOpPass, self).__init__() @@ -112,7 +108,6 @@ def _set_cinn_op_flag(flag_name, extra_ops): @register_pass("build_cinn") class BuildCINNPass(CPPPassWrapper): - def __init__(self): super(BuildCINNPass, self).__init__() self.set_attr("allow_ops", []) @@ -127,13 +122,15 @@ class BuildCINNPass(CPPPassWrapper): def _apply_single_impl(self, main_program, startup_program, context): - assert 'FLAGS_allow_cinn_ops' in core.globals( + assert ( + 'FLAGS_allow_cinn_ops' in core.globals() ), "PaddlePaddle is not compiled with CINN support" old_allow_ops = core.globals()['FLAGS_allow_cinn_ops'] old_deny_ops = core.globals()['FLAGS_deny_cinn_ops'] try: - _set_cinn_op_flag('FLAGS_allow_cinn_ops', - self.get_attr("allow_ops")) + _set_cinn_op_flag( + 'FLAGS_allow_cinn_ops', self.get_attr("allow_ops") + ) _set_cinn_op_flag('FLAGS_deny_cinn_ops', self.get_attr("deny_ops")) feed = self.get_attr('feed', []) @@ -142,18 +139,26 @@ class BuildCINNPass(CPPPassWrapper): if prune_program: tmp_main_program = Executor._prune_program( - main_program, feed, fetch_list, []) + main_program, feed, fetch_list, [] + ) tmp_main_program = Executor._add_fetch_ops( - tmp_main_program, fetch_list, 'fetch') + tmp_main_program, fetch_list, 'fetch' + ) else: tmp_main_program = Executor._add_fetch_ops( - main_program, fetch_list, 'fetch') - - _apply_cpp_pass(tmp_main_program, startup_program, self.cpp_name, - {}, self.cpp_attr_types) + main_program, fetch_list, 'fetch' + ) + + _apply_cpp_pass( + tmp_main_program, + startup_program, + self.cpp_name, + {}, + self.cpp_attr_types, + ) tmp_main_program = Executor._remove_fetch_ops(tmp_main_program) diff --git a/python/paddle/distributed/passes/fuse_all_reduce.py b/python/paddle/distributed/passes/fuse_all_reduce.py index 628caa0696a96acb06b8f37977fa1a167d1fb306..dcd1976c89c61922ce09ec20f256ddea76e3761e 100644 --- a/python/paddle/distributed/passes/fuse_all_reduce.py +++ b/python/paddle/distributed/passes/fuse_all_reduce.py @@ -18,9 +18,9 @@ from .pass_base import PassBase, PassType, 
register_pass import numpy as np -def find_adjacent_match_sequences(iterable, - filter_func, - adjacent_filter_func=None): +def find_adjacent_match_sequences( + iterable, filter_func, adjacent_filter_func=None +): n = len(iterable) match_sequences = [] if adjacent_filter_func is None: @@ -30,8 +30,11 @@ def find_adjacent_match_sequences(iterable, while i < n and not filter_func(iterable[i]): i += 1 j = i + 1 - while j < n and filter_func(iterable[j]) and adjacent_filter_func( - iterable[i], iterable[j]): + while ( + j < n + and filter_func(iterable[j]) + and adjacent_filter_func(iterable[i], iterable[j]) + ): j += 1 if i < n and j <= n: match_sequences.append((i, j)) @@ -41,11 +44,13 @@ def find_adjacent_match_sequences(iterable, return match_sequences -def insert_fuse_all_reduce_ops(block, reversed_op_indices, input_var_names, - output_var_names, dtype, attrs): - fused_var = block.create_var(name=unique_name.generate( - "FusedOutput_{}".format(input_var_names[0])), - dtype=dtype) +def insert_fuse_all_reduce_ops( + block, reversed_op_indices, input_var_names, output_var_names, dtype, attrs +): + fused_var = block.create_var( + name=unique_name.generate("FusedOutput_{}".format(input_var_names[0])), + dtype=dtype, + ) # FIXME(zengjinle): here we assume that we use # c_sync_calc_stream/c_sync_comm_stream to do sync. @@ -56,16 +61,20 @@ def insert_fuse_all_reduce_ops(block, reversed_op_indices, input_var_names, for i, op_idx in enumerate(reversed_op_indices): prev_op_idx = op_idx - 1 - while prev_op_idx >= 0 and block.ops[ - prev_op_idx].type == "c_sync_calc_stream": + while ( + prev_op_idx >= 0 + and block.ops[prev_op_idx].type == "c_sync_calc_stream" + ): new_op_indices.append(prev_op_idx) prev_op_idx -= 1 if i > 0: next_op_idx = op_idx + 1 n = len(block.ops) - while next_op_idx < n and block.ops[ - next_op_idx].type == "c_sync_comm_stream": + while ( + next_op_idx < n + and block.ops[next_op_idx].type == "c_sync_comm_stream" + ): assert block.ops[next_op_idx].attr("ring_id") == ring_id new_op_indices.append(next_op_idx) @@ -102,21 +111,22 @@ def insert_fuse_all_reduce_ops(block, reversed_op_indices, input_var_names, } if not attrs["use_calc_stream"]: - block._insert_op_without_sync(insert_idx, - type="c_sync_calc_stream", - inputs={"X": fused_var}, - outputs={ - "Out": fused_var, - op_role_key: attrs[op_role_key] - }) + block._insert_op_without_sync( + insert_idx, + type="c_sync_calc_stream", + inputs={"X": fused_var}, + outputs={"Out": fused_var, op_role_key: attrs[op_role_key]}, + ) insert_idx += 1 # c_allreduce_sum should insert - block._insert_op_without_sync(insert_idx, - type="c_allreduce_sum", - inputs={"X": fused_var}, - outputs={"Out": fused_var}, - attrs=attrs) + block._insert_op_without_sync( + insert_idx, + type="c_allreduce_sum", + inputs={"X": fused_var}, + outputs={"Out": fused_var}, + attrs=attrs, + ) for op_idx in reversed_op_indices: block._remove_op(op_idx) @@ -185,9 +195,9 @@ def find_all_fuse_all_reduce_groups(block): return False return True - match_seqs = find_adjacent_match_sequences(collective_ops, - is_valid_allreduce_op, - is_same_adjacent_op) + match_seqs = find_adjacent_match_sequences( + collective_ops, is_valid_allreduce_op, is_same_adjacent_op + ) new_match_seqs = [] for i, j in match_seqs: new_match_seqs.append([collective_op_indices[k] for k in range(i, j)]) @@ -301,8 +311,13 @@ def insert_fuse_all_reduce_by_memory_size(block, groups, max_memory_size): if len(recorded_op_indices) > 1: attrs[op_role_var_key] = op_role_vars coalesce_op_kwargs = 
insert_fuse_all_reduce_ops( - block, recorded_op_indices, in_var_names, out_var_names, - dtype, attrs) + block, + recorded_op_indices, + in_var_names, + out_var_names, + dtype, + attrs, + ) coalesce_ops_kwargs.append(coalesce_op_kwargs) cur_mem_size = 0 @@ -321,8 +336,13 @@ def insert_fuse_all_reduce_by_memory_size(block, groups, max_memory_size): if len(recorded_op_indices) > 1: attrs[op_role_var_key] = op_role_vars coalesce_op_kwargs = insert_fuse_all_reduce_ops( - block, recorded_op_indices, in_var_names, out_var_names, dtype, - attrs) + block, + recorded_op_indices, + in_var_names, + out_var_names, + dtype, + attrs, + ) coalesce_ops_kwargs.append(coalesce_op_kwargs) block._sync_with_cpp() insert_coalesce_tensor_ops(block, coalesce_ops_kwargs) @@ -330,7 +350,6 @@ def insert_fuse_all_reduce_by_memory_size(block, groups, max_memory_size): @register_pass("fuse_all_reduce") class FuseAllReducePass(PassBase): - def __init__(self): super(FuseAllReducePass, self).__init__() self.set_attr("max_memory_size", -1) @@ -358,7 +377,9 @@ class FuseAllReducePass(PassBase): block = main_program.block(i) groups = find_all_fuse_all_reduce_groups(block) groups = split_fuse_all_reduce_groups_by_deps( - block, groups, op_deps[i]) - insert_fuse_all_reduce_by_memory_size(block, groups, - max_memory_size) + block, groups, op_deps[i] + ) + insert_fuse_all_reduce_by_memory_size( + block, groups, max_memory_size + ) main_program._sync_with_cpp() diff --git a/python/paddle/distributed/passes/pass_base.py b/python/paddle/distributed/passes/pass_base.py index e042ce4a15e7dc366173ef1b101153d5e8681e72..56cc262ca031006741c77e504f20bdf1960928e7 100644 --- a/python/paddle/distributed/passes/pass_base.py +++ b/python/paddle/distributed/passes/pass_base.py @@ -17,7 +17,6 @@ from paddle.fluid.framework import _apply_pass as _apply_cpp_pass class PassContext: - def __init__(self): self._applied_passes = [] self._attrs = {} @@ -84,7 +83,8 @@ class PassBase(ABC): def _check_conflict_including_common_rules(self, other_pass): return self._check_conflict(other_pass) and all( - [r(other_pass, self) for r in PassBase._COMMON_RULES]) + [r(other_pass, self) for r in PassBase._COMMON_RULES] + ) def apply(self, main_programs, startup_programs, context=None): if context is None: @@ -93,10 +93,12 @@ class PassBase(ABC): if not self._check_self(): return context - if not all([ + if not all( + [ self._check_conflict_including_common_rules(p) for p in context.passes - ]): + ] + ): return context assert isinstance(main_programs, list) @@ -107,8 +109,9 @@ class PassBase(ABC): return context def _apply_impl(self, main_programs, startup_programs, context): - for main_program, startup_program in zip(main_programs, - startup_programs): + for main_program, startup_program in zip( + main_programs, startup_programs + ): self._apply_single_impl(main_program, startup_program, context) @abstractmethod @@ -117,7 +120,6 @@ class PassBase(ABC): def register_pass(name): - def impl(cls): PassBase._register(name, cls) cls.name = name @@ -136,7 +138,6 @@ def new_pass(name, pass_attrs={}): class CPPPassWrapper(PassBase): - def __init__(self): super(CPPPassWrapper, self).__init__() @@ -155,21 +156,28 @@ class CPPPassWrapper(PassBase): return True def _apply_single_impl(self, main_program, startup_program, context): - _apply_cpp_pass(main_program, startup_program, self.cpp_name, - self._attrs, self.cpp_attr_types) + _apply_cpp_pass( + main_program, + startup_program, + self.cpp_name, + self._attrs, + self.cpp_attr_types, + ) def _fusion_opt_last_rule(pass_before, 
pass_after): - if pass_before._type( - ) == PassType.FUSION_OPT and pass_after._type() != PassType.FUSION_OPT: + if ( + pass_before._type() == PassType.FUSION_OPT + and pass_after._type() != PassType.FUSION_OPT + ): return False else: return True -def _make_rule_from_white_lists_dict(before_white_lists_dict, - after_white_lists_dict): - +def _make_rule_from_white_lists_dict( + before_white_lists_dict, after_white_lists_dict +): def collect_pass_names(white_lists_dict, result): for k, v in white_lists_dict.items(): result.add(k) @@ -196,7 +204,10 @@ def _make_rule_from_white_lists_dict(before_white_lists_dict, def rule(pass_before, pass_after): all_passes_after = compatible_pass_dict.get(pass_before.name) - if all_passes_after is None or pass_after.name not in compatible_pass_dict: + if ( + all_passes_after is None + or pass_after.name not in compatible_pass_dict + ): return True else: return pass_after.name in all_passes_after @@ -220,8 +231,9 @@ PassBase._AFTER_WHITE_LISTS_DICT = { PassBase._COMMON_RULES = [ _fusion_opt_last_rule, lambda pass_before, pass_after: type(pass_before) != type(pass_after), - _make_rule_from_white_lists_dict(PassBase._BEFORE_WHITE_LISTS_DICT, - PassBase._AFTER_WHITE_LISTS_DICT), + _make_rule_from_white_lists_dict( + PassBase._BEFORE_WHITE_LISTS_DICT, PassBase._AFTER_WHITE_LISTS_DICT + ), # Add more common rules here ] @@ -270,10 +282,12 @@ def _solve_pass_conflict(passes, context): old_passes = passes passes = [] for p in old_passes: - if all([ + if all( + [ p._check_conflict_including_common_rules(applied_p) for applied_p in context.passes - ]): + ] + ): passes.append(p) if not passes: @@ -287,14 +301,14 @@ def _solve_pass_conflict(passes, context): for i in range(n): for j in range(n): adjacent_matrix[i][j] = passes[ - j]._check_conflict_including_common_rules(passes[i]) + j + ]._check_conflict_including_common_rules(passes[i]) longest_path = _find_longest_path(adjacent_matrix) return [passes[idx] for idx in longest_path] class PassManager: - def __init__(self, passes, context=None, auto_solve_conflict=True): if context is None: context = PassContext() diff --git a/python/paddle/distributed/passes/pass_utils.py b/python/paddle/distributed/passes/pass_utils.py index e779048c56b12065b7af26d1ebda55789ec0f4d5..8e608bd9ed64bfd6ddff3886ebf245a03e9a94a1 100644 --- a/python/paddle/distributed/passes/pass_utils.py +++ b/python/paddle/distributed/passes/pass_utils.py @@ -108,8 +108,9 @@ def split_program(program, op_indices): op_indices.append(op_num) for idx in range(len(op_indices) - 1): - assert op_indices[idx] < op_indices[ - idx + 1], "op_indices must be strictly sorted" + assert ( + op_indices[idx] < op_indices[idx + 1] + ), "op_indices must be strictly sorted" splitted_programs = [] for idx in range(len(op_indices) - 1): diff --git a/python/paddle/distributed/passes/ps_server_pass.py b/python/paddle/distributed/passes/ps_server_pass.py index 27c10fd1f5bf758d1bfab4d286b23aac629bdeb9..37e5622ea8e7f6c71323476d187dbbdf5fba1013 100755 --- a/python/paddle/distributed/passes/ps_server_pass.py +++ b/python/paddle/distributed/passes/ps_server_pass.py @@ -15,16 +15,30 @@ import logging import paddle.fluid as fluid -from ..ps.utils.public import get_optimize_ops, get_ps_endpoint, get_role_id, get_trainers +from ..ps.utils.public import ( + get_optimize_ops, + get_ps_endpoint, + get_role_id, + get_trainers, +) from .pass_base import PassBase, register_pass from paddle.optimizer.lr import LRScheduler -from paddle.optimizer.lr import ExponentialDecay, InverseTimeDecay, 
NaturalExpDecay, NoamDecay -from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, inverse_time_decay, natural_exp_decay, noam_decay +from paddle.optimizer.lr import ( + ExponentialDecay, + InverseTimeDecay, + NaturalExpDecay, + NoamDecay, +) +from paddle.fluid.layers.learning_rate_scheduler import ( + exponential_decay, + inverse_time_decay, + natural_exp_decay, + noam_decay, +) @register_pass("add_lr_decay_table_pass") class AddLrDecayTablePass(PassBase): - def __init__(self): super(AddLrDecayTablePass, self).__init__() @@ -34,13 +48,15 @@ class AddLrDecayTablePass(PassBase): def _check_conflict(self, other_pass): return True - def _add_tensor_table(self, - attrs, - feed_var_name, - fetch_var_name="", - startup_program=None, - main_program=None, - tensor_table_class=""): + def _add_tensor_table( + self, + attrs, + feed_var_name, + fetch_var_name="", + startup_program=None, + main_program=None, + tensor_table_class="", + ): tensor_table_dict = {} tensor_table_dict[feed_var_name] = {} tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name @@ -48,13 +64,16 @@ class AddLrDecayTablePass(PassBase): tensor_table_dict[feed_var_name]["startup_program"] = startup_program tensor_table_dict[feed_var_name]["main_program"] = main_program tensor_table_dict[feed_var_name][ - "tensor_table_class"] = tensor_table_class + "tensor_table_class" + ] = tensor_table_class attrs['tensor_table'] = tensor_table_dict def _get_lr_sheduler_program(self, lr_sheduler, lr_decay_steps): schedler_decay = [ - 'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', - 'ExponentialDecay' + 'NoamDecay', + 'NaturalExpDecay', + 'InverseTimeDecay', + 'ExponentialDecay', ] decay_main_program = fluid.framework.Program() @@ -63,48 +82,59 @@ class AddLrDecayTablePass(PassBase): if isinstance(lr_sheduler, ExponentialDecay): with fluid.program_guard(decay_main_program, decay_startup_program): - lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, - True) + lr = exponential_decay( + 1.0, lr_decay_steps, lr_sheduler.gamma, True + ) lr_name = lr.name logging.warn( "ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) elif isinstance(lr_sheduler, NoamDecay): with fluid.program_guard(decay_main_program, decay_startup_program): - lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, - 1.0) + lr = noam_decay( + lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0 + ) lr_name = lr.name - logging.warn("NoamDecay is set, warmup steps is [ %d ]" % - lr_sheduler.warmup_steps) + logging.warn( + "NoamDecay is set, warmup steps is [ %d ]" + % lr_sheduler.warmup_steps + ) elif isinstance(lr_sheduler, NaturalExpDecay): with fluid.program_guard(decay_main_program, decay_startup_program): - lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, - True) + lr = natural_exp_decay( + 1.0, lr_decay_steps, lr_sheduler.gamma, True + ) lr_name = lr.name logging.warn( "NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) elif isinstance(lr_sheduler, 
InverseTimeDecay): with fluid.program_guard(decay_main_program, decay_startup_program): - lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma, - True) + lr = inverse_time_decay( + 1.0, lr_decay_steps, lr_sheduler.gamma, True + ) lr_name = lr.name logging.warn( "InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) else: raise ValueError( - "Not supported current LearningRate strategy, please use follow decay strategy: {}" - .format(schedler_decay)) + "Not supported current LearningRate strategy, please use follow decay strategy: {}".format( + schedler_decay + ) + ) return decay_main_program, decay_startup_program, lr_name @@ -113,21 +143,31 @@ class AddLrDecayTablePass(PassBase): if hasattr(attrs['origin_main_program'], 'lr_sheduler') == False: return - assert isinstance(attrs['origin_main_program'].lr_sheduler, - LRScheduler), "must be LRScheduler" + assert isinstance( + attrs['origin_main_program'].lr_sheduler, LRScheduler + ), "must be LRScheduler" ops = get_optimize_ops(attrs['origin_main_program']) - lr_decay_main_program, lr_decay_startup_program, lr_name = self._get_lr_sheduler_program( - attrs['origin_main_program'].lr_sheduler, attrs['lr_decay_steps']) - self._add_tensor_table(attrs, "@LR_DECAY_COUNTER@", lr_name, - lr_decay_startup_program, lr_decay_main_program, - "GlobalStepTable") + ( + lr_decay_main_program, + lr_decay_startup_program, + lr_name, + ) = self._get_lr_sheduler_program( + attrs['origin_main_program'].lr_sheduler, attrs['lr_decay_steps'] + ) + self._add_tensor_table( + attrs, + "@LR_DECAY_COUNTER@", + lr_name, + lr_decay_startup_program, + lr_decay_main_program, + "GlobalStepTable", + ) return @register_pass("add_listen_and_serv_pass") class AddListenAndServPass(PassBase): - def __init__(self): super(AddListenAndServPass, self).__init__() @@ -145,7 +185,6 @@ class AddListenAndServPass(PassBase): "lr_decay_block_id": None, "dense_optimize_blocks": None, "sparse_optimize_blocks": None, - # runtime attribute "endpoint": get_ps_endpoint(attrs['role_maker']), "pserver_id": get_role_id(attrs['role_maker']), @@ -153,17 +192,15 @@ class AddListenAndServPass(PassBase): "distributed_mode": attrs['ps_mode'], "rpc_get_thread_num": -1, "rpc_send_thread_num": -1, - "rpc_prefetch_thread_num": -1 + "rpc_prefetch_thread_num": -1, } - main_program.global_block().append_op(type="listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=opt) + main_program.global_block().append_op( + type="listen_and_serv", inputs={'X': []}, outputs={}, attrs=opt + ) @register_pass("add_rpc_global_flags_pass") class AddRpcGlobalFlagsPass(PassBase): - def __init__(self): super(AddRpcGlobalFlagsPass, self).__init__() @@ -179,7 +216,6 @@ class AddRpcGlobalFlagsPass(PassBase): @register_pass("add_optimizer_pass") class AddOptimizerPass(PassBase): - def __init__(self): super(AddOptimizerPass, self).__init__() @@ -195,7 +231,6 @@ class AddOptimizerPass(PassBase): @register_pass("add_geo_optimizer_pass") class AddGeoOptimizerPass(PassBase): - def __init__(self): super(AddGeoOptimizerPass, self).__init__() @@ -211,7 +246,6 @@ class AddGeoOptimizerPass(PassBase): @register_pass("build_pserver_startup_program_pass") class BuildPserverStartupProgramPass(PassBase): - def __init__(self): super(BuildPserverStartupProgramPass, 
self).__init__() @@ -227,7 +261,6 @@ class BuildPserverStartupProgramPass(PassBase): @register_pass("delete_unused_in_startup_pass") class DeleteUnusedInStartupPass(PassBase): - def __init__(self): super(DeleteUnusedInStartupPass, self).__init__() diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py index 1b5eb1e442ff12a6eb9d6011d6eca3c3a0891115..f99d9f316d462a54198e0ecd389f1d66925804f8 100755 --- a/python/paddle/distributed/passes/ps_trainer_pass.py +++ b/python/paddle/distributed/passes/ps_trainer_pass.py @@ -25,7 +25,6 @@ from paddle.fluid.framework import Program, Parameter @register_pass("append_send_ops_pass") class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 - def __init__(self): super(AppendSendOpsPass, self).__init__() @@ -35,8 +34,9 @@ class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 def _check_conflict(self, other_pass): return True - def _append_send_op(self, program, union_vars, queue, is_sparse, table_id, - ps_mode): + def _append_send_op( + self, program, union_vars, queue, is_sparse, table_id, ps_mode + ): if queue == STEP_COUNTER: send_input_vars = [] else: @@ -48,43 +48,42 @@ class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 dummy_output = [] if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: dummy_output = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) - program.global_block().append_op(type="send", - inputs={"X": send_input_vars}, - outputs={"Out": dummy_output}, - attrs={ - "send_varnames": [queue], - "is_sparse": - is_sparse, - "table_id": - table_id, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + name=framework.generate_control_dev_var_name() + ) + program.global_block().append_op( + type="send", + inputs={"X": send_input_vars}, + outputs={"Out": dummy_output}, + attrs={ + "send_varnames": [queue], + "is_sparse": is_sparse, + "table_id": table_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return dummy_output def _append_barrier_op(self, program, dummys, trainer_id): - program.global_block().append_op(type="send_barrier", - inputs={"X": dummys}, - outputs={"Out": []}, - attrs={ - "trainer_id": - trainer_id, - "half_async": - True, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + program.global_block().append_op( + type="send_barrier", + inputs={"X": dummys}, + outputs={"Out": []}, + attrs={ + "trainer_id": trainer_id, + "half_async": True, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs ps_mode = attrs['ps_mode'] - #if ps_mode == DistributedMode.GEO: + # if ps_mode == DistributedMode.GEO: # send_ctx = get_geo_trainer_send_context(attrs) # geo 模式, 没必要 send_ctx = get_the_one_send_context( - attrs, - split_dense_table=attrs['is_heter_ps_mode']) # async、sync 等各种模式 + attrs, split_dense_table=attrs['is_heter_ps_mode'] + ) # async、sync 等各种模式 dummys = [] for merged_name, send in send_ctx.items(): # embedding_0.w_0@GRAD @@ -99,9 +98,15 @@ class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 is_sparse = 1 if send.is_sparse() else 0 is_sparse = 2 if send.is_distributed() else is_sparse dummys.append( - self._append_send_op(main_program, send.origin_varnames(), - merged_name, is_sparse, send.table_id(), - ps_mode)) + self._append_send_op( + main_program, + send.origin_varnames(), + merged_name, + is_sparse, + send.table_id(), + ps_mode, + ) + ) if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: trainer_id 
= get_role_id(attrs['role_maker']) self._append_barrier_op(main_program, dummys, trainer_id) @@ -109,7 +114,6 @@ class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 @register_pass("distributed_ops_pass") class DistributedOpsPass(PassBase): - def __init__(self): super(DistributedOpsPass, self).__init__() self.w_2_table_id = {} @@ -138,8 +142,10 @@ class DistributedOpsPass(PassBase): if len(entry) == 3 and entry[0] == 'show_click_entry': show_var_name = entry[1] click_var_name = entry[2] - if show_var_name in _program.global_block( - ).vars and click_var_name in _program.global_block().vars: + if ( + show_var_name in _program.global_block().vars + and click_var_name in _program.global_block().vars + ): show = _program.global_block().vars[show_var_name] clk = _program.global_block().vars[click_var_name] use_entry = True @@ -154,31 +160,37 @@ class DistributedOpsPass(PassBase): name="show", dtype=core.VarDesc.VarType.FP32, persistable=False, - stop_gradient=True) - _program.global_block()._insert_op(index=0, - type='fill_constant', - inputs={}, - outputs={'Out': show}, - attrs={ - 'shape': [1], - 'dtype': show.dtype, - 'value': 1, - }) + stop_gradient=True, + ) + _program.global_block()._insert_op( + index=0, + type='fill_constant', + inputs={}, + outputs={'Out': show}, + attrs={ + 'shape': [1], + 'dtype': show.dtype, + 'value': 1, + }, + ) clk = _program.global_block().create_var( name="clk", dtype=core.VarDesc.VarType.FP32, persistable=False, - stop_gradient=True) - _program.global_block()._insert_op(index=0, - type='fill_constant', - inputs={}, - outputs={'Out': clk}, - attrs={ - 'shape': [1], - 'dtype': clk.dtype, - 'value': 0, - }) + stop_gradient=True, + ) + _program.global_block()._insert_op( + index=0, + type='fill_constant', + inputs={}, + outputs={'Out': clk}, + attrs={ + 'shape': [1], + 'dtype': clk.dtype, + 'value': 0, + }, + ) for param, ops in push_sparse_ops.items(): all_ops = _program.global_block().ops @@ -203,27 +215,27 @@ class DistributedOpsPass(PassBase): for idx in op_idxs[::-1]: _program.global_block()._remove_op(idx) - _program.global_block().append_op(type="distributed_push_sparse", - inputs={ - "Ids": inputs, - 'W': w, - "Outputs": outputs, - "Shows": show, - "Clicks": clk, - }, - outputs={"Outputs": outputs}, - attrs={ - "is_distributed": - is_distributed, - "padding_idx": padding_idx, - "table_id": table_id, - "size": self.emb_size[param], - "use_cvm_op": use_cvm_op, - "slots": slots - }) + _program.global_block().append_op( + type="distributed_push_sparse", + inputs={ + "Ids": inputs, + 'W': w, + "Outputs": outputs, + "Shows": show, + "Clicks": clk, + }, + outputs={"Outputs": outputs}, + attrs={ + "is_distributed": is_distributed, + "padding_idx": padding_idx, + "table_id": table_id, + "size": self.emb_size[param], + "use_cvm_op": use_cvm_op, + "slots": slots, + }, + ) def _pull_sparse_fuse(self, _program, pull_sparse_ops, attrs, send_ctx): - def dag_check_up_and_reorder(program, inputs, outputs): global_block = program.global_block() min_output_index = len(global_block.ops) @@ -348,7 +360,8 @@ class DistributedOpsPass(PassBase): if table_id == -1: raise ValueError( - "can not find suitable sparse table, please check") + "can not find suitable sparse table, please check" + ) self.w_2_table_id[param] = table_id padding_idx = ops[0].attr("padding_idx") @@ -379,8 +392,9 @@ class DistributedOpsPass(PassBase): ins = op.input(op.input_names[i]) for out_id, out_var in enumerate(outputs): if out_var.name in ins: - outputs_idxs[out_id] = min(idx, - outputs_idxs[out_id]) + 
outputs_idxs[out_id] = min( + idx, outputs_idxs[out_id] + ) if attrs['use_ps_gpu']: gpups_inputs_idxs.extend(inputs_idxs) @@ -388,8 +402,9 @@ class DistributedOpsPass(PassBase): gpups_inputs.extend(inputs) gpups_outputs.extend(outputs) gpups_w_size.extend([w.shape[1]] * len(inputs)) - gpups_min_distributed_idx = min(min(op_idxs), - gpups_min_distributed_idx) + gpups_min_distributed_idx = min( + min(op_idxs), gpups_min_distributed_idx + ) continue if min(outputs_idxs) - max(inputs_idxs) >= 1: @@ -401,18 +416,16 @@ class DistributedOpsPass(PassBase): _program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": inputs, - 'W': w - }, + inputs={"Ids": inputs, 'W': w}, outputs={"Outputs": outputs}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, - "op_device": op_device - }) + "op_device": op_device, + }, + ) else: for i in range(len(inputs_idxs)): distributed_idx = op_idxs[i] @@ -420,34 +433,34 @@ class DistributedOpsPass(PassBase): _program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": [inputs[i]], - 'W': w - }, + inputs={"Ids": [inputs[i]], 'W': w}, outputs={"Outputs": [outputs[i]]}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, - "op_device": op_device - }) + "op_device": op_device, + }, + ) if attrs['use_ps_gpu'] and len(gpups_inputs) > 0: if max(gpups_inputs_idxs) > 0: raise ValueError("There can't be ops before embedding in gpups") - _program.global_block()._insert_op(index=gpups_min_distributed_idx, - type="pull_gpups_sparse", - inputs={ - "Ids": gpups_inputs, - }, - outputs={"Out": gpups_outputs}, - attrs={ - "size": gpups_w_size, - "is_distributed": True, - "is_sparse": True - }) + _program.global_block()._insert_op( + index=gpups_min_distributed_idx, + type="pull_gpups_sparse", + inputs={ + "Ids": gpups_inputs, + }, + outputs={"Out": gpups_outputs}, + attrs={ + "size": gpups_w_size, + "is_distributed": True, + "is_sparse": True, + }, + ) PSGPU = paddle.fluid.core.PSGPU() try: gpu_slot = [int(var.name) for var in gpups_inputs] @@ -466,8 +479,10 @@ class DistributedOpsPass(PassBase): ops = {} use_cvm_op = False for op in _program.global_block().ops: - if op.type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + op.type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] if attrs['is_heter_ps_mode'] and not attrs['is_fl_ps_mode']: # TODO: trick for matchnet, need to modify for heter_ps @@ -486,8 +501,10 @@ class DistributedOpsPass(PassBase): for op in _program.global_block().ops: if op.type in SPARSE_GRAD_OP_TYPE_DICT.keys(): param_name = op.input(SPARSE_GRAD_OP_TYPE_DICT[op.type])[0] - if param_name in pull_sparse_ids and op.input( - "Ids")[0] in pull_sparse_ids[param_name]: + if ( + param_name in pull_sparse_ids + and op.input("Ids")[0] in pull_sparse_ids[param_name] + ): ops = push_sparse_ops.get(param_name, []) ops.append(op) push_sparse_ops[param_name] = ops @@ -496,19 +513,25 @@ class DistributedOpsPass(PassBase): def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs - pull_sparse_ops, push_sparse_ops, use_cvm_op = self._get_pull_sparse_ops( - main_program, attrs) - print("is_heter_ps_mode in distributed_ops_pass {}?".format( - attrs['is_heter_ps_mode'])) + ( + 
pull_sparse_ops, + push_sparse_ops, + use_cvm_op, + ) = self._get_pull_sparse_ops(main_program, attrs) + print( + "is_heter_ps_mode in distributed_ops_pass {}?".format( + attrs['is_heter_ps_mode'] + ) + ) send_ctx = get_the_one_send_context( - attrs, split_dense_table=attrs['is_heter_ps_mode']) + attrs, split_dense_table=attrs['is_heter_ps_mode'] + ) self._pull_sparse_fuse(main_program, pull_sparse_ops, attrs, send_ctx) self._push_sparse_fuse(main_program, push_sparse_ops, attrs, use_cvm_op) @register_pass("delete_optimizer_pass") class DeleteOptimizesPass(PassBase): - def __init__(self): super(DeleteOptimizesPass, self).__init__() @@ -518,8 +541,9 @@ class DeleteOptimizesPass(PassBase): def _check_conflict(self, other_pass): return True - def _delete_optimizer_op_and_vars(self, _program, remote_optimize_ops, - local_optimize_ops): + def _delete_optimizer_op_and_vars( + self, _program, remote_optimize_ops, local_optimize_ops + ): local_optimize_vars = [] remote_optimize_vars = [] remote_optimize_op_role_vars = [] @@ -533,14 +557,18 @@ class DeleteOptimizesPass(PassBase): remote_optimize_op_role_vars.extend(op.attr("op_role_var")) remote_optimize_vars = list( - set(remote_optimize_vars - )) # param + grad + optimizer_state + learning_rate + set(remote_optimize_vars) + ) # param + grad + optimizer_state + learning_rate remote_optimize_op_role_vars = list( - set(remote_optimize_op_role_vars)) # param + grad + set(remote_optimize_op_role_vars) + ) # param + grad print( - "remote_optimize_vars: {}, remote_optimize_op_role_vars: {}, local_optimize_vars: {}" - .format(remote_optimize_vars, remote_optimize_op_role_vars, - local_optimize_vars)) + "remote_optimize_vars: {}, remote_optimize_op_role_vars: {}, local_optimize_vars: {}".format( + remote_optimize_vars, + remote_optimize_op_role_vars, + local_optimize_vars, + ) + ) for var in remote_optimize_vars: if var in local_optimize_vars: continue @@ -555,26 +583,32 @@ class DeleteOptimizesPass(PassBase): def _add_lr_var(self, main_program, attrs): # Todo: hard code for pe - lr_var = attrs['origin_main_program'].global_block( - ).vars["learning_rate_0"] - main_program.global_block().create_var(name=lr_var.name, - shape=lr_var.shape, - dtype=lr_var.dtype, - type=lr_var.type, - lod_level=lr_var.lod_level, - persistable=True) + lr_var = ( + attrs['origin_main_program'].global_block().vars["learning_rate_0"] + ) + main_program.global_block().create_var( + name=lr_var.name, + shape=lr_var.shape, + dtype=lr_var.dtype, + type=lr_var.type, + lod_level=lr_var.lod_level, + persistable=True, + ) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs all_optimize_ops = get_optimize_ops(main_program) - remote_optimize_ops = get_optimize_ops(main_program, - attrs['remote_sparse']) + remote_optimize_ops = get_optimize_ops( + main_program, attrs['remote_sparse'] + ) lr_ops = get_lr_ops(main_program) remote_optimize_ops.extend(lr_ops) local_optimize_ops = list( - set(all_optimize_ops) - set(remote_optimize_ops)) - self._delete_optimizer_op_and_vars(main_program, remote_optimize_ops, - local_optimize_ops) + set(all_optimize_ops) - set(remote_optimize_ops) + ) + self._delete_optimizer_op_and_vars( + main_program, remote_optimize_ops, local_optimize_ops + ) if hasattr(attrs['origin_main_program'], 'lr_sheduler'): self._add_lr_var(main_program, attrs) @@ -582,7 +616,6 @@ class DeleteOptimizesPass(PassBase): @register_pass("delete_extra_optimizer_pass") class DeleteExtraOptimizerPass(PassBase): - def __init__(self): 
super(DeleteExtraOptimizerPass, self).__init__() @@ -598,10 +631,12 @@ class DeleteExtraOptimizerPass(PassBase): remote_optimize_op_role_vars = [] optimize_need_delete_vars = [] all_optimize_ops = get_optimize_ops(main_program) - remote_optimize_ops = get_optimize_ops(main_program, - attrs['remote_sparse']) + remote_optimize_ops = get_optimize_ops( + main_program, attrs['remote_sparse'] + ) local_optimize_ops = list( - set(all_optimize_ops) - set(remote_optimize_ops)) + set(all_optimize_ops) - set(remote_optimize_ops) + ) local_optimize_vars = [] for op in local_optimize_ops: @@ -638,7 +673,6 @@ class DeleteExtraOptimizerPass(PassBase): @register_pass("fake_init_ops_pass") class FakeInitOpsPass(PassBase): - def __init__(self): super(FakeInitOpsPass, self).__init__() @@ -649,19 +683,24 @@ class FakeInitOpsPass(PassBase): return True def _get_sparse_table_names(self, attrs): - dist_varnames = get_sparse_tablenames(attrs['origin_main_programs'], - True) - sparse_varnames = get_sparse_tablenames(attrs['origin_main_programs'], - False) + dist_varnames = get_sparse_tablenames( + attrs['origin_main_programs'], True + ) + sparse_varnames = get_sparse_tablenames( + attrs['origin_main_programs'], False + ) return list(set(dist_varnames + sparse_varnames)) - def _fake_init_sparsetable(self, startup_program, sparse_table_names, - attrs): + def _fake_init_sparsetable( + self, startup_program, sparse_table_names, attrs + ): # delete table init op for table_name in sparse_table_names: table_var = startup_program.global_block().vars[table_name] - if str(table_var).split( - ":")[0].strip().split()[-1] in attrs['local_sparse']: + if ( + str(table_var).split(":")[0].strip().split()[-1] + in attrs['local_sparse'] + ): continue table_param_init_op = [] for op in startup_program.global_block().ops: @@ -669,14 +708,16 @@ class FakeInitOpsPass(PassBase): table_param_init_op.append(op) init_op_num = len(table_param_init_op) if init_op_num != 1: - raise ValueError("table init op num should be 1, now is " + - str(init_op_num)) + raise ValueError( + "table init op num should be 1, now is " + str(init_op_num) + ) table_init_op = table_param_init_op[0] startup_program.global_block().append_op( type="fake_init", inputs={}, outputs={"Out": table_var}, - attrs={"shape": table_init_op.attr('shape')}) + attrs={"shape": table_init_op.attr('shape')}, + ) delete_ops(startup_program.global_block(), table_param_init_op) def _apply_single_impl(self, main_program, startup_program, pass_ctx): @@ -687,7 +728,6 @@ class FakeInitOpsPass(PassBase): @register_pass("ps_gpu_pass") class PsGpuPass(PassBase): - def __init__(self): super(PsGpuPass, self).__init__() @@ -706,14 +746,17 @@ class PsGpuPass(PassBase): if op.type != "pull_box_sparse" and op.type != "pull_gpups_sparse": continue grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op.desc, set(), []) + op.desc, set(), [] + ) for op_desc in grad_op_desc: new_op_desc = program.global_block().desc._insert_op( - insert_index + 1) + insert_index + 1 + ) new_op_desc.copy_from(op_desc) new_op_desc._set_attr(op_role_attr_name, backward) new_op = paddle.fluid.framework.Operator( - program.global_block(), new_op_desc) + program.global_block(), new_op_desc + ) program.global_block().ops.insert(insert_index + 1, new_op) program.global_block()._sync_with_cpp() @@ -787,7 +830,6 @@ class PsGpuPass(PassBase): @register_pass("ps_transpile_pass") class PsTranspilePass(PassBase): - def __init__(self): super(PsTranspilePass, self).__init__() @@ -801,17 +843,18 @@ class PsTranspilePass(PassBase): 
attrs = pass_ctx._attrs t = SingleProcessMultiThread() env = get_dist_env() - t.transpile(startup_program=startup_program, - main_program=main_program, - rank=env["trainer_id"], - endpoints=env["trainer_endpoints"], - current_endpoint=env['current_endpoint'], - wait_port=False) + t.transpile( + startup_program=startup_program, + main_program=main_program, + rank=env["trainer_id"], + endpoints=env["trainer_endpoints"], + current_endpoint=env['current_endpoint'], + wait_port=False, + ) @register_pass("split_heter_worker_ops_pass") class SplitHeterWorkerOpsPass(PassBase): - def __init__(self): super(SplitHeterWorkerOpsPass, self).__init__() @@ -821,9 +864,15 @@ class SplitHeterWorkerOpsPass(PassBase): def _check_conflict(self, other_pass): return True - def _create_heter_program(self, program, attrs, heter_program, - program_block_ops_list, heter_ops, - block_var_detail): + def _create_heter_program( + self, + program, + attrs, + heter_program, + program_block_ops_list, + heter_ops, + block_var_detail, + ): # This function mainly includes the following contents: # 1. For every heter block: # a) copy heter device op from origin program @@ -849,10 +898,12 @@ class SplitHeterWorkerOpsPass(PassBase): current_device = role_maker._heter_device_type().lower() stage_id = int(role_maker._get_stage_id()) - heter_block_ops_forward = program_block_ops_list[stage_id - - 1]["forward"] - heter_block_ops_backward = program_block_ops_list[stage_id - - 1]["backward"] + heter_block_ops_forward = program_block_ops_list[stage_id - 1][ + "forward" + ] + heter_block_ops_backward = program_block_ops_list[stage_id - 1][ + "backward" + ] heter_block = heter_program._create_block(pre_block_idx) optimizer_block.append(heter_block) @@ -874,59 +925,82 @@ class SplitHeterWorkerOpsPass(PassBase): for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block_bp, op) - bp_entrance_vars = block_var_detail[stage_id - - 1]["backward"]["entrance"] - add_vars_by_var_list(bp_entrance_vars, program, heter_program, - heter_block_bp) + bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][ + "entrance" + ] + add_vars_by_var_list( + bp_entrance_vars, program, heter_program, heter_block_bp + ) bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"] - add_vars_by_var_list(bp_exit_vars, program, heter_program, - heter_block_bp) - backward_comm_info = get_communicate_var_info(program, - stage_id, - bp_entrance_vars, - type="backward") + add_vars_by_var_list( + bp_exit_vars, program, heter_program, heter_block_bp + ) + backward_comm_info = get_communicate_var_info( + program, stage_id, bp_entrance_vars, type="backward" + ) - grad_to_block_id.append(backward_comm_info["block_input_var_name"] + - ":" + str(heter_block_bp.idx)) + grad_to_block_id.append( + backward_comm_info["block_input_var_name"] + + ":" + + str(heter_block_bp.idx) + ) else: for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block, op) - bp_entrance_vars = block_var_detail[stage_id - - 1]["backward"]["entrance"] - add_vars_by_var_list(bp_entrance_vars, program, heter_program, - heter_block) + bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][ + "entrance" + ] + add_vars_by_var_list( + bp_entrance_vars, program, heter_program, heter_block + ) bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"] - add_vars_by_var_list(bp_exit_vars, program, heter_program, - heter_block) + add_vars_by_var_list( + bp_exit_vars, program, heter_program, heter_block + ) 
heter_block_bp = heter_block - forward_comm_info = get_communicate_var_info(program, - stage_id, - entrance_vars, - type="forward") + forward_comm_info = get_communicate_var_info( + program, stage_id, entrance_vars, type="forward" + ) - grad_to_block_id.append(forward_comm_info["block_input_var_name"] + - ":" + str(heter_block.idx)) + grad_to_block_id.append( + forward_comm_info["block_input_var_name"] + + ":" + + str(heter_block.idx) + ) first_op_index_bp = len(heter_block_bp.ops) if stage_id <= len(block_var_detail) - 1: - static_var = insert_communicate_op(program, role_maker, heter_block, - stage_id, first_op_index_fp, - block_var_detail, current_device) - static_var_bp = insert_communicate_op(program, role_maker, - heter_block_bp, stage_id, - first_op_index_bp, - block_var_detail, current_device, - False) + static_var = insert_communicate_op( + program, + role_maker, + heter_block, + stage_id, + first_op_index_fp, + block_var_detail, + current_device, + ) + static_var_bp = insert_communicate_op( + program, + role_maker, + heter_block_bp, + stage_id, + first_op_index_bp, + block_var_detail, + current_device, + False, + ) # add send op send_grad_var_list = add_send_op( - program, heter_block_bp, - block_var_detail[stage_id - 1]["backward"]["persistables"]) + program, + heter_block_bp, + block_var_detail[stage_id - 1]["backward"]["persistables"], + ) # add step conter send_input_vars = [] @@ -941,13 +1015,15 @@ class SplitHeterWorkerOpsPass(PassBase): "pserver_id": get_role_id(role_maker), "distributed_mode": attrs['ps_mode'], "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } # append the listen_and_serv op - heter_program.global_block().append_op(type="heter_listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + heter_program.global_block().append_op( + type="heter_listen_and_serv", + inputs={'X': []}, + outputs={}, + attrs=attrs, + ) # TODO check heter program def _apply_single_impl(self, main_program, startup_program, pass_ctx): @@ -960,7 +1036,8 @@ class SplitHeterWorkerOpsPass(PassBase): attrs = pass_ctx._attrs default_deveice = "cpu" program, heter_ops, _, program_block_ops = find_heter_ops( - main_program, default_deveice) + main_program, default_deveice + ) if len(heter_ops) == 0: warnings.warn( "Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code." 
@@ -969,18 +1046,23 @@ class SplitHeterWorkerOpsPass(PassBase): return program_block_ops = union_forward_gradient_op(program_block_ops) - block_vars_detail = find_block_joints(program, program_block_ops, - heter_ops) + block_vars_detail = find_block_joints( + program, program_block_ops, heter_ops + ) heter_program = framework.Program() - self._create_heter_program(program, attrs, heter_program, - program_block_ops, heter_ops, - block_vars_detail) + self._create_heter_program( + program, + attrs, + heter_program, + program_block_ops, + heter_ops, + block_vars_detail, + ) main_program = heter_program @register_pass("split_trainer_ops_pass") class SplitTrainerOpsPass(PassBase): - def __init__(self): super(SplitTrainerOpsPass, self).__init__() @@ -990,8 +1072,9 @@ class SplitTrainerOpsPass(PassBase): def _check_conflict(self, other_pass): return True - def _replace_ops_by_communicate_op(self, program, attrs, heter_block_index, - ops_list, block_var_detail): + def _replace_ops_by_communicate_op( + self, program, attrs, heter_block_index, ops_list, block_var_detail + ): all_op = program.global_block().ops start_op = ops_list[0] first_op_idx = -1 @@ -1008,10 +1091,12 @@ class SplitTrainerOpsPass(PassBase): next_heter_worker_endpoints = get_next_stage_trainers(role_maker) entrance_var = block_var_detail[heter_block_index]["forward"][ - "entrance"] + "entrance" + ] - comm_info = get_communicate_var_info(program, heter_block_index + 1, - entrance_var) + comm_info = get_communicate_var_info( + program, heter_block_index + 1, entrance_var + ) program.global_block()._insert_op( index=first_op_idx, type="send_and_recv", @@ -1025,8 +1110,9 @@ class SplitTrainerOpsPass(PassBase): "next_endpoints": next_heter_worker_endpoints, "previous_endpoints": [], "trainer_id": get_role_id(role_maker), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return entrance_var @@ -1051,18 +1137,22 @@ class SplitTrainerOpsPass(PassBase): del attrs['merged_sparse_pairs'][index] return - def _remove_trainer_send_op(self, program, attrs, heter_block_index, - block_var_detail): + def _remove_trainer_send_op( + self, program, attrs, heter_block_index, block_var_detail + ): # if trainer do FF->BP->SEND, it has follow vars: var, var@GRAD # if trainer only do SEND, it has one var: var@GRAD # Delete Send op ,if trainer doesn't has pair var (var<->var@GRAD) - persistables = block_var_detail[heter_block_index]["forward"]["persistables"] + \ - block_var_detail[heter_block_index]["backward"]["persistables"] + persistables = ( + block_var_detail[heter_block_index]["forward"]["persistables"] + + block_var_detail[heter_block_index]["backward"]["persistables"] + ) need_remove_send_op = [] need_remove_grad_var = [] for op in find_send_op(program): - input_list, _ = find_op_input_output(program, - program.global_block(), op) + input_list, _ = find_op_input_output( + program, program.global_block(), op + ) for var_name in input_list: origin_var_name = var_name.split("@GRAD")[0] if origin_var_name in persistables: @@ -1073,8 +1163,14 @@ class SplitTrainerOpsPass(PassBase): for grad_var_name in need_remove_grad_var: self._remove_var_pair_by_grad(grad_var_name, attrs) - def _create_trainer_program(self, program, origin_program, attrs, - program_block_ops_list, block_var_detail): + def _create_trainer_program( + self, + program, + origin_program, + attrs, + program_block_ops_list, + block_var_detail, + ): # This function mainly includes the following contents: # 1. 
For every heter block in origin program # a) delete heter op and related variables @@ -1087,13 +1183,16 @@ class SplitTrainerOpsPass(PassBase): # 2. check every op's device static_var = [] for heter_block_index in range(1, len(program_block_ops_list)): - ops_list = program_block_ops_list[heter_block_index][ - "forward"] + program_block_ops_list[heter_block_index][ - "backward"] + ops_list = ( + program_block_ops_list[heter_block_index]["forward"] + + program_block_ops_list[heter_block_index]["backward"] + ) static_var += self._replace_ops_by_communicate_op( - program, attrs, heter_block_index, ops_list, block_var_detail) - self._remove_trainer_send_op(program, attrs, heter_block_index, - block_var_detail) + program, attrs, heter_block_index, ops_list, block_var_detail + ) + self._remove_trainer_send_op( + program, attrs, heter_block_index, block_var_detail + ) optimizer_block = [] grad_to_block_id = [] @@ -1101,40 +1200,46 @@ class SplitTrainerOpsPass(PassBase): bp_ops_list = program_block_ops_list[0]["backward"] delete_same_ops(program.global_block(), bp_ops_list) delete_trainer_useless_var(program, static_var) - backward_block = create_backward_block(program, origin_program, - bp_ops_list, block_var_detail) + backward_block = create_backward_block( + program, origin_program, bp_ops_list, block_var_detail + ) bp_entrance_vars = block_var_detail[0]["backward"]["entrance"] - backward_comm_info = get_communicate_var_info(origin_program, - 1, - bp_entrance_vars, - type="backward") - - grad_to_block_id.append(backward_comm_info["block_input_var_name"] + - ":" + str(backward_block.idx)) + backward_comm_info = get_communicate_var_info( + origin_program, 1, bp_entrance_vars, type="backward" + ) + + grad_to_block_id.append( + backward_comm_info["block_input_var_name"] + + ":" + + str(backward_block.idx) + ) optimizer_block.append(backward_block) role_maker = attrs['role_maker'] attrs = { "message_to_block_id": grad_to_block_id, "optimize_blocks": optimizer_block, # runtime attribute - "endpoint": - get_trainer_endpoint(role_maker), ## get trainer endpoint + "endpoint": get_trainer_endpoint( + role_maker + ), ## get trainer endpoint "fanin": 0, ## get heter worker "pserver_id": get_role_id(role_maker), "distributed_mode": attrs['ps_mode'], "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } # append the listen_and_serv op - program.global_block()._insert_op(index=0, - type="heter_listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + program.global_block()._insert_op( + index=0, + type="heter_listen_and_serv", + inputs={'X': []}, + outputs={}, + attrs=attrs, + ) ## TODO add check for bp block - #check_op_device(program.global_block(), DEFAULT_DEVICE) + # check_op_device(program.global_block(), DEFAULT_DEVICE) def _apply_single_impl(self, main_program, startup_program, pass_ctx): """ @@ -1146,20 +1251,26 @@ class SplitTrainerOpsPass(PassBase): attrs = pass_ctx._attrs default_device_ = 'cpu' program, heter_ops, default_ops, program_block_ops = find_heter_ops( - main_program, default_device_) + main_program, default_device_ + ) program_block_ops = union_forward_gradient_op(program_block_ops) - block_vars_detail = find_block_joints(program, program_block_ops, - heter_ops) + block_vars_detail = find_block_joints( + program, program_block_ops, heter_ops + ) trainer_program = program.clone() - self._create_trainer_program(trainer_program, program, attrs, - program_block_ops, 
block_vars_detail) + self._create_trainer_program( + trainer_program, + program, + attrs, + program_block_ops, + block_vars_detail, + ) main_program = trainer_program @register_pass("set_heter_pipeline_opt_pass") class SetHeterPipelineOptPass(PassBase): - def __init__(self): super(SetHeterPipelineOptPass, self).__init__() @@ -1173,32 +1284,31 @@ class SetHeterPipelineOptPass(PassBase): attrs = pass_ctx._attrs role_maker = attrs['role_maker'] num_microbatches = attrs['user_defined_strategy'].pipeline_configs[ - 'accumulate_steps'] + 'accumulate_steps' + ] startup_program._heter_pipeline_opt = { "startup_program": startup_program, "pipeline_stage": int(role_maker._get_stage_id()) - 1, "heter_place": role_maker._heter_device(), - "is_fl_mode": 1 + "is_fl_mode": 1, } main_program._heter_pipeline_opt = { "trainer": "HeterPipelineTrainer", "device_worker": "HeterSection", - "trainers": - role_maker._get_stage_trainers(), ## trainer num in each stage + "trainers": role_maker._get_stage_trainers(), ## trainer num in each stage "trainer_id": int(role_maker._role_id()), "pipeline_stage": int(role_maker._get_stage_id()) - 1, "num_pipeline_stages": int(role_maker._get_num_stage()), "section_program": main_program, "num_microbatches": num_microbatches, "heter_place": role_maker._heter_device(), - "is_fl_mode": 1 + "is_fl_mode": 1, } @register_pass("split_fl_ops_pass") class SplitFlOpsPass(PassBase): - def __init__(self): super(SplitFlOpsPass, self).__init__() self.PART_A_DEVICE_FlAG = 'gpu:0' @@ -1231,10 +1341,17 @@ class SplitFlOpsPass(PassBase): block = self.ori_main_program.block(0) for op in block.ops: device = op.attr(OP_DEVICE_KEY) - if device == self.PART_A_DEVICE_FlAG or device == '' or device == self.PART_A_JOINT_OP_DEVICE_FlAG: + if ( + device == self.PART_A_DEVICE_FlAG + or device == '' + or device == self.PART_A_JOINT_OP_DEVICE_FlAG + ): program = party_program_map['a'] self.partA_ops.append(op) - elif device == self.PART_B_DEVICE_FlAG or device == self.PART_B_JOINT_OP_DEVICE_FlAG: + elif ( + device == self.PART_B_DEVICE_FlAG + or device == self.PART_B_JOINT_OP_DEVICE_FlAG + ): program = party_program_map['b'] self.partB_ops.append(op) op_desc = op.desc @@ -1257,21 +1374,24 @@ class SplitFlOpsPass(PassBase): outputs={'Out': []}, attrs={ 'mode': 'forward', # mode 直接关联前向和反向 channel 选择 - 'send_var_name': - self.partA_to_partB_tensor_name + ["microbatch_id"], + 'send_var_name': self.partA_to_partB_tensor_name + + ["microbatch_id"], 'recv_var_name': [], 'message_name': comm_info, - 'next_endpoints': - get_next_stage_trainers(self.role_maker), # partB_endpoints - 'previous_endpoints': - get_previous_stage_trainers(self.role_maker), + 'next_endpoints': get_next_stage_trainers( + self.role_maker + ), # partB_endpoints + 'previous_endpoints': get_previous_stage_trainers( + self.role_maker + ), 'trainer_id': get_role_id(self.role_maker), # global id - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return def _insert_partB_communicate_op(self, block, idx): - comm_info = ("backward_joint_{}_{}@fl_ps".format(2, 1)) + comm_info = "backward_joint_{}_{}@fl_ps".format(2, 1) block._insert_op( idx, type='send_and_recv', @@ -1279,17 +1399,20 @@ class SplitFlOpsPass(PassBase): outputs={'Out': []}, attrs={ 'mode': 'backward', - 'send_var_name': - self.partB_to_partA_grad_name + ["microbatch_id"], + 'send_var_name': self.partB_to_partA_grad_name + + ["microbatch_id"], 'recv_var_name': [], 'message_name': comm_info, - 'next_endpoints': - 
get_next_stage_trainers(self.role_maker), # partA_endpoints - 'previous_endpoints': - get_previous_stage_trainers(self.role_maker), + 'next_endpoints': get_next_stage_trainers( + self.role_maker + ), # partA_endpoints + 'previous_endpoints': get_previous_stage_trainers( + self.role_maker + ), 'trainer_id': get_role_id(self.role_maker), # global id - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return def _create_var_for_block(self, vars, block): @@ -1308,7 +1431,8 @@ class SplitFlOpsPass(PassBase): trainable=source_var.trainable, optimize_attr=source_var.optimize_attr, regularizer=source_var.regularizer, - error_clip=source_var.error_clip) + error_clip=source_var.error_clip, + ) else: dest_var = block._clone_variable(source_var, False) dest_var.stop_gradient = source_var.stop_gradient @@ -1362,15 +1486,18 @@ class SplitFlOpsPass(PassBase): def _find_dense_grad_vars(self, bp_op_list): program = self.ori_main_program bp_op_input, bp_op_output = find_ops_list_input_output( - program, bp_op_list) - return (screen_persistables(program, bp_op_input) + - screen_persistables(program, bp_op_output)) + program, bp_op_list + ) + return screen_persistables(program, bp_op_input) + screen_persistables( + program, bp_op_output + ) def _get_partA_program(self, block): # 1. create block 0 # 1.1 insert send op - op_idx = self._find_joint_forward_op(block, - self.PART_A_JOINT_OP_DEVICE_FlAG) + op_idx = self._find_joint_forward_op( + block, self.PART_A_JOINT_OP_DEVICE_FlAG + ) op_list = [] for i in range(len(block.ops)): op = block.ops[i] @@ -1388,8 +1515,9 @@ class SplitFlOpsPass(PassBase): bp_op_list = get_bp_op_list(block) push_sparse_op_list = get_distributed_push_sparse_op_list(block) # logger.info('bp_op_list: {}'.format(bp_op_list)) - second_block = self._get_block_by_idx(bp_op_list + push_sparse_op_list, - self.partA_program, 1) + second_block = self._get_block_by_idx( + bp_op_list + push_sparse_op_list, self.partA_program, 1 + ) # 2.1. 
insert partA recv op block_input_flag = "backward_joint_{}_{}@fl_ps".format(2, 1) grad_to_block_id = block_input_flag + ":" + str(second_block.idx) @@ -1401,13 +1529,15 @@ class SplitFlOpsPass(PassBase): "pserver_id": get_role_id(self.role_maker), "distributed_mode": self.ps_mode, "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } - second_block._insert_op(index=0, - type='heter_listen_and_serv', - inputs={'X': []}, - outputs={}, - attrs=attrs) + second_block._insert_op( + index=0, + type='heter_listen_and_serv', + inputs={'X': []}, + outputs={}, + attrs=attrs, + ) # 2.2 insert push dense grad op send_ops = find_send_op(self.ori_main_program) # push dense delete_same_ops(block, send_ops) @@ -1417,9 +1547,11 @@ class SplitFlOpsPass(PassBase): def _get_partB_program(self, block): op_idx1 = self._find_joint_forward_op( - block, self.PART_B_JOINT_OP_DEVICE_FlAG) # elementwise_add op - op_idx2 = self._find_joint_backward_op(block, - self.PART_B_JOINT_OP_DEVICE_FlAG) + block, self.PART_B_JOINT_OP_DEVICE_FlAG + ) # elementwise_add op + op_idx2 = self._find_joint_backward_op( + block, self.PART_B_JOINT_OP_DEVICE_FlAG + ) op_cnt = 0 op_list1 = [] op_list2 = [] @@ -1458,16 +1590,18 @@ class SplitFlOpsPass(PassBase): "pserver_id": 1, # TODO "distributed_mode": self.ps_mode, "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } - first_block._insert_op(index=len(op_list1), - type="heter_listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + first_block._insert_op( + index=len(op_list1), + type="heter_listen_and_serv", + inputs={'X': []}, + outputs={}, + attrs=attrs, + ) - #logger.info('partB-first_block:{}'.format(first_block)) - #logger.info('partB-second_block:{}'.format(second_block)) + # logger.info('partB-first_block:{}'.format(first_block)) + # logger.info('partB-second_block:{}'.format(second_block)) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs @@ -1482,8 +1616,9 @@ class SplitFlOpsPass(PassBase): prog_a = party_program_map['a'] _main_file = ps_log_root_dir + '6_fl_A_main_program.prototxt' debug_program(_main_file, prog_a) - self._get_partB_to_partA_grad(prog_a.global_block(), - self.PART_A_JOINT_OP_DEVICE_FlAG) + self._get_partB_to_partA_grad( + prog_a.global_block(), self.PART_A_JOINT_OP_DEVICE_FlAG + ) prog_b = party_program_map['b'] _main_file = ps_log_root_dir + '6_fl_B_main_program.prototxt' diff --git a/python/paddle/distributed/ps/coordinator.py b/python/paddle/distributed/ps/coordinator.py index a012f338a514fb0e30d37ce5d9201996de584888..3a6e0756df43586530bafb174b47594f1e5f9f03 100755 --- a/python/paddle/distributed/ps/coordinator.py +++ b/python/paddle/distributed/ps/coordinator.py @@ -26,7 +26,8 @@ import logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) formatter = logging.Formatter( - fmt='%(asctime)s %(levelname)-2s [%(filename)s:%(lineno)d] %(message)s') + fmt='%(asctime)s %(levelname)-2s [%(filename)s:%(lineno)d] %(message)s' +) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) @@ -46,7 +47,6 @@ class FLStrategy: class ClientSelectorBase(abc.ABC): - def __init__(self, fl_clients_info_mp): self.fl_clients_info_mp = fl_clients_info_mp self.clients_info = {} @@ -58,17 +58,19 @@ class ClientSelectorBase(abc.ABC): for client_id, info in self.fl_clients_info_mp.items(): 
self.fl_client_info_desc = the_one_ps_pb2.FLClientInfo() - text_format.Parse(bytes(info, encoding="utf8"), - self.fl_client_info_desc) + text_format.Parse( + bytes(info, encoding="utf8"), self.fl_client_info_desc + ) self.clients_info[client_id] = {} self.clients_info[client_id][ - ClientInfoAttr. - DEVICE_TYPE] = self.fl_client_info_desc.device_type + ClientInfoAttr.DEVICE_TYPE + ] = self.fl_client_info_desc.device_type self.clients_info[client_id][ - ClientInfoAttr. - COMPUTE_CAPACITY] = self.fl_client_info_desc.compute_capacity + ClientInfoAttr.COMPUTE_CAPACITY + ] = self.fl_client_info_desc.compute_capacity self.clients_info[client_id][ - ClientInfoAttr.BANDWIDTH] = self.fl_client_info_desc.bandwidth + ClientInfoAttr.BANDWIDTH + ] = self.fl_client_info_desc.bandwidth @abc.abstractmethod def select(self): @@ -76,7 +78,6 @@ class ClientSelectorBase(abc.ABC): class ClientSelector(ClientSelectorBase): - def __init__(self, fl_clients_info_mp): super().__init__(fl_clients_info_mp) self.__fl_strategy = {} @@ -84,8 +85,11 @@ class ClientSelector(ClientSelectorBase): def select(self): self.parse_from_string() for client_id in self.clients_info: - logger.info("fl-ps > client {} info : {}".format( - client_id, self.clients_info[client_id])) + logger.info( + "fl-ps > client {} info : {}".format( + client_id, self.clients_info[client_id] + ) + ) # ......... to implement ...... # fl_strategy_desc = the_one_ps_pb2.FLStrategy() fl_strategy_desc.iteration_num = 99 @@ -97,7 +101,6 @@ class ClientSelector(ClientSelectorBase): class FLClientBase(abc.ABC): - def __init__(self): pass @@ -113,8 +116,9 @@ class FLClientBase(abc.ABC): self.startup_program = paddle.static.default_startup_program() self._client_ptr = fleet.get_fl_client() self._coordinators = self.role_maker._get_coordinator_endpoints() - logger.info("fl-ps > coordinator enpoints: {}".format( - self._coordinators)) + logger.info( + "fl-ps > coordinator enpoints: {}".format(self._coordinators) + ) self.strategy_handlers = dict() self.exe = None self.use_cuda = int(self.config.get("runner.use_gpu")) @@ -129,8 +133,11 @@ class FLClientBase(abc.ABC): def set_train_dataset_info(self, train_dataset, train_file_list): self.train_dataset = train_dataset self.train_file_list = train_file_list - logger.info("fl-ps > {}, data_feed_desc:\n {}".format( - type(self.train_dataset), self.train_dataset._desc())) + logger.info( + "fl-ps > {}, data_feed_desc:\n {}".format( + type(self.train_dataset), self.train_dataset._desc() + ) + ) def set_test_dataset_info(self, test_dataset, test_file_list): self.test_dataset = test_dataset @@ -161,7 +168,8 @@ class FLClientBase(abc.ABC): if self.config.get("runner.need_dump"): self.debug = True dump_fields_path = "{}/epoch_{}".format( - self.config.get("runner.dump_fields_path"), self.epoch_idx) + self.config.get("runner.dump_fields_path"), self.epoch_idx + ) dump_fields = self.config.get("runner.dump_fields", []) dump_param = self.config.get("runner.dump_param", []) persist_vars_list = self.main_program.all_parameters() @@ -170,11 +178,13 @@ class FLClientBase(abc.ABC): for param in persist_vars_list ] logger.info( - "fl-ps > persist_vars_list: {}".format(persist_vars_name)) + "fl-ps > persist_vars_list: {}".format(persist_vars_name) + ) if dump_fields_path is not None: self.main_program._fleet_opt[ - 'dump_fields_path'] = dump_fields_path + 'dump_fields_path' + ] = dump_fields_path if dump_fields is not None: self.main_program._fleet_opt["dump_fields"] = dump_fields if dump_param is not None: @@ -186,7 +196,6 @@ class 
FLClientBase(abc.ABC): class FLClient(FLClientBase): - def __init__(self): super(FLClient, self).__init__() @@ -195,12 +204,13 @@ class FLClient(FLClientBase): state_info = { ClientInfoAttr.DEVICE_TYPE: "Andorid", ClientInfoAttr.COMPUTE_CAPACITY: 10, - ClientInfoAttr.BANDWIDTH: 100 + ClientInfoAttr.BANDWIDTH: 100, } client_info = the_one_ps_pb2.FLClientInfo() client_info.device_type = state_info[ClientInfoAttr.DEVICE_TYPE] client_info.compute_capacity = state_info[ - ClientInfoAttr.COMPUTE_CAPACITY] + ClientInfoAttr.COMPUTE_CAPACITY + ] client_info.bandwidth = state_info[ClientInfoAttr.BANDWIDTH] str_msg = text_format.MessageToString(client_info) return str_msg @@ -224,7 +234,7 @@ class FLClient(FLClientBase): state_info = { "client id": self.worker_index, "auc": 0.9, - "epoch": self.epoch_idx + "epoch": self.epoch_idx, } self.push_fl_client_info_sync(state_info) strategy_dict = self.pull_fl_strategy() @@ -242,13 +252,18 @@ class FLClient(FLClientBase): def pull_fl_strategy(self): strategy_dict = {} - fl_strategy_str = self._client_ptr.pull_fl_strategy( + fl_strategy_str = ( + self._client_ptr.pull_fl_strategy() ) # block: wait for coordinator's strategy arrived - logger.info("fl-ps > fl client recved fl_strategy(str):\n{}".format( - fl_strategy_str)) + logger.info( + "fl-ps > fl client recved fl_strategy(str):\n{}".format( + fl_strategy_str + ) + ) fl_strategy_desc = the_one_ps_pb2.FLStrategy() - text_format.Parse(bytes(fl_strategy_str, encoding="utf8"), - fl_strategy_desc) + text_format.Parse( + bytes(fl_strategy_str, encoding="utf8"), fl_strategy_desc + ) strategy_dict["next_state"] = fl_strategy_desc.next_state return strategy_dict @@ -262,8 +277,9 @@ class FLClient(FLClientBase): self.register_handlers('train', self.callback_train) self.register_handlers('infer', self.callback_infer) self.register_handlers('finish', self.callback_finish) - self.register_handlers('initialize_model_params', - self.callback_initialize_model_params) + self.register_handlers( + 'initialize_model_params', self.callback_initialize_model_params + ) self.register_handlers('init_worker', self.callback_init_worker) self.register_handlers('save_model', self.callback_save_model) @@ -282,12 +298,14 @@ class FLClient(FLClientBase): "Epoch {} Var {}".format(self.epoch_idx, var_name) for var_name in self.metrics ] - self.exe.train_from_dataset(program=self.main_program, - dataset=self.train_dataset, - fetch_list=self.fetch_vars, - fetch_info=fetch_info, - print_period=self.print_step, - debug=self.debug) + self.exe.train_from_dataset( + program=self.main_program, + dataset=self.train_dataset, + fetch_list=self.fetch_vars, + fetch_info=fetch_info, + print_period=self.print_step, + debug=self.debug, + ) self.epoch_idx += 1 epoch_time = time.time() - epoch_start_time epoch_speed = self.train_example_nums / epoch_time @@ -299,12 +317,14 @@ class FLClient(FLClientBase): "Epoch {} Var {}".format(self.epoch_idx, var_name) for var_name in self.metrics ] - self.exe.infer_from_dataset(program=self.main_program, - dataset=self.test_dataset, - fetch_list=self.fetch_vars, - fetch_info=fetch_info, - print_period=self.print_step, - debug=self.debug) + self.exe.infer_from_dataset( + program=self.main_program, + dataset=self.test_dataset, + fetch_list=self.fetch_vars, + fetch_info=fetch_info, + print_period=self.print_step, + debug=self.debug, + ) def callback_save_model(self): model_dir = "{}/{}".format(self.save_model_path, self.epoch_idx) @@ -318,12 +338,14 @@ class FLClient(FLClientBase): fleet.stop_worker() def 
print_program(self): - with open("./{}_worker_main_program.prototxt".format(self.worker_index), - 'w+') as f: + with open( + "./{}_worker_main_program.prototxt".format(self.worker_index), 'w+' + ) as f: f.write(str(self.main_program)) with open( - "./{}_worker_startup_program.prototxt".format( - self.worker_index), 'w+') as f: + "./{}_worker_startup_program.prototxt".format(self.worker_index), + 'w+', + ) as f: f.write(str(self.startup_program)) def print_train_statical_info(self): @@ -332,7 +354,6 @@ class FLClient(FLClientBase): class Coordinator(object): - def __init__(self, ps_hosts): self._communicator = FLCommunicator(ps_hosts) self._client_selector = None @@ -344,7 +365,8 @@ class Coordinator(object): logger.info("fl-ps > running make_fl_strategy(loop) in coordinator\n") while True: # 1. get all fl clients reported info - str_map = self._communicator.query_fl_clients_info( + str_map = ( + self._communicator.query_fl_clients_info() ) # block: wait for all fl clients info reported # 2. generate fl strategy self._client_selector = ClientSelector(str_map) diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index 78ecef22b63e6160651d9417de4a19cd3bd03e4f..86766d60ae8c0e32301746a5a6f80d347c40930c 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -24,15 +24,21 @@ from paddle.fluid.compiler import CompiledProgram from paddle.fluid.executor import Executor from paddle.fluid.parallel_executor import ParallelExecutor from paddle.distributed.fleet.runtime.runtime_base import RuntimeBase -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) from paddle.distributed.fleet.proto import the_one_ps_pb2 from paddle.fluid.communicator import Communicator, HeterClient from google.protobuf import text_format from paddle.distributed.ps.coordinator import Coordinator __all__ = [ - 'Table', 'SparseTable', 'GeoSparseTable', 'BarrierTable', 'TensorTable', - 'DenseTable' + 'Table', + 'SparseTable', + 'GeoSparseTable', + 'BarrierTable', + 'TensorTable', + 'DenseTable', ] @@ -52,7 +58,11 @@ def parse_table_class(varname, program_id, context): param_name = op.input("W")[0] - if param_name == varname and op.type == "lookup_table" or op.type == "lookup_table_v2": + if ( + param_name == varname + and op.type == "lookup_table" + or op.type == "lookup_table_v2" + ): if op.has_attr('table_class') and op.attr("table_class") != "none": return op.attr('table_class') else: @@ -65,37 +75,47 @@ def check_embedding_dim(accessor_proto, varname, program_id, context): for var in main_program.list_vars(): if var.name == varname: embedding_dim = var.shape[1] - print('new var: {}, {}, {}'.format(var, embedding_dim, - accessor_proto.fea_dim)) + print( + 'new var: {}, {}, {}'.format( + var, embedding_dim, accessor_proto.fea_dim + ) + ) break fea_dim = accessor_proto.fea_dim if accessor_proto.accessor_class == "SparseAccessor": if fea_dim != embedding_dim + 2: raise ValueError( - "The fea_dim is wrong, it will be sparse_embedding_dim + 2: {}, but got {}" - .format(embedding_dim + 2, fea_dim)) + "The fea_dim is wrong, it will be sparse_embedding_dim + 2: {}, but got {}".format( + embedding_dim + 2, fea_dim + ) + ) else: if fea_dim != embedding_dim: raise ValueError( - "The fea_dim is wrong, it will be sparse_embedding_dim: {}, but got {}" - .format(embedding_dim, fea_dim)) + "The fea_dim is wrong, it will be 
sparse_embedding_dim: {}, but got {}".format( + embedding_dim, fea_dim + ) + ) embedx_dim = accessor_proto.embedx_dim if accessor_proto.accessor_class == "SparseAccessor": if embedx_dim != embedding_dim - 1: raise ValueError( - "The embedx_dim is wrong, it will be sparse_embedding_dim - 1: {}, but got {}" - .format(embedding_dim - 1, embedx_dim)) + "The embedx_dim is wrong, it will be sparse_embedding_dim - 1: {}, but got {}".format( + embedding_dim - 1, embedx_dim + ) + ) else: if embedx_dim != embedding_dim - 3: raise ValueError( - "The embedx_dim is wrong, it will be sparse_embedding_dim - 3: {}, but got {}" - .format(embedding_dim - 3, embedx_dim)) + "The embedx_dim is wrong, it will be sparse_embedding_dim - 3: {}, but got {}".format( + embedding_dim - 3, embedx_dim + ) + ) class Service: - def __init__(self): pass @@ -108,7 +128,6 @@ class Service: class GpuService(Service): - def __init__(self): super(GpuService, self).__init__() @@ -118,7 +137,6 @@ class GpuService(Service): class Accessor: - def __init__(self): self.accessor_class = "" self.optimizer = None @@ -126,10 +144,12 @@ class Accessor: self.embedding_dim = 0 # TableAccessorParameter accessor - def _set(self, accessor_proto, varname, program_id, context, - common_accessor): + def _set( + self, accessor_proto, varname, program_id, context, common_accessor + ): main_program, startup_program, idx = get_program_by_id( - context, program_id) + context, program_id + ) embedding_dim = 0 for var in main_program.list_vars(): if var.name == varname: @@ -184,7 +204,8 @@ class Accessor: ctr_accessor_param.ssd_unseenday_threshold = 1 for sgd_param in [ - accessor_proto.embed_sgd_param, accessor_proto.embedx_sgd_param + accessor_proto.embed_sgd_param, + accessor_proto.embedx_sgd_param, ]: if not sgd_param.HasField("name"): if common_accessor.accessor_class == "sgd": @@ -194,7 +215,10 @@ class Accessor: else: # for fl-ps, because geo accessor is 'sum' sgd_param.name = "SparseAdamSGDRule" - if sgd_param.name == "SparseAdaGradSGDRule" or sgd_param.name == "StdAdaGradSGDRule": + if ( + sgd_param.name == "SparseAdaGradSGDRule" + or sgd_param.name == "StdAdaGradSGDRule" + ): if not sgd_param.adagrad.HasField("learning_rate"): sgd_param.adagrad.learning_rate = 0.05 if not sgd_param.adagrad.HasField("initial_g2sum"): @@ -206,42 +230,52 @@ class Accessor: if sgd_param.name == "SparseNaiveSGDRule": if not sgd_param.naive.HasField("learning_rate"): - learning_rate = common_accessor.initializers[-1].split( - "&")[1] + learning_rate = common_accessor.initializers[-1].split("&")[ + 1 + ] sgd_param.naive.learning_rate = float(learning_rate) if not sgd_param.naive.HasField("initial_range"): - initial_range = common_accessor.initializers[0].split( - "&")[-1] + initial_range = common_accessor.initializers[0].split("&")[ + -1 + ] sgd_param.naive.initial_range = float(initial_range) if len(sgd_param.naive.weight_bounds) == 0: sgd_param.naive.weight_bounds.extend([-10.0, 10.0]) - if sgd_param.name == "SparseAdamSGDRule" or sgd_param.name == "SparseSharedAdamSGDRule": + if ( + sgd_param.name == "SparseAdamSGDRule" + or sgd_param.name == "SparseSharedAdamSGDRule" + ): if not sgd_param.adam.HasField("learning_rate"): - learning_rate = common_accessor.initializers[-1].split( - "&")[1] + learning_rate = common_accessor.initializers[-1].split("&")[ + 1 + ] sgd_param.adam.learning_rate = float(learning_rate) if not sgd_param.adam.HasField("initial_range"): - initial_range = common_accessor.initializers[0].split( - "&")[-1] + initial_range = 
common_accessor.initializers[0].split("&")[ + -1 + ] sgd_param.adam.initial_range = float(initial_range) attr_list = [x.split("&") for x in common_accessor.attrs] - if not sgd_param.adam.HasField( - "beta1_decay_rate" - ) and common_accessor.accessor_class == "adam": + if ( + not sgd_param.adam.HasField("beta1_decay_rate") + and common_accessor.accessor_class == "adam" + ): sgd_param.adam.beta1_decay_rate = float(attr_list[0][1]) else: sgd_param.adam.beta1_decay_rate = 0.9 - if not sgd_param.adam.HasField( - "beta2_decay_rate" - ) and common_accessor.accessor_class == "adam": + if ( + not sgd_param.adam.HasField("beta2_decay_rate") + and common_accessor.accessor_class == "adam" + ): sgd_param.adam.beta2_decay_rate = float(attr_list[1][1]) else: sgd_param.adam.beta2_decay_rate = 0.999 - if not sgd_param.adam.HasField( - "ada_epsilon" - ) and common_accessor.accessor_class == "adam": + if ( + not sgd_param.adam.HasField("ada_epsilon") + and common_accessor.accessor_class == "adam" + ): sgd_param.adam.ada_epsilon = float(attr_list[2][1]) else: sgd_param.adam.ada_epsilon = 1e-08 @@ -250,7 +284,6 @@ class Accessor: class CommonAccessor(Accessor): - def __init__(self): super(CommonAccessor, self).__init__() self.table_name = '' @@ -269,27 +302,46 @@ class CommonAccessor(Accessor): def define_optimize_map(self): opt_input_map = {} opt_input_map["sgd"] = [("Param", None), ("LearningRate", 1)] - opt_input_map["adam"] = [("Param", None), ("Moment1", None), - ("Moment2", None), ("Beta1Pow", 1), - ("Beta2Pow", 1), ("LearningRate", 1)] - opt_input_map["adam_d2sum"] = [("Param", None), ("D2Sum", None), - ("G2Sum", None), ("Moment", None), - ("MomentDecayRate", 1), - ("AdaDecayRate", 1), ("AdaEpsilon", 1), - ("LearningRate", 1)] + opt_input_map["adam"] = [ + ("Param", None), + ("Moment1", None), + ("Moment2", None), + ("Beta1Pow", 1), + ("Beta2Pow", 1), + ("LearningRate", 1), + ] + opt_input_map["adam_d2sum"] = [ + ("Param", None), + ("D2Sum", None), + ("G2Sum", None), + ("Moment", None), + ("MomentDecayRate", 1), + ("AdaDecayRate", 1), + ("AdaEpsilon", 1), + ("LearningRate", 1), + ] opt_input_map["sum"] = [("Param", None)] - opt_input_map["naive_adagrad"] = [("Param", None), ("G2Sum", 1), - ("LearningRate", 1)] + opt_input_map["naive_adagrad"] = [ + ("Param", None), + ("G2Sum", 1), + ("LearningRate", 1), + ] opt_input_map["summary"] = [("Param", None), ("SummaryDecayRate", 1)] opt_attr_map = {} opt_attr_map["sgd"] = [] opt_attr_map["sum"] = [] opt_attr_map["naive_adagrad"] = [] - opt_attr_map["adam"] = [("beta1", "f"), ("beta2", "f"), - ("epsilon", "f")] - opt_attr_map["adam_d2sum"] = [("beta1", "f"), ("beta2", "f"), - ("epsilon", "f")] + opt_attr_map["adam"] = [ + ("beta1", "f"), + ("beta2", "f"), + ("epsilon", "f"), + ] + opt_attr_map["adam_d2sum"] = [ + ("beta1", "f"), + ("beta2", "f"), + ("epsilon", "f"), + ] opt_attr_map["summary"] = [("summary_decay_rate", "f")] opt_init_map = {} @@ -304,7 +356,8 @@ class CommonAccessor(Accessor): def parse_entry(self, varname, program_id, context): main_program, startup_program, idx = get_program_by_id( - context, program_id) + context, program_id + ) for op in main_program.global_block().ops: if not is_distributed_sparse_op(op) and not is_sparse_op(op): continue @@ -337,8 +390,10 @@ class CommonAccessor(Accessor): origin_var_name = value_name # print("get_initializer_attr param name:", value_name) for op in o_startup_program.global_block().ops: - if op.type in self.opt_init_map.keys( - ) and origin_var_name == op.output("Out")[0]: + if ( + op.type in 
self.opt_init_map.keys() + and origin_var_name == op.output("Out")[0] + ): init_attr = [op.type] # print("get_initializer_attr op type:", op.type) for attr in self.opt_init_map[op.type]: @@ -359,7 +414,8 @@ class CommonAccessor(Accessor): # ctx.table_id(), ctx.is_datanorm_table())) main_program, startup_program, idx = get_program_by_id( - context, ctx.program_id()) + context, ctx.program_id() + ) pserver_id = get_role_id(context['role_maker']) pserver_num = len(get_ps_endpoints(context['role_maker'])) optimizer_ops = get_optimize_ops(main_program) @@ -369,8 +425,9 @@ class CommonAccessor(Accessor): for op in optimizer_ops: if ("Param" in op.input_names) and ( - op.input("Param")[0] - == context['grad_name_to_param_name'][grad_name]): + op.input("Param")[0] + == context['grad_name_to_param_name'][grad_name] + ): oop = op break @@ -409,7 +466,8 @@ class CommonAccessor(Accessor): else: if oop.type != 'sgd' and oop.type != 'adam': raise ValueError( - "The dense optimizer in PS is only supported SGD or Adam!") + "The dense optimizer in PS is only supported SGD or Adam!" + ) param_varnames = self.opt_input_map[oop.type] attr_varnames = self.opt_attr_map[oop.type] self.accessor_class = oop.type @@ -417,7 +475,7 @@ class CommonAccessor(Accessor): for (formal_name, shape) in param_varnames: params.append(formal_name) if self.accessor_class == "adam_d2sum": - #for dims + # for dims if shape is None: if is_sparse: shape = single_dim @@ -425,19 +483,24 @@ class CommonAccessor(Accessor): shape = self.get_shard(size, pserver_num, pserver_id) dims.append(shape) - #for initializers + # for initializers if formal_name == "Param" or formal_name == "LearningRate": - param = main_program.global_block().vars[oop.input( - formal_name)[0]] - #TODO: for dense learning_rate, can be different from sparse lr - if formal_name == "LearningRate" and param.name != "learning_rate_" + str( - idx): + param = main_program.global_block().vars[ + oop.input(formal_name)[0] + ] + # TODO: for dense learning_rate, can be different from sparse lr + if ( + formal_name == "LearningRate" + and param.name != "learning_rate_" + str(idx) + ): warnings.warn("will support decay soon") param = main_program.global_block().vars[ - "learning_rate_" + str(idx)] + "learning_rate_" + str(idx) + ] initializer = self.get_initializer_attr( - param.name, startup_program) + param.name, startup_program + ) elif formal_name == "MomentDecayRate": initializer = "fill_constant&0.99" elif formal_name == "AdaDecayRate": @@ -448,7 +511,7 @@ class CommonAccessor(Accessor): initializer = "fill_constant&0" initializers.append(initializer) elif self.accessor_class == "summary": - #for dims + # for dims if shape is None: if is_sparse: shape = single_dim @@ -456,13 +519,15 @@ class CommonAccessor(Accessor): shape = self.get_shard(size, pserver_num, pserver_id) dims.append(shape) - #for initializers + # for initializers if formal_name == "Param": - param = main_program.global_block().vars[oop.input( - formal_name)[0]] + param = main_program.global_block().vars[ + oop.input(formal_name)[0] + ] initializer = self.get_initializer_attr( - param.name, startup_program) + param.name, startup_program + ) elif formal_name == "SummaryDecayRate": initializer = "fill_constant&0.999999" else: @@ -474,32 +539,39 @@ class CommonAccessor(Accessor): initializer = "fill_constant&0" initializers.append(initializer) else: - param = main_program.global_block().vars[oop.input( - formal_name)[0]] - if formal_name == "LearningRate" and param.name != "learning_rate_" + str( - idx): + param 
= main_program.global_block().vars[ + oop.input(formal_name)[0] + ] + if ( + formal_name == "LearningRate" + and param.name != "learning_rate_" + str(idx) + ): warnings.warn("will support decay soon") param = main_program.global_block().vars[ - "learning_rate_" + str(idx)] + "learning_rate_" + str(idx) + ] if shape is None: if is_sparse: shape = single_dim else: - shape = self.get_shard(size, pserver_num, - pserver_id) + shape = self.get_shard( + size, pserver_num, pserver_id + ) dims.append(shape) initializer = self.get_initializer_attr( - param.name, startup_program) + param.name, startup_program + ) initializers.append(initializer) if self.accessor_class == 'summary': datanorm_ops = get_datanorm_ops(main_program) for op in datanorm_ops: if ("BatchSize" in op.input_names) and ( - op.input("BatchSize")[0] - == context['grad_name_to_param_name'][grad_name]): + op.input("BatchSize")[0] + == context['grad_name_to_param_name'][grad_name] + ): oop = op break @@ -528,23 +600,24 @@ class CommonAccessor(Accessor): class Tensor: - def __init__(self, tesnor_dcit): self.tensor_dict = tesnor_dcit def _set(self, tensor_proto): tensor_proto.main_program_id = self.tensor_dict.get( - "main_program_id", 0) + "main_program_id", 0 + ) tensor_proto.startup_program_id = self.tensor_dict.get( - "startup_program_id", 0) + "startup_program_id", 0 + ) tensor_proto.feed_var_name = self.tensor_dict.get("feed_var_name", '') tensor_proto.fetch_var_name = self.tensor_dict.get("fetch_var_name", '') tensor_proto.tensor_table_class = self.tensor_dict.get( - "tensor_table_class", '') + "tensor_table_class", '' + ) class Table: - def __init__(self): self.table_class = None self.shard_num = -1 @@ -559,7 +632,6 @@ class Table: class BarrierTable(Table): - def __init__(self, context, idx): super(BarrierTable, self).__init__() self.type = None @@ -595,7 +667,6 @@ class BarrierTable(Table): class TensorTable(Table): - def __init__(self, idx, tensor_dict, role_maker): super(TensorTable, self).__init__() self.idx = idx @@ -610,7 +681,8 @@ class TensorTable(Table): table_proto.accessor.accessor_class = "CommMergeAccessor" table_proto.common.table_name = self.tensor_dict.get( - "feed_var_name", '') + "feed_var_name", '' + ) table_proto.common.trainer_num = get_trainers(self.role_maker) tensor = Tensor(self.tensor_dict) @@ -618,7 +690,6 @@ class TensorTable(Table): class SparseTable(Table): - def __init__(self, context, send_ctx): super(SparseTable, self).__init__() self.context = context @@ -629,31 +700,39 @@ class SparseTable(Table): def _set(self, table_proto): ctx = self.ctx - if ctx.is_tensor_table() or len( - ctx.origin_varnames()) < 1 or (ctx.is_sparse() == False): + if ( + ctx.is_tensor_table() + or len(ctx.origin_varnames()) < 1 + or (ctx.is_sparse() == False) + ): return table_proto.table_id = ctx.table_id() table_proto.table_class = self.table_class table_proto.type = the_one_ps_pb2.PS_SPARSE_TABLE table_proto.shard_num = self.shard_num if table_proto.sparse_table_cache_file_num > len( - get_ps_endpoints(self.context['role_maker'])): + get_ps_endpoints(self.context['role_maker']) + ): table_proto.sparse_table_cache_file_num = len( - get_ps_endpoints(self.context['role_maker'])) + get_ps_endpoints(self.context['role_maker']) + ) self.common.table_name = self.context['grad_name_to_param_name'][ - ctx.origin_varnames()[0]] + ctx.origin_varnames()[0] + ] self.common.parse_by_optimizer(ctx, self.context) - self.common.parse_entry(self.common.table_name, ctx.program_id(), - self.context) + self.common.parse_entry( + 
self.common.table_name, ctx.program_id(), self.context + ) self.common.sync = True if self.context['is_sync'] else False self.common._set(table_proto.common) print('new table_name: {}'.format(self.common.table_name)) all_table_proto = self.context[ - "user_defined_strategy"].sparse_table_configs + "user_defined_strategy" + ].sparse_table_configs usr_table_proto = all_table_proto.add() for proto in all_table_proto: if proto.table_name == self.common.table_name: @@ -679,11 +758,17 @@ class SparseTable(Table): ) if usr_table_proto.HasField("enable_sparse_table_cache"): - table_proto.enable_sparse_table_cache = usr_table_proto.enable_sparse_table_cache + table_proto.enable_sparse_table_cache = ( + usr_table_proto.enable_sparse_table_cache + ) if usr_table_proto.HasField("sparse_table_cache_rate"): - table_proto.sparse_table_cache_rate = usr_table_proto.sparse_table_cache_rate + table_proto.sparse_table_cache_rate = ( + usr_table_proto.sparse_table_cache_rate + ) if usr_table_proto.HasField("sparse_table_cache_file_num"): - table_proto.sparse_table_cache_file_num = usr_table_proto.sparse_table_cache_file_num + table_proto.sparse_table_cache_file_num = ( + usr_table_proto.sparse_table_cache_file_num + ) if usr_table_proto.HasField("enable_revert"): table_proto.enable_revert = usr_table_proto.enable_revert if usr_table_proto.HasField("shard_merge_rate"): @@ -691,19 +776,29 @@ class SparseTable(Table): if usr_table_proto.accessor.ByteSize() == 0: warnings.warn( - "The accessor of sparse table is not set, use default value.") + "The accessor of sparse table is not set, use default value." + ) table_proto.accessor.ParseFromString( - usr_table_proto.accessor.SerializeToString()) - self.accessor._set(table_proto.accessor, self.common.table_name, - ctx.program_id(), self.context, self.common) + usr_table_proto.accessor.SerializeToString() + ) + self.accessor._set( + table_proto.accessor, + self.common.table_name, + ctx.program_id(), + self.context, + self.common, + ) - check_embedding_dim(table_proto.accessor, self.common.table_name, - ctx.program_id(), self.context) + check_embedding_dim( + table_proto.accessor, + self.common.table_name, + ctx.program_id(), + self.context, + ) class GeoSparseTable(SparseTable): - def __init__(self, context, send_ctx): super(GeoSparseTable, self).__init__(context, send_ctx) self.table_class = "MemorySparseGeoTable" @@ -712,8 +807,11 @@ class GeoSparseTable(SparseTable): def _set(self, table_proto): ctx = self.ctx - if ctx.is_tensor_table() or len( - ctx.origin_varnames()) < 1 or (ctx.is_sparse() == False): + if ( + ctx.is_tensor_table() + or len(ctx.origin_varnames()) < 1 + or (ctx.is_sparse() == False) + ): return table_proto.table_id = ctx.table_id() table_proto.table_class = self.table_class @@ -725,16 +823,17 @@ class GeoSparseTable(SparseTable): table_proto.accessor.embedx_dim = ctx.sections()[1] self.common.table_name = self.context['grad_name_to_param_name'][ - ctx.origin_varnames()[0]] + ctx.origin_varnames()[0] + ] self.common.parse_by_optimizer(ctx, self.context) - self.common.parse_entry(self.common.table_name, ctx.program_id(), - self.context) + self.common.parse_entry( + self.common.table_name, ctx.program_id(), self.context + ) self.common.sync = False self.common._set(table_proto.common) class DenseTable(Table): - def __init__(self, context, send_ctx): super(DenseTable, self).__init__() self.context = context @@ -743,8 +842,11 @@ class DenseTable(Table): def _set(self, table_proto): ctx = self.ctx - if ctx.is_tensor_table() or len( - 
ctx.origin_varnames()) < 1 or (ctx.is_sparse() == True): + if ( + ctx.is_tensor_table() + or len(ctx.origin_varnames()) < 1 + or (ctx.is_sparse() == True) + ): return table_proto.table_id = ctx.table_id() @@ -759,15 +861,15 @@ class DenseTable(Table): self.common.table_name = "MergedDense" self.common.parse_by_optimizer(ctx, self.context) - self.common.parse_entry(self.common.table_name, ctx.program_id(), - self.context) + self.common.parse_entry( + self.common.table_name, ctx.program_id(), self.context + ) self.common.sync = True if self.context['is_sync'] else False self.common._set(table_proto.common) class Server: - def __init__(self): pass @@ -776,7 +878,6 @@ class Server: class DownpourServer(Server): - def __init__(self): super(DownpourServer, self).__init__() @@ -785,7 +886,6 @@ class DownpourServer(Server): class Worker: - def __init__(self): pass @@ -794,7 +894,6 @@ class Worker: class DownpourWorker(Worker): - def __init__(self): super(DownpourWorker, self).__init__() @@ -803,7 +902,6 @@ class DownpourWorker(Worker): class fsClient: - def __init__(self, fs_client_param): self.fs_client_param = fs_client_param @@ -817,7 +915,6 @@ class fsClient: class PsDescBuilder(object): - def __init__(self, context): self.context = context self.is_sync = context['is_sync'] @@ -827,7 +924,8 @@ class PsDescBuilder(object): self.barrier_table_id = None self.send_ctx = get_the_one_send_context( - self.context, split_dense_table=self.is_heter_ps_mode) + self.context, split_dense_table=self.is_heter_ps_mode + ) self.tensor_table_dict = {} # TODO self._server_sub_program = [] @@ -846,8 +944,11 @@ class PsDescBuilder(object): self._server_sub_program.append(Program().desc) tables = [] for table_name in self.tensor_table_dict: - tables.append(globals()['TensorTable'](len(tables), tensor_dict, - self.context['role_maker'])) + tables.append( + globals()['TensorTable']( + len(tables), tensor_dict, self.context['role_maker'] + ) + ) program_idx += 1 return tables @@ -857,14 +958,17 @@ class PsDescBuilder(object): print("idx, name, ctx:", idx, name, ctx) if ctx.is_sparse(): if self.ps_mode == DistributedMode.GEO: - if (self.context['local_sparse'] - and name[:-5] in self.context['local_sparse']) or ( - not self.context['local_sparse']): - tables.append(globals()['GeoSparseTable'](self.context, - ctx)) + if ( + self.context['local_sparse'] + and name[:-5] in self.context['local_sparse'] + ) or (not self.context['local_sparse']): + tables.append( + globals()['GeoSparseTable'](self.context, ctx) + ) else: - tables.append(globals()['SparseTable'](self.context, - ctx)) + tables.append( + globals()['SparseTable'](self.context, ctx) + ) else: tables.append(globals()['SparseTable'](self.context, ctx)) else: @@ -888,37 +992,45 @@ class PsDescBuilder(object): def build_worker_desc(self): for table in self.tables: - table_proto = self.ps_desc.worker_param.downpour_worker_param.downpour_table_param.add( + table_proto = ( + self.ps_desc.worker_param.downpour_worker_param.downpour_table_param.add() ) table._set(table_proto) - table_proto = self.ps_desc.server_param.downpour_server_param.downpour_table_param.add( + table_proto = ( + self.ps_desc.server_param.downpour_server_param.downpour_table_param.add() ) table._set(table_proto) if type(table) == BarrierTable and self.barrier_table_id is None: self.barrier_table_id = table.idx self.service._set( - self.ps_desc.server_param.downpour_server_param.service_param) + self.ps_desc.server_param.downpour_server_param.service_param + ) 
self.fs_client._set(self.ps_desc.fs_client_param) return text_format.MessageToString(self.ps_desc) def build_server_desc(self): self.sparse_table_maps = {} for table in self.tables: - table_proto = self.ps_desc.server_param.downpour_server_param.downpour_table_param.add( + table_proto = ( + self.ps_desc.server_param.downpour_server_param.downpour_table_param.add() ) table._set(table_proto) - if table_proto.type == the_one_ps_pb2.PS_SPARSE_TABLE and table_proto.common is not None: + if ( + table_proto.type == the_one_ps_pb2.PS_SPARSE_TABLE + and table_proto.common is not None + ): self.sparse_table_maps[ - table_proto.common.table_name] = table_proto.table_id + table_proto.common.table_name + ] = table_proto.table_id self.service._set( - self.ps_desc.server_param.downpour_server_param.service_param) + self.ps_desc.server_param.downpour_server_param.service_param + ) self.fs_client._set(self.ps_desc.fs_client_param) return text_format.MessageToString(self.ps_desc) class TheOnePSRuntime(RuntimeBase): - def __init__(self): super(TheOnePSRuntime, self).__init__() self._communicator = None @@ -936,30 +1048,41 @@ class TheOnePSRuntime(RuntimeBase): self.debug = bool(int(os.getenv("PSERVER_DEBUG", "0"))) self.origin_main_program = context["origin_main_program"] - self.origin_main_programs = context.get("origin_main_programs", - [self.origin_main_program]) + self.origin_main_programs = context.get( + "origin_main_programs", [self.origin_main_program] + ) self.context["origin_main_programs"] = self.origin_main_programs self.context["origin_startup_programs"] = context.get( - 'origin_startup_programs', [context['origin_startup_program']]) + 'origin_startup_programs', [context['origin_startup_program']] + ) self.context[ - 'is_heter_ps_mode'] = self.role_maker._is_heter_parameter_server_mode + 'is_heter_ps_mode' + ] = self.role_maker._is_heter_parameter_server_mode self.is_heter_ps_mode = self.context['is_heter_ps_mode'] self.context['trainer'] = TrainerRuntimeConfig( - context['valid_strategy']) + context['valid_strategy'] + ) self.context['ps_mode'] = self.context['trainer'].mode self.context['use_ps_gpu'] = context['valid_strategy'].a_sync_configs[ - 'use_ps_gpu'] - self.context['is_sync'] = True if self.context[ - 'ps_mode'] == DistributedMode.SYNC else False + 'use_ps_gpu' + ] + self.context['is_sync'] = ( + True if self.context['ps_mode'] == DistributedMode.SYNC else False + ) self.context['grad_name_to_param_name'] = {} self.context['tensor_table'] = {} # FL self.context['local_sparse'] = context[ - "user_defined_strategy"].trainer_desc_configs["local_sparse"] + "user_defined_strategy" + ].trainer_desc_configs["local_sparse"] self.context['remote_sparse'] = context[ - "user_defined_strategy"].trainer_desc_configs["remote_sparse"] - print("fl-ps > local_sparse: {}, remote_sparse: {}".format( - self.context['local_sparse'], self.context['remote_sparse'])) + "user_defined_strategy" + ].trainer_desc_configs["remote_sparse"] + print( + "fl-ps > local_sparse: {}, remote_sparse: {}".format( + self.context['local_sparse'], self.context['remote_sparse'] + ) + ) build_var_distributed(self.context) @@ -993,7 +1116,7 @@ class TheOnePSRuntime(RuntimeBase): scope = scopes[idx] table_id = ctx.table_id() var_names = recv_map[table_id] - #print("init params:", idx, table_id, var_names) + # print("init params:", idx, table_id, var_names) self._worker.push_dense_params(scope, table_id, var_names) all_var_names.extend(var_names) return all_var_names @@ -1007,7 +1130,7 @@ class TheOnePSRuntime(RuntimeBase): scope = 
scopes[idx] table_id = ctx.table_id() var_names = recv_map[table_id] - #print("pull all dense:", idx, table_id, var_names) + # print("pull all dense:", idx, table_id, var_names) self._worker.pull_dense_params(scope, table_id, var_names) all_var_names.extend(var_names) return all_var_names @@ -1056,16 +1179,19 @@ class TheOnePSRuntime(RuntimeBase): def sync_strategy_envs(): kwargs = {} kwargs[ - "pserver_endpoints"] = self.role_maker._get_pserver_endpoints() + "pserver_endpoints" + ] = self.role_maker._get_pserver_endpoints() kwargs["trainer_id"] = self.role_maker._worker_index() return kwargs dense_map = get_the_one_recv_context( - self.context, split_dense_table=self.is_heter_ps_mode) + self.context, split_dense_table=self.is_heter_ps_mode + ) send_ctx = get_the_one_send_context( self.context, split_dense_table=self.is_heter_ps_mode, - ep_list=self.endpoints) + ep_list=self.endpoints, + ) self._send_ctx = send_ctx trainer_config = self.context['trainer'] @@ -1097,24 +1223,34 @@ class TheOnePSRuntime(RuntimeBase): print("fl-ps > with_coordinator? {}".format(self.with_coordinator)) print("fl-ps > coordinator addr: {}".format(self.coordinator_hosts)) if self.with_coordinator: - self._worker.init_fl_worker(self.coordinator_hosts, self.role_id, - self.trainer_endpoint) + self._worker.init_fl_worker( + self.coordinator_hosts, self.role_id, self.trainer_endpoint + ) - if self.context[ - 'ps_mode'] == DistributedMode.GEO or self.is_heter_ps_mode: + if ( + self.context['ps_mode'] == DistributedMode.GEO + or self.is_heter_ps_mode + ): self._communicator = Communicator( - trainer_config.mode, kwargs, - trainer_config.get_communicator_flags()) - self._communicator.init_with_ctx(send_ctx, dense_map, worker_desc, - self.string_hosts, - fluid.global_scope()) + trainer_config.mode, + kwargs, + trainer_config.get_communicator_flags(), + ) + self._communicator.init_with_ctx( + send_ctx, + dense_map, + worker_desc, + self.string_hosts, + fluid.global_scope(), + ) fleet.util.barrier() # info = self._communicator.get_client_info() info = self._worker.get_client_info() if isinstance(info, list) and len(info) > 0: all_info = self.role_maker._all_gather( - info[0]) # 收集其他 client 的 service 地址 + info[0] + ) # 收集其他 client 的 service 地址 # for unittest if not isinstance(all_info, list): warnings.warn("gloo may not initialize correctly") @@ -1143,8 +1279,10 @@ class TheOnePSRuntime(RuntimeBase): self.scopes = scopes if not is_test: - if self.context[ - 'ps_mode'] == DistributedMode.GEO or self.is_heter_ps_mode == True: + if ( + self.context['ps_mode'] == DistributedMode.GEO + or self.is_heter_ps_mode == True + ): self._communicator.init_params(dense_map) else: if not self.context['use_ps_gpu']: @@ -1158,8 +1296,10 @@ class TheOnePSRuntime(RuntimeBase): self._pull_all_dense(scopes, send_ctx, dense_map) fleet.util.barrier() - if self.context[ - 'ps_mode'] == DistributedMode.GEO or self.is_heter_ps_mode == True: + if ( + self.context['ps_mode'] == DistributedMode.GEO + or self.is_heter_ps_mode == True + ): if not self._communicator.is_running(): self._communicator.start() else: @@ -1169,8 +1309,10 @@ class TheOnePSRuntime(RuntimeBase): launch_barrier_flag = int(os.getenv("FLAGS_LAUNCH_BARRIER", "1")) if launch_barrier and launch_barrier_flag: wait_server_ready(self.role_maker._get_pserver_endpoints()) - if self.is_heter_ps_mode and self.role_maker._get_next_trainers( - ) != []: + if ( + self.is_heter_ps_mode + and self.role_maker._get_next_trainers() != [] + ): wait_server_ready(self.role_maker._get_next_trainers()) if 
self.is_heter_ps_mode: previous_trainers = [] @@ -1180,8 +1322,8 @@ class TheOnePSRuntime(RuntimeBase): if self.role_maker._get_next_trainers() != []: next_trainers = self.role_maker._get_next_trainers() self._heter_client = HeterClient( - next_trainers, previous_trainers, - self.role_maker._role_id()) # --> HeterClient::GetInstance + next_trainers, previous_trainers, self.role_maker._role_id() + ) # --> HeterClient::GetInstance def _init_coordinator(self, scopes=None): if self._coordinator == None: @@ -1189,12 +1331,13 @@ class TheOnePSRuntime(RuntimeBase): print(">>> curr node ip: {}".format(self.coordinator_hosts[0])) print(">>> all trainer endpoints: {}".format(self.trainer_endpoints)) - self._coordinator.start_coordinator(self.coordinator_hosts[0], - self.trainer_endpoints) + self._coordinator.start_coordinator( + self.coordinator_hosts[0], self.trainer_endpoints + ) def _make_fl_strategy(self): if self._coordinator == None: - assert ("Coordinator py object is null!") + assert "Coordinator py object is null!" else: self._coordinator.make_fl_strategy() @@ -1208,12 +1351,18 @@ class TheOnePSRuntime(RuntimeBase): print("server_desc: \n{}".format(server_desc)) self._server = fluid.core.DistFleetWrapper() - self._server.init_server(server_desc, self.string_hosts, self.role_id, - trainers, self._server_sub_program) + self._server.init_server( + server_desc, + self.string_hosts, + self.role_id, + trainers, + self._server_sub_program, + ) dist_varnames = get_sparse_tablenames(self.origin_main_programs, True) - sparse_varnames = get_sparse_tablenames(self.origin_main_programs, - False) + sparse_varnames = get_sparse_tablenames( + self.origin_main_programs, False + ) distributed_varnames = dist_varnames + sparse_varnames @@ -1223,8 +1372,10 @@ class TheOnePSRuntime(RuntimeBase): for var_name in var_names: if var_name not in distributed_varnames: raise ValueError( - "fleet.init server can only load sparse variables in {}" - .format(distributed_varnames)) + "fleet.init server can only load sparse variables in {}".format( + distributed_varnames + ) + ) load_varnames = var_names if dirname is None or not load_varnames: @@ -1249,17 +1400,19 @@ class TheOnePSRuntime(RuntimeBase): self._communicator.stop() self._worker.stop_worker() if self.is_heter_ps_mode: - assert self._heter_client != None, "heter client should not be None in heterps mode" + assert ( + self._heter_client != None + ), "heter client should not be None in heterps mode" self._heter_client.stop() @staticmethod def __exclude_vars(exclude_var_names=[]): - def is_valid(var): if var.name in exclude_var_names: return False from .utils.public import _get_varname_parts + origin_varname, _, _ = _get_varname_parts(var.name) if origin_varname.endswith("@GRAD"): return False @@ -1267,9 +1420,11 @@ class TheOnePSRuntime(RuntimeBase): if origin_varname.startswith("learning_rate_"): return False - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -1282,35 +1437,35 @@ class TheOnePSRuntime(RuntimeBase): model_path = os.path.join(dirname, "dnn_plugin") return model_path - def _ps_save_dense_params(self, - executor, - dirname, - scope, - program, - var_names=None): + def _ps_save_dense_params( + self, executor, dirname, 
scope, program, var_names=None + ): dense_map = get_the_one_recv_context( - self.context, split_dense_table=self.is_heter_ps_mode) + self.context, split_dense_table=self.is_heter_ps_mode + ) send_ctx = get_the_one_send_context( self.context, split_dense_table=self.is_heter_ps_mode, - ep_list=self.endpoints) + ep_list=self.endpoints, + ) if program is None or len(self.origin_main_programs) == 1: program = self.origin_main_programs[0] dense_var_names = self._pull_dense(program, scope, send_ctx, dense_map) save_var_names = dense_var_names if var_names is None else var_names vars = [program.global_block().var(i) for i in save_var_names] import paddle + with paddle.static.scope_guard(scope): - paddle.static.save_vars(executor, - "./", - program, - vars=vars, - filename=dirname) - - def _save_sparse_params(self, executor, dirname, context, main_program, - mode): - distributed_varnames = get_sparse_tablenames(self.origin_main_programs, - True) + paddle.static.save_vars( + executor, "./", program, vars=vars, filename=dirname + ) + + def _save_sparse_params( + self, executor, dirname, context, main_program, mode + ): + distributed_varnames = get_sparse_tablenames( + self.origin_main_programs, True + ) values = [] model_path = self._get_inference_model_path(dirname) for id, names in context.items(): @@ -1326,12 +1481,9 @@ class TheOnePSRuntime(RuntimeBase): # self._worker.save_all_model(dirname, mode) return values - def _save_distributed_persistables(self, - executor, - dirname, - main_program=None, - mode=0, - **kwargs): + def _save_distributed_persistables( + self, executor, dirname, main_program=None, mode=0, **kwargs + ): """ This function filters out all variables with `persistable==True` from the give `main_program` and then saves these variables to the folder `dirname` @@ -1350,7 +1502,8 @@ class TheOnePSRuntime(RuntimeBase): if not isinstance(executor, Executor): raise TypeError( - "in fleet.save() function, executor must be as Executor type") + "in fleet.save() function, executor must be as Executor type" + ) if main_program is None: main_program = self.context['origin_main_program'] @@ -1362,14 +1515,16 @@ class TheOnePSRuntime(RuntimeBase): self._worker.save_all_model(dirname, mode) - def _ps_inference_save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True, - mode=0): + def _ps_inference_save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + mode=0, + ): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` by the `executor`. 
@@ -1382,11 +1537,16 @@ class TheOnePSRuntime(RuntimeBase): if not isinstance(executor, Executor): raise TypeError( - "in fleet.save() function, executor must be as Executor type") + "in fleet.save() function, executor must be as Executor type" + ) import paddle - program = self.origin_main_programs[ - 0] if main_program is None else main_program + + program = ( + self.origin_main_programs[0] + if main_program is None + else main_program + ) _, _, idx = get_program_by_id(self.context, id(program)) scope = self.scopes[idx] print("save inference model scope idx:", idx) @@ -1400,8 +1560,9 @@ class TheOnePSRuntime(RuntimeBase): program.global_block().var(name) for name in feeded_var_names ] - infer_program = paddle.static.normalize_program(program, feed_vars, - target_vars) + infer_program = paddle.static.normalize_program( + program, feed_vars, target_vars + ) infer_program._copy_dist_param_info_from(program) @@ -1413,30 +1574,40 @@ class TheOnePSRuntime(RuntimeBase): sparses = get_the_one_recv_context( self.context, is_dense=False, - split_dense_table=self.is_heter_ps_mode) - sparse_names = self._save_sparse_params(executor, dirname, sparses, - main_program, mode) + split_dense_table=self.is_heter_ps_mode, + ) + sparse_names = self._save_sparse_params( + executor, dirname, sparses, main_program, mode + ) dense_map = get_the_one_recv_context( - self.context, split_dense_table=self.is_heter_ps_mode) + self.context, split_dense_table=self.is_heter_ps_mode + ) send_ctx = get_the_one_send_context( self.context, split_dense_table=self.is_heter_ps_mode, - ep_list=self.endpoints) + ep_list=self.endpoints, + ) self._pull_dense(program, scope, send_ctx, dense_map) generate_vars = self.context[ - "user_defined_strategy"].trainer_desc_configs["stat_var_names"] + "user_defined_strategy" + ].trainer_desc_configs["stat_var_names"] generate_vars = [var for var in generate_vars] remaining_vars = list( - filter(TheOnePSRuntime.__exclude_vars(sparse_names), - infer_program.list_vars())) + filter( + TheOnePSRuntime.__exclude_vars(sparse_names), + infer_program.list_vars(), + ) + ) for var in remaining_vars: tensor = var.get_value(scope) - paddle.save(tensor, - os.path.join(model_path, var.name), - use_binary_format=True) + paddle.save( + tensor, + os.path.join(model_path, var.name), + use_binary_format=True, + ) def _save_cache_model(self, dirname, **kwargs): mode = kwargs.get("mode", 1) @@ -1447,7 +1618,7 @@ class TheOnePSRuntime(RuntimeBase): if self.role_maker._is_first_worker(): cache_threshold = self._worker.get_cache_threshold(table_id) - #check cache threshold right or not + # check cache threshold right or not fleet.util.barrier() if self.role_maker._is_first_worker(): @@ -1469,8 +1640,9 @@ class TheOnePSRuntime(RuntimeBase): fleet.util.barrier() def _load_sparse_params(self, dirname, context, main_program, mode): - distributed_varnames = get_sparse_tablenames(self.origin_main_programs, - True) + distributed_varnames = get_sparse_tablenames( + self.origin_main_programs, True + ) values = [] for id, names in context.items(): if names[0] not in distributed_varnames: @@ -1481,12 +1653,14 @@ class TheOnePSRuntime(RuntimeBase): values.extend(names) return values - def _ps_inference_load_inference_model(self, - dirname, - mode=0, - main_program=None): - main_program = self.origin_main_programs[ - 0] if main_program is None else main_program + def _ps_inference_load_inference_model( + self, dirname, mode=0, main_program=None + ): + main_program = ( + self.origin_main_programs[0] + if main_program is None + 
else main_program + ) _, _, idx = get_program_by_id(self.context, id(main_program)) scope = self.scopes[idx] print("load inference model scope idx:", idx) @@ -1499,17 +1673,21 @@ class TheOnePSRuntime(RuntimeBase): sparses = get_the_one_recv_context( self.context, is_dense=False, - split_dense_table=self.is_heter_ps_mode) + split_dense_table=self.is_heter_ps_mode, + ) - sparse_varnames = self._load_sparse_params(dirname, sparses, - main_program, mode) + sparse_varnames = self._load_sparse_params( + dirname, sparses, main_program, mode + ) dense_map = get_the_one_recv_context( - self.context, split_dense_table=self.is_heter_ps_mode) + self.context, split_dense_table=self.is_heter_ps_mode + ) send_ctx = get_the_one_send_context( self.context, split_dense_table=self.is_heter_ps_mode, - ep_list=self.endpoints) + ep_list=self.endpoints, + ) recv_dense_varnames = [] for _, names in dense_map.items(): @@ -1518,11 +1696,15 @@ class TheOnePSRuntime(RuntimeBase): loaded_varnames = sparse_varnames remaining_vars = list( - filter(TheOnePSRuntime.__exclude_vars(loaded_varnames), - main_program.list_vars())) + filter( + TheOnePSRuntime.__exclude_vars(loaded_varnames), + main_program.list_vars(), + ) + ) model_path = self._get_inference_model_path(dirname) import paddle + for var in remaining_vars: if var.name not in recv_dense_varnames: continue @@ -1586,8 +1768,8 @@ class TheOnePSRuntime(RuntimeBase): sparses = get_the_one_recv_context( self.context, is_dense=False, - split_dense_table=self.role_maker. - _is_heter_parameter_server_mode) + split_dense_table=self.role_maker._is_heter_parameter_server_mode, + ) for id, names in sparses.items(): self._worker.shrink_sparse_table(id, threshold) diff --git a/python/paddle/distributed/ps/utils/ps_factory.py b/python/paddle/distributed/ps/utils/ps_factory.py index 1f522ec27f82cf56aa9716f6a64e4fbfb4dda38d..0726fe15dc4cd437810b22b64ae5cfdb6dd04540 100755 --- a/python/paddle/distributed/ps/utils/ps_factory.py +++ b/python/paddle/distributed/ps/utils/ps_factory.py @@ -16,14 +16,18 @@ from .ps_program_builder import * # noqa: F403 from .public import * # noqa: F403 __all__ = [ - 'PsProgramBuilder', 'GeoPsProgramBuilder', 'CpuSyncPsProgramBuilder', - 'CpuAsyncPsProgramBuilder', 'GpuPsProgramBuilder', - 'HeterAsyncPsProgramBuilder', 'FlPsProgramBuilder', 'NuPsProgramBuilder' + 'PsProgramBuilder', + 'GeoPsProgramBuilder', + 'CpuSyncPsProgramBuilder', + 'CpuAsyncPsProgramBuilder', + 'GpuPsProgramBuilder', + 'HeterAsyncPsProgramBuilder', + 'FlPsProgramBuilder', + 'NuPsProgramBuilder', ] class PsProgramBuilderFactory(object): - def __init__(self): pass diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index c5835c730eeb3a864c025bedf15a9e4850473b33..75182a497213cfe6409e6b795d212ac81a326877 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -22,9 +22,9 @@ import paddle.fluid as fluid from paddle.fluid import core import paddle.fluid.framework as framework -#logging.basicConfig( +# logging.basicConfig( # format='%(levelname)s - %(asctime)s - %(pathname)s: %(lineno)s - %(message)s', level=logging.INFO) -#logger = logging.getLogger(__name__) +# logger = logging.getLogger(__name__) OP_NAME_SCOPE = "op_namescope" CLIP_OP_NAME_SCOPE = "gradient_clip" @@ -47,7 +47,7 @@ SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"] SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"} SPARSE_GRAD_OP_TYPE_DICT = { "lookup_table_grad": "W", - "lookup_table_v2_grad": 
"W" + "lookup_table_v2_grad": "W", } DEFAULT_DEVICE = 'cpu' @@ -58,13 +58,13 @@ DATA_NORM_GRAD_NAME = [x + "@GRAD" for x in DATA_NORM_NAME] def logger_config(log_path, logging_name): logger = logging.getLogger(logging_name) logger.setLevel(level=logging.WARNING) - handler = logging.FileHandler(log_path, - mode='a', - encoding='UTF-8', - delay=True) + handler = logging.FileHandler( + log_path, mode='a', encoding='UTF-8', delay=True + ) handler.setLevel(logging.INFO) formatter = logging.Formatter( - '%(levelname)s - %(asctime)s - %(pathname)s: %(lineno)s - %(message)s') + '%(levelname)s - %(asctime)s - %(pathname)s: %(lineno)s - %(message)s' + ) handler.setFormatter(formatter) console = logging.StreamHandler() console.setLevel(logging.DEBUG) @@ -74,8 +74,9 @@ def logger_config(log_path, logging_name): ps_log_root_dir = './ps_log/' -logger = logger_config(log_path='./ps_usr_print_log', - logging_name='ps_usr_print_log') +logger = logger_config( + log_path='./ps_usr_print_log', logging_name='ps_usr_print_log' +) class DistributedMode: @@ -88,7 +89,6 @@ class DistributedMode: class TrainerRuntimeConfig(object): - def __init__(self, valid_strategy): self.mode = None num_threads = os.getenv("CPU_NUM", "1") @@ -107,21 +107,28 @@ class TrainerRuntimeConfig(object): self.runtime_configs = {} self.runtime_configs['communicator_max_merge_var_num'] = os.getenv( - "FLAGS_communicator_max_merge_var_num", send_queue_size) + "FLAGS_communicator_max_merge_var_num", send_queue_size + ) self.runtime_configs['communicator_send_queue_size'] = os.getenv( - "FLAGS_communicator_send_queue_size", send_queue_size) + "FLAGS_communicator_send_queue_size", send_queue_size + ) self.runtime_configs[ - 'communicator_independent_recv_thread'] = os.getenv( - "FLAGS_communicator_independent_recv_thread", "1") + 'communicator_independent_recv_thread' + ] = os.getenv("FLAGS_communicator_independent_recv_thread", "1") self.runtime_configs[ - 'communicator_min_send_grad_num_before_recv'] = os.getenv( - "FLAGS_communicator_min_send_grad_num_before_recv", num_threads) + 'communicator_min_send_grad_num_before_recv' + ] = os.getenv( + "FLAGS_communicator_min_send_grad_num_before_recv", num_threads + ) self.runtime_configs['communicator_thread_pool_size'] = os.getenv( - "FLAGS_communicator_thread_pool_size", "5") + "FLAGS_communicator_thread_pool_size", "5" + ) self.runtime_configs['communicator_send_wait_times'] = os.getenv( - "FLAGS_communicator_send_wait_times", "5") + "FLAGS_communicator_send_wait_times", "5" + ) self.runtime_configs['communicator_is_sgd_optimizer'] = os.getenv( - "FLAGS_communicator_is_sgd_optimizer", "1") + "FLAGS_communicator_is_sgd_optimizer", "1" + ) def get_communicator_flags(self): need_keys = [] @@ -130,46 +137,62 @@ class TrainerRuntimeConfig(object): if self.mode is None or self.mode == DistributedMode.ASYNC: need_keys = self.runtime_configs.keys() mode_str = "async" - elif self.mode == DistributedMode.SYNC or self.mode == DistributedMode.HALF_ASYNC: + elif ( + self.mode == DistributedMode.SYNC + or self.mode == DistributedMode.HALF_ASYNC + ): mode_str = "sync or half_async" need_keys = [ 'communicator_max_merge_var_num', - 'communicator_send_wait_times', 'communicator_thread_pool_size', - 'communicator_send_queue_size' + 'communicator_send_wait_times', + 'communicator_thread_pool_size', + 'communicator_send_queue_size', ] elif self.mode == DistributedMode.GEO: mode_str = "GEO" need_keys = [ - 'communicator_thread_pool_size', 'communicator_send_wait_times', - 'communicator_max_merge_var_num', 
'communicator_send_queue_size' + 'communicator_thread_pool_size', + 'communicator_send_wait_times', + 'communicator_max_merge_var_num', + 'communicator_send_queue_size', ] else: raise ValueError("Unsupported Mode") - if self.mode == DistributedMode.SYNC or self.mode == DistributedMode.HALF_ASYNC: + if ( + self.mode == DistributedMode.SYNC + or self.mode == DistributedMode.HALF_ASYNC + ): max_merge_var_num = self.runtime_configs[ - 'communicator_max_merge_var_num'] + 'communicator_max_merge_var_num' + ] send_queue_size = self.runtime_configs[ - 'communicator_send_queue_size'] + 'communicator_send_queue_size' + ] if max_merge_var_num != num_threads: print( 'WARNING: In {} mode, communicator_max_merge_var_num ' 'must be equal to CPU_NUM. But received, ' 'communicator_max_merge_var_num = {}, CPU_NUM = ' - '{}. communicator_max_merge_var_num will be forced to {}.'. - format(mode_str, max_merge_var_num, num_threads, - num_threads)) + '{}. communicator_max_merge_var_num will be forced to {}.'.format( + mode_str, max_merge_var_num, num_threads, num_threads + ) + ) self.runtime_configs[ - 'communicator_max_merge_var_num'] = num_threads + 'communicator_max_merge_var_num' + ] = num_threads if send_queue_size != num_threads: - print('WARNING: In {} mode, communicator_send_queue_size ' - 'must be equal to CPU_NUM. But received, ' - 'communicator_send_queue_size = {}, CPU_NUM = ' - '{}. communicator_send_queue_size will be forced to {}.'. - format(mode_str, send_queue_size, num_threads, - num_threads)) + print( + 'WARNING: In {} mode, communicator_send_queue_size ' + 'must be equal to CPU_NUM. But received, ' + 'communicator_send_queue_size = {}, CPU_NUM = ' + '{}. communicator_send_queue_size will be forced to {}.'.format( + mode_str, send_queue_size, num_threads, num_threads + ) + ) self.runtime_configs[ - 'communicator_send_queue_size'] = num_threads + 'communicator_send_queue_size' + ] = num_threads return dict((key, str(self.runtime_configs[key])) for key in need_keys) @@ -178,9 +201,9 @@ def get_lr_ops(program): lr_ops = [] for index, op in enumerate(program.global_block().ops): role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME)) - if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \ - role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \ - int(OPT_OP_ROLE_ATTR_VALUE): + if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or role_id == int( + LR_SCHED_OP_ROLE_ATTR_VALUE + ) | int(OPT_OP_ROLE_ATTR_VALUE): lr_ops.append(op) return lr_ops @@ -190,16 +213,20 @@ def get_optimize_ops(_program, remote_sparse=[]): opt_ops = [] for op in block.ops: if _is_opt_role_op(op): - if len(remote_sparse) > 0 and op.input( - "Param" - )[0] not in remote_sparse: # for fl: only delete remote sparse optimize + if ( + len(remote_sparse) > 0 + and op.input("Param")[0] not in remote_sparse + ): # for fl: only delete remote sparse optimize continue # delete clip op from opt_ops when run in Parameter Server mode - if OP_NAME_SCOPE in op.all_attrs() \ - and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): + if ( + OP_NAME_SCOPE in op.all_attrs() + and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE) + ): op._set_attr( "op_role", - int(core.op_proto_and_checker_maker.OpRole.Backward)) + int(core.op_proto_and_checker_maker.OpRole.Backward), + ) continue opt_ops.append(op) return opt_ops @@ -228,7 +255,7 @@ def get_dist_env(): 'trainer_id': trainer_id, 'num_trainers': num_trainers, 'current_endpoint': current_endpoint, - 'trainer_endpoints': trainer_endpoints + 'trainer_endpoints': trainer_endpoints, } @@ -276,8 +303,10 @@ def 
is_distributed_sparse_op(op): if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True: return True - if op.type == "distributed_lookup_table" and op.attr( - 'is_distributed') is True: + if ( + op.type == "distributed_lookup_table" + and op.attr('is_distributed') is True + ): return True return False @@ -288,12 +317,17 @@ def get_sparse_tablename(op): def is_sparse_op(op): - if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr( - 'is_distributed') is False: + if ( + op.type in SPARSE_OP_LIST + and op.attr('is_sparse') is True + and op.attr('is_distributed') is False + ): return True - if op.type == "distributed_lookup_table" and op.attr( - 'is_distributed') is False: + if ( + op.type == "distributed_lookup_table" + and op.attr('is_distributed') is False + ): return True return False @@ -320,12 +354,14 @@ def get_trainers(role_maker): return role_maker.worker_num() -def get_dense_send_context(program, - send_ctx, - idx, - merged_dense_pairs, - trainer_id, - split_dense_table=False): +def get_dense_send_context( + program, + send_ctx, + idx, + merged_dense_pairs, + trainer_id, + split_dense_table=False, +): if len(merged_dense_pairs) < 1: return idx if not split_dense_table: @@ -356,10 +392,23 @@ def get_dense_send_context(program, # print("public get_dense_send_context dense_table:", grad_name, # var_numel, origin_varnames) from paddle.fluid.core import CommContext - dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], - [var_numel], origin_varnames, trainer_id, - aggregate, False, False, idx, False, False, - id(program), []) + + dense_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + origin_varnames, + trainer_id, + aggregate, + False, + False, + idx, + False, + False, + id(program), + [], + ) send_ctx[grad_name] = dense_ctx idx += 1 @@ -379,10 +428,23 @@ def get_dense_send_context(program, # print("public get_dense_send_context data_norm table:", grad_name, # var_numel, origin_varnames) from paddle.fluid.core import CommContext - data_norm_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], - [var_numel], origin_varnames, trainer_id, - aggregate, False, False, idx, False, True, - id(program), []) + + data_norm_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + origin_varnames, + trainer_id, + aggregate, + False, + False, + idx, + False, + True, + id(program), + [], + ) send_ctx[grad_name] = data_norm_ctx idx += 1 else: @@ -394,10 +456,23 @@ def get_dense_send_context(program, grad_name = origin_varname aggregate = True from paddle.fluid.core import CommContext - dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], - [var_numel], [origin_varname], trainer_id, - aggregate, False, False, idx, False, False, - id(program), []) + + dense_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + [origin_varname], + trainer_id, + aggregate, + False, + False, + idx, + False, + False, + id(program), + [], + ) send_ctx[grad_name] = dense_ctx idx += 1 return idx @@ -405,8 +480,10 @@ def get_dense_send_context(program, def get_geo_trainer_send_context(attrs): if attrs['ps_mode'] != DistributedMode.GEO: - raise ValueError("ps mode: {} not matched {}", - format(ps_mode, "get_geo_trainer_send_context")) + raise ValueError( + "ps mode: {} not matched {}", + format(ps_mode, "get_geo_trainer_send_context"), + ) send_ctx = {} trainer_id = get_role_id(attrs['role_maker']) origin_programs = attrs['origin_main_programs'] @@ -422,16 +499,32 @@ 
def get_geo_trainer_send_context(attrs): if param_name in attrs['remote_sparse']: # for recall/ncf model continue - is_distributed = True if param_name in distibuted_varnames else False + is_distributed = ( + True if param_name in distibuted_varnames else False + ) var = program.global_block().vars[grad.merged_var.name] var_numel = reduce(lambda x, y: x * y, var.shape[1:]) from paddle.fluid.core import CommContext - print("public get_the_geo_send_context sparse: ", grad_name, - var_numel) - sparse_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], - [var_numel], [grad_name], trainer_id, True, - True, is_distributed, idx, False, False, - id(program), []) + + print( + "public get_the_geo_send_context sparse: ", grad_name, var_numel + ) + sparse_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + [grad_name], + trainer_id, + True, + True, + is_distributed, + idx, + False, + False, + id(program), + [], + ) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx @@ -452,8 +545,23 @@ def _step_ctx(idx, role_maker): sections = [1] * len(endpoints) names = [name] * len(endpoints) from paddle.fluid.core import CommContext - ctx = CommContext(name, names, endpoints, sections, [name], trainer_id, - True, False, False, idx, True, False, -1, []) + + ctx = CommContext( + name, + names, + endpoints, + sections, + [name], + trainer_id, + True, + False, + False, + idx, + True, + False, + -1, + [], + ) return name, ctx @@ -483,7 +591,9 @@ def get_the_one_send_context(attrs, split_dense_table=False, ep_list=None): for i in range(len(ep_list)): splited_varname.append("{}.block{}".format(param_name, i)) - is_distributed = True if param_name in distibuted_varnames else False + is_distributed = ( + True if param_name in distibuted_varnames else False + ) var = program.global_block().vars[grad.merged_var.name] @@ -493,20 +603,43 @@ def get_the_one_send_context(attrs, split_dense_table=False, ep_list=None): if grad_name in send_ctx: continue from paddle.fluid.core import CommContext - print("public get_the_one_send_context sparse: ", grad_name, - splited_varname, shape) - sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape, - [grad_name], trainer_id, True, True, - is_distributed, idx, False, False, - id(program), remote_sparse_ids) + + print( + "public get_the_one_send_context sparse: ", + grad_name, + splited_varname, + shape, + ) + sparse_ctx = CommContext( + grad_name, + splited_varname, + ep_list, + shape, + [grad_name], + trainer_id, + True, + True, + is_distributed, + idx, + False, + False, + id(program), + remote_sparse_ids, + ) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx for i, program in enumerate(origin_programs): merged_dense_pairs = attrs['merged_dense_pairs'][i] - idx = get_dense_send_context(program, send_ctx, idx, merged_dense_pairs, - trainer_id, split_dense_table) + idx = get_dense_send_context( + program, + send_ctx, + idx, + merged_dense_pairs, + trainer_id, + split_dense_table, + ) if len(attrs['tensor_table']) > 0 and attrs['is_worker']: name, ctx = _step_ctx(idx, attrs['role_maker']) @@ -517,8 +650,11 @@ def get_the_one_send_context(attrs, split_dense_table=False, ep_list=None): def find_heter_ops(program, default_device="cpu"): if default_device not in DEVICE_LIST: - raise ValueError("Given device {} is not in device list {}".format( - default_device, DEVICE_LIST)) + raise ValueError( + "Given device {} is not in device list {}".format( + default_device, DEVICE_LIST + ) + ) def _is_heter_op(op, current_heter_device, 
default_device="cpu"): heter_devices = list(DEVICE_LIST) @@ -527,10 +663,13 @@ def find_heter_ops(program, default_device="cpu"): op_type = op.type if op_device in heter_devices: return True - elif op_type in COMMUNICATE_OPS_TYPE and current_heter_device != default_device: + elif ( + op_type in COMMUNICATE_OPS_TYPE + and current_heter_device != default_device + ): # for distributed communciate ops: send & recv & barrier etc. # Todo: need update this method - #op._set_attr('op_device', current_heter_device) + # op._set_attr('op_device', current_heter_device) return True elif op_device == None or op_device == default_device: op._set_attr('op_device', default_device) @@ -565,16 +704,20 @@ def find_heter_ops(program, default_device="cpu"): op = op_list[i] if "_grad" in op.type: forward_op_type = op.type.split("_grad")[0] - if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + forward_op_type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[forward_op_type])[0] if param_name in var2idx: ## insert sum op & remove sum op from var2idx and origin place op_list = list(block.ops) sum_op = op_list[var2idx[param_name]] sum_op_inputs = { - sum_op.input_names[0]: - [block.vars[input] for input in sum_op.input_arg_names] + sum_op.input_names[0]: [ + block.vars[input] + for input in sum_op.input_arg_names + ] } sum_op_outputs = { sum_op.output_names[0]: [ @@ -582,11 +725,13 @@ def find_heter_ops(program, default_device="cpu"): for output in sum_op.output_arg_names ] } - block._insert_op(index=i + 1, - type=sum_op.type, - inputs=sum_op_inputs, - outputs=sum_op_outputs, - attrs=sum_op.all_attrs()) + block._insert_op( + index=i + 1, + type=sum_op.type, + inputs=sum_op_inputs, + outputs=sum_op_outputs, + attrs=sum_op.all_attrs(), + ) block._remove_op(var2idx[param_name] + 1) var2idx.pop(param_name) for var_ in var2idx: @@ -607,9 +752,9 @@ def find_heter_ops(program, default_device="cpu"): for no_grad_var in output_vars_no_grad: if no_grad_var in var2idx: """ - insert sum op & remove sum op from var2idx and origin place + insert sum op & remove sum op from var2idx and origin place - """ + """ op_list = list(block.ops) sum_op = op_list[var2idx[no_grad_var]] sum_op_inputs = { @@ -624,11 +769,13 @@ def find_heter_ops(program, default_device="cpu"): for output in sum_op.output_arg_names ] } - block._insert_op(index=i + 1, - type=sum_op.type, - inputs=sum_op_inputs, - outputs=sum_op_outputs, - attrs=sum_op.all_attrs()) + block._insert_op( + index=i + 1, + type=sum_op.type, + inputs=sum_op_inputs, + outputs=sum_op_outputs, + attrs=sum_op.all_attrs(), + ) block._remove_op(var2idx[no_grad_var] + 1) var2idx.pop(no_grad_var) for var_ in var2idx: @@ -641,12 +788,16 @@ def find_heter_ops(program, default_device="cpu"): pre_op = op_list[i - 1] if "_grad" in pre_op.type: forward_op_type = pre_op.type.split("_grad")[0] - if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \ - and pre_op.attr('remote_prefetch') is True: + if ( + forward_op_type in SPARSE_OP_TYPE_DICT.keys() + and pre_op.attr('remote_prefetch') is True + ): param_name = pre_op.input( - SPARSE_OP_TYPE_DICT[forward_op_type])[0] + SPARSE_OP_TYPE_DICT[forward_op_type] + )[0] if param_name == origin_var and op.attr( - "op_device") == pre_op.attr("op_device"): + "op_device" + ) == pre_op.attr("op_device"): continue else: var2idx[origin_var] = i @@ -699,7 +850,8 @@ def find_heter_ops(program, default_device="cpu"): # for cpu-op block append if 
len(current_default_block_ops) > 1: default_ops[default_device][ - block_index] = current_default_block_ops + block_index + ] = current_default_block_ops program_block_ops.append(current_default_block_ops) current_default_block_ops = [] block_index += 1 @@ -755,8 +907,10 @@ def find_heter_ops(program, default_device="cpu"): for _, heter_block in heter_block_dict.items(): total_heter_ops += len(heter_block) print( - "There are {} OPs in your main_program, and contains {} heter-OPs which is made up of {} heter-blocks." - .format(len(block.ops), total_heter_ops, heter_blocks)) + "There are {} OPs in your main_program, and contains {} heter-OPs which is made up of {} heter-blocks.".format( + len(block.ops), total_heter_ops, heter_blocks + ) + ) return origin_porgram, heter_ops, default_ops, program_block_ops @@ -773,11 +927,14 @@ def union_forward_gradient_op(program_block_ops_list): """ block_length = len(program_block_ops_list) union_program_block_ops_list = [] - assert block_length % 2 != 0, "the length of program_block_ops_list should be odd" + assert ( + block_length % 2 != 0 + ), "the length of program_block_ops_list should be odd" for i in range(0, block_length // 2): block_op_list = {"forward": program_block_ops_list[i]} block_op_list.update( - {"backward": program_block_ops_list[block_length - 1 - i]}) + {"backward": program_block_ops_list[block_length - 1 - i]} + ) union_program_block_ops_list.append(block_op_list) block_op_list = {"forward": [], "backward": []} @@ -791,13 +948,15 @@ def union_forward_gradient_op(program_block_ops_list): def find_block_joints(program, program_block_ops_list, heter_ops): - block_var_detail = find_entrance_exit_private(program, - program_block_ops_list) - block_var_detail = entrance_exit_check(program, program_block_ops_list, - block_var_detail, heter_ops) - block_var_detail = delete_block_useless_exit(program, - program_block_ops_list, - block_var_detail) + block_var_detail = find_entrance_exit_private( + program, program_block_ops_list + ) + block_var_detail = entrance_exit_check( + program, program_block_ops_list, block_var_detail, heter_ops + ) + block_var_detail = delete_block_useless_exit( + program, program_block_ops_list, block_var_detail + ) return block_var_detail @@ -822,9 +981,11 @@ def find_entrance_exit_private(program, program_block_ops_list): for index, block_op_list in enumerate(program_block_ops_list): ## forward block_input, block_output = find_ops_list_input_output( - program, block_op_list["forward"]) + program, block_op_list["forward"] + ) persistables = screen_persistables( - program, block_input) + screen_persistables(program, block_output) + program, block_input + ) + screen_persistables(program, block_output) # find entrance & exit block_private_vars = list(set(block_input) & set(block_output)) block_entrance = list(set(block_input) - set(block_private_vars)) @@ -834,35 +995,40 @@ def find_entrance_exit_private(program, program_block_ops_list): "entrance": block_entrance, "exit": block_exit, "private": block_private_vars, - "persistables": persistables + "persistables": persistables, } } ## backward bp_block_input, bp_block_output = find_ops_list_input_output( - program, block_op_list["backward"]) + program, block_op_list["backward"] + ) bp_persistables = screen_persistables( - program, bp_block_input) + screen_persistables( - program, bp_block_output) + program, bp_block_input + ) + screen_persistables(program, bp_block_output) # find entrance & exit bp_block_private_vars = list(set(bp_block_input) & set(bp_block_output)) 
bp_block_entrance = list( - set(bp_block_input) - set(bp_block_private_vars)) + set(bp_block_input) - set(bp_block_private_vars) + ) bp_block_exit = list(set(bp_block_output) - set(bp_block_private_vars)) - detail.update({ - "backward": { - "entrance": bp_block_entrance, - "exit": bp_block_exit, - "private": bp_block_private_vars, - "persistables": bp_persistables + detail.update( + { + "backward": { + "entrance": bp_block_entrance, + "exit": bp_block_exit, + "private": bp_block_private_vars, + "persistables": bp_persistables, + } } - }) + ) block_var_detail.append(detail) return block_var_detail -def entrance_exit_check(program, program_block_ops_list, block_var_detail, - heter_ops): +def entrance_exit_check( + program, program_block_ops_list, block_var_detail, heter_ops +): for index in range(len(block_var_detail) - 1, -1, -1): if index - 1 < 0: break @@ -872,9 +1038,11 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, backward_entrance = block_var_detail[index]["backward"]["entrance"] - forward_all = block_var_detail[index]["forward"][ - "entrance"] + block_var_detail[index]["forward"][ - "private"] + block_var_detail[index]["forward"]["exit"] + forward_all = ( + block_var_detail[index]["forward"]["entrance"] + + block_var_detail[index]["forward"]["private"] + + block_var_detail[index]["forward"]["exit"] + ) for var in backward_entrance: if not ("@GRAD" in var) and not (var in forward_all): @@ -885,18 +1053,24 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, if previous_block_exit == current_block_entrance: continue exist_vars = list( - set(previous_block_exit) & set(current_block_entrance)) + set(previous_block_exit) & set(current_block_entrance) + ) need_add_vars = list(set(current_block_entrance) - set(exist_vars)) # var in different stage should not be ignored, since they are not placed in the same program & device - #need_add_vars = find_need_var_from_previous_block( + # need_add_vars = find_need_var_from_previous_block( # need_add_vars, block_var_detail, index, heter_ops) - previous_block_private = block_var_detail[index - - 1]["forward"]["private"] - previous_block_entrance = block_var_detail[index - - 1]["forward"]["entrance"] + previous_block_private = block_var_detail[index - 1]["forward"][ + "private" + ] + previous_block_entrance = block_var_detail[index - 1]["forward"][ + "entrance" + ] for var in need_add_vars: - if var not in previous_block_private and var not in previous_block_entrance: + if ( + var not in previous_block_private + and var not in previous_block_entrance + ): previous_block_entrance.append(var) previous_block_exit.append(var) if var not in current_block_entrance: @@ -912,27 +1086,35 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, if previous_block_exit == current_block_entrance: continue exist_vars = list( - set(previous_block_exit) & set(current_block_entrance)) + set(previous_block_exit) & set(current_block_entrance) + ) need_add_vars = list(set(current_block_entrance) - set(exist_vars)) need_ignore_vars = [] for var in need_add_vars: if "@GRAD" not in var: need_ignore_vars.append(var) need_add_vars = list( - set(need_add_vars).difference(set(need_ignore_vars))) - previous_block_private = block_var_detail[index + - 1]["backward"]["private"] - previous_block_entrance = block_var_detail[index + - 1]["backward"]["entrance"] + set(need_add_vars).difference(set(need_ignore_vars)) + ) + previous_block_private = block_var_detail[index + 1]["backward"][ + "private" + ] + 
previous_block_entrance = block_var_detail[index + 1]["backward"][ + "entrance" + ] for var in need_add_vars: - if var not in previous_block_private and var not in previous_block_entrance: + if ( + var not in previous_block_private + and var not in previous_block_entrance + ): previous_block_entrance.append(var) previous_block_exit.append(var) return block_var_detail -def delete_block_useless_exit(program, program_block_ops_list, - block_var_detail): +def delete_block_useless_exit( + program, program_block_ops_list, block_var_detail +): ## forward for index in range(len(block_var_detail)): if index == len(block_var_detail) - 1: @@ -951,8 +1133,9 @@ def delete_block_useless_exit(program, program_block_ops_list, if index - 1 < 0: break current_block_exit = block_var_detail[index]["backward"]["exit"] - next_block_entrance = block_var_detail[index - - 1]["backward"]["entrance"] + next_block_entrance = block_var_detail[index - 1]["backward"][ + "entrance" + ] need_delete_var = [] for var in current_block_exit: if var not in next_block_entrance: @@ -963,19 +1146,20 @@ def delete_block_useless_exit(program, program_block_ops_list, return block_var_detail -def get_communicate_var_info(program, - block_index, - entrance_var_list, - type="forward"): +def get_communicate_var_info( + program, block_index, entrance_var_list, type="forward" +): input_var_reshape_dim = [] input_var_reshape_name = [] if type == "forward": block_input_var_name = "forward_joint_{}_{}@Heter".format( - block_index - 1, block_index) + block_index - 1, block_index + ) else: block_input_var_name = "backward_joint_{}_{}@Heter".format( - block_index + 1, block_index) + block_index + 1, block_index + ) entrance_var_list.sort() # input @@ -998,12 +1182,15 @@ def get_communicate_var_info(program, def add_vars_by_var_list(var_name_list, origin_program, program, block): for var_name in var_name_list: - if var_name not in program.global_block( - ).vars and var_name not in block.vars: + if ( + var_name not in program.global_block().vars + and var_name not in block.vars + ): var = origin_program.global_block().vars[var_name] if var.persistable: - program.global_block()._clone_variable(var, - force_persistable=False) + program.global_block()._clone_variable( + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) @@ -1082,11 +1269,14 @@ def block_append_op(program, origin_program, block, op): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: - if var.name not in program.global_block( - ).vars and var.name not in block.vars: + if ( + var.name not in program.global_block().vars + and var.name not in block.vars + ): if var.persistable: program.global_block()._clone_variable( - var, force_persistable=False) + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) @@ -1095,20 +1285,22 @@ def block_append_op(program, origin_program, block, op): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: - if var.name not in program.global_block( - ).vars and var.name not in block.vars: + if ( + var.name not in program.global_block().vars + and var.name not in block.vars + ): if var.persistable: program.global_block()._clone_variable( - var, force_persistable=False) + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) if "_grad" not in op.type: # for forward op - return block.append_op(type=op.type, - inputs=inputs, - outputs=outputs, - attrs=op.all_attrs()) + return block.append_op( + 
type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs() + ) else: # for grad op op_desc = op.desc @@ -1134,49 +1326,54 @@ def get_next_stage_trainers(role_maker): return role_maker.get_next_trainers() -def insert_communicate_op(orign_program, - role_maker, - heter_block, - stage_id, - first_op_index, - block_var_detail, - device, - is_forward=True): +def insert_communicate_op( + orign_program, + role_maker, + heter_block, + stage_id, + first_op_index, + block_var_detail, + device, + is_forward=True, +): if is_forward: next_heter_worker_endpoints = get_next_stage_trainers(role_maker) previous_heter_worker_endpoints = get_previous_stage_trainers( - role_maker) + role_maker + ) entrance_var = block_var_detail[stage_id]["forward"]["entrance"] - comm_info = get_communicate_var_info(orign_program, stage_id + 1, - entrance_var) + comm_info = get_communicate_var_info( + orign_program, stage_id + 1, entrance_var + ) else: next_heter_worker_endpoints = get_next_stage_trainers(role_maker) previous_heter_worker_endpoints = get_previous_stage_trainers( - role_maker) + role_maker + ) entrance_var = block_var_detail[stage_id - 1]["backward"]["exit"] - comm_info = get_communicate_var_info(orign_program, stage_id - 1, - entrance_var, "backward") - - heter_block._insert_op(index=first_op_index, - type="send_and_recv", - inputs={"X": heter_block.vars[entrance_var[0]]}, - outputs={"Out": []}, - attrs={ - "mode": "forward" if is_forward else "backward", - "send_var_name": - entrance_var + ["microbatch_id"], - "recv_var_name": [], - "message_name": - comm_info["block_input_var_name"], - "next_endpoints": next_heter_worker_endpoints, - "previous_endpoints": - previous_heter_worker_endpoints, - "trainer_id": get_role_id(role_maker), - "op_device": device, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + comm_info = get_communicate_var_info( + orign_program, stage_id - 1, entrance_var, "backward" + ) + + heter_block._insert_op( + index=first_op_index, + type="send_and_recv", + inputs={"X": heter_block.vars[entrance_var[0]]}, + outputs={"Out": []}, + attrs={ + "mode": "forward" if is_forward else "backward", + "send_var_name": entrance_var + ["microbatch_id"], + "recv_var_name": [], + "message_name": comm_info["block_input_var_name"], + "next_endpoints": next_heter_worker_endpoints, + "previous_endpoints": previous_heter_worker_endpoints, + "trainer_id": get_role_id(role_maker), + "op_device": device, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return entrance_var @@ -1185,8 +1382,9 @@ def get_the_one_recv_context(context, is_dense=True, split_dense_table=False): recv_id_maps = {} grad_name_to_param_name = {} if is_dense: - send_ctx = get_the_one_send_context(context, - split_dense_table=split_dense_table) + send_ctx = get_the_one_send_context( + context, split_dense_table=split_dense_table + ) for idx, (name, ctx) in enumerate(send_ctx.items()): if ctx.is_sparse(): continue @@ -1201,9 +1399,9 @@ def get_the_one_recv_context(context, is_dense=True, split_dense_table=False): param_names.append(param_name) recv_id_maps[ctx.table_id()] = param_names else: - send_ctx = get_the_one_send_context(context, - split_dense_table=False, - ep_list=None) + send_ctx = get_the_one_send_context( + context, split_dense_table=False, ep_list=None + ) for idx, (name, ctx) in enumerate(send_ctx.items()): if not ctx.is_sparse(): continue @@ -1225,15 +1423,15 @@ def _get_varname_parts(varname): block_part = "" trainer_idx = varname.find(".trainer_") if trainer_idx >= 0: - trainer_part = varname[trainer_idx + 
1:] + trainer_part = varname[trainer_idx + 1 :] else: trainer_idx = len(varname) block_index = varname.find(".block") if block_index >= 0: - block_part = varname[block_index + 1:trainer_idx] + block_part = varname[block_index + 1 : trainer_idx] else: block_index = len(varname) - orig_var_name = varname[0:min(block_index, trainer_idx)] + orig_var_name = varname[0 : min(block_index, trainer_idx)] return orig_var_name, block_part, trainer_part @@ -1256,7 +1454,6 @@ def get_var_mem_size(var): class MergedVariable: - def __init__(self, merged, ordered, offsets): self.merged_var = merged self.ordered_vars = ordered @@ -1276,8 +1473,8 @@ def build_var_distributed(context): context["merged_variable_map"] = {} for origin_program in origin_programs: sparse_pairs, dense_pairs = get_param_grads(origin_program) - #print("public build_var_distributed sparse_pairs:", sparse_pairs) - #print("public build_var_distributed dense_pairs:", dense_pairs) + # print("public build_var_distributed sparse_pairs:", sparse_pairs) + # print("public build_var_distributed dense_pairs:", dense_pairs) origin_for_sparse = [] origin_for_dense = [] merged_sparse_pairs = [] @@ -1297,7 +1494,7 @@ def build_var_distributed(context): m_grad = MergedVariable(grad, [grad], [0]) merged_variables_pairs.append((m_param, m_grad)) merged_dense_pairs.append((m_param, m_grad)) - #print("public build_var_distributed merged_dense_pairs:", + # print("public build_var_distributed merged_dense_pairs:", # merged_dense_pairs) for sparse_pair in origin_for_sparse: @@ -1307,15 +1504,17 @@ def build_var_distributed(context): m_grad = MergedVariable(grad, [grad], [0]) merged_variables_pairs.append((m_param, m_grad)) merged_sparse_pairs.append((m_param, m_grad)) - #print("public build_var_distributed merged_sparse_pairs:", + # print("public build_var_distributed merged_sparse_pairs:", # merged_sparse_pairs) for merged in merged_variables_pairs: m_param, m_grad = merged context["merged_variable_map"][ - m_param.merged_var.name] = m_param.merged_var + m_param.merged_var.name + ] = m_param.merged_var context["merged_variable_map"][ - m_grad.merged_var.name] = m_grad.merged_var + m_grad.merged_var.name + ] = m_grad.merged_var param_merges = [] param_merges.extend(origin_for_sparse) @@ -1353,14 +1552,14 @@ def _is_opt_role_op(op): # optimize op_maker = core.op_proto_and_checker_maker optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize - if op_maker.kOpRoleAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role): + if op_maker.kOpRoleAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(optimize_role): return True return False def get_param_grads(origin_program): - def _get_params_grads(sparse_varnames): block = origin_program.global_block() @@ -1373,8 +1572,10 @@ def get_param_grads(origin_program): for op in block.ops: if _is_opt_role_op(op): # delete clip op from opt_ops when run in Parameter Server mode - if OP_NAME_SCOPE in op.all_attrs() \ - and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): + if ( + OP_NAME_SCOPE in op.all_attrs() + and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE) + ): op._set_attr("op_role", role_id) continue if op.attr(OP_ROLE_VAR_ATTR_NAME): @@ -1382,8 +1583,10 @@ def get_param_grads(origin_program): grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] if param_name not in optimize_params: optimize_params.add(param_name) - param_grad = (origin_var_dict[param_name], - origin_var_dict[grad_name]) + param_grad = ( + 
origin_var_dict[param_name], + origin_var_dict[grad_name], + ) if param_name in sparse_varnames: sparse_param_grads.append(param_grad) @@ -1394,8 +1597,10 @@ def get_param_grads(origin_program): def _get_sparse_varnames(): varnames = [] for op in origin_program.global_block().ops: - if op.type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + op.type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] varnames.append(param_name) @@ -1437,13 +1642,13 @@ def find_op_input_output(program, block, op): def add_send_op(program, block, _vars): - def _get_send_op_dict(): send_op_dict = {} send_op_list = find_send_op(program) for op in send_op_list: - input_list, _ = find_op_input_output(program, - program.global_block(), op) + input_list, _ = find_op_input_output( + program, program.global_block(), op + ) for var in input_list: send_op_dict[var] = op return send_op_dict @@ -1472,21 +1677,23 @@ def add_send_op(program, block, _vars): for table_id in table_dict: dummy_output = block.create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) send_input_vars = [ block.vars[union_var] for union_var in table_dict[table_id]['var_list'] ] - block.append_op(type="send", - inputs={"X": send_input_vars}, - outputs={"Out": dummy_output}, - attrs={ - "send_varnames": - table_dict[table_id]['send_varnames'], - "is_sparse": is_sparse, - "table_id": table_id, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + block.append_op( + type="send", + inputs={"X": send_input_vars}, + outputs={"Out": dummy_output}, + attrs={ + "send_varnames": table_dict[table_id]['send_varnames'], + "is_sparse": is_sparse, + "table_id": table_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return send_grad_var_list @@ -1503,21 +1710,26 @@ def delete_trainer_useless_var(program, static_var): program_useful_var_list = [] for op in program.global_block().ops: input_var_list, output_var_list = find_op_input_output( - program, program.global_block(), op) + program, program.global_block(), op + ) op_var_list = list(set(input_var_list).union(set(output_var_list))) program_useful_var_list = list( - set(program_useful_var_list).union(set(op_var_list))) + set(program_useful_var_list).union(set(op_var_list)) + ) program_useful_var_list += static_var program_useless_var_list = list( set(get_vars_name_in_block(program.global_block())).difference( - set(program_useful_var_list))) + set(program_useful_var_list) + ) + ) for var in program_useless_var_list: program.global_block()._remove_var(var) return program_useless_var_list -def create_backward_block(program, origin_program, bp_ops_list, - block_var_detail): +def create_backward_block( + program, origin_program, bp_ops_list, block_var_detail +): pre_block_idx = program.num_blocks - 1 heter_block = program._create_block(pre_block_idx) @@ -1526,8 +1738,10 @@ def create_backward_block(program, origin_program, bp_ops_list, send_varnames = op.attr('send_varnames') is_skip = False for varname in send_varnames: - if varname not in program.global_block( - ).vars and varname not in heter_block.vars: + if ( + varname not in program.global_block().vars + and varname not in heter_block.vars + ): is_skip = True break if is_skip == True: @@ -1543,12 +1757,14 @@ def create_backward_block(program, origin_program, bp_ops_list, def is_backward_op(op): return op_role_attr_name in op.attr_names and ( - int(op.attr(op_role_attr_name)) & 
int(op_role.Backward)) + int(op.attr(op_role_attr_name)) & int(op_role.Backward) + ) def is_forward_op(op): - return op_role_attr_name in op.attr_names and (int( - op.attr(op_role_attr_name)) == int(op_role.Forward)) + return op_role_attr_name in op.attr_names and ( + int(op.attr(op_role_attr_name)) == int(op_role.Forward) + ) def is_push_sparse_op(op): @@ -1591,11 +1807,13 @@ def check_program(program): for op in block.ops: input_var_names = op.desc.input_arg_names() output_var_names = op.desc.output_arg_names() - for var_name in (input_var_names + output_var_names): + for var_name in input_var_names + output_var_names: if not block._find_var_recursive(str(var_name)): raise ValueError( 'var: {} needed by op is not found in block: {}'.format( - str(var_name), block_idx)) + str(var_name), block_idx + ) + ) block_idx += 1 print('program checked valid') diff --git a/python/paddle/distributed/rpc/rpc.py b/python/paddle/distributed/rpc/rpc.py index c61b1a805b093a80ac6a5c0b088f42c0a970b646..6a93f27517c21b84ba08c8028c92a4662eaa42d1 100644 --- a/python/paddle/distributed/rpc/rpc.py +++ b/python/paddle/distributed/rpc/rpc.py @@ -26,7 +26,7 @@ from paddle.distributed.launch.context import Node WorkerInfo = namedtuple("WorkerInfo", ["name", "rank", "ip", "port"]) _DEFAULT_RPC_TIMEOUT = -1 -_MAX_RPC_TIMEOUT_MS = 0x7fffffff +_MAX_RPC_TIMEOUT_MS = 0x7FFFFFFF _BARRIER_TIMEOUT_MAX_DAYS = 99999999 # tcp store for `_barrier_never_timeout` _barrier_store = None @@ -55,8 +55,9 @@ def _exchange_all_service_infos(world_size): s = set() for rank in range(world_size): info = pickle.loads(_barrier_store.get(str(rank))) - assert (info.name not in s - ), "The Worker name must be unique, but name `{}` is repeated." + assert ( + info.name not in s + ), "The Worker name must be unique, but name `{}` is repeated." 
s.add(info.name) all_infos.append(info) return all_infos @@ -94,22 +95,30 @@ def init_rpc(name, rank=None, world_size=None, master_endpoint=None): """ rank = int(os.environ["PADDLE_TRAINER_ID"]) if rank is None else rank - world_size = int( - os.environ["PADDLE_TRAINERS_NUM"]) if world_size is None else world_size + world_size = ( + int(os.environ["PADDLE_TRAINERS_NUM"]) + if world_size is None + else world_size + ) worker_endpoint = os.getenv("PADDLE_WORKER_ENDPOINT", None) if worker_endpoint is None: worker_endpoint = _gen_endpoint() logger.info("Trainer {}: worker endpoint: {}".format(rank, worker_endpoint)) - master_endpoint = (master_endpoint if master_endpoint != None else - os.environ["PADDLE_MASTER_ENDPOINT"]) + master_endpoint = ( + master_endpoint + if master_endpoint != None + else os.environ["PADDLE_MASTER_ENDPOINT"] + ) master_addr, master_port = master_endpoint.split(":") master_port = int(master_port) stop_check_timeout = int(os.getenv("FLAGS_stop_check_timeout", "900")) - store = core.TCPStore(master_addr, - master_port, - rank == 0, - world_size, - timeout=stop_check_timeout) + store = core.TCPStore( + master_addr, + master_port, + rank == 0, + world_size, + timeout=stop_check_timeout, + ) _set_barrier_store(store) ip, port = worker_endpoint.split(":") port = int(port) @@ -117,8 +126,9 @@ def init_rpc(name, rank=None, world_size=None, master_endpoint=None): all_infos = _exchange_all_service_infos(world_size) c_infos = [] for node_info in all_infos: - info = core.WorkerInfo(node_info.name, node_info.rank, node_info.ip, - node_info.port) + info = core.WorkerInfo( + node_info.name, node_info.rank, node_info.ip, node_info.port + ) c_infos.append(info) core.init_and_set_agent_instance(name, c_infos) core.rpc_start_worker() @@ -226,7 +236,7 @@ def _barrier_never_timeout(global_rank, global_world_size): global _barrier_count barrier_prefix = "Barrier/" + str(_barrier_count) + "/" _barrier_count += 1 - is_master = (global_rank == 0) + is_master = global_rank == 0 def _check_keys_ready(wait_keys): start_time = time.time() @@ -235,11 +245,13 @@ def _barrier_never_timeout(global_rank, global_world_size): elapse_time = time.time() - start_time if datetime.timedelta(seconds=elapse_time) > timeout: raise RuntimeError( - "Keys {} are not ready sinck rank {} is waiting them.". - format(wait_keys, global_rank)) + "Keys {} are not ready sinck rank {} is waiting them.".format( + wait_keys, global_rank + ) + ) wait_keys = list( - filter(lambda key: int(_barrier_store.get(key)) != 1, - wait_keys)) + filter(lambda key: int(_barrier_store.get(key)) != 1, wait_keys) + ) if is_master: # the master will add key, wait for all workers'exiting key and exit in the end. diff --git a/python/paddle/distributed/sharding/__init__.py b/python/paddle/distributed/sharding/__init__.py index e938c12d5af0ea34af2311e8a3d6ad6c0ff09543..3a710ca8059424f7bd7b1e1f1da18173f6cb4bb0 100644 --- a/python/paddle/distributed/sharding/__init__.py +++ b/python/paddle/distributed/sharding/__init__.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .group_sharded import group_sharded_parallel, save_group_sharded_model # noqa: F401 +from .group_sharded import ( + group_sharded_parallel, + save_group_sharded_model, +) # noqa: F401 __all__ = ['group_sharded_parallel', 'save_group_sharded_model'] diff --git a/python/paddle/distributed/sharding/group_sharded.py b/python/paddle/distributed/sharding/group_sharded.py index 1474f639547fb6ae1cb785ba238e1e6f628c8625..4137075c3f902088f8b68dea4fb72aaf6d7f643b 100644 --- a/python/paddle/distributed/sharding/group_sharded.py +++ b/python/paddle/distributed/sharding/group_sharded.py @@ -22,31 +22,49 @@ from paddle.distributed.utils.log_utils import get_logger from paddle.fluid.framework import in_dygraph_mode # Old version -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ShardingStage3 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ( + ShardingStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ( + ShardingStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ( + ShardingScaler, +) # New version -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import GroupShardedStage3 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( + GroupShardedStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( + GroupShardedScaler, +) logger_ = get_logger(logging.WARNING) -def group_sharded_parallel(model, - optimizer, - level, - scaler=None, - group=None, - offload=False, - sync_buffers=False, - buffer_max_size=2**23, - segment_size=2**20, - sync_comm=False, - dp_group=None): +def group_sharded_parallel( + model, + optimizer, + level, + scaler=None, + group=None, + offload=False, + sync_buffers=False, + buffer_max_size=2**23, + segment_size=2**20, + sync_comm=False, + dp_group=None, +): """ Use group_sharded_parallel can perform group shared configuration on the model, optimizer and GradScaler. Level has three string options, 'os', 'os_g' and 'p_g_os' corresponds to three different usage scenarios: optimizer state segmentation, optimizer state + gradient segmentation, and parameter + gradient + optimizer state segmentation. Usually, optimizer state + gradient segmentation is actually a re optimization of optimizer state segmentation, so optimizer state + gradient segmentation can be used to realize optimizer state segmentation. 
@@ -101,13 +119,16 @@ def group_sharded_parallel(model, """ # check optition type assert isinstance( - model, - paddle.nn.Layer), "The model must be the instance of paddle.nn.Layer." + model, paddle.nn.Layer + ), "The model must be the instance of paddle.nn.Layer." assert isinstance( optimizer, Optimizer ), "The optimizer must be the instance of paddle.optimizer.Optimizer." - assert level in ['os', 'os_g', - 'p_g_os'], "The level must be os, os_g or p_g_os." + assert level in [ + 'os', + 'os_g', + 'p_g_os', + ], "The level must be os, os_g or p_g_os." def check_dtype(param): return param.dtype == paddle.float16 @@ -126,40 +147,51 @@ def group_sharded_parallel(model, optim=optimizer, group=group, offload=offload, - dp_group=dp_group) - model = GroupShardedStage2(model, - optimizer, - group=group, - sync_buffers=sync_buffers, - buffer_max_size=buffer_max_size, - dp_group=dp_group) + dp_group=dp_group, + ) + model = GroupShardedStage2( + model, + optimizer, + group=group, + sync_buffers=sync_buffers, + buffer_max_size=buffer_max_size, + dp_group=dp_group, + ) else: - optimizer = ShardingOptimizerStage2(params=model.parameters(), - optim=optimizer, - group=group, - offload=offload) - model = ShardingStage2(model, - optimizer, - group=group, - sync_buffers=sync_buffers, - buffer_max_size=buffer_max_size) + optimizer = ShardingOptimizerStage2( + params=model.parameters(), + optim=optimizer, + group=group, + offload=offload, + ) + model = ShardingStage2( + model, + optimizer, + group=group, + sync_buffers=sync_buffers, + buffer_max_size=buffer_max_size, + ) elif level == 'p_g_os': if in_dygraph_mode(): - model = GroupShardedStage3(model, - optimizer=optimizer, - group=group, - sync_buffers=sync_buffers, - segment_size=segment_size, - offload=offload, - sync_comm=sync_comm) + model = GroupShardedStage3( + model, + optimizer=optimizer, + group=group, + sync_buffers=sync_buffers, + segment_size=segment_size, + offload=offload, + sync_comm=sync_comm, + ) else: - model = ShardingStage3(model, - optimizer=optimizer, - group=group, - sync_buffers=sync_buffers, - segment_size=segment_size, - offload=offload, - sync_comm=sync_comm) + model = ShardingStage3( + model, + optimizer=optimizer, + group=group, + sync_buffers=sync_buffers, + segment_size=segment_size, + offload=offload, + sync_comm=sync_comm, + ) else: raise ValueError("Please enter the correct level.") if isinstance(scaler, paddle.amp.GradScaler): @@ -222,7 +254,8 @@ def save_group_sharded_model(model, output, optimizer=None): save_group_sharded_model(model, optimizer, output=output_dir) """ logger_.info( - "==========Begin to save group sharded model and optimizer==========") + "==========Begin to save group sharded model and optimizer==========" + ) assert not os.path.isfile( output ), "Saving directory ({}) should be a directory, not a file".format(output) @@ -246,4 +279,5 @@ def save_group_sharded_model(model, output, optimizer=None): output_opt = os.path.join(output, "model.pdopt") paddle.save(optimizer._optim.state_dict(), output_opt) logger_.info( - "==========End to save group sharded model and optimizer==========") + "==========End to save group sharded model and optimizer==========" + ) diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py index 7deebdf549ebae9cb909a2ba2c27c580afeed8d0..2df9118ac4e870a7ff2ae5d24c099eeaf4c4bc83 100644 --- a/python/paddle/distributed/spawn.py +++ b/python/paddle/distributed/spawn.py @@ -18,11 +18,22 @@ import signal import sys import warnings -from 
paddle.distributed.utils.launch_utils import _print_arguments, _prepare_trainer_env, get_host_name_ip -from paddle.distributed.cloud_utils import get_cluster_and_pod, _get_trainers_num +from paddle.distributed.utils.launch_utils import ( + _print_arguments, + _prepare_trainer_env, + get_host_name_ip, +) +from paddle.distributed.cloud_utils import ( + get_cluster_and_pod, + _get_trainers_num, +) from paddle.distributed.fleet.launch import get_cluster_from_args from paddle.distributed.fleet.cloud_utils import use_paddlecloud -from paddle.distributed.fleet.launch_utils import DeviceMode, check_backend, block_windows_and_macos +from paddle.distributed.fleet.launch_utils import ( + DeviceMode, + check_backend, + block_windows_and_macos, +) from paddle.device import get_device # deprecated module import @@ -33,7 +44,6 @@ __all__ = [] class ParallelEnvArgs(object): - def __init__(self): # Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17.. self.cluster_node_ips = None @@ -64,17 +74,27 @@ def _py_supported_check(): "Use `paddle.distributed.spawn` to start parallel training " "requires python version greater than 3.4, if your python " "is lower than this version, please use " - "`paddle.distributed.launch` instead.") + "`paddle.distributed.launch` instead." + ) def _options_valid_check(options): # `print_config` keeped as a debug options, not show to users supported_options = [ - 'start_method', 'ips', 'gpus', 'xpus', 'mlus', 'print_config', 'backend' + 'start_method', + 'ips', + 'gpus', + 'xpus', + 'mlus', + 'print_config', + 'backend', ] deprecated_options = [ - 'selected_devices', 'started_port', 'cluster_node_ips', 'node_ip', - 'use_paddlecloud' + 'selected_devices', + 'started_port', + 'cluster_node_ips', + 'node_ip', + 'use_paddlecloud', ] for key in options: if key not in supported_options: @@ -82,11 +102,14 @@ def _options_valid_check(options): warnings.warn( "The config option (%s) of `paddle.distributed.spawn` is deprecated. " "Please use the latest config options stated in the `spawn` API documentation." - % key, DeprecationWarning) + % key, + DeprecationWarning, + ) else: raise ValueError( "The config option (%s) of `paddle.distributed.spawn` is not supported." - % key) + % key + ) def _get_default_nprocs(): @@ -101,8 +124,10 @@ def _get_default_nprocs(): return multiprocessing.cpu_count() else: raise RuntimeError( - "`paddle.distributed.spawn` does not support parallel training on device `{}` now." - .format(device)) + "`paddle.distributed.spawn` does not support parallel training on device `{}` now.".format( + device + ) + ) def _get_default_backend(): @@ -117,8 +142,10 @@ def _get_default_backend(): return 'gloo' else: raise RuntimeError( - "`paddle.distributed.spawn` does not support parallel training on device `{}` now." - .format(device)) + "`paddle.distributed.spawn` does not support parallel training on device `{}` now.".format( + device + ) + ) def _get_node_ip(ips): @@ -180,23 +207,28 @@ def _get_subprocess_env_list(nprocs, options): "the number of visible devices(%d) is less than the number " "of spawn processes(%d), please ensure that the correct " "`nprocs` argument is passed or the environment variable " - "`CUDA_VISIBLE_DEVICES` is correctly configured." % - (len(env_devices_list), nprocs)) + "`CUDA_VISIBLE_DEVICES` is correctly configured." 
+ % (len(env_devices_list), nprocs) + ) args.selected_devices = ",".join( - [str(env_devices_list[x]) for x in range(0, nprocs)]) + [str(env_devices_list[x]) for x in range(0, nprocs)] + ) else: selected_device_list = args.selected_devices.split(',') if len(selected_device_list) != nprocs: raise ValueError( "The number of selected devices(%s) is not equal to " "the number of spawn processes(%d), please ensure that the " - "correct `nprocs` and `gpus` arguments are passed." % - (len(selected_device_list), nprocs)) + "correct `nprocs` and `gpus` arguments are passed." + % (len(selected_device_list), nprocs) + ) for card_id in selected_device_list: if card_id not in env_devices_list: - raise ValueError("The selected gpu card %s cannot found in " - "CUDA_VISIBLE_DEVICES (%s)." % - (card_id, ",".join(env_devices_list))) + raise ValueError( + "The selected gpu card %s cannot found in " + "CUDA_VISIBLE_DEVICES (%s)." + % (card_id, ",".join(env_devices_list)) + ) elif options['backend'] == 'bkcl': args.selected_devices = options.get('xpus', None) @@ -215,23 +247,28 @@ def _get_subprocess_env_list(nprocs, options): "the number of visible devices(%d) is less than the number " "of spawn processes(%d), please ensure that the correct " "`nprocs` argument is passed or the environment variable " - "`XPU_VISIBLE_DEVICES` is correctly configured." % - (len(env_devices_list), nprocs)) + "`XPU_VISIBLE_DEVICES` is correctly configured." + % (len(env_devices_list), nprocs) + ) args.selected_devices = ",".join( - [str(env_devices_list[x]) for x in range(0, nprocs)]) + [str(env_devices_list[x]) for x in range(0, nprocs)] + ) else: selected_device_list = args.selected_devices.split(',') if len(selected_device_list) != nprocs: raise ValueError( "The number of selected devices(%s) is not equal to " "the number of spawn processes(%d), please ensure that the " - "correct `nprocs` and `xpus` arguments are passed." % - (len(selected_device_list), nprocs)) + "correct `nprocs` and `xpus` arguments are passed." + % (len(selected_device_list), nprocs) + ) for card_id in selected_device_list: if card_id not in env_devices_list: - raise ValueError("The selected xpu card %s cannot found in " - "XPU_VISIBLE_DEVICES (%s)." % - (card_id, ",".join(env_devices_list))) + raise ValueError( + "The selected xpu card %s cannot found in " + "XPU_VISIBLE_DEVICES (%s)." + % (card_id, ",".join(env_devices_list)) + ) elif options['backend'] == 'cncl': args.selected_devices = options.get('mlus', None) if args.selected_devices is None: @@ -249,23 +286,28 @@ def _get_subprocess_env_list(nprocs, options): "the number of visible devices(%d) is less than the number " "of spawn processes(%d), please ensure that the correct " "`nprocs` argument is passed or the environment variable " - "`MLU_VISIBLE_DEVICES` is correctly configured." % - (len(env_devices_list), nprocs)) + "`MLU_VISIBLE_DEVICES` is correctly configured." + % (len(env_devices_list), nprocs) + ) args.selected_devices = ",".join( - [str(env_devices_list[x]) for x in range(0, nprocs)]) + [str(env_devices_list[x]) for x in range(0, nprocs)] + ) else: selected_device_list = args.selected_devices.split(',') if len(selected_device_list) != nprocs: raise ValueError( "The number of selected devices(%s) is not equal to " "the number of spawn processes(%d), please ensure that the " - "correct `nprocs` and `mlus` arguments are passed." % - (len(selected_device_list), nprocs)) + "correct `nprocs` and `mlus` arguments are passed." 
+ % (len(selected_device_list), nprocs) + ) for card_id in selected_device_list: if card_id not in env_devices_list: - raise ValueError("The selected mlu card %s cannot found in " - "MLU_VISIBLE_DEVICES (%s)." % - (card_id, ",".join(env_devices_list))) + raise ValueError( + "The selected mlu card %s cannot found in " + "MLU_VISIBLE_DEVICES (%s)." + % (card_id, ",".join(env_devices_list)) + ) elif options['backend'] == 'gloo': # TODO check gpu / xpu flag must not exist warnings.warn( @@ -275,14 +317,15 @@ def _get_subprocess_env_list(nprocs, options): args.paddle_cpuonly = True args.selected_devices = None args.ips = args.cluster_node_ips - assert options.get( - 'use_paddlecloud', - None) is None, "CPUONLY spawn doesn't support use paddle cloud" - assert len( - args.cluster_node_ips.split(',') - ) <= 1, "CPUONLY spawn only support single trainer, that is len(ips)=1, but got %s." - assert _get_trainers_num( - ) == 1, "CPUONLY spawn doesn't support multi-trainer" + assert ( + options.get('use_paddlecloud', None) is None + ), "CPUONLY spawn doesn't support use paddle cloud" + assert ( + len(args.cluster_node_ips.split(',')) <= 1 + ), "CPUONLY spawn only support single trainer, that is len(ips)=1, but got %s." + assert ( + _get_trainers_num() == 1 + ), "CPUONLY spawn doesn't support multi-trainer" # set other inner args args.node_ip = options.get('node_ip', None) @@ -298,15 +341,17 @@ def _get_subprocess_env_list(nprocs, options): # get cluster and pod config if options['backend'] == 'gloo': devices_per_proc = [x for x in range(0, nprocs)] - cluster, pod = get_cluster_from_args(args, DeviceMode.CPU, - devices_per_proc) + cluster, pod = get_cluster_from_args( + args, DeviceMode.CPU, devices_per_proc + ) else: cluster, pod = get_cluster_and_pod(args) # prepare subprocess env list for trainer in pod.trainers: processes_env_list.append( - _prepare_trainer_env(cluster, trainer, options['backend'])) + _prepare_trainer_env(cluster, trainer, options['backend']) + ) # [Debug] print config args.print_config = options.get('print_config', False) @@ -340,7 +385,7 @@ def _set_trainer_env(env_dict, backend): elif backend == 'cncl': set_flags({'FLAGS_selected_mlus': env_dict['FLAGS_selected_mlus']}) else: - #NOTE(xiongkun) why not raise Error ? + # NOTE(xiongkun) why not raise Error ? # So far, we added support for CPU parallel, and will be applied when paddle is not # compiled with cuda or xp. just do nothing. 
pass @@ -362,12 +407,12 @@ def _func_wrapper(func, args, error_queue, return_queue, env_dict, backend): pass except Exception: import traceback + error_queue.put(traceback.format_exc()) sys.exit(1) class MultiprocessContext(object): - def __init__(self, processes, error_queues, return_queues): _py_supported_check() self.error_queues = error_queues @@ -379,16 +424,16 @@ class MultiprocessContext(object): self.return_queues = return_queues self.processes = processes self.sentinels = { - process.sentinel: index - for index, process in enumerate(processes) + process.sentinel: index for index, process in enumerate(processes) } def join(self, timeout=None): if len(self.sentinels) == 0: return True - ready = multiprocessing.connection.wait(self.sentinels.keys(), - timeout=timeout) + ready = multiprocessing.connection.wait( + self.sentinels.keys(), timeout=timeout + ) error_index = None for sentinel in ready: @@ -414,16 +459,22 @@ class MultiprocessContext(object): exitcode = self.processes[error_index].exitcode if exitcode < 0: name = signal.Signals(-exitcode).name - raise Exception("Process %d terminated with signal %s." % - (error_index, name)) + raise Exception( + "Process %d terminated with signal %s." + % (error_index, name) + ) else: - raise Exception("Process %d terminated with exit code %d." % - (error_index, exitcode)) + raise Exception( + "Process %d terminated with exit code %d." + % (error_index, exitcode) + ) original_trace = self.error_queues[error_index].get() - msg = "\n\n----------------------------------------------\n" \ - "Process %d terminated with the following error:\n" \ - "----------------------------------------------\n\n" % error_index + msg = ( + "\n\n----------------------------------------------\n" + "Process %d terminated with the following error:\n" + "----------------------------------------------\n\n" % error_index + ) msg += original_trace raise Exception(msg) @@ -584,9 +635,17 @@ def spawn(func, args=(), nprocs=-1, join=True, daemon=False, **options): for i in range(nprocs): error_queue = mp.SimpleQueue() return_queue = mp.SimpleQueue() - process = mp.Process(target=_func_wrapper, - args=(func, args, error_queue, return_queue, - procs_env_list[i], options['backend'])) + process = mp.Process( + target=_func_wrapper, + args=( + func, + args, + error_queue, + return_queue, + procs_env_list[i], + options['backend'], + ), + ) process.daemon = daemon process.start() error_queues.append(error_queue) diff --git a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py index a542523e60536422b0a43c135197b4b100201897..88acc643ead83d61963b9d2a0efce92dadf41a8a 100644 --- a/python/paddle/distributed/utils/launch_utils.py +++ b/python/paddle/distributed/utils/launch_utils.py @@ -33,12 +33,18 @@ def get_cluster_from_args(args, selected_gpus): node_ip = args.node_ip node_rank = node_ips.index(node_ip) - logger.debug("parsed from args:node_ips:{} node_ip:{} node_rank:{}".format( - node_ips, node_ip, node_rank)) + logger.debug( + "parsed from args:node_ips:{} node_ip:{} node_rank:{}".format( + node_ips, node_ip, node_rank + ) + ) free_ports = None - if not args.use_paddlecloud and len( - node_ips) <= 1 and args.started_port is None: + if ( + not args.use_paddlecloud + and len(node_ips) <= 1 + and args.started_port is None + ): free_ports = find_free_ports(len(selected_gpus)) if free_ports is not None: free_ports = list(free_ports) @@ -60,6 +66,7 @@ def get_cluster_from_args(args, selected_gpus): def get_gpus(selected_gpus): if 
selected_gpus is None: from paddle.fluid import core + gpus_num = core.get_cuda_device_count() gpus = [str(x) for x in range(0, gpus_num)] else: @@ -72,48 +79,56 @@ def get_gpus(selected_gpus): # therefore selected_gpus=0,1,2,3 cuda_visible_devices_list = cuda_visible_devices.split(',') for x in selected_gpus.split(','): - assert x in cuda_visible_devices_list, "Can't find "\ - "your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]."\ - % (x, cuda_visible_devices) + assert x in cuda_visible_devices_list, ( + "Can't find " + "your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]." + % (x, cuda_visible_devices) + ) gpus = [ cuda_visible_devices_list.index(x.strip()) for x in selected_gpus.split(',') ] - logger.info("Change selected_gpus into reletive values. --ips:{} " - "will change into relative_ips:{} according to your " - "CUDA_VISIBLE_DEVICES:{}".format( - selected_gpus, gpus, cuda_visible_devices_list)) + logger.info( + "Change selected_gpus into reletive values. --ips:{} " + "will change into relative_ips:{} according to your " + "CUDA_VISIBLE_DEVICES:{}".format( + selected_gpus, gpus, cuda_visible_devices_list + ) + ) return gpus class Hdfs(object): - def __init__(self): self.hdfs_ugi = None self.hdfs_name = None self.hdfs_path = None def is_valid(self): - return self.hdfs_ugi is not None and \ - self.hdfs_name is not None and \ - self.hdfs_path is not None + return ( + self.hdfs_ugi is not None + and self.hdfs_name is not None + and self.hdfs_path is not None + ) def __str__(self): return "hdfs_ugi:{} hdfs_name:{} hdfs_path{}".format( - self.hdfs_ugi, self.hdfs_name, self.hdfs_path) + self.hdfs_ugi, self.hdfs_name, self.hdfs_path + ) def __eq__(self, n): - return self.hdfs_ugi == n.hdfs_ugi and \ - self.hdfs_name == n.hdfs_name and \ - self.hdfs_path == n.hdfs_path + return ( + self.hdfs_ugi == n.hdfs_ugi + and self.hdfs_name == n.hdfs_name + and self.hdfs_path == n.hdfs_path + ) def __ne__(self, n): return not self == n class Cluster(object): - def __init__(self, hdfs): self.job_server = None self.pods = [] @@ -122,8 +137,11 @@ class Cluster(object): def __str__(self): return "job_server:{} pods:{} job_stage_flag:{} hdfs:{}".format( - self.job_server, [str(pod) for pod in self.pods], - self.job_stage_flag, self.hdfs) + self.job_server, + [str(pod) for pod in self.pods], + self.job_stage_flag, + self.hdfs, + ) def __eq__(self, cluster): if len(self.pods) != len(cluster.pods): @@ -161,8 +179,9 @@ class Cluster(object): r = [] for pod in self.pods: ep = "{}:{}".format(pod.addr, pod.port) - assert pod.port != None and pod.addr != None, "{} not a valid endpoint".format( - ep) + assert ( + pod.port != None and pod.addr != None + ), "{} not a valid endpoint".format(ep) r.append(ep) return r @@ -176,7 +195,6 @@ class Cluster(object): class JobServer(object): - def __init__(self): self.endpoint = None @@ -191,22 +209,21 @@ class JobServer(object): class Trainer(object): - def __init__(self): self.gpus = [] self.endpoint = None self.rank = None def __str__(self): - return "gpu:{} endpoint:{} rank:{}".format(self.gpus, self.endpoint, - self.rank) + return "gpu:{} endpoint:{} rank:{}".format( + self.gpus, self.endpoint, self.rank + ) def __eq__(self, t): if len(self.gpus) != len(t.gpus): return False - if self.endpoint != t.endpoint or \ - self.rank != t.rank: + if self.endpoint != t.endpoint or self.rank != t.rank: return False for a, b in zip(self.gpus, t.gpus): @@ -223,7 +240,6 @@ class Trainer(object): class Pod(object): - def __init__(self): self.rank = None self.id = None @@ -233,27 +249,38 @@ 
class Pod(object): self.gpus = [] def __str__(self): - return "rank:{} id:{} addr:{} port:{} visible_gpu:{} trainers:{}".format( - self.rank, self.id, self.addr, self.port, self.gpus, - [str(t) for t in self.trainers]) + return ( + "rank:{} id:{} addr:{} port:{} visible_gpu:{} trainers:{}".format( + self.rank, + self.id, + self.addr, + self.port, + self.gpus, + [str(t) for t in self.trainers], + ) + ) def __eq__(self, pod): - if self.rank != pod.rank or \ - self.id != pod.id or \ - self.addr != pod.addr or \ - self.port != pod.port: + if ( + self.rank != pod.rank + or self.id != pod.id + or self.addr != pod.addr + or self.port != pod.port + ): logger.debug("pod {} != {}".format(self, pod)) return False if len(self.trainers) != len(pod.trainers): - logger.debug("trainers {} != {}".format(self.trainers, - pod.trainers)) + logger.debug( + "trainers {} != {}".format(self.trainers, pod.trainers) + ) return False for i in range(len(self.trainers)): if self.trainers[i] != pod.trainers[i]: - logger.debug("trainer {} != {}".format(self.trainers[i], - pod.trainers[i])) + logger.debug( + "trainer {} != {}".format(self.trainers[i], pod.trainers[i]) + ) return False return True @@ -310,7 +337,7 @@ def terminate_local_procs(procs): p.log_fn.close() logger.debug("terminate process id:{}".format(p.proc.pid)) - #wait all process terminiated + # wait all process terminiated time.sleep(3) for step in range(0, 50): alive = False @@ -347,15 +374,16 @@ def add_arguments(argname, type, default, help, argparser, **kwargs): args = parser.parse_args() """ type = strtobool if type == bool else type - argparser.add_argument("--" + argname, - default=default, - type=type, - help=help + ' Default: %(default)s.', - **kwargs) + argparser.add_argument( + "--" + argname, + default=default, + type=type, + help=help + ' Default: %(default)s.', + **kwargs + ) def find_free_ports(num): - def __free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind(('', 0)) @@ -386,30 +414,30 @@ def _prepare_trainer_env(cluster, trainer, backend=None): backend = get_backend_by_compile_flag() # for compatibility if backend == 'bkcl': proc_env = { - "FLAGS_selected_xpus": - "%s" % ",".join([str(g) for g in trainer.gpus]), + "FLAGS_selected_xpus": "%s" + % ",".join([str(g) for g in trainer.gpus]), "PADDLE_TRAINER_ID": "%d" % trainer.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % trainer.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), } elif backend == 'nccl': proc_env = { - "FLAGS_selected_gpus": - "%s" % ",".join([str(g) for g in trainer.gpus]), + "FLAGS_selected_gpus": "%s" + % ",".join([str(g) for g in trainer.gpus]), "PADDLE_TRAINER_ID": "%d" % trainer.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % trainer.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), } elif backend == 'cncl': proc_env = { - "FLAGS_selected_mlus": - "%s" % ",".join([str(g) for g in trainer.gpus]), + "FLAGS_selected_mlus": "%s" + % ",".join([str(g) for g in trainer.gpus]), "PADDLE_TRAINER_ID": "%d" % trainer.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % trainer.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": 
",".join(cluster.trainers_endpoints()), } elif backend == 'gloo': # NOTE (xiongkun) default fall back into cpu only @@ -418,8 +446,7 @@ def _prepare_trainer_env(cluster, trainer, backend=None): "PADDLE_CURRENT_ENDPOINT": "%s" % trainer.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), - "PADDLE_DISTRI_BACKEND": - backend, # only add here, other will be auto + "PADDLE_DISTRI_BACKEND": backend, # only add here, other will be auto } else: raise ValueError("backend must be one of 'gloo, nccl, bkcl'") @@ -428,7 +455,6 @@ def _prepare_trainer_env(cluster, trainer, backend=None): class TrainerProc(object): - def __init__(self): self.proc = None self.log_fn = None @@ -438,16 +464,14 @@ class TrainerProc(object): self.cmd = None -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - log_dir=None): +def start_local_trainers( + cluster, pod, training_script, training_script_args, log_dir=None +): current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. - #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -493,8 +517,9 @@ def pull_worker_log(tp): except UnicodeEncodeError: sys.stdout.write( 'UnicodeEncodeError occurs at this line. ' - 'Please refer to the original log file "%s"\n' % - tp.log_fn.name) + 'Please refer to the original log file "%s"\n' + % tp.log_fn.name + ) tp.log_offset = fin.tell() @@ -525,14 +550,18 @@ def watch_local_trainers(procs, nranks): raise except SystemExit: logger.error( - "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log." - .format(nranks, error_rank)) + "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".format( + nranks, error_rank + ) + ) terminate_local_procs(procs) raise except: logger.error( - "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log." - .format(nranks, error_rank)) + "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".format( + nranks, error_rank + ) + ) terminate_local_procs(procs) raise diff --git a/python/paddle/distributed/utils/moe_utils.py b/python/paddle/distributed/utils/moe_utils.py index d6dbfdfab58c0205882077de68a056ed509930ea..cd7c0e758d4e0be678cff83684e92acd6a15cc4e 100644 --- a/python/paddle/distributed/utils/moe_utils.py +++ b/python/paddle/distributed/utils/moe_utils.py @@ -18,11 +18,9 @@ from paddle.fluid.data_feeder import check_variable_and_dtype from paddle import _legacy_C_ops -def global_scatter(x, - local_count, - global_count, - group=None, - use_calc_stream=True): +def global_scatter( + x, local_count, global_count, group=None, use_calc_stream=True +): """ The global_scatter operator distributes the data of x to n_expert * world_size experts according to local_count, and then receives data according to global_count. 
The expert refers to a user-defined expert network, @@ -110,42 +108,49 @@ def global_scatter(x, ring_id = 0 if group is None else group.id if _non_static_mode(): - return _legacy_C_ops.global_scatter(x, local_count, \ - global_count, \ - 'use_calc_stream', use_calc_stream, \ - 'ring_id', ring_id) + return _legacy_C_ops.global_scatter( + x, + local_count, + global_count, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + ) else: op_type = 'global_scatter' check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'global_scatter') - check_variable_and_dtype(local_count, 'local_count', ['int64'], - 'global_scatter') - check_variable_and_dtype(global_count, 'global_count', ['int64'], - 'global_scatter') + x, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'global_scatter', + ) + check_variable_and_dtype( + local_count, 'local_count', ['int64'], 'global_scatter' + ) + check_variable_and_dtype( + global_count, 'global_count', ['int64'], 'global_scatter' + ) helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=op_type, - inputs={ - 'X': [x], - 'local_count': [local_count], - 'global_count': [global_count], - }, - outputs={'Out': [out]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream - }) + helper.append_op( + type=op_type, + inputs={ + 'X': [x], + 'local_count': [local_count], + 'global_count': [global_count], + }, + outputs={'Out': [out]}, + attrs={'ring_id': ring_id, 'use_calc_stream': use_calc_stream}, + ) return out -def global_gather(x, - local_count, - global_count, - group=None, - use_calc_stream=True): +def global_gather( + x, local_count, global_count, group=None, use_calc_stream=True +): """ The global_gather operator gathers the data of x into n_expert * world_size experts according to global_count, and then receives data according to local_count. The expert refers to a user-defined expert network, n_expert refers to the number of expert networks owned by each card, and world_size refers to the number of graphics cards running the network. 
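Both `global_scatter` and `global_gather` keep the calling convention `(x, local_count, global_count, group=None, use_calc_stream=True)`, where `local_count` and `global_count` are int64 tensors of length `n_expert * world_size`, and `global_gather` routes the scattered rows back the other way. A minimal two-rank sketch of that round trip, assuming an already-initialized collective group launched via `paddle.distributed.launch` or `spawn`; the concrete count values and the direct import from `paddle.distributed.utils.moe_utils` are illustrative assumptions:

import paddle
from paddle.distributed import init_parallel_env, ParallelEnv
from paddle.distributed.utils.moe_utils import global_scatter, global_gather

init_parallel_env()  # assumes a 2-card collective job
n_expert, world_size, d_model = 2, 2, 2
x = paddle.randn([5, d_model])  # 5 local rows to route to the experts
if ParallelEnv().local_rank == 0:
    local_count = paddle.to_tensor([2, 1, 1, 1], dtype='int64')   # rows sent to each of the n_expert * world_size expert slots
    global_count = paddle.to_tensor([2, 1, 1, 1], dtype='int64')  # rows received back for the local experts
else:
    local_count = paddle.to_tensor([1, 1, 2, 1], dtype='int64')
    global_count = paddle.to_tensor([1, 1, 2, 1], dtype='int64')

scattered = global_scatter(x, local_count, global_count)        # shape: [global_count.sum(), d_model]
restored = global_gather(scattered, local_count, global_count)  # shape: [local_count.sum(), d_model]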
@@ -223,33 +228,45 @@ def global_gather(x, ring_id = 0 if group is None else group.id if _non_static_mode(): - return _legacy_C_ops.global_gather(x, local_count, \ - global_count, \ - 'use_calc_stream', use_calc_stream, \ - 'ring_id', ring_id) + return _legacy_C_ops.global_gather( + x, + local_count, + global_count, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + ) else: op_type = 'global_gather' check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'global_gather') + x, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'global_gather', + ) - check_variable_and_dtype(local_count, 'local_count', ['int64'], - 'global_gather') + check_variable_and_dtype( + local_count, 'local_count', ['int64'], 'global_gather' + ) - check_variable_and_dtype(global_count, 'global_count', ['int64'], - 'global_gather') + check_variable_and_dtype( + global_count, 'global_count', ['int64'], 'global_gather' + ) helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=op_type, - inputs={ - 'X': [x], - 'local_count': [local_count], - 'global_count': [global_count] - }, - outputs={'Out': [out]}, - attrs={ - 'ring_id': group, - 'use_calc_stream': use_calc_stream, - }) + helper.append_op( + type=op_type, + inputs={ + 'X': [x], + 'local_count': [local_count], + 'global_count': [global_count], + }, + outputs={'Out': [out]}, + attrs={ + 'ring_id': group, + 'use_calc_stream': use_calc_stream, + }, + ) return out diff --git a/python/paddle/distribution/__init__.py b/python/paddle/distribution/__init__.py index 805675a0f2373bc3da8fa1c72c83667d3c3a6cfd..77b83fa6a94c544e55ab32fc610dc569e1148667 100644 --- a/python/paddle/distribution/__init__.py +++ b/python/paddle/distribution/__init__.py @@ -25,15 +25,26 @@ from paddle.distribution.lognormal import LogNormal from paddle.distribution.multinomial import Multinomial from paddle.distribution.normal import Normal from paddle.distribution.transform import * # noqa: F403 -from paddle.distribution.transformed_distribution import \ - TransformedDistribution +from paddle.distribution.transformed_distribution import TransformedDistribution from paddle.distribution.uniform import Uniform from paddle.distribution.laplace import Laplace __all__ = [ # noqa - 'Beta', 'Categorical', 'Dirichlet', 'Distribution', 'ExponentialFamily', - 'Multinomial', 'Normal', 'Uniform', 'kl_divergence', 'register_kl', - 'Independent', 'TransformedDistribution', 'Laplace', 'LogNormal', 'Gumbel' + 'Beta', + 'Categorical', + 'Dirichlet', + 'Distribution', + 'ExponentialFamily', + 'Multinomial', + 'Normal', + 'Uniform', + 'kl_divergence', + 'register_kl', + 'Independent', + 'TransformedDistribution', + 'Laplace', + 'LogNormal', + 'Gumbel', ] __all__.extend(transform.__all__) diff --git a/python/paddle/distribution/beta.py b/python/paddle/distribution/beta.py index 3474ee87b4ca6b171cccd62a632502beb9191a0f..f0eaa1dfe17b051ef328a7adc29e540c509c58e1 100644 --- a/python/paddle/distribution/beta.py +++ b/python/paddle/distribution/beta.py @@ -92,20 +92,19 @@ class Beta(exponential_family.ExponentialFamily): self.alpha, self.beta = paddle.broadcast_tensors([alpha, beta]) self._dirichlet = dirichlet.Dirichlet( - paddle.stack([self.alpha, self.beta], -1)) + paddle.stack([self.alpha, self.beta], -1) + ) super(Beta, self).__init__(self._dirichlet._batch_shape) @property def mean(self): - """Mean of beta distribution. 
- """ + """Mean of beta distribution.""" return self.alpha / (self.alpha + self.beta) @property def variance(self): - """Variance of beat distribution - """ + """Variance of beat distribution""" sum = self.alpha + self.beta return self.alpha * self.beta / (sum.pow(2) * (sum + 1)) diff --git a/python/paddle/distribution/categorical.py b/python/paddle/distribution/categorical.py index 269e2b5567196883f363fcc1baa223ef62f173e4..bd6c570ca1f10b65a327440bcbeb8e2adae62965 100644 --- a/python/paddle/distribution/categorical.py +++ b/python/paddle/distribution/categorical.py @@ -15,9 +15,9 @@ import numpy as np import paddle from paddle.distribution import distribution -from paddle.fluid.data_feeder import (check_type, convert_dtype) +from paddle.fluid.data_feeder import check_type, convert_dtype from paddle.fluid.framework import _non_static_mode -from paddle.fluid.layers import (ops, tensor) +from paddle.fluid.layers import ops, tensor from paddle.tensor import multinomial @@ -90,9 +90,12 @@ class Categorical(distribution.Distribution): name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. """ if not _non_static_mode(): - check_type(logits, 'logits', - (np.ndarray, tensor.Variable, list, tuple), - 'Categorical') + check_type( + logits, + 'logits', + (np.ndarray, tensor.Variable, list, tuple), + 'Categorical', + ) self.name = name if name is not None else 'Categorical' self.dtype = 'float32' @@ -101,8 +104,10 @@ class Categorical(distribution.Distribution): self.logits = logits self.dtype = convert_dtype(logits.dtype) else: - if isinstance(logits, np.ndarray) and str( - logits.dtype) in ['float32', 'float64']: + if isinstance(logits, np.ndarray) and str(logits.dtype) in [ + 'float32', + 'float64', + ]: self.dtype = logits.dtype self.logits = self._to_tensor(logits)[0] if self.dtype != convert_dtype(self.logits.dtype): @@ -149,13 +154,15 @@ class Categorical(distribution.Distribution): if len(logits_shape) > 1: sample_shape = shape + logits_shape[:-1] logits = paddle.reshape( - self.logits, [np.prod(logits_shape[:-1]), logits_shape[-1]]) + self.logits, [np.prod(logits_shape[:-1]), logits_shape[-1]] + ) else: sample_shape = shape logits = self.logits - sample_index = multinomial(self._logits_to_probs(logits), num_samples, - True) + sample_index = multinomial( + self._logits_to_probs(logits), num_samples, True + ) # multinomial sample shape is (logits.shape[:-1], num_samples), need to # tanspose to (num_samples, logits.shape[:-1]) @@ -203,21 +210,22 @@ class Categorical(distribution.Distribution): if not _non_static_mode(): check_type(other, 'other', Categorical, 'kl_divergence') - logits = self.logits - \ - paddle.max(self.logits, axis=-1, keepdim=True) + logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True) other_logits = other.logits - paddle.max( - other.logits, axis=-1, keepdim=True) + other.logits, axis=-1, keepdim=True + ) e_logits = ops.exp(logits) other_e_logits = ops.exp(other_logits) z = paddle.sum(e_logits, axis=-1, keepdim=True) other_z = paddle.sum(other_e_logits, axis=-1, keepdim=True) prob = e_logits / z kl = paddle.sum( - prob * - (logits - paddle.log(z) - other_logits + paddle.log(other_z)), + prob + * (logits - paddle.log(z) - other_logits + paddle.log(other_z)), axis=-1, keepdim=True, - name=name) + name=name, + ) return kl @@ -246,8 +254,7 @@ class Categorical(distribution.Distribution): """ name = self.name + '_entropy' - logits = self.logits - \ - paddle.max(self.logits, axis=-1, 
keepdim=True) + logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True) e_logits = ops.exp(logits) z = paddle.sum(e_logits, axis=-1, keepdim=True) prob = e_logits / z @@ -293,17 +300,20 @@ class Categorical(distribution.Distribution): """ name = self.name + '_probs' if len(self._prob.shape) == 1: # batch_shape is empty - return paddle.gather(self._prob, - value.reshape([-1], name=name), - name=name).reshape(value.shape, name=name) + return paddle.gather( + self._prob, value.reshape([-1], name=name), name=name + ).reshape(value.shape, name=name) else: if len(value.shape) == 1: return paddle.take_along_axis( self._prob, - paddle.reshape(value, - (len(self._prob.shape) - 1) * [1] + [-1], - name=name), - axis=-1) + paddle.reshape( + value, + (len(self._prob.shape) - 1) * [1] + [-1], + name=name, + ), + axis=-1, + ) else: return paddle.take_along_axis(self._prob, value, axis=-1) diff --git a/python/paddle/distribution/constraint.py b/python/paddle/distribution/constraint.py index 4cde3d30a565ca5838f2275fc81763a0ec84abda..1891d20177913c64bd9ef0ef751200b5a236dcc4 100644 --- a/python/paddle/distribution/constraint.py +++ b/python/paddle/distribution/constraint.py @@ -15,21 +15,18 @@ import paddle class Constraint(object): - """Constraint condition for random variable. - """ + """Constraint condition for random variable.""" def __call__(self, value): raise NotImplementedError class Real(Constraint): - def __call__(self, value): return value == value class Range(Constraint): - def __init__(self, lower, upper): self._lower = lower self._upper = upper @@ -40,16 +37,15 @@ class Range(Constraint): class Positive(Constraint): - def __call__(self, value): - return value >= 0. + return value >= 0.0 class Simplex(Constraint): - def __call__(self, value): - return paddle.all(value >= 0, - axis=-1) and ((value.sum(-1) - 1).abs() < 1e-6) + return paddle.all(value >= 0, axis=-1) and ( + (value.sum(-1) - 1).abs() < 1e-6 + ) real = Real() diff --git a/python/paddle/distribution/dirichlet.py b/python/paddle/distribution/dirichlet.py index 7a8e918096821eead8c4d9a4c7a54ea792af12bd..c57bc41c01832465d157f2deaf64622ca8264bd2 100644 --- a/python/paddle/distribution/dirichlet.py +++ b/python/paddle/distribution/dirichlet.py @@ -73,11 +73,13 @@ class Dirichlet(exponential_family.ExponentialFamily): def __init__(self, concentration): if concentration.dim() < 1: raise ValueError( - "`concentration` parameter must be at least one dimensional") + "`concentration` parameter must be at least one dimensional" + ) self.concentration = concentration - super(Dirichlet, self).__init__(concentration.shape[:-1], - concentration.shape[-1:]) + super(Dirichlet, self).__init__( + concentration.shape[:-1], concentration.shape[-1:] + ) @property def mean(self): @@ -97,7 +99,8 @@ class Dirichlet(exponential_family.ExponentialFamily): """ concentration0 = self.concentration.sum(-1, keepdim=True) return (self.concentration * (concentration0 - self.concentration)) / ( - concentration0.pow(2) * (concentration0 + 1)) + concentration0.pow(2) * (concentration0 + 1) + ) def sample(self, shape=()): """Sample from dirichlet distribution. @@ -125,9 +128,11 @@ class Dirichlet(exponential_family.ExponentialFamily): Args: value (Tensor): Value to be evaluated. 
""" - return ((paddle.log(value) * (self.concentration - 1.0)).sum(-1) + - paddle.lgamma(self.concentration.sum(-1)) - - paddle.lgamma(self.concentration).sum(-1)) + return ( + (paddle.log(value) * (self.concentration - 1.0)).sum(-1) + + paddle.lgamma(self.concentration.sum(-1)) + - paddle.lgamma(self.concentration).sum(-1) + ) def entropy(self): """Entropy of Dirichlet distribution. @@ -137,15 +142,18 @@ class Dirichlet(exponential_family.ExponentialFamily): """ concentration0 = self.concentration.sum(-1) k = self.concentration.shape[-1] - return (paddle.lgamma(self.concentration).sum(-1) - - paddle.lgamma(concentration0) - - (k - concentration0) * paddle.digamma(concentration0) - - ((self.concentration - 1.0) * - paddle.digamma(self.concentration)).sum(-1)) + return ( + paddle.lgamma(self.concentration).sum(-1) + - paddle.lgamma(concentration0) + - (k - concentration0) * paddle.digamma(concentration0) + - ( + (self.concentration - 1.0) * paddle.digamma(self.concentration) + ).sum(-1) + ) @property def _natural_parameters(self): - return (self.concentration, ) + return (self.concentration,) def _log_normalizer(self, x): return x.lgamma().sum(-1) - paddle.lgamma(x.sum(-1)) @@ -154,8 +162,9 @@ class Dirichlet(exponential_family.ExponentialFamily): def _dirichlet(concentration, name=None): op_type = 'dirichlet' - check_variable_and_dtype(concentration, 'concentration', - ['float32', 'float64'], op_type) + check_variable_and_dtype( + concentration, 'concentration', ['float32', 'float64'], op_type + ) if in_dygraph_mode(): return paddle._C_ops.dirichlet(concentration) @@ -164,9 +173,12 @@ def _dirichlet(concentration, name=None): else: helper = LayerHelper(op_type, **locals()) out = helper.create_variable_for_type_inference( - dtype=concentration.dtype) - helper.append_op(type=op_type, - inputs={"Alpha": concentration}, - outputs={'Out': out}, - attrs={}) + dtype=concentration.dtype + ) + helper.append_op( + type=op_type, + inputs={"Alpha": concentration}, + outputs={'Out': out}, + attrs={}, + ) return out diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py index 14861d3794e3217de549011d3c5ec88805b97d7f..65a899c0fac68901f6b66193d623b0aaeabdb467 100644 --- a/python/paddle/distribution/distribution.py +++ b/python/paddle/distribution/distribution.py @@ -24,8 +24,12 @@ import warnings import numpy as np import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.fluid.data_feeder import (check_variable_and_dtype, convert_dtype) -from paddle.fluid.framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph +from paddle.fluid.data_feeder import check_variable_and_dtype, convert_dtype +from paddle.fluid.framework import ( + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, +) from paddle.fluid.layers import tensor @@ -45,10 +49,16 @@ class Distribution(object): def __init__(self, batch_shape=(), event_shape=()): - self._batch_shape = batch_shape if isinstance( - batch_shape, tuple) else tuple(batch_shape) - self._event_shape = event_shape if isinstance( - event_shape, tuple) else tuple(event_shape) + self._batch_shape = ( + batch_shape + if isinstance(batch_shape, tuple) + else tuple(batch_shape) + ) + self._event_shape = ( + event_shape + if isinstance(event_shape, tuple) + else tuple(event_shape) + ) super(Distribution, self).__init__() @@ -147,7 +157,8 @@ class Distribution(object): if is_variable and is_number: raise ValueError( - 'if one argument is Tensor, all arguments should be Tensor') + 'if one argument is Tensor, 
all arguments should be Tensor' + ) return is_variable @@ -162,15 +173,17 @@ class Distribution(object): """ numpy_args = [] variable_args = [] - tmp = 0. + tmp = 0.0 for arg in args: if isinstance(arg, float): arg = [arg] if not isinstance(arg, (list, tuple, np.ndarray, tensor.Variable)): raise TypeError( - "Type of input args must be float, list, numpy.ndarray or Tensor, but received type {}" - .format(type(arg))) + "Type of input args must be float, list, numpy.ndarray or Tensor, but received type {}".format( + type(arg) + ) + ) arg_np = np.array(arg) arg_dtype = arg_np.dtype @@ -208,20 +221,24 @@ class Distribution(object): value (Tensor): Change value's dtype if value's dtype is different from param. """ if _non_static_mode(): - if value.dtype != param.dtype and convert_dtype( - value.dtype) in ['float32', 'float64']: + if value.dtype != param.dtype and convert_dtype(value.dtype) in [ + 'float32', + 'float64', + ]: warnings.warn( "dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted." ) if in_dygraph_mode(): return _C_ops.cast(value, param.dtype) if _in_legacy_dygraph(): - return _legacy_C_ops.cast(value, 'in_dtype', value.dtype, - 'out_dtype', param.dtype) + return _legacy_C_ops.cast( + value, 'in_dtype', value.dtype, 'out_dtype', param.dtype + ) return value - check_variable_and_dtype(value, 'value', ['float32', 'float64'], - 'log_prob') + check_variable_and_dtype( + value, 'value', ['float32', 'float64'], 'log_prob' + ) if value.dtype != param.dtype: warnings.warn( "dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted." @@ -236,8 +253,11 @@ class Distribution(object): multi-dimensional, values of last axis denote the probabilities of occurrence of each of the events. """ - return (paddle.log(probs) - paddle.log1p(-probs)) \ - if is_binary else paddle.log(probs) + return ( + (paddle.log(probs) - paddle.log1p(-probs)) + if is_binary + else paddle.log(probs) + ) def _logits_to_probs(self, logits, is_binary=False): r""" @@ -245,5 +265,8 @@ class Distribution(object): log odds, whereas for the multi-dimensional case, the values along the last dimension denote the log probabilities of the events. 
""" - return paddle.nn.functional.sigmoid(logits) \ - if is_binary else paddle.nn.functional.softmax(logits, axis=-1) + return ( + paddle.nn.functional.sigmoid(logits) + if is_binary + else paddle.nn.functional.softmax(logits, axis=-1) + ) diff --git a/python/paddle/distribution/exponential_family.py b/python/paddle/distribution/exponential_family.py index 301c0cea13a582d31228cdc75a0c89c630223e70..0939e73f72643ab17ed3a8d016731b2d85031f42 100644 --- a/python/paddle/distribution/exponential_family.py +++ b/python/paddle/distribution/exponential_family.py @@ -62,9 +62,9 @@ class ExponentialFamily(distribution.Distribution): log_norm = self._log_normalizer(*natural_parameters) if _non_static_mode(): - grads = paddle.grad(log_norm.sum(), - natural_parameters, - create_graph=True) + grads = paddle.grad( + log_norm.sum(), natural_parameters, create_graph=True + ) else: grads = paddle.static.gradients(log_norm.sum(), natural_parameters) diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py index 225e327cc91e9a64e82b3a7c22b971a00b3b5efe..1f16fd5529b69b0cc6433ba972c6745e385d4eb2 100644 --- a/python/paddle/distribution/gumbel.py +++ b/python/paddle/distribution/gumbel.py @@ -70,7 +70,8 @@ class Gumbel(TransformedDistribution): if not isinstance(loc, (numbers.Real, framework.Variable)): raise TypeError( - f"Expected type of loc is Real|Variable, but got {type(loc)}") + f"Expected type of loc is Real|Variable, but got {type(loc)}" + ) if not isinstance(scale, (numbers.Real, framework.Variable)): raise TypeError( f"Expected type of scale is Real|Variable, but got {type(scale)}" @@ -90,7 +91,8 @@ class Gumbel(TransformedDistribution): finfo = np.finfo(dtype='float32') self.base_dist = paddle.distribution.Uniform( paddle.full_like(self.loc, float(finfo.tiny)), - paddle.full_like(self.loc, float(1 - finfo.eps))) + paddle.full_like(self.loc, float(1 - finfo.eps)), + ) self.transforms = () @@ -136,9 +138,11 @@ class Gumbel(TransformedDistribution): Tensor: The variance value. 
""" - temp = paddle.full(shape=self.loc.shape, - fill_value=math.pi * math.pi, - dtype=self.scale.dtype) + temp = paddle.full( + shape=self.loc.shape, + fill_value=math.pi * math.pi, + dtype=self.scale.dtype, + ) return paddle.pow(self.scale, 2) * temp / 6 @@ -230,13 +234,19 @@ class Gumbel(TransformedDistribution): """ exp_trans = paddle.distribution.ExpTransform() affine_trans_1 = paddle.distribution.AffineTransform( - paddle.full(shape=self.scale.shape, - fill_value=0, - dtype=self.loc.dtype), -paddle.ones_like(self.scale)) + paddle.full( + shape=self.scale.shape, fill_value=0, dtype=self.loc.dtype + ), + -paddle.ones_like(self.scale), + ) affine_trans_2 = paddle.distribution.AffineTransform( - self.loc, -self.scale) + self.loc, -self.scale + ) return affine_trans_2.forward( exp_trans.inverse( affine_trans_1.forward( - exp_trans.inverse(self._base.sample(shape))))) + exp_trans.inverse(self._base.sample(shape)) + ) + ) + ) diff --git a/python/paddle/distribution/independent.py b/python/paddle/distribution/independent.py index 9f02d802fc88f458fb27dcf23b45cfd9b9fbf5a8..a12ab32b85a4e14d7cdd71d129e853986c9cfeb0 100644 --- a/python/paddle/distribution/independent.py +++ b/python/paddle/distribution/independent.py @@ -61,11 +61,14 @@ class Independent(distribution.Distribution): self._reinterpreted_batch_rank = reinterpreted_batch_rank shape = base.batch_shape + base.event_shape - super(Independent, - self).__init__(batch_shape=shape[:len(base.batch_shape) - - reinterpreted_batch_rank], - event_shape=shape[len(base.batch_shape) - - reinterpreted_batch_rank:]) + super(Independent, self).__init__( + batch_shape=shape[ + : len(base.batch_shape) - reinterpreted_batch_rank + ], + event_shape=shape[ + len(base.batch_shape) - reinterpreted_batch_rank : + ], + ) @property def mean(self): @@ -79,15 +82,17 @@ class Independent(distribution.Distribution): return self._base.sample(shape) def log_prob(self, value): - return self._sum_rightmost(self._base.log_prob(value), - self._reinterpreted_batch_rank) + return self._sum_rightmost( + self._base.log_prob(value), self._reinterpreted_batch_rank + ) def prob(self, value): return self.log_prob(value).exp() def entropy(self): - return self._sum_rightmost(self._base.entropy(), - self._reinterpreted_batch_rank) + return self._sum_rightmost( + self._base.entropy(), self._reinterpreted_batch_rank + ) def _sum_rightmost(self, value, n): return value.sum(list(range(-n, 0))) if n > 0 else value diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py index aa288a57ba0b77deca879c70f40223443a259b0a..4f95366391a155eb3365002529e594bf9fed3492 100644 --- a/python/paddle/distribution/kl.py +++ b/python/paddle/distribution/kl.py @@ -85,8 +85,9 @@ def register_kl(cls_p, cls_q): def kl_beta_beta(): pass # insert implementation here """ - if (not issubclass(cls_p, Distribution) - or not issubclass(cls_q, Distribution)): + if not issubclass(cls_p, Distribution) or not issubclass( + cls_q, Distribution + ): raise TypeError('cls_p and cls_q must be subclass of Distribution') def decorator(f): @@ -100,8 +101,11 @@ def _dispatch(cls_p, cls_q): """Multiple dispatch into concrete implement function.""" # find all matched super class pair of p and q - matchs = [(super_p, super_q) for super_p, super_q in _REGISTER_TABLE - if issubclass(cls_p, super_p) and issubclass(cls_q, super_q)] + matchs = [ + (super_p, super_q) + for super_p, super_q in _REGISTER_TABLE + if issubclass(cls_p, super_p) and issubclass(cls_q, super_q) + ] if not matchs: raise NotImplementedError 
@@ -110,16 +114,20 @@ def _dispatch(cls_p, cls_q): if _REGISTER_TABLE[left_p, left_q] is not _REGISTER_TABLE[right_p, right_q]: warnings.warn( - 'Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'. - format(cls_p.__name__, cls_q.__name__, left_p.__name__, - right_q.__name__), RuntimeWarning) + 'Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format( + cls_p.__name__, + cls_q.__name__, + left_p.__name__, + right_q.__name__, + ), + RuntimeWarning, + ) return _REGISTER_TABLE[left_p, left_q] @functools.total_ordering class _Compare(object): - def __init__(self, *classes): self.classes = classes @@ -137,22 +145,33 @@ class _Compare(object): @register_kl(Beta, Beta) def _kl_beta_beta(p, q): - return ((q.alpha.lgamma() + q.beta.lgamma() + (p.alpha + p.beta).lgamma()) - - (p.alpha.lgamma() + p.beta.lgamma() + (q.alpha + q.beta).lgamma()) + - ((p.alpha - q.alpha) * p.alpha.digamma()) + - ((p.beta - q.beta) * p.beta.digamma()) + - (((q.alpha + q.beta) - (p.alpha + p.beta)) * - (p.alpha + p.beta).digamma())) + return ( + (q.alpha.lgamma() + q.beta.lgamma() + (p.alpha + p.beta).lgamma()) + - (p.alpha.lgamma() + p.beta.lgamma() + (q.alpha + q.beta).lgamma()) + + ((p.alpha - q.alpha) * p.alpha.digamma()) + + ((p.beta - q.beta) * p.beta.digamma()) + + ( + ((q.alpha + q.beta) - (p.alpha + p.beta)) + * (p.alpha + p.beta).digamma() + ) + ) @register_kl(Dirichlet, Dirichlet) def _kl_dirichlet_dirichlet(p, q): return ( - (p.concentration.sum(-1).lgamma() - q.concentration.sum(-1).lgamma()) - - ((p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)) + - (((p.concentration - q.concentration) * - (p.concentration.digamma() - - p.concentration.sum(-1).digamma().unsqueeze(-1))).sum(-1))) + (p.concentration.sum(-1).lgamma() - q.concentration.sum(-1).lgamma()) + - ((p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)) + + ( + ( + (p.concentration - q.concentration) + * ( + p.concentration.digamma() + - p.concentration.sum(-1).digamma().unsqueeze(-1) + ) + ).sum(-1) + ) + ) @register_kl(Categorical, Categorical) @@ -177,8 +196,7 @@ def _kl_laplace_laplace(p, q): @register_kl(ExponentialFamily, ExponentialFamily) def _kl_expfamily_expfamily(p, q): - """Compute kl-divergence using `Bregman divergences `_ - """ + """Compute kl-divergence using `Bregman divergences `_""" if not type(p) == type(q): raise NotImplementedError @@ -194,19 +212,22 @@ def _kl_expfamily_expfamily(p, q): try: if _non_static_mode(): - p_grads = paddle.grad(p_log_norm, - p_natural_params, - create_graph=True) + p_grads = paddle.grad( + p_log_norm, p_natural_params, create_graph=True + ) else: p_grads = paddle.static.gradients(p_log_norm, p_natural_params) except RuntimeError as e: raise TypeError( - "Cann't compute kl_divergence({cls_p}, {cls_q}) use bregman divergence. Please register_kl({cls_p}, {cls_q})." - .format(cls_p=type(p).__name__, cls_q=type(q).__name__)) from e + "Cann't compute kl_divergence({cls_p}, {cls_q}) use bregman divergence. 
Please register_kl({cls_p}, {cls_q}).".format( + cls_p=type(p).__name__, cls_q=type(q).__name__ + ) + ) from e kl = q._log_normalizer(*q_natural_params) - p_log_norm - for p_param, q_param, p_grad in zip(p_natural_params, q_natural_params, - p_grads): + for p_param, q_param, p_grad in zip( + p_natural_params, q_natural_params, p_grads + ): term = (q_param - p_param) * p_grad kl -= _sum_rightmost(term, len(q.event_shape)) diff --git a/python/paddle/distribution/laplace.py b/python/paddle/distribution/laplace.py index 2b117f93bacec87526b28b9b570b29f88d75a521..a7a9301724ca462eb9d4e82e18617f06698caeea 100644 --- a/python/paddle/distribution/laplace.py +++ b/python/paddle/distribution/laplace.py @@ -55,7 +55,8 @@ class Laplace(distribution.Distribution): def __init__(self, loc, scale): if not isinstance(loc, (numbers.Real, framework.Variable)): raise TypeError( - f"Expected type of loc is Real|Variable, but got {type(loc)}") + f"Expected type of loc is Real|Variable, but got {type(loc)}" + ) if not isinstance(scale, (numbers.Real, framework.Variable)): raise TypeError( @@ -68,8 +69,9 @@ class Laplace(distribution.Distribution): if isinstance(scale, numbers.Real): scale = paddle.full(shape=(), fill_value=scale) - if (len(scale.shape) > 0 or len(loc.shape) > 0) and (loc.dtype - == scale.dtype): + if (len(scale.shape) > 0 or len(loc.shape) > 0) and ( + loc.dtype == scale.dtype + ): self.loc, self.scale = paddle.broadcast_tensors([loc, scale]) else: self.loc, self.scale = loc, scale @@ -135,10 +137,14 @@ class Laplace(distribution.Distribution): value = paddle.full(shape=(), fill_value=value) if value.dtype != self.scale.dtype: value = paddle.cast(value, self.scale.dtype) - if len(self.scale.shape) > 0 or len(self.loc.shape) > 0 or len( - value.shape) > 0: + if ( + len(self.scale.shape) > 0 + or len(self.loc.shape) > 0 + or len(value.shape) > 0 + ): loc, scale, value = paddle.broadcast_tensors( - [self.loc, self.scale, value]) + [self.loc, self.scale, value] + ) else: loc, scale = self.loc, self.scale @@ -178,7 +184,7 @@ class Laplace(distribution.Distribution): loc, scale, value = self._validate_value(value) log_scale = -paddle.log(2 * scale) - return (log_scale - paddle.abs(value - loc) / scale) + return log_scale - paddle.abs(value - loc) / scale def entropy(self): r"""Entropy of Laplace distribution. @@ -238,8 +244,11 @@ class Laplace(distribution.Distribution): # [0.54758132]) """ loc, scale, value = self._validate_value(value) - iterm = (0.5 * (value - loc).sign() * - paddle.expm1(-(value - loc).abs() / scale)) + iterm = ( + 0.5 + * (value - loc).sign() + * paddle.expm1(-(value - loc).abs() / scale) + ) return 0.5 - iterm @@ -276,7 +285,7 @@ class Laplace(distribution.Distribution): loc, scale, value = self._validate_value(value) term = value - 0.5 - return (loc - scale * (term).sign() * paddle.log1p(-2 * term.abs())) + return loc - scale * (term).sign() * paddle.log1p(-2 * term.abs()) def sample(self, shape=()): r"""Generate samples of the specified shape. @@ -299,7 +308,8 @@ class Laplace(distribution.Distribution): """ if not isinstance(shape, tuple): raise TypeError( - f'Expected shape should be tuple[int], but got {type(shape)}') + f'Expected shape should be tuple[int], but got {type(shape)}' + ) with paddle.no_grad(): return self.rsample(shape) @@ -325,19 +335,22 @@ class Laplace(distribution.Distribution): """ eps = self._get_eps() - shape = self._extend_shape(shape) or (1, ) - uniform = paddle.uniform(shape=shape, - min=float(np.nextafter(-1, 1)) + eps / 2, - max=1. 
- eps / 2, - dtype=self.loc.dtype) + shape = self._extend_shape(shape) or (1,) + uniform = paddle.uniform( + shape=shape, + min=float(np.nextafter(-1, 1)) + eps / 2, + max=1.0 - eps / 2, + dtype=self.loc.dtype, + ) if len(self.scale.shape) == 0 and len(self.loc.shape) == 0: loc, scale, uniform = paddle.broadcast_tensors( - [self.loc, self.scale, uniform]) + [self.loc, self.scale, uniform] + ) else: loc, scale = self.loc, self.scale - return (loc - scale * uniform.sign() * paddle.log1p(-uniform.abs())) + return loc - scale * uniform.sign() * paddle.log1p(-uniform.abs()) def _get_eps(self): """ @@ -351,8 +364,10 @@ class Laplace(distribution.Distribution): Float: An eps value by different data types. """ eps = 1.19209e-07 - if (self.loc.dtype == paddle.float64 - or self.loc.dtype == paddle.complex128): + if ( + self.loc.dtype == paddle.float64 + or self.loc.dtype == paddle.complex128 + ): eps = 2.22045e-16 return eps @@ -400,7 +415,7 @@ class Laplace(distribution.Distribution): var_ratio = other.scale / self.scale t = paddle.abs(self.loc - other.loc) - term1 = ((self.scale * paddle.exp(-t / self.scale) + t) / other.scale) + term1 = (self.scale * paddle.exp(-t / self.scale) + t) / other.scale term2 = paddle.log(var_ratio) return term1 + term2 - 1 diff --git a/python/paddle/distribution/lognormal.py b/python/paddle/distribution/lognormal.py index b171e1ecbc61ebb6e4e491064c3a20a5bf1dae4a..f49eee4d2e01a97d3fdf2e647b99b9e71c7c9b16 100644 --- a/python/paddle/distribution/lognormal.py +++ b/python/paddle/distribution/lognormal.py @@ -15,8 +15,7 @@ import paddle from paddle.distribution.normal import Normal from paddle.distribution.transform import ExpTransform -from paddle.distribution.transformed_distribution import \ - TransformedDistribution +from paddle.distribution.transformed_distribution import TransformedDistribution class LogNormal(TransformedDistribution): @@ -104,8 +103,9 @@ class LogNormal(TransformedDistribution): Returns: Tensor: variance value. """ - return (paddle.expm1(self._base.variance) * - paddle.exp(2 * self._base.mean + self._base.variance)) + return paddle.expm1(self._base.variance) * paddle.exp( + 2 * self._base.mean + self._base.variance + ) def entropy(self): r"""Shannon entropy in nats. 
diff --git a/python/paddle/distribution/multinomial.py b/python/paddle/distribution/multinomial.py index 22f8aa820f9936f69491531f4645e847f7db62ec..56bb3da44817f06ecb60085c681bcbaf38e4853e 100644 --- a/python/paddle/distribution/multinomial.py +++ b/python/paddle/distribution/multinomial.py @@ -14,6 +14,7 @@ import paddle from paddle.distribution import categorical, distribution + try: from collections.abc import Iterable except: @@ -77,12 +78,14 @@ class Multinomial(distribution.Distribution): if probs.dim() < 1: raise ValueError( - 'probs parameter shoule not be none and over one dimension') + 'probs parameter shoule not be none and over one dimension' + ) self.probs = probs / probs.sum(-1, keepdim=True) self.total_count = total_count self._categorical = categorical.Categorical( - logits=self._probs_to_logits(probs)) + logits=self._probs_to_logits(probs) + ) super(Multinomial, self).__init__(probs.shape[:-1], probs.shape[-1:]) @@ -128,11 +131,15 @@ class Multinomial(distribution.Distribution): value = paddle.cast(value, self.probs.dtype) logits, value = paddle.broadcast_tensors( - [paddle.log(self.probs), value]) + [paddle.log(self.probs), value] + ) logits[(value == 0) & (paddle.isinf(logits))] = 0 - return (paddle.lgamma(value.sum(-1) + 1) - - paddle.lgamma(value + 1).sum(-1) + (value * logits).sum(-1)) + return ( + paddle.lgamma(value.sum(-1) + 1) + - paddle.lgamma(value + 1).sum(-1) + + (value * logits).sum(-1) + ) def sample(self, shape=()): """draw sample data from multinomial distribution @@ -143,11 +150,17 @@ class Multinomial(distribution.Distribution): if not isinstance(shape, Iterable): raise TypeError('sample shape must be Iterable object.') - samples = self._categorical.sample([ - self.total_count, - ] + list(shape)) - return paddle.nn.functional.one_hot(samples, self.probs.shape[-1]).cast( - self.probs.dtype).sum(0) + samples = self._categorical.sample( + [ + self.total_count, + ] + + list(shape) + ) + return ( + paddle.nn.functional.one_hot(samples, self.probs.shape[-1]) + .cast(self.probs.dtype) + .sum(0) + ) def entropy(self): """entropy of multinomial distribution @@ -155,18 +168,18 @@ class Multinomial(distribution.Distribution): Returns: Tensor: entropy value """ - n = paddle.full(shape=[1], - fill_value=self.total_count, - dtype=self.probs.dtype) + n = paddle.full( + shape=[1], fill_value=self.total_count, dtype=self.probs.dtype + ) support = paddle.arange( - self.total_count + 1, - dtype=self.probs.dtype).reshape((-1, ) + - (1, ) * len(self.probs.shape))[1:] + self.total_count + 1, dtype=self.probs.dtype + ).reshape((-1,) + (1,) * len(self.probs.shape))[1:] binomial_pmf = paddle.exp(self._binomial_logpmf(n, support)) - return ((n * self._categorical.entropy() - paddle.lgamma(n + 1)) + - ((binomial_pmf * paddle.lgamma(support + 1)).sum([0, -1]))) + return (n * self._categorical.entropy() - paddle.lgamma(n + 1)) + ( + (binomial_pmf * paddle.lgamma(support + 1)).sum([0, -1]) + ) def _binomial_logpmf(self, count, value): logits = self._probs_to_logits(self.probs, is_binary=True) @@ -175,9 +188,11 @@ class Multinomial(distribution.Distribution): factor_k = paddle.lgamma(value + 1) factor_nmk = paddle.lgamma(count - value + 1) - norm = (count * _clip_by_zero(logits) + - count * paddle.log1p(paddle.exp(-paddle.abs(logits))) - - factor_n) + norm = ( + count * _clip_by_zero(logits) + + count * paddle.log1p(paddle.exp(-paddle.abs(logits))) + - factor_n + ) return value * logits - factor_k - factor_nmk - norm diff --git a/python/paddle/distribution/normal.py 
b/python/paddle/distribution/normal.py index f76f13b250e4c82e149352b580a2cf76e6eddf1f..a05c8e4703b65ad9523ac578dbe2e9387004f171 100644 --- a/python/paddle/distribution/normal.py +++ b/python/paddle/distribution/normal.py @@ -16,10 +16,17 @@ import math import numpy as np import paddle from paddle.distribution import distribution -from paddle.fluid.data_feeder import (check_type, convert_dtype) +from paddle.fluid.data_feeder import check_type, convert_dtype from paddle.fluid.framework import _non_static_mode -from paddle.fluid.layers import (elementwise_add, elementwise_div, - elementwise_sub, nn, ops, tensor) +from paddle.fluid.layers import ( + elementwise_add, + elementwise_div, + elementwise_sub, + nn, + ops, + tensor, +) + try: from collections.abc import Iterable except: @@ -89,12 +96,18 @@ class Normal(distribution.Distribution): def __init__(self, loc, scale, name=None): if not _non_static_mode(): - check_type(loc, 'loc', - (int, float, np.ndarray, tensor.Variable, list, tuple), - 'Normal') - check_type(scale, 'scale', - (int, float, np.ndarray, tensor.Variable, list, tuple), - 'Normal') + check_type( + loc, + 'loc', + (int, float, np.ndarray, tensor.Variable, list, tuple), + 'Normal', + ) + check_type( + scale, + 'scale', + (int, float, np.ndarray, tensor.Variable, list, tuple), + 'Normal', + ) self.batch_size_unknown = False self.all_arg_is_float = False @@ -114,11 +127,15 @@ class Normal(distribution.Distribution): else: if isinstance(loc, float) and isinstance(scale, float): self.all_arg_is_float = True - if isinstance(loc, np.ndarray) and str( - loc.dtype) in ['float32', 'float64']: + if isinstance(loc, np.ndarray) and str(loc.dtype) in [ + 'float32', + 'float64', + ]: self.dtype = loc.dtype - elif isinstance(scale, np.ndarray) and str( - scale.dtype) in ['float32', 'float64']: + elif isinstance(scale, np.ndarray) and str(scale.dtype) in [ + 'float32', + 'float64', + ]: self.dtype = scale.dtype # pylint: disable=unbalanced-tuple-unpacking self.loc, self.scale = self._to_tensor(loc, scale) @@ -169,22 +186,21 @@ class Normal(distribution.Distribution): if self.batch_size_unknown: output_shape = shape + batch_shape zero_tmp = tensor.fill_constant_batch_size_like( - self.loc + self.scale, batch_shape + shape, self.dtype, 0.) + self.loc + self.scale, batch_shape + shape, self.dtype, 0.0 + ) zero_tmp_reshape = nn.reshape(zero_tmp, output_shape) zero_tmp_shape = nn.shape(zero_tmp_reshape) - normal_random_tmp = nn.gaussian_random(zero_tmp_shape, - mean=0., - std=1., - seed=seed, - dtype=self.dtype) + normal_random_tmp = nn.gaussian_random( + zero_tmp_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype + ) output = normal_random_tmp * (zero_tmp_reshape + self.scale) output = elementwise_add(output, self.loc, name=name) return output else: output_shape = shape + batch_shape output = nn.gaussian_random( - output_shape, mean=0., std=1., seed=seed, dtype=self.dtype) * ( - tensor.zeros(output_shape, dtype=self.dtype) + self.scale) + output_shape, mean=0.0, std=1.0, seed=seed, dtype=self.dtype + ) * (tensor.zeros(output_shape, dtype=self.dtype) + self.scale) output = elementwise_add(output, self.loc, name=name) if self.all_arg_is_float: return nn.reshape(output, shape, name=name) @@ -206,7 +222,7 @@ class Normal(distribution.Distribution): shape = self._extend_shape(tuple(shape)) eps = paddle.normal(shape=shape) - return (self.loc + eps * self.scale) + return self.loc + eps * self.scale def entropy(self): r"""Shannon entropy in nats. 
@@ -227,13 +243,14 @@ class Normal(distribution.Distribution): """ name = self.name + '_entropy' batch_shape = list((self.loc + self.scale).shape) - zero_tmp = tensor.fill_constant_batch_size_like(self.loc + self.scale, - batch_shape, self.dtype, - 0.) - return elementwise_add(0.5 + zero_tmp, - 0.5 * math.log(2 * math.pi) + nn.log( - (self.scale + zero_tmp)), - name=name) + zero_tmp = tensor.fill_constant_batch_size_like( + self.loc + self.scale, batch_shape, self.dtype, 0.0 + ) + return elementwise_add( + 0.5 + zero_tmp, + 0.5 * math.log(2 * math.pi) + nn.log((self.scale + zero_tmp)), + name=name, + ) def log_prob(self, value): """Log probability density/mass function. @@ -250,10 +267,11 @@ class Normal(distribution.Distribution): var = self.scale * self.scale log_scale = nn.log(self.scale) - return elementwise_sub(-1. * ((value - self.loc) * (value - self.loc)) / - (2. * var), - log_scale + math.log(math.sqrt(2. * math.pi)), - name=name) + return elementwise_sub( + -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var), + log_scale + math.log(math.sqrt(2.0 * math.pi)), + name=name, + ) def probs(self, value): """Probability density/mass function. @@ -269,10 +287,13 @@ class Normal(distribution.Distribution): value = self._check_values_dtype_in_probs(self.loc, value) var = self.scale * self.scale - return elementwise_div(ops.exp(-1. * ((value - self.loc) * - (value - self.loc)) / (2. * var)), - (math.sqrt(2 * math.pi) * self.scale), - name=name) + return elementwise_div( + ops.exp( + -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var) + ), + (math.sqrt(2 * math.pi) * self.scale), + name=name, + ) def kl_divergence(self, other): r"""The KL-divergence between two normal distributions. @@ -312,9 +333,9 @@ class Normal(distribution.Distribution): name = self.name + '_kl_divergence' var_ratio = self.scale / other.scale - var_ratio = (var_ratio * var_ratio) + var_ratio = var_ratio * var_ratio t1 = (self.loc - other.loc) / other.scale - t1 = (t1 * t1) - return elementwise_add(0.5 * var_ratio, - 0.5 * (t1 - 1. - nn.log(var_ratio)), - name=name) + t1 = t1 * t1 + return elementwise_add( + 0.5 * var_ratio, 0.5 * (t1 - 1.0 - nn.log(var_ratio)), name=name + ) diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py index 6cb0e048bab7e7219f2397cbd2351f36d7585d4d..f31dfe1b154c6c7702014abfdc92ae6303d9b834 100644 --- a/python/paddle/distribution/transform.py +++ b/python/paddle/distribution/transform.py @@ -20,20 +20,33 @@ import typing import paddle import paddle.nn.functional as F -from paddle.distribution import (constraint, distribution, - transformed_distribution, variable) +from paddle.distribution import ( + constraint, + distribution, + transformed_distribution, + variable, +) __all__ = [ # noqa - 'Transform', 'AbsTransform', 'AffineTransform', 'ChainTransform', - 'ExpTransform', 'IndependentTransform', 'PowerTransform', - 'ReshapeTransform', 'SigmoidTransform', 'SoftmaxTransform', - 'StackTransform', 'StickBreakingTransform', 'TanhTransform' + 'Transform', + 'AbsTransform', + 'AffineTransform', + 'ChainTransform', + 'ExpTransform', + 'IndependentTransform', + 'PowerTransform', + 'ReshapeTransform', + 'SigmoidTransform', + 'SoftmaxTransform', + 'StackTransform', + 'StickBreakingTransform', + 'TanhTransform', ] class Type(enum.Enum): - """Mapping type of a transformation. 
- """ + """Mapping type of a transformation.""" + BIJECTION = 'bijection' # bijective(injective and surjective) INJECTION = 'injection' # injective-only SURJECTION = 'surjection' # surjective-only @@ -41,8 +54,7 @@ class Type(enum.Enum): @classmethod def is_injective(cls, _type): - """Both bijection and injection are injective mapping. - """ + """Both bijection and injection are injective mapping.""" return _type in (cls.BIJECTION, cls.INJECTION) @@ -138,7 +150,8 @@ class Transform(object): """ if isinstance(input, distribution.Distribution): return transformed_distribution.TransformedDistribution( - input, [self]) + input, [self] + ) if isinstance(input, Transform): return ChainTransform([self, input]) return self.forward(input) @@ -157,11 +170,13 @@ class Transform(object): """ if not isinstance(x, paddle.fluid.framework.Variable): raise TypeError( - f"Expected 'x' is a Tensor or Real, but got {type(x)}.") + f"Expected 'x' is a Tensor or Real, but got {type(x)}." + ) if x.dim() < self._domain.event_rank: raise ValueError( f'The dimensions of x({x.dim()}) should be ' - f'grater than or equal to {self._domain.event_rank}') + f'grater than or equal to {self._domain.event_rank}' + ) return self._forward(x) def inverse(self, y): @@ -176,11 +191,13 @@ class Transform(object): """ if not isinstance(y, paddle.fluid.framework.Variable): raise TypeError( - f"Expected 'y' is a Tensor or Real, but got {type(y)}.") + f"Expected 'y' is a Tensor or Real, but got {type(y)}." + ) if y.dim() < self._codomain.event_rank: raise ValueError( f'The dimensions of y({y.dim()}) should be ' - f'grater than or equal to {self._codomain.event_rank}') + f'grater than or equal to {self._codomain.event_rank}' + ) return self._inverse(y) def forward_log_det_jacobian(self, x): @@ -196,16 +213,21 @@ class Transform(object): """ if not isinstance(x, paddle.fluid.framework.Variable): raise TypeError( - f"Expected 'y' is a Tensor or Real, but got {type(x)}.") - if isinstance(x, paddle.fluid.framework.Variable - ) and x.dim() < self._domain.event_rank: + f"Expected 'y' is a Tensor or Real, but got {type(x)}." + ) + if ( + isinstance(x, paddle.fluid.framework.Variable) + and x.dim() < self._domain.event_rank + ): raise ValueError( f'The dimensions of x({x.dim()}) should be ' - f'grater than or equal to {self._domain.event_rank}') + f'grater than or equal to {self._domain.event_rank}' + ) if not self._is_injective(): raise NotImplementedError( "forward_log_det_jacobian can't be implemented for non-injective" - "transforms.") + "transforms." + ) return self._call_forward_log_det_jacobian(x) @@ -226,7 +248,8 @@ class Transform(object): if y.dim() < self._codomain.event_rank: raise ValueError( f'The dimensions of y({y.dim()}) should be ' - f'grater than or equal to {self._codomain.event_rank}') + f'grater than or equal to {self._codomain.event_rank}' + ) return self._call_inverse_log_det_jacobian(y) def forward_shape(self, shape): @@ -240,7 +263,8 @@ class Transform(object): """ if not isinstance(shape, typing.Sequence): raise TypeError( - f"Expected shape is Sequence[int] type, but got {type(shape)}.") + f"Expected shape is Sequence[int] type, but got {type(shape)}." + ) return self._forward_shape(shape) def inverse_shape(self, shape): @@ -254,7 +278,8 @@ class Transform(object): """ if not isinstance(shape, typing.Sequence): raise TypeError( - f"Expected shape is Sequence[int] type, but got {type(shape)}.") + f"Expected shape is Sequence[int] type, but got {type(shape)}." 
+ ) return self._inverse_shape(shape) @property @@ -287,7 +312,8 @@ class Transform(object): return -self._inverse_log_det_jacobian(self.forward(x)) raise NotImplementedError( 'Neither _forward_log_det_jacobian nor _inverse_log_det_jacobian' - 'is implemented. One of them is required.') + 'is implemented. One of them is required.' + ) def _call_inverse_log_det_jacobian(self, y): """Inner method called by ``inverse_log_det_jacobian``""" @@ -297,7 +323,8 @@ class Transform(object): return -self._forward_log_det_jacobian(self._inverse(y)) raise NotImplementedError( 'Neither _forward_log_det_jacobian nor _inverse_log_det_jacobian ' - 'is implemented. One of them is required') + 'is implemented. One of them is required' + ) def _forward_shape(self, shape): """Inner method called by ``forward_shape``, which is used to infer the @@ -420,7 +447,8 @@ class AffineTransform(Transform): raise TypeError(f"Expected 'loc' is a Tensor, but got {type(loc)}") if not isinstance(scale, paddle.fluid.framework.Variable): raise TypeError( - f"Expected scale is a Tensor, but got {type(scale)}") + f"Expected scale is a Tensor, but got {type(scale)}" + ) self._loc = loc self._scale = scale super(AffineTransform, self).__init__() @@ -446,13 +474,17 @@ class AffineTransform(Transform): return tuple( paddle.broadcast_shape( paddle.broadcast_shape(shape, self._loc.shape), - self._scale.shape)) + self._scale.shape, + ) + ) def _inverse_shape(self, shape): return tuple( paddle.broadcast_shape( paddle.broadcast_shape(shape, self._loc.shape), - self._scale.shape)) + self._scale.shape, + ) + ) @property def _domain(self): @@ -504,7 +536,8 @@ class ChainTransform(Transform): ) if not all(isinstance(t, Transform) for t in transforms): raise TypeError( - "All elements of transforms should be Transform type.") + "All elements of transforms should be Transform type." + ) self.transforms = transforms super(ChainTransform, self).__init__() @@ -523,11 +556,12 @@ class ChainTransform(Transform): return y def _forward_log_det_jacobian(self, x): - value = 0. 
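The argument checks above guard the public Transform interface (forward, inverse, forward_log_det_jacobian). A hedged usage sketch with ExpTransform, assuming the paddle.distribution API exported in this module's __all__ (illustration only, not part of the patch):

import paddle
from paddle.distribution import ExpTransform

t = ExpTransform()
x = paddle.to_tensor([0.0, 1.0, 2.0])
y = t.forward(x)                     # elementwise exp(x)
x_back = t.inverse(y)                # elementwise log(y), recovers x
ldj = t.forward_log_det_jacobian(x)  # equals x, since d exp(x)/dx = exp(x)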
+ value = 0.0 event_rank = self._domain.event_rank for t in self.transforms: - value += self._sum_rightmost(t.forward_log_det_jacobian(x), - event_rank - t._domain.event_rank) + value += self._sum_rightmost( + t.forward_log_det_jacobian(x), event_rank - t._domain.event_rank + ) x = t.forward(x) event_rank += t._codomain.event_rank - t._domain.event_rank return value @@ -682,7 +716,8 @@ class IndependentTransform(Transform): def __init__(self, base, reinterpreted_batch_rank): if not isinstance(base, Transform): raise TypeError( - f"Expected 'base' is Transform type, but get {type(base)}") + f"Expected 'base' is Transform type, but get {type(base)}" + ) if reinterpreted_batch_rank <= 0: raise ValueError( f"Expected 'reinterpreted_batch_rank' is grater than zero, but got {reinterpreted_batch_rank}" @@ -707,7 +742,8 @@ class IndependentTransform(Transform): def _forward_log_det_jacobian(self, x): return self._base.forward_log_det_jacobian(x).sum( - list(range(-self._reinterpreted_batch_rank, 0))) + list(range(-self._reinterpreted_batch_rank, 0)) + ) def _forward_shape(self, shape): return self._base.forward_shape(shape) @@ -717,13 +753,15 @@ class IndependentTransform(Transform): @property def _domain(self): - return variable.Independent(self._base._domain, - self._reinterpreted_batch_rank) + return variable.Independent( + self._base._domain, self._reinterpreted_batch_rank + ) @property def _codomain(self): - return variable.Independent(self._base._codomain, - self._reinterpreted_batch_rank) + return variable.Independent( + self._base._codomain, self._reinterpreted_batch_rank + ) class PowerTransform(Transform): @@ -757,7 +795,8 @@ class PowerTransform(Transform): def __init__(self, power): if not isinstance(power, paddle.fluid.framework.Variable): raise TypeError( - f"Expected 'power' is a tensor, but got {type(power)}") + f"Expected 'power' is a tensor, but got {type(power)}" + ) self._power = power super(PowerTransform, self).__init__() @@ -826,13 +865,16 @@ class ReshapeTransform(Transform): def __init__(self, in_event_shape, out_event_shape): if not isinstance(in_event_shape, typing.Sequence) or not isinstance( - out_event_shape, typing.Sequence): + out_event_shape, typing.Sequence + ): raise TypeError( f"Expected type of 'in_event_shape' and 'out_event_shape' is " f"Squence[int], but got 'in_event_shape': {in_event_shape}, " - f"'out_event_shape': {out_event_shape}") + f"'out_event_shape': {out_event_shape}" + ) if functools.reduce(operator.mul, in_event_shape) != functools.reduce( - operator.mul, out_event_shape): + operator.mul, out_event_shape + ): raise ValueError( f"The numel of 'in_event_shape' should be 'out_event_shape', " f"but got {functools.reduce(operator.mul, in_event_shape)}!={functools.reduce(operator.mul, out_event_shape)}" @@ -860,39 +902,45 @@ class ReshapeTransform(Transform): def _forward(self, x): return x.reshape( - tuple(x.shape)[:x.dim() - len(self._in_event_shape)] + - self._out_event_shape) + tuple(x.shape)[: x.dim() - len(self._in_event_shape)] + + self._out_event_shape + ) def _inverse(self, y): return y.reshape( - tuple(y.shape)[:y.dim() - len(self._out_event_shape)] + - self._in_event_shape) + tuple(y.shape)[: y.dim() - len(self._out_event_shape)] + + self._in_event_shape + ) def _forward_shape(self, shape): if len(shape) < len(self._in_event_shape): raise ValueError( f"Expected length of 'shape' is not less than {len(self._in_event_shape)}, but got {len(shape)}" ) - if shape[-len(self._in_event_shape):] != self._in_event_shape: + if 
shape[-len(self._in_event_shape) :] != self._in_event_shape: raise ValueError( f"Event shape mismatch, expected: {self._in_event_shape}, but got {shape[-len(self._in_event_shape):]}" ) - return tuple(shape[:-len(self._in_event_shape)]) + self._out_event_shape + return ( + tuple(shape[: -len(self._in_event_shape)]) + self._out_event_shape + ) def _inverse_shape(self, shape): if len(shape) < len(self._out_event_shape): raise ValueError( f"Expected 'shape' length is not less than {len(self._out_event_shape)}, but got {len(shape)}" ) - if shape[-len(self._out_event_shape):] != self._out_event_shape: + if shape[-len(self._out_event_shape) :] != self._out_event_shape: raise ValueError( f"Event shape mismatch, expected: {self._out_event_shape}, but got {shape[-len(self._out_event_shape):]}" ) - return tuple(shape[:-len(self._out_event_shape)]) + self._in_event_shape + return ( + tuple(shape[: -len(self._out_event_shape)]) + self._in_event_shape + ) def _forward_log_det_jacobian(self, x): # paddle.zeros not support zero dimension Tensor. - shape = x.shape[:x.dim() - len(self._in_event_shape)] or [1] + shape = x.shape[: x.dim() - len(self._in_event_shape)] or [1] return paddle.zeros(shape, dtype=x.dtype) @@ -927,7 +975,7 @@ class SigmoidTransform(Transform): @property def _codomain(self): - return variable.Variable(False, 0, constraint.Range(0., 1.)) + return variable.Variable(False, 0, constraint.Range(0.0, 1.0)) def _forward(self, x): return F.sigmoid(x) @@ -996,7 +1044,7 @@ class SoftmaxTransform(Transform): class StackTransform(Transform): - r""" ``StackTransform`` applies a sequence of transformations along the + r"""``StackTransform`` applies a sequence of transformations along the specific axis. Args: @@ -1043,7 +1091,8 @@ class StackTransform(Transform): ) if not all(isinstance(t, Transform) for t in transforms): raise TypeError( - 'Expected all element in transforms is Transform Type.') + 'Expected all element in transforms is Transform Type.' + ) if not isinstance(axis, int): raise TypeError(f"Expected 'axis' is int, but got{type(axis)}.") @@ -1063,34 +1112,45 @@ class StackTransform(Transform): def _forward(self, x): self._check_size(x) - return paddle.stack([ - t.forward(v) - for v, t in zip(paddle.unstack(x, self._axis), self._transforms) - ], self._axis) + return paddle.stack( + [ + t.forward(v) + for v, t in zip(paddle.unstack(x, self._axis), self._transforms) + ], + self._axis, + ) def _inverse(self, y): self._check_size(y) - return paddle.stack([ - t.inverse(v) - for v, t in zip(paddle.unstack(y, self._axis), self._transforms) - ], self._axis) + return paddle.stack( + [ + t.inverse(v) + for v, t in zip(paddle.unstack(y, self._axis), self._transforms) + ], + self._axis, + ) def _forward_log_det_jacobian(self, x): self._check_size(x) - return paddle.stack([ - t.forward_log_det_jacobian(v) - for v, t in zip(paddle.unstack(x, self._axis), self._transforms) - ], self._axis) + return paddle.stack( + [ + t.forward_log_det_jacobian(v) + for v, t in zip(paddle.unstack(x, self._axis), self._transforms) + ], + self._axis, + ) def _check_size(self, v): if not (-v.dim() <= self._axis < v.dim()): raise ValueError( f'Input dimensions {v.dim()} should be grater than stack ' - f'transform axis {self._axis}.') + f'transform axis {self._axis}.' + ) if v.shape[self._axis] != len(self._transforms): raise ValueError( f'Input size along {self._axis} should be equal to the ' - f'length of transforms.') + f'length of transforms.' 
+ ) @property def _domain(self): @@ -1098,8 +1158,9 @@ class StackTransform(Transform): @property def _codomain(self): - return variable.Stack([t._codomain for t in self._transforms], - self._axis) + return variable.Stack( + [t._codomain for t in self._transforms], self._axis + ) class StickBreakingTransform(Transform): @@ -1132,8 +1193,9 @@ class StickBreakingTransform(Transform): offset = x.shape[-1] + 1 - paddle.ones([x.shape[-1]]).cumsum(-1) z = F.sigmoid(x - offset.log()) z_cumprod = (1 - z).cumprod(-1) - return F.pad(z, [0] * 2 * (len(x.shape) - 1) + [0, 1], value=1) * \ - F.pad(z_cumprod, [0] * 2 * (len(x.shape) - 1) + [1, 0], value=1) + return F.pad(z, [0] * 2 * (len(x.shape) - 1) + [0, 1], value=1) * F.pad( + z_cumprod, [0] * 2 * (len(x.shape) - 1) + [1, 0], value=1 + ) def _inverse(self, y): y_crop = y[..., :-1] @@ -1151,12 +1213,12 @@ class StickBreakingTransform(Transform): def _forward_shape(self, shape): if not shape: raise ValueError(f"Expected 'shape' is not empty, but got {shape}") - return shape[:-1] + (shape[-1] + 1, ) + return shape[:-1] + (shape[-1] + 1,) def _inverse_shape(self, shape): if not shape: raise ValueError(f"Expected 'shape' is not empty, but got {shape}") - return shape[:-1] + (shape[-1] - 1, ) + return shape[:-1] + (shape[-1] - 1,) @property def _domain(self): @@ -1220,4 +1282,4 @@ class TanhTransform(Transform): See details: https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80 """ - return 2. * (math.log(2.) - x - F.softplus(-2. * x)) + return 2.0 * (math.log(2.0) - x - F.softplus(-2.0 * x)) diff --git a/python/paddle/distribution/transformed_distribution.py b/python/paddle/distribution/transformed_distribution.py index 8433ed0c910980f5533d93d91c62f588b948a759..1f97150cc9aa78181c7a3945c77098280c7fab3b 100644 --- a/python/paddle/distribution/transformed_distribution.py +++ b/python/paddle/distribution/transformed_distribution.py @@ -65,8 +65,9 @@ class TransformedDistribution(distribution.Distribution): self._base = base self._transforms = transforms if not transforms: - super(TransformedDistribution, - self).__init__(base.batch_shape, base.event_shape) + super(TransformedDistribution, self).__init__( + base.batch_shape, base.event_shape + ) return if len(base.batch_shape + base.event_shape) < chain._domain.event_rank: raise ValueError( @@ -74,15 +75,23 @@ class TransformedDistribution(distribution.Distribution): ) if chain._domain.event_rank > len(base.event_shape): base = independent.Independent( - (base, chain._domain.event_rank - len(base.event_shape))) + (base, chain._domain.event_rank - len(base.event_shape)) + ) - transformed_shape = chain.forward_shape(base.batch_shape + - base.event_shape) - transformed_event_rank = chain._codomain.event_rank + \ - max(len(base.event_shape) - chain._domain.event_rank, 0) + transformed_shape = chain.forward_shape( + base.batch_shape + base.event_shape + ) + transformed_event_rank = chain._codomain.event_rank + max( + len(base.event_shape) - chain._domain.event_rank, 0 + ) super(TransformedDistribution, self).__init__( - transformed_shape[:len(transformed_shape) - transformed_event_rank], - transformed_shape[len(transformed_shape) - transformed_event_rank:]) + transformed_shape[ + : len(transformed_shape) - transformed_event_rank + ], + transformed_shape[ + len(transformed_shape) - transformed_event_rank : + ], + ) def sample(self, shape=()): """Sample from ``TransformedDistribution``. 
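The TanhTransform hunk above relies on the identity 2 * (log 2 - x - softplus(-2x)) == log(1 - tanh(x)^2). A small NumPy check of that identity, using a numerically stable softplus (illustration only, not part of the patch):

import numpy as np

def softplus(v):
    # log(1 + exp(v)) written in a numerically stable form
    return np.maximum(v, 0.0) + np.log1p(np.exp(-np.abs(v)))

x = np.linspace(-3.0, 3.0, 7)
lhs = 2.0 * (np.log(2.0) - x - softplus(-2.0 * x))
rhs = np.log(1.0 - np.tanh(x) ** 2)
assert np.allclose(lhs, rhs)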
@@ -127,12 +136,13 @@ class TransformedDistribution(distribution.Distribution): for t in reversed(self._transforms): x = t.inverse(y) event_rank += t._domain.event_rank - t._codomain.event_rank - log_prob = log_prob - \ - _sum_rightmost(t.forward_log_det_jacobian( - x), event_rank - t._domain.event_rank) + log_prob = log_prob - _sum_rightmost( + t.forward_log_det_jacobian(x), event_rank - t._domain.event_rank + ) y = x - log_prob += _sum_rightmost(self._base.log_prob(y), - event_rank - len(self._base.event_shape)) + log_prob += _sum_rightmost( + self._base.log_prob(y), event_rank - len(self._base.event_shape) + ) return log_prob diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py index 5f9dc6c3d37dff1e48376ea59a1e34ae4f23a3b1..4e48dc606223f772a3bc6c7c348700b266565783 100644 --- a/python/paddle/distribution/uniform.py +++ b/python/paddle/distribution/uniform.py @@ -15,10 +15,19 @@ import numpy as np from paddle import _C_ops, _legacy_C_ops from paddle.distribution import distribution -from paddle.fluid.data_feeder import (check_type, convert_dtype) -from paddle.fluid.framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph -from paddle.fluid.layers import (elementwise_add, elementwise_div, - elementwise_sub, nn, tensor) +from paddle.fluid.data_feeder import check_type, convert_dtype +from paddle.fluid.framework import ( + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, +) +from paddle.fluid.layers import ( + elementwise_add, + elementwise_div, + elementwise_sub, + nn, + tensor, +) class Uniform(distribution.Distribution): @@ -86,12 +95,18 @@ class Uniform(distribution.Distribution): def __init__(self, low, high, name=None): if not _non_static_mode(): - check_type(low, 'low', - (int, float, np.ndarray, tensor.Variable, list, tuple), - 'Uniform') - check_type(high, 'high', - (int, float, np.ndarray, tensor.Variable, list, tuple), - 'Uniform') + check_type( + low, + 'low', + (int, float, np.ndarray, tensor.Variable, list, tuple), + 'Uniform', + ) + check_type( + high, + 'high', + (int, float, np.ndarray, tensor.Variable, list, tuple), + 'Uniform', + ) self.all_arg_is_float = False self.batch_size_unknown = False @@ -111,11 +126,15 @@ class Uniform(distribution.Distribution): else: if isinstance(low, float) and isinstance(high, float): self.all_arg_is_float = True - if isinstance(low, np.ndarray) and str( - low.dtype) in ['float32', 'float64']: + if isinstance(low, np.ndarray) and str(low.dtype) in [ + 'float32', + 'float64', + ]: self.dtype = low.dtype - elif isinstance(high, np.ndarray) and str( - high.dtype) in ['float32', 'float64']: + elif isinstance(high, np.ndarray) and str(high.dtype) in [ + 'float32', + 'float64', + ]: self.dtype = high.dtype # pylint: disable=unbalanced-tuple-unpacking self.low, self.high = self._to_tensor(low, high) @@ -145,27 +164,33 @@ class Uniform(distribution.Distribution): if self.batch_size_unknown: output_shape = shape + batch_shape zero_tmp = tensor.fill_constant_batch_size_like( - self.low + self.high, batch_shape + shape, self.dtype, 0.) 
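The TransformedDistribution.log_prob hunk above implements the change-of-variables rule log p_Y(y) = log p_X(x) - sum of forward log-det-Jacobians, with x = T^{-1}(y). A tiny NumPy illustration for a single exp transform of a standard normal, which yields the log-normal density (an assumed example, not part of the patch):

import math
import numpy as np

def base_log_prob(x):
    # standard normal base distribution
    return -0.5 * x**2 - 0.5 * math.log(2.0 * math.pi)

def transformed_log_prob(y):
    x = np.log(y)   # inverse of the exp transform
    log_det = x     # forward log-det-Jacobian of exp evaluated at x
    return base_log_prob(x) - log_det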
+ self.low + self.high, batch_shape + shape, self.dtype, 0.0 + ) uniform_random_tmp = nn.uniform_random_batch_size_like( zero_tmp, zero_tmp.shape, dtype=self.dtype, - min=0., - max=1., - seed=seed) + min=0.0, + max=1.0, + seed=seed, + ) zero_tmp_reshape = nn.reshape(zero_tmp, output_shape) - uniform_random_tmp_reshape = nn.reshape(uniform_random_tmp, - output_shape) - output = uniform_random_tmp_reshape * (zero_tmp_reshape + - self.high - self.low) + uniform_random_tmp_reshape = nn.reshape( + uniform_random_tmp, output_shape + ) + output = uniform_random_tmp_reshape * ( + zero_tmp_reshape + self.high - self.low + ) output = elementwise_add(output, self.low, name=name) return output else: output_shape = shape + batch_shape output = nn.uniform_random( - output_shape, dtype=self.dtype, min=0., max=1., - seed=seed) * (tensor.zeros(output_shape, dtype=self.dtype) + - (self.high - self.low)) + output_shape, dtype=self.dtype, min=0.0, max=1.0, seed=seed + ) * ( + tensor.zeros(output_shape, dtype=self.dtype) + + (self.high - self.low) + ) output = elementwise_add(output, self.low, name=name) if self.all_arg_is_float: return nn.reshape(output, shape, name=name) @@ -194,10 +219,12 @@ class Uniform(distribution.Distribution): return nn.log(lb * ub) - nn.log(self.high - self.low) if _in_legacy_dygraph(): - lb = _legacy_C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, - 'out_dtype', value.dtype) - ub = _legacy_C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, - 'out_dtype', value.dtype) + lb = _legacy_C_ops.cast( + lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype', value.dtype + ) + ub = _legacy_C_ops.cast( + ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype', value.dtype + ) return nn.log(lb * ub) - nn.log(self.high - self.low) name = self.name + '_log_prob' @@ -205,9 +232,9 @@ class Uniform(distribution.Distribution): ub_bool = value < self.high lb = tensor.cast(lb_bool, dtype=value.dtype) ub = tensor.cast(ub_bool, dtype=value.dtype) - return elementwise_sub(nn.log(lb * ub), - nn.log(self.high - self.low), - name=name) + return elementwise_sub( + nn.log(lb * ub), nn.log(self.high - self.low), name=name + ) def probs(self, value): """Probability density/mass function. 
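For the Uniform.log_prob path reformatted above, a hedged standalone NumPy equivalent of log(lb * ub) - log(high - low), where lb and ub are the boundary indicators cast to the value dtype (illustration only, not part of the patch):

import numpy as np

def uniform_log_prob(value, low, high):
    lb = (value > low).astype(value.dtype)   # lower-bound indicator
    ub = (value < high).astype(value.dtype)  # upper-bound indicator
    with np.errstate(divide='ignore'):       # log(0) -> -inf outside the support
        return np.log(lb * ub) - np.log(high - low)

# e.g. uniform_log_prob(np.array([0.3, 1.5]), 0.0, 1.0) gives [0.0, -inf]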
@@ -230,10 +257,12 @@ class Uniform(distribution.Distribution): return (lb * ub) / (self.high - self.low) if _in_legacy_dygraph(): - lb = _legacy_C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, - 'out_dtype', value.dtype) - ub = _legacy_C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, - 'out_dtype', value.dtype) + lb = _legacy_C_ops.cast( + lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype', value.dtype + ) + ub = _legacy_C_ops.cast( + ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype', value.dtype + ) return (lb * ub) / (self.high - self.low) name = self.name + '_probs' diff --git a/python/paddle/distribution/variable.py b/python/paddle/distribution/variable.py index f6cced041645b1fe5eb134039991886b7764108f..2bde6c79aeb98af0499e1e651adc8a1d4dd98e2d 100644 --- a/python/paddle/distribution/variable.py +++ b/python/paddle/distribution/variable.py @@ -43,13 +43,11 @@ class Variable(object): class Real(Variable): - def __init__(self, event_rank=0): super(Real, self).__init__(False, event_rank, constraint.real) class Positive(Variable): - def __init__(self, event_rank=0): super(Positive, self).__init__(False, event_rank, constraint.positive) @@ -66,23 +64,24 @@ class Independent(Variable): def __init__(self, base, reinterpreted_batch_rank): self._base = base self._reinterpreted_batch_rank = reinterpreted_batch_rank - super(Independent, - self).__init__(base.is_discrete, - base.event_rank + reinterpreted_batch_rank) + super(Independent, self).__init__( + base.is_discrete, base.event_rank + reinterpreted_batch_rank + ) def constraint(self, value): ret = self._base.constraint(value) if ret.dim() < self._reinterpreted_batch_rank: raise ValueError( "Input dimensions must be equal or grater than {}".format( - self._reinterpreted_batch_rank)) - return ret.reshape(ret.shape[:ret.dim() - - self.reinterpreted_batch_rank] + - (-1, )).all(-1) + self._reinterpreted_batch_rank + ) + ) + return ret.reshape( + ret.shape[: ret.dim() - self.reinterpreted_batch_rank] + (-1,) + ).all(-1) class Stack(Variable): - def __init__(self, vars, axis=0): self._vars = vars self._axis = axis @@ -102,12 +101,18 @@ class Stack(Variable): if not (-value.dim() <= self._axis < value.dim()): raise ValueError( f'Input dimensions {value.dim()} should be grater than stack ' - f'constraint axis {self._axis}.') - - return paddle.stack([ - var.check(value) - for var, value in zip(self._vars, paddle.unstack(value, self._axis)) - ], self._axis) + f'constraint axis {self._axis}.' + ) + + return paddle.stack( + [ + var.check(value) + for var, value in zip( + self._vars, paddle.unstack(value, self._axis) + ) + ], + self._axis, + ) real = Real() diff --git a/python/paddle/fft.py b/python/paddle/fft.py index b873d7b1df61033de6cfe2f541b8be3d660bd08f..8bc95cd37e9f240c985bb3dfee68aae5a0695eed 100644 --- a/python/paddle/fft.py +++ b/python/paddle/fft.py @@ -51,61 +51,76 @@ __all__ = [ def _check_normalization(norm): if norm not in ['forward', 'backward', 'ortho']: raise ValueError( - "Unexpected norm: {}. Norm should be forward, backward or ortho". - format(norm)) + "Unexpected norm: {}. 
Norm should be forward, backward or ortho".format( + norm + ) + ) def _check_fft_n(n): if not isinstance(n, int): raise ValueError( - "Invalid FFT argument n({}), it shoule be an integer.".format(n)) + "Invalid FFT argument n({}), it shoule be an integer.".format(n) + ) if n <= 0: raise ValueError( - "Invalid FFT argument n({}), it should be positive.".format(n)) + "Invalid FFT argument n({}), it should be positive.".format(n) + ) def _check_fft_shape(x, s): ndim = x.ndim if not isinstance(s, Sequence): raise ValueError( - "Invaid FFT argument s({}), it should be a sequence of integers.") + "Invaid FFT argument s({}), it should be a sequence of integers." + ) if len(s) > ndim: raise ValueError( "Length of FFT argument s should not be larger than the rank of input. " - "Received s: {}, rank of x: {}".format(s, ndim)) + "Received s: {}, rank of x: {}".format(s, ndim) + ) for size in s: if not isinstance(size, int) or size <= 0: - raise ValueError("FFT sizes {} contains invalid value ({})".format( - s, size)) + raise ValueError( + "FFT sizes {} contains invalid value ({})".format(s, size) + ) def _check_fft_axis(x, axis): ndim = x.ndim if not isinstance(axis, int): raise ValueError( - "Invalid FFT axis ({}), it shoule be an integer.".format(axis)) + "Invalid FFT axis ({}), it shoule be an integer.".format(axis) + ) if axis < -ndim or axis >= ndim: raise ValueError( "Invalid FFT axis ({}), it should be in range [-{}, {})".format( - axis, ndim, ndim)) + axis, ndim, ndim + ) + ) def _check_fft_axes(x, axes): ndim = x.ndim if not isinstance(axes, Sequence): raise ValueError( - "Invalid FFT axes ({}), it should be a sequence of integers.". - format(axes)) + "Invalid FFT axes ({}), it should be a sequence of integers.".format( + axes + ) + ) if len(axes) > ndim: raise ValueError( "Length of fft axes should not be larger than the rank of input. 
" - "Received, len of axes: {}, rank of x: {}".format(len(axes), ndim)) + "Received, len of axes: {}, rank of x: {}".format(len(axes), ndim) + ) for axis in axes: if not isinstance(axis, int) or axis < -ndim or axis >= ndim: raise ValueError( - "FFT axes {} contains invalid value ({}), it should be in range [-{}, {})" - .format(axes, axis, ndim, ndim)) + "FFT axes {} contains invalid value ({}), it should be in range [-{}, {})".format( + axes, axis, ndim, ndim + ) + ) def _resize_fft_input(x, s, axes): @@ -127,10 +142,12 @@ def _resize_fft_input(x, s, axes): slices.append((0, s[i])) if axes_to_slice: - x = paddle.slice(x, - axes_to_slice, - starts=[item[0] for item in slices], - ends=[item[1] for item in slices]) + x = paddle.slice( + x, + axes_to_slice, + starts=[item[0] for item in slices], + ends=[item[1] for item in slices], + ) if axes_to_pad: padding_widths = [0] * (2 * ndim) for axis, pad in zip(axes_to_pad, paddings): @@ -146,8 +163,9 @@ def _normalize_axes(x, axes): def _check_at_least_ndim(x, rank): if x.ndim < rank: - raise ValueError("The rank of the input ({}) should >= {}".format( - x.ndim, rank)) + raise ValueError( + "The rank of the input ({}) should >= {}".format(x.ndim, rank) + ) # public APIs 1d @@ -197,13 +215,9 @@ def fft(x, n=None, axis=-1, norm="backward", name=None): """ if is_integer(x) or is_floating_point(x): - return fft_r2c(x, - n, - axis, - norm, - forward=True, - onesided=False, - name=name) + return fft_r2c( + x, n, axis, norm, forward=True, onesided=False, name=name + ) else: return fft_c2c(x, n, axis, norm, forward=True, name=name) @@ -266,13 +280,9 @@ def ifft(x, n=None, axis=-1, norm="backward", name=None): """ if is_integer(x) or is_floating_point(x): - return fft_r2c(x, - n, - axis, - norm, - forward=False, - onesided=False, - name=name) + return fft_r2c( + x, n, axis, norm, forward=False, onesided=False, name=name + ) else: return fft_c2c(x, n, axis, norm, forward=False, name=name) @@ -536,13 +546,9 @@ def fftn(x, s=None, axes=None, norm="backward", name=None): # [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]] """ if is_integer(x) or is_floating_point(x): - return fftn_r2c(x, - s, - axes, - norm, - forward=True, - onesided=False, - name=name) + return fftn_r2c( + x, s, axes, norm, forward=True, onesided=False, name=name + ) else: return fftn_c2c(x, s, axes, norm, forward=True, name=name) @@ -608,13 +614,9 @@ def ifftn(x, s=None, axes=None, norm="backward", name=None): # (-0.1666666716337204+0.28867512941360474j)]]) """ if is_integer(x) or is_floating_point(x): - return fftn_r2c(x, - s, - axes, - norm, - forward=False, - onesided=False, - name=name) + return fftn_r2c( + x, s, axes, norm, forward=False, onesided=False, name=name + ) else: return fftn_c2c(x, s, axes, norm, forward=False, name=name) @@ -914,13 +916,17 @@ def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." 
- .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return fftn(x, s, axes, norm, name) @@ -979,13 +985,17 @@ def ifft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." - .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return ifftn(x, s, axes, norm, name) @@ -1038,13 +1048,17 @@ def rfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." - .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return rfftn(x, s, axes, norm, name) @@ -1090,13 +1104,17 @@ def irfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." - .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return irfftn(x, s, axes, norm, name) @@ -1135,13 +1153,17 @@ def hfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." - .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return hfftn(x, s, axes, norm, name) @@ -1187,13 +1209,17 @@ def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None): if s is not None: if not isinstance(s, Sequence) or len(s) != 2: raise ValueError( - "Invalid FFT argument s ({}), it should be a sequence of 2 integers." - .format(s)) + "Invalid FFT argument s ({}), it should be a sequence of 2 integers.".format( + s + ) + ) if axes is not None: if not isinstance(axes, Sequence) or len(axes) != 2: raise ValueError( - "Invalid FFT argument axes ({}), it should be a sequence of 2 integers." 
- .format(axes)) + "Invalid FFT argument axes ({}), it should be a sequence of 2 integers.".format( + axes + ) + ) return ihfftn(x, s, axes, norm, name) @@ -1417,10 +1443,9 @@ def fft_c2c(x, n, axis, norm, forward, name): dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1442,8 +1467,16 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name): if in_dygraph_mode(): out = _C_ops.fft_r2c(x, axes, norm, forward, onesided) elif _in_legacy_dygraph(): - attrs = ('axes', axes, 'normalization', norm, 'forward', forward, - 'onesided', onesided) + attrs = ( + 'axes', + axes, + 'normalization', + norm, + 'forward', + forward, + 'onesided', + onesided, + ) out = getattr(_legacy_C_ops, op_type)(x, *attrs) else: inputs = { @@ -1458,12 +1491,12 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name): helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference( - _real_to_complex_dtype(dtype)) + _real_to_complex_dtype(dtype) + ) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1491,8 +1524,16 @@ def fft_c2r(x, n, axis, norm, forward, name): out = _C_ops.fft_c2r(x, axes, norm, forward, 0) elif _in_legacy_dygraph(): if n is not None: - attrs = ('axes', axes, 'normalization', norm, 'forward', forward, - 'last_dim_size', n) + attrs = ( + 'axes', + axes, + 'normalization', + norm, + 'forward', + forward, + 'last_dim_size', + n, + ) else: attrs = ('axes', axes, 'normalization', norm, 'forward', forward) out = getattr(_legacy_C_ops, op_type)(x, *attrs) @@ -1506,12 +1547,12 @@ def fft_c2r(x, n, axis, norm, forward, name): helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference( - _complex_to_real_dtype(dtype)) + _complex_to_real_dtype(dtype) + ) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1539,8 +1580,10 @@ def fftn_c2c(x, s, axes, norm, forward, name): if s is not None: if len(s) != len(axes): raise ValueError( - "Length of s ({}) and length of axes ({}) does not match.". - format(len(s), len(axes))) + "Length of s ({}) and length of axes ({}) does not match.".format( + len(s), len(axes) + ) + ) s = [s[i] for i in axes_argsoft] if s is not None: @@ -1562,10 +1605,9 @@ def fftn_c2c(x, s, axes, norm, forward, name): dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1591,8 +1633,10 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name): if s is not None: if len(s) != len(axes): raise ValueError( - "Length of s ({}) and length of axes ({}) does not match.". 
- format(len(s), len(axes))) + "Length of s ({}) and length of axes ({}) does not match.".format( + len(s), len(axes) + ) + ) s = [s[i] for i in axes_argsoft] + [s[-1]] if s is not None: @@ -1604,8 +1648,16 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name): if in_dygraph_mode(): out = _C_ops.fft_r2c(x, axes, norm, forward, onesided) elif _in_legacy_dygraph(): - attrs = ('axes', axes, 'normalization', norm, 'forward', forward, - 'onesided', onesided) + attrs = ( + 'axes', + axes, + 'normalization', + norm, + 'forward', + forward, + 'onesided', + onesided, + ) out = getattr(_legacy_C_ops, op_type)(x, *attrs) else: inputs = { @@ -1620,12 +1672,12 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name): helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference( - _real_to_complex_dtype(dtype)) + _real_to_complex_dtype(dtype) + ) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1654,8 +1706,10 @@ def fftn_c2r(x, s, axes, norm, forward, name): if s is not None: if len(s) != len(axes): raise ValueError( - "Length of s ({}) and length of axes ({}) does not match.". - format(len(s), len(axes))) + "Length of s ({}) and length of axes ({}) does not match.".format( + len(s), len(axes) + ) + ) s = [s[i] for i in axes_argsoft] + [s[-1]] if s is not None: @@ -1673,8 +1727,16 @@ def fftn_c2r(x, s, axes, norm, forward, name): out = _C_ops.fft_c2r(x, axes, norm, forward, 0) elif _in_legacy_dygraph(): if s: - attrs = ('axes', axes, 'normalization', norm, 'forward', forward, - 'last_dim_size', s[-1]) + attrs = ( + 'axes', + axes, + 'normalization', + norm, + 'forward', + forward, + 'last_dim_size', + s[-1], + ) else: attrs = ('axes', axes, 'normalization', norm, 'forward', forward) out = getattr(_legacy_C_ops, op_type)(x, *attrs) @@ -1688,10 +1750,10 @@ def fftn_c2r(x, s, axes, norm, forward, name): helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference( - _complex_to_real_dtype(dtype)) + _complex_to_real_dtype(dtype) + ) outputs = {"Out": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 00f222b3e8b20e5a028690bc5ab88a568e720a2f..53d50a8b4a3ed378aa203f9458a4dc440e080716 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -22,8 +22,9 @@ core_suffix = 'so' if os.name == 'nt': core_suffix = 'pyd' -legacy_core = os.path.abspath( - os.path.dirname(__file__)) + os.sep + 'core.' + core_suffix +legacy_core = ( + os.path.abspath(os.path.dirname(__file__)) + os.sep + 'core.' + core_suffix +) if os.path.exists(legacy_core): sys.stderr.write('Deleting legacy file ' + legacy_core + '\n') try: @@ -34,6 +35,7 @@ if os.path.exists(legacy_core): # import all class inside framework into fluid module from . import framework from .framework import * + # import all class inside executor into fluid module from . 
import executor from .executor import * @@ -70,10 +72,23 @@ from .param_attr import ParamAttr, WeightNormParamAttr from .data_feeder import DataFeeder from .core import LoDTensor, LoDTensorArray, Scope, _Scope -from .core import CPUPlace, XPUPlace, CUDAPlace, CUDAPinnedPlace, NPUPlace, IPUPlace, MLUPlace, CustomPlace +from .core import ( + CPUPlace, + XPUPlace, + CUDAPlace, + CUDAPinnedPlace, + NPUPlace, + IPUPlace, + MLUPlace, + CustomPlace, +) from .incubate import fleet -from .transpiler import DistributeTranspiler, \ - memory_optimize, release_memory, DistributeTranspilerConfig +from .transpiler import ( + DistributeTranspiler, + memory_optimize, + release_memory, + DistributeTranspilerConfig, +) from .lod_tensor import create_lod_tensor, create_random_int_lodtensor from . import clip from . import profiler @@ -93,7 +108,14 @@ from .dygraph.varbase_patch_methods import monkey_patch_varbase from . import generator from .core import _cuda_synchronize from .generator import Generator -from .trainer_desc import TrainerDesc, DistMultiTrainer, PipelineTrainer, HeterPipelineTrainer, MultiTrainer, HeterXpuTrainer +from .trainer_desc import ( + TrainerDesc, + DistMultiTrainer, + PipelineTrainer, + HeterPipelineTrainer, + MultiTrainer, + HeterXpuTrainer, +) from .transpiler import HashName, RoundRobin from .backward import append_backward @@ -101,10 +123,18 @@ Tensor = LoDTensor enable_imperative = enable_dygraph disable_imperative = disable_dygraph -__all__ = framework.__all__ + executor.__all__ + \ - trainer_desc.__all__ + transpiler.__all__ + \ - parallel_executor.__all__ + lod_tensor.__all__ + \ - data_feed_desc.__all__ + compiler.__all__ + backward.__all__ + generator.__all__ + [ +__all__ = ( + framework.__all__ + + executor.__all__ + + trainer_desc.__all__ + + transpiler.__all__ + + parallel_executor.__all__ + + lod_tensor.__all__ + + data_feed_desc.__all__ + + compiler.__all__ + + backward.__all__ + + generator.__all__ + + [ 'io', 'initializer', 'embedding', @@ -142,8 +172,9 @@ __all__ = framework.__all__ + executor.__all__ + \ 'install_check', 'save', 'load', - '_cuda_synchronize' + '_cuda_synchronize', ] +) def __bootstrap__(): @@ -169,18 +200,21 @@ def __bootstrap__(): num_threads = 1 if num_threads > 1: - print('WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation ' - 'speed will not be optimized if you use data parallel. It will ' - 'fail if this PaddlePaddle binary is compiled with OpenBlas since' - ' OpenBlas does not support multi-threads.'.format(num_threads), - file=sys.stderr) + print( + 'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation ' + 'speed will not be optimized if you use data parallel. It will ' + 'fail if this PaddlePaddle binary is compiled with OpenBlas since' + ' OpenBlas does not support multi-threads.'.format(num_threads), + file=sys.stderr, + ) print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr) os.environ['OMP_NUM_THREADS'] = str(num_threads) flag_prefix = "FLAGS_" read_env_flags = [ - key[len(flag_prefix):] for key in core.globals().keys() + key[len(flag_prefix) :] + for key in core.globals().keys() if key.startswith(flag_prefix) ] diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py index d31b4d496483c7198770991d2daa9aacc7f06913..2d83246df7f636036a1e6d886bb349c130da291e 100644 --- a/python/paddle/fluid/average.py +++ b/python/paddle/fluid/average.py @@ -14,6 +14,7 @@ import numpy as np import warnings + """ Class of all kinds of Average. 
@@ -27,8 +28,11 @@ __all__ = ["WeightedAverage"] def _is_number_(var): - return isinstance(var, int) or isinstance( - var, float) or (isinstance(var, np.ndarray) and var.shape == (1, )) + return ( + isinstance(var, int) + or isinstance(var, float) + or (isinstance(var, np.ndarray) and var.shape == (1,)) + ) def _is_number_or_matrix_(var): @@ -59,8 +63,10 @@ class WeightedAverage(object): def __init__(self): warnings.warn( - "The %s is deprecated, please use fluid.metrics.Accuracy instead." % - (self.__class__.__name__), Warning) + "The %s is deprecated, please use fluid.metrics.Accuracy instead." + % (self.__class__.__name__), + Warning, + ) self.reset() def reset(self): @@ -70,7 +76,8 @@ class WeightedAverage(object): def add(self, value, weight): if not _is_number_or_matrix_(value): raise ValueError( - "The 'value' must be a number(int, float) or a numpy ndarray.") + "The 'value' must be a number(int, float) or a numpy ndarray." + ) if not _is_number_(weight): raise ValueError("The 'weight' must be a number(int, float).") @@ -84,5 +91,6 @@ class WeightedAverage(object): def eval(self): if self.numerator is None or self.denominator is None: raise ValueError( - "There is no data to be averaged in WeightedAverage.") + "There is no data to be averaged in WeightedAverage." + ) return self.numerator / self.denominator diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 8da8faeefb119300f9c9961a00d7f7259e13aab6..16708268bd5a0b044a6790a7db42747089789382 100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -26,6 +26,7 @@ from . import log_helper import paddle.fluid from .data_feeder import check_type import warnings + try: from collections.abc import Sequence except: @@ -36,13 +37,12 @@ __all__ = [ 'gradients', ] -_logger = log_helper.get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = log_helper.get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class ProgramStats(object): - def __init__(self, block, ops): self.block = block self.ops = ops @@ -52,8 +52,10 @@ class ProgramStats(object): def get_input_nodes(self): input_names = [] for name in self.var_op_deps: - if len(self.var_op_deps[name]["var_as_output_ops"]) == 0 and \ - len(self.var_op_deps[name]["var_as_input_ops"]) > 0: + if ( + len(self.var_op_deps[name]["var_as_output_ops"]) == 0 + and len(self.var_op_deps[name]["var_as_input_ops"]) > 0 + ): if self.block.var(name).persistable: continue input_names.append(name) @@ -115,16 +117,21 @@ class ProgramStats(object): """ def is_amp_cast(op): - return op.desc.type() == 'cast' and self.block.var( - op.desc.input_arg_names()[0]).persistable + return ( + op.desc.type() == 'cast' + and self.block.var(op.desc.input_arg_names()[0]).persistable + ) idx_ = min_idx - 1 updated_min_idx = min_idx while idx_ > pre_segment_end_idx: if is_amp_cast(self.ops[idx_]): - _logger.info("found amp-cast op: {}, : {}".format( - self.ops[idx_].desc.type(), - self.ops[idx_].desc.input_arg_names()[0])) + _logger.info( + "found amp-cast op: {}, : {}".format( + self.ops[idx_].desc.type(), + self.ops[idx_].desc.input_arg_names()[0], + ) + ) updated_min_idx = idx_ idx_ -= 1 else: @@ -138,7 +145,8 @@ class ProgramStats(object): for j, name in enumerate(op.desc.input_arg_names()): if name in self.var_op_deps: self.op_deps[i]["in_ops"].extend( - self.var_op_deps[name]["var_as_output_ops"]) + self.var_op_deps[name]["var_as_output_ops"] + ) for j, name in 
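WeightedAverage above keeps a running numerator and denominator and reports numerator / denominator from eval(); the accumulation step itself is unchanged context that this hunk does not show, so the sketch below assumes the usual sum(value * weight) / sum(weight) bookkeeping (plain-Python illustration, not part of the patch):

class WeightedAverageSketch:
    def __init__(self):
        self.reset()

    def reset(self):
        self.numerator = None
        self.denominator = None

    def add(self, value, weight):
        # assumed accumulation: running sums of value * weight and of weight
        if self.numerator is None:
            self.numerator, self.denominator = value * weight, weight
        else:
            self.numerator += value * weight
            self.denominator += weight

    def eval(self):
        if self.numerator is None or self.denominator is None:
            raise ValueError("There is no data to be averaged.")
        return self.numerator / self.denominator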
enumerate(op.desc.input_arg_names()): if name in self.var_op_deps: self.var_op_deps[name]["var_as_input_ops"].extend([i]) @@ -164,13 +172,15 @@ class ProgramStats(object): if name not in self.var_op_deps: _logger.info( "Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program." - % name) + % name + ) elif self.var_op_deps[name]["var_as_output_ops"] == []: # input nodes sorted_checkpoints.append((name, -1)) else: sorted_checkpoints.append( - (name, max(self.var_op_deps[name]["var_as_output_ops"]))) + (name, max(self.var_op_deps[name]["var_as_output_ops"])) + ) sorted_checkpoints = sorted(sorted_checkpoints, key=lambda x: x[1]) return [x[0] for x in sorted_checkpoints] @@ -191,17 +201,20 @@ class ProgramStats(object): continue # add a seed op so that the two dropout op can generate same output op_unique_name = unique_name.generate("seed") - var_unique_name = unique_name.generate_with_ignorable_key(".".join( - [op_unique_name, 'tmp'])) + var_unique_name = unique_name.generate_with_ignorable_key( + ".".join([op_unique_name, 'tmp']) + ) added_var = self.block.create_var( name=var_unique_name, dtype='int32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) seed = 0 if op.attr("fix_seed") is False else int(op.attr("seed")) - op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName( + op_device_attr_name = ( + core.op_proto_and_checker_maker.kOpDeviceAttrName() ) op_device = "" if op.desc.has_attr(op_device_attr_name): @@ -209,15 +222,13 @@ class ProgramStats(object): # Setting the force_cpu of seed to true will make the output of seed in cpu memory, # reduce the synchronous copy from GPU to CPU in dropout, and reduce the communication hang - added_op = self.block._insert_op(index=op.idx, - type='seed', - inputs={}, - outputs={'Out': [added_var]}, - attrs={ - 'seed': seed, - 'op_device': op_device, - 'force_cpu': True - }) + added_op = self.block._insert_op( + index=op.idx, + type='seed', + inputs={}, + outputs={'Out': [added_var]}, + attrs={'seed': seed, 'op_device': op_device, 'force_cpu': True}, + ) self.ops.insert(op_idx, added_op) # modify dropout op desc so that it accept a seed var as input op.desc.set_input("Seed", [var_unique_name]) @@ -228,22 +239,24 @@ class ProgramStats(object): def _pretty_op_desc_(op_desc, prefix): - out_s = "%s\tname:[%s]\n%s \tinputs:[%s]\n%s \toutputs:[%s]" % \ - (prefix + "_op", str(op_desc.type()), prefix + "_input", " ".join(op_desc.input_arg_names()), - prefix + "_output", " ".join(op_desc.output_arg_names())) + out_s = "%s\tname:[%s]\n%s \tinputs:[%s]\n%s \toutputs:[%s]" % ( + prefix + "_op", + str(op_desc.type()), + prefix + "_input", + " ".join(op_desc.input_arg_names()), + prefix + "_output", + " ".join(op_desc.output_arg_names()), + ) return out_s -def _add_needed_descs_to_block(descs, - block, - main_block, - in_memory_vars, - grad_op_id_to_fwd_op=None): +def _add_needed_descs_to_block( + descs, block, main_block, in_memory_vars, grad_op_id_to_fwd_op=None +): if len(descs) == 0: return [] result_descs = [] - op_role_attr_name = \ - core.op_proto_and_checker_maker.kOpRoleAttrName() + op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() backward = core.op_proto_and_checker_maker.OpRole.Backward for desc in descs: origin_desc = desc @@ -275,8 +288,7 @@ def _add_descs_to_block(descs, block, grad_op_id_to_fwd_op=None): if len(descs) == 0: return [] result_descs = [] - op_role_attr_name = \ - 
core.op_proto_and_checker_maker.kOpRoleAttrName() + op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() backward = core.op_proto_and_checker_maker.OpRole.Backward for desc in descs: if isinstance(desc, framework.Operator): @@ -298,8 +310,10 @@ def _add_descs_to_block(descs, block, grad_op_id_to_fwd_op=None): def _find_loss_op_(loss): for op in reversed(loss.block.ops): assert isinstance(op, framework.Operator) - if len(op.output_arg_names - ) == 1 and op.output_arg_names[0] == loss.name: + if ( + len(op.output_arg_names) == 1 + and op.output_arg_names[0] == loss.name + ): loss.op = op break if loss.op is None: @@ -340,21 +354,30 @@ def _create_op_desc_(op_type, inputs, outputs, attrs): op_desc.set_input( para, list( - map(lambda arg: arg.decode() - if isinstance(arg, bytes) else arg, args))) + map( + lambda arg: arg.decode() if isinstance(arg, bytes) else arg, + args, + ) + ), + ) for para, args in outputs.items(): op_desc.set_output( para, list( - map(lambda arg: arg.decode() - if isinstance(arg, bytes) else arg, args))) + map( + lambda arg: arg.decode() if isinstance(arg, bytes) else arg, + args, + ) + ), + ) op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() if op_role_attr_name not in attrs: attrs[ - op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward + op_role_attr_name + ] = core.op_proto_and_checker_maker.OpRole.Backward if op_device_attr_name not in attrs: attrs[op_device_attr_name] = "" for name, val in attrs.items(): @@ -367,20 +390,23 @@ def _create_op_desc_(op_type, inputs, outputs, attrs): def _create_loss_op_desc_(loss): op_desc = _create_op_desc_( - "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, { + "fill_constant", + {}, + {"Out": [_append_grad_suffix_(loss.name)]}, + { "shape": [1], - "value": - 1.0, - "dtype": - loss.dtype, - "force_cpu": - False, - core.op_proto_and_checker_maker.kOpRoleAttrName(): - int(core.op_proto_and_checker_maker.OpRole.Backward) + "value": 1.0, + "dtype": loss.dtype, + "force_cpu": False, + core.op_proto_and_checker_maker.kOpRoleAttrName(): int( + core.op_proto_and_checker_maker.OpRole.Backward + ) | int(core.op_proto_and_checker_maker.OpRole.Loss), - core.op_proto_and_checker_maker.kOpDeviceAttrName(): - loss.op.attr(core.op_proto_and_checker_maker.kOpDeviceAttrName()) - }) + core.op_proto_and_checker_maker.kOpDeviceAttrName(): loss.op.attr( + core.op_proto_and_checker_maker.kOpDeviceAttrName() + ), + }, + ) return op_desc @@ -397,8 +423,10 @@ def _infer_var_data_type_shape_(grad_var_name, block): else: # TODO(jiabin): Maybe we should not to this to cause some unexpected error on dtype warnings.warn( - "Set grad var: {} dtype to default FP32, since we can't find its related forward var" - .format(grad_var_name)) + "Set grad var: {} dtype to default FP32, since we can't find its related forward var".format( + grad_var_name + ) + ) grad_var.set_dtype(core.VarDesc.VarType.FP32) @@ -435,7 +463,7 @@ def _strip_grad_suffix_(name): pos = name.find(core.grad_var_suffix()) new_name = name[:pos] if pos != -1 else name new_pos = name.rfind('grad/') - return new_name[new_pos + 5:] if new_pos != -1 else new_name + return new_name[new_pos + 5 :] if new_pos != -1 else new_name def _append_grad_suffix_(name): @@ -446,30 +474,28 @@ def _append_grad_suffix_(name): return name + core.grad_var_suffix() -def _accumulate_gradients_by_sum_op_(var_name, - renamed_vars, - pending_sum_ops, - op_idx, - op_device=""): +def 
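The gradient-name helpers reformatted above strip or append the framework's gradient suffix; a hedged standalone sketch that mirrors their string handling, assuming the suffix returned by core.grad_var_suffix() is the literal "@GRAD" (an assumption made only for illustration, not part of the patch):

GRAD_SUFFIX = "@GRAD"  # assumed value of core.grad_var_suffix()

def append_grad_suffix(name):
    return name + GRAD_SUFFIX

def strip_grad_suffix(name):
    # drop everything from the suffix on, then drop any leading "grad/" scope
    pos = name.find(GRAD_SUFFIX)
    new_name = name[:pos] if pos != -1 else name
    new_pos = name.rfind('grad/')
    return new_name[new_pos + 5:] if new_pos != -1 else new_name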
_accumulate_gradients_by_sum_op_( + var_name, renamed_vars, pending_sum_ops, op_idx, op_device="" +): """ Use sum op to accumulate_gradients, the gradients are stored in renamed_vars. """ if op_idx not in pending_sum_ops.keys(): pending_sum_ops[op_idx] = [] pending_sum_ops[op_idx].append( - _create_op_desc_("sum", {"X": renamed_vars[var_name]}, - {"Out": [var_name]}, { - "use_mkldnn": False, - "op_device": op_device - })) + _create_op_desc_( + "sum", + {"X": renamed_vars[var_name]}, + {"Out": [var_name]}, + {"use_mkldnn": False, "op_device": op_device}, + ) + ) renamed_vars[var_name] = [var_name] -def _accumulate_gradients_by_add_ops_(var_name, - renamed_vars, - pending_sum_ops, - op_idx, - op_device=""): +def _accumulate_gradients_by_add_ops_( + var_name, renamed_vars, pending_sum_ops, op_idx, op_device="" +): """ Use several inplace add op to accumulate_gradients, the gradients are stored in renamed_vars. """ @@ -484,20 +510,19 @@ def _accumulate_gradients_by_add_ops_(var_name, else: out_name = var_name pending_sum_ops[op_idx].append( - _create_op_desc_("grad_add", { - "X": [x_name], - "Y": [y_name] - }, {"Out": [out_name]}, { - "use_mkldnn": False, - "op_device": op_device - })) + _create_op_desc_( + "grad_add", + {"X": [x_name], "Y": [y_name]}, + {"Out": [out_name]}, + {"use_mkldnn": False, "op_device": op_device}, + ) + ) renamed_vars[var_name] = [var_name] -def _addup_repetitive_outputs_(op_descs, - block_idx, - grad_var_to_var=None, - grad_op_id_to_fwd_op=None): +def _addup_repetitive_outputs_( + op_descs, block_idx, grad_var_to_var=None, grad_op_id_to_fwd_op=None +): """ In backward part, an variable may be the output of more than one ops. And one op may yield its multiple outputs to the same variable. @@ -510,14 +535,15 @@ def _addup_repetitive_outputs_(op_descs, """ _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add'] - #pending_sum_ops = [] + # pending_sum_ops = [] pending_sum_ops = collections.OrderedDict() var_rename_count = collections.defaultdict(int) renamed_vars = collections.defaultdict(list) renamed_var_start_idx = collections.defaultdict(list) var_device = collections.defaultdict(str) for idx, op_desc in enumerate(op_descs): - op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName( + op_device_attr_name = ( + core.op_proto_and_checker_maker.kOpDeviceAttrName() ) op_device = "" if op_desc.has_attr(op_device_attr_name): @@ -527,13 +553,21 @@ def _addup_repetitive_outputs_(op_descs, continue if len(renamed_vars[var_name]) > 1: if len(renamed_vars[var_name]) > _MAX_ADD_NUM_: - _accumulate_gradients_by_sum_op_(var_name, renamed_vars, - pending_sum_ops, idx, - var_device[var_name]) + _accumulate_gradients_by_sum_op_( + var_name, + renamed_vars, + pending_sum_ops, + idx, + var_device[var_name], + ) else: - _accumulate_gradients_by_add_ops_(var_name, renamed_vars, - pending_sum_ops, idx, - var_device[var_name]) + _accumulate_gradients_by_add_ops_( + var_name, + renamed_vars, + pending_sum_ops, + idx, + var_device[var_name], + ) for param_idx, param_name in enumerate(op_desc.output_names()): arg_names = op_desc.output(param_name) @@ -542,8 +576,10 @@ def _addup_repetitive_outputs_(op_descs, continue # if "@RENAME@" in var_name: # continue - if var_name == core.empty_var_name( - ) or var_name in op_desc.input_arg_names(): + if ( + var_name == core.empty_var_name() + or var_name in op_desc.input_arg_names() + ): # empty variable or inplace op continue if len(renamed_vars[var_name]) == 0: @@ -552,14 +588,20 @@ def 
_addup_repetitive_outputs_(op_descs, renamed_var_start_idx[var_name] = idx else: if len(renamed_vars[var_name]) == 1: - new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \ - str(var_rename_count[var_name]) + new_name = ( + var_name + + "@RENAME@block" + + str(block_idx) + + "@" + + str(var_rename_count[var_name]) + ) var_rename_count[var_name] += 1 # Build the mapping between the new_name and var_name (Only for auto parallel) if grad_var_to_var is not None: if var_name in grad_var_to_var: grad_var_to_var[new_name] = grad_var_to_var[ - var_name] + var_name + ] else: grad_var_to_var[new_name] = var_name # rename original var_name @@ -568,31 +610,45 @@ def _addup_repetitive_outputs_(op_descs, # new_name, 0, idx) # rename arg from idx of the first appearance # in backward, not always from 0 - _rename_arg_(op_descs, var_name, new_name, - renamed_var_start_idx[var_name], idx) + _rename_arg_( + op_descs, + var_name, + new_name, + renamed_var_start_idx[var_name], + idx, + ) _rename_arg_(pending_sum_ops, var_name, new_name) for p in op_desc.output_names()[:param_idx]: p_arg_names = op_desc.output(p) if var_name in p_arg_names: - op_desc.set_output(p, [ - new_name if x == var_name else x - for x in p_arg_names - ]) + op_desc.set_output( + p, + [ + new_name if x == var_name else x + for x in p_arg_names + ], + ) arg_names = [ new_name if x == var_name else x for x in arg_names[:arg_idx] ] + arg_names[arg_idx:] - new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \ - str(var_rename_count[var_name]) + new_name = ( + var_name + + "@RENAME@block" + + str(block_idx) + + "@" + + str(var_rename_count[var_name]) + ) var_rename_count[var_name] += 1 # Build the mapping between the new_name and var_name (Only for auto parallel) if grad_var_to_var is not None: if var_name in grad_var_to_var: grad_var_to_var[new_name] = grad_var_to_var[ - var_name] + var_name + ] else: grad_var_to_var[new_name] = var_name arg_names[arg_idx] = new_name @@ -604,19 +660,27 @@ def _addup_repetitive_outputs_(op_descs, for var_name, inputs in renamed_vars.items(): if len(renamed_vars[var_name]) > 1: if len(renamed_vars[var_name]) > _MAX_ADD_NUM_: - _accumulate_gradients_by_sum_op_(var_name, renamed_vars, - pending_sum_ops, len(op_descs), - var_device[var_name]) + _accumulate_gradients_by_sum_op_( + var_name, + renamed_vars, + pending_sum_ops, + len(op_descs), + var_device[var_name], + ) else: - _accumulate_gradients_by_add_ops_(var_name, - renamed_vars, pending_sum_ops, - len(op_descs), - var_device[var_name]) + _accumulate_gradients_by_add_ops_( + var_name, + renamed_vars, + pending_sum_ops, + len(op_descs), + var_device[var_name], + ) op_descs_len = len(op_descs) # sum_op descs are sorted according to their insert position for key, value in collections.OrderedDict( - reversed(list(pending_sum_ops.items()))).items(): + reversed(list(pending_sum_ops.items())) + ).items(): # NOTE(zhiqiu): Since reversed, the idx of op_descs to be inserted will remains correct. # For example, [0, 1, 2], and we want to insert 'a' at idx 1, 'b' at idx 2, and the expected result is [0, 1, 'a', 2, 'b']. 
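The two accumulation helpers and _addup_repetitive_outputs_ reformatted above implement a rename-then-reduce scheme: each additional write to the same gradient variable is redirected to a fresh @RENAME@ alias, and the aliases are later collapsed with a single sum op (when there are many) or chained inplace grad_add ops (when there are few, bounded by FLAGS_max_inplace_grad_add). A rough, self-contained sketch of that idea in plain Python rather than Paddle's OpDesc machinery; the alias format and helper names below are simplified stand-ins, not the real ones:

```python
# Conceptual sketch only -- plain-Python stand-in for the rename-then-reduce
# scheme above, not Paddle's real implementation (which edits C++ OpDescs).
from collections import defaultdict

MAX_INPLACE_ADD = 2  # stand-in for FLAGS_max_inplace_grad_add


def accumulate_repeated_grads(grad_writes):
    """grad_writes: gradient var names produced by backward ops, in order."""
    aliases = defaultdict(list)  # original grad name -> aliases written so far
    ops = []
    for name in grad_writes:
        # A repeated write is redirected to a fresh alias instead of clobbering.
        alias = name if not aliases[name] else "%s@RENAME@%d" % (name, len(aliases[name]))
        aliases[name].append(alias)
        ops.append(("backward_op", [alias]))
    for name, written in aliases.items():
        if len(written) > 1:
            # Many duplicates -> one sum op; few -> chained inplace grad_add ops.
            kind = "sum" if len(written) > MAX_INPLACE_ADD else "grad_add"
            ops.append((kind, written, name))
    return ops


for op in accumulate_repeated_grads(["x@GRAD", "y@GRAD", "x@GRAD", "x@GRAD", "x@GRAD"]):
    print(op)
```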
@@ -626,19 +690,24 @@ def _addup_repetitive_outputs_(op_descs, for i, op in enumerate(value): # update the mapping between fwd and bwd target_idx = idx - 1 if idx == op_descs_len else idx + i - if grad_op_id_to_fwd_op is not None and grad_op_id_to_fwd_op.get( - op_descs[target_idx].original_id(), None) is not None: + if ( + grad_op_id_to_fwd_op is not None + and grad_op_id_to_fwd_op.get( + op_descs[target_idx].original_id(), None + ) + is not None + ): grad_op_id_to_fwd_op[op.original_id()] = grad_op_id_to_fwd_op[ - op_descs[target_idx].original_id()] + op_descs[target_idx].original_id() + ] op_descs.insert(idx + i, op) return op_descs -def _remove_no_grad_branch_(op_descs, - no_grad_set, - grad_op_id_to_fwd_op=None, - target_vars=[]): +def _remove_no_grad_branch_( + op_descs, no_grad_set, grad_op_id_to_fwd_op=None, target_vars=[] +): """ Remove unnecessary grad ops A grad op can be removed in two cases: @@ -651,19 +720,25 @@ def _remove_no_grad_branch_(op_descs, out_arg_names = op_desc.output_arg_names() if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set): return True - if _all_in_set_([ - name for name in op_desc.input_arg_names() + if _all_in_set_( + [ + name + for name in op_desc.input_arg_names() if name.find(core.grad_var_suffix()) != -1 - ], no_grad_set): + ], + no_grad_set, + ): no_grad_set.update(set(out_arg_names) - target_grad_var_names) return True return False # Remove ops whose outputs are all in no_grad_dict target_grad_var_names = set( - [var.name + core.grad_var_suffix() for var in target_vars]) + [var.name + core.grad_var_suffix() for var in target_vars] + ) op_descs = [ - op_desc for op_desc in op_descs + op_desc + for op_desc in op_descs if not _op_can_be_removed_(op_desc, no_grad_set) ] # Insert fill_any_like_op with value 0 @@ -675,16 +750,21 @@ def _remove_no_grad_branch_(op_descs, x_in = _strip_grad_suffix_(arg) # the reason should be: arg can be input of another grad op # and the op is a not-to-remove op - new_op_desc = _create_op_desc_("fill_any_like", {"X": [x_in]}, - {"Out": [arg]}, { - 'value': 0, - 'dtype': -1 - }) + new_op_desc = _create_op_desc_( + "fill_any_like", + {"X": [x_in]}, + {"Out": [arg]}, + {'value': 0, 'dtype': -1}, + ) # update the mapping between fwd and bwd - if grad_op_id_to_fwd_op is not None and grad_op_id_to_fwd_op.get( - op_desc.original_id(), None) is not None: - grad_op_id_to_fwd_op[new_op_desc.original_id( - )] = grad_op_id_to_fwd_op[op_desc.original_id()] + if ( + grad_op_id_to_fwd_op is not None + and grad_op_id_to_fwd_op.get(op_desc.original_id(), None) + is not None + ): + grad_op_id_to_fwd_op[ + new_op_desc.original_id() + ] = grad_op_id_to_fwd_op[op_desc.original_id()] to_insert.append((new_op_desc, idx)) list([op_descs.insert(p[1], p[0]) for p in reversed(to_insert)]) @@ -711,7 +791,6 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set): """ class Var(object): - def __init__(self, var_name): self.var_name = var_name self.gen_op = None @@ -727,7 +806,6 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set): self.pendding_ops.append(op) class Op(object): - def __init__(self, op_desc): self.op_desc = op_desc self.inputs = [] @@ -768,8 +846,9 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set): return op_node # Record the forward vars - forward_vars_set = set() if input_grad_names_set is None else set( - input_grad_names_set) + forward_vars_set = ( + set() if input_grad_names_set is None else set(input_grad_names_set) + ) for op in forward_ops: 
forward_vars_set.update(op.desc.input_arg_names()) forward_vars_set.update(op.desc.output_arg_names()) @@ -824,14 +903,16 @@ def serialize_op_decs(op_desc): return proto.__str__() -def _append_backward_ops_with_checkpoints_(block, - ops, - target_vars, - target_block, - no_grad_dict, - grad_to_var, - checkpoints, - grad_op_id_to_fwd_op=None): +def _append_backward_ops_with_checkpoints_( + block, + ops, + target_vars, + target_block, + no_grad_dict, + grad_to_var, + checkpoints, + grad_op_id_to_fwd_op=None, +): """ Create grad ops with forward ops, and insert them into given block @@ -898,16 +979,20 @@ def _append_backward_ops_with_checkpoints_(block, # min_idx: checkpoint_1' s input op # max_idx: checkpoint_2' s output op flag, min_idx, max_idx = program_stat.is_subgraph( - [checkpoints_name[start_idx]], - [checkpoints_name[start_idx + 1]]) + [checkpoints_name[start_idx]], [checkpoints_name[start_idx + 1]] + ) if flag: # max_idx + 1 since the exact and used segment end idx is max_idx min_idx = program_stat._update_segment_start( - min_idx, pre_segment_end_idx) + min_idx, pre_segment_end_idx + ) segments.append([min_idx, max_idx + 1]) else: - _logger.info("Could not recompute op range [{}] - [{}] ".format( - min_idx, max_idx + 1)) + _logger.info( + "Could not recompute op range [{}] - [{}] ".format( + min_idx, max_idx + 1 + ) + ) start_idx += 1 @@ -918,26 +1003,42 @@ def _append_backward_ops_with_checkpoints_(block, for i, (idx1, idx2) in enumerate(recompute_segments): _logger.info("recompute segment[{}]".format(i)) - _logger.info("segment start op: [{}]: [{}]".format( - ops[idx1].desc.type(), ops[idx1].desc.input_arg_names())) - _logger.info("segment end op: [{}]: [{}]".format( - ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names())) + _logger.info( + "segment start op: [{}]: [{}]".format( + ops[idx1].desc.type(), ops[idx1].desc.input_arg_names() + ) + ) + _logger.info( + "segment end op: [{}]: [{}]".format( + ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names() + ) + ) _logger.info("recompute segment[{}]".format(i)) - _logger.info("segment start op: [{}]: [{}]".format( - ops[idx1].desc.type(), ops[idx1].desc.input_arg_names())) - _logger.info("segment end op: [{}]: [{}]".format( - ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names())) + _logger.info( + "segment start op: [{}]: [{}]".format( + ops[idx1].desc.type(), ops[idx1].desc.input_arg_names() + ) + ) + _logger.info( + "segment end op: [{}]: [{}]".format( + ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names() + ) + ) # 2) go through all forward ops and induct all variables that will be hold in memory vars_should_be_hold = [] # a. variables that are used across segments will be held in memory for segment in recompute_segments: vars_should_be_hold.extend( - program_stat.get_out_of_subgraph_vars(segment[0], segment[1])) + program_stat.get_out_of_subgraph_vars(segment[0], segment[1]) + ) cross_vars = set(vars_should_be_hold) - set(checkpoints_name) - _logger.info("found [{}] vars which cross recompute segment: [{}], better checkpoints might be set to reduce those vars".format( \ - len(cross_vars), cross_vars)) + _logger.info( + "found [{}] vars which cross recompute segment: [{}], better checkpoints might be set to reduce those vars".format( + len(cross_vars), cross_vars + ) + ) # b. 
output of seed op should be kept in memory vars_should_be_hold.extend(program_stat.get_reserved_vars()) @@ -957,11 +1058,14 @@ def _append_backward_ops_with_checkpoints_(block, gap_ops = ops[0:max_calculated_op_position] for op in reversed(gap_ops): if op.has_attr("sub_block"): - raise Exception("Recompute don't support ops with sub_block" - "invoke op: %s" % - _pretty_op_desc_(op.desc, "with_sub_block")) + raise Exception( + "Recompute don't support ops with sub_block" + "invoke op: %s" + % _pretty_op_desc_(op.desc, "with_sub_block") + ) grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op.desc, no_grad_dict[block.idx], []) + op.desc, no_grad_dict[block.idx], [] + ) # record the mapping between fwd and bwd if grad_op_id_to_fwd_op is not None: @@ -973,21 +1077,25 @@ def _append_backward_ops_with_checkpoints_(block, op_device = op.desc.attr(device_attr_name) for op_desc in grad_op_desc: op_desc._set_attr(device_attr_name, op_device) - added_descs = _add_descs_to_block(grad_op_desc, local_block, - grad_op_id_to_fwd_op) + added_descs = _add_descs_to_block( + grad_op_desc, local_block, grad_op_id_to_fwd_op + ) grad_op_descs.extend(added_descs) grad_to_var.update(op_grad_to_var) for i, segment in enumerate(recompute_segments[::-1]): - gap_ops = ops[segment[1]:max_calculated_op_position] + gap_ops = ops[segment[1] : max_calculated_op_position] max_calculated_op_position = segment[0] for op in reversed(gap_ops): if op.has_attr("sub_block"): - raise Exception("Recompute don't support ops with sub_block" - "invoke op: %s" % - _pretty_op_desc_(op.desc, "with_sub_block")) + raise Exception( + "Recompute don't support ops with sub_block" + "invoke op: %s" + % _pretty_op_desc_(op.desc, "with_sub_block") + ) grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op.desc, no_grad_dict[block.idx], []) + op.desc, no_grad_dict[block.idx], [] + ) # record the mapping between fwd and bwd if grad_op_id_to_fwd_op is not None: @@ -999,19 +1107,22 @@ def _append_backward_ops_with_checkpoints_(block, op_device = op.desc.attr(device_attr_name) for op_desc in grad_op_desc: op_desc._set_attr(device_attr_name, op_device) - added_descs = _add_descs_to_block(grad_op_desc, local_block, - grad_op_id_to_fwd_op) + added_descs = _add_descs_to_block( + grad_op_desc, local_block, grad_op_id_to_fwd_op + ) grad_op_descs.extend(added_descs) grad_to_var.update(op_grad_to_var) - ff_ops = ops[segment[0]:segment[1]] + ff_ops = ops[segment[0] : segment[1]] var_suffix = ".subprog_%d" % i for op in ff_ops: if op.has_attr("sub_block"): - raise Exception("Recompute don't support ops with sub_block" - "invoke op: %s" % - _pretty_op_desc_(op.desc, "with_sub_block")) + raise Exception( + "Recompute don't support ops with sub_block" + "invoke op: %s" + % _pretty_op_desc_(op.desc, "with_sub_block") + ) input_and_output_names = [] input_and_output_names.extend(op.desc.input_arg_names()) input_and_output_names.extend(op.desc.output_arg_names()) @@ -1025,19 +1136,22 @@ def _append_backward_ops_with_checkpoints_(block, # we should create the rename var in subprog, otherwise its VarType will be BOOL ref_var = block.program.global_block().var(name) - block.create_var(name=var_name_dict[name], - shape=ref_var.shape, - dtype=ref_var.dtype, - type=ref_var.type, - persistable=ref_var.persistable, - stop_gradient=ref_var.stop_gradient) + block.create_var( + name=var_name_dict[name], + shape=ref_var.shape, + dtype=ref_var.dtype, + type=ref_var.type, + persistable=ref_var.persistable, + stop_gradient=ref_var.stop_gradient, + ) # 3.a. 
add ops in current recompute_segment as forward recomputation ops - buffer_descs = _add_needed_descs_to_block(ff_ops, buffer_block, block, - vars_in_memory, - grad_op_id_to_fwd_op) - added_descs = _add_descs_to_block(ff_ops, local_block, - grad_op_id_to_fwd_op) + buffer_descs = _add_needed_descs_to_block( + ff_ops, buffer_block, block, vars_in_memory, grad_op_id_to_fwd_op + ) + added_descs = _add_descs_to_block( + ff_ops, local_block, grad_op_id_to_fwd_op + ) # 3.b. rename all non-checkpoint variables in recomputation ops for key in var_name_dict: @@ -1049,13 +1163,15 @@ def _append_backward_ops_with_checkpoints_(block, # 3.c. add backward ops for all ops in current segment for op_desc in reversed(added_descs): grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op_desc, no_grad_dict[block.idx], []) + op_desc, no_grad_dict[block.idx], [] + ) # record the mapping between fwd and bwd if grad_op_id_to_fwd_op is not None: for g_op_desc in grad_op_desc: - grad_op_id_to_fwd_op[g_op_desc.original_id( - )] = grad_op_id_to_fwd_op[op_desc.original_id()] + grad_op_id_to_fwd_op[ + g_op_desc.original_id() + ] = grad_op_id_to_fwd_op[op_desc.original_id()] # Set device for grad_op according to forward Op if op_desc.has_attr(device_attr_name): @@ -1070,21 +1186,33 @@ def _append_backward_ops_with_checkpoints_(block, # 3.d. add sum op for repetitive_outputs grad_op_descs = _addup_repetitive_outputs_( - grad_op_descs, block.idx, grad_op_id_to_fwd_op=grad_op_id_to_fwd_op) + grad_op_descs, block.idx, grad_op_id_to_fwd_op=grad_op_id_to_fwd_op + ) # 4) remove no grad branch as it is in _remove_no_grad_branch_ - grad_op_descs = _remove_no_grad_branch_(grad_op_descs, - no_grad_dict[block.idx], - grad_op_id_to_fwd_op, target_vars) - added_descs = _add_descs_to_block(grad_op_descs, target_block, - grad_op_id_to_fwd_op) - return program_stat, checkpoints_name, vars_should_be_hold, recompute_segments - - -def _get_sub_block_path(sub_block, - sub_block_op_desc, - no_grad_set, - op_path_dict, - sub_block_target_names=None): + grad_op_descs = _remove_no_grad_branch_( + grad_op_descs, + no_grad_dict[block.idx], + grad_op_id_to_fwd_op, + target_vars, + ) + added_descs = _add_descs_to_block( + grad_op_descs, target_block, grad_op_id_to_fwd_op + ) + return ( + program_stat, + checkpoints_name, + vars_should_be_hold, + recompute_segments, + ) + + +def _get_sub_block_path( + sub_block, + sub_block_op_desc, + no_grad_set, + op_path_dict, + sub_block_target_names=None, +): """ Get output vars in subblock which will be assigned to parent block. It is used to find the grad path in subblock. 
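The checkpoint machinery above is driven through append_backward's checkpoints argument (in real training code usually indirectly, via fleet's RecomputeOptimizer). A hypothetical usage sketch, assuming checkpoints is a list of forward Variables to keep resident while the ops between them are recomputed during backward; the network and names are made up for illustration, and the return layout of the recompute branch is not shown in these hunks:

```python
# Hypothetical sketch: keep the two fc outputs as recompute checkpoints so the
# activations between them can be recomputed in backward instead of stored.
import paddle

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name='x', shape=[None, 16], dtype='float32')
    h1 = paddle.static.nn.fc(x, size=16, activation='relu')
    h2 = paddle.static.nn.fc(h1, size=16, activation='relu')
    loss = paddle.mean(paddle.static.nn.fc(h2, size=1))
    # A non-empty checkpoints list selects _append_backward_ops_with_checkpoints_
    # instead of the plain _append_backward_ops_ path.
    result = paddle.static.append_backward(loss, checkpoints=[h1, h2])
```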
@@ -1102,8 +1230,8 @@ def _get_sub_block_path(sub_block, """ assert sub_block_op_desc.has_attr( - "sub_block") and sub_block.idx == sub_block_op_desc._block_attr_id( - "sub_block") + "sub_block" + ) and sub_block.idx == sub_block_op_desc._block_attr_id("sub_block") assert isinstance(sub_block_target_names, (set, type(None))) if sub_block_target_names is None: @@ -1123,8 +1251,9 @@ def _get_sub_block_path(sub_block, # Step2: find op path of sub-block is_while = sub_block_op_desc.type in ["while"] - sub_block_op_path = _find_op_path_(sub_block, sub_outputs, [], - no_grad_set, op_path_dict, is_while) + sub_block_op_path = _find_op_path_( + sub_block, sub_outputs, [], no_grad_set, op_path_dict, is_while + ) return sub_block_op_path return sub_block.ops @@ -1132,8 +1261,9 @@ def _get_sub_block_path(sub_block, def _is_grad_op_(op): op_maker = core.op_proto_and_checker_maker backward = core.op_proto_and_checker_maker.OpRole.Backward - if op_maker.kOpRoleVarAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward): + if op_maker.kOpRoleVarAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(backward): return True return False @@ -1142,18 +1272,20 @@ def _rename_grad_name_(name, grad_order): return 'grad/' * grad_order + name -def _append_backward_ops_(block, - ops, - target_vars, - target_block, - no_grad_dict, - grad_to_var, - callbacks=None, - input_grad_names_set=None, - op_path_dict=None, - distop_context=None, - rename_var_map=None, - grad_op_id_to_fwd_op=None): +def _append_backward_ops_( + block, + ops, + target_vars, + target_block, + no_grad_dict, + grad_to_var, + callbacks=None, + input_grad_names_set=None, + op_path_dict=None, + distop_context=None, + rename_var_map=None, + grad_op_id_to_fwd_op=None, +): """ Create all grad ops, and insert them into given block @@ -1180,18 +1312,22 @@ def _append_backward_ops_(block, """ # Build the mapping between the forward op and backward op (Only for auto parallel) - def update_distop_context(distop_context, op_grad_to_var, - appending_grad_times): + def update_distop_context( + distop_context, op_grad_to_var, appending_grad_times + ): distop_context.grad_var_to_var[appending_grad_times].update( - op_grad_to_var) + op_grad_to_var + ) for op_desc in grad_op_desc: - assert op_desc.original_id( - ) not in distop_context.grad_op_id_to_op_id + assert ( + op_desc.original_id() not in distop_context.grad_op_id_to_op_id + ) distop_context.grad_op_id_to_op_id[ - op_desc.original_id()] = op.desc.original_id() + op_desc.original_id() + ] = op.desc.original_id() if callbacks is not None: - assert (isinstance(callbacks, (list, tuple))) + assert isinstance(callbacks, (list, tuple)) for cb in callbacks: if not hasattr(cb, '__call__'): raise ValueError("'callback' must be a callable object.") @@ -1216,16 +1352,18 @@ def _append_backward_ops_(block, pre_input_grad_names_set = copy.copy(input_grad_names_set) input_grad_names_set = None sub_block_path = op_path_dict[op._block_attr_id("sub_block")] - _append_backward_ops_(sub_block, - sub_block_path, - target_vars, - grad_sub_block, - no_grad_dict, - grad_to_var, - callbacks, - input_grad_names_set, - op_path_dict, - grad_op_id_to_fwd_op=grad_op_id_to_fwd_op) + _append_backward_ops_( + sub_block, + sub_block_path, + target_vars, + grad_sub_block, + no_grad_dict, + grad_to_var, + callbacks, + input_grad_names_set, + op_path_dict, + grad_op_id_to_fwd_op=grad_op_id_to_fwd_op, + ) input_grad_names_set = pre_input_grad_names_set 
program._rollback() @@ -1233,7 +1371,8 @@ def _append_backward_ops_(block, # Getting op's corresponding grad_op grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op.desc, no_grad_dict[block.idx], grad_sub_block_list) + op.desc, no_grad_dict[block.idx], grad_sub_block_list + ) # record the mapping between fwd and bwd if grad_op_id_to_fwd_op is not None: @@ -1242,15 +1381,22 @@ def _append_backward_ops_(block, # Build the mapping between the forward op and backward op (Only for auto parallel) if distop_context is not None: - update_distop_context(distop_context, op_grad_to_var, - program._appending_grad_times) + update_distop_context( + distop_context, op_grad_to_var, program._appending_grad_times + ) else: - default_ctx = getattr(paddle.distributed.auto_parallel.dist_context, - '_g_default_distributed_context', None) + default_ctx = getattr( + paddle.distributed.auto_parallel.dist_context, + '_g_default_distributed_context', + None, + ) if default_ctx is not None: distop_context = default_ctx.dist_op_context - update_distop_context(distop_context, op_grad_to_var, - program._appending_grad_times) + update_distop_context( + distop_context, + op_grad_to_var, + program._appending_grad_times, + ) # Set device for grad_op according to forward Op device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() @@ -1278,7 +1424,8 @@ def _append_backward_ops_(block, continue if block.desc.find_var(name.encode("ascii")): new_name = _rename_grad_name_( - name, program._appending_grad_times) + name, program._appending_grad_times + ) op_desc._rename_output(name, new_name) rename_var_map[name] = new_name @@ -1286,8 +1433,8 @@ def _append_backward_ops_(block, # Build the mapping between the grad var name and var name (Only for auto parallel) if distop_context is not None: distop_context.grad_var_to_var[ - program._appending_grad_times][ - new_name] = op_grad_to_var[name] + program._appending_grad_times + ][new_name] = op_grad_to_var[name] op_grad_to_var[new_name] = op_grad_to_var[name] op_grad_to_var.pop(name) @@ -1296,12 +1443,15 @@ def _append_backward_ops_(block, # But this strategy is not suited for while op for some control flow, # for example, for while op, the grads maybe generated in next loop. 
if input_grad_names_set is not None: - is_grad_name = lambda name: name.find(core.grad_var_suffix( - )) != -1 or name in input_grad_names_set + is_grad_name = ( + lambda name: name.find(core.grad_var_suffix()) != -1 + or name in input_grad_names_set + ) is_append_grad = False for op_desc in grad_op_desc: input_grad_names = [ - name for name in op_desc.input_arg_names() + name + for name in op_desc.input_arg_names() if is_grad_name(name) ] # some code of gradient ops, like increment, are not very @@ -1325,19 +1475,24 @@ def _append_backward_ops_(block, grad_var_to_var = None if distop_context is not None: grad_var_to_var = distop_context.grad_var_to_var[ - program._appending_grad_times] + program._appending_grad_times + ] # sum parameter's gradients' var given multiple var gradient grad_op_descs = _addup_repetitive_outputs_( grad_op_descs, block.idx, grad_var_to_var, - grad_op_id_to_fwd_op=grad_op_id_to_fwd_op) + grad_op_id_to_fwd_op=grad_op_id_to_fwd_op, + ) # if all outputs of the grad op are in no_grad_set, then just remove and fill zero # if all inputs of the grad op are in no_grad_set, just remove this op - grad_op_descs = _remove_no_grad_branch_(grad_op_descs, - no_grad_dict[block.idx], - grad_op_id_to_fwd_op, target_vars) + grad_op_descs = _remove_no_grad_branch_( + grad_op_descs, + no_grad_dict[block.idx], + grad_op_id_to_fwd_op, + target_vars, + ) # remove some backward ops not_need_ops = _find_not_need_ops(grad_op_descs, ops, input_grad_names_set) @@ -1355,7 +1510,7 @@ def _append_backward_ops_(block, new_op_desc._set_attr(op_role_attr_name, backward) grad_to_var["__current_op_desc__"] = new_op_desc if callbacks is not None: - assert (isinstance(callbacks, (list, tuple))) + assert isinstance(callbacks, (list, tuple)) for cb in callbacks: cb(block=target_block, context=grad_to_var) @@ -1376,8 +1531,10 @@ def _find_parent_op_(sub_block): block_desc = program.block(block_id).desc for op_idx in range(block_desc.op_size()): op = block_desc.op(op_idx) - if op.has_attr("sub_block") and op._block_attr_id( - "sub_block") == sub_block_id: + if ( + op.has_attr("sub_block") + and op._block_attr_id("sub_block") == sub_block_id + ): return op # NOTE(paddle-dev): When optimizer is added in conditional block, @@ -1431,11 +1588,13 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): ] inputs = [ - var for var in op_desc.input_arg_names() + var + for var in op_desc.input_arg_names() if var != core.empty_var_name() ] outputs = [ - var for var in op_desc.output_arg_names() + var + for var in op_desc.output_arg_names() if var != core.empty_var_name() ] @@ -1450,7 +1609,8 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): ''' if grad_var_ins: existing_grad_var_ins = [ - var for var in grad_var_ins + var + for var in grad_var_ins if block.desc.has_var_recursive(var.encode()) or var in parent_op_vars ] @@ -1470,8 +1630,10 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): new_vars = set() # create new gradient variables for grad_var_name in op_desc.output_arg_names(): - if block.desc.has_var_recursive(grad_var_name.encode( - )) or grad_var_name == core.empty_var_name(): + if ( + block.desc.has_var_recursive(grad_var_name.encode()) + or grad_var_name == core.empty_var_name() + ): continue block.desc.var(grad_var_name.encode()) new_vars.add(grad_var_name) @@ -1550,21 +1712,26 @@ def _get_no_grad_set_name(no_grad_set): else: raise TypeError( "The type of no_grad_set's member must be paddle.fluid.Variable or str, but 
received %s." - % (type(no_grad_var))) + % (type(no_grad_var)) + ) else: raise TypeError( - "The type of no_grad_set should be set or list or tuple, but received {}" - .format(type(no_grad_set))) + "The type of no_grad_set should be set or list or tuple, but received {}".format( + type(no_grad_set) + ) + ) return no_grad_set_name @framework.static_only -def append_backward(loss, - parameter_list=None, - no_grad_set=None, - callbacks=None, - checkpoints=None, - distop_context=None): +def append_backward( + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + checkpoints=None, + distop_context=None, +): """ :api_attr: Static Graph @@ -1665,11 +1832,13 @@ def append_backward(loss, p_g_list6 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights)) """ - grad_op_id_to_fwd_op = { - } # for cuda graph usage, recording the mapping between grad op original id to fwd op + grad_op_id_to_fwd_op = ( + {} + ) # for cuda graph usage, recording the mapping between grad op original id to fwd op - check_type(loss, 'loss', framework.Variable, - 'paddle.static.append_backward') + check_type( + loss, 'loss', framework.Variable, 'paddle.static.append_backward' + ) if loss.op is None: # the loss is from a cloned program. Find loss op manually. @@ -1678,11 +1847,16 @@ def append_backward(loss, loss.op._set_attr( core.op_proto_and_checker_maker.kOpRoleAttrName(), int(core.op_proto_and_checker_maker.OpRole.Forward) - | int(core.op_proto_and_checker_maker.OpRole.Loss)) + | int(core.op_proto_and_checker_maker.OpRole.Loss), + ) if callbacks is not None: - check_type(callbacks, 'callbacks', (list, tuple), - 'paddle.static.append_backward') + check_type( + callbacks, + 'callbacks', + (list, tuple), + 'paddle.static.append_backward', + ) program = loss.block.program root_block = program.block(0) @@ -1712,14 +1886,16 @@ def append_backward(loss, if is_in_control_flow: # create grad block if in switch control flow. 
target_grad_block = program._create_block( - parent_idx=current_block.parent_idx) + parent_idx=current_block.parent_idx + ) target_grad_block._set_forward_block_idx(current_block_idx) # after _create_block, program.current_block changes else: target_grad_block = root_block son_parent_block_idx_dict = _get_son_parent_block_idx_dict( - program, current_block_idx) + program, current_block_idx + ) block_fwd_op_num_dict = {} # block_id: fwd_op_num for idx in son_parent_block_idx_dict: @@ -1736,18 +1912,22 @@ def append_backward(loss, block = program.block(block_idx) block_no_grad_set = set( - map(_strip_grad_suffix_, no_grad_dict[block_idx])) + map(_strip_grad_suffix_, no_grad_dict[block_idx]) + ) op_path_dict = dict() - op_path = _find_op_path_(block, [loss], [], block_no_grad_set, - op_path_dict) + op_path = _find_op_path_( + block, [loss], [], block_no_grad_set, op_path_dict + ) - no_grad_vars = _find_no_grad_vars(block, op_path, [loss], - block_no_grad_set) + no_grad_vars = _find_no_grad_vars( + block, op_path, [loss], block_no_grad_set + ) block_no_grad_set.update(no_grad_vars) no_grad_dict[block_idx].update( - list(map(_append_grad_suffix_, block_no_grad_set))) + list(map(_append_grad_suffix_, block_no_grad_set)) + ) input_grad_names_set = None # For double backward, input_grad_names is used for filtering @@ -1762,22 +1942,27 @@ def append_backward(loss, # TODO: support _append_backward_ops_with_checkpoints_ in # sub-block (control flow) is_recompute = False - if checkpoints != None and \ - isinstance(checkpoints, list) and \ - len(checkpoints) > 0: + if ( + checkpoints != None + and isinstance(checkpoints, list) + and len(checkpoints) > 0 + ): is_recompute = True - program_stat, checkpoint_names, \ - vars_should_be_hold, \ - recompute_segments = \ - _append_backward_ops_with_checkpoints_( - root_block, - op_path, - [loss], - root_block, - no_grad_dict, - grad_to_var, - checkpoints, - grad_op_id_to_fwd_op) + ( + program_stat, + checkpoint_names, + vars_should_be_hold, + recompute_segments, + ) = _append_backward_ops_with_checkpoints_( + root_block, + op_path, + [loss], + root_block, + no_grad_dict, + grad_to_var, + checkpoints, + grad_op_id_to_fwd_op, + ) else: _append_backward_ops_( block, # the block where forward ops are in @@ -1790,22 +1975,27 @@ def append_backward(loss, input_grad_names_set=input_grad_names_set, op_path_dict=op_path_dict, distop_context=distop_context, - grad_op_id_to_fwd_op=grad_op_id_to_fwd_op) + grad_op_id_to_fwd_op=grad_op_id_to_fwd_op, + ) grad_info_map = dict() # if in control flow, target_grad_block is a created new block which only contains grad ops, # so fwd_op_num is set to 0. - fwd_op_num = block_fwd_op_num_dict[ - current_block_idx] if not is_in_control_flow else 0 + fwd_op_num = ( + block_fwd_op_num_dict[current_block_idx] + if not is_in_control_flow + else 0 + ) # Because append_backward may be called multiple times, # we need rename the internal gradient variables so that they have # different names. 
_rename_grad_(target_grad_block, fwd_op_num, grad_to_var, {}) - _append_backward_vars_(target_grad_block, fwd_op_num, grad_to_var, - grad_info_map) + _append_backward_vars_( + target_grad_block, fwd_op_num, grad_to_var, grad_info_map + ) program.current_block_idx = current_block_idx program._sync_with_cpp() @@ -1817,13 +2007,20 @@ def append_backward(loss, op._cuda_graph_attr = fwd_op._cuda_graph_attr if parameter_list is not None: - check_type(parameter_list, 'parameter_list', (list, tuple, set), - 'fluid.backward.append_backward') + check_type( + parameter_list, + 'parameter_list', + (list, tuple, set), + 'fluid.backward.append_backward', + ) parameters = [] for i, param in enumerate(parameter_list): - check_type(param, 'parameter_list[%s]' % i, - (framework.Variable, str), - 'fluid.backward.append_backward') + check_type( + param, + 'parameter_list[%s]' % i, + (framework.Variable, str), + 'fluid.backward.append_backward', + ) if isinstance(param, framework.Variable): parameters.append(param.name) elif isinstance(param, str): @@ -1840,8 +2037,11 @@ def append_backward(loss, grad_info = grad_info_map[param] grad_block = grad_info[1] if not grad_block.has_var(grad_info[0]): - raise ValueError("grad block[{0}] did not have grad var {1}".format( - grad_info[1], grad_info[0])) + raise ValueError( + "grad block[{0}] did not have grad var {1}".format( + grad_info[1], grad_info[0] + ) + ) # Get the param var from the global block param_var = program.global_block().var(param) grad_var = grad_block.var(grad_info[0]) @@ -1856,8 +2056,9 @@ def append_backward(loss, for p, g in params_and_grads: if g is None: continue - ops = grad_block.ops if is_in_control_flow else program.global_block( - ).ops + ops = ( + grad_block.ops if is_in_control_flow else program.global_block().ops + ) for op in reversed(ops): assert isinstance(op, framework.Operator) if g.name in op.output_arg_names: @@ -1924,8 +2125,9 @@ def _get_output_names(cur_block, targets): if _some_in_set_(op.desc.output_arg_names(), current_output_names): for name in op.desc.input_arg_names(): current_output_names.add(name) - if not block.desc.find_var(name.encode()) \ - and parent_block.desc.find_var(name.encode()): + if not block.desc.find_var( + name.encode() + ) and parent_block.desc.find_var(name.encode()): parent_block_output_names.add(name) block = parent_block @@ -1945,8 +2147,11 @@ def _find_no_grad_vars(block, op_path, targets, no_grad_set): # If the op has sub_block, it is too complicated to find the correct no_grad_var. if not op.has_attr("sub_block"): for out_var in op.desc.output_arg_names(): - if out_var not in output_names and out_var not in op.desc.input_arg_names( - ) and not block.vars[out_var].stop_gradient: + if ( + out_var not in output_names + and out_var not in op.desc.input_arg_names() + and not block.vars[out_var].stop_gradient + ): no_grad_var.append(out_var) for name in op.desc.input_arg_names(): if name not in no_grad_set: @@ -1954,12 +2159,9 @@ def _find_no_grad_vars(block, op_path, targets, no_grad_set): return set(no_grad_var) -def _find_op_path_(block, - targets, - inputs, - no_grad_set, - op_path_dict=None, - is_while=False): +def _find_op_path_( + block, targets, inputs, no_grad_set, op_path_dict=None, is_while=False +): """ It is used to find the grad path in `block`. 
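The hunks above rework the body of paddle.static.append_backward, the public entry point for the helpers in this file. A minimal usage sketch in the spirit of the docstring examples already present in that function (the layer shapes and names are illustrative only):

```python
import paddle

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
    y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
    pred = paddle.static.nn.fc(x, size=1)
    loss = paddle.mean(paddle.nn.functional.square_error_cost(pred, y))
    # Appends the grad ops assembled by _append_backward_ops_ /
    # _append_backward_vars_ and returns (parameter, gradient) pairs.
    params_grads = paddle.static.append_backward(loss)
    for p, g in params_grads:
        print(p.name, '->', g.name)  # e.g. fc_0.w_0 -> fc_0.w_0@GRAD
```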
@@ -1986,9 +2188,9 @@ def _find_op_path_(block, # All the inputs of the block are used if inputs is empty, if inputs: for i, op in enumerate(block.ops): - if _some_in_set_(op.desc.input_arg_names(), - input_names) and core.has_non_empty_grad_op_maker( - op.type): + if _some_in_set_( + op.desc.input_arg_names(), input_names + ) and core.has_non_empty_grad_op_maker(op.type): for name in op.desc.output_arg_names(): if name not in no_grad_set: input_names.add(name) @@ -2000,14 +2202,14 @@ def _find_op_path_(block, sub_block_id = op._block_attr_id("sub_block") sub_block = block.program.block(sub_block_id) sub_block_target_names = output_names & set(op.output_arg_names) - sub_block_path = _get_sub_block_path(sub_block, op, set(), - op_path_dict, - sub_block_target_names) + sub_block_path = _get_sub_block_path( + sub_block, op, set(), op_path_dict, sub_block_target_names + ) op_path_dict[sub_block_id] = sub_block_path - if _some_in_set_(op.desc.output_arg_names(), - output_names) and core.has_non_empty_grad_op_maker( - op.type): + if _some_in_set_( + op.desc.output_arg_names(), output_names + ) and core.has_non_empty_grad_op_maker(op.type): for name in op.desc.input_arg_names(): if name not in no_grad_set: output_names.add(name) @@ -2018,8 +2220,9 @@ def _find_op_path_(block, # If block is while block, dealing with op specifically again. # TODO(liym27): Consider special types of ops. for i, op in reversed(list(enumerate(block.ops))): - if relevant_op_flags[i] == False \ - and _some_in_set_(op.desc.output_arg_names(), output_names): + if relevant_op_flags[i] == False and _some_in_set_( + op.desc.output_arg_names(), output_names + ): relevant_op_flags[i] = True op_path = [ @@ -2072,7 +2275,8 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): if len(targets) != len(target_gradients): raise ValueError( - "Should have the same number of target_gradients as targets") + "Should have the same number of target_gradients as targets" + ) if no_grad_set is None: no_grad_set = set() @@ -2093,16 +2297,24 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): if grad is None: target_shape = target.name + '_shape' block.desc.append_op().copy_from( - _create_op_desc_("shape", {'Input': [target.name]}, - {"Out": [target_shape]}, {})) + _create_op_desc_( + "shape", + {'Input': [target.name]}, + {"Out": [target_shape]}, + {}, + ) + ) input_grad_names_set.add(target_shape) - op_desc = _create_op_desc_("fill_constant", - {"ShapeTensor": [target_shape]}, - {"Out": [grad_name]}, { - "shape": target.shape, - "value": 1.0, - "dtype": target.dtype, - }) + op_desc = _create_op_desc_( + "fill_constant", + {"ShapeTensor": [target_shape]}, + {"Out": [grad_name]}, + { + "shape": target.shape, + "value": 1.0, + "dtype": target.dtype, + }, + ) block.desc.append_op().copy_from(op_desc) input_grad_names_set.add(grad_name) @@ -2111,8 +2323,9 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): raise ValueError("all targets must be in the same block") if target.shape != grad.shape: raise ValueError( - "The shapes of target and grad are different: %s %s" % - (target.name, grad.name)) + "The shapes of target and grad are different: %s %s" + % (target.name, grad.name) + ) target_grad_map[_append_grad_suffix_(target.name)] = grad.name input_grad_names_set.add(grad.name) rename_var_map[grad_name] = grad.name @@ -2131,26 +2344,30 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): block_no_grad_set = set(map(_strip_grad_suffix_, 
no_grad_dict[0])) op_path_dict = dict() - op_path = _find_op_path_(block, targets, inputs, block_no_grad_set, - op_path_dict) + op_path = _find_op_path_( + block, targets, inputs, block_no_grad_set, op_path_dict + ) # find no grad var by op_path - no_grad_vars = _find_no_grad_vars(block, op_path, targets, - block_no_grad_set) + no_grad_vars = _find_no_grad_vars( + block, op_path, targets, block_no_grad_set + ) block_no_grad_set.update(no_grad_vars) no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set))) grad_to_var = dict() grad_info_map = dict() - _append_backward_ops_(block, - op_path, - targets, - block, - no_grad_dict, - grad_to_var, - input_grad_names_set=input_grad_names_set, - op_path_dict=op_path_dict, - rename_var_map=rename_var_map) + _append_backward_ops_( + block, + op_path, + targets, + block, + no_grad_dict, + grad_to_var, + input_grad_names_set=input_grad_names_set, + op_path_dict=op_path_dict, + rename_var_map=rename_var_map, + ) # Because calc_gradient may be called multiple times, # we need rename the internal gradient variables so that they have @@ -2214,13 +2431,24 @@ def gradients(targets, inputs, target_gradients=None, no_grad_set=None): z = paddle.static.gradients([y], x) print(z) # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)] """ - check_type(targets, 'targets', (framework.Variable, list, tuple), - 'paddle.static.gradients') - check_type(inputs, 'inputs', (framework.Variable, list, tuple), - 'paddle.static.gradients') - check_type(target_gradients, 'target_gradients', - (framework.Variable, list, tuple, type(None)), - 'paddle.static.gradients') + check_type( + targets, + 'targets', + (framework.Variable, list, tuple), + 'paddle.static.gradients', + ) + check_type( + inputs, + 'inputs', + (framework.Variable, list, tuple), + 'paddle.static.gradients', + ) + check_type( + target_gradients, + 'target_gradients', + (framework.Variable, list, tuple, type(None)), + 'paddle.static.gradients', + ) outs = calc_gradient(targets, inputs, target_gradients, no_grad_set) return _as_list(outs) @@ -2263,10 +2491,18 @@ def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None): print(opt_ops) """ - check_type(program, 'program', paddle.fluid.Program, - 'paddle.static.gradients_with_optimizer') - check_type(optimizer, 'optimizer', paddle.optimizer.Optimizer, - 'paddle.static.gradients_with_optimizer') + check_type( + program, + 'program', + paddle.fluid.Program, + 'paddle.static.gradients_with_optimizer', + ) + check_type( + optimizer, + 'optimizer', + paddle.optimizer.Optimizer, + 'paddle.static.gradients_with_optimizer', + ) if inputs is None or outputs is None: in_set = set() @@ -2285,9 +2521,12 @@ def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None): grads = gradients(outputs, inputs) with program_guard(program, None): - pram_grads = [(pram, grad) for pram, grad in zip(inputs, grads) - if isinstance(pram, paddle.fluid.framework.Parameter) - and grad is not None] + pram_grads = [ + (pram, grad) + for pram, grad in zip(inputs, grads) + if isinstance(pram, paddle.fluid.framework.Parameter) + and grad is not None + ] optimize_ops = optimizer.apply_gradients(pram_grads) diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py index fd5c457946884719231cc04b1f75951dacb190e4..93ad5564cb27b4bc3d5fe153994626d677788c2b 100644 --- a/python/paddle/fluid/clip.py +++ b/python/paddle/fluid/clip.py @@ -29,8 +29,11 @@ from .framework import default_main_program from paddle import _C_ops, 
_legacy_C_ops __all__ = [ - 'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue', - 'ClipGradByNorm', 'ClipGradByGlobalNorm' + 'set_gradient_clip', + 'ErrorClipByValue', + 'ClipGradByValue', + 'ClipGradByNorm', + 'ClipGradByGlobalNorm', ] _clip_by_global_norm_using_mp_type_flag = False @@ -49,9 +52,10 @@ def _clip_by_global_norm_using_mp_type(*args): def _cast_to_mp_type_if_enabled(x): - if (x.dtype == core.VarDesc.VarType.FP16 - or x.dtype == core.VarDesc.VarType.BF16 - ) and _clip_by_global_norm_using_mp_type(): + if ( + x.dtype == core.VarDesc.VarType.FP16 + or x.dtype == core.VarDesc.VarType.BF16 + ) and _clip_by_global_norm_using_mp_type(): return x.astype(core.VarDesc.VarType.FP32) else: return x @@ -63,8 +67,11 @@ def _squared_l2_norm(x): """ x = _cast_to_mp_type_if_enabled(x) - if core.is_compiled_with_xpu( - ) or x.dtype == core.VarDesc.VarType.FP16 or x.dtype == core.VarDesc.VarType.BF16: + if ( + core.is_compiled_with_xpu() + or x.dtype == core.VarDesc.VarType.FP16 + or x.dtype == core.VarDesc.VarType.BF16 + ): square = layers.square(x) sum_square = layers.reduce_sum(square) return sum_square @@ -86,7 +93,6 @@ def _squared_l2_norm(x): class BaseErrorClipAttr(object): - def __str__(self): raise NotImplementedError() @@ -161,8 +167,9 @@ def error_clip_callback(block, context): for grad_n in [n for n in op_desc.output_arg_names() if n in grad_to_var]: fwd_var = block._var_recursive(grad_to_var[grad_n]) error_clip = getattr(fwd_var, "error_clip", None) - if not (error_clip is None - or isinstance(error_clip, BaseErrorClipAttr)): + if not ( + error_clip is None or isinstance(error_clip, BaseErrorClipAttr) + ): raise TypeError( "Variable's error_clip should be an instance of BaseErrorClipAttr or None." ) @@ -171,7 +178,6 @@ def error_clip_callback(block, context): class ClipGradBase(object): - def __init__(self): super(ClipGradBase, self).__init__() @@ -194,7 +200,8 @@ class ClipGradBase(object): warnings.warn( "'set_gradient_clip' will be ineffective, because you have " "set 'need_clip' in 'ParamAttr'. So, 'set_gradient_clip' " - "is redundant and you can remove it.") + "is redundant and you can remove it." 
+ ) break return self._static_clip(params_grads) @@ -249,7 +256,7 @@ class ClipGradByValue(ClipGradBase): def __init__(self, max, min=None): super(ClipGradByValue, self).__init__() if min is None: - assert (max > 0.0) + assert max > 0.0 min = -max self.max = float(max) self.min = float(min) @@ -465,10 +472,9 @@ class ClipGradByGlobalNorm(ClipGradBase): sdg.step() """ - def __init__(self, - clip_norm, - group_name="default_group", - auto_skip_clip=False): + def __init__( + self, clip_norm, group_name="default_group", auto_skip_clip=False + ): super(ClipGradByGlobalNorm, self).__init__() self.clip_norm = float(clip_norm) self.group_name = group_name @@ -500,7 +506,10 @@ class ClipGradByGlobalNorm(ClipGradBase): merge_grad = layers.get_tensor_from_selected_rows(merge_grad) sum_square = _squared_l2_norm(merge_grad) - if sum_square.dtype == core.VarDesc.VarType.FP16 or sum_square.dtype == core.VarDesc.VarType.BF16: + if ( + sum_square.dtype == core.VarDesc.VarType.FP16 + or sum_square.dtype == core.VarDesc.VarType.BF16 + ): sum_square_list_fp16.append(sum_square) elif sum_square.dtype == core.VarDesc.VarType.FP32: sum_square_list_fp32.append(sum_square) @@ -508,8 +517,12 @@ class ClipGradByGlobalNorm(ClipGradBase): sum_square_list.append(sum_square) # all parameters have been filterd out - if len(sum_square_list) + len(sum_square_list_fp16) + len( - sum_square_list_fp32) == 0: + if ( + len(sum_square_list) + + len(sum_square_list_fp16) + + len(sum_square_list_fp32) + == 0 + ): return params_grads sum_dtype = 'float64' if len(sum_square_list) > 0 else "float32" @@ -528,22 +541,23 @@ class ClipGradByGlobalNorm(ClipGradBase): global_norm_var.append(global_norm_var_fp64) global_norm_var = paddle.add_n(global_norm_var) global_norm_var = layers.sqrt(global_norm_var) - max_global_norm = layers.fill_constant(shape=[1], - dtype=global_norm_var.dtype, - value=self.clip_norm) + max_global_norm = layers.fill_constant( + shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm + ) need_clip = False if not self.auto_skip_clip: # always apply clip need_clip = True - clip_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=global_norm_var, - y=max_global_norm)) + clip_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max(x=global_norm_var, y=max_global_norm), + ) elif global_norm_var > max_global_norm: # only when global_norm_var > max_global_norm, grad need clip need_clip = True - clip_var = layers.elementwise_div(x=max_global_norm, - y=global_norm_var) + clip_var = layers.elementwise_div( + x=max_global_norm, y=global_norm_var + ) for p, g in params_grads: if g is None: @@ -553,8 +567,11 @@ class ClipGradByGlobalNorm(ClipGradBase): continue # TODO(wangxi): use inplace elementwise_mul if need_clip: - clip_input = (clip_var.astype(g.dtype) - if clip_var.dtype != g.dtype else clip_var) + clip_input = ( + clip_var.astype(g.dtype) + if clip_var.dtype != g.dtype + else clip_var + ) new_grad = layers.elementwise_mul(g, clip_input) params_and_grads.append((p, new_grad)) else: @@ -578,7 +595,8 @@ class ClipGradByGlobalNorm(ClipGradBase): if g.type == core.VarDesc.VarType.SELECTED_ROWS: merge_grad = layers.merge_selected_rows(g) merge_grad = layers.get_tensor_from_selected_rows( - merge_grad) + merge_grad + ) sum_square = _squared_l2_norm(merge_grad) if sum_square.dtype == core.VarDesc.VarType.FP16: sum_square_list_fp16.append(sum_square) @@ -588,8 +606,12 @@ class ClipGradByGlobalNorm(ClipGradBase): sum_square_list.append(sum_square) # all parameters have been 
filterd out - if len(sum_square_list) + len(sum_square_list_fp16) + len( - sum_square_list_fp32) == 0: + if ( + len(sum_square_list) + + len(sum_square_list_fp16) + + len(sum_square_list_fp32) + == 0 + ): return params_grads with p.block.program._optimized_guard([p, g]): @@ -598,10 +620,14 @@ class ClipGradByGlobalNorm(ClipGradBase): global_norm_var = [] if len(sum_square_list_fp16) > 0: global_norm_var_fp16 = layers.sums(sum_square_list_fp16) - if sum_square_list_fp32 or sum_square_list or not _allow_pure_fp16_global_norm_clip( + if ( + sum_square_list_fp32 + or sum_square_list + or not _allow_pure_fp16_global_norm_clip() ): global_norm_var.append( - global_norm_var_fp16.astype(sum_dtype)) + global_norm_var_fp16.astype(sum_dtype) + ) else: global_norm_var.append(global_norm_var_fp16) if len(sum_square_list_fp32) > 0: @@ -610,23 +636,28 @@ class ClipGradByGlobalNorm(ClipGradBase): global_norm_var.append(global_norm_var_fp32) else: global_norm_var.append( - global_norm_var_fp32.astype(sum_dtype)) + global_norm_var_fp32.astype(sum_dtype) + ) if len(sum_square_list) > 0: # fp64 global_norm_var_other_dtype = layers.sums(sum_square_list) global_norm_var.append(global_norm_var_other_dtype) - global_norm_var = layers.sums(global_norm_var) if len( - global_norm_var) > 1 else global_norm_var[0] + global_norm_var = ( + layers.sums(global_norm_var) + if len(global_norm_var) > 1 + else global_norm_var[0] + ) global_norm_var = layers.sqrt(x=global_norm_var) max_global_norm = layers.fill_constant( - shape=[1], - dtype=global_norm_var.dtype, - value=self.clip_norm) - scale_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=max_global_norm, - y=global_norm_var)) + shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm + ) + scale_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max( + x=max_global_norm, y=global_norm_var + ), + ) param_new_grad_name_dict = dict() for p, g in params_grads: if g is None: @@ -638,29 +669,32 @@ class ClipGradByGlobalNorm(ClipGradBase): with p.block.program._optimized_guard([p, g]): new_g = _cast_to_mp_type_if_enabled(g) # inplace - scale_input = (scale_var.astype('float16') if - new_g.dtype == core.VarDesc.VarType.FP16 and - scale_var.dtype != core.VarDesc.VarType.FP16 - else scale_var) + scale_input = ( + scale_var.astype('float16') + if new_g.dtype == core.VarDesc.VarType.FP16 + and scale_var.dtype != core.VarDesc.VarType.FP16 + else scale_var + ) # NOTE(Yuang Liu): For pure dp with gradient merge, the p and g # will be in different blocks with the gradient clip related ops. # We need to handle the correct block, otherwise will encounter # a 'NotFoundError' during compile time. 
block = default_main_program().current_block() - block.append_op(type='elementwise_mul', - inputs={ - 'X': new_g, - 'Y': scale_input - }, - outputs={'Out': new_g}) + block.append_op( + type='elementwise_mul', + inputs={'X': new_g, 'Y': scale_input}, + outputs={'Out': new_g}, + ) if new_g is not g: - block.append_op(type='cast', - inputs={'X': new_g}, - outputs={'Out': g}, - attrs={ - 'in_dtype': new_g.dtype, - 'out_dtype': g.dtype - }) + block.append_op( + type='cast', + inputs={'X': new_g}, + outputs={'Out': g}, + attrs={ + 'in_dtype': new_g.dtype, + 'out_dtype': g.dtype, + }, + ) param_new_grad_name_dict[p.name] = g.name params_and_grads.append((p, g)) @@ -673,7 +707,8 @@ class ClipGradByGlobalNorm(ClipGradBase): context[self.group_name] = [] context[self.group_name + "_clip_value"] = self.clip_norm context[self.group_name + "_clip"] = layers.fill_constant( - shape=[1], dtype=grad.dtype, value=self.clip_norm) + shape=[1], dtype=grad.dtype, value=self.clip_norm + ) else: if not self.clip_norm == context[self.group_name + "_clip_value"]: raise ValueError( @@ -696,20 +731,19 @@ class ClipGradByGlobalNorm(ClipGradBase): group_norm_var = layers.sums(input=self.context[self.group_name]) group_norm_var = layers.sqrt(x=group_norm_var) clip_var = self.context[self.group_name + "_clip"] - group_scale_var = layers.elementwise_div(x=clip_var, - y=layers.elementwise_max( - x=clip_var, - y=group_norm_var)) - assert group_scale_var.shape == (1, ) + group_scale_var = layers.elementwise_div( + x=clip_var, + y=layers.elementwise_max(x=clip_var, y=group_norm_var), + ) + assert group_scale_var.shape == (1,) self.context[group_scale_name] = group_scale_var # inplace - param.block.append_op(type='elementwise_mul', - inputs={ - 'X': grad, - 'Y': self.context[group_scale_name] - }, - outputs={'Out': grad}) + param.block.append_op( + type='elementwise_mul', + inputs={'X': grad, 'Y': self.context[group_scale_name]}, + outputs={'Out': grad}, + ) return param, grad @@ -804,22 +838,26 @@ def set_gradient_clip(clip, param_list=None, program=None): """ - warnings.warn("Caution! 'set_gradient_clip' is not recommended " - "and may be deprecated in future! " - "We recommend a new strategy: set 'grad_clip' " - "when initializing the 'optimizer'. " - "This method can reduce the mistakes, please " - "refer to documention of 'optimizer'.") + warnings.warn( + "Caution! 'set_gradient_clip' is not recommended " + "and may be deprecated in future! " + "We recommend a new strategy: set 'grad_clip' " + "when initializing the 'optimizer'. " + "This method can reduce the mistakes, please " + "refer to documention of 'optimizer'." + ) if not isinstance(clip, ClipGradBase): raise TypeError( - "'clip' should be an instance of ClipGradBase's derived class") + "'clip' should be an instance of ClipGradBase's derived class" + ) if program is None: program = framework.default_main_program() for op in program.block(0).ops: if 'op_namescope' in op.all_attrs() and "optimizer" in op.attr( - "op_namescope"): + "op_namescope" + ): warnings.warn( "'minimize' has been invoked before, this will make 'set_gradient_clip' " "be ineffective! Please invoke 'set_gradient_clip' before 'minimize'." 
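set_gradient_clip above explicitly recommends passing a grad_clip object when constructing the optimizer instead of calling it. A short sketch of that recommended pattern in the default dynamic-graph mode, using the public paddle.nn alias of the ClipGradByGlobalNorm class reworked in these clip.py hunks:

```python
import paddle

model = paddle.nn.Linear(10, 1)
# Rescale all gradients so that their global L2 norm does not exceed 1.0.
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
opt = paddle.optimizer.SGD(learning_rate=0.1,
                           parameters=model.parameters(),
                           grad_clip=clip)

x = paddle.rand([4, 10])
loss = paddle.mean(model(x))
loss.backward()
opt.step()        # clipping is applied inside step(), before the SGD update
opt.clear_grad()
```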
@@ -844,14 +882,16 @@ def append_gradient_clip_ops(param_grads): for p, g in param_grads: if g is None: continue - with p.block.program._optimized_guard( - [p, g]), framework.name_scope('gradient_clip'): + with p.block.program._optimized_guard([p, g]), framework.name_scope( + 'gradient_clip' + ): clip_attr = getattr(p, 'gradient_clip_attr', None) if clip_attr is None: return param_grads if not isinstance(clip_attr, ClipGradBase): raise TypeError( - "clip attribute should be an instance of GradientClipBase") + "clip attribute should be an instance of GradientClipBase" + ) clip_attr._process_context(context=context, param=p, grad=g) @@ -860,8 +900,9 @@ def append_gradient_clip_ops(param_grads): for p, g in param_grads: if g is None: continue - with p.block.program._optimized_guard( - [p, g]), framework.name_scope('gradient_clip'): + with p.block.program._optimized_guard([p, g]), framework.name_scope( + 'gradient_clip' + ): param, new_grad = clip_attr._create_operators(param=p, grad=g) param_new_grad_name_dict[param.name] = new_grad.name res.append([param, new_grad]) @@ -885,12 +926,16 @@ def _correct_clip_op_role_var(params_grads, param_new_grad_name_dict): continue block_id_list.append(block_id) for op in param.block.program.global_block().ops: - if op.has_attr("op_namescope") and "gradient_clip" in op.attr( - "op_namescope") and op.attr('op_role_var'): + if ( + op.has_attr("op_namescope") + and "gradient_clip" in op.attr("op_namescope") + and op.attr('op_role_var') + ): param_name = op.attr('op_role_var')[0] if param_name in param_new_grad_name_dict: correct_p_g = [ - param_name, param_new_grad_name_dict[param_name] + param_name, + param_new_grad_name_dict[param_name], ] op._set_attr('op_role_var', correct_p_g) diff --git a/python/paddle/fluid/communicator.py b/python/paddle/fluid/communicator.py index 4704628f2aee8a719ea4409346bd106087a11c74..be3fb5ea22debb39f54c3cbcd159381bcdf5b13e 100755 --- a/python/paddle/fluid/communicator.py +++ b/python/paddle/fluid/communicator.py @@ -27,6 +27,7 @@ # limitations under the License. from .executor import global_scope + """ Communicator is used for async distribute training in distribute_transpiler mode. It's a wrapper of a cpp class Communicator and should be used inside fleet API. @@ -38,7 +39,6 @@ __all__ = ['Communicator', 'FLCommunicator', 'LargeScaleKV'] class Communicator(object): - def __init__(self, mode, kwargs=None, envs=None): """ Communicator is used for async distribute training in distribute_transpiler mode. 
@@ -69,7 +69,8 @@ class Communicator(object): else: if mode == DistributedMode.SYNC: envs["pserver_endpoints"] = ','.join( - kwargs["pserver_endpoints"]) + kwargs["pserver_endpoints"] + ) envs["trainers"] = str(kwargs["trainers"]) envs["trainer_id"] = str(kwargs["trainer_id"]) @@ -93,26 +94,32 @@ class Communicator(object): self.send_ctx_ = None self.recv_ctx_ = None - def init_with_ctx(self, - send_ctx, - recv_ctx, - proto_txt, - unit64_hosts, - scope=None): + def init_with_ctx( + self, send_ctx, recv_ctx, proto_txt, unit64_hosts, scope=None + ): if scope == None: scope = global_scope() - self.communicator_ = core.DistCommunicator(self.mode, proto_txt, - unit64_hosts, send_ctx, - recv_ctx, scope, self.envs) + self.communicator_ = core.DistCommunicator( + self.mode, + proto_txt, + unit64_hosts, + send_ctx, + recv_ctx, + scope, + self.envs, + ) self.send_ctx_ = send_ctx self.recv_ctx_ = recv_ctx - def create_client_to_client_connection(self, - pserver_timeout_ms=500000, - pserver_connect_timeout_ms=10000, - max_retry=3): + def create_client_to_client_connection( + self, + pserver_timeout_ms=500000, + pserver_connect_timeout_ms=10000, + max_retry=3, + ): self.communicator_.create_client_to_client_connection( - pserver_timeout_ms, pserver_connect_timeout_ms, max_retry) + pserver_timeout_ms, pserver_connect_timeout_ms, max_retry + ) def get_client_info(self): return self.communicator_.get_client_info() @@ -209,7 +216,6 @@ class Communicator(object): class FLCommunicator(Communicator): ## only for coordinator - def __init__(self, ps_hosts, kwargs=None): mode = None super(FLCommunicator, self).__init__(mode, kwargs) @@ -221,8 +227,9 @@ class FLCommunicator(Communicator): ## only for coordinator def start_coordinator(self, self_endpoint, trainer_endpoints): if self.communicator_ != None: - self.communicator_.start_coordinator(self_endpoint, - trainer_endpoints) + self.communicator_.start_coordinator( + self_endpoint, trainer_endpoints + ) return def save_fl_strategy(self, mp): @@ -240,7 +247,6 @@ class FLCommunicator(Communicator): ## only for coordinator class LargeScaleKV(object): - def __init__(self): self.scale_kv = core.LargeScaleKV() @@ -255,10 +261,10 @@ class LargeScaleKV(object): class HeterClient(object): - def __init__(self, endpoint, previous_endpoint, trainer_id): - self.heter_client_ = core.HeterClient(endpoint, previous_endpoint, - trainer_id) + self.heter_client_ = core.HeterClient( + endpoint, previous_endpoint, trainer_id + ) def stop(self): self.heter_client_.stop() diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 7ac8e4c30ab69ad99a2acacf64c786036a106efb..1adef41f86a35065bf6f44b48d50875ff9aa71ed 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -22,8 +22,11 @@ from .framework import cuda_places, cpu_places, xpu_places from . 
import core __all__ = [ - 'CompiledProgram', 'ExecutionStrategy', 'BuildStrategy', - 'IpuCompiledProgram', 'IpuStrategy' + 'CompiledProgram', + 'ExecutionStrategy', + 'BuildStrategy', + 'IpuCompiledProgram', + 'IpuStrategy', ] ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy @@ -40,8 +43,7 @@ def _place_obj(place): def _is_pserver_mode(main_program): - main = main_program if main_program \ - else framework.default_main_program() + main = main_program if main_program else framework.default_main_program() for op in main.global_block().ops: if op.type in ["send", "recv"]: return True @@ -50,8 +52,11 @@ def _is_pserver_mode(main_program): def _has_backward_op(graph): for node in graph.nodes(): - if node.is_op() and node.op() is not None and \ - node.op().type().endswith("_grad"): + if ( + node.is_op() + and node.op() is not None + and node.op().type().endswith("_grad") + ): return True return False @@ -60,7 +65,8 @@ def _prune_feed_ops(program): # prune the feed ops in the program. pop_idx = [] for i, op in enumerate(program.global_block().ops): - if op.type == "feed": pop_idx.append(i) + if op.type == "feed": + pop_idx.append(i) for index in pop_idx[::-1]: program.global_block()._remove_op(index) @@ -69,8 +75,9 @@ def _has_optimize_op(block): for op in block.ops: op_maker = core.op_proto_and_checker_maker optimize = core.op_proto_and_checker_maker.OpRole.Optimize - if op_maker.kOpRoleVarAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize): + if op_maker.kOpRoleVarAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(optimize): return True return False @@ -93,7 +100,8 @@ def _should_broadcast_or_not_exists(program, var_name): if var is None: return True is_distributed = getattr(var, '_is_distributed', False) or getattr( - var, 'is_distributed', False) + var, 'is_distributed', False + ) return not is_distributed @@ -159,7 +167,8 @@ class CompiledProgram(object): else: raise TypeError( "The type of program_to_graph parameter is wrong, expected Graph or Program, but received %s" - % type(program_or_graph)) + % type(program_or_graph) + ) self._scope = None self._place = None @@ -173,12 +182,14 @@ class CompiledProgram(object): self._build_strategy = build_strategy self._exec_strategy = None - def with_data_parallel(self, - loss_name=None, - build_strategy=None, - exec_strategy=None, - share_vars_from=None, - places=None): + def with_data_parallel( + self, + loss_name=None, + build_strategy=None, + exec_strategy=None, + share_vars_from=None, + places=None, + ): """ This interface is used to transform the input Program or Graph to a multi-graph to run the model in data parallel mode. Users can use the build_strategy and @@ -284,14 +295,19 @@ class CompiledProgram(object): feed={"X": test_data}, fetch_list=[loss.name]) """ - assert not self._is_data_parallel, "Already compiled with parallel, cannot be recompiled." - assert not self._is_inference, "Cannot compile with both data parallel and inference." + assert ( + not self._is_data_parallel + ), "Already compiled with parallel, cannot be recompiled." + assert ( + not self._is_inference + ), "Cannot compile with both data parallel and inference." self._is_data_parallel = True # FIXME(zcd): Currently, the build_strategy can be set during creating # CompiledProgram or calling with_data_parallel, and it may be confusing, # but in the long run, we should set up build_strategy only when creating # CompiledProgram, and exec_strategy should be deprecated. 
- if build_strategy is not None: self._build_strategy = build_strategy + if build_strategy is not None: + self._build_strategy = build_strategy self._exec_strategy = exec_strategy self._loss_name = loss_name self._share_vars_from = share_vars_from @@ -301,7 +317,9 @@ class CompiledProgram(object): self._places = _get_paddle_place(places) if _has_backward_op(self._graph): - assert self._loss_name is not None, "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains backward part." + assert ( + self._loss_name is not None + ), "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains backward part." if self._places is not None: if not isinstance(self._places, (list, tuple)): @@ -310,20 +328,26 @@ class CompiledProgram(object): return self def _with_inference_optimize(self, config): - """ Add inference optimize + """Add inference optimize Args: config: instance of `NativeConfig` or `AnalysisConfig` to create predictor Returns: self """ - assert not self._is_data_parallel, "Cannot compile with both data parallel and inference" - assert not self._is_inference, "Already compiled with inference, cannot be recompiled." - - assert any([ - isinstance(config, InferNativeConfig), - isinstance(config, InferAnalysisConfig) - ]) + assert ( + not self._is_data_parallel + ), "Cannot compile with both data parallel and inference" + assert ( + not self._is_inference + ), "Already compiled with inference, cannot be recompiled." + + assert any( + [ + isinstance(config, InferNativeConfig), + isinstance(config, InferAnalysisConfig), + ] + ) self._is_inference = True self._infer_config = config return self @@ -340,18 +364,23 @@ class CompiledProgram(object): if not self._share_vars_from._is_data_parallel: raise ValueError( "The shared Program is not data parallel, cannot " - "share variables from it.") + "share variables from it." + ) if self._share_vars_from._executor is None: raise ValueError( "The shared Program is not compiled and executed, so there is no " - "variables to share.") + "variables to share." + ) self._local_scopes = self._share_vars_from._executor.local_scopes() else: assert scope is not None, "" self._local_scopes = [] - assert isinstance(places, tuple) or isinstance(places, list), \ - "Currently , The places type can only be list or tuple, but the input type is {}.".format(type(places)) + assert isinstance(places, tuple) or isinstance( + places, list + ), "Currently , The places type can only be list or tuple, but the input type is {}.".format( + type(places) + ) if self._build_strategy is None: self._build_strategy = BuildStrategy() @@ -372,41 +401,61 @@ class CompiledProgram(object): else: self._exec_strategy.num_threads = len(places) * 2 - if "FLAGS_use_cinn" in core.globals() and core.globals( - )["FLAGS_use_cinn"] and self._exec_strategy.num_threads != 1: - warnings.warn("At present, when CINN is turned on, each process can " \ - "only contain one thread, so reset the number of threads to 1 here.") + if ( + "FLAGS_use_cinn" in core.globals() + and core.globals()["FLAGS_use_cinn"] + and self._exec_strategy.num_threads != 1 + ): + warnings.warn( + "At present, when CINN is turned on, each process can " + "only contain one thread, so reset the number of threads to 1 here." 
+            )
             self._exec_strategy.num_threads = 1

         if self._build_strategy.num_trainers > 1:
-            assert self._is_data_parallel, \
-                "If you use multi-trainer to train the model, you should use "\
+            assert self._is_data_parallel, (
+                "If you use multi-trainer to train the model, you should use "
                 "the data parallel model, i.e. calling with_data_parallel function."
+            )

         # TODO(wuyi): trainer endpoings should be passed in through
         # build_strategy, not program.xxx.
         # TODO(gongwb): let user to set them once.
-        if self._program and self._build_strategy.num_trainers > 1 and \
-                self._program._trainers_endpoints:
+        if (
+            self._program
+            and self._build_strategy.num_trainers > 1
+            and self._program._trainers_endpoints
+        ):
             tps = self._program._trainers_endpoints

             assert self._build_strategy.num_trainers == len(
-                tps), "The trainer numbers is not equal to endpoint numbers."
+                tps
+            ), "The trainer numbers is not equal to endpoint numbers."
             self._build_strategy.trainers_endpoints = tps

         if self._program:
             self._build_strategy.nccl_comm_num = self._program._nccl_comm_num
-            self._build_strategy.use_hierarchical_allreduce = self._program._use_hierarchical_allreduce
-            self._build_strategy.hierarchical_allreduce_inter_nranks = self._program._hierarchical_allreduce_inter_nranks
+            self._build_strategy.use_hierarchical_allreduce = (
+                self._program._use_hierarchical_allreduce
+            )
+            self._build_strategy.hierarchical_allreduce_inter_nranks = (
+                self._program._hierarchical_allreduce_inter_nranks
+            )

         if self._build_strategy.sync_batch_norm:
             self._build_strategy.enable_sequential_execution = True

         if self._program is not None and self._program._enable_dgc:
-            assert self._exec_strategy._use_device == DeviceType.CUDA, "DGC only used under CUDA environment."
-            assert self._build_strategy.num_trainers * len(
-                places) > 1, "DGC is not avaliable for single card training."
-            assert self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "DGC \
+            assert (
+                self._exec_strategy._use_device == DeviceType.CUDA
+            ), "DGC only used under CUDA environment."
+            assert (
+                self._build_strategy.num_trainers * len(places) > 1
+            ), "DGC is not available for single card training."
+            assert (
+                self._build_strategy.reduce_strategy
+                == BuildStrategy.ReduceStrategy.AllReduce
+            ), "DGC \
                 only can be used for AllReduce BuildStrategy."

             # DGC doesn't support fuse for now, close fuse.
@@ -414,11 +463,17 @@ class CompiledProgram(object): self._persistable_vars = [] for node in self._graph.nodes(): - if node.is_var() and node.var() is not None and node.var().persistable() and \ - node.var().type() != core.VarDesc.VarType.RAW: + if ( + node.is_var() + and node.var() is not None + and node.var().persistable() + and node.var().type() != core.VarDesc.VarType.RAW + ): name = node.name() - if self._program is not None and _should_broadcast_or_not_exists( - self._program, name): + if ( + self._program is not None + and _should_broadcast_or_not_exists(self._program, name) + ): self._persistable_vars.append(node.name()) places = list(map(_place_obj, places)) @@ -429,11 +484,16 @@ class CompiledProgram(object): self._persistable_vars = list(set(self._persistable_vars)) self._persistable_vars.sort() - return core.ParallelExecutor(places, self._persistable_vars, - self._loss_name if self._loss_name else '', - self._scope, self._local_scopes, - self._exec_strategy, self._build_strategy, - self._graph) + return core.ParallelExecutor( + places, + self._persistable_vars, + self._loss_name if self._loss_name else '', + self._scope, + self._local_scopes, + self._exec_strategy, + self._build_strategy, + self._graph, + ) def _compile_inference(self): return core.create_paddle_predictor(self._infer_config) @@ -471,28 +531,31 @@ class CompiledProgram(object): # Todo(liym27):If optimizer is used in control flow, # training on multi-places is not supported now, will # be supported later. - if len(self._places) > 1 and \ - _has_optimizer_in_control_flow(self._program): + if len(self._places) > 1 and _has_optimizer_in_control_flow( + self._program + ): raise NotImplementedError( "If optimizer is used in control flow, " - "training on multi-places is not supported now.") + "training on multi-places is not supported now." + ) if isinstance(self._place, core.CUDAPlace): use_device = DeviceType.CUDA elif isinstance(self._place, core.XPUPlace): use_device = DeviceType.XPU else: use_device = DeviceType.CPU - self._executor = self._compile_data_parallel(use_device=use_device, - scope=self._scope, - places=self._places) + self._executor = self._compile_data_parallel( + use_device=use_device, scope=self._scope, places=self._places + ) return self def _get_places(self, place, place_list): - has_set_place = (place_list is not None) + has_set_place = place_list is not None if has_set_place: for p in place_list: - assert p._type() == place._type(), \ - "Place type not match. You may set wrong type of places." + assert ( + p._type() == place._type() + ), "Place type not match. You may set wrong type of places." else: if isinstance(place, core.CUDAPlace): place_list = cuda_places() @@ -515,9 +578,9 @@ class IpuDynamicPatcher(object): pass @staticmethod - def convert_concrete_program(ipu_strategy, - concrete_program, - class_instance=None): + def convert_concrete_program( + ipu_strategy, concrete_program, class_instance=None + ): """ Convert the ConcreteProgram to IPUConcreteProgram. 
""" @@ -553,7 +616,8 @@ class IpuDynamicPatcher(object): # copy the bias and filters for param_or_buffer in concrete_program.parameters: param_or_buffer_tensor = scope.var( - param_or_buffer.name).get_tensor() + param_or_buffer.name + ).get_tensor() src_tensor = param_or_buffer.value().get_tensor() param_or_buffer_tensor._share_data_with(src_tensor) @@ -576,13 +640,16 @@ class IpuDynamicPatcher(object): dtype=var_tmp.dtype, type=var_tmp.type, shape=var_tmp.shape, - belong_to_optimizer=True) + belong_to_optimizer=True, + ) device = optimizer._get_device_for_param(param_name) with device_guard(device): optimizer.helper.set_variable_initializer( - var, initializer=Constant(value=0.0)) + var, initializer=Constant(value=0.0) + ) param_or_lr_tensor = scope.find_var( - var_tmp.name).get_tensor() + var_tmp.name + ).get_tensor() optim_tensor = var.value().get_tensor() param_or_lr_tensor._share_data_with(optim_tensor) optimizer._accumulators[k][param_name] = var @@ -595,16 +662,19 @@ class IpuDynamicPatcher(object): to_fp16_var_names = paddle.static.amp.cast_model_to_fp16( concrete_program.main_program, amp_list, - use_fp16_guard=False) + use_fp16_guard=False, + ) paddle.static.amp.cast_parameters_to_fp16( paddle.CPUPlace(), concrete_program.main_program, - to_fp16_var_names=to_fp16_var_names) - - program = IpuCompiledProgram(concrete_program.main_program, - ipu_strategy=ipu_strategy, - scope=scope).compile( - feed_list, fetch_list) + to_fp16_var_names=to_fp16_var_names, + ) + + program = IpuCompiledProgram( + concrete_program.main_program, + ipu_strategy=ipu_strategy, + scope=scope, + ).compile(feed_list, fetch_list) return program main_program = func_compile() @@ -613,7 +683,7 @@ class IpuDynamicPatcher(object): @staticmethod def patch_program_cache(ipu_strategy): - """ Monkey patch ProgramCache discriptor to support dynamic2static in IPU. + """Monkey patch ProgramCache discriptor to support dynamic2static in IPU. Args: ipu_strategy: The ipu_strategy used in dynamic graph. @@ -621,53 +691,70 @@ class IpuDynamicPatcher(object): Returns: None """ - from ..fluid.dygraph.dygraph_to_static.program_translator import ProgramCache - from ..fluid.dygraph.dygraph_to_static.program_translator import CacheKey + from ..fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramCache, + ) + from ..fluid.dygraph.dygraph_to_static.program_translator import ( + CacheKey, + ) from ..fluid.dygraph.dygraph_to_static import logging_utils - from ..fluid.dygraph.dygraph_to_static.program_translator import MAX_TRACED_PROGRAM_COUNT - from ..fluid.dygraph.dygraph_to_static.partial_program import partial_program_from + from ..fluid.dygraph.dygraph_to_static.program_translator import ( + MAX_TRACED_PROGRAM_COUNT, + ) + from ..fluid.dygraph.dygraph_to_static.partial_program import ( + partial_program_from, + ) old_getter = ProgramCache.__getitem__ def patch_getter(self, item): if not isinstance(item, CacheKey): raise ValueError( - 'type(item) should be CacheKey, but received %s' % - type(item).__name__) + 'type(item) should be CacheKey, but received %s' + % type(item).__name__ + ) item_id = hash(item) self._recent_key = item_id if item_id not in self._caches or ipu_strategy.need_compile: if item_id in self._caches: logging_utils.warn( - "ipu_strategy chances detected. Please sync weights.") + "ipu_strategy chances detected. Please sync weights." + ) if self._caches and not ipu_strategy.need_compile: logging_utils.warn( "dynamic2static on IPU doesn't support mutiple caches. 
Please make sure" - "dynamic inputs is not used.") + "dynamic inputs is not used." + ) concrete_program, _ = self._build_once(item) concrete_program = IpuDynamicPatcher.convert_concrete_program( - ipu_strategy, concrete_program, item.class_instance) + ipu_strategy, concrete_program, item.class_instance + ) - self._caches[item_id] = (concrete_program, - partial_program_from(concrete_program)) + self._caches[item_id] = ( + concrete_program, + partial_program_from(concrete_program), + ) # Note: raise warnings if number of traced program is more than `max_tracing_count` current_tracing_count = len(self._caches) if current_tracing_count > MAX_TRACED_PROGRAM_COUNT: logging_utils.warn( "Current traced program number: {} > `max_tracing_count`:{}. Too much cached programs will bring expensive overhead. " - "The reason may be: (1) passing tensors with different shapes, (2) passing python objects instead of tensors." - .format(current_tracing_count, - MAX_TRACED_PROGRAM_COUNT)) + "The reason may be: (1) passing tensors with different shapes, (2) passing python objects instead of tensors.".format( + current_tracing_count, MAX_TRACED_PROGRAM_COUNT + ) + ) return self._caches[item_id] setattr(ProgramCache, '__getitem__', patch_getter) IpuDynamicPatcher.patcher_cache.append( - [ProgramCache, '__getitem__', old_getter]) + [ProgramCache, '__getitem__', old_getter] + ) @staticmethod def patch_lr_scheduler(ipu_strategy): from paddle.optimizer.lr import LRScheduler + # For IPU dynamic graph usage, lr_var is not synced in executor as static mode do. # Manually set lr to ipu_strategy to update the lr. old_step = LRScheduler.step @@ -718,10 +805,8 @@ class IpuStrategy(object): 'on_chip': 0, 'use_replicated_tensor_sharding': 1, }, # set optimizer location - 'accumulation_and_replication_reduction_type': - 1, # popart::ReductionType::Mean - 'mean_accumulation_and_replication_reduction_strategy': - 1, # popart::MeanReductionStrategy::Post + 'accumulation_and_replication_reduction_type': 1, # popart::ReductionType::Mean + 'mean_accumulation_and_replication_reduction_strategy': 1, # popart::MeanReductionStrategy::Post } self._ipu_strategy.set_options(default_options) self.has_custom_ops = False @@ -732,6 +817,7 @@ class IpuStrategy(object): "Can not use IpuStrategy in non IPU compiled environment, please re-compile with WITH_IPU=ON." ) from paddle import in_dynamic_mode + if in_dynamic_mode(): self.register_patch() @@ -797,6 +883,7 @@ class IpuStrategy(object): ipu_strategy.set_optimizer(optimizer) """ from paddle import in_dynamic_mode + if in_dynamic_mode(): self._optimizer = optimizer optimizer_attrs = self.parse_optimizer(optimizer) @@ -831,6 +918,7 @@ class IpuStrategy(object): def get_lr(): from paddle.optimizer.lr import LRScheduler + if isinstance(optimizer._learning_rate, float): return {"lr": optimizer._learning_rate} elif isinstance(optimizer._learning_rate, LRScheduler): @@ -842,11 +930,13 @@ class IpuStrategy(object): optimizer_attrs.update(fn()) return optimizer_attrs - def set_graph_config(self, - num_ipus=1, - is_training=True, - micro_batch_size=1, - enable_manual_shard=False): + def set_graph_config( + self, + num_ipus=1, + is_training=True, + micro_batch_size=1, + enable_manual_shard=False, + ): """ Set graph configuration to the IpuStrategy instance. 
@@ -889,11 +979,13 @@ class IpuStrategy(object): } self.set_options(options) - def set_pipelining_config(self, - enable_pipelining=False, - batches_per_step=1, - enable_gradient_accumulation=False, - accumulation_factor=1): + def set_pipelining_config( + self, + enable_pipelining=False, + batches_per_step=1, + enable_gradient_accumulation=False, + accumulation_factor=1, + ): """ Set pipelining configuration to the IpuStrategy instance. Used to optimize the throughput performance. @@ -967,11 +1059,9 @@ class IpuStrategy(object): } self.set_options(options) - def add_custom_op(self, - paddle_op, - popart_op=None, - domain='custom.ops', - version=1): + def add_custom_op( + self, paddle_op, popart_op=None, domain='custom.ops', version=1 + ): """ Add a mapping to use popart custom ops running on the IPU. @@ -1201,8 +1291,9 @@ class IpuCompiledProgram(object): if not isinstance(program, framework.Program): raise TypeError( - "The type of program is wrong, expected Program, but got %s" % - type(program)) + "The type of program is wrong, expected Program, but got %s" + % type(program) + ) self._program = program self._compiled = False @@ -1212,6 +1303,7 @@ class IpuCompiledProgram(object): else: # import here to avoiding confused import paddle + self._scope = paddle.static.global_scope() if ipu_strategy is not None: diff --git a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py index 10bc30d13d1a72bdbef85d33de2038f0297e9b8d..429feda47bb53d9c750d1ff7de69ff4ed7fbba6f 100644 --- a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py +++ b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py @@ -64,13 +64,15 @@ class InitState(object): See `StateCell`. """ - def __init__(self, - init=None, - shape=None, - value=0.0, - init_boot=None, - need_reorder=False, - dtype='float32'): + def __init__( + self, + init=None, + shape=None, + value=0.0, + init_boot=None, + need_reorder=False, + dtype='float32', + ): if init is not None: self._init = init elif init_boot is None: @@ -78,10 +80,9 @@ class InitState(object): 'init_boot must be provided to infer the shape of InitState .\n' ) else: - self._init = layers.fill_constant_batch_size_like(input=init_boot, - value=value, - shape=shape, - dtype=dtype) + self._init = layers.fill_constant_batch_size_like( + input=init_boot, value=value, shape=shape, dtype=dtype + ) self._shape = shape self._value = value @@ -98,12 +99,12 @@ class InitState(object): class _MemoryState(object): - def __init__(self, state_name, rnn_obj, init_state): self._state_name = state_name # each is a rnn.memory self._rnn_obj = rnn_obj self._state_mem = self._rnn_obj.memory( - init=init_state.value, need_reorder=init_state.need_reorder) + init=init_state.value, need_reorder=init_state.need_reorder + ) def get_state(self): return self._state_mem @@ -113,7 +114,6 @@ class _MemoryState(object): class _ArrayState(object): - def __init__(self, state_name, block, init_state): self._state_name = state_name self._block = block @@ -121,33 +121,36 @@ class _ArrayState(object): self._state_array = self._block.create_var( name=unique_name.generate('array_state_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=init_state.value.dtype) + dtype=init_state.value.dtype, + ) self._counter = self._block.create_var( name=unique_name.generate('array_state_counter'), type=core.VarDesc.VarType.LOD_TENSOR, - dtype='int64') + dtype='int64', + ) # initialize counter - self._block.append_op(type='fill_constant', - inputs={}, - 
outputs={'Out': [self._counter]}, - attrs={ - 'shape': [1], - 'dtype': self._counter.dtype, - 'value': float(0.0), - 'force_cpu': True - }) + self._block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [self._counter]}, + attrs={ + 'shape': [1], + 'dtype': self._counter.dtype, + 'value': float(0.0), + 'force_cpu': True, + }, + ) self._counter.stop_gradient = True # write initial state - block.append_op(type='write_to_array', - inputs={ - 'X': init_state.value, - 'I': self._counter - }, - outputs={'Out': self._state_array}) + block.append_op( + type='write_to_array', + inputs={'X': init_state.value, 'I': self._counter}, + outputs={'Out': self._state_array}, + ) def get_state(self): state = layers.array_read(array=self._state_array, i=self._counter) @@ -220,8 +223,9 @@ class StateCell(object): def _leave_decoder(self, decoder_obj): if not self._in_decoder: - raise ValueError('StateCell not in decoder, ' - 'invalid leaving operation.') + raise ValueError( + 'StateCell not in decoder, ' 'invalid leaving operation.' + ) if self._cur_decoder_obj != decoder_obj: raise ValueError('Inconsistent decoder object in StateCell.') @@ -242,29 +246,35 @@ class StateCell(object): state = self._cur_states[state_name] if not isinstance(state, InitState): - raise ValueError('Current type of state is %s, should be ' - 'an InitState object.' % type(state)) + raise ValueError( + 'Current type of state is %s, should be ' + 'an InitState object.' % type(state) + ) self._states_holder[state_name] = {} if self._cur_decoder_obj.type == _DecoderType.TRAINING: - self._states_holder[state_name][id(self._cur_decoder_obj)] \ - = _MemoryState(state_name, - self._cur_decoder_obj.dynamic_rnn, - state) + self._states_holder[state_name][ + id(self._cur_decoder_obj) + ] = _MemoryState( + state_name, self._cur_decoder_obj.dynamic_rnn, state + ) elif self._cur_decoder_obj.type == _DecoderType.BEAM_SEARCH: - self._states_holder[state_name][id(self._cur_decoder_obj)] \ - = _ArrayState(state_name, - self._cur_decoder_obj._parent_block(), - state) + self._states_holder[state_name][ + id(self._cur_decoder_obj) + ] = _ArrayState( + state_name, self._cur_decoder_obj._parent_block(), state + ) else: - raise ValueError('Unknown decoder type, only support ' - '[TRAINING, BEAM_SEARCH]') + raise ValueError( + 'Unknown decoder type, only support ' + '[TRAINING, BEAM_SEARCH]' + ) # Read back, since current state should be LoDTensor - self._cur_states[state_name] = \ - self._states_holder[state_name][ - id(self._cur_decoder_obj)].get_state() + self._cur_states[state_name] = self._states_holder[state_name][ + id(self._cur_decoder_obj) + ].get_state() self._switched_decoder = True @@ -284,7 +294,8 @@ class StateCell(object): if state_name not in self._cur_states: raise ValueError( 'Unknown state %s. Please make sure _switch_decoder() ' - 'invoked.' % state_name) + 'invoked.' % state_name + ) return self._cur_states[state_name] @@ -328,8 +339,10 @@ class StateCell(object): def _decorator(state_cell): if state_cell == self: - raise TypeError('Updater should only accept a StateCell object ' - 'as argument.') + raise TypeError( + 'Updater should only accept a StateCell object ' + 'as argument.' + ) updater(state_cell) return _decorator @@ -353,9 +366,11 @@ class StateCell(object): for input_name, input_value in inputs.items(): if input_name not in self._inputs: - raise ValueError('Unknown input %s. ' - 'Please make sure %s in input ' - 'place holder.' % (input_name, input_name)) + raise ValueError( + 'Unknown input %s. 
' + 'Please make sure %s in input ' + 'place holder.' % (input_name, input_name) + ) self._inputs[input_name] = input_value self._state_updater(self) @@ -368,10 +383,13 @@ class StateCell(object): for state_name, decoder_state in self._states_holder.items(): if id(self._cur_decoder_obj) not in decoder_state: - raise ValueError('Unknown decoder object, please make sure ' - 'switch_decoder been invoked.') + raise ValueError( + 'Unknown decoder object, please make sure ' + 'switch_decoder been invoked.' + ) decoder_state[id(self._cur_decoder_obj)].update_state( - self._cur_states[state_name]) + self._cur_states[state_name] + ) def out_state(self): """ @@ -409,6 +427,7 @@ class TrainingDecoder(object): decoder.state_cell.update_states() decoder.output(current_score) """ + BEFORE_DECODER = 0 IN_DECODER = 1 AFTER_DECODER = 2 @@ -495,8 +514,10 @@ class TrainingDecoder(object): Variable: The specified output of the RNN cell. """ if self._status != TrainingDecoder.AFTER_DECODER: - raise ValueError('Output of training decoder can only be visited ' - 'outside the block.') + raise ValueError( + 'Output of training decoder can only be visited ' + 'outside the block.' + ) return self._dynamic_rnn(*args, **kwargs) def output(self, *outputs): @@ -520,8 +541,10 @@ class TrainingDecoder(object): def _assert_in_decoder_block(self, method): if self._status != TrainingDecoder.IN_DECODER: - raise ValueError('%s should be invoked inside block of ' - 'TrainingDecoder object.' % method) + raise ValueError( + '%s should be invoked inside block of ' + 'TrainingDecoder object.' % method + ) class BeamSearchDecoder(object): @@ -569,42 +592,44 @@ class BeamSearchDecoder(object): decoder.decode() translation_ids, translation_scores = decoder() """ + BEFORE_BEAM_SEARCH_DECODER = 0 IN_BEAM_SEARCH_DECODER = 1 AFTER_BEAM_SEARCH_DECODER = 2 - def __init__(self, - state_cell, - init_ids, - init_scores, - target_dict_dim, - word_dim, - input_var_dict={}, - topk_size=50, - sparse_emb=True, - max_len=100, - beam_size=1, - end_id=1, - name=None): + def __init__( + self, + state_cell, + init_ids, + init_scores, + target_dict_dim, + word_dim, + input_var_dict={}, + topk_size=50, + sparse_emb=True, + max_len=100, + beam_size=1, + end_id=1, + name=None, + ): self._helper = LayerHelper('beam_search_decoder', name=name) self._counter = layers.zeros(shape=[1], dtype='int64') self._counter.stop_gradient = True self._type = _DecoderType.BEAM_SEARCH - self._max_len = layers.fill_constant(shape=[1], - dtype='int64', - value=max_len) - self._cond = layers.less_than(x=self._counter, - y=layers.fill_constant(shape=[1], - dtype='int64', - value=max_len)) + self._max_len = layers.fill_constant( + shape=[1], dtype='int64', value=max_len + ) + self._cond = layers.less_than( + x=self._counter, + y=layers.fill_constant(shape=[1], dtype='int64', value=max_len), + ) self._while_op = layers.While(self._cond) self._state_cell = state_cell self._state_cell._enter_decoder(self) self._status = BeamSearchDecoder.BEFORE_BEAM_SEARCH_DECODER - self._zero_idx = layers.fill_constant(shape=[1], - value=0, - dtype='int64', - force_cpu=True) + self._zero_idx = layers.fill_constant( + shape=[1], value=0, dtype='int64', force_cpu=True + ) self._array_dict = {} self._array_link = [] self._ids_array = None @@ -637,13 +662,13 @@ class BeamSearchDecoder(object): layers.increment(x=self._counter, value=1.0, in_place=True) for value, array in self._array_link: - layers.array_write(x=value, - i=self._counter, - array=array) + layers.array_write( + x=value, i=self._counter, 
array=array + ) - layers.less_than(x=self._counter, - y=self._max_len, - cond=self._cond) + layers.less_than( + x=self._counter, y=self._max_len, cond=self._cond + ) self._status = BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER self._state_cell._leave_decoder(self) @@ -656,11 +681,9 @@ class BeamSearchDecoder(object): """ Stop the generation process in advance. Could be used as "break". """ - layers.fill_constant(shape=[1], - value=0, - dtype='bool', - force_cpu=True, - out=self._cond) + layers.fill_constant( + shape=[1], value=0, dtype='bool', force_cpu=True, out=self._cond + ) def decode(self): """ @@ -675,32 +698,39 @@ class BeamSearchDecoder(object): """ with self.block(): prev_ids = self.read_array(init=self._init_ids, is_ids=True) - prev_scores = self.read_array(init=self._init_scores, - is_scores=True) + prev_scores = self.read_array( + init=self._init_scores, is_scores=True + ) prev_ids_embedding = layers.embedding( input=prev_ids, size=[self._target_dict_dim, self._word_dim], dtype='float32', - is_sparse=self._sparse_emb) + is_sparse=self._sparse_emb, + ) feed_dict = {} update_dict = {} for init_var_name, init_var in self._input_var_dict.items(): if init_var_name not in self.state_cell._inputs: - raise ValueError('Variable ' + init_var_name + - ' not found in StateCell!\n') + raise ValueError( + 'Variable ' + + init_var_name + + ' not found in StateCell!\n' + ) read_var = self.read_array(init=init_var) update_dict[init_var_name] = read_var feed_var_expanded = layers.sequence_expand( - read_var, prev_scores) + read_var, prev_scores + ) feed_dict[init_var_name] = feed_var_expanded for state_str in self._state_cell._state_names: prev_state = self.state_cell.get_state(state_str) prev_state_expanded = layers.sequence_expand( - prev_state, prev_scores) + prev_state, prev_scores + ) self.state_cell.set_state(state_str, prev_state_expanded) for i, input_name in enumerate(self._state_cell._inputs): @@ -709,23 +739,29 @@ class BeamSearchDecoder(object): self.state_cell.compute_state(inputs=feed_dict) current_state = self.state_cell.out_state() - current_state_with_lod = layers.lod_reset(x=current_state, - y=prev_scores) - scores = layers.fc(input=current_state_with_lod, - size=self._target_dict_dim, - act='softmax') + current_state_with_lod = layers.lod_reset( + x=current_state, y=prev_scores + ) + scores = layers.fc( + input=current_state_with_lod, + size=self._target_dict_dim, + act='softmax', + ) topk_scores, topk_indices = layers.topk(scores, k=self._topk_size) - accu_scores = layers.elementwise_add(x=layers.log(x=topk_scores), - y=layers.reshape(prev_scores, - shape=[-1]), - axis=0) - selected_ids, selected_scores = layers.beam_search(prev_ids, - prev_scores, - topk_indices, - accu_scores, - self._beam_size, - end_id=1, - level=0) + accu_scores = layers.elementwise_add( + x=layers.log(x=topk_scores), + y=layers.reshape(prev_scores, shape=[-1]), + axis=0, + ) + selected_ids, selected_scores = layers.beam_search( + prev_ids, + prev_scores, + topk_indices, + accu_scores, + self._beam_size, + end_id=1, + level=0, + ) with layers.Switch() as switch: with switch.case(layers.is_empty(selected_ids)): @@ -760,8 +796,10 @@ class BeamSearchDecoder(object): self._assert_in_decoder_block('read_array') if is_ids and is_scores: - raise ValueError('Shouldn\'t mark current array be ids array and' - 'scores array at the same time.') + raise ValueError( + 'Shouldn\'t mark current array be ids array and' + 'scores array at the same time.' 
+ ) if not isinstance(init, Variable): raise TypeError('The input argument `init` must be a Variable.') @@ -770,13 +808,13 @@ class BeamSearchDecoder(object): array = parent_block.create_var( name=unique_name.generate('beam_search_decoder_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=init.dtype) - parent_block.append_op(type='write_to_array', - inputs={ - 'X': init, - 'I': self._zero_idx - }, - outputs={'Out': array}) + dtype=init.dtype, + ) + parent_block.append_op( + type='write_to_array', + inputs={'X': init, 'I': self._zero_idx}, + outputs={'Out': array}, + ) if is_ids: self._ids_array = array @@ -800,7 +838,8 @@ class BeamSearchDecoder(object): if not isinstance(array, Variable): raise TypeError( - 'The input argument `array` of must be a Variable.') + 'The input argument `array` of must be a Variable.' + ) if not isinstance(value, Variable): raise TypeError('The input argument `value` of must be a Variable.') @@ -819,12 +858,16 @@ class BeamSearchDecoder(object): as id, holds the score for each generated token. """ if self._status != BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER: - raise ValueError('Output of BeamSearchDecoder object can ' - 'only be visited outside the block.') - return layers.beam_search_decode(ids=self._ids_array, - scores=self._scores_array, - beam_size=self._beam_size, - end_id=self._end_id) + raise ValueError( + 'Output of BeamSearchDecoder object can ' + 'only be visited outside the block.' + ) + return layers.beam_search_decode( + ids=self._ids_array, + scores=self._scores_array, + beam_size=self._beam_size, + end_id=self._end_id, + ) @property def state_cell(self): @@ -847,5 +890,7 @@ class BeamSearchDecoder(object): def _assert_in_decoder_block(self, method): if self._status != BeamSearchDecoder.IN_BEAM_SEARCH_DECODER: - raise ValueError('%s should be invoked inside block of ' - 'BeamSearchDecoder object.' % method) + raise ValueError( + '%s should be invoked inside block of ' + 'BeamSearchDecoder object.' 
% method + ) diff --git a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py index 6a87bb54d3f8b92b68f252c74371c2bce2bb998f..59b6ac388844eb88c9ae6a568c8a0a518c40f5a7 100644 --- a/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py +++ b/python/paddle/fluid/contrib/extend_optimizer/extend_optimizer_with_weight_decay.py @@ -18,10 +18,10 @@ __all__ = ["extend_with_decoupled_weight_decay"] class DecoupledWeightDecay(object): - def __init__(self, coeff=0.0, apply_decay_param_fun=None, **kwargs): - if not isinstance(coeff, float) and \ - not isinstance(coeff, framework.Variable): + if not isinstance(coeff, float) and not isinstance( + coeff, framework.Variable + ): raise TypeError("coeff should be float or Variable.") self._params_name = set() self._apply_decay_param_fun = apply_decay_param_fun @@ -47,19 +47,28 @@ class DecoupledWeightDecay(object): # If no gradient then we don't need to do anything if grad is None: continue - if self._apply_decay_param_fun is not None \ - and not self._apply_decay_param_fun(param.name): + if ( + self._apply_decay_param_fun is not None + and not self._apply_decay_param_fun(param.name) + ): continue if isinstance(self._coeff, float): - assert param.dtype is not paddle.fluid.core.VarDesc.VarType.FP32, \ - "the type of coeff(float) and parameter(%s) is not consistent."%(self._coeff.dtype) + assert ( + param.dtype is not paddle.fluid.core.VarDesc.VarType.FP32 + ), ( + "the type of coeff(float) and parameter(%s) is not consistent." + % (self._coeff.dtype) + ) else: - assert self._coeff.dtype == param.dtype, \ - "the type of coeff(%s) and parameter(%s) is not consistent."%(self._coeff.dtype, param.dtype) + assert self._coeff.dtype == param.dtype, ( + "the type of coeff(%s) and parameter(%s) is not consistent." 
+ % (self._coeff.dtype, param.dtype) + ) with param.block.program._optimized_guard( - [param, grad]), framework.name_scope('weight decay'): + [param, grad] + ), framework.name_scope('weight decay'): assert param.name not in self._params_name scaled_params.append((param, grad, param * self._coeff)) self._params_name.add(param.name) @@ -71,27 +80,31 @@ class DecoupledWeightDecay(object): def apply_optimize(self, **kargs): return super(DecoupledWeightDecay, self).apply_optimize(**kargs) - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): - params_grads = self.backward(loss=loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): + params_grads = self.backward( + loss=loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) scaled_params = self._scale_parameters(params_grads) for p_grad_sgrad in scaled_params: param, grad, scaled_param = p_grad_sgrad with param.block.program._optimized_guard( - [param, grad]), framework.name_scope('weight decay'): + [param, grad] + ), framework.name_scope('weight decay'): updated_param = paddle.fluid.layers.elementwise_sub( - x=param, y=scaled_param) + x=param, y=scaled_param + ) paddle.fluid.layers.assign(input=updated_param, output=param) - optimize_ops = self.apply_optimize(loss=loss, - params_grads=params_grads, - startup_program=startup_program) + optimize_ops = self.apply_optimize( + loss=loss, + params_grads=params_grads, + startup_program=startup_program, + ) return optimize_ops, params_grads def __str__(self): @@ -126,10 +139,12 @@ def extend_with_decoupled_weight_decay(base_optimizer): """ if not issubclass(base_optimizer, paddle.fluid.optimizer.Optimizer): raise TypeError( - "The input(base_optimizer) should be a derived class of Optimizer.") + "The input(base_optimizer) should be a derived class of Optimizer." + ) - class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecay, - base_optimizer): + class OptimizerWithDecoupledWeightDecay( + DecoupledWeightDecay, base_optimizer + ): """ OptimizerWithDecoupledWeightDecay is used to update the optimized parameters with the parameters before optimization. 
For more information, please refer: @@ -145,7 +160,8 @@ def extend_with_decoupled_weight_decay(base_optimizer): """ def __init__(self, weight_decay, apply_decay_param_fun=None, **kwargs): - super(OptimizerWithDecoupledWeightDecay, - self).__init__(weight_decay, apply_decay_param_fun, **kwargs) + super(OptimizerWithDecoupledWeightDecay, self).__init__( + weight_decay, apply_decay_param_fun, **kwargs + ) return OptimizerWithDecoupledWeightDecay diff --git a/python/paddle/fluid/contrib/layers/metric_op.py b/python/paddle/fluid/contrib/layers/metric_op.py index 82d1a145f5b231905074ccff5d67279be2a64b9c..414fcf5b6cd51c757edaf7213faf0c38008ca107 100755 --- a/python/paddle/fluid/contrib/layers/metric_op.py +++ b/python/paddle/fluid/contrib/layers/metric_op.py @@ -78,171 +78,195 @@ def ctr_metric_bundle(input, label, ins_tag_weight=None): """ if ins_tag_weight is None: - ins_tag_weight = tensor.fill_constant(shape=[1, 1], - dtype="float32", - value=1.0) + ins_tag_weight = tensor.fill_constant( + shape=[1, 1], dtype="float32", value=1.0 + ) assert input.shape == label.shape helper = LayerHelper("ctr_metric_bundle", **locals()) - local_abserr = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - local_sqrerr = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - local_prob = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - local_q = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - local_pos_num = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - local_ins_num = helper.create_global_variable(persistable=True, - dtype='float32', - shape=[1]) - - tmp_res_elesub = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[-1]) - tmp_res_sigmoid = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[-1]) - tmp_ones = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[-1]) - - batch_prob = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) - batch_abserr = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) - batch_sqrerr = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) - batch_q = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) - batch_pos_num = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) - batch_ins_num = helper.create_global_variable(persistable=False, - dtype='float32', - shape=[1]) + local_abserr = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + local_sqrerr = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + local_prob = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + local_q = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + local_pos_num = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + local_ins_num = helper.create_global_variable( + persistable=True, dtype='float32', shape=[1] + ) + + tmp_res_elesub = helper.create_global_variable( + persistable=False, dtype='float32', shape=[-1] + ) + tmp_res_sigmoid = helper.create_global_variable( + persistable=False, dtype='float32', shape=[-1] + ) + tmp_ones = helper.create_global_variable( + persistable=False, dtype='float32', shape=[-1] + ) + + batch_prob = helper.create_global_variable( + 
persistable=False, dtype='float32', shape=[1] + ) + batch_abserr = helper.create_global_variable( + persistable=False, dtype='float32', shape=[1] + ) + batch_sqrerr = helper.create_global_variable( + persistable=False, dtype='float32', shape=[1] + ) + batch_q = helper.create_global_variable( + persistable=False, dtype='float32', shape=[1] + ) + batch_pos_num = helper.create_global_variable( + persistable=False, dtype='float32', shape=[1] + ) + batch_ins_num = helper.create_global_variable( + persistable=False, dtype='float32', shape=[1] + ) for var in [ - local_abserr, batch_abserr, local_sqrerr, batch_sqrerr, local_prob, - batch_prob, local_q, batch_q, batch_pos_num, batch_ins_num, - local_pos_num, local_ins_num + local_abserr, + batch_abserr, + local_sqrerr, + batch_sqrerr, + local_prob, + batch_prob, + local_q, + batch_q, + batch_pos_num, + batch_ins_num, + local_pos_num, + local_ins_num, ]: - helper.set_variable_initializer(var, Constant(value=0.0, - force_cpu=True)) - - helper.append_op(type="elementwise_sub", - inputs={ - "X": [input], - "Y": [label] - }, - outputs={"Out": [tmp_res_elesub]}) - - helper.append_op(type="squared_l2_norm", - inputs={"X": [tmp_res_elesub]}, - outputs={"Out": [batch_sqrerr]}) - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_sqrerr], - "Y": [local_sqrerr] - }, - outputs={"Out": [local_sqrerr]}) - - helper.append_op(type="l1_norm", - inputs={"X": [tmp_res_elesub]}, - outputs={"Out": [batch_abserr]}) - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_abserr], - "Y": [local_abserr] - }, - outputs={"Out": [local_abserr]}) - - helper.append_op(type="reduce_sum", - inputs={"X": [input]}, - outputs={"Out": [batch_prob]}) - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_prob], - "Y": [local_prob] - }, - outputs={"Out": [local_prob]}) - helper.append_op(type="sigmoid", - inputs={"X": [input]}, - outputs={"Out": [tmp_res_sigmoid]}) - helper.append_op(type="reduce_sum", - inputs={"X": [tmp_res_sigmoid]}, - outputs={"Out": [batch_q]}) - - helper.append_op(type="reduce_sum", - inputs={"X": [label]}, - outputs={"Out": [batch_pos_num]}) - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_pos_num], - "Y": [local_pos_num] - }, - outputs={"Out": [local_pos_num]}) - - helper.append_op(type='fill_constant_batch_size_like', - inputs={"Input": label}, - outputs={'Out': [tmp_ones]}, - attrs={ - 'shape': [-1, 1], - 'dtype': tmp_ones.dtype, - 'value': float(1.0), - }) - helper.append_op(type="reduce_sum", - inputs={"X": [tmp_ones]}, - outputs={"Out": [batch_ins_num]}) - - #if data is fake, return 0 + helper.set_variable_initializer( + var, Constant(value=0.0, force_cpu=True) + ) + + helper.append_op( + type="elementwise_sub", + inputs={"X": [input], "Y": [label]}, + outputs={"Out": [tmp_res_elesub]}, + ) + + helper.append_op( + type="squared_l2_norm", + inputs={"X": [tmp_res_elesub]}, + outputs={"Out": [batch_sqrerr]}, + ) + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_sqrerr], "Y": [local_sqrerr]}, + outputs={"Out": [local_sqrerr]}, + ) + + helper.append_op( + type="l1_norm", + inputs={"X": [tmp_res_elesub]}, + outputs={"Out": [batch_abserr]}, + ) + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_abserr], "Y": [local_abserr]}, + outputs={"Out": [local_abserr]}, + ) + + helper.append_op( + type="reduce_sum", inputs={"X": [input]}, outputs={"Out": [batch_prob]} + ) + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_prob], "Y": [local_prob]}, + 
outputs={"Out": [local_prob]}, + ) + helper.append_op( + type="sigmoid", + inputs={"X": [input]}, + outputs={"Out": [tmp_res_sigmoid]}, + ) + helper.append_op( + type="reduce_sum", + inputs={"X": [tmp_res_sigmoid]}, + outputs={"Out": [batch_q]}, + ) + + helper.append_op( + type="reduce_sum", + inputs={"X": [label]}, + outputs={"Out": [batch_pos_num]}, + ) + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_pos_num], "Y": [local_pos_num]}, + outputs={"Out": [local_pos_num]}, + ) + + helper.append_op( + type='fill_constant_batch_size_like', + inputs={"Input": label}, + outputs={'Out': [tmp_ones]}, + attrs={ + 'shape': [-1, 1], + 'dtype': tmp_ones.dtype, + 'value': float(1.0), + }, + ) + helper.append_op( + type="reduce_sum", + inputs={"X": [tmp_ones]}, + outputs={"Out": [batch_ins_num]}, + ) + + # if data is fake, return 0 inputs_slice = {'Input': ins_tag_weight} attrs = {'axes': [0]} attrs['starts'] = [0] attrs['ends'] = [1] - helper.append_op(type="slice", - inputs=inputs_slice, - attrs=attrs, - outputs={"Out": ins_tag_weight}) + helper.append_op( + type="slice", + inputs=inputs_slice, + attrs=attrs, + outputs={"Out": ins_tag_weight}, + ) axis = helper.kwargs.get('axis', 0) - helper.append_op(type="elementwise_mul", - inputs={ - "X": [batch_ins_num], - "Y": [ins_tag_weight] - }, - outputs={"Out": [batch_ins_num]}, - attrs={'axis': axis}) - - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_ins_num], - "Y": [local_ins_num] - }, - outputs={"Out": [local_ins_num]}) - - helper.append_op(type="elementwise_mul", - inputs={ - "X": [batch_q], - "Y": [ins_tag_weight] - }, - outputs={"Out": [batch_q]}, - attrs={'axis': axis}) - helper.append_op(type="elementwise_add", - inputs={ - "X": [batch_q], - "Y": [local_q] - }, - outputs={"Out": [local_q]}) - - return local_sqrerr, local_abserr, local_prob, local_q, local_pos_num, local_ins_num + helper.append_op( + type="elementwise_mul", + inputs={"X": [batch_ins_num], "Y": [ins_tag_weight]}, + outputs={"Out": [batch_ins_num]}, + attrs={'axis': axis}, + ) + + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_ins_num], "Y": [local_ins_num]}, + outputs={"Out": [local_ins_num]}, + ) + + helper.append_op( + type="elementwise_mul", + inputs={"X": [batch_q], "Y": [ins_tag_weight]}, + outputs={"Out": [batch_q]}, + attrs={'axis': axis}, + ) + helper.append_op( + type="elementwise_add", + inputs={"X": [batch_q], "Y": [local_q]}, + outputs={"Out": [local_q]}, + ) + + return ( + local_sqrerr, + local_abserr, + local_prob, + local_q, + local_pos_num, + local_ins_num, + ) diff --git a/python/paddle/fluid/contrib/layers/nn.py b/python/paddle/fluid/contrib/layers/nn.py index e09bdcb0d8fbd6cdac3017c2728f2a98e88a098b..08ab46dfac9ebcd26414d3c939c44d2403fec2ee 100644 --- a/python/paddle/fluid/contrib/layers/nn.py +++ b/python/paddle/fluid/contrib/layers/nn.py @@ -25,7 +25,12 @@ from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layers import utils from ... 
import unique_name from paddle.fluid.initializer import Normal, Constant, NumpyArrayInitializer -from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from paddle.fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + convert_dtype, +) from paddle.fluid import core from paddle.fluid.param_attr import ParamAttr @@ -36,21 +41,33 @@ import warnings from paddle import _C_ops, _legacy_C_ops __all__ = [ - 'fused_elemwise_activation', 'sequence_topk_avg_pooling', 'var_conv_2d', - 'match_matrix_tensor', 'tree_conv', 'fused_embedding_seq_pool', - 'multiclass_nms2', 'search_pyramid_hash', 'shuffle_batch', 'partial_concat', - 'sparse_embedding', 'partial_sum', 'tdm_child', 'rank_attention', - 'tdm_sampler', 'batch_fc', '_pull_box_extended_sparse', 'bilateral_slice', - 'correlation', 'fused_bn_add_act', 'fused_seqpool_cvm' + 'fused_elemwise_activation', + 'sequence_topk_avg_pooling', + 'var_conv_2d', + 'match_matrix_tensor', + 'tree_conv', + 'fused_embedding_seq_pool', + 'multiclass_nms2', + 'search_pyramid_hash', + 'shuffle_batch', + 'partial_concat', + 'sparse_embedding', + 'partial_sum', + 'tdm_child', + 'rank_attention', + 'tdm_sampler', + 'batch_fc', + '_pull_box_extended_sparse', + 'bilateral_slice', + 'correlation', + 'fused_bn_add_act', + 'fused_seqpool_cvm', ] -def fused_elemwise_activation(x, - y, - functor_list, - axis=-1, - scale=0.0, - save_intermediate_out=True): +def fused_elemwise_activation( + x, y, functor_list, axis=-1, scale=0.0, save_intermediate_out=True +): """ **Fused elementwise_add/mul and activation layers** @@ -89,104 +106,103 @@ def fused_elemwise_activation(x, if not isinstance(functor_list, list) or len(functor_list) != 2: raise ValueError( - 'functor_list should be a list of str, and the length should be 2.') + 'functor_list should be a list of str, and the length should be 2.' + ) helper = LayerHelper('fused_elemwise_activation', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) intermediate_out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='fused_elemwise_activation', - inputs={ - 'X': x, - 'Y': y - }, - outputs={ - 'Out': out, - 'IntermediateOut': intermediate_out - }, - attrs={ - 'axis': axis, - 'scale': scale, - 'save_intermediate_out': save_intermediate_out, - 'functor_list': functor_list - }) + helper.append_op( + type='fused_elemwise_activation', + inputs={'X': x, 'Y': y}, + outputs={'Out': out, 'IntermediateOut': intermediate_out}, + attrs={ + 'axis': axis, + 'scale': scale, + 'save_intermediate_out': save_intermediate_out, + 'functor_list': functor_list, + }, + ) return out -def var_conv_2d(input, - row, - col, - input_channel, - output_channel, - filter_size, - stride=1, - param_attr=None, - act=None, - dtype='float32', - name=None): +def var_conv_2d( + input, + row, + col, + input_channel, + output_channel, + filter_size, + stride=1, + param_attr=None, + act=None, + dtype='float32', + name=None, +): r""" - The var_conv_2d layer calculates the output base on the :attr:`input` with variable length, - row, col, input channel, filter size and strides. Both :attr:`input`, :attr:`row`, - and :attr:`col` are 1-level LodTensor. The convolution operation is same as conv2d layer with - padding. Besides, input.dims[1] should be 1. - - .. 
code-block:: text - - If input_channel is 2 and given row lodTensor and col lodTensor as follows: - row.lod = [[5, 4]] - col.lod = [[6, 7]] - input is a lodTensor: - input.lod = [[60, 56]] # where 60 = input_channel * 5 * 6 - input.dims = [116, 1] # where 116 = 60 + 56 - - If set output_channel is 3, filter_size is [3, 3], stride is [1, 1]: - # where 90 = output_channel * [(5-1)/stride + 1] * [(6-1)/stride + 1] - output.lod = [[90, 84]] - output.dims = [174, 1] # where 174 = 90 + 84 - - Args: - input (Variable): The input should be 1-level LodTensor with dims[1] equals 1. - row (Variable): The row should be 1-level LodTensor to provide height information. - col (Variable): The col should be 1-level LodTensor to provide width information. - input_channel (int): The number of input channel. - output_channel (int): The number of output channel. - filter_size (int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. - stride (int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. Default: stride = 1. - param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights - of var_conv2d. If it is set to None or one attribute of ParamAttr, var_conv2d - will create ParamAttr as param_attr. If the Initializer of the param_attr - is not set, the parameter is initialized with :math:`Normal(0.0, std)`, - and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{ - 0.5}`. Default: None. - act (str): Activation type, if it is set to None, activation is not appended. - Default: None - dtype ('float32'): The data type of parameter and output. - name (str|None): A name for this layer(optional). If set None, the layer - will be named automatically. Default: None - - Returns: - Variable: Output variable with LoD specified by this layer. - - Examples: - .. code-block:: python - - import numpy as np - from paddle.fluid import layers - from paddle.fluid import contrib - - x_lod_tensor = layers.data(name='x', shape=[1], lod_level=1) - row_lod_tensor = layers.data(name='row', shape=[6], lod_level=1) - col_lod_tensor = layers.data(name='col', shape=[6], lod_level=1) - out = contrib.var_conv_2d(input=x_lod_tensor, - row=row_lod_tensor, - col=col_lod_tensor, - input_channel=3, - output_channel=5, - filter_size=[3, 3], - stride=1) + The var_conv_2d layer calculates the output base on the :attr:`input` with variable length, + row, col, input channel, filter size and strides. Both :attr:`input`, :attr:`row`, + and :attr:`col` are 1-level LodTensor. The convolution operation is same as conv2d layer with + padding. Besides, input.dims[1] should be 1. + + .. code-block:: text + + If input_channel is 2 and given row lodTensor and col lodTensor as follows: + row.lod = [[5, 4]] + col.lod = [[6, 7]] + input is a lodTensor: + input.lod = [[60, 56]] # where 60 = input_channel * 5 * 6 + input.dims = [116, 1] # where 116 = 60 + 56 + + If set output_channel is 3, filter_size is [3, 3], stride is [1, 1]: + # where 90 = output_channel * [(5-1)/stride + 1] * [(6-1)/stride + 1] + output.lod = [[90, 84]] + output.dims = [174, 1] # where 174 = 90 + 84 + + Args: + input (Variable): The input should be 1-level LodTensor with dims[1] equals 1. + row (Variable): The row should be 1-level LodTensor to provide height information. + col (Variable): The col should be 1-level LodTensor to provide width information. 
+ input_channel (int): The number of input channel. + output_channel (int): The number of output channel. + filter_size (int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. + stride (int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of var_conv2d. If it is set to None or one attribute of ParamAttr, var_conv2d + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with :math:`Normal(0.0, std)`, + and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{ + 0.5}`. Default: None. + act (str): Activation type, if it is set to None, activation is not appended. + Default: None + dtype ('float32'): The data type of parameter and output. + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Default: None + + Returns: + Variable: Output variable with LoD specified by this layer. + + Examples: + .. code-block:: python + + import numpy as np + from paddle.fluid import layers + from paddle.fluid import contrib + + x_lod_tensor = layers.data(name='x', shape=[1], lod_level=1) + row_lod_tensor = layers.data(name='row', shape=[6], lod_level=1) + col_lod_tensor = layers.data(name='col', shape=[6], lod_level=1) + out = contrib.var_conv_2d(input=x_lod_tensor, + row=row_lod_tensor, + col=col_lod_tensor, + input_channel=3, + output_channel=5, + filter_size=[3, 3], + stride=1) """ helper = LayerHelper('var_conv_2d', **locals()) x_shape = list(input.shape) @@ -197,7 +213,7 @@ def var_conv_2d(input, filter_shape = [ int(output_channel), - int(input_channel) * filter_size[0] * filter_size[1] + int(input_channel) * filter_size[0] * filter_size[1], ] filter_param = helper.create_parameter( attr=helper.param_attr, @@ -206,39 +222,35 @@ def var_conv_2d(input, ) conv_res = helper.create_variable_for_type_inference(dtype) - tmp_res = helper.create_variable_for_type_inference(dtype, - stop_gradient=True) - - helper.append_op(type='var_conv_2d', - inputs={ - 'X': input, - 'ROW': row, - 'COLUMN': col, - 'W': filter_param, - }, - outputs={ - "Out": conv_res, - "Col": tmp_res - }, - attrs={ - 'InputChannel': input_channel, - 'OutputChannel': output_channel, - 'StrideH': stride[0], - 'StrideW': stride[1], - 'KernelH': filter_size[0], - 'KernelW': filter_size[1], - }) + tmp_res = helper.create_variable_for_type_inference( + dtype, stop_gradient=True + ) + + helper.append_op( + type='var_conv_2d', + inputs={ + 'X': input, + 'ROW': row, + 'COLUMN': col, + 'W': filter_param, + }, + outputs={"Out": conv_res, "Col": tmp_res}, + attrs={ + 'InputChannel': input_channel, + 'OutputChannel': output_channel, + 'StrideH': stride[0], + 'StrideW': stride[1], + 'KernelH': filter_size[0], + 'KernelW': filter_size[1], + }, + ) return helper.append_activation(conv_res) -def match_matrix_tensor(x, - y, - channel_num, - act=None, - param_attr=None, - dtype='float32', - name=None): +def match_matrix_tensor( + x, y, channel_num, act=None, param_attr=None, dtype='float32', name=None +): """ Calculate the semantic matching matrix of two word sequences with variable length. 
Given a query A of length `n` and a title B of length `m`, the input shape are respectively @@ -296,28 +308,28 @@ def match_matrix_tensor(x, x_shape = list(x.shape) y_shape = list(y.shape) - assert len(x_shape) == 2 and len( - y_shape) == 2 and x_shape[-1] == y_shape[-1] + assert ( + len(x_shape) == 2 and len(y_shape) == 2 and x_shape[-1] == y_shape[-1] + ) weight_shape = [x_shape[-1], channel_num, y_shape[-1]] - w = helper.create_parameter(attr=helper.param_attr, - shape=weight_shape, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, shape=weight_shape, dtype=dtype, is_bias=False + ) mm_res = helper.create_variable_for_type_inference(dtype) - tmp_res = helper.create_variable_for_type_inference(dtype, - stop_gradient=True) - helper.append_op(type='match_matrix_tensor', - inputs={ - 'X': x, - 'Y': y, - 'W': w, - }, - outputs={ - "Out": mm_res, - "Tmp": tmp_res - }, - attrs={'dim_t': channel_num}) + tmp_res = helper.create_variable_for_type_inference( + dtype, stop_gradient=True + ) + helper.append_op( + type='match_matrix_tensor', + inputs={ + 'X': x, + 'Y': y, + 'W': w, + }, + outputs={"Out": mm_res, "Tmp": tmp_res}, + attrs={'dim_t': channel_num}, + ) return helper.append_activation(mm_res), tmp_res @@ -375,72 +387,67 @@ def sequence_topk_avg_pooling(input, row, col, topks, channel_num): """ helper = LayerHelper('sequence_topk_avg_pooling', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - pos = helper.create_variable_for_type_inference(dtype=helper.input_dtype(), - stop_gradient=True) - helper.append_op(type='sequence_topk_avg_pooling', - inputs={ - 'X': input, - 'ROW': row, - 'COLUMN': col - }, - outputs={ - 'Out': out, - 'pos': pos - }, - attrs={ - 'topks': topks, - 'channel_num': channel_num - }) + pos = helper.create_variable_for_type_inference( + dtype=helper.input_dtype(), stop_gradient=True + ) + helper.append_op( + type='sequence_topk_avg_pooling', + inputs={'X': input, 'ROW': row, 'COLUMN': col}, + outputs={'Out': out, 'pos': pos}, + attrs={'topks': topks, 'channel_num': channel_num}, + ) return out -def tree_conv(nodes_vector, - edge_set, - output_size, - num_filters=1, - max_depth=2, - act='tanh', - param_attr=None, - bias_attr=None, - name=None): +def tree_conv( + nodes_vector, + edge_set, + output_size, + num_filters=1, + max_depth=2, + act='tanh', + param_attr=None, + bias_attr=None, + name=None, +): """ - ${comment} -Args : nodes_vector(${nodes_vector_type}) : $ { nodes_vector_comment } -edge_set(${edge_set_type}) : $ { edge_set_comment } - output_size(int): output feature width - num_filters(int): number of filters, Default 1 - max_depth(int): max depth of filters, Default 2 - act(str): activation function, Default tanh - param_attr(ParamAttr): the parameter attribute for the filters, Default None - bias_attr(ParamAttr): the parameter attribute for the bias of this layer, Default None - name(str): a name of this layer(optional). If set None, the layer will be named automatically, Default None - - Returns: - out(${out_type}): ${ - out_comment - } - - Examples: - .. 
code-block:: python - - import paddle.fluid as fluid + ${comment} + Args : nodes_vector(${nodes_vector_type}) : $ { nodes_vector_comment } + edge_set(${edge_set_type}) : $ { edge_set_comment } + output_size(int): output feature width + num_filters(int): number of filters, Default 1 + max_depth(int): max depth of filters, Default 2 + act(str): activation function, Default tanh + param_attr(ParamAttr): the parameter attribute for the filters, Default None + bias_attr(ParamAttr): the parameter attribute for the bias of this layer, Default None + name(str): a name of this layer(optional). If set None, the layer will be named automatically, Default None + + Returns: + out(${out_type}): ${ + out_comment + } + + Examples: + .. code-block:: python - # 10 for max_node_size of dataset, 5 for vector width - nodes_vector = fluid.layers.data( - name='vectors', shape=[10, 5], dtype='float32') - # 10 for max_node_size of dataset, 2 for every edge has two nodes - # edges must be directional - edge_set = fluid.layers.data(name='edge_set', shape=[ - 10, 2], dtype='float32') - # the shape of output will be [10, 6, 1], - # 10 for max_node_size of dataset, 6 for output size, 1 for 1 filter - out_vector = fluid.layers.tree_conv(nodes_vector, edge_set, 6, 1, 2) -#After reshape, output tensor could be nodes_vector for next tree convolution - out_vector = fluid.layers.reshape(out_vector, shape=[-1, 10, 6]) - out_vector_2 = fluid.layers.tree_conv(out_vector, edge_set, 3, 4, 2) -#also output tensor could be pooling(the pooling in paper called global pooling) - pooled = fluid.layers.reduce_max(out_vector, dim=2) # global pooling + import paddle.fluid as fluid + + # 10 for max_node_size of dataset, 5 for vector width + nodes_vector = fluid.layers.data( + name='vectors', shape=[10, 5], dtype='float32') + # 10 for max_node_size of dataset, 2 for every edge has two nodes + # edges must be directional + edge_set = fluid.layers.data(name='edge_set', shape=[ + 10, 2], dtype='float32') + # the shape of output will be [10, 6, 1], + # 10 for max_node_size of dataset, 6 for output size, 1 for 1 filter + out_vector = fluid.layers.tree_conv(nodes_vector, edge_set, 6, 1, 2) + #After reshape, output tensor could be nodes_vector for next tree convolution + out_vector = fluid.layers.reshape(out_vector, shape=[-1, 10, 6]) + out_vector_2 = fluid.layers.tree_conv(out_vector, edge_set, 3, 4, 2) + #also output tensor could be pooling(the pooling in paper called global pooling) + pooled = fluid.layers.reduce_max(out_vector, dim=2) # global pooling """ check_type(nodes_vector, 'nodes_vector', (Variable), 'tree_conv') check_type(edge_set, 'edge_set', (Variable), 'tree_conv') @@ -449,21 +456,18 @@ edge_set(${edge_set_type}) : $ { edge_set_comment } dtype = helper.input_dtype('nodes_vector') feature_size = nodes_vector.shape[2] W_shape = [feature_size, 3, output_size, num_filters] - W = helper.create_parameter(attr=param_attr, - shape=W_shape, - dtype=dtype, - is_bias=False) + W = helper.create_parameter( + attr=param_attr, shape=W_shape, dtype=dtype, is_bias=False + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='tree_conv', - inputs={ - 'NodesVector': nodes_vector, - 'EdgeSet': edge_set, - 'Filter': W - }, - outputs={ - 'Out': out, - }, - attrs={'max_depth': max_depth}) + helper.append_op( + type='tree_conv', + inputs={'NodesVector': nodes_vector, 'EdgeSet': edge_set, 'Filter': W}, + outputs={ + 'Out': out, + }, + attrs={'max_depth': max_depth}, + ) if helper.bias_attr: pre_activation = 
helper.append_bias_op(out) else: @@ -471,13 +475,15 @@ edge_set(${edge_set_type}) : $ { edge_set_comment } return helper.append_activation(pre_activation) -def fused_embedding_seq_pool(input, - size, - is_sparse=False, - padding_idx=None, - combiner='sum', - param_attr=None, - dtype='float32'): +def fused_embedding_seq_pool( + input, + size, + is_sparse=False, + padding_idx=None, + combiner='sum', + param_attr=None, + dtype='float32', +): r""" **Embedding Sequence pool** @@ -520,33 +526,33 @@ def fused_embedding_seq_pool(input, is_sparse=False) """ helper = LayerHelper('fused_embedding_seq_pool', **locals()) - w = helper.create_parameter(attr=helper.param_attr, - shape=size, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False + ) out = helper.create_variable_for_type_inference(dtype) - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) - helper.append_op(type='fused_embedding_seq_pool', - inputs={ - 'Ids': input, - 'W': w - }, - outputs={'Out': out}, - attrs={ - 'is_sparse': is_sparse, - 'combiner': combiner, - 'padding_idx': padding_idx - }) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (size[0] + padding_idx) + ) + helper.append_op( + type='fused_embedding_seq_pool', + inputs={'Ids': input, 'W': w}, + outputs={'Out': out}, + attrs={ + 'is_sparse': is_sparse, + 'combiner': combiner, + 'padding_idx': padding_idx, + }, + ) return out -def fused_seqpool_cvm(input, - pool_type, - cvm, - pad_value=0.0, - use_cvm=True, - cvm_offset=2): +def fused_seqpool_cvm( + input, pool_type, cvm, pad_value=0.0, use_cvm=True, cvm_offset=2 +): """ :api_attr: Static Graph @@ -592,13 +598,15 @@ def fused_seqpool_cvm(input, if pool_type.upper() != 'SUM': raise ValueError( "fused_seqpool_cvm only support SUM pooling now, and your type is: " - + pool_type) + + pool_type + ) check_type(input, 'input', list, 'fused_seqpool_cvm') if isinstance(input, list): for _input in input: - check_variable_and_dtype(_input, 'input', ['float32'], - 'fused_seqpool_cvm') + check_variable_and_dtype( + _input, 'input', ['float32'], 'fused_seqpool_cvm' + ) dtype = helper.input_dtype() inputs = helper.multiple_input() @@ -607,33 +615,34 @@ def fused_seqpool_cvm(input, for i in range(len(inputs)) ] - helper.append_op(type="fused_seqpool_cvm", - inputs={ - "X": inputs, - "CVM": cvm - }, - outputs={"Out": outs}, - attrs={ - "pooltype": pool_type.upper(), - "pad_value": pad_value, - "use_cvm": use_cvm, - "cvm_offset": cvm_offset, - }) + helper.append_op( + type="fused_seqpool_cvm", + inputs={"X": inputs, "CVM": cvm}, + outputs={"Out": outs}, + attrs={ + "pooltype": pool_type.upper(), + "pad_value": pad_value, + "use_cvm": use_cvm, + "cvm_offset": cvm_offset, + }, + ) return outs -def multiclass_nms2(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=0, - return_index=False, - name=None): +def multiclass_nms2( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=0, + return_index=False, + name=None, +): """ **Multiclass NMS2** @@ -729,24 +738,20 @@ def multiclass_nms2(bboxes, output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) index = helper.create_variable_for_type_inference(dtype='int') - helper.append_op(type="multiclass_nms2", - inputs={ - 'BBoxes': bboxes, - 
'Scores': scores - }, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'keep_top_k': keep_top_k, - 'nms_eta': nms_eta, - 'normalized': normalized - }, - outputs={ - 'Out': output, - 'Index': index - }) + helper.append_op( + type="multiclass_nms2", + inputs={'BBoxes': bboxes, 'Scores': scores}, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized, + }, + outputs={'Out': output, 'Index': index}, + ) output.stop_gradient = True index.stop_gradient = True @@ -755,24 +760,26 @@ def multiclass_nms2(bboxes, return output -def search_pyramid_hash(input, - num_emb, - space_len, - pyramid_layer, - rand_len, - drop_out_percent, - is_training, - use_filter, - white_list_len, - black_list_len, - seed, - lr, - param_attr=None, - param_attr_wl=None, - param_attr_bl=None, - name=None, - distribute_update_vars=None, - dtype='float32'): +def search_pyramid_hash( + input, + num_emb, + space_len, + pyramid_layer, + rand_len, + drop_out_percent, + is_training, + use_filter, + white_list_len, + black_list_len, + seed, + lr, + param_attr=None, + param_attr_wl=None, + param_attr_bl=None, + name=None, + distribute_update_vars=None, + dtype='float32', +): """ **Pyramid hash embedding** @@ -809,28 +816,25 @@ def search_pyramid_hash(input, helper = LayerHelper('search_pyramid_hash', **locals()) w_shape = [space_len + rand_len, 1] - w = helper.create_parameter(attr=param_attr, - shape=w_shape, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=param_attr, shape=w_shape, dtype=dtype, is_bias=False + ) w.stop_gradient = True input_vars = {'X': input, 'W': w} if white_list_len > 0: wl_shape = [white_list_len, 1] - white_list = helper.create_parameter(attr=param_attr_wl, - shape=wl_shape, - dtype=dtype, - is_bias=False) + white_list = helper.create_parameter( + attr=param_attr_wl, shape=wl_shape, dtype=dtype, is_bias=False + ) white_list.stop_gradient = True input_vars['WhiteList'] = white_list if black_list_len >= 0: bl_shape = [black_list_len, 1] - black_list = helper.create_parameter(attr=param_attr_bl, - shape=bl_shape, - dtype=dtype, - is_bias=False) + black_list = helper.create_parameter( + attr=param_attr_bl, shape=bl_shape, dtype=dtype, is_bias=False + ) black_list.stop_gradient = True input_vars['BlackList'] = black_list @@ -847,33 +851,32 @@ def search_pyramid_hash(input, for param in distribute_update_vars: if param not in special_name_list: raise ValueError( - "Pyramid Hash layer didn't have parameter {}".format(param)) + "Pyramid Hash layer didn't have parameter {}".format(param) + ) distribute_update_vars_str = ",".join(distribute_update_vars) res = helper.create_variable_for_type_inference(dtype) drop_pos = helper.create_variable_for_type_inference(dtype) x_temp_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pyramid_hash', - inputs=input_vars, - outputs={ - "Out": res, - "X_Temp_Out": x_temp_out, - 'DropPos': drop_pos - }, - attrs={ - 'num_emb': num_emb, - 'space_len': space_len, - 'pyramid_layer': pyramid_layer, - 'rand_len': rand_len, - 'drop_out_percent': drop_out_percent, - 'is_training': is_training, - 'use_filter': use_filter, - 'white_list_len': white_list_len, - 'black_list_len': black_list_len, - 'seed': seed, - 'lr': lr, - 'distribute_update_vars': distribute_update_vars_str - 
}) + helper.append_op( + type='pyramid_hash', + inputs=input_vars, + outputs={"Out": res, "X_Temp_Out": x_temp_out, 'DropPos': drop_pos}, + attrs={ + 'num_emb': num_emb, + 'space_len': space_len, + 'pyramid_layer': pyramid_layer, + 'rand_len': rand_len, + 'drop_out_percent': drop_out_percent, + 'is_training': is_training, + 'use_filter': use_filter, + 'white_list_len': white_list_len, + 'black_list_len': black_list_len, + 'seed': seed, + 'lr': lr, + 'distribute_update_vars': distribute_update_vars_str, + }, + ) return res @@ -930,18 +933,14 @@ def shuffle_batch(x, seed=None): seed = helper.create_variable( name=unique_name.generate("shuffle_batch_seed"), dtype="int64", - persistable=False) - helper.append_op(type='shuffle_batch', - inputs={ - 'X': x, - 'Seed': seed - }, - outputs={ - 'Out': out, - 'ShuffleIdx': shuffle_idx, - 'SeedOut': seed - }, - attrs=op_attrs) + persistable=False, + ) + helper.append_op( + type='shuffle_batch', + inputs={'X': x, 'Seed': seed}, + outputs={'Out': out, 'ShuffleIdx': shuffle_idx, 'SeedOut': seed}, + attrs=op_attrs, + ) return out @@ -987,23 +986,28 @@ def partial_concat(input, start_index=0, length=-1): if not isinstance(input, list): warnings.warn( "The type of input in partial_concat should be list, but received %s." - % (type(input))) + % (type(input)) + ) input = [input] for id, x in enumerate(input): check_variable_and_dtype( - x, 'input[' + str(id) + ']', + x, + 'input[' + str(id) + ']', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'partial_concat') + 'partial_concat', + ) check_type(start_index, 'start_index', (int), 'partial_concat') check_type(length, 'length', (int), 'partial_concat') inputs = {'X': input} attrs = {'start_index': start_index, 'length': length} helper = LayerHelper('partial_concat', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='partial_concat', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='partial_concat', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + ) return out @@ -1046,9 +1050,12 @@ def partial_sum(input, start_index=0, length=-1): out = exe.run(feed={"x":xx, "y":yy}, fetch_list=[sum]) """ for id, x in enumerate(input): - check_variable_and_dtype(x, 'input[' + str(id) + ']', - ['float32', 'float64', 'int32', 'int64'], - 'partial_sum') + check_variable_and_dtype( + x, + 'input[' + str(id) + ']', + ['float32', 'float64', 'int32', 'int64'], + 'partial_sum', + ) inputs = {'X': input} attrs = {} @@ -1056,22 +1063,23 @@ def partial_sum(input, start_index=0, length=-1): attrs['length'] = length helper = LayerHelper('partial_sum', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='partial_sum', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='partial_sum', inputs=inputs, outputs={'Out': [out]}, attrs=attrs + ) return out -def sparse_embedding(input, - size, - padding_idx=None, - is_test=False, - entry=None, - table_class="MemorySparseTable", - param_attr=None, - dtype='float32', - slot=None): +def sparse_embedding( + input, + size, + padding_idx=None, + is_test=False, + entry=None, + table_class="MemorySparseTable", + param_attr=None, + dtype='float32', + slot=None, +): r""" :api_attr: Static Graph @@ -1183,25 +1191,39 @@ def sparse_embedding(input, helper = LayerHelper('sparse_embedding', **locals()) - check_variable_and_dtype(input, 'input', ['int64'], - 'fluid.contrib.layers.sparse_embedding') + 
check_variable_and_dtype( + input, 'input', ['int64'], 'fluid.contrib.layers.sparse_embedding' + ) - check_dtype(dtype, 'dtype', ['float32', 'float64'], - 'paddle.static.nn.sparse_embedding') + check_dtype( + dtype, + 'dtype', + ['float32', 'float64'], + 'paddle.static.nn.sparse_embedding', + ) - w = helper.create_parameter(attr=helper.param_attr, - shape=size, - type=core.VarDesc.VarType.SELECTED_ROWS, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, + shape=size, + type=core.VarDesc.VarType.SELECTED_ROWS, + dtype=dtype, + is_bias=False, + ) tmp = helper.create_variable_for_type_inference(dtype) - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (size[0] + padding_idx) + ) if table_class not in [ - "CommonSparseTable", "SSDSparseTable", "MemorySparseTable" + "CommonSparseTable", + "SSDSparseTable", + "MemorySparseTable", ]: raise ValueError( "table_class must be in [CommonSparseTable, SSDSparseTable, MemorySparseTable]" @@ -1211,7 +1233,9 @@ def sparse_embedding(input, if entry is not None: if entry.__class__.__name__ not in [ - "ProbabilityEntry", "CountFilterEntry", "ShowClickEntry" + "ProbabilityEntry", + "CountFilterEntry", + "ShowClickEntry", ]: raise ValueError( "entry must be instance in [paddle.distributed.ProbabilityEntry, paddle.distributed.CountFilterEntry, paddle.distributed.ShowClickEntry]" @@ -1221,22 +1245,21 @@ def sparse_embedding(input, if slot == None: slot = 0 - helper.append_op(type='lookup_table', - inputs={ - 'Ids': input, - 'W': w - }, - outputs={'Out': tmp}, - attrs={ - 'padding_idx': padding_idx, - 'is_sparse': True, - 'is_distributed': True, - 'remote_prefetch': True, - 'is_test': is_test, - 'entry': entry_str, - 'table_class': table_class, - 'slot': slot - }) + helper.append_op( + type='lookup_table', + inputs={'Ids': input, 'W': w}, + outputs={'Out': tmp}, + attrs={ + 'padding_idx': padding_idx, + 'is_sparse': True, + 'is_distributed': True, + 'remote_prefetch': True, + 'is_test': is_test, + 'entry': entry_str, + 'table_class': table_class, + 'slot': slot, + }, + ) return tmp @@ -1298,48 +1321,46 @@ def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'): exe.run(fluid.default_startup_program()) xx = np.array([[2],[3]]).reshape((2,1)).astype("int32") child_res, leaf_mask_res = exe.run(feed={"x":xx}, fetch_list=[child, leaf_mask]) - """ + """ helper = LayerHelper("tdm_child", **locals()) - check_dtype(dtype, 'dtype', ['int32', 'int64'], - 'fluid.contrib.layers.tdm_child') + check_dtype( + dtype, 'dtype', ['int32', 'int64'], 'fluid.contrib.layers.tdm_child' + ) c_dtype = convert_np_dtype_to_dtype_(dtype) - tree_info = helper.create_parameter(attr=helper.param_attr, - shape=[node_nums, 3 + child_nums], - dtype=dtype, - default_initializer=Constant(0)) + tree_info = helper.create_parameter( + attr=helper.param_attr, + shape=[node_nums, 3 + child_nums], + dtype=dtype, + default_initializer=Constant(0), + ) tree_info.stop_gradient = True child = helper.create_variable_for_type_inference(dtype=dtype) leaf_mask = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='tdm_child', - inputs={ - 'X': x, - 'TreeInfo': tree_info - }, - outputs={ - 'Child': child, - 'LeafMask': leaf_mask - }, - attrs={ - 'child_nums': child_nums, - 'dtype': c_dtype - }, - stop_gradient=True) + helper.append_op( + type='tdm_child', + inputs={'X': x, 
'TreeInfo': tree_info}, + outputs={'Child': child, 'LeafMask': leaf_mask}, + attrs={'child_nums': child_nums, 'dtype': c_dtype}, + stop_gradient=True, + ) return (child, leaf_mask) -def tdm_sampler(x, - neg_samples_num_list, - layer_node_num_list, - leaf_node_num, - tree_travel_attr=None, - tree_layer_attr=None, - output_positive=True, - output_list=True, - seed=0, - tree_dtype='int32', - dtype='int32'): +def tdm_sampler( + x, + neg_samples_num_list, + layer_node_num_list, + leaf_node_num, + tree_travel_attr=None, + tree_layer_attr=None, + output_positive=True, + output_list=True, + seed=0, + tree_dtype='int32', + dtype='int32', +): """ **Tdm Sampler** According to the input positive samples at leaf node(x), do negative sampling layer by layer on the given tree. @@ -1426,18 +1447,25 @@ def tdm_sampler(x, """ helper = LayerHelper("tdm_sampler", **locals()) - check_dtype(tree_dtype, 'tree_dtype', ['int32', 'int64'], - 'fluid.contrib.layers.tdm_sampler') - check_dtype(dtype, 'dtype', ['int32', 'int64'], - 'fluid.contrib.layers.tdm_sampler') + check_dtype( + tree_dtype, + 'tree_dtype', + ['int32', 'int64'], + 'fluid.contrib.layers.tdm_sampler', + ) + check_dtype( + dtype, 'dtype', ['int32', 'int64'], 'fluid.contrib.layers.tdm_sampler' + ) c_dtype = convert_np_dtype_to_dtype_(dtype) if len(neg_samples_num_list) != len(layer_node_num_list): raise ValueError( "The shape of negative samples list must match the shape of layers. " "But received len of neg_samples_num_list: {}," - "and len of layer_node_num_list: {}, please check your input.". - format(len(neg_samples_num_list), len(layer_node_num_list))) + "and len of layer_node_num_list: {}, please check your input.".format( + len(neg_samples_num_list), len(layer_node_num_list) + ) + ) assert leaf_node_num is not None, "leaf_node_num should not be None here." layer_nums = 0 @@ -1452,21 +1480,31 @@ def tdm_sampler(x, "The number of negative samples must be less than the number of nodes " "in the layer {}, But received negative nums {}, and num of node at layer {} " "is {}, please check your input.".format( - layer_idx, neg_samples_num_list[layer_idx], layer_idx, - layer_node_num_list[layer_idx])) - assert leaf_node_num < node_nums, "leaf_node_num must be less than total node nums." + layer_idx, + neg_samples_num_list[layer_idx], + layer_idx, + layer_node_num_list[layer_idx], + ) + ) + assert ( + leaf_node_num < node_nums + ), "leaf_node_num must be less than total node nums." 
travel_shape = [leaf_node_num, layer_nums] - travel = helper.create_parameter(attr=tree_travel_attr, - shape=travel_shape, - dtype=tree_dtype, - default_initializer=Constant(0)) + travel = helper.create_parameter( + attr=tree_travel_attr, + shape=travel_shape, + dtype=tree_dtype, + default_initializer=Constant(0), + ) layer_shape = [node_nums, 1] - layer = helper.create_parameter(attr=tree_layer_attr, - shape=layer_shape, - dtype=tree_dtype, - default_initializer=Constant(0)) + layer = helper.create_parameter( + attr=tree_layer_attr, + shape=layer_shape, + dtype=tree_dtype, + default_initializer=Constant(0), + ) out = helper.create_variable_for_type_inference(dtype=dtype) out.stop_gradient = True @@ -1477,24 +1515,18 @@ def tdm_sampler(x, mask = helper.create_variable_for_type_inference(dtype=dtype) mask.stop_gradient = True - helper.append_op(type='tdm_sampler', - inputs={ - "X": x, - "Travel": travel, - "Layer": layer - }, - outputs={ - 'Out': out, - 'Labels': labels, - 'Mask': mask - }, - attrs={ - 'neg_samples_num_list': neg_samples_num_list, - 'output_positive': output_positive, - 'layer_offset_lod': tree_layer_offset_lod, - 'seed': seed, - 'dtype': c_dtype - }) + helper.append_op( + type='tdm_sampler', + inputs={"X": x, "Travel": travel, "Layer": layer}, + outputs={'Out': out, 'Labels': labels, 'Mask': mask}, + attrs={ + 'neg_samples_num_list': neg_samples_num_list, + 'output_positive': output_positive, + 'layer_offset_lod': tree_layer_offset_lod, + 'seed': seed, + 'dtype': c_dtype, + }, + ) if output_list: output_list = [] @@ -1506,31 +1538,30 @@ def tdm_sampler(x, positive_flag = 0 for layer_sample_num in neg_samples_num_list: - end_offset = start_offset + \ - layer_sample_num + positive_flag - layer_samples = slice(out, - axes=[1], - starts=[start_offset], - ends=[end_offset]) - layer_labels = slice(labels, - axes=[1], - starts=[start_offset], - ends=[end_offset]) - layer_mask = slice(mask, - axes=[1], - starts=[start_offset], - ends=[end_offset]) - - layer_samples = reshape(layer_samples, - [-1, layer_sample_num + positive_flag, 1]) + end_offset = start_offset + layer_sample_num + positive_flag + layer_samples = slice( + out, axes=[1], starts=[start_offset], ends=[end_offset] + ) + layer_labels = slice( + labels, axes=[1], starts=[start_offset], ends=[end_offset] + ) + layer_mask = slice( + mask, axes=[1], starts=[start_offset], ends=[end_offset] + ) + + layer_samples = reshape( + layer_samples, [-1, layer_sample_num + positive_flag, 1] + ) layer_samples.stop_gradient = True - layer_labels = reshape(layer_labels, - [-1, layer_sample_num + positive_flag, 1]) + layer_labels = reshape( + layer_labels, [-1, layer_sample_num + positive_flag, 1] + ) layer_labels.stop_gradient = True - layer_mask = reshape(layer_mask, - [-1, layer_sample_num + positive_flag, 1]) + layer_mask = reshape( + layer_mask, [-1, layer_sample_num + positive_flag, 1] + ) layer_mask.stop_gradient = True output_list.append(layer_samples) @@ -1545,12 +1576,14 @@ def tdm_sampler(x, return (out, labels, mask) -def rank_attention(input, - rank_offset, - rank_param_shape, - rank_param_attr, - max_rank=3, - max_size=0): +def rank_attention( + input, + rank_offset, + rank_param_shape, + rank_param_attr, + max_rank=3, + max_size=0, +): """ **Rank Attention layer** This Op can calculate rank attention between input and rank_param, and @@ -1588,32 +1621,25 @@ def rank_attention(input, input_shape = input.shape assert input_shape[1] * max_rank * max_rank == rank_param_shape[0] - rank_param = 
helper.create_parameter(attr=rank_param_attr, - shape=rank_param_shape, - dtype=dtype) + rank_param = helper.create_parameter( + attr=rank_param_attr, shape=rank_param_shape, dtype=dtype + ) rank_param.stop_gradient = False output = helper.create_variable_for_type_inference(dtype) - input_help = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) - ins_rank = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) - - helper.append_op(type="rank_attention", - inputs={ - "X": input, - "RankOffset": rank_offset, - "RankParam": rank_param - }, - outputs={ - "Out": output, - "InputHelp": input_help, - "InsRank": ins_rank - }, - attrs={ - "MaxRank": max_rank, - "MaxSize": max_size - }) + input_help = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) + ins_rank = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) + + helper.append_op( + type="rank_attention", + inputs={"X": input, "RankOffset": rank_offset, "RankParam": rank_param}, + outputs={"Out": output, "InputHelp": input_help, "InsRank": ins_rank}, + attrs={"MaxRank": max_rank, "MaxSize": max_size}, + ) return output @@ -1666,22 +1692,18 @@ def batch_fc(input, param_size, param_attr, bias_size, bias_attr, act=None): dtype = helper.input_dtype() check_dtype(dtype, 'input', ['float32', 'float64'], 'batch_fc') - w = helper.create_parameter(attr=param_attr, - shape=param_size, - dtype=dtype, - is_bias=False) - b = helper.create_parameter(attr=bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=param_attr, shape=param_size, dtype=dtype, is_bias=False + ) + b = helper.create_parameter( + attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=False + ) pre_act = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="batch_fc", - inputs={ - "Input": input, - "W": w, - "Bias": b - }, - outputs={"Out": pre_act}) + helper.append_op( + type="batch_fc", + inputs={"Input": input, "W": w, "Bias": b}, + outputs={"Out": pre_act}, + ) return helper.append_activation(pre_act) @@ -1720,16 +1742,12 @@ def _pull_box_extended_sparse(input, size, extend_size=64, dtype='float32'): helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] - helper.append_op(type='pull_box_extended_sparse', - inputs={'Ids': inputs}, - outputs={ - 'Out': outs, - 'OutExtend': outs_extend - }, - attrs={ - 'emb_size': size, - 'emb_extended_size': extend_size - }) + helper.append_op( + type='pull_box_extended_sparse', + inputs={'Ids': inputs}, + outputs={'Out': outs, 'OutExtend': outs_extend}, + attrs={'emb_size': size, 'emb_extended_size': extend_size}, + ) if len(outs) == 1: return outs[0], outs_extend[0] return outs, outs_extend @@ -1738,8 +1756,8 @@ def _pull_box_extended_sparse(input, size, extend_size=64, dtype='float32'): def bilateral_slice(x, guide, grid, has_offset, name=None): """ :alias_main: paddle.nn.functional.bilateral_slice - :alias: paddle.nn.functional.bilateral_slice,paddle.nn.functional.vision.bilateral_slice - :old_api: paddle.fluid.layers.bilateral_slice + :alias: paddle.nn.functional.bilateral_slice,paddle.nn.functional.vision.bilateral_slice + :old_api: paddle.fluid.layers.bilateral_slice This operation implements bilateral slicing on the input according to the guide map. 
For more information of bilateral slicing, please refer to Deep Bilateral Learning for Real-Time Image Enhancement _ @@ -1783,28 +1801,34 @@ def bilateral_slice(x, guide, grid, has_offset, name=None): return getattr(_legacy_C_ops, "bilateral_slice")(x, grid, guide, *attrs) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'bilateral_slice') - check_variable_and_dtype(guide, 'guide', ['float32', 'float64'], - 'bilateral_slice') - check_variable_and_dtype(grid, 'grid', ['float32', 'float64'], - 'bilateral_slice') + check_variable_and_dtype( + guide, 'guide', ['float32', 'float64'], 'bilateral_slice' + ) + check_variable_and_dtype( + grid, 'grid', ['float32', 'float64'], 'bilateral_slice' + ) helper = LayerHelper("bilateral_slice", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x, 'Guide': guide, 'Grid': grid} - helper.append_op(type='bilateral_slice', - inputs=inputs, - attrs={'has_offset': has_offset}, - outputs={'Out': out}) + helper.append_op( + type='bilateral_slice', + inputs=inputs, + attrs={'has_offset': has_offset}, + outputs={'Out': out}, + ) return out -def correlation(x, - y, - pad_size, - kernel_size, - max_displacement, - stride1, - stride2, - corr_type_multiply=1): +def correlation( + x, + y, + pad_size, + kernel_size, + max_displacement, + stride1, + stride2, + corr_type_multiply=1, +): """ This operation compute correlation of two tensor. @@ -1852,40 +1876,52 @@ def correlation(x, """ if paddle.fluid._non_static_mode(): - attrs = ("pad_size", pad_size, "kernel_size", kernel_size, - "max_displacement", max_displacement, "stride1", stride1, - "stride2", stride2, "corr_type_multiply", corr_type_multiply) + attrs = ( + "pad_size", + pad_size, + "kernel_size", + kernel_size, + "max_displacement", + max_displacement, + "stride1", + stride1, + "stride2", + stride2, + "corr_type_multiply", + corr_type_multiply, + ) output = getattr(_legacy_C_ops, "correlation")(x, y, *attrs) else: helper = LayerHelper("correlation", **locals()) output = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="correlation", - inputs={ - "Input1": x, - "Input2": y - }, - attrs={ - "pad_size": pad_size, - "kernel_size": kernel_size, - "max_displacement": max_displacement, - "stride1": stride1, - "stride2": stride2, - "corr_type_multiply": corr_type_multiply - }, - outputs={"Output": output}) + helper.append_op( + type="correlation", + inputs={"Input1": x, "Input2": y}, + attrs={ + "pad_size": pad_size, + "kernel_size": kernel_size, + "max_displacement": max_displacement, + "stride1": stride1, + "stride2": stride2, + "corr_type_multiply": corr_type_multiply, + }, + outputs={"Output": output}, + ) return output -def fused_bn_add_act(x, - y, - momentum=0.9, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - moving_mean_name=None, - moving_variance_name=None, - act=None, - name=None): +def fused_bn_add_act( + x, + y, + momentum=0.9, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + moving_mean_name=None, + moving_variance_name=None, + act=None, + name=None, +): r""" This Op performs batch norm on input x, and adds the result to input y. Then it performs activation on the sum. The data format of inputs must be NHWC @@ -1906,14 +1942,14 @@ def fused_bn_add_act(x, numerical stability. Default is 1e-5. param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale` of batch_norm. 
If it is set to None or one attribute of ParamAttr, batch_norm - will create ParamAttr as param_attr, the name of scale can be set in ParamAttr. - If the Initializer of the param_attr is not set, the parameter is initialized - with Xavier. Default: None. + will create ParamAttr as param_attr, the name of scale can be set in ParamAttr. + If the Initializer of the param_attr is not set, the parameter is initialized + with Xavier. Default: None. bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm - will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. - If the Initializer of the bias_attr is not set, the bias is initialized zero. - Default: None. + will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. + If the Initializer of the bias_attr is not set, the bias is initialized zero. + Default: None. moving_mean_name(str, optional): The name of moving_mean which store the global Mean. If it is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm will save global mean with the string. @@ -1988,10 +2024,12 @@ def fused_bn_add_act(x, """ helper = LayerHelper('fused_bn_add_act', **locals()) - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'fused_bn_add_act') - check_variable_and_dtype(y, 'input', ['float16', 'float32', 'float64'], - 'fused_bn_add_act') + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'fused_bn_add_act' + ) + check_variable_and_dtype( + y, 'input', ['float16', 'float32', 'float64'], 'fused_bn_add_act' + ) bn_param_dtype = core.VarDesc.VarType.FP32 x_shape = x.shape @@ -1999,25 +2037,35 @@ def fused_bn_add_act(x, param_shape = [channel_num] # create parameter - scale = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=bn_param_dtype, - default_initializer=Constant(1.0)) - bias = helper.create_parameter(attr=helper.bias_attr, - shape=param_shape, - dtype=bn_param_dtype, - is_bias=True) - mean = helper.create_parameter(attr=ParamAttr(name=moving_mean_name, - initializer=Constant(0.0), - trainable=False), - shape=param_shape, - dtype=bn_param_dtype) + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=bn_param_dtype, + default_initializer=Constant(1.0), + ) + bias = helper.create_parameter( + attr=helper.bias_attr, + shape=param_shape, + dtype=bn_param_dtype, + is_bias=True, + ) + mean = helper.create_parameter( + attr=ParamAttr( + name=moving_mean_name, initializer=Constant(0.0), trainable=False + ), + shape=param_shape, + dtype=bn_param_dtype, + ) mean.stop_gradient = True - variance = helper.create_parameter(attr=ParamAttr(name=moving_variance_name, - initializer=Constant(1.0), - trainable=False), - shape=param_shape, - dtype=bn_param_dtype) + variance = helper.create_parameter( + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False, + ), + shape=param_shape, + dtype=bn_param_dtype, + ) variance.stop_gradient = True # create output @@ -2025,14 +2073,18 @@ def fused_bn_add_act(x, mean_out = mean # variance and variance out share the same memory variance_out = variance - saved_mean = helper.create_variable_for_type_inference(dtype=bn_param_dtype, - stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, 
stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) reserve_space = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.FP16, stop_gradient=True) + dtype=core.VarDesc.VarType.FP16, stop_gradient=True + ) batch_norm_out = helper.create_variable_for_type_inference( - core.VarDesc.VarType.FP16) + core.VarDesc.VarType.FP16 + ) inputs = { "X": x, @@ -2048,51 +2100,50 @@ def fused_bn_add_act(x, "VarianceOut": variance_out, "SavedMean": saved_mean, "SavedVariance": saved_variance, - "ReserveSpace": reserve_space + "ReserveSpace": reserve_space, } - helper.append_op(type="fused_bn_add_activation", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="fused_bn_add_activation", + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) return batch_norm_out -def pow2_decay_with_linear_warmup(warmup_steps, - total_steps, - base_lr, - end_lr, - dtype='float32', - name=None): +def pow2_decay_with_linear_warmup( + warmup_steps, total_steps, base_lr, end_lr, dtype='float32', name=None +): if paddle.fluid._non_static_mode(): raise NotImplementedError( - "pow2_decay_with_linear_warmup does not support dygraph mode yet.") + "pow2_decay_with_linear_warmup does not support dygraph mode yet." + ) helper = LayerHelper("pow2_decay_with_linear_warmup", **locals()) lr = helper.create_global_variable(persistable=True, dtype=dtype, shape=[1]) helper.set_variable_initializer( - lr, Constant(value=float(base_lr) / warmup_steps)) + lr, Constant(value=float(base_lr) / warmup_steps) + ) - step = helper.create_global_variable(persistable=True, - dtype='int64', - shape=[1]) + step = helper.create_global_variable( + persistable=True, dtype='int64', shape=[1] + ) helper.set_variable_initializer(step, Constant(value=0)) - assert warmup_steps <= total_steps, "warmup_steps cannot be larger than total_steps" - - helper.append_op(type="pow2_decay_with_linear_warmup", - inputs={ - "LearningRate": lr, - "Step": step - }, - outputs={ - "LearningRateOut": lr, - "StepOut": step - }, - attrs={ - "warmup_steps": warmup_steps, - "total_steps": total_steps, - "base_lr": base_lr, - "end_lr": end_lr, - }) + assert ( + warmup_steps <= total_steps + ), "warmup_steps cannot be larger than total_steps" + + helper.append_op( + type="pow2_decay_with_linear_warmup", + inputs={"LearningRate": lr, "Step": step}, + outputs={"LearningRateOut": lr, "StepOut": step}, + attrs={ + "warmup_steps": warmup_steps, + "total_steps": total_steps, + "base_lr": base_lr, + "end_lr": end_lr, + }, + ) return lr diff --git a/python/paddle/fluid/contrib/layers/rnn_impl.py b/python/paddle/fluid/contrib/layers/rnn_impl.py index 95ce0a6ba538a22280530f914a08522e81f686d3..668ce445f78b041bdad077eae9de07c4d8a371a2 100644 --- a/python/paddle/fluid/contrib/layers/rnn_impl.py +++ b/python/paddle/fluid/contrib/layers/rnn_impl.py @@ -74,18 +74,21 @@ class BasicGRUUnit(Layer): """ - def __init__(self, - name_scope, - hidden_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - dtype='float32'): + def __init__( + self, + name_scope, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + dtype='float32', + ): super(BasicGRUUnit, self).__init__(name_scope, dtype) # reserve old school _full_name and _helper for static graph save load - self._full_name = unique_name.generate(name_scope + "/" + - self.__class__.__name__) + self._full_name = unique_name.generate( + name_scope + "/" + self.__class__.__name__ + ) self._helper = 
LayerObjectHelper(self._full_name) self._name = name_scope @@ -98,7 +101,7 @@ class BasicGRUUnit(Layer): def _build_once(self, input, pre_hidden): self._input_size = input.shape[-1] - assert (self._input_size > 0) + assert self._input_size > 0 if self._param_attr is not None and self._param_attr.name is not None: gate_param_attr = copy.deepcopy(self._param_attr) @@ -112,12 +115,14 @@ class BasicGRUUnit(Layer): self._gate_weight = self.create_parameter( attr=gate_param_attr, shape=[self._input_size + self._hiden_size, 2 * self._hiden_size], - dtype=self._dtype) + dtype=self._dtype, + ) self._candidate_weight = self.create_parameter( attr=candidate_param_attr, shape=[self._input_size + self._hiden_size, self._hiden_size], - dtype=self._dtype) + dtype=self._dtype, + ) if self._bias_attr is not None and self._bias_attr.name is not None: gate_bias_attr = copy.deepcopy(self._bias_attr) @@ -128,14 +133,18 @@ class BasicGRUUnit(Layer): gate_bias_attr = self._bias_attr candidate_bias_attr = self._bias_attr - self._gate_bias = self.create_parameter(attr=gate_bias_attr, - shape=[2 * self._hiden_size], - dtype=self._dtype, - is_bias=True) - self._candidate_bias = self.create_parameter(attr=candidate_bias_attr, - shape=[self._hiden_size], - dtype=self._dtype, - is_bias=True) + self._gate_bias = self.create_parameter( + attr=gate_bias_attr, + shape=[2 * self._hiden_size], + dtype=self._dtype, + is_bias=True, + ) + self._candidate_bias = self.create_parameter( + attr=candidate_bias_attr, + shape=[self._hiden_size], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input, pre_hidden): concat_input_hidden = layers.concat([input, pre_hidden], 1) @@ -149,8 +158,9 @@ class BasicGRUUnit(Layer): r_hidden = r * pre_hidden - candidate = layers.matmul(layers.concat([input, r_hidden], 1), - self._candidate_weight) + candidate = layers.matmul( + layers.concat([input, r_hidden], 1), self._candidate_weight + ) candidate = layers.elementwise_add(candidate, self._candidate_bias) c = self._activation(candidate) @@ -159,20 +169,22 @@ class BasicGRUUnit(Layer): return new_hidden -def basic_gru(input, - init_hidden, - hidden_size, - num_layers=1, - sequence_length=None, - dropout_prob=0.0, - bidirectional=False, - batch_first=True, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - dtype='float32', - name='basic_gru'): +def basic_gru( + input, + init_hidden, + hidden_size, + num_layers=1, + sequence_length=None, + dropout_prob=0.0, + bidirectional=False, + batch_first=True, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + dtype='float32', + name='basic_gru', +): r""" GRU implementation using basic operator, supports multiple layers and bidirectional gru. 
@@ -273,8 +285,16 @@ def basic_gru(input, else: layer_bias_attr = bias_attr fw_unit_list.append( - BasicGRUUnit(new_name, hidden_size, layer_param_attr, - layer_bias_attr, gate_activation, activation, dtype)) + BasicGRUUnit( + new_name, + hidden_size, + layer_param_attr, + layer_bias_attr, + gate_activation, + activation, + dtype, + ) + ) if bidirectional: bw_unit_list = [] @@ -292,9 +312,16 @@ def basic_gru(input, layer_bias_attr = bias_attr bw_unit_list.append( - BasicGRUUnit(new_name, hidden_size, layer_param_attr, - layer_bias_attr, gate_activation, activation, - dtype)) + BasicGRUUnit( + new_name, + hidden_size, + layer_param_attr, + layer_bias_attr, + gate_activation, + activation, + dtype, + ) + ) if batch_first: input = layers.transpose(input, [1, 0, 2]) @@ -302,9 +329,9 @@ def basic_gru(input, mask = None if sequence_length: max_seq_len = layers.shape(input)[0] - mask = layers.sequence_mask(sequence_length, - maxlen=max_seq_len, - dtype='float32') + mask = layers.sequence_mask( + sequence_length, maxlen=max_seq_len, dtype='float32' + ) mask = layers.transpose(mask, [1, 0]) direc_num = 1 @@ -312,12 +339,12 @@ def basic_gru(input, direc_num = 2 if init_hidden: init_hidden = layers.reshape( - init_hidden, shape=[num_layers, direc_num, -1, hidden_size]) + init_hidden, shape=[num_layers, direc_num, -1, hidden_size] + ) - def get_single_direction_output(rnn_input, - unit_list, - mask=None, - direc_index=0): + def get_single_direction_output( + rnn_input, unit_list, mask=None, direc_index=0 + ): rnn = StaticRNN() with rnn.step(): step_input = rnn.step_input(rnn_input) @@ -329,16 +356,20 @@ def basic_gru(input, if init_hidden: pre_hidden = rnn.memory(init=init_hidden[i, direc_index]) else: - pre_hidden = rnn.memory(batch_ref=rnn_input, - shape=[-1, hidden_size], - ref_batch_dim_idx=1) + pre_hidden = rnn.memory( + batch_ref=rnn_input, + shape=[-1, hidden_size], + ref_batch_dim_idx=1, + ) new_hidden = unit_list[i](step_input, pre_hidden) if mask: new_hidden = layers.elementwise_mul( - new_hidden, step_mask, axis=0) - layers.elementwise_mul( - pre_hidden, (step_mask - 1), axis=0) + new_hidden, step_mask, axis=0 + ) - layers.elementwise_mul( + pre_hidden, (step_mask - 1), axis=0 + ) rnn.update_memory(pre_hidden, new_hidden) rnn.step_output(new_hidden) @@ -362,26 +393,25 @@ def basic_gru(input, last_hidden_array.append(last_hidden) last_hidden_output = layers.concat(last_hidden_array, axis=0) - last_hidden_output = layers.reshape(last_hidden_output, - shape=[num_layers, -1, hidden_size]) + last_hidden_output = layers.reshape( + last_hidden_output, shape=[num_layers, -1, hidden_size] + ) return rnn_output, last_hidden_output # seq_len, batch_size, hidden_size - fw_rnn_out, fw_last_hidden = get_single_direction_output(input, - fw_unit_list, - mask, - direc_index=0) + fw_rnn_out, fw_last_hidden = get_single_direction_output( + input, fw_unit_list, mask, direc_index=0 + ) if bidirectional: bw_input = layers.reverse(input, axis=[0]) bw_mask = None if mask: bw_mask = layers.reverse(mask, axis=[0]) - bw_rnn_out, bw_last_hidden = get_single_direction_output(bw_input, - bw_unit_list, - bw_mask, - direc_index=1) + bw_rnn_out, bw_last_hidden = get_single_direction_output( + bw_input, bw_unit_list, bw_mask, direc_index=1 + ) bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0]) @@ -389,7 +419,8 @@ def basic_gru(input, last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1) last_hidden = layers.reshape( - last_hidden, shape=[num_layers * direc_num, -1, hidden_size]) + last_hidden, 
shape=[num_layers * direc_num, -1, hidden_size] + ) if batch_first: rnn_out = layers.transpose(rnn_out, [1, 0, 2]) @@ -405,22 +436,24 @@ def basic_gru(input, return rnn_out, last_hidden -def basic_lstm(input, - init_hidden, - init_cell, - hidden_size, - num_layers=1, - sequence_length=None, - dropout_prob=0.0, - bidirectional=False, - batch_first=True, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - forget_bias=1.0, - dtype='float32', - name='basic_lstm'): +def basic_lstm( + input, + init_hidden, + init_cell, + hidden_size, + num_layers=1, + sequence_length=None, + dropout_prob=0.0, + bidirectional=False, + batch_first=True, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32', + name='basic_lstm', +): r""" LSTM implementation using basic operators, supports multiple layers and bidirectional LSTM. @@ -535,14 +568,17 @@ def basic_lstm(input, else: layer_bias_attr = bias_attr fw_unit_list.append( - BasicLSTMUnit(new_name, - hidden_size, - param_attr=layer_param_attr, - bias_attr=layer_bias_attr, - gate_activation=gate_activation, - activation=activation, - forget_bias=forget_bias, - dtype=dtype)) + BasicLSTMUnit( + new_name, + hidden_size, + param_attr=layer_param_attr, + bias_attr=layer_bias_attr, + gate_activation=gate_activation, + activation=activation, + forget_bias=forget_bias, + dtype=dtype, + ) + ) if bidirectional: bw_unit_list = [] @@ -559,14 +595,17 @@ def basic_lstm(input, else: layer_bias_attr = param_attr bw_unit_list.append( - BasicLSTMUnit(new_name, - hidden_size, - param_attr=layer_param_attr, - bias_attr=layer_bias_attr, - gate_activation=gate_activation, - activation=activation, - forget_bias=forget_bias, - dtype=dtype)) + BasicLSTMUnit( + new_name, + hidden_size, + param_attr=layer_param_attr, + bias_attr=layer_bias_attr, + gate_activation=gate_activation, + activation=activation, + forget_bias=forget_bias, + dtype=dtype, + ) + ) if batch_first: input = layers.transpose(input, [1, 0, 2]) @@ -574,9 +613,9 @@ def basic_lstm(input, mask = None if sequence_length: max_seq_len = layers.shape(input)[0] - mask = layers.sequence_mask(sequence_length, - maxlen=max_seq_len, - dtype='float32') + mask = layers.sequence_mask( + sequence_length, maxlen=max_seq_len, dtype='float32' + ) mask = layers.transpose(mask, [1, 0]) @@ -586,15 +625,16 @@ def basic_lstm(input, # convert to [num_layers, 2, batch_size, hidden_size] if init_hidden: init_hidden = layers.reshape( - init_hidden, shape=[num_layers, direc_num, -1, hidden_size]) + init_hidden, shape=[num_layers, direc_num, -1, hidden_size] + ) init_cell = layers.reshape( - init_cell, shape=[num_layers, direc_num, -1, hidden_size]) + init_cell, shape=[num_layers, direc_num, -1, hidden_size] + ) # forward direction - def get_single_direction_output(rnn_input, - unit_list, - mask=None, - direc_index=0): + def get_single_direction_output( + rnn_input, unit_list, mask=None, direc_index=0 + ): rnn = StaticRNN() with rnn.step(): step_input = rnn.step_input(rnn_input) @@ -607,21 +647,28 @@ def basic_lstm(input, pre_hidden = rnn.memory(init=init_hidden[i, direc_index]) pre_cell = rnn.memory(init=init_cell[i, direc_index]) else: - pre_hidden = rnn.memory(batch_ref=rnn_input, - shape=[-1, hidden_size]) - pre_cell = rnn.memory(batch_ref=rnn_input, - shape=[-1, hidden_size]) + pre_hidden = rnn.memory( + batch_ref=rnn_input, shape=[-1, hidden_size] + ) + pre_cell = rnn.memory( + batch_ref=rnn_input, shape=[-1, hidden_size] + ) - new_hidden, new_cell = 
unit_list[i](step_input, pre_hidden, - pre_cell) + new_hidden, new_cell = unit_list[i]( + step_input, pre_hidden, pre_cell + ) if mask: new_hidden = layers.elementwise_mul( - new_hidden, step_mask, axis=0) - layers.elementwise_mul( - pre_hidden, (step_mask - 1), axis=0) + new_hidden, step_mask, axis=0 + ) - layers.elementwise_mul( + pre_hidden, (step_mask - 1), axis=0 + ) new_cell = layers.elementwise_mul( - new_cell, step_mask, axis=0) - layers.elementwise_mul( - pre_cell, (step_mask - 1), axis=0) + new_cell, step_mask, axis=0 + ) - layers.elementwise_mul( + pre_cell, (step_mask - 1), axis=0 + ) rnn.update_memory(pre_hidden, new_hidden) rnn.update_memory(pre_cell, new_cell) @@ -634,7 +681,8 @@ def basic_lstm(input, step_input = layers.dropout( step_input, dropout_prob=dropout_prob, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn.step_output(step_input) @@ -652,17 +700,20 @@ def basic_lstm(input, last_cell_array.append(last_cell) last_hidden_output = layers.concat(last_hidden_array, axis=0) - last_hidden_output = layers.reshape(last_hidden_output, - shape=[num_layers, -1, hidden_size]) + last_hidden_output = layers.reshape( + last_hidden_output, shape=[num_layers, -1, hidden_size] + ) last_cell_output = layers.concat(last_cell_array, axis=0) - last_cell_output = layers.reshape(last_cell_output, - shape=[num_layers, -1, hidden_size]) + last_cell_output = layers.reshape( + last_cell_output, shape=[num_layers, -1, hidden_size] + ) return rnn_output, last_hidden_output, last_cell_output # seq_len, batch_size, hidden_size fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output( - input, fw_unit_list, mask, direc_index=0) + input, fw_unit_list, mask, direc_index=0 + ) if bidirectional: bw_input = layers.reverse(input, axis=[0]) @@ -670,18 +721,21 @@ def basic_lstm(input, if mask: bw_mask = layers.reverse(mask, axis=[0]) bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output( - bw_input, bw_unit_list, bw_mask, direc_index=1) + bw_input, bw_unit_list, bw_mask, direc_index=1 + ) bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0]) rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2) last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1) last_hidden = layers.reshape( - last_hidden, shape=[num_layers * direc_num, -1, hidden_size]) + last_hidden, shape=[num_layers * direc_num, -1, hidden_size] + ) last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1) last_cell = layers.reshape( - last_cell, shape=[num_layers * direc_num, -1, hidden_size]) + last_cell, shape=[num_layers * direc_num, -1, hidden_size] + ) if batch_first: rnn_out = layers.transpose(rnn_out, [1, 0, 2]) @@ -769,19 +823,22 @@ class BasicLSTMUnit(Layer): """ - def __init__(self, - name_scope, - hidden_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - forget_bias=1.0, - dtype='float32'): + def __init__( + self, + name_scope, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32', + ): super(BasicLSTMUnit, self).__init__(name_scope, dtype) # reserve old school _full_name and _helper for static graph save load - self._full_name = unique_name.generate(name_scope + "/" + - self.__class__.__name__) + self._full_name = unique_name.generate( + name_scope + "/" + self.__class__.__name__ + ) self._helper = LayerObjectHelper(self._full_name) self._name = name_scope @@ -790,25 +847,28 @@ class BasicLSTMUnit(Layer): self._bias_attr = 
bias_attr self._gate_activation = gate_activation or layers.sigmoid self._activation = activation or layers.tanh - self._forget_bias = layers.fill_constant([1], - dtype=dtype, - value=forget_bias) + self._forget_bias = layers.fill_constant( + [1], dtype=dtype, value=forget_bias + ) self._forget_bias.stop_gradient = False self._dtype = dtype def _build_once(self, input, pre_hidden, pre_cell): self._input_size = input.shape[-1] - assert (self._input_size > 0) + assert self._input_size > 0 self._weight = self.create_parameter( attr=self._param_attr, shape=[self._input_size + self._hiden_size, 4 * self._hiden_size], - dtype=self._dtype) + dtype=self._dtype, + ) - self._bias = self.create_parameter(attr=self._bias_attr, - shape=[4 * self._hiden_size], - dtype=self._dtype, - is_bias=True) + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=[4 * self._hiden_size], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input, pre_hidden, pre_cell): concat_input_hidden = layers.concat([input, pre_hidden], 1) @@ -819,8 +879,10 @@ class BasicLSTMUnit(Layer): new_cell = layers.elementwise_add( layers.elementwise_mul( pre_cell, - layers.sigmoid(layers.elementwise_add(f, self._forget_bias))), - layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j))) + layers.sigmoid(layers.elementwise_add(f, self._forget_bias)), + ), + layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)), + ) new_hidden = layers.tanh(new_cell) * layers.sigmoid(o) return new_hidden, new_cell diff --git a/python/paddle/fluid/contrib/memory_usage_calc.py b/python/paddle/fluid/contrib/memory_usage_calc.py index 607f8b45da1f17f294827c3d2e7c434a88aef532..8a8dcb55a00b7895e7489e237a5af11bdebb3f64 100644 --- a/python/paddle/fluid/contrib/memory_usage_calc.py +++ b/python/paddle/fluid/contrib/memory_usage_calc.py @@ -66,7 +66,8 @@ def memory_usage(program, batch_size): if not isinstance(program, Program): raise TypeError( "Calculating Memory Usage requires Program as its Parameter." - "But you passed in %s" % (type(program))) + "But you passed in %s" % (type(program)) + ) if batch_size <= 0: raise ValueError("The batch size need to be positive.") @@ -88,8 +89,9 @@ def memory_usage(program, batch_size): if x < 0: if neg_dim_count >= 1: raise ValueError( - "Var %s has more than one negative dim." % - (var_name)) + "Var %s has more than one negative dim." 
+ % (var_name) + ) neg_dim_count += 1 data_count *= batch_size * (-x) else: diff --git a/python/paddle/fluid/contrib/mixed_precision/amp_nn.py b/python/paddle/fluid/contrib/mixed_precision/amp_nn.py index f9552fa705c11af048f4f562adcee5c2b423521b..f336c87a11e10a8fb3b710c933c62d35c2ea80ee 100644 --- a/python/paddle/fluid/contrib/mixed_precision/amp_nn.py +++ b/python/paddle/fluid/contrib/mixed_precision/amp_nn.py @@ -39,8 +39,12 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None): """ check_type(x, 'x', (tuple, list), 'check_finite_and_unscale') for e in x: - check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'], - 'check_finite_and_unscale') + check_variable_and_dtype( + e, + "x", + ['float16', 'float32', 'float64'], + 'check_finite_and_unscale', + ) helper = LayerHelper("check_finite_and_unscale", **locals()) @@ -52,29 +56,34 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None): inputs = {'X': x, 'Scale': scale} if core.is_compiled_with_npu(): - check_variable_and_dtype(float_status, "float_status", - ['float16', 'float32'], - 'check_finite_and_unscale') + check_variable_and_dtype( + float_status, + "float_status", + ['float16', 'float32'], + 'check_finite_and_unscale', + ) inputs['FloatStatus'] = float_status outputs = {'Out': x, 'FoundInfinite': found_inf} - helper.append_op(type='check_finite_and_unscale', - inputs=inputs, - outputs=outputs) + helper.append_op( + type='check_finite_and_unscale', inputs=inputs, outputs=outputs + ) return x, found_inf -def update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - stop_update=False, - name=None): +def update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + stop_update=False, + name=None, +): """ Update loss scaling according to overall gradients. If all gradients is finite after incr_every_n_steps, loss scaling will increase by incr_ratio. @@ -102,23 +111,39 @@ def update_loss_scaling(x, loss scaling. """ - check_variable_and_dtype(prev_loss_scaling, "prev_loss_scaling", - ['float32', 'float64'], "update_loss_scaling") + check_variable_and_dtype( + prev_loss_scaling, + "prev_loss_scaling", + ['float32', 'float64'], + "update_loss_scaling", + ) check_type(x, 'x', (tuple, list), 'update_loss_scaling') for e in x: - check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'], - 'update_loss_scaling') + check_variable_and_dtype( + e, "x", ['float16', 'float32', 'float64'], 'update_loss_scaling' + ) if e.dtype == core.VarDesc.VarType.FP16: - assert prev_loss_scaling.dtype == core.VarDesc.VarType.FP32, \ - "The dtype of prev_loss_scaling should be float32 when the dtype of x is float16." + assert ( + prev_loss_scaling.dtype == core.VarDesc.VarType.FP32 + ), "The dtype of prev_loss_scaling should be float32 when the dtype of x is float16." else: - assert prev_loss_scaling.dtype == e.dtype, "The dtype of prev_loss_scaling should be equal to the dtype of x." + assert ( + prev_loss_scaling.dtype == e.dtype + ), "The dtype of prev_loss_scaling should be equal to the dtype of x." 
if in_dygraph_mode(): - _C_ops.update_loss_scaling_(x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps, - incr_every_n_steps, decr_every_n_nan_or_inf, - incr_ratio, decr_ratio, stop_update) + _C_ops.update_loss_scaling_( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + stop_update, + ) return x helper = LayerHelper("update_loss_scaling", **locals()) @@ -128,14 +153,14 @@ def update_loss_scaling(x, 'FoundInfinite': found_inf, 'PrevLossScaling': prev_loss_scaling, 'InGoodSteps': num_good_steps, - 'InBadSteps': num_bad_steps + 'InBadSteps': num_bad_steps, } outputs = { 'Out': x, 'LossScaling': prev_loss_scaling, 'OutGoodSteps': num_good_steps, - 'OutBadSteps': num_bad_steps + 'OutBadSteps': num_bad_steps, } attrs = { @@ -150,9 +175,8 @@ def update_loss_scaling(x, else: attrs['stop_update'] = stop_update - helper.append_op(type='update_loss_scaling', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='update_loss_scaling', inputs=inputs, outputs=outputs, attrs=attrs + ) return x diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py index 3f799809af9772167eea7097a465ac2e3e2e0668..33694f4d127eed76fd84ba8b0074d1229f38d9a3 100644 --- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py +++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py @@ -15,8 +15,11 @@ import copy from paddle.fluid import core -from ..fp16_lists import white_list as white_list_fp16, black_list as black_list_fp16,\ - gray_list as gray_list_fp16 +from ..fp16_lists import ( + white_list as white_list_fp16, + black_list as black_list_fp16, + gray_list as gray_list_fp16, +) __all__ = ["AutoMixedPrecisionListsBF16"] @@ -40,10 +43,12 @@ class AutoMixedPrecisionListsBF16(object): paddle.static.amp.AutoMixedPrecisionListsBF16(custom_fp32_list={'lstm'}) """ - def __init__(self, - custom_bf16_list=None, - custom_fp32_list=None, - custom_fp32_varnames=None): + def __init__( + self, + custom_bf16_list=None, + custom_fp32_list=None, + custom_fp32_varnames=None, + ): self._custom_bf16_list = custom_bf16_list self._custom_fp32_list = custom_fp32_list self.bf16_list = copy.copy(bf16_list) @@ -61,8 +66,9 @@ class AutoMixedPrecisionListsBF16(object): if self._custom_bf16_list and self._custom_fp32_list: for op_name in self._custom_bf16_list: if op_name in self._custom_fp32_list: - raise ValueError("Custom bf16 list overlap " - "custom fp32 list") + raise ValueError( + "Custom bf16 list overlap " "custom fp32 list" + ) if self._custom_bf16_list: for op_name in self._custom_bf16_list: if op_name in self.fp32_list: @@ -92,13 +98,28 @@ bf16_list = { # depends on the prev_op type gray_list = { - 'elementwise_add', 'elementwise_sub', 'elementwise_mul', 'elementwise_div', - 'relu', 'layer_norm', 'slice', 'concat', 'uniform_random', 'reshape2', - 'transpose2', 'pool2d', 'sigmoid', 'cast', 'scale', 'fill_constant', 'split' + 'elementwise_add', + 'elementwise_sub', + 'elementwise_mul', + 'elementwise_div', + 'relu', + 'layer_norm', + 'slice', + 'concat', + 'uniform_random', + 'reshape2', + 'transpose2', + 'pool2d', + 'sigmoid', + 'cast', + 'scale', + 'fill_constant', + 'split', } _, _, _sys_unsupported_bf16_list = core.op_supported_infos( - 'CPU', core.VarDesc.VarType.BF16) + 'CPU', core.VarDesc.VarType.BF16 +) unsupported_list = _sys_unsupported_bf16_list fp32_list = black_list_fp16.copy().copy() diff 
--git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py index 94734a8088de59e26498ab5b00dc0b1b19af7796..25468b563aa1280a5c700fd370d755bd394b01ac 100644 --- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py +++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py @@ -19,8 +19,13 @@ from .... import global_scope from ....log_helper import get_logger from ....wrapped_decorator import signature_safe_contextmanager from .amp_lists import AutoMixedPrecisionListsBF16 -from ..fp16_utils import find_true_prev_op, find_true_post_op, _rename_arg, \ - find_op_index, _rename_op_input +from ..fp16_utils import ( + find_true_prev_op, + find_true_post_op, + _rename_arg, + find_op_index, + _rename_op_input, +) import collections import struct @@ -28,17 +33,21 @@ import logging import numpy as np __all__ = [ - "bf16_guard", "rewrite_program_bf16", "cast_model_to_bf16", - "cast_parameters_to_bf16", "convert_float_to_uint16" + "bf16_guard", + "rewrite_program_bf16", + "cast_model_to_bf16", + "cast_parameters_to_bf16", + "convert_float_to_uint16", ] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) _valid_types = [ - core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS, - core.VarDesc.VarType.LOD_TENSOR_ARRAY + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, ] _bf16_guard_pattern = "__use_bf16__" @@ -48,7 +57,8 @@ def convert_float_to_uint16(in_list): in_list = np.asarray(in_list) out = np.vectorize( lambda x: struct.unpack('> 16, - otypes=[np.uint16])(in_list.flat) + otypes=[np.uint16], + )(in_list.flat) return np.reshape(out, in_list.shape) @@ -83,7 +93,9 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): for in_name in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and op.type in [ - 'batch_norm', 'fused_bn_add_activation', 'layer_norm' + 'batch_norm', + 'fused_bn_add_activation', + 'layer_norm', ]: if in_name not in {'X', 'Z'}: continue @@ -99,26 +111,34 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): name=cast_name, dtype=dest_dtype, persistable=False, - stop_gradient=in_var.stop_gradient) - - block._insert_op(idx, - type="cast", - inputs={"X": in_var}, - outputs={"Out": out_var}, - attrs={ - "in_dtype": in_var.dtype, - "out_dtype": out_var.dtype - }) + stop_gradient=in_var.stop_gradient, + ) + + block._insert_op( + idx, + type="cast", + inputs={"X": in_var}, + outputs={"Out": out_var}, + attrs={ + "in_dtype": in_var.dtype, + "out_dtype": out_var.dtype, + }, + ) num_cast_ops += 1 _rename_arg(op, in_var.name, out_var.name) else: if op.has_attr('in_dtype'): op._set_attr('in_dtype', dest_dtype) - if src_dtype == core.VarDesc.VarType.FP32 and dest_dtype == core.VarDesc.VarType.BF16: + if ( + src_dtype == core.VarDesc.VarType.FP32 + and dest_dtype == core.VarDesc.VarType.BF16 + ): for out_name in op.output_names: - if op.type in [ - 'batch_norm', 'fused_bn_add_activation', 'layer_norm' - ] and out_name != 'Y': + if ( + op.type + in ['batch_norm', 'fused_bn_add_activation', 'layer_norm'] + and out_name != 'Y' + ): continue for out_var_name in op.output(out_name): out_var = block.var(out_var_name) @@ -131,31 +151,36 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): return num_cast_ops -def _insert_cast_post_op(block, op, idx, src_dtype, 
dest_dtype, target_name, - op_var_rename_map): +def _insert_cast_post_op( + block, op, idx, src_dtype, dest_dtype, target_name, op_var_rename_map +): num_cast_ops = 0 target_var = block.var(target_name) if target_var.type not in _valid_types or target_var.dtype == dest_dtype: return num_cast_ops - assert target_var.dtype == src_dtype, \ - "The real dtype({}) is not equal to the src dtype({})".format(_dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype)) + assert ( + target_var.dtype == src_dtype + ), "The real dtype({}) is not equal to the src dtype({})".format( + _dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype) + ) cast_name = target_var.name + '.cast_' + _dtype_to_str(dest_dtype) cast_var = block.vars.get(cast_name) if cast_var is None or cast_var.dtype != dest_dtype: - cast_var = block.create_var(name=cast_name, - dtype=dest_dtype, - persistable=False, - stop_gradient=target_var.stop_gradient) - block._insert_op(idx, - type="cast", - inputs={"X": target_var}, - outputs={"Out": cast_var}, - attrs={ - "in_dtype": target_var.dtype, - "out_dtype": cast_var.dtype - }) + cast_var = block.create_var( + name=cast_name, + dtype=dest_dtype, + persistable=False, + stop_gradient=target_var.stop_gradient, + ) + block._insert_op( + idx, + type="cast", + inputs={"X": target_var}, + outputs={"Out": cast_var}, + attrs={"in_dtype": target_var.dtype, "out_dtype": cast_var.dtype}, + ) num_cast_ops += 1 op_var_rename_map[block.idx][target_var.name] = cast_var.name @@ -192,8 +217,9 @@ def _need_keep_fp32(op, unsupported_op_list, use_bf16_guard): return True if use_bf16_guard: - if op.has_attr("op_namescope") and \ - (_bf16_guard_pattern in op.attr("op_namescope")): + if op.has_attr("op_namescope") and ( + _bf16_guard_pattern in op.attr("op_namescope") + ): # op in bf16 guard return False else: @@ -238,12 +264,14 @@ def are_post_ops_bf16(post_ops, keep_fp32_ops): return True -def cast_initializers_to_bf16(startup_prog, - amp_lists, - block, - all_ops, - keep_fp32_ops, - to_bf16_var_names=None): +def cast_initializers_to_bf16( + startup_prog, + amp_lists, + block, + all_ops, + keep_fp32_ops, + to_bf16_var_names=None, +): prepend_ops = startup_prog.global_block().ops for op in prepend_ops: if str(op.type) in amp_lists.bf16_initializer_list: @@ -265,17 +293,21 @@ def cast_initializers_to_bf16(startup_prog, for out_var in op_out_vars: if out_var.dtype == core.VarDesc.VarType.FP32: out_var.desc.set_dtype(core.VarDesc.VarType.BF16) - if to_bf16_var_names is not None and out_var.name in to_bf16_var_names: + if ( + to_bf16_var_names is not None + and out_var.name in to_bf16_var_names + ): to_bf16_var_names.remove(out_var.name) - if op.has_attr('dtype') and op.attr( - 'dtype') == core.VarDesc.VarType.FP32: + if ( + op.has_attr('dtype') + and op.attr('dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('dtype', core.VarDesc.VarType.BF16) -def cast_model_to_bf16(program, - startup_prog=None, - amp_lists=None, - use_bf16_guard=True): +def cast_model_to_bf16( + program, startup_prog=None, amp_lists=None, use_bf16_guard=True +): """ Traverse all ops in the whole model and set their inputs and outputs to the bf16 data type. 
This function will do some special processing for @@ -307,7 +339,9 @@ def cast_model_to_bf16(program, continue # processed below for in_name in op.input_names: if op.type in { - 'batch_norm', 'fused_bn_add_activation', 'layer_norm' + 'batch_norm', + 'fused_bn_add_activation', + 'layer_norm', } and in_name not in {'X', 'Z'}: continue for in_var_name in op.input(in_name): @@ -316,13 +350,17 @@ def cast_model_to_bf16(program, in_var = block.var(in_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --". - format(e)) + "-- {}, try to get it in the global block --".format( + e + ) + ) in_var = global_block.var(in_var_name) if in_var is not None: _logger.debug( - "-- var {} is got in the global block --". - format(in_var_name)) + "-- var {} is got in the global block --".format( + in_var_name + ) + ) if in_var is None or in_var.type not in _valid_types: continue @@ -332,13 +370,17 @@ def cast_model_to_bf16(program, to_bf16_var_names.add(in_var_name) _logger.debug( - "-- op type: {}, in var name: {}, in var dtype: {} --". - format(op.type, in_var_name, in_var.dtype)) + "-- op type: {}, in var name: {}, in var dtype: {} --".format( + op.type, in_var_name, in_var.dtype + ) + ) for out_name in op.output_names: - if op.type in { - 'batch_norm', 'fused_bn_add_activation', 'layer_norm' - } and out_name != 'Y': + if ( + op.type + in {'batch_norm', 'fused_bn_add_activation', 'layer_norm'} + and out_name != 'Y' + ): continue for out_var_name in op.output(out_name): out_var = None @@ -346,13 +388,17 @@ def cast_model_to_bf16(program, out_var = block.var(out_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --". - format(e)) + "-- {}, try to get it in the global block --".format( + e + ) + ) out_var = global_block.var(out_var_name) if out_var is not None: _logger.debug( - "-- var {} is got in the global block --". 
- format(out_var_name)) + "-- var {} is got in the global block --".format( + out_var_name + ) + ) if out_var is None or out_var.type not in _valid_types: continue @@ -361,11 +407,15 @@ def cast_model_to_bf16(program, out_var.desc.set_dtype(core.VarDesc.VarType.BF16) _logger.debug( - "-- op type: {}, out var name: {}, out var dtype: {} --" - .format(op.type, out_var_name, out_var.dtype)) + "-- op type: {}, out var name: {}, out var dtype: {} --".format( + op.type, out_var_name, out_var.dtype + ) + ) for attr_name in ['in_dtype', 'out_dtype', 'dtype']: - if op.has_attr(attr_name) and op.attr( - attr_name) == core.VarDesc.VarType.FP32: + if ( + op.has_attr(attr_name) + and op.attr(attr_name) == core.VarDesc.VarType.FP32 + ): op._set_attr(attr_name, core.VarDesc.VarType.BF16) if op.has_attr('use_mkldnn'): op._set_attr('use_mkldnn', True) @@ -373,8 +423,14 @@ def cast_model_to_bf16(program, op._set_attr('mkldnn_data_type', 'bfloat16') if startup_prog is not None: - cast_initializers_to_bf16(startup_prog, amp_lists, global_block, - ops, keep_fp32_ops, to_bf16_var_names) + cast_initializers_to_bf16( + startup_prog, + amp_lists, + global_block, + ops, + keep_fp32_ops, + to_bf16_var_names, + ) # process ops in keep_fp32_ops op_var_rename_map = [ @@ -388,14 +444,22 @@ def cast_model_to_bf16(program, num_cast_ops = 0 if op not in keep_fp32_ops: if op in to_bf16_pre_cast_ops: - in_var_cast_num = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.FP32, - core.VarDesc.VarType.BF16) + in_var_cast_num = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.BF16, + ) num_cast_ops += in_var_cast_num else: - pre_cast_num = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.BF16, - core.VarDesc.VarType.FP32) + pre_cast_num = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.BF16, + core.VarDesc.VarType.FP32, + ) num_cast_ops += pre_cast_num for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) @@ -408,10 +472,14 @@ def cast_model_to_bf16(program, if post_op in keep_fp32_ops: continue post_cast_num = _insert_cast_post_op( - block, op, idx + pre_cast_num + 1, + block, + op, + idx + pre_cast_num + 1, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.BF16, out_var_name, - op_var_rename_map) + core.VarDesc.VarType.BF16, + out_var_name, + op_var_rename_map, + ) num_cast_ops += post_cast_num idx += num_cast_ops + 1 @@ -482,7 +550,8 @@ def rewrite_program_bf16(main_prog, amp_lists=None): continue if amp_lists.fp32_varnames is not None and _is_in_fp32_varnames( - op, amp_lists): + op, amp_lists + ): fp32_op_set.add(op) continue @@ -508,11 +577,15 @@ def rewrite_program_bf16(main_prog, amp_lists=None): else: prev_op = in_var.op # if it's one of inputs - if prev_op in fp32_op_set or \ - prev_op.type in amp_lists.fp32_list: + if ( + prev_op in fp32_op_set + or prev_op.type in amp_lists.fp32_list + ): is_fp32_op = True - elif prev_op in bf16_op_set or \ - prev_op.type in amp_lists.bf16_list: + elif ( + prev_op in bf16_op_set + or prev_op.type in amp_lists.bf16_list + ): is_bf16_op = True if is_fp32_op: fp32_op_set.add(op) @@ -530,20 +603,30 @@ def rewrite_program_bf16(main_prog, amp_lists=None): op = ops[idx] num_cast_ops = 0 if op in fp32_op_set: - num_cast_ops = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.BF16, - core.VarDesc.VarType.FP32) + num_cast_ops = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.BF16, + core.VarDesc.VarType.FP32, + ) elif op in bf16_op_set: if op.has_attr('use_mkldnn'): 
op._set_attr('use_mkldnn', True) op._set_attr('mkldnn_data_type', 'bfloat16') - elif op.has_attr('dtype') and op.attr( - 'dtype') == core.VarDesc.VarType.FP32: + elif ( + op.has_attr('dtype') + and op.attr('dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('dtype', core.VarDesc.VarType.BF16) - num_cast_ops = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.FP32, - core.VarDesc.VarType.BF16) + num_cast_ops = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.BF16, + ) else: pass diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py index 0551cf1aace404e4f66fda8d27048702ac2693bb..9110686582220fe1762e2c52551638f85d5ff828 100644 --- a/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py +++ b/python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py @@ -12,10 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import (core, default_main_program, layers, program_guard, - unique_name) -from .amp_utils import (rewrite_program_bf16, cast_model_to_bf16, - cast_parameters_to_bf16) +from paddle.fluid import ( + core, + default_main_program, + layers, + program_guard, + unique_name, +) +from .amp_utils import ( + rewrite_program_bf16, + cast_model_to_bf16, + cast_parameters_to_bf16, +) from .amp_lists import AutoMixedPrecisionListsBF16 import types import warnings @@ -58,20 +66,24 @@ class OptimizerWithMixedPrecision(object): # Ensure the data type of learning rate vars is float32 (same as the # master parameter dtype) if isinstance(self._optimizer._learning_rate, float): - self._optimizer._learning_rate_map[default_main_program()] = \ - layers.create_global_var( - name=unique_name.generate("learning_rate"), - shape=[1], - value=float(self._optimizer._learning_rate), - dtype='float32', - persistable=True) - - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + self._optimizer._learning_rate_map[ + default_main_program() + ] = layers.create_global_var( + name=unique_name.generate("learning_rate"), + shape=[1], + value=float(self._optimizer._learning_rate), + dtype='float32', + persistable=True, + ) + + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ Backward propagation or auto differentiation for gradients' computation. @@ -96,24 +108,25 @@ class OptimizerWithMixedPrecision(object): if self._use_pure_bf16: self._to_bf16_var_names = cast_model_to_bf16( - self._train_program, startup_program, self._amp_lists, - self._use_bf16_guard) + self._train_program, + startup_program, + self._amp_lists, + self._use_bf16_guard, + ) else: rewrite_program_bf16(self._train_program, self._amp_lists) if loss.dtype != core.VarDesc.VarType.FP32: loss = loss.astype('float32') - params_grads = self._optimizer.backward(loss, startup_program, - parameter_list, no_grad_set, - callbacks) + params_grads = self._optimizer.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) return params_grads - def amp_init(self, - place, - scope=None, - test_program=None, - use_bf16_test=False): + def amp_init( + self, place, scope=None, test_program=None, use_bf16_test=False + ): """ Init the amp training, such as cast fp32 parameters to bf16 type. 
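Illustrative aside (not part of the patch): a minimal static-graph driver for the BF16 decorator reformatted above, showing how decorate_bf16, minimize and amp_init fit together. The import path simply mirrors this file's location in the tree; the toy fc network, learning rate, and CPU place are placeholder assumptions, as is pure-BF16 support for the ops involved.

import paddle
import paddle.fluid as fluid
from paddle.fluid.contrib.mixed_precision.bf16.decorator import decorate_bf16

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[None, 16], dtype='float32')
    loss = paddle.mean(paddle.static.nn.fc(x, size=1))
    # decorate_bf16(optimizer, amp_lists=None, use_pure_bf16=False, use_bf16_guard=None)
    sgd = fluid.optimizer.SGD(learning_rate=0.01)
    optimizer = decorate_bf16(sgd, use_pure_bf16=True)
    optimizer.minimize(loss, startup_prog)

place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
# In pure-BF16 mode, amp_init casts the fp32 parameters created by the
# startup program to bf16 before training starts.
optimizer.amp_init(place, scope=paddle.static.global_scope())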
@@ -165,16 +178,20 @@ class OptimizerWithMixedPrecision(object): optimizer.amp_init(place, scope=paddle.static.global_scope()) """ - assert self._train_program is not None, \ - "Please call the minimize method first." + assert ( + self._train_program is not None + ), "Please call the minimize method first." if self._use_pure_bf16: - cast_parameters_to_bf16(place, self._train_program, scope, - self._to_bf16_var_names) + cast_parameters_to_bf16( + place, self._train_program, scope, self._to_bf16_var_names + ) if test_program is not None: if self._use_pure_bf16: - cast_model_to_bf16(test_program, - amp_lists=self._amp_lists, - use_bf16_guard=self._use_bf16_guard) + cast_model_to_bf16( + test_program, + amp_lists=self._amp_lists, + use_bf16_guard=self._use_bf16_guard, + ) elif use_bf16_test: rewrite_program_bf16(test_program, amp_lists=self._amp_lists) @@ -197,11 +214,9 @@ class OptimizerWithMixedPrecision(object): optimize_ops = self.apply_gradients(params_grads) return optimize_ops - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): """ Perform optimization by minimizing the given loss. @@ -217,26 +232,28 @@ class OptimizerWithMixedPrecision(object): list of scaled parameters and gradients. """ opt_dict = self._optimizer.__class__.__dict__ - if 'minimize' in opt_dict and isinstance(opt_dict['minimize'], - types.FunctionType): + if 'minimize' in opt_dict and isinstance( + opt_dict['minimize'], types.FunctionType + ): warnings.warn( "The decorated optimizer has its own `minimize` method, but it will not be executed." ) - params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) optimize_ops = self.apply_optimize(loss, startup_program, params_grads) return optimize_ops, params_grads -def decorate_bf16(optimizer, - amp_lists=None, - use_pure_bf16=False, - use_bf16_guard=None): +def decorate_bf16( + optimizer, amp_lists=None, use_pure_bf16=False, use_bf16_guard=None +): """ Decorate the given optimizer to adapt to the mixed-precision training. @@ -252,7 +269,7 @@ def decorate_bf16(optimizer, enabled. Examples 1: - .. code-block:: python + .. 
code-block:: python # fp32&bf16 list based strategy example import paddle @@ -316,7 +333,8 @@ def decorate_bf16(optimizer, if use_bf16_guard is None: use_bf16_guard = use_pure_bf16 - mp_optimizer = OptimizerWithMixedPrecision(optimizer, amp_lists, - use_pure_bf16, use_bf16_guard) + mp_optimizer = OptimizerWithMixedPrecision( + optimizer, amp_lists, use_pure_bf16, use_bf16_guard + ) return mp_optimizer diff --git a/python/paddle/fluid/contrib/mixed_precision/decorator.py b/python/paddle/fluid/contrib/mixed_precision/decorator.py index 70aefac5a5996273486c720f30224bf4803b9ca8..75554ff9c812307364ae52c118005dd3724d356b 100644 --- a/python/paddle/fluid/contrib/mixed_precision/decorator.py +++ b/python/paddle/fluid/contrib/mixed_precision/decorator.py @@ -63,10 +63,19 @@ class OptimizerWithMixedPrecision(object): """ - def __init__(self, optimizer, amp_lists, init_loss_scaling, - use_dynamic_loss_scaling, incr_every_n_steps, - decr_every_n_nan_or_inf, incr_ratio, decr_ratio, use_pure_fp16, - use_fp16_guard): + def __init__( + self, + optimizer, + amp_lists, + init_loss_scaling, + use_dynamic_loss_scaling, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + use_pure_fp16, + use_fp16_guard, + ): self._optimizer = optimizer self._amp_lists = amp_lists self._param_grads = None @@ -97,9 +106,10 @@ class OptimizerWithMixedPrecision(object): self._is_distributed = flag def get_loss_scaling(self): - """Return the real-time loss scaling factor. - """ - assert self._loss_scaling is not None, 'Please call minimize() before calling get_loss_scaling().' + """Return the real-time loss scaling factor.""" + assert ( + self._loss_scaling is not None + ), 'Please call minimize() before calling get_loss_scaling().' return self._loss_scaling def get_scaled_loss(self): @@ -117,7 +127,8 @@ class OptimizerWithMixedPrecision(object): shape=[1], value=self._init_loss_scaling, dtype='float32', - persistable=True) + persistable=True, + ) if self._use_dynamic_loss_scaling: self._num_good_steps = layers.create_global_var( @@ -125,31 +136,37 @@ class OptimizerWithMixedPrecision(object): shape=[1], value=0, dtype='int32', - persistable=True) + persistable=True, + ) self._num_bad_steps = layers.create_global_var( name=unique_name.generate("num_bad_steps"), shape=[1], value=0, dtype='int32', - persistable=True) + persistable=True, + ) # Ensure the data type of learning rate vars is float32 (same as the # master parameter dtype) if isinstance(self._optimizer._learning_rate, float): - self._optimizer._learning_rate_map[default_main_program()] = \ - layers.create_global_var( - name=unique_name.generate("learning_rate"), - shape=[1], - value=float(self._optimizer._learning_rate), - dtype='float32', - persistable=True) - - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + self._optimizer._learning_rate_map[ + default_main_program() + ] = layers.create_global_var( + name=unique_name.generate("learning_rate"), + shape=[1], + value=float(self._optimizer._learning_rate), + dtype='float32', + persistable=True, + ) + + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ Backward propagation or auto differentiation for gradients' computation. @@ -171,9 +188,9 @@ class OptimizerWithMixedPrecision(object): # NOTE(zhiqiu): _float_status is only used for NPU. 
if core.is_compiled_with_npu(): - float_status = paddle.static.data(name="float_status", - shape=[8], - dtype='float32') + float_status = paddle.static.data( + name="float_status", shape=[8], dtype='float32' + ) self._train_program.global_block().append_op( type="alloc_float_status", outputs={"FloatStatus": float_status}, @@ -192,7 +209,8 @@ class OptimizerWithMixedPrecision(object): if self._use_pure_fp16: self._to_fp16_var_names = cast_model_to_fp16( - self._train_program, self._amp_lists, self._use_fp16_guard) + self._train_program, self._amp_lists, self._use_fp16_guard + ) else: rewrite_program(self._train_program, self._amp_lists) @@ -205,10 +223,13 @@ class OptimizerWithMixedPrecision(object): else: self._scaled_loss = loss - params_grads = self._optimizer.backward(self._scaled_loss, - startup_program, - parameter_list, no_grad_set, - callbacks) + params_grads = self._optimizer.backward( + self._scaled_loss, + startup_program, + parameter_list, + no_grad_set, + callbacks, + ) if self._supports_check_nan_inf(): self._add_cast_ops_to_startup_program(startup_program) return params_grads @@ -216,8 +237,11 @@ class OptimizerWithMixedPrecision(object): def _add_cast_ops_to_startup_program(self, startup_program): names = list(self._to_fp16_var_names) if self._to_fp16_var_names else [] names.sort() - startup_program = default_startup_program( - ) if startup_program is None else startup_program + startup_program = ( + default_startup_program() + if startup_program is None + else startup_program + ) block = startup_program.global_block() param_names = [p.name for p in block.all_parameters()] for name in names: @@ -225,23 +249,23 @@ class OptimizerWithMixedPrecision(object): continue tmp = block.create_var(dtype=core.VarDesc.VarType.FP32) - block.append_op(type='assign', - inputs={'X': [name]}, - outputs={'Out': [tmp]}) - block.append_op(type='cast', - inputs={'X': [tmp]}, - outputs={'Out': [name]}, - attrs={ - 'in_dtype': core.VarDesc.VarType.FP32, - 'out_dtype': core.VarDesc.VarType.FP16, - }) + block.append_op( + type='assign', inputs={'X': [name]}, outputs={'Out': [tmp]} + ) + block.append_op( + type='cast', + inputs={'X': [tmp]}, + outputs={'Out': [name]}, + attrs={ + 'in_dtype': core.VarDesc.VarType.FP32, + 'out_dtype': core.VarDesc.VarType.FP16, + }, + ) self._to_fp16_var_names = None - def amp_init(self, - place, - scope=None, - test_program=None, - use_fp16_test=False): + def amp_init( + self, place, scope=None, test_program=None, use_fp16_test=False + ): """ Init the amp training, such as cast fp32 parameters to fp16 type. @@ -297,15 +321,18 @@ class OptimizerWithMixedPrecision(object): if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0: run_example_code() """ - assert self._train_program is not None, \ - "Please call the minimize method first." + assert ( + self._train_program is not None + ), "Please call the minimize method first." 
if self._use_pure_fp16: - cast_parameters_to_fp16(place, self._train_program, scope, - self._to_fp16_var_names) + cast_parameters_to_fp16( + place, self._train_program, scope, self._to_fp16_var_names + ) if test_program is not None: if self._use_pure_fp16: - cast_model_to_fp16(test_program, self._amp_lists, - self._use_fp16_guard) + cast_model_to_fp16( + test_program, self._amp_lists, self._use_fp16_guard + ) elif use_fp16_test: rewrite_program(test_program, self._amp_lists) @@ -327,7 +354,10 @@ class OptimizerWithMixedPrecision(object): # When not using dynamic loss scaling and the init loss scaling value is equal to 1.0, # the model can be optimized. - if not self._use_dynamic_loss_scaling and self._init_loss_scaling == 1.0: + if ( + not self._use_dynamic_loss_scaling + and self._init_loss_scaling == 1.0 + ): return self._optimizer.apply_gradients(params_grads) if self._supports_check_nan_inf(): @@ -346,13 +376,16 @@ class OptimizerWithMixedPrecision(object): real_optimizer = self._optimizer while hasattr(real_optimizer, "inner_opt"): real_optimizer = real_optimizer.inner_opt - if isinstance(real_optimizer, - (paddle.fluid.optimizer.Adam, paddle.optimizer.AdamW)): + if isinstance( + real_optimizer, + (paddle.fluid.optimizer.Adam, paddle.optimizer.AdamW), + ): # NOTE(zhiqiu): Since found_inf needs to be on cpu in adam op, we # copy it in advance to avoid multiple time copies. with self._train_program._optimized_guard([]): found_inf = paddle.tensor.creation._memcpy( - found_inf, paddle.CPUPlace()) + found_inf, paddle.CPUPlace() + ) real_optimizer._set_auxiliary_var('found_inf', found_inf) elif hasattr(real_optimizer, "_set_auxiliary_var"): real_optimizer._set_auxiliary_var('found_inf', found_inf) @@ -363,8 +396,9 @@ class OptimizerWithMixedPrecision(object): grads = [g for _, g in params_grads] fp32_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP32] fp16_grads = [g for g in grads if g.dtype == core.VarDesc.VarType.FP16] - assert len(fp32_grads) + len(fp16_grads) == len(grads), \ - "Data types of all grads must be either fp16 or fp32." + assert len(fp32_grads) + len(fp16_grads) == len( + grads + ), "Data types of all grads must be either fp16 or fp32." 
return grads, fp32_grads, fp16_grads def _check_finite_and_unscale(self, params_grads): @@ -380,7 +414,8 @@ class OptimizerWithMixedPrecision(object): grads, self._loss_scaling, name="find_infinite_scale", - float_status=self._float_status) + float_status=self._float_status, + ) found_infs.append(found_inf) else: for p, g in params_grads: @@ -391,7 +426,8 @@ class OptimizerWithMixedPrecision(object): ], self._loss_scaling, name="find_infinite_scale", - float_status=self._float_status) + float_status=self._float_status, + ) found_infs.append(found_inf) elif self._use_pure_fp16: if fp32_grads: @@ -400,7 +436,8 @@ class OptimizerWithMixedPrecision(object): fp32_grads, self._loss_scaling, name="find_infinite_scale_fp32", - float_status=self._float_status) + float_status=self._float_status, + ) found_infs.append(fp32_found_inf) if fp16_grads: with self._train_program._optimized_guard(fp16_grads): @@ -408,7 +445,8 @@ class OptimizerWithMixedPrecision(object): fp16_grads, self._loss_scaling, name="find_infinite_scale_fp16", - float_status=self._float_status) + float_status=self._float_status, + ) found_infs.append(fp16_found_inf) else: with self._train_program._optimized_guard(grads): @@ -416,7 +454,8 @@ class OptimizerWithMixedPrecision(object): grads, self._loss_scaling, name="find_infinite_scale", - float_status=self._float_status) + float_status=self._float_status, + ) if self._is_distributed or self._use_pure_fp16: with self._train_program._optimized_guard([]): @@ -439,7 +478,8 @@ class OptimizerWithMixedPrecision(object): self._incr_ratio, self._decr_ratio, stop_update=self._optimizer._get_stop_update_var(), - name="update_loss_scaling") + name="update_loss_scaling", + ) return grads, fp32_grads, fp16_grads = self._split_grads(params_grads) @@ -447,42 +487,48 @@ class OptimizerWithMixedPrecision(object): stop_update = False with self._train_program._optimized_guard([]): if fp32_grads: - update_loss_scaling(fp32_grads, - found_inf, - self._loss_scaling, - self._num_good_steps, - self._num_bad_steps, - self._incr_every_n_steps, - self._decr_every_n_nan_or_inf, - self._incr_ratio, - self._decr_ratio, - stop_update=stop_update, - name="update_loss_scaling_fp32") + update_loss_scaling( + fp32_grads, + found_inf, + self._loss_scaling, + self._num_good_steps, + self._num_bad_steps, + self._incr_every_n_steps, + self._decr_every_n_nan_or_inf, + self._incr_ratio, + self._decr_ratio, + stop_update=stop_update, + name="update_loss_scaling_fp32", + ) stop_update = True if fp16_grads: - update_loss_scaling(fp16_grads, - found_inf, - self._loss_scaling, - self._num_good_steps, - self._num_bad_steps, - self._incr_every_n_steps, - self._decr_every_n_nan_or_inf, - self._incr_ratio, - self._decr_ratio, - stop_update=stop_update, - name="update_loss_scaling_fp16") + update_loss_scaling( + fp16_grads, + found_inf, + self._loss_scaling, + self._num_good_steps, + self._num_bad_steps, + self._incr_every_n_steps, + self._decr_every_n_nan_or_inf, + self._incr_ratio, + self._decr_ratio, + stop_update=stop_update, + name="update_loss_scaling_fp16", + ) else: with self._train_program._optimized_guard([]): - update_loss_scaling(grads, - found_inf, - self._loss_scaling, - self._num_good_steps, - self._num_bad_steps, - self._incr_every_n_steps, - self._decr_every_n_nan_or_inf, - self._incr_ratio, - self._decr_ratio, - name="update_loss_scaling") + update_loss_scaling( + grads, + found_inf, + self._loss_scaling, + self._num_good_steps, + self._num_bad_steps, + self._incr_every_n_steps, + self._decr_every_n_nan_or_inf, + 
self._incr_ratio, + self._decr_ratio, + name="update_loss_scaling", + ) def apply_optimize(self, loss, startup_program, params_grads): program = loss.block.program @@ -490,11 +536,9 @@ class OptimizerWithMixedPrecision(object): optimize_ops = self.apply_gradients(params_grads) return optimize_ops - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): """ Perform optimization by minimizing the given loss. @@ -511,33 +555,39 @@ class OptimizerWithMixedPrecision(object): """ opt_dict = self._optimizer.__class__.__dict__ - if 'minimize' in opt_dict and isinstance(opt_dict['minimize'], - types.FunctionType): + if 'minimize' in opt_dict and isinstance( + opt_dict['minimize'], types.FunctionType + ): warnings.warn( "The decorated optimizer has its own `minimize` method, but it will not be executed." ) - scaled_params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + scaled_params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) - optimize_ops = self.apply_optimize(loss, startup_program, - scaled_params_grads) + optimize_ops = self.apply_optimize( + loss, startup_program, scaled_params_grads + ) return optimize_ops, scaled_params_grads -def decorate(optimizer, - amp_lists=None, - init_loss_scaling=2**15, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - incr_ratio=2.0, - decr_ratio=0.8, - use_dynamic_loss_scaling=True, - use_pure_fp16=False, - use_fp16_guard=None): +def decorate( + optimizer, + amp_lists=None, + init_loss_scaling=2**15, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=2, + incr_ratio=2.0, + decr_ratio=0.8, + use_dynamic_loss_scaling=True, + use_pure_fp16=False, + use_fp16_guard=None, +): """ Decorate the given optimizer to adapt to the mixed-precision training. @@ -564,7 +614,7 @@ def decorate(optimizer, enabled. Examples 1: - .. code-block:: python + .. code-block:: python # black&white list based strategy example import paddle @@ -635,8 +685,16 @@ def decorate(optimizer, use_fp16_guard = use_pure_fp16 mp_optimizer = OptimizerWithMixedPrecision( - optimizer, amp_lists, init_loss_scaling, use_dynamic_loss_scaling, - incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio, - use_pure_fp16, use_fp16_guard) + optimizer, + amp_lists, + init_loss_scaling, + use_dynamic_loss_scaling, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + use_pure_fp16, + use_fp16_guard, + ) return mp_optimizer diff --git a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py index b2767b1dd1cbfa7ab4ea209bb8cee3b648e5cb7c..ef8f222bac23b22244748968385bbfb67898d01c 100644 --- a/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py +++ b/python/paddle/fluid/contrib/mixed_precision/fp16_lists.py @@ -19,7 +19,10 @@ __all__ = ["CustomOpLists", "AutoMixedPrecisionLists"] # lookup_table fp16 is slower than fp32, though fp16 is supported. _extra_unsupported_fp16_list = { - 'lookup_table', 'lookup_table_v2', 'scatter', 'scatter_grad' + 'lookup_table', + 'lookup_table_v2', + 'scatter', + 'scatter_grad', } @@ -36,10 +39,12 @@ class AutoMixedPrecisionLists(object): custom_black_varnames (set): Users' custom black varibles' names. 
""" - def __init__(self, - custom_white_list=None, - custom_black_list=None, - custom_black_varnames=None): + def __init__( + self, + custom_white_list=None, + custom_black_list=None, + custom_black_varnames=None, + ): self._custom_white_list = custom_white_list self._custom_black_list = custom_black_list self.white_list = copy.copy(white_list) @@ -56,8 +61,9 @@ class AutoMixedPrecisionLists(object): if self._custom_white_list and self._custom_black_list: for op_name in self._custom_white_list: if op_name in self._custom_black_list: - raise ValueError("Custom white list overlap " - "custom black list") + raise ValueError( + "Custom white list overlap " "custom black list" + ) if self._custom_white_list: for op_name in self._custom_white_list: if op_name in self.black_list: @@ -175,17 +181,23 @@ gray_list = { _sys_unsupported_fp16_list = [] if core.is_compiled_with_xpu(): _, _, _sys_unsupported_fp16_list = core.op_supported_infos( - 'XPU', core.VarDesc.VarType.FP16) + 'XPU', core.VarDesc.VarType.FP16 + ) elif core.is_compiled_with_npu(): _, _, _sys_unsupported_fp16_list = core.op_supported_infos( - 'NPU', core.VarDesc.VarType.FP16) + 'NPU', core.VarDesc.VarType.FP16 + ) elif core.is_compiled_with_mlu(): _, _, _sys_unsupported_fp16_list = core.op_supported_infos( - 'MLU', core.VarDesc.VarType.FP16) + 'MLU', core.VarDesc.VarType.FP16 + ) else: _, _, _sys_unsupported_fp16_list = core.op_supported_infos( - 'GPU', core.VarDesc.VarType.FP16) + 'GPU', core.VarDesc.VarType.FP16 + ) -unsupported_fp16_list = _extra_unsupported_fp16_list | _sys_unsupported_fp16_list +unsupported_fp16_list = ( + _extra_unsupported_fp16_list | _sys_unsupported_fp16_list +) CustomOpLists = AutoMixedPrecisionLists diff --git a/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py b/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py index ad91e8ba799b650564ae20a1cff93ae793a20845..e50a2457147e12d4497c90ad297d08bcade52736 100644 --- a/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py +++ b/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py @@ -25,13 +25,14 @@ import numpy as np __all__ = ["fp16_guard", "cast_model_to_fp16", "cast_parameters_to_fp16"] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) _valid_types = [ - core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS, - core.VarDesc.VarType.LOD_TENSOR_ARRAY + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, ] _fp16_guard_pattern = "__use_fp16__" @@ -106,7 +107,12 @@ def _keep_fp32_input(op, in_name): return in_name not in {'X', 'FilterX', 'Z', 'FilterZ'} if op_type in ['fused_attention', 'fused_feedforward']: return in_name in { - 'LnScale', 'LnBias', 'Ln2Scale', 'Ln2Bias', "Ln1Scale", "Ln1Bias" + 'LnScale', + 'LnBias', + 'Ln2Scale', + 'Ln2Bias', + "Ln1Scale", + "Ln1Bias", } if op_type == 'fused_multi_transformer': return in_name in {'LnScale', 'LnBias', 'FFNLnScale', 'FFNLnBias'} @@ -123,8 +129,12 @@ def _keep_fp32_output(op, out_name): return out_name not in {'Y', 'ConvX', 'ConvZ'} if op_type in ['fused_attention', 'fused_feedforward']: return out_name in { - 'LnMean', 'LnVariance', 'Ln2Mean', 'Ln2Variance', 'Ln1Mean', - 'Ln1Variance' + 'LnMean', + 'LnVariance', + 'Ln2Mean', + 'Ln2Variance', + 'Ln1Mean', + 'Ln1Variance', } return False @@ -147,7 +157,8 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): for in_name 
in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input( - op, in_name): + op, in_name + ): continue for in_var_name in op.input(in_name): in_var = block._find_var_recursive(in_var_name) @@ -163,11 +174,15 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): # set cast_op device to `all`, can reduce send cast_var. # TODO: need remove this after we unified the dynamic # and static pipeline interface. - if src_dtype == core.VarDesc.VarType.FP32 and in_var.stop_gradient: + if ( + src_dtype == core.VarDesc.VarType.FP32 + and in_var.stop_gradient + ): prev_op = None if in_var.op is op: - prev_op = find_true_prev_op(block.ops, op, - in_var_name) + prev_op = find_true_prev_op( + block.ops, op, in_var_name + ) elif in_var.op is not None: prev_op = in_var.op @@ -175,33 +190,40 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): if prev_op is not None: prev_op_device = prev_op.attr('op_device') - if prev_op_device is not None and 'all' in prev_op_device: + if ( + prev_op_device is not None + and 'all' in prev_op_device + ): op_device = prev_op_device out_var = block.create_var( name=cast_name, dtype=dest_dtype, persistable=False, - stop_gradient=in_var.stop_gradient) - - block._insert_op_without_sync(idx, - type="cast", - inputs={"X": in_var}, - outputs={"Out": out_var}, - attrs={ - "in_dtype": in_var.dtype, - "out_dtype": - out_var.dtype, - "op_device": op_device, - "op_role": - op.attr("op_role"), - }) + stop_gradient=in_var.stop_gradient, + ) + + block._insert_op_without_sync( + idx, + type="cast", + inputs={"X": in_var}, + outputs={"Out": out_var}, + attrs={ + "in_dtype": in_var.dtype, + "out_dtype": out_var.dtype, + "op_device": op_device, + "op_role": op.attr("op_role"), + }, + ) num_cast_ops += 1 _rename_arg(op, in_var.name, out_var.name) else: if op.has_attr('in_dtype'): op._set_attr('in_dtype', dest_dtype) - if src_dtype == core.VarDesc.VarType.FP32 and dest_dtype == core.VarDesc.VarType.FP16: + if ( + src_dtype == core.VarDesc.VarType.FP32 + and dest_dtype == core.VarDesc.VarType.FP16 + ): for out_name in op.output_names: if _keep_fp32_output(op, out_name): continue @@ -216,35 +238,42 @@ def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): return num_cast_ops -def _insert_cast_post_op(block, op, idx, src_dtype, dest_dtype, target_name, - op_var_rename_map): +def _insert_cast_post_op( + block, op, idx, src_dtype, dest_dtype, target_name, op_var_rename_map +): num_cast_ops = 0 target_var = block.var(target_name) if target_var.type not in _valid_types or target_var.dtype == dest_dtype: return num_cast_ops - assert target_var.dtype == src_dtype, \ - "The real dtype({}) is not equal to the src dtype({})".format( - _dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype)) + assert ( + target_var.dtype == src_dtype + ), "The real dtype({}) is not equal to the src dtype({})".format( + _dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype) + ) cast_name = target_var.name + '.cast_' + _dtype_to_str(dest_dtype) cast_var = block.vars.get(cast_name) if cast_var is None or cast_var.dtype != dest_dtype: - cast_var = block.create_var(name=cast_name, - dtype=dest_dtype, - persistable=False, - stop_gradient=target_var.stop_gradient) - block._insert_op(idx, - type="cast", - inputs={"X": target_var}, - outputs={"Out": cast_var}, - attrs={ - "in_dtype": target_var.dtype, - "out_dtype": cast_var.dtype, - "op_device": op.attr("op_device"), - "op_role": op.attr("op_role"), - }) + cast_var = block.create_var( + name=cast_name, + dtype=dest_dtype, + 
persistable=False, + stop_gradient=target_var.stop_gradient, + ) + block._insert_op( + idx, + type="cast", + inputs={"X": target_var}, + outputs={"Out": cast_var}, + attrs={ + "in_dtype": target_var.dtype, + "out_dtype": cast_var.dtype, + "op_device": op.attr("op_device"), + "op_role": op.attr("op_role"), + }, + ) num_cast_ops += 1 op_var_rename_map[block.idx][target_var.name] = cast_var.name @@ -270,8 +299,10 @@ def find_true_prev_op(ops, cur_op, var_name): prev_op.append(op) if prev_op: if not len(prev_op) == 1: - raise ValueError("There must be only one previous op " - "that outputs {0} variable".format(var_name)) + raise ValueError( + "There must be only one previous op " + "that outputs {0} variable".format(var_name) + ) else: return prev_op[0] return None @@ -313,8 +344,7 @@ def find_true_post_op(ops, cur_op, var_name, search_all=False): def find_op_index(block_desc, cur_op_desc): - """ - """ + """ """ for idx in range(block_desc.op_size()): if cur_op_desc == block_desc.op(idx): return idx @@ -348,8 +378,9 @@ def _need_keep_fp32(op, unsupported_op_list, use_fp16_guard): return True if use_fp16_guard: - if op.has_attr("op_namescope") and \ - (_fp16_guard_pattern in op.attr("op_namescope")): + if op.has_attr("op_namescope") and ( + _fp16_guard_pattern in op.attr("op_namescope") + ): # op in fp16 guard return False else: @@ -419,7 +450,8 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): for in_name in op.input_names: # for ipu, all inputs must be converted to fp16 if not core.is_compiled_with_ipu() and _keep_fp32_input( - op, in_name): + op, in_name + ): continue for in_var_name in op.input(in_name): in_var = None @@ -427,13 +459,17 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): in_var = block.var(in_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --". - format(e)) + "-- {}, try to get it in the global block --".format( + e + ) + ) in_var = global_block.var(in_var_name) if in_var is not None: _logger.debug( - "-- var {} is got in the global block --". - format(in_var_name)) + "-- var {} is got in the global block --".format( + in_var_name + ) + ) if in_var is None or in_var.type not in _valid_types: continue @@ -443,13 +479,16 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): to_fp16_var_names.add(in_var_name) _logger.debug( - "-- op type: {}, in var name: {}, in var dtype: {} --". - format(op.type, in_var_name, in_var.dtype)) + "-- op type: {}, in var name: {}, in var dtype: {} --".format( + op.type, in_var_name, in_var.dtype + ) + ) for out_name in op.output_names: # for ipu, all outputs must be converted to fp16 if not core.is_compiled_with_ipu() and _keep_fp32_output( - op, out_name): + op, out_name + ): continue for out_var_name in op.output(out_name): out_var = None @@ -457,13 +496,17 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): out_var = block.var(out_var_name) except ValueError as e: _logger.debug( - "-- {}, try to get it in the global block --". - format(e)) + "-- {}, try to get it in the global block --".format( + e + ) + ) out_var = global_block.var(out_var_name) if out_var is not None: _logger.debug( - "-- var {} is got in the global block --". 
- format(out_var_name)) + "-- var {} is got in the global block --".format( + out_var_name + ) + ) if out_var is None or out_var.type not in _valid_types: continue @@ -472,16 +515,24 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): out_var.desc.set_dtype(core.VarDesc.VarType.FP16) _logger.debug( - "-- op type: {}, out var name: {}, out var dtype: {} --" - .format(op.type, out_var_name, out_var.dtype)) - if op.has_attr('in_dtype') and op.attr( - 'in_dtype') == core.VarDesc.VarType.FP32: + "-- op type: {}, out var name: {}, out var dtype: {} --".format( + op.type, out_var_name, out_var.dtype + ) + ) + if ( + op.has_attr('in_dtype') + and op.attr('in_dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('in_dtype', core.VarDesc.VarType.FP16) - if op.has_attr('out_dtype') and op.attr( - 'out_dtype') == core.VarDesc.VarType.FP32: + if ( + op.has_attr('out_dtype') + and op.attr('out_dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('out_dtype', core.VarDesc.VarType.FP16) - if op.has_attr('dtype') and op.attr( - 'dtype') == core.VarDesc.VarType.FP32: + if ( + op.has_attr('dtype') + and op.attr('dtype') == core.VarDesc.VarType.FP32 + ): op._set_attr('dtype', core.VarDesc.VarType.FP16) # process ops in keep_fp32_ops @@ -495,9 +546,13 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): op = ops[idx] num_cast_ops = 0 if op in keep_fp32_ops: - pre_cast_num = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32) + pre_cast_num = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + ) num_cast_ops += pre_cast_num for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) @@ -510,10 +565,14 @@ def cast_model_to_fp16(program, amp_lists=None, use_fp16_guard=True): if post_op in keep_fp32_ops: continue post_cast_num = _insert_cast_post_op( - block, op, idx + pre_cast_num + 1, + block, + op, + idx + pre_cast_num + 1, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16, out_var_name, - op_var_rename_map) + core.VarDesc.VarType.FP16, + out_var_name, + op_var_rename_map, + ) num_cast_ops += post_cast_num idx += num_cast_ops + 1 @@ -583,7 +642,8 @@ def rewrite_program(main_prog, amp_lists): continue if amp_lists.black_varnames is not None and _is_in_black_varnames( - op, amp_lists): + op, amp_lists + ): black_op_set.add(op) continue @@ -609,11 +669,15 @@ def rewrite_program(main_prog, amp_lists): else: prev_op = in_var.op # if it's one of inputs - if prev_op in black_op_set or \ - prev_op.type in amp_lists.black_list: + if ( + prev_op in black_op_set + or prev_op.type in amp_lists.black_list + ): is_black_op = True - elif prev_op in white_op_set or \ - prev_op.type in amp_lists.white_list: + elif ( + prev_op in white_op_set + or prev_op.type in amp_lists.white_list + ): is_white_op = True if is_black_op: black_op_set.add(op) @@ -631,13 +695,21 @@ def rewrite_program(main_prog, amp_lists): op = ops[idx] num_cast_ops = 0 if op in black_op_set: - num_cast_ops = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.FP16, - core.VarDesc.VarType.FP32) + num_cast_ops = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + ) elif op in white_op_set: - num_cast_ops = _insert_cast_op(block, op, idx, - core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP16) + num_cast_ops = _insert_cast_op( + block, + op, + idx, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, + ) else: pass @@ -668,13 +740,16 @@ def 
update_role_var_grad(main_prog, params_grads): if role & int(BACKWARD) and op.has_attr('op_role_var'): op._remove_attr("op_role_var") else: - raise ValueError("The cast op {0} must be in BACKWARD role " - "and have op_role_var attr.".format(op)) + raise ValueError( + "The cast op {0} must be in BACKWARD role " + "and have op_role_var attr.".format(op) + ) fp16_grad_name = op.input(op.input_names[0])[0] op_for_fp16_grad = find_true_prev_op(block.ops, op, fp16_grad_name) - op_role_var_attr_name = \ + op_role_var_attr_name = ( core.op_proto_and_checker_maker.kOpRoleVarAttrName() + ) attr_val = [p.name, fp16_grad_name] if op_for_fp16_grad.has_attr(op_role_var_attr_name): attr_val.extend(op_for_fp16_grad.attr(op_role_var_attr_name)) @@ -688,18 +763,22 @@ def update_role_var_grad(main_prog, params_grads): continue post_ops = find_true_post_op(block.ops, op, g.name) if post_ops: - raise ValueError("The cast op {0}'s output should not be" - "used by a non-optimize op, however, it" - "is used by {1}".format(op, post_ops[0])) + raise ValueError( + "The cast op {0}'s output should not be" + "used by a non-optimize op, however, it" + "is used by {1}".format(op, post_ops[0]) + ) # add new op in the python and cpp at the same time new_op_desc = block.desc.append_op() new_op_desc.copy_from(op.desc) - new_op = framework.Operator(block=block, - desc=new_op_desc, - type=None, - inputs=None, - outputs=None, - attrs=None) + new_op = framework.Operator( + block=block, + desc=new_op_desc, + type=None, + inputs=None, + outputs=None, + attrs=None, + ) block.ops.append(new_op) op_idx = find_op_index(block.desc, op.desc) if op_idx == -1: diff --git a/python/paddle/fluid/contrib/model_stat.py b/python/paddle/fluid/contrib/model_stat.py index 3bcc33068954566bb874ee9f86ac3c675d28fa61..0c18deec5da2273771e0e3caad30af39481ba54c 100644 --- a/python/paddle/fluid/contrib/model_stat.py +++ b/python/paddle/fluid/contrib/model_stat.py @@ -151,8 +151,10 @@ def _format_summary(collected_ops_list): _verify_dependent_package() from prettytable import PrettyTable + summary_table = PrettyTable( - ["No.", "TYPE", "INPUT", "OUTPUT", "PARAMs", "FLOPs"]) + ["No.", "TYPE", "INPUT", "OUTPUT", "PARAMs", "FLOPs"] + ) summary_table.align = 'r' total = {} @@ -200,8 +202,9 @@ def _print_summary(summary_table, total): parmas = total['params'] flops = total['flops'] print(summary_table) - print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), - sum(parmas) / (10**6))) + print( + 'Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / (10**6)) + ) print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10**9)) print( "Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu)]" diff --git a/python/paddle/fluid/contrib/op_frequence.py b/python/paddle/fluid/contrib/op_frequence.py index 5203aa308e312ac6133888f13d8e3b64c155d981..ee435ac657af3ed7421312073cef537cde6fd9f3 100644 --- a/python/paddle/fluid/contrib/op_frequence.py +++ b/python/paddle/fluid/contrib/op_frequence.py @@ -43,8 +43,10 @@ def op_freq_statistic(program): """ if not isinstance(program, Program): - raise TypeError("The input type should be Porgram." - "But you passed in %s" % (type(program))) + raise TypeError( + "The input type should be Porgram." 
+ "But you passed in %s" % (type(program)) + ) uni_op_freq = OrderedDict() adj_2_op_freq = OrderedDict() @@ -78,8 +80,10 @@ def op_freq_statistic(program): else: op_in_ops[op.type] = [var_gen_op[var_name][-1]] else: - print("Var's generate op is not found,%s, %s" % - (var_name, op.type)) + print( + "Var's generate op is not found,%s, %s" + % (var_name, op.type) + ) for var_name in op.output_arg_names: if var_gen_op.has_key(var_name): @@ -95,11 +99,11 @@ def op_freq_statistic(program): else: adj_2_op_freq[op_op] = 1 - uni_op_freq = sorted(uni_op_freq.items(), - key=lambda item: item[1], - reverse=True) - adj_2_op_freq = sorted(adj_2_op_freq.items(), - key=lambda item: item[1], - reverse=True) + uni_op_freq = sorted( + uni_op_freq.items(), key=lambda item: item[1], reverse=True + ) + adj_2_op_freq = sorted( + adj_2_op_freq.items(), key=lambda item: item[1], reverse=True + ) return uni_op_freq, adj_2_op_freq diff --git a/python/paddle/fluid/contrib/optimizer.py b/python/paddle/fluid/contrib/optimizer.py index dc6dc213a8f88dcf103c67f1bc437aae9db5081a..53386bc666640188c6ed0a11bb6f3c582e5ffa33 100644 --- a/python/paddle/fluid/contrib/optimizer.py +++ b/python/paddle/fluid/contrib/optimizer.py @@ -104,31 +104,35 @@ class Momentum(Optimizer): """ _velocity_acc_str = "velocity" - def __init__(self, - learning_rate, - momentum, - parameter_list=None, - use_nesterov=False, - regularization=None, - grad_clip=None, - multi_precision=False, - rescale_grad=1.0, - name=None): + def __init__( + self, + learning_rate, + momentum, + parameter_list=None, + use_nesterov=False, + regularization=None, + grad_clip=None, + multi_precision=False, + rescale_grad=1.0, + name=None, + ): assert learning_rate is not None assert momentum is not None predicate = lambda regular: isinstance(regular, L2DecayRegularizer) py_regular = None if predicate(regularization) else regularization - super(Momentum, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=py_regular, - grad_clip=grad_clip, - name=name) + super(Momentum, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=py_regular, + grad_clip=grad_clip, + name=name, + ) self.type = "momentum" self._momentum = momentum self._use_nesterov = bool(use_nesterov) self._regularization_method = "" self._regularization_coeff = 0 - if (isinstance(regularization, L2DecayRegularizer)): + if isinstance(regularization, L2DecayRegularizer): self._regularization_method = "l2_decay" self._regularization_coeff = regularization._regularization_coeff self._multi_precision = multi_precision @@ -140,19 +144,23 @@ class Momentum(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -168,15 +176,22 @@ class Momentum(Optimizer): """ if self._name is not None: name = self._name + "_" + name - 
find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _create_accumulators(self, block, parameters): @@ -187,7 +202,10 @@ class Momentum(Optimizer): master_p = self._create_master_weight(p) self._add_accumulator(self._velocity_acc_str, master_p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Momentum optimizer." @@ -197,23 +215,42 @@ class Momentum(Optimizer): def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - velocity_acc = self._get_accumulator(self._velocity_acc_str, - param_and_grad[0]) + velocity_acc = self._get_accumulator( + self._velocity_acc_str, param_and_grad[0] + ) lr = self._create_param_lr(param_and_grad) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) if framework._non_static_mode(): _, _, _ = _legacy_C_ops.momentum( - param_and_grad[0], param_and_grad[1], velocity_acc, lr, - master_weight, param_and_grad[0], velocity_acc, master_weight, - 'mu', self._momentum, 'use_nesterov', self._use_nesterov, - 'regularization_method', self._regularization_method, - 'regularization_coeff', self._regularization_coeff, - 'multi_precision', find_master) + param_and_grad[0], + param_and_grad[1], + velocity_acc, + lr, + master_weight, + param_and_grad[0], + velocity_acc, + master_weight, + 'mu', + self._momentum, + 'use_nesterov', + self._use_nesterov, + 'regularization_method', + self._regularization_method, + 'regularization_coeff', + self._regularization_coeff, + 'multi_precision', + find_master, + ) return None attrs = { @@ -222,17 +259,17 @@ class Momentum(Optimizer): "regularization_method": self._regularization_method, "regularization_coeff": self._regularization_coeff, "multi_precision": find_master, - "rescale_grad": self._rescale_grad + "rescale_grad": self._rescale_grad, } inputs = { "Param": [param_and_grad[0]], "Grad": [param_and_grad[1]], "Velocity": [velocity_acc], - "LearningRate": [lr] + "LearningRate": [lr], } outputs = { "ParamOut": [param_and_grad[0]], - "VelocityOut": [velocity_acc] + "VelocityOut": [velocity_acc], } if find_master: @@ -240,10 +277,12 @@ class Momentum(Optimizer): outputs["MasterParamOut"] = master_weight # create the momentum optimize op - momentum_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - 
stop_gradient=True) + momentum_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return momentum_op diff --git a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py index de4c10040862afd2eea4ca92703f31348057b07a..6e225fdbcc8e6c8731e730d831d6907adc1d7d77 100644 --- a/python/paddle/fluid/contrib/quantize/quantize_transpiler.py +++ b/python/paddle/fluid/contrib/quantize/quantize_transpiler.py @@ -15,7 +15,11 @@ import collections import numpy as np -from paddle.fluid.framework import default_main_program, default_startup_program, program_guard +from paddle.fluid.framework import ( + default_main_program, + default_startup_program, + program_guard, +) from paddle.fluid.layer_helper import LayerHelper from paddle.fluid import unique_name from paddle.fluid import core @@ -57,13 +61,13 @@ def _original_var_name(var_name): Return the original variable name. """ if var_name.endswith('.quantized.dequantized'): - return var_name[:-len('.quantized.dequantized')] + return var_name[: -len('.quantized.dequantized')] if var_name.endswith('.quantized'): - return var_name[:-len('.quantized')] + return var_name[: -len('.quantized')] if var_name.endswith('.dequantized'): - return var_name[:-len('.dequantized')] + return var_name[: -len('.dequantized')] if var_name.endswith('.scale'): - return var_name[:-len('.scale')] + return var_name[: -len('.scale')] else: return var_name @@ -78,14 +82,15 @@ def quant(x, scale, num_bits): class QuantizeTranspiler(object): - - def __init__(self, - weight_bits=8, - activation_bits=8, - activation_quantize_type='abs_max', - weight_quantize_type='abs_max', - window_size=10000, - moving_rate=0.9): + def __init__( + self, + weight_bits=8, + activation_bits=8, + activation_quantize_type='abs_max', + weight_quantize_type='abs_max', + window_size=10000, + moving_rate=0.9, + ): """ Convert and rewrite the fluid Program according to weight and activation quantization type. @@ -123,12 +128,14 @@ class QuantizeTranspiler(object): raise ValueError( "Unknown weight_quantize_type: '%s'. It can only be ", "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'.", - str(weight_quantize_type)) + str(weight_quantize_type), + ) if activation_quantize_type not in quant_type: raise ValueError( "Unknown activation_quantize_type : '%s'. It can only be ", "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'.", - str(activation_quantize_type)) + str(activation_quantize_type), + ) self.weight_quantize_type = weight_quantize_type self.activation_quantize_type = activation_quantize_type @@ -137,8 +144,9 @@ class QuantizeTranspiler(object): self.moving_rate = moving_rate self.helper = LayerHelper(self.__class__.__name__) self.fake_quant_op_types = [ - 'fake_quantize_abs_max', 'fake_quantize_range_abs_max', - 'fake_quantize_moving_average_abs_max' + 'fake_quantize_abs_max', + 'fake_quantize_range_abs_max', + 'fake_quantize_moving_average_abs_max', ] self.fake_dequant_op_types = ['fake_dequantize_max_abs'] self.is_test = None @@ -157,8 +165,11 @@ class QuantizeTranspiler(object): """ self.is_test = False program = default_main_program() if program is None else program - startup_program = default_startup_program() if startup_program is \ - None else startup_program + startup_program = ( + default_startup_program() + if startup_program is None + else startup_program + ) # marked the variable which has been quantized and dequantized. 
dequanted_vars = [ @@ -173,20 +184,28 @@ class QuantizeTranspiler(object): block_id = block.idx # insert quant op and dequant op for name in op.input_arg_names: - #if share input between ops + # if share input between ops if name in dequanted_vars[block_id]: dequant_var = dequanted_vars[block_id][name] else: var = block.var(name) - quant_bits = self.weight_bits if var.name in params \ - else self.activation_bits - quant_type = self.weight_quantize_type if var.name \ - in params else self.activation_quantize_type + quant_bits = ( + self.weight_bits + if var.name in params + else self.activation_bits + ) + quant_type = ( + self.weight_quantize_type + if var.name in params + else self.activation_quantize_type + ) quant_var, scale_var = self._insert_quant_op( - block, idx, var, quant_bits, quant_type) + block, idx, var, quant_bits, quant_type + ) dequant_var = self._insert_dequant_op( - block, idx + 1, quant_var, scale_var, quant_bits) + block, idx + 1, quant_var, scale_var, quant_bits + ) dequanted_vars[block_id][name] = dequant_var # rename the forward op inputs op._rename_input(name, dequant_var.name) @@ -200,8 +219,9 @@ class QuantizeTranspiler(object): op._rename_input(name, dequant_var.name) no_dequanted_input_vars = False if no_dequanted_input_vars: - raise ValueError("There is no dequanted inputs for op %s." % - (op.type)) + raise ValueError( + "There is no dequanted inputs for op %s." % (op.type) + ) with program_guard(program, startup_program): self._create_global_step() @@ -217,8 +237,10 @@ class QuantizeTranspiler(object): _transpile_backward(block, op) def _create_global_step(self): - if self.weight_quantize_type == 'range_abs_max' or \ - self.activation_quantize_type == 'range_abs_max': + if ( + self.weight_quantize_type == 'range_abs_max' + or self.activation_quantize_type == 'range_abs_max' + ): self.global_step = autoincreased_step_counter() def freeze_program(self, program, place, scope=None): @@ -263,7 +285,7 @@ class QuantizeTranspiler(object): max_range = None scale_var = None for name in op.input_arg_names: - #rename input name of the op to the input name of last op which has be removed + # rename input name of the op to the input name of last op which has be removed if name in op_in_rename_map[block_id]: op._rename_input(name, op_in_rename_map[block_id][name]) @@ -278,23 +300,25 @@ class QuantizeTranspiler(object): scale_var = scale_v if len(op.output_arg_names) != 1: - raise ValueError("Only support one output, but op %s has" - " more than one output." % (op.type)) + raise ValueError( + "Only support one output, but op %s has" + " more than one output." 
% (op.type) + ) out_var = block.var(op.output_arg_names[0]) - dequant_var = block.create_var(name=_dequantized_var_name( - out_var.name), - type=out_var.type, - shape=out_var.shape, - dtype=out_var.dtype) + dequant_var = block.create_var( + name=_dequantized_var_name(out_var.name), + type=out_var.type, + shape=out_var.shape, + dtype=out_var.dtype, + ) # insert fake_dequantize_op - dequant_op = block._insert_op(idx + 1, - type="fake_dequantize_max_abs", - attrs={'max_range': float(max_range)}, - inputs={ - "X": out_var, - 'Scale': scale_var - }, - outputs={"Out": dequant_var}) + dequant_op = block._insert_op( + idx + 1, + type="fake_dequantize_max_abs", + attrs={'max_range': float(max_range)}, + inputs={"X": out_var, 'Scale': scale_var}, + outputs={"Out": dequant_var}, + ) op_out_rename_map[block_id][out_var.name] = dequant_var.name return dequant_var @@ -315,8 +339,9 @@ class QuantizeTranspiler(object): # input of the followed ops(of fc/conv) to the dquant_op for name in op.input_arg_names: if name in op_out_rename_map[block_id]: - op._rename_input(name, - op_out_rename_map[block_id][name]) + op._rename_input( + name, op_out_rename_map[block_id][name] + ) if op_type in self.fake_quant_op_types: in_arg_name = op.input('X')[0] @@ -346,7 +371,7 @@ class QuantizeTranspiler(object): # remove the unused var in ProgramDesc self._remove_unused_var(program) - #program = program.clone() + # program = program.clone() def convert_to_int8(self, program, place, scope=None): scope = global_scope() if scope is None else scope @@ -363,7 +388,8 @@ class QuantizeTranspiler(object): name=int8_var_name.encode('ascii'), type=var.type, dtype=core.VarDesc.VarType.INT8, - shape=var.shape) + shape=var.shape, + ) tensor = _load_var(var.name) @@ -392,7 +418,7 @@ class QuantizeTranspiler(object): for op in block.ops: args += op.input_arg_names args += op.output_arg_names - args = list(set(args)) #vals of all left ops + args = list(set(args)) # vals of all left ops var_names = block.vars.keys() # all vals sub_block_remove_vars = [] for var in var_names: @@ -406,39 +432,45 @@ class QuantizeTranspiler(object): block._remove_var(v) def _insert_quant_abs_max_op(self, block, idx, var, quant_bits): - """Insert fake_quantize_abs_max op. 
- """ - quant_var = block.create_var(name=_quantized_var_name(var.name), - type=var.type, - shape=var.shape, - dtype=var.dtype) - scale = block.create_var(name=_quantized_scale_name(var.name), - type=var.type, - shape=var.shape, - dtype=var.dtype) - quant_op = block._insert_op(idx, - type='fake_quantize_abs_max', - attrs={'bit_length': quant_bits}, - inputs={'X': var}, - outputs={ - 'Out': quant_var, - 'OutScale': scale - }) + """Insert fake_quantize_abs_max op.""" + quant_var = block.create_var( + name=_quantized_var_name(var.name), + type=var.type, + shape=var.shape, + dtype=var.dtype, + ) + scale = block.create_var( + name=_quantized_scale_name(var.name), + type=var.type, + shape=var.shape, + dtype=var.dtype, + ) + quant_op = block._insert_op( + idx, + type='fake_quantize_abs_max', + attrs={'bit_length': quant_bits}, + inputs={'X': var}, + outputs={'Out': quant_var, 'OutScale': scale}, + ) return quant_var, scale def _insert_quant_range_abs_max_op(self, block, idx, var, quant_bits): - """Insert fake_quantize_range_abs_max - """ - quant_var = block.create_var(name=_quantized_var_name(var.name), - type=var.type, - shape=var.shape, - dtype=var.dtype) - scale = self.helper.create_parameter(attr=ParamAttr( - name=_quantized_scale_name(var.name), - initializer=Constant(0.001), - trainable=False), - shape=[1], - dtype=var.dtype) + """Insert fake_quantize_range_abs_max""" + quant_var = block.create_var( + name=_quantized_var_name(var.name), + type=var.type, + shape=var.shape, + dtype=var.dtype, + ) + scale = self.helper.create_parameter( + attr=ParamAttr( + name=_quantized_scale_name(var.name), + initializer=Constant(0.001), + trainable=False, + ), + shape=[1], + dtype=var.dtype, + ) scale.stop_gradient = True ins = {'X': var, 'InScale': scale} @@ -449,9 +481,11 @@ class QuantizeTranspiler(object): name=unique_name.generate('scales'), persistable=True, dtype=var.dtype, - shape=[self.window_size]) - self.helper.set_variable_initializer(scales, - initializer=Constant(value=0)) + shape=[self.window_size], + ) + self.helper.set_variable_initializer( + scales, initializer=Constant(value=0) + ) ins['Iter'] = self.global_step outs['OutScales'] = scales @@ -459,45 +493,56 @@ class QuantizeTranspiler(object): attrs = { 'window_size': self.window_size, 'bit_length': quant_bits, - 'is_test': self.is_test + 'is_test': self.is_test, } - quant_op = block._insert_op(idx, - type='fake_quantize_range_abs_max', - attrs=attrs, - inputs=ins, - outputs=outs) + quant_op = block._insert_op( + idx, + type='fake_quantize_range_abs_max', + attrs=attrs, + inputs=ins, + outputs=outs, + ) return quant_var, scale - def _insert_quant_moving_average_abs_max_op(self, block, idx, var, - quant_bits): - """Insert fake_quantize_moving_average_abs_max - """ - quant_var = block.create_var(name=_quantized_var_name(var.name), - type=var.type, - shape=var.shape, - dtype=var.dtype) + def _insert_quant_moving_average_abs_max_op( + self, block, idx, var, quant_bits + ): + """Insert fake_quantize_moving_average_abs_max""" + quant_var = block.create_var( + name=_quantized_var_name(var.name), + type=var.type, + shape=var.shape, + dtype=var.dtype, + ) state = self.helper.create_global_variable( name=unique_name.generate('state'), persistable=True, dtype=var.dtype, - shape=[1]) - self.helper.set_variable_initializer(state, - initializer=Constant(value=1)) + shape=[1], + ) + self.helper.set_variable_initializer( + state, initializer=Constant(value=1) + ) accum = self.helper.create_global_variable( name=unique_name.generate('accum'), 
persistable=True, dtype=var.dtype, - shape=[1]) - self.helper.set_variable_initializer(accum, - initializer=Constant(value=1)) - scale = self.helper.create_parameter(attr=ParamAttr( - name=_quantized_scale_name(var.name), - initializer=Constant(0.001), - trainable=False), - shape=[1], - dtype=var.dtype) + shape=[1], + ) + self.helper.set_variable_initializer( + accum, initializer=Constant(value=1) + ) + scale = self.helper.create_parameter( + attr=ParamAttr( + name=_quantized_scale_name(var.name), + initializer=Constant(0.001), + trainable=False, + ), + shape=[1], + dtype=var.dtype, + ) scale.stop_gradient = True ins = {'X': var, 'InScale': scale} @@ -511,14 +556,16 @@ class QuantizeTranspiler(object): attrs = { 'bit_length': quant_bits, 'moving_rate': self.moving_rate, - 'is_test': self.is_test + 'is_test': self.is_test, } - quant_op = block._insert_op(idx, - type='fake_quantize_moving_average_abs_max', - attrs=attrs, - inputs=ins, - outputs=outs) + quant_op = block._insert_op( + idx, + type='fake_quantize_moving_average_abs_max', + attrs=attrs, + inputs=ins, + outputs=outs, + ) return quant_var, scale @@ -529,28 +576,31 @@ class QuantizeTranspiler(object): if quant_type == 'abs_max': return self._insert_quant_abs_max_op(block, idx, var, quant_bits) elif quant_type == 'range_abs_max': - return self._insert_quant_range_abs_max_op(block, idx, var, - quant_bits) + return self._insert_quant_range_abs_max_op( + block, idx, var, quant_bits + ) elif quant_type == 'moving_average_abs_max': return self._insert_quant_moving_average_abs_max_op( - block, idx, var, quant_bits) + block, idx, var, quant_bits + ) def _insert_dequant_op(self, block, idx, var, scale, quant_bits): """ Insert fake_quantize_op """ - dequant_var = block.create_var(name=_dequantized_var_name(var.name), - type=var.type, - shape=var.shape, - dtype=var.dtype) + dequant_var = block.create_var( + name=_dequantized_var_name(var.name), + type=var.type, + shape=var.shape, + dtype=var.dtype, + ) # insert fake_dequantize_op max_range = (1 << (quant_bits - 1)) - 1 - dequant_op = block._insert_op(idx, - type="fake_dequantize_max_abs", - attrs={'max_range': float(max_range)}, - inputs={ - "X": var, - 'Scale': scale - }, - outputs={"Out": dequant_var}) + dequant_op = block._insert_op( + idx, + type="fake_dequantize_max_abs", + attrs={'max_range': float(max_range)}, + inputs={"X": var, 'Scale': scale}, + outputs={"Out": dequant_var}, + ) return dequant_var diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py index 04d894b055dc6e10bededa2c2b9151cfaad55ec1..9dd00ddadc6acd0eaf9af25d713ef564250ea960 100644 --- a/python/paddle/fluid/contrib/slim/quantization/adaround.py +++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py @@ -20,44 +20,52 @@ import logging import paddle.fluid as fluid from ....log_helper import get_logger -from .utils import load_variable_data, set_variable_data, stable_sigmoid, quant_tensor, dequant_tensor, _channelwise_quant_axis1_ops, calculate_quant_cos_error, bias_correction_w - -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +from .utils import ( + load_variable_data, + set_variable_data, + stable_sigmoid, + quant_tensor, + dequant_tensor, + _channelwise_quant_axis1_ops, + calculate_quant_cos_error, + bias_correction_w, +) + +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) GAMMA = -0.1 ZETA = 1.1 def compute_soft_rounding(alpha_v): - return 
fluid.layers.clip(fluid.layers.sigmoid(alpha_v) * (ZETA - GAMMA) + - GAMMA, - min=0, - max=1) + return fluid.layers.clip( + fluid.layers.sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA, min=0, max=1 + ) def compute_soft_rounding_np(alpha_v): - return np.clip(stable_sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA, - a_min=0, - a_max=1) + return np.clip( + stable_sigmoid(alpha_v) * (ZETA - GAMMA) + GAMMA, a_min=0, a_max=1 + ) class AdaRoundLoss(object): - def __init__(self, reg_param=0.01, default_beta_range=(20, 2)): self.default_reg_param = reg_param self.default_beta_range = default_beta_range def compute_recon_loss(self, ada_quantized_output, orig_output): - square_cost = fluid.layers.square_error_cost(ada_quantized_output, - orig_output) + square_cost = fluid.layers.square_error_cost( + ada_quantized_output, orig_output + ) recon_loss = fluid.layers.reduce_mean( - fluid.layers.reduce_sum(square_cost, dim=-1)) + fluid.layers.reduce_sum(square_cost, dim=-1) + ) return recon_loss def compute_round_loss(self, alpha_v, warm_start, beta): - def round_loss_fn(): # compute rectified sigmoid of parameter 'alpha' which maps it between zero and one h_v = compute_soft_rounding(alpha_v) @@ -65,8 +73,9 @@ class AdaRoundLoss(object): # calculate regularization term - which ensures parameter to converge to exactly zeros and ones # at the end of optimization reg_term = fluid.layers.reduce_sum( - -fluid.layers.pow(fluid.layers.abs(2 * h_v - 1), factor=beta) + - 1) + -fluid.layers.pow(fluid.layers.abs(2 * h_v - 1), factor=beta) + + 1 + ) # calculate the rounding loss round_loss = self.default_reg_param * reg_term @@ -74,8 +83,12 @@ class AdaRoundLoss(object): return round_loss round_loss = fluid.layers.cond( - warm_start, lambda: fluid.layers.fill_constant( - shape=[1], dtype='float32', value=0.0), round_loss_fn) + warm_start, + lambda: fluid.layers.fill_constant( + shape=[1], dtype='float32', value=0.0 + ), + round_loss_fn, + ) return round_loss @@ -88,29 +101,32 @@ class AdaRoundLoss(object): warm_start_end_iter = warm_start * max_iter # compute relative iteration of current iteration - rel_iter = (cur_iter - warm_start_end_iter) / (max_iter - - warm_start_end_iter) - beta = end_beta + 0.5 * (start_beta - - end_beta) * (1 + np.cos(rel_iter * np.pi)) + rel_iter = (cur_iter - warm_start_end_iter) / ( + max_iter - warm_start_end_iter + ) + beta = end_beta + 0.5 * (start_beta - end_beta) * ( + 1 + np.cos(rel_iter * np.pi) + ) return beta class AdaRound(object): - - def __init__(self, - scale, - weight_tensor, - scope=None, - weight_var_name=None, - weight_op_type=None, - is_train=True, - num_iterations=1000): + def __init__( + self, + scale, + weight_tensor, + scope=None, + weight_var_name=None, + weight_op_type=None, + is_train=True, + num_iterations=1000, + ): self.is_train = is_train self.num_iterations = num_iterations self.warm_start = 0.1 self.weight_bits = 8 - self.offset = 0. 
# zero-point offset + self.offset = 0.0 # zero-point offset self.adaround_loss = AdaRoundLoss() self.ori_weight_tensor = weight_tensor self.scale = scale @@ -134,19 +150,23 @@ class AdaRound(object): shape=alpha.shape, dtype="float32", name=var_name + ".alpha", - default_initializer=fluid.initializer.NumpyArrayInitializer(alpha)) - - def _calculate_output_with_adarounded_weights(self, program, place, exe, - data, fp32_fetch_list, - weight_tensor_dequant): - set_variable_data(self.scope, place, self.weight_var_name, - weight_tensor_dequant) - - adaround_out_tensor = exe.run(program=program, - feed=data, - fetch_list=[fp32_fetch_list], - return_numpy=True, - scope=self.scope) + default_initializer=fluid.initializer.NumpyArrayInitializer(alpha), + ) + + def _calculate_output_with_adarounded_weights( + self, program, place, exe, data, fp32_fetch_list, weight_tensor_dequant + ): + set_variable_data( + self.scope, place, self.weight_var_name, weight_tensor_dequant + ) + + adaround_out_tensor = exe.run( + program=program, + feed=data, + fetch_list=[fp32_fetch_list], + return_numpy=True, + scope=self.scope, + ) return adaround_out_tensor def _calculate_quant_weight(self): @@ -154,9 +174,11 @@ class AdaRound(object): h_alpha = compute_soft_rounding_np(np_alpha) # Scale the tensor - tensor_scale = quant_tensor(self.ori_weight_tensor.copy(), - self.scale, - quant_axis=self.quant_axis) + tensor_scale = quant_tensor( + self.ori_weight_tensor.copy(), + self.scale, + quant_axis=self.quant_axis, + ) weight_tensor = np.floor(tensor_scale) @@ -168,10 +190,11 @@ class AdaRound(object): weight_tensor_quant = self._calculate_quant_weight() # Dequantize the tensor - weight_tensor_dequant = dequant_tensor(weight_tensor_quant + - self.offset, - self.scale, - quant_axis=self.quant_axis) + weight_tensor_dequant = dequant_tensor( + weight_tensor_quant + self.offset, + self.scale, + quant_axis=self.quant_axis, + ) return weight_tensor_dequant def update_final_weights(self): @@ -180,37 +203,42 @@ class AdaRound(object): def get_loss(self, beta, warm_start, adaround_out_tensor, orig_out_tensor): round_loss = self.adaround_loss.compute_round_loss( - self.alpha_v, warm_start, beta) + self.alpha_v, warm_start, beta + ) recon_loss = self.adaround_loss.compute_recon_loss( - adaround_out_tensor, orig_out_tensor) + adaround_out_tensor, orig_out_tensor + ) loss = round_loss + recon_loss losses = { 'loss': loss, 'round_loss': round_loss, - 'recon_loss': recon_loss + 'recon_loss': recon_loss, } return losses def update_beta_warm(self, cur_iteration): warm_start = cur_iteration < self.num_iterations * self.warm_start - beta = self.adaround_loss.compute_beta(self.num_iterations, - cur_iteration, self.warm_start) + beta = self.adaround_loss.compute_beta( + self.num_iterations, cur_iteration, self.warm_start + ) return beta, warm_start -def run_adaround(data_loader, - fp32_program, - fetch_list, - exe, - scope, - place, - quantized_op_pairs, - weight_op_pairs, - scale_dict, - num_iterations=1000, - lr=0.001, - bias_correction=False, - fast_mode=True): +def run_adaround( + data_loader, + fp32_program, + fetch_list, + exe, + scope, + place, + quantized_op_pairs, + weight_op_pairs, + scale_dict, + num_iterations=1000, + lr=0.001, + bias_correction=False, + fast_mode=True, +): fetch_op_name = fetch_list[0].name final_weight_tensor_quant_dict = {} for weight_var_name, quant_op_out_name in quantized_op_pairs.items(): @@ -224,7 +252,8 @@ def run_adaround(data_loader, if _op.type == "fetch": _op._rename_input(fetch_op_name, 
quant_op_out_name) fp32_fetch_list = fp32_program.global_block().var( - quant_op_out_name) + quant_op_out_name + ) fetch_op_name = quant_op_out_name # build adaround program @@ -235,29 +264,37 @@ def run_adaround(data_loader, with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): # initialize adaround - adaround = AdaRound(scale, - weight_var_tensor, - scope=scope, - weight_var_name=weight_var_name, - weight_op_type=weight_op_type, - num_iterations=num_iterations) - orig_out_tensor = fluid.data(name='orig_out_tensor', - shape=fp32_fetch_list.shape, - dtype='float32') - adaround_out_tensor = fluid.data(name='adaround_out_tensor', - shape=fp32_fetch_list.shape, - dtype='float32') - beta_tensor = fluid.data(name='beta', - shape=[1], - dtype='float32') - warm_start_tensor = fluid.data(name='warm_start', - shape=[1], - dtype='bool') - - train_fetches_loss = adaround.get_loss(beta_tensor, - warm_start_tensor, - adaround_out_tensor, - orig_out_tensor) + adaround = AdaRound( + scale, + weight_var_tensor, + scope=scope, + weight_var_name=weight_var_name, + weight_op_type=weight_op_type, + num_iterations=num_iterations, + ) + orig_out_tensor = fluid.data( + name='orig_out_tensor', + shape=fp32_fetch_list.shape, + dtype='float32', + ) + adaround_out_tensor = fluid.data( + name='adaround_out_tensor', + shape=fp32_fetch_list.shape, + dtype='float32', + ) + beta_tensor = fluid.data( + name='beta', shape=[1], dtype='float32' + ) + warm_start_tensor = fluid.data( + name='warm_start', shape=[1], dtype='bool' + ) + + train_fetches_loss = adaround.get_loss( + beta_tensor, + warm_start_tensor, + adaround_out_tensor, + orig_out_tensor, + ) optimizer = fluid.optimizer.Adam(learning_rate=lr) loss = train_fetches_loss['loss'] optimizer.minimize(loss) @@ -269,21 +306,32 @@ def run_adaround(data_loader, prev_start_time = start_time start_time = time.time() # run fp32 model - np_orig_out_tensor = exe.run(program=fp32_program, - feed=data, - fetch_list=[fp32_fetch_list], - return_numpy=True, - scope=scope) + np_orig_out_tensor = exe.run( + program=fp32_program, + feed=data, + fetch_list=[fp32_fetch_list], + return_numpy=True, + scope=scope, + ) - adaround_weight_tensor_dequant = adaround._calculate_adarounded_weights( + adaround_weight_tensor_dequant = ( + adaround._calculate_adarounded_weights() + ) + np_adaround_out_tensor = ( + adaround._calculate_output_with_adarounded_weights( + fp32_program, + place, + exe, + data, + fp32_fetch_list, + adaround_weight_tensor_dequant, + ) ) - np_adaround_out_tensor = adaround._calculate_output_with_adarounded_weights( - fp32_program, place, exe, data, fp32_fetch_list, - adaround_weight_tensor_dequant) # If the cosine distance of the two tensor is small, skip training - cos_error = calculate_quant_cos_error(np_orig_out_tensor[0], - np_adaround_out_tensor[0]) + cos_error = calculate_quant_cos_error( + np_orig_out_tensor[0], np_adaround_out_tensor[0] + ) if fast_mode and cos_error > 0.99: _logger.info("The cosine error is small, skip training.") break @@ -292,22 +340,30 @@ def run_adaround(data_loader, 'orig_out_tensor': np_orig_out_tensor[0], 'adaround_out_tensor': np_adaround_out_tensor[0], 'beta': beta, - 'warm_start': warm_start + 'warm_start': warm_start, } out = exe.run( train_program, feed=feed_dict, fetch_list=[v.name for v in train_fetches_loss.values()], - return_numpy=True) + return_numpy=True, + ) _logger.info( - "Iter {:d}, lr {:.5f}, loss {:.5f}, loss_round {:.5f}, loss_recon {:.5f}, time {:.5f}s" - .format(i, lr, np.mean(out[0]), 
np.mean(out[1]), - np.mean(out[2]), start_time - prev_start_time)) + "Iter {:d}, lr {:.5f}, loss {:.5f}, loss_round {:.5f}, loss_recon {:.5f}, time {:.5f}s".format( + i, + lr, + np.mean(out[0]), + np.mean(out[1]), + np.mean(out[2]), + start_time - prev_start_time, + ) + ) sys.stdout.flush() if i == num_iterations: break final_weight_tensor_quant_dict[ - weight_var_name] = adaround.update_final_weights() + weight_var_name + ] = adaround.update_final_weights() if bias_correction: final_weight_tensor_quant_dict[weight_var_name] = bias_correction_w( @@ -315,11 +371,16 @@ def run_adaround(data_loader, final_weight_tensor_quant_dict[weight_var_name], scale, adaround.quant_axis, - weight_bits=adaround.weight_bits) + weight_bits=adaround.weight_bits, + ) del adaround # update adarounded calibrated weights for weight_var_name in quantized_op_pairs.keys(): - set_variable_data(scope, place, weight_var_name, - final_weight_tensor_quant_dict[weight_var_name]) + set_variable_data( + scope, + place, + weight_var_name, + final_weight_tensor_quant_dict[weight_var_name], + ) diff --git a/python/paddle/fluid/contrib/slim/quantization/cal_kl_threshold.py b/python/paddle/fluid/contrib/slim/quantization/cal_kl_threshold.py index 69cd3f64061628a5db015886bc5966df045ec55d..ea3b1876f23462c450570290d6354e63f44b102f 100644 --- a/python/paddle/fluid/contrib/slim/quantization/cal_kl_threshold.py +++ b/python/paddle/fluid/contrib/slim/quantization/cal_kl_threshold.py @@ -17,9 +17,9 @@ import math import numpy as np from ....log_helper import get_logger -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) __all__ = ['cal_kl_threshold'] @@ -38,11 +38,13 @@ def expand_quantized_bins(quantized_bins, reference_bins): if zero_count == num_merged_bins: avg_bin_ele = 0 else: - avg_bin_ele = quantized_bins[idx] / (num_merged_bins - zero_count + - 0.0) + avg_bin_ele = quantized_bins[idx] / ( + num_merged_bins - zero_count + 0.0 + ) for idx1 in range(j_start, j_end): - expanded_quantized_bins[idx1] = (0 if reference_bins[idx1] == 0 else - avg_bin_ele) + expanded_quantized_bins[idx1] = ( + 0 if reference_bins[idx1] == 0 else avg_bin_ele + ) j_start += num_merged_bins j_end += num_merged_bins if (idx + 1) == len(quantized_bins) - 1: @@ -65,8 +67,12 @@ def safe_entropy(reference_distr_P, P_sum, candidate_distr_Q, Q_sum): tmp_sum2 += 0 else: if q_idx == 0: - _logger.error("Fatal error!, idx = " + str(idx) + - " qindex = 0! p_idx = " + str(p_idx)) + _logger.error( + "Fatal error!, idx = " + + str(idx) + + " qindex = 0! 
p_idx = " + + str(p_idx) + ) tmp_sum1 += p_idx * (math.log(Q_sum * p_idx)) tmp_sum2 += p_idx * (math.log(P_sum * q_idx)) return (tmp_sum1 - tmp_sum2) / P_sum @@ -84,7 +90,7 @@ def cal_kl_threshold(hist, bin_width, bits): assert hist.ndim == 1 hist_bins = hist.shape[0] starting_iter = int((hist_bins - 1) * 0.5) - quant_range = 2**(bits - 1) - 1 + quant_range = 2 ** (bits - 1) - 1 P_sum = np.sum(np.array(hist).ravel()) min_kl_divergence = 0 @@ -105,16 +111,19 @@ def cal_kl_threshold(hist, bin_width, bits): j_end = num_merged_bins for idx in range(quant_range): candidate_distr_Q_quantized[idx] = sum( - candidate_distr_Q[j_start:j_end]) + candidate_distr_Q[j_start:j_end] + ) j_start += num_merged_bins j_end += num_merged_bins if (idx + 1) == quant_range - 1: j_end = i - candidate_distr_Q = expand_quantized_bins(candidate_distr_Q_quantized, - reference_distr_bins) + candidate_distr_Q = expand_quantized_bins( + candidate_distr_Q_quantized, reference_distr_bins + ) Q_sum = sum(candidate_distr_Q) - kl_divergence = safe_entropy(reference_distr_P, P_sum, - candidate_distr_Q, Q_sum) + kl_divergence = safe_entropy( + reference_distr_P, P_sum, candidate_distr_Q, Q_sum + ) if not kl_inited: min_kl_divergence = kl_divergence min_kl_index = i diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py b/python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py index 4ae949bf0fe379be8cb6929ad36d2aeedfef869d..b01ceddd1cab07e8948f8f310b385408bebcbaa2 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/fuse_utils.py @@ -51,20 +51,20 @@ def fuse_conv_bn(model): def fuse_layers(model, layers_to_fuse, inplace=False): ''' - fuse layers in layers_to_fuse - - Args: - model(paddle.nn.Layer): The model to be fused. - layers_to_fuse(list): The layers' names to be fused. For - example,"fuse_list = [["conv1", "bn1"], ["conv2", "bn2"]]". - A TypeError would be raised if "fuse" was set as - True but "fuse_list" was None. - Default: None. - inplace(bool): Whether apply fusing to the input model. - Default: False. - - Return - fused_model(paddle.nn.Layer): The fused model. + fuse layers in layers_to_fuse + + Args: + model(paddle.nn.Layer): The model to be fused. + layers_to_fuse(list): The layers' names to be fused. For + example,"fuse_list = [["conv1", "bn1"], ["conv2", "bn2"]]". + A TypeError would be raised if "fuse" was set as + True but "fuse_list" was None. + Default: None. + inplace(bool): Whether apply fusing to the input model. + Default: False. + + Return + fused_model(paddle.nn.Layer): The fused model. ''' if inplace == False: model = copy.deepcopy(model) @@ -78,12 +78,14 @@ def _fuse_layers(model, layers_list): layer_list = [] for layer_name in layers_list: parent_layer, sub_name = utils.find_parent_layer_and_sub_name( - model, layer_name) + model, layer_name + ) layer_list.append(getattr(parent_layer, sub_name)) new_layers = _fuse_func(layer_list) for i, item in enumerate(layers_list): parent_layer, sub_name = utils.find_parent_layer_and_sub_name( - model, item) + model, item + ) setattr(parent_layer, sub_name, new_layers[i]) @@ -109,10 +111,13 @@ def _fuse_func(layer_list): def _fuse_conv_bn(conv, bn): '''fuse conv and bn for train or eval''' - assert(conv.training == bn.training),\ - "Conv and BN both must be in the same mode (train or eval)." + assert ( + conv.training == bn.training + ), "Conv and BN both must be in the same mode (train or eval)." 
if conv.training: - assert bn._num_features == conv._out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d' + assert ( + bn._num_features == conv._out_channels + ), 'Output channel of Conv2d must match num_features of BatchNorm2d' raise NotImplementedError else: return _fuse_conv_bn_eval(conv, bn) @@ -120,17 +125,23 @@ def _fuse_conv_bn(conv, bn): def _fuse_conv_bn_eval(conv, bn): '''fuse conv and bn for eval''' - assert (not (conv.training or bn.training)), "Fusion only for eval!" + assert not (conv.training or bn.training), "Fusion only for eval!" fused_conv = copy.deepcopy(conv) - fused_weight, fused_bias = _fuse_conv_bn_weights(fused_conv.weight, - fused_conv.bias, bn._mean, - bn._variance, bn._epsilon, - bn.weight, bn.bias) + fused_weight, fused_bias = _fuse_conv_bn_weights( + fused_conv.weight, + fused_conv.bias, + bn._mean, + bn._variance, + bn._epsilon, + bn.weight, + bn.bias, + ) fused_conv.weight.set_value(fused_weight) if fused_conv.bias is None: fused_conv.bias = paddle.create_parameter( - shape=[fused_conv._out_channels], is_bias=True, dtype=bn.bias.dtype) + shape=[fused_conv._out_channels], is_bias=True, dtype=bn.bias.dtype + ) fused_conv.bias.set_value(fused_bias) return fused_conv @@ -144,19 +155,22 @@ def _fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b): if bn_b is None: bn_b = paddle.zeros_like(bn_rm) bn_var_rsqrt = paddle.rsqrt(bn_rv + bn_eps) - conv_w = conv_w * \ - (bn_w * bn_var_rsqrt).reshape([-1] + [1] * (len(conv_w.shape) - 1)) + conv_w = conv_w * (bn_w * bn_var_rsqrt).reshape( + [-1] + [1] * (len(conv_w.shape) - 1) + ) conv_b = (conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b return conv_w, conv_b def _fuse_linear_bn(linear, bn): '''fuse linear and bn''' - assert (linear.training == bn.training),\ - "Linear and BN both must be in the same mode (train or eval)." + assert ( + linear.training == bn.training + ), "Linear and BN both must be in the same mode (train or eval)." if linear.training: - assert bn._num_features == linear.weight.shape[ - 1], 'Output channel of Linear must match num_features of BatchNorm' + assert ( + bn._num_features == linear.weight.shape[1] + ), 'Output channel of Linear must match num_features of BatchNorm' raise NotImplementedError else: return _fuse_linear_bn_eval(linear, bn) @@ -164,26 +178,32 @@ def _fuse_linear_bn(linear, bn): def _fuse_linear_bn_eval(linear, bn): '''fuse linear and bn for eval''' - assert (not (linear.training or bn.training)), "Fusion only for eval!" + assert not (linear.training or bn.training), "Fusion only for eval!" 
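# ---------------------------------------------------------------------------
# Editor's note: an illustrative numerical check (not part of this PR) of the
# BatchNorm-folding identity used by _fuse_conv_bn_weights() above, written for
# a 1x1-conv-sized weight so a plain matrix product stands in for the
# convolution. All variable names below are hypothetical.
import numpy as np

rng = np.random.default_rng(0)
c_out, c_in, eps = 4, 3, 1e-5
W = rng.standard_normal((c_out, c_in))           # conv weight, one row per out channel
b = rng.standard_normal(c_out)                   # conv bias
gamma, beta = rng.standard_normal(c_out), rng.standard_normal(c_out)
mean, var = rng.standard_normal(c_out), rng.random(c_out) + 0.1

x = rng.standard_normal((8, c_in))
y_ref = ((x @ W.T + b) - mean) / np.sqrt(var + eps) * gamma + beta   # conv -> bn (eval)

# Same folding as the diff: scale each output channel's weights, shift the bias.
inv_std = 1.0 / np.sqrt(var + eps)
W_fused = W * (gamma * inv_std)[:, None]
b_fused = (b - mean) * inv_std * gamma + beta
y_fused = x @ W_fused.T + b_fused

assert np.allclose(y_ref, y_fused)               # fused layer reproduces conv+bn
# ---------------------------------------------------------------------------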
fused_linear = copy.deepcopy(linear) - fused_weight, fused_bias = _fuse_linear_bn_weights(fused_linear.weight, - fused_linear.bias, - bn._mean, bn._variance, - bn._epsilon, bn.weight, - bn.bias) + fused_weight, fused_bias = _fuse_linear_bn_weights( + fused_linear.weight, + fused_linear.bias, + bn._mean, + bn._variance, + bn._epsilon, + bn.weight, + bn.bias, + ) fused_linear.weight.set_value(fused_weight) if fused_linear.bias is None: fused_linear.bias = paddle.create_parameter( shape=[fused_linear.weight.shape[1]], is_bias=True, - dtype=bn.bias.dtype) + dtype=bn.bias.dtype, + ) fused_linear.bias.set_value(fused_bias) return fused_linear -def _fuse_linear_bn_weights(linear_w, linear_b, bn_rm, bn_rv, bn_eps, bn_w, - bn_b): +def _fuse_linear_bn_weights( + linear_w, linear_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b +): '''fuse weights and bias of linear and bn''' if linear_b is None: linear_b = paddle.zeros_like(bn_rm) diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py index 23affb658cc5b266c0aa2829826eb5cd26935235..9c028736d6826530994396ed5429b03c1d4118a9 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py @@ -31,9 +31,9 @@ from .ptq_registry import PTQRegistry __all__ = ['ImperativePTQ'] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class ImperativePTQ(object): @@ -75,17 +75,20 @@ class ImperativePTQ(object): Return quantized_model(paddle.nn.Layer): The quantized model. """ - assert isinstance(model, paddle.nn.Layer), \ - "The model must be the instance of paddle.nn.Layer." + assert isinstance( + model, paddle.nn.Layer + ), "The model must be the instance of paddle.nn.Layer." if not inplace: model = copy.deepcopy(model) if fuse: model.eval() model = fuse_utils.fuse_layers(model, fuse_list) for name, layer in model.named_sublayers(): - if PTQRegistry.is_supported_layer(layer) \ - and utils.is_leaf_layer(layer) \ - and not self._is_skip_layer(layer): + if ( + PTQRegistry.is_supported_layer(layer) + and utils.is_leaf_layer(layer) + and not self._is_skip_layer(layer) + ): # Add quant config quant_config = copy.deepcopy(self._quant_config) @@ -98,7 +101,8 @@ class ImperativePTQ(object): quant_hook_handle = layer.register_forward_post_hook(hook) quant_config.quant_hook_handle = quant_hook_handle layer._forward_post_hooks.move_to_end( - quant_hook_handle._hook_id, last=False) + quant_hook_handle._hook_id, last=False + ) return model @@ -133,8 +137,9 @@ class ImperativePTQ(object): None """ - assert isinstance(model, paddle.nn.Layer), \ - "The model must be the instance of paddle.nn.Layer." + assert isinstance( + model, paddle.nn.Layer + ), "The model must be the instance of paddle.nn.Layer." 
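# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch (not part of this PR) of the mechanism the
# PTQ pass above relies on -- a forward-post hook that samples every supported
# layer's activations. The hook signature (layer, inputs, outputs) matches
# quant_forward_post_hook in this diff; the running abs-max "threshold" and all
# other names here are my own simplification.
import paddle

stats = {}

def absmax_post_hook(layer, inputs, outputs):
    # keep a running abs-max of this layer's output as a crude activation threshold
    name = layer.full_name()
    stats[name] = max(stats.get(name, 0.0), float(paddle.abs(outputs).max()))

linear = paddle.nn.Linear(16, 8)
handle = linear.register_forward_post_hook(absmax_post_hook)
for _ in range(4):                               # pretend calibration passes
    linear(paddle.rand([4, 16]))
handle.remove()
print(stats)
# ---------------------------------------------------------------------------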
# Convert and save dygraph quantized model self._convert(model) @@ -156,12 +161,16 @@ class ImperativePTQ(object): model_filename = basename + INFER_MODEL_SUFFIX params_filename = basename + INFER_PARAMS_SUFFIX - [infer_program, feed_target_names, - fetch_targets] = (paddle.fluid.io.load_inference_model( - dirname=dirname, - executor=exe, - model_filename=model_filename, - params_filename=params_filename)) + [ + infer_program, + feed_target_names, + fetch_targets, + ] = paddle.fluid.io.load_inference_model( + dirname=dirname, + executor=exe, + model_filename=model_filename, + params_filename=params_filename, + ) # Process inference program self._clean_up(infer_program) @@ -169,13 +178,15 @@ class ImperativePTQ(object): self._remove_scale_op(infer_program) # Save final program - paddle.fluid.io.save_inference_model(dirname=dirname, - feeded_var_names=feed_target_names, - target_vars=fetch_targets, - executor=exe, - main_program=infer_program.clone(), - model_filename=model_filename, - params_filename=params_filename) + paddle.fluid.io.save_inference_model( + dirname=dirname, + feeded_var_names=feed_target_names, + target_vars=fetch_targets, + executor=exe, + main_program=infer_program.clone(), + model_filename=model_filename, + params_filename=params_filename, + ) if is_dynamic_mode: paddle.disable_static() @@ -213,8 +224,9 @@ class ImperativePTQ(object): Returns: None """ - assert isinstance(model, paddle.nn.Layer), \ - "The input model must be the instance of paddle.nn.Layer." + assert isinstance( + model, paddle.nn.Layer + ), "The input model must be the instance of paddle.nn.Layer." total_num = 0 cur_num = 0 @@ -226,8 +238,9 @@ class ImperativePTQ(object): if self._is_quant_layer(sub_layer): cur_num += 1 if cur_num % 5 == 0: - _logger.info("Process the %s / %s layer" % - (cur_num, total_num)) + _logger.info( + "Process the %s / %s layer" % (cur_num, total_num) + ) quant_config = sub_layer._quant_config @@ -236,7 +249,7 @@ class ImperativePTQ(object): quant_config.out_act_quantizer.cal_thresholds() if PTQRegistry.is_simulated_quant_layer(sub_layer): - weights = (sub_layer.weight, ) + weights = (sub_layer.weight,) quant_config.wt_quantizer.sample_data(sub_layer, weights) quant_config.wt_quantizer.cal_thresholds() @@ -250,8 +263,9 @@ class ImperativePTQ(object): Returns: None """ - assert isinstance(sub_layer, paddle.nn.Layer), \ - "The input model must be the instance of paddle.nn.Layer." + assert isinstance( + sub_layer, paddle.nn.Layer + ), "The input model must be the instance of paddle.nn.Layer." layer_info = PTQRegistry.layer_info(sub_layer) @@ -272,12 +286,14 @@ class ImperativePTQ(object): Returns: None """ - assert isinstance(model, paddle.nn.Layer), \ - "The input model must be the instance of paddle.nn.Layer." + assert isinstance( + model, paddle.nn.Layer + ), "The input model must be the instance of paddle.nn.Layer." 
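# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of this PR) of what the
# abs-max style thresholds gathered by _cal_thresholds() above are used for --
# a symmetric int8 fake-quantize/dequantize whose scale is the largest
# |activation| seen during calibration. Toy data and helper names are made up;
# the max_range formula mirrors the one in this diff.
import numpy as np

def calibrate_abs_max(batches):
    """Running abs-max over calibration batches (the per-tensor threshold)."""
    threshold = 0.0
    for batch in batches:
        threshold = max(threshold, float(np.max(np.abs(batch))))
    return threshold

def fake_quant_dequant(x, threshold, bits=8):
    max_range = (1 << (bits - 1)) - 1            # 127 for int8
    q = np.clip(np.round(x / threshold * max_range), -max_range, max_range)
    return q * threshold / max_range             # simulated (de)quantization

rng = np.random.default_rng(0)
batches = [rng.standard_normal((32, 64)) for _ in range(10)]
t = calibrate_abs_max(batches)
x_hat = fake_quant_dequant(batches[0], t)
print(t, float(np.abs(batches[0] - x_hat).max()))   # error is at most ~t/254
# ---------------------------------------------------------------------------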
for name, sub_layer in model.named_sublayers(): - if self._is_quant_layer(sub_layer) \ - and PTQRegistry.is_simulated_quant_layer(sub_layer): + if self._is_quant_layer( + sub_layer + ) and PTQRegistry.is_simulated_quant_layer(sub_layer): quant_config = sub_layer._quant_config assert quant_config.enable_in_act_quantizer == True @@ -303,15 +319,17 @@ class ImperativePTQ(object): "activation_bits": in_act_quantizer.quant_bits, } - quant_layer = quant_layers.__dict__[quant_layer_name](sub_layer, - **kwargs) + quant_layer = quant_layers.__dict__[quant_layer_name]( + sub_layer, **kwargs + ) # save the input thresholds assert hasattr(quant_layer, "_fake_quant_input") assert hasattr(quant_layer._fake_quant_input, "_scale") assert len(in_act_quantizer.thresholds) == 1 - input_threshold = np.array([in_act_quantizer.thresholds[0]], - dtype=np.float32) + input_threshold = np.array( + [in_act_quantizer.thresholds[0]], dtype=np.float32 + ) quant_layer._fake_quant_input._scale.set_value(input_threshold) assert hasattr(quant_layer, "_fake_quant_weight") @@ -319,20 +337,24 @@ class ImperativePTQ(object): assert len(wt_quantizer.thresholds) == 1 weight_threshold = wt_quantizer.thresholds[0] if isinstance(weight_threshold, list): - weight_threshold = np.array(weight_threshold, - dtype=np.float32) + weight_threshold = np.array( + weight_threshold, dtype=np.float32 + ) else: - weight_threshold = np.array([weight_threshold], - dtype=np.float32) + weight_threshold = np.array( + [weight_threshold], dtype=np.float32 + ) quant_layer._fake_quant_weight._scale.set_value( - weight_threshold) + weight_threshold + ) # save the output thresholds self._save_output_thresholds(quant_layer, quant_config) # replace the layer - parent_layer, sub_name = \ - utils.find_parent_layer_and_sub_name(model, name) + parent_layer, sub_name = utils.find_parent_layer_and_sub_name( + model, name + ) setattr(parent_layer, sub_name, quant_layer) def _gather_input_thresholds(self, program, scope): @@ -351,30 +373,37 @@ class ImperativePTQ(object): if previous_op is None: continue - if "quantize_dequantize" in previous_op.type or \ - previous_op.type == "moving_average_abs_max_scale": + if ( + "quantize_dequantize" in previous_op.type + or previous_op.type == "moving_average_abs_max_scale" + ): attr_name = previous_op.output('OutScale')[0] in_threshold = utils.load_variable_data(scope, attr_name) in_threshold = utils.fp_numpy_to_naive(in_threshold) argname, index = utils._get_input_name_index( - op, in_var_name) - op._set_attr(argname + str(index) + "_threshold", - in_threshold) + op, in_var_name + ) + op._set_attr( + argname + str(index) + "_threshold", in_threshold + ) op._set_attr("with_quant_attr", True) else: for out_var_name in utils._get_op_output_var_names( - previous_op): + previous_op + ): if out_var_name != in_var_name: continue argname, index = utils._get_output_name_index( - previous_op, out_var_name) + previous_op, out_var_name + ) attr_name = argname + str(index) + "_threshold" if not previous_op.has_attr(attr_name): continue threshold = previous_op.attr(attr_name) argname, index = utils._get_input_name_index( - op, in_var_name) + op, in_var_name + ) attr_name = argname + str(index) + "_threshold" op._set_attr(attr_name, threshold) op._set_attr("with_quant_attr", True) @@ -390,8 +419,11 @@ class ImperativePTQ(object): """ def _helper(op, next_op, old_attr_name, new_attr_name): - if op.has_attr(old_attr_name) and next_op.has_attr(old_attr_name) \ - and op.attr(old_attr_name) == next_op.attr(old_attr_name): + if ( + 
op.has_attr(old_attr_name) + and next_op.has_attr(old_attr_name) + and op.attr(old_attr_name) == next_op.attr(old_attr_name) + ): threshold = op.attr(old_attr_name) op._remove_attr(old_attr_name) next_op._remove_attr(old_attr_name) @@ -417,8 +449,8 @@ class ImperativePTQ(object): old_attr_name = argname + str(index) + "_threshold" argname, index = utils._get_output_name_index( - next_op, - next_op.output("Out")[0]) + next_op, next_op.output("Out")[0] + ) new_attr_name = argname + str(index) + "_threshold" _helper(op, next_op, old_attr_name, new_attr_name) diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_hooks.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_hooks.py index 41c9b07195aefd867c2762476c8c94cb812b7074..319beee0ed73b5fe169cc7d85d9872281f75e02d 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_hooks.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_hooks.py @@ -23,10 +23,11 @@ def quant_forward_post_hook(layer, inputs, outputs): """ The forward_post_hook for PTQ. """ - assert hasattr(layer, '_quant_config'), \ - "The layer should have _quant_config attr" + assert hasattr( + layer, '_quant_config' + ), "The layer should have _quant_config attr" qc = layer._quant_config if qc.enable_in_act_quantizer: qc.in_act_quantizer.sample_data(layer, inputs) - qc.out_act_quantizer.sample_data(layer, (outputs, )) + qc.out_act_quantizer.sample_data(layer, (outputs,)) diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_quantizer.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_quantizer.py index 9b751a93a0fc6ddcaa804df79127624b8ff853aa..ba881f88efc39b0fa433d0ef48cfc6da77358f4c 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_quantizer.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_quantizer.py @@ -24,9 +24,13 @@ from . 
import utils from ..cal_kl_threshold import cal_kl_threshold __all__ = [ - 'BaseQuantizer', 'AbsmaxQuantizer', 'PerChannelAbsmaxQuantizer', - 'KLQuantizer', 'HistQuantizer', 'SUPPORT_ACT_QUANTIZERS', - 'SUPPORT_WT_QUANTIZERS' + 'BaseQuantizer', + 'AbsmaxQuantizer', + 'PerChannelAbsmaxQuantizer', + 'KLQuantizer', + 'HistQuantizer', + 'SUPPORT_ACT_QUANTIZERS', + 'SUPPORT_WT_QUANTIZERS', ] @@ -50,25 +54,25 @@ def merge_max_value(old, new): return new -def combine_abs_max_and_hist(tensor, origin_max, origin_hist, bins, - upsample_bins): - """ - """ +def combine_abs_max_and_hist( + tensor, origin_max, origin_hist, bins, upsample_bins +): + """ """ new_max = abs_max_value(tensor) if new_max == 0.0: return origin_max, origin_hist elif origin_max == 0.0: - new_hist, _ = np.histogram(paddle.abs(tensor).numpy(), - range=(0, new_max), - bins=bins) + new_hist, _ = np.histogram( + paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins + ) new_hist = new_hist.astype(np.float32) return new_max, new_hist elif new_max <= origin_max: - new_hist, _ = np.histogram(paddle.abs(tensor).numpy(), - range=(0, origin_max), - bins=bins) + new_hist, _ = np.histogram( + paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins + ) new_hist = new_hist.astype(np.float32) new_hist += origin_hist return origin_max, new_hist @@ -81,17 +85,18 @@ def combine_abs_max_and_hist(tensor, origin_max, origin_hist, bins, upsampled_hist = np.repeat(origin_hist, upsample_bins) expanded_hist = np.zeros((bins * downsampe_bins), dtype=np.float32) - expanded_hist[0:bins * upsample_bins] = upsampled_hist - cumsumed_hist = np.cumsum( - expanded_hist, dtype=np.float64)[downsampe_bins - 1::downsampe_bins] + expanded_hist[0 : bins * upsample_bins] = upsampled_hist + cumsumed_hist = np.cumsum(expanded_hist, dtype=np.float64)[ + downsampe_bins - 1 :: downsampe_bins + ] shift_cumsumed_hist = np.zeros((bins), dtype=np.float64) shift_cumsumed_hist[1:] = cumsumed_hist[0:-1] sampled_hist = (cumsumed_hist - shift_cumsumed_hist) / upsample_bins sampled_hist = sampled_hist.astype(np.float32) - new_hist, _ = np.histogram(paddle.abs(tensor).numpy(), - range=(0, new_max), - bins=bins) + new_hist, _ = np.histogram( + paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins + ) new_hist = new_hist.astype(np.float32) new_hist += sampled_hist @@ -165,16 +170,16 @@ class PerChannelAbsmaxQuantizer(BaseQuantizer): ] abs_max_vals_list.append(abs_max_vals) - self.abs_max_vals = merge_max_value(self.abs_max_vals, - abs_max_vals_list) + self.abs_max_vals = merge_max_value( + self.abs_max_vals, abs_max_vals_list + ) def cal_thresholds(self): self.thresholds = self.abs_max_vals class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta): - """ - """ + """ """ def __init__(self, quant_bits=8, bins=1024, upsample_bins=64): super(BaseHistQuantizer, self).__init__(quant_bits) @@ -194,9 +199,11 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta): if abs_max_vals[idx] == 0.0: self.hists.append(None) else: - hist, _ = np.histogram(paddle.abs(tensor).numpy(), - range=(0., abs_max_vals[idx]), - bins=self.bins) + hist, _ = np.histogram( + paddle.abs(tensor).numpy(), + range=(0.0, abs_max_vals[idx]), + bins=self.bins, + ) hist = hist.astype(np.float32) self.hists.append(hist) else: @@ -205,8 +212,12 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta): for idx, tensor in enumerate(tensors): new_abs_max, new_hist = combine_abs_max_and_hist( - tensor, self.abs_max_vals[idx], self.hists[idx], self.bins, - self.upsample_bins) + tensor, + 
self.abs_max_vals[idx], + self.hists[idx], + self.bins, + self.upsample_bins, + ) self.abs_max_vals[idx] = new_abs_max self.hists[idx] = new_hist @@ -216,19 +227,15 @@ class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta): class HistQuantizer(BaseHistQuantizer): - """ - """ + """ """ - def __init__(self, - quant_bits=8, - bins=1024, - upsample_bins=64, - hist_percent=0.99999): + def __init__( + self, quant_bits=8, bins=1024, upsample_bins=64, hist_percent=0.99999 + ): super(HistQuantizer, self).__init__(quant_bits, bins, upsample_bins) self.hist_percent = hist_percent def cal_thresholds(self): - def _helper(abs_max, hist, percent): assert hist.ndim == 1 and percent < 1.0 hist = hist / np.sum(hist, dtype=np.float64) @@ -240,14 +247,14 @@ class HistQuantizer(BaseHistQuantizer): if self.hists[idx] is None: self.thresholds.append(self.abs_max_vals[idx]) else: - threshold = _helper(self.abs_max_vals[idx], self.hists[idx], - self.hist_percent) + threshold = _helper( + self.abs_max_vals[idx], self.hists[idx], self.hist_percent + ) self.thresholds.append(threshold) class KLQuantizer(BaseHistQuantizer): - """ - """ + """ """ def __init__(self, quant_bits=8, bins=1024, upsample_bins=64): super(KLQuantizer, self).__init__(quant_bits, bins, upsample_bins) diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py index a6b8033bc78c9822fe18d47f81cc82cbe92ebc52..5bc7fc0c6b3512da1926347e407022666484f5db 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_registry.py @@ -48,10 +48,15 @@ PTQ_LAYERS_INFO = [ ] QUANT_LAYERS_INFO = [ - LayerInfo(paddle.nn.quant.quant_layers.QuantizedConv2D, ['Input'], - ['Filter'], ['Output']), - LayerInfo(paddle.nn.quant.quant_layers.QuantizedLinear, ['X'], ['Y'], - ['Out']), + LayerInfo( + paddle.nn.quant.quant_layers.QuantizedConv2D, + ['Input'], + ['Filter'], + ['Output'], + ), + LayerInfo( + paddle.nn.quant.quant_layers.QuantizedLinear, ['X'], ['Y'], ['Out'] + ), ] SIMULATED_LAYERS = [paddle.nn.Conv2D, paddle.nn.Linear] @@ -61,6 +66,7 @@ class PTQRegistry(object): """ Register the supported layers for PTQ and provide layers info. """ + supported_layers_map = {} registered_layers_map = {} is_inited = False @@ -89,8 +95,9 @@ class PTQRegistry(object): flag(bool): Whther the layer is supported. """ cls._init() - return layer in cls.supported_layers_map or \ - isinstance(layer, tuple(cls.supported_layers_map.keys())) + return layer in cls.supported_layers_map or isinstance( + layer, tuple(cls.supported_layers_map.keys()) + ) @classmethod def is_registered_layer(cls, layer): @@ -102,8 +109,9 @@ class PTQRegistry(object): flag(bool): Wether the layer is register layer_info. """ cls._init() - return layer in cls.registered_layers_map or \ - isinstance(layer, tuple(cls.registered_layers_map.keys())) + return layer in cls.registered_layers_map or isinstance( + layer, tuple(cls.registered_layers_map.keys()) + ) @classmethod def is_simulated_quant_layer(cls, layer): @@ -114,8 +122,9 @@ class PTQRegistry(object): Returns: flag(bool): Whther the layer is supported. 
""" - return layer in SIMULATED_LAYERS or \ - isinstance(layer, tuple(SIMULATED_LAYERS)) + return layer in SIMULATED_LAYERS or isinstance( + layer, tuple(SIMULATED_LAYERS) + ) @classmethod def layer_info(cls, layer): @@ -126,8 +135,9 @@ class PTQRegistry(object): Returns: layer_info(LayerInfo): The layer info of the input layer. """ - assert cls.is_registered_layer(layer), \ - "The input layer is not register." + assert cls.is_registered_layer( + layer + ), "The input layer is not register." for layer_key, layer_info in cls.registered_layers_map.items(): if layer == layer_key or isinstance(layer, layer_key): diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py index 423fb0fcd52f372bc6016d7138722eb17149af53..255e8e03a20ab8885ff6a9b8944348ce46f53f7f 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py @@ -38,17 +38,20 @@ from . import fuse_utils __all__ = ['ImperativeQuantAware'] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def lazy_import_fleet(layer_name_map, fake_quant_input_layers): from paddle.distributed import fleet + layer_name_map[ - 'ColumnParallelLinear'] = fleet.meta_parallel.parallel_layers.mp_layers.ColumnParallelLinear + 'ColumnParallelLinear' + ] = fleet.meta_parallel.parallel_layers.mp_layers.ColumnParallelLinear layer_name_map[ - 'RowParallelLinear'] = fleet.meta_parallel.parallel_layers.mp_layers.RowParallelLinear + 'RowParallelLinear' + ] = fleet.meta_parallel.parallel_layers.mp_layers.RowParallelLinear fake_quant_input_layers.append(fleet.meta_parallel.RowParallelLinear) fake_quant_input_layers.append(fleet.meta_parallel.ColumnParallelLinear) return layer_name_map, fake_quant_input_layers @@ -59,22 +62,27 @@ class ImperativeQuantAware(object): Applying quantization aware training (QAT) to the dgraph model. """ - def __init__(self, - quantizable_layer_type=[ - 'Conv2D', 'Linear', 'Conv2DTranspose', - 'ColumnParallelLinear', 'RowParallelLinear' - ], - weight_quantize_type='abs_max', - activation_quantize_type='moving_average_abs_max', - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - fuse_conv_bn=False, - weight_preprocess_layer=None, - act_preprocess_layer=None, - weight_quantize_layer=None, - act_quantize_layer=None, - onnx_format=False): + def __init__( + self, + quantizable_layer_type=[ + 'Conv2D', + 'Linear', + 'Conv2DTranspose', + 'ColumnParallelLinear', + 'RowParallelLinear', + ], + weight_quantize_type='abs_max', + activation_quantize_type='moving_average_abs_max', + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + fuse_conv_bn=False, + weight_preprocess_layer=None, + act_preprocess_layer=None, + weight_quantize_layer=None, + act_quantize_layer=None, + onnx_format=False, + ): """ The constructor for ImperativeQuantAware. 
@@ -222,13 +230,14 @@ class ImperativeQuantAware(object): "weight_preprocess_layer": weight_preprocess_layer, "act_preprocess_layer": act_preprocess_layer, "weight_quantize_layer": weight_quantize_layer, - "act_quantize_layer": act_quantize_layer + "act_quantize_layer": act_quantize_layer, } self._quantize_inputs = ImperativeQuantizeInputs(**kwargs) self._quantize_outputs = ImperativeQuantizeOutputs( - moving_rate, activation_bits, onnx_format) + moving_rate, activation_bits, onnx_format + ) def quantize(self, model): """ @@ -278,8 +287,9 @@ class ImperativeQuantAware(object): # fake quant logical. imperative_qat.quantize(model) """ - assert isinstance(model, dygraph.Layer), \ - "The model must be the instance of dygraph.Layer." + assert isinstance( + model, dygraph.Layer + ), "The model must be the instance of dygraph.Layer." if self.fuse_conv_bn: fuse_utils.fuse_conv_bn(model) @@ -289,8 +299,9 @@ class ImperativeQuantAware(object): return model def save_quantized_model(self, layer, path, input_spec=None, **config): - self._quantize_outputs.save_quantized_model(layer, path, input_spec, - **config) + self._quantize_outputs.save_quantized_model( + layer, path, input_spec, **config + ) class ImperativeQuantizeInputs(object): @@ -299,17 +310,19 @@ class ImperativeQuantizeInputs(object): logic both for activation inputs and weight inputs. """ - def __init__(self, - quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], - weight_quantize_type='abs_max', - activation_quantize_type='moving_average_abs_max', - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_preprocess_layer=None, - act_preprocess_layer=None, - weight_quantize_layer=None, - act_quantize_layer=None): + def __init__( + self, + quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], + weight_quantize_type='abs_max', + activation_quantize_type='moving_average_abs_max', + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_preprocess_layer=None, + act_preprocess_layer=None, + weight_quantize_layer=None, + act_quantize_layer=None, + ): """ The constructor for ImperativeQuantizeInputs. @@ -317,48 +330,66 @@ class ImperativeQuantizeInputs(object): """ super(ImperativeQuantizeInputs, self).__init__() self.layer_name_map, self.fake_quant_input_layers = lazy_import_fleet( - utils.layer_name_map, utils.fake_quant_input_layers) + utils.layer_name_map, utils.fake_quant_input_layers + ) self._quantizable_layer_type = tuple( - self.layer_name_map[layer] if layer in - self.layer_name_map else layer for layer in quantizable_layer_type) + self.layer_name_map[layer] + if layer in self.layer_name_map + else layer + for layer in quantizable_layer_type + ) for layer in self._quantizable_layer_type: - assert not isinstance(layer, str) \ - and layer in self.fake_quant_input_layers, \ - "%s is unspported to be quantized." % layer + assert ( + not isinstance(layer, str) + and layer in self.fake_quant_input_layers + ), ("%s is unspported to be quantized." % layer) quantize_type = { - 'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max', - 'lsq_weight', 'channel_wise_lsq_weight' + 'abs_max', + 'moving_average_abs_max', + 'channel_wise_abs_max', + 'lsq_weight', + 'channel_wise_lsq_weight', } act_quantize_type = {'moving_average_abs_max', 'lsq_act'} - assert weight_quantize_type != 'moving_average_abs_max' \ - and weight_quantize_type in quantize_type, \ - "Unsupported weight_quantize_type: %s. 
It can only " \ + assert ( + weight_quantize_type != 'moving_average_abs_max' + and weight_quantize_type in quantize_type + ), ( + "Unsupported weight_quantize_type: %s. It can only " "be abs_max or channel_wise_abs_max." % weight_quantize_type + ) # TODO (jc): activation_quantize_type supports range_abs_max - assert activation_quantize_type in act_quantize_type, \ - "Unsupported activation_quantize_type: %s. It can " \ - "only be moving_average_abs_max or lsq_act now." \ + assert activation_quantize_type in act_quantize_type, ( + "Unsupported activation_quantize_type: %s. It can " + "only be moving_average_abs_max or lsq_act now." % activation_quantize_type - - bits_check = lambda bits: isinstance(bits, int) \ - and bits >= 0 and bits <= 16 - assert bits_check(weight_bits), \ - "weight_bits should be 1, 2,... or 16." - assert bits_check(activation_bits), \ - "activation_bits should be 1, 2,... or 16." - - layer_check = lambda method: method is None or \ - issubclass(method, dygraph.layers.Layer) - assert layer_check(weight_preprocess_layer), \ - "weight_preprocess should be nn.Layer." - assert layer_check(act_preprocess_layer), \ - "act_preprocess should be nn.Layer." - assert layer_check(weight_quantize_layer), \ - "weight_quantize should be nn.Layer." - assert layer_check(act_quantize_layer), \ - "act_quantize should be nn.Layer." + ) + + bits_check = ( + lambda bits: isinstance(bits, int) and bits >= 0 and bits <= 16 + ) + assert bits_check(weight_bits), "weight_bits should be 1, 2,... or 16." + assert bits_check( + activation_bits + ), "activation_bits should be 1, 2,... or 16." + + layer_check = lambda method: method is None or issubclass( + method, dygraph.layers.Layer + ) + assert layer_check( + weight_preprocess_layer + ), "weight_preprocess should be nn.Layer." + assert layer_check( + act_preprocess_layer + ), "act_preprocess should be nn.Layer." + assert layer_check( + weight_quantize_layer + ), "weight_quantize should be nn.Layer." + assert layer_check( + act_quantize_layer + ), "act_quantize should be nn.Layer." self._kwargs = { "weight_quantize_type": weight_quantize_type, @@ -369,7 +400,7 @@ class ImperativeQuantizeInputs(object): "weight_pre_layer": weight_preprocess_layer, "act_pre_layer": act_preprocess_layer, "weight_quant_layer": weight_quantize_layer, - "act_quant_layer": act_quantize_layer + "act_quant_layer": act_quantize_layer, } def apply(self, model): @@ -385,17 +416,20 @@ class ImperativeQuantizeInputs(object): None """ - assert isinstance(model, dygraph.Layer), \ - "The model must be the instance of dygraph.Layer." + assert isinstance( + model, dygraph.Layer + ), "The model must be the instance of dygraph.Layer." for name, cur_layer in model.named_sublayers(): - if not isinstance(cur_layer, self._quantizable_layer_type) \ - or (hasattr(cur_layer, "skip_quant") \ - and cur_layer.skip_quant == True): + if not isinstance(cur_layer, self._quantizable_layer_type) or ( + hasattr(cur_layer, "skip_quant") + and cur_layer.skip_quant == True + ): continue - parent_layer, sub_name = \ - utils.find_parent_layer_and_sub_name(model, name) + parent_layer, sub_name = utils.find_parent_layer_and_sub_name( + model, name + ) cur_quant_layer = self._get_input_quantized_layer(cur_layer) setattr(parent_layer, sub_name, cur_quant_layer) @@ -407,9 +441,9 @@ class ImperativeQuantizeInputs(object): if isinstance(layer, value): quant_layer_name = 'Quantized' + key break - assert quant_layer_name is not None, \ - "The layer %s is unsupported to be quantized." 
\ - % layer.full_name() + assert quant_layer_name is not None, ( + "The layer %s is unsupported to be quantized." % layer.full_name() + ) return quant_layers.__dict__[quant_layer_name](layer, **self._kwargs) @@ -445,8 +479,9 @@ class ImperativeQuantizeOutputs(object): Returns: None """ - assert isinstance(model, dygraph.Layer), \ - "The model must be the instance of dygraph.Layer." + assert isinstance( + model, dygraph.Layer + ), "The model must be the instance of dygraph.Layer." for cur_name, cur_layer in model.named_sublayers(): if '_act_preprocess' in cur_name: @@ -454,17 +489,20 @@ class ImperativeQuantizeOutputs(object): if not self._is_target_layer(cur_layer): continue - parent_layer, sub_name = \ - utils.find_parent_layer_and_sub_name(model, cur_name) + parent_layer, sub_name = utils.find_parent_layer_and_sub_name( + model, cur_name + ) reduce_type = None if isinstance(cur_layer, tuple(utils.fake_quant_output_layers)): cur_quant_layer = quant_layers.FakeQuantMAOutputScaleLayer( - cur_layer, self._moving_rate, reduce_type=reduce_type) + cur_layer, self._moving_rate, reduce_type=reduce_type + ) else: cur_quant_layer = quant_layers.MAOutputScaleLayer( - cur_layer, self._moving_rate, reduce_type=reduce_type) + cur_layer, self._moving_rate, reduce_type=reduce_type + ) setattr(parent_layer, sub_name, cur_quant_layer) @@ -496,8 +534,9 @@ class ImperativeQuantizeOutputs(object): Returns: None """ - assert isinstance(model, dygraph.Layer), \ - "The model must be the instance of dygraph.Layer." + assert isinstance( + model, dygraph.Layer + ), "The model must be the instance of dygraph.Layer." paddle.jit.save(layer=model, path=path, input_spec=input_spec, **config) @@ -515,11 +554,16 @@ class ImperativeQuantizeOutputs(object): model_filename = basename + INFER_MODEL_SUFFIX params_filename = basename + INFER_PARAMS_SUFFIX - [infer_program, feed_target_names, fetch_targets - ] = (load_inference_model(dirname=dirname, - executor=exe, - model_filename=model_filename, - params_filename=params_filename)) + [ + infer_program, + feed_target_names, + fetch_targets, + ] = load_inference_model( + dirname=dirname, + executor=exe, + model_filename=model_filename, + params_filename=params_filename, + ) if not self._onnx_format: self._gather_scales(infer_program, scope, fetch_targets) @@ -539,7 +583,8 @@ class ImperativeQuantizeOutputs(object): else: graph = IrGraph(core.Graph(infer_program.desc), for_test=False) transform_pass = ReplaceFakeQuantDequantPass( - scope, place, quant_bits=self._activation_bits) + scope, place, quant_bits=self._activation_bits + ) for sub_graph in graph.all_sub_graphs(): sub_graph._for_test = True transform_pass.apply(sub_graph) @@ -555,14 +600,16 @@ class ImperativeQuantizeOutputs(object): move_persistable_var_to_global_block(infer_program) - save_inference_model(dirname=dirname, - feeded_var_names=feed_target_names, - target_vars=fetch_targets, - executor=exe, - main_program=infer_program.clone(), - model_filename=model_filename, - params_filename=params_filename, - clip_extra=clip_extra) + save_inference_model( + dirname=dirname, + feeded_var_names=feed_target_names, + target_vars=fetch_targets, + executor=exe, + main_program=infer_program.clone(), + model_filename=model_filename, + params_filename=params_filename, + clip_extra=clip_extra, + ) if is_dynamic_mode: paddle.disable_static() @@ -576,12 +623,16 @@ class ImperativeQuantizeOutputs(object): return False if self._onnx_format: - return True if isinstance(layer, tuple( - utils.fake_quant_wrap_layers)) else False + return 
( + True + if isinstance(layer, tuple(utils.fake_quant_wrap_layers)) + else False + ) flag = False - if utils.is_leaf_layer(layer) and \ - not isinstance(layer, tuple(utils.fake_quant_leaf_layers)): + if utils.is_leaf_layer(layer) and not isinstance( + layer, tuple(utils.fake_quant_leaf_layers) + ): flag = True if isinstance(layer, tuple(utils.fake_quant_wrap_layers)): @@ -600,8 +651,9 @@ class ImperativeQuantizeOutputs(object): def _gather_input_scale(): target_ops = [] - skip_ops = utils.fake_quantize_dequantize_op_types + \ - ["moving_average_abs_max_scale"] + skip_ops = utils.fake_quantize_dequantize_op_types + [ + "moving_average_abs_max_scale" + ] for block in program.blocks: for op in block.ops: if op.type not in skip_ops: @@ -611,16 +663,19 @@ class ImperativeQuantizeOutputs(object): for in_var_name in utils._get_op_input_var_names(op): previous_op = utils.find_previous_op(op.block, in_var_name) - if previous_op is not None and \ - ("quantize_dequantize" in previous_op.type or \ - previous_op.type == "moving_average_abs_max_scale"): + if previous_op is not None and ( + "quantize_dequantize" in previous_op.type + or previous_op.type == "moving_average_abs_max_scale" + ): scale_name = previous_op.output('OutScale')[0] in_scale = utils.load_variable_data(scope, scale_name) in_scale = utils.fp_numpy_to_naive(in_scale) argname, index = utils._get_input_name_index( - op, in_var_name) - op._set_attr(argname + str(index) + "_threshold", - in_scale) + op, in_var_name + ) + op._set_attr( + argname + str(index) + "_threshold", in_scale + ) op._set_attr("with_quant_attr", True) def _gather_output_scale(): @@ -646,7 +701,8 @@ class ImperativeQuantizeOutputs(object): if res is not None: argname, index = res previous_op._set_attr( - argname + str(index) + "_threshold", out_scale) + argname + str(index) + "_threshold", out_scale + ) previous_op._set_attr("out_threshold", out_scale) previous_op._set_attr("with_quant_attr", True) @@ -678,12 +734,20 @@ class ImperativeQuantizeOutputs(object): 2. 
the previous ops of the input op are not fake_quantize_dequantize ops """ target_op_types = [ - "conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose" + "conv2d", + "depthwise_conv2d", + "matmul", + "conv2d_transpose", ] if in_op.type not in target_op_types: return False - previous_ops = [utils.find_previous_op(block, arg_name) \ - for arg_name in in_op.input_arg_names] - return any(op is not None and op.type not in \ - utils.fake_quantize_dequantize_op_types for op in previous_ops) + previous_ops = [ + utils.find_previous_op(block, arg_name) + for arg_name in in_op.input_arg_names + ] + return any( + op is not None + and op.type not in utils.fake_quantize_dequantize_op_types + for op in previous_ops + ) diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py index a30d775165e186212961df9ae3d1e85b3728f016..d771b51e09d11f16f464ef12aecb79765fb13dc4 100644 --- a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py @@ -18,7 +18,12 @@ import numpy as np import paddle import paddle.nn.quant.quant_layers as quant_layers -from ..utils import _get_op_input_var_names, _get_op_output_var_names, _get_output_name_index, _get_input_name_index +from ..utils import ( + _get_op_input_var_names, + _get_op_output_var_names, + _get_output_name_index, + _get_input_name_index, +) layer_name_map = { 'Conv2DTranspose': paddle.nn.Conv2DTranspose, @@ -54,8 +59,10 @@ fake_quant_input_layers = [ # TODO(jc): fix the problem of adding duplicate fake_quant ops # paddle.nn.AdaptiveAvgPool2D, paddle.nn.AvgPool2D, paddle.nn.ReLU,paddle.nn.LeakyReLU fake_quant_output_layers = [ - paddle.nn.quant.add, paddle.nn.quant.subtract, paddle.nn.quant.multiply, - paddle.nn.quant.divide + paddle.nn.quant.add, + paddle.nn.quant.subtract, + paddle.nn.quant.multiply, + paddle.nn.quant.divide, ] fake_quant_leaf_layers = [ @@ -66,24 +73,28 @@ fake_quant_leaf_layers = [ ] fake_quant_wrap_layers = [ - quant_layers.QuantizedConv2D, quant_layers.QuantizedLinear, + quant_layers.QuantizedConv2D, + quant_layers.QuantizedLinear, quant_layers.QuantizedConv2DTranspose, quant_layers.QuantizedColumnParallelLinear, - quant_layers.QuantizedRowParallelLinear + quant_layers.QuantizedRowParallelLinear, ] # The weight format of these layers is Cin * Cout * H * W spec_channel_axis_layers = [paddle.nn.Conv2DTranspose, paddle.nn.Linear] weight_op_types = [ - "conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose", - "depthwise_conv2d_transpose" + "conv2d", + "depthwise_conv2d", + "matmul", + "conv2d_transpose", + "depthwise_conv2d_transpose", ] fake_quantize_dequantize_op_types = [ "fake_quantize_dequantize_abs_max", "fake_channel_wise_quantize_dequantize_abs_max", - "fake_quantize_dequantize_moving_average_abs_max" + "fake_quantize_dequantize_moving_average_abs_max", ] @@ -92,8 +103,7 @@ def load_variable_data(scope, var_name): Load variable value from scope """ var_node = scope.find_var(var_name) - assert var_node is not None, \ - "Can not find " + var_name + " in the scope." + assert var_node is not None, "Can not find " + var_name + " in the scope." return np.array(var_node.get_tensor()) @@ -131,8 +141,9 @@ def find_parent_layer_and_sub_name(model, name): Returns: parent_layer, subname """ - assert isinstance(model, paddle.nn.Layer), \ - "The model must be the instance of paddle.nn.Layer." 
+ assert isinstance( + model, paddle.nn.Layer + ), "The model must be the instance of paddle.nn.Layer." assert len(name) > 0, "The input (name) should not be empty." last_idx = 0 @@ -164,8 +175,7 @@ def is_leaf_layer(layer): """ Whether the layer is leaf layer. """ - return isinstance(layer, paddle.nn.Layer) \ - and len(layer.sublayers()) == 0 + return isinstance(layer, paddle.nn.Layer) and len(layer.sublayers()) == 0 def fp_numpy_to_naive(x_np): diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py index 5c0fff9abe4e4abfa704a3acd2224dc6b7e22e97..73d1e31159a6f496fb5269e7f064fc2b52c73538 100644 --- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py +++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py @@ -32,7 +32,14 @@ from .... import unique_name from ....executor import global_scope, Executor from ....framework import IrGraph from ....log_helper import get_logger -from .quantization_pass import QuantizationTransformPass, QuantizationTransformPassV2, QuantizationFreezePass, QuantWeightPass, AddQuantDequantPass, AddQuantDequantPassV2 +from .quantization_pass import ( + QuantizationTransformPass, + QuantizationTransformPassV2, + QuantizationFreezePass, + QuantWeightPass, + AddQuantDequantPass, + AddQuantDequantPassV2, +) from .cal_kl_threshold import cal_kl_threshold from .adaround import run_adaround from . import utils @@ -43,9 +50,9 @@ __all__ = [ 'PostTrainingQuantizationProgram', ] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def _all_persistable_var_names(program): @@ -68,8 +75,9 @@ def _remove_unused_var_nodes(graph): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, graph.all_var_nodes() + ) } graph.safe_remove_nodes(all_unused_vars) return graph @@ -84,12 +92,9 @@ def _remove_ctrl_vars(graph): return graph -def _apply_pass(scope, - graph, - pass_name, - attrs=None, - attr_values=None, - debug=False): +def _apply_pass( + scope, graph, pass_name, attrs=None, attr_values=None, debug=False +): ir_pass = core.get_pass(pass_name) cpp_graph = graph.graph if not cpp_graph.has('__param_scope__'): @@ -114,37 +119,39 @@ class PostTrainingQuantization(object): quantized variables. 
""" - def __init__(self, - executor, - model_dir, - scope=None, - model_filename=None, - params_filename=None, - batch_generator=None, - sample_generator=None, - data_loader=None, - batch_size=10, - batch_nums=None, - algo="KL", - hist_percent=0.99999, - quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"], - round_type='round', - learning_rate=0.001, - is_full_quantize=False, - bias_correction=False, - activation_bits=8, - weight_bits=8, - activation_quantize_type='range_abs_max', - weight_quantize_type='channel_wise_abs_max', - onnx_format=False, - freeze_model=True, - optimize_model=False, - is_use_cache_file=False, - skip_tensor_list=None, - same_scale_tensor_list=None, - cache_dir=None, - scale_dict=None, - return_graph=False): + def __init__( + self, + executor, + model_dir, + scope=None, + model_filename=None, + params_filename=None, + batch_generator=None, + sample_generator=None, + data_loader=None, + batch_size=10, + batch_nums=None, + algo="KL", + hist_percent=0.99999, + quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"], + round_type='round', + learning_rate=0.001, + is_full_quantize=False, + bias_correction=False, + activation_bits=8, + weight_bits=8, + activation_quantize_type='range_abs_max', + weight_quantize_type='channel_wise_abs_max', + onnx_format=False, + freeze_model=True, + optimize_model=False, + is_use_cache_file=False, + skip_tensor_list=None, + same_scale_tensor_list=None, + cache_dir=None, + scale_dict=None, + return_graph=False, + ): ''' Constructor. @@ -274,38 +281,65 @@ class PostTrainingQuantization(object): ''' self._support_activation_quantize_type = [ - 'range_abs_max', 'moving_average_abs_max', 'abs_max' + 'range_abs_max', + 'moving_average_abs_max', + 'abs_max', ] self._support_weight_quantize_type = ['abs_max', 'channel_wise_abs_max'] self._support_algo_type = [ - 'KL', 'hist', 'avg', 'mse', 'emd', 'abs_max', 'min_max', 'ptf' + 'KL', + 'hist', + 'avg', + 'mse', + 'emd', + 'abs_max', + 'min_max', + 'ptf', ] assert round_type in ['adaround', 'round'] self._round_type = round_type self._learning_rate = learning_rate self._dynamic_quantize_op_type = ['lstm'] - self._support_quantize_op_type = \ - list(set(utils._weight_supported_quantizable_op_type + - utils._act_supported_quantizable_op_type + - self._dynamic_quantize_op_type)) + self._support_quantize_op_type = list( + set( + utils._weight_supported_quantizable_op_type + + utils._act_supported_quantizable_op_type + + self._dynamic_quantize_op_type + ) + ) # Check inputs assert executor is not None, "The executor cannot be None." - assert any([gen is not None] for gen in [sample_generator, - batch_generator, data_loader]), "The sample_generator, batch_generator " \ + assert any( + [gen is not None] + for gen in [sample_generator, batch_generator, data_loader] + ), ( + "The sample_generator, batch_generator " "and data_loader cannot be None in the same time." + ) if data_loader is not None: - assert isinstance(data_loader, (io.DataLoader, type(isgeneratorfunction), reader.GeneratorLoader)), \ - "data_loader only accepts `paddle.io.DataLoader` or Generator instance." + assert isinstance( + data_loader, + ( + io.DataLoader, + type(isgeneratorfunction), + reader.GeneratorLoader, + ), + ), "data_loader only accepts `paddle.io.DataLoader` or Generator instance." assert batch_size > 0, "The batch_size should be greater than 0." - assert algo in self._support_algo_type, \ - "The algo should be KL, hist, mse, avg, abs_max, min_max or ptf." 
- assert activation_quantize_type in self._support_activation_quantize_type, \ - "The activation_quantize_type ({}) should in ({}).".format( - activation_quantize_type, self._support_activation_quantize_type) - assert weight_quantize_type in self._support_weight_quantize_type, \ - "The weight_quantize_type ({}) shoud in ({}).".format( - weight_quantize_type, self._support_weight_quantize_type) + assert ( + algo in self._support_algo_type + ), "The algo should be KL, hist, mse, avg, abs_max, min_max or ptf." + assert ( + activation_quantize_type in self._support_activation_quantize_type + ), "The activation_quantize_type ({}) should in ({}).".format( + activation_quantize_type, self._support_activation_quantize_type + ) + assert ( + weight_quantize_type in self._support_weight_quantize_type + ), "The weight_quantize_type ({}) shoud in ({}).".format( + weight_quantize_type, self._support_weight_quantize_type + ) # Save input params self._bias_correction = bias_correction @@ -333,8 +367,9 @@ class PostTrainingQuantization(object): else: self._quantizable_op_type = quantizable_op_type for op_type in self._quantizable_op_type: - assert op_type in self._support_quantize_op_type, \ + assert op_type in self._support_quantize_op_type, ( op_type + " is not supported for quantization." + ) self._optimize_model = optimize_model # Define variables @@ -389,16 +424,18 @@ class PostTrainingQuantization(object): if self._algo in ["KL", "hist"]: batch_id = 0 with tqdm( - total=self._batch_nums, - bar_format= - 'Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + total=self._batch_nums, + bar_format='Preparation stage, Run batch:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for data in self._data_loader(): - self._executor.run(program=self._program, - feed=data, - fetch_list=self._fetch_list, - return_numpy=False, - scope=self._scope) + self._executor.run( + program=self._program, + feed=data, + fetch_list=self._fetch_list, + return_numpy=False, + scope=self._scope, + ) self._collect_activation_abs_min_max() batch_id += 1 t.update() @@ -407,16 +444,19 @@ class PostTrainingQuantization(object): self._init_sampling_act_histogram() batch_id = 0 - with tqdm(total=self._batch_nums, - bar_format= - 'Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=self._batch_nums, + bar_format='Sampling stage, Run batch:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for data in self._data_loader(): - self._executor.run(program=self._program, - feed=data, - fetch_list=self._fetch_list, - return_numpy=False, - scope=self._scope) + self._executor.run( + program=self._program, + feed=data, + fetch_list=self._fetch_list, + return_numpy=False, + scope=self._scope, + ) self._sampling() batch_id += 1 t.update() @@ -425,8 +465,9 @@ class PostTrainingQuantization(object): if self._algo == 'avg': for var_name in self._quantized_act_var_name: - self._quantized_threshold[var_name] = \ - np.array(self._quantized_var_avg[var_name]).mean() + self._quantized_threshold[var_name] = np.array( + self._quantized_var_avg[var_name] + ).mean() if self._algo in ["KL", "hist"]: self._calculate_kl_hist_threshold() @@ -444,10 +485,13 @@ class PostTrainingQuantization(object): if not self.FLAG: self._save_output_threshold() - if any(op_type in self._quantizable_op_type - for op_type in self._dynamic_quantize_op_type): + if any( + op_type in self._quantizable_op_type + for op_type in self._dynamic_quantize_op_type + ): self._collect_dynamic_quantize_op_threshold( - 
self._dynamic_quantize_op_type) + self._dynamic_quantize_op_type + ) utils.move_persistable_var_to_global_block(self._program) @@ -463,23 +507,24 @@ class PostTrainingQuantization(object): scale_dict = self._quantized_var_threshold else: scale_dict = self._quantized_threshold - run_adaround(self._data_loader, - self._program, - self._fetch_list, - self._executor, - self._scope, - self._place, - self._quantized_op_pairs, - self._weight_op_pairs, - scale_dict, - num_iterations=self._batch_nums, - bias_correction=self._bias_correction, - lr=self._learning_rate) - - def save_quantized_model(self, - save_model_path, - model_filename=None, - params_filename=None): + run_adaround( + self._data_loader, + self._program, + self._fetch_list, + self._executor, + self._scope, + self._place, + self._quantized_op_pairs, + self._weight_op_pairs, + scale_dict, + num_iterations=self._batch_nums, + bias_correction=self._bias_correction, + lr=self._learning_rate, + ) + + def save_quantized_model( + self, save_model_path, model_filename=None, params_filename=None + ): ''' Save the quantized model to the disk. @@ -494,14 +539,16 @@ class PostTrainingQuantization(object): Returns: None ''' - io.save_inference_model(dirname=save_model_path, - model_filename=model_filename, - params_filename=params_filename, - feeded_var_names=self._feed_list, - target_vars=self._fetch_list, - executor=self._executor, - main_program=self._program, - clip_extra=self._clip_extra) + io.save_inference_model( + dirname=save_model_path, + model_filename=model_filename, + params_filename=params_filename, + feeded_var_names=self._feed_list, + target_vars=self._fetch_list, + executor=self._executor, + main_program=self._program, + clip_extra=self._clip_extra, + ) _logger.info("The quantized model is saved in " + save_model_path) def _load_model_data(self): @@ -510,36 +557,49 @@ class PostTrainingQuantization(object): ''' if self._program is None: _logger.info("Load model and set data loader ...") - [self._program, self._feed_list, self._fetch_list] = \ - io.load_inference_model(dirname=self._model_dir, - executor=self._executor, - model_filename=self._model_filename, - params_filename=self._params_filename) + [ + self._program, + self._feed_list, + self._fetch_list, + ] = io.load_inference_model( + dirname=self._model_dir, + executor=self._executor, + model_filename=self._model_filename, + params_filename=self._params_filename, + ) if self._optimize_model: self._optimize_fp32_model() - feed_vars = [framework._get_var(str(var_name), self._program) \ - for var_name in self._feed_list] + feed_vars = [ + framework._get_var(str(var_name), self._program) + for var_name in self._feed_list + ] if self._data_loader is not None: - self._batch_nums = self._batch_nums if self._batch_nums else len( - self._data_loader) + self._batch_nums = ( + self._batch_nums if self._batch_nums else len(self._data_loader) + ) return - self._data_loader = io.DataLoader.from_generator(feed_list=feed_vars, - capacity=3 * - self._batch_size, - iterable=True) + self._data_loader = io.DataLoader.from_generator( + feed_list=feed_vars, capacity=3 * self._batch_size, iterable=True + ) if self._sample_generator is not None: - self._data_loader.set_sample_generator(self._sample_generator, - batch_size=self._batch_size, - drop_last=True, - places=self._place) + self._data_loader.set_sample_generator( + self._sample_generator, + batch_size=self._batch_size, + drop_last=True, + places=self._place, + ) elif self._batch_generator is not None: - 
self._data_loader.set_batch_generator(self._batch_generator, - places=self._place) - self._batch_nums = self._batch_nums if self._batch_nums else len( - list(self._data_loader)) + self._data_loader.set_batch_generator( + self._batch_generator, places=self._place + ) + self._batch_nums = ( + self._batch_nums + if self._batch_nums + else len(list(self._data_loader)) + ) def _optimize_fp32_model(self): ''' @@ -552,8 +612,9 @@ class PostTrainingQuantization(object): graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass') graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass') graph = _apply_pass(self._scope, graph, 'conv_eltwiseadd_bn_fuse_pass') - graph = _apply_pass(self._scope, graph, - 'depthwise_conv_eltwiseadd_bn_fuse_pass') + graph = _apply_pass( + self._scope, graph, 'depthwise_conv_eltwiseadd_bn_fuse_pass' + ) self._program = graph.to_program() @@ -584,26 +645,39 @@ class PostTrainingQuantization(object): op._set_attr("op_namescope", "skip_quant") op_type = op.type - if self._is_full_quantize and \ - op_type not in self._quantizable_op_type: - _logger.warning(op_type + - " is not supported for quantization.") + if ( + self._is_full_quantize + and op_type not in self._quantizable_op_type + ): + _logger.warning( + op_type + " is not supported for quantization." + ) # For quantized ops, sample inputs and outputs if op_type in self._quantizable_op_type: - collect_var_name(utils._get_op_input_var_names(op), - persistable_var_names, op_type) - collect_var_name(utils._get_op_output_var_names(op), - persistable_var_names, op_type) + collect_var_name( + utils._get_op_input_var_names(op), + persistable_var_names, + op_type, + ) + collect_var_name( + utils._get_op_output_var_names(op), + persistable_var_names, + op_type, + ) # collect quanted op output var name for out_var_name in utils._get_op_output_var_names(op): for in_var_name in utils._get_op_input_var_names(op): if in_var_name in persistable_var_names: self._quantized_op_pairs[ - in_var_name] = out_var_name + in_var_name + ] = out_var_name # For other op, only sample output scale elif op_type in self._out_scale_op_list: - collect_var_name(utils._get_op_output_var_names(op), - persistable_var_names, op_type) + collect_var_name( + utils._get_op_output_var_names(op), + persistable_var_names, + op_type, + ) def _set_activation_persistable(self): ''' @@ -651,15 +725,19 @@ class PostTrainingQuantization(object): abs_max_value = float(np.max(np.abs(var_tensor))) elif self._weight_quantize_type == "channel_wise_abs_max": abs_max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): abs_max_value.append( - float(np.max(np.abs(var_tensor[:, i])))) + float(np.max(np.abs(var_tensor[:, i]))) + ) else: for i in range(var_tensor.shape[0]): abs_max_value.append( - float(np.max(np.abs(var_tensor[i])))) + float(np.max(np.abs(var_tensor[i]))) + ) self._quantized_threshold[var_name] = abs_max_value _logger.info("MSE searching stage ...") for var_name in self._quantized_act_var_name: @@ -673,16 +751,19 @@ class PostTrainingQuantization(object): while s <= 1.0: scale = s * abs_max_value s += 0.02 - bins = 2**(self._activation_bits - 1) - 1 + bins = 2 ** (self._activation_bits - 1) - 1 if self._onnx_format: - quant_var = np.clip(np.round(var_tensor / scale * bins), - -bins - 1, bins) + quant_var = np.clip( + np.round(var_tensor / scale * bins), -bins - 1, bins + ) 
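# Standalone NumPy sketch of the MSE scale search performed in this hunk: sweep
# candidate scales s * abs_max, fake quant-dequantize, and keep the scale with the
# smallest mean-squared error. The starting value of `s` is not visible here, so
# 0.3 is an assumption; the quant/dequant math mirrors the onnx_format branch above.
import numpy as np

def mse_search_scale(var_tensor, activation_bits=8, s_start=0.3, s_step=0.02):
    abs_max_value = float(np.max(np.abs(var_tensor)))
    bins = 2 ** (activation_bits - 1) - 1
    best_loss, best_scale = float('inf'), abs_max_value
    s = s_start
    while s <= 1.0:
        scale = s * abs_max_value
        s += s_step
        quant_var = np.clip(np.round(var_tensor / scale * bins), -bins - 1, bins)
        quant_dequant_var = quant_var / bins * scale
        mse_loss = ((var_tensor - quant_dequant_var) ** 2).mean()
        if mse_loss <= best_loss:
            best_loss, best_scale = mse_loss, scale
    return best_scale

# e.g. mse_search_scale(np.random.randn(4096).astype('float32'))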
quant_dequant_var = quant_var / bins * scale else: - quant_dequant_var = np.round( - np.clip(var_tensor, 0.0, scale) / scale * - bins) / bins * scale - mse_loss = ((var_tensor - quant_dequant_var)**2).mean() + quant_dequant_var = ( + np.round(np.clip(var_tensor, 0.0, scale) / scale * bins) + / bins + * scale + ) + mse_loss = ((var_tensor - quant_dequant_var) ** 2).mean() if mse_loss <= self._best_calibration_loss[var_name]: self._best_calibration_loss[var_name] = mse_loss self._quantized_threshold[var_name] = scale @@ -695,15 +776,19 @@ class PostTrainingQuantization(object): abs_max_value = float(np.max(np.abs(var_tensor))) elif self._weight_quantize_type == "channel_wise_abs_max": abs_max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): abs_max_value.append( - float(np.max(np.abs(var_tensor[:, i])))) + float(np.max(np.abs(var_tensor[:, i]))) + ) else: for i in range(var_tensor.shape[0]): abs_max_value.append( - float(np.max(np.abs(var_tensor[i])))) + float(np.max(np.abs(var_tensor[i]))) + ) self._quantized_threshold[var_name] = abs_max_value _logger.info("EMD searching stage ...") for var_name in self._quantized_act_var_name: @@ -717,18 +802,21 @@ class PostTrainingQuantization(object): while s <= 1.0: scale = s * abs_max_value s += 0.02 - bins = 2**(self._activation_bits - 1) - 1 + bins = 2 ** (self._activation_bits - 1) - 1 if self._onnx_format: - quant_var = np.clip(np.round(var_tensor / scale * bins), - -bins - 1, bins) + quant_var = np.clip( + np.round(var_tensor / scale * bins), -bins - 1, bins + ) quant_dequant_var = quant_var / bins * scale else: - quant_dequant_var = np.round( - np.clip(var_tensor, 0.0, scale) / scale * - bins) / bins * scale + quant_dequant_var = ( + np.round(np.clip(var_tensor, 0.0, scale) / scale * bins) + / bins + * scale + ) emd_loss = np.abs( - np.mean(var_tensor) - np.mean(quant_dequant_var)) + np.abs( - np.std(var_tensor) - np.std(quant_dequant_var)) + np.mean(var_tensor) - np.mean(quant_dequant_var) + ) + np.abs(np.std(var_tensor) - np.std(quant_dequant_var)) if emd_loss <= self._best_calibration_loss[var_name]: self._best_calibration_loss[var_name] = emd_loss self._quantized_threshold[var_name] = scale @@ -741,24 +829,34 @@ class PostTrainingQuantization(object): abs_max_value = float(np.max(np.abs(var_tensor))) elif self._weight_quantize_type == "channel_wise_abs_max": abs_max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): abs_max_value.append( - float(np.max(np.abs(var_tensor[:, i])))) + float(np.max(np.abs(var_tensor[:, i]))) + ) else: for i in range(var_tensor.shape[0]): abs_max_value.append( - float(np.max(np.abs(var_tensor[i])))) + float(np.max(np.abs(var_tensor[i]))) + ) self._quantized_threshold[var_name] = abs_max_value for var_name in self._quantized_act_var_name: var_tensor = utils.load_variable_data(self._scope, var_name) abs_max_value = float(np.max(np.abs(var_tensor))) - if (var_name not in self._quantized_var_avg): + if var_name not in self._quantized_var_avg: self._quantized_var_avg[var_name] = [] - abs_avg_value = float(np.mean(np.max( \ - np.abs(var_tensor.reshape(var_tensor.shape[0], -1)), axis=(1)))) + abs_avg_value = float( + np.mean( + np.max( + np.abs(var_tensor.reshape(var_tensor.shape[0], -1)), + 
axis=(1), + ) + ) + ) self._quantized_var_avg[var_name].append(abs_avg_value) continue @@ -770,22 +868,27 @@ class PostTrainingQuantization(object): abs_max_value = float(np.max(np.abs(var_tensor))) elif self._weight_quantize_type == "channel_wise_abs_max": abs_max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): abs_max_value.append( - float(np.max(np.abs(var_tensor[:, i])))) + float(np.max(np.abs(var_tensor[:, i]))) + ) else: for i in range(var_tensor.shape[0]): abs_max_value.append( - float(np.max(np.abs(var_tensor[i])))) + float(np.max(np.abs(var_tensor[i]))) + ) self._quantized_threshold[var_name] = abs_max_value for var_name in self._quantized_act_var_name: var_tensor = utils.load_variable_data(self._scope, var_name) abs_max_value = float(np.max(np.abs(var_tensor))) - if (var_name not in self._quantized_threshold) or \ - (abs_max_value > self._quantized_threshold[var_name]): + if (var_name not in self._quantized_threshold) or ( + abs_max_value > self._quantized_threshold[var_name] + ): self._quantized_threshold[var_name] = abs_max_value def _sample_min_max(self): @@ -798,8 +901,10 @@ class PostTrainingQuantization(object): elif self._weight_quantize_type == "channel_wise_abs_max": min_value = [] max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): min_value.append(float(np.min(var_tensor[:, i]))) max_value.append(float(np.max(var_tensor[:, i]))) @@ -814,11 +919,13 @@ class PostTrainingQuantization(object): var_tensor = utils.load_variable_data(self._scope, var_name) min_value = float(np.min(var_tensor)) max_value = float(np.max(var_tensor)) - if (var_name not in self._quantized_var_min) or \ - (min_value < self._quantized_var_min[var_name]): + if (var_name not in self._quantized_var_min) or ( + min_value < self._quantized_var_min[var_name] + ): self._quantized_var_min[var_name] = min_value - if (var_name not in self._quantized_var_max) or \ - (max_value > self._quantized_var_max[var_name]): + if (var_name not in self._quantized_var_max) or ( + max_value > self._quantized_var_max[var_name] + ): self._quantized_var_max[var_name] = max_value def _sample_histogram(self): @@ -841,39 +948,47 @@ class PostTrainingQuantization(object): abs_max_value = float(np.max(np.abs(var_tensor))) elif self._weight_quantize_type == "channel_wise_abs_max": abs_max_value = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(var_tensor.shape[1]): abs_max_value.append( - float(np.max(np.abs(var_tensor[:, i])))) + float(np.max(np.abs(var_tensor[:, i]))) + ) else: for i in range(var_tensor.shape[0]): abs_max_value.append( - float(np.max(np.abs(var_tensor[i])))) + float(np.max(np.abs(var_tensor[i]))) + ) self._quantized_threshold[var_name] = abs_max_value for var_name in self._quantized_act_var_name: var_tensor = utils.load_variable_data(self._scope, var_name) abs_max_value = float(np.max(np.abs(var_tensor))) - q_max = 2**(self._activation_bits - 1) - 1 + q_max = 2 ** (self._activation_bits - 1) - 1 scale8 = abs_max_value / q_max scale4 = scale8 / 2 scale2 = scale4 / 2 scale1 = scale2 / 2 - quant_dequant_var_scale1 = np.clip(np.round(var_tensor / scale1), 0, - 
q_max) * scale1 - quant_dequant_var_scale2 = np.clip(np.round(var_tensor / scale2), 0, - q_max) * scale2 - quant_dequant_var_scale4 = np.clip(np.round(var_tensor / scale4), 0, - q_max) * scale4 - quant_dequant_var_scale8 = np.clip(np.round(var_tensor / scale8), 0, - q_max) * scale8 + quant_dequant_var_scale1 = ( + np.clip(np.round(var_tensor / scale1), 0, q_max) * scale1 + ) + quant_dequant_var_scale2 = ( + np.clip(np.round(var_tensor / scale2), 0, q_max) * scale2 + ) + quant_dequant_var_scale4 = ( + np.clip(np.round(var_tensor / scale4), 0, q_max) * scale4 + ) + quant_dequant_var_scale8 = ( + np.clip(np.round(var_tensor / scale8), 0, q_max) * scale8 + ) score1 = utils.l2_loss(var_tensor, quant_dequant_var_scale1) score2 = utils.l2_loss(var_tensor, quant_dequant_var_scale2) score4 = utils.l2_loss(var_tensor, quant_dequant_var_scale4) score8 = utils.l2_loss(var_tensor, quant_dequant_var_scale8) score = [score1, score2, score4, score8] - mask = 2**score.index(min(score)) + mask = 2 ** score.index(min(score)) scale = scale1 * mask threshold = q_max * scale self._quantized_threshold[var_name] = threshold @@ -882,18 +997,21 @@ class PostTrainingQuantization(object): ''' Save input threshold to the quantized op. ''' - assert self._algo == "min_max", \ - "The algo should be min_max to save input threshold." + assert ( + self._algo == "min_max" + ), "The algo should be min_max to save input threshold." for block_id in range(len(self._program.blocks)): for op in self._program.blocks[block_id].ops: if op.type in self._quantizable_op_type: for var_name in utils._get_op_input_var_names(op): assert var_name in self._quantized_var_min assert var_name in self._quantized_var_max - op._set_attr(var_name + ".min", - self._quantized_var_min[var_name]) - op._set_attr(var_name + ".max", - self._quantized_var_max[var_name]) + op._set_attr( + var_name + ".min", self._quantized_var_min[var_name] + ) + op._set_attr( + var_name + ".max", self._quantized_var_max[var_name] + ) op._set_attr("with_quant_attr", True) def _collect_activation_abs_min_max(self): @@ -908,7 +1026,8 @@ class PostTrainingQuantization(object): max_value = float(np.max(var_tensor)) if var_name not in self._sampling_act_abs_min_max: self._sampling_act_abs_min_max[var_name] = [ - min_value, max_value + min_value, + max_value, ] else: if min_value < self._sampling_act_abs_min_max[var_name][0]: @@ -924,9 +1043,9 @@ class PostTrainingQuantization(object): if var_name not in self._sampling_act_histogram: min_val = self._sampling_act_abs_min_max[var_name][0] max_val = self._sampling_act_abs_min_max[var_name][1] - hist, hist_edeges = np.histogram([], - bins=self._histogram_bins, - range=(min_val, max_val)) + hist, hist_edeges = np.histogram( + [], bins=self._histogram_bins, range=(min_val, max_val) + ) self._sampling_act_histogram[var_name] = [hist, hist_edeges] def _calculate_kl_hist_threshold(self): @@ -943,26 +1062,32 @@ class PostTrainingQuantization(object): weight_threshold = float(np.max(np.abs(weight_data))) elif self._weight_quantize_type == "channel_wise_abs_max": weight_threshold = [] - if self._weight_op_pairs[ - var_name] in utils._channelwise_quant_axis1_ops: + if ( + self._weight_op_pairs[var_name] + in utils._channelwise_quant_axis1_ops + ): for i in range(weight_data.shape[1]): weight_threshold.append( - float(np.max(np.abs(weight_data[:, i])))) + float(np.max(np.abs(weight_data[:, i]))) + ) else: for i in range(weight_data.shape[0]): weight_threshold.append( - float(np.max(np.abs(weight_data[i])))) + 
float(np.max(np.abs(weight_data[i]))) + ) self._quantized_var_threshold[var_name] = weight_threshold for var_name in self._quantized_act_var_name: hist, hist_edeges = self._sampling_act_histogram[var_name] if self._algo == "KL": bin_width = hist_edeges[1] - hist_edeges[0] - self._quantized_var_threshold[var_name] = \ - cal_kl_threshold(hist, bin_width, self._activation_bits) + self._quantized_var_threshold[var_name] = cal_kl_threshold( + hist, bin_width, self._activation_bits + ) elif self._algo == "hist": - self._quantized_var_threshold[var_name] = \ - self._get_hist_scaling_factor(hist, hist_edeges) + self._quantized_var_threshold[ + var_name + ] = self._get_hist_scaling_factor(hist, hist_edeges) def _update_program(self): ''' @@ -986,7 +1111,8 @@ class PostTrainingQuantization(object): activation_bits=self._activation_bits, activation_quantize_type=self._activation_quantize_type, weight_quantize_type=self._weight_quantize_type, - quantizable_op_type=major_quantizable_op_types) + quantizable_op_type=major_quantizable_op_types, + ) else: transform_pass = QuantizationTransformPassV2( scope=self._scope, @@ -995,7 +1121,8 @@ class PostTrainingQuantization(object): activation_bits=self._activation_bits, activation_quantize_type=self._activation_quantize_type, weight_quantize_type=self._weight_quantize_type, - quantizable_op_type=major_quantizable_op_types) + quantizable_op_type=major_quantizable_op_types, + ) for sub_graph in graph.all_sub_graphs(): # Insert fake_quant/fake_dequantize op must in test graph, so @@ -1012,13 +1139,15 @@ class PostTrainingQuantization(object): add_quant_dequant_pass = AddQuantDequantPass( scope=self._scope, place=self._place, - quantizable_op_type=minor_quantizable_op_types) + quantizable_op_type=minor_quantizable_op_types, + ) else: add_quant_dequant_pass = AddQuantDequantPassV2( scope=self._scope, place=self._place, quantizable_op_type=minor_quantizable_op_types, - is_full_quantized=True) + is_full_quantized=True, + ) for sub_graph in graph.all_sub_graphs(): sub_graph._for_test = True @@ -1038,41 +1167,49 @@ class PostTrainingQuantization(object): for tensor_name in tensor_list: if '#' in tensor_name: real_tensor_name, opera, scalar = tensor_name.split( - '#') + '#' + ) if real_tensor_name not in scale_dict.keys(): continue if opera == '*': scale_dict[real_tensor_name] = float( - scale_dict[real_tensor_name]) * float( - scalar) + scale_dict[real_tensor_name] + ) * float(scalar) elif opera == '/': scale_dict[real_tensor_name] = float( - scale_dict[real_tensor_name]) / float( - scalar) - max_scale = scale_dict[ - real_tensor_name] if max_scale is None else max( - max_scale, scale_dict[real_tensor_name]) + scale_dict[real_tensor_name] + ) / float(scalar) + max_scale = ( + scale_dict[real_tensor_name] + if max_scale is None + else max( + max_scale, scale_dict[real_tensor_name] + ) + ) else: if tensor_name not in scale_dict.keys(): continue - max_scale = scale_dict[ - tensor_name] if max_scale is None else max( - max_scale, scale_dict[tensor_name]) + max_scale = ( + scale_dict[tensor_name] + if max_scale is None + else max(max_scale, scale_dict[tensor_name]) + ) for tensor_name in tensor_list: if '#' in tensor_name: real_tensor_name, opera, scalar = tensor_name.split( - '#') + '#' + ) if real_tensor_name not in scale_dict.keys(): continue if opera == '*': scale_dict[ - real_tensor_name] = max_scale / float( - scalar) + real_tensor_name + ] = max_scale / float(scalar) elif opera == '/': scale_dict[ - real_tensor_name] = max_scale * float( - scalar) + real_tensor_name 
+ ] = max_scale * float(scalar) else: if tensor_name not in scale_dict.keys(): continue @@ -1080,11 +1217,18 @@ class PostTrainingQuantization(object): self._scale_dict = scale_dict for key, val in self._scale_dict.items(): - utils.set_variable_data(self._scope, self._place, key + "@scale", - np.array([val], dtype=np.float32)) - utils.set_variable_data(self._scope, self._place, - key + ".quant_dequant@scale", - np.array([val], dtype=np.float32)) + utils.set_variable_data( + self._scope, + self._place, + key + "@scale", + np.array([val], dtype=np.float32), + ) + utils.set_variable_data( + self._scope, + self._place, + key + ".quant_dequant@scale", + np.array([val], dtype=np.float32), + ) if not self._onnx_format: # apply QuantizationFreezePass, and obtain the final quant model @@ -1097,7 +1241,8 @@ class PostTrainingQuantization(object): round_type=self._round_type, activation_bits=self._activation_bits, weight_quantize_type=self._weight_quantize_type, - quantizable_op_type=major_quantizable_op_types) + quantizable_op_type=major_quantizable_op_types, + ) for sub_graph in graph.all_sub_graphs(): sub_graph._for_test = True @@ -1116,16 +1261,20 @@ class PostTrainingQuantization(object): ''' self._calibration_scales = {} - def save_info(op_node, out_var_name, threshold_map, out_info_name, - quantized_type): - assert out_var_name in threshold_map, \ - "The output ({}) of {} node does not have threshold.".format( - out_var_name, op_node.type) + def save_info( + op_node, out_var_name, threshold_map, out_info_name, quantized_type + ): + assert ( + out_var_name in threshold_map + ), "The output ({}) of {} node does not have threshold.".format( + out_var_name, op_node.type + ) if self._onnx_format: # For easy extension, every var_node set a dict to save parameters of quant. self._calibration_scales[var_name] = {} self._calibration_scales[var_name]['scale'] = threshold_map[ - var_name] + var_name + ] else: op_node._set_attr(out_info_name, threshold_map[var_name]) op_node._set_attr("with_quant_attr", True) @@ -1134,42 +1283,78 @@ class PostTrainingQuantization(object): def analysis_and_save_info(op_node, out_var_name): argname_index = utils._get_output_name_index(op_node, out_var_name) - assert argname_index is not None, \ + assert argname_index is not None, ( out_var_name + " is not the output of the op" + ) if self._algo == "KL": # For compatibility, we save output threshold by two methods. - save_info(op_node, out_var_name, self._quantized_var_threshold, - "out_threshold", "post_kl") save_info( - op_node, out_var_name, self._quantized_var_threshold, + op_node, + out_var_name, + self._quantized_var_threshold, + "out_threshold", + "post_kl", + ) + save_info( + op_node, + out_var_name, + self._quantized_var_threshold, argname_index[0] + str(argname_index[1]) + "_threshold", - "post_kl") + "post_kl", + ) elif self._algo == "hist": # For compatibility, we save output threshold by two methods. 
- save_info(op_node, out_var_name, self._quantized_var_threshold, - "out_threshold", "post_hist") save_info( - op_node, out_var_name, self._quantized_var_threshold, + op_node, + out_var_name, + self._quantized_var_threshold, + "out_threshold", + "post_hist", + ) + save_info( + op_node, + out_var_name, + self._quantized_var_threshold, argname_index[0] + str(argname_index[1]) + "_threshold", - "post_hist") + "post_hist", + ) elif self._algo in ["avg", "abs_max", "mse", "emd", "ptf"]: - save_info(op_node, out_var_name, self._quantized_threshold, - "out_threshold", "post_" + str(self._algo)) save_info( - op_node, out_var_name, self._quantized_threshold, + op_node, + out_var_name, + self._quantized_threshold, + "out_threshold", + "post_" + str(self._algo), + ) + save_info( + op_node, + out_var_name, + self._quantized_threshold, argname_index[0] + str(argname_index[1]) + "_threshold", - "post_" + str(self._algo)) + "post_" + str(self._algo), + ) elif self._algo == "min_max": - save_info(op_node, out_var_name, self._quantized_var_min, - "out_min", "post_min_max") - save_info(op_node, out_var_name, self._quantized_var_max, - "out_max", "post_min_max") + save_info( + op_node, + out_var_name, + self._quantized_var_min, + "out_min", + "post_min_max", + ) + save_info( + op_node, + out_var_name, + self._quantized_var_max, + "out_max", + "post_min_max", + ) for block_id in range(len(self._program.blocks)): for op in self._program.blocks[block_id].ops: - if op.type in (self._quantizable_op_type + - self._out_scale_op_list): + if op.type in ( + self._quantizable_op_type + self._out_scale_op_list + ): out_var_names = utils._get_op_output_var_names(op) for var_name in out_var_names: analysis_and_save_info(op, var_name) @@ -1221,55 +1406,77 @@ class PostTrainingQuantization(object): class PostTrainingQuantizationProgram(PostTrainingQuantization): - - def __init__(self, - executor, - program, - feed_list=None, - fetch_list=None, - scope=None, - batch_generator=None, - sample_generator=None, - data_loader=None, - batch_size=10, - batch_nums=None, - algo="KL", - hist_percent=0.99999, - quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"], - round_type='round', - learning_rate=0.001, - is_full_quantize=False, - bias_correction=False, - activation_bits=8, - weight_bits=8, - activation_quantize_type='range_abs_max', - weight_quantize_type='channel_wise_abs_max', - onnx_format=False, - freeze_model=True, - optimize_model=False, - is_use_cache_file=False, - skip_tensor_list=None, - same_scale_tensor_list=None, - cache_dir=None, - scale_dict=None, - return_graph=True): - super().__init__(executor, scope, None, None, None, batch_generator, - sample_generator, data_loader, batch_size, batch_nums, - algo, hist_percent, quantizable_op_type, round_type, - learning_rate, is_full_quantize, bias_correction, - activation_bits, weight_bits, activation_quantize_type, - weight_quantize_type, onnx_format, freeze_model, - optimize_model, is_use_cache_file, skip_tensor_list, - same_scale_tensor_list, cache_dir, scale_dict, - return_graph) + def __init__( + self, + executor, + program, + feed_list=None, + fetch_list=None, + scope=None, + batch_generator=None, + sample_generator=None, + data_loader=None, + batch_size=10, + batch_nums=None, + algo="KL", + hist_percent=0.99999, + quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"], + round_type='round', + learning_rate=0.001, + is_full_quantize=False, + bias_correction=False, + activation_bits=8, + weight_bits=8, + activation_quantize_type='range_abs_max', + 
weight_quantize_type='channel_wise_abs_max', + onnx_format=False, + freeze_model=True, + optimize_model=False, + is_use_cache_file=False, + skip_tensor_list=None, + same_scale_tensor_list=None, + cache_dir=None, + scale_dict=None, + return_graph=True, + ): + super().__init__( + executor, + scope, + None, + None, + None, + batch_generator, + sample_generator, + data_loader, + batch_size, + batch_nums, + algo, + hist_percent, + quantizable_op_type, + round_type, + learning_rate, + is_full_quantize, + bias_correction, + activation_bits, + weight_bits, + activation_quantize_type, + weight_quantize_type, + onnx_format, + freeze_model, + optimize_model, + is_use_cache_file, + skip_tensor_list, + same_scale_tensor_list, + cache_dir, + scale_dict, + return_graph, + ) self.FLAG = False self._program = program if self._program is not None: self.FLAG = True - assert feed_list is not None, \ - "Feed list should not be None." - assert fetch_list is not None, \ - "Fetch list should not be None." + assert feed_list is not None, "Feed list should not be None." + assert fetch_list is not None, "Fetch list should not be None." self._feed_list = feed_list self._fetch_list = fetch_list @@ -1298,15 +1505,17 @@ class WeightQuantization(object): self._model_filename = model_filename self._params_filename = params_filename - def quantize_weight_to_int(self, - save_model_dir, - save_model_filename=None, - save_params_filename=None, - quantizable_op_type=["conv2d", "mul"], - weight_bits=8, - weight_quantize_type="channel_wise_abs_max", - generate_test_model=False, - threshold_rate=0.0): + def quantize_weight_to_int( + self, + save_model_dir, + save_model_filename=None, + save_params_filename=None, + quantizable_op_type=["conv2d", "mul"], + weight_bits=8, + weight_quantize_type="channel_wise_abs_max", + generate_test_model=False, + threshold_rate=0.0, + ): ''' In order to reduce the size of model, this api quantizes the weight of some ops from float32 to int8/16. In the inference stage, the @@ -1342,28 +1551,45 @@ class WeightQuantization(object): value will be optimized. Default is 0.0. ''' for op_type in quantizable_op_type: - assert op_type in self._supported_quantizable_op_type, \ - "Input error:" + op_type + \ - " is not supported for weight quantization." - assert weight_bits in [8, 16], \ - "Input error: weight_bits should be 8 or 16." - assert weight_quantize_type in self._supported_weight_quantize_type, \ - "Input error: weight_quantize_type should in {}".format( - self._supported_weight_quantize_type) + assert op_type in self._supported_quantizable_op_type, ( + "Input error:" + + op_type + + " is not supported for weight quantization." + ) + assert weight_bits in [ + 8, + 16, + ], "Input error: weight_bits should be 8 or 16." 
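# Hedged NumPy sketch of the abs_max weight quantization that
# _weight_abs_max_quantization applies further below: clip to a threshold, derive
# scale = threshold / quantize_range, and round to int8/int16. Taking quantize_range
# as 2 ** (weight_bits - 1) - 1 is an assumption (it is not shown in this hunk), and
# the threshold_rate path is omitted; only the visible clip/scale/np.around steps
# are mirrored.
import numpy as np

def abs_max_quantize_weight(weight_data, weight_bits=8):
    save_weight_dtype = np.int8 if weight_bits == 8 else np.int16
    quantize_range = 2 ** (weight_bits - 1) - 1
    threshold_value = float(np.max(np.abs(weight_data)))
    clipped = np.clip(weight_data, -threshold_value, threshold_value)
    scale = threshold_value / quantize_range
    quantized = np.around(clipped / scale).astype(save_weight_dtype)
    return quantized, scale

# For the fake-quant test model, dequantize with (quantized * scale).astype(np.float32)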
+ assert ( + weight_quantize_type in self._supported_weight_quantize_type + ), "Input error: weight_quantize_type should in {}".format( + self._supported_weight_quantize_type + ) quantized_model_dir = os.path.join(save_model_dir, "quantized_model") - self._quantize_weight_to_int(quantized_model_dir, save_model_filename, - save_params_filename, quantizable_op_type, - weight_bits, weight_quantize_type, False, - threshold_rate) + self._quantize_weight_to_int( + quantized_model_dir, + save_model_filename, + save_params_filename, + quantizable_op_type, + weight_bits, + weight_quantize_type, + False, + threshold_rate, + ) if generate_test_model: test_model_dir = os.path.join(save_model_dir, "test_model") - self._quantize_weight_to_int(test_model_dir, save_model_filename, - save_params_filename, - quantizable_op_type, weight_bits, - weight_quantize_type, True, - threshold_rate) + self._quantize_weight_to_int( + test_model_dir, + save_model_filename, + save_params_filename, + quantizable_op_type, + weight_bits, + weight_quantize_type, + True, + threshold_rate, + ) def convert_weight_to_fp16(self, save_model_dir): """ @@ -1379,11 +1605,12 @@ class WeightQuantization(object): place = core.CPUPlace() exe = Executor(place) scope = global_scope() - [infer_program, feed_list, fetch_list] = \ - io.load_inference_model(dirname=self._model_dir, - executor=exe, - model_filename=self._model_filename, - params_filename=self._params_filename) + [infer_program, feed_list, fetch_list] = io.load_inference_model( + dirname=self._model_dir, + executor=exe, + model_filename=self._model_filename, + params_filename=self._params_filename, + ) # Clone and save fp16 weights save_program = framework.Program() @@ -1391,27 +1618,31 @@ class WeightQuantization(object): save_var_map = {} for var in infer_program.list_vars(): - if (var.type == core.VarDesc.VarType.RAW) or \ - (not var.persistable) or (var.name in ['feed', 'fetch']) \ - or (var.dtype != core.VarDesc.VarType.FP32): + if ( + (var.type == core.VarDesc.VarType.RAW) + or (not var.persistable) + or (var.name in ['feed', 'fetch']) + or (var.dtype != core.VarDesc.VarType.FP32) + ): continue - #new_var = _clone_var_to_block_(var, save_block) + # new_var = _clone_var_to_block_(var, save_block) new_var = save_block._clone_variable(var) if self._params_filename is not None: save_var_map[new_var.name] = new_var else: - save_file_path = os.path.join(os.path.normpath(save_model_dir), - new_var.name) - save_block.append_op(type='save', - inputs={'X': [new_var]}, - outputs={}, - attrs={ - 'file_path': - os.path.normpath(save_file_path), - 'save_as_fp16': - True - }) + save_file_path = os.path.join( + os.path.normpath(save_model_dir), new_var.name + ) + save_block.append_op( + type='save', + inputs={'X': [new_var]}, + outputs={}, + attrs={ + 'file_path': os.path.normpath(save_file_path), + 'save_as_fp16': True, + }, + ) if self._params_filename is not None: save_var_list = [] @@ -1420,33 +1651,44 @@ class WeightQuantization(object): saved_params_var = save_block.create_var( type=core.VarDesc.VarType.RAW, - name=unique_name.generate("saved_params")) + name=unique_name.generate("saved_params"), + ) saved_params_var.desc.set_persistable(True) - save_path = os.path.join(os.path.normpath(save_model_dir), - self._params_filename) - save_block.append_op(type='save_combine', - inputs={'X': save_var_list}, - outputs={'Y': saved_params_var}, - attrs={ - 'file_path': save_path, - 'save_as_fp16': True - }) + save_path = os.path.join( + os.path.normpath(save_model_dir), self._params_filename 
+ ) + save_block.append_op( + type='save_combine', + inputs={'X': save_var_list}, + outputs={'Y': saved_params_var}, + attrs={'file_path': save_path, 'save_as_fp16': True}, + ) save_program._sync_with_cpp() exe.run(save_program) # Copy model - model_filename = "__model__" if self._model_filename is None \ - else self._model_filename + model_filename = ( + "__model__" + if self._model_filename is None + else self._model_filename + ) src_model = os.path.join(self._model_dir, model_filename) dest_model = os.path.join(save_model_dir, model_filename) shutil.copyfile(src_model, dest_model) - def _quantize_weight_to_int(self, save_model_dir, save_model_filename, - save_params_filename, quantizable_op_type, - weight_bits, weight_quantize_type, for_test, - threshold_rate): + def _quantize_weight_to_int( + self, + save_model_dir, + save_model_filename, + save_params_filename, + quantizable_op_type, + weight_bits, + weight_quantize_type, + for_test, + threshold_rate, + ): """ Generate quantized model or fake quantized model. """ @@ -1454,11 +1696,12 @@ class WeightQuantization(object): place = core.CPUPlace() exe = Executor(place) scope = global_scope() - [program, feed_list, fetch_list] = \ - io.load_inference_model(dirname=self._model_dir, - executor=exe, - model_filename=self._model_filename, - params_filename=self._params_filename) + [program, feed_list, fetch_list] = io.load_inference_model( + dirname=self._model_dir, + executor=exe, + model_filename=self._model_filename, + params_filename=self._params_filename, + ) quantized_ops = [] for index in range(program.num_blocks): @@ -1474,22 +1717,32 @@ class WeightQuantization(object): if var_name in persistable_var_names: if weight_quantize_type == "abs_max": self._weight_abs_max_quantization( - scope, place, weight_bits, threshold_rate, op, - var_name, for_test) + scope, + place, + weight_bits, + threshold_rate, + op, + var_name, + for_test, + ) elif weight_quantize_type == "channel_wise_abs_max": self._weight_channel_wise_abs_max_quantization( - scope, place, weight_bits, op, var_name, for_test) - - io.save_inference_model(dirname=save_model_dir, - feeded_var_names=feed_list, - target_vars=fetch_list, - executor=exe, - main_program=program, - model_filename=save_model_filename, - params_filename=save_params_filename) - - def _weight_abs_max_quantization(self, scope, place, weight_bits, - threshold_rate, op, var_name, for_test): + scope, place, weight_bits, op, var_name, for_test + ) + + io.save_inference_model( + dirname=save_model_dir, + feeded_var_names=feed_list, + target_vars=fetch_list, + executor=exe, + main_program=program, + model_filename=save_model_filename, + params_filename=save_params_filename, + ) + + def _weight_abs_max_quantization( + self, scope, place, weight_bits, threshold_rate, op, var_name, for_test + ): ''' Use abs_max method to quantize weight. 
''' @@ -1501,23 +1754,28 @@ class WeightQuantization(object): if abs(threshold_rate) < 1e-10: threshold_value = np.max(np.abs(weight_data)) else: - threshold_value = self._calculate_threshold(\ - weight_data, threshold_rate) + threshold_value = self._calculate_threshold( + weight_data, threshold_rate + ) weight_data[weight_data > threshold_value] = threshold_value weight_data[weight_data < -threshold_value] = -threshold_value scale = threshold_value / quantize_range - quantized_weight_data = \ - np.around(weight_data / scale).astype(save_weight_dtype) + quantized_weight_data = np.around(weight_data / scale).astype( + save_weight_dtype + ) # Set weight data if not for_test: - utils.set_variable_data(scope, place, var_name, - quantized_weight_data) + utils.set_variable_data( + scope, place, var_name, quantized_weight_data + ) else: - dequantized_weight_data = \ - (quantized_weight_data * scale).astype(np.float32) - utils.set_variable_data(scope, place, var_name, - dequantized_weight_data) + dequantized_weight_data = (quantized_weight_data * scale).astype( + np.float32 + ) + utils.set_variable_data( + scope, place, var_name, dequantized_weight_data + ) # Save info op._set_attr('quantization_type', 'post_weight_abs_max') @@ -1525,9 +1783,9 @@ class WeightQuantization(object): op._set_attr(var_name + "_quant_scale", [scale]) # Save as list op._set_attr("with_quant_attr", True) - def _weight_channel_wise_abs_max_quantization(self, scope, place, - weight_bits, op, var_name, - for_test): + def _weight_channel_wise_abs_max_quantization( + self, scope, place, weight_bits, op, var_name, for_test + ): ''' Use channel_wise_abs_max method to quantize weight. ''' @@ -1537,32 +1795,42 @@ class WeightQuantization(object): # Get quantized scale and weight data weight_data = utils.load_variable_data(scope, var_name) if op.type == "mul": - scales, quantized_weight_data = \ - self._mul_channel_wise_quantization(weight_data, - quantize_range, save_weight_dtype) + scales, quantized_weight_data = self._mul_channel_wise_quantization( + weight_data, quantize_range, save_weight_dtype + ) elif op.type in ["conv2d", "depthwise_conv2d"]: - scales, quantized_weight_data = \ - self._conv_channel_wise_quantization(weight_data, - quantize_range, save_weight_dtype) + ( + scales, + quantized_weight_data, + ) = self._conv_channel_wise_quantization( + weight_data, quantize_range, save_weight_dtype + ) else: _logger.error(op.type + " is not supported by weight quantization") # Set weight data if not for_test: - utils.set_variable_data(scope, place, var_name, - quantized_weight_data) + utils.set_variable_data( + scope, place, var_name, quantized_weight_data + ) else: if op.type == "mul": - dequantized_weight_data = \ - self._mul_channel_wise_dequantization(quantized_weight_data, scales) + dequantized_weight_data = self._mul_channel_wise_dequantization( + quantized_weight_data, scales + ) elif op.type in ["conv2d", "depthwise_conv2d"]: - dequantized_weight_data = \ - self._conv_channel_wise_dequantization(quantized_weight_data, scales) + dequantized_weight_data = ( + self._conv_channel_wise_dequantization( + quantized_weight_data, scales + ) + ) else: - _logger.error(op.type + - " is not supported by weight quantization") - utils.set_variable_data(scope, place, var_name, - dequantized_weight_data) + _logger.error( + op.type + " is not supported by weight quantization" + ) + utils.set_variable_data( + scope, place, var_name, dequantized_weight_data + ) # Save info op._set_attr('quantization_type', 
'post_weight_channel_wise_abs_max') @@ -1570,67 +1838,77 @@ class WeightQuantization(object): op._set_attr(var_name + "_quant_scale", scales) op._set_attr("with_quant_attr", True) - def _conv_channel_wise_quantization(self, weight_data, quantize_range, - save_weight_dtype): + def _conv_channel_wise_quantization( + self, weight_data, quantize_range, save_weight_dtype + ): ''' Get channel wise scale for the weights of conv2d and depthwise_conv2d, and quantize the weights. ''' scales = [] - quantized_weight_data = np.zeros_like(weight_data, - dtype=save_weight_dtype) + quantized_weight_data = np.zeros_like( + weight_data, dtype=save_weight_dtype + ) channel_num = weight_data.shape[0] for i in range(channel_num): scale = np.max(np.abs(weight_data[i])) / quantize_range scales.append(scale) - quantized_weight_data[i] = \ - np.around(weight_data[i] / scale).astype(save_weight_dtype) + quantized_weight_data[i] = np.around(weight_data[i] / scale).astype( + save_weight_dtype + ) return scales, quantized_weight_data def _conv_channel_wise_dequantization(self, quantized_weight_data, scales): ''' For conv2d and depthwise_conv2d, dequantize the weights to fp32. ''' - dequantized_weight_data = np.zeros_like(quantized_weight_data, - dtype=np.float32) + dequantized_weight_data = np.zeros_like( + quantized_weight_data, dtype=np.float32 + ) for i in range(len(scales)): - dequantized_weight_data[i] = \ - (quantized_weight_data[i] * scales[i]).astype(np.float32) + dequantized_weight_data[i] = ( + quantized_weight_data[i] * scales[i] + ).astype(np.float32) return dequantized_weight_data - def _mul_channel_wise_quantization(self, weight_data, quantize_range, - save_weight_dtype): + def _mul_channel_wise_quantization( + self, weight_data, quantize_range, save_weight_dtype + ): ''' Get channel wise scale for the weights of conv2d and depthwise_conv2d, and quantize the weights. ''' scales = [] - quantized_weight_data = np.zeros_like(weight_data, - dtype=save_weight_dtype) + quantized_weight_data = np.zeros_like( + weight_data, dtype=save_weight_dtype + ) channel_num = weight_data.shape[-1] for i in range(channel_num): scale = np.max(np.abs(weight_data[:, i])) / quantize_range scales.append(scale) - quantized_weight_data[:, i] = \ - np.around(weight_data[:, i] / scale).astype(save_weight_dtype) + quantized_weight_data[:, i] = np.around( + weight_data[:, i] / scale + ).astype(save_weight_dtype) return scales, quantized_weight_data def _mul_channel_wise_dequantization(self, quantized_weight_data, scales): ''' For mul, dequantize the weights to fp32. 
''' - dequantized_weight_data = np.zeros_like(quantized_weight_data, - dtype=np.float32) + dequantized_weight_data = np.zeros_like( + quantized_weight_data, dtype=np.float32 + ) for i in range(len(scales)): - dequantized_weight_data[:, i] = \ - (quantized_weight_data[:, i] * scales[i]).astype(np.float32) + dequantized_weight_data[:, i] = ( + quantized_weight_data[:, i] * scales[i] + ).astype(np.float32) return dequantized_weight_data def _calculate_threshold(self, input, threshold_rate, histogram_bins=5000): input_abs = np.abs(input) - hist, hist_edeges = np.histogram(input_abs, - bins=histogram_bins, - range=(0, np.max(input_abs))) + hist, hist_edeges = np.histogram( + input_abs, bins=histogram_bins, range=(0, np.max(input_abs)) + ) hist = hist / float(sum(hist)) hist_sum = 0 hist_index = 0 diff --git a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py index f8de55ee3caea9f4f7766bfbab238e03a9ca9926..c7723097f4deb2c63bd6dabd30998732c14e9abe 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py @@ -36,13 +36,15 @@ class Quant2Int8MkldnnPass(object): passes (`cpu_quantize_pass`, `cpu_quantize_squash_pass`). """ - def __init__(self, - _ops_to_quantize, - _op_ids_to_skip=None, - _scope=None, - _place=None, - _core=None, - _debug=False): + def __init__( + self, + _ops_to_quantize, + _op_ids_to_skip=None, + _scope=None, + _place=None, + _core=None, + _debug=False, + ): self._scope = _scope self._place = _get_paddle_place(_place) self._core = _core @@ -52,19 +54,26 @@ class Quant2Int8MkldnnPass(object): 'fake_quantize_range_abs_max', ] self._fake_dequantize_types = [ - 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs' + 'fake_dequantize_max_abs', + 'fake_channel_wise_dequantize_max_abs', ] self._fake_quantize_dequantize_types = [ 'fake_quantize_dequantize_abs_max', 'fake_quantize_dequantize_moving_average_abs_max', - 'fake_channel_wise_quantize_dequantize_abs_max' + 'fake_channel_wise_quantize_dequantize_abs_max', ] self._ops_to_quantize = _ops_to_quantize - self._op_ids_to_skip = _op_ids_to_skip if _op_ids_to_skip is not None else set( - [-1]) + self._op_ids_to_skip = ( + _op_ids_to_skip if _op_ids_to_skip is not None else set([-1]) + ) self._scale_immutable_ops = [ - 'transpose2', 'reshape2', 'pool2d', 'slice', 'shape', - 'nearest_interp', 'nearest_interp_v2' + 'transpose2', + 'reshape2', + 'pool2d', + 'slice', + 'shape', + 'nearest_interp', + 'nearest_interp_v2', ] self._scale_ops = ['scale'] self._conv_ops = ['conv2d', 'depthwise_conv2d'] @@ -84,8 +93,9 @@ class Quant2Int8MkldnnPass(object): self._pass_group = 'int8' def apply(self, graph): - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' self._reset_pass_idx_and_group('int8') graph = self._label_skip_quantized_op(graph) @@ -105,8 +115,9 @@ class Quant2Int8MkldnnPass(object): return graph def prepare_and_optimize_fp32(self, graph): - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
self._reset_pass_idx_and_group('fp32') graph = self._optimize_fp32_graph(graph) @@ -130,10 +141,10 @@ class Quant2Int8MkldnnPass(object): return any(op.name() in op_types for op in graph.all_op_nodes()) def _is_any_of_op_types_quantized(self, op_types, graph): - return self._is_any_of_op_types_in_graph( - op_types, graph) and (self._is_quantizing_all_ops() - or any(op_type in self._ops_to_quantize - for op_type in op_types)) + return self._is_any_of_op_types_in_graph(op_types, graph) and ( + self._is_quantizing_all_ops() + or any(op_type in self._ops_to_quantize for op_type in op_types) + ) def _is_conv_quantized(self, graph): return self._is_any_of_op_types_quantized(self._conv_ops, graph) @@ -153,8 +164,9 @@ class Quant2Int8MkldnnPass(object): """ target_ops = self._conv_ops + self._mul_ops + self._matmul_ops for op_node in graph.all_op_nodes(): - if op_node.name() in target_ops and \ - not op_node.op().has_attr("quantization_type"): + if op_node.name() in target_ops and not op_node.op().has_attr( + "quantization_type" + ): is_quantized_op = True for var_node in op_node.inputs: for front_op_node in var_node.inputs: @@ -181,21 +193,25 @@ class Quant2Int8MkldnnPass(object): for op in graph.all_op_nodes(): if op.name() in fake_ops: bit_length = op.op().attr("bit_length") - assert bit_length == 8, 'Unsupported number quantization bits ({}). Only 8 is supported now.'.format( - bit_length) + assert ( + bit_length == 8 + ), 'Unsupported number quantization bits ({}). Only 8 is supported now.'.format( + bit_length + ) input_name = op.input("X")[0] scale_name = op.input("InScale")[0] output_name = op.output("Out")[0] # Gather new weight scales after folding batchnorm in convolution scale = np.array( - 1.0 / self._load_param(self._scope, scale_name)[0]).astype( - np.float64) + 1.0 / self._load_param(self._scope, scale_name)[0] + ).astype(np.float64) scale[scale == np.Inf] = 0.0 lod_tensor = self._convert_scale2tensor(scale) use_unsigned_int = False - self._add_scale_for_vars([input_name, output_name], - use_unsigned_int, lod_tensor) + self._add_scale_for_vars( + [input_name, output_name], use_unsigned_int, lod_tensor + ) return graph @@ -205,15 +221,16 @@ class Quant2Int8MkldnnPass(object): input_name = op.input("X")[0] if op.op().has_attr("max_range"): _max_range = np.array(op.op().attr("max_range")).astype( - np.float64) + np.float64 + ) self._weight_thresholds[input_name] = np.array( - self._s8_max * self._s8_max / _max_range).astype( - np.float64) + self._s8_max * self._s8_max / _max_range + ).astype(np.float64) else: scale_name = op.input("Scales")[0] self._weight_thresholds[input_name] = np.array( - self._load_param(self._scope, - scale_name)).astype(np.float64) + self._load_param(self._scope, scale_name) + ).astype(np.float64) return graph @@ -229,14 +246,13 @@ class Quant2Int8MkldnnPass(object): use_unsigned_int = False for output_name in op.op().outputs(): for out_var_name in op.op().output(output_name): - self._add_scale_for_vars([out_var_name], - use_unsigned_int, - scale_lod_tensor) + self._add_scale_for_vars( + [out_var_name], use_unsigned_int, scale_lod_tensor + ) return graph def _propagate_scales(self, graph): - def _update_scale_op_in_scale(op, input, output): unsigned, tensor = self._var_quant_scales[output] scale = np.array(tensor) * op.op().attr("scale") @@ -254,24 +270,28 @@ class Quant2Int8MkldnnPass(object): output_name = op.output("Out")[0] tensor_names = [input_name, output_name] - if all(name not in self._var_quant_scales - for name in tensor_names): + if all( + name not 
in self._var_quant_scales + for name in tensor_names + ): waiting_for_scale.update(tensor_names) continue elif input_name in self._var_quant_scales: self._var_quant_scales[ - output_name] = self._var_quant_scales[input_name] + output_name + ] = self._var_quant_scales[input_name] elif output_name in self._var_quant_scales: self._var_quant_scales[ - input_name] = self._var_quant_scales[output_name] + input_name + ] = self._var_quant_scales[output_name] elif op.name() == 'concat': output_name = op.output("Out")[0] if output_name in self._var_quant_scales: input_names = op.input("X") for input_name in input_names: self._var_quant_scales[ - input_name] = self._var_quant_scales[ - output_name] + input_name + ] = self._var_quant_scales[output_name] elif op.name() in self._scale_ops: input_name = op.input("X")[0] output_name = op.output("Out")[0] @@ -282,8 +302,10 @@ class Quant2Int8MkldnnPass(object): waiting_for_scale = _update_scales(graph) waiting_for_scale_prev = set() - while len(waiting_for_scale - ) != 0 and waiting_for_scale != waiting_for_scale_prev: + while ( + len(waiting_for_scale) != 0 + and waiting_for_scale != waiting_for_scale_prev + ): waiting_for_scale_prev = waiting_for_scale waiting_for_scale = _update_scales(graph) @@ -305,27 +327,31 @@ class Quant2Int8MkldnnPass(object): def _remove_fake_quantize(self, graph, op): fake_quant_in = graph._find_node_by_name(op.inputs, op.input("X")[0]) - fake_quant_in_scale = graph._find_node_by_name(op.inputs, - op.input("InScale")[0]) - fake_quant_out = graph._find_node_by_name(op.outputs, - op.output("Out")[0]) + fake_quant_in_scale = graph._find_node_by_name( + op.inputs, op.input("InScale")[0] + ) + fake_quant_out = graph._find_node_by_name( + op.outputs, op.output("Out")[0] + ) fake_quant_out_scale = graph._find_node_by_name( - op.outputs, - op.output("OutScale")[0]) + op.outputs, op.output("OutScale")[0] + ) next_ops = fake_quant_out.outputs for next_op in next_ops: self._swap_inputs(next_op, fake_quant_out, fake_quant_in) graph.link_to(fake_quant_in, next_op) graph.safe_remove_nodes( - {op, fake_quant_in_scale, fake_quant_out, fake_quant_out_scale}) + {op, fake_quant_in_scale, fake_quant_out, fake_quant_out_scale} + ) return graph def _remove_fake_dequantize(self, graph, op): fake_dequant_in = graph._find_node_by_name(op.inputs, op.input("X")[0]) - fake_dequant_out = graph._find_node_by_name(op.outputs, - op.output("Out")[0]) + fake_dequant_out = graph._find_node_by_name( + op.outputs, op.output("Out")[0] + ) next_ops = fake_dequant_out.outputs for next_op in next_ops: @@ -338,13 +364,15 @@ class Quant2Int8MkldnnPass(object): def _swap_inputs(self, op, old_input, new_input): for input_name in op.op().input_names(): if old_input.name() in op.input(input_name): - op.op().set_input(input_name, [ - new_input.name() if x == old_input.name() else x - for x in op.input(input_name) - ]) + op.op().set_input( + input_name, + [ + new_input.name() if x == old_input.name() else x + for x in op.input(input_name) + ], + ) def _dequantize_weights(self, graph): - def _is_int8_weights(op_node, weight_name): weight_var_name = op_node.input(weight_name)[0] if self._scope.find_var(weight_var_name) is None: @@ -373,8 +401,10 @@ class Quant2Int8MkldnnPass(object): w_fp32 = np.multiply(np.divide(weight, self._s8_max), scales) else: raise ValueError( - "The size of weight scales vector ({}) does not match the dimensions ({}) of the weights tensor {}." 
- .format(scales.size, weight.shape, weight_var_name)) + "The size of weight scales vector ({}) does not match the dimensions ({}) of the weights tensor {}.".format( + scales.size, weight.shape, weight_var_name + ) + ) w_fp32 = w_fp32.reshape(weight.shape).astype(np.float32) self._restore_var(weight_var_name, w_fp32) @@ -384,8 +414,9 @@ class Quant2Int8MkldnnPass(object): def _update_activations(self, graph): for op in graph.all_op_nodes(): - if op.name( - ) in self._conv_ops and not op.op().has_attr("fuse_activation"): + if op.name() in self._conv_ops and not op.op().has_attr( + "fuse_activation" + ): activation = "" if op.op().has_attr("fuse_relu") and op.op().attr("fuse_relu"): activation = "relu" @@ -403,8 +434,9 @@ class Quant2Int8MkldnnPass(object): def _optimize_fp32_graph(self, graph): graph = self._update_activations(graph) graph = self._remove_ctrl_vars(graph) - graph = self._apply_pass(graph, 'mkldnn_placement_pass', - ['mkldnn_enabled_op_types'], [set()]) + graph = self._apply_pass( + graph, 'mkldnn_placement_pass', ['mkldnn_enabled_op_types'], [set()] + ) # remove dropout ops graph = self._apply_pass(graph, 'simplify_with_basic_ops_pass') graph = self._apply_pass(graph, 'layer_norm_fuse_pass') @@ -433,29 +465,34 @@ class Quant2Int8MkldnnPass(object): graph = self._apply_pass(graph, 'conv_eltwiseadd_bn_fuse_pass') graph = self._apply_pass(graph, 'conv_affine_channel_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'conv_transpose_bn_fuse_pass') - graph = self._apply_pass(graph, - 'conv_transpose_eltwiseadd_bn_fuse_pass') + graph = self._apply_pass( + graph, 'conv_transpose_eltwiseadd_bn_fuse_pass' + ) graph = self._apply_pass(graph, 'conv_bias_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'conv_transpose_bias_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'conv_elementwise_add_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'conv_activation_mkldnn_fuse_pass') - graph = self._apply_pass(graph, 'fc_fuse_pass', - ['use_gpu', 'use_fc_padding'], [False, False]) + graph = self._apply_pass( + graph, 'fc_fuse_pass', ['use_gpu', 'use_fc_padding'], [False, False] + ) graph = self._apply_pass(graph, 'repeated_fc_relu_fuse_pass') if self._is_fc_quantized(graph): # Disabled due to topology-dependent speed-up graph = self._apply_pass(graph, 'fc_mkldnn_pass') graph = self._apply_pass(graph, 'fc_act_mkldnn_fuse_pass') - graph = self._apply_pass(graph, - 'matmul_transpose_reshape_mkldnn_fuse_pass') - graph = self._apply_pass(graph, - 'matmul_elementwise_add_mkldnn_fuse_pass') + graph = self._apply_pass( + graph, 'matmul_transpose_reshape_mkldnn_fuse_pass' + ) + graph = self._apply_pass( + graph, 'matmul_elementwise_add_mkldnn_fuse_pass' + ) graph = self._apply_pass(graph, 'matmul_activation_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'batch_norm_act_fuse_pass') graph = self._apply_pass(graph, 'softplus_activation_mkldnn_fuse_pass') graph = self._apply_pass(graph, 'scale_matmul_fuse_pass') - graph = self._apply_pass(graph, - 'reshape_transpose_matmul_mkldnn_fuse_pass') + graph = self._apply_pass( + graph, 'reshape_transpose_matmul_mkldnn_fuse_pass' + ) # the following pass should be the last one since it will work on all fused ops. 
graph = self._apply_pass(graph, 'runtime_context_cache_pass') return graph @@ -474,8 +511,10 @@ class Quant2Int8MkldnnPass(object): ir_pass.apply(cpp_graph) if self._debug: graph.draw( - '.', '{}_{}_{}'.format(self._pass_group, self._pass_idx, - pass_name), graph.all_op_nodes()) + '.', + '{}_{}_{}'.format(self._pass_group, self._pass_idx, pass_name), + graph.all_op_nodes(), + ) self._remove_unused_var_nodes(graph) self._pass_idx += 1 return graph @@ -502,8 +541,10 @@ class Quant2Int8MkldnnPass(object): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, + graph.all_var_nodes(), + ) } graph.safe_remove_nodes(all_unused_vars) return graph @@ -515,45 +556,60 @@ class Quant2Int8MkldnnPass(object): return graph def _compute_weight_scales(self, graph): - def _compute_var_scales(ops, w_name, axis): for op in graph.all_op_nodes(): if op.op().type() in ops: weight_var_name = op.input(w_name)[0] weights = np.array( - self._load_param(self._scope, weight_var_name)) - scales = 1.0 / np.amax(np.abs( - weights.reshape(weights.shape[0], -1)).astype( - np.float64), - axis=axis) + self._load_param(self._scope, weight_var_name) + ) + scales = 1.0 / np.amax( + np.abs(weights.reshape(weights.shape[0], -1)).astype( + np.float64 + ), + axis=axis, + ) scales[scales == np.Inf] = 0.0 lod_tensor = self._convert_scale2tensor(scales) use_unsigned_int = False - self._var_quant_scales[weight_var_name] = (use_unsigned_int, - lod_tensor) + self._var_quant_scales[weight_var_name] = ( + use_unsigned_int, + lod_tensor, + ) def _compute_single_gru_weight_scales(wx_var_name, wh_var_name): wx = np.array(self._load_param(self._scope, wx_var_name)) wh = np.array(self._load_param(self._scope, wh_var_name)) OC = wh.shape[0] - scale_ur = 1.0 / np.max(np.abs( - np.concatenate([ - wx[:, :2 * OC], - wh.flatten()[:2 * OC * OC].reshape(OC, 2 * OC) - ], - axis=0)), - axis=0) - scale_o = 1.0 / np.max(np.abs( - np.concatenate([ - wx[:, 2 * OC:], - wh.flatten()[2 * OC * OC:].reshape(OC, OC) - ], - axis=0)), - axis=0) - - gru_weights_scale = np.concatenate([scale_ur, - scale_o]).astype('float') + scale_ur = 1.0 / np.max( + np.abs( + np.concatenate( + [ + wx[:, : 2 * OC], + wh.flatten()[: 2 * OC * OC].reshape(OC, 2 * OC), + ], + axis=0, + ) + ), + axis=0, + ) + scale_o = 1.0 / np.max( + np.abs( + np.concatenate( + [ + wx[:, 2 * OC :], + wh.flatten()[2 * OC * OC :].reshape(OC, OC), + ], + axis=0, + ) + ), + axis=0, + ) + + gru_weights_scale = np.concatenate([scale_ur, scale_o]).astype( + 'float' + ) return self._convert_scale2tensor(gru_weights_scale) @@ -563,21 +619,26 @@ class Quant2Int8MkldnnPass(object): assert len(op.input(wx_name)) == len( op.input(wh_name) ), 'Mismatch in number of weights inputs ({} for WeightX vs. 
{} for WeightH).'.format( - len(op.input(wx_name)), len(op.input(wh_name))) + len(op.input(wx_name)), len(op.input(wh_name)) + ) for i, wx_var_name in enumerate(op.input(wx_name)): wh_var_name = op.input(wh_name)[i] use_unsigned_int = False lod_tensor = _compute_single_gru_weight_scales( - wx_var_name, wh_var_name) - self._var_quant_scales[wx_var_name] = (use_unsigned_int, - lod_tensor) + wx_var_name, wh_var_name + ) + self._var_quant_scales[wx_var_name] = ( + use_unsigned_int, + lod_tensor, + ) def _compute_single_lstm_weight_scales(wx_var_name, wh_var_name): wx = np.array(self._load_param(self._scope, wx_var_name)) wh = np.array(self._load_param(self._scope, wh_var_name)) lstm_weights_scale = 1.0 / np.max( - np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0) + np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0 + ) lstm_weights_scale = lstm_weights_scale.astype('float') return self._convert_scale2tensor(lstm_weights_scale) @@ -588,14 +649,18 @@ class Quant2Int8MkldnnPass(object): assert len(op.input(wx_name)) == len( op.input(wh_name) ), 'Mismatch in number of weights inputs ({} for WeightX vs. {} for WeightH).'.format( - len(op.input(wx_name)), len(op.input(wh_name))) + len(op.input(wx_name)), len(op.input(wh_name)) + ) for i, wx_var_name in enumerate(op.input(wx_name)): wh_var_name = op.input(wh_name)[i] use_unsigned_int = False lod_tensor = _compute_single_lstm_weight_scales( - wx_var_name, wh_var_name) - self._var_quant_scales[wx_var_name] = (use_unsigned_int, - lod_tensor) + wx_var_name, wh_var_name + ) + self._var_quant_scales[wx_var_name] = ( + use_unsigned_int, + lod_tensor, + ) _compute_var_scales(self._conv_ops, "Filter", axis=1) _compute_var_scales(self._fc_ops, "W", axis=0) @@ -606,7 +671,6 @@ class Quant2Int8MkldnnPass(object): return graph def _update_relu_output_scales(self, graph): - def _set_unsigned_scale(graph, ops, op_out_name, predicate): ''' Sets the type of an output scale of a passed op type(s) to 'unsigned int8' if the @@ -617,7 +681,8 @@ class Quant2Int8MkldnnPass(object): if op.name() in ops: out_name = op.output(op_out_name)[0] if out_name in self._var_quant_scales and predicate( - op.op()): + op.op() + ): is_unsigned, tensor = self._var_quant_scales[out_name] if is_unsigned is False: # If the variable is signed, it means that the scales for this var @@ -625,23 +690,26 @@ class Quant2Int8MkldnnPass(object): # to fill the entire range of uint8 scale = np.array(tensor) * 2 tensor = self._convert_scale2tensor( - scale.astype(np.float64)) + scale.astype(np.float64) + ) self._var_quant_scales[out_name] = (True, tensor) return graph def conv_predicate(op): return op.attr("fuse_activation") in self._relu_ops - graph = _set_unsigned_scale(graph, self._conv_ops, "Output", - conv_predicate) + graph = _set_unsigned_scale( + graph, self._conv_ops, "Output", conv_predicate + ) def fc_predicate(op): return op.attr("activation_type") in self._relu_ops graph = _set_unsigned_scale(graph, self._fc_ops, "Out", fc_predicate) - graph = _set_unsigned_scale(graph, self._relu_ops, 'Out', - lambda op: True) + graph = _set_unsigned_scale( + graph, self._relu_ops, 'Out', lambda op: True + ) return graph @@ -650,15 +718,21 @@ class Quant2Int8MkldnnPass(object): def _quantize_fp32_graph(self, graph): graph = self._apply_pass(graph, 'scale_matmul_fuse_pass') - graph = self._apply_pass(graph, - 'reshape_transpose_matmul_mkldnn_fuse_pass') - graph = self._apply_pass(graph, 'cpu_quantize_placement_pass', - ['quantize_enabled_op_types'], - [self._ops_to_quantize]) graph = 
self._apply_pass( - graph, 'cpu_quantize_pass', ['quant_var_scales', 'data_layout'], - [self._var_quant_scales, - self._get_data_layout(graph)]) + graph, 'reshape_transpose_matmul_mkldnn_fuse_pass' + ) + graph = self._apply_pass( + graph, + 'cpu_quantize_placement_pass', + ['quantize_enabled_op_types'], + [self._ops_to_quantize], + ) + graph = self._apply_pass( + graph, + 'cpu_quantize_pass', + ['quant_var_scales', 'data_layout'], + [self._var_quant_scales, self._get_data_layout(graph)], + ) graph = self._apply_pass(graph, 'cpu_quantize_squash_pass') graph = self._apply_pass(graph, 'int8_scale_calculation_mkldnn_pass') graph = self._apply_pass(graph, 'params_quantization_mkldnn_pass') diff --git a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py index d56aeb79f3f7c93ef23a2888f2f58fc0628e4ef0..73c611db0120600400a0e4a22118a859fe141032 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quant_int8_mkldnn_pass.py @@ -66,7 +66,7 @@ class QuantInt8MkldnnPass(object): self._quantize_type = [ 'fake_quantize_moving_average_abs_max', - 'fake_quantize_range_abs_max' + 'fake_quantize_range_abs_max', ] self._dequantize_type = ['fake_dequantize_max_abs'] self._quantize_dequantize_type = [ @@ -92,8 +92,9 @@ class QuantInt8MkldnnPass(object): graph(IrGraph): the applied graph. """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' ops = graph.all_op_nodes() persistable_vars = [p.name() for p in graph.all_persistable_nodes()] @@ -104,7 +105,8 @@ class QuantInt8MkldnnPass(object): input_name = op_node.input("X")[0] scale_name = op_node.input("Scale")[0] self._in_scale[input_name] = self._load_param( - self._scope, scale_name)[0] + self._scope, scale_name + )[0] self._max_range[input_name] = op_node.op().attr("max_range") self._new_output[input_name] = op_node.output("Out")[0] @@ -114,7 +116,8 @@ class QuantInt8MkldnnPass(object): input_name = op_node.input("X")[0] scale_name = op_node.input("InScale")[0] self._in_scale[input_name] = self._load_param( - self._scope, scale_name)[0] + self._scope, scale_name + )[0] # self._max_range[input_name] = op_node.op().attr("max_range") self._new_output[input_name] = op_node.output("Out")[0] @@ -142,29 +145,30 @@ class QuantInt8MkldnnPass(object): output_name = op_node.output("Output")[0] # Convert int8 range weights to fp32 range weights weight = self._load_param(self._scope, weight_name) - w_fp32 = np.divide(np.multiply(weight, self._s8_max), - self._max_range[output_name]) + w_fp32 = np.divide( + np.multiply(weight, self._s8_max), self._max_range[output_name] + ) w_fp32 = w_fp32.reshape(weight.shape) self._restore_var(weight_name, w_fp32) - input_var_node = graph._find_node_by_name(op_node.inputs, - op_node.input("Input")[0]) + input_var_node = graph._find_node_by_name( + op_node.inputs, op_node.input("Input")[0] + ) weight_var_node = graph._find_node_by_name(op_node.inputs, weight_name) # Set fake_dequantize_abs_max's output as new output of conv2d output_var_node = graph._find_node_by_name( - graph.all_var_nodes(), self._new_output[output_name]) + graph.all_var_nodes(), self._new_output[output_name] + ) attrs = { - name: op_node.op().attr(name) - for name in op_node.op().attr_names() + name: op_node.op().attr(name) for name in op_node.op().attr_names() } - conv_op_node = 
graph.create_op_node(op_type='conv2d', - attrs=attrs, - inputs={ - 'Input': input_var_node, - 'Filter': weight_var_node - }, - outputs={'Output': output_var_node}) + conv_op_node = graph.create_op_node( + op_type='conv2d', + attrs=attrs, + inputs={'Input': input_var_node, 'Filter': weight_var_node}, + outputs={'Output': output_var_node}, + ) # Based on the Quant's scales to calculate the scales of MKL-DNN INT8 conv2d scale_in = self._s8_max / self._in_scale[output_name] @@ -187,29 +191,30 @@ class QuantInt8MkldnnPass(object): output_name = op_node.output("Out")[0] # Convert int8 range weights to fp32 range weights weight = self._load_param(self._scope, weight_name) - w_fp32 = np.divide(np.multiply(weight, self._s8_max), - self._max_range[output_name]) + w_fp32 = np.divide( + np.multiply(weight, self._s8_max), self._max_range[output_name] + ) w_fp32 = w_fp32.reshape(weight.shape) self._restore_var(weight_name, w_fp32) - input_var_node = graph._find_node_by_name(op_node.inputs, - op_node.input("X")[0]) + input_var_node = graph._find_node_by_name( + op_node.inputs, op_node.input("X")[0] + ) weight_var_node = graph._find_node_by_name(op_node.inputs, weight_name) # Set fake_dequantize_abs_max's output as new output of mul output_var_node = graph._find_node_by_name( - graph.all_var_nodes(), self._new_output[output_name]) + graph.all_var_nodes(), self._new_output[output_name] + ) attrs = { - name: op_node.op().attr(name) - for name in op_node.op().attr_names() + name: op_node.op().attr(name) for name in op_node.op().attr_names() } - mul_op_node = graph.create_op_node(op_type='mul', - attrs=attrs, - inputs={ - 'X': input_var_node, - 'Y': weight_var_node - }, - outputs={'Out': output_var_node}) + mul_op_node = graph.create_op_node( + op_type='mul', + attrs=attrs, + inputs={'X': input_var_node, 'Y': weight_var_node}, + outputs={'Out': output_var_node}, + ) # Based on the Quant's scales to calculate MKL-DNN INT8 mul's scales scale_in = self._s8_max / self._in_scale[output_name] @@ -230,30 +235,35 @@ class QuantInt8MkldnnPass(object): """ Transform fake_quantize_xx op to quantize mkldnn op in the graph. 
""" - input_var_node = graph._find_node_by_name(op_node.inputs, - op_node.input("X")[0]) - output_var_node = graph._find_node_by_name(op_node.outputs, - op_node.output("Out")[0]) - scale_in = self._s8_max / self._load_param( - self._scope, - op_node.input("InScale")[0])[0] + input_var_node = graph._find_node_by_name( + op_node.inputs, op_node.input("X")[0] + ) + output_var_node = graph._find_node_by_name( + op_node.outputs, op_node.output("Out")[0] + ) + scale_in = ( + self._s8_max + / self._load_param(self._scope, op_node.input("InScale")[0])[0] + ) quant_op_node = graph.create_op_node( op_type='quantize', attrs={ 'data_format': 'MKLDNNLAYOUT', 'use_mkldnn': 1, 'Scale': scale_in, - 'is_negative_input': 1 + 'is_negative_input': 1, }, inputs={'Input': input_var_node}, - outputs={'Output': output_var_node}) + outputs={'Output': output_var_node}, + ) graph.link_to(input_var_node, quant_op_node) graph.link_to(quant_op_node, output_var_node) graph.safe_remove_nodes(op_node) def _remove_fake_dequantize_op(self, graph, op_node): - input_var_node = graph._find_node_by_name(op_node.inputs, - op_node.input("X")[0]) + input_var_node = graph._find_node_by_name( + op_node.inputs, op_node.input("X")[0] + ) graph.safe_remove_nodes(op_node) def _load_param(self, scope, param_name): @@ -275,7 +285,9 @@ class QuantInt8MkldnnPass(object): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, + graph.all_var_nodes(), + ) } graph.safe_remove_nodes(all_unused_vars) diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index abd6a7822d872ee33d37fa8ac41fac34ccd311cf..81aac8823d81363fb85cfe00e601c5ef7ef97294 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -14,6 +14,7 @@ import collections import numpy as np + try: from tqdm import tqdm except: @@ -47,12 +48,15 @@ __all__ = [ ] _fake_quant_op_list = [ - 'fake_quantize_abs_max', 'fake_quantize_range_abs_max', - 'fake_quantize_moving_average_abs_max', 'fake_channel_wise_quantize_abs_max' + 'fake_quantize_abs_max', + 'fake_quantize_range_abs_max', + 'fake_quantize_moving_average_abs_max', + 'fake_channel_wise_quantize_abs_max', ] _fake_dequant_op_list = [ - 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs' + 'fake_dequantize_max_abs', + 'fake_channel_wise_dequantize_max_abs', ] _fake_quant_dequant_op_list = [ @@ -67,12 +71,11 @@ _SCALE_DEFAULT_VALUE = 0.001 def _init_var_node(var_node, value, scope, place): - assert isinstance(value, - np.ndarray), 'The type of value should be numpy array.' - assert scope is not None, \ - 'The scope cannot be set None.' - assert place is not None, \ - 'The place cannot be set None.' + assert isinstance( + value, np.ndarray + ), 'The type of value should be numpy array.' + assert scope is not None, 'The scope cannot be set None.' + assert place is not None, 'The place cannot be set None.' 
tensor = scope.var(var_node.name()).get_tensor() tensor.set(value, place) @@ -84,8 +87,9 @@ def _is_input_all_not_persistable(graph, op_node): is_input_all_not_persistable = True for var_name in utils._get_op_input_var_names(op_node): in_node = graph._find_node_by_name(op_node.inputs, var_name) - is_input_all_not_persistable = (is_input_all_not_persistable and \ - (not in_node.persistable())) + is_input_all_not_persistable = is_input_all_not_persistable and ( + not in_node.persistable() + ) return is_input_all_not_persistable @@ -109,24 +113,26 @@ class QuantizationTransformPass(object): the quantized ops's inputs. """ - def __init__(self, - scope=None, - place=None, - weight_bits=8, - activation_bits=8, - activation_quantize_type='abs_max', - weight_quantize_type='abs_max', - window_size=10000, - moving_rate=0.9, - skip_pattern=['skip_quant'], - quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'], - weight_quantize_func=None, - act_quantize_func=None, - weight_preprocess_func=None, - act_preprocess_func=None, - optimizer_func=None, - executor=None, - is_test=None): + def __init__( + self, + scope=None, + place=None, + weight_bits=8, + activation_bits=8, + activation_quantize_type='abs_max', + weight_quantize_type='abs_max', + window_size=10000, + moving_rate=0.9, + skip_pattern=['skip_quant'], + quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'], + weight_quantize_func=None, + act_quantize_func=None, + weight_preprocess_func=None, + act_preprocess_func=None, + optimizer_func=None, + executor=None, + is_test=None, + ): r""" Constructor. @@ -215,21 +221,26 @@ class QuantizationTransformPass(object): self._optimizer = optimizer_func self._exe = executor quant_type = [ - 'abs_max', 'channel_wise_abs_max', 'range_abs_max', - 'moving_average_abs_max' + 'abs_max', + 'channel_wise_abs_max', + 'range_abs_max', + 'moving_average_abs_max', ] - assert activation_quantize_type != 'channel_wise_abs_max', \ - "The activation quantization type does not support 'channel_wise_abs_max'." + assert ( + activation_quantize_type != 'channel_wise_abs_max' + ), "The activation quantization type does not support 'channel_wise_abs_max'." if activation_quantize_type not in quant_type: raise ValueError( "Unknown activation_quantize_type : '%s'. It can only be " - "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." % - (str(activation_quantize_type))) + "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." + % (str(activation_quantize_type)) + ) if weight_quantize_type not in quant_type: raise ValueError( "Unknown weight_quantize_type: '%s'. It can only be " "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' " - "or 'moving_average_abs_max'." % (str(weight_quantize_type))) + "or 'moving_average_abs_max'." % (str(weight_quantize_type)) + ) self._activation_quantize_type = activation_quantize_type self._weight_quantize_type = weight_quantize_type @@ -238,8 +249,9 @@ class QuantizationTransformPass(object): self._quantizable_ops = quantizable_op_type for op in self._quantizable_ops: - assert op in utils._weight_supported_quantizable_op_type, \ + assert op in utils._weight_supported_quantizable_op_type, ( op + " is not supported for quantization." + ) self._quantizable_grad_ops = [ '%s_grad' % (op) for op in self._quantizable_ops ] @@ -260,8 +272,9 @@ class QuantizationTransformPass(object): Returns: None """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
if self._is_test is None: self._is_test = graph.is_test() # marked the variable which has been dequantized. @@ -272,13 +285,18 @@ class QuantizationTransformPass(object): def _quant_preprocess(op_node): user_skipped = False if isinstance(self._skip_pattern, list): - user_skipped = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") \ - for pattern in self._skip_pattern) + user_skipped = op_node.op().has_attr("op_namescope") and any( + pattern in op_node.op().attr("op_namescope") + for pattern in self._skip_pattern + ) elif isinstance(self._skip_pattern, str): - user_skipped = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find( - self._skip_pattern) != -1 + user_skipped = ( + op_node.op().has_attr("op_namescope") + and op_node.op() + .attr("op_namescope") + .find(self._skip_pattern) + != -1 + ) if user_skipped: op_node.op()._set_attr("skip_quant", True) @@ -297,8 +315,9 @@ class QuantizationTransformPass(object): name = var_node.name() if name in processed_vars: continue - is_weight = True if var_node.name() in persistable_vars \ - else False + is_weight = ( + True if var_node.name() in persistable_vars else False + ) # if var node is weight and weight_preprocess_func is not None, # will insert weight preprocess func @@ -308,11 +327,14 @@ class QuantizationTransformPass(object): # to preorocess activation before quantization if is_weight and self._weight_preprocess_func is not None: var_node = self._insert_func( - graph, self._weight_preprocess_func, var_node, op) - elif not is_weight and self._act_preprocess_func is not None: - var_node = self._insert_func(graph, - self._act_preprocess_func, - var_node, op) + graph, self._weight_preprocess_func, var_node, op + ) + elif ( + not is_weight and self._act_preprocess_func is not None + ): + var_node = self._insert_func( + graph, self._act_preprocess_func, var_node, op + ) # if var node is weight and weight_quantize_func is not None, # will insert weight quantize func to quantize and dequantize weight @@ -320,32 +342,55 @@ class QuantizationTransformPass(object): # will insert act quantize func to quantize and dequantize activation if is_weight and self._weight_quantize_func is not None: target_out_node = self._insert_func( - graph, self._weight_quantize_func, var_node, op) + graph, self._weight_quantize_func, var_node, op + ) processed_vars.append(name) continue elif not is_weight and self._act_quantize_func is not None: target_out_node = self._insert_func( - graph, self._act_quantize_func, var_node, op) + graph, self._act_quantize_func, var_node, op + ) processed_vars.append(name) continue - quant_bits = self._weight_bits if var_node.name() in persistable_vars \ + quant_bits = ( + self._weight_bits + if var_node.name() in persistable_vars else self._activation_bits - quant_type = self._weight_quantize_type if is_weight \ + ) + quant_type = ( + self._weight_quantize_type + if is_weight else self._activation_quantize_type - if quant_type == 'channel_wise_abs_max': # Weight quantization - quant_axis = 1 if op.name() in \ - utils._channelwise_quant_axis1_ops else 0 - quant_var_node, scale_var_node = self._insert_channel_quant_op( - graph, var_node, name, quant_bits, quant_axis) + ) + if ( + quant_type == 'channel_wise_abs_max' + ): # Weight quantization + quant_axis = ( + 1 + if op.name() in utils._channelwise_quant_axis1_ops + else 0 + ) + ( + quant_var_node, + scale_var_node, + ) = self._insert_channel_quant_op( + graph, var_node, name, quant_bits, quant_axis + ) 
dequant_var_node = self._insert_channel_dequant_op( - graph, quant_var_node, [scale_var_node], - [quant_bits], quant_axis) + graph, + quant_var_node, + [scale_var_node], + [quant_bits], + quant_axis, + ) else: quant_var_node, scale_var_node = self._insert_quant_op( - graph, var_node, name, quant_bits, quant_type) + graph, var_node, name, quant_bits, quant_type + ) dequant_var_node = self._insert_dequant_op( - graph, quant_var_node, scale_var_node, quant_bits) + graph, quant_var_node, scale_var_node, quant_bits + ) dequantized_vars[name] = dequant_var_node graph.update_input_link(var_node, dequant_var_node, op) @@ -373,17 +418,20 @@ class QuantizationTransformPass(object): # Do the preproccess of quantization, such as skipping some ops # for not being quantized. for op in ops: - if op.name() in self._quantizable_ops or \ - op.name() in self._quantizable_grad_ops: + if ( + op.name() in self._quantizable_ops + or op.name() in self._quantizable_grad_ops + ): _quant_preprocess(op) # Insert mapping table to solve the problem in saving inference model. graph.out_node_mapping_table = dict() # The process of _transform_forward and _transform_backward is needed in two for loops. # The loop for transforming the forward graph: - with tqdm(total=len(ops), - bar_format= - 'Adding quant op with weight:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=len(ops), + bar_format='Adding quant op with weight:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for op in ops: if op.name() in self._quantizable_ops: if not self._is_skip_quant(graph, op) and _has_weight(op): @@ -397,8 +445,10 @@ class QuantizationTransformPass(object): return graph def _create_global_step(self, graph): - if self._weight_quantize_type == 'range_abs_max' or \ - self._activation_quantize_type == 'range_abs_max': + if ( + self._weight_quantize_type == 'range_abs_max' + or self._activation_quantize_type == 'range_abs_max' + ): counter_name = '@STEP_COUNTER@' for node in graph.all_var_nodes(): if node.name() == counter_name: @@ -408,21 +458,27 @@ class QuantizationTransformPass(object): name=counter_name, var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], - var_dtype=core.VarDesc.VarType.INT64) - _init_var_node(global_step_in, np.zeros([1], dtype='int64'), - self._scope, self._place) + var_dtype=core.VarDesc.VarType.INT64, + ) + _init_var_node( + global_step_in, + np.zeros([1], dtype='int64'), + self._scope, + self._place, + ) global_step_out = graph.create_var_node_from_desc( - global_step_in.var()) + global_step_in.var() + ) # The attribute of `op_role` is needed by ParallelExecutor. increment_op = graph.create_op_node( op_type='increment', attrs={ 'step': 1.0, - 'op_role': - core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, inputs={'X': global_step_in}, - outputs={'Out': global_step_out}) + outputs={'Out': global_step_out}, + ) graph.link_to(global_step_in, increment_op) graph.link_to(increment_op, global_step_out) self._global_step = global_step_out @@ -432,14 +488,17 @@ class QuantizationTransformPass(object): Insert fake_quantize_op in the graph. 
""" if quant_type == 'abs_max': - return self._insert_quant_abs_max_op(graph, var_node, name, - quant_bits) + return self._insert_quant_abs_max_op( + graph, var_node, name, quant_bits + ) elif quant_type == 'range_abs_max': - return self._insert_quant_range_abs_max_op(graph, var_node, name, - quant_bits) + return self._insert_quant_range_abs_max_op( + graph, var_node, name, quant_bits + ) elif quant_type == 'moving_average_abs_max': return self._insert_quant_moving_average_abs_max_op( - graph, var_node, name, quant_bits) + graph, var_node, name, quant_bits + ) def _insert_quant_abs_max_op(self, graph, var_node, name, quant_bits): """ @@ -451,33 +510,37 @@ class QuantizationTransformPass(object): name=self._quantized_var_name(name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) scale_name = self._quantized_scale_name(name) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: scale_value = np.array( - self._scope.find_var(scale_name).get_tensor()) + self._scope.find_var(scale_name).get_tensor() + ) except: scale_value = np.zeros([1], dtype=data_type) scale_var_node = graph.create_persistable_node( name=scale_name, var_type=var_node.type(), shape=[1], - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) _init_var_node(scale_var_node, scale_value, self._scope, self._place) quant_op_node = graph.create_op_node( op_type='fake_quantize_abs_max', attrs={ 'bit_length': quant_bits, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, inputs={'X': var_node}, - outputs={ - 'Out': quant_var_node, - 'OutScale': scale_var_node - }) + outputs={'Out': quant_var_node, 'OutScale': scale_var_node}, + ) graph.link_to(var_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_var_node) @@ -493,21 +556,27 @@ class QuantizationTransformPass(object): name=self._quantized_var_name(name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) scale_name = self._quantized_scale_name(name) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: scale_value = np.array( - self._scope.find_var(scale_name).get_tensor()) + self._scope.find_var(scale_name).get_tensor() + ) except: scale_value = np.array([_SCALE_DEFAULT_VALUE], dtype=data_type) scale_in_node = graph.create_persistable_node( name=scale_name, var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) _init_var_node(scale_in_node, scale_value, self._scope, self._place) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) @@ -520,12 +589,19 @@ class QuantizationTransformPass(object): name=unique_name.generate('scales'), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[self._window_size], - var_dtype=var_node.dtype()) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' - _init_var_node(scales_node, - np.zeros([self._window_size], dtype=data_type), - self._scope, self._place) + var_dtype=var_node.dtype(), + ) + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) + _init_var_node( 
+ scales_node, + np.zeros([self._window_size], dtype=data_type), + self._scope, + self._place, + ) inputs['Iter'] = self._global_step outputs['OutScales'] = scales_node @@ -533,13 +609,14 @@ class QuantizationTransformPass(object): 'window_size': self._window_size, 'bit_length': quant_bits, 'is_test': self._is_test, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, } quant_op_node = graph.create_op_node( op_type='fake_quantize_range_abs_max', attrs=attrs, inputs=inputs, - outputs=outputs) + outputs=outputs, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_in_node, quant_op_node) @@ -552,28 +629,34 @@ class QuantizationTransformPass(object): return quant_var_node, scale_out_node - def _insert_quant_moving_average_abs_max_op(self, graph, var_node, name, - quant_bits): - """Insert fake_quantize_moving_average_abs_max - """ + def _insert_quant_moving_average_abs_max_op( + self, graph, var_node, name, quant_bits + ): + """Insert fake_quantize_moving_average_abs_max""" quant_var_node = graph.create_var_node( name=self._quantized_var_name(name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) scale_name = self._quantized_scale_name(name) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: scale_value = np.array( - self._scope.find_var(scale_name).get_tensor()) + self._scope.find_var(scale_name).get_tensor() + ) except: scale_value = np.array([_SCALE_DEFAULT_VALUE], dtype=data_type) scale_in_node = graph.create_persistable_node( name=scale_name, var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) _init_var_node(scale_in_node, scale_value, self._scope, self._place) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) @@ -584,22 +667,37 @@ class QuantizationTransformPass(object): name=unique_name.generate('state'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - shape=[1]) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' - _init_var_node(state_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) + _init_var_node( + state_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) accum_in_node = graph.create_persistable_node( name=unique_name.generate('accum'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - shape=[1]) - _init_var_node(accum_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + _init_var_node( + accum_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) state_out_node = graph.create_var_node_from_desc( - state_in_node.var()) + state_in_node.var() + ) accum_out_node = graph.create_var_node_from_desc( - accum_in_node.var()) + accum_in_node.var() + ) ins['InState'] = state_in_node ins['InAccum'] = accum_in_node @@ -610,14 +708,15 @@ class QuantizationTransformPass(object): 'bit_length': quant_bits, 'moving_rate': self._moving_rate, 'is_test': self._is_test, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, } quant_op_node = graph.create_op_node( 
op_type='fake_quantize_moving_average_abs_max', attrs=attrs, inputs=ins, - outputs=outs) + outputs=outs, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_in_node, quant_op_node) @@ -632,8 +731,9 @@ class QuantizationTransformPass(object): return quant_var_node, scale_out_node - def _insert_channel_quant_op(self, graph, var_node, name, quant_bits, - quant_axis): + def _insert_channel_quant_op( + self, graph, var_node, name, quant_bits, quant_axis + ): """ Insert fake_channel_wise_quantize_abs_max op in the graph. """ @@ -643,21 +743,28 @@ class QuantizationTransformPass(object): name=self._quantized_var_name(name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) scale_name = self._quantized_scale_name(name) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: scale_value = np.array( - self._scope.find_var(scale_name).get_tensor()) + self._scope.find_var(scale_name).get_tensor() + ) except: - scale_value = np.zeros([var_node.shape()[quant_axis]], - dtype=data_type) + scale_value = np.zeros( + [var_node.shape()[quant_axis]], dtype=data_type + ) scale_var_node = graph.create_persistable_node( name=self._quantized_scale_name(name), var_type=var_node.type(), shape=[var_node.shape()[quant_axis]], - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) _init_var_node(scale_var_node, scale_value, self._scope, self._place) quant_op_node = graph.create_op_node( op_type='fake_channel_wise_quantize_abs_max', @@ -665,13 +772,11 @@ class QuantizationTransformPass(object): 'bit_length': quant_bits, 'quant_axis': quant_axis, 'is_test': self._is_test, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, inputs={'X': var_node}, - outputs={ - 'Out': quant_var_node, - 'OutScale': scale_var_node - }) + outputs={'Out': quant_var_node, 'OutScale': scale_var_node}, + ) graph.link_to(var_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_var_node) @@ -687,26 +792,26 @@ class QuantizationTransformPass(object): name=self._dequantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) max_range = (1 << (quant_bits - 1)) - 1 dequant_op_node = graph.create_op_node( op_type='fake_dequantize_max_abs', attrs={ 'max_range': float(max_range), - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward - }, - inputs={ - 'X': var_node, - 'Scale': scale_var_node + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, - outputs={'Out': dequant_var_node}) + inputs={'X': var_node, 'Scale': scale_var_node}, + outputs={'Out': dequant_var_node}, + ) graph.link_to(var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) return dequant_var_node - def _insert_channel_dequant_op(self, graph, var_node, scale_var_nodes, - quant_bits, quant_axis): + def _insert_channel_dequant_op( + self, graph, var_node, scale_var_nodes, quant_bits, quant_axis + ): """ Insert fake_channel_wise_dequantize_max_abs in the graph. 
""" @@ -716,19 +821,18 @@ class QuantizationTransformPass(object): name=self._dequantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) dequant_op_node = graph.create_op_node( op_type='fake_channel_wise_dequantize_max_abs', attrs={ 'quant_bits': quant_bits, 'quant_axis': quant_axis, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward - }, - inputs={ - 'X': var_node, - 'Scales': scale_var_nodes + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, - outputs={'Out': dequant_var_node}) + inputs={'X': var_node, 'Scales': scale_var_nodes}, + outputs={'Out': dequant_var_node}, + ) graph.link_to(var_node, dequant_op_node) for scale_n in scale_var_nodes: graph.link_to(scale_n, dequant_op_node) @@ -815,27 +919,34 @@ class QuantizationTransformPass(object): startup_program = Program() with program_guard(tmp_program, startup_program): with unique_name.guard(var_node.name() + "_"): - in_node = data(var_node.name() + '_tmp_input', - shape=var_node.shape(), - dtype='float32') + in_node = data( + var_node.name() + '_tmp_input', + shape=var_node.shape(), + dtype='float32', + ) out_node = func(in_node) graph.out_node_mapping_table[out_node.name] = var_node.name() # loss shape must be 1 when minimize loss = mean(out_node) if not graph._for_test: - assert self._optimizer, "optimizer_func must be set when graph is test graph" + assert ( + self._optimizer + ), "optimizer_func must be set when graph is test graph" in_node.stop_gradient = False optimizer = self._optimizer() optimizer.minimize(loss) with scope_guard(self._scope): self._exe.run(startup_program) - tmp_graph = IrGraph(core.Graph(tmp_program.desc), - for_test=graph._for_test) - in_node = tmp_graph._find_node_by_name(tmp_graph.all_var_nodes(), - in_node.name) - out_node = tmp_graph._find_node_by_name(tmp_graph.all_var_nodes(), - out_node.name) + tmp_graph = IrGraph( + core.Graph(tmp_program.desc), for_test=graph._for_test + ) + in_node = tmp_graph._find_node_by_name( + tmp_graph.all_var_nodes(), in_node.name + ) + out_node = tmp_graph._find_node_by_name( + tmp_graph.all_var_nodes(), out_node.name + ) in_node_params = [] in_op_node = [] @@ -854,10 +965,12 @@ class QuantizationTransformPass(object): for node in in_op_node: self._copy_graph(graph, tmp_graph, node) - target_in_node = graph._find_node_by_name(graph.all_var_nodes(), - in_node.name()) - target_out_node = graph._find_node_by_name(graph.all_var_nodes(), - out_node.name()) + target_in_node = graph._find_node_by_name( + graph.all_var_nodes(), in_node.name() + ) + target_out_node = graph._find_node_by_name( + graph.all_var_nodes(), out_node.name() + ) loss_node = graph._find_node_by_name(graph.all_var_nodes(), loss.name) outputs = target_in_node.outputs for node in outputs: @@ -867,16 +980,17 @@ class QuantizationTransformPass(object): # update grad if not graph._for_test: op_out = op.outputs[0] - op_out_grad = graph._find_node_by_name(graph.all_var_nodes(), - op_out.name() + "@GRAD") + op_out_grad = graph._find_node_by_name( + graph.all_var_nodes(), op_out.name() + "@GRAD" + ) # find op's gradient op, such as conv2d_grad op_grad = op_out_grad.outputs[0] target_out_grad_node = graph._find_node_by_name( - graph.all_var_nodes(), - target_out_node.name() + "@GRAD") + graph.all_var_nodes(), target_out_node.name() + "@GRAD" + ) in_node_grad = graph._find_node_by_name( - graph.all_var_nodes(), - target_in_node.name() + "@GRAD") + graph.all_var_nodes(), target_in_node.name() + "@GRAD" + ) 
in_node_grad_op = in_node_grad.inputs # update op_grad's input graph.update_input_link(var_node, target_out_node, op_grad) @@ -888,8 +1002,9 @@ class QuantizationTransformPass(object): op_grad_out = node # update op_grad's output if op_grad_out is not None: - graph.update_output_link(op_grad_out, target_out_grad_node, - op_grad) + graph.update_output_link( + op_grad_out, target_out_grad_node, op_grad + ) else: graph.link_to(op_grad, target_out_grad_node) @@ -934,31 +1049,37 @@ class QuantizationTransformPass(object): Analyse whether the op node skips quantization. """ is_skip = False - if op_node.op().has_attr("skip_quant") and \ - op_node.op().attr("skip_quant"): + if op_node.op().has_attr("skip_quant") and op_node.op().attr( + "skip_quant" + ): is_skip = True # if the inputs of mul and matmul are not all persistable, use # AddQuantDequantPass to quantize them. - if op_node.name() in ["mul", "matmul"] and \ - _is_input_all_not_persistable(graph, op_node): + if op_node.name() in [ + "mul", + "matmul", + ] and _is_input_all_not_persistable(graph, op_node): is_skip = True - if op_node.op().has_attr("quantization_type") and \ - op_node.op().attr("quantization_type") == "qat_without_weight": + if ( + op_node.op().has_attr("quantization_type") + and op_node.op().attr("quantization_type") == "qat_without_weight" + ): is_skip = True return is_skip class QuantizationFreezePass(object): - - def __init__(self, - scope, - place, - bias_correction=False, - weight_bits=8, - activation_bits=8, - round_type='round', - weight_quantize_type='abs_max', - quantizable_op_type=None): + def __init__( + self, + scope, + place, + bias_correction=False, + weight_bits=8, + activation_bits=8, + round_type='round', + weight_quantize_type='abs_max', + quantizable_op_type=None, + ): """ The freeze pass is used to adjust the quantize operator order, for example: 1) `activation -> quant -> dequant -> conv2d` will be frozen into @@ -984,10 +1105,8 @@ class QuantizationFreezePass(object): quantizable_op_type(list[str]): This input param will be removed latter. The pass will process all quantized op, so it is not necessary to set the input param. """ - assert scope is not None, \ - 'The scope cannot be set None.' - assert place is not None, \ - 'The place cannot be set None.' + assert scope is not None, 'The scope cannot be set None.' + assert place is not None, 'The place cannot be set None.' 
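For orientation, a minimal sketch of how the freeze pass above is typically driven after QAT training. The import path mirrors the file being patched; `test_program` (a QAT-trained test-mode program) and the surrounding setup are assumptions for illustration, not part of this diff:

import paddle
from paddle.fluid import core
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass

paddle.enable_static()
place = paddle.CPUPlace()
scope = paddle.static.global_scope()

# `test_program` is assumed to be a test-mode program that already went
# through the transform/quant passes and training.
test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)

freeze_pass = QuantizationFreezePass(
    scope=scope,
    place=place,
    weight_bits=8,
    activation_bits=8,
    weight_quantize_type='abs_max',
)
freeze_pass.apply(test_graph)  # reorder quant/dequant ops and quantize the weights
frozen_program = test_graph.to_program()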
self._scope = scope self._bias_correction = bias_correction self._place = _get_paddle_place(place) @@ -1020,21 +1139,26 @@ class QuantizationFreezePass(object): if hasattr(graph, 'out_node_mapping_table'): if input_arg_name in graph.out_node_mapping_table.keys(): input_arg_name = graph.out_node_mapping_table[ - input_arg_name] + input_arg_name + ] if input_arg_name not in persistable_vars: scale_v = graph._find_node_by_name( - op_node.outputs, - op_node.output('OutScale')[0]) + op_node.outputs, op_node.output('OutScale')[0] + ) self._quant_var_scale_map[input_arg_name] = scale_v else: # Obtain scale from OutScale var node scale_v = self._load_var(op_node.output('OutScale')[0]) assert scale_v.ndim in [ - 1, 2 + 1, + 2, ], "the dim of scale_v should be 1 or 2" if scale_v.ndim == 2: scale_v = scale_v[0] - if scale_v.size == 1 and self._weight_quantize_type == 'abs_max': + if ( + scale_v.size == 1 + and self._weight_quantize_type == 'abs_max' + ): scale_v = scale_v[0] else: scale_v = scale_v.tolist() @@ -1043,14 +1167,18 @@ class QuantizationFreezePass(object): if self._round_type == 'round': param_v = self._load_var(input_arg_name) if any( - _check_grandchild_op_node(op_node, op) - for op in utils._channelwise_quant_axis1_ops): + _check_grandchild_op_node(op_node, op) + for op in utils._channelwise_quant_axis1_ops + ): quant_axis = 1 else: quant_axis = 0 quantized_param_v = utils.quant_tensor( - param_v.copy(), scale_v, quant_axis, - self._weight_bits) + param_v.copy(), + scale_v, + quant_axis, + self._weight_bits, + ) quantized_param_v = np.round(quantized_param_v) # Weight bias correction if self._bias_correction == True: @@ -1059,7 +1187,8 @@ class QuantizationFreezePass(object): quantized_param_v, scale_v, quant_axis, - weight_bits=self._weight_bits) + weight_bits=self._weight_bits, + ) quantized_param_v = np.round(quantized_param_v) self._restore_var(input_arg_name, quantized_param_v) self._remove_fake_quant_and_dequant_op(graph, op_node) @@ -1075,13 +1204,19 @@ class QuantizationFreezePass(object): ops = graph.all_op_nodes() for op_node in ops: op_node_desc = op_node.op() - if op_node_desc.has_attr("quantization_type") and \ - op_node_desc.attr("quantization_type") == "qat_with_weight": + if ( + op_node_desc.has_attr("quantization_type") + and op_node_desc.attr("quantization_type") == "qat_with_weight" + ): if self._weight_quantize_type == 'channel_wise_abs_max': - quant_axis = 1 if op_node.name() in \ - utils._channelwise_quant_axis1_ops else 0 + quant_axis = ( + 1 + if op_node.name() in utils._channelwise_quant_axis1_ops + else 0 + ) self._insert_post_channel_dequant_op( - graph, op_node, quant_axis) + graph, op_node, quant_axis + ) else: self._insert_post_dequant_op(graph, op_node) @@ -1105,7 +1240,8 @@ class QuantizationFreezePass(object): self._op_input_rename_map[k.node] = v else: self._op_input_rename_map[k.node] = self._op_input_rename_map[ - v.node] + v.node + ] graph.safe_remove_nodes(op_node) def _insert_post_channel_dequant_op(self, graph, op_node, quant_axis): @@ -1123,35 +1259,47 @@ class QuantizationFreezePass(object): scale_v = self._quant_var_scale_map[original_var_name] if original_var_name in persistable_vars: assert isinstance( - scale_v, - list), 'The scale of parameter %s is not a list.' % ( - original_var_name) + scale_v, list + ), 'The scale of parameter %s is not a list.' 
% ( + original_var_name + ) channel_scale = np.array(scale_v) else: assert isinstance(scale_v, IrNode) scale_var_node = self._quant_var_scale_map[original_var_name] if len(op_node.output_arg_names()) != 1: - raise ValueError("Only support one output, but op %s has" - " more than one output." % (op_node.name())) + raise ValueError( + "Only support one output, but op %s has" + " more than one output." % (op_node.name()) + ) output_var_node = graph._find_node_by_name( - op_node.outputs, - op_node.output_arg_names()[0]) + op_node.outputs, op_node.output_arg_names()[0] + ) weight_scale_node = graph.create_persistable_node( name=unique_name.generate('channel_scale'), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[channel_scale.shape[0]], - var_dtype=output_var_node.dtype()) - data_type = 'float64' if output_var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' - _init_var_node(weight_scale_node, channel_scale.astype(data_type), - self._scope, self._place) + var_dtype=output_var_node.dtype(), + ) + data_type = ( + 'float64' + if output_var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) + _init_var_node( + weight_scale_node, + channel_scale.astype(data_type), + self._scope, + self._place, + ) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), shape=output_var_node.shape(), - var_dtype=output_var_node.dtype()) + var_dtype=output_var_node.dtype(), + ) x_num_col_dims = 1 if op_node.name() in ['matmul', 'matmul_v2', 'mul']: x_num_col_dims = len(op_node.outputs[0].shape()) - 1 @@ -1163,13 +1311,14 @@ class QuantizationFreezePass(object): 'quant_bits': [self._weight_bits, self._activation_bits], 'quant_axis': quant_axis, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, - 'x_num_col_dims': x_num_col_dims + 'x_num_col_dims': x_num_col_dims, }, inputs={ 'X': output_var_node, - 'Scales': [weight_scale_node, scale_var_node] + 'Scales': [weight_scale_node, scale_var_node], }, - outputs={'Out': dequant_var_node}) + outputs={'Out': dequant_var_node}, + ) graph.link_to(output_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(weight_scale_node, dequant_op_node) @@ -1195,8 +1344,10 @@ class QuantizationFreezePass(object): scale_v = self._quant_var_scale_map[original_var_name] if original_var_name in persistable_vars: assert self._is_float( - scale_v), 'The scale of parameter %s is not a float.' % ( - original_var_name) + scale_v + ), 'The scale of parameter %s is not a float.' % ( + original_var_name + ) scale_v = 1e-8 if scale_v == 0.0 else scale_v max_range *= param_range / scale_v else: @@ -1205,28 +1356,29 @@ class QuantizationFreezePass(object): scale_var_node = self._quant_var_scale_map[original_var_name] if len(op_node.output_arg_names()) != 1: - raise ValueError("Only support one output, but op %s has" - " more than one output." % (op_node.name())) + raise ValueError( + "Only support one output, but op %s has" + " more than one output." 
% (op_node.name()) + ) output_var_node = graph._find_node_by_name( - op_node.outputs, - op_node.output_arg_names()[0]) + op_node.outputs, op_node.output_arg_names()[0] + ) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), shape=output_var_node.shape(), - var_dtype=output_var_node.dtype()) + var_dtype=output_var_node.dtype(), + ) dequant_op_node = graph.create_op_node( op_type='fake_dequantize_max_abs', attrs={ 'max_range': float(max_range), - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward - }, - inputs={ - 'X': output_var_node, - 'Scale': scale_var_node + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, }, - outputs={'Out': dequant_var_node}) + inputs={'X': output_var_node, 'Scale': scale_var_node}, + outputs={'Out': dequant_var_node}, + ) graph.link_to(output_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) @@ -1252,8 +1404,10 @@ class QuantizationFreezePass(object): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, + graph.all_var_nodes(), + ) } graph.safe_remove_nodes(all_unused_vars) @@ -1262,13 +1416,13 @@ class QuantizationFreezePass(object): Return the original variable name. """ if var_name.endswith('.quantized.dequantized'): - return var_name[:-len('.quantized.dequantized')] + return var_name[: -len('.quantized.dequantized')] if var_name.endswith('.quantized'): - return var_name[:-len('.quantized')] + return var_name[: -len('.quantized')] if var_name.endswith('.dequantized'): - return var_name[:-len('.dequantized')] + return var_name[: -len('.dequantized')] if var_name.endswith('@scale'): - return var_name[:-len('@scale')] + return var_name[: -len('@scale')] else: return var_name @@ -1279,12 +1433,14 @@ class QuantizationFreezePass(object): return "%s.dequantized" % (var_name) def _is_float(self, v): - return isinstance(v, float) or isinstance(v, np.float32) \ + return ( + isinstance(v, float) + or isinstance(v, np.float32) or isinstance(v, np.float64) + ) class ConvertToInt8Pass(object): - def __init__(self, scope, place, quantizable_op_type=None): """ Convert the weights into int8_t type. @@ -1297,10 +1453,8 @@ class ConvertToInt8Pass(object): quantizable_op_type(list[str]): This input param will be removed latter. The pass will process all quantized op, so it is not necessary to set the input param. """ - assert scope is not None, \ - 'The scope cannot be set None.' - assert place is not None, \ - 'The place cannot be set None.' + assert scope is not None, 'The scope cannot be set None.' + assert place is not None, 'The place cannot be set None.' 
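Continuing that sketch, the int8 conversion pass above is usually chained right after the freeze pass; `scope`, `place` and `test_graph` refer to the (assumed) handles from the previous snippet:

from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass

convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
# Rewrites the persistable weights of quantized ops as INT8 variables.
convert_int8_pass.apply(test_graph)
int8_program = test_graph.to_program()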
self._scope = scope self._place = _get_paddle_place(place) @@ -1318,17 +1472,21 @@ class ConvertToInt8Pass(object): ops = graph.all_op_nodes() input_map = {} for op_node in ops: - if op_node.op().has_attr("quantization_type") and \ - op_node.op().attr("quantization_type") == "qat_with_weight": + if ( + op_node.op().has_attr("quantization_type") + and op_node.op().attr("quantization_type") == "qat_with_weight" + ): for var_node in op_node.inputs: name = var_node.name() if name in persistable_vars: if name not in input_map: int8_var_node = self._convert_to_int8( - graph, var_node) + graph, var_node + ) input_map[name] = int8_var_node - graph.update_input_link(var_node, input_map[name], - op_node) + graph.update_input_link( + var_node, input_map[name], op_node + ) # remove the unused var node in the graph self._remove_unused_var_nodes(graph) @@ -1341,7 +1499,8 @@ class ConvertToInt8Pass(object): name=int8_var_node_name, var_type=var_node.type(), shape=var_node.shape(), - var_dtype=core.VarDesc.VarType.INT8) + var_dtype=core.VarDesc.VarType.INT8, + ) array = self._load_var(var_node.name()) self._scope.var(int8_var_node_name) self._store_var(int8_var_node_name, array, np.int8) @@ -1366,14 +1525,15 @@ class ConvertToInt8Pass(object): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, + graph.all_var_nodes(), + ) } graph.safe_remove_nodes(all_unused_vars) class TransformForMobilePass(object): - def __init__(self): """ This pass is used to convert the frozen graph for paddle-mobile execution. @@ -1416,13 +1576,14 @@ class TransformForMobilePass(object): class OutScaleForTrainingPass(object): - - def __init__(self, - scope=None, - place=None, - moving_rate=0.9, - is_test=None, - scale_dict=None): + def __init__( + self, + scope=None, + place=None, + moving_rate=0.9, + is_test=None, + scale_dict=None, + ): """ This pass is used for calculating output scales of some operators. These output scales may be used by tensorRT or some other inference engines. @@ -1449,48 +1610,61 @@ class OutScaleForTrainingPass(object): Args: graph(IrGraph): the target graph. """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
if self._is_test is None: self._is_test = graph.is_test() target_ops = [] for op in graph.all_op_nodes(): if op.name() in self._teller_set: target_ops.append(op) - with tqdm(total=len(target_ops), - bar_format='Adding OutScale op:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=len(target_ops), + bar_format='Adding OutScale op:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for op in target_ops: for output_var_name in utils._get_op_output_var_names(op): - in_node = graph._find_node_by_name(op.outputs, - output_var_name) - if in_node.dtype() not in \ - [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + in_node = graph._find_node_by_name( + op.outputs, output_var_name + ) + if in_node.dtype() not in [ + core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32, + ]: continue - data_type = 'float64' if in_node.dtype() \ - == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if in_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: graph._find_node_by_name( graph.all_var_nodes(), - self._scale_name(in_node.name())) + self._scale_name(in_node.name()), + ) continue except: scale_node = graph.create_persistable_node( name=self._scale_name(in_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], - var_dtype=in_node.dtype()) + var_dtype=in_node.dtype(), + ) if self._scale_dict is not None: try: scale_value = np.array( - [self._scale_dict[in_node.name()]]) + [self._scale_dict[in_node.name()]] + ) except: scale_value = np.ones([1], dtype=data_type) else: scale_value = np.ones([1], dtype=data_type) - _init_var_node(scale_node, scale_value, self._scope, - self._place) + _init_var_node( + scale_node, scale_value, self._scope, self._place + ) ins = {'X': in_node} outs = {'OutScale': scale_node} @@ -1499,22 +1673,32 @@ class OutScaleForTrainingPass(object): name=unique_name.generate('scale_state@'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=in_node.dtype(), - shape=[1]) - _init_var_node(state_in_node, - np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + _init_var_node( + state_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) accum_in_node = graph.create_persistable_node( name=unique_name.generate('scale_accum@'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=in_node.dtype(), - shape=[1]) - _init_var_node(accum_in_node, - np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + _init_var_node( + accum_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) state_out_node = graph.create_var_node_from_desc( - state_in_node.var()) + state_in_node.var() + ) accum_out_node = graph.create_var_node_from_desc( - accum_in_node.var()) + accum_in_node.var() + ) ins['InState'] = state_in_node ins['InAccum'] = accum_in_node @@ -1524,14 +1708,14 @@ class OutScaleForTrainingPass(object): attrs = { 'moving_rate': self._moving_rate, 'is_test': self._is_test, - 'op_role': - core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, } scale_op_node = graph.create_op_node( op_type='moving_average_abs_max_scale', attrs=attrs, inputs=ins, - outputs=outs) + outputs=outs, + ) next_op_node = None if len(in_node.outputs) > 0: @@ -1558,7 +1742,6 @@ class OutScaleForTrainingPass(object): class OutScaleForInferencePass(object): - def __init__(self, scope=None): """ This pass is used for setting output scales of some operators. 
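A rough sketch of how the two out-scale passes pair up across training and export; `train_graph`/`test_graph` are the usual train/test IrGraph handles (assumed, as in the earlier sketches), and the inference-side behaviour relies on the hunk that follows:

from paddle.fluid.contrib.slim.quantization import (
    OutScaleForTrainingPass,
    OutScaleForInferencePass,
)

# Training: attach moving_average_abs_max_scale ops so output scales are tracked.
OutScaleForTrainingPass(scope=scope, place=place, moving_rate=0.9).apply(train_graph)

# ... run training so the scale variables accumulate meaningful values ...

# Export: write the recorded scales into `out_threshold` attributes of the test graph.
OutScaleForInferencePass(scope=scope).apply(test_graph)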
@@ -1578,34 +1761,45 @@ class OutScaleForInferencePass(object): Args: graph(IrGraph): the target graph. """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' op_nodes = graph.all_op_nodes() for op_node in op_nodes: if op_node.name() in self._teller_set: var_names = utils._get_op_output_var_names(op_node) for var_name in var_names: - in_node = graph._find_node_by_name(op_node.outputs, - var_name) - if in_node.dtype() not in \ - [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + in_node = graph._find_node_by_name( + op_node.outputs, var_name + ) + if in_node.dtype() not in [ + core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32, + ]: continue scale_name = self._scale_name(var_name) scale_var = self._scope.find_var(scale_name) - assert scale_var is not None, \ - "Can not find {} variable in the scope".format(scale_name) + assert ( + scale_var is not None + ), "Can not find {} variable in the scope".format( + scale_name + ) scale_value = np.array(scale_var.get_tensor())[0] # For compatibility, we save output threshold by two methods. op_node.op()._set_attr("out_threshold", float(scale_value)) argname_index = utils._get_output_name_index( - op_node, var_name) - assert argname_index is not None, \ + op_node, var_name + ) + assert argname_index is not None, ( var_name + " is not the output of the op" - op_node.op()._set_attr(argname_index[0] + str(argname_index[1]) \ - + "_threshold", float(scale_value)) + ) + op_node.op()._set_attr( + argname_index[0] + str(argname_index[1]) + "_threshold", + float(scale_value), + ) op_node.op()._set_attr("with_quant_attr", True) graph.resolve_hazard() return graph @@ -1626,16 +1820,18 @@ class AddQuantDequantPass(object): # To be compatible with PaddleSlim, not remove _activation_type for now _activation_type = ["relu", "relu6", "leaky_relu", "tanh", "swish"] - def __init__(self, - scope=None, - place=None, - moving_rate=0.9, - quant_bits=8, - skip_pattern=["skip_quant"], - quantizable_op_type=["elementwise_add", "pool2d"], - is_full_quantized=False, - is_test=None, - scale_dict=None): + def __init__( + self, + scope=None, + place=None, + moving_rate=0.9, + quant_bits=8, + skip_pattern=["skip_quant"], + quantizable_op_type=["elementwise_add", "pool2d"], + is_full_quantized=False, + is_test=None, + scale_dict=None, + ): """ Constructor. @@ -1671,8 +1867,9 @@ class AddQuantDequantPass(object): else: self._quantizable_op_type = quantizable_op_type for op_type in quantizable_op_type: - assert op_type in utils._act_supported_quantizable_op_type, \ + assert op_type in utils._act_supported_quantizable_op_type, ( op_type + " is not supported for quantization." + ) self._quantizable_grad_op_type = [ '%s_grad' % (op) for op in self._quantizable_op_type ] @@ -1690,50 +1887,71 @@ class AddQuantDequantPass(object): Returns: None """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
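The constructor above is normally applied to the training graph alongside the weight-oriented transform pass; a minimal, illustrative call using the same assumed `scope`/`place`/`train_graph` handles:

from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass

# Adds activation-only fake quant/dequant ("qat_without_weight") to the listed op types.
add_quant_dequant_pass = AddQuantDequantPass(
    scope=scope,
    place=place,
    moving_rate=0.9,
    quant_bits=8,
    quantizable_op_type=["elementwise_add", "pool2d"],
)
add_quant_dequant_pass.apply(train_graph)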
if self._is_test is None: self._is_test = graph.is_test() dequantized_vars_map = collections.OrderedDict() # Forward stage, insert quant_dequant op all_op_nodes = graph.all_op_nodes() - with tqdm(total=len(all_op_nodes), - bar_format= - 'Adding quant activation op:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=len(all_op_nodes), + bar_format='Adding quant activation op:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for op_node in all_op_nodes: if op_node.name() in self._quantizable_op_type: is_skip = False if isinstance(self._skip_pattern, list): - is_skip = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") for pattern in self._skip_pattern) + is_skip = op_node.op().has_attr("op_namescope") and any( + pattern in op_node.op().attr("op_namescope") + for pattern in self._skip_pattern + ) elif isinstance(self._skip_pattern, str): - is_skip = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find(self._skip_pattern) != -1 - is_quantized = op_node.op().has_attr("quantization_type") and \ - op_node.op().attr("quantization_type") == "qat_with_weight" - if is_skip or is_quantized or \ - (not _is_input_all_not_persistable(graph, op_node)): + is_skip = ( + op_node.op().has_attr("op_namescope") + and op_node.op() + .attr("op_namescope") + .find(self._skip_pattern) + != -1 + ) + is_quantized = ( + op_node.op().has_attr("quantization_type") + and op_node.op().attr("quantization_type") + == "qat_with_weight" + ) + if ( + is_skip + or is_quantized + or (not _is_input_all_not_persistable(graph, op_node)) + ): continue - op_node.op()._set_attr("quantization_type", - "qat_without_weight") + op_node.op()._set_attr( + "quantization_type", "qat_without_weight" + ) op_node.op()._set_attr("activation_bits", self._quant_bits) op_node.op()._set_attr("with_quant_attr", True) arg_names = utils._get_op_input_var_names(op_node) for arg_name in arg_names: in_node = graph._find_node_by_name( - op_node.inputs, arg_name) + op_node.inputs, arg_name + ) if arg_name in dequantized_vars_map: quant_var_node = dequantized_vars_map[arg_name] else: - quant_var_node, _ = \ - self._inser_quant_dequant_moving_average_abs_max_op( - graph, in_node, self._quant_bits) + ( + quant_var_node, + _, + ) = self._inser_quant_dequant_moving_average_abs_max_op( + graph, in_node, self._quant_bits + ) dequantized_vars_map[arg_name] = quant_var_node - graph.update_input_link(in_node, quant_var_node, - op_node) + graph.update_input_link( + in_node, quant_var_node, op_node + ) t.update() # Backward stage, update input link @@ -1742,35 +1960,45 @@ class AddQuantDequantPass(object): for input_name in op_node.input_arg_names(): if input_name in dequantized_vars_map: in_node = graph._find_node_by_name( - op_node.inputs, input_name) + op_node.inputs, input_name + ) dequant_var_node = dequantized_vars_map[input_name] - graph.update_input_link(in_node, dequant_var_node, - op_node) + graph.update_input_link( + in_node, dequant_var_node, op_node + ) graph.resolve_hazard() return graph - def _inser_quant_dequant_moving_average_abs_max_op(self, graph, var_node, - quant_bits): - """Insert fake_quantize_dequantize_moving_average_abs_max op. 
- """ - quant_var_node = graph.create_var_node(name="{}.quant_dequant".format( - var_node.name()), - var_type=var_node.type(), - shape=var_node.shape(), - var_dtype=var_node.dtype()) + def _inser_quant_dequant_moving_average_abs_max_op( + self, graph, var_node, quant_bits + ): + """Insert fake_quantize_dequantize_moving_average_abs_max op.""" + quant_var_node = graph.create_var_node( + name="{}.quant_dequant".format(var_node.name()), + var_type=var_node.type(), + shape=var_node.shape(), + var_dtype=var_node.dtype(), + ) scale_name = "{}.quant_dequant@scale".format(var_node.name()) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) try: - if self._scale_dict is not None and var_node.name( - ) in self._scale_dict.keys(): - scale_value = np.array([self._scale_dict[var_node.name()]], - dtype=data_type) + if ( + self._scale_dict is not None + and var_node.name() in self._scale_dict.keys() + ): + scale_value = np.array( + [self._scale_dict[var_node.name()]], dtype=data_type + ) else: scale_value = np.array( self._scope.find_var(scale_name).get_tensor(), - dtype=data_type) + dtype=data_type, + ) except: scale_value = np.array([_SCALE_DEFAULT_VALUE], dtype=data_type) @@ -1778,7 +2006,8 @@ class AddQuantDequantPass(object): name="{}.quant_dequant@scale".format(var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) _init_var_node(scale_in_node, scale_value, self._scope, self._place) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) @@ -1789,22 +2018,37 @@ class AddQuantDequantPass(object): name=unique_name.generate('quant_dequant.state'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - shape=[1]) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' - _init_var_node(state_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) + _init_var_node( + state_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) accum_in_node = graph.create_persistable_node( name=unique_name.generate('quant_dequant.accum'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - shape=[1]) - _init_var_node(accum_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + _init_var_node( + accum_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) state_out_node = graph.create_var_node_from_desc( - state_in_node.var()) + state_in_node.var() + ) accum_out_node = graph.create_var_node_from_desc( - accum_in_node.var()) + accum_in_node.var() + ) ins['InState'] = state_in_node ins['InAccum'] = accum_in_node @@ -1815,14 +2059,15 @@ class AddQuantDequantPass(object): 'bit_length': quant_bits, 'moving_rate': self._moving_rate, 'is_test': self._is_test, - 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward, } quant_op_node = graph.create_op_node( op_type='fake_quantize_dequantize_moving_average_abs_max', attrs=attrs, inputs=ins, - outputs=outs) + outputs=outs, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_in_node, quant_op_node) @@ -1856,15 +2101,17 @@ class InsertQuantizeLinear(object): scale_dict(dict, optional): calibration ranges of 
tensors output. """ - def __init__(self, - place, - scope, - quant_bits=8, - quant_axis=-1, - channel_wise=False, - moving_rate=0.9, - is_test=True, - scale_dict=None): + def __init__( + self, + place, + scope, + quant_bits=8, + quant_axis=-1, + channel_wise=False, + moving_rate=0.9, + is_test=True, + scale_dict=None, + ): self._place = place self._scope = scope self.quant_bits = quant_bits @@ -1881,32 +2128,42 @@ class InsertQuantizeLinear(object): name=self._quantized_var_name(var_name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' + var_dtype=var_node.dtype(), + ) + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) scale_name = self._quantized_scale_name(var_name) if self.channel_wise: scale_var_shape = var_node.shape()[self.quant_axis] scale_var_type = core.VarDesc.VarType.LOD_TENSOR - init_scale_value = np.ones(scale_var_shape, - dtype=data_type) * _SCALE_DEFAULT_VALUE + init_scale_value = ( + np.ones(scale_var_shape, dtype=data_type) * _SCALE_DEFAULT_VALUE + ) else: scale_var_shape = 1 scale_var_type = var_node.type() init_scale_value = np.array([_SCALE_DEFAULT_VALUE], dtype=data_type) - if self._scale_dict is not None and var_node.name( - ) in self._scale_dict.keys(): - init_scale_value = np.array([self._scale_dict[var_node.name()]], - dtype=data_type) + if ( + self._scale_dict is not None + and var_node.name() in self._scale_dict.keys() + ): + init_scale_value = np.array( + [self._scale_dict[var_node.name()]], dtype=data_type + ) scale_var_node = graph.create_persistable_node( name=scale_name, var_type=scale_var_type, shape=[scale_var_shape], - var_dtype=var_node.dtype()) - _init_var_node(scale_var_node, init_scale_value, self._scope, - self._place) + var_dtype=var_node.dtype(), + ) + _init_var_node( + scale_var_node, init_scale_value, self._scope, self._place + ) zero_point_node = None if zero_point_node is None: @@ -1914,10 +2171,14 @@ class InsertQuantizeLinear(object): name=self._zero_point_name(quant_var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=scale_var_node.shape(), - var_dtype=core.VarDesc.VarType.INT32) - _init_var_node(zero_point_node, - np.zeros(scale_var_node.shape(), dtype="int32"), - self._scope, self._place) + var_dtype=core.VarDesc.VarType.INT32, + ) + _init_var_node( + zero_point_node, + np.zeros(scale_var_node.shape(), dtype="int32"), + self._scope, + self._place, + ) inputs = {"X": var_node, "Scale": scale_var_node} if zero_point_node is not None: @@ -1928,27 +2189,43 @@ class InsertQuantizeLinear(object): outputs = {"Y": quant_var_node} if not self._is_test: scale_out_node = graph.create_var_node_from_desc( - scale_var_node.var()) + scale_var_node.var() + ) state_in_node = graph.create_persistable_node( name=unique_name.generate('state'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - shape=[1]) - data_type = 'float64' if var_node.dtype( - ) == core.VarDesc.VarType.FP64 else 'float32' - _init_var_node(state_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + data_type = ( + 'float64' + if var_node.dtype() == core.VarDesc.VarType.FP64 + else 'float32' + ) + _init_var_node( + state_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) accum_in_node = graph.create_persistable_node( name=unique_name.generate('accum'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), - 
shape=[1]) - _init_var_node(accum_in_node, np.ones([1], dtype=data_type), - self._scope, self._place) + shape=[1], + ) + _init_var_node( + accum_in_node, + np.ones([1], dtype=data_type), + self._scope, + self._place, + ) state_out_node = graph.create_var_node_from_desc( - state_in_node.var()) + state_in_node.var() + ) accum_out_node = graph.create_var_node_from_desc( - accum_in_node.var()) + accum_in_node.var() + ) outputs["OutScale"] = scale_out_node inputs['InState'] = state_in_node @@ -1958,10 +2235,12 @@ class InsertQuantizeLinear(object): attrs["is_test"] = self._is_test attrs['moving_rate'] = self._moving_rate - quant_op_node = graph.create_op_node(op_type="quantize_linear", - attrs=attrs, - inputs=inputs, - outputs=outputs) + quant_op_node = graph.create_op_node( + op_type="quantize_linear", + attrs=attrs, + inputs=inputs, + outputs=outputs, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_var_node, quant_op_node) @@ -1983,7 +2262,8 @@ class InsertQuantizeLinear(object): name=self._dequantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) + var_dtype=var_node.dtype(), + ) zero_point_node = None if zero_point_node is None: @@ -1991,10 +2271,14 @@ class InsertQuantizeLinear(object): name=self._zero_point_name(dequant_var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=scale_var_node.shape(), - var_dtype=core.VarDesc.VarType.INT32) - _init_var_node(zero_point_node, - np.zeros(scale_var_node.shape(), dtype="int32"), - self._scope, self._place) + var_dtype=core.VarDesc.VarType.INT32, + ) + _init_var_node( + zero_point_node, + np.zeros(scale_var_node.shape(), dtype="int32"), + self._scope, + self._place, + ) inputs = {"X": var_node, "Scale": scale_var_node} if zero_point_node is not None: @@ -2003,10 +2287,12 @@ class InsertQuantizeLinear(object): attrs = {"quant_axis": self.quant_axis, "bit_length": self.quant_bits} attrs["op_role"] = core.op_proto_and_checker_maker.OpRole.Forward - quant_op_node = graph.create_op_node(op_type="dequantize_linear", - attrs=attrs, - inputs=inputs, - outputs={"Y": dequant_var_node}) + quant_op_node = graph.create_op_node( + op_type="dequantize_linear", + attrs=attrs, + inputs=inputs, + outputs={"Y": dequant_var_node}, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_var_node, quant_op_node) @@ -2046,24 +2332,26 @@ class QuantizationTransformPassV2(QuantizationTransformPass): the quantized ops's inputs. It is used in the new format of quantization. 
""" - def __init__(self, - scope=None, - place=None, - weight_bits=8, - activation_bits=8, - activation_quantize_type='abs_max', - weight_quantize_type='abs_max', - window_size=10000, - moving_rate=0.9, - skip_pattern=['skip_quant'], - quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'], - weight_quantize_func=None, - act_quantize_func=None, - weight_preprocess_func=None, - act_preprocess_func=None, - optimizer_func=None, - executor=None, - is_test=None): + def __init__( + self, + scope=None, + place=None, + weight_bits=8, + activation_bits=8, + activation_quantize_type='abs_max', + weight_quantize_type='abs_max', + window_size=10000, + moving_rate=0.9, + skip_pattern=['skip_quant'], + quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'], + weight_quantize_func=None, + act_quantize_func=None, + weight_preprocess_func=None, + act_preprocess_func=None, + optimizer_func=None, + executor=None, + is_test=None, + ): r""" Args: scope(paddle.Scope): When activation use 'range_abs_max' as the quantize @@ -2149,21 +2437,26 @@ class QuantizationTransformPassV2(QuantizationTransformPass): self._optimizer = optimizer_func self._exe = executor quant_type = [ - 'abs_max', 'channel_wise_abs_max', 'range_abs_max', - 'moving_average_abs_max' + 'abs_max', + 'channel_wise_abs_max', + 'range_abs_max', + 'moving_average_abs_max', ] - assert activation_quantize_type != 'channel_wise_abs_max', \ - "The activation quantization type does not support 'channel_wise_abs_max'." + assert ( + activation_quantize_type != 'channel_wise_abs_max' + ), "The activation quantization type does not support 'channel_wise_abs_max'." if activation_quantize_type not in quant_type: raise ValueError( "Unknown activation_quantize_type : '%s'. It can only be " - "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." % - (str(activation_quantize_type))) + "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." + % (str(activation_quantize_type)) + ) if weight_quantize_type not in quant_type: raise ValueError( "Unknown weight_quantize_type: '%s'. It can only be " "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' " - "or 'moving_average_abs_max'." % (str(weight_quantize_type))) + "or 'moving_average_abs_max'." % (str(weight_quantize_type)) + ) self._activation_quantize_type = activation_quantize_type self._weight_quantize_type = weight_quantize_type @@ -2172,8 +2465,9 @@ class QuantizationTransformPassV2(QuantizationTransformPass): self._quantizable_ops = quantizable_op_type for op in self._quantizable_ops: - assert op in utils._weight_supported_quantizable_op_type, \ + assert op in utils._weight_supported_quantizable_op_type, ( op + " is not supported for quantization." 
+ ) self._quantizable_grad_ops = [ '%s_grad' % (op) for op in self._quantizable_ops ] @@ -2191,13 +2485,16 @@ class QuantizationTransformPassV2(QuantizationTransformPass): def _quant_preprocess(self, op_node): user_skipped = False if isinstance(self._skip_pattern, list): - user_skipped = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") \ - for pattern in self._skip_pattern) + user_skipped = op_node.op().has_attr("op_namescope") and any( + pattern in op_node.op().attr("op_namescope") + for pattern in self._skip_pattern + ) elif isinstance(self._skip_pattern, str): - user_skipped = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find( - self._skip_pattern) != -1 + user_skipped = ( + op_node.op().has_attr("op_namescope") + and op_node.op().attr("op_namescope").find(self._skip_pattern) + != -1 + ) if user_skipped: op_node.op()._set_attr("skip_quant", True) @@ -2215,8 +2512,9 @@ class QuantizationTransformPassV2(QuantizationTransformPass): name = var_node.name() if name in self.processed_vars: continue - is_weight = True if var_node.name() in self.persistable_vars \ - else False + is_weight = ( + True if var_node.name() in self.persistable_vars else False + ) # if var node is weight and weight_preprocess_func is not None, # will insert weight preprocess func @@ -2225,13 +2523,13 @@ class QuantizationTransformPassV2(QuantizationTransformPass): # will insert activation preprocess func # to preorocess activation before quantization if is_weight and self._weight_preprocess_func is not None: - var_node = self._insert_func(graph, - self._weight_preprocess_func, - var_node, op) + var_node = self._insert_func( + graph, self._weight_preprocess_func, var_node, op + ) elif not is_weight and self._act_preprocess_func is not None: - var_node = self._insert_func(graph, - self._act_preprocess_func, - var_node, op) + var_node = self._insert_func( + graph, self._act_preprocess_func, var_node, op + ) # if var node is weight and weight_quantize_func is not None, # will insert weight quantize func to quantize and dequantize weight @@ -2239,26 +2537,36 @@ class QuantizationTransformPassV2(QuantizationTransformPass): # will insert act quantize func to quantize and dequantize activation if is_weight and self._weight_quantize_func is not None: target_out_node = self._insert_func( - graph, self._weight_quantize_func, var_node, op) + graph, self._weight_quantize_func, var_node, op + ) self.processed_vars.append(name) continue elif not is_weight and self._act_quantize_func is not None: - target_out_node = self._insert_func(graph, - self._act_quantize_func, - var_node, op) + target_out_node = self._insert_func( + graph, self._act_quantize_func, var_node, op + ) self.processed_vars.append(name) continue - quant_bits = self._weight_bits if var_node.name() in self.persistable_vars \ + quant_bits = ( + self._weight_bits + if var_node.name() in self.persistable_vars else self._activation_bits - quant_type = self._weight_quantize_type if is_weight \ + ) + quant_type = ( + self._weight_quantize_type + if is_weight else self._activation_quantize_type + ) quant_axis = -1 channel_wise = False if quant_type == 'channel_wise_abs_max': # Weight quantization channel_wise = True - quant_axis = 1 if op.name() in \ - utils._channelwise_quant_axis1_ops else 0 + quant_axis = ( + 1 + if op.name() in utils._channelwise_quant_axis1_ops + else 0 + ) insert_quant_pass = InsertQuantizeLinear( self._place, self._scope, @@ -2266,11 +2574,17 @@ class 
QuantizationTransformPassV2(QuantizationTransformPass): quant_axis=quant_axis, channel_wise=channel_wise, moving_rate=self._moving_rate, - is_test=self._is_test) - quant_var_node, scale_var_node = insert_quant_pass.insert_quant_op( - graph, var_node, var_name=name) + is_test=self._is_test, + ) + ( + quant_var_node, + scale_var_node, + ) = insert_quant_pass.insert_quant_op( + graph, var_node, var_name=name + ) dequant_var_node = insert_quant_pass.insert_dequant_op( - graph, quant_var_node, scale_var_node) + graph, quant_var_node, scale_var_node + ) self.dequantized_vars[name] = dequant_var_node graph.update_input_link(var_node, dequant_var_node, op) @@ -2304,8 +2618,9 @@ class QuantizationTransformPassV2(QuantizationTransformPass): Returns: None """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' if self._is_test is None: self._is_test = graph.is_test() @@ -2317,21 +2632,25 @@ class QuantizationTransformPassV2(QuantizationTransformPass): # Do the preproccess of quantization, such as skipping some ops # for not being quantized. for op in ops: - if op.name() in self._quantizable_ops or \ - op.name() in self._quantizable_grad_ops: + if ( + op.name() in self._quantizable_ops + or op.name() in self._quantizable_grad_ops + ): self._quant_preprocess(op) # Insert mapping table to solve the problem in saving inference model. graph.out_node_mapping_table = dict() # The process of _transform_forward and _transform_backward is needed in two for loops. # The loop for transforming the forward graph: - with tqdm(total=len(ops), - bar_format= - 'Adding quant op with weight:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=len(ops), + bar_format='Adding quant op with weight:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for op in ops: if op.name() in self._quantizable_ops: - if not self._is_skip_quant(graph, - op) and self._has_weight(op): + if not self._is_skip_quant(graph, op) and self._has_weight( + op + ): self._transform_forward(graph, op) t.update() # The loop for renaming the inputs of backward op. @@ -2350,16 +2669,18 @@ class AddQuantDequantPassV2(object): # To be compatible with PaddleSlim, not remove _activation_type for now _activation_type = ["relu", "relu6", "leaky_relu", "tanh", "swish"] - def __init__(self, - scope=None, - place=None, - moving_rate=0.9, - quant_bits=8, - skip_pattern=["skip_quant"], - quantizable_op_type=["elementwise_add", "pool2d"], - is_full_quantized=False, - is_test=None, - scale_dict=None): + def __init__( + self, + scope=None, + place=None, + moving_rate=0.9, + quant_bits=8, + skip_pattern=["skip_quant"], + quantizable_op_type=["elementwise_add", "pool2d"], + is_full_quantized=False, + is_test=None, + scale_dict=None, + ): """ Args: scope(paddle.Scope): The scope is used to initialize these new parameters. @@ -2409,8 +2730,9 @@ class AddQuantDequantPassV2(object): else: self._quantizable_op_type = quantizable_op_type for op_type in quantizable_op_type: - assert op_type in utils._act_supported_quantizable_op_type, \ + assert op_type in utils._act_supported_quantizable_op_type, ( op_type + " is not supported for quantization." + ) self._quantizable_grad_op_type = [ '%s_grad' % (op) for op in self._quantizable_op_type ] @@ -2429,8 +2751,9 @@ class AddQuantDequantPassV2(object): Returns: None """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' 
+ assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' if self._is_test is None: self._is_test = graph.is_test() dequantized_vars_map = collections.OrderedDict() @@ -2441,28 +2764,40 @@ class AddQuantDequantPassV2(object): # Forward stage, insert quant_dequant op all_op_nodes = graph.all_op_nodes() - with tqdm(total=len(all_op_nodes), - bar_format= - 'Adding quant activation op:|{bar}| {n_fmt}/{total_fmt}', - ncols=80) as t: + with tqdm( + total=len(all_op_nodes), + bar_format='Adding quant activation op:|{bar}| {n_fmt}/{total_fmt}', + ncols=80, + ) as t: for op_node in all_op_nodes: if op_node.name() in self._quantizable_op_type: is_skip = False if isinstance(self._skip_pattern, list): - is_skip = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") for pattern in self._skip_pattern) + is_skip = op_node.op().has_attr("op_namescope") and any( + pattern in op_node.op().attr("op_namescope") + for pattern in self._skip_pattern + ) elif isinstance(self._skip_pattern, str): - is_skip = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find(self._skip_pattern) != -1 - is_quantized = op_node.op().has_attr("quantization_type") and \ - op_node.op().attr("quantization_type") == "qat_with_weight" + is_skip = ( + op_node.op().has_attr("op_namescope") + and op_node.op() + .attr("op_namescope") + .find(self._skip_pattern) + != -1 + ) + is_quantized = ( + op_node.op().has_attr("quantization_type") + and op_node.op().attr("quantization_type") + == "qat_with_weight" + ) if is_skip or is_quantized: continue arg_names = utils._get_op_input_var_names(op_node) for arg_name in arg_names: in_node = graph._find_node_by_name( - op_node.inputs, arg_name) + op_node.inputs, arg_name + ) if in_node.persistable(): continue if arg_name in dequantized_vars_map: @@ -2476,14 +2811,23 @@ class AddQuantDequantPassV2(object): channel_wise=False, moving_rate=self._moving_rate, is_test=self._is_test, - scale_dict=self._scale_dict) - quant_var_node, scale_var_node = insert_quant_pass.insert_quant_op( - graph, in_node) - dequant_var_node = insert_quant_pass.insert_dequant_op( - graph, quant_var_node, scale_var_node) + scale_dict=self._scale_dict, + ) + ( + quant_var_node, + scale_var_node, + ) = insert_quant_pass.insert_quant_op( + graph, in_node + ) + dequant_var_node = ( + insert_quant_pass.insert_dequant_op( + graph, quant_var_node, scale_var_node + ) + ) dequantized_vars_map[arg_name] = dequant_var_node - graph.update_input_link(in_node, dequant_var_node, - op_node) + graph.update_input_link( + in_node, dequant_var_node, op_node + ) t.update() # Backward stage, update input link @@ -2492,10 +2836,12 @@ class AddQuantDequantPassV2(object): for input_name in op_node.input_arg_names(): if input_name in dequantized_vars_map: in_node = graph._find_node_by_name( - op_node.inputs, input_name) + op_node.inputs, input_name + ) dequant_var_node = dequantized_vars_map[input_name] - graph.update_input_link(in_node, dequant_var_node, - op_node) + graph.update_input_link( + in_node, dequant_var_node, op_node + ) return graph @@ -2536,13 +2882,16 @@ class ReplaceFakeQuantDequantPass(object): assert self._place != None, "place must not be None." def apply(self, graph): - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
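For the new quantize_linear/dequantize_linear format, the two V2 passes above are driven in much the same way as their V1 counterparts; a sketch under the same assumptions (import path, `scope`, `place`, `train_graph`):

from paddle.fluid.contrib.slim.quantization import (
    QuantizationTransformPassV2,
    AddQuantDequantPassV2,
)

# Weighted ops (conv2d, depthwise_conv2d, mul): wrap inputs with
# quantize_linear/dequantize_linear via InsertQuantizeLinear.
QuantizationTransformPassV2(
    scope=scope,
    place=place,
    activation_quantize_type='moving_average_abs_max',
    weight_quantize_type='channel_wise_abs_max',
).apply(train_graph)

# Activation-only ops such as elementwise_add and pool2d get the same treatment.
AddQuantDequantPassV2(
    scope=scope,
    place=place,
    quantizable_op_type=["elementwise_add", "pool2d"],
).apply(train_graph)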
fake_quant_dequant_ops = [] for op in graph.all_op_nodes(): - if op.name() in _fake_quant_dequant_op_list or op.name( - ) == "moving_average_abs_max_scale": + if ( + op.name() in _fake_quant_dequant_op_list + or op.name() == "moving_average_abs_max_scale" + ): fake_quant_dequant_ops.append(op) for _op in fake_quant_dequant_ops: @@ -2555,13 +2904,18 @@ class ReplaceFakeQuantDequantPass(object): def _replace_op(self, graph, op): x_node = graph._find_node_by_name(op.inputs, op.input("X")[0]) out_node = graph._find_node_by_name(op.outputs, op.output("Out")[0]) - scale_node = graph._find_node_by_name(op.outputs, - op.output("OutScale")[0]) - - quant_axis = op.op().attr("quant_axis") if op.op().has_attr( - "quant_axis") else -1 - bit_length = op.op().attr("bit_length") if op.op().has_attr( - "bit_length") else self._quant_bits + scale_node = graph._find_node_by_name( + op.outputs, op.output("OutScale")[0] + ) + + quant_axis = ( + op.op().attr("quant_axis") if op.op().has_attr("quant_axis") else -1 + ) + bit_length = ( + op.op().attr("bit_length") + if op.op().has_attr("bit_length") + else self._quant_bits + ) zero_point_node = None quanted_node = x_node @@ -2570,43 +2924,46 @@ class ReplaceFakeQuantDequantPass(object): name=self._zero_point_name(quanted_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=scale_node.shape(), - var_dtype=core.VarDesc.VarType.INT32) - _init_var_node(zero_point_node, - np.zeros(scale_node.shape(), dtype="int32"), - self._scope, self._place) - - quant_var_node = graph.create_var_node(name=self._quantized_var_name( - x_node.name()), - var_type=x_node.type(), - shape=x_node.shape(), - var_dtype=x_node.dtype()) - quant_op_node = graph.create_op_node(op_type="quantize_linear", - attrs={ - "quant_axis": quant_axis, - "bit_length": bit_length - }, - inputs={ - "X": x_node, - "Scale": scale_node, - "ZeroPoint": zero_point_node - }, - outputs={"Y": quant_var_node}) + var_dtype=core.VarDesc.VarType.INT32, + ) + _init_var_node( + zero_point_node, + np.zeros(scale_node.shape(), dtype="int32"), + self._scope, + self._place, + ) + + quant_var_node = graph.create_var_node( + name=self._quantized_var_name(x_node.name()), + var_type=x_node.type(), + shape=x_node.shape(), + var_dtype=x_node.dtype(), + ) + quant_op_node = graph.create_op_node( + op_type="quantize_linear", + attrs={"quant_axis": quant_axis, "bit_length": bit_length}, + inputs={ + "X": x_node, + "Scale": scale_node, + "ZeroPoint": zero_point_node, + }, + outputs={"Y": quant_var_node}, + ) graph.link_to(x_node, quant_op_node) graph.link_to(scale_node, quant_op_node) if zero_point_node is not None: graph.link_to(zero_point_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) - dequant_op_node = graph.create_op_node(op_type="dequantize_linear", - attrs={ - "quant_axis": quant_axis, - "bit_length": bit_length - }, - inputs={ - "X": quant_var_node, - "Scale": scale_node, - "ZeroPoint": zero_point_node - }, - outputs={"Y": out_node}) + dequant_op_node = graph.create_op_node( + op_type="dequantize_linear", + attrs={"quant_axis": quant_axis, "bit_length": bit_length}, + inputs={ + "X": quant_var_node, + "Scale": scale_node, + "ZeroPoint": zero_point_node, + }, + outputs={"Y": out_node}, + ) graph.link_to(quant_var_node, dequant_op_node) graph.link_to(scale_node, dequant_op_node) if zero_point_node is not None: @@ -2657,12 +3014,14 @@ class QuantWeightPass(object): quant_weight_pass.apply(graph) """ - def __init__(self, - scope, - place, - bias_correction=False, - quant_bits=8, - save_int_weight=True): + 
def __init__( + self, + scope, + place, + bias_correction=False, + quant_bits=8, + save_int_weight=True, + ): self._place = _get_paddle_place(place) self._scope = scope self._bias_correction = bias_correction @@ -2672,8 +3031,9 @@ class QuantWeightPass(object): assert self._place != None, "place must not be None." def apply(self, graph): - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' fake_quant_ops_for_weight = [] fake_quant_ops = [ @@ -2682,17 +3042,21 @@ class QuantWeightPass(object): for _op in fake_quant_ops: x_node = graph._find_node_by_name(_op.inputs, _op.input("X")[0]) if x_node.persistable(): - scale_node = graph._find_node_by_name(_op.inputs, - _op.input("Scale")[0]) + scale_node = graph._find_node_by_name( + _op.inputs, _op.input("Scale")[0] + ) zero_point_node = graph._find_node_by_name( - _op.inputs, - _op.input("ZeroPoint")[0]) - out_node = graph._find_node_by_name(_op.outputs, - _op.output("Y")[0]) + _op.inputs, _op.input("ZeroPoint")[0] + ) + out_node = graph._find_node_by_name( + _op.outputs, _op.output("Y")[0] + ) scale_v = self._load_var(scale_node.name()) - assert scale_v.ndim in [1, 2 - ], "the dim of scale_v should be 1 or 2" + assert scale_v.ndim in [ + 1, + 2, + ], "the dim of scale_v should be 1 or 2" if scale_v.ndim == 2: scale_v = scale_v[0] if scale_v.size == 1 and _op.name() == 'abs_max': @@ -2702,24 +3066,28 @@ class QuantWeightPass(object): param_v = self._load_var(x_node.name()) quant_axis = _op.op().attr("quant_axis") bits_length = _op.op().attr("bit_length") - quantized_param_v = utils.quant_tensor(param_v.copy(), - scale_v, - quant_axis, - bits_length, - onnx_format=True) + quantized_param_v = utils.quant_tensor( + param_v.copy(), + scale_v, + quant_axis, + bits_length, + onnx_format=True, + ) if self._bias_correction == True: quantized_param_v = utils.bias_correction_w( param_v, quantized_param_v, scale_v, quant_axis, - weight_bits=bits_length) + weight_bits=bits_length, + ) if self._save_int_weight: # cast weight type to int if self._quant_bits == 8: save_weight_dtype = np.int8 quantized_param_v = quantized_param_v.astype( - save_weight_dtype) + save_weight_dtype + ) self._restore_var(x_node.name(), quantized_param_v) for next_op_node in out_node.outputs: @@ -2739,8 +3107,10 @@ class QuantWeightPass(object): all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n - for n in filter(lambda node: node.node not in all_used_vars, - graph.all_var_nodes()) + for n in filter( + lambda node: node.node not in all_used_vars, + graph.all_var_nodes(), + ) } graph.safe_remove_nodes(all_unused_vars) @@ -2775,24 +3145,29 @@ class AddQuantDequantForInferencePass(object): Args: graph(IrGraph): the target graph. """ - assert isinstance(graph, - IrGraph), 'graph must be the instance of IrGraph.' + assert isinstance( + graph, IrGraph + ), 'graph must be the instance of IrGraph.' 
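At export time the new-format graph is usually finished off with the two passes above; one plausible ordering is sketched below. The ReplaceFakeQuantDequantPass arguments are inferred from its asserts earlier in this hunk, so treat them as an assumption:

from paddle.fluid.contrib.slim.quantization import (
    ReplaceFakeQuantDequantPass,
    QuantWeightPass,
)

# Rewrite leftover fake quant/dequant ops into quantize_linear/dequantize_linear pairs.
ReplaceFakeQuantDequantPass(scope, place, quant_bits=8).apply(test_graph)

# Quantize the weights in place (optionally with bias correction) and keep them as int8.
QuantWeightPass(scope, place, bias_correction=False, quant_bits=8).apply(test_graph)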
dequant_node_map = {} dequantized_vars_map = collections.OrderedDict() for op_node in graph.all_op_nodes(): if op_node.name() in self._teller_set: var_names = utils._get_op_output_var_names(op_node) for var_name in var_names: - out_node = graph._find_node_by_name(op_node.outputs, - var_name) - if out_node.dtype() not in \ - [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + out_node = graph._find_node_by_name( + op_node.outputs, var_name + ) + if out_node.dtype() not in [ + core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32, + ]: continue if var_name in dequantized_vars_map: dequant_var_node = dequantized_vars_map[var_name] else: dequant_var_node = self._insert_quant_dequant_op( - graph, out_node) + graph, out_node + ) dequantized_vars_map[var_name] = dequant_var_node dequant_node_map[var_name] = dequant_var_node @@ -2805,10 +3180,11 @@ class AddQuantDequantForInferencePass(object): for var_name in var_names: if var_name in dequant_node_map: in_node = graph._find_node_by_name( - op_node.inputs, var_name) - graph.update_input_link(in_node, - dequant_node_map[var_name], - op_node) + op_node.inputs, var_name + ) + graph.update_input_link( + in_node, dequant_node_map[var_name], op_node + ) return graph @@ -2826,22 +3202,29 @@ class AddQuantDequantForInferencePass(object): name="{}.quantized".format(var_name), var_type=var_node.type(), shape=var_node.shape(), - var_dtype=var_node.dtype()) - scale_var_node = graph._find_node_by_name(graph.all_persistable_nodes(), - self._scale_name(var_name)) + var_dtype=var_node.dtype(), + ) + scale_var_node = graph._find_node_by_name( + graph.all_persistable_nodes(), self._scale_name(var_name) + ) try: zero_point_node = graph._find_node_by_name( graph.all_persistable_nodes(), - "{}@zero_point".format(quant_var_node.name())) + "{}@zero_point".format(quant_var_node.name()), + ) except: zero_point_node = graph.create_persistable_node( name="{}@zero_point".format(quant_var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=scale_var_node.shape(), - var_dtype=core.VarDesc.VarType.INT32) - _init_var_node(zero_point_node, - np.zeros(scale_var_node.shape(), dtype="int32"), - self._scope, self._place) + var_dtype=core.VarDesc.VarType.INT32, + ) + _init_var_node( + zero_point_node, + np.zeros(scale_var_node.shape(), dtype="int32"), + self._scope, + self._place, + ) inputs = {"X": var_node, "Scale": scale_var_node} if zero_point_node is not None: @@ -2851,10 +3234,12 @@ class AddQuantDequantForInferencePass(object): attrs["op_role"] = core.op_proto_and_checker_maker.OpRole.Forward outputs = {"Y": quant_var_node} - quant_op_node = graph.create_op_node(op_type="quantize_linear", - attrs=attrs, - inputs=inputs, - outputs=outputs) + quant_op_node = graph.create_op_node( + op_type="quantize_linear", + attrs=attrs, + inputs=inputs, + outputs=outputs, + ) graph.link_to(var_node, quant_op_node) graph.link_to(scale_var_node, quant_op_node) @@ -2867,7 +3252,8 @@ class AddQuantDequantForInferencePass(object): name="{}.dequantized".format(quant_var_node.name()), var_type=quant_var_node.type(), shape=quant_var_node.shape(), - var_dtype=quant_var_node.dtype()) + var_dtype=quant_var_node.dtype(), + ) inputs = {"X": quant_var_node, "Scale": scale_var_node} if zero_point_node is not None: @@ -2876,10 +3262,12 @@ class AddQuantDequantForInferencePass(object): attrs = {"quant_axis": -1, "bit_length": self._quant_bits} attrs["op_role"] = core.op_proto_and_checker_maker.OpRole.Forward - dequant_op_node = graph.create_op_node(op_type="dequantize_linear", - attrs=attrs, - 
inputs=inputs, - outputs={"Y": dequant_var_node}) + dequant_op_node = graph.create_op_node( + op_type="dequantize_linear", + attrs=attrs, + inputs=inputs, + outputs={"Y": dequant_var_node}, + ) graph.link_to(quant_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) diff --git a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py index ce20d2e695ad2fe9ce9f75b72871fcb979801dc9..12f6f3c53d262bc4894f9d8e2689b735866a71d4 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py @@ -24,9 +24,9 @@ from ....param_attr import ParamAttr from ....initializer import Constant from ....log_helper import get_logger -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def find_next_ops(block, var_name): @@ -45,24 +45,24 @@ def load_variable_data(scope, var_name): Load variable value from scope ''' var_node = scope.find_var(var_name) - assert var_node is not None, \ - "Cannot find " + var_name + " in scope." + assert var_node is not None, "Cannot find " + var_name + " in scope." return np.array(var_node.get_tensor()) class QuantizeTranspilerV2(object): - - def __init__(self, - weight_bits=8, - activation_bits=8, - weight_quantize_type='abs_max', - activation_quantize_type='moving_average_abs_max', - quantizable_op_type=[ - 'conv2d', - 'depthwise_conv2d', - 'mul', - ], - skip_pattern=['skip_quant']): + def __init__( + self, + weight_bits=8, + activation_bits=8, + weight_quantize_type='abs_max', + activation_quantize_type='moving_average_abs_max', + quantizable_op_type=[ + 'conv2d', + 'depthwise_conv2d', + 'mul', + ], + skip_pattern=['skip_quant'], + ): """ Apply fake quant for the quantized ops. @@ -81,18 +81,26 @@ class QuantizeTranspilerV2(object): self._weight_bits = weight_bits self._activation_bits = activation_bits - assert activation_quantize_type in \ - ["abs_max", "moving_average_abs_max"], \ - "activation_quantize_type should be abs_max " \ + assert activation_quantize_type in [ + "abs_max", + "moving_average_abs_max", + ], ( + "activation_quantize_type should be abs_max " "or moving_average_abs_max for now." - assert weight_quantize_type in ["abs_max", "channel_wise_abs_max"], \ - "weight_quantize_type should be abs_max or channel_wise_abs_max." + ) + assert weight_quantize_type in [ + "abs_max", + "channel_wise_abs_max", + ], "weight_quantize_type should be abs_max or channel_wise_abs_max." 
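# Formatting note (illustrative sketch, not part of this patch): black wraps a
# long ``assert <condition>, <message>`` by parenthesizing the condition or the
# message separately, never the pair itself, because ``assert (cond, msg)``
# would build a two-element tuple that is always truthy and silently disable
# the check. A minimal example of the safe wrapped form used throughout this
# file; ``weight_quantize_type`` here is a placeholder value:
weight_quantize_type = 'abs_max'
assert weight_quantize_type in [
    'abs_max',
    'channel_wise_abs_max',
], "weight_quantize_type should be abs_max or channel_wise_abs_max."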
self._activation_quantize_type = activation_quantize_type self._weight_quantize_type = weight_quantize_type for op_type in quantizable_op_type: - assert op_type in ['conv2d', 'depthwise_conv2d', 'mul'], \ - "Quantize op should be ['conv2d', 'depthwise_conv2d', 'mul']" + assert op_type in [ + 'conv2d', + 'depthwise_conv2d', + 'mul', + ], "Quantize op should be ['conv2d', 'depthwise_conv2d', 'mul']" self._quantizable_ops = quantizable_op_type self._quantizable_grad_ops = [ '%s_grad' % (op) for op in self._quantizable_ops @@ -115,10 +123,12 @@ class QuantizeTranspilerV2(object): Returns: None """ - assert isinstance(program, Program), \ - "program must be the instance of Program" - assert isinstance(startup_program, Program), \ - "startup_program must be the instance of Program" + assert isinstance( + program, Program + ), "program must be the instance of Program" + assert isinstance( + startup_program, Program + ), "startup_program must be the instance of Program" var_rename_map = [ collections.OrderedDict() for _ in range(len(program.blocks)) @@ -127,16 +137,19 @@ class QuantizeTranspilerV2(object): for block in program.blocks: ops = list(block.ops) for op in ops: - if op.type in self._quantizable_ops and \ - (not self._is_skip_quant(op)): - self._transform_forward(block, op, var_rename_map, - is_test) + if op.type in self._quantizable_ops and ( + not self._is_skip_quant(op) + ): + self._transform_forward( + block, op, var_rename_map, is_test + ) for block in program.blocks: ops = list(block.ops) for op in ops: - if op.type in self._quantizable_grad_ops and \ - (not self._is_skip_quant(op)): + if op.type in self._quantizable_grad_ops and ( + not self._is_skip_quant(op) + ): self._transform_backward(block, op, var_rename_map) def convert(self, test_program, scope=None): @@ -153,8 +166,10 @@ class QuantizeTranspilerV2(object): for block in test_program.blocks: for op in block.ops: - if op.has_attr("quantization_type") \ - and op.attr("quantization_type") == "qat_with_weight": + if ( + op.has_attr("quantization_type") + and op.attr("quantization_type") == "qat_with_weight" + ): # quant op -> var1 -> fake op -> var2 assert len(op.output_arg_names) == 1 var1_name = op.output_arg_names[0] @@ -188,29 +203,40 @@ class QuantizeTranspilerV2(object): else: in_var = block.var(in_name) target_dtype = [ - core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP16 + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP16, ] if in_var.dtype not in target_dtype: continue - quant_bits = self._weight_bits if in_var.persistable \ - else self._activation_bits - quant_type = self._weight_quantize_type if in_var.persistable \ - else self._activation_quantize_type + quant_bits = ( + self._weight_bits + if in_var.persistable + else self._activation_bits + ) + quant_type = ( + self._weight_quantize_type + if in_var.persistable + else self._activation_quantize_type + ) if quant_type == "abs_max": new_var = self._insert_abs_max_fq_op( - block, idx, in_var, quant_bits) + block, idx, in_var, quant_bits + ) elif quant_type == "moving_average_abs_max": new_var = self._insert_ma_abs_max_fq_op( - block, idx, in_var, quant_bits, is_test) + block, idx, in_var, quant_bits, is_test + ) elif quant_type == "channel_wise_abs_max": ch_axis = 1 if op.type in self._out_ch_axis1_ops else 0 new_var = self._insert_pc_abs_max_fq_op( - block, idx, in_var, quant_bits, ch_axis) + block, idx, in_var, quant_bits, ch_axis + ) else: - _logger.error("Don't support the quant_type: %s" % - quant_type) + _logger.error( + "Don't support the quant_type: %s" 
% quant_type + ) continue new_in_name = new_var.name @@ -225,7 +251,8 @@ class QuantizeTranspilerV2(object): idx = block.ops.index(op) out_var = block.var(out_name) new_out_var = self._insert_ma_abs_max_scale_op( - block, idx + 1, out_var, is_test, True) + block, idx + 1, out_var, is_test, True + ) for next_op in next_ops: if "_grad" not in next_op.type: @@ -237,13 +264,15 @@ class QuantizeTranspilerV2(object): """ user_skipped = False if isinstance(self._skip_pattern, list): - user_skipped = op.has_attr("op_namescope") and \ - any(pattern in op.attr("op_namescope") \ - for pattern in self._skip_pattern) + user_skipped = op.has_attr("op_namescope") and any( + pattern in op.attr("op_namescope") + for pattern in self._skip_pattern + ) elif isinstance(self._skip_pattern, str): - user_skipped = op.has_attr("op_namescope") and \ - op.attr("op_namescope").find( - self._skip_pattern) != -1 + user_skipped = ( + op.has_attr("op_namescope") + and op.attr("op_namescope").find(self._skip_pattern) != -1 + ) return user_skipped def _transform_backward(self, block, op, var_rename_map): @@ -259,75 +288,92 @@ class QuantizeTranspilerV2(object): op._rename_input(name, new_var_name) no_dequanted_input_vars = False if no_dequanted_input_vars: - raise ValueError("There is no dequanted inputs for op %s." % - (op.type)) + raise ValueError( + "There is no dequanted inputs for op %s." % (op.type) + ) def _insert_abs_max_fq_op(self, block, idx, in_var, quant_bits): """ Inset abs max fake quant op. """ - quant_dequant_var = block.create_var(type=in_var.type, - name="{}.quant_dequant".format( - in_var.name), - shape=in_var.shape, - dtype=in_var.dtype) - scale_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.quant_dequant.scale".format(in_var.name), - initializer=Constant(0.), - trainable=False), - shape=[1], - dtype=in_var.dtype) + quant_dequant_var = block.create_var( + type=in_var.type, + name="{}.quant_dequant".format(in_var.name), + shape=in_var.shape, + dtype=in_var.dtype, + ) + scale_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.quant_dequant.scale".format(in_var.name), + initializer=Constant(0.0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) scale_var.stop_gradient = True inputs = {'X': in_var} outputs = {'Out': quant_dequant_var, 'OutScale': scale_var} attrs = {'bit_length': quant_bits} - block._insert_op(idx, - type='fake_quantize_dequantize_abs_max', - attrs=attrs, - inputs=inputs, - outputs=outputs) + block._insert_op( + idx, + type='fake_quantize_dequantize_abs_max', + attrs=attrs, + inputs=inputs, + outputs=outputs, + ) return quant_dequant_var def _insert_ma_abs_max_fq_op(self, block, idx, in_var, quant_bits, is_test): """ Insert moving average abs max fake quant op. 
""" - quant_dequant_var = block.create_var(type=in_var.type, - name="{}.quant_dequant".format( - in_var.name), - shape=in_var.shape, - dtype=in_var.dtype) - - scale_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.quant_dequant.scale".format(in_var.name), - initializer=Constant(0.), - trainable=False), - shape=[1], - dtype=in_var.dtype) + quant_dequant_var = block.create_var( + type=in_var.type, + name="{}.quant_dequant".format(in_var.name), + shape=in_var.shape, + dtype=in_var.dtype, + ) + + scale_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.quant_dequant.scale".format(in_var.name), + initializer=Constant(0.0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) scale_var.stop_gradient = True if not is_test: - state_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.quant_dequant.state".format(in_var.name), - initializer=Constant(0), - trainable=False), - shape=[1], - dtype=in_var.dtype) + state_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.quant_dequant.state".format(in_var.name), + initializer=Constant(0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) state_var.stop_gradient = True - accum_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.quant_dequant.accum".format(in_var.name), - initializer=Constant(0), - trainable=False), - shape=[1], - dtype=in_var.dtype) + accum_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.quant_dequant.accum".format(in_var.name), + initializer=Constant(0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) accum_var.stop_gradient = True attrs = { 'moving_rate': self._moving_rate, 'bit_length': quant_bits, - 'is_test': is_test + 'is_test': is_test, } inputs = {'X': in_var, 'InScale': scale_var} outputs = {'Out': quant_dequant_var, 'OutScale': scale_var} @@ -337,56 +383,64 @@ class QuantizeTranspilerV2(object): outputs['OutState'] = state_var outputs['OutAccum'] = accum_var - block._insert_op(idx, - type='fake_quantize_dequantize_moving_average_abs_max', - attrs=attrs, - inputs=inputs, - outputs=outputs) + block._insert_op( + idx, + type='fake_quantize_dequantize_moving_average_abs_max', + attrs=attrs, + inputs=inputs, + outputs=outputs, + ) return quant_dequant_var def _insert_pc_abs_max_fq_op(self, block, idx, in_var, quant_bits, ch_axis): """ Insert per channel abs max fake quant op. 
""" - quant_dequant_var = block.create_var(type=in_var.type, - name="{}.quant_dequant".format( - in_var.name), - shape=in_var.shape, - dtype=in_var.dtype) - - scale_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.quant_dequant.scale".format(in_var.name), - initializer=Constant(0.), - trainable=False), - shape=[in_var.shape[ch_axis]], - dtype=in_var.dtype) + quant_dequant_var = block.create_var( + type=in_var.type, + name="{}.quant_dequant".format(in_var.name), + shape=in_var.shape, + dtype=in_var.dtype, + ) + + scale_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.quant_dequant.scale".format(in_var.name), + initializer=Constant(0.0), + trainable=False, + ), + shape=[in_var.shape[ch_axis]], + dtype=in_var.dtype, + ) scale_var.stop_gradient = True inputs = {'X': in_var} outputs = {'Out': quant_dequant_var, 'OutScale': scale_var} attrs = {'bit_length': quant_bits, 'quant_axis': ch_axis} - block._insert_op(idx, - type='fake_channel_wise_quantize_dequantize_abs_max', - attrs=attrs, - inputs=inputs, - outputs=outputs) + block._insert_op( + idx, + type='fake_channel_wise_quantize_dequantize_abs_max', + attrs=attrs, + inputs=inputs, + outputs=outputs, + ) return quant_dequant_var - def _insert_ma_abs_max_scale_op(self, - block, - idx, - in_var, - is_test, - has_out_var=False): + def _insert_ma_abs_max_scale_op( + self, block, idx, in_var, is_test, has_out_var=False + ): """ Insert moving average abs max scale op. """ - scale_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.outscale.scale".format(in_var.name), - initializer=Constant(0.), - trainable=False), - shape=[1], - dtype=in_var.dtype) + scale_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.outscale.scale".format(in_var.name), + initializer=Constant(0.0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) scale_var.stop_gradient = True attrs = {'moving_rate': self._moving_rate, 'is_test': is_test} @@ -394,20 +448,26 @@ class QuantizeTranspilerV2(object): outputs = {'OutScale': scale_var} if not is_test: - state_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.outscale.state".format(in_var.name), - initializer=Constant(0), - trainable=False), - shape=[1], - dtype=in_var.dtype) + state_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.outscale.state".format(in_var.name), + initializer=Constant(0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) state_var.stop_gradient = True - accum_var = self._helper.create_parameter(attr=ParamAttr( - name="{}.outscale.accum".format(in_var.name), - initializer=Constant(0), - trainable=False), - shape=[1], - dtype=in_var.dtype) + accum_var = self._helper.create_parameter( + attr=ParamAttr( + name="{}.outscale.accum".format(in_var.name), + initializer=Constant(0), + trainable=False, + ), + shape=[1], + dtype=in_var.dtype, + ) accum_var.stop_gradient = True inputs['InState'] = state_var @@ -416,18 +476,22 @@ class QuantizeTranspilerV2(object): outputs['OutAccum'] = accum_var if has_out_var: - out_var = block.create_var(type=in_var.type, - name="{}.tmp".format(in_var.name), - shape=in_var.shape, - dtype=in_var.dtype) + out_var = block.create_var( + type=in_var.type, + name="{}.tmp".format(in_var.name), + shape=in_var.shape, + dtype=in_var.dtype, + ) outputs['Out'] = out_var - block._insert_op(idx, - type='moving_average_abs_max_scale', - attrs=attrs, - inputs=inputs, - outputs=outputs) + block._insert_op( + idx, + type='moving_average_abs_max_scale', + attrs=attrs, + inputs=inputs, + 
outputs=outputs, + ) if has_out_var: return out_var diff --git a/python/paddle/fluid/contrib/slim/quantization/utils.py b/python/paddle/fluid/contrib/slim/quantization/utils.py index 158f7e07a0d91a8c856fb386ddced4155f1fadc4..11e39116389c2a2460b9057084cc9a1f5a470566 100644 --- a/python/paddle/fluid/contrib/slim/quantization/utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/utils.py @@ -18,8 +18,12 @@ from ....framework import IrNode from ....framework import Operator _weight_supported_quantizable_op_type = [ - 'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul', - 'matmul_v2' + 'conv2d', + 'depthwise_conv2d', + 'conv2d_transpose', + 'mul', + 'matmul', + 'matmul_v2', ] _act_supported_quantizable_op_type = [ @@ -114,13 +118,19 @@ _act_supported_quantizable_op_type = [ ] QUANT_SUPPORTED_OP_TYPE_LIST = list( - set(_weight_supported_quantizable_op_type + - _act_supported_quantizable_op_type)) + set( + _weight_supported_quantizable_op_type + + _act_supported_quantizable_op_type + ) +) _out_scale_op_list = QUANT_SUPPORTED_OP_TYPE_LIST _channelwise_quant_axis1_ops = [ - 'conv2d_transpose', 'mul', 'matmul', 'matmul_v2' + 'conv2d_transpose', + 'mul', + 'matmul', + 'matmul_v2', ] # list op real input and output names, to avoid processing input such as AxisTensor. @@ -230,11 +240,11 @@ def _get_op_input_var_names(op): Returns: input_var_names or None. """ - assert isinstance(op, (IrNode, Operator)), \ - "The input op should be IrNode or Operator." + assert isinstance( + op, (IrNode, Operator) + ), "The input op should be IrNode or Operator." var_names = [] - op_name = op.name() if isinstance(op, IrNode) \ - else op.type + op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return [] @@ -250,11 +260,11 @@ def _get_op_input_var_names(op): def _get_op_output_var_names(op): """ """ - assert isinstance(op, (IrNode, Operator)), \ - "The input op should be IrNode or Operator." + assert isinstance( + op, (IrNode, Operator) + ), "The input op should be IrNode or Operator." var_names = [] - op_name = op.name() if isinstance(op, IrNode) \ - else op.type + op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return [] @@ -270,10 +280,10 @@ def _get_op_output_var_names(op): def _get_input_name_index(op, input_var_name): """Get the input name and index of the var_name in the op""" - assert isinstance(op, (IrNode, Operator)), \ - "The input op should be IrNode or Operator." - op_name = op.name() if isinstance(op, IrNode) \ - else op.type + assert isinstance( + op, (IrNode, Operator) + ), "The input op should be IrNode or Operator." + op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return None @@ -288,10 +298,10 @@ def _get_input_name_index(op, input_var_name): def _get_output_name_index(op, output_var_name): """Get the output name and index of the var_name in the op""" - assert isinstance(op, (IrNode, Operator)), \ - "The input op should be IrNode or Operator." - op_name = op.name() if isinstance(op, IrNode) \ - else op.type + assert isinstance( + op, (IrNode, Operator) + ), "The input op should be IrNode or Operator." + op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in _op_real_in_out_name: return None @@ -310,8 +320,7 @@ def load_variable_data(scope, var_name): Load variable value from scope ''' var_node = scope.find_var(var_name) - assert var_node is not None, \ - "Cannot find " + var_name + " in scope." 
+ assert var_node is not None, "Cannot find " + var_name + " in scope." return np.array(var_node.get_tensor()) @@ -319,8 +328,9 @@ def set_variable_data(scope, place, var_name, np_value): ''' Set the value of var node by name, if the node exits, ''' - assert isinstance(np_value, np.ndarray), \ - 'The type of value should be numpy array.' + assert isinstance( + np_value, np.ndarray + ), 'The type of value should be numpy array.' var_node = scope.find_var(var_name) if var_node != None: tensor = var_node.get_tensor() @@ -405,10 +415,12 @@ def bias_correction_w(x, x_quant, scale_v, quant_axis, weight_bits=8): x_dequant[:, i] = x_quant[:, i] * s / bnt quant_bias = x - x_dequant mean_bias = np.array( - [quant_bias[:, i].mean() for i in range(quant_bias.shape[1])]) + [quant_bias[:, i].mean() for i in range(quant_bias.shape[1])] + ) std_orig = np.array([x[:, i].std() for i in range(x.shape[1])]) std_quant = np.array( - [x_dequant[:, i].std() for i in range(x_dequant.shape[1])]) + [x_dequant[:, i].std() for i in range(x_dequant.shape[1])] + ) std_bias = std_orig / (std_quant + eps) else: x_dequant = x_quant * scale_v / bnt @@ -419,8 +431,9 @@ def bias_correction_w(x, x_quant, scale_v, quant_axis, weight_bits=8): mean_bias = np.resize(mean_bias, x.shape) x_dequant = (mean_bias + x_dequant) * std_bias - quantized_param_v = quant_tensor(x_dequant, scale_v, quant_axis, - weight_bits) + quantized_param_v = quant_tensor( + x_dequant, scale_v, quant_axis, weight_bits + ) return quantized_param_v @@ -430,8 +443,10 @@ def stable_sigmoid(x): def calculate_quant_cos_error(orig_tensor, qdq_tensor): - cos_sim = np.inner(orig_tensor.flatten(), qdq_tensor.flatten()) \ - / (np.linalg.norm(orig_tensor.flatten()) * np.linalg.norm(qdq_tensor.flatten())) + cos_sim = np.inner(orig_tensor.flatten(), qdq_tensor.flatten()) / ( + np.linalg.norm(orig_tensor.flatten()) + * np.linalg.norm(qdq_tensor.flatten()) + ) return cos_sim @@ -454,11 +469,10 @@ def move_persistable_var_to_global_block(program): def l2_loss(gt, pred): - return ((gt - pred)**2).mean() + return ((gt - pred) ** 2).mean() class tqdm(object): - def __init__(self, total, bar_format='Loading|{bar}', ncols=80): self.total = total self.bar_format = bar_format @@ -470,8 +484,9 @@ class tqdm(object): a = "=" * round((self.n / self.total) * self.ncols) b = " " * (self.ncols - len(a)) prefix = self.bar_format.split('|')[0] - sys.stderr.write("\r{}|{}=>{}| {}/{}".format(prefix, a, b, self.n, - self.total)) + sys.stderr.write( + "\r{}|{}=>{}| {}/{}".format(prefix, a, b, self.n, self.total) + ) sys.stderr.flush() def __enter__(self): diff --git a/python/paddle/fluid/contrib/slim/tests/convert_model2dot.py b/python/paddle/fluid/contrib/slim/tests/convert_model2dot.py index 3573f53e22db7152de212f104602f842fc31dc66..7bb7de706bcacafa0ac44bb50bcfd878496b545a 100644 --- a/python/paddle/fluid/contrib/slim/tests/convert_model2dot.py +++ b/python/paddle/fluid/contrib/slim/tests/convert_model2dot.py @@ -26,20 +26,21 @@ paddle.enable_static() def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument('--model_path', - type=str, - default='', - help='A path to a model.') - parser.add_argument('--save_graph_dir', - type=str, - default='', - help='A path to save the graph.') + parser.add_argument( + '--model_path', type=str, default='', help='A path to a model.' 
+ ) + parser.add_argument( + '--save_graph_dir', + type=str, + default='', + help='A path to save the graph.', + ) parser.add_argument( '--save_graph_name', type=str, default='', - help= - 'A name to save the graph. Default - name from model path will be used') + help='A name to save the graph. Default - name from model path will be used', + ) test_args, args = parser.parse_known_args(namespace=unittest) return test_args, sys.argv[:1] + args @@ -51,12 +52,19 @@ def generate_dot_for_model(model_path, save_graph_dir, save_graph_name): inference_scope = fluid.executor.global_scope() with fluid.scope_guard(inference_scope): if os.path.exists(os.path.join(model_path, '__model__')): - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(model_path, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe, 'model', - 'params') + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + model_path, exe, 'model', 'params' + ) graph = IrGraph(core.Graph(inference_program.desc), for_test=True) if not os.path.exists(save_graph_dir): os.makedirs(save_graph_dir) @@ -65,12 +73,17 @@ def generate_dot_for_model(model_path, save_graph_dir, save_graph_name): save_graph_name = model_name graph.draw(save_graph_dir, save_graph_name, graph.all_op_nodes()) print( - "Success! Generated dot and pdf files for {0} model, that can be found at {1} named {2}.\n" - .format(model_name, save_graph_dir, save_graph_name)) + "Success! Generated dot and pdf files for {0} model, that can be found at {1} named {2}.\n".format( + model_name, save_graph_dir, save_graph_name + ) + ) if __name__ == '__main__': global test_args test_args, remaining_args = parse_args() - generate_dot_for_model(test_args.model_path, test_args.save_graph_dir, - test_args.save_graph_name) + generate_dot_for_model( + test_args.model_path, + test_args.save_graph_dir, + test_args.save_graph_name, + ) diff --git a/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py b/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py index d69241d6cb982979ba42001f209caac196982571..43ace1b77000d14e5d67e87685daa91c52c1dfb2 100644 --- a/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py +++ b/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py @@ -24,9 +24,9 @@ from paddle.nn import BatchNorm1D from paddle.fluid.log_helper import get_logger -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def fix_model_dict(model): @@ -37,16 +37,18 @@ def fix_model_dict(model): if name.endswith("bias"): value = np.zeros_like(p_value).astype('float32') else: - value = np.random.normal( - loc=0.0, scale=0.01, - size=np.product(p_shape)).reshape(p_shape).astype('float32') + value = ( + np.random.normal(loc=0.0, scale=0.01, size=np.product(p_shape)) + .reshape(p_shape) + .astype('float32') + ) fixed_state[name] = value model.set_dict(fixed_state) return model def pre_hook(layer, input): - input_return = (input[0] * 2) + input_return = input[0] * 2 return input_return @@ -59,8 +61,9 @@ def train_lenet(lenet, reader, optimizer): lenet.train() for batch_id, data in enumerate(reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in 
data]).astype('float32') + x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) img = paddle.to_tensor(x_data) @@ -82,7 +85,6 @@ def train_lenet(lenet, reader, optimizer): class ImperativeLenet(fluid.dygraph.Layer): - def __init__(self, num_classes=10): super(ImperativeLenet, self).__init__() conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1") @@ -95,36 +97,55 @@ class ImperativeLenet(fluid.dygraph.Layer): fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") self.features = Sequential( - Conv2D(in_channels=1, - out_channels=6, - kernel_size=3, - stride=1, - padding=1, - weight_attr=conv2d_w1_attr, - bias_attr=False), BatchNorm2D(6), ReLU(), + Conv2D( + in_channels=1, + out_channels=6, + kernel_size=3, + stride=1, + padding=1, + weight_attr=conv2d_w1_attr, + bias_attr=False, + ), + BatchNorm2D(6), + ReLU(), + MaxPool2D(kernel_size=2, stride=2), + Conv2D( + in_channels=6, + out_channels=16, + kernel_size=5, + stride=1, + padding=0, + weight_attr=conv2d_w2_attr, + bias_attr=conv2d_b2_attr, + ), + BatchNorm2D(16), + PReLU(), MaxPool2D(kernel_size=2, stride=2), - Conv2D(in_channels=6, - out_channels=16, - kernel_size=5, - stride=1, - padding=0, - weight_attr=conv2d_w2_attr, - bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(), - MaxPool2D(kernel_size=2, stride=2)) + ) self.fc = Sequential( - Linear(in_features=400, - out_features=120, - weight_attr=fc_w1_attr, - bias_attr=fc_b1_attr), LeakyReLU(), - Linear(in_features=120, - out_features=84, - weight_attr=fc_w2_attr, - bias_attr=fc_b2_attr), Sigmoid(), - Linear(in_features=84, - out_features=num_classes, - weight_attr=fc_w3_attr, - bias_attr=fc_b3_attr), Softmax()) + Linear( + in_features=400, + out_features=120, + weight_attr=fc_w1_attr, + bias_attr=fc_b1_attr, + ), + LeakyReLU(), + Linear( + in_features=120, + out_features=84, + weight_attr=fc_w2_attr, + bias_attr=fc_b2_attr, + ), + Sigmoid(), + Linear( + in_features=84, + out_features=num_classes, + weight_attr=fc_w3_attr, + bias_attr=fc_b3_attr, + ), + Softmax(), + ) self.add = paddle.nn.quant.add() self.quant_stub = paddle.nn.quant.QuantStub() @@ -139,7 +160,6 @@ class ImperativeLenet(fluid.dygraph.Layer): class ImperativeLenetWithSkipQuant(fluid.dygraph.Layer): - def __init__(self, num_classes=10): super(ImperativeLenetWithSkipQuant, self).__init__() @@ -153,48 +173,58 @@ class ImperativeLenetWithSkipQuant(fluid.dygraph.Layer): fc_b1_attr = fluid.ParamAttr(name="fc_b_1") fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") - self.conv2d_0 = Conv2D(in_channels=1, - out_channels=6, - kernel_size=3, - stride=1, - padding=1, - weight_attr=conv2d_w1_attr, - bias_attr=conv2d_b1_attr) + self.conv2d_0 = Conv2D( + in_channels=1, + out_channels=6, + kernel_size=3, + stride=1, + padding=1, + weight_attr=conv2d_w1_attr, + bias_attr=conv2d_b1_attr, + ) self.conv2d_0.skip_quant = True self.batch_norm_0 = BatchNorm2D(6) self.relu_0 = ReLU() self.pool2d_0 = MaxPool2D(kernel_size=2, stride=2) - self.conv2d_1 = Conv2D(in_channels=6, - out_channels=16, - kernel_size=5, - stride=1, - padding=0, - weight_attr=conv2d_w2_attr, - bias_attr=conv2d_b2_attr) + self.conv2d_1 = Conv2D( + in_channels=6, + out_channels=16, + kernel_size=5, + stride=1, + padding=0, + weight_attr=conv2d_w2_attr, + bias_attr=conv2d_b2_attr, + ) self.conv2d_1.skip_quant = False self.batch_norm_1 = BatchNorm2D(16) self.relu6_0 = ReLU6() self.pool2d_1 = 
MaxPool2D(kernel_size=2, stride=2) - self.linear_0 = Linear(in_features=400, - out_features=120, - weight_attr=fc_w1_attr, - bias_attr=fc_b1_attr) + self.linear_0 = Linear( + in_features=400, + out_features=120, + weight_attr=fc_w1_attr, + bias_attr=fc_b1_attr, + ) self.linear_0.skip_quant = True self.leaky_relu_0 = LeakyReLU() - self.linear_1 = Linear(in_features=120, - out_features=84, - weight_attr=fc_w2_attr, - bias_attr=fc_b2_attr) + self.linear_1 = Linear( + in_features=120, + out_features=84, + weight_attr=fc_w2_attr, + bias_attr=fc_b2_attr, + ) self.linear_1.skip_quant = False self.sigmoid_0 = Sigmoid() - self.linear_2 = Linear(in_features=84, - out_features=num_classes, - weight_attr=fc_w3_attr, - bias_attr=fc_b3_attr) + self.linear_2 = Linear( + in_features=84, + out_features=num_classes, + weight_attr=fc_w3_attr, + bias_attr=fc_b3_attr, + ) self.linear_2.skip_quant = False self.softmax_0 = Softmax() @@ -221,24 +251,28 @@ class ImperativeLenetWithSkipQuant(fluid.dygraph.Layer): class ImperativeLinearBn(fluid.dygraph.Layer): - def __init__(self): super(ImperativeLinearBn, self).__init__() fc_w_attr = paddle.ParamAttr( name="fc_weight", - initializer=paddle.nn.initializer.Constant(value=0.5)) + initializer=paddle.nn.initializer.Constant(value=0.5), + ) fc_b_attr = paddle.ParamAttr( name="fc_bias", - initializer=paddle.nn.initializer.Constant(value=1.0)) + initializer=paddle.nn.initializer.Constant(value=1.0), + ) bn_w_attr = paddle.ParamAttr( name="bn_weight", - initializer=paddle.nn.initializer.Constant(value=0.5)) - - self.linear = Linear(in_features=10, - out_features=10, - weight_attr=fc_w_attr, - bias_attr=fc_b_attr) + initializer=paddle.nn.initializer.Constant(value=0.5), + ) + + self.linear = Linear( + in_features=10, + out_features=10, + weight_attr=fc_w_attr, + bias_attr=fc_b_attr, + ) self.bn = BatchNorm1D(10, weight_attr=bn_w_attr) def forward(self, inputs): @@ -249,17 +283,17 @@ class ImperativeLinearBn(fluid.dygraph.Layer): class ImperativeLinearBn_hook(fluid.dygraph.Layer): - def __init__(self): super(ImperativeLinearBn_hook, self).__init__() fc_w_attr = paddle.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.Constant(value=0.5)) + initializer=paddle.nn.initializer.Constant(value=0.5), + ) - self.linear = Linear(in_features=10, - out_features=10, - weight_attr=fc_w_attr) + self.linear = Linear( + in_features=10, out_features=10, weight_attr=fc_w_attr + ) self.bn = BatchNorm1D(10) forward_pre = self.linear.register_forward_pre_hook(pre_hook) diff --git a/python/paddle/fluid/contrib/slim/tests/quant2_int8_image_classification_comparison.py b/python/paddle/fluid/contrib/slim/tests/quant2_int8_image_classification_comparison.py index c3d7f1a254eea34ff0632d121f5a743db395c39f..dcd5d6de3133161751e60f021d20a3ac63240040 100644 --- a/python/paddle/fluid/contrib/slim/tests/quant2_int8_image_classification_comparison.py +++ b/python/paddle/fluid/contrib/slim/tests/quant2_int8_image_classification_comparison.py @@ -40,50 +40,50 @@ def parse_args(): '--skip_batch_num', type=int, default=0, - help='Number of the first minibatches to skip in performance statistics.' + help='Number of the first minibatches to skip in performance statistics.', + ) + parser.add_argument( + '--quant_model', type=str, default='', help='A path to a Quant model.' + ) + parser.add_argument( + '--fp32_model', type=str, default='', help='A path to an FP32 model.' 
) - parser.add_argument('--quant_model', - type=str, - default='', - help='A path to a Quant model.') - parser.add_argument('--fp32_model', - type=str, - default='', - help='A path to an FP32 model.') parser.add_argument('--infer_data', type=str, default='', help='Data file.') parser.add_argument( '--batch_num', type=int, default=0, - help= - 'Number of batches to process. 0 or less means whole dataset. Default: 0.' + help='Number of batches to process. 0 or less means whole dataset. Default: 0.', + ) + parser.add_argument( + '--acc_diff_threshold', + type=float, + default=0.01, + help='Accepted accuracy difference threshold.', ) - parser.add_argument('--acc_diff_threshold', - type=float, - default=0.01, - help='Accepted accuracy difference threshold.') parser.add_argument( '--ops_to_quantize', type=str, default='', - help= - 'A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.' + help='A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.', ) parser.add_argument( '--op_ids_to_skip', type=str, default='', - help='A comma separated list of operator ids to skip in quantization.') + help='A comma separated list of operator ids to skip in quantization.', + ) parser.add_argument( '--targets', type=str, default='quant,int8,fp32', - help= - 'A comma separated list of inference types to run ("int8", "fp32", "quant"). Default: "quant,int8,fp32"' + help='A comma separated list of inference types to run ("int8", "fp32", "quant"). Default: "quant,int8,fp32"', + ) + parser.add_argument( + '--debug', + action='store_true', + help='If used, the graph of Quant model is drawn.', ) - parser.add_argument('--debug', - action='store_true', - help='If used, the graph of Quant model is drawn.') test_args, args = parser.parse_known_args(namespace=unittest) return test_args, sys.argv[:1] + args @@ -95,7 +95,6 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): """ def _reader_creator(self, data_file='data.bin'): - def reader(): with open(data_file, 'rb') as fp: num = fp.read(8) @@ -114,7 +113,8 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): fp.seek(imgs_offset + img_size * step) img = fp.read(img_size) img = struct.unpack_from( - '{}f'.format(img_ch * img_w * img_h), img) + '{}f'.format(img_ch * img_w * img_h), img + ) img = np.array(img) img.shape = (img_ch, img_w, img_h) fp.seek(labels_offset + label_size * step) @@ -148,14 +148,14 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): name = op_node.name() if name in ['depthwise_conv2d']: input_var_node = graph._find_node_by_name( - op_node.inputs, - op_node.input("Input")[0]) + op_node.inputs, op_node.input("Input")[0] + ) weight_var_node = graph._find_node_by_name( - op_node.inputs, - op_node.input("Filter")[0]) + op_node.inputs, op_node.input("Filter")[0] + ) output_var_node = graph._find_node_by_name( - graph.all_var_nodes(), - op_node.output("Output")[0]) + graph.all_var_nodes(), op_node.output("Output")[0] + ) attrs = { name: op_node.op().attr(name) for name in op_node.op().attr_names() @@ -164,11 +164,9 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): conv_op_node = graph.create_op_node( op_type='conv2d', attrs=attrs, - inputs={ - 'Input': input_var_node, - 'Filter': weight_var_node - }, - 
outputs={'Output': output_var_node}) + inputs={'Input': input_var_node, 'Filter': weight_var_node}, + outputs={'Output': output_var_node}, + ) graph.link_to(input_var_node, conv_op_node) graph.link_to(weight_var_node, conv_op_node) @@ -177,28 +175,37 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): return graph - def _predict(self, - test_reader=None, - model_path=None, - batch_size=1, - batch_num=1, - skip_batch_num=0, - target='quant'): + def _predict( + self, + test_reader=None, + model_path=None, + batch_size=1, + batch_num=1, + skip_batch_num=0, + target='quant', + ): assert target in ['quant', 'int8', 'fp32'] place = fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.executor.global_scope() with fluid.scope_guard(inference_scope): if os.path.exists(os.path.join(model_path, '__model__')): - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe, 'model', - 'params') + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + model_path, exe, 'model', 'params' + ) graph = IrGraph(core.Graph(inference_program.desc), for_test=True) - if (self._debug): + if self._debug: graph.draw('.', 'quant_orig', graph.all_op_nodes()) quant_transform_pass = Quant2Int8MkldnnPass( self._quantized_ops, @@ -206,10 +213,11 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): _scope=inference_scope, _place=place, _core=core, - _debug=self._debug) - if (target == 'quant'): + _debug=self._debug, + ) + if target == 'quant': graph = self._prepare_for_fp32_mkldnn(graph) - elif (target == 'int8'): + elif target == 'int8': graph = quant_transform_pass.apply(graph) else: # target == fp32 graph = quant_transform_pass.prepare_and_optimize_fp32(graph) @@ -238,30 +246,35 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): images = np.array(images).astype('float32') labels = np.array([x[1] for x in data]).astype('int64') - if (target == 'fp32'): + if target == 'fp32': # FP32 models have accuracy measuring layers labels = labels.reshape([-1, 1]) start = time.time() - out = exe.run(inference_program, - feed={ - feed_target_names[0]: images, - feed_target_names[1]: labels - }, - fetch_list=fetch_targets) + out = exe.run( + inference_program, + feed={ + feed_target_names[0]: images, + feed_target_names[1]: labels, + }, + fetch_list=fetch_targets, + ) batch_time = (time.time() - start) * 1000 # in miliseconds batch_acc1, batch_acc5 = out[1][0], out[2][0] outputs.append(batch_acc1) else: # Quant INT8 models do not have accuracy measuring layers start = time.time() - out = exe.run(inference_program, - feed={feed_target_names[0]: images}, - fetch_list=fetch_targets) + out = exe.run( + inference_program, + feed={feed_target_names[0]: images}, + fetch_list=fetch_targets, + ) batch_time = (time.time() - start) * 1000 # in miliseconds outputs.append(out[0]) # Calculate accuracy result batch_acc1, batch_acc5 = self._get_batch_accuracy( - out[0], labels) + out[0], labels + ) infer_accs1.append(batch_acc1) infer_accs5.append(batch_acc5) @@ -272,10 +285,17 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): fpses.append(fps) iters += 1 appx = ' (warm-up)' if iters <= skip_batch_num else '' - _logger.info('batch {0}{5}, 
acc1: {1:.4f}, acc5: {2:.4f}, ' - 'latency: {3:.4f} ms, fps: {4:.2f}'.format( - iters, batch_acc1, batch_acc5, - batch_time / batch_size, fps, appx)) + _logger.info( + 'batch {0}{5}, acc1: {1:.4f}, acc5: {2:.4f}, ' + 'latency: {3:.4f} ms, fps: {4:.2f}'.format( + iters, + batch_acc1, + batch_acc5, + batch_time / batch_size, + fps, + appx, + ) + ) # Postprocess benchmark data batch_latencies = batch_times[skip_batch_num:] @@ -287,18 +307,24 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): acc1_avg = np.mean(infer_accs1) acc5_avg = np.mean(infer_accs5) _logger.info( - 'Total inference run time: {:.2f} s'.format(infer_total_time)) + 'Total inference run time: {:.2f} s'.format(infer_total_time) + ) return outputs, acc1_avg, acc5_avg, fps_avg, latency_avg def _print_performance(self, title, fps, lat): - _logger.info('{0}: avg fps: {1:.2f}, avg latency: {2:.4f} ms'.format( - title, fps, lat)) + _logger.info( + '{0}: avg fps: {1:.2f}, avg latency: {2:.4f} ms'.format( + title, fps, lat + ) + ) def _print_accuracy(self, title, acc1, acc5): _logger.info( - '{0}: avg top1 accuracy: {1:.4f}, avg top5 accuracy: {2:.4f}'. - format(title, acc1, acc5)) + '{0}: avg top1 accuracy: {1:.4f}, avg top5 accuracy: {2:.4f}'.format( + title, acc1, acc5 + ) + ) def _summarize_performance(self, int8_fps, int8_lat, fp32_fps, fp32_lat): _logger.info('--- Performance summary ---') @@ -306,8 +332,9 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): if fp32_lat >= 0: self._print_performance('FP32', fp32_fps, fp32_lat) - def _summarize_accuracy(self, quant_acc1, quant_acc5, int8_acc1, int8_acc5, - fp32_acc1, fp32_acc5): + def _summarize_accuracy( + self, quant_acc1, quant_acc5, int8_acc1, int8_acc5, fp32_acc1, fp32_acc5 + ): _logger.info('--- Accuracy summary ---') self._print_accuracy('Quant', quant_acc1, quant_acc5) self._print_accuracy('INT8', int8_acc1, int8_acc5) @@ -316,8 +343,10 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): def _compare_accuracy(self, threshold, quant_acc1, int8_acc1): _logger.info( - 'Accepted top1 accuracy drop threshold: {0}. (condition: (Quant_top1_acc - IN8_top1_acc) <= threshold && Quant_top1_acc > 0.5 && INT8_top1_acc > 0.5)' - .format(threshold)) + 'Accepted top1 accuracy drop threshold: {0}. (condition: (Quant_top1_acc - IN8_top1_acc) <= threshold && Quant_top1_acc > 0.5 && INT8_top1_acc > 0.5)'.format( + threshold + ) + ) # We assume valid accuracy to be at least 0.5 assert quant_acc1 > 0.5 assert int8_acc1 > 0.5 @@ -334,9 +363,13 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): return quant_model_path = test_case_args.quant_model - assert quant_model_path, 'The Quant model path cannot be empty. Please, use the --quant_model option.' + assert ( + quant_model_path + ), 'The Quant model path cannot be empty. Please, use the --quant_model option.' data_path = test_case_args.infer_data - assert data_path, 'The dataset path cannot be empty. Please, use the --infer_data option.' + assert ( + data_path + ), 'The dataset path cannot be empty. Please, use the --infer_data option.' 
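# Illustrative sketch (not part of this patch): the acceptance criterion logged
# by _compare_accuracy above reduces to a single boolean check. The names
# quant_top1, int8_top1 and threshold are placeholders for the values computed
# by _predict and the --acc_diff_threshold option.
def accuracy_drop_accepted(quant_top1, int8_top1, threshold):
    # Both runs must be clearly better than chance and the INT8 drop bounded.
    return (
        quant_top1 > 0.5
        and int8_top1 > 0.5
        and (quant_top1 - int8_top1) <= threshold
    )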
fp32_model_path = test_case_args.fp32_model batch_size = test_case_args.batch_size batch_num = test_case_args.batch_num @@ -347,12 +380,14 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): self._quantized_ops = set() if test_case_args.ops_to_quantize: self._quantized_ops = self._strings_from_csv( - test_case_args.ops_to_quantize) + test_case_args.ops_to_quantize + ) self._op_ids_to_skip = set([-1]) if test_case_args.op_ids_to_skip: self._op_ids_to_skip = self._ints_from_csv( - test_case_args.op_ids_to_skip) + test_case_args.op_ids_to_skip + ) self._targets = self._strings_from_csv(test_case_args.targets) assert self._targets.intersection( @@ -368,61 +403,99 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): _logger.info('Batch number: {}'.format(batch_num)) _logger.info('Accuracy drop threshold: {}.'.format(acc_diff_threshold)) _logger.info( - 'Quantized ops: {}.'.format(','.join(self._quantized_ops) if self. - _quantized_ops else 'all quantizable')) - _logger.info('Op ids to skip quantization: {}.'.format( - ','.join(map(str, self._op_ids_to_skip) - ) if test_case_args.op_ids_to_skip else 'none')) + 'Quantized ops: {}.'.format( + ','.join(self._quantized_ops) + if self._quantized_ops + else 'all quantizable' + ) + ) + _logger.info( + 'Op ids to skip quantization: {}.'.format( + ','.join(map(str, self._op_ids_to_skip)) + if test_case_args.op_ids_to_skip + else 'none' + ) + ) _logger.info('Targets: {}.'.format(','.join(self._targets))) if 'quant' in self._targets: _logger.info('--- Quant prediction start ---') - val_reader = paddle.batch(self._reader_creator(data_path), - batch_size=batch_size) - quant_output, quant_acc1, quant_acc5, quant_fps, quant_lat = self._predict( + val_reader = paddle.batch( + self._reader_creator(data_path), batch_size=batch_size + ) + ( + quant_output, + quant_acc1, + quant_acc5, + quant_fps, + quant_lat, + ) = self._predict( val_reader, quant_model_path, batch_size, batch_num, skip_batch_num, - target='quant') + target='quant', + ) self._print_performance('Quant', quant_fps, quant_lat) self._print_accuracy('Quant', quant_acc1, quant_acc5) if 'int8' in self._targets: _logger.info('--- INT8 prediction start ---') - val_reader = paddle.batch(self._reader_creator(data_path), - batch_size=batch_size) - int8_output, int8_acc1, int8_acc5, int8_fps, int8_lat = self._predict( + val_reader = paddle.batch( + self._reader_creator(data_path), batch_size=batch_size + ) + ( + int8_output, + int8_acc1, + int8_acc5, + int8_fps, + int8_lat, + ) = self._predict( val_reader, quant_model_path, batch_size, batch_num, skip_batch_num, - target='int8') + target='int8', + ) self._print_performance('INT8', int8_fps, int8_lat) self._print_accuracy('INT8', int8_acc1, int8_acc5) fp32_acc1 = fp32_acc5 = fp32_fps = fp32_lat = -1 if 'fp32' in self._targets and fp32_model_path: _logger.info('--- FP32 prediction start ---') - val_reader = paddle.batch(self._reader_creator(data_path), - batch_size=batch_size) - fp32_output, fp32_acc1, fp32_acc5, fp32_fps, fp32_lat = self._predict( + val_reader = paddle.batch( + self._reader_creator(data_path), batch_size=batch_size + ) + ( + fp32_output, + fp32_acc1, + fp32_acc5, + fp32_fps, + fp32_lat, + ) = self._predict( val_reader, fp32_model_path, batch_size, batch_num, skip_batch_num, - target='fp32') + target='fp32', + ) self._print_performance('FP32', fp32_fps, fp32_lat) self._print_accuracy('FP32', fp32_acc1, fp32_acc5) if {'int8', 'fp32'}.issubset(self._targets): self._summarize_performance(int8_fps, int8_lat, 
fp32_fps, fp32_lat) if {'int8', 'quant'}.issubset(self._targets): - self._summarize_accuracy(quant_acc1, quant_acc5, int8_acc1, - int8_acc5, fp32_acc1, fp32_acc5) + self._summarize_accuracy( + quant_acc1, + quant_acc5, + int8_acc1, + int8_acc5, + fp32_acc1, + fp32_acc5, + ) self._compare_accuracy(acc_diff_threshold, quant_acc1, int8_acc1) diff --git a/python/paddle/fluid/contrib/slim/tests/quant2_int8_lstm_model.py b/python/paddle/fluid/contrib/slim/tests/quant2_int8_lstm_model.py index 0a9abe61e0e4b07de0d75acad48110b0496eab13..71bac0208e4cfe50bf3f467fbb91e40639ef0b9a 100644 --- a/python/paddle/fluid/contrib/slim/tests/quant2_int8_lstm_model.py +++ b/python/paddle/fluid/contrib/slim/tests/quant2_int8_lstm_model.py @@ -25,35 +25,33 @@ from save_quant_model import transform_and_save_int8_model def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument('--fp32_model', - type=str, - default='', - help='A path to a FP32 model.') - parser.add_argument('--quant_model', - type=str, - default='', - help='A path to a quant model.') + parser.add_argument( + '--fp32_model', type=str, default='', help='A path to a FP32 model.' + ) + parser.add_argument( + '--quant_model', type=str, default='', help='A path to a quant model.' + ) parser.add_argument('--infer_data', type=str, default='', help='Data file.') parser.add_argument( '--warmup_iter', type=int, default=1, - help='Number of the first iterations to skip in performance statistics.' + help='Number of the first iterations to skip in performance statistics.', + ) + parser.add_argument( + '--acc_diff_threshold', + type=float, + default=0.01, + help='Accepted accuracy difference threshold.', + ) + parser.add_argument( + '--num_threads', type=int, default=1, help='Number of threads.' ) - parser.add_argument('--acc_diff_threshold', - type=float, - default=0.01, - help='Accepted accuracy difference threshold.') - parser.add_argument('--num_threads', - type=int, - default=1, - help='Number of threads.') parser.add_argument( '--mkldnn_cache_capacity', type=int, default=0, - help= - 'Mkldnn cache capacity. The default value in Python API is 15, which can slow down int8 models. Default 0 means unlimited cache.' + help='Mkldnn cache capacity. The default value in Python API is 15, which can slow down int8 models. 
Default 0 means unlimited cache.', ) test_args, args = parser.parse_known_args(namespace=unittest) @@ -61,7 +59,6 @@ def parse_args(): class TestLstmModelPTQ(unittest.TestCase): - def get_warmup_tensor(self, data_path, place): data = [] with open(data_path, 'rb') as in_f: @@ -75,11 +72,13 @@ class TestLstmModelPTQ(unittest.TestCase): seq_len = (alllen >> 16) & 0xFFFF label = in_f.read(4 * label_len) - label = np.frombuffer(label, - dtype=np.int32).reshape([len(label) // 4]) + label = np.frombuffer(label, dtype=np.int32).reshape( + [len(label) // 4] + ) feat = in_f.read(4 * seq_len * 8) feat = np.frombuffer(feat, dtype=np.float32).reshape( - [len(feat) // 4 // 8, 8]) + [len(feat) // 4 // 8, 8] + ) lod_feat = [feat.shape[0]] minputs = fluid.create_lod_tensor(feat, [lod_feat], place) @@ -97,13 +96,15 @@ class TestLstmModelPTQ(unittest.TestCase): inputs = data[1:] return warmup_data, inputs - def set_config(self, - model_path, - num_threads, - mkldnn_cache_capacity, - warmup_data=None, - use_analysis=False, - enable_ptq=False): + def set_config( + self, + model_path, + num_threads, + mkldnn_cache_capacity, + warmup_data=None, + use_analysis=False, + enable_ptq=False, + ): config = AnalysisConfig(model_path) config.set_cpu_math_library_num_threads(num_threads) if use_analysis: @@ -120,19 +121,27 @@ class TestLstmModelPTQ(unittest.TestCase): config.quantizer_config().set_quant_batch_size(1) return config - def run_program(self, - model_path, - data_path, - num_threads, - mkldnn_cache_capacity, - warmup_iter, - use_analysis=False, - enable_ptq=False): + def run_program( + self, + model_path, + data_path, + num_threads, + mkldnn_cache_capacity, + warmup_iter, + use_analysis=False, + enable_ptq=False, + ): place = fluid.CPUPlace() warmup_data, inputs = self.get_warmup_tensor(data_path, place) warmup_data = [item[0] for item in warmup_data] - config = self.set_config(model_path, num_threads, mkldnn_cache_capacity, - warmup_data, use_analysis, enable_ptq) + config = self.set_config( + model_path, + num_threads, + mkldnn_cache_capacity, + warmup_data, + use_analysis, + enable_ptq, + ) predictor = create_paddle_predictor(config) data = [item[0] for item in inputs] @@ -164,13 +173,17 @@ class TestLstmModelPTQ(unittest.TestCase): if this_label_data[0] <= 6350: all_ctc_num += 1 - if np_ctc_out.shape[0] == 1 and np_ctc_out.all( - ) == this_label_data.all(): + if ( + np_ctc_out.shape[0] == 1 + and np_ctc_out.all() == this_label_data.all() + ): ok_ctc_num += 1 else: all_ctc_num += 1 - if np_ctc_out.shape[0] == this_label.shape[ - 0] and np_ctc_out.all() == this_label_data.all(): + if ( + np_ctc_out.shape[0] == this_label.shape[0] + and np_ctc_out.all() == this_label_data.all() + ): ok_ctc_num += 1 if all_ctc_num > 1000 or all_hz_num > 1000: @@ -187,44 +200,75 @@ class TestLstmModelPTQ(unittest.TestCase): return fp32_model = test_case_args.fp32_model - assert fp32_model, 'The FP32 model path cannot be empty. Please, use the --fp32_model option.' + assert ( + fp32_model + ), 'The FP32 model path cannot be empty. Please, use the --fp32_model option.' quant_model = test_case_args.quant_model - assert quant_model, 'The quant model path cannot be empty. Please, use the --quant_model option.' + assert ( + quant_model + ), 'The quant model path cannot be empty. Please, use the --quant_model option.' infer_data = test_case_args.infer_data - assert infer_data, 'The dataset path cannot be empty. Please, use the --infer_data option.' + assert ( + infer_data + ), 'The dataset path cannot be empty. 
Please, use the --infer_data option.' num_threads = test_case_args.num_threads mkldnn_cache_capacity = test_case_args.mkldnn_cache_capacity warmup_iter = test_case_args.warmup_iter acc_diff_threshold = test_case_args.acc_diff_threshold - (fp32_hx_acc, fp32_ctc_acc, - fp32_fps) = self.run_program(fp32_model, infer_data, num_threads, - mkldnn_cache_capacity, warmup_iter, False, - False) - - (int8_hx_acc, int8_ctc_acc, - int8_fps) = self.run_program(fp32_model, infer_data, num_threads, - mkldnn_cache_capacity, warmup_iter, True, - True) + (fp32_hx_acc, fp32_ctc_acc, fp32_fps) = self.run_program( + fp32_model, + infer_data, + num_threads, + mkldnn_cache_capacity, + warmup_iter, + False, + False, + ) + + (int8_hx_acc, int8_ctc_acc, int8_fps) = self.run_program( + fp32_model, + infer_data, + num_threads, + mkldnn_cache_capacity, + warmup_iter, + True, + True, + ) quant_model_save_path = quant_model + "_int8" # transform model to quant2 - transform_and_save_int8_model(quant_model, quant_model_save_path, - "fusion_lstm,concat") - - (quant_hx_acc, quant_ctc_acc, - quant_fps) = self.run_program(quant_model_save_path, infer_data, - num_threads, mkldnn_cache_capacity, - warmup_iter, True, False) - - print("FP32: fps {0}, hx_acc {1}, ctc_acc {2}".format( - fp32_fps, fp32_hx_acc, fp32_ctc_acc)) - - print("PTQ_INT8: fps {0}, hx_acc {1}, ctc_acc {2}".format( - int8_fps, int8_hx_acc, int8_ctc_acc)) - - print("QUANT2_INT8: fps {0}, hx_acc {1}, ctc_acc {2}".format( - quant_fps, quant_hx_acc, quant_ctc_acc)) + transform_and_save_int8_model( + quant_model, quant_model_save_path, "fusion_lstm,concat" + ) + + (quant_hx_acc, quant_ctc_acc, quant_fps) = self.run_program( + quant_model_save_path, + infer_data, + num_threads, + mkldnn_cache_capacity, + warmup_iter, + True, + False, + ) + + print( + "FP32: fps {0}, hx_acc {1}, ctc_acc {2}".format( + fp32_fps, fp32_hx_acc, fp32_ctc_acc + ) + ) + + print( + "PTQ_INT8: fps {0}, hx_acc {1}, ctc_acc {2}".format( + int8_fps, int8_hx_acc, int8_ctc_acc + ) + ) + + print( + "QUANT2_INT8: fps {0}, hx_acc {1}, ctc_acc {2}".format( + quant_fps, quant_hx_acc, quant_ctc_acc + ) + ) sys.stdout.flush() diff --git a/python/paddle/fluid/contrib/slim/tests/quant2_int8_nlp_comparison.py b/python/paddle/fluid/contrib/slim/tests/quant2_int8_nlp_comparison.py index fecead6d6de030112462bab7166dbe084b4daa97..3b997fa0d5000127c4ff6b6f7b5d8ffd651b41e9 100644 --- a/python/paddle/fluid/contrib/slim/tests/quant2_int8_nlp_comparison.py +++ b/python/paddle/fluid/contrib/slim/tests/quant2_int8_nlp_comparison.py @@ -39,57 +39,56 @@ def parse_args(): '--skip_batch_num', type=int, default=0, - help='Number of the first minibatches to skip in performance statistics.' + help='Number of the first minibatches to skip in performance statistics.', + ) + parser.add_argument( + '--quant_model', type=str, default='', help='A path to a Quant model.' ) - parser.add_argument('--quant_model', - type=str, - default='', - help='A path to a Quant model.') parser.add_argument( '--fp32_model', type=str, default='', - help= - 'A path to an FP32 model. If empty, the Quant model will be used for FP32 inference.' + help='A path to an FP32 model. If empty, the Quant model will be used for FP32 inference.', ) parser.add_argument('--infer_data', type=str, default='', help='Data file.') - parser.add_argument('--labels', - type=str, - default='', - help='File with labels.') + parser.add_argument( + '--labels', type=str, default='', help='File with labels.' 
+ ) parser.add_argument( '--batch_num', type=int, default=0, - help= - 'Number of batches to process. 0 or less means whole dataset. Default: 0.' + help='Number of batches to process. 0 or less means whole dataset. Default: 0.', + ) + parser.add_argument( + '--acc_diff_threshold', + type=float, + default=0.01, + help='Accepted accuracy difference threshold.', ) - parser.add_argument('--acc_diff_threshold', - type=float, - default=0.01, - help='Accepted accuracy difference threshold.') parser.add_argument( '--ops_to_quantize', type=str, default='', - help= - 'A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.' + help='A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.', ) parser.add_argument( '--op_ids_to_skip', type=str, default='', - help='A comma separated list of operator ids to skip in quantization.') + help='A comma separated list of operator ids to skip in quantization.', + ) parser.add_argument( '--targets', type=str, default='quant,int8,fp32', - help= - 'A comma separated list of inference types to run ("int8", "fp32", "quant"). Default: "quant,int8,fp32"' + help='A comma separated list of inference types to run ("int8", "fp32", "quant"). Default: "quant,int8,fp32"', + ) + parser.add_argument( + '--debug', + action='store_true', + help='If used, the graph of Quant model is drawn.', ) - parser.add_argument('--debug', - action='store_true', - help='If used, the graph of Quant model is drawn.') test_args, args = parser.parse_known_args(namespace=unittest) @@ -116,16 +115,16 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): for i in range(len(data_lines)): data_fields = data_lines[i].split(';') - assert len( - data_fields - ) >= 2, "The number of data fields in the dataset is less than 2" + assert ( + len(data_fields) >= 2 + ), "The number of data fields in the dataset is less than 2" buffers = [] shape = [] for j in range(2): data = data_fields[j].split(':') - assert len( - data - ) >= 2, "Size of data in the dataset is less than 2" + assert ( + len(data) >= 2 + ), "Size of data in the dataset is less than 2" # Shape is stored under index 0, while data under 1 shape = data[0].split() shape.pop(0) @@ -149,42 +148,53 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): correct += 1 return correct - def _predict(self, - test_reader=None, - model_path=None, - batch_size=1, - batch_num=1, - skip_batch_num=0, - target='quant'): + def _predict( + self, + test_reader=None, + model_path=None, + batch_size=1, + batch_num=1, + skip_batch_num=0, + target='quant', + ): assert target in ['quant', 'int8', 'fp32'] place = fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.executor.global_scope() with fluid.scope_guard(inference_scope): if os.path.exists(os.path.join(model_path, '__model__')): - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe, 'model', - 'params') + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + model_path, exe, 'model', 'params' + ) graph = 
IrGraph(core.Graph(inference_program.desc), for_test=True) - if (self._debug): + if self._debug: graph.draw('.', 'quant_orig', graph.all_op_nodes()) - if (target != 'quant'): + if target != 'quant': quant_transform_pass = Quant2Int8MkldnnPass( self._quantized_ops, _op_ids_to_skip=self._op_ids_to_skip, _scope=inference_scope, _place=place, _core=core, - _debug=self._debug) - if (target == 'int8'): + _debug=self._debug, + ) + if target == 'int8': graph = quant_transform_pass.apply(graph) else: # target == fp32 graph = quant_transform_pass.prepare_and_optimize_fp32( - graph) + graph + ) inference_program = graph.to_program() @@ -205,12 +215,14 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): labels = np.array([x[2] for x in data]).astype('int64') start = time.time() - out = exe.run(inference_program, - feed={ - feed_target_names[0]: input0, - feed_target_names[1]: input1 - }, - fetch_list=fetch_targets) + out = exe.run( + inference_program, + feed={ + feed_target_names[0]: input0, + feed_target_names[1]: input1, + }, + fetch_list=fetch_targets, + ) batch_time = (time.time() - start) * 1000 # in miliseconds batch_times.append(batch_time) batch_correct = self._get_batch_correct(out, labels) @@ -224,8 +236,10 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): iters += 1 appx = ' (warm-up)' if iters <= skip_batch_num else '' _logger.info( - 'batch {0}{4}, acc: {1:.4f}, latency: {2:.4f} ms, predictions per sec: {3:.2f}' - .format(iters, batch_acc, latency, pps, appx)) + 'batch {0}{4}, acc: {1:.4f}, latency: {2:.4f} ms, predictions per sec: {3:.2f}'.format( + iters, batch_acc, latency, pps, appx + ) + ) # Postprocess benchmark data infer_total_time = time.time() - infer_start_time @@ -236,14 +250,17 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): pps_avg = np.average(ppses) acc_avg = float(np.sum(total_correct)) / float(total_samples) _logger.info( - 'Total inference run time: {:.2f} s'.format(infer_total_time)) + 'Total inference run time: {:.2f} s'.format(infer_total_time) + ) return acc_avg, pps_avg, latency_avg def _print_performance(self, title, pps, lat): _logger.info( - '{0}: avg predictions per sec: {1:.2f}, avg latency: {2:.4f} ms'. - format(title, pps, lat)) + '{0}: avg predictions per sec: {1:.2f}, avg latency: {2:.4f} ms'.format( + title, pps, lat + ) + ) def _print_accuracy(self, title, acc): _logger.info('{0}: avg accuracy: {1:.6f}'.format(title, acc)) @@ -263,8 +280,10 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): def _compare_accuracy(self, threshold, quant_acc, int8_acc): _logger.info( - 'Accepted accuracy drop threshold: {0}. (condition: (Quant_acc - INT8_acc) <= threshold)' - .format(threshold)) + 'Accepted accuracy drop threshold: {0}. (condition: (Quant_acc - INT8_acc) <= threshold)'.format( + threshold + ) + ) # Random outputs give accuracy about 0.33, we assume valid accuracy to be at least 0.5 assert quant_acc > 0.5 assert int8_acc > 0.5 @@ -281,9 +300,13 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): return quant_model_path = test_case_args.quant_model - assert quant_model_path, 'The Quant model path cannot be empty. Please, use the --quant_model option.' + assert ( + quant_model_path + ), 'The Quant model path cannot be empty. Please, use the --quant_model option.' data_path = test_case_args.infer_data - assert data_path, 'The dataset path cannot be empty. Please, use the --infer_data option.' + assert ( + data_path + ), 'The dataset path cannot be empty. Please, use the --infer_data option.' 
fp32_model_path = test_case_args.fp32_model labels_path = test_case_args.labels batch_size = test_case_args.batch_size @@ -295,12 +318,14 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): self._quantized_ops = set() if test_case_args.ops_to_quantize: self._quantized_ops = self._strings_from_csv( - test_case_args.ops_to_quantize) + test_case_args.ops_to_quantize + ) self._op_ids_to_skip = set([-1]) if test_case_args.op_ids_to_skip: self._op_ids_to_skip = self._ints_from_csv( - test_case_args.op_ids_to_skip) + test_case_args.op_ids_to_skip + ) self._targets = self._strings_from_csv(test_case_args.targets) assert self._targets.intersection( @@ -317,53 +342,70 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): _logger.info('Batch number: {}'.format(batch_num)) _logger.info('Accuracy drop threshold: {}.'.format(acc_diff_threshold)) _logger.info( - 'Quantized ops: {}.'.format(','.join(self._quantized_ops) if self. - _quantized_ops else 'all quantizable')) - _logger.info('Op ids to skip quantization: {}.'.format( - ','.join(map(str, self._op_ids_to_skip) - ) if test_case_args.op_ids_to_skip else 'none')) + 'Quantized ops: {}.'.format( + ','.join(self._quantized_ops) + if self._quantized_ops + else 'all quantizable' + ) + ) + _logger.info( + 'Op ids to skip quantization: {}.'.format( + ','.join(map(str, self._op_ids_to_skip)) + if test_case_args.op_ids_to_skip + else 'none' + ) + ) _logger.info('Targets: {}.'.format(','.join(self._targets))) if 'quant' in self._targets: _logger.info('--- Quant prediction start ---') - val_reader = paddle.batch(self._reader_creator( - data_path, labels_path), - batch_size=batch_size) - quant_acc, quant_pps, quant_lat = self._predict(val_reader, - quant_model_path, - batch_size, - batch_num, - skip_batch_num, - target='quant') + val_reader = paddle.batch( + self._reader_creator(data_path, labels_path), + batch_size=batch_size, + ) + quant_acc, quant_pps, quant_lat = self._predict( + val_reader, + quant_model_path, + batch_size, + batch_num, + skip_batch_num, + target='quant', + ) self._print_performance('Quant', quant_pps, quant_lat) self._print_accuracy('Quant', quant_acc) if 'int8' in self._targets: _logger.info('--- INT8 prediction start ---') - val_reader = paddle.batch(self._reader_creator( - data_path, labels_path), - batch_size=batch_size) - int8_acc, int8_pps, int8_lat = self._predict(val_reader, - quant_model_path, - batch_size, - batch_num, - skip_batch_num, - target='int8') + val_reader = paddle.batch( + self._reader_creator(data_path, labels_path), + batch_size=batch_size, + ) + int8_acc, int8_pps, int8_lat = self._predict( + val_reader, + quant_model_path, + batch_size, + batch_num, + skip_batch_num, + target='int8', + ) self._print_performance('INT8', int8_pps, int8_lat) self._print_accuracy('INT8', int8_acc) fp32_acc = fp32_pps = fp32_lat = -1 if 'fp32' in self._targets and fp32_model_path: _logger.info('--- FP32 prediction start ---') - val_reader = paddle.batch(self._reader_creator( - data_path, labels_path), - batch_size=batch_size) - fp32_acc, fp32_pps, fp32_lat = self._predict(val_reader, - fp32_model_path, - batch_size, - batch_num, - skip_batch_num, - target='fp32') + val_reader = paddle.batch( + self._reader_creator(data_path, labels_path), + batch_size=batch_size, + ) + fp32_acc, fp32_pps, fp32_lat = self._predict( + val_reader, + fp32_model_path, + batch_size, + batch_num, + skip_batch_num, + target='fp32', + ) self._print_performance('FP32', fp32_pps, fp32_lat) self._print_accuracy('FP32', fp32_acc) diff --git 
a/python/paddle/fluid/contrib/slim/tests/quant_int8_image_classification_comparison.py b/python/paddle/fluid/contrib/slim/tests/quant_int8_image_classification_comparison.py index ec88664c2edce082b990f636d80823bc8cb1b29c..e3aecd48c34db61c6e68f9f90fb651ec16317bb6 100644 --- a/python/paddle/fluid/contrib/slim/tests/quant_int8_image_classification_comparison.py +++ b/python/paddle/fluid/contrib/slim/tests/quant_int8_image_classification_comparison.py @@ -40,27 +40,29 @@ def parse_args(): '--skip_batch_num', type=int, default=0, - help='Number of the first minibatches to skip in performance statistics.' + help='Number of the first minibatches to skip in performance statistics.', + ) + parser.add_argument( + '--debug', + action='store_true', + help='If used, the graph of Quant model is drawn.', + ) + parser.add_argument( + '--quant_model', type=str, default='', help='A path to a Quant model.' ) - parser.add_argument('--debug', - action='store_true', - help='If used, the graph of Quant model is drawn.') - parser.add_argument('--quant_model', - type=str, - default='', - help='A path to a Quant model.') parser.add_argument('--infer_data', type=str, default='', help='Data file.') parser.add_argument( '--batch_num', type=int, default=0, - help= - 'Number of batches to process. 0 or less means whole dataset. Default: 0.' + help='Number of batches to process. 0 or less means whole dataset. Default: 0.', + ) + parser.add_argument( + '--acc_diff_threshold', + type=float, + default=0.01, + help='Accepted accuracy difference threshold.', ) - parser.add_argument('--acc_diff_threshold', - type=float, - default=0.01, - help='Accepted accuracy difference threshold.') test_args, args = parser.parse_known_args(namespace=unittest) return test_args, sys.argv[:1] + args @@ -72,7 +74,6 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): """ def _reader_creator(self, data_file='data.bin'): - def reader(): with open(data_file, 'rb') as fp: num = fp.read(8) @@ -91,7 +92,8 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): fp.seek(imgs_offset + img_size * step) img = fp.read(img_size) img = struct.unpack_from( - '{}f'.format(img_ch * img_w * img_h), img) + '{}f'.format(img_ch * img_w * img_h), img + ) img = np.array(img) img.shape = (img_ch, img_w, img_h) fp.seek(labels_offset + label_size * step) @@ -125,14 +127,14 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): name = op_node.name() if name in ['depthwise_conv2d']: input_var_node = graph._find_node_by_name( - op_node.inputs, - op_node.input("Input")[0]) + op_node.inputs, op_node.input("Input")[0] + ) weight_var_node = graph._find_node_by_name( - op_node.inputs, - op_node.input("Filter")[0]) + op_node.inputs, op_node.input("Filter")[0] + ) output_var_node = graph._find_node_by_name( - graph.all_var_nodes(), - op_node.output("Output")[0]) + graph.all_var_nodes(), op_node.output("Output")[0] + ) attrs = { name: op_node.op().attr(name) for name in op_node.op().attr_names() @@ -141,11 +143,9 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): conv_op_node = graph.create_op_node( op_type='conv2d', attrs=attrs, - inputs={ - 'Input': input_var_node, - 'Filter': weight_var_node - }, - outputs={'Output': output_var_node}) + inputs={'Input': input_var_node, 'Filter': weight_var_node}, + outputs={'Output': output_var_node}, + ) graph.link_to(input_var_node, conv_op_node) graph.link_to(weight_var_node, conv_op_node) @@ -154,31 +154,41 @@ class 
QuantInt8ImageClassificationComparisonTest(unittest.TestCase): return graph - def _predict(self, - test_reader=None, - model_path=None, - batch_size=1, - batch_num=1, - skip_batch_num=0, - transform_to_int8=False): + def _predict( + self, + test_reader=None, + model_path=None, + batch_size=1, + batch_num=1, + skip_batch_num=0, + transform_to_int8=False, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.executor.global_scope() with fluid.scope_guard(inference_scope): if os.path.exists(os.path.join(model_path, '__model__')): - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(model_path, exe, 'model', - 'params') + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + model_path, exe, 'model', 'params' + ) graph = IrGraph(core.Graph(inference_program.desc), for_test=True) - if (self._debug): + if self._debug: graph.draw('.', 'quant_orig', graph.all_op_nodes()) - if (transform_to_int8): - mkldnn_int8_pass = QuantInt8MkldnnPass(_scope=inference_scope, - _place=place) + if transform_to_int8: + mkldnn_int8_pass = QuantInt8MkldnnPass( + _scope=inference_scope, _place=place + ) graph = mkldnn_int8_pass.apply(graph) else: graph = self._prepare_for_fp32_mkldnn(graph) @@ -205,13 +215,16 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): labels = np.array([x[1] for x in data]).astype('int64') start = time.time() - out = exe.run(inference_program, - feed={feed_target_names[0]: images}, - fetch_list=fetch_targets) + out = exe.run( + inference_program, + feed={feed_target_names[0]: images}, + fetch_list=fetch_targets, + ) batch_time = (time.time() - start) * 1000 # in miliseconds outputs.append(out[0]) batch_acc1, batch_acc5 = self._get_batch_accuracy( - out[0], labels) + out[0], labels + ) infer_accs1.append(batch_acc1) infer_accs5.append(batch_acc5) samples = len(data) @@ -221,10 +234,17 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): fpses.append(fps) iters += 1 appx = ' (warm-up)' if iters <= skip_batch_num else '' - _logger.info('batch {0}{5}, acc1: {1:.4f}, acc5: {2:.4f}, ' - 'latency: {3:.4f} ms, fps: {4:.2f}'.format( - iters, batch_acc1, batch_acc5, - batch_time / batch_size, fps, appx)) + _logger.info( + 'batch {0}{5}, acc1: {1:.4f}, acc5: {2:.4f}, ' + 'latency: {3:.4f} ms, fps: {4:.2f}'.format( + iters, + batch_acc1, + batch_acc5, + batch_time / batch_size, + fps, + appx, + ) + ) # Postprocess benchmark data batch_latencies = batch_times[skip_batch_num:] @@ -236,29 +256,43 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): acc1_avg = np.mean(infer_accs1) acc5_avg = np.mean(infer_accs5) _logger.info( - 'Total inference run time: {:.2f} s'.format(infer_total_time)) + 'Total inference run time: {:.2f} s'.format(infer_total_time) + ) return outputs, acc1_avg, acc5_avg, fps_avg, latency_avg def _summarize_performance(self, fp32_fps, fp32_lat, int8_fps, int8_lat): _logger.info('--- Performance summary ---') - _logger.info('FP32: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format( - fp32_fps, fp32_lat)) - _logger.info('INT8: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format( - int8_fps, int8_lat)) - - def _compare_accuracy(self, fp32_acc1, fp32_acc5, int8_acc1, int8_acc5, - threshold): + 
_logger.info( + 'FP32: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format( + fp32_fps, fp32_lat + ) + ) + _logger.info( + 'INT8: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format( + int8_fps, int8_lat + ) + ) + + def _compare_accuracy( + self, fp32_acc1, fp32_acc5, int8_acc1, int8_acc5, threshold + ): _logger.info('--- Accuracy summary ---') _logger.info( - 'Accepted top1 accuracy drop threshold: {0}. (condition: (FP32_top1_acc - IN8_top1_acc) <= threshold)' - .format(threshold)) + 'Accepted top1 accuracy drop threshold: {0}. (condition: (FP32_top1_acc - IN8_top1_acc) <= threshold)'.format( + threshold + ) + ) _logger.info( - 'FP32: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'. - format(fp32_acc1, fp32_acc5)) + 'FP32: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.format( + fp32_acc1, fp32_acc5 + ) + ) _logger.info( - 'INT8: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'. - format(int8_acc1, int8_acc5)) + 'INT8: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.format( + int8_acc1, int8_acc5 + ) + ) assert fp32_acc1 > 0.0 assert int8_acc1 > 0.0 assert fp32_acc1 - int8_acc1 <= threshold @@ -268,9 +302,13 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): return quant_model_path = test_case_args.quant_model - assert quant_model_path, 'The Quant model path cannot be empty. Please, use the --quant_model option.' + assert ( + quant_model_path + ), 'The Quant model path cannot be empty. Please, use the --quant_model option.' data_path = test_case_args.infer_data - assert data_path, 'The dataset path cannot be empty. Please, use the --infer_data option.' + assert ( + data_path + ), 'The dataset path cannot be empty. Please, use the --infer_data option.' batch_size = test_case_args.batch_size batch_num = test_case_args.batch_num skip_batch_num = test_case_args.skip_batch_num @@ -285,29 +323,34 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): _logger.info('Accuracy drop threshold: {0}.'.format(acc_diff_threshold)) _logger.info('--- Quant FP32 prediction start ---') - val_reader = paddle.batch(self._reader_creator(data_path), - batch_size=batch_size) + val_reader = paddle.batch( + self._reader_creator(data_path), batch_size=batch_size + ) fp32_output, fp32_acc1, fp32_acc5, fp32_fps, fp32_lat = self._predict( val_reader, quant_model_path, batch_size, batch_num, skip_batch_num, - transform_to_int8=False) + transform_to_int8=False, + ) _logger.info('--- Quant INT8 prediction start ---') - val_reader = paddle.batch(self._reader_creator(data_path), - batch_size=batch_size) + val_reader = paddle.batch( + self._reader_creator(data_path), batch_size=batch_size + ) int8_output, int8_acc1, int8_acc5, int8_fps, int8_lat = self._predict( val_reader, quant_model_path, batch_size, batch_num, skip_batch_num, - transform_to_int8=True) + transform_to_int8=True, + ) self._summarize_performance(fp32_fps, fp32_lat, int8_fps, int8_lat) - self._compare_accuracy(fp32_acc1, fp32_acc5, int8_acc1, int8_acc5, - acc_diff_threshold) + self._compare_accuracy( + fp32_acc1, fp32_acc5, int8_acc1, int8_acc5, acc_diff_threshold + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/save_quant_model.py b/python/paddle/fluid/contrib/slim/tests/save_quant_model.py index cb15b3da4735cd272145aaed09cad160e213156b..b743615f575aa7b7351e5755794f3eb74c58d376 100644 --- a/python/paddle/fluid/contrib/slim/tests/save_quant_model.py +++ b/python/paddle/fluid/contrib/slim/tests/save_quant_model.py @@ -27,88 +27,102 @@ paddle.enable_static() def 
parse_args(): parser = argparse.ArgumentParser() - parser.add_argument('--quant_model_path', - type=str, - default='', - help='A path to a Quant model.') - parser.add_argument('--int8_model_save_path', - type=str, - default='', - help='Saved optimized and quantized INT8 model') + parser.add_argument( + '--quant_model_path', + type=str, + default='', + help='A path to a Quant model.', + ) + parser.add_argument( + '--int8_model_save_path', + type=str, + default='', + help='Saved optimized and quantized INT8 model', + ) parser.add_argument( '--ops_to_quantize', type=str, default='', - help= - 'A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.' + help='A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.', ) parser.add_argument( '--op_ids_to_skip', type=str, default='', - help='A comma separated list of operator ids to skip in quantization.') - parser.add_argument('--debug', - action='store_true', - help='If used, the graph of Quant model is drawn.') + help='A comma separated list of operator ids to skip in quantization.', + ) + parser.add_argument( + '--debug', + action='store_true', + help='If used, the graph of Quant model is drawn.', + ) parser.add_argument( '--quant_model_filename', type=str, default="", - help= - 'The input model`s file name. If empty, search default `__model__` and separate parameter files and use them or in case if not found, attempt loading `model` and `params` files.' + help='The input model`s file name. If empty, search default `__model__` and separate parameter files and use them or in case if not found, attempt loading `model` and `params` files.', ) parser.add_argument( '--quant_params_filename', type=str, default="", - help= - 'If quant_model_filename is empty, this field is ignored. The input model`s all parameters file name. If empty load parameters from separate files.' + help='If quant_model_filename is empty, this field is ignored. The input model`s all parameters file name. If empty load parameters from separate files.', ) parser.add_argument( '--save_model_filename', type=str, default="__model__", - help= - 'The name of file to save the inference program itself. If is set None, a default filename __model__ will be used.' + help='The name of file to save the inference program itself. If is set None, a default filename __model__ will be used.', ) parser.add_argument( '--save_params_filename', type=str, default=None, - help= - 'The name of file to save all related parameters. If it is set None, parameters will be saved in separate files' + help='The name of file to save all related parameters. 
If it is set None, parameters will be saved in separate files', ) test_args, args = parser.parse_known_args(namespace=unittest) return test_args, sys.argv[:1] + args -def transform_and_save_int8_model(original_path, - save_path, - ops_to_quantize='', - op_ids_to_skip='', - debug=False, - quant_model_filename='', - quant_params_filename='', - save_model_filename="__model__", - save_params_filename=None): +def transform_and_save_int8_model( + original_path, + save_path, + ops_to_quantize='', + op_ids_to_skip='', + debug=False, + quant_model_filename='', + quant_params_filename='', + save_model_filename="__model__", + save_params_filename=None, +): place = fluid.CPUPlace() exe = fluid.Executor(place) inference_scope = fluid.executor.global_scope() with fluid.scope_guard(inference_scope): if not quant_model_filename: if os.path.exists(os.path.join(original_path, '__model__')): - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(original_path, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(original_path, exe) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(original_path, exe, 'model', - 'params') + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + original_path, exe, 'model', 'params' + ) else: - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(original_path, exe, - quant_model_filename, - quant_params_filename) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + original_path, exe, quant_model_filename, quant_params_filename + ) ops_to_quantize_set = set() print(ops_to_quantize) @@ -121,7 +135,7 @@ def transform_and_save_int8_model(original_path, op_ids_to_skip_set = set(map(int, op_ids_to_skip.split(','))) graph = IrGraph(core.Graph(inference_program.desc), for_test=True) - if (debug): + if debug: graph.draw('.', 'quant_orig', graph.all_op_nodes()) transform_to_mkldnn_int8_pass = Quant2Int8MkldnnPass( ops_to_quantize_set, @@ -129,27 +143,38 @@ def transform_and_save_int8_model(original_path, _scope=inference_scope, _place=place, _core=core, - _debug=debug) + _debug=debug, + ) graph = transform_to_mkldnn_int8_pass.apply(graph) inference_program = graph.to_program() with fluid.scope_guard(inference_scope): - fluid.io.save_inference_model(save_path, - feed_target_names, - fetch_targets, - exe, - inference_program, - model_filename=save_model_filename, - params_filename=save_params_filename) + fluid.io.save_inference_model( + save_path, + feed_target_names, + fetch_targets, + exe, + inference_program, + model_filename=save_model_filename, + params_filename=save_params_filename, + ) print( - "Success! INT8 model obtained from the Quant model can be found at {}\n" - .format(save_path)) + "Success! 
INT8 model obtained from the Quant model can be found at {}\n".format( + save_path + ) + ) if __name__ == '__main__': global test_args test_args, remaining_args = parse_args() transform_and_save_int8_model( - test_args.quant_model_path, test_args.int8_model_save_path, - test_args.ops_to_quantize, test_args.op_ids_to_skip, test_args.debug, - test_args.quant_model_filename, test_args.quant_params_filename, - test_args.save_model_filename, test_args.save_params_filename) + test_args.quant_model_path, + test_args.int8_model_save_path, + test_args.ops_to_quantize, + test_args.op_ids_to_skip, + test_args.debug, + test_args.quant_model_filename, + test_args.quant_params_filename, + test_args.save_model_filename, + test_args.save_params_filename, + ) diff --git a/python/paddle/fluid/contrib/slim/tests/test_graph.py b/python/paddle/fluid/contrib/slim/tests/test_graph.py index dd08eb1cbde63b041d2874dd90b3fab5292a5e98..482c7237bfce8ae4f0a26b52e993dea873f2f2b7 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_graph.py +++ b/python/paddle/fluid/contrib/slim/tests/test_graph.py @@ -29,19 +29,23 @@ os.environ["CPU_NUM"] = "1" def conv_block(): img = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) avg_loss = paddle.mean(loss) @@ -49,7 +53,6 @@ def conv_block(): class TestGraph(unittest.TestCase): - def graph_apis(self, use_cuda=False, for_ci=True): main = fluid.Program() startup = fluid.Program() @@ -65,25 +68,27 @@ class TestGraph(unittest.TestCase): build_strategy.memory_optimize = False build_strategy.enable_inplace = False origin_binary = fluid.CompiledProgram(graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) backup_binary = fluid.CompiledProgram( - backup_graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + backup_graph.graph + ).with_data_parallel(loss_name=loss.name, build_strategy=build_strategy) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup) iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) def _train(binary): for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss.name]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss.name] + ) if not for_ci: print('{}: {}'.format('loss', loss_v)) @@ -98,18 +103,18 @@ class TestGraph(unittest.TestCase): var.set(var_array, place) 
sum_before = np.sum( - np.array( - fluid.global_scope().find_var('conv2d_1.w_0').get_tensor())) + np.array(fluid.global_scope().find_var('conv2d_1.w_0').get_tensor()) + ) fluid.io._save_persistable_nodes(exe, checkponit_dir, graph) _set_zero('conv2d_1.w_0', fluid.global_scope(), place) set_after = np.sum( - np.array( - fluid.global_scope().find_var('conv2d_1.w_0').get_tensor())) + np.array(fluid.global_scope().find_var('conv2d_1.w_0').get_tensor()) + ) self.assertEqual(set_after, 0) fluid.io._load_persistable_nodes(exe, checkponit_dir, graph) sum_after = np.sum( - np.array( - fluid.global_scope().find_var('conv2d_1.w_0').get_tensor())) + np.array(fluid.global_scope().find_var('conv2d_1.w_0').get_tensor()) + ) self.assertEqual(sum_before, sum_after) marked_nodes = set() diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py index 743e45e927a19a16a27df3d4d448f44a9a2c7370..5d990d4a9860bf5659ffc7e30f5039a702a63f7c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py @@ -42,9 +42,9 @@ os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def get_vaild_warning_num(warning, w): @@ -56,7 +56,6 @@ def get_vaild_warning_num(warning, w): class ImperativeLenet(fluid.dygraph.Layer): - def __init__(self, num_classes=10): super(ImperativeLenet, self).__init__() conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1") @@ -69,36 +68,55 @@ class ImperativeLenet(fluid.dygraph.Layer): fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") self.features = Sequential( - Conv2D(in_channels=1, - out_channels=6, - kernel_size=3, - stride=1, - padding=1, - weight_attr=conv2d_w1_attr, - bias_attr=False), BatchNorm2D(6), ReLU(), + Conv2D( + in_channels=1, + out_channels=6, + kernel_size=3, + stride=1, + padding=1, + weight_attr=conv2d_w1_attr, + bias_attr=False, + ), + BatchNorm2D(6), + ReLU(), + MaxPool2D(kernel_size=2, stride=2), + Conv2D( + in_channels=6, + out_channels=16, + kernel_size=5, + stride=1, + padding=0, + weight_attr=conv2d_w2_attr, + bias_attr=conv2d_b2_attr, + ), + BatchNorm2D(16), + PReLU(), MaxPool2D(kernel_size=2, stride=2), - Conv2D(in_channels=6, - out_channels=16, - kernel_size=5, - stride=1, - padding=0, - weight_attr=conv2d_w2_attr, - bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(), - MaxPool2D(kernel_size=2, stride=2)) + ) self.fc = Sequential( - Linear(in_features=400, - out_features=120, - weight_attr=fc_w1_attr, - bias_attr=fc_b1_attr), LeakyReLU(), - Linear(in_features=120, - out_features=84, - weight_attr=fc_w2_attr, - bias_attr=fc_b2_attr), Sigmoid(), - Linear(in_features=84, - out_features=num_classes, - weight_attr=fc_w3_attr, - bias_attr=fc_b3_attr), Softmax()) + Linear( + in_features=400, + out_features=120, + weight_attr=fc_w1_attr, + bias_attr=fc_b1_attr, + ), + LeakyReLU(), + Linear( + in_features=120, + out_features=84, + weight_attr=fc_w2_attr, + bias_attr=fc_b2_attr, + ), + Sigmoid(), + Linear( + in_features=84, + out_features=num_classes, + weight_attr=fc_w3_attr, + bias_attr=fc_b3_attr, + ), + Softmax(), + ) def forward(self, inputs): x = self.features(inputs) @@ -109,13 +127,14 @@ class 
ImperativeLenet(fluid.dygraph.Layer): class TestImperativeOutSclae(unittest.TestCase): - def setUp(self): self.root_path = tempfile.TemporaryDirectory() - self.param_save_path = os.path.join(self.root_path.name, - "lenet.pdparams") - self.save_path = os.path.join(self.root_path.name, - "lenet_dynamic_outscale_infer_model") + self.param_save_path = os.path.join( + self.root_path.name, "lenet.pdparams" + ) + self.save_path = os.path.join( + self.root_path.name, "lenet_dynamic_outscale_infer_model" + ) def tearDown(self): self.root_path.cleanup() @@ -128,7 +147,8 @@ class TestImperativeOutSclae(unittest.TestCase): activation_quantize_type = 'moving_average_abs_max' imperative_out_scale = ImperativeQuantAware( weight_quantize_type=weight_quantize_type, - activation_quantize_type=activation_quantize_type) + activation_quantize_type=activation_quantize_type, + ) with fluid.dygraph.guard(): np.random.seed(seed) @@ -139,11 +159,12 @@ class TestImperativeOutSclae(unittest.TestCase): lenet = fix_model_dict(lenet) imperative_out_scale.quantize(lenet) - reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=32, - drop_last=True) - adam = AdamOptimizer(learning_rate=lr, - parameter_list=lenet.parameters()) + reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=32, drop_last=True + ) + adam = AdamOptimizer( + learning_rate=lr, parameter_list=lenet.parameters() + ) loss_list = train_lenet(lenet, reader, adam) lenet.eval() @@ -151,8 +172,10 @@ class TestImperativeOutSclae(unittest.TestCase): paddle.save(save_dict, self.param_save_path) for i in range(len(loss_list) - 1): - self.assertTrue(loss_list[i] > loss_list[i + 1], - msg='Failed to do the imperative qat.') + self.assertTrue( + loss_list[i] > loss_list[i + 1], + msg='Failed to do the imperative qat.', + ) with fluid.dygraph.guard(): lenet = ImperativeLenet() @@ -160,11 +183,12 @@ class TestImperativeOutSclae(unittest.TestCase): imperative_out_scale.quantize(lenet) lenet.set_dict(load_dict) - reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=32, - drop_last=True) - adam = AdamOptimizer(learning_rate=lr, - parameter_list=lenet.parameters()) + reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=32, drop_last=True + ) + adam = AdamOptimizer( + learning_rate=lr, parameter_list=lenet.parameters() + ) loss_list = train_lenet(lenet, reader, adam) lenet.eval() @@ -172,13 +196,17 @@ class TestImperativeOutSclae(unittest.TestCase): layer=lenet, path=self.save_path, input_spec=[ - paddle.static.InputSpec(shape=[None, 1, 28, 28], - dtype='float32') - ]) + paddle.static.InputSpec( + shape=[None, 1, 28, 28], dtype='float32' + ) + ], + ) for i in range(len(loss_list) - 1): - self.assertTrue(loss_list[i] > loss_list[i + 1], - msg='Failed to do the imperative qat.') + self.assertTrue( + loss_list[i] > loss_list[i + 1], + msg='Failed to do the imperative qat.', + ) def test_out_scale_acc(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py index 6727356b327acfa9e27011eee2cdec8ef4a29bee..759e74907e1bcae2dbfac190e5321b08edecec73 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py @@ -30,12 +30,16 @@ from paddle.fluid.log_helper import get_logger from paddle.dataset.common import download from paddle.fluid.framework import _test_eager_guard -from imperative_test_utils import fix_model_dict, ImperativeLenet, 
ImperativeLinearBn +from imperative_test_utils import ( + fix_model_dict, + ImperativeLenet, + ImperativeLinearBn, +) from imperative_test_utils import ImperativeLinearBn_hook -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestFuseLinearBn(unittest.TestCase): @@ -54,27 +58,30 @@ class TestFuseLinearBn(unittest.TestCase): quant_h = ptq.quantize(model_h, fuse=True, fuse_list=f_l) for name, layer in quant_model.named_sublayers(): if name in f_l: - assert not (isinstance(layer, nn.BatchNorm1D) - or isinstance(layer, nn.BatchNorm2D)) + assert not ( + isinstance(layer, nn.BatchNorm1D) + or isinstance(layer, nn.BatchNorm2D) + ) out = model(inputs) out_h = model_h(inputs) out_quant = quant_model(inputs) out_quant_h = quant_h(inputs) cos_sim_func = nn.CosineSimilarity(axis=0) - print('fuse linear+bn', cos_sim_func(out.flatten(), - out_quant.flatten())) + print( + 'fuse linear+bn', cos_sim_func(out.flatten(), out_quant.flatten()) + ) print(cos_sim_func(out_h.flatten(), out_quant_h.flatten())) class TestImperativePTQ(unittest.TestCase): - """ - """ + """ """ @classmethod def setUpClass(cls): cls.download_path = 'dygraph_int8/download' - cls.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - cls.download_path) + cls.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + cls.download_path + ) cls.lenet_url = "https://paddle-inference-dist.cdn.bcebos.com/int8/unittest_model_data/lenet_pretrained.tar.gz" cls.lenet_md5 = "953b802fb73b52fae42896e3c24f0afb" @@ -87,7 +94,8 @@ class TestImperativePTQ(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_model(self, data_url, data_md5, folder_name): @@ -114,23 +122,29 @@ class TestImperativePTQ(unittest.TestCase): 'batch_norm2d_0': [[0.37673383951187134], [0.44249194860458374]], 're_lu_0': [[0.44249194860458374], [0.25804123282432556]], 'max_pool2d_0': [[0.25804123282432556], [0.25804123282432556]], - 'linear_0': [[1.7058950662612915], [14.405526161193848], - [0.4373355209827423]], + 'linear_0': [ + [1.7058950662612915], + [14.405526161193848], + [0.4373355209827423], + ], 'add_0': [[1.7058950662612915, 0.0], [1.7058950662612915]], } def model_test(self, model, batch_num=-1, batch_size=8): model.eval() - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) eval_acc_top1_list = [] for batch_id, data in enumerate(test_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape(-1, 1) + x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) + y_data = ( + np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) @@ -141,8 +155,11 @@ class TestImperativePTQ(unittest.TestCase): eval_acc_top1_list.append(float(acc_top1.numpy())) if batch_id % 50 == 0: - _logger.info("Test | At step {}: acc1 = {:}, acc5 = {:}".format( - batch_id, acc_top1.numpy(), acc_top5.numpy())) + _logger.info( + "Test | At step {}: acc1 = {:}, acc5 = {:}".format( + batch_id, acc_top1.numpy(), acc_top5.numpy() + ) + ) if 
batch_num > 0 and batch_id + 1 >= batch_num: break @@ -153,31 +170,39 @@ class TestImperativePTQ(unittest.TestCase): def program_test(self, program_path, batch_num=-1, batch_size=8): exe = paddle.static.Executor(paddle.CPUPlace()) - [inference_program, feed_target_names, fetch_targets - ] = (paddle.static.load_inference_model(program_path, exe)) - - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - - top1_correct_num = 0. - total_num = 0. + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(program_path, exe) + + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + + top1_correct_num = 0.0 + total_num = 0.0 for batch_id, data in enumerate(test_reader()): - img = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') + img = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) label = np.array([x[1] for x in data]).astype('int64') feed = {feed_target_names[0]: img} - results = exe.run(inference_program, - feed=feed, - fetch_list=fetch_targets) + results = exe.run( + inference_program, feed=feed, fetch_list=fetch_targets + ) pred = np.argmax(results[0], axis=1) top1_correct_num += np.sum(np.equal(pred, label)) total_num += len(img) if total_num % 50 == 49: - _logger.info("Test | Test num {}: acc1 = {:}".format( - total_num, top1_correct_num / total_num)) + _logger.info( + "Test | Test num {}: acc1 = {:}".format( + total_num, top1_correct_num / total_num + ) + ) if batch_num > 0 and batch_id + 1 >= batch_num: break @@ -189,8 +214,9 @@ class TestImperativePTQ(unittest.TestCase): self.set_vars() # Load model - params_path = self.download_model(self.lenet_url, self.lenet_md5, - "lenet") + params_path = self.download_model( + self.lenet_url, self.lenet_md5, "lenet" + ) params_path += "/lenet_pretrained/lenet.pdparams" model = ImperativeLenet() @@ -198,25 +224,28 @@ class TestImperativePTQ(unittest.TestCase): model.set_state_dict(model_state_dict) # Quantize, calibrate and save quant_model = self.ptq.quantize(model) - before_acc_top1 = self.model_test(quant_model, self.batch_num, - self.batch_size) + before_acc_top1 = self.model_test( + quant_model, self.batch_num, self.batch_size + ) input_spec = [ paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32') ] with tempfile.TemporaryDirectory(prefix="imperative_ptq_") as tmpdir: save_path = os.path.join(tmpdir, "model") - self.ptq.save_quantized_model(model=quant_model, - path=save_path, - input_spec=input_spec) + self.ptq.save_quantized_model( + model=quant_model, path=save_path, input_spec=input_spec + ) print('Quantized model saved in {%s}' % save_path) - after_acc_top1 = self.model_test(quant_model, self.batch_num, - self.batch_size) + after_acc_top1 = self.model_test( + quant_model, self.batch_num, self.batch_size + ) paddle.enable_static() - infer_acc_top1 = self.program_test(save_path, self.batch_num, - self.batch_size) + infer_acc_top1 = self.program_test( + save_path, self.batch_num, self.batch_size + ) paddle.disable_static() # Check @@ -224,11 +253,15 @@ class TestImperativePTQ(unittest.TestCase): print('After converted acc_top1: %s' % after_acc_top1) print('Infer acc_top1: %s' % infer_acc_top1) - self.assertTrue(after_acc_top1 >= self.eval_acc_top1, - msg="The test acc {%f} is less than {%f}." 
% - (after_acc_top1, self.eval_acc_top1)) - self.assertTrue(infer_acc_top1 >= after_acc_top1, - msg='The acc is lower after converting model.') + self.assertTrue( + after_acc_top1 >= self.eval_acc_top1, + msg="The test acc {%f} is less than {%f}." + % (after_acc_top1, self.eval_acc_top1), + ) + self.assertTrue( + infer_acc_top1 >= after_acc_top1, + msg='The acc is lower after converting model.', + ) end_time = time.time() print("total time: %ss \n" % (end_time - start_time)) @@ -240,15 +273,15 @@ class TestImperativePTQ(unittest.TestCase): class TestImperativePTQfuse(TestImperativePTQ): - def func_ptq(self): start_time = time.time() self.set_vars() # Load model - params_path = self.download_model(self.lenet_url, self.lenet_md5, - "lenet") + params_path = self.download_model( + self.lenet_url, self.lenet_md5, "lenet" + ) params_path += "/lenet_pretrained/lenet.pdparams" model = ImperativeLenet() @@ -259,27 +292,32 @@ class TestImperativePTQfuse(TestImperativePTQ): quant_model = self.ptq.quantize(model, fuse=True, fuse_list=f_l) for name, layer in quant_model.named_sublayers(): if name in f_l: - assert not (isinstance(layer, nn.BatchNorm1D) - or isinstance(layer, nn.BatchNorm2D)) - before_acc_top1 = self.model_test(quant_model, self.batch_num, - self.batch_size) + assert not ( + isinstance(layer, nn.BatchNorm1D) + or isinstance(layer, nn.BatchNorm2D) + ) + before_acc_top1 = self.model_test( + quant_model, self.batch_num, self.batch_size + ) input_spec = [ paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32') ] with tempfile.TemporaryDirectory(prefix="imperative_ptq_") as tmpdir: save_path = os.path.join(tmpdir, "model") - self.ptq.save_quantized_model(model=quant_model, - path=save_path, - input_spec=input_spec) + self.ptq.save_quantized_model( + model=quant_model, path=save_path, input_spec=input_spec + ) print('Quantized model saved in {%s}' % save_path) - after_acc_top1 = self.model_test(quant_model, self.batch_num, - self.batch_size) + after_acc_top1 = self.model_test( + quant_model, self.batch_num, self.batch_size + ) paddle.enable_static() - infer_acc_top1 = self.program_test(save_path, self.batch_num, - self.batch_size) + infer_acc_top1 = self.program_test( + save_path, self.batch_num, self.batch_size + ) paddle.disable_static() # Check @@ -287,15 +325,19 @@ class TestImperativePTQfuse(TestImperativePTQ): print('After converted acc_top1: %s' % after_acc_top1) print('Infer acc_top1: %s' % infer_acc_top1) - #Check whether the quant_model is correct after converting. - #The acc of quantized model should be higher than 0.95. - self.assertTrue(after_acc_top1 >= self.eval_acc_top1, - msg="The test acc {%f} is less than {%f}." % - (after_acc_top1, self.eval_acc_top1)) - #Check the saved infer_model.The acc of infer model - #should not be lower than the one of dygraph model. - self.assertTrue(infer_acc_top1 >= after_acc_top1, - msg='The acc is lower after converting model.') + # Check whether the quant_model is correct after converting. + # The acc of quantized model should be higher than 0.95. + self.assertTrue( + after_acc_top1 >= self.eval_acc_top1, + msg="The test acc {%f} is less than {%f}." + % (after_acc_top1, self.eval_acc_top1), + ) + # Check the saved infer_model.The acc of infer model + # should not be lower than the one of dygraph model. 
+ self.assertTrue( + infer_acc_top1 >= after_acc_top1, + msg='The acc is lower after converting model.', + ) end_time = time.time() print("total time: %ss \n" % (end_time - start_time)) @@ -307,7 +349,6 @@ class TestImperativePTQfuse(TestImperativePTQ): class TestImperativePTQHist(TestImperativePTQ): - def set_vars(self): config = PTQConfig(HistQuantizer(), AbsmaxQuantizer()) self.ptq = ImperativePTQ(config) @@ -317,19 +358,24 @@ class TestImperativePTQHist(TestImperativePTQ): self.eval_acc_top1 = 0.98 self.gt_thresholds = { - 'conv2d_0': [[0.99853515625], [0.35732391771364225], - [0.10933732241392136]], + 'conv2d_0': [ + [0.99853515625], + [0.35732391771364225], + [0.10933732241392136], + ], 'batch_norm2d_0': [[0.35732391771364225], [0.4291427868761275]], 're_lu_0': [[0.4291427868761275], [0.2359918110742001]], 'max_pool2d_0': [[0.2359918110742001], [0.25665526917146053]], - 'linear_0': [[1.7037603475152991], [14.395224522473026], - [0.4373355209827423]], + 'linear_0': [ + [1.7037603475152991], + [14.395224522473026], + [0.4373355209827423], + ], 'add_0': [[1.7037603475152991, 0.0], [1.7037603475152991]], } class TestImperativePTQKL(TestImperativePTQ): - def set_vars(self): config = PTQConfig(KLQuantizer(), PerChannelAbsmaxQuantizer()) self.ptq = ImperativePTQ(config) @@ -339,13 +385,20 @@ class TestImperativePTQKL(TestImperativePTQ): self.eval_acc_top1 = 0.98 conv2d_1_wt_thresholds = [ - 0.18116560578346252, 0.17079241573810577, 0.1702047884464264, - 0.179476797580719, 0.1454375684261322, 0.22981858253479004 + 0.18116560578346252, + 0.17079241573810577, + 0.1702047884464264, + 0.179476797580719, + 0.1454375684261322, + 0.22981858253479004, ] self.gt_thresholds = { 'conv2d_0': [[0.99267578125], [0.37695913558696836]], - 'conv2d_1': [[0.19189296757394914], [0.24514256547263358], - [conv2d_1_wt_thresholds]], + 'conv2d_1': [ + [0.19189296757394914], + [0.24514256547263358], + [conv2d_1_wt_thresholds], + ], 'batch_norm2d_0': [[0.37695913558696836], [0.27462541429440535]], 're_lu_0': [[0.27462541429440535], [0.19189296757394914]], 'max_pool2d_0': [[0.19189296757394914], [0.19189296757394914]], diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py index a507fbe416177b3058ac622a4da7dc92c07e8295..e037cea8f558cbda7667070d90afd070ac8b8c7b 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py @@ -29,7 +29,10 @@ from paddle.fluid.dygraph.container import Sequential from paddle.nn import Linear, Conv2D, Softmax, Conv2DTranspose from paddle.fluid.log_helper import get_logger from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX -from paddle.nn.quant.quant_layers import QuantizedConv2D, QuantizedConv2DTranspose +from paddle.nn.quant.quant_layers import ( + QuantizedConv2D, + QuantizedConv2DTranspose, +) from paddle.fluid.framework import _test_eager_guard from imperative_test_utils import fix_model_dict, ImperativeLenet @@ -39,9 +42,9 @@ os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestImperativeQat(unittest.TestCase): @@ -67,26 +70,28 @@ class TestImperativeQat(unittest.TestCase): weight_quantize_type=self.weight_quantize_type, 
activation_quantize_type=self.activation_quantize_type, fuse_conv_bn=self.fuse_conv_bn, - onnx_format=self.onnx_format) + onnx_format=self.onnx_format, + ) with fluid.dygraph.guard(): # For CI coverage - conv1 = Conv2D(in_channels=3, - out_channels=2, - kernel_size=3, - stride=1, - padding=1, - padding_mode='replicate') + conv1 = Conv2D( + in_channels=3, + out_channels=2, + kernel_size=3, + stride=1, + padding=1, + padding_mode='replicate', + ) quant_conv1 = QuantizedConv2D(conv1) data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') quant_conv1(fluid.dygraph.to_variable(data)) conv_transpose = Conv2DTranspose(4, 6, (3, 3)) quant_conv_transpose = QuantizedConv2DTranspose(conv_transpose) - x_var = paddle.uniform((2, 4, 8, 8), - dtype='float32', - min=-1.0, - max=1.0) + x_var = paddle.uniform( + (2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0 + ) quant_conv_transpose(x_var) seed = 1 @@ -97,23 +102,29 @@ class TestImperativeQat(unittest.TestCase): lenet = ImperativeLenet() lenet = fix_model_dict(lenet) imperative_qat.quantize(lenet) - adam = AdamOptimizer(learning_rate=0.001, - parameter_list=lenet.parameters()) + adam = AdamOptimizer( + learning_rate=0.001, parameter_list=lenet.parameters() + ) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=32, - drop_last=True) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=32) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=32, drop_last=True + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=32 + ) epoch_num = 1 for epoch in range(epoch_num): lenet.train() for batch_id, data in enumerate(train_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) @@ -126,52 +137,65 @@ class TestImperativeQat(unittest.TestCase): lenet.clear_gradients() if batch_id % 100 == 0: _logger.info( - "Train | At epoch {} step {}: loss = {:}, acc= {:}". 
- format(epoch, batch_id, avg_loss.numpy(), - acc.numpy())) + "Train | At epoch {} step {}: loss = {:}, acc= {:}".format( + epoch, batch_id, avg_loss.numpy(), acc.numpy() + ) + ) if batch_id == 500: # For shortening CI time break lenet.eval() eval_acc_top1_list = [] for batch_id, data in enumerate(test_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc_top1 = fluid.layers.accuracy(input=out, - label=label, - k=1) - acc_top5 = fluid.layers.accuracy(input=out, - label=label, - k=5) + acc_top1 = fluid.layers.accuracy( + input=out, label=label, k=1 + ) + acc_top5 = fluid.layers.accuracy( + input=out, label=label, k=5 + ) if batch_id % 100 == 0: eval_acc_top1_list.append(float(acc_top1.numpy())) _logger.info( - "Test | At epoch {} step {}: acc1 = {:}, acc5 = {:}" - .format(epoch, batch_id, acc_top1.numpy(), - acc_top5.numpy())) + "Test | At epoch {} step {}: acc1 = {:}, acc5 = {:}".format( + epoch, + batch_id, + acc_top1.numpy(), + acc_top5.numpy(), + ) + ) # check eval acc eval_acc_top1 = sum(eval_acc_top1_list) / len( - eval_acc_top1_list) + eval_acc_top1_list + ) print('eval_acc_top1', eval_acc_top1) - self.assertTrue(eval_acc_top1 > 0.9, - msg="The test acc {%f} is less than 0.9." % - eval_acc_top1) + self.assertTrue( + eval_acc_top1 > 0.9, + msg="The test acc {%f} is less than 0.9." % eval_acc_top1, + ) # test the correctness of `paddle.jit.save` data = next(test_reader()) - test_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape(-1, 1) + test_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + ) test_img = fluid.dygraph.to_variable(test_data) label = fluid.dygraph.to_variable(y_data) lenet.eval() @@ -184,9 +208,11 @@ class TestImperativeQat(unittest.TestCase): layer=lenet, path=os.path.join(tmpdir, "lenet"), input_spec=[ - paddle.static.InputSpec(shape=[None, 1, 28, 28], - dtype='float32') - ]) + paddle.static.InputSpec( + shape=[None, 1, 28, 28], dtype='float32' + ) + ], + ) print('Quantized model saved in %s' % tmpdir) if core.is_compiled_with_cuda(): @@ -194,15 +220,21 @@ class TestImperativeQat(unittest.TestCase): else: place = core.CPUPlace() exe = fluid.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model( - dirname=tmpdir, - executor=exe, - model_filename="lenet" + INFER_MODEL_SUFFIX, - params_filename="lenet" + INFER_PARAMS_SUFFIX) - quant_out, = exe.run(inference_program, - feed={feed_target_names[0]: test_data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + dirname=tmpdir, + executor=exe, + model_filename="lenet" + INFER_MODEL_SUFFIX, + params_filename="lenet" + INFER_PARAMS_SUFFIX, + ) + (quant_out,) = exe.run( + inference_program, + feed={feed_target_names[0]: test_data}, + fetch_list=fetch_targets, + ) paddle.disable_static() quant_out = fluid.dygraph.to_variable(quant_out) quant_acc = fluid.layers.accuracy(quant_out, label).numpy() @@ -217,7 +249,6 @@ 
class TestImperativeQat(unittest.TestCase): class TestImperativeQatONNXFormat(unittest.TestCase): - def set_vars(self): self.weight_quantize_type = 'abs_max' self.activation_quantize_type = 'moving_average_abs_max' diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py index b92d19cab25140fcccf19b6bf8a0daf50224b2e9..ee0edb445e708822c9a269466f248dd3f54b102b 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py @@ -33,9 +33,9 @@ os.environ["CPU_NUM"] = "1" if paddle.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestImperativeQatAmp(unittest.TestCase): @@ -46,12 +46,14 @@ class TestImperativeQatAmp(unittest.TestCase): @classmethod def setUpClass(cls): cls.root_path = tempfile.TemporaryDirectory( - prefix="imperative_qat_amp_") + prefix="imperative_qat_amp_" + ) cls.save_path = os.path.join(cls.root_path.name, "model") cls.download_path = 'dygraph_int8/download' - cls.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - cls.download_path) + cls.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + cls.download_path + ) cls.lenet_url = "https://paddle-inference-dist.cdn.bcebos.com/int8/unittest_model_data/lenet_pretrained.tar.gz" cls.lenet_md5 = "953b802fb73b52fae42896e3c24f0afb" @@ -68,7 +70,8 @@ class TestImperativeQatAmp(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_model(self, data_url, data_md5, folder_name): @@ -93,17 +96,21 @@ class TestImperativeQatAmp(unittest.TestCase): def model_train(self, model, batch_num=-1, batch_size=32, use_amp=False): model.train() - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) - adam = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size + ) + adam = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=500) for batch_id, data in enumerate(train_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape(-1, 1) + x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) + y_data = ( + np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) @@ -130,8 +137,11 @@ class TestImperativeQatAmp(unittest.TestCase): model.clear_gradients() if batch_id % 100 == 0: - _logger.info("Train | step {}: loss = {:}, acc= {:}".format( - batch_id, avg_loss.numpy(), acc.numpy())) + _logger.info( + "Train | step {}: loss = {:}, acc= {:}".format( + batch_id, avg_loss.numpy(), acc.numpy() + ) + ) if batch_num > 0 and batch_id + 1 >= batch_num: break @@ -139,15 +149,18 @@ class TestImperativeQatAmp(unittest.TestCase): def model_test(self, model, batch_num=-1, batch_size=32, use_amp=False): model.eval() - test_reader = 
paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) acc_top1_list = [] for batch_id, data in enumerate(test_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape(-1, 1) + x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) + y_data = ( + np.array([x[1] for x in data]).astype('int64').reshape(-1, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) @@ -159,8 +172,11 @@ class TestImperativeQatAmp(unittest.TestCase): acc_top1_list.append(float(acc_top1.numpy())) if batch_id % 100 == 0: - _logger.info("Test | At step {}: acc1 = {:}, acc5 = {:}".format( - batch_id, acc_top1.numpy(), acc_top5.numpy())) + _logger.info( + "Test | At step {}: acc1 = {:}, acc5 = {:}".format( + batch_id, acc_top1.numpy(), acc_top5.numpy() + ) + ) if batch_num > 0 and batch_id + 1 >= batch_num: break @@ -173,8 +189,9 @@ class TestImperativeQatAmp(unittest.TestCase): self.set_vars() - params_path = self.download_model(self.lenet_url, self.lenet_md5, - "lenet") + params_path = self.download_model( + self.lenet_url, self.lenet_md5, "lenet" + ) params_path += "/lenet_pretrained/lenet.pdparams" with fluid.dygraph.guard(): @@ -183,24 +200,31 @@ class TestImperativeQatAmp(unittest.TestCase): model.set_state_dict(model_state_dict) _logger.info("Test fp32 model") - fp32_acc_top1 = self.model_test(model, self.test_batch_num, - self.test_batch_size) + fp32_acc_top1 = self.model_test( + model, self.test_batch_num, self.test_batch_size + ) self.qat.quantize(model) use_amp = True - self.model_train(model, self.train_batch_num, self.train_batch_size, - use_amp) + self.model_train( + model, self.train_batch_num, self.train_batch_size, use_amp + ) _logger.info("Test int8 model") - int8_acc_top1 = self.model_test(model, self.test_batch_num, - self.test_batch_size, use_amp) - - _logger.info('fp32_acc_top1: %f, int8_acc_top1: %f' % - (fp32_acc_top1, int8_acc_top1)) - self.assertTrue(int8_acc_top1 > fp32_acc_top1 - 0.01, - msg='fp32_acc_top1: %f, int8_acc_top1: %f' % - (fp32_acc_top1, int8_acc_top1)) + int8_acc_top1 = self.model_test( + model, self.test_batch_num, self.test_batch_size, use_amp + ) + + _logger.info( + 'fp32_acc_top1: %f, int8_acc_top1: %f' + % (fp32_acc_top1, int8_acc_top1) + ) + self.assertTrue( + int8_acc_top1 > fp32_acc_top1 - 0.01, + msg='fp32_acc_top1: %f, int8_acc_top1: %f' + % (fp32_acc_top1, int8_acc_top1), + ) input_spec = [ paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32') diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py index 30ea0346d8bbb7142c04ebea5e307d67130ebf43..45ea756ce27abf59371528c1649476f80ecf042d 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py @@ -31,13 +31,12 @@ os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestImperativeQatChannelWise(TestImperativeQat): - def set_vars(self): self.weight_quantize_type = 'channel_wise_abs_max' 
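For reference, a minimal sketch of the quantization-aware-training round trip that these imperative QAT tests exercise, assuming the legacy paddle.fluid.contrib.slim API used in the hunks above; TinyNet is a hypothetical stand-in for the tests' ImperativeLenet and the save path is a placeholder.

import paddle
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware


class TinyNet(paddle.nn.Layer):
    # Hypothetical stand-in for ImperativeLenet: one Linear layer is enough
    # for ImperativeQuantAware to wrap with fake-quant layers.
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(784, 10)

    def forward(self, x):
        return self.fc(paddle.flatten(x, 1))


qat = ImperativeQuantAware(
    weight_quantize_type='channel_wise_abs_max',        # per-channel weight scales
    activation_quantize_type='moving_average_abs_max',  # running activation scales
)
model = TinyNet()
qat.quantize(model)  # rewrites quantizable sublayers in place
# ... fine-tune `model` here, exactly as the train loops above do ...
qat.save_quantized_model(
    layer=model,
    path="./qat_tiny",  # placeholder path
    input_spec=[
        paddle.static.InputSpec(shape=[None, 1, 28, 28], dtype='float32')
    ],
)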
self.activation_quantize_type = 'moving_average_abs_max' @@ -48,7 +47,6 @@ class TestImperativeQatChannelWise(TestImperativeQat): class TestImperativeQatChannelWiseONNXFormat(TestImperativeQat): - def set_vars(self): self.weight_quantize_type = 'channel_wise_abs_max' self.activation_quantize_type = 'moving_average_abs_max' diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py index 898d89f0142b4849be3d608dc11b1f3b3d20d577..dc1dee13cf81f171a9de6c371ef0800545b4db2c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py @@ -31,13 +31,12 @@ os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestImperativeQatfuseBN(TestImperativeQat): - def set_vars(self): self.weight_quantize_type = 'abs_max' self.activation_quantize_type = 'moving_average_abs_max' diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py index 8269abbab4deeb9ab78874d22fd0f4a45f77c7ad..7e94c8dedd55a7e21dad024dad1037331ec24f81 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py @@ -23,14 +23,21 @@ import logging import paddle import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.optimizer import SGDOptimizer, AdamOptimizer, MomentumOptimizer +from paddle.fluid.optimizer import ( + SGDOptimizer, + AdamOptimizer, + MomentumOptimizer, +) from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware from paddle.fluid.dygraph.container import Sequential from paddle.nn import ReLU, ReLU6, LeakyReLU, Sigmoid, Softmax, PReLU from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D from paddle.fluid.log_helper import get_logger from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX -from paddle.nn.quant.quant_layers import QuantizedConv2D, QuantizedConv2DTranspose +from paddle.nn.quant.quant_layers import ( + QuantizedConv2D, + QuantizedConv2DTranspose, +) from paddle.fluid.framework import _test_eager_guard from imperative_test_utils import fix_model_dict @@ -40,13 +47,12 @@ os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class ImperativeLenet(fluid.dygraph.Layer): - def __init__(self, num_classes=10): super(ImperativeLenet, self).__init__() conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1") @@ -59,36 +65,55 @@ class ImperativeLenet(fluid.dygraph.Layer): fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") self.features = Sequential( - Conv2D(in_channels=1, - out_channels=6, - kernel_size=3, - stride=1, - padding=1, - weight_attr=conv2d_w1_attr, - bias_attr=False), BatchNorm2D(6), ReLU(), + Conv2D( + in_channels=1, + out_channels=6, + kernel_size=3, + stride=1, + padding=1, + weight_attr=conv2d_w1_attr, + bias_attr=False, + ), + BatchNorm2D(6), + 
ReLU(), + MaxPool2D(kernel_size=2, stride=2), + Conv2D( + in_channels=6, + out_channels=16, + kernel_size=5, + stride=1, + padding=0, + weight_attr=conv2d_w2_attr, + bias_attr=conv2d_b2_attr, + ), + BatchNorm2D(16), + PReLU(), MaxPool2D(kernel_size=2, stride=2), - Conv2D(in_channels=6, - out_channels=16, - kernel_size=5, - stride=1, - padding=0, - weight_attr=conv2d_w2_attr, - bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(), - MaxPool2D(kernel_size=2, stride=2)) + ) self.fc = Sequential( - Linear(in_features=400, - out_features=120, - weight_attr=fc_w1_attr, - bias_attr=fc_b1_attr), LeakyReLU(), - Linear(in_features=120, - out_features=84, - weight_attr=fc_w2_attr, - bias_attr=fc_b2_attr), Sigmoid(), - Linear(in_features=84, - out_features=num_classes, - weight_attr=fc_w3_attr, - bias_attr=fc_b3_attr), Softmax()) + Linear( + in_features=400, + out_features=120, + weight_attr=fc_w1_attr, + bias_attr=fc_b1_attr, + ), + LeakyReLU(), + Linear( + in_features=120, + out_features=84, + weight_attr=fc_w2_attr, + bias_attr=fc_b2_attr, + ), + Sigmoid(), + Linear( + in_features=84, + out_features=num_classes, + weight_attr=fc_w3_attr, + bias_attr=fc_b3_attr, + ), + Softmax(), + ) def forward(self, inputs): x = self.features(inputs) @@ -98,7 +123,6 @@ class ImperativeLenet(fluid.dygraph.Layer): class TestImperativeQatLSQ(unittest.TestCase): - def set_vars(self): self.weight_quantize_type = 'channel_wise_lsq_weight' self.activation_quantize_type = 'lsq_act' @@ -111,7 +135,8 @@ class TestImperativeQatLSQ(unittest.TestCase): imperative_qat = ImperativeQuantAware( weight_quantize_type=self.weight_quantize_type, activation_quantize_type=self.activation_quantize_type, - fuse_conv_bn=self.fuse_conv_bn) + fuse_conv_bn=self.fuse_conv_bn, + ) seed = 100 np.random.seed(seed) @@ -121,22 +146,26 @@ class TestImperativeQatLSQ(unittest.TestCase): lenet = ImperativeLenet() lenet = fix_model_dict(lenet) imperative_qat.quantize(lenet) - optimizer = MomentumOptimizer(learning_rate=0.1, - parameter_list=lenet.parameters(), - momentum=0.9) + optimizer = MomentumOptimizer( + learning_rate=0.1, parameter_list=lenet.parameters(), momentum=0.9 + ) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=64, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=64, drop_last=True + ) test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=32) epoch_num = 2 for epoch in range(epoch_num): lenet.train() for batch_id, data in enumerate(train_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) @@ -151,42 +180,53 @@ class TestImperativeQatLSQ(unittest.TestCase): if batch_id % 100 == 0: _logger.info( - "Train | At epoch {} step {}: loss = {:}, acc= {:}". 
- format(epoch, batch_id, avg_loss.numpy(), acc.numpy())) + "Train | At epoch {} step {}: loss = {:}, acc= {:}".format( + epoch, batch_id, avg_loss.numpy(), acc.numpy() + ) + ) lenet.eval() eval_acc_top1_list = [] with paddle.no_grad(): for batch_id, data in enumerate(test_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc_top1 = fluid.layers.accuracy(input=out, - label=label, - k=1) - acc_top5 = fluid.layers.accuracy(input=out, - label=label, - k=5) + acc_top1 = fluid.layers.accuracy( + input=out, label=label, k=1 + ) + acc_top5 = fluid.layers.accuracy( + input=out, label=label, k=5 + ) if batch_id % 100 == 0: eval_acc_top1_list.append(float(acc_top1.numpy())) _logger.info( - "Test | At epoch {} step {}: acc1 = {:}, acc5 = {:}" - .format(epoch, batch_id, acc_top1.numpy(), - acc_top5.numpy())) + "Test | At epoch {} step {}: acc1 = {:}, acc5 = {:}".format( + epoch, + batch_id, + acc_top1.numpy(), + acc_top5.numpy(), + ) + ) # check eval acc eval_acc_top1 = sum(eval_acc_top1_list) / len(eval_acc_top1_list) print('eval_acc_top1', eval_acc_top1) - self.assertTrue(eval_acc_top1 > 0.9, - msg="The test acc {%f} is less than 0.9." % - eval_acc_top1) + self.assertTrue( + eval_acc_top1 > 0.9, + msg="The test acc {%f} is less than 0.9." % eval_acc_top1, + ) def test_qat(self): self.func_qat() diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py index 2ae75744cf8a865d0f4446173d9731dc58fe6a54..89e2362b51dd71f74c693782e116d27ed89a8baa 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_user_defined.py @@ -32,21 +32,21 @@ from paddle.fluid.framework import _test_eager_guard os.environ["CPU_NUM"] = "1" -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class PACT(nn.Layer): - def __init__(self, init_value=20): super(PACT, self).__init__() alpha_attr = paddle.ParamAttr( name=self.full_name() + ".pact", - initializer=paddle.nn.initializer.Constant(value=init_value)) - self.alpha = self.create_parameter(shape=[1], - attr=alpha_attr, - dtype='float32') + initializer=paddle.nn.initializer.Constant(value=init_value), + ) + self.alpha = self.create_parameter( + shape=[1], attr=alpha_attr, dtype='float32' + ) def forward(self, x): out_left = paddle.nn.functional.relu(x - self.alpha) @@ -56,31 +56,30 @@ class PACT(nn.Layer): class CustomQAT(nn.Layer): - def __init__(self): super(CustomQAT, self).__init__() - attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant( - value=1.0)) - self.u_param = self.create_parameter(shape=[1], - attr=attr, - dtype='float32') - self.l_param = self.create_parameter(shape=[1], - attr=attr, - dtype='float32') - self.alpha_param = self.create_parameter(shape=[1], - attr=attr, - dtype='float32') - self.upper = self.create_parameter(shape=[1], - attr=attr, - dtype='float32') + attr = paddle.ParamAttr( + 
initializer=paddle.nn.initializer.Constant(value=1.0) + ) + self.u_param = self.create_parameter( + shape=[1], attr=attr, dtype='float32' + ) + self.l_param = self.create_parameter( + shape=[1], attr=attr, dtype='float32' + ) + self.alpha_param = self.create_parameter( + shape=[1], attr=attr, dtype='float32' + ) + self.upper = self.create_parameter( + shape=[1], attr=attr, dtype='float32' + ) self.upper.stop_gradient = True - self.lower = self.create_parameter(shape=[1], - attr=attr, - dtype='float32') + self.lower = self.create_parameter( + shape=[1], attr=attr, dtype='float32' + ) self.lower.stop_gradient = True def forward(self, x): - def clip(x, upper, lower): x = x + paddle.nn.functional.relu(lower - x) x = x - paddle.nn.functional.relu(x - upper) @@ -111,7 +110,6 @@ class CustomQAT(nn.Layer): class ModelForConv2dT(nn.Layer): - def __init__(self, num_classes=10): super(ModelForConv2dT, self).__init__() self.features = nn.Conv2DTranspose(4, 6, (3, 3)) @@ -125,29 +123,34 @@ class ModelForConv2dT(nn.Layer): class ImperativeLenet(paddle.nn.Layer): - def __init__(self, num_classes=10, classifier_activation='softmax'): super(ImperativeLenet, self).__init__() self.features = Sequential( - Conv2D(num_channels=1, - num_filters=6, - filter_size=3, - stride=1, - padding=1), + Conv2D( + num_channels=1, + num_filters=6, + filter_size=3, + stride=1, + padding=1, + ), Pool2D(pool_size=2, pool_type='max', pool_stride=2), - Conv2D(num_channels=6, - num_filters=16, - filter_size=5, - stride=1, - padding=0), - Pool2D(pool_size=2, pool_type='max', pool_stride=2)) + Conv2D( + num_channels=6, + num_filters=16, + filter_size=5, + stride=1, + padding=0, + ), + Pool2D(pool_size=2, pool_type='max', pool_stride=2), + ) self.fc = Sequential( Linear(input_dim=400, output_dim=120), Linear(input_dim=120, output_dim=84), - Linear(input_dim=84, - output_dim=num_classes, - act=classifier_activation)) + Linear( + input_dim=84, output_dim=num_classes, act=classifier_activation + ), + ) def forward(self, inputs): x = self.features(inputs) @@ -158,7 +161,6 @@ class ImperativeLenet(paddle.nn.Layer): class TestUserDefinedActPreprocess(unittest.TestCase): - def setUp(self): _logger.info("test act_preprocess") self.imperative_qat = ImperativeQuantAware(act_preprocess_layer=PACT) @@ -178,9 +180,13 @@ class TestUserDefinedActPreprocess(unittest.TestCase): if name.endswith("bias"): value = np.zeros_like(p_value).astype('float32') else: - value = np.random.normal( - loc=0.0, scale=0.01, - size=np.product(p_shape)).reshape(p_shape).astype('float32') + value = ( + np.random.normal( + loc=0.0, scale=0.01, size=np.product(p_shape) + ) + .reshape(p_shape) + .astype('float32') + ) fixed_state[name] = value param_init_map[param.name] = value lenet.set_dict(fixed_state) @@ -188,10 +194,10 @@ class TestUserDefinedActPreprocess(unittest.TestCase): imperative_qat.quantize(lenet) adam = Adam(learning_rate=0.001, parameters=lenet.parameters()) dynamic_loss_rec = [] - #for CI coverage + # for CI coverage conv_transpose = ModelForConv2dT() imperative_qat.quantize(conv_transpose) - x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) 
+ x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv_transpose(x_var) def train(model): @@ -200,10 +206,14 @@ class TestUserDefinedActPreprocess(unittest.TestCase): for epoch in range(epoch_num): model.train() for batch_id, data in enumerate(train_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) @@ -216,19 +226,24 @@ class TestUserDefinedActPreprocess(unittest.TestCase): model.clear_gradients() if batch_id % 50 == 0: _logger.info( - "Train | At epoch {} step {}: loss = {:}, acc= {:}". - format(epoch, batch_id, avg_loss.numpy(), - acc.numpy())) + "Train | At epoch {} step {}: loss = {:}, acc= {:}".format( + epoch, batch_id, avg_loss.numpy(), acc.numpy() + ) + ) break def test(model): model.eval() avg_acc = [[], []] for batch_id, data in enumerate(test_reader()): - x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) @@ -241,11 +256,13 @@ class TestUserDefinedActPreprocess(unittest.TestCase): if batch_id % 100 == 0: _logger.info( "Test | step {}: acc1 = {:}, acc5 = {:}".format( - batch_id, acc_top1.numpy(), acc_top5.numpy())) + batch_id, acc_top1.numpy(), acc_top5.numpy() + ) + ) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=512, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=512, drop_last=True + ) test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=512) train(lenet) test(lenet) @@ -257,25 +274,23 @@ class TestUserDefinedActPreprocess(unittest.TestCase): class TestUserDefinedWeightPreprocess(TestUserDefinedActPreprocess): - def setUp(self): _logger.info("test weight_preprocess") self.imperative_qat = ImperativeQuantAware(weight_preprocess_layer=PACT) class TestUserDefinedActQuantize(TestUserDefinedActPreprocess): - def setUp(self): _logger.info("test act_quantize") self.imperative_qat = ImperativeQuantAware(act_quantize_layer=CustomQAT) class TestUserDefinedWeightQuantize(TestUserDefinedActPreprocess): - def setUp(self): _logger.info("test weight_quantize") self.imperative_qat = ImperativeQuantAware( - weight_quantize_layer=CustomQAT) + weight_quantize_layer=CustomQAT + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py index f7608ee66b41e98b6afb1d73e03eb6172b99025b..131866095ad7b3c7f1dc8110eefb4b3d2dd4a10a 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py @@ -29,20 +29,23 @@ from paddle.nn import Linear, Conv2D, Softmax, BatchNorm from paddle.fluid.dygraph.nn import Pool2D from paddle.fluid.log_helper import get_logger -from imperative_test_utils import fix_model_dict, train_lenet, ImperativeLenetWithSkipQuant +from imperative_test_utils import ( + fix_model_dict, + 
train_lenet, + ImperativeLenetWithSkipQuant, +) from paddle.fluid.framework import _test_eager_guard os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class TestImperativeOutSclae(unittest.TestCase): - def func_out_scale_acc(self): paddle.disable_static() seed = 1000 @@ -51,16 +54,17 @@ class TestImperativeOutSclae(unittest.TestCase): qat = ImperativeQuantAware() np.random.seed(seed) - reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=512, - drop_last=True) + reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=512, drop_last=True + ) lenet = ImperativeLenetWithSkipQuant() lenet = fix_model_dict(lenet) qat.quantize(lenet) - adam = AdamOptimizer(learning_rate=lr, - parameter_list=lenet.parameters()) + adam = AdamOptimizer( + learning_rate=lr, parameter_list=lenet.parameters() + ) dynamic_loss_rec = [] lenet.train() loss_list = train_lenet(lenet, reader, adam) @@ -70,13 +74,15 @@ class TestImperativeOutSclae(unittest.TestCase): path = "./save_dynamic_quant_infer_model/lenet" save_dir = "./save_dynamic_quant_infer_model" - qat.save_quantized_model(layer=lenet, - path=path, - input_spec=[ - paddle.static.InputSpec( - shape=[None, 1, 28, 28], - dtype='float32') - ]) + qat.save_quantized_model( + layer=lenet, + path=path, + input_spec=[ + paddle.static.InputSpec( + shape=[None, 1, 28, 28], dtype='float32' + ) + ], + ) paddle.enable_static() @@ -86,12 +92,16 @@ class TestImperativeOutSclae(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = (fluid.io.load_inference_model( - dirname=save_dir, - executor=exe, - model_filename="lenet" + INFER_MODEL_SUFFIX, - params_filename="lenet" + INFER_PARAMS_SUFFIX)) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + dirname=save_dir, + executor=exe, + model_filename="lenet" + INFER_MODEL_SUFFIX, + params_filename="lenet" + INFER_PARAMS_SUFFIX, + ) model_ops = inference_program.global_block().ops conv2d_count, matmul_count = 0, 0 @@ -105,10 +115,12 @@ class TestImperativeOutSclae(unittest.TestCase): conv2d_skip_count += 1 if conv2d_count > 0: self.assertTrue( - 'fake_quantize_dequantize' in model_ops[i - 1].type) + 'fake_quantize_dequantize' in model_ops[i - 1].type + ) else: self.assertTrue( - 'fake_quantize_dequantize' not in model_ops[i - 1].type) + 'fake_quantize_dequantize' not in model_ops[i - 1].type + ) conv2d_count += 1 if op.type == 'matmul': @@ -117,10 +129,12 @@ class TestImperativeOutSclae(unittest.TestCase): matmul_skip_count += 1 if matmul_count > 0: self.assertTrue( - 'fake_quantize_dequantize' in model_ops[i - 1].type) + 'fake_quantize_dequantize' in model_ops[i - 1].type + ) else: self.assertTrue( - 'fake_quantize_dequantize' not in model_ops[i - 1].type) + 'fake_quantize_dequantize' not in model_ops[i - 1].type + ) matmul_count += 1 if find_conv2d: diff --git a/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py b/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py index 04814ed9fe129d896d07df113b886d0b4ce515f2..1ec463192d9198c527fd84c9d8ee805ce001ca7a 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py +++ 
b/python/paddle/fluid/contrib/slim/tests/test_moving_average_abs_max_scale_op.py @@ -28,46 +28,51 @@ def init_data(batch_size=32, img_shape=[784], label_range=9): assert isinstance(img_shape, list) input_shape = [batch_size] + img_shape img = np.random.random(size=input_shape).astype(np.float32) - label = np.array( - [np.random.randint(0, label_range) for _ in range(batch_size)]).reshape( - (-1, 1)).astype("int64") + label = ( + np.array([np.random.randint(0, label_range) for _ in range(batch_size)]) + .reshape((-1, 1)) + .astype("int64") + ) return img, label class TestMovingAverageAbsMaxScaleOp(unittest.TestCase): - def check_backward(self, use_cuda): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - image = fluid.layers.data(name='image', - shape=[784], - dtype='float32') + image = fluid.layers.data( + name='image', shape=[784], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') fc_tmp = fluid.layers.fc(image, size=10, act='softmax') out_scale = quant_layers.MovingAverageAbsMaxScale( - name=fc_tmp.name, dtype=fc_tmp.dtype) + name=fc_tmp.name, dtype=fc_tmp.dtype + ) fc_tmp_1 = out_scale(fc_tmp) cross_entropy = fluid.layers.softmax_with_cross_entropy( - fc_tmp, label) + fc_tmp, label + ) loss = fluid.layers.reduce_mean(cross_entropy) sgd = fluid.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) moving_average_abs_max_scale_ops = [ - op for op in main_program.blocks[0].ops + op + for op in main_program.blocks[0].ops if op.type == u'moving_average_abs_max_scale' ] - assert len( - moving_average_abs_max_scale_ops - ) == 1, "The number of moving_average_abs_max_scale_ops should be 1." + assert ( + len(moving_average_abs_max_scale_ops) == 1 + ), "The number of moving_average_abs_max_scale_ops should be 1." 
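As a side note on the assertion above: the check boils down to counting operators of one type in block 0 of the program. A minimal helper under that assumption, reading only the attributes the test already inspects (program.blocks[0].ops and op.type):

def count_ops(program, op_type):
    # Same attributes the assertion above reads: block 0's op list and each op's type.
    return sum(1 for op in program.blocks[0].ops if op.type == op_type)


# Usage in the spirit of the test:
# assert count_ops(main_program, 'moving_average_abs_max_scale') == 1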
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_program) binary = fluid.compiler.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name) + main_program + ).with_data_parallel(loss_name=loss.name) img, label = init_data() feed_dict = {"image": img, "label": label} diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py index 4c55f58e05e05d83a693357b4db45267c4863b9b..7eb7f4d479e26266feced10d16c130fcfac31809 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_lstm_model.py @@ -34,19 +34,23 @@ np.random.seed(0) class TestPostTrainingQuantization(unittest.TestCase): - def setUp(self): self.download_path = 'int8/download' - self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - self.download_path) + self.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + self.download_path + ) self.root_path = tempfile.TemporaryDirectory() - self.int8_model_path = os.path.join(self.root_path.name, - "post_training_quantization") + self.int8_model_path = os.path.join( + self.root_path.name, "post_training_quantization" + ) try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print("Failed to create {} due to {}".format( - self.int8_model_path, str(e))) + print( + "Failed to create {} due to {}".format( + self.int8_model_path, str(e) + ) + ) sys.exit(-1) def tearDown(self): @@ -55,7 +59,8 @@ class TestPostTrainingQuantization(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_model(self, data_url, data_md5, folder_name): @@ -69,7 +74,6 @@ class TestPostTrainingQuantization(unittest.TestCase): return data_cache_folder def get_batch_reader(self, data_path, place): - def reader(): with open(data_path, 'rb') as in_file: while True: @@ -83,13 +87,15 @@ class TestPostTrainingQuantization(unittest.TestCase): label = in_file.read(4 * label_len) label = np.frombuffer(label, dtype=np.int32).reshape( - [len(label) // 4]) + [len(label) // 4] + ) if label.shape[0] != 1 or label[0] > 6350: continue feat = in_file.read(4 * seq_len * 8) feat = np.frombuffer(feat, dtype=np.float32).reshape( - [len(feat) // 4 // 8, 8]) + [len(feat) // 4 // 8, 8] + ) lod_feat = [feat.shape[0]] minputs = fluid.create_lod_tensor(feat, [lod_feat], place) @@ -98,7 +104,6 @@ class TestPostTrainingQuantization(unittest.TestCase): return reader def get_simple_reader(self, data_path, place): - def reader(): with open(data_path, 'rb') as in_file: while True: @@ -112,13 +117,15 @@ class TestPostTrainingQuantization(unittest.TestCase): label = in_file.read(4 * label_len) label = np.frombuffer(label, dtype=np.int32).reshape( - [len(label) // 4]) + [len(label) // 4] + ) if label.shape[0] != 1 or label[0] > 6350: continue feat = in_file.read(4 * seq_len * 8) feat = np.frombuffer(feat, dtype=np.float32).reshape( - [len(feat) // 4 // 8, 8]) + [len(feat) // 4 // 8, 8] + ) lod_feat = [feat.shape[0]] minputs = fluid.create_lod_tensor(feat, [lod_feat], place) @@ -130,8 +137,11 @@ class TestPostTrainingQuantization(unittest.TestCase): print("test model path:" + model_path) place = fluid.CPUPlace() exe = 
fluid.Executor(place) - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, exe) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) val_reader = self.get_simple_reader(data_path, place) @@ -140,10 +150,12 @@ class TestPostTrainingQuantization(unittest.TestCase): periods = [] for batch_id, (data, label) in enumerate(val_reader()): t1 = time.time() - cls_out, ctc_out = exe.run(infer_program, - feed={feed_dict[0]: data}, - fetch_list=fetch_targets, - return_numpy=False) + cls_out, ctc_out = exe.run( + infer_program, + feed={feed_dict[0]: data}, + fetch_list=fetch_targets, + return_numpy=False, + ) t2 = time.time() periods.append(t2 - t1) @@ -161,86 +173,117 @@ class TestPostTrainingQuantization(unittest.TestCase): acc = right_num / all_num return (latency, acc) - def generate_quantized_model(self, - model_path, - data_path, - algo="KL", - round_type="round", - quantizable_op_type=["conv2d"], - is_full_quantize=False, - is_use_cache_file=False, - is_optimize_model=False, - batch_size=10, - batch_nums=10, - onnx_format=False): + def generate_quantized_model( + self, + model_path, + data_path, + algo="KL", + round_type="round", + quantizable_op_type=["conv2d"], + is_full_quantize=False, + is_use_cache_file=False, + is_optimize_model=False, + batch_size=10, + batch_nums=10, + onnx_format=False, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) scope = fluid.global_scope() batch_generator = self.get_batch_reader(data_path, place) - ptq = PostTrainingQuantization(executor=exe, - model_dir=model_path, - batch_generator=batch_generator, - batch_nums=batch_nums, - algo=algo, - quantizable_op_type=quantizable_op_type, - round_type=round_type, - is_full_quantize=is_full_quantize, - optimize_model=is_optimize_model, - onnx_format=onnx_format, - is_use_cache_file=is_use_cache_file) + ptq = PostTrainingQuantization( + executor=exe, + model_dir=model_path, + batch_generator=batch_generator, + batch_nums=batch_nums, + algo=algo, + quantizable_op_type=quantizable_op_type, + round_type=round_type, + is_full_quantize=is_full_quantize, + optimize_model=is_optimize_model, + onnx_format=onnx_format, + is_use_cache_file=is_use_cache_file, + ) ptq.quantize() if onnx_format: ptq._clip_extra = False ptq.save_quantized_model(self.int8_model_path) - def run_test(self, - model_name, - model_url, - model_md5, - data_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - infer_iterations, - quant_iterations, - onnx_format=False): + def run_test( + self, + model_name, + model_url, + model_md5, + data_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + infer_iterations, + quant_iterations, + onnx_format=False, + ): fp32_model_path = self.download_model(model_url, model_md5, model_name) fp32_model_path = os.path.join(fp32_model_path, model_name) data_path = self.download_model(data_url, data_md5, data_name) data_path = os.path.join(data_path, data_name) - print("Start FP32 inference for {0} on {1} samples ...".format( - model_name, infer_iterations)) - (fp32_latency, fp32_acc) = self.run_program(fp32_model_path, data_path, - infer_iterations) - - print("Start post training quantization for {0} on {1} samples ...". 
- format(model_name, quant_iterations)) - self.generate_quantized_model(fp32_model_path, data_path, algo, - round_type, quantizable_op_type, - is_full_quantize, is_use_cache_file, - is_optimize_model, 10, quant_iterations, - onnx_format) - - print("Start INT8 inference for {0} on {1} samples ...".format( - model_name, infer_iterations)) - (int8_latency, int8_acc) = self.run_program(self.int8_model_path, - data_path, infer_iterations) + print( + "Start FP32 inference for {0} on {1} samples ...".format( + model_name, infer_iterations + ) + ) + (fp32_latency, fp32_acc) = self.run_program( + fp32_model_path, data_path, infer_iterations + ) + + print( + "Start post training quantization for {0} on {1} samples ...".format( + model_name, quant_iterations + ) + ) + self.generate_quantized_model( + fp32_model_path, + data_path, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + 10, + quant_iterations, + onnx_format, + ) + + print( + "Start INT8 inference for {0} on {1} samples ...".format( + model_name, infer_iterations + ) + ) + (int8_latency, int8_acc) = self.run_program( + self.int8_model_path, data_path, infer_iterations + ) print("---Post training quantization of {} method---".format(algo)) - print("FP32 {0}: batch_size {1}, latency {2} s, acc {3}.".format( - model_name, 1, fp32_latency, fp32_acc)) - print("INT8 {0}: batch_size {1}, latency {2} s, acc1 {3}.\n".format( - model_name, 1, int8_latency, int8_acc)) + print( + "FP32 {0}: batch_size {1}, latency {2} s, acc {3}.".format( + model_name, 1, fp32_latency, fp32_acc + ) + ) + print( + "INT8 {0}: batch_size {1}, latency {2} s, acc1 {3}.\n".format( + model_name, 1, int8_latency, int8_acc + ) + ) sys.stdout.flush() delta_value = fp32_acc - int8_acc @@ -248,7 +291,6 @@ class TestPostTrainingQuantization(unittest.TestCase): class TestPostTrainingAvgForLSTM(TestPostTrainingQuantization): - def test_post_training_avg(self): model_name = "nlp_lstm_fp32_model" model_url = "https://paddle-inference-dist.cdn.bcebos.com/int8/unittest_model_data/nlp_lstm_fp32_model.tar.gz" @@ -265,14 +307,26 @@ class TestPostTrainingAvgForLSTM(TestPostTrainingQuantization): diff_threshold = 0.02 infer_iterations = 100 quant_iterations = 10 - self.run_test(model_name, model_url, model_md5, data_name, data_url, - data_md5, algo, round_type, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, infer_iterations, quant_iterations) + self.run_test( + model_name, + model_url, + model_md5, + data_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + infer_iterations, + quant_iterations, + ) class TestPostTrainingAvgForLSTMONNXFormat(TestPostTrainingQuantization): - def not_test_post_training_avg_onnx_format(self): model_name = "nlp_lstm_fp32_model" model_url = "https://paddle-inference-dist.cdn.bcebos.com/int8/unittest_model_data/nlp_lstm_fp32_model.tar.gz" @@ -290,22 +344,24 @@ class TestPostTrainingAvgForLSTMONNXFormat(TestPostTrainingQuantization): infer_iterations = 100 quant_iterations = 10 onnx_format = True - self.run_test(model_name, - model_url, - model_md5, - data_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - infer_iterations, - quant_iterations, - onnx_format=onnx_format) + self.run_test( + model_name, + model_url, + model_md5, + data_name, + data_url, + 
data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + infer_iterations, + quant_iterations, + onnx_format=onnx_format, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py index 807bdbf8a9ab110e6e9dadc5ec465fbd1e03d426..6ff54f7c970761dedf2f1ba310c6bb7422143a96 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mnist.py @@ -33,19 +33,23 @@ np.random.seed(0) class TestPostTrainingQuantization(unittest.TestCase): - def setUp(self): self.root_path = tempfile.TemporaryDirectory() - self.int8_model_path = os.path.join(self.root_path.name, - "post_training_quantization") + self.int8_model_path = os.path.join( + self.root_path.name, "post_training_quantization" + ) self.download_path = 'int8/download' - self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - self.download_path) + self.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + self.download_path + ) try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print("Failed to create {} due to {}".format( - self.int8_model_path, str(e))) + print( + "Failed to create {} due to {}".format( + self.int8_model_path, str(e) + ) + ) sys.exit(-1) def tearDown(self): @@ -54,7 +58,8 @@ class TestPostTrainingQuantization(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_model(self, data_url, data_md5, folder_name): @@ -71,8 +76,11 @@ class TestPostTrainingQuantization(unittest.TestCase): print("test model path:" + model_path) place = fluid.CPUPlace() exe = fluid.Executor(place) - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, exe) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size) img_shape = [1, 28, 28] @@ -80,14 +88,17 @@ class TestPostTrainingQuantization(unittest.TestCase): cnt = 0 periods = [] for batch_id, data in enumerate(val_reader()): - image = np.array([x[0].reshape(img_shape) - for x in data]).astype("float32") + image = np.array([x[0].reshape(img_shape) for x in data]).astype( + "float32" + ) input_label = np.array([x[1] for x in data]).astype("int64") t1 = time.time() - out = exe.run(infer_program, - feed={feed_dict[0]: image}, - fetch_list=fetch_targets) + out = exe.run( + infer_program, + feed={feed_dict[0]: image}, + fetch_list=fetch_targets, + ) t2 = time.time() period = t2 - t1 periods.append(period) @@ -105,91 +116,117 @@ class TestPostTrainingQuantization(unittest.TestCase): acc1 = np.sum(test_info) / cnt return (throughput, latency, acc1) - def generate_quantized_model(self, - model_path, - algo="KL", - round_type="round", - quantizable_op_type=["conv2d"], - is_full_quantize=False, - is_use_cache_file=False, - is_optimize_model=False, - batch_size=10, - batch_nums=10, - onnx_format=False, - skip_tensor_list=None, - bias_correction=False): + def generate_quantized_model( + self, + model_path, + algo="KL", + round_type="round", + quantizable_op_type=["conv2d"], + is_full_quantize=False, + 
is_use_cache_file=False, + is_optimize_model=False, + batch_size=10, + batch_nums=10, + onnx_format=False, + skip_tensor_list=None, + bias_correction=False, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) val_reader = paddle.dataset.mnist.train() - ptq = PostTrainingQuantization(executor=exe, - model_dir=model_path, - sample_generator=val_reader, - batch_size=batch_size, - batch_nums=batch_nums, - algo=algo, - quantizable_op_type=quantizable_op_type, - round_type=round_type, - is_full_quantize=is_full_quantize, - optimize_model=is_optimize_model, - bias_correction=bias_correction, - onnx_format=onnx_format, - skip_tensor_list=skip_tensor_list, - is_use_cache_file=is_use_cache_file) + ptq = PostTrainingQuantization( + executor=exe, + model_dir=model_path, + sample_generator=val_reader, + batch_size=batch_size, + batch_nums=batch_nums, + algo=algo, + quantizable_op_type=quantizable_op_type, + round_type=round_type, + is_full_quantize=is_full_quantize, + optimize_model=is_optimize_model, + bias_correction=bias_correction, + onnx_format=onnx_format, + skip_tensor_list=skip_tensor_list, + is_use_cache_file=is_use_cache_file, + ) ptq.quantize() ptq.save_quantized_model(self.int8_model_path) - def run_test(self, - model_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size=10, - infer_iterations=10, - quant_iterations=5, - bias_correction=False, - onnx_format=False, - skip_tensor_list=None): + def run_test( + self, + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size=10, + infer_iterations=10, + quant_iterations=5, + bias_correction=False, + onnx_format=False, + skip_tensor_list=None, + ): origin_model_path = self.download_model(data_url, data_md5, model_name) origin_model_path = os.path.join(origin_model_path, model_name) - print("Start FP32 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size)) - (fp32_throughput, fp32_latency, - fp32_acc1) = self.run_program(origin_model_path, batch_size, - infer_iterations) - - print("Start INT8 post training quantization for {0} on {1} images ...". 
- format(model_name, quant_iterations * batch_size)) - self.generate_quantized_model(origin_model_path, algo, round_type, - quantizable_op_type, is_full_quantize, - is_use_cache_file, is_optimize_model, - batch_size, quant_iterations, onnx_format, - skip_tensor_list, bias_correction) - - print("Start INT8 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size)) - (int8_throughput, int8_latency, - int8_acc1) = self.run_program(self.int8_model_path, batch_size, - infer_iterations) + print( + "Start FP32 inference for {0} on {1} images ...".format( + model_name, infer_iterations * batch_size + ) + ) + (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( + origin_model_path, batch_size, infer_iterations + ) + + print( + "Start INT8 post training quantization for {0} on {1} images ...".format( + model_name, quant_iterations * batch_size + ) + ) + self.generate_quantized_model( + origin_model_path, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + batch_size, + quant_iterations, + onnx_format, + skip_tensor_list, + bias_correction, + ) + + print( + "Start INT8 inference for {0} on {1} images ...".format( + model_name, infer_iterations * batch_size + ) + ) + (int8_throughput, int8_latency, int8_acc1) = self.run_program( + self.int8_model_path, batch_size, infer_iterations + ) print("---Post training quantization of {} method---".format(algo)) print( - "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}." - .format(model_name, batch_size, fp32_throughput, fp32_latency, - fp32_acc1)) + "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.".format( + model_name, batch_size, fp32_throughput, fp32_latency, fp32_acc1 + ) + ) print( - "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n" - .format(model_name, batch_size, int8_throughput, int8_latency, - int8_acc1)) + "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n".format( + model_name, batch_size, int8_throughput, int8_latency, int8_acc1 + ) + ) sys.stdout.flush() delta_value = fp32_acc1 - int8_acc1 @@ -197,10 +234,11 @@ class TestPostTrainingQuantization(unittest.TestCase): class TestPostTrainingKLForMnist(TestPostTrainingQuantization): - def test_post_training_kl(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "KL" round_type = "round" @@ -212,17 +250,29 @@ class TestPostTrainingKLForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTraininghistForMnist(TestPostTrainingQuantization): - def test_post_training_hist(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = 
"be71d3997ec35ac2a65ae8a145e2887c" algo = "hist" round_type = "round" @@ -234,17 +284,29 @@ class TestPostTraininghistForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingmseForMnist(TestPostTrainingQuantization): - def test_post_training_mse(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "mse" round_type = "round" @@ -256,17 +318,29 @@ class TestPostTrainingmseForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingemdForMnist(TestPostTrainingQuantization): - def test_post_training_mse(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "emd" round_type = "round" @@ -278,17 +352,29 @@ class TestPostTrainingemdForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingavgForMnist(TestPostTrainingQuantization): - def test_post_training_avg(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "avg" round_type = "round" @@ -300,17 +386,29 @@ class TestPostTrainingavgForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, 
+ infer_iterations, + quant_iterations, + ) class TestPostTrainingAbsMaxForMnist(TestPostTrainingQuantization): - def test_post_training_abs_max(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "abs_max" round_type = "round" @@ -322,17 +420,29 @@ class TestPostTrainingAbsMaxForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 10 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingmseAdaroundForMnist(TestPostTrainingQuantization): - def test_post_training_mse(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "mse" round_type = "adaround" @@ -345,27 +455,30 @@ class TestPostTrainingmseAdaroundForMnist(TestPostTrainingQuantization): infer_iterations = 50 quant_iterations = 5 bias_correction = True - self.run_test(model_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size, - infer_iterations, - quant_iterations, - bias_correction=bias_correction) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + bias_correction=bias_correction, + ) class TestPostTrainingKLAdaroundForMnist(TestPostTrainingQuantization): - def test_post_training_kl(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "KL" round_type = "adaround" @@ -377,17 +490,29 @@ class TestPostTrainingKLAdaroundForMnist(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, round_type, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold, batch_size, - infer_iterations, quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingmseForMnistONNXFormat(TestPostTrainingQuantization): - def test_post_training_mse_onnx_format(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "mse" round_type = "round" @@ -400,28 +525,32 @@ class 
TestPostTrainingmseForMnistONNXFormat(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size, - infer_iterations, - quant_iterations, - onnx_format=onnx_format) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + onnx_format=onnx_format, + ) class TestPostTrainingmseForMnistONNXFormatFullQuant( - TestPostTrainingQuantization): - + TestPostTrainingQuantization +): def test_post_training_mse_onnx_format_full_quant(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "mse" round_type = "round" @@ -434,27 +563,30 @@ class TestPostTrainingmseForMnistONNXFormatFullQuant( batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size, - infer_iterations, - quant_iterations, - onnx_format=onnx_format) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + onnx_format=onnx_format, + ) class TestPostTrainingavgForMnistSkipOP(TestPostTrainingQuantization): - def test_post_training_avg_skip_op(self): model_name = "mnist_model" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_model.tar.gz" + ) data_md5 = "be71d3997ec35ac2a65ae8a145e2887c" algo = "avg" round_type = "round" @@ -467,20 +599,22 @@ class TestPostTrainingavgForMnistSkipOP(TestPostTrainingQuantization): infer_iterations = 50 quant_iterations = 5 skip_tensor_list = ["fc_0.w_0"] - self.run_test(model_name, - data_url, - data_md5, - algo, - round_type, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size, - infer_iterations, - quant_iterations, - skip_tensor_list=skip_tensor_list) + self.run_test( + model_name, + data_url, + data_md5, + algo, + round_type, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + skip_tensor_list=skip_tensor_list, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py index 41add5e8b8fb1eb41835873b086013cea83ad4a2..37daeab1186299f23da8e90101ad12926ebde7f9 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py @@ -77,13 +77,14 @@ def process_image(sample, mode, color_jitter, rotate): return img, sample[1] -def _reader_creator(file_list, - mode, - shuffle=False, - color_jitter=False, - 
rotate=False, - data_dir=DATA_DIR): - +def _reader_creator( + file_list, + mode, + shuffle=False, + color_jitter=False, + rotate=False, + data_dir=DATA_DIR, +): def reader(): with open(file_list) as flist: full_lines = [line.strip() for line in flist] @@ -98,10 +99,9 @@ def _reader_creator(file_list, continue yield img_path, int(label) - mapper = functools.partial(process_image, - mode=mode, - color_jitter=color_jitter, - rotate=rotate) + mapper = functools.partial( + process_image, mode=mode, color_jitter=color_jitter, rotate=rotate + ) return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE) @@ -112,11 +112,11 @@ def val(data_dir=DATA_DIR): class TestPostTrainingQuantization(unittest.TestCase): - def setUp(self): self.int8_download = 'int8/download' - self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - self.int8_download) + self.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + self.int8_download + ) self.data_cache_folder = '' data_urls = [] data_md5s = [] @@ -129,29 +129,34 @@ class TestPostTrainingQuantization(unittest.TestCase): 'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partab' ) data_md5s.append('1e9f15f64e015e58d6f9ec3210ed18b5') - self.data_cache_folder = self.download_data(data_urls, data_md5s, - "full_data", False) + self.data_cache_folder = self.download_data( + data_urls, data_md5s, "full_data", False + ) else: data_urls.append( 'http://paddle-inference-dist.bj.bcebos.com/int8/calibration_test_data.tar.gz' ) data_md5s.append('1b6c1c434172cca1bf9ba1e4d7a3157d') - self.data_cache_folder = self.download_data(data_urls, data_md5s, - "small_data", False) + self.data_cache_folder = self.download_data( + data_urls, data_md5s, "small_data", False + ) # reader/decorator.py requires the relative path to the data folder if not os.path.exists("./data/ILSVRC2012"): - cmd = 'rm -rf {0} && ln -s {1} {0}'.format("data", - self.data_cache_folder) + cmd = 'rm -rf {0} && ln -s {1} {0}'.format( + "data", self.data_cache_folder + ) os.system(cmd) self.batch_size = 1 if os.environ.get('DATASET') == 'full' else 50 - self.infer_iterations = 50000 if os.environ.get( - 'DATASET') == 'full' else 2 + self.infer_iterations = ( + 50000 if os.environ.get('DATASET') == 'full' else 2 + ) self.root_path = tempfile.TemporaryDirectory() - self.int8_model = os.path.join(self.root_path.name, - "post_training_quantization") + self.int8_model = os.path.join( + self.root_path.name, "post_training_quantization" + ) def tearDown(self): self.root_path.cleanup() @@ -159,7 +164,8 @@ class TestPostTrainingQuantization(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_data(self, data_urls, data_md5s, folder_name, is_model=True): @@ -171,13 +177,15 @@ class TestPostTrainingQuantization(unittest.TestCase): download(data_urls[i], self.int8_download, data_md5s[i]) file_names.append(data_urls[i].split('/')[-1]) - zip_path = os.path.join(self.cache_folder, - 'full_imagenet_val.tar.gz') + zip_path = os.path.join( + self.cache_folder, 'full_imagenet_val.tar.gz' + ) if not os.path.exists(zip_path): cat_command = 'cat' for file_name in file_names: - cat_command += ' ' + os.path.join(self.cache_folder, - file_name) + cat_command += ' ' + os.path.join( + self.cache_folder, file_name + ) cat_command += ' > ' + zip_path os.system(cat_command) @@ -197,8 +205,11 @@ class 
TestPostTrainingQuantization(unittest.TestCase): image_shape = [3, 224, 224] place = fluid.CPUPlace() exe = fluid.Executor(place) - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, exe) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) val_reader = paddle.batch(val(), batch_size) iterations = infer_iterations @@ -206,18 +217,18 @@ class TestPostTrainingQuantization(unittest.TestCase): cnt = 0 periods = [] for batch_id, data in enumerate(val_reader()): - image = np.array([x[0].reshape(image_shape) - for x in data]).astype("float32") + image = np.array([x[0].reshape(image_shape) for x in data]).astype( + "float32" + ) label = np.array([x[1] for x in data]).astype("int64") label = label.reshape([-1, 1]) t1 = time.time() - _, acc1, _ = exe.run(infer_program, - feed={ - feed_dict[0]: image, - feed_dict[1]: label - }, - fetch_list=fetch_targets) + _, acc1, _ = exe.run( + infer_program, + feed={feed_dict[0]: image, feed_dict[1]: label}, + fetch_list=fetch_targets, + ) t2 = time.time() period = t2 - t1 periods.append(period) @@ -236,22 +247,25 @@ class TestPostTrainingQuantization(unittest.TestCase): acc1 = np.sum(test_info) / cnt return (throughput, latency, acc1) - def generate_quantized_model(self, - model_path, - quantizable_op_type, - batch_size, - algo="KL", - round_type="round", - is_full_quantize=False, - is_use_cache_file=False, - is_optimize_model=False, - batch_nums=10, - onnx_format=False): + def generate_quantized_model( + self, + model_path, + quantizable_op_type, + batch_size, + algo="KL", + round_type="round", + is_full_quantize=False, + is_use_cache_file=False, + is_optimize_model=False, + batch_nums=10, + onnx_format=False, + ): try: os.system("mkdir " + self.int8_model) except Exception as e: - print("Failed to create {} due to {}".format( - self.int8_model, str(e))) + print( + "Failed to create {} due to {}".format(self.int8_model, str(e)) + ) sys.exit(-1) place = fluid.CPUPlace() @@ -259,68 +273,92 @@ class TestPostTrainingQuantization(unittest.TestCase): scope = fluid.global_scope() val_reader = val() - ptq = PostTrainingQuantization(executor=exe, - sample_generator=val_reader, - model_dir=model_path, - batch_size=batch_size, - batch_nums=batch_nums, - algo=algo, - quantizable_op_type=quantizable_op_type, - round_type=round_type, - is_full_quantize=is_full_quantize, - optimize_model=is_optimize_model, - onnx_format=onnx_format, - is_use_cache_file=is_use_cache_file) + ptq = PostTrainingQuantization( + executor=exe, + sample_generator=val_reader, + model_dir=model_path, + batch_size=batch_size, + batch_nums=batch_nums, + algo=algo, + quantizable_op_type=quantizable_op_type, + round_type=round_type, + is_full_quantize=is_full_quantize, + optimize_model=is_optimize_model, + onnx_format=onnx_format, + is_use_cache_file=is_use_cache_file, + ) ptq.quantize() ptq.save_quantized_model(self.int8_model) - def run_test(self, - model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=False, - batch_nums=10): + def run_test( + self, + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + onnx_format=False, + batch_nums=10, + ): infer_iterations = self.infer_iterations batch_size = self.batch_size model_cache_folder = self.download_data(data_urls, data_md5s, model) - print("Start 
FP32 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) + print( + "Start FP32 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( - os.path.join(model_cache_folder, "model"), batch_size, - infer_iterations) - - print("Start INT8 post training quantization for {0} on {1} images ...". - format(model, batch_nums * batch_size)) - self.generate_quantized_model(os.path.join(model_cache_folder, "model"), - quantizable_op_type, batch_size, algo, - round_type, is_full_quantize, - is_use_cache_file, is_optimize_model, - batch_nums, onnx_format) - - print("Start INT8 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) - (int8_throughput, int8_latency, - int8_acc1) = self.run_program(self.int8_model, batch_size, - infer_iterations) + os.path.join(model_cache_folder, "model"), + batch_size, + infer_iterations, + ) + + print( + "Start INT8 post training quantization for {0} on {1} images ...".format( + model, batch_nums * batch_size + ) + ) + self.generate_quantized_model( + os.path.join(model_cache_folder, "model"), + quantizable_op_type, + batch_size, + algo, + round_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + batch_nums, + onnx_format, + ) + + print( + "Start INT8 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) + (int8_throughput, int8_latency, int8_acc1) = self.run_program( + self.int8_model, batch_size, infer_iterations + ) print("---Post training quantization of {} method---".format(algo)) print( - "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}." - .format(model, batch_size, fp32_throughput, fp32_latency, - fp32_acc1)) + "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format( + model, batch_size, fp32_throughput, fp32_latency, fp32_acc1 + ) + ) print( - "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n" - .format(model, batch_size, int8_throughput, int8_latency, - int8_acc1)) + "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n".format( + model, batch_size, int8_throughput, int8_latency, int8_acc1 + ) + ) sys.stdout.flush() delta_value = fp32_acc1 - int8_acc1 @@ -328,7 +366,6 @@ class TestPostTrainingQuantization(unittest.TestCase): class TestPostTrainingKLForMobilenetv1(TestPostTrainingQuantization): - def test_post_training_kl_mobilenetv1(self): model = "MobileNet-V1" algo = "KL" @@ -348,13 +385,21 @@ class TestPostTrainingKLForMobilenetv1(TestPostTrainingQuantization): is_optimize_model = True diff_threshold = 0.025 batch_nums = 3 - self.run_test(model, algo, round_type, data_urls, data_md5s, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + ) class TestPostTrainingavgForMobilenetv1(TestPostTrainingQuantization): - def test_post_training_avg_mobilenetv1(self): model = "MobileNet-V1" algo = "avg" @@ -372,13 +417,21 @@ class TestPostTrainingavgForMobilenetv1(TestPostTrainingQuantization): is_use_cache_file = False is_optimize_model = True diff_threshold = 0.025 - self.run_test(model, algo, round_type, data_urls, data_md5s, - quantizable_op_type, is_full_quantize, 
is_use_cache_file, - is_optimize_model, diff_threshold) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + ) class TestPostTraininghistForMobilenetv1(TestPostTrainingQuantization): - def test_post_training_hist_mobilenetv1(self): model = "MobileNet-V1" algo = "hist" @@ -397,21 +450,22 @@ class TestPostTraininghistForMobilenetv1(TestPostTrainingQuantization): is_optimize_model = True diff_threshold = 0.03 batch_nums = 3 - self.run_test(model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_nums=batch_nums) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_nums=batch_nums, + ) class TestPostTrainingAbsMaxForMobilenetv1(TestPostTrainingQuantization): - def test_post_training_abs_max_mobilenetv1(self): model = "MobileNet-V1" algo = "abs_max" @@ -429,13 +483,21 @@ class TestPostTrainingAbsMaxForMobilenetv1(TestPostTrainingQuantization): is_optimize_model = False # The accuracy diff of post-training quantization (abs_max) maybe bigger diff_threshold = 0.05 - self.run_test(model, algo, round_type, data_urls, data_md5s, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + ) class TestPostTrainingAvgONNXFormatForMobilenetv1(TestPostTrainingQuantization): - def test_post_training_onnx_format_mobilenetv1(self): model = "MobileNet-V1" algo = "emd" @@ -455,18 +517,20 @@ class TestPostTrainingAvgONNXFormatForMobilenetv1(TestPostTrainingQuantization): onnx_format = True diff_threshold = 0.05 batch_nums = 3 - self.run_test(model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=onnx_format, - batch_nums=batch_nums) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + onnx_format=onnx_format, + batch_nums=batch_nums, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_program_resnet50.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_program_resnet50.py index 5854d40529d5834bce63ecd524b9a3591baacc07..345853636a41ba00ed751cacd223eb2b91017d38 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_program_resnet50.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_program_resnet50.py @@ -23,8 +23,12 @@ import contextlib import numpy as np import paddle.fluid as fluid from PIL import Image, ImageEnhance -from paddle.fluid.contrib.slim.quantization import PostTrainingQuantizationProgram -from test_post_training_quantization_mobilenetv1 import TestPostTrainingQuantization +from paddle.fluid.contrib.slim.quantization import ( + PostTrainingQuantizationProgram, +) +from test_post_training_quantization_mobilenetv1 import ( + TestPostTrainingQuantization, +) paddle.enable_static() @@ -76,13 +80,14 @@ def process_image(sample, mode, color_jitter, 
rotate): return img, sample[1] -def _reader_creator(file_list, - mode, - shuffle=False, - color_jitter=False, - rotate=False, - data_dir=DATA_DIR): - +def _reader_creator( + file_list, + mode, + shuffle=False, + color_jitter=False, + rotate=False, + data_dir=DATA_DIR, +): def reader(): with open(file_list) as flist: full_lines = [line.strip() for line in flist] @@ -97,10 +102,9 @@ def _reader_creator(file_list, continue yield img_path, int(label) - mapper = functools.partial(process_image, - mode=mode, - color_jitter=color_jitter, - rotate=rotate) + mapper = functools.partial( + process_image, mode=mode, color_jitter=color_jitter, rotate=rotate + ) return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE) @@ -111,31 +115,33 @@ def val(data_dir=DATA_DIR): class TestPostTrainingQuantizationProgram(TestPostTrainingQuantization): - def run_program(self, model_path, batch_size, infer_iterations): image_shape = [3, 224, 224] place = fluid.CPUPlace() exe = fluid.Executor(place) - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, exe) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) val_reader = paddle.batch(val(), batch_size) iterations = infer_iterations test_info = [] cnt = 0 periods = [] for batch_id, data in enumerate(val_reader()): - image = np.array([x[0].reshape(image_shape) - for x in data]).astype("float32") + image = np.array([x[0].reshape(image_shape) for x in data]).astype( + "float32" + ) label = np.array([x[1] for x in data]).astype("int64") label = label.reshape([-1, 1]) t1 = time.time() - _, acc1, _ = exe.run(infer_program, - feed={ - feed_dict[0]: image, - feed_dict[1]: label - }, - fetch_list=fetch_targets) + _, acc1, _ = exe.run( + infer_program, + feed={feed_dict[0]: image, feed_dict[1]: label}, + fetch_list=fetch_targets, + ) t2 = time.time() period = t2 - t1 periods.append(period) @@ -152,10 +158,19 @@ class TestPostTrainingQuantizationProgram(TestPostTrainingQuantization): throughput = cnt / np.sum(periods) latency = np.average(periods) acc1 = np.sum(test_info) / cnt - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, exe) - return (throughput, latency, acc1, infer_program, feed_dict, - fetch_targets) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model(model_path, exe) + return ( + throughput, + latency, + acc1, + infer_program, + feed_dict, + fetch_targets, + ) def generate_quantized_model( self, @@ -173,25 +188,27 @@ class TestPostTrainingQuantizationProgram(TestPostTrainingQuantization): try: os.system("mkdir " + self.int8_model) except Exception as e: - print("Failed to create {} due to {}".format( - self.int8_model, str(e))) + print( + "Failed to create {} due to {}".format(self.int8_model, str(e)) + ) sys.exit(-1) place = fluid.CPUPlace() exe = fluid.Executor(place) scope = fluid.global_scope() val_reader = val() - same_scale_tensor_list = [[ - 'batch_norm_3.tmp_2#/#1', 'batch_norm_4.tmp_2#*#1' - ], ['batch_norm_27.tmp_2', 'batch_norm_26.tmp_2'], - [ - 'test_scale_name_not_in_scale_dict1', - 'test_scale_name_not_in_scale_dict2' - ], - [ - 'test_scale_name_not_in_scale_dict1#/#1', - 'test_scale_name_not_in_scale_dict2#/#1' - ]] + same_scale_tensor_list = [ + ['batch_norm_3.tmp_2#/#1', 'batch_norm_4.tmp_2#*#1'], + ['batch_norm_27.tmp_2', 'batch_norm_26.tmp_2'], + [ + 'test_scale_name_not_in_scale_dict1', + 'test_scale_name_not_in_scale_dict2', + ], + [ + 
'test_scale_name_not_in_scale_dict1#/#1', + 'test_scale_name_not_in_scale_dict2#/#1', + ], + ] ptq = PostTrainingQuantizationProgram( executor=exe, program=program, @@ -206,56 +223,86 @@ class TestPostTrainingQuantizationProgram(TestPostTrainingQuantization): is_use_cache_file=is_use_cache_file, feed_list=feed_list, fetch_list=fetch_list, - same_scale_tensor_list=same_scale_tensor_list) + same_scale_tensor_list=same_scale_tensor_list, + ) ptq.quantize() ptq.save_quantized_model(self.int8_model) - def run_test(self, - model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=False): + def run_test( + self, + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + onnx_format=False, + ): infer_iterations = self.infer_iterations batch_size = self.batch_size sample_iterations = self.sample_iterations model_cache_folder = self.download_data(data_urls, data_md5s, model) - print("Start FP32 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) - (fp32_throughput, fp32_latency, fp32_acc1, infer_program, feed_dict, - fetch_targets) = self.run_program( - os.path.join(model_cache_folder, "model"), batch_size, - infer_iterations) - print("Start INT8 post training quantization for {0} on {1} images ...". - format(model, sample_iterations * batch_size)) - self.generate_quantized_model(infer_program, quantizable_op_type, - feed_dict, fetch_targets, algo, - round_type, is_full_quantize, - is_use_cache_file, is_optimize_model, - onnx_format) - - print("Start INT8 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) - (int8_throughput, int8_latency, int8_acc1, _, _, - _) = self.run_program(self.int8_model, batch_size, infer_iterations) + print( + "Start FP32 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) + ( + fp32_throughput, + fp32_latency, + fp32_acc1, + infer_program, + feed_dict, + fetch_targets, + ) = self.run_program( + os.path.join(model_cache_folder, "model"), + batch_size, + infer_iterations, + ) + print( + "Start INT8 post training quantization for {0} on {1} images ...".format( + model, sample_iterations * batch_size + ) + ) + self.generate_quantized_model( + infer_program, + quantizable_op_type, + feed_dict, + fetch_targets, + algo, + round_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + onnx_format, + ) + + print( + "Start INT8 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) + (int8_throughput, int8_latency, int8_acc1, _, _, _) = self.run_program( + self.int8_model, batch_size, infer_iterations + ) print("---Post training quantization of {} method---".format(algo)) print( - "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}." 
- .format(model, batch_size, fp32_throughput, fp32_latency, - fp32_acc1)) + "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format( + model, batch_size, fp32_throughput, fp32_latency, fp32_acc1 + ) + ) print( - "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n" - .format(model, batch_size, int8_throughput, int8_latency, - int8_acc1)) + "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n".format( + model, batch_size, int8_throughput, int8_latency, int8_acc1 + ) + ) sys.stdout.flush() delta_value = fp32_acc1 - int8_acc1 @@ -263,8 +310,8 @@ class TestPostTrainingQuantizationProgram(TestPostTrainingQuantization): class TestPostTrainingProgramAbsMaxForResnet50( - TestPostTrainingQuantizationProgram): - + TestPostTrainingQuantizationProgram +): def test_post_training_abs_max_resnet50(self): model = "ResNet-50" algo = "abs_max" @@ -278,9 +325,18 @@ class TestPostTrainingProgramAbsMaxForResnet50( is_use_cache_file = False is_optimize_model = False diff_threshold = 0.025 - self.run_test(model, algo, round_type, data_urls, data_md5s, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_resnet50.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_resnet50.py index c79499100cee3fd4c412c3703e4606932ffa7fe7..65e1d391399ddb217500a57b65c0404ea65e29f2 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_resnet50.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_resnet50.py @@ -14,14 +14,15 @@ import sys import unittest -from test_post_training_quantization_mobilenetv1 import TestPostTrainingQuantization +from test_post_training_quantization_mobilenetv1 import ( + TestPostTrainingQuantization, +) import paddle paddle.enable_static() class TestPostTrainingForResnet50(TestPostTrainingQuantization): - def test_post_training_resnet50(self): model = "ResNet-50" algo = "min_max" @@ -35,13 +36,21 @@ class TestPostTrainingForResnet50(TestPostTrainingQuantization): is_use_cache_file = False is_optimize_model = False diff_threshold = 0.025 - self.run_test(model, algo, round_type, data_urls, data_md5s, - quantizable_op_type, is_full_quantize, is_use_cache_file, - is_optimize_model, diff_threshold) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + ) class TestPostTrainingForResnet50ONNXFormat(TestPostTrainingQuantization): - def test_post_training_resnet50(self): model = "ResNet-50" algo = "min_max" @@ -56,17 +65,19 @@ class TestPostTrainingForResnet50ONNXFormat(TestPostTrainingQuantization): is_optimize_model = False diff_threshold = 0.025 onnx_format = True - self.run_test(model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=onnx_format) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + 
onnx_format=onnx_format, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_while.py b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_while.py index f4eaf5d9bc777e3ade8c83044851663c5130d227..628d120f45ebd9278b43d743f66cd55a4be05c2a 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_while.py +++ b/python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_while.py @@ -32,27 +32,34 @@ np.random.seed(0) class TestPostTrainingQuantization(unittest.TestCase): - def setUp(self): self.download_path = 'int8/download' - self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - self.download_path) + self.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + self.download_path + ) self.timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) - self.int8_model_path = os.path.join(os.getcwd(), - "post_training_" + self.timestamp) + self.int8_model_path = os.path.join( + os.getcwd(), "post_training_" + self.timestamp + ) try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print("Failed to create {} due to {}".format( - self.int8_model_path, str(e))) + print( + "Failed to create {} due to {}".format( + self.int8_model_path, str(e) + ) + ) sys.exit(-1) def tearDown(self): try: os.system("rm -rf {}".format(self.int8_model_path)) except Exception as e: - print("Failed to delete {} due to {}".format( - self.int8_model_path, str(e))) + print( + "Failed to delete {} due to {}".format( + self.int8_model_path, str(e) + ) + ) def cache_unzipping(self, target_folder, zip_path): cmd = 'tar xf {0} -C {1}'.format(zip_path, target_folder) @@ -72,10 +79,16 @@ class TestPostTrainingQuantization(unittest.TestCase): print("test model path:" + model_path) place = fluid.CPUPlace() exe = fluid.Executor(place) - [infer_program, feed_dict, fetch_targets] = \ - fluid.io.load_inference_model(model_path, - model_filename='model.pdmodel', - params_filename='model.pdiparams', executor=exe) + [ + infer_program, + feed_dict, + fetch_targets, + ] = fluid.io.load_inference_model( + model_path, + model_filename='model.pdmodel', + params_filename='model.pdiparams', + executor=exe, + ) val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size) img_shape = [1, 28, 28] @@ -83,14 +96,17 @@ class TestPostTrainingQuantization(unittest.TestCase): cnt = 0 periods = [] for batch_id, data in enumerate(val_reader()): - image = np.array([x[0].reshape(img_shape) - for x in data]).astype("float32") + image = np.array([x[0].reshape(img_shape) for x in data]).astype( + "float32" + ) input_label = np.array([x[1] for x in data]).astype("int64") t1 = time.time() - out = exe.run(infer_program, - feed={feed_dict[0]: image}, - fetch_list=fetch_targets) + out = exe.run( + infer_program, + feed={feed_dict[0]: image}, + fetch_list=fetch_targets, + ) t2 = time.time() period = t2 - t1 periods.append(period) @@ -108,16 +124,18 @@ class TestPostTrainingQuantization(unittest.TestCase): acc1 = np.sum(test_info) / cnt return (throughput, latency, acc1) - def generate_quantized_model(self, - model_path, - algo="KL", - quantizable_op_type=["conv2d"], - is_full_quantize=False, - is_use_cache_file=False, - is_optimize_model=False, - batch_size=10, - batch_nums=10, - is_data_loader=False): + def generate_quantized_model( + self, + model_path, + algo="KL", + quantizable_op_type=["conv2d"], + is_full_quantize=False, + is_use_cache_file=False, + is_optimize_model=False, + 
batch_size=10, + batch_nums=10, + is_data_loader=False, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -146,63 +164,81 @@ class TestPostTrainingQuantization(unittest.TestCase): quantizable_op_type=quantizable_op_type, is_full_quantize=is_full_quantize, optimize_model=is_optimize_model, - is_use_cache_file=is_use_cache_file) + is_use_cache_file=is_use_cache_file, + ) ptq.quantize() - ptq.save_quantized_model(self.int8_model_path, - model_filename='model.pdmodel', - params_filename='model.pdiparams') - - def run_test(self, - model_name, - data_url, - data_md5, - algo, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size=10, - infer_iterations=10, - quant_iterations=5, - is_data_loader=False): + ptq.save_quantized_model( + self.int8_model_path, + model_filename='model.pdmodel', + params_filename='model.pdiparams', + ) + + def run_test( + self, + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size=10, + infer_iterations=10, + quant_iterations=5, + is_data_loader=False, + ): origin_model_path = self.download_model(data_url, data_md5, model_name) - #origin_model_path = os.path.join(origin_model_path, model_name) - - print("Start FP32 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size)) - (fp32_throughput, fp32_latency, - fp32_acc1) = self.run_program(origin_model_path, batch_size, - infer_iterations) - - print("Start INT8 post training quantization for {0} on {1} images ...". - format(model_name, quant_iterations * batch_size)) - self.generate_quantized_model(origin_model_path, - algo, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - batch_size, - quant_iterations, - is_data_loader=is_data_loader) - - print("Start INT8 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size)) - (int8_throughput, int8_latency, - int8_acc1) = self.run_program(self.int8_model_path, batch_size, - infer_iterations) + # origin_model_path = os.path.join(origin_model_path, model_name) + + print( + "Start FP32 inference for {0} on {1} images ...".format( + model_name, infer_iterations * batch_size + ) + ) + (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( + origin_model_path, batch_size, infer_iterations + ) + + print( + "Start INT8 post training quantization for {0} on {1} images ...".format( + model_name, quant_iterations * batch_size + ) + ) + self.generate_quantized_model( + origin_model_path, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + batch_size, + quant_iterations, + is_data_loader=is_data_loader, + ) + + print( + "Start INT8 inference for {0} on {1} images ...".format( + model_name, infer_iterations * batch_size + ) + ) + (int8_throughput, int8_latency, int8_acc1) = self.run_program( + self.int8_model_path, batch_size, infer_iterations + ) print("---Post training quantization of {} method---".format(algo)) print( - "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}." 
- .format(model_name, batch_size, fp32_throughput, fp32_latency, - fp32_acc1)) + "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.".format( + model_name, batch_size, fp32_throughput, fp32_latency, fp32_acc1 + ) + ) print( - "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n" - .format(model_name, batch_size, int8_throughput, int8_latency, - int8_acc1)) + "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n".format( + model_name, batch_size, int8_throughput, int8_latency, int8_acc1 + ) + ) sys.stdout.flush() delta_value = fp32_acc1 - int8_acc1 @@ -210,10 +246,11 @@ class TestPostTrainingQuantization(unittest.TestCase): class TestPostTrainingKLForWhile(TestPostTrainingQuantization): - def test_post_training_kl(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "KL" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -224,17 +261,28 @@ class TestPostTrainingKLForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTraininghistForWhile(TestPostTrainingQuantization): - def test_post_training_hist(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "hist" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -245,17 +293,28 @@ class TestPostTraininghistForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingmseForWhile(TestPostTrainingQuantization): - def test_post_training_mse(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "mse" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -266,17 +325,28 @@ class TestPostTrainingmseForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + 
is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingavgForWhile(TestPostTrainingQuantization): - def test_post_training_avg(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "avg" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -287,17 +357,28 @@ class TestPostTrainingavgForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingMinMaxForWhile(TestPostTrainingQuantization): - def test_post_training_min_max(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "min_max" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -308,17 +389,28 @@ class TestPostTrainingMinMaxForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) class TestPostTrainingAbsMaxForWhile(TestPostTrainingQuantization): - def test_post_training_abs_max(self): model_name = "mnist_while" - data_url = "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + data_url = ( + "http://paddle-inference-dist.bj.bcebos.com/int8/mnist_while.tar.gz" + ) data_md5 = "2387390beeb37b51dec041c27b8a681f" algo = "abs_max" quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] @@ -329,23 +421,35 @@ class TestPostTrainingAbsMaxForWhile(TestPostTrainingQuantization): batch_size = 10 infer_iterations = 50 quant_iterations = 5 - self.run_test(model_name, data_url, data_md5, algo, quantizable_op_type, - is_full_quantize, is_use_cache_file, is_optimize_model, - diff_threshold, batch_size, infer_iterations, - quant_iterations) - self.run_test(model_name, - data_url, - data_md5, - algo, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - batch_size, - infer_iterations, - quant_iterations, - is_data_loader=True) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, + quant_iterations, + ) + self.run_test( + model_name, + data_url, + data_md5, + algo, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + batch_size, + infer_iterations, 
+ quant_iterations, + is_data_loader=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_quant2_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quant2_int8_mkldnn_pass.py index dea0fcd4897685fe92eb1113a7611b056cb80c9e..0f7a43ebebd68375681b6a93dff006972b179ef7 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quant2_int8_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quant2_int8_mkldnn_pass.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestQuant2Int8MkldnnPassMul(unittest.TestCase): - def op_name(self): return "mul" @@ -39,7 +38,8 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): self.mul_weights_size = [3, 5] self.mul_output_size = [1, 5] self.mul_input = np.random.random(self.mul_input_size).astype( - self.dtype) + self.dtype + ) self.mul_weights = np.ones(self.mul_weights_size, self.dtype) self.mul_weights_bad = np.ones([1, 1], self.dtype) self.mul_output = np.ndarray(self.mul_output_size).astype(self.dtype) @@ -49,23 +49,22 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): "mul_input": self.mul_input, "mul_weights": self.mul_weights, "mul_output": self.mul_output, - "mul_weights_bad": self.mul_weights_bad + "mul_weights_bad": self.mul_weights_bad, } def prepare_program_mul(self, program): block = program.global_block() for name in self.variables_mul: - block.create_var(name=name, - dtype="float32", - shape=self.variables_mul[name].shape) - - mul_op1 = block.append_op(type=self.op_name(), - inputs={ - "X": block.var('mul_input'), - "Y": block.var('mul_weights') - }, - outputs={"Out": block.var('mul_output')}, - attrs={'use_mkldnn': self.use_mkldnn}) + block.create_var( + name=name, dtype="float32", shape=self.variables_mul[name].shape + ) + + mul_op1 = block.append_op( + type=self.op_name(), + inputs={"X": block.var('mul_input'), "Y": block.var('mul_weights')}, + outputs={"Out": block.var('mul_output')}, + attrs={'use_mkldnn': self.use_mkldnn}, + ) def test_dequantize_op_weights(self): program = fluid.Program() @@ -80,11 +79,13 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): break assert op_node != "", "op of type %s not found" % self.op_name() - qpass = Quant2Int8MkldnnPass(self.quantized_ops, - _scope=self.scope, - _place=self.place, - _core=core, - _debug=False) + qpass = Quant2Int8MkldnnPass( + self.quantized_ops, + _scope=self.scope, + _place=self.place, + _core=core, + _debug=False, + ) qpass._weight_thresholds["mul_output"] = self.mul_output_scale param = self.scope.var("mul_weights").get_tensor() param.set(self.variables_mul["mul_weights"], self.place) @@ -92,9 +93,30 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): assert np.allclose( self.scope.find_var("mul_weights").get_tensor(), - [[1. / 127., 2. / 127., 3. / 127., 4. / 127., 5. / 127.], - [1. / 127., 2. / 127., 3. / 127., 4. / 127., 5. / 127.], - [1. / 127., 2. / 127., 3. / 127., 4. / 127., 5. 
/ 127.]]) + [ + [ + 1.0 / 127.0, + 2.0 / 127.0, + 3.0 / 127.0, + 4.0 / 127.0, + 5.0 / 127.0, + ], + [ + 1.0 / 127.0, + 2.0 / 127.0, + 3.0 / 127.0, + 4.0 / 127.0, + 5.0 / 127.0, + ], + [ + 1.0 / 127.0, + 2.0 / 127.0, + 3.0 / 127.0, + 4.0 / 127.0, + 5.0 / 127.0, + ], + ], + ) param = self.scope.var("mul_weights").get_tensor() param.set(self.variables_mul["mul_weights_bad"], self.place) @@ -103,13 +125,11 @@ class TestQuant2Int8MkldnnPassMul(unittest.TestCase): class TestQuant2Int8MkldnnPassMatmulV2(TestQuant2Int8MkldnnPassMul): - def op_name(self): return "matmul_v2" class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): - def setUp(self): self.scope = fluid.Scope() self.place = fluid.CPUPlace() @@ -131,7 +151,8 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): self.filter2 = np.random.random(self.filter_size2).astype(self.dtype) self.conv_output = np.ndarray(self.conv_output_size).astype(self.dtype) self.conv_output2 = np.ndarray(self.conv_output2_size).astype( - self.dtype) + self.dtype + ) self.quantized_ops = 'conv2d' self.variables = { "input": self.input, @@ -144,15 +165,12 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): def prepare_program_conv2d(self, program): block = program.global_block() for name in self.variables: - block.create_var(name=name, - dtype="float32", - shape=self.variables[name].shape) + block.create_var( + name=name, dtype="float32", shape=self.variables[name].shape + ) conv2d_op1 = block.append_op( type="conv2d", - inputs={ - "Input": block.var('input'), - 'Filter': block.var('filter') - }, + inputs={"Input": block.var('input'), 'Filter': block.var('filter')}, outputs={"Output": block.var('conv_output')}, attrs={ 'strides': self.stride, @@ -162,13 +180,14 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, - 'fuse_relu': True - }) + 'fuse_relu': True, + }, + ) conv2d_op2 = block.append_op( type="conv2d", inputs={ "Input": block.var('conv_output'), - 'Filter': block.var('filter2') + 'Filter': block.var('filter2'), }, outputs={"Output": block.var('conv_output2')}, attrs={ @@ -178,8 +197,9 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format - }) + 'data_format': self.data_format, + }, + ) def remove_fuse_activation_attribute(self, graph): for op in graph.all_op_nodes(): @@ -204,16 +224,17 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): graph = IrGraph(core.Graph(program.desc), for_test=True) graph = self.remove_fuse_activation_attribute(graph) self.check_graph_before_pass(graph) - quant2_int8_mkldnn_pass = Quant2Int8MkldnnPass(self.quantized_ops, - _scope=self.scope, - _place=self.place, - _core=core, - _debug=False) + quant2_int8_mkldnn_pass = Quant2Int8MkldnnPass( + self.quantized_ops, + _scope=self.scope, + _place=self.place, + _core=core, + _debug=False, + ) graph = quant2_int8_mkldnn_pass._update_activations(graph) self.check_graph_after_pass(graph) class TestQuant2Int8MkldnnPassNearestInterp(unittest.TestCase): - def op_name(self): return "nearest_interp" @@ -236,7 +257,8 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): self.input = np.random.random(self.input_size).astype(self.dtype) self.filter = np.random.random(self.filter_size).astype(self.dtype) self.conv_output = np.ndarray(self.conv_output_size).astype( - self.dtype) + self.dtype + ) # nearest_interp self.out_h = 1 @@ -246,16 
+268,20 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): self.data_layout = 'NCHW' self.nearest_interp_output_size = [1, 1, 2, 2] self.nearest_interp_output = np.ndarray( - self.nearest_interp_output_size).astype(self.dtype) + self.nearest_interp_output_size + ).astype(self.dtype) # dropout self.dropout_prob = 0.5 self.dropout_out = np.ndarray( - self.nearest_interp_output_size).astype(self.dtype) + self.nearest_interp_output_size + ).astype(self.dtype) self.dropout_mask = np.ndarray(self.nearest_interp_output_size) self.quantized_ops = { - "conv2d", "nearest_interp", "nearest_interp_v2" + "conv2d", + "nearest_interp", + "nearest_interp_v2", } self.variables = { "input": self.input, @@ -263,55 +289,61 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): "conv_output": self.conv_output, "nearest_interp_output": self.nearest_interp_output, "dropout_out": self.dropout_out, - 'dropout_mask': self.dropout_mask + 'dropout_mask': self.dropout_mask, } def prepare_program(self, program): block = program.global_block() for name in self.variables: - block.create_var(name=name, - dtype="float32", - shape=self.variables[name].shape) - block.append_op(type="conv2d", - inputs={ - "Input": block.var('input'), - 'Filter': block.var('filter') - }, - outputs={"Output": block.var('conv_output')}, - attrs={ - 'strides': self.stride, - 'paddings': self.pad, - 'groups': self.groups, - 'dilations': self.dilations, - 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format, - 'fuse_relu': True - }) - block.append_op(type=self.op_name(), - inputs={ - "X": block.var('conv_output'), - }, - outputs={"Out": block.var('nearest_interp_output')}, - attrs={ - 'interp_method': self.interp_method, - 'out_h': self.out_h, - 'out_w': self.out_w, - 'scale': self.scale, - 'data_layout': self.data_layout, - 'use_mkldnn': self.use_mkldnn - }) - block.append_op(type='dropout', - inputs={ - "X": block.var('nearest_interp_output'), - }, - outputs={ - 'Out': block.var('dropout_out'), - 'Mask': block.var('dropout_mask') - }, - attrs={ - 'dropout_prob': self.dropout_prob, - }) + block.create_var( + name=name, dtype="float32", shape=self.variables[name].shape + ) + block.append_op( + type="conv2d", + inputs={ + "Input": block.var('input'), + 'Filter': block.var('filter'), + }, + outputs={"Output": block.var('conv_output')}, + attrs={ + 'strides': self.stride, + 'paddings': self.pad, + 'groups': self.groups, + 'dilations': self.dilations, + 'use_cudnn': self.use_cudnn, + 'use_mkldnn': self.use_mkldnn, + 'data_format': self.data_format, + 'fuse_relu': True, + }, + ) + block.append_op( + type=self.op_name(), + inputs={ + "X": block.var('conv_output'), + }, + outputs={"Out": block.var('nearest_interp_output')}, + attrs={ + 'interp_method': self.interp_method, + 'out_h': self.out_h, + 'out_w': self.out_w, + 'scale': self.scale, + 'data_layout': self.data_layout, + 'use_mkldnn': self.use_mkldnn, + }, + ) + block.append_op( + type='dropout', + inputs={ + "X": block.var('nearest_interp_output'), + }, + outputs={ + 'Out': block.var('dropout_out'), + 'Mask': block.var('dropout_mask'), + }, + attrs={ + 'dropout_prob': self.dropout_prob, + }, + ) def check_graph_after_pass(self, graph): for op in graph.all_op_nodes(): @@ -329,12 +361,21 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): _scope=self.scope, _place=self.place, _core=core, - _debug=False) - - input_scale_tensor = quant2_int8_mkldnn_pass._convert_scale2tensor( - np.array(self.scale).astype(np.float64)) - output_scale_tensor 
= quant2_int8_mkldnn_pass._convert_scale2tensor( - np.array(1. / self.scale * self.scale).astype(np.float64)) + _debug=False, + ) + + input_scale_tensor = ( + quant2_int8_mkldnn_pass._convert_scale2tensor( + np.array(self.scale).astype(np.float64) + ) + ) + output_scale_tensor = ( + quant2_int8_mkldnn_pass._convert_scale2tensor( + np.array(1.0 / self.scale * self.scale).astype( + np.float64 + ) + ) + ) var_scale = { "input": (False, input_scale_tensor), "filter": (False, input_scale_tensor), @@ -347,7 +388,6 @@ class TestQuant2Int8MkldnnPassConv2D(unittest.TestCase): self.check_graph_after_pass(graph) class TestQuant2Int8MkldnnPassNearestInterpV2(unittest.TestCase): - def op_name(self): return "nearest_interp_v2" diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_mkldnn_pass.py index da3b0139e024a0483914540f6f9e8c2586527e37..23b89512454a7e332cb2b53137be6e31c8a75469 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quantization_mkldnn_pass.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_mkldnn_pass.py @@ -29,19 +29,23 @@ os.environ["CPU_NUM"] = "1" def conv_net(img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) avg_loss = paddle.mean(loss) @@ -49,12 +53,11 @@ def conv_net(img, label): class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): - def setUp(self): self.quantizable_op_and_inputs = { 'conv2d': ['Input', 'Filter'], 'depthwise_conv2d': ['Input', 'Filter'], - 'mul': ['X', 'Y'] + 'mul': ['X', 'Y'], } def check_program(self, program): @@ -74,25 +77,27 @@ class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss = conv_net(img, label) if not is_test: opt = fluid.optimizer.Adam(learning_rate=0.001) opt.minimize(loss) return [img, label], loss - def mkldnn_based_freeze_graph(self, - use_cuda, - seed, - activation_quant_type, - weight_quant_type='abs_max', - quant_perf=False, - for_ci=False): + def mkldnn_based_freeze_graph( + self, + use_cuda, + seed, + activation_quant_type, + weight_quant_type='abs_max', + quant_perf=False, + for_ci=False, + ): random.seed(0) np.random.seed(0) @@ -115,42 +120,48 @@ class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): scope=scope, place=place, activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quant_type) + weight_quantize_type=weight_quant_type, + ) 
transform_pass.apply(main_graph) transform_pass = QuantizationTransformPass( scope=scope, place=place, activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quant_type) + weight_quantize_type=weight_quant_type, + ) transform_pass.apply(test_graph) build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = False build_strategy.enable_inplace = False binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) quantized_test_program = test_graph.to_program() iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) # Training the model to get the weights value with fluid.scope_guard(scope): for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) # Freeze graph for inference, but the weight of fc/conv is still float type. freeze_pass = QuantizationFreezePass( - scope=scope, place=place, weight_quantize_type=weight_quant_type) + scope=scope, place=place, weight_quantize_type=weight_quant_type + ) freeze_pass.apply(test_graph) # Transform quantized graph for MKL-DNN INT8 inference @@ -163,8 +174,14 @@ class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): if op.name().find('quantize') > -1: marked_nodes.add(op) test_graph.draw( - '.', 'test_mkldnn' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'test_mkldnn' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) mkldnn_program = test_graph.to_program() # Check the transformation weights of conv2d and mul @@ -178,9 +195,16 @@ class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): # output self.check_program(mkldnn_program) if not for_ci: - print('{}: {}'.format( - 'w_mkldnn' + dev_name + activation_quant_type + '_' + - weight_quant_type, np.sum(w_mkldnn))) + print( + '{}: {}'.format( + 'w_mkldnn' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + np.sum(w_mkldnn), + ) + ) def test_mkldnn_graph_cpu_static(self): with fluid.unique_name.guard(): @@ -189,13 +213,15 @@ class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): seed=2, activation_quant_type='range_abs_max', weight_quant_type='abs_max', - for_ci=True) + for_ci=True, + ) self.mkldnn_based_freeze_graph( False, seed=2, activation_quant_type='moving_average_abs_max', weight_quant_type='abs_max', - for_ci=True) + for_ci=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py index 4310c259ca5f4790cf2a2123f63cc9fc4053465f..be42ab5cf2e20a8618b51ccacfc90bfab445f78c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py @@ -45,50 +45,47 @@ def linear_fc(num): def residual_block(num, quant_skip_pattern=None): - - def conv_bn_layer(input, 
- ch_out, - filter_size, - stride, - padding, - act='relu', - bias_attr=False): - tmp = fluid.layers.conv2d(input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=bias_attr) + def conv_bn_layer( + input, ch_out, filter_size, stride, padding, act='relu', bias_attr=False + ): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr, + ) return fluid.layers.batch_norm(input=tmp, act=act) - data = fluid.layers.data(name='image', - shape=[1, 1, 32, 32], - dtype='float32', - append_batch_size=False) - label = fluid.layers.data(name='label', - shape=[1, 1], - dtype='int64', - append_batch_size=False) + data = fluid.layers.data( + name='image', + shape=[1, 1, 32, 32], + dtype='float32', + append_batch_size=False, + ) + label = fluid.layers.data( + name='label', shape=[1, 1], dtype='int64', append_batch_size=False + ) hidden = data for _ in range(num): conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True) short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None) hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu') - matmul_weight = fluid.layers.create_parameter(shape=[1, 16, 32, 32], - dtype='float32') + matmul_weight = fluid.layers.create_parameter( + shape=[1, 16, 32, 32], dtype='float32' + ) hidden = fluid.layers.matmul(hidden, matmul_weight, True, True) if quant_skip_pattern: with fluid.name_scope(quant_skip_pattern): - pool = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='avg', - pool_stride=2) + pool = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + ) else: - pool = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='avg', - pool_stride=2) + pool = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + ) fc = fluid.layers.fc(input=pool, size=10) loss = fluid.layers.cross_entropy(input=fc, label=label) loss = paddle.mean(loss) @@ -96,21 +93,25 @@ def residual_block(num, quant_skip_pattern=None): def conv_net(img, label, quant_skip_pattern): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - pool_type='max', - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - pool_type='avg', - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu", + ) hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu') with fluid.name_scope(quant_skip_pattern): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') @@ -120,17 +121,16 @@ def conv_net(img, label, quant_skip_pattern): class TestQuantizationTransformPass(unittest.TestCase): - def setUp(self): self.quantizable_op_and_inputs = { 'conv2d': ['Input', 'Filter'], 'depthwise_conv2d': ['Input', 'Filter'], - 'mul': ['X', 'Y'] + 'mul': ['X', 'Y'], } self.quantizable_grad_op_inputs = { 'conv2d_grad': ['Input', 'Filter'], 'depthwise_conv2d_grad': ['Input', 'Filter'], - 'mul_grad': ['X', 'Y'] + 'mul_grad': ['X', 'Y'], } def check_program(self, program): @@ -141,7 
+141,8 @@ class TestQuantizationTransformPass(unittest.TestCase): if op.type in self.quantizable_op_and_inputs: for arg_name in op.input_arg_names: self.assertTrue( - arg_name.endswith('.quantized.dequantized')) + arg_name.endswith('.quantized.dequantized') + ) quantized_ops.add(arg_name) for op in block.ops: @@ -150,13 +151,13 @@ class TestQuantizationTransformPass(unittest.TestCase): for pname in self.quantizable_grad_op_inputs[op.type]: arg_name = op.input(pname)[0] self.assertTrue( - arg_name.endswith('.quantized.dequantized')) + arg_name.endswith('.quantized.dequantized') + ) self.assertTrue(arg_name in quantized_ops) - def linear_fc_quant(self, - activation_quant_type, - weight_quantize_type, - for_ci=True): + def linear_fc_quant( + self, activation_quant_type, weight_quantize_type, for_ci=True + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -169,15 +170,17 @@ class TestQuantizationTransformPass(unittest.TestCase): scope=fluid.global_scope(), place=place, activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quantize_type) + weight_quantize_type=weight_quantize_type, + ) transform_pass.apply(graph) if not for_ci: marked_nodes = set() for op in graph.all_op_nodes(): if op.name().find('quantize') > -1: marked_nodes.add(op) - graph.draw('.', 'quantize_fc_' + activation_quant_type, - marked_nodes) + graph.draw( + '.', 'quantize_fc_' + activation_quant_type, marked_nodes + ) program = graph.to_program() self.check_program(program) val_graph = IrGraph(core.Graph(program.desc), for_test=False) @@ -186,8 +189,9 @@ class TestQuantizationTransformPass(unittest.TestCase): for op in val_graph.all_op_nodes(): if op.name().find('quantize') > -1: val_marked_nodes.add(op) - val_graph.draw('.', 'val_fc_' + activation_quant_type, - val_marked_nodes) + val_graph.draw( + '.', 'val_fc_' + activation_quant_type, val_marked_nodes + ) def test_linear_fc_quant_abs_max(self): self.linear_fc_quant('abs_max', 'abs_max', for_ci=True) @@ -196,15 +200,17 @@ class TestQuantizationTransformPass(unittest.TestCase): self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True) def test_linear_fc_quant_moving_average_abs_max(self): - self.linear_fc_quant('moving_average_abs_max', - 'channel_wise_abs_max', - for_ci=True) - - def residual_block_quant(self, - activation_quant_type, - weight_quantize_type, - quantizable_op_type, - for_ci=True): + self.linear_fc_quant( + 'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True + ) + + def residual_block_quant( + self, + activation_quant_type, + weight_quantize_type, + quantizable_op_type, + for_ci=True, + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -218,15 +224,17 @@ class TestQuantizationTransformPass(unittest.TestCase): place=place, activation_quantize_type=activation_quant_type, weight_quantize_type=weight_quantize_type, - quantizable_op_type=quantizable_op_type) + quantizable_op_type=quantizable_op_type, + ) transform_pass.apply(graph) if not for_ci: marked_nodes = set() for op in graph.all_op_nodes(): if op.name().find('quantize') > -1: marked_nodes.add(op) - graph.draw('.', 'quantize_residual_' + activation_quant_type, - marked_nodes) + graph.draw( + '.', 'quantize_residual_' + activation_quant_type, marked_nodes + ) program = graph.to_program() self.check_program(program) val_graph = IrGraph(core.Graph(program.desc), for_test=False) @@ -235,53 +243,54 @@ class TestQuantizationTransformPass(unittest.TestCase): for op in 
val_graph.all_op_nodes(): if op.name().find('quantize') > -1: val_marked_nodes.add(op) - val_graph.draw('.', 'val_residual_' + activation_quant_type, - val_marked_nodes) + val_graph.draw( + '.', 'val_residual_' + activation_quant_type, val_marked_nodes + ) def test_residual_block_abs_max(self): quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul'] - self.residual_block_quant('abs_max', - 'abs_max', - quantizable_op_type, - for_ci=True) + self.residual_block_quant( + 'abs_max', 'abs_max', quantizable_op_type, for_ci=True + ) def test_residual_block_range_abs_max(self): quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul'] - self.residual_block_quant('range_abs_max', - 'abs_max', - quantizable_op_type, - for_ci=True) + self.residual_block_quant( + 'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True + ) def test_residual_block_moving_average_abs_max(self): quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul'] - self.residual_block_quant('moving_average_abs_max', - 'channel_wise_abs_max', - quantizable_op_type, - for_ci=True) + self.residual_block_quant( + 'moving_average_abs_max', + 'channel_wise_abs_max', + quantizable_op_type, + for_ci=True, + ) class TestQuantizationFreezePass(unittest.TestCase): - - def freeze_graph(self, - use_cuda, - seed, - activation_quant_type, - bias_correction=False, - weight_quant_type='abs_max', - for_ci=True, - quant_skip_pattern='skip_quant'): - + def freeze_graph( + self, + use_cuda, + seed, + activation_quant_type, + bias_correction=False, + weight_quant_type='abs_max', + for_ci=True, + quant_skip_pattern='skip_quant', + ): def build_program(main, startup, is_test): main.random_seed = seed startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss = conv_net(img, label, quant_skip_pattern) if not is_test: opt = fluid.optimizer.Adam(learning_rate=0.001) @@ -310,14 +319,16 @@ class TestQuantizationFreezePass(unittest.TestCase): place=place, activation_quantize_type=activation_quant_type, weight_quantize_type=weight_quant_type, - skip_pattern=quant_skip_pattern) + skip_pattern=quant_skip_pattern, + ) transform_pass.apply(main_graph) transform_pass = QuantizationTransformPass( scope=scope, place=place, activation_quantize_type=activation_quant_type, weight_quantize_type=weight_quant_type, - skip_pattern=quant_skip_pattern) + skip_pattern=quant_skip_pattern, + ) transform_pass.apply(test_graph) dev_name = '_gpu_' if use_cuda else '_cpu_' if not for_ci: @@ -326,57 +337,85 @@ class TestQuantizationFreezePass(unittest.TestCase): if op.name().find('quantize') > -1: marked_nodes.add(op) main_graph.draw( - '.', 'main' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'main' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) marked_nodes = set() for op in test_graph.all_op_nodes(): if op.name().find('quantize') > -1: marked_nodes.add(op) test_graph.draw( - '.', 'test' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'test' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) build_strategy = fluid.BuildStrategy() 
build_strategy.memory_optimize = False build_strategy.enable_inplace = False build_strategy.fuse_all_reduce_ops = False binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) quantized_test_program = test_graph.to_program() iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) with fluid.scope_guard(scope): for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) if not for_ci: - print('{}: {}'.format( - 'loss' + dev_name + activation_quant_type + '_' + - weight_quant_type, loss_v)) + print( + '{}: {}'.format( + 'loss' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + loss_v, + ) + ) test_data = next(test_reader()) with fluid.program_guard(quantized_test_program): - w_var = fluid.framework._get_var('conv2d_1.w_0.quantized', - quantized_test_program) + w_var = fluid.framework._get_var( + 'conv2d_1.w_0.quantized', quantized_test_program + ) # Testing with fluid.scope_guard(scope): - test_loss1, w_quant = exe.run(program=quantized_test_program, - feed=feeder.feed(test_data), - fetch_list=[loss, w_var]) + test_loss1, w_quant = exe.run( + program=quantized_test_program, + feed=feeder.feed(test_data), + fetch_list=[loss, w_var], + ) # Freeze graph for inference, but the weight of fc/conv is still float type. 
freeze_pass = QuantizationFreezePass( - scope=scope, place=place, bias_correction=bias_correction, \ - weight_quantize_type=weight_quant_type) + scope=scope, + place=place, + bias_correction=bias_correction, + weight_quantize_type=weight_quant_type, + ) freeze_pass.apply(test_graph) if not for_ci: marked_nodes = set() @@ -384,32 +423,68 @@ class TestQuantizationFreezePass(unittest.TestCase): if op.name().find('quantize') > -1: marked_nodes.add(op) test_graph.draw( - '.', 'test_freeze' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'test_freeze' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) server_program = test_graph.to_program() with fluid.scope_guard(scope): - test_loss2, = exe.run(program=server_program, - feed=feeder.feed(test_data), - fetch_list=[loss]) + (test_loss2,) = exe.run( + program=server_program, + feed=feeder.feed(test_data), + fetch_list=[loss], + ) self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3) if not for_ci: - print('{}: {}'.format( - 'test_loss1' + dev_name + activation_quant_type + '_' + - weight_quant_type, test_loss1)) - print('{}: {}'.format( - 'test_loss2' + dev_name + activation_quant_type + '_' + - weight_quant_type, test_loss2)) + print( + '{}: {}'.format( + 'test_loss1' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + test_loss1, + ) + ) + print( + '{}: {}'.format( + 'test_loss2' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + test_loss2, + ) + ) w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor()) # Maybe failed, this is due to the calculation precision # self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant)) if not for_ci: - print('{}: {}'.format( - 'w_freeze' + dev_name + activation_quant_type + '_' + - weight_quant_type, np.sum(w_freeze))) - print('{}: {}'.format( - 'w_quant' + dev_name + activation_quant_type + '_' + - weight_quant_type, np.sum(w_quant))) + print( + '{}: {}'.format( + 'w_freeze' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + np.sum(w_freeze), + ) + ) + print( + '{}: {}'.format( + 'w_quant' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + np.sum(w_quant), + ) + ) # Convert parameter to 8-bit. convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place) @@ -420,30 +495,62 @@ class TestQuantizationFreezePass(unittest.TestCase): if op.name().find('quantize') > -1: marked_nodes.add(op) test_graph.draw( - '.', 'test_int8' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'test_int8' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) server_program_int8 = test_graph.to_program() # Save the 8-bit parameter and model file. with fluid.scope_guard(scope): fluid.io.save_inference_model( - 'server_int8' + dev_name + activation_quant_type + '_' + - weight_quant_type, ['image', 'label'], [loss], exe, - server_program_int8) + 'server_int8' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + ['image', 'label'], + [loss], + exe, + server_program_int8, + ) # Test whether the 8-bit parameter and model file can be loaded successfully. [infer, feed, fetch] = fluid.io.load_inference_model( - 'server_int8' + dev_name + activation_quant_type + '_' + - weight_quant_type, exe) + 'server_int8' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + exe, + ) # Check the loaded 8-bit weight. 
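A condensed sketch of the post-training sequence that freeze_graph() exercises above — assuming `scope`, `place`, `test_graph`, and `weight_quant_type` are prepared exactly as in the test, and that the import mirrors what this test file already uses outside this hunk:

    from paddle.fluid.contrib.slim.quantization import (
        QuantizationFreezePass,
        ConvertToInt8Pass,
        TransformForMobilePass,
    )

    # 1. Fold the fake quant/dequant ops; fc/conv weights are still float here.
    freeze_pass = QuantizationFreezePass(
        scope=scope, place=place, weight_quantize_type=weight_quant_type
    )
    freeze_pass.apply(test_graph)

    # 2. Rewrite the frozen weights as int8 so the saved model shrinks.
    convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
    convert_int8_pass.apply(test_graph)

    # 3. Adapt the quantization ops for mobile inference.
    mobile_pass = TransformForMobilePass()
    mobile_pass.apply(test_graph)

    # The resulting program is what the test saves with fluid.io.save_inference_model.
    inference_program = test_graph.to_program()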
w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor()) self.assertEqual(w_8bit.dtype, np.int8) self.assertEqual(np.sum(w_8bit), np.sum(w_freeze)) if not for_ci: - print('{}: {}'.format( - 'w_8bit' + dev_name + activation_quant_type + '_' + - weight_quant_type, np.sum(w_8bit))) - print('{}: {}'.format( - 'w_freeze' + dev_name + activation_quant_type + '_' + - weight_quant_type, np.sum(w_freeze))) + print( + '{}: {}'.format( + 'w_8bit' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + np.sum(w_8bit), + ) + ) + print( + '{}: {}'.format( + 'w_freeze' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + np.sum(w_freeze), + ) + ) mobile_pass = TransformForMobilePass() mobile_pass.apply(test_graph) @@ -453,129 +560,164 @@ class TestQuantizationFreezePass(unittest.TestCase): if op.name().find('quantize') > -1: marked_nodes.add(op) test_graph.draw( - '.', 'test_mobile' + dev_name + activation_quant_type + '_' + - weight_quant_type, marked_nodes) + '.', + 'test_mobile' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + marked_nodes, + ) mobile_program = test_graph.to_program() with fluid.scope_guard(scope): fluid.io.save_inference_model( - 'mobile_int8' + dev_name + activation_quant_type + '_' + - weight_quant_type, ['image', 'label'], [loss], exe, - mobile_program) + 'mobile_int8' + + dev_name + + activation_quant_type + + '_' + + weight_quant_type, + ['image', 'label'], + [loss], + exe, + mobile_program, + ) def test_freeze_graph_cuda_dynamic(self): if fluid.core.is_compiled_with_cuda(): with fluid.unique_name.guard(): - self.freeze_graph(True, - seed=1, - activation_quant_type='abs_max', - weight_quant_type='abs_max', - for_ci=True) + self.freeze_graph( + True, + seed=1, + activation_quant_type='abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) with fluid.unique_name.guard(): - self.freeze_graph(True, - seed=1, - activation_quant_type='abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) + self.freeze_graph( + True, + seed=1, + activation_quant_type='abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) def test_freeze_graph_cpu_dynamic(self): with fluid.unique_name.guard(): - self.freeze_graph(False, - seed=2, - activation_quant_type='abs_max', - weight_quant_type='abs_max', - for_ci=True) - self.freeze_graph(False, - seed=2, - activation_quant_type='abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) + self.freeze_graph( + False, + seed=2, + activation_quant_type='abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) + self.freeze_graph( + False, + seed=2, + activation_quant_type='abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) def test_freeze_graph_cuda_static(self): if fluid.core.is_compiled_with_cuda(): with fluid.unique_name.guard(): - self.freeze_graph(True, - seed=1, - activation_quant_type='range_abs_max', - bias_correction=True, - weight_quant_type='abs_max', - for_ci=True) - self.freeze_graph(True, - seed=1, - activation_quant_type='range_abs_max', - weight_quant_type='abs_max', - for_ci=True) + self.freeze_graph( + True, + seed=1, + activation_quant_type='range_abs_max', + bias_correction=True, + weight_quant_type='abs_max', + for_ci=True, + ) + self.freeze_graph( + True, + seed=1, + activation_quant_type='range_abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) self.freeze_graph( True, seed=1, activation_quant_type='moving_average_abs_max', weight_quant_type='abs_max', - for_ci=True) - self.freeze_graph(True, 
- seed=1, - activation_quant_type='range_abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) + self.freeze_graph( + True, + seed=1, + activation_quant_type='range_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) self.freeze_graph( True, seed=1, activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) self.freeze_graph( True, seed=1, activation_quant_type='moving_average_abs_max', bias_correction=True, weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) def test_freeze_graph_cpu_static(self): with fluid.unique_name.guard(): - self.freeze_graph(False, - seed=2, - activation_quant_type='range_abs_max', - weight_quant_type='abs_max', - for_ci=True) - self.freeze_graph(False, - seed=2, - activation_quant_type='moving_average_abs_max', - weight_quant_type='abs_max', - for_ci=True) - self.freeze_graph(False, - seed=2, - activation_quant_type='range_abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) - self.freeze_graph(False, - seed=2, - activation_quant_type='moving_average_abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) + self.freeze_graph( + False, + seed=2, + activation_quant_type='range_abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) + self.freeze_graph( + False, + seed=2, + activation_quant_type='moving_average_abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) + self.freeze_graph( + False, + seed=2, + activation_quant_type='range_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) + self.freeze_graph( + False, + seed=2, + activation_quant_type='moving_average_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) def quant_dequant_residual_block(num, quant_skip_pattern=None): - - def conv_bn_layer(input, - ch_out, - filter_size, - stride, - padding, - act='relu', - bias_attr=False): - tmp = fluid.layers.conv2d(input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=bias_attr) + def conv_bn_layer( + input, ch_out, filter_size, stride, padding, act='relu', bias_attr=False + ): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr, + ) return fluid.layers.batch_norm(input=tmp, act=act) data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32') - data2 = fluid.layers.data(name='matmul_input', - shape=[16, 32, 32], - dtype='float32') + data2 = fluid.layers.data( + name='matmul_input', shape=[16, 32, 32], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = data1 for _ in range(num): @@ -585,43 +727,37 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None): hidden = fluid.layers.matmul(hidden, data2, True, True) if isinstance(quant_skip_pattern, str): with fluid.name_scope(quant_skip_pattern): - pool1 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='avg', - pool_stride=2) - pool2 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='max', - pool_stride=2) - pool_add = fluid.layers.elementwise_add(x=pool1, - y=pool2, - act='relu') + pool1 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + ) + pool2 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='max', pool_stride=2 + ) + pool_add = fluid.layers.elementwise_add( + x=pool1, y=pool2, 
act='relu' + ) elif isinstance(quant_skip_pattern, list): - assert len( - quant_skip_pattern - ) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.' + assert ( + len(quant_skip_pattern) > 1 + ), 'test config error: the len of quant_skip_pattern list should be greater than 1.' with fluid.name_scope(quant_skip_pattern[0]): - pool1 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='avg', - pool_stride=2) - pool2 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='max', - pool_stride=2) + pool1 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + ) + pool2 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='max', pool_stride=2 + ) with fluid.name_scope(quant_skip_pattern[1]): - pool_add = fluid.layers.elementwise_add(x=pool1, - y=pool2, - act='relu') + pool_add = fluid.layers.elementwise_add( + x=pool1, y=pool2, act='relu' + ) else: - pool1 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='avg', - pool_stride=2) - pool2 = fluid.layers.pool2d(input=hidden, - pool_size=2, - pool_type='max', - pool_stride=2) + pool1 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + ) + pool2 = fluid.layers.pool2d( + input=hidden, pool_size=2, pool_type='max', pool_stride=2 + ) pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu') fc = fluid.layers.fc(input=pool_add, size=10) loss = fluid.layers.cross_entropy(input=fc, label=label) @@ -630,7 +766,6 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None): class TestAddQuantDequantPass(unittest.TestCase): - def setUp(self): self._target_ops = {'elementwise_add', 'pool2d'} self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'} @@ -641,32 +776,40 @@ class TestAddQuantDequantPass(unittest.TestCase): if op_node.name() in self._target_ops: user_skipped = False if isinstance(skip_pattern, list): - user_skipped = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") for pattern in skip_pattern) + user_skipped = op_node.op().has_attr( + "op_namescope" + ) and any( + pattern in op_node.op().attr("op_namescope") + for pattern in skip_pattern + ) elif isinstance(skip_pattern, str): - user_skipped = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find(skip_pattern) != -1 + user_skipped = ( + op_node.op().has_attr("op_namescope") + and op_node.op().attr("op_namescope").find(skip_pattern) + != -1 + ) if user_skipped: continue in_nodes_all_not_persistable = True for input_name in op_node.input_arg_names(): - in_node = graph._find_node_by_name(op_node.inputs, - input_name) - in_nodes_all_not_persistable = (in_nodes_all_not_persistable - and - not in_node.persistable()) + in_node = graph._find_node_by_name( + op_node.inputs, input_name + ) + in_nodes_all_not_persistable = ( + in_nodes_all_not_persistable + and not in_node.persistable() + ) if not in_nodes_all_not_persistable: continue input_names = op_node.input_arg_names() for input_name in input_names: self.assertTrue(input_name.endswith('.quant_dequant')) - def residual_block_quant(self, - quantizable_op_type, - skip_pattern=None, - for_ci=True): + def residual_block_quant( + self, quantizable_op_type, skip_pattern=None, for_ci=True + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -679,7 +822,8 @@ class TestAddQuantDequantPass(unittest.TestCase): scope=fluid.global_scope(), place=place, skip_pattern=skip_pattern, - 
quantizable_op_type=quantizable_op_type) + quantizable_op_type=quantizable_op_type, + ) add_quant_dequant_pass.apply(graph) if not for_ci: marked_nodes = set() @@ -699,35 +843,36 @@ class TestAddQuantDequantPass(unittest.TestCase): def test_residual_block(self): quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul'] - self.residual_block_quant(quantizable_op_type, - skip_pattern=None, - for_ci=True) + self.residual_block_quant( + quantizable_op_type, skip_pattern=None, for_ci=True + ) def test_residual_block_skip_pattern(self): quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul'] - self.residual_block_quant(quantizable_op_type, - skip_pattern='skip_quant', - for_ci=True) + self.residual_block_quant( + quantizable_op_type, skip_pattern='skip_quant', for_ci=True + ) def test_residual_block_skip_pattern_1(self): quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul'] - self.residual_block_quant(quantizable_op_type, - skip_pattern=['skip_quant1', 'skip_quant2'], - for_ci=True) + self.residual_block_quant( + quantizable_op_type, + skip_pattern=['skip_quant1', 'skip_quant2'], + for_ci=True, + ) class TestQuantizationTransformPassV2(unittest.TestCase): - def setUp(self): self.quantizable_op_and_inputs = { 'conv2d': ['Input', 'Filter'], 'depthwise_conv2d': ['Input', 'Filter'], - 'mul': ['X', 'Y'] + 'mul': ['X', 'Y'], } self.quantizable_grad_op_inputs = { 'conv2d_grad': ['Input', 'Filter'], 'depthwise_conv2d_grad': ['Input', 'Filter'], - 'mul_grad': ['X', 'Y'] + 'mul_grad': ['X', 'Y'], } def check_program(self, program): @@ -738,7 +883,8 @@ class TestQuantizationTransformPassV2(unittest.TestCase): if op.type in self.quantizable_op_and_inputs: for arg_name in op.input_arg_names: self.assertTrue( - arg_name.endswith('.quantized.dequantized')) + arg_name.endswith('.quantized.dequantized') + ) quantized_ops.add(arg_name) for op in block.ops: @@ -747,13 +893,13 @@ class TestQuantizationTransformPassV2(unittest.TestCase): for pname in self.quantizable_grad_op_inputs[op.type]: arg_name = op.input(pname)[0] self.assertTrue( - arg_name.endswith('.quantized.dequantized')) + arg_name.endswith('.quantized.dequantized') + ) self.assertTrue(arg_name in quantized_ops) - def linear_fc_quant(self, - activation_quant_type, - weight_quantize_type, - for_ci=True): + def linear_fc_quant( + self, activation_quant_type, weight_quantize_type, for_ci=True + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -766,15 +912,17 @@ class TestQuantizationTransformPassV2(unittest.TestCase): scope=fluid.global_scope(), place=place, activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quantize_type) + weight_quantize_type=weight_quantize_type, + ) transform_pass.apply(graph) if not for_ci: marked_nodes = set() for op in graph.all_op_nodes(): if op.name().find('quantize') > -1: marked_nodes.add(op) - graph.draw('.', 'quantize_fc_' + activation_quant_type, - marked_nodes) + graph.draw( + '.', 'quantize_fc_' + activation_quant_type, marked_nodes + ) program = graph.to_program() self.check_program(program) val_graph = IrGraph(core.Graph(program.desc), for_test=False) @@ -783,8 +931,9 @@ class TestQuantizationTransformPassV2(unittest.TestCase): for op in val_graph.all_op_nodes(): if op.name().find('quantize') > -1: val_marked_nodes.add(op) - val_graph.draw('.', 'val_fc_' + activation_quant_type, - val_marked_nodes) + val_graph.draw( + '.', 'val_fc_' + activation_quant_type, val_marked_nodes + ) def 
test_linear_fc_quant_abs_max(self): self.linear_fc_quant('abs_max', 'abs_max', for_ci=True) @@ -792,11 +941,13 @@ class TestQuantizationTransformPassV2(unittest.TestCase): def test_linear_fc_quant_channel_wise_abs_max(self): self.linear_fc_quant('abs_max', 'channel_wise_abs_max', for_ci=True) - def residual_block_quant(self, - activation_quant_type, - weight_quantize_type, - quantizable_op_type, - for_ci=True): + def residual_block_quant( + self, + activation_quant_type, + weight_quantize_type, + quantizable_op_type, + for_ci=True, + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -810,15 +961,17 @@ class TestQuantizationTransformPassV2(unittest.TestCase): place=place, activation_quantize_type=activation_quant_type, weight_quantize_type=weight_quantize_type, - quantizable_op_type=quantizable_op_type) + quantizable_op_type=quantizable_op_type, + ) transform_pass.apply(graph) if not for_ci: marked_nodes = set() for op in graph.all_op_nodes(): if op.name().find('quantize') > -1: marked_nodes.add(op) - graph.draw('.', 'quantize_residual_' + activation_quant_type, - marked_nodes) + graph.draw( + '.', 'quantize_residual_' + activation_quant_type, marked_nodes + ) program = graph.to_program() self.check_program(program) val_graph = IrGraph(core.Graph(program.desc), for_test=False) @@ -827,22 +980,21 @@ class TestQuantizationTransformPassV2(unittest.TestCase): for op in val_graph.all_op_nodes(): if op.name().find('quantize') > -1: val_marked_nodes.add(op) - val_graph.draw('.', 'val_residual_' + activation_quant_type, - val_marked_nodes) + val_graph.draw( + '.', 'val_residual_' + activation_quant_type, val_marked_nodes + ) def test_residual_block_abs_max(self): quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul'] - self.residual_block_quant('abs_max', - 'abs_max', - quantizable_op_type, - for_ci=True) + self.residual_block_quant( + 'abs_max', 'abs_max', quantizable_op_type, for_ci=True + ) def test_residual_block_channel_wise_abs_max(self): quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul'] - self.residual_block_quant('abs_max', - 'channel_wise_abs_max', - quantizable_op_type, - for_ci=True) + self.residual_block_quant( + 'abs_max', 'channel_wise_abs_max', quantizable_op_type, for_ci=True + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py index a8466ad7371c364d824d99f44f17a5a03e7c6a02..46e37002460372185fd8cded920e602cd7a5c538 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py @@ -34,21 +34,25 @@ os.environ["CPU_NUM"] = "1" def conv_net(img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - pool_type='max', - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - pool_type='avg', - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu", + ) hidden = 
fluid.layers.fc(input=conv_pool_2, size=100, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -57,25 +61,25 @@ def conv_net(img, label): class TestQuantizationScalePass(unittest.TestCase): - - def quantization_scale(self, - use_cuda, - seed, - activation_quant_type, - weight_quant_type='abs_max', - for_ci=False): - + def quantization_scale( + self, + use_cuda, + seed, + activation_quant_type, + weight_quant_type='abs_max', + for_ci=False, + ): def build_program(main, startup, is_test): main.random_seed = seed startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss = conv_net(img, label) if not is_test: opt = fluid.optimizer.Adam(learning_rate=0.0001) @@ -104,7 +108,8 @@ class TestQuantizationScalePass(unittest.TestCase): scope=scope, place=place, activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quant_type) + weight_quantize_type=weight_quant_type, + ) transform_pass.apply(main_graph) transform_pass.apply(test_graph) @@ -133,20 +138,22 @@ class TestQuantizationScalePass(unittest.TestCase): build_strategy.enable_inplace = False build_strategy.fuse_all_reduce_ops = False binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) with fluid.scope_guard(scope): for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) if not for_ci: print('{}: {}'.format('loss' + dev_name, loss_v)) @@ -155,7 +162,8 @@ class TestQuantizationScalePass(unittest.TestCase): # Freeze graph for inference, but the weight of fc/conv is still float type. 
freeze_pass = QuantizationFreezePass( - scope=scope, place=place, weight_quantize_type=weight_quant_type) + scope=scope, place=place, weight_quantize_type=weight_quant_type + ) freeze_pass.apply(test_graph) server_program = test_graph.to_program() @@ -168,16 +176,21 @@ class TestQuantizationScalePass(unittest.TestCase): tempdir = tempfile.TemporaryDirectory() mapping_table_path = os.path.join( - tempdir.name, 'quant_scale_model' + dev_name + '.txt') + tempdir.name, 'quant_scale_model' + dev_name + '.txt' + ) save_path = os.path.join(tempdir.name, 'quant_scale_model' + dev_name) with open(mapping_table_path, 'w') as f: f.write(str(server_program)) with fluid.scope_guard(scope): - fluid.io.save_inference_model(save_path, ['image', 'label'], [loss], - exe, - server_program, - clip_extra=True) + fluid.io.save_inference_model( + save_path, + ['image', 'label'], + [loss], + exe, + server_program, + clip_extra=True, + ) tempdir.cleanup() def test_quant_scale_cuda(self): @@ -188,7 +201,8 @@ class TestQuantizationScalePass(unittest.TestCase): seed=1, activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) def test_quant_scale_cpu(self): with fluid.unique_name.guard(): @@ -197,7 +211,8 @@ class TestQuantizationScalePass(unittest.TestCase): seed=2, activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantize_transpiler_v2.py b/python/paddle/fluid/contrib/slim/tests/test_quantize_transpiler_v2.py index 3893177cbf033c5ab14e2d59d5665d46c9cecd61..3b487ae1fae2b0d3dfb985555738b08ff3180b68 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quantize_transpiler_v2.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quantize_transpiler_v2.py @@ -19,7 +19,9 @@ import numpy as np import paddle.fluid as fluid import paddle from paddle.fluid.framework import IrGraph -from paddle.fluid.contrib.slim.quantization.quantize_transpiler_v2 import QuantizeTranspilerV2 +from paddle.fluid.contrib.slim.quantization.quantize_transpiler_v2 import ( + QuantizeTranspilerV2, +) from paddle.fluid import core paddle.enable_static() @@ -29,20 +31,24 @@ os.environ["CPU_NUM"] = "1" def conv_net(img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - pool_type='max', - act="relu") - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - pool_type='avg', - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu", + ) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu", + ) with fluid.name_scope("skip_quant"): hidden = fluid.layers.fc(input=conv_pool_1, size=100, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') @@ -52,25 +58,25 @@ def conv_net(img, label): class TestQuantizeProgramPass(unittest.TestCase): - - def quantize_program(self, - use_cuda, - seed, - activation_quant_type='abs_max', - weight_quant_type='abs_max', - for_ci=False): - + def quantize_program( + self, + use_cuda, + seed, + activation_quant_type='abs_max', + weight_quant_type='abs_max', + for_ci=False, + ): def 
build_program(main, startup, is_test): main.random_seed = seed startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss = conv_net(img, label) if not is_test: opt = fluid.optimizer.Adam(learning_rate=0.0001) @@ -89,8 +95,9 @@ class TestQuantizeProgramPass(unittest.TestCase): test_program = test_program.clone(for_test=True) if not for_ci: - train_graph = IrGraph(core.Graph(train_program.desc), - for_test=False) + train_graph = IrGraph( + core.Graph(train_program.desc), for_test=False + ) train_graph.draw('.', 'train_program_1') test_graph = IrGraph(core.Graph(test_program.desc), for_test=True) test_graph.draw('.', 'test_program_1') @@ -98,7 +105,8 @@ class TestQuantizeProgramPass(unittest.TestCase): # 2 Apply quantization qt = QuantizeTranspilerV2( activation_quantize_type=activation_quant_type, - weight_quantize_type=weight_quant_type) + weight_quantize_type=weight_quant_type, + ) qt.apply(train_program, startup_program, is_test=False) qt.apply(test_program, startup_program, is_test=True) @@ -109,8 +117,9 @@ class TestQuantizeProgramPass(unittest.TestCase): with fluid.scope_guard(scope): exe.run(startup_program) if not for_ci: - train_graph = IrGraph(core.Graph(train_program.desc), - for_test=False) + train_graph = IrGraph( + core.Graph(train_program.desc), for_test=False + ) train_graph.draw('.', 'train_program_2') test_graph = IrGraph(core.Graph(test_program.desc), for_test=True) test_graph.draw('.', 'test_program_2') @@ -120,19 +129,21 @@ class TestQuantizeProgramPass(unittest.TestCase): build_strategy.enable_inplace = False build_strategy.fuse_all_reduce_ops = False binary = fluid.CompiledProgram(train_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) with fluid.scope_guard(scope): for idx in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) if not for_ci and idx % 20 == 0: print('{}: {}'.format('loss', np.mean(loss_v))) @@ -142,19 +153,24 @@ class TestQuantizeProgramPass(unittest.TestCase): qt.convert(test_program, scope) if not for_ci: with fluid.scope_guard(scope): - fluid.io.save_inference_model('./infer_model', - ['image', 'label'], [loss], - exe, - test_program, - clip_extra=True) + fluid.io.save_inference_model( + './infer_model', + ['image', 'label'], + [loss], + exe, + test_program, + clip_extra=True, + ) def test_gpu_1(self): if fluid.core.is_compiled_with_cuda(): - self.quantize_program(use_cuda=True, - seed=1, - activation_quant_type='abs_max', - weight_quant_type='abs_max', - for_ci=True) + self.quantize_program( + use_cuda=True, + seed=1, + activation_quant_type='abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) def test_gpu_2(self): if fluid.core.is_compiled_with_cuda(): @@ -163,21 +179,26 @@ class 
TestQuantizeProgramPass(unittest.TestCase): seed=1, activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', - for_ci=True) + for_ci=True, + ) def test_cpu_1(self): - self.quantize_program(use_cuda=False, - seed=2, - activation_quant_type='abs_max', - weight_quant_type='abs_max', - for_ci=True) + self.quantize_program( + use_cuda=False, + seed=2, + activation_quant_type='abs_max', + weight_quant_type='abs_max', + for_ci=True, + ) def test_cpu_2(self): - self.quantize_program(use_cuda=False, - seed=2, - activation_quant_type='moving_average_abs_max', - weight_quant_type='channel_wise_abs_max', - for_ci=True) + self.quantize_program( + use_cuda=False, + seed=2, + activation_quant_type='moving_average_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py index 361440f0238c7a3c13ccd9feb8ad629467e5f1d2..96635700666e24020107019b9313a7ff494f9b9c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py +++ b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py @@ -36,21 +36,25 @@ os.environ["CPU_NUM"] = "1" def conv_net(img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - pool_type='max', - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - pool_type='avg', - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu", + ) hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -66,41 +70,44 @@ def pact(x, name=None): name=x.name + '_pact', initializer=fluid.initializer.ConstantInitializer(value=init_thres), regularizer=fluid.regularizer.L2Decay(0.0001), - learning_rate=1) + learning_rate=1, + ) u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype) x = fluid.layers.elementwise_sub( - x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param))) + x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)) + ) x = fluid.layers.elementwise_add( - x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x))) + x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x)) + ) return x class TestUserDefinedQuantization(unittest.TestCase): - - def quantization_scale(self, - use_cuda, - seed, - activation_quant_type, - weight_quant_type='abs_max', - for_ci=False, - act_preprocess_func=None, - weight_preprocess_func=None, - act_quantize_func=None, - weight_quantize_func=None): - + def quantization_scale( + self, + use_cuda, + seed, + activation_quant_type, + weight_quant_type='abs_max', + for_ci=False, + act_preprocess_func=None, + weight_preprocess_func=None, + act_quantize_func=None, + weight_quantize_func=None, + ): def build_program(main, startup, is_test): main.random_seed = seed startup.random_seed = seed with 
fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) img.stop_gradient = False - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss = conv_net(img, label) if not is_test: opt = fluid.optimizer.SGD(learning_rate=0.0001) @@ -149,7 +156,8 @@ class TestUserDefinedQuantization(unittest.TestCase): act_quantize_func=act_quantize_func, weight_quantize_func=weight_quantize_func, optimizer_func=get_optimizer, - executor=exe) + executor=exe, + ) train_transform_pass.apply(main_graph) test_transform_pass = QuantizationTransformPass( scope=scope, @@ -161,7 +169,8 @@ class TestUserDefinedQuantization(unittest.TestCase): act_quantize_func=act_quantize_func, weight_quantize_func=weight_quantize_func, optimizer_func=get_optimizer, - executor=exe) + executor=exe, + ) test_transform_pass.apply(test_graph) save_dict(test_graph.out_node_mapping_table, mapping_table_path) @@ -180,20 +189,22 @@ class TestUserDefinedQuantization(unittest.TestCase): build_strategy.enable_inplace = False build_strategy.fuse_all_reduce_ops = False binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) iters = 5 batch_size = 8 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) with fluid.scope_guard(scope): for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) out_scale_infer_pass = OutScaleForInferencePass(scope=scope) out_scale_infer_pass.apply(test_graph) @@ -203,7 +214,8 @@ class TestUserDefinedQuantization(unittest.TestCase): place=place, weight_bits=8, activation_bits=8, - weight_quantize_type=weight_quant_type) + weight_quantize_type=weight_quant_type, + ) mapping_table = load_dict(mapping_table_path) test_graph.out_node_mapping_table = mapping_table @@ -220,7 +232,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - act_preprocess_func=pact) + act_preprocess_func=pact, + ) def test_act_preprocess_cpu(self): with fluid.unique_name.guard(): @@ -230,7 +243,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - act_preprocess_func=pact) + act_preprocess_func=pact, + ) def test_weight_preprocess_cuda(self): if fluid.core.is_compiled_with_cuda(): @@ -241,7 +255,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - weight_preprocess_func=pact) + weight_preprocess_func=pact, + ) def test_weight_preprocess_cpu(self): with fluid.unique_name.guard(): @@ -251,7 +266,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', 
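A note on the `pact` helper reformatted a little further up: `u_param` is a learnable clipping threshold, and the paired `elementwise_sub`/`elementwise_add` with `relu` are an algebraic spelling of `clip(x, -u, u)`. A standalone numpy check of that identity (arbitrary threshold, not code from this patch):

    import numpy as np

    x = np.linspace(-30.0, 30.0, 13)
    u = 20.0  # stands in for the learnable u_param
    y = x - np.maximum(x - u, 0.0)   # x - relu(x - u): clips the top at +u
    y = y + np.maximum(-u - y, 0.0)  # y + relu(-u - y): clips the bottom at -u
    assert np.allclose(y, np.clip(x, -u, u))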
for_ci=True, - weight_preprocess_func=pact) + weight_preprocess_func=pact, + ) def test_act_quantize_cuda(self): if fluid.core.is_compiled_with_cuda(): @@ -262,7 +278,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - act_quantize_func=pact) + act_quantize_func=pact, + ) def test_act_quantize_cpu(self): with fluid.unique_name.guard(): @@ -272,7 +289,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - act_quantize_func=pact) + act_quantize_func=pact, + ) def test_weight_quantize_cuda(self): if fluid.core.is_compiled_with_cuda(): @@ -283,7 +301,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - weight_quantize_func=pact) + weight_quantize_func=pact, + ) def test_weight_quantize_cpu(self): with fluid.unique_name.guard(): @@ -293,7 +312,8 @@ class TestUserDefinedQuantization(unittest.TestCase): activation_quant_type='moving_average_abs_max', weight_quant_type='channel_wise_abs_max', for_ci=True, - weight_quantize_func=pact) + weight_quantize_func=pact, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py index e6af06c6ceac00ba8c7bca7601129e56b4e766dd..929eb34994b4662d4e3da1c6880dd75e07cd467d 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py +++ b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py @@ -28,8 +28,7 @@ def _load_variable_data(scope, var_name): Load variable value from scope ''' var_node = scope.find_var(var_name) - assert var_node is not None, \ - "Cannot find " + var_name + " in scope." + assert var_node is not None, "Cannot find " + var_name + " in scope." return np.array(var_node.get_tensor()) @@ -37,8 +36,9 @@ def _set_variable_data(scope, place, var_name, np_value): ''' Set the value of var node by name, if the node exits, ''' - assert isinstance(np_value, np.ndarray), \ - 'The type of value should be numpy array.' + assert isinstance( + np_value, np.ndarray + ), 'The type of value should be numpy array.' 
var_node = scope.find_var(var_name) if var_node != None: tensor = var_node.get_tensor() @@ -46,11 +46,11 @@ def _set_variable_data(scope, place, var_name, np_value): class TestWeightQuantization(unittest.TestCase): - def setUp(self): self.weight_quantization_dir = 'weight_quantization' - self.cache_folder = os.path.join(DATA_HOME, - self.weight_quantization_dir) + self.cache_folder = os.path.join( + DATA_HOME, self.weight_quantization_dir + ) def download_model(self, model_name, data_url, data_md5): download(data_url, self.weight_quantization_dir, data_md5) @@ -66,21 +66,32 @@ class TestWeightQuantization(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) - def quantize_to_int(self, model_name, model_data_url, model_data_md5, - weight_bits, quantizable_op_type, weight_quantize_type, - generate_test_model, threshold_rate): - - model_dir = self.download_model(model_name, model_data_url, - model_data_md5) + def quantize_to_int( + self, + model_name, + model_data_url, + model_data_md5, + weight_bits, + quantizable_op_type, + weight_quantize_type, + generate_test_model, + threshold_rate, + ): + + model_dir = self.download_model( + model_name, model_data_url, model_data_md5 + ) load_model_dir = os.path.join(model_dir, model_name) timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) save_model_dir = os.path.join( os.getcwd(), - model_name + "_wq_" + str(weight_bits) + "_" + timestamp) + model_name + "_wq_" + str(weight_bits) + "_" + timestamp, + ) weight_quant = WeightQuantization(model_dir=load_model_dir) weight_quant.quantize_weight_to_int( @@ -89,39 +100,54 @@ class TestWeightQuantization(unittest.TestCase): quantizable_op_type=quantizable_op_type, weight_quantize_type=weight_quantize_type, generate_test_model=generate_test_model, - threshold_rate=threshold_rate) + threshold_rate=threshold_rate, + ) print("finish weight quantization for " + model_name + "\n") try: os.system("rm -rf {}".format(save_model_dir)) except Exception as e: - print("Failed to delete {} due to {}".format( - save_model_dir, str(e))) - - def convert_to_fp16(self, model_name, model_data_url, model_data_md5, - model_filename, params_filename): - model_dir = self.download_model(model_name, model_data_url, - model_data_md5) + print( + "Failed to delete {} due to {}".format(save_model_dir, str(e)) + ) + + def convert_to_fp16( + self, + model_name, + model_data_url, + model_data_md5, + model_filename, + params_filename, + ): + model_dir = self.download_model( + model_name, model_data_url, model_data_md5 + ) load_model_dir = os.path.join(model_dir, model_name) timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) - save_model_dir = os.path.join(os.getcwd(), - model_name + "_wq_fp16_" + timestamp) + save_model_dir = os.path.join( + os.getcwd(), model_name + "_wq_fp16_" + timestamp + ) - weight_quant = WeightQuantization(load_model_dir, model_filename, - params_filename) + weight_quant = WeightQuantization( + load_model_dir, model_filename, params_filename + ) weight_quant.convert_weight_to_fp16(save_model_dir) - print("finish converting the data type of weights to fp16 for " + - model_name) + print( + "finish converting the data type of weights to fp16 for " + + model_name + ) print("fp16 model saved in " + save_model_dir + "\n") input_data = np.ones([1, 3, 224, 224], dtype=np.float32) - res_fp32 = self.run_models(load_model_dir, 
model_filename, - params_filename, input_data, False) - res_fp16 = self.run_models(save_model_dir, model_filename, - params_filename, input_data, True) + res_fp32 = self.run_models( + load_model_dir, model_filename, params_filename, input_data, False + ) + res_fp16 = self.run_models( + save_model_dir, model_filename, params_filename, input_data, True + ) np.testing.assert_allclose( res_fp32, @@ -129,40 +155,60 @@ class TestWeightQuantization(unittest.TestCase): rtol=1e-05, atol=1e-08, equal_nan=True, - err_msg='Failed to test the accuracy of the fp32 and fp16 model.') + err_msg='Failed to test the accuracy of the fp32 and fp16 model.', + ) try: os.system("rm -rf {}".format(save_model_dir)) except Exception as e: - print("Failed to delete {} due to {}".format( - save_model_dir, str(e))) - - def run_models(self, model_dir, model_filename, params_filename, input_data, - is_fp16_model): + print( + "Failed to delete {} due to {}".format(save_model_dir, str(e)) + ) + + def run_models( + self, + model_dir, + model_filename, + params_filename, + input_data, + is_fp16_model, + ): print(model_dir) place = paddle.CPUPlace() exe = paddle.static.Executor(place) scope = paddle.static.Scope() with paddle.static.scope_guard(scope): - [inference_program, feed_target_names, fetch_targets] = \ - paddle.fluid.io.load_inference_model(model_dir, exe, - model_filename=model_filename, - params_filename=params_filename) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.fluid.io.load_inference_model( + model_dir, + exe, + model_filename=model_filename, + params_filename=params_filename, + ) if is_fp16_model: for var in inference_program.list_vars(): - if (var.type == paddle.fluid.core.VarDesc.VarType.RAW) or \ - (not var.persistable) or (var.name in ['feed', 'fetch']) \ - or (var.dtype != paddle.fluid.core.VarDesc.VarType.FP16): + if ( + (var.type == paddle.fluid.core.VarDesc.VarType.RAW) + or (not var.persistable) + or (var.name in ['feed', 'fetch']) + or (var.dtype != paddle.fluid.core.VarDesc.VarType.FP16) + ): continue tensor = _load_variable_data(scope, var.name) - _set_variable_data(scope, place, var.name, - tensor.astype(np.float32)) - - results = exe.run(inference_program, - feed={feed_target_names[0]: input_data}, - fetch_list=fetch_targets) + _set_variable_data( + scope, place, var.name, tensor.astype(np.float32) + ) + + results = exe.run( + inference_program, + feed={feed_target_names[0]: input_data}, + fetch_list=fetch_targets, + ) return np.array(results[0]) @@ -181,10 +227,16 @@ class TestWeightQuantizationMobilenetv1(TestWeightQuantization): weight_quantize_type = "abs_max" generate_test_model = True threshold_rate = 0.0 - self.quantize_to_int(self.nocomb_model_name, self.nocomb_model_data_url, - self.nocomb_model_data_md5, weight_bits, - quantizable_op_type, weight_quantize_type, - generate_test_model, threshold_rate) + self.quantize_to_int( + self.nocomb_model_name, + self.nocomb_model_data_url, + self.nocomb_model_data_md5, + weight_bits, + quantizable_op_type, + weight_quantize_type, + generate_test_model, + threshold_rate, + ) def test_weight_quantization_mobilenetv1_8bit_channel_wise_abs_max(self): weight_bits = 8 @@ -192,10 +244,16 @@ class TestWeightQuantizationMobilenetv1(TestWeightQuantization): weight_quantize_type = "channel_wise_abs_max" generate_test_model = True threshold_rate = 0.0 - self.quantize_to_int(self.nocomb_model_name, self.nocomb_model_data_url, - self.nocomb_model_data_md5, weight_bits, - quantizable_op_type, weight_quantize_type, - 
generate_test_model, threshold_rate) + self.quantize_to_int( + self.nocomb_model_name, + self.nocomb_model_data_url, + self.nocomb_model_data_md5, + weight_bits, + quantizable_op_type, + weight_quantize_type, + generate_test_model, + threshold_rate, + ) def test_weight_quantization_mobilenetv1_16bit_abs_max(self): weight_bits = 16 @@ -203,10 +261,16 @@ class TestWeightQuantizationMobilenetv1(TestWeightQuantization): weight_quantize_type = "abs_max" generate_test_model = False threshold_rate = 0 - self.quantize_to_int(self.nocomb_model_name, self.nocomb_model_data_url, - self.nocomb_model_data_md5, weight_bits, - quantizable_op_type, weight_quantize_type, - generate_test_model, threshold_rate) + self.quantize_to_int( + self.nocomb_model_name, + self.nocomb_model_data_url, + self.nocomb_model_data_md5, + weight_bits, + quantizable_op_type, + weight_quantize_type, + generate_test_model, + threshold_rate, + ) def test_weight_quantization_mobilenetv1_16bit_channel_wise_abs_max(self): weight_bits = 16 @@ -214,24 +278,38 @@ class TestWeightQuantizationMobilenetv1(TestWeightQuantization): weight_quantize_type = "channel_wise_abs_max" generate_test_model = False threshold_rate = 1e-9 - self.quantize_to_int(self.nocomb_model_name, self.nocomb_model_data_url, - self.nocomb_model_data_md5, weight_bits, - quantizable_op_type, weight_quantize_type, - generate_test_model, threshold_rate) + self.quantize_to_int( + self.nocomb_model_name, + self.nocomb_model_data_url, + self.nocomb_model_data_md5, + weight_bits, + quantizable_op_type, + weight_quantize_type, + generate_test_model, + threshold_rate, + ) def test_mobilenetv1_fp16_combined(self): model_filename = '__model__' params_filename = '__params__' - self.convert_to_fp16(self.comb_model_name, self.comb_model_data_url, - self.comb_model_data_md5, model_filename, - params_filename) + self.convert_to_fp16( + self.comb_model_name, + self.comb_model_data_url, + self.comb_model_data_md5, + model_filename, + params_filename, + ) def test_mobilenetv1_fp16_nocombined(self): model_filename = None params_filename = None - self.convert_to_fp16(self.nocomb_model_name, self.nocomb_model_data_url, - self.nocomb_model_data_md5, model_filename, - params_filename) + self.convert_to_fp16( + self.nocomb_model_name, + self.nocomb_model_data_url, + self.nocomb_model_data_md5, + model_filename, + params_filename, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/sparsity/__init__.py b/python/paddle/fluid/contrib/sparsity/__init__.py index 43c645057e8b558e252b4e5260bbe235122be129..fcb7acda377a0b940af51e58d2021c0fc70bacec 100644 --- a/python/paddle/fluid/contrib/sparsity/__init__.py +++ b/python/paddle/fluid/contrib/sparsity/__init__.py @@ -30,8 +30,19 @@ from .asp import reset_excluded_layers from .supported_layer_list import add_supported_layer __all__ = [ - 'calculate_density', 'check_mask_1d', 'get_mask_1d', 'check_mask_2d', - 'get_mask_2d_greedy', 'get_mask_2d_best', 'create_mask', 'check_sparsity', - 'MaskAlgo', 'CheckMethod', 'decorate', 'prune_model', 'set_excluded_layers', - 'reset_excluded_layers', 'add_supported_layer' + 'calculate_density', + 'check_mask_1d', + 'get_mask_1d', + 'check_mask_2d', + 'get_mask_2d_greedy', + 'get_mask_2d_best', + 'create_mask', + 'check_sparsity', + 'MaskAlgo', + 'CheckMethod', + 'decorate', + 'prune_model', + 'set_excluded_layers', + 'reset_excluded_layers', + 'add_supported_layer', ] diff --git a/python/paddle/fluid/contrib/sparsity/asp.py b/python/paddle/fluid/contrib/sparsity/asp.py index 
ac97bcecf323f970961464c016afcec3cdb6e1ad..d770bd36e3980942cca37d25a825ca000afd257f 100644 --- a/python/paddle/fluid/contrib/sparsity/asp.py +++ b/python/paddle/fluid/contrib/sparsity/asp.py @@ -25,14 +25,19 @@ from paddle.fluid import global_scope, program_guard, layers from paddle.fluid.initializer import ConstantInitializer from paddle.fluid.contrib import sparsity from paddle.fluid import core -from paddle.fluid.contrib.sparsity.supported_layer_list import supported_layers_and_prune_func_map +from paddle.fluid.contrib.sparsity.supported_layer_list import ( + supported_layers_and_prune_func_map, +) from paddle.fluid.contrib.sparsity.supported_layer_list import _default_pruning OpRole = core.op_proto_and_checker_maker.OpRole OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName() __all__ = [ - 'decorate', 'prune_model', 'set_excluded_layers', 'reset_excluded_layers' + 'decorate', + 'prune_model', + 'set_excluded_layers', + 'reset_excluded_layers', ] @@ -118,8 +123,9 @@ def set_excluded_layers(param_names, main_program=None): """ if main_program is None: main_program = paddle.static.default_main_program() - ASPHelper.set_excluded_layers(param_names=param_names, - main_program=main_program) + ASPHelper.set_excluded_layers( + param_names=param_names, main_program=main_program + ) def reset_excluded_layers(main_program=None): @@ -437,32 +443,39 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True): MaskAlgo_mapping = { 'mask_1d': sparsity.MaskAlgo.MASK_1D, 'mask_2d_greedy': sparsity.MaskAlgo.MASK_2D_GREEDY, - 'mask_2d_best': sparsity.MaskAlgo.MASK_2D_BEST + 'mask_2d_best': sparsity.MaskAlgo.MASK_2D_BEST, } - assert (mask_algo in MaskAlgo_mapping), \ - 'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]' + assert ( + mask_algo in MaskAlgo_mapping + ), 'The "mask_algo" should be one of ["mask_1d", "mask_2d_greedy", "mask_2d_best"]' prune_func = None if isinstance(model, paddle.nn.Layer): prune_func = ASPHelper.prune_model_by_layer elif isinstance(model, paddle.static.Program): prune_func = ASPHelper.prune_model_by_program - if hasattr(model, "distributed_info_") and \ - model.distributed_info_["sharding_degree"] > 1 and \ - paddle.fluid.is_compiled_with_cuda(): + if ( + hasattr(model, "distributed_info_") + and model.distributed_info_["sharding_degree"] > 1 + and paddle.fluid.is_compiled_with_cuda() + ): gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0)) place = paddle.CUDAPlace(gpu_id) else: raise TypeError( - "model should be paddle.nn.Layer or paddle.static.Program, but got {}" - .format(type(model))) + "model should be paddle.nn.Layer or paddle.static.Program, but got {}".format( + type(model) + ) + ) - return prune_func(place, - model, - n=n, - m=m, - mask_algo=MaskAlgo_mapping[mask_algo], - with_mask=with_mask) + return prune_func( + place, + model, + n=n, + m=m, + mask_algo=MaskAlgo_mapping[mask_algo], + with_mask=with_mask, + ) class ProgramASPInfo(object): @@ -547,18 +560,21 @@ class ASPHelper(object): # default_main_program as the key. 
main_prog = paddle.static.default_main_program() startup_prog = paddle.static.default_startup_program() - ASPHelper._create_mask_variables(main_prog, startup_prog, - optimizer._parameter_list) + ASPHelper._create_mask_variables( + main_prog, startup_prog, optimizer._parameter_list + ) return OptimizerWithSparsityGuarantee(optimizer) @classmethod - def prune_model_by_program(cls, - place, - main_program=None, - n=2, - m=4, - mask_algo=sparsity.MaskAlgo.MASK_1D, - with_mask=True): + def prune_model_by_program( + cls, + place, + main_program=None, + n=2, + m=4, + mask_algo=sparsity.MaskAlgo.MASK_1D, + with_mask=True, + ): r""" This is the implementation of `sparsity.prune_model`, for details please see explanation in `sparsity.prune_model`. """ @@ -574,34 +590,43 @@ class ASPHelper(object): prune_func = ASPHelper._get_prune_func_by_name(param.name) - weight_pruned_nparray, weight_sparse_mask = \ - prune_func(weight_nparray, m, n, mask_algo, param.name) + weight_pruned_nparray, weight_sparse_mask = prune_func( + weight_nparray, m, n, mask_algo, param.name + ) weight_pruned_nparray = weight_pruned_nparray.astype( - weight_nparray.dtype) + weight_nparray.dtype + ) weight_tensor.set(weight_pruned_nparray, place) if with_mask: weight_mask_param = global_scope().find_var( - ASPHelper._get_mask_name(param.name)) - assert weight_mask_param is not None, \ - 'Cannot find {} variable, please call optimizer.minimize (' \ - 'paddle.sparsity.decorate(optimizer).minimize(loss)' \ - ' and initialization (exe.run(startup_program)) first!'.format(ASPHelper._get_mask_name(param.name)) + ASPHelper._get_mask_name(param.name) + ) + assert weight_mask_param is not None, ( + 'Cannot find {} variable, please call optimizer.minimize (' + 'paddle.sparsity.decorate(optimizer).minimize(loss)' + ' and initialization (exe.run(startup_program)) first!'.format( + ASPHelper._get_mask_name(param.name) + ) + ) weight_mask_tensor = weight_mask_param.get_tensor() weight_sparse_mask = weight_sparse_mask.astype( - np.array(weight_mask_tensor).dtype) + np.array(weight_mask_tensor).dtype + ) weight_mask_tensor.set(weight_sparse_mask, place) asp_info.update_masks(param.name, weight_sparse_mask) return asp_info.masks.copy() @classmethod - def prune_model_by_layer(cls, - place, - layer, - n=2, - m=4, - mask_algo=sparsity.MaskAlgo.MASK_1D, - with_mask=True): + def prune_model_by_layer( + cls, + place, + layer, + n=2, + m=4, + mask_algo=sparsity.MaskAlgo.MASK_1D, + with_mask=True, + ): r""" This is the implementation of `sparsity.prune_model`, for details please see explanation in `sparsity.prune_model`. 
""" @@ -615,19 +640,25 @@ class ASPHelper(object): prune_func = ASPHelper._get_prune_func_by_name(param.name) - weight_pruned_nparray, weight_sparse_mask = \ - prune_func(weight_nparray, m, n, mask_algo, param.name) + weight_pruned_nparray, weight_sparse_mask = prune_func( + weight_nparray, m, n, mask_algo, param.name + ) weight_pruned_nparray = weight_pruned_nparray.astype( - weight_nparray.dtype) + weight_nparray.dtype + ) param.set_value(weight_pruned_nparray) if with_mask: weight_mask_param = asp_info.mask_vars.get( - param.name, None) - assert weight_mask_param is not None, \ - 'Cannot find {} variable, please call sparsity.decorate() to' \ - ' decorate your optimizer first!'.format(ASPHelper._get_mask_name(param.name)) + param.name, None + ) + assert weight_mask_param is not None, ( + 'Cannot find {} variable, please call sparsity.decorate() to' + ' decorate your optimizer first!'.format( + ASPHelper._get_mask_name(param.name) + ) + ) weight_mask_param.set_value(weight_sparse_mask) asp_info.update_masks(param.name, weight_sparse_mask) @@ -639,14 +670,17 @@ class ASPHelper(object): target_program = None for param in layer.parameters(): target_program = param.block.program - assert target_program is not None, \ - 'Cannot get paddle.static.Program from Paddle.nn.Layer.' - return ASPHelper.prune_model_by_program(place, - target_program, - n=n, - m=m, - mask_algo=mask_algo, - with_mask=with_mask) + assert ( + target_program is not None + ), 'Cannot get paddle.static.Program from Paddle.nn.Layer.' + return ASPHelper.prune_model_by_program( + place, + target_program, + n=n, + m=m, + mask_algo=mask_algo, + with_mask=with_mask, + ) @staticmethod def _get_mask_name(param_name): @@ -728,13 +762,16 @@ class ASPHelper(object): param_name_no_weight_suffix = param_name_list[0] param_type_suffix = param_name_list[1] - layer_name = param_name_no_weight_suffix[:param_name_no_weight_suffix. - rfind('_')] + layer_name = param_name_no_weight_suffix[ + : param_name_no_weight_suffix.rfind('_') + ] if ASPHelper.PADDLE_WEIGHT_SUFFIX not in param_type_suffix: return False - if param_name_no_weight_suffix in supported_layers_and_prune_func_map or \ - layer_name in supported_layers_and_prune_func_map: + if ( + param_name_no_weight_suffix in supported_layers_and_prune_func_map + or layer_name in supported_layers_and_prune_func_map + ): return True return False @@ -745,23 +782,27 @@ class ASPHelper(object): param_name_no_weight_suffix = param_name.split('.')[0] if func is None: func = supported_layers_and_prune_func_map.get( - param_name_no_weight_suffix, None) + param_name_no_weight_suffix, None + ) if func is None: - layer_name = param_name_no_weight_suffix[: - param_name_no_weight_suffix - .rfind('_')] + layer_name = param_name_no_weight_suffix[ + : param_name_no_weight_suffix.rfind('_') + ] func = supported_layers_and_prune_func_map.get( - layer_name, _default_pruning) + layer_name, _default_pruning + ) return func @classmethod - def _minimize(cls, - optimizer, - loss, - main_program=None, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def _minimize( + cls, + optimizer, + loss, + main_program=None, + startup_program=None, + parameter_list=None, + no_grad_set=None, + ): r""" This function is a decorator of `minimize` function in `Optimizer`. 
There are three steps: @@ -792,7 +833,8 @@ class ASPHelper(object): startup_program = paddle.static.default_startup_program() optimizer_ops, params_and_grads = optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set=no_grad_set) + loss, startup_program, parameter_list, no_grad_set=no_grad_set + ) params_only = [pg[0] for pg in params_and_grads] cls._create_mask_variables(main_program, startup_program, params_only) @@ -819,8 +861,9 @@ class ASPHelper(object): optimizer.step() main_prog = paddle.static.default_main_program() with paddle.fluid.dygraph.no_grad(): - ASPHelper._insert_sparse_mask_ops(main_prog, - optimizer._parameter_list) + ASPHelper._insert_sparse_mask_ops( + main_prog, optimizer._parameter_list + ) @classmethod def _create_mask_variables(cls, main_program, startup_program, params): @@ -842,7 +885,8 @@ class ASPHelper(object): name=ASPHelper._get_mask_name(param.name), shape=param.shape, dtype=param.dtype, - default_initializer=ConstantInitializer(value=1.0)) + default_initializer=ConstantInitializer(value=1.0), + ) mask_param.stop_gradient = True mask_param.trainable = False asp_info.update_mask_vars(param.name, mask_param) @@ -861,17 +905,16 @@ class ASPHelper(object): asp_info = cls._get_program_asp_info(main_program) for param in params: if param.name in asp_info.mask_vars: - block.append_op(type='elementwise_mul', - inputs={ - "X": param, - 'Y': asp_info.mask_vars[param.name] - }, - outputs={'Out': param}, - attrs={ - 'axis': -1, - 'use_mkldnn': False, - OP_ROLE_KEY: int(OpRole.Optimize) - }) + block.append_op( + type='elementwise_mul', + inputs={"X": param, 'Y': asp_info.mask_vars[param.name]}, + outputs={'Out': param}, + attrs={ + 'axis': -1, + 'use_mkldnn': False, + OP_ROLE_KEY: int(OpRole.Optimize), + }, + ) class OptimizerWithSparsityGuarantee(object): @@ -889,11 +932,9 @@ class OptimizerWithSparsityGuarantee(object): def __getattr__(self, item): return getattr(self._optimizer, item) - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): r""" This function is to call `ASPHelper.minimize()` and return its return @@ -906,11 +947,13 @@ class OptimizerWithSparsityGuarantee(object): list: operators from :attr:`optimizer`.minimize(:attr:`loss`). list: pairs of parameters and their gradients. 
""" - return ASPHelper._minimize(self._optimizer, - loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + return ASPHelper._minimize( + self._optimizer, + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) @dygraph_only def step(self): @@ -940,7 +983,8 @@ class OptimizerWithSparsityGuarantee(object): """ state_dict = self._optimizer.state_dict() asp_info = ASPHelper._get_program_asp_info( - paddle.static.default_main_program()) + paddle.static.default_main_program() + ) for param_name, var in asp_info.mask_vars.items(): state_dict.update({ASPHelper._get_mask_name(param_name): var}) return state_dict @@ -955,11 +999,13 @@ class OptimizerWithSparsityGuarantee(object): None """ asp_info = ASPHelper._get_program_asp_info( - paddle.static.default_main_program()) + paddle.static.default_main_program() + ) for param_name, var in asp_info.mask_vars.items(): param_mask_name = ASPHelper._get_mask_name(param_name) - assert param_mask_name in state_dict, \ - "The {} is not found.".format(param_mask_name) + assert param_mask_name in state_dict, "The {} is not found.".format( + param_mask_name + ) var.set_value(state_dict[param_mask_name]) asp_info.update_masks(param_name, var.numpy()) return self._optimizer.set_state_dict(state_dict) diff --git a/python/paddle/fluid/contrib/sparsity/supported_layer_list.py b/python/paddle/fluid/contrib/sparsity/supported_layer_list.py index 38dd428e0f0953f93167add965059d55f79466ba..f55a877b4b7f364e476d06050ed7624a1f3143c6 100644 --- a/python/paddle/fluid/contrib/sparsity/supported_layer_list.py +++ b/python/paddle/fluid/contrib/sparsity/supported_layer_list.py @@ -23,9 +23,9 @@ from ...log_helper import get_logger __all__ = ['add_supported_layer'] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def _default_pruning(weight_nparray, m, n, func_name, param_name): @@ -38,13 +38,17 @@ def _default_pruning(weight_nparray, m, n, func_name, param_name): exlude_cond_shape4 = len(shape) == 4 and shape[1] < m if exlude_cond_shape2: _logger.warning( - '{} is not pruned because the first dimension of {} is smaller than {}' - .format(param_name, shape, m)) + '{} is not pruned because the first dimension of {} is smaller than {}'.format( + param_name, shape, m + ) + ) return weight_pruned_nparray, weight_sparse_mask if exlude_cond_shape4: _logger.warning( - '{} is not pruned because the second dimension of {} is smaller than {}' - .format(param_name, shape, m)) + '{} is not pruned because the second dimension of {} is smaller than {}'.format( + param_name, shape, m + ) + ) return weight_pruned_nparray, weight_sparse_mask checked_func_name = sparsity.CheckMethod.get_checking_method(func_name) @@ -60,13 +64,13 @@ def _default_pruning(weight_nparray, m, n, func_name, param_name): # sparsity/utils is row-major pruning. That is the reason we have to transpose weight # matrices beforce invoking create_mask. Then we transpose the result mask to make # sure its shape to be the same as the input weight. 
- weight_sparse_mask = sparsity.create_mask(weight_nparray.T, - func_name=func_name, - n=n, - m=m).T + weight_sparse_mask = sparsity.create_mask( + weight_nparray.T, func_name=func_name, n=n, m=m + ).T weight_pruned_nparray = np.multiply(weight_nparray, weight_sparse_mask) - assert sparsity.check_sparsity(weight_pruned_nparray.T, n=n, m=m, func_name=checked_func_name), \ - 'Pruning {} weight matrix failure!!!'.format(param_name) + assert sparsity.check_sparsity( + weight_pruned_nparray.T, n=n, m=m, func_name=checked_func_name + ), 'Pruning {} weight matrix failure!!!'.format(param_name) return weight_pruned_nparray, weight_sparse_mask @@ -93,13 +97,18 @@ def add_supported_layer(layer, pruning_func=None): name = layer elif isinstance(layer, paddle.fluid.dygraph.layers.Layer): name = paddle.fluid.dygraph.layers._convert_camel_to_snake( - type(layer).__name__) + type(layer).__name__ + ) elif issubclass(layer, paddle.fluid.dygraph.layers.Layer): name = paddle.fluid.dygraph.layers._convert_camel_to_snake( - layer.__name__) + layer.__name__ + ) else: - assert "The type of layer should be string of Layer, but got {}!".format( - type(layer)) + assert ( + "The type of layer should be string of Layer, but got {}!".format( + type(layer) + ) + ) if pruning_func is None: pruning_func = _default_pruning _supported_layers_and_prune_func_map_lock.acquire() diff --git a/python/paddle/fluid/contrib/sparsity/utils.py b/python/paddle/fluid/contrib/sparsity/utils.py index 82846920031a4a5036724c977b520f39c4be2619..b5be3887380aeac9c948aadda0f5ef30afcec0fa 100644 --- a/python/paddle/fluid/contrib/sparsity/utils.py +++ b/python/paddle/fluid/contrib/sparsity/utils.py @@ -25,9 +25,16 @@ from itertools import permutations import threading __all__ = [ - 'calculate_density', 'check_mask_1d', 'get_mask_1d', 'check_mask_2d', - 'get_mask_2d_greedy', 'get_mask_2d_best', 'create_mask', 'check_sparsity', - 'MaskAlgo', 'CheckMethod' + 'calculate_density', + 'check_mask_1d', + 'get_mask_1d', + 'check_mask_2d', + 'get_mask_2d_greedy', + 'get_mask_2d_best', + 'create_mask', + 'check_sparsity', + 'MaskAlgo', + 'CheckMethod', ] @@ -74,8 +81,9 @@ class CheckMethod(Enum): CheckMethod.get_checking_method(MaskAlgo.MASK_2D_BEST) # CheckMethod.CHECK_2D """ - assert isinstance(mask_algo, MaskAlgo), \ - "mask_algo should be MaskAlgo type" + assert isinstance( + mask_algo, MaskAlgo + ), "mask_algo should be MaskAlgo type" if mask_algo == MaskAlgo.MASK_1D: return CheckMethod.CHECK_1D else: @@ -124,7 +132,7 @@ def _reshape_1d(mat, m): remainder = mat.shape[1] % m if mat.shape[1] % m > 0: mat_padded = np.zeros((mat.shape[0], mat.shape[1] + (m - remainder))) - mat_padded[:, :mat.shape[1]] = mat + mat_padded[:, : mat.shape[1]] = mat shape = mat_padded.shape return mat_padded.reshape(-1, m), shape else: @@ -211,7 +219,7 @@ def get_mask_1d(mat, n, m): min_order_indices = np.argsort(np.absolute(sub_mat)) mask_flattern[i, min_order_indices[:n].tolist()] = 0 mask_flattern = mask_flattern.reshape(shape) - mask[:, :] = mask_flattern[:, :mat.shape[1]] + mask[:, :] = mask_flattern[:, : mat.shape[1]] return mask @@ -237,12 +245,12 @@ def _reshape_2d(mat, m): remainder_0 = mat.shape[0] % m remainder_1 = mat.shape[1] % m - new_shape = (mat.shape[0] if remainder_0 == 0 \ - else mat.shape[0] + (m - remainder_0), - mat.shape[1] if remainder_1 == 0 \ - else mat.shape[1] + (m - remainder_1)) + new_shape = ( + mat.shape[0] if remainder_0 == 0 else mat.shape[0] + (m - remainder_0), + mat.shape[1] if remainder_1 == 0 else mat.shape[1] + (m - remainder_1), + ) 
mat_padded = np.zeros(new_shape) - mat_padded[:mat.shape[0], :mat.shape[1]] = mat + mat_padded[: mat.shape[0], : mat.shape[1]] = mat mat_flattern = np.empty(new_shape).reshape(-1, m * m) curr_idx = 0 @@ -250,9 +258,9 @@ def _reshape_2d(mat, m): row_end = row_start + m for col_start in range(0, mat_padded.shape[1], m): col_end = col_start + m - sub_mat = np.squeeze(mat_padded[row_start:row_end, \ - col_start:col_end] \ - .reshape(-1)) + sub_mat = np.squeeze( + mat_padded[row_start:row_end, col_start:col_end].reshape(-1) + ) mat_flattern[curr_idx] = sub_mat curr_idx += 1 return mat_flattern, mat_padded.shape @@ -302,8 +310,9 @@ def check_mask_2d(mat, n, m): mat_padded, shape = _reshape_2d(mat, m) for sub_mat in mat_padded: sub_mask = np.absolute(np.squeeze(sub_mat.reshape(m, m))) > 0 - if (np.sum(np.sum(sub_mask, axis=1) > (m-n)) != 0) and \ - (np.sum(np.sum(sub_mask, axis=0) > (m-n)) != 0): + if (np.sum(np.sum(sub_mask, axis=1) > (m - n)) != 0) and ( + np.sum(np.sum(sub_mask, axis=0) > (m - n)) != 0 + ): return False return True @@ -348,15 +357,17 @@ def get_mask_2d_greedy(mat, n, m): sub_mask = np.squeeze(mask_padded[idx]) min_order_1d_indices = np.argsort(sub_mat) - min_order_2d_indices = [(int(x / m), x % m) - for x in min_order_1d_indices] + min_order_2d_indices = [ + (int(x / m), x % m) for x in min_order_1d_indices + ] row_counter = collections.Counter() col_counter = collections.Counter() for i in range(len(min_order_1d_indices) - 1, -1, -1): matrix_entry = min_order_2d_indices[i] - if (row_counter[matrix_entry[0]] == n) or \ - (col_counter[matrix_entry[1]] == n): + if (row_counter[matrix_entry[0]] == n) or ( + col_counter[matrix_entry[1]] == n + ): continue sub_mask[matrix_entry[0], matrix_entry[1]] = 1.0 @@ -371,7 +382,7 @@ def get_mask_2d_greedy(mat, n, m): col_end = col_start + m mask[row_start:row_end, col_start:col_end] = mask_padded[curr_idx] curr_idx += 1 - return mask[:mat.shape[0], :mat.shape[1]] + return mask[: mat.shape[0], : mat.shape[1]] _valid_2d_patterns_lock = threading.Lock() @@ -404,8 +415,11 @@ def _compute_valid_2d_patterns(n, m): patterns = patterns + patterns patterns = np.asarray(list(set(permutations(patterns, m)))) - valid = ((patterns.sum(axis=1) <= n).sum( - axis=1) == m).nonzero()[0].reshape(-1) + valid = ( + ((patterns.sum(axis=1) <= n).sum(axis=1) == m) + .nonzero()[0] + .reshape(-1) + ) valid_patterns = np.empty((valid.shape[0], m, m)) valid_patterns[:] = patterns[valid[:]] @@ -452,9 +466,10 @@ def get_mask_2d_best(mat, n, m): mat_flattern, shape = _reshape_2d(mat, m) mask_flattern = np.ones_like(mat_flattern).reshape(-1, m, m) - pmax = np.argmax(np.matmul(mat_flattern, - patterns.reshape(patterns.shape[0], m * m).T), - axis=1) + pmax = np.argmax( + np.matmul(mat_flattern, patterns.reshape(patterns.shape[0], m * m).T), + axis=1, + ) mask_flattern[:] = patterns[pmax[:]] mask = np.empty(shape) @@ -466,7 +481,7 @@ def get_mask_2d_best(mat, n, m): col_end = col_start + m mask[row_start:row_end, col_start:col_end] = mask_flattern[curr_idx] curr_idx += 1 - return mask[:mat.shape[0], :mat.shape[1]] + return mask[: mat.shape[0], : mat.shape[1]] def create_mask(tensor, func_name=MaskAlgo.MASK_1D, n=2, m=4): @@ -506,9 +521,10 @@ def create_mask(tensor, func_name=MaskAlgo.MASK_1D, n=2, m=4): dtype = tensor.dtype t = tensor.astype(float) - assert isinstance(func_name, MaskAlgo), \ - "func_name argumet of create_mask is only accepted as type MaskAlgo. 
" \ - "But got {}".format(type(func_name)) + assert isinstance(func_name, MaskAlgo), ( + "func_name argumet of create_mask is only accepted as type MaskAlgo. " + "But got {}".format(type(func_name)) + ) func = getattr(sys.modules[__name__], func_name.value, None) if len(shape) == 1: t = t.reshape(1, shape[0]) @@ -518,14 +534,20 @@ def create_mask(tensor, func_name=MaskAlgo.MASK_1D, n=2, m=4): t = t.reshape(shape[0] * shape[1], shape[2]) # 4d-tensor conv (h, w, in, out) -> (h*w*out, in) in GemmConvKernel Op elif len(shape) == 4: - t = t.transpose([0, 1, 3, 2]).reshape(shape[0] * shape[1] * shape[3], - shape[2]) + t = t.transpose([0, 1, 3, 2]).reshape( + shape[0] * shape[1] * shape[3], shape[2] + ) mask = func(t, n=n, m=m) - return mask.reshape([shape[0], shape[1], shape[3], - shape[2]]).transpose([0, 1, 3, 2]).astype(dtype) + return ( + mask.reshape([shape[0], shape[1], shape[3], shape[2]]) + .transpose([0, 1, 3, 2]) + .astype(dtype) + ) else: - raise ValueError("The dimension of input tensor is not supported in create_mask, " \ - "Only dimension < 4 is supported but got {}".format(len(shape))) + raise ValueError( + "The dimension of input tensor is not supported in create_mask, " + "Only dimension < 4 is supported but got {}".format(len(shape)) + ) mask = func(t, n=n, m=m) return mask.reshape(shape).astype(dtype) @@ -564,9 +586,10 @@ def check_sparsity(tensor, func_name=CheckMethod.CHECK_1D, n=2, m=4): shape = tensor.shape t = tensor.astype(float) - assert type(func_name) == CheckMethod, \ - "func_name argumet of check_sparsity is only accepted as type CheckMethod. " \ - "But got {}".format(type(func_name)) + assert type(func_name) == CheckMethod, ( + "func_name argumet of check_sparsity is only accepted as type CheckMethod. " + "But got {}".format(type(func_name)) + ) func = getattr(sys.modules[__name__], func_name.value, None) if len(shape) == 1: t = t.reshape(1, shape[0]) @@ -576,10 +599,13 @@ def check_sparsity(tensor, func_name=CheckMethod.CHECK_1D, n=2, m=4): t = t.reshape(shape[0] * shape[1], shape[2]) # 4d-tensor conv (h, w, in, out) -> (h*w*out, in) in GemmConvKernel Op elif len(shape) == 4: - t = t.transpose([0, 1, 3, - 2]).reshape([shape[0] * shape[1] * shape[3], shape[2]]) + t = t.transpose([0, 1, 3, 2]).reshape( + [shape[0] * shape[1] * shape[3], shape[2]] + ) else: - raise ValueError("The dimension of input tensor is not supported in create_mask, " \ - "Only dimension < 4 is supported but got {}".format(len(shape))) + raise ValueError( + "The dimension of input tensor is not supported in create_mask, " + "Only dimension < 4 is supported but got {}".format(len(shape)) + ) return func(t, n=n, m=m) diff --git a/python/paddle/fluid/contrib/tests/test_amp_list.py b/python/paddle/fluid/contrib/tests/test_amp_list.py index 93c99b5ea1c8ffd2aa394c6f2dd8977c5deee9f5..4a30185229a9765787d5f8c2315c2a5ad86fdfc7 100644 --- a/python/paddle/fluid/contrib/tests/test_amp_list.py +++ b/python/paddle/fluid/contrib/tests/test_amp_list.py @@ -14,11 +14,12 @@ import paddle import unittest -from paddle.fluid.contrib.mixed_precision.fp16_lists import AutoMixedPrecisionLists +from paddle.fluid.contrib.mixed_precision.fp16_lists import ( + AutoMixedPrecisionLists, +) class TestAMPList(unittest.TestCase): - def test_main(self): custom_white_list = [ 'lookup_table', @@ -31,8 +32,11 @@ class TestAMPList(unittest.TestCase): self.assertTrue(op not in amp_list.unsupported_list) default_black_list = [ - 'linear_interp_v2', 'nearest_interp_v2', 'bilinear_interp_v2', - 'bicubic_interp_v2', 'trilinear_interp_v2' 
+ 'linear_interp_v2', + 'nearest_interp_v2', + 'bilinear_interp_v2', + 'bicubic_interp_v2', + 'trilinear_interp_v2', ] for op in default_black_list: self.assertTrue(op in amp_list.black_list) diff --git a/python/paddle/fluid/contrib/tests/test_bf16_utils.py b/python/paddle/fluid/contrib/tests/test_bf16_utils.py index c456b1263ce23a3d7b052781288909c367146c49..5d528f226596ce65097eb0c4b7a71a17778c03c7 100644 --- a/python/paddle/fluid/contrib/tests/test_bf16_utils.py +++ b/python/paddle/fluid/contrib/tests/test_bf16_utils.py @@ -22,7 +22,6 @@ paddle.enable_static() class AMPTest(unittest.TestCase): - def setUp(self): self.bf16_list = copy.copy(amp.bf16.amp_lists.bf16_list) self.fp32_list = copy.copy(amp.bf16.amp_lists.fp32_list) @@ -63,7 +62,8 @@ class AMPTest(unittest.TestCase): self.fp32_list.add('matmul_v2') self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'matmul_v2'}) + custom_fp32_list={'matmul_v2'} + ) def test_amp_lists_5(self): # 5. w=None, b={'matmul_v2'} @@ -71,43 +71,47 @@ class AMPTest(unittest.TestCase): self.bf16_list.remove('matmul_v2') self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'matmul_v2'}) + custom_fp32_list={'matmul_v2'} + ) def test_amp_lists_6(self): # 6. w=None, b={'lstm'} self.fp32_list.add('lstm') self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'lstm'}) + custom_fp32_list={'lstm'} + ) def test_amp_lists_7(self): self.fp32_list.add('reshape2') self.gray_list.remove('reshape2') self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'reshape2'}) + custom_fp32_list={'reshape2'} + ) def test_amp_list_8(self): self.bf16_list.add('reshape2') self.gray_list.remove('reshape2') self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16( - custom_bf16_list={'reshape2'}) + custom_bf16_list={'reshape2'} + ) class AMPTest2(unittest.TestCase): - def test_amp_lists_(self): # 7. 
w={'lstm'} b={'lstm'} # raise ValueError - self.assertRaises(ValueError, amp.bf16.AutoMixedPrecisionListsBF16, - {'lstm'}, {'lstm'}) + self.assertRaises( + ValueError, amp.bf16.AutoMixedPrecisionListsBF16, {'lstm'}, {'lstm'} + ) def test_find_op_index(self): block = fluid.default_main_program().global_block() op_desc = core.OpDesc() idx = amp.bf16.amp_utils.find_op_index(block.desc, op_desc) - assert (idx == -1) + assert idx == -1 def test_is_in_fp32_varnames(self): block = fluid.default_main_program().global_block() @@ -115,17 +119,19 @@ class AMPTest2(unittest.TestCase): var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') var3 = block.create_var(name="Z", shape=[3], dtype='float32') - op1 = block.append_op(type="abs", - inputs={"X": [var1]}, - outputs={"Out": [var2]}) - op2 = block.append_op(type="abs", - inputs={"X": [var2]}, - outputs={"Out": [var3]}) + op1 = block.append_op( + type="abs", inputs={"X": [var1]}, outputs={"Out": [var2]} + ) + op2 = block.append_op( + type="abs", inputs={"X": [var2]}, outputs={"Out": [var3]} + ) amp_lists_1 = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_varnames={'X'}) + custom_fp32_varnames={'X'} + ) assert amp.bf16.amp_utils._is_in_fp32_varnames(op1, amp_lists_1) amp_lists_2 = amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_varnames={'Y'}) + custom_fp32_varnames={'Y'} + ) assert amp.bf16.amp_utils._is_in_fp32_varnames(op2, amp_lists_2) assert amp.bf16.amp_utils._is_in_fp32_varnames(op1, amp_lists_2) @@ -136,14 +142,14 @@ class AMPTest2(unittest.TestCase): var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') var3 = block.create_var(name="Z", shape=[3], dtype='float32') - op1 = block.append_op(type="abs", - inputs={"X": [var1]}, - outputs={"Out": [var2]}) - op2 = block.append_op(type="abs", - inputs={"X": [var2]}, - outputs={"Out": [var3]}) + op1 = block.append_op( + type="abs", inputs={"X": [var1]}, outputs={"Out": [var2]} + ) + op2 = block.append_op( + type="abs", inputs={"X": [var2]}, outputs={"Out": [var3]} + ) res = amp.bf16.amp_utils.find_true_post_op(block.ops, op1, "Y") - assert (res == [op2]) + assert res == [op2] def test_find_true_post_op_with_search_all(self): program = fluid.Program() @@ -152,27 +158,23 @@ class AMPTest2(unittest.TestCase): var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') - inititializer_op = startup_block._prepend_op(type="fill_constant", - outputs={"Out": var1}, - attrs={ - "shape": var1.shape, - "dtype": var1.dtype, - "value": 1.0 - }) - - op1 = block.append_op(type="abs", - inputs={"X": [var1]}, - outputs={"Out": [var2]}) - result = amp.bf16.amp_utils.find_true_post_op(block.ops, - inititializer_op, - "X", - search_all=False) - assert (len(result) == 0) - result = amp.bf16.amp_utils.find_true_post_op(block.ops, - inititializer_op, - "X", - search_all=True) - assert (result == [op1]) + inititializer_op = startup_block._prepend_op( + type="fill_constant", + outputs={"Out": var1}, + attrs={"shape": var1.shape, "dtype": var1.dtype, "value": 1.0}, + ) + + op1 = block.append_op( + type="abs", inputs={"X": [var1]}, outputs={"Out": [var2]} + ) + result = amp.bf16.amp_utils.find_true_post_op( + block.ops, inititializer_op, "X", search_all=False + ) + assert len(result) == 0 + result = amp.bf16.amp_utils.find_true_post_op( + block.ops, inititializer_op, "X", search_all=True + ) + assert result == [op1] if 
__name__ == '__main__': diff --git a/python/paddle/fluid/contrib/tests/test_correlation.py b/python/paddle/fluid/contrib/tests/test_correlation.py index d3ab5dff601ec5a679d7229b61afda7ce6ef8da4..553bb912bc0451bee78b5505b65bbda6efffb65c 100644 --- a/python/paddle/fluid/contrib/tests/test_correlation.py +++ b/python/paddle/fluid/contrib/tests/test_correlation.py @@ -21,22 +21,28 @@ import paddle paddle.enable_static() -def corr(x_1, - x_2, - pad_size=4, - kernel_size=1, - max_displacement=4, - stride1=1, - stride2=1, - corr_multiply=1): +def corr( + x_1, + x_2, + pad_size=4, + kernel_size=1, + max_displacement=4, + stride1=1, + stride2=1, + corr_multiply=1, +): K = kernel_size - rinput1 = np.pad(x_1, ((0, 0), (0, 0), (pad_size, pad_size), - (pad_size, pad_size)), - mode='constant') - rinput2 = np.pad(x_2, ((0, 0), (0, 0), (pad_size, pad_size), - (pad_size, pad_size)), - mode='constant') + rinput1 = np.pad( + x_1, + ((0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)), + mode='constant', + ) + rinput2 = np.pad( + x_2, + ((0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)), + mode='constant', + ) rinput1 = np.transpose(rinput1, (0, 2, 3, 1)) rinput2 = np.transpose(rinput2, (0, 2, 3, 1)) B = int(rinput1.shape[0]) @@ -55,17 +61,23 @@ def corr(x_1, y1_index = j + pad_size x2_index = x1_index + k y2_index = y1_index + l - output[b, l + d + D * (k + d), i, - j] = np.mean(rinput1[b, x1_index:x1_index + K, - y1_index:y1_index + K] * - rinput2[b, x2_index:x2_index + K, - y2_index:y2_index + K]) + output[b, l + d + D * (k + d), i, j] = np.mean( + rinput1[ + b, + x1_index : x1_index + K, + y1_index : y1_index + K, + ] + * rinput2[ + b, + x2_index : x2_index + K, + y2_index : y2_index + K, + ] + ) return output class TestCorrelationOp(unittest.TestCase): - def test_check_output(self): if not fluid.core.is_compiled_with_cuda(): return @@ -73,34 +85,42 @@ class TestCorrelationOp(unittest.TestCase): np.set_printoptions(threshold=np.inf) x_shape = (2, 10, 3, 3) x_type = 'float32' - x1 = fluid.layers.data(name='x1', - shape=x_shape, - dtype=x_type, - append_batch_size=False, - stop_gradient=False) - x2 = fluid.layers.data(name='x2', - shape=x_shape, - dtype=x_type, - append_batch_size=False, - stop_gradient=False) + x1 = fluid.layers.data( + name='x1', + shape=x_shape, + dtype=x_type, + append_batch_size=False, + stop_gradient=False, + ) + x2 = fluid.layers.data( + name='x2', + shape=x_shape, + dtype=x_type, + append_batch_size=False, + stop_gradient=False, + ) x1_np = np.random.randn(2, 3, 4, 5).astype(x_type) x2_np = np.random.randn(2, 3, 4, 5).astype(x_type) - out_np = corr(x1_np, - x2_np, - pad_size=4, - kernel_size=1, - max_displacement=4, - stride1=1, - stride2=1) - - out = fluid.contrib.correlation(x1, - x2, - pad_size=4, - kernel_size=1, - max_displacement=4, - stride1=1, - stride2=1) + out_np = corr( + x1_np, + x2_np, + pad_size=4, + kernel_size=1, + max_displacement=4, + stride1=1, + stride2=1, + ) + + out = fluid.contrib.correlation( + x1, + x2, + pad_size=4, + kernel_size=1, + max_displacement=4, + stride1=1, + stride2=1, + ) loss = fluid.layers.reduce_mean(out) optimizer = fluid.optimizer.Momentum(0.0001, 0.9) @@ -108,33 +128,31 @@ class TestCorrelationOp(unittest.TestCase): place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - res = exe.run(feed={ - 'x1': x1_np, - 'x2': x2_np - }, - fetch_list=[out.name, loss.name]) + res = exe.run( + feed={'x1': x1_np, 'x2': x2_np}, fetch_list=[out.name, loss.name] + ) np.testing.assert_allclose(res[0], out_np, rtol=1e-05, atol=1e-8) class 
Net(fluid.dygraph.Layer): - def __init__(self, name_scope): super(Net, self).__init__(name_scope) def forward(self, x1, x2): - y = fluid.contrib.correlation(x1, - x2, - pad_size=4, - kernel_size=1, - max_displacement=4, - stride1=1, - stride2=1) + y = fluid.contrib.correlation( + x1, + x2, + pad_size=4, + kernel_size=1, + max_displacement=4, + stride1=1, + stride2=1, + ) return y class TestCorrelationOpDyGraph(unittest.TestCase): - def test_check_output(self): if not fluid.core.is_compiled_with_cuda(): return @@ -146,13 +164,15 @@ class TestCorrelationOpDyGraph(unittest.TestCase): with fluid.dygraph.guard(place): x1_np = np.random.randn(2, 3, 4, 5).astype(x_type) x2_np = np.random.randn(2, 3, 4, 5).astype(x_type) - out_np = corr(x1_np, - x2_np, - pad_size=4, - kernel_size=1, - max_displacement=4, - stride1=1, - stride2=1) + out_np = corr( + x1_np, + x2_np, + pad_size=4, + kernel_size=1, + max_displacement=4, + stride1=1, + stride2=1, + ) x1 = to_variable(x1_np) x2 = to_variable(x2_np) diff --git a/python/paddle/fluid/contrib/tests/test_fp16_utils.py b/python/paddle/fluid/contrib/tests/test_fp16_utils.py index 54753ce4479a35bceace46ea87fe8e53d85f5fa3..af245a30fed43b95a1b1658f1aefe9e645903152 100644 --- a/python/paddle/fluid/contrib/tests/test_fp16_utils.py +++ b/python/paddle/fluid/contrib/tests/test_fp16_utils.py @@ -22,12 +22,11 @@ paddle.enable_static() class AMPTest(unittest.TestCase): - def test_find_op_index(self): block = fluid.default_main_program().global_block() op_desc = core.OpDesc() idx = fp16_utils.find_op_index(block.desc, op_desc) - assert (idx == -1) + assert idx == -1 def test_find_true_post_op(self): block = fluid.default_main_program().global_block() @@ -35,14 +34,14 @@ class AMPTest(unittest.TestCase): var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') var3 = block.create_var(name="Z", shape=[3], dtype='float32') - op1 = block.append_op(type="abs", - inputs={"X": [var1]}, - outputs={"Out": [var2]}) - op2 = block.append_op(type="abs", - inputs={"X": [var2]}, - outputs={"Out": [var3]}) + op1 = block.append_op( + type="abs", inputs={"X": [var1]}, outputs={"Out": [var2]} + ) + op2 = block.append_op( + type="abs", inputs={"X": [var2]}, outputs={"Out": [var3]} + ) res = fp16_utils.find_true_post_op(block.ops, op1, "Y") - assert (res == [op2]) + assert res == [op2] if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py index bf09a145450156e3fb50b4aa77f62a84a2daf4bc..7edaeb2760bed4e599c461f4ccf774a33be33fc8 100644 --- a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py +++ b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py @@ -29,21 +29,18 @@ paddle.enable_static() def resnet_cifar10(input, depth=32): - - def conv_bn_layer(input, - ch_out, - filter_size, - stride, - padding, - act='relu', - bias_attr=False): - tmp = fluid.layers.conv2d(input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=bias_attr) + def conv_bn_layer( + input, ch_out, filter_size, stride, padding, act='relu', bias_attr=False + ): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr, + ) return fluid.layers.batch_norm(input=tmp, act=act) def shortcut(input, ch_in, ch_out, stride): @@ -66,33 +63,31 @@ def 
resnet_cifar10(input, depth=32): assert (depth - 2) % 6 == 0 n = (depth - 2) // 6 - conv1 = conv_bn_layer(input=input, - ch_out=16, - filter_size=3, - stride=1, - padding=1) + conv1 = conv_bn_layer( + input=input, ch_out=16, filter_size=3, stride=1, padding=1 + ) res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d(input=res3, - pool_size=8, - pool_type='avg', - pool_stride=1) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1 + ) return pool def vgg16_bn_drop(input): - def conv_block(input, num_filter, groups, dropouts): - return fluid.nets.img_conv_group(input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') + return fluid.nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max', + ) conv1 = conv_block(input, 64, 2, [0.3, 0]) conv2 = conv_block(conv1, 128, 2, [0.4, 0]) @@ -117,9 +112,9 @@ def train(net_type, use_cuda, save_dirname, is_local): train_program.random_seed = 123 startup_prog.random_seed = 456 with fluid.program_guard(train_program, startup_prog): - images = fluid.layers.data(name='pixel', - shape=data_shape, - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') if net_type == "vgg": @@ -133,7 +128,8 @@ def train(net_type, use_cuda, save_dirname, is_local): logits = fluid.layers.fc(input=net, size=classdim, act="softmax") cost, predict = fluid.layers.softmax_with_cross_entropy( - logits, label, return_softmax=True) + logits, label, return_softmax=True + ) avg_cost = paddle.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) @@ -143,11 +139,14 @@ def train(net_type, use_cuda, save_dirname, is_local): optimizer = fluid.optimizer.Lamb(learning_rate=0.001) amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - custom_black_varnames={"loss", "conv2d_0.w_0"}) - mp_optimizer = decorate(optimizer=optimizer, - amp_lists=amp_lists, - init_loss_scaling=8.0, - use_dynamic_loss_scaling=True) + custom_black_varnames={"loss", "conv2d_0.w_0"} + ) + mp_optimizer = decorate( + optimizer=optimizer, + amp_lists=amp_lists, + init_loss_scaling=8.0, + use_dynamic_loss_scaling=True, + ) mp_optimizer.minimize(avg_cost) loss_scaling = mp_optimizer.get_loss_scaling() @@ -157,11 +156,13 @@ def train(net_type, use_cuda, save_dirname, is_local): PASS_NUM = 1 # no shuffle for unit test - train_reader = paddle.batch(paddle.dataset.cifar.train10(), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.dataset.cifar.train10(), batch_size=BATCH_SIZE + ) - test_reader = paddle.batch(paddle.dataset.cifar.test10(), - batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) @@ -175,18 +176,25 @@ def train(net_type, use_cuda, save_dirname, is_local): np_scaled_loss, loss = exe.run( main_program, feed=feeder.feed(data), - fetch_list=[scaled_loss, avg_cost]) + fetch_list=[scaled_loss, avg_cost], + ) print( - 'PassID {0:1}, BatchID {1:04}, 
train loss {2:2.4}, scaled train closs {3:2.4}' - .format(pass_id, batch_id + 1, float(loss), - float(np_scaled_loss))) + 'PassID {0:1}, BatchID {1:04}, train loss {2:2.4}, scaled train closs {3:2.4}'.format( + pass_id, + batch_id + 1, + float(loss), + float(np_scaled_loss), + ) + ) if (batch_id % 10) == 0: acc_list = [] avg_loss_list = [] for tid, test_data in enumerate(test_reader()): - loss_t, acc_t = exe.run(program=test_program, - feed=feeder.feed(test_data), - fetch_list=[avg_cost, acc]) + loss_t, acc_t = exe.run( + program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost, acc], + ) if math.isnan(float(loss_t)): sys.exit("got NaN loss, training failed.") acc_list.append(float(acc_t)) @@ -197,16 +205,23 @@ def train(net_type, use_cuda, save_dirname, is_local): avg_loss_value = numpy.array(avg_loss_list).mean() print( - 'PassID {0:1}, BatchID {1:04}, test loss {2:2.2}, acc {3:2.2}' - .format(pass_id, batch_id + 1, float(avg_loss_value), - float(acc_value))) + 'PassID {0:1}, BatchID {1:04}, test loss {2:2.2}, acc {3:2.2}'.format( + pass_id, + batch_id + 1, + float(avg_loss_value), + float(acc_value), + ) + ) if acc_value > 0.08: # Low threshold for speeding up CI fluid.io.save_inference_model( - save_dirname, ["pixel"], [predict], + save_dirname, + ["pixel"], + [predict], exe, main_program=train_program, - clip_extra=True) + clip_extra=True, + ) return if is_local: @@ -226,8 +241,9 @@ def train(net_type, use_cuda, save_dirname, is_local): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -247,8 +263,11 @@ def infer(use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [0, 1.0]. @@ -257,22 +276,25 @@ def infer(use_cuda, save_dirname=None): # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
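# Illustrative sketch (not part of the patch): the load/feed/fetch pattern infer()
# uses in the hunk below -- build the feed as {feed_target_name: data} and fetch the
# fetch_targets.  The model directory name and the 1x3x32x32 input in [0, 1] are
# hypothetical placeholders for the CIFAR-10 model saved by train().
import numpy
import paddle
import paddle.fluid as fluid

paddle.enable_static()
exe = fluid.Executor(fluid.CPUPlace())
save_dirname = "image_classification_vgg.inference.model"  # hypothetical path
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
    save_dirname, exe
)
tensor_img = numpy.random.random((1, 3, 32, 32)).astype("float32")
results = exe.run(
    inference_program,
    feed={feed_target_names[0]: tensor_img},
    fetch_list=fetch_targets,
)
print("infer results: ", results[0])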
- results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) print("infer results: ", results[0]) - fluid.io.save_inference_model(save_dirname, - feed_target_names, - fetch_targets, - exe, - inference_program, - clip_extra=True) + fluid.io.save_inference_model( + save_dirname, + feed_target_names, + fetch_targets, + exe, + inference_program, + clip_extra=True, + ) class TestImageClassification(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -286,18 +308,22 @@ class TestImageClassification(unittest.TestCase): # Directory for saving the trained model save_dirname = os.path.join( self.temp_dir.name, - "image_classification_" + net_type + ".inference.model") + "image_classification_" + net_type + ".inference.model", + ) train(net_type, use_cuda, save_dirname, is_local) - #infer(use_cuda, save_dirname) + # infer(use_cuda, save_dirname) def test_amp_lists(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists() self.assertEqual(amp_lists.white_list, white_list) @@ -306,106 +332,130 @@ class TestImageClassification(unittest.TestCase): def test_amp_lists_1(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 1. w={'exp}, b=None white_list.add('exp') black_list.remove('exp') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - {'exp'}) + {'exp'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) def test_amp_lists_2(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 2. 
w={'tanh'}, b=None white_list.add('tanh') gray_list.remove('tanh') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - {'tanh'}) + {'tanh'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) def test_amp_lists_3(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 3. w={'lstm'}, b=None white_list.add('lstm') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - {'lstm'}) + {'lstm'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) def test_amp_lists_4(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 4. w=None, b={'conv2d'} white_list.remove('conv2d') black_list.add('conv2d') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - custom_black_list={'conv2d'}) + custom_black_list={'conv2d'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) def test_amp_lists_5(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 5. w=None, b={'tanh'} black_list.add('tanh') gray_list.remove('tanh') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - custom_black_list={'tanh'}) + custom_black_list={'tanh'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) def test_amp_lists_6(self): white_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.white_list) + fluid.contrib.mixed_precision.fp16_lists.white_list + ) black_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.black_list) + fluid.contrib.mixed_precision.fp16_lists.black_list + ) gray_list = copy.copy( - fluid.contrib.mixed_precision.fp16_lists.gray_list) + fluid.contrib.mixed_precision.fp16_lists.gray_list + ) # 6. w=None, b={'lstm'} black_list.add('lstm') amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - custom_black_list={'lstm'}) + custom_black_list={'lstm'} + ) self.assertEqual(amp_lists.white_list, white_list) self.assertEqual(amp_lists.black_list, black_list) self.assertEqual(amp_lists.gray_list, gray_list) @@ -413,9 +463,12 @@ class TestImageClassification(unittest.TestCase): def test_amp_lists_7(self): # 7. 
w={'lstm'} b={'lstm'} # raise ValueError - self.assertRaises(ValueError, - fluid.contrib.mixed_precision.AutoMixedPrecisionLists, - {'lstm'}, {'lstm'}) + self.assertRaises( + ValueError, + fluid.contrib.mixed_precision.AutoMixedPrecisionLists, + {'lstm'}, + {'lstm'}, + ) def test_vgg_cuda(self): with self.scope_prog_guard(): @@ -436,37 +489,43 @@ class TestImageClassification(unittest.TestCase): class TestAmpWithNonIterableDataLoader(unittest.TestCase): - def decorate_with_data_loader(self): main_prog = paddle.static.Program() start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): with paddle.fluid.unique_name.guard(): - image = fluid.layers.data(name='image', - shape=[3, 224, 224], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + image = fluid.layers.data( + name='image', shape=[3, 224, 224], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) py_reader = fluid.io.DataLoader.from_generator( feed_list=[image, label], capacity=4, iterable=False, - use_double_buffer=False) + use_double_buffer=False, + ) net = vgg16_bn_drop(image) logits = fluid.layers.fc(input=net, size=10, act="softmax") cost, predict = fluid.layers.softmax_with_cross_entropy( - logits, label, return_softmax=True) + logits, label, return_softmax=True + ) avg_cost = paddle.mean(cost) optimizer = fluid.optimizer.Lamb(learning_rate=0.001) - amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists( - custom_black_varnames={"loss", "conv2d_0.w_0"}) - mp_optimizer = decorate(optimizer=optimizer, - amp_lists=amp_lists, - init_loss_scaling=8.0, - use_dynamic_loss_scaling=True) + amp_lists = ( + fluid.contrib.mixed_precision.AutoMixedPrecisionLists( + custom_black_varnames={"loss", "conv2d_0.w_0"} + ) + ) + mp_optimizer = decorate( + optimizer=optimizer, + amp_lists=amp_lists, + init_loss_scaling=8.0, + use_dynamic_loss_scaling=True, + ) mp_optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py index ab79242ae0092beaa7daae55a6415b621acf11a5..c000d55fccdfa77d5b508cf03cf092b4a89e2567 100644 --- a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py +++ b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py @@ -30,7 +30,8 @@ def convert_uint16_to_float(in_list): in_list = np.asarray(in_list) out = np.vectorize( lambda x: struct.unpack(' 0 else: sys.stderr.write('Do not get AVX flag on %s\n' % sysstr) @@ -162,10 +187,10 @@ def avx_supported(): def run_shell_command(cmd): import subprocess - out, err = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True).communicate() + + out, err = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ).communicate() if err: return None else: @@ -174,8 +199,9 @@ def run_shell_command(cmd): def get_dso_path(core_so, dso_name): if core_so and dso_name: - return run_shell_command("ldd %s|grep %s|awk '{print $3}'" % - (core_so, dso_name)) + return run_shell_command( + "ldd %s|grep %s|awk '{print $3}'" % (core_so, dso_name) + ) else: return None @@ -184,6 +210,7 @@ def load_dso(dso_absolute_path): if dso_absolute_path: try: from ctypes import cdll + cdll.LoadLibrary(dso_absolute_path) except: warnings.warn("Load {} failed".format(dso_absolute_path)) @@ -242,12 +269,14 @@ if platform.system().lower() == 'linux': try: from . 
import libpaddle + if avx_supported() and not libpaddle.is_compiled_with_avx(): sys.stderr.write( "Hint: Your machine support AVX, but the installed paddlepaddle doesn't have avx core. " "Hence, no-avx core with worse preformance will be imported.\nIf you like, you could " "reinstall paddlepaddle by 'python -m pip install --force-reinstall paddlepaddle-gpu[==version]' " - "to get better performance.\n") + "to get better performance.\n" + ) # assign tensor alias libpaddle.LoDTensor = libpaddle.Tensor @@ -278,6 +307,7 @@ try: from .libpaddle import _Profiler, _ProfilerResult, _RecordEvent from .libpaddle import _set_current_stream from .libpaddle import _get_phi_kernel_name + if sys.platform != 'win32': from .libpaddle import _set_process_pids from .libpaddle import _erase_process_pids @@ -290,12 +320,18 @@ try: except Exception as e: if has_paddle_dy_lib: sys.stderr.write( - 'Error: Can not import paddle core while this file exists: ' + - current_path + os.sep + 'libpaddle.' + dy_lib_suffix + '\n') + 'Error: Can not import paddle core while this file exists: ' + + current_path + + os.sep + + 'libpaddle.' + + dy_lib_suffix + + '\n' + ) if not avx_supported() and libpaddle.is_compiled_with_avx(): sys.stderr.write( "Error: Your machine doesn't support AVX, but the installed PaddlePaddle is avx core, " - "you should reinstall paddlepaddle with no-avx core.\n") + "you should reinstall paddlepaddle with no-avx core.\n" + ) raise e @@ -312,22 +348,26 @@ def set_paddle_custom_device_lib_path(lib_path): # set paddle lib path def set_paddle_lib_path(): - site_dirs = site.getsitepackages() if hasattr( - site, - 'getsitepackages') else [x for x in sys.path if 'site-packages' in x] + site_dirs = ( + site.getsitepackages() + if hasattr(site, 'getsitepackages') + else [x for x in sys.path if 'site-packages' in x] + ) for site_dir in site_dirs: lib_dir = os.path.sep.join([site_dir, 'paddle', 'libs']) if os.path.exists(lib_dir): _set_paddle_lib_path(lib_dir) set_paddle_custom_device_lib_path( - os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins'])) + os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins']) + ) return if hasattr(site, 'USER_SITE'): lib_dir = os.path.sep.join([site.USER_SITE, 'paddle', 'libs']) if os.path.exists(lib_dir): _set_paddle_lib_path(lib_dir) set_paddle_custom_device_lib_path( - os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins'])) + os.path.sep.join([lib_dir, '..', '..', 'paddle-plugins']) + ) set_paddle_lib_path() diff --git a/python/paddle/fluid/data.py b/python/paddle/fluid/data.py index 99e38b9470b762f073b9463d5d2bf5ba903ddb62..00173a29c28ac873992b06b578f947e80d33182f 100644 --- a/python/paddle/fluid/data.py +++ b/python/paddle/fluid/data.py @@ -114,11 +114,13 @@ def data(name, shape, dtype='float32', lod_level=0): if shape[i] is None: shape[i] = -1 - return helper.create_global_variable(name=name, - shape=shape, - dtype=dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - stop_gradient=True, - lod_level=lod_level, - is_data=True, - need_check_feed=True) + return helper.create_global_variable( + name=name, + shape=shape, + dtype=dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + stop_gradient=True, + lod_level=lod_level, + is_data=True, + need_check_feed=True, + ) diff --git a/python/paddle/fluid/data_feed_desc.py b/python/paddle/fluid/data_feed_desc.py index 0aa72cadaf58184fc88ffee44a1830fdab1d6541..7a58c5ef5e1b41da615d77d3f2851d5c501cdb2a 100644 --- a/python/paddle/fluid/data_feed_desc.py +++ b/python/paddle/fluid/data_feed_desc.py @@ -175,7 +175,8 @@ class 
DataFeedDesc(object): ) for name in dense_slots_name: self.proto_desc.multi_slot_desc.slots[ - self.__name_to_index[name]].is_dense = True + self.__name_to_index[name] + ].is_dense = True def set_use_slots(self, use_slots_name): """ @@ -220,7 +221,8 @@ class DataFeedDesc(object): ) for name in use_slots_name: self.proto_desc.multi_slot_desc.slots[ - self.__name_to_index[name]].is_used = True + self.__name_to_index[name] + ].is_used = True def desc(self): """ diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index a6bf7c6d4406d1351c0fa6e5db159e04bd2d7597..e84d41e56204c22e50625c83e79177d6f4ecb1e8 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -18,7 +18,13 @@ import os import multiprocessing import warnings -from .framework import Variable, default_main_program, _current_expected_place, _non_static_mode, _in_eager_without_dygraph_check +from .framework import ( + Variable, + default_main_program, + _current_expected_place, + _non_static_mode, + _in_eager_without_dygraph_check, +) from .framework import _cpu_num, _cuda_ids __all__ = ['DataFeeder'] @@ -45,18 +51,46 @@ def convert_dtype(dtype): return _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype] elif isinstance(dtype, type): if dtype in [ - bool, np.float16, np.uint16, np.float32, np.float64, np.int8, - np.int16, np.int32, np.int64, np.uint8, np.complex64, - np.complex128 + bool, + np.float16, + np.uint16, + np.float32, + np.float64, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.complex64, + np.complex128, ]: return dtype.__name__ else: if dtype in [ - 'bool', 'float16', 'uint16', 'float32', 'float64', 'int8', - 'int16', 'int32', 'int64', 'uint8', 'complex64', 'complex128', - u'bool', u'float16', u'uint16', u'float32', u'float64', u'int8', - u'int16', u'int32', u'int64', u'uint8', u'complex64', - u'complex128' + 'bool', + 'float16', + 'uint16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'complex64', + 'complex128', + u'bool', + u'float16', + u'uint16', + u'float32', + u'float64', + u'int8', + u'int16', + u'int32', + u'int64', + u'uint8', + u'complex64', + u'complex128', ]: # this code is a little bit dangerous, since error could happen # when casting no-ascii code to str in python2. @@ -70,14 +104,13 @@ def convert_dtype(dtype): raise TypeError( "dtype must be any of [bool, float16, uint16, float32, float64, int8, int16, " - "int32, int64, uint8, complex64, complex128], but received %s" % dtype) + "int32, int64, uint8, complex64, complex128], but received %s" % dtype + ) -def check_variable_and_dtype(input, - input_name, - expected_dtype, - op_name, - extra_message=''): +def check_variable_and_dtype( + input, input_name, expected_dtype, op_name, extra_message='' +): check_type(input, input_name, Variable, op_name, extra_message) check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message) @@ -97,60 +130,76 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): # @declarative in transformation from dygrah to static layer. We add VarBase in # expected_type to skip checking because varBase may be created and used in unusual way. from .dygraph.base import in_declarative_mode + # Need a better design to be fix this. 
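# Illustrative sketch (not part of the patch): how convert_dtype and
# check_variable_and_dtype from this module are typically used to validate an op
# input in static mode.  The op name 'my_op' and the dtype whitelist are hypothetical.
import paddle
import paddle.fluid as fluid
from paddle.fluid.data_feeder import check_variable_and_dtype, convert_dtype

paddle.enable_static()
x = fluid.layers.data(name='x', shape=[16], dtype='float32')
# Raises TypeError if x is not a Variable or its dtype is outside the whitelist.
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'my_op')
print(convert_dtype(x.dtype))  # -> 'float32'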
if in_declarative_mode(): if not isinstance(expected_type, tuple): - expected_type = (expected_type, ) - expected_type += (core.VarBase, ) + expected_type = (expected_type,) + expected_type += (core.VarBase,) if _in_eager_without_dygraph_check(): - expected_type += (core.eager.Tensor, ) + expected_type += (core.eager.Tensor,) elif isinstance(input, core.VarBase): raise TypeError( "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Because received '{}' in {} is a imperative Variable.".format( - input_name, op_name)) + input_name, op_name + ) + ) elif hasattr(core, "eager"): if isinstance(input, core.eager.Tensor): raise TypeError( "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Because received '{}' in {} is a imperative Variable.".format( - input_name, op_name)) + input_name, op_name + ) + ) if not isinstance(input, expected_type): raise TypeError( - "The type of '%s' in %s must be %s, but received %s. %s" % - (input_name, op_name, expected_type, type(input), extra_message)) + "The type of '%s' in %s must be %s, but received %s. %s" + % (input_name, op_name, expected_type, type(input), extra_message) + ) -def check_dtype(input_dtype, - input_name, - expected_dtype, - op_name, - extra_message=''): +def check_dtype( + input_dtype, input_name, expected_dtype, op_name, extra_message='' +): # See NOTE [ Why skip dynamic graph check ] if _non_static_mode(): return if convert_dtype(input_dtype) in ['float16']: warnings.warn( - "The data type of '%s' in %s only support float16 in GPU now. %s" % - (input_name, op_name, extra_message)) + "The data type of '%s' in %s only support float16 in GPU now. %s" + % (input_name, op_name, extra_message) + ) if convert_dtype(input_dtype) in ['uint16'] and op_name not in [ - 'reshape', 'lookup_table', 'scale' + 'reshape', + 'lookup_table', + 'scale', ]: warnings.warn( "The data type of '%s' in %s only support bfloat16 in OneDNN now. %s" - % (input_name, op_name, extra_message)) + % (input_name, op_name, extra_message) + ) if convert_dtype(input_dtype) not in expected_dtype: raise TypeError( - "The data type of '%s' in %s must be %s, but received %s. %s" % - (input_name, op_name, expected_dtype, convert_dtype(input_dtype), - extra_message)) - - -def check_shape(shape, + "The data type of '%s' in %s must be %s, but received %s. %s" + % ( + input_name, op_name, - expected_shape_type=(list, tuple, Variable), - expected_element_type=(int, Variable), - expected_tensor_dtype=('int32', 'int64')): + expected_dtype, + convert_dtype(input_dtype), + extra_message, + ) + ) + + +def check_shape( + shape, + op_name, + expected_shape_type=(list, tuple, Variable), + expected_element_type=(int, Variable), + expected_tensor_dtype=('int32', 'int64'), +): # See NOTE [ Why skip dynamic graph check ] if _non_static_mode(): return @@ -160,16 +209,19 @@ def check_shape(shape, check_type(item, 'element of shape', expected_element_type, op_name) if expected_tensor_dtype is not None and isinstance(item, Variable): check_dtype( - item.dtype, 'element of shape', expected_tensor_dtype, + item.dtype, + 'element of shape', + expected_tensor_dtype, op_name, - 'If element of shape is Tensor, its data type should be {}'. 
- format(', '.join(expected_tensor_dtype))) + 'If element of shape is Tensor, its data type should be {}'.format( + ', '.join(expected_tensor_dtype) + ), + ) if expected_tensor_dtype is not None and isinstance(shape, Variable): check_dtype(shape.dtype, 'shape', expected_tensor_dtype, op_name) class DataToLoDTensorConverter(object): - def __init__(self, place, lod_level, shape, dtype): self.place = place self.lod_level = lod_level @@ -203,8 +255,10 @@ class DataToLoDTensorConverter(object): for s1, s2 in zip(self.shape, shape): if s1 != s2 and s1 >= 0 and s2 >= 0: raise ValueError( - "Shape not match. What is defined in data layer is {}, but receive {}" - .format(self.shape, shape)) + "Shape not match. What is defined in data layer is {}, but receive {}".format( + self.shape, shape + ) + ) def done(self): arr = np.array(self.data, dtype=self.dtype) @@ -214,8 +268,10 @@ class DataToLoDTensorConverter(object): arr = arr.reshape(self.shape) except ValueError: raise ValueError( - "Reshape error. What is defined in data layer is {}, but receive {}" - .format(self.shape, arr.shape)) + "Reshape error. What is defined in data layer is {}, but receive {}".format( + self.shape, arr.shape + ) + ) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: @@ -225,7 +281,6 @@ class DataToLoDTensorConverter(object): class BatchedTensorProvider(object): - def __init__(self, feed_list, place, batch_size, generator, drop_last): self.place = place self.batch_size = batch_size @@ -236,10 +291,13 @@ class BatchedTensorProvider(object): for var in feed_list: assert var.lod_level == 0, "lod_level must be 0" self.converters.append( - DataToLoDTensorConverter(place=self.place, - lod_level=0, - shape=var.shape, - dtype=var.dtype)) + DataToLoDTensorConverter( + place=self.place, + lod_level=0, + shape=var.shape, + dtype=var.dtype, + ) + ) def _done(self): return [c.done() for c in self.converters] @@ -380,18 +438,23 @@ class DataFeeder(object): """ converter = [] - for lod_level, shape, dtype in zip(self.feed_lod_level, - self.feed_shapes, self.feed_dtypes): + for lod_level, shape, dtype in zip( + self.feed_lod_level, self.feed_shapes, self.feed_dtypes + ): converter.append( - DataToLoDTensorConverter(place=self.place, - lod_level=lod_level, - shape=shape, - dtype=dtype)) + DataToLoDTensorConverter( + place=self.place, + lod_level=lod_level, + shape=shape, + dtype=dtype, + ) + ) for each_sample in iterable: assert len(each_sample) == len(converter), ( - "The number of fields in data (%d) does not match " + - "len(feed_list) (%d)") % (len(each_sample), len(converter)) + "The number of fields in data (%d) does not match " + + "len(feed_list) (%d)" + ) % (len(each_sample), len(converter)) for each_converter, each_slot in zip(converter, each_sample): each_converter.feed(each_slot) ret_dict = {} @@ -465,10 +528,12 @@ class DataFeeder(object): ] if len(iterable) != len(places): - raise ValueError("feed_parallel takes multiple mini-batches. Each " - "mini-batch will be feed on each device. The " - "number of devices and number of mini-batches " - "must be same.") + raise ValueError( + "feed_parallel takes multiple mini-batches. Each " + "mini-batch will be feed on each device. The " + "number of devices and number of mini-batches " + "must be same." 
+ ) place = self.place for p, batch in zip(places, iterable): @@ -484,11 +549,9 @@ class DataFeeder(object): else: return _cpu_num() - def decorate_reader(self, - reader, - multi_devices, - num_places=None, - drop_last=True): + def decorate_reader( + self, reader, multi_devices, num_places=None, drop_last=True + ): """ Decorate the reader (generator) to fit multiple devices. The reader generate multiple mini-batches. Each mini-batch will be fed into a single device. @@ -566,6 +629,7 @@ class DataFeeder(object): raise ValueError( "The data batch which cannot fit for devices will be " "dropped is not implementation. Other strategies are " - "not implemented") + "not implemented" + ) return __reader_creator__ diff --git a/python/paddle/fluid/dataloader/__init__.py b/python/paddle/fluid/dataloader/__init__.py index 9136db3de80b34da7a0bc66fa15f8a35bd467eab..c0b2052283b1c549043e3b98c27dee961f0e45fa 100644 --- a/python/paddle/fluid/dataloader/__init__.py +++ b/python/paddle/fluid/dataloader/__init__.py @@ -24,7 +24,9 @@ from .dataloader_iter import * from . import sampler from .sampler import * -__all__ = dataset.__all__ \ - + batch_sampler.__all__ \ - + dataloader_iter.__all__ \ - + sampler.__all__ +__all__ = ( + dataset.__all__ + + batch_sampler.__all__ + + dataloader_iter.__all__ + + sampler.__all__ +) diff --git a/python/paddle/fluid/dataloader/batch_sampler.py b/python/paddle/fluid/dataloader/batch_sampler.py index 7c87bd3d4cd6120618c5584c5d54dbe4dd35afbc..624754ae286ad3d84473e8f64bd8df9160169d56 100644 --- a/python/paddle/fluid/dataloader/batch_sampler.py +++ b/python/paddle/fluid/dataloader/batch_sampler.py @@ -96,36 +96,51 @@ class BatchSampler(Sampler): """ - def __init__(self, - dataset=None, - sampler=None, - shuffle=False, - batch_size=1, - drop_last=False): + def __init__( + self, + dataset=None, + sampler=None, + shuffle=False, + batch_size=1, + drop_last=False, + ): if dataset is None: - assert sampler is not None, \ - "either dataset or sampler should be set" - assert isinstance(sampler, Sampler), \ - "sampler should be a paddle.io.Sampler, but got {}".format(type(sampler)) + assert ( + sampler is not None + ), "either dataset or sampler should be set" + assert isinstance( + sampler, Sampler + ), "sampler should be a paddle.io.Sampler, but got {}".format( + type(sampler) + ) assert not shuffle, "shuffle should be False when sampler is set" self.sampler = sampler else: - assert not isinstance(dataset, IterableDataset), \ - "dataset should not be a paddle.io.IterableDataset" - assert sampler is None, \ - "should not set both dataset and sampler" - assert isinstance(shuffle, bool), \ - "shuffle should be a boolean value, but got {}".format(type(shuffle)) + assert not isinstance( + dataset, IterableDataset + ), "dataset should not be a paddle.io.IterableDataset" + assert sampler is None, "should not set both dataset and sampler" + assert isinstance( + shuffle, bool + ), "shuffle should be a boolean value, but got {}".format( + type(shuffle) + ) if shuffle: self.sampler = RandomSampler(dataset) else: self.sampler = SequenceSampler(dataset) - assert isinstance(batch_size, int) and batch_size > 0, \ - "batch_size should be a positive integer, but got {}".format(batch_size) + assert ( + isinstance(batch_size, int) and batch_size > 0 + ), "batch_size should be a positive integer, but got {}".format( + batch_size + ) self.batch_size = batch_size - assert isinstance(drop_last, bool), \ - "drop_last should be a boolean value, but got {}".format(type(drop_last)) + assert isinstance( + 
drop_last, bool + ), "drop_last should be a boolean value, but got {}".format( + type(drop_last) + ) self.drop_last = drop_last def __iter__(self): @@ -145,7 +160,6 @@ class BatchSampler(Sampler): class _InfiniteIterableSampler(object): - def __init__(self, dataset, batch_size=1): assert isinstance( dataset, IterableDataset @@ -214,36 +228,41 @@ class DistributedBatchSampler(BatchSampler): break """ - def __init__(self, - dataset, - batch_size, - num_replicas=None, - rank=None, - shuffle=False, - drop_last=False): + def __init__( + self, + dataset, + batch_size, + num_replicas=None, + rank=None, + shuffle=False, + drop_last=False, + ): self.dataset = dataset - assert isinstance(batch_size, int) and batch_size > 0, \ - "batch_size should be a positive integer" + assert ( + isinstance(batch_size, int) and batch_size > 0 + ), "batch_size should be a positive integer" self.batch_size = batch_size - assert isinstance(shuffle, bool), \ - "shuffle should be a boolean value" + assert isinstance(shuffle, bool), "shuffle should be a boolean value" self.shuffle = shuffle - assert isinstance(drop_last, bool), \ - "drop_last should be a boolean number" + assert isinstance( + drop_last, bool + ), "drop_last should be a boolean number" from paddle.fluid.dygraph.parallel import ParallelEnv if num_replicas is not None: - assert isinstance(num_replicas, int) and num_replicas > 0, \ - "num_replicas should be a positive integer" + assert ( + isinstance(num_replicas, int) and num_replicas > 0 + ), "num_replicas should be a positive integer" self.nranks = num_replicas else: self.nranks = ParallelEnv().nranks if rank is not None: - assert isinstance(rank, int) and rank >= 0, \ - "rank should be a non-negative integer" + assert ( + isinstance(rank, int) and rank >= 0 + ), "rank should be a non-negative integer" self.local_rank = rank else: self.local_rank = ParallelEnv().local_rank @@ -256,7 +275,7 @@ class DistributedBatchSampler(BatchSampler): def __iter__(self): num_samples = len(self.dataset) indices = np.arange(num_samples).tolist() - indices += indices[:(self.total_size - len(indices))] + indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size if self.shuffle: np.random.RandomState(self.epoch).shuffle(indices) @@ -269,16 +288,21 @@ class DistributedBatchSampler(BatchSampler): assert last_batch_size % self.nranks == 0 last_local_batch_size = last_batch_size // self.nranks - for i in range(self.local_rank * self.batch_size, - len(indices) - last_batch_size, - self.batch_size * self.nranks): - subsampled_indices.extend(indices[i:i + self.batch_size]) + for i in range( + self.local_rank * self.batch_size, + len(indices) - last_batch_size, + self.batch_size * self.nranks, + ): + subsampled_indices.extend(indices[i : i + self.batch_size]) - indices = indices[len(indices) - last_batch_size:] + indices = indices[len(indices) - last_batch_size :] subsampled_indices.extend( - indices[self.local_rank * - last_local_batch_size:(self.local_rank + 1) * - last_local_batch_size]) + indices[ + self.local_rank + * last_local_batch_size : (self.local_rank + 1) + * last_local_batch_size + ] + ) return subsampled_indices if self.nranks > 1: diff --git a/python/paddle/fluid/dataloader/collate.py b/python/paddle/fluid/dataloader/collate.py index 3c46b54156d78eee95a3ed5054b75b92f47ff3cc..50b86ca41e53b7dafeea940746720662bf398d56 100644 --- a/python/paddle/fluid/dataloader/collate.py +++ b/python/paddle/fluid/dataloader/collate.py @@ -66,18 +66,20 @@ def default_collate_fn(batch): return batch 
elif isinstance(sample, Mapping): return { - key: default_collate_fn([d[key] for d in batch]) - for key in sample + key: default_collate_fn([d[key] for d in batch]) for key in sample } elif isinstance(sample, Sequence): sample_fields_num = len(sample) if not all(len(sample) == sample_fields_num for sample in iter(batch)): raise RuntimeError( - "fileds number not same among samples in a batch") + "fields number not same among samples in a batch" + ) return [default_collate_fn(fields) for fields in zip(*batch)] - raise TypeError("batch data con only contains: tensor, numpy.ndarray, " - "dict, list, number, but got {}".format(type(sample))) + raise TypeError( + "batch data can only contain: tensor, numpy.ndarray, " + "dict, list, number, but got {}".format(type(sample)) + ) def default_convert_fn(batch): diff --git a/python/paddle/fluid/dataloader/dataloader_iter.py b/python/paddle/fluid/dataloader/dataloader_iter.py index 71e4a9f83adb46b85effef4312676f3aca99cd8f..83d95c479250ef6548e45e25daa1fd4aa81977c7 100644 --- a/python/paddle/fluid/dataloader/dataloader_iter.py +++ b/python/paddle/fluid/dataloader/dataloader_iter.py @@ -24,7 +24,11 @@ import threading import numpy as np import multiprocessing from collections import namedtuple -from paddle.fluid.framework import _set_expected_place, _current_expected_place, set_flags +from paddle.fluid.framework import ( + _set_expected_place, + _current_expected_place, + set_flags, +) # NOTE: queue has a different name in python2 and python3 import queue @@ -34,13 +38,23 @@ import paddle.profiler as profiler from paddle.profiler.utils import in_profiler_mode from .. import core, layers from ..framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph -from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar +from ..multiprocess_utils import ( + _set_SIGCHLD_handler, + MP_STATUS_CHECK_INTERVAL, + CleanupFuncRegistrar, +) from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher from .batch_sampler import _InfiniteIterableSampler from .collate import default_collate_fn, default_convert_fn -from .worker import ParentWatchDog, get_worker_info, _worker_loop, \ - _DatasetKind, _IterableDatasetStopIteration, _WorkerException, \ - _ResumeIteration +from .worker import ( + ParentWatchDog, + get_worker_info, + _worker_loop, + _DatasetKind, + _IterableDatasetStopIteration, + _WorkerException, + _ResumeIteration, +) from .flat import _flatten_batch, _restore_batch from paddle.profiler.timer import benchmark @@ -98,7 +112,9 @@ class _DataLoaderIterBase(object): self._use_buffer_reader = loader.use_buffer_reader self._prefetch_factor = loader.prefetch_factor self._use_shared_memory = loader.use_shared_memory - self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL + self._timeout = ( + loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL + ) self._worker_init_fn = loader.worker_init_fn self._dataset_kind = loader.dataset_kind self._pin_memory = loader.pin_memory @@ -155,8 +171,12 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): super(_DataLoaderIterSingleProcess, self).__init__(loader) self._dataset_fetcher = _DatasetKind.create_fetcher( - self._dataset_kind, self._dataset, self._auto_collate_batch, - self._collate_fn, self._drop_last) + self._dataset_kind, + self._dataset, + self._auto_collate_batch, + self._collate_fn, + self._drop_last, + ) # NOTE: _structrue_infos used to record the data structure of # batch to restore batch structure after 
reading Tensor @@ -170,7 +190,8 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): # iteration, set blocking_queue can cache "self._prefetch_factor" iteration datas # at most here self._blocking_queue_capacity = self._prefetch_factor * len( - self._places) + self._places + ) self._init_thread() self._shutdown = False @@ -187,20 +208,30 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): ] # if only 1 place, do not need to keep order self._blocking_queue = core.init_lod_tensor_blocking_queue( - core.Variable(), self._blocking_queue_capacity, - len(self._places) > 1) + core.Variable(), + self._blocking_queue_capacity, + len(self._places) > 1, + ) self._reader = core.create_py_reader( - self._blocking_queue, self._var_names, self._shapes, self._dtypes, - self._need_check_feed, self._places, self._use_buffer_reader, True, - self._pin_memory) - - self._thread = threading.Thread(target=self._thread_loop, - args=(_current_expected_place(), )) + self._blocking_queue, + self._var_names, + self._shapes, + self._dtypes, + self._need_check_feed, + self._places, + self._use_buffer_reader, + True, + self._pin_memory, + ) + + self._thread = threading.Thread( + target=self._thread_loop, args=(_current_expected_place(),) + ) self._thread.daemon = True self._thread.start() def _thread_loop(self, legacy_expected_place): - #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread, + # NOTE(zhiqiu): Set the expected place for new thread as the same as father thread, # and it will call platform::SetDeviceId() in c++ internally. # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0, # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda @@ -215,19 +246,22 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): # read data from dataset in mini-batch # with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()): # read data from dataset in mini-batch - batch = self._dataset_fetcher.fetch(indices, - self._thread_done_event) + batch = self._dataset_fetcher.fetch( + indices, self._thread_done_event + ) except StopIteration: self._exit_thread_expectedly() return - if batch is None or self._thread_done_event.is_set(): break + if batch is None or self._thread_done_event.is_set(): + break # flat batch and record structure infos batch, structure = _flatten_batch(batch) self._structure_infos.append(structure) - if self._thread_done_event.is_set(): break + if self._thread_done_event.is_set(): + break try: # pack as LoDTensorArray @@ -242,7 +276,8 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): array.append(slot) - if self._thread_done_event.is_set(): break + if self._thread_done_event.is_set(): + break try: self._blocking_queue.push(array) @@ -259,14 +294,16 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): if in_profiler_mode(): trace_event = profiler.RecordEvent( name="_DataLoaderIterSingleProcess", - event_type=profiler.TracerEventType.Dataloader) + event_type=profiler.TracerEventType.Dataloader, + ) trace_event.begin() try: benchmark().check_if_need_record(self) benchmark().before_reader() if in_dygraph_mode(): data = core.eager.read_next_tensor_list( - self._reader.read_next_list()[0]) + self._reader.read_next_list()[0] + ) data = _restore_batch(data, self._structure_infos.pop(0)) else: if _in_legacy_dygraph(): @@ -281,8 +318,9 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): self._structure_infos.pop(0) for _ in range(len(self._places)) ] - data = [_restore_batch(d, s) \ - for d, s in 
zip(data, structs)] + data = [ + _restore_batch(d, s) for d, s in zip(data, structs) + ] # static graph organized data on multi-device with list, if # place number is 1, there is only 1 device, extra the data # from list for devices to be compatible with dygraph mode @@ -341,15 +379,17 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): class _DataLoaderIterMultiProcess(_DataLoaderIterBase): - def __init__(self, loader): super(_DataLoaderIterMultiProcess, self).__init__(loader) self._persistent_workers = loader._persistent_workers self._resume_worker_cnt = 0 - assert self._num_workers > 0, "Multi-process DataLoader " \ - "invalid num_workers({})".format(self._num_workers) + assert ( + self._num_workers > 0 + ), "Multi-process DataLoader " "invalid num_workers({})".format( + self._num_workers + ) # subprocess wrokers' result queue self._data_queue = None @@ -370,7 +410,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): # output data for at least "_prefetch_factor" iterations(Note that len(_places) # batches will be composed as an iteration output) self._outstanding_capacity = self._prefetch_factor * max( - self._num_workers, len(self._places)) + self._num_workers, len(self._places) + ) # see _try_put_indices self._thread_lock = threading.Lock() @@ -405,12 +446,22 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): self._indices_queues.append(indices_queue) worker = multiprocessing.Process( target=_worker_loop, - args=(self._dataset, self._dataset_kind, indices_queue, - self._data_queue, self._workers_done_event, - self._auto_collate_batch, self._collate_fn, - self._drop_last, self._worker_init_fn, i, - self._num_workers, self._use_shared_memory, - self._base_seed)) + args=( + self._dataset, + self._dataset_kind, + indices_queue, + self._data_queue, + self._workers_done_event, + self._auto_collate_batch, + self._collate_fn, + self._drop_last, + self._worker_init_fn, + i, + self._num_workers, + self._use_shared_memory, + self._base_seed, + ), + ) worker.daemon = True worker.start() self._workers.append(worker) @@ -438,17 +489,25 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): ] # if only 1 place, do not need to keep order self._blocking_queue = core.init_lod_tensor_blocking_queue( - core.Variable(), self._outstanding_capacity, - len(self._places) > 1) + core.Variable(), self._outstanding_capacity, len(self._places) > 1 + ) self._reader = core.create_py_reader( - self._blocking_queue, self._var_names, self._shapes, self._dtypes, - self._need_check_feed, self._places, self._use_buffer_reader, True, - self._pin_memory) + self._blocking_queue, + self._var_names, + self._shapes, + self._dtypes, + self._need_check_feed, + self._places, + self._use_buffer_reader, + True, + self._pin_memory, + ) self._thread_done_event = threading.Event() # thread event is only need in multi-processing mode - self._thread = threading.Thread(target=self._thread_loop, - args=(_current_expected_place(), )) + self._thread = threading.Thread( + target=self._thread_loop, args=(_current_expected_place(),) + ) self._thread.daemon = True self._thread.start() @@ -471,7 +530,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): while self._blocking_queue.size() >= len(self._places): if in_dygraph_mode(): data = core.eager.read_next_tensor_list( - self._reader.read_next_list()[0]) + self._reader.read_next_list()[0] + ) else: if _in_legacy_dygraph(): self._reader.read_next_var_list() @@ -497,8 +557,9 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): self._try_put_indices() def 
_shutdown_worker(self, worker_id, shutdown=False): - if self._worker_status[worker_id] or (self._persistent_workers - and shutdown): + if self._worker_status[worker_id] or ( + self._persistent_workers and shutdown + ): self._indices_queues[worker_id].put(None) self._worker_status[worker_id] = False @@ -526,7 +587,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): self._shutdown = True def _thread_loop(self, legacy_expected_place): - #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread, + # NOTE(zhiqiu): Set the expected place for new thread as the same as father thread, # and it will call platform::SetDeviceId() in c++ internally. # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0, # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda @@ -555,8 +616,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): # serializable, cannot be create in workers for slot in batch: if isinstance( - slot, - (paddle.Tensor, core.eager.Tensor)): + slot, (paddle.Tensor, core.eager.Tensor) + ): slot = slot.value().get_tensor() elif not isinstance(slot, core.LoDTensor): tmp = core.LoDTensor() @@ -607,8 +668,10 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): if self._batches_outstanding < len(self._places): return None - if self._rcvd_idx in self._task_infos and \ - len(self._task_infos[self._rcvd_idx]) == 3: + if ( + self._rcvd_idx in self._task_infos + and len(self._task_infos[self._rcvd_idx]) == 3 + ): info = self._task_infos.pop(self._rcvd_idx) self._structure_infos.append(info[2]) return info[1] @@ -637,8 +700,10 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): if len(failed_workers) > 0: self._exit_thread_unexpectedly() pids = ', '.join(str(w.pid) for w in failed_workers) - raise RuntimeError("DataLoader {} workers exit unexpectedly, " \ - "pids: {}".format(len(failed_workers), pids)) + raise RuntimeError( + "DataLoader {} workers exit unexpectedly, " + "pids: {}".format(len(failed_workers), pids) + ) # get(timeout) will call _poll(timeout) and may raise IOError if isinstance(e, queue.Empty) or isinstance(e, IOError): @@ -646,12 +711,15 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): continue self._exit_thread_unexpectedly() - logging.error("DataLoader reader thread failed({}) to read data from " \ - "workers' result queue.".format(e)) + logging.error( + "DataLoader reader thread failed({}) to read data from " + "workers' result queue.".format(e) + ) six.reraise(*sys.exc_info()) else: if self._dataset_kind == _DatasetKind.ITER and isinstance( - data, _IterableDatasetStopIteration): + data, _IterableDatasetStopIteration + ): # if a worker get StopIteraion, we shutdown this worker, # note that this batch indices to trigger StopIteration # is discard, outstanding batch number should be decrease @@ -667,8 +735,11 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): idx, batch, structure = data - if isinstance(idx, _ResumeIteration) and batch is None \ - and structure is None: + if ( + isinstance(idx, _ResumeIteration) + and batch is None + and structure is None + ): return idx if isinstance(batch, _WorkerException): @@ -684,8 +755,9 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): continue def _try_put_indices(self): - assert self._batches_outstanding <= self._outstanding_capacity, \ - "too many indices have been put to queue" + assert ( + self._batches_outstanding <= self._outstanding_capacity + ), "too many indices have been put to queue" # In multi-process 
mode for IterableDataset, _try_put_indices will # be called both in main process(for our implement has blocking queue, # and blocking queue read is in main process) and thread, which may @@ -709,7 +781,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): return self._indices_queues[worker_idx].put((self._send_idx, indices)) - self._task_infos[self._send_idx] = (worker_idx, ) + self._task_infos[self._send_idx] = (worker_idx,) self._batches_outstanding += 1 self._send_idx += 1 @@ -723,7 +795,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): if in_profiler_mode(): trace_event = profiler.RecordEvent( name="_DataLoaderIterMultiProcess", - event_type=profiler.TracerEventType.Dataloader) + event_type=profiler.TracerEventType.Dataloader, + ) trace_event.begin() try: benchmark().check_if_need_record(self) @@ -744,7 +817,8 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): if in_dygraph_mode(): data = core.eager.read_next_tensor_list( - self._reader.read_next_list()[0]) + self._reader.read_next_list()[0] + ) data = _restore_batch(data, self._structure_infos.pop(0)) else: if _in_legacy_dygraph(): @@ -759,8 +833,9 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): self._structure_infos.pop(0) for _ in range(len(self._places)) ] - data = [_restore_batch(d, s) \ - for d, s in zip(data, structs)] + data = [ + _restore_batch(d, s) for d, s in zip(data, structs) + ] # static graph organized data on multi-device with list, if # place number is 1, there is only 1 device, extra the data # from list for devices to be compatible with dygraph mode diff --git a/python/paddle/fluid/dataloader/dataset.py b/python/paddle/fluid/dataloader/dataset.py index eaab078d7e871207132b062c60a3059498d6c80e..5fe52196cd88f2c3a4c5df10f6487973e38bc991 100755 --- a/python/paddle/fluid/dataloader/dataset.py +++ b/python/paddle/fluid/dataloader/dataset.py @@ -16,8 +16,13 @@ import paddle from .. 
import framework __all__ = [ - "Dataset", "IterableDataset", "TensorDataset", "ComposeDataset", - "ChainDataset", "random_split", "Subset" + "Dataset", + "IterableDataset", + "TensorDataset", + "ComposeDataset", + "ChainDataset", + "random_split", + "Subset", ] @@ -67,12 +72,16 @@ class Dataset(object): pass def __getitem__(self, idx): - raise NotImplementedError("'{}' not implement in class "\ - "{}".format('__getitem__', self.__class__.__name__)) + raise NotImplementedError( + "'{}' is not implemented in class " + "{}".format('__getitem__', self.__class__.__name__) + ) def __len__(self): - raise NotImplementedError("'{}' not implement in class "\ - "{}".format('__len__', self.__class__.__name__)) + raise NotImplementedError( + "'{}' is not implemented in class " + "{}".format('__len__', self.__class__.__name__) + ) class IterableDataset(Dataset): @@ -210,16 +219,22 @@ class IterableDataset(Dataset): pass def __iter__(self): - raise NotImplementedError("'{}' not implement in class "\ - "{}".format('__iter__', self.__class__.__name__)) + raise NotImplementedError( + "'{}' is not implemented in class " + "{}".format('__iter__', self.__class__.__name__) + ) def __getitem__(self, idx): - raise RuntimeError("'{}' should not be called for IterableDataset" \ - "{}".format('__getitem__', self.__class__.__name__)) + raise RuntimeError( + "'{}' should not be called for IterableDataset" + "{}".format('__getitem__', self.__class__.__name__) + ) def __len__(self): - raise RuntimeError("'{}' should not be called for IterableDataset" \ - "{}".format('__len__', self.__class__.__name__)) + raise RuntimeError( + "'{}' should not be called for IterableDataset" + "{}".format('__len__', self.__class__.__name__) + ) class TensorDataset(Dataset): @@ -261,9 +276,11 @@ class TensorDataset(Dataset): def __init__(self, tensors): if not framework._non_static_mode(): raise RuntimeError( - "TensorDataset con only be used in imperative mode") - assert all([tensor.shape[0] == tensors[0].shape[0] for tensor in tensors]), \ - "tensors not have same shape of the 1st dimension" + "TensorDataset can only be used in imperative mode" + ) + assert all( + [tensor.shape[0] == tensors[0].shape[0] for tensor in tensors] + ), "tensors do not have the same shape in the 1st dimension" self.tensors = tensors def __getitem__(self, index): @@ -330,13 +347,16 @@ class ComposeDataset(Dataset): self.datasets = list(datasets) assert len(self.datasets) > 0, "input datasets shoule not be empty" for i, dataset in enumerate(self.datasets): - assert isinstance(dataset, Dataset), \ - "each input dataset should be paddle.io.Dataset" - assert not isinstance(dataset, IterableDataset), \ - "paddle.io.IterableDataset not supported" + assert isinstance( + dataset, Dataset + ), "each input dataset should be paddle.io.Dataset" + assert not isinstance( + dataset, IterableDataset + ), "paddle.io.IterableDataset not supported" if i > 0: - assert len(dataset) == len(self.datasets[i-1]), \ - "lengths of datasets should be same" + assert len(dataset) == len( + self.datasets[i - 1] + ), "lengths of datasets should be the same" def __len__(self): return len(self.datasets[0]) @@ -391,8 +411,9 @@ class ChainDataset(IterableDataset): self.datasets = list(datasets) assert len(self.datasets) > 0, "input datasets shoule not be empty" for i, dataset in enumerate(self.datasets): - assert isinstance(dataset, IterableDataset), \ - "ChainDataset only support paddle.io.IterableDataset" + assert isinstance( + dataset, IterableDataset + ), "ChainDataset only supports paddle.io.IterableDataset" def 
__iter__(self): for dataset in self.datasets: @@ -492,7 +513,7 @@ def random_split(dataset, lengths, generator=None): # For example var.item() and var.tolist() indices = paddle.randperm(sum(lengths)).numpy().tolist() return [ - Subset(dataset, indices[offset - length:offset]) + Subset(dataset, indices[offset - length : offset]) for offset, length in zip(_accumulate(lengths), lengths) ] diff --git a/python/paddle/fluid/dataloader/fetcher.py b/python/paddle/fluid/dataloader/fetcher.py index 387032cdfbbd37e03bf9a58af4486a470f8e95d8..b4a2ff6b923e4e6044940e638f8424fabd93894c 100644 --- a/python/paddle/fluid/dataloader/fetcher.py +++ b/python/paddle/fluid/dataloader/fetcher.py @@ -20,7 +20,6 @@ _WARNING_TO_LOG = True class _DatasetFetcher(object): - def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last): self.dataset = dataset self.auto_collate_batch = auto_collate_batch @@ -37,48 +36,55 @@ class _DatasetFetcher(object): # done_event argument to check DataLoader exit status between # ecah sample processing in the batch def fetch(self, batch_indices, done_event=None): - raise NotImplementedError("'fetch' not implement for class {}".format( - self.__class__.__name__)) + raise NotImplementedError( + "'fetch' not implement for class {}".format(self.__class__.__name__) + ) def _log_warning(self): # only log warning on GPU 0 when distributed launch from ...distributed import get_world_size, get_rank + if get_world_size() >= 2 and get_rank() != 0: return - warn_str = "Detect dataset only contains single fileds, return format " \ - "changed since Paddle 2.1. In Paddle <= 2.0, DataLoader add " \ - "a list surround output data(e.g. return [data]), and in " \ - "Paddle >= 2.1, DataLoader return the single filed directly " \ - "(e.g. return data). For example, in following code: \n\n" - warn_str += \ - "import numpy as np\n" \ - "from paddle.io import DataLoader, Dataset\n\n" \ - "class RandomDataset(Dataset):\n" \ - " def __getitem__(self, idx):\n" \ - " data = np.random.random((2, 3)).astype('float32')\n\n" \ - " return data\n\n" \ - " def __len__(self):\n" \ - " return 10\n\n" \ - "dataset = RandomDataset()\n" \ - "loader = DataLoader(dataset, batch_size=1)\n" \ - "data = next(loader())\n\n" - - warn_str += "In Paddle <= 2.0, data is in format '[Tensor(shape=(1, 2, 3), " \ - "dtype=float32)]', and in Paddle >= 2.1, data is in format" \ - " 'Tensor(shape=(1, 2, 3), dtype=float32)'\n" - - logger = get_logger("DataLoader", - logging.INFO, - fmt='%(levelname)s: %(message)s') + warn_str = ( + "Detect dataset only contains single fileds, return format " + "changed since Paddle 2.1. In Paddle <= 2.0, DataLoader add " + "a list surround output data(e.g. return [data]), and in " + "Paddle >= 2.1, DataLoader return the single filed directly " + "(e.g. return data). 
For example, in following code: \n\n" + ) + warn_str += ( + "import numpy as np\n" + "from paddle.io import DataLoader, Dataset\n\n" + "class RandomDataset(Dataset):\n" + " def __getitem__(self, idx):\n" + " data = np.random.random((2, 3)).astype('float32')\n\n" + " return data\n\n" + " def __len__(self):\n" + " return 10\n\n" + "dataset = RandomDataset()\n" + "loader = DataLoader(dataset, batch_size=1)\n" + "data = next(loader())\n\n" + ) + + warn_str += ( + "In Paddle <= 2.0, data is in format '[Tensor(shape=(1, 2, 3), " + "dtype=float32)]', and in Paddle >= 2.1, data is in format" + " 'Tensor(shape=(1, 2, 3), dtype=float32)'\n" + ) + + logger = get_logger( + "DataLoader", logging.INFO, fmt='%(levelname)s: %(message)s' + ) logger.warning(warn_str) class _IterableDatasetFetcher(_DatasetFetcher): - def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last): - super(_IterableDatasetFetcher, - self).__init__(dataset, auto_collate_batch, collate_fn, drop_last) + super(_IterableDatasetFetcher, self).__init__( + dataset, auto_collate_batch, collate_fn, drop_last + ) self.dataset_iter = iter(dataset) def fetch(self, batch_indices, done_event=None): @@ -94,13 +100,13 @@ class _IterableDatasetFetcher(_DatasetFetcher): else: return None - if len(data) == 0 or (self.drop_last - and len(data) < len(batch_indices)): + if len(data) == 0 or ( + self.drop_last and len(data) < len(batch_indices) + ): raise StopIteration global _WARNING_TO_LOG - if not isinstance(data[0], (Sequence, Mapping)) \ - and _WARNING_TO_LOG: + if not isinstance(data[0], (Sequence, Mapping)) and _WARNING_TO_LOG: self._log_warning() _WARNING_TO_LOG = False else: @@ -112,10 +118,10 @@ class _IterableDatasetFetcher(_DatasetFetcher): class _MapDatasetFetcher(_DatasetFetcher): - def __init__(self, dataset, auto_collate_batch, collate_fn, drop_last): - super(_MapDatasetFetcher, self).__init__(dataset, auto_collate_batch, - collate_fn, drop_last) + super(_MapDatasetFetcher, self).__init__( + dataset, auto_collate_batch, collate_fn, drop_last + ) def fetch(self, batch_indices, done_event=None): if self.auto_collate_batch: @@ -127,8 +133,7 @@ class _MapDatasetFetcher(_DatasetFetcher): return None global _WARNING_TO_LOG - if not isinstance(data[0], (Sequence, Mapping)) \ - and _WARNING_TO_LOG: + if not isinstance(data[0], (Sequence, Mapping)) and _WARNING_TO_LOG: self._log_warning() _WARNING_TO_LOG = False else: diff --git a/python/paddle/fluid/dataloader/flat.py b/python/paddle/fluid/dataloader/flat.py index 5baf4cc853e27596b36ce485228db6e6a9f3f0d4..6f3c6edf0b0ccde5c7a790abe1f8bbed90357b02 100644 --- a/python/paddle/fluid/dataloader/flat.py +++ b/python/paddle/fluid/dataloader/flat.py @@ -36,39 +36,47 @@ def _flatten_batch(batch): def _flatten(batch, flat_batch, structure, field_idx): if isinstance(batch, Sequence): for field in batch: - if isinstance(field, (np.ndarray, paddle.Tensor, - paddle.fluid.core.eager.Tensor)): + if isinstance( + field, + (np.ndarray, paddle.Tensor, paddle.fluid.core.eager.Tensor), + ): structure.append('{}{}'.format(FIELD_PREFIX, field_idx)) flat_batch.append(field) field_idx += 1 elif isinstance(field, (str, bytes, numbers.Number)): structure.append(field) elif isinstance(field, Sequence): - field_struct, field_idx = _flatten(field, flat_batch, [], - field_idx) + field_struct, field_idx = _flatten( + field, flat_batch, [], field_idx + ) structure.append(field_struct) elif isinstance(field, Mapping): - field_struct, field_idx = _flatten(field, flat_batch, {}, - field_idx) + field_struct, field_idx = 
_flatten( + field, flat_batch, {}, field_idx + ) structure.append(field_struct) else: structure.append(field) elif isinstance(batch, Mapping): for k, field in batch.items(): - if isinstance(field, (np.ndarray, paddle.Tensor, - paddle.fluid.core.eager.Tensor)): + if isinstance( + field, + (np.ndarray, paddle.Tensor, paddle.fluid.core.eager.Tensor), + ): structure[k] = '{}{}'.format(FIELD_PREFIX, field_idx) flat_batch.append(field) field_idx += 1 elif isinstance(field, (str, bytes, numbers.Number)): structure[k] = field elif isinstance(field, Sequence): - field_struct, field_idx = _flatten(field, flat_batch, [], - field_idx) + field_struct, field_idx = _flatten( + field, flat_batch, [], field_idx + ) structure[k] = field_struct elif isinstance(field, Mapping): - field_struct, field_idx = _flatten(field, flat_batch, {}, - field_idx) + field_struct, field_idx = _flatten( + field, flat_batch, {}, field_idx + ) structure[k] = field_struct else: structure[k] = field @@ -100,8 +108,9 @@ def _restore_batch(flat_batch, structure): if isinstance(field, str) and field.startswith(FIELD_PREFIX): cur_field_idx = int(field.replace(FIELD_PREFIX, '')) field_idx = max(field_idx, cur_field_idx) - assert flat_batch[cur_field_idx] is not None, \ - "flat_batch[{}] parsed repeatly" + assert ( + flat_batch[cur_field_idx] is not None + ), "flat_batch[{}] parsed repeatly" structure[i] = flat_batch[cur_field_idx] flat_batch[cur_field_idx] = None elif isinstance(field, (str, bytes, numbers.Number)): @@ -113,8 +122,9 @@ def _restore_batch(flat_batch, structure): if isinstance(field, str) and field.startswith(FIELD_PREFIX): cur_field_idx = int(field.replace(FIELD_PREFIX, '')) field_idx = max(field_idx, cur_field_idx) - assert flat_batch[cur_field_idx] is not None, \ - "flat_batch[{}] parsed repeatly" + assert ( + flat_batch[cur_field_idx] is not None + ), "flat_batch[{}] parsed repeatly" structure[k] = flat_batch[cur_field_idx] flat_batch[cur_field_idx] = None elif isinstance(field, (str, bytes, numbers.Number)): @@ -126,8 +136,7 @@ def _restore_batch(flat_batch, structure): return field_idx - assert isinstance(flat_batch, Sequence), \ - "flat_batch is not a list or tuple" + assert isinstance(flat_batch, Sequence), "flat_batch is not a list or tuple" # no np.array in dataset, no output tensor from blocking queue # simply return structure @@ -136,8 +145,9 @@ def _restore_batch(flat_batch, structure): # sample only contains single fields if isinstance(structure, (str, bytes)): - assert structure == '{}{}'.format(FIELD_PREFIX, 0), \ - "invalid structure: {}".format(structure) + assert structure == '{}{}'.format( + FIELD_PREFIX, 0 + ), "invalid structure: {}".format(structure) return flat_batch[0] field_idx = _restore(structure, 0) assert field_idx + 1 == len(flat_batch), "Tensor parse incomplete" diff --git a/python/paddle/fluid/dataloader/sampler.py b/python/paddle/fluid/dataloader/sampler.py index 77b8ff09ba06fdc985eded0c04ca5b185a707f1d..3626ed63e521c12f8bdfd0ef3001823c4a0acac6 100644 --- a/python/paddle/fluid/dataloader/sampler.py +++ b/python/paddle/fluid/dataloader/sampler.py @@ -16,7 +16,10 @@ import numpy as np from .. 
import core __all__ = [ - "Sampler", "SequenceSampler", "RandomSampler", "WeightedRandomSampler" + "Sampler", + "SequenceSampler", + "RandomSampler", + "WeightedRandomSampler", ] @@ -185,19 +188,19 @@ class RandomSampler(Sampler): see `paddle.io.Sampler` """ - def __init__(self, - data_source, - replacement=False, - num_samples=None, - generator=None): + def __init__( + self, data_source, replacement=False, num_samples=None, generator=None + ): self.data_source = data_source self.replacement = replacement self._num_samples = num_samples self.generator = generator if not isinstance(self.replacement, bool): - raise TypeError("expect boolean value for replacement, but got " - "replacement={}".format(self.replacement)) + raise TypeError( + "expect boolean value for replacement, but got " + "replacement={}".format(self.replacement) + ) if self._num_samples is not None and not replacement: raise ValueError( @@ -205,8 +208,10 @@ class RandomSampler(Sampler): ) if not isinstance(self.num_samples, int) or self.num_samples <= 0: - raise ValueError("num_samples should be a positive integer, " - "but got num_samples={}".format(self.num_samples)) + raise ValueError( + "num_samples should be a positive integer, " + "but got num_samples={}".format(self.num_samples) + ) @property def num_samples(self): @@ -225,13 +230,14 @@ class RandomSampler(Sampler): yield index else: if self.replacement: - for index in np.random.choice(np.arange(n), - self.num_samples, - replace=True).tolist(): + for index in np.random.choice( + np.arange(n), self.num_samples, replace=True + ).tolist(): yield index else: - for index in np.random.choice(np.arange(n), n, - replace=False).tolist(): + for index in np.random.choice( + np.arange(n), n, replace=False + ).tolist(): yield index def __len__(self): @@ -243,31 +249,29 @@ def _weighted_sample(weights, num_samples, replacement=True): weights = weights.numpy() if isinstance(weights, (list, tuple)): weights = np.array(weights) - assert isinstance(weights, np.ndarray), \ - "weights should be paddle.Tensor, numpy.ndarray, list or tuple" - assert len(weights.shape) <= 2, \ - "weights should be a 1-D or 2-D array" + assert isinstance( + weights, np.ndarray + ), "weights should be paddle.Tensor, numpy.ndarray, list or tuple" + assert len(weights.shape) <= 2, "weights should be a 1-D or 2-D array" weights = weights.reshape((-1, weights.shape[-1])) - assert np.all(weights >= 0.), \ - "weights should be positive value" - assert not np.any(weights == np.inf), \ - "weights shoule not be INF" - assert not np.any(weights == np.nan), \ - "weights shoule not be NaN" - - non_zeros = np.sum(weights > 0., axis=1) - assert np.all(non_zeros > 0), \ - "weights should have positive values" + assert np.all(weights >= 0.0), "weights should be non-negative" + assert not np.any(weights == np.inf), "weights should not be INF" + assert not np.any(np.isnan(weights)), "weights should not be NaN" + + non_zeros = np.sum(weights > 0.0, axis=1) + assert np.all(non_zeros > 0), "weights should have positive values" if not replacement: - assert np.all(non_zeros >= num_samples), \ - "weights positive value number should not " \ "less than num_samples when replacement=False" + assert np.all(non_zeros >= num_samples), ( + "weights positive value number should not " "less than num_samples when replacement=False" + ) weights = weights / weights.sum(axis=1) rets = [] for i in range(weights.shape[0]): - ret = np.random.choice(weights.shape[1], num_samples, replacement, - weights[i]) + ret = np.random.choice( + weights.shape[1], num_samples, replacement, weights[i] + ) 
rets.append(ret) return np.array(rets) @@ -311,8 +315,9 @@ class WeightedRandomSampler(Sampler): self.replacement = replacement def __iter__(self): - idxs = _weighted_sample(self.weights, self.num_samples, - self.replacement) + idxs = _weighted_sample( + self.weights, self.num_samples, self.replacement + ) return iter(idxs.reshape((-1)).tolist()) def __len__(self): diff --git a/python/paddle/fluid/dataloader/worker.py b/python/paddle/fluid/dataloader/worker.py index 06ea7ef9d72a37b10756c34cda3b66975df4def5..f0aa32e774522eac2d6c31b30cf3eb03c1055201 100644 --- a/python/paddle/fluid/dataloader/worker.py +++ b/python/paddle/fluid/dataloader/worker.py @@ -21,7 +21,11 @@ import traceback from collections import namedtuple from .. import core from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher -from ..multiprocess_utils import _cleanup_mmap, CleanupFuncRegistrar, MP_STATUS_CHECK_INTERVAL +from ..multiprocess_utils import ( + _cleanup_mmap, + CleanupFuncRegistrar, + MP_STATUS_CHECK_INTERVAL, +) from ..framework import _non_static_mode, _in_eager_without_dygraph_check from .flat import _flatten_batch @@ -32,7 +36,6 @@ __all__ = ['get_worker_info'] class _IterableDatasetStopIteration(object): - def __init__(self, worker_id): self.worker_id = worker_id @@ -46,20 +49,22 @@ class _DatasetKind(object): ITER = 1 @staticmethod - def create_fetcher(kind, dataset, auto_collate_batch, collate_fn, - drop_last): + def create_fetcher( + kind, dataset, auto_collate_batch, collate_fn, drop_last + ): if kind == _DatasetKind.MAP: - return _MapDatasetFetcher(dataset, auto_collate_batch, collate_fn, - drop_last) + return _MapDatasetFetcher( + dataset, auto_collate_batch, collate_fn, drop_last + ) elif kind == _DatasetKind.ITER: - return _IterableDatasetFetcher(dataset, auto_collate_batch, - collate_fn, drop_last) + return _IterableDatasetFetcher( + dataset, auto_collate_batch, collate_fn, drop_last + ) else: raise NotImplementedError("unknown Dataset kind {}".format(kind)) class ParentWatchDog(object): - def __init__(self): self._parent_pid = os.getppid() self._parent_alive = True @@ -151,13 +156,15 @@ class WorkerInfo(object): def __setattr__(self, key, val): if self.__initialized: - raise RuntimeError("Cannot assign attributes to {} objects".format( - self.__class__.__name__)) + raise RuntimeError( + "Cannot assign attributes to {} objects".format( + self.__class__.__name__ + ) + ) return super(WorkerInfo, self).__setattr__(key, val) class _WorkerException(object): - def __init__(self, worker_id, exc_info=None): self.worker_id = worker_id exc_info = exc_info or sys.exc_info() @@ -166,7 +173,8 @@ class _WorkerException(object): def reraise(self): msg = "DataLoader worker({}) caught {} with message:\n{}".format( - self.worker_id, self.exc_type.__name__, self.exc_msg) + self.worker_id, self.exc_type.__name__, self.exc_msg + ) if getattr(self.exc_type, "message", None): raise self.exc_type(message=msg) raise self.exc_type(msg) @@ -204,12 +212,12 @@ class _WorkerException(object): # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-INIT_A = 0x43b0d7e5 -MULT_A = 0x931e8875 -INIT_B = 0x8b51f9dd -MULT_B = 0x58f38ded -MIX_MULT_L = 0xca01f9dd -MIX_MULT_R = 0x4973f715 +INIT_A = 0x43B0D7E5 +MULT_A = 0x931E8875 +INIT_B = 0x8B51F9DD +MULT_B = 0x58F38DED +MIX_MULT_L = 0xCA01F9DD +MIX_MULT_R = 0x4973F715 XSHIFT = np.dtype(np.uint32).itemsize * 8 // 2 MASK32 = 0xFFFFFFFF @@ -255,9 +263,21 @@ def _generate_states(base_seed=0, worker_id=0): return states -def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event, - auto_collate_batch, collate_fn, drop_last, init_fn, worker_id, - num_workers, use_shared_memory, base_seed): +def _worker_loop( + dataset, + dataset_kind, + indices_queue, + out_queue, + done_event, + auto_collate_batch, + collate_fn, + drop_last, + init_fn, + worker_id, + num_workers, + use_shared_memory, + base_seed, +): try: # NOTE: [ mmap files clear ] When the child process exits unexpectedly, # some shared memory objects may have been applied for but have not yet @@ -282,18 +302,20 @@ def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event, np.random.seed(_generate_states(base_seed, worker_id)) global _worker_info - _worker_info = WorkerInfo(id=worker_id, - num_workers=num_workers, - dataset=dataset, - seed=base_seed) + _worker_info = WorkerInfo( + id=worker_id, + num_workers=num_workers, + dataset=dataset, + seed=base_seed, + ) init_exception = None try: if init_fn is not None: init_fn(worker_id) - fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, - auto_collate_batch, - collate_fn, drop_last) + fetcher = _DatasetKind.create_fetcher( + dataset_kind, dataset, auto_collate_batch, collate_fn, drop_last + ) except: init_exception = _WorkerException(worker_id) @@ -309,15 +331,16 @@ def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event, if isinstance(data, _ResumeIteration): out_queue.put((data, None, None)) iterator_drained = False - fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, - auto_collate_batch, - collate_fn, True) + fetcher = _DatasetKind.create_fetcher( + dataset_kind, dataset, auto_collate_batch, collate_fn, True + ) continue # None as poison piil, so worker event should be set if data is None: - assert done_event.is_set() or iterator_drained, \ - "get None when worker done_event set" + assert ( + done_event.is_set() or iterator_drained + ), "get None when worker done_event set" break # If worker done event is set but get still get data in # indices_queue, remaining data should be get and skipped. 
@@ -338,8 +361,10 @@ def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event, with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()): batch = fetcher.fetch(indices) except Exception as e: - if isinstance( - e, StopIteration) and dataset_kind == _DatasetKind.ITER: + if ( + isinstance(e, StopIteration) + and dataset_kind == _DatasetKind.ITER + ): out_queue.put(_IterableDatasetStopIteration(worker_id)) iterator_drained = True else: @@ -355,10 +380,12 @@ def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event, if _in_eager_without_dygraph_check(): return core._array_to_share_memory_tensor(tensor) return tensor._share_memory() + tensor_list = [ core._array_to_share_memory_tensor(b) - if isinstance(b, np.ndarray) \ - else tensor_share_memory(b) for b in batch + if isinstance(b, np.ndarray) + else tensor_share_memory(b) + for b in batch ] out_queue.put((idx, tensor_list, structure)) core._remove_tensor_list_mmap_fds(tensor_list) diff --git a/python/paddle/fluid/dataset.py b/python/paddle/fluid/dataset.py index 4c7e8bb5378beb367296eaa91502a24709e0077d..ff68300d4a01b7519c9b739ff084b815801132fb 100644 --- a/python/paddle/fluid/dataset.py +++ b/python/paddle/fluid/dataset.py @@ -36,7 +36,7 @@ class DatasetFactory(object): """ def __init__(self): - """ Init. """ + """Init.""" pass def create_dataset(self, datafeed_class="QueueDataset"): @@ -59,15 +59,16 @@ class DatasetFactory(object): dataset = globals()[datafeed_class]() return dataset except: - raise ValueError("datafeed class %s does not exist" % - datafeed_class) + raise ValueError( + "datafeed class %s does not exist" % datafeed_class + ) class DatasetBase(object): - """ Base dataset class. """ + """Base dataset class.""" def __init__(self): - """ Init. """ + """Init.""" # define class name here # to decide whether we need create in memory instance self.proto_desc = data_feed_pb2.DataFeedDesc() @@ -374,7 +375,7 @@ class InMemoryDataset(DatasetBase): @deprecated(since="2.0.0", update_to="paddle.distributed.InMemoryDataset") def __init__(self): - """ Init. 
""" + """Init.""" super(InMemoryDataset, self).__init__() self.proto_desc.name = "MultiSlotInMemoryDataFeed" self.fleet_send_batch_size = None @@ -389,18 +390,22 @@ class InMemoryDataset(DatasetBase): self.fleet_send_sleep_seconds = None self.trainer_num = -1 - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_feed_type") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset._set_feed_type", + ) def set_feed_type(self, data_feed_type): """ Set data_feed_desc """ self.proto_desc.name = data_feed_type - if (self.proto_desc.name == "SlotRecordInMemoryDataFeed"): + if self.proto_desc.name == "SlotRecordInMemoryDataFeed": self.dataset = core.Dataset("SlotRecordDataset") - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._prepare_to_run") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset._prepare_to_run", + ) def _prepare_to_run(self): """ Set data_feed_desc before load or shuffle, @@ -423,8 +428,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to= - "paddle.distributed.InMemoryDataset._dynamic_adjust_before_train") + update_to="paddle.distributed.InMemoryDataset._dynamic_adjust_before_train", + ) def _dynamic_adjust_before_train(self, thread_num): if not self.is_user_set_queue_num: if self.use_ps_gpu: @@ -435,7 +440,7 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._dynamic_adjust_after_train" + update_to="paddle.distributed.InMemoryDataset._dynamic_adjust_after_train", ) def _dynamic_adjust_after_train(self): if not self.is_user_set_queue_num: @@ -445,8 +450,10 @@ class InMemoryDataset(DatasetBase): self.dataset.dynamic_adjust_channel_num(self.thread_num, False) self.dataset.dynamic_adjust_readers_num(self.thread_num) - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_queue_num") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset._set_queue_num", + ) def set_queue_num(self, queue_num): """ Set Dataset output queue num, training threads get data from queues @@ -465,9 +472,10 @@ class InMemoryDataset(DatasetBase): self.is_user_set_queue_num = True self.queue_num = queue_num - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_parse_ins_id" - ) + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset._set_parse_ins_id", + ) def set_parse_ins_id(self, parse_ins_id): """ Set id Dataset need to parse insid @@ -487,7 +495,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_parse_content") + update_to="paddle.distributed.InMemoryDataset._set_parse_content", + ) def set_parse_content(self, parse_content): """ Set if Dataset need to parse content @@ -539,9 +548,10 @@ class InMemoryDataset(DatasetBase): """ self.trainer_num = trainer_num - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_merge_by_sid" - ) + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset._set_merge_by_sid", + ) def set_merge_by_sid(self, merge_by_sid): """ Set if Dataset need to merge sid. If not, one ins means one Pv. 
@@ -633,7 +643,7 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_fleet_send_batch_size" + update_to="paddle.distributed.InMemoryDataset._set_fleet_send_batch_size", ) def set_fleet_send_batch_size(self, fleet_send_batch_size=1024): """ @@ -654,8 +664,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to= - "paddle.distributed.InMemoryDataset._set_fleet_send_sleep_seconds") + update_to="paddle.distributed.InMemoryDataset._set_fleet_send_sleep_seconds", + ) def set_fleet_send_sleep_seconds(self, fleet_send_sleep_seconds=0): """ Set fleet send sleep time, default is 0 @@ -675,7 +685,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset._set_merge_by_lineid") + update_to="paddle.distributed.InMemoryDataset._set_merge_by_lineid", + ) def set_merge_by_lineid(self, merge_size=2): """ Set merge by line id, instances of same line id will be merged after @@ -698,8 +709,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to= - "paddle.distributed.InMemoryDataset._set_generate_unique_feasigns") + update_to="paddle.distributed.InMemoryDataset._set_generate_unique_feasigns", + ) def set_generate_unique_feasigns(self, generate_uni_feasigns, shard_num): self.dataset.set_generate_unique_feasigns(generate_uni_feasigns) self.gen_uni_feasigns = generate_uni_feasigns @@ -707,13 +718,14 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to= - "paddle.distributed.InMemoryDataset._generate_local_tables_unlock") - def generate_local_tables_unlock(self, table_id, fea_dim, read_thread_num, - consume_thread_num, shard_num): - self.dataset.generate_local_tables_unlock(table_id, fea_dim, - read_thread_num, - consume_thread_num, shard_num) + update_to="paddle.distributed.InMemoryDataset._generate_local_tables_unlock", + ) + def generate_local_tables_unlock( + self, table_id, fea_dim, read_thread_num, consume_thread_num, shard_num + ): + self.dataset.generate_local_tables_unlock( + table_id, fea_dim, read_thread_num, consume_thread_num, shard_num + ) def set_date(self, date): """ @@ -738,8 +750,10 @@ class InMemoryDataset(DatasetBase): if self.use_ps_gpu and core._is_compiled_with_heterps(): self.psgpu.set_date(year, month, day) - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.load_into_memory") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset.load_into_memory", + ) def load_into_memory(self, is_shuffle=False): """ Load data into memory @@ -766,7 +780,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.preload_into_memory") + update_to="paddle.distributed.InMemoryDataset.preload_into_memory", + ) def preload_into_memory(self, thread_num=None): """ Load data into memory in async mode @@ -792,9 +807,10 @@ class InMemoryDataset(DatasetBase): self.dataset.create_preload_readers() self.dataset.preload_into_memory() - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.wait_preload_done" - ) + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset.wait_preload_done", + ) def wait_preload_done(self): """ Wait preload_into_memory done @@ -813,8 +829,10 @@ class InMemoryDataset(DatasetBase): self.dataset.wait_preload_done() self.dataset.destroy_preload_readers() - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.local_shuffle") 
+ @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset.local_shuffle", + ) def local_shuffle(self): """ Local shuffle @@ -832,8 +850,10 @@ class InMemoryDataset(DatasetBase): """ self.dataset.local_shuffle() - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.global_shuffle") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset.global_shuffle", + ) def global_shuffle(self, fleet=None, thread_num=12): """ Global shuffle. @@ -893,8 +913,10 @@ class InMemoryDataset(DatasetBase): else: fleet._role_maker.barrier_worker() - @deprecated(since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.release_memory") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.InMemoryDataset.release_memory", + ) def release_memory(self): """ :api_attr: Static Graph @@ -946,7 +968,8 @@ class InMemoryDataset(DatasetBase): @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.get_memory_data_size") + update_to="paddle.distributed.InMemoryDataset.get_memory_data_size", + ) def get_memory_data_size(self, fleet=None): """ Get memory data size, user can call this function to know the num @@ -975,18 +998,21 @@ class InMemoryDataset(DatasetBase): """ import numpy as np + local_data_size = self.dataset.get_memory_data_size() local_data_size = np.array([local_data_size]) if fleet is not None: global_data_size = local_data_size * 0 - fleet._role_maker.all_reduce_worker(local_data_size, - global_data_size) + fleet._role_maker.all_reduce_worker( + local_data_size, global_data_size + ) return global_data_size[0] return local_data_size[0] @deprecated( since="2.0.0", - update_to="paddle.distributed.InMemoryDataset.get_shuffle_data_size") + update_to="paddle.distributed.InMemoryDataset.get_shuffle_data_size", + ) def get_shuffle_data_size(self, fleet=None): """ Get shuffle data size, user can call this function to know the num @@ -1017,6 +1043,7 @@ class InMemoryDataset(DatasetBase): """ import numpy as np + local_data_size = self.dataset.get_shuffle_data_size() local_data_size = np.array([local_data_size]) print('global shuffle local_data_size: ', local_data_size) @@ -1025,8 +1052,9 @@ class InMemoryDataset(DatasetBase): if hasattr(fleet, "util"): global_data_size = fleet.util.all_reduce(local_data_size) else: - fleet._role_maker.all_reduce_worker(local_data_size, - global_data_size) + fleet._role_maker.all_reduce_worker( + local_data_size, global_data_size + ) return global_data_size[0] return local_data_size[0] @@ -1070,16 +1098,20 @@ class InMemoryDataset(DatasetBase): self.proto_desc.graph_config.walk_len = config.get("walk_len", 20) self.proto_desc.graph_config.window = config.get("window", 5) self.proto_desc.graph_config.once_sample_startid_len = config.get( - "once_sample_startid_len", 8000) + "once_sample_startid_len", 8000 + ) self.proto_desc.graph_config.sample_times_one_chunk = config.get( - "sample_times_one_chunk", 10) + "sample_times_one_chunk", 10 + ) self.proto_desc.graph_config.batch_size = config.get("batch_size", 1) self.proto_desc.graph_config.debug_mode = config.get("debug_mode", 0) self.proto_desc.graph_config.first_node_type = config.get( - "first_node_type", "") + "first_node_type", "" + ) self.proto_desc.graph_config.meta_path = config.get("meta_path", "") self.proto_desc.graph_config.gpu_graph_training = config.get( - "gpu_graph_training", True) + "gpu_graph_training", True + ) self.dataset.set_gpu_graph_mode(True) @@ -1103,8 +1135,10 @@ class QueueDataset(DatasetBase): 
super(QueueDataset, self).__init__() self.proto_desc.name = "MultiSlotDataFeed" - @deprecated(since="2.0.0", - update_to="paddle.distributed.QueueDataset._prepare_to_run") + @deprecated( + since="2.0.0", + update_to="paddle.distributed.QueueDataset._prepare_to_run", + ) def _prepare_to_run(self): """ Set data_feed_desc/thread num/filelist before run, @@ -1139,7 +1173,8 @@ class QueueDataset(DatasetBase): """ raise NotImplementedError( "QueueDataset does not support local shuffle, " - "please use InMemoryDataset for local_shuffle") + "please use InMemoryDataset for local_shuffle" + ) def global_shuffle(self, fleet=None): """ @@ -1165,7 +1200,8 @@ class QueueDataset(DatasetBase): """ raise NotImplementedError( "QueueDataset does not support global shuffle, " - "please use InMemoryDataset for global_shuffle") + "please use InMemoryDataset for global_shuffle" + ) class FileInstantDataset(DatasetBase): @@ -1194,7 +1230,8 @@ class FileInstantDataset(DatasetBase): """ raise NotImplementedError( "FileInstantDataset does not support local shuffle, " - "please use InMemoryDataset for local_shuffle") + "please use InMemoryDataset for local_shuffle" + ) def global_shuffle(self, fleet=None): """ @@ -1203,7 +1240,8 @@ class FileInstantDataset(DatasetBase): """ raise NotImplementedError( "FileInstantDataset does not support global shuffle, " - "please use InMemoryDataset for global_shuffle") + "please use InMemoryDataset for global_shuffle" + ) class BoxPSDataset(InMemoryDataset): @@ -1289,7 +1327,7 @@ class BoxPSDataset(InMemoryDataset): filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() - """ + """ self._prepare_to_run() self.boxps.load_into_memory() diff --git a/python/paddle/fluid/debugger.py b/python/paddle/fluid/debugger.py index 0c2d61db2d295dd99f870348709727b527c03768..36f38df38e356a94b5e79a22c961c21b61895713 100644 --- a/python/paddle/fluid/debugger.py +++ b/python/paddle/fluid/debugger.py @@ -52,8 +52,9 @@ def repr_data_type(type): def repr_tensor(proto): - return "tensor(type={}, shape={})".format(_dtype2str_[int(proto.data_type)], - str(proto.dims)) + return "tensor(type={}, shape={})".format( + _dtype2str_[int(proto.data_type)], str(proto.dims) + ) reprtpl = "{ttype} {name} ({reprs})" @@ -65,30 +66,37 @@ def repr_lodtensor(proto): level = proto.type.lod_tensor.lod_level reprs = repr_tensor(proto.type.lod_tensor.tensor) - return reprtpl.format(ttype="LoDTensor" if level > 0 else "Tensor", - name=proto.name, - reprs="level=%d, %s" % - (level, reprs) if level > 0 else reprs) + return reprtpl.format( + ttype="LoDTensor" if level > 0 else "Tensor", + name=proto.name, + reprs="level=%d, %s" % (level, reprs) if level > 0 else reprs, + ) def repr_selected_rows(proto): if proto.type.type != framework_pb2.VarType.SELECTED_ROWS: return - return reprtpl.format(ttype="SelectedRows", - name=proto.name, - reprs=repr_tensor(proto.type.selected_rows)) + return reprtpl.format( + ttype="SelectedRows", + name=proto.name, + reprs=repr_tensor(proto.type.selected_rows), + ) def repr_tensor_array(proto): if proto.type.type != framework_pb2.VarType.LOD_TENSOR_ARRAY: return - return reprtpl.format(ttype="TensorArray", - name=proto.name, - reprs="level=%d, %s" % - (proto.type.tensor_array.lod_level, - repr_tensor(proto.type.lod_tensor.tensor))) + return reprtpl.format( + ttype="TensorArray", + name=proto.name, + reprs="level=%d, %s" + % ( + proto.type.tensor_array.lod_level, + repr_tensor(proto.type.lod_tensor.tensor), + ), + ) type_handlers = [ @@ -115,19 +123,23 @@ def 
pprint_program_codes(program_desc): def pprint_block_codes(block_desc, show_backward=False): - def is_op_backward(op_desc): - if op_desc.type.endswith('_grad'): return True + if op_desc.type.endswith('_grad'): + return True def is_var_backward(var): - if "@GRAD" in var.parameter: return True + if "@GRAD" in var.parameter: + return True for arg in var.arguments: - if "@GRAD" in arg: return True + if "@GRAD" in arg: + return True for var in op_desc.inputs: - if is_var_backward(var): return True + if is_var_backward(var): + return True for var in op_desc.outputs: - if is_var_backward(var): return True + if is_var_backward(var): + return True return False def is_var_backward(var_desc): @@ -135,7 +147,8 @@ def pprint_block_codes(block_desc, show_backward=False): if type(block_desc) is not framework_pb2.BlockDesc: block_desc = framework_pb2.BlockDesc.FromString( - block_desc.desc.serialize_to_string()) + block_desc.desc.serialize_to_string() + ) var_reprs = [] op_reprs = [] for var in block_desc.vars: @@ -144,7 +157,8 @@ def pprint_block_codes(block_desc, show_backward=False): var_reprs.append(repr_var(var)) for op in block_desc.ops: - if not show_backward and is_op_backward(op): continue + if not show_backward and is_op_backward(op): + continue op_reprs.append(repr_op(op)) tpl = "// block-{idx} parent-{pidx}\n// variables\n{vars}\n\n// operators\n{ops}\n" @@ -182,7 +196,8 @@ def _repr_op_fill_constant(optype, inputs, outputs, attrs): return "{output} = {data} [shape={shape}]".format( output=','.join(outputs), data=attrs['value'], - shape=str(attrs['shape'])) + shape=str(attrs['shape']), + ) op_repr_handlers = [ @@ -216,13 +231,16 @@ def repr_op(opdesc): for handler in op_repr_handlers: res = handler(opdesc.type, inputs, outputs, attr_dict) - if res: return res + if res: + return res - return tpl.format(outputs=', '.join(outputs), - optype=opdesc.type, - inputs=', '.join(inputs), - attrs="{%s}" % ','.join(attrs), - is_target=", is_target" if is_target else "") + return tpl.format( + outputs=', '.join(outputs), + optype=opdesc.type, + inputs=', '.join(inputs), + attrs="{%s}" % ','.join(attrs), + is_target=", is_target" if is_target else "", + ) def draw_block_graphviz(block, highlights=None, path="./temp.dot"): @@ -237,7 +255,8 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): desc = framework_pb2.BlockDesc.FromString(bytes(protostr)) def need_highlight(name): - if highlights is None: return False + if highlights is None: + return False for pattern in highlights: assert type(pattern) is str if re.match(pattern, name): @@ -250,9 +269,11 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): # TODO(gongwb): format the var.type # create var if var.persistable: - varn = graph.add_param(var.name, - str(var.type).replace("\n", "
", 1), - highlight=need_highlight(var.name)) + varn = graph.add_param( + var.name, + str(var.type).replace("\n", "
", 1), + highlight=need_highlight(var.name), + ) else: varn = graph.add_arg(var.name, highlight=need_highlight(var.name)) vars[var.name] = varn @@ -264,7 +285,8 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): vars[arg] = graph.add_arg(arg, highlight=need_highlight(arg)) varn = vars[arg] highlight = need_highlight(op.description) or need_highlight( - varn.description) + varn.description + ) if op2var: graph.add_edge(op, varn, highlight=highlight) else: diff --git a/python/paddle/fluid/device_worker.py b/python/paddle/fluid/device_worker.py index bf264f5a64703b1d41a360ab2dc2677b5b27bd5d..ee82c7ebbdc10b12e35ce1211af3dee76aefd32f 100644 --- a/python/paddle/fluid/device_worker.py +++ b/python/paddle/fluid/device_worker.py @@ -14,8 +14,12 @@ """Defination of device workers.""" __all__ = [ - 'DeviceWorker', 'Hogwild', 'DownpourSGD', 'Section', 'DownpourSGDOPT', - 'HeterSection' + 'DeviceWorker', + 'Hogwild', + 'DownpourSGD', + 'Section', + 'DownpourSGDOPT', + 'HeterSection', ] @@ -67,7 +71,8 @@ class DeviceWorker(object): """ raise NotImplementedError( "DeviceWorker does not implement gen_worker_desc, " - "please use Hogwild or DownpourSGD, etc.") + "please use Hogwild or DownpourSGD, etc." + ) class Hogwild(DeviceWorker): @@ -90,10 +95,16 @@ class Hogwild(DeviceWorker): trainer_desc.device_worker_name = "HogwildWorker" if self._infer: # just ignore feed op for inference model - trainer_desc.hogwild_param.skip_ops.extend([ - "feed", "push_sparse", "push_sparse_v2", "push_dense", - "distributed_push_sparse", "send" - ]) + trainer_desc.hogwild_param.skip_ops.extend( + [ + "feed", + "push_sparse", + "push_sparse_v2", + "push_dense", + "distributed_push_sparse", + "send", + ] + ) dense_table_set = set() program_id = str(id(self._program)) @@ -114,8 +125,11 @@ class Hogwild(DeviceWorker): from paddle.fluid.incubate.fleet.parameter_server import version - if version.is_transpiler( - ) and "fleet_desc" not in opt_info and "program_configs" not in opt_info: + if ( + version.is_transpiler() + and "fleet_desc" not in opt_info + and "program_configs" not in opt_info + ): return program_configs = opt_info["program_configs"] @@ -126,8 +140,10 @@ class Hogwild(DeviceWorker): if pid == program_id: pc = downpour.program_config.add() pc.program_id = program_id - print("device worker pull dense:", - program_configs[program_id]["pull_dense"]) + print( + "device worker pull dense:", + program_configs[program_id]["pull_dense"], + ) for i in program_configs[program_id]["push_sparse"]: pc.push_sparse_table_id.extend([i]) for i in program_configs[program_id]["push_dense"]: @@ -143,36 +159,43 @@ class Hogwild(DeviceWorker): trainer_desc.device_worker_name = "HogwildWorker" pull_thread = trainer_desc.pull_dense_param pull_thread.device_num = trainer_desc.thread_num - if opt_info.get("program_id_to_worker") is None and opt_info.get( - "dense_table_config") is None: + if ( + opt_info.get("program_id_to_worker") is None + and opt_info.get("dense_table_config") is None + ): raise ValueError( - "opt_info must have program_id_to_worker or dense_table_config") + "opt_info must have program_id_to_worker or dense_table_config" + ) if opt_info.get("program_id_to_worker") is not None: prog_id_to_worker = opt_info["program_id_to_worker"] if prog_id_to_worker.get(program_id) is None: - raise ValueError("%s not found in program_id_to_worker" % - program_id) + raise ValueError( + "%s not found in program_id_to_worker" % program_id + ) worker = opt_info["program_id_to_worker"][program_id] for i in 
worker.get_desc().dense_table: if i.table_id in dense_table_set: dense_table = pull_thread.dense_table.add() dense_table.dense_value_name.extend(i.dense_variable_name) - dense_table.table_id = \ - i.table_id + dense_table.table_id = i.table_id sparse_len = len(worker.get_desc().sparse_table) for i in range(sparse_len): sparse_table = downpour.sparse_table.add() - sparse_table.table_id = worker.get_desc( - ).sparse_table[i].table_id + sparse_table.table_id = ( + worker.get_desc().sparse_table[i].table_id + ) sparse_table.sparse_key_name.extend( - worker.get_desc().sparse_table[i].slot_key) + worker.get_desc().sparse_table[i].slot_key + ) sparse_table.sparse_value_name.extend( - worker.get_desc().sparse_table[i].slot_value) + worker.get_desc().sparse_table[i].slot_value + ) sparse_table.sparse_grad_name.extend( - worker.get_desc().sparse_table[i].slot_gradient) - sparse_table.fea_dim = \ - self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim + worker.get_desc().sparse_table[i].slot_gradient + ) + sparse_table.fea_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ + i + ].accessor.fea_dim # not use emb_dim sparse_table.emb_dim = -1 # not use hard code click @@ -184,7 +207,8 @@ class Hogwild(DeviceWorker): dense_table.table_id = i.table_id dense_table.dense_value_name.extend(i.dense_variable_name) dense_table.dense_grad_name.extend( - i.dense_gradient_variable_name) + i.dense_gradient_variable_name + ) hogwild.skip_ops.extend(worker.get_desc().skip_op) else: dense_table_config = opt_info.get("dense_table_config") @@ -196,7 +220,8 @@ class Hogwild(DeviceWorker): if self._infer: hogwild.skip_ops.extend( - ["push_sparse", "push_sparse_v2", "push_dense"]) + ["push_sparse", "push_sparse_v2", "push_dense"] + ) class DownpourLite(DeviceWorker): @@ -220,10 +245,16 @@ class DownpourLite(DeviceWorker): trainer_desc.device_worker_name = "DownpourLiteWorker" if self._infer: # just ignore feed op for inference model - trainer_desc.downpour_param.skip_ops.extend([ - "feed", "push_sparse", "push_sparse_v2", "push_dense", - "distributed_push_sparse", "send" - ]) + trainer_desc.downpour_param.skip_ops.extend( + [ + "feed", + "push_sparse", + "push_sparse_v2", + "push_dense", + "distributed_push_sparse", + "send", + ] + ) dense_table_set = set() program_id = str(id(self._program)) @@ -242,8 +273,11 @@ class DownpourLite(DeviceWorker): from paddle.fluid.incubate.fleet.parameter_server import version - if version.is_transpiler( - ) and "fleet_desc" not in opt_info and "program_configs" not in opt_info: + if ( + version.is_transpiler() + and "fleet_desc" not in opt_info + and "program_configs" not in opt_info + ): return program_configs = opt_info["program_configs"] @@ -254,8 +288,10 @@ class DownpourLite(DeviceWorker): if pid == program_id: pc = downpour.program_config.add() pc.program_id = program_id - print("device worker pull dense:", - program_configs[program_id]["pull_dense"]) + print( + "device worker pull dense:", + program_configs[program_id]["pull_dense"], + ) for i in program_configs[program_id]["push_sparse"]: pc.push_sparse_table_id.extend([i]) for i in program_configs[program_id]["push_dense"]: @@ -270,36 +306,43 @@ class DownpourLite(DeviceWorker): pull_thread = trainer_desc.pull_dense_param pull_thread.device_num = trainer_desc.thread_num - if opt_info.get("program_id_to_worker") is None and opt_info.get( - "dense_table_config") is None: + if ( + opt_info.get("program_id_to_worker") is None + and 
opt_info.get("dense_table_config") is None + ): raise ValueError( - "opt_info must have program_id_to_worker or dense_table_config") + "opt_info must have program_id_to_worker or dense_table_config" + ) if opt_info.get("program_id_to_worker") is not None: prog_id_to_worker = opt_info["program_id_to_worker"] if prog_id_to_worker.get(program_id) is None: - raise ValueError("%s not found in program_id_to_worker" % - program_id) + raise ValueError( + "%s not found in program_id_to_worker" % program_id + ) worker = opt_info["program_id_to_worker"][program_id] for i in worker.get_desc().dense_table: if i.table_id in dense_table_set: dense_table = pull_thread.dense_table.add() dense_table.dense_value_name.extend(i.dense_variable_name) - dense_table.table_id = \ - i.table_id + dense_table.table_id = i.table_id sparse_len = len(worker.get_desc().sparse_table) for i in range(sparse_len): sparse_table = downpour.sparse_table.add() - sparse_table.table_id = worker.get_desc( - ).sparse_table[i].table_id + sparse_table.table_id = ( + worker.get_desc().sparse_table[i].table_id + ) sparse_table.sparse_key_name.extend( - worker.get_desc().sparse_table[i].slot_key) + worker.get_desc().sparse_table[i].slot_key + ) sparse_table.sparse_value_name.extend( - worker.get_desc().sparse_table[i].slot_value) + worker.get_desc().sparse_table[i].slot_value + ) sparse_table.sparse_grad_name.extend( - worker.get_desc().sparse_table[i].slot_gradient) - sparse_table.fea_dim = \ - self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim + worker.get_desc().sparse_table[i].slot_gradient + ) + sparse_table.fea_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ + i + ].accessor.fea_dim # not use emb_dim sparse_table.emb_dim = -1 # not use hard code click @@ -311,7 +354,8 @@ class DownpourLite(DeviceWorker): dense_table.table_id = i.table_id dense_table.dense_value_name.extend(i.dense_variable_name) dense_table.dense_grad_name.extend( - i.dense_gradient_variable_name) + i.dense_gradient_variable_name + ) downpour.skip_ops.extend(worker.get_desc().skip_op) else: dense_table_config = opt_info.get("dense_table_config") @@ -323,7 +367,8 @@ class DownpourLite(DeviceWorker): if self._infer: downpour.skip_ops.extend( - ["push_sparse", "push_sparse_v2", "push_dense"]) + ["push_sparse", "push_sparse_v2", "push_dense"] + ) class DownpourSGD(DeviceWorker): @@ -377,43 +422,53 @@ class DownpourSGD(DeviceWorker): mc_map.value = value break - trainer_desc.device_worker_name = opt_info.get("worker_class", - "DownpourWorker") + trainer_desc.device_worker_name = opt_info.get( + "worker_class", "DownpourWorker" + ) pull_thread = trainer_desc.pull_dense_param pull_thread.device_num = trainer_desc.thread_num if opt_info.get("program_id_to_worker") is None: raise ValueError("opt_info must have program_id_to_worker") prog_id_to_worker = opt_info["program_id_to_worker"] if prog_id_to_worker.get(program_id) is None: - raise ValueError("%s not found in program_id_to_worker" % - program_id) + raise ValueError( + "%s not found in program_id_to_worker" % program_id + ) worker = opt_info["program_id_to_worker"][program_id] for i in worker.get_desc().dense_table: if i.table_id in dense_table_set: dense_table = pull_thread.dense_table.add() dense_table.dense_value_name.extend(i.dense_variable_name) - dense_table.table_id = \ - i.table_id + dense_table.table_id = i.table_id sparse_len = len(worker.get_desc().sparse_table) for i in range(sparse_len): sparse_table = downpour.sparse_table.add() 
sparse_table.table_id = worker.get_desc().sparse_table[i].table_id sparse_table.sparse_key_name.extend( - worker.get_desc().sparse_table[i].slot_key) + worker.get_desc().sparse_table[i].slot_key + ) sparse_table.sparse_value_name.extend( - worker.get_desc().sparse_table[i].slot_value) + worker.get_desc().sparse_table[i].slot_value + ) sparse_table.sparse_grad_name.extend( - worker.get_desc().sparse_table[i].slot_gradient) - if opt_info["use_cvm"] or "no_cvm" in opt_info and opt_info[ - "no_cvm"] == True: - sparse_table.emb_dim = \ - self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim + worker.get_desc().sparse_table[i].slot_gradient + ) + if ( + opt_info["use_cvm"] + or "no_cvm" in opt_info + and opt_info["no_cvm"] == True + ): + sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ + i + ].accessor.fea_dim sparse_table.fea_dim = sparse_table.emb_dim else: - sparse_table.emb_dim = \ + sparse_table.emb_dim = ( self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim - 2 + i + ].accessor.fea_dim + - 2 + ) sparse_table.fea_dim = sparse_table.emb_dim + 2 # TODO(guru4elephant): hard code here, need to improve sparse_table.label_var_name = "click" @@ -427,7 +482,8 @@ class DownpourSGD(DeviceWorker): dense_table.table_id = i.table_id dense_table.dense_value_name.extend(i.dense_variable_name) dense_table.dense_grad_name.extend( - i.dense_gradient_variable_name) + i.dense_gradient_variable_name + ) downpour.skip_ops.extend(worker.get_desc().skip_op) if self._infer: downpour.push_dense = False @@ -485,43 +541,56 @@ class DownpourSGDOPT(DeviceWorker): raise ValueError("opt_info must have program_id_to_worker") prog_id_to_worker = opt_info["program_id_to_worker"] if prog_id_to_worker.get(program_id) is None: - raise ValueError("%s not found in program_id_to_worker" % - program_id) + raise ValueError( + "%s not found in program_id_to_worker" % program_id + ) worker = opt_info["program_id_to_worker"][program_id] for i in worker.get_desc().dense_table: if i.table_id in dense_table_set: dense_table = pull_thread.dense_table.add() dense_table.dense_value_name.extend(i.dense_variable_name) - dense_table.table_id = \ - i.table_id + dense_table.table_id = i.table_id sparse_len = len(worker.get_desc().sparse_table) for i in range(sparse_len): sparse_table = downpour.sparse_table.add() sparse_table.table_id = worker.get_desc().sparse_table[i].table_id sparse_table.sparse_key_name.extend( - worker.get_desc().sparse_table[i].slot_key) + worker.get_desc().sparse_table[i].slot_key + ) sparse_table.sparse_value_name.extend( - worker.get_desc().sparse_table[i].slot_value) + worker.get_desc().sparse_table[i].slot_value + ) sparse_table.sparse_grad_name.extend( - worker.get_desc().sparse_table[i].slot_gradient) - if opt_info["use_cvm"] or "no_cvm" in opt_info and opt_info[ - "no_cvm"] == True: - sparse_table.emb_dim = \ - self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim + worker.get_desc().sparse_table[i].slot_gradient + ) + if ( + opt_info["use_cvm"] + or "no_cvm" in opt_info + and opt_info["no_cvm"] == True + ): + sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ + i + ].accessor.fea_dim sparse_table.fea_dim = sparse_table.emb_dim else: - sparse_table.emb_dim = \ + sparse_table.emb_dim = ( self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ - i].accessor.fea_dim - 2 + i + 
].accessor.fea_dim + - 2 + ) sparse_table.fea_dim = sparse_table.emb_dim + 2 # TODO(guru4elephant): hard code here, need to improve sparse_table.label_var_name = "click" - if "local_tables" in opt_info and sparse_table.table_id in opt_info[ - "local_tables"]: + if ( + "local_tables" in opt_info + and sparse_table.table_id in opt_info["local_tables"] + ): sparse_table.is_local = True - if "async_tables" in opt_info and sparse_table.table_id in opt_info[ - "async_tables"]: + if ( + "async_tables" in opt_info + and sparse_table.table_id in opt_info["async_tables"] + ): sparse_table.is_async = True if opt_info["stat_var_names"]: for i in opt_info["stat_var_names"]: @@ -533,7 +602,8 @@ class DownpourSGDOPT(DeviceWorker): dense_table.table_id = i.table_id dense_table.dense_value_name.extend(i.dense_variable_name) dense_table.dense_grad_name.extend( - i.dense_gradient_variable_name) + i.dense_gradient_variable_name + ) downpour.skip_ops.extend(worker.get_desc().skip_op) if self._infer: downpour.push_dense = False @@ -555,6 +625,7 @@ class Section(DeviceWorker): """ from google.protobuf import text_format from . import core + trainer_desc.device_worker_name = "SectionWorker" pipeline_opt = self._program._pipeline_opt section_param = trainer_desc.section_param @@ -567,16 +638,16 @@ class Section(DeviceWorker): # then runs Backward phase for all microbatches. # 1F1B scheduler, which runs forward phase and backward phase altertively # after startup phase. - assert schedule_mode_str in [ - "F-then-B", "1F1B" - ], ("The schedule mode " - "for pipeline must be one of F-then-B or 1F1B") + assert schedule_mode_str in ["F-then-B", "1F1B"], ( + "The schedule mode " "for pipeline must be one of F-then-B or 1F1B" + ) schedule_mode = 0 if schedule_mode_str == "F-then-B" else 1 section_param.schedule_mode = schedule_mode cfg = section_param.section_config program = pipeline_opt["section_program"] cfg.program_desc.ParseFromString( - program._get_desc().serialize_to_string()) + program._get_desc().serialize_to_string() + ) # TODO: why does not work # cfg.program_desc.CopyFrom(program.program._get_desc()) place = pipeline_opt["place"] @@ -604,23 +675,27 @@ class HeterSection(DeviceWorker): """ from google.protobuf import text_format from . 
import core + trainer_desc.device_worker_name = "HeterSectionWorker" heter_pipeline_opt = self._program._heter_pipeline_opt heter_section_param = trainer_desc.heter_section_param heter_section_param.num_microbatches = heter_pipeline_opt[ - "num_microbatches"] + "num_microbatches" + ] heter_section_param.pipeline_stage = heter_pipeline_opt[ - "pipeline_stage"] + "pipeline_stage" + ] heter_section_param.num_pipeline_stages = heter_pipeline_opt[ - "num_pipeline_stages"] + "num_pipeline_stages" + ] cfg = heter_section_param.section_config program = heter_pipeline_opt["section_program"] cfg.program_desc.ParseFromString( - program._get_desc().serialize_to_string()) + program._get_desc().serialize_to_string() + ) class DeviceWorkerFactory(object): - def _create_device_worker(self, worker_type): classname = worker_type.capitalize() return globals()[classname]() diff --git a/python/paddle/fluid/distribute_lookup_table.py b/python/paddle/fluid/distribute_lookup_table.py index 74824f6832442d5090e0cea2962ca2f68b7a0181..cff2388bfdbe02886e27ea491407d44ac509c0b9 100644 --- a/python/paddle/fluid/distribute_lookup_table.py +++ b/python/paddle/fluid/distribute_lookup_table.py @@ -70,8 +70,10 @@ def find_distributed_lookup_table(program): if table_name is None: table_name = op.input("W")[0] if table_name != op.input("W")[0]: - raise RuntimeError("all distributed lookup_table_ops" - " should have only one table") + raise RuntimeError( + "all distributed lookup_table_ops" + " should have only one table" + ) else: if table_name is not None: assert op.input("W")[0] != table_name diff --git a/python/paddle/fluid/distributed/downpour.py b/python/paddle/fluid/distributed/downpour.py index 69d592d0d5f72f7f96d1db872c94ef8a01917345..c710b7337ccb0a51c0d29c5da85ff851a0f71812 100644 --- a/python/paddle/fluid/distributed/downpour.py +++ b/python/paddle/fluid/distributed/downpour.py @@ -16,8 +16,12 @@ from .node import DownpourWorker from ..backward import append_backward import ps_pb2 as pslib from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table -from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table_inputs -from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table_outputs +from paddle.fluid.distribute_lookup_table import ( + find_distributed_lookup_table_inputs, +) +from paddle.fluid.distribute_lookup_table import ( + find_distributed_lookup_table_outputs, +) from google.protobuf import text_format @@ -47,15 +51,21 @@ class DownpourSGD(object): self.window_ = window self.type = "downpour" self.data_norm_name = [ - ".batch_size", ".batch_square_sum", ".batch_sum", - ".batch_size@GRAD", ".batch_square_sum@GRAD", ".batch_sum@GRAD" + ".batch_size", + ".batch_square_sum", + ".batch_sum", + ".batch_size@GRAD", + ".batch_square_sum@GRAD", + ".batch_sum@GRAD", ] - def minimize(self, - losses, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, + losses, + startup_program=None, + parameter_list=None, + no_grad_set=None, + ): """ DownpounSGD is a distributed optimizer so that user can call minimize to generate backward @@ -76,30 +86,44 @@ class DownpourSGD(object): raise ValueError('losses is a list, just lick [model.cost]') table_name = find_distributed_lookup_table(losses[0].block.program) prefetch_slots = find_distributed_lookup_table_inputs( - losses[0].block.program, table_name) + losses[0].block.program, table_name + ) prefetch_slots_emb = find_distributed_lookup_table_outputs( - losses[0].block.program, table_name) 
+ losses[0].block.program, table_name + ) ps_param = pslib.PSParameter() server = DownpourServer() worker = DownpourWorker(self.window_) sparse_table_index = 0 - server.add_sparse_table(sparse_table_index, self.learning_rate_, - prefetch_slots, prefetch_slots_emb) - worker.add_sparse_table(sparse_table_index, self.learning_rate_, - prefetch_slots, prefetch_slots_emb) + server.add_sparse_table( + sparse_table_index, + self.learning_rate_, + prefetch_slots, + prefetch_slots_emb, + ) + worker.add_sparse_table( + sparse_table_index, + self.learning_rate_, + prefetch_slots, + prefetch_slots_emb, + ) dense_table_index = 1 program_configs = [] param_grads_list = [] for loss_index in range(len(losses)): program_config = ps_param.trainer_param.program_config.add() - program_config.program_id = str(id( - losses[loss_index].block.program)) + program_config.program_id = str( + id(losses[loss_index].block.program) + ) program_config.pull_sparse_table_id.extend([sparse_table_index]) program_config.push_sparse_table_id.extend([sparse_table_index]) - params_grads = sorted(append_backward(losses[loss_index], - parameter_list, no_grad_set), - key=lambda x: x[0].name) + params_grads = sorted( + append_backward( + losses[loss_index], parameter_list, no_grad_set + ), + key=lambda x: x[0].name, + ) param_grads_list.append(params_grads) params = [] grads = [] @@ -121,19 +145,28 @@ class DownpourSGD(object): data_norm_grads.append(i[1]) if not is_data_norm_data: grads.append(i[1]) - server.add_dense_table(dense_table_index, self.learning_rate_, - params, grads) - worker.add_dense_table(dense_table_index, self.learning_rate_, - params, grads) + server.add_dense_table( + dense_table_index, self.learning_rate_, params, grads + ) + worker.add_dense_table( + dense_table_index, self.learning_rate_, params, grads + ) program_config.pull_dense_table_id.extend([dense_table_index]) program_config.push_dense_table_id.extend([dense_table_index]) if len(data_norm_params) != 0 and len(data_norm_grads) != 0: dense_table_index += 1 - server.add_data_norm_table(dense_table_index, - self.learning_rate_, - data_norm_params, data_norm_grads) - worker.add_dense_table(dense_table_index, self.learning_rate_, - data_norm_params, data_norm_grads) + server.add_data_norm_table( + dense_table_index, + self.learning_rate_, + data_norm_params, + data_norm_grads, + ) + worker.add_dense_table( + dense_table_index, + self.learning_rate_, + data_norm_params, + data_norm_grads, + ) program_config.pull_dense_table_id.extend([dense_table_index]) program_config.push_dense_table_id.extend([dense_table_index]) dense_table_index += 1 diff --git a/python/paddle/fluid/distributed/fleet.py b/python/paddle/fluid/distributed/fleet.py index 9874492a41323889ab8ecd880f3e3134362cca1c..08b46ce04733ab3c82e3d2bc33ec99656fb95c0f 100644 --- a/python/paddle/fluid/distributed/fleet.py +++ b/python/paddle/fluid/distributed/fleet.py @@ -19,9 +19,7 @@ __all__ = ['Fleet'] class Fleet(object): - """ - - """ + """ """ def __init__(self): self.instance_ = ps_instance.PaddlePSInstance() @@ -38,11 +36,13 @@ class Fleet(object): def init_pserver(self, opt_info): if "fleet_desc" in opt_info: self.dist_desc_str_ = text_format.MessageToString( - opt_info["fleet_desc"]) + opt_info["fleet_desc"] + ) self.dist_desc_ = opt_info["fleet_desc"] else: print( - "You should run distributed optimization to get opt_info first") + "You should run distributed optimization to get opt_info first" + ) sys.exit(-1) self.fleet_.init_server(self.dist_desc_str_) ip = self.fleet_.start_server() @@ 
-55,17 +55,22 @@ class Fleet(object): def init_worker(self, opt_info): if "fleet_desc" in opt_info: self.dist_desc_str_ = text_format.MessageToString( - opt_info["fleet_desc"]) + opt_info["fleet_desc"] + ) self.dist_desc_ = opt_info["fleet_desc"] else: print( - "You should run distributed optimization to get opt_info first") + "You should run distributed optimization to get opt_info first" + ) sys.exit(-1) self.instance_.barrier_all() ips = self.instance.gather_ips() - self.fleet_.init_worker(self.dist_desc_str_, ips, - self.instance_.get_node_cnt(), - self.instance._rankid) + self.fleet_.init_worker( + self.dist_desc_str_, + ips, + self.instance_.get_node_cnt(), + self.instance._rankid, + ) self.instance.barrier_worker() def init_pserver_model(self): diff --git a/python/paddle/fluid/distributed/helper.py b/python/paddle/fluid/distributed/helper.py index 08e6fca6165a2d769638bc05acd2b77b21393cde..9511ce2db629f85e29f6591cc1fb03c9c8973d3b 100644 --- a/python/paddle/fluid/distributed/helper.py +++ b/python/paddle/fluid/distributed/helper.py @@ -26,23 +26,26 @@ class FileSystem(object): fs = FileSystm() """ - def __init__(self, - fs_type="afs", - uri="afs://xx", - user=None, - passwd=None, - hadoop_bin=""): + def __init__( + self, + fs_type="afs", + uri="afs://xx", + user=None, + passwd=None, + hadoop_bin="", + ): assert user != None assert passwd != None assert hadoop_bin != None import ps_pb2 as pslib + self.fs_client = pslib.FsClientParameter() self.fs_client.uri = uri self.fs_client.user = user self.fs_client.passwd = passwd - #self.fs_client.buffer_size = 0 + # self.fs_client.buffer_size = 0 self.fs_client.hadoop_bin = hadoop_bin - #self.fs_client.afs_conf = afs_conf if not afs_conf else "" + # self.fs_client.afs_conf = afs_conf if not afs_conf else "" def get_desc(self): """ @@ -63,6 +66,7 @@ class MPIHelper(object): def __init__(self): from mpi4py import MPI + self.comm = MPI.COMM_WORLD self.MPI = MPI @@ -74,11 +78,13 @@ class MPIHelper(object): def get_ip(self): import socket + local_ip = socket.gethostbyname(socket.gethostname()) return local_ip def get_hostname(self): import socket + return socket.gethostname() def finalize(self): diff --git a/python/paddle/fluid/distributed/node.py b/python/paddle/fluid/distributed/node.py index 90ac44ada145930d322487ed3e68a497d5db6f5f..793787d0fd7b681bbe88956cbffc07fdfb22f6bf 100644 --- a/python/paddle/fluid/distributed/node.py +++ b/python/paddle/fluid/distributed/node.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and import ps_pb2 as pslib + # NOTE: reduce removed in fuctools in python3 from functools import reduce class Server(object): """ - A Server basic class. + A Server basic class. """ def __init__(self): @@ -27,7 +28,7 @@ class Server(object): class Worker(object): """ - A Worker basic class. + A Worker basic class. 
""" def __init__(self): @@ -36,23 +37,30 @@ class Worker(object): class DownpourServer(Server): """ - DownpourServer class is used to generate server program_desc - Args: - server: it is pslib.ServerParameter() - Examples: - server = DownpourServer() + DownpourServer class is used to generate server program_desc + Args: + server: it is pslib.ServerParameter() + Examples: + server = DownpourServer() """ def __init__(self): self.server_ = pslib.ServerParameter() self.server_.downpour_server_param.service_param.start_server_port = 0 - self.server_.downpour_server_param.service_param.server_class = "DownpourBrpcPsServer" - self.server_.downpour_server_param.service_param.client_class = "DownpourBrpcPsClient" - self.server_.downpour_server_param.service_param.service_class = "DownpourPsService" + self.server_.downpour_server_param.service_param.server_class = ( + "DownpourBrpcPsServer" + ) + self.server_.downpour_server_param.service_param.client_class = ( + "DownpourBrpcPsClient" + ) + self.server_.downpour_server_param.service_param.service_class = ( + "DownpourPsService" + ) self.server_.downpour_server_param.service_param.server_thread_num = 12 - def add_sparse_table(self, table_id, learning_rate, slot_key_vars, - slot_value_var): + def add_sparse_table( + self, table_id, learning_rate, slot_key_vars, slot_value_var + ): r""" Args: table_id(int): id of sparse params table @@ -108,8 +116,9 @@ class DownpourServer(Server): table.accessor.dense_sgd_param.adam.mom_decay_rate = 0.99 table.accessor.dense_sgd_param.naive.learning_rate = 0.0002 fea_dim = 0 - for param in filter(lambda x: x.name.find("embedding") == -1, - param_var): + for param in filter( + lambda x: x.name.find("embedding") == -1, param_var + ): fea_dim += reduce(lambda x, y: x * y, param.shape, 1) table.accessor.fea_dim = fea_dim @@ -122,20 +131,21 @@ class DownpourServer(Server): class DownpourWorker(Worker): """ - DownpourWorker class is used to generate worker program_desc - Args: - window (int): push params frequency - worker: it is pslib.DownpourTrainerParameter - Examples: - worker = DownpourWorker(1) + DownpourWorker class is used to generate worker program_desc + Args: + window (int): push params frequency + worker: it is pslib.DownpourTrainerParameter + Examples: + worker = DownpourWorker(1) """ def __init__(self, window): self.window = window self.worker_ = pslib.DownpourTrainerParameter() - def add_sparse_table(self, table_id, learning_rate, slot_key_vars, - slot_value_vars): + def add_sparse_table( + self, table_id, learning_rate, slot_key_vars, slot_value_vars + ): r""" Args: table_id(int): id of sparse params table @@ -151,7 +161,8 @@ class DownpourWorker(Worker): table.slot_key.extend([var.name for var in slot_key_vars]) table.slot_value.extend([var.name for var in slot_value_vars]) table.slot_gradient.extend( - [var.name + "@GRAD" for var in slot_value_vars]) + [var.name + "@GRAD" for var in slot_value_vars] + ) def add_dense_table(self, table_id, learning_rate, param_vars, grad_vars): r""" @@ -167,11 +178,16 @@ class DownpourWorker(Worker): table = self.worker_.dense_table.add() table.table_id = table_id table.dense_variable_name.extend( - filter(lambda x: x.find("embedding") == -1, - [p.name for p in param_vars])) + filter( + lambda x: x.find("embedding") == -1, + [p.name for p in param_vars], + ) + ) table.dense_gradient_variable_name.extend( - filter(lambda x: x.find("embedding") == -1, - [g.name for g in grad_vars])) + filter( + lambda x: x.find("embedding") == -1, [g.name for g in grad_vars] + ) + ) def 
get_desc(self): """ diff --git a/python/paddle/fluid/distributed/ps_instance.py b/python/paddle/fluid/distributed/ps_instance.py index 6b19d7ca62e40d8f1a5c2c866ac53fde94d1572a..370e1b19647245f56b4b1177781eebace060f263 100644 --- a/python/paddle/fluid/distributed/ps_instance.py +++ b/python/paddle/fluid/distributed/ps_instance.py @@ -16,12 +16,12 @@ from .helper import MPIHelper class PaddlePSInstance(object): """ - PaddlePSInstance class is used to generate A instance of server or worker - Args: - server_worker_mode: is a value 0 or 1, default is 1 - proc_per_node: process per node, default is 2 - Examples: - instance = PaddlePSInstance(1, 2) + PaddlePSInstance class is used to generate A instance of server or worker + Args: + server_worker_mode: is a value 0 or 1, default is 1 + proc_per_node: process per node, default is 2 + Examples: + instance = PaddlePSInstance(1, 2) """ def __init__(self, server_worker_mode=1, proc_per_node=2): @@ -35,7 +35,7 @@ class PaddlePSInstance(object): self._worker_num = self._nodes * self._proc_per_node / 2 self._server_num = self._nodes * self._proc_per_node / 2 self._total_server_worker = self._worker_num + self._server_num - self._node_type = None #IDLE=-1, WORKER=1, SERVER=0 + self._node_type = None # IDLE=-1, WORKER=1, SERVER=0 self._set_nodetype() self._comm = None self._split_comm() @@ -104,7 +104,7 @@ class PaddlePSInstance(object): def set_ip(self, ip): """ - set server ip + set server ip """ self._ip = ip diff --git a/python/paddle/fluid/distributed/ps_pb2.py b/python/paddle/fluid/distributed/ps_pb2.py index f1262ebae12ff4f447857554e183d4d5ba02a001..8a131a2548c447b9a9be917110efcb11ca4ac736 100644 --- a/python/paddle/fluid/distributed/ps_pb2.py +++ b/python/paddle/fluid/distributed/ps_pb2.py @@ -24,6 +24,7 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -34,7 +35,8 @@ DESCRIPTOR = _descriptor.FileDescriptor( syntax='proto2', serialized_pb=_b( '\n\x08ps.proto\x12\x06paddle\"\x9e\x02\n\x0bPSParameter\x12\x14\n\x0cworker_class\x18\x01 \x01(\t\x12\x14\n\x0cserver_class\x18\x02 \x01(\t\x12\x16\n\x0einstance_class\x18\x03 \x01(\t\x12-\n\x0cworker_param\x18\x65 \x01(\x0b\x32\x17.paddle.WorkerParameter\x12-\n\x0cserver_param\x18\x66 \x01(\x0b\x32\x17.paddle.ServerParameter\x12\x38\n\rtrainer_param\x18\xad\x02 \x01(\x0b\x32 .paddle.DownpourTrainerParameter\x12\x33\n\x0f\x66s_client_param\x18\xf5\x03 \x01(\x0b\x32\x19.paddle.FsClientParameter\"Q\n\x0fWorkerParameter\x12>\n\x15\x64ownpour_worker_param\x18\x01 \x01(\x0b\x32\x1f.paddle.DownpourWorkerParameter\"Q\n\x0fServerParameter\x12>\n\x15\x64ownpour_server_param\x18\x01 \x01(\x0b\x32\x1f.paddle.DownpourServerParameter\"O\n\x17\x44ownpourWorkerParameter\x12\x34\n\x14\x64ownpour_table_param\x18\x01 \x03(\x0b\x32\x16.paddle.TableParameter\"\xfd\x01\n\x18\x44ownpourTrainerParameter\x12\x30\n\x0b\x64\x65nse_table\x18\x01 \x03(\x0b\x32\x1b.paddle.DenseTableParameter\x12\x32\n\x0csparse_table\x18\x02 \x03(\x0b\x32\x1c.paddle.SparseTableParameter\x12\x1d\n\x15push_sparse_per_batch\x18\x03 \x01(\x05\x12\x1c\n\x14push_dense_per_batch\x18\x04 \x01(\x05\x12\x0f\n\x07skip_op\x18\x05 \x03(\t\x12-\n\x0eprogram_config\x18\x06 \x03(\x0b\x32\x15.paddle.ProgramConfig\"\x99\x01\n\rProgramConfig\x12\x12\n\nprogram_id\x18\x01 \x02(\t\x12\x1c\n\x14push_sparse_table_id\x18\x02 
\x03(\x05\x12\x1b\n\x13push_dense_table_id\x18\x03 \x03(\x05\x12\x1c\n\x14pull_sparse_table_id\x18\x04 \x03(\x05\x12\x1b\n\x13pull_dense_table_id\x18\x05 \x03(\x05\"{\n\x13\x44\x65nseTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x05\x12\x1b\n\x13\x64\x65nse_variable_name\x18\x02 \x03(\t\x12$\n\x1c\x64\x65nse_gradient_variable_name\x18\x03 \x03(\t\x12\x0f\n\x07\x66\x65\x61_dim\x18\x04 \x01(\x05\"z\n\x14SparseTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x05\x12\x13\n\x0b\x66\x65\x61ture_dim\x18\x02 \x01(\x05\x12\x10\n\x08slot_key\x18\x03 \x03(\t\x12\x12\n\nslot_value\x18\x04 \x03(\t\x12\x15\n\rslot_gradient\x18\x05 \x03(\t\"\x86\x01\n\x17\x44ownpourServerParameter\x12\x34\n\x14\x64ownpour_table_param\x18\x01 \x03(\x0b\x32\x16.paddle.TableParameter\x12\x35\n\rservice_param\x18\x02 \x01(\x0b\x32\x1e.paddle.ServerServiceParameter\"\xd7\x01\n\x16ServerServiceParameter\x12*\n\x0cserver_class\x18\x01 \x01(\t:\x14\x44ownpourBrpcPsServer\x12*\n\x0c\x63lient_class\x18\x02 \x01(\t:\x14\x44ownpourBrpcPsClient\x12(\n\rservice_class\x18\x03 \x01(\t:\x11\x44ownpourPsService\x12\x1c\n\x11start_server_port\x18\x04 \x01(\r:\x01\x30\x12\x1d\n\x11server_thread_num\x18\x05 \x01(\r:\x02\x31\x32\"\xbf\x01\n\x0eTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x04\x12\x13\n\x0btable_class\x18\x02 \x01(\t\x12\x12\n\nshared_num\x18\x03 \x01(\x04\x12\x30\n\x08\x61\x63\x63\x65ssor\x18\x04 \x01(\x0b\x32\x1e.paddle.TableAccessorParameter\x12\x1f\n\x04type\x18\x05 \x01(\x0e\x32\x11.paddle.TableType\x12\x1f\n\x10\x63ompress_in_save\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xf1\x02\n\x16TableAccessorParameter\x12\x16\n\x0e\x61\x63\x63\x65ssor_class\x18\x01 \x01(\t\x12\x38\n\x10sparse_sgd_param\x18\x02 \x01(\x0b\x32\x1e.paddle.SparseSGDRuleParameter\x12\x36\n\x0f\x64\x65nse_sgd_param\x18\x03 \x01(\x0b\x32\x1d.paddle.DenseSGDRuleParameter\x12\x0f\n\x07\x66\x65\x61_dim\x18\x04 \x01(\r\x12\x12\n\nembedx_dim\x18\x05 \x01(\r\x12\x18\n\x10\x65mbedx_threshold\x18\x06 \x01(\r\x12G\n\x17\x64ownpour_accessor_param\x18\x07 \x01(\x0b\x32&.paddle.DownpourTableAccessorParameter\x12\x45\n\x19table_accessor_save_param\x18\x08 \x03(\x0b\x32\".paddle.TableAccessorSaveParameter\"\xce\x01\n\x1e\x44ownpourTableAccessorParameter\x12\x14\n\x0cnonclk_coeff\x18\x01 \x01(\x02\x12\x13\n\x0b\x63lick_coeff\x18\x02 \x01(\x02\x12\x16\n\x0e\x62\x61se_threshold\x18\x03 \x01(\x02\x12\x17\n\x0f\x64\x65lta_threshold\x18\x04 \x01(\x02\x12\x17\n\x0f\x64\x65lta_keep_days\x18\x05 \x01(\x02\x12\x1d\n\x15show_click_decay_rate\x18\x06 \x01(\x02\x12\x18\n\x10\x64\x65lete_threshold\x18\x07 \x01(\x02\"S\n\x1aTableAccessorSaveParameter\x12\r\n\x05param\x18\x01 \x01(\r\x12\x11\n\tconverter\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65\x63onverter\x18\x03 \x01(\t\"e\n\x10PsRequestMessage\x12\x0e\n\x06\x63md_id\x18\x01 \x02(\r\x12\x10\n\x08table_id\x18\x02 \x01(\r\x12\x0e\n\x06params\x18\x03 \x03(\x0c\x12\x11\n\tclient_id\x18\x04 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\"w\n\x16SparseSGDRuleParameter\x12\x15\n\rlearning_rate\x18\x01 \x01(\x01\x12\x15\n\rinitial_g2sum\x18\x02 \x01(\x01\x12\x18\n\rinitial_range\x18\x03 \x01(\x01:\x01\x30\x12\x15\n\rweight_bounds\x18\x04 \x03(\x02\"\xe1\x01\n\x15\x44\x65nseSGDRuleParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x04\x61\x64\x61m\x18\x02 \x01(\x0b\x32\x18.paddle.AdamSGDParameter\x12(\n\x05naive\x18\x03 \x01(\x0b\x32\x19.paddle.NaiveSGDParameter\x12,\n\x07summary\x18\x04 \x01(\x0b\x32\x1b.paddle.SummarySGDParameter\x12:\n\x0emoving_average\x18\x05 
\x01(\x0b\x32\".paddle.MovingAverageRuleParameter\"\x86\x01\n\x10\x41\x64\x61mSGDParameter\x12\x15\n\rlearning_rate\x18\x01 \x01(\x01\x12\x16\n\x0e\x61vg_decay_rate\x18\x02 \x01(\x01\x12\x16\n\x0e\x61\x64\x61_decay_rate\x18\x03 \x01(\x01\x12\x13\n\x0b\x61\x64\x61_epsilon\x18\x04 \x01(\x01\x12\x16\n\x0emom_decay_rate\x18\x05 \x01(\x01\"B\n\x11NaiveSGDParameter\x12\x15\n\rlearning_rate\x18\x01 \x01(\x01\x12\x16\n\x0e\x61vg_decay_rate\x18\x02 \x01(\x01\";\n\x13SummarySGDParameter\x12$\n\x12summary_decay_rate\x18\x01 \x01(\x01:\x08\x30.999999\".\n\x1aMovingAverageRuleParameter\x12\x10\n\x08momentum\x18\x01 \x01(\x01\"I\n\x11PsResponseMessage\x12\x13\n\x08\x65rr_code\x18\x01 \x02(\x05:\x01\x30\x12\x11\n\x07\x65rr_msg\x18\x02 \x02(\t:\x00\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\xd5\x01\n\x11\x46sClientParameter\x12:\n\x07\x66s_type\x18\x01 \x01(\x0e\x32#.paddle.FsClientParameter.FsApiType:\x04HDFS\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x0c\n\x04user\x18\x03 \x01(\t\x12\x0e\n\x06passwd\x18\x04 \x01(\t\x12\x13\n\x0b\x62uffer_size\x18\x05 \x01(\x05\x12\x12\n\nhadoop_bin\x18\x33 \x01(\t\x12\x10\n\x08\x61\x66s_conf\x18\x65 \x01(\t\"\x1e\n\tFsApiType\x12\x08\n\x04HDFS\x10\x00\x12\x07\n\x03\x41\x46S\x10\x01*4\n\tTableType\x12\x13\n\x0fPS_SPARSE_TABLE\x10\x00\x12\x12\n\x0ePS_DENSE_TABLE\x10\x01*\xbd\x02\n\x07PsCmdID\x12\x17\n\x13PS_PULL_DENSE_TABLE\x10\x00\x12\x17\n\x13PS_PUSH_DENSE_TABLE\x10\x01\x12\x18\n\x14PS_PULL_SPARSE_TABLE\x10\x02\x12\x18\n\x14PS_PUSH_SPARSE_TABLE\x10\x03\x12\x13\n\x0fPS_SHRINK_TABLE\x10\x04\x12\x15\n\x11PS_SAVE_ONE_TABLE\x10\x05\x12\x15\n\x11PS_SAVE_ALL_TABLE\x10\x06\x12\x15\n\x11PS_LOAD_ONE_TABLE\x10\x07\x12\x15\n\x11PS_LOAD_ALL_TABLE\x10\x08\x12\x16\n\x12PS_CLEAR_ONE_TABLE\x10\t\x12\x16\n\x12PS_CLEAR_ALL_TABLE\x10\n\x12\x17\n\x13PS_PUSH_DENSE_PARAM\x10\x0b\x12\x12\n\x0ePS_STOP_SERVER\x10\x0c\x32K\n\tPsService\x12>\n\x07service\x12\x18.paddle.PsRequestMessage\x1a\x19.paddle.PsResponseMessageB\x03\x80\x01\x01' - )) + ), +) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _TABLETYPE = _descriptor.EnumDescriptor( @@ -43,16 +45,12 @@ _TABLETYPE = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, values=[ - _descriptor.EnumValueDescriptor(name='PS_SPARSE_TABLE', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_DENSE_TABLE', - index=1, - number=1, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='PS_SPARSE_TABLE', index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_DENSE_TABLE', index=1, number=1, options=None, type=None + ), ], containing_type=None, options=None, @@ -68,71 +66,73 @@ _PSCMDID = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, values=[ - _descriptor.EnumValueDescriptor(name='PS_PULL_DENSE_TABLE', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_DENSE_TABLE', - index=1, - number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PULL_SPARSE_TABLE', - index=2, - number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_SPARSE_TABLE', - index=3, - number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SHRINK_TABLE', - index=4, - number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SAVE_ONE_TABLE', - index=5, - number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SAVE_ALL_TABLE', - index=6, - number=6, - options=None, - type=None), - 
_descriptor.EnumValueDescriptor(name='PS_LOAD_ONE_TABLE', - index=7, - number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_LOAD_ALL_TABLE', - index=8, - number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_CLEAR_ONE_TABLE', - index=9, - number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_CLEAR_ALL_TABLE', - index=10, - number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_DENSE_PARAM', - index=11, - number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_STOP_SERVER', - index=12, - number=12, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='PS_PULL_DENSE_TABLE', + index=0, + number=0, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_DENSE_TABLE', + index=1, + number=1, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PULL_SPARSE_TABLE', + index=2, + number=2, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_SPARSE_TABLE', + index=3, + number=3, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_SHRINK_TABLE', index=4, number=4, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_SAVE_ONE_TABLE', index=5, number=5, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_SAVE_ALL_TABLE', index=6, number=6, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_LOAD_ONE_TABLE', index=7, number=7, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_LOAD_ALL_TABLE', index=8, number=8, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_CLEAR_ONE_TABLE', + index=9, + number=9, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_CLEAR_ALL_TABLE', + index=10, + number=10, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_DENSE_PARAM', + index=11, + number=11, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_STOP_SERVER', index=12, number=12, options=None, type=None + ), ], containing_type=None, options=None, @@ -164,16 +164,12 @@ _FSCLIENTPARAMETER_FSAPITYPE = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, values=[ - _descriptor.EnumValueDescriptor(name='HDFS', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='AFS', - index=1, - number=1, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='HDFS', index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='AFS', index=1, number=1, options=None, type=None + ), ], containing_type=None, options=None, @@ -189,36 +185,40 @@ _PSPARAMETER = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='worker_class', - full_name='paddle.PSParameter.worker_class', - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='server_class', - full_name='paddle.PSParameter.server_class', - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='worker_class', + full_name='paddle.PSParameter.worker_class', + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='server_class', + full_name='paddle.PSParameter.server_class', + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='instance_class', full_name='paddle.PSParameter.instance_class', @@ -234,37 +234,42 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='worker_param', - full_name='paddle.PSParameter.worker_param', - index=3, - number=101, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='server_param', - full_name='paddle.PSParameter.server_param', - index=4, - number=102, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='worker_param', + full_name='paddle.PSParameter.worker_param', + index=3, + number=101, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='server_param', + full_name='paddle.PSParameter.server_param', + index=4, + number=102, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='trainer_param', full_name='paddle.PSParameter.trainer_param', @@ -280,7 +285,8 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fs_client_param', full_name='paddle.PSParameter.fs_client_param', @@ -296,7 +302,8 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -332,7 +339,8 @@ _WORKERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -368,7 +376,8 @@ _SERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -404,7 +413,8 @@ _DOWNPOURWORKERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ 
-440,7 +450,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_table', full_name='paddle.DownpourTrainerParameter.sparse_table', @@ -456,7 +467,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_sparse_per_batch', full_name='paddle.DownpourTrainerParameter.push_sparse_per_batch', @@ -472,7 +484,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_dense_per_batch', full_name='paddle.DownpourTrainerParameter.push_dense_per_batch', @@ -488,7 +501,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='skip_op', full_name='paddle.DownpourTrainerParameter.skip_op', @@ -504,7 +518,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='program_config', full_name='paddle.DownpourTrainerParameter.program_config', @@ -520,7 +535,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -541,21 +557,23 @@ _PROGRAMCONFIG = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='program_id', - full_name='paddle.ProgramConfig.program_id', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='program_id', + full_name='paddle.ProgramConfig.program_id', + index=0, + number=1, + type=9, + cpp_type=9, + label=2, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='push_sparse_table_id', full_name='paddle.ProgramConfig.push_sparse_table_id', @@ -571,7 +589,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_dense_table_id', full_name='paddle.ProgramConfig.push_dense_table_id', @@ -587,7 +606,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='pull_sparse_table_id', full_name='paddle.ProgramConfig.pull_sparse_table_id', @@ -603,7 +623,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='pull_dense_table_id', full_name='paddle.ProgramConfig.pull_dense_table_id', @@ -619,7 +640,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + 
), ], extensions=[], nested_types=[], @@ -655,7 +677,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_variable_name', full_name='paddle.DenseTableParameter.dense_variable_name', @@ -671,7 +694,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_gradient_variable_name', full_name='paddle.DenseTableParameter.dense_gradient_variable_name', @@ -687,7 +711,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fea_dim', full_name='paddle.DenseTableParameter.fea_dim', @@ -703,7 +728,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -739,7 +765,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='feature_dim', full_name='paddle.SparseTableParameter.feature_dim', @@ -755,7 +782,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_key', full_name='paddle.SparseTableParameter.slot_key', @@ -771,7 +799,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_value', full_name='paddle.SparseTableParameter.slot_value', @@ -787,7 +816,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_gradient', full_name='paddle.SparseTableParameter.slot_gradient', @@ -803,7 +833,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -839,7 +870,8 @@ _DOWNPOURSERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='service_param', full_name='paddle.DownpourServerParameter.service_param', @@ -855,7 +887,8 @@ _DOWNPOURSERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -891,7 +924,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='client_class', full_name='paddle.ServerServiceParameter.client_class', @@ -907,7 +941,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='service_class', full_name='paddle.ServerServiceParameter.service_class', @@ -923,7 +958,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, 
extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='start_server_port', full_name='paddle.ServerServiceParameter.start_server_port', @@ -939,7 +975,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='server_thread_num', full_name='paddle.ServerServiceParameter.server_thread_num', @@ -955,7 +992,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -976,21 +1014,23 @@ _TABLEPARAMETER = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='table_id', - full_name='paddle.TableParameter.table_id', - index=0, - number=1, - type=4, - cpp_type=4, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='table_id', + full_name='paddle.TableParameter.table_id', + index=0, + number=1, + type=4, + cpp_type=4, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='table_class', full_name='paddle.TableParameter.table_class', @@ -1006,7 +1046,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='shared_num', full_name='paddle.TableParameter.shared_num', @@ -1022,37 +1063,42 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='accessor', - full_name='paddle.TableParameter.accessor', - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='type', - full_name='paddle.TableParameter.type', - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='accessor', + full_name='paddle.TableParameter.accessor', + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='type', + full_name='paddle.TableParameter.type', + index=4, + number=5, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='compress_in_save', full_name='paddle.TableParameter.compress_in_save', @@ -1068,7 +1114,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], 
nested_types=[], @@ -1104,7 +1151,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_sgd_param', full_name='paddle.TableAccessorParameter.sparse_sgd_param', @@ -1120,7 +1168,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_sgd_param', full_name='paddle.TableAccessorParameter.dense_sgd_param', @@ -1136,7 +1185,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fea_dim', full_name='paddle.TableAccessorParameter.fea_dim', @@ -1152,7 +1202,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embedx_dim', full_name='paddle.TableAccessorParameter.embedx_dim', @@ -1168,7 +1219,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embedx_threshold', full_name='paddle.TableAccessorParameter.embedx_threshold', @@ -1184,7 +1236,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='downpour_accessor_param', full_name='paddle.TableAccessorParameter.downpour_accessor_param', @@ -1200,7 +1253,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='table_accessor_save_param', full_name='paddle.TableAccessorParameter.table_accessor_save_param', @@ -1216,7 +1270,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1252,7 +1307,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='click_coeff', full_name='paddle.DownpourTableAccessorParameter.click_coeff', @@ -1268,7 +1324,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='base_threshold', full_name='paddle.DownpourTableAccessorParameter.base_threshold', @@ -1284,7 +1341,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delta_threshold', full_name='paddle.DownpourTableAccessorParameter.delta_threshold', @@ -1300,7 +1358,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delta_keep_days', full_name='paddle.DownpourTableAccessorParameter.delta_keep_days', @@ -1316,11 +1375,11 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, 
is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='show_click_decay_rate', - full_name= - 'paddle.DownpourTableAccessorParameter.show_click_decay_rate', + full_name='paddle.DownpourTableAccessorParameter.show_click_decay_rate', index=5, number=6, type=2, @@ -1333,7 +1392,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delete_threshold', full_name='paddle.DownpourTableAccessorParameter.delete_threshold', @@ -1349,7 +1409,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1385,7 +1446,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='converter', full_name='paddle.TableAccessorSaveParameter.converter', @@ -1401,7 +1463,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='deconverter', full_name='paddle.TableAccessorSaveParameter.deconverter', @@ -1417,7 +1480,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1438,21 +1502,23 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='cmd_id', - full_name='paddle.PsRequestMessage.cmd_id', - index=0, - number=1, - type=13, - cpp_type=3, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='cmd_id', + full_name='paddle.PsRequestMessage.cmd_id', + index=0, + number=1, + type=13, + cpp_type=3, + label=2, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='table_id', full_name='paddle.PsRequestMessage.table_id', @@ -1468,22 +1534,25 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='params', - full_name='paddle.PsRequestMessage.params', - index=2, - number=3, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='params', + full_name='paddle.PsRequestMessage.params', + index=2, + number=3, + type=12, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='client_id', full_name='paddle.PsRequestMessage.client_id', @@ -1499,22 +1568,25 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - 
_descriptor.FieldDescriptor(name='data', - full_name='paddle.PsRequestMessage.data', - index=4, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='data', + full_name='paddle.PsRequestMessage.data', + index=4, + number=5, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), ], extensions=[], nested_types=[], @@ -1550,7 +1622,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_g2sum', full_name='paddle.SparseSGDRuleParameter.initial_g2sum', @@ -1566,7 +1639,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_range', full_name='paddle.SparseSGDRuleParameter.initial_range', @@ -1582,7 +1656,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='weight_bounds', full_name='paddle.SparseSGDRuleParameter.weight_bounds', @@ -1598,7 +1673,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1634,7 +1710,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='adam', full_name='paddle.DenseSGDRuleParameter.adam', @@ -1650,7 +1727,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='naive', full_name='paddle.DenseSGDRuleParameter.naive', @@ -1666,7 +1744,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='summary', full_name='paddle.DenseSGDRuleParameter.summary', @@ -1682,7 +1761,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='moving_average', full_name='paddle.DenseSGDRuleParameter.moving_average', @@ -1698,7 +1778,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1734,7 +1815,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='avg_decay_rate', full_name='paddle.AdamSGDParameter.avg_decay_rate', @@ -1750,7 +1832,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ada_decay_rate', 
full_name='paddle.AdamSGDParameter.ada_decay_rate', @@ -1766,7 +1849,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ada_epsilon', full_name='paddle.AdamSGDParameter.ada_epsilon', @@ -1782,7 +1866,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='mom_decay_rate', full_name='paddle.AdamSGDParameter.mom_decay_rate', @@ -1798,7 +1883,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1834,7 +1920,8 @@ _NAIVESGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='avg_decay_rate', full_name='paddle.NaiveSGDParameter.avg_decay_rate', @@ -1850,7 +1937,8 @@ _NAIVESGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1886,7 +1974,8 @@ _SUMMARYSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1922,7 +2011,8 @@ _MOVINGAVERAGERULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1958,7 +2048,8 @@ _PSRESPONSEMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='err_msg', full_name='paddle.PsResponseMessage.err_msg', @@ -1974,22 +2065,25 @@ _PSRESPONSEMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='data', - full_name='paddle.PsResponseMessage.data', - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='data', + full_name='paddle.PsResponseMessage.data', + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), ], extensions=[], nested_types=[], @@ -2025,52 +2119,59 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='uri', - full_name='paddle.FsClientParameter.uri', - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='user', - full_name='paddle.FsClientParameter.user', - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='passwd', - full_name='paddle.FsClientParameter.passwd', - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='uri', + full_name='paddle.FsClientParameter.uri', + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='user', + full_name='paddle.FsClientParameter.user', + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='passwd', + full_name='paddle.FsClientParameter.passwd', + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='buffer_size', full_name='paddle.FsClientParameter.buffer_size', @@ -2086,7 +2187,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='hadoop_bin', full_name='paddle.FsClientParameter.hadoop_bin', @@ -2102,7 +2204,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='afs_conf', full_name='paddle.FsClientParameter.afs_conf', @@ -2118,7 +2221,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2137,75 +2241,102 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( _PSPARAMETER.fields_by_name['worker_param'].message_type = _WORKERPARAMETER _PSPARAMETER.fields_by_name['server_param'].message_type = _SERVERPARAMETER _PSPARAMETER.fields_by_name[ - 'trainer_param'].message_type = _DOWNPOURTRAINERPARAMETER + 'trainer_param' +].message_type = _DOWNPOURTRAINERPARAMETER _PSPARAMETER.fields_by_name['fs_client_param'].message_type = _FSCLIENTPARAMETER _WORKERPARAMETER.fields_by_name[ - 'downpour_worker_param'].message_type = _DOWNPOURWORKERPARAMETER + 'downpour_worker_param' +].message_type = _DOWNPOURWORKERPARAMETER _SERVERPARAMETER.fields_by_name[ - 'downpour_server_param'].message_type = _DOWNPOURSERVERPARAMETER + 'downpour_server_param' +].message_type = _DOWNPOURSERVERPARAMETER _DOWNPOURWORKERPARAMETER.fields_by_name[ - 'downpour_table_param'].message_type = _TABLEPARAMETER + 'downpour_table_param' +].message_type = _TABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'dense_table'].message_type = _DENSETABLEPARAMETER + 'dense_table' +].message_type = _DENSETABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'sparse_table'].message_type = _SPARSETABLEPARAMETER + 'sparse_table' +].message_type = 
_SPARSETABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'program_config'].message_type = _PROGRAMCONFIG + 'program_config' +].message_type = _PROGRAMCONFIG _DOWNPOURSERVERPARAMETER.fields_by_name[ - 'downpour_table_param'].message_type = _TABLEPARAMETER + 'downpour_table_param' +].message_type = _TABLEPARAMETER _DOWNPOURSERVERPARAMETER.fields_by_name[ - 'service_param'].message_type = _SERVERSERVICEPARAMETER + 'service_param' +].message_type = _SERVERSERVICEPARAMETER _TABLEPARAMETER.fields_by_name[ - 'accessor'].message_type = _TABLEACCESSORPARAMETER + 'accessor' +].message_type = _TABLEACCESSORPARAMETER _TABLEPARAMETER.fields_by_name['type'].enum_type = _TABLETYPE _TABLEACCESSORPARAMETER.fields_by_name[ - 'sparse_sgd_param'].message_type = _SPARSESGDRULEPARAMETER + 'sparse_sgd_param' +].message_type = _SPARSESGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'dense_sgd_param'].message_type = _DENSESGDRULEPARAMETER + 'dense_sgd_param' +].message_type = _DENSESGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'downpour_accessor_param'].message_type = _DOWNPOURTABLEACCESSORPARAMETER + 'downpour_accessor_param' +].message_type = _DOWNPOURTABLEACCESSORPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'table_accessor_save_param'].message_type = _TABLEACCESSORSAVEPARAMETER + 'table_accessor_save_param' +].message_type = _TABLEACCESSORSAVEPARAMETER _DENSESGDRULEPARAMETER.fields_by_name['adam'].message_type = _ADAMSGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name['naive'].message_type = _NAIVESGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name[ - 'summary'].message_type = _SUMMARYSGDPARAMETER + 'summary' +].message_type = _SUMMARYSGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name[ - 'moving_average'].message_type = _MOVINGAVERAGERULEPARAMETER + 'moving_average' +].message_type = _MOVINGAVERAGERULEPARAMETER _FSCLIENTPARAMETER.fields_by_name[ - 'fs_type'].enum_type = _FSCLIENTPARAMETER_FSAPITYPE + 'fs_type' +].enum_type = _FSCLIENTPARAMETER_FSAPITYPE _FSCLIENTPARAMETER_FSAPITYPE.containing_type = _FSCLIENTPARAMETER DESCRIPTOR.message_types_by_name['PSParameter'] = _PSPARAMETER DESCRIPTOR.message_types_by_name['WorkerParameter'] = _WORKERPARAMETER DESCRIPTOR.message_types_by_name['ServerParameter'] = _SERVERPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourWorkerParameter'] = _DOWNPOURWORKERPARAMETER + 'DownpourWorkerParameter' +] = _DOWNPOURWORKERPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourTrainerParameter'] = _DOWNPOURTRAINERPARAMETER + 'DownpourTrainerParameter' +] = _DOWNPOURTRAINERPARAMETER DESCRIPTOR.message_types_by_name['ProgramConfig'] = _PROGRAMCONFIG DESCRIPTOR.message_types_by_name['DenseTableParameter'] = _DENSETABLEPARAMETER DESCRIPTOR.message_types_by_name['SparseTableParameter'] = _SPARSETABLEPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourServerParameter'] = _DOWNPOURSERVERPARAMETER + 'DownpourServerParameter' +] = _DOWNPOURSERVERPARAMETER DESCRIPTOR.message_types_by_name[ - 'ServerServiceParameter'] = _SERVERSERVICEPARAMETER + 'ServerServiceParameter' +] = _SERVERSERVICEPARAMETER DESCRIPTOR.message_types_by_name['TableParameter'] = _TABLEPARAMETER DESCRIPTOR.message_types_by_name[ - 'TableAccessorParameter'] = _TABLEACCESSORPARAMETER + 'TableAccessorParameter' +] = _TABLEACCESSORPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourTableAccessorParameter'] = _DOWNPOURTABLEACCESSORPARAMETER + 'DownpourTableAccessorParameter' +] = _DOWNPOURTABLEACCESSORPARAMETER DESCRIPTOR.message_types_by_name[ - 'TableAccessorSaveParameter'] = 
_TABLEACCESSORSAVEPARAMETER + 'TableAccessorSaveParameter' +] = _TABLEACCESSORSAVEPARAMETER DESCRIPTOR.message_types_by_name['PsRequestMessage'] = _PSREQUESTMESSAGE DESCRIPTOR.message_types_by_name[ - 'SparseSGDRuleParameter'] = _SPARSESGDRULEPARAMETER + 'SparseSGDRuleParameter' +] = _SPARSESGDRULEPARAMETER DESCRIPTOR.message_types_by_name[ - 'DenseSGDRuleParameter'] = _DENSESGDRULEPARAMETER + 'DenseSGDRuleParameter' +] = _DENSESGDRULEPARAMETER DESCRIPTOR.message_types_by_name['AdamSGDParameter'] = _ADAMSGDPARAMETER DESCRIPTOR.message_types_by_name['NaiveSGDParameter'] = _NAIVESGDPARAMETER DESCRIPTOR.message_types_by_name['SummarySGDParameter'] = _SUMMARYSGDPARAMETER DESCRIPTOR.message_types_by_name[ - 'MovingAverageRuleParameter'] = _MOVINGAVERAGERULEPARAMETER + 'MovingAverageRuleParameter' +] = _MOVINGAVERAGERULEPARAMETER DESCRIPTOR.message_types_by_name['PsResponseMessage'] = _PSRESPONSEMESSAGE DESCRIPTOR.message_types_by_name['FsClientParameter'] = _FSCLIENTPARAMETER DESCRIPTOR.enum_types_by_name['TableType'] = _TABLETYPE @@ -2213,215 +2344,259 @@ DESCRIPTOR.enum_types_by_name['PsCmdID'] = _PSCMDID PSParameter = _reflection.GeneratedProtocolMessageType( 'PSParameter', - (_message.Message, ), - dict(DESCRIPTOR=_PSPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PSParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PSPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PSParameter) + ), +) _sym_db.RegisterMessage(PSParameter) WorkerParameter = _reflection.GeneratedProtocolMessageType( 'WorkerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_WORKERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.WorkerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_WORKERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.WorkerParameter) + ), +) _sym_db.RegisterMessage(WorkerParameter) ServerParameter = _reflection.GeneratedProtocolMessageType( 'ServerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SERVERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.ServerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SERVERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ServerParameter) + ), +) _sym_db.RegisterMessage(ServerParameter) DownpourWorkerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourWorkerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DOWNPOURWORKERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourWorkerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURWORKERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourWorkerParameter) + ), +) _sym_db.RegisterMessage(DownpourWorkerParameter) DownpourTrainerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourTrainerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DOWNPOURTRAINERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourTrainerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURTRAINERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourTrainerParameter) + ), +) _sym_db.RegisterMessage(DownpourTrainerParameter) ProgramConfig = _reflection.GeneratedProtocolMessageType( 'ProgramConfig', - (_message.Message, ), - dict(DESCRIPTOR=_PROGRAMCONFIG, - __module__='ps_pb2' - # 
@@protoc_insertion_point(class_scope:paddle.ProgramConfig) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PROGRAMCONFIG, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ProgramConfig) + ), +) _sym_db.RegisterMessage(ProgramConfig) DenseTableParameter = _reflection.GeneratedProtocolMessageType( 'DenseTableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DENSETABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DenseTableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DENSETABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DenseTableParameter) + ), +) _sym_db.RegisterMessage(DenseTableParameter) SparseTableParameter = _reflection.GeneratedProtocolMessageType( 'SparseTableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SPARSETABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SparseTableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SPARSETABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SparseTableParameter) + ), +) _sym_db.RegisterMessage(SparseTableParameter) DownpourServerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourServerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DOWNPOURSERVERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourServerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURSERVERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourServerParameter) + ), +) _sym_db.RegisterMessage(DownpourServerParameter) ServerServiceParameter = _reflection.GeneratedProtocolMessageType( 'ServerServiceParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SERVERSERVICEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.ServerServiceParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SERVERSERVICEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ServerServiceParameter) + ), +) _sym_db.RegisterMessage(ServerServiceParameter) TableParameter = _reflection.GeneratedProtocolMessageType( 'TableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_TABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.TableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_TABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.TableParameter) + ), +) _sym_db.RegisterMessage(TableParameter) TableAccessorParameter = _reflection.GeneratedProtocolMessageType( 'TableAccessorParameter', - (_message.Message, ), - dict(DESCRIPTOR=_TABLEACCESSORPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.TableAccessorParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_TABLEACCESSORPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.TableAccessorParameter) + ), +) _sym_db.RegisterMessage(TableAccessorParameter) DownpourTableAccessorParameter = _reflection.GeneratedProtocolMessageType( 'DownpourTableAccessorParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_DOWNPOURTABLEACCESSORPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.DownpourTableAccessorParameter) - )) + ), +) _sym_db.RegisterMessage(DownpourTableAccessorParameter) TableAccessorSaveParameter = _reflection.GeneratedProtocolMessageType( 'TableAccessorSaveParameter', - (_message.Message, ), + 
(_message.Message,), dict( DESCRIPTOR=_TABLEACCESSORSAVEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.TableAccessorSaveParameter) - )) + ), +) _sym_db.RegisterMessage(TableAccessorSaveParameter) PsRequestMessage = _reflection.GeneratedProtocolMessageType( 'PsRequestMessage', - (_message.Message, ), - dict(DESCRIPTOR=_PSREQUESTMESSAGE, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PsRequestMessage) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PSREQUESTMESSAGE, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PsRequestMessage) + ), +) _sym_db.RegisterMessage(PsRequestMessage) SparseSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'SparseSGDRuleParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SPARSESGDRULEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SparseSGDRuleParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SPARSESGDRULEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SparseSGDRuleParameter) + ), +) _sym_db.RegisterMessage(SparseSGDRuleParameter) DenseSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'DenseSGDRuleParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DENSESGDRULEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DenseSGDRuleParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DENSESGDRULEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DenseSGDRuleParameter) + ), +) _sym_db.RegisterMessage(DenseSGDRuleParameter) AdamSGDParameter = _reflection.GeneratedProtocolMessageType( 'AdamSGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_ADAMSGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.AdamSGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_ADAMSGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.AdamSGDParameter) + ), +) _sym_db.RegisterMessage(AdamSGDParameter) NaiveSGDParameter = _reflection.GeneratedProtocolMessageType( 'NaiveSGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_NAIVESGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.NaiveSGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_NAIVESGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.NaiveSGDParameter) + ), +) _sym_db.RegisterMessage(NaiveSGDParameter) SummarySGDParameter = _reflection.GeneratedProtocolMessageType( 'SummarySGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SUMMARYSGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SummarySGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SUMMARYSGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SummarySGDParameter) + ), +) _sym_db.RegisterMessage(SummarySGDParameter) MovingAverageRuleParameter = _reflection.GeneratedProtocolMessageType( 'MovingAverageRuleParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_MOVINGAVERAGERULEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.MovingAverageRuleParameter) - )) + ), +) _sym_db.RegisterMessage(MovingAverageRuleParameter) PsResponseMessage = _reflection.GeneratedProtocolMessageType( 'PsResponseMessage', - (_message.Message, ), - dict(DESCRIPTOR=_PSRESPONSEMESSAGE, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PsResponseMessage) - )) + 
(_message.Message,), + dict( + DESCRIPTOR=_PSRESPONSEMESSAGE, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PsResponseMessage) + ), +) _sym_db.RegisterMessage(PsResponseMessage) FsClientParameter = _reflection.GeneratedProtocolMessageType( 'FsClientParameter', - (_message.Message, ), - dict(DESCRIPTOR=_FSCLIENTPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.FsClientParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_FSCLIENTPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.FsClientParameter) + ), +) _sym_db.RegisterMessage(FsClientParameter) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), - _b('\200\001\001')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), _b('\200\001\001') +) # @@protoc_insertion_point(module_scope) diff --git a/python/paddle/fluid/dygraph/amp/auto_cast.py b/python/paddle/fluid/dygraph/amp/auto_cast.py index 682f14fab5b9af59b161ee65975495de7b23be87..bb1acc7c09bfcc7610bb622c14cf0fb21dfa875f 100644 --- a/python/paddle/fluid/dygraph/amp/auto_cast.py +++ b/python/paddle/fluid/dygraph/amp/auto_cast.py @@ -12,10 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager, wrap_decorator +from paddle.fluid.wrapped_decorator import ( + signature_safe_contextmanager, + wrap_decorator, +) from paddle.fluid import core import contextlib -from paddle.fluid.framework import Variable, _non_static_mode, OpProtoHolder, Parameter, _dygraph_tracer, dygraph_only, set_flags, get_flags +from paddle.fluid.framework import ( + Variable, + _non_static_mode, + OpProtoHolder, + Parameter, + _dygraph_tracer, + dygraph_only, + set_flags, + get_flags, +) import warnings import copy import functools @@ -103,12 +115,11 @@ def amp_state(): return _g_amp_state_ -#NOTE(zhiqiu): similar as paddle.fluid.contrib.mixed_precision.fp16_lists.AutoMixedPrecisionLists._update_list +# NOTE(zhiqiu): similar as paddle.fluid.contrib.mixed_precision.fp16_lists.AutoMixedPrecisionLists._update_list # The reason why not use AutoMixedPrecisionLists is that custom_black_varnames is not suitable for imperative mode. -def _update_list(custom_white_list, - custom_black_list, - level='O1', - dtype='float16'): +def _update_list( + custom_white_list, custom_black_list, level='O1', dtype='float16' +): """ Update black and white list according to users' custom list. 
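# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this diff): the custom white/black
# lists merged by _update_list above are normally supplied through the public
# AMP context manager. This assumes the public paddle.amp.auto_cast wrapper
# around amp_guard and a device where AMP is enabled; the op names are only
# examples.
import paddle

model = paddle.nn.Linear(4, 4)
x = paddle.rand([2, 4])
with paddle.amp.auto_cast(
    custom_white_list={'elementwise_add'},  # additionally run these ops in fp16
    custom_black_list={'reduce_sum'},       # keep these ops in fp32
    level='O1',
):
    out = model(x)
# ---------------------------------------------------------------------------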
""" @@ -129,8 +140,9 @@ def _update_list(custom_white_list, if custom_white_list and custom_black_list: for op_name in custom_white_list: if op_name in custom_black_list: - raise ValueError("Custom white list overlap " - "custom black list") + raise ValueError( + "Custom white list overlap " "custom black list" + ) if custom_white_list: for op_name in custom_white_list: if op_name in _black_list: @@ -190,17 +202,29 @@ def pure_fp16_initialize(models): for layer in models[idx].sublayers(include_self=True): layer._casted_by_pure_fp16 = True if (layer._dtype == 'float16') or isinstance( - layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D, - paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D, - paddle.nn.LayerNorm, paddle.nn.SyncBatchNorm)): + layer, + ( + paddle.nn.BatchNorm, + paddle.nn.BatchNorm1D, + paddle.nn.BatchNorm2D, + paddle.nn.BatchNorm3D, + paddle.nn.LayerNorm, + paddle.nn.SyncBatchNorm, + ), + ): continue - if isinstance(layer, (paddle.incubate.nn.FusedFeedForward, - paddle.incubate.nn.FusedMultiHeadAttention)): + if isinstance( + layer, + ( + paddle.incubate.nn.FusedFeedForward, + paddle.incubate.nn.FusedMultiHeadAttention, + ), + ): layer._amp_decorate(dtype='float16') continue - layer._to_impl(dtype='float16', - include_sublayers=False, - floating_only=True) + layer._to_impl( + dtype='float16', include_sublayers=False, floating_only=True + ) return models @@ -208,9 +232,9 @@ def pure_fp16_initialize(models): def pure_bf16_initialize(models): for idx in range(len(models)): for layer in models[idx].sublayers(include_self=True): - layer._to_impl(dtype='bfloat16', - include_sublayers=False, - floating_only=True) + layer._to_impl( + dtype='bfloat16', include_sublayers=False, floating_only=True + ) return models @@ -218,8 +242,10 @@ def check_models(models): for model in models: if not isinstance(model, paddle.nn.Layer): raise RuntimeError( - "Current train mode is pure fp16, models should be paddle.nn.Layer, but receive {}." - .format(type(model))) + "Current train mode is pure fp16, models should be paddle.nn.Layer, but receive {}.".format( + type(model) + ) + ) if isinstance(model, paddle.DataParallel): raise RuntimeError( "For distributed AMP training, you should first use paddle.amp.decorate() to decotate origin model, and then call paddle.DataParallel get distributed model." @@ -229,20 +255,25 @@ def check_models(models): def check_optimizers(optimizers): for optimizer in optimizers: if not isinstance( - optimizer, - (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)): + optimizer, + (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer), + ): raise RuntimeError( - "Current train mode is pure fp16, optimizers should be paddle.optimizer.Optimizer or paddle.fluid.optimizer.Optimizer, but receive {}." - .format(type(optimizer))) + "Current train mode is pure fp16, optimizers should be paddle.optimizer.Optimizer or paddle.fluid.optimizer.Optimizer, but receive {}.".format( + type(optimizer) + ) + ) @signature_safe_contextmanager @dygraph_only -def amp_guard(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O1', - dtype='float16'): +def amp_guard( + enable=True, + custom_white_list=None, + custom_black_list=None, + level='O1', + dtype='float16', +): """ :api_attr: imperative @@ -306,19 +337,23 @@ def amp_guard(enable=True, tracer = _dygraph_tracer() if not tracer: raise ValueError( - "current_tracer is None, maybe it is not in imperative mode.") + "current_tracer is None, maybe it is not in imperative mode." 
+ ) # check device_type: # NOTE: Now, amp only support gpu for float16 and bfloat16, xpu for float16, mlu for float16, npu for float16. # Maybe we will support cpu for bfloat16. - if enable and not (tracer._expected_place.is_gpu_place() - or tracer._expected_place.is_xpu_place() - or tracer._expected_place.is_mlu_place() - or tracer._expected_place.is_npu_place() - or tracer._expected_place.is_custom_place()): + if enable and not ( + tracer._expected_place.is_gpu_place() + or tracer._expected_place.is_xpu_place() + or tracer._expected_place.is_mlu_place() + or tracer._expected_place.is_npu_place() + or tracer._expected_place.is_custom_place() + ): warnings.warn( 'amp_guard can only be enabled on CUDAPlace, XPUPlace, MLUPlace, NPUPlace, and CustomPlace, current place is %s, so it makes no effect.' - % tracer._expected_place) + % tracer._expected_place + ) enable = False # For npu: if tracer._expected_place.is_npu_place() and (dtype == 'bfloat16'): @@ -343,14 +378,20 @@ def amp_guard(enable=True, prop = paddle.device.cuda.get_device_capability() warnings.warn( "For float16, amp only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: %s, with Compute Capability: %d.%d." - % (paddle.device.cuda.get_device_name(), prop[0], prop[1])) + % (paddle.device.cuda.get_device_name(), prop[0], prop[1]) + ) elif (dtype == 'bfloat16') and not _is_gpu_bfloat16_supported(): prop = paddle.device.cuda.get_device_capability() cuda_version = paddle.version.cuda() warnings.warn( "For bfloat16, amp only support NVIDIA GPU with Compute Capability 8.0 or higher and CUDA Version 11.0 or higher, current GPU is: %s, with Compute Capability: %d.%d, current CUDA Version is: %s." - % (paddle.device.cuda.get_device_name(), prop[0], prop[1], - cuda_version)) + % ( + paddle.device.cuda.get_device_name(), + prop[0], + prop[1], + cuda_version, + ) + ) amp_dtype = dtype @@ -381,8 +422,9 @@ def amp_guard(enable=True, _black_list = BF16_BLACK_LIST if custom_white_list or custom_black_list: - _white_list, _black_list = _update_list(custom_white_list, - custom_black_list, level, dtype) + _white_list, _black_list = _update_list( + custom_white_list, custom_black_list, level, dtype + ) if not enable: amp_level = AMP_LEVEL.O0 @@ -422,7 +464,6 @@ def amp_guard(enable=True, class StateDictHook(object): - def __init__(self, save_dtype): self._save_dtype = save_dtype @@ -437,12 +478,14 @@ class StateDictHook(object): @dygraph_only -def amp_decorate(models, - optimizers=None, - level='O1', - dtype='float16', - master_weight=None, - save_dtype=None): +def amp_decorate( + models, + optimizers=None, + level='O1', + dtype='float16', + master_weight=None, + save_dtype=None, +): """ Decorate models and optimizers for auto-mixed-precision. When level is O1(amp), the decorate will do nothing. When level is O2(pure fp16), the decorate will cast all parameters of models to FP16, except BatchNorm and LayerNorm. @@ -527,7 +570,8 @@ def amp_decorate(models, models_is_list = True else: raise TypeError( - "models must be either a single model or a list of models.") + "models must be either a single model or a list of models." 
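# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this diff): the amp_decorate
# docstring above describes level O2 (pure fp16): parameters are cast to
# fp16 except BatchNorm/LayerNorm. A minimal sketch through the public
# paddle.amp.decorate wrapper, assuming a CUDA device.
import paddle

model = paddle.nn.Sequential(paddle.nn.Linear(4, 4), paddle.nn.BatchNorm1D(4))
optimizer = paddle.optimizer.SGD(
    learning_rate=0.01, parameters=model.parameters()
)
model, optimizer = paddle.amp.decorate(
    models=model,
    optimizers=optimizer,
    level='O2',            # pure fp16: casts parameters, keeps norm layers in fp32
    save_dtype='float32',  # the registered state_dict hook saves weights as fp32
)
# ---------------------------------------------------------------------------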
+ ) if dtype == 'float16': models = pure_fp16_initialize(models=models) elif dtype == 'bfloat16': @@ -539,8 +583,9 @@ def amp_decorate(models, # check optimizers optimizers_is_list = False if isinstance( - optimizers, - (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)): + optimizers, + (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer), + ): optimizers_is_list = False optimizers = [optimizers] check_optimizers(optimizers) @@ -563,7 +608,8 @@ def amp_decorate(models, if not (save_dtype in ['float16', 'bfloat16', 'float32', 'float64']): raise ValueError( "save_dtype can only be float16 float32 or float64, but your input save_dtype is %s." - % save_dtype) + % save_dtype + ) for idx in range(len(models)): for layer in models[idx].sublayers(include_self=True): layer.register_state_dict_hook(StateDictHook(save_dtype)) diff --git a/python/paddle/fluid/dygraph/amp/loss_scaler.py b/python/paddle/fluid/dygraph/amp/loss_scaler.py index ce2e766d436ddcdfcc233528a403f066e199196b..6ab153e4a2e9a07cf4ed5ed50f211566300b5de0 100644 --- a/python/paddle/fluid/dygraph/amp/loss_scaler.py +++ b/python/paddle/fluid/dygraph/amp/loss_scaler.py @@ -14,7 +14,11 @@ from paddle.fluid import core from paddle.fluid.dygraph import to_variable -from paddle.fluid.framework import _varbase_creator, _dygraph_tracer, dygraph_only +from paddle.fluid.framework import ( + _varbase_creator, + _dygraph_tracer, + dygraph_only, +) from paddle.fluid.data_feeder import check_type from ...wrapped_decorator import signature_safe_contextmanager, wrap_decorator import warnings @@ -89,28 +93,34 @@ class AmpScaler(object): """ @dygraph_only - def __init__(self, - enable=True, - init_loss_scaling=2.**15, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=1, - use_dynamic_loss_scaling=True): + def __init__( + self, + enable=True, + init_loss_scaling=2.0**15, + incr_ratio=2.0, + decr_ratio=0.5, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=1, + use_dynamic_loss_scaling=True, + ): tracer = _dygraph_tracer() if not tracer: raise ValueError( - "current_tracer is None, maybe it is not in imperative mode.") + "current_tracer is None, maybe it is not in imperative mode." + ) - if enable and not (tracer._expected_place.is_gpu_place() - or tracer._expected_place.is_xpu_place() - or tracer._expected_place.is_mlu_place() - or tracer._expected_place.is_npu_place() - or tracer._expected_place.is_custom_place()): + if enable and not ( + tracer._expected_place.is_gpu_place() + or tracer._expected_place.is_xpu_place() + or tracer._expected_place.is_mlu_place() + or tracer._expected_place.is_npu_place() + or tracer._expected_place.is_custom_place() + ): warnings.warn( 'AmpScaler can only be enabled on CUDAPlace, XPUPlace, MLUPlace, NPUPlace and CustomPlace, current place is %s, so it makes no effect.' 
- % tracer._expected_place) + % tracer._expected_place + ) enable = False self._enable = enable @@ -130,13 +140,17 @@ class AmpScaler(object): self._found_inf = to_variable(np.array([0]).astype(np.bool_)) self._temp_found_inf_fp16 = to_variable( - np.array([0]).astype(np.bool_)) + np.array([0]).astype(np.bool_) + ) self._temp_found_inf_bf16 = to_variable( - np.array([0]).astype(np.bool_)) + np.array([0]).astype(np.bool_) + ) self._temp_found_inf_fp32 = to_variable( - np.array([0]).astype(np.bool_)) + np.array([0]).astype(np.bool_) + ) self._scale = to_variable( - np.array([self._init_loss_scaling]).astype(np.float32)) + np.array([self._init_loss_scaling]).astype(np.float32) + ) self._cache_founf_inf = None self._optimizer_states = defaultdict(_refresh_optimizer_state) @@ -260,7 +274,8 @@ class AmpScaler(object): raise RuntimeError("unscale_() is being called after step().") if getattr(optimizer, '_param_groups', None) and isinstance( - optimizer._param_groups[0], dict): + optimizer._param_groups[0], dict + ): param_grads = [] param_grads_fp16 = [] param_grads_bf16 = [] @@ -269,29 +284,37 @@ class AmpScaler(object): for param in group['params']: if param._grad_ivar() is not None: param_grads.append(param._grad_ivar()) - if param._grad_ivar( - ).dtype == core.VarDesc.VarType.FP16: + if ( + param._grad_ivar().dtype + == core.VarDesc.VarType.FP16 + ): param_grads_fp16.append(param._grad_ivar()) - elif param._grad_ivar( - ).dtype == core.VarDesc.VarType.BF16: + elif ( + param._grad_ivar().dtype + == core.VarDesc.VarType.BF16 + ): param_grads_bf16.append(param._grad_ivar()) else: param_grads_fp32.append(param._grad_ivar()) else: param_grads = [ - param._grad_ivar() for param in optimizer._parameter_list + param._grad_ivar() + for param in optimizer._parameter_list if param._grad_ivar() is not None ] param_grads_fp16 = [ - param for param in param_grads + param + for param in param_grads if param.dtype == core.VarDesc.VarType.FP16 ] param_grads_bf16 = [ - param for param in param_grads + param + for param in param_grads if param.dtype == core.VarDesc.VarType.BF16 ] param_grads_fp32 = [ - param for param in param_grads + param + for param in param_grads if param.dtype == core.VarDesc.VarType.FP32 ] if core.is_compiled_with_npu(): @@ -300,31 +323,56 @@ class AmpScaler(object): if len(param_grads_fp16): _legacy_C_ops.check_finite_and_unscale( - param_grads_fp16, self._scale, float_status, - param_grads_fp16, self._temp_found_inf_fp16) + param_grads_fp16, + self._scale, + float_status, + param_grads_fp16, + self._temp_found_inf_fp16, + ) if len(param_grads_bf16): _legacy_C_ops.check_finite_and_unscale( - param_grads_bf16, self._scale, float_status, - param_grads_bf16, self._temp_found_inf_bf16) + param_grads_bf16, + self._scale, + float_status, + param_grads_bf16, + self._temp_found_inf_bf16, + ) if len(param_grads_fp32): _legacy_C_ops.check_finite_and_unscale( - param_grads_fp32, self._scale, float_status, - param_grads_fp32, self._temp_found_inf_fp32) + param_grads_fp32, + self._scale, + float_status, + param_grads_fp32, + self._temp_found_inf_fp32, + ) else: if len(param_grads_fp16): _legacy_C_ops.check_finite_and_unscale( - param_grads_fp16, self._scale, param_grads_fp16, - self._temp_found_inf_fp16) + param_grads_fp16, + self._scale, + param_grads_fp16, + self._temp_found_inf_fp16, + ) if len(param_grads_bf16): _legacy_C_ops.check_finite_and_unscale( - param_grads_bf16, self._scale, param_grads_bf16, - self._temp_found_inf_bf16) + param_grads_bf16, + self._scale, + param_grads_bf16, + 
self._temp_found_inf_bf16, + ) if len(param_grads_fp32): _legacy_C_ops.check_finite_and_unscale( - param_grads_fp32, self._scale, param_grads_fp32, - self._temp_found_inf_fp32) - - self._found_inf = self._temp_found_inf_fp16 or self._temp_found_inf_bf16 or self._temp_found_inf_fp32 + param_grads_fp32, + self._scale, + param_grads_fp32, + self._temp_found_inf_fp32, + ) + + self._found_inf = ( + self._temp_found_inf_fp16 + or self._temp_found_inf_bf16 + or self._temp_found_inf_fp32 + ) optimizer_state["state"] = OptimizerState.UNSCALED @@ -340,9 +388,12 @@ class AmpScaler(object): self._decr_count = self._decr_count + 1 if self._decr_count == self._decr_every_n_nan_or_inf: print( - 'Found inf or nan, current scale is: {}, decrease to: {}*{}' - .format(float(self._scale), float(self._scale), - float(self._decr_ratio))) + 'Found inf or nan, current scale is: {}, decrease to: {}*{}'.format( + float(self._scale), + float(self._scale), + float(self._decr_ratio), + ) + ) self._scale = self._scale * self._decr_ratio self._decr_count = 0 else: @@ -390,7 +441,8 @@ class AmpScaler(object): """ self._init_loss_scaling = new_init_loss_scaling self._scale = to_variable( - np.array([self._init_loss_scaling]).astype(np.float32)) + np.array([self._init_loss_scaling]).astype(np.float32) + ) def get_incr_ratio(self): """ @@ -481,16 +533,20 @@ class AmpScaler(object): decr_count(int): The number of recent consecutive skipped steps. use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling. If False, fixed loss_scaling is used. If True, the loss scaling is updated dynamicly. Default is True. """ - return { - "scale": self._scale.numpy(), - "incr_ratio": self._incr_ratio, - "decr_ratio": self._decr_ratio, - "incr_every_n_steps": self._incr_every_n_steps, - "decr_every_n_nan_or_inf": self._decr_every_n_nan_or_inf, - "incr_count": self._incr_count, - "decr_count": self._decr_count, - "use_dynamic_loss_scaling": self._use_dynamic_loss_scaling - } if self._enable else {} + return ( + { + "scale": self._scale.numpy(), + "incr_ratio": self._incr_ratio, + "decr_ratio": self._decr_ratio, + "incr_every_n_steps": self._incr_every_n_steps, + "decr_every_n_nan_or_inf": self._decr_every_n_nan_or_inf, + "incr_count": self._incr_count, + "decr_count": self._decr_count, + "use_dynamic_loss_scaling": self._use_dynamic_loss_scaling, + } + if self._enable + else {} + ) def load_state_dict(self, state_dict): """ @@ -505,11 +561,13 @@ class AmpScaler(object): if len(state_dict) == 0: raise RuntimeError( "The input state dict is empty, possibly because it was saved " - "from a disabled instance of GradScaler.") + "from a disabled instance of GradScaler." 
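The counter bookkeeping reformatted above is the usual dynamic loss-scaling policy: after decr_every_n_nan_or_inf consecutive overflowing steps the scale is multiplied by decr_ratio, and, inferring the unshown branch from the constructor arguments, after incr_every_n_steps clean steps it is multiplied by incr_ratio. A self-contained sketch of that policy, not the class itself:

    def update_loss_scale(scale, found_inf, state,
                          incr_ratio=2.0, decr_ratio=0.5,
                          incr_every_n_steps=1000, decr_every_n_nan_or_inf=1):
        """state holds two counters, 'incr_count' and 'decr_count'."""
        if found_inf:
            state['incr_count'] = 0
            state['decr_count'] += 1
            if state['decr_count'] == decr_every_n_nan_or_inf:
                scale *= decr_ratio      # shrink after repeated overflow
                state['decr_count'] = 0
        else:
            state['decr_count'] = 0
            state['incr_count'] += 1
            if state['incr_count'] == incr_every_n_steps:
                scale *= incr_ratio      # grow after a long clean run
                state['incr_count'] = 0
        return scale

    # With the defaults, a single inf/nan step takes 2.0 ** 15 down to 2.0 ** 14.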
+ ) self._init_loss_scaling = state_dict["scale"][0] self._scale = to_variable( - np.array([self._init_loss_scaling]).astype(np.float32)) + np.array([self._init_loss_scaling]).astype(np.float32) + ) self._incr_ratio = state_dict["incr_ratio"] self._decr_ratio = state_dict["decr_ratio"] self._incr_every_n_steps = state_dict["incr_every_n_steps"] diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py index 82ef806583743e908a2fe5b05d98fe3b9e3ca5ba..593ce558f2178eaa1ea32bf0e8b947219d0f0410 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/fluid/dygraph/base.py @@ -25,13 +25,23 @@ from .tracer import Tracer import logging from ..data_feeder import convert_dtype import warnings -from ..framework import _get_paddle_place, _in_legacy_dygraph, _in_eager_without_dygraph_check +from ..framework import ( + _get_paddle_place, + _in_legacy_dygraph, + _in_eager_without_dygraph_check, +) import paddle import warnings __all__ = [ - 'no_grad', 'no_grad_', 'grad', 'guard', 'enable_dygraph', 'disable_dygraph', - 'enabled', 'to_variable' + 'no_grad', + 'no_grad_', + 'grad', + 'guard', + 'enable_dygraph', + 'disable_dygraph', + 'enabled', + 'to_variable', ] # Flag that indicates whether running code under `@declarative` @@ -46,8 +56,9 @@ def in_declarative_mode(): return _in_declarative_mode_ -def declarative_unsupport_argument_warning(func_name, input_names, inputs, - support_values): +def declarative_unsupport_argument_warning( + func_name, input_names, inputs, support_values +): """ Warning if inputs do not elementwisely equals to support_values. It's a utility function for dy2static when dygraph interface have @@ -56,12 +67,13 @@ def declarative_unsupport_argument_warning(func_name, input_names, inputs, """ for name, inp, sup in zip(input_names, inputs, support_values): if inp != sup: - warnings.warn(f"{func_name} has unsupported parameter in jit: " + - f"{name}, jit will discard it") + warnings.warn( + f"{func_name} has unsupported parameter in jit: " + + f"{name}, jit will discard it" + ) def _switch_to_static_graph_(func): - def __impl__(*args, **kwargs): with framework._dygraph_guard(None): return func(*args, **kwargs) @@ -101,8 +113,11 @@ _functional_dygraph_context_manager = None @signature_safe_contextmanager def param_guard(parameters): # Note: parameters is a reference of self._parameters or self._buffers - if in_declarative_mode( - ) and not framework._non_static_mode() and parameters: + if ( + in_declarative_mode() + and not framework._non_static_mode() + and parameters + ): origin_parameters = parameters.copy() for name, var_base in parameters.items(): if isinstance(var_base, list): @@ -126,8 +141,9 @@ def _convert_into_variable(tensor): if new_var is not None: assert isinstance(new_var, framework.Variable) # Convert ParamBase into Parameter with same attributes in dy2stat. - elif isinstance(tensor, - (framework.EagerParamBase, framework.ParamBase)): + elif isinstance( + tensor, (framework.EagerParamBase, framework.ParamBase) + ): new_var = tensor._to_static_var(to_parameter=True) else: # Note(Aurelius84): Convert VarBase in self._buffers into Variable with @@ -140,8 +156,9 @@ def _convert_into_variable(tensor): # non-persistable. See case of `drop_state` in lstm api. 
is_persistable = len(tensor.shape) > 0 - new_var = tensor._to_static_var(to_parameter=False, - persistable=is_persistable) + new_var = tensor._to_static_var( + to_parameter=False, persistable=is_persistable + ) return new_var else: return tensor @@ -207,7 +224,8 @@ def enable_dygraph(place=None): global _functional_dygraph_context_manager if _functional_dygraph_context_manager is None: _functional_dygraph_context_manager = guard( - place=_get_paddle_place(place)) + place=_get_paddle_place(place) + ) _functional_dygraph_context_manager.__enter__() # call disable_dygraph when Python exit @@ -368,7 +386,6 @@ class no_grad_: """ def __call__(self, func): - @decorator.decorator def _decorate_function(func, *args, **kwargs): with self: @@ -448,14 +465,16 @@ def guard(place=None): @framework.non_static_only -def grad(outputs, - inputs, - grad_outputs=None, - retain_graph=None, - create_graph=False, - only_inputs=True, - allow_unused=False, - no_grad_vars=None): +def grad( + outputs, + inputs, + grad_outputs=None, + retain_graph=None, + create_graph=False, + only_inputs=True, + allow_unused=False, + no_grad_vars=None, +): ''' .. note:: **This API is ONLY available in imperative mode.** @@ -581,16 +600,18 @@ def grad(outputs, # dy1 = [3], dy2 = [4] grad_y1 = paddle.to_tensor(3.0) print(test_dygraph_grad([grad_y1, grad_value])) # [24.] - ''' + ''' if in_declarative_mode(): # In dy2static context, we call static interface `gradients` # to calculate grads. from paddle.static import gradients + declarative_unsupport_argument_warning( "paddle.grad", ["retain_graph", "create_grad", "only_inputs", "allow_unused"], [retain_graph, create_graph, only_inputs, allow_unused], - [None, False, True, False]) + [None, False, True, False], + ) return gradients(outputs, inputs, grad_outputs, no_grad_vars) def check_in_out(in_out_list, name): @@ -605,9 +626,8 @@ def grad(outputs, ), "Elements of {} must be Tensor".format(name) else: assert isinstance( - each_var, - core.VarBase), "Elements of {} must be Variable".format( - name) + each_var, core.VarBase + ), "Elements of {} must be Variable".format(name) return in_out_list else: if _in_eager_without_dygraph_check(): @@ -642,7 +662,8 @@ def grad(outputs, if len(grad_outputs) > 0: assert len(grad_outputs) == len( - outputs), "The length of grad_outputs must be equal to outputs" + outputs + ), "The length of grad_outputs must be equal to outputs" if no_grad_vars is None: no_grad_vars = [] @@ -655,12 +676,12 @@ def grad(outputs, for var in no_grad_vars: if _in_eager_without_dygraph_check(): assert isinstance( - var, - core.eager.Tensor), "no_grad_vars can only contains Tensor" + var, core.eager.Tensor + ), "no_grad_vars can only contains Tensor" else: assert isinstance( - var, - core.VarBase), "no_grad_vars can only contains Variable" + var, core.VarBase + ), "no_grad_vars can only contains Variable" else: if _in_eager_without_dygraph_check(): raise AssertionError( @@ -676,8 +697,9 @@ def grad(outputs, if retain_graph is None: retain_graph = create_graph - assert isinstance(retain_graph, - bool), "retain_graph must be None, True or False" + assert isinstance( + retain_graph, bool + ), "retain_graph must be None, True or False" assert isinstance(allow_unused, bool), "allow_unused must be True or False" @@ -685,17 +707,30 @@ def grad(outputs, assert only_inputs, "only_inputs=False is not supported yet" if _in_eager_without_dygraph_check(): - return core.eager.run_partial_grad(outputs, inputs, grad_outputs, - retain_graph, create_graph, - only_inputs, allow_unused, - 
no_grad_vars) + return core.eager.run_partial_grad( + outputs, + inputs, + grad_outputs, + retain_graph, + create_graph, + only_inputs, + allow_unused, + no_grad_vars, + ) else: place = core.Place() place.set_place(framework._current_expected_place()) - return core.dygraph_partial_grad(inputs, outputs, grad_outputs, - no_grad_vars, place, create_graph, - retain_graph, allow_unused, - only_inputs) + return core.dygraph_partial_grad( + inputs, + outputs, + grad_outputs, + no_grad_vars, + place, + create_graph, + retain_graph, + allow_unused, + only_inputs, + ) @framework.dygraph_only @@ -755,20 +790,30 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): y.shape # [3L, 2L] """ - support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase, - framework.Variable, core.Tensor, core.LoDTensor) + support_type = ( + list, + tuple, + np.ndarray, + core.eager.Tensor, + core.VarBase, + framework.Variable, + core.Tensor, + core.LoDTensor, + ) if not isinstance(value, support_type): raise TypeError( "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s." - % (support_type, type(value))) + % (support_type, type(value)) + ) if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)): return value elif isinstance(value, (core.Tensor, core.LoDTensor)): return core.VarBase(value) else: - if isinstance(framework._current_expected_place(), - framework.core.CPUPlace): - #TODO(zhiqiu): we found two problems when enable zero_copy on CPUPlace. + if isinstance( + framework._current_expected_place(), framework.core.CPUPlace + ): + # TODO(zhiqiu): we found two problems when enable zero_copy on CPUPlace. # (1): eigen requires 16-bytes alignments, but the data of numpy array may not statisfy. # Details: https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html # (2): when used in flask framework, it may result in hang. 
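Since the partial-grad dispatch above is easiest to read next to a concrete call, here is a minimal usage sketch of the public paddle.grad, whose signature matches the function in this hunk; the numbers are only a worked example:

    import paddle

    x = paddle.to_tensor(2.0, stop_gradient=False)
    y = x * x                                    # y = x^2

    # First-order gradient: dy/dx = 2x = 4 at x = 2.
    (dx,) = paddle.grad(outputs=[y], inputs=[x], create_graph=True)

    # create_graph=True keeps dx differentiable: d(2x)/dx = 2.
    (d2x,) = paddle.grad(outputs=[dx], inputs=[x])
    print(float(dx), float(d2x))                 # 4.0 2.0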
@@ -780,7 +825,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): ) zero_copy = False else: - assert not zero_copy, "zero_copy mode can only be used with CPUPlace" + assert ( + not zero_copy + ), "zero_copy mode can only be used with CPUPlace" if not isinstance(value, np.ndarray): value = np.array(value) @@ -791,13 +838,20 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): value = value.astype(dtype) if _in_eager_without_dygraph_check(): - return core.eager.Tensor(value, framework._current_expected_place(), - False, zero_copy, name if name else None, - True) + return core.eager.Tensor( + value, + framework._current_expected_place(), + False, + zero_copy, + name if name else None, + True, + ) else: - py_var = core.VarBase(value=value, - place=framework._current_expected_place(), - persistable=False, - zero_copy=zero_copy, - name=name if name else '') + py_var = core.VarBase( + value=value, + place=framework._current_expected_place(), + persistable=False, + zero_copy=zero_copy, + name=name if name else '', + ) return py_var diff --git a/python/paddle/fluid/dygraph/checkpoint.py b/python/paddle/fluid/dygraph/checkpoint.py index 8306e702598e2f9432d35a5777357e5528b499cf..f7a1db032f48b8dda7b545be8a3ce1f2282c15ce 100644 --- a/python/paddle/fluid/dygraph/checkpoint.py +++ b/python/paddle/fluid/dygraph/checkpoint.py @@ -15,14 +15,27 @@ import os import collections import functools -from ..framework import Variable, default_main_program, _non_static_mode, dygraph_only, Parameter, ParamBase, _varbase_creator, _dygraph_tracer, EagerParamBase +from ..framework import ( + Variable, + default_main_program, + _non_static_mode, + dygraph_only, + Parameter, + ParamBase, + _varbase_creator, + _dygraph_tracer, + EagerParamBase, +) import pickle from . import learning_rate_scheduler import warnings from .. import core from .base import guard from paddle.fluid.dygraph.jit import _SaveLoadConfig -from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers +from paddle.fluid.dygraph.io import ( + _construct_program_holders, + _construct_params_and_buffers, +) __all__ = [ 'save_dygraph', @@ -38,7 +51,8 @@ def _parse_load_config(configs): if key not in supported_configs: raise ValueError( "The additional config (%s) of `paddle.fluid.load_dygraph` is not supported." - % (key)) + % (key) + ) # construct inner config inner_config = _SaveLoadConfig() @@ -85,7 +99,9 @@ def save_dygraph(state_dict, model_path): ''' base_name = os.path.basename(model_path) - assert base_name != "", "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received filename is empty string." + assert ( + base_name != "" + ), "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received filename is empty string." suffix = ".pdparams" assert len(state_dict) > 0, "state_dict is empty, no need to save" @@ -193,7 +209,10 @@ def load_dygraph(model_path, **configs): with open(params_file_path, 'rb') as f: para_dict = pickle.load(f, encoding='latin1') - if not config.keep_name_table and "StructuredToParameterName@@" in para_dict: + if ( + not config.keep_name_table + and "StructuredToParameterName@@" in para_dict + ): del para_dict["StructuredToParameterName@@"] if os.path.exists(opti_file_path): @@ -202,8 +221,9 @@ def load_dygraph(model_path, **configs): else: # check model path if not os.path.isdir(model_prefix): - raise ValueError("Model saved directory '%s' is not exists." 
% - model_prefix) + raise ValueError( + "Model saved directory '%s' is not exists." % model_prefix + ) # check whether model file exists if config.model_filename is None: @@ -222,8 +242,9 @@ def load_dygraph(model_path, **configs): # NOTE(chenweihang): `jit.save` doesn't save optimizer state # 1. load program desc & construct _ProgramHolder - programs = _construct_program_holders(model_path, - config.model_filename) + programs = _construct_program_holders( + model_path, config.model_filename + ) # 2. load layer parameters & buffers # NOTE: using fluid.dygraph.guard() here will cause import error in py2 @@ -232,7 +253,8 @@ def load_dygraph(model_path, **configs): model_prefix, programs, config.params_filename, - append_suffix=False) + append_suffix=False, + ) # 3. construct state_dict para_dict = dict() @@ -248,10 +270,15 @@ def load_dygraph(model_path, **configs): structured_para_dict = dict() for var_name in para_dict: structured_name = extra_var_info[var_name].get( - 'structured_name', None) - assert structured_name is not None, "Cannot find saved variable (%s)'s structured name in saved model." % var_name + 'structured_name', None + ) + assert structured_name is not None, ( + "Cannot find saved variable (%s)'s structured name in saved model." + % var_name + ) structured_para_dict[structured_name] = para_dict[ - var_name] + var_name + ] para_dict = structured_para_dict else: # load state dict by `io.save_params/persistables` save format @@ -282,7 +309,8 @@ def load_dygraph(model_path, **configs): type='load', inputs={}, outputs={'Out': new_var}, - attrs={'file_path': os.path.join(model_path, name)}) + attrs={'file_path': os.path.join(model_path, name)}, + ) load_var_list.append(new_var) # 3. construct state_dict diff --git a/python/paddle/fluid/dygraph/container.py b/python/paddle/fluid/dygraph/container.py index 854df39355748ec45c53f3e377e44dfef862ff2c..db4ad3561024906bd9b260ba02420fa9be5a26ee 100644 --- a/python/paddle/fluid/dygraph/container.py +++ b/python/paddle/fluid/dygraph/container.py @@ -222,8 +222,10 @@ class LayerList(Layer): if isinstance(idx, int): if not (-len(self) <= idx < len(self)): raise IndexError( - 'index {} is out of range, should be an integer in range [{}, {})' - .format(idx, -len(self), len(self))) + 'index {} is out of range, should be an integer in range [{}, {})'.format( + idx, -len(self), len(self) + ) + ) if idx < 0: idx += len(self) return idx @@ -248,7 +250,8 @@ class LayerList(Layer): delattr(self, str(idx)) str_indices = [str(i) for i in range(len(self._sub_layers))] self._sub_layers = OrderedDict( - list(zip(str_indices, self._sub_layers.values()))) + list(zip(str_indices, self._sub_layers.values())) + ) def __len__(self): return len(self._sub_layers) @@ -297,9 +300,11 @@ class LayerList(Layer): linears.insert(-1, another) print(linears[-2] is another) # True """ - assert isinstance(index, int) and \ - -len(self._sub_layers) <= index < len(self._sub_layers), \ - "index should be an integer in range [{}, {})".format(-len(self), len(self)) + assert isinstance(index, int) and -len(self._sub_layers) <= index < len( + self._sub_layers + ), "index should be an integer in range [{}, {})".format( + -len(self), len(self) + ) index = self._get_abs_idx(index) for i in range(len(self._sub_layers), index, -1): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/assert_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/assert_transformer.py index a48258e30d5d565940dcd2907cfd0dbc50dbabc9..ca6f1e652e944e980f788d72b697256c80e7cdd5 100644 --- 
a/python/paddle/fluid/dygraph/dygraph_to_static/assert_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/assert_transformer.py @@ -14,9 +14,13 @@ from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class AssertTransformer(BaseTransformer): @@ -35,8 +39,15 @@ class AssertTransformer(BaseTransformer): self.visit(self.root) def visit_Assert(self, node): - convert_assert_node = gast.parse('_jst.Assert({test}, {msg})'.format( - test=ast_to_source_code(node.test), - msg=ast_to_source_code(node.msg) if node.msg else "")).body[0].value + convert_assert_node = ( + gast.parse( + '_jst.Assert({test}, {msg})'.format( + test=ast_to_source_code(node.test), + msg=ast_to_source_code(node.msg) if node.msg else "", + ) + ) + .body[0] + .value + ) return gast.Expr(value=convert_assert_node) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/ast_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/ast_transformer.py index ff7a9a2a957b634054d2d43c0f66a7bc5cbaf62c..3ae91027f5aeedcb90b3db86e96f1f5970bfd507 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/ast_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/ast_transformer.py @@ -19,24 +19,60 @@ import os from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer -from paddle.fluid.dygraph.dygraph_to_static.early_return_transformer import EarlyReturnTransformer -from paddle.fluid.dygraph.dygraph_to_static.assert_transformer import AssertTransformer -from paddle.fluid.dygraph.dygraph_to_static.basic_api_transformer import BasicApiTransformer -from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import BreakContinueTransformer -from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import BreakTransformOptimizer -from paddle.fluid.dygraph.dygraph_to_static.call_transformer import CallTransformer -from paddle.fluid.dygraph.dygraph_to_static.cast_transformer import CastTransformer -from paddle.fluid.dygraph.dygraph_to_static.typehint_transformer import TypeHintTransformer -from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import IfElseTransformer -from paddle.fluid.dygraph.dygraph_to_static.logical_transformer import LogicalTransformer -from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import LoopTransformer -from paddle.fluid.dygraph.dygraph_to_static.print_transformer import PrintTransformer -from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ReturnTransformer -from paddle.fluid.dygraph.dygraph_to_static.create_variable_transformer import CreateVariableTransformer -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor -from paddle.fluid.dygraph.dygraph_to_static.tensor_shape_transformer import TensorShapeTransformer -from paddle.fluid.dygraph.dygraph_to_static.decorator_transformer import DecoratorTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.early_return_transformer import ( + EarlyReturnTransformer, +) +from 
paddle.fluid.dygraph.dygraph_to_static.assert_transformer import ( + AssertTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.basic_api_transformer import ( + BasicApiTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import ( + BreakContinueTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import ( + BreakTransformOptimizer, +) +from paddle.fluid.dygraph.dygraph_to_static.call_transformer import ( + CallTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.cast_transformer import ( + CastTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.typehint_transformer import ( + TypeHintTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import ( + IfElseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.logical_transformer import ( + LogicalTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import ( + LoopTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.print_transformer import ( + PrintTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ( + ReturnTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.create_variable_transformer import ( + CreateVariableTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + StaticAnalysisVisitor, +) +from paddle.fluid.dygraph.dygraph_to_static.tensor_shape_transformer import ( + TensorShapeTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.decorator_transformer import ( + DecoratorTransformer, +) from paddle.fluid.dygraph.dygraph_to_static import logging_utils from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code @@ -51,8 +87,11 @@ def apply_optimization(transformers): And not all optimized transformations are applied by default. 
It's controlled by 'export FLAGS_optim_transformation=1' """ - flag = str( - os.environ.get('FLAGS_optim_transformation')) in ['1', 'True', 'true'] + flag = str(os.environ.get('FLAGS_optim_transformation')) in [ + '1', + 'True', + 'true', + ] if flag: transformers.insert(3, BreakTransformOptimizer) @@ -69,7 +108,8 @@ class DygraphToStaticAst(BaseTransformer): # save root for some analysis may need global AST self.root = root self.static_analysis_visitor = StaticAnalysisVisitor(root) - self.static_analysis_root = self.static_analysis_visitor.get_node_wrapper_root( + self.static_analysis_root = ( + self.static_analysis_visitor.get_node_wrapper_root() ) self.decorate_func_name = None self.transfer_from_node_type(self.static_analysis_root) @@ -77,12 +117,14 @@ class DygraphToStaticAst(BaseTransformer): def _apply(self, transformer, node_wrapper, log_level): transformer(node_wrapper).transform() - self.translator_logger.log_transformed_code(log_level, self.root, - transformer.__name__) + self.translator_logger.log_transformed_code( + log_level, self.root, transformer.__name__ + ) def transfer_from_node_type(self, node_wrapper): self.translator_logger.log( - 1, "Source code: \n{}".format(ast_to_source_code(self.root))) + 1, "Source code: \n{}".format(ast_to_source_code(self.root)) + ) # Generic transformation self.visit(node_wrapper.node) @@ -110,7 +152,8 @@ class DygraphToStaticAst(BaseTransformer): self._apply(transformer, node_wrapper, log_level=index + 1) self.translator_logger.log_transformed_code( - logging_utils.LOG_AllTransformer, self.root, "All Transformers") + logging_utils.LOG_AllTransformer, self.root, "All Transformers" + ) def visit_FunctionDef(self, node): if self.decorate_func_name is None: diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py index b77c5b3c70174e976245cae860a08378c52ffc47..cb8f1562b3efc6f9c943fad28f824591e8f41404 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/base_transformer.py @@ -20,16 +20,23 @@ from paddle.fluid.dygraph.dygraph_to_static.utils import create_assign_node from paddle.fluid.dygraph.dygraph_to_static.utils import ORIGI_INFO from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_INDEX_PREFIX from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_TUPLE_PREFIX -from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_TUPLE_INDEX_PREFIX +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FOR_ITER_TUPLE_INDEX_PREFIX, +) from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_VAR_LEN_PREFIX -from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_VAR_NAME_PREFIX -from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_ZIP_TO_LIST_PREFIX +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FOR_ITER_VAR_NAME_PREFIX, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FOR_ITER_ZIP_TO_LIST_PREFIX, +) from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_TARGET_PREFIX -from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_ITERATOR_PREFIX +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FOR_ITER_ITERATOR_PREFIX, +) class BaseTransformer(gast.NodeTransformer): - def visit(self, node): if not isinstance(node, gast.AST): msg = ('Expected "gast.AST", but got "{}".').format(type(node)) @@ -41,7 +48,7 @@ class BaseTransformer(gast.NodeTransformer): iter_result = 
result if iter_result is not node and iter_result is not None: if not isinstance(iter_result, (list, tuple)): - iter_result = (iter_result, ) + iter_result = (iter_result,) if origin_info is not None: for n in iter_result: setattr(n, ORIGI_INFO, origin_info) @@ -50,10 +57,10 @@ class BaseTransformer(gast.NodeTransformer): class RenameTransformer(BaseTransformer): - def __init__(self, node): assert isinstance( - node, gast.AST), "RenameTransformer only accepts gast.AST as input" + node, gast.AST + ), "RenameTransformer only accepts gast.AST as input" self.root = node self.old_name = "" self.new_name = "" @@ -113,7 +120,8 @@ class NameNodeReplaceTransformer(BaseTransformer): names = node.names def replace(s): - if s == self.target_name: return self.replace_node.id + if s == self.target_name: + return self.replace_node.id return s node.names = list(map(replace, names)) @@ -121,7 +129,7 @@ class NameNodeReplaceTransformer(BaseTransformer): class ForLoopTuplePreTransformer(BaseTransformer): - """ pre-process of for loop. + """pre-process of for loop. >>> for A in B: >>> C @@ -152,24 +160,28 @@ class ForLoopTuplePreTransformer(BaseTransformer): assign_iterator_node = gast.parse( f"{tuple_iterator} = _jst.Indexable({ast_to_source_code(node.iter).strip()})" ).body[0] - node.target = gast.Name(id=tuple_target, - ctx=gast.Store(), - annotation=None, - type_comment=None) - node.iter = gast.Name(id=tuple_iterator, - ctx=gast.Load(), - annotation=None, - type_comment=None) + node.target = gast.Name( + id=tuple_target, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ) + node.iter = gast.Name( + id=tuple_iterator, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ) node.body[0:0] = self.tuple_to_stmts(origin_tuple_node, tuple_target) # return a list will insert a list of node replace the original for node. return [assign_iterator_node, node] def tuple_node_to_unpack_structure(self, node): - """ Create a sequence to represents the structure of nest. - For example: `a, (b,c), [d,e,f]` is represented by - `[1, [1,1], [1,1,1]]`. the `1` is just a notation. + """Create a sequence to represents the structure of nest. + For example: `a, (b,c), [d,e,f]` is represented by + `[1, [1,1], [1,1,1]]`. the `1` is just a notation. - Specially, `a` is represented by `1`. + Specially, `a` is represented by `1`. """ ret = [] if not isinstance(node, (gast.Tuple, gast.List)): @@ -181,7 +193,9 @@ class ForLoopTuplePreTransformer(BaseTransformer): def tuple_to_stmts(self, node, tuple_name): structure_str = str(self.tuple_node_to_unpack_structure(node)) node_str = ast_to_source_code(node).strip() - assign_node_str = f"{node_str} = _jst.Unpack({tuple_name}, {structure_str})" + assign_node_str = ( + f"{node_str} = _jst.Unpack({tuple_name}, {structure_str})" + ) assign_node = gast.parse(assign_node_str).body[0] return [assign_node] @@ -213,8 +227,9 @@ class ForNodeVisitor(object): # 2. gast.For node main parts self.target = for_node.target # NOTE: type may be Node or list[Node] - self.iter_args = for_node.iter if self.is_for_iter( - ) else for_node.iter.args + self.iter_args = ( + for_node.iter if self.is_for_iter() else for_node.iter.args + ) self.body = for_node.body # 3. 
key shared node or names @@ -236,7 +251,8 @@ class ForNodeVisitor(object): self.iter_var_len_name = unique_name.generate(FOR_ITER_VAR_LEN_PREFIX) # - created zip to list var : __for_loop_iter_zip_0 self.iter_zip_to_list_name = unique_name.generate( - FOR_ITER_ZIP_TO_LIST_PREFIX) + FOR_ITER_ZIP_TO_LIST_PREFIX + ) # - var.numpy()/var # - for x in var|var.numpy() @@ -262,17 +278,22 @@ class ForNodeVisitor(object): return None def is_for_range_iter(self): - return isinstance(self.node.iter, gast.Call) and isinstance( - self.node.iter.func, - gast.Name) and self.node.iter.func.id == "range" + return ( + isinstance(self.node.iter, gast.Call) + and isinstance(self.node.iter.func, gast.Name) + and self.node.iter.func.id == "range" + ) def is_for_iter(self): - if isinstance(self.node.iter, - (gast.Name, gast.Attribute, gast.List, gast.Tuple)): + if isinstance( + self.node.iter, (gast.Name, gast.Attribute, gast.List, gast.Tuple) + ): return True - elif isinstance(self.node.iter, gast.Call) and isinstance( - self.node.iter.func, - gast.Attribute) and self.node.iter.func.attr == 'numpy': + elif ( + isinstance(self.node.iter, gast.Call) + and isinstance(self.node.iter.func, gast.Attribute) + and self.node.iter.func.attr == 'numpy' + ): return True elif isinstance(self.node.iter, gast.Subscript): return True @@ -280,17 +301,23 @@ class ForNodeVisitor(object): return False def is_for_enumerate_iter(self): - return isinstance(self.node.iter, gast.Call) and isinstance( - self.node.iter.func, - gast.Name) and self.node.iter.func.id == "enumerate" + return ( + isinstance(self.node.iter, gast.Call) + and isinstance(self.node.iter.func, gast.Name) + and self.node.iter.func.id == "enumerate" + ) def _args_check(self): if self.is_for_range_iter(): self.args_length = len(self.iter_args) - assert self.args_length >= 1 and self.args_length <= 3, "range() function takes 1 to 3 arguments" + assert ( + self.args_length >= 1 and self.args_length <= 3 + ), "range() function takes 1 to 3 arguments" elif self.is_for_enumerate_iter(): self.args_length = len(self.iter_args) - assert self.args_length >= 1 and self.args_length <= 2, "enumerate() function takes 1 to 2 arguments" + assert ( + self.args_length >= 1 and self.args_length <= 2 + ), "enumerate() function takes 1 to 2 arguments" else: self.args_length = None @@ -324,8 +351,9 @@ class ForNodeVisitor(object): target_node, assign_node = self._build_assign_var_slice_node() body_stmts[0:0] = [assign_node] for body_node in body_stmts: - NameNodeReplaceTransformer(body_node, self.iter_var_name, - target_node) + NameNodeReplaceTransformer( + body_node, self.iter_var_name, target_node + ) body_stmts.append(self._build_index_increase_node(step_node)) return init_stmts, cond_stmt, body_stmts @@ -346,8 +374,9 @@ class ForNodeVisitor(object): target_node, assign_node = self._build_assign_var_slice_node() body_stmts[0:0] = [assign_node] for body_node in body_stmts: - NameNodeReplaceTransformer(body_node, self.iter_var_name, - target_node) + NameNodeReplaceTransformer( + body_node, self.iter_var_name, target_node + ) body_stmts.append(self._build_index_increase_node(step_node)) body_stmts.append(self._build_enum_increase_node()) @@ -360,7 +389,8 @@ class ForNodeVisitor(object): index_init_value_str = '0' else: index_init_value_str = ast_to_source_code( - self.iter_args[0]).strip() + self.iter_args[0] + ).strip() index_init_var_name = self.iter_var_name else: @@ -368,7 +398,8 @@ class ForNodeVisitor(object): index_init_var_name = self.iter_idx_name index_init_node_source_str = 
"{target} = {value}".format( - target=index_init_var_name, value=index_init_value_str) + target=index_init_var_name, value=index_init_value_str + ) index_init_node = gast.parse(index_init_node_source_str).body[0] @@ -376,16 +407,20 @@ class ForNodeVisitor(object): def _build_var_len_assign_node(self): # get the length of iterable variable - if isinstance(self.iter_node, gast.Call) and isinstance( - self.iter_node.func, - gast.Attribute) and self.iter_node.func.attr == 'numpy': + if ( + isinstance(self.iter_node, gast.Call) + and isinstance(self.iter_node.func, gast.Attribute) + and self.iter_node.func.attr == 'numpy' + ): iter_var_name = ast_to_source_code( - self.iter_node.func.value).strip() + self.iter_node.func.value + ).strip() else: iter_var_name = ast_to_source_code(self.iter_node).strip() convert_len_node_source_str = '{} = _jst.Len({})'.format( - self.iter_var_len_name, iter_var_name) + self.iter_var_len_name, iter_var_name + ) convert_len_node = gast.parse(convert_len_node_source_str).body[0] @@ -403,18 +438,22 @@ class ForNodeVisitor(object): """ new_nodes = [] if isinstance(self.iter_node, gast.Call) and isinstance( - self.iter_node.func, gast.Name): + self.iter_node.func, gast.Name + ): if self.iter_node.func.id == 'zip': iter_var_name = ast_to_source_code(self.iter_node).strip() zip_to_list_str = "{target} = list({value})".format( - target=self.iter_zip_to_list_name, value=iter_var_name) + target=self.iter_zip_to_list_name, value=iter_var_name + ) zip_to_list_node = gast.parse(zip_to_list_str).body[0] new_nodes.append(zip_to_list_node) - self.iter_node = gast.Name(id=self.iter_zip_to_list_name, - ctx=gast.Load(), - annotation=None, - type_comment=None) + self.iter_node = gast.Name( + id=self.iter_zip_to_list_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ) return new_nodes @@ -424,27 +463,35 @@ class ForNodeVisitor(object): else: init_value_str = '0' - enum_init_node_source_str = "{} = {}".format(self.enum_idx_name, - init_value_str) + enum_init_node_source_str = "{} = {}".format( + self.enum_idx_name, init_value_str + ) enum_init_node = gast.parse(enum_init_node_source_str).body[0] return enum_init_node def _build_compare_node(self): if self.is_for_range_iter(): - compare_node = self.iter_args[ - 0] if self.args_length == 1 else self.iter_args[1] + compare_node = ( + self.iter_args[0] + if self.args_length == 1 + else self.iter_args[1] + ) else: - compare_node = gast.Name(id=self.iter_var_len_name, - ctx=gast.Load(), - annotation=None, - type_comment=None) + compare_node = gast.Name( + id=self.iter_var_len_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ) return compare_node def _build_step_node(self): if self.is_for_range_iter(): - step_node = self.iter_args[ - 2] if self.args_length == 3 else gast.Constant(value=1, - kind=None) + step_node = ( + self.iter_args[2] + if self.args_length == 3 + else gast.Constant(value=1, kind=None) + ) else: step_node = gast.Constant(value=1, kind=None) return step_node @@ -453,62 +500,82 @@ class ForNodeVisitor(object): if not isinstance(step_node, (gast.Constant, gast.UnaryOp)): raise NotImplementedError( "Dynamic-to-Static only supports the step value is a constant or negative constant in 'for-range' statements, " - "such as '2', '-3'. But received: '{}'. Please fix code to be compatible with Dynamic-to-Static." - .format(ast_to_source_code(step_node).strip())) + "such as '2', '-3'. But received: '{}'. 
Please fix code to be compatible with Dynamic-to-Static.".format( + ast_to_source_code(step_node).strip() + ) + ) if isinstance(step_node, gast.UnaryOp) or step_node.value < 0: # eg: # range(max, min, -2) # -> # i > min - return gast.Compare(left=gast.Name( - id=self.iter_var_name - if self.is_for_range_iter() else self.iter_idx_name, - ctx=gast.Load(), - annotation=None, - type_comment=None), - ops=[gast.Gt()], - comparators=[compare_node]) + return gast.Compare( + left=gast.Name( + id=self.iter_var_name + if self.is_for_range_iter() + else self.iter_idx_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ), + ops=[gast.Gt()], + comparators=[compare_node], + ) else: # eg: # range(min, max, 2) # -> # i < max - return gast.Compare(left=gast.Name( - id=self.iter_var_name - if self.is_for_range_iter() else self.iter_idx_name, - ctx=gast.Load(), - annotation=None, - type_comment=None), - ops=[gast.Lt()], - comparators=[compare_node]) + return gast.Compare( + left=gast.Name( + id=self.iter_var_name + if self.is_for_range_iter() + else self.iter_idx_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ), + ops=[gast.Lt()], + comparators=[compare_node], + ) def _build_index_increase_node(self, step_node): - return gast.AugAssign(target=gast.Name( - id=self.iter_var_name - if self.is_for_range_iter() else self.iter_idx_name, - ctx=gast.Store(), - annotation=None, - type_comment=None), - op=gast.Add(), - value=step_node) + return gast.AugAssign( + target=gast.Name( + id=self.iter_var_name + if self.is_for_range_iter() + else self.iter_idx_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ), + op=gast.Add(), + value=step_node, + ) def _build_assign_var_slice_node(self): var_slice_str = "{}[{}]".format( - ast_to_source_code(self.iter_node).strip(), self.iter_idx_name) + ast_to_source_code(self.iter_node).strip(), self.iter_idx_name + ) var_slice_node = gast.parse(var_slice_str).body[0].value new_iter_var_name = unique_name.generate(FOR_ITER_VAR_NAME_PREFIX) - target_node, assign_node = create_assign_node(new_iter_var_name, - var_slice_node) + target_node, assign_node = create_assign_node( + new_iter_var_name, var_slice_node + ) return target_node, assign_node def _build_enum_increase_node(self): - return gast.AugAssign(target=gast.Name(id=self.enum_idx_name, - ctx=gast.Store(), - annotation=None, - type_comment=None), - op=gast.Add(), - value=gast.Constant(value=1, kind=None)) + return gast.AugAssign( + target=gast.Name( + id=self.enum_idx_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ), + op=gast.Add(), + value=gast.Constant(value=1, kind=None), + ) def _get_iter_var_name(self): if self.is_for_range_iter(): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/basic_api_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/basic_api_transformer.py index 55afb7ae6d6de6bb4eecce60fdfa30792ae5e835..d6c32a1fc2bd12ebdbd18d79ac8de2d0d132872a 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/basic_api_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/basic_api_transformer.py @@ -15,9 +15,13 @@ import astor from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static import utils -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from 
paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code @@ -139,9 +143,11 @@ class AttributeJstTransformer(BaseTransformer): assert isinstance( node, gast.AST ), "Input non-gast.AST node for the initialization of ToTensorTransformer." - self.interested_name = set([ - 'size', - ]) + self.interested_name = set( + [ + 'size', + ] + ) self.root = node def transform(self): @@ -151,12 +157,21 @@ class AttributeJstTransformer(BaseTransformer): def visit_Attribute(self, node): assert isinstance(node, gast.Attribute) assert isinstance(node.attr, str) - if isinstance(node.ctx, - gast.Load) and node.attr in self.interested_name: + if ( + isinstance(node.ctx, gast.Load) + and node.attr in self.interested_name + ): attr = node.attr value = node.value - node = gast.parse("_jst.Attr({}, \"{}\")".format( - ast_to_source_code(value).strip(), attr)).body[0].value + node = ( + gast.parse( + "_jst.Attr({}, \"{}\")".format( + ast_to_source_code(value).strip(), attr + ) + ) + .body[0] + .value + ) self.generic_visit(node) return node diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/break_continue_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/break_continue_transformer.py index 8bfc7187ecab06d5830f383328dbcc05b0d1e2bf..6726e5f14f45c322c96481e8f7838237857014d0 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/break_continue_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/break_continue_transformer.py @@ -17,9 +17,15 @@ from paddle.utils import gast from paddle.fluid import unique_name from paddle.fluid.dygraph.dygraph_to_static.utils import index_in_list from paddle.fluid.dygraph.dygraph_to_static.utils import BaseNodeVisitor -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_node -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ForNodeVisitor +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_bool_node, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + ForNodeVisitor, +) __all__ = ['BreakContinueTransformer'] @@ -35,8 +41,8 @@ class ForToWhileTransformer(BaseTransformer): def __init__(self, parent_node, loop_node, condition_node): assert isinstance( - loop_node, - gast.For), "loop_node is not gast.For in ForToWhileTransformer" + loop_node, gast.For + ), "loop_node is not gast.For in ForToWhileTransformer" self.parent_node = parent_node self.loop_node = loop_node self.condition_node = condition_node @@ -47,7 +53,7 @@ class ForToWhileTransformer(BaseTransformer): i = index_in_list(body_list, self.loop_node) if i != -1: new_stmts = self.get_for_stmt_nodes(body_list[i]) - body_list[i:i + 1] = new_stmts + body_list[i : i + 1] = new_stmts i += len(new_stmts) return new_stmts if hasattr(self.parent_node, 'orelse'): @@ -55,7 +61,7 @@ class ForToWhileTransformer(BaseTransformer): i = index_in_list(body_list, self.loop_node) if i != -1: new_stmts = self.get_for_stmt_nodes(body_list[i]) - body_list[i:i + 1] = new_stmts + body_list[i : i + 1] = new_stmts i += len(new_stmts) return new_stmts raise ValueError( @@ -64,7 +70,8 @@ class ForToWhileTransformer(BaseTransformer): def get_for_stmt_nodes(self, node): assert isinstance( - node, gast.For), "Input node is NOT gast.For in 
get_for_stmt_nodes" + node, gast.For + ), "Input node is NOT gast.For in get_for_stmt_nodes" # 1. parse current gast.For node current_for_node_parser = ForNodeVisitor(node) @@ -74,13 +81,14 @@ class ForToWhileTransformer(BaseTransformer): init_stmts, cond_stmt, body_stmts = stmts_tuple # 2. append break statement - new_cond_stmt = gast.BoolOp(op=gast.And(), - values=[cond_stmt, self.condition_node]) + new_cond_stmt = gast.BoolOp( + op=gast.And(), values=[cond_stmt, self.condition_node] + ) # 3. construct gast.While node - while_node = gast.While(test=new_cond_stmt, - body=body_stmts, - orelse=node.orelse) + while_node = gast.While( + test=new_cond_stmt, body=body_stmts, orelse=node.orelse + ) init_stmts.append(while_node) return init_stmts @@ -132,7 +140,8 @@ class BreakContinueTransformer(BaseNodeVisitor): # block can be a node containing stmt list. We should remove all stmts # after the 'break/continue' and set the V to True here. first_block_index = self._remove_stmts_after_break_continue( - node, variable_name, loop_node_index) + node, variable_name, loop_node_index + ) # 3. Add 'if not V' for stmts in ancestor blocks between the first one # (exclusive) and the ancestor loop (inclusive) @@ -142,19 +151,25 @@ class BreakContinueTransformer(BaseNodeVisitor): assign_false_node = create_bool_node(variable_name, False) self._add_stmt_before_cur_node(loop_node_index, assign_false_node) - cond_var_node = gast.UnaryOp(op=gast.Not(), - operand=gast.Name(id=variable_name, - ctx=gast.Load(), - annotation=None, - type_comment=None)) + cond_var_node = gast.UnaryOp( + op=gast.Not(), + operand=gast.Name( + id=variable_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ), + ) if isinstance(loop_node, gast.While): - loop_node.test = gast.BoolOp(op=gast.And(), - values=[loop_node.test, cond_var_node]) + loop_node.test = gast.BoolOp( + op=gast.And(), values=[loop_node.test, cond_var_node] + ) elif isinstance(loop_node, gast.For): parent_node = self.ancestor_nodes[loop_node_index - 1] - for_to_while = ForToWhileTransformer(parent_node, loop_node, - cond_var_node) + for_to_while = ForToWhileTransformer( + parent_node, loop_node, cond_var_node + ) for_to_while.transform() def visit_Continue(self, node): @@ -169,7 +184,8 @@ class BreakContinueTransformer(BaseNodeVisitor): # block can be a node containing stmt list. We should remove all stmts # after the 'break/continue' and set the V to True here. first_block_index = self._remove_stmts_after_break_continue( - node, variable_name, loop_node_index) + node, variable_name, loop_node_index + ) # 3. 
Add 'if not V' for stmts in ancestor blocks between the first one # (exclusive) and the ancestor loop (inclusive) @@ -179,44 +195,51 @@ class BreakContinueTransformer(BaseNodeVisitor): assign_false_node = create_bool_node(variable_name, False) loop_node.body.insert(0, assign_false_node) - def _remove_stmts_after_break_continue(self, break_continue_node, - break_continue_name, - loop_node_index): + def _remove_stmts_after_break_continue( + self, break_continue_node, break_continue_name, loop_node_index + ): for first_block_index in range( - len(self.ancestor_nodes) - 1, loop_node_index - 1, -1): + len(self.ancestor_nodes) - 1, loop_node_index - 1, -1 + ): first_block = self.ancestor_nodes[first_block_index] - if hasattr(first_block, - "body") and self._replace_break_continue_in_stmt_list( - first_block.body, break_continue_node, - break_continue_name): + if hasattr( + first_block, "body" + ) and self._replace_break_continue_in_stmt_list( + first_block.body, break_continue_node, break_continue_name + ): return first_block_index - if hasattr(first_block, - "orelse") and self._replace_break_continue_in_stmt_list( - first_block.orelse, break_continue_node, - break_continue_name): + if hasattr( + first_block, "orelse" + ) and self._replace_break_continue_in_stmt_list( + first_block.orelse, break_continue_node, break_continue_name + ): return first_block_index return first_block_index - def _replace_if_stmt(self, loop_node_index, first_block_index, - break_continue_name): + def _replace_if_stmt( + self, loop_node_index, first_block_index, break_continue_name + ): for i in range(first_block_index - 1, loop_node_index - 1, -1): cur_node = self.ancestor_nodes[i] son_node = self.ancestor_nodes[i + 1] - if hasattr(cur_node, - 'body') and self._replace_after_node_to_if_in_stmt_list( - cur_node.body, son_node, break_continue_name): + if hasattr( + cur_node, 'body' + ) and self._replace_after_node_to_if_in_stmt_list( + cur_node.body, son_node, break_continue_name + ): continue if hasattr( - cur_node, - 'orelse') and self._replace_after_node_to_if_in_stmt_list( - cur_node.orelse, son_node, break_continue_name): + cur_node, 'orelse' + ) and self._replace_after_node_to_if_in_stmt_list( + cur_node.orelse, son_node, break_continue_name + ): continue - def _replace_break_continue_in_stmt_list(self, stmt_list, - break_continue_node, - break_continue_name): + def _replace_break_continue_in_stmt_list( + self, stmt_list, break_continue_node, break_continue_name + ): i = index_in_list(stmt_list, break_continue_node) if i == -1: return False @@ -224,8 +247,9 @@ class BreakContinueTransformer(BaseNodeVisitor): stmt_list[i:] = [assign_true_node] return True - def _replace_after_node_to_if_in_stmt_list(self, stmt_list, node, - break_continue_name): + def _replace_after_node_to_if_in_stmt_list( + self, stmt_list, node, break_continue_name + ): i = index_in_list(stmt_list, node) if i == -1: return False @@ -234,28 +258,37 @@ class BreakContinueTransformer(BaseNodeVisitor): # No need to add, we consider this as added successfully return True - if_stmt = gast.If(test=gast.UnaryOp(op=gast.Not(), - operand=gast.Name( - id=break_continue_name, - ctx=gast.Store(), - annotation=None, - type_comment=None)), - body=stmt_list[i + 1:], - orelse=[]) - stmt_list[i + 1:] = [] + if_stmt = gast.If( + test=gast.UnaryOp( + op=gast.Not(), + operand=gast.Name( + id=break_continue_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ), + ), + body=stmt_list[i + 1 :], + orelse=[], + ) + stmt_list[i + 1 :] = [] 
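The guard construction above is the heart of the break/continue lowering: the break becomes a generated boolean, the statements after it are wrapped in `if not V`, and the loop condition is AND-ed with `not V` (for-loops are first rewritten to while-loops by ForToWhileTransformer). A small before/after sketch, with __break_0 standing in for the name that unique_name.generate would produce:

    # before the transform
    total, i = 0, 0
    while i < 10:
        if i == 3:
            break
        total += i
        i += 1

    # after the transform (sketch)
    total, i = 0, 0
    __break_0 = False
    while i < 10 and not __break_0:
        if i == 3:
            __break_0 = True
        if not __break_0:
            total += i
            i += 1

    assert total == 3 and i == 3   # both versions stop at i == 3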
stmt_list.append(if_stmt) return True def _add_stmt_before_cur_node(self, cur_node_index, stmt_node): cur_node = self.ancestor_nodes[cur_node_index] parent_node = self.ancestor_nodes[cur_node_index - 1] - if hasattr(parent_node, - "body") and self._add_stmt_into_list_before_node( - parent_node.body, cur_node, stmt_node): + if hasattr( + parent_node, "body" + ) and self._add_stmt_into_list_before_node( + parent_node.body, cur_node, stmt_node + ): return True - if hasattr(parent_node, - "orelse") and self._add_stmt_into_list_before_node( - parent_node.orelse, cur_node, stmt_node): + if hasattr( + parent_node, "orelse" + ) and self._add_stmt_into_list_before_node( + parent_node.orelse, cur_node, stmt_node + ): return True return False @@ -330,11 +363,13 @@ class BreakTransformOptimizer(BaseNodeVisitor): if isinstance(loop_node, gast.While): loop_node.test = gast.BoolOp( - op=gast.And(), values=[loop_node.test, cond_var_node]) + op=gast.And(), values=[loop_node.test, cond_var_node] + ) elif isinstance(loop_node, gast.For): parent_node = self.ancestor_nodes[loop_node_index - 1] - for_to_while = ForToWhileTransformer(parent_node, loop_node, - cond_var_node) + for_to_while = ForToWhileTransformer( + parent_node, loop_node, cond_var_node + ) for_to_while.transform() def _is_break_cond_pattern(self, break_node, loop_node): @@ -351,8 +386,10 @@ class BreakTransformOptimizer(BaseNodeVisitor): is_matched = False if isinstance(parent_if_node, gast.If): # gast.If only contains `break` - break_first_in_if = parent_if_node.body[0] == break_node and len( - parent_if_node.orelse) == 0 + break_first_in_if = ( + parent_if_node.body[0] == break_node + and len(parent_if_node.orelse) == 0 + ) # gast.If is first node of loop_node if_first_in_loop = loop_node.body[0] == parent_if_node diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py index d6af6ffced743b7f048a451bf6107eda553082e6..a29fe8f9d6b1ef0aae859339c05cda1a45553a40 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py @@ -14,10 +14,14 @@ from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_api -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) PDB_SET = "pdb.set_trace" @@ -47,7 +51,10 @@ class CallTransformer(BaseTransformer): func_str = ast_to_source_code(node.func).strip() try: - from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin + from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ( + is_builtin, + ) + need_convert_builtin_func_list = { 'len', 'zip', diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/cast_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/cast_transformer.py index 06c797033812f8bc87a7bbf4add58f17533b84a5..7a5821e12f5bf2b857ea0695dc5c61d4a5977c04 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/cast_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/cast_transformer.py @@ -14,9 +14,13 @@ from paddle.utils import gast -from 
paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class CastTransformer(BaseTransformer): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py index 3f13618283916464d5408b0a33a10b819e49ca7d..5adf810eef4a388e3b667ac416f94d52ea72483d 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_call_func.py @@ -26,12 +26,26 @@ import six import builtins from paddle.fluid.dygraph.container import Sequential -from paddle.fluid.dygraph.dygraph_to_static.convert_operators import convert_len, convert_zip -from paddle.fluid.dygraph.dygraph_to_static.convert_operators import convert_range, convert_enumerate -from paddle.fluid.dygraph.dygraph_to_static.logging_utils import TranslatorLogger -from paddle.fluid.dygraph.dygraph_to_static.program_translator import StaticFunction -from paddle.fluid.dygraph.dygraph_to_static.program_translator import convert_to_static -from paddle.fluid.dygraph.dygraph_to_static.program_translator import unwrap_decorators +from paddle.fluid.dygraph.dygraph_to_static.convert_operators import ( + convert_len, + convert_zip, +) +from paddle.fluid.dygraph.dygraph_to_static.convert_operators import ( + convert_range, + convert_enumerate, +) +from paddle.fluid.dygraph.dygraph_to_static.logging_utils import ( + TranslatorLogger, +) +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + StaticFunction, +) +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + convert_to_static, +) +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + unwrap_decorators, +) from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_func, unwrap from paddle.fluid.dygraph.layers import Layer @@ -39,7 +53,14 @@ __all__ = ["convert_call"] # TODO(liym27): A better way to do this. BUILTIN_LIKELY_MODULES = [ - collections, pdb, copy, inspect, re, six, numpy, logging + collections, + pdb, + copy, + inspect, + re, + six, + numpy, + logging, ] # The api(s) should be considered as plain function and convert # them into static layer code. @@ -65,8 +86,8 @@ class ConversionOptions(object): def is_builtin(func, name=None): - """ predict whether a function is a builtin function with name={name}. - if name == None, then any builtin function will return True + """predict whether a function is a builtin function with name={name}. + if name == None, then any builtin function will return True """ def name_judge(): @@ -93,8 +114,10 @@ def is_unsupported(func): if func_in_dict: translator_logger.log( 2, - "Whitelist: {} is part of built-in module and does not have to be transformed." - .format(func)) + "Whitelist: {} is part of built-in module and does not have to be transformed.".format( + func + ), + ) return True # NOTE: should be placed before `is_paddle_func` @@ -104,8 +127,10 @@ def is_unsupported(func): if is_paddle_func(func): translator_logger.log( 2, - "Whitelist: {} is part of Paddle module and does not have to be transformed." 
- .format(func)) + "Whitelist: {} is part of Paddle module and does not have to be transformed.".format( + func + ), + ) return True @@ -145,8 +170,9 @@ def convert_call(func): # [1. 1. 1.]] """ - translator_logger.log(1, - "Convert callable object: convert {}.".format(func)) + translator_logger.log( + 1, "Convert callable object: convert {}.".format(func) + ) func_self = None converted_call = None @@ -158,8 +184,10 @@ def convert_call(func): if options is not None and options.not_convert: translator_logger.log( 2, - "{} is not converted when it is decorated by 'paddle.jit.not_to_static'." - .format(func)) + "{} is not converted when it is decorated by 'paddle.jit.not_to_static'.".format( + func + ), + ) return func if is_builtin(func, "len"): @@ -183,9 +211,15 @@ def convert_call(func): # occasion. number_of_stars = 30 translator_logger.warn( - "\n\n" + "*" * number_of_stars + - "\nYour function:`{}` doesn't support to transform to static function because it is a generator function, it will be run as-is." - .format(func.__name__) + "\n" + "*" * number_of_stars + "\n\n") + "\n\n" + + "*" * number_of_stars + + "\nYour function:`{}` doesn't support to transform to static function because it is a generator function, it will be run as-is.".format( + func.__name__ + ) + + "\n" + + "*" * number_of_stars + + "\n\n" + ) return func if inspect.isfunction(func): @@ -214,10 +248,12 @@ def convert_call(func): _, fn = unwrap_decorators(fn) global_functions.add(fn) elif inspect.isclass(fn): - if isinstance(fn.__dict__.get(func.__name__, None), - staticmethod): + if isinstance( + fn.__dict__.get(func.__name__, None), staticmethod + ): global_functions.add( - func) # Add func to ensure that we will convert + func + ) # Add func to ensure that we will convert if func in global_functions: converted_call = convert_to_static(func) @@ -227,8 +263,10 @@ def convert_call(func): # If func is not in __globals__, it does not need to be transformed # because it has been transformed before. translator_logger.warn( - "{} doesn't have to be transformed to static function because it has been transformed before, it will be run as-is." - .format(func)) + "{} doesn't have to be transformed to static function because it has been transformed before, it will be run as-is.".format( + func + ) + ) converted_call = func except AttributeError: # NOTE: @@ -274,12 +312,15 @@ def convert_call(func): func_self = None if func_self else func_self else: raise NotImplementedError( - "Callable {} can not be transformed at present.".format(func)) + "Callable {} can not be transformed at present.".format(func) + ) if converted_call is None: translator_logger.warn( - "{} doesn't have to be transformed to static function, and it will be run as-is." 
- .format(func)) + "{} doesn't have to be transformed to static function, and it will be run as-is.".format( + func + ) + ) return func if func_self: diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py index 2e8aa40541ca5091ef5f243934185c135af343aa..0fa48c4260c46d1555aa8b6dc6532745ca1964ed 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/convert_operators.py @@ -15,16 +15,46 @@ import re import paddle from paddle.fluid.data_feeder import convert_dtype -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + to_static_variable, +) from paddle.fluid.framework import core, Variable from paddle.fluid.layers import Assert, Print from paddle.fluid.layers import range as paddle_range -from paddle.fluid.layers import array_length, array_read, array_write, create_array -from paddle.fluid.layers import assign, fill_constant, slice, reduce_all, reduce_any -from paddle.fluid.layers import cast, control_flow, logical_and, logical_not, logical_or, nn -from paddle.fluid.layers.control_flow import cond, while_loop, less_than, increment -from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_VAR_NAME -from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar, Dygraph2StaticException +from paddle.fluid.layers import ( + array_length, + array_read, + array_write, + create_array, +) +from paddle.fluid.layers import ( + assign, + fill_constant, + slice, + reduce_all, + reduce_any, +) +from paddle.fluid.layers import ( + cast, + control_flow, + logical_and, + logical_not, + logical_or, + nn, +) +from paddle.fluid.layers.control_flow import ( + cond, + while_loop, + less_than, + increment, +) +from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ( + RETURN_NO_VALUE_VAR_NAME, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + UndefinedVar, + Dygraph2StaticException, +) from paddle.fluid.dygraph.dygraph_to_static.utils import GetterSetterHelper from paddle.fluid.layers.utils import copy_mutable_vars @@ -37,8 +67,10 @@ def convert_attr(x, attr): def indexable(x, code=None): - if isinstance(x, Variable): return x - if hasattr(x, '__len__') and hasattr(x, '__getitem__'): return x + if isinstance(x, Variable): + return x + if hasattr(x, '__len__') and hasattr(x, '__getitem__'): + return x if hasattr(x, '__iter__'): return [i for i in x] else: @@ -46,8 +78,7 @@ def indexable(x, code=None): def unpack_by_structure(target, structure): - """ unified unpack interface for paddle and python. - """ + """unified unpack interface for paddle and python.""" if isinstance(target, Variable): return _unpack_by_structure_paddle(target, structure) else: @@ -55,8 +86,7 @@ def unpack_by_structure(target, structure): def _unpack_by_structure_python(target, structure): - """ TODO(xiongkun): analysis the differences between python and paddle unpack. 
- """ + """TODO(xiongkun): analysis the differences between python and paddle unpack.""" return _unpack_by_structure_paddle(target, structure) @@ -75,12 +105,9 @@ def _unpack_by_structure_paddle(target, structure): return ret -def convert_while_loop(cond, - body, - getter, - setter, - return_name_ids=None, - push_pop_names=None): +def convert_while_loop( + cond, body, getter, setter, return_name_ids=None, push_pop_names=None +): """ A function representation of a Python ``while`` statement. @@ -100,8 +127,9 @@ def convert_while_loop(cond, # If loop_vars is changed during cond callable, then it causes bug, but current logical_and/logical_not/... doesn't change the loop_vars. pred = cond() if isinstance(pred, Variable): - _run_paddle_while(cond, body, getter, setter, return_name_ids, - push_pop_names) + _run_paddle_while( + cond, body, getter, setter, return_name_ids, push_pop_names + ) else: _run_py_while(cond, body, getter, setter) @@ -117,19 +145,21 @@ def _convert_tensor_arrray_if_necessary(setterhelper, push_pop_names): else: return v - setterhelper.set(push_pop_names, - [maybe_to_tensor_array(v) for v in push_pop_vars]) + setterhelper.set( + push_pop_names, [maybe_to_tensor_array(v) for v in push_pop_vars] + ) -def _run_paddle_while(cond, body, getter, setter, return_name_ids, - push_pop_names): +def _run_paddle_while( + cond, body, getter, setter, return_name_ids, push_pop_names +): # NOTE: loop_vars of Paddle op `control_flow.while_loop` must be Paddle Tensors. helper = GetterSetterHelper(getter, setter, return_name_ids, push_pop_names) _convert_tensor_arrray_if_necessary(helper, push_pop_names) def new_body_fn(*args): - """ wrap the body() and add return value for `while_loop` - the args may be differ from getter(). + """wrap the body() and add return value for `while_loop` + the args may be differ from getter(). """ mutable_loop_vars = args helper.set(return_name_ids, mutable_loop_vars) @@ -137,8 +167,8 @@ def _run_paddle_while(cond, body, getter, setter, return_name_ids, return helper.get(return_name_ids) def new_cond_fn(*args): - """ cond is a zero-args function, which is not - compatible with `while_loop`. + """cond is a zero-args function, which is not + compatible with `while_loop`. """ return cond() @@ -147,8 +177,9 @@ def _run_paddle_while(cond, body, getter, setter, return_name_ids, to_static_variable(var) if not isinstance(var, UndefinedVar) else var for var in helper.get(return_name_ids) ] - helper.set(return_name_ids, - loop_vars) # change the non-local var to variable + helper.set( + return_name_ids, loop_vars + ) # change the non-local var to variable # variable maybe modified to inner var. change it into loop_vars = control_flow.while_loop(new_cond_fn, new_body_fn, loop_vars) helper.set(return_name_ids, loop_vars) @@ -160,8 +191,10 @@ def _run_py_while(cond, body, getter, setter): pred = cond() if isinstance(pred, Variable): raise Dygraph2StaticException( - "python while pred change from bool to variable.") - if not pred: break + "python while pred change from bool to variable." + ) + if not pred: + break body() @@ -293,13 +326,15 @@ def _run_py_logical_not(x): return not x -def convert_ifelse(pred, - true_fn, - false_fn, - get_args, - set_args, - return_name_ids, - push_pop_names=None): +def convert_ifelse( + pred, + true_fn, + false_fn, + get_args, + set_args, + return_name_ids, + push_pop_names=None, +): """ A function representation of a Python ``if/else`` statement. 
@@ -317,63 +352,84 @@ def convert_ifelse(pred, """ if isinstance(pred, Variable): - out = _run_paddle_cond(pred, true_fn, false_fn, get_args, set_args, - return_name_ids, push_pop_names) + out = _run_paddle_cond( + pred, + true_fn, + false_fn, + get_args, + set_args, + return_name_ids, + push_pop_names, + ) else: - out = _run_py_ifelse(pred, true_fn, false_fn, get_args, set_args, - return_name_ids) + out = _run_py_ifelse( + pred, true_fn, false_fn, get_args, set_args, return_name_ids + ) return out -def _run_paddle_cond(pred, true_fn, false_fn, get_args, set_args, - return_name_ids, push_pop_names): +def _run_paddle_cond( + pred, true_fn, false_fn, get_args, set_args, return_name_ids, push_pop_names +): """ Paddle cond API will evaluate both ture_fn and false_fn codes. """ - helper = GetterSetterHelper(get_args, set_args, return_name_ids, - push_pop_names) + helper = GetterSetterHelper( + get_args, set_args, return_name_ids, push_pop_names + ) _convert_tensor_arrray_if_necessary(helper, push_pop_names) pred = cast_bool_if_necessary(pred) init_args = helper.get(return_name_ids) def new_true_fn(): - #init args may contain mutable python container like [var, 2], we copy then like in while_loop + # init args may contain mutable python container like [var, 2], we copy then like in while_loop helper.set(return_name_ids, copy_mutable_vars(init_args)) ret = true_fn() # IfExpr will return a non-None return value, so we just return ret. # We assume normal return has no return value. - if ret is None: return helper.get(return_name_ids) - else: return ret + if ret is None: + return helper.get(return_name_ids) + else: + return ret def new_false_fn(): - #init args may contain mutable python container like [var, 2], we copy then like in while_loop + # init args may contain mutable python container like [var, 2], we copy then like in while_loop helper.set(return_name_ids, copy_mutable_vars(init_args)) ret = false_fn() - if ret is None: return helper.get(return_name_ids) - else: return ret + if ret is None: + return helper.get(return_name_ids) + else: + return ret try: - cond_outs = control_flow.cond(pred, new_true_fn, new_false_fn, None, - return_name_ids) + cond_outs = control_flow.cond( + pred, new_true_fn, new_false_fn, None, return_name_ids + ) except Exception as e: - if re.search("Unsupported return type of true_fn and false_fn in cond", - str(e)): + if re.search( + "Unsupported return type of true_fn and false_fn in cond", str(e) + ): raise Dygraph2StaticException( - "Your if/else have different return type. TODO: add link to modifty. {}" - .format(str(e))) + "Your if/else have different return type. TODO: add link to modifty. {}".format( + str(e) + ) + ) if re.search("Incompatible return values of", str(e)): raise Dygraph2StaticException( - "Your if/else have different number of return value. TODO: add link to modifty. {}" - .format(str(e))) + "Your if/else have different number of return value. TODO: add link to modifty. {}".format( + str(e) + ) + ) raise e get_args = lambda: helper.get(return_name_ids) set_args = lambda vs: helper.set(return_name_ids, vs) return _recover_args_state(cond_outs, get_args, set_args, return_name_ids) -def _run_py_ifelse(pred, true_fn, false_fn, get_args, set_args, - return_name_ids): +def _run_py_ifelse( + pred, true_fn, false_fn, get_args, set_args, return_name_ids +): """ Evaluate python original branch function if-else. 
""" @@ -387,20 +443,22 @@ def _remove_no_value_return_var(out): align_ret = out[0] if isinstance(align_ret, tuple): for index, item in enumerate(align_ret): - if isinstance(item, Variable) and (RETURN_NO_VALUE_VAR_NAME - in item.name): + if isinstance(item, Variable) and ( + RETURN_NO_VALUE_VAR_NAME in item.name + ): # return None if index == 0: - processed_out = (None, ) + out[1:] + processed_out = (None,) + out[1:] elif index == 1: processed_out = align_ret[:1] + out[1:] else: - processed_out = (align_ret[:index], ) + out[1:] + processed_out = (align_ret[:index],) + out[1:] break for index, item in enumerate(processed_out): - if isinstance(item, Variable) and (RETURN_NO_VALUE_VAR_NAME - in item.name): + if isinstance(item, Variable) and ( + RETURN_NO_VALUE_VAR_NAME in item.name + ): processed_out = processed_out[:index] if not processed_out: @@ -415,14 +473,17 @@ def _remove_no_value_return_var(out): def _check_no_undefined_var(outs, names, branch_name): - if names is None: return + if names is None: + return if not isinstance(outs, (list, tuple)): outs = [outs] for var, name in zip(list(outs), names): if isinstance(var, UndefinedVar): raise ValueError( - "Required '{}' must be initialized both in if-else branch, but found it not initialized in '{}'." - .format(name, branch_name)) + "Required '{}' must be initialized both in if-else branch, but found it not initialized in '{}'.".format( + name, branch_name + ) + ) def _recover_args_state(outs, get_args, set_args, return_name_ids): @@ -443,10 +504,11 @@ def _recover_args_state(outs, get_args, set_args, return_name_ids): assert num_outs <= num_args if num_args == 1: - final_outs = (outs, ) if not isinstance(outs, - (list, tuple)) else tuple(outs) + final_outs = ( + (outs,) if not isinstance(outs, (list, tuple)) else tuple(outs) + ) else: - outs = (outs, ) if num_outs == 1 else tuple(outs) + outs = (outs,) if num_outs == 1 else tuple(outs) final_outs = outs + init_args[num_outs:] set_args(final_outs) @@ -464,8 +526,8 @@ def convert_len(var): if isinstance(var, Variable): assert var.ndim > 0, "len() of a 0D tensor is wrong" if var.type in [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.SELECTED_ROWS + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, ]: # Note: Length of var may be known ahead of time in dygraph, # but it probably represents batch size which can be variant. @@ -478,7 +540,8 @@ def convert_len(var): else: raise TypeError( 'len(var) only supports LoDTensor/LoDTensorArray/SelectedRows, but received %s.' - % type(var)) + % type(var) + ) else: if isinstance(var, VariableTuple): return var.__len__() @@ -490,16 +553,17 @@ def convert_zip(*args): if isinstance(arg, Variable) and arg.shape[0] == -1: raise RuntimeError( "Not support zip(tensor, ...) when tensor.shape[0] == -1, " - "but found args[{}].shape[0] == -1 in 'zip'".format(str(i))) + "but found args[{}].shape[0] == -1 in 'zip'".format(str(i)) + ) return zip(*args) # TODO(xiongkun): delete when list is ready. class VariableTuple: """ - this class will cause enumerate can't be wrapped by other iterator change function. - this will be fixed when list is producted. - VariableTuple can only deal with variables which is fixed. + this class will cause enumerate can't be wrapped by other iterator change function. + this will be fixed when list is producted. + VariableTuple can only deal with variables which is fixed. 
""" def __init__(self, var, start=0): @@ -527,7 +591,8 @@ def convert_enumerate(*args): def convert_range(*args): has_variable = any(map(lambda x: isinstance(x, Variable), args)) if has_variable: - if len(args) == 1: return paddle_range(0, args[0], 1, paddle.int64) + if len(args) == 1: + return paddle_range(0, args[0], 1, paddle.int64) if len(args) == 2: return paddle_range(args[0], args[1], 1, paddle.int64) if len(args) == 3: @@ -581,8 +646,12 @@ def convert_shape_compare(left, *args): """ args_len = len(args) - assert args_len >= 2, "convert_shape_compare needs at least one right compare variable" - assert args_len % 2 == 0, "Illegal input for convert_shape_compare, *args should be op(str), var, op(str), var ..." + assert ( + args_len >= 2 + ), "convert_shape_compare needs at least one right compare variable" + assert ( + args_len % 2 == 0 + ), "Illegal input for convert_shape_compare, *args should be op(str), var, op(str), var ..." num_cmp = args_len // 2 if isinstance(left, Variable): @@ -590,7 +659,12 @@ def convert_shape_compare(left, *args): element_wise_result = eval("x " + op_str + " y") if op_str == "!=": return reduce_any(element_wise_result) - elif op_str == "is" or op_str == "is not" or op_str == "in" or op_str == "not in": + elif ( + op_str == "is" + or op_str == "is not" + or op_str == "in" + or op_str == "not in" + ): return element_wise_result else: return reduce_all(element_wise_result) @@ -601,8 +675,9 @@ def convert_shape_compare(left, *args): cmp_op = args[i * 2] cmp_right = args[i * 2 + 1] cur_result = reduce_compare(cmp_left, cmp_op, cmp_right) - final_result = convert_logical_and(lambda: final_result, - lambda: cur_result) + final_result = convert_logical_and( + lambda: final_result, lambda: cur_result + ) return final_result else: cmp_left = left @@ -633,13 +708,23 @@ def convert_var_dtype(var, dtype): if isinstance(var, Variable): src_dtype = convert_dtype(var.dtype) assert src_dtype in [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8' + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', ], "The dtype of var {} is {}, which is not supported in the cast op.".format( - var.name, src_dtype) + var.name, src_dtype + ) assert dtype in [ - 'bool', 'int', 'float' + 'bool', + 'int', + 'float', ], "The casted target dtype is {}, which is not supported in type casting.".format( - dtype) + dtype + ) cast_map = { 'bool': 'bool', 'int': 'int32', @@ -733,7 +818,6 @@ def _run_paddle_pop(array, *args): # TODO(liym27): A better way to slice tensor array. # Maybe support start == end for slice op. def _slice_tensor_array(array, start, end): - def true_fn(): null_array = create_array("float32") return null_array diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/create_variable_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/create_variable_transformer.py index 78ca606dcec4202e57401188b99c042d378dcdb0..3432765191fdda8840298e94f45f5f7f33e94504 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/create_variable_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/create_variable_transformer.py @@ -13,21 +13,28 @@ # limitations under the License. 
from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper -from paddle.fluid.dygraph.dygraph_to_static.utils import FunctionNameLivenessAnalysis -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_undefined_var -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FunctionNameLivenessAnalysis, +) +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_undefined_var, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class CreateVariableTransformer(BaseTransformer): - """ - """ + """ """ def __init__(self, wrapper_root): - assert isinstance( - wrapper_root, AstNodeWrapper - ), "Type of input node should be AstNodeWrapper, but received %s ." % type( - wrapper_root) + assert isinstance(wrapper_root, AstNodeWrapper), ( + "Type of input node should be AstNodeWrapper, but received %s ." + % type(wrapper_root) + ) self.root = wrapper_root.node FunctionNameLivenessAnalysis(self.root) @@ -38,7 +45,7 @@ class CreateVariableTransformer(BaseTransformer): self.visit(self.root) def visit_FunctionDef(self, node): - #attributes = set(filter(lambda x: '.' in x, node.pd_scope.modified_vars())) + # attributes = set(filter(lambda x: '.' in x, node.pd_scope.modified_vars())) self.generic_visit(node) bodys = node.body names = sorted(node.pd_scope.created_vars()) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/decorator_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/decorator_transformer.py index db88d1670e1b4bacee3fe2f9c278597d0f3b8455..55ca1f9d9aca1fd27ee33a4d11dbac5ff2f34e88 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/decorator_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/decorator_transformer.py @@ -14,17 +14,31 @@ # limitations under the License. from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer -from paddle.fluid.dygraph.dygraph_to_static.utils import create_funcDef_node, ast_to_source_code, is_paddle_api, Dygraph2StaticException +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_funcDef_node, + ast_to_source_code, + is_paddle_api, + Dygraph2StaticException, +) import warnings import re from paddle.fluid.dygraph.dygraph_to_static.utils import RE_PYNAME, RE_PYMODULE IGNORE_NAMES = [ - 'declarative', 'to_static', 'dygraph_to_static_func', 'wraps', - 'staticmethod', 'classmethod', 'decorator' + 'declarative', + 'to_static', + 'dygraph_to_static_func', + 'wraps', + 'staticmethod', + 'classmethod', + 'decorator', ] @@ -34,10 +48,10 @@ class DecoratorTransformer(BaseTransformer): """ def __init__(self, wrapper_root): - assert isinstance( - wrapper_root, AstNodeWrapper - ), "Type of input node should be AstNodeWrapper, but received %s ." % type( - wrapper_root) + assert isinstance(wrapper_root, AstNodeWrapper), ( + "Type of input node should be AstNodeWrapper, but received %s ." 
+ % type(wrapper_root) + ) self.root = wrapper_root.node self.ancestor_nodes = [] @@ -70,16 +84,21 @@ class DecoratorTransformer(BaseTransformer): # 1: @_jst.Call(a.b.c.d.deco)() # 2: @q.w.e.r.deco() re_tmp = re.match( - r'({module})*({name}\(){{0,1}}({module})*({name})(\)){{0,1}}\(.*$' - .format(name=RE_PYNAME, module=RE_PYMODULE), deco_full_name) + r'({module})*({name}\(){{0,1}}({module})*({name})(\)){{0,1}}\(.*$'.format( + name=RE_PYNAME, module=RE_PYMODULE + ), + deco_full_name, + ) deco_name = re_tmp.group(4) else: # match case like: # @a.d.g.deco re_tmp = re.match( - r'({module})*({name})$'.format(name=RE_PYNAME, - module=RE_PYMODULE), - deco_full_name) + r'({module})*({name})$'.format( + name=RE_PYNAME, module=RE_PYMODULE + ), + deco_full_name, + ) deco_name = re_tmp.group(2) if deco_name in IGNORE_NAMES: continue @@ -95,25 +114,37 @@ class DecoratorTransformer(BaseTransformer): if '_jst.Call' in deco_full_name: # in this case , the deco_full_name will be like: # '_jst.Call(deco)(5)' - rematch = re.match(r'\_jst\.Call\((.+?)\)\((.*)\)', - deco_full_name) + rematch = re.match( + r'\_jst\.Call\((.+?)\)\((.*)\)', deco_full_name + ) re_name = rematch.group(1) re_args = rematch.group(2) re_args_with_func = deco_target + ', ' + re_args - decofun_str = 'try:\n\t{0} = _jst.Call({1})({2})\nexcept:\n\t{0} = _jst.Call({1})({3})({4})'\ - .format(decoed_func, re_name, re_args_with_func, re_args, deco_target) + decofun_str = 'try:\n\t{0} = _jst.Call({1})({2})\nexcept:\n\t{0} = _jst.Call({1})({3})({4})'.format( + decoed_func, + re_name, + re_args_with_func, + re_args, + deco_target, + ) else: # paddle api will not be transformed to '_jst.Call' rematch = re.match(r'(.+?)\((.*)\)', deco_full_name) re_name = rematch.group(1) re_args = rematch.group(2) re_args_with_func = deco_target + ', ' + re_args - decofun_str = 'try:\n\t{0} = {1}({2})\nexcept:\n\t{0} = {1}({3})({4})'\ - .format(decoed_func, re_name, re_args_with_func, re_args, deco_target) + decofun_str = 'try:\n\t{0} = {1}({2})\nexcept:\n\t{0} = {1}({3})({4})'.format( + decoed_func, + re_name, + re_args_with_func, + re_args, + deco_target, + ) else: decofun_str = '{} = _jst.Call({})({})'.format( - decoed_func, deco_full_name, deco_target) + decoed_func, deco_full_name, deco_target + ) decofun_nodes.extend(gast.parse(decofun_str).body) deco_target = decoed_func @@ -121,12 +152,14 @@ class DecoratorTransformer(BaseTransformer): if not decofun_nodes: return node - orig_func_node = gast.FunctionDef(name='_orig_' + node.name, - args=node.args, - body=node.body, - decorator_list=[], - returns=None, - type_comment=None) + orig_func_node = gast.FunctionDef( + name='_orig_' + node.name, + args=node.args, + body=node.body, + decorator_list=[], + returns=None, + type_comment=None, + ) args = [arg.id for arg in node.args.args] arg_str = ','.join(args) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/early_return_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/early_return_transformer.py index e20f50b609e92d6cf5736bb3c9be5f4edbbc1c65..1ce75b277864e23efe396bd5716b370bf021895a 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/early_return_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/early_return_transformer.py @@ -13,8 +13,12 @@ # limitations under the License. 
from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class EarlyReturnTransformer(BaseTransformer): @@ -23,10 +27,10 @@ class EarlyReturnTransformer(BaseTransformer): """ def __init__(self, wrapper_root): - assert isinstance( - wrapper_root, AstNodeWrapper - ), "Type of input node should be AstNodeWrapper, but received %s ." % type( - wrapper_root) + assert isinstance(wrapper_root, AstNodeWrapper), ( + "Type of input node should be AstNodeWrapper, but received %s ." + % type(wrapper_root) + ) self.root = wrapper_root.node def transform(self): @@ -39,7 +43,8 @@ class EarlyReturnTransformer(BaseTransformer): assert isinstance( node, gast.If ), "Type of input node should be gast.If, but received %s ." % type( - node) + node + ) for child in node.body: if isinstance(child, gast.Return): return True @@ -60,9 +65,11 @@ class EarlyReturnTransformer(BaseTransformer): if isinstance(node, gast.If) and self.is_define_return_in_if(node): destination_nodes = node.orelse # handle stmt like `if/elif/elif` - while len(destination_nodes) > 0 and \ - isinstance(destination_nodes[0], gast.If) and \ - self.is_define_return_in_if(destination_nodes[0]): + while ( + len(destination_nodes) > 0 + and isinstance(destination_nodes[0], gast.If) + and self.is_define_return_in_if(destination_nodes[0]) + ): destination_nodes = destination_nodes[0].orelse return result_nodes diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/error.py b/python/paddle/fluid/dygraph/dygraph_to_static/error.py index d05a8ce4d84211f09eed4195b0ac06ba8970e46d..7e05b5db891f3dd118398e05b304f21a74afdf80 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/error.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/error.py @@ -19,8 +19,15 @@ import linecache import re import numpy as np -from paddle.fluid.dygraph.dygraph_to_static.origin_info import Location, OriginInfo, global_origin_info_map -from paddle.fluid.dygraph.dygraph_to_static.utils import _is_api_in_module_helper, RE_PYMODULE +from paddle.fluid.dygraph.dygraph_to_static.origin_info import ( + Location, + OriginInfo, + global_origin_info_map, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + _is_api_in_module_helper, + RE_PYMODULE, +) ERROR_DATA = "Error data about original source code information and traceback." @@ -72,10 +79,17 @@ class TraceBackFrame(OriginInfo): def formated_message(self): # self.source_code may be empty in some functions. 
# For example, decorator generated function - return ' ' * BLANK_COUNT_BEFORE_FILE_STR + 'File "{}", line {}, in {}\n\t{}'.format( - self.location.filepath, self.location.lineno, self.function_name, - self.source_code.lstrip() - if isinstance(self.source_code, str) else self.source_code) + return ( + ' ' * BLANK_COUNT_BEFORE_FILE_STR + + 'File "{}", line {}, in {}\n\t{}'.format( + self.location.filepath, + self.location.lineno, + self.function_name, + self.source_code.lstrip() + if isinstance(self.source_code, str) + else self.source_code, + ) + ) class TraceBackFrameRange(OriginInfo): @@ -118,26 +132,35 @@ class TraceBackFrameRange(OriginInfo): for i in range(len(self.source_code)): # if source_code[i] is empty line between two code line, dont add blank if self.source_code[i]: - self.source_code[i] = ' ' * ( - blank_count[i] - min_black_count + - BLANK_COUNT_BEFORE_FILE_STR * 2) + self.source_code[i] + self.source_code[i] = ( + ' ' + * ( + blank_count[i] + - min_black_count + + BLANK_COUNT_BEFORE_FILE_STR * 2 + ) + + self.source_code[i] + ) def formated_message(self): - msg = ' ' * BLANK_COUNT_BEFORE_FILE_STR + 'File "{}", line {}, in {}\n'.format( - self.location.filepath, self.location.lineno, self.function_name) + msg = ( + ' ' * BLANK_COUNT_BEFORE_FILE_STR + + 'File "{}", line {}, in {}\n'.format( + self.location.filepath, self.location.lineno, self.function_name + ) + ) # add empty line after range code return msg + '\n'.join(self.source_code) class SuggestionDict(object): - def __init__(self): # {(keywords): (suggestions)} self.suggestion_dict = { - ('is not initialized.', 'Hint:', 'IsInitialized'): - ("Please ensure all your sublayers are inheritted from nn.Layer.", - "Please ensure there is no tensor created explicitly depended on external data, we suggest to register it as buffer tensor. See https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/export_model/principle_cn.html#parameters-buffers for details" - ) + ('is not initialized.', 'Hint:', 'IsInitialized'): ( + "Please ensure all your sublayers are inheritted from nn.Layer.", + "Please ensure there is no tensor created explicitly depended on external data, we suggest to register it as buffer tensor. See https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/export_model/principle_cn.html#parameters-buffers for details", + ) } def keys(self): @@ -156,8 +179,9 @@ class ErrorData(object): Error data attached to an exception which is raised in un-transformed code. 
""" - def __init__(self, error_type, error_value, origin_traceback, - origin_info_map): + def __init__( + self, error_type, error_value, origin_traceback, origin_info_map + ): self.error_type = error_type self.error_value = error_value self.origin_traceback = origin_traceback @@ -182,24 +206,30 @@ class ErrorData(object): func_str = None for frame in tb: searched_name = re.search( - r'({module})*{name}'.format(module=RE_PYMODULE, - name=frame.name), error_line) + r'({module})*{name}'.format( + module=RE_PYMODULE, name=frame.name + ), + error_line, + ) if searched_name: func_str = searched_name.group(0) break try: - module_result = eval("_is_api_in_module_helper({}, '{}')".format( - func_str, "numpy")) - is_numpy_api_err = module_result or (func_str.startswith("numpy.") - or func_str.startswith("np.")) + module_result = eval( + "_is_api_in_module_helper({}, '{}')".format(func_str, "numpy") + ) + is_numpy_api_err = module_result or ( + func_str.startswith("numpy.") or func_str.startswith("np.") + ) except Exception: is_numpy_api_err = False if is_numpy_api_err and func_str: return [ - "TypeError: Code '{}' called numpy API {}, please use Paddle API to replace it." - .format(error_line, func_str), - " values will be changed to variables by dy2static, numpy api can not handle variables" + "TypeError: Code '{}' called numpy API {}, please use Paddle API to replace it.".format( + error_line, func_str + ), + " values will be changed to variables by dy2static, numpy api can not handle variables", ] else: return format_exception @@ -219,45 +249,56 @@ class ErrorData(object): # Simplify error value to improve readability if error is raised in runtime if self.in_runtime: if int( - os.getenv(SIMPLIFY_ERROR_ENV_NAME, - DEFAULT_SIMPLIFY_NEW_ERROR)): + os.getenv(SIMPLIFY_ERROR_ENV_NAME, DEFAULT_SIMPLIFY_NEW_ERROR) + ): self._simplify_error_value() message_lines.append(str(self.error_value)) return '\n'.join(message_lines) # Step2: Optimizes stack information with source code information of dygraph from user. 
user_code_traceback_index = [] - for i, (filepath, lineno, funcname, - code) in enumerate(self.origin_traceback): - dygraph_func_info = self.origin_info_map.get((filepath, lineno), - None) + for i, (filepath, lineno, funcname, code) in enumerate( + self.origin_traceback + ): + dygraph_func_info = self.origin_info_map.get( + (filepath, lineno), None + ) if dygraph_func_info: user_code_traceback_index.append(i) # Add user code traceback for i in user_code_traceback_index: filepath, lineno, funcname, code = self.origin_traceback[i] - dygraph_func_info = self.origin_info_map.get((filepath, lineno), - None) + dygraph_func_info = self.origin_info_map.get( + (filepath, lineno), None + ) if i == user_code_traceback_index[-1]: traceback_frame = TraceBackFrameRange( - dygraph_func_info.location, dygraph_func_info.function_name) + dygraph_func_info.location, dygraph_func_info.function_name + ) else: traceback_frame = TraceBackFrame( - dygraph_func_info.location, dygraph_func_info.function_name, - dygraph_func_info.source_code) + dygraph_func_info.location, + dygraph_func_info.function_name, + dygraph_func_info.source_code, + ) message_lines.append(traceback_frame.formated_message()) error_line = traceback_frame.error_line message_lines.append("") # Add paddle traceback after user code traceback - paddle_traceback_start_index = user_code_traceback_index[ - -1] + 1 if user_code_traceback_index else 0 + paddle_traceback_start_index = ( + user_code_traceback_index[-1] + 1 + if user_code_traceback_index + else 0 + ) for filepath, lineno, funcname, code in self.origin_traceback[ - paddle_traceback_start_index:]: - traceback_frame = TraceBackFrame(Location(filepath, lineno), - funcname, code) + paddle_traceback_start_index: + ]: + traceback_frame = TraceBackFrame( + Location(filepath, lineno), funcname, code + ) message_lines.append(traceback_frame.formated_message()) message_lines.append("") @@ -265,10 +306,12 @@ class ErrorData(object): # NOTE: `format_exception` is a list, its length is 1 in most cases, but sometimes its length # is gather than 1, for example, the error_type is IndentationError. format_exception = traceback.format_exception_only( - self.error_type, self.error_value) + self.error_type, self.error_value + ) if error_line is not None: - format_exception = self.numpy_api_check(format_exception, - error_line) + format_exception = self.numpy_api_check( + format_exception, error_line + ) error_message = [ " " * BLANK_COUNT_BEFORE_FILE_STR + line @@ -280,17 +323,23 @@ class ErrorData(object): def _create_revise_suggestion(self, bottom_error_message): revise_suggestions = [ - '', ' ' * BLANK_COUNT_BEFORE_FILE_STR + 'Revise suggestion: ' + '', + ' ' * BLANK_COUNT_BEFORE_FILE_STR + 'Revise suggestion: ', ] for keywords in self.suggestion_dict.keys(): contain_keywords = [ True for i in keywords if i in ''.join(bottom_error_message) ] if len(contain_keywords) == len( - keywords): # all keywords should be in bottom_error_message + keywords + ): # all keywords should be in bottom_error_message for suggestion in self.suggestion_dict[keywords]: - suggestion_msg = ' ' * BLANK_COUNT_BEFORE_FILE_STR * 2 + '{}. {}'.format( - str(len(revise_suggestions) - 1), suggestion) + suggestion_msg = ( + ' ' * BLANK_COUNT_BEFORE_FILE_STR * 2 + + '{}. 
{}'.format( + str(len(revise_suggestions) - 1), suggestion + ) + ) revise_suggestions.append(suggestion_msg) return revise_suggestions if len(revise_suggestions) > 2 else [] @@ -313,12 +362,12 @@ class ErrorData(object): start_trace = "outputs = static_func(*inputs)" start_idx = error_value_lines_strip.index(start_trace) - error_value_lines = error_value_lines[start_idx + 1:] - error_value_lines_strip = error_value_lines_strip[start_idx + 1:] + error_value_lines = error_value_lines[start_idx + 1 :] + error_value_lines_strip = error_value_lines_strip[start_idx + 1 :] # use empty line to locate the bottom_error_message empty_line_idx = error_value_lines_strip.index('') - bottom_error_message = error_value_lines[empty_line_idx + 1:] + bottom_error_message = error_value_lines[empty_line_idx + 1 :] revise_suggestion = self._create_revise_suggestion(bottom_error_message) error_traceback = [] @@ -336,14 +385,18 @@ class ErrorData(object): if error_value_lines_strip[i].startswith("File "): re_result = re.search(pattern, error_value_lines_strip[i]) tmp_filepath, lineno_str, function_name = re_result.groups() - code = error_value_lines_strip[ - i + 1] if i + 1 < len(error_value_lines_strip) else '' + code = ( + error_value_lines_strip[i + 1] + if i + 1 < len(error_value_lines_strip) + else '' + ) if static_info_map.get((tmp_filepath, int(lineno_str))): user_code_traceback_index.append(len(error_traceback)) error_traceback.append( - (tmp_filepath, int(lineno_str), function_name, code)) + (tmp_filepath, int(lineno_str), function_name, code) + ) error_frame = [] # Add user code traceback @@ -351,20 +404,27 @@ class ErrorData(object): filepath, lineno, funcname, code = error_traceback[i] if i == user_code_traceback_index[-1]: traceback_frame = TraceBackFrameRange( - Location(filepath, lineno), funcname) + Location(filepath, lineno), funcname + ) else: - traceback_frame = TraceBackFrame(Location(filepath, lineno), - funcname, code) + traceback_frame = TraceBackFrame( + Location(filepath, lineno), funcname, code + ) error_frame.append(traceback_frame.formated_message()) error_frame.append("") # Add paddle traceback after user code traceback - paddle_traceback_start_index = user_code_traceback_index[ - -1] + 1 if user_code_traceback_index else 0 + paddle_traceback_start_index = ( + user_code_traceback_index[-1] + 1 + if user_code_traceback_index + else 0 + ) for filepath, lineno, funcname, code in error_traceback[ - paddle_traceback_start_index:]: - traceback_frame = TraceBackFrame(Location(filepath, lineno), - funcname, code) + paddle_traceback_start_index: + ]: + traceback_frame = TraceBackFrame( + Location(filepath, lineno), funcname, code + ) error_frame.append(traceback_frame.formated_message()) error_frame.append("") diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py index 730c02fbc5e917bef4e284099daccb708137693e..039df7e84e433a3ac4c60c7558ebbac4d82f055f 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/function_spec.py @@ -48,8 +48,9 @@ class FunctionSpec(object): self._arg_names, self._default_kwargs = parse_arg_and_kwargs(function) # parse *args self.varargs_name = parse_varargs_name(function) - if self.varargs_name is not None and isinstance(function.__self__, - TranslatedLayer): + if self.varargs_name is not None and isinstance( + function.__self__, TranslatedLayer + ): self._arg_names += function.__self__._input_args_names def 
unified_args_and_kwargs(self, args, kwargs): @@ -72,8 +73,12 @@ class FunctionSpec(object): """ if len(self._arg_names) < len(args): error_msg = "The decorated function `{}` requires {} arguments: {}, but received {} with {}.".format( - self._dygraph_function.__name__, len(self._arg_names), - self._arg_names, len(args), args) + self._dygraph_function.__name__, + len(self._arg_names), + self._arg_names, + len(args), + args, + ) if args and inspect.isclass(args[0]): error_msg += "\n\tMaybe the function has more than one decorator, we don't support this for now." raise NotImplementedError(error_msg) @@ -90,9 +95,13 @@ class FunctionSpec(object): else: if arg_name not in self._default_kwargs: raise ValueError( - "`{}()` requires `{}` arguments, but not found in input `args`: {} and `kwargs`: {}." - .format(self._dygraph_function.__name__, arg_name, args, - kwargs)) + "`{}()` requires `{}` arguments, but not found in input `args`: {} and `kwargs`: {}.".format( + self._dygraph_function.__name__, + arg_name, + args, + kwargs, + ) + ) args.append(self._default_kwargs[arg_name]) return tuple(args), kwargs @@ -135,16 +144,20 @@ class FunctionSpec(object): # So we don't support to deal this case while specificing `input_spec` currently. if kwargs: raise ValueError( - "{} got unexpected keyword arguments: {}. Cannot trace the function when `input_spec` is specificed." - .format(self._dygraph_function.__name__, kwargs)) + "{} got unexpected keyword arguments: {}. Cannot trace the function when `input_spec` is specificed.".format( + self._dygraph_function.__name__, kwargs + ) + ) # Note: The length of `input_spec` can be greater than `args`, # because `args` may contains non-tensor value merged form `kwargs` # after `unified_args_and_kwargs`. if len(args) < len(self._input_spec): raise ValueError( - "Requires len(arguments) >= len(input_spec), but received len(args):{} < len(InputSpec): {}" - .format(len(args), len(self._input_spec))) + "Requires len(arguments) >= len(input_spec), but received len(args):{} < len(InputSpec): {}".format( + len(args), len(self._input_spec) + ) + ) # replace argument with corresponding InputSpec. args_with_spec = convert_to_input_spec(args, self._input_spec) @@ -154,8 +167,9 @@ class FunctionSpec(object): # If without specificing name in input_spec, add default name # according to argument name from decorated function. - args_with_spec = replace_spec_empty_name(self._arg_names, - args_with_spec) + args_with_spec = replace_spec_empty_name( + self._arg_names, args_with_spec + ) return args_with_spec, kwargs_with_spec @@ -182,7 +196,8 @@ class FunctionSpec(object): dtype=var_spec.dtype, is_data=True, need_check_feed=False, - stop_gradient=stop_gradient) + stop_gradient=stop_gradient, + ) else: feed_layer = var_spec inputs.append(feed_layer) @@ -195,15 +210,19 @@ class FunctionSpec(object): """ if not isinstance(input_spec, (tuple, list)): raise TypeError( - "The type(input_spec) should be one of (tuple, list), but received {}." 
- .format(type_name(input_spec))) + "The type(input_spec) should be one of (tuple, list), but received {}.".format( + type_name(input_spec) + ) + ) return tuple(input_spec) def __repr__(self): return "function: {}({}), input_spec: {}".format( - self._dygraph_function.__name__, ','.join(self._arg_names), - self._input_spec) + self._dygraph_function.__name__, + ','.join(self._arg_names), + self._input_spec, + ) @property def dygraph_function(self): @@ -242,8 +261,10 @@ def get_parameters(layer_instance, include_sublayer=True): params = layer_instance._parameters else: raise TypeError( - "Type of `layer_instance` should be nn.Layer, but received {}". - format(type_name(layer_instance))) + "Type of `layer_instance` should be nn.Layer, but received {}".format( + type_name(layer_instance) + ) + ) return params @@ -264,8 +285,10 @@ def get_buffers(layer_instance, include_sublayer=True): buffers = layer_instance._buffers else: raise TypeError( - "Type of `layer_instance` should be nn.Layer, but received {}". - format(type_name(layer_instance))) + "Type of `layer_instance` should be nn.Layer, but received {}".format( + type_name(layer_instance) + ) + ) return buffers @@ -284,12 +307,17 @@ def convert_to_input_spec(inputs, input_spec): def check_type_and_len(input, spec, check_length=False): if type(input) is not type(spec): - raise TypeError('type(input) should be {}, but received {}.'.format( - type(spec), type(input))) + raise TypeError( + 'type(input) should be {}, but received {}.'.format( + type(spec), type(input) + ) + ) if check_length and len(input) < len(spec): raise ValueError( - 'Requires len(inputs) >= len(input_spec), but received len(inputs):{} < len(input_spec):{}' - .format(len(inputs), len(input_spec))) + 'Requires len(inputs) >= len(input_spec), but received len(inputs):{} < len(input_spec):{}'.format( + len(inputs), len(input_spec) + ) + ) if isinstance(input_spec, (tuple, list)): input_with_spec = [] @@ -302,13 +330,15 @@ def convert_to_input_spec(inputs, input_spec): # Note: If the rest inputs contain tensor or numpy.ndarray # without specific InputSpec, raise warning. if len(inputs) > len(input_spec): - for rest_input in inputs[len(input_spec):]: + for rest_input in inputs[len(input_spec) :]: if isinstance(rest_input, (core.VarBase, np.ndarray)): logging_utils.warn( "The inputs constain `{}` without specificing InputSpec, its shape and dtype will be treated immutable. " - "Please specific InputSpec information in `@to_static` if you expect them as mutable inputs." 
- .format(type_name(rest_input))) - input_with_spec.extend(inputs[len(input_spec):]) + "Please specific InputSpec information in `@to_static` if you expect them as mutable inputs.".format( + type_name(rest_input) + ) + ) + input_with_spec.extend(inputs[len(input_spec) :]) return input_with_spec elif isinstance(input_spec, dict): @@ -317,7 +347,8 @@ def convert_to_input_spec(inputs, input_spec): for name, input in inputs.items(): if name in input_spec: input_with_spec[name] = convert_to_input_spec( - input, input_spec[name]) + input, input_spec[name] + ) else: input_with_spec[name] = input return input_with_spec @@ -354,7 +385,7 @@ def replace_spec_empty_name(args_name, input_with_spec): print([in_var.name for in_var in foo.inputs]) # [x, y] """ input_with_spec = list(input_with_spec) - candidate_arg_names = args_name[:len(input_with_spec)] + candidate_arg_names = args_name[: len(input_with_spec)] for i, arg_name in enumerate(candidate_arg_names): input_spec = input_with_spec[i] @@ -405,11 +436,13 @@ def _hash_spec_names(args_specs, kwargs_specs): the former has one input ('x'), but the latter has two input ('x', 'y'). """ spec_names = [ - spec.name for spec in flatten(args_specs) + spec.name + for spec in flatten(args_specs) if isinstance(spec, paddle.static.InputSpec) ] spec_names += [ - spec.name for spec in flatten(kwargs_specs) + spec.name + for spec in flatten(kwargs_specs) if isinstance(spec, paddle.static.InputSpec) ] i, name_ids = 0, {} diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/ifelse_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/ifelse_transformer.py index 5d51b0f48c274645d2f39ee5581f96dfbd1ebf9c..8120e79c1da308ac51c507a69c6680728ee6c2b7 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/ifelse_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/ifelse_transformer.py @@ -23,16 +23,47 @@ from collections import defaultdict from paddle.utils import gast from paddle.fluid import unique_name -from paddle.fluid.dygraph.dygraph_to_static.utils import create_funcDef_node, ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.utils import create_assign_node, FunctionNameLivenessAnalysis -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_undefined_var -from paddle.fluid.dygraph.dygraph_to_static.utils import create_nonlocal_stmt_nodes -from paddle.fluid.dygraph.dygraph_to_static.utils import create_get_args_node, create_set_args_node -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer -from paddle.fluid.dygraph.dygraph_to_static.utils import FOR_ITER_INDEX_PREFIX, FOR_ITER_TUPLE_PREFIX, FOR_ITER_TUPLE_INDEX_PREFIX, FOR_ITER_VAR_LEN_PREFIX, FOR_ITER_VAR_NAME_PREFIX, FOR_ITER_ZIP_TO_LIST_PREFIX, FOR_ITER_TARGET_PREFIX, FOR_ITER_ITERATOR_PREFIX -from paddle.fluid.dygraph.dygraph_to_static.utils import GetterSetterHelper, create_name_str +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_funcDef_node, + ast_to_source_code, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_assign_node, + FunctionNameLivenessAnalysis, +) +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + StaticAnalysisVisitor, +) +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) +from 
paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_undefined_var, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_nonlocal_stmt_nodes, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_get_args_node, + create_set_args_node, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FOR_ITER_INDEX_PREFIX, + FOR_ITER_TUPLE_PREFIX, + FOR_ITER_TUPLE_INDEX_PREFIX, + FOR_ITER_VAR_LEN_PREFIX, + FOR_ITER_VAR_NAME_PREFIX, + FOR_ITER_ZIP_TO_LIST_PREFIX, + FOR_ITER_TARGET_PREFIX, + FOR_ITER_ITERATOR_PREFIX, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + GetterSetterHelper, + create_name_str, +) TRUE_FUNC_PREFIX = 'true_fn' FALSE_FUNC_PREFIX = 'false_fn' @@ -47,13 +78,14 @@ class IfElseTransformer(BaseTransformer): """ def __init__(self, wrapper_root): - assert isinstance( - wrapper_root, AstNodeWrapper - ), "Type of input node should be AstNodeWrapper, but received %s ." % type( - wrapper_root) + assert isinstance(wrapper_root, AstNodeWrapper), ( + "Type of input node should be AstNodeWrapper, but received %s ." + % type(wrapper_root) + ) self.root = wrapper_root.node FunctionNameLivenessAnalysis( - self.root) # name analysis of current ast tree. + self.root + ) # name analysis of current ast tree. def transform(self): """ @@ -63,16 +95,31 @@ class IfElseTransformer(BaseTransformer): def visit_If(self, node): self.generic_visit(node) - true_func_node, false_func_node, get_args_node, set_args_node, return_name_ids, push_pop_ids = transform_if_else( - node, self.root) - - new_node = create_convert_ifelse_node(return_name_ids, push_pop_ids, - node.test, true_func_node, - false_func_node, get_args_node, - set_args_node) - - return [get_args_node, set_args_node, true_func_node, false_func_node - ] + [new_node] + ( + true_func_node, + false_func_node, + get_args_node, + set_args_node, + return_name_ids, + push_pop_ids, + ) = transform_if_else(node, self.root) + + new_node = create_convert_ifelse_node( + return_name_ids, + push_pop_ids, + node.test, + true_func_node, + false_func_node, + get_args_node, + set_args_node, + ) + + return [ + get_args_node, + set_args_node, + true_func_node, + false_func_node, + ] + [new_node] def visit_Call(self, node): # Remove `numpy()` statement, like `Tensor.numpy()[i]` -> `Tensor[i]` @@ -89,8 +136,9 @@ class IfElseTransformer(BaseTransformer): """ self.generic_visit(node) - new_node = create_convert_ifelse_node(None, None, node.test, node.body, - node.orelse, None, None, True) + new_node = create_convert_ifelse_node( + None, None, node.test, node.body, node.orelse, None, None, True + ) # Note: A blank line will be added separately if transform gast.Expr # into source code. Using gast.Expr.value instead to avoid syntax error # in python. @@ -101,7 +149,6 @@ class IfElseTransformer(BaseTransformer): class NameVisitor(gast.NodeVisitor): - def __init__(self, after_node=None, end_node=None): # The start node (exclusive) of the visitor self.after_node = after_node @@ -171,7 +218,8 @@ class NameVisitor(gast.NodeVisitor): # Blocks the vars in `if.body` and only inserts the vars both created in 'if/else' branch # into name_ids. 
new_name_ids = self._find_new_name_ids( - body_name_ids, else_name_ids) + body_name_ids, else_name_ids + ) for new_name_id in new_name_ids: before_if_name_ids[new_name_id].append(gast.Store()) @@ -186,7 +234,8 @@ class NameVisitor(gast.NodeVisitor): self.generic_visit(node) return blacklist = {'True', 'False', 'None'} - if node.id in blacklist: return + if node.id in blacklist: + return if node.id in self._def_func_names: return if not self._is_call_func_name_node(node): @@ -205,7 +254,10 @@ class NameVisitor(gast.NodeVisitor): # NOTE: We skip to visit names of get_args and set_args, because they contains # nonlocal statement such as 'nonlocal x, self' where 'self' should not be # parsed as returned value in contron flow. - if GET_ARGS_FUNC_PREFIX in node.name or SET_ARGS_FUNC_PREFIX in node.name: + if ( + GET_ARGS_FUNC_PREFIX in node.name + or SET_ARGS_FUNC_PREFIX in node.name + ): return if not self._in_range: @@ -236,7 +288,6 @@ class NameVisitor(gast.NodeVisitor): return copy.deepcopy(self.name_ids) def _find_new_name_ids(self, body_name_ids, else_name_ids): - def is_required_ctx(ctxs, required_ctx): for ctx in ctxs: if isinstance(ctx, required_ctx): @@ -244,13 +295,14 @@ class NameVisitor(gast.NodeVisitor): return False candidate_name_ids = set(body_name_ids.keys()) & set( - else_name_ids.keys()) + else_name_ids.keys() + ) store_ctx = gast.Store new_name_ids = set() for name_id in candidate_name_ids: - if is_required_ctx(body_name_ids[name_id], - store_ctx) and is_required_ctx( - else_name_ids[name_id], store_ctx): + if is_required_ctx( + body_name_ids[name_id], store_ctx + ) and is_required_ctx(else_name_ids[name_id], store_ctx): new_name_ids.add(name_id) return new_name_ids @@ -262,8 +314,10 @@ class NameVisitor(gast.NodeVisitor): parent_node = self.ancestor_nodes[-2] if isinstance(parent_node, gast.Call) and parent_node.func == node: # e.g: var_list.append(elem), var_list is also a name_id. - should_skip = isinstance( - node, gast.Attribute) and node.attr in white_func_names + should_skip = ( + isinstance(node, gast.Attribute) + and node.attr in white_func_names + ) if not should_skip: return True return False @@ -290,8 +344,10 @@ def _valid_nonlocal_names(return_name_ids, nonlocal_names): for name in return_name_ids: if name not in nonlocal_names: raise ValueError( - "Required returned var '{}' must be in 'nonlocal' statement '', but not found." - .format(name)) + "Required returned var '{}' must be in 'nonlocal' statement '', but not found.".format( + name + ) + ) nonlocal_names.remove(name) return return_name_ids + nonlocal_names @@ -313,15 +369,21 @@ def transform_if_else(node, root): # TODO(dev): Need a better way to deal this. # LoopTransformer will create some special vars, which is not visiable by users. so we can sure it's safe to remove them. 
filter_names = [ - ARGS_NAME, FOR_ITER_INDEX_PREFIX, FOR_ITER_TUPLE_PREFIX, - FOR_ITER_TARGET_PREFIX, FOR_ITER_ITERATOR_PREFIX, - FOR_ITER_TUPLE_INDEX_PREFIX, FOR_ITER_VAR_LEN_PREFIX, - FOR_ITER_VAR_NAME_PREFIX, FOR_ITER_ZIP_TO_LIST_PREFIX + ARGS_NAME, + FOR_ITER_INDEX_PREFIX, + FOR_ITER_TUPLE_PREFIX, + FOR_ITER_TARGET_PREFIX, + FOR_ITER_ITERATOR_PREFIX, + FOR_ITER_TUPLE_INDEX_PREFIX, + FOR_ITER_VAR_LEN_PREFIX, + FOR_ITER_VAR_NAME_PREFIX, + FOR_ITER_ZIP_TO_LIST_PREFIX, ] def remove_if(x): for name in filter_names: - if x.startswith(name): return False + if x.startswith(name): + return False return True nonlocal_names = list(filter(remove_if, nonlocal_names)) @@ -329,40 +391,53 @@ def transform_if_else(node, root): nonlocal_stmt_node = create_nonlocal_stmt_nodes(nonlocal_names) - empty_arg_node = gast.arguments(args=[], - posonlyargs=[], - vararg=None, - kwonlyargs=[], - kw_defaults=None, - kwarg=None, - defaults=[]) + empty_arg_node = gast.arguments( + args=[], + posonlyargs=[], + vararg=None, + kwonlyargs=[], + kw_defaults=None, + kwarg=None, + defaults=[], + ) true_func_node = create_funcDef_node( nonlocal_stmt_node + node.body, name=unique_name.generate(TRUE_FUNC_PREFIX), input_args=empty_arg_node, - return_name_ids=[]) + return_name_ids=[], + ) false_func_node = create_funcDef_node( nonlocal_stmt_node + node.orelse, name=unique_name.generate(FALSE_FUNC_PREFIX), input_args=empty_arg_node, - return_name_ids=[]) + return_name_ids=[], + ) helper = GetterSetterHelper(None, None, nonlocal_names, push_pop_ids) get_args_node = create_get_args_node(helper.union()) set_args_node = create_set_args_node(helper.union()) - return true_func_node, false_func_node, get_args_node, set_args_node, return_name_ids, push_pop_ids - - -def create_convert_ifelse_node(return_name_ids, - push_pop_ids, - pred, - true_func, - false_func, - get_args_func, - set_args_func, - is_if_expr=False): + return ( + true_func_node, + false_func_node, + get_args_node, + set_args_node, + return_name_ids, + push_pop_ids, + ) + + +def create_convert_ifelse_node( + return_name_ids, + push_pop_ids, + pred, + true_func, + false_func, + get_args_func, + set_args_func, + is_if_expr=False, +): """ Create `paddle.jit.dy2static.convert_ifelse( pred, true_fn, false_fn, get_args, set_args, return_name_ids)` @@ -377,16 +452,19 @@ def create_convert_ifelse_node(return_name_ids, convert_ifelse_layer = gast.parse( '_jst.IfElse(' - '{pred}, {true_fn}, {false_fn}, {get_args}, {set_args}, {return_name_ids}, push_pop_names={push_pop_ids})' - .format( + '{pred}, {true_fn}, {false_fn}, {get_args}, {set_args}, {return_name_ids}, push_pop_names={push_pop_ids})'.format( pred=ast_to_source_code(pred), true_fn=true_func_source, false_fn=false_func_source, - get_args=get_args_func.name if not is_if_expr else - 'lambda: None', #TODO: better way to deal with this + get_args=get_args_func.name + if not is_if_expr + else 'lambda: None', # TODO: better way to deal with this set_args=set_args_func.name - if not is_if_expr else 'lambda args: None', + if not is_if_expr + else 'lambda args: None', return_name_ids=create_name_str(return_name_ids), - push_pop_ids=create_name_str(push_pop_ids))).body[0] + push_pop_ids=create_name_str(push_pop_ids), + ) + ).body[0] return convert_ifelse_layer diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py index f0a4672794c3cc9d4cb8fd1f58cfd1e698a0cf8e..6f73a23316d272aaf34ad3116133d139d1dae22b 100644 --- 
a/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/logging_utils.py @@ -29,7 +29,6 @@ LOG_AllTransformer = 100 def synchronized(func): - def wrapper(*args, **kwargs): with threading.Lock(): return func(*args, **kwargs) @@ -59,7 +58,8 @@ class TranslatorLogger(object): self._logger = log_helper.get_logger( self.logger_name, 1, - fmt='%(asctime)s %(name)s %(levelname)s: %(message)s') + fmt='%(asctime)s %(name)s %(levelname)s: %(message)s', + ) self._verbosity_level = None self._transformed_code_level = None self._need_to_echo_log_to_stdout = None @@ -155,16 +155,19 @@ class TranslatorLogger(object): if self.need_to_echo_log_to_stdout: self._output_to_stdout('INFO: ' + msg_with_level, *args) - def log_transformed_code(self, level, ast_node, transformer_name, *args, - **kwargs): + def log_transformed_code( + self, level, ast_node, transformer_name, *args, **kwargs + ): if self.has_code_level(level): source_code = ast_to_source_code(ast_node) if level == LOG_AllTransformer: - header_msg = "After the last level ast transformer: '{}', the transformed code:\n" \ - .format(transformer_name) + header_msg = "After the last level ast transformer: '{}', the transformed code:\n".format( + transformer_name + ) else: - header_msg = "After the level {} ast transformer: '{}', the transformed code:\n"\ - .format(level, transformer_name) + header_msg = "After the level {} ast transformer: '{}', the transformed code:\n".format( + level, transformer_name + ) msg = header_msg + source_code self.logger.info(msg, *args, **kwargs) @@ -271,5 +274,6 @@ def log(level, msg, *args, **kwargs): def log_transformed_code(level, ast_node, transformer_name, *args, **kwargs): - _TRANSLATOR_LOGGER.log_transformed_code(level, ast_node, transformer_name, - *args, **kwargs) + _TRANSLATOR_LOGGER.log_transformed_code( + level, ast_node, transformer_name, *args, **kwargs + ) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/logical_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/logical_transformer.py index b7006d935ec9f2944a8a1c5fe98acf589a8f3c79..3ad623a8ff08509056811b19baa2124a9f8aa83d 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/logical_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/logical_transformer.py @@ -14,7 +14,9 @@ from paddle.utils import gast from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) cmpop_type_to_str = { gast.Eq: "==", @@ -26,7 +28,7 @@ cmpop_type_to_str = { gast.Is: "is", gast.IsNot: "is not", gast.In: "in", - gast.NotIn: "not in" + gast.NotIn: "not in", } @@ -70,7 +72,8 @@ class LogicalTransformer(BaseTransformer): new_node = self._create_bool_op_node(node.values, 'Or') else: raise TypeError( - "Only supports and/or syntax in control flow if statement.") + "Only supports and/or syntax in control flow if statement." + ) return new_node def _create_bool_op_node(self, nodes, api_type): @@ -80,10 +83,11 @@ class LogicalTransformer(BaseTransformer): according to the actual order. In `convert_logical_and(lambda:x>1, lambda:y<1)`, `lambda:y<1` must be run after `lambda:x>1`, If `x>1` is False, `y<1` should NOT be run. 
''' - assert len( - nodes - ) > 1, "The length of BoolOp should be at least 2, but received {}.".format( - len(nodes)) + assert ( + len(nodes) > 1 + ), "The length of BoolOp should be at least 2, but received {}.".format( + len(nodes) + ) if len(nodes) > 2: # Creates logic_and/logic_or node recursively. pre_logic_node = self._create_bool_op_node(nodes[:2], api_type) @@ -95,7 +99,8 @@ class LogicalTransformer(BaseTransformer): args = [ast_to_source_code(child) for child in nodes] new_node_str = "_jst.{}(lambda:{}, lambda:{})".format( - api_type, args[0], args[1]) + api_type, args[0], args[1] + ) # NOTE: gast.parse return Module(body=[expr(...)]) new_node = gast.parse(new_node_str).body[0].value return new_node diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py index 38dc7468e08206e64d7d6b1fe1ab12e7a960e5d0..b61f9f6e43c20101c1ef54d8ba588710ea8d75c0 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py @@ -17,21 +17,44 @@ from paddle.utils import gast from collections import defaultdict from paddle.fluid import unique_name -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static.static_analysis import NodeVarType -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + StaticAnalysisVisitor, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code from paddle.fluid.dygraph.dygraph_to_static.utils import generate_name_node from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_undefined_var -from paddle.fluid.dygraph.dygraph_to_static.utils import create_nonlocal_stmt_nodes, create_get_args_node, create_set_args_node -from paddle.fluid.dygraph.dygraph_to_static.utils import FunctionNameLivenessAnalysis +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_undefined_var, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + create_nonlocal_stmt_nodes, + create_get_args_node, + create_set_args_node, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FunctionNameLivenessAnalysis, +) from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import ARGS_NAME -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import RenameTransformer -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ForLoopTuplePreTransformer -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ForNodeVisitor -from paddle.fluid.dygraph.dygraph_to_static.utils import GetterSetterHelper, create_name_str +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + RenameTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + ForLoopTuplePreTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + ForNodeVisitor, +) +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + GetterSetterHelper, + 
create_name_str, +) __all__ = ['LoopTransformer', 'NameVisitor'] @@ -42,8 +65,14 @@ FOR_CONDITION_PREFIX = 'for_loop_condition' FOR_BODY_PREFIX = 'for_loop_body' -def create_while_nodes(condition_name, body_name, loop_var_names, - push_pop_names, getter_name, setter_name): +def create_while_nodes( + condition_name, + body_name, + loop_var_names, + push_pop_names, + getter_name, + setter_name, +): """ Returns a list of gast.Node which represents the calling of Paddle controlflow while_loop. @@ -79,13 +108,21 @@ def create_while_nodes(condition_name, body_name, loop_var_names, # set doesn't have order so we convert it to list loop_var_names = list(loop_var_names) assign_loop_var_names = [] - for name in (loop_var_names): + for name in loop_var_names: assign_loop_var_names.append(name) while_func_name = "_jst.While" - while_node_str = "{}({}, {}, {}, {}, return_name_ids={}, push_pop_names={})".format( - while_func_name, condition_name, body_name, getter_name, setter_name, - create_name_str(loop_var_names), create_name_str(push_pop_names)) + while_node_str = ( + "{}({}, {}, {}, {}, return_name_ids={}, push_pop_names={})".format( + while_func_name, + condition_name, + body_name, + getter_name, + setter_name, + create_name_str(loop_var_names), + create_name_str(push_pop_names), + ) + ) while_node = gast.parse(while_node_str).body[0] ret = [while_node] @@ -123,14 +160,16 @@ class NameVisitor(gast.NodeVisitor): self.type_vars = set() self.static_analysis_visitor = StaticAnalysisVisitor(root_node) - self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map( + self.node_to_wrapper_map = ( + self.static_analysis_visitor.get_node_to_wrapper_map() ) self.visit(root_node) def get_loop_var_names(self, node): assert isinstance( - node, (gast.While, gast.For)), "Input node is not gast loop node" + node, (gast.While, gast.For) + ), "Input node is not gast loop node" loop_var_names = set() create_var_names = set() read_context = {type(gast.Load()), type(gast.AugLoad())} @@ -141,7 +180,8 @@ class NameVisitor(gast.NodeVisitor): var_name_to_ctxs = defaultdict(list) for var_node in in_loop_vars_list: var_name_to_ctxs[self._var_node_to_name(var_node)].append( - var_node.ctx) + var_node.ctx + ) in_loop_vars = set(in_loop_vars_list) in_loop_vars = self._remove_unnecessary_vars(in_loop_vars, node) @@ -149,13 +189,17 @@ class NameVisitor(gast.NodeVisitor): before_loop_body_vars = self.before_loop_body_vars[node] before_loop_body_vars = self._remove_unnecessary_vars( - before_loop_body_vars, node) + before_loop_body_vars, node + ) before_loop_name_strs = self._var_nodes_to_names(before_loop_body_vars) - after_loop_vars = self.current_seen_vars - before_loop_body_vars - in_loop_vars + after_loop_vars = ( + self.current_seen_vars - before_loop_body_vars - in_loop_vars + ) after_loop_vars = self._remove_unnecessary_vars(after_loop_vars, node) - after_loop_name_strs = self._var_nodes_to_names(after_loop_vars, - read_context) + after_loop_name_strs = self._var_nodes_to_names( + after_loop_vars, read_context + ) condition_vars = self.condition_vars[node] condition_names = self._var_nodes_to_names(condition_vars) @@ -204,8 +248,10 @@ class NameVisitor(gast.NodeVisitor): if isinstance(ctx, gast.Store): is_created = True - if isinstance(var_name_to_ctxs[name][0], - gast.Load) and is_created: + if ( + isinstance(var_name_to_ctxs[name][0], gast.Load) + and is_created + ): loop_var_names.add(name) create_var_names.add(name) @@ -223,7 +269,7 @@ class NameVisitor(gast.NodeVisitor): write_context = { 
type(gast.Store()), type(gast.AugStore()), - type(gast.Del()) + type(gast.Del()), } for loop_node in self.current_loop: @@ -322,8 +368,10 @@ class NameVisitor(gast.NodeVisitor): def _node_var_type_is_basic(self, node_var_type): basic_types = { - NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT, - NodeVarType.STRING + NodeVarType.BOOLEAN, + NodeVarType.INT, + NodeVarType.FLOAT, + NodeVarType.STRING, } for t in node_var_type: if t in basic_types: @@ -411,7 +459,8 @@ class NameVisitor(gast.NodeVisitor): target_vars = [target_node] vars_of_list_generator = vars_of_list_generator | set( - target_vars) + target_vars + ) # 1.2 vars from target vars used in elt_node target_var_names = {var.id for var in target_vars} @@ -424,7 +473,8 @@ class NameVisitor(gast.NodeVisitor): for node in elt_nodes: vars_of_list_generator |= filter_name_nodes_from( - node, target_var_names) + node, target_var_names + ) # 2. Get target vars or vars from target vars used in for-loop but the for-loop is # 1) not the "loop_node" itself @@ -450,15 +500,18 @@ class NameVisitor(gast.NodeVisitor): target_vars = [target_node] target_vars_of_for_node = target_vars_of_for_node | set( - target_vars) + target_vars + ) # 2.2 vars from target vars used in for-loop target_vars_name_strs = {var.id for var in target_vars_of_for_node} for var in loop_vars: if not isinstance(var, gast.Name): continue - if var.id in target_vars_name_strs and var not in self.condition_vars[ - loop_node]: + if ( + var.id in target_vars_name_strs + and var not in self.condition_vars[loop_node] + ): target_vars_of_for_node.add(var) removed_vars = target_vars_of_for_node | vars_of_list_generator @@ -506,11 +559,11 @@ class LoopTransformer(BaseTransformer): while i < len(body_list): if isinstance(body_list[i], gast.While): new_stmts = self.get_while_stmt_nodes(body_list[i]) - body_list[i:i + 1] = new_stmts + body_list[i : i + 1] = new_stmts i += len(new_stmts) elif isinstance(body_list[i], gast.For): new_stmts = self.get_for_stmt_nodes(body_list[i]) - body_list[i:i + 1] = new_stmts + body_list[i : i + 1] = new_stmts i += len(new_stmts) else: i += 1 @@ -536,8 +589,10 @@ class LoopTransformer(BaseTransformer): return [node] init_stmts, cond_stmt, body_stmts = stmts_tuple # 2. get original loop vars - loop_var_names, create_var_names = node.pd_scope.modified_vars( - ), node.pd_scope.created_vars() + loop_var_names, create_var_names = ( + node.pd_scope.modified_vars(), + node.pd_scope.created_vars(), + ) push_pop_names = list(node.pd_scope.variadic_length_vars()) # TODO: Remove the bunch of code? We have the unique format `for A in B:` # NOTE: in 'for x in var' or 'for i, x in enumerate(var)' cases, @@ -577,53 +632,64 @@ class LoopTransformer(BaseTransformer): # 5. create & append condition function node condition_func_node = gast.FunctionDef( name=unique_name.generate(FOR_CONDITION_PREFIX), - args=gast.arguments(args=[], - posonlyargs=[], - vararg=None, - kwonlyargs=[], - kw_defaults=None, - kwarg=None, - defaults=[]), + args=gast.arguments( + args=[], + posonlyargs=[], + vararg=None, + kwonlyargs=[], + kw_defaults=None, + kwarg=None, + defaults=[], + ), body=nonlocal_stmt_node + [gast.Return(value=cond_stmt)], decorator_list=[], returns=None, - type_comment=None) + type_comment=None, + ) new_stmts.append(condition_func_node) # 6. 
create & append loop body function node # append return values for loop body body_func_node = gast.FunctionDef( name=unique_name.generate(FOR_BODY_PREFIX), - args=gast.arguments(args=[], - posonlyargs=[], - vararg=None, - kwonlyargs=[], - kw_defaults=None, - kwarg=None, - defaults=[]), + args=gast.arguments( + args=[], + posonlyargs=[], + vararg=None, + kwonlyargs=[], + kw_defaults=None, + kwarg=None, + defaults=[], + ), body=nonlocal_stmt_node + body_stmts, decorator_list=[], returns=None, - type_comment=None) + type_comment=None, + ) new_stmts.append(body_func_node) helper = GetterSetterHelper(None, None, nonlocal_names, push_pop_names) get_args_node = create_get_args_node(helper.union()) set_args_node = create_set_args_node(helper.union()) # 7. create & append while loop node - while_loop_nodes = create_while_nodes(condition_func_node.name, - body_func_node.name, - nonlocal_names, push_pop_names, - get_args_node.name, - set_args_node.name) + while_loop_nodes = create_while_nodes( + condition_func_node.name, + body_func_node.name, + nonlocal_names, + push_pop_names, + get_args_node.name, + set_args_node.name, + ) new_stmts.extend([get_args_node, set_args_node]) new_stmts.extend(while_loop_nodes) return new_stmts def get_while_stmt_nodes(self, node): - loop_var_names, create_var_names = node.pd_scope.modified_vars( - ), node.pd_scope.created_vars() + loop_var_names, create_var_names = ( + node.pd_scope.modified_vars(), + node.pd_scope.created_vars(), + ) push_pop_names = list(node.pd_scope.variadic_length_vars()) new_stmts = [] @@ -648,45 +714,54 @@ class LoopTransformer(BaseTransformer): condition_func_node = gast.FunctionDef( name=unique_name.generate(WHILE_CONDITION_PREFIX), - args=gast.arguments(args=[], - posonlyargs=[], - vararg=None, - kwonlyargs=[], - kw_defaults=None, - kwarg=None, - defaults=[]), + args=gast.arguments( + args=[], + posonlyargs=[], + vararg=None, + kwonlyargs=[], + kw_defaults=None, + kwarg=None, + defaults=[], + ), body=nonlocal_stmt_node + [gast.Return(value=node.test)], decorator_list=[], returns=None, - type_comment=None) + type_comment=None, + ) new_stmts.append(condition_func_node) new_body = node.body body_func_node = gast.FunctionDef( name=unique_name.generate(WHILE_BODY_PREFIX), - args=gast.arguments(args=[], - posonlyargs=[], - vararg=None, - kwonlyargs=[], - kw_defaults=None, - kwarg=None, - defaults=[]), + args=gast.arguments( + args=[], + posonlyargs=[], + vararg=None, + kwonlyargs=[], + kw_defaults=None, + kwarg=None, + defaults=[], + ), body=nonlocal_stmt_node + new_body, decorator_list=[], returns=None, - type_comment=None) + type_comment=None, + ) new_stmts.append(body_func_node) helper = GetterSetterHelper(None, None, nonlocal_names, push_pop_names) get_args_node = create_get_args_node(helper.union()) set_args_node = create_set_args_node(helper.union()) - while_loop_nodes = create_while_nodes(condition_func_node.name, - body_func_node.name, - nonlocal_names, push_pop_names, - get_args_node.name, - set_args_node.name) + while_loop_nodes = create_while_nodes( + condition_func_node.name, + body_func_node.name, + nonlocal_names, + push_pop_names, + get_args_node.name, + set_args_node.name, + ) new_stmts.extend([get_args_node, set_args_node]) new_stmts.extend(while_loop_nodes) return new_stmts diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py index 1399eeb2b6bedd9e361ace0853d1a864895b0803..4bfc73a32565fa5a3bfc2fd04ee58c70a850bc24 100644 --- 
a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py @@ -20,6 +20,7 @@ from paddle.fluid import core from paddle.fluid.dygraph.dygraph_to_static.utils import unwrap from paddle.fluid.dygraph.dygraph_to_static.utils import ORIGI_INFO from paddle.fluid.framework import Program + try: from collections.abc import Sequence except: @@ -30,6 +31,7 @@ class Location(object): """ Location information of source code. """ + __slots__ = ( "filepath", "lineno", @@ -42,8 +44,9 @@ class Location(object): self.col_offset = col_offset def __str__(self): - return "location: {}:{}:{}".format(self.filepath, self.lineno, - self.col_offset) + return "location: {}:{}:{}".format( + self.filepath, self.lineno, self.col_offset + ) @property def line_location(self): @@ -54,6 +57,7 @@ class OriginInfo(object): """ Original information of source code. """ + __slots__ = ( "location", "function_name", @@ -67,17 +71,26 @@ class OriginInfo(object): def __str__(self): return "{} \nsource_code: {} in function {}\n ".format( - self.location, self.source_code, self.function_name) + self.location, self.source_code, self.function_name + ) def formated_message(self): flag_for_origin_info = "(* user code *)" return ' File "{}", line {}, in {} {}\n\t{}'.format( - self.location.filepath, self.location.lineno, self.function_name, - flag_for_origin_info, self.source_code.lstrip()) + self.location.filepath, + self.location.lineno, + self.function_name, + flag_for_origin_info, + self.source_code.lstrip(), + ) def as_frame(self): - return (self.location.filepath, self.location.lineno, - self.function_name, self.source_code.lstrip()) + return ( + self.location.filepath, + self.location.lineno, + self.function_name, + self.source_code.lstrip(), + ) class OriginInfoAttacher(gast.NodeTransformer): @@ -142,9 +155,9 @@ class OriginInfoAttacher(gast.NodeTransformer): global_origin_info_map = {} -def create_and_update_origin_info_map(transformed_node, - static_func, - is_global=True): +def create_and_update_origin_info_map( + transformed_node, static_func, is_global=True +): """ Creates a original information map between transformed static function and original dygraph function. @@ -162,9 +175,11 @@ def create_and_update_origin_info_map(transformed_node, static_node = attach_origin_info(static_node, static_func) for t_node, s_node in ast_walk(transformed_node, static_node): - assert type(t_node) == type(s_node), \ - "The node types should be the same, but received type(t_node) is {}, and type(s_node) is {}." 
\ - .format(type(t_node), type(s_node)) + assert type(t_node) == type( + s_node + ), "The node types should be the same, but received type(t_node) is {}, and type(s_node) is {}.".format( + type(t_node), type(s_node) + ) dygraph_info = getattr(t_node, ORIGI_INFO, None) static_info = getattr(s_node, ORIGI_INFO, None) @@ -174,9 +189,15 @@ def create_and_update_origin_info_map(transformed_node, exist_origin_info = origin_info_map.get(static_loc) if exist_origin_info is not None: - if exist_origin_info.location.lineno >= dygraph_info.location.lineno: + if ( + exist_origin_info.location.lineno + >= dygraph_info.location.lineno + ): continue - if exist_origin_info.location.col_offset <= dygraph_info.location.col_offset: + if ( + exist_origin_info.location.col_offset + <= dygraph_info.location.col_offset + ): continue origin_info_map[static_loc] = dygraph_info @@ -229,12 +250,15 @@ def ast_walk(transformed_node, static_node): # Node types should be strictly required, but there is no strict distinction between gast.Load and gast.Param # in the ast transformation process. if isinstance(t_node, (gast.Load, gast.Param)) or isinstance( - s_node, (gast.Load, gast.Param)): + s_node, (gast.Load, gast.Param) + ): continue - assert type(t_node) == type(s_node), \ - "The node types should be the same, but received type(t_node) is {}, and type(s_node) is {}."\ - .format(type(t_node), type(s_node)) + assert type(t_node) == type( + s_node + ), "The node types should be the same, but received type(t_node) is {}, and type(s_node) is {}.".format( + type(t_node), type(s_node) + ) yield t_node, s_node @@ -289,11 +313,11 @@ def update_op_callstack_with_origin_info(program): loc = Location(filepath, lineno) dygraph_func_info = global_origin_info_map.get(loc.line_location) if dygraph_func_info: - filepath, lineno, funcname, code = \ - dygraph_func_info.as_frame() + filepath, lineno, funcname, code = dygraph_func_info.as_frame() callstack[i] = ' File "{}", line {}, in {}'.format( - filepath, lineno, funcname) + filepath, lineno, funcname + ) callstack[i + 1] = ' {}'.format(code) return callstack diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py index ebc8b351f785a7c6e48bb34544a4dad4ac20b7df..e0129e8466995f1077215b009663b0f820c52d69 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py @@ -16,19 +16,32 @@ import numpy as np import paddle from paddle.fluid import framework, backward, core, program_guard -from paddle.fluid.executor import _is_enable_standalone_executor, _is_dy2st_enable_standalone_executor +from paddle.fluid.executor import ( + _is_enable_standalone_executor, + _is_dy2st_enable_standalone_executor, +) from paddle.fluid.dygraph import layers from paddle.fluid.dygraph.base import switch_to_static_graph from paddle.fluid.dygraph.dygraph_to_static import logging_utils -from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_MAGIC_NUM +from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ( + RETURN_NO_VALUE_MAGIC_NUM, +) from paddle.fluid.layers.utils import flatten from paddle.fluid.layers.utils import pack_sequence_as from paddle.fluid.layers.utils import _hash_with_id from paddle.fluid.compiler import BuildStrategy from paddle.fluid.framework import _apply_pass -from paddle.fluid.contrib.mixed_precision.decorator import AutoMixedPrecisionLists -from 
paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program, cast_model_to_fp16 -from paddle.fluid.dygraph.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard +from paddle.fluid.contrib.mixed_precision.decorator import ( + AutoMixedPrecisionLists, +) +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + rewrite_program, + cast_model_to_fp16, +) +from paddle.fluid.dygraph.amp.auto_cast import ( + _in_amp_guard, + _in_pure_fp16_guard, +) from paddle import _legacy_C_ops @@ -61,7 +74,8 @@ class NestSequence(object): var_ids = [] for idx, var in enumerate(self.__input_list): if isinstance( - var, (framework.Variable, core.VarBase, core.eager.Tensor)): + var, (framework.Variable, core.VarBase, core.eager.Tensor) + ): var_ids.append(idx) return var_ids @@ -74,15 +88,17 @@ class NestSequence(object): warning_types = set() for var in self.__input_list: if not isinstance( - var, - (framework.Variable, core.VarBase, core.eager.Tensor)): + var, (framework.Variable, core.VarBase, core.eager.Tensor) + ): warning_types.add(type(var)) if warning_types: logging_utils.warn( "Output of traced function contains non-tensor type values: {}. " "Currently, We don't support to update them while training and will return " - "what we first saw. Please try to return them as tensor.". - format(list(warning_types))) + "what we first saw. Please try to return them as tensor.".format( + list(warning_types) + ) + ) @property def var_ids(self): @@ -136,12 +152,9 @@ class PartialProgramLayer: Layer: A Layer object that run all ops internally in static mode. """ - def __init__(self, - main_program, - inputs, - outputs, - parameters=None, - **kwargs): + def __init__( + self, main_program, inputs, outputs, parameters=None, **kwargs + ): super(PartialProgramLayer, self).__init__() self._inputs = NestSequence(inputs) self._outputs = NestSequence(outputs, need_check=True) @@ -164,7 +177,8 @@ class PartialProgramLayer: # For AMP training self._amp_list = AutoMixedPrecisionLists( custom_white_list=custom_white_list, - custom_black_list=custom_black_list) + custom_black_list=custom_black_list, + ) # program_id -> list(scope) self._scope_cache = {} @@ -200,7 +214,8 @@ class PartialProgramLayer: return self._origin_main_program.clone(for_test=is_infer_mode) else: train_program = self._append_backward_desc( - self._origin_main_program) + self._origin_main_program + ) # Note: Only set grad type once after initializing train program. So we put it here. 
self._set_grad_type(self._params, train_program) return train_program @@ -220,16 +235,18 @@ class PartialProgramLayer: @switch_to_static_graph def _create_pure_fp16_program(self, is_infer_mode=False): pure_fp16_program = self._origin_main_program.clone( - for_test=is_infer_mode) + for_test=is_infer_mode + ) with program_guard(pure_fp16_program): - cast_model_to_fp16(pure_fp16_program, - self._amp_list, - use_fp16_guard=False) + cast_model_to_fp16( + pure_fp16_program, self._amp_list, use_fp16_guard=False + ) if is_infer_mode: return pure_fp16_program else: train_pure_fp16_program = self._append_backward_desc( - pure_fp16_program) + pure_fp16_program + ) self._set_grad_type(self._params, train_pure_fp16_program) return train_pure_fp16_program @@ -237,23 +254,27 @@ class PartialProgramLayer: def _create_forward_backward_train_program(self): whole_program = self._create_program() forward_end_op_index = self._infer_program.desc.block(0).op_size() - return self._get_forward_backward_program_form(whole_program, - forward_end_op_index) + return self._get_forward_backward_program_form( + whole_program, forward_end_op_index + ) @switch_to_static_graph def _create_forward_backward_train_amp_program(self): whole_program = self._create_amp_program() forward_end_op_index = self._infer_amp_program.desc.block(0).op_size() - return self._get_forward_backward_program_form(whole_program, - forward_end_op_index) + return self._get_forward_backward_program_form( + whole_program, forward_end_op_index + ) @switch_to_static_graph def _create_forward_backward_train_pure_fp16_program(self): whole_program = self._create_pure_fp16_program() forward_end_op_index = self._infer_pure_fp16_program.desc.block( - 0).op_size() - return self._get_forward_backward_program_form(whole_program, - forward_end_op_index) + 0 + ).op_size() + return self._get_forward_backward_program_form( + whole_program, forward_end_op_index + ) @LazyInitialized def _train_program(self): @@ -349,8 +370,9 @@ class PartialProgramLayer: @LazyInitialized def _train_program_id(self): program_id = _hash_with_id(self._train_program, self) - core._set_cached_executor_build_strategy(program_id, - self._build_strategy) + core._set_cached_executor_build_strategy( + program_id, self._build_strategy + ) return program_id @LazyInitialized @@ -360,8 +382,9 @@ class PartialProgramLayer: @LazyInitialized def _train_amp_program_id(self): program_id = _hash_with_id(self._train_amp_program, self) - core._set_cached_executor_build_strategy(program_id, - self._build_strategy) + core._set_cached_executor_build_strategy( + program_id, self._build_strategy + ) return program_id @LazyInitialized @@ -371,8 +394,9 @@ class PartialProgramLayer: @LazyInitialized def _train_pure_fp16_program_id(self): program_id = _hash_with_id(self._train_pure_fp16_program, self) - core._set_cached_executor_build_strategy(program_id, - self._build_strategy) + core._set_cached_executor_build_strategy( + program_id, self._build_strategy + ) return program_id @LazyInitialized @@ -408,8 +432,9 @@ class PartialProgramLayer: return main_program - def prepare_gradient_aggregation(self, start_idx, main_program, - target_program): + def prepare_gradient_aggregation( + self, start_idx, main_program, target_program + ): """ Why we need add gradient aggregation operation ? 
In some cases, if non leaf nodes are used as output, gradient overwriting will occur, such as @@ -427,8 +452,8 @@ class PartialProgramLayer: if exist a op whose inputs is var, then return True """ if not isinstance(var, framework.Variable) or var.type not in [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.SELECTED_ROWS + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, ]: return False if var.dtype not in [paddle.float32, paddle.float64]: @@ -445,20 +470,28 @@ class PartialProgramLayer: new_grad_name = var.name + suffix + "@GRAD" finded_ops = list( filter( - lambda x: x[0] >= start_idx and any([ - out_arg == var_grad_name - for out_arg in x[1].output_arg_names - ]), enumerate(target_program.block(0).ops))) + lambda x: x[0] >= start_idx + and any( + [ + out_arg == var_grad_name + for out_arg in x[1].output_arg_names + ] + ), + enumerate(target_program.block(0).ops), + ) + ) # len(finded_ops) may equals zero when stop_gradient works. # len(finded_ops) may > 1, because we may have fill_constant op. if len(finded_ops) == 0: return None # step1: create a new var named var.name@GRAD - target_program.block(0).create_var(name=new_grad_name, - type=var.type, - dtype=var.dtype, - shape=var.shape) + target_program.block(0).create_var( + name=new_grad_name, + type=var.type, + dtype=var.dtype, + shape=var.shape, + ) # step2: rename the var.name@GRAD to var.name@GRAD@dy2static for idx, op in finded_ops: op._rename_input(var_grad_name, new_grad_name) @@ -469,11 +502,13 @@ class PartialProgramLayer: finded_ops[-1][0] + 1, type='sum', inputs={'X': [var_grad_name, new_grad_name]}, - outputs={"Out": var_grad_name}) + outputs={"Out": var_grad_name}, + ) return None to_processed_vars = list( - filter(_need_aggregation, self._outputs.tolist())) + filter(_need_aggregation, self._outputs.tolist()) + ) for _var in to_processed_vars: _insert_aggregation_ops_for_var(target_program, _var) @@ -489,8 +524,9 @@ class PartialProgramLayer: if targets and self._params: backward.gradients(targets=targets, inputs=[]) - start_idx = len( - main_program.block(0).ops) + 2 * len(self._outputs.tolist()) + start_idx = len(main_program.block(0).ops) + 2 * len( + self._outputs.tolist() + ) self.prepare_gradient_aggregation(start_idx, main_program, program) @@ -509,7 +545,10 @@ class PartialProgramLayer: found_param = False for block in program.blocks: for op in block.ops: - if param.name in op.input_arg_names or param.name in op.output_arg_names: + if ( + param.name in op.input_arg_names + or param.name in op.output_arg_names + ): required_params.append(param) found_param = True break @@ -526,15 +565,21 @@ class PartialProgramLayer: var_desc = block.vars[name].desc var_base = None if not framework._in_eager_mode_: - var_base = core.VarBase(var_desc.dtype(), - var_desc.shape(), - var_desc.name(), - var_desc.type(), False) + var_base = core.VarBase( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) else: - var_base = core.eager.Tensor(var_desc.dtype(), - var_desc.shape(), - var_desc.name(), - var_desc.type(), False) + var_base = core.eager.Tensor( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) double_grads.append(var_base) return self._valid_vars(double_grads) @@ -554,36 +599,62 @@ class PartialProgramLayer: attrs = [ 'global_block', - self.program.desc.block(0), 'start_op_index', 0, 'end_op_index', - self._get_end_op_index(), 'is_test', not self.training, - 'program_id', self.program_id + self.program.desc.block(0), + 
'start_op_index', + 0, + 'end_op_index', + self._get_end_op_index(), + 'is_test', + not self.training, + 'program_id', + self.program_id, ] if self._cuda_graph_capture_mode: attrs.extend( - ('cuda_graph_capture_mode', self._cuda_graph_capture_mode, - 'cuda_graph_pool_id', self._cuda_graph_pool_id)) - - use_interpretorcore = _is_enable_standalone_executor( - ) and _is_dy2st_enable_standalone_executor() + ( + 'cuda_graph_capture_mode', + self._cuda_graph_capture_mode, + 'cuda_graph_pool_id', + self._cuda_graph_pool_id, + ) + ) + + use_interpretorcore = ( + _is_enable_standalone_executor() + and _is_dy2st_enable_standalone_executor() + ) attrs.extend(('use_interpretorcore', use_interpretorcore)) if use_interpretorcore: attrs.extend( - ('forward_global_block', self.forward_program.desc.block(0), - 'backward_global_block', self.backward_program.desc.block(0))) + ( + 'forward_global_block', + self.forward_program.desc.block(0), + 'backward_global_block', + self.backward_program.desc.block(0), + ) + ) _legacy_C_ops.run_program( - self._valid_vars(in_vars), self._valid_vars(self._params), + self._valid_vars(in_vars), + self._valid_vars(self._params), self._valid_vars(out_vars), - self._create_scope_vec(program_id=self.program_id, - use_scope_cache=True), - self._double_grads, self._cuda_graph_vec, *attrs) + self._create_scope_vec( + program_id=self.program_id, use_scope_cache=True + ), + self._double_grads, + self._cuda_graph_vec, + *attrs + ) else: - _legacy_C_ops.run_program(self._valid_vars(in_vars), - self._valid_vars(self._params), - self._valid_vars(out_vars), - self._create_scope_vec(), - self._double_grads, self._cuda_graph_vec, - *attrs) + _legacy_C_ops.run_program( + self._valid_vars(in_vars), + self._valid_vars(self._params), + self._valid_vars(out_vars), + self._create_scope_vec(), + self._double_grads, + self._cuda_graph_vec, + *attrs + ) restored_nest_out = self._restore_out(out_vars) return self._remove_no_value(restored_nest_out) @@ -591,9 +662,11 @@ class PartialProgramLayer: if _in_pure_fp16_guard(): for i, var in enumerate(in_vars): name = var.name - if (self.program.global_block().has_var(name) - and self.program.global_block().var(name).dtype - == paddle.float16): + if ( + self.program.global_block().has_var(name) + and self.program.global_block().var(name).dtype + == paddle.float16 + ): in_vars[i] = var.astype('float16') in_vars[i].name = name @@ -624,25 +697,32 @@ class PartialProgramLayer: return self._infer_program @switch_to_static_graph - def _get_forward_backward_program_form(self, whole_program, - forward_end_op_index): + def _get_forward_backward_program_form( + self, whole_program, forward_end_op_index + ): forward_builded_program = add_build_strategy_for( - whole_program, 0, forward_end_op_index, self._build_strategy) + whole_program, 0, forward_end_op_index, self._build_strategy + ) backward_start_op_index = forward_end_op_index + 2 * len( - self._outputs.var_ids) + self._outputs.var_ids + ) backward_end_op_index = whole_program.desc.block(0).op_size() backward_builded_program = add_build_strategy_for( - whole_program, backward_start_op_index, backward_end_op_index, - self._build_strategy) - self._apply_inplace_pass(forward_builded_program, - backward_builded_program) + whole_program, + backward_start_op_index, + backward_end_op_index, + self._build_strategy, + ) + self._apply_inplace_pass( + forward_builded_program, backward_builded_program + ) return [forward_builded_program, backward_builded_program] def _apply_inplace_pass(self, forward_program, 
backward_program): attr_types = { "use_cuda": "bool", "mem_opt_skip_vars": "list[str]", - "for_partial_block": "bool" + "for_partial_block": "bool", } empty_startup_program = paddle.static.Program() use_cuda = True if core.is_compiled_with_cuda() else False @@ -664,22 +744,33 @@ class PartialProgramLayer: forward_mem_opt_skip_vars.append(var.desc.name()) backward_mem_opt_skip_vars.append(var.desc.name()) for var_name in core.parse_safe_eager_deletion_skip_vars( - backward_program.desc): + backward_program.desc + ): forward_mem_opt_skip_vars.append(var_name) attrs = { "use_cuda": use_cuda, "mem_opt_skip_vars": forward_mem_opt_skip_vars, - "for_partial_block": True + "for_partial_block": True, } - _apply_pass(forward_program, empty_startup_program, - "buffer_shared_inplace_pass", attrs, attr_types) + _apply_pass( + forward_program, + empty_startup_program, + "buffer_shared_inplace_pass", + attrs, + attr_types, + ) attrs = { "use_cuda": use_cuda, "mem_opt_skip_vars": backward_mem_opt_skip_vars, - "for_partial_block": True + "for_partial_block": True, } - _apply_pass(backward_program, empty_startup_program, - "buffer_shared_inplace_pass", attrs, attr_types) + _apply_pass( + backward_program, + empty_startup_program, + "buffer_shared_inplace_pass", + attrs, + attr_types, + ) def _prepare(self, inputs): """ @@ -695,23 +786,28 @@ class PartialProgramLayer: if isinstance(value, np.ndarray): var = None if not framework._in_eager_mode_: - var = core.VarBase(value=value, - name=self._inputs[i].desc.name(), - persistable=False, - place=expected_place, - zero_copy=True) + var = core.VarBase( + value=value, + name=self._inputs[i].desc.name(), + persistable=False, + place=expected_place, + zero_copy=True, + ) else: - var = core.eager.Tensor(value=value, - name=self._inputs[i].desc.name(), - persistable=False, - place=expected_place, - zero_copy=True) + var = core.eager.Tensor( + value=value, + name=self._inputs[i].desc.name(), + persistable=False, + place=expected_place, + zero_copy=True, + ) elif isinstance(value, (core.VarBase, core.eager.Tensor)): # NOTE(Aurelius84): If var is on CPUPlace, it will be transformed multi times # into CUDAPlace when it's as input of multi Ops. so we move it in advance # to avoid this problem. 
if value.stop_gradient and not value.place._equals( - expected_place): + expected_place + ): var = value._copy_to(expected_place, False) var.stop_gradient = True else: @@ -734,12 +830,21 @@ class PartialProgramLayer: return out_varbase_map[var_desc.name()] if not framework._in_eager_mode_: - var_base = core.VarBase(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), False) + var_base = core.VarBase( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) else: - var_base = core.eager.Tensor(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), - False) + var_base = core.eager.Tensor( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) var_base.stop_gradient = var.stop_gradient out_varbase_map[var_desc.name()] = var_base return var_base @@ -752,20 +857,30 @@ class PartialProgramLayer: def _create_scope_vec(self, program_id=None, use_scope_cache=False): # Hold forward variables tmp_scope_vec = None - inner_scope = self._get_scope(program_id=program_id, - use_scope_cache=use_scope_cache) + inner_scope = self._get_scope( + program_id=program_id, use_scope_cache=use_scope_cache + ) if not framework._in_eager_mode_: - tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [], - "program_out_scope", - core.VarDesc.VarType.STEP_SCOPES, True) + tmp_scope_vec = core.VarBase( + core.VarDesc.VarType.FP32, + [], + "program_out_scope", + core.VarDesc.VarType.STEP_SCOPES, + True, + ) tmp_scope_vec.value().set_scope(inner_scope) else: tmp_scope_vec = [inner_scope] return tmp_scope_vec def _create_cuda_graph_vec(self): - var = core.VarBase(core.VarDesc.VarType.FP32, [], "cuda_graph", - core.VarDesc.VarType.RAW, True) + var = core.VarBase( + core.VarDesc.VarType.FP32, + [], + "cuda_graph", + core.VarDesc.VarType.RAW, + True, + ) var.stop_gradient = True return var @@ -788,8 +903,9 @@ class PartialProgramLayer: return main_program.clone(for_test=True) def _is_no_value(self, var): - if isinstance(var, - (core.VarBase, core.eager.Tensor)) and var.shape == [1]: + if isinstance(var, (core.VarBase, core.eager.Tensor)) and var.shape == [ + 1 + ]: # NOTE: .numpy() will insert MemcpySync operation, it hits performance. if var.numpy()[0] == RETURN_NO_VALUE_MAGIC_NUM: return True @@ -805,13 +921,14 @@ class PartialProgramLayer: return out_vars elif isinstance(out_vars, (tuple, list)): if isinstance(out_vars, tuple): - res = tuple(var for var in out_vars - if not self._is_no_value(var)) + res = tuple( + var for var in out_vars if not self._is_no_value(var) + ) else: # isinstance(out_vars, list) res = [var for var in out_vars if not self._is_no_value(var)] - has_removed = (len(out_vars) > len(res)) + has_removed = len(out_vars) > len(res) # len(out_vars) > len(res) means we have removed var. This is # preventing out_vars is empty or just one element at the beginning if len(res) == 0 and has_removed: @@ -860,15 +977,18 @@ class PartialProgramLayer: if not isinstance(self._params, (list, tuple)): raise TypeError( "Type of self._params in PartialProgramLayer should be list or tuple, but received %s." - % type(self._params)) + % type(self._params) + ) param_and_buffer_names_set = set() for i, var in enumerate(self._params): # self._params constains parameters and buffers with persistable=True. if not isinstance(var, (core.VarBase, core.eager.Tensor)): raise TypeError( - 'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.' 
- .format(i, type(var))) + 'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.format( + i, type(var) + ) + ) param_and_buffer_names_set.add(var.name) for block in main_program.blocks: @@ -882,7 +1002,8 @@ class PartialProgramLayer: "\n\tRevise suggestion: " "\n\t\t1. Please ensure all your sublayers are inheritted from nn.Layer." "\n\t\t2. Please use nn.ParameterList and nn.LayerList as container instead of using a native Python container such as List" - % name) + % name + ) def _valid_vars(self, vars): """ @@ -899,13 +1020,23 @@ def _create_fake_var(): """ if not framework._in_eager_mode_: return [ - core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var", - core.VarDesc.VarType.RAW, False) + core.VarBase( + core.VarDesc.VarType.FP32, + [], + "Fake_var", + core.VarDesc.VarType.RAW, + False, + ) ] else: return [ - core.eager.Tensor(core.VarDesc.VarType.FP32, [], "Fake_var", - core.VarDesc.VarType.RAW, False) + core.eager.Tensor( + core.VarDesc.VarType.FP32, + [], + "Fake_var", + core.VarDesc.VarType.RAW, + False, + ) ] @@ -914,23 +1045,27 @@ def partial_program_from(concrete_program): if inputs and isinstance(inputs[0], layers.Layer): inputs = inputs[1:] - return PartialProgramLayer(concrete_program.main_program, inputs, - concrete_program.outputs, - concrete_program.parameters, - **concrete_program.kwargs) + return PartialProgramLayer( + concrete_program.main_program, + inputs, + concrete_program.outputs, + concrete_program.parameters, + **concrete_program.kwargs + ) @switch_to_static_graph -def add_build_strategy_for(program, - start_op_index, - end_op_index, - build_strategy=None): - if (start_op_index < end_op_index): +def add_build_strategy_for( + program, start_op_index, end_op_index, build_strategy=None +): + if start_op_index < end_op_index: compiled_program = paddle.static.CompiledProgram( core.Graph(program.desc, start_op_index, end_op_index), - build_strategy=build_strategy) - compiled_program._compile(core.Scope(), - framework._current_expected_place()) + build_strategy=build_strategy, + ) + compiled_program._compile( + core.Scope(), framework._current_expected_place() + ) ir_graph = framework.IrGraph(compiled_program._graph) builded_program = ir_graph.to_program() if hasattr(compiled_program._program, 'lr_sheduler'): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/print_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/print_transformer.py index 93a16322b0b37d0c40462911f154ee8791d02d55..fdbd585a71bfb4df93aabbf9f3c08b230f225357 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/print_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/print_transformer.py @@ -14,8 +14,13 @@ from paddle.utils import gast -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper, StaticAnalysisVisitor -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, + StaticAnalysisVisitor, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class PrintTransformer(BaseTransformer): @@ -31,7 +36,8 @@ class PrintTransformer(BaseTransformer): self.root = wrapper_root.node self.static_analysis_visitor = StaticAnalysisVisitor(self.root) - self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map( + self.node_to_wrapper_map = ( + self.static_analysis_visitor.get_node_to_wrapper_map() ) def transform(self): diff 
--git a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py index ad327a7f2e75b9f4da355babf1fcae9dd139bfdc..a8a0cd71212cc85ff6600369cd58684a4caf620d 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py @@ -29,19 +29,36 @@ from paddle.fluid.dygraph.base import switch_to_static_graph from paddle.fluid.dygraph.dygraph_to_static import DygraphToStaticAst from paddle.fluid.dygraph.dygraph_to_static import error from paddle.fluid.dygraph.dygraph_to_static import logging_utils -from paddle.fluid.dygraph.dygraph_to_static.origin_info import attach_origin_info -from paddle.fluid.dygraph.dygraph_to_static.origin_info import create_and_update_origin_info_map -from paddle.fluid.dygraph.dygraph_to_static.origin_info import update_op_callstack_with_origin_info -from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from +from paddle.fluid.dygraph.dygraph_to_static.origin_info import ( + attach_origin_info, +) +from paddle.fluid.dygraph.dygraph_to_static.origin_info import ( + create_and_update_origin_info_map, +) +from paddle.fluid.dygraph.dygraph_to_static.origin_info import ( + update_op_callstack_with_origin_info, +) +from paddle.fluid.dygraph.dygraph_to_static.partial_program import ( + partial_program_from, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code from paddle.fluid.dygraph.dygraph_to_static.utils import input_specs_compatible from paddle.fluid.dygraph.dygraph_to_static.utils import type_name from paddle.fluid.dygraph.dygraph_to_static.utils import unwrap -from paddle.fluid.dygraph.dygraph_to_static.utils import make_hashable, ALREADY_D2S -from paddle.fluid.dygraph.dygraph_to_static.function_spec import FunctionSpec, _hash_spec_names -from paddle.fluid.dygraph.dygraph_to_static.function_spec import get_buffers, get_parameters +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + make_hashable, + ALREADY_D2S, +) +from paddle.fluid.dygraph.dygraph_to_static.function_spec import ( + FunctionSpec, + _hash_spec_names, +) +from paddle.fluid.dygraph.dygraph_to_static.function_spec import ( + get_buffers, + get_parameters, +) from paddle.fluid.wrapped_decorator import signature_safe_contextmanager __all__ = ['ProgramTranslator', 'convert_to_static'] @@ -145,13 +162,24 @@ class CacheKey(object): """ Cached key for ProgramCache. """ + __slots__ = [ - 'function_spec', 'input_args_with_spec', 'input_kwargs_with_spec', - 'class_instance', 'kwargs', '_spec_names_id' + 'function_spec', + 'input_args_with_spec', + 'input_kwargs_with_spec', + 'class_instance', + 'kwargs', + '_spec_names_id', ] - def __init__(self, function_spec, input_args_with_spec, - input_kwargs_with_spec, class_instance, **kwargs): + def __init__( + self, + function_spec, + input_args_with_spec, + input_kwargs_with_spec, + class_instance, + **kwargs + ): """ Initializes a cache key. 
@@ -168,8 +196,9 @@ class CacheKey(object): self.class_instance = class_instance # NOTE: `kwargs` is usually not considered as basic member for `__hash__` self.kwargs = kwargs - self._spec_names_id = _hash_spec_names(input_args_with_spec, - input_kwargs_with_spec) + self._spec_names_id = _hash_spec_names( + input_args_with_spec, input_kwargs_with_spec + ) @classmethod def from_func_and_args(cls, function_spec, args, kwargs, class_instance): @@ -187,22 +216,34 @@ class CacheKey(object): args = args[1:] # 2. convert tensor and numpy array into InputSpec _args, _kwargs = function_spec.unified_args_and_kwargs(args, kwargs) - input_args_with_spec, input_kwargs_with_spec = function_spec.args_to_input_spec( - _args, _kwargs) + ( + input_args_with_spec, + input_kwargs_with_spec, + ) = function_spec.args_to_input_spec(_args, _kwargs) # 3. check whether hit the cache or build a new program for the input arguments - return CacheKey(function_spec, input_args_with_spec, - input_kwargs_with_spec, class_instance) + return CacheKey( + function_spec, + input_args_with_spec, + input_kwargs_with_spec, + class_instance, + ) def __hash__(self): error_msg = "Arguments to a `@paddle.jit.to_static` must be a hashable Python objects (or nested structures of these types)." with_hook = self.kwargs.get("with_hook", False) is_train = self.kwargs.get("is_train", False) - return hash((id(self.function_spec), - make_hashable(self.input_args_with_spec, error_msg), - make_hashable(self.input_kwargs_with_spec, - error_msg), self._spec_names_id, - self.class_instance, with_hook, is_train)) + return hash( + ( + id(self.function_spec), + make_hashable(self.input_args_with_spec, error_msg), + make_hashable(self.input_kwargs_with_spec, error_msg), + self._spec_names_id, + self.class_instance, + with_hook, + is_train, + ) + ) def __eq__(self, other): return (type(self) is type(other)) and hash(self) == hash(other) @@ -212,8 +253,11 @@ class CacheKey(object): def __repr__(self): return "id(function_spec): {}, input_args_with_spec: {}, input_kwargs_with_spec: {}, class_instance: {}".format( - id(self.function_spec), self.input_args_with_spec, - self.input_kwargs_with_spec, self.class_instance) + id(self.function_spec), + self.input_args_with_spec, + self.input_kwargs_with_spec, + self.class_instance, + ) def unwrap_decorators(func): @@ -260,9 +304,11 @@ class StaticFunction(object): if not hasattr(self._class_instance, '_original_funcs'): raise TypeError( "When using 'to_static' to convert method of a class, " - "please ensure the class inherits from nn.Layer") + "please ensure the class inherits from nn.Layer" + ) self._class_instance._original_funcs[ - function.__name__] = self._dygraph_function + function.__name__ + ] = self._dygraph_function else: self._dygraph_function = function self._class_instance = None @@ -286,21 +332,29 @@ class StaticFunction(object): return self._property def train(self): - if isinstance(self._class_instance, - layers.Layer) and self._class_instance.training == False: + if ( + isinstance(self._class_instance, layers.Layer) + and self._class_instance.training == False + ): raise RuntimeError( "Failed to switch train mode. 
{} is a Layer's method, " "please use Layer.train() to switch train mode.".format( - self.dygraph_function)) + self.dygraph_function + ) + ) self._training = True def eval(self): - if isinstance(self._class_instance, - layers.Layer) and self._class_instance.training == True: + if ( + isinstance(self._class_instance, layers.Layer) + and self._class_instance.training == True + ): raise RuntimeError( "Failed to switch eval mode. {} is a Layer's method, " "please use Layer.eval() to switch eval mode.".format( - self.dygraph_function)) + self.dygraph_function + ) + ) self._training = False def __get__(self, instance, owner): @@ -338,8 +392,9 @@ class StaticFunction(object): return self._descriptor_cache[instance] def _clone(self): - return self.__class__(self._dygraph_function, self._input_spec, - **self._kwargs) + return self.__class__( + self._dygraph_function, self._input_spec, **self._kwargs + ) def __call__(self, *args, **kwargs): """ @@ -364,7 +419,8 @@ class StaticFunction(object): logging_utils.warn( "The decorator '@paddle.jit.to_static' does NOT work when setting ProgramTranslator.enable to False. " "We will just return dygraph output. If you would like to get static graph output, please call API " - "ProgramTranslator.enable(True)") + "ProgramTranslator.enable(True)" + ) return self._call_dygraph_function(*args, **kwargs) if not _non_static_mode(): @@ -372,21 +428,26 @@ class StaticFunction(object): "Failed to run the callable object {} decorated by '@paddle.jit.to_static', " "because it is NOT in dynamic mode. Please disable the static mode to enter dynamic mode with the " "following API: paddle.disable_static().".format( - self.dygraph_function)) + self.dygraph_function + ) + ) # 2. trace ops from dygraph layers and cache the generated program. args, kwargs = self._function_spec.unified_args_and_kwargs(args, kwargs) try: concrete_program, partial_program_layer = self.get_concrete_program( - *args, **kwargs, is_train=self._is_train_mode()) + *args, **kwargs, is_train=self._is_train_mode() + ) # 3. synchronize self.training attribute. if isinstance(self._class_instance, layers.Layer): partial_program_layer.training = self._class_instance.training else: partial_program_layer.training = self._training - partial_program_layer._cuda_graph_capture_mode = self._cuda_graph_capture_mode + partial_program_layer._cuda_graph_capture_mode = ( + self._cuda_graph_capture_mode + ) partial_program_layer._cuda_graph_pool_id = self._cuda_graph_pool_id # 4. return outputs. 
@@ -404,7 +465,8 @@ class StaticFunction(object): else: logging_utils.warn( "Please file an issue at 'https://github.com/PaddlePaddle/Paddle/issues'" - " if you can't handle this {} yourself.".format(type(e))) + " if you can't handle this {} yourself.".format(type(e)) + ) raise e def _is_train_mode(self): @@ -412,7 +474,8 @@ class StaticFunction(object): if not hasattr(self._class_instance, 'training'): raise TypeError( "When using 'to_static' to convert method of a class, " - "please ensure the class inherits from nn.Layer") + "please ensure the class inherits from nn.Layer" + ) return self._class_instance.training else: return self._training @@ -430,7 +493,8 @@ class StaticFunction(object): """ if self._class_instance is not None: dygraph_function = self._dygraph_function.__get__( - self._class_instance) + self._class_instance + ) else: dygraph_function = self._dygraph_function @@ -460,23 +524,30 @@ class StaticFunction(object): with_hook = kwargs.get("with_hook", False) is_train = kwargs.get("is_train", True) - if "is_train" in kwargs: kwargs.pop("is_train") - if "with_hook" in kwargs: kwargs.pop("with_hook") + if "is_train" in kwargs: + kwargs.pop("is_train") + if "with_hook" in kwargs: + kwargs.pop("with_hook") # 1. unify args/kwargs and replace Tensor with InputSpec if len(args) != len(self._function_spec.args_name): args, kwargs = self._function_spec.unified_args_and_kwargs( - args, kwargs) - input_args_with_spec, input_kwargs_with_spec = self._function_spec.args_to_input_spec( - args, kwargs) + args, kwargs + ) + ( + input_args_with_spec, + input_kwargs_with_spec, + ) = self._function_spec.args_to_input_spec(args, kwargs) # 2. generate cache key - cache_key = CacheKey(self._function_spec, - input_args_with_spec, - input_kwargs_with_spec, - self._class_instance, - **self._kwargs, - with_hook=with_hook, - is_train=is_train) + cache_key = CacheKey( + self._function_spec, + input_args_with_spec, + input_kwargs_with_spec, + self._class_instance, + **self._kwargs, + with_hook=with_hook, + is_train=is_train + ) # 3. check whether hit the cache or build a new program for the input arguments concrete_program, partial_program_layer = self._program_cache[cache_key] @@ -533,9 +604,9 @@ class StaticFunction(object): """ return self.concrete_program_specify_input_spec(input_spec=None) - def concrete_program_specify_input_spec(self, - input_spec=None, - with_hook=False): + def concrete_program_specify_input_spec( + self, input_spec=None, with_hook=False + ): """ Returns recent ConcreteProgram instance of decorated function while specifying input_spec. 
If the self._function_spec already has @@ -556,30 +627,37 @@ class StaticFunction(object): desired_input_spec = input_spec if self._function_spec.input_spec is not None: if input_spec is not None and not input_specs_compatible( - flatten(input_spec), - flatten(self._function_spec.input_spec)): + flatten(input_spec), flatten(self._function_spec.input_spec) + ): raise ValueError( - "The `input_spec`: {} used to construct concrete_program is conflict with the `input_spec`: {} in `@paddle.jit.to_static`" - .format(input_spec, self._function_spec.input_spec)) + "The `input_spec`: {} used to construct concrete_program is conflict with the `input_spec`: {} in `@paddle.jit.to_static`".format( + input_spec, self._function_spec.input_spec + ) + ) # NOTE(chenweihang): we should always translated program based on the `input_spec` # decorated on forward if it is valid desired_input_spec = self._function_spec.input_spec if input_spec is not None: logging_utils.warn( - "\n\nYou have specified `input_spec` both in function definition (higher priority) and `paddle.jit.save` (will be ignored.)\n\n\t Using: {}\n\n\t Ignore: {}\n" - .format(desired_input_spec, input_spec)) + "\n\nYou have specified `input_spec` both in function definition (higher priority) and `paddle.jit.save` (will be ignored.)\n\n\t Using: {}\n\n\t Ignore: {}\n".format( + desired_input_spec, input_spec + ) + ) - has_input_spec = (desired_input_spec is not None) + has_input_spec = desired_input_spec is not None if has_input_spec: concrete_program, _ = self.get_concrete_program( *desired_input_spec, with_hook=with_hook, - is_train=self._is_train_mode()) + is_train=self._is_train_mode() + ) return concrete_program else: raise ValueError( - "No valid transformed program for {}.\n\t Please specific `input_spec` in `@paddle.jit.to_static` or feed input tensor to call the decorated function at once.\n" - .format(self._function_spec)) + "No valid transformed program for {}.\n\t Please specific `input_spec` in `@paddle.jit.to_static` or feed input tensor to call the decorated function at once.\n".format( + self._function_spec + ) + ) elif with_hook: cache_key = self._program_cache._recent_cache_key cache_key.kwargs["with_hook"] = True @@ -589,11 +667,15 @@ class StaticFunction(object): # If more than one programs have been cached, return the recent converted program by default. elif cached_program_len > 1: logging_utils.warn( - "Current {} has more than one cached programs: {}, the last traced progam will be return by default." 
- .format(self._function_spec, cached_program_len)) + "Current {} has more than one cached programs: {}, the last traced progam will be return by default.".format( + self._function_spec, cached_program_len + ) + ) - cache_key, (concrete_program, - partial_layer) = self._program_cache.last() + cache_key, ( + concrete_program, + partial_layer, + ) = self._program_cache.last() return concrete_program def rollback(self): @@ -639,11 +721,15 @@ class StaticFunction(object): # only rollback sub-functions on path of top _dygraph_function func_name = self._dygraph_function.__name__ - assert func_name in self._class_instance._original_funcs, "Not Found function '{}' in class '{}'.".format( - func_name, self._class_instance.__name__) + assert ( + func_name in self._class_instance._original_funcs + ), "Not Found function '{}' in class '{}'.".format( + func_name, self._class_instance.__name__ + ) func = self._class_instance._original_funcs[func_name] - setattr(self._class_instance, func_name, - func.__get__(self._class_instance)) + setattr( + self._class_instance, func_name, func.__get__(self._class_instance) + ) for sublayer in self._class_instance.sublayers(include_self=False): rollback_impl(sublayer) @@ -686,12 +772,15 @@ class StaticFunction(object): net_name = type(self._class_instance).__name__ logging_utils.log( level=-1, - msg="Not recommend to deepcopy '{}' decorated with @to_static, it has side effect that will" \ - " rollback into original state before @to_static. Please deepcopy '{}' before applying @to_static." - .format(net_name, net_name)) + msg="Not recommend to deepcopy '{}' decorated with @to_static, it has side effect that will" + " rollback into original state before @to_static. Please deepcopy '{}' before applying @to_static.".format( + net_name, net_name + ), + ) self.rollback() - return self._dygraph_function.__get__(memo[id( - self._class_instance)]) + return self._dygraph_function.__get__( + memo[id(self._class_instance)] + ) else: return self._dygraph_function @@ -703,7 +792,8 @@ class StaticFunction(object): self._raise_when_property() concrete_program = self.concrete_program inputs = [ - var for var in flatten(concrete_program.inputs) + var + for var in flatten(concrete_program.inputs) if isinstance(var, framework.Variable) ] return inputs @@ -716,7 +806,8 @@ class StaticFunction(object): self._raise_when_property() concrete_program = self.concrete_program outputs = [ - var for var in flatten(concrete_program.outputs) + var + for var in flatten(concrete_program.outputs) if isinstance(var, framework.Variable) ] @@ -751,7 +842,9 @@ def _verify_init_in_dynamic_mode(class_instance): " `paddle.jit.to_static` is only available in dynamic mode. Please call `paddle.disable_static()` before " "initializing your Layer class `{}` . 
Because parameters of Layer class should be initialized firstly " "in dynamic mode while applying transformation.".format( - class_instance)) + class_instance + ) + ) class HookHelper(object): @@ -764,22 +857,25 @@ class HookHelper(object): self.func = func self.class_instance = class_instance self.with_hook = with_hook - self.need_apply_hook = with_hook and isinstance( - self.class_instance, layers.Layer) and getattr( - func, "__name__") == "forward" + self.need_apply_hook = ( + with_hook + and isinstance(self.class_instance, layers.Layer) + and getattr(func, "__name__") == "forward" + ) def apply_pre_hooks(self, inputs): """ Apply _forward_pre_hooks from outermost layer """ - if not self.need_apply_hook: return inputs + if not self.need_apply_hook: + return inputs inputs = inputs[1:] for forward_pre_hook in self.class_instance._forward_pre_hooks.values(): hook_result = forward_pre_hook(self.class_instance, inputs) if hook_result is not None: if not isinstance(hook_result, tuple): - hook_result = (hook_result, ) + hook_result = (hook_result,) inputs = hook_result return [self.class_instance] + list(inputs) @@ -788,13 +884,16 @@ class HookHelper(object): """ Apply _forward_post_hooks from outermost layer """ - if not self.need_apply_hook: return outputs + if not self.need_apply_hook: + return outputs inputs = inputs[1:] - for forward_post_hook in self.class_instance._forward_post_hooks.values( - ): - hook_result = forward_post_hook(self.class_instance, inputs, - outputs) + for ( + forward_post_hook + ) in self.class_instance._forward_post_hooks.values(): + hook_result = forward_post_hook( + self.class_instance, inputs, outputs + ) if hook_result is not None: outputs = hook_result @@ -805,18 +904,25 @@ class HookHelper(object): class ConcreteProgram(object): __slots__ = [ - 'inputs', 'outputs', 'main_program', "startup_program", "parameters", - "function", 'kwargs' + 'inputs', + 'outputs', + 'main_program', + "startup_program", + "parameters", + "function", + 'kwargs', ] - def __init__(self, - inputs, - outputs, - parameters, - function, - main_program, - startup_program=None, - **kwargs): + def __init__( + self, + inputs, + outputs, + parameters, + function, + main_program, + startup_program=None, + **kwargs + ): self.inputs = inputs self.outputs = outputs self.main_program = main_program @@ -827,8 +933,9 @@ class ConcreteProgram(object): @staticmethod @switch_to_static_graph - def from_func_spec(func_spec, input_spec, input_kwargs_spec, class_instance, - **kwargs): + def from_func_spec( + func_spec, input_spec, input_kwargs_spec, class_instance, **kwargs + ): """ Builds the main_program with specialized inputs and returns outputs of program as fetch_list. @@ -844,38 +951,44 @@ class ConcreteProgram(object): dygraph_function = func_spec.dygraph_function static_func = convert_to_static(dygraph_function) # apply pre\post hook for outermost layer - hook_helper = HookHelper(dygraph_function, class_instance, - kwargs.get("with_hook", False)) + hook_helper = HookHelper( + dygraph_function, class_instance, kwargs.get("with_hook", False) + ) main_program, startup_program = framework.Program(), framework.Program() # Note: The random seed should be synchronized into cached program # if set in `fluid.dygraph_guard` because some ops rely on it, such as # `fluid.layers.dropout`. 
main_program.random_seed = framework.default_main_program().random_seed - startup_program.random_seed = framework.default_startup_program( - ).random_seed + startup_program.random_seed = ( + framework.default_startup_program().random_seed + ) from paddle.fluid.dygraph.base import _switch_declarative_mode_guard_ + with framework.program_guard(main_program, startup_program): with _switch_declarative_mode_guard_(is_declarative=True): # 1. Adds `fluid.data` layers for input if needed static_inputs = func_spec.to_static_inputs_with_spec( - input_spec, main_program) + input_spec, main_program + ) _kwargs = func_spec.to_static_inputs_with_spec( - input_kwargs_spec, main_program) + input_kwargs_spec, main_program + ) if class_instance: - static_inputs = tuple([class_instance] + - list(static_inputs)) + static_inputs = tuple( + [class_instance] + list(static_inputs) + ) # 2. Gets all ParamBases and buffered VarBases in the function all_parameters_and_buffers = _extract_indeed_params_buffers( - class_instance) + class_instance + ) # 3. Builds program only once and returns the output Variables. - with param_guard(get_parameters( - class_instance, - False)), param_guard(get_buffers(class_instance, - False)): + with param_guard( + get_parameters(class_instance, False) + ), param_guard(get_buffers(class_instance, False)): try: # only for jit.save, do nothing while train and eval process inputs = hook_helper.apply_pre_hooks(static_inputs) @@ -893,20 +1006,24 @@ class ConcreteProgram(object): raise if outputs is not None: - need_wrap_into_list = not isinstance( - outputs, (tuple, list)) or len(outputs) == 1 + need_wrap_into_list = ( + not isinstance(outputs, (tuple, list)) + or len(outputs) == 1 + ) if need_wrap_into_list: outputs = [outputs] main_program = update_op_callstack_with_origin_info(main_program) - return ConcreteProgram(inputs=static_inputs, - outputs=outputs, - parameters=all_parameters_and_buffers, - function=dygraph_function, - main_program=main_program, - startup_program=startup_program, - **kwargs) + return ConcreteProgram( + inputs=static_inputs, + outputs=outputs, + parameters=all_parameters_and_buffers, + function=dygraph_function, + main_program=main_program, + startup_program=startup_program, + **kwargs + ) def _extract_indeed_params_buffers(class_instance): @@ -938,13 +1055,16 @@ class ProgramCache(object): input_spec=cache_key.input_args_with_spec, input_kwargs_spec=cache_key.input_kwargs_with_spec, class_instance=cache_key.class_instance, - **cache_key.kwargs) + **cache_key.kwargs + ) return concrete_program, partial_program_from(concrete_program) def __getitem__(self, item): if not isinstance(item, CacheKey): - raise ValueError('type(item) should be CacheKey, but received %s' % - type_name(item)) + raise ValueError( + 'type(item) should be CacheKey, but received %s' + % type_name(item) + ) item_id = hash(item) self._recent_cache_key = item self._recent_key = item_id @@ -955,16 +1075,19 @@ class ProgramCache(object): if current_tracing_count > MAX_TRACED_PROGRAM_COUNT: logging_utils.warn( "Current traced program number: {} > `max_tracing_count`:{}. Too much cached programs will bring expensive overhead. " - "The reason may be: (1) passing tensors with different shapes, (2) passing python objects instead of tensors." 
- .format(current_tracing_count, MAX_TRACED_PROGRAM_COUNT)) + "The reason may be: (1) passing tensors with different shapes, (2) passing python objects instead of tensors.".format( + current_tracing_count, MAX_TRACED_PROGRAM_COUNT + ) + ) return self._caches[item_id] def get_program(self, item): if not isinstance(item, CacheKey): raise ValueError( - "Input item's type should be FunctionSpec, but received %s" % - type_name(item)) + "Input item's type should be FunctionSpec, but received %s" + % type_name(item) + ) item_id = hash(item) if item_id not in self._caches: raise RuntimeError( @@ -973,8 +1096,9 @@ class ProgramCache(object): return self._caches[item_id] def last(self): - assert len( - self._caches) >= 1, "No valid cached program in ProgramCache." + assert ( + len(self._caches) >= 1 + ), "No valid cached program in ProgramCache." assert self._recent_key is not None return self._recent_key, self._caches[self._recent_key] @@ -1082,8 +1206,12 @@ class ProgramTranslator(object): print(func(x)) # [[0. 0.]] """ - check_type(enable_to_static, "enable_to_static", bool, - "ProgramTranslator.enable") + check_type( + enable_to_static, + "enable_to_static", + bool, + "ProgramTranslator.enable", + ) self.enable_to_static = enable_to_static def get_output(self, dygraph_func, *args, **kwargs): @@ -1137,8 +1265,11 @@ class ProgramTranslator(object): try: function_spec = FunctionSpec(dygraph_func) cache_key = CacheKey.from_func_and_args( - function_spec, args, kwargs, - getattr(dygraph_func, '__self__', None)) + function_spec, + args, + kwargs, + getattr(dygraph_func, '__self__', None), + ) _, partial_program_layer = self._program_cache[cache_key] if args and isinstance(args[0], layers.Layer): @@ -1162,7 +1293,8 @@ class ProgramTranslator(object): else: logging_utils.warn( "Please file an issue at 'https://github.com/PaddlePaddle/Paddle/issues'" - " if you can't handle this {} yourself.".format(type(e))) + " if you can't handle this {} yourself.".format(type(e)) + ) raise e def get_func(self, dygraph_func): @@ -1267,24 +1399,28 @@ class ProgramTranslator(object): function_spec = FunctionSpec(dygraph_func) cache_key = CacheKey.from_func_and_args( - function_spec, args, kwargs, getattr(dygraph_func, '__self__', - None)) + function_spec, args, kwargs, getattr(dygraph_func, '__self__', None) + ) concrete_program, partial_program_layer = self._program_cache[cache_key] # Note: concrete_program hold all input/output infos include non-Variable input_vars = [ - var for var in concrete_program.inputs + var + for var in concrete_program.inputs if isinstance(var, framework.Variable) ] output_vars = [ - var for var in concrete_program.outputs + var + for var in concrete_program.outputs if isinstance(var, framework.Variable) ] - return concrete_program.main_program, \ - concrete_program.startup_program, \ - input_vars, \ - output_vars + return ( + concrete_program.main_program, + concrete_program.startup_program, + input_vars, + output_vars, + ) def get_code(self, dygraph_func): """ diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py index 88277bc832fe72d6a00e09dfd47018deeb3238d1..80bebbf501e55affebb56053fec3c9b83fc06d31 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py @@ -16,15 +16,23 @@ from paddle.utils import gast from paddle.fluid import unique_name from paddle.fluid.dygraph.dygraph_to_static.utils import 
index_in_list -from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import ForToWhileTransformer -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node +from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import ( + ForToWhileTransformer, +) +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_fill_constant_node, +) from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) from paddle.fluid.dygraph.dygraph_to_static.utils import Dygraph2StaticException from paddle.fluid.dygraph.dygraph_to_static.utils import ORIGI_INFO __all__ = [ - 'RETURN_NO_VALUE_MAGIC_NUM', 'RETURN_NO_VALUE_VAR_NAME', 'ReturnTransformer' + 'RETURN_NO_VALUE_MAGIC_NUM', + 'RETURN_NO_VALUE_VAR_NAME', + 'ReturnTransformer', ] # Constant for the name of the variable which stores the boolean state that we @@ -45,7 +53,7 @@ RETURN_VALUE_INIT_NAME = '__return_value_init' # should return. # Assign not support float64, use float32 value as magic number. -RETURN_NO_VALUE_MAGIC_NUM = 1.77113e+27 +RETURN_NO_VALUE_MAGIC_NUM = 1.77113e27 RETURN_NO_VALUE_VAR_NAME = "__no_value_return_var" @@ -91,7 +99,8 @@ class ReturnAnalysisVisitor(gast.NodeVisitor): def __init__(self, root_node): self.root = root_node assert isinstance( - self.root, gast.FunctionDef), "Input is not gast.FunctionDef node" + self.root, gast.FunctionDef + ), "Input is not gast.FunctionDef node" # the number of return statements self.count_return = 0 @@ -157,7 +166,8 @@ class SingleReturnTransformer(BaseTransformer): def __init__(self, root): self.root = root assert isinstance( - self.root, gast.FunctionDef), "Input is not gast.FunctionDef node" + self.root, gast.FunctionDef + ), "Input is not gast.FunctionDef node" self.ancestor_nodes = [] @@ -206,15 +216,18 @@ class SingleReturnTransformer(BaseTransformer): self.generic_visit(node) return node - def append_assign_to_return_node(self, value, parent_node_of_return, - return_name, assign_nodes): + def append_assign_to_return_node( + self, value, parent_node_of_return, return_name, assign_nodes + ): self.assert_parent_is_not_while(parent_node_of_return) assert value in [True, False], "value must be True or False." 
if isinstance(parent_node_of_return, gast.If): # Prepend control flow boolean nodes such as '__return@1 = True' node_str = "{} = _jst.create_bool_as_type({}, {})".format( return_name, - ast_to_source_code(parent_node_of_return.test).strip(), value) + ast_to_source_code(parent_node_of_return.test).strip(), + value, + ) assign_node = gast.parse(node_str).body[0] assign_nodes.append(assign_node) @@ -236,18 +249,26 @@ class SingleReturnTransformer(BaseTransformer): value_name = self.return_value_name if value_name is not None: node.body.append( - gast.Return(value=gast.Name(id=value_name, - ctx=gast.Load(), - annotation=None, - type_comment=None))) - assign_return_value_node = gast.Assign(targets=[ - gast.Name(id=value_name, - ctx=gast.Store(), - annotation=None, - type_comment=None) - ], - value=gast.Constant( - kind=None, value=None)) + gast.Return( + value=gast.Name( + id=value_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ) + ) + ) + assign_return_value_node = gast.Assign( + targets=[ + gast.Name( + id=value_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ) + ], + value=gast.Constant(kind=None, value=None), + ) node.body.insert(0, assign_return_value_node) # Prepend no value placeholders @@ -269,37 +290,52 @@ class SingleReturnTransformer(BaseTransformer): if index_in_list(branch_node, cur_node) != -1: if cur_node == node: self._replace_return_in_stmt_list( - branch_node, cur_node, return_name, - max_return_length, parent_node_of_return) + branch_node, + cur_node, + return_name, + max_return_length, + parent_node_of_return, + ) self._replace_after_node_to_if_in_stmt_list( - branch_node, cur_node, return_name, - parent_node_of_return) + branch_node, + cur_node, + return_name, + parent_node_of_return, + ) _deal_branches("body") _deal_branches("orelse") # If return node in while loop, add `not return_name` in gast.While.test if isinstance(ancestor, gast.While): - cond_var_node = gast.UnaryOp(op=gast.Not(), - operand=gast.Name( - id=return_name, - ctx=gast.Load(), - annotation=None, - type_comment=None)) + cond_var_node = gast.UnaryOp( + op=gast.Not(), + operand=gast.Name( + id=return_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ), + ) ancestor.test = gast.BoolOp( - op=gast.And(), values=[ancestor.test, cond_var_node]) + op=gast.And(), values=[ancestor.test, cond_var_node] + ) continue # If return node in for loop, add `not return_name` in gast.While.test if isinstance(ancestor, gast.For): - cond_var_node = gast.UnaryOp(op=gast.Not(), - operand=gast.Name( - id=return_name, - ctx=gast.Load(), - annotation=None, - type_comment=None)) + cond_var_node = gast.UnaryOp( + op=gast.Not(), + operand=gast.Name( + id=return_name, + ctx=gast.Load(), + annotation=None, + type_comment=None, + ), + ) parent_node = self.ancestor_nodes[ancestor_index - 1] - for_to_while = ForToWhileTransformer(parent_node, ancestor, - cond_var_node) + for_to_while = ForToWhileTransformer( + parent_node, ancestor, cond_var_node + ) new_stmts = for_to_while.transform() while_node = new_stmts[-1] self.ancestor_nodes[ancestor_index] = while_node @@ -308,8 +344,14 @@ class SingleReturnTransformer(BaseTransformer): break # return_node is replaced so we shouldn't return here - def _replace_return_in_stmt_list(self, stmt_list, return_node, return_name, - max_return_length, parent_node_of_return): + def _replace_return_in_stmt_list( + self, + stmt_list, + return_node, + return_name, + max_return_length, + parent_node_of_return, + ): assert max_return_length >= 0, "Input 
illegal max_return_length" i = index_in_list(stmt_list, return_node) @@ -317,24 +359,31 @@ class SingleReturnTransformer(BaseTransformer): return False assign_nodes = [] - self.append_assign_to_return_node(True, parent_node_of_return, - return_name, assign_nodes) + self.append_assign_to_return_node( + True, parent_node_of_return, return_name, assign_nodes + ) return_length = get_return_size(return_node) # In this case we should NOT append RETURN_NO_VALUE placeholder if return_node.value is not None: if self.return_value_name is None: self.return_value_name = unique_name.generate( - RETURN_VALUE_PREFIX) + RETURN_VALUE_PREFIX + ) assign_nodes.append( - gast.Assign(targets=[ - gast.Name(id=self.return_value_name, - ctx=gast.Store(), - annotation=None, - type_comment=None) - ], - value=return_node.value)) + gast.Assign( + targets=[ + gast.Name( + id=self.return_value_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ) + ], + value=return_node.value, + ) + ) return_origin_info = getattr(return_node, ORIGI_INFO, None) setattr(assign_nodes[-1], ORIGI_INFO, return_origin_info) @@ -343,9 +392,9 @@ class SingleReturnTransformer(BaseTransformer): stmt_list[i:] = assign_nodes return True - def _replace_after_node_to_if_in_stmt_list(self, stmt_list, node, - return_name, - parent_node_of_return): + def _replace_after_node_to_if_in_stmt_list( + self, stmt_list, node, return_name, parent_node_of_return + ): i = index_in_list(stmt_list, node) if i < 0 or i >= len(stmt_list): return False @@ -353,20 +402,26 @@ class SingleReturnTransformer(BaseTransformer): # No need to add, we consider this as added successfully return True - if_stmt = gast.If(test=gast.UnaryOp(op=gast.Not(), - operand=gast.Name( - id=return_name, - ctx=gast.Store(), - annotation=None, - type_comment=None)), - body=stmt_list[i + 1:], - orelse=[]) - - stmt_list[i + 1:] = [if_stmt] + if_stmt = gast.If( + test=gast.UnaryOp( + op=gast.Not(), + operand=gast.Name( + id=return_name, + ctx=gast.Store(), + annotation=None, + type_comment=None, + ), + ), + body=stmt_list[i + 1 :], + orelse=[], + ) + + stmt_list[i + 1 :] = [if_stmt] # Here assume that the parent node of return is gast.If assign_nodes = [] - self.append_assign_to_return_node(False, parent_node_of_return, - return_name, assign_nodes) + self.append_assign_to_return_node( + False, parent_node_of_return, return_name, assign_nodes + ) stmt_list[i:i] = assign_nodes return True diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py index 16723bea10eebcb5281ccbf7ac0f195229cd5df4..bf07523e12dba245fd0a821d19e02c6f17d1a30f 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py @@ -14,7 +14,13 @@ from paddle.utils import gast from .logging_utils import warn -from .utils import is_paddle_api, is_dygraph_api, is_numpy_api, index_in_list, ast_to_source_code +from .utils import ( + is_paddle_api, + is_dygraph_api, + is_numpy_api, + index_in_list, + ast_to_source_code, +) __all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor'] @@ -26,6 +32,7 @@ class NodeVarType(object): tensor variable in if clause may lead to different conversion from dygraph to static graph. 
""" + ERROR = -1 # Returns when static analysis gets error UNKNOWN = 0 # Reserve for AST nodes have not known the type STATEMENT = 1 # For nodes representing statement (non-variable type) @@ -62,7 +69,7 @@ class NodeVarType(object): "int": INT, "float": FLOAT, "bool": BOOLEAN, - "str": STRING + "str": STRING, } @staticmethod @@ -76,9 +83,12 @@ class NodeVarType(object): return in_type1 supported_types = [ - NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT, - NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR, - NodeVarType.PADDLE_RETURN_TYPES + NodeVarType.BOOLEAN, + NodeVarType.INT, + NodeVarType.FLOAT, + NodeVarType.NUMPY_NDARRAY, + NodeVarType.TENSOR, + NodeVarType.PADDLE_RETURN_TYPES, ] if in_type1 not in supported_types: @@ -122,14 +132,14 @@ class AstVarScope(object): AstVarScope is a class holding the map from current scope variable to its type. """ + SCOPE_TYPE_SCRIPT = 0 SCOPE_TYPE_FUNCTION = 1 SCOPE_TYPE_CLASS = 2 - def __init__(self, - scope_name='', - scope_type=SCOPE_TYPE_SCRIPT, - parent_scope=None): + def __init__( + self, scope_name='', scope_type=SCOPE_TYPE_SCRIPT, parent_scope=None + ): self.sub_scopes = [] self.name_to_id = {} self.id_to_type = {} @@ -158,8 +168,9 @@ class AstVarScope(object): num_id = self.cur_id self.cur_id += 1 self.name_to_id[var_name] = num_id - self.id_to_type[num_id] = node_var_type if isinstance( - node_var_type, set) else {node_var_type} + self.id_to_type[num_id] = ( + node_var_type if isinstance(node_var_type, set) else {node_var_type} + ) def get_var_type(self, var_name): if var_name in self.name_to_id: @@ -179,20 +190,24 @@ class AstVarEnv(object): self.cur_scope = AstVarScope() def enter_scope(self, scope_name, scope_type): - self.cur_scope = AstVarScope(scope_name, - scope_type, - parent_scope=self.cur_scope) + self.cur_scope = AstVarScope( + scope_name, scope_type, parent_scope=self.cur_scope + ) return self.cur_scope def exit_scope(self): - assert self.cur_scope.parent_scope is not None, "Call exit_scope in "\ + assert self.cur_scope.parent_scope is not None, ( + "Call exit_scope in " "AstVarEnv when current scope doesn't have parent scope." + ) self.cur_scope = self.cur_scope.parent_scope return self.cur_scope def get_parent_scope(self): - assert self.cur_scope.parent_scope is not None, "Call parent_scope in "\ + assert self.cur_scope.parent_scope is not None, ( + "Call parent_scope in " "AstVarEnv when current scope doesn't have parent scope." 
+ ) return self.cur_scope.parent_scope def add_var_type(self, var_name, node_var_type): @@ -252,11 +267,13 @@ class StaticAnalysisVisitor(object): self.ancestor_wrappers.append(cur_wrapper) for child in gast.iter_child_nodes(node): if isinstance(child, gast.FunctionDef) or isinstance( - child, gast.AsyncFunctionDef): + child, gast.AsyncFunctionDef + ): # TODO: current version is function name mapping to its type # consider complex case involving parameters - self.var_env.enter_scope(child.name, - AstVarScope.SCOPE_TYPE_FUNCTION) + self.var_env.enter_scope( + child.name, AstVarScope.SCOPE_TYPE_FUNCTION + ) func_type = self.dfs_visit(child) self.var_env.exit_scope() else: @@ -284,8 +301,10 @@ class StaticAnalysisVisitor(object): return True def _get_constant_node_type(self, node): - assert isinstance(node, gast.Constant), \ - "Type of input node should be gast.Constant, but received %s" % type(node) + assert isinstance(node, gast.Constant), ( + "Type of input node should be gast.Constant, but received %s" + % type(node) + ) # singleton: None, True or False if node.value is None: return {NodeVarType.NONE} @@ -338,7 +357,8 @@ class StaticAnalysisVisitor(object): for sub_target in target.elts: if isinstance(sub_target, gast.Name): self.node_to_wrapper_map[ - sub_target].node_var_type = ret_type + sub_target + ].node_var_type = ret_type self.var_env.set_var_type(sub_target.id, ret_type) return ret_type @@ -349,9 +369,12 @@ class StaticAnalysisVisitor(object): # if annotation and value(Constant) are diffent type, we use value type if node.value: node_value_type = self.node_to_wrapper_map[ - node.value].node_var_type - if not (node_value_type - & {NodeVarType.UNKNOWN, NodeVarType.STATEMENT}): + node.value + ].node_var_type + if not ( + node_value_type + & {NodeVarType.UNKNOWN, NodeVarType.STATEMENT} + ): ret_type = node_value_type if isinstance(node.target, gast.Name): self.node_to_wrapper_map[node.target].node_var_type = ret_type @@ -365,8 +388,9 @@ class StaticAnalysisVisitor(object): return {NodeVarType.BOOLEAN} # If node is child of functionDef.arguments parent_node_wrapper = cur_wrapper.parent - if parent_node_wrapper and isinstance(parent_node_wrapper.node, - gast.arguments): + if parent_node_wrapper and isinstance( + parent_node_wrapper.node, gast.arguments + ): return self._get_func_argument_type(parent_node_wrapper, node) @@ -378,7 +402,10 @@ class StaticAnalysisVisitor(object): return {NodeVarType.NONE} return_type = self.node_to_wrapper_map[node.value].node_var_type - assert self.var_env.cur_scope.scope_type == AstVarScope.SCOPE_TYPE_FUNCTION, "Return at non-function scope" + assert ( + self.var_env.cur_scope.scope_type + == AstVarScope.SCOPE_TYPE_FUNCTION + ), "Return at non-function scope" func_name = self.var_env.cur_scope.scope_name parent_scope = self.var_env.get_parent_scope() parent_scope.add_var_type(func_name, return_type) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/tensor_shape_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/tensor_shape_transformer.py index b11f18d2658a2a465570cf9e7a2bb5fa897a7fae..e7a882b28a296dd70448906bd3e03c14770cb00d 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/tensor_shape_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/tensor_shape_transformer.py @@ -15,8 +15,12 @@ from paddle.utils import gast from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper -from 
paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class TensorShapeTransformer(BaseTransformer): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/typehint_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/typehint_transformer.py index f258b98b50711942f62fac9ad0c874328ce0cdfd..3fddc0bcf58c205d78163075ac784edbb4d9b7be 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/typehint_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/typehint_transformer.py @@ -15,9 +15,13 @@ from paddle.utils import gast import warnings -from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper +from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + AstNodeWrapper, +) from paddle.fluid.dygraph.dygraph_to_static import utils -from paddle.fluid.dygraph.dygraph_to_static.base_transformer import BaseTransformer +from paddle.fluid.dygraph.dygraph_to_static.base_transformer import ( + BaseTransformer, +) class TypeHintTransformer(BaseTransformer): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py index 8e1950b21fca77d8fc543b62e024ac54f5543a94..8b4a7ac645ec035fcb4e9ed8e9d63d82fc2bb744 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py @@ -98,10 +98,18 @@ RE_PYMODULE = r'[a-zA-Z0-9_]+\.' # FullArgSpec is valid from Python3. Defined a Namedtuple to # to make it available in Python2. -FullArgSpec = collections.namedtuple('FullArgSpec', [ - 'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults', - 'annotations' -]) +FullArgSpec = collections.namedtuple( + 'FullArgSpec', + [ + 'args', + 'varargs', + 'varkw', + 'defaults', + 'kwonlyargs', + 'kwonlydefaults', + 'annotations', + ], +) def data_layer_not_check(name, shape, dtype='float32', lod_level=0): @@ -139,20 +147,26 @@ def data_layer_not_check(name, shape, dtype='float32', lod_level=0): if shape[i] is None: shape[i] = -1 - return helper.create_global_variable(name=name, - shape=shape, - dtype=dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - stop_gradient=True, - lod_level=lod_level, - is_data=True, - need_check_feed=False) + return helper.create_global_variable( + name=name, + shape=shape, + dtype=dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + stop_gradient=True, + lod_level=lod_level, + is_data=True, + need_check_feed=False, + ) def create_undefined_variable(): - from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_VALUE_MAGIC_NUM - var = data_layer_not_check(unique_name.generate("undefined_var"), [1], - "float64") + from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ( + RETURN_NO_VALUE_MAGIC_NUM, + ) + + var = data_layer_not_check( + unique_name.generate("undefined_var"), [1], "float64" + ) var.stop_gradient = False # the variable is created in block(0), we append assign in block(0) either. helper = LayerHelper('create_undefined_variable', **locals()) @@ -164,17 +178,16 @@ def create_undefined_variable(): class UndefinedVar: - def __init__(self, name): self.name = name def check(self): raise UnboundLocalError( - "local variable '{}' should be created before using it.") + "local variable '{}' should be created before using it." 
+ ) class Dygraph2StaticException(Exception): - def __init__(self, message): super().__init__(message) @@ -191,13 +204,15 @@ def getfullargspec(target): return inspect.getfullargspec(target) else: argspec = inspect.getargspec(target) - return FullArgSpec(args=argspec.args, - varargs=argspec.varargs, - varkw=argspec.keywords, - defaults=argspec.defaults, - kwonlyargs=[], - kwonlydefaults=None, - annotations={}) + return FullArgSpec( + args=argspec.args, + varargs=argspec.varargs, + varkw=argspec.keywords, + defaults=argspec.defaults, + kwonlyargs=[], + kwonlydefaults=None, + annotations={}, + ) def parse_arg_and_kwargs(function): @@ -214,7 +229,7 @@ def parse_arg_and_kwargs(function): default_values = fullargspec.defaults if default_values: assert len(default_values) <= len(arg_names) - default_kwarg_names = arg_names[-len(default_values):] + default_kwarg_names = arg_names[-len(default_values) :] default_kwargs = dict(zip(default_kwarg_names, default_values)) return arg_names, default_kwargs @@ -288,8 +303,9 @@ def is_api_in_module(node, module_prefix): from paddle.fluid.dygraph import to_variable from paddle import to_tensor - return eval("_is_api_in_module_helper({}, '{}')".format( - func_str, module_prefix)) + return eval( + "_is_api_in_module_helper({}, '{}')".format(func_str, module_prefix) + ) except Exception: return False @@ -320,12 +336,15 @@ def is_numpy_api(node): func_str = astor.to_source(gast.gast_to_ast(node.func)) try: import numpy as np - module_result = eval("_is_api_in_module_helper({}, '{}')".format( - func_str, "numpy")) + + module_result = eval( + "_is_api_in_module_helper({}, '{}')".format(func_str, "numpy") + ) # BUG: np.random.uniform doesn't have module and cannot be analyzed # TODO: find a better way - return module_result or (func_str.startswith("numpy.") - or func_str.startswith("np.")) + return module_result or ( + func_str.startswith("numpy.") or func_str.startswith("np.") + ) except Exception: return False @@ -334,6 +353,7 @@ def _delete_keywords_from(node): assert isinstance(node, gast.Call) func_src = astor.to_source(gast.gast_to_ast(node.func)) import paddle.fluid as fluid + full_args = eval("inspect.getargspec({})".format(func_src)) full_args_name = full_args[0] @@ -347,7 +367,8 @@ def to_static_api(dygraph_class): else: raise NotImplementedError( "Paddle dygraph API {} cannot be converted " - "to static graph at present.".format(dygraph_class)) + "to static graph at present.".format(dygraph_class) + ) def _add_keywords_to(node, dygraph_api_name): @@ -358,8 +379,10 @@ def _add_keywords_to(node, dygraph_api_name): ast_keyword.arg = "size" node.keywords.append( - gast.keyword(arg="num_flatten_dims", - value=gast.Constant(value=-1, kind=None))) + gast.keyword( + arg="num_flatten_dims", value=gast.Constant(value=-1, kind=None) + ) + ) if dygraph_api_name == "BilinearTensorProduct": for ast_keyword in node.keywords: @@ -378,15 +401,17 @@ def to_static_ast(node, class_node): assert isinstance(class_node, gast.Call) static_api = to_static_api(class_node.func.attr) - node.func = gast.Attribute(attr=static_api, - ctx=gast.Load(), - value=gast.Attribute(attr='layers', - ctx=gast.Load(), - value=gast.Name( - ctx=gast.Load(), - id='fluid', - annotation=None, - type_comment=None))) + node.func = gast.Attribute( + attr=static_api, + ctx=gast.Load(), + value=gast.Attribute( + attr='layers', + ctx=gast.Load(), + value=gast.Name( + ctx=gast.Load(), id='fluid', annotation=None, type_comment=None + ), + ), + ) update_args_of_func(node, class_node, 'forward') @@ -409,10 
+434,13 @@ def update_args_of_func(node, dygraph_node, method_name): class_src = astor.to_source(gast.gast_to_ast(dygraph_node.func)) import paddle.fluid as fluid + if method_name == "__init__" or eval( - "issubclass({}, fluid.dygraph.Layer)".format(class_src)): - full_args = eval("inspect.getargspec({}.{})".format( - class_src, method_name)) + "issubclass({}, fluid.dygraph.Layer)".format(class_src) + ): + full_args = eval( + "inspect.getargspec({}.{})".format(class_src, method_name) + ) full_args_name = [ arg_name for arg_name in full_args[0] if arg_name != "self" ] @@ -427,21 +455,24 @@ def update_args_of_func(node, dygraph_node, method_name): def create_api_shape_node(tensor_shape_node): - assert isinstance(tensor_shape_node, - (gast.Name, gast.Attribute, gast.Subscript)) + assert isinstance( + tensor_shape_node, (gast.Name, gast.Attribute, gast.Subscript) + ) if isinstance(tensor_shape_node, gast.Name): api_shape_node = gast.Call( func=gast.parse('paddle.shape').body[0].value, args=[tensor_shape_node], - keywords=[]) + keywords=[], + ) return api_shape_node if isinstance(tensor_shape_node, gast.Attribute): api_shape_node = gast.Call( func=gast.parse('paddle.shape').body[0].value, args=[tensor_shape_node.value], - keywords=[]) + keywords=[], + ) return api_shape_node if isinstance(tensor_shape_node, gast.Subscript): @@ -451,14 +482,15 @@ def create_api_shape_node(tensor_shape_node): def get_constant_variable_node(name, value, shape=[1], dtype='int64'): - return gast.parse('%s = paddle.full(%s, "%s", %s)' % - (name, str(shape), str(value), dtype)) + return gast.parse( + '%s = paddle.full(%s, "%s", %s)' % (name, str(shape), str(value), dtype) + ) def get_attribute_full_name(node): assert isinstance( - node, - gast.Attribute), "Input non-Attribute node to get attribute full name" + node, gast.Attribute + ), "Input non-Attribute node to get attribute full name" return astor.to_source(gast.gast_to_ast(node)).strip() @@ -476,15 +508,15 @@ def generate_name_node(name_ids, ctx=gast.Load(), gen_tuple_if_single=False): name_ids = [name_ids] if not isinstance(name_ids, (list, tuple, set)): raise TypeError( - 'name_ids must be list or tuple or set, but received %s' % - type(type(name_ids))) + 'name_ids must be list or tuple or set, but received %s' + % type(type(name_ids)) + ) def create_node_for_name(name): if '.' 
not in name: - return gast.Name(id=name, - ctx=ctx, - annotation=None, - type_comment=None) + return gast.Name( + id=name, ctx=ctx, annotation=None, type_comment=None + ) return gast.parse(name).body[0].value gast_names = [create_node_for_name(name_id) for name_id in name_ids] @@ -506,12 +538,14 @@ def create_funcDef_node(nodes, name, input_args, return_name_ids): nodes.append(gast.Return(value=generate_name_node(return_name_ids))) else: nodes.append(gast.Return(value=None)) - func_def_node = gast.FunctionDef(name=name, - args=input_args, - body=nodes, - decorator_list=[], - returns=None, - type_comment=None) + func_def_node = gast.FunctionDef( + name=name, + args=input_args, + body=nodes, + decorator_list=[], + returns=None, + type_comment=None, + ) return func_def_node @@ -571,12 +605,14 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True): source = ast_to_source_code(ast_root) source = _inject_import_statements() + source temp_dir = get_temp_dir() - f = tempfile.NamedTemporaryFile(mode='w', - prefix=func_prefix(dyfunc), - suffix='.py', - delete=False, - dir=temp_dir, - encoding='utf-8') + f = tempfile.NamedTemporaryFile( + mode='w', + prefix=func_prefix(dyfunc), + suffix='.py', + delete=False, + dir=temp_dir, + encoding='utf-8', + ) with f: module_name = os.path.basename(f.name[:-3]) f.write(source) @@ -598,8 +634,9 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True): callable_func = getattr(module, func_name) else: raise ValueError( - 'Function: %s doesn\'t exist in the Module transformed from AST.' % - func_name) + 'Function: %s doesn\'t exist in the Module transformed from AST.' + % func_name + ) # After transform dygraph function into callable_func saved in tmp file, # it lost the global variables from imported statements or defined in source file. # Recovers the necessary variables by `__globals__`. @@ -610,10 +647,14 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True): def _inject_import_statements(): import_statements = [ - "import paddle", "from paddle import Tensor", - "import paddle.fluid as fluid", "import paddle.jit.dy2static as _jst", - "from typing import *", "import numpy as np", "import warnings", - "warnings.filterwarnings('ignore', category=DeprecationWarning)" + "import paddle", + "from paddle import Tensor", + "import paddle.fluid as fluid", + "import paddle.jit.dy2static as _jst", + "from typing import *", + "import numpy as np", + "import warnings", + "warnings.filterwarnings('ignore', category=DeprecationWarning)", ] return '\n'.join(import_statements) + '\n' @@ -636,8 +677,10 @@ def func_to_source_code(function, dedent=True): """ if not (inspect.isfunction(function) or inspect.ismethod(function)): raise TypeError( - "The type of 'function' should be a function or method, but received {}." - .format(type(function).__name__)) + "The type of 'function' should be a function or method, but received {}.".format( + type(function).__name__ + ) + ) source_code_list, _ = inspect.getsourcelines(function) # Replace comments with blank lines so that error messages are not misplaced source_code_list = [ @@ -657,8 +700,9 @@ def ast_to_source_code(ast_node): """ if not isinstance(ast_node, (gast.AST, ast.AST)): raise TypeError( - "Type of ast_root should be gast.AST or ast.AST, but received %s." % - type(ast_node)) + "Type of ast_root should be gast.AST or ast.AST, but received %s." 
+ % type(ast_node) + ) if isinstance(ast_node, gast.AST): ast_node = gast.gast_to_ast(ast_node) @@ -674,8 +718,17 @@ def is_candidate_node(node): """ Nodes with specified type will be dependent on tensor. """ - is_compare_node = isinstance(node, (gast.Compare, gast.BoolOp, gast.UnaryOp, - gast.For, gast.If, gast.While)) + is_compare_node = isinstance( + node, + ( + gast.Compare, + gast.BoolOp, + gast.UnaryOp, + gast.For, + gast.If, + gast.While, + ), + ) # TODO(Aurelius84): `.numpy()` may be an customized function, # and should consider a more elegant way to solve this problem. has_numpy_attr = ".numpy()" in ast_to_source_code(node) @@ -691,9 +744,9 @@ def compare_with_none(node): # node.comparators is a list. if isinstance(child, list): child = child[0] - if (isinstance(child, gast.Constant) - and child.value is None) or (isinstance(child, gast.Name) - and child.id == 'None'): + if (isinstance(child, gast.Constant) and child.value is None) or ( + isinstance(child, gast.Name) and child.id == 'None' + ): return True return False @@ -728,20 +781,22 @@ class IsControlFlowVisitor(gast.NodeVisitor): because reshape_op may be called before this statement. """ - def __init__(self, - ast_node, - static_analysis_visitor=None, - node_var_type_map=None): + def __init__( + self, ast_node, static_analysis_visitor=None, node_var_type_map=None + ): assert isinstance( ast_node, gast.AST ), "Type of input node should be gast.AST, but received %s." % type( - ast_node) + ast_node + ) self.ast_root = ast_node if static_analysis_visitor is None: from .static_analysis import StaticAnalysisVisitor + static_analysis_visitor = StaticAnalysisVisitor(ast_node) self.static_analysis_visitor = static_analysis_visitor - self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map( + self.node_to_wrapper_map = ( + self.static_analysis_visitor.get_node_to_wrapper_map() ) self.node_var_type_map = node_var_type_map @@ -770,7 +825,10 @@ class IsControlFlowVisitor(gast.NodeVisitor): if isinstance(node.iter, gast.Call): # for in range(var[0]|var.numpy()[0]) or for in enumerate(var|var.numpy()) if isinstance(node.iter.func, gast.Name): - if node.iter.func.id == "range" or node.iter.func.id == "enumerate": + if ( + node.iter.func.id == "range" + or node.iter.func.id == "enumerate" + ): for arg in node.iter.args: self.visit(arg) else: @@ -869,7 +927,9 @@ class IsControlFlowVisitor(gast.NodeVisitor): return node def _is_node_with_tensor(self, node, name_id): - from paddle.fluid.dygraph.dygraph_to_static.static_analysis import NodeVarType + from paddle.fluid.dygraph.dygraph_to_static.static_analysis import ( + NodeVarType, + ) # Look up the node_var_type_map by name_id. 
if self.node_var_type_map: @@ -899,7 +959,7 @@ def unwrap(func): return hasattr(f, '__wrapped__') unwrapped_f = func - while (_is_wrapped(unwrapped_f)): + while _is_wrapped(unwrapped_f): unwrapped_f = unwrapped_f.__wrapped__ return unwrapped_f @@ -923,10 +983,12 @@ def input_specs_compatible(src_input_specs, desired_input_specs): if spec not in desired_input_specs: return False else: - for (src_spec, desired_spec) in zip(src_input_specs, - desired_input_specs): + for (src_spec, desired_spec) in zip( + src_input_specs, desired_input_specs + ): if isinstance(src_spec, paddle.static.InputSpec) or isinstance( - desired_spec, paddle.static.InputSpec): + desired_spec, paddle.static.InputSpec + ): if not _compatible_tensor_spec(src_spec, desired_spec): return False else: @@ -986,15 +1048,14 @@ def _compatible_non_tensor_spec(src_spec, desired_spec): class NameScope: - def __init__(self): """ - A NameScope is a object which manager all the variable names. - only FunctionDef and Controlflow node will have a namescope property. + A NameScope is a object which manager all the variable names. + only FunctionDef and Controlflow node will have a namescope property. - type can be "function" and "controlflow" + type can be "function" and "controlflow" - we don't analyze the read only variable because they don't affect the analysis. + we don't analyze the read only variable because they don't affect the analysis. """ self.globals = set() self.nonlocals = set() @@ -1010,8 +1071,8 @@ class NameScope: self.father = father def existed_vars(self): - """ vars existing in current scope. - they must not contain qualified names. + """vars existing in current scope. + they must not contain qualified names. """ local_vars = self.w_vars - self.globals - self.nonlocals - self.args return set(filter(lambda x: '.' not in x, local_vars)) @@ -1040,18 +1101,20 @@ class NameScope: f"Find variable `{var}` defined in global scope" f" and call `{var}.append() or {var}.pop()`" f", which will be ignored and never be transfered into" - f" tensor array.") + f" tensor array." + ) else: non_global_push_pop_names.append(var) return set(non_global_push_pop_names) def control_flow_vars(self): valid_names = self.w_vars - tmp = self.father.global_vars & valid_names, + tmp = (self.father.global_vars & valid_names,) return {"global": tmp, "nonlocal": self.w_vars - tmp} def _is_simple_name(self, name): - if '.' in name or '[' in name: return False + if '.' in name or '[' in name: + return False return True def is_global_var(self, name): @@ -1062,11 +1125,14 @@ class NameScope: Only valid after FunctionNameLivenessAnalysis visitor. """ assert self._is_simple_name( - name), "is_global_var accept a simple name, but get `{name}`." + name + ), "is_global_var accept a simple name, but get `{name}`." ancestor = self while ancestor is not None: - if name in ancestor.globals: return True - if name in (ancestor.nonlocals | ancestor.w_vars): return False + if name in ancestor.globals: + return True + if name in (ancestor.nonlocals | ancestor.w_vars): + return False ancestor = ancestor.father return True @@ -1082,46 +1148,46 @@ class NameScope: class FunctionNameLivenessAnalysis(gast.NodeVisitor): - """ analyze the liveness of a function. - - every variables stored in this scope will be collected, - in addition with global/nonlocal information and - push_pop information. - - 1. global variable is stored in node.var_globals. - 2. nonlocal variable is stored in node.var_nonlocals. - 3. arguments is stored in node.var_args. - 4. 
if a variable's push and pop attribute is called, - it will be collected in push_pop_vars. They are - used for transformation to tensor_array. - NOTE: push_pop_vars **may not** in w_vars. - a.push(0) don't modify the variable a, but the content - of a. - - For example: - - def func(*args, **kargs): - a = 12 - global i,j - nonlocal x,y - print(a) - i = k - b = [] - c = [1,2,3] - for m in range(10): - q = 12 - b.push(1) - c.pop() - - After this visitor we have: - # node is the FunctionDef node with name: "func" - node.pd_scope = NameScope( - globals = ['i', 'j'], - nonlocals = ['x', 'y'], - args = ['args', 'kargs'], - wr_vars = ['a', 'i', 'q', 'm', 'c', 'b'] - push_pop_vars = ['b', 'c'] - ) + """analyze the liveness of a function. + + every variables stored in this scope will be collected, + in addition with global/nonlocal information and + push_pop information. + + 1. global variable is stored in node.var_globals. + 2. nonlocal variable is stored in node.var_nonlocals. + 3. arguments is stored in node.var_args. + 4. if a variable's push and pop attribute is called, + it will be collected in push_pop_vars. They are + used for transformation to tensor_array. + NOTE: push_pop_vars **may not** in w_vars. + a.push(0) don't modify the variable a, but the content + of a. + + For example: + + def func(*args, **kargs): + a = 12 + global i,j + nonlocal x,y + print(a) + i = k + b = [] + c = [1,2,3] + for m in range(10): + q = 12 + b.push(1) + c.pop() + + After this visitor we have: + # node is the FunctionDef node with name: "func" + node.pd_scope = NameScope( + globals = ['i', 'j'], + nonlocals = ['x', 'y'], + args = ['args', 'kargs'], + wr_vars = ['a', 'i', 'q', 'm', 'c', 'b'] + push_pop_vars = ['b', 'c'] + ) """ def __init__(self, root_node): @@ -1141,25 +1207,26 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor): return self._get_name_scope(self.scope_node_stack[-1]) def _father_name_scope(self): - if len(self.scope_node_stack) == 1: return None + if len(self.scope_node_stack) == 1: + return None return self._get_name_scope(self.scope_node_stack[-2]) def _nearest_function_scope(self): - if len(self.scope_node_stack) == 1: return None + if len(self.scope_node_stack) == 1: + return None for node in self.scope_node_stack[-2::-1]: if isinstance(node, gast.FunctionDef): return self._get_name_scope(node) def visit_ListComp(self, node): - """ [ i for i in range(10) ] - In this case, `i` will not created in FunctionScope. - We don't collect `i` by not calling generic_visit. + """[ i for i in range(10) ] + In this case, `i` will not created in FunctionScope. + We don't collect `i` by not calling generic_visit. """ pass def visit_DictComp(self, node): - """ the same as ListComp. - """ + """the same as ListComp.""" pass def visit_Name(self, node): @@ -1169,62 +1236,86 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor): self._current_name_scope().w_vars.add(node.id) def visit_FunctionDef(self, node): - def pre_func(): self._current_name_scope().args |= set( - self._get_argument_names(node)) + self._get_argument_names(node) + ) def post_func(): - """ NOTE: why we need merge w_vars and push_pop_vars here ? - because we do ifelse_transformer after loop_transformer. Loops will changed into functioons. but we know this function will be called in if. so we add w_vars to father function scope. + """NOTE: why we need merge w_vars and push_pop_vars here ? + because we do ifelse_transformer after loop_transformer. Loops will changed into functioons. but we know this function will be called in if. 
so we add w_vars to father function scope. """ - from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import WHILE_CONDITION_PREFIX, WHILE_BODY_PREFIX, FOR_CONDITION_PREFIX, FOR_BODY_PREFIX - from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import TRUE_FUNC_PREFIX, FALSE_FUNC_PREFIX + from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import ( + WHILE_CONDITION_PREFIX, + WHILE_BODY_PREFIX, + FOR_CONDITION_PREFIX, + FOR_BODY_PREFIX, + ) + from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import ( + TRUE_FUNC_PREFIX, + FALSE_FUNC_PREFIX, + ) + control_flow_function_def = [ - WHILE_BODY_PREFIX, WHILE_BODY_PREFIX, FOR_CONDITION_PREFIX, - FOR_BODY_PREFIX, TRUE_FUNC_PREFIX, FALSE_FUNC_PREFIX + WHILE_BODY_PREFIX, + WHILE_BODY_PREFIX, + FOR_CONDITION_PREFIX, + FOR_BODY_PREFIX, + TRUE_FUNC_PREFIX, + FALSE_FUNC_PREFIX, ] def is_control_flow_def_node(): for prefix in control_flow_function_def: - if node.name.startswith(prefix): return True + if node.name.startswith(prefix): + return True return False if self._father_name_scope() and is_control_flow_def_node(): - self._father_name_scope().w_vars |= self._current_name_scope( - ).w_vars - self._father_name_scope( - ).push_pop_vars |= self._current_name_scope().push_pop_vars + self._father_name_scope().w_vars |= ( + self._current_name_scope().w_vars + ) + self._father_name_scope().push_pop_vars |= ( + self._current_name_scope().push_pop_vars + ) self._visit_scope_node(node, pre_func, post_func) def _visit_scope_node(self, node, pre_func, post_func): - """ scope node main visit logic. - pre_func and post_func is callbacks + """scope node main visit logic. + pre_func and post_func is callbacks """ self._reset_name_scope(node) self.scope_node_stack.append(node) self._current_name_scope().set_father(self._nearest_function_scope()) - if pre_func: pre_func() + if pre_func: + pre_func() self.generic_visit(node) - if post_func: post_func() + if post_func: + post_func() self.scope_node_stack.pop() def _visit_controlflow_node(self, node): - def post_func(): self._father_name_scope().merge_from(self._current_name_scope()) self._nearest_function_scope().merge_from( - self._current_name_scope()) - self._current_name_scope().created = self._nearest_function_scope( - ).existed_vars() - node.before_created + self._current_name_scope() + ) + self._current_name_scope().created = ( + self._nearest_function_scope().existed_vars() + - node.before_created + ) # gather created vars into father and used in CreateUndefinedVarTransform - self._nearest_function_scope().created |= self._current_name_scope( - ).created + self._nearest_function_scope().created |= ( + self._current_name_scope().created + ) def pre_func(): - setattr(node, "before_created", - self._nearest_function_scope().existed_vars()) + setattr( + node, + "before_created", + self._nearest_function_scope().existed_vars(), + ) self._visit_scope_node(node, pre_func, post_func) @@ -1262,12 +1353,13 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor): self._current_name_scope().push_pop_vars.add(name) def _get_argument_names(self, node): - """ get all arguments name in the functiondef node. - this node is local to the function and shouldn't - be created. + """get all arguments name in the functiondef node. + this node is local to the function and shouldn't + be created. 
""" assert isinstance( - node, gast.FunctionDef), "Input node is not function define node" + node, gast.FunctionDef + ), "Input node is not function define node" names = [a for a in node.args.args] names.append(node.args.vararg) names.append(node.args.kwarg) @@ -1288,7 +1380,9 @@ def create_get_args_node(names): func_def = """ def {func_name}(): return - """.format(func_name=unique_name.generate(GET_ARGS_FUNC_PREFIX)) + """.format( + func_name=unique_name.generate(GET_ARGS_FUNC_PREFIX) + ) return gast.parse(textwrap.dedent(func_def)).body[0] assert isinstance(names, (list, tuple)) @@ -1307,7 +1401,8 @@ def create_get_args_node(names): func_def = template.format( func_name=unique_name.generate(GET_ARGS_FUNC_PREFIX), nonlocal_vars=nonlocal_vars, - vars=",".join(names)) + vars=",".join(names), + ) return gast.parse(textwrap.dedent(func_def)).body[0] @@ -1324,8 +1419,9 @@ def create_set_args_node(names): func_def = """ def {func_name}({args}): pass - """.format(func_name=unique_name.generate(SET_ARGS_FUNC_PREFIX), - args=ARGS_NAME) + """.format( + func_name=unique_name.generate(SET_ARGS_FUNC_PREFIX), args=ARGS_NAME + ) return gast.parse(textwrap.dedent(func_def)).body[0] assert isinstance(names, (list, tuple)) @@ -1345,7 +1441,8 @@ def create_set_args_node(names): func_name=unique_name.generate(SET_ARGS_FUNC_PREFIX), args=ARGS_NAME, nonlocal_vars=nonlocal_vars, - vars=",".join(names)) + vars=",".join(names), + ) return gast.parse(textwrap.dedent(func_def)).body[0] @@ -1355,8 +1452,8 @@ def create_nonlocal_stmt_nodes(names): mapped = list(filter(lambda n: '.' not in n, names)) mapped = list(filter(lambda n: '[' not in n, mapped)) names = sorted( - mapped, - key=mapped.index) # to keep the order, we can't use set() to unique + mapped, key=mapped.index + ) # to keep the order, we can't use set() to unique if not names: return [] func_code = "nonlocal {}".format(','.join(names)) @@ -1364,10 +1461,10 @@ def create_nonlocal_stmt_nodes(names): class GetterSetterHelper: - """ we have two classes of names in setter and getter function: - w_vars(loop_vars) + push_pop_vars - To simplify the setter logic in convert_while and convert_cond, - we extract the helper class here. + """we have two classes of names in setter and getter function: + w_vars(loop_vars) + push_pop_vars + To simplify the setter logic in convert_while and convert_cond, + we extract the helper class here. 
""" def __init__(self, getter_func, setter_func, *name_lists): @@ -1383,22 +1480,33 @@ class GetterSetterHelper: return self._union def get(self, names): - if names is None: names = [] + if names is None: + names = [] vars = self.getter() - if vars is None: return tuple() + if vars is None: + return tuple() for n in names: - assert n in self.name2id, "the name `{}` not in name union set`{}`.".format( - n, self.name2id.keys()) + assert ( + n in self.name2id + ), "the name `{}` not in name union set`{}`.".format( + n, self.name2id.keys() + ) return tuple(map(lambda n: vars[self.name2id[n]], names)) def set(self, names, values): - if names is None: names = [] - if values is None: values = [] + if names is None: + names = [] + if values is None: + values = [] vars = self.getter() - if vars is None: return + if vars is None: + return for n in names: - assert n in self.name2id, "the name `{}` not in name union set`{}`.".format( - n, self.name2id.keys()) + assert ( + n in self.name2id + ), "the name `{}` not in name union set`{}`.".format( + n, self.name2id.keys() + ) vars = list(vars) indices = list(map(lambda n: self.name2id[n], names)) for i, v in zip(indices, values): diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py b/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py index 8afe558ecfc2c8256504fe927869b56f375c16c5..f28f1993621cc43eb8e4dfb1ddf6f2b07b99c432 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py @@ -17,7 +17,10 @@ import textwrap from paddle.utils import gast from paddle.fluid import unique_name from paddle.fluid.framework import Variable -from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar, create_undefined_variable +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + UndefinedVar, + create_undefined_variable, +) from paddle.fluid.layers.utils import map_structure, is_sequence __all__ = [ @@ -37,16 +40,19 @@ def create_fill_constant_node(name, value=0): func_code = "{} = paddle.full(shape=[1], ".format(name) if isinstance(value, bool): func_code += "dtype='bool', fill_value={}, name='{}')".format( - value, name) + value, name + ) return gast.parse(func_code).body[0] if isinstance(value, float): func_code += "dtype='float64', fill_value={}, name='{}')".format( - value, name) + value, name + ) return gast.parse(func_code).body[0] if isinstance(value, int): func_code += "dtype='int64', fill_value={}, name='{}')".format( - value, name) + value, name + ) return gast.parse(func_code).body[0] diff --git a/python/paddle/fluid/dygraph/inplace_utils.py b/python/paddle/fluid/dygraph/inplace_utils.py index 968a957b660d3510e0abafdbf39fd6ffe5c4d364..fb27a5674b7d8b5eff301a26df77cde39dee8512 100644 --- a/python/paddle/fluid/dygraph/inplace_utils.py +++ b/python/paddle/fluid/dygraph/inplace_utils.py @@ -23,13 +23,14 @@ from paddle import _C_ops, _legacy_C_ops # in dygraph mode. If static mode is used, the inplace mechanism will not be used, and the static method # of the original API will be called. def _inplace_apis_in_dygraph_only_(func): - def __impl__(*args, **kwargs): if not _non_static_mode(): origin_api_name = func.__name__[:-1] warnings.warn( - "In static mode, {}() is the same as {}() and does not perform inplace operation." 
- .format(func.__name__, origin_api_name)) + "In static mode, {}() is the same as {}() and does not perform inplace operation.".format( + func.__name__, origin_api_name + ) + ) origin_func = "{}.{}".format(func.__module__, origin_api_name) return eval(origin_func)(*args, **kwargs) return func(*args, **kwargs) diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py index 9ddb46798cfb169d2fa1358086d8a6b64ba0e4a4..d2e7ada85a7568bceb1d87e389eafebb20813cc3 100644 --- a/python/paddle/fluid/dygraph/io.py +++ b/python/paddle/fluid/dygraph/io.py @@ -27,8 +27,14 @@ from paddle.fluid.layers import nn from paddle.fluid.layers.utils import _hash_with_id from paddle.fluid.dygraph.base import switch_to_static_graph from paddle.fluid.framework import _non_static_mode -from paddle.fluid.executor import _is_enable_standalone_executor, _is_dy2st_enable_standalone_executor -from paddle.fluid.dygraph.dygraph_to_static.partial_program import add_build_strategy_for, LazyInitialized +from paddle.fluid.executor import ( + _is_enable_standalone_executor, + _is_dy2st_enable_standalone_executor, +) +from paddle.fluid.dygraph.dygraph_to_static.partial_program import ( + add_build_strategy_for, + LazyInitialized, +) from paddle import _C_ops, _legacy_C_ops __all__ = ['TranslatedLayer'] @@ -50,17 +56,20 @@ def _load_program_desc(model_file_path): program_desc = core.ProgramDesc(program_desc_str) if not core._is_program_version_supported(program_desc._version()): - raise ValueError("Unsupported program version: %d\n" % - program_desc._version()) + raise ValueError( + "Unsupported program version: %d\n" % program_desc._version() + ) return program_desc def _is_persistable(var_desc): - if var_desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var_desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var_desc.type() == core.VarDesc.VarType.READER or \ - var_desc.type() == core.VarDesc.VarType.RAW: + if ( + var_desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var_desc.type() == core.VarDesc.VarType.FETCH_LIST + or var_desc.type() == core.VarDesc.VarType.READER + or var_desc.type() == core.VarDesc.VarType.RAW + ): return False return var_desc.persistable() @@ -204,9 +213,11 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None): name_old = var.name() is_double_grad_var = "@GRAD" in name_old has_double_grad = has_double_grad or is_double_grad_var - should_rename = (include is None or name_old in include) and ( - exclude is None - or name_old not in exclude) and not is_double_grad_var + should_rename = ( + (include is None or name_old in include) + and (exclude is None or name_old not in exclude) + and not is_double_grad_var + ) if should_rename: temp_name = name_old.split('_') if len(temp_name) > 1 and temp_name[-1].isnumeric(): @@ -215,9 +226,12 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None): temp_name = name_old while True: name_new = _generate_unique_var_name_sync_with_main_program( - temp_name) - if name_new not in old_names[:var_idx] + old_names[var_idx + - 1:]: + temp_name + ) + if ( + name_new + not in old_names[:var_idx] + old_names[var_idx + 1 :] + ): break else: name_new = name_old @@ -237,13 +251,16 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None): var_name = var.name() if "@GRAD" in var_name and name_old in var_name: new_var_name = var_name.replace( - name_old, dict_rename_var_old_new[name_old]) + name_old, dict_rename_var_old_new[name_old] + ) double_grad_rename_dict[var_name] = new_var_name for var_name 
in double_grad_rename_dict: dict_rename_var_old_new[var_name] = double_grad_rename_dict[ - var_name] + var_name + ] dict_rename_var_new_old[ - double_grad_rename_dict[var_name]] = var_name + double_grad_rename_dict[var_name] + ] = var_name # Rename on program desc for b_idx in range(program_desc.num_blocks()): @@ -252,27 +269,38 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None): op = cur_block.op(op_idx) for input_arg_name in op.input_arg_names(): if input_arg_name in dict_rename_var_old_new: - if input_arg_name != dict_rename_var_old_new[input_arg_name]: + if ( + input_arg_name + != dict_rename_var_old_new[input_arg_name] + ): op._rename_input( input_arg_name, - dict_rename_var_old_new[input_arg_name]) + dict_rename_var_old_new[input_arg_name], + ) if cur_block.has_var(input_arg_name.encode()): cur_block._rename_var( input_arg_name.encode(), - dict_rename_var_old_new[input_arg_name].encode( - )) + dict_rename_var_old_new[ + input_arg_name + ].encode(), + ) for output_arg_name in op.output_arg_names(): if output_arg_name in dict_rename_var_old_new: - if output_arg_name != dict_rename_var_old_new[ - output_arg_name]: + if ( + output_arg_name + != dict_rename_var_old_new[output_arg_name] + ): op._rename_output( output_arg_name, - dict_rename_var_old_new[output_arg_name]) + dict_rename_var_old_new[output_arg_name], + ) if cur_block.has_var(output_arg_name.encode()): cur_block._rename_var( output_arg_name.encode(), - dict_rename_var_old_new[output_arg_name].encode( - )) + dict_rename_var_old_new[ + output_arg_name + ].encode(), + ) program_desc.flush() return dict_rename_var_new_old, dict_rename_var_old_new @@ -327,7 +355,8 @@ class _ProgramHolder(object): self._infer_program_desc = self._preprocess(program_desc) # forward + backward program self._train_program_desc = self._append_backward_desc( - self._infer_program_desc) + self._infer_program_desc + ) # forward: @switch_to_static_graph @@ -348,11 +377,13 @@ class _ProgramHolder(object): def _create_backward_train_program(self): whole_program = _build_program_by_desc(self._train_program_desc) start_op_index = self._infer_program_desc.block(0).op_size() + 2 * len( - self._output_descs) + self._output_descs + ) end_op_index = whole_program.desc.block(0).op_size() - if (start_op_index < end_op_index): - return add_build_strategy_for(whole_program, start_op_index, - end_op_index) + if start_op_index < end_op_index: + return add_build_strategy_for( + whole_program, start_op_index, end_op_index + ) else: return paddle.static.Program() @@ -400,7 +431,8 @@ class _ProgramHolder(object): # rename persistable variables of 'program_desc' list_persistable_var = _get_persistable_var_names(program_desc) rename_new_old_dict, _ = _rename_var_program_desc( - program_desc, list_persistable_var) + program_desc, list_persistable_var + ) # 1. 
Prune original program # remove feed, fetch and scale-1 op, remove op_callstack attr ops_to_remove = [] @@ -412,14 +444,17 @@ class _ProgramHolder(object): feed_var_name = op.input('X')[0].encode() root_block._remove_var(feed_var_name) self._input_descs.append( - root_block.find_var(op.output('Out')[0].encode())) + root_block.find_var(op.output('Out')[0].encode()) + ) elif op.type() == 'scale' and op.output('Out')[0].startswith( - 'save_infer_model/scale_'): + 'save_infer_model/scale_' + ): ops_to_remove.append(i) out_var_name = op.output('Out')[0].encode() root_block._remove_var(out_var_name) self._output_descs.append( - root_block.find_var(op.input('X')[0].encode())) + root_block.find_var(op.input('X')[0].encode()) + ) elif op.type() == 'fetch': ops_to_remove.append(i) fetch_var_name = op.output('Out')[0].encode() @@ -427,7 +462,8 @@ class _ProgramHolder(object): # NOTE: some old pre-train models have no extra scale_op if not op.input('X')[0].startswith('save_infer_model/scale_'): self._output_descs.append( - root_block.find_var(op.input('X')[0].encode())) + root_block.find_var(op.input('X')[0].encode()) + ) else: if op.has_attr("op_callstack"): op.remove_attr("op_callstack") @@ -472,7 +508,8 @@ class _ProgramHolder(object): # there will be a problem of duplicate names, so here is unified # to add the LOADED suffix to the parameters of the model loaded self._suffix_varname_dict = _get_loaded_var_new_old( - program_desc, rename_new_old_dict) + program_desc, rename_new_old_dict + ) # - get persistable var self._persistable_names = _get_persistable_var_names(program_desc) @@ -486,9 +523,9 @@ class _ProgramHolder(object): with framework.program_guard(program): for i, out in enumerate(self._output_descs): var = program.global_block().var(out.name()) - var = nn.scale(var, - 1., - name="translated_layer/scale_{}".format(i)) + var = nn.scale( + var, 1.0, name="translated_layer/scale_{}".format(i) + ) scale_output_vars.append(var) # 2. 
update output names & descs for i, var in enumerate(scale_output_vars): @@ -513,15 +550,19 @@ class _ProgramHolder(object): block = program.block(block_idx) for op in block.ops: if op.type == "batch_norm": - if "ReserveSpace" not in op.output_names or len( - op.output("ReserveSpace")) == 0: + if ( + "ReserveSpace" not in op.output_names + or len(op.output("ReserveSpace")) == 0 + ): reserve_space = block.create_var( name=unique_name.generate_with_ignorable_key( - ".".join(["reserve_space", 'tmp'])), + ".".join(["reserve_space", 'tmp']) + ), dtype=block.var(op.input("X")[0]).dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) op.desc.set_output("ReserveSpace", [reserve_space.name]) return program @@ -567,9 +608,9 @@ class _ProgramHolder(object): # NOTE: [compatible] deal with model saved by save_inference_model, # which need get var info from program desc -def _load_persistable_vars_by_program(model_path, - program_holder, - params_filename=None): +def _load_persistable_vars_by_program( + model_path, program_holder, params_filename=None +): # make sure the path has been checked persistable_vars = _get_persistable_vars(program_holder.infer_program) load_var_dict = {} @@ -578,37 +619,43 @@ def _load_persistable_vars_by_program(model_path, if _is_parameter(each_var, program_holder.infer_program): # create output varbase if framework._in_eager_without_dygraph_check(): - new_var = framework.EagerParamBase(shape=each_var.shape(), - dtype=each_var.dtype(), - name=each_var.name(), - type=each_var.type(), - persistable=True) + new_var = framework.EagerParamBase( + shape=each_var.shape(), + dtype=each_var.dtype(), + name=each_var.name(), + type=each_var.type(), + persistable=True, + ) else: - new_var = framework.ParamBase(shape=each_var.shape(), - dtype=each_var.dtype(), - name=each_var.name(), - type=each_var.type(), - persistable=True) + new_var = framework.ParamBase( + shape=each_var.shape(), + dtype=each_var.dtype(), + name=each_var.name(), + type=each_var.type(), + persistable=True, + ) else: - new_var = framework._varbase_creator(type=each_var.type(), - name=each_var.name(), - shape=each_var.shape(), - dtype=each_var.dtype(), - persistable=True) + new_var = framework._varbase_creator( + type=each_var.type(), + name=each_var.name(), + shape=each_var.shape(), + dtype=each_var.dtype(), + persistable=True, + ) if params_filename is None: framework._dygraph_tracer().trace_op( type='load', inputs={}, outputs={'Out': new_var}, - attrs={'file_path': os.path.join(model_path, orig_each_name)}) + attrs={'file_path': os.path.join(model_path, orig_each_name)}, + ) new_var.stop_gradient = False load_var_dict[each_var.name()] = new_var if params_filename is not None: load_var_list = [] dict_name_old_new = { - v: k - for k, v in program_holder._suffix_varname_dict.items() + v: k for k, v in program_holder._suffix_varname_dict.items() } for name in sorted(dict_name_old_new.keys()): load_var_list.append(load_var_dict[dict_name_old_new[name]]) @@ -617,7 +664,8 @@ def _load_persistable_vars_by_program(model_path, type='load_combine', inputs={}, outputs={'Out': load_var_list}, - attrs={'file_path': os.path.join(model_path, params_filename)}) + attrs={'file_path': os.path.join(model_path, params_filename)}, + ) for each_var in persistable_vars: if not _is_parameter(each_var, program_holder.infer_program): @@ -639,8 +687,9 @@ def _load_persistable_vars_by_program(model_path, return load_var_dict -def _load_persistable_vars(model_path, var_info_path, 
program_holder, - params_filename): +def _load_persistable_vars( + model_path, var_info_path, program_holder, params_filename +): # 1. load extra var info with open(var_info_path, 'rb') as f: extra_var_info = pickle.load(f) @@ -649,8 +698,7 @@ def _load_persistable_vars(model_path, var_info_path, program_holder, load_var_dict = dict() load_var_list = [] inv_suffix_varname_dict = { - value: key - for key, value in program_holder._suffix_varname_dict.items() + value: key for key, value in program_holder._suffix_varname_dict.items() } # NOTE(chenweihang): we need load persistable vars based the program, @@ -661,7 +709,8 @@ def _load_persistable_vars(model_path, var_info_path, program_holder, raise RuntimeError( "The model to be loaded is not complete." "The variable `%s` of program cannot be found in loaded model.", - name) + name, + ) # get suffix var name, see [why need to append suffix to persistable vars] new_name = inv_suffix_varname_dict[name] # create output varbase @@ -674,7 +723,8 @@ def _load_persistable_vars(model_path, var_info_path, program_holder, ], # only to pass check, this shape is not meaningful dtype=core.VarDesc.VarType.FP32, name=new_name, - persistable=True) + persistable=True, + ) else: new_var = framework.ParamBase( shape=[ @@ -682,10 +732,12 @@ def _load_persistable_vars(model_path, var_info_path, program_holder, ], # only to pass check, this shape is not meaningful dtype=core.VarDesc.VarType.FP32, name=new_name, - persistable=True) + persistable=True, + ) else: - new_var = framework._varbase_creator(name=new_name, - persistable=True) + new_var = framework._varbase_creator( + name=new_name, persistable=True + ) new_var.stop_gradient = extra_var_info[name]['stop_gradient'] load_var_dict[new_name] = new_var @@ -698,10 +750,12 @@ def _load_persistable_vars(model_path, var_info_path, program_holder, if len(extra_var_info) != 0: raise ValueError("The model to be loaded is incomplete.") else: - framework._dygraph_tracer().trace_op(type='load_combine', - inputs={}, - outputs={'Out': load_var_list}, - attrs={'file_path': var_file_path}) + framework._dygraph_tracer().trace_op( + type='load_combine', + inputs={}, + outputs={'Out': load_var_list}, + attrs={'file_path': var_file_path}, + ) return load_var_dict @@ -723,17 +777,18 @@ def _construct_program_holders(model_path, model_filename=None): # [compatible] if assign model_filename, only can load one program as Layer.forward model_filename = os.path.basename(model_filename) model_file_path = os.path.join(model_path, model_filename) - model_name = model_filename[:-len(INFER_MODEL_SUFFIX)] - #Load every file that meets the requirements in the directory model_path. + model_name = model_filename[: -len(INFER_MODEL_SUFFIX)] + # Load every file that meets the requirements in the directory model_path. 
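The directory scan that follows maps each saved program file to a method name: <prefix>.pdmodel becomes forward, while <prefix>.<method>.pdmodel becomes that method. A small sketch of the naming convention, assuming INFER_MODEL_SUFFIX is the ".pdmodel" suffix used in this file; parse_method_name is a hypothetical helper, not Paddle API.

INFER_MODEL_SUFFIX = ".pdmodel"  # assumed value of the suffix constant

def parse_method_name(filename, model_name):
    # "model.pdmodel"        -> "forward"
    # "model.encode.pdmodel" -> "encode"
    if filename == model_name + INFER_MODEL_SUFFIX:
        return "forward"
    if filename.startswith(model_name) and filename.endswith(INFER_MODEL_SUFFIX):
        parts = filename[len(model_name):-len(INFER_MODEL_SUFFIX) + 1].split('.')
        if len(parts) == 3 and parts[1]:
            return parts[1]
    return None  # not a program file for this model

assert parse_method_name("model.pdmodel", "model") == "forward"
assert parse_method_name("model.encode.pdmodel", "model") == "encode"
assert parse_method_name("other.pdmodel", "model") is None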
for filename in os.listdir(model_path): if model_filename == filename: func_name = 'forward' model_file_path = os.path.join(model_path, model_filename) elif filename.endswith(INFER_MODEL_SUFFIX) and filename.startswith( - model_name): - parsing_names = filename[len(model_name - ):-len(INFER_MODEL_SUFFIX) + - 1].split('.') + model_name + ): + parsing_names = filename[ + len(model_name) : -len(INFER_MODEL_SUFFIX) + 1 + ].split('.') if len(parsing_names) == 3 and len(parsing_names[1]) > 0: func_name = parsing_names[1] model_file_path = os.path.join(model_path, filename) @@ -742,7 +797,8 @@ def _construct_program_holders(model_path, model_filename=None): else: continue program_holder_dict[func_name] = _ProgramHolder( - _load_program_desc(model_file_path)) + _load_program_desc(model_file_path) + ) else: for _, _, file_names in os.walk(model_path): for name in file_names: @@ -754,30 +810,32 @@ def _construct_program_holders(model_path, model_filename=None): else: method_name.replace('model', '') program_holder_dict[method_name] = _ProgramHolder( - _load_program_desc(model_file_path)) + _load_program_desc(model_file_path) + ) return program_holder_dict -def _construct_params_and_buffers(model_path, - programs, - params_filename=None, - append_suffix=True): +def _construct_params_and_buffers( + model_path, programs, params_filename=None, append_suffix=True +): var_info_filename = str(params_filename) + ".info" var_info_path = os.path.join(model_path, var_info_filename) params_path = os.path.join(model_path, str(params_filename)) if os.path.exists(var_info_path): - var_dict = _load_persistable_vars(model_path, var_info_path, - programs['forward'], params_filename) - model_name = params_filename[:-len(INFER_PARAMS_SUFFIX)] - #Load every file that meets the requirements in the directory model_path. + var_dict = _load_persistable_vars( + model_path, var_info_path, programs['forward'], params_filename + ) + model_name = params_filename[: -len(INFER_PARAMS_SUFFIX)] + # Load every file that meets the requirements in the directory model_path. 
for file_name in os.listdir(model_path): if file_name.startswith(model_name) and file_name.endswith( - INFER_PARAMS_SUFFIX): - parsing_names = file_name[len(model_name - ):-len(INFER_PARAMS_SUFFIX) + - 1].split('.') + INFER_PARAMS_SUFFIX + ): + parsing_names = file_name[ + len(model_name) : -len(INFER_PARAMS_SUFFIX) + 1 + ].split('.') if len(parsing_names) == 3 and len(parsing_names[1]) > 0: func_name = parsing_names[1] else: @@ -786,15 +844,17 @@ def _construct_params_and_buffers(model_path, continue var_info_path = os.path.join(model_path, var_info_filename) var_dict.update( - _load_persistable_vars(model_path, var_info_path, - programs[func_name], file_name)) + _load_persistable_vars( + model_path, var_info_path, programs[func_name], file_name + ) + ) elif params_filename is not None and not os.path.exists(params_path): # When saving XX, there is only '*.pdmodel' return dict() else: - var_dict = _load_persistable_vars_by_program(model_path, - programs['forward'], - params_filename) + var_dict = _load_persistable_vars_by_program( + model_path, programs['forward'], params_filename + ) if not append_suffix: var_dict = _remove_varname_suffix(var_dict, programs['forward']) @@ -807,13 +867,23 @@ def _valid_vars(vars): return vars if framework._in_eager_without_dygraph_check(): return [ - core.eager.Tensor(core.VarDesc.VarType.FP32, [], "Fake_var", - core.VarDesc.VarType.RAW, False) + core.eager.Tensor( + core.VarDesc.VarType.FP32, + [], + "Fake_var", + core.VarDesc.VarType.RAW, + False, + ) ] else: return [ - core.VarBase(core.VarDesc.VarType.FP32, [], "Fake_var", - core.VarDesc.VarType.RAW, False) + core.VarBase( + core.VarDesc.VarType.FP32, + [], + "Fake_var", + core.VarDesc.VarType.RAW, + False, + ) ] @@ -825,7 +895,8 @@ def _run_dygraph(instance, input, program_holder): if not isinstance(value, (np.ndarray, core.VarBase, core.eager.Tensor)): raise TypeError( "The type of input in TranslatedLayer must be numpy array or Variable(VarBase), but received %s." - % type(value)) + % type(value) + ) # NOTE: In order to unify the API, firstly convert the input to VarBase if isinstance(value, np.ndarray): if framework._in_eager_without_dygraph_check(): @@ -834,13 +905,16 @@ def _run_dygraph(instance, input, program_holder): name=program_holder.input_descs[i].name(), persistable=False, place=framework._current_expected_place(), - zero_copy=True) + zero_copy=True, + ) else: - var = core.VarBase(value=value, - name=program_holder.input_descs[i].name(), - persistable=False, - place=framework._current_expected_place(), - zero_copy=True) + var = core.VarBase( + value=value, + name=program_holder.input_descs[i].name(), + persistable=False, + place=framework._current_expected_place(), + zero_copy=True, + ) else: var = value # NOTE: we changed var name here, @@ -862,67 +936,112 @@ def _run_dygraph(instance, input, program_holder): else: raise ValueError( "The persistable variable %s does not exist in current TranslatedLayer." 
- % var_name) + % var_name + ) output_vars = [] for var_desc in program_holder.output_descs: if framework._in_eager_without_dygraph_check(): - var = core.eager.Tensor(dtype=var_desc.dtype(), - dims=var_desc.shape(), - name=var_desc.name(), - type=var_desc.type(), - persistable=False) + var = core.eager.Tensor( + dtype=var_desc.dtype(), + dims=var_desc.shape(), + name=var_desc.name(), + type=var_desc.type(), + persistable=False, + ) else: - var = core.VarBase(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), False) + var = core.VarBase( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) output_vars.append(var) # hold forward variables if framework._in_eager_without_dygraph_check(): tmp_scope_vec = [program_holder.scope] else: - tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [], - "program_out_scope", - core.VarDesc.VarType.STEP_SCOPES, True) + tmp_scope_vec = core.VarBase( + core.VarDesc.VarType.FP32, + [], + "program_out_scope", + core.VarDesc.VarType.STEP_SCOPES, + True, + ) tmp_scope_vec.value().set_scope(program_holder.scope) double_grad_vars = [] for var_desc in program_holder.double_grad_descs: if framework._in_eager_without_dygraph_check(): - var = core.eager.Tensor(dtype=var_desc.dtype(), - dims=var_desc.shape(), - name=var_desc.name(), - type=var_desc.type(), - persistable=False) + var = core.eager.Tensor( + dtype=var_desc.dtype(), + dims=var_desc.shape(), + name=var_desc.name(), + type=var_desc.type(), + persistable=False, + ) else: - var = core.VarBase(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), False) + var = core.VarBase( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) double_grad_vars.append(var) # 2. 
run program by op - trace_program = program_holder.infer_program if instance._is_test else program_holder.train_program - forward_program = program_holder._infer_program_desc if instance._is_test else program_holder.forward_program + trace_program = ( + program_holder.infer_program + if instance._is_test + else program_holder.train_program + ) + forward_program = ( + program_holder._infer_program_desc + if instance._is_test + else program_holder.forward_program + ) end_op_index = program_holder.infer_program.block(0).op_size() attrs = [ 'global_block', - trace_program.block(0), 'start_op_index', 0, 'end_op_index', - end_op_index, 'is_test', instance._is_test, 'program_id', - _hash_with_id(trace_program, instance) + trace_program.block(0), + 'start_op_index', + 0, + 'end_op_index', + end_op_index, + 'is_test', + instance._is_test, + 'program_id', + _hash_with_id(trace_program, instance), ] - use_interpretorcore = _is_enable_standalone_executor( - ) and _is_dy2st_enable_standalone_executor() + use_interpretorcore = ( + _is_enable_standalone_executor() + and _is_dy2st_enable_standalone_executor() + ) attrs.extend(('use_interpretorcore', use_interpretorcore)) if use_interpretorcore: attrs.extend( - ('forward_global_block', forward_program.block(0), - 'backward_global_block', program_holder.backward_program.block(0))) - - _legacy_C_ops.run_program(_valid_vars(input_vars), - _valid_vars(persistable_vars), - _valid_vars(output_vars), tmp_scope_vec, - _valid_vars(double_grad_vars), None, *attrs) + ( + 'forward_global_block', + forward_program.block(0), + 'backward_global_block', + program_holder.backward_program.block(0), + ) + ) + + _legacy_C_ops.run_program( + _valid_vars(input_vars), + _valid_vars(persistable_vars), + _valid_vars(output_vars), + tmp_scope_vec, + _valid_vars(double_grad_vars), + None, + *attrs + ) # NOTE: [ why need set param's gradient type here ] # if user set sparse gradient mode, the param's gradient @@ -951,8 +1070,11 @@ def _run_dygraph(instance, input, program_holder): def drop_scope_if_no_grad(instance, scope_vec): tracer = framework._dygraph_tracer() - scope = scope_vec.value().get_scope() if isinstance( - scope_vec, (core.VarBase)) else scope_vec[0] + scope = ( + scope_vec.value().get_scope() + if isinstance(scope_vec, (core.VarBase)) + else scope_vec[0] + ) if (not instance._is_test) and (not tracer._has_grad): scope.drop_kids() @@ -961,15 +1083,22 @@ def _run_static_graph(input, program_holder, trace_program): main_program = framework.default_main_program() param_var_names = _get_persistable_var_names(trace_program) _, dict_rename_var_old_new = _rename_var_program_desc( - trace_program, exclude=param_var_names) + trace_program, exclude=param_var_names + ) trace_program.flush() output_names = [var.name() for var in program_holder.output_descs] # append blocks from 'trace_program' - _append_block(main_program, trace_program, program_holder, input, - dict_rename_var_old_new) + _append_block( + main_program, + trace_program, + program_holder, + input, + dict_rename_var_old_new, + ) main_program._sync_with_cpp() - outs = _get_output_from_program(main_program, program_holder, - dict_rename_var_old_new) + outs = _get_output_from_program( + main_program, program_holder, dict_rename_var_old_new + ) if len(outs) == 1: outs = outs[0] return outs @@ -997,11 +1126,13 @@ def _collect_current_and_parent_var(program, block_idx): return vars -def _append_block(dest_program, - src_program_desc, - program_holder, - input_variables, - dict_rename_var_old_new=None): +def _append_block( + 
dest_program, + src_program_desc, + program_holder, + input_variables, + dict_rename_var_old_new=None, +): ''' Append Variables and Operators in 'src_program_desc' to dest_program. @@ -1015,28 +1146,35 @@ def _append_block(dest_program, ''' origin_block_idx = dest_program.current_block_idx - param_var_names = _collect_current_and_parent_var(dest_program, - origin_block_idx) - append_var_from_block_desc_static(dest_program.block(origin_block_idx), - src_program_desc.block(0), - exclude=param_var_names) + param_var_names = _collect_current_and_parent_var( + dest_program, origin_block_idx + ) + append_var_from_block_desc_static( + dest_program.block(origin_block_idx), + src_program_desc.block(0), + exclude=param_var_names, + ) name_inp_desc = [inp.name() for inp in program_holder.input_descs] input_names = [inp.name for inp in input_variables] if len(name_inp_desc) != len(input_names): raise ValueError( - "The number of input is invalid, expected {}, but received {}.". - format(len(name_inp_desc), len(input_names))) + "The number of input is invalid, expected {}, but received {}.".format( + len(name_inp_desc), len(input_names) + ) + ) for i, out_name in enumerate(name_inp_desc): if dict_rename_var_old_new: out_name = dict_rename_var_old_new[out_name] dest_program.block(origin_block_idx).append_op( type='assign', inputs={'X': [input_names[i]]}, - outputs={'Out': [out_name]}) + outputs={'Out': [out_name]}, + ) append_ops = append_op_from_block_desc_static( - dest_program.block(origin_block_idx), src_program_desc.block(0)) + dest_program.block(origin_block_idx), src_program_desc.block(0) + ) dest_program._sync_with_cpp() offset_block_idx = dest_program.num_blocks - 1 @@ -1050,11 +1188,12 @@ def _append_block(dest_program, else: parent_idx = origin_block_idx dest_block = dest_program._create_block(parent_idx=parent_idx) - append_var_from_block_desc_static(dest_block, - src_block, - exclude=param_var_names) + append_var_from_block_desc_static( + dest_block, src_block, exclude=param_var_names + ) append_ops += append_op_from_block_desc_static( - dest_block, src_block) + dest_block, src_block + ) dest_program._sync_with_cpp() for op in append_ops: @@ -1064,15 +1203,16 @@ def _append_block(dest_program, origin_id = sub.id if isinstance(sub, framework.Block): origin_id = sub.idx - op._set_attr('sub_block', - dest_program.block(offset_block_idx + origin_id)) + op._set_attr( + 'sub_block', dest_program.block(offset_block_idx + origin_id) + ) dest_program._sync_with_cpp() dest_program.current_block_idx = origin_block_idx -def _get_output_from_program(program, - program_holder, - dict_rename_var_old_new=None): +def _get_output_from_program( + program, program_holder, dict_rename_var_old_new=None +): """ Get output name of 'program' according to program_holder """ @@ -1121,20 +1261,21 @@ def append_op_from_desc_static(block, op_desc): op_type = op_desc.type() op_append = block.desc.append_op() op_append.copy_from(op_desc) - op = framework.Operator(block=block, - desc=op_append, - type=op_type, - inputs=None, - outputs=None, - attrs=None) + op = framework.Operator( + block=block, + desc=op_append, + type=op_type, + inputs=None, + outputs=None, + attrs=None, + ) block.ops.append(op) return op -def append_var_from_block_desc_static(block, - src_block_desc, - include=None, - exclude=None): +def append_var_from_block_desc_static( + block, src_block_desc, include=None, exclude=None +): """ Append Variables of 'src_block_desc' to current block. 
If 'include' is not `None`,variables that are not in include are not append. @@ -1153,13 +1294,14 @@ def append_var_from_block_desc_static(block, for var_desc in src_block_desc.all_vars(): var_desc_name = var_desc.name() should_append = (include is None or var_desc_name in include) and ( - exclude is None or var_desc_name not in exclude) + exclude is None or var_desc_name not in exclude + ) if not block.has_var(var_desc_name) and should_append: var_type = var_desc.type() if var_type in [ - core.VarDesc.VarType.SELECTED_ROWS, - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.LOD_TENSOR_ARRAY + core.VarDesc.VarType.SELECTED_ROWS, + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, ]: data_type = var_desc.dtype() var_shape = var_desc.shape() @@ -1167,8 +1309,8 @@ def append_var_from_block_desc_static(block, data_type = None var_shape = None if var_type in [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.LOD_TENSOR_ARRAY + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, ]: lod_level = var_desc.lod_level() else: @@ -1187,7 +1329,9 @@ def append_var_from_block_desc_static(block, shape=var_shape, lod_level=lod_level, persistable=var_desc.persistable(), - set_need_check_feed=var_desc.need_check_feed())) + set_need_check_feed=var_desc.need_check_feed(), + ) + ) return vars_append @@ -1312,8 +1456,9 @@ class TranslatedLayer(layers.Layer): # the TranslatedLayer object holded var names count started from 0 with unique_name.guard(): for name, var in persistable_vars.items(): - if isinstance(var, - (framework.ParamBase, framework.EagerParamBase)): + if isinstance( + var, (framework.ParamBase, framework.EagerParamBase) + ): dy_name = _generate_unique_var_name(PARAMETER_NAME_PREFIX) self._persistable_var_name_dict[name] = dy_name self.add_parameter(dy_name, var) @@ -1347,7 +1492,8 @@ class TranslatedLayer(layers.Layer): # 2. load layer parameters & buffers persistable_vars = _construct_params_and_buffers( - model_path, programs, params_filename) + model_path, programs, params_filename + ) # 3. construct TranslatedLayer object translated_layer = TranslatedLayer(programs, persistable_vars) @@ -1359,9 +1505,12 @@ class TranslatedLayer(layers.Layer): ins.name() for ins in program_holder.input_descs ] setattr( - TranslatedLayer, method_name, + TranslatedLayer, + method_name, TranslatedLayer._execution_method_creator( - method_name, program_holder)) + method_name, program_holder + ), + ) # 5. set TranslatedLayer's default mode to eval translated_layer.eval() @@ -1370,7 +1519,6 @@ class TranslatedLayer(layers.Layer): @staticmethod def _execution_method_creator(method_name, program_holder): - def __i_m_p_l__(self, *input): program_holder = self._program_holder_dict[__i_m_p_l__.__name__] # When using jit.save, it runs in static graph mode. @@ -1383,7 +1531,8 @@ class TranslatedLayer(layers.Layer): # because '_run_static_graph' modifies 'ProgramDesc', 'OpDesc.op_size()' will return a very large wrong number. # A Segmentation fault error may occur if used 'p=ProgramDesc(program_holder.infer_program)'. 
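The TranslatedLayer._execution_method_creator code above relies on a common pattern: build one callable per saved method name, fix its __name__, and attach it to the class with setattr so each instance dispatches by that name. A small stand-alone sketch of the pattern (the Demo class and impls dict are illustrative, not Paddle types):

class Demo:
    def __init__(self, impls):
        self._impls = impls  # method name -> plain python function

    @staticmethod
    def _method_creator(method_name):
        def __impl__(self, *args):
            # dispatch by the name this callable was given below
            return self._impls[__impl__.__name__](*args)

        __impl__.__name__ = method_name
        return __impl__

impls = {"forward": lambda x: x + 1, "double": lambda x: 2 * x}
for name in impls:
    setattr(Demo, name, Demo._method_creator(name))

layer = Demo(impls)
print(layer.forward(3))  # 4
print(layer.double(3))   # 6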
p = framework.Program._construct_from_desc( - core.ProgramDesc(program_holder.infer_program)) + core.ProgramDesc(program_holder.infer_program) + ) return _run_static_graph(input, program_holder, p.desc) __i_m_p_l__.__name__ = method_name @@ -1496,8 +1645,9 @@ class TranslatedLayer(layers.Layer): program_holder = self._program_holder_dict.get(method_name, None) if program_holder is None: raise ValueError( - "The method `%s` does not exist in loaded TranslatedLayer." % - method_name) + "The method `%s` does not exist in loaded TranslatedLayer." + % method_name + ) return program_holder def _input_spec(self, method_name='forward'): @@ -1507,9 +1657,11 @@ class TranslatedLayer(layers.Layer): # 2. build input spec by input desc input_spec = [] for var_desc in program_holder.input_descs: - spec = paddle.static.InputSpec(shape=var_desc.shape(), - dtype=var_desc.dtype(), - name=var_desc.name()) + spec = paddle.static.InputSpec( + shape=var_desc.shape(), + dtype=var_desc.dtype(), + name=var_desc.name(), + ) input_spec.append(spec) return input_spec @@ -1524,9 +1676,11 @@ class TranslatedLayer(layers.Layer): # NOTE(chenweihang): InputSpec describes a tensor, not just input. # Maybe the name is not good enough. Here we use InputSpec to # construct the description of Output tensor - spec = paddle.static.InputSpec(shape=var_desc.shape(), - dtype=var_desc.dtype(), - name=var_desc.name()) + spec = paddle.static.InputSpec( + shape=var_desc.shape(), + dtype=var_desc.dtype(), + name=var_desc.name(), + ) output_spec.append(spec) return output_spec diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py index c659af4a7832f22a96417ea32202a803dfa77a1a..acca96a937693079fca6b7fe406f6b2dc97c47c9 100644 --- a/python/paddle/fluid/dygraph/jit.py +++ b/python/paddle/fluid/dygraph/jit.py @@ -24,25 +24,65 @@ from typing import Text, Tuple, Any, List import paddle from paddle.fluid import core, dygraph -from paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy +from paddle.fluid.compiler import ( + BuildStrategy, + CompiledProgram, + ExecutionStrategy, +) from paddle.fluid.data_feeder import check_type from paddle.fluid.layers.utils import flatten, pack_sequence_as -from paddle.fluid.dygraph.base import program_desc_tracing_guard, switch_to_static_graph +from paddle.fluid.dygraph.base import ( + program_desc_tracing_guard, + switch_to_static_graph, +) from paddle.fluid.dygraph.dygraph_to_static import logging_utils -from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ConversionOptions, CONVERSION_OPTIONS -from paddle.fluid.dygraph.dygraph_to_static.logging_utils import set_code_level, set_verbosity -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction, unwrap_decorators -from paddle.fluid.dygraph.io import TranslatedLayer, INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX, INFER_PROPERTY_SUFFIX +from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ( + ConversionOptions, + CONVERSION_OPTIONS, +) +from paddle.fluid.dygraph.dygraph_to_static.logging_utils import ( + set_code_level, + set_verbosity, +) +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, + StaticFunction, + unwrap_decorators, +) +from paddle.fluid.dygraph.io import ( + TranslatedLayer, + INFER_MODEL_SUFFIX, + INFER_PARAMS_SUFFIX, + INFER_PARAMS_INFO_SUFFIX, + INFER_PROPERTY_SUFFIX, +) from paddle.fluid.dygraph.layers import Layer from paddle.fluid.executor import 
Executor, scope_guard -from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, EagerParamBase -from paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer +from paddle.fluid.framework import ( + Block, + ParamBase, + Program, + Variable, + Parameter, + EagerParamBase, +) +from paddle.fluid.framework import ( + _current_expected_place, + _dygraph_guard, + _dygraph_tracer, +) from paddle.fluid.framework import dygraph_only, _non_static_mode from paddle.fluid.wrapped_decorator import wrap_decorator __all__ = [ - 'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level', - 'set_verbosity', 'save', 'load', 'not_to_static' + 'TracedLayer', + 'declarative', + 'dygraph_to_static_func', + 'set_code_level', + 'set_verbosity', + 'save', + 'load', + 'not_to_static', ] @@ -62,8 +102,10 @@ def _extract_vars(inputs, result_list, err_tag='inputs'): _extract_vars(var, result_list, err_tag) else: raise TypeError( - "The type of 'each element of {}' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}." - .format(err_tag, type(inputs))) + "The type of 'each element of {}' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}.".format( + err_tag, type(inputs) + ) + ) def extract_vars(inputs, err_tag='inputs'): @@ -127,7 +169,8 @@ def _dygraph_to_static_func_(dygraph_func): logging_utils.warn( "The decorator 'dygraph_to_static_func' doesn't work in " "dygraph mode or set ProgramTranslator.enable to False. " - "We will just return dygraph output.") + "We will just return dygraph output." + ) return dygraph_func(*args, **kwargs) static_func = program_translator.get_func(dygraph_func) return static_func(*args, **kwargs) @@ -158,10 +201,9 @@ def copy_decorator_attrs(original_func, decorated_obj): return decorated_obj -def declarative(function=None, - input_spec=None, - build_strategy=None, - property=False): +def declarative( + function=None, input_spec=None, build_strategy=None, property=False +): """ Converts imperative dygraph APIs into declarative function APIs. Decorator @declarative handles the Program and Executor of static mode and returns @@ -213,20 +255,25 @@ def declarative(function=None, _, python_func = unwrap_decorators(python_func) # Step 2. copy some attributes from original python function. - static_layer = copy_decorator_attrs(original_func=python_func, - decorated_obj=StaticFunction( - function=python_func, - input_spec=input_spec, - build_strategy=build_strategy, - property=property)) + static_layer = copy_decorator_attrs( + original_func=python_func, + decorated_obj=StaticFunction( + function=python_func, + input_spec=input_spec, + build_strategy=build_strategy, + property=property, + ), + ) return static_layer build_strategy = build_strategy or BuildStrategy() if not isinstance(build_strategy, BuildStrategy): raise TypeError( - "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}" - .format(type(build_strategy).__name__)) + "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".format( + type(build_strategy).__name__ + ) + ) # for usage: `declarative(foo, ...)` if function is not None: @@ -234,8 +281,10 @@ def declarative(function=None, if isinstance(function.forward, StaticFunction): class_name = function.__class__.__name__ logging_utils.warn( - "`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one." 
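declarative above follows the decorator-with-optional-arguments pattern: used bare it wraps the function immediately, used with keyword arguments it returns the real decorator. A minimal sketch of that control flow under hypothetical names (declarative_like does no graph tracing, it only mirrors the dispatch):

import functools

def declarative_like(function=None, input_spec=None):
    def decorated(python_func):
        @functools.wraps(python_func)
        def static_layer(*args, **kwargs):
            # a real implementation would trace python_func into a static graph
            return python_func(*args, **kwargs)

        static_layer.input_spec = input_spec
        return static_layer

    if function is not None:   # usage: declarative_like(foo, ...)
        return decorated(function)
    return decorated           # usage: @declarative_like(input_spec=[...])

@declarative_like(input_spec=["x"])
def add_one(x):
    return x + 1

print(add_one(2), add_one.input_spec)  # 3 ['x']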
- .format(class_name)) + "`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one.".format( + class_name + ) + ) function.forward = decorated(function.forward) return function else: @@ -286,7 +335,6 @@ def not_to_static(func=None): class _SaveLoadConfig(object): - def __init__(self): self._output_spec = None self._model_filename = None @@ -321,12 +369,14 @@ class _SaveLoadConfig(object): if not isinstance(spec, list): raise TypeError( "The config `output_spec` should be 'list', but received input type is %s." - % type(input)) + % type(input) + ) for var in spec: if not isinstance(var, core.VarBase): raise TypeError( "The element in config `output_spec` list should be 'Variable', but received element's type is %s." - % type(var)) + % type(var) + ) self._output_spec = spec @property @@ -340,7 +390,8 @@ class _SaveLoadConfig(object): if not isinstance(filename, str): raise TypeError( "The config `model_filename` should be str, but received input's type is %s." - % type(filename)) + % type(filename) + ) if len(filename) == 0: raise ValueError("The config `model_filename` is empty string.") self._model_filename = filename @@ -356,7 +407,8 @@ class _SaveLoadConfig(object): if not isinstance(filename, str): raise TypeError( "The config `params_filename` should be str, but received input's type is %s." - % type(filename)) + % type(filename) + ) if len(filename) == 0: raise ValueError("The config `params_filename` is empty string.") self._params_filename = filename @@ -372,14 +424,18 @@ class _SaveLoadConfig(object): if not isinstance(value, bool): raise TypeError( "The config `keep_name_table` should be bool value, but received input's type is %s." - % type(value)) + % type(value) + ) self._keep_name_table = value def _parse_save_configs(configs): supported_configs = [ - 'output_spec', "with_hook", "combine_params", "clip_extra", - "skip_forward" + 'output_spec', + "with_hook", + "combine_params", + "clip_extra", + "skip_forward", ] # input check @@ -387,7 +443,8 @@ def _parse_save_configs(configs): if key not in supported_configs: raise ValueError( "The additional config (%s) of `paddle.jit.save` is not supported." - % (key)) + % (key) + ) # construct inner config inner_config = _SaveLoadConfig() @@ -408,7 +465,8 @@ def _parse_load_config(configs): if key not in supported_configs: raise ValueError( "The additional config (%s) of `paddle.jit.load` is not supported." - % (key)) + % (key) + ) # construct inner config inner_config = _SaveLoadConfig() @@ -419,14 +477,18 @@ def _parse_load_config(configs): def _get_input_var_names(inputs, input_spec): - name_none_error = "The %s's name is None. " \ - "When using jit.save, please set InputSepc's name in " \ - "to_static(input_spec=[]) and jit.save(input_spec=[]) " \ + name_none_error = ( + "The %s's name is None. " + "When using jit.save, please set InputSepc's name in " + "to_static(input_spec=[]) and jit.save(input_spec=[]) " "and make sure they are consistent." - name_no_exists_error = "The tensor `%s` does not exists. " \ - "Please make sure the name of InputSpec or example Tensor " \ - "in input_spec is the same as the name of InputSpec in " \ + ) + name_no_exists_error = ( + "The tensor `%s` does not exists. " + "Please make sure the name of InputSpec or example Tensor " + "in input_spec is the same as the name of InputSpec in " "`to_static` decorated on the Layer.forward method." 
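_parse_save_configs and _parse_load_config above validate the extra keyword configs against a fixed whitelist before copying them onto an inner config object. Roughly, under simplified names:

SUPPORTED_SAVE_CONFIGS = (
    "output_spec", "with_hook", "combine_params", "clip_extra", "skip_forward"
)

def parse_save_configs(configs):
    for key in configs:
        if key not in SUPPORTED_SAVE_CONFIGS:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.save` is not supported." % key
            )
    inner = dict.fromkeys(SUPPORTED_SAVE_CONFIGS)  # stand-in for _SaveLoadConfig
    inner.update(configs)
    return inner

print(parse_save_configs({"clip_extra": False})["clip_extra"])  # False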
+ ) result_list = [] input_var_names = [ var.name for var in flatten(inputs) if isinstance(var, Variable) @@ -437,7 +499,8 @@ def _get_input_var_names(inputs, input_spec): else: # fileter out non-tensor type spec infos. input_spec = [ - spec for spec in input_spec + spec + for spec in input_spec if isinstance(spec, paddle.static.InputSpec) ] @@ -469,10 +532,12 @@ def _get_input_var_names(inputs, input_spec): def _get_output_vars(outputs, output_spec, with_hook=False): - name_no_exists_error = "The tensor `%s` does not exists. " \ - "Please make sure the name of example Tensor " \ - "in configs.output_spec is the output tensor of " \ + name_no_exists_error = ( + "The tensor `%s` does not exists. " + "Please make sure the name of example Tensor " + "in configs.output_spec is the output tensor of " "Layer.forward method." + ) if output_spec and with_hook: raise RuntimeError( "Currently not support specify output_spec while founding pre/post hooks in your outermost layer." @@ -519,12 +584,14 @@ def _build_load_path_and_config(path, config): raise ValueError( "The %s.pdmodel and %s directory exist at the same time, " "don't know which one to load, please make sure that the specified target " - "of ``path`` is unique." % (path, path)) + "of ``path`` is unique." % (path, path) + ) elif not prefix_format_exist and not directory_format_exist: - raise ValueError("The ``path`` (%s) to load model not exists. " - "Please make sure that *.pdmodel exists or " - "don't using ``skip_forward=True`` to jit.save." % - path) + raise ValueError( + "The ``path`` (%s) to load model not exists. " + "Please make sure that *.pdmodel exists or " + "don't using ``skip_forward=True`` to jit.save." % path + ) else: if prefix_format_exist: file_prefix = os.path.basename(path) @@ -533,13 +600,15 @@ def _build_load_path_and_config(path, config): warnings.warn( "When loading the result saved with the " "specified file prefix, the ``model_filename`` config does " - "not take effect.") + "not take effect." + ) config.model_filename = file_prefix + INFER_MODEL_SUFFIX if config.params_filename is not None: warnings.warn( "When loading the result saved with the " "specified file prefix, the ``params_filename`` config does " - "not take effect.") + "not take effect." + ) config.params_filename = file_prefix + INFER_PARAMS_SUFFIX else: # Compatible with the old save_inference_model format @@ -553,7 +622,7 @@ _save_pre_hooks = [] class HookRemoveHelper(object): - """ A HookRemoveHelper that can be used to remove hook. """ + """A HookRemoveHelper that can be used to remove hook.""" def __init__(self, hook): self._hook = hook @@ -638,7 +707,6 @@ def _remove_save_pre_hook(hook): @wrap_decorator def _run_save_pre_hooks(func): - def wrapper(layer, path, input_spec=None, **configs): global _save_pre_hooks for hook in _save_pre_hooks: @@ -826,11 +894,15 @@ def save(layer, path, input_spec=None, **configs): "The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False." ) - if not (isinstance(layer, Layer) or inspect.isfunction(layer) - or isinstance(layer, StaticFunction)): + if not ( + isinstance(layer, Layer) + or inspect.isfunction(layer) + or isinstance(layer, StaticFunction) + ): raise TypeError( "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s." 
- % type(layer)) + % type(layer) + ) elif inspect.isfunction(layer) or isinstance(layer, StaticFunction): warnings.warn( 'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.' @@ -852,7 +924,8 @@ def save(layer, path, input_spec=None, **configs): raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but received " - "file_prefix is empty string.") + "file_prefix is empty string." + ) dirname = os.path.dirname(path) if dirname and not os.path.exists(dirname): @@ -864,23 +937,28 @@ def save(layer, path, input_spec=None, **configs): if isinstance(layer, Layer): for attr_func in dir(inner_layer): static_func = getattr(inner_layer, attr_func, None) - if isinstance(static_func, - StaticFunction) and 'forward' != attr_func: + if ( + isinstance(static_func, StaticFunction) + and 'forward' != attr_func + ): raise ValueError( "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s." - % type(input_spec)) + % type(input_spec) + ) if not isinstance(input_spec, (list, tuple)): raise TypeError( "The input input_spec should be 'list', but received input_spec's type is %s." - % type(input_spec)) + % type(input_spec) + ) inner_input_spec = [] for var in flatten(input_spec): if isinstance(var, paddle.static.InputSpec): inner_input_spec.append(var) elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)): inner_input_spec.append( - paddle.static.InputSpec.from_tensor(var)) + paddle.static.InputSpec.from_tensor(var) + ) else: # NOTE(Aurelius84): Support non-Tensor type in `input_spec`. inner_input_spec.append(var) @@ -917,12 +995,18 @@ def save(layer, path, input_spec=None, **configs): # property method to be exported immediate_val = static_func() property_vals.append( - (immediate_val, - layer.__class__.__name__ + '.' + attr_func)) + ( + immediate_val, + layer.__class__.__name__ + '.' + attr_func, + ) + ) continue - concrete_program = static_func.concrete_program_specify_input_spec( - inner_input_spec, with_hook=with_hook) + concrete_program = ( + static_func.concrete_program_specify_input_spec( + inner_input_spec, with_hook=with_hook + ) + ) elif 'forward' == attr_func: if configs.skip_forward: # do not jit.save forward function @@ -932,12 +1016,17 @@ def save(layer, path, input_spec=None, **configs): # inner_input_spec is list[InputSpec], it should be packed with same structure # as original input_spec here. 
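The comment just above explains why a nested input_spec is flattened, converted leaf by leaf, and then re-packed with pack_sequence_as so its nesting matches the original. The following stand-ins for flatten and pack_sequence_as (much simpler than the Paddle utilities, handling lists and tuples only) illustrate the round trip:

def flatten(nested):
    if isinstance(nested, (list, tuple)):
        out = []
        for item in nested:
            out.extend(flatten(item))
        return out
    return [nested]

def pack_sequence_as(structure, flat):
    values = iter(flat)

    def rebuild(node):
        if isinstance(node, (list, tuple)):
            return type(node)(rebuild(child) for child in node)
        return next(values)

    return rebuild(structure)

spec = [("a", "b"), ["c"]]
leaves = flatten(spec)                         # ['a', 'b', 'c']
converted = [leaf.upper() for leaf in leaves]  # stand-in for InputSpec.from_tensor
print(pack_sequence_as(spec, converted))       # [('A', 'B'), ['C']]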
if inner_input_spec: - inner_input_spec = pack_sequence_as(input_spec, - inner_input_spec) - static_forward = declarative(inner_layer.forward, - input_spec=inner_input_spec) - concrete_program = static_forward.concrete_program_specify_input_spec( - with_hook=with_hook) + inner_input_spec = pack_sequence_as( + input_spec, inner_input_spec + ) + static_forward = declarative( + inner_layer.forward, input_spec=inner_input_spec + ) + concrete_program = ( + static_forward.concrete_program_specify_input_spec( + with_hook=with_hook + ) + ) # the input_spec has been used in declarative, which is equal to # @declarative with input_spec and jit.save without input_spec, # avoid needless warning @@ -953,20 +1042,27 @@ def save(layer, path, input_spec=None, **configs): property_vals.append((immediate_val, attr_func)) continue - concrete_program = attr_func.concrete_program_specify_input_spec( - inner_input_spec) + concrete_program = ( + attr_func.concrete_program_specify_input_spec( + inner_input_spec + ) + ) else: if inner_input_spec: - inner_input_spec = pack_sequence_as(input_spec, - inner_input_spec) - static_function = declarative(attr_func, - input_spec=inner_input_spec) + inner_input_spec = pack_sequence_as( + input_spec, inner_input_spec + ) + static_function = declarative( + attr_func, input_spec=inner_input_spec + ) concrete_program = static_function.concrete_program if static_function._class_instance is None: warnings.warn( - '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`' - .format(layer)) + '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.format( + layer + ) + ) # when save multi `StaticFunction`, all `StaticFunction` share params. 
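The bookkeeping that follows this comment records, for every parameter or buffer in the static state dict, its structured name plus the stop_gradient and trainable flags, keyed by the variable's graph name. A toy version of that recording step, with FakeParam as a stand-in for a Paddle parameter:

class FakeParam:
    def __init__(self, name, stop_gradient, trainable):
        self.name = name                    # graph (variable) name
        self.stop_gradient = stop_gradient
        self.trainable = trainable

# structured name -> parameter, as a to_static_state_dict() would return
state_dict = {
    "linear.weight": FakeParam("linear_0.w_0", False, True),
    "linear.bias": FakeParam("linear_0.b_0", False, True),
}

extra_var_info = {}
for structured_name, param in state_dict.items():
    extra_var_info[param.name] = {
        "structured_name": structured_name,
        "stop_gradient": param.stop_gradient,
        "trainable": param.trainable,
    }

print(extra_var_info["linear_0.w_0"]["structured_name"])  # linear.weight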
dygraph_state_dict = None @@ -974,7 +1070,8 @@ def save(layer, path, input_spec=None, **configs): dygraph_state_dict = inner_layer.to_static_state_dict() elif isinstance(attr_func, StaticFunction): if attr_func._class_instance: - dygraph_state_dict = attr_func._class_instance.to_static_state_dict( + dygraph_state_dict = ( + attr_func._class_instance.to_static_state_dict() ) if dygraph_state_dict: @@ -998,19 +1095,25 @@ def save(layer, path, input_spec=None, **configs): tgt_var.set_vocab(scr_tensor) else: param_or_buffer_tensor = scope.var( - param_or_buffer.name).get_tensor() - #src_tensor = param_or_buffer.value().get_tensor() - src_tensor = state_var_dict[ - param_or_buffer.name].value().get_tensor() + param_or_buffer.name + ).get_tensor() + # src_tensor = param_or_buffer.value().get_tensor() + src_tensor = ( + state_var_dict[param_or_buffer.name] + .value() + .get_tensor() + ) param_or_buffer_tensor._share_data_with(src_tensor) # record var info if param_or_buffer.name not in extra_var_info: extra_info_dict = dict() if param_or_buffer.name in state_names_dict: extra_info_dict['structured_name'] = state_names_dict[ - param_or_buffer.name] + param_or_buffer.name + ] extra_info_dict[ - 'stop_gradient'] = param_or_buffer.stop_gradient + 'stop_gradient' + ] = param_or_buffer.stop_gradient if isinstance(param_or_buffer, (ParamBase, EagerParamBase)): extra_info_dict['trainable'] = param_or_buffer.trainable extra_var_info[param_or_buffer.name] = extra_info_dict @@ -1024,8 +1127,9 @@ def save(layer, path, input_spec=None, **configs): # - prune inputs: # - the input_spec length < len((concrete_program.inputs) - 1 # - the input_spec's name should be in concrete_program.inputs - input_var_names = _get_input_var_names(concrete_program.inputs, - inner_input_spec) + input_var_names = _get_input_var_names( + concrete_program.inputs, inner_input_spec + ) # NOTE(chenweihang): [ Get output variables ] # the rule is like [ Get input variables name ]. For output var, @@ -1033,8 +1137,9 @@ def save(layer, path, input_spec=None, **configs): # var name of output, and we don't recommended to use output_spec # print(concrete_program.main_program) # print(concrete_program.outputs, configs.output_spec) - output_vars = _get_output_vars(concrete_program.outputs, - configs.output_spec, with_hook) + output_vars = _get_output_vars( + concrete_program.outputs, configs.output_spec, with_hook + ) # 5. save inference model from paddle.fluid.io import save_inference_model @@ -1048,7 +1153,9 @@ def save(layer, path, input_spec=None, **configs): params_filename = file_prefix + INFER_PARAMS_SUFFIX else: model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX - params_filename = file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX + params_filename = ( + file_prefix + '.' 
+ attr_func + INFER_PARAMS_SUFFIX + ) with scope_guard(scope): save_inference_model( @@ -1061,12 +1168,14 @@ def save(layer, path, input_spec=None, **configs): params_filename=params_filename, export_for_deployment=configs._export_for_deployment, program_only=configs._program_only, - clip_extra=configs.clip_extra) + clip_extra=configs.clip_extra, + ) if combine_params: clone_main_program = concrete_program.main_program.clone() clone_main_program = clone_main_program._prune_with_input( - input_var_names, output_vars) + input_var_names, output_vars + ) for block in clone_main_program.blocks: combine_vars.update(block.vars) @@ -1080,15 +1189,16 @@ def save(layer, path, input_spec=None, **configs): params_filename = file_prefix + INFER_PARAMS_SUFFIX with scope_guard(scope): - paddle.static.save_vars(Executor(_current_expected_place()), - dirname=model_path, - vars=list( - filter(paddle.fluid.io.is_persistable, - ordered_vars)), - filename=params_filename) + paddle.static.save_vars( + Executor(_current_expected_place()), + dirname=model_path, + vars=list(filter(paddle.fluid.io.is_persistable, ordered_vars)), + filename=params_filename, + ) # save property - property_save_path = os.path.join(os.path.normpath(model_path), - file_prefix + INFER_PROPERTY_SUFFIX) + property_save_path = os.path.join( + os.path.normpath(model_path), file_prefix + INFER_PROPERTY_SUFFIX + ) _save_property(property_save_path, property_vals) # NOTE(chenweihang): [ Save extra variable info ] @@ -1350,11 +1460,9 @@ def load(path, **configs): @dygraph_only -def _trace(layer, - inputs, - feed_prefix='feed_', - fetch_prefix='fetch_', - tmp_prefix='t_'): +def _trace( + layer, inputs, feed_prefix='feed_', fetch_prefix='fetch_', tmp_prefix='t_' +): assert isinstance(layer, Layer) if not isinstance(inputs, (list, tuple)): @@ -1372,8 +1480,14 @@ def _trace(layer, outputs = original_outputs out_vars = extract_vars(outputs, err_tag='outputs') - program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc( - var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix) + ( + program_desc, + feed_names, + fetch_names, + parameters, + ) = tracer.create_program_desc( + var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix + ) tracer.reset() with _dygraph_guard(None): @@ -1483,7 +1597,8 @@ class TracedLayer(object): assert isinstance( layer, Layer ), "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.".format( - type(layer)) + type(layer) + ) outs, prog, feed, fetch, parameters = _trace(layer, inputs) traced = TracedLayer(prog, parameters, feed, fetch) return outs, traced @@ -1533,25 +1648,30 @@ class TracedLayer(object): assert isinstance( build_strategy, (type(None), BuildStrategy) ), "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format( - type(build_strategy)) + type(build_strategy) + ) assert isinstance( exec_strategy, (type(None), ExecutionStrategy) ), "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format( - type(exec_strategy)) + type(exec_strategy) + ) self._build_strategy = build_strategy self._exec_strategy = exec_strategy @switch_to_static_graph def _compile(self): self._compiled_program = CompiledProgram( - self._program).with_data_parallel( - build_strategy=self._build_strategy, - exec_strategy=self._exec_strategy, - places=self._place) + self._program + ).with_data_parallel( + 
build_strategy=self._build_strategy, + exec_strategy=self._exec_strategy, + places=self._place, + ) def _build_feed(self, inputs): - assert isinstance(inputs, (list, tuple)), \ - "Inputs should be a list or tuple of variables" + assert isinstance( + inputs, (list, tuple) + ), "Inputs should be a list or tuple of variables" assert len(inputs) == len(self._feed_names) feed_dict = {} if _non_static_mode(): @@ -1565,9 +1685,9 @@ class TracedLayer(object): @switch_to_static_graph def _run(self, feed): - return self._exe.run(self._compiled_program, - feed=feed, - fetch_list=self._fetch_names) + return self._exe.run( + self._compiled_program, feed=feed, fetch_list=self._fetch_names + ) def __call__(self, inputs): with scope_guard(self._scope): @@ -1631,22 +1751,40 @@ class TracedLayer(object): fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) print(fetch.shape) # (2, 10) """ - check_type(path, "path", str, - "fluid.dygraph.jit.TracedLayer.save_inference_model") - check_type(feed, "feed", (type(None), list), - "fluid.dygraph.jit.TracedLayer.save_inference_model") + check_type( + path, + "path", + str, + "fluid.dygraph.jit.TracedLayer.save_inference_model", + ) + check_type( + feed, + "feed", + (type(None), list), + "fluid.dygraph.jit.TracedLayer.save_inference_model", + ) if isinstance(feed, list): for f in feed: check_type( - f, "each element of feed", int, - "fluid.dygraph.jit.TracedLayer.save_inference_model") - check_type(fetch, "fetch", (type(None), list), - "fluid.dygraph.jit.TracedLayer.save_inference_model") + f, + "each element of feed", + int, + "fluid.dygraph.jit.TracedLayer.save_inference_model", + ) + check_type( + fetch, + "fetch", + (type(None), list), + "fluid.dygraph.jit.TracedLayer.save_inference_model", + ) if isinstance(fetch, list): for f in fetch: check_type( - f, "each element of fetch", int, - "fluid.dygraph.jit.TracedLayer.save_inference_model") + f, + "each element of fetch", + int, + "fluid.dygraph.jit.TracedLayer.save_inference_model", + ) clip_extra = kwargs.get('clip_extra', True) # path check file_prefix = os.path.basename(path) @@ -1654,7 +1792,8 @@ class TracedLayer(object): raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but received " - "file_prefix is empty string.") + "file_prefix is empty string." 
+ ) dirname = os.path.dirname(path) if dirname and not os.path.exists(dirname): @@ -1680,11 +1819,13 @@ class TracedLayer(object): model_filename = file_prefix + INFER_MODEL_SUFFIX params_filename = file_prefix + INFER_PARAMS_SUFFIX - save_inference_model(dirname=dirname, - feeded_var_names=feeded_var_names, - target_vars=target_vars, - executor=self._exe, - main_program=self._program.clone(), - model_filename=model_filename, - params_filename=params_filename, - clip_extra=clip_extra) + save_inference_model( + dirname=dirname, + feeded_var_names=feeded_var_names, + target_vars=target_vars, + executor=self._exe, + main_program=self._program.clone(), + model_filename=model_filename, + params_filename=params_filename, + clip_extra=clip_extra, + ) diff --git a/python/paddle/fluid/dygraph/layer_hooks.py b/python/paddle/fluid/dygraph/layer_hooks.py index a231e7cf8745a5eba707bb9c79018a8498365ff8..8a373cd17c86d597350c371db6d6df4fa7803a8e 100644 --- a/python/paddle/fluid/dygraph/layer_hooks.py +++ b/python/paddle/fluid/dygraph/layer_hooks.py @@ -37,13 +37,16 @@ def record_program_ops_pre_hook(layer, inputs): if not _non_static_mode(): if layer._op_recorder.start < 0: layer._op_recorder.start = len( - default_main_program().current_block().ops) + default_main_program().current_block().ops + ) layer._op_recorder.is_valid = True else: layer._op_recorder.is_valid = False warnings.warn( - "{} has recorded the op information before. Please check whether you call this layer twice." - .format(layer._full_name)) + "{} has recorded the op information before. Please check whether you call this layer twice.".format( + layer._full_name + ) + ) return None @@ -56,7 +59,7 @@ def set_op_customized_attrs_post_hook(layer, inputs, outputs): start = layer._op_recorder.start end = len(default_main_program().current_block().ops) - assert (start >= 0 and end >= start) + assert start >= 0 and end >= start ops = default_main_program().current_block().ops[start:end] layer._op_recorder.end = end diff --git a/python/paddle/fluid/dygraph/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py index bdd34abd5a86537e344d4c17e643a9ee71a2a9cf..b07798b72e64273121a17eac12aefee22f816a7d 100644 --- a/python/paddle/fluid/dygraph/layer_object_helper.py +++ b/python/paddle/fluid/dygraph/layer_object_helper.py @@ -22,16 +22,17 @@ from ..dygraph_utils import _append_activation_in_dygraph class LayerObjectHelper(LayerHelperBase): - def __init__(self, name): super(LayerObjectHelper, self).__init__(name, layer_type=name) - def append_op(self, - type=None, - inputs=None, - outputs=None, - attrs=None, - stop_gradient=None): + def append_op( + self, + type=None, + inputs=None, + outputs=None, + attrs=None, + stop_gradient=None, + ): """append an operator for this layer object. 
Args: @@ -48,7 +49,8 @@ class LayerObjectHelper(LayerHelperBase): inputs=inputs, outputs=outputs, attrs=attrs, - stop_gradient=stop_gradient) + stop_gradient=stop_gradient, + ) def _multiple_input(self, inputs_in): inputs = inputs_in @@ -73,8 +75,9 @@ class LayerObjectHelper(LayerHelperBase): param_attr = [param_attr] if len(param_attr) != 1 and len(param_attr) != length: - raise ValueError("parameter number mismatch in {}".format( - self.name)) + raise ValueError( + "parameter number mismatch in {}".format(self.name) + ) elif len(param_attr) == 1 and length != 1: tmp = [None] * length for i in range(length): @@ -93,8 +96,9 @@ class LayerObjectHelper(LayerHelperBase): """ param_attr_in = ParamAttr._to_attr(param_attr_in) if isinstance(param_attr_in, bool): - raise ValueError('Param_attr should not be False in {}'.format( - self.name)) + raise ValueError( + 'Param_attr should not be False in {}'.format(self.name) + ) inputs = inputs_in if (inputs_in is not None) else [] inputs = self._multiple_input(inputs) param_attrs = self._multiple_param_attr(len(inputs), param_attr_in) @@ -116,8 +120,10 @@ class LayerObjectHelper(LayerHelperBase): if dtype is None: dtype = each.dtype elif dtype != each.dtype: - raise ValueError("Data Type mismatch: %d to %d in %s" % - (dtype, each.dtype, self.name)) + raise ValueError( + "Data Type mismatch: %d to %d in %s" + % (dtype, each.dtype, self.name) + ) return dtype def get_parameter(self, name): @@ -130,8 +136,9 @@ class LayerObjectHelper(LayerHelperBase): """ param = self.main_program.global_block().var(name) if not isinstance(param, Parameter): - raise ValueError("no Parameter name %s found in %s" % - (name, self.name)) + raise ValueError( + "no Parameter name %s found in %s" % (name, self.name) + ) return param # TODO: this should not be called anymore after all activation func move to Layers @@ -153,7 +160,8 @@ class LayerObjectHelper(LayerHelperBase): act = {'type': act} else: raise TypeError( - str(act) + " should be unicode or str in %s ", self.name) + str(act) + " should be unicode or str in %s ", self.name + ) if (use_cudnn is not None) and use_cudnn: act['use_cudnn'] = use_cudnn @@ -162,15 +170,18 @@ class LayerObjectHelper(LayerHelperBase): act['use_mkldnn'] = use_mkldnn act_type = act.pop('type') if _non_static_mode(): - res = _append_activation_in_dygraph(input_var, act_type, use_cudnn, - use_mkldnn) + res = _append_activation_in_dygraph( + input_var, act_type, use_cudnn, use_mkldnn + ) return res else: tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) - self.append_op(type=act_type, - inputs={"X": [input_var]}, - outputs={"Out": [tmp]}, - attrs=act) + self.append_op( + type=act_type, + inputs={"X": [input_var]}, + outputs={"Out": [tmp]}, + attrs=act, + ) return tmp def is_instance(self, param, cls): @@ -186,4 +197,8 @@ class LayerObjectHelper(LayerHelperBase): if not isinstance(param, cls): raise TypeError( "The input {0} parameter of method {1} must be {2}, in layer {3}", - param, self.layer_type, cls.__name__, self.name) + param, + self.layer_type, + cls.__name__, + self.name, + ) diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 29f2e186bd2e794bf453b6c6a4da222e7ed0357d..889a910d8b9ac7420b93e2c2e3203d9f37011962 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -31,12 +31,25 @@ from . import parallel_helper from .. 
import unique_name from paddle.fluid import core from .layer_object_helper import LayerObjectHelper -from .layer_hooks import record_program_ops_pre_hook, set_op_customized_attrs_post_hook, LayerOpsRecoder -from .base import program_desc_tracing_guard, param_guard, in_declarative_mode, _convert_into_variable +from .layer_hooks import ( + record_program_ops_pre_hook, + set_op_customized_attrs_post_hook, + LayerOpsRecoder, +) +from .base import ( + program_desc_tracing_guard, + param_guard, + in_declarative_mode, + _convert_into_variable, +) from paddle.fluid import framework from ..param_attr import ParamAttr from paddle.fluid.executor import Executor, global_scope -from paddle.fluid.framework import _non_static_mode, convert_np_dtype_to_dtype_, in_dygraph_mode +from paddle.fluid.framework import ( + _non_static_mode, + convert_np_dtype_to_dtype_, + in_dygraph_mode, +) from paddle.fluid.framework import Program, program_guard from paddle.fluid.framework import _current_expected_place as _get_device from paddle.fluid.core import VarDesc @@ -66,7 +79,7 @@ def _addindent(string, indent): class HookRemoveHelper(object): - """ A HookRemoveHelper that can be used to remove hook. """ + """A HookRemoveHelper that can be used to remove hook.""" next_hook_id = 0 @@ -397,12 +410,14 @@ class Layer(object): self._forward_pre_hooks[hook_remove_helper._hook_id] = hook return hook_remove_helper - def create_parameter(self, - shape, - attr=None, - dtype=None, - is_bias=False, - default_initializer=None): + def create_parameter( + self, + shape, + attr=None, + dtype=None, + is_bias=False, + default_initializer=None, + ): """Create parameters for this layer. Parameters: @@ -442,12 +457,15 @@ class Layer(object): temp_attr = copy.deepcopy(attr) if isinstance(temp_attr, str) and temp_attr == "": temp_attr = None - return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, - default_initializer) - - @deprecated(since="2.0.0", - update_to="paddle.nn.Layer.create_tensor", - reason="New api in create_tensor, easier to use.") + return self._helper.create_parameter( + temp_attr, shape, dtype, is_bias, default_initializer + ) + + @deprecated( + since="2.0.0", + update_to="paddle.nn.Layer.create_tensor", + reason="New api in create_tensor, easier to use.", + ) def create_variable(self, name=None, persistable=None, dtype=None): """ @@ -487,14 +505,16 @@ class Layer(object): if name is not None: var_name = ".".join([self._full_name, name]) else: - var_name = unique_name.generate(".".join( - [self._full_name, "_generated_var"])) + var_name = unique_name.generate( + ".".join([self._full_name, "_generated_var"]) + ) return self._helper.main_program.current_block().create_var( name=var_name, persistable=persistable, dtype=dtype, - type=core.VarDesc.VarType.LOD_TENSOR) + type=core.VarDesc.VarType.LOD_TENSOR, + ) # TODO: Add more parameter list when we need them def create_tensor(self, name=None, persistable=None, dtype=None): @@ -537,14 +557,16 @@ class Layer(object): if name is not None: var_name = ".".join([self._full_name, name]) else: - var_name = unique_name.generate(".".join( - [self._full_name, "_generated_var"])) + var_name = unique_name.generate( + ".".join([self._full_name, "_generated_var"]) + ) return self._helper.main_program.current_block().create_var( name=var_name, persistable=persistable, dtype=dtype, - type=core.VarDesc.VarType.LOD_TENSOR) + type=core.VarDesc.VarType.LOD_TENSOR, + ) def parameters(self, include_sublayers=True): """Returns a list of all Parameters from current layer and its 
sub-layers. @@ -562,8 +584,10 @@ class Layer(object): """ ret = [ - param for _, param in self.named_parameters( - include_sublayers=include_sublayers) + param + for _, param in self.named_parameters( + include_sublayers=include_sublayers + ) ] return ret @@ -677,9 +701,11 @@ class Layer(object): """ params_set = set() - named_sublayers = self.named_sublayers( - prefix=prefix, include_self=True) if include_sublayers else zip( - [prefix], [self]) + named_sublayers = ( + self.named_sublayers(prefix=prefix, include_self=True) + if include_sublayers + else zip([prefix], [self]) + ) for layer_prefix, sublayer in named_sublayers: params = sublayer._parameters.items() for key, param in params: @@ -723,9 +749,9 @@ class Layer(object): if layer is None: continue layer_prefix = prefix + ('.' if prefix else '') + key - for p, l in layer.named_sublayers(prefix=layer_prefix, - include_self=True, - layers_set=layers_set): + for p, l in layer.named_sublayers( + prefix=layer_prefix, include_self=True, layers_set=layers_set + ): yield p, l def register_buffer(self, name, tensor, persistable=True): @@ -768,25 +794,32 @@ class Layer(object): if '_buffers' not in self.__dict__: raise ValueError( - "super(YourLayer, self).__init__() should be called first") + "super(YourLayer, self).__init__() should be called first" + ) elif not isinstance(name, str): raise TypeError( - "The name of buffer should be a string, but received {}.". - format(type(name).__name__)) + "The name of buffer should be a string, but received {}.".format( + type(name).__name__ + ) + ) elif '.' in name: raise KeyError( "The name of buffer can not contain `.`, " "because when you access the newly added buffer in the " - "form of `self.**.**`, it will cause AttributeError.") + "form of `self.**.**`, it will cause AttributeError." + ) elif name == '': raise KeyError("The name of buffer can not be empty.") elif hasattr(self, name) and name not in self._buffers: raise KeyError("attribute '{}' already exists.".format(name)) - elif tensor is not None and not (type(tensor) == core.VarBase - or type(tensor) == core.eager.Tensor): + elif tensor is not None and not ( + type(tensor) == core.VarBase or type(tensor) == core.eager.Tensor + ): raise TypeError( - "The registered buffer should be a Paddle.Tensor, but received {}." - .format(type(tensor).__name__)) + "The registered buffer should be a Paddle.Tensor, but received {}.".format( + type(tensor).__name__ + ) + ) else: self._buffers[name] = tensor if persistable: @@ -819,8 +852,10 @@ class Layer(object): """ ret = [ - buffer for _, buffer in self.named_buffers( - include_sublayers=include_sublayers) + buffer + for _, buffer in self.named_buffers( + include_sublayers=include_sublayers + ) ] return ret @@ -861,9 +896,11 @@ class Layer(object): """ buffers_set = set() - named_sublayers = self.named_sublayers( - prefix=prefix, include_self=True) if include_sublayers else zip( - [prefix], [self]) + named_sublayers = ( + self.named_sublayers(prefix=prefix, include_self=True) + if include_sublayers + else zip([prefix], [self]) + ) for layer_prefix, sublayer in named_sublayers: buffers = sublayer._buffers.items() for key, buffer in buffers: @@ -909,7 +946,7 @@ class Layer(object): hook_result = forward_pre_hook(self, inputs) if hook_result is not None: if not isinstance(hook_result, tuple): - hook_result = (hook_result, ) + hook_result = (hook_result,) inputs = hook_result if not self._built: @@ -919,16 +956,20 @@ class Layer(object): # TODO(liuyuhui) Only xpu broadcast parameters here. 
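The tail of this hunk is the forward pre-hook dispatch: a hook may return replacement inputs, and a non-tuple return value is wrapped into a one-element tuple before `forward` is called. A small hedged illustration of that contract via `register_forward_pre_hook`; the scaling hook and layer below are made up for the example.

```python
# Hedged sketch of the pre-hook contract shown above: a hook may return new
# inputs; a non-tuple return is wrapped as a one-element tuple by the caller.
import paddle


def double_input_hook(layer, inputs):
    # `inputs` is the positional-argument tuple passed to forward().
    (x,) = inputs
    return x * 2.0  # wrapped as (x * 2.0,) before forward runs


linear = paddle.nn.Linear(4, 4)
handle = linear.register_forward_pre_hook(double_input_hook)

x = paddle.ones([1, 4])
y_hooked = linear(x)

handle.remove()  # the returned HookRemoveHelper detaches the hook
y_plain = linear(x)

# With the hook active, forward saw 2*x, so the outputs generally differ.
print(bool(paddle.allclose(y_hooked, y_plain)))  # False (in general)
```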
# The other device is to call _sync_params_buffers in DataParallel # to realize the parameter synchronization among multiply cards. - if parallel_helper._is_data_parallel_mode( - ) and paddle.is_compiled_with_xpu(): + if ( + parallel_helper._is_data_parallel_mode() + and paddle.is_compiled_with_xpu() + ): parallel_helper._broadcast_parameters( - self._parameters.values()) + self._parameters.values() + ) self._built = True if in_profiler_mode(): - with profiler.RecordEvent(self.__class__.__name__, - profiler.TracerEventType.Forward): + with profiler.RecordEvent( + self.__class__.__name__, profiler.TracerEventType.Forward + ): outputs = self.forward(*inputs, **kwargs) else: outputs = self.forward(*inputs, **kwargs) @@ -941,8 +982,14 @@ class Layer(object): return outputs def __call__(self, *inputs, **kwargs): - if (not in_declarative_mode()) and (not self._forward_pre_hooks) \ - and (not self._forward_post_hooks) and (not self._built) and in_dygraph_mode() and (not in_profiler_mode()): + if ( + (not in_declarative_mode()) + and (not self._forward_pre_hooks) + and (not self._forward_post_hooks) + and (not self._built) + and in_dygraph_mode() + and (not in_profiler_mode()) + ): self._build_once(*inputs, **kwargs) return self.forward(*inputs, **kwargs) else: @@ -999,7 +1046,7 @@ class Layer(object): for prefix, layer in model.named_sublayers(): print(prefix, layer) """ - assert (isinstance(sublayer, Layer) or sublayer == None) + assert isinstance(sublayer, Layer) or sublayer == None self._sub_layers[name] = sublayer return sublayer @@ -1036,32 +1083,42 @@ class Layer(object): """ if '_parameters' not in self.__dict__: raise RuntimeError( - "super(YourLayer, self).__init__() should be called firstly.") + "super(YourLayer, self).__init__() should be called firstly." + ) elif not isinstance(name, str): raise TypeError( - "The name of parameter should be a string, but received {}.". - format(type(name).__name__)) + "The name of parameter should be a string, but received {}.".format( + type(name).__name__ + ) + ) elif '.' in name: raise KeyError( "The name of parameter can not contain `.`, " "because when you access the newly added parameter in the " - "form of `self.**.**`, it will cause AttributeError.") + "form of `self.**.**`, it will cause AttributeError." + ) elif name == '': raise KeyError("The name of parameter can not be empty.") elif hasattr(self, name) and name not in self._parameters: raise KeyError("The parameter '{}' already exists.".format(name)) - elif parameter is not None and not isinstance(parameter, - framework.Parameter): + elif parameter is not None and not isinstance( + parameter, framework.Parameter + ): raise TypeError( - "The parameter to be added should be a Parameter, but received {}." 
- .format(type(parameter).__name__)) + "The parameter to be added should be a Parameter, but received {}.".format( + type(parameter).__name__ + ) + ) else: if parameter is None: self._parameters[name] = None if len(self._loaddict_holder) > 0: - assert parameter.name in self._loaddict_holder, "Parameter not found, Can't not find [ {} ] in state_dict".format( - parameter.name) + assert ( + parameter.name in self._loaddict_holder + ), "Parameter not found, Can't not find [ {} ] in state_dict".format( + parameter.name + ) parameter.set_value(self._loaddict_holder[parameter.name]) @@ -1080,37 +1137,50 @@ class Layer(object): """ def is_already_registered(is_pre_hook): - layers_hooks = self._forward_pre_hooks if is_pre_hook else self._forward_post_hooks - candidate_hook = record_program_ops_pre_hook if is_pre_hook else set_op_customized_attrs_post_hook + layers_hooks = ( + self._forward_pre_hooks + if is_pre_hook + else self._forward_post_hooks + ) + candidate_hook = ( + record_program_ops_pre_hook + if is_pre_hook + else set_op_customized_attrs_post_hook + ) already_registed = False if layers_hooks: last_key = next(reversed(layers_hooks)) - already_registed = (layers_hooks[last_key] == candidate_hook) + already_registed = layers_hooks[last_key] == candidate_hook return already_registed if not isinstance(attrs, dict): raise TypeError( "attrs should be type(dict), but received {}".format( - type(attrs).__name__)) + type(attrs).__name__ + ) + ) # NOTE: Overwrite behavior for same key. self._customized_attrs.update(attrs) if not is_already_registered(is_pre_hook=True): pre_hook_helper = self.register_forward_pre_hook( - record_program_ops_pre_hook) + record_program_ops_pre_hook + ) assert len(self._op_recorder.hooks) == 0 self._op_recorder.hooks = [pre_hook_helper] # manually register post_hook to ensure it is inserted into the head. 
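The code just reformatted registers an internal pre-hook/post-hook pair (and moves the post-hook to the head of the ordered dict) so the layer can record which ops it adds. As a hedged companion sketch, the same public post-hook mechanism is shown below with a user-defined hook; the clipping hook and layer are hypothetical.

```python
# Hedged sketch of the forward post-hook mechanism used above: a post-hook
# receives (layer, inputs, outputs) and may return replacement outputs.
import paddle


def clip_output_hook(layer, inputs, outputs):
    # Returning a value replaces the layer's outputs.
    return paddle.clip(outputs, min=-1.0, max=1.0)


linear = paddle.nn.Linear(4, 4)
post_handle = linear.register_forward_post_hook(clip_output_hook)

y = linear(paddle.randn([2, 4]) * 10.0)
print(float(y.abs().max()) <= 1.0)  # True: outputs were clipped by the hook

post_handle.remove()
```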
if not is_already_registered(is_pre_hook=False): post_hook_helper = self.register_forward_post_hook( - set_op_customized_attrs_post_hook) + set_op_customized_attrs_post_hook + ) if len(self._forward_post_hooks) > 1: - self._forward_post_hooks.move_to_end(post_hook_helper._hook_id, - last=False) + self._forward_post_hooks.move_to_end( + post_hook_helper._hook_id, last=False + ) assert len(self._op_recorder.hooks) == 1 @@ -1143,7 +1213,6 @@ class Layer(object): return object.__getattribute__(self, name) def __setattr__(self, name, value): - def _remove_if_exist(*dicts): for d in dicts: if name in d: @@ -1155,10 +1224,14 @@ class Layer(object): if isinstance(value, framework.Parameter): if params is None: raise ValueError( - "super(YourLayer, self).__init__() should be called first") + "super(YourLayer, self).__init__() should be called first" + ) if len(self._loaddict_holder) > 0: - assert value.name in self._loaddict_holder, "Parameter not found, Can't not find [ {} ] in state_dict".format( - value.name) + assert ( + value.name in self._loaddict_holder + ), "Parameter not found, Can't not find [ {} ] in state_dict".format( + value.name + ) value.set_value(self._loaddict_holder[value.name]) @@ -1167,9 +1240,10 @@ class Layer(object): elif params is not None and name in params: if value is not None: raise TypeError( - "assignment to parameter '{}' should be of type Parameter or None, but got '{}'" - .format(name, - type(value).__name__)) + "assignment to parameter '{}' should be of type Parameter or None, but got '{}'".format( + name, type(value).__name__ + ) + ) params[name] = None else: layers = self.__dict__.get('_sub_layers', None) @@ -1184,9 +1258,10 @@ class Layer(object): elif layers is not None and name in layers: if value is not None: raise TypeError( - "assignment to sublayer '{}' should be of type Layer or None, but got '{}'" - .format(name, - type(value).__name__)) + "assignment to sublayer '{}' should be of type Layer or None, but got '{}'".format( + name, type(value).__name__ + ) + ) layers[name] = None else: _buffers = self.__dict__.get('_buffers', None) @@ -1195,8 +1270,9 @@ class Layer(object): raise ValueError( "super(YourLayer, self).__init__() should be called first" ) - _remove_if_exist(self.__dict__, self._parameters, - self._sub_layers) + _remove_if_exist( + self.__dict__, self._parameters, self._sub_layers + ) # Set persistable=False by default. Only `register_buffer` can # add a persistable buffer. if name not in self._buffers: @@ -1210,6 +1286,7 @@ class Layer(object): # value via `assign`. if type(value) == framework.Variable: from paddle import assign + # Note(zhhsplendid): the condition below happens in PaddleGan model, # but should all non-Variable _buffers[name] be re-assign? We # should consider it in the future. I current wrote this as @@ -1217,18 +1294,23 @@ class Layer(object): if in_declarative_mode() and _buffers[name] is None: raise RuntimeError( 'In Dy2stat, self.{0} is a buffer and self.{0} is ' - 'not allowed to be set to Variable when self.{0} is None.' 
- .format(name)) - elif _buffers[name] is None or type(getattr( - self, name)) == core.VarBase: + 'not allowed to be set to Variable when self.{0} is None.'.format( + name + ) + ) + elif ( + _buffers[name] is None + or type(getattr(self, name)) == core.VarBase + ): _buffers[name] = assign(value) else: assign(value, getattr(self, name)) elif value is not None: raise TypeError( - "assignment to buffers '{}' should be of type core.VarBase or None, but got '{}'" - .format(name, - type(value).__name__)) + "assignment to buffers '{}' should be of type core.VarBase or None, but got '{}'".format( + name, type(value).__name__ + ) + ) else: # Assigning None will remove the buffer, but if re-assign a new varBase to it, # it will be remarked as a buffer with same `persistable` attribute. @@ -1315,10 +1397,12 @@ class Layer(object): self._state_dict_hooks[hook_remove_helper._hook_id] = hook return hook_remove_helper - def _obtain_parameters_buffers(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): + def _obtain_parameters_buffers( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): """ The difference from state_dict() is that state_dict_hook will not be called, but the original types of parameters and buffers will be maintained. @@ -1329,7 +1413,10 @@ class Layer(object): if data is not None: destination[structured_name_prefix + name] = data for name, buffer in self._buffers.items(): - if buffer is not None and name not in self._non_persistable_buffer_names_set: + if ( + buffer is not None + and name not in self._non_persistable_buffer_names_set + ): destination[structured_name_prefix + name] = buffer if include_sublayers: @@ -1338,17 +1425,22 @@ class Layer(object): destination_temp = destination.copy() destination_temp.update( layer_item._obtain_parameters_buffers( - destination_temp, include_sublayers, - structured_name_prefix + layer_name + ".")) + destination_temp, + include_sublayers, + structured_name_prefix + layer_name + ".", + ) + ) destination = destination_temp return destination - def _state_dict_impl(self, - destination=None, - include_sublayers=True, - structured_name_prefix="", - include_non_persistable_buffer=False, - use_hook=True): + def _state_dict_impl( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + include_non_persistable_buffer=False, + use_hook=True, + ): """ Get all parameters and persistable buffers of current layer and its sub-layers. 
And set them into a dict @@ -1366,7 +1458,10 @@ class Layer(object): destination[structured_name_prefix + name] = data for name, buffer in self._buffers.items(): if not include_non_persistable_buffer: - if buffer is not None and name not in self._non_persistable_buffer_names_set: + if ( + buffer is not None + and name not in self._non_persistable_buffer_names_set + ): destination[structured_name_prefix + name] = buffer else: if buffer is not None: @@ -1378,9 +1473,13 @@ class Layer(object): destination_temp = destination.copy() destination_temp.update( layer_item._state_dict_impl( - destination_temp, include_sublayers, + destination_temp, + include_sublayers, structured_name_prefix + layer_name + ".", - include_non_persistable_buffer, use_hook)) + include_non_persistable_buffer, + use_hook, + ) + ) destination = destination_temp if use_hook: for state_dict_hook in self._state_dict_hooks.values(): @@ -1390,11 +1489,13 @@ class Layer(object): return destination - def to_static_state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix="", - use_hook=True): + def to_static_state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + use_hook=True, + ): ''' Get all parameters and buffers of current layer and its sub-layers. And set them into a dict @@ -1422,13 +1523,16 @@ class Layer(object): include_sublayers=include_sublayers, structured_name_prefix=structured_name_prefix, include_non_persistable_buffer=True, - use_hook=use_hook) - - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix="", - use_hook=True): + use_hook=use_hook, + ) + + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + use_hook=True, + ): ''' Get all parameters and persistable buffers of current layer and its sub-layers. And set them into a dict @@ -1456,7 +1560,8 @@ class Layer(object): include_sublayers=include_sublayers, structured_name_prefix=structured_name_prefix, include_non_persistable_buffer=False, - use_hook=use_hook) + use_hook=use_hook, + ) @framework.deprecate_stat_dict def set_state_dict(self, state_dict, use_structured_name=True): @@ -1488,22 +1593,31 @@ class Layer(object): state = state_dict.get(key, None) if state is None: raise ValueError( - "{} is not found in the provided dict.".format(key)) - if (isinstance(state, dict) or isinstance(state, list)): - if (len(state) != len(param)): - raise ValueError("{} receieves the length of {}, " - "but the expected shape is {}".format( - key, len(state), len(param))) + "{} is not found in the provided dict.".format(key) + ) + if isinstance(state, dict) or isinstance(state, list): + if len(state) != len(param): + raise ValueError( + "{} receieves the length of {}, " + "but the expected shape is {}".format( + key, len(state), len(param) + ) + ) else: return param, state else: - state_shape = state.shape() if inspect.ismethod( - state.shape) else state.shape + state_shape = ( + state.shape() + if inspect.ismethod(state.shape) + else state.shape + ) if list(state_shape) != list(param.shape): raise ValueError( - "{} receives a shape {}, but the expected shape is {}.". 
- format(key, list(state_shape), list(param.shape))) + "{} receives a shape {}, but the expected shape is {}.".format( + key, list(state_shape), list(param.shape) + ) + ) return param, state matched_param_state = [] @@ -1542,7 +1656,9 @@ class Layer(object): # restore parameter states core._create_loaded_parameter( [param for param, state in matched_param_state], - global_scope(), executor) + global_scope(), + executor, + ) for param, state in matched_param_state: _set_var(param, state) except ValueError as e: @@ -1598,11 +1714,13 @@ class Layer(object): # [ 0.33960250, 0.96878713]]) ''' - return self._to_impl(device=device, - dtype=dtype, - blocking=blocking, - include_sublayers=True, - floating_only=False) + return self._to_impl( + device=device, + dtype=dtype, + blocking=blocking, + include_sublayers=True, + floating_only=False, + ) def _apply(self, func, device, dtype, blocking, include_sublayers=True): if include_sublayers: @@ -1616,8 +1734,9 @@ class Layer(object): if param.grad is not None: with no_grad(): - grad_applied = func(param._grad_ivar(), device, dtype, - blocking) + grad_applied = func( + param._grad_ivar(), device, dtype, blocking + ) for key, buf in self._buffers.items(): if buf is not None: @@ -1641,12 +1760,14 @@ class Layer(object): # Note(zhangbo): Paddle GPU minimum memory allocation unit is 256 bytes, waiting_alloc_memory will comput ‘t’ occupied memory space. # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough. waiting_alloc_memory = ( - (np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2 + ((np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2 + ) gpu_memory_available = core.gpu_memory_available() if gpu_memory_available < waiting_alloc_memory: # Copy param / Tensor to cpu - t_used = t._copy_to(paddle.CPUPlace(), - blocking) # k-v type will error + t_used = t._copy_to( + paddle.CPUPlace(), blocking + ) # k-v type will error # Release mem of t t.value().get_tensor()._clear() else: @@ -1657,7 +1778,8 @@ class Layer(object): # 2. cast param / Tensor to dtype if dtype is not None and dtype != t_used.dtype: with paddle.fluid.framework._dygraph_place_guard( - place=t_used.place): + place=t_used.place + ): t_casted = t_used.cast(dtype=dtype) else: t_casted = t_used @@ -1675,12 +1797,14 @@ class Layer(object): return t - def _to_impl(self, - device=None, - dtype=None, - blocking=None, - include_sublayers=True, - floating_only=False): + def _to_impl( + self, + device=None, + dtype=None, + blocking=None, + include_sublayers=True, + floating_only=False, + ): ''' Cast the parameters and buffers of Layer by the give device, dtype and blocking. 
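The reformatted code above sits behind two public entry points: `Layer.set_state_dict`, which applies the shape-checked parameter matching, and `Layer.to`, which funnels into `_to_impl` to cast or move parameters and buffers. A short hedged sketch of both, assuming a plain `paddle.nn.Linear` and a hypothetical file name:

```python
# Hedged sketch of Layer.to and the state-dict round trip implemented above.
import paddle

layer = paddle.nn.Linear(3, 2)

# Cast parameters/buffers in place; device is "cpu" so the sketch runs anywhere.
layer.to(device="cpu", dtype="float64")
print(layer.weight.dtype)  # paddle.float64

# Save and restore a state dict; a mismatched shape would raise the
# ValueError constructed in the hunk above.
state = layer.state_dict()
paddle.save(state, "linear.pdparams")  # hypothetical file name
layer.set_state_dict(paddle.load("linear.pdparams"))
```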
@@ -1709,20 +1833,28 @@ class Layer(object): if device is not None: if isinstance(device, str): device = paddle.device._convert_to_place(device) - elif isinstance(device, (core.CPUPlace, core.CUDAPlace, - core.CUDAPinnedPlace, core.XPUPlace)): + elif isinstance( + device, + ( + core.CPUPlace, + core.CUDAPlace, + core.CUDAPinnedPlace, + core.XPUPlace, + ), + ): pass else: raise ValueError( "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace() or paddle.XPUPlace(), but the type of device is " - + type(device).__name__) + + type(device).__name__ + ) if blocking is None: blocking = True else: assert isinstance( - blocking, - bool), "blocking value error, must be the True, False or None" + blocking, bool + ), "blocking value error, must be the True, False or None" def transform(t, device, dtype, blocking): if floating_only and (not paddle.is_floating_point(t)): diff --git a/python/paddle/fluid/dygraph/learning_rate_scheduler.py b/python/paddle/fluid/dygraph/learning_rate_scheduler.py index 0620425910e6bcec2b4e821b4903d1b03854fcd6..65219a9b1766333c150632f9653b0df97f41fd41 100644 --- a/python/paddle/fluid/dygraph/learning_rate_scheduler.py +++ b/python/paddle/fluid/dygraph/learning_rate_scheduler.py @@ -20,9 +20,18 @@ from ..framework import Variable from ..data_feeder import check_type __all__ = [ - 'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay', - 'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay', 'LinearLrWarmup', - 'ReduceLROnPlateau', 'StepDecay', 'MultiStepDecay', 'LambdaDecay' + 'NoamDecay', + 'PiecewiseDecay', + 'NaturalExpDecay', + 'ExponentialDecay', + 'InverseTimeDecay', + 'PolynomialDecay', + 'CosineDecay', + 'LinearLrWarmup', + 'ReduceLROnPlateau', + 'StepDecay', + 'MultiStepDecay', + 'LambdaDecay', ] @@ -57,12 +66,14 @@ class LearningRateDecay(object): learning rate variable """ from .. import layers + lr = layers.create_global_var( name=unique_name.generate("learning_rate"), shape=[1], value=float(lr), dtype=self.dtype, - persistable=False) + persistable=False, + ) return lr # Note: If you want to change what optimizer.state_dict stores, just overwrite this functions, @@ -83,7 +94,8 @@ class LearningRateDecay(object): assert value.shape == [ 1 ], "shape of Variable in state_dict must be [1] {}".format( - value.shape) + value.shape + ) value = value.numpy()[0] state_dict[key] = value @@ -105,8 +117,10 @@ class LearningRateDecay(object): self.__dict__[key] = state_dict[key] else: raise RuntimeError( - "Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict" - .format(key)) + "Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict".format( + key + ) + ) if len(state_dict) > len(self.keys): warnings.warn( "There are some unused values in state_dict. Maybe the optimizer have different 'LearningRateDecay' when invoking state_dict and set_dict" @@ -238,14 +252,16 @@ class NaturalExpDecay(LearningRateDecay): """ - def __init__(self, - learning_rate, - decay_steps, - decay_rate, - staircase=False, - begin=0, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + decay_steps, + decay_rate, + staircase=False, + begin=0, + step=1, + dtype='float32', + ): super(NaturalExpDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.decay_steps = decay_steps @@ -254,11 +270,13 @@ class NaturalExpDecay(LearningRateDecay): def step(self): from .. 
import layers + div_res = self.create_lr_var(self.step_num / self.decay_steps) if self.staircase: div_res = layers.floor(div_res) decayed_lr = self.learning_rate * layers.exp( - -1 * self.decay_rate * div_res) + -1 * self.decay_rate * div_res + ) return decayed_lr @@ -312,22 +330,24 @@ class ExponentialDecay(LearningRateDecay): base_lr = 0.1 with fluid.dygraph.guard(): sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.dygraph.ExponentialDecay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.dygraph.ExponentialDecay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) """ - def __init__(self, - learning_rate, - decay_steps, - decay_rate, - staircase=False, - begin=0, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + decay_steps, + decay_rate, + staircase=False, + begin=0, + step=1, + dtype='float32', + ): super(ExponentialDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.decay_steps = decay_steps @@ -336,6 +356,7 @@ class ExponentialDecay(LearningRateDecay): def step(self): from .. import layers + div_res = self.create_lr_var(self.step_num / self.decay_steps) if self.staircase: div_res = layers.floor(div_res) @@ -389,23 +410,25 @@ class InverseTimeDecay(LearningRateDecay): with fluid.dygraph.guard(): emb = fluid.dygraph.Embedding([10, 10]) sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.dygraph.InverseTimeDecay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True), + learning_rate=fluid.dygraph.InverseTimeDecay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True), parameter_list = emb.parameters()) """ - def __init__(self, - learning_rate, - decay_steps, - decay_rate, - staircase=False, - begin=0, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + decay_steps, + decay_rate, + staircase=False, + begin=0, + step=1, + dtype='float32', + ): super(InverseTimeDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.decay_steps = decay_steps @@ -414,6 +437,7 @@ class InverseTimeDecay(LearningRateDecay): def step(self): from .. import layers + div_res = self.create_lr_var(self.step_num / self.decay_steps) if self.staircase: div_res = layers.floor(div_res) @@ -480,15 +504,17 @@ class PolynomialDecay(LearningRateDecay): """ - def __init__(self, - learning_rate, - decay_steps, - end_learning_rate=0.0001, - power=1.0, - cycle=False, - begin=0, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + decay_steps, + end_learning_rate=0.0001, + power=1.0, + cycle=False, + begin=0, + step=1, + dtype='float32', + ): super(PolynomialDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.decay_steps = decay_steps @@ -498,22 +524,27 @@ class PolynomialDecay(LearningRateDecay): def step(self): from .. import layers + tmp_step_num = self.step_num tmp_decay_steps = self.decay_steps if self.cycle: div_res = layers.ceil( - self.create_lr_var(tmp_step_num / float(self.decay_steps))) + self.create_lr_var(tmp_step_num / float(self.decay_steps)) + ) if tmp_step_num == 0: div_res = self.create_lr_var(1.0) tmp_decay_steps = self.decay_steps * div_res else: tmp_step_num = self.create_lr_var( - tmp_step_num if tmp_step_num < self.decay_steps else self. 
- decay_steps) + tmp_step_num + if tmp_step_num < self.decay_steps + else self.decay_steps + ) - decayed_lr = (self.learning_rate - self.end_learning_rate) * \ - ((1 - tmp_step_num / tmp_decay_steps) ** self.power) + self.end_learning_rate + decayed_lr = (self.learning_rate - self.end_learning_rate) * ( + (1 - tmp_step_num / tmp_decay_steps) ** self.power + ) + self.end_learning_rate return decayed_lr @@ -545,22 +576,24 @@ class CosineDecay(LearningRateDecay): None. Examples: - .. code-block:: python + .. code-block:: python - base_lr = 0.1 + base_lr = 0.1 with fluid.dygraph.guard(): optimizer = fluid.optimizer.SGD( - learning_rate = fluid.dygraph.CosineDecay( - base_lr, 10000, 120) ) + learning_rate = fluid.dygraph.CosineDecay( + base_lr, 10000, 120) ) """ - def __init__(self, - learning_rate, - step_each_epoch, - epochs, - begin=0, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + step_each_epoch, + epochs, + begin=0, + step=1, + dtype='float32', + ): super(CosineDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.step_each_epoch = step_each_epoch @@ -568,10 +601,15 @@ class CosineDecay(LearningRateDecay): def step(self): from .. import layers + cur_epoch = layers.floor( - self.create_lr_var(self.step_num / self.step_each_epoch)) - decayed_lr = self.learning_rate * 0.5 * ( - layers.cos(cur_epoch * math.pi / self.epochs) + 1) + self.create_lr_var(self.step_num / self.step_each_epoch) + ) + decayed_lr = ( + self.learning_rate + * 0.5 + * (layers.cos(cur_epoch * math.pi / self.epochs) + 1) + ) return decayed_lr @@ -621,13 +659,15 @@ class NoamDecay(LearningRateDecay): parameter_list = emb.parameters()) """ - def __init__(self, - d_model, - warmup_steps, - begin=1, - step=1, - dtype='float32', - learning_rate=1.0): + def __init__( + self, + d_model, + warmup_steps, + begin=1, + step=1, + dtype='float32', + learning_rate=1.0, + ): super(NoamDecay, self).__init__(begin, step, dtype) self.learning_rate = learning_rate self.d_model = d_model @@ -635,10 +675,14 @@ class NoamDecay(LearningRateDecay): def step(self): from .. 
import layers + a = self.create_lr_var(self.step_num**-0.5) b = self.create_lr_var((self.warmup_steps**-1.5) * self.step_num) - lr_value = self.learning_rate * (self.d_model** - -0.5) * layers.elementwise_min(a, b) + lr_value = ( + self.learning_rate + * (self.d_model**-0.5) + * layers.elementwise_min(a, b) + ) return lr_value @@ -698,28 +742,37 @@ class LinearLrWarmup(LearningRateDecay): """ - def __init__(self, - learning_rate, - warmup_steps, - start_lr, - end_lr, - begin=1, - step=1, - dtype='float32'): + def __init__( + self, + learning_rate, + warmup_steps, + start_lr, + end_lr, + begin=1, + step=1, + dtype='float32', + ): super(LinearLrWarmup, self).__init__(begin, step, dtype) - type_check = isinstance(learning_rate, float) or isinstance( - learning_rate, int) or isinstance(learning_rate, LearningRateDecay) + type_check = ( + isinstance(learning_rate, float) + or isinstance(learning_rate, int) + or isinstance(learning_rate, LearningRateDecay) + ) if not type_check: raise TypeError( - "the type of learning_rate should be [int, float or LearningRateDecay], the current type is {}" - .format(learning_rate)) + "the type of learning_rate should be [int, float or LearningRateDecay], the current type is {}".format( + learning_rate + ) + ) self.learning_rate = learning_rate self.warmup_steps = warmup_steps self.start_lr = start_lr - assert end_lr > start_lr, "end_lr {} must be greater than start_lr {}".format( - end_lr, start_lr) - self.lr_ratio_before_warmup = (float(end_lr) - - float(start_lr)) / float(warmup_steps) + assert ( + end_lr > start_lr + ), "end_lr {} must be greater than start_lr {}".format(end_lr, start_lr) + self.lr_ratio_before_warmup = (float(end_lr) - float(start_lr)) / float( + warmup_steps + ) def step(self): base_lr = self.learning_rate @@ -727,6 +780,7 @@ class LinearLrWarmup(LearningRateDecay): base_lr = base_lr() from .. import layers + if self.step_num < self.warmup_steps: return self.lr_ratio_before_warmup * self.step_num + self.start_lr else: @@ -812,18 +866,20 @@ class ReduceLROnPlateau(LearningRateDecay): """ - def __init__(self, - learning_rate, - mode='min', - decay_rate=0.1, - patience=10, - verbose=False, - threshold=1e-4, - threshold_mode='rel', - cooldown=0, - min_lr=0, - eps=1e-8, - dtype='float32'): + def __init__( + self, + learning_rate, + mode='min', + decay_rate=0.1, + patience=10, + verbose=False, + threshold=1e-4, + threshold_mode='rel', + cooldown=0, + min_lr=0, + eps=1e-8, + dtype='float32', + ): super(ReduceLROnPlateau, self).__init__(dtype=dtype) mode = mode.lower() if mode not in ['min', 'max']: @@ -838,15 +894,21 @@ class ReduceLROnPlateau(LearningRateDecay): threshold_mode = threshold_mode.lower() if threshold_mode not in ['rel', 'abs']: - raise ValueError('threshold mode ' + threshold_mode + - ' is unknown!') + raise ValueError( + 'threshold mode ' + threshold_mode + ' is unknown!' + ) self.threshold_mode = threshold_mode - check_type(learning_rate, 'learning_rate', (float, int, Variable), - 'ReduceLROnPlateau') + check_type( + learning_rate, + 'learning_rate', + (float, int, Variable), + 'ReduceLROnPlateau', + ) if not isinstance(learning_rate, (float, int, Variable)): raise TypeError( "The type of 'learning_rate' in 'ReduceLROnPlateau' must be 'float, int, Variable', but received %s." 
- % type(learning_rate)) + % type(learning_rate) + ) self.learning_rate = learning_rate self.verbose = verbose @@ -865,8 +927,11 @@ class ReduceLROnPlateau(LearningRateDecay): # "cooldown_counter / best_loss / num_bad_epochs / epoch_num / learning_rate" will be stored. def _state_keys(self): self.keys = [ - 'cooldown_counter', 'best_loss', 'num_bad_epochs', 'epoch_num', - 'learning_rate' + 'cooldown_counter', + 'best_loss', + 'num_bad_epochs', + 'epoch_num', + 'learning_rate', ] def __call__(self): @@ -893,9 +958,13 @@ class ReduceLROnPlateau(LearningRateDecay): # loss must be 1-D Tensor with shape [1] check_type(loss, 'loss', Variable, 'ReduceLROnPlateau.step') - assert len(loss.shape) == 1 and loss.shape[0] == 1, "the loss.shape " \ - "should be (1L,), but the current loss.shape is {}. Maybe that " \ - "you should call paddle.mean to process it first.".format(loss.shape) + assert len(loss.shape) == 1 and loss.shape[0] == 1, ( + "the loss.shape " + "should be (1L,), but the current loss.shape is {}. Maybe that " + "you should call paddle.mean to process it first.".format( + loss.shape + ) + ) self.epoch_num += 1 if self.cooldown_counter > 0: @@ -909,18 +978,24 @@ class ReduceLROnPlateau(LearningRateDecay): if self.num_bad_epochs > self.patience: from .. import layers + self.cooldown_counter = self.cooldown self.num_bad_epochs = 0 new_lr = layers.elementwise_max( - self.learning_rate * self.decay_rate, self.min_lr) + self.learning_rate * self.decay_rate, self.min_lr + ) if self.learning_rate - new_lr > self.eps: if self.verbose: - old_lr = self.learning_rate.numpy()[0] if isinstance( - self.learning_rate, - Variable) else self.learning_rate - print('Epoch {}: reducing learning rate from {} to {}.'. - format(self.epoch_num, old_lr, - new_lr.numpy()[0])) + old_lr = ( + self.learning_rate.numpy()[0] + if isinstance(self.learning_rate, Variable) + else self.learning_rate + ) + print( + 'Epoch {}: reducing learning rate from {} to {}.'.format( + self.epoch_num, old_lr, new_lr.numpy()[0] + ) + ) self.learning_rate = new_lr def _is_better(self, current, best): @@ -952,7 +1027,8 @@ class _LearningRateEpochDecay(LearningRateDecay): if not isinstance(learning_rate, (float, int)): raise TypeError( "The type of 'learning_rate' must be 'float, int', but received %s." - % type(learning_rate)) + % type(learning_rate) + ) if learning_rate < 0: raise ValueError("Invalid learning rate: {}".format(learning_rate)) @@ -1057,8 +1133,9 @@ class StepDecay(_LearningRateEpochDecay): def __init__(self, learning_rate, step_size, decay_rate=0.1): if not isinstance(step_size, int): raise TypeError( - "The type of 'step_size' must be 'int', but received %s." % - type(step_size)) + "The type of 'step_size' must be 'int', but received %s." + % type(step_size) + ) if decay_rate >= 1.0: raise ValueError('decay_rate should be < 1.0.') @@ -1134,12 +1211,15 @@ class MultiStepDecay(_LearningRateEpochDecay): if not isinstance(milestones, (tuple, list)): raise TypeError( "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." 
- % type(milestones)) + % type(milestones) + ) - if not all([ + if not all( + [ milestones[i] < milestones[i + 1] for i in range(len(milestones) - 1) - ]): + ] + ): raise ValueError('The elements of milestones must be incremented') if decay_rate >= 1.0: raise ValueError('decay_rate should be < 1.0.') @@ -1154,7 +1234,7 @@ class MultiStepDecay(_LearningRateEpochDecay): if self.epoch_num < self.milestones[i]: return self.base_lr * (decay_rate**i) - return self.base_lr * (decay_rate**len(self.milestones)) + return self.base_lr * (decay_rate ** len(self.milestones)) class LambdaDecay(_LearningRateEpochDecay): @@ -1213,7 +1293,8 @@ class LambdaDecay(_LearningRateEpochDecay): if not callable(lr_lambda): raise TypeError( "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s." - % type(lr_lambda)) + % type(lr_lambda) + ) self.lr_lambda = lr_lambda super(LambdaDecay, self).__init__(learning_rate) diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py index ac894a3acc7f22f4d70da15a3a08739cc88b76a3..de71f68c1fed936ba52697785b649f26e50de4f9 100644 --- a/python/paddle/fluid/dygraph/math_op_patch.py +++ b/python/paddle/fluid/dygraph/math_op_patch.py @@ -13,7 +13,13 @@ # limitations under the License. from .. import core -from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator, _in_legacy_dygraph, in_dygraph_mode +from ..framework import ( + Variable, + convert_np_dtype_to_dtype_, + _varbase_creator, + _in_legacy_dygraph, + in_dygraph_mode, +) from ..layers.layer_function_generator import OpProtoHolder from . import no_grad from .. import framework @@ -70,13 +76,22 @@ def monkey_patch_math_varbase(): @no_grad def create_tensor(value, dtype, shape): if framework._in_eager_mode_: - out = _C_ops.full(shape, value, dtype, - framework._current_expected_place()) + out = _C_ops.full( + shape, value, dtype, framework._current_expected_place() + ) else: out = _varbase_creator(dtype=dtype) - out = _legacy_C_ops.fill_constant(out, 'dtype', dtype, 'shape', - shape, 'value', value, - 'force_cpu', False) + out = _legacy_C_ops.fill_constant( + out, + 'dtype', + dtype, + 'shape', + shape, + 'value', + value, + 'force_cpu', + False, + ) out.stop_gradient = True return out @@ -110,8 +125,9 @@ def monkey_patch_math_varbase(): dtype = convert_np_dtype_to_dtype_(dtype) if _in_legacy_dygraph(): - return _legacy_C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', - dtype) + return _legacy_C_ops.cast( + self, 'in_dtype', self.dtype, 'out_dtype', dtype + ) return _C_ops.cast(self, dtype) def _scalar_elementwise_op_(var, scale, bias): @@ -124,7 +140,9 @@ def monkey_patch_math_varbase(): def _float_(var): numel = np.prod(var.shape) - assert numel == 1, "only one element variable can be converted to float." + assert ( + numel == 1 + ), "only one element variable can be converted to float." tensor = var.value().get_tensor() assert tensor._is_initialized(), "variable's tensor is not initialized" return float(var.numpy().flatten()[0]) @@ -154,7 +172,9 @@ def monkey_patch_math_varbase(): def _index_(var): numel = np.prod(var.shape) - assert numel == 1, "only one element variable can be converted to python index." + assert ( + numel == 1 + ), "only one element variable can be converted to python index." 
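The later part of this chunk reformats `monkey_patch_math_varbase`, where `_binary_creator_` builds the tensor operators: scalar arithmetic takes the fast `scale` path, and division on integer tensors is computed in float32, as the comment in the hunk notes. A hedged illustration of that observable behavior, with arbitrary values:

```python
# Hedged illustration of the monkey-patched operators being reformatted here.
import paddle

x = paddle.to_tensor([1, 2, 3], dtype="int64")

y = x + 1  # scalar fast path keeps the tensor dtype
z = x / 2  # divide on an integer tensor is promoted to float32

print(y.dtype)    # paddle.int64
print(z.dtype)    # paddle.float32
print(z.numpy())  # [0.5 1.  1.5]
```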
tensor = var.value().get_tensor() assert tensor._is_initialized(), "variable's tensor is not initialized" return int(var.numpy().flatten()[0]) @@ -196,12 +216,13 @@ def monkey_patch_math_varbase(): return _scalar_elementwise_op_(var, 1.0 / value, 0.0) # for binary operator such as elementwise, compare - def _binary_creator_(method_name, - op_type, - reverse=False, - scalar_method=None, - call_final_api=False): - + def _binary_creator_( + method_name, + op_type, + reverse=False, + scalar_method=None, + call_final_api=False, + ): def __impl__(self, other_var): # 1. scalar exists cases # we need combine the tensor.dtype and scalar.dtype, cast correct object @@ -224,8 +245,9 @@ def monkey_patch_math_varbase(): # so the calculation result here and the calculation result of numpy are # different after 6 decimal point. If necessary, we can also use float64 here. # torch's behavior here is consistent with ours - if (op_type == "divide" or op_type == "elementwise_div" - ) and self.dtype in _supported_int_dtype_: + if ( + op_type == "divide" or op_type == "elementwise_div" + ) and self.dtype in _supported_int_dtype_: self = astype(self, 'float32') # here use `scale` replace `elementwise` to get better performance # but only +, -, *, / can use this method @@ -244,36 +266,47 @@ def monkey_patch_math_varbase(): if not isinstance(other_var, other_var_should_be): if isinstance(other_var, complex): import paddle + other_var = paddle.to_tensor(other_var, dtype='complex64') else: if reverse: - other_var = create_tensor(other_var, - dtype=lhs_dtype, - shape=self.shape) + other_var = create_tensor( + other_var, dtype=lhs_dtype, shape=self.shape + ) else: # add fill_op - other_var = create_scalar(value=other_var, - dtype=lhs_dtype) + other_var = create_scalar( + value=other_var, dtype=lhs_dtype + ) # 3. 
promote types or unify right var type to left var rhs_dtype = other_var.dtype if lhs_dtype != rhs_dtype: if method_name in _supported_promote_complex_types_ and ( - lhs_dtype in _complex_dtypes - or rhs_dtype in _complex_dtypes): + lhs_dtype in _complex_dtypes or rhs_dtype in _complex_dtypes + ): # only when lhs_dtype or rhs_dtype is complex type, # the dtype will promote, in other cases, directly # use lhs_dtype, this is consistent will original rule promote_dtype = core._promote_types_if_complex_exists( - lhs_dtype, rhs_dtype) - self = self if lhs_dtype == promote_dtype else astype( - self, promote_dtype) - other_var = other_var if rhs_dtype == promote_dtype else astype( - other_var, promote_dtype) + lhs_dtype, rhs_dtype + ) + self = ( + self + if lhs_dtype == promote_dtype + else astype(self, promote_dtype) + ) + other_var = ( + other_var + if rhs_dtype == promote_dtype + else astype(other_var, promote_dtype) + ) else: warnings.warn( - 'The dtype of left and right variables are not the same, left dtype is {}, but right dtype is {}, the right dtype will convert to {}' - .format(lhs_dtype, rhs_dtype, lhs_dtype)) + 'The dtype of left and right variables are not the same, left dtype is {}, but right dtype is {}, the right dtype will convert to {}'.format( + lhs_dtype, rhs_dtype, lhs_dtype + ) + ) other_var = astype(other_var, lhs_dtype) if reverse: @@ -281,8 +314,9 @@ def monkey_patch_math_varbase(): self = other_var other_var = tmp - if (op_type == "divide" or op_type == "elementwise_div" - ) and self.dtype in _supported_int_dtype_: + if ( + op_type == "divide" or op_type == "elementwise_div" + ) and self.dtype in _supported_int_dtype_: self = astype(self, 'float32') other_var = astype(other_var, 'float32') @@ -315,7 +349,9 @@ def monkey_patch_math_varbase(): Returns: Tensor - """.format(comment) + """.format( + comment + ) __impl__.__name__ = method_name return __impl__ @@ -332,39 +368,78 @@ def monkey_patch_math_varbase(): ('ndim', _ndim_), ('size', _size_), ('T', _T_), - ('__add__', - _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)), + ( + '__add__', + _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_), + ), # a+b == b+a. Do not need to reverse explicitly - ('__radd__', - _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)), - ('__sub__', - _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_)), - ('__rsub__', - _binary_creator_('__rsub__', 'elementwise_sub', True, _scalar_rsub_)), - ('__mul__', - _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_)), + ( + '__radd__', + _binary_creator_( + '__radd__', 'elementwise_add', False, _scalar_add_ + ), + ), + ( + '__sub__', + _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_), + ), + ( + '__rsub__', + _binary_creator_( + '__rsub__', 'elementwise_sub', True, _scalar_rsub_ + ), + ), + ( + '__mul__', + _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_), + ), ## a*b == b*a. 
Do not need to reverse explicitly - ('__rmul__', - _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)), - ('__div__', - _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_)), - ('__truediv__', - _binary_creator_('__truediv__', 'elementwise_div', False, - _scalar_div_)), - ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True, - None)), - ('__rtruediv__', - _binary_creator_('rtruediv__', 'elementwise_div', True, None)), - ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False, - None)), - ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True, - None)), - ('__floordiv__', - _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)), - ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, - None)), - ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False, - None)), + ( + '__rmul__', + _binary_creator_( + '__rmul__', 'elementwise_mul', False, _scalar_mul_ + ), + ), + ( + '__div__', + _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_), + ), + ( + '__truediv__', + _binary_creator_( + '__truediv__', 'elementwise_div', False, _scalar_div_ + ), + ), + ( + '__rdiv__', + _binary_creator_('__rdiv__', 'elementwise_div', True, None), + ), + ( + '__rtruediv__', + _binary_creator_('rtruediv__', 'elementwise_div', True, None), + ), + ( + '__pow__', + _binary_creator_('__pow__', 'elementwise_pow', False, None), + ), + ( + '__rpow__', + _binary_creator_('__rpow__', 'elementwise_pow', True, None), + ), + ( + '__floordiv__', + _binary_creator_( + '__floordiv__', 'elementwise_floordiv', False, None + ), + ), + ( + '__mod__', + _binary_creator_('__mod__', 'elementwise_mod', False, None), + ), + ( + '__matmul__', + _binary_creator_('__matmul__', "matmul_v2", False, None), + ), ## for logical compare ('__eq__', _binary_creator_('__eq__', 'equal', False, None)), ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)), @@ -372,7 +447,7 @@ def monkey_patch_math_varbase(): ('__le__', _binary_creator_('__le__', 'less_equal', False, None)), ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)), ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)), - ('__array_ufunc__', None) + ('__array_ufunc__', None), ] eager_methods = [ @@ -390,7 +465,7 @@ def monkey_patch_math_varbase(): ('T', _T_), # for logical compare ('__eq__', _binary_creator_('__eq__', 'equal', False, None, True)), - ('__array_ufunc__', None) + ('__array_ufunc__', None), ] eager_cpp_level_patch = [ @@ -447,12 +522,16 @@ def monkey_patch_math_varbase(): setattr(local_tensor, method_name, method_impl) else: import paddle.tensor + # Tensor method from module paddle.tensor for method_name in paddle.tensor.tensor_method_func: - if hasattr(local_tensor, method_name): continue + if hasattr(local_tensor, method_name): + continue method_impl = getattr(paddle.tensor, method_name, None) - if method_impl: setattr(local_tensor, method_name, method_impl) + if method_impl: + setattr(local_tensor, method_name, method_impl) for magic_method, origin_method in paddle.tensor.magic_method_func: impl = getattr(paddle.tensor, origin_method, None) - if impl: setattr(local_tensor, magic_method, impl) + if impl: + setattr(local_tensor, magic_method, impl) diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 27ac082da79f5e307113c5a9acf245efe3f9c299..60202c2a6b105bb60b6cbb80c8c2668c670b2aa9 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -18,8 +18,24 @@ 
from ..layers import utils from ..layers import nn as F from .. import dygraph_utils from . import layers -from ..framework import Variable, _non_static_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program, _global_flags, in_dygraph_mode, _in_legacy_dygraph -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..framework import ( + Variable, + _non_static_mode, + OpProtoHolder, + Parameter, + _dygraph_tracer, + _varbase_creator, + default_main_program, + _global_flags, + in_dygraph_mode, + _in_legacy_dygraph, +) +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from ..param_attr import ParamAttr from ..initializer import Normal, Constant, NumpyArrayInitializer from .. import unique_name @@ -33,10 +49,25 @@ import paddle.utils.deprecated as deprecated from paddle import _C_ops, _legacy_C_ops __all__ = [ - 'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding', - 'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu', - 'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm', - 'SpectralNorm', 'TreeConv', 'Flatten' + 'Conv2D', + 'Conv3D', + 'Pool2D', + 'Linear', + 'BatchNorm', + 'Dropout', + 'Embedding', + 'GRUUnit', + 'InstanceNorm', + 'LayerNorm', + 'NCE', + 'PRelu', + 'BilinearTensorProduct', + 'Conv2DTranspose', + 'Conv3DTranspose', + 'GroupNorm', + 'SpectralNorm', + 'TreeConv', + 'Flatten', ] @@ -157,24 +188,30 @@ class Conv2D(layers.Layer): """ - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - dtype='float32'): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + dtype='float32', + ): assert param_attr is not False, "param_attr should not be False here." 
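Editor's note on the math_op_patch.py hunks earlier in this diff: the tensor's arithmetic magic methods are generated by the `_binary_creator_` factory and then attached in a `setattr` loop at the end of `monkey_patch_math_varbase`. The snippet below is a minimal standalone sketch of that pattern only; the `Box` class, the `operator`-based implementations, and the two-entry method list are invented for illustration and omit all of the dtype-promotion and scalar handling visible in the hunks.

```python
import operator


class Box:
    """Toy value wrapper; stands in for the tensor class being patched."""

    def __init__(self, value):
        self.value = value


def _binary_creator(method_name, op):
    # Factory returning a magic method, mirroring the shape of
    # `_binary_creator_` above (without dtype promotion or scalar paths).
    def __impl__(self, other):
        other_value = other.value if isinstance(other, Box) else other
        return Box(op(self.value, other_value))

    __impl__.__name__ = method_name
    return __impl__


# (method name, implementation) pairs, attached in a loop as in the patch.
methods = [
    ("__add__", _binary_creator("__add__", operator.add)),
    ("__mul__", _binary_creator("__mul__", operator.mul)),
]

for name, impl in methods:
    setattr(Box, name, impl)

print((Box(3) + Box(4)).value)  # 7
print((Box(3) * 5).value)       # 15
```

The one-tuple-per-line layout with a trailing comma in `methods` mirrors how the reformatted method lists (for example `eager_methods`) now read.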
super(Conv2D, self).__init__() - if (core.is_compiled_with_cuda() and paddle.fluid.get_flags( - "FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]): + if ( + core.is_compiled_with_cuda() + and paddle.fluid.get_flags("FLAGS_conv2d_disable_cudnn")[ + "FLAGS_conv2d_disable_cudnn" + ] + ): use_cudnn = False self._num_channels = num_channels @@ -193,17 +230,22 @@ class Conv2D(layers.Layer): self._bias_attr = bias_attr self._dtype = dtype - if (self._num_channels == self._groups - and num_filters % self._num_channels == 0 - and not self._use_cudnn and not self._use_mkldnn): + if ( + self._num_channels == self._groups + and num_filters % self._num_channels == 0 + and not self._use_cudnn + and not self._use_mkldnn + ): self._l_type = 'depthwise_conv2d' else: self._l_type = 'conv2d' # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups" if core.is_compiled_with_npu(): - if (self._num_channels == self._groups - and self._num_channels == self._num_filters): + if ( + self._num_channels == self._groups + and self._num_channels == self._num_filters + ): self._l_type = 'depthwise_conv2d' else: self._l_type = 'conv2d' @@ -219,48 +261,75 @@ class Conv2D(layers.Layer): filter_shape = [self._num_filters, num_filter_channels] + filter_size def _get_default_param_initializer(): - filter_elem_num = filter_size[0] * filter_size[ - 1] * self._num_channels - std = (2.0 / filter_elem_num)**0.5 + filter_elem_num = ( + filter_size[0] * filter_size[1] * self._num_channels + ) + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) self.weight = self.create_parameter( attr=self._param_attr, shape=filter_shape, dtype=self._dtype, - default_initializer=_get_default_param_initializer()) + default_initializer=_get_default_param_initializer(), + ) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): if in_dygraph_mode() and self._l_type == "conv2d": - pre_bias = _C_ops.conv2d(input, self.weight, self._stride, - self._padding, "EXPLICIT", - self._groups if self._groups else 1, - self._dilation, "NCHW", False, -1, False) + pre_bias = _C_ops.conv2d( + input, + self.weight, + self._stride, + self._padding, + "EXPLICIT", + self._groups if self._groups else 1, + self._dilation, + "NCHW", + False, + -1, + False, + ) if self.bias is not None: pre_act = F.elementwise_add(pre_bias, self.bias, axis=1) else: pre_act = pre_bias return dygraph_utils._append_activation_in_dygraph( - pre_act, self._act, use_mkldnn=self._use_mkldnn) - - if _non_static_mode() and (self._l_type == 'conv2d' - or self._l_type == 'depthwise_conv2d'): - attrs = ('strides', self._stride, 'paddings', self._padding, - 'dilations', self._dilation, 'groups', - self._groups if self._groups else 1, 'use_cudnn', - self._use_cudnn, 'use_mkldnn', self._use_mkldnn) + pre_act, self._act, use_mkldnn=self._use_mkldnn + ) + + if _non_static_mode() and ( + self._l_type == 'conv2d' or self._l_type == 'depthwise_conv2d' + ): + attrs = ( + 'strides', + self._stride, + 'paddings', + self._padding, + 'dilations', + self._dilation, + 'groups', + self._groups if self._groups else 1, + 'use_cudnn', + self._use_cudnn, + 'use_mkldnn', + self._use_mkldnn, + ) out = _legacy_C_ops.conv2d(input, self.weight, *attrs) pre_bias = out pre_act = dygraph_utils._append_bias_in_dygraph( - pre_bias, self.bias, 1, 
use_mkldnn=self._use_mkldnn) + pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn + ) return dygraph_utils._append_activation_in_dygraph( - pre_act, self._act, use_mkldnn=self._use_mkldnn) + pre_act, self._act, use_mkldnn=self._use_mkldnn + ) inputs = { 'Input': [input], 'Filter': [self.weight], @@ -274,32 +343,33 @@ class Conv2D(layers.Layer): 'use_mkldnn': self._use_mkldnn, } - check_variable_and_dtype(input, 'input', - ['float16', 'float32', 'float64'], 'Conv2D') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'Conv2D' + ) pre_bias = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) - self._helper.append_op(type=self._l_type, - inputs={ - 'Input': input, - 'Filter': self.weight, - }, - outputs={"Output": pre_bias}, - attrs=attrs) + self._helper.append_op( + type=self._l_type, + inputs={ + 'Input': input, + 'Filter': self.weight, + }, + outputs={"Output": pre_bias}, + attrs=attrs, + ) if self.bias is not None: pre_act = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self.bias] - }, - outputs={'Out': [pre_act]}, - attrs={ - 'axis': 1, - 'use_mkldnn': self._use_mkldnn - }) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self.bias]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1, 'use_mkldnn': self._use_mkldnn}, + ) else: pre_act = pre_bias @@ -418,19 +488,21 @@ class Conv3D(layers.Layer): """ - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - dtype='float32'): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + dtype='float32', + ): assert param_attr is not False, "param_attr should not be False here." 
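The legacy-op path in Conv2D.forward above passes its attributes as one flat tuple of alternating names and values, which the formatter now prints one element per line with a trailing comma; the same shape recurs in later hunks (Pool2D, BatchNorm, NCE). Below is a small sketch of that layout with an invented helper and placeholder values; nothing here is Paddle API.

```python
def pairs_to_dict(flat):
    """Fold an alternating ('key', value, 'key', value, ...) tuple into a dict."""
    if len(flat) % 2:
        raise ValueError("expected an even number of elements")
    return {flat[i]: flat[i + 1] for i in range(0, len(flat), 2)}


# One element per line with a trailing comma, matching how the real attrs
# tuples are laid out above; the values are placeholders.
attrs = (
    "strides",
    [1, 1],
    "paddings",
    [0, 0],
    "use_cudnn",
    True,
)

print(pairs_to_dict(attrs))
# {'strides': [1, 1], 'paddings': [0, 0], 'use_cudnn': True}
```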
super(Conv3D, self).__init__() self._num_channels = num_channels @@ -457,52 +529,61 @@ class Conv3D(layers.Layer): filter_shape = [self._num_filters, num_filter_channels] + filter_size def _get_default_param_initializer(): - filter_elem_num = filter_size[0] * filter_size[1] * filter_size[ - 2] * self._num_channels - std = (2.0 / filter_elem_num)**0.5 + filter_elem_num = ( + filter_size[0] + * filter_size[1] + * filter_size[2] + * self._num_channels + ) + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) self.weight = self.create_parameter( attr=self._param_attr, shape=filter_shape, dtype=self._dtype, - default_initializer=_get_default_param_initializer()) + default_initializer=_get_default_param_initializer(), + ) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): pre_bias = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - - self._helper.append_op(type='conv3d', - inputs={ - 'Input': input, - 'Filter': self.weight, - }, - outputs={"Output": pre_bias}, - attrs={ - 'strides': self._stride, - 'paddings': self._padding, - 'dilations': self._dilation, - 'groups': - self._groups if self._groups else 1, - 'use_cudnn': self._use_cudnn, - 'use_mkldnn': False - }) + dtype=self._dtype + ) + + self._helper.append_op( + type='conv3d', + inputs={ + 'Input': input, + 'Filter': self.weight, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': self._stride, + 'paddings': self._padding, + 'dilations': self._dilation, + 'groups': self._groups if self._groups else 1, + 'use_cudnn': self._use_cudnn, + 'use_mkldnn': False, + }, + ) if self.bias is not None: pre_act = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self.bias] - }, - outputs={'Out': [pre_act]}, - attrs={'axis': 1}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self.bias]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}, + ) else: pre_act = pre_bias @@ -651,23 +732,27 @@ class Conv3DTranspose(layers.Layer): """ - def __init__(self, - num_channels, - num_filters, - filter_size, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - dtype='float32'): + def __init__( + self, + num_channels, + num_filters, + filter_size, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + dtype='float32', + ): super(Conv3DTranspose, self).__init__() if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") - assert param_attr is not False, "param_attr should not be False in conv3d_transpose." + assert ( + param_attr is not False + ), "param_attr should not be False in conv3d_transpose." 
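The Conv3DTranspose hunk just above (and the BatchNorm and Conv2DTranspose hunks later) shows how a long `assert condition, message` gets rewrapped: the parentheses go around the condition only, never around `condition, message`, which would silently turn the assert into an always-true tuple check. A self-contained illustration with an invented placeholder value:

```python
param_attr = {"name": "w"}  # placeholder value for the sake of the example

# One-line form, as the code read before reformatting:
assert param_attr is not False, "param_attr should not be False here."

# Multi-line form, equivalent at runtime: only the condition is wrapped,
# the message stays attached after the closing parenthesis.
assert (
    param_attr is not False
), "param_attr should not be False here."
```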
self._padding = utils.convert_to_list(padding, 3, 'padding') self._stride = utils.convert_to_list(stride, 3, 'stride') self._dilation = utils.convert_to_list(dilation, 3, 'dilation') @@ -682,46 +767,50 @@ class Conv3DTranspose(layers.Layer): self._dtype = dtype self._filter_size = utils.convert_to_list( - self._filter_size, 3, 'conv3d_transpose.filter_size') - - filter_shape = [self._num_channels, self._num_filters // self._groups - ] + self._filter_size - self.weight = self.create_parameter(dtype=self._dtype, - shape=filter_shape, - attr=self._param_attr) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) + self._filter_size, 3, 'conv3d_transpose.filter_size' + ) + + filter_shape = [ + self._num_channels, + self._num_filters // self._groups, + ] + self._filter_size + self.weight = self.create_parameter( + dtype=self._dtype, shape=filter_shape, attr=self._param_attr + ) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): pre_bias = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type="conv3d_transpose", - inputs={ - 'Input': [input], - 'Filter': [self.weight] - }, - outputs={'Output': pre_bias}, - attrs={ - 'strides': self._stride, - 'paddings': self._padding, - 'dilations': self._dilation, - 'groups': - self._groups if self._groups else 1, - 'use_cudnn': self._use_cudnn - }) + dtype=self._dtype + ) + self._helper.append_op( + type="conv3d_transpose", + inputs={'Input': [input], 'Filter': [self.weight]}, + outputs={'Output': pre_bias}, + attrs={ + 'strides': self._stride, + 'paddings': self._padding, + 'dilations': self._dilation, + 'groups': self._groups if self._groups else 1, + 'use_cudnn': self._use_cudnn, + }, + ) if self._bias_attr: pre_act = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self.bias] - }, - outputs={'Out': [pre_act]}, - attrs={'axis': 1}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self.bias]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}, + ) else: pre_act = pre_bias @@ -835,27 +924,31 @@ class Pool2D(layers.Layer): """ - def __init__(self, - pool_size=-1, - pool_type="max", - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - exclusive=True, - data_format="NCHW"): + def __init__( + self, + pool_size=-1, + pool_type="max", + pool_stride=1, + pool_padding=0, + global_pooling=False, + use_cudnn=True, + ceil_mode=False, + exclusive=True, + data_format="NCHW", + ): data_format = data_format.upper() # supprt NHWC, nhwc, etc. pool_type = pool_type.lower() # supprt max, Max, etc. if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", - str(pool_type)) + str(pool_type), + ) if global_pooling is False and pool_size == -1: raise ValueError( "When the global_pooling is False, pool_size must be passed " - "and be a valid value. Received pool_size: " + str(pool_size)) + "and be a valid value. Received pool_size: " + str(pool_size) + ) if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") @@ -865,14 +958,16 @@ class Pool2D(layers.Layer): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. 
Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." % str(data_format) + ) super(Pool2D, self).__init__() self._pool_type = pool_type self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') - self._pool_padding = utils.convert_to_list(pool_padding, 2, - 'pool_padding') + self._pool_padding = utils.convert_to_list( + pool_padding, 2, 'pool_padding' + ) self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') self._global_pooling = global_pooling self._use_cudnn = use_cudnn @@ -884,23 +979,51 @@ class Pool2D(layers.Layer): def forward(self, input): if _non_static_mode(): if not self._use_mkldnn and in_dygraph_mode(): - return _C_ops.pool2d(input, self._pool_size, self._pool_stride, - self._pool_padding, self._ceil_mode, - self._exclusive, self._data_format, - self._pool_type, self._global_pooling, - False, "EXPLICIT", self._use_cudnn) - - attrs = ('pooling_type', self._pool_type, 'ksize', self._pool_size, - 'global_pooling', self._global_pooling, 'strides', - self._pool_stride, 'paddings', self._pool_padding, - 'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode, - 'use_mkldnn', self._use_mkldnn, 'exclusive', - self._exclusive, 'data_format', self._data_format) + return _C_ops.pool2d( + input, + self._pool_size, + self._pool_stride, + self._pool_padding, + self._ceil_mode, + self._exclusive, + self._data_format, + self._pool_type, + self._global_pooling, + False, + "EXPLICIT", + self._use_cudnn, + ) + + attrs = ( + 'pooling_type', + self._pool_type, + 'ksize', + self._pool_size, + 'global_pooling', + self._global_pooling, + 'strides', + self._pool_stride, + 'paddings', + self._pool_padding, + 'use_cudnn', + self._use_cudnn, + 'ceil_mode', + self._ceil_mode, + 'use_mkldnn', + self._use_mkldnn, + 'exclusive', + self._exclusive, + 'data_format', + self._data_format, + ) return _legacy_C_ops.pool2d(input, *attrs) check_variable_and_dtype( - input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'], - 'Pool2D') + input, + 'input', + ['int8', 'uint8', 'float16', 'float32', 'float64'], + 'Pool2D', + ) attrs = { "pooling_type": self._pool_type, @@ -918,10 +1041,12 @@ class Pool2D(layers.Layer): pool_out = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type=self._l_type, - inputs={"X": input}, - outputs={"Out": pool_out}, - attrs=attrs) + self._helper.append_op( + type=self._l_type, + inputs={"X": input}, + outputs={"Out": pool_out}, + attrs=attrs, + ) return pool_out @@ -977,44 +1102,60 @@ class Linear(layers.Layer): res = linear(data) # [30, 10, 64] """ - def __init__(self, - input_dim, - output_dim, - param_attr=None, - bias_attr=None, - act=None, - dtype="float32"): + def __init__( + self, + input_dim, + output_dim, + param_attr=None, + bias_attr=None, + act=None, + dtype="float32", + ): super(Linear, self).__init__() self._act = act self._dtype = dtype - self.weight = self.create_parameter(shape=[input_dim, output_dim], - attr=param_attr, - dtype=dtype, - is_bias=False) - self.bias = self.create_parameter(shape=[output_dim], - attr=bias_attr, - dtype=dtype, - is_bias=True) + self.weight = self.create_parameter( + shape=[input_dim, output_dim], + attr=param_attr, + dtype=dtype, + is_bias=False, + ) + self.bias = self.create_parameter( + shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True + ) self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"] def forward(self, input): if _non_static_mode(): pre_bias = _varbase_creator(dtype=input.dtype) - _legacy_C_ops.matmul(input, 
self.weight, pre_bias, 'transpose_X', - False, 'transpose_Y', False, "alpha", 1, - "use_mkldnn", self._use_mkldnn) + _legacy_C_ops.matmul( + input, + self.weight, + pre_bias, + 'transpose_X', + False, + 'transpose_Y', + False, + "alpha", + 1, + "use_mkldnn", + self._use_mkldnn, + ) pre_act = dygraph_utils._append_bias_in_dygraph( pre_bias, self.bias, axis=len(input.shape) - 1, - use_mkldnn=self._use_mkldnn) + use_mkldnn=self._use_mkldnn, + ) return dygraph_utils._append_activation_in_dygraph( - pre_act, self._act, use_mkldnn=self._use_mkldnn) + pre_act, self._act, use_mkldnn=self._use_mkldnn + ) - check_variable_and_dtype(input, 'input', - ['float16', 'float32', 'float64'], "Linear") + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], "Linear" + ) attrs = { "transpose_X": False, @@ -1025,23 +1166,22 @@ class Linear(layers.Layer): inputs = {"X": [input], "Y": [self.weight]} tmp = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type="matmul", - inputs=inputs, - outputs={"Out": tmp}, - attrs=attrs) + self._helper.append_op( + type="matmul", inputs=inputs, outputs={"Out": tmp}, attrs=attrs + ) if self.bias is not None: pre_activation = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [tmp], - 'Y': [self.bias] - }, - outputs={'Out': [pre_activation]}, - attrs={ - 'axis': len(input.shape) - 1, - 'use_mkldnn': self._use_mkldnn - }) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [tmp], 'Y': [self.bias]}, + outputs={'Out': [pre_activation]}, + attrs={ + 'axis': len(input.shape) - 1, + 'use_mkldnn': self._use_mkldnn, + }, + ) else: pre_activation = tmp return self._helper.append_activation(pre_activation, act=self._act) @@ -1115,16 +1255,20 @@ class InstanceNorm(layers.Layer): """ - def __init__(self, - num_channels, - epsilon=1e-5, - param_attr=None, - bias_attr=None, - dtype='float32'): + def __init__( + self, + num_channels, + epsilon=1e-5, + param_attr=None, + bias_attr=None, + dtype='float32', + ): super(InstanceNorm, self).__init__() if param_attr == False or bias_attr == False: - assert bias_attr == param_attr, "param_attr and bias_attr must be set to Fasle at the same time in InstanceNorm" + assert ( + bias_attr == param_attr + ), "param_attr and bias_attr must be set to Fasle at the same time in InstanceNorm" self._epsilon = epsilon self._param_attr = param_attr self._bias_attr = bias_attr @@ -1136,29 +1280,34 @@ class InstanceNorm(layers.Layer): shape=[num_channels], dtype=self._dtype, default_initializer=Constant(1.0), - is_bias=False) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[num_channels], - dtype=self._dtype, - default_initializer=Constant(0.0), - is_bias=True) + is_bias=False, + ) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[num_channels], + dtype=self._dtype, + default_initializer=Constant(0.0), + is_bias=True, + ) else: self.scale = None self.bias = None def forward(self, input): if in_dygraph_mode(): - out = _C_ops.instance_norm(input, self.scale, self.bias, - self._epsilon) + out = _C_ops.instance_norm( + input, self.scale, self.bias, self._epsilon + ) return out if _in_legacy_dygraph(): - out, _, _ = _legacy_C_ops.instance_norm(input, self.scale, - self.bias, 'epsilon', - self._epsilon) + out, _, _ = _legacy_C_ops.instance_norm( + input, self.scale, self.bias, 'epsilon', self._epsilon + ) return out - check_variable_and_dtype(input, 
'input', ['float32', 'float64'], - "InstanceNorm") + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], "InstanceNorm" + ) attrs = {"epsilon": self._epsilon} @@ -1168,22 +1317,24 @@ class InstanceNorm(layers.Layer): inputs = {"X": [input]} saved_mean = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) saved_variance = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) instance_norm_out = self._helper.create_variable_for_type_inference( - self._dtype) + self._dtype + ) outputs = { "Y": [instance_norm_out], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } - self._helper.append_op(type="instance_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) return instance_norm_out @@ -1285,29 +1436,33 @@ class BatchNorm(layers.Layer): hidden1 = batch_norm(x) """ - def __init__(self, - num_channels, - act=None, - is_test=False, - momentum=0.9, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - dtype='float32', - data_layout='NCHW', - in_place=False, - moving_mean_name=None, - moving_variance_name=None, - do_model_average_for_mean_and_var=True, - use_global_stats=False, - trainable_statistics=False): + def __init__( + self, + num_channels, + act=None, + is_test=False, + momentum=0.9, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + dtype='float32', + data_layout='NCHW', + in_place=False, + moving_mean_name=None, + moving_variance_name=None, + do_model_average_for_mean_and_var=True, + use_global_stats=False, + trainable_statistics=False, + ): super(BatchNorm, self).__init__() self._param_attr = param_attr self._bias_attr = bias_attr self._act = act self._use_mkldnn = _global_flags()["FLAGS_use_mkldnn"] - assert bias_attr is not False, "bias_attr should not be False in batch_norm." + assert ( + bias_attr is not False + ), "bias_attr should not be False in batch_norm." if dtype == "float16": self._dtype = "float32" @@ -1317,34 +1472,48 @@ class BatchNorm(layers.Layer): param_shape = [num_channels] # create parameter - self.weight = self.create_parameter(attr=self._param_attr, - shape=param_shape, - dtype=self._dtype, - default_initializer=Constant(1.0)) - self.weight.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0. - - self.bias = self.create_parameter(attr=self._bias_attr, - shape=param_shape, - dtype=self._dtype, - is_bias=True) - self.bias.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0. 
- - self._mean = self.create_parameter(attr=ParamAttr( - name=moving_mean_name, - initializer=Constant(0.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=self._dtype) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + default_initializer=Constant(1.0), + ) + self.weight.stop_gradient = ( + use_global_stats and self._param_attr.learning_rate == 0.0 + ) + + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True, + ) + self.bias.stop_gradient = ( + use_global_stats and self._param_attr.learning_rate == 0.0 + ) + + self._mean = self.create_parameter( + attr=ParamAttr( + name=moving_mean_name, + initializer=Constant(0.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=self._dtype, + ) self._mean.stop_gradient = True - self._variance = self.create_parameter(attr=ParamAttr( - name=moving_variance_name, - initializer=Constant(1.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=self._dtype) + self._variance = self.create_parameter( + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=self._dtype, + ) self._variance.stop_gradient = True self._in_place = in_place @@ -1366,29 +1535,61 @@ class BatchNorm(layers.Layer): if _non_static_mode(): if in_dygraph_mode(): batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm( - input, self.weight, self.bias, self._mean, self._variance, - self._momentum, self._epsilon, self._data_layout, - not self.training, self._use_global_stats, - self._trainable_statistics, False) + input, + self.weight, + self.bias, + self._mean, + self._variance, + self._momentum, + self._epsilon, + self._data_layout, + not self.training, + self._use_global_stats, + self._trainable_statistics, + False, + ) return dygraph_utils._append_activation_in_dygraph( - batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn) + batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn + ) elif _in_legacy_dygraph(): - attrs = ("momentum", self._momentum, "epsilon", self._epsilon, - "is_test", not self.training, "data_layout", - self._data_layout, "use_mkldnn", self._use_mkldnn, - "fuse_with_relu", self._fuse_with_relu, - "use_global_stats", self._use_global_stats, - 'trainable_statistics', self._trainable_statistics) + attrs = ( + "momentum", + self._momentum, + "epsilon", + self._epsilon, + "is_test", + not self.training, + "data_layout", + self._data_layout, + "use_mkldnn", + self._use_mkldnn, + "fuse_with_relu", + self._fuse_with_relu, + "use_global_stats", + self._use_global_stats, + 'trainable_statistics', + self._trainable_statistics, + ) batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm( - input, self.weight, self.bias, self._mean, self._variance, - None, mean_out, variance_out, *attrs) + input, + self.weight, + self.bias, + self._mean, + self._variance, + None, + mean_out, + variance_out, + *attrs + ) return dygraph_utils._append_activation_in_dygraph( - batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn) + batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn + ) - check_variable_and_dtype(input, 'input', - ['float16', 'float32', 'float64'], 'BatchNorm') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm' + ) 
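Context for the reformatted `batch_norm` calls above, which thread `self._momentum` together with the `_mean`/`_variance` parameters created with `trainable=False`: batch normalization maintains those running statistics as an exponential moving average. The real update happens inside the op, not in Python; the sketch below only restates the usual relation, using the 0.9 default from the layer's signature.

```python
def update_running_stat(running, batch_value, momentum=0.9):
    """Exponential moving average used for the moving mean/variance."""
    return momentum * running + (1.0 - momentum) * batch_value


running_mean = 0.0
for batch_mean in [1.0, 1.2, 0.8]:
    running_mean = update_running_stat(running_mean, batch_mean)

print(round(running_mean, 3))  # roughly 0.269 after three batches
```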
attrs = { "momentum": self._momentum, @@ -1406,33 +1607,38 @@ class BatchNorm(layers.Layer): "Scale": [self.weight], "Bias": [self.bias], "Mean": [self._mean], - "Variance": [self._variance] + "Variance": [self._variance], } saved_mean = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) saved_variance = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) reserve_space = self._helper.create_variable_for_type_inference( - dtype=self._helper.input_dtype(input), stop_gradient=True) + dtype=self._helper.input_dtype(input), stop_gradient=True + ) - batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference( - self._dtype) + batch_norm_out = ( + input + if self._in_place + else self._helper.create_variable_for_type_inference(self._dtype) + ) outputs = { "Y": [batch_norm_out], "MeanOut": [mean_out], "VarianceOut": [variance_out], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } if reserve_space is not None: outputs["ReserveSpace"] = [reserve_space] - self._helper.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) # Currently, we don't support inplace in dygraph mode return self._helper.append_activation(batch_norm_out, self._act) @@ -1440,78 +1646,82 @@ class BatchNorm(layers.Layer): class Dropout(layers.Layer): """ - This interface is used to construct a callable object of the ``Dropout`` class. - For more details, refer to code examples. + This interface is used to construct a callable object of the ``Dropout`` class. + For more details, refer to code examples. - Drop or keep each element of input independently. Dropout is a regularization - technique for reducing overfitting by preventing neuron co-adaption during - training. The dropout operator randomly sets (according to the given dropout - probability) the outputs of some units to zero, while others are remain - unchanged. + Drop or keep each element of input independently. Dropout is a regularization + technique for reducing overfitting by preventing neuron co-adaption during + training. The dropout operator randomly sets (according to the given dropout + probability) the outputs of some units to zero, while others are remain + unchanged. - Dropout layer can be removed for efficiency concern. + Dropout layer can be removed for efficiency concern. - Parameters: - p (float, optional): Probability of setting units to zero. Default: 0.5 - seed (int, optional): A Python integer used to create random seeds. If this - parameter is set to None, a random seed is used. - NOTE: If an integer seed is given, always the same output - units will be dropped. DO NOT use a fixed seed in training. Default: None. - dropout_implementation(string, optional): ['downgrade_in_infer'(default)|'upscale_in_train'] + Parameters: + p (float, optional): Probability of setting units to zero. Default: 0.5 + seed (int, optional): A Python integer used to create random seeds. If this + parameter is set to None, a random seed is used. + NOTE: If an integer seed is given, always the same output + units will be dropped. DO NOT use a fixed seed in training. Default: None. + dropout_implementation(string, optional): ['downgrade_in_infer'(default)|'upscale_in_train'] - 1. 
downgrade_in_infer(default), downgrade the outcome at inference + 1. downgrade_in_infer(default), downgrade the outcome at inference - - train: out = input * mask - - inference: out = input * (1.0 - p) + - train: out = input * mask + - inference: out = input * (1.0 - p) - (mask is a tensor same shape with input, value is 0 or 1 - ratio of 0 is dropout_prob) - 2. upscale_in_train, upscale the outcome at training time + (mask is a tensor same shape with input, value is 0 or 1 + ratio of 0 is dropout_prob) + 2. upscale_in_train, upscale the outcome at training time - - train: out = input * mask / ( 1.0 - p ) - - inference: out = input + - train: out = input * mask / ( 1.0 - p ) + - inference: out = input - (mask is a tensor same shape with input, value is 0 or 1 - ratio of 0 is p) - is_test (bool, optional): A flag indicating whether it is in test phrase or not. - This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``. - Default: False. + (mask is a tensor same shape with input, value is 0 or 1 + ratio of 0 is p) + is_test (bool, optional): A flag indicating whether it is in test phrase or not. + This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``. + Default: False. - Returns: - None + Returns: + None - Examples: + Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - from paddle.fluid.dygraph.base import to_variable - import numpy as np - - x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - with fluid.dygraph.guard(): - x = to_variable(x) - m = fluid.dygraph.Dropout(p=0.5) - droped_train = m(x) - # switch to eval mode - m.eval() - droped_eval = m(x) - """ - - def __init__(self, - p=0.5, - seed=None, - dropout_implementation="downgrade_in_infer", - is_test=False): + import paddle.fluid as fluid + from paddle.fluid.dygraph.base import to_variable + import numpy as np + + x = np.random.random(size=(3, 10, 3, 7)).astype('float32') + with fluid.dygraph.guard(): + x = to_variable(x) + m = fluid.dygraph.Dropout(p=0.5) + droped_train = m(x) + # switch to eval mode + m.eval() + droped_eval = m(x) + """ + + def __init__( + self, + p=0.5, + seed=None, + dropout_implementation="downgrade_in_infer", + is_test=False, + ): super(Dropout, self).__init__() assert isinstance(p, (float, int)), "p argument should be a number" assert 0 <= p <= 1, "p argument should between 0 and 1" self._dropout_prob = p assert seed is None or isinstance( - seed, int), "seed argument should be None or a integer" + seed, int + ), "seed argument should be None or a integer" self._seed = seed assert dropout_implementation in ( - 'downgrade_in_infer', 'upscale_in_train' + 'downgrade_in_infer', + 'upscale_in_train', ), "dropout_implementation argument should be 'downgrade_in_infer' or 'upscale_in_train'" self._dropout_implementation = dropout_implementation self._is_test = is_test @@ -1525,8 +1735,9 @@ class Dropout(layers.Layer): self._seed = prog.random_seed attrs = { 'dropout_prob': self._dropout_prob, - 'is_test': - not self.training if _non_static_mode() else self._is_test, + 'is_test': not self.training + if _non_static_mode() + else self._is_test, 'fix_seed': self._seed is not None, 'seed': self._seed if self._seed is not None else 0, 'dropout_implementation': self._dropout_implementation, @@ -1539,23 +1750,23 @@ class Dropout(layers.Layer): out = self._helper.create_variable_for_type_inference(dtype=input.dtype) mask = self._helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, 
stop_gradient=True) - - self._helper.append_op(type='dropout', - inputs={'X': [input]}, - outputs={ - 'Out': [out], - 'Mask': [mask] - }, - attrs=attrs) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) + + self._helper.append_op( + type='dropout', + inputs={'X': [input]}, + outputs={'Out': [out], 'Mask': [mask]}, + attrs=attrs, + ) return out class Embedding(layers.Layer): r""" :alias_main: paddle.nn.Embedding - :alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding - :old_api: paddle.fluid.dygraph.Embedding + :alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding + :old_api: paddle.fluid.dygraph.Embedding **Embedding Layer** @@ -1659,19 +1870,26 @@ class Embedding(layers.Layer): static_rlt3 = emb(base.to_variable(inp_word)) """ - def __init__(self, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): + def __init__( + self, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', + ): super(Embedding, self).__init__() self._size = size self._is_sparse = is_sparse self._is_distributed = is_distributed - self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) + self._padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (size[0] + padding_idx) + ) self._param_attr = param_attr self._dtype = dtype @@ -1679,36 +1897,48 @@ class Embedding(layers.Layer): if self._remote_prefetch: assert self._is_sparse is True and self._is_distributed is False - self.weight = self.create_parameter(attr=self._param_attr, - shape=self._size, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False, + ) def forward(self, input): if _non_static_mode(): return _legacy_C_ops.lookup_table_v2( - self.weight, input, 'is_sparse', self._is_sparse, - 'is_distributed', self._is_distributed, 'remote_prefetch', - self._remote_prefetch, 'padding_idx', self._padding_idx) + self.weight, + input, + 'is_sparse', + self._is_sparse, + 'is_distributed', + self._is_distributed, + 'remote_prefetch', + self._remote_prefetch, + 'padding_idx', + self._padding_idx, + ) - check_variable_and_dtype(input, 'input', - ['uint8', 'int8', 'int16', 'int32', 'int64'], - 'Embedding') + check_variable_and_dtype( + input, + 'input', + ['uint8', 'int8', 'int16', 'int32', 'int64'], + 'Embedding', + ) attrs = { 'is_sparse': self._is_sparse, 'is_distributed': self._is_distributed, 'remote_prefetch': self._remote_prefetch, - 'padding_idx': self._padding_idx + 'padding_idx': self._padding_idx, } out = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type='lookup_table_v2', - inputs={ - 'Ids': input, - 'W': self.weight - }, - outputs={'Out': out}, - attrs=attrs) + self._helper.append_op( + type='lookup_table_v2', + inputs={'Ids': input, 'W': self.weight}, + outputs={'Out': out}, + attrs=attrs, + ) return out @@ -1716,8 +1946,8 @@ class Embedding(layers.Layer): class LayerNorm(layers.Layer): r""" :alias_main: paddle.nn.LayerNorm - :alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm - :old_api: paddle.fluid.dygraph.LayerNorm + :alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm + :old_api: paddle.fluid.dygraph.LayerNorm This interface is used to construct a callable object of the 
``LayerNorm`` class. For more details, refer to code examples. @@ -1784,15 +2014,17 @@ class LayerNorm(layers.Layer): """ - def __init__(self, - normalized_shape, - scale=True, - shift=True, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - act=None, - dtype='float32'): + def __init__( + self, + normalized_shape, + scale=True, + shift=True, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + dtype='float32', + ): super(LayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = [normalized_shape] @@ -1811,7 +2043,8 @@ class LayerNorm(layers.Layer): attr=self._param_attr, shape=param_shape, dtype=self._dtype, - default_initializer=Constant(1.0)) + default_initializer=Constant(1.0), + ) else: if self._param_attr: logging.warn("param_attr are only available with scale is True") @@ -1819,10 +2052,12 @@ class LayerNorm(layers.Layer): if self._shift: assert self._bias_attr is not False - self.bias = self.create_parameter(attr=self._bias_attr, - shape=param_shape, - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True, + ) else: if self._bias_attr: logging.warn("bias_attr are only available with shift is True") @@ -1833,31 +2068,50 @@ class LayerNorm(layers.Layer): input_ndim = len(input_shape) normalized_ndim = len(self._normalized_shape) self._begin_norm_axis = input_ndim - normalized_ndim - if input_ndim < normalized_ndim or input_shape[ - self._begin_norm_axis:] != self._normalized_shape: + if ( + input_ndim < normalized_ndim + or input_shape[self._begin_norm_axis :] != self._normalized_shape + ): str_normalized_shape = str(self._normalized_shape) - raise ValueError('Given normalized_shape is ' + - str_normalized_shape + - ', expected input with shape [*, ' + - str_normalized_shape[1:] + - ', but got input shape ' + str(input_shape)) + raise ValueError( + 'Given normalized_shape is ' + + str_normalized_shape + + ', expected input with shape [*, ' + + str_normalized_shape[1:] + + ', but got input shape ' + + str(input_shape) + ) if _non_static_mode(): if in_dygraph_mode(): - pre_act, _, _, = _C_ops.layer_norm(input, self.weight, - self.bias, self._epsilon, - self._begin_norm_axis, False) + pre_act, _, _, = _C_ops.layer_norm( + input, + self.weight, + self.bias, + self._epsilon, + self._begin_norm_axis, + False, + ) return dygraph_utils._append_activation_in_dygraph( - pre_act, act=self._act) + pre_act, act=self._act + ) else: pre_act, _, _ = _legacy_C_ops.layer_norm( - input, self.weight, self.bias, 'epsilon', self._epsilon, - 'begin_norm_axis', self._begin_norm_axis) + input, + self.weight, + self.bias, + 'epsilon', + self._epsilon, + 'begin_norm_axis', + self._begin_norm_axis, + ) return dygraph_utils._append_activation_in_dygraph( - pre_act, act=self._act) + pre_act, act=self._act + ) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'LayerNorm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'LayerNorm' + ) inputs = dict() inputs['X'] = [input] @@ -1867,28 +2121,33 @@ class LayerNorm(layers.Layer): inputs['Bias'] = [self.bias] attrs = { "epsilon": self._epsilon, - "begin_norm_axis": self._begin_norm_axis + "begin_norm_axis": self._begin_norm_axis, } # create output mean_out = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) variance_out = self._helper.create_variable_for_type_inference( - dtype=self._dtype, 
stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) layer_norm_out = self._helper.create_variable_for_type_inference( - self._dtype) - - self._helper.append_op(type="layer_norm", - inputs=inputs, - outputs={ - "Y": layer_norm_out, - "Mean": mean_out, - "Variance": variance_out, - }, - attrs={ - "epsilon": self._epsilon, - "begin_norm_axis": self._begin_norm_axis - }) + self._dtype + ) + + self._helper.append_op( + type="layer_norm", + inputs=inputs, + outputs={ + "Y": layer_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={ + "epsilon": self._epsilon, + "begin_norm_axis": self._begin_norm_axis, + }, + ) return self._helper.append_activation(layer_norm_out, act=self._act) @@ -2002,14 +2261,16 @@ class GRUUnit(layers.Layer): """ - def __init__(self, - size, - param_attr=None, - bias_attr=None, - activation='tanh', - gate_activation='sigmoid', - origin_mode=False, - dtype='float32'): + def __init__( + self, + size, + param_attr=None, + bias_attr=None, + activation='tanh', + gate_activation='sigmoid', + origin_mode=False, + dtype='float32', + ): super(GRUUnit, self).__init__() self._bias_attr = bias_attr activation_dict = dict( @@ -2024,52 +2285,64 @@ class GRUUnit(layers.Layer): self._dtype = dtype size = size // 3 # create weight - self.weight = self.create_parameter(attr=param_attr, - shape=[size, 3 * size], - dtype=dtype) + self.weight = self.create_parameter( + attr=param_attr, shape=[size, 3 * size], dtype=dtype + ) # create bias bias_size = [1, 3 * size] self._bias_size = bias_size - self.bias = self.create_parameter(attr=bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True + ) def forward(self, input, hidden): if _non_static_mode(): gate, reset_hidden_pre, updated_hidden = _legacy_C_ops.gru_unit( - input, hidden, self.weight, self.bias, 'activation', - self.activation, 'gate_activation', self.gate_activation) + input, + hidden, + self.weight, + self.bias, + 'activation', + self.activation, + 'gate_activation', + self.gate_activation, + ) return updated_hidden, reset_hidden_pre, gate - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'GRUUnit') - check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'], - 'GRUUnit') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'GRUUnit' + ) + check_variable_and_dtype( + hidden, 'hidden', ['float32', 'float64'], 'GRUUnit' + ) inputs = { 'Input': [input], 'HiddenPrev': [hidden], - 'Weight': [self.weight] + 'Weight': [self.weight], } if self.bias is not None: inputs['Bias'] = [self.bias] gate = self._helper.create_variable_for_type_inference(self._dtype) reset_hidden_pre = self._helper.create_variable_for_type_inference( - self._dtype) + self._dtype + ) updated_hidden = self._helper.create_variable_for_type_inference( - self._dtype) - self._helper.append_op(type='gru_unit', - inputs=inputs, - outputs={ - 'Gate': gate, - 'ResetHiddenPrev': reset_hidden_pre, - 'Hidden': updated_hidden, - }, - attrs={ - 'activation': self.activation, - 'gate_activation': self.gate_activation, - }) + self._dtype + ) + self._helper.append_op( + type='gru_unit', + inputs=inputs, + outputs={ + 'Gate': gate, + 'ResetHiddenPrev': reset_hidden_pre, + 'Hidden': updated_hidden, + }, + attrs={ + 'activation': self.activation, + 'gate_activation': self.gate_activation, + }, + ) return updated_hidden, reset_hidden_pre, gate @@ -2161,25 +2434,29 @@ class NCE(layers.Layer): """ - def __init__(self, - 
num_total_classes, - dim, - sample_weight=None, - param_attr=None, - bias_attr=None, - num_neg_samples=None, - sampler="uniform", - custom_dist=None, - seed=0, - is_sparse=False, - dtype='float32'): + def __init__( + self, + num_total_classes, + dim, + sample_weight=None, + param_attr=None, + bias_attr=None, + num_neg_samples=None, + sampler="uniform", + custom_dist=None, + seed=0, + is_sparse=False, + dtype='float32', + ): super(NCE, self).__init__() self._param_attr = param_attr self._bias_attr = bias_attr self._num_total_classes = num_total_classes self._dtype = dtype self._inputs = dict() - self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else [] + self._inputs['SampleWeight'] = ( + sample_weight if sample_weight is not None else [] + ) if sampler == "uniform": sampler = 0 elif sampler == "log_uniform": @@ -2235,16 +2512,20 @@ class NCE(layers.Layer): attr=ParamAttr(), shape=numpy_array.shape, dtype=numpy_array.dtype, - default_initializer=NumpyArrayInitializer(numpy_array)) + default_initializer=NumpyArrayInitializer(numpy_array), + ) ret.stop_gradient = True return ret self._inputs['CustomDistProbs'] = _init_by_numpy_array( - np.array(custom_dist).astype('float32')) + np.array(custom_dist).astype('float32') + ) self._inputs['CustomDistAlias'] = _init_by_numpy_array( - np.array(alias_).astype('int32')) + np.array(alias_).astype('int32') + ) self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array( - np.array(alias_probs_).astype('float32')) + np.array(alias_probs_).astype('float32') + ) sampler = 2 else: raise Exception("Unsupported sampler type.") @@ -2264,64 +2545,88 @@ class NCE(layers.Layer): 'seed': seed, 'sampler': sampler, 'is_sparse': is_sparse, - 'remote_prefetch': remote_prefetch + 'remote_prefetch': remote_prefetch, } self.weight = self.create_parameter( attr=self._param_attr, shape=[self._num_total_classes, dim], is_bias=False, - dtype=self._dtype) + dtype=self._dtype, + ) if self._bias_attr: self.bias = self.create_parameter( attr=self._bias_attr, shape=[self._num_total_classes, 1], is_bias=True, - dtype=self._dtype) + dtype=self._dtype, + ) self._inputs['Bias'] = self.bias self._inputs['Weight'] = self.weight def forward(self, input, label, sample_weight=None): if _non_static_mode(): - attrs = ('num_total_classes', self._attrs['num_total_classes'], - 'num_neg_samples', self._attrs['num_neg_samples'], 'seed', - self._attrs['seed'], 'sampler', self._attrs['sampler'], - 'is_sparse', self._attrs['is_sparse'], 'remote_prefetch', - self._attrs['remote_prefetch']) - cost, _, _ = _legacy_C_ops.nce(input, label, self.weight, self.bias, - self._inputs['SampleWeight'], - self._inputs['CustomDistProbs'], - self._inputs['CustomDistAlias'], - self._inputs['CustomDistAliasProbs'], - *attrs) + attrs = ( + 'num_total_classes', + self._attrs['num_total_classes'], + 'num_neg_samples', + self._attrs['num_neg_samples'], + 'seed', + self._attrs['seed'], + 'sampler', + self._attrs['sampler'], + 'is_sparse', + self._attrs['is_sparse'], + 'remote_prefetch', + self._attrs['remote_prefetch'], + ) + cost, _, _ = _legacy_C_ops.nce( + input, + label, + self.weight, + self.bias, + self._inputs['SampleWeight'], + self._inputs['CustomDistProbs'], + self._inputs['CustomDistAlias'], + self._inputs['CustomDistAliasProbs'], + *attrs + ) return cost / (self._num_neg_samples + 1) check_variable_and_dtype(input, "input", ['float32', 'float64'], "NCE") check_variable_and_dtype(label, "label", ['int64'], "NCE") - check_type(sample_weight, 'sample_weight', (Variable, type(None)), - 
'NCE') + check_type( + sample_weight, 'sample_weight', (Variable, type(None)), 'NCE' + ) assert isinstance(input, Variable) assert isinstance(label, Variable) self._inputs['Input'] = input self._inputs['Label'] = label - self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else [] + self._inputs['SampleWeight'] = ( + sample_weight if sample_weight is not None else [] + ) cost = self._helper.create_variable_for_type_inference( - dtype=input.dtype) + dtype=input.dtype + ) sample_logits = self._helper.create_variable_for_type_inference( - dtype=input.dtype) + dtype=input.dtype + ) sample_labels = self._helper.create_variable_for_type_inference( - dtype=label.dtype) - - self._helper.append_op(type='nce', - inputs=self._inputs, - outputs={ - 'Cost': cost, - 'SampleLogits': sample_logits, - 'SampleLabels': sample_labels - }, - attrs=self._attrs) + dtype=label.dtype + ) + + self._helper.append_op( + type='nce', + inputs=self._inputs, + outputs={ + 'Cost': cost, + 'SampleLogits': sample_logits, + 'SampleLabels': sample_labels, + }, + attrs=self._attrs, + ) return cost / (self._num_neg_samples + 1) @@ -2385,12 +2690,14 @@ class PRelu(layers.Layer): """ - def __init__(self, - mode, - channel=None, - input_shape=None, - param_attr=None, - dtype='float32'): + def __init__( + self, + mode, + channel=None, + input_shape=None, + param_attr=None, + dtype='float32', + ): # need specify name_scope since snake-cased 'PRelu' is 'p_relu' super(PRelu, self).__init__(name_scope='prelu') self._mode = mode @@ -2400,26 +2707,27 @@ class PRelu(layers.Layer): self._alpha_shape = [1] elif mode == 'channel': assert isinstance( - channel, - int), "channel argument is required when mode is 'channel'." - #NOTE(zhiqiu): The _alpha_shape should be [1, channel] + [1] * len(input_shape[2:]), not [1, channel, 1, 1]. + channel, int + ), "channel argument is required when mode is 'channel'." + # NOTE(zhiqiu): The _alpha_shape should be [1, channel] + [1] * len(input_shape[2:]), not [1, channel, 1, 1]. # However, the suffix 1 in the list is useless, since the tensor is viewed as one demension array during kernel calculation. # And, input_shape is not required when mode is 'channel', so it is simplified. - #NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version. + # NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version. self._alpha_shape = [1, channel, 1, 1] elif mode == 'element': assert isinstance( - input_shape, - (list, tuple - )), "input_shape argument is required when mode is 'element'." + input_shape, (list, tuple) + ), "input_shape argument is required when mode is 'element'." 
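The PRelu hunk above chooses the shape of the learnable slope from `mode`; since the three branches are spread across a lot of reformatting noise, here is the same selection restated as a standalone function (function name and messages invented, shapes as in the code above):

```python
def prelu_alpha_shape(mode, channel=None, input_shape=None):
    """Shape of the learnable alpha, as selected in PRelu.__init__ above."""
    if mode == "all":
        return [1]  # one slope shared by every element
    if mode == "channel":
        assert isinstance(channel, int), "channel is required for 'channel'"
        return [1, channel, 1, 1]  # kept 4-D for compatibility with old saved models
    if mode == "element":
        assert isinstance(
            input_shape, (list, tuple)
        ), "input_shape is required for 'element'"
        return [1] + list(input_shape)[1:]
    raise ValueError("mode should be one of all, channel, element.")


print(prelu_alpha_shape("all"))                                  # [1]
print(prelu_alpha_shape("channel", channel=8))                   # [1, 8, 1, 1]
print(prelu_alpha_shape("element", input_shape=[4, 8, 32, 32]))  # [1, 8, 32, 32]
```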
self._alpha_shape = [1] + list(input_shape)[1:] else: raise ValueError('mode should be one of all, channel, element.') - self.weight = self.create_parameter(attr=self._param_attr, - shape=self._alpha_shape, - dtype='float32', - is_bias=False, - default_initializer=Constant(1.0)) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=self._alpha_shape, + dtype='float32', + is_bias=False, + default_initializer=Constant(1.0), + ) def forward(self, input): if in_dygraph_mode(): @@ -2427,13 +2735,12 @@ class PRelu(layers.Layer): check_variable_and_dtype(input, 'input', ['float32'], 'PRelu') out = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type="prelu", - inputs={ - "X": input, - 'Alpha': self.weight - }, - attrs={"mode": self._mode}, - outputs={"Out": out}) + self._helper.append_op( + type="prelu", + inputs={"X": input, 'Alpha': self.weight}, + attrs={"mode": self._mode}, + outputs={"Out": out}, + ) return out @@ -2492,15 +2799,17 @@ class BilinearTensorProduct(layers.Layer): """ - def __init__(self, - input1_dim, - input2_dim, - output_dim, - name=None, - act=None, - param_attr=None, - bias_attr=None, - dtype='float32'): + def __init__( + self, + input1_dim, + input2_dim, + output_dim, + name=None, + act=None, + param_attr=None, + bias_attr=None, + dtype='float32', + ): super(BilinearTensorProduct, self).__init__() self._param_attr = param_attr self._bias_attr = bias_attr @@ -2513,38 +2822,50 @@ class BilinearTensorProduct(layers.Layer): self._dtype = dtype param_shape = [self._output_dim, self._input1_dim, self._input2_dim] - self.weight = self.create_parameter(attr=self._param_attr, - shape=param_shape, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=False, + ) bias_size = [1, self._output_dim] - self.bias = self.create_parameter(attr=self._bias_attr, - shape=bias_size, - dtype=self._dtype, - is_bias=True) - - @deprecated(since="2.0.0", - update_to="paddle.nn.Bilinear", - reason="New name and new args in Bilinear, easier to use.") + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=bias_size, + dtype=self._dtype, + is_bias=True, + ) + + @deprecated( + since="2.0.0", + update_to="paddle.nn.Bilinear", + reason="New name and new args in Bilinear, easier to use.", + ) def forward(self, x, y): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'BilinearTensorProduct') - check_variable_and_dtype(y, 'y', ['float32', 'float64'], - 'BilinearTensorProduct') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'BilinearTensorProduct' + ) + check_variable_and_dtype( + y, 'y', ['float32', 'float64'], 'BilinearTensorProduct' + ) self._inputs = {"X": x, "Y": y, "Weight": self.weight} if self.bias is not None: self._inputs["Bias"] = self.bias if self._name is not None: - out = self._helper.create_variable(name=".".join( - [self.full_name(), self._name]), - dtype=self._dtype, - persistable=False) + out = self._helper.create_variable( + name=".".join([self.full_name(), self._name]), + dtype=self._dtype, + persistable=False, + ) else: - out = self._helper.create_variable(dtype=self._dtype, - persistable=False) - self._helper.append_op(type="bilinear_tensor_product", - inputs=self._inputs, - outputs={"Out": out}) + out = self._helper.create_variable( + dtype=self._dtype, persistable=False + ) + self._helper.append_op( + type="bilinear_tensor_product", + inputs=self._inputs, + outputs={"Out": out}, + ) # add 
activation return self._helper.append_activation(out, act=self._act) @@ -2668,22 +2989,26 @@ class Conv2DTranspose(layers.Layer): """ - def __init__(self, - num_channels, - num_filters, - filter_size, - output_size=None, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - dtype='float32'): + def __init__( + self, + num_channels, + num_filters, + filter_size, + output_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + dtype='float32', + ): super(Conv2DTranspose, self).__init__() - assert param_attr is not False, "param_attr should not be False in conv2d_transpose." + assert ( + param_attr is not False + ), "param_attr should not be False in conv2d_transpose." self._param_attr = param_attr self._bias_attr = bias_attr self._act = act @@ -2698,9 +3023,11 @@ class Conv2DTranspose(layers.Layer): self._output_size = output_size self._dtype = dtype - if (self._num_channels == self._groups - and self._num_filters == self._num_channels - and not self._use_cudnn): + if ( + self._num_channels == self._groups + and self._num_filters == self._num_channels + and not self._use_cudnn + ): self._op_type = 'depthwise_conv2d_transpose' else: self._op_type = 'conv2d_transpose' @@ -2710,63 +3037,91 @@ class Conv2DTranspose(layers.Layer): self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation') self._filter_size = utils.convert_to_list( - self._filter_size, 2, 'conv2d_transpose.filter_size') + self._filter_size, 2, 'conv2d_transpose.filter_size' + ) if self._output_size is None: self._output_size = [] elif isinstance(self._output_size, list): if utils._contain_var(self._output_size): self._output_size = utils._convert_to_tensor_list( - self._output_size) + self._output_size + ) else: self._output_size = utils.convert_to_list( - self._output_size, 2, 'output_size') + self._output_size, 2, 'output_size' + ) elif isinstance(self._output_size, int): - self._output_size = utils.convert_to_list(self._output_size, 2, - 'output_size') + self._output_size = utils.convert_to_list( + self._output_size, 2, 'output_size' + ) elif isinstance(self._output_size, Variable): - check_dtype(self._output_size.dtype, 'output_size', - ['int32', 'int64'], 'Conv2DTranspose') + check_dtype( + self._output_size.dtype, + 'output_size', + ['int32', 'int64'], + 'Conv2DTranspose', + ) if len(self._output_size.shape) == 1 and ( - self._output_size.shape[0] == 1 - or self._output_size.shape[0] == 2): + self._output_size.shape[0] == 1 + or self._output_size.shape[0] == 2 + ): if self._output_size.shape[0] == 1: self._output_size = [self._output_size, self._output_size] else: raise ValueError( - "output_size must contain one or two integers.") + "output_size must contain one or two integers." 
+ ) else: raise ValueError("output_size should be list or int or Tensor") self._padding = utils.convert_to_list(self._padding, 2, 'padding') self._groups = 1 if self._groups is None else self._groups - filter_shape = [self._num_channels, self._num_filters // self._groups - ] + self._filter_size + filter_shape = [ + self._num_channels, + self._num_filters // self._groups, + ] + self._filter_size - self.weight = self.create_parameter(dtype=self._dtype, - shape=filter_shape, - attr=self._param_attr) + self.weight = self.create_parameter( + dtype=self._dtype, shape=filter_shape, attr=self._param_attr + ) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): if _non_static_mode(): op = getattr(_legacy_C_ops, self._op_type) - out = op(input, self.weight, 'output_size', self._output_size, - 'strides', self._stride, 'paddings', self._padding, - 'dilations', self._dilation, 'groups', self._groups, - 'use_cudnn', self._use_cudnn) + out = op( + input, + self.weight, + 'output_size', + self._output_size, + 'strides', + self._stride, + 'paddings', + self._padding, + 'dilations', + self._dilation, + 'groups', + self._groups, + 'use_cudnn', + self._use_cudnn, + ) pre_bias = out pre_act = dygraph_utils._append_bias_in_dygraph( - pre_bias, self.bias, 1) - return dygraph_utils._append_activation_in_dygraph(pre_act, - act=self._act) + pre_bias, self.bias, 1 + ) + return dygraph_utils._append_activation_in_dygraph( + pre_act, act=self._act + ) - check_variable_and_dtype(input, 'input', - ['float16', 'float32', 'float64'], - "Conv2DTranspose") + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], "Conv2DTranspose" + ) inputs = {'Input': [input], 'Filter': [self.weight]} attrs = { @@ -2775,26 +3130,29 @@ class Conv2DTranspose(layers.Layer): 'paddings': self._padding, 'dilations': self._dilation, 'groups': self._groups, - 'use_cudnn': self._use_cudnn + 'use_cudnn': self._use_cudnn, } pre_bias = self._helper.create_variable_for_type_inference( - dtype=input.dtype) - self._helper.append_op(type=self._op_type, - inputs=inputs, - outputs={'Output': pre_bias}, - attrs=attrs) + dtype=input.dtype + ) + self._helper.append_op( + type=self._op_type, + inputs=inputs, + outputs={'Output': pre_bias}, + attrs=attrs, + ) if self.bias is not None: pre_act = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self.bias] - }, - outputs={'Out': [pre_act]}, - attrs={'axis': 1}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self.bias]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}, + ) else: pre_act = pre_bias @@ -2834,16 +3192,19 @@ class SequenceConv(layers.Layer): Variable: output of sequence_conv """ - def __init__(self, - name_scope, - num_filters, - filter_size=3, - filter_stride=1, - padding=None, - bias_attr=None, - param_attr=None, - act=None): - assert not _non_static_mode( + def __init__( + self, + name_scope, + num_filters, + filter_size=3, + filter_stride=1, + padding=None, + bias_attr=None, + param_attr=None, + act=None, + ): + assert ( + not _non_static_mode() ), "SequenceConv is not supported by dynamic graph mode yet!" 
super(SequenceConv, self).__init__(name_scope) self._num_filters = num_filters @@ -2857,39 +3218,43 @@ class SequenceConv(layers.Layer): def _build_once(self, input): self._dtype = self._helper.input_dtype(input) filter_shape = [self._filter_size * input.shape[1], self._num_filters] - self.weight = self.create_parameter(attr=self._param_attr, - shape=filter_shape, - dtype=self._dtype) + self.weight = self.create_parameter( + attr=self._param_attr, shape=filter_shape, dtype=self._dtype + ) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): pre_bias = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type='sequence_conv', - inputs={ - 'X': [input], - 'Filter': [self.weight], - }, - outputs={"Out": pre_bias}, - attrs={ - 'contextStride': self._filter_stride, - 'contextStart': -int(self._filter_size // 2), - 'contextLength': self._filter_size - }) + self._helper.append_op( + type='sequence_conv', + inputs={ + 'X': [input], + 'Filter': [self.weight], + }, + outputs={"Out": pre_bias}, + attrs={ + 'contextStride': self._filter_stride, + 'contextStart': -int(self._filter_size // 2), + 'contextLength': self._filter_size, + }, + ) if self.bias is not None: pre_act = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self.bias] - }, - outputs={'Out': [pre_act]}, - attrs={'axis': 1}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self.bias]}, + outputs={'Out': [pre_act]}, + attrs={'axis': 1}, + ) else: pre_act = pre_bias @@ -2944,12 +3309,11 @@ class RowConv(layers.Layer): """ - def __init__(self, - name_scope, - future_context_size, - param_attr=None, - act=None): - assert not _non_static_mode( + def __init__( + self, name_scope, future_context_size, param_attr=None, act=None + ): + assert ( + not _non_static_mode() ), "RowConv is not supported by dynamic graph mode yet!" super(RowConv, self).__init__(name_scope) self._act = act @@ -2959,27 +3323,28 @@ class RowConv(layers.Layer): def _build_once(self, input): self._dtype = self._helper.input_dtype(input) filter_shape = [self._future_context_size + 1, input.shape[1]] - self.weight = self.create_parameter(attr=self._param_attr, - shape=filter_shape, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=filter_shape, + dtype=self._dtype, + is_bias=False, + ) def forward(self, input): out = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type='row_conv', - inputs={ - 'X': [input], - 'Filter': [self.weight] - }, - outputs={'Out': [out]}) + self._helper.append_op( + type='row_conv', + inputs={'X': [input], 'Filter': [self.weight]}, + outputs={'Out': [out]}, + ) return self._helper.append_activation(out, act=self._act) class GroupNorm(layers.Layer): """ :alias_main: paddle.nn.GroupNorm - :alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm - :old_api: paddle.fluid.dygraph.GroupNorm + :alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm + :old_api: paddle.fluid.dygraph.GroupNorm This interface is used to construct a callable object of the ``GroupNorm`` class. 
For more details, refer to code examples. @@ -3016,15 +3381,17 @@ class GroupNorm(layers.Layer): """ - def __init__(self, - channels, - groups, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - act=None, - data_layout='NCHW', - dtype='float32'): + def __init__( + self, + channels, + groups, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + data_layout='NCHW', + dtype='float32', + ): super(GroupNorm, self).__init__() self._param_attr = param_attr self._bias_attr = bias_attr @@ -3038,31 +3405,44 @@ class GroupNorm(layers.Layer): param_shape = [self._channels] - self.weight = self.create_parameter(attr=self._param_attr or False, - shape=param_shape, - dtype=self._dtype, - default_initializer=Constant(1.0)) + self.weight = self.create_parameter( + attr=self._param_attr or False, + shape=param_shape, + dtype=self._dtype, + default_initializer=Constant(1.0), + ) - self.bias = self.create_parameter(attr=self._bias_attr or False, - shape=param_shape, - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr or False, + shape=param_shape, + dtype=self._dtype, + is_bias=True, + ) def forward(self, input): mean_out = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) variance_out = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) if in_dygraph_mode(): - out = _C_ops.group_norm(input, self.weight, self.bias, - self._epsilon, self._groups, "NCHW") + out = _C_ops.group_norm( + input, + self.weight, + self.bias, + self._epsilon, + self._groups, + "NCHW", + ) return dygraph_utils._append_activation_in_dygraph(out, self._act) elif _in_legacy_dygraph(): attrs = ('epsilon', self._epsilon, 'groups', self._groups) - out, _, _ = _legacy_C_ops.group_norm(input, self.weight, self.bias, - mean_out, variance_out, *attrs) + out, _, _ = _legacy_C_ops.group_norm( + input, self.weight, self.bias, mean_out, variance_out, *attrs + ) return dygraph_utils._append_activation_in_dygraph(out, self._act) else: @@ -3074,19 +3454,19 @@ class GroupNorm(layers.Layer): # create output group_norm_out = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - - self._helper.append_op(type="group_norm", - inputs=inputs, - outputs={ - "Y": group_norm_out, - "Mean": mean_out, - "Variance": variance_out, - }, - attrs={ - "epsilon": self._epsilon, - "groups": self._groups - }) + dtype=self._dtype + ) + + self._helper.append_op( + type="group_norm", + inputs=inputs, + outputs={ + "Y": group_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": self._epsilon, "groups": self._groups}, + ) return self._helper.append_activation(group_norm_out, self._act) @@ -3150,12 +3530,9 @@ class SpectralNorm(layers.Layer): """ - def __init__(self, - weight_shape, - dim=0, - power_iters=1, - eps=1e-12, - dtype='float32'): + def __init__( + self, weight_shape, dim=0, power_iters=1, eps=1e-12, dtype='float32' + ): super(SpectralNorm, self).__init__() self._power_iters = power_iters self._eps = eps @@ -3163,48 +3540,61 @@ class SpectralNorm(layers.Layer): self._dtype = dtype self._weight_shape = list(weight_shape) - assert np.prod(self._weight_shape) > 0,\ - "Any dimension of `weight_shape` cannot be equal to 0." 
- assert dim < len(self._weight_shape), \ - ("The input `dim` should be less than the " + assert ( + np.prod(self._weight_shape) > 0 + ), "Any dimension of `weight_shape` cannot be equal to 0." + assert dim < len(self._weight_shape), ( + "The input `dim` should be less than the " "length of `weight_shape`, but received dim=" - "{}".format(dim)) + "{}".format(dim) + ) h = self._weight_shape[self._dim] w = np.prod(self._weight_shape) // h - self.weight_u = self.create_parameter(attr=ParamAttr(), - shape=[h], - dtype=self._dtype, - default_initializer=Normal( - 0., 1.)) + self.weight_u = self.create_parameter( + attr=ParamAttr(), + shape=[h], + dtype=self._dtype, + default_initializer=Normal(0.0, 1.0), + ) self.weight_u.stop_gradient = True - self.weight_v = self.create_parameter(attr=ParamAttr(), - shape=[w], - dtype=self._dtype, - default_initializer=Normal( - 0., 1.)) + self.weight_v = self.create_parameter( + attr=ParamAttr(), + shape=[w], + dtype=self._dtype, + default_initializer=Normal(0.0, 1.0), + ) self.weight_v.stop_gradient = True def forward(self, weight): if in_dygraph_mode(): - return _C_ops.spectral_norm(weight, self.weight_u, self.weight_v, - self._dim, self._power_iters, self._eps) + return _C_ops.spectral_norm( + weight, + self.weight_u, + self.weight_v, + self._dim, + self._power_iters, + self._eps, + ) - check_variable_and_dtype(weight, "weight", ['float32', 'float64'], - 'SpectralNorm') + check_variable_and_dtype( + weight, "weight", ['float32', 'float64'], 'SpectralNorm' + ) inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v} out = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type="spectral_norm", - inputs=inputs, - outputs={ - "Out": out, - }, - attrs={ - "dim": self._dim, - "power_iters": self._power_iters, - "eps": self._eps, - }) + self._helper.append_op( + type="spectral_norm", + inputs=inputs, + outputs={ + "Out": out, + }, + attrs={ + "dim": self._dim, + "power_iters": self._power_iters, + "eps": self._eps, + }, + ) return out @@ -3254,16 +3644,18 @@ class TreeConv(layers.Layer): ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set)) """ - def __init__(self, - feature_size, - output_size, - num_filters=1, - max_depth=2, - act='tanh', - param_attr=None, - bias_attr=None, - name=None, - dtype='float32'): + def __init__( + self, + feature_size, + output_size, + num_filters=1, + max_depth=2, + act='tanh', + param_attr=None, + bias_attr=None, + name=None, + dtype='float32', + ): super(TreeConv, self).__init__() self._name = name self._feature_size = feature_size @@ -3276,45 +3668,52 @@ class TreeConv(layers.Layer): self._dtype = dtype w_shape = [self._feature_size, 3, self._output_size, self._num_filters] if self._bias_attr: - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._num_filters], - dtype=self._dtype, - is_bias=True) - self.weight = self.create_parameter(attr=self._param_attr, - shape=w_shape, - dtype=self._dtype, - is_bias=False) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_filters], + dtype=self._dtype, + is_bias=True, + ) + self.weight = self.create_parameter( + attr=self._param_attr, + shape=w_shape, + dtype=self._dtype, + is_bias=False, + ) def forward(self, nodes_vector, edge_set): check_type(nodes_vector, 'nodes_vector', (Variable), 'TreeConv') check_type(edge_set, 'edge_set', (Variable), 'TreeConv') if self._name: - out = self.create_variable(name=self._name, - dtype=self._dtype, - 
persistable=False) + out = self.create_variable( + name=self._name, dtype=self._dtype, persistable=False + ) else: out = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='tree_conv', - inputs={ - 'NodesVector': nodes_vector, - 'EdgeSet': edge_set, - 'Filter': self.weight - }, - outputs={ - 'Out': out, - }, - attrs={'max_depth': self._max_depth}) + dtype=self._dtype + ) + self._helper.append_op( + type='tree_conv', + inputs={ + 'NodesVector': nodes_vector, + 'EdgeSet': edge_set, + 'Filter': self.weight, + }, + outputs={ + 'Out': out, + }, + attrs={'max_depth': self._max_depth}, + ) if self._bias_attr: pre_activation = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [out], - 'Y': [self.bias] - }, - outputs={'Out': [pre_activation]}, - attrs={'axis': 1}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [out], 'Y': [self.bias]}, + outputs={'Out': [pre_activation]}, + attrs={'axis': 1}, + ) else: pre_activation = out return self._helper.append_activation(pre_activation, act=self._act) @@ -3353,7 +3752,7 @@ class Flatten(layers.Layer): self.stop_axis = stop_axis def forward(self, input): - out = paddle.tensor.manipulation.flatten(input, - start_axis=self.start_axis, - stop_axis=self.stop_axis) + out = paddle.tensor.manipulation.flatten( + input, start_axis=self.start_axis, stop_axis=self.stop_axis + ) return out diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py index bd0a4c9e5d2210bae90450276d92cf2a07a77828..51e0527e4fa99fde78696fc6714cc15b1a6cbcb7 100644 --- a/python/paddle/fluid/dygraph/parallel.py +++ b/python/paddle/fluid/dygraph/parallel.py @@ -30,7 +30,12 @@ from paddle.fluid.dygraph import to_variable, no_grad from paddle.utils import deprecated from ..layers import collective from paddle.fluid.dygraph import base as imperative_base -from paddle.fluid.framework import ParamBase, _in_legacy_dygraph, _non_static_mode, in_dygraph_mode +from paddle.fluid.framework import ( + ParamBase, + _in_legacy_dygraph, + _non_static_mode, + in_dygraph_mode, +) __all__ = ["prepare_context", "ParallelEnv", "DataParallel"] @@ -50,24 +55,29 @@ def prepare_context(strategy=None): strategy.current_endpoint = Env().current_endpoint if strategy.nranks < 2: return - assert framework._non_static_mode() is True, \ - "dygraph.prepare_context should be used with dygraph mode." + assert ( + framework._non_static_mode() is True + ), "dygraph.prepare_context should be used with dygraph mode." place = framework._current_expected_place() - assert place is not None, \ - "dygraph.prepare_context should be used in fluid.dygraph.guard(place) guard." + assert ( + place is not None + ), "dygraph.prepare_context should be used in fluid.dygraph.guard(place) guard." 
if not parallel_helper._is_parallel_ctx_initialized(): if isinstance(place, core.CUDAPlace): parallel_helper._set_parallel_ctx( - core.NCCLParallelContext(strategy, place)) + core.NCCLParallelContext(strategy, place) + ) elif isinstance(place, core.XPUPlace): parallel_helper._set_parallel_ctx( - core.BKCLParallelContext(strategy, place)) + core.BKCLParallelContext(strategy, place) + ) elif isinstance(place, core.NPUPlace): parallel_helper._set_parallel_ctx( - core.HCCLParallelContext(strategy, place)) + core.HCCLParallelContext(strategy, place) + ) else: # TODO(Yancey1989): add Gloo Parallel Context to support CPU parallel computation - assert ("Only support CUDAPlace or XPUPlace or NPUPlace for now.") + assert "Only support CUDAPlace or XPUPlace or NPUPlace for now." parallel_helper._init_parallel_ctx() return strategy @@ -122,9 +132,11 @@ class ParallelEnv(object): # imperative only support one gpu or xpu if self._device_type != "": FLAGS_selected_custom_devices = 'FLAGS_selected_{}s'.format( - self._device_type) - selected_custom_devices = os.getenv(FLAGS_selected_custom_devices, - "0").split(",") + self._device_type + ) + selected_custom_devices = os.getenv( + FLAGS_selected_custom_devices, "0" + ).split(",") self._device_id = int(selected_custom_devices[0]) else: if core.is_compiled_with_cuda(): @@ -140,14 +152,17 @@ class ParallelEnv(object): selected_mlus = os.getenv("FLAGS_selected_mlus", "0").split(",") self._device_id = int(selected_mlus[0]) - self._trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", - "").split(",") + self._trainer_endpoints = os.getenv( + "PADDLE_TRAINER_ENDPOINTS", "" + ).split(",") self._current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT", "") self._nrings = int(os.getenv("FLAGS_nccl_nrings", "1")) - assert self._nrings > 0, \ - "nccl_nrings must be an integer greater than 0." - assert self._nrings < 9, \ - "nccl_nrings should be less than 9, which is enough in most scenarios." + assert ( + self._nrings > 0 + ), "nccl_nrings must be an integer greater than 0." + assert ( + self._nrings < 9 + ), "nccl_nrings should be less than 9, which is enough in most scenarios." 
@property def rank(self): @@ -297,6 +312,7 @@ def _build_default_parallel_strategy(): def _coalesce_tensors(var_groups): from ..layers import nn + coalesced_grads_and_grad_vars = [] for group_id, grad_vars in var_groups.items(): flattened_vars = [] @@ -304,43 +320,50 @@ def _coalesce_tensors(var_groups): for g_var in grad_vars: g_var_shapes.append(g_var.shape) flattened_vars.append( - nn.reshape(x=g_var, shape=[np.prod(g_var.shape)])) + nn.reshape(x=g_var, shape=[np.prod(g_var.shape)]) + ) coalesced_grad = nn.concat(flattened_vars) coalesced_grads_and_grad_vars.append( - [coalesced_grad, grad_vars, g_var_shapes]) + [coalesced_grad, grad_vars, g_var_shapes] + ) return coalesced_grads_and_grad_vars @framework.dygraph_only def _reshape_inplace(x, shape): x_shape = framework._varbase_creator(dtype=x.dtype) - framework._dygraph_tracer().trace_op(type="reshape2", - inputs={'X': x}, - outputs={ - 'Out': x, - 'XShape': x_shape - }, - attrs={'shape': shape}) + framework._dygraph_tracer().trace_op( + type="reshape2", + inputs={'X': x}, + outputs={'Out': x, 'XShape': x_shape}, + attrs={'shape': shape}, + ) @framework.dygraph_only def _split_tensors(coalesced_grads_and_grad_vars): if _in_legacy_dygraph(): - for coalesced_grad, origin_grad_vars, grad_shapes in coalesced_grads_and_grad_vars: + for ( + coalesced_grad, + origin_grad_vars, + grad_shapes, + ) in coalesced_grads_and_grad_vars: grad_var_len = [np.prod(g_shape) for g_shape in grad_shapes] framework._dygraph_tracer().trace_op( type='split', inputs={'X': coalesced_grad}, outputs={'Out': origin_grad_vars}, - attrs={ - 'sections': grad_var_len, - 'axis': 0 - }) + attrs={'sections': grad_var_len, 'axis': 0}, + ) for g_var, g_shape in zip(origin_grad_vars, grad_shapes): _reshape_inplace(x=g_var, shape=g_shape) assert g_var.shape == g_shape elif in_dygraph_mode(): - for coalesced_grad, origin_grad_vars, grad_shapes in coalesced_grads_and_grad_vars: + for ( + coalesced_grad, + origin_grad_vars, + grad_shapes, + ) in coalesced_grads_and_grad_vars: grad_var_len = [np.prod(g_shape) for g_shape in grad_shapes] attrs = () attrs += ('sections', grad_var_len) @@ -357,7 +380,8 @@ def scale_loss(loss): return loss loss_scale = to_variable( - np.array([ParallelEnv().world_size]).astype("float32")) + np.array([ParallelEnv().world_size]).astype("float32") + ) loss_scale.stop_gradient = True scaled_loss = loss / loss_scale return scaled_loss @@ -385,16 +409,16 @@ def build_groups(vars, group_size): @imperative_base.no_grad @framework.dygraph_only -def sync_params_buffers(model, - comm_group=None, - src_rank=0, - is_model_parallel=False): +def sync_params_buffers( + model, comm_group=None, src_rank=0, is_model_parallel=False +): model_vars = [] for _, param in model._obtain_parameters_buffers().items(): if not isinstance(param, (core.VarBase, core.eager.Tensor)): raise TypeError( - "The data type of '%s' must be Varbase or eager.Tensor" % - param.name) + "The data type of '%s' must be Varbase or eager.Tensor" + % param.name + ) # is_distributed param not need to sync when in mp mode if isinstance(param, (ParamBase, core.eager.Tensor)): @@ -416,10 +440,9 @@ def sync_params_buffers(model, coalesced_vars = build_groups(model_vars, 128 * 1024 * 1024) for coalesced_var, _, _ in coalesced_vars: - paddle.distributed.broadcast(coalesced_var, - src=src_rank, - group=comm_group, - sync_op=True) + paddle.distributed.broadcast( + coalesced_var, src=src_rank, group=comm_group, sync_op=True + ) for coalesced_var, origin_vars, var_shapes in coalesced_vars: var_len = 
[np.prod(v_shape) for v_shape in var_shapes] @@ -427,10 +450,8 @@ def sync_params_buffers(model, type='split', inputs={'X': coalesced_var}, outputs={'Out': origin_vars}, - attrs={ - 'sections': var_len, - 'axis': 0 - }) + attrs={'sections': var_len, 'axis': 0}, + ) class DataParallel(layers.Layer): @@ -594,25 +615,30 @@ class DataParallel(layers.Layer): """ - def __init__(self, - layers, - strategy=None, - comm_buffer_size=25, - last_comm_buffer_size=1, - find_unused_parameters=False, - group=None): - super(DataParallel, - self).__init__(layers.full_name() + "_data_parallel") - - assert _non_static_mode(), \ - "It's not supported to construct DataParallel in static mode." + def __init__( + self, + layers, + strategy=None, + comm_buffer_size=25, + last_comm_buffer_size=1, + find_unused_parameters=False, + group=None, + ): + super(DataParallel, self).__init__( + layers.full_name() + "_data_parallel" + ) + + assert ( + _non_static_mode() + ), "It's not supported to construct DataParallel in static mode." self._layers = layers self.find_unused_parameters = find_unused_parameters self.grad_need_sync = True self.group = group - self.var_dtype = core.eager.Tensor if in_dygraph_mode( - ) else core.VarBase + self.var_dtype = ( + core.eager.Tensor if in_dygraph_mode() else core.VarBase + ) # NOTE(chenweihang): The ParallelStrategy here is not strictly a strategy. # It just stores some environment variables, which can be constructed by @@ -625,16 +651,21 @@ class DataParallel(layers.Layer): if self._strategy.nranks > 1: # check the environment - assert parallel_helper.__parallel_ctx__clz__ is not None, \ - "ParallelContext must be initialized before. You should use init_parallel_env() before" \ - "constructing the DataParallel." + assert parallel_helper.__parallel_ctx__clz__ is not None, ( + "ParallelContext must be initialized before. You should use init_parallel_env() before" + "constructing the DataParallel." + ) if in_dygraph_mode(): - self.group = paddle.distributed.collective._get_default_group( - ) if self.group is None else self.group + self.group = ( + paddle.distributed.collective._get_default_group() + if self.group is None + else self.group + ) - assert isinstance(self.group, paddle.distributed.collective.Group), \ - "ProcessGroup must be an instance of Group in DataParallel." + assert isinstance( + self.group, paddle.distributed.collective.Group + ), "ProcessGroup must be an instance of Group in DataParallel." # sync buffer and params # TODO(liuyuhui) Currently not support xpu. xpu is @@ -647,14 +678,17 @@ class DataParallel(layers.Layer): # the size of the group, Default: 1MB. The role of this small group is: # when the last group allreduce, the overlap cannot work. Making the # the last group small is useful to improve performance. - self.last_comm_buffer_size = int(last_comm_buffer_size * 1024 * - 1024) + self.last_comm_buffer_size = int( + last_comm_buffer_size * 1024 * 1024 + ) self.init_reducer() else: - warnings.warn("The program will return to single-card operation. " - "Please check 1, whether you use spawn or fleetrun " - "to start the program. 2, Whether it is a multi-card " - "program. 3, Is the current environment multi-card.") + warnings.warn( + "The program will return to single-card operation. " + "Please check 1, whether you use spawn or fleetrun " + "to start the program. 2, Whether it is a multi-card " + "program. 3, Is the current environment multi-card." 
+ ) def init_reducer(self): layers_param = [] @@ -665,16 +699,19 @@ class DataParallel(layers.Layer): continue params_set.add(param) if not isinstance(param, self.var_dtype): - raise TypeError("The data type of '%s' must be '%s'" % - (param.name, self.var_dtype)) + raise TypeError( + "The data type of '%s' must be '%s'" + % (param.name, self.var_dtype) + ) if param.trainable: layers_param.append((sublayer, param)) trainable_parameters = [param for _, param in layers_param] - assert len(trainable_parameters) > 0, \ - "This model does not have any parameters to train, and " \ + assert len(trainable_parameters) > 0, ( + "This model does not have any parameters to train, and " "does not need to use DataParallel" + ) # NOTE(shenliang03): Here we can only use the attributes to judge whether # parameter is sparse(or SelectedRows). The reason is that the sparse message @@ -695,24 +732,34 @@ class DataParallel(layers.Layer): if in_dygraph_mode(): self.group_indices = core.eager_assign_group_by_size( - trainable_parameters, is_sparse_gradient, - [self.last_comm_buffer_size, self.comm_buffer_size]) + trainable_parameters, + is_sparse_gradient, + [self.last_comm_buffer_size, self.comm_buffer_size], + ) self._reducer = core.EagerReducer( - trainable_parameters, list(reversed(self.group_indices)), - is_sparse_gradient, self.group.process_group, + trainable_parameters, + list(reversed(self.group_indices)), + is_sparse_gradient, + self.group.process_group, [self.last_comm_buffer_size, self.comm_buffer_size], - self.find_unused_parameters) + self.find_unused_parameters, + ) elif _in_legacy_dygraph(): self.group_indices = core.assign_group_by_size( - trainable_parameters, is_sparse_gradient, - [self.last_comm_buffer_size, self.comm_buffer_size]) + trainable_parameters, + is_sparse_gradient, + [self.last_comm_buffer_size, self.comm_buffer_size], + ) self._reducer = core.Reducer( - trainable_parameters, list(reversed(self.group_indices)), - is_sparse_gradient, parallel_helper.__parallel_ctx__clz__, + trainable_parameters, + list(reversed(self.group_indices)), + is_sparse_gradient, + parallel_helper.__parallel_ctx__clz__, [self.last_comm_buffer_size, self.comm_buffer_size], - self.find_unused_parameters) + self.find_unused_parameters, + ) def _find_varbase(self, obj): var_type = core.eager.Tensor if in_dygraph_mode() else core.VarBase @@ -771,14 +818,19 @@ class DataParallel(layers.Layer): def forward(self, *inputs, **kwargs): outputs = self._layers(*inputs, **kwargs) - if self._strategy.nranks > 1 and framework._dygraph_tracer( - )._has_grad and self.grad_need_sync: - self._reducer.prepare_for_backward(list( - self._find_varbase(outputs))) + if ( + self._strategy.nranks > 1 + and framework._dygraph_tracer()._has_grad + and self.grad_need_sync + ): + self._reducer.prepare_for_backward( + list(self._find_varbase(outputs)) + ) return outputs - @deprecated(since="2.0.0", - reason="This method does not need to be called anymore.") + @deprecated( + since="2.0.0", reason="This method does not need to be called anymore." + ) def scale_loss(self, loss): """ Deprecated method, now ``scale_loss`` is an empty method, @@ -786,8 +838,9 @@ class DataParallel(layers.Layer): """ return loss - @deprecated(since="2.0.0", - reason="This method does not need to be called anymore.") + @deprecated( + since="2.0.0", reason="This method does not need to be called anymore." 
+ ) def apply_collective_grads(self): """ Deprecated method, now ``apply_collective_grads`` is an empty method, @@ -795,10 +848,12 @@ class DataParallel(layers.Layer): """ return - def state_dict(self, - destination=None, - include_sublayers=True, - structured_name_prefix=""): + def state_dict( + self, + destination=None, + include_sublayers=True, + structured_name_prefix="", + ): ''' Get all parameters and persistable buffers of current layer and its sub-layers. And set them into a dict @@ -828,7 +883,8 @@ class DataParallel(layers.Layer): return self._layers.state_dict( destination=destination, include_sublayers=include_sublayers, - structured_name_prefix=structured_name_prefix) + structured_name_prefix=structured_name_prefix, + ) @framework.deprecate_stat_dict def set_state_dict(self, state_dict, use_structured_name=True): @@ -861,8 +917,9 @@ class DataParallel(layers.Layer): ''' - self._layers.set_state_dict(state_dict, - use_structured_name=use_structured_name) + self._layers.set_state_dict( + state_dict, use_structured_name=use_structured_name + ) # [aliases] Compatible with old method names set_dict = set_state_dict diff --git a/python/paddle/fluid/dygraph/parallel_helper.py b/python/paddle/fluid/dygraph/parallel_helper.py index bc0bb4603525ec744ba1c31e352ca091e395fd05..32d7974af31bb21973dad48c97238024182ea851 100644 --- a/python/paddle/fluid/dygraph/parallel_helper.py +++ b/python/paddle/fluid/dygraph/parallel_helper.py @@ -20,8 +20,10 @@ __parallel_ctx__clz__ = None def _is_data_parallel_mode(): global __parallel_ctx__clz__ - return __parallel_ctx__clz__ is not None and int( - os.getenv("PADDLE_TRAINERS_NUM", "1")) > 1 + return ( + __parallel_ctx__clz__ is not None + and int(os.getenv("PADDLE_TRAINERS_NUM", "1")) > 1 + ) def _is_parallel_ctx_initialized(): @@ -31,15 +33,17 @@ def _is_parallel_ctx_initialized(): def _set_parallel_ctx(ccl_parallel_context): global __parallel_ctx__clz__ - assert __parallel_ctx__clz__ is None, \ - "ParallelContext can only be initialized once." + assert ( + __parallel_ctx__clz__ is None + ), "ParallelContext can only be initialized once." __parallel_ctx__clz__ = ccl_parallel_context def _init_parallel_ctx(): global __parallel_ctx__clz__ - assert __parallel_ctx__clz__ is not None, \ - "ParallelContext should be initialized." + assert ( + __parallel_ctx__clz__ is not None + ), "ParallelContext should be initialized." __parallel_ctx__clz__.init() @@ -47,7 +51,8 @@ def _broadcast_parameters(parameters): for param in parameters: # In model parallel, some parameters are split into multiple devices, # so we could not broadcast these parameters. - if param.is_distributed: continue + if param.is_distributed: + continue if isinstance(param, Parameter) and param.trainable: collective._broadcast(param, 0, sync_mode=True) diff --git a/python/paddle/fluid/dygraph/rnn.py b/python/paddle/fluid/dygraph/rnn.py index e70533662976fa99958924abd15f3363c7854b04..4ae9c5ba5fec42f4888021ea6830936754bc7929 100644 --- a/python/paddle/fluid/dygraph/rnn.py +++ b/python/paddle/fluid/dygraph/rnn.py @@ -13,7 +13,16 @@ # limitations under the License. from . 
import Layer -from ..layers import sigmoid, tanh, concat, fill_constant, matmul, elementwise_add, elementwise_mul, split +from ..layers import ( + sigmoid, + tanh, + concat, + fill_constant, + matmul, + elementwise_add, + elementwise_mul, + split, +) import copy __all__ = ['LSTMCell', 'GRUCell'] @@ -111,16 +120,18 @@ class LSTMCell(Layer): """ - def __init__(self, - hidden_size, - input_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - forget_bias=1.0, - use_cudnn_impl=True, - dtype='float64'): + def __init__( + self, + hidden_size, + input_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + use_cudnn_impl=True, + dtype='float64', + ): super(LSTMCell, self).__init__(dtype) self._hidden_size = hidden_size @@ -134,7 +145,10 @@ class LSTMCell(Layer): if self._use_cudnn_impl: - if self._param_attr is not None and self._param_attr.name is not None: + if ( + self._param_attr is not None + and self._param_attr.name is not None + ): weight_ih_param_attr = copy.deepcopy(self._param_attr) weight_hh_param_attr = copy.deepcopy(self._param_attr) weight_ih_param_attr.name += "_weight_ih" @@ -155,40 +169,50 @@ class LSTMCell(Layer): self._weight_ih = self.create_parameter( attr=weight_ih_param_attr, shape=[4 * self._hidden_size, self._input_size], - dtype=self._dtype) + dtype=self._dtype, + ) self._weight_hh = self.create_parameter( attr=weight_hh_param_attr, shape=[4 * self._hidden_size, self._hidden_size], - dtype=self._dtype) - - self._bias_ih = self.create_parameter(attr=bias_ih_param_attr, - shape=[4 * self._hidden_size], - dtype=self._dtype, - is_bias=True) - self._bias_hh = self.create_parameter(attr=bias_hh_param_attr, - shape=[4 * self._hidden_size], - dtype=self._dtype, - is_bias=True) + dtype=self._dtype, + ) + + self._bias_ih = self.create_parameter( + attr=bias_ih_param_attr, + shape=[4 * self._hidden_size], + dtype=self._dtype, + is_bias=True, + ) + self._bias_hh = self.create_parameter( + attr=bias_hh_param_attr, + shape=[4 * self._hidden_size], + dtype=self._dtype, + is_bias=True, + ) else: - self._forget_bias = fill_constant([1], - dtype=dtype, - value=forget_bias) + self._forget_bias = fill_constant( + [1], dtype=dtype, value=forget_bias + ) self._forget_bias.stop_gradient = False self._weight = self.create_parameter( attr=self._param_attr, shape=[ - self._input_size + self._hidden_size, 4 * self._hidden_size + self._input_size + self._hidden_size, + 4 * self._hidden_size, ], - dtype=dtype) + dtype=dtype, + ) - self._bias = self.create_parameter(attr=self._bias_attr, - shape=[4 * self._hidden_size], - dtype=dtype, - is_bias=True) + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=[4 * self._hidden_size], + dtype=dtype, + is_bias=True, + ) def forward(self, input, pre_hidden, pre_cell): @@ -226,9 +250,12 @@ class LSTMCell(Layer): new_cell = elementwise_add( elementwise_mul( pre_cell, - self._gate_activation(elementwise_add(f, - self._forget_bias))), - elementwise_mul(sigmoid(i), tanh(j))) + self._gate_activation( + elementwise_add(f, self._forget_bias) + ), + ), + elementwise_mul(sigmoid(i), tanh(j)), + ) new_hidden = self._activation(new_cell) * self._gate_activation(o) return new_hidden, new_cell @@ -312,15 +339,17 @@ class GRUCell(Layer): """ - def __init__(self, - hidden_size, - input_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - use_cudnn_impl=True, - dtype='float64'): + def __init__( + self, + hidden_size, + input_size, + 
param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + use_cudnn_impl=True, + dtype='float64', + ): super(GRUCell, self).__init__() self._hidden_size = hidden_size @@ -334,7 +363,10 @@ class GRUCell(Layer): if self._use_cudnn_impl: - if self._param_attr is not None and self._param_attr.name is not None: + if ( + self._param_attr is not None + and self._param_attr.name is not None + ): weight_ih_param_attr = copy.deepcopy(self._param_attr) weight_hh_param_attr = copy.deepcopy(self._param_attr) weight_ih_param_attr.name += "_weight_ih" @@ -355,25 +387,34 @@ class GRUCell(Layer): self._weight_ih = self.create_parameter( attr=weight_ih_param_attr, shape=[3 * self._hidden_size, self._input_size], - dtype=self._dtype) + dtype=self._dtype, + ) self._weight_hh = self.create_parameter( attr=weight_hh_param_attr, shape=[3 * self._hidden_size, self._hidden_size], - dtype=self._dtype) - - self._bias_ih = self.create_parameter(attr=bias_ih_param_attr, - shape=[3 * self._hidden_size], - dtype=self._dtype, - is_bias=True) - self._bias_hh = self.create_parameter(attr=bias_hh_param_attr, - shape=[3 * self._hidden_size], - dtype=self._dtype, - is_bias=True) + dtype=self._dtype, + ) + + self._bias_ih = self.create_parameter( + attr=bias_ih_param_attr, + shape=[3 * self._hidden_size], + dtype=self._dtype, + is_bias=True, + ) + self._bias_hh = self.create_parameter( + attr=bias_hh_param_attr, + shape=[3 * self._hidden_size], + dtype=self._dtype, + is_bias=True, + ) else: - if self._param_attr is not None and self._param_attr.name is not None: + if ( + self._param_attr is not None + and self._param_attr.name is not None + ): gate_weight_param_attr = copy.deepcopy(self._param_attr) candidate_weight_param_attr = copy.deepcopy(self._param_attr) gate_weight_param_attr.name += "_gate_weight" @@ -394,25 +435,30 @@ class GRUCell(Layer): self._gate_weight = self.create_parameter( attr=gate_weight_param_attr, shape=[ - self._input_size + self._hidden_size, 2 * self._hidden_size + self._input_size + self._hidden_size, + 2 * self._hidden_size, ], - dtype=dtype) + dtype=dtype, + ) self._candidate_weight = self.create_parameter( attr=candidate_weight_param_attr, shape=[self._input_size + self._hidden_size, self._hidden_size], - dtype=dtype) + dtype=dtype, + ) self._gate_bias = self.create_parameter( attr=gate_bias_param_attr, shape=[2 * self._hidden_size], dtype=dtype, - is_bias=True) + is_bias=True, + ) self._candidate_bias = self.create_parameter( attr=candidate_bias_param_attr, shape=[self._hidden_size], dtype=dtype, - is_bias=True) + is_bias=True, + ) def forward(self, input, pre_hidden): @@ -450,8 +496,9 @@ class GRUCell(Layer): r_hidden = r * pre_hidden - candidate = matmul(concat([input, r_hidden], 1), - self._candidate_weight) + candidate = matmul( + concat([input, r_hidden], 1), self._candidate_weight + ) candidate = elementwise_add(candidate, self._candidate_bias) c = self._activation(candidate) diff --git a/python/paddle/fluid/dygraph/tracer.py b/python/paddle/fluid/dygraph/tracer.py index 1553efad41a2555565f05347323507195c1e03fe..a274296cce39efd9e9c6b80a109ed95e3d35bba9 100644 --- a/python/paddle/fluid/dygraph/tracer.py +++ b/python/paddle/fluid/dygraph/tracer.py @@ -24,7 +24,7 @@ name_mapping = { "src_index": "Src_index", "dst_index": "Dst_index", "out": "Out", - "dst_count": "Dst_count" + "dst_count": "Dst_count", }, "matmul_v2": { "final_op_name": "matmul", @@ -106,13 +106,15 @@ class Tracer(core.Tracer): self._train_mode = True - def eager_legacy_trace_op(self, - op_type, - inputs, - 
outputs, - attrs, - stop_gradient=False, - inplace_map=None): + def eager_legacy_trace_op( + self, + op_type, + inputs, + outputs, + attrs, + stop_gradient=False, + inplace_map=None, + ): function_ptr = _legacy_C_ops.__dict__[op_type] core_ops_args_info = _legacy_C_ops.get_core_ops_args_info() @@ -186,14 +188,17 @@ class Tracer(core.Tracer): if isinstance(returns[i], list): for j in range(len(returns[i])): outputs[retname][j].reconstruct_from_( - returns[i][j], False) + returns[i][j], False + ) else: if isinstance(outputs[retname], list): outputs[retname][0].reconstruct_from_( - returns[i], False) + returns[i], False + ) else: outputs[retname].reconstruct_from_( - returns[i], False) + returns[i], False + ) elif isinstance(returns, list): assert len(outputs.keys()) == 1 key = list(outputs.keys())[0] @@ -207,13 +212,15 @@ class Tracer(core.Tracer): else: outputs[key].reconstruct_from_(returns, False) - def eager_trace_op(self, - op_type, - inputs, - outputs, - attrs, - stop_gradient=False, - inplace_map=None): + def eager_trace_op( + self, + op_type, + inputs, + outputs, + attrs, + stop_gradient=False, + inplace_map=None, + ): assert op_type in name_mapping.keys() op_type = name_mapping[op_type]["final_op_name"] @@ -273,7 +280,8 @@ class Tracer(core.Tracer): if isinstance(returns[i], list): for j in range(len(returns[i])): outputs[retname][j].reconstruct_from_( - returns[i][j], False) + returns[i][j], False + ) else: outputs[retname][0].reconstruct_from_(returns[i], False) elif isinstance(returns, list): @@ -289,13 +297,15 @@ class Tracer(core.Tracer): else: outputs[key].reconstruct_from_(returns, False) - def trace_op(self, - type, - inputs, - outputs, - attrs, - stop_gradient=False, - inplace_map=None): + def trace_op( + self, + type, + inputs, + outputs, + attrs, + stop_gradient=False, + inplace_map=None, + ): if not framework._in_legacy_dygraph(): # inputs : {"sum": [tensor], ...} # outputs : {"sum": [tensor], ...} @@ -303,16 +313,23 @@ class Tracer(core.Tracer): type = name_mapping[type]["final_op_name"] assert type in _legacy_C_ops.__dict__ - self.eager_trace_op(type, inputs, outputs, attrs, stop_gradient, - inplace_map) + self.eager_trace_op( + type, inputs, outputs, attrs, stop_gradient, inplace_map + ) else: - self.eager_legacy_trace_op(type, inputs, outputs, attrs, - stop_gradient, inplace_map) + self.eager_legacy_trace_op( + type, inputs, outputs, attrs, stop_gradient, inplace_map + ) else: - self.trace(type, inputs, outputs, attrs, - framework._current_expected_place(), self._has_grad - and not stop_gradient, - inplace_map if inplace_map else {}) + self.trace( + type, + inputs, + outputs, + attrs, + framework._current_expected_place(), + self._has_grad and not stop_gradient, + inplace_map if inplace_map else {}, + ) def train_mode(self): self._train_mode = True diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index 618bcd540f0a75d8c388ce0ba3d56b20a0c5d742..7dbcbb6f98efd65d0750dd28c9782d7f3880309b 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -23,7 +23,15 @@ from .. import framework from ..framework import convert_np_dtype_to_dtype_, _in_legacy_dygraph from .. import core from .. 
import unique_name -from ..framework import Variable, Parameter, ParamBase, _getitem_impl_, _setitem_impl_, EagerParamBase, in_dygraph_mode +from ..framework import ( + Variable, + Parameter, + ParamBase, + _getitem_impl_, + _setitem_impl_, + EagerParamBase, + in_dygraph_mode, +) from .base import switch_to_static_graph from .math_op_patch import monkey_patch_math_varbase from .parallel import scale_loss @@ -43,8 +51,9 @@ class TensorHookRemoveHelper(object): """ def __init__(self, tensor, hook_id): - self._tensor = tensor if framework._in_eager_mode_ else weakref.ref( - tensor) + self._tensor = ( + tensor if framework._in_eager_mode_ else weakref.ref(tensor) + ) self._hook_id = hook_id def remove(self): @@ -62,7 +71,9 @@ class TensorHookRemoveHelper(object): else: warnings.warn( "The backward hook (ID: %d) of Tensor `%s` you want to remove does not exist or has been removed." - % (self._hook_id, tensor.name), RuntimeWarning) + % (self._hook_id, tensor.name), + RuntimeWarning, + ) return False @@ -70,7 +81,6 @@ _already_patch_repr = False def monkey_patch_varbase(): - @switch_to_static_graph def _to_static_var(self, to_parameter=False, **kwargs): """ @@ -111,8 +121,9 @@ def monkey_patch_varbase(): attr_names = [] for name in dir(self): if name not in attr_not_need_keys: - if not inspect.ismethod(getattr( - self, name)) and not name.startswith('_'): + if not inspect.ismethod( + getattr(self, name) + ) and not name.startswith('_'): attr_names.append(name) attr_kwargs = {name: getattr(self, name) for name in attr_names} @@ -169,38 +180,45 @@ def monkey_patch_varbase(): base_tensor = core.eager.Tensor else: base_tensor = core.VarBase - assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \ - "Variable set_value function, arguments type only support Variable, numpy, VarBase, dict, string." + assert isinstance( + value, (np.ndarray, base_tensor, dict, str) + ), "Variable set_value function, arguments type only support Variable, numpy, VarBase, dict, string." 
if isinstance(value, (dict, str)): assert len(self) == len( value ), "Variable length not match, Variable [ {} ] need tensor with length {} but load set tensor with length {}".format( - self.name, len(self), len(value)) + self.name, len(self), len(value) + ) if isinstance(value, dict): self.value().set_vocab(value) else: self.value().set_string_list(value) else: - assert self.shape == list(value.shape), \ - "Variable Shape not match, Variable [ {} ] need tensor with shape {} but load set tensor with shape {}".format( - self.name, self.shape, value.shape) + assert self.shape == list( + value.shape + ), "Variable Shape not match, Variable [ {} ] need tensor with shape {} but load set tensor with shape {}".format( + self.name, self.shape, value.shape + ) if isinstance(value, base_tensor): dtype = value.dtype else: dtype = convert_np_dtype_to_dtype_(value.dtype) - assert self.dtype == dtype, \ - "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( - self.name, self.dtype, dtype) + assert ( + self.dtype == dtype + ), "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( + self.name, self.dtype, dtype + ) # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior are defined in different files # if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc # if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc # this Interface behavior will be unifed in the future. - self.value().get_tensor().set(value, - framework._current_expected_place()) + self.value().get_tensor().set( + value, framework._current_expected_place() + ) @framework.dygraph_only def backward(self, grad_tensor=None, retain_graph=False): @@ -258,7 +276,8 @@ def monkey_patch_varbase(): if framework._non_static_mode(): if in_profiler_mode(): record_event = profiler.RecordEvent( - "Gradient Backward", profiler.TracerEventType.Backward) + "Gradient Backward", profiler.TracerEventType.Backward + ) record_event.begin() if grad_tensor is not None: if framework._in_eager_mode_: @@ -269,9 +288,11 @@ def monkey_patch_varbase(): assert isinstance( grad_tensor, paddle.Tensor ), "The type of grad_tensor must be paddle.Tensor" - assert grad_tensor.shape == self.shape, \ - "Tensor shape not match, Tensor of grad_tensor [ {} ] with shape {} mismatch Tensor [ {} ] with shape {}".format( - grad_tensor.name, grad_tensor.shape, self.name, self.shape) + assert ( + grad_tensor.shape == self.shape + ), "Tensor shape not match, Tensor of grad_tensor [ {} ] with shape {} mismatch Tensor [ {} ] with shape {}".format( + grad_tensor.name, grad_tensor.shape, self.name, self.shape + ) if framework._in_eager_mode_: if grad_tensor is None: @@ -281,36 +302,46 @@ def monkey_patch_varbase(): if _grad_scalar: # When using amp with Fleet DistributedStrategy, we do loss scaling implicitly. self = _grad_scalar.scale(self) - if paddle.is_compiled_with_xpu() or paddle.is_compiled_with_npu( - ) or paddle.is_compiled_with_mlu(): + if ( + paddle.is_compiled_with_xpu() + or paddle.is_compiled_with_npu() + or paddle.is_compiled_with_mlu() + ): # TODO(liuyuhui): Currently only for xpu. Will be removed in the future. 
scaled_loss = scale_loss(self) if framework._in_eager_mode_: - core.eager.run_backward([scaled_loss], grad_tensor, - retain_graph) + core.eager.run_backward( + [scaled_loss], grad_tensor, retain_graph + ) else: - core.dygraph_run_backward([scaled_loss], [grad_tensor], - retain_graph, - framework._dygraph_tracer()) + core.dygraph_run_backward( + [scaled_loss], + [grad_tensor], + retain_graph, + framework._dygraph_tracer(), + ) else: if framework._in_eager_mode_: core.eager.run_backward([self], grad_tensor, retain_graph) else: - core.dygraph_run_backward([self], [grad_tensor], - retain_graph, - framework._dygraph_tracer()) + core.dygraph_run_backward( + [self], + [grad_tensor], + retain_graph, + framework._dygraph_tracer(), + ) if in_profiler_mode(): record_event.end() else: raise ValueError( - "Variable.backward() is only available in DyGraph mode") + "Variable.backward() is only available in DyGraph mode" + ) @framework.dygraph_only @deprecated( since="2.1.0", level=1, - reason= - "Please use tensor.grad, which returns the tensor value of the gradient." + reason="Please use tensor.grad, which returns the tensor value of the gradient.", ) def gradient(self): """ @@ -347,9 +378,10 @@ def monkey_patch_varbase(): new_ivar = self._grad_ivar()._copy_to(core.CPUPlace(), True) if self._grad_ivar().type == core.VarDesc.VarType.SELECTED_ROWS: - return (np.array( - new_ivar.value().get_selected_rows().get_tensor()), - np.array(new_ivar.value().get_selected_rows().rows())) + return ( + np.array(new_ivar.value().get_selected_rows().get_tensor()), + np.array(new_ivar.value().get_selected_rows().rows()), + ) else: return np.array(new_ivar.value().get_tensor()) @@ -414,7 +446,8 @@ def monkey_patch_varbase(): """ if self.stop_gradient is True: raise RuntimeError( - "Cannot register hook on a tensor that stop gradient.") + "Cannot register hook on a tensor that stop gradient." + ) hook_id = self._register_grad_hook(hook) helper = TensorHookRemoveHelper(self, hook_id) @@ -430,21 +463,28 @@ def monkey_patch_varbase(): if isinstance(device, str): device = paddle.device._convert_to_place(device) elif isinstance( - device, - (core.CPUPlace, core.CUDAPlace, core.CUDAPinnedPlace, - core.XPUPlace, core.CustomPlace)): + device, + ( + core.CPUPlace, + core.CUDAPlace, + core.CUDAPinnedPlace, + core.XPUPlace, + core.CustomPlace, + ), + ): pass else: raise ValueError( "device value error, must be str, paddle.CPUPlace(), paddle.CUDAPlace(), paddle.CUDAPinnedPlace(), paddle.XPUPlace() or paddle.CustomPlace(), but the type of device is " - + type(device).__name__) + + type(device).__name__ + ) if blocking is None: blocking = True else: assert isinstance( - blocking, - bool), "blocking value error, must be the True, False or None" + blocking, bool + ), "blocking value error, must be the True, False or None" def transform(t, device, dtype, blocking): if device is None: @@ -461,7 +501,8 @@ def monkey_patch_varbase(): # waiting_alloc_memory will compute the memory space occupied by 't'. # Coefficient 1.2 is used to avoid OOM that may occur in this critical state when the memory is just enough. waiting_alloc_memory = ( - (t._numel() * size_dtype) / 256 + 1) * 256 * 1.2 + ((t._numel() * size_dtype) / 256 + 1) * 256 * 1.2 + ) gpu_memory_available = core.gpu_memory_available() if gpu_memory_available < waiting_alloc_memory: # Copy Tensor to cpu @@ -477,7 +518,8 @@ def monkey_patch_varbase(): # 2. 
cast Tensor to dtype if dtype is not None and dtype != t_used.dtype: with paddle.fluid.framework._dygraph_place_guard( - place=t_used.place): + place=t_used.place + ): t_casted = t_used.cast(dtype=dtype) else: t_casted = t_used @@ -523,10 +565,12 @@ def monkey_patch_varbase(): # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False, [500.]) """ - msg = 'tensor.grad will return the tensor value of the gradient.' \ - ' This is an incompatible upgrade for tensor.grad API. ' \ - ' It\'s return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. ' \ + msg = ( + 'tensor.grad will return the tensor value of the gradient.' + ' This is an incompatible upgrade for tensor.grad API. ' + ' It\'s return type changes from numpy.ndarray in version 2.0 to paddle.Tensor in version 2.1.0. ' ' If you want to get the numpy value of the gradient, you can use :code:`x.grad.numpy()`' + ) warning_msg = "\033[93m\nWarning:\n%s \033[0m" % (msg) # ensure ANSI escape sequences print correctly in cmd and powershell if sys.platform.lower() == 'win32': @@ -622,9 +666,11 @@ def monkey_patch_varbase(): """ if framework._in_eager_mode_: from paddle.tensor.to_string import tensor_to_string + return tensor_to_string(self) else: from paddle.tensor.to_string import to_string + return to_string(self) def __deepcopy__(self, memo): @@ -667,7 +713,9 @@ def monkey_patch_varbase(): def __nonzero__(self): numel = np.prod(self.shape) - assert numel == 1, "When Variable is used as the condition of if/while , Variable can only contain one element." + assert ( + numel == 1 + ), "When Variable is used as the condition of if/while , Variable can only contain one element." if framework._in_eager_mode_: assert self._is_initialized(), "tensor not initialized" return bool(np.all(self.numpy() > 0)) @@ -711,21 +759,22 @@ def monkey_patch_varbase(): for slice_item in item: if isinstance(slice_item, slice): - if isinstance(slice_item.start, Variable) \ - or isinstance(slice_item.stop, Variable) \ - or isinstance(slice_item.step, Variable): + if ( + isinstance(slice_item.start, Variable) + or isinstance(slice_item.stop, Variable) + or isinstance(slice_item.step, Variable) + ): return True else: - if isinstance( - slice_item, - (Variable, np.ndarray)) and Variable.dtype != paddle.bool: + if ( + isinstance(slice_item, (Variable, np.ndarray)) + and Variable.dtype != paddle.bool + ): return True return False def __getitem__(self, item): - def is_list_tuple(index, contain_type): - def _is_list_tuple(item): if isinstance(item, (tuple, list)): for s in item: @@ -753,7 +802,6 @@ def monkey_patch_varbase(): return self._getitem_index_not_tensor(item) def __setitem__(self, item, value): - def contain_tensor_or_list(item): if not isinstance(item, tuple): item = [item] @@ -813,7 +861,8 @@ def monkey_patch_varbase(): self._unset_fake_empty() else: raise TypeError( - "_set_grad_ivar is only supported for Parameter Tensor") + "_set_grad_ivar is only supported for Parameter Tensor" + ) @framework.dygraph_only def value(self): @@ -971,25 +1020,30 @@ def monkey_patch_varbase(): if framework._in_eager_mode_ and not hasattr(core, "eager"): return - for method_name, method in (("__bool__", __bool__), ("__nonzero__", - __nonzero__), - ("_to_static_var", - _to_static_var), ("set_value", set_value), - ("block", block), ("backward", backward), - ("clear_grad", clear_grad), ("inplace_version", - inplace_version), - ("gradient", gradient), ("register_hook", - register_hook), - ("__str__", __str__), ("__repr__", __str__), - 
("__deepcopy__", __deepcopy__), ("__module__", - "paddle"), - ("__array__", - __array__), ("__getitem__", - __getitem__), ("item", item), - ("__setitem__", - __setitem__), ("_to", _to), ("values", values), - ("to_dense", to_dense), ("to_sparse_coo", - to_sparse_coo)): + for method_name, method in ( + ("__bool__", __bool__), + ("__nonzero__", __nonzero__), + ("_to_static_var", _to_static_var), + ("set_value", set_value), + ("block", block), + ("backward", backward), + ("clear_grad", clear_grad), + ("inplace_version", inplace_version), + ("gradient", gradient), + ("register_hook", register_hook), + ("__str__", __str__), + ("__repr__", __str__), + ("__deepcopy__", __deepcopy__), + ("__module__", "paddle"), + ("__array__", __array__), + ("__getitem__", __getitem__), + ("item", item), + ("__setitem__", __setitem__), + ("_to", _to), + ("values", values), + ("to_dense", to_dense), + ("to_sparse_coo", to_sparse_coo), + ): if framework._in_eager_mode_: setattr(core.eager.Tensor, method_name, method) else: diff --git a/python/paddle/fluid/dygraph_utils.py b/python/paddle/fluid/dygraph_utils.py index d93915e8bb5ff1ca399e2f7f127d8d674e0cbccb..e89f8f591082a1860a8a1ec3bc04e81f6519fa1d 100644 --- a/python/paddle/fluid/dygraph_utils.py +++ b/python/paddle/fluid/dygraph_utils.py @@ -18,10 +18,9 @@ from paddle import _C_ops, _legacy_C_ops @dygraph_only -def _append_activation_in_dygraph(input, - act=None, - use_cudnn=None, - use_mkldnn=None): +def _append_activation_in_dygraph( + input, act=None, use_cudnn=None, use_mkldnn=None +): """Append activation in dygraph mode. Args: @@ -60,5 +59,6 @@ def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False): if bias is None: return input - return _legacy_C_ops.elementwise_add(input, bias, 'axis', axis, - 'use_mkldnn', use_mkldnn) + return _legacy_C_ops.elementwise_add( + input, bias, 'axis', axis, 'use_mkldnn', use_mkldnn + ) diff --git a/python/paddle/fluid/entry_attr.py b/python/paddle/fluid/entry_attr.py index 4d481943229edc4b4d44ca9e17bbd286e85cbc1e..51e9f5a6d51d01c6e24f08c053dba2b8e4a4c71b 100644 --- a/python/paddle/fluid/entry_attr.py +++ b/python/paddle/fluid/entry_attr.py @@ -37,7 +37,6 @@ class EntryAttr(object): class ProbabilityEntry(EntryAttr): - def __init__(self, probability): super(ProbabilityEntry, self).__init__() @@ -55,17 +54,18 @@ class ProbabilityEntry(EntryAttr): class CountFilterEntry(EntryAttr): - def __init__(self, count_filter): super(CountFilterEntry, self).__init__() if not isinstance(count_filter, int): raise ValueError( - "count_filter must be a valid integer greater than 0") + "count_filter must be a valid integer greater than 0" + ) if count_filter < 0: raise ValueError( - "count_filter must be a valid integer greater or equal than 0") + "count_filter must be a valid integer greater or equal than 0" + ) self._name = "count_filter_entry" self._count_filter = count_filter diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index 6c3ab7c497e5be3309d8649996f425656eea43ab..06f01ecdf5e6873994e715fe6ac61f645c25314d 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -31,12 +31,14 @@ __all__ = [ def _clone_var_(block, var): assert isinstance(var, Variable) - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=True) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True, + ) class 
Evaluator(object): @@ -66,7 +68,9 @@ class Evaluator(object): def __init__(self, name, **kwargs): warnings.warn( "The %s is deprecated, because maintain a modified program inside evaluator cause bug easily, please use fluid.metrics.%s instead." - % (self.__class__.__name__, self.__class__.__name__), Warning) + % (self.__class__.__name__, self.__class__.__name__), + Warning, + ) self.states = [] self.metrics = [] self.helper = LayerHelper(name, **kwargs) @@ -86,10 +90,9 @@ class Evaluator(object): for var in self.states: assert isinstance(var, Variable) g_var = _clone_var_(reset_program.current_block(), var) - layers.fill_constant(shape=g_var.shape, - value=0.0, - dtype=g_var.dtype, - out=g_var) + layers.fill_constant( + shape=g_var.shape, value=0.0, dtype=g_var.dtype, out=g_var + ) executor.run(reset_program) @@ -114,11 +117,12 @@ class Evaluator(object): Returns: State variable """ - state = self.helper.create_variable(name="_".join( - [unique_name.generate(self.helper.name), suffix]), - persistable=True, - dtype=dtype, - shape=shape) + state = self.helper.create_variable( + name="_".join([unique_name.generate(self.helper.name), suffix]), + persistable=True, + dtype=dtype, + shape=shape, + ) self.states.append(state) return state @@ -169,27 +173,41 @@ class ChunkEvaluator(Evaluator): if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") - self.num_infer_chunks = self._create_state(dtype='int64', - shape=[1], - suffix='num_infer_chunks') - self.num_label_chunks = self._create_state(dtype='int64', - shape=[1], - suffix='num_label_chunks') + self.num_infer_chunks = self._create_state( + dtype='int64', shape=[1], suffix='num_infer_chunks' + ) + self.num_label_chunks = self._create_state( + dtype='int64', shape=[1], suffix='num_label_chunks' + ) self.num_correct_chunks = self._create_state( - dtype='int64', shape=[1], suffix='num_correct_chunks') - precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval( + dtype='int64', shape=[1], suffix='num_correct_chunks' + ) + ( + precision, + recall, + f1_score, + num_infer_chunks, + num_label_chunks, + num_correct_chunks, + ) = layers.chunk_eval( input=input, label=label, chunk_scheme=chunk_scheme, num_chunk_types=num_chunk_types, excluded_chunk_types=excluded_chunk_types, ) - layers.sums(input=[self.num_infer_chunks, num_infer_chunks], - out=self.num_infer_chunks) - layers.sums(input=[self.num_label_chunks, num_label_chunks], - out=self.num_label_chunks) - layers.sums(input=[self.num_correct_chunks, num_correct_chunks], - out=self.num_correct_chunks) + layers.sums( + input=[self.num_infer_chunks, num_infer_chunks], + out=self.num_infer_chunks, + ) + layers.sums( + input=[self.num_label_chunks, num_label_chunks], + out=self.num_label_chunks, + ) + layers.sums( + input=[self.num_correct_chunks, num_correct_chunks], + out=self.num_correct_chunks, + ) self.metrics.extend([precision, recall, f1_score]) @@ -199,18 +217,31 @@ class ChunkEvaluator(Evaluator): block = eval_program.current_block() num_infer_chunks, num_label_chunks, num_correct_chunks = executor.run( eval_program, - fetch_list=[_clone_var_(block, state) for state in self.states]) + fetch_list=[_clone_var_(block, state) for state in self.states], + ) num_infer_chunks = num_infer_chunks[0] num_label_chunks = num_label_chunks[0] num_correct_chunks = num_correct_chunks[0] - precision = float( - num_correct_chunks) / num_infer_chunks if num_infer_chunks else 0 - recall = float( - num_correct_chunks) / 
num_label_chunks if num_label_chunks else 0 - f1_score = float(2 * precision * recall) / ( - precision + recall) if num_correct_chunks else 0 - return np.array([precision], dtype='float32'), np.array( - [recall], dtype='float32'), np.array([f1_score], dtype='float32') + precision = ( + float(num_correct_chunks) / num_infer_chunks + if num_infer_chunks + else 0 + ) + recall = ( + float(num_correct_chunks) / num_label_chunks + if num_label_chunks + else 0 + ) + f1_score = ( + float(2 * precision * recall) / (precision + recall) + if num_correct_chunks + else 0 + ) + return ( + np.array([precision], dtype='float32'), + np.array([recall], dtype='float32'), + np.array([f1_score], dtype='float32'), + ) class EditDistance(Evaluator): @@ -250,31 +281,35 @@ class EditDistance(Evaluator): if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") - self.total_distance = self._create_state(dtype='float32', - shape=[1], - suffix='total_distance') - self.seq_num = self._create_state(dtype='int64', - shape=[1], - suffix='seq_num') - self.instance_error = self._create_state(dtype='int64', - shape=[1], - suffix='instance_error') - distances, seq_num = layers.edit_distance(input=input, - label=label, - ignored_tokens=ignored_tokens) + self.total_distance = self._create_state( + dtype='float32', shape=[1], suffix='total_distance' + ) + self.seq_num = self._create_state( + dtype='int64', shape=[1], suffix='seq_num' + ) + self.instance_error = self._create_state( + dtype='int64', shape=[1], suffix='instance_error' + ) + distances, seq_num = layers.edit_distance( + input=input, label=label, ignored_tokens=ignored_tokens + ) zero = layers.fill_constant(shape=[1], value=0.0, dtype='float32') compare_result = layers.equal(distances, zero) compare_result_int = layers.cast(x=compare_result, dtype='int64') seq_right_count = layers.reduce_sum(compare_result_int) - instance_error_count = layers.elementwise_sub(x=seq_num, - y=seq_right_count) + instance_error_count = layers.elementwise_sub( + x=seq_num, y=seq_right_count + ) total_distance = layers.reduce_sum(distances) - layers.sums(input=[self.total_distance, total_distance], - out=self.total_distance) + layers.sums( + input=[self.total_distance, total_distance], out=self.total_distance + ) layers.sums(input=[self.seq_num, seq_num], out=self.seq_num) - layers.sums(input=[self.instance_error, instance_error_count], - out=self.instance_error) + layers.sums( + input=[self.instance_error, instance_error_count], + out=self.instance_error, + ) self.metrics.append(total_distance) self.metrics.append(instance_error_count) @@ -289,10 +324,12 @@ class EditDistance(Evaluator): seq_num = layers.cast(x=seq_num, dtype='float32') instance_error = layers.cast(x=instance_error, dtype='float32') avg_distance = layers.elementwise_div(x=total_distance, y=seq_num) - avg_instance_error = layers.elementwise_div(x=instance_error, - y=seq_num) - result = executor.run(eval_program, - fetch_list=[avg_distance, avg_instance_error]) + avg_instance_error = layers.elementwise_div( + x=instance_error, y=seq_num + ) + result = executor.run( + eval_program, fetch_list=[avg_distance, avg_instance_error] + ) return np.array(result[0]), np.array(result[1]) @@ -355,16 +392,18 @@ class DetectionMAP(Evaluator): 'accum_map_v' is the accumulative mAP of one pass. 
""" - def __init__(self, - input, - gt_label, - gt_box, - gt_difficult=None, - class_num=None, - background_label=0, - overlap_threshold=0.5, - evaluate_difficult=True, - ap_version='integral'): + def __init__( + self, + input, + gt_label, + gt_box, + gt_difficult=None, + class_num=None, + background_label=0, + overlap_threshold=0.5, + evaluate_difficult=True, + ap_version='integral', + ): super(DetectionMAP, self).__init__("map_eval") gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype) @@ -375,26 +414,29 @@ class DetectionMAP(Evaluator): label = layers.concat([gt_label, gt_box], axis=1) # calculate mean average precision (mAP) of current mini-batch - map = detection.detection_map(input, - label, - class_num, - background_label, - overlap_threshold=overlap_threshold, - evaluate_difficult=evaluate_difficult, - ap_version=ap_version) + map = detection.detection_map( + input, + label, + class_num, + background_label, + overlap_threshold=overlap_threshold, + evaluate_difficult=evaluate_difficult, + ap_version=ap_version, + ) self._create_state(dtype='int32', shape=None, suffix='accum_pos_count') self._create_state(dtype='float32', shape=None, suffix='accum_true_pos') - self._create_state(dtype='float32', - shape=None, - suffix='accum_false_pos') + self._create_state( + dtype='float32', shape=None, suffix='accum_false_pos' + ) self.has_state = None - var = self.helper.create_variable(persistable=True, - dtype='int32', - shape=[1]) - self.helper.set_variable_initializer(var, - initializer=Constant(value=int(0))) + var = self.helper.create_variable( + persistable=True, dtype='int32', shape=[1] + ) + self.helper.set_variable_initializer( + var, initializer=Constant(value=int(0)) + ) self.has_state = var # calculate accumulative mAP @@ -408,12 +450,15 @@ class DetectionMAP(Evaluator): has_state=self.has_state, input_states=self.states, out_states=self.states, - ap_version=ap_version) + ap_version=ap_version, + ) - layers.fill_constant(shape=self.has_state.shape, - value=1, - dtype=self.has_state.dtype, - out=self.has_state) + layers.fill_constant( + shape=self.has_state.shape, + value=1, + dtype=self.has_state.dtype, + out=self.has_state, + ) self.cur_map = map self.accum_map = accum_map @@ -426,8 +471,7 @@ class DetectionMAP(Evaluator): reset_program = Program() with program_guard(main_program=reset_program): var = _clone_var_(reset_program.current_block(), self.has_state) - layers.fill_constant(shape=var.shape, - value=0, - dtype=var.dtype, - out=var) + layers.fill_constant( + shape=var.shape, value=0, dtype=var.dtype, out=var + ) executor.run(reset_program) diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 489ed0b476a56f20571284d70089c894339df998..ba8221bb2f0fb2cca44f2e178b245072c09d4d75 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -144,10 +144,12 @@ def as_numpy(tensor, copy=False): assert isinstance(tensor, core.LoDTensor) lod = tensor.lod() if len(lod) > 0: - raise RuntimeError("Some of your fetched tensors hold LoD information. \ + raise RuntimeError( + "Some of your fetched tensors hold LoD information. \ They can not be completely cast to Python ndarray. \ Please set the parameter 'return_numpy' as 'False' to \ - return LoDTensor itself directly.") + return LoDTensor itself directly." 
+ ) if tensor._is_initialized(): if copy: return np.array(tensor) @@ -237,21 +239,29 @@ def check_feed_shape_type(var, feed, num_places=1): if diff_shape is not None: raise ValueError( 'The fed Variable %r should have dimensions = %d, shape = ' - '%r, but received fed shape %r on each device' % - (var.name, len(var.shape), var.shape, diff_shape)) + '%r, but received fed shape %r on each device' + % (var.name, len(var.shape), var.shape, diff_shape) + ) if not dtype_is_compatible_with(feed._dtype(), var.dtype): - var_dtype_format = convert_dtype(var.dtype) if isinstance( - var.dtype, core.VarDesc.VarType) else var.dtype - feed_dtype_format = convert_dtype(feed._dtype()) if isinstance( - feed._dtype(), core.VarDesc.VarType) else feed._dtype() + var_dtype_format = ( + convert_dtype(var.dtype) + if isinstance(var.dtype, core.VarDesc.VarType) + else var.dtype + ) + feed_dtype_format = ( + convert_dtype(feed._dtype()) + if isinstance(feed._dtype(), core.VarDesc.VarType) + else feed._dtype() + ) raise ValueError( - 'The data type of fed Variable %r must be %r, but received %r' % - (var.name, var_dtype_format, feed_dtype_format)) + 'The data type of fed Variable %r must be %r, but received %r' + % (var.name, var_dtype_format, feed_dtype_format) + ) return True def has_feed_operators(block, feed_targets, feed_holder_name): - """ Check whether the block already has feed operators. + """Check whether the block already has feed operators. Return false if the block does not have any feed operators. If some feed operators have been prepended to the block, check that @@ -280,20 +290,22 @@ def has_feed_operators(block, feed_targets, feed_holder_name): if feed_target_name not in feed_targets: raise Exception( "'feed_targets' does not have {} variable".format( - feed_target_name)) + feed_target_name + ) + ) else: break if feed_count > 0 and feed_count != len(feed_targets): raise Exception( - "Feed operators in program desc do not match 'feed_targets'") + "Feed operators in program desc do not match 'feed_targets'" + ) return feed_count > 0 -def has_fetch_operators(block, - fetch_targets, - fetch_holder_name, - fetch_op='fetch'): - """ Check whether the block already has fetch operators. +def has_fetch_operators( + block, fetch_targets, fetch_holder_name, fetch_op='fetch' +): + """Check whether the block already has fetch operators. Return false if the block does not have any fetch operators. 
If some fetch operators have been appended to the block, check that @@ -321,25 +333,25 @@ def has_fetch_operators(block, assert op.desc.output('Out')[0] == fetch_holder_name fetch_target_name = op.desc.input('X')[0] if fetch_target_name not in [ - var.desc.name() for var in fetch_targets + var.desc.name() for var in fetch_targets ]: raise Exception( "'fetch_targets' does not have {} variable".format( - fetch_target_name)) + fetch_target_name + ) + ) idx = op.desc.attr('col') assert fetch_target_name == fetch_targets[idx].desc.name() if fetch_count > 0 and fetch_count != len(fetch_targets): raise Exception( - "Fetch operators in program desc do not match 'fetch_targets'") + "Fetch operators in program desc do not match 'fetch_targets'" + ) return fetch_count > 0 -def _add_feed_fetch_ops(program, - feed, - fetch_list, - feed_var_name, - fetch_var_name, - use_fetch_v2=False): +def _add_feed_fetch_ops( + program, feed, fetch_list, feed_var_name, fetch_var_name, use_fetch_v2=False +): tmp_program = program.clone() global_block = tmp_program.global_block() @@ -350,7 +362,8 @@ def _add_feed_fetch_ops(program, feed_var = global_block.create_var( name=feed_var_name, type=core.VarDesc.VarType.FEED_MINIBATCH, - persistable=True) + persistable=True, + ) if fetch_var_name in global_block.vars: fetch_var = global_block.var(fetch_var_name) @@ -358,21 +371,25 @@ def _add_feed_fetch_ops(program, fetch_var = global_block.create_var( name=fetch_var_name, type=core.VarDesc.VarType.FETCH_LIST, - persistable=True) + persistable=True, + ) # prepend feed operators if not has_feed_operators(global_block, feed, feed_var_name): for i, name in enumerate(feed): if global_block.has_var(name): out = global_block.var(name) - global_block._prepend_op(type='feed', - inputs={'X': [feed_var]}, - outputs={'Out': [out]}, - attrs={'col': i}) + global_block._prepend_op( + type='feed', + inputs={'X': [feed_var]}, + outputs={'Out': [out]}, + attrs={'col': i}, + ) else: warnings.warn( "The variable %s is not found in program. It is not declared or is pruned." 
- % name) + % name + ) if use_fetch_v2: fetch_op = 'fetch_v2' @@ -380,22 +397,26 @@ def _add_feed_fetch_ops(program, fetch_op = 'fetch' # append fetch_operators - if not has_fetch_operators(global_block, fetch_list, fetch_var_name, - fetch_op): + if not has_fetch_operators( + global_block, fetch_list, fetch_var_name, fetch_op + ): for i, var in enumerate(fetch_list): assert isinstance(var, Variable) or isinstance( - var, str), ("Wrong type for fetch_list[%s]: %s" % - (i, type(var))) - global_block.append_op(type=fetch_op, - inputs={'X': [var]}, - outputs={'Out': [fetch_var]}, - attrs={'col': i}) + var, str + ), "Wrong type for fetch_list[%s]: %s" % (i, type(var)) + global_block.append_op( + type=fetch_op, + inputs={'X': [var]}, + outputs={'Out': [fetch_var]}, + attrs={'col': i}, + ) return tmp_program -def _apply_inplace_addto_pass(program, enable_inplace, enable_addto, - skip_var_names): +def _apply_inplace_addto_pass( + program, enable_inplace, enable_addto, skip_var_names +): use_cuda = True if core.is_compiled_with_cuda() else False attrs = {"use_cuda": use_cuda, "mem_opt_skip_vars": skip_var_names} @@ -404,12 +425,14 @@ def _apply_inplace_addto_pass(program, enable_inplace, enable_addto, empty_startup_program = Program() if enable_inplace: pass_name = "buffer_shared_inplace_pass" - _apply_pass(program, empty_startup_program, pass_name, attrs, - attr_types) + _apply_pass( + program, empty_startup_program, pass_name, attrs, attr_types + ) if enable_addto and use_cuda: pass_name = "inplace_addto_op_pass" - _apply_pass(program, empty_startup_program, pass_name, attrs, - attr_types) + _apply_pass( + program, empty_startup_program, pass_name, attrs, attr_types + ) def _fetch_var(name, scope=None, return_numpy=True): @@ -438,7 +461,8 @@ def _fetch_var(name, scope=None, return_numpy=True): assert var is not None, ( "Cannot find " + name + " in scope. Perhaps you need to make the" " variable persistable by using var.persistable = True in your" - " program.") + " program." 
+ ) tensor = var.get_tensor() if return_numpy: tensor = as_numpy(tensor, copy=True) @@ -446,7 +470,6 @@ def _fetch_var(name, scope=None, return_numpy=True): def _to_name_str(var): - def _to_str(var): if isinstance(var, Variable): return var.desc.name() @@ -471,19 +494,26 @@ def _to_name_str(var): def _is_enable_standalone_executor(): - return framework._enable_standalone_executor_ is None or framework._enable_standalone_executor_ in [ - 1, '1', True, 'True', 'true' - ] + return ( + framework._enable_standalone_executor_ is None + or framework._enable_standalone_executor_ + in [1, '1', True, 'True', 'true'] + ) def _is_dy2st_enable_standalone_executor(): return framework._dy2st_enable_standalone_executor_ in [ - 1, '1', True, 'True', 'true' + 1, + '1', + True, + 'True', + 'true', ] def _prepare_fleet_executor(): from ..distributed.fleet.proto import fleet_executor_desc_pb2 + trainer_endpoints_str = os.getenv("PADDLE_TRAINER_ENDPOINTS", "") trainer_endpoints = trainer_endpoints_str.split(',') fleet_exe_desc = fleet_executor_desc_pb2.FleetExecutorDesc() @@ -501,7 +531,8 @@ def _prepare_fleet_executor(): def _get_strong_program_cache_key_for_new_exe(program, feed, fetch_list): return program.desc.cached_hash_str() + _get_program_cache_key( - feed, fetch_list) + feed, fetch_list + ) def _get_strong_program_cache_key(program, feed, fetch_list): @@ -512,10 +543,16 @@ def _get_strong_program_cache_key(program, feed, fetch_list): block_str.append(var_name) return "\n".join(block_str) - inner_program = program._program if isinstance( - program, compiler.CompiledProgram) else program - return _get_varname_from_block(inner_program.blocks[0]) + str( - id(program)) + _get_program_cache_key(feed, fetch_list) + inner_program = ( + program._program + if isinstance(program, compiler.CompiledProgram) + else program + ) + return ( + _get_varname_from_block(inner_program.blocks[0]) + + str(id(program)) + + _get_program_cache_key(feed, fetch_list) + ) def _get_program_cache_key(feed, fetch_list): @@ -531,30 +568,35 @@ def _get_program_cache_key(feed, fetch_list): def _as_lodtensor(data, place, dtype=None): """ - Convert numpy.ndarray to Tensor, its only support Tensor without LoD information. - For higher dimensional sequence data, please use LoDTensor directly. + Convert numpy.ndarray to Tensor, its only support Tensor without LoD information. + For higher dimensional sequence data, please use LoDTensor directly. - Examples: - >>> import paddle.fluid as fluid - >>> place = fluid.CPUPlace() - >>> exe = fluid.executor(place) - >>> data = np.array(size=(100, 200, 300)) - >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data) - >>> ... + Examples: + >>> import paddle.fluid as fluid + >>> place = fluid.CPUPlace() + >>> exe = fluid.executor(place) + >>> data = np.array(size=(100, 200, 300)) + >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data) + >>> ... 
- Args: - data(numpy.ndarray|list|tuple|scalar): a instance of array, scalar, list or tuple - data(core.Place): the place of created tensor - dtype(core.VarDesc.VarType|str): the expected data type of created tensor + Args: + data(numpy.ndarray|list|tuple|scalar): a instance of array, scalar, list or tuple + data(core.Place): the place of created tensor + dtype(core.VarDesc.VarType|str): the expected data type of created tensor - Returns: - LoDTensor - """ - #NOTE(zhiqiu): convert python builtin, like float, int, and list, to numpy ndarray + Returns: + LoDTensor + """ + # NOTE(zhiqiu): convert python builtin, like float, int, and list, to numpy ndarray if not isinstance(data, np.ndarray): - assert dtype is not None, 'The dtype should be given when feed data is not np.ndarray' - dtype = convert_dtype(dtype) if isinstance( - dtype, core.VarDesc.VarType) else dtype + assert ( + dtype is not None + ), 'The dtype should be given when feed data is not np.ndarray' + dtype = ( + convert_dtype(dtype) + if isinstance(dtype, core.VarDesc.VarType) + else dtype + ) if np.isscalar(data): data = np.array([data]).astype(dtype) elif isinstance(data, (list, tuple)): @@ -569,7 +611,9 @@ def _as_lodtensor(data, place, dtype=None): else: raise TypeError( "Convert data of type {} to Tensor is not supported".format( - type(data))) + type(data) + ) + ) # convert numpy.ndarray to tensor tensor = core.LoDTensor() @@ -578,7 +622,6 @@ def _as_lodtensor(data, place, dtype=None): class FetchHandler(object): - def __init__(self, var_dict=None, period_secs=60): assert var_dict != None self.var_dict = var_dict @@ -592,7 +635,8 @@ class FetchHandler(object): @staticmethod def help(): - print(""" + print( + """ class FetchHandlerExample(FetchHandler): def handler(self, res_dict): print(res_dict["auc"]) @@ -601,11 +645,11 @@ class FetchHandlerExample(FetchHandler): auc = Variable() var_dict = {"auc": auc} handler = FetchHandlerExample(var_dict=var_dict) -""") +""" + ) class _StandaloneExecutor(object): - def __init__(self, place, main_program, scope): self._place = core.Place() self._place.set_place(place) @@ -625,8 +669,9 @@ class _StandaloneExecutor(object): """ fetch_list = self._check_fetch(fetch_list) - tensors = self._new_exe.run(scope, feed_names, - fetch_list)._move_to_list() + tensors = self._new_exe.run( + scope, feed_names, fetch_list + )._move_to_list() if return_numpy: return as_numpy(tensors, copy=True) else: @@ -658,8 +703,9 @@ class _StandaloneExecutor(object): if not isinstance(feed, dict): raise TypeError( - "feed requires dict as its Parameter. But you passed in %s" % - (type(feed))) + "feed requires dict as its Parameter. But you passed in %s" + % (type(feed)) + ) global_block = self._main_program.global_block() for feed_name in list(feed.keys()): @@ -667,7 +713,8 @@ class _StandaloneExecutor(object): feed.pop(feed_name) warnings.warn( "The variable %s is not found in program. It is not declared or is pruned." - % feed_name) + % feed_name + ) return feed @@ -681,19 +728,27 @@ class _StandaloneExecutor(object): fetch_var = fetch_var.name elif not isinstance(fetch_var, str): raise TypeError( - "Required fetch_var shall be str|Variable, but received {}". 
- format(type(fetch_var).__name__)) + "Required fetch_var shall be str|Variable, but received {}".format( + type(fetch_var).__name__ + ) + ) res.append(fetch_var) return res class _ExecutorCache(object): - class _CachedData(object): - - def __init__(self, program, feed, fetch_list, feed_var_name, - fetch_var_name, place, scope): + def __init__( + self, + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + place, + scope, + ): self.program = program self.feed = feed self.fetch_list = fetch_list @@ -709,18 +764,25 @@ class _ExecutorCache(object): # The program holds no _program, maybe it is constructed by graph. # Convert graph to program in order to generate key. self.program._program = framework.IrGraph( - self.program._graph).to_program() + self.program._graph + ).to_program() self.key = hash( _get_strong_program_cache_key_for_new_exe( - self.program._program, feed, fetch_list)) + self.program._program, feed, fetch_list + ) + ) else: self.key = hash( _get_strong_program_cache_key_for_new_exe( - self.program, feed, fetch_list)) + self.program, feed, fetch_list + ) + ) def __eq__(self, other): - return isinstance( - other, _ExecutorCache._CachedData) and self.key == other.key + return ( + isinstance(other, _ExecutorCache._CachedData) + and self.key == other.key + ) def __hash__(self): return self.key @@ -730,21 +792,41 @@ class _ExecutorCache(object): # the _ExecutorCache instance, otherwise a global cache may not be released after # the Executor instance deleted self._get_cached_program_and_executor = lru_cache(maxsize=8)( - self._get_program_and_executor) + self._get_program_and_executor + ) def clear(self): self._get_cached_program_and_executor.cache_clear() - def get_program_and_executor(self, program, feed, fetch_list, feed_var_name, - fetch_var_name, place, scope): + def get_program_and_executor( + self, + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + place, + scope, + ): return self._get_cached_program_and_executor( - self._CachedData(program, feed, fetch_list, feed_var_name, - fetch_var_name, place, scope)) + self._CachedData( + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + place, + scope, + ) + ) def _get_program_and_executor(self, cached_data): program = cached_data.program - inner_program = program._program if isinstance( - program, compiler.CompiledProgram) else program + inner_program = ( + program._program + if isinstance(program, compiler.CompiledProgram) + else program + ) feed = cached_data.feed fetch_list = cached_data.fetch_list feed_var_name = cached_data.feed_var_name @@ -754,9 +836,13 @@ class _ExecutorCache(object): # To apply IR pass, compile the Program to IrGraph and convert it back to Program if isinstance(program, compiler.CompiledProgram) or isinstance( - program._graph, compiler.CompiledProgram): - compiled_program = program if isinstance( - program, compiler.CompiledProgram) else program._graph + program._graph, compiler.CompiledProgram + ): + compiled_program = ( + program + if isinstance(program, compiler.CompiledProgram) + else program._graph + ) build_strategy = compiled_program._build_strategy # print(f"Program before convert:\n {inner_program}", flush=True) compiled_program._compile(scope, place) @@ -771,21 +857,26 @@ class _ExecutorCache(object): else: build_strategy = None from paddle.incubate.autograd import prim_enabled, prim2orig + if prim_enabled() and program == default_main_program(): prim2orig() inner_program = program - program = _add_feed_fetch_ops(program=inner_program, - feed=feed, 
- fetch_list=fetch_list, - feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name, - use_fetch_v2=True) + program = _add_feed_fetch_ops( + program=inner_program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name, + use_fetch_v2=True, + ) - if os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM', None) in [ - 1, '1', True, 'True', 'true' - ] and not program._is_start_up_program_: + if ( + os.environ.get('FLAGS_CONVERT_GRAPH_TO_PROGRAM', None) + in [1, '1', True, 'True', 'true'] + and not program._is_start_up_program_ + ): if program.num_blocks > 1: # If there are multiple blocks in the program, subblock will not be executed with the new executor in temporary logging.warning("There are more than 1 block in program.") @@ -796,13 +887,22 @@ class _ExecutorCache(object): # standalone executor will apply buffer_shared_inplace_pass and # inplace_addto_op_pass to program according to build_strategy - enable_inplace = True if build_strategy is None or build_strategy.enable_inplace else False - enable_addto = True if build_strategy is not None and build_strategy.enable_addto else False + enable_inplace = ( + True + if build_strategy is None or build_strategy.enable_inplace + else False + ) + enable_addto = ( + True + if build_strategy is not None and build_strategy.enable_addto + else False + ) if enable_inplace or enable_addto: # inplace should skip feed and fetch var skip_var_names = eval(_get_program_cache_key(feed, fetch_list)) - _apply_inplace_addto_pass(program, enable_inplace, enable_addto, - skip_var_names) + _apply_inplace_addto_pass( + program, enable_inplace, enable_addto, skip_var_names + ) new_program = program.clone() new_exe = _StandaloneExecutor(place, new_program, scope) @@ -906,7 +1006,8 @@ class Executor(object): self._prepare_to_run_called = False self._auto_checkpoint_name = unique_name.generate( - "__auto_checkpoint_executor__") + "__auto_checkpoint_executor__" + ) # NOTE: Whether to use experimental executor `StandaloneExecutor`. self._enable_interpreter_core = _is_enable_standalone_executor() @@ -976,8 +1077,9 @@ class Executor(object): var = global_block.var(feed_target_name) if var.dtype != core.VarDesc.VarType.STRINGS: if not isinstance(cur_feed, core.LoDTensor): - cur_feed = _as_lodtensor(cur_feed, self.place, - var.dtype) + cur_feed = _as_lodtensor( + cur_feed, self.place, var.dtype + ) check_feed_shape_type(var, cur_feed) idx = op.desc.attr('col') core.set_feed_variable(scope, cur_feed, feed_var_name, idx) @@ -1015,14 +1117,19 @@ class Executor(object): _optimize_ops.append(item) else: raise TypeError( - "The operator in fetch_list is not an optimize_op") - elif isinstance(item, Variable) or isinstance( - item, str) or isinstance(item, str): + "The operator in fetch_list is not an optimize_op" + ) + elif ( + isinstance(item, Variable) + or isinstance(item, str) + or isinstance(item, str) + ): _fetch_list.append(item) else: raise TypeError( "The item in fetch_list should be str, variable or optimize_op, but received %s.", - type(item)) + type(item), + ) for index, item in enumerate(fetch_list): # NOTE(zhiqiu): to support (optimizer_ops, param_and_grads) and optimizer_ops in fetch_list @@ -1034,9 +1141,10 @@ class Executor(object): elif isinstance(item, tuple): if not isinstance(item[0], (list, tuple)): raise TypeError( - "Requires fetch_list[{}][0] shall be one of (list, tuple) when type(fetch_list[{}]) is `tuple`, but received fetch_list[{}][0]'s type is `{}`." 
- .format(index, index, index, - type(item[0]).__name__)) + "Requires fetch_list[{}][0] shall be one of (list, tuple) when type(fetch_list[{}]) is `tuple`, but received fetch_list[{}][0]'s type is `{}`.".format( + index, index, index, type(item[0]).__name__ + ) + ) for i in item[0]: _get_targets(_optimize_ops, _fetch_list, i) else: @@ -1045,11 +1153,9 @@ class Executor(object): return _fetch_list, _optimize_ops @classmethod - def _prune_program(cls, - program, - feed=None, - fetch_list=None, - optimize_ops=None): + def _prune_program( + cls, program, feed=None, fetch_list=None, optimize_ops=None + ): """ Prune operators and variables which are not needed to generate :code:`fetch_list` and optimize operators. @@ -1141,7 +1247,8 @@ class Executor(object): feed.pop(feed_name) warnings.warn( "The variable %s is not found in program. It is not declared or is pruned." - % feed_name) + % feed_name + ) elif isinstance(feed, list) or isinstance(feed, tuple): for i, each in enumerate(feed): @@ -1150,7 +1257,8 @@ class Executor(object): each.pop(feed_name) warnings.warn( "The variable %s is not found in program. It is not declared or is pruned." - % feed_name) + % feed_name + ) return feed ''' @@ -1185,9 +1293,18 @@ class Executor(object): del trainer_instance self._default_executor.close() - def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name, - return_numpy, return_merged): + def _run_parallel( + self, + program, + scope, + feed, + fetch_list, + fetch_var_name, + return_numpy, + return_merged, + ): from paddle.optimizer.lr import LRScheduler + exe = program._executor # TODO(zhenghuihuang): quantization uses Graph in CompiledProgram # instead of program. We will add support for checking Vars in Graph @@ -1202,9 +1319,11 @@ class Executor(object): if not isinstance(feed_tensor, core.LoDTensor): # always set to CPU place, since the tensor need to be split # it is fast in CPU - feed_tensor = _as_lodtensor(feed[feed_name], - core.CPUPlace(), - var.dtype if var else None) + feed_tensor = _as_lodtensor( + feed[feed_name], + core.CPUPlace(), + var.dtype if var else None, + ) if need_check_feed: check_feed_shape_type(var, feed_tensor, exe.device_count()) feed_tensor_dict[feed_name] = feed_tensor @@ -1215,16 +1334,20 @@ class Executor(object): for i, each in enumerate(feed): if not isinstance(each, dict): raise TypeError( - "Each element of feed list should be a dict") + "Each element of feed list should be a dict" + ) res_dict = dict() for feed_name in each: tensor = each[feed_name] - var = global_block.var( - feed_name) if need_check_feed else None + var = ( + global_block.var(feed_name) if need_check_feed else None + ) if not isinstance(tensor, core.LoDTensor): - tensor = _as_lodtensor(each[feed_name], - program._places[i], - var.dtype if var else None) + tensor = _as_lodtensor( + each[feed_name], + program._places[i], + var.dtype if var else None, + ) if need_check_feed: check_feed_shape_type(var, tensor) res_dict[feed_name] = tensor @@ -1245,23 +1368,26 @@ class Executor(object): ) else: exe.feed_and_split_tensor_into_local_scopes( - {lr_sheduler._var_name: lr_tensor}) + {lr_sheduler._var_name: lr_tensor} + ) fetch_var_names = list(map(_to_name_str, fetch_list)) tensors = exe.run(fetch_var_names, return_merged)._move_to_list() return as_numpy(tensors) if return_numpy else tensors - def run(self, - program=None, - feed=None, - fetch_list=None, - feed_var_name='feed', - fetch_var_name='fetch', - scope=None, - return_numpy=True, - use_program_cache=False, - return_merged=True, - 
use_prune=False): + def run( + self, + program=None, + feed=None, + fetch_list=None, + feed_var_name='feed', + fetch_var_name='fetch', + scope=None, + return_numpy=True, + use_program_cache=False, + return_merged=True, + use_prune=False, + ): """ Run the specified :code:`Program` or :code:`CompiledProgram`. It should be noted that the executor will execute all the operators in :code:`Program` or :code:`CompiledProgram` without pruning some @@ -1436,32 +1562,49 @@ class Executor(object): """ # Temporary FLAGS, just for testing the performance of program cache force_use_program_cache = os.environ.get( - 'FLAGS_FORCE_USE_PROGRAM_CACHE', None) + 'FLAGS_FORCE_USE_PROGRAM_CACHE', None + ) if force_use_program_cache is not None: use_program_cache = force_use_program_cache in [ - 1, '1', True, 'True', 'true' + 1, + '1', + True, + 'True', + 'true', ] self._log_force_set_program_cache(use_program_cache) try: - res = self._run_impl(program=program, - feed=feed, - fetch_list=fetch_list, - feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name, - scope=scope, - return_numpy=return_numpy, - use_program_cache=use_program_cache, - use_prune=use_prune, - return_merged=return_merged) + res = self._run_impl( + program=program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name, + scope=scope, + return_numpy=return_numpy, + use_program_cache=use_program_cache, + use_prune=use_prune, + return_merged=return_merged, + ) core.update_autotune_status() return res except Exception as e: six.reraise(*sys.exc_info()) - def _run_impl(self, program, feed, fetch_list, feed_var_name, - fetch_var_name, scope, return_numpy, use_program_cache, - return_merged, use_prune): + def _run_impl( + self, + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + scope, + return_numpy, + use_program_cache, + return_merged, + use_prune, + ): if self._closed: raise RuntimeError("Attempted to use a closed Executor") @@ -1480,17 +1623,19 @@ class Executor(object): program=program, feed=feed, fetch_list=fetch_list, - with_standalone_executor=self. - _fleet_executor_with_standalone) + with_standalone_executor=self._fleet_executor_with_standalone, + ) if "startup_program" in program._pipeline_opt: program = program._pipeline_opt["startup_program"] else: - return self._run_pipeline(program, - fetch_list=fetch_list, - use_program_cache=use_program_cache) + return self._run_pipeline( + program, + fetch_list=fetch_list, + use_program_cache=use_program_cache, + ) if isinstance(program, Program) and program._heter_pipeline_opt: - #print("program._heter_pipeline_opt: {}".format( + # print("program._heter_pipeline_opt: {}".format( # program._heter_pipeline_opt)) ## change default executor heter_place = program._heter_pipeline_opt["heter_place"] @@ -1500,20 +1645,26 @@ class Executor(object): self._default_executor = core.Executor(p) # TODO(zhangminxu): support heterps pipeline training using exe.run if "startup_program" in program._heter_pipeline_opt: - #print("get startup_program from _pipeline_opt") + # print("get startup_program from _pipeline_opt") program = program._heter_pipeline_opt["startup_program"] - if isinstance(program, Program) and \ - len(program.global_block().ops) == 0: + if ( + isinstance(program, Program) + and len(program.global_block().ops) == 0 + ): if use_default_main_program: - error_info = "Now you are using default_main_program, "\ - "but there are no operators in the program to be executed. 
"\ - "Please ensure you create model correctly or you can pass "\ + error_info = ( + "Now you are using default_main_program, " + "but there are no operators in the program to be executed. " + "Please ensure you create model correctly or you can pass " "the Program or the CompiledProgram manually." + ) else: - error_info = "There are no operators in the program to be executed. "\ - "If you pass Program manually, please use fluid.program_guard "\ + error_info = ( + "There are no operators in the program to be executed. " + "If you pass Program manually, please use fluid.program_guard " "to ensure the current Program is being used." + ) warnings.warn(error_info) if scope is None: @@ -1523,27 +1674,36 @@ class Executor(object): _origin_fetch_list = fetch_list _origin_program = program fetch_list, optimize_ops = self._split_optimize_ops_in_fetch_list( - fetch_list) + fetch_list + ) if optimize_ops: use_prune = True if use_prune: - cache_key = _get_strong_program_cache_key(program, feed, - _origin_fetch_list) + cache_key = _get_strong_program_cache_key( + program, feed, _origin_fetch_list + ) cached_pruned_program = self._get_pruned_program_cache(cache_key) if cached_pruned_program is None: if isinstance(program, compiler.CompiledProgram): program_scope_cache = self._get_pruned_program_scope_cache( - str(id(_origin_program))) + str(id(_origin_program)) + ) # copy the original program, so it can be cached. program = copy.copy(program) # share the local scopes for same original CompiledProgram. program._share_vars_from = program_scope_cache - if self._get_pruned_program_scope_cache( - str(id(_origin_program))) is None: + if ( + self._get_pruned_program_scope_cache( + str(id(_origin_program)) + ) + is None + ): self._add_pruned_program_scope_cache( - str(id(_origin_program)), program) - pruned_program = self._prune_program(program, feed, fetch_list, - optimize_ops) + str(id(_origin_program)), program + ) + pruned_program = self._prune_program( + program, feed, fetch_list, optimize_ops + ) self._add_pruned_program_cache(cache_key, pruned_program) else: pruned_program = cached_pruned_program @@ -1556,68 +1716,97 @@ class Executor(object): return False use_standalone_executor_for_distribution = os.environ.get( - 'FLAGS_CONVERT_GRAPH_TO_PROGRAM', - None) in [1, '1', True, 'True', 'true'] + 'FLAGS_CONVERT_GRAPH_TO_PROGRAM', None + ) in [1, '1', True, 'True', 'true'] - compiled = isinstance(program, - compiler.CompiledProgram) or isinstance( - program._graph, compiler.CompiledProgram) + compiled = isinstance( + program, compiler.CompiledProgram + ) or isinstance(program._graph, compiler.CompiledProgram) if compiled: - compiled_program = program if isinstance( - program, compiler.CompiledProgram) else program._graph + compiled_program = ( + program + if isinstance(program, compiler.CompiledProgram) + else program._graph + ) # Unsupported case 1: data parallel - if compiled_program._is_data_parallel and len( + if ( + compiled_program._is_data_parallel + and len( compiled_program._get_places( - place, compiled_program._places)) != 1: + place, compiled_program._places + ) + ) + != 1 + ): warnings.warn( "Standalone executor is not used for data parallel", - UserWarning) + UserWarning, + ) return False # Unsupported case 2: parallel graph if core.globals()['FLAGS_enable_parallel_graph'] in [ - 1, '1', True, 'True', 'true' + 1, + '1', + True, + 'True', + 'true', ]: warnings.warn( "Standalone executor is not used for parallel graph", - UserWarning) + UserWarning, + ) return False # Unsupported case 3: inference 
if compiled_program._is_inference: warnings.warn( "Standalone executor is not used for inference", - UserWarning) + UserWarning, + ) return False # Unsupported case 4: CUDA Graph - if compiled_program._build_strategy is not None and compiled_program._build_strategy.allow_cuda_graph_capture: + if ( + compiled_program._build_strategy is not None + and compiled_program._build_strategy.allow_cuda_graph_capture + ): warnings.warn( "Standalone executor is not used for CUDA Graph", - UserWarning) + UserWarning, + ) return False # Unsupported case 5: async mode - if compiled_program._build_strategy is not None and compiled_program._build_strategy.async_mode: + if ( + compiled_program._build_strategy is not None + and compiled_program._build_strategy.async_mode + ): warnings.warn( "Standalone executor is not used for async mode", - UserWarning) + UserWarning, + ) return False # delete this code after supporting fleet from paddle.distributed.fleet import fleet + if fleet._role_maker is not None: - warnings.warn("Standalone executor is not used for fleet", - UserWarning) + warnings.warn( + "Standalone executor is not used for fleet", UserWarning + ) return use_standalone_executor_for_distribution return True # NOTE: This is an experimental feature. If `export FLAGS_USE_STANDALONE_EXECUTOR=1 `, # use StandaloneExecutor to run the program. - if return_merged and self._enable_interpreter_core and _can_use_interpreter_core( - program, self.place): + if ( + return_merged + and self._enable_interpreter_core + and _can_use_interpreter_core(program, self.place) + ): if feed is None: feed = {} @@ -1627,18 +1816,27 @@ class Executor(object): if not isinstance(feed, dict): raise TypeError( "feed requires dict as its Parameter. But you passed in %s" - % (type(feed))) + % (type(feed)) + ) feed = self._update_feed(program, feed) program, new_exe = self._executor_cache.get_program_and_executor( - program, feed, fetch_list, feed_var_name, fetch_var_name, - self.place, scope) + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + self.place, + scope, + ) self._feed_data(program, feed, feed_var_name, scope) if hasattr(program, 'lr_sheduler'): from paddle.optimizer.lr import LRScheduler - assert isinstance(program.lr_sheduler, - LRScheduler), "must be LRScheduler" + + assert isinstance( + program.lr_sheduler, LRScheduler + ), "must be LRScheduler" lr_sheduler = program.lr_sheduler lr_value = lr_sheduler() lr_var = program.global_block().vars[lr_sheduler._var_name] @@ -1652,8 +1850,9 @@ class Executor(object): else: tensor._copy_from(cpu_tensor, self.place) - return new_exe.run(scope, list(feed.keys()), fetch_list, - return_numpy) + return new_exe.run( + scope, list(feed.keys()), fetch_list, return_numpy + ) compiled = isinstance(program, compiler.CompiledProgram) @@ -1668,13 +1867,15 @@ class Executor(object): varobj = global_block.vars[varname] # Can not check var build by fluid.layers.data(), bucause fluid.layers.data() had not set need_check_feed - if vardesc.persistable() == False and \ - vardesc.type() == core.VarDesc.VarType.LOD_TENSOR and \ - vardesc.need_check_feed() == True and \ - varobj.stop_gradient == True and \ - varobj.is_data == True and \ - varobj.belong_to_optimizer == False and \ - varname not in feed: + if ( + vardesc.persistable() == False + and vardesc.type() == core.VarDesc.VarType.LOD_TENSOR + and vardesc.need_check_feed() == True + and varobj.stop_gradient == True + and varobj.is_data == True + and varobj.belong_to_optimizer == False + and varname not in feed + ): raise 
ValueError('Need feed data for variable %s' % varname) acp._auto_checkpoint(self, program) @@ -1682,46 +1883,63 @@ class Executor(object): # For backward compatibility, run directly. if not compiled: # In distributed training, the compiled program is saved in Program._graph - has_compiled_graph = isinstance(program._graph, - compiler.CompiledProgram) + has_compiled_graph = isinstance( + program._graph, compiler.CompiledProgram + ) if has_compiled_graph: program._graph._compile(scope, self.place) # _graph in program does not support inference since the _graph is optimized # through optimizer.minimize function and should not be used as inference graph # assert not program._graph._is_inference - return self._run_parallel(program._graph, - scope=scope, - feed=feed, - fetch_list=fetch_list, - fetch_var_name=fetch_var_name, - return_numpy=return_numpy, - return_merged=return_merged) - - return self._run_program(program, - feed=feed, - fetch_list=fetch_list, - feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name, - scope=scope, - return_numpy=return_numpy, - use_program_cache=use_program_cache) + return self._run_parallel( + program._graph, + scope=scope, + feed=feed, + fetch_list=fetch_list, + fetch_var_name=fetch_var_name, + return_numpy=return_numpy, + return_merged=return_merged, + ) + + return self._run_program( + program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name, + scope=scope, + return_numpy=return_numpy, + use_program_cache=use_program_cache, + ) program._compile(scope, self.place) if program._is_inference: return self._run_inference(program._executor, feed) else: - return self._run_parallel(program, - scope=scope, - feed=feed, - fetch_list=fetch_list, - fetch_var_name=fetch_var_name, - return_numpy=return_numpy, - return_merged=return_merged) - - def _run_program(self, program, feed, fetch_list, feed_var_name, - fetch_var_name, scope, return_numpy, use_program_cache): + return self._run_parallel( + program, + scope=scope, + feed=feed, + fetch_list=fetch_list, + fetch_var_name=fetch_var_name, + return_numpy=return_numpy, + return_merged=return_merged, + ) + + def _run_program( + self, + program, + feed, + fetch_list, + feed_var_name, + fetch_var_name, + scope, + return_numpy, + use_program_cache, + ): from paddle.optimizer.lr import LRScheduler + if feed is None: feed = {} elif isinstance(feed, (list, tuple)): @@ -1730,19 +1948,22 @@ class Executor(object): if not isinstance(feed, dict): raise TypeError( - "feed requires dict as its Parameter. But you passed in %s" % - (type(feed))) + "feed requires dict as its Parameter. But you passed in %s" + % (type(feed)) + ) assert program is not None, "The program should not be Empty" if not isinstance(program, Program): raise TypeError( "Executor requires Program as its Parameter. But you passed in %s" - % (type(program))) + % (type(program)) + ) if not isinstance(fetch_var_name, str): raise TypeError( "The name of fetch variable requires string as its Parameter. 
But you passed in %s" - % (type(fetch_var_name))) + % (type(fetch_var_name)) + ) if use_program_cache: cache_key = _get_strong_program_cache_key(program, feed, fetch_list) @@ -1755,35 +1976,41 @@ class Executor(object): feed=feed, fetch_list=fetch_list, feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name) + fetch_var_name=fetch_var_name, + ) self._add_program_cache(cache_key, cached_program) fetch_list_str = list(map(_to_name_str, fetch_list)) cached_ctx = self._default_executor.prepare( - cached_program.desc, 0, fetch_list_str, False) + cached_program.desc, 0, fetch_list_str, False + ) # currently, we cache program, vars, sub_scope here # we suppose that in a life cycle of training, a user # will not create many programs. So, here the basic # rule of caching is to cache all unseen (program, var, scope) # when a user use use_program_cache. cached_scope = scope.new_scope() - self._default_executor.create_variables(cached_program.desc, - cached_scope, 0) + self._default_executor.create_variables( + cached_program.desc, cached_scope, 0 + ) self._add_ctx_cache(cache_key, cached_ctx) self._add_scope_cache(cache_key, cached_scope) program = cached_program ctx = cached_ctx scope = cached_scope else: - program = _add_feed_fetch_ops(program=program, - feed=feed, - fetch_list=fetch_list, - feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name) + program = _add_feed_fetch_ops( + program=program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name, + ) self._feed_data(program, feed, feed_var_name, scope) if hasattr(program, 'lr_sheduler'): - assert isinstance(program.lr_sheduler, - LRScheduler), "must be LRScheduler" + assert isinstance( + program.lr_sheduler, LRScheduler + ), "must be LRScheduler" lr_sheduler = program.lr_sheduler lr_value = lr_sheduler() lr_var = program.global_block().vars[lr_sheduler._var_name] @@ -1792,11 +2019,13 @@ class Executor(object): tensor.set(data, self.place) if not use_program_cache: - self._default_executor.run(program.desc, scope, 0, True, True, - [fetch_var_name]) + self._default_executor.run( + program.desc, scope, 0, True, True, [fetch_var_name] + ) else: - self._default_executor.run_prepared_ctx(ctx, scope, False, False, - False) + self._default_executor.run_prepared_ctx( + ctx, scope, False, False, False + ) arr = scope.find_var(fetch_var_name).get_fetch_list() tensors = arr._move_to_list() if return_numpy: @@ -1811,13 +2040,16 @@ class Executor(object): is_fetch_var = lambda var: isinstance(var, (Variable, str)) is_tuple_list = lambda var: isinstance(var, (tuple, list)) - if fetch_list is None: return [] - if is_fetch_var(fetch_list): return [fetch_list] + if fetch_list is None: + return [] + if is_fetch_var(fetch_list): + return [fetch_list] - assert is_tuple_list(fetch_list), \ - "Currently , The fetch_list type only should be list or tuple, \n"\ - "but the input type is {}. For more information please refer to \n"\ + assert is_tuple_list(fetch_list), ( + "Currently , The fetch_list type only should be list or tuple, \n" + "but the input type is {}. For more information please refer to \n" "the executor.run(...).".format(type(fetch_list)) + ) res = [] for i, var in enumerate(fetch_list): @@ -1831,9 +2063,10 @@ class Executor(object): res.append(var) else: raise TypeError( - "Require fetch_list[{}] 's type shall be one of (Variable, str), but received {}." 
- .format(i, - type(var).__name__)) + "Require fetch_list[{}] 's type shall be one of (Variable, str), but received {}.".format( + i, type(var).__name__ + ) + ) return res @@ -1850,25 +2083,30 @@ class Executor(object): pipeline_num = filelist_length print( "Pipeline training: setting the pipeline num to %d is enough because there are only %d files" - % (filelist_length, filelist_length)) + % (filelist_length, filelist_length) + ) if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]: print( "Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files" - % (filelist_length // pipeline_num, filelist_length)) - pipeline_opt["concurrency_list"][ - 0] = filelist_length // pipeline_num + % (filelist_length // pipeline_num, filelist_length) + ) + pipeline_opt["concurrency_list"][0] = ( + filelist_length // pipeline_num + ) dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num) return pipeline_num - def _prepare_trainer(self, - program=None, - dataset=None, - scope=None, - thread=0, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100): + def _prepare_trainer( + self, + program=None, + dataset=None, + scope=None, + thread=0, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + ): is_heter = 0 use_ps_gpu = 0 if not program._fleet_opt is None: @@ -1889,16 +2127,19 @@ class Executor(object): if is_heter: from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil + fu = FleetUtil() ret = fu.split_program_by_device(program) if not compiled: # TODO: Need a better way to distinguish and specify different execution mode if program._pipeline_opt: trainer = TrainerFactory()._create_trainer( - program._pipeline_opt) + program._pipeline_opt + ) elif program._heter_pipeline_opt: trainer = TrainerFactory()._create_trainer( - program._heter_pipeline_opt) + program._heter_pipeline_opt + ) else: trainer = TrainerFactory()._create_trainer(program._fleet_opt) trainer._set_thread_barrier(program._is_distributed) @@ -1908,13 +2149,16 @@ class Executor(object): else: if program._pipeline_opt: trainer = TrainerFactory()._create_trainer( - program.program._pipeline_opt) + program.program._pipeline_opt + ) elif program._heter_pipeline_opt: trainer = TrainerFactory()._create_trainer( - program.program._heter_pipeline_opt) + program.program._heter_pipeline_opt + ) else: trainer = TrainerFactory()._create_trainer( - program.program._fleet_opt) + program.program._fleet_opt + ) trainer._set_program(program.program) if thread <= 0: @@ -1923,7 +2167,8 @@ class Executor(object): elif dataset.thread_num <= 0: raise RuntimeError( "You should set thread num first, either in Dataset" - "or in Executor.train_from_dataset") + "or in Executor.train_from_dataset" + ) else: trainer._set_thread(dataset.thread_num) else: @@ -1933,19 +2178,22 @@ class Executor(object): trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period) return scope, trainer - def _run_from_dataset(self, - program=None, - dataset=None, - scope=None, - thread=0, - is_infer=False, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): + def _run_from_dataset( + self, + program=None, + dataset=None, + scope=None, + thread=0, + is_infer=False, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): if program._pipeline_opt is not None: import paddle + if dataset is not None: 
raise RuntimeError("dataset should be None for pipeline mode") # The following fake dataset is created to call @@ -1956,24 +2204,28 @@ class Executor(object): data_vars.append(var) if core.is_compiled_with_npu(): dataset = paddle.fluid.DatasetFactory().create_dataset( - 'InMemoryDataset') + 'InMemoryDataset' + ) else: dataset = paddle.fluid.DatasetFactory().create_dataset( - 'FileInstantDataset') + 'FileInstantDataset' + ) dataset.set_batch_size(1) dataset.set_thread(1) dataset.set_filelist(['None']) dataset.set_use_var(data_vars) elif program._heter_pipeline_opt is not None: stage_id = program._heter_pipeline_opt["pipeline_stage"] - #print("test_fl_stage_id: {}".format(stage_id)) + # print("test_fl_stage_id: {}".format(stage_id)) heter_place = program._heter_pipeline_opt["heter_place"] if stage_id != 0: if "is_fl_mode" not in program._heter_pipeline_opt: import paddle + if dataset is not None: raise RuntimeError( - "dataset should be None for heter pipeline mode") + "dataset should be None for heter pipeline mode" + ) # The following fake dataset is created to call # the _prepare_trainer api, and it is meaningless. data_vars = [] @@ -1981,7 +2233,8 @@ class Executor(object): if var.is_data: data_vars.append(var) dataset = paddle.fluid.DatasetFactory().create_dataset( - 'InMemoryDataset') + 'InMemoryDataset' + ) dataset.set_batch_size(1) dataset.set_thread(1) dataset.set_filelist(['None']) @@ -1989,7 +2242,8 @@ class Executor(object): else: if dataset is None: raise RuntimeError( - "dataset is need and should be initialized") + "dataset is need and should be initialized" + ) ## change default executor heter_place = framework._get_paddle_place(heter_place) p = core.Place() @@ -2016,7 +2270,8 @@ class Executor(object): feed=[], fetch_list=real_fetch_list, feed_var_name='feed', - fetch_var_name='fetch') + fetch_var_name='fetch', + ) main_block = program._pipeline_opt["section_program"].block(0) for op in main_block.ops: # set the op_role of fetch op to Optimize to avoid @@ -2024,16 +2279,19 @@ class Executor(object): if op.type == 'fetch': op._set_attr( 'op_role', - core.op_proto_and_checker_maker.OpRole.Optimize) + core.op_proto_and_checker_maker.OpRole.Optimize, + ) fetch_list = None - scope, trainer = self._prepare_trainer(program=program, - dataset=dataset, - scope=scope, - thread=thread, - debug=debug, - fetch_list=fetch_list, - fetch_info=fetch_info, - print_period=print_period) + scope, trainer = self._prepare_trainer( + program=program, + dataset=dataset, + scope=scope, + thread=thread, + debug=debug, + fetch_list=fetch_list, + fetch_info=fetch_info, + print_period=print_period, + ) trainer._set_infer(is_infer) trainer._gen_trainer_desc() @@ -2048,8 +2306,11 @@ class Executor(object): dataset._dynamic_adjust_before_train(trainer.proto_desc.thread_num) if program._heter_pipeline_opt is None: - trainer_instance = self._default_executor.init_for_dataset( # -->InitForDataset - program.desc, trainer._desc(), scope, dataset.dataset) + trainer_instance = ( + self._default_executor.init_for_dataset( # -->InitForDataset + program.desc, trainer._desc(), scope, dataset.dataset + ) + ) else: # cache trainer instance for heterps pipeline training if fetch_list == None: @@ -2058,8 +2319,9 @@ class Executor(object): trainer_instance = self._get_trainer_cache(cache_key) if trainer_instance is None: trainer_instance = self._default_executor.init_for_dataset( - program.desc, trainer._desc(), scope, dataset.dataset) - #print("test_fl_ps - trainer_desc: {}\n".format(trainer)) + program.desc, 
trainer._desc(), scope, dataset.dataset + ) + # print("test_fl_ps - trainer_desc: {}\n".format(trainer)) self._add_trainer_cache(cache_key, trainer_instance) else: trainer_instance.ResetDataset(dataset.dataset) @@ -2086,18 +2348,20 @@ class Executor(object): return None - def _prepare_pipeline_ctx(self, - program=None, - dataset=None, - scope=None, - thread=0, - is_infer=False, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None, - use_program_cache=False): + def _prepare_pipeline_ctx( + self, + program=None, + dataset=None, + scope=None, + thread=0, + is_infer=False, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + use_program_cache=False, + ): assert program._pipeline_opt is not None assert dataset is None, "dataset should be None for pipeline mode" @@ -2117,10 +2381,12 @@ class Executor(object): data_vars.append(var) if core.is_compiled_with_npu(): dataset = paddle.fluid.DatasetFactory().create_dataset( - 'InMemoryDataset') + 'InMemoryDataset' + ) else: dataset = paddle.fluid.DatasetFactory().create_dataset( - 'FileInstantDataset') + 'FileInstantDataset' + ) dataset.set_batch_size(1) dataset.set_thread(1) dataset.set_filelist(['None']) @@ -2141,11 +2407,13 @@ class Executor(object): if fetch_var_name in real_program.global_block().vars: real_fetch_list.append(fetch_var) - real_program = _add_feed_fetch_ops(program=real_program, - feed=[], - fetch_list=real_fetch_list, - feed_var_name='feed', - fetch_var_name='fetch') + real_program = _add_feed_fetch_ops( + program=real_program, + feed=[], + fetch_list=real_fetch_list, + feed_var_name='feed', + fetch_var_name='fetch', + ) main_block = real_program.block(0) for op in main_block.ops: # set the op_role of fetch op to Optimize to avoid @@ -2153,7 +2421,8 @@ class Executor(object): if op.type == 'fetch': op._set_attr( 'op_role', - core.op_proto_and_checker_maker.OpRole.Optimize) + core.op_proto_and_checker_maker.OpRole.Optimize, + ) return real_program, real_fetch_list real_program, real_fetch_list = _get_real_program_fetch_list() @@ -2161,14 +2430,16 @@ class Executor(object): program._pipeline_opt["section_program"] = real_program fetch_list = None - scope, trainer = self._prepare_trainer(program=program, - dataset=dataset, - scope=scope, - thread=thread, - debug=debug, - fetch_list=fetch_list, - fetch_info=fetch_info, - print_period=print_period) + scope, trainer = self._prepare_trainer( + program=program, + dataset=dataset, + scope=scope, + thread=thread, + debug=debug, + fetch_list=fetch_list, + fetch_info=fetch_info, + print_period=print_period, + ) trainer._set_infer(is_infer) trainer._gen_trainer_desc() @@ -2183,59 +2454,84 @@ class Executor(object): trainer_desc = trainer._desc() # slow, cache trainer_instance = self._default_executor.init_for_dataset( - program.desc, trainer_desc, scope, dataset.dataset) + program.desc, trainer_desc, scope, dataset.dataset + ) ctx = [scope, real_fetch_list, trainer_instance] - if use_program_cache: self._add_ctx_cache(cache_key, ctx) + if use_program_cache: + self._add_ctx_cache(cache_key, ctx) return ctx - def _prepare_fleet_executor_carrier(self, - carrier_id="", - program=None, - scope=None, - fleet_opt=None, - with_standalone_executor=False): - num_micro_batches = fleet_opt[ - "num_micro_batches"] if "num_micro_batches" in fleet_opt else 1 + def _prepare_fleet_executor_carrier( + self, + carrier_id="", + program=None, + scope=None, + fleet_opt=None, + with_standalone_executor=False, + ): + num_micro_batches 
= ( + fleet_opt["num_micro_batches"] + if "num_micro_batches" in fleet_opt + else 1 + ) cur_rank = int(os.getenv("PADDLE_TRAINER_ID", 0)) trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", "").split(',') nrank = len(trainer_endpoints) - assert 'scheduler' in fleet_opt or 'tasks' in fleet_opt, \ - "Fleet executor need configuration for scheduler, you can choose from 1F1B or Origin. " \ + assert 'scheduler' in fleet_opt or 'tasks' in fleet_opt, ( + "Fleet executor need configuration for scheduler, you can choose from 1F1B or Origin. " "Or you can provide a list of task nodes to init fleet executor directly." + ) if 'tasks' in fleet_opt: - assert 'task_id_to_rank' in fleet_opt, "If you provide tasks to init fleet executor," \ - " task_id_to_rank should also be provided." + assert 'task_id_to_rank' in fleet_opt, ( + "If you provide tasks to init fleet executor," + " task_id_to_rank should also be provided." + ) print('fleet executor will use user defined task nodes') tasks = [task.task_node() for task in fleet_opt['tasks']] task_id_to_rank = fleet_opt['task_id_to_rank'] else: scheduler = fleet_opt['scheduler'] if scheduler == '1F1B': - from paddle.distributed.fleet.fleet_executor_utils import run1f1b - if "dist_strategy" not in fleet_opt or \ - "pp_degree" not in fleet_opt["dist_strategy"] or \ - fleet_opt["dist_strategy"]["pp_degree"] == 1: + from paddle.distributed.fleet.fleet_executor_utils import ( + run1f1b, + ) + + if ( + "dist_strategy" not in fleet_opt + or "pp_degree" not in fleet_opt["dist_strategy"] + or fleet_opt["dist_strategy"]["pp_degree"] == 1 + ): warnings.warn("Using 1F1B scheduler with pp_degree == 1.") tasks, task_id_to_rank = run1f1b( - program, cur_rank, fleet_opt.get('num_micro_batches', 1), - fleet_opt.get('dist_strategy', {}), nrank, - with_standalone_executor) + program, + cur_rank, + fleet_opt.get('num_micro_batches', 1), + fleet_opt.get('dist_strategy', {}), + nrank, + with_standalone_executor, + ) elif scheduler == 'Origin': from paddle.distributed.fleet.fleet_executor_utils import origin - if "dist_strategy" in fleet_opt and \ - "pp_degree" in fleet_opt["dist_strategy"]: - assert fleet_opt["dist_strategy"]["pp_degree"] == 1, \ - "For pipeline mode, the scheduler should be 1F1B instead of Origin." + + if ( + "dist_strategy" in fleet_opt + and "pp_degree" in fleet_opt["dist_strategy"] + ): + assert ( + fleet_opt["dist_strategy"]["pp_degree"] == 1 + ), "For pipeline mode, the scheduler should be 1F1B instead of Origin." if "num_micro_batches" in fleet_opt: - assert fleet_opt["num_micro_batches"] == 1, \ - "For origin scheduler mode, the num micro batches should be 1." + assert ( + fleet_opt["num_micro_batches"] == 1 + ), "For origin scheduler mode, the num micro batches should be 1." tasks, task_id_to_rank = origin(program, cur_rank) else: - raise "Fleet_executor only supports 1F1B and Origin scheduler, " \ - "but received " + str(scheduler) + "." + raise "Fleet_executor only supports 1F1B and Origin scheduler, " "but received " + str( + scheduler + ) + "." # NOTE: have to hold these vars, otherwise will be destructed fleet_opt['tasks'] = tasks fleet_opt['task_id_to_rank'] = task_id_to_rank @@ -2243,16 +2539,26 @@ class Executor(object): place.set_place(self.place) # NOTE: the last argument is used to force create some vars in root scope, # won't be used during train. 
- self._fleet_executor.init(carrier_id, program.desc, scope, place, - num_micro_batches, tasks, task_id_to_rank, []) - - def _run_using_fleet_executor(self, - program=None, - feed=None, - feed_var_name="feed", - fetch_var_name="fetch", - fetch_list=None, - with_standalone_executor=False): + self._fleet_executor.init( + carrier_id, + program.desc, + scope, + place, + num_micro_batches, + tasks, + task_id_to_rank, + [], + ) + + def _run_using_fleet_executor( + self, + program=None, + feed=None, + feed_var_name="feed", + fetch_var_name="fetch", + fetch_list=None, + with_standalone_executor=False, + ): cache_key = _get_strong_program_cache_key(program, feed, fetch_list) cached_program = self._get_program_cache(cache_key) cached_scope = self._get_scope_cache(cache_key) @@ -2260,16 +2566,20 @@ class Executor(object): cached_scope = global_scope() self._add_scope_cache(cache_key, cached_scope) if cached_program is None: - assert program._pipeline_opt, "program should have _pipeline_opt to start carrier" + assert ( + program._pipeline_opt + ), "program should have _pipeline_opt to start carrier" real_feed = [] if feed is None else feed real_program = program if "section_program" in program._pipeline_opt: real_program = program._pipeline_opt["section_program"] - cached_program = _add_feed_fetch_ops(program=real_program, - feed=real_feed, - fetch_list=fetch_list, - feed_var_name=feed_var_name, - fetch_var_name=fetch_var_name) + cached_program = _add_feed_fetch_ops( + program=real_program, + feed=real_feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name, + ) main_block = cached_program.block(0) for op in main_block.ops: # set the op_role of fetch op to Optimize to avoid @@ -2277,7 +2587,8 @@ class Executor(object): if op.type == 'fetch': op._set_attr( 'op_role', - core.op_proto_and_checker_maker.OpRole.Optimize) + core.op_proto_and_checker_maker.OpRole.Optimize, + ) self._add_program_cache(cache_key, cached_program) fleet_opt = program._pipeline_opt["fleet_opt"] if 'tasks' in fleet_opt: @@ -2291,9 +2602,11 @@ class Executor(object): feed_task = fleet_opt['tasks'][0] print("Inserting feed ops for task", feed_task.task_id()) feed_program = feed_task.get_program() - feed_program = self._add_feed_ops(program=feed_program, - feed=real_feed, - feed_var_name=feed_var_name) + feed_program = self._add_feed_ops( + program=feed_program, + feed=real_feed, + feed_var_name=feed_var_name, + ) feed_task.set_program(feed_program) # Insert fetch ops @@ -2303,7 +2616,8 @@ class Executor(object): fetch_program = self._add_fetch_ops( program=fetch_program, fetch_list=fetch_list, - fetch_var_name=fetch_var_name) + fetch_var_name=fetch_var_name, + ) main_block = fetch_program.block(0) for op in main_block.ops: # set the op_role of fetch op to Optimize to avoid @@ -2311,7 +2625,8 @@ class Executor(object): if op.type == 'fetch': op._set_attr( 'op_role', - core.op_proto_and_checker_maker.OpRole.Optimize) + core.op_proto_and_checker_maker.OpRole.Optimize, + ) fetch_task.set_program(fetch_program) self._prepare_fleet_executor_carrier( @@ -2319,7 +2634,8 @@ class Executor(object): program=cached_program, scope=cached_scope, fleet_opt=fleet_opt, - with_standalone_executor=with_standalone_executor) + with_standalone_executor=with_standalone_executor, + ) if feed: # NOTE: don't have to traverse programs in task nodes, @@ -2328,14 +2644,16 @@ class Executor(object): self._feed_data(cached_program, feed, feed_var_name, cached_scope) from paddle.optimizer.lr import LRScheduler + if 
hasattr(program, 'lr_sheduler'): lr_sheduler = program.lr_sheduler assert isinstance(lr_sheduler, LRScheduler), "must be LRScheduler" lr_value = lr_sheduler() lr_var = program.global_block().vars[lr_sheduler._var_name] data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype)) - tensor = core.get_variable_tensor(cached_scope, - lr_sheduler._var_name) + tensor = core.get_variable_tensor( + cached_scope, lr_sheduler._var_name + ) tensor.set(data, self.place) self._fleet_executor.run(cache_key) @@ -2357,30 +2675,32 @@ class Executor(object): feed_var = global_block.create_var( name=feed_var_name, type=core.VarDesc.VarType.FEED_MINIBATCH, - persistable=True) + persistable=True, + ) # prepend feed operators if not has_feed_operators(global_block, feed, feed_var_name): for i, name in enumerate(feed): if global_block.has_var(name): out = global_block.var(name) - global_block._prepend_op(type='feed', - inputs={'X': [feed_var]}, - outputs={'Out': [out]}, - attrs={'col': i}) + global_block._prepend_op( + type='feed', + inputs={'X': [feed_var]}, + outputs={'Out': [out]}, + attrs={'col': i}, + ) else: warnings.warn( "The variable %s is not found in program. It is not declared or is pruned." - % name) + % name + ) return tmp_program @classmethod - def _add_fetch_ops(cls, - program, - fetch_list, - fetch_var_name, - use_fetch_v2=False): + def _add_fetch_ops( + cls, program, fetch_list, fetch_var_name, use_fetch_v2=False + ): tmp_program = program.clone() global_block = tmp_program.global_block() @@ -2391,7 +2711,8 @@ class Executor(object): fetch_var = global_block.create_var( name=fetch_var_name, type=core.VarDesc.VarType.FETCH_LIST, - persistable=True) + persistable=True, + ) if use_fetch_v2: fetch_op = 'fetch_v2' @@ -2399,16 +2720,19 @@ class Executor(object): fetch_op = 'fetch' # append fetch_operators - if not has_fetch_operators(global_block, fetch_list, fetch_var_name, - fetch_op): + if not has_fetch_operators( + global_block, fetch_list, fetch_var_name, fetch_op + ): for i, var in enumerate(fetch_list): assert isinstance(var, Variable) or isinstance( - var, str), ("Wrong type for fetch_list[%s]: %s" % - (i, type(var))) - global_block.append_op(type=fetch_op, - inputs={'X': [var]}, - outputs={'Out': [fetch_var]}, - attrs={'col': i}) + var, str + ), "Wrong type for fetch_list[%s]: %s" % (i, type(var)) + global_block.append_op( + type=fetch_op, + inputs={'X': [var]}, + outputs={'Out': [fetch_var]}, + attrs={'col': i}, + ) return tmp_program @@ -2423,25 +2747,36 @@ class Executor(object): return tmp_program - def _run_pipeline(self, - program=None, - dataset=None, - scope=None, - thread=0, - is_infer=False, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None, - use_program_cache=False): - scope, real_fetch_list, trainer_instance = \ - self._prepare_pipeline_ctx(program, dataset, scope, thread, - is_infer, debug, fetch_list, fetch_info, - print_period, fetch_handler, - use_program_cache) + def _run_pipeline( + self, + program=None, + dataset=None, + scope=None, + thread=0, + is_infer=False, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + use_program_cache=False, + ): + scope, real_fetch_list, trainer_instance = self._prepare_pipeline_ctx( + program, + dataset, + scope, + thread, + is_infer, + debug, + fetch_list, + fetch_info, + print_period, + fetch_handler, + use_program_cache, + ) from paddle.optimizer.lr import LRScheduler + if hasattr(program, 'lr_sheduler'): lr_sheduler = program.lr_sheduler assert 
isinstance(lr_sheduler, LRScheduler), "must be LRScheduler" @@ -2463,16 +2798,18 @@ class Executor(object): return None - def infer_from_dataset(self, - program=None, - dataset=None, - scope=None, - thread=0, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): + def infer_from_dataset( + self, + program=None, + dataset=None, + scope=None, + thread=0, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): """ Infer from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset. Given a program, either a program or compiled program, infer_from_dataset will @@ -2528,26 +2865,39 @@ class Executor(object): dataset=dataset) """ - return self._run_from_dataset(program, dataset, scope, thread, True, - debug, fetch_list, fetch_info, - print_period, fetch_handler) - - def start_heter_trainer(self, - program=None, - scope=None, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): - scope, trainer = self._prepare_trainer(program=program, - dataset=None, - scope=scope, - thread=1, - debug=debug, - fetch_list=fetch_list, - fetch_info=fetch_info, - print_period=print_period) + return self._run_from_dataset( + program, + dataset, + scope, + thread, + True, + debug, + fetch_list, + fetch_info, + print_period, + fetch_handler, + ) + + def start_heter_trainer( + self, + program=None, + scope=None, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): + scope, trainer = self._prepare_trainer( + program=program, + dataset=None, + scope=scope, + thread=1, + debug=debug, + fetch_list=fetch_list, + fetch_info=fetch_info, + print_period=print_period, + ) trainer._set_infer(False) trainer._gen_trainer_desc() @@ -2555,32 +2905,35 @@ class Executor(object): self._dump_debug_info(program=program, trainer=trainer) trainer_instance = self._default_executor.init_for_dataset( - program.desc, trainer._desc(), scope, None) + program.desc, trainer._desc(), scope, None + ) - #if fetch_handler is not None: + # if fetch_handler is not None: # scope0 = trainer_instance.get_worker_scope(0) # fetch_monitor = FetchHandlerMonitor(scope0, fetch_handler) # fetch_monitor.start() # self._default_executor.run_from_dataset(trainer_instance) # fetch_monitor.stop() # self._default_executor.release_trainer(trainer_instance) - #else: + # else: self._default_executor.run_from_dataset(trainer_instance) - #self._default_executor.release_trainer(trainer_instance) + # self._default_executor.release_trainer(trainer_instance) return trainer_instance - def train_from_dataset(self, - program=None, - dataset=None, - scope=None, - thread=0, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): + def train_from_dataset( + self, + program=None, + dataset=None, + scope=None, + thread=0, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): """ Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset. 
Given a program, either a program or compiled program, train_from_dataset will @@ -2635,6 +2988,15 @@ class Executor(object): dataset=dataset) """ - return self._run_from_dataset(program, dataset, scope, thread, False, - debug, fetch_list, fetch_info, - print_period, fetch_handler) + return self._run_from_dataset( + program, + dataset, + scope, + thread, + False, + debug, + fetch_list, + fetch_info, + print_period, + fetch_handler, + ) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 99abb76d2138836cac9dee877879a0cf62f3acd4..46bd46d706ae2451b3fbd48ca2bb68a09f560d0e 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -72,7 +72,7 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix() CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName() _dygraph_tracer_ = None -_in_eager_mode_ = (os.environ.get('FLAGS_enable_eager_mode', '1') == '1') +_in_eager_mode_ = os.environ.get('FLAGS_enable_eager_mode', '1') == '1' _global_expected_place_ = None _current_device = None global_prog_seed = 0 @@ -81,10 +81,12 @@ _already_patch_eager_tensor = False _already_patch_varbase = False _current_cuda_graph_mode = None _global_flags_ = core.globals() -_enable_standalone_executor_ = (os.environ.get('FLAGS_USE_STANDALONE_EXECUTOR', - None)) -_dy2st_enable_standalone_executor_ = (os.environ.get( - 'FLAGS_DY2ST_USE_STANDALONE_EXECUTOR', 1)) +_enable_standalone_executor_ = os.environ.get( + 'FLAGS_USE_STANDALONE_EXECUTOR', None +) +_dy2st_enable_standalone_executor_ = os.environ.get( + 'FLAGS_DY2ST_USE_STANDALONE_EXECUTOR', 1 +) # Some explanation of our execution system 2022.03 # For now we have 3 kinds of execution system, since we refactored dygraph mode to @@ -145,6 +147,7 @@ def _update_monkey_methods(is_eager): def _switch_tensor_bind_type(is_eager): import paddle + if is_eager: paddle.Tensor = core.eager.Tensor else: @@ -179,8 +182,11 @@ def _fallback_legacy_dygraph(): global _is_first_import_ need_fallback = False # Only enable eager on CPU/GPU/XPU - is_not_support = core.is_compiled_with_npu() or core.is_compiled_with_ipu( - ) or core.is_compiled_with_mlu() + is_not_support = ( + core.is_compiled_with_npu() + or core.is_compiled_with_ipu() + or core.is_compiled_with_mlu() + ) if _in_eager_mode_ and is_not_support: # switch into legacy dygraph mode @@ -358,7 +364,6 @@ def set_ipu_shard(call_func, index=-1, stage=-1): """ def decorate(func): - def wrapper(*args, **kwargs): with ipu_shard_guard(index=index, stage=stage): return func(*args, **kwargs) @@ -366,16 +371,17 @@ def set_ipu_shard(call_func, index=-1, stage=-1): return wrapper from .dygraph.layers import Layer + if not isinstance(call_func, Layer): if callable(call_func): return decorate(call_func) else: raise TypeError( - "Unsupported type. Only accept paddle.nn.Layer or function.") + "Unsupported type. Only accept paddle.nn.Layer or function." + ) # patch paddle.nn.Layer class BlockFn(type(call_func)): - def __call__(self, *args, **kwargs): with ipu_shard_guard(index=index, stage=stage): return super().__call__(*args, **kwargs) @@ -387,62 +393,68 @@ def set_ipu_shard(call_func, index=-1, stage=-1): def require_version(min_version, max_version=None): """ - Check if the installed version of PaddlePaddle is in [min_version, max_version], - if the installed version is lower than ``min_version`` or higher than ``max_version``, - an exception will be thrown, NO returns if the installed version is satisfied. 
+ Check if the installed version of PaddlePaddle is in [min_version, max_version], + if the installed version is lower than ``min_version`` or higher than ``max_version``, + an exception will be thrown, NO returns if the installed version is satisfied. - Args: - min_version (str): the minimum version required (like '1.4.0'). - max_version (str, optional): the max version required (like '1.6.0'), default is None, - meaning any version equal or higher than ``min_version`` is acceptable. + Args: + min_version (str): the minimum version required (like '1.4.0'). + max_version (str, optional): the max version required (like '1.6.0'), default is None, + meaning any version equal or higher than ``min_version`` is acceptable. - Returns: - None. + Returns: + None. - Raises: - TypeError: if the type of ``min_version`` is not str. - TypeError: if the type of ``max_version`` is not str or type(None). - ValueError: if the value of ``min_version`` is not in version format. - ValueError: if the value of ``max_version`` is not in version format or None. - Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``. + Raises: + TypeError: if the type of ``min_version`` is not str. + TypeError: if the type of ``max_version`` is not str or type(None). + ValueError: if the value of ``min_version`` is not in version format. + ValueError: if the value of ``max_version`` is not in version format or None. + Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``. - Examples: - .. code-block:: python + Examples: + .. code-block:: python - import paddle.fluid as fluid + import paddle.fluid as fluid - # any version >= 0.1.0 is acceptable. - fluid.require_version('0.1.0') + # any version >= 0.1.0 is acceptable. + fluid.require_version('0.1.0') - # if 0.1.0 <= version <= 10.0.0, it is acceptable. - fluid.require_version(min_version='0.1.0', max_version='10.0.0') - """ + # if 0.1.0 <= version <= 10.0.0, it is acceptable. + fluid.require_version(min_version='0.1.0', max_version='10.0.0') + """ if not isinstance(min_version, str): raise TypeError( "The type of 'min_version' in require_version must be str, but received %s." - % (type(min_version))) + % (type(min_version)) + ) if not isinstance(max_version, (str, type(None))): raise TypeError( "The type of 'max_version' in require_version must be str or type(None), but received %s." - % (type(max_version))) + % (type(max_version)) + ) check_format = re.match(r'\d+(\.\d+){0,3}', min_version) if check_format is None or check_format.group() != min_version: raise ValueError( "The value of 'min_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', " - "like '1.5.2.0', but received %s" % min_version) + "like '1.5.2.0', but received %s" % min_version + ) if max_version is not None: check_format = re.match(r'\d+(\.\d+){0,3}', max_version) if check_format is None or check_format.group() != max_version: raise ValueError( "The value of 'max_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', " - "like '1.5.2.0', but received %s" % max_version) + "like '1.5.2.0', but received %s" % max_version + ) version_installed = [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] zero_version = ['0', '0', '0', '0'] @@ -459,75 +471,86 @@ def require_version(min_version, max_version=None): warnings.warn( "PaddlePaddle version in [%s, %s] required, but %s installed. 
" "Maybe you are using a develop version, " - "please make sure the version is good with your code." % - (min_version, max_version, fluid_version.full_version)) + "please make sure the version is good with your code." + % (min_version, max_version, fluid_version.full_version) + ) else: warnings.warn( "PaddlePaddle version %s or higher is required, but %s installed, " "Maybe you are using a develop version, " - "please make sure the version is good with your code." % - (min_version, fluid_version.full_version)) + "please make sure the version is good with your code." + % (min_version, fluid_version.full_version) + ) return min_version_split = min_version.split('.') - min_version_to_check = min_version_split + zero_version[ - len(min_version_split):] + min_version_to_check = ( + min_version_split + zero_version[len(min_version_split) :] + ) if max_version is not None: max_version_split = max_version.split('.') - max_version_to_check = max_version_split + zero_version[ - len(max_version_split):] + max_version_to_check = ( + max_version_split + zero_version[len(max_version_split) :] + ) - if version_cmp(version_installed, - max_version_to_check) > 0 or version_cmp( - version_installed, min_version_to_check) < 0: + if ( + version_cmp(version_installed, max_version_to_check) > 0 + or version_cmp(version_installed, min_version_to_check) < 0 + ): raise Exception( "VersionError: PaddlePaddle version in [%s, %s] required, but %s installed." - % (min_version, max_version, fluid_version.full_version)) + % (min_version, max_version, fluid_version.full_version) + ) else: if version_cmp(version_installed, min_version_to_check) < 0: raise Exception( "VersionError: PaddlePaddle version %s or higher is required, but %s installed, " "please upgrade your PaddlePaddle to %s or other higher version." - % (min_version, fluid_version.full_version, min_version)) + % (min_version, fluid_version.full_version, min_version) + ) def _dygraph_not_support_(func): - def __impl__(*args, **kwargs): - assert not _non_static_mode( - ), "We don't support %s in dynamic graph mode" % func.__name__ + assert not _non_static_mode(), ( + "We don't support %s in dynamic graph mode" % func.__name__ + ) return func(*args, **kwargs) return __impl__ def _dygraph_only_(func): - def __impl__(*args, **kwargs): - assert _non_static_mode( - ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__ + assert _non_static_mode(), ( + "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + % func.__name__ + ) return func(*args, **kwargs) return __impl__ def _non_static_only_(func): - def __impl__(*args, **kwargs): from .dygraph.base import in_declarative_mode - assert _non_static_mode() or in_declarative_mode( - ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__ + + assert _non_static_mode() or in_declarative_mode(), ( + "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." + % func.__name__ + ) return func(*args, **kwargs) return __impl__ def _static_only_(func): - def __impl__(*args, **kwargs): - assert not _non_static_mode( - ), "In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '%s()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." 
% func.__name__ + assert not _non_static_mode(), ( + "In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '%s()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." + % func.__name__ + ) return func(*args, **kwargs) return __impl__ @@ -546,14 +569,14 @@ def _set_pipeline_stage(stage): # TODO(zhiqiu): We should make VarBase consistent with Variable in future, for example, by inheritting # same base class. def _fake_interface_only_(func): - def __impl__(*args, **kwargs): raise AssertionError( "'%s' only can be called by `paddle.Tensor` in dynamic graph mode. Suggestions:\n" " 1. If you are in static graph mode, you can switch to dynamic graph mode by turning off `paddle.enable_static()` or calling `paddle.disable_static()`.\n" " 2. If you are using `@paddle.jit.to_static`, you can turn off ProgramTranslator by calling `paddle.jit.ProgramTranslator().enable(False)`. " "If you have to translate dynamic graph to static graph, please use other API to replace '%s'." - % (func.__name__, func.__name__)) + % (func.__name__, func.__name__) + ) return __impl__ @@ -564,13 +587,13 @@ def _fake_interface_only_(func): # NOTE(chenweihang): not using `wrap_decorator` here is because `wrap_decorator` will # move kwargs to args, which doesn't work in this decorate case def deprecate_stat_dict(func): - @functools.wraps(func) def wrapper(*args, **kwargs): if 'stat_dict' in kwargs: warnings.warn( "The argument `stat_dict` has deprecated, please change it to `state_dict`.", - DeprecationWarning) + DeprecationWarning, + ) kwargs['state_dict'] = kwargs['stat_dict'] kwargs.pop('stat_dict') return func(*args, **kwargs) @@ -676,7 +699,9 @@ def _cpu_num(): 'And if this parameter are set as N (equal to the number of physical CPU core) the program may be faster.\n\n' 'export CPU_NUM={} # for example, set CPU_NUM as number of physical CPU core which is {}.\n\n' '!!! 
The default number of CPU_NUM=1.\n'.format( - multiprocessing.cpu_count(), multiprocessing.cpu_count())) + multiprocessing.cpu_count(), multiprocessing.cpu_count() + ) + ) os.environ['CPU_NUM'] = str(1) cpu_num = os.environ.get('CPU_NUM') return int(cpu_num) @@ -858,8 +883,7 @@ def cuda_places(device_ids=None): cuda_places = static.cuda_places() """ - assert core.is_compiled_with_cuda(), \ - "Not compiled with CUDA" + assert core.is_compiled_with_cuda(), "Not compiled with CUDA" if device_ids is None: device_ids = _cuda_ids() elif not isinstance(device_ids, (list, tuple)): @@ -898,8 +922,7 @@ def xpu_places(device_ids=None): paddle.enable_static() xpu_places = static.xpu_places() """ - assert core.is_compiled_with_xpu(), \ - "Not compiled with XPU" + assert core.is_compiled_with_xpu(), "Not compiled with XPU" if device_ids is None: device_ids = _xpu_ids() elif not isinstance(device_ids, (list, tuple)): @@ -939,8 +962,7 @@ def npu_places(device_ids=None): paddle.enable_static() npu_places = static.npu_places() """ - assert core.is_compiled_with_npu(), \ - "Not compiled with NPU" + assert core.is_compiled_with_npu(), "Not compiled with NPU" if device_ids is None: device_ids = _npu_ids() elif not isinstance(device_ids, (list, tuple)): @@ -1008,8 +1030,7 @@ def cuda_pinned_places(device_count=None): cuda_pinned_places = fluid.cuda_pinned_places(1) """ - assert core.is_compiled_with_cuda(), \ - "Not compiled with CUDA" + assert core.is_compiled_with_cuda(), "Not compiled with CUDA" if device_count is None: device_count = len(_cuda_ids()) return [core.CUDAPinnedPlace()] * device_count @@ -1049,8 +1070,7 @@ def mlu_places(device_ids=None): paddle.enable_static() mlu_places = static.mlu_places() """ - assert core.is_compiled_with_mlu(), \ - "Not compiled with MLU" + assert core.is_compiled_with_mlu(), "Not compiled with MLU" if device_ids is None: device_ids = _mlu_ids() elif not isinstance(device_ids, (list, tuple)): @@ -1059,7 +1079,6 @@ def mlu_places(device_ids=None): class NameScope(object): - def __init__(self, name="", parent=None): self._children = dict() self._name = name @@ -1070,8 +1089,9 @@ class NameScope(object): new_child = NameScope(prefix, self) self._children[prefix] = [new_child] else: - new_child = NameScope(prefix + "_%d" % len(self._children[prefix]), - self) + new_child = NameScope( + prefix + "_%d" % len(self._children[prefix]), self + ) self._children[prefix].append(new_child) return new_child @@ -1161,6 +1181,7 @@ def _full_name_scope(): def generate_control_dev_var_name(): import random + return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random()) @@ -1234,8 +1255,9 @@ def dtype_is_floating(dtype): dtype = convert_np_dtype_to_dtype_(dtype) return dtype in [ - core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32, - core.VarDesc.VarType.FP64 + core.VarDesc.VarType.FP16, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP64, ] @@ -1255,16 +1277,20 @@ def _debug_string_(proto, throw_on_error=True): if not proto.IsInitialized(error_fields) and throw_on_error: raise ValueError( "{0} are not initialized.\nThe message is {1}:\n".format( - error_fields, proto)) + error_fields, proto + ) + ) return proto.__str__() -def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR, - name=None, - shape=None, - dtype=None, - persistable=None, - **kwargs): +def _varbase_creator( + type=core.VarDesc.VarType.LOD_TENSOR, + name=None, + shape=None, + dtype=None, + persistable=None, + **kwargs +): if dtype is not None: if not isinstance(dtype, core.VarDesc.VarType): dtype = 
convert_np_dtype_to_dtype_(dtype) @@ -1272,16 +1298,21 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR, if _in_eager_mode_: eager_tensor = core.eager.Tensor( dtype if dtype else core.VarDesc.VarType.FP32, - list(shape) if shape else [], name, + list(shape) if shape else [], + name, type if type else core.VarDesc.VarType.LOD_TENSOR, - True if persistable else False) + True if persistable else False, + ) eager_tensor.retain_grads() return eager_tensor else: - return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32, - list(shape) if shape else [], name, - type if type else core.VarDesc.VarType.LOD_TENSOR, - True if persistable else False) + return core.VarBase( + dtype if dtype else core.VarDesc.VarType.FP32, + list(shape) if shape else [], + name, + type if type else core.VarDesc.VarType.LOD_TENSOR, + True if persistable else False, + ) def _all_is_type(vals, expected_type): @@ -1291,12 +1322,12 @@ def _all_is_type(vals, expected_type): NOTE: BuiltIn all() will always return True if vals is empty. """ assert isinstance(vals, (list, tuple)) - if not vals: return False + if not vals: + return False return all(isinstance(v, expected_type) for v in vals) class VariableMetaClass(type): - @classmethod def __instancecheck__(cls, instance): t = type(instance) @@ -1309,7 +1340,6 @@ class VariableMetaClass(type): class ParameterMetaClass(VariableMetaClass): - @classmethod def __instancecheck__(cls, instance): t = type(instance) @@ -1365,21 +1395,23 @@ class Variable(metaclass=VariableMetaClass): """ - def __init__(self, - block, - type=core.VarDesc.VarType.LOD_TENSOR, - name=None, - shape=None, - dtype=None, - lod_level=None, - capacity=None, - persistable=None, - error_clip=None, - stop_gradient=False, - is_data=False, - need_check_feed=False, - belong_to_optimizer=False, - **kwargs): + def __init__( + self, + block, + type=core.VarDesc.VarType.LOD_TENSOR, + name=None, + shape=None, + dtype=None, + lod_level=None, + capacity=None, + persistable=None, + error_clip=None, + stop_gradient=False, + is_data=False, + need_check_feed=False, + belong_to_optimizer=False, + **kwargs + ): self.block = block if name is None: name = unique_name.generate('_generated_var') @@ -1409,10 +1441,11 @@ class Variable(metaclass=VariableMetaClass): if is_new_var: self.desc.set_type(type) elif self.desc.type() != type: - raise ValueError("Variable '{0}' has been created before. The " - "previous type is {1}, the new type is {2}. They" - " are not matched".format(self.name, - self.desc.type(), type)) + raise ValueError( + "Variable '{0}' has been created before. The " + "previous type is {1}, the new type is {2}. They" + " are not matched".format(self.name, self.desc.type(), type) + ) if shape is not None: if is_new_var: @@ -1424,29 +1457,32 @@ class Variable(metaclass=VariableMetaClass): raise ValueError( "Variable '{0}' has been created before. The previous " "shape is {1}, the new shape is {2}. They are not " - "matched.".format(self.name, old_shape, shape)) + "matched.".format(self.name, old_shape, shape) + ) if dtype is not None: if is_new_var: self.desc.set_dtype(dtype) else: old_dtype = self.dtype if dtype != old_dtype: - raise ValueError("Variable '{0}' has been created before. " - "The previous data type is {1}, the new " - "data type is {2}. They are not " - "matched.".format(self.name, old_dtype, - dtype)) + raise ValueError( + "Variable '{0}' has been created before. " + "The previous data type is {1}, the new " + "data type is {2}. 
They are not " + "matched.".format(self.name, old_dtype, dtype) + ) if lod_level is not None: if is_new_var: self.desc.set_lod_level(lod_level) else: if lod_level != self.lod_level: - raise ValueError("Variable '{0}' has been created before. " - "The previous lod_level is {1}, the new " - "lod_level is {2}. They are not " - "matched".format(self.name, self.lod_level, - lod_level)) + raise ValueError( + "Variable '{0}' has been created before. " + "The previous lod_level is {1}, the new " + "lod_level is {2}. They are not " + "matched".format(self.name, self.lod_level, lod_level) + ) if persistable is not None: if is_new_var: self.desc.set_persistable(persistable) @@ -1456,7 +1492,9 @@ class Variable(metaclass=VariableMetaClass): "Variable '{0}' has been created before." "The previous persistable is {1}, the new " "persistable is {2}. They are not matched".format( - self.name, self.persistable, persistable)) + self.name, self.persistable, persistable + ) + ) if need_check_feed and is_new_var: self.desc.set_need_check_feed(need_check_feed) @@ -1497,20 +1535,22 @@ class Variable(metaclass=VariableMetaClass): y = x.detach() """ - assert self.type == core.VarDesc.VarType.SELECTED_ROWS or \ - self.type == core.VarDesc.VarType.LOD_TENSOR, \ - "only support a variable with SELECTED_ROWS or LOD_TENSOR to be detached" + assert ( + self.type == core.VarDesc.VarType.SELECTED_ROWS + or self.type == core.VarDesc.VarType.LOD_TENSOR + ), "only support a variable with SELECTED_ROWS or LOD_TENSOR to be detached" output = self.block.create_var( name=unique_name.generate_with_ignorable_key("detach_" + self.name), dtype=self.dtype, type=self.type, persistable=self.persistable, - stop_gradient=True) + stop_gradient=True, + ) - self.block.append_op(type='share_data', - inputs={'X': [self]}, - outputs={'Out': [output]}) + self.block.append_op( + type='share_data', inputs={'X': [self]}, outputs={'Out': [output]} + ) return output @fake_interface_only @@ -1700,14 +1740,20 @@ class Variable(metaclass=VariableMetaClass): """ # VarType.LOD_TENSOR -> LOD_TENSOR type_str = str(self.type).split('.')[1] - if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR: + if ( + self.type == core.VarDesc.VarType.SELECTED_ROWS + or self.type == core.VarDesc.VarType.LOD_TENSOR + ): dtype_str = str(self.dtype).split('.')[1] - var_str = "{name} : {type}.shape{shape}.dtype({dtype}).stop_gradient({stop_gradient})".\ - format(name=self.name, type=type_str, shape=self.shape, - dtype=dtype_str, stop_gradient=self.stop_gradient) + var_str = "{name} : {type}.shape{shape}.dtype({dtype}).stop_gradient({stop_gradient})".format( + name=self.name, + type=type_str, + shape=self.shape, + dtype=dtype_str, + stop_gradient=self.stop_gradient, + ) else: - var_str = "{name} : {type})".\ - format(name=self.name, type=type_str) + var_str = "{name} : {type})".format(name=self.name, type=type_str) if self.is_parameter: if self.trainable: @@ -1720,12 +1766,16 @@ class Variable(metaclass=VariableMetaClass): if self.persistable: var_str = "persist " + var_str - from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context + from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, + ) + dist_context = get_default_distributed_context() dist_tensor = dist_context.get_dist_tensor_for_program(self) if dist_tensor is not None: - var_str += ", {name} = {value}".format(name="dist_attr", - value=dist_tensor) + var_str += ", {name} = {value}".format( + 
name="dist_attr", value=dist_tensor + ) return var_str @@ -1759,12 +1809,13 @@ class Variable(metaclass=VariableMetaClass): print(new_variable.to_string(True, True)) """ assert isinstance(throw_on_error, bool) and isinstance( - with_details, bool) + with_details, bool + ) protostr = self.desc.serialize_to_string() proto = framework_pb2.VarDesc.FromString(bytes(protostr)) res_str = _debug_string_(proto, throw_on_error) if with_details: - additional_attr = ("error_clip", ) + additional_attr = ("error_clip",) for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, getattr(self, attr_name)) @@ -2061,21 +2112,22 @@ class Variable(metaclass=VariableMetaClass): dtype=self.dtype, type=self.type, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) input_shape = self.block.create_var( name=unique_name.generate_with_ignorable_key(self.name + '.tmp'), dtype=self.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - - self.block.append_op(type='transpose2', - inputs={'X': [self]}, - outputs={ - 'Out': [out], - 'XShape': [input_shape] - }, - attrs={'axis': perm}) + stop_gradient=False, + ) + + self.block.append_op( + type='transpose2', + inputs={'X': [self]}, + outputs={'Out': [out], 'XShape': [input_shape]}, + attrs={'axis': perm}, + ) return out def clone(self): @@ -2106,11 +2158,12 @@ class Variable(metaclass=VariableMetaClass): dtype=self.dtype, type=self.type, persistable=self.persistable, - stop_gradient=self.stop_gradient) + stop_gradient=self.stop_gradient, + ) - self.block.append_op(type='assign', - inputs={'X': [self]}, - outputs={'Out': [output]}) + self.block.append_op( + type='assign', inputs={'X': [self]}, outputs={'Out': [output]} + ) return output def _set_error_clip(self, error_clip): @@ -2176,8 +2229,9 @@ class Variable(metaclass=VariableMetaClass): start = upper if step < 0 else lower else: start = slice.start - start = max(start + - length, lower) if start < 0 else min(start, upper) + start = ( + max(start + length, lower) if start < 0 else min(start, upper) + ) # Compute stop. 
if slice.stop is None: @@ -2223,11 +2277,15 @@ class Variable(metaclass=VariableMetaClass): for index, o in enumerate(item): if isinstance(o, int): start = int(o) - if (index > 0 and index >= self.shape[index]) \ - or (index < 0 and (index + self.shape[index]) < 0): + if (index > 0 and index >= self.shape[index]) or ( + index < 0 and (index + self.shape[index]) < 0 + ): raise IndexError("invalid index") - start = max(start + self.shape[index], 0) if start < 0 else min( - start, self.shape[index]) + start = ( + max(start + self.shape[index], 0) + if start < 0 + else min(start, self.shape[index]) + ) starts.append(start) ends.append(start + 1) elif isinstance(o, slice): @@ -2245,30 +2303,31 @@ class Variable(metaclass=VariableMetaClass): if not copy: return self.block.create_var( name=unique_name.generate_with_ignorable_key(self.name), - dtype=self.dtype) + dtype=self.dtype, + ) else: return self def _sliceVar(self, axes, starts, ends): new_var = self._cloneVar() - self.block.append_op(type="slice", - inputs={'Input': [self]}, - outputs={'Out': [new_var]}, - attrs={ - 'axes': axes, - 'starts': starts, - 'ends': ends - }) + self.block.append_op( + type="slice", + inputs={'Input': [self]}, + outputs={'Out': [new_var]}, + attrs={'axes': axes, 'starts': starts, 'ends': ends}, + ) return new_var def _concatVar(self, inputs, axis): new_var = self._cloneVar() - self.block.append_op(type="concat", - inputs={'X': inputs}, - outputs={'Out': [new_var]}, - attrs={ - 'axis': axis, - }) + self.block.append_op( + type="concat", + inputs={'X': inputs}, + outputs={'Out': [new_var]}, + attrs={ + 'axis': axis, + }, + ) return new_var def _sliceAndConcatVar(self, item, axis): @@ -2282,21 +2341,24 @@ class Variable(metaclass=VariableMetaClass): vars = [] if step > 0: while start < stop: - vars.append(self._sliceVar([axis], [start], - [start + 1])) + vars.append( + self._sliceVar([axis], [start], [start + 1]) + ) start += step else: while start > stop: - vars.append(self._sliceVar([axis], [start], - [start + 1])) + vars.append( + self._sliceVar([axis], [start], [start + 1]) + ) start += step return self._concatVar(vars, axis) elif isinstance(item, int): if self.shape[axis] < 0: return self._cloneVar(True) index = int(item) - if (index > 0 and index >= self.shape[axis]) \ - or (index < 0 and (index + self.shape[axis]) < 0): + if (index > 0 and index >= self.shape[axis]) or ( + index < 0 and (index + self.shape[axis]) < 0 + ): raise IndexError("invalid index") return self._sliceVar([axis], [index], [index + 1]) else: @@ -2353,17 +2415,21 @@ class Variable(metaclass=VariableMetaClass): # can not be imported at the begainning of this file. # Therefore, the above two modules are dynamically imported. from .executor import global_scope + if scope is not None and not isinstance(scope, core._Scope): raise TypeError( - "`scope` should be None or `paddle.static.Scope` type, but received {}." 
- .format(type(scope))) + "`scope` should be None or `paddle.static.Scope` type, but received {}.".format( + type(scope) + ) + ) if scope is None: scope = global_scope() var_temp = scope.find_var(self.name) if var_temp is None: - raise ValueError("Can not find Variable '{}' in the Scope.".format( - self.name)) + raise ValueError( + "Can not find Variable '{}' in the Scope.".format(self.name) + ) t = var_temp.get_tensor() return t @@ -2417,21 +2483,26 @@ class Variable(metaclass=VariableMetaClass): if not (isinstance(value, np.ndarray) or hasattr(value, '__array__')): raise TypeError( - "`value` should be `numpy.ndarray` or `LoDTensor`, but received {}." - .format(type(value))) + "`value` should be `numpy.ndarray` or `LoDTensor`, but received {}.".format( + type(value) + ) + ) if scope is not None and not isinstance(scope, core._Scope): raise TypeError( - "`scope` should be None or `paddle.static.Scope` type, but received {}." - .format(type(scope))) + "`scope` should be None or `paddle.static.Scope` type, but received {}.".format( + type(scope) + ) + ) if scope is None: scope = global_scope() var_temp = scope.find_var(self.name) if var_temp is None: - raise ValueError("Can not find Variable '{}' in the Scope.".format( - self.name)) + raise ValueError( + "Can not find Variable '{}' in the Scope.".format(self.name) + ) t = var_temp.get_tensor() @@ -2442,8 +2513,10 @@ class Variable(metaclass=VariableMetaClass): value_shape = value.shape if list(t.shape()) != list(value_shape): raise ValueError( - "{} expected a shape {}, but the received shape is {}.". - format(self.name, list(t.shape()), list(value_shape))) + "{} expected a shape {}, but the received shape is {}.".format( + self.name, list(t.shape()), list(value_shape) + ) + ) p = t._place() if p.is_cpu_place(): @@ -2492,11 +2565,12 @@ class Variable(metaclass=VariableMetaClass): output = self.block.create_var( name=unique_name.generate_with_ignorable_key(self.name + "_size"), - dtype=core.VarDesc.VarType.INT64) + dtype=core.VarDesc.VarType.INT64, + ) - self.block.append_op(type='size', - inputs={'Input': [self]}, - outputs={'Out': [output]}) + self.block.append_op( + type='size', inputs={'Input': [self]}, outputs={'Out': [output]} + ) return output def _set_attr(self, name, val): @@ -2595,8 +2669,8 @@ class OpProtoHolder(object): def __init__(self): assert not hasattr( - self.__class__, - '_instance'), 'Please use `instance()` to get OpProtoHolder object!' + self.__class__, '_instance' + ), 'Please use `instance()` to get OpProtoHolder object!' 
op_protos = get_all_op_protos() self.op_proto_map = {} for proto in op_protos: @@ -2632,7 +2706,7 @@ class OpProtoHolder(object): core.op_proto_and_checker_maker.kOpRoleVarAttrName(), core.op_proto_and_checker_maker.kOpNameScopeAttrName(), core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(), - core.op_proto_and_checker_maker.kOpDeviceAttrName() + core.op_proto_and_checker_maker.kOpDeviceAttrName(), } @@ -2679,24 +2753,44 @@ class Operator(object): inputs={"X": [var1, var2, var3]}, outputs={"Out": [var1]}) """ + OP_WITHOUT_KERNEL_SET = { - 'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad', - 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv', - 'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify', - 'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id', - 'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream', - 'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv', - 'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl', - 'copy_cross_scope', 'c_gen_cncl_id' + 'feed', + 'fetch', + 'recurrent', + 'go', + 'rnn_memory_helper_grad', + 'conditional_block', + 'while', + 'send', + 'recv', + 'listen_and_serv', + 'fl_listen_and_serv', + 'ncclInit', + 'select', + 'checkpoint_notify', + 'gen_bkcl_id', + 'c_gen_bkcl_id', + 'gen_nccl_id', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_sync_calc_stream', + 'c_sync_comm_stream', + 'queue_generator', + 'dequeue', + 'enqueue', + 'heter_listen_and_serv', + 'c_wait_comm', + 'c_wait_compute', + 'c_gen_hccl_id', + 'c_comm_init_hccl', + 'copy_cross_scope', + 'c_gen_cncl_id', } - def __init__(self, - block, - desc, - type=None, - inputs=None, - outputs=None, - attrs=None): + def __init__( + self, block, desc, type=None, inputs=None, outputs=None, attrs=None + ): # read attr type index from op proto to avoid unexpected type # conversions, e.g. narrowing conversion like double to float try: @@ -2710,7 +2804,8 @@ class Operator(object): if _non_static_mode(): if type is None: raise ValueError( - "`type` to initialized an Operator can not be None.") + "`type` to initialized an Operator can not be None." + ) self._type = type self.attrs = attrs if attrs else {} else: @@ -2730,11 +2825,14 @@ class Operator(object): if op_maker.kOpRoleAttrName() not in op_attrs: op_attrs[ - op_maker.kOpRoleAttrName()] = self.block.program._op_role + op_maker.kOpRoleAttrName() + ] = self.block.program._op_role role_var_name = op_maker.kOpRoleVarAttrName() - if len(self.block.program._op_role_var - ) != 0 and role_var_name not in op_attrs: + if ( + len(self.block.program._op_role_var) != 0 + and role_var_name not in op_attrs + ): op_attrs[role_var_name] = self.block.program._op_role_var if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0: @@ -2749,16 +2847,20 @@ class Operator(object): return if type is None: raise ValueError( - "`type` to initialized an Operator can not be None.") + "`type` to initialized an Operator can not be None." 
+ ) else: callstack_var_name = op_maker.kOpCreationCallstackAttrName() op_attrs[callstack_var_name] = [] for frame in traceback.extract_stack(): op_attrs[callstack_var_name].append( ' File "{}", line {}, in {}'.format( - frame[0], frame[1], frame[2])) - op_attrs[callstack_var_name].append(' {}'.format( - frame[3])) + frame[0], frame[1], frame[2] + ) + ) + op_attrs[callstack_var_name].append( + ' {}'.format(frame[3]) + ) self.desc.set_type(type) proto = OpProtoHolder.instance().get_op_proto(type) @@ -2774,20 +2876,25 @@ class Operator(object): op_device = op_maker.kOpDeviceAttrName() op_attrs[op_device] = _current_device else: - warnings.warn("The Op(%s) is not support to set device." % - type) + warnings.warn( + "The Op(%s) is not support to set device." % type + ) if 'force_cpu' in op_attrs: - if (type == 'less_than' and op_attrs['force_cpu'] != None - ) or op_attrs['force_cpu'] != False: + if ( + type == 'less_than' and op_attrs['force_cpu'] != None + ) or op_attrs['force_cpu'] != False: warnings.warn( "The Attr(force_cpu) of Op(%s) will be deprecated in the future, " "please use 'device_guard' instead. 'device_guard' has higher priority when they are " - "used at the same time." % type) + "used at the same time." % type + ) if _current_pipeline_stage is not None: - pipeline_attr_name = 'pipeline_stage' + core.kAutoParallelSuffix( + pipeline_attr_name = ( + 'pipeline_stage' + core.kAutoParallelSuffix() + ) + self._update_desc_attr( + pipeline_attr_name, _current_pipeline_stage ) - self._update_desc_attr(pipeline_attr_name, - _current_pipeline_stage) def find_name(var_list, name): for var_name in var_list: @@ -2798,8 +2905,9 @@ class Operator(object): if inputs is not None: for in_proto in proto.inputs: found = find_name(inputs, in_proto.name) - assert found or in_proto.dispensable, "Input {} not found".format( - in_proto.name) + assert ( + found or in_proto.dispensable + ), "Input {} not found".format(in_proto.name) if found: in_args = inputs[in_proto.name] if not isinstance(in_args, (list, tuple)): @@ -2807,7 +2915,8 @@ class Operator(object): if not in_proto.duplicable and len(in_args) > 1: raise ValueError( "Input %s expects only one input, but %d are given." - % (in_proto.name, len(in_args))) + % (in_proto.name, len(in_args)) + ) in_arg_names = [] for index, arg in enumerate(in_args): if isinstance(arg, str): @@ -2821,8 +2930,9 @@ class Operator(object): "The type of '%s' in operator %s should be " "one of [basestring(), str, Varibale] in python2, " "or one of [str, bytes, Variable] in python3." - "but received : %s" % - (in_proto.name, type, arg)) + "but received : %s" + % (in_proto.name, type, arg) + ) self.desc.set_input(in_proto.name, in_arg_names) else: self.desc.set_input(in_proto.name, []) @@ -2833,9 +2943,12 @@ class Operator(object): continue if not ((m.name in outputs) or m.dispensable): raise ValueError( - ("Incorrect setting for output(s) of " - "operator \"%s\", should set: [%s].") % - (type, m.name)) + ( + "Incorrect setting for output(s) of " + "operator \"%s\", should set: [%s]." + ) + % (type, m.name) + ) for out_proto in proto.outputs: if out_proto.name not in outputs: continue @@ -2845,7 +2958,8 @@ class Operator(object): if not out_proto.duplicable and len(out_args) > 1: raise ValueError( "Output %s expects only one output, but %d are given." 
- % (out_proto.name, len(out_args))) + % (out_proto.name, len(out_args)) + ) out_arg_names = [] for arg in out_args: if isinstance(arg, str): @@ -2866,27 +2980,32 @@ class Operator(object): raise TypeError("'attrs' should be a dict.") for attr in proto.attrs: attr_name = attr.name - if (attr_name - not in op_attrs) or (op_attrs[attr_name] is None): + if (attr_name not in op_attrs) or ( + op_attrs[attr_name] is None + ): continue attr_val = op_attrs[attr_name] self._update_desc_attr(attr_name, attr_val) for attr_name in extra_attrs_map.keys(): - if (attr_name - not in op_attrs) or (op_attrs[attr_name] is None): - self._update_desc_attr(attr_name, - extra_attrs_map[attr_name]) + if (attr_name not in op_attrs) or ( + op_attrs[attr_name] is None + ): + self._update_desc_attr( + attr_name, extra_attrs_map[attr_name] + ) else: self._update_desc_attr(attr_name, op_attrs[attr_name]) # proto.attrs doesn't include ipu_index if core.is_compiled_with_ipu(): if global_ipu_index >= 0: - self._update_desc_attr(ipu_index_attr_name, - global_ipu_index) + self._update_desc_attr( + ipu_index_attr_name, global_ipu_index + ) if global_ipu_stage >= 0: - self._update_desc_attr(ipu_stage_attr_name, - global_ipu_stage) + self._update_desc_attr( + ipu_stage_attr_name, global_ipu_stage + ) self.desc.check_attrs() if self._has_kernel(type): @@ -2945,7 +3064,8 @@ class Operator(object): assert isinstance( skip_op_callstack, bool ), "skip_op_callstack parameter's type is error, expect bool, received {}".format( - type(skip_op_callstack)) + type(skip_op_callstack) + ) outputs_str = "{" for i in range(0, len(self.output_names)): outputs_str += "{name}=".format(name=self.output_names[i]) @@ -2975,9 +3095,9 @@ class Operator(object): attr_type = self.desc.attr_type(name, True) if attr_type == core.AttrType.VAR: attr_var_name = self.desc.attr(name, True).name() - a = "{name} = Var['{value}']".format(name=name, - type=attr_type, - value=attr_var_name) + a = "{name} = Var['{value}']".format( + name=name, type=attr_type, value=attr_var_name + ) attrs_str += a if i != len(attr_names) - 1: attrs_str += ", " @@ -2988,7 +3108,8 @@ class Operator(object): "'%s'" % var.name() for var in self.desc.attr(name, True) ] a = "{name} = Vars[{value}]".format( - name=name, type=attr_type, value=','.join(attr_var_names)) + name=name, type=attr_type, value=','.join(attr_var_names) + ) attrs_str += a if i != len(attr_names) - 1: attrs_str += ", " @@ -2996,7 +3117,8 @@ class Operator(object): if attr_type == core.AttrType.BLOCK: a = "{name} = block[{value}]".format( - name=name, type=attr_type, value=self._block_attr_id(name)) + name=name, type=attr_type, value=self._block_attr_id(name) + ) attrs_str += a if i != len(attr_names) - 1: attrs_str += ", " @@ -3004,17 +3126,19 @@ class Operator(object): if attr_type == core.AttrType.BLOCKS: a = "{name} = blocks{value}".format( - name=name, - type=attr_type, - value=self._blocks_attr_ids(name)) + name=name, type=attr_type, value=self._blocks_attr_ids(name) + ) attrs_str += a if i != len(attr_names) - 1: attrs_str += ", " continue # it is bytes of serialized protobuf - if is_compiled_with_cinn( - ) and self.type == 'cinn_launch' and name == 'compilation_key': + if ( + is_compiled_with_cinn() + and self.type == 'cinn_launch' + and name == 'compilation_key' + ): key = self.desc.attr(name) v = core.get_serialize_comile_key(key) prog = Program() @@ -3026,28 +3150,36 @@ class Operator(object): else: value = self.desc.attr(name) - a = "{name} = {value}".format(name=name, - type=attr_type, - value=value) + a 
= "{name} = {value}".format( + name=name, type=attr_type, value=value + ) attrs_str += a if i != len(attr_names) - 1: attrs_str += ", " - from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context + from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, + ) + dist_context = get_default_distributed_context() dist_op = dist_context.get_dist_op_for_program(self) if dist_op is not None: - attrs_str += ", {name} = {value}".format(name="dist_attr", - value=dist_op) + attrs_str += ", {name} = {value}".format( + name="dist_attr", value=dist_op + ) if outputs_str != "{}": - op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".\ - format(outputs=outputs_str, op_type=self.type, - inputs=inputs_str, attrs=attrs_str) + op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".format( + outputs=outputs_str, + op_type=self.type, + inputs=inputs_str, + attrs=attrs_str, + ) else: - op_str = "{op_type}(inputs={inputs}, {attrs})".\ - format(op_type=self.type, inputs=inputs_str, attrs=attrs_str) + op_str = "{op_type}(inputs={inputs}, {attrs})".format( + op_type=self.type, inputs=inputs_str, attrs=attrs_str + ) return op_str def __str__(self): @@ -3133,7 +3265,8 @@ class Operator(object): if op == self: return i raise ValueError( - "Can't find op itself in it's block. It could be a bug of Paddle.") + "Can't find op itself in it's block. It could be a bug of Paddle." + ) def has_attr(self, name): """ @@ -3195,8 +3328,9 @@ class Operator(object): self.desc.set_block_attr(name, val.desc) elif isinstance(val, list) and val and _all_is_type(val, Block): self.desc.set_blocks_attr(name, [v.desc for v in val]) - elif isinstance(val, core.BlockDesc) or \ - isinstance(val, core.ProgramDesc): + elif isinstance(val, core.BlockDesc) or isinstance( + val, core.ProgramDesc + ): self.desc.set_serialized_attr(name, val.serialize_to_string()) else: self._update_desc_plain_attr(name, val) @@ -3277,7 +3411,7 @@ class Operator(object): """ id = self._block_attr_id(name) - assert (id >= 0 and id < len(self.block.program.blocks)) + assert id >= 0 and id < len(self.block.program.blocks) return self.block.program.blocks[id] def _blocks_attr(self, name): @@ -3292,7 +3426,7 @@ class Operator(object): """ attrs = [] for i in self._blocks_attr_ids(name): - assert (i >= 0 and i < len(self.block.program.blocks)) + assert i >= 0 and i < len(self.block.program.blocks) attrs.append(self.block.program.blocks[i]) return attrs @@ -3321,8 +3455,11 @@ class Operator(object): Variable: the Variable attribute. """ attr_type = self.desc.attr_type(name, True) - assert attr_type == core.AttrType.VAR, "Required type attr({}) is Variable, but received {}".format( - name, attr_type) + assert ( + attr_type == core.AttrType.VAR + ), "Required type attr({}) is Variable, but received {}".format( + name, attr_type + ) attr_var_name = self.desc.attr(name, True).name() return self.block._var_recursive(attr_var_name) @@ -3337,8 +3474,11 @@ class Operator(object): Variables: the Variables attribute. 
""" attr_type = self.desc.attr_type(name, True) - assert attr_type == core.AttrType.VARS, "Required type attr({}) is list[Variable], but received {}".format( - name, attr_type) + assert ( + attr_type == core.AttrType.VARS + ), "Required type attr({}) is list[Variable], but received {}".format( + name, attr_type + ) attr_vars = [ self.block._var_recursive(var.name()) for var in self.desc.attr(name, True) @@ -3485,7 +3625,8 @@ class Block(object): assert isinstance( skip_op_callstack, bool ), "skip_op_callstack parameter's type is error, expect bool, received {}".format( - type(skip_op_callstack)) + type(skip_op_callstack) + ) block_str = "{ // block " block_str += "{}\n".format(self.idx) for var in list(self.vars.values()): @@ -3493,7 +3634,8 @@ class Block(object): block_str += "\n" for op in self.ops: block_str += " {}\n".format( - op._to_readable_code(skip_op_callstack)) + op._to_readable_code(skip_op_callstack) + ) block_str += "}" return block_str @@ -3512,17 +3654,22 @@ class Block(object): str: The debug string. """ assert isinstance(throw_on_error, bool) and isinstance( - with_details, bool) + with_details, bool + ) if with_details: re_add_indent = re.compile(r"\n(.)") res_str = "blocks {\n idx: %d\n parent_idx: %d" % ( - self.idx, self.parent_idx) + self.idx, + self.parent_idx, + ) for var in list(self.vars.values()): res_str += "\n vars {\n %s }" % re_add_indent.sub( - r"\n \1", var.to_string(throw_on_error, with_details)) + r"\n \1", var.to_string(throw_on_error, with_details) + ) for op in self.ops: res_str += "\n ops {\n %s }" % re_add_indent.sub( - r"\n \1", op.to_string(throw_on_error)) + r"\n \1", op.to_string(throw_on_error) + ) res_str += "\n}" else: protostr = self.desc.serialize_to_string() @@ -3580,8 +3727,9 @@ class Block(object): """ if not isinstance(name, str): raise TypeError( - "var require string as parameter, but get %s instead." % - (type(name))) + "var require string as parameter, but get %s instead." 
+ % (type(name)) + ) v = self.vars.get(name, None) if v is None: raise ValueError("var %s not in this block" % name) @@ -3647,8 +3795,11 @@ class Block(object): return list(self.iter_parameters()) def iter_parameters(self): - return (item[1] for item in self.vars.items() - if isinstance(item[1], Parameter)) + return ( + item[1] + for item in self.vars.items() + if isinstance(item[1], Parameter) + ) def create_var(self, *args, **kwargs): if _non_static_mode(): @@ -3680,8 +3831,9 @@ class Block(object): """ # Ensure the type of name and new_name is str name = name.decode() if isinstance(name, bytes) else name - new_name = new_name.decode() if isinstance(new_name, - bytes) else new_name + new_name = ( + new_name.decode() if isinstance(new_name, bytes) else new_name + ) if not self.has_var(name): raise ValueError("var %s is not in current block" % name) @@ -3705,43 +3857,51 @@ class Block(object): d = self.desc.find_var(new_name.encode()) if var_type == "Parameter": if in_dygraph_mode(): - var = EagerParamBase(d.shape(), - d.dtype(), - type=orig_var_type, - name=new_name, - stop_gradient=stop_gradient, - trainable=trainable, - optimize_attr=optimize_attr, - regularizer=regularizer, - error_clip=error_clip) + var = EagerParamBase( + d.shape(), + d.dtype(), + type=orig_var_type, + name=new_name, + stop_gradient=stop_gradient, + trainable=trainable, + optimize_attr=optimize_attr, + regularizer=regularizer, + error_clip=error_clip, + ) else: if _in_legacy_dygraph(): - var = ParamBase(d.shape(), - d.dtype(), - type=orig_var_type, - name=new_name, - stop_gradient=stop_gradient, - trainable=trainable, - optimize_attr=optimize_attr, - regularizer=regularizer, - error_clip=error_clip) + var = ParamBase( + d.shape(), + d.dtype(), + type=orig_var_type, + name=new_name, + stop_gradient=stop_gradient, + trainable=trainable, + optimize_attr=optimize_attr, + regularizer=regularizer, + error_clip=error_clip, + ) else: - var = Parameter(self, - d.shape(), - d.dtype(), - type=orig_var_type, - name=new_name, - stop_gradient=stop_gradient, - trainable=trainable, - optimize_attr=optimize_attr, - regularizer=regularizer, - error_clip=error_clip) + var = Parameter( + self, + d.shape(), + d.dtype(), + type=orig_var_type, + name=new_name, + stop_gradient=stop_gradient, + trainable=trainable, + optimize_attr=optimize_attr, + regularizer=regularizer, + error_clip=error_clip, + ) elif var_type == "Variable": - var = Variable(self, - type=orig_var_type, - name=new_name, - error_clip=error_clip, - stop_gradient=stop_gradient) + var = Variable( + self, + type=orig_var_type, + name=new_name, + error_clip=error_clip, + stop_gradient=stop_gradient, + ) # rename the python side, _sync_with_cpp will only add # new vars/ops to python side. @@ -3778,8 +3938,9 @@ class Block(object): # Think of "c_broadcast" and "c_sync_comm_stream" as a special case here. 
# NOTE: "coalesce_tensor" is a special case for rnn with cudnn support if op.type in [ - "c_broadcast", "c_sync_comm_stream", - "coalesce_tensor" + "c_broadcast", + "c_sync_comm_stream", + "coalesce_tensor", ]: continue init_ops.append(op) @@ -3789,9 +3950,12 @@ class Block(object): init_ops = _is_inited_by(global_block, param) init_ops_len = len(init_ops) if init_ops_len > 1: - raise RuntimeError("param " + param.name + - " is inited by multiple init ops " + - str(init_ops)) + raise RuntimeError( + "param " + + param.name + + " is inited by multiple init ops " + + str(init_ops) + ) elif init_ops_len == 1: # TODO already inited, do nothing, should log a warning pass @@ -3813,24 +3977,31 @@ class Block(object): warnings.warn( "Op `%s` is executed through `append_op` under the dynamic mode, " "the corresponding API implementation needs to be upgraded to " - "using `_C_ops` method." % type, DeprecationWarning) - op = Operator(block=self, - desc=None, - type=type, - inputs=None, - outputs=None, - attrs=attrs) + "using `_C_ops` method." % type, + DeprecationWarning, + ) + op = Operator( + block=self, + desc=None, + type=type, + inputs=None, + outputs=None, + attrs=attrs, + ) # record ops in tracer rather than blocks # # TODO(minqiyang): add op stop_gradient support in static mode too. # currently, we only support stop_gradient in dygraph mode. - _dygraph_tracer().trace_op(type, kwargs.get("inputs", {}), - kwargs.get("outputs", - {}), attrs if attrs else {}, - kwargs.get("stop_gradient", False), - inplace_map) + _dygraph_tracer().trace_op( + type, + kwargs.get("inputs", {}), + kwargs.get("outputs", {}), + attrs if attrs else {}, + kwargs.get("stop_gradient", False), + inplace_map, + ) else: from paddle.fluid.dygraph.base import param_guard @@ -3841,12 +4012,14 @@ class Block(object): inputs = kwargs.get("inputs", None) outputs = kwargs.get("outputs", None) with param_guard(inputs), param_guard(outputs): - op = Operator(block=self, - desc=op_desc, - type=kwargs.get("type", None), - inputs=inputs, - outputs=outputs, - attrs=kwargs.get("attrs", None)) + op = Operator( + block=self, + desc=op_desc, + type=kwargs.get("type", None), + inputs=inputs, + outputs=outputs, + attrs=kwargs.get("attrs", None), + ) self.ops.append(op) @@ -3913,25 +4086,27 @@ class Block(object): if _non_static_mode(): type = kwargs.get("type", None) attrs = kwargs.get("attrs", {}) - op = Operator(self, - None, - type=type, - inputs=None, - outputs=None, - attrs=attrs) - - _dygraph_tracer().trace_op(type, kwargs.get("inputs", {}), - kwargs.get("outputs", {}), - attrs if attrs else {}, - kwargs.get("stop_gradient", False)) + op = Operator( + self, None, type=type, inputs=None, outputs=None, attrs=attrs + ) + + _dygraph_tracer().trace_op( + type, + kwargs.get("inputs", {}), + kwargs.get("outputs", {}), + attrs if attrs else {}, + kwargs.get("stop_gradient", False), + ) else: op_desc = self.desc._prepend_op() - op = Operator(self, - op_desc, - type=kwargs.get("type", None), - inputs=kwargs.get("inputs", None), - outputs=kwargs.get("outputs", None), - attrs=kwargs.get("attrs", None)) + op = Operator( + self, + op_desc, + type=kwargs.get("type", None), + inputs=kwargs.get("inputs", None), + outputs=kwargs.get("outputs", None), + attrs=kwargs.get("attrs", None), + ) self.ops.insert(0, op) return op @@ -3948,17 +4123,21 @@ class Block(object): if var.has_stop_gradient(): is_stop_gradient = var.stop_gradient() if var.has_is_parameter() and var.is_parameter(): - self.create_parameter(name=var.name(), - desc=var, - type=var.type(), - 
shape=var.shape(), - dtype=var.dtype(), - stop_gradient=is_stop_gradient) + self.create_parameter( + name=var.name(), + desc=var, + type=var.type(), + shape=var.shape(), + dtype=var.dtype(), + stop_gradient=is_stop_gradient, + ) else: - self.create_var(name=var.name(), - desc=var, - type=var.type(), - stop_gradient=is_stop_gradient) + self.create_var( + name=var.name(), + desc=var, + type=var.type(), + stop_gradient=is_stop_gradient, + ) # sync variables removed from c++ end for var in list(self.vars.keys()): @@ -4004,9 +4183,12 @@ class Block(object): ops_in_cpp_index = 0 ops_in_python_index = 0 while ops_in_python_index < len( - self.ops) and ops_in_cpp_index < len(ops_in_cpp): - if self.ops[ops_in_python_index].desc != ops_in_cpp[ - ops_in_cpp_index]: + self.ops + ) and ops_in_cpp_index < len(ops_in_cpp): + if ( + self.ops[ops_in_python_index].desc + != ops_in_cpp[ops_in_cpp_index] + ): del self.ops[ops_in_python_index] else: ops_in_cpp_index += 1 @@ -4032,7 +4214,8 @@ class Block(object): """ if not isinstance(other, Block): raise TypeError( - "_copy_param_info_from should be invoked with Block") + "_copy_param_info_from should be invoked with Block" + ) for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) @@ -4042,28 +4225,32 @@ class Block(object): assert isinstance(v, Variable) new_p = None if in_dygraph_mode(): - new_p = EagerParamBase(shape=v.shape, - dtype=v.dtype, - type=v.type, - lod_level=v.lod_level, - stop_gradient=p.stop_gradient, - trainable=p.trainable, - optimize_attr=p.optimize_attr, - regularizer=p.regularizer, - error_clip=p.error_clip, - name=v.name) + new_p = EagerParamBase( + shape=v.shape, + dtype=v.dtype, + type=v.type, + lod_level=v.lod_level, + stop_gradient=p.stop_gradient, + trainable=p.trainable, + optimize_attr=p.optimize_attr, + regularizer=p.regularizer, + error_clip=p.error_clip, + name=v.name, + ) else: if _in_legacy_dygraph(): - new_p = ParamBase(shape=v.shape, - dtype=v.dtype, - type=v.type, - lod_level=v.lod_level, - stop_gradient=p.stop_gradient, - trainable=p.trainable, - optimize_attr=p.optimize_attr, - regularizer=p.regularizer, - error_clip=p.error_clip, - name=v.name) + new_p = ParamBase( + shape=v.shape, + dtype=v.dtype, + type=v.type, + lod_level=v.lod_level, + stop_gradient=p.stop_gradient, + trainable=p.trainable, + optimize_attr=p.optimize_attr, + regularizer=p.regularizer, + error_clip=p.error_clip, + name=v.name, + ) else: new_p = Parameter( block=self, @@ -4071,13 +4258,15 @@ class Block(object): dtype=v.dtype, type=v.type, lod_level=v.lod_level - if v.type == core.VarDesc.VarType.LOD_TENSOR else None, + if v.type == core.VarDesc.VarType.LOD_TENSOR + else None, stop_gradient=p.stop_gradient, trainable=p.trainable, optimize_attr=p.optimize_attr, regularizer=p.regularizer, error_clip=p.error_clip, - name=v.name) + name=v.name, + ) self.vars[new_p.name] = new_p def _clone_variable(self, var, force_persistable=True): @@ -4097,13 +4286,13 @@ class Block(object): ret_var = None # make STEP_SCOPES var can be safely cloned. 
if var.type == core.VarDesc.VarType.STEP_SCOPES: - ret_var = self.create_var(name=var.name, - persistable=var.persistable, - type=var.type) + ret_var = self.create_var( + name=var.name, persistable=var.persistable, type=var.type + ) elif var.type == core.VarDesc.VarType.RAW: - ret_var = self.create_var(name=var.name, - persistable=var.persistable, - type=var.type) + ret_var = self.create_var( + name=var.name, persistable=var.persistable, type=var.type + ) elif var.type == core.VarDesc.VarType.SELECTED_ROWS: ret_var = self.create_var( name=var.name, @@ -4112,7 +4301,8 @@ class Block(object): type=var.type, persistable=True if force_persistable else var.persistable, is_data=var.is_data, - need_check_feed=var.desc.need_check_feed()) + need_check_feed=var.desc.need_check_feed(), + ) else: ret_var = self.create_var( name=var.name, @@ -4122,7 +4312,8 @@ class Block(object): lod_level=var.lod_level, persistable=True if force_persistable else var.persistable, is_data=var.is_data, - need_check_feed=var.desc.need_check_feed()) + need_check_feed=var.desc.need_check_feed(), + ) return ret_var @@ -4132,17 +4323,20 @@ class Block(object): # re-constructed inside this method. The underlying VarDesc(OpDesc) # of some old Python Variables(all old Python Operators) may have # been destructed. -def _apply_pass(main_program, - startup_program, - pass_name, - pass_attrs={}, - pass_attr_types={}): +def _apply_pass( + main_program, startup_program, pass_name, pass_attrs={}, pass_attr_types={} +): assert isinstance(pass_attrs, dict), "pass_attrs must be dict" assert isinstance(pass_attr_types, dict), "pass_attr_types must be dict" tmp_main_program = core.ProgramDesc(main_program.desc) tmp_startup_program = core.ProgramDesc(startup_program.desc) - attrs = core.apply_pass(tmp_main_program, tmp_startup_program, pass_name, - pass_attrs, pass_attr_types) + attrs = core.apply_pass( + tmp_main_program, + tmp_startup_program, + pass_name, + pass_attrs, + pass_attr_types, + ) main_program._rebuild_from_desc(tmp_main_program) startup_program._rebuild_from_desc(tmp_startup_program) return attrs @@ -4160,8 +4354,9 @@ class IrNode(object): Args: node(core.Node): C++ Node. """ - assert isinstance(node, - core.Node), 'node must be the instance of core.Node.' + assert isinstance( + node, core.Node + ), 'node must be the instance of core.Node.' self.node = node def name(self): @@ -4337,8 +4532,9 @@ class IrVarNode(IrNode): Args: node(core.Node): C++ Node. """ - assert isinstance(node, core.Node) and node.is_var(), \ - 'node must be the instance of core.Node and it must be a variable node.' + assert ( + isinstance(node, core.Node) and node.is_var() + ), 'node must be the instance of core.Node and it must be a variable node.' super(IrVarNode, self).__init__(node) self.node = node @@ -4349,8 +4545,9 @@ class IrVarNode(IrNode): Args: shape(list): shape to be set. """ - assert self.node.var() is not None, \ - "The node variable description can not be None." + assert ( + self.node.var() is not None + ), "The node variable description can not be None." self.node.var().set_shape(shape) def persistable(self): @@ -4360,8 +4557,9 @@ class IrVarNode(IrNode): Returns: bool: indicate whether the variable is persistable. """ - assert self.node.var() is not None, \ - "The node variable description can not be None." + assert ( + self.node.var() is not None + ), "The node variable description can not be None." 
return self.node.var().persistable() def type(self): @@ -4371,8 +4569,9 @@ class IrVarNode(IrNode): Returns: core.VarDesc.VarType: the variable type. """ - assert self.node.var() is not None, \ - "The node variable description can not be None." + assert ( + self.node.var() is not None + ), "The node variable description can not be None." return self.node.var().type() def dtype(self): @@ -4382,8 +4581,9 @@ class IrVarNode(IrNode): Returns: core.VarDesc.VarType: the variable data type. """ - assert self.node.var() is not None, \ - "The node variable description can not be None." + assert ( + self.node.var() is not None + ), "The node variable description can not be None." return self.node.var().dtype() def shape(self): @@ -4393,8 +4593,9 @@ class IrVarNode(IrNode): Returns: list: the variable shape. """ - assert self.node.var() is not None, \ - "The node variable description can not be None." + assert ( + self.node.var() is not None + ), "The node variable description can not be None." return self.node.var().shape() @property @@ -4430,8 +4631,9 @@ class IrOpNode(IrNode): Args: node(core.Node): C++ Node. """ - assert isinstance(node, core.Node) and node.is_op(), \ - 'node must be the instance of core.Node and it must be a operator node.' + assert ( + isinstance(node, core.Node) and node.is_op() + ), 'node must be the instance of core.Node and it must be a operator node.' super(IrOpNode, self).__init__(node) self.node = node @@ -4443,8 +4645,9 @@ class IrOpNode(IrNode): old_input_name(str): the old input name. new_input_name(str): the new input name. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." self.node.op()._rename_input(old_input_name, new_input_name) def rename_output(self, old_output_name, new_output_name): @@ -4455,8 +4658,9 @@ class IrOpNode(IrNode): old_output_name(str): the old output name. new_output_name(str): the new output name. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." self.node.op()._rename_output(old_output_name, new_output_name) def input(self, name): @@ -4469,8 +4673,9 @@ class IrOpNode(IrNode): Returns: list(str): the argument name list. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." return self.node.op().input(name) def output(self, name): @@ -4483,8 +4688,9 @@ class IrOpNode(IrNode): Returns: list(str): the argument name list. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." return self.node.op().output(name) def set_type(self, new_type): @@ -4494,8 +4700,9 @@ class IrOpNode(IrNode): Args: new_type(str): new operator type to be set. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." return self.node.op().set_type(new_type) def set_attr(self, name, val): @@ -4512,8 +4719,9 @@ class IrOpNode(IrNode): """ Update the value of the op desc's attribute by attribute's name. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." 
+ assert ( + self.node.op() is not None + ), "The node operator description can not be None." desc = self.node.op() if isinstance(val, Variable): desc.set_var_attr(name, val.desc) @@ -4523,8 +4731,9 @@ class IrOpNode(IrNode): desc.set_block_attr(name, val.desc) elif isinstance(val, list) and val and _all_is_type(val, Block): desc.set_blocks_attr(name, [v.desc for v in val]) - elif isinstance(val, core.BlockDesc) or \ - isinstance(val, core.ProgramDesc): + elif isinstance(val, core.BlockDesc) or isinstance( + val, core.ProgramDesc + ): desc.set_serialized_attr(name, val.serialize_to_string()) else: desc._set_attr(name, val) @@ -4536,8 +4745,9 @@ class IrOpNode(IrNode): Returns: list(str): input arguments' names of this op node. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." return self.node.op().input_arg_names() def output_arg_names(self): @@ -4547,8 +4757,9 @@ class IrOpNode(IrNode): Returns: list(str): output arguments' names of this op node. """ - assert self.node.op() is not None, \ - "The node operator description can not be None." + assert ( + self.node.op() is not None + ), "The node operator description can not be None." return self.node.op().output_arg_names() @property @@ -4589,7 +4800,8 @@ class IrGraph(object): for_test(bool): True for the test graph and false for the train graph. """ assert isinstance( - graph, core.Graph), 'graph must be the instance of core.Graph.' + graph, core.Graph + ), 'graph must be the instance of core.Graph.' self.graph = graph self._for_test = for_test @@ -4630,8 +4842,11 @@ class IrGraph(object): """ persistable_nodes = set() for node in self.graph.nodes(): - if node.is_var() and node.var() is not None and node.var( - ).persistable(): + if ( + node.is_var() + and node.var() is not None + and node.var().persistable() + ): persistable_nodes.add(node) return {IrVarNode(p) for p in persistable_nodes} @@ -4738,13 +4953,15 @@ class IrGraph(object): for input_name, var_nodes in inputs.items(): if not isinstance(var_nodes, list): var_nodes = [var_nodes] - op_desc.set_input(input_name, - [var_node.name() for var_node in var_nodes]) + op_desc.set_input( + input_name, [var_node.name() for var_node in var_nodes] + ) for output_name, var_nodes in outputs.items(): if not isinstance(var_nodes, list): var_nodes = [var_nodes] - op_desc.set_output(output_name, - [var_node.name() for var_node in var_nodes]) + op_desc.set_output( + output_name, [var_node.name() for var_node in var_nodes] + ) return IrOpNode(self.graph.create_op_node(op_desc)) def create_op_node_from_desc(self, op_desc): @@ -4768,9 +4985,11 @@ class IrGraph(object): new_input_node(IrNode): the new input node of the giving op_node. op_node(IrOpNode): the operator node that is needed to update input's link. """ - assert old_input_node.node in self.graph.nodes() and new_input_node.node in \ - self.graph.nodes() and op_node.node in self.graph.nodes(), \ - 'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.' + assert ( + old_input_node.node in self.graph.nodes() + and new_input_node.node in self.graph.nodes() + and op_node.node in self.graph.nodes() + ), 'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.' 
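# --- Illustrative sketch (not part of the patch) -----------------------------
# The hunks around here only re-wrap IrGraph, the thin Python wrapper over
# core.Graph that IR passes operate on. A hedged usage sketch (the Program
# below is just a fresh default program; nothing here is taken from the patch):
import paddle
from paddle.fluid import core
from paddle.fluid.framework import IrGraph

paddle.enable_static()
main_program = paddle.static.default_main_program()
# for_test selects the inference variant of the graph, mirroring the flag above
ir_graph = IrGraph(core.Graph(main_program.desc), for_test=True)
# ------------------------------------------------------------------------------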
old_input_node.remove_output(op_node) op_node.remove_input(old_input_node) new_input_node.append_output(op_node) @@ -4786,9 +5005,11 @@ class IrGraph(object): new_output_node(IrNode): the new output node of the giving op_node. op_node(IrOpNode): the operator node that is needed to update input's link. """ - assert old_output_node.node in self.graph.nodes() and new_output_node.node in \ - self.graph.nodes() and op_node.node in self.graph.nodes(), \ - 'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.' + assert ( + old_output_node.node in self.graph.nodes() + and new_output_node.node in self.graph.nodes() + and op_node.node in self.graph.nodes() + ), 'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.' old_output_node.remove_input(op_node) op_node.remove_output(old_output_node) new_output_node.append_input(op_node) @@ -4804,9 +5025,11 @@ class IrGraph(object): node_out(IrNode): the output node. """ assert node_in.node in self.graph.nodes(), ( - 'node_in(%s) must be in the graph nodes.' % node_in.node.name()) + 'node_in(%s) must be in the graph nodes.' % node_in.node.name() + ) assert node_out.node in self.graph.nodes(), ( - 'node_out(%s) must be in the graph nodes.' % node_out.node.name()) + 'node_out(%s) must be in the graph nodes.' % node_out.node.name() + ) node_in.append_output(node_out) node_out.append_input(node_in) @@ -4843,8 +5066,8 @@ class IrGraph(object): ] else: var_nodes[each_var_name].append( - self._find_node_by_name(node.outputs, - each_var_name)) + self._find_node_by_name(node.outputs, each_var_name) + ) self.graph.resolve_hazard(var_nodes) def has_circle(self): @@ -4906,13 +5129,15 @@ class IrGraph(object): def _convert_to_pdf(dot_file_path): pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf' - exited_code = subprocess.call('dot -Tpdf ' + dot_file_path + - ' -o ' + pdf_save_path, - shell=True) + exited_code = subprocess.call( + 'dot -Tpdf ' + dot_file_path + ' -o ' + pdf_save_path, + shell=True, + ) if exited_code != 0: print('The dot command is needed for creating pdf files.') - print('The {} is saved as the dot filetype.'.format( - dot_file_path)) + print( + 'The {} is saved as the dot filetype.'.format(dot_file_path) + ) remove_ctr_vars = set() if remove_ctr_var: @@ -4969,7 +5194,8 @@ class IrGraph(object): if n.name() == node_name: target_node = n assert target_node is not None, ( - "Cannot find the target node (%s)in the giving set." % node_name) + "Cannot find the target node (%s)in the giving set." % node_name + ) return target_node def _update_desc_attr(self, desc, name, val): @@ -4984,8 +5210,9 @@ class IrGraph(object): desc.set_block_attr(name, val.desc) elif isinstance(val, list) and val and _all_is_type(val, Block): desc.set_blocks_attr(name, [v.desc for v in val]) - elif isinstance(val, core.BlockDesc) or \ - isinstance(val, core.ProgramDesc): + elif isinstance(val, core.BlockDesc) or isinstance( + val, core.ProgramDesc + ): desc.set_serialized_attr(name, val.serialize_to_string()) else: desc._set_attr(name, val) @@ -5087,7 +5314,8 @@ class Program(object): # identifier for auto checkpoint self._auto_checkpoint_name = unique_name.generate( - "__auto_checkpoint_program__") + "__auto_checkpoint_program__" + ) # compiled program, i.e. 
Graph self._graph = None @@ -5107,7 +5335,7 @@ class Program(object): all_new_vars = [] block_num = new_desc.num_blocks() for idx in range(block_num): - if (idx > (len(self.blocks) - 1)): + if idx > (len(self.blocks) - 1): self._create_block() new_block_desc = new_desc.block(idx) all_new_vars.append([]) @@ -5119,60 +5347,75 @@ class Program(object): old_var = None kwargs = { - 'type': - new_var_desc.type(), - 'name': - new_var_desc.name(), - 'shape': - get_var_desc_attr_or_none(new_var_desc, "shape", [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.SELECTED_ROWS, - core.VarDesc.VarType.LOD_TENSOR_ARRAY, - ]), - 'dtype': - get_var_desc_attr_or_none(new_var_desc, "dtype", [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.SELECTED_ROWS, - core.VarDesc.VarType.LOD_TENSOR_ARRAY, - ]), - 'lod_level': - get_var_desc_attr_or_none(new_var_desc, "lod_level", [ - core.VarDesc.VarType.LOD_TENSOR, - core.VarDesc.VarType.LOD_TENSOR_ARRAY, - ]), - 'error_clip': - old_var.error_clip if old_var is not None else None, - 'stop_gradient': - old_var.stop_gradient if old_var is not None else False, - 'is_data': - old_var.is_data if old_var is not None else False, - 'need_check_feed': - new_var_desc.need_check_feed(), - 'belong_to_optimizer': - old_var.belong_to_optimizer - if old_var is not None else False, + 'type': new_var_desc.type(), + 'name': new_var_desc.name(), + 'shape': get_var_desc_attr_or_none( + new_var_desc, + "shape", + [ + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ], + ), + 'dtype': get_var_desc_attr_or_none( + new_var_desc, + "dtype", + [ + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.SELECTED_ROWS, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ], + ), + 'lod_level': get_var_desc_attr_or_none( + new_var_desc, + "lod_level", + [ + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ], + ), + 'error_clip': old_var.error_clip + if old_var is not None + else None, + 'stop_gradient': old_var.stop_gradient + if old_var is not None + else False, + 'is_data': old_var.is_data + if old_var is not None + else False, + 'need_check_feed': new_var_desc.need_check_feed(), + 'belong_to_optimizer': old_var.belong_to_optimizer + if old_var is not None + else False, } if isinstance(old_var, Parameter): - kwargs.update({ - 'trainable': old_var.trainable, - 'optimize_attr': old_var.optimize_attr, - 'regularizer': old_var.regularizer, - 'do_model_average': old_var.do_model_average, - 'need_clip': old_var.need_clip, - 'is_distributed': old_var.is_distributed, - 'is_parameter': old_var.is_parameter, - }) - block_new_vars.append({ - 'class': Parameter, - 'kwargs': copy.deepcopy(kwargs), - }) + kwargs.update( + { + 'trainable': old_var.trainable, + 'optimize_attr': old_var.optimize_attr, + 'regularizer': old_var.regularizer, + 'do_model_average': old_var.do_model_average, + 'need_clip': old_var.need_clip, + 'is_distributed': old_var.is_distributed, + 'is_parameter': old_var.is_parameter, + } + ) + block_new_vars.append( + { + 'class': Parameter, + 'kwargs': copy.deepcopy(kwargs), + } + ) else: kwargs['persistable'] = new_var_desc.persistable() - block_new_vars.append({ - 'class': Variable, - 'kwargs': copy.deepcopy(kwargs), - }) + block_new_vars.append( + { + 'class': Variable, + 'kwargs': copy.deepcopy(kwargs), + } + ) return all_new_vars @@ -5404,7 +5647,8 @@ class Program(object): assert isinstance( skip_op_callstack, bool ), "skip_op_callstack parameter's type is error, expect bool, received 
{}".format( - type(skip_op_callstack)) + type(skip_op_callstack) + ) program_str = "" for block in self.blocks: program_str += block._to_readable_code(skip_op_callstack) @@ -5446,11 +5690,13 @@ class Program(object): assert isinstance( throw_on_error, bool ), "The type of throw_on_error parameter is wrong, expected bool, but received {}.".format( - type(throw_on_error)) + type(throw_on_error) + ) assert isinstance( with_details, bool ), "The type of with_details parameter is wrong, expected bool, but received {}.".format( - type(with_details)) + type(with_details) + ) if with_details: res_str = "" @@ -5654,7 +5900,8 @@ class Program(object): if for_test: forward_prog = Program() forward_prog.desc, pruned_origin_block_id_map = core.prune_backward( - self.desc) + self.desc + ) forward_prog.blocks = [ Block(forward_prog, i) for i in range(forward_prog.desc.num_blocks()) @@ -5732,7 +5979,8 @@ class Program(object): if not isinstance(var, str): raise ValueError( "All feeded_var_names of Program._prune_with_input() can only be " - "str, but received %s." % type(var)) + "str, but received %s." % type(var) + ) # find out all variables that can be generated or updated with given feed generatable_vars = set() @@ -5760,7 +6008,8 @@ class Program(object): else: raise ValueError( "All targets of Program._prune_with_input() can only be " - "Variable or Operator, but received %s." % type(t)) + "Variable or Operator, but received %s." % type(t) + ) # NOTEZ(zhiqiu): For variable to be fed in fetch_list, there two cases: # (1) the variable is leaf, it has no op that generates it; @@ -5794,7 +6043,8 @@ class Program(object): res = Program() res.desc, pruned_origin_block_id_map = core.prune( - self.desc, set(feeded_var_names), targets_idx) + self.desc, set(feeded_var_names), targets_idx + ) res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())] res._sync_with_cpp() @@ -5833,8 +6083,10 @@ class Program(object): root_block = res.desc.block(0) if prune_read_op: while True: - if read_op_idx >= root_block.op_size() or root_block.op( - read_op_idx).type() == 'read': + if ( + read_op_idx >= root_block.op_size() + or root_block.op(read_op_idx).type() == 'read' + ): break read_op_idx += 1 if read_op_idx < root_block.op_size(): @@ -5926,14 +6178,22 @@ class Program(object): # for name in remove_output_list: # op.remove_output(name) - op_quant_name = core.op_proto_and_checker_maker.kOpWithQuantAttrName( + op_quant_name = ( + core.op_proto_and_checker_maker.kOpWithQuantAttrName() + ) + quant = ( + bool(op.attr(op_quant_name)) + if op_quant_name in op.attr_names() + else False ) - quant = bool(op.attr(op_quant_name) - ) if op_quant_name in op.attr_names() else False quant_attrs = [ - op_quant_name, "quantization_type", "skip_quant", - "activation_bits", "bit_length", "quantize_weight_bits", - "weight_quant_scale" + op_quant_name, + "quantization_type", + "skip_quant", + "activation_bits", + "bit_length", + "quantize_weight_bits", + "weight_quant_scale", ] for extra_attr_name in extra_attrs_map.keys(): op.remove_attr(extra_attr_name) @@ -6096,7 +6356,8 @@ class Program(object): if not isinstance(seed, int): raise ValueError( "Program.random_seed's input seed must be an integer, but received %s." - % type(seed)) + % type(seed) + ) self._seed = seed def __repr__(self): @@ -6193,8 +6454,11 @@ class Program(object): Block: The new block. 
""" new_block_idx = len(self.blocks) - parent = self.current_block() if parent_idx is None else self.block( - parent_idx) + parent = ( + self.current_block() + if parent_idx is None + else self.block(parent_idx) + ) self.desc.append_block(parent.desc) self.current_block_idx = new_block_idx self.blocks.append(Block(self, self.current_block_idx)) @@ -6240,7 +6504,8 @@ class Program(object): if not isinstance(other, Program): raise TypeError( "Function Program._copy_param_info_from() needs to pass in a source Program, but received %s" - % type(other)) + % type(other) + ) self.global_block()._copy_param_info_from(other.global_block()) @@ -6257,7 +6522,8 @@ class Program(object): if not isinstance(other, Program): raise TypeError( "Function Program._copy_param_info_from() needs to pass in a source Program, but received %s" - % type(other)) + % type(other) + ) self._is_distributed = other._is_distributed self._is_chief = other._is_chief self._parameters_on_pservers = other._parameters_on_pservers @@ -6285,12 +6551,12 @@ class Program(object): if not isinstance(other, Program): raise TypeError( "Function Program._copy_param_info_from() needs to pass in a source Program, but received %s" - % type(other)) + % type(other) + ) if not pruned_origin_block_id_map: pruned_origin_block_id_map = { - i: i - for i in range(self.desc.num_blocks()) + i: i for i in range(self.desc.num_blocks()) } # NOTE(zhiqiu): All vars in cloned program exist in original program. @@ -6418,10 +6684,13 @@ class Program(object): # can not be imported at the begainning of this file. # Therefore, the above two modules are dynamically imported. from .executor import global_scope + if scope is not None and not isinstance(scope, core._Scope): raise TypeError( - "`scope` should be None or `paddle.static.Scope'` type, but received {}." - .format(type(scope))) + "`scope` should be None or `paddle.static.Scope'` type, but received {}.".format( + type(scope) + ) + ) if scope is None: scope = global_scope() @@ -6429,15 +6698,19 @@ class Program(object): if not isinstance(mode, str): raise TypeError( "Type of `mode` should be string, but received {}.".format( - type(mode))) + type(mode) + ) + ) def is_parameter(var): return isinstance(var, Parameter) def is_persistable(var): - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -6456,8 +6729,10 @@ class Program(object): return is_parameter(var) or is_belong_to_optimizer(var) else: raise ValueError( - "`mode` string should be 'param', 'opt' or 'all', but received {}." - .format(mode)) + "`mode` string should be 'param', 'opt' or 'all', but received {}.".format( + mode + ) + ) var_list = filter(condition, self.list_vars()) @@ -6466,8 +6741,10 @@ class Program(object): var_temp = scope.find_var(var.name) if var_temp is None: raise ValueError( - "Can not find Variable '{}' in the scope. Make sure it is initialized" - .format(var.name)) + "Can not find Variable '{}' in the scope. 
Make sure it is initialized".format( + var.name + ) + ) state_dict[var.name] = var_temp.get_tensor() return state_dict @@ -6517,10 +6794,14 @@ class Program(object): if not isinstance(state_dict, dict): raise TypeError( "Type of `state_dict` should be dict, but received {}.".format( - type(state_dict))) + type(state_dict) + ) + ) vars_dict = {var.name: var for var in self.list_vars()} - condition = True if 'StructuredToParameterName@@' in state_dict else False + condition = ( + True if 'StructuredToParameterName@@' in state_dict else False + ) for name, value in state_dict.items(): if condition: if name == "StructuredToParameterName@@": @@ -6532,14 +6813,20 @@ class Program(object): vars_dict[name].set_value(value, scope) except ValueError as err: warnings.warn( - ("Skip loading for '{}'. ".format(name) + str(err))) + ("Skip loading for '{}'. ".format(name) + str(err)) + ) except TypeError as err: warnings.warn( - ("Skip loading for '{}'. ".format(name) + str(err))) + ("Skip loading for '{}'. ".format(name) + str(err)) + ) else: warnings.warn( - ("Skip loading for '{0}'. Because '{0}' not in the program." - .format(name))) + ( + "Skip loading for '{0}'. Because '{0}' not in the program.".format( + name + ) + ) + ) class Parameter(Variable, metaclass=ParameterMetaClass): @@ -6566,12 +6853,14 @@ class Parameter(Variable, metaclass=ParameterMetaClass): in optimizer. Default is True. """ - def __init__(self, - block, - shape, - dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - **kwargs): + def __init__( + self, + block, + shape, + dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + **kwargs + ): if shape is None: raise ValueError("The shape of Parameter should not be None") if dtype is None: @@ -6581,15 +6870,18 @@ class Parameter(Variable, metaclass=ParameterMetaClass): if each < 0: raise ValueError( "Each dimension of shape for Parameter must be greater than 0, but received %s" - % list(shape)) - - Variable.__init__(self, - block, - persistable=True, - shape=shape, - dtype=dtype, - type=type, - **kwargs) + % list(shape) + ) + + Variable.__init__( + self, + block, + persistable=True, + shape=shape, + dtype=dtype, + type=type, + **kwargs + ) self.trainable = kwargs.get('trainable', True) self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0}) @@ -6630,11 +6922,17 @@ class Parameter(Variable, metaclass=ParameterMetaClass): print(debug_str) """ assert isinstance(throw_on_error, bool) and isinstance( - with_details, bool) + with_details, bool + ) if with_details: res_str = Variable.to_string(self, throw_on_error, True) - additional_attr = ("trainable", "optimize_attr", "regularizer", - "do_model_average", "need_clip") + additional_attr = ( + "trainable", + "optimize_attr", + "regularizer", + "do_model_average", + "need_clip", + ) for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, getattr(self, attr_name)) else: @@ -6680,7 +6978,8 @@ class ParamBase(core.VarBase): if each < 0: raise ValueError( "Each dimension of shape for Parameter must be greater than 0, but received %s" - % list(shape)) + % list(shape) + ) if dtype is not None: if not isinstance(dtype, core.VarDesc.VarType): @@ -6688,10 +6987,13 @@ class ParamBase(core.VarBase): name = kwargs.get('name', unique_name.generate('_param_base')) - super(ParamBase, - self).__init__(dtype if dtype else core.VarDesc.VarType.FP32, - list(shape) if shape else [], name, - core.VarDesc.VarType.LOD_TENSOR, True) + super(ParamBase, self).__init__( + dtype if dtype else core.VarDesc.VarType.FP32, + list(shape) if shape else [], 
+ name, + core.VarDesc.VarType.LOD_TENSOR, + True, + ) trainable = kwargs.get('trainable', True) self.stop_gradient = not trainable @@ -6718,7 +7020,8 @@ class ParamBase(core.VarBase): else: raise ValueError( "The type of trainable MUST be bool, but the type is ", - type(trainable)) + type(trainable), + ) def __str__(self): """ @@ -6739,7 +7042,8 @@ class ParamBase(core.VarBase): # [-0.54217887, 0.48439729, 0.34082305]]) """ return "Parameter containing:\n{tensor}".format( - tensor=super(ParamBase, self).__str__()) + tensor=super(ParamBase, self).__str__() + ) def __deepcopy__(self, memo): """ @@ -6822,7 +7126,8 @@ class EagerParamBase(_core_eager_eagertensor): if each < 0: raise ValueError( "Each dimension of shape for Parameter must be greater than 0, but received %s" - % list(shape)) + % list(shape) + ) if dtype is not None: if not isinstance(dtype, core.VarDesc.VarType): @@ -6833,10 +7138,13 @@ class EagerParamBase(_core_eager_eagertensor): if isinstance(shape, core.eager.Tensor): shape = shape.numpy() - super(EagerParamBase, - self).__init__(dtype if dtype else core.VarDesc.VarType.FP32, - list(shape) if shape else [], name, - core.VarDesc.VarType.LOD_TENSOR, True) + super(EagerParamBase, self).__init__( + dtype if dtype else core.VarDesc.VarType.FP32, + list(shape) if shape else [], + name, + core.VarDesc.VarType.LOD_TENSOR, + True, + ) self.retain_grads() trainable = kwargs.get('trainable', True) @@ -6860,7 +7168,9 @@ class EagerParamBase(_core_eager_eagertensor): @dygraph_only def initialize(self): - assert self._init_func is not None, "Required self._init_func is not None, but received None." + assert ( + self._init_func is not None + ), "Required self._init_func is not None, but received None." self._init_func() # clear function handle to release resource self._init_func = None @@ -6876,13 +7186,16 @@ class EagerParamBase(_core_eager_eagertensor): else: raise ValueError( "The type of trainable MUST be bool, but the type is ", - type(trainable)) + type(trainable), + ) def _create_init_op(self, block): """ Call init_op_creator function to create initializer operation in block. """ - assert self._init_op_creator is not None, "Required self._init_op_creator is not None, but received None." + assert ( + self._init_op_creator is not None + ), "Required self._init_op_creator is not None, but received None." 
self._init_op_creator(block) def __str__(self): @@ -6904,7 +7217,8 @@ class EagerParamBase(_core_eager_eagertensor): # [-0.54217887, 0.48439729, 0.34082305]]) """ return "Parameter containing:\n{tensor}".format( - tensor=super(EagerParamBase, self).__str__()) + tensor=super(EagerParamBase, self).__str__() + ) def __deepcopy__(self, memo): """ @@ -7091,12 +7405,18 @@ def program_guard(main_program, startup_program=None): """ from .data_feeder import check_type - check_type(main_program, 'main_program', Program, - 'paddle.static.program_guard') + + check_type( + main_program, 'main_program', Program, 'paddle.static.program_guard' + ) main_program = switch_main_program(main_program) if startup_program is not None: - check_type(startup_program, 'startup_program', Program, - 'paddle.static.program_guard') + check_type( + startup_program, + 'startup_program', + Program, + 'paddle.static.program_guard', + ) # Tag the program __is_start_up as True startup_program._is_start_up_program_ = True startup_program = switch_startup_program(startup_program) @@ -7219,7 +7539,8 @@ def device_guard(device=None): if device not in ['cpu', 'gpu', 'npu', 'xpu', 'mlu', '', None]: raise ValueError( "The Attr(device) should be 'cpu' 'npu' 'xpu' 'mlu' or 'gpu', and it can also be empty string or None " - "when there is no need to specify device. But received %s" % device) + "when there is no need to specify device. But received %s" % device + ) if index: device = ":".join([device, index]) pre_device = switch_device(device) @@ -7249,9 +7570,11 @@ def _cuda_graph_guard(cuda_graph_attr=None): cuda_graph_attr(str|None): The cuda graph attr with the format of: cuda_graph_capture_mode;memory_pool_id;cuda_graph_id """ - assert not _non_static_mode( + assert ( + not _non_static_mode() ), "cuda_graph_guard only works under static mode" - assert core.is_compiled_with_cuda( + assert ( + core.is_compiled_with_cuda() ), "cuda_graph_guard context can be only used when Paddle is compiled with cuda" pre_mode = _switch_cuda_graph_mode(cuda_graph_attr) try: @@ -7281,7 +7604,8 @@ def set_flags(flags): _global_flags()[key] = value else: raise ValueError( - "Flag %s cannot set its value through this function." % (key)) + "Flag %s cannot set its value through this function." % (key) + ) def get_flags(flags): @@ -7308,22 +7632,24 @@ def get_flags(flags): flags_value = {} if isinstance(flags, (list, tuple)): for key in flags: - if (_global_flags().is_public(key)): + if _global_flags().is_public(key): value = _global_flags()[key] temp = {key: value} flags_value.update(temp) else: raise ValueError( - 'Flag %s cannot get its value through this function.' % - (key)) + 'Flag %s cannot get its value through this function.' + % (key) + ) elif isinstance(flags, str): - if (_global_flags().is_public(flags)): + if _global_flags().is_public(flags): value = _global_flags()[flags] temp = {flags: value} flags_value.update(temp) else: raise ValueError( - 'Flag %s cannot get its value through this function.' % (flags)) + 'Flag %s cannot get its value through this function.' 
% (flags) + ) else: raise TypeError('Flags in get_flags should be a list, tuple or string.') return flags_value @@ -7333,20 +7659,32 @@ def _get_paddle_place(place): "convert the string to paddle Place" if place is None: return place - if isinstance(place, (core.Place, core.XPUPlace, core.CPUPlace, - core.CUDAPinnedPlace, core.CUDAPlace, core.NPUPlace, - core.IPUPlace, core.MLUPlace, core.CustomPlace)): + if isinstance( + place, + ( + core.Place, + core.XPUPlace, + core.CPUPlace, + core.CUDAPinnedPlace, + core.CUDAPlace, + core.NPUPlace, + core.IPUPlace, + core.MLUPlace, + core.CustomPlace, + ), + ): return place if not isinstance(place, str): raise ValueError( - "place only support string which is 'Place' and so on.") + "place only support string which is 'Place' and so on." + ) place = place.lower() - if (place == "cpu"): + if place == "cpu": return core.CPUPlace() - if (place == "device"): + if place == "device": return core.Place() # GPU @@ -7354,8 +7692,9 @@ def _get_paddle_place(place): if place == "gpu_pinned" or place == "gpu" or avaliable_gpu_place: if not core.is_compiled_with_cuda(): raise ValueError( - "The device should not be {}, since PaddlePaddle is " \ - "not compiled with CUDA".format(avaliable_gpu_place)) + "The device should not be {}, since PaddlePaddle is " + "not compiled with CUDA".format(avaliable_gpu_place) + ) if place == "gpu_pinned": return core.CUDAPinnedPlace() elif place == "gpu": @@ -7371,8 +7710,9 @@ def _get_paddle_place(place): if avaliable_xpu_place: if not core.is_compiled_with_xpu(): raise ValueError( - "The device should not be {}, since PaddlePaddle is " \ - "not compiled with XPU".format(avaliable_xpu_place)) + "The device should not be {}, since PaddlePaddle is " + "not compiled with XPU".format(avaliable_xpu_place) + ) place_info_list = place.split(':', 1) device_id = place_info_list[1] device_id = int(device_id) @@ -7383,8 +7723,9 @@ def _get_paddle_place(place): if avaliable_npu_place: if not core.is_compiled_with_npu(): raise ValueError( - "The device should not be {}, since PaddlePaddle is " \ - "not compiled with NPU".format(avaliable_npu_place)) + "The device should not be {}, since PaddlePaddle is " + "not compiled with NPU".format(avaliable_npu_place) + ) place_info_list = place.split(':', 1) device_id = place_info_list[1] device_id = int(device_id) @@ -7395,8 +7736,9 @@ def _get_paddle_place(place): if avaliable_ipu_place: if not core.is_compiled_with_ipu(): raise ValueError( - "The device should not be {}, since PaddlePaddle is " \ - "not compiled with IPU".format(avaliable_ipu_place)) + "The device should not be {}, since PaddlePaddle is " + "not compiled with IPU".format(avaliable_ipu_place) + ) place_info_list = place.split(':', 1) device_id = place_info_list[1] device_id = int(device_id) @@ -7407,16 +7749,19 @@ def _get_paddle_place(place): if avaliable_mlu_place: if not core.is_compiled_with_mlu(): raise ValueError( - "The device should not be {}, since PaddlePaddle is " \ - "not compiled with MLU".format(avaliable_mlu_place)) + "The device should not be {}, since PaddlePaddle is " + "not compiled with MLU".format(avaliable_mlu_place) + ) place_info_list = place.split(':', 1) device_id = place_info_list[1] device_id = int(device_id) return core.MLUPlace(device_id) raise ValueError( - "Paddle supports CPUPlace, CUDAPlace,CUDAPinnedPlace, XPUPlace, IPUPlace, MLUPlace and NPUPlace, but received {}." 
- .format(place)) + "Paddle supports CPUPlace, CUDAPlace,CUDAPinnedPlace, XPUPlace, IPUPlace, MLUPlace and NPUPlace, but received {}.".format( + place + ) + ) def _get_paddle_place_list(places): diff --git a/python/paddle/fluid/generator.py b/python/paddle/fluid/generator.py index 5bbe7a0e12c3fa29d549d3fa2ad3402272203269..4982de95f498c87487c0f535234c506baad9a90d 100644 --- a/python/paddle/fluid/generator.py +++ b/python/paddle/fluid/generator.py @@ -43,4 +43,5 @@ class Generator(core.Generator): else: raise ValueError( "Generator class with %s does is not supported yet, currently only support generator with CPUPlace " - % place) + % place + ) diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index 9579bff409efe94b98b64d21ef6d13da8b3d53a5..b7ef1c7531fc00fd47167303500e64adf997f395 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -26,7 +26,6 @@ def crepr(v): class Rank(object): - def __init__(self, kind, name, priority): ''' kind: str @@ -42,8 +41,12 @@ class Rank(object): if not self.nodes: return '' - return '{' + 'rank={};'.format(self.kind) + \ - ','.join([node.name for node in self.nodes]) + '}' + return ( + '{' + + 'rank={};'.format(self.kind) + + ','.join([node.name for node in self.nodes]) + + '}' + ) class Graph(object): @@ -84,28 +87,36 @@ class Graph(object): def compile(self, dot_path): file = open(dot_path, 'w') file.write(self.__str__()) - image_path = os.path.join(os.path.dirname(dot_path), - dot_path[:-3] + "pdf") + image_path = os.path.join( + os.path.dirname(dot_path), dot_path[:-3] + "pdf" + ) cmd = ["dot", "-Tpdf", dot_path, "-o", image_path] - subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) logging.warning("write block debug graph to {}".format(image_path)) return image_path def show(self, dot_path): image = self.compile(dot_path) cmd = ["open", image] - subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) def _rank_repr(self): - ranks = sorted(self.rank_groups.items(), - key=functools.cmp_to_key( - lambda a, b: a[1].priority > b[1].priority)) + ranks = sorted( + self.rank_groups.items(), + key=functools.cmp_to_key( + lambda a, b: a[1].priority > b[1].priority + ), + ) repr = [] for x in ranks: repr.append(str(x[1])) @@ -118,8 +129,9 @@ class Graph(object): ] for attr in self.attrs: - reprs.append("{key}={value};".format(key=attr, - value=crepr(self.attrs[attr]))) + reprs.append( + "{key}={value};".format(key=attr, value=crepr(self.attrs[attr])) + ) reprs.append(self._rank_repr()) @@ -147,14 +159,18 @@ class Node(object): reprs = '{name} [label={label} {extra} ];'.format( name=self.name, label=self.label, - extra=',' + ','.join("%s=%s" % (key, crepr(value)) - for key, value in self.attrs.items()) - if self.attrs else "") + extra=',' + + ','.join( + "%s=%s" % (key, crepr(value)) + for key, value in self.attrs.items() + ) + if self.attrs + else "", + ) return reprs class Edge(object): - def __init__(self, source, target, **attrs): ''' Link source to target. 
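# --- Illustrative sketch (not part of the patch) -----------------------------
# The Node.__repr__ hunk above and the Edge.__repr__ hunk that follows only
# re-wrap the format strings that emit Graphviz DOT text. A small standalone
# reproduction of that formatting (node names and attribute values are made up;
# crepr mirrors the quoting helper whose body is not shown in these hunks):
def crepr(v):
    # quote strings, leave other values untouched (assumed behaviour)
    return '"%s"' % v if isinstance(v, str) else str(v)


def node_repr(name, label, **attrs):
    extra = (
        ',' + ','.join("%s=%s" % (k, crepr(v)) for k, v in attrs.items())
        if attrs
        else ""
    )
    return '{name} [label={label} {extra} ];'.format(
        name=name, label=label, extra=extra
    )


def edge_repr(source, target, **attrs):
    extra = (
        ""
        if not attrs
        else "["
        + ','.join("{}={}".format(k, crepr(v)) for k, v in attrs.items())
        + "]"
    )
    return "{source} -> {target} {extra}".format(
        source=source, target=target, extra=extra
    )


print(node_repr("n0", crepr("fc"), color="#148b97"))  # n0 [label="fc" ,color="#148b97" ];
print(edge_repr("n0", "n1", color="orange"))  # n0 -> n1 [color="orange"]
# ------------------------------------------------------------------------------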
@@ -171,9 +187,15 @@ class Edge(object): repr = "{source} -> {target} {extra}".format( source=self.source.name, target=self.target.name, - extra="" if not self.attrs else "[" + - ','.join("{}={}".format(attr[0], crepr(attr[1])) - for attr in self.attrs.items()) + "]") + extra="" + if not self.attrs + else "[" + + ','.join( + "{}={}".format(attr[0], crepr(attr[1])) + for attr in self.attrs.items() + ) + + "]", + ) return repr @@ -202,31 +224,34 @@ class GraphPreviewGenerator(object): self.graph.show(path) def add_param(self, name, data_type, highlight=False): - label = '\n'.join([ - '<', - ' ', - ' ', - ' ', - ' ', - ' ' - ' ', - '
', - ' ', - name, - ' ', - '
', - str(data_type), - '
>', - ]) - return self.graph.node(label, - prefix="param", - description=name, - shape="none", - style="rounded,filled,bold", - width="1.3", - color="#148b97" if not highlight else "orange", - fontcolor="#ffffff", - fontname="Arial") + label = '\n'.join( + [ + '<', + ' ', + ' ', + ' ', + ' ', + ' ' ' ', + '
', + ' ', + name, + ' ', + '
', + str(data_type), + '
>', + ] + ) + return self.graph.node( + label, + prefix="param", + description=name, + shape="none", + style="rounded,filled,bold", + width="1.3", + color="#148b97" if not highlight else "orange", + fontcolor="#ffffff", + fontname="Arial", + ) def add_op(self, opType, **kwargs): highlight = False @@ -247,21 +272,25 @@ class GraphPreviewGenerator(object): ) def add_arg(self, name, highlight=False): - return self.graph.node(crepr(name), - prefix="arg", - description=name, - shape="box", - style="rounded,filled,bold", - fontname="Arial", - fontcolor="#999999", - color="#dddddd" if not highlight else "orange") + return self.graph.node( + crepr(name), + prefix="arg", + description=name, + shape="box", + style="rounded,filled,bold", + fontname="Arial", + fontcolor="#999999", + color="#dddddd" if not highlight else "orange", + ) def add_edge(self, source, target, **kwargs): highlight = False if 'highlight' in kwargs: highlight = kwargs['highlight'] del kwargs['highlight'] - return self.graph.edge(source, - target, - color="#00000" if not highlight else "orange", - **kwargs) + return self.graph.edge( + source, + target, + color="#00000" if not highlight else "orange", + **kwargs + ) diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index e0d8062877ca6de43c52eebd6cf93c7b80602298..134723cdbc0c76adb086e637816e0cf75286dcaa 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -55,7 +55,8 @@ def _get_logger(log_level, name="auto_checkpoint"): log_handler = logging.StreamHandler() log_format = logging.Formatter( - '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s') + '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s' + ) log_handler.setFormatter(log_format) logger.addHandler(log_handler) @@ -63,12 +64,12 @@ def _get_logger(log_level, name="auto_checkpoint"): def _thread_checker(): - assert current_thread().name == "MainThread", \ - "auto checkpoint must run under main thread" + assert ( + current_thread().name == "MainThread" + ), "auto checkpoint must run under main thread" class AutoCheckpointChecker(object): - def __init__(self): self._run_env = None self._platform = None @@ -91,35 +92,43 @@ class AutoCheckpointChecker(object): self._hdfs_name = os.environ["PADDLE_EDL_HDFS_NAME"] self._hdfs_ugi = os.environ["PADDLE_EDL_HDFS_UGI"] self._hdfs_checkpoint_path = os.environ[ - "PADDLE_EDL_HDFS_CHECKPOINT_PATH"] + "PADDLE_EDL_HDFS_CHECKPOINT_PATH" + ] self._trainer_id = int(os.environ["PADDLE_TRAINER_ID"]) self._ce_test = int(os.getenv("PADDLE_EDL_ONLY_FOR_CE_TEST", "0")) self._fs_cache = os.getenv("PADDLE_EDL_FS_CACHE", ".cache") self._save_checkpoint_inter = int( - os.getenv("PADDLE_EDL_SAVE_CHECKPOINT_INTER", "900")) # s + os.getenv("PADDLE_EDL_SAVE_CHECKPOINT_INTER", "900") + ) # s if not self._ce_test: - assert len(self._hdfs_home) > 3 and \ - len(self._hdfs_name) > 6 and \ - len(self._hdfs_ugi) > 3 and \ - len(self._hdfs_checkpoint_path) > 0, "hdfs environ must set" + assert ( + len(self._hdfs_home) > 3 + and len(self._hdfs_name) > 6 + and len(self._hdfs_ugi) > 3 + and len(self._hdfs_checkpoint_path) > 0 + ), "hdfs environ must set" else: - assert len(self._hdfs_home) > 3 and \ - len(self._hdfs_checkpoint_path) > 0, "hdfs environ must set" + assert ( + len(self._hdfs_home) > 3 + and len(self._hdfs_checkpoint_path) > 0 + ), "hdfs environ must set" except Exception as e: logger.fatal("exception:{}".format(e)) 
sys.exit(1) def get_range_checkpoint_path(self, name): - return "{}/{}/range/{}".format(self.hdfs_checkpoint_path, self.job_id, - name) + return "{}/{}/range/{}".format( + self.hdfs_checkpoint_path, self.job_id, name + ) def get_exe_checkpoint_path(self, name): - return "{}/{}/exe/{}".format(self.hdfs_checkpoint_path, self.job_id, - name) + return "{}/{}/exe/{}".format( + self.hdfs_checkpoint_path, self.job_id, name + ) def get_job_path(self): return "{}/{}".format(self.hdfs_checkpoint_path, self.job_id) @@ -132,22 +141,30 @@ class AutoCheckpointChecker(object): if _non_static_mode(): return False - return self._run_env is not None and \ - self._platform is not None and \ - self._job_id is not None and \ - self._hdfs_home is not None and \ - self._hdfs_name is not None and \ - self._hdfs_ugi is not None and \ - self._hdfs_checkpoint_path is not None and \ - self._trainer_id is not None + return ( + self._run_env is not None + and self._platform is not None + and self._job_id is not None + and self._hdfs_home is not None + and self._hdfs_name is not None + and self._hdfs_ugi is not None + and self._hdfs_checkpoint_path is not None + and self._trainer_id is not None + ) def __str__(self): return "run_env:{} platform:{} job_id:{} \ hdfs_home:{} hdfs_name:{} hdfs_ugi:{} \ hdfs_checkpoint_path:{} trainer_id:{} ce_test".format( - self._run_env, self._platform, self._hdfs_home, self._hdfs_name, - self._hdfs_ugi, self._hdfs_checkpoint_path, self._trainer_id, - self._ce_test) + self._run_env, + self._platform, + self._hdfs_home, + self._hdfs_name, + self._hdfs_ugi, + self._hdfs_checkpoint_path, + self._trainer_id, + self._ce_test, + ) @property def trainer_id(self): @@ -191,7 +208,6 @@ class AutoCheckpointChecker(object): class ExeTrainStatus(SerializableBase): - def __init__(self): self._epoch_no = -1 # start epoch_no self._hash_key = None @@ -207,13 +223,15 @@ class ExeTrainStatus(SerializableBase): self._file_name = "exe_train_status" def __eq__(self, t): - return self._epoch_no == t._epoch_no and \ - self._hash_key == t._hash_key and \ - self._key == t._key and \ - self._checkpoint_path == t._checkpoint_path and \ - self._checkpoint_no == t._checkpoint_no and \ - self._exe_name == t._exe_name and \ - self._program_name == t._program_name + return ( + self._epoch_no == t._epoch_no + and self._hash_key == t._hash_key + and self._key == t._key + and self._checkpoint_path == t._checkpoint_path + and self._checkpoint_no == t._checkpoint_no + and self._exe_name == t._exe_name + and self._program_name == t._program_name + ) def __ne__(self, t): return not self == t @@ -256,7 +274,7 @@ class ExeTrainStatus(SerializableBase): "restored_from": self._restored_from, "exe_name": self._exe_name, "program_name": self._program_name, - "checkpoint_no": self._checkpoint_no + "checkpoint_no": self._checkpoint_no, } def __str__(self): @@ -264,12 +282,9 @@ class ExeTrainStatus(SerializableBase): class TrainEpochRange(SerializableBase): - - def __init__(self, - max_epoch_num, - name, - checkpoint_inter=None, - restored=True): + def __init__( + self, max_epoch_num, name, checkpoint_inter=None, restored=True + ): self._max_epoch_num = max_epoch_num self._epoch_no = -1 # current epoch_no self._name = name @@ -282,8 +297,9 @@ class TrainEpochRange(SerializableBase): self._save_checkpoint_inter = checkpoint_inter else: self._save_checkpoint_inter = self._checker.save_checkpoint_inter - assert self._save_checkpoint_inter >= 0, "checkpointer:{} must >=0".format( - self._save_checkpoint_inter) + assert ( + 
self._save_checkpoint_inter >= 0 + ), "checkpointer:{} must >=0".format(self._save_checkpoint_inter) self._last_checkpoint_time = time.time() self._load_cp_nos = None @@ -301,13 +317,14 @@ class TrainEpochRange(SerializableBase): config = { "fs.default.name": self._checker.hdfs_name, - "hadoop.job.ugi": self._checker.hdfs_ugi + "hadoop.job.ugi": self._checker.hdfs_ugi, } if self._checker.ce_test: config = None from paddle.distributed.fleet.utils.fs import HDFSClient + self._hdfs = HDFSClient(self._checker.hdfs_home, config) self._cper = CheckpointSaver(self._hdfs) @@ -321,10 +338,13 @@ class TrainEpochRange(SerializableBase): epoch_no = -1 for i in cp_nos[::-1]: t = TrainEpochRange(self._max_epoch_num, self.name, restored=False) - self._cper.load_checkpoint(self._checkpoint_path, [t], - self._checker.trainer_id, - checkpoint_no=i, - local_cache_path=self._checker._fs_cache) + self._cper.load_checkpoint( + self._checkpoint_path, + [t], + self._checker.trainer_id, + checkpoint_no=i, + local_cache_path=self._checker._fs_cache, + ) cps.append(t) logger.debug("look for valid:{} t:{}".format(i, t._serialize())) if epoch_no < 0: @@ -344,14 +364,18 @@ class TrainEpochRange(SerializableBase): if g_acp_type == CONST_ACP_TYPE: # get the last one - self._cper.load_checkpoint(self._checkpoint_path, [self], - self._checker.trainer_id, - local_cache_path=self._checker._fs_cache) + self._cper.load_checkpoint( + self._checkpoint_path, + [self], + self._checker.trainer_id, + local_cache_path=self._checker._fs_cache, + ) self._restored_from = CONST_CHECKPOINT self._checkpoint_epoch_no = self._epoch_no - logger.info("load tain_epoch_range checkpoint:{}".format( - self._serialize())) + logger.info( + "load tain_epoch_range checkpoint:{}".format(self._serialize()) + ) elif g_acp_type == CONST_DACP_TYPE: t, i = self._look_for_valid(self._load_cp_nos) @@ -359,15 +383,19 @@ class TrainEpochRange(SerializableBase): self._restored_from = CONST_MEMORYINIT return - self._cper.load_checkpoint(self._checkpoint_path, [self], - self._checker.trainer_id, - checkpoint_no=i, - local_cache_path=self._checker._fs_cache) + self._cper.load_checkpoint( + self._checkpoint_path, + [self], + self._checker.trainer_id, + checkpoint_no=i, + local_cache_path=self._checker._fs_cache, + ) self._restored_from = CONST_CHECKPOINT self._checkpoint_epoch_no = self._epoch_no - logger.info("load tain_epoch_range checkpoint:{}".format( - self._serialize())) + logger.info( + "load tain_epoch_range checkpoint:{}".format(self._serialize()) + ) else: assert False, "not supported acp_type:{}".format(g_acp_type) @@ -378,7 +406,7 @@ class TrainEpochRange(SerializableBase): "name": self._name, "checkpoint_path": self._checkpoint_path, "restored_from": self._restored_from, - "checkpoint_epoch_no": self._checkpoint_epoch_no + "checkpoint_epoch_no": self._checkpoint_epoch_no, } return d @@ -438,12 +466,16 @@ class TrainEpochRange(SerializableBase): self._max_epoch_num = sys.maxint assert self._epoch_no >= -1, "self._epoch_no:{} must >=-1".format( - self._epoch_no) + self._epoch_no + ) self._last_checkpoint_time = time.time() start = self._epoch_no + 1 - logger.info("started epoch_no:{} max_epoch_num:{}".format( - start, self._max_epoch_num)) + logger.info( + "started epoch_no:{} max_epoch_num:{}".format( + start, self._max_epoch_num + ) + ) for i in range(start, self._max_epoch_num): self._epoch_no = i @@ -458,11 +490,16 @@ class TrainEpochRange(SerializableBase): # not save last one because exe and program can't be restored. 
if self._checker.trainer_id == 0: - if time.time() - self._last_checkpoint_time >= \ - self._save_checkpoint_inter: + if ( + time.time() - self._last_checkpoint_time + >= self._save_checkpoint_inter + ): if g_acp_type == CONST_ACP_TYPE: # not save the last one - if self._max_epoch_num > 0 and self._epoch_no != self._max_epoch_num - 1: + if ( + self._max_epoch_num > 0 + and self._epoch_no != self._max_epoch_num - 1 + ): self._save_checkpoint() elif g_acp_type == CONST_DACP_TYPE: self._save_checkpoint() @@ -484,9 +521,11 @@ class TrainEpochRange(SerializableBase): p = self._checker.get_exe_checkpoint_path(t._hash_key) t._epoch_no = self.get() path, checkpoint_no = self._cper.save_checkpoint( - p, [m], + p, + [m], self._checker.trainer_id, - local_cache_path=self._checker._fs_cache) + local_cache_path=self._checker._fs_cache, + ) # index info t._checkpoint_path = path t._checkpoint_no = checkpoint_no @@ -496,10 +535,14 @@ class TrainEpochRange(SerializableBase): logger.debug("save executor checkpoint:{}".format(t._serialize())) if len(self._exe_status) > 0: - self._cper.save_checkpoint(self._checkpoint_path, [self], - local_cache_path=self._checker._fs_cache) - logger.info("save train_epoch_range checkpoint:{}".format( - self._serialize())) + self._cper.save_checkpoint( + self._checkpoint_path, + [self], + local_cache_path=self._checker._fs_cache, + ) + logger.info( + "save train_epoch_range checkpoint:{}".format(self._serialize()) + ) self._generate_flag() @@ -538,13 +581,13 @@ def _check_program_oprole(program): def _can_auto_checkpoint(prog): - if not isinstance(prog, compiler.CompiledProgram) and \ - not isinstance(prog, Program): + if not isinstance(prog, compiler.CompiledProgram) and not isinstance( + prog, Program + ): return False if isinstance(prog, compiler.CompiledProgram): - if prog._program is None or \ - prog._program._is_distributed: + if prog._program is None or prog._program._is_distributed: return False else: if prog._is_distributed: @@ -564,8 +607,11 @@ def _can_auto_checkpoint(prog): g_program_attr[program._auto_checkpoint_name] = ret if not ret: - logger.debug("program {} need't to auto checkpoint".format( - program._auto_checkpoint_name)) + logger.debug( + "program {} need't to auto checkpoint".format( + program._auto_checkpoint_name + ) + ) return False return g_checker.valid() and g_train_epoch_range is not None @@ -597,7 +643,8 @@ def train_epoch_range(max_epoch_num, save_checkpoint_inter=None): global g_acp_type if not _get_checker().valid(): logger.warning( - "auto checkpoint will take effect automaticly on PaddleCloud") + "auto checkpoint will take effect automaticly on PaddleCloud" + ) for i in _normal_yield(max_epoch_num): yield i @@ -617,7 +664,8 @@ def train_epoch_range(max_epoch_num, save_checkpoint_inter=None): g_train_epoch_range = TrainEpochRange( max_epoch_num, g_checker.generate_range_name(), - checkpoint_inter=save_checkpoint_inter) + checkpoint_inter=save_checkpoint_inter, + ) for i in g_train_epoch_range.next(): yield i @@ -643,12 +691,16 @@ def _auto_checkpoint(exe, prog): assert program._auto_checkpoint_name != None exe_status = g_train_epoch_range._exe_status - key = _get_running_key(exe._auto_checkpoint_name, - program._auto_checkpoint_name) + key = _get_running_key( + exe._auto_checkpoint_name, program._auto_checkpoint_name + ) if g_train_epoch_range.restored_from == CONST_CHECKPOINT: - assert key in exe_status, "when restored key:{} must be in train_epoch_range:{}".format( - key, g_train_epoch_range) + assert ( + key in exe_status + ), "when 
restored key:{} must be in train_epoch_range:{}".format( + key, g_train_epoch_range + ) t = None if key in exe_status: @@ -656,10 +708,13 @@ def _auto_checkpoint(exe, prog): if t._restored_from is None: a = CheckpointSaver(g_train_epoch_range._hdfs) m = PaddleModel(exe, program) - a.load_checkpoint(g_checker.get_exe_checkpoint_path(key), [m], - trainer_id=g_checker.trainer_id, - checkpoint_no=t._checkpoint_no, - local_cache_path=g_checker._fs_cache) + a.load_checkpoint( + g_checker.get_exe_checkpoint_path(key), + [m], + trainer_id=g_checker.trainer_id, + checkpoint_no=t._checkpoint_no, + local_cache_path=g_checker._fs_cache, + ) t._restored_from = CONST_CHECKPOINT logger.info("load executor checkpoint {}".format(t)) t._exe = exe diff --git a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py index c8aeb50f157c0f66df920fef133cf5870cd57052..21e305afc16bca0dba630113f17ff17fcd6577ee 100644 --- a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py +++ b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py @@ -16,7 +16,6 @@ from ...compiler import CompiledProgram class SerializableBase(object): - def serialize(self, path): raise NotImplementedError @@ -25,7 +24,6 @@ class SerializableBase(object): class PaddleModel(SerializableBase): - def __init__(self, exe, program): self._exe = exe self._origin_program = program @@ -37,30 +35,33 @@ class PaddleModel(SerializableBase): def serialize(self, path): from ...io import save_persistables - save_persistables(executor=self._exe, - dirname=path, - main_program=self._program, - filename=self._file_name) + + save_persistables( + executor=self._exe, + dirname=path, + main_program=self._program, + filename=self._file_name, + ) def deserialize(self, path): from ...io import load_persistables - load_persistables(executor=self._exe, - dirname=path, - main_program=self._program, - filename=self._file_name) + load_persistables( + executor=self._exe, + dirname=path, + main_program=self._program, + filename=self._file_name, + ) -class CheckpointSaver(object): +class CheckpointSaver(object): def __init__(self, fs): self._fs = fs self._checkpoint_prefix = "__paddle_checkpoint__" - def save_checkpoint(self, - path, - slists, - trainer_id=None, - local_cache_path=".cache"): + def save_checkpoint( + self, path, slists, trainer_id=None, local_cache_path=".cache" + ): """ Serialize objects in slists to path Return really saved path and checkpoint_no @@ -69,7 +70,8 @@ class CheckpointSaver(object): self._fs.mkdirs(path) else: assert self._fs.is_dir(path), "path:{} must be a directory".format( - path) + path + ) max_no = self._get_last_checkpoint_no(path) if max_no < 0: @@ -81,13 +83,14 @@ class CheckpointSaver(object): saved_path = tmp_path from paddle.distributed.fleet.utils.fs import LocalFS + local_fs = LocalFS() cache_path = None if self._fs.need_upload_download(): - cache_path = "{}/{}.{}.saved_cache".format(local_cache_path, - self._checkpoint_prefix, - max_no) + cache_path = "{}/{}.{}.saved_cache".format( + local_cache_path, self._checkpoint_prefix, max_no + ) if trainer_id is not None: cache_path = "{}.{}".format(cache_path, trainer_id) @@ -95,8 +98,9 @@ class CheckpointSaver(object): if not local_fs.is_exist(cache_path): local_fs.mkdirs(cache_path) else: - assert local_fs.is_dir(cache_path), \ - "cache path:{} must be a directory".format(cache_path) + assert local_fs.is_dir( + cache_path + ), "cache path:{} must be a directory".format(cache_path) saved_path = cache_path @@ 
-111,13 +115,15 @@ class CheckpointSaver(object): return real_path, max_no - def load_checkpoint(self, - path, - slists, - trainer_id, - local_cache_path=".cache", - checkpoint_no=None, - ignore_empty=True): + def load_checkpoint( + self, + path, + slists, + trainer_id, + local_cache_path=".cache", + checkpoint_no=None, + ignore_empty=True, + ): """ Deserialize objects in slists from path Return really load path @@ -137,11 +143,12 @@ class CheckpointSaver(object): assert checkpoint_no >= 0 from paddle.distributed.fleet.utils.fs import LocalFS + local_fs = LocalFS() if self._fs.need_upload_download(): - cache_path = "{}/{}.{}.load_cache".format(local_cache_path, - self._checkpoint_prefix, - checkpoint_no) + cache_path = "{}/{}.{}.load_cache".format( + local_cache_path, self._checkpoint_prefix, checkpoint_no + ) if trainer_id is not None: cache_path = "{}.{}".format(cache_path, trainer_id) @@ -151,8 +158,9 @@ class CheckpointSaver(object): if local_fs.is_exist(cache_path): local_fs.delete(cache_path) - real_path = "{}/{}.{}".format(path, self._checkpoint_prefix, - checkpoint_no) + real_path = "{}/{}.{}".format( + path, self._checkpoint_prefix, checkpoint_no + ) load_path = real_path if self._fs.need_upload_download(): self._fs.download(real_path, cache_path) @@ -217,8 +225,9 @@ class CheckpointSaver(object): try: n = int(g[1]) if n not in s: - path = "{}/{}.{}".format(root_path, self._checkpoint_prefix, - n) + path = "{}/{}.{}".format( + root_path, self._checkpoint_prefix, n + ) self._fs.delete(path) except Exception as e: print(e) diff --git a/python/paddle/fluid/incubate/data_generator/__init__.py b/python/paddle/fluid/incubate/data_generator/__init__.py index 60bfa9eb1108ab5aecf7ecd27f0f89cf1b0e418e..10e4fba92dd5a78636c92d82f0f27b2840c975cc 100644 --- a/python/paddle/fluid/incubate/data_generator/__init__.py +++ b/python/paddle/fluid/incubate/data_generator/__init__.py @@ -31,8 +31,9 @@ class DataGenerator(object): def _set_line_limit(self, line_limit): if not isinstance(line_limit, int): - raise ValueError("line_limit%s must be in int type" % - type(line_limit)) + raise ValueError( + "line_limit%s must be in int type" % type(line_limit) + ) if line_limit < 1: raise ValueError("line_limit can not less than 1") self._line_limit = line_limit @@ -140,7 +141,8 @@ class DataGenerator(object): Return a string data that can be read directly by the datafeed. ''' raise NotImplementedError( - "pls use MultiSlotDataGenerator or PairWiseDataGenerator") + "pls use MultiSlotDataGenerator or PairWiseDataGenerator" + ) def generate_sample(self, line): ''' @@ -172,8 +174,9 @@ class DataGenerator(object): return local_iter ''' raise NotImplementedError( - "Please rewrite this function to return a list or tuple: " + - "[(name, [feasign, ...]), ...] or ((name, [feasign, ...]), ...)") + "Please rewrite this function to return a list or tuple: " + + "[(name, [feasign, ...]), ...] 
or ((name, [feasign, ...]), ...)" + ) def generate_batch(self, samples): ''' @@ -214,7 +217,6 @@ class DataGenerator(object): # add more generalized DataGenerator that can adapt user-defined slot # for example, [(name, float_list), (name, str_list), (name, int_list)] class MultiSlotStringDataGenerator(DataGenerator): - def _gen_str(self, line): ''' Further processing the output of the process() function rewritten by @@ -238,7 +240,8 @@ class MultiSlotStringDataGenerator(DataGenerator): if not isinstance(line, list) and not isinstance(line, tuple): raise ValueError( "the output of process() must be in list or tuple type" - "Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]") + "Examples: [('words', ['1926', '08', '17']), ('label', ['1'])]" + ) output = "" for index, item in enumerate(line): name, elements = item @@ -252,7 +255,6 @@ class MultiSlotStringDataGenerator(DataGenerator): class MultiSlotDataGenerator(DataGenerator): - def _gen_str(self, line): ''' Further processing the output of the process() function rewritten by @@ -281,7 +283,8 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(line, list) and not isinstance(line, tuple): raise ValueError( "the output of process() must be in list or tuple type" - "Example: [('words', [1926, 08, 17]), ('label', [1])]") + "Example: [('words', [1926, 08, 17]), ('label', [1])]" + ) output = "" if self._proto_info is None: @@ -291,8 +294,9 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(name, str): raise ValueError("name%s must be in str type" % type(name)) if not isinstance(elements, list): - raise ValueError("elements%s must be in list type" % - type(elements)) + raise ValueError( + "elements%s must be in list type" % type(elements) + ) if not elements: raise ValueError( "the elements of each field can not be empty, you need padding it in process()." @@ -305,10 +309,12 @@ class MultiSlotDataGenerator(DataGenerator): if isinstance(elem, float): self._proto_info[-1] = (name, "float") elif not isinstance(elem, int) and not isinstance( - elem, long): + elem, long + ): raise ValueError( - "the type of element%s must be in int or float" % - type(elem)) + "the type of element%s must be in int or float" + % type(elem) + ) output += " " + str(elem) else: if len(line) != len(self._proto_info): @@ -320,8 +326,9 @@ class MultiSlotDataGenerator(DataGenerator): if not isinstance(name, str): raise ValueError("name%s must be in str type" % type(name)) if not isinstance(elements, list): - raise ValueError("elements%s must be in list type" % - type(elements)) + raise ValueError( + "elements%s must be in list type" % type(elements) + ) if not elements: raise ValueError( "the elements of each field can not be empty, you need padding it in process()." @@ -329,7 +336,8 @@ class MultiSlotDataGenerator(DataGenerator): if name != self._proto_info[index][0]: raise ValueError( "the field name of two given line are not match: require<%s>, get<%s>." 
- % (self._proto_info[index][0], name)) + % (self._proto_info[index][0], name) + ) if output: output += " " output += str(len(elements)) @@ -338,9 +346,11 @@ class MultiSlotDataGenerator(DataGenerator): if isinstance(elem, float): self._proto_info[index] = (name, "float") elif not isinstance(elem, int) and not isinstance( - elem, long): + elem, long + ): raise ValueError( "the type of element%s must be in int or float" - % type(elem)) + % type(elem) + ) output += " " + str(elem) return output + "\n" diff --git a/python/paddle/fluid/incubate/fleet/base/fleet_base.py b/python/paddle/fluid/incubate/fleet/base/fleet_base.py index f49dbafb5547a49378ab8709fe4082dfa5cd3cbd..ea03e9305605a19d8ad792180ef297f66784bba0 100644 --- a/python/paddle/fluid/incubate/fleet/base/fleet_base.py +++ b/python/paddle/fluid/incubate/fleet/base/fleet_base.py @@ -21,7 +21,9 @@ from paddle.optimizer import SGD as SGD_v2 from paddle.fluid.incubate.fleet.base.mode import Mode from paddle.distributed.fleet.base.role_maker import RoleMakerBase -from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision +from paddle.fluid.contrib.mixed_precision.decorator import ( + OptimizerWithMixedPrecision, +) from . import mode __all__ = ['Fleet', 'DistributedOptimizer'] @@ -38,6 +40,7 @@ class Fleet(object): Returns: None """ + __metaclass__ = abc.ABCMeta def __init__(self, mode): @@ -179,7 +182,7 @@ class Fleet(object): trainer_files = [[]] * trainers begin = 0 for i in range(trainers): - trainer_files[i] = files[begin:begin + blocks[i]] + trainer_files[i] = files[begin : begin + blocks[i]] begin += blocks[i] return trainer_files[trainer_id] @@ -199,10 +202,14 @@ class Fleet(object): self._executor = Executor(fluid.CPUPlace()) if role_maker and not isinstance(role_maker, RoleMakerBase): - from paddle.fluid.incubate.fleet.base.role_maker import RoleMakerBase as RoleMakerBaseIncubate + from paddle.fluid.incubate.fleet.base.role_maker import ( + RoleMakerBase as RoleMakerBaseIncubate, + ) + if role_maker and not isinstance(role_maker, RoleMakerBaseIncubate): raise TypeError( - "role_maker must be an instance of RoleMakerBase") + "role_maker must be an instance of RoleMakerBase" + ) self._role_maker = role_maker self._role_maker.generate_role() @@ -245,13 +252,15 @@ class Fleet(object): pass @abc.abstractmethod - def save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True): + def save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + ): pass @abc.abstractmethod @@ -277,24 +286,29 @@ class DistributedOptimizer(object): None """ + __metaclass__ = abc.ABCMeta def __init__(self, optimizer, strategy=None): - if not isinstance(optimizer, SGD.__bases__) \ - and not isinstance(optimizer, OptimizerWithMixedPrecision) \ - and not isinstance(optimizer, SGD_v2.__base__): + if ( + not isinstance(optimizer, SGD.__bases__) + and not isinstance(optimizer, OptimizerWithMixedPrecision) + and not isinstance(optimizer, SGD_v2.__base__) + ): raise TypeError("optimizer must be an instance of Optimizer") self._optimizer = optimizer self._strategy = strategy @abc.abstractmethod - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ First part of `minimize`, do auto-diff to append backward 
ops for the current program. @@ -341,12 +355,14 @@ class DistributedOptimizer(object): pass @abc.abstractmethod - def minimize(self, - losses, - scopes=None, - startup_programs=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, + losses, + scopes=None, + startup_programs=None, + parameter_list=None, + no_grad_set=None, + ): """ Add operations to minimize `loss` by updating `parameter_list`. diff --git a/python/paddle/fluid/incubate/fleet/base/mode.py b/python/paddle/fluid/incubate/fleet/base/mode.py index a940630d4c6caa4464d1d27a679d79808b4be6c2..80bd7e6b4b51dcb193f8a4517ebbab7a409895af 100644 --- a/python/paddle/fluid/incubate/fleet/base/mode.py +++ b/python/paddle/fluid/incubate/fleet/base/mode.py @@ -19,6 +19,7 @@ class Mode: """ There are various mode for fleet, each of them is designed for different model. """ + TRANSPILER = 1 PSLIB = 2 COLLECTIVE = 3 diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py index 31a20a409b6d831c206b62bbcbcdda4479acbe31..3349dcf275b3fc8ff875013c1808694f77e1f43a 100644 --- a/python/paddle/fluid/incubate/fleet/base/role_maker.py +++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py @@ -19,8 +19,13 @@ import os import time __all__ = [ - 'Role', 'RoleMakerBase', 'MPISymetricRoleMaker', 'UserDefinedRoleMaker', - 'UserDefinedCollectiveRoleMaker', 'PaddleCloudRoleMaker', 'GeneralRoleMaker' + 'Role', + 'RoleMakerBase', + 'MPISymetricRoleMaker', + 'UserDefinedRoleMaker', + 'UserDefinedCollectiveRoleMaker', + 'PaddleCloudRoleMaker', + 'GeneralRoleMaker', ] @@ -145,8 +150,11 @@ class RoleMakerBase(object): def to_string(self): return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format( - self._role, self._current_id, self._worker_endpoints, - self._server_endpoints) + self._role, + self._current_id, + self._worker_endpoints, + self._server_endpoints, + ) def all_gather(self, input): """ @@ -196,6 +204,7 @@ class MPIRoleMaker(RoleMakerBase): """Init.""" super(MPIRoleMaker, self).__init__() from mpi4py import MPI + self.MPI = MPI self._comm = MPI.COMM_WORLD self._node_type_comm = None @@ -251,6 +260,7 @@ class MPIRoleMaker(RoleMakerBase): def get_local_ip(self): """Return get local ip.""" import socket + self._ip = socket.gethostbyname(socket.gethostname()) return self._ip @@ -347,6 +357,7 @@ class MPISymetricRoleMaker(MPIRoleMaker): """ if self._pserver_rand_port <= 0: import random + random.seed(self._server_num()) # port will be randomly generated from 60001 to 63999 # random seed is server num so that all nodes will get @@ -495,7 +506,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): # Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set # format: string(ip:port), eg. 
127.0.0.1:6001 eplist = os.environ["PADDLE_PSERVERS_IP_PORT_LIST"].split( - ",") + "," + ) # note that, we usually assign the same port to different ips # if we run parameter server training in local mode # port should be different in environment variables @@ -505,7 +517,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): if training_role not in ["TRAINER", "PSERVER"]: raise ValueError( - "TRAINING_ROLE must be PSERVER or TRAINER") + "TRAINING_ROLE must be PSERVER or TRAINER" + ) if training_role == "TRAINER": role = Role.WORKER @@ -518,7 +531,8 @@ class PaddleCloudRoleMaker(RoleMakerBase): current_id = eplist.index(curr_endpoint) else: raise ValueError( - "TRAINING_ROLE must be PSERVER or TRAINER") + "TRAINING_ROLE must be PSERVER or TRAINER" + ) except ValueError as ve: raise ValueError( "something wrong with PaddleCloud, please check environment" @@ -530,12 +544,15 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._current_id = current_id else: self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) - self._training_role = os.getenv("PADDLE_TRAINING_ROLE", - "TRAINER") - assert (self._training_role == "TRAINER") + self._training_role = os.getenv( + "PADDLE_TRAINING_ROLE", "TRAINER" + ) + assert self._training_role == "TRAINER" self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS") self._current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT") - assert self._worker_endpoints is not None, "can't find PADDLE_TRAINER_ENDPOINTS" + assert ( + self._worker_endpoints is not None + ), "can't find PADDLE_TRAINER_ENDPOINTS" self._worker_endpoints = self._worker_endpoints.split(",") self._trainers_num = len(self._worker_endpoints) @@ -630,7 +647,8 @@ class GeneralRoleMaker(RoleMakerBase): self._is_barrier_all = 1 if "PADDLE_IS_BARRIER_ALL_ROLE" in os.environ: self._is_barrier_all = int( - os.environ["PADDLE_IS_BARRIER_ALL_ROLE"]) + os.environ["PADDLE_IS_BARRIER_ALL_ROLE"] + ) if training_role == "TRAINER": role = Role.WORKER current_id = int(os.environ["PADDLE_TRAINER_ID"]) @@ -638,12 +656,13 @@ class GeneralRoleMaker(RoleMakerBase): size_d = { "trainer": len(worker_endpoints), "pserver": len(eplist), - "all": len(worker_endpoints) + len(eplist) + "all": len(worker_endpoints) + len(eplist), } # child process for http server - self._http_server = Process(target=self.__start_kv_server, - args=(self._http_server_d, - size_d)) + self._http_server = Process( + target=self.__start_kv_server, + args=(self._http_server_d, size_d), + ) self._http_server.daemon = True # set running status to True self._http_server_d["running"] = True @@ -657,15 +676,21 @@ class GeneralRoleMaker(RoleMakerBase): gloo.set_size(len(worker_endpoints)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) if len(self._http_ip_port) != 0: - gloo.set_http_store(self._http_ip_port[0], - int(self._http_ip_port[1]), - "trainer") + gloo.set_http_store( + self._http_ip_port[0], + int(self._http_ip_port[1]), + "trainer", + ) else: - gloo.set_hdfs_store(self._hdfs_path + "/trainer", - self._hdfs_name, self._hdfs_ugi) + gloo.set_hdfs_store( + self._hdfs_path + "/trainer", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._node_type_comm = gloo if self._use_ps_gpu or self._use_metric: @@ -676,7 +701,9 @@ class GeneralRoleMaker(RoleMakerBase): Gloo_strategy.ip_port = int(self._http_ip_port[1]) Default_init_timeout_seconds = 3600 
Default_run_timeout_seconds = 9999999 - Gloo_strategy.init_seconds = Default_init_timeout_seconds + Gloo_strategy.init_seconds = ( + Default_init_timeout_seconds + ) Gloo_strategy.run_seconds = Default_run_timeout_seconds Gloo = fluid.core.GlooParallelContext(Gloo_strategy) Gloo.init() @@ -700,14 +727,21 @@ class GeneralRoleMaker(RoleMakerBase): gloo.set_size(len(eplist)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) if len(self._http_ip_port) != 0: - gloo.set_http_store(self._http_ip_port[0], - int(self._http_ip_port[1]), "pserver") + gloo.set_http_store( + self._http_ip_port[0], + int(self._http_ip_port[1]), + "pserver", + ) else: - gloo.set_hdfs_store(self._hdfs_path + "/pserver", - self._hdfs_name, self._hdfs_ugi) + gloo.set_hdfs_store( + self._hdfs_path + "/pserver", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._node_type_comm = gloo @@ -717,14 +751,17 @@ class GeneralRoleMaker(RoleMakerBase): gloo.set_size(len(all_list)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) if len(self._http_ip_port) != 0: - gloo.set_http_store(self._http_ip_port[0], - int(self._http_ip_port[1]), "all") + gloo.set_http_store( + self._http_ip_port[0], int(self._http_ip_port[1]), "all" + ) else: - gloo.set_hdfs_store(self._hdfs_path + "/all", self._hdfs_name, - self._hdfs_ugi) + gloo.set_hdfs_store( + self._hdfs_path + "/all", self._hdfs_name, self._hdfs_ugi + ) gloo.init() self._all_comm = gloo self._trainers_num = trainers_num @@ -977,8 +1014,12 @@ class GeneralRoleMaker(RoleMakerBase): gateway = None if len(item) > gateway_idx: gateway = item[gateway_idx] - if gateway and gateway != '*' and gateway != "0.0.0.0" and len( - item) > iface_idx: + if ( + gateway + and gateway != '*' + and gateway != "0.0.0.0" + and len(item) > iface_idx + ): return item[iface_idx] return "lo" @@ -986,8 +1027,9 @@ class GeneralRoleMaker(RoleMakerBase): """ get default physical interface """ - res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split( - "\n") + res = ( + os.popen("ip -f inet addr | awk NR%3==1").read().strip().split("\n") + ) for item in res: if "BROADCAST" in item: return item.split(":")[1].strip() @@ -995,6 +1037,7 @@ class GeneralRoleMaker(RoleMakerBase): def __start_kv_server(self, http_server_d, size_d): from paddle.fluid.incubate.fleet.utils.http_server import KVServer + http_server = KVServer(int(self._http_ip_port[1]), size_d) http_server.start() wait_seconds = 5 @@ -1028,7 +1071,8 @@ class HeterRoleMaker(GeneralRoleMaker): xpu_num = len(xpu_endpoints) if training_role not in ["TRAINER", "PSERVER", "XPU"]: raise ValueError( - "TRAINING_ROLE must be PSERVER or TRAINER or XPU") + "TRAINING_ROLE must be PSERVER or TRAINER or XPU" + ) if training_role == "TRAINER": role = Role.WORKER current_id = int(os.environ["PADDLE_TRAINER_ID"]) @@ -1040,11 +1084,14 @@ class HeterRoleMaker(GeneralRoleMaker): gloo.set_size(len(worker_endpoints)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store( - self._hdfs_path.rstrip("/") + "/trainer", 
self._hdfs_name, - self._hdfs_ugi) + self._hdfs_path.rstrip("/") + "/trainer", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._node_type_comm = gloo elif training_role == "XPU": @@ -1058,11 +1105,14 @@ class HeterRoleMaker(GeneralRoleMaker): gloo.set_size(len(xpu_endpoints)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store( - self._hdfs_path.rstrip("/") + "/xpu", self._hdfs_name, - self._hdfs_ugi) + self._hdfs_path.rstrip("/") + "/xpu", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._node_type_comm = gloo elif training_role == "PSERVER": @@ -1083,11 +1133,14 @@ class HeterRoleMaker(GeneralRoleMaker): gloo.set_size(len(eplist)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store( - self._hdfs_path.rstrip("/") + "/pserver", self._hdfs_name, - self._hdfs_ugi) + self._hdfs_path.rstrip("/") + "/pserver", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._node_type_comm = gloo @@ -1099,11 +1152,14 @@ class HeterRoleMaker(GeneralRoleMaker): gloo.set_size(len(heter_list)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store( - self._hdfs_path.rstrip("/") + "/heter", self._hdfs_name, - self._hdfs_ugi) + self._hdfs_path.rstrip("/") + "/heter", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._heter_comm = gloo @@ -1114,11 +1170,14 @@ class HeterRoleMaker(GeneralRoleMaker): gloo.set_size(len(all_list)) gloo.set_prefix(self._prefix) gloo.set_iface(self._iface) - gloo.set_timeout_seconds(self._init_timeout_seconds, - self._run_timeout_seconds) + gloo.set_timeout_seconds( + self._init_timeout_seconds, self._run_timeout_seconds + ) gloo.set_hdfs_store( - self._hdfs_path.rstrip("/") + "/all", self._hdfs_name, - self._hdfs_ugi) + self._hdfs_path.rstrip("/") + "/all", + self._hdfs_name, + self._hdfs_ugi, + ) gloo.init() self._all_comm = gloo @@ -1167,8 +1226,7 @@ class HeterRoleMaker(GeneralRoleMaker): self._heter_comm.barrier() def xpu_num(self): - """ - """ + """ """ if not self._role_is_generated: self.generate_role() return len(self._xpu_endpoints) @@ -1181,18 +1239,21 @@ class UserDefinedRoleMaker(RoleMakerBase): on each physical node, It can be assign by user. 
""" - def __init__(self, - current_id=0, - role=Role.WORKER, - worker_num=0, - server_endpoints=None): + def __init__( + self, + current_id=0, + role=Role.WORKER, + worker_num=0, + server_endpoints=None, + ): super(UserDefinedRoleMaker, self).__init__() if not isinstance(server_endpoints, list): raise TypeError("server_endpoints must be as string list") elif len(server_endpoints) <= 0: raise ValueError( - "the length of server_endpoints list must be greater than 0") + "the length of server_endpoints list must be greater than 0" + ) elif len(server_endpoints) != len(set(server_endpoints)): raise ValueError("server_endpoints can't have duplicate elements") else: @@ -1213,9 +1274,11 @@ class UserDefinedRoleMaker(RoleMakerBase): else: if current_id < 0: raise ValueError( - "current_id must be greater than or equal to 0") + "current_id must be greater than or equal to 0" + ) elif self._role == Role.SERVER and current_id >= len( - server_endpoints): + server_endpoints + ): raise ValueError( "if role is Role.SERVER, current_id must be less than or equal to len(server_endpoints) - 1" ) @@ -1263,7 +1326,8 @@ class UserDefinedCollectiveRoleMaker(RoleMakerBase): raise TypeError("worker_endpoints must be as string list") elif len(worker_endpoints) <= 0: raise ValueError( - "the length of worker_endpoints list must be greater than 0") + "the length of worker_endpoints list must be greater than 0" + ) elif len(worker_endpoints) != len(set(worker_endpoints)): raise ValueError("worker_endpoints can't have duplicate elements") else: @@ -1279,7 +1343,8 @@ class UserDefinedCollectiveRoleMaker(RoleMakerBase): else: if current_id < 0: raise ValueError( - "current_id must be greater than or equal to 0") + "current_id must be greater than or equal to 0" + ) elif current_id >= len(worker_endpoints): raise ValueError( "current_id must be less than or equal to len(worker_endpoints) - 1" diff --git a/python/paddle/fluid/incubate/fleet/collective/__init__.py b/python/paddle/fluid/incubate/fleet/collective/__init__.py index 8887a75f1d348fea5e24475eace801fbdcdc8de7..229d6e44bd6214baca7b3d51a2d164dfaf60981a 100644 --- a/python/paddle/fluid/incubate/fleet/collective/__init__.py +++ b/python/paddle/fluid/incubate/fleet/collective/__init__.py @@ -26,7 +26,10 @@ from paddle.fluid.incubate.fleet.base.fleet_base import Mode from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer from paddle.fluid import compiler -from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel, CheckpointSaver +from paddle.fluid.incubate.checkpoint.checkpoint_saver import ( + PaddleModel, + CheckpointSaver, +) import paddle @@ -38,19 +41,16 @@ import shutil class LambConfig(object): - def __init__(self): pass class DistFCConfig(object): - def __init__(self): pass class Collective(Fleet): - def __init__(self): super(Collective, self).__init__(Mode.COLLECTIVE) self._local_ip = 0 @@ -64,60 +64,73 @@ class Collective(Fleet): def init_worker(self): logging.warn( - "You should not call 'init_worker' method for collective mode.") + "You should not call 'init_worker' method for collective mode." + ) def run_worker(self, main_programs=None, scopes=None): logging.warn( - "You should not call 'run_worker' method for collective mode.") + "You should not call 'run_worker' method for collective mode." + ) def init_server(self, model_dir=None): logging.warn( - "You should not call 'init_server' method for collective mode.") + "You should not call 'init_server' method for collective mode." 
+ ) def run_server(self): logging.warn( - "You should not call 'run_server' method for collective mode.") + "You should not call 'run_server' method for collective mode." + ) def stop_worker(self): logging.warn( - "You should not call 'stop_worker' method for collective mode.") + "You should not call 'stop_worker' method for collective mode." + ) def distributed_optimizer(self, optimizer, strategy=None): - self._optimizer = \ - CollectiveOptimizer(optimizer, strategy) + self._optimizer = CollectiveOptimizer(optimizer, strategy) return self._optimizer - def save_inference_model(self, - executor, - dirname, - feeded_var_names=None, - target_vars=None, - main_program=None, - export_for_deployment=True): + def save_inference_model( + self, + executor, + dirname, + feeded_var_names=None, + target_vars=None, + main_program=None, + export_for_deployment=True, + ): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` by the `executor`. """ - assert isinstance(executor, Executor), \ - "In fleet.save_inference_model() function, executor must be as" \ + assert isinstance(executor, Executor), ( + "In fleet.save_inference_model() function, executor must be as" " Executor type." + ) if main_program is None: main_program = self._origin_program - assert isinstance(main_program, Program), \ - "In fleet.save_inference_model() function, main_program " \ + assert isinstance(main_program, Program), ( + "In fleet.save_inference_model() function, main_program " "must be as Program type." - - io.save_inference_model(dirname, feeded_var_names, target_vars, - executor, main_program, None, None, - export_for_deployment) - - def save_persistables(self, - executor, - dirname, - main_program=None, - filename=None): + ) + + io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + main_program, + None, + None, + export_for_deployment, + ) + + def save_persistables( + self, executor, dirname, main_program=None, filename=None + ): """ This function filters out all variables with `persistable==True` from the give `main_program` and then saves these variables to the folder @@ -128,28 +141,32 @@ class Collective(Fleet): files, set `filename` None; if you would like to save all variables in a single file, use `filename` to specify the file name. """ - assert isinstance(executor, Executor), \ - "In fleet.save_inference_model() function, executor must be as" \ + assert isinstance(executor, Executor), ( + "In fleet.save_inference_model() function, executor must be as" " Executor type." + ) if main_program is None: main_program = self._origin_program - assert isinstance(main_program, Program), \ - "In fleet.save_inference_model() function, main_program " \ + assert isinstance(main_program, Program), ( + "In fleet.save_inference_model() function, main_program " "must be as Program type." + ) io.save_persistables(executor, dirname, main_program, filename=filename) - def save_checkpoint(self, - executor, - path, - trainer_id, - train_status, - fs, - main_program=None, - local_cache_path=".cache", - remain_all_checkpoint=True): + def save_checkpoint( + self, + executor, + path, + trainer_id, + train_status, + fs, + main_program=None, + local_cache_path=".cache", + remain_all_checkpoint=True, + ): """ This function save persistables and current epoch num to path. 
""" @@ -163,22 +180,25 @@ class Collective(Fleet): path=path, slists=[m, t], trainer_id=trainer_id, - local_cache_path=local_cache_path) + local_cache_path=local_cache_path, + ) if not remain_all_checkpoint: c.clean_redundant_checkpoints(path) return real_path, checkpoint_no - def load_checkpoint(self, - executor, - path, - trainer_id, - train_status, - fs, - main_program=None, - local_cache_path=".cache", - ignore_empty=True): + def load_checkpoint( + self, + executor, + path, + trainer_id, + train_status, + fs, + main_program=None, + local_cache_path=".cache", + ignore_empty=True, + ): """ This function load persistables and current epoch num from path. """ @@ -188,10 +208,13 @@ class Collective(Fleet): m = PaddleModel(executor, main_program) c = CheckpointSaver(fs) - return c.load_checkpoint(path, [m, train_status], - trainer_id=trainer_id, - ignore_empty=ignore_empty, - local_cache_path=local_cache_path) + return c.load_checkpoint( + path, + [m, train_status], + trainer_id=trainer_id, + ignore_empty=ignore_empty, + local_cache_path=local_cache_path, + ) fleet = Collective() @@ -230,18 +253,21 @@ class CollectiveOpBasedOptimizer(DistributedOptimizer): def __init__(self, optimizer, strategy=None): assert isinstance( - strategy, - DistributedStrategy), "strategy must be DistributedStrategy" + strategy, DistributedStrategy + ), "strategy must be DistributedStrategy" super(CollectiveOpBasedOptimizer, self).__init__(optimizer, strategy) - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): - return self._optimizer.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): + return self._optimizer.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_gradients(self, params_grads): return self._optimizer.apply_gradients(params_grads) @@ -263,22 +289,26 @@ class CollectiveOptimizer(DistributedOptimizer): strategy = DistributedStrategy() super(CollectiveOptimizer, self).__init__(optimizer, strategy) self._forward_recompute = strategy.forward_recompute - if (not isinstance(strategy.recompute_checkpoints, list)): - raise ValueError("DistStrategy.recompute_checkpoints should" - "be a List") + if not isinstance(strategy.recompute_checkpoints, list): + raise ValueError( + "DistStrategy.recompute_checkpoints should" "be a List" + ) self._recompute_checkpoints = strategy.recompute_checkpoints self._use_amp = strategy.use_amp self._amp_loss_scaling = strategy.amp_loss_scaling self.print_config = False - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): - return self._optimizer.backward(loss, startup_program, parameter_list, - no_grad_set, callbacks) + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): + return self._optimizer.backward( + loss, startup_program, parameter_list, no_grad_set, callbacks + ) def apply_gradients(self, params_grads): return self._optimizer.apply_gradients(params_grads) @@ -295,29 +325,40 @@ class CollectiveOptimizer(DistributedOptimizer): if strategy.use_local_sgd: strategy.mode = "collective" strategy.collective_mode = "local_sgd" - self._check_condition("use_local_sgd", - use_dgc=main_program._enable_dgc, - use_dist_fc=strategy.use_dist_fc, - use_lamb=main_program._use_lamb) + self._check_condition( + "use_local_sgd", 
+ use_dgc=main_program._enable_dgc, + use_dist_fc=strategy.use_dist_fc, + use_lamb=main_program._use_lamb, + ) if strategy.use_dist_fc: - self._check_condition("use_dist_fc", - use_dgc=main_program._enable_dgc, - use_local_sgd=strategy.use_local_sgd, - use_lamb=main_program._use_lamb) - assert strategy.dist_fc_config is not None, "DistributedStrategy.dist_fc_config should be set" + self._check_condition( + "use_dist_fc", + use_dgc=main_program._enable_dgc, + use_local_sgd=strategy.use_local_sgd, + use_lamb=main_program._use_lamb, + ) + assert ( + strategy.dist_fc_config is not None + ), "DistributedStrategy.dist_fc_config should be set" if strategy._ut4grad_allreduce: strategy.mode = "collective" strategy.collective_mode = "grad_allreduce" - self._check_condition("_ut4grad_allreduce", - use_dgc=main_program._enable_dgc, - use_lamb=main_program._use_lamb) + self._check_condition( + "_ut4grad_allreduce", + use_dgc=main_program._enable_dgc, + use_lamb=main_program._use_lamb, + ) - if self._strategy.collective_mode=="local_sgd" \ - or self._strategy.collective_mode == "grad_allreduce": - assert self._strategy.mode == "collective", \ - "local_sgd and grad_allreduce can be used under collective mode" + if ( + self._strategy.collective_mode == "local_sgd" + or self._strategy.collective_mode == "grad_allreduce" + ): + assert ( + self._strategy.mode == "collective" + ), "local_sgd and grad_allreduce can be used under collective mode" def _transpile(self, startup_program, main_program): """ @@ -330,9 +371,12 @@ class CollectiveOptimizer(DistributedOptimizer): trainers_num = fleet.worker_num() if self.print_config: - print("worker_endpoints:{} trainers_num:{} current_endpoint:{} \ - trainer_id:{}".format(worker_endpoints, trainers_num, - current_endpoint, trainer_id)) + print( + "worker_endpoints:{} trainers_num:{} current_endpoint:{} \ + trainer_id:{}".format( + worker_endpoints, trainers_num, current_endpoint, trainer_id + ) + ) # call transpiler config = dist_transpiler.DistributeTranspilerConfig() @@ -340,15 +384,21 @@ class CollectiveOptimizer(DistributedOptimizer): config.collective_mode = self._strategy.collective_mode config.nccl_comm_num = self._strategy.nccl_comm_num - config.use_hierarchical_allreduce = self._strategy.use_hierarchical_allreduce - config.hierarchical_allreduce_inter_nranks = self._strategy.hierarchical_allreduce_inter_nranks + config.use_hierarchical_allreduce = ( + self._strategy.use_hierarchical_allreduce + ) + config.hierarchical_allreduce_inter_nranks = ( + self._strategy.hierarchical_allreduce_inter_nranks + ) t = dist_transpiler.DistributeTranspiler(config=config) - t.transpile(trainer_id=trainer_id, - trainers=worker_endpoints_env, - startup_program=startup_program, - program=main_program, - current_endpoint=current_endpoint) + t.transpile( + trainer_id=trainer_id, + trainers=worker_endpoints_env, + startup_program=startup_program, + program=main_program, + current_endpoint=current_endpoint, + ) def _get_node_ips_from_endpoints(self, endpoints): ss = set() @@ -415,11 +465,18 @@ class CollectiveOptimizer(DistributedOptimizer): ) if self.print_config: - print("node_num:", node_num, "num_threads:", - exec_strategy.num_threads, "use_hierarchical_allreduce:", - self._strategy.use_hierarchical_allreduce, "nccl_comm_num:", - self._strategy.nccl_comm_num, "FLAGS_sync_nccl_allreduce:", - sync_allreduce) + print( + "node_num:", + node_num, + "num_threads:", + exec_strategy.num_threads, + "use_hierarchical_allreduce:", + self._strategy.use_hierarchical_allreduce, + 
"nccl_comm_num:", + self._strategy.nccl_comm_num, + "FLAGS_sync_nccl_allreduce:", + sync_allreduce, + ) self._transpile(startup_program, main_program) @@ -437,19 +494,20 @@ class CollectiveOptimizer(DistributedOptimizer): loss_name=self._loss.name, build_strategy=self._strategy, exec_strategy=self._strategy.exec_strategy, - share_vars_from=None) + share_vars_from=None, + ) return self._compiled_program def raiseOptimizeError(self, strategy_name, optimize_name): - raise ValueError("can not use {0} when you set DistStrategy.{1} " - "as True".format(optimize_name, strategy_name)) - - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + raise ValueError( + "can not use {0} when you set DistStrategy.{1} " + "as True".format(optimize_name, strategy_name) + ) + + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): """ minimize a program through loss Args: @@ -470,28 +528,36 @@ class CollectiveOptimizer(DistributedOptimizer): # check optimizer conflicts if self._forward_recompute: if self._recompute_checkpoints == []: - raise ValueError("please set strategy.recompute_checkpoints" - "when set strategy.forward_recompute as True") + raise ValueError( + "please set strategy.recompute_checkpoints" + "when set strategy.forward_recompute as True" + ) if self._optimizer.__class__.__name__ in [ - "RecomputeOptimizer", "OptimizerWithMixedPrecision" + "RecomputeOptimizer", + "OptimizerWithMixedPrecision", ]: - self.raiseOptimizeError("forward_recompute", - self._optimizer.__class__.__name__) + self.raiseOptimizeError( + "forward_recompute", self._optimizer.__class__.__name__ + ) - self._optimizer = \ - fluid.optimizer.RecomputeOptimizer(self._optimizer) + self._optimizer = fluid.optimizer.RecomputeOptimizer( + self._optimizer + ) self._optimizer._set_checkpoints(self._recompute_checkpoints) if self._use_amp: if self._optimizer.__class__.__name__ in [ - "OptimizerWithMixedPrecision", "DGCMomentumOptimizer" + "OptimizerWithMixedPrecision", + "DGCMomentumOptimizer", ]: - self.raiseOptimizeError("mixed_precision", - self._optimizer.__class__.__name__) + self.raiseOptimizeError( + "mixed_precision", self._optimizer.__class__.__name__ + ) self._optimizer = fluid.contrib.mixed_precision.decorate( self._optimizer, init_loss_scaling=self._amp_loss_scaling, - use_dynamic_loss_scaling=True) + use_dynamic_loss_scaling=True, + ) main_program = loss.block.program if startup_program is None: @@ -500,11 +566,13 @@ class CollectiveOptimizer(DistributedOptimizer): self._loss = loss - self._check_collective_mode(main_program, self._optimizer, - self._strategy) + self._check_collective_mode( + main_program, self._optimizer, self._strategy + ) optimize_ops, param_grads = self._optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set=no_grad_set) + loss, startup_program, parameter_list, no_grad_set=no_grad_set + ) fleet._origin_program = main_program.clone(for_test=False) fleet._transpiled_program = main_program diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py index 67c1a0f0f8b47470973fab7c6ea071be01a4ca69..5ba4f92732229309173eb07dd647ff51e9de99e3 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py @@ -29,26 +29,43 @@ from paddle.fluid.executor import 
Executor from paddle.fluid.parallel_executor import ParallelExecutor from paddle.fluid.optimizer import Optimizer -from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig +from paddle.fluid.transpiler.distribute_transpiler import ( + DistributeTranspilerConfig, +) from paddle.fluid.incubate.fleet.base.fleet_base import Fleet from paddle.fluid.incubate.fleet.base.mode import Mode from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker from paddle.fluid.incubate.fleet.parameter_server import version -from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, +) from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops -from paddle.fluid.incubate.fleet.parameter_server.ir.public import _has_global_step -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import TrainerRuntimeConfig, DistributedStrategy, \ - SyncStrategy, AsyncStrategy, HalfAsyncStrategy, GeoStrategy, StrategyFactory +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _has_global_step, +) +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + TrainerRuntimeConfig, + DistributedStrategy, + SyncStrategy, + AsyncStrategy, + HalfAsyncStrategy, + GeoStrategy, + StrategyFactory, +) from paddle.fluid.transpiler.details.checkport import wait_server_ready from paddle.fluid.incubate.fleet.parameter_server.mode import PSMode from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer -from paddle.fluid.incubate.fleet.parameter_server.ir import trainer_pass as worker -from paddle.fluid.incubate.fleet.parameter_server.ir import pserver_pass as server +from paddle.fluid.incubate.fleet.parameter_server.ir import ( + trainer_pass as worker, +) +from paddle.fluid.incubate.fleet.parameter_server.ir import ( + pserver_pass as server, +) from paddle.fluid.incubate.fleet.parameter_server.ir import public as public @@ -105,25 +122,29 @@ class FleetTranspiler(Fleet): def sync_strategy_envs(): kwargs = {} kwargs[ - "pserver_endpoints"] = self._role_maker.get_pserver_endpoints() + "pserver_endpoints" + ] = self._role_maker.get_pserver_endpoints() kwargs["trainer_id"] = self._role_maker.worker_index() return kwargs def geo_strategy_envs(): - def get_sparse_attrs(): opt_init_map = {} opt_init_map["gaussian_random"] = ["seed", "mean", "std"] opt_init_map["fill_constant"] = ["value"] opt_init_map["uniform_random"] = ["seed", "min", "max"] opt_init_map["truncated_gaussian_random"] = [ - "seed", "mean", "std" + "seed", + "mean", + "std", ] - dist_varnames = get_sparse_tablenames(self._origin_main_program, - True) + dist_varnames = get_sparse_tablenames( + self._origin_main_program, True + ) sparse_varnames = get_sparse_tablenames( - self._origin_main_program, False) + self._origin_main_program, False + ) if len(dist_varnames) != 0: raise ValueError( @@ -132,15 +153,18 @@ class FleetTranspiler(Fleet): init_attrs = [] for value_name in sparse_varnames: - value_var = self._origin_main_program.global_block( - ).vars[value_name] + value_var = self._origin_main_program.global_block().vars[ + value_name + ] value_attr = [ value_name, - ",".join([str(dim) for dim in value_var.shape]) + ",".join([str(dim) for dim in value_var.shape]), ] for op in self._origin_startup_program.global_block().ops: - if op.type in opt_init_map.keys( - ) and value_name 
== op.output("Out")[0]: + if ( + op.type in opt_init_map.keys() + and value_name == op.output("Out")[0] + ): init_attr = [op.type] for attr in opt_init_map[op.type]: init_attr.append(str(op.attr(attr))) @@ -185,15 +209,18 @@ class FleetTranspiler(Fleet): if self.compiled_config.is_geo_mode(): recv_ctx = fleet.compiled_config.get_communicator_recv_context( - recv_type=4) + recv_type=4 + ) else: recv_ctx = fleet.compiled_config.get_communicator_recv_context( - recv_type=1) + recv_type=1 + ) from paddle.fluid.communicator import Communicator + self._communicator = Communicator( - trainer_config.mode, kwargs, - trainer_config.get_communicator_flags()) + trainer_config.mode, kwargs, trainer_config.get_communicator_flags() + ) self._communicator.init_with_ctx(send_ctx, recv_ctx) @@ -201,7 +228,8 @@ class FleetTranspiler(Fleet): self._communicator.start() else: raise ValueError( - "Communicator can only be inited once, please check") + "Communicator can only be inited once, please check" + ) def init_worker(self): """ @@ -231,23 +259,31 @@ class FleetTranspiler(Fleet): raise ValueError("There is no directory named '%s'", model_dir) sparse_varnames = self.compiled_config.get_sparse_varname_on_ps( - True) - distribtued_varnames = self.compiled_config.get_sparse_varname_on_ps( - False) + True + ) + distribtued_varnames = ( + self.compiled_config.get_sparse_varname_on_ps(False) + ) remaining_vars = list( filter( - FleetTranspiler.__exclude_vars(sparse_varnames + - distribtued_varnames), - self.main_program.list_vars())) + FleetTranspiler.__exclude_vars( + sparse_varnames + distribtued_varnames + ), + self.main_program.list_vars(), + ) + ) - fluid.io.load_vars(self._executor, - main_program=self.main_program, - dirname=model_dir, - vars=remaining_vars) + fluid.io.load_vars( + self._executor, + main_program=self.main_program, + dirname=model_dir, + vars=remaining_vars, + ) - self._load_sparse_params(dirname=model_dir, - varnames=sparse_varnames) + self._load_sparse_params( + dirname=model_dir, varnames=sparse_varnames + ) # todo(tangwei12) load distributed vars # self._load_sparse_params(dirname=model_dir, varnames=distribtued_varnames) @@ -366,13 +402,15 @@ class FleetTranspiler(Fleet): self._optimizer = ParameterServerOptimizer(optimizer, _strategy) return self._optimizer - def save_inference_model(self, - executor, - dirname, - feeded_var_names, - target_vars, - main_program=None, - export_for_deployment=True): + def save_inference_model( + self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True, + ): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` by the `executor`. 
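
The hunks around this point only re-wrap argument lists (for example the `fluid.io.load_vars` call above); nothing about what executes changes. A small standalone sketch, not part of the patch, of one way to confirm that a layout-only rewrite is behavior-preserving: Python's `ast.dump` omits line and column attributes by default, so two sources that differ only in whitespace and trailing commas produce identical dumps. The snippet strings below are abridged from the hunk above.

```python
# Standalone check (not part of the patch): a formatting-only rewrite should
# leave the abstract syntax tree unchanged.
import ast

OLD = """
fluid.io.load_vars(self._executor,
                   main_program=self.main_program,
                   dirname=model_dir,
                   vars=remaining_vars)
"""

NEW = """
fluid.io.load_vars(
    self._executor,
    main_program=self.main_program,
    dirname=model_dir,
    vars=remaining_vars,
)
"""


def same_ast(a, b):
    # ast.dump() drops lineno/col_offset by default, so only semantic
    # differences would make the two dumps diverge.
    return ast.dump(ast.parse(a)) == ast.dump(ast.parse(b))


assert same_ast(OLD, NEW)
print("layout-only change: ASTs are identical")
```
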
@@ -402,14 +440,28 @@ class FleetTranspiler(Fleet): raise TypeError( "in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed" ) - fluid.io.save_inference_model(dirname, feeded_var_names, - target_vars, executor, main_program, - None, None, export_for_deployment) + fluid.io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + main_program, + None, + None, + export_for_deployment, + ) else: - fluid.io.save_inference_model(dirname, feeded_var_names, - target_vars, executor, - self._origin_main_program, None, None, - export_for_deployment, True) + fluid.io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + self._origin_main_program, + None, + None, + export_for_deployment, + True, + ) model_basename = "__model__" model_filename = os.path.join(dirname, model_basename) @@ -423,6 +475,7 @@ class FleetTranspiler(Fleet): def _load_sparse_params(self, dirname, varnames): from paddle.fluid.communicator import LargeScaleKV + scale_kv = LargeScaleKV() for varname in varnames: origin_varname, _, _ = public._get_varname_parts(varname) @@ -431,8 +484,15 @@ class FleetTranspiler(Fleet): def _get_optimizer_status(self, op, param_name): supported_opts = [ - "sgd", "adam", "adagrad", "adamax", "momentum", "lars_momentum", - "rmsprop", "decayed_adagrad", "ftrl" + "sgd", + "adam", + "adagrad", + "adamax", + "momentum", + "lars_momentum", + "rmsprop", + "decayed_adagrad", + "ftrl", ] reshaped_val_map = {} @@ -443,7 +503,9 @@ class FleetTranspiler(Fleet): reshaped_val_map["momentum"] = ["velocity_0"] reshaped_val_map["lars_momentum"] = ["velocity_0"] reshaped_val_map["rmsprop"] = [ - "momentum_0", "mean_square_0", "mean_grad_0" + "momentum_0", + "mean_square_0", + "mean_grad_0", ] reshaped_val_map["decayed_adagrad"] = ["moment_0"] reshaped_val_map["ftrl"] = ["squared_0", "linear_0"] @@ -454,8 +516,10 @@ class FleetTranspiler(Fleet): if op not in supported_opts: raise ValueError( - "fleet can not support optimizer: {}, only this can be supported: {}" - .format(op, supported_opts)) + "fleet can not support optimizer: {}, only this can be supported: {}".format( + op, supported_opts + ) + ) reshaped_names = [ param_name + "_" + val for val in reshaped_val_map[op] @@ -472,8 +536,11 @@ class FleetTranspiler(Fleet): def _get_optimizer_op(self, param_name): opts = public._get_optimize_ops(self._origin_main_program) for op in opts: - if "Param" in op.input_names and \ - "LearningRate" in op.input_names and op.input("Param")[0] == param_name: + if ( + "Param" in op.input_names + and "LearningRate" in op.input_names + and op.input("Param")[0] == param_name + ): return op def _save_dense_params(self, executor, dirname, context, main_program): @@ -492,27 +559,24 @@ class FleetTranspiler(Fleet): optimizer = self._get_optimizer_op(varname) reshaped_varnames, origin_varnames = self._get_optimizer_status( - optimizer.type, varname) + optimizer.type, varname + ) for var_name in [varname] + reshaped_varnames + origin_varnames: var = self._origin_main_program.global_block().vars[var_name] - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self._role_maker.worker_index(), - "shape": - var.shape, - "slice_shapes": - [",".join([str(i) for i in var.shape])], - "slice_varnames": [var.name], - "remote_varnames": [var.name], - "is_sparse": - False, - "endpoints": - var_ctx.split_endpoints(), - "file_path": - os.path.join(dirname, var.name) - }) + block.append_op( + type='recv_save', + attrs={ + "trainer_id": 
self._role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": [",".join([str(i) for i in var.shape])], + "slice_varnames": [var.name], + "remote_varnames": [var.name], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints(), + "file_path": os.path.join(dirname, var.name), + }, + ) executor.run(prog) return local_vars @@ -531,7 +595,8 @@ class FleetTranspiler(Fleet): optimizer = self._get_optimizer_op(varname) reshaped_varnames, origin_varnames = self._get_optimizer_status( - optimizer.type, varname) + optimizer.type, varname + ) var = self._origin_main_program.global_block().vars[varname] slice_shapes = [] @@ -540,37 +605,34 @@ class FleetTranspiler(Fleet): for section in var_ctx.sections(): slice_shapes.append(str(section) + dims1) - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self._role_maker.worker_index(), - "shape": - var.shape, - "slice_shapes": - slice_shapes, - "slice_varnames": - var_ctx.split_varnames(), - "remote_varnames": - var_ctx.split_varnames(), - "is_sparse": - True, - "endpoints": - var_ctx.split_endpoints(), - "pserver_num": - len(self._role_maker.get_pserver_endpoints()), - "file_path": - os.path.join(dirname, var.name) - }) + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self._role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": slice_shapes, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "is_sparse": True, + "endpoints": var_ctx.split_endpoints(), + "pserver_num": len( + self._role_maker.get_pserver_endpoints() + ), + "file_path": os.path.join(dirname, var.name), + }, + ) for reshaped_varname in reshaped_varnames: - var = self._origin_main_program.global_block( - ).vars[reshaped_varname] + var = self._origin_main_program.global_block().vars[ + reshaped_varname + ] slice_varnames = [] remote_varnames = [] for i in range(len(var_ctx.split_varnames())): - slice_varnames.append("{}.block{}".format( - reshaped_varname, i)) + slice_varnames.append( + "{}.block{}".format(reshaped_varname, i) + ) remote_varnames.append(reshaped_varname) block.append_op( @@ -583,97 +645,112 @@ class FleetTranspiler(Fleet): "remote_varnames": remote_varnames, "is_sparse": True, "endpoints": var_ctx.split_endpoints(), - "pserver_num": - len(self._role_maker.get_pserver_endpoints()), - "file_path": os.path.join(dirname, var.name) - }) + "pserver_num": len( + self._role_maker.get_pserver_endpoints() + ), + "file_path": os.path.join(dirname, var.name), + }, + ) for origin_varname in origin_varnames: - var = self._origin_main_program.global_block( - ).vars[origin_varname] - - block.append_op(type='recv_save', - attrs={ - "trainer_id": - self._role_maker.worker_index(), - "shape": - var.shape, - "slice_shapes": - [",".join([str(i) for i in var.shape])], - "slice_varnames": [origin_varname], - "remote_varnames": [origin_varname], - "is_sparse": - False, - "endpoints": - var_ctx.split_endpoints()[:1], - "file_path": - os.path.join(dirname, var.name) - }) + var = self._origin_main_program.global_block().vars[ + origin_varname + ] + + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self._role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": [",".join([str(i) for i in var.shape])], + "slice_varnames": [origin_varname], + "remote_varnames": [origin_varname], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints()[:1], + "file_path": os.path.join(dirname, var.name), + }, + ) executor.run(prog) return context.keys() - def _save_distributed_params(self, 
executor, dirname, context, - main_program): + def _save_distributed_params( + self, executor, dirname, context, main_program + ): prog = Program() block = prog.global_block() for name, var_ctx in context.items(): - block.append_op(type='checkpoint_notify', - attrs={ - "varname": name, - "is_slice": True, - "slice_varnames": var_ctx.split_varnames(), - "remote_varnames": var_ctx.split_varnames(), - "endpoints": var_ctx.split_endpoints(), - "dirname": dirname - }) + block.append_op( + type='checkpoint_notify', + attrs={ + "varname": name, + "is_slice": True, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "endpoints": var_ctx.split_endpoints(), + "dirname": dirname, + }, + ) executor.run(prog) return context.keys() def _save_distributed_persistables(self, executor, dirname, main_program): dense_ctx = fleet.compiled_config.get_communicator_recv_context( - recv_type=1) + recv_type=1 + ) sparse_ctx = fleet.compiled_config.get_communicator_recv_context( - recv_type=2) + recv_type=2 + ) distributed_ctx = fleet.compiled_config.get_communicator_recv_context( - recv_type=3) + recv_type=3 + ) - recv_dense_varnames = self._save_dense_params(executor, dirname, - dense_ctx, main_program) + recv_dense_varnames = self._save_dense_params( + executor, dirname, dense_ctx, main_program + ) - recv_sparse_varnames = self._save_sparse_params(executor, dirname, - sparse_ctx, - main_program) + recv_sparse_varnames = self._save_sparse_params( + executor, dirname, sparse_ctx, main_program + ) recv_distributed_varnames = self._save_distributed_params( - executor, dirname, distributed_ctx, main_program) + executor, dirname, distributed_ctx, main_program + ) - saved_varnames = recv_dense_varnames + list( - recv_sparse_varnames) + list(recv_distributed_varnames) + saved_varnames = ( + recv_dense_varnames + + list(recv_sparse_varnames) + + list(recv_distributed_varnames) + ) remaining_vars = list( - filter(FleetTranspiler.__exclude_vars(saved_varnames), - main_program.list_vars())) + filter( + FleetTranspiler.__exclude_vars(saved_varnames), + main_program.list_vars(), + ) + ) - fluid.io.save_vars(executor, - main_program=main_program, - dirname=dirname, - vars=remaining_vars) + fluid.io.save_vars( + executor, + main_program=main_program, + dirname=dirname, + vars=remaining_vars, + ) def save_persistables(self, executor, dirname, main_program=None, **kwargs): """ - This function filters out all variables with `persistable==True` from the - give `main_program` and then saves these variables to the folder `dirname` - or file `filename`. - - The `dirname` is used to specify the folder where persistable variables - are going to be saved. If you would like to save variables in separate - files, set `filename` None; -if you would like to save all variables in a - single file, use `filename` to specify the file name. + This function filters out all variables with `persistable==True` from the + give `main_program` and then saves these variables to the folder `dirname` + or file `filename`. + + The `dirname` is used to specify the folder where persistable variables + are going to be saved. If you would like to save variables in separate + files, set `filename` None; + if you would like to save all variables in a + single file, use `filename` to specify the file name. 
""" if self._inner_mode == PSMode.PSLIB: @@ -702,12 +779,12 @@ if you would like to save all variables in a "in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed" ) - self._save_distributed_persistables(save_executor, dirname, - main_program) + self._save_distributed_persistables( + save_executor, dirname, main_program + ) @staticmethod def __exclude_vars(exclude_var_names=[]): - def is_valid(var): if var.name in exclude_var_names: return False @@ -719,9 +796,11 @@ if you would like to save all variables in a if origin_varname == "learning_rate_0": return False - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -760,7 +839,8 @@ class ParameterServerOptimizer(DistributedOptimizer): "Currently, distributed optimizer only support Adam" "Will config built-in adam for you." "We will support more functions in DistributedOptimizer", - sys.stderr) + sys.stderr, + ) self._optimizer_name = "DistributedAdam" self._optimizer = globals()[self._optimizer_name](optimizer) @@ -770,16 +850,22 @@ class ParameterServerOptimizer(DistributedOptimizer): self._window = 1 self.type = "downpour" self.data_norm_name = [ - ".batch_size", ".batch_square_sum", ".batch_sum", - ".batch_size@GRAD", ".batch_square_sum@GRAD", ".batch_sum@GRAD" + ".batch_size", + ".batch_square_sum", + ".batch_sum", + ".batch_size@GRAD", + ".batch_square_sum@GRAD", + ".batch_sum@GRAD", ] - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): raise NotImplementedError() def apply_gradients(self, params_grads): @@ -799,7 +885,8 @@ class ParameterServerOptimizer(DistributedOptimizer): _startup = worker.fake_init_ops_pass(_startup, compiled_config) _startup = worker.init_from_server_pass(_startup, compiled_config) _startup = worker.delet_extra_optimizes_pass( - _startup, compiled_config) + _startup, compiled_config + ) else: _main = worker.append_send_ops_pass(_main, compiled_config) _startup = _startup @@ -814,57 +901,76 @@ class ParameterServerOptimizer(DistributedOptimizer): _main = server.add_listen_and_serv_pass(_main, compiled_config) _main = server.add_rpc_global_flags_pass(_main, compiled_config) _main = server.add_optimizer_pass(_main, compiled_config) - _main = server.large_scale_sparse_pass(_main, _main, - compiled_config, False) + _main = server.large_scale_sparse_pass( + _main, _main, compiled_config, False + ) _startup = server.build_pserver_startup_program_pass( - _startup, _main, compiled_config) - _startup = server.large_scale_sparse_pass(_startup, _main, - compiled_config, True) + _startup, _main, compiled_config + ) + _startup = server.large_scale_sparse_pass( + _startup, _main, compiled_config, True + ) if not compiled_config.is_sync_mode(): _main = server.delete_unused_in_main_pass( - _main, compiled_config) + _main, compiled_config + ) _startup = server.delete_unused_in_startup_pass( - _startup, _main, compiled_config) + _startup, _main, compiled_config + ) else: _main = server.add_listen_and_serv_pass(_main, compiled_config) _main = 
server.add_rpc_global_flags_pass(_main, compiled_config) _main = server.add_geo_optimizer_pass(_main, compiled_config) - _main = server.large_scale_sparse_pass(_main, _main, - compiled_config, False) + _main = server.large_scale_sparse_pass( + _main, _main, compiled_config, False + ) _startup = server.build_pserver_startup_program_pass( - _startup, _main, compiled_config) - _startup = server.large_scale_sparse_pass(_startup, _main, - compiled_config, True) + _startup, _main, compiled_config + ) + _startup = server.large_scale_sparse_pass( + _startup, _main, compiled_config, True + ) _startup = server.delete_unused_in_startup_pass( - _startup, _main, compiled_config) + _startup, _main, compiled_config + ) return _main, _startup - def minimize(self, - losses, - scopes=None, - startup_programs=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, + losses, + scopes=None, + startup_programs=None, + parameter_list=None, + no_grad_set=None, + ): if isinstance(losses, list): raise ValueError("need implement later") - self._optimizer.minimize(losses, startup_programs, parameter_list, - no_grad_set) + self._optimizer.minimize( + losses, startup_programs, parameter_list, no_grad_set + ) fleet._origin_main_program = default_main_program().clone( - for_test=False) + for_test=False + ) fleet._origin_startup_program = default_startup_program().clone( - for_test=False) + for_test=False + ) compiled_config = public.CompileTimeStrategy( - fleet._origin_main_program, fleet._origin_startup_program, - self._strategy, fleet._role_maker) + fleet._origin_main_program, + fleet._origin_startup_program, + self._strategy, + fleet._role_maker, + ) fleet.compiled_config = compiled_config - fleet.main_program, fleet.startup_program = \ - self._build_trainer_programs(compiled_config) if fleet.is_worker() \ + fleet.main_program, fleet.startup_program = ( + self._build_trainer_programs(compiled_config) + if fleet.is_worker() else self._build_pserver_programs(compiled_config) + ) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py index 8e40fa81ebbc4d4c6bf89f5fcce92364a054d1d5..e035b9136c2ddbd2f430c98f0969679de0645ca9 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/distributed_strategy.py @@ -13,45 +13,61 @@ # limitations under the License. 
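
The `distributed_strategy.py` hunks that follow reshape `TrainerRuntimeConfig`, which builds its communicator flags from `FLAGS_*` environment variables (falling back to `CPU_NUM`) and, for sync and half-async modes, warns and forces the send-queue and merge-var sizes back to `CPU_NUM`. A self-contained sketch of that pattern is given here; the function name and the reduced flag set are illustrative only, not Paddle APIs — see the hunks below for the real implementation.

```python
# Illustrative sketch of the env-driven flag pattern used by
# TrainerRuntimeConfig below; names here are not Paddle APIs.
import os


def build_communicator_flags(mode):
    num_threads = os.getenv("CPU_NUM", "1")
    configs = {
        "communicator_send_queue_size": os.getenv(
            "FLAGS_communicator_send_queue_size", num_threads
        ),
        "communicator_max_merge_var_num": os.getenv(
            "FLAGS_communicator_max_merge_var_num", num_threads
        ),
        "communicator_thread_pool_size": os.getenv(
            "FLAGS_communicator_thread_pool_size", "5"
        ),
    }
    if mode in ("sync", "half_async"):
        # Mirror the override below: both knobs are forced to CPU_NUM so
        # every worker thread gets a slot.
        for key in (
            "communicator_send_queue_size",
            "communicator_max_merge_var_num",
        ):
            if configs[key] != num_threads:
                print(
                    "WARNING: forcing {} from {} to CPU_NUM={}".format(
                        key, configs[key], num_threads
                    )
                )
                configs[key] = num_threads
    return {k: str(v) for k, v in configs.items()}


print(build_communicator_flags("half_async"))
```
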
__all__ = [ - "TrainerRuntimeConfig", "DistributedStrategy", "SyncStrategy", - "AsyncStrategy", "HalfAsyncStrategy", "GeoStrategy", "StrategyFactory" + "TrainerRuntimeConfig", + "DistributedStrategy", + "SyncStrategy", + "AsyncStrategy", + "HalfAsyncStrategy", + "GeoStrategy", + "StrategyFactory", ] import os import paddle.fluid as fluid -from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig, ServerRuntimeConfig +from paddle.fluid.transpiler.distribute_transpiler import ( + DistributeTranspilerConfig, + ServerRuntimeConfig, +) from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode class TrainerRuntimeConfig(object): - def __init__(self): self.mode = None num_threads = os.getenv("CPU_NUM", "1") self.runtime_configs = {} self.runtime_configs['communicator_max_merge_var_num'] = os.getenv( - "FLAGS_communicator_max_merge_var_num", num_threads) + "FLAGS_communicator_max_merge_var_num", num_threads + ) self.runtime_configs['communicator_send_queue_size'] = os.getenv( - "FLAGS_communicator_send_queue_size", num_threads) + "FLAGS_communicator_send_queue_size", num_threads + ) self.runtime_configs[ - 'communicator_independent_recv_thread'] = os.getenv( - "FLAGS_communicator_independent_recv_thread", "1") + 'communicator_independent_recv_thread' + ] = os.getenv("FLAGS_communicator_independent_recv_thread", "1") self.runtime_configs[ - 'communicator_min_send_grad_num_before_recv'] = os.getenv( - "FLAGS_communicator_min_send_grad_num_before_recv", num_threads) + 'communicator_min_send_grad_num_before_recv' + ] = os.getenv( + "FLAGS_communicator_min_send_grad_num_before_recv", num_threads + ) self.runtime_configs['communicator_thread_pool_size'] = os.getenv( - "FLAGS_communicator_thread_pool_size", "5") + "FLAGS_communicator_thread_pool_size", "5" + ) self.runtime_configs['communicator_send_wait_times'] = os.getenv( - "FLAGS_communicator_send_wait_times", "5") + "FLAGS_communicator_send_wait_times", "5" + ) self.runtime_configs['communicator_is_sgd_optimizer'] = os.getenv( - "FLAGS_communicator_is_sgd_optimizer", "1") + "FLAGS_communicator_is_sgd_optimizer", "1" + ) # not used self.runtime_configs['rpc_deadline'] = os.getenv( - "FLAGS_rpc_deadline", "180000") + "FLAGS_rpc_deadline", "180000" + ) self.runtime_configs['rpc_retry_times'] = os.getenv( - "FLAGS_rpc_retry_times", "3") + "FLAGS_rpc_retry_times", "3" + ) def get_communicator_flags(self): need_keys = [] @@ -60,45 +76,62 @@ class TrainerRuntimeConfig(object): if self.mode is None or self.mode == DistributedMode.ASYNC: need_keys = self.runtime_configs.keys() mode_str = "async" - elif self.mode == DistributedMode.SYNC or self.mode == DistributedMode.HALF_ASYNC: + elif ( + self.mode == DistributedMode.SYNC + or self.mode == DistributedMode.HALF_ASYNC + ): mode_str = "sync or half_async" need_keys = [ 'communicator_max_merge_var_num', - 'communicator_send_wait_times', 'communicator_thread_pool_size', - 'communicator_send_queue_size' + 'communicator_send_wait_times', + 'communicator_thread_pool_size', + 'communicator_send_queue_size', ] elif self.mode == DistributedMode.GEO: mode_str = "GEO" need_keys = [ - 'communicator_thread_pool_size', 'communicator_send_wait_times', - 'communicator_max_merge_var_num', 'communicator_send_queue_size' + 'communicator_thread_pool_size', + 'communicator_send_wait_times', + 'communicator_max_merge_var_num', + 'communicator_send_queue_size', ] else: raise ValueError("Unsupported Mode") - if self.mode == DistributedMode.SYNC or self.mode == 
DistributedMode.HALF_ASYNC: + if ( + self.mode == DistributedMode.SYNC + or self.mode == DistributedMode.HALF_ASYNC + ): max_merge_var_num = self.runtime_configs[ - 'communicator_max_merge_var_num'] + 'communicator_max_merge_var_num' + ] send_queue_size = self.runtime_configs[ - 'communicator_send_queue_size'] + 'communicator_send_queue_size' + ] if max_merge_var_num != num_threads: - print('WARNING: In {} mode, communicator_max_merge_var_num ' - 'must be equal to CPU_NUM. But received, ' - 'communicator_max_merge_var_num = {}, CPU_NUM = ' - '{}. communicator_max_merge_var_num will be fored to {}.'. - format(mode_str, max_merge_var_num, num_threads, - num_threads)) + print( + 'WARNING: In {} mode, communicator_max_merge_var_num ' + 'must be equal to CPU_NUM. But received, ' + 'communicator_max_merge_var_num = {}, CPU_NUM = ' + '{}. communicator_max_merge_var_num will be fored to {}.'.format( + mode_str, max_merge_var_num, num_threads, num_threads + ) + ) self.runtime_configs[ - 'communicator_max_merge_var_num'] = num_threads + 'communicator_max_merge_var_num' + ] = num_threads if send_queue_size != num_threads: - print('WARNING: In {} mode, communicator_send_queue_size ' - 'must be equal to CPU_NUM. But received, ' - 'communicator_send_queue_size = {}, CPU_NUM = ' - '{}. communicator_send_queue_size will be fored to {}.'. - format(mode_str, send_queue_size, num_threads, - num_threads)) + print( + 'WARNING: In {} mode, communicator_send_queue_size ' + 'must be equal to CPU_NUM. But received, ' + 'communicator_send_queue_size = {}, CPU_NUM = ' + '{}. communicator_send_queue_size will be fored to {}.'.format( + mode_str, send_queue_size, num_threads, num_threads + ) + ) self.runtime_configs[ - 'communicator_send_queue_size'] = num_threads + 'communicator_send_queue_size' + ] = num_threads return dict((key, str(self.runtime_configs[key])) for key in need_keys) @@ -128,7 +161,6 @@ class TrainerRuntimeConfig(object): class PSLibRuntimeConfig(object): - def __init__(self): self.runtime_configs = {} @@ -137,7 +169,6 @@ class PSLibRuntimeConfig(object): class DistributedStrategy(object): - def __init__(self): self._program_config = DistributeTranspilerConfig() self._trainer_runtime_config = TrainerRuntimeConfig() @@ -150,7 +181,9 @@ class DistributedStrategy(object): self._execute_strategy.num_threads = num_threads if num_threads > 1: - self._build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + self._build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) self.debug_opt = None self.use_ps_gpu = False @@ -162,11 +195,13 @@ class DistributedStrategy(object): if self.debug_opt is not None and isinstance(self.debug_opt, dict): opt_info["dump_slot"] = bool(self.debug_opt.get("dump_slot", 0)) opt_info["dump_converter"] = str( - self.debug_opt.get("dump_converter", "")) + self.debug_opt.get("dump_converter", "") + ) opt_info["dump_fields"] = self.debug_opt.get("dump_fields", []) opt_info["dump_file_num"] = self.debug_opt.get("dump_file_num", 16) opt_info["dump_fields_path"] = self.debug_opt.get( - "dump_fields_path", "") + "dump_fields_path", "" + ) opt_info["dump_param"] = self.debug_opt.get("dump_param", []) return opt_info @@ -182,8 +217,10 @@ class DistributedStrategy(object): setattr(self._program_config, key, config[key]) else: raise ValueError( - "DistributeTranspilerConfig doesn't have key: {}". 
- format(key)) + "DistributeTranspilerConfig doesn't have key: {}".format( + key + ) + ) else: raise TypeError( "program_config only accept input type: dict or DistributeTranspilerConfig" @@ -207,7 +244,8 @@ class DistributedStrategy(object): self._trainer_runtime_config.runtime_configs[key] = Value else: raise ValueError( - "TrainerRuntimeConfig doesn't have key: {}".format(key)) + "TrainerRuntimeConfig doesn't have key: {}".format(key) + ) else: raise TypeError( "trainer_runtime_config only accept input type: dict or TrainerRuntimeConfig" @@ -237,7 +275,8 @@ class DistributedStrategy(object): setattr(self._server_runtime_config, key, config[key]) else: raise ValueError( - "ServerRuntimeConfig doesn't have key: {}".format(key)) + "ServerRuntimeConfig doesn't have key: {}".format(key) + ) else: raise TypeError( "server_runtime_config only accept input type: dict or ServerRuntimeConfig" @@ -261,7 +300,8 @@ class DistributedStrategy(object): setattr(self._execute_strategy, key, config[key]) else: raise ValueError( - "ExecutionStrategy doesn't have key: {}".format(key)) + "ExecutionStrategy doesn't have key: {}".format(key) + ) else: raise TypeError( "execute_strategy only accept input type: dict or ExecutionStrategy" @@ -285,10 +325,12 @@ class DistributedStrategy(object): setattr(self._build_strategy, key, config[key]) else: raise ValueError( - "BuildStrategy doesn't have key: {}".format(key)) + "BuildStrategy doesn't have key: {}".format(key) + ) else: raise TypeError( - "build_strategy only accept input type: dict or BuildStrategy") + "build_strategy only accept input type: dict or BuildStrategy" + ) self.check_build_strategy() def check_build_strategy(self): @@ -298,7 +340,6 @@ class DistributedStrategy(object): class SyncStrategy(DistributedStrategy): - def __init__(self): super(SyncStrategy, self).__init__() self.check_program_config() @@ -327,7 +368,6 @@ class SyncStrategy(DistributedStrategy): class AsyncStrategy(DistributedStrategy): - def __init__(self): super(AsyncStrategy, self).__init__() self.check_program_config() @@ -354,7 +394,6 @@ class AsyncStrategy(DistributedStrategy): class HalfAsyncStrategy(DistributedStrategy): - def __init__(self): super(HalfAsyncStrategy, self).__init__() self.check_program_config() @@ -382,7 +421,6 @@ class HalfAsyncStrategy(DistributedStrategy): class GeoStrategy(DistributedStrategy): - def __init__(self, update_frequency=100): super(GeoStrategy, self).__init__() self._program_config.geo_sgd_need_push_nums = update_frequency @@ -401,10 +439,12 @@ class GeoStrategy(DistributedStrategy): self._trainer_runtime_config.mode = DistributedMode.GEO self._trainer_runtime_config.runtime_configs[ - 'communicator_send_queue_size'] = self._program_config.geo_sgd_need_push_nums + 'communicator_send_queue_size' + ] = self._program_config.geo_sgd_need_push_nums self._trainer_runtime_config.runtime_configs[ - 'communicator_max_merge_var_num'] = self._program_config.geo_sgd_need_push_nums + 'communicator_max_merge_var_num' + ] = self._program_config.geo_sgd_need_push_nums def check_server_runtime_config(self): pass @@ -417,7 +457,6 @@ class GeoStrategy(DistributedStrategy): class StrategyFactory(object): - def __init_(self): pass diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py index bcb83a56370c87b800a26c414f6f9ff6acffda39..f799a69e2c8e090cc19956574b99ebe898b1ba2a 100644 --- 
a/python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py
@@ -18,13 +18,27 @@ import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 
 from paddle.fluid.transpiler.details.program_utils import delete_ops
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_heter_ops
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import union_forward_gradient_op
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_heter_program
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_trainer_program
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_block_joints
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_op_input_output
-from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import get_vars_name_in_block
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    find_heter_ops,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    union_forward_gradient_op,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    create_heter_program,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    create_trainer_program,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    find_block_joints,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    find_op_input_output,
+)
+from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
+    get_vars_name_in_block,
+)
 
 
 def split_heter_worker_ops_pass(program, config, stage_id, device):
@@ -36,7 +50,8 @@ def split_heter_worker_ops_pass(program, config, stage_id, device):
     """
     default_deveice = "cpu"
     program, heter_ops, _, program_block_ops = find_heter_ops(
-        program, default_deveice)
+        program, default_deveice
+    )
     if len(heter_ops) == 0:
         warnings.warn(
             "Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code."
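
The new import layout above (one symbol per parenthesised, trailing-comma block) is what an automatic formatter such as black produces for lines that exceed its length limit. A minimal sketch of reproducing one of these rewrites, assuming the `black` package is installed; the 79-column limit is an assumption about the project's configuration, not something stated in this hunk.

```python
# Sketch only, not part of the patch: re-create the wrapping of one of the
# imports shown above with black's programmatic API.
import black

SRC = (
    "from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass "
    "import find_heter_ops\n"
)

# line_length=79 is an assumed project setting; black's default is 88.
print(black.format_str(SRC, mode=black.Mode(line_length=79)))
# Expected shape, matching the '+' lines in the hunk above:
# from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import (
#     find_heter_ops,
# )
```
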
@@ -46,8 +61,16 @@ def split_heter_worker_ops_pass(program, config, stage_id, device): program_block_ops = union_forward_gradient_op(program_block_ops) block_vars_detail = find_block_joints(program, program_block_ops, heter_ops) heter_program = framework.Program() - create_heter_program(program, config, heter_program, program_block_ops, - heter_ops, block_vars_detail, device, stage_id) + create_heter_program( + program, + config, + heter_program, + program_block_ops, + heter_ops, + block_vars_detail, + device, + stage_id, + ) return heter_program @@ -61,11 +84,13 @@ def split_trainer_ops_pass(program, config, default_device="cpu"): # Todo: support user define default_device (MrChengmo) default_device_ = default_device program, heter_ops, default_ops, program_block_ops = find_heter_ops( - program, default_device_) + program, default_device_ + ) program_block_ops = union_forward_gradient_op(program_block_ops) block_vars_detail = find_block_joints(program, program_block_ops, heter_ops) trainer_program = program.clone() - create_trainer_program(trainer_program, program, config, program_block_ops, - block_vars_detail) + create_trainer_program( + trainer_program, program, config, program_block_ops, block_vars_detail + ) return trainer_program diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/pserver_pass.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/pserver_pass.py index 65d186c855ced5772a2f54b60da65f5ee2e444fc..1d10bc1516821ded1118063bbc6fc6b7b3c52eea 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/pserver_pass.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/pserver_pass.py @@ -17,12 +17,22 @@ import collections from paddle.fluid import core from paddle.fluid.framework import Block -from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_optimize_ops, +) from paddle.fluid.incubate.fleet.parameter_server.ir.public import _orig_varname -from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts -from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op -from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablename -from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_varname_parts, +) +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + is_distributed_sparse_op, +) +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablename, +) +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, +) from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@" @@ -33,8 +43,7 @@ LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched def _is_optimizer_op(op): - if "Param" in op.input_names and \ - "LearningRate" in op.input_names: + if "Param" in op.input_names and "LearningRate" in op.input_names: return True return False @@ -75,12 +84,12 @@ def _get_optimizer_input_shape(op_type, varkey, orig_shape, param_shape): pass else: raise ValueError( - "Not supported optimizer for distributed training: %s" % op_type) + "Not supported optimizer for distributed training: %s" % op_type + ) return orig_shape def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, 
config): - def _get_pserver_grad_param_var(var, var_dict): """ Return pserver side grad/param variable, return None @@ -120,8 +129,8 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config): # for ops like clipping and weight decay, get the split var(xxx.block0) # for inputs / outputs grad_block = _get_pserver_grad_param_var( - var, - program.global_block().vars) + var, program.global_block().vars + ) if grad_block: varlist[i] = grad_block elif var.name not in program.global_block().vars: @@ -131,16 +140,17 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config): varlist[i] = program.global_block().vars[var.name] inputs[key] = varlist - outputs = _get_output_map_from_op(origin_program.global_block().vars, - opt_op) + outputs = _get_output_map_from_op( + origin_program.global_block().vars, opt_op + ) for key, varlist in outputs.items(): if not isinstance(varlist, list): varlist = [varlist] for i in range(len(varlist)): var = varlist[i] grad_block = _get_pserver_grad_param_var( - var, - program.global_block().vars) + var, program.global_block().vars + ) if grad_block: varlist[i] = grad_block elif var.name not in program.global_block().vars: @@ -150,15 +160,24 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config): varlist[i] = program.global_block().vars[var.name] outputs[key] = varlist - return optimize_block.append_op(type=opt_op.type, - inputs=inputs, - outputs=outputs, - attrs=opt_op.all_attrs()) - - -def _append_pserver_ops(optimize_block, opt_op, endpoint, grad_to_block_id, - origin_program, merged_var, sparse_grad_to_param, - config): + return optimize_block.append_op( + type=opt_op.type, + inputs=inputs, + outputs=outputs, + attrs=opt_op.all_attrs(), + ) + + +def _append_pserver_ops( + optimize_block, + opt_op, + endpoint, + grad_to_block_id, + origin_program, + merged_var, + sparse_grad_to_param, + config, +): program = optimize_block.program pserver_block = program.global_block() new_inputs = collections.OrderedDict() @@ -180,8 +199,10 @@ def _append_pserver_ops(optimize_block, opt_op, endpoint, grad_to_block_id, for pairs in config.merged_variables_pairs: merged_p = pairs[0] if merged_p.merged_var.name == orig_varname: - if merged_p.merged_var.name == merged_p.ordered_vars[ - 0].name: + if ( + merged_p.merged_var.name + == merged_p.ordered_vars[0].name + ): unmerged_vars.append(merged_p.ordered_vars[0]) else: merged_vars.append(merged_p.merged_var) @@ -209,8 +230,10 @@ def _append_pserver_ops(optimize_block, opt_op, endpoint, grad_to_block_id, # because it will create a new tensor for # decayed gradient but not inplace modify the origin one origin_grad_name = opt_op.input(key)[0] - if core.kNewGradSuffix( - ) in origin_grad_name and pserver_block.has_var(origin_grad_name): + if ( + core.kNewGradSuffix() in origin_grad_name + and pserver_block.has_var(origin_grad_name) + ): new_grad = pserver_block.var(origin_grad_name) new_inputs[key] = new_grad else: @@ -220,10 +243,12 @@ def _append_pserver_ops(optimize_block, opt_op, endpoint, grad_to_block_id, if not param_block: return - tmpvar = pserver_block.create_var(name=param_block.name, - persistable=True, - dtype=param_block.dtype, - shape=param_block.shape) + tmpvar = pserver_block.create_var( + name=param_block.name, + persistable=True, + dtype=param_block.dtype, + shape=param_block.shape, + ) new_inputs[key] = tmpvar elif key == "LearningRate": @@ -238,40 +263,52 @@ def _append_pserver_ops(optimize_block, opt_op, endpoint, grad_to_block_id, 
name=origin_var.name, persistable=origin_var.persistable, dtype=origin_var.dtype, - shape=origin_var.shape) + shape=origin_var.shape, + ) new_inputs[key] = tmpvar for key in opt_op.input_names: new_shape = None if key in [ - "Param", "Grad", "LearningRate", "MasterParam", "Beta1Tensor", - "Beta2Tensor" + "Param", + "Grad", + "LearningRate", + "MasterParam", + "Beta1Tensor", + "Beta2Tensor", ]: continue var = origin_program.global_block().vars[opt_op.input(key)[0]] param_var = new_inputs["Param"] # update accumulator variable shape - new_shape = _get_optimizer_input_shape(opt_op.type, key, var.shape, - param_var.shape) - tmpvar = pserver_block.create_var(name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=new_shape) + new_shape = _get_optimizer_input_shape( + opt_op.type, key, var.shape, param_var.shape + ) + tmpvar = pserver_block.create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape, + ) new_inputs[key] = tmpvar # change output's ParamOut variable - outputs = _get_output_map_from_op(origin_program.global_block().vars, - opt_op) + outputs = _get_output_map_from_op( + origin_program.global_block().vars, opt_op + ) outputs["ParamOut"] = new_inputs["Param"] - optimize_block.append_op(type=opt_op.type, - inputs=new_inputs, - outputs=outputs, - attrs=opt_op.all_attrs()) + optimize_block.append_op( + type=opt_op.type, + inputs=new_inputs, + outputs=outputs, + attrs=opt_op.all_attrs(), + ) # record sparse grad to param name if new_inputs["Grad"].type == core.VarDesc.VarType.SELECTED_ROWS: sparse_grad_to_param.append( - str(new_inputs["Grad"].name) + ":" + str(new_inputs["Param"].name)) + str(new_inputs["Grad"].name) + ":" + str(new_inputs["Param"].name) + ) def _get_input_map_from_op(varmap, op): @@ -316,7 +353,6 @@ def add_listen_and_serv_pass(program, config): "lr_decay_block_id": None, "dense_optimize_blocks": None, "sparse_optimize_blocks": None, - # runtime attribute "endpoint": config.get_ps_endpoint(), "pserver_id": config.get_role_id(), @@ -324,14 +360,13 @@ def add_listen_and_serv_pass(program, config): "distributed_mode": config.get_distributed_mode(), "rpc_get_thread_num": -1, "rpc_send_thread_num": -1, - "rpc_prefetch_thread_num": -1 + "rpc_prefetch_thread_num": -1, } # step5 append the listen_and_serv op - program.global_block().append_op(type="listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + program.global_block().append_op( + type="listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs + ) return program @@ -346,7 +381,8 @@ def add_rpc_global_flags_pass(program, config): if get_threads < 1 or send_threads < 1 or pull_threads < 1: raise ValueError( - "error arguments in get_threads/send_threads/pull_threads") + "error arguments in get_threads/send_threads/pull_threads" + ) op._set_attr("rpc_get_thread_num", get_threads) op._set_attr("rpc_send_thread_num", send_threads) @@ -356,18 +392,20 @@ def add_rpc_global_flags_pass(program, config): def _clone_var(block, var, persistable=True): - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=persistable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=persistable, + ) def add_optimizer_pass(program, config): - - def _append_pserver_grad_merge_ops(optimize_block, grad_varname_for_block, - endpoint, grad_to_block_id): + def _append_pserver_grad_merge_ops( + optimize_block, 
grad_varname_for_block, endpoint, grad_to_block_id + ): trainers = config.get_trainers() program = optimize_block.program @@ -375,8 +413,7 @@ def add_optimizer_pass(program, config): grad_block = None for g in config.param_grad_ep_mapping[endpoint]["grads"]: - if _orig_varname(g.name) == \ - _orig_varname(grad_varname_for_block): + if _orig_varname(g.name) == _orig_varname(grad_varname_for_block): grad_block = g break @@ -386,41 +423,48 @@ def add_optimizer_pass(program, config): return None orig_varname, block_name, trainer_name = _get_varname_parts( - grad_block.name) + grad_block.name + ) if block_name: merged_var_name = '.'.join([orig_varname, block_name]) else: merged_var_name = orig_varname - merged_var = pserver_block.create_var(name=grad_block.name, - persistable=True, - type=grad_block.type, - dtype=grad_block.dtype, - shape=grad_block.shape) + merged_var = pserver_block.create_var( + name=grad_block.name, + persistable=True, + type=grad_block.type, + dtype=grad_block.dtype, + shape=grad_block.shape, + ) grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) if config.is_sync_mode() and trainers > 1: vars2merge = [] for i in range(trainers): - per_trainer_name = "%s.trainer_%d" % \ - (merged_var_name, i) + per_trainer_name = "%s.trainer_%d" % (merged_var_name, i) per_trainer_var = pserver_block.create_var( name=per_trainer_name, persistable=False, type=grad_block.type, dtype=grad_block.dtype, - shape=grad_block.shape) + shape=grad_block.shape, + ) vars2merge.append(per_trainer_var) - optimize_block.append_op(type="sum", - inputs={"X": vars2merge}, - outputs={"Out": merged_var}, - attrs={"use_mkldnn": False}) - optimize_block.append_op(type="scale", - inputs={"X": merged_var}, - outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(trainers)}) + optimize_block.append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}, + attrs={"use_mkldnn": False}, + ) + optimize_block.append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(trainers)}, + ) return merged_var origin_program = config.get_origin_main_program() @@ -450,8 +494,10 @@ def add_optimizer_pass(program, config): for pairs in config.merged_variables_pairs: merged_p = pairs[0] if merged_p.merged_var.name == orig_varname: - if merged_p.merged_var.name == merged_p.ordered_vars[ - 0].name: + if ( + merged_p.merged_var.name + == merged_p.ordered_vars[0].name + ): unmerged_varnames.append(merged_p.ordered_vars[0].name) else: merged_varnames.append(merged_p.merged_var.name) @@ -473,9 +519,16 @@ def add_optimizer_pass(program, config): def __append_optimize_op__(op, block, grad_to_block_id, merged_var, lr_ops): if _is_optimizer_op(op): - _append_pserver_ops(block, op, ps_endpoint, grad_to_block_id, - origin_program, merged_var, - sparse_grad_to_param, config) + _append_pserver_ops( + block, + op, + ps_endpoint, + grad_to_block_id, + origin_program, + merged_var, + sparse_grad_to_param, + config, + ) elif op not in lr_ops: _append_pserver_non_opt_ops(block, op, origin_program, config) @@ -506,8 +559,9 @@ def add_optimizer_pass(program, config): lr_decay_block = program._create_block(program.num_blocks - 1) optimize_blocks.append(lr_decay_block) for op in lr_ops: - cloned_op = _append_pserver_non_opt_ops(lr_decay_block, op, - origin_program, config) + cloned_op = _append_pserver_non_opt_ops( + lr_decay_block, op, origin_program, config + ) # append sub blocks to pserver_program in lr_decay_op # todo(tangwei12): 
__clone_lr_op_sub_block__ lr_decay_block_id = lr_decay_block.idx @@ -529,18 +583,25 @@ def add_optimizer_pass(program, config): grad_varname_for_block = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] if op.attr(OP_ROLE_VAR_ATTR_NAME)[0] == optimize_target_param_name: merged_var = _append_pserver_grad_merge_ops( - per_opt_block, grad_varname_for_block, ps_endpoint, - grad_to_block_id) + per_opt_block, + grad_varname_for_block, + ps_endpoint, + grad_to_block_id, + ) if merged_var: break # append optimize op once then append other ops. if merged_var: for _, op in enumerate(optimize_ops): # optimizer is connected to itself - if op.attr(OP_ROLE_VAR_ATTR_NAME)[0] == optimize_target_param_name and \ - op not in global_ops: - __append_optimize_op__(op, per_opt_block, grad_to_block_id, - merged_var, lr_ops) + if ( + op.attr(OP_ROLE_VAR_ATTR_NAME)[0] + == optimize_target_param_name + and op not in global_ops + ): + __append_optimize_op__( + op, per_opt_block, grad_to_block_id, merged_var, lr_ops + ) # dedup grad to ids list grad_to_block_id = list(set(grad_to_block_id)) @@ -549,8 +610,9 @@ def add_optimizer_pass(program, config): opt_state_block = program._create_block(program.num_blocks - 1) optimize_blocks.append(opt_state_block) for glb_op in global_ops: - __append_optimize_op__(glb_op, opt_state_block, grad_to_block_id, - None, lr_ops) + __append_optimize_op__( + glb_op, opt_state_block, grad_to_block_id, None, lr_ops + ) if len(optimize_blocks) == 0: pre_block_idx = program.num_blocks - 1 @@ -590,8 +652,10 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): origin_name = _orig_varname(param_name) o_main_program = config.get_origin_main_program() for op in o_main_program.global_block().ops: - if is_distributed_sparse_op(op) and get_sparse_tablename( - op) == origin_name: + if ( + is_distributed_sparse_op(op) + and get_sparse_tablename(op) == origin_name + ): entry = op.attr("entry") return entry @@ -604,8 +668,10 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): for value_name in acture_value_names: origin_var_name = _orig_varname(value_name) for op in o_startup_program.global_block().ops: - if op.type in opt_init_map.keys( - ) and origin_var_name == op.output("Out")[0]: + if ( + op.type in opt_init_map.keys() + and origin_var_name == op.output("Out")[0] + ): init_attr = [op.type] for attr in opt_init_map[op.type]: init_attr.append(str(op.attr(attr))) @@ -646,8 +712,16 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): break return grad, opt_idx, value_names, value_dims, acture_names, fuse - def add_fuse_large_scale_op(block, global_block, table_name, value_names, - acture_names, grad, is_entry, opt_idx): + def add_fuse_large_scale_op( + block, + global_block, + table_name, + value_names, + acture_names, + grad, + is_entry, + opt_idx, + ): op = block.ops[opt_idx] @@ -655,99 +729,108 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): grad = main_program.global_block().vars[op.input("Grad")[0]] lr = main_program.global_block().vars[op.input("LearningRate")[0]] - block._insert_op(opt_idx, - type="lookup_sparse_table_fuse_sgd", - inputs={ - "Grad": grad, - "LearningRate": lr - }, - attrs={ - "is_entry": is_entry, - "tablename": table_name, - "value_names": value_names - }) + block._insert_op( + opt_idx, + type="lookup_sparse_table_fuse_sgd", + inputs={"Grad": grad, "LearningRate": lr}, + attrs={ + "is_entry": is_entry, + "tablename": table_name, + "value_names": value_names, + }, + ) elif 
op.type == "adam": grad = main_program.global_block().vars[op.input("Grad")[0]] lr = main_program.global_block().vars[op.input("LearningRate")[0]] - beta1_pow = main_program.global_block().vars[op.input("Beta1Pow") - [0]] - beta2_pow = main_program.global_block().vars[op.input("Beta2Pow") - [0]] - beta1_pow_o = main_program.global_block().vars[op.output( - "Beta1PowOut")[0]] - beta2_pow_o = main_program.global_block().vars[op.output( - "Beta2PowOut")[0]] + beta1_pow = main_program.global_block().vars[ + op.input("Beta1Pow")[0] + ] + beta2_pow = main_program.global_block().vars[ + op.input("Beta2Pow")[0] + ] + beta1_pow_o = main_program.global_block().vars[ + op.output("Beta1PowOut")[0] + ] + beta2_pow_o = main_program.global_block().vars[ + op.output("Beta2PowOut")[0] + ] beta1 = op.attr('beta1') beta2 = op.attr('beta2') epsilon = op.attr('epsilon') - block._insert_op(opt_idx, - type="lookup_sparse_table_fuse_adam", - inputs={ - "Grad": grad, - "LearningRate": lr, - "Beta1Pow": beta1_pow, - "Beta2Pow": beta2_pow - }, - outputs={ - "Beta1PowOut": beta1_pow_o, - "Beta2PowOut": beta2_pow_o - }, - attrs={ - "beta1": beta1, - "beta2": beta2, - "epsilon": epsilon, - "is_entry": is_entry, - "tablename": table_name, - "value_names": value_names - }) + block._insert_op( + opt_idx, + type="lookup_sparse_table_fuse_adam", + inputs={ + "Grad": grad, + "LearningRate": lr, + "Beta1Pow": beta1_pow, + "Beta2Pow": beta2_pow, + }, + outputs={ + "Beta1PowOut": beta1_pow_o, + "Beta2PowOut": beta2_pow_o, + }, + attrs={ + "beta1": beta1, + "beta2": beta2, + "epsilon": epsilon, + "is_entry": is_entry, + "tablename": table_name, + "value_names": value_names, + }, + ) else: raise ValueError("only support sgd/adam optimizer now") - def add_large_scale_op(block, global_block, table_name, value_names, - acture_names, grad, is_entry, opt_idx): - ids = global_block.create_var(name="kSparseIDs@{}".format(table_name), - persistable=False, - dtype="int64", - shape=[1, 1], - lod_level=0) + def add_large_scale_op( + block, + global_block, + table_name, + value_names, + acture_names, + grad, + is_entry, + opt_idx, + ): + ids = global_block.create_var( + name="kSparseIDs@{}".format(table_name), + persistable=False, + dtype="int64", + shape=[1, 1], + lod_level=0, + ) # insert grad split to ids and tensor op - block._insert_op(opt_idx, - type="lookup_sparse_table_grad_split", - inputs={"Grad": grad}, - outputs={ - "Row": ids, - "Value": grad - }, - attrs={ - "tablename": table_name, - "is_entry": is_entry - }) + block._insert_op( + opt_idx, + type="lookup_sparse_table_grad_split", + inputs={"Grad": grad}, + outputs={"Row": ids, "Value": grad}, + attrs={"tablename": table_name, "is_entry": is_entry}, + ) # insert read at first vars = [global_block.vars[acture_name] for acture_name in acture_names] - block._insert_op(opt_idx + 1, - type="lookup_sparse_table_read", - inputs={"Ids": ids}, - outputs={"Out": vars}, - attrs={ - "tablename": table_name, - "value_names": value_names - }) + block._insert_op( + opt_idx + 1, + type="lookup_sparse_table_read", + inputs={"Ids": ids}, + outputs={"Out": vars}, + attrs={"tablename": table_name, "value_names": value_names}, + ) # append write at last inputs = {"Ids": ids, "In": vars} - block.append_op(type="lookup_sparse_table_write", - inputs=inputs, - outputs={}, - attrs={ - "tablename": table_name, - "value_names": value_names - }) + block.append_op( + type="lookup_sparse_table_write", + inputs=inputs, + outputs={}, + attrs={"tablename": table_name, "value_names": value_names}, + ) op = 
get_op_by_type(main_program.global_block(), "listen_and_serv") @@ -775,27 +858,53 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): for param, blockid in param_blockid_map.items(): opt_block = program.block(blockid) - grad, opt_idx, value_names, value_dims, acture_names, fuse = \ - get_optimizer_values(opt_block) + ( + grad, + opt_idx, + value_names, + value_dims, + acture_names, + fuse, + ) = get_optimizer_values(opt_block) entry_attr = get_entry_attr(param) is_entry = False if entry_attr == "none" else True if fuse: - add_fuse_large_scale_op(opt_block, program.global_block(), - param, value_names, acture_names, grad, - is_entry, opt_idx) + add_fuse_large_scale_op( + opt_block, + program.global_block(), + param, + value_names, + acture_names, + grad, + is_entry, + opt_idx, + ) else: - add_large_scale_op(opt_block, program.global_block(), param, - value_names, acture_names, grad, is_entry, - opt_idx) + add_large_scale_op( + opt_block, + program.global_block(), + param, + value_names, + acture_names, + grad, + is_entry, + opt_idx, + ) else: large_scale_kv_metas = [] for param, blockid in param_blockid_map.items(): opt_block = main_program.block(blockid) - grad, opt_idx, value_names, value_dims, acture_names, fuse = \ - get_optimizer_values(opt_block) + ( + grad, + opt_idx, + value_names, + value_dims, + acture_names, + fuse, + ) = get_optimizer_values(opt_block) entry_attr = get_entry_attr(param) @@ -811,10 +920,18 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): cached_str = ",".join(acture_names + [ids_name]) init_attr_str = get_initializer_attrs(acture_names) - meta_str = ":".join([ - param, names_str, dims_str, mode, grad.name, cached_str, - init_attr_str, entry_attr - ]) + meta_str = ":".join( + [ + param, + names_str, + dims_str, + mode, + grad.name, + cached_str, + init_attr_str, + entry_attr, + ] + ) print("large_scale_metas: {}".format(meta_str)) large_scale_kv_metas.append(meta_str) @@ -822,7 +939,8 @@ def large_scale_sparse_pass(program, main_program, config, is_startup=False): type="lookup_sparse_table_init", inputs=None, outputs=None, - attrs={"large_scale_metas": large_scale_kv_metas}) + attrs={"large_scale_metas": large_scale_kv_metas}, + ) # todo: need delete unused var. 
return program @@ -843,7 +961,8 @@ def get_distributed_from_listen_and_serv(program, origin_program): def delete_unused_in_main_pass(program, config): origin_program = config.get_origin_main_program() sparse_params = get_distributed_from_listen_and_serv( - program, origin_program) + program, origin_program + ) for var in sparse_params: if program.global_block().has_var(var): @@ -854,7 +973,8 @@ def delete_unused_in_main_pass(program, config): def delete_unused_in_startup_pass(program, main_program, config): origin_program = config.get_origin_main_program() sparse_params = get_distributed_from_listen_and_serv( - main_program, origin_program) + main_program, origin_program + ) remove_ops = [] for op in program.global_block().ops: @@ -937,15 +1057,19 @@ def build_pserver_startup_program_pass(program, p_main_program, config): new_inputs = _get_input_map_from_op(pserver_vars, op) if op.type in [ - "gaussian_random", "fill_constant", "uniform_random", - "truncated_gaussian_random" + "gaussian_random", + "fill_constant", + "uniform_random", + "truncated_gaussian_random", ]: op._set_attr("shape", list(new_outputs["Out"].shape)) - program.global_block().append_op(type=op.type, - inputs=new_inputs, - outputs=new_outputs, - attrs=op.all_attrs()) + program.global_block().append_op( + type=op.type, + inputs=new_inputs, + outputs=new_outputs, + attrs=op.all_attrs(), + ) return program @@ -954,8 +1078,9 @@ def add_geo_optimizer_pass(program, config): endpoint = config.get_ps_endpoint() params = [p for p in config.param_grad_ep_mapping[endpoint]["params"]] - sparse_tablenames = get_sparse_tablenames(config.get_origin_main_program(), - False) + sparse_tablenames = get_sparse_tablenames( + config.get_origin_main_program(), False + ) for param in params: _clone_var(program.global_block(), param) @@ -978,15 +1103,17 @@ def add_geo_optimizer_pass(program, config): if origin_varname in sparse_tablenames: sparse_grad_to_param.append(":".join([delta_var_name, param.name])) - delta_var = pserver_block.create_var(name=delta_var_name, - persistable=False, - type=param.type, - dtype=param.dtype, - shape=param.shape) - - per_opt_block.append_op(type="sum", - inputs={"X": [param, delta_var]}, - outputs={"Out": param}) + delta_var = pserver_block.create_var( + name=delta_var_name, + persistable=False, + type=param.type, + dtype=param.dtype, + shape=param.shape, + ) + + per_opt_block.append_op( + type="sum", inputs={"X": [param, delta_var]}, outputs={"Out": param} + ) param_to_block_id.append(delta_var_name + ":" + str(per_opt_block.idx)) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py index 1778ace9c39b5bf6ebe9f7dd8b12eff02200337c..9b762bb9ee7f865f842f2d78327d0cf2e2252b1d 100755 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py @@ -26,7 +26,10 @@ from paddle.fluid.core import CommContext import paddle.fluid.framework as framework from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools -from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher +from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import ( + RoundRobin, + PSDispatcher, +) from paddle.fluid.transpiler.details.program_utils import delete_ops OP_NAME_SCOPE = "op_namescope" @@ -49,9 +52,9 @@ def _get_lr_ops(program): lr_ops 
= [] for index, op in enumerate(program.global_block().ops): role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME)) - if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \ - role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \ - int(OPT_OP_ROLE_ATTR_VALUE): + if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or role_id == int( + LR_SCHED_OP_ROLE_ATTR_VALUE + ) | int(OPT_OP_ROLE_ATTR_VALUE): lr_ops.append(op) return lr_ops @@ -68,12 +71,17 @@ def _has_global_step(lr_ops): def is_sparse_op(op): - if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr( - 'is_distributed') is False: + if ( + op.type in SPARSE_OP_LIST + and op.attr('is_sparse') is True + and op.attr('is_distributed') is False + ): return True - if op.type == "distributed_lookup_table" and op.attr( - 'is_distributed') is False: + if ( + op.type == "distributed_lookup_table" + and op.attr('is_distributed') is False + ): return True return False @@ -83,8 +91,10 @@ def is_distributed_sparse_op(op): if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True: return True - if op.type == "distributed_lookup_table" and op.attr( - 'is_distributed') is True: + if ( + op.type == "distributed_lookup_table" + and op.attr('is_distributed') is True + ): return True return False @@ -108,7 +118,6 @@ def get_sparse_tablenames(program, is_distributed): class MergedVariable: - def __init__(self, merged, ordered, offsets): self.merged_var = merged self.ordered_vars = ordered @@ -128,7 +137,6 @@ def Singleton(cls): @Singleton class CompileTimeStrategy(object): - def __init__(self, main_program, startup_program, strategy, role_maker): self.min_block_size = 81920 @@ -268,20 +276,24 @@ class CompileTimeStrategy(object): def get_origin_ps_startup_program(self): return self.origin_ps_startup_program - def add_tensor_table(self, - feed_var_name, - fetch_var_name="", - startup_program=None, - main_program=None, - tensor_table_class=""): + def add_tensor_table( + self, + feed_var_name, + fetch_var_name="", + startup_program=None, + main_program=None, + tensor_table_class="", + ): self.tensor_table_dict[feed_var_name] = {} self.tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name self.tensor_table_dict[feed_var_name]["fetch_var_name"] = fetch_var_name self.tensor_table_dict[feed_var_name][ - "startup_program"] = startup_program + "startup_program" + ] = startup_program self.tensor_table_dict[feed_var_name]["main_program"] = main_program self.tensor_table_dict[feed_var_name][ - "tensor_table_class"] = tensor_table_class + "tensor_table_class" + ] = tensor_table_class def get_tensor_table_dict(self): return self.tensor_table_dict @@ -289,8 +301,9 @@ class CompileTimeStrategy(object): def get_sparse_varname_on_ps(self, is_distributed, endpoint=None): if not endpoint: endpoint = self.get_ps_endpoint() - varnames = get_sparse_tablenames(self.get_origin_main_program(), - is_distributed) + varnames = get_sparse_tablenames( + self.get_origin_main_program(), is_distributed + ) ps_sparse_varnames = [] for varname in varnames: @@ -313,13 +326,19 @@ class CompileTimeStrategy(object): # check all input for key in op.input_names: if key in [ - "Param", "Grad", "LearningRate", "Beta1Tensor", - "Beta2Tensor" + "Param", + "Grad", + "LearningRate", + "Beta1Tensor", + "Beta2Tensor", ]: continue # check varibale shape related param, e.g: Moment1 - optimize_var_names += self._get_optimizer_param_related_var_name( - op, op.type, key) + optimize_var_names += ( + self._get_optimizer_param_related_var_name( + op, op.type, key + ) + ) return optimize_var_names 
def _get_optimizer_param_related_var_name(self, op, op_type, varkey): @@ -346,18 +365,13 @@ class CompileTimeStrategy(object): pass else: raise ValueError( - "Not supported optimizer for distributed training: %s" % - op_type) + "Not supported optimizer for distributed training: %s" % op_type + ) return related_var_names - def build_ctx(self, - vars, - mapping, - is_grad, - is_sparse, - is_send, - is_distributed=False): - + def build_ctx( + self, vars, mapping, is_grad, is_sparse, is_send, is_distributed=False + ): def get_grad_var_ep(slices): names = [] eps = [] @@ -369,10 +383,12 @@ class CompileTimeStrategy(object): names.append("{}.delta".format(slice.name)) else: names.append(slice.name) - elif is_grad and self.is_sync_mode( - ) and self.get_trainers() > 1: - names.append("{}.trainer_{}".format(slice.name, - self.get_role_id())) + elif ( + is_grad and self.is_sync_mode() and self.get_trainers() > 1 + ): + names.append( + "{}.trainer_{}".format(slice.name, self.get_role_id()) + ) else: names.append(slice.name) @@ -400,21 +416,33 @@ class CompileTimeStrategy(object): trainer_id = self.get_role_id() aggregate = True - ctx = CommContext(name, names, eps, sections, origin_varnames, - trainer_id, aggregate, is_sparse, is_distributed, []) + ctx = CommContext( + name, + names, + eps, + sections, + origin_varnames, + trainer_id, + aggregate, + is_sparse, + is_distributed, + [], + ) return ctx def get_trainer_send_context(self): send_ctx = {} - distibuted_varnames = get_sparse_tablenames(self.origin_main_program, - True) + distibuted_varnames = get_sparse_tablenames( + self.origin_main_program, True + ) idx = 0 if not self.is_geo_mode(): for merged in self.merged_dense_pairs: grad = merged[1] - ctx = self.build_ctx(grad, self.grad_var_mapping, True, False, - True) + ctx = self.build_ctx( + grad, self.grad_var_mapping, True, False, True + ) send_ctx[ctx.var_name()] = ctx for merged in self.merged_sparse_pairs: @@ -423,10 +451,18 @@ class CompileTimeStrategy(object): param_name = param.merged_var.name - is_distributed = True if param_name in distibuted_varnames else False - - ctx = self.build_ctx(grad, self.grad_var_mapping, True, True, - True, is_distributed) + is_distributed = ( + True if param_name in distibuted_varnames else False + ) + + ctx = self.build_ctx( + grad, + self.grad_var_mapping, + True, + True, + True, + is_distributed, + ) send_ctx[ctx.var_name()] = ctx idx += 1 @@ -437,21 +473,39 @@ class CompileTimeStrategy(object): for pairs in self.origin_sparse_pairs: param, grad = pairs param_name = param.name - is_distributed = True if param_name in distibuted_varnames else False - - param_ctx = self.build_ctx(param, self.param_var_mapping, False, - True, True, is_distributed) - grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True, - True, True, is_distributed) - - ctx = CommContext(param_ctx.var_name(), - param_ctx.split_varnames(), - param_ctx.split_endpoints(), - param_ctx.sections(), - grad_ctx.origin_varnames(), - param_ctx.trainer_id(), param_ctx.aggregate(), - param_ctx.is_sparse(), - param_ctx.is_distributed(), []) + is_distributed = ( + True if param_name in distibuted_varnames else False + ) + + param_ctx = self.build_ctx( + param, + self.param_var_mapping, + False, + True, + True, + is_distributed, + ) + grad_ctx = self.build_ctx( + grad, + self.grad_var_mapping, + True, + True, + True, + is_distributed, + ) + + ctx = CommContext( + param_ctx.var_name(), + param_ctx.split_varnames(), + param_ctx.split_endpoints(), + param_ctx.sections(), + grad_ctx.origin_varnames(), + 
param_ctx.trainer_id(), + param_ctx.aggregate(), + param_ctx.is_sparse(), + param_ctx.is_distributed(), + [], + ) send_ctx[ctx.var_name()] = ctx idx += 1 @@ -461,24 +515,34 @@ class CompileTimeStrategy(object): def get_communicator_send_context(self): send_ctx = {} - distibuted_varnames = get_sparse_tablenames(self.origin_main_program, - True) + distibuted_varnames = get_sparse_tablenames( + self.origin_main_program, True + ) idx = 0 if self.is_geo_mode(): for pairs in self.merged_dense_pairs: param = pairs[0] - ctx = self.build_ctx(param, self.param_var_mapping, False, - False, True) + ctx = self.build_ctx( + param, self.param_var_mapping, False, False, True + ) send_ctx[ctx.var_name()] = ctx for pairs in self.merged_sparse_pairs: param = pairs[0] param_name = param.merged_var.name - is_distributed = True if param_name in distibuted_varnames else False - - ctx = self.build_ctx(param, self.param_var_mapping, False, True, - True, is_distributed) + is_distributed = ( + True if param_name in distibuted_varnames else False + ) + + ctx = self.build_ctx( + param, + self.param_var_mapping, + False, + True, + True, + is_distributed, + ) send_ctx[ctx.var_name()] = ctx idx += 1 name, ctx = self._step_ctx(idx) @@ -486,18 +550,27 @@ class CompileTimeStrategy(object): else: for merged in self.merged_dense_pairs: grad = merged[1] - ctx = self.build_ctx(grad, self.grad_var_mapping, True, False, - True) + ctx = self.build_ctx( + grad, self.grad_var_mapping, True, False, True + ) send_ctx[ctx.var_name()] = ctx for merged in self.merged_sparse_pairs: param, grad = merged param_name = param.merged_var.name - is_distributed = True if param_name in distibuted_varnames else False - - ctx = self.build_ctx(grad, self.grad_var_mapping, True, True, - True, is_distributed) + is_distributed = ( + True if param_name in distibuted_varnames else False + ) + + ctx = self.build_ctx( + grad, + self.grad_var_mapping, + True, + True, + True, + is_distributed, + ) send_ctx[ctx.var_name()] = ctx idx += 1 @@ -505,13 +578,14 @@ class CompileTimeStrategy(object): send_ctx[name] = ctx return send_ctx - def get_communicator_recv_context(self, - recv_type=1, - use_origin_program=False): + def get_communicator_recv_context( + self, recv_type=1, use_origin_program=False + ): # recv_type # 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. 
ALL - distibuted_varnames = get_sparse_tablenames(self.origin_main_program, - True) + distibuted_varnames = get_sparse_tablenames( + self.origin_main_program, True + ) sparse_varnames = [] for pairs in self.origin_sparse_pairs: param, grad = pairs @@ -521,26 +595,33 @@ class CompileTimeStrategy(object): sparse_recv_ctx = {} distributed_recv_ctx = {} - variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs + variables_pairs = ( + self.merged_variables_pairs + if not use_origin_program + else self.origin_merged_variables_pairs + ) for merged in variables_pairs: params = merged[0] if params.merged_var.name in sparse_varnames: continue - ctx = self.build_ctx(params, self.param_var_mapping, False, False, - False, False) + ctx = self.build_ctx( + params, self.param_var_mapping, False, False, False, False + ) dense_recv_ctx[ctx.var_name()] = ctx for pairs in self.origin_sparse_pairs: param, grad = pairs if param.name in distibuted_varnames: - ctx = self.build_ctx(param, self.param_var_mapping, False, True, - False, True) + ctx = self.build_ctx( + param, self.param_var_mapping, False, True, False, True + ) distributed_recv_ctx[ctx.var_name()] = ctx else: - ctx = self.build_ctx(param, self.param_var_mapping, False, True, - False, False) + ctx = self.build_ctx( + param, self.param_var_mapping, False, True, False, False + ) sparse_recv_ctx[ctx.var_name()] = ctx if recv_type == 1: @@ -564,28 +645,44 @@ class CompileTimeStrategy(object): idx = 0 distibuted_varnames = get_sparse_tablenames( - self.origin_main_program, True) + self.origin_main_program, True + ) for merged in self.merged_sparse_pairs: param, grad = merged grad_name = grad.merged_var.name param_name = param.merged_var.name - is_distributed = True if param_name in distibuted_varnames else False + is_distributed = ( + True if param_name in distibuted_varnames else False + ) var = self.origin_main_program.global_block().vars[ - grad.merged_var.name] + grad.merged_var.name + ] var_numel = reduce(lambda x, y: x * y, var.shape[1:]) - sparse_ctx = CommContext(grad_name, [grad_name], - ["127.0.0.1:6071"], [var_numel], - [grad_name], trainer_id, True, True, - is_distributed, idx, False, False, -1, - []) + sparse_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + [grad_name], + trainer_id, + True, + True, + is_distributed, + idx, + False, + False, + -1, + [], + ) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx if len(send_ctx) == 0: raise ValueError( - "GeoSGD require sparse parameters in your net.") + "GeoSGD require sparse parameters in your net." 
+ ) if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker(): name, ctx = self._step_ctx(idx) @@ -595,12 +692,14 @@ class CompileTimeStrategy(object): else: return self.get_the_one_send_context(split_dense_table) - def get_dense_send_context(self, - send_ctx, - idx, - merged_dense_pairs, - trainer_id, - split_dense_table=False): + def get_dense_send_context( + self, + send_ctx, + idx, + merged_dense_pairs, + trainer_id, + split_dense_table=False, + ): if len(merged_dense_pairs) < 1: return idx if not split_dense_table: @@ -610,52 +709,87 @@ class CompileTimeStrategy(object): grad = merged[1] origin_varnames.append(grad.merged_var.name) var = self.origin_main_program.global_block().vars[ - grad.merged_var.name] + grad.merged_var.name + ] var_numel += reduce(lambda x, y: x * y, var.shape) grad_name = "Dense@Grad" trainer_id = self.get_role_id() aggregate = True - dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"], - [var_numel], origin_varnames, trainer_id, - aggregate, False, False, idx, False, False, - -1, []) + dense_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + origin_varnames, + trainer_id, + aggregate, + False, + False, + idx, + False, + False, + -1, + [], + ) send_ctx[grad_name] = dense_ctx idx += 1 else: for merged in merged_dense_pairs: grad = merged[1] origin_varname = grad.merged_var.name - var = self.origin_main_program.global_block( - ).vars[origin_varname] + var = self.origin_main_program.global_block().vars[ + origin_varname + ] var_numel = reduce(lambda x, y: x * y, var.shape) grad_name = origin_varname aggregate = True - dense_ctx = CommContext(grad_name, [grad_name], - ["127.0.0.1:6071"], [var_numel], - [origin_varname], trainer_id, aggregate, - False, False, idx, False, False, -1, []) + dense_ctx = CommContext( + grad_name, + [grad_name], + ["127.0.0.1:6071"], + [var_numel], + [origin_varname], + trainer_id, + aggregate, + False, + False, + idx, + False, + False, + -1, + [], + ) send_ctx[grad_name] = dense_ctx idx += 1 return idx - def get_the_one_send_context(self, - split_dense_table=False, - use_origin_program=False, - ep_list=None): + def get_the_one_send_context( + self, split_dense_table=False, use_origin_program=False, ep_list=None + ): if ep_list is None: ep_list = ["127.0.0.1:6071"] send_ctx = {} trainer_id = self.get_role_id() idx = 0 - merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs - merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs + merged_dense_pairs = ( + self.origin_merged_dense_pairs + if use_origin_program + else self.merged_dense_pairs + ) + merged_sparse_pairs = ( + self.origin_merged_sparse_pairs + if use_origin_program + else self.merged_sparse_pairs + ) - idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs, - trainer_id, split_dense_table) + idx += self.get_dense_send_context( + send_ctx, idx, merged_dense_pairs, trainer_id, split_dense_table + ) - distibuted_varnames = get_sparse_tablenames(self.origin_main_program, - True) + distibuted_varnames = get_sparse_tablenames( + self.origin_main_program, True + ) for merged in merged_sparse_pairs: param, grad = merged grad_name = grad.merged_var.name @@ -665,17 +799,33 @@ class CompileTimeStrategy(object): for i in range(len(ep_list)): splited_varname.append("{}.block{}".format(param_name, i)) - is_distributed = True if param_name in distibuted_varnames else False + is_distributed = ( + True if param_name in 
distibuted_varnames else False + ) var = self.origin_main_program.global_block().vars[ - grad.merged_var.name] + grad.merged_var.name + ] shape = list(var.shape) shape[0] = 0 if is_distributed else shape[0] - sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape, - [grad_name], trainer_id, True, True, - is_distributed, idx, False, False, -1, []) + sparse_ctx = CommContext( + grad_name, + splited_varname, + ep_list, + shape, + [grad_name], + trainer_id, + True, + True, + is_distributed, + idx, + False, + False, + -1, + [], + ) idx += 1 send_ctx[sparse_ctx.var_name()] = sparse_ctx @@ -686,15 +836,15 @@ class CompileTimeStrategy(object): return send_ctx - def get_the_one_recv_context(self, - is_dense=True, - split_dense_table=False, - use_origin_program=False): + def get_the_one_recv_context( + self, is_dense=True, split_dense_table=False, use_origin_program=False + ): recv_id_maps = {} if is_dense: send_ctx = self.get_the_one_send_context( split_dense_table=split_dense_table, - use_origin_program=use_origin_program) + use_origin_program=use_origin_program, + ) for idx, (name, ctx) in enumerate(send_ctx.items()): if ctx.is_sparse(): continue @@ -752,8 +902,22 @@ class CompileTimeStrategy(object): endpoints = self.get_ps_endpoints() sections = [1] * len(endpoints) names = [name] * len(endpoints) - ctx = CommContext(name, names, endpoints, sections, [name], trainer_id, - True, False, False, idx, True, False, -1, []) + ctx = CommContext( + name, + names, + endpoints, + sections, + [name], + trainer_id, + True, + False, + False, + idx, + True, + False, + -1, + [], + ) return name, ctx def _create_vars_from_blocklist(self, block_list): @@ -784,20 +948,23 @@ class CompileTimeStrategy(object): if len(split) == 1: var_mapping[varname] = [orig_var] - self.var_distributed.add_distributed_var(origin_var=orig_var, - slice_var=orig_var, - block_id=0, - offset=0, - is_slice=False, - vtype="Param") + self.var_distributed.add_distributed_var( + origin_var=orig_var, + slice_var=orig_var, + block_id=0, + offset=0, + is_slice=False, + vtype="Param", + ) else: var_mapping[varname] = [] orig_shape = orig_var.shape orig_dim1_flatten = 1 if len(orig_shape) >= 2: - orig_dim1_flatten = reduce(lambda x, y: x * y, - orig_shape[1:]) + orig_dim1_flatten = reduce( + lambda x, y: x * y, orig_shape[1:] + ) for i, block in enumerate(split): size = block[1] @@ -813,7 +980,8 @@ class CompileTimeStrategy(object): dtype=orig_var.dtype, type=orig_var.type, lod_level=orig_var.lod_level, - persistable=False) + persistable=False, + ) var_mapping[varname].append(slice_var) self.var_distributed.add_distributed_var( @@ -822,7 +990,8 @@ class CompileTimeStrategy(object): block_id=i, offset=-1, is_slice=False, - vtype="Param") + vtype="Param", + ) return var_mapping @@ -871,11 +1040,9 @@ class CompileTimeStrategy(object): self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) - def _slice_variable(self, - var_list, - slice_count, - min_block_size, - uniform=False): + def _slice_variable( + self, var_list, slice_count, min_block_size, uniform=False + ): """ We may need to split dense tensor to one or more blocks and put them equally onto parameter server. 
One block is a sub-tensor @@ -906,7 +1073,8 @@ class CompileTimeStrategy(object): else: split_count = slice_count max_pserver_count = int( - math.floor(var_numel / float(min_block_size))) + math.floor(var_numel / float(min_block_size)) + ) if max_pserver_count == 0: max_pserver_count = 1 if max_pserver_count < slice_count: @@ -922,10 +1090,12 @@ class CompileTimeStrategy(object): # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) for block_id in range(split_count): - curr_block_size = min(block_size, - var_numel - ((block_id) * block_size)) - block = vars_metatools.VarBlock(var.name, block_id, - curr_block_size) + curr_block_size = min( + block_size, var_numel - ((block_id) * block_size) + ) + block = vars_metatools.VarBlock( + var.name, block_id, curr_block_size + ) blocks.append(str(block)) else: block_size = var.shape[0] / slice_count @@ -966,13 +1136,13 @@ class CompileTimeStrategy(object): # when we slice var up into blocks, we will slice the var according to # pserver services' count. A pserver may have two or more listening ports. - grad_blocks = self._slice_variable(grad_list, - len(self.get_ps_endpoints()), - min_block_size, uniform) + grad_blocks = self._slice_variable( + grad_list, len(self.get_ps_endpoints()), min_block_size, uniform + ) - param_blocks = self._slice_variable(param_list, - len(self.get_ps_endpoints()), - min_block_size, uniform) + param_blocks = self._slice_variable( + param_list, len(self.get_ps_endpoints()), min_block_size, uniform + ) return param_blocks, grad_blocks def _var_slice_and_distribute(self): @@ -982,15 +1152,17 @@ class CompileTimeStrategy(object): # 3. grad_param_mapping : grad.blockx->param.blockx # 4. param_grad_ep_mapping : ep->{"params" : [], "grads" : [] } - dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs, - self.min_block_size, False) - sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs, - self.min_block_size, True) + dps, dgs = self._get_param_grad_blocks( + self.merged_dense_pairs, self.min_block_size, False + ) + sps, sgs = self._get_param_grad_blocks( + self.merged_sparse_pairs, self.min_block_size, True + ) param_blocks = dps + sps grad_blocks = dgs + sgs - assert (len(grad_blocks) == len(param_blocks)) + assert len(grad_blocks) == len(param_blocks) # origin_param_name->[splited_param_vars] self.param_var_mapping = self._create_vars_from_blocklist(param_blocks) @@ -1001,8 +1173,9 @@ class CompileTimeStrategy(object): for g, p in zip(grad_blocks, param_blocks): g_name, g_bid, _ = g.split(":") p_name, p_bid, _ = p.split(":") - self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ - self.param_var_mapping[p_name][int(p_bid)] + self.grad_param_mapping[ + self.grad_var_mapping[g_name][int(g_bid)] + ] = self.param_var_mapping[p_name][int(p_bid)] print_maps = {} for k, v in self.grad_param_mapping.items(): @@ -1011,10 +1184,8 @@ class CompileTimeStrategy(object): # create mapping of endpoint->split var to create pserver side program self.param_grad_ep_mapping = collections.OrderedDict() [ - self.param_grad_ep_mapping.update({ep: { - "params": [], - "grads": [] - }}) for ep in self.get_ps_endpoints() + self.param_grad_ep_mapping.update({ep: {"params": [], "grads": []}}) + for ep in self.get_ps_endpoints() ] def _build_var_distributed(self): @@ -1055,7 +1226,8 @@ class CompileTimeStrategy(object): for merged in self.merged_variables_pairs: m_param, m_grad = merged self.merged_variable_map[ - m_param.merged_var.name] = m_param.merged_var + 
m_param.merged_var.name + ] = m_param.merged_var self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var param_merges = [] @@ -1096,8 +1268,10 @@ class CompileTimeStrategy(object): for op in block.ops: if _is_opt_role_op(op): # delete clip op from opt_ops when run in Parameter Server mode - if OP_NAME_SCOPE in op.all_attrs() \ - and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): + if ( + OP_NAME_SCOPE in op.all_attrs() + and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE) + ): op._set_attr("op_role", role_id) continue if op.attr(OP_ROLE_VAR_ATTR_NAME): @@ -1105,8 +1279,10 @@ class CompileTimeStrategy(object): grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] if param_name not in optimize_params: optimize_params.add(param_name) - param_grad = (origin_var_dict[param_name], - origin_var_dict[grad_name]) + param_grad = ( + origin_var_dict[param_name], + origin_var_dict[grad_name], + ) if param_name in sparse_varnames: sparse_param_grads.append(param_grad) @@ -1117,8 +1293,10 @@ class CompileTimeStrategy(object): def _get_sparse_varnames(): varnames = [] for op in origin_program.global_block().ops: - if op.type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + op.type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] varnames.append(param_name) @@ -1126,7 +1304,8 @@ class CompileTimeStrategy(object): sparse_varnames = _get_sparse_varnames() sparse_param_grads, dense_param_grads = _get_params_grads( - sparse_varnames) + sparse_varnames + ) return sparse_param_grads, dense_param_grads @@ -1160,8 +1339,9 @@ def _is_opt_role_op(op): # optimize op_maker = core.op_proto_and_checker_maker optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize - if op_maker.kOpRoleAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role): + if op_maker.kOpRoleAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(optimize_role): return True return False @@ -1172,11 +1352,14 @@ def _get_optimize_ops(_program): for op in block.ops: if _is_opt_role_op(op): # delete clip op from opt_ops when run in Parameter Server mode - if OP_NAME_SCOPE in op.all_attrs() \ - and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE): + if ( + OP_NAME_SCOPE in op.all_attrs() + and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE) + ): op._set_attr( "op_role", - int(core.op_proto_and_checker_maker.OpRole.Backward)) + int(core.op_proto_and_checker_maker.OpRole.Backward), + ) continue opt_ops.append(op) return opt_ops @@ -1185,17 +1368,28 @@ def _get_optimize_ops(_program): def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps): if hasattr(compiled_config.origin_main_program, 'lr_sheduler'): from paddle.optimizer.lr import LRScheduler - assert isinstance(compiled_config.origin_main_program.lr_sheduler, - LRScheduler), "must be LRScheduler" + + assert isinstance( + compiled_config.origin_main_program.lr_sheduler, LRScheduler + ), "must be LRScheduler" ops = _get_optimize_ops(compiled_config.origin_main_program) lr_param_dict = _get_lr_param_dict(ops) - lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program( - compiled_config.origin_main_program.lr_sheduler, lr_param_dict, - lr_decay_steps) - compiled_config.add_tensor_table("@LR_DECAY_COUNTER@", lr_name, - lr_decay_startup_program, - lr_decay_main_program, - "GlobalStepTable") + ( + lr_decay_main_program, + lr_decay_startup_program, + lr_name, + ) 
= _get_lr_sheduler_program( + compiled_config.origin_main_program.lr_sheduler, + lr_param_dict, + lr_decay_steps, + ) + compiled_config.add_tensor_table( + "@LR_DECAY_COUNTER@", + lr_name, + lr_decay_startup_program, + lr_decay_main_program, + "GlobalStepTable", + ) def _get_lr_param_dict(opt_ops): @@ -1211,11 +1405,26 @@ def _get_lr_param_dict(opt_ops): def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps): schedler_decay = [ - 'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay' + 'NoamDecay', + 'NaturalExpDecay', + 'InverseTimeDecay', + 'ExponentialDecay', ] - from paddle.optimizer.lr import ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay - from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay + from paddle.optimizer.lr import ( + ExponentialDecay, + NoamDecay, + PiecewiseDecay, + NaturalExpDecay, + InverseTimeDecay, + ) + from paddle.fluid.layers.learning_rate_scheduler import ( + exponential_decay, + noam_decay, + piecewise_decay, + natural_exp_decay, + inverse_time_decay, + ) decay_main_program = fluid.framework.Program() decay_startup_program = fluid.framework.Program() @@ -1230,13 +1439,16 @@ def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps): "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) elif isinstance(lr_sheduler, NoamDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0) lr_name = lr.name - logging.warn("NoamDecay is set, warmup steps is [ %d ]" % - lr_sheduler.warmup_steps) + logging.warn( + "NoamDecay is set, warmup steps is [ %d ]" + % lr_sheduler.warmup_steps + ) elif isinstance(lr_sheduler, NaturalExpDecay): with fluid.program_guard(decay_main_program, decay_startup_program): lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True) @@ -1246,22 +1458,27 @@ def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps): "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) elif isinstance(lr_sheduler, InverseTimeDecay): with fluid.program_guard(decay_main_program, decay_startup_program): - lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma, - True) + lr = inverse_time_decay( + 1.0, lr_decay_steps, lr_sheduler.gamma, True + ) lr_name = lr.name logging.warn( "InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n" "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n " "\t strategy.a_sync = True \n" "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n" - % lr_decay_steps) + % lr_decay_steps + ) else: raise ValueError( - "Not supported current LearningRate strategy, please use follow decay strategy: {}" - .format(schedler_decay)) + "Not supported current LearningRate strategy, please use follow decay strategy: {}".format( + schedler_decay + ) + ) return decay_main_program, decay_startup_program, lr_name @@ -1273,15 +1490,15 @@ def _get_varname_parts(varname): block_part = "" trainer_idx = varname.find(".trainer_") if trainer_idx >= 0: - trainer_part = varname[trainer_idx 
+ 1:] + trainer_part = varname[trainer_idx + 1 :] else: trainer_idx = len(varname) block_index = varname.find(".block") if block_index >= 0: - block_part = varname[block_index + 1:trainer_idx] + block_part = varname[block_index + 1 : trainer_idx] else: block_index = len(varname) - orig_var_name = varname[0:min(block_index, trainer_idx)] + orig_var_name = varname[0 : min(block_index, trainer_idx)] return orig_var_name, block_part, trainer_part diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py index acd337720ca91580bae1c93b8eb11f104561c13e..fa818e3c413003d2eb4c5a851023ed8b38967ff4 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py @@ -24,9 +24,13 @@ import paddle.fluid.core as core import paddle.fluid.framework as framework from paddle.fluid.transpiler.details.program_utils import delete_ops -from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + _get_optimize_ops, +) from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_lr_ops -from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames +from paddle.fluid.incubate.fleet.parameter_server.ir.public import ( + get_sparse_tablenames, +) from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode OP_NAME_SCOPE = "op_namescope" @@ -42,7 +46,7 @@ op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"} SPARSE_GRAD_OP_TYPE_DICT = { "lookup_table_grad": "W", - "lookup_table_v2_grad": "W" + "lookup_table_v2_grad": "W", } DEVICE_LIST = ["cpu", "gpu", "xpu"] COMMUNICATE_OPS_TYPE = ["send", "recv", "fetch_barrier", "send_barrier"] @@ -50,7 +54,6 @@ DEFAULT_DEVICE = 'cpu' def delete_optimizer_pass(program, config): - def _delete_optimizer_op_and_vars(_program, optimize_ops): optimize_vars = [] optimize_op_role_vars = [] @@ -75,14 +78,17 @@ def delete_optimizer_pass(program, config): def _add_lr_var(main_program, compiled_config): # Todo: hard code for pe - lr_var = compiled_config.origin_main_program.global_block( - ).vars["learning_rate_0"] - main_program.global_block().create_var(name=lr_var.name, - shape=lr_var.shape, - dtype=lr_var.dtype, - type=lr_var.type, - lod_level=lr_var.lod_level, - persistable=True) + lr_var = compiled_config.origin_main_program.global_block().vars[ + "learning_rate_0" + ] + main_program.global_block().create_var( + name=lr_var.name, + shape=lr_var.shape, + dtype=lr_var.dtype, + type=lr_var.type, + lod_level=lr_var.lod_level, + persistable=True, + ) optimizer_ops = _get_optimize_ops(program) lr_ops = _get_lr_ops(program) @@ -98,7 +104,8 @@ def delete_optimizer_pass(program, config): def distributed_ops_pass(program, config, use_ps_gpu=False): trainer_id = config.get_role_id() send_ctx = config.get_the_one_send_context( - split_dense_table=config.is_heter_ps_mode) + split_dense_table=config.is_heter_ps_mode + ) w_2_table_id = {} emb_size = {} @@ -108,8 +115,10 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): push_sparse_ops = {} ops = {} for op in _program.global_block().ops: - if op.type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + op.type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): 
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] if config.is_heter_ps_mode: # trick for matchnet, need to modify @@ -123,15 +132,16 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): for op in _program.global_block().ops: if op.type in SPARSE_GRAD_OP_TYPE_DICT.keys(): param_name = op.input(SPARSE_GRAD_OP_TYPE_DICT[op.type])[0] - if param_name in pull_sparse_ids and op.input( - "Ids")[0] in pull_sparse_ids[param_name]: + if ( + param_name in pull_sparse_ids + and op.input("Ids")[0] in pull_sparse_ids[param_name] + ): ops = push_sparse_ops.get(param_name, []) ops.append(op) push_sparse_ops[param_name] = ops return pull_sparse_ops, push_sparse_ops def _pull_sparse_fuse(_program, pull_sparse_ops, use_ps_gpu): - def dag_check_up_and_reorder(program, inputs, outputs): global_block = program.global_block() min_output_index = len(global_block.ops) @@ -248,7 +258,8 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): if table_id == -1: raise ValueError( - "can not find suitable sparse table, please check") + "can not find suitable sparse table, please check" + ) w_2_table_id[param] = table_id padding_idx = ops[0].attr("padding_idx") @@ -279,8 +290,9 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): ins = op.input(op.input_names[i]) for out_id, out_var in enumerate(outputs): if out_var.name in ins: - outputs_idxs[out_id] = min(idx, - outputs_idxs[out_id]) + outputs_idxs[out_id] = min( + idx, outputs_idxs[out_id] + ) if min(outputs_idxs) - max(inputs_idxs) >= 1: if max(inputs_idxs) == -1: @@ -292,32 +304,28 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): program.global_block()._insert_op( index=distributed_idx, type="pull_gpups_sparse", - inputs={ - "Ids": inputs, - 'W': w - }, + inputs={"Ids": inputs, 'W': w}, outputs={"Out": outputs}, attrs={ "size": [w.shape[1] for i in inputs], "is_distributed": True, - "is_sparse": True - }) + "is_sparse": True, + }, + ) else: program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": inputs, - 'W': w - }, + inputs={"Ids": inputs, 'W': w}, outputs={"Outputs": outputs}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, - "op_device": op_device - }) + "op_device": op_device, + }, + ) else: for i in range(len(inputs_idxs)): distributed_idx = op_idxs[i] @@ -325,18 +333,16 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": [inputs[i]], - 'W': w - }, + inputs={"Ids": [inputs[i]], 'W': w}, outputs={"Outputs": [outputs[i]]}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, - "op_device": op_device - }) + "op_device": op_device, + }, + ) def _push_sparse_fuse(_program, push_sparse_ops, use_ps_gpu): if use_ps_gpu: @@ -357,8 +363,10 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): if len(entry) == 3 and entry[0] == 'show_click_entry': show_var_name = entry[1] click_var_name = entry[2] - if show_var_name in program.global_block( - ).vars and click_var_name in program.global_block().vars: + if ( + show_var_name in program.global_block().vars + and click_var_name in program.global_block().vars + ): show = program.global_block().vars[show_var_name] clk = program.global_block().vars[click_var_name] use_entry = True @@ -373,7 +381,8 @@ def distributed_ops_pass(program, config, 
use_ps_gpu=False): name="show", dtype=core.VarDesc.VarType.INT64, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) program.global_block()._insert_op( index=0, type='fill_constant', @@ -383,14 +392,16 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): 'shape': [1], 'dtype': show.dtype, 'value': 1, - #OP_ROLE_KEY: OpRole.Forward - }) + # OP_ROLE_KEY: OpRole.Forward + }, + ) clk = program.global_block().create_var( name="clk", dtype=core.VarDesc.VarType.INT64, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) program.global_block()._insert_op( index=0, type='fill_constant', @@ -400,8 +411,9 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): 'shape': [1], 'dtype': clk.dtype, 'value': 0, - #OP_ROLE_KEY: OpRole.Forward - }) + # OP_ROLE_KEY: OpRole.Forward + }, + ) for param, ops in push_sparse_ops.items(): all_ops = program.global_block().ops @@ -423,35 +435,35 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): for idx in op_idxs[::-1]: program.global_block()._remove_op(idx) - -# if use_ps_gpu: -# program.global_block().append_op( -# type="push_box_sparse", -# inputs={"Ids": inputs, -# 'Out': outputs}, -# outputs={"Out": outputs}, -# attrs={ -# "size": w.shape[1], -# "is_distributed": True, -# "is_sparse": True -# }) -# else: - program.global_block().append_op(type="distributed_push_sparse", - inputs={ - "Ids": inputs, - 'W': w, - "Outputs": outputs, - "Shows": show, - "Clicks": clk - }, - outputs={"Outputs": outputs}, - attrs={ - "is_distributed": - is_distributed, - "padding_idx": padding_idx, - "table_id": table_id, - "size": emb_size[param] - }) + # if use_ps_gpu: + # program.global_block().append_op( + # type="push_box_sparse", + # inputs={"Ids": inputs, + # 'Out': outputs}, + # outputs={"Out": outputs}, + # attrs={ + # "size": w.shape[1], + # "is_distributed": True, + # "is_sparse": True + # }) + # else: + program.global_block().append_op( + type="distributed_push_sparse", + inputs={ + "Ids": inputs, + 'W': w, + "Outputs": outputs, + "Shows": show, + "Clicks": clk, + }, + outputs={"Outputs": outputs}, + attrs={ + "is_distributed": is_distributed, + "padding_idx": padding_idx, + "table_id": table_id, + "size": emb_size[param], + }, + ) pull_sparse_ops, push_sparse_ops = _get_pull_sparse_ops(program) _pull_sparse_fuse(program, pull_sparse_ops, use_ps_gpu) @@ -476,40 +488,40 @@ def append_send_ops_pass(program, config): dummy_output = [] if mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: dummy_output = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) - - program.global_block().append_op(type="send", - inputs={"X": send_input_vars}, - outputs={"Out": dummy_output}, - attrs={ - "send_varnames": [queue], - "is_sparse": - is_sparse, - "table_id": - table_id, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + name=framework.generate_control_dev_var_name() + ) + + program.global_block().append_op( + type="send", + inputs={"X": send_input_vars}, + outputs={"Out": dummy_output}, + attrs={ + "send_varnames": [queue], + "is_sparse": is_sparse, + "table_id": table_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return dummy_output def _append_barrier_op(dummys): - program.global_block().append_op(type="send_barrier", - inputs={"X": dummys}, - outputs={"Out": []}, - attrs={ - "trainer_id": - trainer_id, - "half_async": - True, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + program.global_block().append_op( + type="send_barrier", + inputs={"X": dummys}, + 
outputs={"Out": []}, + attrs={ + "trainer_id": trainer_id, + "half_async": True, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) dummys = [] sends = config.get_the_one_trainer_send_context( - split_dense_table=config.is_heter_ps_mode) + split_dense_table=config.is_heter_ps_mode + ) for merged_name, send in sends.items(): if send.is_sparse() and not config.is_geo_mode(): @@ -517,8 +529,10 @@ def append_send_ops_pass(program, config): is_sparse = 1 if send.is_sparse() else 0 is_sparse = 2 if send.is_distributed() else is_sparse dummys.append( - _append_send_op(send.origin_varnames(), merged_name, is_sparse, - send.table_id())) + _append_send_op( + send.origin_varnames(), merged_name, is_sparse, send.table_id() + ) + ) if mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: _append_barrier_op(dummys) @@ -532,19 +546,19 @@ def init_from_server_pass(program, config): return program fetch_barrier_out = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) - - program.global_block().append_op(type="fetch_barrier", - inputs={}, - outputs={"Out": fetch_barrier_out}, - attrs={ - "endpoints": - config.get_ps_endpoints(), - "trainer_id": - config.get_role_id(), - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + name=framework.generate_control_dev_var_name() + ) + + program.global_block().append_op( + type="fetch_barrier", + inputs={}, + outputs={"Out": fetch_barrier_out}, + attrs={ + "endpoints": config.get_ps_endpoints(), + "trainer_id": config.get_role_id(), + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return program @@ -566,14 +580,16 @@ def fake_init_ops_pass(program, config): table_param_init_op.append(op) init_op_num = len(table_param_init_op) if init_op_num != 1: - raise ValueError("table init op num should be 1, now is " + - str(init_op_num)) + raise ValueError( + "table init op num should be 1, now is " + str(init_op_num) + ) table_init_op = table_param_init_op[0] program.global_block().append_op( type="fake_init", inputs={}, outputs={"Out": table_var}, - attrs={"shape": table_init_op.attr('shape')}) + attrs={"shape": table_init_op.attr('shape')}, + ) delete_ops(program.global_block(), table_param_init_op) sparse_tables = _get_sparse_table_names() @@ -583,7 +599,6 @@ def fake_init_ops_pass(program, config): def ps_gpu_pass(program): - def _add_push_box_sparse_op(program): op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() backward = core.op_proto_and_checker_maker.OpRole.Backward @@ -591,7 +606,8 @@ def ps_gpu_pass(program): if op.type != "pull_box_sparse" and op.type != "pull_gpups_sparse": continue grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - op.desc, set(), []) + op.desc, set(), [] + ) for op_desc in grad_op_desc: new_op_desc = program.global_block().desc.append_op() new_op_desc.copy_from(op_desc) @@ -700,8 +716,11 @@ def delete_extra_optimizes_pass(program, config): def find_heter_ops(program, default_device="cpu"): if default_device not in DEVICE_LIST: - raise ValueError("Given device {} is not in device list {}".format( - default_device, DEVICE_LIST)) + raise ValueError( + "Given device {} is not in device list {}".format( + default_device, DEVICE_LIST + ) + ) def _is_heter_op(op, current_heter_device, default_device="cpu"): heter_devices = list(DEVICE_LIST) @@ -710,10 +729,13 @@ def find_heter_ops(program, default_device="cpu"): op_type = op.type if op_device in heter_devices: return True - elif op_type in COMMUNICATE_OPS_TYPE and current_heter_device != default_device: + elif ( + op_type 
in COMMUNICATE_OPS_TYPE + and current_heter_device != default_device + ): # for distributed communciate ops: send & recv & barrier etc. # Todo: need update this method - #op._set_attr('op_device', current_heter_device) + # op._set_attr('op_device', current_heter_device) return True elif op_device == None or op_device == default_device: op._set_attr('op_device', default_device) @@ -748,16 +770,20 @@ def find_heter_ops(program, default_device="cpu"): op = op_list[i] if "_grad" in op.type: forward_op_type = op.type.split("_grad")[0] - if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \ - and op.attr('remote_prefetch') is True: + if ( + forward_op_type in SPARSE_OP_TYPE_DICT.keys() + and op.attr('remote_prefetch') is True + ): param_name = op.input(SPARSE_OP_TYPE_DICT[forward_op_type])[0] if param_name in var2idx: ## insert sum op & remove sum op from var2idx and origin place op_list = list(block.ops) sum_op = op_list[var2idx[param_name]] sum_op_inputs = { - sum_op.input_names[0]: - [block.vars[input] for input in sum_op.input_arg_names] + sum_op.input_names[0]: [ + block.vars[input] + for input in sum_op.input_arg_names + ] } sum_op_outputs = { sum_op.output_names[0]: [ @@ -765,11 +791,13 @@ def find_heter_ops(program, default_device="cpu"): for output in sum_op.output_arg_names ] } - block._insert_op(index=i + 1, - type=sum_op.type, - inputs=sum_op_inputs, - outputs=sum_op_outputs, - attrs=sum_op.all_attrs()) + block._insert_op( + index=i + 1, + type=sum_op.type, + inputs=sum_op_inputs, + outputs=sum_op_outputs, + attrs=sum_op.all_attrs(), + ) block._remove_op(var2idx[param_name] + 1) var2idx.pop(param_name) for var_ in var2idx: @@ -790,9 +818,9 @@ def find_heter_ops(program, default_device="cpu"): for no_grad_var in output_vars_no_grad: if no_grad_var in var2idx: """ - insert sum op & remove sum op from var2idx and origin place + insert sum op & remove sum op from var2idx and origin place - """ + """ op_list = list(block.ops) sum_op = op_list[var2idx[no_grad_var]] sum_op_inputs = { @@ -807,11 +835,13 @@ def find_heter_ops(program, default_device="cpu"): for output in sum_op.output_arg_names ] } - block._insert_op(index=i + 1, - type=sum_op.type, - inputs=sum_op_inputs, - outputs=sum_op_outputs, - attrs=sum_op.all_attrs()) + block._insert_op( + index=i + 1, + type=sum_op.type, + inputs=sum_op_inputs, + outputs=sum_op_outputs, + attrs=sum_op.all_attrs(), + ) block._remove_op(var2idx[no_grad_var] + 1) var2idx.pop(no_grad_var) for var_ in var2idx: @@ -824,12 +854,16 @@ def find_heter_ops(program, default_device="cpu"): pre_op = op_list[i - 1] if "_grad" in pre_op.type: forward_op_type = pre_op.type.split("_grad")[0] - if forward_op_type in SPARSE_OP_TYPE_DICT.keys() \ - and pre_op.attr('remote_prefetch') is True: + if ( + forward_op_type in SPARSE_OP_TYPE_DICT.keys() + and pre_op.attr('remote_prefetch') is True + ): param_name = pre_op.input( - SPARSE_OP_TYPE_DICT[forward_op_type])[0] + SPARSE_OP_TYPE_DICT[forward_op_type] + )[0] if param_name == origin_var and op.attr( - "op_device") == pre_op.attr("op_device"): + "op_device" + ) == pre_op.attr("op_device"): continue else: var2idx[origin_var] = i @@ -882,7 +916,8 @@ def find_heter_ops(program, default_device="cpu"): # for cpu-op block append if len(current_default_block_ops) > 1: default_ops[default_device][ - block_index] = current_default_block_ops + block_index + ] = current_default_block_ops program_block_ops.append(current_default_block_ops) current_default_block_ops = [] block_index += 1 @@ -938,14 +973,24 @@ def find_heter_ops(program, 
default_device="cpu"): for _, heter_block in heter_block_dict.items(): total_heter_ops += len(heter_block) print( - "There are {} OPs in your main_program, and contains {} heter-OPs which is made up of {} heter-blocks." - .format(len(block.ops), total_heter_ops, heter_blocks)) + "There are {} OPs in your main_program, and contains {} heter-OPs which is made up of {} heter-blocks.".format( + len(block.ops), total_heter_ops, heter_blocks + ) + ) return origin_porgram, heter_ops, default_ops, program_block_ops -def create_heter_program(program, config, heter_program, program_block_ops_list, - heter_ops, block_var_detail, current_device, stage_id): +def create_heter_program( + program, + config, + heter_program, + program_block_ops_list, + heter_ops, + block_var_detail, + current_device, + stage_id, +): # This function mainly includes the following contents: # 1. For every heter block: # a) copy heter device op from origin program @@ -993,57 +1038,76 @@ def create_heter_program(program, config, heter_program, program_block_ops_list, for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block_bp, op) - bp_entrance_vars = block_var_detail[stage_id - - 1]["backward"]["entrance"] - add_vars_by_var_list(bp_entrance_vars, program, heter_program, - heter_block_bp) + bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][ + "entrance" + ] + add_vars_by_var_list( + bp_entrance_vars, program, heter_program, heter_block_bp + ) bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"] - add_vars_by_var_list(bp_exit_vars, program, heter_program, - heter_block_bp) - backward_comm_info = get_communicate_var_info(program, - stage_id, - bp_entrance_vars, - type="backward") + add_vars_by_var_list( + bp_exit_vars, program, heter_program, heter_block_bp + ) + backward_comm_info = get_communicate_var_info( + program, stage_id, bp_entrance_vars, type="backward" + ) - grad_to_block_id.append(backward_comm_info["block_input_var_name"] + - ":" + str(heter_block_bp.idx)) + grad_to_block_id.append( + backward_comm_info["block_input_var_name"] + + ":" + + str(heter_block_bp.idx) + ) else: for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block, op) - bp_entrance_vars = block_var_detail[stage_id - - 1]["backward"]["entrance"] - add_vars_by_var_list(bp_entrance_vars, program, heter_program, - heter_block) + bp_entrance_vars = block_var_detail[stage_id - 1]["backward"][ + "entrance" + ] + add_vars_by_var_list( + bp_entrance_vars, program, heter_program, heter_block + ) bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"] add_vars_by_var_list(bp_exit_vars, program, heter_program, heter_block) heter_block_bp = heter_block - forward_comm_info = get_communicate_var_info(program, - stage_id, - entrance_vars, - type="forward") + forward_comm_info = get_communicate_var_info( + program, stage_id, entrance_vars, type="forward" + ) - grad_to_block_id.append(forward_comm_info["block_input_var_name"] + ":" + - str(heter_block.idx)) + grad_to_block_id.append( + forward_comm_info["block_input_var_name"] + ":" + str(heter_block.idx) + ) first_op_index_bp = len(heter_block_bp.ops) if stage_id <= len(block_var_detail) - 1: - static_var = insert_communicate_op(program, config, heter_block, - stage_id, first_op_index_fp, - block_var_detail, current_device) - static_var_bp = insert_communicate_op(program, config, heter_block_bp, - stage_id, first_op_index_bp, - block_var_detail, current_device, - False) + static_var = 
insert_communicate_op( + program, + config, + heter_block, + stage_id, + first_op_index_fp, + block_var_detail, + current_device, + ) + static_var_bp = insert_communicate_op( + program, + config, + heter_block_bp, + stage_id, + first_op_index_bp, + block_var_detail, + current_device, + False, + ) # add send op - send_grad_var_list = add_heter_send_op(program, heter_program, - heter_block_bp, - block_var_detail[stage_id - 1]) + send_grad_var_list = add_heter_send_op( + program, heter_program, heter_block_bp, block_var_detail[stage_id - 1] + ) # --------------- # add step conter @@ -1064,9 +1128,9 @@ def create_heter_program(program, config, heter_program, program_block_ops_list, # add info in listen&serv attrs = { - #"mode": "sync", - #"trainers": config.get_trainers(), - #"trainer_id": config.get_role_id() + config.get_trainers(), + # "mode": "sync", + # "trainers": config.get_trainers(), + # "trainer_id": config.get_role_id() + config.get_trainers(), "message_to_block_id": grad_to_block_id, "optimize_blocks": optimizer_block, # runtime attribute @@ -1075,13 +1139,12 @@ def create_heter_program(program, config, heter_program, program_block_ops_list, "pserver_id": config.get_role_id(), "distributed_mode": config.get_distributed_mode(), "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } # append the listen_and_serv op - heter_program.global_block().append_op(type="heter_listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + heter_program.global_block().append_op( + type="heter_listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs + ) check_heter_compile_time_strategy(program, config, send_grad_var_list) @@ -1093,14 +1156,16 @@ def check_heter_compile_time_strategy(program, config, send_grad_var_list): origin_grad_var_list = list(set(origin_grad_var_list)) send_grad_var_list = list(set(send_grad_var_list)) useless_grad_var_list = list( - set(origin_grad_var_list) - set(send_grad_var_list)) + set(origin_grad_var_list) - set(send_grad_var_list) + ) for useless_grad_var in useless_grad_var_list: config.remove_var_pair_by_grad(useless_grad_var) -def create_trainer_program(program, origin_program, config, - program_block_ops_list, block_var_detail): +def create_trainer_program( + program, origin_program, config, program_block_ops_list, block_var_detail +): # This function mainly includes the following contents: # 1. For every heter block in origin program # a) delete heter op and related variables @@ -1113,13 +1178,16 @@ def create_trainer_program(program, origin_program, config, # 2. 
check every op's device static_var = [] for heter_block_index in range(1, len(program_block_ops_list)): - ops_list = program_block_ops_list[heter_block_index][ - "forward"] + program_block_ops_list[heter_block_index]["backward"] - static_var += replace_ops_by_communicate_op(program, config, - heter_block_index, ops_list, - block_var_detail) - remove_trainer_send_op(program, config, heter_block_index, - block_var_detail) + ops_list = ( + program_block_ops_list[heter_block_index]["forward"] + + program_block_ops_list[heter_block_index]["backward"] + ) + static_var += replace_ops_by_communicate_op( + program, config, heter_block_index, ops_list, block_var_detail + ) + remove_trainer_send_op( + program, config, heter_block_index, block_var_detail + ) optimizer_block = [] grad_to_block_id = [] @@ -1127,23 +1195,26 @@ def create_trainer_program(program, origin_program, config, bp_ops_list = program_block_ops_list[0]["backward"] delete_same_ops(program.global_block(), bp_ops_list) delete_trainer_useless_var(config, program, static_var) - backward_block = create_backward_block(program, origin_program, config, - bp_ops_list, block_var_detail) + backward_block = create_backward_block( + program, origin_program, config, bp_ops_list, block_var_detail + ) bp_entrance_vars = block_var_detail[0]["backward"]["entrance"] - backward_comm_info = get_communicate_var_info(origin_program, - 1, - bp_entrance_vars, - type="backward") - - grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":" + - str(backward_block.idx)) + backward_comm_info = get_communicate_var_info( + origin_program, 1, bp_entrance_vars, type="backward" + ) + + grad_to_block_id.append( + backward_comm_info["block_input_var_name"] + + ":" + + str(backward_block.idx) + ) optimizer_block.append(backward_block) attrs = { - #"mode": "sync", - #"trainers": config.get_trainers(), - #"trainer_id": config.get_role_id(), + # "mode": "sync", + # "trainers": config.get_trainers(), + # "trainer_id": config.get_role_id(), "message_to_block_id": grad_to_block_id, "optimize_blocks": optimizer_block, # runtime attribute @@ -1152,68 +1223,74 @@ def create_trainer_program(program, origin_program, config, "pserver_id": config.get_role_id(), "distributed_mode": config.get_distributed_mode(), "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, } # append the listen_and_serv op - program.global_block()._insert_op(index=0, - type="heter_listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs=attrs) + program.global_block()._insert_op( + index=0, + type="heter_listen_and_serv", + inputs={'X': []}, + outputs={}, + attrs=attrs, + ) ## TODO add check for bp block check_op_device(program.global_block(), DEFAULT_DEVICE) -def insert_communicate_op(orign_program, - config, - heter_block, - stage_id, - first_op_index, - block_var_detail, - device, - is_forward=True): +def insert_communicate_op( + orign_program, + config, + heter_block, + stage_id, + first_op_index, + block_var_detail, + device, + is_forward=True, +): if is_forward: next_heter_worker_endpoints = config.get_next_stage_trainers() previous_heter_worker_endpoints = config.get_previous_stage_trainers() entrance_var = block_var_detail[stage_id]["forward"]["entrance"] - comm_info = get_communicate_var_info(orign_program, stage_id + 1, - entrance_var) + comm_info = get_communicate_var_info( + orign_program, stage_id + 1, entrance_var + ) else: next_heter_worker_endpoints = 
config.get_next_stage_trainers() - #if next_heter_worker_endpoints == "": + # if next_heter_worker_endpoints == "": # next_heter_worker_endpoints = [] previous_heter_worker_endpoints = config.get_previous_stage_trainers() entrance_var = block_var_detail[stage_id - 1]["backward"]["exit"] - comm_info = get_communicate_var_info(orign_program, stage_id - 1, - entrance_var, "backward") - - heter_block._insert_op(index=first_op_index, - type="send_and_recv", - inputs={"X": heter_block.vars[entrance_var[0]]}, - outputs={"Out": []}, - attrs={ - "mode": "forward" if is_forward else "backward", - "send_var_name": - entrance_var + ["microbatch_id"], - "recv_var_name": [], - "message_name": - comm_info["block_input_var_name"], - "next_endpoints": next_heter_worker_endpoints, - "previous_endpoints": - previous_heter_worker_endpoints, - "trainer_id": config.get_role_id(), - "op_device": device, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + comm_info = get_communicate_var_info( + orign_program, stage_id - 1, entrance_var, "backward" + ) + + heter_block._insert_op( + index=first_op_index, + type="send_and_recv", + inputs={"X": heter_block.vars[entrance_var[0]]}, + outputs={"Out": []}, + attrs={ + "mode": "forward" if is_forward else "backward", + "send_var_name": entrance_var + ["microbatch_id"], + "recv_var_name": [], + "message_name": comm_info["block_input_var_name"], + "next_endpoints": next_heter_worker_endpoints, + "previous_endpoints": previous_heter_worker_endpoints, + "trainer_id": config.get_role_id(), + "op_device": device, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return entrance_var -def create_backward_block(program, origin_program, config, bp_ops_list, - block_var_detail): +def create_backward_block( + program, origin_program, config, bp_ops_list, block_var_detail +): pre_block_idx = program.num_blocks - 1 heter_block = program._create_block(pre_block_idx) @@ -1222,8 +1299,10 @@ def create_backward_block(program, origin_program, config, bp_ops_list, send_varnames = op.attr('send_varnames') is_skip = False for varname in send_varnames: - if varname not in program.global_block( - ).vars and varname not in heter_block.vars: + if ( + varname not in program.global_block().vars + and varname not in heter_block.vars + ): is_skip = True break if is_skip == True: @@ -1237,8 +1316,9 @@ def create_backward_block(program, origin_program, config, bp_ops_list, return heter_block -def replace_ops_by_communicate_op(program, config, heter_block_index, ops_list, - block_var_detail): +def replace_ops_by_communicate_op( + program, config, heter_block_index, ops_list, block_var_detail +): all_op = program.global_block().ops start_op = ops_list[0] first_op_idx = -1 @@ -1256,10 +1336,12 @@ def replace_ops_by_communicate_op(program, config, heter_block_index, ops_list, next_heter_worker_endpoints = config.get_next_stage_trainers() entrance_var = block_var_detail[heter_block_index]["forward"][ - "entrance"] + "entrance" + ] - comm_info = get_communicate_var_info(program, heter_block_index + 1, - entrance_var) + comm_info = get_communicate_var_info( + program, heter_block_index + 1, entrance_var + ) program.global_block()._insert_op( index=first_op_idx, type="send_and_recv", @@ -1273,25 +1355,30 @@ def replace_ops_by_communicate_op(program, config, heter_block_index, ops_list, "next_endpoints": next_heter_worker_endpoints, "previous_endpoints": [], "trainer_id": config.get_role_id(), - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + 
) return entrance_var -def remove_trainer_send_op(program, config, heter_block_index, - block_var_detail): +def remove_trainer_send_op( + program, config, heter_block_index, block_var_detail +): # if trainer do FF->BP->SEND, it has follow vars: var, var@GRAD # if trainer only do SEND, it has one var: var@GRAD # Delete Send op ,if trainer doesn't has pair var (var<->var@GRAD) - persistables = block_var_detail[heter_block_index]["forward"]["persistables"] + \ - block_var_detail[heter_block_index]["backward"]["persistables"] + persistables = ( + block_var_detail[heter_block_index]["forward"]["persistables"] + + block_var_detail[heter_block_index]["backward"]["persistables"] + ) need_remove_send_op = [] need_remove_grad_var = [] for op in find_send_op(program): - input_list, _ = find_op_input_output(program, program.global_block(), - op) + input_list, _ = find_op_input_output( + program, program.global_block(), op + ) for var_name in input_list: origin_var_name = var_name.split("@GRAD")[0] if origin_var_name in persistables: @@ -1304,13 +1391,13 @@ def remove_trainer_send_op(program, config, heter_block_index, def add_heter_send_op(program, heter_program, block, block_var_detail): - def _get_send_op_dict(): send_op_dict = {} send_op_list = find_send_op(program) for op in send_op_list: - input_list, _ = find_op_input_output(program, - program.global_block(), op) + input_list, _ = find_op_input_output( + program, program.global_block(), op + ) for var in input_list: send_op_dict[var] = op return send_op_dict @@ -1345,21 +1432,23 @@ def add_heter_send_op(program, heter_program, block, block_var_detail): for table_id in table_dict: dummy_output = block.create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) send_input_vars = [ block.vars[union_var] for union_var in table_dict[table_id]['var_list'] ] - block.append_op(type="send", - inputs={"X": send_input_vars}, - outputs={"Out": dummy_output}, - attrs={ - "send_varnames": - table_dict[table_id]['send_varnames'], - "is_sparse": is_sparse, - "table_id": table_id, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + block.append_op( + type="send", + inputs={"X": send_input_vars}, + outputs={"Out": dummy_output}, + attrs={ + "send_varnames": table_dict[table_id]['send_varnames'], + "is_sparse": is_sparse, + "table_id": table_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) return send_grad_var_list @@ -1372,19 +1461,20 @@ def find_send_op(program): return send_op_list -def get_communicate_var_info(program, - block_index, - entrance_var_list, - type="forward"): +def get_communicate_var_info( + program, block_index, entrance_var_list, type="forward" +): input_var_reshape_dim = [] input_var_reshape_name = [] if type == "forward": block_input_var_name = "forward_joint_{}_{}@Heter".format( - block_index - 1, block_index) + block_index - 1, block_index + ) else: block_input_var_name = "backward_joint_{}_{}@Heter".format( - block_index + 1, block_index) + block_index + 1, block_index + ) entrance_var_list.sort() # input @@ -1402,7 +1492,7 @@ def get_communicate_var_info(program, # output # var -> reshape -> var@Heter_SERVER_BLOCK@INPUT_RESHAPE_VAR -> concat -> Heter_SERVER_BLOCK_index@JOINT_VAR - #for var_name in exit_var_list: + # for var_name in exit_var_list: # var = program.global_block().vars[var_name] # shape = var.shape # # if len(shape) < 2 or shape[0] != -1: @@ -1473,11 +1563,14 @@ def union_forward_gradient_op(program_block_ops_list): ''' union_program_block_ops_list = [] - 
assert block_length % 2 != 0, "the length of program_block_ops_list should be odd" + assert ( + block_length % 2 != 0 + ), "the length of program_block_ops_list should be odd" for i in range(0, block_length // 2): block_op_list = {"forward": program_block_ops_list[i]} block_op_list.update( - {"backward": program_block_ops_list[block_length - 1 - i]}) + {"backward": program_block_ops_list[block_length - 1 - i]} + ) union_program_block_ops_list.append(block_op_list) block_op_list = {"forward": [], "backward": []} @@ -1491,13 +1584,15 @@ def union_forward_gradient_op(program_block_ops_list): def find_block_joints(program, program_block_ops_list, heter_ops): - block_var_detail = find_entrance_exit_private(program, - program_block_ops_list) - block_var_detail = entrance_exit_check(program, program_block_ops_list, - block_var_detail, heter_ops) - block_var_detail = delete_block_useless_exit(program, - program_block_ops_list, - block_var_detail) + block_var_detail = find_entrance_exit_private( + program, program_block_ops_list + ) + block_var_detail = entrance_exit_check( + program, program_block_ops_list, block_var_detail, heter_ops + ) + block_var_detail = delete_block_useless_exit( + program, program_block_ops_list, block_var_detail + ) return block_var_detail @@ -1508,9 +1603,11 @@ def find_entrance_exit_private(program, program_block_ops_list): for index, block_op_list in enumerate(program_block_ops_list): ## forward block_input, block_output = find_ops_list_input_output( - program, block_op_list["forward"]) + program, block_op_list["forward"] + ) persistables = screen_persistables( - program, block_input) + screen_persistables(program, block_output) + program, block_input + ) + screen_persistables(program, block_output) # find entrance & exit block_private_vars = list(set(block_input) & set(block_output)) block_entrance = list(set(block_input) - set(block_private_vars)) @@ -1520,35 +1617,40 @@ def find_entrance_exit_private(program, program_block_ops_list): "entrance": block_entrance, "exit": block_exit, "private": block_private_vars, - "persistables": persistables + "persistables": persistables, } } ## backward bp_block_input, bp_block_output = find_ops_list_input_output( - program, block_op_list["backward"]) + program, block_op_list["backward"] + ) bp_persistables = screen_persistables( - program, bp_block_input) + screen_persistables( - program, bp_block_output) + program, bp_block_input + ) + screen_persistables(program, bp_block_output) # find entrance & exit bp_block_private_vars = list(set(bp_block_input) & set(bp_block_output)) bp_block_entrance = list( - set(bp_block_input) - set(bp_block_private_vars)) + set(bp_block_input) - set(bp_block_private_vars) + ) bp_block_exit = list(set(bp_block_output) - set(bp_block_private_vars)) - detail.update({ - "backward": { - "entrance": bp_block_entrance, - "exit": bp_block_exit, - "private": bp_block_private_vars, - "persistables": bp_persistables + detail.update( + { + "backward": { + "entrance": bp_block_entrance, + "exit": bp_block_exit, + "private": bp_block_private_vars, + "persistables": bp_persistables, + } } - }) + ) block_var_detail.append(detail) return block_var_detail -def entrance_exit_check(program, program_block_ops_list, block_var_detail, - heter_ops): +def entrance_exit_check( + program, program_block_ops_list, block_var_detail, heter_ops +): for index in range(len(block_var_detail) - 1, -1, -1): if index - 1 < 0: break @@ -1558,9 +1660,11 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, 
backward_entrance = block_var_detail[index]["backward"]["entrance"] - forward_all = block_var_detail[index]["forward"][ - "entrance"] + block_var_detail[index]["forward"][ - "private"] + block_var_detail[index]["forward"]["exit"] + forward_all = ( + block_var_detail[index]["forward"]["entrance"] + + block_var_detail[index]["forward"]["private"] + + block_var_detail[index]["forward"]["exit"] + ) for var in backward_entrance: if not ("@GRAD" in var) and not (var in forward_all): @@ -1571,18 +1675,24 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, if previous_block_exit == current_block_entrance: continue exist_vars = list( - set(previous_block_exit) & set(current_block_entrance)) + set(previous_block_exit) & set(current_block_entrance) + ) need_add_vars = list(set(current_block_entrance) - set(exist_vars)) # var in different stage should not be ignored, since they are not placed in the same program & device - #need_add_vars = find_need_var_from_previous_block( + # need_add_vars = find_need_var_from_previous_block( # need_add_vars, block_var_detail, index, heter_ops) - previous_block_private = block_var_detail[index - - 1]["forward"]["private"] - previous_block_entrance = block_var_detail[index - - 1]["forward"]["entrance"] + previous_block_private = block_var_detail[index - 1]["forward"][ + "private" + ] + previous_block_entrance = block_var_detail[index - 1]["forward"][ + "entrance" + ] for var in need_add_vars: - if var not in previous_block_private and var not in previous_block_entrance: + if ( + var not in previous_block_private + and var not in previous_block_entrance + ): previous_block_entrance.append(var) previous_block_exit.append(var) if not var in current_block_entrance: @@ -1598,27 +1708,35 @@ def entrance_exit_check(program, program_block_ops_list, block_var_detail, if previous_block_exit == current_block_entrance: continue exist_vars = list( - set(previous_block_exit) & set(current_block_entrance)) + set(previous_block_exit) & set(current_block_entrance) + ) need_add_vars = list(set(current_block_entrance) - set(exist_vars)) need_ignore_vars = [] for var in need_add_vars: if not "@GRAD" in var: need_ignore_vars.append(var) need_add_vars = list( - set(need_add_vars).difference(set(need_ignore_vars))) - previous_block_private = block_var_detail[index + - 1]["backward"]["private"] - previous_block_entrance = block_var_detail[index + - 1]["backward"]["entrance"] + set(need_add_vars).difference(set(need_ignore_vars)) + ) + previous_block_private = block_var_detail[index + 1]["backward"][ + "private" + ] + previous_block_entrance = block_var_detail[index + 1]["backward"][ + "entrance" + ] for var in need_add_vars: - if var not in previous_block_private and var not in previous_block_entrance: + if ( + var not in previous_block_private + and var not in previous_block_entrance + ): previous_block_entrance.append(var) previous_block_exit.append(var) return block_var_detail -def find_need_var_from_previous_block(need_add_vars, block_var_detail, - current_index, heter_ops): +def find_need_var_from_previous_block( + need_add_vars, block_var_detail, current_index, heter_ops +): # create index_device_map index_device_map = {} for index in range(len(block_var_detail)): @@ -1633,15 +1751,21 @@ def find_need_var_from_previous_block(need_add_vars, block_var_detail, # if need_add_var in current device, no need communicate for var in need_add_vars: - while (pre_index >= 0): + while pre_index >= 0: previous_block_private = block_var_detail[pre_index]["private"] 
previous_block_exit = block_var_detail[pre_index]["exit"] previous_block_entrance = block_var_detail[pre_index]["entrance"] - total_var = previous_block_private + previous_block_exit + previous_block_entrance + total_var = ( + previous_block_private + + previous_block_exit + + previous_block_entrance + ) if var in total_var: - if index_device_map[current_index] == index_device_map[ - pre_index] and index_device_map[ - current_index] == DEFAULT_DEVICE: + if ( + index_device_map[current_index] + == index_device_map[pre_index] + and index_device_map[current_index] == DEFAULT_DEVICE + ): need_ignore_var.append(var) break pre_index -= 1 @@ -1650,8 +1774,9 @@ def find_need_var_from_previous_block(need_add_vars, block_var_detail, return need_add_vars -def delete_block_useless_exit(program, program_block_ops_list, - block_var_detail): +def delete_block_useless_exit( + program, program_block_ops_list, block_var_detail +): ## forward for index in range(len(block_var_detail)): if index == len(block_var_detail) - 1: @@ -1670,8 +1795,9 @@ def delete_block_useless_exit(program, program_block_ops_list, if index - 1 < 0: break current_block_exit = block_var_detail[index]["backward"]["exit"] - next_block_entrance = block_var_detail[index - - 1]["backward"]["entrance"] + next_block_entrance = block_var_detail[index - 1]["backward"][ + "entrance" + ] need_delete_var = [] for var in current_block_exit: if var not in next_block_entrance: @@ -1706,71 +1832,82 @@ def screen_persistables(program, var_list): return need_remove -def insert_reshape_op(program, - block, - index, - var_name, - new_var_name, - new_var_shape=None): +def insert_reshape_op( + program, block, index, var_name, new_var_name, new_var_shape=None +): input_var = block.vars[var_name] if new_var_name not in block.vars: - out = block.create_var(name=new_var_name, - shape=new_var_shape, - dtype=input_var.dtype, - type=input_var.type) + out = block.create_var( + name=new_var_name, + shape=new_var_shape, + dtype=input_var.dtype, + type=input_var.type, + ) else: out = block.vars[new_var_name] new_var_shape = out.shape - x_shape = block.create_var(name="{}.xshape@Heter".format(var_name), - dtype=input_var.dtype) - block._insert_op(index=index, - type="reshape2", - inputs={"X": input_var}, - attrs={'shape': new_var_shape}, - outputs={ - "Out": out, - "XShape": x_shape - }) - - -def insert_send_concat_op(program, block, index, var_name_list, new_var_name, - new_var_shape): + x_shape = block.create_var( + name="{}.xshape@Heter".format(var_name), dtype=input_var.dtype + ) + block._insert_op( + index=index, + type="reshape2", + inputs={"X": input_var}, + attrs={'shape': new_var_shape}, + outputs={"Out": out, "XShape": x_shape}, + ) + + +def insert_send_concat_op( + program, block, index, var_name_list, new_var_name, new_var_shape +): input_var_list = [block.vars[var_name] for var_name in var_name_list] - out = program.global_block().create_var(name=new_var_name, - shape=new_var_shape, - dtype=input_var_list[0].dtype, - type=input_var_list[0].type) - - block._insert_op(index=index, - type='concat', - inputs={"X": input_var_list}, - outputs={'Out': [out]}, - attrs={ - 'axis': -1, - 'use_stack': False - }) - - -def insert_recv_slice_op(program, block, index, var_name, var_shape, dtype, - type, new_var_name_list, new_var_shape_list): + out = program.global_block().create_var( + name=new_var_name, + shape=new_var_shape, + dtype=input_var_list[0].dtype, + type=input_var_list[0].type, + ) + + block._insert_op( + index=index, + type='concat', + inputs={"X": 
input_var_list}, + outputs={'Out': [out]}, + attrs={'axis': -1, 'use_stack': False}, + ) + + +def insert_recv_slice_op( + program, + block, + index, + var_name, + var_shape, + dtype, + type, + new_var_name_list, + new_var_shape_list, +): if var_name not in program.global_block().vars: - input_var = program.global_block().create_var(name=var_name, - shape=var_shape, - dtype=dtype, - type=type) + input_var = program.global_block().create_var( + name=var_name, shape=var_shape, dtype=dtype, type=type + ) else: input_var = program.global_block().vars[var_name] out_list = [] for i in range(len(new_var_name_list)): if new_var_name_list[i] not in block.vars: - out = block.create_var(name=new_var_name_list[i], - shape=new_var_shape_list[i], - dtype=input_var.dtype, - type=input_var.type) + out = block.create_var( + name=new_var_name_list[i], + shape=new_var_shape_list[i], + dtype=input_var.dtype, + type=input_var.type, + ) else: out = block.vars[new_var_name_list[i]] out_list.append(out) @@ -1787,25 +1924,31 @@ def insert_recv_slice_op(program, block, index, var_name, var_shape, dtype, attrs['starts'] = starts attrs['ends'] = ends - block._insert_op(index=index, - type='slice', - inputs={'Input': input_var}, - attrs=attrs, - outputs={'Out': out_list[i]}) + block._insert_op( + index=index, + type='slice', + inputs={'Input': input_var}, + attrs=attrs, + outputs={'Out': out_list[i]}, + ) start_index = end_index index += 1 -def add_heter_trainer_useful_vars(config, program, heter_program, heter_block, - static_var): +def add_heter_trainer_useful_vars( + config, program, heter_program, heter_block, static_var +): static_var = list(set(static_var)) for var_name in static_var: - if var_name not in heter_program.global_block( - ).vars and var_name not in heter_block.vars: + if ( + var_name not in heter_program.global_block().vars + and var_name not in heter_block.vars + ): var = program.global_block().vars[var_name] if var.persistable: heter_program.global_block()._clone_variable( - var, force_persistable=False) + var, force_persistable=False + ) else: heter_block._clone_variable(var, force_persistable=False) @@ -1815,14 +1958,18 @@ def delete_trainer_useless_var(config, program, static_var): program_useful_var_list = [] for op in program.global_block().ops: input_var_list, output_var_list = find_op_input_output( - program, program.global_block(), op) + program, program.global_block(), op + ) op_var_list = list(set(input_var_list).union(set(output_var_list))) program_useful_var_list = list( - set(program_useful_var_list).union(set(op_var_list))) + set(program_useful_var_list).union(set(op_var_list)) + ) program_useful_var_list += static_var program_useless_var_list = list( set(get_vars_name_in_block(program.global_block())).difference( - set(program_useful_var_list))) + set(program_useful_var_list) + ) + ) for var in program_useless_var_list: program.global_block()._remove_var(var) return program_useless_var_list @@ -1837,11 +1984,14 @@ def block_append_op(program, origin_program, block, op): if not isinstance(varlist, list): varlist = [varlist] for var in varlist: - if var.name not in program.global_block( - ).vars and var.name not in block.vars: + if ( + var.name not in program.global_block().vars + and var.name not in block.vars + ): if var.persistable: program.global_block()._clone_variable( - var, force_persistable=False) + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) @@ -1850,20 +2000,22 @@ def block_append_op(program, origin_program, block, op): if not 
isinstance(varlist, list): varlist = [varlist] for var in varlist: - if var.name not in program.global_block( - ).vars and var.name not in block.vars: + if ( + var.name not in program.global_block().vars + and var.name not in block.vars + ): if var.persistable: program.global_block()._clone_variable( - var, force_persistable=False) + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) if "_grad" not in op.type: # for forward op - return block.append_op(type=op.type, - inputs=inputs, - outputs=outputs, - attrs=op.all_attrs()) + return block.append_op( + type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs() + ) else: # for grad op op_desc = op.desc @@ -1885,12 +2037,15 @@ def block_append_op(program, origin_program, block, op): def add_vars_by_var_list(var_name_list, origin_program, program, block): for var_name in var_name_list: - if var_name not in program.global_block( - ).vars and var_name not in block.vars: + if ( + var_name not in program.global_block().vars + and var_name not in block.vars + ): var = origin_program.global_block().vars[var_name] if var.persistable: - program.global_block()._clone_variable(var, - force_persistable=False) + program.global_block()._clone_variable( + var, force_persistable=False + ) else: block._clone_variable(var, force_persistable=False) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py index 0e30d0e3f9c5712c494daf17b2b4bcec86f69c23..fef6f24570c17bfc28dc87d699891d84c292a59e 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/ufind.py @@ -14,7 +14,7 @@ class UnionFind(object): - """ Union-find data structure. + """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned into a number of disjoint (non-overlapping) subsets. diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py index b1beecd66d5fa4393e5e765544088ffc5b124cc2..745c05d986a6e66dbf4dce6089dbbe44d05cc84c 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/vars_metatools.py @@ -29,7 +29,6 @@ dtype_to_size = { class VarBlock: - def __init__(self, varname, offset, size): self.varname = varname # NOTE: real offset is offset * size @@ -48,8 +47,9 @@ def create_var_struct(var): else: raise ValueError("can only support SELECTED_ROWS/LOD_TENSOR now") - return VarStruct(var.name, var.shape, var.dtype, var.type, lod_level, - var.persistable) + return VarStruct( + var.name, var.shape, var.dtype, var.type, lod_level, var.persistable + ) class VarStruct(object): @@ -70,8 +70,14 @@ class VarStruct(object): def __str__(self): return "N: {}, S: {}, D: {}, T: {}, LL: {}, P: {}, M: {}".format( - self.name, self.shape, self.dtype, self.type, self.lod_level, - self.persistable, self.m_size) + self.name, + self.shape, + self.dtype, + self.type, + self.lod_level, + self.persistable, + self.m_size, + ) class VarDistributed(object): @@ -81,14 +87,16 @@ class VarDistributed(object): the slice var's properties, such as type/shape/offset/endpoint. 
""" - def __init__(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): + def __init__( + self, + origin_var, + slice_var, + is_slice=None, + block_id=None, + offset=None, + vtype=None, + endpoint=None, + ): """ Args: origin_var(Variable|VarStruct): origin var properties @@ -138,26 +146,45 @@ class VarDistributed(object): """ assert isinstance(var1, VarStruct) and isinstance(var2, VarStruct) - return var1.name == var2.name and \ - var1.type == var2.type and \ - var1.shape == var2.shape and \ - var1.dtype == var2.dtype and \ - var1.lod_level == var2.lod_level and \ - var1.persistable == var2.persistable + return ( + var1.name == var2.name + and var1.type == var2.type + and var1.shape == var2.shape + and var1.dtype == var2.dtype + and var1.lod_level == var2.lod_level + and var1.persistable == var2.persistable + ) def __str__(self): - origin_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})". \ - format(i="{", e="}", name=self.origin.name, type=self.origin.type, - shape=self.origin.shape, dtype=self.origin.dtype) - - slice_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})" \ - ".slice({is_slice}).block({block_id}).offset({offset})". \ - format(i="{", e="}", name=self.slice.name, type=self.slice.type, - shape=self.slice.shape, dtype=self.slice.dtype, - is_slice=self.is_slice, block_id=self.block_id, offset=self.offset) + origin_var_str = ( + "{name} : fluid.{type}.shape{shape}.astype({dtype})".format( + i="{", + e="}", + name=self.origin.name, + type=self.origin.type, + shape=self.origin.shape, + dtype=self.origin.dtype, + ) + ) + + slice_var_str = ( + "{name} : fluid.{type}.shape{shape}.astype({dtype})" + ".slice({is_slice}).block({block_id}).offset({offset})".format( + i="{", + e="}", + name=self.slice.name, + type=self.slice.type, + shape=self.slice.shape, + dtype=self.slice.dtype, + is_slice=self.is_slice, + block_id=self.block_id, + offset=self.offset, + ) + ) return "var owned: {}, origin var: ( {} ), slice var: ( {} ), endpoint: {} ".format( - self.vtype, origin_var_str, slice_var_str, self.endpoint) + self.vtype, origin_var_str, slice_var_str, self.endpoint + ) class VarsDistributed(object): @@ -171,14 +198,16 @@ class VarsDistributed(object): def __init__(self): self.distributed_vars = [] - def add_distributed_var(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): + def add_distributed_var( + self, + origin_var, + slice_var, + is_slice=None, + block_id=None, + offset=None, + vtype=None, + endpoint=None, + ): """ add distributed var in this. @@ -194,5 +223,13 @@ class VarsDistributed(object): None """ self.distributed_vars.append( - VarDistributed(origin_var, slice_var, is_slice, block_id, offset, - vtype, endpoint)) + VarDistributed( + origin_var, + slice_var, + is_slice, + block_id, + offset, + vtype, + endpoint, + ) + ) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/mode.py b/python/paddle/fluid/incubate/fleet/parameter_server/mode.py index 0733f9b8a23e42f14817b603f0ca3a3d02b132bf..623e919ba35a8aa135875a2b266062c07964e53d 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/mode.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/mode.py @@ -17,6 +17,7 @@ class PSMode: """ There are various mode for fleet, each of them is designed for different model. 
""" + TRANSPILER = 1 PSLIB = 2 diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py index ef6c34b23fdb7350da2acb7d5883fb98ee2ff2d4..39d6d3ff1c73800431df61960a9dbaceba5f9240 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py @@ -49,8 +49,9 @@ class PSLib(Fleet): if isinstance(role_maker, HeterRoleMaker): self._heter_ptr = fluid.core.Heter() - def _set_client_communication_config(self, request_timeout_ms, - connect_timeout_ms, max_retry): + def _set_client_communication_config( + self, request_timeout_ms, connect_timeout_ms, max_retry + ): self._client2client_request_timeout_ms = request_timeout_ms self._client2client_connect_timeout_ms = connect_timeout_ms self._client2client_max_retry = max_retry @@ -70,33 +71,41 @@ class PSLib(Fleet): if len(self._main_programs) == 0: raise ValueError( - "You should run DistributedOptimizer.minimize() first") + "You should run DistributedOptimizer.minimize() first" + ) if self._opt_info: if "fleet_desc" in self._opt_info: self._dist_desc_str = text_format.MessageToString( - self._opt_info["fleet_desc"]) + self._opt_info["fleet_desc"] + ) self._dist_desc = self._opt_info["fleet_desc"] else: raise Exception( - "You should run DistributedOptimizer.minimize() first") + "You should run DistributedOptimizer.minimize() first" + ) # barrier_all for init_server, wait for server starts if isinstance(self._role_maker, HeterRoleMaker): if self._role_maker.is_xpu(): local_endpoint = self._role_maker.get_local_endpoint() local_endpoint = local_endpoint.split(":") - self._heter_ptr.start_xpu_service(str(local_endpoint[0]), - int(local_endpoint[1])) + self._heter_ptr.start_xpu_service( + str(local_endpoint[0]), int(local_endpoint[1]) + ) self._role_maker._barrier_all() self.all_ips_ = self._role_maker._all_gather(self._local_ip) # worker_index * 2 is for compatible with older versions of pslib - self._fleet_ptr.init_worker(self._dist_desc_str, self.all_ips_, - self._role_maker._get_size(), - self._role_maker.worker_index() * 2) + self._fleet_ptr.init_worker( + self._dist_desc_str, + self.all_ips_, + self._role_maker._get_size(), + self._role_maker.worker_index() * 2, + ) if isinstance(self._role_maker, HeterRoleMaker): if self._role_maker.is_worker(): self._heter_ptr.set_xpu_list( - self._role_maker._xpu_endpoints) + self._role_maker._xpu_endpoints + ) self._heter_ptr.create_client2xpu_connection() # barrier_all for init_worker self._role_maker._barrier_all() @@ -110,7 +119,8 @@ class PSLib(Fleet): self._fleet_ptr.set_client2client_config( self._client2client_request_timeout_ms, self._client2client_connect_timeout_ms, - self._client2client_max_retry) + self._client2client_max_retry, + ) self._fleet_ptr.create_client2client_connection() # barrier for init model self._role_maker._barrier_worker() @@ -136,19 +146,22 @@ class PSLib(Fleet): var_name = table.dense_variable_name[i] if scope.find_var(var_name) is None: raise ValueError( - "var " + var_name + - " not found in scope, " + - "you should run startup program first") + "var " + + var_name + + " not found in scope, " + + "you should run startup program first" + ) var_name_list.append(var_name) if not self._opt_info["use_ps_gpu"]: - self._fleet_ptr.init_model(scope, - int(table.table_id), - var_name_list) + self._fleet_ptr.init_model( + scope, int(table.table_id), var_name_list + ) # barrier for init model done 
self._role_maker._barrier_worker() else: raise NameError( - "You should run DistributedOptimizer.minimize() first") + "You should run DistributedOptimizer.minimize() first" + ) def init_server(self, model_dir=None, **kwargs): """ @@ -177,83 +190,102 @@ class PSLib(Fleet): def run_server(self): """ - init_pserver(): will be called by user. When a user knows current process is_worker(), he/she - should call init_pserver() to initialize global information about parameter server + init_pserver(): will be called by user. When a user knows current process is_worker(), he/she + should call init_pserver() to initialize global information about parameter server """ if self._opt_info: if "fleet_desc" in self._opt_info: self._dist_desc_str = text_format.MessageToString( - self._opt_info["fleet_desc"]) + self._opt_info["fleet_desc"] + ) self._dist_desc = self._opt_info["fleet_desc"] else: raise Exception( - "You should run DistributedOptimizer.minimize() first") + "You should run DistributedOptimizer.minimize() first" + ) # server_index * 2 is for compatible with older versions of pslib - self._fleet_ptr.init_server(self._dist_desc_str, - self._role_maker.server_index() * 2) + self._fleet_ptr.init_server( + self._dist_desc_str, self._role_maker.server_index() * 2 + ) if isinstance(self._role_maker, MPISymetricRoleMaker): self._local_ip = self._fleet_ptr.run_server() else: local_endpoint = self._role_maker.get_local_endpoint() local_endpoint = local_endpoint.split(":") self._local_ip = self._fleet_ptr.run_server( - str(local_endpoint[0]), int(local_endpoint[1])) + str(local_endpoint[0]), int(local_endpoint[1]) + ) # barrier_all for init_server self._role_maker._barrier_all() self.all_ips_ = self._role_maker._all_gather(self._local_ip) - self._fleet_ptr.gather_servers(self.all_ips_, - self._role_maker._get_size()) + self._fleet_ptr.gather_servers( + self.all_ips_, self._role_maker._get_size() + ) # barrier_all for init_worker, wait all workers start self._role_maker._barrier_all() else: raise Exception( - "You should run DistributedOptimizer.minimize() first") + "You should run DistributedOptimizer.minimize() first" + ) def end_pass(self, scope): if self._role_maker.worker_index() < self._role_maker.xpu_num(): self._heter_ptr.end_pass(scope, self._role_maker.worker_index()) self._heter_ptr.stop_xpu_service(self._role_maker.worker_index()) - def train_from_dataset(self, - executor, - program=None, - dataset=None, - scope=None, - thread=0, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): - """ - - """ + def train_from_dataset( + self, + executor, + program=None, + dataset=None, + scope=None, + thread=0, + debug=False, + fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): + """ """ if self._role_maker.is_worker(): self._role_maker._barrier_heter() - executor.train_from_dataset(program, dataset, scope, thread, debug, - fetch_list, fetch_info, print_period, - fetch_handler) - - def start_heter_trainer(self, - executor, - program=None, - scope=None, - debug=False, - fetch_list=None, - fetch_info=None, - print_period=100, - fetch_handler=None): - """ - - """ - - trainer_instance = executor.start_heter_trainer(program, scope, debug, - fetch_list, fetch_info, - print_period, - fetch_handler) + executor.train_from_dataset( + program, + dataset, + scope, + thread, + debug, + fetch_list, + fetch_info, + print_period, + fetch_handler, + ) + + def start_heter_trainer( + self, + executor, + program=None, + scope=None, + debug=False, + 
fetch_list=None, + fetch_info=None, + print_period=100, + fetch_handler=None, + ): + """ """ + + trainer_instance = executor.start_heter_trainer( + program, + scope, + debug, + fetch_list, + fetch_info, + print_period, + fetch_handler, + ) if self._role_maker.is_xpu(): print("barrier heter") self._role_maker._barrier_heter() @@ -293,13 +325,15 @@ class PSLib(Fleet): self._optimizer = DownpourOptimizer(optimizer, strategy) return self._optimizer - def save_inference_model(self, - executor, - dirname, - feeded_var_names=None, - target_vars=None, - main_program=None, - export_for_deployment=True): + def save_inference_model( + self, + executor, + dirname, + feeded_var_names=None, + target_vars=None, + main_program=None, + export_for_deployment=True, + ): """ save pserver model called from a worker Args: @@ -369,12 +403,9 @@ class PSLib(Fleet): self._fleet_ptr.save_model(dirname, mode) self._role_maker._barrier_worker() - def save_model_with_whitelist(self, - executor, - dirname, - whitelist_path, - main_program=None, - **kwargs): + def save_model_with_whitelist( + self, executor, dirname, whitelist_path, main_program=None, **kwargs + ): """ save whitelist, mode is consistent with fleet.save_persistables, when using fleet, it will save sparse and dense feature @@ -400,8 +431,9 @@ class PSLib(Fleet): self._fleet_ptr.client_flush() self._role_maker._barrier_worker() if self._role_maker.is_first_worker(): - self._fleet_ptr.save_model_with_whitelist(table_id, dirname, mode, - whitelist_path) + self._fleet_ptr.save_model_with_whitelist( + table_id, dirname, mode, whitelist_path + ) self._role_maker._barrier_worker() def save_multi_table_one_path(self, table_ids, model_dir, **kwargs): @@ -422,8 +454,9 @@ class PSLib(Fleet): mode = kwargs.get("mode", 0) self._role_maker._barrier_worker() if self._role_maker.is_first_worker(): - self._fleet_ptr.save_multi_table_one_path(table_ids, model_dir, - mode) + self._fleet_ptr.save_multi_table_one_path( + table_ids, model_dir, mode + ) self._role_maker._barrier_worker() def save_cache_model(self, executor, dirname, main_program=None, **kwargs): @@ -452,12 +485,13 @@ class PSLib(Fleet): if self._role_maker.is_first_worker(): cache_threshold = self._fleet_ptr.get_cache_threshold(table_id) - #check cache threshold right or not + # check cache threshold right or not self._role_maker._barrier_worker() if self._role_maker.is_first_worker(): - self._fleet_ptr.cache_shuffle(table_id, dirname, mode, - cache_threshold) + self._fleet_ptr.cache_shuffle( + table_id, dirname, mode, cache_threshold + ) self._role_maker._barrier_worker() @@ -516,8 +550,9 @@ class PSLib(Fleet): break if skip: continue - self._fleet_ptr.shrink_dense_table(i.table_id, scope, - var_list, decay, emb_dim) + self._fleet_ptr.shrink_dense_table( + i.table_id, scope, var_list, decay, emb_dim + ) self._role_maker._barrier_worker() def clear_one_table(self, table_id): @@ -587,8 +622,9 @@ class PSLib(Fleet): self._role_maker._barrier_worker() mode = kwargs.get("mode", 0) if self._role_maker.is_first_worker(): - self._fleet_ptr.load_table_with_whitelist(table_id, model_path, - mode) + self._fleet_ptr.load_table_with_whitelist( + table_id, model_path, mode + ) self._role_maker._barrier_worker() def load_one_table(self, table_id, model_path, **kwargs): @@ -631,20 +667,27 @@ class PSLib(Fleet): load_combine = kwargs.get("load_combine", False) self._role_maker._barrier_worker() if scope is not None and model_proto_file is not None: - self._load_one_table_from_paddle_model(scope, table_id, model_path, - 
model_proto_file, var_names, - load_combine) + self._load_one_table_from_paddle_model( + scope, + table_id, + model_path, + model_proto_file, + var_names, + load_combine, + ) elif self._role_maker.is_first_worker(): self._fleet_ptr.load_model_one_table(table_id, model_path, mode) self._role_maker._barrier_worker() - def _load_one_table_from_paddle_model(self, - scope, - table_id, - model_path, - model_proto_file, - var_names=None, - load_combine=False): + def _load_one_table_from_paddle_model( + self, + scope, + table_id, + model_path, + model_proto_file, + var_names=None, + load_combine=False, + ): """ load params from paddle model, and push params to pserver Args: @@ -660,26 +703,46 @@ class PSLib(Fleet): if self._role_maker.is_first_worker(): # get fs config from fleet_desc fs_name = self._opt_info["fleet_desc"].fs_client_param.uri - fs_ugi = self._opt_info["fleet_desc"].fs_client_param.user + "," + \ - self._opt_info["fleet_desc"].fs_client_param.passwd + fs_ugi = ( + self._opt_info["fleet_desc"].fs_client_param.user + + "," + + self._opt_info["fleet_desc"].fs_client_param.passwd + ) hadoop_bin = self._opt_info["fleet_desc"].fs_client_param.hadoop_bin # download model_path if it's hdfs/afs if model_path.startswith("hdfs:") or model_path.startswith("afs:"): dest = "./model_for_load_table_%s" % table_id - cmd = hadoop_bin + " fs -D fs.default.name=" + fs_name + \ - " -D hadoop.job.ugi=" + fs_ugi + " -get " + model_path + \ - " " + dest + cmd = ( + hadoop_bin + + " fs -D fs.default.name=" + + fs_name + + " -D hadoop.job.ugi=" + + fs_ugi + + " -get " + + model_path + + " " + + dest + ) ret = os.system(cmd) if ret != 0: raise RuntimeError("download model failed") model_path = dest # download model_proto_file if it's hdfs/afs - if model_proto_file.startswith("hdfs:") or \ - model_proto_file.startswith("afs:"): + if model_proto_file.startswith( + "hdfs:" + ) or model_proto_file.startswith("afs:"): dest = "./model_proto_file_for_load_table_%s" % table_id - cmd = hadoop_bin + " fs -D fs.default.name=" + fs_name + \ - " -D hadoop.job.ugi=" + fs_ugi + " -get " + \ - model_proto_file + " " + dest + cmd = ( + hadoop_bin + + " fs -D fs.default.name=" + + fs_name + + " -D hadoop.job.ugi=" + + fs_ugi + + " -get " + + model_proto_file + + " " + + dest + ) ret = os.system(cmd) if ret != 0: raise RuntimeError("download model proto file failed") @@ -697,8 +760,14 @@ class PSLib(Fleet): if skip: continue self._fleet_ptr.load_from_paddle_model( - scope, table_id, var_names, model_path, - model_proto_file, table_var_names, load_combine) + scope, + table_id, + var_names, + model_path, + model_proto_file, + table_var_names, + load_combine, + ) self._role_maker._barrier_worker() def confirm(self): @@ -784,7 +853,8 @@ class PSLib(Fleet): if self._role_maker.is_first_worker(): if prefix is not None: self._fleet_ptr.save_model_one_table_with_prefix( - table_id, model_dir, mode, prefix) + table_id, model_dir, mode, prefix + ) else: self._fleet_ptr.save_model_one_table(table_id, model_dir, mode) self._role_maker._barrier_worker() @@ -808,13 +878,15 @@ class PSLib(Fleet): fleet = PSLib() -def _prepare_params(input, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): +def _prepare_params( + input, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', +): """ preprocess params, this interface is not for users. 
Args: @@ -844,16 +916,18 @@ def _prepare_params(input, if d_size.get(name) is None: d_size[name] = size elif d_size[name] != size: - raise ValueError("embedding size error: %s vs %s" % - (size, d_size[name])) + raise ValueError( + "embedding size error: %s vs %s" % (size, d_size[name]) + ) # check embedding accessor accessor = FLEET_GLOBAL_DICT["cur_accessor"] if d_accessor.get(name) is None: d_accessor[name] = accessor elif d_accessor[name] != accessor: - raise ValueError("embedding size error: %s vs %s" % - (d_accessor[name], accessor)) + raise ValueError( + "embedding size error: %s vs %s" % (d_accessor[name], accessor) + ) # check embedding table id if d_table.get(name) is None: @@ -869,13 +943,15 @@ def _prepare_params(input, raise ValueError("dtype must be float32") -def _fleet_embedding(input, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): +def _fleet_embedding( + input, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', +): """ add fleet embedding, this interface is not for users. Args: @@ -888,8 +964,9 @@ def _fleet_embedding(input, dtype(str): data type of output """ # check and set params - _prepare_params(input, size, is_sparse, is_distributed, padding_idx, - param_attr, dtype) + _prepare_params( + input, size, is_sparse, is_distributed, padding_idx, param_attr, dtype + ) name = param_attr.name size = size[-1] if padding_idx is None: @@ -904,16 +981,19 @@ def _fleet_embedding(input, ctr_label_name=FLEET_GLOBAL_DICT["click_name"], padding_id=padding_idx, dtype=dtype, - scale_sparse_grad=FLEET_GLOBAL_DICT["scale_sparse_grad"]) - - -def _fleet_embedding_v2(input, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): + scale_sparse_grad=FLEET_GLOBAL_DICT["scale_sparse_grad"], + ) + + +def _fleet_embedding_v2( + input, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', +): """ add fleet embedding v2, this interface is not for users. Args: @@ -926,8 +1006,9 @@ def _fleet_embedding_v2(input, dtype(str): data type of output """ # check and set params - _prepare_params(input, size, is_sparse, is_distributed, padding_idx, - param_attr, dtype) + _prepare_params( + input, size, is_sparse, is_distributed, padding_idx, param_attr, dtype + ) name = param_attr.name size = size[-1] if padding_idx is None: @@ -942,7 +1023,8 @@ def _fleet_embedding_v2(input, ctr_label_name=FLEET_GLOBAL_DICT["click_name"], padding_id=padding_idx, dtype=dtype, - scale_sparse_grad=FLEET_GLOBAL_DICT["scale_sparse_grad"]) + scale_sparse_grad=FLEET_GLOBAL_DICT["scale_sparse_grad"], + ) class fleet_embedding(object): @@ -1012,17 +1094,20 @@ class DownpourOptimizer(DistributedOptimizer): "Currently, distributed optimizer only support Adam" "Will config built-in adam for you." 
"We will support more functions in DistributedOptimizer", - sys.stderr) + sys.stderr, + ) self._optimizer_name = "DistributedAdam" self._distributed_optimizer = globals()[self._optimizer_name](optimizer) - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ Currently, backward function can not be called through DistributedOptimizer """ @@ -1050,7 +1135,8 @@ class DownpourOptimizer(DistributedOptimizer): current_endpoint = '' num_trainers = 0 if os.getenv('PADDLE_TRAINER_ENDPOINTS') and os.getenv( - 'PADDLE_CURRENT_ENDPOINT'): + 'PADDLE_CURRENT_ENDPOINT' + ): trainer_endpoints = os.getenv('PADDLE_TRAINER_ENDPOINTS') current_endpoint = os.getenv('PADDLE_CURRENT_ENDPOINT') num_trainers = len(trainer_endpoints.split(',')) @@ -1059,7 +1145,7 @@ class DownpourOptimizer(DistributedOptimizer): 'trainer_id': trainer_id, 'num_trainers': num_trainers, 'current_endpoint': current_endpoint, - 'trainer_endpoints': trainer_endpoints + 'trainer_endpoints': trainer_endpoints, } def _remove_collective_op_for_embedding(self, loss, table_name): @@ -1086,13 +1172,15 @@ class DownpourOptimizer(DistributedOptimizer): for index in need_remove_op_index: block._remove_op(index) - def minimize(self, - losses, - scopes=None, - startup_programs=None, - parameter_list=None, - no_grad_set=None, - program_mode="all_reduce"): + def minimize( + self, + losses, + scopes=None, + startup_programs=None, + parameter_list=None, + no_grad_set=None, + program_mode="all_reduce", + ): """ minimize a program through loss, loss can be a list in DistributedOptimizer. Note that in parameter server mode, a worker will not get anything about optimize_os @@ -1115,13 +1203,17 @@ class DownpourOptimizer(DistributedOptimizer): if not isinstance(losses, list): losses = [losses] - optimize_ops, param_grads, opt_info = \ - self._distributed_optimizer._minimize( - losses, - startup_programs, - parameter_list, - no_grad_set, - self._strategy) + ( + optimize_ops, + param_grads, + opt_info, + ) = self._distributed_optimizer._minimize( + losses, + startup_programs, + parameter_list, + no_grad_set, + self._strategy, + ) opt_info["mpi_rank"] = fleet.worker_index() opt_info["mpi_size"] = fleet.worker_num() fleet._set_opt_info(opt_info) @@ -1140,13 +1232,18 @@ class DownpourOptimizer(DistributedOptimizer): fleet._scopes = scopes if opt_info["use_ps_gpu"]: from paddle.fluid.transpiler.collective import MultiThread + # check start program if program_mode not in [ - "all_reduce", "fuse_all_reduce", "all_gather", - "all_reduce_xpu" + "all_reduce", + "fuse_all_reduce", + "all_gather", + "all_reduce_xpu", ]: - raise ValueError("You should set program_mode in [ all_reduce, \ - fuse_all_reduce, all_gather, all_reduce_xpu ]") + raise ValueError( + "You should set program_mode in [ all_reduce, \ + fuse_all_reduce, all_gather, all_reduce_xpu ]" + ) env = self.get_dist_env() if not isinstance(losses, list): startup_programs = [startup_programs] @@ -1155,19 +1252,23 @@ class DownpourOptimizer(DistributedOptimizer): t = MultiThread(trans_mode=program_mode) start_program = startup_programs[i] main_program = programs[i] - t.transpile(startup_program=start_program, - main_program=main_program, - rank=env["trainer_id"], - endpoints=env["trainer_endpoints"], - current_endpoint=env['current_endpoint'], - wait_port=False) + t.transpile( + startup_program=start_program, + 
main_program=main_program, + rank=env["trainer_id"], + endpoints=env["trainer_endpoints"], + current_endpoint=env['current_endpoint'], + wait_port=False, + ) if i > 0: - self._remove_collective_ops(start_program, - "c_comm_init_all") + self._remove_collective_ops( + start_program, "c_comm_init_all" + ) for i in range(0, len(losses)): loss = losses[i] embedding_table = self._distributed_optimizer._find_multi_distributed_lookup_table( - [loss]) + [loss] + ) self._remove_collective_op_for_embedding(loss, embedding_table) return [optimize_ops, param_grads] diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py index 308261cea0676d7efc621397bfbe54b645cbe638..d1c9fae5ccbeb368de74a6dd788995b2af0e3a28 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py @@ -13,14 +13,15 @@ """Defination of Server and Worker.""" from . import ps_pb2 as pslib + # NOTE: reduce removed in fuctools in python3 from functools import reduce class Server(object): """ - A Server basic class - it's a base class, does not have implementation + A Server basic class + it's a base class, does not have implementation """ def __init__(self): @@ -29,8 +30,8 @@ class Server(object): class Worker(object): """ - A Worker basic class. - it's a base class, does not have implementation + A Worker basic class. + it's a base class, does not have implementation """ def __init__(self): @@ -39,18 +40,24 @@ class Worker(object): class DownpourServer(Server): """ - DownpourServer class is used to generate server program_desc - Args: - server: it is pslib.ServerParameter() - Examples: - server = DownpourServer() + DownpourServer class is used to generate server program_desc + Args: + server: it is pslib.ServerParameter() + Examples: + server = DownpourServer() """ def __init__(self): self._server = pslib.ServerParameter() - self._server.downpour_server_param.service_param.server_class = "DownpourBrpcPsServer" - self._server.downpour_server_param.service_param.client_class = "DownpourBrpcPsClient" - self._server.downpour_server_param.service_param.service_class = "DownpourPsService" + self._server.downpour_server_param.service_param.server_class = ( + "DownpourBrpcPsServer" + ) + self._server.downpour_server_param.service_param.client_class = ( + "DownpourBrpcPsClient" + ) + self._server.downpour_server_param.service_param.service_class = ( + "DownpourPsService" + ) self._server.downpour_server_param.service_param.start_server_port = 0 self._server.downpour_server_param.service_param.server_thread_num = 12 @@ -68,27 +75,60 @@ class DownpourServer(Server): if table.type == pslib.PS_SPARSE_TABLE: return else: - raise ValueError("expect table %s type=%s, but actual type=%s" \ - %(table_id, pslib.PS_SPARSE_TABLE, table.type)) + raise ValueError( + "expect table %s type=%s, but actual type=%s" + % (table_id, pslib.PS_SPARSE_TABLE, table.type) + ) if strategy is None: strategy = dict() table = self._server.downpour_server_param.downpour_table_param.add() table.table_id = table_id table.type = pslib.PS_SPARSE_TABLE - support_sparse_key_list = ['sparse_table_class', 'sparse_compress_in_save', 'sparse_shard_num', \ - 'sparse_accessor_class', 'sparse_learning_rate', 'sparse_initial_g2sum', 'sparse_initial_range', \ - 'sparse_weight_bounds', 'sparse_embedx_dim', 'sparse_embedx_threshold', 'sparse_nonclk_coeff', \ - 'sparse_click_coeff', 'sparse_base_threshold', 
'sparse_delta_threshold', 'sparse_delta_keep_days', \ - 'sparse_delete_after_unseen_days', 'sparse_show_click_decay_rate', 'sparse_delete_threshold', \ - 'sparse_converter', 'sparse_deconverter', 'sparse_enable_cache', 'sparse_cache_rate', \ - 'sparse_cache_file_num', 'sparse_beta1_decay_rate', 'sparse_beta2_decay_rate', \ - 'sparse_ada_epsilon', 'sparse_optimizer', 'sparse_ssd_unseenday_threshold', \ - 'embed_sparse_optimizer', 'embed_sparse_learning_rate', 'embed_sparse_weight_bounds', \ - 'embed_sparse_initial_range', 'embed_sparse_initial_g2sum', 'embed_sparse_beta1_decay_rate', \ - 'embed_sparse_beta2_decay_rate', 'embedx_sparse_optimizer', 'embedx_sparse_learning_rate', \ - 'embedx_sparse_weight_bounds', 'embedx_sparse_initial_range', 'embedx_sparse_initial_g2sum', \ - 'embedx_sparse_beta1_decay_rate', 'embedx_sparse_beta2_decay_rate'] + support_sparse_key_list = [ + 'sparse_table_class', + 'sparse_compress_in_save', + 'sparse_shard_num', + 'sparse_accessor_class', + 'sparse_learning_rate', + 'sparse_initial_g2sum', + 'sparse_initial_range', + 'sparse_weight_bounds', + 'sparse_embedx_dim', + 'sparse_embedx_threshold', + 'sparse_nonclk_coeff', + 'sparse_click_coeff', + 'sparse_base_threshold', + 'sparse_delta_threshold', + 'sparse_delta_keep_days', + 'sparse_delete_after_unseen_days', + 'sparse_show_click_decay_rate', + 'sparse_delete_threshold', + 'sparse_converter', + 'sparse_deconverter', + 'sparse_enable_cache', + 'sparse_cache_rate', + 'sparse_cache_file_num', + 'sparse_beta1_decay_rate', + 'sparse_beta2_decay_rate', + 'sparse_ada_epsilon', + 'sparse_optimizer', + 'sparse_ssd_unseenday_threshold', + 'embed_sparse_optimizer', + 'embed_sparse_learning_rate', + 'embed_sparse_weight_bounds', + 'embed_sparse_initial_range', + 'embed_sparse_initial_g2sum', + 'embed_sparse_beta1_decay_rate', + 'embed_sparse_beta2_decay_rate', + 'embedx_sparse_optimizer', + 'embedx_sparse_learning_rate', + 'embedx_sparse_weight_bounds', + 'embedx_sparse_initial_range', + 'embedx_sparse_initial_g2sum', + 'embedx_sparse_beta1_decay_rate', + 'embedx_sparse_beta2_decay_rate', + ] for key in strategy: if key not in support_sparse_key_list: @@ -100,21 +140,30 @@ class DownpourServer(Server): if table_class not in support_table_calss: raise ValueError( "support sparse_table_class: [ 'DownpourSparseTable', 'DownpourSparseSSDTable'], \ - but actual %s" % (table_class)) + but actual %s" + % (table_class) + ) else: table_class = 'DownpourSparseTable' table.table_class = table_class - if table_class == 'DownpourSparseTable' or table_class == 'DownpourSparseSSDTable': + if ( + table_class == 'DownpourSparseTable' + or table_class == 'DownpourSparseSSDTable' + ): table.enable_sparse_table_cache = strategy.get( - 'sparse_enable_cache', True) + 'sparse_enable_cache', True + ) table.sparse_table_cache_rate = strategy.get( - 'sparse_cache_rate', 0.00055) + 'sparse_cache_rate', 0.00055 + ) table.sparse_table_cache_file_num = strategy.get( - 'sparse_cache_file_num', 16) - table.compress_in_save = strategy.get('sparse_compress_in_save', - True) + 'sparse_cache_file_num', 16 + ) + table.compress_in_save = strategy.get( + 'sparse_compress_in_save', True + ) table.shard_num = strategy.get('sparse_shard_num', 1000) # DownpourFeatureValueAccessor: for ctr task, has cvm, embedding and sgd info # DownpourCtrAccessor : for ctr task, has cvm, slot, embedding and sgd info @@ -123,10 +172,13 @@ class DownpourServer(Server): # DownpourUnitAccessor : for ctr task, has cvm, slot, embedding and sgd info support_accessor_class = [ - 
'DownpourFeatureValueAccessor', 'DownpourCtrAccessor', - 'DownpourCtrDymfAccessor', 'DownpourSparseValueAccessor', - 'DownpourCtrDoubleAccessor', 'DownpourUnitAccessor', - 'DownpourDoubleUnitAccessor' + 'DownpourFeatureValueAccessor', + 'DownpourCtrAccessor', + 'DownpourCtrDymfAccessor', + 'DownpourSparseValueAccessor', + 'DownpourCtrDoubleAccessor', + 'DownpourUnitAccessor', + 'DownpourDoubleUnitAccessor', ] if strategy.get('sparse_accessor_class') is not None: accessor_class = strategy.get('sparse_accessor_class') @@ -134,56 +186,76 @@ class DownpourServer(Server): raise ValueError( "support sparse_accessor_class: ['DownpourFeatureValueAccessor', 'DownpourCtrAccessor', 'DownpourCtrDymfAccessor', \ 'DownpourSparseValueAccessor', 'DownpourCtrDoubleAccessor'], \ - but actual %s" % (accessor_class)) + but actual %s" + % (accessor_class) + ) else: accessor_class = 'DownpourCtrAccessor' table.accessor.accessor_class = accessor_class - if accessor_class == 'DownpourFeatureValueAccessor' \ - or accessor_class == 'DownpourCtrAccessor' \ - or accessor_class == 'DownpourCtrDymfAccessor' \ - or accessor_class == 'DownpourCtrDoubleAccessor': + if ( + accessor_class == 'DownpourFeatureValueAccessor' + or accessor_class == 'DownpourCtrAccessor' + or accessor_class == 'DownpourCtrDymfAccessor' + or accessor_class == 'DownpourCtrDoubleAccessor' + ): table.accessor.sparse_sgd_param.learning_rate = strategy.get( - 'sparse_learning_rate', 0.05) + 'sparse_learning_rate', 0.05 + ) table.accessor.sparse_sgd_param.initial_g2sum = strategy.get( - 'sparse_initial_g2sum', 3) + 'sparse_initial_g2sum', 3 + ) table.accessor.sparse_sgd_param.initial_range = strategy.get( - 'sparse_initial_range', 1e-4) + 'sparse_initial_range', 1e-4 + ) if strategy.get('sparse_weight_bounds') is None: table.accessor.sparse_sgd_param.weight_bounds.extend( - [-10, 10]) + [-10, 10] + ) else: table.accessor.sparse_sgd_param.weight_bounds.extend( - strategy.get('sparse_weight_bounds')) + strategy.get('sparse_weight_bounds') + ) table.accessor.embedx_dim = strategy.get('sparse_embedx_dim', 8) table.accessor.embedx_threshold = strategy.get( - 'sparse_embedx_threshold', 10) + 'sparse_embedx_threshold', 10 + ) table.accessor.fea_dim = int(table.accessor.embedx_dim) + 3 - table.accessor.downpour_accessor_param.nonclk_coeff = strategy.get( - 'sparse_nonclk_coeff', 0.1) - table.accessor.downpour_accessor_param.click_coeff = strategy.get( - 'sparse_click_coeff', 1) - table.accessor.downpour_accessor_param.base_threshold = strategy.get( - 'sparse_base_threshold', 1.5) - table.accessor.downpour_accessor_param.delta_threshold = strategy.get( - 'sparse_delta_threshold', 0.25) - table.accessor.downpour_accessor_param.delta_keep_days = strategy.get( - 'sparse_delta_keep_days', 16) + table.accessor.downpour_accessor_param.nonclk_coeff = ( + strategy.get('sparse_nonclk_coeff', 0.1) + ) + table.accessor.downpour_accessor_param.click_coeff = ( + strategy.get('sparse_click_coeff', 1) + ) + table.accessor.downpour_accessor_param.base_threshold = ( + strategy.get('sparse_base_threshold', 1.5) + ) + table.accessor.downpour_accessor_param.delta_threshold = ( + strategy.get('sparse_delta_threshold', 0.25) + ) + table.accessor.downpour_accessor_param.delta_keep_days = ( + strategy.get('sparse_delta_keep_days', 16) + ) table.accessor.downpour_accessor_param.delete_after_unseen_days = strategy.get( - 'sparse_delete_after_unseen_days', 30) + 'sparse_delete_after_unseen_days', 30 + ) table.accessor.downpour_accessor_param.ssd_unseenday_threshold = strategy.get( - 
'sparse_ssd_unseenday_threshold', 1) - table.accessor.downpour_accessor_param.show_click_decay_rate = strategy.get( - 'sparse_show_click_decay_rate', 0.98) - table.accessor.downpour_accessor_param.delete_threshold = strategy.get( - 'sparse_delete_threshold', 0.8) + 'sparse_ssd_unseenday_threshold', 1 + ) + table.accessor.downpour_accessor_param.show_click_decay_rate = ( + strategy.get('sparse_show_click_decay_rate', 0.98) + ) + table.accessor.downpour_accessor_param.delete_threshold = ( + strategy.get('sparse_delete_threshold', 0.8) + ) converter = strategy.get( 'sparse_converter', - "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)") + "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)", + ) deconverter = strategy.get( 'sparse_deconverter', - "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)" + "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)", ) table1 = table.accessor.table_accessor_save_param.add() @@ -201,52 +273,69 @@ class DownpourServer(Server): table.accessor.embedx_dim = strategy.get('sparse_embedx_dim', 8) table.accessor.fea_dim = int(table.accessor.embedx_dim) if optimizer_name == "naive": - table.accessor.sparse_commonsgd_param.naive.learning_rate = \ - strategy.get('sparse_learning_rate', 0.05) - table.accessor.sparse_commonsgd_param.naive.initial_range = \ - strategy.get('sparse_initial_range', 1e-4) + table.accessor.sparse_commonsgd_param.naive.learning_rate = strategy.get( + 'sparse_learning_rate', 0.05 + ) + table.accessor.sparse_commonsgd_param.naive.initial_range = strategy.get( + 'sparse_initial_range', 1e-4 + ) if strategy.get('sparse_weight_bounds') is None: table.accessor.sparse_commonsgd_param.naive.weight_bounds.extend( - [-10, 10]) + [-10, 10] + ) else: table.accessor.sparse_commonsgd_param.naive.weight_bounds.extend( - strategy.get('sparse_weight_bounds')) + strategy.get('sparse_weight_bounds') + ) elif optimizer_name == "adagrad": - table.accessor.sparse_commonsgd_param.adagrad.learning_rate = \ - strategy.get('sparse_learning_rate', 0.05) - table.accessor.sparse_commonsgd_param.adagrad.initial_range = \ - strategy.get('sparse_initial_range', 1e-4) + table.accessor.sparse_commonsgd_param.adagrad.learning_rate = strategy.get( + 'sparse_learning_rate', 0.05 + ) + table.accessor.sparse_commonsgd_param.adagrad.initial_range = strategy.get( + 'sparse_initial_range', 1e-4 + ) table.accessor.sparse_commonsgd_param.adagrad.initial_g2sum = strategy.get( - 'sparse_initial_g2sum', 3) + 'sparse_initial_g2sum', 3 + ) if strategy.get('sparse_weight_bounds') is None: table.accessor.sparse_commonsgd_param.adagrad.weight_bounds.extend( - [-10, 10]) + [-10, 10] + ) else: table.accessor.sparse_commonsgd_param.adagrad.weight_bounds.extend( - strategy.get('sparse_weight_bounds')) + strategy.get('sparse_weight_bounds') + ) elif optimizer_name == "adam": - table.accessor.sparse_commonsgd_param.adam.learning_rate = \ + table.accessor.sparse_commonsgd_param.adam.learning_rate = ( strategy.get('sparse_learning_rate', 0.001) - table.accessor.sparse_commonsgd_param.adam.initial_range = \ + ) + table.accessor.sparse_commonsgd_param.adam.initial_range = ( strategy.get('sparse_initial_range', 1e-4) + ) table.accessor.sparse_commonsgd_param.adam.beta1_decay_rate = strategy.get( - 'sparse_beta1_decay_rate', 0.9) + 'sparse_beta1_decay_rate', 0.9 + ) table.accessor.sparse_commonsgd_param.adam.beta2_decay_rate = strategy.get( - 'sparse_beta2_decay_rate', 0.999) - table.accessor.sparse_commonsgd_param.adam.ada_epsilon = strategy.get( - 'sparse_ada_epsilon', 
1e-8) + 'sparse_beta2_decay_rate', 0.999 + ) + table.accessor.sparse_commonsgd_param.adam.ada_epsilon = ( + strategy.get('sparse_ada_epsilon', 1e-8) + ) if strategy.get('sparse_weight_bounds') is None: table.accessor.sparse_commonsgd_param.adam.weight_bounds.extend( - [-10, 10]) + [-10, 10] + ) else: table.accessor.sparse_commonsgd_param.adam.weight_bounds.extend( - strategy.get('sparse_weight_bounds')) + strategy.get('sparse_weight_bounds') + ) converter = strategy.get( 'sparse_converter', - "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)") + "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)", + ) deconverter = strategy.get( 'sparse_deconverter', - "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)" + "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)", ) table1 = table.accessor.table_accessor_save_param.add() @@ -258,15 +347,21 @@ class DownpourServer(Server): table2.param = 2 table2.converter = converter table2.deconverter = deconverter - elif accessor_class == 'DownpourUnitAccessor' or accessor_class == 'DownpourDoubleUnitAccessor': + elif ( + accessor_class == 'DownpourUnitAccessor' + or accessor_class == 'DownpourDoubleUnitAccessor' + ): self.add_sparse_table_common_config(table, strategy) - self.add_sparse_optimizer(table.accessor.embed_sgd_param, - strategy, "embed_") - self.add_sparse_optimizer(table.accessor.embedx_sgd_param, - strategy, "embedx_") + self.add_sparse_optimizer( + table.accessor.embed_sgd_param, strategy, "embed_" + ) + self.add_sparse_optimizer( + table.accessor.embedx_sgd_param, strategy, "embedx_" + ) - def add_dense_table(self, table_id, param_var, grad_var, strategy, - sparse_table_names): + def add_dense_table( + self, table_id, param_var, grad_var, strategy, sparse_table_names + ): """ Args: table_id(int): id of sparse params table @@ -292,45 +387,72 @@ class DownpourServer(Server): table.accessor.fea_dim = fea_dim return else: - raise ValueError("expect table %s type=%s, but actual type=%s" \ - %(table_id, pslib.PS_DENSE_TABLE, table.type)) + raise ValueError( + "expect table %s type=%s, but actual type=%s" + % (table_id, pslib.PS_DENSE_TABLE, table.type) + ) if strategy is None: strategy = dict() table = self._server.downpour_server_param.downpour_table_param.add() table.table_id = table_id - support_dense_key_list = ['dense_table_class', 'dense_compress_in_save', 'dense_accessor_class', \ - 'dense_optimizer', 'dense_learning_rate', 'dense_avg_decay', 'dense_ada_decay', \ - 'dense_ada_epsilon', 'dense_mom_decay', 'dense_naive_lr'] + support_dense_key_list = [ + 'dense_table_class', + 'dense_compress_in_save', + 'dense_accessor_class', + 'dense_optimizer', + 'dense_learning_rate', + 'dense_avg_decay', + 'dense_ada_decay', + 'dense_ada_epsilon', + 'dense_mom_decay', + 'dense_naive_lr', + ] for key in strategy: if key not in support_dense_key_list: raise ValueError("strategy key '%s' not support" % (key)) - table.table_class = strategy.get('dense_table_class', - "DownpourDenseTable") + table.table_class = strategy.get( + 'dense_table_class', "DownpourDenseTable" + ) table.type = pslib.PS_DENSE_TABLE table.compress_in_save = strategy.get('dense_compress_in_save', True) table.accessor.accessor_class = strategy.get( - 'dense_accessor_class', "DownpourDenseValueAccessor") + 'dense_accessor_class', "DownpourDenseValueAccessor" + ) table.accessor.dense_sgd_param.name = strategy.get( - 'dense_optimizer', "adam") + 'dense_optimizer', "adam" + ) table.accessor.dense_sgd_param.adam.learning_rate = strategy.get( - 
'dense_learning_rate', 5e-06) + 'dense_learning_rate', 5e-06 + ) table.accessor.dense_sgd_param.adam.avg_decay_rate = strategy.get( - 'dense_avg_decay', 0.999993) + 'dense_avg_decay', 0.999993 + ) table.accessor.dense_sgd_param.adam.ada_decay_rate = strategy.get( - 'dense_ada_decay', 0.9999) + 'dense_ada_decay', 0.9999 + ) table.accessor.dense_sgd_param.adam.ada_epsilon = strategy.get( - 'dense_ada_epsilon', 1e-8) + 'dense_ada_epsilon', 1e-8 + ) table.accessor.dense_sgd_param.adam.mom_decay_rate = strategy.get( - 'dense_mom_decay', 0.99) + 'dense_mom_decay', 0.99 + ) table.accessor.dense_sgd_param.naive.learning_rate = strategy.get( - 'dense_naive_lr', 0.0002) + 'dense_naive_lr', 0.0002 + ) table.accessor.fea_dim = fea_dim - def add_data_norm_table(self, table_id, learning_rate, param_var, grad_var, - strategy, sparse_table_names): + def add_data_norm_table( + self, + table_id, + learning_rate, + param_var, + grad_var, + strategy, + sparse_table_names, + ): """ Args: table_id(int): id of datanorm table @@ -357,13 +479,20 @@ class DownpourServer(Server): table.accessor.fea_dim = fea_dim return else: - raise ValueError("expect table %s type=%s, but actual type=%s" \ - %(table_id, pslib.PS_DENSE_TABLE, table.type)) + raise ValueError( + "expect table %s type=%s, but actual type=%s" + % (table_id, pslib.PS_DENSE_TABLE, table.type) + ) if strategy is None: strategy = dict() - support_datanorm_key_list = ['datanorm_table_class', 'datanorm_compress_in_save', \ - 'datanorm_accessor_class', 'datanorm_operation', 'datanorm_decay_rate'] + support_datanorm_key_list = [ + 'datanorm_table_class', + 'datanorm_compress_in_save', + 'datanorm_accessor_class', + 'datanorm_operation', + 'datanorm_decay_rate', + ] for key in strategy: if key not in support_datanorm_key_list: @@ -371,91 +500,119 @@ class DownpourServer(Server): table = self._server.downpour_server_param.downpour_table_param.add() table.table_id = table_id - table.table_class = strategy.get('datanorm_table_class', - 'DownpourDenseTable') + table.table_class = strategy.get( + 'datanorm_table_class', 'DownpourDenseTable' + ) table.type = pslib.PS_DENSE_TABLE table.compress_in_save = strategy.get('datanorm_compress_in_save', True) table.accessor.accessor_class = strategy.get( - 'datanorm_accessor_class', 'DownpourDenseValueAccessor') + 'datanorm_accessor_class', 'DownpourDenseValueAccessor' + ) table.accessor.dense_sgd_param.name = strategy.get( - 'datanorm_operation', 'summary') - table.accessor.dense_sgd_param.summary.summary_decay_rate = strategy.get( - 'datanorm_decay_rate', 0.999999) + 'datanorm_operation', 'summary' + ) + table.accessor.dense_sgd_param.summary.summary_decay_rate = ( + strategy.get('datanorm_decay_rate', 0.999999) + ) table.accessor.fea_dim = fea_dim def add_sparse_optimizer(self, sgd, strategy, prefix): optimizer_name = strategy.get(prefix + "sparse_optimizer", "adagrad") sgd.name = optimizer_name if optimizer_name == "naive": - sgd.naive.learning_rate = \ - strategy.get(prefix + 'sparse_learning_rate', 0.05) - sgd.naive.initial_range = \ - strategy.get(prefix + 'sparse_initial_range', 1e-4) + sgd.naive.learning_rate = strategy.get( + prefix + 'sparse_learning_rate', 0.05 + ) + sgd.naive.initial_range = strategy.get( + prefix + 'sparse_initial_range', 1e-4 + ) bounds = strategy.get(prefix + 'sparse_weight_bounds', [-10, 10]) sgd.naive.weight_bounds.extend(bounds) elif optimizer_name == "adagrad": - sgd.adagrad.learning_rate = \ - strategy.get(prefix + 'sparse_learning_rate', 0.05) - sgd.adagrad.initial_range = \ - 
strategy.get(prefix + 'sparse_initial_range', 1e-4) + sgd.adagrad.learning_rate = strategy.get( + prefix + 'sparse_learning_rate', 0.05 + ) + sgd.adagrad.initial_range = strategy.get( + prefix + 'sparse_initial_range', 1e-4 + ) if prefix == "embed_": sgd.adagrad.initial_range = 0 sgd.adagrad.initial_g2sum = strategy.get( - prefix + 'sparse_initial_g2sum', 3) + prefix + 'sparse_initial_g2sum', 3 + ) bounds = strategy.get(prefix + 'sparse_weight_bounds', [-10, 10]) sgd.adagrad.weight_bounds.extend(bounds) elif optimizer_name == "std_adagrad": - sgd.adagrad.learning_rate = \ - strategy.get(prefix + 'sparse_learning_rate', 0.05) - sgd.adagrad.initial_range = \ - strategy.get(prefix + 'sparse_initial_range', 1e-4) + sgd.adagrad.learning_rate = strategy.get( + prefix + 'sparse_learning_rate', 0.05 + ) + sgd.adagrad.initial_range = strategy.get( + prefix + 'sparse_initial_range', 1e-4 + ) if prefix == "embed_": sgd.adagrad.initial_range = 0 sgd.adagrad.initial_g2sum = strategy.get( - prefix + 'sparse_initial_g2sum', 3) + prefix + 'sparse_initial_g2sum', 3 + ) bounds = strategy.get(prefix + 'sparse_weight_bounds', [-10, 10]) sgd.adagrad.weight_bounds.extend(bounds) elif optimizer_name == "adam": - sgd.adam.learning_rate = \ - strategy.get(prefix + 'sparse_learning_rate', 0.001) - sgd.adam.initial_range = \ - strategy.get(prefix + 'sparse_initial_range', 1e-4) + sgd.adam.learning_rate = strategy.get( + prefix + 'sparse_learning_rate', 0.001 + ) + sgd.adam.initial_range = strategy.get( + prefix + 'sparse_initial_range', 1e-4 + ) sgd.adam.beta1_decay_rate = strategy.get( - prefix + 'sparse_beta1_decay_rate', 0.9) + prefix + 'sparse_beta1_decay_rate', 0.9 + ) sgd.adam.beta2_decay_rate = strategy.get( - prefix + 'sparse_beta2_decay_rate', 0.999) - sgd.adam.ada_epsilon = strategy.get(prefix + 'sparse_ada_epsilon', - 1e-8) + prefix + 'sparse_beta2_decay_rate', 0.999 + ) + sgd.adam.ada_epsilon = strategy.get( + prefix + 'sparse_ada_epsilon', 1e-8 + ) bounds = strategy.get(prefix + 'sparse_weight_bounds', [-10, 10]) sgd.adam.weight_bounds.extend(bounds) def add_sparse_table_common_config(self, table, strategy): table.accessor.embedx_dim = strategy.get('sparse_embedx_dim', 8) table.accessor.embedx_threshold = strategy.get( - 'sparse_embedx_threshold', 10) + 'sparse_embedx_threshold', 10 + ) table.accessor.fea_dim = int(table.accessor.embedx_dim) + 3 table.accessor.downpour_accessor_param.nonclk_coeff = strategy.get( - 'sparse_nonclk_coeff', 0.1) + 'sparse_nonclk_coeff', 0.1 + ) table.accessor.downpour_accessor_param.click_coeff = strategy.get( - 'sparse_click_coeff', 1) + 'sparse_click_coeff', 1 + ) table.accessor.downpour_accessor_param.base_threshold = strategy.get( - 'sparse_base_threshold', 1.5) + 'sparse_base_threshold', 1.5 + ) table.accessor.downpour_accessor_param.delta_threshold = strategy.get( - 'sparse_delta_threshold', 0.25) + 'sparse_delta_threshold', 0.25 + ) table.accessor.downpour_accessor_param.delta_keep_days = strategy.get( - 'sparse_delta_keep_days', 16) - table.accessor.downpour_accessor_param.delete_after_unseen_days = strategy.get( - 'sparse_delete_after_unseen_days', 30) - table.accessor.downpour_accessor_param.show_click_decay_rate = strategy.get( - 'sparse_show_click_decay_rate', 0.98) + 'sparse_delta_keep_days', 16 + ) + table.accessor.downpour_accessor_param.delete_after_unseen_days = ( + strategy.get('sparse_delete_after_unseen_days', 30) + ) + table.accessor.downpour_accessor_param.show_click_decay_rate = ( + strategy.get('sparse_show_click_decay_rate', 0.98) + ) 
table.accessor.downpour_accessor_param.delete_threshold = strategy.get( - 'sparse_delete_threshold', 0.8) + 'sparse_delete_threshold', 0.8 + ) converter = strategy.get( 'sparse_converter', - "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)") + "(scripts/xbox_compressor_mf.py | bin/xbox_pb_converter)", + ) deconverter = strategy.get( 'sparse_deconverter', - "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)") + "(bin/xbox_pb_deconverter | scripts/xbox_decompressor_mf.awk)", + ) table1 = table.accessor.table_accessor_save_param.add() table1.param = 1 @@ -476,23 +633,21 @@ class DownpourServer(Server): class DownpourWorker(Worker): """ - DownpourWorker class is used to generate worker program_desc - Args: - window (int): push params frequency - worker: it is pslib.DownpourTrainerParameter - Examples: - worker = DownpourWorker(1) + DownpourWorker class is used to generate worker program_desc + Args: + window (int): push params frequency + worker: it is pslib.DownpourTrainerParameter + Examples: + worker = DownpourWorker(1) """ def __init__(self, window): self.window = window self._worker = pslib.DownpourTrainerParameter() - def add_sparse_table(self, - table_id, - slot_key_vars, - slot_value_vars, - slot_value_grads=None): + def add_sparse_table( + self, table_id, slot_key_vars, slot_value_vars, slot_value_grads=None + ): """ Args: table_id(int): id of sparse params table @@ -503,8 +658,9 @@ class DownpourWorker(Worker): return None """ if slot_value_grads is None: - slot_value_grad_names = \ - [var.name + "@GRAD" for var in slot_value_vars] + slot_value_grad_names = [ + var.name + "@GRAD" for var in slot_value_vars + ] else: value_to_key = {} for i in range(len(slot_key_vars)): @@ -514,12 +670,19 @@ class DownpourWorker(Worker): for var in slot_value_vars: if var.name + "@GRAD" in all_grad_names: slot_value_grad_names.append(var.name + "@GRAD") - sorted_slot_value_vars = [i for i in slot_value_vars if \ - i.name + "@GRAD" in slot_value_grad_names] - sorted_slot_value_vars += [i for i in slot_value_vars if \ - i.name + "@GRAD" not in slot_value_grad_names] - sorted_slot_key_vars = \ - [value_to_key[v.name] for v in sorted_slot_value_vars] + sorted_slot_value_vars = [ + i + for i in slot_value_vars + if i.name + "@GRAD" in slot_value_grad_names + ] + sorted_slot_value_vars += [ + i + for i in slot_value_vars + if i.name + "@GRAD" not in slot_value_grad_names + ] + sorted_slot_key_vars = [ + value_to_key[v.name] for v in sorted_slot_value_vars + ] target_table = None for table in self._worker.sparse_table: @@ -528,8 +691,9 @@ class DownpourWorker(Worker): key_names = [var.name for var in sorted_slot_key_vars] for key_name in key_names: if key_name not in keys: - raise ValueError("sparse table %s slot_key error" % - table_id) + raise ValueError( + "sparse table %s slot_key error" % table_id + ) target_table = table break @@ -542,8 +706,15 @@ class DownpourWorker(Worker): table.slot_value.extend([var.name for var in sorted_slot_value_vars]) table.slot_gradient.extend(slot_value_grad_names) - def add_dense_table(self, table_id, learning_rate, param_vars, grad_vars, - dense_start_table_id, sparse_table_names): + def add_dense_table( + self, + table_id, + learning_rate, + param_vars, + grad_vars, + dense_start_table_id, + sparse_table_names, + ): r""" Args: table_id(int): id of sparse params table @@ -580,22 +751,25 @@ class DownpourWorker(Worker): if dense_param_name == desc_dense_param_name: desc_dense_grad_name = list( - table.dense_gradient_variable_name) + 
table.dense_gradient_variable_name + ) desc_dense_grad_name.sort() if dense_grad_name == desc_dense_grad_name: return else: raise ValueError( "dense table %s dense_gradient_variable_name " - "error" % table_id) + "error" % table_id + ) else: raise ValueError( - "dense table %s dense_variable_name error" % table_id) + "dense table %s dense_variable_name error" % table_id + ) table = self._worker.dense_table.add() table.table_id = table_id - #def cmp_fc(x, y): + # def cmp_fc(x, y): # if x.startswith("fc_") and y.startswith("fc_"): # index_x = x.find('.') # index_y = y.find('.') @@ -616,8 +790,8 @@ class DownpourWorker(Worker): # else: # return 1 - #table.dense_variable_name.extend(sorted(dense_param_name, cmp_fc)) - #table.dense_gradient_variable_name.extend( + # table.dense_variable_name.extend(sorted(dense_param_name, cmp_fc)) + # table.dense_gradient_variable_name.extend( # sorted(dense_grad_name, cmp_fc)) table.dense_variable_name.extend(dense_param_name) table.dense_gradient_variable_name.extend(dense_grad_name) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py index 35cda4c34b00969012b4a9f564095309d2ee9659..ed9db255146e51013b78112f5ba01455ef0a0399 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py @@ -17,8 +17,12 @@ __all__ = ["DistributedAdam", "FLEET_GLOBAL_DICT"] import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table -from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table_inputs -from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table_outputs +from paddle.fluid.distribute_lookup_table import ( + find_distributed_lookup_table_inputs, +) +from paddle.fluid.distribute_lookup_table import ( + find_distributed_lookup_table_outputs, +) from google.protobuf import text_format from collections import OrderedDict import copy @@ -61,11 +65,13 @@ class DistributedOptimizerImplBase(object): self._learning_rate = optimizer._learning_rate self._regularization = optimizer.regularization - def minimize(self, - losses, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, + losses, + startup_program=None, + parameter_list=None, + no_grad_set=None, + ): """ Args: losses(Variable): loss variable defined by user @@ -90,15 +96,24 @@ class DistributedAdam(DistributedOptimizerImplBase): self._window = 1 self.type = "downpour" self.data_norm_name = [ - ".batch_size", ".batch_square_sum", ".batch_sum", - ".batch_size@GRAD", ".batch_square_sum@GRAD", ".batch_sum@GRAD" + ".batch_size", + ".batch_square_sum", + ".batch_sum", + ".batch_size@GRAD", + ".batch_square_sum@GRAD", + ".batch_sum@GRAD", ] self.supported_embedding_types = [ - "lookup_table", "pull_sparse", "pull_sparse_v2", "pull_box_sparse", - "pull_gpups_sparse" + "lookup_table", + "pull_sparse", + "pull_sparse_v2", + "pull_box_sparse", + "pull_gpups_sparse", ] self.supported_embedding_grad_types = [ - "lookup_table_grad", "push_sparse", "push_sparse_v2" + "lookup_table_grad", + "push_sparse", + "push_sparse_v2", ] op_maker = core.op_proto_and_checker_maker self.op_role_key = op_maker.kOpRoleAttrName() @@ -122,7 +137,8 @@ class DistributedAdam(DistributedOptimizerImplBase): if op.type in self.supported_embedding_types: if op.input("W")[0] in 
table_names: inputs_dict[op.input("W")[0]].extend( - [local_vars[name] for name in op.input("Ids")]) + [local_vars[name] for name in op.input("Ids")] + ) return inputs_dict def _find_distributed_lookup_table_outputs(self, program, table_names): @@ -144,7 +160,8 @@ class DistributedAdam(DistributedOptimizerImplBase): if op.type in self.supported_embedding_types: if op.input("W")[0] in table_names: outputs_dict[op.input("W")[0]].extend( - [local_vars[name] for name in op.output("Out")]) + [local_vars[name] for name in op.output("Out")] + ) return outputs_dict def _find_distributed_lookup_table_grads(self, program, table_names): @@ -157,12 +174,14 @@ class DistributedAdam(DistributedOptimizerImplBase): if op.type in self.supported_embedding_grad_types: if op.input("W")[0] in table_names: grads_dict[op.input("W")[0]].extend( - [local_vars[name] for name in op.input("Out@GRAD")]) + [local_vars[name] for name in op.input("Out@GRAD")] + ) return grads_dict def _is_optimizer_op(self, op): - return self.op_role_key in op.attr_names and \ - int(op.all_attrs()[self.op_role_key]) & int(OpRole.Optimize) + return self.op_role_key in op.attr_names and int( + op.all_attrs()[self.op_role_key] + ) & int(OpRole.Optimize) def _remove_optimize_op_for_embedding(self, loss, table_name): """ @@ -217,8 +236,9 @@ class DistributedAdam(DistributedOptimizerImplBase): return False return True - def _generte_cond_para_map(self, op, _fill_value_dict, _equal_fill_dict, - _now_program, _all_params): + def _generte_cond_para_map( + self, op, _fill_value_dict, _equal_fill_dict, _now_program, _all_params + ): # generate cond value to parameter map recursively cond_str = op.input('Cond')[0] vars_ = op.input('Input') @@ -241,9 +261,13 @@ class DistributedAdam(DistributedOptimizerImplBase): ops_cond = _now_program.block(int(op.attr('sub_block').id)).ops for op in ops_cond: if op.type == 'conditional_block': - self._generte_cond_para_map(op, _fill_value_dict, - _equal_fill_dict, _now_program, - _all_params) + self._generte_cond_para_map( + op, + _fill_value_dict, + _equal_fill_dict, + _now_program, + _all_params, + ) def _has_conditional_block(self, loss): now_program = loss.block.program @@ -256,8 +280,10 @@ class DistributedAdam(DistributedOptimizerImplBase): def _check_params_grads(self, params, grads): if len(params) != len(grads): - raise ValueError("params size != grads size, %s vs %s" % - (len(params), len(grads))) + raise ValueError( + "params size != grads size, %s vs %s" + % (len(params), len(grads)) + ) pname2grad = dict() for i in range(len(params)): @@ -269,13 +295,15 @@ class DistributedAdam(DistributedOptimizerImplBase): return pname2grad - def _generate_multi_dense_table(self, - params, - grads, - cond_params, - other_params, - sparse_table_names, - dense_table_id=0): + def _generate_multi_dense_table( + self, + params, + grads, + cond_params, + other_params, + sparse_table_names, + dense_table_id=0, + ): # generate multi dense table by cond value pname2grad = self._check_params_grads(params, grads) root_params_list = [] @@ -307,7 +335,14 @@ class DistributedAdam(DistributedOptimizerImplBase): lists_params[name2key[p.name]].append(p) lists_grads[name2key[p.name]].append(pname2grad[p.name]) - return dense_tables, cond2denseid, lists_params, lists_grads, root_params_list, root_grads_list + return ( + dense_tables, + cond2denseid, + lists_params, + lists_grads, + root_params_list, + root_grads_list, + ) def _gen_distributed_emb_to_size_dict(self, program): d_size = dict() @@ -321,13 +356,16 @@ class 
DistributedAdam(DistributedOptimizerImplBase): if d_size.get(table_name) is None: d_size[table_name] = emb_size elif d_size[table_name] != emb_size: - raise ValueError("embedding size error: %s vs %s" % - (emb_size, d_size[table_name])) + raise ValueError( + "embedding size error: %s vs %s" + % (emb_size, d_size[table_name]) + ) return d_size - def _check_config_fleet_with_program_op(self, strategy, table_name, - emb_to_size): + def _check_config_fleet_with_program_op( + self, strategy, table_name, emb_to_size + ): if strategy.get(table_name) is None: strategy[table_name] = dict() st = strategy[table_name] @@ -337,67 +375,93 @@ class DistributedAdam(DistributedOptimizerImplBase): accessor = st["sparse_accessor_class"] # set sparse_embedx_dim in the strategy according to accessor and use_cvm config - if accessor == "DownpourFeatureValueAccessor" \ - or accessor == "DownpourCtrAccessor" \ - or accessor == "DownpourCtrDymfAccessor" \ - or accessor == "DownpourDoubleUnitAccessor" \ - or accessor == "DownpourUnitAccessor": - if st.get("sparse_embedx_dim") is not None \ - and strategy.get("use_cvm") == True \ - and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3: + if ( + accessor == "DownpourFeatureValueAccessor" + or accessor == "DownpourCtrAccessor" + or accessor == "DownpourCtrDymfAccessor" + or accessor == "DownpourDoubleUnitAccessor" + or accessor == "DownpourUnitAccessor" + ): + if ( + st.get("sparse_embedx_dim") is not None + and strategy.get("use_cvm") == True + and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3 + ): raise ValueError( "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim - 3 = %s" % - (st["sparse_embedx_dim"], emb_to_size[table_name] - 3)) - if st.get("sparse_embedx_dim") is not None \ - and strategy.get("use_cvm") == False \ - and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1: + " equal to embedding dim - 3 = %s" + % (st["sparse_embedx_dim"], emb_to_size[table_name] - 3) + ) + if ( + st.get("sparse_embedx_dim") is not None + and strategy.get("use_cvm") == False + and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1 + ): raise ValueError( "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim - 1 = %s" % - (st["sparse_embedx_dim"], emb_to_size[table_name] - 1)) - if st.get("sparse_embedx_dim") is None \ - and strategy.get("use_cvm") == True: + " equal to embedding dim - 1 = %s" + % (st["sparse_embedx_dim"], emb_to_size[table_name] - 1) + ) + if ( + st.get("sparse_embedx_dim") is None + and strategy.get("use_cvm") == True + ): logger.warning( "sparse embedding dim for table name '{}' is: {}, while sparse_embedx_dim " "with same sparse table name is not set in config_fleet.py. " - "Hence automatically set sparse_embedx_dim = {} - 3.". - format(table_name, emb_to_size[table_name], - emb_to_size[table_name])) + "Hence automatically set sparse_embedx_dim = {} - 3.".format( + table_name, + emb_to_size[table_name], + emb_to_size[table_name], + ) + ) st["sparse_embedx_dim"] = emb_to_size[table_name] - 3 - if st.get("sparse_embedx_dim") is None \ - and strategy.get("use_cvm") == False: + if ( + st.get("sparse_embedx_dim") is None + and strategy.get("use_cvm") == False + ): logger.warning( "sparse embedding dim for table name '{}' is: {}, while sparse_embedx_dim " "with same sparse table name is not set in config_fleet.py. " - "Hence automatically set sparse_embedx_dim = {} - 1.". 
- format(table_name, emb_to_size[table_name], - emb_to_size[table_name])) + "Hence automatically set sparse_embedx_dim = {} - 1.".format( + table_name, + emb_to_size[table_name], + emb_to_size[table_name], + ) + ) st["sparse_embedx_dim"] = emb_to_size[table_name] - 1 elif accessor == "DownpourSparseValueAccessor": - if st.get("sparse_embedx_dim") is not None \ - and st["sparse_embedx_dim"] != emb_to_size[table_name]: + if ( + st.get("sparse_embedx_dim") is not None + and st["sparse_embedx_dim"] != emb_to_size[table_name] + ): raise ValueError( "fleet config sparse_embedx_dim=%s not" - " equal to embedding dim = %s" % - (st["sparse_embedx_dim"], emb_to_size[table_name])) + " equal to embedding dim = %s" + % (st["sparse_embedx_dim"], emb_to_size[table_name]) + ) if st.get("sparse_embedx_dim") is None: logger.warning( "sparse embedding dim for table name '{}' is: {}, while sparse_embedx_dim " "with same sparse table name is not set in config_fleet.py. " "Hence automatically set sparse_embedx_dim = {}.".format( - table_name, emb_to_size[table_name], - emb_to_size[table_name])) + table_name, + emb_to_size[table_name], + emb_to_size[table_name], + ) + ) st["sparse_embedx_dim"] = emb_to_size[table_name] return strategy - def _minimize(self, - losses, - startup_program=None, - parameter_list=None, - no_grad_set=None, - strategy={}): + def _minimize( + self, + losses, + startup_program=None, + parameter_list=None, + no_grad_set=None, + strategy={}, + ): """ DownpounSGD is a distributed optimizer so that user can call minimize to generate backward @@ -439,9 +503,10 @@ class DistributedAdam(DistributedOptimizerImplBase): parameters = parameter_list[num] prog_id = str(id(loss.block.program)) # param_grads of program - params_grads = sorted(fluid.backward.append_backward( - loss, parameters, no_grad_set), - key=lambda x: x[0].name) + params_grads = sorted( + fluid.backward.append_backward(loss, parameters, no_grad_set), + key=lambda x: x[0].name, + ) flag_use_ps_gpu = strategy.get("use_ps_gpu", False) if flag_use_ps_gpu: @@ -451,9 +516,11 @@ class DistributedAdam(DistributedOptimizerImplBase): optimize_ops = optimizer.apply_optimize( loss, startup_program=startup_program[num], - params_grads=params_grads) + params_grads=params_grads, + ) embedding_table = self._find_multi_distributed_lookup_table( - [loss]) + [loss] + ) self._remove_optimize_op_for_embedding(loss, embedding_table) # has condition_block op means multi-task flag_multi_task = self._has_conditional_block(loss) @@ -476,9 +543,13 @@ class DistributedAdam(DistributedOptimizerImplBase): if op.type == 'equal': equal_fill_dict[op.output('Out')[0]] = op.input('Y')[0] if op.type == 'conditional_block': - self._generte_cond_para_map(op, fill_value_dict, - equal_fill_dict, - now_program, all_params) + self._generte_cond_para_map( + op, + fill_value_dict, + equal_fill_dict, + now_program, + all_params, + ) if prog_id not in program_id_set: program_id_set.add(prog_id) @@ -493,28 +564,32 @@ class DistributedAdam(DistributedOptimizerImplBase): # get {table_name: emb_size} dict from program ops emb_to_size = self._gen_distributed_emb_to_size_dict( - loss.block.program) + loss.block.program + ) # get inputs_dict inputs_dict = self._find_distributed_lookup_table_inputs( - loss.block.program, sparse_table) + loss.block.program, sparse_table + ) prog_id_to_inputs_dict[prog_id] = inputs_dict # get outputs_dict outputs_dict = self._find_distributed_lookup_table_outputs( - loss.block.program, sparse_table) + loss.block.program, sparse_table + ) 
prog_id_to_outputs_dict[prog_id] = outputs_dict prog_id_to_worker[prog_id] = DownpourWorker(self._window) grads_dict = self._find_distributed_lookup_table_grads( - loss.block.program, sparse_table) + loss.block.program, sparse_table + ) prog_id_to_sparse_grads[prog_id] = grads_dict if prog_id not in prog_id_to_param_grads: prog_id_to_param_grads[prog_id] = [] prog_id_to_param_grads[prog_id].append(params_grads) - #if strategy.get("parallel_compute") + # if strategy.get("parallel_compute") # if user specify a fleet_desc.prototxt file, then load the file # instead of creating default fleet_desc.prototxt. @@ -527,17 +602,20 @@ class DistributedAdam(DistributedOptimizerImplBase): if len(ps_param.trainer_param) == 1: for k in prog_id_to_worker: prog_id_to_worker[k].get_desc().CopyFrom( - ps_param.trainer_param[0]) + ps_param.trainer_param[0] + ) else: if len(ps_param.trainer_param) != len(prog_id_to_worker): raise ValueError( - "trainer param size != program size, %s vs %s" % - (len(ps_param.trainer_param), len(prog_id_to_worker))) + "trainer param size != program size, %s vs %s" + % (len(ps_param.trainer_param), len(prog_id_to_worker)) + ) idx = 0 # prog_id_to_worker is OrderedDict for k in prog_id_to_worker: prog_id_to_worker[k].get_desc().CopyFrom( - ps_param.trainer_param[idx]) + ps_param.trainer_param[idx] + ) idx += 1 # check config in op defination and fleet config @@ -550,10 +628,13 @@ class DistributedAdam(DistributedOptimizerImplBase): if len(sparse_table_to_index) != len(emb_to_table): raise ValueError( "sparse tables from program != sparse tables from op: %s " - "vs %s" % (len(sparse_table_to_index), len(emb_to_table))) + "vs %s" % (len(sparse_table_to_index), len(emb_to_table)) + ) for key in sparse_table_to_index: - if key not in emb_to_table or \ - sparse_table_to_index[key] != emb_to_table[key]: + if ( + key not in emb_to_table + or sparse_table_to_index[key] != emb_to_table[key] + ): print("sparse_table_to_index ", sparse_table_to_index) print("emb_to_table ", emb_to_table) raise ValueError("key error: %s" % key) @@ -564,8 +645,9 @@ class DistributedAdam(DistributedOptimizerImplBase): accessor = None if st.get("sparse_accessor_class") is not None: accessor = st["sparse_accessor_class"] - tables = \ + tables = ( server.get_desc().downpour_server_param.downpour_table_param + ) for table in tables: if table.table_id == sparse_table_to_index[key]: accessor = table.accessor.accessor_class @@ -574,44 +656,57 @@ class DistributedAdam(DistributedOptimizerImplBase): for loss in losses: for op in loss.block.program.global_block().ops: if op.type in self.supported_embedding_types: - if accessor is not None \ - and op.has_attr("AccessorClass"): + if accessor is not None and op.has_attr( + "AccessorClass" + ): op._set_attr("AccessorClass", accessor) if one_slot is None: - one_slot = loss.block.program. 
\ - global_block().var(op.input("Ids")[0]) + one_slot = ( + loss.block.program.global_block().var( + op.input("Ids")[0] + ) + ) # if accessor is None, use default accessor in op definition if accessor is None: accessor = emb_to_accessor[key] # set sparse_embedx_dim in strategy, # user do not have to set it in config_fleet - if accessor == "DownpourFeatureValueAccessor" \ - or accessor == "DownpourCtrDymfAccessor" \ - or accessor == "DownpourCtrAccessor" \ - or accessor == "DownpourDoubleUnitAccessor" \ - or accessor == "DownpourUnitAccessor": - if st.get("sparse_embedx_dim") is not None \ - and st["sparse_embedx_dim"] != emb_to_size[key] - 3: + if ( + accessor == "DownpourFeatureValueAccessor" + or accessor == "DownpourCtrDymfAccessor" + or accessor == "DownpourCtrAccessor" + or accessor == "DownpourDoubleUnitAccessor" + or accessor == "DownpourUnitAccessor" + ): + if ( + st.get("sparse_embedx_dim") is not None + and st["sparse_embedx_dim"] != emb_to_size[key] - 3 + ): raise ValueError( "fleet config sparse_embedx_dim=%s not" - " equal to embedding size - 3 = %s" % - (st["sparse_embedx_dim"], emb_to_size[key] - 3)) + " equal to embedding size - 3 = %s" + % (st["sparse_embedx_dim"], emb_to_size[key] - 3) + ) st["sparse_embedx_dim"] = emb_to_size[key] - 3 elif accessor == "DownpourSparseValueAccessor": - if st.get("sparse_embedx_dim") is not None \ - and st["sparse_embedx_dim"] != emb_to_size[key]: + if ( + st.get("sparse_embedx_dim") is not None + and st["sparse_embedx_dim"] != emb_to_size[key] + ): raise ValueError( "fleet config sparse_embedx_dim=%s not" - " equal to embedding size = %s" % - (st["sparse_embedx_dim"], emb_to_size[key])) + " equal to embedding size = %s" + % (st["sparse_embedx_dim"], emb_to_size[key]) + ) st["sparse_embedx_dim"] = emb_to_size[key] # ServerParameter add all sparse tables for tn in sparse_table_to_index: sparse_table_index = sparse_table_to_index[tn] st = self._check_config_fleet_with_program_op( - strategy, tn, emb_to_size) + strategy, tn, emb_to_size + ) if st.get(tn) is not None: server.add_sparse_table(sparse_table_index, st[tn]) else: @@ -629,8 +724,12 @@ class DistributedAdam(DistributedOptimizerImplBase): for tn in prog_id_to_sparse_table[prog_id]: sparse_table_index = sparse_table_to_index[tn] grads_dict = prog_id_to_sparse_grads[prog_id] - worker.add_sparse_table(sparse_table_index, inputs_dict[tn], - outputs_dict[tn], grads_dict[tn]) + worker.add_sparse_table( + sparse_table_index, + inputs_dict[tn], + outputs_dict[tn], + grads_dict[tn], + ) dense_start_table_id = len(sparse_table_to_index) dense_table_index = len(sparse_table_to_index) @@ -644,12 +743,13 @@ class DistributedAdam(DistributedOptimizerImplBase): program_id_set.add(program_id) worker = prog_id_to_worker[program_id] sparse_table_names = prog_id_to_sparse_table[program_id] - sparse_table_index = \ - [sparse_table_to_index[i] for i in sparse_table_names] + sparse_table_index = [ + sparse_table_to_index[i] for i in sparse_table_names + ] program_configs[program_id] = { "pull_sparse": [t_index for t_index in sparse_table_index], - "push_sparse": [t_index for t_index in sparse_table_index] + "push_sparse": [t_index for t_index in sparse_table_index], } params_grads = prog_id_to_param_grads[program_id] @@ -679,12 +779,24 @@ class DistributedAdam(DistributedOptimizerImplBase): multi_task_dense_tables_push = [] multi_task_dense_tables_pull = [] if flag_multi_task: - dense_tables, cond2denseid, lists_params, lists_grads, root_params_list, root_grads_list = self._generate_multi_dense_table( - 
params, grads, self._cond_params, - self._other_params, sparse_table_names, - dense_table_index) + ( + dense_tables, + cond2denseid, + lists_params, + lists_grads, + root_params_list, + root_grads_list, + ) = self._generate_multi_dense_table( + params, + grads, + self._cond_params, + self._other_params, + sparse_table_names, + dense_table_index, + ) program_configs[program_id][ - 'cond2denseid'] = cond2denseid + 'cond2denseid' + ] = cond2denseid multi_task_dense_tables_push = dense_tables multi_task_dense_tables_pull = dense_tables[:] @@ -692,56 +804,76 @@ class DistributedAdam(DistributedOptimizerImplBase): if flag_multi_task: server_dense_table_index = dense_table_index if len(root_params_list) > 0: - server.add_dense_table(server_dense_table_index, - root_params_list, - root_grads_list, - strategy['dense_table'], - sparse_table_names) + server.add_dense_table( + server_dense_table_index, + root_params_list, + root_grads_list, + strategy['dense_table'], + sparse_table_names, + ) server_dense_table_index += 1 for i in range(len(lists_params)): - server.add_dense_table(server_dense_table_index, - lists_params[i], - lists_grads[i], - strategy['dense_table'], - sparse_table_names) + server.add_dense_table( + server_dense_table_index, + lists_params[i], + lists_grads[i], + strategy['dense_table'], + sparse_table_names, + ) server_dense_table_index += 1 else: - server.add_dense_table(dense_table_index, params, - grads, - strategy['dense_table'], - sparse_table_names) + server.add_dense_table( + dense_table_index, + params, + grads, + strategy['dense_table'], + sparse_table_names, + ) else: - server.add_dense_table(dense_table_index, params, grads, - None, sparse_table_names) + server.add_dense_table( + dense_table_index, + params, + grads, + None, + sparse_table_names, + ) if flag_multi_task: if len(root_params_list) > 0: - worker.add_dense_table(dense_table_index, - self._learning_rate, - root_params_list, - root_grads_list, - dense_start_table_id, - sparse_table_names) + worker.add_dense_table( + dense_table_index, + self._learning_rate, + root_params_list, + root_grads_list, + dense_start_table_id, + sparse_table_names, + ) dense_table_index += 1 for i in range(len(lists_params)): - worker.add_dense_table(dense_table_index, - self._learning_rate, - lists_params[i], - lists_grads[i], - dense_start_table_id, - sparse_table_names) + worker.add_dense_table( + dense_table_index, + self._learning_rate, + lists_params[i], + lists_grads[i], + dense_start_table_id, + sparse_table_names, + ) dense_table_index += 1 dense_table_index -= 1 else: - worker.add_dense_table(dense_table_index, - self._learning_rate, params, - grads, dense_start_table_id, - sparse_table_names) + worker.add_dense_table( + dense_table_index, + self._learning_rate, + params, + grads, + dense_start_table_id, + sparse_table_names, + ) if FLEET_GLOBAL_DICT["enable"]: cur_prog = losses[loss_index].block.program @@ -751,30 +883,39 @@ class DistributedAdam(DistributedOptimizerImplBase): attrs={ "InputNames": [i.name for i in grads], "TableId": dense_table_index, - "ScaleDataNorm": - strategy.get("scale_datanorm", -1) - }) - - if "pull_dense" in program_configs[ - program_id] and "push_dense" in program_configs[ - program_id] and len(program_configs[program_id] - ["pull_dense"]) > 0: + "ScaleDataNorm": strategy.get( + "scale_datanorm", -1 + ), + }, + ) + + if ( + "pull_dense" in program_configs[program_id] + and "push_dense" in program_configs[program_id] + and len(program_configs[program_id]["pull_dense"]) > 0 + ): if flag_multi_task: 
program_configs[program_id]["pull_dense"].extend( - multi_task_dense_tables_pull) + multi_task_dense_tables_pull + ) program_configs[program_id]["push_dense"].extend( - multi_task_dense_tables_push) + multi_task_dense_tables_push + ) else: program_configs[program_id]["pull_dense"].extend( - [dense_table_index]) + [dense_table_index] + ) program_configs[program_id]["push_dense"].extend( - [dense_table_index]) + [dense_table_index] + ) else: if flag_multi_task: program_configs[program_id][ - "pull_dense"] = multi_task_dense_tables_pull + "pull_dense" + ] = multi_task_dense_tables_pull program_configs[program_id][ - "push_dense"] = multi_task_dense_tables_push + "push_dense" + ] = multi_task_dense_tables_push else: program_configs[program_id]["pull_dense"] = [ dense_table_index @@ -787,22 +928,31 @@ class DistributedAdam(DistributedOptimizerImplBase): dense_table_index += 1 if strategy.get('datanorm_table') is not None: server.add_data_norm_table( - dense_table_index, self._learning_rate, - data_norm_params, data_norm_grads, - strategy['datanorm_table'], sparse_table_names) + dense_table_index, + self._learning_rate, + data_norm_params, + data_norm_grads, + strategy['datanorm_table'], + sparse_table_names, + ) else: - server.add_data_norm_table(dense_table_index, - self._learning_rate, - data_norm_params, - data_norm_grads, None, - sparse_table_names) - - worker.add_dense_table(dense_table_index, - self._learning_rate, - data_norm_params, - data_norm_grads, - dense_start_table_id, - sparse_table_names) + server.add_data_norm_table( + dense_table_index, + self._learning_rate, + data_norm_params, + data_norm_grads, + None, + sparse_table_names, + ) + + worker.add_dense_table( + dense_table_index, + self._learning_rate, + data_norm_params, + data_norm_grads, + dense_start_table_id, + sparse_table_names, + ) if FLEET_GLOBAL_DICT["enable"]: cur_prog = losses[loss_index].block.program @@ -810,18 +960,22 @@ class DistributedAdam(DistributedOptimizerImplBase): type="push_dense", inputs={"Ids": one_slot}, attrs={ - "InputNames": - [i.name for i in data_norm_grads], - "TableId": - dense_table_index, - "ScaleDataNorm": - strategy.get("scale_datanorm", -1) - }) + "InputNames": [ + i.name for i in data_norm_grads + ], + "TableId": dense_table_index, + "ScaleDataNorm": strategy.get( + "scale_datanorm", -1 + ), + }, + ) program_configs[program_id]["pull_dense"].extend( - [dense_table_index]) + [dense_table_index] + ) program_configs[program_id]["push_dense"].extend( - [dense_table_index]) + [dense_table_index] + ) dense_table_index += 1 # Todo(guru4elephant): figure out how to support more sparse parameters @@ -865,36 +1019,47 @@ class DistributedAdam(DistributedOptimizerImplBase): opt_info["use_cvm"] = strategy.get("use_cvm", False) opt_info["no_cvm"] = strategy.get("no_cvm", False) opt_info["scale_sparse_gradient_with_batch_size"] = strategy.get( - "scale_sparse_gradient_with_batch_size", True) - opt_info["worker_class"] = strategy.get("worker_class", - "DownpourWorker") + "scale_sparse_gradient_with_batch_size", True + ) + opt_info["worker_class"] = strategy.get( + "worker_class", "DownpourWorker" + ) opt_info["stat_var_names"] = strategy.get("stat_var_names", []) opt_info["local_tables"] = strategy.get("local_tables", []) opt_info["async_tables"] = strategy.get("async_tables", []) opt_info["async_tables"] = strategy.get("async_tables", []) opt_info["scale_datanorm"] = strategy.get("scale_datanorm", -1) - opt_info["check_nan_var_names"] = strategy.get("check_nan_var_names", - []) + 
opt_info["check_nan_var_names"] = strategy.get( + "check_nan_var_names", [] + ) opt_info["dump_slot"] = False opt_info["dump_converter"] = "" opt_info["dump_fields"] = strategy.get("dump_fields", []) opt_info["dump_file_num"] = strategy.get("dump_file_num", 16) opt_info["user_define_dump_filename"] = strategy.get( - "user_define_dump_filename", "") + "user_define_dump_filename", "" + ) opt_info["dump_fields_path"] = strategy.get("dump_fields_path", "") opt_info["dump_param"] = strategy.get("dump_param", []) gpus_env = os.getenv("FLAGS_selected_gpus", "0") opt_info["worker_places"] = [int(s) for s in gpus_env.split(",")] opt_info["use_ps_gpu"] = strategy.get("use_ps_gpu", False) if server._server.downpour_server_param.downpour_table_param[ - 0].accessor.accessor_class in [ - "DownpourCtrAccessor", "DownpourCtrDoubleAccessor", - "DownpourUnitAccessor", "DownpourDoubleUnitAccessor", - "DownpourCtrDymfAccessor" - ]: + 0 + ].accessor.accessor_class in [ + "DownpourCtrAccessor", + "DownpourCtrDoubleAccessor", + "DownpourUnitAccessor", + "DownpourDoubleUnitAccessor", + "DownpourCtrDymfAccessor", + ]: opt_info["dump_slot"] = True - elif server._server.downpour_server_param.downpour_table_param[ - 0].accessor.accessor_class == "DownpourSparseValueAccessor": + elif ( + server._server.downpour_server_param.downpour_table_param[ + 0 + ].accessor.accessor_class + == "DownpourSparseValueAccessor" + ): opt_info["no_cvm"] = True opt_info["adjust_ins_weight"] = strategy.get("adjust_ins_weight", {}) opt_info["copy_table"] = strategy.get("copy_table", {}) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/ps_pb2.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/ps_pb2.py index eec51ef827c5766e50910ba866ba7e5bc39d175b..4291115b0bcbb679e9d6676b161323e363dbe382 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/ps_pb2.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/ps_pb2.py @@ -24,6 +24,7 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -34,7 +35,8 @@ DESCRIPTOR = _descriptor.FileDescriptor( syntax='proto2', serialized_pb=_b( '\n\x08ps.proto\x12\x06paddle\"\xb5\x02\n\x0bPSParameter\x12\x14\n\x0cworker_class\x18\x01 \x01(\t\x12\x14\n\x0cserver_class\x18\x02 \x01(\t\x12\x16\n\x0einstance_class\x18\x03 \x01(\t\x12\x15\n\x0binit_gflags\x18\x04 \x01(\t:\x00\x12-\n\x0cworker_param\x18\x65 \x01(\x0b\x32\x17.paddle.WorkerParameter\x12-\n\x0cserver_param\x18\x66 \x01(\x0b\x32\x17.paddle.ServerParameter\x12\x38\n\rtrainer_param\x18\xad\x02 \x03(\x0b\x32 .paddle.DownpourTrainerParameter\x12\x33\n\x0f\x66s_client_param\x18\xf5\x03 \x01(\x0b\x32\x19.paddle.FsClientParameter\"Q\n\x0fWorkerParameter\x12>\n\x15\x64ownpour_worker_param\x18\x01 \x01(\x0b\x32\x1f.paddle.DownpourWorkerParameter\"Q\n\x0fServerParameter\x12>\n\x15\x64ownpour_server_param\x18\x01 \x01(\x0b\x32\x1f.paddle.DownpourServerParameter\"O\n\x17\x44ownpourWorkerParameter\x12\x34\n\x14\x64ownpour_table_param\x18\x01 \x03(\x0b\x32\x16.paddle.TableParameter\"\xfd\x01\n\x18\x44ownpourTrainerParameter\x12\x30\n\x0b\x64\x65nse_table\x18\x01 \x03(\x0b\x32\x1b.paddle.DenseTableParameter\x12\x32\n\x0csparse_table\x18\x02 \x03(\x0b\x32\x1c.paddle.SparseTableParameter\x12\x1d\n\x15push_sparse_per_batch\x18\x03 \x01(\x05\x12\x1c\n\x14push_dense_per_batch\x18\x04 
\x01(\x05\x12\x0f\n\x07skip_op\x18\x05 \x03(\t\x12-\n\x0eprogram_config\x18\x06 \x03(\x0b\x32\x15.paddle.ProgramConfig\"\x99\x01\n\rProgramConfig\x12\x12\n\nprogram_id\x18\x01 \x02(\t\x12\x1c\n\x14push_sparse_table_id\x18\x02 \x03(\x05\x12\x1b\n\x13push_dense_table_id\x18\x03 \x03(\x05\x12\x1c\n\x14pull_sparse_table_id\x18\x04 \x03(\x05\x12\x1b\n\x13pull_dense_table_id\x18\x05 \x03(\x05\"{\n\x13\x44\x65nseTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x05\x12\x1b\n\x13\x64\x65nse_variable_name\x18\x02 \x03(\t\x12$\n\x1c\x64\x65nse_gradient_variable_name\x18\x03 \x03(\t\x12\x0f\n\x07\x66\x65\x61_dim\x18\x04 \x01(\x05\"z\n\x14SparseTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x05\x12\x13\n\x0b\x66\x65\x61ture_dim\x18\x02 \x01(\x05\x12\x10\n\x08slot_key\x18\x03 \x03(\t\x12\x12\n\nslot_value\x18\x04 \x03(\t\x12\x15\n\rslot_gradient\x18\x05 \x03(\t\"\x86\x01\n\x17\x44ownpourServerParameter\x12\x34\n\x14\x64ownpour_table_param\x18\x01 \x03(\x0b\x32\x16.paddle.TableParameter\x12\x35\n\rservice_param\x18\x02 \x01(\x0b\x32\x1e.paddle.ServerServiceParameter\"\xd7\x01\n\x16ServerServiceParameter\x12*\n\x0cserver_class\x18\x01 \x01(\t:\x14\x44ownpourBrpcPsServer\x12*\n\x0c\x63lient_class\x18\x02 \x01(\t:\x14\x44ownpourBrpcPsClient\x12(\n\rservice_class\x18\x03 \x01(\t:\x11\x44ownpourPsService\x12\x1c\n\x11start_server_port\x18\x04 \x01(\r:\x01\x30\x12\x1d\n\x11server_thread_num\x18\x05 \x01(\r:\x02\x31\x32\"\xc0\x02\n\x0eTableParameter\x12\x10\n\x08table_id\x18\x01 \x01(\x04\x12\x13\n\x0btable_class\x18\x02 \x01(\t\x12\x17\n\tshard_num\x18\x03 \x01(\x04:\x04\x31\x30\x30\x30\x12\x30\n\x08\x61\x63\x63\x65ssor\x18\x04 \x01(\x0b\x32\x1e.paddle.TableAccessorParameter\x12\x1f\n\x04type\x18\x05 \x01(\x0e\x32\x11.paddle.TableType\x12\x1f\n\x10\x63ompress_in_save\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x19\x65nable_sparse_table_cache\x18\x07 \x01(\x08:\x04true\x12(\n\x17sparse_table_cache_rate\x18\x08 \x01(\x01:\x07\x30.00055\x12\'\n\x1bsparse_table_cache_file_num\x18\t \x01(\r:\x02\x31\x36\"\xc1\x04\n\x16TableAccessorParameter\x12\x16\n\x0e\x61\x63\x63\x65ssor_class\x18\x01 \x01(\t\x12\x38\n\x10sparse_sgd_param\x18\x02 \x01(\x0b\x32\x1e.paddle.SparseSGDRuleParameter\x12\x36\n\x0f\x64\x65nse_sgd_param\x18\x03 \x01(\x0b\x32\x1d.paddle.DenseSGDRuleParameter\x12\x13\n\x07\x66\x65\x61_dim\x18\x04 \x01(\r:\x02\x31\x31\x12\x15\n\nembedx_dim\x18\x05 \x01(\r:\x01\x38\x12\x1c\n\x10\x65mbedx_threshold\x18\x06 \x01(\r:\x02\x31\x30\x12G\n\x17\x64ownpour_accessor_param\x18\x07 \x01(\x0b\x32&.paddle.DownpourTableAccessorParameter\x12\x45\n\x19table_accessor_save_param\x18\x08 \x03(\x0b\x32\".paddle.TableAccessorSaveParameter\x12\x44\n\x16sparse_commonsgd_param\x18\t \x01(\x0b\x32$.paddle.SparseCommonSGDRuleParameter\x12=\n\x0f\x65mbed_sgd_param\x18\n \x01(\x0b\x32$.paddle.SparseCommonSGDRuleParameter\x12>\n\x10\x65mbedx_sgd_param\x18\x0b \x01(\x0b\x32$.paddle.SparseCommonSGDRuleParameter\"\xba\x02\n\x1e\x44ownpourTableAccessorParameter\x12\x19\n\x0cnonclk_coeff\x18\x01 \x01(\x02:\x03\x30.1\x12\x16\n\x0b\x63lick_coeff\x18\x02 \x01(\x02:\x01\x31\x12\x1b\n\x0e\x62\x61se_threshold\x18\x03 \x01(\x02:\x03\x31.5\x12\x1d\n\x0f\x64\x65lta_threshold\x18\x04 \x01(\x02:\x04\x30.25\x12\x1b\n\x0f\x64\x65lta_keep_days\x18\x05 \x01(\x02:\x02\x31\x36\x12#\n\x15show_click_decay_rate\x18\x06 \x01(\x02:\x04\x30.98\x12\x1d\n\x10\x64\x65lete_threshold\x18\x07 \x01(\x02:\x03\x30.8\x12$\n\x18\x64\x65lete_after_unseen_days\x18\x08 \x01(\x02:\x02\x33\x30\x12\"\n\x17ssd_unseenday_threshold\x18\t 
\x01(\x05:\x01\x31\"S\n\x1aTableAccessorSaveParameter\x12\r\n\x05param\x18\x01 \x01(\r\x12\x11\n\tconverter\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65\x63onverter\x18\x03 \x01(\t\"e\n\x10PsRequestMessage\x12\x0e\n\x06\x63md_id\x18\x01 \x02(\r\x12\x10\n\x08table_id\x18\x02 \x01(\r\x12\x0e\n\x06params\x18\x03 \x03(\x0c\x12\x11\n\tclient_id\x18\x04 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\"\x85\x01\n\x16SparseSGDRuleParameter\x12\x1b\n\rlearning_rate\x18\x01 \x01(\x01:\x04\x30.05\x12\x18\n\rinitial_g2sum\x18\x02 \x01(\x01:\x01\x33\x12\x1d\n\rinitial_range\x18\x03 \x01(\x01:\x06\x30.0001\x12\x15\n\rweight_bounds\x18\x04 \x03(\x02\"\xc6\x01\n\x1cSparseCommonSGDRuleParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x05naive\x18\x02 \x01(\x0b\x32#.paddle.SparseNaiveSGDRuleParameter\x12\x36\n\x07\x61\x64\x61grad\x18\x03 \x01(\x0b\x32%.paddle.SparseAdagradSGDRuleParameter\x12,\n\x04\x61\x64\x61m\x18\x04 \x01(\x0b\x32\x1e.paddle.SparseAdamSGDParameter\"p\n\x1bSparseNaiveSGDRuleParameter\x12\x1b\n\rlearning_rate\x18\x01 \x01(\x01:\x04\x30.05\x12\x1d\n\rinitial_range\x18\x02 \x01(\x01:\x06\x30.0001\x12\x15\n\rweight_bounds\x18\x03 \x03(\x02\"\x8c\x01\n\x1dSparseAdagradSGDRuleParameter\x12\x1b\n\rlearning_rate\x18\x01 \x01(\x01:\x04\x30.05\x12\x18\n\rinitial_g2sum\x18\x02 \x01(\x01:\x01\x33\x12\x1d\n\rinitial_range\x18\x03 \x01(\x01:\x06\x30.0001\x12\x15\n\rweight_bounds\x18\x04 \x03(\x02\"\xc8\x01\n\x16SparseAdamSGDParameter\x12\x1c\n\rlearning_rate\x18\x01 \x01(\x01:\x05\x30.001\x12\x1d\n\rinitial_range\x18\x02 \x01(\x01:\x06\x30.0001\x12\x1d\n\x10\x62\x65ta1_decay_rate\x18\x03 \x01(\x01:\x03\x30.9\x12\x1f\n\x10\x62\x65ta2_decay_rate\x18\x04 \x01(\x01:\x05\x30.999\x12\x1a\n\x0b\x61\x64\x61_epsilon\x18\x05 \x01(\x01:\x05\x31\x65-08\x12\x15\n\rweight_bounds\x18\x06 \x03(\x02\"\xe1\x01\n\x15\x44\x65nseSGDRuleParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x04\x61\x64\x61m\x18\x02 \x01(\x0b\x32\x18.paddle.AdamSGDParameter\x12(\n\x05naive\x18\x03 \x01(\x0b\x32\x19.paddle.NaiveSGDParameter\x12,\n\x07summary\x18\x04 \x01(\x0b\x32\x1b.paddle.SummarySGDParameter\x12:\n\x0emoving_average\x18\x05 \x01(\x0b\x32\".paddle.MovingAverageRuleParameter\"\xac\x01\n\x10\x41\x64\x61mSGDParameter\x12\x1c\n\rlearning_rate\x18\x01 \x01(\x01:\x05\x35\x65-06\x12 \n\x0e\x61vg_decay_rate\x18\x02 \x01(\x01:\x08\x30.999993\x12\x1e\n\x0e\x61\x64\x61_decay_rate\x18\x03 \x01(\x01:\x06\x30.9999\x12\x1a\n\x0b\x61\x64\x61_epsilon\x18\x04 \x01(\x01:\x05\x31\x65-08\x12\x1c\n\x0emom_decay_rate\x18\x05 \x01(\x01:\x04\x30.99\"J\n\x11NaiveSGDParameter\x12\x1d\n\rlearning_rate\x18\x01 \x01(\x01:\x06\x30.0002\x12\x16\n\x0e\x61vg_decay_rate\x18\x02 \x01(\x01\";\n\x13SummarySGDParameter\x12$\n\x12summary_decay_rate\x18\x01 \x01(\x01:\x08\x30.999999\".\n\x1aMovingAverageRuleParameter\x12\x10\n\x08momentum\x18\x01 \x01(\x01\"I\n\x11PsResponseMessage\x12\x13\n\x08\x65rr_code\x18\x01 \x02(\x05:\x01\x30\x12\x11\n\x07\x65rr_msg\x18\x02 \x02(\t:\x00\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\xd5\x01\n\x11\x46sClientParameter\x12:\n\x07\x66s_type\x18\x01 \x01(\x0e\x32#.paddle.FsClientParameter.FsApiType:\x04HDFS\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12\x0c\n\x04user\x18\x03 \x01(\t\x12\x0e\n\x06passwd\x18\x04 \x01(\t\x12\x13\n\x0b\x62uffer_size\x18\x05 \x01(\x05\x12\x12\n\nhadoop_bin\x18\x33 \x01(\t\x12\x10\n\x08\x61\x66s_conf\x18\x65 
\x01(\t\"\x1e\n\tFsApiType\x12\x08\n\x04HDFS\x10\x00\x12\x07\n\x03\x41\x46S\x10\x01*4\n\tTableType\x12\x13\n\x0fPS_SPARSE_TABLE\x10\x00\x12\x12\n\x0ePS_DENSE_TABLE\x10\x01*\xba\x04\n\x07PsCmdID\x12\x17\n\x13PS_PULL_DENSE_TABLE\x10\x00\x12\x17\n\x13PS_PUSH_DENSE_TABLE\x10\x01\x12\x18\n\x14PS_PULL_SPARSE_TABLE\x10\x02\x12\x18\n\x14PS_PUSH_SPARSE_TABLE\x10\x03\x12\x13\n\x0fPS_SHRINK_TABLE\x10\x04\x12\x15\n\x11PS_SAVE_ONE_TABLE\x10\x05\x12\x15\n\x11PS_SAVE_ALL_TABLE\x10\x06\x12\x15\n\x11PS_LOAD_ONE_TABLE\x10\x07\x12\x15\n\x11PS_LOAD_ALL_TABLE\x10\x08\x12\x16\n\x12PS_CLEAR_ONE_TABLE\x10\t\x12\x16\n\x12PS_CLEAR_ALL_TABLE\x10\n\x12\x17\n\x13PS_PUSH_DENSE_PARAM\x10\x0b\x12\x12\n\x0ePS_STOP_SERVER\x10\x0c\x12\x1b\n\x17PS_SAVE_ONE_CACHE_TABLE\x10\r\x12\x1a\n\x16PS_GET_CACHE_THRESHOLD\x10\x0e\x12\x14\n\x10PS_CACHE_SHUFFLE\x10\x0f\x12\x11\n\rPS_COPY_TABLE\x10\x10\x12\x1c\n\x18PS_COPY_TABLE_BY_FEASIGN\x10\x11\x12(\n$PS_PULL_SPARSE_TABLE_WITH_DEPENDENCY\x10\x12\x12(\n$PS_PUSH_SPARSE_TABLE_WITH_DEPENDENCY\x10\x13\x12\x17\n\x13PS_PRINT_TABLE_STAT\x10\x14\x12\x0e\n\nPS_S2S_MSG\x10\x65\x32K\n\tPsService\x12>\n\x07service\x12\x18.paddle.PsRequestMessage\x1a\x19.paddle.PsResponseMessageB\x06\x80\x01\x01\xf8\x01\x01' - )) + ), +) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _TABLETYPE = _descriptor.EnumDescriptor( @@ -43,16 +45,12 @@ _TABLETYPE = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, values=[ - _descriptor.EnumValueDescriptor(name='PS_SPARSE_TABLE', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_DENSE_TABLE', - index=1, - number=1, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='PS_SPARSE_TABLE', index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_DENSE_TABLE', index=1, number=1, options=None, type=None + ), ], containing_type=None, options=None, @@ -68,118 +66,128 @@ _PSCMDID = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, values=[ - _descriptor.EnumValueDescriptor(name='PS_PULL_DENSE_TABLE', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_DENSE_TABLE', - index=1, - number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PULL_SPARSE_TABLE', - index=2, - number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_SPARSE_TABLE', - index=3, - number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SHRINK_TABLE', - index=4, - number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SAVE_ONE_TABLE', - index=5, - number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SAVE_ALL_TABLE', - index=6, - number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_LOAD_ONE_TABLE', - index=7, - number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_LOAD_ALL_TABLE', - index=8, - number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_CLEAR_ONE_TABLE', - index=9, - number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_CLEAR_ALL_TABLE', - index=10, - number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PUSH_DENSE_PARAM', - index=11, - number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_STOP_SERVER', - index=12, - number=12, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_SAVE_ONE_CACHE_TABLE', - 
index=13, - number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_GET_CACHE_THRESHOLD', - index=14, - number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_CACHE_SHUFFLE', - index=15, - number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_COPY_TABLE', - index=16, - number=16, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_COPY_TABLE_BY_FEASIGN', - index=17, - number=17, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='PS_PULL_DENSE_TABLE', + index=0, + number=0, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_DENSE_TABLE', + index=1, + number=1, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PULL_SPARSE_TABLE', + index=2, + number=2, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_SPARSE_TABLE', + index=3, + number=3, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_SHRINK_TABLE', index=4, number=4, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_SAVE_ONE_TABLE', index=5, number=5, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_SAVE_ALL_TABLE', index=6, number=6, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_LOAD_ONE_TABLE', index=7, number=7, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_LOAD_ALL_TABLE', index=8, number=8, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_CLEAR_ONE_TABLE', + index=9, + number=9, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_CLEAR_ALL_TABLE', + index=10, + number=10, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PUSH_DENSE_PARAM', + index=11, + number=11, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_STOP_SERVER', index=12, number=12, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_SAVE_ONE_CACHE_TABLE', + index=13, + number=13, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_GET_CACHE_THRESHOLD', + index=14, + number=14, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_CACHE_SHUFFLE', + index=15, + number=15, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_COPY_TABLE', index=16, number=16, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='PS_COPY_TABLE_BY_FEASIGN', + index=17, + number=17, + options=None, + type=None, + ), _descriptor.EnumValueDescriptor( name='PS_PULL_SPARSE_TABLE_WITH_DEPENDENCY', index=18, number=18, options=None, - type=None), + type=None, + ), _descriptor.EnumValueDescriptor( name='PS_PUSH_SPARSE_TABLE_WITH_DEPENDENCY', index=19, number=19, options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_PRINT_TABLE_STAT', - index=20, - number=20, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='PS_S2S_MSG', - index=21, - number=101, - options=None, - type=None), + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_PRINT_TABLE_STAT', + index=20, + number=20, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name='PS_S2S_MSG', index=21, number=101, options=None, type=None + ), ], containing_type=None, options=None, @@ -220,16 +228,12 @@ _FSCLIENTPARAMETER_FSAPITYPE = _descriptor.EnumDescriptor( filename=None, file=DESCRIPTOR, 
values=[ - _descriptor.EnumValueDescriptor(name='HDFS', - index=0, - number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor(name='AFS', - index=1, - number=1, - options=None, - type=None), + _descriptor.EnumValueDescriptor( + name='HDFS', index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name='AFS', index=1, number=1, options=None, type=None + ), ], containing_type=None, options=None, @@ -245,36 +249,40 @@ _PSPARAMETER = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='worker_class', - full_name='paddle.PSParameter.worker_class', - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='server_class', - full_name='paddle.PSParameter.server_class', - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='worker_class', + full_name='paddle.PSParameter.worker_class', + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='server_class', + full_name='paddle.PSParameter.server_class', + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='instance_class', full_name='paddle.PSParameter.instance_class', @@ -290,52 +298,59 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='init_gflags', - full_name='paddle.PSParameter.init_gflags', - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=True, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='worker_param', - full_name='paddle.PSParameter.worker_param', - index=4, - number=101, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='server_param', - full_name='paddle.PSParameter.server_param', - index=5, - number=102, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='init_gflags', + full_name='paddle.PSParameter.init_gflags', + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=True, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='worker_param', + full_name='paddle.PSParameter.worker_param', + index=4, + number=101, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='server_param', + full_name='paddle.PSParameter.server_param', + index=5, + number=102, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='trainer_param', full_name='paddle.PSParameter.trainer_param', @@ -351,7 +366,8 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fs_client_param', full_name='paddle.PSParameter.fs_client_param', @@ -367,7 +383,8 @@ _PSPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -403,7 +420,8 @@ _WORKERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -439,7 +457,8 @@ _SERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -475,7 +494,8 @@ _DOWNPOURWORKERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -511,7 +531,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_table', full_name='paddle.DownpourTrainerParameter.sparse_table', @@ -527,7 +548,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_sparse_per_batch', full_name='paddle.DownpourTrainerParameter.push_sparse_per_batch', @@ -543,7 +565,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_dense_per_batch', full_name='paddle.DownpourTrainerParameter.push_dense_per_batch', @@ -559,7 +582,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='skip_op', full_name='paddle.DownpourTrainerParameter.skip_op', @@ -575,7 +599,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='program_config', full_name='paddle.DownpourTrainerParameter.program_config', @@ -591,7 +616,8 @@ _DOWNPOURTRAINERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -612,21 
+638,23 @@ _PROGRAMCONFIG = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='program_id', - full_name='paddle.ProgramConfig.program_id', - index=0, - number=1, - type=9, - cpp_type=9, - label=2, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='program_id', + full_name='paddle.ProgramConfig.program_id', + index=0, + number=1, + type=9, + cpp_type=9, + label=2, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='push_sparse_table_id', full_name='paddle.ProgramConfig.push_sparse_table_id', @@ -642,7 +670,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='push_dense_table_id', full_name='paddle.ProgramConfig.push_dense_table_id', @@ -658,7 +687,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='pull_sparse_table_id', full_name='paddle.ProgramConfig.pull_sparse_table_id', @@ -674,7 +704,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='pull_dense_table_id', full_name='paddle.ProgramConfig.pull_dense_table_id', @@ -690,7 +721,8 @@ _PROGRAMCONFIG = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -726,7 +758,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_variable_name', full_name='paddle.DenseTableParameter.dense_variable_name', @@ -742,7 +775,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_gradient_variable_name', full_name='paddle.DenseTableParameter.dense_gradient_variable_name', @@ -758,7 +792,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fea_dim', full_name='paddle.DenseTableParameter.fea_dim', @@ -774,7 +809,8 @@ _DENSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -810,7 +846,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='feature_dim', full_name='paddle.SparseTableParameter.feature_dim', @@ -826,7 +863,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_key', full_name='paddle.SparseTableParameter.slot_key', @@ -842,7 +880,8 @@ 
_SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_value', full_name='paddle.SparseTableParameter.slot_value', @@ -858,7 +897,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='slot_gradient', full_name='paddle.SparseTableParameter.slot_gradient', @@ -874,7 +914,8 @@ _SPARSETABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -910,7 +951,8 @@ _DOWNPOURSERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='service_param', full_name='paddle.DownpourServerParameter.service_param', @@ -926,7 +968,8 @@ _DOWNPOURSERVERPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -962,7 +1005,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='client_class', full_name='paddle.ServerServiceParameter.client_class', @@ -978,7 +1022,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='service_class', full_name='paddle.ServerServiceParameter.service_class', @@ -994,7 +1039,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='start_server_port', full_name='paddle.ServerServiceParameter.start_server_port', @@ -1010,7 +1056,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='server_thread_num', full_name='paddle.ServerServiceParameter.server_thread_num', @@ -1026,7 +1073,8 @@ _SERVERSERVICEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1047,21 +1095,23 @@ _TABLEPARAMETER = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='table_id', - full_name='paddle.TableParameter.table_id', - index=0, - number=1, - type=4, - cpp_type=4, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='table_id', + full_name='paddle.TableParameter.table_id', + index=0, + number=1, + type=4, + cpp_type=4, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='table_class', full_name='paddle.TableParameter.table_class', @@ -1077,52 +1127,59 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='shard_num', - full_name='paddle.TableParameter.shard_num', - index=2, - number=3, - type=4, - cpp_type=4, - label=1, - has_default_value=True, - default_value=1000, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='accessor', - full_name='paddle.TableParameter.accessor', - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='type', - full_name='paddle.TableParameter.type', - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='shard_num', + full_name='paddle.TableParameter.shard_num', + index=2, + number=3, + type=4, + cpp_type=4, + label=1, + has_default_value=True, + default_value=1000, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='accessor', + full_name='paddle.TableParameter.accessor', + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='type', + full_name='paddle.TableParameter.type', + index=4, + number=5, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='compress_in_save', full_name='paddle.TableParameter.compress_in_save', @@ -1138,7 +1195,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='enable_sparse_table_cache', full_name='paddle.TableParameter.enable_sparse_table_cache', @@ -1154,7 +1212,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_table_cache_rate', full_name='paddle.TableParameter.sparse_table_cache_rate', @@ -1170,7 +1229,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_table_cache_file_num', full_name='paddle.TableParameter.sparse_table_cache_file_num', @@ -1186,7 +1246,8 @@ _TABLEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1222,7 +1283,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_sgd_param', full_name='paddle.TableAccessorParameter.sparse_sgd_param', @@ -1238,7 +1300,8 @@ _TABLEACCESSORPARAMETER = 
_descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='dense_sgd_param', full_name='paddle.TableAccessorParameter.dense_sgd_param', @@ -1254,7 +1317,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='fea_dim', full_name='paddle.TableAccessorParameter.fea_dim', @@ -1270,7 +1334,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embedx_dim', full_name='paddle.TableAccessorParameter.embedx_dim', @@ -1286,7 +1351,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embedx_threshold', full_name='paddle.TableAccessorParameter.embedx_threshold', @@ -1302,7 +1368,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='downpour_accessor_param', full_name='paddle.TableAccessorParameter.downpour_accessor_param', @@ -1318,7 +1385,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='table_accessor_save_param', full_name='paddle.TableAccessorParameter.table_accessor_save_param', @@ -1334,7 +1402,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='sparse_commonsgd_param', full_name='paddle.TableAccessorParameter.sparse_commonsgd_param', @@ -1350,7 +1419,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embed_sgd_param', full_name='paddle.TableAccessorParameter.embed_sgd_param', @@ -1366,7 +1436,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='embedx_sgd_param', full_name='paddle.TableAccessorParameter.embedx_sgd_param', @@ -1382,7 +1453,8 @@ _TABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1418,7 +1490,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='click_coeff', full_name='paddle.DownpourTableAccessorParameter.click_coeff', @@ -1434,7 +1507,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='base_threshold', full_name='paddle.DownpourTableAccessorParameter.base_threshold', @@ -1450,7 +1524,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), 
_descriptor.FieldDescriptor( name='delta_threshold', full_name='paddle.DownpourTableAccessorParameter.delta_threshold', @@ -1466,7 +1541,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delta_keep_days', full_name='paddle.DownpourTableAccessorParameter.delta_keep_days', @@ -1482,11 +1558,11 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='show_click_decay_rate', - full_name= - 'paddle.DownpourTableAccessorParameter.show_click_decay_rate', + full_name='paddle.DownpourTableAccessorParameter.show_click_decay_rate', index=5, number=6, type=2, @@ -1499,7 +1575,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delete_threshold', full_name='paddle.DownpourTableAccessorParameter.delete_threshold', @@ -1515,11 +1592,11 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='delete_after_unseen_days', - full_name= - 'paddle.DownpourTableAccessorParameter.delete_after_unseen_days', + full_name='paddle.DownpourTableAccessorParameter.delete_after_unseen_days', index=7, number=8, type=2, @@ -1532,11 +1609,11 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ssd_unseenday_threshold', - full_name= - 'paddle.DownpourTableAccessorParameter.ssd_unseenday_threshold', + full_name='paddle.DownpourTableAccessorParameter.ssd_unseenday_threshold', index=8, number=9, type=5, @@ -1549,7 +1626,8 @@ _DOWNPOURTABLEACCESSORPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1585,7 +1663,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='converter', full_name='paddle.TableAccessorSaveParameter.converter', @@ -1601,7 +1680,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='deconverter', full_name='paddle.TableAccessorSaveParameter.deconverter', @@ -1617,7 +1697,8 @@ _TABLEACCESSORSAVEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1638,21 +1719,23 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( file=DESCRIPTOR, containing_type=None, fields=[ - _descriptor.FieldDescriptor(name='cmd_id', - full_name='paddle.PsRequestMessage.cmd_id', - index=0, - number=1, - type=13, - cpp_type=3, - label=2, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + _descriptor.FieldDescriptor( + name='cmd_id', + full_name='paddle.PsRequestMessage.cmd_id', + index=0, + 
number=1, + type=13, + cpp_type=3, + label=2, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='table_id', full_name='paddle.PsRequestMessage.table_id', @@ -1668,22 +1751,25 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='params', - full_name='paddle.PsRequestMessage.params', - index=2, - number=3, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='params', + full_name='paddle.PsRequestMessage.params', + index=2, + number=3, + type=12, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='client_id', full_name='paddle.PsRequestMessage.client_id', @@ -1699,22 +1785,25 @@ _PSREQUESTMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='data', - full_name='paddle.PsRequestMessage.data', - index=4, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='data', + full_name='paddle.PsRequestMessage.data', + index=4, + number=5, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), ], extensions=[], nested_types=[], @@ -1750,7 +1839,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_g2sum', full_name='paddle.SparseSGDRuleParameter.initial_g2sum', @@ -1766,7 +1856,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_range', full_name='paddle.SparseSGDRuleParameter.initial_range', @@ -1782,7 +1873,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='weight_bounds', full_name='paddle.SparseSGDRuleParameter.weight_bounds', @@ -1798,7 +1890,8 @@ _SPARSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1834,7 +1927,8 @@ _SPARSECOMMONSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='naive', full_name='paddle.SparseCommonSGDRuleParameter.naive', @@ -1850,7 +1944,8 @@ _SPARSECOMMONSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, 
extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='adagrad', full_name='paddle.SparseCommonSGDRuleParameter.adagrad', @@ -1866,7 +1961,8 @@ _SPARSECOMMONSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='adam', full_name='paddle.SparseCommonSGDRuleParameter.adam', @@ -1882,7 +1978,8 @@ _SPARSECOMMONSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1918,7 +2015,8 @@ _SPARSENAIVESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_range', full_name='paddle.SparseNaiveSGDRuleParameter.initial_range', @@ -1934,7 +2032,8 @@ _SPARSENAIVESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='weight_bounds', full_name='paddle.SparseNaiveSGDRuleParameter.weight_bounds', @@ -1950,7 +2049,8 @@ _SPARSENAIVESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -1986,7 +2086,8 @@ _SPARSEADAGRADSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_g2sum', full_name='paddle.SparseAdagradSGDRuleParameter.initial_g2sum', @@ -2002,7 +2103,8 @@ _SPARSEADAGRADSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_range', full_name='paddle.SparseAdagradSGDRuleParameter.initial_range', @@ -2018,7 +2120,8 @@ _SPARSEADAGRADSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='weight_bounds', full_name='paddle.SparseAdagradSGDRuleParameter.weight_bounds', @@ -2034,7 +2137,8 @@ _SPARSEADAGRADSGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2070,7 +2174,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='initial_range', full_name='paddle.SparseAdamSGDParameter.initial_range', @@ -2086,7 +2191,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='beta1_decay_rate', full_name='paddle.SparseAdamSGDParameter.beta1_decay_rate', @@ -2102,7 +2208,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='beta2_decay_rate', full_name='paddle.SparseAdamSGDParameter.beta2_decay_rate', @@ -2118,7 +2225,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - 
options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ada_epsilon', full_name='paddle.SparseAdamSGDParameter.ada_epsilon', @@ -2134,7 +2242,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='weight_bounds', full_name='paddle.SparseAdamSGDParameter.weight_bounds', @@ -2150,7 +2259,8 @@ _SPARSEADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2186,7 +2296,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='adam', full_name='paddle.DenseSGDRuleParameter.adam', @@ -2202,7 +2313,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='naive', full_name='paddle.DenseSGDRuleParameter.naive', @@ -2218,7 +2330,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='summary', full_name='paddle.DenseSGDRuleParameter.summary', @@ -2234,7 +2347,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='moving_average', full_name='paddle.DenseSGDRuleParameter.moving_average', @@ -2250,7 +2364,8 @@ _DENSESGDRULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2286,7 +2401,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='avg_decay_rate', full_name='paddle.AdamSGDParameter.avg_decay_rate', @@ -2302,7 +2418,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ada_decay_rate', full_name='paddle.AdamSGDParameter.ada_decay_rate', @@ -2318,7 +2435,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='ada_epsilon', full_name='paddle.AdamSGDParameter.ada_epsilon', @@ -2334,7 +2452,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='mom_decay_rate', full_name='paddle.AdamSGDParameter.mom_decay_rate', @@ -2350,7 +2469,8 @@ _ADAMSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2386,7 +2506,8 @@ _NAIVESGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='avg_decay_rate', full_name='paddle.NaiveSGDParameter.avg_decay_rate', @@ -2402,7 +2523,8 @@ _NAIVESGDPARAMETER = _descriptor.Descriptor( 
containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2438,7 +2560,8 @@ _SUMMARYSGDPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2474,7 +2597,8 @@ _MOVINGAVERAGERULEPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2510,7 +2634,8 @@ _PSRESPONSEMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='err_msg', full_name='paddle.PsResponseMessage.err_msg', @@ -2526,22 +2651,25 @@ _PSRESPONSEMESSAGE = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='data', - full_name='paddle.PsResponseMessage.data', - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='data', + full_name='paddle.PsResponseMessage.data', + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), ], extensions=[], nested_types=[], @@ -2577,52 +2705,59 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='uri', - full_name='paddle.FsClientParameter.uri', - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='user', - full_name='paddle.FsClientParameter.user', - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), - _descriptor.FieldDescriptor(name='passwd', - full_name='paddle.FsClientParameter.passwd', - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode('utf-8'), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None), + options=None, + ), + _descriptor.FieldDescriptor( + name='uri', + full_name='paddle.FsClientParameter.uri', + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), + _descriptor.FieldDescriptor( + name='user', + full_name='paddle.FsClientParameter.user', + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + 
), + _descriptor.FieldDescriptor( + name='passwd', + full_name='paddle.FsClientParameter.passwd', + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode('utf-8'), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( name='buffer_size', full_name='paddle.FsClientParameter.buffer_size', @@ -2638,7 +2773,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='hadoop_bin', full_name='paddle.FsClientParameter.hadoop_bin', @@ -2654,7 +2790,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), _descriptor.FieldDescriptor( name='afs_conf', full_name='paddle.FsClientParameter.afs_conf', @@ -2670,7 +2807,8 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, + ), ], extensions=[], nested_types=[], @@ -2689,95 +2827,132 @@ _FSCLIENTPARAMETER = _descriptor.Descriptor( _PSPARAMETER.fields_by_name['worker_param'].message_type = _WORKERPARAMETER _PSPARAMETER.fields_by_name['server_param'].message_type = _SERVERPARAMETER _PSPARAMETER.fields_by_name[ - 'trainer_param'].message_type = _DOWNPOURTRAINERPARAMETER + 'trainer_param' +].message_type = _DOWNPOURTRAINERPARAMETER _PSPARAMETER.fields_by_name['fs_client_param'].message_type = _FSCLIENTPARAMETER _WORKERPARAMETER.fields_by_name[ - 'downpour_worker_param'].message_type = _DOWNPOURWORKERPARAMETER + 'downpour_worker_param' +].message_type = _DOWNPOURWORKERPARAMETER _SERVERPARAMETER.fields_by_name[ - 'downpour_server_param'].message_type = _DOWNPOURSERVERPARAMETER + 'downpour_server_param' +].message_type = _DOWNPOURSERVERPARAMETER _DOWNPOURWORKERPARAMETER.fields_by_name[ - 'downpour_table_param'].message_type = _TABLEPARAMETER + 'downpour_table_param' +].message_type = _TABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'dense_table'].message_type = _DENSETABLEPARAMETER + 'dense_table' +].message_type = _DENSETABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'sparse_table'].message_type = _SPARSETABLEPARAMETER + 'sparse_table' +].message_type = _SPARSETABLEPARAMETER _DOWNPOURTRAINERPARAMETER.fields_by_name[ - 'program_config'].message_type = _PROGRAMCONFIG + 'program_config' +].message_type = _PROGRAMCONFIG _DOWNPOURSERVERPARAMETER.fields_by_name[ - 'downpour_table_param'].message_type = _TABLEPARAMETER + 'downpour_table_param' +].message_type = _TABLEPARAMETER _DOWNPOURSERVERPARAMETER.fields_by_name[ - 'service_param'].message_type = _SERVERSERVICEPARAMETER + 'service_param' +].message_type = _SERVERSERVICEPARAMETER _TABLEPARAMETER.fields_by_name[ - 'accessor'].message_type = _TABLEACCESSORPARAMETER + 'accessor' +].message_type = _TABLEACCESSORPARAMETER _TABLEPARAMETER.fields_by_name['type'].enum_type = _TABLETYPE _TABLEACCESSORPARAMETER.fields_by_name[ - 'sparse_sgd_param'].message_type = _SPARSESGDRULEPARAMETER + 'sparse_sgd_param' +].message_type = _SPARSESGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'dense_sgd_param'].message_type = _DENSESGDRULEPARAMETER + 'dense_sgd_param' +].message_type = _DENSESGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'downpour_accessor_param'].message_type = _DOWNPOURTABLEACCESSORPARAMETER + 
'downpour_accessor_param' +].message_type = _DOWNPOURTABLEACCESSORPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'table_accessor_save_param'].message_type = _TABLEACCESSORSAVEPARAMETER + 'table_accessor_save_param' +].message_type = _TABLEACCESSORSAVEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'sparse_commonsgd_param'].message_type = _SPARSECOMMONSGDRULEPARAMETER + 'sparse_commonsgd_param' +].message_type = _SPARSECOMMONSGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'embed_sgd_param'].message_type = _SPARSECOMMONSGDRULEPARAMETER + 'embed_sgd_param' +].message_type = _SPARSECOMMONSGDRULEPARAMETER _TABLEACCESSORPARAMETER.fields_by_name[ - 'embedx_sgd_param'].message_type = _SPARSECOMMONSGDRULEPARAMETER + 'embedx_sgd_param' +].message_type = _SPARSECOMMONSGDRULEPARAMETER _SPARSECOMMONSGDRULEPARAMETER.fields_by_name[ - 'naive'].message_type = _SPARSENAIVESGDRULEPARAMETER + 'naive' +].message_type = _SPARSENAIVESGDRULEPARAMETER _SPARSECOMMONSGDRULEPARAMETER.fields_by_name[ - 'adagrad'].message_type = _SPARSEADAGRADSGDRULEPARAMETER + 'adagrad' +].message_type = _SPARSEADAGRADSGDRULEPARAMETER _SPARSECOMMONSGDRULEPARAMETER.fields_by_name[ - 'adam'].message_type = _SPARSEADAMSGDPARAMETER + 'adam' +].message_type = _SPARSEADAMSGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name['adam'].message_type = _ADAMSGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name['naive'].message_type = _NAIVESGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name[ - 'summary'].message_type = _SUMMARYSGDPARAMETER + 'summary' +].message_type = _SUMMARYSGDPARAMETER _DENSESGDRULEPARAMETER.fields_by_name[ - 'moving_average'].message_type = _MOVINGAVERAGERULEPARAMETER + 'moving_average' +].message_type = _MOVINGAVERAGERULEPARAMETER _FSCLIENTPARAMETER.fields_by_name[ - 'fs_type'].enum_type = _FSCLIENTPARAMETER_FSAPITYPE + 'fs_type' +].enum_type = _FSCLIENTPARAMETER_FSAPITYPE _FSCLIENTPARAMETER_FSAPITYPE.containing_type = _FSCLIENTPARAMETER DESCRIPTOR.message_types_by_name['PSParameter'] = _PSPARAMETER DESCRIPTOR.message_types_by_name['WorkerParameter'] = _WORKERPARAMETER DESCRIPTOR.message_types_by_name['ServerParameter'] = _SERVERPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourWorkerParameter'] = _DOWNPOURWORKERPARAMETER + 'DownpourWorkerParameter' +] = _DOWNPOURWORKERPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourTrainerParameter'] = _DOWNPOURTRAINERPARAMETER + 'DownpourTrainerParameter' +] = _DOWNPOURTRAINERPARAMETER DESCRIPTOR.message_types_by_name['ProgramConfig'] = _PROGRAMCONFIG DESCRIPTOR.message_types_by_name['DenseTableParameter'] = _DENSETABLEPARAMETER DESCRIPTOR.message_types_by_name['SparseTableParameter'] = _SPARSETABLEPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourServerParameter'] = _DOWNPOURSERVERPARAMETER + 'DownpourServerParameter' +] = _DOWNPOURSERVERPARAMETER DESCRIPTOR.message_types_by_name[ - 'ServerServiceParameter'] = _SERVERSERVICEPARAMETER + 'ServerServiceParameter' +] = _SERVERSERVICEPARAMETER DESCRIPTOR.message_types_by_name['TableParameter'] = _TABLEPARAMETER DESCRIPTOR.message_types_by_name[ - 'TableAccessorParameter'] = _TABLEACCESSORPARAMETER + 'TableAccessorParameter' +] = _TABLEACCESSORPARAMETER DESCRIPTOR.message_types_by_name[ - 'DownpourTableAccessorParameter'] = _DOWNPOURTABLEACCESSORPARAMETER + 'DownpourTableAccessorParameter' +] = _DOWNPOURTABLEACCESSORPARAMETER DESCRIPTOR.message_types_by_name[ - 'TableAccessorSaveParameter'] = _TABLEACCESSORSAVEPARAMETER + 'TableAccessorSaveParameter' +] = _TABLEACCESSORSAVEPARAMETER 
DESCRIPTOR.message_types_by_name['PsRequestMessage'] = _PSREQUESTMESSAGE DESCRIPTOR.message_types_by_name[ - 'SparseSGDRuleParameter'] = _SPARSESGDRULEPARAMETER + 'SparseSGDRuleParameter' +] = _SPARSESGDRULEPARAMETER DESCRIPTOR.message_types_by_name[ - 'SparseCommonSGDRuleParameter'] = _SPARSECOMMONSGDRULEPARAMETER + 'SparseCommonSGDRuleParameter' +] = _SPARSECOMMONSGDRULEPARAMETER DESCRIPTOR.message_types_by_name[ - 'SparseNaiveSGDRuleParameter'] = _SPARSENAIVESGDRULEPARAMETER + 'SparseNaiveSGDRuleParameter' +] = _SPARSENAIVESGDRULEPARAMETER DESCRIPTOR.message_types_by_name[ - 'SparseAdagradSGDRuleParameter'] = _SPARSEADAGRADSGDRULEPARAMETER + 'SparseAdagradSGDRuleParameter' +] = _SPARSEADAGRADSGDRULEPARAMETER DESCRIPTOR.message_types_by_name[ - 'SparseAdamSGDParameter'] = _SPARSEADAMSGDPARAMETER + 'SparseAdamSGDParameter' +] = _SPARSEADAMSGDPARAMETER DESCRIPTOR.message_types_by_name[ - 'DenseSGDRuleParameter'] = _DENSESGDRULEPARAMETER + 'DenseSGDRuleParameter' +] = _DENSESGDRULEPARAMETER DESCRIPTOR.message_types_by_name['AdamSGDParameter'] = _ADAMSGDPARAMETER DESCRIPTOR.message_types_by_name['NaiveSGDParameter'] = _NAIVESGDPARAMETER DESCRIPTOR.message_types_by_name['SummarySGDParameter'] = _SUMMARYSGDPARAMETER DESCRIPTOR.message_types_by_name[ - 'MovingAverageRuleParameter'] = _MOVINGAVERAGERULEPARAMETER + 'MovingAverageRuleParameter' +] = _MOVINGAVERAGERULEPARAMETER DESCRIPTOR.message_types_by_name['PsResponseMessage'] = _PSRESPONSEMESSAGE DESCRIPTOR.message_types_by_name['FsClientParameter'] = _FSCLIENTPARAMETER DESCRIPTOR.enum_types_by_name['TableType'] = _TABLETYPE @@ -2785,254 +2960,303 @@ DESCRIPTOR.enum_types_by_name['PsCmdID'] = _PSCMDID PSParameter = _reflection.GeneratedProtocolMessageType( 'PSParameter', - (_message.Message, ), - dict(DESCRIPTOR=_PSPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PSParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PSPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PSParameter) + ), +) _sym_db.RegisterMessage(PSParameter) WorkerParameter = _reflection.GeneratedProtocolMessageType( 'WorkerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_WORKERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.WorkerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_WORKERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.WorkerParameter) + ), +) _sym_db.RegisterMessage(WorkerParameter) ServerParameter = _reflection.GeneratedProtocolMessageType( 'ServerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SERVERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.ServerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SERVERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ServerParameter) + ), +) _sym_db.RegisterMessage(ServerParameter) DownpourWorkerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourWorkerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DOWNPOURWORKERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourWorkerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURWORKERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourWorkerParameter) + ), +) _sym_db.RegisterMessage(DownpourWorkerParameter) DownpourTrainerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourTrainerParameter', - (_message.Message, ), - 
dict(DESCRIPTOR=_DOWNPOURTRAINERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourTrainerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURTRAINERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourTrainerParameter) + ), +) _sym_db.RegisterMessage(DownpourTrainerParameter) ProgramConfig = _reflection.GeneratedProtocolMessageType( 'ProgramConfig', - (_message.Message, ), - dict(DESCRIPTOR=_PROGRAMCONFIG, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.ProgramConfig) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PROGRAMCONFIG, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ProgramConfig) + ), +) _sym_db.RegisterMessage(ProgramConfig) DenseTableParameter = _reflection.GeneratedProtocolMessageType( 'DenseTableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DENSETABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DenseTableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DENSETABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DenseTableParameter) + ), +) _sym_db.RegisterMessage(DenseTableParameter) SparseTableParameter = _reflection.GeneratedProtocolMessageType( 'SparseTableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SPARSETABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SparseTableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SPARSETABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SparseTableParameter) + ), +) _sym_db.RegisterMessage(SparseTableParameter) DownpourServerParameter = _reflection.GeneratedProtocolMessageType( 'DownpourServerParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DOWNPOURSERVERPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DownpourServerParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_DOWNPOURSERVERPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DownpourServerParameter) + ), +) _sym_db.RegisterMessage(DownpourServerParameter) ServerServiceParameter = _reflection.GeneratedProtocolMessageType( 'ServerServiceParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SERVERSERVICEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.ServerServiceParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SERVERSERVICEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.ServerServiceParameter) + ), +) _sym_db.RegisterMessage(ServerServiceParameter) TableParameter = _reflection.GeneratedProtocolMessageType( 'TableParameter', - (_message.Message, ), - dict(DESCRIPTOR=_TABLEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.TableParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_TABLEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.TableParameter) + ), +) _sym_db.RegisterMessage(TableParameter) TableAccessorParameter = _reflection.GeneratedProtocolMessageType( 'TableAccessorParameter', - (_message.Message, ), - dict(DESCRIPTOR=_TABLEACCESSORPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.TableAccessorParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_TABLEACCESSORPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.TableAccessorParameter) + ), +) 
_sym_db.RegisterMessage(TableAccessorParameter) DownpourTableAccessorParameter = _reflection.GeneratedProtocolMessageType( 'DownpourTableAccessorParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_DOWNPOURTABLEACCESSORPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.DownpourTableAccessorParameter) - )) + ), +) _sym_db.RegisterMessage(DownpourTableAccessorParameter) TableAccessorSaveParameter = _reflection.GeneratedProtocolMessageType( 'TableAccessorSaveParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_TABLEACCESSORSAVEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.TableAccessorSaveParameter) - )) + ), +) _sym_db.RegisterMessage(TableAccessorSaveParameter) PsRequestMessage = _reflection.GeneratedProtocolMessageType( 'PsRequestMessage', - (_message.Message, ), - dict(DESCRIPTOR=_PSREQUESTMESSAGE, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PsRequestMessage) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PSREQUESTMESSAGE, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PsRequestMessage) + ), +) _sym_db.RegisterMessage(PsRequestMessage) SparseSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'SparseSGDRuleParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SPARSESGDRULEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SparseSGDRuleParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SPARSESGDRULEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SparseSGDRuleParameter) + ), +) _sym_db.RegisterMessage(SparseSGDRuleParameter) SparseCommonSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'SparseCommonSGDRuleParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_SPARSECOMMONSGDRULEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.SparseCommonSGDRuleParameter) - )) + ), +) _sym_db.RegisterMessage(SparseCommonSGDRuleParameter) SparseNaiveSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'SparseNaiveSGDRuleParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_SPARSENAIVESGDRULEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.SparseNaiveSGDRuleParameter) - )) + ), +) _sym_db.RegisterMessage(SparseNaiveSGDRuleParameter) SparseAdagradSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'SparseAdagradSGDRuleParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_SPARSEADAGRADSGDRULEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.SparseAdagradSGDRuleParameter) - )) + ), +) _sym_db.RegisterMessage(SparseAdagradSGDRuleParameter) SparseAdamSGDParameter = _reflection.GeneratedProtocolMessageType( 'SparseAdamSGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SPARSEADAMSGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SparseAdamSGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SPARSEADAMSGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SparseAdamSGDParameter) + ), +) _sym_db.RegisterMessage(SparseAdamSGDParameter) DenseSGDRuleParameter = _reflection.GeneratedProtocolMessageType( 'DenseSGDRuleParameter', - (_message.Message, ), - dict(DESCRIPTOR=_DENSESGDRULEPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.DenseSGDRuleParameter) - )) + (_message.Message,), + dict( + 
DESCRIPTOR=_DENSESGDRULEPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.DenseSGDRuleParameter) + ), +) _sym_db.RegisterMessage(DenseSGDRuleParameter) AdamSGDParameter = _reflection.GeneratedProtocolMessageType( 'AdamSGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_ADAMSGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.AdamSGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_ADAMSGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.AdamSGDParameter) + ), +) _sym_db.RegisterMessage(AdamSGDParameter) NaiveSGDParameter = _reflection.GeneratedProtocolMessageType( 'NaiveSGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_NAIVESGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.NaiveSGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_NAIVESGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.NaiveSGDParameter) + ), +) _sym_db.RegisterMessage(NaiveSGDParameter) SummarySGDParameter = _reflection.GeneratedProtocolMessageType( 'SummarySGDParameter', - (_message.Message, ), - dict(DESCRIPTOR=_SUMMARYSGDPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.SummarySGDParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_SUMMARYSGDPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.SummarySGDParameter) + ), +) _sym_db.RegisterMessage(SummarySGDParameter) MovingAverageRuleParameter = _reflection.GeneratedProtocolMessageType( 'MovingAverageRuleParameter', - (_message.Message, ), + (_message.Message,), dict( DESCRIPTOR=_MOVINGAVERAGERULEPARAMETER, __module__='ps_pb2' # @@protoc_insertion_point(class_scope:paddle.MovingAverageRuleParameter) - )) + ), +) _sym_db.RegisterMessage(MovingAverageRuleParameter) PsResponseMessage = _reflection.GeneratedProtocolMessageType( 'PsResponseMessage', - (_message.Message, ), - dict(DESCRIPTOR=_PSRESPONSEMESSAGE, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.PsResponseMessage) - )) + (_message.Message,), + dict( + DESCRIPTOR=_PSRESPONSEMESSAGE, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.PsResponseMessage) + ), +) _sym_db.RegisterMessage(PsResponseMessage) FsClientParameter = _reflection.GeneratedProtocolMessageType( 'FsClientParameter', - (_message.Message, ), - dict(DESCRIPTOR=_FSCLIENTPARAMETER, - __module__='ps_pb2' - # @@protoc_insertion_point(class_scope:paddle.FsClientParameter) - )) + (_message.Message,), + dict( + DESCRIPTOR=_FSCLIENTPARAMETER, + __module__='ps_pb2' + # @@protoc_insertion_point(class_scope:paddle.FsClientParameter) + ), +) _sym_db.RegisterMessage(FsClientParameter) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), - _b('\200\001\001\370\001\001')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), _b('\200\001\001\370\001\001') +) # @@protoc_insertion_point(module_scope) diff --git a/python/paddle/fluid/incubate/fleet/tests/ctr_dataset_reader.py b/python/paddle/fluid/incubate/fleet/tests/ctr_dataset_reader.py index 2530a611336c051734d976b5c1c4e5a36251965f..97914fe16123b049f00707d05c4ba3aff6ac6621 100644 --- a/python/paddle/fluid/incubate/fleet/tests/ctr_dataset_reader.py +++ b/python/paddle/fluid/incubate/fleet/tests/ctr_dataset_reader.py @@ -20,9 +20,9 @@ import paddle import paddle.distributed.fleet as fleet from paddle.fluid.log_helper import get_logger 
-logger = get_logger("paddle", - logging.INFO, - fmt='%(asctime)s - %(levelname)s - %(message)s') +logger = get_logger( + "paddle", logging.INFO, fmt='%(asctime)s - %(levelname)s - %(message)s' +) DATA_URL = "http://paddle-ctr-data.bj.bcebos.com/avazu_ctr_data.tgz" DATA_MD5 = "c11df99fbd14e53cd4bfa6567344b26e" @@ -59,17 +59,16 @@ def load_lr_input_record(sent): class DatasetCtrReader(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def iter(): fs = line.strip().split('\t') dnn_input = load_dnn_input_record(fs[0]) lr_input = load_lr_input_record(fs[1]) click = [int(fs[2])] - yield ("dnn_data", dnn_input), \ - ("lr_data", lr_input), \ - ("click", click) + yield ("dnn_data", dnn_input), ("lr_data", lr_input), ( + "click", + click, + ) return iter @@ -85,7 +84,9 @@ def prepare_data(): lines = f.readlines() err_info = "wrong meta format" assert len(lines) == 2, err_info - assert 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1], err_info + assert ( + 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1] + ), err_info res = map(int, [_.split(':')[1] for _ in lines]) res = list(res) dnn_input_dim = res[0] diff --git a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py index 1b763c6ed59527954e0886b1402a6eaf322df985..d2c843ca4d0b59770c6d7bfcac8c225cac7768bd 100644 --- a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py +++ b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py @@ -19,16 +19,20 @@ import time import paddle import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, +) from paddle.fluid.log_helper import get_logger import ctr_dataset_reader -logger = get_logger("fluid", - logging.INFO, - fmt='%(asctime)s - %(levelname)s - %(message)s') +logger = get_logger( + "fluid", logging.INFO, fmt='%(asctime)s - %(levelname)s - %(message)s' +) def parse_args(): @@ -39,48 +43,64 @@ def parse_args(): '--role', type=str, default='pserver', # trainer or pserver - help='The path for model to store (default: models)') + help='The path for model to store (default: models)', + ) parser.add_argument( '--endpoints', type=str, default='127.0.0.1:6000', - help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001') + help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001', + ) parser.add_argument( '--current_endpoint', type=str, default='127.0.0.1:6000', - help='The path for model to store (default: 127.0.0.1:6000)') - parser.add_argument('--trainer_id', - type=int, - default=0, - help='The path for model to store (default: models)') - parser.add_argument('--trainers', - type=int, - default=1, - help='The num of trainers, (default: 1)') + help='The path for model to store (default: 127.0.0.1:6000)', + ) + parser.add_argument( + '--trainer_id', + type=int, + default=0, + help='The path for model to store (default: models)', + ) + parser.add_argument( + '--trainers', + type=int, + default=1, + help='The num of trainers, (default: 1)', + ) return parser.parse_args() def model(): - dnn_input_dim, lr_input_dim, train_file_path = 
ctr_dataset_reader.prepare_data( - ) + ( + dnn_input_dim, + lr_input_dim, + train_file_path, + ) = ctr_dataset_reader.prepare_data() """ network definition """ - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] @@ -92,8 +112,10 @@ def model(): size=[dnn_input_dim, dnn_layer_dims[0]], param_attr=fluid.ParamAttr( name="deep_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, pool_type="sum") dnn_out = dnn_pool for i, dim in enumerate(dnn_layer_dims[1:]): @@ -101,9 +123,11 @@ def model(): input=dnn_out, size=dim, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01)), - name='dnn-fc-%d' % i) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc # build lr model @@ -113,16 +137,19 @@ def model(): size=[lr_input_dim, 1], param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum") merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax') acc = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, - label=label) + auc_var, batch_auc_var, auc_states = fluid.layers.auc( + input=predict, label=label + ) cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(x=cost) @@ -140,9 +167,11 @@ def train(args): role = role_maker.UserDefinedRoleMaker( current_id=current_id, role=role_maker.Role.WORKER - if args.role.upper() == "TRAINER" else role_maker.Role.SERVER, + if args.role.upper() == "TRAINER" + else role_maker.Role.SERVER, worker_num=args.trainers, - server_endpoints=endpoints) + server_endpoints=endpoints, + ) exe = fluid.Executor(fluid.CPUPlace()) fleet.init(role) @@ -183,15 +212,18 @@ def train(args): logger.info("epoch {} start".format(epoch_id)) pass_start = time.time() dataset.set_filelist(filelist) - exe.train_from_dataset(program=fleet.main_program, - dataset=dataset, - fetch_list=[avg_cost], - fetch_info=["cost"], - print_period=100, - debug=False) + exe.train_from_dataset( + program=fleet.main_program, + dataset=dataset, + fetch_list=[avg_cost], + fetch_info=["cost"], + print_period=100, + debug=False, + ) pass_time = time.time() - pass_start - logger.info("epoch {} finished, pass_time {}".format( - epoch_id, pass_time)) + logger.info( + "epoch {} finished, pass_time {}".format(epoch_id, pass_time) + ) 
fleet.stop_worker() diff --git a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py index 365043f92d007636ac703508bf958a4fa5d4a651..b15892e0f5ec01c7c520480c8d2b04dbe2b10049 100644 --- a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py +++ b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py @@ -32,9 +32,9 @@ OpRole = core.op_proto_and_checker_maker.OpRole __all__ = ["FleetUtil", "GPUPSUtil"] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s %(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s %(levelname)s: %(message)s' +) fleet = None @@ -57,14 +57,21 @@ class FleetUtil(object): op_maker = core.op_proto_and_checker_maker self.op_role_key = op_maker.kOpRoleAttrName() if mode == "pslib": - from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet as fleet_pslib + from paddle.fluid.incubate.fleet.parameter_server.pslib import ( + fleet as fleet_pslib, + ) + fleet = fleet_pslib elif mode == "transpiler": - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet as fleet_transpiler + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet as fleet_transpiler, + ) + fleet = fleet_transpiler else: raise ValueError( - "Please choose one mode from [\"pslib\", \"transpiler\"]") + "Please choose one mode from [\"pslib\", \"transpiler\"]" + ) def rank0_print(self, s): """ @@ -124,11 +131,13 @@ class FleetUtil(object): return _logger.error(s) - def set_zero(self, - var_name, - scope=fluid.global_scope(), - place=fluid.CPUPlace(), - param_type="int64"): + def set_zero( + self, + var_name, + scope=fluid.global_scope(), + place=fluid.CPUPlace(), + param_type="int64", + ): """ Set tensor of a Variable to zero. @@ -150,11 +159,13 @@ class FleetUtil(object): param_array = np.zeros(param._get_dims()).astype(param_type) param.set(param_array, place) - def print_global_auc(self, - scope=fluid.global_scope(), - stat_pos="_generated_var_2", - stat_neg="_generated_var_3", - print_prefix=""): + def print_global_auc( + self, + scope=fluid.global_scope(), + stat_pos="_generated_var_2", + stat_neg="_generated_var_3", + print_prefix="", + ): r""" Print global auc of all distributed workers. @@ -189,10 +200,12 @@ class FleetUtil(object): auc_value = self.get_global_auc(scope, stat_pos, stat_neg) self.rank0_print(print_prefix + " global auc = %s" % auc_value) - def get_global_auc(self, - scope=fluid.global_scope(), - stat_pos="_generated_var_2", - stat_neg="_generated_var_3"): + def get_global_auc( + self, + scope=fluid.global_scope(), + stat_pos="_generated_var_2", + stat_neg="_generated_var_3", + ): """ Get global auc of all distributed workers. 
@@ -324,15 +337,17 @@ class FleetUtil(object): """ fleet.save_persistables(None, path, mode=mode) - def _get_xbox_str(self, - output_path, - day, - model_path, - xbox_base_key, - data_path, - hadoop_fs_name, - monitor_data={}, - mode="patch"): + def _get_xbox_str( + self, + output_path, + day, + model_path, + xbox_base_key, + data_path, + hadoop_fs_name, + monitor_data={}, + mode="patch", + ): xbox_dict = collections.OrderedDict() if mode == "base": xbox_dict["id"] = str(xbox_base_key) @@ -344,7 +359,7 @@ class FleetUtil(object): xbox_dict["id"] = str(int(time.time())) xbox_dict["key"] = str(xbox_base_key) if model_path.startswith("hdfs:") or model_path.startswith("afs:"): - model_path = model_path[model_path.find(":") + 1:] + model_path = model_path[model_path.find(":") + 1 :] xbox_dict["input"] = hadoop_fs_name + model_path.rstrip("/") + "/000" xbox_dict["record_count"] = "111111" xbox_dict["partition_type"] = "2" @@ -360,20 +375,23 @@ class FleetUtil(object): xbox_dict["job_id"] = job_id_with_host # currently hard code here, set monitor_data empty string xbox_dict["monitor_data"] = "" - xbox_dict["monitor_path"] = output_path.rstrip("/") + "/monitor/" \ - + day + ".txt" + xbox_dict["monitor_path"] = ( + output_path.rstrip("/") + "/monitor/" + day + ".txt" + ) xbox_dict["mpi_size"] = str(fleet.worker_num()) return json.dumps(xbox_dict) - def write_model_donefile(self, - output_path, - day, - pass_id, - xbox_base_key, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME", - donefile_name="donefile.txt"): + def write_model_donefile( + self, + output_path, + day, + pass_id, + xbox_base_key, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + donefile_name="donefile.txt", + ): """ write donefile when save model @@ -414,11 +432,16 @@ class FleetUtil(object): if fleet.worker_index() == 0: donefile_path = output_path + "/" + donefile_name - content = "%s\t%lu\t%s\t%s\t%d" % (day, xbox_base_key,\ - model_path, pass_id, 0) + content = "%s\t%lu\t%s\t%s\t%d" % ( + day, + xbox_base_key, + model_path, + pass_id, + 0, + ) configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if client.is_file(donefile_path): @@ -428,8 +451,9 @@ class FleetUtil(object): pass_list = [i.split("\t")[3] for i in pre_content_list] exist = False for i in range(len(day_list)): - if int(day) == int(day_list[i]) and \ - int(pass_id) == int(pass_list[i]): + if int(day) == int(day_list[i]) and int(pass_id) == int( + pass_list[i] + ): exist = True break if not exist: @@ -438,30 +462,36 @@ class FleetUtil(object): f.write(content + "\n") client.delete(donefile_path) client.upload(donefile_name, output_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) else: - self.rank0_error("not write %s because %s/%s already " - "exists" % (donefile_name, day, pass_id)) + self.rank0_error( + "not write %s because %s/%s already " + "exists" % (donefile_name, day, pass_id) + ) else: with open(donefile_name, "w") as f: f.write(content + "\n") client.upload(donefile_name, output_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) fleet._role_maker._barrier_worker() - def write_xbox_donefile(self, - output_path, - day, - pass_id, - xbox_base_key, - data_path, - hadoop_fs_name, - 
hadoop_fs_ugi, - monitor_data={}, - hadoop_home="$HADOOP_HOME", - donefile_name=None): + def write_xbox_donefile( + self, + output_path, + day, + pass_id, + xbox_base_key, + data_path, + hadoop_fs_name, + hadoop_fs_ugi, + monitor_data={}, + hadoop_home="$HADOOP_HOME", + donefile_name=None, + ): """ write delta donefile or xbox base donefile @@ -518,12 +548,19 @@ class FleetUtil(object): if fleet.worker_index() == 0: donefile_path = output_path + "/" + donefile_name - xbox_str = self._get_xbox_str(output_path, day, model_path, \ - xbox_base_key, data_path, hadoop_fs_name, monitor_data={}, - mode=mode) + xbox_str = self._get_xbox_str( + output_path, + day, + model_path, + xbox_base_key, + data_path, + hadoop_fs_name, + monitor_data={}, + mode=mode, + ) configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if client.is_file(donefile_path): @@ -532,9 +569,11 @@ class FleetUtil(object): last_day = last_dict["input"].split("/")[-3] last_pass = last_dict["input"].split("/")[-2].split("-")[-1] exist = False - if int(day) < int(last_day) or \ - int(day) == int(last_day) and \ - int(pass_id) <= int(last_pass): + if ( + int(day) < int(last_day) + or int(day) == int(last_day) + and int(pass_id) <= int(last_pass) + ): exist = True if not exist: with open(donefile_name, "w") as f: @@ -542,29 +581,35 @@ class FleetUtil(object): f.write(xbox_str + "\n") client.delete(donefile_path) client.upload(donefile_name, output_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) else: - self.rank0_error("not write %s because %s/%s already " - "exists" % (donefile_name, day, pass_id)) + self.rank0_error( + "not write %s because %s/%s already " + "exists" % (donefile_name, day, pass_id) + ) else: with open(donefile_name, "w") as f: f.write(xbox_str + "\n") client.upload(donefile_name, output_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) fleet._role_maker._barrier_worker() - def write_cache_donefile(self, - output_path, - day, - pass_id, - key_num, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME", - donefile_name="sparse_cache.meta", - **kwargs): + def write_cache_donefile( + self, + output_path, + day, + pass_id, + key_num, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + donefile_name="sparse_cache.meta", + **kwargs + ): """ write cache donefile @@ -613,15 +658,18 @@ class FleetUtil(object): donefile_path = model_path + "/" + donefile_name configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if client.is_file(donefile_path): - self.rank0_error( \ - "not write because %s already exists" % donefile_path) + self.rank0_error( + "not write because %s already exists" % donefile_path + ) else: - meta_str = "file_prefix:part\npart_num:%s\nkey_num:%d\n" \ - % (file_num, key_num) + meta_str = "file_prefix:part\npart_num:%s\nkey_num:%d\n" % ( + file_num, + key_num, + ) with open(donefile_name, "w") as f: f.write(meta_str) client.upload(donefile_name, model_path) @@ -779,10 +827,9 @@ class FleetUtil(object): suffix_name = "/%s/delta-%s" % (day, pass_id) model_path = output_path.rstrip("/") + suffix_name self.rank0_print("going to save_cache_model %s" 
% model_path) - key_num = fleet.save_cache_model(None, - model_path, - mode=mode, - table_id=table_id) + key_num = fleet.save_cache_model( + None, model_path, mode=mode, table_id=table_id + ) self.rank0_print("save_cache_model done") return key_num @@ -813,10 +860,9 @@ class FleetUtil(object): suffix_name = "/%s/base" % day model_path = output_path.rstrip("/") + suffix_name self.rank0_print("going to save_cache_base_model %s" % model_path) - key_num = fleet.save_cache_model(None, - model_path, - mode=2, - table_id=table_id) + key_num = fleet.save_cache_model( + None, model_path, mode=2, table_id=table_id + ) self.rank0_print("save_cache_base_model done") return key_num @@ -839,8 +885,11 @@ class FleetUtil(object): fleet._role_maker._barrier_worker() if fleet._role_maker.is_first_worker(): prog_id = str(id(program)) - tables = fleet._opt_info["program_id_to_worker"][prog_id].\ - get_desc().dense_table + tables = ( + fleet._opt_info["program_id_to_worker"][prog_id] + .get_desc() + .dense_table + ) prog_conf = fleet._opt_info['program_configs'][prog_id] prog_tables = {} for key in prog_conf: @@ -855,27 +904,33 @@ class FleetUtil(object): for i in range(0, len(table.dense_variable_name)): var_name = table.dense_variable_name[i] if scope.find_var(var_name) is None: - raise ValueError("var " + var_name + - " not found in scope " + - "when pull dense") + raise ValueError( + "var " + + var_name + + " not found in scope " + + "when pull dense" + ) var_name_list.append(var_name) - fleet._fleet_ptr.pull_dense(scope, int(table.table_id), - var_name_list) + fleet._fleet_ptr.pull_dense( + scope, int(table.table_id), var_name_list + ) fleet._role_maker._barrier_worker() - def save_paddle_inference_model(self, - executor, - scope, - program, - feeded_vars, - target_vars, - output_path, - day, - pass_id, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME", - save_combine=True): + def save_paddle_inference_model( + self, + executor, + scope, + program, + feeded_vars, + target_vars, + output_path, + day, + pass_id, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + save_combine=True, + ): """ save paddle inference model, and upload to hdfs dnn_plugin path @@ -925,26 +980,31 @@ class FleetUtil(object): target_vars=target_vars, executor=executor, main_program=program.clone(), - params_filename="params") + params_filename="params", + ) else: fluid.io.save_inference_model( dirname=model_name, feeded_var_names=feeded_var_names, target_vars=target_vars, executor=executor, - main_program=program.clone()) + main_program=program.clone(), + ) configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if pass_id == "-1": dest = "%s/%s/base/dnn_plugin/" % (output_path, day) else: - dest = "%s/%s/delta-%s/dnn_plugin/" % (output_path, day, - pass_id) + dest = "%s/%s/delta-%s/dnn_plugin/" % ( + output_path, + day, + pass_id, + ) if not client.is_exist(dest): client.makedirs(dest) @@ -952,19 +1012,21 @@ class FleetUtil(object): fleet._role_maker._barrier_worker() - def save_paddle_params(self, - executor, - scope, - program, - model_name, - output_path, - day, - pass_id, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME", - var_names=None, - save_combine=True): + def save_paddle_params( + self, + executor, + scope, + program, + model_name, + output_path, + day, + pass_id, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + var_names=None, + save_combine=True, + ): """ 
save paddle model, and upload to hdfs dnn_plugin path @@ -1028,36 +1090,39 @@ class FleetUtil(object): vars = [program.global_block().var(i) for i in var_names] with fluid.scope_guard(scope): if save_combine: - fluid.io.save_vars(executor, - "./", - program, - vars=vars, - filename=model_name) + fluid.io.save_vars( + executor, "./", program, vars=vars, filename=model_name + ) else: fluid.io.save_vars(executor, model_name, program, vars=vars) configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if pass_id == "-1": dest = "%s/%s/base/dnn_plugin/" % (output_path, day) else: - dest = "%s/%s/delta-%s/dnn_plugin/" % (output_path, day, - pass_id) + dest = "%s/%s/delta-%s/dnn_plugin/" % ( + output_path, + day, + pass_id, + ) if not client.is_exist(dest): client.mkdirs(dest) client.upload(model_name, dest, multi_processes=5, overwrite=True) fleet._role_maker._barrier_worker() - def get_last_save_xbox_base(self, - output_path, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME"): + def get_last_save_xbox_base( + self, + output_path, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + ): r""" get last saved base xbox info from xbox_base_done.txt @@ -1086,7 +1151,7 @@ class FleetUtil(object): donefile_path = output_path + "/xbox_base_done.txt" configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if not client.is_file(donefile_path): @@ -1098,11 +1163,13 @@ class FleetUtil(object): xbox_base_key = int(last_dict["key"]) return [last_day, last_path, xbox_base_key] - def get_last_save_xbox(self, - output_path, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME"): + def get_last_save_xbox( + self, + output_path, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + ): r""" get last saved xbox info from xbox_patch_done.txt @@ -1131,7 +1198,7 @@ class FleetUtil(object): donefile_path = output_path + "/xbox_patch_done.txt" configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if not client.is_file(donefile_path): @@ -1144,11 +1211,13 @@ class FleetUtil(object): xbox_base_key = int(last_dict["key"]) return [last_day, last_pass, last_path, xbox_base_key] - def get_last_save_model(self, - output_path, - hadoop_fs_name, - hadoop_fs_ugi, - hadoop_home="$HADOOP_HOME"): + def get_last_save_model( + self, + output_path, + hadoop_fs_name, + hadoop_fs_ugi, + hadoop_home="$HADOOP_HOME", + ): r""" get last saved model info from donefile.txt @@ -1180,7 +1249,7 @@ class FleetUtil(object): donefile_path = output_path + "/donefile.txt" configs = { "fs.default.name": hadoop_fs_name, - "hadoop.job.ugi": hadoop_fs_ugi + "hadoop.job.ugi": hadoop_fs_ugi, } client = HDFSClient(hadoop_home, configs) if not client.is_file(donefile_path): @@ -1193,8 +1262,9 @@ class FleetUtil(object): xbox_base_key = int(content[1]) return [last_save_day, last_save_pass, last_path, xbox_base_key] - def get_online_pass_interval(self, days, hours, split_interval, - split_per_pass, is_data_hourly_placed): + def get_online_pass_interval( + self, days, hours, split_interval, split_per_pass, is_data_hourly_placed + ): """ get online pass interval @@ -1254,16 +1324,18 @@ class FleetUtil(object): return online_pass_interval - def get_global_metrics(self, - scope=fluid.global_scope(), - 
stat_pos_name="_generated_var_2", - stat_neg_name="_generated_var_3", - sqrerr_name="sqrerr", - abserr_name="abserr", - prob_name="prob", - q_name="q", - pos_ins_num_name="pos", - total_ins_num_name="total"): + def get_global_metrics( + self, + scope=fluid.global_scope(), + stat_pos_name="_generated_var_2", + stat_neg_name="_generated_var_3", + sqrerr_name="sqrerr", + abserr_name="abserr", + prob_name="prob", + q_name="q", + pos_ins_num_name="pos", + total_ins_num_name="total", + ): r""" get global metrics, including auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num. @@ -1317,8 +1389,10 @@ class FleetUtil(object): similarity_norm, label) """ - if scope.find_var(stat_pos_name) is None or \ - scope.find_var(stat_neg_name) is None: + if ( + scope.find_var(stat_pos_name) is None + or scope.find_var(stat_neg_name) is None + ): self.rank0_print("not found auc bucket") return [None] * 9 elif scope.find_var(sqrerr_name) is None: @@ -1337,8 +1411,9 @@ class FleetUtil(object): self.rank0_print("not found pos_ins_num_name=%s" % pos_ins_num_name) return [None] * 9 elif scope.find_var(total_ins_num_name) is None: - self.rank0_print("not found total_ins_num_name=%s" % \ - total_ins_num_name) + self.rank0_print( + "not found total_ins_num_name=%s" % total_ins_num_name + ) return [None] * 9 # barrier worker to ensure all workers finished training @@ -1427,8 +1502,9 @@ class FleetUtil(object): adjust_ctr = ctr_sum / impression_sum if adjust_ctr == 0: continue - relative_error = \ - math.sqrt((1 - adjust_ctr) / (adjust_ctr * impression_sum)) + relative_error = math.sqrt( + (1 - adjust_ctr) / (adjust_ctr * impression_sum) + ) if relative_error < k_relative_error_bound: actual_ctr = click_sum / impression_sum relative_ctr_error = abs(actual_ctr / adjust_ctr - 1) @@ -1439,22 +1515,30 @@ class FleetUtil(object): bucket_error = error_sum / error_count if error_count > 0 else 0.0 return [ - auc, bucket_error, mae, rmse, return_actual_ctr, predicted_ctr, - copc, mean_predict_qvalue, - int(total_ins_num) + auc, + bucket_error, + mae, + rmse, + return_actual_ctr, + predicted_ctr, + copc, + mean_predict_qvalue, + int(total_ins_num), ] - def print_global_metrics(self, - scope=fluid.global_scope(), - stat_pos_name="_generated_var_2", - stat_neg_name="_generated_var_3", - sqrerr_name="sqrerr", - abserr_name="abserr", - prob_name="prob", - q_name="q", - pos_ins_num_name="pos", - total_ins_num_name="total", - print_prefix=""): + def print_global_metrics( + self, + scope=fluid.global_scope(), + stat_pos_name="_generated_var_2", + stat_neg_name="_generated_var_3", + sqrerr_name="sqrerr", + abserr_name="abserr", + prob_name="prob", + q_name="q", + pos_ins_num_name="pos", + total_ins_num_name="total", + print_prefix="", + ): r""" print global metrics, including auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num. 
@@ -1505,8 +1589,10 @@ class FleetUtil(object): similarity_norm, label) """ - if scope.find_var(stat_pos_name) is None or \ - scope.find_var(stat_neg_name) is None: + if ( + scope.find_var(stat_pos_name) is None + or scope.find_var(stat_neg_name) is None + ): self.rank0_print("not found auc bucket") return elif scope.find_var(sqrerr_name) is None: @@ -1525,26 +1611,56 @@ class FleetUtil(object): self.rank0_print("not found pos_ins_num_name=%s" % pos_ins_num_name) return elif scope.find_var(total_ins_num_name) is None: - self.rank0_print("not found total_ins_num_name=%s" % \ - total_ins_num_name) + self.rank0_print( + "not found total_ins_num_name=%s" % total_ins_num_name + ) return - auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc,\ - mean_predict_qvalue, total_ins_num = self.get_global_metrics(\ - scope, stat_pos_name, stat_neg_name, sqrerr_name, abserr_name,\ - prob_name, q_name, pos_ins_num_name, total_ins_num_name) + ( + auc, + bucket_error, + mae, + rmse, + actual_ctr, + predicted_ctr, + copc, + mean_predict_qvalue, + total_ins_num, + ) = self.get_global_metrics( + scope, + stat_pos_name, + stat_neg_name, + sqrerr_name, + abserr_name, + prob_name, + q_name, + pos_ins_num_name, + total_ins_num_name, + ) self.rank0_print( "%s global AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f " "RMSE=%.6f Actural_CTR=%.6f Predicted_CTR=%.6f " - "COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s" % - (print_prefix, auc, bucket_error, mae, rmse, actual_ctr, - predicted_ctr, copc, mean_predict_qvalue, total_ins_num)) + "COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s" + % ( + print_prefix, + auc, + bucket_error, + mae, + rmse, + actual_ctr, + predicted_ctr, + copc, + mean_predict_qvalue, + total_ins_num, + ) + ) def program_type_trans(self, prog_dir, prog_fn, is_text): return utils.program_type_trans(prog_dir, prog_fn, is_text) - def draw_from_program_file(self, model_filename, is_text, output_dir, - output_filename): + def draw_from_program_file( + self, model_filename, is_text, output_dir, output_filename + ): """draw program from file""" program = utils.load_program(model_filename, is_text) utils.graphviz(program.global_block(), output_dir, output_filename) @@ -1554,14 +1670,17 @@ class FleetUtil(object): utils.graphviz(program.global_block(), output_dir, output_name) def check_two_programs(self, config): - train_prog = utils.load_program(config.train_prog_path, - config.is_text_train_program) - pruned_prog = utils.load_program(config.pruned_prog_path, - config.is_text_pruned_program) + train_prog = utils.load_program( + config.train_prog_path, config.is_text_train_program + ) + pruned_prog = utils.load_program( + config.pruned_prog_path, config.is_text_pruned_program + ) if config.draw: pruned_dir = os.path.dirname(config.pruned_prog_path) - self.draw_from_program(pruned_prog, pruned_dir, - config.draw_out_name) + self.draw_from_program( + pruned_prog, pruned_dir, config.draw_out_name + ) res = utils.check_pruned_program_vars(train_prog, pruned_prog) if res: _logger.info("check_programs succeed.") @@ -1574,9 +1693,14 @@ class FleetUtil(object): def check_vars_and_dump(self, config): _logger.info("start check_vars_and_dump.") results = utils.check_saved_vars_try_dump( - config.dump_model_dir, config.dump_program_filename, - config.is_text_dump_program, config.feed_config, - config.fetch_config, config.batch_size, config.save_params_filename) + config.dump_model_dir, + config.dump_program_filename, + config.is_text_dump_program, + config.feed_config, + config.fetch_config, + config.batch_size, + 
config.save_params_filename, + ) _logger.info("check_vars_and_dump succeed.") return results @@ -1607,8 +1731,9 @@ class FleetUtil(object): utils.parse_program(program, output_dir) def _is_optimizer_op(self, op): - return self.op_role_key in op.attr_names and \ - int(op.all_attrs()[self.op_role_key]) & int(OpRole.Optimize) + return self.op_role_key in op.attr_names and int( + op.all_attrs()[self.op_role_key] + ) & int(OpRole.Optimize) def split_program_by_device(self, program): ops_list = [] @@ -1619,8 +1744,11 @@ class FleetUtil(object): if self._is_optimizer_op(op): break if op.has_attr("op_device"): - cur_attr = op.attr( - "op_device") if op.attr("op_device") != "" else type_cpu + cur_attr = ( + op.attr("op_device") + if op.attr("op_device") != "" + else type_cpu + ) if pre is None or pre != cur_attr: ops_list.append([]) type_list.append(cur_attr) @@ -1710,8 +1838,9 @@ class FleetUtil(object): send_list[i].extend(list(in_from_pre[i + 1])) prog = program.clone() if merged_type_list[i] != type_cpu: - prog = prog._prune_with_input(list(in_from_pre[i]), - list(send_list[i])) + prog = prog._prune_with_input( + list(in_from_pre[i]), list(send_list[i]) + ) program_list.append(prog) else: program_list.append(prog) @@ -1729,8 +1858,13 @@ class FleetUtil(object): print("warning: non heter program") return None else: - return [start_list[heter_index], end_list[heter_index], send_list[heter_index], \ - recv_list[heter_index], program_list[heter_index]] + return [ + start_list[heter_index], + end_list[heter_index], + send_list[heter_index], + recv_list[heter_index], + program_list[heter_index], + ] class GPUPSUtil(FleetUtil): @@ -1921,12 +2055,14 @@ class GPUPSUtil(FleetUtil): os.remove("donefile.txt") return [last_save_day, last_save_pass, last_path, xbox_base_key] - def write_model_donefile(self, - output_path, - day, - pass_id, - xbox_base_key, - donefile_name="donefile.txt"): + def write_model_donefile( + self, + output_path, + day, + pass_id, + xbox_base_key, + donefile_name="donefile.txt", + ): """ write donefile when save model @@ -1965,8 +2101,13 @@ class GPUPSUtil(FleetUtil): if fleet.worker_index() == 0: donefile_path = output_path + "/" + donefile_name - content = "%s\t%lu\t%s\t%s\t%d" % (day, xbox_base_key,\ - model_path, pass_id, 0) + content = "%s\t%lu\t%s\t%s\t%d" % ( + day, + xbox_base_key, + model_path, + pass_id, + 0, + ) if self._afs.is_file(donefile_path): self._afs.download(donefile_path, donefile_name) pre_content = "" @@ -1978,8 +2119,9 @@ class GPUPSUtil(FleetUtil): os.remove(donefile_name) exist = False for i in range(len(day_list)): - if int(day) == int(day_list[i]) and \ - int(pass_id) == int(pass_list[i]): + if int(day) == int(day_list[i]) and int(pass_id) == int( + pass_list[i] + ): exist = True break if not exist: @@ -1988,29 +2130,35 @@ class GPUPSUtil(FleetUtil): f.write(content + "\n") self._afs.delete(donefile_path) self._afs.upload(donefile_name, donefile_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) else: - self.rank0_error("not write %s because %s/%s already " - "exists" % (donefile_name, day, pass_id)) + self.rank0_error( + "not write %s because %s/%s already " + "exists" % (donefile_name, day, pass_id) + ) else: with open(donefile_name, "w") as f: f.write(content + "\n") self._afs.upload(donefile_name, donefile_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) - - def write_xbox_donefile(self, - 
output_path, - day, - pass_id, - xbox_base_key, - data_path, - hadoop_fs_name, - hadoop_fs_ugi, - monitor_data={}, - hadoop_home="$HADOOP_HOME", - donefile_name=None): + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) + + def write_xbox_donefile( + self, + output_path, + day, + pass_id, + xbox_base_key, + data_path, + hadoop_fs_name, + hadoop_fs_ugi, + monitor_data={}, + hadoop_home="$HADOOP_HOME", + donefile_name=None, + ): """ write delta donefile or xbox base donefile @@ -2063,9 +2211,16 @@ class GPUPSUtil(FleetUtil): data_path = ",".join(data_path) if fleet.worker_index() == 0: donefile_path = output_path + "/" + donefile_name - xbox_str = self._get_xbox_str(output_path, day, model_path, \ - xbox_base_key, data_path, hadoop_fs_name, monitor_data={}, - mode=mode) + xbox_str = self._get_xbox_str( + output_path, + day, + model_path, + xbox_base_key, + data_path, + hadoop_fs_name, + monitor_data={}, + mode=mode, + ) if self._afs.is_exist(donefile_path): self.rank0_info("exist %s succeed" % (donefile_path)) @@ -2080,9 +2235,11 @@ class GPUPSUtil(FleetUtil): os.remove(donefile_name) self.rank0_info("remove %s succeed" % (donefile_name)) exist = False - if int(day) < int(last_day) or \ - int(day) == int(last_day) and \ - int(pass_id) <= int(last_pass): + if ( + int(day) < int(last_day) + or int(day) == int(last_day) + and int(pass_id) <= int(last_pass) + ): exist = True if not exist: with open(donefile_name, "w") as f: @@ -2090,25 +2247,31 @@ class GPUPSUtil(FleetUtil): f.write(xbox_str + "\n") self._afs.delete(donefile_path) self._afs.upload(donefile_name, donefile_path) - self.rank0_info("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) + self.rank0_info( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) else: - self.rank0_info("not write %s because %s/%s already " - "exists" % (donefile_name, day, pass_id)) + self.rank0_info( + "not write %s because %s/%s already " + "exists" % (donefile_name, day, pass_id) + ) else: with open(donefile_name, "w") as f: f.write(xbox_str + "\n") self._afs.upload(donefile_name, donefile_path) - self.rank0_error("write %s/%s %s succeed" % \ - (day, pass_id, donefile_name)) - - def write_cache_donefile(self, - output_path, - day, - pass_id, - key_num, - donefile_name="sparse_cache.meta", - **kwargs): + self.rank0_error( + "write %s/%s %s succeed" % (day, pass_id, donefile_name) + ) + + def write_cache_donefile( + self, + output_path, + day, + pass_id, + key_num, + donefile_name="sparse_cache.meta", + **kwargs + ): """ write cache donefile @@ -2154,25 +2317,30 @@ class GPUPSUtil(FleetUtil): donefile_path = model_path + "/" + donefile_name if self._afs.is_file(donefile_path): - self.rank0_error( \ - "not write because %s already exists" % donefile_path) + self.rank0_error( + "not write because %s already exists" % donefile_path + ) else: - meta_str = "file_prefix:part\npart_num:%s\nkey_num:%d\n" \ - % (file_num, key_num) + meta_str = "file_prefix:part\npart_num:%s\nkey_num:%d\n" % ( + file_num, + key_num, + ) with open(donefile_name, "w") as f: f.write(meta_str) self._afs.upload(donefile_name, donefile_path) self.rank0_error("write %s succeed" % donefile_path) - def _get_xbox_str(self, - output_path, - day, - model_path, - xbox_base_key, - data_path, - hadoop_fs_name, - monitor_data={}, - mode="patch"): + def _get_xbox_str( + self, + output_path, + day, + model_path, + xbox_base_key, + data_path, + hadoop_fs_name, + monitor_data={}, + mode="patch", + ): xbox_dict = collections.OrderedDict() if mode == 
"base": xbox_dict["id"] = str(xbox_base_key) @@ -2184,7 +2352,7 @@ class GPUPSUtil(FleetUtil): xbox_dict["id"] = str(int(time.time())) xbox_dict["key"] = str(xbox_base_key) if model_path.startswith("hdfs:") or model_path.startswith("afs:"): - model_path = model_path[model_path.find(":") + 1:] + model_path = model_path[model_path.find(":") + 1 :] xbox_dict["input"] = hadoop_fs_name + model_path.rstrip("/") + "/000" xbox_dict["record_count"] = "111111" xbox_dict["partition_type"] = "2" @@ -2194,7 +2362,8 @@ class GPUPSUtil(FleetUtil): xbox_dict["job_id"] = os.environ.get("PADDLE_JOB_ID", "") # currently hard code here, set monitor_data empty string xbox_dict["monitor_data"] = "" - xbox_dict["monitor_path"] = output_path.rstrip("/") + "/monitor/" \ - + day + ".txt" + xbox_dict["monitor_path"] = ( + output_path.rstrip("/") + "/monitor/" + day + ".txt" + ) xbox_dict["mpi_size"] = str(fleet.worker_num()) return json.dumps(xbox_dict) diff --git a/python/paddle/fluid/incubate/fleet/utils/hdfs.py b/python/paddle/fluid/incubate/fleet/utils/hdfs.py index 41aff381959e25fce39792aef1da5ff05ccf1783..c35e266357e0e8fb480b3d0d7f87090096b2c7af 100644 --- a/python/paddle/fluid/incubate/fleet/utils/hdfs.py +++ b/python/paddle/fluid/incubate/fleet/utils/hdfs.py @@ -24,8 +24,17 @@ import copy import errno import time import logging -#from . import fs -from paddle.distributed.fleet.utils.fs import FS, LocalFS, FSFileExistsError, FSFileNotExistsError, ExecuteError, FSTimeOut, FSShellCmdAborted + +# from . import fs +from paddle.distributed.fleet.utils.fs import ( + FS, + LocalFS, + FSFileExistsError, + FSFileNotExistsError, + ExecuteError, + FSTimeOut, + FSShellCmdAborted, +) from paddle.fluid import core import functools @@ -35,9 +44,7 @@ __all__ = ["HDFSClient"] def _handle_errors(max_time_out=None): - def decorator(f): - @functools.wraps(f) def handler(*args, **kwargs): o = args[0] @@ -53,19 +60,23 @@ def _handle_errors(max_time_out=None): while True: try: return f(*args, **kwargs) - #important: only ExecuteError need to retry + # important: only ExecuteError need to retry except ExecuteError as e: if time.time() - start >= time_out: - raise FSTimeOut("args:{} timeout:{}".format( - args, - time.time() - start)) + raise FSTimeOut( + "args:{} timeout:{}".format( + args, time.time() - start + ) + ) time.sleep(inter) if time.time() - last_print_time > 30: - print("hadoop operator timeout:args:{} timeout:{}".format( - args, - time.time() - start)) + print( + "hadoop operator timeout:args:{} timeout:{}".format( + args, time.time() - start + ) + ) last_print_time = time.time() return handler @@ -74,13 +85,13 @@ def _handle_errors(max_time_out=None): class HDFSClient(FS): - def __init__( - self, - hadoop_home, - configs, - time_out=5 * 60 * 1000, #ms - sleep_inter=1000): #ms + self, + hadoop_home, + configs, + time_out=5 * 60 * 1000, # ms + sleep_inter=1000, + ): # ms # Raise exception if JAVA_HOME not exists. 
self.pre_commands = [] @@ -98,7 +109,8 @@ class HDFSClient(FS): self._sleep_inter = sleep_inter self._base_cmd = " ".join(self.pre_commands) self._bd_err_re = re.compile( - r'\s?responseErrorMsg\s?\:.*, errorCode\:\s?[0-9]+, path\:') + r'\s?responseErrorMsg\s?\:.*, errorCode\:\s?[0-9]+, path\:' + ) def _run_cmd(self, cmd, redirect_stderr=False): exe_cmd = "{} -{}".format(self._base_cmd, cmd) @@ -269,7 +281,8 @@ class HDFSClient(FS): if test_exists: if not self.is_exist(fs_src_path): raise FSFileNotExistsError( - "{} is not exists".format(fs_src_path)) + "{} is not exists".format(fs_src_path) + ) if self.is_exist(fs_dst_path): raise FSFileExistsError("{} exists already".format(fs_dst_path)) @@ -285,8 +298,7 @@ class HDFSClient(FS): if ret != 0: raise ExecuteError(cmd) except Exception as e: - if not self.is_exist(fs_src_path) and \ - self.is_exist(fs_dst_path): + if not self.is_exist(fs_src_path) and self.is_exist(fs_dst_path): return raise e diff --git a/python/paddle/fluid/incubate/fleet/utils/http_server.py b/python/paddle/fluid/incubate/fleet/utils/http_server.py index 685228f07490ee699829b797373144cf70ea6522..269f912073e858f7ae38bd5447611fe5f58fdb28 100644 --- a/python/paddle/fluid/incubate/fleet/utils/http_server.py +++ b/python/paddle/fluid/incubate/fleet/utils/http_server.py @@ -14,6 +14,7 @@ """Http Server.""" import logging + # NOTE: HTTPServer has a different name in python2 and python3 from http.server import HTTPServer import http.server as SimpleHTTPServer @@ -32,9 +33,9 @@ def get_logger(name, level, fmt): return logger -_http_server_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_http_server_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): @@ -164,7 +165,8 @@ class KVServer: start server until user calls stop to let it quit. 
""" self.listen_thread = threading.Thread( - target=lambda: self.http_server.serve_forever()) + target=lambda: self.http_server.serve_forever() + ) self.listen_thread.start() def stop(self): diff --git a/python/paddle/fluid/incubate/fleet/utils/utils.py b/python/paddle/fluid/incubate/fleet/utils/utils.py index c154fcb2f1dfb376331894836d1926c099221cfd..c675fea39bc9c286d31d16b1baf832c9711602af 100644 --- a/python/paddle/fluid/incubate/fleet/utils/utils.py +++ b/python/paddle/fluid/incubate/fleet/utils/utils.py @@ -28,9 +28,13 @@ from paddle.fluid.framework import Program from paddle.fluid.proto import framework_pb2 __all__ = [ - "load_program", "save_program", "program_type_trans", - "check_saved_vars_try_dump", "parse_program", "check_pruned_program_vars", - "graphviz" + "load_program", + "save_program", + "program_type_trans", + "check_saved_vars_try_dump", + "parse_program", + "check_pruned_program_vars", + "graphviz", ] logger = logging.getLogger(__name__) @@ -45,7 +49,8 @@ all_vars_out_fn = "vars_all.log" ops_out_fn = "ops.log" feed_fetch_type_list = [ - core.VarDesc.VarType.FEED_MINIBATCH, core.VarDesc.VarType.FETCH_LIST + core.VarDesc.VarType.FEED_MINIBATCH, + core.VarDesc.VarType.FETCH_LIST, ] not_expected_op_types = ["lookup_table"] @@ -85,12 +90,16 @@ def save_program(program, model_filename='__model__', is_text=False): def check_pruned_program_vars(train_prog, pruned_prog): is_match = True - pruned_vars = [(v.name, v) for v in pruned_prog.list_vars() - if fluid.io.is_persistable(v)] + pruned_vars = [ + (v.name, v) + for v in pruned_prog.list_vars() + if fluid.io.is_persistable(v) + ] pruned_vars = OrderedDict(pruned_vars) pruned_vars_name = [name for name in pruned_vars] logger.info( - "persistable vars in pruned program: {}".format(pruned_vars_name)) + "persistable vars in pruned program: {}".format(pruned_vars_name) + ) for var_name in pruned_vars: var = pruned_vars[var_name] @@ -102,14 +111,23 @@ def check_pruned_program_vars(train_prog, pruned_prog): except ValueError as e: logger.error( "not find variable '%s' in train program. please check pruning." - % var_name) + % var_name + ) logger.error(e) continue - if var.shape != train_prog_var.shape or var.dtype != train_prog_var.dtype: + if ( + var.shape != train_prog_var.shape + or var.dtype != train_prog_var.dtype + ): logger.error( - "variable: {} not match. in pruned program shape: {} dtype:{}, in train program shape: {} dtype: {}" - .format(var_name, var.shape, var.dtype, train_prog_var.shape, - train_prog_var.dtype)) + "variable: {} not match. 
in pruned program shape: {} dtype:{}, in train program shape: {} dtype: {}".format( + var_name, + var.shape, + var.dtype, + train_prog_var.shape, + train_prog_var.dtype, + ) + ) is_match = False return is_match @@ -119,10 +137,12 @@ def graphviz(block, output_dir="", filename='debug'): pdf_path = os.path.join(output_dir, filename + '.pdf') debugger.draw_block_graphviz(block, path=dot_path) cmd = ["dot", "-Tpdf", dot_path, "-o", pdf_path] - p = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) p.wait() @@ -134,17 +154,18 @@ def program_type_trans(prog_dir, prog_fn, is_text): def append_save_op(block, var, path): - block.append_op(type='save', - inputs={'X': [var]}, - outputs={}, - attrs={'file_path': path}) + block.append_op( + type='save', inputs={'X': [var]}, outputs={}, attrs={'file_path': path} + ) def append_load_op(block, var, path): - block.append_op(type='load', - inputs={}, - outputs={'Out': [var]}, - attrs={'file_path': path}) + block.append_op( + type='load', + inputs={}, + outputs={'Out': [var]}, + attrs={'file_path': path}, + ) def save_var(np_array, var_name, shape_list, dtype, save_path): @@ -199,19 +220,34 @@ def feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist): return batch_feed -def try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, - batch_size, feed_config, fetch_config, save_filename, - saved_params): +def try_load_model_vars( + dump_dir, + dump_prog_fn, + is_text_dump_program, + batch_size, + feed_config, + fetch_config, + save_filename, + saved_params, +): place = fluid.CPUPlace() exe = fluid.Executor(place) scope = fluid.core.Scope() with fluid.scope_guard(scope): if is_text_dump_program: - dump_prog_fn = program_type_trans(dump_dir, dump_prog_fn, - is_text_dump_program) - inference_program, feed_target_names, fetch_targets = \ - fluid.io.load_inference_model(dump_dir, exe, model_filename=dump_prog_fn, - params_filename=save_filename) + dump_prog_fn = program_type_trans( + dump_dir, dump_prog_fn, is_text_dump_program + ) + ( + inference_program, + feed_target_names, + fetch_targets, + ) = fluid.io.load_inference_model( + dump_dir, + exe, + model_filename=dump_prog_fn, + params_filename=save_filename, + ) # check program vars and saved vars shape orig_para_shape = { @@ -222,13 +258,17 @@ def try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, var_temp = fluid.global_scope().find_var(each_var.name) assert var_temp != None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape - assert each_var.name in orig_para_shape, each_var.name + "MUST in var list" + assert each_var.name in orig_para_shape, ( + each_var.name + "MUST in var list" + ) orig_shape = orig_para_shape.get(each_var.name) if new_shape != orig_shape: raise RuntimeError( "Shape not matching: the Program requires a parameter with a shape of ({}), " - "while the loaded parameter (namely [ {} ]) has a shape of ({})." 
- .format(orig_shape, each_var.name, new_shape)) + "while the loaded parameter (namely [ {} ]) has a shape of ({}).".format( + orig_shape, each_var.name, new_shape + ) + ) # check feed/fetch vars in program and config fetch_targets_names = [v.name for v in fetch_targets] @@ -238,10 +278,15 @@ def try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, logger.warning("no fetch targets in program.") fetch_list = fetch_targets feed_name_list = feed_target_names - if feed_config.feeded_vars_names is not None and feed_target_names != feed_config.feeded_vars_names: + if ( + feed_config.feeded_vars_names is not None + and feed_target_names != feed_config.feeded_vars_names + ): logger.warning( - "feed vars in program and config are diff: feed in program: {}. feed in config {}." - .format(feed_target_names, feed_config.feeded_vars_names)) + "feed vars in program and config are diff: feed in program: {}. feed in config {}.".format( + feed_target_names, feed_config.feeded_vars_names + ) + ) feed_name_list = feed_config.feeded_vars_names # remove feed op in inference_program. new feed op will be added in exe.run global_block = inference_program.global_block() @@ -252,10 +297,15 @@ def try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, need_to_remove_op_index.append(i) for index in need_to_remove_op_index[::-1]: global_block._remove_op(index) - if fetch_config.fetch_vars_names is not None and fetch_targets_names != fetch_config.fetch_vars_names: + if ( + fetch_config.fetch_vars_names is not None + and fetch_targets_names != fetch_config.fetch_vars_names + ): logger.warning( - "fetch vars in program and config are diff: fetch in program: {}. fetch in config {}." - .format(fetch_targets_names, fetch_config.fetch_vars_names)) + "fetch vars in program and config are diff: fetch in program: {}. fetch in config {}.".format( + fetch_targets_names, fetch_config.fetch_vars_names + ) + ) fetch_list = [ inference_program.global_block().var(i) for i in fetch_config.fetch_vars_names @@ -275,70 +325,101 @@ def try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, # try dump fetch_targets feed_tensors = [] - assert len(feed_config.feeded_vars_names) == len( - feed_config.feeded_vars_dims) == len(feed_config.feeded_vars_types) + assert ( + len(feed_config.feeded_vars_names) + == len(feed_config.feeded_vars_dims) + == len(feed_config.feeded_vars_types) + ) # check program vars and feed tensor shape in config for i in range(len(feed_config.feeded_vars_names)): var = inference_program.global_block().var( - feed_config.feeded_vars_names[i]) + feed_config.feeded_vars_names[i] + ) if not isinstance(feed_config.feeded_vars_dims[i], (list, tuple)): - tensor_shape = (feed_config.feeded_vars_dims[i], ) + tensor_shape = (feed_config.feeded_vars_dims[i],) else: tensor_shape = tuple(feed_config.feeded_vars_dims[i]) feed_config.feeded_vars_dims[i] = tensor_shape var_shape = var.shape[1:] if tensor_shape != var_shape: raise RuntimeError( - "feed variable '{}' shape not match. infer program shape: {}. feed tensor shape: {}" - .format(feed_config.feeded_vars_names[i], var_shape, - tensor_shape)) + "feed variable '{}' shape not match. infer program shape: {}. 
feed tensor shape: {}".format( + feed_config.feeded_vars_names[i], + var_shape, + tensor_shape, + ) + ) if not feed_config.feeded_vars_filelist: logger.info("generate random feed vars.") for i in range(len(feed_config.feeded_vars_names)): var = inference_program.global_block().var( - feed_config.feeded_vars_names[i]) + feed_config.feeded_vars_names[i] + ) # create fake feed tensor. if lod_level > 1, should create_lod_tensor() if var.lod_level == 0: feed_tensors.append( - np.array(np.random.random( - tuple([batch_size] + - list(feed_config.feeded_vars_dims[i]))), - dtype=feed_config.feeded_vars_types[i])) + np.array( + np.random.random( + tuple( + [batch_size] + + list(feed_config.feeded_vars_dims[i]) + ) + ), + dtype=feed_config.feeded_vars_types[i], + ) + ) elif var.lod_level == 1: - t = np.array(np.random.random( - tuple([batch_size] + - list(feed_config.feeded_vars_dims[i]))), - dtype=feed_config.feeded_vars_types[i]) + t = np.array( + np.random.random( + tuple( + [batch_size] + + list(feed_config.feeded_vars_dims[i]) + ) + ), + dtype=feed_config.feeded_vars_types[i], + ) feed_tensors.append( - fluid.create_lod_tensor(t, [[1] * batch_size], place)) + fluid.create_lod_tensor(t, [[1] * batch_size], place) + ) else: raise RuntimeError( "vars with lod_level >= 2 is not supported now in this infer program check tool." ) - results = exe.run(inference_program, - feed={ - name: feed_tensors[i] - for i, name in enumerate(feed_name_list) - }, - fetch_list=fetch_list, - return_numpy=return_numpy) + results = exe.run( + inference_program, + feed={ + name: feed_tensors[i] + for i, name in enumerate(feed_name_list) + }, + fetch_list=fetch_list, + return_numpy=return_numpy, + ) else: - logger.info("load feed vars from files: {}.".format( - feed_config.feeded_vars_filelist)) + logger.info( + "load feed vars from files: {}.".format( + feed_config.feeded_vars_filelist + ) + ) feed_vars = [ inference_program.global_block().var( - feed_config.feeded_vars_names[i]) + feed_config.feeded_vars_names[i] + ) for i in range(len(feed_config.feeded_vars_names)) ] feeder = fluid.DataFeeder(feed_list=feed_vars, place=place) - batch_feed = feed_gen(batch_size, feed_config.feeded_vars_dims, - feed_config.feeded_vars_filelist) + batch_feed = feed_gen( + batch_size, + feed_config.feeded_vars_dims, + feed_config.feeded_vars_filelist, + ) slots = [batch_feed] - results = exe.run(inference_program, - feed=feeder.feed(slots), - fetch_list=fetch_list, - return_numpy=return_numpy) + results = exe.run( + inference_program, + feed=feeder.feed(slots), + fetch_list=fetch_list, + return_numpy=return_numpy, + ) for i, v in enumerate(fetch_list): logger.info("fetch_targets name: %s" % v.name) logger.info("fetch_targets: {}".format(results[i])) @@ -350,31 +431,46 @@ def check_not_expected_ops(prog): for op in prog.global_block().ops: if op.type in not_expected_op_types and op.type not in op_types_set: logger.warning( - "find op type '{}' in program, please check if your program is pruned correctly !" 
- .format(op.type)) + "find op type '{}' in program, please check if your program is pruned correctly !".format( + op.type + ) + ) op_types_set.add(op.type) -def check_saved_vars_try_dump(dump_dir, - dump_prog_fn, - is_text_dump_program, - feed_config, - fetch_config, - batch_size=1, - save_filename=None): - dump_prog = load_program(os.path.join(dump_dir, dump_prog_fn), - is_text_dump_program) +def check_saved_vars_try_dump( + dump_dir, + dump_prog_fn, + is_text_dump_program, + feed_config, + fetch_config, + batch_size=1, + save_filename=None, +): + dump_prog = load_program( + os.path.join(dump_dir, dump_prog_fn), is_text_dump_program + ) saved_params = [ v for v in dump_prog.list_vars() if fluid.io.is_persistable(v) ] - logger.info("persistable vars in dump program: {}".format( - [v.name for v in saved_params])) + logger.info( + "persistable vars in dump program: {}".format( + [v.name for v in saved_params] + ) + ) check_not_expected_ops(dump_prog) - return try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program, - batch_size, feed_config, fetch_config, - save_filename, saved_params) + return try_load_model_vars( + dump_dir, + dump_prog_fn, + is_text_dump_program, + batch_size, + feed_config, + fetch_config, + save_filename, + saved_params, + ) def parse_program(program, output_dir): @@ -383,13 +479,16 @@ def parse_program(program, output_dir): persistable_vars = [ v for v in program.list_vars() if fluid.io.is_persistable(v) ] - output["persistable_vars"] = [{ - 'name': str(v.name), - 'shape': str(v.shape), - 'lod_level': int(v.lod_level), - 'dtype': str(v.dtype), - 'type': str(v.type) - } for v in persistable_vars] + output["persistable_vars"] = [ + { + 'name': str(v.name), + 'shape': str(v.shape), + 'lod_level': int(v.lod_level), + 'dtype': str(v.dtype), + 'type': str(v.type), + } + for v in persistable_vars + ] with open(os.path.join(output_dir, persistable_vars_out_fn), 'w') as f: f.write("persistable vars:\n") for var in output["persistable_vars"]: @@ -398,15 +497,17 @@ def parse_program(program, output_dir): # all vars all_vars = [v for v in program.list_vars()] - output["all_vars"] = [{ - 'name': str(v.name), - 'shape': str(v.shape), - 'lod_level': int(v.lod_level), - 'dtype': str(v.dtype) - } if v.type not in feed_fetch_type_list else { - 'name': str(v.name), - 'type': str(v.type) - } for v in all_vars] + output["all_vars"] = [ + { + 'name': str(v.name), + 'shape': str(v.shape), + 'lod_level': int(v.lod_level), + 'dtype': str(v.dtype), + } + if v.type not in feed_fetch_type_list + else {'name': str(v.name), 'type': str(v.type)} + for v in all_vars + ] with open(os.path.join(output_dir, all_vars_out_fn), 'w') as f: f.write("all vars:\n") for var in output["all_vars"]: @@ -415,11 +516,14 @@ def parse_program(program, output_dir): # ops ops = program.global_block().ops - output["ops"] = [{ - 'type': op.type, - 'input_arg_names': str(op.input_arg_names), - 'output_arg_names': str(op.output_arg_names) - } for op in ops] + output["ops"] = [ + { + 'type': op.type, + 'input_arg_names': str(op.input_arg_names), + 'output_arg_names': str(op.output_arg_names), + } + for op in ops + ] with open(os.path.join(output_dir, ops_out_fn), 'w') as f: f.write("ops:\n") for op in output["ops"]: diff --git a/python/paddle/fluid/inference/__init__.py b/python/paddle/fluid/inference/__init__.py index d6b8b102487923d84b90bafba8ce0bd52f09b6b3..51127de403f7fd6bf9ff4bf115afa9bfb9ed1829 100644 --- a/python/paddle/fluid/inference/__init__.py +++ b/python/paddle/fluid/inference/__init__.py @@ -12,7 
+12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .wrapper import Config, DataType, PlaceType, PrecisionType, Tensor, Predictor +from .wrapper import ( + Config, + DataType, + PlaceType, + PrecisionType, + Tensor, + Predictor, +) from .wrapper import convert_to_mixed_precision -from ..core import create_predictor, get_version, _get_phi_kernel_name, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version +from ..core import ( + create_predictor, + get_version, + _get_phi_kernel_name, + get_num_bytes_of_data_type, + PredictorPool, + get_trt_compile_version, + get_trt_runtime_version, +) diff --git a/python/paddle/fluid/inference/wrapper.py b/python/paddle/fluid/inference/wrapper.py index 83811012e529458e91df34d8dcedfc7efc4b0942..ffad69335a427218493a74acfabb0b76209e6e42 100644 --- a/python/paddle/fluid/inference/wrapper.py +++ b/python/paddle/fluid/inference/wrapper.py @@ -33,8 +33,9 @@ def tensor_copy_from_cpu(self, data): ''' Support input type check based on tensor.copy_from_cpu. ''' - if isinstance(data, np.ndarray) or (isinstance(data, list) and len(data) > 0 - and isinstance(data[0], str)): + if isinstance(data, np.ndarray) or ( + isinstance(data, list) and len(data) > 0 and isinstance(data[0], str) + ): self.copy_from_cpu_bind(data) else: raise TypeError( @@ -50,17 +51,20 @@ def tensor_share_external_data(self, data): self.share_external_data_bind(data) else: raise TypeError( - "In share_external_data, we only support LoDTensor data type.") + "In share_external_data, we only support LoDTensor data type." + ) -def convert_to_mixed_precision(model_file: str, - params_file: str, - mixed_model_file: str, - mixed_params_file: str, - mixed_precision: PrecisionType, - backend: PlaceType, - keep_io_types: bool = True, - black_list: Set = set()): +def convert_to_mixed_precision( + model_file: str, + params_file: str, + mixed_model_file: str, + mixed_params_file: str, + mixed_precision: PrecisionType, + backend: PlaceType, + keep_io_types: bool = True, + black_list: Set = set(), +): ''' Convert a fp32 model to mixed precision model. @@ -80,9 +84,16 @@ def convert_to_mixed_precision(model_file: str, os.makedirs(mixed_model_dirname) if not os.path.exists(mixed_params_dirname): os.makedirs(mixed_params_dirname) - convert_to_mixed_precision_bind(model_file, params_file, mixed_model_file, - mixed_params_file, mixed_precision, backend, - keep_io_types, black_list) + convert_to_mixed_precision_bind( + model_file, + params_file, + mixed_model_file, + mixed_params_file, + mixed_precision, + backend, + keep_io_types, + black_list, + ) Tensor.copy_from_cpu = tensor_copy_from_cpu diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 540ea40833d8d042b358d648b7cd80bdcdae2d8b..3e6c9ef3674ac2d6f9445b77bad810d6d9b42fff 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -16,7 +16,13 @@ import math import functools from . import framework from . 
import core -from .framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, default_main_program, _current_expected_place +from .framework import ( + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, + default_main_program, + _current_expected_place, +) from .lazy_init import lazy_init_helper from .framework import program_guard import numpy as np @@ -27,10 +33,22 @@ from paddle import _C_ops, _legacy_C_ops import paddle __all__ = [ - 'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear', - 'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer', - 'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer', - 'MSRAInitializer', 'NumpyArrayInitializer', 'set_global_initializer' + 'Constant', + 'Uniform', + 'Normal', + 'TruncatedNormal', + 'Xavier', + 'Bilinear', + 'MSRA', + 'ConstantInitializer', + 'UniformInitializer', + 'NormalInitializer', + 'TruncatedNormalInitializer', + 'XavierInitializer', + 'BilinearInitializer', + 'MSRAInitializer', + 'NumpyArrayInitializer', + 'set_global_initializer', ] _global_weight_initializer_ = None @@ -56,8 +74,7 @@ class Initializer(object): return self._lazy_init(param, block) def forward(self, param, block=None): - """Add corresponding initialization operations to the network - """ + """Add corresponding initialization operations to the network""" raise NotImplementedError() def _lazy_init(self, param, block=None): @@ -74,8 +91,9 @@ class Initializer(object): # Add hook function for initializing param in dygraph mode param.set_init_func(functools.partial(self.forward, param, block)) - param._init_op_creator = functools.partial(init_op_creator, - self.forward, param) + param._init_op_creator = functools.partial( + init_op_creator, self.forward, param + ) return param @@ -160,35 +178,47 @@ class ConstantInitializer(Initializer): """ block = self._check_block(block) - assert (isinstance(var, framework.Variable) - or isinstance(var, framework.EagerParamBase)) + assert isinstance(var, framework.Variable) or isinstance( + var, framework.EagerParamBase + ) assert isinstance(block, framework.Block) if in_dygraph_mode(): place = _current_expected_place() if self._force_cpu: place = core.CPUPlace() - _C_ops.full_(var, var.shape, str(float(self._value)), var.dtype, - place) + _C_ops.full_( + var, var.shape, str(float(self._value)), var.dtype, place + ) return None elif _in_legacy_dygraph(): - _legacy_C_ops.fill_constant(var, 'value', float(self._value), - 'force_cpu', self._force_cpu, 'dtype', - int(var.dtype), 'str_value', - str(float(self._value)), 'shape', - var.shape) + _legacy_C_ops.fill_constant( + var, + 'value', + float(self._value), + 'force_cpu', + self._force_cpu, + 'dtype', + int(var.dtype), + 'str_value', + str(float(self._value)), + 'shape', + var.shape, + ) return None else: - op = block.append_op(type="fill_constant", - outputs={"Out": var}, - attrs={ - "shape": var.shape, - "dtype": int(var.dtype), - "value": float(self._value), - 'str_value': str(float(self._value)), - 'force_cpu': self._force_cpu - }, - stop_gradient=True) + op = block.append_op( + type="fill_constant", + outputs={"Out": var}, + attrs={ + "shape": var.shape, + "dtype": int(var.dtype), + "value": float(self._value), + 'str_value': str(float(self._value)), + 'force_cpu': self._force_cpu, + }, + stop_gradient=True, + ) var.op = op return op @@ -214,16 +244,12 @@ class UniformInitializer(Initializer): import paddle.fluid as fluid x = fluid.data(name='x', shape=[None, 1], dtype='float32') fc = 
fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) + param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) """ - def __init__(self, - low=-1.0, - high=1.0, - seed=0, - diag_num=0, - diag_step=0, - diag_val=1.0): + def __init__( + self, low=-1.0, high=1.0, seed=0, diag_num=0, diag_step=0, diag_val=1.0 + ): assert low is not None assert high is not None assert high >= low @@ -232,7 +258,7 @@ class UniformInitializer(Initializer): assert diag_step is not None assert diag_val is not None if diag_num > 0 or diag_step > 0: - assert (diag_num > 0 and diag_step > 0) + assert diag_num > 0 and diag_step > 0 super(UniformInitializer, self).__init__() self._low = low self._high = high @@ -255,9 +281,12 @@ class UniformInitializer(Initializer): block = self._check_block(block) assert isinstance(block, framework.Block) - check_variable_and_dtype(var, "Out", - ["uint16", "float16", "float32", "float64"], - "uniform_random") + check_variable_and_dtype( + var, + "Out", + ["uint16", "float16", "float32", "float64"], + "uniform_random", + ) if self._seed == 0: self._seed = block.program.random_seed @@ -265,62 +294,88 @@ class UniformInitializer(Initializer): # to be compatible of fp16 initializers if var.dtype == VarDesc.VarType.FP16: out_dtype = VarDesc.VarType.FP32 - out_var = block.create_var(name=unique_name.generate(".".join( - ['uniform_random', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['uniform_random', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_dtype = var.dtype out_var = var if framework._non_static_mode(): if in_dygraph_mode(): - out_var = _C_ops.uniform_random(var.shape, out_dtype, self._low, - self._high, self._seed, - _current_expected_place()) + out_var = _C_ops.uniform_random( + var.shape, + out_dtype, + self._low, + self._high, + self._seed, + _current_expected_place(), + ) elif _in_legacy_dygraph(): out_var = _legacy_C_ops.uniform_random( - 'shape', var.shape, 'min', self._low, 'max', self._high, - 'seed', self._seed, 'dtype', out_dtype, 'diag_num', - self._diag_num, 'diag_step', self._diag_step, 'diag_val', - self._diag_val) + 'shape', + var.shape, + 'min', + self._low, + 'max', + self._high, + 'seed', + self._seed, + 'dtype', + out_dtype, + 'diag_num', + self._diag_num, + 'diag_step', + self._diag_step, + 'diag_val', + self._diag_val, + ) if var.dtype == VarDesc.VarType.FP16: if in_dygraph_mode(): var_tmp = _C_ops.cast(out_var, var.dtype) elif _in_legacy_dygraph(): - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', - out_var.dtype, 'out_dtype', - var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, + 'in_dtype', + out_var.dtype, + 'out_dtype', + var.dtype, + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) return None else: - op = block.append_op(type="uniform_random", - inputs={}, - outputs={"Out": out_var}, - attrs={ - "shape": var.shape, - "dtype": out_dtype, - "min": self._low, - "max": self._high, - "seed": self._seed, - "diag_num": self._diag_num, - "diag_step": self._diag_step, - "diag_val": self._diag_val - }, - stop_gradient=True) + op = block.append_op( + type="uniform_random", + inputs={}, + outputs={"Out": out_var}, + attrs={ + "shape": var.shape, + "dtype": out_dtype, + "min": self._low, + "max": self._high, + "seed": self._seed, + "diag_num": self._diag_num, + 
"diag_step": self._diag_step, + "diag_val": self._diag_val, + }, + stop_gradient=True, + ) if var.dtype == VarDesc.VarType.FP16: - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -368,40 +423,61 @@ class NormalInitializer(Initializer): assert isinstance(block, framework.Block) - check_variable_and_dtype(var, "Out", - ["uint16", "float16", "float32", "float64"], - "guassian_random") + check_variable_and_dtype( + var, + "Out", + ["uint16", "float16", "float32", "float64"], + "guassian_random", + ) if self._seed == 0: self._seed = block.program.random_seed if in_dygraph_mode(): place = _current_expected_place() - out_var = _C_ops.gaussian_random(var.shape, self._mean, - self._std_dev, self._seed, - var.dtype, place) + out_var = _C_ops.gaussian_random( + var.shape, + self._mean, + self._std_dev, + self._seed, + var.dtype, + place, + ) out_var._share_underline_tensor_to(var) return None if _in_legacy_dygraph(): out_var = _legacy_C_ops.gaussian_random( - 'shape', var.shape, 'dtype', var.dtype, 'mean', self._mean, - 'std', self._std_dev, 'seed', self._seed, 'use_mkldnn', False) + 'shape', + var.shape, + 'dtype', + var.dtype, + 'mean', + self._mean, + 'std', + self._std_dev, + 'seed', + self._seed, + 'use_mkldnn', + False, + ) out_var._share_underline_tensor_to(var) return None else: - op = block.append_op(type="gaussian_random", - outputs={"Out": var}, - attrs={ - "shape": var.shape, - "dtype": var.dtype, - "mean": self._mean, - "std": self._std_dev, - "seed": self._seed, - "use_mkldnn": False - }, - stop_gradient=True) + op = block.append_op( + type="gaussian_random", + outputs={"Out": var}, + attrs={ + "shape": var.shape, + "dtype": var.dtype, + "mean": self._mean, + "std": self._std_dev, + "seed": self._seed, + "use_mkldnn": False, + }, + stop_gradient=True, + ) var.op = op return op @@ -454,20 +530,28 @@ class TruncatedNormalInitializer(Initializer): # to be compatible of fp16 initalizers if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: out_dtype = VarDesc.VarType.FP32 - out_var = block.create_var(name=unique_name.generate(".".join( - ['truncated_gaussian_random', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['truncated_gaussian_random', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_dtype = var.dtype out_var = var if in_dygraph_mode(): out_var = _C_ops.truncated_gaussian_random( - var.shape, self._mean, self._std_dev, self._seed, out_dtype, - _current_expected_place()) + var.shape, + self._mean, + self._std_dev, + self._seed, + out_dtype, + _current_expected_place(), + ) if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: var_tmp = _C_ops.cast(out_var, var.dtype) var_tmp._share_underline_tensor_to(var) @@ -477,35 +561,46 @@ class TruncatedNormalInitializer(Initializer): if _in_legacy_dygraph(): out_var = _legacy_C_ops.truncated_gaussian_random( - 'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean, - 'std', self._std_dev, 'seed', self._seed) + 'shape', + var.shape, + 'dtype', + out_dtype, + 'mean', + self._mean, + 'std', + self._std_dev, + 'seed', + self._seed, + ) if 
var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', out_var.dtype, - 'out_dtype', var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, 'in_dtype', out_var.dtype, 'out_dtype', var.dtype + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) return None else: - op = block.append_op(type="truncated_gaussian_random", - outputs={"Out": out_var}, - attrs={ - "shape": var.shape, - "dtype": out_dtype, - "mean": self._mean, - "std": self._std_dev, - "seed": self._seed - }, - stop_gradient=True) + op = block.append_op( + type="truncated_gaussian_random", + outputs={"Out": out_var}, + attrs={ + "shape": var.shape, + "dtype": out_dtype, + "mean": self._mean, + "std": self._std_dev, + "seed": self._seed, + }, + stop_gradient=True, + ) if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -578,9 +673,12 @@ class XavierInitializer(Initializer): block = self._check_block(block) assert isinstance(block, framework.Block) - check_variable_and_dtype(var, "Out", - ["uint16", "float16", "float32", "float64"], - "xavier_init") + check_variable_and_dtype( + var, + "Out", + ["uint16", "float16", "float32", "float64"], + "xavier_init", + ) f_in, f_out = self._compute_fans(var) @@ -593,14 +691,18 @@ class XavierInitializer(Initializer): # to be compatible of fp16 initalizers if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): out_dtype = VarDesc.VarType.FP32 - out_var = block.create_var(name=unique_name.generate(".".join( - ['xavier_init', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['xavier_init', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_dtype = var.dtype out_var = var @@ -609,34 +711,62 @@ class XavierInitializer(Initializer): if self._uniform: limit = math.sqrt(6.0 / float(fan_in + fan_out)) if in_dygraph_mode(): - out_var = _C_ops.uniform_random(out_var.shape, out_dtype, - -limit, limit, self._seed, - _current_expected_place()) + out_var = _C_ops.uniform_random( + out_var.shape, + out_dtype, + -limit, + limit, + self._seed, + _current_expected_place(), + ) elif _in_legacy_dygraph(): out_var = _legacy_C_ops.uniform_random( - 'shape', out_var.shape, 'min', -limit, 'max', limit, - 'seed', self._seed, 'dtype', out_dtype) + 'shape', + out_var.shape, + 'min', + -limit, + 'max', + limit, + 'seed', + self._seed, + 'dtype', + out_dtype, + ) else: std = math.sqrt(2.0 / float(fan_in + fan_out)) if in_dygraph_mode(): place = _current_expected_place() - out_var = _C_ops.gaussian_random(out_var.shape, 0.0, std, - self._seed, out_dtype, - place) + out_var = _C_ops.gaussian_random( + out_var.shape, 0.0, std, self._seed, out_dtype, place + ) else: out_var = _legacy_C_ops.gaussian_random( - 'shape', out_var.shape, 'dtype', out_dtype, 'mean', 0.0, - 'std', std, 'seed', self._seed) + 'shape', + out_var.shape, + 'dtype', + out_dtype, + 'mean', + 0.0, + 'std', + std, + 'seed', + 
self._seed, + ) if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): if in_dygraph_mode(): var_tmp = _C_ops.cast(out_var, var.dtype) elif _in_legacy_dygraph(): - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', - out_var.dtype, 'out_dtype', - var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, + 'in_dtype', + out_var.dtype, + 'out_dtype', + var.dtype, + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) @@ -644,39 +774,43 @@ class XavierInitializer(Initializer): else: if self._uniform: limit = math.sqrt(6.0 / float(fan_in + fan_out)) - op = block.append_op(type="uniform_random", - inputs={}, - outputs={"Out": out_var}, - attrs={ - "shape": out_var.shape, - "dtype": out_dtype, - "min": -limit, - "max": limit, - "seed": self._seed - }, - stop_gradient=True) + op = block.append_op( + type="uniform_random", + inputs={}, + outputs={"Out": out_var}, + attrs={ + "shape": out_var.shape, + "dtype": out_dtype, + "min": -limit, + "max": limit, + "seed": self._seed, + }, + stop_gradient=True, + ) else: std = math.sqrt(2.0 / float(fan_in + fan_out)) - op = block.append_op(type="gaussian_random", - outputs={"Out": out_var}, - attrs={ - "shape": out_var.shape, - "dtype": out_var.dtype, - "mean": 0.0, - "std": std, - "seed": self._seed - }, - stop_gradient=True) + op = block.append_op( + type="gaussian_random", + outputs={"Out": out_var}, + attrs={ + "shape": out_var.shape, + "dtype": out_var.dtype, + "mean": 0.0, + "std": std, + "seed": self._seed, + }, + stop_gradient=True, + ) if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -725,14 +859,15 @@ class MSRAInitializer(Initializer): """ - def __init__(self, - uniform=True, - fan_in=None, - seed=0, - negative_slope=0, - nonlinearity='relu'): - """Constructor for MSRAInitializer - """ + def __init__( + self, + uniform=True, + fan_in=None, + seed=0, + negative_slope=0, + nonlinearity='relu', + ): + """Constructor for MSRAInitializer""" assert uniform is not None assert seed is not None super(MSRAInitializer, self).__init__() @@ -767,14 +902,18 @@ class MSRAInitializer(Initializer): # to be compatible of fp16 initalizers if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): out_dtype = VarDesc.VarType.FP32 - out_var = block.create_var(name=unique_name.generate(".".join( - ['masra_init', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['masra_init', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_dtype = var.dtype out_var = var @@ -784,34 +923,62 @@ class MSRAInitializer(Initializer): gain = calculate_gain(self._nonlinearity, self._negative_slope) limit = gain * math.sqrt(3.0 / float(fan_in)) if in_dygraph_mode(): - out_var = _C_ops.uniform_random(var.shape, 
out_dtype, - -limit, limit, self._seed, - _current_expected_place()) + out_var = _C_ops.uniform_random( + var.shape, + out_dtype, + -limit, + limit, + self._seed, + _current_expected_place(), + ) else: out_var = _legacy_C_ops.uniform_random( - 'shape', out_var.shape, 'min', -limit, 'max', limit, - 'seed', self._seed, 'dtype', int(out_dtype)) + 'shape', + out_var.shape, + 'min', + -limit, + 'max', + limit, + 'seed', + self._seed, + 'dtype', + int(out_dtype), + ) else: gain = calculate_gain(self._nonlinearity, self._negative_slope) std = gain / math.sqrt(float(fan_in)) if in_dygraph_mode(): place = _current_expected_place() - out_var = _C_ops.gaussian_random(out_var.shape, 0.0, std, - self._seed, out_dtype, - place) + out_var = _C_ops.gaussian_random( + out_var.shape, 0.0, std, self._seed, out_dtype, place + ) else: out_var = _legacy_C_ops.gaussian_random( - 'shape', out_var.shape, 'dtype', int(out_dtype), 'mean', - 0.0, 'std', std, 'seed', self._seed) + 'shape', + out_var.shape, + 'dtype', + int(out_dtype), + 'mean', + 0.0, + 'std', + std, + 'seed', + self._seed, + ) if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): if in_dygraph_mode(): var_tmp = _C_ops.cast(out_var, var.dtype) elif _in_legacy_dygraph(): - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', - out_var.dtype, 'out_dtype', - var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, + 'in_dtype', + out_var.dtype, + 'out_dtype', + var.dtype, + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) @@ -820,41 +987,45 @@ class MSRAInitializer(Initializer): if self._uniform: gain = calculate_gain(self._nonlinearity, self._negative_slope) limit = gain * math.sqrt(3.0 / float(fan_in)) - op = block.append_op(type="uniform_random", - inputs={}, - outputs={"Out": out_var}, - attrs={ - "shape": out_var.shape, - "dtype": int(out_dtype), - "min": -limit, - "max": limit, - "seed": self._seed - }, - stop_gradient=True) + op = block.append_op( + type="uniform_random", + inputs={}, + outputs={"Out": out_var}, + attrs={ + "shape": out_var.shape, + "dtype": int(out_dtype), + "min": -limit, + "max": limit, + "seed": self._seed, + }, + stop_gradient=True, + ) else: gain = calculate_gain(self._nonlinearity, self._negative_slope) std = gain / math.sqrt(float(fan_in)) - op = block.append_op(type="gaussian_random", - outputs={"Out": out_var}, - attrs={ - "shape": out_var.shape, - "dtype": int(out_dtype), - "mean": 0.0, - "std": std, - "seed": self._seed - }, - stop_gradient=True) + op = block.append_op( + type="gaussian_random", + outputs={"Out": out_var}, + attrs={ + "shape": out_var.shape, + "dtype": int(out_dtype), + "mean": 0.0, + "std": std, + "seed": self._seed, + }, + stop_gradient=True, + ) if var.dtype == VarDesc.VarType.FP16 or ( - var.dtype == VarDesc.VarType.BF16 and not self._uniform): - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + var.dtype == VarDesc.VarType.BF16 and not self._uniform + ): + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -905,8 +1076,7 @@ class BilinearInitializer(Initializer): """ def __init__(self): - """Constructor for BilinearInitializer. 
- """ + """Constructor for BilinearInitializer.""" super(BilinearInitializer, self).__init__() def forward(self, var, block=None): @@ -937,9 +1107,9 @@ class BilinearInitializer(Initializer): weight = np.zeros(np.prod(var.shape), dtype='float32') size = shape[3] # factor - f = np.ceil(size / 2.) + f = np.ceil(size / 2.0) # center - c = (2 * f - 1 - f % 2) / (2. * f) + c = (2 * f - 1 - f % 2) / (2.0 * f) for i in range(np.prod(shape)): x = i % size y = (i / size) % size @@ -948,15 +1118,20 @@ class BilinearInitializer(Initializer): # to be compatible of fp16 initalizers if var.dtype in [ - VarDesc.VarType.FP16, VarDesc.VarType.BF16, VarDesc.VarType.FP64 + VarDesc.VarType.FP16, + VarDesc.VarType.BF16, + VarDesc.VarType.FP64, ]: out_dtype = VarDesc.VarType.FP32 - out_var = block.create_var(name=unique_name.generate(".".join( - ['bilinear_init', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['bilinear_init', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_dtype = var.dtype out_var = var @@ -972,46 +1147,64 @@ class BilinearInitializer(Initializer): if framework._non_static_mode(): if in_dygraph_mode(): - _C_ops.assign_value_(out_var, list(shape), out_dtype, values, - _current_expected_place()) + _C_ops.assign_value_( + out_var, + list(shape), + out_dtype, + values, + _current_expected_place(), + ) elif _in_legacy_dygraph(): - _legacy_C_ops.assign_value(out_var, 'shape', list(shape), - 'dtype', out_dtype, value_name, - values) + _legacy_C_ops.assign_value( + out_var, + 'shape', + list(shape), + 'dtype', + out_dtype, + value_name, + values, + ) if var.dtype in [ - VarDesc.VarType.FP16, VarDesc.VarType.BF16, - VarDesc.VarType.FP64 + VarDesc.VarType.FP16, + VarDesc.VarType.BF16, + VarDesc.VarType.FP64, ]: if in_dygraph_mode(): var_tmp = _C_ops.cast(out_var, var.dtype) elif _in_legacy_dygraph(): - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', - out_var.dtype, 'out_dtype', - var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, + 'in_dtype', + out_var.dtype, + 'out_dtype', + var.dtype, + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) return None else: - op = block.append_op(type='assign_value', - outputs={'Out': [out_var]}, - attrs={ - 'dtype': out_dtype, - 'shape': list(shape), - value_name: values - }) + op = block.append_op( + type='assign_value', + outputs={'Out': [out_var]}, + attrs={ + 'dtype': out_dtype, + 'shape': list(shape), + value_name: values, + }, + ) if var.dtype in [ - VarDesc.VarType.FP16, VarDesc.VarType.BF16, - VarDesc.VarType.FP64 + VarDesc.VarType.FP16, + VarDesc.VarType.BF16, + VarDesc.VarType.FP64, ]: - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -1039,6 +1232,7 @@ class NumpyArrayInitializer(Initializer): def __init__(self, value): import numpy + assert isinstance(value, numpy.ndarray) super(NumpyArrayInitializer, self).__init__() self._value = value @@ -1063,12 +1257,15 @@ class NumpyArrayInitializer(Initializer): if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: out_dtype = VarDesc.VarType.FP32 np_value = 
self._value.astype("float32") - out_var = block.create_var(name=unique_name.generate(".".join( - ['numpy_array_init', var.name, 'tmp'])), - shape=var.shape, - dtype=out_dtype, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate( + ".".join(['numpy_array_init', var.name, 'tmp']) + ), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_var = var out_dtype = var.dtype @@ -1083,47 +1280,64 @@ class NumpyArrayInitializer(Initializer): else: raise ValueError("Unsupported dtype %s", self._value.dtype) if self._value.size > 1024 * 1024 * 1024: - raise ValueError("The size of input is too big. Please consider " - "saving it to file and 'load_op' to load it") + raise ValueError( + "The size of input is too big. Please consider " + "saving it to file and 'load_op' to load it" + ) if framework._non_static_mode(): if in_dygraph_mode(): - _C_ops.assign_value_(out_var, - list(self._value.shape), out_dtype, values, - _current_expected_place()) + _C_ops.assign_value_( + out_var, + list(self._value.shape), + out_dtype, + values, + _current_expected_place(), + ) elif _in_legacy_dygraph(): - _legacy_C_ops.assign_value(out_var, 'shape', - list(self._value.shape), 'dtype', - out_dtype, value_name, values) + _legacy_C_ops.assign_value( + out_var, + 'shape', + list(self._value.shape), + 'dtype', + out_dtype, + value_name, + values, + ) if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: if in_dygraph_mode(): var_tmp = _C_ops.cast(out_var, var.dtype) elif _in_legacy_dygraph(): - var_tmp = _legacy_C_ops.cast(out_var, 'in_dtype', - out_var.dtype, 'out_dtype', - var.dtype) + var_tmp = _legacy_C_ops.cast( + out_var, + 'in_dtype', + out_var.dtype, + 'out_dtype', + var.dtype, + ) var_tmp._share_underline_tensor_to(var) else: out_var._share_underline_tensor_to(var) return None else: - op = block.append_op(type='assign_value', - outputs={'Out': out_var}, - attrs={ - 'dtype': out_dtype, - 'shape': list(self._value.shape), - value_name: values - }, - stop_gradient=True) + op = block.append_op( + type='assign_value', + outputs={'Out': out_var}, + attrs={ + 'dtype': out_dtype, + 'shape': list(self._value.shape), + value_name: values, + }, + stop_gradient=True, + ) if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]: - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }) + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + ) var.op = op return op @@ -1179,13 +1393,21 @@ def set_global_initializer(weight_init, bias_init=None): nn.initializer.set_global_initializer(None) """ - check_type(weight_init, 'weight_init', (Initializer, type(None)), - 'set_global_initializer') + check_type( + weight_init, + 'weight_init', + (Initializer, type(None)), + 'set_global_initializer', + ) global _global_weight_initializer_ _global_weight_initializer_ = weight_init - check_type(bias_init, 'bias_init', (Initializer, type(None)), - 'set_global_initializer') + check_type( + bias_init, + 'bias_init', + (Initializer, type(None)), + 'set_global_initializer', + ) global _global_bias_initializer_ _global_bias_initializer_ = bias_init @@ -1244,14 +1466,16 @@ def calculate_gain(nonlinearity, param=None): 'tanh': 5.0 / 3, 'relu': math.sqrt(2.0), 'leaky_relu': math.sqrt(2.0 / (1 + param**2)), - 'selu': 3.0 / 4 + 'selu': 3.0 / 4, } if 
nonlinearity in recommended_gain.keys(): return recommended_gain[nonlinearity] else: raise ValueError( "nonlinearity function {} is not suppported now.".format( - nonlinearity)) + nonlinearity + ) + ) # We short the class name, since users will use the initializer with the package diff --git a/python/paddle/fluid/input.py b/python/paddle/fluid/input.py index bfab41f86a8e691a8eb00054558fd7cd07da9005..255a17d6483c48da1952e77063334bc8abc02beb 100644 --- a/python/paddle/fluid/input.py +++ b/python/paddle/fluid/input.py @@ -25,8 +25,8 @@ __all__ = ['one_hot', 'embedding'] def one_hot(input, depth, allow_out_of_range=False): """ :alias_main: paddle.nn.functional.one_hot - :alias: paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot - :old_api: paddle.fluid.one_hot + :alias: paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot + :old_api: paddle.fluid.one_hot The operator converts each id in the input to an one-hot vector with a depth length. The value in the vector dimension corresponding to the id @@ -122,23 +122,27 @@ def one_hot(input, depth, allow_out_of_range=False): depth.stop_gradient = True inputs = {'X': input, 'depth_tensor': depth} attrs = {'allow_out_of_range': allow_out_of_range} - helper.append_op(type="one_hot_v2", - inputs=inputs, - attrs=attrs, - outputs={'Out': one_hot_out}, - stop_gradient=True) + helper.append_op( + type="one_hot_v2", + inputs=inputs, + attrs=attrs, + outputs={'Out': one_hot_out}, + stop_gradient=True, + ) return one_hot_out @static_only @deprecated(since='2.0.0', update_to='paddle.nn.functional.embedding') -def embedding(input, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): +def embedding( + input, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', +): r""" :api_attr: Static Graph @@ -310,28 +314,35 @@ def embedding(input, helper = LayerHelper('embedding', **locals()) check_variable_and_dtype(input, 'input', ['int64'], 'fluid.embedding') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64', 'uint16'], - 'fluid.embedding') + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'float64', 'uint16'], + 'fluid.embedding', + ) remote_prefetch = is_sparse and (not is_distributed) if remote_prefetch: assert is_sparse is True and is_distributed is False - w = helper.create_parameter(attr=helper.param_attr, - shape=size, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False + ) tmp = helper.create_variable_for_type_inference(dtype) - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) - helper.append_op(type='lookup_table_v2', - inputs={ - 'Ids': input, - 'W': w - }, - outputs={'Out': tmp}, - attrs={ - 'is_sparse': is_sparse, - 'is_distributed': is_distributed, - 'remote_prefetch': remote_prefetch, - 'padding_idx': padding_idx - }) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (size[0] + padding_idx) + ) + helper.append_op( + type='lookup_table_v2', + inputs={'Ids': input, 'W': w}, + outputs={'Out': tmp}, + attrs={ + 'is_sparse': is_sparse, + 'is_distributed': is_distributed, + 'remote_prefetch': remote_prefetch, + 'padding_idx': padding_idx, + }, + ) return tmp diff --git a/python/paddle/fluid/install_check.py b/python/paddle/fluid/install_check.py index 
299275be5e2c32d5ac162d96ef624b9e00e02ecf..74bc8df61bbcb81679efbab7d7e0e558dca6a697 100644 --- a/python/paddle/fluid/install_check.py +++ b/python/paddle/fluid/install_check.py @@ -14,7 +14,13 @@ import os import paddle -from .framework import Program, program_guard, unique_name, cuda_places, cpu_places +from .framework import ( + Program, + program_guard, + unique_name, + cuda_places, + cpu_places, +) from .param_attr import ParamAttr from .initializer import Constant from . import layers @@ -31,13 +37,11 @@ __all__ = ['run_check'] class SimpleLayer(Layer): - def __init__(self, input_size): super(SimpleLayer, self).__init__() self._linear1 = nn.Linear( - input_size, - 3, - param_attr=ParamAttr(initializer=Constant(value=0.1))) + input_size, 3, param_attr=ParamAttr(initializer=Constant(value=0.1)) + ) def forward(self, inputs): x = self._linear1(inputs) @@ -74,7 +78,8 @@ def run_check(): except Exception as e: logging.warning( "You are using GPU version Paddle Fluid, But Your CUDA Device is not set properly" - "\n Original Error is {}".format(e)) + "\n Original Error is {}".format(e) + ) return 0 device_list = cuda_places() else: @@ -100,22 +105,29 @@ def run_check(): simple_layer = SimpleLayer(input_size=2) out = simple_layer(inp) exe = executor.Executor( - core.CUDAPlace(0) if core.is_compiled_with_cuda() and - (core.get_cuda_device_count() > 0) else core.CPUPlace()) + core.CUDAPlace(0) + if core.is_compiled_with_cuda() + and (core.get_cuda_device_count() > 0) + else core.CPUPlace() + ) loss = paddle.mean(out) loss.persistable = True optimizer.SGD(learning_rate=0.01).minimize(loss) startup_prog.random_seed = 1 compiled_prog = compiler.CompiledProgram( - train_prog).with_data_parallel( - build_strategy=build_strategy, - loss_name=loss.name, - places=device_list) + train_prog + ).with_data_parallel( + build_strategy=build_strategy, + loss_name=loss.name, + places=device_list, + ) exe.run(startup_prog) - exe.run(compiled_prog, - feed={inp.name: np_inp_muti}, - fetch_list=[loss.name]) + exe.run( + compiled_prog, + feed={inp.name: np_inp_muti}, + fetch_list=[loss.name], + ) def test_simple_exe(): train_prog = Program() @@ -124,20 +136,26 @@ def run_check(): with executor.scope_guard(scope): with program_guard(train_prog, startup_prog): with unique_name.guard(): - inp0 = layers.data(name="inp", - shape=[2, 2], - append_batch_size=False) + inp0 = layers.data( + name="inp", shape=[2, 2], append_batch_size=False + ) simple_layer0 = SimpleLayer(input_size=2) out0 = simple_layer0(inp0) param_grads = backward.append_backward( out0, - parameter_list=[simple_layer0._linear1.weight.name])[0] + parameter_list=[simple_layer0._linear1.weight.name], + )[0] exe0 = executor.Executor( - core.CUDAPlace(0) if core.is_compiled_with_cuda() and - (core.get_cuda_device_count() > 0) else core.CPUPlace()) + core.CUDAPlace(0) + if core.is_compiled_with_cuda() + and (core.get_cuda_device_count() > 0) + else core.CPUPlace() + ) exe0.run(startup_prog) - exe0.run(feed={inp0.name: np_inp_single}, - fetch_list=[out0.name, param_grads[1].name]) + exe0.run( + feed={inp0.name: np_inp_single}, + fetch_list=[out0.name, param_grads[1].name], + ) test_simple_exe() @@ -160,6 +178,7 @@ def run_check(): print("\n Original Error is: {}".format(e)) print( "Your Paddle Fluid is installed successfully ONLY for SINGLE GPU or CPU! 
" - "\n Let's start deep Learning with Paddle Fluid now") + "\n Let's start deep Learning with Paddle Fluid now" + ) paddle.disable_static() diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 1af1d0a606d8e799294c03305cbbb1206af3c4eb..c5e55a95186a2b9e58ad0d0ace9c23023284a05a 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -28,10 +28,28 @@ import paddle from paddle.fluid import layers from paddle.fluid.executor import Executor, global_scope from paddle.fluid.evaluator import Evaluator -from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable, \ - program_guard, dygraph_not_support, static_only -from paddle.reader import cache, map_readers, buffered, compose, chain, shuffle, \ - ComposeNotAligned, firstn, xmap_readers, multiprocess_reader +from paddle.fluid.framework import ( + Program, + Parameter, + default_main_program, + default_startup_program, + Variable, + program_guard, + dygraph_not_support, + static_only, +) +from paddle.reader import ( + cache, + map_readers, + buffered, + compose, + chain, + shuffle, + ComposeNotAligned, + firstn, + xmap_readers, + multiprocess_reader, +) from .wrapped_decorator import signature_safe_contextmanager from paddle.fluid.compiler import CompiledProgram from paddle.fluid.log_helper import get_logger @@ -64,13 +82,12 @@ __all__ = [ 'get_program_persistable_vars', ] + reader.__all__ -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class _open_buffer(object): - def __init__(self, buffer): self.buffer = buffer @@ -79,7 +96,6 @@ class _open_buffer(object): class _buffer_reader(_open_buffer): - def __init__(self, buffer): super(_buffer_reader, self).__init__(buffer) self.initial_tell = self.buffer.tell() @@ -91,7 +107,6 @@ class _buffer_reader(_open_buffer): class _buffer_writer(_open_buffer): - def __exit__(self, *args): self.buffer.flush() @@ -111,7 +126,8 @@ def _open_file_buffer(path_or_buffer, mode): return _buffer_reader(path_or_buffer) else: raise ValueError( - "Expected 'r' or 'w' in mode but got {}".format(mode)) + "Expected 'r' or 'w' in mode but got {}".format(mode) + ) def _is_memory_buffer(buffer): @@ -163,9 +179,11 @@ def is_persistable(var): param = fluid.default_main_program().global_block().var('fc.b') res = fluid.io.is_persistable(param) """ - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -232,18 +250,22 @@ def get_program_persistable_vars(program): def _clone_var_in_block_(block, var): assert isinstance(var, Variable) if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR: - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=True) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True, + ) else: - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - persistable=True) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + 
type=var.type, + persistable=True, + ) @signature_safe_contextmanager @@ -268,21 +290,25 @@ def _get_valid_program(main_program=None): "The type of input main_program is invalid, expected tyep is Program, but received None" ) warnings.warn( - "The input is a CompiledProgram, this is not recommended.") + "The input is a CompiledProgram, this is not recommended." + ) if not isinstance(main_program, Program): raise TypeError( "The type of input main_program is invalid, expected type is fluid.Program, but received %s" - % type(main_program)) + % type(main_program) + ) return main_program @dygraph_not_support -def save_vars(executor, - dirname, - main_program=None, - vars=None, - predicate=None, - filename=None): +def save_vars( + executor, + dirname, + main_program=None, + vars=None, + predicate=None, + filename=None, +): """ Save specific variables in the `Program` to files. @@ -360,11 +386,13 @@ def save_vars(executor, main_program = _get_valid_program(main_program) if vars is None: - return save_vars(executor, - main_program=main_program, - dirname=dirname, - vars=list(filter(predicate, main_program.list_vars())), - filename=filename) + return save_vars( + executor, + main_program=main_program, + dirname=dirname, + vars=list(filter(predicate, main_program.list_vars())), + filename=filename, + ) else: params_var_name = "saved_params" # give warning when there is no var in model @@ -384,13 +412,15 @@ def save_vars(executor, continue new_var = _clone_var_in_block_(save_block, each_var) if filename is None and save_to_memory is False: - save_file_path = os.path.join(os.path.normpath(dirname), - new_var.name) + save_file_path = os.path.join( + os.path.normpath(dirname), new_var.name + ) save_block.append_op( type='save', inputs={'X': [new_var]}, outputs={}, - attrs={'file_path': os.path.normpath(save_file_path)}) + attrs={'file_path': os.path.normpath(save_file_path)}, + ) else: save_var_map[new_var.name] = new_var @@ -403,16 +433,19 @@ def save_vars(executor, if save_to_memory is False: save_path = os.path.join(os.path.normpath(dirname), filename) - saved_params = save_block.create_var(type=core.VarDesc.VarType.RAW, - name=params_var_name) + saved_params = save_block.create_var( + type=core.VarDesc.VarType.RAW, name=params_var_name + ) saved_params.desc.set_persistable(True) - save_block.append_op(type='save_combine', - inputs={'X': save_var_list}, - outputs={'Y': saved_params}, - attrs={ - 'file_path': save_path, - 'save_to_memory': save_to_memory - }) + save_block.append_op( + type='save_combine', + inputs={'X': save_var_list}, + outputs={'Y': saved_params}, + attrs={ + 'file_path': save_path, + 'save_to_memory': save_to_memory, + }, + ) # NOTE(zhiqiu): save op will add variable kLookupTablePath in save_program.desc, # which leads to diff on save_program and its desc. 
Call _sync_with_cpp @@ -489,12 +522,14 @@ def save_params(executor, dirname, main_program=None, filename=None): # The parameters weights and bias of the fc layer in the network are going to # be saved in different files in the path "./my_paddle_model" """ - return save_vars(executor, - dirname=dirname, - main_program=main_program, - vars=None, - predicate=is_parameter, - filename=filename) + return save_vars( + executor, + dirname=dirname, + main_program=main_program, + vars=None, + predicate=is_parameter, + filename=filename, + ) def _save_distributed_persistables(executor, dirname, main_program): @@ -568,21 +603,24 @@ def _save_distributed_persistables(executor, dirname, main_program): tmp = [str(dim) for dim in slice.shape] slice_shapes.append(",".join(tmp)) - block.append_op(type='recv_save', - attrs={ - "trainer_id": 0, - "shape": origin.shape, - "slice_shapes": slice_shapes, - "slice_varnames": slice_varnames, - "remote_varnames": remote_varnames, - "endpoints": endpoints, - "file_path": os.path.join(dirname, origin.name) - }) + block.append_op( + type='recv_save', + attrs={ + "trainer_id": 0, + "shape": origin.shape, + "slice_shapes": slice_shapes, + "slice_varnames": slice_varnames, + "remote_varnames": remote_varnames, + "endpoints": endpoints, + "file_path": os.path.join(dirname, origin.name), + }, + ) executor.run(prog) - def __save_distributed_lookup_tables(executor, dirname, - distributed_lookup_table, endpoints): + def __save_distributed_lookup_tables( + executor, dirname, distributed_lookup_table, endpoints + ): """ because the distributed lookup table may too huge to merge and save at one place, it will be saved at parameter server independent respectively. @@ -599,20 +637,20 @@ def _save_distributed_persistables(executor, dirname, main_program): attrs['epmap'] = endpoints attrs['dir'] = lookup_table_filename attrs['lookup_table'] = distributed_lookup_table - block.append_op(type='checkpoint_notify', - inputs={}, - outputs={}, - attrs=attrs) + block.append_op( + type='checkpoint_notify', inputs={}, outputs={}, attrs=attrs + ) executor.run(prog) def __exclude_vars(exclude_var_names=[]): - def is_valid(var): if var.name in exclude_var_names: return False - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -626,8 +664,11 @@ def _save_distributed_persistables(executor, dirname, main_program): "'_save_distributed_persistables' just be designed for distributed training." 
) - remote_params_map = main_program._parameters_on_pservers.get_distributed_vars_by_vtypes( - ["Optimizer", "RemotePrefetch"], groupby=True) + remote_params_map = ( + main_program._parameters_on_pservers.get_distributed_vars_by_vtypes( + ["Optimizer", "RemotePrefetch"], groupby=True + ) + ) exclude_var_names = [] if remote_params_map: @@ -640,19 +681,22 @@ def _save_distributed_persistables(executor, dirname, main_program): exclude_var_names.append(main_program._distributed_lookup_table) local_vars = list( - filter(__exclude_vars(exclude_var_names), main_program.list_vars())) - save_vars(executor, - main_program=main_program, - dirname=dirname, - vars=local_vars) + filter(__exclude_vars(exclude_var_names), main_program.list_vars()) + ) + save_vars( + executor, main_program=main_program, dirname=dirname, vars=local_vars + ) if main_program._is_chief: if remote_params_map: __save_remote_params(executor, dirname, remote_params_map) if main_program._distributed_lookup_table: __save_distributed_lookup_tables( - executor, dirname, main_program._distributed_lookup_table, - main_program._endpoints) + executor, + dirname, + main_program._distributed_lookup_table, + main_program._endpoints, + ) @dygraph_not_support @@ -714,24 +758,28 @@ def save_persistables(executor, dirname, main_program=None, filename=None): # "./my_paddle_model" """ if main_program and main_program._is_distributed: - return _save_distributed_persistables(executor, - dirname=dirname, - main_program=main_program) + return _save_distributed_persistables( + executor, dirname=dirname, main_program=main_program + ) else: - return save_vars(executor, - dirname=dirname, - main_program=main_program, - vars=None, - predicate=is_persistable, - filename=filename) - - -def load_vars(executor, - dirname, - main_program=None, - vars=None, - predicate=None, - filename=None): + return save_vars( + executor, + dirname=dirname, + main_program=main_program, + vars=None, + predicate=is_persistable, + filename=filename, + ) + + +def load_vars( + executor, + dirname, + main_program=None, + vars=None, + predicate=None, + filename=None, +): """ :api_attr: Static Graph @@ -820,13 +868,16 @@ def load_vars(executor, if not isinstance(main_program, Program): raise TypeError( "The type of input main_program is invalid, expected type is fluid.Program, but received %s" - % type(main_program)) + % type(main_program) + ) - load_vars(executor, - dirname=dirname, - main_program=main_program, - vars=list(filter(predicate, main_program.list_vars())), - filename=filename) + load_vars( + executor, + dirname=dirname, + main_program=main_program, + vars=list(filter(predicate, main_program.list_vars())), + filename=filename, + ) else: load_prog = Program() load_block = load_prog.global_block() @@ -837,7 +888,8 @@ def load_vars(executor, if not isinstance(main_program, Program): raise TypeError( "The type of input main_program is invalid, expected type is fluid.Program, but received %s" - % type(main_program)) + % type(main_program) + ) # save origin param shape orig_para_shape = {} @@ -854,7 +906,8 @@ def load_vars(executor, if isinstance(each_var, Parameter): orig_para_shape[each_var.name] = tuple( - each_var.desc.get_shape()) + each_var.desc.get_shape() + ) if each_var.type == core.VarDesc.VarType.SELECTED_ROWS: sparse_vars.append(each_var) @@ -872,7 +925,8 @@ def load_vars(executor, type='load', inputs={}, outputs={'Out': [new_var]}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + attrs={'file_path': os.path.join(dirname, new_var.name)}, + ) else: 
load_var_map[new_var.name] = new_var @@ -881,7 +935,8 @@ def load_vars(executor, if filename is not None: raise ValueError( - "SelectedRows can not be load with load_combine") + "SelectedRows can not be load with load_combine" + ) new_var = _clone_var_in_block_(load_block, each_var) @@ -889,14 +944,17 @@ def load_vars(executor, if not os.path.exists(var_path): raise ValueError( "SelectedRows var {} can not find at {}".format( - new_var.name, var_path)) + new_var.name, var_path + ) + ) if os.path.isfile(var_path): load_block.append_op( type='load', inputs={}, outputs={'Out': [new_var]}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + attrs={'file_path': os.path.join(dirname, new_var.name)}, + ) else: blocks = [] block_paths = os.listdir(var_path) @@ -907,23 +965,29 @@ def load_vars(executor, slices = [] for block in blocks: - slice = load_block.create_var(name=block, - type=new_var.type, - shape=new_var.shape, - dtype=new_var.dtype, - persistable=False) + slice = load_block.create_var( + name=block, + type=new_var.type, + shape=new_var.shape, + dtype=new_var.dtype, + persistable=False, + ) slices.append(slice) file_path = os.path.join(var_path, block, "Param") - load_block.append_op(type='load', - inputs={}, - outputs={'Out': [slice]}, - attrs={'file_path': file_path}) + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [slice]}, + attrs={'file_path': file_path}, + ) - load_block.append_op(type='lookup_sparse_table_merge', - inputs={'X': slices}, - outputs={'Out': new_var}, - attrs={}) + load_block.append_op( + type='lookup_sparse_table_merge', + inputs={'X': slices}, + outputs={'Out': new_var}, + attrs={}, + ) if filename is not None: load_var_list = [] @@ -933,13 +997,15 @@ def load_vars(executor, if vars_from_memory is False: filename = os.path.join(dirname, filename) - load_block.append_op(type='load_combine', - inputs={}, - outputs={"Out": load_var_list}, - attrs={ - 'file_path': filename, - 'model_from_memory': vars_from_memory - }) + load_block.append_op( + type='load_combine', + inputs={}, + outputs={"Out": load_var_list}, + attrs={ + 'file_path': filename, + 'model_from_memory': vars_from_memory, + }, + ) executor.run(load_prog) # check var shape @@ -949,13 +1015,17 @@ def load_vars(executor, var_temp = paddle.fluid.global_scope().find_var(each_var.name) assert var_temp != None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape - assert each_var.name in orig_para_shape, each_var.name + "MUST in var list" + assert each_var.name in orig_para_shape, ( + each_var.name + "MUST in var list" + ) orig_shape = orig_para_shape.get(each_var.name) if new_shape != orig_shape: raise RuntimeError( "Variable's shape does not match, the Program requires a parameter with the shape of ({}), " - "while the loaded parameter (namely [ {} ]) has a shape of ({})." 
- .format(orig_shape, each_var.name, new_shape)) + "while the loaded parameter (namely [ {} ]) has a shape of ({}).".format( + orig_shape, each_var.name, new_shape + ) + ) @dygraph_not_support @@ -1012,11 +1082,13 @@ def load_params(executor, dirname, main_program=None, filename=None): fluid.io.load_params(executor=exe, dirname=param_path, main_program=None) """ - load_vars(executor, - dirname=dirname, - main_program=main_program, - predicate=is_parameter, - filename=filename) + load_vars( + executor, + dirname=dirname, + main_program=main_program, + predicate=is_parameter, + filename=filename, + ) @dygraph_not_support @@ -1064,15 +1136,17 @@ def load_persistables(executor, dirname, main_program=None, filename=None): """ if main_program and main_program._is_distributed: - _load_distributed_persistables(executor, - dirname=dirname, - main_program=main_program) + _load_distributed_persistables( + executor, dirname=dirname, main_program=main_program + ) else: - load_vars(executor, - dirname=dirname, - main_program=main_program, - predicate=is_persistable, - filename=filename) + load_vars( + executor, + dirname=dirname, + main_program=main_program, + predicate=is_persistable, + filename=filename, + ) def _load_distributed_persistables(executor, dirname, main_program=None): @@ -1122,35 +1196,38 @@ def _load_distributed_persistables(executor, dirname, main_program=None): offset = param.offset if is_slice: - slice = load_block.create_var(name=slice_var.name, - type=slice_var.type, - shape=slice_var.shape, - dtype=slice_var.dtype, - persistable=True) - - load_block.append_op(type='load', - inputs={}, - outputs={'Out': [slice]}, - attrs={ - 'file_path': - os.path.join(dirname, origin_var.name), - 'seek': - offset, - 'shape': - slice.shape - }) + slice = load_block.create_var( + name=slice_var.name, + type=slice_var.type, + shape=slice_var.shape, + dtype=slice_var.dtype, + persistable=True, + ) + + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [slice]}, + attrs={ + 'file_path': os.path.join(dirname, origin_var.name), + 'seek': offset, + 'shape': slice.shape, + }, + ) else: - origin = load_block.create_var(name="{}".format( - origin_var.name), - type=origin_var.type, - shape=origin_var.shape, - dtype=origin_var.dtype, - persistable=True) + origin = load_block.create_var( + name="{}".format(origin_var.name), + type=origin_var.type, + shape=origin_var.shape, + dtype=origin_var.dtype, + persistable=True, + ) load_block.append_op( type='load', inputs={}, outputs={'Out': [origin]}, - attrs={'file_path': os.path.join(dirname, origin_var.name)}) + attrs={'file_path': os.path.join(dirname, origin_var.name)}, + ) load_block.append_op( type='delete_var', @@ -1172,63 +1249,78 @@ def _load_distributed_persistables(executor, dirname, main_program=None): "'_load_distributed_persistables' need current_endpoint set in DistributeTranspiler.transpile" ) - need_load_vars = main_program._parameters_on_pservers.get_distributed_vars_by_ep( - main_program._ps_endpoint) + need_load_vars = ( + main_program._parameters_on_pservers.get_distributed_vars_by_ep( + main_program._ps_endpoint + ) + ) __load_persistable_vars(executor, dirname, need_load_vars) -def prepend_feed_ops(inference_program, - feed_target_names, - feed_holder_name='feed'): +def prepend_feed_ops( + inference_program, feed_target_names, feed_holder_name='feed' +): if len(feed_target_names) == 0: return global_block = inference_program.global_block() - feed_var = global_block.create_var(name=feed_holder_name, - 
type=core.VarDesc.VarType.FEED_MINIBATCH, - persistable=True) + feed_var = global_block.create_var( + name=feed_holder_name, + type=core.VarDesc.VarType.FEED_MINIBATCH, + persistable=True, + ) for i, name in enumerate(feed_target_names): if not global_block.has_var(name): raise ValueError( "The feeded_var_names[{i}]: '{name}' doesn't exist in pruned inference program. " "Please check whether '{name}' is a valid feed_var name, or remove it from feeded_var_names " - "if '{name}' is not involved in the target_vars calculation.". - format(i=i, name=name)) + "if '{name}' is not involved in the target_vars calculation.".format( + i=i, name=name + ) + ) out = global_block.var(name) - global_block._prepend_op(type='feed', - inputs={'X': [feed_var]}, - outputs={'Out': [out]}, - attrs={'col': i}) + global_block._prepend_op( + type='feed', + inputs={'X': [feed_var]}, + outputs={'Out': [out]}, + attrs={'col': i}, + ) -def append_fetch_ops(inference_program, - fetch_target_names, - fetch_holder_name='fetch'): +def append_fetch_ops( + inference_program, fetch_target_names, fetch_holder_name='fetch' +): global_block = inference_program.global_block() - fetch_var = global_block.create_var(name=fetch_holder_name, - type=core.VarDesc.VarType.FETCH_LIST, - persistable=True) + fetch_var = global_block.create_var( + name=fetch_holder_name, + type=core.VarDesc.VarType.FETCH_LIST, + persistable=True, + ) for i, name in enumerate(fetch_target_names): - global_block.append_op(type='fetch', - inputs={'X': [name]}, - outputs={'Out': [fetch_var]}, - attrs={'col': i}) + global_block.append_op( + type='fetch', + inputs={'X': [name]}, + outputs={'Out': [fetch_var]}, + attrs={'col': i}, + ) @static_only @deprecated(since="2.0.0", update_to="paddle.static.save_inference_model") -def save_inference_model(dirname, - feeded_var_names, - target_vars, - executor, - main_program=None, - model_filename=None, - params_filename=None, - export_for_deployment=True, - program_only=False, - clip_extra=True): +def save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + main_program=None, + model_filename=None, + params_filename=None, + export_for_deployment=True, + program_only=False, + clip_extra=True, +): """ Prune the given `main_program` to build a new program especially for inference, and then save it and all related parameters to given `dirname` . 
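# Illustrative usage sketch, not part of the diff: a minimal round trip through the
# save_inference_model()/load_inference_model() pair reformatted in the surrounding
# hunks, assuming a tiny fc network and the hypothetical directory "./infer_model".
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()

image = fluid.data(name="img", shape=[None, 784], dtype="float32")
predict = fluid.layers.fc(input=image, size=10, act="softmax")

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# Prune the default main program to the feed/fetch interface and save it with its params.
fluid.io.save_inference_model(
    dirname="./infer_model",
    feeded_var_names=["img"],
    target_vars=[predict],
    executor=exe,
)

# Reload the pruned program and run one forward pass.
[program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    dirname="./infer_model", executor=exe
)
out = exe.run(
    program,
    feed={feed_names[0]: np.random.rand(1, 784).astype("float32")},
    fetch_list=fetch_targets,
)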
@@ -1314,16 +1406,19 @@ def save_inference_model(dirname, elif export_for_deployment: if len(feeded_var_names) > 0: # TODO(paddle-dev): polish these code blocks - if not (bool(feeded_var_names) - and all(isinstance(name, str) - for name in feeded_var_names)): + if not ( + bool(feeded_var_names) + and all(isinstance(name, str) for name in feeded_var_names) + ): raise ValueError("'feed_var_names' should be a list of str.") if isinstance(target_vars, Variable): target_vars = [target_vars] elif export_for_deployment: - if not (bool(target_vars) - and all(isinstance(var, Variable) for var in target_vars)): + if not ( + bool(target_vars) + and all(isinstance(var, Variable) for var in target_vars) + ): raise ValueError("'target_vars' should be a list of Variable.") main_program = _get_valid_program(main_program) @@ -1384,7 +1479,8 @@ def save_inference_model(dirname, main_program.desc.flush() main_program = main_program._prune_with_input( - feeded_var_names=feeded_var_names, targets=target_vars) + feeded_var_names=feeded_var_names, targets=target_vars + ) main_program = main_program._inference_optimize(prune_read_op=True) fetch_var_names = [v.name for v in target_vars] @@ -1394,7 +1490,8 @@ def save_inference_model(dirname, name=target_v.name, shape=target_v.shape, dtype=target_v.dtype, - persistable=target_v.persistable) + persistable=target_v.persistable, + ) prepend_feed_ops(main_program, feeded_var_names) append_fetch_ops(main_program, fetch_var_names) @@ -1404,14 +1501,18 @@ def save_inference_model(dirname, with open(model_basename, "wb") as f: f.write( main_program._remove_training_info( - clip_extra=clip_extra).desc.serialize_to_string()) + clip_extra=clip_extra + ).desc.serialize_to_string() + ) else: # TODO(panyx0718): Save more information so that it can also be used # for training and more flexible post-processing. with open(model_basename + ".main_program", "wb") as f: f.write( main_program._remove_training_info( - clip_extra=clip_extra).desc.serialize_to_string()) + clip_extra=clip_extra + ).desc.serialize_to_string() + ) if program_only: warnings.warn( @@ -1430,11 +1531,13 @@ def save_inference_model(dirname, @static_only @deprecated(since="2.0.0", update_to="paddle.static.load_inference_model") -def load_inference_model(dirname, - executor, - model_filename=None, - params_filename=None, - pserver_endpoints=None): +def load_inference_model( + dirname, + executor, + model_filename=None, + params_filename=None, + pserver_endpoints=None, +): """ Load the inference model from a given directory. By this API, you can get the model structure(Inference Program) and model parameters. If you just want to load @@ -1534,8 +1637,9 @@ def load_inference_model(dirname, if model_filename is None: model_filename = '__model__' - model_filename = os.path.join(load_dirname, - os.path.basename(model_filename)) + model_filename = os.path.join( + load_dirname, os.path.basename(model_filename) + ) if params_filename is not None: params_filename = os.path.basename(params_filename) @@ -1554,8 +1658,9 @@ def load_inference_model(dirname, program = Program.parse_from_string(program_desc_str) if not core._is_program_version_supported(program._version()): - raise ValueError("Unsupported program version: %d\n" % - program._version()) + raise ValueError( + "Unsupported program version: %d\n" % program._version() + ) # Binary data also need versioning. 
load_persistables(executor, load_dirname, program, params_filename) @@ -1666,8 +1771,10 @@ def _save_persistable_nodes(executor, dirname, graph): var_list = [] for node in persistable_nodes: var_desc = node.var() - if var_desc.type() == core.VarDesc.VarType.RAW or \ - var_desc.type() == core.VarDesc.VarType.READER: + if ( + var_desc.type() == core.VarDesc.VarType.RAW + or var_desc.type() == core.VarDesc.VarType.READER + ): continue var = program.global_block().create_var( name=var_desc.name(), @@ -1675,7 +1782,8 @@ def _save_persistable_nodes(executor, dirname, graph): dtype=var_desc.dtype(), type=var_desc.type(), lod_level=var_desc.lod_level(), - persistable=var_desc.persistable()) + persistable=var_desc.persistable(), + ) var_list.append(var) save_vars(executor=executor, dirname=dirname, vars=var_list) @@ -1705,8 +1813,10 @@ def _load_persistable_nodes(executor, dirname, graph): for node in persistable_nodes: var_desc = node.var() - if var_desc.type() == core.VarDesc.VarType.RAW or \ - var_desc.type() == core.VarDesc.VarType.READER: + if ( + var_desc.type() == core.VarDesc.VarType.RAW + or var_desc.type() == core.VarDesc.VarType.READER + ): continue var = program.global_block().create_var( name=var_desc.name(), @@ -1714,7 +1824,8 @@ def _load_persistable_nodes(executor, dirname, graph): dtype=var_desc.dtype(), type=var_desc.type(), lod_level=var_desc.lod_level(), - persistable=var_desc.persistable()) + persistable=var_desc.persistable(), + ) if _exist(var): var_list.append(var) else: @@ -1731,7 +1842,8 @@ def _unpack_saved_dict(saved_obj, protocol): for key, value in saved_obj.items(): if isinstance(value, np.ndarray): MAX_NUMBER_OF_ELEMENT = int( - (2**30 - 1) / value.dtype.itemsize) + (2**30 - 1) / value.dtype.itemsize + ) num_element = np.prod(value.shape) if num_element > MAX_NUMBER_OF_ELEMENT: unpack_infor[key] = {} @@ -1739,15 +1851,19 @@ def _unpack_saved_dict(saved_obj, protocol): unpack_infor[key]["slices"] = [] value = value.flatten() for i in range( - int( - math.ceil(num_element * 1.0 / - MAX_NUMBER_OF_ELEMENT))): + int( + math.ceil( + num_element * 1.0 / MAX_NUMBER_OF_ELEMENT + ) + ) + ): part_name = key + "@@." 
+ str(i) unpack_infor[key]["slices"].append(part_name) temp_saved_obj[part_name] = value[ - i * - MAX_NUMBER_OF_ELEMENT:MAX_NUMBER_OF_ELEMENT * - (i + 1)] + i + * MAX_NUMBER_OF_ELEMENT : MAX_NUMBER_OF_ELEMENT + * (i + 1) + ] if unpack_infor: for key, value in unpack_infor.items(): @@ -1767,7 +1883,8 @@ def _pack_loaded_dict(load_obj): for key, value in load_obj[unpack_info].items(): slices = [load_obj[part] for part in value["slices"]] load_obj[key] = np.concatenate(slices).reshape( - value["OriginShape"]) + value["OriginShape"] + ) removes += value["slices"] for key in removes: load_obj.pop(key) @@ -1778,7 +1895,6 @@ def _pack_loaded_dict(load_obj): @static_only def _legacy_save(param_dict, model_path, protocol=2): - def get_tensor(var): if isinstance(var, (core.VarBase, core.eager.Tensor)): return var.numpy() @@ -1789,14 +1905,16 @@ def _legacy_save(param_dict, model_path, protocol=2): param_dict = {name: get_tensor(param_dict[name]) for name in param_dict} # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' - if _is_file_path( - model_path - ) and sys.platform == 'darwin' and sys.version_info.major == 3: + if ( + _is_file_path(model_path) + and sys.platform == 'darwin' + and sys.version_info.major == 3 + ): pickle_bytes = pickle.dumps(param_dict, protocol=protocol) with open(model_path, 'wb') as f: max_bytes = 2**30 for i in range(0, len(pickle_bytes), max_bytes): - f.write(pickle_bytes[i:i + max_bytes]) + f.write(pickle_bytes[i : i + max_bytes]) else: with _open_file_buffer(model_path, 'wb') as f: pickle.dump(param_dict, f, protocol=protocol) @@ -1843,8 +1961,9 @@ def save(program, model_path, protocol=4, **configs): """ base_name = os.path.basename(model_path) - assert base_name != "", \ - "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received model_path is empty string." + assert ( + base_name != "" + ), "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received model_path is empty string." 
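# Illustrative usage sketch, not part of the diff: how the save()/load() pair in this
# module is typically called. model_path must be of the form "dirname/filename" (not a
# bare directory), which is what the assert above enforces; "./checkpoint/model" and
# the one-layer network are assumptions for the example.
import paddle
import paddle.fluid as fluid

paddle.enable_static()

x = fluid.data(name="x", shape=[None, 4], dtype="float32")
y = fluid.layers.fc(input=x, size=2)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

prog = fluid.default_main_program()
fluid.save(prog, "./checkpoint/model")       # writes model.pdparams / model.pdopt next to the prefix
fluid.load(prog, "./checkpoint/model", exe)  # restores the saved parameters into the scope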
if 'pickle_protocol' in configs: protocol = configs['pickle_protocol'] warnings.warn( @@ -1852,13 +1971,16 @@ def save(program, model_path, protocol=4, **configs): ) if not isinstance(protocol, int): - raise ValueError("The 'protocol' MUST be `int`, but received {}".format( - type(protocol))) + raise ValueError( + "The 'protocol' MUST be `int`, but received {}".format( + type(protocol) + ) + ) if protocol < 2 or protocol > 4: raise ValueError( - "Expected 1<'protocol'<5, but received protocol={}".format( - protocol)) + "Expected 1<'protocol'<5, but received protocol={}".format(protocol) + ) dir_name = os.path.dirname(model_path) if dir_name and not os.path.exists(dir_name): @@ -1879,13 +2001,14 @@ def save(program, model_path, protocol=4, **configs): with open(model_path + ".pdparams", 'wb') as f: max_bytes = 2**30 for i in range(0, len(pickle_bytes), max_bytes): - f.write(pickle_bytes[i:i + max_bytes]) + f.write(pickle_bytes[i : i + max_bytes]) else: with open(model_path + ".pdparams", 'wb') as f: pickle.dump(param_dict, f, protocol=protocol) optimizer_var_list = list( - filter(is_belong_to_optimizer, program.list_vars())) + filter(is_belong_to_optimizer, program.list_vars()) + ) opt_dict = {p.name: get_tensor(p) for p in optimizer_var_list} with open(model_path + ".pdopt", 'wb') as f: @@ -1971,8 +2094,10 @@ def load(program, model_path, executor=None, var_list=None): # model file save by fluid.save not found, try to load model file saved with # [save_vars, save_params, save_persistables] _logger.debug( - "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]" - .format(parameter_file_name)) + "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]".format( + parameter_file_name + ) + ) if executor is None: raise ValueError( "executor is required when loading model file saved with [ save_params, save_persistables, save_vars ]" @@ -1988,30 +2113,36 @@ def load(program, model_path, executor=None, var_list=None): for root, dirs, files in os.walk(model_path, topdown=False): for f in files: binary_file_set.add( - os.path.join(root, f).replace("\\", "/")) + os.path.join(root, f).replace("\\", "/") + ) program_var_list = list(program.list_vars()) loaded_var_list = [] for var in program_var_list: var_path = os.path.join(model_path, var.name).replace("\\", "/") - load_condition = var_list_names is None or var.name in var_list_names + load_condition = ( + var_list_names is None or var.name in var_list_names + ) if var_path in binary_file_set and load_condition: loaded_var_list.append(var) binary_file_set.remove(var_path) if len(binary_file_set) > 0: unused_var_list = " ".join(list(binary_file_set)) - _logger.warning("variable file [ %s ] not used" % - (" ".join(list(binary_file_set)))) + _logger.warning( + "variable file [ %s ] not used" + % (" ".join(list(binary_file_set))) + ) try: - load_vars(executor=executor, - dirname=model_path, - vars=loaded_var_list) + load_vars( + executor=executor, dirname=model_path, vars=loaded_var_list + ) except RuntimeError as e: _logger.error(e) raise e except: raise RuntimeError( "Failed to load model file, please make sure model file is saved with the " - "following APIs: save_params, save_persistables, save_vars") + "following APIs: save_params, save_persistables, save_vars" + ) return elif os.path.isfile(model_path): @@ -2026,21 +2157,26 @@ def load(program, model_path, executor=None, var_list=None): for var in var_list: if var.name not in program_var_name_set: raise LookupError( - 
"loaded var [{}] is not in program variable list") + "loaded var [{}] is not in program variable list" + ) dir_name, file_name = os.path.split(model_path) try: - load_vars(executor=executor, - dirname=dir_name, - vars=var_list, - filename=file_name) + load_vars( + executor=executor, + dirname=dir_name, + vars=var_list, + filename=file_name, + ) except RuntimeError as e: _logger.error(e) raise e except: - raise RuntimeError("Failed to load model file , please make sure model file is saved with the " \ - "the following APIs: [ save_params, save_persistables, save_vars ]. " \ - "When these API called, filename CANNOT be None") + raise RuntimeError( + "Failed to load model file , please make sure model file is saved with the " + "the following APIs: [ save_params, save_persistables, save_vars ]. " + "When these API called, filename CANNOT be None" + ) return @@ -2073,9 +2209,9 @@ def load(program, model_path, executor=None, var_list=None): parameter_list = list(filter(is_parameter, program.list_vars())) if executor: - paddle.fluid.core._create_loaded_parameter(parameter_list, - global_scope(), - executor._default_executor) + paddle.fluid.core._create_loaded_parameter( + parameter_list, global_scope(), executor._default_executor + ) with open(parameter_file_name, 'rb') as f: # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' @@ -2085,29 +2221,36 @@ def load(program, model_path, executor=None, var_list=None): load_dict = pickle.load(f, encoding='latin1') load_dict = _pack_loaded_dict(load_dict) for v in parameter_list: - assert v.name in load_dict, \ - "Can not find [{}] in model file [{}]".format( - v.name, parameter_file_name) + assert ( + v.name in load_dict + ), "Can not find [{}] in model file [{}]".format( + v.name, parameter_file_name + ) set_var(v, load_dict[v.name]) optimizer_var_list = list( - filter(is_belong_to_optimizer, program.list_vars())) + filter(is_belong_to_optimizer, program.list_vars()) + ) if len(optimizer_var_list) > 0: opt_file_name = model_prefix + ".pdopt" - assert os.path.exists(opt_file_name), \ - "Optimizer file [{}] not exits".format(opt_file_name) + assert os.path.exists( + opt_file_name + ), "Optimizer file [{}] not exits".format(opt_file_name) if executor: paddle.fluid.core._create_loaded_parameter( - optimizer_var_list, global_scope(), executor._default_executor) + optimizer_var_list, global_scope(), executor._default_executor + ) with open(opt_file_name, 'rb') as f: load_dict = pickle.load(f, encoding='latin1') for v in optimizer_var_list: - assert v.name in load_dict, \ - "Can not find [{}] in model file [{}]".format( - v.name, opt_file_name) + assert ( + v.name in load_dict + ), "Can not find [{}] in model file [{}]".format( + v.name, opt_file_name + ) set_var(v, load_dict[v.name]) @@ -2160,13 +2303,16 @@ def load_program_state(model_path, var_list=None): # model file saved with fluid.save is not found, try to load model file saved with # [save_vars, save_params, save_persistables] _logger.debug( - "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]" - .format(parameter_file_name)) + "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]".format( + parameter_file_name + ) + ) var_name_list = [] if var_list is None and os.path.isfile(model_path): raise ValueError( - "var_list can not be None when model_path is a file type") + "var_list can not be None when model_path is a file type" + ) for root, dirs, files in os.walk(model_path, topdown=False): for f in 
files: @@ -2187,27 +2333,34 @@ def load_program_state(model_path, var_list=None): shape=var.shape, dtype=var.dtype, type=var.type, - lod_level=var.lod_level if var.desc.type() - == core.VarDesc.VarType.LOD_TENSOR else None, - persistable=True) - - def _load_vars_with_try_catch(exe, - dirname, - vars, - filename, - raise_error=True): + lod_level=var.lod_level + if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR + else None, + persistable=True, + ) + + def _load_vars_with_try_catch( + exe, dirname, vars, filename, raise_error=True + ): try: - load_vars(executor=exe, - dirname=dirname, - vars=vars, - filename=filename) + load_vars( + executor=exe, + dirname=dirname, + vars=vars, + filename=filename, + ) return True except: - error_str = "Failed to load model/variables `%s`, please make sure " \ - "model/variables file is saved with the following APIs: " \ - "save_params, save_persistables, save_vars." - filenames = [var.name for var in vars - ] if filename is None else filename + error_str = ( + "Failed to load model/variables `%s`, please make sure " + "model/variables file is saved with the following APIs: " + "save_params, save_persistables, save_vars." + ) + filenames = ( + [var.name for var in vars] + if filename is None + else filename + ) if raise_error: raise RuntimeError(error_str % filenames) else: @@ -2224,16 +2377,19 @@ def load_program_state(model_path, var_list=None): dir_name, file_name = os.path.split(model_path) for var in var_list: loaded_var_list.append(clone_var_to_block(load_block, var)) - _load_vars_with_try_catch(exe, dir_name, loaded_var_list, - file_name) + _load_vars_with_try_catch( + exe, dir_name, loaded_var_list, file_name + ) else: # var_list can be None or not None if var_list is not None: for var in var_list: loaded_var_list.append( - clone_var_to_block(load_block, var)) - _load_vars_with_try_catch(exe, model_path, loaded_var_list, - None) + clone_var_to_block(load_block, var) + ) + _load_vars_with_try_catch( + exe, model_path, loaded_var_list, None + ) else: for var_name in var_name_list: # NOTE(chenweihang): If identify which files the user wants @@ -2241,21 +2397,25 @@ def load_program_state(model_path, var_list=None): # If a file does not exist, we only warn the user that the # file may be an irrelevant file, but does not throw an error # to ensure that other legal variables can be loaded. 
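# Illustrative usage sketch, not part of the diff: the state-dict loading path handled
# around here. load_program_state() returns a {name: numpy.ndarray} dict and
# set_program_state() copies it back into the global scope without appending load ops.
# Assumes a checkpoint previously written by fluid.save() for an identical program
# (the hypothetical "./checkpoint/model" prefix from the sketch above).
import paddle
import paddle.fluid as fluid

paddle.enable_static()

x = fluid.data(name="x", shape=[None, 4], dtype="float32")
y = fluid.layers.fc(input=x, size=2)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

state = fluid.io.load_program_state("./checkpoint/model")
fluid.io.set_program_state(fluid.default_main_program(), state)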
- temp_var = load_block.create_var(name=var_name, - persistable=True) - if _load_vars_with_try_catch(exe, model_path, - [temp_var], None, False): + temp_var = load_block.create_var( + name=var_name, persistable=True + ) + if _load_vars_with_try_catch( + exe, model_path, [temp_var], None, False + ): loaded_var_list.append(temp_var) res_dict = {} for var in loaded_var_list: res_dict[var.name] = np.asarray( - paddle.fluid.global_scope().find_var(var.name).get_tensor()) + paddle.fluid.global_scope().find_var(var.name).get_tensor() + ) return res_dict - assert os.path.exists(parameter_file_name), \ - "Parameter file [{}] not exits".format(parameter_file_name) + assert os.path.exists( + parameter_file_name + ), "Parameter file [{}] not exits".format(parameter_file_name) with open(parameter_file_name, 'rb') as f: # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' @@ -2318,25 +2478,32 @@ def set_program_state(program, state_dict): used_para_list = {} for para in parameter_list: var_temp = paddle.fluid.global_scope().find_var(para.name) - assert var_temp != None, \ - "Variable [ {} ] Not found, Please make sure run startup program".format(para.name) + assert ( + var_temp != None + ), "Variable [ {} ] Not found, Please make sure run startup program".format( + para.name + ) if para.name in state_dict: # set value from state dict orig_para_np = np.array(var_temp.get_tensor()) new_para_np = state_dict[para.name] - assert orig_para_np.shape == new_para_np.shape, \ - "Parameter's shape does not match, the Program requires a parameter with the shape of ({}), " \ - "while the loaded parameter (namely [ {} ]) has a shape of ({})." \ - .format(orig_para_np.shape, para.name, new_para_np.shape) - assert orig_para_np.dtype == new_para_np.dtype, \ - "Parameter's data type does not match, the Program requires a parameter with a dtype of ({}), " \ - "while the loaded parameter (namely [ {} ]) has a dtype of ({})." \ - .format(orig_para_np.dtype, para.name, new_para_np.dtype) + assert orig_para_np.shape == new_para_np.shape, ( + "Parameter's shape does not match, the Program requires a parameter with the shape of ({}), " + "while the loaded parameter (namely [ {} ]) has a shape of ({}).".format( + orig_para_np.shape, para.name, new_para_np.shape + ) + ) + assert orig_para_np.dtype == new_para_np.dtype, ( + "Parameter's data type does not match, the Program requires a parameter with a dtype of ({}), " + "while the loaded parameter (namely [ {} ]) has a dtype of ({}).".format( + orig_para_np.dtype, para.name, new_para_np.dtype + ) + ) ten = var_temp.get_tensor() ten_place = ten._place() - #assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \ + # assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \ # "Place not support, only support CPUPlace and GPUPlace, now is {}".format(str(ten_place)) py_place = paddle.fluid.CPUPlace() if ten_place.is_cuda_pinned_place(): @@ -2368,5 +2535,7 @@ def set_program_state(program, state_dict): unused_para_list.append(k) if len(unused_para_list) > 0: warnings.warn( - "This list is not set, Because of Paramerter not found in program. There are: {}" - .format(" ".join(unused_para_list))) + "This list is not set, Because of Paramerter not found in program. 
There are: {}".format( + " ".join(unused_para_list) + ) + ) diff --git a/python/paddle/fluid/ir.py b/python/paddle/fluid/ir.py index 3e58af416d506fe738431c67ef0745f18c19cdda..7dbe815b5e0e01e987eea838302212818e1b1cb6 100644 --- a/python/paddle/fluid/ir.py +++ b/python/paddle/fluid/ir.py @@ -20,10 +20,12 @@ from . import core, unique_name from .framework import _apply_pass, OpProtoHolder from .proto import framework_pb2 + try: from .proto import pass_desc_pb2 except ModuleNotFoundError: import sys + sys.path.append(path.join(path.dirname(__file__), 'proto')) from .proto import pass_desc_pb2 @@ -65,9 +67,9 @@ def _update_grad_persistable(main_program): g_var.persistable = True -def apply_build_strategy(main_program, startup_program, build_strategy, - pass_attrs): - +def apply_build_strategy( + main_program, startup_program, build_strategy, pass_attrs +): def update_attr(attrs, attr_types, name, value, typ=None): if name not in attrs: attrs[name] = value @@ -80,8 +82,13 @@ def apply_build_strategy(main_program, startup_program, build_strategy, update_attr(attrs, attr_types, "nranks", 1, "size_t") update_attr(attrs, attr_types, "use_cuda", False, "bool") # TODO(zjl): how to skip fetch variables ? - update_attr(attrs, attr_types, "mem_opt_skip_vars", - get_data_vars(main_program), "list[str]") + update_attr( + attrs, + attr_types, + "mem_opt_skip_vars", + get_data_vars(main_program), + "list[str]", + ) _apply_pass(main_program, startup_program, name, attrs, attr_types) _update_grad_persistable(main_program) @@ -109,12 +116,14 @@ def apply_build_strategy(main_program, startup_program, build_strategy, apply_pass("fuse_elewise_add_act_pass") build_strategy.fuse_elewise_add_act_ops = False if build_strategy.fuse_all_optimizer_ops: - apply_pass([ - "coalesce_grad_tensor_pass", - "fuse_adam_op_pass", - "fuse_sgd_op_pass", - "fuse_momentum_op_pass", - ]) + apply_pass( + [ + "coalesce_grad_tensor_pass", + "fuse_adam_op_pass", + "fuse_sgd_op_pass", + "fuse_momentum_op_pass", + ] + ) build_strategy.fuse_all_optimizer_ops = False # TODO(zjl): support fuse all reduce ops if build_strategy.cache_runtime_context: @@ -147,8 +156,10 @@ class RegisterPassHelper(object): input_spec = self._input_specs.get(arg_name) if isinstance(input_spec, paddle.static.InputSpec): args.append( - PassDesc.VarHelper(arg_name, input_spec.shape, - input_spec.dtype)) + PassDesc.VarHelper( + arg_name, input_spec.shape, input_spec.dtype + ) + ) elif isinstance(input_spec, paddle.ParamAttr): args.append(paddle.ParamAttr(arg_name)) else: @@ -158,12 +169,15 @@ class RegisterPassHelper(object): def _prune_program_desc(self, ops): for op_desc in ops: default_attrs = core.get_op_attrs_default_value( - op_desc.type.encode()) + op_desc.type.encode() + ) remove_attrs = list() for attr in op_desc.attrs: # attr must not in if attr.name not in [ - "op_namescope", "op_callstack", "op_device" + "op_namescope", + "op_callstack", + "op_device", ]: attr_list_fields = attr.ListFields() # attr format must be: name, type, value @@ -192,8 +206,10 @@ class RegisterPassHelper(object): op_outs = out.Outputs() if len(op_outs) != 1: raise ValueError( - "Operator '{}' has multiple outputs, please specify one output variable." 
- .format(out._type)) + "Operator '{}' has multiple outputs, please specify one output variable.".format( + out._type + ) + ) for op_out in op_outs.values(): vars.extend(op_out) else: @@ -205,7 +221,6 @@ class RegisterPassHelper(object): return vars, program.current_block().ops def _convert_vars_to_pass_desc(self, patterns, replaces, desc): - def _add_element_conditions(conditions, elements): for element in elements: if element._condition: @@ -257,11 +272,14 @@ class RegisterPassHelper(object): pass_desc = multi_pass_desc.pass_descs.add() # Convert ProgramDescs of pattern and replace subgraphs. pattern_vars, pattern_ops = self._func_to_program_desc( - pattern, pass_desc.pattern) + pattern, pass_desc.pattern + ) replace_vars, replace_ops = self._func_to_program_desc( - replace, pass_desc.replace) - self._convert_vars_to_pass_desc(pattern_vars, replace_vars, - pass_desc) + replace, pass_desc.replace + ) + self._convert_vars_to_pass_desc( + pattern_vars, replace_vars, pass_desc + ) self._convert_ops_to_pass_desc(pattern_ops, replace_ops, pass_desc) if switch_static_mode: paddle.disable_static() @@ -269,9 +287,7 @@ class RegisterPassHelper(object): class PassDesc(object): - class AttrHelper(object): - def __init__(self, obj, name, element_index=None): self._obj = obj self._name = name @@ -283,9 +299,9 @@ class PassDesc(object): self._mapped = None def __getitem__(self, index): - element = PassDesc.AttrHelper(self._obj, - self._name, - element_index=index) + element = PassDesc.AttrHelper( + self._obj, self._name, element_index=index + ) self._elements.append(element) return element @@ -311,8 +327,9 @@ class PassDesc(object): raise NotImplementedError("Unimplemented transform operation.") def _clone_with_operation(self, type, value=None): - attr = PassDesc.AttrHelper(self._obj, self._name, - self._element_index) + attr = PassDesc.AttrHelper( + self._obj, self._name, self._element_index + ) self._elements.append(attr) if value is None: attr._operation_type = type @@ -329,19 +346,23 @@ class PassDesc(object): def __sub__(self, value): return self._clone_with_operation( - pass_desc_pb2.PassDesc.OperationType.kSub, value) + pass_desc_pb2.PassDesc.OperationType.kSub, value + ) def __add__(self, value): return self._clone_with_operation( - pass_desc_pb2.PassDesc.OperationType.kAdd, value) + pass_desc_pb2.PassDesc.OperationType.kAdd, value + ) def Mod(self, value): return self._clone_with_operation( - pass_desc_pb2.PassDesc.OperationType.kMod, value) + pass_desc_pb2.PassDesc.OperationType.kMod, value + ) def Size(self): return self._clone_with_operation( - pass_desc_pb2.PassDesc.OperationType.kSize) + pass_desc_pb2.PassDesc.OperationType.kSize + ) def _set_with_condition(self, type, value): condition = pass_desc_pb2.PassDesc.AttrCondition() @@ -356,36 +377,36 @@ class PassDesc(object): self._condition = condition def EQ(self, value): - self._set_with_condition(pass_desc_pb2.PassDesc.ConditionType.kEQ, - value) - - def MappedPattern(self, - var=None, - op=None, - index=0, - name=None, - element_index=None): + self._set_with_condition( + pass_desc_pb2.PassDesc.ConditionType.kEQ, value + ) + + def MappedPattern( + self, var=None, op=None, index=0, name=None, element_index=None + ): if all([var, op]): raise ValueError("Only mapped one of which var or op.") def mapped_var(pattern_ops): raise NotImplementedError( - "Mapping to variable is not implemented.") + "Mapping to variable is not implemented." 
+ ) def mapped_op(pattern_ops): ops = [o for o in pattern_ops if o._type == op] if len(ops) <= index: raise ValueError( "Index '{}' of operator '{}' is incorrect.".format( - index, op)) - return PassDesc.AttrHelper(ops[index], - name, - element_index=element_index) + index, op + ) + ) + return PassDesc.AttrHelper( + ops[index], name, element_index=element_index + ) self._mapped = mapped_op if var is None else mapped_var class VarHelper(paddle.static.Variable): - def __init__(self, *args, **kwargs): block = paddle.static.default_main_program().current_block() self._var = paddle.static.data(*args, **kwargs) @@ -402,7 +423,6 @@ class PassDesc(object): return attr class OpHelper(object): - def __init__(self, type=None): self._type = type @@ -414,18 +434,23 @@ class PassDesc(object): def __call__(self, *args, **kwargs): if len(args) > 0: raise ValueError( - "Each input argument needs to specify a parameter name.") + "Each input argument needs to specify a parameter name." + ) for (in_name, in_args) in kwargs.items(): op_input = self._inputs.get(in_name) if op_input is None: raise ValueError( "Operator '{}' does not have input named '{}'.".format( - self._type, in_name)) + self._type, in_name + ) + ) if isinstance(in_args, (list, tuple)): if len(in_args) == 0: raise ValueError( - "Input '{}' of operator '{}' cannot be empty.". - format(in_name, self._type)) + "Input '{}' of operator '{}' cannot be empty.".format( + in_name, self._type + ) + ) else: in_args = [in_args] for in_arg in in_args: @@ -433,8 +458,10 @@ class PassDesc(object): op_outs = in_arg.Outputs() if len(op_outs) != 1: raise ValueError( - "The size of outputs of operator '{}' is not equal 1, please specify one output variable." - .format(in_arg._type)) + "The size of outputs of operator '{}' is not equal 1, please specify one output variable.".format( + in_arg._type + ) + ) for op_out in op_outs.values(): op_input.extend(op_out) else: @@ -453,7 +480,9 @@ class PassDesc(object): if self._proto is None: raise AttributeError( "type object 'OpHelper' has no attribute '{}'".format( - self._type)) + self._type + ) + ) self._index = len(block.ops) self._desc = block.desc.append_op() self._desc.set_type(self._type) @@ -480,7 +509,9 @@ class PassDesc(object): if output is None: raise ValueError( "Operator '{}' does not have output named '{}'.".format( - self._type, name)) + self._type, name + ) + ) return output def Outputs(self): @@ -541,7 +572,8 @@ def RegisterPass(function=None, input_specs=dict()): signature = inspect.signature(python_func) if len(signature.parameters) > 0: raise NotImplementedError( - "Pass function with parameter is not supported now.") + "Pass function with parameter is not supported now." + ) elif len(signature.parameters) == 0: pass_pairs = python_func() if _is_pass_pair(pass_pairs): diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index fc63bbea4e5aa3c9552673ae8a718f1b7252991d..595aa656f2946d99be0e95216d7b8c80ed9da6f8 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -14,7 +14,13 @@ import copy -from .framework import Parameter, dtype_is_floating, _non_static_mode, OpProtoHolder, _global_flags +from .framework import ( + Parameter, + dtype_is_floating, + _non_static_mode, + OpProtoHolder, + _global_flags, +) from . 
import unique_name from paddle.fluid.initializer import Constant, Xavier from .param_attr import ParamAttr @@ -25,7 +31,6 @@ from .dygraph_utils import _append_activation_in_dygraph class LayerHelper(LayerHelperBase): - def __init__(self, layer_type, **kwargs): self.kwargs = kwargs name = self.kwargs.get('name', None) @@ -35,8 +40,9 @@ class LayerHelper(LayerHelperBase): if name is None: self.kwargs['name'] = unique_name.generate(layer_type) - super(LayerHelper, self).__init__(self.kwargs['name'], - layer_type=layer_type) + super(LayerHelper, self).__init__( + self.kwargs['name'], layer_type=layer_type + ) def append_op(self, *args, **kwargs): return self.main_program.current_block().append_op(*args, **kwargs) @@ -65,7 +71,7 @@ class LayerHelper(LayerHelperBase): def bias_attr(self): return ParamAttr._to_attr(self.kwargs.get('bias_attr', None)) - #TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of param_attr + # TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of param_attr def multiple_param_attr(self, length): param_attr = self.param_attr if isinstance(param_attr, ParamAttr): @@ -93,8 +99,9 @@ class LayerHelper(LayerHelperBase): if dtype is None: dtype = each.dtype elif dtype != each.dtype: - raise ValueError("Data Type mismatch: %d to %d" % - (dtype, each.dtype)) + raise ValueError( + "Data Type mismatch: %d to %d" % (dtype, each.dtype) + ) return dtype def get_parameter(self, name): @@ -103,7 +110,7 @@ class LayerHelper(LayerHelperBase): raise ValueError("no Parameter name %s found" % name) return param - #TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of bias_attr + # TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of bias_attr def append_bias_op(self, input_var, dim_start=1, dim_end=None): """ Append bias operator and return its output. 
If the user does not set @@ -123,21 +130,19 @@ class LayerHelper(LayerHelperBase): if not bias_attr: return input_var - b = self.create_parameter(attr=bias_attr, - shape=size, - dtype=input_var.dtype, - is_bias=True) + b = self.create_parameter( + attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True + ) tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) - self.append_op(type='elementwise_add', - inputs={ - 'X': [input_var], - 'Y': [b] - }, - outputs={'Out': [tmp]}, - attrs={'axis': dim_start}) + self.append_op( + type='elementwise_add', + inputs={'X': [input_var], 'Y': [b]}, + outputs={'Out': [tmp]}, + attrs={'axis': dim_start}, + ) return tmp - #TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of act + # TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of act def append_activation(self, input_var): act = self.kwargs.get('act', None) if act is None: @@ -152,24 +157,27 @@ class LayerHelper(LayerHelperBase): use_cudnn = self.kwargs.get('use_cudnn') act['use_cudnn'] = use_cudnn use_mkldnn = self.kwargs.get( - 'use_mkldnn', - _global_flags().get("FLAGS_use_mkldnn", False)) + 'use_mkldnn', _global_flags().get("FLAGS_use_mkldnn", False) + ) if use_mkldnn: act['use_mkldnn'] = use_mkldnn act_type = act.pop('type') if _non_static_mode(): - res = _append_activation_in_dygraph(input_var, act_type, use_cudnn, - use_mkldnn) + res = _append_activation_in_dygraph( + input_var, act_type, use_cudnn, use_mkldnn + ) return res else: tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) - self.append_op(type=act_type, - inputs={"X": [input_var]}, - outputs={"Out": [tmp]}, - attrs=act) + self.append_op( + type=act_type, + inputs={"X": [input_var]}, + outputs={"Out": [tmp]}, + attrs=act, + ) return tmp - #TODO (jiabin): should we remove this since it has never be used + # TODO (jiabin): should we remove this since it has never be used def _get_default_initializer(self, dtype): if dtype is None or dtype_is_floating(dtype) is True: return Xavier() @@ -177,9 +185,13 @@ class LayerHelper(LayerHelperBase): # For integer and boolean types, initialize with all zeros return Constant() - #TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of kwargs + # TODO (jiabin): reconstruct this in LayerObjHelper and avoid dependency of kwargs def is_instance(self, param_name, cls): param = self.kwargs.get(param_name, None) if not isinstance(param, cls): - raise TypeError("The input {0} parameter of method {1} must be {2}", - param_name, self.layer_type, cls.__name__) + raise TypeError( + "The input {0} parameter of method {1} must be {2}", + param_name, + self.layer_type, + cls.__name__, + ) diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 3a688cf9f444f21fe45514c568bc065202d9ac78..1ea8d504add89abd78ba385cd161474ac3bc7bb7 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -15,7 +15,14 @@ import copy import numpy as np -from .framework import Variable, default_main_program, default_startup_program, _non_static_mode, _current_expected_place, _in_eager_without_dygraph_check +from .framework import ( + Variable, + default_main_program, + default_startup_program, + _non_static_mode, + _current_expected_place, + _in_eager_without_dygraph_check, +) from . import unique_name from .param_attr import ParamAttr, WeightNormParamAttr from . 
import core @@ -81,123 +88,151 @@ class LayerHelperBase(object): """ if isinstance(value, np.ndarray): if _in_eager_without_dygraph_check(): - return core.eager.Tensor(value, _current_expected_place(), - False, False, name if name else None, - True) + return core.eager.Tensor( + value, + _current_expected_place(), + False, + False, + name if name else None, + True, + ) else: - py_var = core.VarBase(value=value, - name=name if name else '', - persistable=False, - place=_current_expected_place(), - zero_copy=False) + py_var = core.VarBase( + value=value, + name=name if name else '', + persistable=False, + place=_current_expected_place(), + zero_copy=False, + ) return py_var elif isinstance(value, (core.VarBase, Variable, core.eager.Tensor)): return value else: raise TypeError( "The type of input value is invalid, expected type is 'ndarray' or 'Variable', but received %s" - % type(value)) + % type(value) + ) def _create_weight_normalize(self, attr, shape, dtype): from .layers import elementwise_mul, elementwise_div, reshape # Remove these ops when LayerHelper and layers support indicating # program and block. - def __norm_op(x, - out=None, - p=2, - dim=None, - keep_dim=False, - block=self.startup_program.global_block()): + def __norm_op( + x, + out=None, + p=2, + dim=None, + keep_dim=False, + block=self.startup_program.global_block(), + ): if out is None: out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_norm'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_norm']) + ), dtype=dtype, - persistable=False) + persistable=False, + ) abs_out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_abs'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_abs']) + ), dtype=dtype, - persistable=False) - block.append_op(type='abs', - inputs={'X': x}, - outputs={'Out': abs_out}) + persistable=False, + ) + block.append_op( + type='abs', inputs={'X': x}, outputs={'Out': abs_out} + ) pow_out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_pow'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_pow']) + ), dtype=dtype, - persistable=False) - block.append_op(type='pow', - inputs={'X': abs_out}, - outputs={'Out': pow_out}, - attrs={'factor': float(p)}) + persistable=False, + ) + block.append_op( + type='pow', + inputs={'X': abs_out}, + outputs={'Out': pow_out}, + attrs={'factor': float(p)}, + ) sum_out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_sum'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_sum']) + ), dtype=dtype, - persistable=False) - block.append_op(type='reduce_sum', - inputs={'X': pow_out}, - outputs={'Out': sum_out}, - attrs={ - 'dim': dim, - 'keep_dim': keep_dim, - 'reduce_all': True if dim is None else False - }) - block.append_op(type='pow', - inputs={'X': sum_out}, - outputs={'Out': out}, - attrs={'factor': 1. 
/ p}) + persistable=False, + ) + block.append_op( + type='reduce_sum', + inputs={'X': pow_out}, + outputs={'Out': sum_out}, + attrs={ + 'dim': dim, + 'keep_dim': keep_dim, + 'reduce_all': True if dim is None else False, + }, + ) + block.append_op( + type='pow', + inputs={'X': sum_out}, + outputs={'Out': out}, + attrs={'factor': 1.0 / p}, + ) return out - def __reshape_op(x, - shape, - out=None, - block=self.startup_program.global_block()): + def __reshape_op( + x, shape, out=None, block=self.startup_program.global_block() + ): if out is None: out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_reshape'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_reshape']) + ), dtype=dtype, - persistable=False) + persistable=False, + ) x_shape = block.create_var(name="Xshape", dtype=x.dtype) - block.append_op(type="reshape2", - inputs={'X': x}, - attrs={'shape': shape}, - outputs={ - "Out": out, - "XShape": x_shape - }) + block.append_op( + type="reshape2", + inputs={'X': x}, + attrs={'shape': shape}, + outputs={"Out": out, "XShape": x_shape}, + ) return out - def __transpose_op(x, - axis, - out=None, - block=self.startup_program.global_block()): + def __transpose_op( + x, axis, out=None, block=self.startup_program.global_block() + ): if out is None: out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_transpose'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_transpose']) + ), dtype=dtype, - persistable=False) - block.append_op(type='transpose', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'axis': axis}) + persistable=False, + ) + block.append_op( + type='transpose', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'axis': axis}, + ) return out - def __norm_except_dim(x, - out=None, - dim=None, - block=self.startup_program.global_block()): + def __norm_except_dim( + x, out=None, dim=None, block=self.startup_program.global_block() + ): """Computes the norm over all dimensions except dim""" if out is None: out = block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'weight_norm_norm'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'weight_norm_norm']) + ), dtype=dtype, - persistable=False) + persistable=False, + ) if dim is None: __norm_op(x, out, dim=dim, block=block) elif dim == 0: @@ -214,11 +249,12 @@ class LayerHelperBase(object): perm = list(range(len(x.shape))) perm[0], perm[dim] = dim, 0 transpose = __transpose_op(x, perm, block=block) - out_shape = [transpose.shape[0] - ] + [1] * (len(transpose.shape) - 1) - reshape = __reshape_op(transpose, - shape=[transpose.shape[0], -1], - block=block) + out_shape = [transpose.shape[0]] + [1] * ( + len(transpose.shape) - 1 + ) + reshape = __reshape_op( + transpose, shape=[transpose.shape[0], -1], block=block + ) norm = __norm_op(reshape, dim=[1], block=block) reshape2 = __reshape_op(norm, shape=out_shape, block=block) __transpose_op(reshape2, perm, out=out, block=block) @@ -226,18 +262,22 @@ class LayerHelperBase(object): def __weight_normalize(g, v, dim): """Calculations for weight normalization""" - norm = __norm_except_dim(v, - dim=dim, - block=self.main_program.current_block()) + norm = __norm_except_dim( + v, dim=dim, block=self.main_program.current_block() + ) scale = elementwise_div( - x=g, y=norm) # The shapes of g and norm are the same. 
+ x=g, y=norm + ) # The shapes of g and norm are the same. # Currently, elementwise_mul only support broadcast when the shape # of y is a subset of the shape of x. Thus, we reshape y to squeeze # to achieve the subset. - w = elementwise_mul(x=v, - y=scale if dim is None else reshape( - x=scale, shape=[v.shape[dim]]), - axis=-1 if dim is None else dim) + w = elementwise_mul( + x=v, + y=scale + if dim is None + else reshape(x=scale, shape=[v.shape[dim]]), + axis=-1 if dim is None else dim, + ) # To serialize the original parameter for inference, maybe a # parameter rather than a variable should be returned. return w @@ -261,39 +301,49 @@ class LayerHelperBase(object): g_param = self.startup_program.global_block().create_parameter( dtype=dtype, shape=g_param_shape, - **g_param_attr._to_kwargs(with_initializer=False)) + **g_param_attr._to_kwargs(with_initializer=False) + ) v_param = self.startup_program.global_block().create_parameter( dtype=dtype, shape=v_param_shape, - **v_param_attr._to_kwargs(with_initializer=True)) - __norm_except_dim(x=v_param, - out=g_param, - dim=attr.dim, - block=self.startup_program.global_block()) + **v_param_attr._to_kwargs(with_initializer=True) + ) + __norm_except_dim( + x=v_param, + out=g_param, + dim=attr.dim, + block=self.startup_program.global_block(), + ) # keep g_param shape to be consistent with that in main_program - __reshape_op(g_param, - g_param_shape, - out=g_param, - block=self.startup_program.global_block()) + __reshape_op( + g_param, + g_param_shape, + out=g_param, + block=self.startup_program.global_block(), + ) # Add weight normalization to main_program g_param = self.main_program.global_block().create_parameter( - dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs()) + dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs() + ) v_param = self.main_program.global_block().create_parameter( - dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs()) + dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs() + ) w_param = __weight_normalize(g_param, v_param, dim=attr.dim) return w_param # TODO: hide the func after we move the layers to Layers - def create_parameter(self, - attr, - shape, - dtype=None, - is_bias=False, - default_initializer=None, - stop_gradient=False, - type=core.VarDesc.VarType.LOD_TENSOR): + def create_parameter( + self, + attr, + shape, + dtype=None, + is_bias=False, + default_initializer=None, + stop_gradient=False, + type=core.VarDesc.VarType.LOD_TENSOR, + ): """Create parameters for this layers. 
Args: @@ -312,36 +362,45 @@ class LayerHelperBase(object): return None assert isinstance(attr, ParamAttr) for i, size in enumerate(shape): - assert size > 0, ("Expected every dim's size to be larger than 0, " - "but the size of the {}-th dim is {}".format( - i, size)) + assert size > 0, ( + "Expected every dim's size to be larger than 0, " + "but the size of the {}-th dim is {}".format(i, size) + ) # set global dtype if not dtype: dtype = self.__dtype if is_bias: suffix = 'b' - default_initializer = _global_bias_initializer( - ) if _global_bias_initializer() is not None else default_initializer + default_initializer = ( + _global_bias_initializer() + if _global_bias_initializer() is not None + else default_initializer + ) else: suffix = 'w' - default_initializer = _global_weight_initializer( - ) if _global_weight_initializer( - ) is not None else default_initializer + default_initializer = ( + _global_weight_initializer() + if _global_weight_initializer() is not None + else default_initializer + ) if attr.name is None: attr.name = unique_name.generate(".".join([self.name, suffix])) if default_initializer is None and attr.initializer is None: if isinstance(dtype, core.VarDesc.VarType): - if dtype != core.VarDesc.VarType.FP32 and \ - dtype != core.VarDesc.VarType.FP64 and \ - dtype != core.VarDesc.VarType.FP16 and \ - dtype != core.VarDesc.VarType.BF16: + if ( + dtype != core.VarDesc.VarType.FP32 + and dtype != core.VarDesc.VarType.FP64 + and dtype != core.VarDesc.VarType.FP16 + and dtype != core.VarDesc.VarType.BF16 + ): raise TypeError( "Can not create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" ) else: - if not (dtype.startswith("float") - or dtype in ["double", "uint16"]): + if not ( + dtype.startswith("float") or dtype in ["double", "uint16"] + ): raise TypeError( "Can not create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" ) @@ -368,26 +427,29 @@ class LayerHelperBase(object): "parameter name [{}] have be been used. " "In dygraph mode, the name of parameter can't be same." "Please check the parameter attr value passed to self.create_parameter or " - "constructor of dygraph Layers".format(attr.name)) + "constructor of dygraph Layers".format(attr.name) + ) return self.main_program.global_block().create_parameter( dtype=dtype, shape=shape, type=type, stop_gradient=stop_gradient, - **attr._to_kwargs(with_initializer=True)) + **attr._to_kwargs(with_initializer=True) + ) else: self.startup_program.global_block().create_parameter( dtype=dtype, shape=shape, type=type, - **attr._to_kwargs(with_initializer=True)) + **attr._to_kwargs(with_initializer=True) + ) return self.main_program.global_block().create_parameter( - dtype=dtype, shape=shape, type=type, **attr._to_kwargs()) + dtype=dtype, shape=shape, type=type, **attr._to_kwargs() + ) - def create_variable_for_type_inference(self, - dtype, - stop_gradient=False, - shape=None): + def create_variable_for_type_inference( + self, dtype, stop_gradient=False, shape=None + ): """Create a temporary variable that should be type inferred layer. 
Note: @@ -400,18 +462,19 @@ class LayerHelperBase(object): if not dtype: dtype = self.__dtype return self.main_program.current_block().create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'tmp']) + ), dtype=dtype, shape=shape, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=stop_gradient) + stop_gradient=stop_gradient, + ) - def create_sparse_variable_for_type_inference(self, - dtype, - stop_gradient=False, - shape=None): + def create_sparse_variable_for_type_inference( + self, dtype, stop_gradient=False, shape=None + ): """Create a temporary sparse variable that should be type inferred layer. Note: @@ -424,13 +487,15 @@ class LayerHelperBase(object): if not dtype: dtype = self.__dtype return self.main_program.current_block().create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [self.name, 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join([self.name, 'tmp']) + ), dtype=dtype, shape=shape, type=core.VarDesc.VarType.SPARSE_COO, persistable=False, - stop_gradient=stop_gradient) + stop_gradient=stop_gradient, + ) def create_variable(self, *args, **kwargs): """Create Variable for this layers. @@ -449,7 +514,8 @@ class LayerHelperBase(object): Returns(Variable): the created variable. """ return self.main_program.global_block().create_var( - *args, persistable=persistable, **kwargs) + *args, persistable=persistable, **kwargs + ) def create_or_get_global_variable(self, name, *args, **kwargs): """ @@ -464,9 +530,9 @@ class LayerHelperBase(object): def set_variable_initializer(self, var, initializer): """Set target Variable's initializer - Args: - var: target Variable - initializer: initializer to use + Args: + var: target Variable + initializer: initializer to use """ assert isinstance(var, Variable) if _non_static_mode(): @@ -478,4 +544,5 @@ class LayerHelperBase(object): dtype=var.dtype, shape=var.shape, persistable=True, - initializer=initializer) + initializer=initializer, + ) diff --git a/python/paddle/fluid/layers/collective.py b/python/paddle/fluid/layers/collective.py index df3147a27cdccaf16d0d10bd847cbcf20f00a6b2..7764235dc2f3fcc0f3c75194673c581a9b1367d0 100644 --- a/python/paddle/fluid/layers/collective.py +++ b/python/paddle/fluid/layers/collective.py @@ -35,40 +35,38 @@ def _allreduce(x, out=None, reduce_type="sum", sync_mode=False): if out is None: out = helper.create_variable( - name=unique_name.generate_with_ignorable_key(".".join( - [x.name, 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join([x.name, 'tmp']) + ), shape=x.shape, dtype=x.dtype, type=x.type, persistable=x.persistable, - stop_gradient=True) - helper.append_op(type='allreduce', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - "reduce_type": red_typ_int, - "sync_mode": sync_mode - }) + stop_gradient=True, + ) + helper.append_op( + type='allreduce', + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={"reduce_type": red_typ_int, "sync_mode": sync_mode}, + ) return out def _broadcast(x, root, sync_mode=False): helper = LayerHelper("broadcast", **locals()) - helper.append_op(type='broadcast', - inputs={'X': [x]}, - outputs={'Out': [x]}, - attrs={ - "sync_mode": sync_mode, - "root": root - }) + helper.append_op( + type='broadcast', + inputs={'X': [x]}, + outputs={'Out': [x]}, + attrs={"sync_mode": sync_mode, "root": root}, + ) return x -def _c_allreduce(x, - out=None, - reduce_type='sum', - ring_id=0, - 
use_calc_stream=False): +def _c_allreduce( + x, out=None, reduce_type='sum', ring_id=0, use_calc_stream=False +): helper = LayerHelper('c_allreduce', **locals()) if reduce_type not in ['sum', 'prob', 'max', 'min']: @@ -77,34 +75,37 @@ def _c_allreduce(x, op_type = 'c_allreduce_' + reduce_type if out is None: out = helper.create_variable( - name=unique_name.generate_with_ignorable_key('.'.join( - [x.name, op_type])), + name=unique_name.generate_with_ignorable_key( + '.'.join([x.name, op_type]) + ), shape=x.shape, dtype=x.dtype, type=x.type, - persistable=x.persistable) - - helper.append_op(type=op_type, - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream - }) + persistable=x.persistable, + ) + + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={'ring_id': ring_id, 'use_calc_stream': use_calc_stream}, + ) return out def _c_broadcast(x, root=0, ring_id=0, use_calc_stream=False): op_type = 'c_broadcast' helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [x]}, - outputs={'Out': [x]}, - attrs={ - 'root': root, - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream - }) + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [x]}, + attrs={ + 'root': root, + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + }, + ) return x @@ -121,28 +122,39 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False): return out if _in_legacy_dygraph(): - attrs = ('nranks', nranks, 'ring_id', ring_id, 'use_calc_stream', - use_calc_stream) + attrs = ( + 'nranks', + nranks, + 'ring_id', + ring_id, + 'use_calc_stream', + use_calc_stream, + ) return _legacy_C_ops.c_allgather(x, *attrs) helper = LayerHelper(op_type, **locals()) out_shape = list(x.shape[:]) if out_shape[0] > 0: out_shape[0] *= nranks - out = helper.create_variable(name=unique_name.generate_with_ignorable_key( - '.'.join([x.name, op_type])), - shape=out_shape, - dtype=x.dtype, - type=x.type, - persistable=x.persistable) - helper.append_op(type=op_type, - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'nranks': nranks, - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream - }) + out = helper.create_variable( + name=unique_name.generate_with_ignorable_key( + '.'.join([x.name, op_type]) + ), + shape=out_shape, + dtype=x.dtype, + type=x.type, + persistable=x.persistable, + ) + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={ + 'nranks': nranks, + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + }, + ) return out @@ -152,28 +164,34 @@ def _c_reducescatter(x, nranks, ring_id=0, use_calc_stream=False): if x.shape[0] > 0 and x.shape[0] % nranks != 0: raise ValueError( - 'x.shape[0](%d) cannot be evenly divided by nranks(%d)' % - (x.shape[0], nranks)) + 'x.shape[0](%d) cannot be evenly divided by nranks(%d)' + % (x.shape[0], nranks) + ) op_type = 'c_reducescatter' helper = LayerHelper(op_type, **locals()) out_shape = list(x.shape[:]) if out_shape[0] > 0: out_shape[0] //= nranks - out = helper.create_variable(name=unique_name.generate_with_ignorable_key( - '.'.join([x.name, op_type])), - shape=out_shape, - dtype=x.dtype, - type=x.type, - persistable=x.persistable) - helper.append_op(type=op_type, - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'nranks': nranks, - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream - }) + out = helper.create_variable( + name=unique_name.generate_with_ignorable_key( + 
'.'.join([x.name, op_type]) + ), + shape=out_shape, + dtype=x.dtype, + type=x.type, + persistable=x.persistable, + ) + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={ + 'nranks': nranks, + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + }, + ) return out @@ -187,8 +205,10 @@ def _c_sync_calc_stream(x): def _c_sync_comm_stream(x, ring_id): op_type = 'c_sync_comm_stream' helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [x]}, - outputs={'Out': [x]}, - attrs={'ring_id': ring_id}) + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [x]}, + attrs={'ring_id': ring_id}, + ) return x diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index b800e67fe70f61a04fd66c1ea0af9605dba75891..eb654458fbf29ecc575f301c566df9287a947c44 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -17,24 +17,66 @@ from ..wrapped_decorator import signature_safe_contextmanager from .layer_function_generator import autodoc, templatedoc from .tensor import assign, cast, fill_constant from .. import core -from ..framework import Program, Variable, Operator, _non_static_mode, static_only, _in_legacy_dygraph, in_dygraph_mode +from ..framework import ( + Program, + Variable, + Operator, + _non_static_mode, + static_only, + _in_legacy_dygraph, + in_dygraph_mode, +) from ..layer_helper import LayerHelper, unique_name from .nn import logical_and, logical_not, logical_or -from .utils import assert_same_structure, map_structure, hold_mutable_vars, copy_mutable_vars, padding_to_same_structure, is_sequence, pack_sequence_as, flatten, to_sequence +from .utils import ( + assert_same_structure, + map_structure, + hold_mutable_vars, + copy_mutable_vars, + padding_to_same_structure, + is_sequence, + pack_sequence_as, + flatten, + to_sequence, +) import numpy import warnings from functools import reduce, partial -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from ... 
import compat as cpt from ..backward import _infer_var_data_type_shape_ from paddle import _C_ops, _legacy_C_ops __all__ = [ - 'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than', - 'less_equal', 'greater_than', 'greater_equal', 'equal', 'not_equal', - 'array_read', 'array_length', 'cond', 'IfElse', 'DynamicRNN', 'StaticRNN', - 'reorder_lod_tensor_by_rank', 'Print', 'Assert', 'is_empty', 'case', - 'switch_case', 'while_loop' + 'While', + 'Switch', + 'increment', + 'array_write', + 'create_array', + 'less_than', + 'less_equal', + 'greater_than', + 'greater_equal', + 'equal', + 'not_equal', + 'array_read', + 'array_length', + 'cond', + 'IfElse', + 'DynamicRNN', + 'StaticRNN', + 'reorder_lod_tensor_by_rank', + 'Print', + 'Assert', + 'is_empty', + 'case', + 'switch_case', + 'while_loop', ] @@ -59,12 +101,11 @@ def select_output(input, outputs, mask): check_variable_and_dtype(mask, 'mask', ['int32'], 'select_output') check_type(outputs, 'outputs', (list, tuple), 'select_output') - helper.append_op(type='select_output', - inputs={ - 'X': input, - 'Mask': mask - }, - outputs={'Out': outputs}) + helper.append_op( + type='select_output', + inputs={'X': input, 'Mask': mask}, + outputs={'Out': outputs}, + ) return outputs @@ -83,7 +124,8 @@ def _select_input_infer_shape(first_shape, second_shape): ) return second_shape out_shape = list( - map(lambda a, b: a if a == b else -1, first_shape, second_shape)) + map(lambda a, b: a if a == b else -1, first_shape, second_shape) + ) return out_shape @@ -107,32 +149,34 @@ def select_input(inputs, mask): check_variable_and_dtype(mask, 'mask', ['int32'], 'select_input') # Select input should expand the shape. If it is - 1 and valid number, use - 1 first. If the dim is different, an error will be reported directly - #assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}" + # assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}" output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape) output_dtype = inputs[1].dtype output_type = inputs[1].type - out = helper.create_variable(dtype=output_dtype, - shape=output_shape, - type=output_type) - helper.append_op(type='select_input', - inputs={ - 'X': inputs, - 'Mask': mask - }, - outputs={'Out': out}) + out = helper.create_variable( + dtype=output_dtype, shape=output_shape, type=output_type + ) + helper.append_op( + type='select_input', + inputs={'X': inputs, 'Mask': mask}, + outputs={'Out': out}, + ) return out def select_input_with_buildin_type(inputs, mask, name): - from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable + from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + to_static_variable, + ) from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar + false_var, true_var = inputs if isinstance(false_var, UndefinedVar) and isinstance( - true_var, UndefinedVar): - """ None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None. 
- """ + true_var, UndefinedVar + ): + """None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None.""" return None if isinstance(false_var, Variable) and isinstance(true_var, Variable): @@ -140,50 +184,63 @@ def select_input_with_buildin_type(inputs, mask, name): return select_input(inputs, mask) except Exception as e: raise RuntimeError( - f"Exceptions throwed while doing select_input on {name}:\n{e}") + f"Exceptions throwed while doing select_input on {name}:\n{e}" + ) - elif (isinstance(false_var, support_ret_buildin_type) - and isinstance(false_var, type(true_var))): + elif isinstance(false_var, support_ret_buildin_type) and isinstance( + false_var, type(true_var) + ): if false_var == true_var: return false_var else: inputs = [ to_static_variable(false_var), - to_static_variable(true_var) + to_static_variable(true_var), ] # Deal with the situations like this: false_var is int and true_var is Variable - elif ((isinstance(false_var, support_ret_buildin_type) - and isinstance(true_var, Variable)) - or (isinstance(true_var, support_ret_buildin_type) - and isinstance(false_var, Variable))): + elif ( + isinstance(false_var, support_ret_buildin_type) + and isinstance(true_var, Variable) + ) or ( + isinstance(true_var, support_ret_buildin_type) + and isinstance(false_var, Variable) + ): inputs = [to_static_variable(false_var), to_static_variable(true_var)] warnings.warn( "Return results from different branches in cond are not same type: " "false_var returned by fasle_fn is '{}' and true_var of true_fn is " - "'{}'".format(type(false_var), type(true_var))) - elif ((isinstance(false_var, UndefinedVar) - and isinstance(true_var, (Variable, ) + support_ret_buildin_type)) - or (isinstance(true_var, UndefinedVar) - and isinstance(false_var, - (Variable, ) + support_ret_buildin_type))): + "'{}'".format(type(false_var), type(true_var)) + ) + elif ( + isinstance(false_var, UndefinedVar) + and isinstance(true_var, (Variable,) + support_ret_buildin_type) + ) or ( + isinstance(true_var, UndefinedVar) + and isinstance(false_var, (Variable,) + support_ret_buildin_type) + ): def create_var_if_not_undefined_var(a): - if isinstance(a, UndefinedVar): return a + if isinstance(a, UndefinedVar): + return a return to_static_variable(a) true_var, false_var = to_static_variable(true_var), to_static_variable( - false_var) + false_var + ) inputs = [false_var, true_var] else: raise TypeError( "Unsupported return type of true_fn and false_fn in cond: false_var " - "returned by fasle_fn is '{}' and true_var of true_fn is '{}'". 
- format(type(false_var), type(true_var))) + "returned by fasle_fn is '{}' and true_var of true_fn is '{}'".format( + type(false_var), type(true_var) + ) + ) try: return select_input(inputs, mask) except Exception as e: raise RuntimeError( - f"Exceptions throwed while doing select_input on {name}:\n{e}") + f"Exceptions throwed while doing select_input on {name}:\n{e}" + ) def split_lod_tensor(input, mask, level=0): @@ -220,23 +277,26 @@ def split_lod_tensor(input, mask, level=0): input=x, mask=y, level=level) """ - check_type(input, 'input', (Variable, list, tuple, type(None)), - 'fluid.layers.split_lod_tensor') + check_type( + input, + 'input', + (Variable, list, tuple, type(None)), + 'fluid.layers.split_lod_tensor', + ) check_type(mask, 'mask', (Variable, list), 'fluid.layers.split_lod_tensor') check_type(level, 'level', int, 'fluid.layers.split_lod_tensor') helper = LayerHelper('split_lod_tensor', **locals()) out_true = helper.create_variable_for_type_inference(dtype=input.dtype) out_false = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='split_lod_tensor', - inputs={ - 'X': input, - 'Mask': mask, - }, - outputs={ - 'OutTrue': out_true, - 'OutFalse': out_false - }, - attrs={'level': level}) + helper.append_op( + type='split_lod_tensor', + inputs={ + 'X': input, + 'Mask': mask, + }, + outputs={'OutTrue': out_true, 'OutFalse': out_false}, + attrs={'level': level}, + ) return out_true, out_false @@ -278,37 +338,48 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): in_true=out_true, in_false=out_false, mask=y, x=x, level=level) """ helper = LayerHelper('merge_lod_tensor', **locals()) - check_type(x, 'x', (Variable, list, tuple, type(None)), - 'fluid.layers.merge_lod_tensor') + check_type( + x, + 'x', + (Variable, list, tuple, type(None)), + 'fluid.layers.merge_lod_tensor', + ) check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor') - check_type(in_true, 'in_true', (Variable, list, tuple, type(None)), - 'fluid.layers.merge_lod_tensor') - check_type(in_false, 'in_false', (Variable, list, tuple, type(None)), - 'fluid.layers.merge_lod_tensor') + check_type( + in_true, + 'in_true', + (Variable, list, tuple, type(None)), + 'fluid.layers.merge_lod_tensor', + ) + check_type( + in_false, + 'in_false', + (Variable, list, tuple, type(None)), + 'fluid.layers.merge_lod_tensor', + ) out = helper.create_variable_for_type_inference(dtype=in_true.dtype) - helper.append_op(type='merge_lod_tensor', - inputs={ - 'X': x, - 'Mask': mask, - 'InTrue': in_true, - 'InFalse': in_false - }, - outputs={'Out': out}, - attrs={'level': level}) + helper.append_op( + type='merge_lod_tensor', + inputs={'X': x, 'Mask': mask, 'InTrue': in_true, 'InFalse': in_false}, + outputs={'Out': out}, + attrs={'level': level}, + ) return out @static_only -def Print(input, - first_n=-1, - message=None, - summarize=20, - print_tensor_name=True, - print_tensor_type=True, - print_tensor_shape=True, - print_tensor_layout=True, - print_tensor_lod=True, - print_phase='both'): +def Print( + input, + first_n=-1, + message=None, + summarize=20, + print_tensor_name=True, + print_tensor_type=True, + print_tensor_shape=True, + print_tensor_layout=True, + print_tensor_lod=True, + print_phase='both', +): ''' :api_attr: Static Graph @@ -366,26 +437,31 @@ def Print(input, # - dtype: long # - data: [3 3 3 3 3 3] ''' - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64', 'bool'], - 'fluid.layers.Print') + check_variable_and_dtype( + input, + 'input', + 
['float32', 'float64', 'int32', 'int64', 'bool'], + 'fluid.layers.Print', + ) helper = LayerHelper('print' + "_" + input.name, **locals()) output = helper.create_variable_for_type_inference(input.dtype) - helper.append_op(type='print', - inputs={'In': input}, - outputs={'Out': output}, - attrs={ - 'first_n': first_n, - 'summarize': summarize, - 'message': message or "", - 'print_tensor_name': print_tensor_name, - 'print_tensor_type': print_tensor_type, - 'print_tensor_shape': print_tensor_shape, - 'print_tensor_layout': print_tensor_layout, - 'print_tensor_lod': print_tensor_lod, - 'print_phase': print_phase.upper() - }) + helper.append_op( + type='print', + inputs={'In': input}, + outputs={'Out': output}, + attrs={ + 'first_n': first_n, + 'summarize': summarize, + 'message': message or "", + 'print_tensor_name': print_tensor_name, + 'print_tensor_type': print_tensor_type, + 'print_tensor_shape': print_tensor_shape, + 'print_tensor_layout': print_tensor_layout, + 'print_tensor_lod': print_tensor_lod, + 'print_phase': print_phase.upper(), + }, + ) return output @@ -452,12 +528,11 @@ def Assert(cond, data=None, summarize=20, name=None): layer_name = name if name else ('assert_' + cond.name) helper = LayerHelper(layer_name, **locals()) - op = helper.append_op(type="assert", - inputs={ - "Cond": cond, - "Data": [] if data is None else list(data) - }, - attrs={"summarize": summarize}) + op = helper.append_op( + type="assert", + inputs={"Cond": cond, "Data": [] if data is None else list(data)}, + attrs={"summarize": summarize}, + ) return op @@ -507,8 +582,9 @@ class BlockGuardWithCompletion(BlockGuard): return False self.rnn.status = StaticRNN.AFTER_RNN_BLOCK self.rnn._complete_op() - return super(BlockGuardWithCompletion, - self).__exit__(exc_type, exc_val, exc_tb) + return super(BlockGuardWithCompletion, self).__exit__( + exc_type, exc_val, exc_tb + ) class StaticRNNMemoryLink(object): @@ -580,6 +656,7 @@ class StaticRNN(object): result = rnn() """ + BEFORE_RNN_BLOCK = 0 IN_RNN_BLOCK = 1 AFTER_RNN_BLOCK = 2 @@ -605,13 +682,15 @@ class StaticRNN(object): if self.status != StaticRNN.IN_RNN_BLOCK: raise ValueError("You must invoke {0} in rnn block".format(method)) - def memory(self, - init=None, - shape=None, - batch_ref=None, - init_value=0.0, - init_batch_dim_idx=0, - ref_batch_dim_idx=1): + def memory( + self, + init=None, + shape=None, + batch_ref=None, + init_value=0.0, + init_batch_dim_idx=0, + ref_batch_dim_idx=1, + ): """ Create a memory variable for static rnn. If the :code:`init` is not None, :code:`memory` will be initialized by @@ -637,97 +716,118 @@ class StaticRNN(object): Examples 1: .. 
code-block:: python - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - vocab_size, hidden_size=10000, 200 - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') - # create word sequence - x_emb = layers.embedding( - input=x, - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False) - # transform batch size to dim 1 - x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) - - rnn = fluid.layers.StaticRNN() - with rnn.step(): - # mark created x_emb as input, each step process a word - word = rnn.step_input(x_emb) - # create prev memory parameter, batch size comes from word - prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) - hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') - # use hidden to update prev - rnn.update_memory(prev, hidden) + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + # create word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # transform batch size to dim 1 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # mark created x_emb as input, each step process a word + word = rnn.step_input(x_emb) + # create prev memory parameter, batch size comes from word + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # use hidden to update prev + rnn.update_memory(prev, hidden) Examples 2: .. code-block:: python - import paddle.fluid as fluid - import paddle.fluid.layers as layers - vocab_size, hidden_size=10000, 200 - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') - # create word sequence - x_emb = layers.embedding( - input=x, - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False) - # transform batch size to dim 1 - x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) - boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1) - rnn = fluid.layers.StaticRNN() - with rnn.step(): - # mark created x_emb as input, each step process a word - word = rnn.step_input(x_emb) - # init memory - prev = rnn.memory(init=boot_memory) - hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') - # update hidden with prev - rnn.update_memory(prev, hidden) + import paddle.fluid as fluid + import paddle.fluid.layers as layers + vocab_size, hidden_size=10000, 200 + x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + # create word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # transform batch size to dim 1 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1) + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # mark created x_emb as input, each step process a word + word = rnn.step_input(x_emb) + # init memory + prev = rnn.memory(init=boot_memory) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # update hidden with prev + rnn.update_memory(prev, hidden) """ self._assert_in_rnn_block_('memory') - check_type(init, "init", (Variable, type(None)), - "fluid.layers.StaticRNN.memory") - check_type(shape, "shape", (list, tuple, type(None)), - "fluid.layers.StaticRNN.memory") - check_type(batch_ref, "batch_ref", 
(Variable, type(None)), - "fluid.layers.StaticRNN.memory") + check_type( + init, + "init", + (Variable, type(None)), + "fluid.layers.StaticRNN.memory", + ) + check_type( + shape, + "shape", + (list, tuple, type(None)), + "fluid.layers.StaticRNN.memory", + ) + check_type( + batch_ref, + "batch_ref", + (Variable, type(None)), + "fluid.layers.StaticRNN.memory", + ) if init is None: if shape is None or batch_ref is None: raise ValueError( - "if init is None, memory at least need shape and batch_ref") + "if init is None, memory at least need shape and batch_ref" + ) parent_block = self._parent_block() - var_name = unique_name.generate_with_ignorable_key("@".join( - [self.helper.name, "memory_boot"])) - boot_var = parent_block.create_var(name=var_name, - shape=shape, - dtype=batch_ref.dtype, - persistable=False) - - parent_block.append_op(type="fill_constant_batch_size_like", - inputs={'Input': [batch_ref]}, - outputs={'Out': [boot_var]}, - attrs={ - 'value': init_value, - 'shape': boot_var.shape, - 'dtype': boot_var.dtype, - 'input_dim_idx': ref_batch_dim_idx, - 'output_dim_idx': init_batch_dim_idx - }) + var_name = unique_name.generate_with_ignorable_key( + "@".join([self.helper.name, "memory_boot"]) + ) + boot_var = parent_block.create_var( + name=var_name, + shape=shape, + dtype=batch_ref.dtype, + persistable=False, + ) + + parent_block.append_op( + type="fill_constant_batch_size_like", + inputs={'Input': [batch_ref]}, + outputs={'Out': [boot_var]}, + attrs={ + 'value': init_value, + 'shape': boot_var.shape, + 'dtype': boot_var.dtype, + 'input_dim_idx': ref_batch_dim_idx, + 'output_dim_idx': init_batch_dim_idx, + }, + ) return self.memory(init=boot_var) else: pre_mem = self.helper.create_variable( - name=unique_name.generate_with_ignorable_key("@".join( - [self.helper.name, "mem"])), + name=unique_name.generate_with_ignorable_key( + "@".join([self.helper.name, "mem"]) + ), dtype=init.dtype, - shape=init.shape) - self.memories[pre_mem.name] = StaticRNNMemoryLink(init=init, - pre_mem=pre_mem) + shape=init.shape, + ) + self.memories[pre_mem.name] = StaticRNNMemoryLink( + init=init, pre_mem=pre_mem + ) return pre_mem def step_input(self, x): @@ -744,29 +844,29 @@ class StaticRNN(object): Examples: .. 
code-block:: python - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - vocab_size, hidden_size=10000, 200 - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') - # create word sequence - x_emb = layers.embedding( - input=x, - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False) - # transform batch size to dim 1 - x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) - - rnn = fluid.layers.StaticRNN() - with rnn.step(): - # mark created x_emb as input, each step process a word - word = rnn.step_input(x_emb) - # create prev memory parameter, batch size comes from word - prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) - hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') - # use hidden to update prev - rnn.update_memory(prev, hidden) + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + # create word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # transform batch size to dim 1 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # mark created x_emb as input, each step process a word + word = rnn.step_input(x_emb) + # create prev memory parameter, batch size comes from word + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # use hidden to update prev + rnn.update_memory(prev, hidden) """ self._assert_in_rnn_block_('step_input') @@ -776,10 +876,9 @@ class StaticRNN(object): elif x.shape[0] != -1 and self.seq_len != x.shape[0]: raise ValueError("Static RNN only take fix seq_len input") - ipt = self.helper.create_variable(name=x.name, - dtype=x.dtype, - shape=list(x.shape[1:]), - type=x.type) + ipt = self.helper.create_variable( + name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type + ) self.inputs.append(ipt) return ipt @@ -796,47 +895,50 @@ class StaticRNN(object): Examples: .. 
code-block:: python - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - vocab_size, hidden_size=10000, 200 - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') - # create word sequence - x_emb = layers.embedding( - input=x, - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False) - # transform batch size to dim 1 - x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) - - rnn = fluid.layers.StaticRNN() - with rnn.step(): - # mark created x_emb as input, each step process a word - word = rnn.step_input(x_emb) - # create prev memory parameter, batch size comes from word - prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) - hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') - # use hidden to update prev - rnn.update_memory(prev, hidden) - rnn.step_output(hidden) - - result = rnn() + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + # create word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # transform batch size to dim 1 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # mark created x_emb as input, each step process a word + word = rnn.step_input(x_emb) + # create prev memory parameter, batch size comes from word + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # use hidden to update prev + rnn.update_memory(prev, hidden) + rnn.step_output(hidden) + + result = rnn() """ self._assert_in_rnn_block_('step_output') check_type(o, "o", Variable, "fluid.layers.StaticRNN.step_output") tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype) - self.helper.append_op(type='rnn_memory_helper', - inputs={'X': [o]}, - outputs={'Out': tmp_o}, - attrs={'dtype': o.dtype}) + self.helper.append_op( + type='rnn_memory_helper', + inputs={'X': [o]}, + outputs={'Out': tmp_o}, + attrs={'dtype': o.dtype}, + ) - out_var = self._parent_block().create_var(name=tmp_o.name, - shape=[self.seq_len] + - list(tmp_o.shape), - dtype=tmp_o.dtype) + out_var = self._parent_block().create_var( + name=tmp_o.name, + shape=[self.seq_len] + list(tmp_o.shape), + dtype=tmp_o.dtype, + ) self.outputs.append(out_var) @@ -853,33 +955,33 @@ class StaticRNN(object): Examples: .. 
code-block:: python - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - vocab_size, hidden_size=10000, 200 - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') - # create word sequence - x_emb = layers.embedding( - input=x, - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False) - # transform batch size to dim 1 - x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) - - rnn = fluid.layers.StaticRNN() - with rnn.step(): - # mark created x_emb as input, each step process a word - word = rnn.step_input(x_emb) - # create prev memory parameter, batch size comes from word - prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) - hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') - # use hidden to update prev - rnn.update_memory(prev, hidden) - # mark each step's hidden and word as output - rnn.output(hidden, word) - - result = rnn() + import paddle.fluid as fluid + import paddle.fluid.layers as layers + + vocab_size, hidden_size=10000, 200 + x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + # create word sequence + x_emb = layers.embedding( + input=x, + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False) + # transform batch size to dim 1 + x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) + + rnn = fluid.layers.StaticRNN() + with rnn.step(): + # mark created x_emb as input, each step process a word + word = rnn.step_input(x_emb) + # create prev memory parameter, batch size comes from word + prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word) + hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu') + # use hidden to update prev + rnn.update_memory(prev, hidden) + # mark each step's hidden and word as output + rnn.output(hidden, word) + + result = rnn() """ for each in outputs: self.step_output(each) @@ -952,7 +1054,8 @@ class StaticRNN(object): ] step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) + type=core.VarDesc.VarType.STEP_SCOPES + ) inlinks = [parent_block.var(i.name) for i in self.inputs] outlinks = self.outputs @@ -964,39 +1067,41 @@ class StaticRNN(object): for _, mem in self.memories.items(): boot_memories.append(mem.init) pre_memories.append(mem.pre_mem.name) - assert mem.mem is not None, "%s should be updated in every step." % ( - mem.init.name) + assert ( + mem.mem is not None + ), "%s should be updated in every step." 
% (mem.init.name) mem_var = rnn_block.var(mem.mem.name) assert isinstance(mem_var, Variable) new_mem = self.helper.create_variable_for_type_inference( - dtype=mem_var.dtype) - rnn_block.append_op(type='rnn_memory_helper', - inputs={'X': [mem_var]}, - outputs={'Out': [new_mem]}, - attrs={'dtype': mem_var.dtype}) + dtype=mem_var.dtype + ) + rnn_block.append_op( + type='rnn_memory_helper', + inputs={'X': [mem_var]}, + outputs={'Out': [new_mem]}, + attrs={'dtype': mem_var.dtype}, + ) memories.append(new_mem.name) - parent_block.append_op(type='recurrent', - inputs={ - 'inputs': inlinks, - 'initial_states': boot_memories, - 'parameters': parameters - }, - outputs={ - 'outputs': outlinks, - 'step_scopes': [step_scope] - }, - attrs={ - 'has_states': len(pre_memories) > 0, - 'ex_states': pre_memories, - 'states': memories, - 'sub_block': rnn_block - }) + parent_block.append_op( + type='recurrent', + inputs={ + 'inputs': inlinks, + 'initial_states': boot_memories, + 'parameters': parameters, + }, + outputs={'outputs': outlinks, 'step_scopes': [step_scope]}, + attrs={ + 'has_states': len(pre_memories) > 0, + 'ex_states': pre_memories, + 'states': memories, + 'sub_block': rnn_block, + }, + ) class WhileGuard(BlockGuard): - def __init__(self, while_op): if not isinstance(while_op, While): raise TypeError("WhileGuard takes a while op") @@ -1015,8 +1120,9 @@ class WhileGuard(BlockGuard): return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb) -def get_inputs_outputs_in_block(current_block, inner_inputs, inner_outputs, - helper): +def get_inputs_outputs_in_block( + current_block, inner_inputs, inner_outputs, helper +): """ Find inputs and outputs in current control flow block. :param current_block: Current control flow block. @@ -1047,7 +1153,8 @@ def get_inputs_outputs_in_block(current_block, inner_inputs, inner_outputs, for iname in op.input_names: for in_var_name in op.input(iname): if in_var_name not in inner_outputs and not is_ignore_vars( - op, in_var_name): + op, in_var_name + ): inner_inputs.add(in_var_name) for oname in op.output_names: @@ -1063,8 +1170,11 @@ def get_inputs_outputs_in_block(current_block, inner_inputs, inner_outputs, current_block_var = None if current_block.has_var(in_var_name): current_block_var = current_block.var(in_var_name) - if not parent_block_var and current_block_var and \ - current_block_var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not parent_block_var + and current_block_var + and current_block_var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): remove_inner_inputs.add(in_var_name) inner_inputs = inner_inputs - remove_inner_inputs @@ -1154,8 +1264,10 @@ class While(object): check_variable_and_dtype(cond, 'cond', ['bool'], 'fluid.layers.While') if reduce(lambda a, b: a * b, cond.shape, 1) != 1: raise TypeError( - "condition expected shape as [1], but given shape as {0}.". 
- format(list(cond.shape))) + "condition expected shape as [1], but given shape as {0}.".format( + list(cond.shape) + ) + ) self.cond_var = cond self.is_test = is_test @@ -1166,12 +1278,14 @@ class While(object): main_program = self.helper.main_program while_block = main_program.current_block() parent_block = main_program.block( - main_program.current_block().parent_idx) + main_program.current_block().parent_idx + ) inner_outputs = {self.cond_var.name} x_name_list = set() x_name_list, inner_outputs = get_inputs_outputs_in_block( - while_block, x_name_list, inner_outputs, self.helper) + while_block, x_name_list, inner_outputs, self.helper + ) out_vars = [] for inner_out_name in inner_outputs: @@ -1185,23 +1299,21 @@ class While(object): x_name_list -= {self.cond_var.name} step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) + type=core.VarDesc.VarType.STEP_SCOPES + ) parent_block.append_op( type='while', inputs={ - 'X': - [parent_block._var_recursive(x_name) for x_name in x_name_list], - 'Condition': [self.cond_var] - }, - outputs={ - 'Out': out_vars, - 'StepScopes': [step_scope] + 'X': [ + parent_block._var_recursive(x_name) + for x_name in x_name_list + ], + 'Condition': [self.cond_var], }, - attrs={ - 'sub_block': while_block, - "is_test": self.is_test - }) + outputs={'Out': out_vars, 'StepScopes': [step_scope]}, + attrs={'sub_block': while_block, "is_test": self.is_test}, + ) support_ret_buildin_type = (bool, float, int) @@ -1213,14 +1325,17 @@ def assign_skip_lod_tensor_array(input, output): """ def has_shape_diff(x_var, y_var): - if len(x_var.shape) != len(y_var.shape): return True + if len(x_var.shape) != len(y_var.shape): + return True for x_dim, y_dim in zip(x_var.shape, y_var.shape): - if x_dim != y_dim and -1 not in [x_dim, y_dim]: return True + if x_dim != y_dim and -1 not in [x_dim, y_dim]: + return True return False if not isinstance(input, (Variable, core.VarBase)): if isinstance(output, Variable) and isinstance( - input, support_ret_buildin_type): + input, support_ret_buildin_type + ): assign(input, output) else: output = input @@ -1229,15 +1344,21 @@ def assign_skip_lod_tensor_array(input, output): if input.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: main_program = input.block.program parent_block = main_program.block( - main_program.current_block().parent_idx) + main_program.current_block().parent_idx + ) if parent_block and not parent_block._find_var_recursive(input.name): assign(input, output) else: - if isinstance(output, Variable) and isinstance( - input, Variable) and has_shape_diff(input, output): + if ( + isinstance(output, Variable) + and isinstance(input, Variable) + and has_shape_diff(input, output) + ): warnings.warn( - "In dy2static mode, we attemp to assign a variable with shape {} into a variable with shape{}, which is not always right." - .format(input.shape, output.shape)) + "In dy2static mode, we attemp to assign a variable with shape {} into a variable with shape{}, which is not always right.".format( + input.shape, output.shape + ) + ) assign(input, output) @@ -1253,7 +1374,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): Args: cond(Callable): A callable returning a boolean tensor controlling whether to continue looping. And ``cond`` takes - as many arguments as ``loop_vars`` . + as many arguments as ``loop_vars`` . body(Callable): A callable returning a tuple or list of tensors or LoDTensorArrays of the same arity (length and structure) and types as ``loops_vars`` . 
And ``body`` takes as many arguments as ``loop_vars`` . loop_vars(list|tuple): A list or tuple of tensors or LoDTensorArrays that is passed to both ``cond`` and ``body`` . @@ -1299,23 +1420,26 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): raise ValueError("loop_vars in while_loop should not be empty") pre_cond = cond(*loop_vars) - check_variable_and_dtype(pre_cond, 'var of cond returned', ['bool'], - 'fluid.layers.while_loop') + check_variable_and_dtype( + pre_cond, 'var of cond returned', ['bool'], 'fluid.layers.while_loop' + ) if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1: raise TypeError( "the shape of the variable returned by cond should be [1]," - "but given shape as {0}.".format(list(pre_cond.shape))) + "but given shape as {0}.".format(list(pre_cond.shape)) + ) if _non_static_mode(): now_cond = pre_cond.numpy()[0] - while (now_cond): + while now_cond: output_vars = body(*loop_vars) if not isinstance(output_vars, (list, tuple)): output_vars = [output_vars] if len(output_vars) != len(loop_vars): raise ValueError( "body in while_loop should return the same arity " - "(length and structure) and types as loop_vars") + "(length and structure) and types as loop_vars" + ) now_cond = cond(*output_vars).numpy()[0] map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars) return loop_vars @@ -1340,7 +1464,8 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): except ValueError as e: raise ValueError( "body in while_loop should return the same arity " - "(length and structure) as loop_vars: {0}".format(e)) + "(length and structure) as loop_vars: {0}".format(e) + ) now_cond = cond(*output_vars) map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars) assign(now_cond, pre_cond) @@ -1348,19 +1473,24 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): def _deal_with_undefined_var(output_vars, loop_vars): - """ Deal with undefined var cases, We create undefined variable based on the results of body(). - In Dy2Static, we use undefined var to represent the var created in control flow. This function - expand the loop_vars and replace original loop_vars. - 1. UndefinedVar = Variable # create a variable - 2. UndefinedVar = None # create a undefined var with RETURN_NO_VALUE_MAGIC_NUM - 3. UndefinedVar = List(int) # create a list of variable - 4. UndefinedVar = value # create a variable + """Deal with undefined var cases, We create undefined variable based on the results of body(). + In Dy2Static, we use undefined var to represent the var created in control flow. This function + expand the loop_vars and replace original loop_vars. + 1. UndefinedVar = Variable # create a variable + 2. UndefinedVar = None # create a undefined var with RETURN_NO_VALUE_MAGIC_NUM + 3. UndefinedVar = List(int) # create a list of variable + 4. 
UndefinedVar = value # create a variable """ - from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar, create_undefined_variable + from paddle.fluid.dygraph.dygraph_to_static.utils import ( + UndefinedVar, + create_undefined_variable, + ) def create_var_like(o_var): - if isinstance(o_var, - (Variable, ) + support_ret_buildin_type) or o_var is None: + if ( + isinstance(o_var, (Variable,) + support_ret_buildin_type) + or o_var is None + ): return create_undefined_variable() if is_sequence(o_var): """ @@ -1431,16 +1561,21 @@ def lod_rank_table(x, level=0): check_type(x, 'x', (Variable, list), 'lod_rank_table') if isinstance(x, (list)): for i, input_x in enumerate(x): - check_type(input_x, 'input[' + str(i) + ']', Variable, - 'lod_rank_table') + check_type( + input_x, 'input[' + str(i) + ']', Variable, 'lod_rank_table' + ) helper = LayerHelper("lod_rank_table", **locals()) - table = helper.create_variable(type=core.VarDesc.VarType.LOD_RANK_TABLE, - name=unique_name.generate("lod_rank_table")) - helper.append_op(type='lod_rank_table', - inputs={'X': x}, - outputs={'Out': table}, - attrs={'level': level}) + table = helper.create_variable( + type=core.VarDesc.VarType.LOD_RANK_TABLE, + name=unique_name.generate("lod_rank_table"), + ) + helper.append_op( + type='lod_rank_table', + inputs={'X': x}, + outputs={'Out': table}, + attrs={'level': level}, + ) return table @@ -1463,9 +1598,11 @@ def max_sequence_len(rank_table): """ helper = LayerHelper("max_seqence_len", **locals()) res = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="max_sequence_len", - inputs={"RankTable": rank_table}, - outputs={"Out": res}) + helper.append_op( + type="max_sequence_len", + inputs={"RankTable": rank_table}, + outputs={"Out": res}, + ) return res @@ -1501,24 +1638,32 @@ def lod_tensor_to_array(x, table): check_type(x, 'x', (Variable, list), 'lod_tensor_to_array') if isinstance(x, (list)): for i, input_x in enumerate(x): - check_type(input_x, 'input[' + str(i) + ']', Variable, - 'lod_tensor_to_array') + check_type( + input_x, + 'input[' + str(i) + ']', + Variable, + 'lod_tensor_to_array', + ) check_type(table, 'table', (Variable, list), 'lod_tensor_to_array') if isinstance(table, (list)): for i, table_x in enumerate(table): - check_type(table_x, 'table[' + str(i) + ']', Variable, - 'lod_tensor_to_array') + check_type( + table_x, + 'table[' + str(i) + ']', + Variable, + 'lod_tensor_to_array', + ) helper = LayerHelper("lod_tensor_to_array", **locals()) array = helper.create_variable( name=unique_name.generate("lod_tensor_to_array"), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - helper.append_op(type='lod_tensor_to_array', - inputs={ - 'X': x, - 'RankTable': table - }, - outputs={'Out': array}) + dtype=x.dtype, + ) + helper.append_op( + type='lod_tensor_to_array', + inputs={'X': x, 'RankTable': table}, + outputs={'Out': array}, + ) return array @@ -1547,22 +1692,29 @@ def array_to_lod_tensor(x, table): check_type(x, 'x', (Variable, list), 'array_to_lod_tensor') if isinstance(x, (list)): for i, input_x in enumerate(x): - check_type(input_x, 'input[' + str(i) + ']', Variable, - 'array_to_lod_tensor') + check_type( + input_x, + 'input[' + str(i) + ']', + Variable, + 'array_to_lod_tensor', + ) check_type(table, 'table', (Variable, list), 'array_to_lod_tensor') if isinstance(table, (list)): for i, table_x in enumerate(table): - check_type(table_x, 'table[' + str(i) + ']', Variable, - 'array_to_lod_tensor') + check_type( + table_x, + 'table[' + str(i) + ']', + 
Variable, + 'array_to_lod_tensor', + ) helper = LayerHelper("array_to_lod_tensor", **locals()) tmp = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="array_to_lod_tensor", - inputs={ - 'X': x, - 'RankTable': table - }, - outputs={'Out': tmp}) + helper.append_op( + type="array_to_lod_tensor", + inputs={'X': x, 'RankTable': table}, + outputs={'Out': tmp}, + ) return tmp @@ -1590,17 +1742,20 @@ def increment(x, value=1.0, in_place=True): if in_dygraph_mode(): return _C_ops.increment_(x, value) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'increment') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment' + ) helper = LayerHelper("increment", **locals()) if not in_place: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = x - helper.append_op(type='increment', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'step': float(value)}) + helper.append_op( + type='increment', + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={'step': float(value)}, + ) return out @@ -1668,8 +1823,8 @@ def array_write(x, i, array=None): if array is None: array = create_array(x.dtype) assert isinstance( - array, - list), "The 'array' in array_write must be a list in dygraph mode" + array, list + ), "The 'array' in array_write must be a list in dygraph mode" assert i <= len( array ), "The index 'i' should not be greater than the length of 'array' in dygraph mode" @@ -1683,22 +1838,24 @@ def array_write(x, i, array=None): check_type(x, 'x', (Variable), 'array_write') helper = LayerHelper('array_write', **locals()) if array is not None: - if not isinstance( - array, Variable - ) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError( - "array should be tensor array vairable in array_write Op") + "array should be tensor array vairable in array_write Op" + ) if array is None: array = helper.create_variable( name="{0}.out".format(helper.name), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - helper.append_op(type='write_to_array', - inputs={ - 'X': [x], - 'I': [i] - }, - outputs={'Out': [array]}) + dtype=x.dtype, + ) + helper.append_op( + type='write_to_array', + inputs={'X': [x], 'I': [i]}, + outputs={'Out': [array]}, + ) return array @@ -1729,16 +1886,20 @@ def create_array(dtype, initialized_list=None): if initialized_list is not None: if not isinstance(initialized_list, (list, tuple)): raise TypeError( - "Require type(initialized_list) should be list/tuple, but received {}" - .format(type(initialized_list))) + "Require type(initialized_list) should be list/tuple, but received {}".format( + type(initialized_list) + ) + ) array = list(initialized_list) # NOTE: Only support plain list like [x, y,...], not support nested list in static mode. for val in array: if not isinstance(val, Variable): raise TypeError( - "All values in `initialized_list` should be Variable, but recevied {}." 
- .format(type(val))) + "All values in `initialized_list` should be Variable, but recevied {}.".format( + type(val) + ) + ) if _non_static_mode(): return array @@ -1747,7 +1908,8 @@ def create_array(dtype, initialized_list=None): tensor_array = helper.create_variable( name="{0}.out".format(helper.name), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=dtype) + dtype=dtype, + ) for val in array: array_write(x=val, i=array_length(tensor_array), array=tensor_array) @@ -1784,10 +1946,12 @@ def less_than(x, y, force_cpu=None, cond=None, name=None): print(result) # [True, False, False, False] """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "less_than") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "less_than") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "less_than" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "less_than" + ) if cond is not None: check_type(cond, "cond", Variable, "less_than") if force_cpu != None: @@ -1802,13 +1966,12 @@ def less_than(x, y, force_cpu=None, cond=None, name=None): if force_cpu is not None: attrs['force_cpu'] = force_cpu - helper.append_op(type='less_than', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}, - attrs=attrs) + helper.append_op( + type='less_than', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs, + ) return cond @@ -1816,8 +1979,8 @@ def less_than(x, y, force_cpu=None, cond=None, name=None): def less_equal(x, y, cond=None, name=None): """ :alias_main: paddle.less_equal - :alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal - :old_api: paddle.fluid.layers.less_equal + :alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal + :old_api: paddle.fluid.layers.less_equal This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`. @@ -1843,10 +2006,12 @@ def less_equal(x, y, cond=None, name=None): out1 = label<= limit #out1=[True, False] """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "less_equal") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "less_equal") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "less_equal" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "less_equal" + ) if cond is not None: check_type(cond, "cond", Variable, "less_equal") @@ -1857,13 +2022,12 @@ def less_equal(x, y, cond=None, name=None): attrs = dict() - helper.append_op(type='less_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}, - attrs=attrs) + helper.append_op( + type='less_equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs, + ) return cond @@ -1871,8 +2035,8 @@ def less_equal(x, y, cond=None, name=None): def greater_than(x, y, cond=None, name=None): """ :alias_main: paddle.greater_than - :alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than - :old_api: paddle.fluid.layers.greater_than + :alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than + :old_api: paddle.fluid.layers.greater_than This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`. 
@@ -1897,10 +2061,12 @@ def greater_than(x, y, cond=None, name=None): out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True] out1 = label > limit #out1=[False, True] """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "greater_than") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "greater_than") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "greater_than" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "greater_than" + ) if cond is not None: check_type(cond, "cond", Variable, "greater_than") @@ -1914,13 +2080,12 @@ def greater_than(x, y, cond=None, name=None): if in_dygraph_mode(): return _C_ops.greater_than(x, y, -1) else: - helper.append_op(type='greater_than', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}, - attrs=attrs) + helper.append_op( + type='greater_than', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs, + ) return cond @@ -1928,8 +2093,8 @@ def greater_than(x, y, cond=None, name=None): def greater_equal(x, y, cond=None, name=None): """ :alias_main: paddle.greater_equal - :alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal - :old_api: paddle.fluid.layers.greater_equal + :alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal + :old_api: paddle.fluid.layers.greater_equal This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`. @@ -1956,10 +2121,12 @@ def greater_equal(x, y, cond=None, name=None): out_1 = label >= limit #out1=[True, False] """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "greater_equal") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "greater_equal") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "greater_equal" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "greater_equal" + ) if cond is not None: check_type(cond, "cond", Variable, "greater_equal") @@ -1970,13 +2137,12 @@ def greater_equal(x, y, cond=None, name=None): attrs = dict() - helper.append_op(type='greater_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}, - attrs=attrs) + helper.append_op( + type='greater_equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs, + ) return cond @@ -2013,10 +2179,12 @@ def equal(x, y, cond=None, name=None): default_axis = -1 return _C_ops.equal(x, y, default_axis) - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "equal") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "equal") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "equal" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "equal" + ) if cond is not None: check_type(cond, "cond", Variable, "equal") @@ -2025,20 +2193,17 @@ def equal(x, y, cond=None, name=None): cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True - helper.append_op(type='equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}) + helper.append_op( + type='equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]} + ) return cond def not_equal(x, y, cond=None, name=None): """ :alias_main: paddle.not_equal - :alias: 
paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal - :old_api: paddle.fluid.layers.not_equal + :alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal + :old_api: paddle.fluid.layers.not_equal This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`. @@ -2062,10 +2227,12 @@ def not_equal(x, y, cond=None, name=None): limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64') out = fluid.layers.not_equal(x=label, y=limit) """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "not_equal") - check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], - "not_equal") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "not_equal" + ) + check_variable_and_dtype( + y, "y", ["float32", "float64", "int32", "int64"], "not_equal" + ) if cond is not None: check_type(cond, "cond", Variable, "not_equal") @@ -2074,12 +2241,9 @@ def not_equal(x, y, cond=None, name=None): cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True - helper.append_op(type='not_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [cond]}) + helper.append_op( + type='not_equal', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]} + ) return cond @@ -2146,8 +2310,8 @@ def array_read(array, i): """ if _non_static_mode(): assert isinstance( - array, - list), "The 'array' in array_read must be list in dygraph mode" + array, list + ), "The 'array' in array_read must be list in dygraph mode" assert isinstance( i, Variable ), "The index 'i' in array_read must be Variable in dygraph mode" @@ -2159,17 +2323,17 @@ def array_read(array, i): check_variable_and_dtype(i, 'i', ['int64'], 'array_read') helper = LayerHelper('array_read', **locals()) - if not isinstance( - array, - Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError("array should be tensor array vairable") out = helper.create_variable_for_type_inference(dtype=array.dtype) - helper.append_op(type='read_from_array', - inputs={ - 'X': [array], - 'I': [i] - }, - outputs={'Out': [out]}) + helper.append_op( + type='read_from_array', + inputs={'X': [array], 'I': [i]}, + outputs={'Out': [out]}, + ) return out @@ -2203,14 +2367,12 @@ def shrink_memory(x, i, table): check_type(i, 'i', Variable, 'shrink_memory') check_type(table, 'table', Variable, 'shrink_memory') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='shrink_rnn_memory', - inputs={ - 'X': [x], - 'I': [i], - 'RankTable': [table] - }, - outputs={'Out': [out]}, - attrs={}) + helper.append_op( + type='shrink_rnn_memory', + inputs={'X': [x], 'I': [i], 'RankTable': [table]}, + outputs={'Out': [out]}, + attrs={}, + ) return out @@ -2262,22 +2424,24 @@ def array_length(array): if _non_static_mode(): assert isinstance( - array, - list), "The 'array' in array_write must be a list in dygraph mode" + array, list + ), "The 'array' in array_write must be a list in dygraph mode" return len(array) - if not isinstance( - array, - Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError( - "array should be tensor array vairable in array_length Op") + "array should be tensor array vairable in array_length Op" + ) helper 
= LayerHelper('array_length', **locals()) tmp = helper.create_variable_for_type_inference(dtype='int64') tmp.stop_gradient = True - helper.append_op(type='lod_array_length', - inputs={'X': [array]}, - outputs={'Out': [tmp]}) + helper.append_op( + type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]} + ) return tmp @@ -2299,8 +2463,9 @@ class ConditionalBlockGuard(BlockGuard): def __exit__(self, exc_type, exc_val, exc_tb): self.block.complete() - return super(ConditionalBlockGuard, - self).__exit__(exc_type, exc_val, exc_tb) + return super(ConditionalBlockGuard, self).__exit__( + exc_type, exc_val, exc_tb + ) class ConditionalBlock(object): @@ -2346,10 +2511,9 @@ class ConditionalBlock(object): intermediate = set() params = set() - params, intermediate = get_inputs_outputs_in_block(inside_block, - params, - intermediate, - helper=self.helper) + params, intermediate = get_inputs_outputs_in_block( + inside_block, params, intermediate, helper=self.helper + ) # Todo(liym27) Here assume that all params are in recursive parent block # but when minimize() called in control flow, some params may be in @@ -2365,25 +2529,25 @@ class ConditionalBlock(object): out_list.append(inner_var) step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) + type=core.VarDesc.VarType.STEP_SCOPES + ) conditional_block_op = parent_block.append_op( type='conditional_block', inputs={ 'Cond': self.inputs, 'Input': param_list, }, - outputs={ - 'Out': out_list, - 'Scope': [step_scope] - }, + outputs={'Out': out_list, 'Scope': [step_scope]}, attrs={ 'sub_block': inside_block, - 'is_scalar_condition': self.is_scalar_condition - }) + 'is_scalar_condition': self.is_scalar_condition, + }, + ) if self.need_append_conditional_block_grad(inside_block): - self.append_conditional_block_grad(parent_block, inside_block, - conditional_block_op) + self.append_conditional_block_grad( + parent_block, inside_block, conditional_block_op + ) def need_append_conditional_block_grad(self, inside_block): grad_sub_block_idx = inside_block.backward_block_idx @@ -2391,10 +2555,13 @@ class ConditionalBlock(object): # if inside_block have grad_block and grad_block is not itself, # we will append conditional block grad. - return grad_sub_block_idx != -1 and grad_sub_block_idx != inside_block_idx + return ( + grad_sub_block_idx != -1 and grad_sub_block_idx != inside_block_idx + ) - def append_conditional_block_grad(self, parent_block, inside_block, - conditional_block_op): + def append_conditional_block_grad( + self, parent_block, inside_block, conditional_block_op + ): ''' Append op `conditional_block_grad` manually. 
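Reviewer note: the hunks above, and most of the ones that follow, are mechanical rewraps of long calls. When a call no longer fits on one line, every argument moves onto its own line and a trailing comma is kept after the last one, so a later argument addition shows up as a one-line diff. A minimal, self-contained sketch of that target style, using a hypothetical `append_op`-like stand-in rather than the real Paddle `LayerHelper` API:

```python
# Hypothetical stand-in, used only to illustrate the wrapping style adopted
# throughout this file; it is not the Paddle LayerHelper.append_op API.
def append_op(type, inputs, outputs, attrs):
    return {"type": type, "inputs": inputs, "outputs": outputs, "attrs": attrs}


# Short calls stay on one line; long calls are exploded with one argument per
# line and a trailing comma after the last argument.
op = append_op(
    type="conditional_block",
    inputs={"Cond": ["cond"], "Input": ["x"]},
    outputs={"Out": ["out"], "Scope": ["step_scope"]},
    attrs={"is_scalar_condition": True},
)
print(op["type"])  # conditional_block
```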
When `optimizer.minimize/append_backward` is called in Paddle control flow, @@ -2433,7 +2600,8 @@ class ConditionalBlock(object): param_list.append(inner_var.name) grad_op_desc, op_grad_to_var = core.get_grad_op_desc( - conditional_block_op.desc, set(), [grad_sub_block.desc]) + conditional_block_op.desc, set(), [grad_sub_block.desc] + ) # append op_desc in grad_op_descs to target_block op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() @@ -2443,13 +2611,16 @@ class ConditionalBlock(object): new_op_desc._set_attr(op_role_attr_name, backward) # set input and output manually new_op_desc.set_input('Input', param_list) - new_op_desc.set_output('Input@GRAD', - [param + "@GRAD" for param in param_list]) + new_op_desc.set_output( + 'Input@GRAD', [param + "@GRAD" for param in param_list] + ) new_vars = set() for grad_var_name in new_op_desc.output_arg_names(): - if grad_sub_block.desc.has_var_recursive(grad_var_name.encode( - )) or grad_var_name == core.empty_var_name(): + if ( + grad_sub_block.desc.has_var_recursive(grad_var_name.encode()) + or grad_var_name == core.empty_var_name() + ): continue grad_sub_block.desc.var(grad_var_name.encode()) new_vars.add(grad_var_name) @@ -2472,16 +2643,20 @@ def copy_var_to_parent_block(var, layer_helper): return var prog = layer_helper.main_program parent_idx = prog.current_block().parent_idx - assert parent_idx >= 0, "Got wrong parent block index when assigning var to parent scope in control_flow" + assert ( + parent_idx >= 0 + ), "Got wrong parent block index when assigning var to parent scope in control_flow" parent_block = prog.block(parent_idx) - if var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \ - and parent_block._find_var_recursive(var.name): + if ( + var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + and parent_block._find_var_recursive(var.name) + ): parent_block_var = var else: - parent_block_var = parent_block.create_var(dtype=var.dtype, - shape=var.shape, - type=var.type) + parent_block_var = parent_block.create_var( + dtype=var.dtype, shape=var.shape, type=var.type + ) assign(var, parent_block_var) return parent_block_var @@ -2594,15 +2769,19 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): if true_fn is not None: if not callable(true_fn): raise TypeError( - "The true_fn in cond must be callable, but received {}". 
- format(type(true_fn).__name__)) + "The true_fn in cond must be callable, but received {}".format( + type(true_fn).__name__ + ) + ) return true_fn() else: if false_fn is not None: if not callable(false_fn): raise TypeError( - "The false_fn in cond must be callable, but received {}" - .format(type(false_fn).__name__)) + "The false_fn in cond must be callable, but received {}".format( + type(false_fn).__name__ + ) + ) return false_fn() return None @@ -2616,25 +2795,32 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): if not callable(true_fn): raise TypeError( "The true_fn in cond must be callable, but received {}".format( - type(true_fn).__name__)) + type(true_fn).__name__ + ) + ) true_cond_block = ConditionalBlock([pred], is_scalar_condition=True) with true_cond_block.block(): origin_true_output = true_fn() if origin_true_output is not None: - true_output = map_structure(copy_to_parent_func, - origin_true_output) + true_output = map_structure( + copy_to_parent_func, origin_true_output + ) if false_fn is not None: if not callable(false_fn): raise TypeError( "The false_fn in cond must be callable, but received {}".format( - type(false_fn).__name__)) - false_cond_block = ConditionalBlock([logical_not(pred)], - is_scalar_condition=True) + type(false_fn).__name__ + ) + ) + false_cond_block = ConditionalBlock( + [logical_not(pred)], is_scalar_condition=True + ) with false_cond_block.block(): origin_false_output = false_fn() if origin_false_output is not None: - false_output = map_structure(copy_to_parent_func, - origin_false_output) + false_output = map_structure( + copy_to_parent_func, origin_false_output + ) if true_output is None and false_output is None: return None @@ -2642,11 +2828,13 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): if true_output is None: raise ValueError( "Incompatible return values of true_fn and false_fn in cond: " - "true_fn returns None while false_fn returns non-None") + "true_fn returns None while false_fn returns non-None" + ) if false_output is None: raise ValueError( "Incompatible return values of true_fn and false_fn in cond: " - "true_fn returns non-None while false_fn returns None") + "true_fn returns non-None while false_fn returns None" + ) # Merge ture and false output if they are not None if return_names is None: @@ -2664,22 +2852,28 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): # a = 1 # Because we can not use variable to express 'None' true_output, false_output = expand_undefined_var( - true_output, false_output, return_names) + true_output, false_output, return_names + ) if len(to_sequence(true_output)) != len(to_sequence(false_output)): raise ValueError( - "true fn returns {} vars, but false fn returns {} vars, which is not equals" - .format(len(to_sequence(true_output)), - len(to_sequence(false_output)))) - for true_out, false_out, return_name in zip(to_sequence(true_output), - to_sequence(false_output), - to_sequence(return_names)): + "true fn returns {} vars, but false fn returns {} vars, which is not equals".format( + len(to_sequence(true_output)), len(to_sequence(false_output)) + ) + ) + for true_out, false_out, return_name in zip( + to_sequence(true_output), + to_sequence(false_output), + to_sequence(return_names), + ): try: assert_same_structure(true_out, false_out, check_types=False) except ValueError as e: raise ValueError( - "Incompatible return values of `{}` in true_fn and false_fn in cond: {}" - .format(return_name, e)) + "Incompatible return values 
of `{}` in true_fn and false_fn in cond: {}".format( + return_name, e + ) + ) def check_ret_none(seq_true, seq_false, seq_names): length = len(seq_true) @@ -2687,32 +2881,53 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): f_true = flatten(seq_true[i]) f_false = flatten(seq_false[i]) for idx in range(len(f_true)): - if f_true[idx] is None and f_false[idx] is not None or f_false[ - idx] is None and f_true[idx] is not None: + if ( + f_true[idx] is None + and f_false[idx] is not None + or f_false[idx] is None + and f_true[idx] is not None + ): warnings.warn( "In cond : Var '{}' or part of it is set differently in ifelse branchs, " "<{}, {}> in true branch and <{}, {}> in false branch. Set var to " "'None' in ifelse block might lead to error.".format( - seq_names[i], type(f_true[idx]), f_true[idx], - type(f_false[idx]), f_false[idx])) - - check_ret_none(to_sequence(true_output), to_sequence(false_output), - to_sequence(return_names)) + seq_names[i], + type(f_true[idx]), + f_true[idx], + type(f_false[idx]), + f_false[idx], + ) + ) + + check_ret_none( + to_sequence(true_output), + to_sequence(false_output), + to_sequence(return_names), + ) if is_dy2staic: true_output, false_output = change_none_to_undefinedvar( - true_output, false_output) + true_output, false_output + ) mask = cast(pred, dtype='int32') - merge_func = lambda name, false_var, true_var: select_input_with_buildin_type( - [false_var, true_var], mask, name) + merge_func = ( + lambda name, false_var, true_var: select_input_with_buildin_type( + [false_var, true_var], mask, name + ) + ) def merge_every_var_list(false_vars, true_vars, name): return map_structure(partial(merge_func, name), false_vars, true_vars) merged_output = list( - map(merge_every_var_list, to_sequence(false_output), - to_sequence(true_output), to_sequence(return_names))) + map( + merge_every_var_list, + to_sequence(false_output), + to_sequence(true_output), + to_sequence(return_names), + ) + ) merged_output = pack_sequence_as(false_output, flatten(merged_output)) return merged_output @@ -2721,7 +2936,8 @@ def change_none_to_undefinedvar(nest1, nest2): from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar def map_fn(x): - if x is None: return UndefinedVar("padding") + if x is None: + return UndefinedVar("padding") return x nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1)))) @@ -2730,56 +2946,81 @@ def change_none_to_undefinedvar(nest1, nest2): def expand_undefined_var(nest1, nest2, names): - """ TODO: make this function recursively. - nest1: Var1, (UndefinedVar, [1,2,3]) - nest2: Var2, ([1,2,3,4], UndefinedVar) - In this case, we should not expand recursively. + """TODO: make this function recursively. + nest1: Var1, (UndefinedVar, [1,2,3]) + nest2: Var2, ([1,2,3,4], UndefinedVar) + In this case, we should not expand recursively. 
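Reviewer note: the branch-compatibility checks being rewrapped here are easier to follow with a usage sketch of `cond()` next to them. This is a hedged example, assuming a Paddle build where the legacy `fluid.layers` namespace is still importable, and it runs on the static-graph path that these `ConditionalBlock`s implement; both branches must return structures of the same shape, which is exactly what the reformatted checks enforce.

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()  # cond() below builds static-graph ConditionalBlocks


def true_fn():
    return fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')


def false_fn():
    return fluid.layers.fill_constant(shape=[1], value=3, dtype='int32')


x = fluid.layers.fill_constant(shape=[1], value=0.1, dtype='float32')
y = fluid.layers.fill_constant(shape=[1], value=0.23, dtype='float32')
pred = fluid.layers.less_than(x=x, y=y)

# true_fn and false_fn must return the same structure (here, a single Tensor).
out = fluid.layers.cond(pred, true_fn, false_fn)
```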
""" from paddle.fluid.dygraph.dygraph_to_static.utils import UndefinedVar - from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_VALUE_PREFIX + from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ( + RETURN_VALUE_PREFIX, + ) def pack_undefined_var_as(seq): - return pack_sequence_as(seq, - [UndefinedVar("padding") for i in flatten(seq)]) + return pack_sequence_as( + seq, [UndefinedVar("padding") for i in flatten(seq)] + ) def map_fn(n1, n2, name, order): - if not name.startswith(RETURN_VALUE_PREFIX) and (isinstance( - n1, UndefinedVar) or n1 is None): + if not name.startswith(RETURN_VALUE_PREFIX) and ( + isinstance(n1, UndefinedVar) or n1 is None + ): if n1 is None and n2 is not None: if order == 0: warnings.warn( "In cond : Var '{}' or part of it is set differently in ifelse branchs, " "<{}, {}> in true branch and <{}, {}> in false branch. Set var to " "'None' in ifelse block might lead to error.".format( - name, type(n1), n1, type(n2), n2)) + name, type(n1), n1, type(n2), n2 + ) + ) else: warnings.warn( "In cond : Var '{}' or part of it is set differently in ifelse branchs, " "<{}, {}> in true branch and <{}, {}> in false branch. Set var to " "'None' in ifelse block might lead to error.".format( - name, type(n2), n2, type(n1), n1)) + name, type(n2), n2, type(n1), n1 + ) + ) return pack_undefined_var_as(n2) return n1 nest1_out = list( - map(map_fn, to_sequence(nest1), to_sequence(nest2), to_sequence(names), - [0 for i in to_sequence(names)])) + map( + map_fn, + to_sequence(nest1), + to_sequence(nest2), + to_sequence(names), + [0 for i in to_sequence(names)], + ) + ) nest2_out = list( - map(map_fn, to_sequence(nest2), to_sequence(nest1), to_sequence(names), - [1 for i in to_sequence(names)])) - if not is_sequence(nest1): nest1_out = nest1_out[0] - if not is_sequence(nest2): nest2_out = nest2_out[0] + map( + map_fn, + to_sequence(nest2), + to_sequence(nest1), + to_sequence(names), + [1 for i in to_sequence(names)], + ) + ) + if not is_sequence(nest1): + nest1_out = nest1_out[0] + if not is_sequence(nest2): + nest2_out = nest2_out[0] return nest1_out, nest2_out def _error_message(what, arg_name, op_name, right_value, error_value): - error_message = "{what} of '{arg_name}' in {op_name} must be " \ + error_message = ( + "{what} of '{arg_name}' in {op_name} must be " "{right_value}, but received: {error_value}.".format( - what=what, - arg_name=arg_name, - op_name=op_name, - right_value=right_value, - error_value=error_value) + what=what, + arg_name=arg_name, + op_name=op_name, + right_value=right_value, + error_value=error_value, + ) + ) return error_message @@ -2860,24 +3101,42 @@ def case(pred_fn_pairs, default=None, name=None): for pred_fn in pred_fn_pairs: if not isinstance(pred_fn, tuple): raise TypeError( - _error_message("The elements' type", "pred_fn_pairs", - "case", tuple, type(pred_fn))) + _error_message( + "The elements' type", + "pred_fn_pairs", + "case", + tuple, + type(pred_fn), + ) + ) if len(pred_fn) != 2: raise TypeError( - _error_message("The tuple's size", "pred_fn_pairs", "case", - "2", - str(len(pred_fn)) + "-tuple")) + _error_message( + "The tuple's size", + "pred_fn_pairs", + "case", + "2", + str(len(pred_fn)) + "-tuple", + ) + ) pred, fn = pred_fn if not isinstance(pred, Variable): raise TypeError( - _error_message("The pred's type", "pred_fn_pairs", "case", - "boolean Variable", type(pred))) + _error_message( + "The pred's type", + "pred_fn_pairs", + "case", + "boolean Variable", + type(pred), + ) + ) if not callable(fn): raise 
TypeError( "The fn for {} of pred_fn_pairs in Op(case) must" - " be callable.".format(pred.name)) + " be callable.".format(pred.name) + ) if default is None: default_index = len(pred_fn_pairs) - 1 # pick the last one @@ -2979,8 +3238,11 @@ class Switch(object): raise ValueError("case should be called inside with") check_variable_and_dtype( - condition, 'condition', ['bool'], - 'the member function case of fluid.layers.Switch') + condition, + 'condition', + ['bool'], + 'the member function case of fluid.layers.Switch', + ) if len(self.pre_not_conditions) == 0: cond_block = ConditionalBlock([condition], is_scalar_condition=True) @@ -2989,12 +3251,14 @@ class Switch(object): else: pre_cond_num = len(self.pre_not_conditions) pre_not_cond = self.pre_not_conditions[pre_cond_num - 1] - new_not_cond = logical_and(x=pre_not_cond, - y=logical_not(x=condition)) + new_not_cond = logical_and( + x=pre_not_cond, y=logical_not(x=condition) + ) self.pre_not_conditions.append(new_not_cond) cond_block = ConditionalBlock( [logical_and(x=pre_not_cond, y=condition)], - is_scalar_condition=True) + is_scalar_condition=True, + ) return ConditionalBlockGuard(cond_block) @@ -3004,7 +3268,8 @@ class Switch(object): raise ValueError("there should be at least one condition") cond_block = ConditionalBlock( [self.pre_not_conditions[pre_cond_num - 1]], - is_scalar_condition=True) + is_scalar_condition=True, + ) return ConditionalBlockGuard(cond_block) def __enter__(self): @@ -3024,7 +3289,6 @@ class Switch(object): class IfElseBlockGuard(object): - def __init__(self, is_true, ifelse): if not isinstance(ifelse, IfElse): raise TypeError("ifelse must be an instance of IfElse class") @@ -3045,7 +3309,11 @@ class IfElseBlockGuard(object): self.cond_block = self.cond_block.block() def __enter__(self): - self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS + self.ie.status = ( + IfElse.IN_IF_ELSE_TRUE_BLOCKS + if self.is_true + else IfElse.IN_IF_ELSE_FALSE_BLOCKS + ) self.cond_block.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): @@ -3132,6 +3400,7 @@ class IfElse(object): There is a ``call ()`` function inside the object, that is, by calling ``output = ie ()``, all the outputs inside the block of False are fused as the whole output, the output type is a list, and the type of each element in the list is Variable. 
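Reviewer note: since the `IfElse` docstring above ends by describing how `ie()` fuses the per-branch outputs into a list, a short usage sketch may help readers who only see the reformatting churn. It follows the methods visible in this file (`true_block`/`false_block`, `input`, `output`, `__call__`) and assumes the legacy static-graph `fluid` API, so treat it as a sketch rather than a snippet verified against a specific release.

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()

x = fluid.layers.fill_constant(shape=[1], value=0.1, dtype='float32')
y = fluid.layers.fill_constant(shape=[1], value=0.23, dtype='float32')
cond = fluid.layers.less_than(x=x, y=y)

ie = fluid.layers.IfElse(cond)
with ie.true_block():
    out_true = ie.input(x)   # rows of x where cond is True
    ie.output(out_true)
with ie.false_block():
    out_false = ie.input(x)  # rows of x where cond is False
    ie.output(out_false)

out = ie()  # list of Variables, one merged result per ie.output() call
```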
""" + OUT_IF_ELSE_BLOCKS = 0 IN_IF_ELSE_TRUE_BLOCKS = 1 IN_IF_ELSE_FALSE_BLOCKS = 2 @@ -3153,24 +3422,27 @@ class IfElse(object): if id(x) not in self.input_table: parent_block = self._parent_block() out_true = parent_block.create_var( - name=unique_name.generate_with_ignorable_key('ifelse_input' + - self.helper.name), - dtype=x.dtype) + name=unique_name.generate_with_ignorable_key( + 'ifelse_input' + self.helper.name + ), + dtype=x.dtype, + ) out_false = parent_block.create_var( - name=unique_name.generate_with_ignorable_key('ifelse_input' + - self.helper.name), - dtype=x.dtype) - parent_block.append_op(type='split_lod_tensor', - inputs={ - 'X': x, - 'Mask': self.cond, - }, - outputs={ - 'OutTrue': out_true, - 'OutFalse': out_false - }, - attrs={'level': 0}) + name=unique_name.generate_with_ignorable_key( + 'ifelse_input' + self.helper.name + ), + dtype=x.dtype, + ) + parent_block.append_op( + type='split_lod_tensor', + inputs={ + 'X': x, + 'Mask': self.cond, + }, + outputs={'OutTrue': out_true, 'OutFalse': out_false}, + attrs={'level': 0}, + ) self.input_table[id(x)] = (out_true, out_false) else: out_true, out_false = self.input_table[id(x)] @@ -3194,17 +3466,21 @@ class IfElse(object): if self.status == self.OUT_IF_ELSE_BLOCKS: raise ValueError("output can only be invoked in the sub-block") - out_table = self.output_table[1 if self.status == - self.IN_IF_ELSE_TRUE_BLOCKS else 0] + out_table = self.output_table[ + 1 if self.status == self.IN_IF_ELSE_TRUE_BLOCKS else 0 + ] parent_block = self._parent_block() for each_out in outs: - check_type(each_out, "each output", Variable, - "fluid.layers.IfElse.output") + check_type( + each_out, "each output", Variable, "fluid.layers.IfElse.output" + ) # create outside tensor outside_out = parent_block.create_var( - name=unique_name.generate_with_ignorable_key("_".join( - [self.helper.name, 'output'])), - dtype=each_out.dtype) + name=unique_name.generate_with_ignorable_key( + "_".join([self.helper.name, 'output']) + ), + dtype=each_out.dtype, + ) out_table.append(outside_out) # assign local var to outside @@ -3215,8 +3491,9 @@ class IfElse(object): raise ValueError("IfElse::__call__ must be out of sub-block") false_len, true_len = list(map(len, self.output_table)) if false_len == 0 and true_len == 0: - raise ValueError("Must invoke true_block/false_block before " - "__call__") + raise ValueError( + "Must invoke true_block/false_block before " "__call__" + ) elif false_len != true_len and false_len != 0 and true_len != 0: raise ValueError("The output side must be same") elif false_len == 0 or true_len == 0: @@ -3227,11 +3504,14 @@ class IfElse(object): rlist = [] for false_var, true_var in zip(*self.output_table): rlist.append( - merge_lod_tensor(in_true=true_var, - in_false=false_var, - mask=self.cond, - x=self.cond, - level=0)) + merge_lod_tensor( + in_true=true_var, + in_false=false_var, + mask=self.cond, + x=self.cond, + level=0, + ) + ) return rlist @@ -3302,6 +3582,7 @@ class DynamicRNN(object): # Get RNN's result of the last time step last = fluid.layers.sequence_last_step(out) """ + BEFORE_RNN = 0 IN_RNN = 1 AFTER_RNN = 2 @@ -3419,39 +3700,44 @@ class DynamicRNN(object): if self.lod_rank_table is None: self.lod_rank_table = parent_block.create_var( name=unique_name.generate('lod_rank_table'), - type=core.VarDesc.VarType.LOD_RANK_TABLE) + type=core.VarDesc.VarType.LOD_RANK_TABLE, + ) self.lod_rank_table.stop_gradient = True - parent_block.append_op(type='lod_rank_table', - inputs={"X": x}, - outputs={"Out": self.lod_rank_table}, - 
attrs={"level": level}) + parent_block.append_op( + type='lod_rank_table', + inputs={"X": x}, + outputs={"Out": self.lod_rank_table}, + attrs={"level": level}, + ) self.max_seq_len = parent_block.create_var( name=unique_name.generate('dynamic_rnn_max_seq_len'), - dtype='int64') + dtype='int64', + ) self.max_seq_len.stop_gradient = False - parent_block.append_op(type='max_sequence_len', - inputs={'RankTable': self.lod_rank_table}, - outputs={"Out": self.max_seq_len}) + parent_block.append_op( + type='max_sequence_len', + inputs={'RankTable': self.lod_rank_table}, + outputs={"Out": self.max_seq_len}, + ) self.cond.stop_gradient = True - parent_block.append_op(type='less_than', - inputs={ - 'X': self.step_idx, - 'Y': self.max_seq_len - }, - outputs={'Out': self.cond}, - attrs={'force_cpu': True}) + parent_block.append_op( + type='less_than', + inputs={'X': self.step_idx, 'Y': self.max_seq_len}, + outputs={'Out': self.cond}, + attrs={'force_cpu': True}, + ) input_array = parent_block.create_var( name=unique_name.generate('dynamic_rnn_input_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) + dtype=x.dtype, + ) self.input_array.append((input_array, x.dtype)) - parent_block.append_op(type='lod_tensor_to_array', - inputs={ - 'X': x, - 'RankTable': self.lod_rank_table - }, - outputs={'Out': input_array}) + parent_block.append_op( + type='lod_tensor_to_array', + inputs={'X': x, 'RankTable': self.lod_rank_table}, + outputs={'Out': input_array}, + ) return array_read(array=input_array, i=self.step_idx) def static_input(self, x): @@ -3584,18 +3870,19 @@ class DynamicRNN(object): check_type(x, 'x', Variable, 'fluid.layers.DynamicRNN.static_input()') if self.lod_rank_table is None: raise RuntimeError( - "static_input() must be called after step_input().") + "static_input() must be called after step_input()." 
+ ) parent_block = self._parent_block_() x_reordered = parent_block.create_var( name=unique_name.generate("dynamic_rnn_static_input_reordered"), type=core.VarDesc.VarType.LOD_TENSOR, - dtype=x.dtype) - parent_block.append_op(type='reorder_lod_tensor_by_rank', - inputs={ - 'X': [x], - 'RankTable': [self.lod_rank_table] - }, - outputs={'Out': [x_reordered]}) + dtype=x.dtype, + ) + parent_block.append_op( + type='reorder_lod_tensor_by_rank', + inputs={'X': [x], 'RankTable': [self.lod_rank_table]}, + outputs={'Out': [x_reordered]}, + ) return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table) @signature_safe_contextmanager @@ -3610,10 +3897,9 @@ class DynamicRNN(object): """ if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") - self.step_idx = fill_constant(shape=[1], - dtype='int64', - value=0, - force_cpu=True) + self.step_idx = fill_constant( + shape=[1], dtype='int64', value=0, force_cpu=True + ) self.step_idx.stop_gradient = False self.status = DynamicRNN.IN_RNN with self.while_op.block(): @@ -3623,15 +3909,18 @@ class DynamicRNN(object): for new_mem, mem_array in self.mem_link: array_write(x=new_mem, i=self.step_idx, array=mem_array) - less_than(x=self.step_idx, - y=self.max_seq_len, - force_cpu=True, - cond=self.cond) + less_than( + x=self.step_idx, + y=self.max_seq_len, + force_cpu=True, + cond=self.cond, + ) self.status = DynamicRNN.AFTER_RNN for each_array in self.output_array: self.outputs.append( - array_to_lod_tensor(x=each_array, table=self.lod_rank_table)) + array_to_lod_tensor(x=each_array, table=self.lod_rank_table) + ) def __call__(self, *args, **kwargs): """ @@ -3647,19 +3936,25 @@ class DynamicRNN(object): ValueError: When :code:`__call__()` is called before :code:`block()` . """ if self.status != DynamicRNN.AFTER_RNN: - raise ValueError(("Output of the dynamic RNN can only be visited " - "outside the rnn block.")) + raise ValueError( + ( + "Output of the dynamic RNN can only be visited " + "outside the rnn block." + ) + ) if len(self.outputs) == 1: return self.outputs[0] else: return self.outputs - def memory(self, - init=None, - shape=None, - value=0.0, - need_reorder=False, - dtype='float32'): + def memory( + self, + init=None, + shape=None, + value=0.0, + need_reorder=False, + dtype='float32', + ): r""" Create a memory Variable for DynamicRNN to deliver data cross time steps. It can be initialized by an existing Tensor or a constant Tensor of given @@ -3748,11 +4043,16 @@ class DynamicRNN(object): self._assert_in_rnn_block_('memory') self._init_zero_idx_() if shape is not None: - check_type(shape, 'shape', (list, tuple), - 'fluid.layers.DynamicRNN.memory()') + check_type( + shape, + 'shape', + (list, tuple), + 'fluid.layers.DynamicRNN.memory()', + ) if init is not None: - check_type(init, 'init', Variable, - 'fluid.layers.DynamicRNN.memory()') + check_type( + init, 'init', Variable, 'fluid.layers.DynamicRNN.memory()' + ) parent_block = self._parent_block_() init_tensor = init if need_reorder == True: @@ -3760,32 +4060,36 @@ class DynamicRNN(object): raise ValueError( 'If set need_reorder to True, make sure step_input be ' 'invoked before ' - 'memory(init=init, need_reordered=True, ...).') + 'memory(init=init, need_reordered=True, ...).' 
+ ) init_reordered = parent_block.create_var( name=unique_name.generate('dynamic_rnn_mem_init_reordered'), type=core.VarDesc.VarType.LOD_TENSOR, - dtype=init.dtype) - parent_block.append_op(type='reorder_lod_tensor_by_rank', - inputs={ - 'X': [init_tensor], - 'RankTable': [self.lod_rank_table] - }, - outputs={'Out': [init_reordered]}) + dtype=init.dtype, + ) + parent_block.append_op( + type='reorder_lod_tensor_by_rank', + inputs={ + 'X': [init_tensor], + 'RankTable': [self.lod_rank_table], + }, + outputs={'Out': [init_reordered]}, + ) init_tensor = init_reordered mem_array = parent_block.create_var( name=unique_name.generate('dynamic_rnn_mem_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=init.dtype) - parent_block.append_op(type='write_to_array', - inputs={ - 'X': init_tensor, - 'I': self.zero_idx - }, - outputs={'Out': mem_array}) + dtype=init.dtype, + ) + parent_block.append_op( + type='write_to_array', + inputs={'X': init_tensor, 'I': self.zero_idx}, + outputs={'Out': mem_array}, + ) retv = array_read(array=mem_array, i=self.step_idx) - retv = shrink_memory(x=retv, - i=self.step_idx, - table=self.lod_rank_table) + retv = shrink_memory( + x=retv, i=self.step_idx, table=self.lod_rank_table + ) self.mem_dict[retv.name] = mem_array return retv else: @@ -3795,24 +4099,27 @@ class DynamicRNN(object): ) parent_block = self._parent_block_() init = parent_block.create_var( - name=unique_name.generate('mem_init'), dtype=dtype) + name=unique_name.generate('mem_init'), dtype=dtype + ) arr, dtype = self.input_array[0] - in0 = parent_block.create_var(name=unique_name.generate('in0'), - dtype=dtype) - parent_block.append_op(type='read_from_array', - inputs={ - 'X': [arr], - 'I': [self.zero_idx] - }, - outputs={'Out': [in0]}) - parent_block.append_op(type='fill_constant_batch_size_like', - inputs={'Input': [in0]}, - outputs={'Out': [init]}, - attrs={ - 'shape': [-1] + shape, - 'value': float(value), - 'dtype': init.dtype - }) + in0 = parent_block.create_var( + name=unique_name.generate('in0'), dtype=dtype + ) + parent_block.append_op( + type='read_from_array', + inputs={'X': [arr], 'I': [self.zero_idx]}, + outputs={'Out': [in0]}, + ) + parent_block.append_op( + type='fill_constant_batch_size_like', + inputs={'Input': [in0]}, + outputs={'Out': [init]}, + attrs={ + 'shape': [-1] + shape, + 'value': float(value), + 'dtype': init.dtype, + }, + ) return self.memory(init=init) def update_memory(self, ex_mem, new_mem): @@ -3834,10 +4141,18 @@ class DynamicRNN(object): ValueError: When :code:`update_memory()` is called before :code:`step_input()` . 
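Reviewer note: the `DynamicRNN` hunks around here only rewrap `append_op` calls, but the intended call pattern of the class is easy to lose in the churn. A hedged sketch, assuming the legacy LoD-based static-graph API (`fluid.data` with `lod_level=1`, then `step_input`, `memory`, `update_memory`, `output`), mirroring the pattern in the class docstring; the names `sentence` and the hidden size of 64 are illustrative only.

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Variable-length sequences enter as a LoD tensor (lod_level=1 assumed).
sentence = fluid.data(
    name='sentence', shape=[None, 32], dtype='float32', lod_level=1
)

drnn = fluid.layers.DynamicRNN()
with drnn.block():
    word = drnn.step_input(sentence)           # one time step per iteration
    prev = drnn.memory(shape=[64], value=0.0)  # zero-initialized state
    hidden = fluid.layers.fc(input=[word, prev], size=64, act='relu')
    drnn.update_memory(prev, hidden)           # carry state to the next step
    drnn.output(hidden)

out = drnn()                                   # outputs for all time steps
last = fluid.layers.sequence_last_step(out)    # result of the last time step
```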
""" self._assert_in_rnn_block_('update_memory') - check_type(ex_mem, 'ex_mem', Variable, - 'fluid.layers.DynamicRNN.update_memory()') - check_type(new_mem, 'new_mem', Variable, - 'fluid.layers.DynamicRNN.update_memory()') + check_type( + ex_mem, + 'ex_mem', + Variable, + 'fluid.layers.DynamicRNN.update_memory()', + ) + check_type( + new_mem, + 'new_mem', + Variable, + 'fluid.layers.DynamicRNN.update_memory()', + ) mem_array = self.mem_dict.get(ex_mem.name, None) if mem_array is None: @@ -3864,13 +4179,16 @@ class DynamicRNN(object): self._assert_in_rnn_block_('output') parent_block = self._parent_block_() for each in outputs: - check_type(each, "outputs", Variable, - "fluid.layers.DynamicRNN.output") + check_type( + each, "outputs", Variable, "fluid.layers.DynamicRNN.output" + ) outside_array = parent_block.create_var( - name=unique_name.generate_with_ignorable_key("_".join( - [self.helper.name, "output_array", each.name])), + name=unique_name.generate_with_ignorable_key( + "_".join([self.helper.name, "output_array", each.name]) + ), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=each.dtype) + dtype=each.dtype, + ) array_write(x=each, i=self.step_idx, array=outside_array) self.output_array.append(outside_array) @@ -3878,16 +4196,19 @@ class DynamicRNN(object): if self.zero_idx is None: parent_block = self._parent_block_() self.zero_idx = parent_block.create_var( - name=unique_name.generate('zero_idx'), dtype='int64') - parent_block.append_op(type='fill_constant', - inputs={}, - outputs={'Out': [self.zero_idx]}, - attrs={ - 'shape': [1], - 'dtype': self.zero_idx.dtype, - 'value': float(0), - 'force_cpu': True - }) + name=unique_name.generate('zero_idx'), dtype='int64' + ) + parent_block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [self.zero_idx]}, + attrs={ + 'shape': [1], + 'dtype': self.zero_idx.dtype, + 'value': float(0), + 'force_cpu': True, + }, + ) def _parent_block_(self): prog = self.helper.main_program @@ -3900,7 +4221,8 @@ class DynamicRNN(object): def _assert_in_rnn_block_(self, method): if self.status != DynamicRNN.IN_RNN: raise ValueError( - "{0} can only be invoked inside rnn block.".format(method)) + "{0} can only be invoked inside rnn block.".format(method) + ) def switch_case(branch_index, branch_fns, default=None, name=None): @@ -3977,44 +4299,71 @@ def switch_case(branch_index, branch_fns, default=None, name=None): def _check_args(branch_index, branch_fns, default): - check_variable_and_dtype(branch_index, 'branch_index', - ['uint8', 'int32', 'int64'], 'switch_case') + check_variable_and_dtype( + branch_index, + 'branch_index', + ['uint8', 'int32', 'int64'], + 'switch_case', + ) if convert_dtype(branch_index.dtype) != "int64": branch_index = cast(branch_index, "int64") check_type(branch_fns, 'branch_fns', (list, tuple, dict), 'switch_case') - branch_fns = branch_fns.items() if isinstance(branch_fns, - dict) else branch_fns + branch_fns = ( + branch_fns.items() if isinstance(branch_fns, dict) else branch_fns + ) - branch_fns = list(enumerate(branch_fns)) if all( - callable(fn) for fn in branch_fns) else branch_fns + branch_fns = ( + list(enumerate(branch_fns)) + if all(callable(fn) for fn in branch_fns) + else branch_fns + ) keys_of_fns = [] for index_fn_pair in branch_fns: if not isinstance(index_fn_pair, tuple): raise TypeError( - _error_message("The elements' type", "branch_fns", - "switch_case", tuple, type(branch_fns))) + _error_message( + "The elements' type", + "branch_fns", + "switch_case", + tuple, + type(branch_fns), + ) + ) if 
len(index_fn_pair) != 2: raise TypeError( - _error_message("The tuple's size", "branch_fns", - "switch_case", "2", - str(len(index_fn_pair)) + "-tuple")) + _error_message( + "The tuple's size", + "branch_fns", + "switch_case", + "2", + str(len(index_fn_pair)) + "-tuple", + ) + ) key, fn = index_fn_pair if not isinstance(key, int): raise TypeError( - _error_message("The key's type", "branch_fns", - "switch_case", int, type(key))) + _error_message( + "The key's type", + "branch_fns", + "switch_case", + int, + type(key), + ) + ) if key in keys_of_fns: raise ValueError( - "The key in 'branch_fns' must be unique, but '{}' appears more than once." - .format(key)) + "The key in 'branch_fns' must be unique, but '{}' appears more than once.".format( + key + ) + ) else: keys_of_fns.append(key) @@ -4022,7 +4371,12 @@ def switch_case(branch_index, branch_fns, default=None, name=None): raise TypeError( _error_message( "The type of function for key {}".format(key), - "branch_fns", "switch_case", "callable", type(fn))) + "branch_fns", + "switch_case", + "callable", + type(fn), + ) + ) if default is None: default = sorted(branch_fns)[-1][1] @@ -4073,20 +4427,20 @@ def reorder_lod_tensor_by_rank(x, rank_table): """ check_type(x, 'x', (Variable), 'reorder_lod_tensor_by_rank') - check_type(rank_table, 'rank_table', (Variable), - 'reorder_lod_tensor_by_rank') + check_type( + rank_table, 'rank_table', (Variable), 'reorder_lod_tensor_by_rank' + ) if rank_table.type != core.VarDesc.VarType.LOD_RANK_TABLE: raise TypeError("The type of rank_table should be LOD_RANK_TABLE.") helper = LayerHelper('reorder_lod_tensor_by_rank', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reorder_lod_tensor_by_rank', - inputs={ - 'X': [x], - 'RankTable': [rank_table] - }, - outputs={'Out': [out]}) + helper.append_op( + type='reorder_lod_tensor_by_rank', + inputs={'X': [x], 'RankTable': [rank_table]}, + outputs={'Out': [out]}, + ) return out @@ -4125,14 +4479,15 @@ def is_empty(x, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.is_empty(x) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'is_empty') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty' + ) check_type(name, "name", (str, type(None)), "is_empty") helper = LayerHelper("is_empty", **locals()) cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True - helper.append_op(type='is_empty', - inputs={'X': [x]}, - outputs={'Out': [cond]}) + helper.append_op( + type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]} + ) return cond diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 80754898e72cac6bbe5c82309eb89af05a548521..d577459237472c37b46c2e075a359d378fe48171 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -31,7 +31,12 @@ from ..data_feeder import check_variable_and_dtype, check_type, check_dtype import math import numpy as np from functools import reduce -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from paddle.utils import deprecated from paddle import _C_ops, _legacy_C_ops from ..framework import in_dygraph_mode @@ -68,17 +73,19 @@ __all__ = [ ] -def retinanet_target_assign(bbox_pred, - cls_logits, - anchor_box, - anchor_var, - gt_boxes, - gt_labels, - 
is_crowd, - im_info, - num_classes=1, - positive_overlap=0.5, - negative_overlap=0.4): +def retinanet_target_assign( + bbox_pred, + cls_logits, + anchor_box, + anchor_var, + gt_boxes, + gt_labels, + is_crowd, + im_info, + num_classes=1, + positive_overlap=0.5, + negative_overlap=0.4, +): r""" **Target Assign Layer for the detector RetinaNet.** @@ -245,22 +252,42 @@ def retinanet_target_assign(bbox_pred, """ - check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], - 'retinanet_target_assign') - check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], - 'retinanet_target_assign') - check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], - 'retinanet_target_assign') - check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], - 'retinanet_target_assign') - check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], - 'retinanet_target_assign') - check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'], - 'retinanet_target_assign') - check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], - 'retinanet_target_assign') - check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], - 'retinanet_target_assign') + check_variable_and_dtype( + bbox_pred, + 'bbox_pred', + ['float32', 'float64'], + 'retinanet_target_assign', + ) + check_variable_and_dtype( + cls_logits, + 'cls_logits', + ['float32', 'float64'], + 'retinanet_target_assign', + ) + check_variable_and_dtype( + anchor_box, + 'anchor_box', + ['float32', 'float64'], + 'retinanet_target_assign', + ) + check_variable_and_dtype( + anchor_var, + 'anchor_var', + ['float32', 'float64'], + 'retinanet_target_assign', + ) + check_variable_and_dtype( + gt_boxes, 'gt_boxes', ['float32', 'float64'], 'retinanet_target_assign' + ) + check_variable_and_dtype( + gt_labels, 'gt_labels', ['int32'], 'retinanet_target_assign' + ) + check_variable_and_dtype( + is_crowd, 'is_crowd', ['int32'], 'retinanet_target_assign' + ) + check_variable_and_dtype( + im_info, 'im_info', ['float32', 'float64'], 'retinanet_target_assign' + ) helper = LayerHelper('retinanet_target_assign', **locals()) # Assign target label to anchors @@ -268,30 +295,34 @@ def retinanet_target_assign(bbox_pred, score_index = helper.create_variable_for_type_inference(dtype='int32') target_label = helper.create_variable_for_type_inference(dtype='int32') target_bbox = helper.create_variable_for_type_inference( - dtype=anchor_box.dtype) + dtype=anchor_box.dtype + ) bbox_inside_weight = helper.create_variable_for_type_inference( - dtype=anchor_box.dtype) + dtype=anchor_box.dtype + ) fg_num = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type="retinanet_target_assign", - inputs={ - 'Anchor': anchor_box, - 'GtBoxes': gt_boxes, - 'GtLabels': gt_labels, - 'IsCrowd': is_crowd, - 'ImInfo': im_info - }, - outputs={ - 'LocationIndex': loc_index, - 'ScoreIndex': score_index, - 'TargetLabel': target_label, - 'TargetBBox': target_bbox, - 'BBoxInsideWeight': bbox_inside_weight, - 'ForegroundNumber': fg_num - }, - attrs={ - 'positive_overlap': positive_overlap, - 'negative_overlap': negative_overlap - }) + helper.append_op( + type="retinanet_target_assign", + inputs={ + 'Anchor': anchor_box, + 'GtBoxes': gt_boxes, + 'GtLabels': gt_labels, + 'IsCrowd': is_crowd, + 'ImInfo': im_info, + }, + outputs={ + 'LocationIndex': loc_index, + 'ScoreIndex': score_index, + 'TargetLabel': target_label, + 'TargetBBox': target_bbox, + 'BBoxInsideWeight': bbox_inside_weight, + 'ForegroundNumber': fg_num, + }, + 
attrs={ + 'positive_overlap': positive_overlap, + 'negative_overlap': negative_overlap, + }, + ) loc_index.stop_gradient = True score_index.stop_gradient = True @@ -305,22 +336,31 @@ def retinanet_target_assign(bbox_pred, predicted_cls_logits = nn.gather(cls_logits, score_index) predicted_bbox_pred = nn.gather(bbox_pred, loc_index) - return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num - - -def rpn_target_assign(bbox_pred, - cls_logits, - anchor_box, - anchor_var, - gt_boxes, - is_crowd, - im_info, - rpn_batch_size_per_im=256, - rpn_straddle_thresh=0.0, - rpn_fg_fraction=0.5, - rpn_positive_overlap=0.7, - rpn_negative_overlap=0.3, - use_random=True): + return ( + predicted_cls_logits, + predicted_bbox_pred, + target_label, + target_bbox, + bbox_inside_weight, + fg_num, + ) + + +def rpn_target_assign( + bbox_pred, + cls_logits, + anchor_box, + anchor_var, + gt_boxes, + is_crowd, + im_info, + rpn_batch_size_per_im=256, + rpn_straddle_thresh=0.0, + rpn_fg_fraction=0.5, + rpn_positive_overlap=0.7, + rpn_negative_overlap=0.3, + use_random=True, +): """ **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.** @@ -410,51 +450,62 @@ def rpn_target_assign(bbox_pred, helper = LayerHelper('rpn_target_assign', **locals()) - check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'], - 'rpn_target_assign') - check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'], - 'rpn_target_assign') - check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'], - 'rpn_target_assign') - check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'], - 'rpn_target_assign') - check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'], - 'rpn_target_assign') - check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], - 'rpn_target_assign') - check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], - 'rpn_target_assign') + check_variable_and_dtype( + bbox_pred, 'bbox_pred', ['float32', 'float64'], 'rpn_target_assign' + ) + check_variable_and_dtype( + cls_logits, 'cls_logits', ['float32', 'float64'], 'rpn_target_assign' + ) + check_variable_and_dtype( + anchor_box, 'anchor_box', ['float32', 'float64'], 'rpn_target_assign' + ) + check_variable_and_dtype( + anchor_var, 'anchor_var', ['float32', 'float64'], 'rpn_target_assign' + ) + check_variable_and_dtype( + gt_boxes, 'gt_boxes', ['float32', 'float64'], 'rpn_target_assign' + ) + check_variable_and_dtype( + is_crowd, 'is_crowd', ['int32'], 'rpn_target_assign' + ) + check_variable_and_dtype( + im_info, 'im_info', ['float32', 'float64'], 'rpn_target_assign' + ) # Assign target label to anchors loc_index = helper.create_variable_for_type_inference(dtype='int32') score_index = helper.create_variable_for_type_inference(dtype='int32') target_label = helper.create_variable_for_type_inference(dtype='int32') target_bbox = helper.create_variable_for_type_inference( - dtype=anchor_box.dtype) + dtype=anchor_box.dtype + ) bbox_inside_weight = helper.create_variable_for_type_inference( - dtype=anchor_box.dtype) - helper.append_op(type="rpn_target_assign", - inputs={ - 'Anchor': anchor_box, - 'GtBoxes': gt_boxes, - 'IsCrowd': is_crowd, - 'ImInfo': im_info - }, - outputs={ - 'LocationIndex': loc_index, - 'ScoreIndex': score_index, - 'TargetLabel': target_label, - 'TargetBBox': target_bbox, - 'BBoxInsideWeight': bbox_inside_weight - }, - attrs={ - 'rpn_batch_size_per_im': rpn_batch_size_per_im, - 'rpn_straddle_thresh': 
rpn_straddle_thresh, - 'rpn_positive_overlap': rpn_positive_overlap, - 'rpn_negative_overlap': rpn_negative_overlap, - 'rpn_fg_fraction': rpn_fg_fraction, - 'use_random': use_random - }) + dtype=anchor_box.dtype + ) + helper.append_op( + type="rpn_target_assign", + inputs={ + 'Anchor': anchor_box, + 'GtBoxes': gt_boxes, + 'IsCrowd': is_crowd, + 'ImInfo': im_info, + }, + outputs={ + 'LocationIndex': loc_index, + 'ScoreIndex': score_index, + 'TargetLabel': target_label, + 'TargetBBox': target_bbox, + 'BBoxInsideWeight': bbox_inside_weight, + }, + attrs={ + 'rpn_batch_size_per_im': rpn_batch_size_per_im, + 'rpn_straddle_thresh': rpn_straddle_thresh, + 'rpn_positive_overlap': rpn_positive_overlap, + 'rpn_negative_overlap': rpn_negative_overlap, + 'rpn_fg_fraction': rpn_fg_fraction, + 'use_random': use_random, + }, + ) loc_index.stop_gradient = True score_index.stop_gradient = True @@ -467,7 +518,13 @@ def rpn_target_assign(bbox_pred, predicted_cls_logits = nn.gather(cls_logits, score_index) predicted_bbox_pred = nn.gather(bbox_pred, loc_index) - return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight + return ( + predicted_cls_logits, + predicted_bbox_pred, + target_label, + target_bbox, + bbox_inside_weight, + ) def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25): @@ -597,8 +654,9 @@ def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25): print(outs) """ - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'sigmoid_focal_loss') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'sigmoid_focal_loss' + ) check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss') check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss') @@ -606,31 +664,28 @@ def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25): out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="sigmoid_focal_loss", - inputs={ - "X": x, - "Label": label, - "FgNum": fg_num - }, - attrs={ - "gamma": gamma, - 'alpha': alpha - }, - outputs={"Out": out}) + helper.append_op( + type="sigmoid_focal_loss", + inputs={"X": x, "Label": label, "FgNum": fg_num}, + attrs={"gamma": gamma, 'alpha': alpha}, + outputs={"Out": out}, + ) return out -def detection_output(loc, - scores, - prior_box, - prior_box_var, - background_label=0, - nms_threshold=0.3, - nms_top_k=400, - keep_top_k=200, - score_threshold=0.01, - nms_eta=1.0, - return_index=False): +def detection_output( + loc, + scores, + prior_box, + prior_box_var, + background_label=0, + nms_threshold=0.3, + nms_top_k=400, + keep_top_k=200, + score_threshold=0.01, + nms_eta=1.0, + return_index=False, +): """ Given the regression locations, classification confidences and prior boxes, @@ -715,50 +770,48 @@ def detection_output(loc, return_index=True) """ helper = LayerHelper("detection_output", **locals()) - decoded_box = box_coder(prior_box=prior_box, - prior_box_var=prior_box_var, - target_box=loc, - code_type='decode_center_size') + decoded_box = box_coder( + prior_box=prior_box, + prior_box_var=prior_box_var, + target_box=loc, + code_type='decode_center_size', + ) scores = nn.softmax(input=scores) scores = nn.transpose(scores, perm=[0, 2, 1]) scores.stop_gradient = True nmsed_outs = helper.create_variable_for_type_inference( - dtype=decoded_box.dtype) + dtype=decoded_box.dtype + ) if return_index: index = helper.create_variable_for_type_inference(dtype='int') - helper.append_op(type="multiclass_nms2", - inputs={ - 'Scores': scores, - 'BBoxes': 
decoded_box - }, - outputs={ - 'Out': nmsed_outs, - 'Index': index - }, - attrs={ - 'background_label': 0, - 'nms_threshold': nms_threshold, - 'nms_top_k': nms_top_k, - 'keep_top_k': keep_top_k, - 'score_threshold': score_threshold, - 'nms_eta': 1.0, - }) + helper.append_op( + type="multiclass_nms2", + inputs={'Scores': scores, 'BBoxes': decoded_box}, + outputs={'Out': nmsed_outs, 'Index': index}, + attrs={ + 'background_label': 0, + 'nms_threshold': nms_threshold, + 'nms_top_k': nms_top_k, + 'keep_top_k': keep_top_k, + 'score_threshold': score_threshold, + 'nms_eta': 1.0, + }, + ) index.stop_gradient = True else: - helper.append_op(type="multiclass_nms", - inputs={ - 'Scores': scores, - 'BBoxes': decoded_box - }, - outputs={'Out': nmsed_outs}, - attrs={ - 'background_label': 0, - 'nms_threshold': nms_threshold, - 'nms_top_k': nms_top_k, - 'keep_top_k': keep_top_k, - 'score_threshold': score_threshold, - 'nms_eta': 1.0, - }) + helper.append_op( + type="multiclass_nms", + inputs={'Scores': scores, 'BBoxes': decoded_box}, + outputs={'Out': nmsed_outs}, + attrs={ + 'background_label': 0, + 'nms_threshold': nms_threshold, + 'nms_top_k': nms_top_k, + 'keep_top_k': keep_top_k, + 'score_threshold': score_threshold, + 'nms_eta': 1.0, + }, + ) nmsed_outs.stop_gradient = True if return_index: return nmsed_outs, index @@ -768,9 +821,9 @@ def detection_output(loc, @templatedoc() def iou_similarity(x, y, box_normalized=True, name=None): """ - :alias_main: paddle.nn.functional.iou_similarity - :alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity - :old_api: paddle.fluid.layers.iou_similarity + :alias_main: paddle.nn.functional.iou_similarity + :alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity + :old_api: paddle.fluid.layers.iou_similarity ${comment} @@ -810,24 +863,25 @@ def iou_similarity(x, y, box_normalized=True, name=None): helper = LayerHelper("iou_similarity", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="iou_similarity", - inputs={ - "X": x, - "Y": y - }, - attrs={"box_normalized": box_normalized}, - outputs={"Out": out}) + helper.append_op( + type="iou_similarity", + inputs={"X": x, "Y": y}, + attrs={"box_normalized": box_normalized}, + outputs={"Out": out}, + ) return out @templatedoc() -def box_coder(prior_box, - prior_box_var, - target_box, - code_type="encode_center_size", - box_normalized=True, - name=None, - axis=0): +def box_coder( + prior_box, + prior_box_var, + target_box, + code_type="encode_center_size", + box_normalized=True, + name=None, + axis=0, +): r""" **Box Coder Layer** @@ -942,33 +996,49 @@ def box_coder(prior_box, box_normalized=False, axis=1) """ - check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'], - 'box_coder') - check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'], - 'box_coder') + check_variable_and_dtype( + prior_box, 'prior_box', ['float32', 'float64'], 'box_coder' + ) + check_variable_and_dtype( + target_box, 'target_box', ['float32', 'float64'], 'box_coder' + ) if in_dygraph_mode(): if isinstance(prior_box_var, Variable): - box_coder_op = _C_ops.box_coder(prior_box, prior_box_var, - target_box, code_type, - box_normalized, axis, []) + box_coder_op = _C_ops.box_coder( + prior_box, + prior_box_var, + target_box, + code_type, + box_normalized, + axis, + [], + ) elif isinstance(prior_box_var, list): - box_coder_op = _C_ops.box_coder(prior_box, None, target_box, - code_type, box_normalized, axis, - 
prior_box_var) + box_coder_op = _C_ops.box_coder( + prior_box, + None, + target_box, + code_type, + box_normalized, + axis, + prior_box_var, + ) else: raise TypeError( - "Input variance of box_coder must be Variable or lisz") + "Input variance of box_coder must be Variable or lisz" + ) return box_coder_op helper = LayerHelper("box_coder", **locals()) output_box = helper.create_variable_for_type_inference( - dtype=prior_box.dtype) + dtype=prior_box.dtype + ) inputs = {"PriorBox": prior_box, "TargetBox": target_box} attrs = { "code_type": code_type, "box_normalized": box_normalized, - "axis": axis + "axis": axis, } if isinstance(prior_box_var, Variable): inputs['PriorBoxVar'] = prior_box_var @@ -976,10 +1046,12 @@ def box_coder(prior_box, attrs['variance'] = prior_box_var else: raise TypeError("Input variance of box_coder must be Variable or lisz") - helper.append_op(type="box_coder", - inputs=inputs, - attrs=attrs, - outputs={"OutputBox": output_box}) + helper.append_op( + type="box_coder", + inputs=inputs, + attrs=attrs, + outputs={"OutputBox": output_box}, + ) return output_box @@ -1004,32 +1076,37 @@ def polygon_box_transform(input, name=None): input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32') out = fluid.layers.polygon_box_transform(input) """ - check_variable_and_dtype(input, "input", ['float32', 'float64'], - 'polygon_box_transform') + check_variable_and_dtype( + input, "input", ['float32', 'float64'], 'polygon_box_transform' + ) helper = LayerHelper("polygon_box_transform", **locals()) output = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="polygon_box_transform", - inputs={"Input": input}, - attrs={}, - outputs={"Output": output}) + helper.append_op( + type="polygon_box_transform", + inputs={"Input": input}, + attrs={}, + outputs={"Output": output}, + ) return output @deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_loss") @templatedoc(op_type="yolov3_loss") -def yolov3_loss(x, - gt_box, - gt_label, - anchors, - anchor_mask, - class_num, - ignore_thresh, - downsample_ratio, - gt_score=None, - use_label_smooth=True, - name=None, - scale_x_y=1.): +def yolov3_loss( + x, + gt_box, + gt_label, + anchors, + anchor_mask, + class_num, + ignore_thresh, + downsample_ratio, + gt_score=None, + use_label_smooth=True, + name=None, + scale_x_y=1.0, +): """ ${comment} @@ -1105,18 +1182,33 @@ def yolov3_loss(x, raise TypeError("Attr class_num of yolov3_loss must be an integer") if not isinstance(ignore_thresh, float): raise TypeError( - "Attr ignore_thresh of yolov3_loss must be a float number") + "Attr ignore_thresh of yolov3_loss must be a float number" + ) if not isinstance(use_label_smooth, bool): raise TypeError( - "Attr use_label_smooth of yolov3_loss must be a bool value") + "Attr use_label_smooth of yolov3_loss must be a bool value" + ) if _non_static_mode(): - attrs = ("anchors", anchors, "anchor_mask", anchor_mask, "class_num", - class_num, "ignore_thresh", ignore_thresh, "downsample_ratio", - downsample_ratio, "use_label_smooth", use_label_smooth, - "scale_x_y", scale_x_y) - loss, _, _ = _legacy_C_ops.yolov3_loss(x, gt_box, gt_label, gt_score, - *attrs) + attrs = ( + "anchors", + anchors, + "anchor_mask", + anchor_mask, + "class_num", + class_num, + "ignore_thresh", + ignore_thresh, + "downsample_ratio", + downsample_ratio, + "use_label_smooth", + use_label_smooth, + "scale_x_y", + scale_x_y, + ) + loss, _, _ = _legacy_C_ops.yolov3_loss( + x, gt_box, gt_label, gt_score, *attrs + ) return loss helper = 
LayerHelper('yolov3_loss', **locals()) @@ -1142,30 +1234,34 @@ def yolov3_loss(x, "scale_x_y": scale_x_y, } - helper.append_op(type='yolov3_loss', - inputs=inputs, - outputs={ - 'Loss': loss, - 'ObjectnessMask': objectness_mask, - 'GTMatchMask': gt_match_mask - }, - attrs=attrs) + helper.append_op( + type='yolov3_loss', + inputs=inputs, + outputs={ + 'Loss': loss, + 'ObjectnessMask': objectness_mask, + 'GTMatchMask': gt_match_mask, + }, + attrs=attrs, + ) return loss @deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_box") @templatedoc(op_type="yolo_box") -def yolo_box(x, - img_size, - anchors, - class_num, - conf_thresh, - downsample_ratio, - clip_bbox=True, - name=None, - scale_x_y=1., - iou_aware=False, - iou_aware_factor=0.5): +def yolo_box( + x, + img_size, + anchors, + class_num, + conf_thresh, + downsample_ratio, + clip_bbox=True, + name=None, + scale_x_y=1.0, + iou_aware=False, + iou_aware_factor=0.5, +): """ ${comment} @@ -1233,33 +1329,37 @@ def yolo_box(x, "clip_bbox": clip_bbox, "scale_x_y": scale_x_y, "iou_aware": iou_aware, - "iou_aware_factor": iou_aware_factor + "iou_aware_factor": iou_aware_factor, } - helper.append_op(type='yolo_box', - inputs={ - "X": x, - "ImgSize": img_size, - }, - outputs={ - 'Boxes': boxes, - 'Scores': scores, - }, - attrs=attrs) + helper.append_op( + type='yolo_box', + inputs={ + "X": x, + "ImgSize": img_size, + }, + outputs={ + 'Boxes': boxes, + 'Scores': scores, + }, + attrs=attrs, + ) return boxes, scores @templatedoc() -def detection_map(detect_res, - label, - class_num, - background_label=0, - overlap_threshold=0.3, - evaluate_difficult=True, - has_state=None, - input_states=None, - out_states=None, - ap_version='integral'): +def detection_map( + detect_res, + label, + class_num, + background_label=0, + overlap_threshold=0.3, + evaluate_difficult=True, + has_state=None, + input_states=None, + out_states=None, + ap_version='integral', +): """ ${comment} @@ -1307,45 +1407,49 @@ def detection_map(detect_res, return helper.create_variable_for_type_inference(dtype=type) map_out = __create_var('float32') - accum_pos_count_out = out_states[ - 0] if out_states is not None else __create_var('int32') - accum_true_pos_out = out_states[ - 1] if out_states is not None else __create_var('float32') - accum_false_pos_out = out_states[ - 2] if out_states is not None else __create_var('float32') + accum_pos_count_out = ( + out_states[0] if out_states is not None else __create_var('int32') + ) + accum_true_pos_out = ( + out_states[1] if out_states is not None else __create_var('float32') + ) + accum_false_pos_out = ( + out_states[2] if out_states is not None else __create_var('float32') + ) pos_count = input_states[0] if input_states is not None else None true_pos = input_states[1] if input_states is not None else None false_pos = input_states[2] if input_states is not None else None - helper.append_op(type="detection_map", - inputs={ - 'Label': label, - 'DetectRes': detect_res, - 'HasState': has_state, - 'PosCount': pos_count, - 'TruePos': true_pos, - 'FalsePos': false_pos - }, - outputs={ - 'MAP': map_out, - 'AccumPosCount': accum_pos_count_out, - 'AccumTruePos': accum_true_pos_out, - 'AccumFalsePos': accum_false_pos_out - }, - attrs={ - 'overlap_threshold': overlap_threshold, - 'evaluate_difficult': evaluate_difficult, - 'ap_type': ap_version, - 'class_num': class_num, - }) + helper.append_op( + type="detection_map", + inputs={ + 'Label': label, + 'DetectRes': detect_res, + 'HasState': has_state, + 'PosCount': pos_count, + 'TruePos': true_pos, + 
'FalsePos': false_pos, + }, + outputs={ + 'MAP': map_out, + 'AccumPosCount': accum_pos_count_out, + 'AccumTruePos': accum_true_pos_out, + 'AccumFalsePos': accum_false_pos_out, + }, + attrs={ + 'overlap_threshold': overlap_threshold, + 'evaluate_difficult': evaluate_difficult, + 'ap_type': ap_version, + 'class_num': class_num, + }, + ) return map_out -def bipartite_match(dist_matrix, - match_type=None, - dist_threshold=None, - name=None): +def bipartite_match( + dist_matrix, match_type=None, dist_threshold=None, name=None +): """ This operator implements a greedy bipartite matching algorithm, which is @@ -1417,25 +1521,30 @@ def bipartite_match(dist_matrix, helper = LayerHelper('bipartite_match', **locals()) match_indices = helper.create_variable_for_type_inference(dtype='int32') match_distance = helper.create_variable_for_type_inference( - dtype=dist_matrix.dtype) - helper.append_op(type='bipartite_match', - inputs={'DistMat': dist_matrix}, - attrs={ - 'match_type': match_type, - 'dist_threshold': dist_threshold, - }, - outputs={ - 'ColToRowMatchIndices': match_indices, - 'ColToRowMatchDist': match_distance - }) + dtype=dist_matrix.dtype + ) + helper.append_op( + type='bipartite_match', + inputs={'DistMat': dist_matrix}, + attrs={ + 'match_type': match_type, + 'dist_threshold': dist_threshold, + }, + outputs={ + 'ColToRowMatchIndices': match_indices, + 'ColToRowMatchDist': match_distance, + }, + ) return match_indices, match_distance -def target_assign(input, - matched_indices, - negative_indices=None, - mismatch_value=None, - name=None): +def target_assign( + input, + matched_indices, + negative_indices=None, + mismatch_value=None, + name=None, +): """ This operator can be, for given the target bounding boxes or labels, @@ -1524,36 +1633,37 @@ def target_assign(input, helper = LayerHelper('target_assign', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) out_weight = helper.create_variable_for_type_inference(dtype='float32') - helper.append_op(type='target_assign', - inputs={ - 'X': input, - 'MatchIndices': matched_indices, - 'NegIndices': negative_indices - }, - outputs={ - 'Out': out, - 'OutWeight': out_weight - }, - attrs={'mismatch_value': mismatch_value}) + helper.append_op( + type='target_assign', + inputs={ + 'X': input, + 'MatchIndices': matched_indices, + 'NegIndices': negative_indices, + }, + outputs={'Out': out, 'OutWeight': out_weight}, + attrs={'mismatch_value': mismatch_value}, + ) return out, out_weight -def ssd_loss(location, - confidence, - gt_box, - gt_label, - prior_box, - prior_box_var=None, - background_label=0, - overlap_threshold=0.5, - neg_pos_ratio=3.0, - neg_overlap=0.5, - loc_loss_weight=1.0, - conf_loss_weight=1.0, - match_type='per_prediction', - mining_type='max_negative', - normalize=True, - sample_size=None): +def ssd_loss( + location, + confidence, + gt_box, + gt_label, + prior_box, + prior_box_var=None, + background_label=0, + overlap_threshold=0.5, + neg_pos_ratio=3.0, + neg_overlap=0.5, + loc_loss_weight=1.0, + conf_loss_weight=1.0, + match_type='per_prediction', + mining_type='max_negative', + normalize=True, + sample_size=None, +): r""" :alias_main: paddle.nn.functional.ssd_loss :alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss @@ -1685,17 +1795,19 @@ def ssd_loss(location, # 1.1 Compute IOU similarity between ground-truth boxes and prior boxes. iou = iou_similarity(x=gt_box, y=prior_box) # 1.2 Compute matched bounding box by bipartite matching algorithm. 
- matched_indices, matched_dist = bipartite_match(iou, match_type, - overlap_threshold) + matched_indices, matched_dist = bipartite_match( + iou, match_type, overlap_threshold + ) # 2. Compute confidence for mining hard examples # 2.1. Get the target label based on matched indices - gt_label = nn.reshape(x=gt_label, - shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1)) + gt_label = nn.reshape( + x=gt_label, shape=(len(gt_label.shape) - 1) * (0,) + (-1, 1) + ) gt_label.stop_gradient = True - target_label, _ = target_assign(gt_label, - matched_indices, - mismatch_value=background_label) + target_label, _ = target_assign( + gt_label, matched_indices, mismatch_value=background_label + ) # 2.2. Compute confidence loss. # Reshape confidence to 2D tensor. confidence = __reshape_to_2d(confidence) @@ -1708,47 +1820,54 @@ def ssd_loss(location, actual_shape.stop_gradient = True # shape=(-1, 0) is set for compile-time, the correct shape is set by # actual_shape in runtime. - conf_loss = nn.reshape(x=conf_loss, - shape=(-1, 0), - actual_shape=actual_shape) + conf_loss = nn.reshape( + x=conf_loss, shape=(-1, 0), actual_shape=actual_shape + ) conf_loss.stop_gradient = True neg_indices = helper.create_variable_for_type_inference(dtype='int32') dtype = matched_indices.dtype updated_matched_indices = helper.create_variable_for_type_inference( - dtype=dtype) - helper.append_op(type='mine_hard_examples', - inputs={ - 'ClsLoss': conf_loss, - 'LocLoss': None, - 'MatchIndices': matched_indices, - 'MatchDist': matched_dist, - }, - outputs={ - 'NegIndices': neg_indices, - 'UpdatedMatchIndices': updated_matched_indices - }, - attrs={ - 'neg_pos_ratio': neg_pos_ratio, - 'neg_dist_threshold': neg_overlap, - 'mining_type': mining_type, - 'sample_size': sample_size, - }) + dtype=dtype + ) + helper.append_op( + type='mine_hard_examples', + inputs={ + 'ClsLoss': conf_loss, + 'LocLoss': None, + 'MatchIndices': matched_indices, + 'MatchDist': matched_dist, + }, + outputs={ + 'NegIndices': neg_indices, + 'UpdatedMatchIndices': updated_matched_indices, + }, + attrs={ + 'neg_pos_ratio': neg_pos_ratio, + 'neg_dist_threshold': neg_overlap, + 'mining_type': mining_type, + 'sample_size': sample_size, + }, + ) # 4. Assign classification and regression targets # 4.1. Encoded bbox according to the prior boxes. - encoded_bbox = box_coder(prior_box=prior_box, - prior_box_var=prior_box_var, - target_box=gt_box, - code_type='encode_center_size') + encoded_bbox = box_coder( + prior_box=prior_box, + prior_box_var=prior_box_var, + target_box=gt_box, + code_type='encode_center_size', + ) # 4.2. Assign regression targets target_bbox, target_loc_weight = target_assign( - encoded_bbox, updated_matched_indices, mismatch_value=background_label) + encoded_bbox, updated_matched_indices, mismatch_value=background_label + ) # 4.3. Assign classification targets target_label, target_conf_weight = target_assign( gt_label, updated_matched_indices, negative_indices=neg_indices, - mismatch_value=background_label) + mismatch_value=background_label, + ) # 5. Compute loss. # 5.1 Compute confidence loss. @@ -1794,7 +1913,7 @@ def prior_box( image, min_sizes, max_sizes=None, - aspect_ratios=[1.], + aspect_ratios=[1.0], variance=[0.1, 0.1, 0.2, 0.2], flip=False, clip=False, @@ -1840,66 +1959,66 @@ def prior_box( Tuple: A tuple with two Variable (boxes, variances) boxes(Variable): the output prior boxes of PriorBox. - 4-D tensor, the layout is [H, W, num_priors, 4]. + 4-D tensor, the layout is [H, W, num_priors, 4]. 
H is the height of input, W is the width of input, num_priors is the total box count of each position of input. variances(Variable): the expanded variances of PriorBox. - 4-D tensor, the layput is [H, W, num_priors, 4]. + 4-D tensor, the layput is [H, W, num_priors, 4]. H is the height of input, W is the width of input num_priors is the total box count of each position of input Examples: .. code-block:: python - #declarative mode - import paddle.fluid as fluid - import numpy as np + #declarative mode + import paddle.fluid as fluid + import numpy as np import paddle paddle.enable_static() - input = fluid.data(name="input", shape=[None,3,6,9]) - image = fluid.data(name="image", shape=[None,3,9,12]) - box, var = fluid.layers.prior_box( + input = fluid.data(name="input", shape=[None,3,6,9]) + image = fluid.data(name="image", shape=[None,3,9,12]) + box, var = fluid.layers.prior_box( input=input, image=image, - min_sizes=[100.], + min_sizes=[100.], clip=True, flip=True) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - # prepare a batch of data - input_data = np.random.rand(1,3,6,9).astype("float32") - image_data = np.random.rand(1,3,9,12).astype("float32") + # prepare a batch of data + input_data = np.random.rand(1,3,6,9).astype("float32") + image_data = np.random.rand(1,3,9,12).astype("float32") - box_out, var_out = exe.run(fluid.default_main_program(), + box_out, var_out = exe.run(fluid.default_main_program(), feed={"input":input_data,"image":image_data}, fetch_list=[box,var], return_numpy=True) - # print(box_out.shape) - # (6, 9, 1, 4) - # print(var_out.shape) - # (6, 9, 1, 4) - - # imperative mode - import paddle.fluid.dygraph as dg - - with dg.guard(place) as g: - input = dg.to_variable(input_data) - image = dg.to_variable(image_data) - box, var = fluid.layers.prior_box( - input=input, - image=image, - min_sizes=[100.], - clip=True, - flip=True) - # print(box.shape) - # [6L, 9L, 1L, 4L] + # print(box_out.shape) + # (6, 9, 1, 4) + # print(var_out.shape) + # (6, 9, 1, 4) + + # imperative mode + import paddle.fluid.dygraph as dg + + with dg.guard(place) as g: + input = dg.to_variable(input_data) + image = dg.to_variable(image_data) + box, var = fluid.layers.prior_box( + input=input, + image=image, + min_sizes=[100.], + clip=True, + flip=True) + # print(box.shape) + # [6L, 9L, 1L, 4L] # print(var.shape) - # [6L, 9L, 1L, 4L] + # [6L, 9L, 1L, 4L] """ @@ -1907,25 +2026,38 @@ def prior_box( step_w, step_h = steps if max_sizes == None: max_sizes = [] - return _C_ops.prior_box(input, image, min_sizes, aspect_ratios, - variance, max_sizes, flip, clip, step_w, step_h, - offset, min_max_aspect_ratios_order) + return _C_ops.prior_box( + input, + image, + min_sizes, + aspect_ratios, + variance, + max_sizes, + flip, + clip, + step_w, + step_h, + offset, + min_max_aspect_ratios_order, + ) helper = LayerHelper("prior_box", **locals()) dtype = helper.input_dtype() - check_variable_and_dtype(input, 'input', - ['uint8', 'int8', 'float32', 'float64'], - 'prior_box') + check_variable_and_dtype( + input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box' + ) def _is_list_or_tuple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) if not _is_list_or_tuple_(min_sizes): min_sizes = [min_sizes] if not _is_list_or_tuple_(aspect_ratios): aspect_ratios = [aspect_ratios] if not 
(_is_list_or_tuple_(steps) and len(steps) == 2): - raise ValueError('steps should be a list or tuple ', - 'with length 2, (step_width, step_height).') + raise ValueError( + 'steps should be a list or tuple ', + 'with length 2, (step_width, step_height).', + ) min_sizes = list(map(float, min_sizes)) aspect_ratios = list(map(float, aspect_ratios)) @@ -1940,7 +2072,7 @@ def prior_box( 'step_w': steps[0], 'step_h': steps[1], 'offset': offset, - 'min_max_aspect_ratios_order': min_max_aspect_ratios_order + 'min_max_aspect_ratios_order': min_max_aspect_ratios_order, } if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0: if not _is_list_or_tuple_(max_sizes): @@ -1951,14 +2083,8 @@ def prior_box( var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="prior_box", - inputs={ - "Input": input, - "Image": image - }, - outputs={ - "Boxes": box, - "Variances": var - }, + inputs={"Input": input, "Image": image}, + outputs={"Boxes": box, "Variances": var}, attrs=attrs, ) box.stop_gradient = True @@ -1966,17 +2092,19 @@ def prior_box( return box, var -def density_prior_box(input, - image, - densities=None, - fixed_sizes=None, - fixed_ratios=None, - variance=[0.1, 0.1, 0.2, 0.2], - clip=False, - steps=[0.0, 0.0], - offset=0.5, - flatten_to_2d=False, - name=None): +def density_prior_box( + input, + image, + densities=None, + fixed_sizes=None, + fixed_ratios=None, + variance=[0.1, 0.1, 0.2, 0.2], + clip=False, + steps=[0.0, 0.0], + offset=0.5, + flatten_to_2d=False, + name=None, +): r""" This op generates density prior boxes for SSD(Single Shot MultiBox Detector) @@ -2100,11 +2228,12 @@ def density_prior_box(input, """ helper = LayerHelper("density_prior_box", **locals()) dtype = helper.input_dtype() - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'density_prior_box') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'density_prior_box' + ) def _is_list_or_tuple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) check_type(densities, 'densities', (list, tuple), 'density_prior_box') check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box') @@ -2113,8 +2242,10 @@ def density_prior_box(input, raise ValueError('densities and fixed_sizes length should be euqal.') if not (_is_list_or_tuple_(steps) and len(steps) == 2): - raise ValueError('steps should be a list or tuple ', - 'with length 2, (step_width, step_height).') + raise ValueError( + 'steps should be a list or tuple ', + 'with length 2, (step_width, step_height).', + ) densities = list(map(int, densities)) fixed_sizes = list(map(float, fixed_sizes)) @@ -2136,14 +2267,8 @@ def density_prior_box(input, var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="density_prior_box", - inputs={ - "Input": input, - "Image": image - }, - outputs={ - "Boxes": box, - "Variances": var - }, + inputs={"Input": input, "Image": image}, + outputs={"Boxes": box, "Variances": var}, attrs=attrs, ) box.stop_gradient = True @@ -2152,29 +2277,31 @@ def density_prior_box(input, @static_only -def multi_box_head(inputs, - image, - base_size, - num_classes, - aspect_ratios, - min_ratio=None, - max_ratio=None, - min_sizes=None, - max_sizes=None, - steps=None, - step_w=None, - step_h=None, - offset=0.5, - variance=[0.1, 0.1, 0.2, 0.2], - flip=True, - clip=False, - kernel_size=1, - pad=0, - stride=1, - name=None, - min_max_aspect_ratios_order=False): +def multi_box_head( + inputs, + image, + 
base_size, + num_classes, + aspect_ratios, + min_ratio=None, + max_ratio=None, + min_sizes=None, + max_sizes=None, + steps=None, + step_w=None, + step_h=None, + offset=0.5, + variance=[0.1, 0.1, 0.2, 0.2], + flip=True, + clip=False, + kernel_size=1, + pad=0, + stride=1, + name=None, + min_max_aspect_ratios_order=False, +): """ - :api_attr: Static Graph + :api_attr: Static Graph Base on SSD ((Single Shot MultiBox Detector) algorithm, generate prior boxes, regression location and classification confidence on multiple input feature @@ -2319,7 +2446,7 @@ def multi_box_head(inputs, return out def _is_list_or_tuple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) def _is_list_or_tuple_and_equal(data, length, err_info): if not (_is_list_or_tuple_(data) and len(data) == length): @@ -2338,31 +2465,39 @@ def multi_box_head(inputs, max_sizes = [] step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) for ratio in range(min_ratio, max_ratio + 1, step): - min_sizes.append(base_size * ratio / 100.) - max_sizes.append(base_size * (ratio + step) / 100.) - min_sizes = [base_size * .10] + min_sizes - max_sizes = [base_size * .20] + max_sizes + min_sizes.append(base_size * ratio / 100.0) + max_sizes.append(base_size * (ratio + step) / 100.0) + min_sizes = [base_size * 0.10] + min_sizes + max_sizes = [base_size * 0.20] + max_sizes if aspect_ratios: _is_list_or_tuple_and_equal( - aspect_ratios, num_layer, + aspect_ratios, + num_layer, 'aspect_ratios should be list or tuple, and the length of inputs ' - 'and aspect_ratios should be the same.') + 'and aspect_ratios should be the same.', + ) if step_h is not None: _is_list_or_tuple_and_equal( - step_h, num_layer, + step_h, + num_layer, 'step_h should be list or tuple, and the length of inputs and ' - 'step_h should be the same.') + 'step_h should be the same.', + ) if step_w is not None: _is_list_or_tuple_and_equal( - step_w, num_layer, + step_w, + num_layer, 'step_w should be list or tuple, and the length of inputs and ' - 'step_w should be the same.') + 'step_w should be the same.', + ) if steps is not None: _is_list_or_tuple_and_equal( - steps, num_layer, + steps, + num_layer, 'steps should be list or tuple, and the length of inputs and ' - 'step_w should be the same.') + 'step_w should be the same.', + ) step_w = steps step_h = steps @@ -2386,9 +2521,20 @@ def multi_box_head(inputs, aspect_ratio = [aspect_ratio] step = [step_w[i] if step_w else 0.0, step_h[i] if step_w else 0.0] - box, var = prior_box(input, image, min_size, max_size, aspect_ratio, - variance, flip, clip, step, offset, None, - min_max_aspect_ratios_order) + box, var = prior_box( + input, + image, + min_size, + max_size, + aspect_ratio, + variance, + flip, + clip, + step, + offset, + None, + min_max_aspect_ratios_order, + ) box_results.append(box) var_results.append(var) @@ -2397,11 +2543,13 @@ def multi_box_head(inputs, # get loc num_loc_output = num_boxes * 4 - mbox_loc = nn.conv2d(input=input, - num_filters=num_loc_output, - filter_size=kernel_size, - padding=pad, - stride=stride) + mbox_loc = nn.conv2d( + input=input, + num_filters=num_loc_output, + filter_size=kernel_size, + padding=pad, + stride=stride, + ) mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1]) mbox_loc_flatten = nn.flatten(mbox_loc, axis=1) @@ -2409,11 +2557,13 @@ def multi_box_head(inputs, # get conf num_conf_output = num_boxes * num_classes - conf_loc = nn.conv2d(input=input, - num_filters=num_conf_output, - 
filter_size=kernel_size, - padding=pad, - stride=stride) + conf_loc = nn.conv2d( + input=input, + num_filters=num_conf_output, + filter_size=kernel_size, + padding=pad, + stride=stride, + ) conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1]) conf_loc_flatten = nn.flatten(conf_loc, axis=1) mbox_confs.append(conf_loc_flatten) @@ -2435,21 +2585,24 @@ def multi_box_head(inputs, mbox_locs_concat = tensor.concat(mbox_locs, axis=1) mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4]) mbox_confs_concat = tensor.concat(mbox_confs, axis=1) - mbox_confs_concat = nn.reshape(mbox_confs_concat, - shape=[0, -1, num_classes]) + mbox_confs_concat = nn.reshape( + mbox_confs_concat, shape=[0, -1, num_classes] + ) box.stop_gradient = True var.stop_gradient = True return mbox_locs_concat, mbox_confs_concat, box, var -def anchor_generator(input, - anchor_sizes=None, - aspect_ratios=None, - variance=[0.1, 0.1, 0.2, 0.2], - stride=None, - offset=0.5, - name=None): +def anchor_generator( + input, + anchor_sizes=None, + aspect_ratios=None, + variance=[0.1, 0.1, 0.2, 0.2], + stride=None, + offset=0.5, + name=None, +): """ **Anchor generator operator** @@ -2513,15 +2666,17 @@ def anchor_generator(input, dtype = helper.input_dtype() def _is_list_or_tuple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) if not _is_list_or_tuple_(anchor_sizes): anchor_sizes = [anchor_sizes] if not _is_list_or_tuple_(aspect_ratios): aspect_ratios = [aspect_ratios] if not (_is_list_or_tuple_(stride) and len(stride) == 2): - raise ValueError('stride should be a list or tuple ', - 'with length 2, (stride_width, stride_height).') + raise ValueError( + 'stride should be a list or tuple ', + 'with length 2, (stride_width, stride_height).', + ) anchor_sizes = list(map(float, anchor_sizes)) aspect_ratios = list(map(float, aspect_ratios)) @@ -2532,7 +2687,7 @@ def anchor_generator(input, 'aspect_ratios': aspect_ratios, 'variances': variance, 'stride': stride, - 'offset': offset + 'offset': offset, } anchor = helper.create_variable_for_type_inference(dtype) @@ -2540,10 +2695,7 @@ def anchor_generator(input, helper.append_op( type="anchor_generator", inputs={"Input": input}, - outputs={ - "Anchors": anchor, - "Variances": var - }, + outputs={"Anchors": anchor, "Variances": var}, attrs=attrs, ) anchor.stop_gradient = True @@ -2551,12 +2703,14 @@ def anchor_generator(input, return anchor, var -def roi_perspective_transform(input, - rois, - transformed_height, - transformed_width, - spatial_scale=1.0, - name=None): +def roi_perspective_transform( + input, + rois, + transformed_height, + transformed_width, + spatial_scale=1.0, + name=None, +): """ **The** `rois` **of this op should be a LoDTensor.** @@ -2606,16 +2760,24 @@ def roi_perspective_transform(input, rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32') out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0) """ - check_variable_and_dtype(input, 'input', ['float32'], - 'roi_perspective_transform') - check_variable_and_dtype(rois, 'rois', ['float32'], - 'roi_perspective_transform') - check_type(transformed_height, 'transformed_height', int, - 'roi_perspective_transform') - check_type(transformed_width, 'transformed_width', int, - 'roi_perspective_transform') - check_type(spatial_scale, 'spatial_scale', float, - 'roi_perspective_transform') + check_variable_and_dtype( + input, 'input', ['float32'], 'roi_perspective_transform' + ) + 
check_variable_and_dtype( + rois, 'rois', ['float32'], 'roi_perspective_transform' + ) + check_type( + transformed_height, + 'transformed_height', + int, + 'roi_perspective_transform', + ) + check_type( + transformed_width, 'transformed_width', int, 'roi_perspective_transform' + ) + check_type( + spatial_scale, 'spatial_scale', float, 'roi_perspective_transform' + ) helper = LayerHelper('roi_perspective_transform', **locals()) dtype = helper.input_dtype() @@ -2624,43 +2786,44 @@ def roi_perspective_transform(input, transform_matrix = helper.create_variable_for_type_inference(dtype) out2in_idx = helper.create_variable_for_type_inference(dtype="int32") out2in_w = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="roi_perspective_transform", - inputs={ - "X": input, - "ROIs": rois - }, - outputs={ - "Out": out, - "Out2InIdx": out2in_idx, - "Out2InWeights": out2in_w, - "Mask": mask, - "TransformMatrix": transform_matrix - }, - attrs={ - "transformed_height": transformed_height, - "transformed_width": transformed_width, - "spatial_scale": spatial_scale - }) + helper.append_op( + type="roi_perspective_transform", + inputs={"X": input, "ROIs": rois}, + outputs={ + "Out": out, + "Out2InIdx": out2in_idx, + "Out2InWeights": out2in_w, + "Mask": mask, + "TransformMatrix": transform_matrix, + }, + attrs={ + "transformed_height": transformed_height, + "transformed_width": transformed_width, + "spatial_scale": spatial_scale, + }, + ) return out, mask, transform_matrix -def generate_proposal_labels(rpn_rois, - gt_classes, - is_crowd, - gt_boxes, - im_info, - batch_size_per_im=256, - fg_fraction=0.25, - fg_thresh=0.25, - bg_thresh_hi=0.5, - bg_thresh_lo=0.0, - bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], - class_nums=None, - use_random=True, - is_cls_agnostic=False, - is_cascade_rcnn=False, - max_overlap=None, - return_max_overlap=False): +def generate_proposal_labels( + rpn_rois, + gt_classes, + is_crowd, + gt_boxes, + im_info, + batch_size_per_im=256, + fg_fraction=0.25, + fg_thresh=0.25, + bg_thresh_hi=0.5, + bg_thresh_lo=0.0, + bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], + class_nums=None, + use_random=True, + is_cls_agnostic=False, + is_cascade_rcnn=False, + max_overlap=None, + return_max_overlap=False, +): """ **Generate Proposal Labels of Faster-RCNN** @@ -2730,26 +2893,36 @@ def generate_proposal_labels(rpn_rois, helper = LayerHelper('generate_proposal_labels', **locals()) - check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'], - 'generate_proposal_labels') - check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'], - 'generate_proposal_labels') - check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'], - 'generate_proposal_labels') + check_variable_and_dtype( + rpn_rois, 'rpn_rois', ['float32', 'float64'], 'generate_proposal_labels' + ) + check_variable_and_dtype( + gt_classes, 'gt_classes', ['int32'], 'generate_proposal_labels' + ) + check_variable_and_dtype( + is_crowd, 'is_crowd', ['int32'], 'generate_proposal_labels' + ) if is_cascade_rcnn: - assert max_overlap is not None, "Input max_overlap of generate_proposal_labels should not be None if is_cascade_rcnn is True" + assert ( + max_overlap is not None + ), "Input max_overlap of generate_proposal_labels should not be None if is_cascade_rcnn is True" rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype) labels_int32 = helper.create_variable_for_type_inference( - dtype=gt_classes.dtype) + dtype=gt_classes.dtype + ) bbox_targets = helper.create_variable_for_type_inference( - dtype=rpn_rois.dtype) 
+ dtype=rpn_rois.dtype + ) bbox_inside_weights = helper.create_variable_for_type_inference( - dtype=rpn_rois.dtype) + dtype=rpn_rois.dtype + ) bbox_outside_weights = helper.create_variable_for_type_inference( - dtype=rpn_rois.dtype) + dtype=rpn_rois.dtype + ) max_overlap_with_gt = helper.create_variable_for_type_inference( - dtype=rpn_rois.dtype) + dtype=rpn_rois.dtype + ) inputs = { 'RpnRois': rpn_rois, @@ -2760,28 +2933,30 @@ def generate_proposal_labels(rpn_rois, } if max_overlap is not None: inputs['MaxOverlap'] = max_overlap - helper.append_op(type="generate_proposal_labels", - inputs=inputs, - outputs={ - 'Rois': rois, - 'LabelsInt32': labels_int32, - 'BboxTargets': bbox_targets, - 'BboxInsideWeights': bbox_inside_weights, - 'BboxOutsideWeights': bbox_outside_weights, - 'MaxOverlapWithGT': max_overlap_with_gt - }, - attrs={ - 'batch_size_per_im': batch_size_per_im, - 'fg_fraction': fg_fraction, - 'fg_thresh': fg_thresh, - 'bg_thresh_hi': bg_thresh_hi, - 'bg_thresh_lo': bg_thresh_lo, - 'bbox_reg_weights': bbox_reg_weights, - 'class_nums': class_nums, - 'use_random': use_random, - 'is_cls_agnostic': is_cls_agnostic, - 'is_cascade_rcnn': is_cascade_rcnn - }) + helper.append_op( + type="generate_proposal_labels", + inputs=inputs, + outputs={ + 'Rois': rois, + 'LabelsInt32': labels_int32, + 'BboxTargets': bbox_targets, + 'BboxInsideWeights': bbox_inside_weights, + 'BboxOutsideWeights': bbox_outside_weights, + 'MaxOverlapWithGT': max_overlap_with_gt, + }, + attrs={ + 'batch_size_per_im': batch_size_per_im, + 'fg_fraction': fg_fraction, + 'fg_thresh': fg_thresh, + 'bg_thresh_hi': bg_thresh_hi, + 'bg_thresh_lo': bg_thresh_lo, + 'bbox_reg_weights': bbox_reg_weights, + 'class_nums': class_nums, + 'use_random': use_random, + 'is_cls_agnostic': is_cls_agnostic, + 'is_cascade_rcnn': is_cascade_rcnn, + }, + ) rois.stop_gradient = True labels_int32.stop_gradient = True @@ -2791,12 +2966,33 @@ def generate_proposal_labels(rpn_rois, max_overlap_with_gt.stop_gradient = True if return_max_overlap: - return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, max_overlap_with_gt - return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights + return ( + rois, + labels_int32, + bbox_targets, + bbox_inside_weights, + bbox_outside_weights, + max_overlap_with_gt, + ) + return ( + rois, + labels_int32, + bbox_targets, + bbox_inside_weights, + bbox_outside_weights, + ) -def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, - labels_int32, num_classes, resolution): +def generate_mask_labels( + im_info, + gt_classes, + is_crowd, + gt_segms, + rois, + labels_int32, + num_classes, + resolution, +): r""" **Generate Mask Labels for Mask-RCNN** @@ -2912,28 +3108,29 @@ def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype) roi_has_mask_int32 = helper.create_variable_for_type_inference( - dtype=gt_classes.dtype) + dtype=gt_classes.dtype + ) mask_int32 = helper.create_variable_for_type_inference( - dtype=gt_classes.dtype) - - helper.append_op(type="generate_mask_labels", - inputs={ - 'ImInfo': im_info, - 'GtClasses': gt_classes, - 'IsCrowd': is_crowd, - 'GtSegms': gt_segms, - 'Rois': rois, - 'LabelsInt32': labels_int32 - }, - outputs={ - 'MaskRois': mask_rois, - 'RoiHasMaskInt32': roi_has_mask_int32, - 'MaskInt32': mask_int32 - }, - attrs={ - 'num_classes': num_classes, - 'resolution': resolution - }) + dtype=gt_classes.dtype + ) + + helper.append_op( + 
type="generate_mask_labels", + inputs={ + 'ImInfo': im_info, + 'GtClasses': gt_classes, + 'IsCrowd': is_crowd, + 'GtSegms': gt_segms, + 'Rois': rois, + 'LabelsInt32': labels_int32, + }, + outputs={ + 'MaskRois': mask_rois, + 'RoiHasMaskInt32': roi_has_mask_int32, + 'MaskInt32': mask_int32, + }, + attrs={'num_classes': num_classes, 'resolution': resolution}, + ) mask_rois.stop_gradient = True roi_has_mask_int32.stop_gradient = True @@ -2942,18 +3139,20 @@ def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois, return mask_rois, roi_has_mask_int32, mask_int32 -def generate_proposals(scores, - bbox_deltas, - im_info, - anchors, - variances, - pre_nms_top_n=6000, - post_nms_top_n=1000, - nms_thresh=0.5, - min_size=0.1, - eta=1.0, - return_rois_num=False, - name=None): +def generate_proposals( + scores, + bbox_deltas, + im_info, + anchors, + variances, + pre_nms_top_n=6000, + post_nms_top_n=1000, + nms_thresh=0.5, + min_size=0.1, + eta=1.0, + return_rois_num=False, + name=None, +): """ **Generate proposal Faster-RCNN** @@ -3031,18 +3230,20 @@ def generate_proposals(scores, im_info, anchors, variances) """ - return paddle.vision.ops.generate_proposals(scores=scores, - bbox_deltas=bbox_deltas, - img_size=im_info[:2], - anchors=anchors, - variances=variances, - pre_nms_top_n=pre_nms_top_n, - post_nms_top_n=post_nms_top_n, - nms_thresh=nms_thresh, - min_size=min_size, - eta=eta, - return_rois_num=return_rois_num, - name=name) + return paddle.vision.ops.generate_proposals( + scores=scores, + bbox_deltas=bbox_deltas, + img_size=im_info[:2], + anchors=anchors, + variances=variances, + pre_nms_top_n=pre_nms_top_n, + post_nms_top_n=post_nms_top_n, + nms_thresh=nms_thresh, + min_size=min_size, + eta=eta, + return_rois_num=return_rois_num, + name=name, + ) def box_clip(input, im_info, name=None): @@ -3097,8 +3298,9 @@ def box_clip(input, im_info, name=None): """ check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip') - check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], - 'box_clip') + check_variable_and_dtype( + im_info, 'im_info', ['float32', 'float64'], 'box_clip' + ) helper = LayerHelper("box_clip", **locals()) output = helper.create_variable_for_type_inference(dtype=input.dtype) @@ -3108,15 +3310,17 @@ def box_clip(input, im_info, name=None): return output -def retinanet_detection_output(bboxes, - scores, - anchors, - im_info, - score_threshold=0.05, - nms_top_k=1000, - keep_top_k=100, - nms_threshold=0.3, - nms_eta=1.0): +def retinanet_detection_output( + bboxes, + scores, + anchors, + im_info, + score_threshold=0.05, + nms_top_k=1000, + keep_top_k=100, + nms_threshold=0.3, + nms_eta=1.0, +): """ **Detection Output Layer for the detector RetinaNet.** @@ -3225,54 +3429,69 @@ def retinanet_detection_output(bboxes, check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output') for i, bbox in enumerate(bboxes): - check_variable_and_dtype(bbox, 'bbox{}'.format(i), - ['float32', 'float64'], - 'retinanet_detection_output') + check_variable_and_dtype( + bbox, + 'bbox{}'.format(i), + ['float32', 'float64'], + 'retinanet_detection_output', + ) check_type(scores, 'scores', (list), 'retinanet_detection_output') for i, score in enumerate(scores): - check_variable_and_dtype(score, 'score{}'.format(i), - ['float32', 'float64'], - 'retinanet_detection_output') + check_variable_and_dtype( + score, + 'score{}'.format(i), + ['float32', 'float64'], + 'retinanet_detection_output', + ) check_type(anchors, 'anchors', (list), 'retinanet_detection_output') for i, 
anchor in enumerate(anchors): - check_variable_and_dtype(anchor, 'anchor{}'.format(i), - ['float32', 'float64'], - 'retinanet_detection_output') - check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'], - 'retinanet_detection_output') + check_variable_and_dtype( + anchor, + 'anchor{}'.format(i), + ['float32', 'float64'], + 'retinanet_detection_output', + ) + check_variable_and_dtype( + im_info, 'im_info', ['float32', 'float64'], 'retinanet_detection_output' + ) helper = LayerHelper('retinanet_detection_output', **locals()) output = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('scores')) - helper.append_op(type="retinanet_detection_output", - inputs={ - 'BBoxes': bboxes, - 'Scores': scores, - 'Anchors': anchors, - 'ImInfo': im_info - }, - attrs={ - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'keep_top_k': keep_top_k, - 'nms_eta': 1., - }, - outputs={'Out': output}) + dtype=helper.input_dtype('scores') + ) + helper.append_op( + type="retinanet_detection_output", + inputs={ + 'BBoxes': bboxes, + 'Scores': scores, + 'Anchors': anchors, + 'ImInfo': im_info, + }, + attrs={ + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'keep_top_k': keep_top_k, + 'nms_eta': 1.0, + }, + outputs={'Out': output}, + ) output.stop_gradient = True return output -def multiclass_nms(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=0, - name=None): +def multiclass_nms( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=0, + name=None, +): """ **Multiclass NMS** @@ -3384,10 +3603,12 @@ def multiclass_nms(bboxes, keep_top_k=200, normalized=False) """ - check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'], - 'multiclass_nms') - check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'], - 'multiclass_nms') + check_variable_and_dtype( + bboxes, 'BBoxes', ['float32', 'float64'], 'multiclass_nms' + ) + check_variable_and_dtype( + scores, 'Scores', ['float32', 'float64'], 'multiclass_nms' + ) check_type(score_threshold, 'score_threshold', float, 'multicalss_nms') check_type(nms_top_k, 'nums_top_k', int, 'multiclass_nms') check_type(keep_top_k, 'keep_top_k', int, 'mutliclass_nms') @@ -3398,36 +3619,37 @@ def multiclass_nms(bboxes, helper = LayerHelper('multiclass_nms', **locals()) output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) - helper.append_op(type="multiclass_nms", - inputs={ - 'BBoxes': bboxes, - 'Scores': scores - }, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'nms_eta': nms_eta, - 'keep_top_k': keep_top_k, - 'normalized': normalized - }, - outputs={'Out': output}) + helper.append_op( + type="multiclass_nms", + inputs={'BBoxes': bboxes, 'Scores': scores}, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'nms_eta': nms_eta, + 'keep_top_k': keep_top_k, + 'normalized': normalized, + }, + outputs={'Out': output}, + ) output.stop_gradient = True return output -def locality_aware_nms(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=-1, - name=None): +def locality_aware_nms( + 
bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=-1, + name=None, +): """ **Local Aware NMS** @@ -3506,10 +3728,12 @@ def locality_aware_nms(bboxes, keep_top_k=200, normalized=False) """ - check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'], - 'locality_aware_nms') - check_variable_and_dtype(scores, 'scores', ['float32', 'float64'], - 'locality_aware_nms') + check_variable_and_dtype( + bboxes, 'bboxes', ['float32', 'float64'], 'locality_aware_nms' + ) + check_variable_and_dtype( + scores, 'scores', ['float32', 'float64'], 'locality_aware_nms' + ) check_type(background_label, 'background_label', int, 'locality_aware_nms') check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms') check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms') @@ -3520,47 +3744,49 @@ def locality_aware_nms(bboxes, shape = scores.shape assert len(shape) == 3, "dim size of scores must be 3" - assert shape[ - 1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]" + assert ( + shape[1] == 1 + ), "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]" helper = LayerHelper('locality_aware_nms', **locals()) output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) out = {'Out': output} - helper.append_op(type="locality_aware_nms", - inputs={ - 'BBoxes': bboxes, - 'Scores': scores - }, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'nms_eta': nms_eta, - 'keep_top_k': keep_top_k, - 'nms_eta': nms_eta, - 'normalized': normalized - }, - outputs={'Out': output}) + helper.append_op( + type="locality_aware_nms", + inputs={'BBoxes': bboxes, 'Scores': scores}, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'nms_eta': nms_eta, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized, + }, + outputs={'Out': output}, + ) output.stop_gradient = True return output -def matrix_nms(bboxes, - scores, - score_threshold, - post_threshold, - nms_top_k, - keep_top_k, - use_gaussian=False, - gaussian_sigma=2., - background_label=0, - normalized=True, - return_index=False, - name=None): +def matrix_nms( + bboxes, + scores, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + use_gaussian=False, + gaussian_sigma=2.0, + background_label=0, + normalized=True, + return_index=False, + name=None, +): """ **Matrix NMS** @@ -3637,8 +3863,16 @@ def matrix_nms(bboxes, normalized=False) """ if in_dygraph_mode(): - attrs = (score_threshold, nms_top_k, keep_top_k, post_threshold, - use_gaussian, gaussian_sigma, background_label, normalized) + attrs = ( + score_threshold, + nms_top_k, + keep_top_k, + post_threshold, + use_gaussian, + gaussian_sigma, + background_label, + normalized, + ) out, index = _C_ops.matrix_nms(bboxes, scores, *attrs) if return_index: @@ -3646,10 +3880,12 @@ def matrix_nms(bboxes, else: return out - check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'], - 'matrix_nms') - check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'], - 'matrix_nms') + check_variable_and_dtype( + bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms' + ) + check_variable_and_dtype( + scores, 'Scores', ['float32', 'float64'], 'matrix_nms' + ) check_type(score_threshold, 'score_threshold', float, 'matrix_nms') 
check_type(post_threshold, 'post_threshold', float, 'matrix_nms') check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms') @@ -3662,25 +3898,21 @@ def matrix_nms(bboxes, helper = LayerHelper('matrix_nms', **locals()) output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) index = helper.create_variable_for_type_inference(dtype='int') - helper.append_op(type="matrix_nms", - inputs={ - 'BBoxes': bboxes, - 'Scores': scores - }, - attrs={ - 'score_threshold': score_threshold, - 'post_threshold': post_threshold, - 'nms_top_k': nms_top_k, - 'keep_top_k': keep_top_k, - 'use_gaussian': use_gaussian, - 'gaussian_sigma': gaussian_sigma, - 'background_label': background_label, - 'normalized': normalized - }, - outputs={ - 'Out': output, - 'Index': index - }) + helper.append_op( + type="matrix_nms", + inputs={'BBoxes': bboxes, 'Scores': scores}, + attrs={ + 'score_threshold': score_threshold, + 'post_threshold': post_threshold, + 'nms_top_k': nms_top_k, + 'keep_top_k': keep_top_k, + 'use_gaussian': use_gaussian, + 'gaussian_sigma': gaussian_sigma, + 'background_label': background_label, + 'normalized': normalized, + }, + outputs={'Out': output, 'Index': index}, + ) output.stop_gradient = True if return_index: @@ -3689,13 +3921,15 @@ def matrix_nms(bboxes, return output -def distribute_fpn_proposals(fpn_rois, - min_level, - max_level, - refer_level, - refer_scale, - rois_num=None, - name=None): +def distribute_fpn_proposals( + fpn_rois, + min_level, + max_level, + refer_level, + refer_scale, + rois_num=None, + name=None, +): r""" **This op only takes LoDTensor as input.** In Feature Pyramid Networks @@ -3763,22 +3997,21 @@ def distribute_fpn_proposals(fpn_rois, refer_level=4, refer_scale=224) """ - return paddle.vision.ops.distribute_fpn_proposals(fpn_rois=fpn_rois, - min_level=min_level, - max_level=max_level, - refer_level=refer_level, - refer_scale=refer_scale, - rois_num=rois_num, - name=name) + return paddle.vision.ops.distribute_fpn_proposals( + fpn_rois=fpn_rois, + min_level=min_level, + max_level=max_level, + refer_level=refer_level, + refer_scale=refer_scale, + rois_num=rois_num, + name=name, + ) @templatedoc() -def box_decoder_and_assign(prior_box, - prior_box_var, - target_box, - box_score, - box_clip, - name=None): +def box_decoder_and_assign( + prior_box, prior_box_var, target_box, box_score, box_clip, name=None +): """ ${comment} @@ -3818,41 +4051,53 @@ def box_decoder_and_assign(prior_box, pb, pbv, loc, scores, 4.135) """ - check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'], - 'box_decoder_and_assign') - check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'], - 'box_decoder_and_assign') - check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'], - 'box_decoder_and_assign') + check_variable_and_dtype( + prior_box, 'prior_box', ['float32', 'float64'], 'box_decoder_and_assign' + ) + check_variable_and_dtype( + target_box, + 'target_box', + ['float32', 'float64'], + 'box_decoder_and_assign', + ) + check_variable_and_dtype( + box_score, 'box_score', ['float32', 'float64'], 'box_decoder_and_assign' + ) helper = LayerHelper("box_decoder_and_assign", **locals()) decoded_box = helper.create_variable_for_type_inference( - dtype=prior_box.dtype) + dtype=prior_box.dtype + ) output_assign_box = helper.create_variable_for_type_inference( - dtype=prior_box.dtype) - - helper.append_op(type="box_decoder_and_assign", - inputs={ - "PriorBox": prior_box, - "PriorBoxVar": prior_box_var, - "TargetBox": target_box, - "BoxScore": box_score - }, 
- attrs={"box_clip": box_clip}, - outputs={ - "DecodeBox": decoded_box, - "OutputAssignBox": output_assign_box - }) + dtype=prior_box.dtype + ) + + helper.append_op( + type="box_decoder_and_assign", + inputs={ + "PriorBox": prior_box, + "PriorBoxVar": prior_box_var, + "TargetBox": target_box, + "BoxScore": box_score, + }, + attrs={"box_clip": box_clip}, + outputs={ + "DecodeBox": decoded_box, + "OutputAssignBox": output_assign_box, + }, + ) return decoded_box, output_assign_box -def collect_fpn_proposals(multi_rois, - multi_scores, - min_level, - max_level, - post_nms_top_n, - rois_num_per_level=None, - name=None): +def collect_fpn_proposals( + multi_rois, + multi_scores, + min_level, + max_level, + post_nms_top_n, + rois_num_per_level=None, + name=None, +): """ **This OP only supports LoDTensor as input**. Concat multi-level RoIs @@ -3922,17 +4167,21 @@ def collect_fpn_proposals(multi_rois, input_scores = multi_scores[:num_lvl] if _non_static_mode(): - assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode." + assert ( + rois_num_per_level is not None + ), "rois_num_per_level should not be None in dygraph mode." attrs = ('post_nms_topN', post_nms_top_n) output_rois, rois_num = _legacy_C_ops.collect_fpn_proposals( - input_rois, input_scores, rois_num_per_level, *attrs) + input_rois, input_scores, rois_num_per_level, *attrs + ) check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals') check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals') helper = LayerHelper('collect_fpn_proposals', **locals()) dtype = helper.input_dtype('multi_rois') - check_dtype(dtype, 'multi_rois', ['float32', 'float64'], - 'collect_fpn_proposals') + check_dtype( + dtype, 'multi_rois', ['float32', 'float64'], 'collect_fpn_proposals' + ) output_rois = helper.create_variable_for_type_inference(dtype) output_rois.stop_gradient = True @@ -3946,10 +4195,12 @@ def collect_fpn_proposals(multi_rois, rois_num = helper.create_variable_for_type_inference(dtype='int32') rois_num.stop_gradient = True outputs['RoisNum'] = rois_num - helper.append_op(type='collect_fpn_proposals', - inputs=inputs, - outputs=outputs, - attrs={'post_nms_topN': post_nms_top_n}) + helper.append_op( + type='collect_fpn_proposals', + inputs=inputs, + outputs=outputs, + attrs={'post_nms_topN': post_nms_top_n}, + ) if rois_num_per_level is not None: return output_rois, rois_num return output_rois diff --git a/python/paddle/fluid/layers/device.py b/python/paddle/fluid/layers/device.py index a5c2e8c3e200dc29575ef99de5b0f22ad5e17cb4..ac352ef52b18bcfe86071de5f835d52ec9e6717b 100644 --- a/python/paddle/fluid/layers/device.py +++ b/python/paddle/fluid/layers/device.py @@ -28,15 +28,16 @@ __all__ = [] def get_places(device_count=None, device_type=None): helper = LayerHelper('get_places', **locals()) out_places = helper.create_variable( - name=unique_name.generate_with_ignorable_key(helper.name + ".out")) + name=unique_name.generate_with_ignorable_key(helper.name + ".out") + ) attrs = dict() if device_count is not None: attrs['device_count'] = int(device_count) if device_type is not None: attrs['device_type'] = str(device_type) - helper.append_op(type='get_places', - outputs={"Out": [out_places]}, - attrs=attrs) + helper.append_op( + type='get_places', outputs={"Out": [out_places]}, attrs=attrs + ) return out_places diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py index 
e77634632328193cbe8cbee8a79350c5f641eb29..f955b9299c74eba48f0361eeaa5587e40de2ce56 100644 --- a/python/paddle/fluid/layers/distributions.py +++ b/python/paddle/fluid/layers/distributions.py @@ -20,7 +20,12 @@ import math import numpy as np import warnings -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) __all__ = ['Uniform', 'Normal', 'Categorical', 'MultivariateNormalDiag'] @@ -64,7 +69,8 @@ class Distribution(object): if is_variable and is_number: raise ValueError( - 'if one argument is Variable, all arguments should be Variable') + 'if one argument is Variable, all arguments should be Variable' + ) return is_variable @@ -79,7 +85,7 @@ class Distribution(object): """ numpy_args = [] variable_args = [] - tmp = 0. + tmp = 0.0 for arg in args: valid_arg = False @@ -87,7 +93,9 @@ class Distribution(object): if isinstance(arg, cls): valid_arg = True break - assert valid_arg, "type of input args must be float, list, numpy.ndarray or Variable." + assert ( + valid_arg + ), "type of input args must be float, list, numpy.ndarray or Variable." if isinstance(arg, float): arg = np.zeros(1) + arg arg_np = np.array(arg) @@ -175,10 +183,12 @@ class Uniform(Distribution): """ def __init__(self, low, high): - check_type(low, 'low', (float, np.ndarray, tensor.Variable, list), - 'Uniform') - check_type(high, 'high', (float, np.ndarray, tensor.Variable, list), - 'Uniform') + check_type( + low, 'low', (float, np.ndarray, tensor.Variable, list), 'Uniform' + ) + check_type( + high, 'high', (float, np.ndarray, tensor.Variable, list), 'Uniform' + ) self.all_arg_is_float = False self.batch_size_unknown = False @@ -209,17 +219,26 @@ class Uniform(Distribution): if self.batch_size_unknown: output_shape = shape + batch_shape zero_tmp = tensor.fill_constant_batch_size_like( - self.low + self.high, batch_shape + shape, self.low.dtype, 0.) + self.low + self.high, batch_shape + shape, self.low.dtype, 0.0 + ) uniform_random_tmp = nn.uniform_random_batch_size_like( - zero_tmp, zero_tmp.shape, min=0., max=1., seed=seed) - output = uniform_random_tmp * (zero_tmp + self.high - - self.low) + self.low + zero_tmp, zero_tmp.shape, min=0.0, max=1.0, seed=seed + ) + output = ( + uniform_random_tmp * (zero_tmp + self.high - self.low) + + self.low + ) return nn.reshape(output, output_shape) else: output_shape = shape + batch_shape - output = nn.uniform_random(output_shape, seed=seed) * ( - tensor.zeros(output_shape, dtype=self.low.dtype) + - (self.high - self.low)) + self.low + output = ( + nn.uniform_random(output_shape, seed=seed) + * ( + tensor.zeros(output_shape, dtype=self.low.dtype) + + (self.high - self.low) + ) + + self.low + ) if self.all_arg_is_float: return nn.reshape(output, shape) else: @@ -235,8 +254,9 @@ class Uniform(Distribution): Variable: log probability.The data type is same with value. 
""" - check_variable_and_dtype(value, 'value', ['float32', 'float64'], - 'log_prob') + check_variable_and_dtype( + value, 'value', ['float32', 'float64'], 'log_prob' + ) lb_bool = control_flow.less_than(self.low, value) ub_bool = control_flow.less_than(value, self.high) @@ -317,10 +337,12 @@ class Normal(Distribution): """ def __init__(self, loc, scale): - check_type(loc, 'loc', (float, np.ndarray, tensor.Variable, list), - 'Normal') - check_type(scale, 'scale', (float, np.ndarray, tensor.Variable, list), - 'Normal') + check_type( + loc, 'loc', (float, np.ndarray, tensor.Variable, list), 'Normal' + ) + check_type( + scale, 'scale', (float, np.ndarray, tensor.Variable, list), 'Normal' + ) self.batch_size_unknown = False self.all_arg_is_float = False @@ -353,18 +375,24 @@ class Normal(Distribution): if self.batch_size_unknown: output_shape = shape + batch_shape zero_tmp = tensor.fill_constant_batch_size_like( - self.loc + self.scale, batch_shape + shape, self.loc.dtype, 0.) + self.loc + self.scale, batch_shape + shape, self.loc.dtype, 0.0 + ) zero_tmp_shape = nn.shape(zero_tmp) - normal_random_tmp = nn.gaussian_random(zero_tmp_shape, - mean=0., - std=1., - seed=seed) + normal_random_tmp = nn.gaussian_random( + zero_tmp_shape, mean=0.0, std=1.0, seed=seed + ) output = normal_random_tmp * (zero_tmp + self.scale) + self.loc return nn.reshape(output, output_shape) else: output_shape = shape + batch_shape - output = nn.gaussian_random(output_shape, mean=0., std=1., seed=seed) * \ - (tensor.zeros(output_shape, dtype=self.loc.dtype) + self.scale) + self.loc + output = ( + nn.gaussian_random(output_shape, mean=0.0, std=1.0, seed=seed) + * ( + tensor.zeros(output_shape, dtype=self.loc.dtype) + + self.scale + ) + + self.loc + ) if self.all_arg_is_float: return nn.reshape(output, shape) else: @@ -378,11 +406,12 @@ class Normal(Distribution): """ batch_shape = list((self.loc + self.scale).shape) - zero_tmp = tensor.fill_constant_batch_size_like(self.loc + self.scale, - batch_shape, - self.loc.dtype, 0.) - return 0.5 + 0.5 * math.log(2 * math.pi) + nn.log( - (self.scale + zero_tmp)) + zero_tmp = tensor.fill_constant_batch_size_like( + self.loc + self.scale, batch_shape, self.loc.dtype, 0.0 + ) + return ( + 0.5 + 0.5 * math.log(2 * math.pi) + nn.log((self.scale + zero_tmp)) + ) def log_prob(self, value): """Log probability density/mass function. @@ -394,14 +423,17 @@ class Normal(Distribution): Variable: log probability.The data type is same with value. """ - check_variable_and_dtype(value, 'value', ['float32', 'float64'], - 'log_prob') + check_variable_and_dtype( + value, 'value', ['float32', 'float64'], 'log_prob' + ) var = self.scale * self.scale log_scale = nn.log(self.scale) - return -1. * ((value - self.loc) * - (value - self.loc)) / (2. * var) - log_scale - math.log( - math.sqrt(2. * math.pi)) + return ( + -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var) + - log_scale + - math.log(math.sqrt(2.0 * math.pi)) + ) def kl_divergence(self, other): """The KL-divergence between two normal distributions. @@ -417,10 +449,10 @@ class Normal(Distribution): check_type(other, 'other', Normal, 'kl_divergence') var_ratio = self.scale / other.scale - var_ratio = (var_ratio * var_ratio) + var_ratio = var_ratio * var_ratio t1 = (self.loc - other.loc) / other.scale - t1 = (t1 * t1) - return 0.5 * (var_ratio + t1 - 1. 
- nn.log(var_ratio)) + t1 = t1 * t1 + return 0.5 * (var_ratio + t1 - 1.0 - nn.log(var_ratio)) class Categorical(Distribution): @@ -477,8 +509,9 @@ class Categorical(Distribution): Args: logits(list|numpy.ndarray|Variable): The logits input of categorical distribution. The data type is float32. """ - check_type(logits, 'logits', (np.ndarray, tensor.Variable, list), - 'Categorical') + check_type( + logits, 'logits', (np.ndarray, tensor.Variable, list), 'Categorical' + ) if self._validate_args(logits): self.logits = logits @@ -499,7 +532,8 @@ class Categorical(Distribution): logits = self.logits - nn.reduce_max(self.logits, dim=-1, keep_dim=True) other_logits = other.logits - nn.reduce_max( - other.logits, dim=-1, keep_dim=True) + other.logits, dim=-1, keep_dim=True + ) e_logits = ops.exp(logits) other_e_logits = ops.exp(other_logits) z = nn.reduce_sum(e_logits, dim=-1, keep_dim=True) @@ -508,7 +542,8 @@ class Categorical(Distribution): kl = nn.reduce_sum( prob * (logits - nn.log(z) - other_logits + nn.log(other_z)), dim=-1, - keep_dim=True) + keep_dim=True, + ) return kl @@ -524,7 +559,8 @@ class Categorical(Distribution): z = nn.reduce_sum(e_logits, dim=-1, keep_dim=True) prob = e_logits / z entropy = -1.0 * nn.reduce_sum( - prob * (logits - nn.log(z)), dim=-1, keep_dim=True) + prob * (logits - nn.log(z)), dim=-1, keep_dim=True + ) return entropy @@ -598,10 +634,18 @@ class MultivariateNormalDiag(Distribution): """ def __init__(self, loc, scale): - check_type(loc, 'loc', (np.ndarray, tensor.Variable, list), - 'MultivariateNormalDiag') - check_type(scale, 'scale', (np.ndarray, tensor.Variable, list), - 'MultivariateNormalDiag') + check_type( + loc, + 'loc', + (np.ndarray, tensor.Variable, list), + 'MultivariateNormalDiag', + ) + check_type( + scale, + 'scale', + (np.ndarray, tensor.Variable, list), + 'MultivariateNormalDiag', + ) if self._validate_args(loc, scale): self.loc = loc @@ -614,7 +658,8 @@ class MultivariateNormalDiag(Distribution): batch_shape = list(value.shape) one_all = tensor.ones(shape=batch_shape, dtype=self.loc.dtype) one_diag = tensor.diag( - tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype)) + tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype) + ) det_diag = nn.reduce_prod(value + one_all - one_diag) return det_diag @@ -624,7 +669,8 @@ class MultivariateNormalDiag(Distribution): batch_shape = list(value.shape) one_all = tensor.ones(shape=batch_shape, dtype=self.loc.dtype) one_diag = tensor.diag( - tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype)) + tensor.ones(shape=[batch_shape[0]], dtype=self.loc.dtype) + ) inv_diag = nn.elementwise_pow(value, (one_all - 2 * one_diag)) return inv_diag @@ -636,8 +682,10 @@ class MultivariateNormalDiag(Distribution): Variable: Shannon entropy of Multivariate Normal distribution. The data type is float32. 
""" - entropy = 0.5 * (self.scale.shape[0] * (1.0 + math.log(2 * math.pi)) + - nn.log(self._det(self.scale))) + entropy = 0.5 * ( + self.scale.shape[0] * (1.0 + math.log(2 * math.pi)) + + nn.log(self._det(self.scale)) + ) return entropy @@ -654,8 +702,9 @@ class MultivariateNormalDiag(Distribution): check_type(other, 'other', MultivariateNormalDiag, 'kl_divergence') tr_cov_matmul = nn.reduce_sum(self._inv(other.scale) * self.scale) - loc_matmul_cov = nn.matmul((other.loc - self.loc), - self._inv(other.scale)) + loc_matmul_cov = nn.matmul( + (other.loc - self.loc), self._inv(other.scale) + ) tri_matmul = nn.matmul(loc_matmul_cov, (other.loc - self.loc)) k = list(self.scale.shape)[0] ln_cov = nn.log(self._det(other.scale)) - nn.log(self._det(self.scale)) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 5c62dc6d61d66490b16ad5c9a08fd2004b72d66b..86654bbf669587c71177af74d4b538b4f54acf31 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -23,30 +23,46 @@ from .control_flow import BlockGuard from .layer_function_generator import templatedoc from .. import core from ..executor import global_scope -from ..framework import convert_np_dtype_to_dtype_, default_main_program, \ - default_startup_program, program_guard, Program, Variable +from ..framework import ( + convert_np_dtype_to_dtype_, + default_main_program, + default_startup_program, + program_guard, + Program, + Variable, +) from ..layer_helper import LayerHelper from ..unique_name import generate as unique_name import logging from ..data_feeder import check_dtype, check_type from paddle.fluid.framework import static_only -from ..framework import _get_paddle_place, _current_expected_place, _set_expected_place +from ..framework import ( + _get_paddle_place, + _current_expected_place, + _set_expected_place, +) __all__ = [ - 'data', 'read_file', 'double_buffer', 'py_reader', - 'create_py_reader_by_data', 'load' + 'data', + 'read_file', + 'double_buffer', + 'py_reader', + 'create_py_reader_by_data', + 'load', ] @static_only -def data(name, - shape, - append_batch_size=True, - dtype='float32', - lod_level=0, - type=core.VarDesc.VarType.LOD_TENSOR, - stop_gradient=True): +def data( + name, + shape, + append_batch_size=True, + dtype='float32', + lod_level=0, + type=core.VarDesc.VarType.LOD_TENSOR, + stop_gradient=True, +): """ **Data Layer** @@ -124,13 +140,15 @@ def data(name, if append_batch_size: shape = [-1] + shape # append batch size as -1 - data_var = helper.create_global_variable(name=name, - shape=shape, - dtype=dtype, - type=type, - stop_gradient=stop_gradient, - lod_level=lod_level, - is_data=True) + data_var = helper.create_global_variable( + name=name, + shape=shape, + dtype=dtype, + type=type, + stop_gradient=stop_gradient, + lod_level=lod_level, + is_data=True, + ) return data_var @@ -245,13 +263,13 @@ class ListenAndServ(object): attrs={ 'endpoint': self.endpoint, 'Fanin': self.fan_in, - 'optimize_blocks': - [current_block - ], # did not support multiple optimize blocks in layers - 'distributed_mode': - DistributedMode.SYNC, # did not support async now in layers - 'grad_to_block_id': [""] - }) + 'optimize_blocks': [ + current_block + ], # did not support multiple optimize blocks in layers + 'distributed_mode': DistributedMode.SYNC, # did not support async now in layers + 'grad_to_block_id': [""], + }, + ) def Send(endpoints, send_vars, dummy_output=None, sync=True): @@ -266,14 +284,14 @@ def Send(endpoints, send_vars, dummy_output=None, sync=True): sync (bool): 
whether to wait the request finish """ - assert (type(send_vars) == list) + assert type(send_vars) == list if dummy_output is None: dummy_output = [] elif isinstance(dummy_output, Variable): dummy_output = [dummy_output] - assert (type(dummy_output) == list) + assert type(dummy_output) == list epmap = endpoints.split(",") endpoints = list(set(epmap)) @@ -281,22 +299,23 @@ def Send(endpoints, send_vars, dummy_output=None, sync=True): helper = LayerHelper("Send", **locals()) rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName() - helper.append_op(type="send", - inputs={"X": send_vars}, - outputs={"Out": dummy_output}, - attrs={ - "endpoints": - endpoints, - "epmap": - epmap, - rpc_op_role_name: - core.op_proto_and_checker_maker.OpRole.RPC - }) + helper.append_op( + type="send", + inputs={"X": send_vars}, + outputs={"Out": dummy_output}, + attrs={ + "endpoints": endpoints, + "epmap": epmap, + rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC, + }, + ) if sync: - helper.append_op(type="send_barrier", - inputs={"X": dummy_output}, - outputs={"Out": []}, - attrs={"endpoints": endpoints}) + helper.append_op( + type="send_barrier", + inputs={"X": dummy_output}, + outputs={"Out": []}, + attrs={"endpoints": endpoints}, + ) def Recv(endpoints, get_vars, dummy_input=None, sync=True): @@ -312,35 +331,35 @@ def Recv(endpoints, get_vars, dummy_input=None, sync=True): Returns: list: list of received variables """ - assert (type(get_vars) == list) + assert type(get_vars) == list if dummy_input is None: dummy_input = [] elif isinstance(dummy_input, Variable): dummy_input = [dummy_input] - assert (type(dummy_input) == list) + assert type(dummy_input) == list epmap = endpoints.split(",") endpoints = list(set(epmap)) helper = LayerHelper("Recv", **locals()) - helper.append_op(type="recv", - inputs={"X": dummy_input}, - outputs={"Out": get_vars}, - attrs={ - "endpoints": endpoints, - "epmap": epmap - }) + helper.append_op( + type="recv", + inputs={"X": dummy_input}, + outputs={"Out": get_vars}, + attrs={"endpoints": endpoints, "epmap": epmap}, + ) if sync: - helper.append_op(type="fetch_barrier", - outputs={"Out": get_vars}, - attrs={"endpoints": endpoints}) + helper.append_op( + type="fetch_barrier", + outputs={"Out": get_vars}, + attrs={"endpoints": endpoints}, + ) return get_vars def monkey_patch_reader_methods(reader): - def __get_reader__(): scope = global_scope() var = scope.find_var(reader.name) @@ -381,24 +400,30 @@ def _copy_reader_create_op_(block, op): for arg_name in arg_names: new_output_map[param_name].append(block.var(arg_name)) - new_op = block.append_op(type=op.type, - inputs=new_input_map, - outputs=new_output_map, - attrs=op.all_attrs()) + new_op = block.append_op( + type=op.type, + inputs=new_input_map, + outputs=new_output_map, + attrs=op.all_attrs(), + ) return new_op -def _py_reader(capacity, - shapes, - dtypes, - lod_levels=None, - name=None, - use_double_buffer=True, - feed_list=None): +def _py_reader( + capacity, + shapes, + dtypes, + lod_levels=None, + name=None, + use_double_buffer=True, + feed_list=None, +): if feed_list is not None: if not isinstance(feed_list, list): - raise TypeError("feed_list should be a list of Variable" - " instead of " + str(type(feed_list))) + raise TypeError( + "feed_list should be a list of Variable" + " instead of " + str(type(feed_list)) + ) lod_levels = [] dtypes = [] shape_concat = [] @@ -440,22 +465,25 @@ def _py_reader(capacity, startup_blk = default_startup_program().current_block() startup_var = 
startup_blk.create_var(name=reader_name) - startup_blk.append_op(type='create_py_reader', - inputs={'blocking_queue': [queue_name]}, - outputs={'Out': [startup_var]}, - attrs={ - 'shape_concat': shape_concat, - 'lod_levels': lod_levels, - 'dtypes': dtype_int, - 'need_check_feed': need_check_feed, - 'ranks': ranks - }) + startup_blk.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'dtypes': dtype_int, + 'need_check_feed': need_check_feed, + 'ranks': ranks, + }, + ) startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True - main_prog_var = _copy_reader_var_(default_main_program().current_block(), - startup_var) + main_prog_var = _copy_reader_var_( + default_main_program().current_block(), startup_var + ) reader = monkey_patch_reader_methods(main_prog_var) if use_double_buffer: @@ -473,7 +501,6 @@ def _py_reader(capacity, reader.exited = False def start_provide_thread(func): - def __provider_thread__(legacy_expected_place): try: # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here. @@ -501,8 +528,9 @@ def _py_reader(capacity, logging.warn('Your decorated reader has raised an exception!') six.reraise(*sys.exc_info()) - reader.thread = threading.Thread(target=__provider_thread__, - args=(_current_expected_place(), )) + reader.thread = threading.Thread( + target=__provider_thread__, args=(_current_expected_place(),) + ) reader.thread.daemon = True reader.thread.start() @@ -518,17 +546,22 @@ def _py_reader(capacity, for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): name = str(counter) actual_feed_list.append( - data(name=name, - dtype=dtype, - shape=shape, - lod_level=lod_level)) + data( + name=name, + dtype=dtype, + shape=shape, + lod_level=lod_level, + ) + ) counter += 1 data_names = [feed_data.name for feed_data in actual_feed_list] - feeder = DataFeeder(feed_list=actual_feed_list, - place=core.CPUPlace()) - paddle_reader = feeder.decorate_reader(paddle_reader, - multi_devices=False) + feeder = DataFeeder( + feed_list=actual_feed_list, place=core.CPUPlace() + ) + paddle_reader = feeder.decorate_reader( + paddle_reader, multi_devices=False + ) def __tensor_provider__(): for slots in paddle_reader(): @@ -557,14 +590,11 @@ def _py_reader(capacity, return reader -def py_reader(capacity, - shapes, - dtypes, - lod_levels=None, - name=None, - use_double_buffer=True): +def py_reader( + capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True +): """ - :api_attr: Static Graph + :api_attr: Static Graph Create a Python reader for data feeding in Python @@ -718,21 +748,23 @@ def py_reader(capacity, """ logging.warn( 'paddle.fluid.layers.py_reader() may be deprecated in the near future. ' - 'Please use paddle.fluid.io.DataLoader.from_generator() instead.') - return _py_reader(capacity=capacity, - shapes=shapes, - dtypes=dtypes, - lod_levels=lod_levels, - name=name, - use_double_buffer=use_double_buffer) - - -def create_py_reader_by_data(capacity, - feed_list, - name=None, - use_double_buffer=True): + 'Please use paddle.fluid.io.DataLoader.from_generator() instead.' 
+ ) + return _py_reader( + capacity=capacity, + shapes=shapes, + dtypes=dtypes, + lod_levels=lod_levels, + name=name, + use_double_buffer=use_double_buffer, + ) + + +def create_py_reader_by_data( + capacity, feed_list, name=None, use_double_buffer=True +): """ - :api_attr: Static Graph + :api_attr: Static Graph The OP creates a Python reader for data feeding in Python, it is similar to :ref:`api_fluid_layers_py_reader` except that it can read data from @@ -799,24 +831,29 @@ def create_py_reader_by_data(capacity, """ logging.warn( 'paddle.fluid.layers.create_py_reader_by_data() may be deprecated in the near future. ' - 'Please use paddle.fluid.io.DataLoader.from_generator() instead.') - return _py_reader(capacity=capacity, - shapes=None, - dtypes=None, - lod_levels=None, - name=name, - use_double_buffer=use_double_buffer, - feed_list=feed_list) + 'Please use paddle.fluid.io.DataLoader.from_generator() instead.' + ) + return _py_reader( + capacity=capacity, + shapes=None, + dtypes=None, + lod_levels=None, + name=name, + use_double_buffer=use_double_buffer, + feed_list=feed_list, + ) def __create_shared_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) - startop_op = startup_blk.append_op(type=op_type, - inputs={'UnderlyingReader': reader}, - outputs={'Out': [startup_var]}, - attrs=attrs) + startop_op = startup_blk.append_op( + type=op_type, + inputs={'UnderlyingReader': reader}, + outputs={'Out': [startup_var]}, + attrs=attrs, + ) startup_var.persistable = True main_prog_block = default_main_program().current_block() main_prog_var = _copy_reader_var_(main_prog_block, startup_var) @@ -828,10 +865,12 @@ def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None): new_reader_name = name if name is not None else unique_name(op_type) main_blk = default_main_program().current_block() new_reader = main_blk.create_var(name=new_reader_name) - main_blk.append_op(type=op_type, - inputs={'UnderlyingReader': reader}, - outputs={'Out': [new_reader]}, - attrs=attrs) + main_blk.append_op( + type=op_type, + inputs={'UnderlyingReader': reader}, + outputs={'Out': [new_reader]}, + attrs=attrs, + ) return monkey_patch_reader_methods(new_reader) @@ -864,15 +903,14 @@ def double_buffer(reader, place=None, name=None): if place is not None: attrs['place'] = str(_get_paddle_place(place)).upper() - return __create_unshared_decorated_reader__('create_double_buffer_reader', - reader, - attrs, - name=name) + return __create_unshared_decorated_reader__( + 'create_double_buffer_reader', reader, attrs, name=name + ) def read_file(reader): """ - :api_attr: Static Graph + :api_attr: Static Graph Execute the given reader and get data via it. 
@@ -898,13 +936,14 @@ def read_file(reader): """ helper = LayerHelper('read_file') out = [ - helper.create_variable_for_type_inference(stop_gradient=True, - dtype='float32') + helper.create_variable_for_type_inference( + stop_gradient=True, dtype='float32' + ) for _ in range(len(reader.desc.shapes())) ] - helper.append_op(type='read', - inputs={'Reader': [reader]}, - outputs={'Out': out}) + helper.append_op( + type='read', inputs={'Reader': [reader]}, outputs={'Out': out} + ) if len(out) == 1: return out[0] else: diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 3beb3bae352c1e7ca4e1cc4f666fdb81da1edad3..b75e628a37a7693fbbb9da3d6b98981a987d4b17 100755 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -19,15 +19,26 @@ import string from io import StringIO from ..proto import framework_pb2 -from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph +from ..framework import ( + OpProtoHolder, + Variable, + core, + convert_np_dtype_to_dtype_, + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, +) from ..layer_helper import LayerHelper from ..data_feeder import check_variable_and_dtype from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph from paddle import _C_ops, _legacy_C_ops __all__ = [ - 'generate_layer_fn', 'generate_activation_fn', 'generate_inplace_fn', - 'autodoc', 'templatedoc' + 'generate_layer_fn', + 'generate_activation_fn', + 'generate_inplace_fn', + 'autodoc', + 'templatedoc', ] @@ -57,16 +68,16 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!") def escape_math(text): - #return _two_bang_pattern_.sub( + # return _two_bang_pattern_.sub( # r'$$\1$$', # _single_dollar_pattern_.sub(r':math:\n`\1`', # _two_dollar_pattern_.sub(r"!!\1!!", text))) return _two_dollar_pattern_.sub(r':math:`\1`', text) -def _generate_doc_string_(op_proto, - additional_args_lines=None, - skip_attrs_set=None): +def _generate_doc_string_( + op_proto, additional_args_lines=None, skip_attrs_set=None +): """ Generate docstring by OpProto @@ -146,23 +157,30 @@ def generate_layer_fn(op_type): """ op_proto = OpProtoHolder.instance().get_op_proto(op_type) - not_intermediate_outputs = \ - [output for output in op_proto.outputs if not output.intermediate] - intermediate_outputs = \ - [output for output in op_proto.outputs if output.intermediate] + not_intermediate_outputs = [ + output for output in op_proto.outputs if not output.intermediate + ] + intermediate_outputs = [ + output for output in op_proto.outputs if output.intermediate + ] if len(not_intermediate_outputs) != 1: - raise ValueError("Only one non intermediate output operator can be", - "automatically generated. {0}".format(op_type)) + raise ValueError( + "Only one non intermediate output operator can be", + "automatically generated. {0}".format(op_type), + ) if not_intermediate_outputs[0].duplicable: raise ValueError( - "Only non duplicable op can be automatically generated.") + "Only non duplicable op can be automatically generated." 
+ ) for output in intermediate_outputs: if output.duplicable: - raise ValueError("The op can be automatically generated only when ", - "all intermediate ops are not duplicable.") + raise ValueError( + "The op can be automatically generated only when ", + "all intermediate ops are not duplicable.", + ) o_name = not_intermediate_outputs[0].name intermediate_output_names = [output.name for output in intermediate_outputs] @@ -187,14 +205,17 @@ def generate_layer_fn(op_type): for each in val: if not isinstance(each, Variable): raise ValueError( - "input of {0} must be variable".format(op_type)) + "input of {0} must be variable".format(op_type) + ) if dtype is None: dtype = each.dtype elif dtype != each.dtype: raise ValueError( "operator {0} must input same dtype. {1} vs {2}".format( - op_type, dtype, each.dtype)) + op_type, dtype, each.dtype + ) + ) if dtype is None: arg_dtype = kwargs.get("dtype") @@ -226,8 +247,11 @@ def generate_layer_fn(op_type): outputs = dict() out = kwargs.pop(_convert_(o_name), []) if out: - out_var = out[0] if (isinstance(out, list) - or isinstance(out, tuple)) else out + out_var = ( + out[0] + if (isinstance(out, list) or isinstance(out, tuple)) + else out + ) else: out_var = helper.create_variable_for_type_inference(dtype=dtype) outputs[o_name] = [out_var] @@ -235,10 +259,9 @@ def generate_layer_fn(op_type): outputs[name] = [ helper.create_variable_for_type_inference(dtype=dtype) ] - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=kwargs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs + ) return helper.append_activation(out_var) func.__name__ = op_type @@ -269,14 +292,25 @@ def generate_activation_fn(op_type): return op(x) if op_type not in ["abs", "exp", "square"]: - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - op_type) + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], op_type + ) else: # abs exp square ops support dtype(int32, int64, float16, float32, float64) - check_variable_and_dtype(x, 'x', [ - 'int32', 'int64', 'float16', 'float32', 'float64', 'complex64', - 'complex128' - ], op_type) + check_variable_and_dtype( + x, + 'x', + [ + 'int32', + 'int64', + 'float16', + 'float32', + 'float64', + 'complex64', + 'complex128', + ], + op_type, + ) helper = LayerHelper(op_type, **locals()) @@ -289,7 +323,8 @@ def generate_activation_fn(op_type): op_proto, additional_args_lines=[ "name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`." - ]) + ], + ) return func @@ -309,24 +344,31 @@ def generate_inplace_fn(inplace_op_type): op = getattr(_legacy_C_ops, inplace_op_type) return op(x) warnings.warn( - "In static mode, {}() is the same as {}() and does not perform inplace operation." - .format(inplace_op_type, origin_op_type)) + "In static mode, {}() is the same as {}() and does not perform inplace operation.".format( + inplace_op_type, origin_op_type + ) + ) return generate_activation_fn(origin_op_type)(x, name) func.__name__ = inplace_op_type func.__doc__ = """ Inplace version of ``{0}`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_fluid_layers_{1}`. 
-""".format(origin_op_type, origin_op_type) +""".format( + origin_op_type, origin_op_type + ) return func def autodoc(comment=""): - def __impl__(func): - func.__doc__ = _generate_doc_string_( - OpProtoHolder.instance().get_op_proto(func.__name__)) + comment + func.__doc__ = ( + _generate_doc_string_( + OpProtoHolder.instance().get_op_proto(func.__name__) + ) + + comment + ) return func return __impl__ @@ -371,18 +413,21 @@ def templatedoc(op_type=None): for each_input in op_proto.inputs: input_name = _convert_(each_input.name) args["{0}_comment".format(input_name)] = trim_ending_dot( - each_input.comment) + each_input.comment + ) args["{0}_type".format(input_name)] = "Variable" for each_attr in op_proto.attrs: input_name = _convert_(each_attr.name) args["{0}_comment".format(input_name)] = trim_ending_dot( - each_attr.comment) + each_attr.comment + ) args["{0}_type".format(input_name)] = _type_to_str_(each_attr.type) for each_opt in op_proto.outputs: output_name = _convert_(each_opt.name) args["{0}_comment".format(output_name)] = trim_ending_dot( - each_opt.comment) + each_opt.comment + ) args["{0}_type".format(output_name)] = "Variable" func.__doc__ = tmpl.substitute(args) return func diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 3b1147979e1d76ec4cc4ff170006689699cc5c92..737d9e75e11cba1bbbb9c3c61b97ee66729736d6 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -34,16 +34,22 @@ from ..dygraph import learning_rate_scheduler as imperate_lr from ..data_feeder import check_variable_and_dtype, check_type __all__ = [ - 'exponential_decay', 'natural_exp_decay', 'inverse_time_decay', - 'polynomial_decay', 'piecewise_decay', 'noam_decay', 'cosine_decay', - 'linear_lr_warmup' + 'exponential_decay', + 'natural_exp_decay', + 'inverse_time_decay', + 'polynomial_decay', + 'piecewise_decay', + 'noam_decay', + 'cosine_decay', + 'linear_lr_warmup', ] def _decay_step_counter(begin=0): # the first global step is zero in learning rate decay global_step = nn.autoincreased_step_counter( - counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1) + counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1 + ) global_step = tensor.cast(global_step, 'float32') return global_step @@ -94,17 +100,18 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0): """ with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.NoamDecay(d_model, - warmup_steps, - learning_rate=learning_rate) + decay = imperate_lr.NoamDecay( + d_model, warmup_steps, learning_rate=learning_rate + ) return decay else: global_step = _decay_step_counter(1) a = global_step**-0.5 b = (warmup_steps**-1.5) * global_step - lr_value = learning_rate * (d_model**-0.5) * nn.elementwise_min( - a, b) + lr_value = ( + learning_rate * (d_model**-0.5) * nn.elementwise_min(a, b) + ) return lr_value @@ -147,17 +154,18 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): paddle.enable_static() base_lr = 0.1 sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) """ with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.ExponentialDecay(learning_rate, decay_steps, 
- decay_rate, staircase) + decay = imperate_lr.ExponentialDecay( + learning_rate, decay_steps, decay_rate, staircase + ) return decay else: global_step = _decay_step_counter() @@ -173,52 +181,53 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False): """ -Applies natural exponential decay to the initial learning rate. + Applies natural exponential decay to the initial learning rate. - When training a model, it is often recommended to lower the learning rate as the - training progresses. By using this function, the learning rate will be decayed by - natural exponential power 'decay_rate' every 'decay_steps' steps. + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, the learning rate will be decayed by + natural exponential power 'decay_rate' every 'decay_steps' steps. - Decayed learning rate calculates as follows: + Decayed learning rate calculates as follows: - >>> if not staircase: - >>> decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) - >>> else: - >>> decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps)) + >>> if not staircase: + >>> decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps)) + >>> else: + >>> decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps)) - Args: - learning_rate(Variable|float): The initial learning rate. It should be a Variable - or a float - decay_steps(int): The learning rate decay steps. See the decay computation above. - decay_rate(float): The learning rate decay rate. See the decay computation above. - staircase(bool): If True, decay the learning rate at discrete intervals, which - means the learning rate will be decayed by natural exponential power - `decay_rate` every `decay_steps`. If False, learning rate will be - decayed continuously and following the formula above. Default: False + Args: + learning_rate(Variable|float): The initial learning rate. It should be a Variable + or a float + decay_steps(int): The learning rate decay steps. See the decay computation above. + decay_rate(float): The learning rate decay rate. See the decay computation above. + staircase(bool): If True, decay the learning rate at discrete intervals, which + means the learning rate will be decayed by natural exponential power + `decay_rate` every `decay_steps`. If False, learning rate will be + decayed continuously and following the formula above. Default: False - Returns: - The decayed learning rate. The data type is float32. + Returns: + The decayed learning rate. The data type is float32. - Examples: - .. code-block:: python + Examples: + .. 
code-block:: python - import paddle.fluid as fluid - import paddle + import paddle.fluid as fluid + import paddle - paddle.enable_static() - base_lr = 0.1 - sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.natural_exp_decay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + paddle.enable_static() + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.natural_exp_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) """ with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.NaturalExpDecay(learning_rate, decay_steps, - decay_rate, staircase) + decay = imperate_lr.NaturalExpDecay( + learning_rate, decay_steps, decay_rate, staircase + ) return decay else: global_step = _decay_step_counter() @@ -268,16 +277,17 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): paddle.enable_static() base_lr = 0.1 sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.inverse_time_decay( - learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.inverse_time_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) """ with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.InverseTimeDecay(learning_rate, decay_steps, - decay_rate, staircase) + decay = imperate_lr.InverseTimeDecay( + learning_rate, decay_steps, decay_rate, staircase + ) return decay else: global_step = _decay_step_counter() @@ -291,11 +301,9 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): return decayed_lr -def polynomial_decay(learning_rate, - decay_steps, - end_learning_rate=0.0001, - power=1.0, - cycle=False): +def polynomial_decay( + learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False +): """ Applies polynomial decay to the initial learning rate. 
@@ -332,74 +340,77 @@ def polynomial_decay(learning_rate, """ with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.PolynomialDecay(learning_rate, decay_steps, - end_learning_rate, power, cycle) + decay = imperate_lr.PolynomialDecay( + learning_rate, decay_steps, end_learning_rate, power, cycle + ) return decay else: global_step = _decay_step_counter() if cycle: div_res = ops.ceil(global_step / decay_steps) - zero_var = tensor.fill_constant(shape=[1], - dtype='float32', - value=0.0) - one_var = tensor.fill_constant(shape=[1], - dtype='float32', - value=1.0) + zero_var = tensor.fill_constant( + shape=[1], dtype='float32', value=0.0 + ) + one_var = tensor.fill_constant( + shape=[1], dtype='float32', value=1.0 + ) with control_flow.Switch() as switch: with switch.case(global_step == zero_var): tensor.assign(input=one_var, output=div_res) decay_steps = decay_steps * div_res else: - decay_steps_var = tensor.fill_constant(shape=[1], - dtype='float32', - value=float(decay_steps)) - global_step = nn.elementwise_min(x=global_step, - y=decay_steps_var) - - decayed_lr = (learning_rate - end_learning_rate) * \ - ((1 - global_step / decay_steps) ** power) + end_learning_rate + decay_steps_var = tensor.fill_constant( + shape=[1], dtype='float32', value=float(decay_steps) + ) + global_step = nn.elementwise_min( + x=global_step, y=decay_steps_var + ) + + decayed_lr = (learning_rate - end_learning_rate) * ( + (1 - global_step / decay_steps) ** power + ) + end_learning_rate return decayed_lr def piecewise_decay(boundaries, values): """ -Applies piecewise decay to the initial learning rate. - - The algorithm can be described as the code below. + Applies piecewise decay to the initial learning rate. - .. code-block:: text + The algorithm can be described as the code below. - boundaries = [10000, 20000] - values = [1.0, 0.5, 0.1] - if step < 10000: - learning_rate = 1.0 - elif 10000 <= step < 20000: - learning_rate = 0.5 - else: - learning_rate = 0.1 - Args: - boundaries: A list of steps numbers. - values: A list of learning rate values that will be picked during - different step boundaries. + .. code-block:: text - Returns: - The decayed learning rate. - - Examples: - .. code-block:: python - - import paddle.fluid as fluid - import paddle - paddle.enable_static() boundaries = [10000, 20000] values = [1.0, 0.5, 0.1] - optimizer = fluid.optimizer.Momentum( - momentum=0.9, - learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values), - regularization=fluid.regularizer.L2Decay(1e-4)) + if step < 10000: + learning_rate = 1.0 + elif 10000 <= step < 20000: + learning_rate = 0.5 + else: + learning_rate = 0.1 + Args: + boundaries: A list of steps numbers. + values: A list of learning rate values that will be picked during + different step boundaries. + + Returns: + The decayed learning rate. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import paddle + paddle.enable_static() + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + optimizer = fluid.optimizer.Momentum( + momentum=0.9, + learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values), + regularization=fluid.regularizer.L2Decay(1e-4)) """ @@ -413,29 +424,36 @@ Applies piecewise decay to the initial learning rate. 
else: global_step = _decay_step_counter() - lr = tensor.create_global_var(shape=[1], - value=0.0, - dtype='float32', - persistable=True, - name="learning_rate") + lr = tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate", + ) with control_flow.Switch() as switch: for i in range(len(boundaries)): - boundary_val = tensor.fill_constant(shape=[1], - dtype='float32', - value=float( - boundaries[i]), - force_cpu=True) + boundary_val = tensor.fill_constant( + shape=[1], + dtype='float32', + value=float(boundaries[i]), + force_cpu=True, + ) with switch.case(global_step < boundary_val): - tensor.fill_constant(shape=[1], - dtype="float32", - value=float(values[i]), - out=lr) + tensor.fill_constant( + shape=[1], + dtype="float32", + value=float(values[i]), + out=lr, + ) with switch.default(): - tensor.fill_constant(shape=[1], - dtype="float32", - value=float(values[len(values) - 1]), - out=lr) + tensor.fill_constant( + shape=[1], + dtype="float32", + value=float(values[len(values) - 1]), + out=lr, + ) return lr @@ -469,20 +487,25 @@ def cosine_decay(learning_rate, step_each_epoch, epochs): lr = fluid.layers.cosine_decay( learning_rate = base_lr, step_each_epoch=10000, epochs=120) """ - check_type(learning_rate, 'learning_rate', (float, tensor.Variable), - 'cosine_decay') + check_type( + learning_rate, 'learning_rate', (float, tensor.Variable), 'cosine_decay' + ) with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - decay = imperate_lr.CosineDecay(learning_rate, step_each_epoch, - epochs) + decay = imperate_lr.CosineDecay( + learning_rate, step_each_epoch, epochs + ) return decay else: global_step = _decay_step_counter() cur_epoch = ops.floor(global_step / step_each_epoch) - decayed_lr = learning_rate * 0.5 * ( - ops.cos(cur_epoch * math.pi / epochs) + 1) + decayed_lr = ( + learning_rate + * 0.5 + * (ops.cos(cur_epoch * math.pi / epochs) + 1) + ) return decayed_lr @@ -551,26 +574,31 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr): with default_main_program()._lr_schedule_guard(): if _non_static_mode(): - lr = imperate_lr.LinearLrWarmup(learning_rate, warmup_steps, - start_lr, end_lr) + lr = imperate_lr.LinearLrWarmup( + learning_rate, warmup_steps, start_lr, end_lr + ) return lr else: - lr = tensor.create_global_var(shape=[1], - value=0.0, - dtype=dtype, - persistable=True, - name="learning_rate_warmup") + lr = tensor.create_global_var( + shape=[1], + value=0.0, + dtype=dtype, + persistable=True, + name="learning_rate_warmup", + ) global_step = _decay_step_counter() with control_flow.Switch() as switch: with switch.case(global_step < warmup_steps): - decayed_lr = start_lr + linear_step * (global_step / - float(warmup_steps)) + decayed_lr = start_lr + linear_step * ( + global_step / float(warmup_steps) + ) tensor.assign(decayed_lr, lr) with switch.default(): if not isinstance(learning_rate, Variable): learning_rate = tensor.fill_constant( - shape=[1], dtype=dtype, value=float(learning_rate)) + shape=[1], dtype=dtype, value=float(learning_rate) + ) tensor.assign(learning_rate, lr) return lr diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py index 23835d7e7a01f2309c5096768d1529093667e42a..6dbbb7338a70155b54d33ced81d6309489b1d444 100644 --- a/python/paddle/fluid/layers/loss.py +++ b/python/paddle/fluid/layers/loss.py @@ -19,7 +19,13 @@ from paddle.utils import deprecated from . 
import nn from .layer_function_generator import templatedoc from ..layer_helper import LayerHelper -from ..framework import Variable, _non_static_mode, static_only, _in_legacy_dygraph, in_dygraph_mode +from ..framework import ( + Variable, + _non_static_mode, + static_only, + _in_legacy_dygraph, + in_dygraph_mode, +) from .. import core from ..data_feeder import check_variable_and_dtype, check_type from ..param_attr import ParamAttr @@ -52,12 +58,9 @@ __all__ = [ kIgnoreIndex = -100 -def center_loss(input, - label, - num_classes, - alpha, - param_attr, - update_center=True): +def center_loss( + input, label, num_classes, alpha, param_attr, update_center=True +): r""" :api_attr: Static Graph @@ -107,20 +110,22 @@ def center_loss(input, """ helper = LayerHelper('center_loss', **locals()) dtype = helper.input_dtype() - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'center_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'center_loss' + ) check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'center_loss') centers_shape = [num_classes, input.shape[1]] - centers_param = helper.create_parameter(attr=param_attr, - shape=centers_shape, - dtype=dtype) + centers_param = helper.create_parameter( + attr=param_attr, shape=centers_shape, dtype=dtype + ) centers_param.stop_gradient = True if isinstance(alpha, Variable): alpha_param = alpha - check_variable_and_dtype(alpha, 'alpha', ['float32', 'float64'], - 'center_loss') + check_variable_and_dtype( + alpha, 'alpha', ['float32', 'float64'], 'center_loss' + ) else: assert isinstance(alpha, float) alpha_param = helper.create_variable( @@ -130,26 +135,26 @@ def center_loss(input, type=core.VarDesc.VarType.LOD_TENSOR, persistable=True, stop_gradient=True, - initializer=Constant(alpha)) + initializer=Constant(alpha), + ) centersdiff = helper.create_variable_for_type_inference(dtype=input.dtype) loss = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='center_loss', - inputs={ - 'X': [input], - 'Label': [label], - 'Centers': [centers_param], - 'CenterUpdateRate': [alpha_param] - }, - outputs={ - 'SampleCenterDiff': [centersdiff], - 'Loss': [loss], - 'CentersOut': [centers_param] - }, - attrs={ - 'cluster_num': num_classes, - 'need_update': update_center - }) + helper.append_op( + type='center_loss', + inputs={ + 'X': [input], + 'Label': [label], + 'Centers': [centers_param], + 'CenterUpdateRate': [alpha_param], + }, + outputs={ + 'SampleCenterDiff': [centersdiff], + 'Loss': [loss], + 'CentersOut': [centers_param], + }, + attrs={'cluster_num': num_classes, 'need_update': update_center}, + ) return loss @@ -195,22 +200,22 @@ def bpr_loss(input, label, name=None): """ helper = LayerHelper('bpr_loss', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'bpr_loss') - helper.append_op(type='bpr_loss', - inputs={ - 'X': [input], - 'Label': [label] - }, - outputs={'Y': [out]}) + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'bpr_loss' + ) + helper.append_op( + type='bpr_loss', + inputs={'X': [input], 'Label': [label]}, + outputs={'Y': [out]}, + ) return out def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex): r""" :alias_main: paddle.nn.functional.cross_entropy - :alias: paddle.nn.functional.cross_entropy,paddle.nn.functional.loss.cross_entropy - :old_api: paddle.fluid.layers.cross_entropy + :alias: 
paddle.nn.functional.cross_entropy,paddle.nn.functional.loss.cross_entropy + :old_api: paddle.fluid.layers.cross_entropy This operator computes the cross entropy between input and label. It supports both hard-label and and soft-label cross entropy computation. @@ -264,46 +269,46 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex): return cross_entropy2(input, label, ignore_index) if _non_static_mode(): - return _legacy_C_ops.cross_entropy(input, label, "soft_label", - soft_label, "ignore_index", - ignore_index) + return _legacy_C_ops.cross_entropy( + input, label, "soft_label", soft_label, "ignore_index", ignore_index + ) inputs = {'X': [input], 'Label': [label]} attrs = {"soft_label": soft_label, "ignore_index": ignore_index} - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'cross_entropy') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'cross_entropy' + ) helper = LayerHelper('cross_entropy', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='cross_entropy', - inputs=inputs, - outputs={'Y': [out]}, - attrs=attrs) + helper.append_op( + type='cross_entropy', inputs=inputs, outputs={'Y': [out]}, attrs=attrs + ) return out def cross_entropy2(input, label, ignore_index=kIgnoreIndex): if _non_static_mode(): - loss, _, _ = _legacy_C_ops.cross_entropy2(input, label, 'ignore_index', - ignore_index) + loss, _, _ = _legacy_C_ops.cross_entropy2( + input, label, 'ignore_index', ignore_index + ) return loss inputs = {'X': [input], 'Label': [label]} attrs = {'ignore_index': ignore_index} - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'cross_entropy2') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'cross_entropy2' + ) helper = LayerHelper('cross_entropy2', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) xshape = helper.create_variable_for_type_inference(dtype=input.dtype) match_x = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='cross_entropy2', - inputs=inputs, - outputs={ - 'Y': [out], - 'MatchX': [match_x], - 'XShape': [xshape] - }, - attrs=attrs) + helper.append_op( + type='cross_entropy2', + inputs=inputs, + outputs={'Y': [out], 'MatchX': [match_x], 'XShape': [xshape]}, + attrs=attrs, + ) return out @@ -342,12 +347,14 @@ def square_error_cost(input, label): return paddle.nn.functional.square_error_cost(input, label) -def edit_distance(input, - label, - normalized=True, - ignored_tokens=None, - input_length=None, - label_length=None): +def edit_distance( + input, + label, + normalized=True, + ignored_tokens=None, + input_length=None, + label_length=None, +): """ This op computes the edit distances, also called Levenshtein distance, between a batch of hypothesis strings and their references. It measures how dissimilar two strings are by counting @@ -382,7 +389,7 @@ def edit_distance(input, NOTE: This Api is different from fluid.metrics.EditDistance Returns: - Tuple: + Tuple: distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1). sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,). 
@@ -415,17 +422,19 @@ def edit_distance(input, # [4] """ - return paddle.nn.functional.loss.edit_distance(input, label, normalized, - ignored_tokens, input_length, - label_length) + return paddle.nn.functional.loss.edit_distance( + input, label, normalized, ignored_tokens, input_length, label_length + ) -def warpctc(input, - label, - blank=0, - norm_by_times=False, - input_length=None, - label_length=None): +def warpctc( + input, + label, + blank=0, + norm_by_times=False, + input_length=None, + label_length=None, +): """ An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc) @@ -548,8 +557,9 @@ def warpctc(input, raise ValueError( "input_length and label_length must not be None in dygraph mode!" ) - loss_out = _C_ops.warpctc(input, label, input_length, label_length, - blank, norm_by_times) + loss_out = _C_ops.warpctc( + input, label, input_length, label_length, blank, norm_by_times + ) return loss_out if _non_static_mode(): if input_length is None or label_length is None: @@ -572,26 +582,27 @@ def warpctc(input, check_variable_and_dtype(label, 'label', ['int32'], "warpctc") this_inputs = {'Logits': [input], 'Label': [label]} if input_length is not None and label_length is not None: - check_variable_and_dtype(input_length, 'LogitsLength', ['int64'], - "warpctc") - check_variable_and_dtype(label_length, 'LabelLength', ['int64'], - "warpctc") + check_variable_and_dtype( + input_length, 'LogitsLength', ['int64'], "warpctc" + ) + check_variable_and_dtype( + label_length, 'LabelLength', ['int64'], "warpctc" + ) this_inputs['LogitsLength'] = [input_length] this_inputs['LabelLength'] = [label_length] loss_out = helper.create_variable_for_type_inference(dtype=input.dtype) grad_out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='warpctc', - inputs=this_inputs, - outputs={ - 'WarpCTCGrad': [grad_out], - 'Loss': [loss_out] - }, - attrs={ - 'blank': blank, - 'norm_by_times': norm_by_times, - }) + helper.append_op( + type='warpctc', + inputs=this_inputs, + outputs={'WarpCTCGrad': [grad_out], 'Loss': [loss_out]}, + attrs={ + 'blank': blank, + 'norm_by_times': norm_by_times, + }, + ) return loss_out @@ -600,18 +611,20 @@ def warpctc(input, # the type is often "Variable", and arguments may vary. 
@static_only @templatedoc(op_type="nce") -def nce(input, - label, - num_total_classes, - sample_weight=None, - param_attr=None, - bias_attr=None, - num_neg_samples=None, - name=None, - sampler="uniform", - custom_dist=None, - seed=0, - is_sparse=False): +def nce( + input, + label, + num_total_classes, + sample_weight=None, + param_attr=None, + bias_attr=None, + num_neg_samples=None, + name=None, + sampler="uniform", + custom_dist=None, + seed=0, + is_sparse=False, +): """ :api_attr: Static Graph @@ -696,16 +709,20 @@ def nce(input, dim = input.shape[1] num_true_class = label.shape[1] - w = helper.create_parameter(attr=helper.param_attr, - shape=[num_total_classes, dim], - is_bias=False, - dtype=input.dtype) + w = helper.create_parameter( + attr=helper.param_attr, + shape=[num_total_classes, dim], + is_bias=False, + dtype=input.dtype, + ) inputs = {} if helper.bias_attr: - b = helper.create_parameter(attr=helper.bias_attr, - shape=[num_total_classes, 1], - is_bias=True, - dtype=input.dtype) + b = helper.create_parameter( + attr=helper.bias_attr, + shape=[num_total_classes, 1], + is_bias=True, + dtype=input.dtype, + ) inputs['Bias'] = b cost = helper.create_variable_for_type_inference(dtype=input.dtype) sample_logits = helper.create_variable_for_type_inference(dtype=input.dtype) @@ -770,16 +787,20 @@ def nce(input, attr=ParamAttr(), shape=numpy_array.shape, dtype=numpy_array.dtype, - default_initializer=NumpyArrayInitializer(numpy_array)) + default_initializer=NumpyArrayInitializer(numpy_array), + ) ret.stop_gradient = True return ret inputs['CustomDistProbs'] = _init_by_numpy_array( - np.array(custom_dist).astype('float32')) + np.array(custom_dist).astype('float32') + ) inputs['CustomDistAlias'] = _init_by_numpy_array( - np.array(alias_).astype('int32')) + np.array(alias_).astype('int32') + ) inputs['CustomDistAliasProbs'] = _init_by_numpy_array( - np.array(alias_probs_).astype('float32')) + np.array(alias_probs_).astype('float32') + ) sampler = 2 else: raise Exception("Unsupported sampler type.") @@ -800,30 +821,34 @@ def nce(input, 'seed': seed, 'sampler': sampler, 'is_sparse': is_sparse, - 'remote_prefetch': remote_prefetch + 'remote_prefetch': remote_prefetch, } - helper.append_op(type='nce', - inputs=inputs, - outputs={ - 'Cost': cost, - 'SampleLogits': sample_logits, - 'SampleLabels': sample_labels - }, - attrs=attrs) + helper.append_op( + type='nce', + inputs=inputs, + outputs={ + 'Cost': cost, + 'SampleLogits': sample_logits, + 'SampleLabels': sample_labels, + }, + attrs=attrs, + ) return cost / (num_neg_samples + 1) -def hsigmoid(input, - label, - num_classes, - param_attr=None, - bias_attr=None, - name=None, - path_table=None, - path_code=None, - is_custom=False, - is_sparse=False): +def hsigmoid( + input, + label, + num_classes, + param_attr=None, + bias_attr=None, + name=None, + path_table=None, + path_code=None, + is_custom=False, + is_sparse=False, +): """ :api_attr: Static Graph @@ -906,16 +931,19 @@ def hsigmoid(input, dim = input.shape[1] if ((num_classes is None) or (num_classes < 2)) and (not is_custom): raise ValueError( - "num_classes must not be less than 2 with default tree") + "num_classes must not be less than 2 with default tree" + ) if (not is_custom) and (is_sparse): print("Sparse mode should not be used without custom tree") is_sparse = False - if (not is_custom) and ((path_table is not None) or - (path_code is not None)): + if (not is_custom) and ( + (path_table is not None) or (path_code is not None) + ): raise ValueError( - "only num_classes should be passed 
without custom tree") + "only num_classes should be passed without custom tree" + ) if (is_custom) and (path_code is None): raise ValueError("path_code should not be None with custom tree") @@ -932,59 +960,67 @@ def hsigmoid(input, "With sparse mode, if your models has only small parameter prefetch may cause speed down" ) if not is_custom: - weights = helper.create_parameter(attr=helper.param_attr, - shape=[num_classes - 1, dim], - is_bias=False, - dtype=input.dtype) + weights = helper.create_parameter( + attr=helper.param_attr, + shape=[num_classes - 1, dim], + is_bias=False, + dtype=input.dtype, + ) else: - weights = helper.create_parameter(attr=helper.param_attr, - shape=[num_classes, dim], - is_bias=False, - dtype=input.dtype) + weights = helper.create_parameter( + attr=helper.param_attr, + shape=[num_classes, dim], + is_bias=False, + dtype=input.dtype, + ) inputs = { "X": input, "W": weights, "PathTable": path_table, "PathCode": path_code, - "Label": label + "Label": label, } if helper.bias_attr: if not is_custom: - bias = helper.create_parameter(attr=helper.bias_attr, - shape=[num_classes - 1, 1], - is_bias=True, - dtype=input.dtype) + bias = helper.create_parameter( + attr=helper.bias_attr, + shape=[num_classes - 1, 1], + is_bias=True, + dtype=input.dtype, + ) inputs['Bias'] = bias else: - bias = helper.create_parameter(attr=helper.bias_attr, - shape=[num_classes, 1], - is_bias=True, - dtype=input.dtype) + bias = helper.create_parameter( + attr=helper.bias_attr, + shape=[num_classes, 1], + is_bias=True, + dtype=input.dtype, + ) inputs['Bias'] = bias - helper.append_op(type="hierarchical_sigmoid", - inputs=inputs, - outputs={ - "Out": out, - "PreOut": pre_out, - "W_Out": weights - }, - attrs={ - "num_classes": num_classes, - "is_sparse": is_sparse, - "remote_prefetch": remote_prefetch - }) + helper.append_op( + type="hierarchical_sigmoid", + inputs=inputs, + outputs={"Out": out, "PreOut": pre_out, "W_Out": weights}, + attrs={ + "num_classes": num_classes, + "is_sparse": is_sparse, + "remote_prefetch": remote_prefetch, + }, + ) return out -def sampled_softmax_with_cross_entropy(logits, - label, - num_samples, - num_true=1, - remove_accidental_hits=True, - use_customized_samples=False, - customized_samples=None, - customized_probabilities=None, - seed=0): +def sampled_softmax_with_cross_entropy( + logits, + label, + num_samples, + num_true=1, + remove_accidental_hits=True, + use_customized_samples=False, + customized_samples=None, + customized_probabilities=None, + seed=0, +): """ **Sampled Softmax With Cross Entropy Operator.** @@ -1052,90 +1088,121 @@ def sampled_softmax_with_cross_entropy(logits, logits=fc, label=label, num_samples=25) """ if _non_static_mode(): - sample_logits_attrs = ('use_customized_samples', use_customized_samples, - 'uniq', True, 'remove_accidental_hits', - remove_accidental_hits, 'num_samples', - num_samples, 'seed', seed) - _, _, _, _, sampled_logits_out, sampled_label_out = _legacy_C_ops.sample_logits( - logits, label, *sample_logits_attrs) + sample_logits_attrs = ( + 'use_customized_samples', + use_customized_samples, + 'uniq', + True, + 'remove_accidental_hits', + remove_accidental_hits, + 'num_samples', + num_samples, + 'seed', + seed, + ) + ( + _, + _, + _, + _, + sampled_logits_out, + sampled_label_out, + ) = _legacy_C_ops.sample_logits(logits, label, *sample_logits_attrs) depth = num_samples + 1 - sampled_softlabel_out = _legacy_C_ops.one_hot(sampled_label_out, - 'depth', depth) + sampled_softlabel_out = _legacy_C_ops.one_hot( + sampled_label_out, 
'depth', depth + ) - softmax_with_cross_entropy_attrs = ('soft_label', True, - 'numeric_stable_mode', False) + softmax_with_cross_entropy_attrs = ( + 'soft_label', + True, + 'numeric_stable_mode', + False, + ) _, loss = _legacy_C_ops.softmax_with_cross_entropy( - sampled_logits_out, sampled_softlabel_out, - *softmax_with_cross_entropy_attrs) + sampled_logits_out, + sampled_softlabel_out, + *softmax_with_cross_entropy_attrs + ) return loss / num_true helper = LayerHelper('sample_logits', **locals()) - samples = customized_samples if use_customized_samples else helper.create_variable_for_type_inference( - dtype='int64') - probabilities = customized_probabilities if use_customized_samples else helper.create_variable_for_type_inference( - dtype=logits.dtype) - sampled_logits \ - = helper.create_variable_for_type_inference(dtype=logits.dtype) + samples = ( + customized_samples + if use_customized_samples + else helper.create_variable_for_type_inference(dtype='int64') + ) + probabilities = ( + customized_probabilities + if use_customized_samples + else helper.create_variable_for_type_inference(dtype=logits.dtype) + ) + sampled_logits = helper.create_variable_for_type_inference( + dtype=logits.dtype + ) sampled_label = helper.create_variable_for_type_inference(dtype='int64') sampled_softlabel = helper.create_variable_for_type_inference( - dtype=logits.dtype) + dtype=logits.dtype + ) logits_dim = helper.create_variable_for_type_inference(dtype=logits.dtype) labels_dim = helper.create_variable_for_type_inference(dtype=label.type) - helper.append_op(type='sample_logits', - inputs={ - 'Logits': logits, - 'Labels': label, - 'CustomizedSamples': customized_samples, - 'CustomizedProbabilities': customized_probabilities - }, - outputs={ - 'Samples': samples, - 'Probabilities': probabilities, - 'SampledLabels': sampled_label, - 'SampledLogits': sampled_logits, - 'LogitsDim': logits_dim, - 'LabelsDim': labels_dim - }, - attrs={ - 'use_customized_samples': use_customized_samples, - 'uniq': True, - 'remove_accidental_hits': remove_accidental_hits, - 'num_samples': num_samples, - 'seed': seed - }) + helper.append_op( + type='sample_logits', + inputs={ + 'Logits': logits, + 'Labels': label, + 'CustomizedSamples': customized_samples, + 'CustomizedProbabilities': customized_probabilities, + }, + outputs={ + 'Samples': samples, + 'Probabilities': probabilities, + 'SampledLabels': sampled_label, + 'SampledLogits': sampled_logits, + 'LogitsDim': logits_dim, + 'LabelsDim': labels_dim, + }, + attrs={ + 'use_customized_samples': use_customized_samples, + 'uniq': True, + 'remove_accidental_hits': remove_accidental_hits, + 'num_samples': num_samples, + 'seed': seed, + }, + ) loss = helper.create_variable_for_type_inference(dtype=logits.dtype) softmax = helper.create_variable_for_type_inference(dtype=logits.dtype) - helper.append_op(type='one_hot', - inputs={'X': sampled_label}, - attrs={'depth': num_samples + 1}, - outputs={'Out': sampled_softlabel}) - - helper.append_op(type='softmax_with_cross_entropy', - inputs={ - 'Logits': sampled_logits, - 'Label': sampled_softlabel - }, - outputs={ - 'Softmax': softmax, - 'Loss': loss - }, - attrs={ - 'soft_label': True, - 'ignore_index': False, - 'numeric_stable_mode': False - }) + helper.append_op( + type='one_hot', + inputs={'X': sampled_label}, + attrs={'depth': num_samples + 1}, + outputs={'Out': sampled_softlabel}, + ) + + helper.append_op( + type='softmax_with_cross_entropy', + inputs={'Logits': sampled_logits, 'Label': sampled_softlabel}, + outputs={'Softmax': softmax, 
'Loss': loss}, + attrs={ + 'soft_label': True, + 'ignore_index': False, + 'numeric_stable_mode': False, + }, + ) return loss / num_true -def softmax_with_cross_entropy(logits, - label, - soft_label=False, - ignore_index=kIgnoreIndex, - numeric_stable_mode=True, - return_softmax=False, - axis=-1): +def softmax_with_cross_entropy( + logits, + label, + soft_label=False, + ignore_index=kIgnoreIndex, + numeric_stable_mode=True, + return_softmax=False, + axis=-1, +): r""" This operator implements the cross entropy loss function with softmax. This function @@ -1231,8 +1298,14 @@ def softmax_with_cross_entropy(logits, print(out) """ return paddle.nn.functional.loss.fluid_softmax_with_cross_entropy( - logits, label, soft_label, ignore_index, numeric_stable_mode, - return_softmax, axis) + logits, + label, + soft_label, + ignore_index, + numeric_stable_mode, + return_softmax, + axis, + ) def identity_loss(x, reduction="none"): @@ -1285,10 +1358,9 @@ def identity_loss(x, reduction="none"): helper = LayerHelper('identity_loss', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="identity_loss", - inputs={"X": x}, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type="identity_loss", inputs={"X": x}, outputs={"Out": out}, attrs=attrs + ) return out @@ -1346,13 +1418,11 @@ def rank_loss(label, left, right, name=None): out = helper.create_variable_for_type_inference("float32") - helper.append_op(type='rank_loss', - inputs={ - "Label": label, - "Left": left, - "Right": right - }, - outputs={'Out': out}) + helper.append_op( + type='rank_loss', + inputs={"Label": label, "Left": left, "Right": right}, + outputs={'Out': out}, + ) return out @@ -1397,26 +1467,19 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None): check_variable_and_dtype(label, 'right', ['float32'], 'margin_rank_loss') out = helper.create_variable_for_type_inference(left.dtype) act = helper.create_variable_for_type_inference(left.dtype) - helper.append_op(type='margin_rank_loss', - inputs={ - "Label": label, - "X1": left, - "X2": right - }, - outputs={ - 'Out': out, - 'Activated': act - }, - attrs={'margin': margin}) + helper.append_op( + type='margin_rank_loss', + inputs={"Label": label, "X1": left, "X2": right}, + outputs={'Out': out, 'Activated': act}, + attrs={'margin': margin}, + ) return out @templatedoc() -def sigmoid_cross_entropy_with_logits(x, - label, - ignore_index=kIgnoreIndex, - name=None, - normalize=False): +def sigmoid_cross_entropy_with_logits( + x, label, ignore_index=kIgnoreIndex, name=None, normalize=False +): """ ${comment} @@ -1453,32 +1516,32 @@ def sigmoid_cross_entropy_with_logits(x, """ if in_dygraph_mode(): - return _C_ops.sigmoid_cross_entropy_with_logits(x, label, normalize, - int(ignore_index)) - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'sigmoid_cross_entropy_with_logits') + return _C_ops.sigmoid_cross_entropy_with_logits( + x, label, normalize, int(ignore_index) + ) + check_variable_and_dtype( + x, + 'input', + ['float16', 'float32', 'float64'], + 'sigmoid_cross_entropy_with_logits', + ) helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="sigmoid_cross_entropy_with_logits", - inputs={ - "X": x, - "Label": label - }, - attrs={ - "ignore_index": ignore_index, - 'normalize': normalize - }, - outputs={"Out": out}) + helper.append_op( + 
type="sigmoid_cross_entropy_with_logits", + inputs={"X": x, "Label": label}, + attrs={"ignore_index": ignore_index, 'normalize': normalize}, + outputs={"Out": out}, + ) return out -def teacher_student_sigmoid_loss(input, - label, - soft_max_up_bound=15.0, - soft_max_lower_bound=-15.0): +def teacher_student_sigmoid_loss( + input, label, soft_max_up_bound=15.0, soft_max_lower_bound=-15.0 +): """ **Teacher Student Log Loss Layer** @@ -1517,22 +1580,30 @@ def teacher_student_sigmoid_loss(input, cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label) """ - check_variable_and_dtype(input, "input", - ['float32', 'float64', 'int32', 'int64'], - 'teacher_student_sigmoid_loss') - check_variable_and_dtype(label, "label", - ['float32', 'float64', 'int32', 'int64'], - 'teacher_student_sigmoid_loss') + check_variable_and_dtype( + input, + "input", + ['float32', 'float64', 'int32', 'int64'], + 'teacher_student_sigmoid_loss', + ) + check_variable_and_dtype( + label, + "label", + ['float32', 'float64', 'int32', 'int64'], + 'teacher_student_sigmoid_loss', + ) helper = LayerHelper('teacher_student_sigmoid_loss', **locals()) out = helper.create_variable(dtype=input.dtype) helper.append_op( type='teacher_student_sigmoid_loss', - inputs={'X': [input], - 'Label': [label]}, + inputs={'X': [input], 'Label': [label]}, outputs={'Y': [out]}, - attrs={"soft_max_lower_bound": float(soft_max_lower_bound), \ - "soft_max_up_bound": float(soft_max_up_bound)}) + attrs={ + "soft_max_lower_bound": float(soft_max_lower_bound), + "soft_max_up_bound": float(soft_max_up_bound), + }, + ) return out @@ -1587,23 +1658,22 @@ def huber_loss(input, label, delta): return out helper = LayerHelper('huber_loss', **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'huber_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'huber_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'huber_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'huber_loss' + ) residual = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='huber_loss', - inputs={ - 'X': input, - 'Y': label - }, - outputs={ - 'Out': out, - 'Residual': residual - }, - attrs={'delta': delta}) + helper.append_op( + type='huber_loss', + inputs={'X': input, 'Y': label}, + outputs={'Out': out, 'Residual': residual}, + attrs={'delta': delta}, + ) return out @@ -1653,17 +1723,17 @@ def kldiv_loss(x, target, reduction='mean', name=None): """ helper = LayerHelper('kldiv_loss', **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'kldiv_loss') - check_variable_and_dtype(target, 'target', ['float32', 'float64'], - 'kldiv_loss') + check_variable_and_dtype( + target, 'target', ['float32', 'float64'], 'kldiv_loss' + ) check_type(reduction, 'reduction', str, 'kldiv_loss') loss = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='kldiv_loss', - inputs={ - 'X': x, - 'Target': target - }, - outputs={'Loss': loss}, - attrs={'reduction': reduction}) + helper.append_op( + type='kldiv_loss', + inputs={'X': x, 'Target': target}, + outputs={'Loss': loss}, + attrs={'reduction': reduction}, + ) return loss diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py index 
a0fd6782db6331c5f8811b2ce86f1934867b2b84..fc71321f1473212b829977342440a19c5c71bf6d 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/fluid/layers/math_op_patch.py @@ -53,14 +53,13 @@ EXPRESSION_MAP = { "__lt__": "A < B", "__le__": "A <= B", "__gt__": "A > B", - "__ge__": "A >= B" + "__ge__": "A >= B", } _already_patch_variable = False def monkey_patch_variable(): - def unique_tmp_name(): return unique_name.generate("tmp") @@ -85,15 +84,17 @@ def monkey_patch_variable(): def create_tensor(block, value, dtype, shape): value = float(value) var = create_new_tmp_var(block, dtype) - block.append_op(type="fill_constant", - outputs={'Out': [var]}, - attrs={ - 'dtype': var.dtype, - 'shape': shape, - 'value': value, - 'force_cpu': False - }, - stop_gradient=True) + block.append_op( + type="fill_constant", + outputs={'Out': [var]}, + attrs={ + 'dtype': var.dtype, + 'shape': shape, + 'value': value, + 'force_cpu': False, + }, + stop_gradient=True, + ) var.stop_gradient = True return var @@ -117,16 +118,18 @@ def monkey_patch_variable(): else: out_shape.append(d) assert batch_dim != -1 - block.append_op(type='fill_constant_batch_size_like', - outputs={'Out': [var]}, - inputs={'Input': [ref_var]}, - attrs={ - 'shape': out_shape, - 'value': value, - 'input_dim_idx': batch_dim, - 'output_dim_idx': batch_dim - }, - stop_gradient=True) + block.append_op( + type='fill_constant_batch_size_like', + outputs={'Out': [var]}, + inputs={'Input': [ref_var]}, + attrs={ + 'shape': out_shape, + 'value': value, + 'input_dim_idx': batch_dim, + 'output_dim_idx': batch_dim, + }, + stop_gradient=True, + ) var.stop_gradient = True return var @@ -208,37 +211,40 @@ def monkey_patch_variable(): """ block = current_block(self) out = create_new_tmp_var(block, dtype) - block.append_op(type="cast", - inputs={"X": [self]}, - outputs={"Out": [out]}, - attrs={ - "in_dtype": self.dtype, - "out_dtype": out.dtype - }) + block.append_op( + type="cast", + inputs={"X": [self]}, + outputs={"Out": [out]}, + attrs={"in_dtype": self.dtype, "out_dtype": out.dtype}, + ) out.stop_gradient = self.stop_gradient return out @static_only def append(self, var): """ - **Notes**: - **The type variable must be LoD Tensor Array. + **Notes**: + **The type variable must be LoD Tensor Array. """ if not isinstance(var, Variable): if in_declarative_mode(): - """ in dy2static mode, x may be tensorable values such as int, float, np.array - """ + """in dy2static mode, x may be tensorable values such as int, float, np.array""" from paddle.tensor.creation import to_tensor + var = to_tensor(var) else: raise TypeError( - "Required input var should be Variable, but received {}". - format(type(var))) + "Required input var should be Variable, but received {}".format( + type(var) + ) + ) if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: raise TypeError( - "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}" - .format(self.type)) + "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format( + self.type + ) + ) array_write(x=var, i=array_length(self), array=self) @static_only @@ -249,8 +255,10 @@ def monkey_patch_variable(): """ if len(self.shape) > 1: raise TypeError( - "Required input var should be 1-D Variable, but received {}". 
- format(self.shape)) + "Required input var should be 1-D Variable, but received {}".format( + self.shape + ) + ) return self @static_only @@ -266,23 +274,27 @@ def monkey_patch_variable(): Returns: Variable: self[index] """ - from paddle.fluid.dygraph.dygraph_to_static.convert_operators import _run_paddle_pop + from paddle.fluid.dygraph.dygraph_to_static.convert_operators import ( + _run_paddle_pop, + ) + if self.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: raise TypeError( - "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}" - .format(self.type)) + "Only Variable with VarType.LOD_TENSOR_ARRAY support `append` method, but received type: {}".format( + self.type + ) + ) return _run_paddle_pop(self, *args) def _scalar_op_(var, scale, bias): block = current_block(var) out = create_new_tmp_var(block, var.dtype) - block.append_op(type="scale", - inputs={"X": [var]}, - outputs={"Out": [out]}, - attrs={ - "scale": scale, - "bias": bias - }) + block.append_op( + type="scale", + inputs={"X": [var]}, + outputs={"Out": [out]}, + attrs={"scale": scale, "bias": bias}, + ) return out def _neg_(var): @@ -325,11 +337,9 @@ def monkey_patch_variable(): def _scalar_div_(var, value): return _scalar_op_(var, 1.0 / value, 0.0) - def _binary_creator_(method_name, - op_type, - reverse=False, - scalar_method=None): - + def _binary_creator_( + method_name, op_type, reverse=False, scalar_method=None + ): def __impl__(self, other_var): # 1. scalar exists cases # we need combine the tensor.dtype and scalar.dtype, cast correct object @@ -352,7 +362,10 @@ def monkey_patch_variable(): # so the calculation result here and the calculation result of numpy are # different after 6 decimal point. If necessary, we can also use float64 here. # torch's behavior here is consistent with ours - if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_: + if ( + op_type == 'elementwise_div' + and self.dtype in _supported_int_dtype_ + ): self = astype(self, 'float32') # here use `scale` replace `elementwise` to get better performance # but only +, -, *, / can use this method @@ -372,18 +385,21 @@ def monkey_patch_variable(): has_batch_size = True break if not has_batch_size: - other_var = create_tensor(current_block(self), - other_var, - dtype=lhs_dtype, - shape=self.shape) + other_var = create_tensor( + current_block(self), + other_var, + dtype=lhs_dtype, + shape=self.shape, + ) else: other_var = create_tensor_with_batchsize( - self, other_var, lhs_dtype) + self, other_var, lhs_dtype + ) else: # add fill_op to current_block - other_var = create_scalar(current_block(self), - value=other_var, - dtype=lhs_dtype) + other_var = create_scalar( + current_block(self), value=other_var, dtype=lhs_dtype + ) # 3. unify right var type to left var rhs_dtype = safe_get_dtype(other_var) @@ -409,16 +425,22 @@ def monkey_patch_variable(): "%s:%s\nThe behavior of expression %s has been unified with %s(X, Y, axis=-1) from Paddle 2.0. " "If your code works well in the older versions but crashes in this version, try to use " "%s(X, Y, axis=0) instead of %s. This transitional warning will be dropped in the future." 
- % (file_name, line_num, EXPRESSION_MAP[method_name], - op_type, op_type, EXPRESSION_MAP[method_name]), - category=DeprecationWarning) - current_block(self).append_op(type=op_type, - inputs={ - 'X': [self], - 'Y': [other_var] - }, - outputs={'Out': out}, - attrs={'axis': axis}) + % ( + file_name, + line_num, + EXPRESSION_MAP[method_name], + op_type, + op_type, + EXPRESSION_MAP[method_name], + ), + category=DeprecationWarning, + ) + current_block(self).append_op( + type=op_type, + inputs={'X': [self], 'Y': [other_var]}, + outputs={'Out': out}, + attrs={'axis': axis}, + ) return out comment = OpProtoHolder.instance().get_op_proto(op_type).comment @@ -431,35 +453,43 @@ def monkey_patch_variable(): Returns: Variable - """.format(comment) + """.format( + comment + ) __impl__.__name__ = method_name return __impl__ def values(var): block = current_block(var) out = create_new_tmp_var(block, var.dtype) - block.append_op(type="sparse_values", - inputs={"x": [var]}, - outputs={"out": [out]}, - attrs={}) + block.append_op( + type="sparse_values", + inputs={"x": [var]}, + outputs={"out": [out]}, + attrs={}, + ) return out def indices(var): block = current_block(var) out = create_new_tmp_var(block, var.dtype) - block.append_op(type="sparse_indices", - inputs={"x": [var]}, - outputs={"out": [out]}, - attrs={}) + block.append_op( + type="sparse_indices", + inputs={"x": [var]}, + outputs={"out": [out]}, + attrs={}, + ) return out def to_dense(var): block = current_block(var) out = create_new_tmp_var(block, var.dtype) - block.append_op(type="sparse_to_dense", - inputs={"x": [var]}, - outputs={"out": [out]}, - attrs={}) + block.append_op( + type="sparse_to_dense", + inputs={"x": [var]}, + outputs={"out": [out]}, + attrs={}, + ) return out variable_methods = [ @@ -475,39 +505,78 @@ def monkey_patch_variable(): ('dim', lambda x: len(x.shape)), ('ndimension', lambda x: len(x.shape)), ('ndim', _ndim_), - ('__add__', - _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)), + ( + '__add__', + _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_), + ), # a+b == b+a. Do not need to reverse explicitly - ('__radd__', - _binary_creator_('__radd__', 'elementwise_add', False, _scalar_add_)), - ('__sub__', - _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_)), - ('__rsub__', - _binary_creator_('__rsub__', 'elementwise_sub', True, _scalar_rsub_)), - ('__mul__', - _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_)), + ( + '__radd__', + _binary_creator_( + '__radd__', 'elementwise_add', False, _scalar_add_ + ), + ), + ( + '__sub__', + _binary_creator_('__sub__', 'elementwise_sub', False, _scalar_sub_), + ), + ( + '__rsub__', + _binary_creator_( + '__rsub__', 'elementwise_sub', True, _scalar_rsub_ + ), + ), + ( + '__mul__', + _binary_creator_('__mul__', 'elementwise_mul', False, _scalar_mul_), + ), # a*b == b*a. 
Do not need to reverse explicitly - ('__rmul__', - _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)), - ('__div__', - _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_)), - ('__truediv__', - _binary_creator_('__truediv__', 'elementwise_div', False, - _scalar_div_)), - ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True, - None)), - ('__rtruediv__', - _binary_creator_('__rtruediv__', 'elementwise_div', True, None)), - ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False, - None)), - ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True, - None)), - ('__floordiv__', - _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)), - ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, - None)), - ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False, - None)), + ( + '__rmul__', + _binary_creator_( + '__rmul__', 'elementwise_mul', False, _scalar_mul_ + ), + ), + ( + '__div__', + _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_), + ), + ( + '__truediv__', + _binary_creator_( + '__truediv__', 'elementwise_div', False, _scalar_div_ + ), + ), + ( + '__rdiv__', + _binary_creator_('__rdiv__', 'elementwise_div', True, None), + ), + ( + '__rtruediv__', + _binary_creator_('__rtruediv__', 'elementwise_div', True, None), + ), + ( + '__pow__', + _binary_creator_('__pow__', 'elementwise_pow', False, None), + ), + ( + '__rpow__', + _binary_creator_('__rpow__', 'elementwise_pow', True, None), + ), + ( + '__floordiv__', + _binary_creator_( + '__floordiv__', 'elementwise_floordiv', False, None + ), + ), + ( + '__mod__', + _binary_creator_('__mod__', 'elementwise_mod', False, None), + ), + ( + '__matmul__', + _binary_creator_('__matmul__', "matmul_v2", False, None), + ), # for logical compare ('__eq__', _binary_creator_('__eq__', 'equal', False, None)), ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)), @@ -528,13 +597,17 @@ def monkey_patch_variable(): setattr(Variable, method_name, method_impl) else: import paddle.tensor + for method_name in paddle.tensor.tensor_method_func: - if hasattr(Variable, method_name): continue + if hasattr(Variable, method_name): + continue method_impl = getattr(paddle.tensor, method_name, None) - if method_impl: setattr(Variable, method_name, method_impl) + if method_impl: + setattr(Variable, method_name, method_impl) for magic_method, origin_method in paddle.tensor.magic_method_func: impl = getattr(paddle.tensor, origin_method, None) - if impl: setattr(Variable, magic_method, impl) + if impl: + setattr(Variable, magic_method, impl) _already_patch_variable = True diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py index 36679f13b5a433f348f992062df1447d3efb30b0..8a63b55089e8b9db040c9a02694c01ec1630856c 100755 --- a/python/paddle/fluid/layers/metric_op.py +++ b/python/paddle/fluid/layers/metric_op.py @@ -18,7 +18,13 @@ All layers just related to metric. import warnings from ..layer_helper import LayerHelper from ..initializer import Normal, Constant -from ..framework import Variable, _non_static_mode, _varbase_creator, _in_legacy_dygraph, in_dygraph_mode +from ..framework import ( + Variable, + _non_static_mode, + _varbase_creator, + _in_legacy_dygraph, + in_dygraph_mode, +) from .. import core from ..param_attr import ParamAttr from . 
import nn @@ -74,15 +80,18 @@ def accuracy(input, label, k=1, correct=None, total=None): total = _varbase_creator(dtype="int32") _k = k.numpy().item(0) if isinstance(k, Variable) else k - topk_out, topk_indices = _legacy_C_ops.top_k_v2(input, 'k', _k, - 'sorted', False) - _acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label, - correct, total) + topk_out, topk_indices = _legacy_C_ops.top_k_v2( + input, 'k', _k, 'sorted', False + ) + _acc, _, _ = _legacy_C_ops.accuracy( + topk_out, topk_indices, label, correct, total + ) return _acc helper = LayerHelper("accuracy", **locals()) - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'accuracy') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'accuracy' + ) topk_out = helper.create_variable_for_type_inference(dtype=input.dtype) topk_indices = helper.create_variable_for_type_inference(dtype="int64") inputs = {"X": [input]} @@ -91,39 +100,38 @@ def accuracy(input, label, k=1, correct=None, total=None): else: attrs = {'k': k} attrs['sorted'] = False - helper.append_op(type="top_k_v2", - inputs=inputs, - attrs=attrs, - outputs={ - "Out": [topk_out], - "Indices": [topk_indices] - }) + helper.append_op( + type="top_k_v2", + inputs=inputs, + attrs=attrs, + outputs={"Out": [topk_out], "Indices": [topk_indices]}, + ) acc_out = helper.create_variable_for_type_inference(dtype="float32") if correct is None: correct = helper.create_variable_for_type_inference(dtype="int32") if total is None: total = helper.create_variable_for_type_inference(dtype="int32") - helper.append_op(type="accuracy", - inputs={ - "Out": [topk_out], - "Indices": [topk_indices], - "Label": [label] - }, - outputs={ - "Accuracy": [acc_out], - "Correct": [correct], - "Total": [total], - }) + helper.append_op( + type="accuracy", + inputs={"Out": [topk_out], "Indices": [topk_indices], "Label": [label]}, + outputs={ + "Accuracy": [acc_out], + "Correct": [correct], + "Total": [total], + }, + ) return acc_out -def auc(input, - label, - curve='ROC', - num_thresholds=2**12 - 1, - topk=1, - slide_steps=1, - ins_tag_weight=None): +def auc( + input, + label, + curve='ROC', + num_thresholds=2**12 - 1, + topk=1, + slide_steps=1, + ins_tag_weight=None, +): """ **Area Under the Curve (AUC) Layer** @@ -214,13 +222,14 @@ def auc(input, helper = LayerHelper("auc", **locals()) if ins_tag_weight is None: - ins_tag_weight = tensor.fill_constant(shape=[1, 1], - dtype="float32", - value=1.0) + ins_tag_weight = tensor.fill_constant( + shape=[1, 1], dtype="float32", value=1.0 + ) check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc') check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'auc') - check_variable_and_dtype(ins_tag_weight, 'ins_tag_weight', - ['float32', 'float64'], 'auc') + check_variable_and_dtype( + ins_tag_weight, 'ins_tag_weight', ['float32', 'float64'], 'auc' + ) auc_out = helper.create_variable_for_type_inference(dtype="float64") batch_auc_out = helper.create_variable_for_type_inference(dtype="float64") # make tp, tn, fp, fn persistable, so that can accumulate all batches. 
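Both the accuracy() hunk above and the auc() hunk that follows show the change this patch applies everywhere: once a helper.append_op(...) call no longer fits on one line, black puts every keyword argument on its own line and adds a trailing comma after the last one, rather than yapf's column-aligned continuation. A minimal sketch of the two layouts, using a stub in place of Paddle's LayerHelper so it runs on its own (the argument values are placeholders, not real graph variables):

# Stand-in for LayerHelper.append_op, only so the call shapes below execute.
def append_op(type, inputs=None, outputs=None, attrs=None):
    return {"type": type, "inputs": inputs, "outputs": outputs, "attrs": attrs}

# Old yapf layout: continuation lines aligned under the opening parenthesis.
op_old = append_op(type="accuracy",
                   inputs={"Out": ["topk_out"], "Indices": ["topk_indices"]},
                   outputs={"Accuracy": ["acc_out"], "Correct": ["correct"]})

# New black layout: the call is exploded, one argument per line, with a
# trailing comma after the final argument.
op_new = append_op(
    type="accuracy",
    inputs={"Out": ["topk_out"], "Indices": ["topk_indices"]},
    outputs={"Accuracy": ["acc_out"], "Correct": ["correct"]},
)

assert op_old == op_new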
@@ -234,62 +243,71 @@ def auc(input, batch_stat_pos = helper.create_global_variable( persistable=True, dtype='int64', - shape=[(1 + slide_steps) * (num_thresholds + 1) + 1]) + shape=[(1 + slide_steps) * (num_thresholds + 1) + 1], + ) batch_stat_neg = helper.create_global_variable( persistable=True, dtype='int64', - shape=[(1 + slide_steps) * (num_thresholds + 1) + 1]) + shape=[(1 + slide_steps) * (num_thresholds + 1) + 1], + ) # for global auc # Needn't maintain the batch id - stat_pos = helper.create_global_variable(persistable=True, - dtype='int64', - shape=[1, num_thresholds + 1]) - stat_neg = helper.create_global_variable(persistable=True, - dtype='int64', - shape=[1, num_thresholds + 1]) + stat_pos = helper.create_global_variable( + persistable=True, dtype='int64', shape=[1, num_thresholds + 1] + ) + stat_neg = helper.create_global_variable( + persistable=True, dtype='int64', shape=[1, num_thresholds + 1] + ) for var in [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg]: - helper.set_variable_initializer(var, Constant(value=0.0, - force_cpu=False)) + helper.set_variable_initializer( + var, Constant(value=0.0, force_cpu=False) + ) - #"InsTagWeight": [ins_tag_weight] + # "InsTagWeight": [ins_tag_weight] # Batch AUC - helper.append_op(type="auc", - inputs={ - "Predict": [input], - "Label": [label], - "StatPos": [batch_stat_pos], - "StatNeg": [batch_stat_neg] - }, - attrs={ - "curve": curve, - "num_thresholds": num_thresholds, - "slide_steps": slide_steps - }, - outputs={ - "AUC": [batch_auc_out], - "StatPosOut": [batch_stat_pos], - "StatNegOut": [batch_stat_neg] - }) + helper.append_op( + type="auc", + inputs={ + "Predict": [input], + "Label": [label], + "StatPos": [batch_stat_pos], + "StatNeg": [batch_stat_neg], + }, + attrs={ + "curve": curve, + "num_thresholds": num_thresholds, + "slide_steps": slide_steps, + }, + outputs={ + "AUC": [batch_auc_out], + "StatPosOut": [batch_stat_pos], + "StatNegOut": [batch_stat_neg], + }, + ) # Global AUC - helper.append_op(type="auc", - inputs={ - "Predict": [input], - "Label": [label], - "StatPos": [stat_pos], - "StatNeg": [stat_neg] - }, - attrs={ - "curve": curve, - "num_thresholds": num_thresholds, - "slide_steps": 0 - }, - outputs={ - "AUC": [auc_out], - "StatPosOut": [stat_pos], - "StatNegOut": [stat_neg] - }) - return auc_out, batch_auc_out, [ - batch_stat_pos, batch_stat_neg, stat_pos, stat_neg - ] + helper.append_op( + type="auc", + inputs={ + "Predict": [input], + "Label": [label], + "StatPos": [stat_pos], + "StatNeg": [stat_neg], + }, + attrs={ + "curve": curve, + "num_thresholds": num_thresholds, + "slide_steps": 0, + }, + outputs={ + "AUC": [auc_out], + "StatPosOut": [stat_pos], + "StatNegOut": [stat_neg], + }, + ) + return ( + auc_out, + batch_auc_out, + [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg], + ) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 38d1b30f0a471bc7028146877a4db348ed4ace9a..9f7cbb1141193da67298cc5657efdc3878736b0a 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -25,18 +25,39 @@ import paddle from ..layer_helper import LayerHelper from paddle.fluid.framework import _in_legacy_dygraph from ..initializer import Normal, Constant, NumpyArrayInitializer -from ..framework import Variable, OpProtoHolder, _non_static_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags, _in_legacy_dygraph, in_dygraph_mode +from ..framework import ( + Variable, + OpProtoHolder, + _non_static_mode, + 
dygraph_only, + _dygraph_tracer, + default_main_program, + _varbase_creator, + static_only, + _global_flags, + _in_legacy_dygraph, + in_dygraph_mode, +) from ..framework import _current_expected_place from .. import dygraph_utils from ..param_attr import ParamAttr -from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_ +from .layer_function_generator import ( + autodoc, + templatedoc, + _generate_doc_string_, +) from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor from . import utils from .. import unique_name from functools import reduce from .. import core from ...utils import deprecated -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) import paddle from paddle.utils import deprecated from paddle import _C_ops, _legacy_C_ops @@ -219,8 +240,10 @@ def _get_reduce_dim(dim, input): dim = [dim] else: raise TypeError( - "The type of dim must be int, list, tuple or range, but received {}" - .format(type(axis))) + "The type of dim must be int, list, tuple or range, but received {}".format( + type(axis) + ) + ) if dim is None: dim = [] if dim == [] or len(dim) == len(input.shape): @@ -232,13 +255,9 @@ def _get_reduce_dim(dim, input): @dygraph_only -def _elementwise_op_in_dygraph(x, - y, - axis=-1, - act=None, - use_mkldnn=False, - op_name=None): - +def _elementwise_op_in_dygraph( + x, y, axis=-1, act=None, use_mkldnn=False, op_name=None +): def is_inplace(op_name): return op_name[-1] == "_" @@ -249,24 +268,27 @@ def _elementwise_op_in_dygraph(x, if in_dygraph_mode(): op = getattr( _C_ops, - OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name, + ) out = op(x, y) if _in_legacy_dygraph(): op = getattr(_legacy_C_ops, op_name) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) - return dygraph_utils._append_activation_in_dygraph(out, - act, - use_mkldnn=use_mkldnn) - - -def fc(input, - size, - num_flatten_dims=1, - param_attr=None, - bias_attr=None, - act=None, - name=None): + return dygraph_utils._append_activation_in_dygraph( + out, act, use_mkldnn=use_mkldnn + ) + + +def fc( + input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None, +): r""" :api_attr: Static Graph @@ -384,8 +406,9 @@ def fc(input, for i, input_x in enumerate(input): check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc') dtype = helper.input_dtype() - check_dtype(dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], - 'fc') + check_dtype( + dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], 'fc' + ) mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): input_shape = input_var.shape @@ -395,31 +418,28 @@ def fc(input, reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + [size] - w = helper.create_parameter(attr=param_attr, - shape=param_shape, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False + ) tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="mul", - inputs={ - "X": input_var, - "Y": w - }, - outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1 - }) + helper.append_op( + type="mul", + inputs={"X": input_var, "Y": w}, + outputs={"Out": tmp}, + attrs={"x_num_col_dims": num_flatten_dims, "y_num_col_dims": 
1}, + ) mul_results.append(tmp) if len(mul_results) == 1: pre_bias = mul_results[0] else: pre_bias = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="sum", - inputs={"X": mul_results}, - outputs={"Out": pre_bias}, - attrs={"use_mkldnn": False}) + helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": False}, + ) # add bias pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) # add activation @@ -427,13 +447,15 @@ def fc(input, @deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding") -def embedding(input, - size, - is_sparse=False, - is_distributed=False, - padding_idx=None, - param_attr=None, - dtype='float32'): +def embedding( + input, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32', +): r""" :api_attr: Static Graph @@ -546,10 +568,15 @@ def embedding(input, """ helper = LayerHelper('embedding', **locals()) - check_variable_and_dtype(input, 'input', ['int64'], - 'fluid.layers.embedding') - check_dtype(dtype, 'dtype', ['uint16', 'float16', 'float32', 'float64'], - 'fluid.layers.embedding') + check_variable_and_dtype( + input, 'input', ['int64'], 'fluid.layers.embedding' + ) + check_dtype( + dtype, + 'dtype', + ['uint16', 'float16', 'float32', 'float64'], + 'fluid.layers.embedding', + ) if is_distributed: is_distributed = False @@ -559,37 +586,42 @@ def embedding(input, remote_prefetch = True if is_sparse else False - w = helper.create_parameter(attr=helper.param_attr, - shape=size, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False + ) tmp = helper.create_variable_for_type_inference(dtype) - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) - helper.append_op(type='lookup_table', - inputs={ - 'Ids': input, - 'W': w - }, - outputs={'Out': tmp}, - attrs={ - 'is_sparse': is_sparse, - 'is_distributed': is_distributed, - 'remote_prefetch': remote_prefetch, - 'padding_idx': padding_idx - }) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (size[0] + padding_idx) + ) + helper.append_op( + type='lookup_table', + inputs={'Ids': input, 'W': w}, + outputs={'Out': tmp}, + attrs={ + 'is_sparse': is_sparse, + 'is_distributed': is_distributed, + 'remote_prefetch': remote_prefetch, + 'padding_idx': padding_idx, + }, + ) return tmp -def _pull_sparse(input, - size, - table_id, - accessor_class, - name="embedding", - ctr_label_name="", - padding_id=0, - dtype='float32', - scale_sparse_grad=True): +def _pull_sparse( + input, + size, + table_id, + accessor_class, + name="embedding", + ctr_label_name="", + padding_id=0, + dtype='float32', + scale_sparse_grad=True, +): r""" **Pull Fleet Sparse Layer** @@ -636,35 +668,34 @@ def _pull_sparse(input, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, # this is only for compatible with embedding op - 'is_distributed': True + 'is_distributed': True, } # this is only for compatible with embedding op - w, _ = helper.create_or_get_global_variable(name=name, - shape=[size], - dtype=dtype, - is_bias=False, - persistable=True) - helper.append_op(type='pull_sparse', - inputs={ - 'Ids': inputs, - 'W': w - }, - outputs={'Out': outs}, - attrs=attrs) + w, _ = helper.create_or_get_global_variable( + name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True + ) + helper.append_op( + 
type='pull_sparse', + inputs={'Ids': inputs, 'W': w}, + outputs={'Out': outs}, + attrs=attrs, + ) if len(outs) == 1: return outs[0] return outs -def _pull_sparse_v2(input, - size, - table_id, - accessor_class, - name="embedding", - ctr_label_name="", - padding_id=0, - dtype='float32', - scale_sparse_grad=True): +def _pull_sparse_v2( + input, + size, + table_id, + accessor_class, + name="embedding", + ctr_label_name="", + padding_id=0, + dtype='float32', + scale_sparse_grad=True, +): r""" **Pull Fleet Sparse Layer** @@ -711,31 +742,26 @@ def _pull_sparse_v2(input, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, # this is only for compatible with embedding op - 'is_distributed': True + 'is_distributed': True, } # this is only for compatible with embedding op - w, _ = helper.create_or_get_global_variable(name=name, - shape=[size], - dtype=dtype, - is_bias=False, - persistable=True) - helper.append_op(type='pull_sparse_v2', - inputs={ - 'Ids': inputs, - 'W': w - }, - outputs={'Out': outs}, - attrs=attrs) + w, _ = helper.create_or_get_global_variable( + name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True + ) + helper.append_op( + type='pull_sparse_v2', + inputs={'Ids': inputs, 'W': w}, + outputs={'Out': outs}, + attrs=attrs, + ) if len(outs) == 1: return outs[0] return outs -def _pull_gpups_sparse(input, - size, - dtype='float32', - is_distributed=False, - is_sparse=False): +def _pull_gpups_sparse( + input, size, dtype='float32', is_distributed=False, is_sparse=False +): r""" **Pull GpuPS Sparse Layer** @@ -769,39 +795,36 @@ def _pull_gpups_sparse(input, helper = LayerHelper('pull_gpups_sparse', **locals()) if dtype != 'float32': raise ValueError( - "GpuPS only support float type embedding now, and your type is: " + - dtype) + "GpuPS only support float type embedding now, and your type is: " + + dtype + ) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] - w = helper.create_parameter(attr=helper.param_attr, - shape=[size[0]], - dtype=dtype, - is_bias=False) - helper.append_op(type='pull_gpups_sparse', - inputs={ - 'Ids': inputs, - 'W': w - }, - outputs={'Out': outs}, - attrs={ - 'size': size, - 'is_distributed': is_distributed, - 'is_sparse': is_sparse - }) + w = helper.create_parameter( + attr=helper.param_attr, shape=[size[0]], dtype=dtype, is_bias=False + ) + helper.append_op( + type='pull_gpups_sparse', + inputs={'Ids': inputs, 'W': w}, + outputs={'Out': outs}, + attrs={ + 'size': size, + 'is_distributed': is_distributed, + 'is_sparse': is_sparse, + }, + ) if len(outs) == 1: return outs[0] return outs -def _pull_box_sparse(input, - size, - dtype='float32', - is_distributed=False, - is_sparse=False): +def _pull_box_sparse( + input, size, dtype='float32', is_distributed=False, is_sparse=False +): r""" **Pull Box Sparse Layer** @@ -831,29 +854,28 @@ def _pull_box_sparse(input, helper = LayerHelper('pull_box_sparse', **locals()) if dtype != 'float32': raise ValueError( - "BoxPS only support float type embedding now, and your type is: " + - dtype) + "BoxPS only support float type embedding now, and your type is: " + + dtype + ) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] - w = helper.create_parameter(attr=helper.param_attr, - shape=[size], - dtype=dtype, - is_bias=False) - helper.append_op(type='pull_box_sparse', - inputs={ - 'Ids': inputs, - 'W': w - }, - outputs={'Out': 
outs}, - attrs={ - 'size': size, - 'is_distributed': is_distributed, - 'is_sparse': is_sparse - }) + w = helper.create_parameter( + attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False + ) + helper.append_op( + type='pull_box_sparse', + inputs={'Ids': inputs, 'W': w}, + outputs={'Out': outs}, + attrs={ + 'size': size, + 'is_distributed': is_distributed, + 'is_sparse': is_sparse, + }, + ) if len(outs) == 1: return outs[0] return outs @@ -949,37 +971,46 @@ def linear_chain_crf(input, label, param_attr=None, length=None): print(transition) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'linear_chain_crf') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'linear_chain_crf' + ) check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf') helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[2] if length else input.shape[1] - transition = helper.create_parameter(attr=helper.param_attr, - shape=[size + 2, size], - dtype=helper.input_dtype()) + transition = helper.create_parameter( + attr=helper.param_attr, + shape=[size + 2, size], + dtype=helper.input_dtype(), + ) alpha = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) emission_exps = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) transition_exps = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) log_likelihood = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) this_inputs = { "Emission": [input], "Transition": transition, - "Label": [label] + "Label": [label], } if length: this_inputs['Length'] = [length] - helper.append_op(type='linear_chain_crf', - inputs=this_inputs, - outputs={ - "Alpha": [alpha], - "EmissionExps": [emission_exps], - "TransitionExps": transition_exps, - "LogLikelihood": log_likelihood - }) + helper.append_op( + type='linear_chain_crf', + inputs=this_inputs, + outputs={ + "Alpha": [alpha], + "EmissionExps": [emission_exps], + "TransitionExps": transition_exps, + "LogLikelihood": log_likelihood, + }, + ) return log_likelihood @@ -1035,18 +1066,22 @@ def crf_decoding(input, param_attr, label=None, length=None): crf_decode = paddle.static.nn.crf_decoding(input=emission, length=length, param_attr=paddle.ParamAttr(name="crfw_pad")) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'crf_decoding') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'crf_decoding' + ) helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) viterbi_path = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.INT64) + dtype=core.VarDesc.VarType.INT64 + ) inputs = {"Emission": [input], "Transition": transition, "Label": label} if length: inputs['Length'] = length - helper.append_op(type='crf_decoding', - inputs=inputs, - outputs={"ViterbiPath": [viterbi_path]}) + helper.append_op( + type='crf_decoding', + inputs=inputs, + outputs={"ViterbiPath": [viterbi_path]}, + ) return viterbi_path @@ -1080,26 +1115,23 @@ def cos_sim(X, Y): out = helper.create_variable_for_type_inference(dtype=X.dtype) xnorm = helper.create_variable_for_type_inference(dtype=X.dtype) ynorm = helper.create_variable_for_type_inference(dtype=X.dtype) - helper.append_op(type='cos_sim', - inputs={ - 'X': [X], - 'Y': [Y] - }, - outputs={ - 'Out': [out], - 'XNorm': [xnorm], 
- 'YNorm': [ynorm] - }) + helper.append_op( + type='cos_sim', + inputs={'X': [X], 'Y': [Y]}, + outputs={'Out': [out], 'XNorm': [xnorm], 'YNorm': [ynorm]}, + ) return out @deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout") -def dropout(x, - dropout_prob, - is_test=None, - seed=None, - name=None, - dropout_implementation="downgrade_in_infer"): +def dropout( + x, + dropout_prob, + is_test=None, + seed=None, + name=None, + dropout_implementation="downgrade_in_infer", +): """ Computes dropout. @@ -1157,23 +1189,32 @@ def dropout(x, """ if not isinstance(dropout_prob, (float, int, Variable)): raise TypeError( - "dropout_prob argument should be a number(int|float) or Variable") + "dropout_prob argument should be a number(int|float) or Variable" + ) # fast return for p == 0 if isinstance(dropout_prob, (int, float)) and dropout_prob == 0: return x if _non_static_mode(): - if (seed is None - or seed == 0) and default_main_program().random_seed != 0: + if ( + seed is None or seed == 0 + ) and default_main_program().random_seed != 0: seed = default_main_program().random_seed if is_test is None: is_test = not _dygraph_tracer()._train_mode - out, mask = _legacy_C_ops.dropout(x, 'dropout_prob', dropout_prob, - 'is_test', is_test, 'fix_seed', seed - is not None, 'seed', - seed if seed is not None else 0, - 'dropout_implementation', - dropout_implementation) + out, mask = _legacy_C_ops.dropout( + x, + 'dropout_prob', + dropout_prob, + 'is_test', + is_test, + 'fix_seed', + seed is not None, + 'seed', + seed if seed is not None else 0, + 'dropout_implementation', + dropout_implementation, + ) return out def get_attrs(prog, dropout_prob, is_test, seed): @@ -1181,8 +1222,10 @@ def dropout(x, seed = prog.random_seed if isinstance(dropout_prob, Variable) and not dropout_prob.shape != [1]: raise TypeError( - "Required dropout_prob.shape == [1] if type(dropout_prob) is Variable, but received dropout_prob.shape = {}" - .format(dropout_prob.shape)) + "Required dropout_prob.shape == [1] if type(dropout_prob) is Variable, but received dropout_prob.shape = {}".format( + dropout_prob.shape + ) + ) attrs = { 'dropout_prob': dropout_prob, 'is_test': is_test, @@ -1193,32 +1236,35 @@ def dropout(x, return attrs helper = LayerHelper('dropout', **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'dropout') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'dropout' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) mask = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed) - helper.append_op(type='dropout', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'Mask': [mask] - }, - attrs=attrs) + helper.append_op( + type='dropout', + inputs={'X': [x]}, + outputs={'Out': [out], 'Mask': [mask]}, + attrs=attrs, + ) return out @templatedoc() -def chunk_eval(input, - label, - chunk_scheme, - num_chunk_types, - excluded_chunk_types=None, - seq_length=None): +def chunk_eval( + input, + label, + chunk_scheme, + num_chunk_types, + excluded_chunk_types=None, + seq_length=None, +): r""" This operator computes the precision, recall and F1-score for chunk detection. It is often used in sequence tagging tasks, such as Named Entity Recognition(NER). 
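The nn.py hunks repeat the other two recurring patterns of this patch: a def whose parameter list overflows the line limit is reflowed to one parameter per line with a trailing comma, and a multi-value return is wrapped in parentheses the same way (the chunk_eval hunk just below does exactly this). A small self-contained sketch in that style; chunk_scores and its F1 computation are hypothetical illustrations, not a Paddle API:

# Hypothetical helper written in the exploded-signature style black keeps
# whenever the parameter list carries a trailing comma or exceeds the limit.
def chunk_scores(
    num_correct,
    num_infer,
    num_label,
    eps=1e-8,
):
    precision = num_correct / (num_infer + eps)
    recall = num_correct / (num_label + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    # Multi-value return wrapped in parentheses with a trailing comma,
    # mirroring the reflowed return of chunk_eval in the hunk below.
    return (
        precision,
        recall,
        f1,
    )

print(chunk_scores(8, 10, 9))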
@@ -1337,30 +1383,39 @@ def chunk_eval(input, num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64") num_label_chunks = helper.create_variable_for_type_inference(dtype="int64") num_correct_chunks = helper.create_variable_for_type_inference( - dtype="int64") + dtype="int64" + ) this_input = {"Inference": [input], "Label": [label]} if seq_length is not None: this_input["SeqLength"] = [seq_length] - helper.append_op(type="chunk_eval", - inputs=this_input, - outputs={ - "Precision": [precision], - "Recall": [recall], - "F1-Score": [f1_score], - "NumInferChunks": [num_infer_chunks], - "NumLabelChunks": [num_label_chunks], - "NumCorrectChunks": [num_correct_chunks] - }, - attrs={ - "num_chunk_types": num_chunk_types, - "chunk_scheme": chunk_scheme, - "excluded_chunk_types": excluded_chunk_types or [] - }) - return (precision, recall, f1_score, num_infer_chunks, num_label_chunks, - num_correct_chunks) + helper.append_op( + type="chunk_eval", + inputs=this_input, + outputs={ + "Precision": [precision], + "Recall": [recall], + "F1-Score": [f1_score], + "NumInferChunks": [num_infer_chunks], + "NumLabelChunks": [num_label_chunks], + "NumCorrectChunks": [num_correct_chunks], + }, + attrs={ + "num_chunk_types": num_chunk_types, + "chunk_scheme": chunk_scheme, + "excluded_chunk_types": excluded_chunk_types or [], + }, + ) + return ( + precision, + recall, + f1_score, + num_infer_chunks, + num_label_chunks, + num_correct_chunks, + ) @deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax") @@ -1481,38 +1536,44 @@ def softmax(input, use_cudnn=True, name=None, axis=-1): return _C_ops.softmax(input, axis) if _non_static_mode(): - return _legacy_C_ops.softmax(input, 'axis', axis, 'use_cudnn', - use_cudnn) + return _legacy_C_ops.softmax( + input, 'axis', axis, 'use_cudnn', use_cudnn + ) inputs = {"X": [input]} attrs = {"axis": axis, "use_cudnn": use_cudnn} helper = LayerHelper('softmax', **locals()) - check_variable_and_dtype(input, 'input/x', - ['float16', 'float32', 'float64'], 'softmax') + check_variable_and_dtype( + input, 'input/x', ['float16', 'float32', 'float64'], 'softmax' + ) dtype = helper.input_dtype() softmax_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="softmax", - inputs={"X": input}, - outputs={"Out": softmax_out}, - attrs=attrs) + helper.append_op( + type="softmax", + inputs={"X": input}, + outputs={"Out": softmax_out}, + attrs=attrs, + ) return softmax_out -def conv2d(input, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None, - data_format="NCHW"): +def conv2d( + input, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None, + data_format="NCHW", +): r""" :api_attr: Static Graph @@ -1648,27 +1709,34 @@ def conv2d(input, print(conv2d.shape) # [-1, 2, 30, 30] """ - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'conv2d') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'conv2d' + ) if len(input.shape) != 4: - raise ValueError("Input size should be 4, " - "but received {}".format(len(input.shape))) + raise ValueError( + "Input size should be 4, " + "but received {}".format(len(input.shape)) + ) num_channels = input.shape[1] if not isinstance(use_cudnn, bool): - raise ValueError("Attr(use_cudnn) should be True or False. 
Received " - "Attr(use_cudnn): %s. " % str(use_cudnn)) + raise ValueError( + "Attr(use_cudnn) should be True or False. Received " + "Attr(use_cudnn): %s. " % str(use_cudnn) + ) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." % str(data_format) + ) - channel_last = (data_format == "NHWC") + channel_last = data_format == "NHWC" num_channels = input.shape[3] if channel_last else input.shape[1] if num_channels < 0: raise ValueError( "The channel dimmention of the input(%s) should be defined. " - "Received: %s." % (str(input.shape), str(num_channels))) + "Received: %s." % (str(input.shape), str(num_channels)) + ) assert param_attr is not False, "param_attr should not be False here." if groups is None: @@ -1676,27 +1744,35 @@ def conv2d(input, elif groups <= 0: raise ValueError( "the groups of input must be greater than 0, " - "but received the groups of input is {}".format(groups)) + "but received the groups of input is {}".format(groups) + ) else: if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, input.shape, groups)) + ", the groups is {}".format(num_channels, input.shape, groups) + ) num_filter_channels = num_channels // groups l_type = 'conv2d' - if (num_channels == groups and num_filters % num_channels == 0 - and not use_cudnn): + if ( + num_channels == groups + and num_filters % num_channels == 0 + and not use_cudnn + ): l_type = 'depthwise_conv2d' - if (num_channels == groups and num_filters % num_channels == 0 - and core.is_compiled_with_rocm()): + if ( + num_channels == groups + and num_filters % num_channels == 0 + and core.is_compiled_with_rocm() + ): l_type = 'depthwise_conv2d' # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups" if core.is_compiled_with_npu(): - if (num_channels == groups and num_channels == num_filters): + if num_channels == groups and num_channels == num_filters: l_type = 'depthwise_conv2d' else: l_type = 'conv2d' @@ -1710,7 +1786,6 @@ def conv2d(input, # padding def _update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True @@ -1721,14 +1796,16 @@ def conv2d(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') @@ -1745,8 +1822,9 @@ def conv2d(input, padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % - str(padding)) + "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." 
+ % str(padding) + ) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0] @@ -1764,39 +1842,47 @@ def conv2d(input, raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " - "filter size.".format(filter_elem_num)) - std = (2.0 / filter_elem_num)**0.5 + "filter size.".format(filter_elem_num) + ) + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, - default_initializer=_get_default_param_initializer()) + default_initializer=_get_default_param_initializer(), + ) pre_bias = helper.create_variable_for_type_inference(dtype) - if (core.is_compiled_with_cuda() and paddle.fluid.get_flags( - "FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]): + if ( + core.is_compiled_with_cuda() + and paddle.fluid.get_flags("FLAGS_conv2d_disable_cudnn")[ + "FLAGS_conv2d_disable_cudnn" + ] + ): use_cudnn = False - helper.append_op(type=l_type, - inputs={ - 'Input': input, - 'Filter': filter_param, - }, - outputs={"Output": pre_bias}, - attrs={ - 'strides': stride, - 'paddings': padding, - 'dilations': dilation, - 'groups': groups, - 'use_cudnn': use_cudnn, - 'use_mkldnn': False, - 'fuse_relu_before_depthwise_conv': False, - "padding_algorithm": padding_algorithm, - "data_format": data_format, - }) + helper.append_op( + type=l_type, + inputs={ + 'Input': input, + 'Filter': filter_param, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn, + 'use_mkldnn': False, + 'fuse_relu_before_depthwise_conv': False, + "padding_algorithm": padding_algorithm, + "data_format": data_format, + }, + ) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) @@ -1806,19 +1892,21 @@ def conv2d(input, return helper.append_activation(pre_act) -def conv3d(input, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None, - data_format="NCDHW"): +def conv3d( + input, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None, + data_format="NCDHW", +): r""" :api_attr: Static Graph @@ -1961,37 +2049,46 @@ def conv3d(input, dtype = helper.input_dtype() if not isinstance(use_cudnn, bool): - raise ValueError("Attr(use_cudnn) should be True or False. Received " - "Attr(use_cudnn): %s. " % str(use_cudnn)) + raise ValueError( + "Attr(use_cudnn) should be True or False. Received " + "Attr(use_cudnn): %s. " % str(use_cudnn) + ) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." % str(data_format) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" if len(input.shape) != 5: raise ValueError( - "Input should be 5D tensor, but received input with the shape of {}" - .format(input.shape)) + "Input should be 5D tensor, but received input with the shape of {}".format( + input.shape + ) + ) num_channels = input.shape[4] if channel_last else input.shape[1] if num_channels < 0: raise ValueError( "The channel dimmention of the input(%s) should be defined. " - "Received: %s." % (str(input.shape), str(num_channels))) + "Received: %s." 
% (str(input.shape), str(num_channels)) + ) if groups is None: num_filter_channels = num_channels elif groups <= 0: raise ValueError( - "the groups of conv3d should be greater than 0. Received groups: {}" - .format(groups)) + "the groups of conv3d should be greater than 0. Received groups: {}".format( + groups + ) + ) else: if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " - "Received: number of channels(%s), groups(%s)." % - (str(num_channels), str(groups))) + "Received: number of channels(%s), groups(%s)." + % (str(num_channels), str(groups)) + ) num_filter_channels = num_channels // groups filter_size = utils.convert_to_list(filter_size, 3, 'filter_size') @@ -1999,7 +2096,6 @@ def conv3d(input, dilation = utils.convert_to_list(dilation, 3, 'dilation') def _update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True @@ -2010,14 +2106,16 @@ def conv3d(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') @@ -2037,8 +2135,9 @@ def conv3d(input, padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % - str(padding)) + "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." 
+ % str(padding) + ) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0] @@ -2052,41 +2151,46 @@ def conv3d(input, filter_shape = [num_filters, num_filter_channels] + filter_size def _get_default_param_initializer(): - filter_elem_num = filter_size[0] * filter_size[1] * filter_size[ - 2] * num_channels + filter_elem_num = ( + filter_size[0] * filter_size[1] * filter_size[2] * num_channels + ) if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " - "filter size.".format(filter_elem_num)) + "filter size.".format(filter_elem_num) + ) - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, - default_initializer=_get_default_param_initializer()) + default_initializer=_get_default_param_initializer(), + ) pre_bias = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=l_type, - inputs={ - 'Input': input, - 'Filter': filter_param, - }, - outputs={"Output": pre_bias}, - attrs={ - 'strides': stride, - 'paddings': padding, - 'dilations': dilation, - 'groups': groups, - 'use_cudnn': use_cudnn, - 'use_mkldnn': False, - "padding_algorithm": padding_algorithm, - "data_format": data_format, - }) + helper.append_op( + type=l_type, + inputs={ + 'Input': input, + 'Filter': filter_param, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn, + 'use_mkldnn': False, + "padding_algorithm": padding_algorithm, + "data_format": data_format, + }, + ) if data_format == 'NCDHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) @@ -2097,17 +2201,19 @@ def conv3d(input, @templatedoc() -def pool2d(input, - pool_size=-1, - pool_type="max", - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True, - data_format="NCHW"): +def pool2d( + input, + pool_size=-1, + pool_type="max", + pool_stride=1, + pool_padding=0, + global_pooling=False, + use_cudnn=True, + ceil_mode=False, + name=None, + exclusive=True, + data_format="NCHW", +): """ ${comment} @@ -2218,27 +2324,31 @@ def pool2d(input, if pool_type not in ["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", - str(pool_type)) + str(pool_type), + ) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " - "and be a valid value. Received pool_size: %s." % str(pool_size)) + "and be a valid value. Received pool_size: %s." % str(pool_size) + ) if not isinstance(use_cudnn, bool): - raise TypeError("Attr(use_cudnn) should be True or False. Received " - "Attr(use_cudnn): %s." % str(use_cudnn)) + raise TypeError( + "Attr(use_cudnn) should be True or False. Received " + "Attr(use_cudnn): %s." % str(use_cudnn) + ) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." 
% str(data_format) + ) pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') def update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True @@ -2249,14 +2359,16 @@ def pool2d(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') @@ -2274,61 +2386,77 @@ def pool2d(input, if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." - % str(pool_padding)) + % str(pool_padding) + ) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." + ) elif pool_padding == "SAME": padding_algorithm = "SAME" pool_padding = [0, 0] pool_padding = update_padding(pool_padding, data_format) if in_dygraph_mode(): - return _C_ops.pool2d(input, pool_size, pool_stride, pool_padding, - ceil_mode, exclusive, data_format, pool_type, - global_pooling, False, padding_algorithm, - use_cudnn) + return _C_ops.pool2d( + input, + pool_size, + pool_stride, + pool_padding, + ceil_mode, + exclusive, + data_format, + pool_type, + global_pooling, + False, + padding_algorithm, + use_cudnn, + ) op_type = 'pool2d' helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={"X": input}, - outputs={"Out": pool_out}, - attrs={ - "pooling_type": pool_type, - "ksize": pool_size, - "global_pooling": global_pooling, - "strides": pool_stride, - "paddings": pool_padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": use_cudnn, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": exclusive, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": input}, + outputs={"Out": pool_out}, + attrs={ + "pooling_type": pool_type, + "ksize": pool_size, + "global_pooling": global_pooling, + "strides": pool_stride, + "paddings": pool_padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": use_cudnn, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": exclusive, + "data_format": data_format, + }, + ) return pool_out @templatedoc() -def pool3d(input, - pool_size=-1, - pool_type="max", - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True, - data_format="NCDHW"): +def pool3d( + input, + pool_size=-1, + pool_type="max", + pool_stride=1, + pool_padding=0, + global_pooling=False, + use_cudnn=True, + ceil_mode=False, + name=None, + exclusive=True, + data_format="NCDHW", +): """ ${comment} @@ -2445,28 +2573,32 @@ def pool3d(input, if pool_type not in 
["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", - str(pool_type)) + str(pool_type), + ) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " - "and be a valid value. Received Attr(pool_size): %s." % - str(pool_size)) + "and be a valid value. Received Attr(pool_size): %s." + % str(pool_size) + ) if not isinstance(use_cudnn, bool): - raise TypeError("Attr(use_cudnn) should be True or False. Received " - "Attr(use_cudnn): %s. " % str(use_cudnn)) + raise TypeError( + "Attr(use_cudnn) should be True or False. Received " + "Attr(use_cudnn): %s. " % str(use_cudnn) + ) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s" % str(data_format)) + "Attr(data_format): %s" % str(data_format) + ) pool_size = utils.convert_to_list(pool_size, 3, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride') def update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, (list, tuple)): return True @@ -2477,14 +2609,16 @@ def pool3d(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') @@ -2506,14 +2640,16 @@ def pool3d(input, if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." - % str(pool_padding)) + % str(pool_padding) + ) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", ceil_mode must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+                )
         elif pool_padding == "SAME":
             padding_algorithm = "SAME"
             pool_padding = [0, 0, 0]
@@ -2525,33 +2661,33 @@ def pool3d(input,
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)

-    helper.append_op(type=op_type,
-                     inputs={"X": input},
-                     outputs={"Out": pool_out},
-                     attrs={
-                         "pooling_type": pool_type,
-                         "ksize": pool_size,
-                         "global_pooling": global_pooling,
-                         "strides": pool_stride,
-                         "paddings": pool_padding,
-                         "padding_algorithm": padding_algorithm,
-                         "use_cudnn": use_cudnn,
-                         "ceil_mode": ceil_mode,
-                         "use_mkldnn": False,
-                         "exclusive": exclusive,
-                         "data_format": data_format,
-                     })
+    helper.append_op(
+        type=op_type,
+        inputs={"X": input},
+        outputs={"Out": pool_out},
+        attrs={
+            "pooling_type": pool_type,
+            "ksize": pool_size,
+            "global_pooling": global_pooling,
+            "strides": pool_stride,
+            "paddings": pool_padding,
+            "padding_algorithm": padding_algorithm,
+            "use_cudnn": use_cudnn,
+            "ceil_mode": ceil_mode,
+            "use_mkldnn": False,
+            "exclusive": exclusive,
+            "data_format": data_format,
+        },
+    )

     return pool_out


 @deprecated(since="2.0.0")
 @templatedoc(op_type="pool2d")
-def adaptive_pool2d(input,
-                    pool_size,
-                    pool_type="max",
-                    require_index=False,
-                    name=None):
+def adaptive_pool2d(
+    input, pool_size, pool_type="max", require_index=False, name=None
+):
     r"""

     This operation calculates the output based on the input, pool_size,
@@ -2648,19 +2784,24 @@ def adaptive_pool2d(input,
                               pool_type='max')
     """
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'adaptive_pool2d')
+        input,
+        'input',
+        ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'adaptive_pool2d',
+    )
     check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
     check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
     check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
     if pool_type not in ["max", "avg"]:
         raise ValueError(
             "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
-            str(pool_type))
+            str(pool_type),
+        )
     if pool_type == "avg" and require_index:
         raise ValueError(
-            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")
+            "invalid setting 'require_index' true when 'pool_type' is 'avg'."
+        )

     pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')

@@ -2678,25 +2819,25 @@ def adaptive_pool2d(input,
         mask = helper.create_variable_for_type_inference(dtype)
         outputs["Mask"] = mask

-    helper.append_op(type=l_type,
-                     inputs={"X": input},
-                     outputs=outputs,
-                     attrs={
-                         "pooling_type": pool_type,
-                         "ksize": pool_size,
-                         "adaptive": True,
-                     })
+    helper.append_op(
+        type=l_type,
+        inputs={"X": input},
+        outputs=outputs,
+        attrs={
+            "pooling_type": pool_type,
+            "ksize": pool_size,
+            "adaptive": True,
+        },
+    )

     return (pool_out, mask) if require_index else pool_out


 @deprecated(since="2.0.0")
 @templatedoc(op_type="pool3d")
-def adaptive_pool3d(input,
-                    pool_size,
-                    pool_type="max",
-                    require_index=False,
-                    name=None):
+def adaptive_pool3d(
+    input, pool_size, pool_type="max", require_index=False, name=None
+):
     r"""

     This operation calculates the output based on the input, pool_size,
@@ -2807,19 +2948,24 @@ def adaptive_pool3d(input,
                           pool_type='max')
     """
     check_variable_and_dtype(
-        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'adaptive_pool3d')
+        input,
+        'input',
+        ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'adaptive_pool3d',
+    )
     check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
     check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
     check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
     if pool_type not in ["max", "avg"]:
         raise ValueError(
             "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
-            str(pool_type))
+            str(pool_type),
+        )
     if pool_type == "avg" and require_index:
         raise ValueError(
-            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")
+            "invalid setting 'require_index' true when 'pool_type' is 'avg'."
+        )

     pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')

@@ -2837,32 +2983,36 @@ def adaptive_pool3d(input,
         mask = helper.create_variable_for_type_inference(dtype)
         outputs["Mask"] = mask

-    helper.append_op(type=l_type,
-                     inputs={"X": input},
-                     outputs=outputs,
-                     attrs={
-                         "pooling_type": pool_type,
-                         "ksize": pool_size,
-                         "adaptive": True,
-                     })
+    helper.append_op(
+        type=l_type,
+        inputs={"X": input},
+        outputs=outputs,
+        attrs={
+            "pooling_type": pool_type,
+            "ksize": pool_size,
+            "adaptive": True,
+        },
+    )

     return (pool_out, mask) if require_index else pool_out


-def batch_norm(input,
-               act=None,
-               is_test=False,
-               momentum=0.9,
-               epsilon=1e-05,
-               param_attr=None,
-               bias_attr=None,
-               data_layout='NCHW',
-               in_place=False,
-               name=None,
-               moving_mean_name=None,
-               moving_variance_name=None,
-               do_model_average_for_mean_and_var=True,
-               use_global_stats=False):
+def batch_norm(
+    input,
+    act=None,
+    is_test=False,
+    momentum=0.9,
+    epsilon=1e-05,
+    param_attr=None,
+    bias_attr=None,
+    data_layout='NCHW',
+    in_place=False,
+    name=None,
+    moving_mean_name=None,
+    moving_variance_name=None,
+    do_model_average_for_mean_and_var=True,
+    use_global_stats=False,
+):
     r"""
     :api_attr: Static Graph

@@ -2977,11 +3127,14 @@ def batch_norm(input,
             print(hidden2.shape)
             # [3, 200]
     """
-    assert bias_attr is not False, "bias_attr should not be False in batch_norm."
+    assert (
+        bias_attr is not False
+    ), "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals()) - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'batch_norm') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'batch_norm' + ) dtype = helper.input_dtype() # use fp32 for bn parameter @@ -3000,31 +3153,38 @@ def batch_norm(input, param_shape = [channel_num] # create parameter - scale = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - default_initializer=Constant(1.0)) - bias = helper.create_parameter(attr=helper.bias_attr, - shape=param_shape, - dtype=dtype, - is_bias=True) - - mean = helper.create_parameter(attr=ParamAttr( - name=moving_mean_name, - initializer=Constant(0.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=dtype) + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0), + ) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True + ) + + mean = helper.create_parameter( + attr=ParamAttr( + name=moving_mean_name, + initializer=Constant(0.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=dtype, + ) mean.stop_gradient = True - variance = helper.create_parameter(attr=ParamAttr( - name=moving_variance_name, - initializer=Constant(1.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=dtype) + variance = helper.create_parameter( + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=dtype, + ) variance.stop_gradient = True # create output @@ -3044,38 +3204,81 @@ def batch_norm(input, attrs_ = () if attrs_has_momentum: - attrs_ = ('momentum', momentum, 'epsilon', epsilon, 'is_test', - is_test, 'data_layout', data_layout, 'use_mkldnn', False, - 'fuse_with_relu', False, 'use_global_stats', - use_global_stats) + attrs_ = ( + 'momentum', + momentum, + 'epsilon', + epsilon, + 'is_test', + is_test, + 'data_layout', + data_layout, + 'use_mkldnn', + False, + 'fuse_with_relu', + False, + 'use_global_stats', + use_global_stats, + ) else: - attrs_ = ('epsilon', epsilon, 'is_test', is_test, 'data_layout', - data_layout, 'use_mkldnn', False, 'fuse_with_relu', False, - 'use_global_stats', use_global_stats) + attrs_ = ( + 'epsilon', + epsilon, + 'is_test', + is_test, + 'data_layout', + data_layout, + 'use_mkldnn', + False, + 'fuse_with_relu', + False, + 'use_global_stats', + use_global_stats, + ) if inputs_has_MomemtumTensor: batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm( - input, scale, bias, mean, variance, momentum, mean_out, - variance_out, *attrs_) + input, + scale, + bias, + mean, + variance, + momentum, + mean_out, + variance_out, + *attrs_, + ) else: batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm( - input, scale, bias, mean, variance, None, mean_out, - variance_out, *attrs_) + input, + scale, + bias, + mean, + variance, + None, + mean_out, + variance_out, + *attrs_, + ) - return dygraph_utils._append_activation_in_dygraph(batch_norm_out, - act=act, - use_mkldnn=False) + return dygraph_utils._append_activation_in_dygraph( + batch_norm_out, act=act, use_mkldnn=False + ) - saved_mean = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + saved_mean = 
helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) reserve_space = None if not is_test: reserve_space = helper.create_variable_for_type_inference( - dtype=helper.input_dtype(), stop_gradient=True) + dtype=helper.input_dtype(), stop_gradient=True + ) - batch_norm_out = input if in_place else \ - helper.create_variable_for_type_inference(dtype) + batch_norm_out = ( + input if in_place else helper.create_variable_for_type_inference(dtype) + ) inputs = { "X": input, @@ -3084,7 +3287,7 @@ def batch_norm(input, "Mean": mean, "Variance": variance, "MeanOut": mean_out, - "VarianceOut": variance_out + "VarianceOut": variance_out, } attrs = { "epsilon": epsilon, @@ -3092,7 +3295,7 @@ def batch_norm(input, "data_layout": data_layout, "use_mkldnn": False, "fuse_with_relu": False, - "use_global_stats": use_global_stats + "use_global_stats": use_global_stats, } if isinstance(momentum, Variable): inputs['MomemtumTensor'] = momentum @@ -3104,33 +3307,34 @@ def batch_norm(input, "MeanOut": mean_out, "VarianceOut": variance_out, "SavedMean": saved_mean, - "SavedVariance": saved_variance + "SavedVariance": saved_variance, } if reserve_space is not None: outputs["ReserveSpace"] = reserve_space - helper.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) return helper.append_activation(batch_norm_out) -def inplace_abn(input, - act=None, - is_test=False, - momentum=0.9, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - data_layout='NCHW', - name=None, - moving_mean_name=None, - moving_variance_name=None, - do_model_average_for_mean_and_var=True, - use_global_stats=False, - act_alpha=1.0): +def inplace_abn( + input, + act=None, + is_test=False, + momentum=0.9, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + data_layout='NCHW', + name=None, + moving_mean_name=None, + moving_variance_name=None, + do_model_average_for_mean_and_var=True, + use_global_stats=False, + act_alpha=1.0, +): r""" **In-place Activation Batch Normalization Layer** @@ -3164,14 +3368,14 @@ def inplace_abn(input, numerical stability. Default is 1e-5. param_attr(ParamAttr|None): The parameter attribute for Parameter `scale` of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn - will create ParamAttr as param_attr, the name of scale can be set in ParamAttr. - If the Initializer of the param_attr is not set, the parameter is initialized - with Xavier. Default: None. + will create ParamAttr as param_attr, the name of scale can be set in ParamAttr. + If the Initializer of the param_attr is not set, the parameter is initialized + with Xavier. Default: None. bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn - will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. - If the Initializer of the bias_attr is not set, the bias is initialized zero. - Default: None. + will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. + If the Initializer of the bias_attr is not set, the bias is initialized zero. + Default: None. data_layout (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. 
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: @@ -3209,14 +3413,18 @@ def inplace_abn(input, hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2) """ - assert act in [None, 'identity', 'leaky_relu', 'elu'], \ - "inplace_abn only support act as None, 'identity', " \ + assert act in [None, 'identity', 'leaky_relu', 'elu'], ( + "inplace_abn only support act as None, 'identity', " "'leaky_relu', 'elu' currently" - assert bias_attr is not False, "bias_attr should not be False in inplace_abn." + ) + assert ( + bias_attr is not False + ), "bias_attr should not be False in inplace_abn." helper = LayerHelper('inplace_abn', **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'inplace_abn') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'inplace_abn' + ) dtype = helper.input_dtype() input_shape = input.shape @@ -3231,31 +3439,38 @@ def inplace_abn(input, param_shape = [channel_num] # create parameter - scale = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - default_initializer=Constant(1.0)) - bias = helper.create_parameter(attr=helper.bias_attr, - shape=param_shape, - dtype=dtype, - is_bias=True) - - mean = helper.create_parameter(attr=ParamAttr( - name=moving_mean_name, - initializer=Constant(0.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=dtype) + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0), + ) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True + ) + + mean = helper.create_parameter( + attr=ParamAttr( + name=moving_mean_name, + initializer=Constant(0.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=dtype, + ) mean.stop_gradient = True - variance = helper.create_parameter(attr=ParamAttr( - name=moving_variance_name, - initializer=Constant(1.0), - trainable=False, - do_model_average=do_model_average_for_mean_and_var), - shape=param_shape, - dtype=dtype) + variance = helper.create_parameter( + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False, + do_model_average=do_model_average_for_mean_and_var, + ), + shape=param_shape, + dtype=dtype, + ) variance.stop_gradient = True # create output @@ -3277,39 +3492,88 @@ def inplace_abn(input, attrs__ = () if attrs_has_momentum: - attrs__ = ('momentum', momentum, 'epsilon', epsilon, 'is_test', - is_test, 'data_layout', data_layout, 'use_mkldnn', False, - 'fuse_with_relu', False, 'use_global_stats', - use_global_stats, 'activation', act, 'alpha', act_alpha) + attrs__ = ( + 'momentum', + momentum, + 'epsilon', + epsilon, + 'is_test', + is_test, + 'data_layout', + data_layout, + 'use_mkldnn', + False, + 'fuse_with_relu', + False, + 'use_global_stats', + use_global_stats, + 'activation', + act, + 'alpha', + act_alpha, + ) else: - attrs__ = ('epsilon', epsilon, 'is_test', is_test, 'data_layout', - data_layout, 'use_mkldnn', False, 'fuse_with_relu', - False, 'use_global_stats', use_global_stats, - 'activation', act, 'alpha', act_alpha) + attrs__ = ( + 'epsilon', + epsilon, + 'is_test', + is_test, + 'data_layout', + data_layout, + 'use_mkldnn', + False, + 'fuse_with_relu', + False, + 'use_global_stats', + use_global_stats, + 'activation', + act, + 'alpha', + act_alpha, + ) if inputs_has_MomemtumTensor: 
batch_norm_out, _, _, _, _, _ = _legacy_C_ops.inplace_abn_( - input, scale, bias, mean, variance, momentum, mean_out, - variance_out, *attrs__) + input, + scale, + bias, + mean, + variance, + momentum, + mean_out, + variance_out, + *attrs__, + ) return batch_norm_out else: batch_norm_out, _, _, _, _, _ = _legacy_C_ops.inplace_abn_( - input, scale, bias, mean, variance, None, mean_out, - variance_out, *attrs__) + input, + scale, + bias, + mean, + variance, + None, + mean_out, + variance_out, + *attrs__, + ) return batch_norm_out - saved_mean = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) reserve_space = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) inputs = { "X": input, "Scale": scale, "Bias": bias, "Mean": mean, - "Variance": variance + "Variance": variance, } attrs = { "epsilon": epsilon, @@ -3330,24 +3594,21 @@ def inplace_abn(input, "MeanOut": mean_out, "VarianceOut": variance_out, "SavedMean": saved_mean, - "SavedVariance": saved_variance + "SavedVariance": saved_variance, } if reserve_space is not None: outputs["ReserveSpace"] = reserve_space - helper.append_op(type="inplace_abn", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs + ) return batch_norm_out -def instance_norm(input, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - name=None): +def instance_norm( + input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None +): r""" :api_attr: Static Graph @@ -3411,10 +3672,13 @@ def instance_norm(input, hidden1 = paddle.static.nn.fc(x, size=200) hidden2 = paddle.static.nn.instance_norm(hidden1) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'instance_norm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'instance_norm' + ) if param_attr is False: - assert bias_attr is False, "param_attr and bias_attr must be set to Fasle at the same time in instance_norm" + assert ( + bias_attr is False + ), "param_attr and bias_attr must be set to Fasle at the same time in instance_norm" helper = LayerHelper('instance_norm', **locals()) dtype = helper.input_dtype() @@ -3426,29 +3690,37 @@ def instance_norm(input, input_shape = input.shape if len(input.shape) < 2 or len(input.shape) > 5: raise ValueError( - 'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})' - .format(len(input.shape), input_shape)) + 'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})'.format( + len(input.shape), input_shape + ) + ) channel_num = input_shape[1] param_shape = [channel_num] if param_attr != False and bias_attr != False: # create parameter - scale = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - default_initializer=Constant(1.0)) - bias = helper.create_parameter(attr=helper.bias_attr, - shape=param_shape, - dtype=dtype, - is_bias=True, - default_initializer=Constant(0.0)) + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0), + ) + bias = helper.create_parameter( + attr=helper.bias_attr, + shape=param_shape, + dtype=dtype, + is_bias=True, + default_initializer=Constant(0.0), + ) # create 
output - saved_mean = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) instance_norm_out = helper.create_variable_for_type_inference(dtype) @@ -3457,35 +3729,39 @@ def instance_norm(input, inputs["Scale"] = scale inputs["Bias"] = bias - helper.append_op(type="instance_norm", - inputs=inputs, - outputs={ - "Y": instance_norm_out, - "SavedMean": saved_mean, - "SavedVariance": saved_variance - }, - attrs={ - "epsilon": epsilon, - }) + helper.append_op( + type="instance_norm", + inputs=inputs, + outputs={ + "Y": instance_norm_out, + "SavedMean": saved_mean, + "SavedVariance": saved_variance, + }, + attrs={ + "epsilon": epsilon, + }, + ) return instance_norm_out @static_only -def data_norm(input, - act=None, - epsilon=1e-05, - param_attr=None, - data_layout='NCHW', - in_place=False, - name=None, - moving_mean_name=None, - moving_variance_name=None, - do_model_average_for_mean_and_var=True, - slot_dim=-1, - sync_stats=False, - summary_decay_rate=0.9999999, - enable_scale_and_shift=False): +def data_norm( + input, + act=None, + epsilon=1e-05, + param_attr=None, + data_layout='NCHW', + in_place=False, + name=None, + moving_mean_name=None, + moving_variance_name=None, + do_model_average_for_mean_and_var=True, + slot_dim=-1, + sync_stats=False, + summary_decay_rate=0.9999999, + enable_scale_and_shift=False, +): r""" :api_attr: Static Graph @@ -3583,39 +3859,54 @@ def data_norm(input, if name == None: name = "dn" if enable_scale_and_shift: - scale_w = helper.create_parameter(attr=ParamAttr( - name=name + '.scale_w', - initializer=Constant(value=float(scale_w_default)), - trainable=True), - shape=param_shape, - dtype=input.dtype) - bias = helper.create_parameter(attr=ParamAttr( - name=name + '.bias', - initializer=Constant(value=float(bias_default)), - trainable=True), - shape=param_shape, - dtype=input.dtype) + scale_w = helper.create_parameter( + attr=ParamAttr( + name=name + '.scale_w', + initializer=Constant(value=float(scale_w_default)), + trainable=True, + ), + shape=param_shape, + dtype=input.dtype, + ) + bias = helper.create_parameter( + attr=ParamAttr( + name=name + '.bias', + initializer=Constant(value=float(bias_default)), + trainable=True, + ), + shape=param_shape, + dtype=input.dtype, + ) # create parameter - batch_size = helper.create_parameter(attr=ParamAttr( - name=name + '.batch_size', - initializer=Constant(value=float(batch_size_default)), - trainable=True), - shape=param_shape, - dtype=input.dtype) - - batch_sum = helper.create_parameter(attr=ParamAttr( - name=name + '.batch_sum', - initializer=Constant(value=float(batch_sum_default)), - trainable=True), - shape=param_shape, - dtype=input.dtype) - - batch_square_sum = helper.create_parameter(attr=ParamAttr( - name=name + '.batch_square_sum', - initializer=Constant(value=float(batch_square_sum_default)), - trainable=True), - shape=param_shape, - dtype=input.dtype) + batch_size = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_size', + initializer=Constant(value=float(batch_size_default)), + trainable=True, + ), + shape=param_shape, + dtype=input.dtype, + ) + + batch_sum = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_sum', + initializer=Constant(value=float(batch_sum_default)), + trainable=True, + ), + shape=param_shape, + dtype=input.dtype, + ) 
+ + batch_square_sum = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_square_sum', + initializer=Constant(value=float(batch_square_sum_default)), + trainable=True, + ), + shape=param_shape, + dtype=input.dtype, + ) means = helper.create_variable(dtype=dtype, stop_gradient=True) scales = helper.create_variable(dtype=dtype, stop_gradient=True) @@ -3626,7 +3917,7 @@ def data_norm(input, "X": input, "BatchSize": batch_size, "BatchSum": batch_sum, - "BatchSquareSum": batch_square_sum + "BatchSquareSum": batch_square_sum, } attrs = { "epsilon": epsilon, @@ -3641,31 +3932,35 @@ def data_norm(input, if enable_scale_and_shift: inputs["scale_w"] = scale_w inputs["bias"] = bias - helper.append_op(type="data_norm", - inputs=inputs, - outputs={ - "Y": data_norm_out, - "Means": means, - "Scales": scales, - "BatchSize": batch_size, - "BatchSum": batch_sum, - "BatchSquareSum": batch_square_sum - }, - attrs=attrs) + helper.append_op( + type="data_norm", + inputs=inputs, + outputs={ + "Y": data_norm_out, + "Means": means, + "Scales": scales, + "BatchSize": batch_size, + "BatchSum": batch_sum, + "BatchSquareSum": batch_square_sum, + }, + attrs=attrs, + ) return helper.append_activation(data_norm_out) @templatedoc() -def layer_norm(input, - scale=True, - shift=True, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - act=None, - name=None): +def layer_norm( + input, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + name=None, +): r""" :api_attr: Static Graph @@ -3728,11 +4023,13 @@ def layer_norm(input, output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1) print(output.shape) # [8, 32, 32] """ - assert _non_static_mode( - ) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!" + assert ( + _non_static_mode() is not True + ), "please use LayerNorm instead of layer_norm in dygraph mode!" helper = LayerHelper('layer_norm', **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'layer_norm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'layer_norm' + ) dtype = helper.input_dtype() # create intput and parameters @@ -3740,57 +4037,65 @@ def layer_norm(input, input_shape = input.shape param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])] if scale: - assert param_attr is not False, "param_attr should not be False when using scale." - scale = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - default_initializer=Constant(1.0)) + assert ( + param_attr is not False + ), "param_attr should not be False when using scale." + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0), + ) inputs['Scale'] = scale else: if param_attr: warnings.warn("param_attr is only available with scale is True.") if shift: - assert bias_attr is not False, "bias_attr should not be False when using shift." - bias = helper.create_parameter(attr=helper.bias_attr, - shape=param_shape, - dtype=dtype, - is_bias=True) + assert ( + bias_attr is not False + ), "bias_attr should not be False when using shift." 
+ bias = helper.create_parameter( + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True + ) inputs['Bias'] = bias else: if bias_attr: warnings.warn("bias_attr is only available with shift is True.") # create output - mean_out = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) - variance_out = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + mean_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) + variance_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) layer_norm_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="layer_norm", - inputs=inputs, - outputs={ - "Y": layer_norm_out, - "Mean": mean_out, - "Variance": variance_out, - }, - attrs={ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis - }) + helper.append_op( + type="layer_norm", + inputs=inputs, + outputs={ + "Y": layer_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}, + ) return helper.append_activation(layer_norm_out) @templatedoc() -def group_norm(input, - groups, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - act=None, - data_layout='NCHW', - name=None): +def group_norm( + input, + groups, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + data_layout='NCHW', + name=None, +): """ :api_attr: Static Graph @@ -3835,8 +4140,9 @@ def group_norm(input, """ helper = LayerHelper('group_norm', **locals()) dtype = helper.input_dtype() - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'group_norm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'group_norm' + ) # create intput and parameters inputs = {'X': input} input_shape = input.shape @@ -3847,20 +4153,23 @@ def group_norm(input, if data_layout != 'NCHW' and data_layout != 'NHWC': raise ValueError( "Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received " - + data_layout + " but only NCHW or NHWC supported.") + + data_layout + + " but only NCHW or NHWC supported." 
+        )
     channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
     param_shape = [channel_num]
     if param_attr:
-        scale = helper.create_parameter(attr=helper.param_attr,
-                                        shape=param_shape,
-                                        dtype=dtype,
-                                        default_initializer=Constant(1.0))
+        scale = helper.create_parameter(
+            attr=helper.param_attr,
+            shape=param_shape,
+            dtype=dtype,
+            default_initializer=Constant(1.0),
+        )
         inputs['Scale'] = scale
     if bias_attr:
-        bias = helper.create_parameter(attr=helper.bias_attr,
-                                       shape=param_shape,
-                                       dtype=dtype,
-                                       is_bias=True)
+        bias = helper.create_parameter(
+            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True
+        )
         inputs['Bias'] = bias

     # create output
@@ -3868,18 +4177,20 @@ def group_norm(input,
     variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
     group_norm_out = helper.create_variable(dtype=dtype)

-    helper.append_op(type="group_norm",
-                     inputs=inputs,
-                     outputs={
-                         "Y": group_norm_out,
-                         "Mean": mean_out,
-                         "Variance": variance_out,
-                     },
-                     attrs={
-                         "epsilon": epsilon,
-                         "groups": groups,
-                         "data_layout": data_layout
-                     })
+    helper.append_op(
+        type="group_norm",
+        inputs=inputs,
+        outputs={
+            "Y": group_norm_out,
+            "Mean": mean_out,
+            "Variance": variance_out,
+        },
+        attrs={
+            "epsilon": epsilon,
+            "groups": groups,
+            "data_layout": data_layout,
+        },
+    )

     return helper.append_activation(group_norm_out)

@@ -3948,8 +4259,9 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
            print(x.shape) # [2, 8, 32, 32]
    """
    helper = LayerHelper('spectral_norm', **locals())
-    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
-                             'spectral_norm')
+    check_variable_and_dtype(
+        weight, 'weight', ['float32', 'float64'], 'spectral_norm'
+    )
     check_type(dim, 'dim', int, 'spectral_norm')
     check_type(power_iters, 'power_iters', int, 'spectral_norm')
     check_type(eps, 'eps', float, 'spectral_norm')
@@ -3958,21 +4270,27 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
     # create intput and parameters
     input_shape = weight.shape
     assert weight.numel() > 0, "Any dimension of input cannot be equal to 0."
- assert dim < len(input_shape), ("The input `dim` should be less than the " - "rank of `weight`, but received dim=" - "{}".format(dim)) + assert dim < len(input_shape), ( + "The input `dim` should be less than the " + "rank of `weight`, but received dim=" + "{}".format(dim) + ) h = input_shape[dim] w = np.prod(input_shape) // h - u = helper.create_parameter(attr=ParamAttr(), - shape=[h], - dtype=dtype, - default_initializer=Normal(0., 1.)) + u = helper.create_parameter( + attr=ParamAttr(), + shape=[h], + dtype=dtype, + default_initializer=Normal(0.0, 1.0), + ) u.stop_gradient = True - v = helper.create_parameter(attr=ParamAttr(), - shape=[w], - dtype=dtype, - default_initializer=Normal(0., 1.)) + v = helper.create_parameter( + attr=ParamAttr(), + shape=[w], + dtype=dtype, + default_initializer=Normal(0.0, 1.0), + ) v.stop_gradient = True if in_dygraph_mode(): @@ -3985,34 +4303,38 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None): # create output out = helper.create_variable(dtype=dtype) - helper.append_op(type="spectral_norm", - inputs=inputs, - outputs={ - "Out": out, - }, - attrs={ - "dim": dim, - "power_iters": power_iters, - "eps": eps, - }) + helper.append_op( + type="spectral_norm", + inputs=inputs, + outputs={ + "Out": out, + }, + attrs={ + "dim": dim, + "power_iters": power_iters, + "eps": eps, + }, + ) return out -def conv2d_transpose(input, - num_filters, - output_size=None, - filter_size=None, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None, - data_format='NCHW'): +def conv2d_transpose( + input, + num_filters, + output_size=None, + filter_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None, + data_format='NCHW', +): r""" :api_attr: Static Graph @@ -4168,20 +4490,29 @@ def conv2d_transpose(input, conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3) print(conv2d_transpose.shape) # [-1, 2, 34, 34] """ - assert param_attr is not False, "param_attr should not be False in conv2d_transpose." + assert ( + param_attr is not False + ), "param_attr should not be False in conv2d_transpose." if len(input.shape) != 4: - raise ValueError("Input size should be 4, " - "but received {}".format(len(input.shape))) + raise ValueError( + "Input size should be 4, " + "but received {}".format(len(input.shape)) + ) if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received " - + data_format + " but only NCHW or NHWC supported.") + + data_format + + " but only NCHW or NHWC supported." + ) input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1] op_type = 'conv2d_transpose' - if (input_channel == groups and num_filters == input_channel - and not use_cudnn): + if ( + input_channel == groups + and num_filters == input_channel + and not use_cudnn + ): op_type = 'depthwise_conv2d_transpose' helper = LayerHelper(op_type, **locals()) @@ -4195,7 +4526,6 @@ def conv2d_transpose(input, raise ValueError("use_cudnn should be True or False") def _update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True @@ -4206,14 +4536,16 @@ def conv2d_transpose(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." 
% str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') @@ -4227,8 +4559,9 @@ def conv2d_transpose(input, padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % - str(padding)) + "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." + % str(padding) + ) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0, 0] @@ -4248,44 +4581,63 @@ def conv2d_transpose(input, elif isinstance(output_size, int): output_size = utils.convert_to_list(output_size, 2, 'output_size') elif isinstance(output_size, Variable): - check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'], - 'conv2d_transpose') - if len(output_size.shape) == 1 and (output_size.shape[0] == 1 - or output_size.shape[0] == 2): + check_dtype( + output_size.dtype, + 'output_size', + ['int32', 'int64'], + 'conv2d_transpose', + ) + if len(output_size.shape) == 1 and ( + output_size.shape[0] == 1 or output_size.shape[0] == 2 + ): if output_size.shape[0] == 1: output_size = [output_size, output_size] else: raise ValueError("output_size must contain one or two integers.") else: raise ValueError( - "output_size should be int, list[int] or tuple[int] or Tensor") + "output_size should be int, list[int] or tuple[int] or Tensor" + ) if filter_size is None: if output_size is []: raise ValueError("output_size must be set when filter_size is None") if not _non_static_mode(): - if isinstance(output_size, - Variable) or utils._contain_var(output_size): + if isinstance(output_size, Variable) or utils._contain_var( + output_size + ): raise ValueError( "filter_size should not be None when output_size is Variable or contain Variable in static mode." 
) else: output_size = utils.convert_shape_to_list(output_size) if len(output_size) == 1: - output_size = utils.convert_to_list(output_size[0], 2, - 'output_size') + output_size = utils.convert_to_list( + output_size[0], 2, 'output_size' + ) h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1] w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2] - filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] + - padding[1] - 1) // dilation[0] + 1 - filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] + - padding[3] - 1) // dilation[1] + 1 + filter_size_h = ( + output_size[0] + - (h_in - 1) * stride[0] + + padding[0] + + padding[1] + - 1 + ) // dilation[0] + 1 + filter_size_w = ( + output_size[1] + - (w_in - 1) * stride[1] + + padding[2] + + padding[3] + - 1 + ) // dilation[1] + 1 filter_size = [filter_size_h, filter_size_w] else: - filter_size = utils.convert_to_list(filter_size, 2, - 'conv2d_transpose.filter_size') + filter_size = utils.convert_to_list( + filter_size, 2, 'conv2d_transpose.filter_size' + ) if len(padding) == 4 and utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] @@ -4295,31 +4647,31 @@ def conv2d_transpose(input, elif groups <= 0: raise ValueError( "the groups of input must be greater than 0, " - "but received the groups of input is {}".format(groups)) + "but received the groups of input is {}".format(groups) + ) filter_shape = [input_channel, num_filters // groups] + filter_size - img_filter = helper.create_parameter(dtype=input.dtype, - shape=filter_shape, - attr=helper.param_attr) + img_filter = helper.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=helper.param_attr + ) pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type=op_type, - inputs={ - 'Input': [input], - 'Filter': [img_filter] - }, - outputs={'Output': pre_bias}, - attrs={ - 'output_size': output_size, - 'strides': stride, - 'paddings': padding, - 'padding_algorithm': padding_algorithm, - 'dilations': dilation, - 'groups': groups, - 'use_cudnn': use_cudnn, - 'data_format': data_format - }) + helper.append_op( + type=op_type, + inputs={'Input': [input], 'Filter': [img_filter]}, + outputs={'Output': pre_bias}, + attrs={ + 'output_size': output_size, + 'strides': stride, + 'paddings': padding, + 'padding_algorithm': padding_algorithm, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn, + 'data_format': data_format, + }, + ) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) @@ -4329,20 +4681,22 @@ def conv2d_transpose(input, return out -def conv3d_transpose(input, - num_filters, - output_size=None, - filter_size=None, - padding=0, - stride=1, - dilation=1, - groups=None, - param_attr=None, - bias_attr=None, - use_cudnn=True, - act=None, - name=None, - data_format='NCDHW'): +def conv3d_transpose( + input, + num_filters, + output_size=None, + filter_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None, + data_format='NCDHW', +): r""" :api_attr: Static Graph @@ -4506,11 +4860,15 @@ def conv3d_transpose(input, output = exe.run(feed={"data": x}, fetch_list=[res]) print(output) """ - assert param_attr is not False, "param_attr should not be False in conv3d_transpose." + assert ( + param_attr is not False + ), "param_attr should not be False in conv3d_transpose." 
if data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received " - + data_format + " but only NCDHW or NDHWC supported.") + + data_format + + " but only NCDHW or NDHWC supported." + ) l_type = "conv3d_transpose" helper = LayerHelper(l_type, **locals()) @@ -4518,9 +4876,13 @@ def conv3d_transpose(input, raise TypeError("Input of conv3d_transpose must be Variable") if len(input.shape) != 5: raise ValueError( - "Input should be 5D tensor, but received input with the shape of {}" - .format(input.shape)) - input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[-1] + "Input should be 5D tensor, but received input with the shape of {}".format( + input.shape + ) + ) + input_channel = ( + input.shape[1] if data_format == 'NCDHW' else input.shape[-1] + ) stride = utils.convert_to_list(stride, 3, 'stride') dilation = utils.convert_to_list(dilation, 3, 'dilation') @@ -4529,7 +4891,6 @@ def conv3d_transpose(input, raise ValueError("use_cudnn should be True or False") def _update_padding(padding, data_format): - def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True @@ -4540,14 +4901,16 @@ def conv3d_transpose(input, if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding)) + "is not supported." % str(padding) + ) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') @@ -4558,8 +4921,12 @@ def conv3d_transpose(input, else: padding = utils.convert_to_list(padding, 3, 'padding') padding = [ - padding[0], padding[0], padding[1], padding[1], padding[2], - padding[2] + padding[0], + padding[0], + padding[1], + padding[1], + padding[2], + padding[2], ] return padding @@ -4568,8 +4935,9 @@ def conv3d_transpose(input, padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % - str(padding)) + "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." 
+ % str(padding) + ) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0, 0, 0, 0] @@ -4589,16 +4957,32 @@ def conv3d_transpose(input, h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2] w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3] - filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] + - padding[1] - 1) // dilation[0] + 1 - filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] + - padding[3] - 1) // dilation[1] + 1 - filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] + - padding[5] - 1) // dilation[2] + 1 + filter_size_d = ( + output_size[0] + - (d_in - 1) * stride[0] + + padding[0] + + padding[1] + - 1 + ) // dilation[0] + 1 + filter_size_h = ( + output_size[1] + - (h_in - 1) * stride[1] + + padding[2] + + padding[3] + - 1 + ) // dilation[1] + 1 + filter_size_w = ( + output_size[2] + - (w_in - 1) * stride[2] + + padding[4] + + padding[5] + - 1 + ) // dilation[2] + 1 filter_size = [filter_size_d, filter_size_h, filter_size_w] else: - filter_size = utils.convert_to_list(filter_size, 3, - 'conv3d_transpose.filter_size') + filter_size = utils.convert_to_list( + filter_size, 3, 'conv3d_transpose.filter_size' + ) if len(padding) == 6 and utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] @@ -4613,18 +4997,22 @@ def conv3d_transpose(input, groups = 1 if groups is None else groups if groups <= 0: raise ValueError( - "the groups of conv3d_transpose should be greater than 0. Received groups: {}" - .format(groups)) + "the groups of conv3d_transpose should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_filters % groups != 0: raise ValueError( "Attr(num_filters) must be divisible by groups," "Received: Attr(num_filters) is {}, the groups is {}".format( - num_filters, groups)) + num_filters, groups + ) + ) filter_shape = [input_channel, num_filters // groups] + filter_size - img_filter = helper.create_parameter(dtype=input.dtype, - shape=filter_shape, - attr=helper.param_attr) + img_filter = helper.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=helper.param_attr + ) if data_format == 'NCDHW': data_format = 'NCHW' @@ -4632,22 +5020,21 @@ def conv3d_transpose(input, data_format = 'NHWC' pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type=l_type, - inputs={ - 'Input': [input], - 'Filter': [img_filter] - }, - outputs={'Output': pre_bias}, - attrs={ - 'output_size': output_size, - 'strides': stride, - 'paddings': padding, - 'padding_algorithm': padding_algorithm, - 'dilations': dilation, - 'groups': groups, - 'use_cudnn': use_cudnn, - 'data_format': data_format - }) + helper.append_op( + type=l_type, + inputs={'Input': [input], 'Filter': [img_filter]}, + outputs={'Output': pre_bias}, + attrs={ + 'output_size': output_size, + 'strides': stride, + 'paddings': padding, + 'padding_algorithm': padding_algorithm, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn, + 'data_format': data_format, + }, + ) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) @@ -4717,18 +5104,24 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): if in_dygraph_mode(): return _C_ops.sum(input, dim, None, keep_dim) elif _in_legacy_dygraph(): - return _legacy_C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_sum( + input, 'dim', dim, 'keep_dim', keep_dim, 'reduce_all', 
reduce_all + ) attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all} check_variable_and_dtype( - input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'reduce_sum') + input, + 'input', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'reduce_sum', + ) helper = LayerHelper('reduce_sum', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='reduce_sum', - inputs={'X': input}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='reduce_sum', + inputs={'X': input}, + outputs={'Out': out}, + attrs=attrs, + ) return out @@ -4846,18 +5239,18 @@ def reduce_max(input, dim=None, keep_dim=False, name=None): if in_dygraph_mode(): return _C_ops.max(input, dim if dim != None else [], keep_dim) - helper.append_op(type='reduce_max', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'dim': - dim if dim != None and dim != [] else [0], - 'keep_dim': - keep_dim, - 'reduce_all': - True if dim == None or dim == [] - or len(dim) == len(input.shape) else False - }) + helper.append_op( + type='reduce_max', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None and dim != [] else [0], + 'keep_dim': keep_dim, + 'reduce_all': True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + }, + ) return out @@ -4918,18 +5311,18 @@ def reduce_min(input, dim=None, keep_dim=False, name=None): if in_dygraph_mode(): return _C_ops.min(input, dim if dim != None else [], keep_dim) - helper.append_op(type='reduce_min', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'dim': - dim if dim != None and dim != [] else [0], - 'keep_dim': - keep_dim, - 'reduce_all': - True if dim == None or dim == [] - or len(dim) == len(input.shape) else False - }) + helper.append_op( + type='reduce_min', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None and dim != [] else [0], + 'keep_dim': keep_dim, + 'reduce_all': True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + }, + ) return out @@ -4990,30 +5383,37 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None): dim = [dim] else: raise TypeError( - "The type of axis must be int, list or tuple, but received {}". 
- format(type(dim))) + "The type of axis must be int, list or tuple, but received {}".format( + type(dim) + ) + ) if in_dygraph_mode(): return _C_ops.reduce_prod( - input, dim if dim != None and dim != [] else [0], keep_dim, True if - dim == None or dim == [] or len(dim) == len(input.shape) else False) + input, + dim if dim != None and dim != [] else [0], + keep_dim, + True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + ) helper = LayerHelper('reduce_prod', **locals()) - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], - 'reduce_prod') + check_variable_and_dtype( + input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod' + ) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='reduce_prod', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'dim': - dim if dim != None and dim != [] else [0], - 'keep_dim': - keep_dim, - 'reduce_all': - True if dim == None or dim == [] - or len(dim) == len(input.shape) else False - }) + helper.append_op( + type='reduce_prod', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None and dim != [] else [0], + 'keep_dim': keep_dim, + 'reduce_all': True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + }, + ) return out @@ -5070,18 +5470,18 @@ def reduce_all(input, dim=None, keep_dim=False, name=None): check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all') helper = LayerHelper('reduce_all', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='reduce_all', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'dim': - dim if dim != None and dim != [] else [0], - 'keep_dim': - keep_dim, - 'reduce_all': - True if dim == None or dim == [] - or len(dim) == len(input.shape) else False - }) + helper.append_op( + type='reduce_all', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None and dim != [] else [0], + 'keep_dim': keep_dim, + 'reduce_all': True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + }, + ) return out @@ -5133,18 +5533,18 @@ def reduce_any(input, dim=None, keep_dim=False, name=None): out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] - helper.append_op(type='reduce_any', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'dim': - dim if dim != None and dim != [] else [0], - 'keep_dim': - keep_dim, - 'reduce_all': - True if dim == None or dim == [] - or len(dim) == len(input.shape) else False - }) + helper.append_op( + type='reduce_any', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None and dim != [] else [0], + 'keep_dim': keep_dim, + 'reduce_all': True + if dim == None or dim == [] or len(dim) == len(input.shape) + else False, + }, + ) return out @@ -5219,15 +5619,17 @@ def split(input, num_or_sections, dim=-1, name=None): if utils._contain_var(num_or_sections): for index, item in enumerate(num_or_sections): if isinstance(item, Variable): - num_or_sections[index] = num_or_sections[index].numpy( - )[0] + num_or_sections[index] = num_or_sections[index].numpy()[ + 0 + ] attrs += ('sections', list(num_or_sections)) else: attrs += ('sections', list(num_or_sections)) else: raise TypeError( "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but " - "received %s." 
% (type(num_or_sections))) + "received %s." % (type(num_or_sections)) + ) if in_dygraph_mode(): if isinstance(num_or_sections, int): return _C_ops.split_with_num(input, num_or_sections, dim) @@ -5239,8 +5641,11 @@ def split(input, num_or_sections, dim=-1, name=None): return out check_variable_and_dtype( - input, 'input', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split') + input, + 'input', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'split', + ) check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split') check_type(dim, 'dim', (int, Variable), 'split') if isinstance(dim, Variable): @@ -5260,19 +5665,18 @@ def split(input, num_or_sections, dim=-1, name=None): dim_size.stop_gradient = True tensor_list.append(dim_size) else: - assert (isinstance(dim_size, int)) + assert isinstance(dim_size, int) if dim_size == -1: assert unk_dim_idx == -1, ( "Only one value of 'num_or_section' in split can " - "be -1. But received num_or_section[%d] is also -1." % - idx) + "be -1. But received num_or_section[%d] is also -1." + % idx + ) unk_dim_idx = idx temp_out = helper.create_variable_for_type_inference('int32') - fill_constant([1], - 'int32', - dim_size, - force_cpu=True, - out=temp_out) + fill_constant( + [1], 'int32', dim_size, force_cpu=True, out=temp_out + ) tensor_list.append(temp_out) return tensor_list @@ -5287,31 +5691,37 @@ def split(input, num_or_sections, dim=-1, name=None): if isinstance(num_or_sections, int): assert num_or_sections > 1, 'num_or_sections must be more than 1.' if isinstance(dim, int) and input_shape[dim] > 0: - assert input_shape[dim] % num_or_sections ==0, \ - "The input's size along the split dimension " \ - "must be evenly divisible by Attr(num_or_sections). " \ - "But %d is not evenly divisible by %d. " % (num_or_sections,input_shape[dim]) + assert input_shape[dim] % num_or_sections == 0, ( + "The input's size along the split dimension " + "must be evenly divisible by Attr(num_or_sections). " + "But %d is not evenly divisible by %d. " + % (num_or_sections, input_shape[dim]) + ) num = num_or_sections else: if isinstance(dim, int) and input_shape[dim] > 0: - assert len(num_or_sections) <= input_shape[ - dim], 'len(num_or_sections) must not be more than input.shape[dim].' + assert ( + len(num_or_sections) <= input_shape[dim] + ), 'len(num_or_sections) must not be more than input.shape[dim].' 
num = len(num_or_sections) attrs['sections'] = list( - map(lambda ele: -1 - if isinstance(ele, Variable) else ele, num_or_sections)) + map( + lambda ele: -1 if isinstance(ele, Variable) else ele, + num_or_sections, + ) + ) if utils._contain_var(num_or_sections): inputs['SectionsTensorList'] = _get_SectionsTensorList( - num_or_sections) + num_or_sections + ) outs = [ helper.create_variable_for_type_inference(dtype=helper.input_dtype()) for i in range(num) ] - helper.append_op(type='split', - inputs=inputs, - outputs={'Out': outs}, - attrs=attrs) + helper.append_op( + type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs + ) return outs @@ -5362,8 +5772,9 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): if in_dygraph_mode(): out, _ = _C_ops.norm(x, 1 if axis is None else axis, epsilon, False) elif _in_legacy_dygraph(): - _, out = _legacy_C_ops.norm(x, 'axis', 1 if axis is None else axis, - 'epsilon', epsilon) + _, out = _legacy_C_ops.norm( + x, 'axis', 1 if axis is None else axis, 'epsilon', epsilon + ) return out check_variable_and_dtype(x, "X", ("float16", "float32", "float64"), "norm") @@ -5371,16 +5782,15 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): helper = LayerHelper("l2_normalize", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) norm = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="norm", - inputs={"X": x}, - outputs={ - "Out": out, - "Norm": norm - }, - attrs={ - "axis": 1 if axis is None else axis, - "epsilon": epsilon, - }) + helper.append_op( + type="norm", + inputs={"X": x}, + outputs={"Out": out, "Norm": norm}, + attrs={ + "axis": 1 if axis is None else axis, + "epsilon": epsilon, + }, + ) return out @@ -5461,16 +5871,25 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): """ if _non_static_mode(): out = _varbase_creator(dtype=x.dtype) - _legacy_C_ops.matmul(x, y, out, 'transpose_X', transpose_x, - 'transpose_Y', transpose_y, 'alpha', float(alpha)) + _legacy_C_ops.matmul( + x, + y, + out, + 'transpose_X', + transpose_x, + 'transpose_Y', + transpose_y, + 'alpha', + float(alpha), + ) return out def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): - check_variable_and_dtype(val, name, - ['float16', 'float32', 'float64'], - 'matmul') + check_variable_and_dtype( + val, name, ['float16', 'float32', 'float64'], 'matmul' + ) x_shape = list(x.shape) y_shape = list(y.shape) if len(x_shape) == 1: @@ -5484,11 +5903,12 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): if transpose_y: y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2] if x_shape[-1] != y_shape[-2]: - assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \ - "After performing an optional transpose, Input X's width should be " \ - "equal to Y's width for multiplication " \ - "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \ - (x_shape, y_shape) + assert (x_shape[-1] == -1) or (y_shape[-2] == -1), ( + "After performing an optional transpose, Input X's width should be " + "equal to Y's width for multiplication " + "prerequisites. But received X's shape: %s, Y's shape: %s\n" + % (x_shape, y_shape) + ) if len(y_shape) > 2 and len(x_shape) > 2: for i, dim_x in enumerate(x_shape[:-2]): @@ -5500,7 +5920,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): "When the matrix is larger than 2 dimensions, the higher " "dimensional values of the two matrices need to be equal. 
" "But received x_shape[%d] != y_shape[%d]. X's shape: %s, " - "Y's shape: %s.\n" % (i, i, x_shape, y_shape)) + "Y's shape: %s.\n" % (i, i, x_shape, y_shape) + ) attrs = { 'transpose_X': transpose_x, @@ -5512,21 +5933,20 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): helper = LayerHelper('matmul', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='matmul', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='matmul', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs=attrs, + ) return out def topk(input, k, name=None): """ :alias_main: paddle.topk - :alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk - :old_api: paddle.fluid.layers.topk + :alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk + :old_api: paddle.fluid.layers.topk This OP is used to find values and indices of the k largest entries for the last dimension. @@ -5611,23 +6031,20 @@ def topk(input, k, name=None): values = helper.create_variable_for_type_inference(dtype=input.dtype) indices = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="top_k", - inputs=inputs, - outputs={ - "Out": [values], - "Indices": [indices] - }, - attrs=attrs) + helper.append_op( + type="top_k", + inputs=inputs, + outputs={"Out": [values], "Indices": [indices]}, + attrs=attrs, + ) values.stop_gradient = True indices.stop_gradient = True return values, indices -def ctc_greedy_decoder(input, - blank, - input_length=None, - padding_value=0, - name=None): +def ctc_greedy_decoder( + input, blank, input_length=None, padding_value=0, name=None +): r""" This op is used to decode sequences by greedy policy by the following steps: @@ -5753,8 +6170,9 @@ def ctc_greedy_decoder(input, input_length=x_pad_len) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'ctc_greedy_decoder') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'ctc_greedy_decoder' + ) helper = LayerHelper("ctc_greedy_decoder", **locals()) _, topk_indices = topk(input, k=1) @@ -5763,32 +6181,27 @@ def ctc_greedy_decoder(input, ctc_out = helper.create_variable_for_type_inference(dtype="int64") if input_length is None: - helper.append_op(type="ctc_align", - inputs={"Input": [topk_indices]}, - outputs={"Output": [ctc_out]}, - attrs={ - "merge_repeated": True, - "blank": blank - }) + helper.append_op( + type="ctc_align", + inputs={"Input": [topk_indices]}, + outputs={"Output": [ctc_out]}, + attrs={"merge_repeated": True, "blank": blank}, + ) return ctc_out else: ctc_out_len = helper.create_variable_for_type_inference(dtype="int64") ctc_input = squeeze(topk_indices, [2]) - helper.append_op(type="ctc_align", - inputs={ - "Input": [ctc_input], - "InputLength": [input_length] - }, - outputs={ - "Output": [ctc_out], - "OutputLength": [ctc_out_len] - }, - attrs={ - "merge_repeated": True, - "blank": blank, - "padding_value": padding_value - }) + helper.append_op( + type="ctc_align", + inputs={"Input": [ctc_input], "InputLength": [input_length]}, + outputs={"Output": [ctc_out], "OutputLength": [ctc_out_len]}, + attrs={ + "merge_repeated": True, + "blank": blank, + "padding_value": padding_value, + }, + ) return ctc_out, ctc_out_len @@ -5849,10 +6262,21 @@ def transpose(x, perm, name=None): out, _ = _legacy_C_ops.transpose2(x, 'axis', perm) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - 
], 'transpose') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'transpose', + ) check_type(perm, 'perm', (list, tuple), 'transpose') if isinstance(perm, tuple): perm = list(perm) @@ -5861,34 +6285,37 @@ def transpose(x, perm, name=None): "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " "but received dimension of Input(x) is %s, " - "the length of Input(perm) is %s." % (len(x.shape), len(perm))) + "the length of Input(perm) is %s." % (len(x.shape), len(perm)) + ) for idx, dim in enumerate(perm): if dim >= len(x.shape): raise ValueError( "Each element in Input(perm) should be less than Input(x)'s dimension, " "but %d-th element in Input(perm) is %d which exceeds Input(x)'s " - "dimension %d." % (idx, perm[idx], len(x.shape))) + "dimension %d." % (idx, perm[idx], len(x.shape)) + ) helper = LayerHelper('transpose', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='transpose2', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'XShape': [x_shape] - }, - attrs={'axis': perm}) + helper.append_op( + type='transpose2', + inputs={'X': [x]}, + outputs={'Out': [out], 'XShape': [x_shape]}, + attrs={'axis': perm}, + ) return out -def im2sequence(input, - filter_size=1, - stride=1, - padding=0, - input_image_size=None, - out_stride=1, - name=None): +def im2sequence( + input, + filter_size=1, + stride=1, + padding=0, + input_image_size=None, + out_stride=1, + name=None, +): r""" :api_attr: Static Graph @@ -6002,8 +6429,9 @@ def im2sequence(input, """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence') @@ -6025,10 +6453,9 @@ def im2sequence(input, attrs["out_stride"] = out_stride helper = LayerHelper('im2sequence', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='im2sequence', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -6068,16 +6495,15 @@ def row_conv(input, future_context_size, param_attr=None, act=None): check_variable_and_dtype(input, 'input', ['float32'], 'row_conv') dtype = helper.input_dtype() filter_shape = [future_context_size + 1, input.shape[-1]] - filter_param = helper.create_parameter(attr=helper.param_attr, - shape=filter_shape, - dtype=dtype) + filter_param = helper.create_parameter( + attr=helper.param_attr, shape=filter_shape, dtype=dtype + ) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='row_conv', - inputs={ - 'X': [input], - 'Filter': [filter_param] - }, - outputs={'Out': [out]}) + helper.append_op( + type='row_conv', + inputs={'X': [input], 'Filter': [filter_param]}, + outputs={'Out': [out]}, + ) return helper.append_activation(out) @@ -6143,20 +6569,23 @@ def multiplex(inputs, index, name=None): check_type(inputs, 'inputs', (list), 'multiplex') if len(inputs) < 2: raise ValueError( - "inputs should be a list object with at least 2 elements.") + "inputs should be a list object with at least 2 elements." 
+ ) for id, x in enumerate(inputs): - check_variable_and_dtype(x, 'input[' + str(id) + ']', - ['float32', 'float64', 'int32', 'int64'], - 'multiplex') + check_variable_and_dtype( + x, + 'input[' + str(id) + ']', + ['float32', 'float64', 'int32', 'int64'], + 'multiplex', + ) check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex') out = helper.create_variable_for_type_inference(inputs[0].dtype) - helper.append_op(type='multiplex', - inputs={ - 'X': inputs, - 'Ids': index - }, - outputs={'Out': [out]}) + helper.append_op( + type='multiplex', + inputs={'X': inputs, 'Ids': index}, + outputs={'Out': [out]}, + ) return out @@ -6223,18 +6652,17 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): diff = helper.create_variable_for_type_inference(dtype=x.dtype) loss = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='smooth_l1_loss', - inputs={ - 'X': x, - 'Y': y, - 'InsideWeight': inside_weight, - 'OutsideWeight': outside_weight - }, - outputs={ - 'Diff': diff, - 'Out': loss - }, - attrs={'sigma': sigma if sigma is not None else 1.0}) + helper.append_op( + type='smooth_l1_loss', + inputs={ + 'X': x, + 'Y': y, + 'InsideWeight': inside_weight, + 'OutsideWeight': outside_weight, + }, + outputs={'Diff': diff, 'Out': loss}, + attrs={'sigma': sigma if sigma is not None else 1.0}, + ) return loss @@ -6326,10 +6754,12 @@ def one_hot(input, depth, allow_out_of_range=False): if isinstance(depth, Variable): depth = depth.numpy() assert depth.shape == ( - 1, ), "depth of type Variable should have shape [1]" + 1, + ), "depth of type Variable should have shape [1]" depth = depth.item(0) - out = _legacy_C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range', - allow_out_of_range) + out = _legacy_C_ops.one_hot( + input, 'depth', depth, 'allow_out_of_range', allow_out_of_range + ) out.stop_gradient = True return out @@ -6346,10 +6776,9 @@ def one_hot(input, depth, allow_out_of_range=False): depth.stop_gradient = True inputs = {'X': input, 'depth_tensor': depth} attrs = {'allow_out_of_range': allow_out_of_range} - helper.append_op(type="one_hot", - inputs=inputs, - attrs=attrs, - outputs={'Out': one_hot_out}) + helper.append_op( + type="one_hot", inputs=inputs, attrs=attrs, outputs={'Out': one_hot_out} + ) one_hot_out.stop_gradient = True return one_hot_out @@ -6387,16 +6816,18 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): dtype='int64', shape=[1], persistable=True, - belong_to_optimizer=True) + belong_to_optimizer=True, + ) if is_new_var: - helper.set_variable_initializer(counter, - initializer=Constant(value=begin - 1, - force_cpu=True)) + helper.set_variable_initializer( + counter, initializer=Constant(value=begin - 1, force_cpu=True) + ) helper.main_program.global_block()._prepend_op( type='increment', inputs={'X': [counter]}, outputs={'Out': [counter]}, - attrs={'step': float(step)}) + attrs={'step': float(step)}, + ) counter.stop_gradient = True return counter @@ -6500,7 +6931,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): """ if in_dygraph_mode(): tmp_tensor_type = core.eager.Tensor - #TODO(zhiqiu): enable inplace in dygraph mode. + # TODO(zhiqiu): enable inplace in dygraph mode. if inplace: warnings.warn( "Inplace on reshape is not allowed and will be discarded in dygraph mode currently." 
@@ -6518,7 +6949,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape))) + " got '{}.'".format(type(shape)) + ) return dygraph_utils._append_activation_in_dygraph(out, act) else: @@ -6540,14 +6972,26 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape))) + " got '{}.'".format(type(shape)) + ) return dygraph_utils._append_activation_in_dygraph(out, act) - check_variable_and_dtype(x, 'x', [ - 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', 'bool', - 'uint16' - ], 'reshape') + check_variable_and_dtype( + x, + 'x', + [ + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'bool', + 'uint16', + ], + 'reshape', + ) check_type(shape, 'shape', (list, tuple, Variable), 'reshape') check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape') @@ -6571,20 +7015,23 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): "\t# z.shape is [-1, -1, 4]\n\n" " If your target shape in Reshape represents dynamic shape, " "please turn it into a Tensor under @to_static. See above example for details." - % dim_idx) + % dim_idx + ) unk_dim_idx = dim_idx elif dim_size == 0: assert dim_idx < len(x.shape), ( "The index of 0 in `shape` must be less than " "the input tensor X's dimensions. " - "But received shape[%d] = 0, X's dimensions = %d." % - (dim_idx, len(x.shape))) + "But received shape[%d] = 0, X's dimensions = %d." + % (dim_idx, len(x.shape)) + ) else: assert dim_size > 0, ( "Each dimension value of 'shape' in reshape must not " "be negative except one unknown dimension. " - "But received shape[%d] = %s." % - (dim_idx, str(dim_size))) + "But received shape[%d] = %s." + % (dim_idx, str(dim_size)) + ) return attrs_shape inputs = {"X": x} @@ -6593,8 +7040,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): shape.stop_gradient = True inputs["Shape"] = shape elif isinstance(shape, (list, tuple)): - assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, " - "but received %s." % len(shape)) + assert len(shape) > 0, ( + "The size of 'shape' in reshape can't be zero, " + "but received %s." 
% len(shape) + ) attrs["shape"] = get_attr_shape(shape) if utils._contain_var(shape): inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) @@ -6602,16 +7051,18 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): actual_shape.stop_gradient = True inputs["Shape"] = actual_shape - out = x if inplace else helper.create_variable_for_type_inference( - dtype=x.dtype) + out = ( + x + if inplace + else helper.create_variable_for_type_inference(dtype=x.dtype) + ) x_shape = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="reshape2", - inputs=inputs, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="reshape2", + inputs=inputs, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) return helper.append_activation(out) @@ -6676,10 +7127,22 @@ def squeeze(input, axes, name=None): return out helper = LayerHelper("squeeze", **locals()) - check_variable_and_dtype(input, 'input', [ - 'float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64', - 'complex64', 'complex128' - ], 'squeeze') + check_variable_and_dtype( + input, + 'input', + [ + 'float16', + 'float32', + 'float64', + 'bool', + 'int8', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'squeeze', + ) check_type(axes, 'axis/axes', (list, tuple, Variable), 'squeeze') attrs = {} @@ -6693,13 +7156,12 @@ def squeeze(input, axes, name=None): attrs["axes"] = axes out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="squeeze2", - inputs={"X": input}, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="squeeze2", + inputs={"X": input}, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) return out @@ -6749,18 +7211,23 @@ def unsqueeze(input, axes, name=None): return _C_ops.unsqueeze(input, axes) check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') - check_variable_and_dtype(input, 'input', [ - 'float16', - 'float32', - 'float64', - 'bool', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], 'unsqueeze') + check_variable_and_dtype( + input, + 'input', + [ + 'float16', + 'float32', + 'float64', + 'bool', + 'int8', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'unsqueeze', + ) helper = LayerHelper("unsqueeze2", **locals()) inputs = {"X": input} attrs = {} @@ -6778,13 +7245,12 @@ def unsqueeze(input, axes, name=None): out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="unsqueeze2", - inputs=inputs, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="unsqueeze2", + inputs=inputs, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) return out @@ -6870,24 +7336,24 @@ def lod_reset(x, y=None, target_lod=None): y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2) out = fluid.layers.lod_reset(x=x, y=y) """ - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'lod_reset') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_reset' + ) helper = LayerHelper("lod_reset", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) if y is not None: check_type(y, 'y', (Variable), 'lod_reset') - #TODO: check y.lod_level = 0 dtype - helper.append_op(type="lod_reset", - 
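The long dtype lists in the squeeze() and unsqueeze() hunks above are exploded one element per line because the enclosing call no longer fits on a single line; black then appends a trailing comma, and that trailing comma keeps the collection exploded on any later formatting run. A minimal sketch (the constant name is illustrative only):

# Exploded list with a trailing comma; a later run of black keeps it one
# element per line rather than re-collapsing it.
SQUEEZE_SUPPORTED_DTYPES = [
    'float16',
    'float32',
    'float64',
    'bool',
    'int8',
    'int32',
    'int64',
    'complex64',
    'complex128',
]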
inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}) + # TODO: check y.lod_level = 0 dtype + helper.append_op( + type="lod_reset", inputs={'X': x, 'Y': y}, outputs={'Out': out} + ) elif target_lod is not None: - helper.append_op(type="lod_reset", - inputs={'X': x}, - attrs={'target_lod': target_lod}, - outputs={'Out': out}) + helper.append_op( + type="lod_reset", + inputs={'X': x}, + attrs={'target_lod': target_lod}, + outputs={'Out': out}, + ) else: raise ValueError("y and target_lod should not be both none.") return out @@ -6941,8 +7407,9 @@ def lod_append(x, level): if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)): raise ValueError("Input(level) must be list, tuple or Variable.") - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'lod_append') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_append' + ) helper = LayerHelper("lod_append", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -6952,27 +7419,22 @@ def lod_append(x, level): if isinstance(level, Variable): inputs['Y'] = level - #TODO: check y.lod_level = 0 dtype + # TODO: check y.lod_level = 0 dtype else: attrs['target_lod'] = level - helper.append_op(type="lod_reset", - inputs=inputs, - attrs=attrs, - outputs={'Out': out}) + helper.append_op( + type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out} + ) return out -def lrn(input, - n=5, - k=1.0, - alpha=1e-4, - beta=0.75, - name=None, - data_format='NCHW'): +def lrn( + input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW' +): r""" :alias_main: paddle.nn.functional.lrn - :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn - :old_api: paddle.fluid.layers.lrn + :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn + :old_api: paddle.fluid.layers.lrn This operator implements the Local Response Normalization Layer. This layer performs a type of "lateral inhibition" by normalizing over local input regions. @@ -7030,38 +7492,44 @@ def lrn(input, if dims != 4: raise ValueError( - "Input's dimension size of Op(lrn) must be 4, but received %d." % - (dims)) + "Input's dimension size of Op(lrn) must be 4, but received %d." + % (dims) + ) if data_format not in ['NCHW', 'NHWC']: raise ValueError( - "Attr(data_format) of Op(lrn) got wrong value: received " + - data_format + " but only NCHW or NHWC supported.") + "Attr(data_format) of Op(lrn) got wrong value: received " + + data_format + + " but only NCHW or NHWC supported." 
+ ) - mid_out = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + mid_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) lrn_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="lrn", - inputs={"X": input}, - outputs={ - "Out": lrn_out, - "MidOut": mid_out, - }, - attrs={ - "n": n, - "k": k, - "alpha": alpha, - "beta": beta, - "data_format": data_format - }) + helper.append_op( + type="lrn", + inputs={"X": input}, + outputs={ + "Out": lrn_out, + "MidOut": mid_out, + }, + attrs={ + "n": n, + "k": k, + "alpha": alpha, + "beta": beta, + "data_format": data_format, + }, + ) return lrn_out -def pad(x, paddings, pad_value=0., name=None): +def pad(x, paddings, pad_value=0.0, name=None): r""" :alias_main: paddle.nn.functional.pad - :alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad - :old_api: paddle.fluid.layers.pad + :alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad + :old_api: paddle.fluid.layers.pad This op will pad a tensor with a constant value given by :attr:`pad_value`, and the padded shape is specified by :attr:`paddings`. @@ -7112,10 +7580,20 @@ def pad(x, paddings, pad_value=0., name=None): x = fluid.data(name='data', shape=[300, 300], dtype='float32') out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.) """ - check_variable_and_dtype(x, 'x', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], "pad") + check_variable_and_dtype( + x, + 'x', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + "pad", + ) check_type(pad_value, 'pad_value', (float, int, Variable), 'pad') if isinstance(pad_value, int): @@ -7124,17 +7602,16 @@ def pad(x, paddings, pad_value=0., name=None): helper = LayerHelper('pad', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pad', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'paddings': paddings, - 'pad_value': pad_value - }) + helper.append_op( + type='pad', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'paddings': paddings, 'pad_value': pad_value}, + ) return out -def pad_constant_like(x, y, pad_value=0., name=None): +def pad_constant_like(x, y, pad_value=0.0, name=None): r""" Pad :attr:`y` with :attr:`pad_value`, the number of values padded to the edges of each axis is specified by the difference of the shape @@ -7214,31 +7691,29 @@ def pad_constant_like(x, y, pad_value=0., name=None): # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3] """ check_type(x, 'x', (Variable), 'pad_constant_like') - check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], - "pad_constant_like") + check_variable_and_dtype( + y, 'y', ['float32', 'float64', 'int32', 'int64'], "pad_constant_like" + ) helper = LayerHelper('pad_constant_like', **locals()) dtype = helper.input_dtype(input_param_name='y') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pad_constant_like', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs={'pad_value': float(pad_value)}) + helper.append_op( + type='pad_constant_like', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs={'pad_value': float(pad_value)}, + ) return out -def label_smooth(label, - prior_dist=None, - epsilon=0.1, - dtype="float32", - name=None): +def label_smooth( + label, prior_dist=None, epsilon=0.1, dtype="float32", name=None +): r""" 
:alias_main: paddle.nn.functional.label_smooth - :alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth - :old_api: paddle.fluid.layers.label_smooth + :alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth + :old_api: paddle.fluid.layers.label_smooth Label smoothing is a mechanism to regularize the classifier layer and is called label-smoothing regularization (LSR). @@ -7295,37 +7770,42 @@ def label_smooth(label, if in_dygraph_mode(): return _C_ops.label_smooth(label, prior_dist, float(epsilon)) - if epsilon > 1. or epsilon < 0.: + if epsilon > 1.0 or epsilon < 0.0: raise ValueError("The value of epsilon must be between 0 and 1.") if _non_static_mode(): - return _legacy_C_ops.label_smooth(label, prior_dist, 'epsilon', - float(epsilon)) + return _legacy_C_ops.label_smooth( + label, prior_dist, 'epsilon', float(epsilon) + ) - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'label_smooth') + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'label_smooth' + ) helper = LayerHelper("label_smooth", **locals()) label.stop_gradient = True smooth_label = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="label_smooth", - inputs={ - "X": label, - "PriorDist": prior_dist - } if prior_dist else {"X": label}, - outputs={"Out": smooth_label}, - attrs={"epsilon": float(epsilon)}) + helper.append_op( + type="label_smooth", + inputs={"X": label, "PriorDist": prior_dist} + if prior_dist + else {"X": label}, + outputs={"Out": smooth_label}, + attrs={"epsilon": float(epsilon)}, + ) return smooth_label @templatedoc() -def roi_pool(input, - rois, - pooled_height=1, - pooled_width=1, - spatial_scale=1.0, - rois_num=None, - name=None): +def roi_pool( + input, + rois, + pooled_height=1, + pooled_width=1, + spatial_scale=1.0, + rois_num=None, + name=None, +): """ This operator implements the roi_pooling layer. @@ -7391,10 +7871,20 @@ def roi_pool(input, print(np.array(out).shape) # (2, 1, 1, 1) """ if _non_static_mode(): - assert rois_num is not None, "rois_num should not be None in dygraph mode." + assert ( + rois_num is not None + ), "rois_num should not be None in dygraph mode." 
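The label_smooth() hunk above also shows how a conditional expression used as a keyword argument is wrapped, with `if` and `else` leading their own continuation lines (the same hunk rewrites the literals `1.` and `0.` as `1.0` and `0.0`; the values are unchanged). A standalone sketch of the same shape; the helper below is hypothetical, not the library code:

def build_label_smooth_inputs(label, prior_dist=None):
    # `if` and `else` start their own continuation lines, mirroring the
    # wrapped conditional expression in the label_smooth() hunk above.
    return (
        {"X": label, "PriorDist": prior_dist}
        if prior_dist is not None
        else {"X": label}
    )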
pool_out, argmaxes = _legacy_C_ops.roi_pool( - input, rois, rois_num, "pooled_height", pooled_height, - "pooled_width", pooled_width, "spatial_scale", spatial_scale) + input, + rois, + rois_num, + "pooled_height", + pooled_height, + "pooled_width", + pooled_width, + "spatial_scale", + spatial_scale, + ) return pool_out, argmaxes check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool') @@ -7410,29 +7900,30 @@ def roi_pool(input, } if rois_num is not None: inputs['RoisNum'] = rois_num - helper.append_op(type="roi_pool", - inputs=inputs, - outputs={ - "Out": pool_out, - "Argmax": argmaxes - }, - attrs={ - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "spatial_scale": spatial_scale - }) + helper.append_op( + type="roi_pool", + inputs=inputs, + outputs={"Out": pool_out, "Argmax": argmaxes}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale, + }, + ) return pool_out @templatedoc() -def roi_align(input, - rois, - pooled_height=1, - pooled_width=1, - spatial_scale=1.0, - sampling_ratio=-1, - rois_num=None, - name=None): +def roi_align( + input, + rois, + pooled_height=1, + pooled_width=1, + spatial_scale=1.0, + sampling_ratio=-1, + rois_num=None, + name=None, +): """ ${comment} @@ -7480,21 +7971,41 @@ def roi_align(input, rois_num=rois_num) """ if in_dygraph_mode(): - assert rois_num is not None, "rois_num should not be None in dygraph mode." - return _C_ops.roi_align(input, rois, rois_num, pooled_height, - pooled_width, spatial_scale, sampling_ratio, - False) + assert ( + rois_num is not None + ), "rois_num should not be None in dygraph mode." + return _C_ops.roi_align( + input, + rois, + rois_num, + pooled_height, + pooled_width, + spatial_scale, + sampling_ratio, + False, + ) if _in_legacy_dygraph(): - assert rois_num is not None, "rois_num should not be None in dygraph mode." - align_out = _legacy_C_ops.roi_align(input, rois, rois_num, - "pooled_height", pooled_height, - "pooled_width", pooled_width, - "spatial_scale", spatial_scale, - "sampling_ratio", sampling_ratio) + assert ( + rois_num is not None + ), "rois_num should not be None in dygraph mode." 
+ align_out = _legacy_C_ops.roi_align( + input, + rois, + rois_num, + "pooled_height", + pooled_height, + "pooled_width", + pooled_width, + "spatial_scale", + spatial_scale, + "sampling_ratio", + sampling_ratio, + ) return align_out - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'roi_align') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'roi_align' + ) check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align') helper = LayerHelper('roi_align', **locals()) dtype = helper.input_dtype() @@ -7505,15 +8016,17 @@ def roi_align(input, } if rois_num is not None: inputs['RoisNum'] = rois_num - helper.append_op(type="roi_align", - inputs=inputs, - outputs={"Out": align_out}, - attrs={ - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "spatial_scale": spatial_scale, - "sampling_ratio": sampling_ratio - }) + helper.append_op( + type="roi_align", + inputs=inputs, + outputs={"Out": align_out}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale, + "sampling_ratio": sampling_ratio, + }, + ) return align_out @@ -7559,21 +8072,22 @@ def dice_loss(input, label, epsilon=0.00001, name=None): predictions = F.softmax(x) loss = F.dice_loss(input=predictions, label=label) """ - return paddle.nn.functional.dice_loss(input, - label, - epsilon=epsilon, - name=name) - - -def image_resize(input, - out_shape=None, - scale=None, - name=None, - resample='BILINEAR', - actual_shape=None, - align_corners=True, - align_mode=1, - data_format='NCHW'): + return paddle.nn.functional.dice_loss( + input, label, epsilon=epsilon, name=name + ) + + +def image_resize( + input, + out_shape=None, + scale=None, + name=None, + resample='BILINEAR', + actual_shape=None, + align_corners=True, + align_mode=1, + data_format='NCHW', +): """ This op resizes a batch of images. @@ -7813,65 +8327,65 @@ def image_resize(input, Examples: .. 
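Function signatures that exceed the line limit get the same treatment as call sites: one parameter per line plus a trailing comma, as in the roi_align() and image_resize() definitions above. A minimal sketch with a stub body (the function name and return value are illustrative):

def image_resize_like(
    input,
    out_shape=None,
    scale=None,
    name=None,
    resample='BILINEAR',
    actual_shape=None,
    align_corners=True,
    align_mode=1,
    data_format='NCHW',
):
    # One parameter per line with a trailing comma; the comma keeps the
    # signature exploded on later formatting runs. The body is a stub.
    return {'resample': resample, 'data_format': data_format}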
code-block:: python - #declarative mode - import paddle - import paddle.fluid as fluid - import numpy as np - paddle.enable_static() - input = fluid.data(name="input", shape=[None,3,6,10]) + #declarative mode + import paddle + import paddle.fluid as fluid + import numpy as np + paddle.enable_static() + input = fluid.data(name="input", shape=[None,3,6,10]) - #1 - output = fluid.layers.image_resize(input=input,out_shape=[12,12]) + #1 + output = fluid.layers.image_resize(input=input,out_shape=[12,12]) - #2 - #x = np.array([2]).astype("int32") - #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") - #fluid.layers.assign(input=x, output=dim1) - #output = fluid.layers.image_resize(input=input,out_shape=[12,dim1]) + #2 + #x = np.array([2]).astype("int32") + #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") + #fluid.layers.assign(input=x, output=dim1) + #output = fluid.layers.image_resize(input=input,out_shape=[12,dim1]) - #3 - #x = np.array([3,12]).astype("int32") - #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") - #fluid.layers.assign(input=x, output=shape_tensor) - #output = fluid.layers.image_resize(input=input,out_shape=shape_tensor) + #3 + #x = np.array([3,12]).astype("int32") + #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") + #fluid.layers.assign(input=x, output=shape_tensor) + #output = fluid.layers.image_resize(input=input,out_shape=shape_tensor) - #4 - #x = np.array([0.5]).astype("float32") - #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") - #fluid.layers.assign(x,scale_tensor) - #output = fluid.layers.image_resize(input=input,scale=scale_tensor) + #4 + #x = np.array([0.5]).astype("float32") + #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") + #fluid.layers.assign(x,scale_tensor) + #output = fluid.layers.image_resize(input=input,scale=scale_tensor) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(2,3,6,10).astype("float32") + input_data = np.random.rand(2,3,6,10).astype("float32") - output_data = exe.run(fluid.default_main_program(), + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - print(output_data[0].shape) + print(output_data[0].shape) - #1 - # (2, 3, 12, 12) - #2 - # (2, 3, 12, 2) - #3 - # (2, 3, 3, 12) - #4 - # (2, 3, 3, 5) + #1 + # (2, 3, 12, 12) + #2 + # (2, 3, 12, 2) + #3 + # (2, 3, 3, 12) + #4 + # (2, 3, 3, 5) - #imperative mode - import paddle.fluid.dygraph as dg + #imperative mode + import paddle.fluid.dygraph as dg - with dg.guard(place) as g: - input = dg.to_variable(input_data) - output = fluid.layers.image_resize(input=input, out_shape=[12,12]) - print(output.shape) + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = fluid.layers.image_resize(input=input, out_shape=[12,12]) + print(output.shape) - # [2L, 3L, 12L, 12L] + # [2L, 3L, 12L, 12L] """ resample_methods = { @@ -7885,7 +8399,8 @@ def image_resize(input, if resample not in resample_methods: raise ValueError( "The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' " - "or 'NEAREST' currently.") + "or 'NEAREST' currently." 
+ ) resample_type = resample_methods[resample] if resample == 'LINEAR' and len(input.shape) != 3: @@ -7907,19 +8422,25 @@ def image_resize(input, if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCW` or `NWC` supported for 3-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCW` or `NWC` supported for 3-D input." + ) elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCHW` or `NHWC` supported for 4-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCHW` or `NHWC` supported for 4-D input." + ) elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCDHW` or `NDHWC` supported for 5-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCDHW` or `NDHWC` supported for 5-D input." + ) def _is_list_or_turple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW': data_layout = 'NCHW' @@ -7934,7 +8455,7 @@ def image_resize(input, "interp_method": resample_type, "align_corners": align_corners, "align_mode": align_mode, - "data_layout": data_layout + "data_layout": data_layout, } if out_shape is not None: @@ -7952,16 +8473,17 @@ def image_resize(input, out_shape[i] = dim.numpy()[0] if not (_is_list_or_turple_(out_shape)): raise TypeError( - "out_shape should be a list or tuple or Variable.") + "out_shape should be a list or tuple or Variable." + ) # Validate the shape contain_var = False for dim_idx, dim_size in enumerate(out_shape): if isinstance(dim_size, Variable): contain_var = True continue - assert dim_size > 0, ( - "Each dimension size given in out_shape must be greater than 0." - ) + assert ( + dim_size > 0 + ), "Each dimension size given in out_shape must be greater than 0." if contain_var: new_size_tensor = [] @@ -7972,22 +8494,22 @@ def image_resize(input, new_size_tensor.append(dim) size_list.append(-1) else: - assert (isinstance(dim, int)) + assert isinstance(dim, int) temp_out = helper.create_variable_for_type_inference( - 'int32') - fill_constant([1], - 'int32', - dim, - force_cpu=True, - out=temp_out) + 'int32' + ) + fill_constant( + [1], 'int32', dim, force_cpu=True, out=temp_out + ) new_size_tensor.append(temp_out) size_list.append(dim) inputs['SizeTensor'] = new_size_tensor if len(input.shape) == 3: if len(out_shape) != 1: - raise ValueError("out_shape length should be 1 for " - "input 3-D tensor.") + raise ValueError( + "out_shape length should be 1 for " "input 3-D tensor." + ) if contain_var: attrs['out_w'] = size_list[0] else: @@ -7995,8 +8517,9 @@ def image_resize(input, attrs['out_w'] = out_shape[0] elif len(input.shape) == 4: if len(out_shape) != 2: - raise ValueError("out_shape length should be 2 for " - "input 4-D tensor.") + raise ValueError( + "out_shape length should be 2 for " "input 4-D tensor." 
+ ) if contain_var: attrs['out_h'] = size_list[0] attrs['out_w'] = size_list[1] @@ -8006,8 +8529,9 @@ def image_resize(input, attrs['out_w'] = out_shape[1] if len(input.shape) == 5: if len(out_shape) != 3: - raise ValueError("out_shape length should be 3 for " - "input 5-D tensor.") + raise ValueError( + "out_shape length should be 3 for " "input 5-D tensor." + ) if contain_var: attrs['out_d'] = size_list[0] attrs['out_h'] = size_list[1] @@ -8030,7 +8554,8 @@ def image_resize(input, attrs['scale'] = float(scale) else: raise TypeError( - "Attr(scale)'s type should be float, int or Variable.") + "Attr(scale)'s type should be float, int or Variable." + ) if isinstance(actual_shape, Variable): warnings.warn( @@ -8062,22 +8587,26 @@ def image_resize(input, return out out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='{}_interp'.format(resample_type), - inputs=inputs, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type='{}_interp'.format(resample_type), + inputs=inputs, + outputs={"Out": out}, + attrs=attrs, + ) return out @templatedoc(op_type="linear_interp") -def resize_linear(input, - out_shape=None, - scale=None, - name=None, - actual_shape=None, - align_corners=True, - align_mode=1, - data_format='NCW'): +def resize_linear( + input, + out_shape=None, + scale=None, + name=None, + actual_shape=None, + align_corners=True, + align_mode=1, + data_format='NCW', +): """ This op resizes the input by performing linear interpolation based on given output shape which specified by actual_shape, out_shape and scale @@ -8153,58 +8682,69 @@ def resize_linear(input, For more information, please refer to :ref:`api_guide_Name` Returns: - Variable: 3-D tensor(NCW or NWC). + Variable: 3-D tensor(NCW or NWC). Examples: .. code-block:: python - #declarative mode - import paddle.fluid as fluid - import numpy as np - input = fluid.data(name="input", shape=[None,3,100]) + #declarative mode + import paddle.fluid as fluid + import numpy as np + input = fluid.data(name="input", shape=[None,3,100]) - output = fluid.layers.resize_linear(input=input,out_shape=[50,]) + output = fluid.layers.resize_linear(input=input,out_shape=[50,]) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(1,3,100).astype("float32") + input_data = np.random.rand(1,3,100).astype("float32") - output_data = exe.run(fluid.default_main_program(), + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - print(output_data[0].shape) + print(output_data[0].shape) - # (1, 3, 50) + # (1, 3, 50) - #imperative mode - import paddle.fluid.dygraph as dg + #imperative mode + import paddle.fluid.dygraph as dg - with dg.guard(place) as g: - input = dg.to_variable(input_data) - output = fluid.layers.resize_linear(input=input, out_shape=[50,]) - print(output.shape) + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = fluid.layers.resize_linear(input=input, out_shape=[50,]) + print(output.shape) - # [1L, 3L, 50L] + # [1L, 3L, 50L] """ - return image_resize(input, out_shape, scale, name, 'LINEAR', actual_shape, - align_corners, align_mode, data_format) + return image_resize( + input, + out_shape, + scale, + name, + 'LINEAR', + actual_shape, + align_corners, + align_mode, + data_format, + ) @templatedoc(op_type="bilinear_interp") -def resize_bilinear(input, - 
out_shape=None, - scale=None, - name=None, - actual_shape=None, - align_corners=True, - align_mode=1, - data_format='NCHW'): +def resize_bilinear( + input, + out_shape=None, + scale=None, + name=None, + actual_shape=None, + align_corners=True, + align_mode=1, + data_format='NCHW', +): """ This op resizes the input by performing bilinear interpolation based on given @@ -8291,86 +8831,97 @@ def resize_bilinear(input, name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: - Variable: 4-D tensor(NCHW or NHWC). + Variable: 4-D tensor(NCHW or NHWC). Examples: .. code-block:: python - #declarative mode - import paddle.fluid as fluid - import numpy as np - import paddle - paddle.enable_static() - input = fluid.data(name="input", shape=[None,3,6,10]) + #declarative mode + import paddle.fluid as fluid + import numpy as np + import paddle + paddle.enable_static() + input = fluid.data(name="input", shape=[None,3,6,10]) - #1 - output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12]) + #1 + output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12]) - #2 - #x = np.array([2]).astype("int32") - #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") - #fluid.layers.assign(input=x, output=dim1) - #output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1]) + #2 + #x = np.array([2]).astype("int32") + #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") + #fluid.layers.assign(input=x, output=dim1) + #output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1]) - #3 - #x = np.array([3,12]).astype("int32") - #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") - #fluid.layers.assign(input=x, output=shape_tensor) - #output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor) + #3 + #x = np.array([3,12]).astype("int32") + #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") + #fluid.layers.assign(input=x, output=shape_tensor) + #output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor) - #4 - #x = np.array([0.5]).astype("float32") - #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") - #fluid.layers.assign(x,scale_tensor) - #output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor) + #4 + #x = np.array([0.5]).astype("float32") + #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") + #fluid.layers.assign(x,scale_tensor) + #output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(2,3,6,10).astype("float32") + input_data = np.random.rand(2,3,6,10).astype("float32") - output_data = exe.run(fluid.default_main_program(), + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - print(output_data[0].shape) + print(output_data[0].shape) - #1 - # (2, 3, 12, 12) - #2 - # (2, 3, 12, 2) - #3 - # (2, 3, 3, 12) - #4 - # (2, 3, 3, 5) + #1 + # (2, 3, 12, 12) + #2 + # (2, 3, 12, 2) + #3 + # (2, 3, 3, 12) + #4 + # (2, 3, 3, 5) - #imperative mode - import paddle.fluid.dygraph as dg + #imperative mode + import paddle.fluid.dygraph as dg - with dg.guard(place) as g: - input = dg.to_variable(input_data) - output = 
fluid.layers.resize_bilinear(input=input, out_shape=[12,12]) - print(output.shape) + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12]) + print(output.shape) - # [2L, 3L, 12L, 12L] + # [2L, 3L, 12L, 12L] """ - return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape, - align_corners, align_mode, data_format) + return image_resize( + input, + out_shape, + scale, + name, + 'BILINEAR', + actual_shape, + align_corners, + align_mode, + data_format, + ) @templatedoc(op_type="trilinear_interp") -def resize_trilinear(input, - out_shape=None, - scale=None, - name=None, - actual_shape=None, - align_corners=True, - align_mode=1, - data_format='NCDHW'): +def resize_trilinear( + input, + out_shape=None, + scale=None, + name=None, + actual_shape=None, + align_corners=True, + align_mode=1, + data_format='NCDHW', +): """ This op resizes the input by performing trilinear interpolation based on given @@ -8462,82 +9013,93 @@ def resize_trilinear(input, Examples: .. code-block:: python - #declarative mode - import paddle.fluid as fluid - import paddle - import numpy as np - paddle.enable_static() - input = fluid.data(name="input", shape=[None,3,6,8,10]) + #declarative mode + import paddle.fluid as fluid + import paddle + import numpy as np + paddle.enable_static() + input = fluid.data(name="input", shape=[None,3,6,8,10]) - #1 - output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12]) + #1 + output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12]) - #2 - #x = np.array([2]).astype("int32") - #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") - #fluid.layers.assign(input=x, output=dim1) - #output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4]) + #2 + #x = np.array([2]).astype("int32") + #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") + #fluid.layers.assign(input=x, output=dim1) + #output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4]) - #3 - #x = np.array([3,12,12]).astype("int32") - #shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32") - #fluid.layers.assign(input=x, output=shape_tensor) - #output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor) + #3 + #x = np.array([3,12,12]).astype("int32") + #shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32") + #fluid.layers.assign(input=x, output=shape_tensor) + #output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor) - #4 - #x = np.array([0.5]).astype("float32") - #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") - #fluid.layers.assign(x,scale_tensor) - #output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor) + #4 + #x = np.array([0.5]).astype("float32") + #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") + #fluid.layers.assign(x,scale_tensor) + #output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(2,3,6,8,10).astype("float32") + input_data = np.random.rand(2,3,6,8,10).astype("float32") - output_data = exe.run(fluid.default_main_program(), + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - print(output_data[0].shape) + 
print(output_data[0].shape) - #1 - # (2, 3, 12, 12, 12) - #2 - # (2, 3, 12, 2, 4) - #3 - # (2, 3, 3, 12, 12) - #4 - # (2, 3, 3, 4, 5) + #1 + # (2, 3, 12, 12, 12) + #2 + # (2, 3, 12, 2, 4) + #3 + # (2, 3, 3, 12, 12) + #4 + # (2, 3, 3, 4, 5) - #imperative mode - import paddle.fluid.dygraph as dg + #imperative mode + import paddle.fluid.dygraph as dg - with dg.guard(place) as g: - input = dg.to_variable(input_data) - output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12]) - print(output.shape) + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12]) + print(output.shape) - # [2L, 3L, 12L, 12L, 12L] + # [2L, 3L, 12L, 12L, 12L] """ - return image_resize(input, out_shape, scale, name, 'TRILINEAR', - actual_shape, align_corners, align_mode, data_format) + return image_resize( + input, + out_shape, + scale, + name, + 'TRILINEAR', + actual_shape, + align_corners, + align_mode, + data_format, + ) @templatedoc(op_type="nearest_interp") -def resize_nearest(input, - out_shape=None, - scale=None, - name=None, - actual_shape=None, - align_corners=True, - data_format='NCHW'): +def resize_nearest( + input, + out_shape=None, + scale=None, + name=None, + actual_shape=None, + align_corners=True, + data_format='NCHW', +): """ This op resizes the input by performing nearest neighbor interpolation in both the @@ -8593,7 +9155,7 @@ def resize_nearest(input, And :attr:`out_shape` has a higher priority than :attr:`scale`. Default: None. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` - actual_shape(Variable): An optional input to specify output shape + actual_shape(Variable): An optional input to specify output shape dynamically. If provided, image resize according to this given shape rather than :attr:`out_shape` and :attr:`scale` specifying @@ -8613,85 +9175,87 @@ def resize_nearest(input, `[batch_size, input_channels, input_height, input_width]`. Returns: - Variable: 4-D tensor(NCHW or NHWC). + Variable: 4-D tensor(NCHW or NHWC). Examples: .. 
code-block:: python - #declarative mode - import paddle.fluid as fluid - import numpy as np - import paddle - paddle.enable_static() + #declarative mode + import paddle.fluid as fluid + import numpy as np + import paddle + paddle.enable_static() - input = fluid.data(name="input", shape=[None,3,6,10]) + input = fluid.data(name="input", shape=[None,3,6,10]) - #1 - output = fluid.layers.resize_nearest(input=input,out_shape=[12,12]) + #1 + output = fluid.layers.resize_nearest(input=input,out_shape=[12,12]) - #2 - #x = np.array([2]).astype("int32") - #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") - #fluid.layers.assign(input=x, output=dim1) - #output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1]) + #2 + #x = np.array([2]).astype("int32") + #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") + #fluid.layers.assign(input=x, output=dim1) + #output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1]) - #3 - #x = np.array([3,12]).astype("int32") - #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") - #fluid.layers.assign(input=x, output=shape_tensor) - #output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor) + #3 + #x = np.array([3,12]).astype("int32") + #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") + #fluid.layers.assign(input=x, output=shape_tensor) + #output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor) - #4 - #x = np.array([0.5]).astype("float32") - #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") - #fluid.layers.assign(x,scale_tensor) - #output = fluid.layers.resize_nearest(input=input,scale=scale_tensor) + #4 + #x = np.array([0.5]).astype("float32") + #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") + #fluid.layers.assign(x,scale_tensor) + #output = fluid.layers.resize_nearest(input=input,scale=scale_tensor) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(2,3,6,10).astype("float32") + input_data = np.random.rand(2,3,6,10).astype("float32") - output_data = exe.run(fluid.default_main_program(), + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - print(output_data[0].shape) + print(output_data[0].shape) - #1 - # (2, 3, 12, 12) - #2 - # (2, 3, 12, 2) - #3 - # (2, 3, 3, 12) - #4 - # (2, 3, 3, 5) + #1 + # (2, 3, 12, 12) + #2 + # (2, 3, 12, 2) + #3 + # (2, 3, 3, 12) + #4 + # (2, 3, 3, 5) - #imperative mode - import paddle.fluid.dygraph as dg + #imperative mode + import paddle.fluid.dygraph as dg - with dg.guard(place) as g: - input = dg.to_variable(input_data) - output = fluid.layers.resize_nearest(input=input, out_shape=[12,12]) - print(output.shape) + with dg.guard(place) as g: + input = dg.to_variable(input_data) + output = fluid.layers.resize_nearest(input=input, out_shape=[12,12]) + print(output.shape) - # [2L, 3L, 12L, 12L] + # [2L, 3L, 12L, 12L] """ - return image_resize(input, - out_shape, - scale, - name, - 'NEAREST', - actual_shape, - align_corners, - align_mode=1, - data_format=data_format) + return image_resize( + input, + out_shape, + scale, + name, + 'NEAREST', + actual_shape, + align_corners, + align_mode=1, + data_format=data_format, + ) def image_resize_short(input, out_short_len, resample='BILINEAR'): @@ -8719,15 +9283,18 @@ def image_resize_short(input, 
out_short_len, resample='BILINEAR'): in_shape = input.shape if len(in_shape) != 4: raise ValueError( - "The rank of input must be 4 (num_batches, channels, in_h, in_w).") + "The rank of input must be 4 (num_batches, channels, in_h, in_w)." + ) hw = in_shape[2:4] short_idx = hw.index(min(hw)) long_idx = 1 - short_idx out_shape = list(hw) out_shape[short_idx] = out_short_len out_shape[long_idx] = int( - float(out_shape[long_idx]) * - (float(out_short_len) / float(hw[short_idx])) + 0.5) + float(out_shape[long_idx]) + * (float(out_short_len) / float(hw[short_idx])) + + 0.5 + ) return image_resize(input=input, out_shape=out_shape, resample=resample) @@ -8766,8 +9333,8 @@ def gather(input, index, overwrite=True): index (Tensor): The index input tensor with rank=1. Data type is int32 or int64. overwrite (bool, optional): The mode that updating the grad when has same index. If True, use the overwrite mode to update the grad of the same index, - if False, use the accumulate mode to update the grad of the same index. - Default value is True. + if False, use the accumulate mode to update the grad of the same index. + Default value is True. Returns: output (Tensor): The output is a tensor with the same rank as input. @@ -8788,19 +9355,21 @@ def gather(input, index, overwrite=True): return _legacy_C_ops.gather(input, index, None, 'overwrite', overwrite) check_variable_and_dtype( - input, 'x', - ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather') + input, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], + 'gather', + ) check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather') helper = LayerHelper('gather', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="gather", - inputs={ - "X": input, - "Index": index - }, - outputs={"Out": out}, - attrs={'overwrite': overwrite}) + helper.append_op( + type="gather", + inputs={"X": input, "Index": index}, + outputs={"Out": out}, + attrs={'overwrite': overwrite}, + ) return out @@ -8885,18 +9454,20 @@ def gather_nd(input, index, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.gather_nd(input, index) check_variable_and_dtype( - input, 'input', - ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'], 'gather_np') + input, + 'input', + ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'], + 'gather_np', + ) check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np') helper = LayerHelper('gather_nd', **locals()) dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="gather_nd", - inputs={ - "X": input, - "Index": index - }, - outputs={"Out": output}) + helper.append_op( + type="gather_nd", + inputs={"X": input, "Index": index}, + outputs={"Out": output}, + ) return output @@ -8904,8 +9475,8 @@ def gather_nd(input, index, name=None): def scatter(input, index, updates, name=None, overwrite=True): """ :alias_main: paddle.scatter - :alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter - :old_api: paddle.fluid.layers.scatter + :alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter + :old_api: paddle.fluid.layers.scatter **Scatter Layer** @@ -8944,8 +9515,8 @@ def scatter(input, index, updates, name=None, overwrite=True): name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . 
overwrite (bool): The mode that updating the output when there are same indices. If True, use the overwrite mode to update the output of the same index, - if False, use the accumulate mode to update the output of the same index. - Default value is True. + if False, use the accumulate mode to update the output of the same index. + Default value is True. Returns: Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input. @@ -8981,14 +9552,12 @@ def scatter(input, index, updates, name=None, overwrite=True): helper = LayerHelper('scatter', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="scatter", - inputs={ - "X": input, - "Ids": index, - "Updates": updates - }, - attrs={'overwrite': overwrite}, - outputs={"Out": out}) + helper.append_op( + type="scatter", + inputs={"X": input, "Ids": index, "Updates": updates}, + attrs={'overwrite': overwrite}, + outputs={"Out": out}, + ) return out @@ -9073,13 +9642,11 @@ def scatter_nd_add(ref, index, updates, name=None): helper = LayerHelper('scatter_nd_add', **locals()) dtype = helper.input_dtype(input_param_name='ref') output = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="scatter_nd_add", - inputs={ - "X": ref, - "Index": index, - "Updates": updates - }, - outputs={"Out": output}) + helper.append_op( + type="scatter_nd_add", + inputs={"X": ref, "Index": index, "Updates": updates}, + outputs={"Out": output}, + ) return output @@ -9156,9 +9723,9 @@ def random_crop(x, shape, seed=None): """ helper = LayerHelper("random_crop", **locals()) - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'uint8', 'int16', 'int32'], - 'random_crop') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32'], 'random_crop' + ) check_type(shape, 'shape', (list, Variable), 'random_crop') dtype = x.dtype out = helper.create_variable_for_type_inference(dtype) @@ -9170,19 +9737,16 @@ def random_crop(x, shape, seed=None): seed = helper.create_variable( name=unique_name.generate("random_crop_seed"), dtype="int64", - persistable=True) + persistable=True, + ) elif not isinstance(seed, Variable): raise ValueError("'seed' must be a Variable or an int.") - helper.append_op(type="random_crop", - inputs={ - "X": x, - "Seed": seed - }, - outputs={ - "Out": out, - "SeedOut": seed - }, - attrs=op_attrs) + helper.append_op( + type="random_crop", + inputs={"X": x, "Seed": seed}, + outputs={"Out": out, "SeedOut": seed}, + attrs=op_attrs, + ) return out @@ -9253,8 +9817,7 @@ def relu(x, name=None): out1 = fluid.layers.relu(x1) print(out1.numpy()) # [[0. 0. ] - # [1. 2.6]] -""" + # [1. 
2.6]]""" if in_dygraph_mode(): return _C_ops.relu(x) @@ -9267,9 +9830,9 @@ def relu(x, name=None): helper = LayerHelper('relu', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="relu", - inputs={"X": helper.input('x')}, - outputs={"Out": out}) + helper.append_op( + type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out} + ) return out @@ -9339,10 +9902,9 @@ def selu(x, scale=None, alpha=None, name=None): if alpha is not None: attrs["alpha"] = alpha - helper.append_op(type="selu", - inputs={"X": x}, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs + ) return out @@ -9393,23 +9955,23 @@ def mean_iou(input, label, num_classes): return _legacy_C_ops.mean_iou(input, label, 'num_classes', num_classes) helper = LayerHelper('mean_iou', **locals()) - check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'], - 'mean_iou') + check_variable_and_dtype( + input, 'Predictions', ['int32', 'int64'], 'mean_iou' + ) check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou') out_mean_iou = helper.create_variable_for_type_inference(dtype='float32') out_wrong = helper.create_variable_for_type_inference(dtype='int32') out_correct = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type="mean_iou", - inputs={ - "Predictions": input, - "Labels": label - }, - outputs={ - "OutMeanIou": out_mean_iou, - "OutWrong": out_wrong, - "OutCorrect": out_correct - }, - attrs={"num_classes": num_classes}) + helper.append_op( + type="mean_iou", + inputs={"Predictions": input, "Labels": label}, + outputs={ + "OutMeanIou": out_mean_iou, + "OutWrong": out_wrong, + "OutCorrect": out_correct, + }, + attrs={"num_classes": num_classes}, + ) return out_mean_iou, out_wrong, out_correct @@ -9506,10 +10068,12 @@ def crop(x, shape=None, offsets=None, name=None): else: attrs['offsets'] = offsets - helper.append_op(type='crop', - inputs=ipts, - outputs={'Out': out}, - attrs=None if len(attrs) == 0 else attrs) + helper.append_op( + type='crop', + inputs=ipts, + outputs={'Out': out}, + attrs=None if len(attrs) == 0 else attrs, + ) return out @@ -9602,11 +10166,13 @@ def crop_tensor(x, shape=None, offsets=None, name=None): """ helper = LayerHelper('crop_tensor', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'crop_tensor') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor' + ) check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor') - check_type(offsets, 'offsets', (list, tuple, Variable, type(None)), - 'crop_tensor') + check_type( + offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor' + ) if offsets is None: offsets = [0] * len(x.shape) @@ -9619,25 +10185,30 @@ def crop_tensor(x, shape=None, offsets=None, name=None): if not isinstance(shape_val, int): raise TypeError( "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s." - % type(shape_val)) + % type(shape_val) + ) if shape_val == 0: raise ValueError( "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s." - % str(shape_val)) + % str(shape_val) + ) if shape_val < -1: raise ValueError( "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s." 
- % str(shape_val)) + % str(shape_val) + ) def _attr_offsets_check(offset_val): if not isinstance(offset_val, int): raise TypeError( "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s." - % type(offset_val)) + % type(offset_val) + ) if offset_val < 0: raise ValueError( "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s." - % str(offset_val)) + % str(offset_val) + ) if isinstance(offsets, Variable): offsets.stop_gradient = True @@ -9678,11 +10249,9 @@ def crop_tensor(x, shape=None, offsets=None, name=None): else: _attr_shape_check(dim_size) temp_out = helper.create_variable_for_type_inference('int32') - fill_constant([1], - 'int32', - dim_size, - force_cpu=True, - out=temp_out) + fill_constant( + [1], 'int32', dim_size, force_cpu=True, out=temp_out + ) new_shape_tensor.append(temp_out) shape_attr.append(dim_size) ipts['ShapeTensor'] = new_shape_tensor @@ -9692,18 +10261,20 @@ def crop_tensor(x, shape=None, offsets=None, name=None): _attr_shape_check(dim_size) attrs['shape'] = shape - helper.append_op(type='crop_tensor', - inputs=ipts, - outputs={'Out': out}, - attrs=None if len(attrs) == 0 else attrs) + helper.append_op( + type='crop_tensor', + inputs=ipts, + outputs={'Out': out}, + attrs=None if len(attrs) == 0 else attrs, + ) return out def affine_grid(theta, out_shape, name=None): """ :alias_main: paddle.nn.functional.affine_grid - :alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid - :old_api: paddle.fluid.layers.affine_grid + :alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid + :old_api: paddle.fluid.layers.affine_grid It generates a grid of (x,y) coordinates using the parameters of the affine transformation that correspond to a set of points where @@ -9746,11 +10317,15 @@ def affine_grid(theta, out_shape, name=None): """ helper = LayerHelper('affine_grid') - check_variable_and_dtype(theta, 'theta', ['float32', 'float64'], - 'affine_grid') - - if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \ - isinstance(out_shape, Variable)): + check_variable_and_dtype( + theta, 'theta', ['float32', 'float64'], 'affine_grid' + ) + + if not ( + isinstance(out_shape, list) + or isinstance(out_shape, tuple) + or isinstance(out_shape, Variable) + ): raise ValueError("The out_shape should be a list, tuple or Variable.") if not isinstance(theta, Variable): @@ -9761,27 +10336,32 @@ def affine_grid(theta, out_shape, name=None): attrs = {} if isinstance(out_shape, Variable): ipts['OutputShape'] = out_shape - check_variable_and_dtype(out_shape, 'out_shape', ['int32'], - 'affine_grid') + check_variable_and_dtype( + out_shape, 'out_shape', ['int32'], 'affine_grid' + ) else: attrs['output_shape'] = out_shape if core.is_compiled_with_rocm(): # ROCM platform do not have MIOPEN kernel for affine_grid attrs['use_cudnn'] = False - helper.append_op(type='affine_grid', - inputs=ipts, - outputs={'Output': out}, - attrs=None if len(attrs) == 0 else attrs) + helper.append_op( + type='affine_grid', + inputs=ipts, + outputs={'Output': out}, + attrs=None if len(attrs) == 0 else attrs, + ) return out -def pad2d(input, - paddings=[0, 0, 0, 0], - mode='constant', - pad_value=0.0, - data_format="NCHW", - name=None): +def pad2d( + input, + paddings=[0, 0, 0, 0], + mode='constant', + pad_value=0.0, + data_format="NCHW", + name=None, +): """ Pad 2-d images according to 'paddings' and 'mode'. @@ -9795,10 +10375,10 @@ def pad2d(input, Otherwise, it is a 1-D Tensor with shape [4]. 
Data type is int32. Default is [0, 0, 0, 0]. mode (str): Three modes: 'constant' (default), 'reflect', 'edge' . - When in 'constant' mode, this op uses a constant value to pad the input tensor. - When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. - When in 'edge' mode, uses input boundaries to pad the input tensor. - Default is 'constant' + When in 'constant' mode, this op uses a constant value to pad the input tensor. + When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. + When in 'edge' mode, uses input boundaries to pad the input tensor. + Default is 'constant' pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0 data_format (str): An string from: "NHWC", "NCHW". Specify the data format of the input data. @@ -9869,15 +10449,29 @@ def pad2d(input, # [2. 1. 2. 3. 2.]]]] """ if _non_static_mode(): - _paddings = paddings.numpy().tolist() if isinstance( - paddings, Variable) else paddings - return _legacy_C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value, - 'data_format', data_format, 'paddings', - _paddings) + _paddings = ( + paddings.numpy().tolist() + if isinstance(paddings, Variable) + else paddings + ) + return _legacy_C_ops.pad2d( + input, + 'mode', + mode, + 'pad_value', + pad_value, + 'data_format', + data_format, + 'paddings', + _paddings, + ) check_variable_and_dtype( - input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], - "pad2d") + input, + 'input', + ['float16', 'float32', 'float64', 'int32', 'int64'], + "pad2d", + ) attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format} inputs = {'X': [input]} @@ -9889,16 +10483,18 @@ def pad2d(input, helper = LayerHelper('pad2d', **locals()) - assert mode in ['reflect', 'edge', 'constant' - ], "mode should be one of constant, reflect, edge." + assert mode in [ + 'reflect', + 'edge', + 'constant', + ], "mode should be one of constant, reflect, edge." 
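# Illustrative aside (hedged, not part of this PR's diff): a minimal usage
# sketch of the fluid.layers.pad2d API whose reformatting is shown above.
# The tensor name, shape, and padding values are assumptions for demonstration.
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()
img = fluid.data(name="img", shape=[1, 1, 3, 3], dtype="float32")
# 'reflect' mode mirrors the border pixels, as described in the docstring above.
padded = fluid.layers.pad2d(img, paddings=[1, 1, 1, 1], mode='reflect')

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
data = np.arange(9, dtype="float32").reshape(1, 1, 3, 3)
out = exe.run(feed={"img": data}, fetch_list=[padded])
print(out[0].shape)  # (1, 1, 5, 5): one row/column of reflected padding per side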
dtype = helper.input_dtype(input_param_name='input') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pad2d', - inputs=inputs, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs + ) return out @@ -9907,8 +10503,8 @@ def pad2d(input, def elu(x, alpha=1.0, name=None): """ :alias_main: paddle.nn.functional.elu - :alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu - :old_api: paddle.fluid.layers.elu + :alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu + :old_api: paddle.fluid.layers.elu ${comment} Args: @@ -9937,10 +10533,12 @@ def elu(x, alpha=1.0, name=None): helper = LayerHelper('elu', **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='elu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'alpha': alpha}) + helper.append_op( + type='elu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'alpha': alpha}, + ) return out @@ -9978,13 +10576,15 @@ def relu6(x, threshold=6.0, name=None): helper = LayerHelper('relu6', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='relu6', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'threshold': threshold, - 'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"] - }) + helper.append_op( + type='relu6', + inputs={'X': x}, + outputs={'Out': out}, + attrs={ + 'threshold': threshold, + 'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"], + }, + ) return out @@ -10021,7 +10621,8 @@ def pow(x, factor=1.0, name=None): # y_2 is x^{3.0} """ check_variable_and_dtype( - x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow') + x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow' + ) helper = LayerHelper('pow', **locals()) inputs = {'X': x} @@ -10034,10 +10635,9 @@ def pow(x, factor=1.0, name=None): attrs['factor'] = factor out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='pow', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -10077,13 +10677,12 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): helper = LayerHelper('stanh', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='stanh', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'scale_a': scale_a, - 'scale_b': scale_b - }) + helper.append_op( + type='stanh', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'scale_a': scale_a, 'scale_b': scale_b}, + ) return out @@ -10116,18 +10715,18 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None): if _non_static_mode(): return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hard_sigmoid') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hard_sigmoid' + ) helper = LayerHelper('hard_sigmoid', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='hard_sigmoid', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'slope': slope, - 'offset': offset - }) + helper.append_op( + type='hard_sigmoid', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'slope': slope, 'offset': offset}, + ) return out @@ -10135,8 +10734,8 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, 
name=None): def swish(x, beta=1.0, name=None): r""" :alias_main: paddle.nn.functional.swish - :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish - :old_api: paddle.fluid.layers.swish + :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish + :old_api: paddle.fluid.layers.swish Elementwise swish activation function. See `Searching for Activation Functions `_ for more details. @@ -10208,10 +10807,12 @@ def swish(x, beta=1.0, name=None): helper = LayerHelper('swish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='swish', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'slope': beta}) + helper.append_op( + type='swish', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'slope': beta}, + ) return out @@ -10272,52 +10873,57 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None): if mode == 'channel': true_data_format = [ - 'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC' + 'NC', + 'NCL', + 'NCHW', + 'NCDHW', + 'NLC', + 'NHWC', + 'NDHWC', ] if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)) + "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' - assert len( - x.shape - ) >= 2, "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'" - #NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]). + assert ( + len(x.shape) >= 2 + ), "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'" + # NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]). # To be consistent with Prelu, it is simplified. - #NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version. - #NOTE(GuoxiaWang): support NHWC data format + # NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version. 
+ # NOTE(GuoxiaWang): support NHWC data format if data_format == 'NHWC': alpha_shape = [1, 1, 1, x.shape[-1]] else: alpha_shape = [1, x.shape[1], 1, 1] elif mode == 'element': - assert len( - x.shape - ) >= 1, "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'" + assert ( + len(x.shape) >= 1 + ), "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'" alpha_shape = [1] + list(x.shape)[1:] dtype = helper.input_dtype(input_param_name='x') - alpha = helper.create_parameter(attr=helper.param_attr, - shape=alpha_shape, - dtype=dtype, - is_bias=False, - default_initializer=Constant(0.25)) + alpha = helper.create_parameter( + attr=helper.param_attr, + shape=alpha_shape, + dtype=dtype, + is_bias=False, + default_initializer=Constant(0.25), + ) if in_dygraph_mode(): return _C_ops.prelu(x, alpha, data_format, mode) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="prelu", - inputs={ - "X": x, - 'Alpha': alpha - }, - attrs={ - "mode": mode, - "data_format": data_format - }, - outputs={"Out": out}) + helper.append_op( + type="prelu", + inputs={"X": x, 'Alpha': alpha}, + attrs={"mode": mode, "data_format": data_format}, + outputs={"Out": out}, + ) return out @@ -10358,13 +10964,12 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): helper = LayerHelper('brelu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='brelu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 't_min': t_min, - 't_max': t_max - }) + helper.append_op( + type='brelu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'t_min': t_min, 't_max': t_max}, + ) return out @@ -10431,15 +11036,18 @@ def soft_relu(x, threshold=40.0, name=None): res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)] """ - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'soft_relu') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'soft_relu' + ) helper = LayerHelper('soft_relu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='soft_relu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'threshold': threshold}) + helper.append_op( + type='soft_relu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold}, + ) return out @@ -10508,8 +11116,11 @@ def flatten(x, axis=1, name=None): # out shape is [16, 3] """ check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'], - 'flatten') + x, + 'x', + ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'], + 'flatten', + ) if _non_static_mode(): return _legacy_C_ops.flatten2(x, 'axis', axis)[0] @@ -10523,13 +11134,12 @@ def flatten(x, axis=1, name=None): out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='flatten2', - inputs={"X": x}, - outputs={ - 'Out': out, - 'XShape': x_shape - }, - attrs={"axis": axis}) + helper.append_op( + type='flatten2', + inputs={"X": x}, + outputs={'Out': out, 'XShape': x_shape}, + attrs={"axis": axis}, + ) return out @@ -10623,42 +11233,53 @@ def stack(x, axis=0, name=None): if not isinstance(x, list) and not isinstance(x, tuple): # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc. 
# In that case, Variable is array of tensors indeed. - if isinstance(x, Variable) and x.desc.type( - ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + isinstance(x, Variable) + and x.desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): x = [x] else: raise TypeError( - "The type of '%s' in %s must be %s, but received %s" % - ('x', 'stack', 'list[Tensor], tuple[Tensor] or TensorArray', - type(x))) + "The type of '%s' in %s must be %s, but received %s" + % ( + 'x', + 'stack', + 'list[Tensor], tuple[Tensor] or TensorArray', + type(x), + ) + ) helper = LayerHelper('stack', **locals()) out = helper.create_variable_for_type_inference(x[0].dtype) if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: - assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \ - "number of the elements must be 1, but received %s." % len(x) + assert len(x) == 1, ( + "If the elements of 'x' in stack are Variable(LoDTensorArray), " + "number of the elements must be 1, but received %s." % len(x) + ) out_index = helper.create_variable_for_type_inference(dtype="int32") for i in x: - check_variable_and_dtype(i, 'x', \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack') - - helper.append_op(type='tensor_array_to_tensor', - inputs={'X': x[0]}, - outputs={ - 'Out': [out], - 'OutIndex': [out_index] - }, - attrs={ - 'axis': axis, - 'use_stack': True - }) + check_variable_and_dtype( + i, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'stack', + ) + + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': x[0]}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': True}, + ) else: - helper.append_op(type='stack', - inputs={'X': x}, - outputs={'Y': out}, - attrs={'axis': axis}) + helper.append_op( + type='stack', + inputs={'X': x}, + outputs={'Y': out}, + attrs={'axis': axis}, + ) return out @@ -10722,21 +11343,12 @@ def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0): out = helper.create_variable_for_type_inference(dtype=ins.dtype) loss_weight = helper.create_variable_for_type_inference(dtype=np.float64) mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype) - helper.append_op(type='filter_by_instag', - inputs={ - 'Ins': ins, - 'Ins_tag': ins_tag, - 'Filter_tag': filter_tag - }, - outputs={ - 'Out': out, - 'LossWeight': loss_weight, - 'IndexMap': mmap - }, - attrs={ - 'is_lod': is_lod, - 'out_val_if_empty': out_val_if_empty - }) + helper.append_op( + type='filter_by_instag', + inputs={'Ins': ins, 'Ins_tag': ins_tag, 'Filter_tag': filter_tag}, + outputs={'Out': out, 'LossWeight': loss_weight, 'IndexMap': mmap}, + attrs={'is_lod': is_lod, 'out_val_if_empty': out_val_if_empty}, + ) return [out, loss_weight] @@ -10744,8 +11356,8 @@ def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0): def unstack(x, axis=0, num=None): """ :alias_main: paddle.unstack - :alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack - :old_api: paddle.fluid.layers.unstack + :alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack + :old_api: paddle.fluid.layers.unstack **UnStack Layer** @@ -10794,13 +11406,12 @@ def unstack(x, axis=0, num=None): for _ in range(num): outs.append(helper.create_variable_for_type_inference(x.dtype)) - helper.append_op(type='unstack', - inputs={'X': [x]}, - outputs={'Y': outs}, - attrs={ - 'axis': axis, - 'num': num - }) + helper.append_op( + type='unstack', + inputs={'X': [x]}, + outputs={'Y': outs}, + 
attrs={'axis': axis, 'num': num}, + ) return outs @@ -10808,8 +11419,8 @@ def unstack(x, axis=0, num=None): def expand(x, expand_times, name=None): """ :alias_main: paddle.expand - :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand - :old_api: paddle.fluid.layers.expand + :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand + :old_api: paddle.fluid.layers.expand This operation tiles ``x`` multiple times according to the parameter ``expand_times``. The times number for each dimension of ``x`` is set by the parameter ``expand_times``. @@ -10883,12 +11494,16 @@ def expand(x, expand_times, name=None): inputs = {"X": [x]} attrs = {} check_variable_and_dtype( - x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'expand') + x, + 'x', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'expand', + ) check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand') if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True: raise ValueError( - "expand op bool date type must set the stop_gradient to be False") + "expand op bool date type must set the stop_gradient to be False" + ) helper = LayerHelper('expand', input=x, **locals()) @@ -10899,8 +11514,9 @@ def expand(x, expand_times, name=None): attrs_expand_times.append(-1) else: attrs_expand_times.append(times) - assert times > 0, ( - "Each element given in expand_times must not be negative.") + assert ( + times > 0 + ), "Each element given in expand_times must not be negative." return attrs_expand_times if isinstance(expand_times, Variable): @@ -10910,14 +11526,14 @@ def expand(x, expand_times, name=None): attrs['expand_times'] = get_attr_expand_times(expand_times) if utils._contain_var(expand_times): inputs['expand_times_tensor'] = utils._convert_to_tensor_list( - expand_times) + expand_times + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='expand', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -10925,8 +11541,8 @@ def expand(x, expand_times, name=None): def expand_as(x, target_tensor, name=None): """ :alias_main: paddle.expand_as - :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as - :old_api: paddle.fluid.layers.expand_as + :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as + :old_api: paddle.fluid.layers.expand_as expand_as operator tiles to the input by given expand tensor. You should set expand tensor for each dimension by providing tensor 'target_tensor'. 
The rank of X @@ -10992,12 +11608,15 @@ def expand_as(x, target_tensor, name=None): if _non_static_mode(): return _legacy_C_ops.expand_as(x, target_tensor) - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'int32', 'int64', 'bool'], - 'expand_as') - check_variable_and_dtype(target_tensor, 'target_tensor', - ['float32', 'float64', 'int32', 'int64', 'bool'], - 'expand_as') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as' + ) + check_variable_and_dtype( + target_tensor, + 'target_tensor', + ['float32', 'float64', 'int32', 'int64', 'bool'], + 'expand_as', + ) helper = LayerHelper('expand_as', input=x, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) @@ -11011,14 +11630,16 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ @deprecated(since='1.8.0', update_to="paddle.uniform") @templatedoc() -def uniform_random_batch_size_like(input, - shape, - dtype='float32', - input_dim_idx=0, - output_dim_idx=0, - min=-1.0, - max=1.0, - seed=0): +def uniform_random_batch_size_like( + input, + shape, + dtype='float32', + input_dim_idx=0, + output_dim_idx=0, + min=-1.0, + max=1.0, + seed=0, +): """ This OP initializes a variable with random values sampled from a uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension. @@ -11081,39 +11702,46 @@ def uniform_random_batch_size_like(input, """ - check_variable_and_dtype(input, 'Input', ("float32", 'float64', "uint16"), - 'uniform_random_batch_size_like') + check_variable_and_dtype( + input, + 'Input', + ("float32", 'float64', "uint16"), + 'uniform_random_batch_size_like', + ) check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like') - check_dtype(dtype, 'dtype', ('float32', 'float64', "uint16"), - 'uniform_random_batch_size_like') + check_dtype( + dtype, + 'dtype', + ('float32', 'float64', "uint16"), + 'uniform_random_batch_size_like', + ) helper = LayerHelper('uniform_random_batch_size_like', **locals()) out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) - helper.append_op(type='uniform_random_batch_size_like', - inputs={'Input': input}, - outputs={'Out': out}, - attrs={ - 'shape': shape, - 'input_dim_idx': input_dim_idx, - 'output_dim_idx': output_dim_idx, - 'min': min, - 'max': max, - 'seed': seed, - 'dtype': c_dtype - }) + helper.append_op( + type='uniform_random_batch_size_like', + inputs={'Input': input}, + outputs={'Out': out}, + attrs={ + 'shape': shape, + 'input_dim_idx': input_dim_idx, + 'output_dim_idx': output_dim_idx, + 'min': min, + 'max': max, + 'seed': seed, + 'dtype': c_dtype, + }, + ) return out @deprecated(since="2.0.0", update_to="paddle.normal") @templatedoc() -def gaussian_random(shape, - mean=0.0, - std=1.0, - seed=0, - dtype='float32', - name=None): +def gaussian_random( + shape, mean=0.0, std=1.0, seed=0, dtype='float32', name=None +): """ This OP returns a Tensor filled with random values sampled from a Gaussian distribution, with ``shape`` and ``dtype``. 
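# Illustrative aside (hedged, assumed values): a short usage sketch of the
# fluid.layers.gaussian_random API documented above; shape, mean, and std are
# chosen only for demonstration.
import paddle
import paddle.fluid as fluid

paddle.enable_static()
# Returns a float32 Tensor of shape [2, 3] sampled from N(mean, std^2).
noise = fluid.layers.gaussian_random(shape=[2, 3], mean=0.0, std=1.0, dtype='float32')

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
result = exe.run(fetch_list=[noise])
print(result[0].shape)  # (2, 3)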
@@ -11212,15 +11840,24 @@ def gaussian_random(shape, if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) place = _current_expected_place() - return _C_ops.gaussian_random(shape, float(mean), float(std), seed, - dtype, place) + return _C_ops.gaussian_random( + shape, float(mean), float(std), seed, dtype, place + ) if _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - return _legacy_C_ops.gaussian_random('shape', shape, - 'mean', float(mean), 'std', - float(std), 'seed', seed, 'dtype', - dtype) + return _legacy_C_ops.gaussian_random( + 'shape', + shape, + 'mean', + float(mean), + 'std', + float(std), + 'seed', + seed, + 'dtype', + dtype, + ) check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn') check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn') @@ -11231,19 +11868,17 @@ def gaussian_random(shape, 'std': std, 'seed': seed, 'dtype': dtype, - 'use_mkldnn': False + 'use_mkldnn': False, } - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='gaussian_random/randn') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='gaussian_random/randn' + ) helper = LayerHelper('gaussian_random', **locals()) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='gaussian_random', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -11277,28 +11912,28 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'): helper = LayerHelper('sampling_id', **locals()) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='sampling_id', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'min': min, - 'max': max, - 'seed': seed - }) + helper.append_op( + type='sampling_id', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'min': min, 'max': max, 'seed': seed}, + ) return out @deprecated(since='1.8.0', update_to="paddle.normal") @templatedoc() -def gaussian_random_batch_size_like(input, - shape, - input_dim_idx=0, - output_dim_idx=0, - mean=0.0, - std=1.0, - seed=0, - dtype='float32'): +def gaussian_random_batch_size_like( + input, + shape, + input_dim_idx=0, + output_dim_idx=0, + mean=0.0, + std=1.0, + seed=0, + dtype='float32', +): """ ${comment} @@ -11329,26 +11964,40 @@ def gaussian_random_batch_size_like(input, """ helper = LayerHelper('gaussian_random_batch_size_like', **locals()) - check_type(input, 'input', (Variable), - 'fluid.layers.gaussian_random_batch_size_like') - check_type(shape, 'shape', (list, tuple), - 'fluid.layers.gaussian_random_batch_size_like') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'], - 'fluid.layers.gaussian_random_batch_size_like') + check_type( + input, + 'input', + (Variable), + 'fluid.layers.gaussian_random_batch_size_like', + ) + check_type( + shape, + 'shape', + (list, tuple), + 'fluid.layers.gaussian_random_batch_size_like', + ) + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'int'], + 'fluid.layers.gaussian_random_batch_size_like', + ) out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) - helper.append_op(type='gaussian_random_batch_size_like', - inputs={'Input': input}, - outputs={'Out': out}, - attrs={ - 'shape': shape, - 'input_dim_idx': input_dim_idx, - 'output_dim_idx': output_dim_idx, - 'mean': mean, - 'std': std, - 'seed': seed, - 'dtype': c_dtype - }) + helper.append_op( + 
type='gaussian_random_batch_size_like', + inputs={'Input': input}, + outputs={'Out': out}, + attrs={ + 'shape': shape, + 'input_dim_idx': input_dim_idx, + 'output_dim_idx': output_dim_idx, + 'mean': mean, + 'std': std, + 'seed': seed, + 'dtype': c_dtype, + }, + ) return out @@ -11507,7 +12156,8 @@ def slice(input, axes, starts, ends): axes = list(axes) if len(axes) == 0: raise ValueError( - "Input axes should not be an empty list/tuple.") + "Input axes should not be an empty list/tuple." + ) for i in range(len(axes)): if axes[i] < 0: axes[i] = max(0, axes[i] + len(input.shape)) @@ -11516,8 +12166,10 @@ def slice(input, axes, starts, ends): else: raise ValueError( - "Input axes must be a python list or tuple, but reveived {}". - format(type(axes))) + "Input axes must be a python list or tuple, but reveived {}".format( + type(axes) + ) + ) infer_flags = list(1 for i in range(len(axes))) @@ -11525,7 +12177,8 @@ def slice(input, axes, starts, ends): if isinstance(starts, (list, tuple)): starts = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in starts ] elif isinstance(starts, tmp_tensor_type): @@ -11535,7 +12188,9 @@ def slice(input, axes, starts, ends): if isinstance(ends, (list, tuple)): ends = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item for item in ends + if isinstance(item, tmp_tensor_type) + else item + for item in ends ] attrs += ('ends', ends) elif isinstance(ends, tmp_tensor_type): @@ -11553,7 +12208,8 @@ def slice(input, axes, starts, ends): axes = list(axes) if len(axes) == 0: raise ValueError( - "Input axes should not be an empty list/tuple.") + "Input axes should not be an empty list/tuple." + ) for i in range(len(axes)): if axes[i] < 0: axes[i] = max(0, axes[i] + len(input.shape)) @@ -11562,8 +12218,10 @@ def slice(input, axes, starts, ends): else: raise ValueError( - "Input axes must be a python list or tuple, but reveived {}" - .format(type(axes))) + "Input axes must be a python list or tuple, but reveived {}".format( + type(axes) + ) + ) infer_flags = list(1 for i in range(len(axes))) @@ -11572,7 +12230,8 @@ def slice(input, axes, starts, ends): if isinstance(starts, (list, tuple)): starts = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in starts ] attrs += ('starts', starts) @@ -11584,7 +12243,8 @@ def slice(input, axes, starts, ends): if isinstance(ends, (list, tuple)): ends = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in ends ] attrs += ('ends', ends) @@ -11593,16 +12253,27 @@ def slice(input, axes, starts, ends): ends_tensor.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) - return _legacy_C_ops.slice(input, starts_tensor, ends_tensor, None, - None, 'axes', axes, 'infer_flags', - infer_flags, *attrs) + return _legacy_C_ops.slice( + input, + starts_tensor, + ends_tensor, + None, + None, + 'axes', + axes, + 'infer_flags', + infer_flags, + *attrs, + ) if not isinstance(starts, (list, tuple, Variable)): raise ValueError( - "Input starts must be an Variable, python list or tuple.") + "Input starts must be an Variable, python list or tuple." + ) if not isinstance(ends, (list, tuple, Variable)): raise ValueError( - "Input ends must be an Variable, python list or tuple.") + "Input ends must be an Variable, python list or tuple." 
+ ) helper = LayerHelper('slice', **locals()) @@ -11649,11 +12320,11 @@ def slice(input, axes, starts, ends): # infer_flags attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('input')) - helper.append_op(type='slice', - inputs=inputs, - attrs=attrs, - outputs={'Out': out}) + dtype=helper.input_dtype('input') + ) + helper.append_op( + type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out} + ) return out @@ -11662,8 +12333,8 @@ def slice(input, axes, starts, ends): def strided_slice(input, axes, starts, ends, strides): """ :alias_main: paddle.strided_slice - :alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice - :old_api: paddle.fluid.layers.strided_slice + :alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice + :old_api: paddle.fluid.layers.strided_slice This operator produces a slice of ``input`` along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html @@ -11764,9 +12435,12 @@ def strided_slice(input, axes, starts, ends, strides): helper = LayerHelper('strided_slice', **locals()) - check_variable_and_dtype(input, 'input', - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'strided_slice') + check_variable_and_dtype( + input, + 'input', + ['bool', 'float32', 'float64', 'int32', 'int64'], + 'strided_slice', + ) check_type(axes, 'axes', (list, tuple), 'strided_slice') check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice') check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice') @@ -11774,8 +12448,9 @@ def strided_slice(input, axes, starts, ends, strides): def check_list_elements_dtype(list_input, input_name): if isinstance(list_input, Variable): - check_dtype(list_input.dtype, input_name, ['int32'], - 'strided_slice') + check_dtype( + list_input.dtype, input_name, ['int32'], 'strided_slice' + ) else: for i, var in enumerate(list_input): var_name = input_name + '[' + str(i) + ']' @@ -11794,7 +12469,7 @@ def strided_slice(input, axes, starts, ends, strides): dim.stop_gradient = True new_list_tensor.append(dim) else: - assert (isinstance(dim, int)) + assert isinstance(dim, int) temp_out = helper.create_variable_for_type_inference('int32') fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out) new_list_tensor.append(temp_out) @@ -11811,7 +12486,7 @@ def strided_slice(input, axes, starts, ends, strides): 'starts': starts, 'ends': ends, 'strides': strides, - 'infer_flags': infer_flags + 'infer_flags': infer_flags, } else: # starts @@ -11866,11 +12541,11 @@ def strided_slice(input, axes, starts, ends, strides): attrs['strides'] = strides attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('input')) - helper.append_op(type='strided_slice', - inputs=inputs, - attrs=attrs, - outputs={'Out': out}) + dtype=helper.input_dtype('input') + ) + helper.append_op( + type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out} + ) return out @@ -11878,8 +12553,8 @@ def strided_slice(input, axes, starts, ends, strides): def shape(input): """ :alias_main: paddle.shape - :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape - :old_api: paddle.fluid.layers.shape + :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape + :old_api: paddle.fluid.layers.shape **Shape Layer** @@ -11937,16 +12612,29 @@ def shape(input): out.stop_gradient = True return out - 
check_variable_and_dtype(input, 'input', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], 'shape') + check_variable_and_dtype( + input, + 'input', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'shape', + ) helper = LayerHelper('shape', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type='shape', - inputs={'Input': input}, - outputs={'Out': out}, - stop_gradient=True) + helper.append_op( + type='shape', + inputs={'Input': input}, + outputs={'Out': out}, + stop_gradient=True, + ) return out @@ -12014,8 +12702,11 @@ def size(input): return _legacy_C_ops.size(input) check_variable_and_dtype( - input, 'input', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size") + input, + 'input', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + "size", + ) helper = LayerHelper('size', **locals()) out = helper.create_variable_for_type_inference(dtype='int64') helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out}) @@ -12031,27 +12722,29 @@ def _elementwise_op(helper): assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype( - x, 'x', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], - op_type) + x, + 'x', + ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], + op_type, + ) check_variable_and_dtype( - y, 'y', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], - op_type) + y, + 'y', + ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], + op_type, + ) axis = helper.kwargs.get('axis', -1) use_mkldnn = helper.kwargs.get('use_mkldnn', False) name = helper.kwargs.get('name', None) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=op_type, - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs={ - 'axis': axis, - 'use_mkldnn': use_mkldnn - }) + helper.append_op( + type=op_type, + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs={'axis': axis, 'use_mkldnn': use_mkldnn}, + ) return helper.append_activation(out) @@ -12107,15 +12800,33 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): return dygraph_utils._append_activation_in_dygraph(out) if _non_static_mode(): _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale - out = _legacy_C_ops.scale(x, 'scale', float(_scale), 'bias', - float(bias), 'bias_after_scale', - bias_after_scale) + out = _legacy_C_ops.scale( + x, + 'scale', + float(_scale), + 'bias', + float(bias), + 'bias_after_scale', + bias_after_scale, + ) return dygraph_utils._append_activation_in_dygraph(out) - check_variable_and_dtype(x, "x", [ - 'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32', - 'int64', 'uint8' - ], "scale") + check_variable_and_dtype( + x, + "x", + [ + 'float16', + 'uint16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + ], + "scale", + ) inputs = {'X': [x]} attrs = { 'bias': float(bias), @@ -12128,91 +12839,90 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): helper = LayerHelper('scale', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='scale', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return 
helper.append_activation(out) def elementwise_add(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_add(x, y) - # z = x + y + import paddle.fluid as fluid + import numpy as np + import paddle + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_add(x, y) + # z = x + y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # [3., 8., 6.] + print(z_value) # [3., 8., 6.] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_add(x, y, axis=1) - # z = x + y + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_add(x, y, axis=1) + # z = x + y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + print(z_value) # z.shape=[2,3,4,5] - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_add(x, y, axis=3) - # z = x + y + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_add(x, y, axis=3) + # z = x + y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] """ if _non_static_mode(): @@ -12222,7 +12932,8 @@ Examples: axis=axis, act=act, op_name='elementwise_add', - use_mkldnn=_global_flags()["FLAGS_use_mkldnn"]) + use_mkldnn=_global_flags()["FLAGS_use_mkldnn"], + ) return _elementwise_op(LayerHelper('elementwise_add', **locals())) @@ -12231,90 +12942,88 @@ Examples: def elementwise_div(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_div(x, y) - # z = x / y + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_div(x, y) + # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # [2., 0.6, 2.] + print(z_value) # [2., 0.6, 2.] - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_div(x, y, axis=1) - # z = x / y + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_div(x, y, axis=1) + # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + print(z_value) # z.shape=[2,3,4,5] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_div(x, y, axis=3) - # z = x / y + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_div(x, y, axis=3) + # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_div') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_div' + ) return _elementwise_op(LayerHelper('elementwise_div', **locals())) @@ -12322,90 +13031,88 @@ Examples: def elementwise_sub(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_sub(x, y) - # z = x - y + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_sub(x, y) + # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # [1., -2., 2.] + print(z_value) # [1., -2., 2.] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_sub(x, y, axis=1) - # z = x - y + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_sub(x, y, axis=1) + # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + print(z_value) # z.shape=[2,3,4,5] - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_sub(x, y, axis=3) - # z = x - y + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_sub(x, y, axis=3) + # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_sub') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_sub' + ) return _elementwise_op(LayerHelper('elementwise_sub', **locals())) @@ -12414,222 +13121,216 @@ Examples: def elementwise_mul(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_mul(x, y) - # z = x * y + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_mul(x, y) + # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # [2., 15., 8.] + print(z_value) # [2., 15., 8.] - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_mul(x, y, axis=1) - # z = x * y + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_mul(x, y, axis=1) + # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + print(z_value) # z.shape=[2,3,4,5] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_mul(x, y, axis=3) - # z = x * y + def gen_data(): + return { + "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), + "y": np.random.randint(1, 5, size=[5]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[5], dtype='float32') + z = fluid.layers.elementwise_mul(x, y, axis=3) + # z = x * y - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) + print(z_value) # z.shape=[2,3,4,5] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_mul') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_mul' + ) return _elementwise_op(LayerHelper('elementwise_mul', **locals())) def elementwise_max(x, y, axis=-1, act=None, name=None): """ - :alias_main: paddle.elementwise_max - :alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max - :old_api: paddle.fluid.layers.elementwise_max + :alias_main: paddle.elementwise_max + :alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max + :old_api: paddle.fluid.layers.elementwise_max -Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_max(x, y) + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_max(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) #[2, 5, 4] + print(z_value) #[2, 5, 4] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_max(x, y, axis=1) + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_max(x, y, axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]] + print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_max') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_max' + ) return _elementwise_op(LayerHelper('elementwise_max', **locals())) def elementwise_min(x, y, axis=-1, act=None, name=None): """ - :alias_main: paddle.elementwise_min - :alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min - :old_api: paddle.fluid.layers.elementwise_min + :alias_main: paddle.elementwise_min + :alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min + :old_api: paddle.fluid.layers.elementwise_min -Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_min(x, y) + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_min(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) #[1, 3, 2] + print(z_value) #[1, 3, 2] - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_min(x, y, axis=1) + def gen_data(): + return { + "x": np.ones((2, 3, 4, 5)).astype('float32'), + "y": np.zeros((3, 4)).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') + y = fluid.data(name="y", shape=[3,4], dtype='float32') + z = fluid.layers.elementwise_min(x, y, axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = fluid.CPUPlace() + exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]] + print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_min') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_min' + ) return _elementwise_op(LayerHelper('elementwise_min', **locals())) @@ -12637,37 +13338,35 @@ Examples: def elementwise_pow(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_pow(x, y) + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = fluid.layers.elementwise_pow(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) #[2, 243, 16] + print(z_value) #[2, 243, 16] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_pow') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_pow' + ) return _elementwise_op(LayerHelper('elementwise_pow', **locals())) @@ -12675,37 +13374,35 @@ Examples: def elementwise_mod(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([10, 15, 8]).astype('int32'), - "y": np.array([3, 6, 5]).astype('int32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='int32') - y = fluid.data(name="y", shape=[3], dtype='int32') - z = fluid.layers.elementwise_mod(x, y) + def gen_data(): + return { + "x": np.array([10, 15, 8]).astype('int32'), + "y": np.array([3, 6, 5]).astype('int32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='int32') + y = fluid.data(name="y", shape=[3], dtype='int32') + z = fluid.layers.elementwise_mod(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) #[1, 3, 3] + print(z_value) #[1, 3, 3] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_mod') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_mod' + ) return _elementwise_op(LayerHelper('elementwise_mod', **locals())) @@ -12714,78 +13411,89 @@ Examples: def elementwise_floordiv(x, y, axis=-1, act=None, name=None): """ -Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - import paddle + import paddle.fluid as fluid + import numpy as np + import paddle - def gen_data(): - return { - "x": np.array([10, 15, 8]).astype('int32'), - "y": np.array([3, 7, 5]).astype('int32') - } - paddle.enable_static() - x = fluid.data(name="x", shape=[3], dtype='int32') - y = fluid.data(name="y", shape=[3], dtype='int32') - z = fluid.layers.elementwise_floordiv(x, y) + def gen_data(): + return { + "x": np.array([10, 15, 8]).astype('int32'), + "y": np.array([3, 7, 5]).astype('int32') + } + paddle.enable_static() + x = fluid.data(name="x", shape=[3], dtype='int32') + y = fluid.data(name="y", shape=[3], dtype='int32') + z = fluid.layers.elementwise_floordiv(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), + fetch_list=[z.name]) - print(z_value) #[3, 2, 1] + print(z_value) #[3, 2, 1] """ if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_floordiv') + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_floordiv' + ) return _elementwise_op(LayerHelper('elementwise_floordiv', **locals())) for func in [ - elementwise_add, - elementwise_div, - elementwise_sub, - elementwise_mul, - elementwise_max, - elementwise_pow, - elementwise_min, - elementwise_mod, - elementwise_floordiv, + elementwise_add, + elementwise_div, + elementwise_sub, + elementwise_mul, + elementwise_max, + elementwise_pow, + elementwise_min, + elementwise_mod, + elementwise_floordiv, ]: op_proto = OpProtoHolder.instance().get_op_proto(func.__name__) # insert the c++ doc string on top of python doc string - func.__doc__ = _generate_doc_string_( - op_proto, - additional_args_lines=[ - "axis (int32, optional): If X.dimension != Y.dimension, \ + func.__doc__ = ( + _generate_doc_string_( + op_proto, + additional_args_lines=[ + "axis (int32, optional): If X.dimension != Y.dimension, \ Y.dimension must be a subsequence of x.dimension. \ And axis is the start dimension index for broadcasting Y onto X. ", - "act (string, optional): Activation applied to the output. \ + "act (string, optional): Activation applied to the output. \ Default is None. Details: :ref:`api_guide_activations_en` ", - "name (string, optional): Name of the output. \ + "name (string, optional): Name of the output. \ Default is None. It's used to print debug info for developers. Details: \ - :ref:`api_guide_Name` " - ], - skip_attrs_set={ - "x_data_format", "y_data_format", "axis", "use_quantizer", - "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out" - }) + """\n""" + str(func.__doc__) + :ref:`api_guide_Name` ", + ], + skip_attrs_set={ + "x_data_format", + "y_data_format", + "axis", + "use_quantizer", + "mkldnn_data_type", + "Scale_x", + "Scale_y", + "Scale_out", + }, + ) + + """\n""" + + str(func.__doc__) + ) doc_list = func.__doc__.splitlines() for idx, val in enumerate(doc_list): - if val.startswith("Warning: ") and val.endswith( - " instead." - ) and "and will be removed in future versions." in val: + if ( + val.startswith("Warning: ") + and val.endswith(" instead.") + and "and will be removed in future versions." 
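# A rough numpy sketch of the `axis` rule spelled out in the generated doc text
# above (Y's dimensions must be a contiguous subsequence of X's, starting at
# `axis`): pad Y with leading/trailing singleton dims so ordinary broadcasting
# lines it up with X.  Illustrative only; not the operator's actual implementation.
import numpy as np

def broadcast_y_onto_x(x, y, axis):
    if axis < 0:
        axis = x.ndim - y.ndim
    shape = [1] * axis + list(y.shape) + [1] * (x.ndim - y.ndim - axis)
    return y.reshape(shape)

x = np.ones((2, 3, 4, 5), dtype="float32")
y = np.zeros((3, 4), dtype="float32")
z = x * broadcast_y_onto_x(x, y, axis=1)   # z.shape == (2, 3, 4, 5)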
in val + ): doc_list.insert(0, doc_list.pop(idx)) func.__doc__ = "\n" + "\n".join(i for i in doc_list) break @@ -12796,9 +13504,12 @@ for func in []: op_proto, additional_args_lines=[ "act (basestring|None): Activation applied to the output.", - "name (basestring|None): Name of the output." - ]) - func.__doc__ = func.__doc__ + """ + "name (basestring|None): Name of the output.", + ], + ) + func.__doc__ = ( + func.__doc__ + + """ Examples: .. code-block:: python @@ -12833,8 +13544,16 @@ Examples: x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32') y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32') z5 = fluid.layers.%s(x5, y5, axis=0) - """ % (func.__name__, func.__name__, func.__name__, func.__name__, - func.__name__, func.__name__) + """ + % ( + func.__name__, + func.__name__, + func.__name__, + func.__name__, + func.__name__, + func.__name__, + ) + ) def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): @@ -12845,14 +13564,18 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): else: return op(x) check_variable_and_dtype( - x, "x", + x, + "x", ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], - op_name) + op_name, + ) if y is not None: check_variable_and_dtype( - y, "y", + y, + "y", ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], - op_name) + op_name, + ) if out is not None: check_type(out, "out", Variable, op_name) @@ -12861,18 +13584,16 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if binary_op and x.dtype != y.dtype: raise ValueError( "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." - % (op_name, x.dtype, y.dtype)) + % (op_name, x.dtype, y.dtype) + ) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) if binary_op: - helper.append_op(type=op_name, - inputs={ - "X": x, - "Y": y - }, - outputs={"Out": out}) + helper.append_op( + type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out} + ) else: helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out}) @@ -12914,12 +13635,9 @@ def logical_and(x, y, out=None, name=None): if in_dygraph_mode(): return _C_ops.logical_and(x, y) - return _logical_op(op_name="logical_and", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True + ) def logical_or(x, y, out=None, name=None): @@ -12959,12 +13677,9 @@ def logical_or(x, y, out=None, name=None): """ if in_dygraph_mode(): return _C_ops.logical_or(x, y) - return _logical_op(op_name="logical_or", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True + ) def logical_xor(x, y, out=None, name=None): @@ -13005,12 +13720,9 @@ def logical_xor(x, y, out=None, name=None): if in_dygraph_mode(): return _C_ops.logical_xor(x, y) - return _logical_op(op_name="logical_xor", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True + ) @templatedoc() @@ -13043,18 +13755,15 @@ def logical_not(x, out=None, name=None): """ if in_dygraph_mode(): return _C_ops.logical_not(x) - return _logical_op(op_name="logical_not", - x=x, - y=None, - name=name, - out=out, - binary_op=False) + return _logical_op( + op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False + ) @templatedoc() def clip(x, min, max, name=None): """ - :old_api: 
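# The four wrappers above all funnel into _logical_op, and in eager mode they
# dispatch straight to _C_ops.  A minimal dygraph sketch, assuming a paddle 2.x
# install (illustrative, not taken from this file):
import paddle

a = paddle.to_tensor([True, False, True])
b = paddle.to_tensor([True, True, False])
print(paddle.logical_and(a, b).numpy())   # [ True False False]
print(paddle.logical_or(a, b).numpy())    # [ True  True  True]
print(paddle.logical_xor(a, b).numpy())   # [False  True  True]
print(paddle.logical_not(a).numpy())      # [False  True False]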
paddle.fluid.layers.clip + :old_api: paddle.fluid.layers.clip ${comment} @@ -13085,21 +13794,20 @@ def clip(x, min, max, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip') if name is None: - name = unique_name.generate_with_ignorable_key(".".join( - [helper.name, 'tmp'])) - - out = helper.create_variable(type=x.type, - name=name, - dtype=x.dtype, - persistable=False) - - helper.append_op(type="clip", - inputs={"X": x}, - attrs={ - "min": min, - "max": max - }, - outputs={"Out": out}) + name = unique_name.generate_with_ignorable_key( + ".".join([helper.name, 'tmp']) + ) + + out = helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False + ) + + helper.append_op( + type="clip", + inputs={"X": x}, + attrs={"min": min, "max": max}, + outputs={"Out": out}, + ) return out @@ -13143,18 +13851,20 @@ def clip_by_norm(x, max_norm, name=None): check_type(max_norm, 'max_norm', (float), 'clip_by_norm') if name is None: - name = unique_name.generate_with_ignorable_key(".".join( - [helper.name, 'tmp'])) + name = unique_name.generate_with_ignorable_key( + ".".join([helper.name, 'tmp']) + ) - out = helper.create_variable(type=x.type, - name=name, - dtype=x.dtype, - persistable=False) + out = helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False + ) - helper.append_op(type="clip_by_norm", - inputs={"X": x}, - attrs={"max_norm": max_norm}, - outputs={"Out": out}) + helper.append_op( + type="clip_by_norm", + inputs={"X": x}, + attrs={"max_norm": max_norm}, + outputs={"Out": out}, + ) return out @@ -13193,10 +13903,9 @@ def mean(x, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="mean", - inputs={"X": x}, - attrs={}, - outputs={"Out": out}) + helper.append_op( + type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out} + ) return out @@ -13231,10 +13940,12 @@ def merge_selected_rows(x, name=None): helper = LayerHelper("merge_selected_rows", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="merge_selected_rows", - inputs={"X": x}, - attrs={}, - outputs={"Out": out}) + helper.append_op( + type="merge_selected_rows", + inputs={"X": x}, + attrs={}, + outputs={"Out": out}, + ) return out @@ -13274,8 +13985,14 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): """ if _non_static_mode(): - return _legacy_C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims, - 'y_num_col_dims', y_num_col_dims) + return _legacy_C_ops.mul( + x, + y, + 'x_num_col_dims', + x_num_col_dims, + 'y_num_col_dims', + y_num_col_dims, + ) inputs = {"X": [x], "Y": [y]} attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims} @@ -13284,13 +14001,9 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="mul", - inputs={ - "X": x, - "Y": y - }, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="mul", inputs={"X": x, "Y": y}, attrs=attrs, outputs={"Out": out} + ) return out @@ -13420,24 +14133,27 @@ def space_to_depth(x, blocksize, name=None): if not (isinstance(blocksize, int)): raise ValueError("blocksize must be a python Int") - check_variable_and_dtype(x, 'x', \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'space_to_depth') + check_variable_and_dtype( + x, + 
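# clip / clip_by_norm above build the op graph by hand; in dygraph the same
# element-wise clamp is a single call.  A minimal sketch, assuming paddle 2.x:
import paddle

x = paddle.to_tensor([-1.5, 0.3, 2.7])
y = paddle.clip(x, min=-1.0, max=1.0)   # [-1.0, 0.3, 1.0]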
'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'space_to_depth', + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="space_to_depth", - inputs={"X": x}, - attrs={"blocksize": blocksize}, - outputs={"Out": out}) + helper.append_op( + type="space_to_depth", + inputs={"X": x}, + attrs={"blocksize": blocksize}, + outputs={"Out": out}, + ) return out -def affine_channel(x, - scale=None, - bias=None, - data_layout='NCHW', - name=None, - act=None): +def affine_channel( + x, scale=None, bias=None, data_layout='NCHW', name=None, act=None +): """ Applies a separate affine transformation to each channel of the input. @@ -13504,14 +14220,12 @@ def affine_channel(x, check_type(bias, 'bias', (Variable, type(None)), 'affine_channel') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="affine_channel", - inputs={ - "X": x, - 'Scale': scale, - 'Bias': bias - }, - attrs={"data_layout": data_layout}, - outputs={"Out": out}) + helper.append_op( + type="affine_channel", + inputs={"X": x, 'Scale': scale, 'Bias': bias}, + attrs={"data_layout": data_layout}, + outputs={"Out": out}, + ) return helper.append_activation(out) @@ -13610,8 +14324,9 @@ def similarity_focus(input, axis, indexes, name=None): """ helper = LayerHelper('similarity_focus', **locals()) # check attrs - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - "similarity_focus") + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], "similarity_focus" + ) check_type(axis, 'axis', int, "similarity_focus") check_type(indexes, 'indexes', list, "similarity_focus") if axis != 1 and axis != 2 and axis != 3: @@ -13620,13 +14335,12 @@ def similarity_focus(input, axis, indexes, name=None): raise ValueError("indexes can not be empty.") out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='similarity_focus', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - "axis": axis, - "indexes": indexes - }) + helper.append_op( + type='similarity_focus', + inputs={'X': input}, + outputs={'Out': out}, + attrs={"axis": axis, "indexes": indexes}, + ) return out @@ -13681,15 +14395,15 @@ def hash(input, hash_size, num_hash=1, name=None): check_type(hash_size, 'hash_size', int, 'hash') check_type(num_hash, 'num_hash', int, 'hash') helper = LayerHelper('hash', **locals()) - out = helper.create_variable_for_type_inference(helper.input_dtype(), - stop_gradient=True) - helper.append_op(type='hash', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'num_hash': num_hash, - 'mod_by': hash_size - }) + out = helper.create_variable_for_type_inference( + helper.input_dtype(), stop_gradient=True + ) + helper.append_op( + type='hash', + inputs={'X': input}, + outputs={'Out': out}, + attrs={'num_hash': num_hash, 'mod_by': hash_size}, + ) return out @@ -13783,8 +14497,9 @@ def grid_sampler(x, grid, name=None): helper = LayerHelper("grid_sampler", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler') - check_variable_and_dtype(grid, 'grid', ['float32', 'float64'], - 'grid_sampler') + check_variable_and_dtype( + grid, 'grid', ['float32', 'float64'], 'grid_sampler' + ) if not isinstance(x, Variable): return ValueError("The x should be a Variable") @@ -13796,10 +14511,9 @@ def grid_sampler(x, grid, name=None): attrs = {'use_cudnn': False} if core.is_compiled_with_rocm() else {} - helper.append_op(type='grid_sampler', - inputs=ipts, - outputs={'Output': out}, - attrs=attrs) + helper.append_op( 
+ type='grid_sampler', inputs=ipts, outputs={'Output': out}, attrs=attrs + ) return out @@ -13892,33 +14606,30 @@ def add_position_encoding(input, alpha, beta, name=None): """ if _non_static_mode(): - return _legacy_C_ops.add_position_encoding(input, "alpha", alpha, - "beta", beta) + return _legacy_C_ops.add_position_encoding( + input, "alpha", alpha, "beta", beta + ) helper = LayerHelper('add_position_encoding', **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - "add_position_encoding") + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], "add_position_encoding" + ) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type="add_position_encoding", - inputs={"X": input}, - outputs={"Out": out}, - attrs={ - "alpha": alpha, - "beta": beta - }) + helper.append_op( + type="add_position_encoding", + inputs={"X": input}, + outputs={"Out": out}, + attrs={"alpha": alpha, "beta": beta}, + ) return out -def bilinear_tensor_product(x, - y, - size, - act=None, - name=None, - param_attr=None, - bias_attr=None): +def bilinear_tensor_product( + x, y, size, act=None, name=None, param_attr=None, bias_attr=None +): r""" :api_attr: Static Graph @@ -13969,23 +14680,21 @@ def bilinear_tensor_product(x, param_shape = [size, x.shape[1], y.shape[1]] - w = helper.create_parameter(attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - is_bias=False) + w = helper.create_parameter( + attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False + ) out = helper.create_variable_for_type_inference(dtype=dtype) inputs = {"X": x, "Y": y, "Weight": w} if helper.bias_attr: bias_size = [1, size] - bias = helper.create_parameter(attr=helper.bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=True) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True + ) inputs["Bias"] = bias - helper.append_op(type="bilinear_tensor_product", - inputs=inputs, - outputs={"Out": out}) + helper.append_op( + type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out} + ) # add activation return helper.append_activation(out) @@ -14035,10 +14744,12 @@ def get_tensor_from_selected_rows(x, name=None): ) helper = LayerHelper('get_tensor_from_selected_rows', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='get_tensor_from_selected_rows', - inputs={'X': x}, - outputs={'Out': out}, - attrs={}) + helper.append_op( + type='get_tensor_from_selected_rows', + inputs={'X': x}, + outputs={'Out': out}, + attrs={}, + ) return out @@ -14108,10 +14819,12 @@ def shuffle_channel(x, group, name=None): if not isinstance(group, int): raise TypeError("group must be int type") - helper.append_op(type="shuffle_channel", - inputs={"X": x}, - outputs={"Out": out}, - attrs={"group": group}) + helper.append_op( + type="shuffle_channel", + inputs={"X": x}, + outputs={"Out": out}, + attrs={"group": group}, + ) return out @@ -14149,8 +14862,9 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): input = paddle.randn([6, 4, 2, 2]) out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) """ - return paddle.nn.functional.temporal_shift(x, seg_num, shift_ratio, name, - data_format) + return paddle.nn.functional.temporal_shift( + x, seg_num, shift_ratio, name, data_format + ) class PyFuncRegistry(object): @@ -14209,7 +14923,7 @@ class PyFuncRegistry(object): func_ret = self._func(*args[idx:], **kwargs) if not 
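# temporal_shift above is a thin wrapper over paddle.nn.functional.temporal_shift.
# A minimal sketch mirroring the docstring example (input is N*T, C, H, W):
import paddle
import paddle.nn.functional as F

x = paddle.randn([6, 4, 2, 2])                          # N=3 clips of T=2 frames
out = F.temporal_shift(x, seg_num=2, shift_ratio=0.25)  # same shape as x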
isinstance(func_ret, (list, tuple)): - func_ret = (func_ret, ) + func_ret = (func_ret,) ret = [] for each_ret in func_ret: @@ -14425,11 +15139,13 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): out_list = out else: raise TypeError( - 'Output must be Variable/list(Variable)/tuple(Variable)') + 'Output must be Variable/list(Variable)/tuple(Variable)' + ) fwd_func_id = PyFuncRegistry(func).id - bwd_func_id = PyFuncRegistry( - backward_func).id if backward_func is not None else -1 + bwd_func_id = ( + PyFuncRegistry(backward_func).id if backward_func is not None else -1 + ) for each_out in out_list: if len(each_out.shape) == 0: @@ -14449,18 +15165,22 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): for v in skip_vars_in_backward_input: if not v.name in fwd_in_out: raise ValueError( - 'Variable {} is not found in forward inputs and outputs'. - format(v.name)) + 'Variable {} is not found in forward inputs and outputs'.format( + v.name + ) + ) backward_skip_vars.add(v.name) - helper.append_op(type='py_func', - inputs={'X': x}, - outputs={'Out': out_list}, - attrs={ - 'forward_callable_id': fwd_func_id, - 'backward_callable_id': bwd_func_id, - 'backward_skip_vars': list(backward_skip_vars) - }) + helper.append_op( + type='py_func', + inputs={'X': x}, + outputs={'Out': out_list}, + attrs={ + 'forward_callable_id': fwd_func_id, + 'backward_callable_id': bwd_func_id, + 'backward_skip_vars': list(backward_skip_vars), + }, + ) return out @@ -14470,13 +15190,15 @@ py_func.registered_func_num = PyFuncRegistry.registered_func_num @templatedoc() -def psroi_pool(input, - rois, - output_channels, - spatial_scale, - pooled_height, - pooled_width, - name=None): +def psroi_pool( + input, + rois, + output_channels, + spatial_scale, + pooled_height, + pooled_width, + name=None, +): """ ${comment} @@ -14524,29 +15246,30 @@ def psroi_pool(input, raise TypeError("pooled_width must be int type") dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='psroi_pool', - inputs={ - 'X': input, - 'ROIs': rois - }, - outputs={'Out': out}, - attrs={ - 'output_channels': output_channels, - 'spatial_scale': spatial_scale, - 'pooled_height': pooled_height, - 'pooled_width': pooled_width - }) + helper.append_op( + type='psroi_pool', + inputs={'X': input, 'ROIs': rois}, + outputs={'Out': out}, + attrs={ + 'output_channels': output_channels, + 'spatial_scale': spatial_scale, + 'pooled_height': pooled_height, + 'pooled_width': pooled_width, + }, + ) return out @templatedoc() -def prroi_pool(input, - rois, - spatial_scale=1.0, - pooled_height=1, - pooled_width=1, - batch_roi_nums=None, - name=None): +def prroi_pool( + input, + rois, + spatial_scale=1.0, + pooled_height=1, + pooled_width=1, + batch_roi_nums=None, + name=None, +): """ The precise roi pooling implementation for paddle. 
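# A rough static-graph sketch of py_func, which registers a Python callable
# (via PyFuncRegistry above) and wires it in as an operator.  The output
# variable must be created by the caller; names here are illustrative only.
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()

def double(x):
    return 2.0 * np.array(x)   # the callable sees the input tensor, returns numpy

x = fluid.data(name="x", shape=[2, 3], dtype="float32")
out = fluid.default_main_program().current_block().create_var(
    name="double_out", dtype="float32", shape=[2, 3]
)
fluid.layers.py_func(func=double, x=x, out=out)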
Reference: https://arxiv.org/pdf/1807.11590.pdf @@ -14609,14 +15332,16 @@ def prroi_pool(input, inputs_op = {'X': input, 'ROIs': rois} if batch_roi_nums is not None: inputs_op['BatchRoINums'] = batch_roi_nums - helper.append_op(type='prroi_pool', - inputs=inputs_op, - outputs={'Out': out}, - attrs={ - 'spatial_scale': spatial_scale, - 'pooled_height': pooled_height, - 'pooled_width': pooled_width - }) + helper.append_op( + type='prroi_pool', + inputs=inputs_op, + outputs={'Out': out}, + attrs={ + 'spatial_scale': spatial_scale, + 'pooled_height': pooled_height, + 'pooled_width': pooled_width, + }, + ) return out @@ -14645,23 +15370,23 @@ def pixel_shuffle(x, upscale_factor): Examples: .. code-block:: python - # declarative mode - import paddle.fluid as fluid - import numpy as np - input = fluid.data(name="input", shape=[2,9,4,4]) - output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + # declarative mode + import paddle.fluid as fluid + import numpy as np + input = fluid.data(name="input", shape=[2,9,4,4]) + output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) - input_data = np.random.rand(2,9,4,4).astype("float32") - output_data = exe.run(fluid.default_main_program(), + input_data = np.random.rand(2,9,4,4).astype("float32") + output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) - # print(output.shape) - # (2L, 1L, 12L, 12L) + # print(output.shape) + # (2L, 1L, 12L, 12L) """ @@ -14673,10 +15398,12 @@ def pixel_shuffle(x, upscale_factor): if not isinstance(upscale_factor, int): raise TypeError("upscale factor must be int type") - helper.append_op(type="pixel_shuffle", - inputs={"X": x}, - outputs={"Out": out}, - attrs={"upscale_factor": upscale_factor}) + helper.append_op( + type="pixel_shuffle", + inputs={"X": x}, + outputs={"Out": out}, + attrs={"upscale_factor": upscale_factor}, + ) return out @@ -14726,8 +15453,9 @@ def fsp_matrix(x, y): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix') helper = LayerHelper('fsp_matrix', **locals()) - out = helper.create_variable_for_type_inference(dtype=helper.input_dtype( - input_param_name='x')) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype(input_param_name='x') + ) helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out @@ -14777,15 +15505,15 @@ def continuous_value_model(input, cvm, use_cvm=True): """ helper = LayerHelper('cvm', **locals()) out = helper.create_variable(dtype=input.dtype) - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'cvm') - helper.append_op(type='cvm', - inputs={ - 'X': [input], - 'CVM': [cvm] - }, - outputs={'Y': [out]}, - attrs={"use_cvm": use_cvm}) + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'cvm' + ) + helper.append_op( + type='cvm', + inputs={'X': [input], 'CVM': [cvm]}, + outputs={'Y': [out]}, + attrs={"use_cvm": use_cvm}, + ) return out @@ -14831,11 +15559,14 @@ def where(condition): helper = LayerHelper("where_index", **locals()) out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.INT64) - - helper.append_op(type='where_index', - inputs={'Condition': condition}, - outputs={'Out': [out]}) + 
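# pixel_shuffle rearranges channels into spatial blocks: C -> C / r**2,
# H -> H * r, W -> W * r.  A minimal dygraph sketch, assuming paddle 2.x:
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 9, 4, 4])
y = F.pixel_shuffle(x, upscale_factor=3)
# y.shape == [2, 1, 12, 12], matching the "(2L, 1L, 12L, 12L)" comment above.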
dtype=core.VarDesc.VarType.INT64 + ) + + helper.append_op( + type='where_index', + inputs={'Condition': condition}, + outputs={'Out': [out]}, + ) return out @@ -14894,21 +15625,21 @@ def unique(x, dtype='int32'): out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1] """ - check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'], - "unique") + check_variable_and_dtype( + x, "x", ['float32', 'float64', 'int32', 'int64'], "unique" + ) helper = LayerHelper("unique", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) index = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='unique', - inputs={'X': x}, - attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, - outputs={ - 'Out': [out], - 'Index': [index] - }) + helper.append_op( + type='unique', + inputs={'X': x}, + attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, + outputs={'Out': [out], 'Index': [index]}, + ) return out, index @@ -14941,11 +15672,13 @@ def unique_with_counts(x, dtype='int32'): # count is [1, 3, 1, 1] # x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,) """ - check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'], - "unique_with_counts") + check_variable_and_dtype( + x, "x", ['float32', 'float64', 'int32', 'int64'], "unique_with_counts" + ) if not (dtype == 'int32' or dtype == 'int64'): raise TypeError( - "Op unique_with_counts, index dtype must be int32 or int64") + "Op unique_with_counts, index dtype must be int32 or int64" + ) if x is None or len(x.shape) != 1: raise ValueError( @@ -14960,33 +15693,33 @@ def unique_with_counts(x, dtype='int32'): count = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='unique_with_counts', - inputs={'X': x}, - attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, - outputs={ - 'Out': [out], - 'Index': [index], - 'Count': [count] - }) + helper.append_op( + type='unique_with_counts', + inputs={'X': x}, + attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, + outputs={'Out': [out], 'Index': [index], 'Count': [count]}, + ) return out, index, count -def deformable_conv(input, - offset, - mask, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=None, - deformable_groups=None, - im2col_step=None, - param_attr=None, - bias_attr=None, - modulated=True, - name=None): +def deformable_conv( + input, + offset, + mask, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=None, + deformable_groups=None, + im2col_step=None, + param_attr=None, + bias_attr=None, + modulated=True, + name=None, +): r""" :api_attr: Static Graph @@ -15117,10 +15850,12 @@ def deformable_conv(input, num_filters=2, filter_size=filter_size, padding=1, modulated=False) """ - check_variable_and_dtype(input, "input", ['float32', 'float64'], - 'deformable_conv') - check_variable_and_dtype(offset, "offset", ['float32', 'float64'], - 'deformable_conv') + check_variable_and_dtype( + input, "input", ['float32', 'float64'], 'deformable_conv' + ) + check_variable_and_dtype( + offset, "offset", ['float32', 'float64'], 'deformable_conv' + ) check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv') num_channels = input.shape[1] @@ -15155,52 +15890,58 @@ def deformable_conv(input, raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " - "filter size.".format(filter_elem_num)) - std = (2.0 / filter_elem_num)**0.5 + "filter size.".format(filter_elem_num) + ) + std = 
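# unique / unique_with_counts above return the distinct values plus index
# (and count) tensors.  A dygraph sketch with the 2.x tensor API, whose keyword
# flags fold the two variants into one call (assumed signature):
import paddle

x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
out, index, counts = paddle.unique(x, return_index=True, return_counts=True)
# counts[i] is how many times out[i] occurs in x; index holds first occurrences.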
(2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, - default_initializer=_get_default_param_initializer()) + default_initializer=_get_default_param_initializer(), + ) pre_bias = helper.create_variable_for_type_inference(dtype) if modulated: - helper.append_op(type='deformable_conv', - inputs={ - 'Input': input, - 'Filter': filter_param, - 'Offset': offset, - 'Mask': mask, - }, - outputs={"Output": pre_bias}, - attrs={ - 'strides': stride, - 'paddings': padding, - 'dilations': dilation, - 'groups': groups, - 'deformable_groups': deformable_groups, - 'im2col_step': im2col_step, - }) + helper.append_op( + type='deformable_conv', + inputs={ + 'Input': input, + 'Filter': filter_param, + 'Offset': offset, + 'Mask': mask, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'deformable_groups': deformable_groups, + 'im2col_step': im2col_step, + }, + ) else: - helper.append_op(type='deformable_conv_v1', - inputs={ - 'Input': input, - 'Filter': filter_param, - 'Offset': offset, - }, - outputs={"Output": pre_bias}, - attrs={ - 'strides': stride, - 'paddings': padding, - 'dilations': dilation, - 'groups': groups, - 'deformable_groups': deformable_groups, - 'im2col_step': im2col_step, - }) + helper.append_op( + type='deformable_conv_v1', + inputs={ + 'Input': input, + 'Filter': filter_param, + 'Offset': offset, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'deformable_groups': deformable_groups, + 'im2col_step': im2col_step, + }, + ) output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) return output @@ -15276,23 +16017,26 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): y = F.unfold(x, [3, 3], 1, 1, 1) """ - return paddle.nn.functional.unfold(x, kernel_sizes, strides, paddings, - dilations, name) - - -def deformable_roi_pooling(input, - rois, - trans, - no_trans=False, - spatial_scale=1.0, - group_size=[1, 1], - pooled_height=1, - pooled_width=1, - part_size=None, - sample_per_part=1, - trans_std=0.1, - position_sensitive=False, - name=None): + return paddle.nn.functional.unfold( + x, kernel_sizes, strides, paddings, dilations, name + ) + + +def deformable_roi_pooling( + input, + rois, + trans, + no_trans=False, + spatial_scale=1.0, + group_size=[1, 1], + pooled_height=1, + pooled_width=1, + part_size=None, + sample_per_part=1, + trans_std=0.1, + position_sensitive=False, + name=None, +): r""" Deformable ROI Pooling Layer @@ -15397,17 +16141,22 @@ def deformable_roi_pooling(input, position_sensitive=False) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'deformable_roi_pooling') - check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], - 'deformable_roi_pooling') - check_variable_and_dtype(trans, 'trans', ['float32', 'float64'], - 'deformable_roi_pooling') - check_type(group_size, 'group_size', (list, tuple), - 'deformable_roi_pooling') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'deformable_roi_pooling' + ) + check_variable_and_dtype( + rois, 'rois', ['float32', 'float64'], 'deformable_roi_pooling' + ) + check_variable_and_dtype( + trans, 'trans', ['float32', 'float64'], 'deformable_roi_pooling' + ) + check_type( + group_size, 'group_size', (list, tuple), 'deformable_roi_pooling' + ) if part_size is not None: - 
check_type(part_size, 'part_size', (list, tuple), - 'deformable_roi_pooling') + check_type( + part_size, 'part_size', (list, tuple), 'deformable_roi_pooling' + ) input_channels = input.shape[1] if position_sensitive == False: @@ -15425,27 +16174,22 @@ def deformable_roi_pooling(input, dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) top_count = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type="deformable_psroi_pooling", - inputs={ - "Input": input, - "ROIs": rois, - "Trans": trans - }, - outputs={ - "Output": output, - "TopCount": top_count - }, - attrs={ - "no_trans": no_trans, - "spatial_scale": spatial_scale, - "output_dim": output_channels, - "group_size": group_size, - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "part_size": part_size, - "sample_per_part": sample_per_part, - "trans_std": trans_std - }) + helper.append_op( + type="deformable_psroi_pooling", + inputs={"Input": input, "ROIs": rois, "Trans": trans}, + outputs={"Output": output, "TopCount": top_count}, + attrs={ + "no_trans": no_trans, + "spatial_scale": spatial_scale, + "output_dim": output_channels, + "group_size": group_size, + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "part_size": part_size, + "sample_per_part": sample_per_part, + "trans_std": trans_std, + }, + ) return output @@ -15497,27 +16241,31 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): # [[-1], [1]] """ if in_dygraph_mode(): - return _C_ops.shard_index(input, index_num, nshards, shard_id, - ignore_value) + return _C_ops.shard_index( + input, index_num, nshards, shard_id, ignore_value + ) check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index') op_type = 'shard_index' helper = LayerHelper(op_type, **locals()) if shard_id < 0 or shard_id >= nshards: - raise ValueError('The shard_id(%d) should be in [0, %d)' % - (shard_id, nshards)) + raise ValueError( + 'The shard_id(%d) should be in [0, %d)' % (shard_id, nshards) + ) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type=op_type, - inputs={'X': [input]}, - outputs={'Out': out}, - attrs={ - 'index_num': index_num, - 'nshards': nshards, - 'shard_id': shard_id, - 'ignore_value': ignore_value - }, - stop_gradient=True) + helper.append_op( + type=op_type, + inputs={'X': [input]}, + outputs={'Out': out}, + attrs={ + 'index_num': index_num, + 'nshards': nshards, + 'shard_id': shard_id, + 'ignore_value': ignore_value, + }, + stop_gradient=True, + ) return out @@ -15572,22 +16320,22 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None): print(out) # [[0.66666667, 1.66666667,3., 4.]] """ if _non_static_mode(): - return _legacy_C_ops.hard_swish(x, 'threshold', threshold, 'scale', - scale, 'offset', offset) + return _legacy_C_ops.hard_swish( + x, 'threshold', threshold, 'scale', scale, 'offset', offset + ) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hard_swish') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hard_swish' + ) helper = LayerHelper('hard_swish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='hard_swish', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'threshold': threshold, - 'scale': scale, - 'offset': offset - }) + helper.append_op( + type='hard_swish', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold, 'scale': scale, 'offset': offset}, + ) return out @@ 
-15658,15 +16406,20 @@ def mish(x, threshold=20, name=None): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish') check_type(threshold, 'threshold', (float, int), 'mish') - assert threshold > 0, "threshold of mish should be greater than 0, " \ - "but got {}".format(threshold) + assert ( + threshold > 0 + ), "threshold of mish should be greater than 0, " "but got {}".format( + threshold + ) helper = LayerHelper('mish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='mish', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'threshold': threshold}) + helper.append_op( + type='mish', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold}, + ) return out @@ -15736,12 +16489,9 @@ def gather_tree(ids, parents): @deprecated(since="2.0.0", update_to="paddle.uniform") @templatedoc() -def uniform_random(shape, - dtype='float32', - min=-1.0, - max=1.0, - seed=0, - name=None): +def uniform_random( + shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None +): """ This OP returns a Tensor filled with random values sampled from a uniform distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``. @@ -15821,34 +16571,47 @@ def uniform_random(shape, if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) - return _C_ops.uniform_random(shape, dtype, float(min), float(max), seed, - _current_expected_place()) + return _C_ops.uniform_random( + shape, + dtype, + float(min), + float(max), + seed, + _current_expected_place(), + ) elif _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - return _legacy_C_ops.uniform_random('shape', - shape, 'min', float(min), 'max', - float(max), 'seed', seed, 'dtype', - dtype) + return _legacy_C_ops.uniform_random( + 'shape', + shape, + 'min', + float(min), + 'max', + float(max), + 'seed', + seed, + 'dtype', + dtype, + ) check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand') - check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'), - 'uniform_random/rand') + check_dtype( + dtype, 'dtype', ('float32', 'float64', 'uint16'), 'uniform_random/rand' + ) check_type(min, 'min', (float, int, Variable), 'uniform_random/rand') check_type(max, 'max', (float, int, Variable), 'uniform_random/rand') inputs = dict() attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype} - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='uniform_random/rand') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand' + ) helper = LayerHelper("uniform_random", **locals()) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="uniform_random", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) utils.try_set_static_shape_tensor(out, shape) return out @@ -15884,11 +16647,13 @@ def unbind(input, axis=0): helper = LayerHelper("unbind", **locals()) check_type(input, 'input', (Variable), 'unbind') dtype = helper.input_dtype() - check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], - 'unbind') + check_dtype( + dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind' + ) if not isinstance(axis, (int)): - raise TypeError("The type of 'axis' must be int, but received %s." % - (type(axis))) + raise TypeError( + "The type of 'axis' must be int, but received %s." 
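# unbind splits a tensor along `axis` into tensors with that dimension removed,
# and uniform_random draws from [min, max).  A dygraph sketch, assuming paddle 2.x:
import paddle

x = paddle.arange(6, dtype="float32").reshape([2, 3])
rows = paddle.unbind(x, axis=0)                 # two tensors of shape [3]
cols = paddle.unbind(x, axis=1)                 # three tensors of shape [2]
r = paddle.uniform([2, 3], min=-1.0, max=1.0)   # samples in [-1, 1)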
% (type(axis)) + ) if isinstance(axis, np.generic): axis = np.asscalar(axis) input_shape = input.shape @@ -15899,8 +16664,10 @@ def unbind(input, axis=0): for i in range(num) ] - helper.append_op(type="unbind", - inputs={"X": input}, - outputs={"Out": outs}, - attrs={"axis": axis}) + helper.append_op( + type="unbind", + inputs={"X": input}, + outputs={"Out": outs}, + attrs={"axis": axis}, + ) return outs diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index b1b4a1a55f9bc4b6cf798be9dc9c139d8f4a0d4f..61e12a98a012713f96e788db4ab20dc676885638 100755 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -13,17 +13,27 @@ # limitations under the License. import os -from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code +from .layer_function_generator import ( + generate_layer_fn, + generate_activation_fn, + generate_inplace_fn, + add_sample_code, +) from .. import core from ..framework import convert_np_dtype_to_dtype_, Variable, in_dygraph_mode -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from paddle.utils import deprecated from paddle import _C_ops, _legacy_C_ops import paddle __deprecated_func_name__ = { 'tanh_shrink': 'tanhshrink', - 'logsigmoid': 'log_sigmoid' + 'logsigmoid': 'log_sigmoid', } __activations_noattr__ = [ @@ -36,9 +46,28 @@ __activations_noattr__ = [ ] __unary_func__ = [ - 'exp', 'expm1', 'atan', 'sqrt', 'rsqrt', 'abs', 'ceil', 'floor', 'cos', - 'tan', 'acos', 'sin', 'sinh', 'asin', 'cosh', 'round', 'reciprocal', - 'square', 'acosh', 'asinh', 'atanh', 'lgamma' + 'exp', + 'expm1', + 'atan', + 'sqrt', + 'rsqrt', + 'abs', + 'ceil', + 'floor', + 'cos', + 'tan', + 'acos', + 'sin', + 'sinh', + 'asin', + 'cosh', + 'round', + 'reciprocal', + 'square', + 'acosh', + 'asinh', + 'atanh', + 'lgamma', ] __inplace_unary_func__ = [ @@ -80,8 +109,9 @@ for _OP in set(__activations_noattr__): if _OP in __deprecated_func_name__: _new_OP = __deprecated_func_name__[_OP] _func = generate_activation_fn(_OP) - _func = deprecated(since="2.0.0", - update_to="paddle.nn.functional.%s" % (_new_OP))(_func) + _func = deprecated( + since="2.0.0", update_to="paddle.nn.functional.%s" % (_new_OP) + )(_func) globals()[_OP] = _func for _OP in set(__unary_func__): @@ -101,7 +131,8 @@ for _OP in set(__inplace_unary_func__): globals()[_OP] = _func add_sample_code( - globals()["sigmoid"], r""" + globals()["sigmoid"], + r""" Examples: .. code-block:: python @@ -113,10 +144,12 @@ Examples: print(out) # [0.40131234 0.450166 0.52497919 0.57444252] -""") +""", +) add_sample_code( - globals()["silu"], r""" + globals()["silu"], + r""" Examples: .. code-block:: python @@ -128,10 +161,12 @@ Examples: print(out) # [ 0.7310586 1.7615942 2.8577224, 3.9280552 ] -""") +""", +) add_sample_code( - globals()["logsigmoid"], r""" + globals()["logsigmoid"], + r""" Examples: .. code-block:: python @@ -143,10 +178,12 @@ Examples: print(out) # [-0.91301525 -0.79813887 -0.64439666 -0.55435524] -""") +""", +) add_sample_code( - globals()["exp"], r""" + globals()["exp"], + r""" Examples: .. code-block:: python @@ -157,10 +194,12 @@ Examples: print(out) # [0.67032005 0.81873075 1.10517092 1.34985881] -""") +""", +) add_sample_code( - globals()["expm1"], r""" + globals()["expm1"], + r""" Examples: .. 
code-block:: python @@ -171,10 +210,12 @@ Examples: print(out) # [-0.32967997, -0.18126924, 0.10517092, 0.34985882] -""") +""", +) add_sample_code( - globals()["tanh"], r""" + globals()["tanh"], + r""" Examples: .. code-block:: python @@ -185,10 +226,12 @@ Examples: print(out) # [-0.37994896 -0.19737532 0.09966799 0.29131261] -""") +""", +) add_sample_code( - globals()["atan"], r""" + globals()["atan"], + r""" Examples: .. code-block:: python @@ -199,10 +242,12 @@ Examples: print(out) # [-0.38050638 -0.19739556 0.09966865 0.29145679] -""") +""", +) add_sample_code( - globals()["tanh_shrink"], r""" + globals()["tanh_shrink"], + r""" Examples: .. code-block:: python @@ -214,10 +259,12 @@ Examples: print(out) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] -""") +""", +) add_sample_code( - globals()["sqrt"], r""" + globals()["sqrt"], + r""" Examples: .. code-block:: python @@ -228,10 +275,12 @@ Examples: print(out) # [0.31622777 0.4472136 0.54772256 0.63245553] -""") +""", +) add_sample_code( - globals()["rsqrt"], r""" + globals()["rsqrt"], + r""" Examples: .. code-block:: python @@ -242,10 +291,12 @@ Examples: print(out) # [3.16227766 2.23606798 1.82574186 1.58113883] -""") +""", +) add_sample_code( - globals()["abs"], r""" + globals()["abs"], + r""" Examples: .. code-block:: python @@ -256,10 +307,12 @@ Examples: print(out) # [0.4 0.2 0.1 0.3] -""") +""", +) add_sample_code( - globals()["ceil"], r""" + globals()["ceil"], + r""" Examples: .. code-block:: python @@ -270,10 +323,12 @@ Examples: print(out) # [-0. -0. 1. 1.] -""") +""", +) add_sample_code( - globals()["floor"], r""" + globals()["floor"], + r""" Examples: .. code-block:: python @@ -284,10 +339,12 @@ Examples: print(out) # [-1. -1. 0. 0.] -""") +""", +) add_sample_code( - globals()["cos"], r""" + globals()["cos"], + r""" Examples: .. code-block:: python @@ -298,10 +355,12 @@ Examples: print(out) # [0.92106099 0.98006658 0.99500417 0.95533649] -""") +""", +) add_sample_code( - globals()["tan"], r""" + globals()["tan"], + r""" Examples: .. code-block:: python @@ -312,10 +371,12 @@ Examples: print(out) # [-0.42279324, -0.20271005, 0.10033467, 0.30933627] -""") +""", +) add_sample_code( - globals()["acos"], r""" + globals()["acos"], + r""" Examples: .. code-block:: python @@ -326,10 +387,12 @@ Examples: print(out) # [1.98231317 1.77215425 1.47062891 1.26610367] -""") +""", +) add_sample_code( - globals()["sin"], r""" + globals()["sin"], + r""" Examples: .. code-block:: python @@ -340,10 +403,12 @@ Examples: print(out) # [-0.38941834 -0.19866933 0.09983342 0.29552021] -""") +""", +) add_sample_code( - globals()["asin"], r""" + globals()["asin"], + r""" Examples: .. code-block:: python @@ -354,10 +419,12 @@ Examples: print(out) # [-0.41151685 -0.20135792 0.10016742 0.30469265] -""") +""", +) add_sample_code( - globals()["cosh"], r""" + globals()["cosh"], + r""" Examples: .. code-block:: python @@ -368,10 +435,12 @@ Examples: print(out) # [1.08107237 1.02006676 1.00500417 1.04533851] -""") +""", +) add_sample_code( - globals()["sinh"], r""" + globals()["sinh"], + r""" Examples: .. code-block:: python @@ -382,10 +451,12 @@ Examples: print(out) # [-0.41075233 -0.201336 0.10016675 0.30452029] -""") +""", +) add_sample_code( - globals()["asinh"], r""" + globals()["asinh"], + r""" Examples: .. code-block:: python @@ -396,10 +467,12 @@ Examples: print(out) # [-0.39003533, -0.19869010, 0.09983408, 0.29567307] -""") +""", +) add_sample_code( - globals()["acosh"], r""" + globals()["acosh"], + r""" Examples: .. 
code-block:: python @@ -410,10 +483,12 @@ Examples: print(out) # [0. , 1.76274729, 2.06343699, 2.29243159] -""") +""", +) add_sample_code( - globals()["atanh"], r""" + globals()["atanh"], + r""" Examples: .. code-block:: python @@ -424,10 +499,12 @@ Examples: print(out) # [-0.42364895, -0.20273256, 0.10033535, 0.30951962] -""") +""", +) add_sample_code( - globals()["round"], r""" + globals()["round"], + r""" Examples: .. code-block:: python @@ -438,10 +515,12 @@ Examples: print(out) # [-1. -0. 1. 2.] -""") +""", +) add_sample_code( - globals()["reciprocal"], r""" + globals()["reciprocal"], + r""" Examples: .. code-block:: python @@ -452,10 +531,12 @@ Examples: print(out) # [-2.5 -5. 10. 3.33333333] -""") +""", +) add_sample_code( - globals()["square"], r""" + globals()["square"], + r""" Examples: .. code-block:: python @@ -466,7 +547,8 @@ Examples: print(out) # [0.16 0.04 0.01 0.09] -""") +""", +) _softplus_ = generate_layer_fn('softplus') @@ -515,7 +597,8 @@ Examples: """ add_sample_code( - globals()["softsign"], r""" + globals()["softsign"], + r""" Examples: .. code-block:: python @@ -527,14 +610,16 @@ Examples: print(out) # [-0.285714, -0.166667, 0.0909091, 0.230769] -""") +""", +) _softshrink_ = generate_layer_fn('softshrink') def softshrink(x, alpha=None): - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'softshrink') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'softshrink' + ) locals_var = locals().copy() kwargs = dict() @@ -582,8 +667,9 @@ _hard_shrink_ = generate_layer_fn('hard_shrink') @deprecated(since="2.0.0", update_to="paddle.nn.functional.hardshrink") def hard_shrink(x, threshold=None): - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hard_shrink') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hard_shrink' + ) locals_var = locals().copy() kwargs = dict() @@ -593,20 +679,25 @@ def hard_shrink(x, threshold=None): return _hard_shrink_(**kwargs) -hard_shrink.__doc__ = _hard_shrink_.__doc__ + """ +hard_shrink.__doc__ = ( + _hard_shrink_.__doc__ + + """ Examples: >>> import paddle.fluid as fluid >>> data = fluid.layers.data(name="input", shape=[784]) >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3) """ +) _cum_sum_ = generate_layer_fn('cumsum') -@deprecated(since="2.0.0", - update_to="paddle.cumsum", - reason="New APIs for Paddle 2.0 are coming.") +@deprecated( + since="2.0.0", + update_to="paddle.cumsum", + reason="New APIs for Paddle 2.0 are coming.", +) def cumsum(x, axis=None, exclusive=None, reverse=None): check_type(x, 'x', (Variable), 'cumsum') locals_var = locals().copy() @@ -645,8 +736,9 @@ _thresholded_relu_ = generate_layer_fn('thresholded_relu') def thresholded_relu(x, threshold=None): - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'thresholded_relu') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu' + ) locals_var = locals().copy() kwargs = dict() diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 087cffcfe3b323aaf7251b39ef674d2784fe9eca..f2ab7da4c84cb86be09e8780ce0b14ff4c883ce4 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -31,6 +31,7 @@ from ..layer_helper import LayerHelper from ..framework import _non_static_mode from ..param_attr import ParamAttr from ..data_feeder import check_variable_and_dtype, check_type, check_dtype + try: from collections.abc import Sequence except: @@ -63,7 +64,7 @@ __all__ = [ 
class RNNCell(object): """ - :api_attr: Static Graph + :api_attr: Static Graph RNNCell is the base class for abstraction representing the calculations mapping the input and state to the output and new state. It is suitable to @@ -95,12 +96,14 @@ class RNNCell(object): def __call__(self, inputs, states, **kwargs): return self.call(inputs, states, **kwargs) - def get_initial_states(self, - batch_ref, - shape=None, - dtype='float32', - init_value=0, - batch_dim_idx=0): + def get_initial_states( + self, + batch_ref, + shape=None, + dtype='float32', + init_value=0, + batch_dim_idx=0, + ): r""" Generate initialized states according to provided shape, data type and value. @@ -126,9 +129,12 @@ class RNNCell(object): Variable: tensor variable[s] packed in the same structure provided \ by shape, representing the initialized states. """ - check_variable_and_dtype(batch_ref, 'batch_ref', - ['float32', 'float64', 'int32', 'int64'], - 'RNNCell') + check_variable_and_dtype( + batch_ref, + 'batch_ref', + ['float32', 'float64', 'int32', 'int64'], + 'RNNCell', + ) check_type(shape, 'shape', (list, tuple, type(None), int), 'RNNCell') if isinstance(shape, (list, tuple)): shapes = map_structure(lambda x: x, shape) @@ -144,17 +150,17 @@ class RNNCell(object): def _is_shape_sequence(seq): """For shape, list/tuple of integer is the finest-grained objection""" - if (isinstance(seq, list) or isinstance(seq, tuple)): - if reduce(lambda flag, x: isinstance(x, int) and flag, seq, - True): + if isinstance(seq, list) or isinstance(seq, tuple): + if reduce( + lambda flag, x: isinstance(x, int) and flag, seq, True + ): return False # TODO: Add check for the illegal if isinstance(seq, dict): return True - return (isinstance(seq, Sequence) and not isinstance(seq, str)) + return isinstance(seq, Sequence) and not isinstance(seq, str) class Shape(object): - def __init__(self, shape): self.shape = shape if shape[0] == -1 else ([-1] + list(shape)) @@ -180,7 +186,11 @@ class RNNCell(object): shape=shape.shape, dtype=dtype, value=init_value, - input_dim_idx=batch_dim_idx), states_shapes, states_dtypes) + input_dim_idx=batch_dim_idx, + ), + states_shapes, + states_dtypes, + ) return init_states @property @@ -196,7 +206,8 @@ class RNNCell(object): `get_initial_states`. """ raise NotImplementedError( - "Please add implementaion for `state_shape` in the used cell.") + "Please add implementaion for `state_shape` in the used cell." + ) @property def state_dtype(self): @@ -211,12 +222,13 @@ class RNNCell(object): `get_initial_states`. """ raise NotImplementedError( - "Please add implementaion for `state_dtype` in the used cell.") + "Please add implementaion for `state_dtype` in the used cell." + ) class GRUCell(RNNCell): r""" - :api_attr: Static Graph + :api_attr: Static Graph Gated Recurrent Unit cell. It is a wrapper for `fluid.contrib.layers.rnn_impl.BasicGRUUnit` to make it adapt to RNNCell. @@ -244,14 +256,16 @@ class GRUCell(RNNCell): cell = layers.GRUCell(hidden_size=256) """ - def __init__(self, - hidden_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - dtype="float32", - name="GRUCell"): + def __init__( + self, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + dtype="float32", + name="GRUCell", + ): """ Constructor of GRUCell. @@ -272,9 +286,16 @@ class GRUCell(RNNCell): check_dtype(dtype, 'dtype', ['float32', 'float64'], 'GRUCell') self.hidden_size = hidden_size from .. 
import contrib # TODO: resolve recurrent import + self.gru_unit = contrib.layers.rnn_impl.BasicGRUUnit( - name, hidden_size, param_attr, bias_attr, gate_activation, - activation, dtype) + name, + hidden_size, + param_attr, + bias_attr, + gate_activation, + activation, + dtype, + ) def call(self, inputs, states): r""" @@ -295,10 +316,12 @@ class GRUCell(RNNCell): tensor is same as that of `states`. """ - check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'], - 'GRUCell') - check_variable_and_dtype(states, 'states', ['float32', 'float64'], - 'GRUCell') + check_variable_and_dtype( + inputs, 'inputs', ['float32', 'float64'], 'GRUCell' + ) + check_variable_and_dtype( + states, 'states', ['float32', 'float64'], 'GRUCell' + ) new_hidden = self.gru_unit(inputs, states) return new_hidden, new_hidden @@ -314,7 +337,7 @@ class GRUCell(RNNCell): class LSTMCell(RNNCell): r""" - :api_attr: Static Graph + :api_attr: Static Graph Long-Short Term Memory cell. It is a wrapper for `fluid.contrib.layers.rnn_impl.BasicLSTMUnit` to make it adapt to RNNCell. @@ -343,15 +366,17 @@ class LSTMCell(RNNCell): cell = layers.LSTMCell(hidden_size=256) """ - def __init__(self, - hidden_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - forget_bias=1.0, - dtype="float32", - name="LSTMCell"): + def __init__( + self, + hidden_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype="float32", + name="LSTMCell", + ): """ Constructor of LSTMCell. @@ -375,9 +400,17 @@ class LSTMCell(RNNCell): check_dtype(dtype, 'dtype', ['float32', 'float64'], 'LSTMCell') self.hidden_size = hidden_size from .. import contrib # TODO: resolve recurrent import + self.lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit( - name, hidden_size, param_attr, bias_attr, gate_activation, - activation, forget_bias, dtype) + name, + hidden_size, + param_attr, + bias_attr, + gate_activation, + activation, + forget_bias, + dtype, + ) def call(self, inputs, states): r""" @@ -400,13 +433,18 @@ class LSTMCell(RNNCell): tensors all is same as that of `states`. 
""" - check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'], - 'LSTMCell') + check_variable_and_dtype( + inputs, 'inputs', ['float32', 'float64'], 'LSTMCell' + ) check_type(states, 'states', list, 'LSTMCell') if isinstance(states, list): for i, state in enumerate(states): - check_variable_and_dtype(state, 'state[' + str(i) + ']', - ['float32', 'float64'], 'LSTMCell') + check_variable_and_dtype( + state, + 'state[' + str(i) + ']', + ['float32', 'float64'], + 'LSTMCell', + ) pre_hidden, pre_cell = states new_hidden, new_cell = self.lstm_unit(inputs, pre_hidden, pre_cell) @@ -422,13 +460,15 @@ class LSTMCell(RNNCell): return [[self.hidden_size], [self.hidden_size]] -def rnn(cell, - inputs, - initial_states=None, - sequence_length=None, - time_major=False, - is_reverse=False, - **kwargs): +def rnn( + cell, + inputs, + initial_states=None, + sequence_length=None, + time_major=False, + is_reverse=False, + **kwargs +): """ rnn creates a recurrent neural network specified by RNNCell `cell`, which performs :code:`cell.call()` (for dygraph mode :code:`cell.forward`) @@ -483,15 +523,28 @@ def rnn(cell, """ if _non_static_mode(): - return _rnn_dynamic_graph(cell, inputs, initial_states, sequence_length, - time_major, is_reverse, **kwargs) + return _rnn_dynamic_graph( + cell, + inputs, + initial_states, + sequence_length, + time_major, + is_reverse, + **kwargs + ) else: - return _rnn_static_graph(cell, inputs, initial_states, sequence_length, - time_major, is_reverse, **kwargs) + return _rnn_static_graph( + cell, + inputs, + initial_states, + sequence_length, + time_major, + is_reverse, + **kwargs + ) class ArrayWrapper(object): - def __init__(self, x): self.array = [x] @@ -505,8 +558,9 @@ class ArrayWrapper(object): def _maybe_copy(state, new_state, step_mask): """update rnn state or just pass the old state through""" - new_state = nn.elementwise_mul(new_state, step_mask, axis=0) \ - + nn.elementwise_mul(state, (1 - step_mask), axis=0) + new_state = nn.elementwise_mul( + new_state, step_mask, axis=0 + ) + nn.elementwise_mul(state, (1 - step_mask), axis=0) return new_state @@ -515,34 +569,40 @@ def _transpose_batch_time(x): return nn.transpose(x, perm) -def _rnn_dynamic_graph(cell, - inputs, - initial_states=None, - sequence_length=None, - time_major=False, - is_reverse=False, - **kwargs): +def _rnn_dynamic_graph( + cell, + inputs, + initial_states=None, + sequence_length=None, + time_major=False, + is_reverse=False, + **kwargs +): time_step_index = 0 if time_major else 1 flat_inputs = flatten(inputs) time_steps = flat_inputs[0].shape[time_step_index] if initial_states is None: initial_states = cell.get_initial_states( - batch_ref=inputs, batch_dim_idx=1 if time_major else 0) + batch_ref=inputs, batch_dim_idx=1 if time_major else 0 + ) if not time_major: inputs = map_structure(_transpose_batch_time, inputs) if sequence_length is not None: - mask = sequence_lod.sequence_mask(sequence_length, - maxlen=time_steps, - dtype=inputs.dtype) + mask = sequence_lod.sequence_mask( + sequence_length, maxlen=time_steps, dtype=inputs.dtype + ) mask = nn.transpose(mask, [1, 0]) if is_reverse: inputs = map_structure(lambda x: tensor.reverse(x, axis=[0]), inputs) - mask = tensor.reverse(mask, axis=[0]) \ - if sequence_length is not None else None + mask = ( + tensor.reverse(mask, axis=[0]) + if sequence_length is not None + else None + ) states = initial_states outputs = [] @@ -550,42 +610,56 @@ def _rnn_dynamic_graph(cell, step_inputs = map_structure(lambda x: x[i], inputs) step_outputs, new_states = 
cell(step_inputs, states, **kwargs) if sequence_length is not None: - new_states = map_structure(partial(_maybe_copy, step_mask=mask[i]), - states, new_states) + new_states = map_structure( + partial(_maybe_copy, step_mask=mask[i]), states, new_states + ) states = new_states - outputs = map_structure(lambda x: ArrayWrapper(x), - step_outputs) if i == 0 else map_structure( - lambda x, x_array: x_array.append(x), - step_outputs, outputs) + outputs = ( + map_structure(lambda x: ArrayWrapper(x), step_outputs) + if i == 0 + else map_structure( + lambda x, x_array: x_array.append(x), step_outputs, outputs + ) + ) final_outputs = map_structure( - lambda x: nn.stack(x.array, axis=time_step_index), outputs) + lambda x: nn.stack(x.array, axis=time_step_index), outputs + ) if is_reverse: final_outputs = map_structure( - lambda x: tensor.reverse(x, axis=time_step_index), final_outputs) + lambda x: tensor.reverse(x, axis=time_step_index), final_outputs + ) final_states = new_states return final_outputs, final_states -def _rnn_static_graph(cell, - inputs, - initial_states=None, - sequence_length=None, - time_major=False, - is_reverse=False, - **kwargs): +def _rnn_static_graph( + cell, + inputs, + initial_states=None, + sequence_length=None, + time_major=False, + is_reverse=False, + **kwargs +): check_type(inputs, 'inputs', (Variable, list, tuple), 'rnn') if isinstance(inputs, (list, tuple)): for i, input_x in enumerate(inputs): - check_variable_and_dtype(input_x, 'inputs[' + str(i) + ']', - ['float32', 'float64'], 'rnn') - check_type(initial_states, 'initial_states', - (Variable, list, tuple, type(None)), 'rnn') + check_variable_and_dtype( + input_x, 'inputs[' + str(i) + ']', ['float32', 'float64'], 'rnn' + ) + check_type( + initial_states, + 'initial_states', + (Variable, list, tuple, type(None)), + 'rnn', + ) - check_type(sequence_length, 'sequence_length', (Variable, type(None)), - 'rnn') + check_type( + sequence_length, 'sequence_length', (Variable, type(None)), 'rnn' + ) def _switch_grad(x, stop=False): x.stop_gradient = stop @@ -593,7 +667,8 @@ def _rnn_static_graph(cell, if initial_states is None: initial_states = cell.get_initial_states( - batch_ref=inputs, batch_dim_idx=1 if time_major else 0) + batch_ref=inputs, batch_dim_idx=1 if time_major else 0 + ) initial_states = map_structure(_switch_grad, initial_states) if not time_major: @@ -604,7 +679,8 @@ def _rnn_static_graph(cell, mask = sequence_lod.sequence_mask( sequence_length, maxlen=max_seq_len, - dtype=flatten(initial_states)[0].dtype) + dtype=flatten(initial_states)[0].dtype, + ) mask = nn.transpose(mask, [1, 0]) if is_reverse: inputs = map_structure(lambda x: tensor.reverse(x, axis=[0]), inputs) @@ -621,7 +697,8 @@ def _rnn_static_graph(cell, if sequence_length: step_mask = rnn.step_input(mask) new_states = map_structure( - partial(_maybe_copy, step_mask=step_mask), states, new_states) + partial(_maybe_copy, step_mask=step_mask), states, new_states + ) map_structure(rnn.update_memory, states, new_states) flat_outputs = flatten(outputs) @@ -629,14 +706,15 @@ def _rnn_static_graph(cell, map_structure(rnn.step_output, new_states) rnn_out = rnn() - final_outputs = rnn_out[:len(flat_outputs)] + final_outputs = rnn_out[: len(flat_outputs)] final_outputs = pack_sequence_as(outputs, final_outputs) - final_states = map_structure(lambda x: x[-1], rnn_out[len(flat_outputs):]) + final_states = map_structure(lambda x: x[-1], rnn_out[len(flat_outputs) :]) final_states = pack_sequence_as(new_states, final_states) if is_reverse: - final_outputs = 
map_structure(lambda x: tensor.reverse(x, axis=[0]), - final_outputs) + final_outputs = map_structure( + lambda x: tensor.reverse(x, axis=[0]), final_outputs + ) if not time_major: final_outputs = map_structure(_transpose_batch_time, final_outputs) @@ -644,13 +722,15 @@ def _rnn_static_graph(cell, return (final_outputs, final_states) -def birnn(cell_fw, - cell_bw, - inputs, - initial_states=None, - sequence_length=None, - time_major=False, - **kwargs): +def birnn( + cell_fw, + cell_bw, + inputs, + initial_states=None, + sequence_length=None, + time_major=False, + **kwargs +): """ birnn creates a bidirectional recurrent neural network specified by RNNCell `cell_fw` and `cell_bw`, which performs :code:`cell.call()` @@ -709,28 +789,35 @@ def birnn(cell_fw, """ if initial_states is None: states_fw = cell_fw.get_initial_states( - batch_ref=inputs, batch_dim_idx=1 if time_major else 0) + batch_ref=inputs, batch_dim_idx=1 if time_major else 0 + ) states_bw = cell_fw.get_initial_states( - batch_ref=inputs, batch_dim_idx=1 if time_major else 0) + batch_ref=inputs, batch_dim_idx=1 if time_major else 0 + ) else: states_fw, states_bw = initial_states - outputs_fw, states_fw = rnn(cell_fw, - inputs, - states_fw, - sequence_length, - time_major=time_major, - **kwargs) - - outputs_bw, states_bw = rnn(cell_bw, - inputs, - states_bw, - sequence_length, - time_major=time_major, - is_reverse=True, - **kwargs) - - outputs = map_structure(lambda x, y: tensor.concat([x, y], -1), outputs_fw, - outputs_bw) + outputs_fw, states_fw = rnn( + cell_fw, + inputs, + states_fw, + sequence_length, + time_major=time_major, + **kwargs + ) + + outputs_bw, states_bw = rnn( + cell_bw, + inputs, + states_bw, + sequence_length, + time_major=time_major, + is_reverse=True, + **kwargs + ) + + outputs = map_structure( + lambda x, y: tensor.concat([x, y], -1), outputs_fw, outputs_bw + ) final_states = (states_fw, states_bw) return outputs, final_states @@ -738,7 +825,7 @@ def birnn(cell_fw, class Decoder(object): """ - :api_attr: Static Graph + :api_attr: Static Graph Decoder is the base class for any decoder instance used in `dynamic_decode`. It provides interface for output generation for one time step, which can be @@ -890,13 +977,15 @@ class BeamSearchDecoder(Decoder): """ - def __init__(self, - cell, - start_token, - end_token, - beam_size, - embedding_fn=None, - output_fn=None): + def __init__( + self, + cell, + start_token, + end_token, + beam_size, + embedding_fn=None, + output_fn=None, + ): """ Constructor of BeamSearchDecoder. @@ -939,22 +1028,23 @@ class BeamSearchDecoder(Decoder): Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \ data type is same as `x`. """ - check_type(x, 'x', (Variable), - 'BeamSearchDecoder.tile_beam_merge_with_batch') + check_type( + x, 'x', (Variable), 'BeamSearchDecoder.tile_beam_merge_with_batch' + ) x = nn.unsqueeze(x, [1]) # [batch_size, 1, ...] expand_times = [1] * len(x.shape) expand_times[1] = beam_size x = paddle.tile(x, expand_times) # [batch_size, beam_size, ...] 
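# Illustrative sketch of the dominant rewrap in this hunk (hypothetical helper,
# not part of the patch): a call or signature that no longer fits on one line is
# split with one argument per line, the closing parenthesis is dedented, and a
# trailing comma follows the last argument; `**kwargs`, when present, stays
# without a trailing comma, as in the reformatted rnn(...)/birnn(...) calls.
def build_rnn_options(
    hidden_size,
    num_layers,
    dropout,
    time_major,
    is_reverse,
    **extra
):
    # Collect everything into a plain dict so the sketch runs on its own.
    options = {
        "hidden_size": hidden_size,
        "num_layers": num_layers,
        "dropout": dropout,
        "time_major": time_major,
        "is_reverse": is_reverse,
    }
    options.update(extra)
    return options


options = build_rnn_options(
    hidden_size=256,
    num_layers=2,
    dropout=0.1,
    time_major=False,
    is_reverse=True,
    use_peepholes=True,
)
print(options["hidden_size"], options["use_peepholes"])  # 256 True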
- x = nn.transpose(x, - list(range(2, len(x.shape))) + - [0, 1]) # [..., batch_size, beam_size] + x = nn.transpose( + x, list(range(2, len(x.shape))) + [0, 1] + ) # [..., batch_size, beam_size] # use 0 to copy to avoid wrong shape - x = nn.reshape(x, shape=[0] * (len(x.shape) - 2) + - [-1]) # [..., batch_size * beam_size] + x = nn.reshape( + x, shape=[0] * (len(x.shape) - 2) + [-1] + ) # [..., batch_size * beam_size] x = nn.transpose( - x, [len(x.shape) - 1] + - list(range(0, - len(x.shape) - 1))) # [batch_size * beam_size, ...] + x, [len(x.shape) - 1] + list(range(0, len(x.shape) - 1)) + ) # [batch_size * beam_size, ...] return x def _split_batch_beams(self, x): @@ -1032,14 +1122,16 @@ class BeamSearchDecoder(Decoder): replaced with a tensor with all probability on the EOS token. """ check_type(probs, 'probs', (Variable), 'BeamSearchDecoder._mask_probs') - check_type(finished, 'finished', (Variable), - 'BeamSearchDecoder._mask_probs') + check_type( + finished, 'finished', (Variable), 'BeamSearchDecoder._mask_probs' + ) # TODO: use where_op finished = tensor.cast(finished, dtype=probs.dtype) probs = nn.elementwise_mul( paddle.tile(nn.unsqueeze(finished, [2]), [1, 1, self.vocab_size]), self.noend_mask_tensor, - axis=-1) - nn.elementwise_mul(probs, (finished - 1), axis=0) + axis=-1, + ) - nn.elementwise_mul(probs, (finished - 1), axis=0) return probs def _gather(self, x, indices, batch_size): @@ -1059,37 +1151,48 @@ class BeamSearchDecoder(Decoder): """ check_type(x, 'x', (Variable), 'BeamSearchDecoder._gather') check_type(indices, 'indices', (Variable), 'BeamSearchDecoder._gather') - check_type(batch_size, 'batch_size', (Variable), - 'BeamSearchDecoder._gather') + check_type( + batch_size, 'batch_size', (Variable), 'BeamSearchDecoder._gather' + ) # TODO: compatibility of int32 and int64 - batch_size = tensor.cast( - batch_size, - indices.dtype) if batch_size.dtype != indices.dtype else batch_size + batch_size = ( + tensor.cast(batch_size, indices.dtype) + if batch_size.dtype != indices.dtype + else batch_size + ) batch_size.stop_gradient = True # TODO: remove this batch_pos = paddle.tile( - nn.unsqueeze(tensor.range(0, batch_size, 1, dtype=indices.dtype), - [1]), [1, self.beam_size]) + nn.unsqueeze( + tensor.range(0, batch_size, 1, dtype=indices.dtype), [1] + ), + [1, self.beam_size], + ) topk_coordinates = nn.stack([batch_pos, indices], axis=2) topk_coordinates.stop_gradient = True return nn.gather_nd(x, topk_coordinates) class OutputWrapper( - collections.namedtuple("OutputWrapper", - ("scores", "predicted_ids", "parent_ids"))): + collections.namedtuple( + "OutputWrapper", ("scores", "predicted_ids", "parent_ids") + ) + ): """ The structure for the returned value `outputs` of `decoder.step`. A namedtuple includes scores, predicted_ids, parent_ids as fields. """ + pass class StateWrapper( - collections.namedtuple( - "StateWrapper", - ("cell_states", "log_probs", "finished", "lengths"))): + collections.namedtuple( + "StateWrapper", ("cell_states", "log_probs", "finished", "lengths") + ) + ): """ The structure for the argument `states` of `decoder.step`. A namedtuple includes cell_states, log_probs, finished, lengths as fields. 
""" + pass def initialize(self, initial_cell_states): @@ -1117,22 +1220,30 @@ class BeamSearchDecoder(Decoder): state = flatten(initial_cell_states)[0] self.batch_size = nn.shape(state)[0] - self.start_token_tensor = tensor.fill_constant(shape=[1], - dtype="int64", - value=self.start_token) - self.end_token_tensor = tensor.fill_constant(shape=[1], - dtype="int64", - value=self.end_token) - - init_cell_states = map_structure(self._expand_to_beam_size, - initial_cell_states) - init_inputs = paddle.full(shape=[self.batch_size, self.beam_size], - fill_value=self.start_token_tensor, - dtype=self.start_token_tensor.dtype) + self.start_token_tensor = tensor.fill_constant( + shape=[1], dtype="int64", value=self.start_token + ) + self.end_token_tensor = tensor.fill_constant( + shape=[1], dtype="int64", value=self.end_token + ) + + init_cell_states = map_structure( + self._expand_to_beam_size, initial_cell_states + ) + init_inputs = paddle.full( + shape=[self.batch_size, self.beam_size], + fill_value=self.start_token_tensor, + dtype=self.start_token_tensor.dtype, + ) log_probs = paddle.tile( tensor.assign( - np.array([[0.] + [-self.kinf] * (self.beam_size - 1)], - dtype="float32")), [self.batch_size, 1]) + np.array( + [[0.0] + [-self.kinf] * (self.beam_size - 1)], + dtype="float32", + ) + ), + [self.batch_size, 1], + ) if paddle.get_default_dtype() == "float64": log_probs = tensor.cast(log_probs, "float64") # TODO: remove the restriction of force_cpu @@ -1141,13 +1252,19 @@ class BeamSearchDecoder(Decoder): shape=[-1, self.beam_size], dtype="bool", value=False, - force_cpu=True) + force_cpu=True, + ) init_lengths = tensor.zeros_like(init_inputs) - init_inputs = self.embedding_fn( - init_inputs) if self.embedding_fn else init_inputs - return init_inputs, self.StateWrapper(init_cell_states, log_probs, - init_finished, - init_lengths), init_finished + init_inputs = ( + self.embedding_fn(init_inputs) if self.embedding_fn else init_inputs + ) + return ( + init_inputs, + self.StateWrapper( + init_cell_states, log_probs, init_finished, init_lengths + ), + init_finished, + ) def _beam_search_step(self, time, logits, next_cell_states, beam_state): r""" @@ -1178,50 +1295,61 @@ class BeamSearchDecoder(Decoder): """ self.vocab_size = logits.shape[-1] - self.vocab_size_tensor = tensor.fill_constant(shape=[1], - dtype="int64", - value=self.vocab_size) + self.vocab_size_tensor = tensor.fill_constant( + shape=[1], dtype="int64", value=self.vocab_size + ) noend_array = [-self.kinf] * self.vocab_size noend_array[self.end_token] = 0 self.noend_mask_tensor = tensor.assign(np.array(noend_array, "float32")) if paddle.get_default_dtype() == "float64": - self.noend_mask_tensor = tensor.cast(self.noend_mask_tensor, - "float64") + self.noend_mask_tensor = tensor.cast( + self.noend_mask_tensor, "float64" + ) step_log_probs = nn.log(nn.softmax(logits)) step_log_probs = self._mask_probs(step_log_probs, beam_state.finished) - log_probs = nn.elementwise_add(x=step_log_probs, - y=beam_state.log_probs, - axis=0) + log_probs = nn.elementwise_add( + x=step_log_probs, y=beam_state.log_probs, axis=0 + ) # TODO: length penalty scores = log_probs scores = nn.reshape(scores, [-1, self.beam_size * self.vocab_size]) # TODO: add grad for topk then this beam search can be used to train topk_scores, topk_indices = paddle.topk(x=scores, k=self.beam_size) - beam_indices = nn.elementwise_floordiv(topk_indices, - self.vocab_size_tensor) + beam_indices = nn.elementwise_floordiv( + topk_indices, self.vocab_size_tensor + ) token_indices = 
nn.elementwise_mod(topk_indices, self.vocab_size_tensor) next_log_probs = self._gather( nn.reshape(log_probs, [-1, self.beam_size * self.vocab_size]), - topk_indices, self.batch_size) + topk_indices, + self.batch_size, + ) next_cell_states = map_structure( lambda x: self._gather(x, beam_indices, self.batch_size), - next_cell_states) - next_finished = self._gather(beam_state.finished, beam_indices, - self.batch_size) - next_lengths = self._gather(beam_state.lengths, beam_indices, - self.batch_size) - next_lengths = next_lengths + tensor.cast(nn.logical_not(next_finished), - beam_state.lengths.dtype) + next_cell_states, + ) + next_finished = self._gather( + beam_state.finished, beam_indices, self.batch_size + ) + next_lengths = self._gather( + beam_state.lengths, beam_indices, self.batch_size + ) + next_lengths = next_lengths + tensor.cast( + nn.logical_not(next_finished), beam_state.lengths.dtype + ) next_finished = control_flow.logical_or( next_finished, - control_flow.equal(token_indices, self.end_token_tensor)) - - beam_search_output = self.OutputWrapper(topk_scores, token_indices, - beam_indices) - beam_search_state = self.StateWrapper(next_cell_states, next_log_probs, - next_finished, next_lengths) + control_flow.equal(token_indices, self.end_token_tensor), + ) + + beam_search_output = self.OutputWrapper( + topk_scores, token_indices, beam_indices + ) + beam_search_state = self.StateWrapper( + next_cell_states, next_log_probs, next_finished, next_lengths + ) return beam_search_output, beam_search_state def step(self, time, inputs, states, **kwargs): @@ -1254,11 +1382,13 @@ class BeamSearchDecoder(Decoder): """ inputs = map_structure(self._merge_batch_beams, inputs) cell_states = map_structure(self._merge_batch_beams, states.cell_states) - cell_outputs, next_cell_states = self.cell(inputs, cell_states, - **kwargs) + cell_outputs, next_cell_states = self.cell( + inputs, cell_states, **kwargs + ) cell_outputs = map_structure(self._split_batch_beams, cell_outputs) - next_cell_states = map_structure(self._split_batch_beams, - next_cell_states) + next_cell_states = map_structure( + self._split_batch_beams, next_cell_states + ) if self.output_fn is not None: cell_outputs = self.output_fn(cell_outputs) @@ -1267,12 +1397,14 @@ class BeamSearchDecoder(Decoder): time=time, logits=cell_outputs, next_cell_states=next_cell_states, - beam_state=states) + beam_state=states, + ) finished = beam_search_state.finished sample_ids = beam_search_output.predicted_ids sample_ids.stop_gradient = True - next_inputs = self.embedding_fn( - sample_ids) if self.embedding_fn else sample_ids + next_inputs = ( + self.embedding_fn(sample_ids) if self.embedding_fn else sample_ids + ) return (beam_search_output, beam_search_state, next_inputs, finished) @@ -1300,8 +1432,9 @@ class BeamSearchDecoder(Decoder): `[time_step, batch_size, beam_size]`. `final_states` is the same \ as the input argument `final_states`. 
""" - predicted_ids = nn.gather_tree(outputs.predicted_ids, - outputs.parent_ids) + predicted_ids = nn.gather_tree( + outputs.predicted_ids, outputs.parent_ids + ) # TODO: use FinalBeamSearchDecoderOutput as output return predicted_ids, final_states @@ -1319,15 +1452,16 @@ class BeamSearchDecoder(Decoder): return True -def _dynamic_decode_imperative(decoder, - inits=None, - max_step_num=None, - output_time_major=False, - impute_finished=False, - is_test=False, - return_length=False, - **kwargs): - +def _dynamic_decode_imperative( + decoder, + inits=None, + max_step_num=None, + output_time_major=False, + impute_finished=False, + is_test=False, + return_length=False, + **kwargs +): def _maybe_copy(state, new_state, step_mask): # TODO: use where_op state_dtype = state.dtype @@ -1340,28 +1474,30 @@ def _dynamic_decode_imperative(decoder, # to sum(bool) error. step_mask.stop_gradient = True new_state = nn.elementwise_mul( - state, step_mask, axis=0) - nn.elementwise_mul(new_state, - (step_mask - 1), - axis=0) + state, step_mask, axis=0 + ) - nn.elementwise_mul(new_state, (step_mask - 1), axis=0) if convert_dtype(state_dtype) in ["bool"]: new_state = tensor.cast(new_state, dtype=state_dtype) return new_state initial_inputs, initial_states, initial_finished = decoder.initialize(inits) - inputs, states, finished = (initial_inputs, initial_states, - initial_finished) + inputs, states, finished = ( + initial_inputs, + initial_states, + initial_finished, + ) cond = control_flow.logical_not((nn.reduce_all(initial_finished))) sequence_lengths = tensor.cast(tensor.zeros_like(initial_finished), "int64") outputs = None step_idx = 0 - step_idx_tensor = tensor.fill_constant(shape=[1], - dtype="int64", - value=step_idx) + step_idx_tensor = tensor.fill_constant( + shape=[1], dtype="int64", value=step_idx + ) while cond.numpy(): - (step_outputs, next_states, next_inputs, - next_finished) = decoder.step(step_idx_tensor, inputs, states, - **kwargs) + (step_outputs, next_states, next_inputs, next_finished) = decoder.step( + step_idx_tensor, inputs, states, **kwargs + ) if not decoder.tracks_own_finished: # BeamSearchDecoder would track it own finished, since # beams would be reordered and the finished status of each @@ -1373,26 +1509,37 @@ def _dynamic_decode_imperative(decoder, tensor.assign(next_finished, finished) next_sequence_lengths = nn.elementwise_add( sequence_lengths, - tensor.cast(control_flow.logical_not(finished), - sequence_lengths.dtype)) + tensor.cast( + control_flow.logical_not(finished), sequence_lengths.dtype + ), + ) if impute_finished: # rectify the states for the finished. next_states = map_structure( - lambda x, y: _maybe_copy(x, y, finished), states, - next_states) + lambda x, y: _maybe_copy(x, y, finished), + states, + next_states, + ) else: warnings.warn( "`next_states` has no `lengths` attribute, the returned `sequence_lengths` would be all zeros." 
) if not hasattr(next_states, "lengths") else None - next_sequence_lengths = getattr(next_states, "lengths", - sequence_lengths) - - outputs = map_structure( - lambda x: ArrayWrapper(x), - step_outputs) if step_idx == 0 else map_structure( - lambda x, x_array: x_array.append(x), step_outputs, outputs) - inputs, states, finished, sequence_lengths = (next_inputs, next_states, - next_finished, - next_sequence_lengths) + next_sequence_lengths = getattr( + next_states, "lengths", sequence_lengths + ) + + outputs = ( + map_structure(lambda x: ArrayWrapper(x), step_outputs) + if step_idx == 0 + else map_structure( + lambda x, x_array: x_array.append(x), step_outputs, outputs + ) + ) + inputs, states, finished, sequence_lengths = ( + next_inputs, + next_states, + next_finished, + next_sequence_lengths, + ) control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True) step_idx += 1 @@ -1405,42 +1552,49 @@ def _dynamic_decode_imperative(decoder, final_states = states try: - final_outputs, final_states = decoder.finalize(final_outputs, - final_states, - sequence_lengths) + final_outputs, final_states = decoder.finalize( + final_outputs, final_states, sequence_lengths + ) except NotImplementedError: pass if not output_time_major: final_outputs = map_structure( lambda x: nn.transpose(x, [1, 0] + list(range(2, len(x.shape)))), - final_outputs) + final_outputs, + ) - return (final_outputs, final_states, - sequence_lengths) if return_length else (final_outputs, - final_states) + return ( + (final_outputs, final_states, sequence_lengths) + if return_length + else (final_outputs, final_states) + ) -def _dynamic_decode_declarative(decoder, - inits=None, - max_step_num=None, - output_time_major=False, - impute_finished=False, - is_test=False, - return_length=False, - **kwargs): +def _dynamic_decode_declarative( + decoder, + inits=None, + max_step_num=None, + output_time_major=False, + impute_finished=False, + is_test=False, + return_length=False, + **kwargs +): initial_inputs, initial_states, initial_finished = decoder.initialize(inits) - global_inputs, global_states, global_finished = (initial_inputs, - initial_states, - initial_finished) + global_inputs, global_states, global_finished = ( + initial_inputs, + initial_states, + initial_finished, + ) global_finished.stop_gradient = True step_idx = tensor.fill_constant(shape=[1], dtype="int64", value=0) cond = control_flow.logical_not((nn.reduce_all(initial_finished))) if max_step_num is not None: - max_step_num = tensor.fill_constant(shape=[1], - dtype="int64", - value=max_step_num) + max_step_num = tensor.fill_constant( + shape=[1], dtype="int64", value=max_step_num + ) while_op = control_flow.While(cond, is_test=is_test) sequence_lengths = tensor.cast(tensor.zeros_like(initial_finished), "int64") @@ -1453,9 +1607,11 @@ def _dynamic_decode_declarative(decoder, else: # inputs and states of all steps must be saved for backward and training inputs_arrays = map_structure( - lambda x: control_flow.array_write(x, step_idx), initial_inputs) + lambda x: control_flow.array_write(x, step_idx), initial_inputs + ) states_arrays = map_structure( - lambda x: control_flow.array_write(x, step_idx), initial_states) + lambda x: control_flow.array_write(x, step_idx), initial_states + ) def _maybe_copy(state, new_state, step_mask): # TODO: use where_op @@ -1469,9 +1625,8 @@ def _dynamic_decode_declarative(decoder, # to sum(bool) error. 
step_mask.stop_gradient = True new_state = nn.elementwise_mul( - state, step_mask, axis=0) - nn.elementwise_mul(new_state, - (step_mask - 1), - axis=0) + state, step_mask, axis=0 + ) - nn.elementwise_mul(new_state, (step_mask - 1), axis=0) if convert_dtype(state_dtype) in ["bool"]: new_state = tensor.cast(new_state, dtype=state_dtype) return new_state @@ -1481,8 +1636,9 @@ def _dynamic_decode_declarative(decoder, def _create_array_out_of_while(dtype): current_block_idx = default_main_program().current_block_idx - default_main_program().current_block_idx = default_main_program( - ).current_block().parent_idx + default_main_program().current_block_idx = ( + default_main_program().current_block().parent_idx + ) tensor_array = control_flow.create_array(dtype) default_main_program().current_block_idx = current_block_idx return tensor_array @@ -1492,23 +1648,30 @@ def _dynamic_decode_declarative(decoder, if not is_test: inputs = map_structure( lambda array: control_flow.array_read(array, step_idx), - inputs_arrays) + inputs_arrays, + ) states = map_structure( lambda array: control_flow.array_read(array, step_idx), - states_arrays) - (outputs, next_states, next_inputs, - next_finished) = decoder.step(step_idx, inputs, states, **kwargs) + states_arrays, + ) + (outputs, next_states, next_inputs, next_finished) = decoder.step( + step_idx, inputs, states, **kwargs + ) if not decoder.tracks_own_finished: # BeamSearchDecoder would track it own finished, since beams would # be reordered and the finished status of each entry might change. # Otherwise, perform logical OR which would not change the already # finished. - next_finished = control_flow.logical_or(next_finished, - global_finished) + next_finished = control_flow.logical_or( + next_finished, global_finished + ) next_sequence_lengths = nn.elementwise_add( sequence_lengths, - tensor.cast(control_flow.logical_not(global_finished), - sequence_lengths.dtype)) + tensor.cast( + control_flow.logical_not(global_finished), + sequence_lengths.dtype, + ), + ) if impute_finished: # rectify the states for the finished. next_states = map_structure( lambda x, y: _maybe_copy(x, y, global_finished), @@ -1519,16 +1682,22 @@ def _dynamic_decode_declarative(decoder, warnings.warn( "`next_states` has no `lengths` attribute, the returned `sequence_lengths` would be all zeros." 
) if not hasattr(next_states, "lengths") else None - next_sequence_lengths = getattr(next_states, "lengths", - sequence_lengths) + next_sequence_lengths = getattr( + next_states, "lengths", sequence_lengths + ) # create tensor array in global block after dtype[s] of outputs can be got outputs_arrays = map_structure( - lambda x: _create_array_out_of_while(x.dtype), outputs) + lambda x: _create_array_out_of_while(x.dtype), outputs + ) map_structure( lambda x, x_array: control_flow.array_write( - x, i=step_idx, array=x_array), outputs, outputs_arrays) + x, i=step_idx, array=x_array + ), + outputs, + outputs_arrays, + ) control_flow.increment(x=step_idx, value=1.0, in_place=True) # update the global_finished first, since it might be also in states of # decoder, which otherwise would write a stale finished status to array @@ -1540,50 +1709,68 @@ def _dynamic_decode_declarative(decoder, else: map_structure( lambda x, x_array: control_flow.array_write( - x, i=step_idx, array=x_array), next_inputs, inputs_arrays) + x, i=step_idx, array=x_array + ), + next_inputs, + inputs_arrays, + ) map_structure( lambda x, x_array: control_flow.array_write( - x, i=step_idx, array=x_array), next_states, states_arrays) + x, i=step_idx, array=x_array + ), + next_states, + states_arrays, + ) if max_step_num is not None: control_flow.logical_and( control_flow.logical_not(nn.reduce_all(global_finished)), - control_flow.less_equal(step_idx, max_step_num), cond) + control_flow.less_equal(step_idx, max_step_num), + cond, + ) else: control_flow.logical_not(nn.reduce_all(global_finished), cond) final_outputs = map_structure( lambda array: tensor.tensor_array_to_tensor( - array, axis=0, use_stack=True)[0], outputs_arrays) + array, axis=0, use_stack=True + )[0], + outputs_arrays, + ) if is_test: final_states = global_states else: final_states = map_structure( lambda array: control_flow.array_read(array, step_idx), - states_arrays) + states_arrays, + ) try: - final_outputs, final_states = decoder.finalize(final_outputs, - final_states, - sequence_lengths) + final_outputs, final_states = decoder.finalize( + final_outputs, final_states, sequence_lengths + ) except NotImplementedError: pass if not output_time_major: final_outputs = map_structure(_transpose_batch_time, final_outputs) - return (final_outputs, final_states, - sequence_lengths) if return_length else (final_outputs, - final_states) + return ( + (final_outputs, final_states, sequence_lengths) + if return_length + else (final_outputs, final_states) + ) -def dynamic_decode(decoder, - inits=None, - max_step_num=None, - output_time_major=False, - impute_finished=False, - is_test=False, - return_length=False, - **kwargs): +def dynamic_decode( + decoder, + inits=None, + max_step_num=None, + output_time_major=False, + impute_finished=False, + is_test=False, + return_length=False, + **kwargs +): r""" Dynamic decoding performs :code:`decoder.step()` repeatedly until the returned Tensor indicating finished status contains all True values or the number of @@ -1659,13 +1846,27 @@ def dynamic_decode(decoder, max_step_num=10) """ if _non_static_mode(): - return _dynamic_decode_imperative(decoder, inits, max_step_num, - output_time_major, impute_finished, - is_test, return_length, **kwargs) + return _dynamic_decode_imperative( + decoder, + inits, + max_step_num, + output_time_major, + impute_finished, + is_test, + return_length, + **kwargs + ) else: - return _dynamic_decode_declarative(decoder, inits, max_step_num, - output_time_major, impute_finished, - is_test, return_length, 
**kwargs) + return _dynamic_decode_declarative( + decoder, + inits, + max_step_num, + output_time_major, + impute_finished, + is_test, + return_length, + **kwargs + ) class DecodeHelper(object): @@ -1792,11 +1993,14 @@ class TrainingHelper(DecodeHelper): # extend inputs to avoid to slice out of range in `next_inputs` # may be easier and have better performance than condition_op self.inputs_ = map_structure( - lambda x: nn.pad(x, - paddings=([0, 1] + [0, 0] * (len(x.shape) - 1)) - if time_major else ([0, 0, 0, 1] + [0, 0] * - (len(x.shape) - 2))), - self.inputs) + lambda x: nn.pad( + x, + paddings=([0, 1] + [0, 0] * (len(x.shape) - 1)) + if time_major + else ([0, 0, 0, 1] + [0, 0] * (len(x.shape) - 2)), + ), + self.inputs, + ) def initialize(self): r""" @@ -1813,12 +2017,14 @@ class TrainingHelper(DecodeHelper): """ init_finished = control_flow.equal( self.sequence_length, - tensor.fill_constant(shape=[1], - dtype=self.sequence_length.dtype, - value=0)) + tensor.fill_constant( + shape=[1], dtype=self.sequence_length.dtype, value=0 + ), + ) # TODO: support zero length init_inputs = map_structure( - lambda x: x[0] if self.time_major else x[:, 0], self.inputs) + lambda x: x[0] if self.time_major else x[:, 0], self.inputs + ) return init_inputs, init_finished def sample(self, time, outputs, states): @@ -1872,8 +2078,11 @@ class TrainingHelper(DecodeHelper): shape `[batch_size]`. """ # TODO: compatibility of int32 and int64 - time = tensor.cast(time, "int32") if convert_dtype( - time.dtype) not in ["int32"] else time + time = ( + tensor.cast(time, "int32") + if convert_dtype(time.dtype) not in ["int32"] + else time + ) if self.sequence_length.dtype != time.dtype: self.sequence_length = tensor.cast(self.sequence_length, time.dtype) next_time = time + 1 @@ -1881,11 +2090,12 @@ class TrainingHelper(DecodeHelper): def _slice(x): # TODO: use Variable.__getitem__ axes = [0 if self.time_major else 1] - return nn.squeeze(nn.slice(x, - axes=axes, - starts=[next_time], - ends=[next_time + 1]), - axes=axes) + return nn.squeeze( + nn.slice( + x, axes=axes, starts=[next_time], ends=[next_time + 1] + ), + axes=axes, + ) next_inputs = map_structure(_slice, self.inputs_) return finished, next_inputs, states @@ -1943,9 +2153,9 @@ class GreedyEmbeddingHelper(DecodeHelper): """ self.embedding_fn = embedding_fn self.start_tokens = start_tokens - self.end_token = tensor.fill_constant(shape=[1], - dtype="int64", - value=end_token) + self.end_token = tensor.fill_constant( + shape=[1], dtype="int64", value=end_token + ) def initialize(self): r""" @@ -1966,7 +2176,8 @@ class GreedyEmbeddingHelper(DecodeHelper): shape=[-1], dtype="bool", value=False, - force_cpu=True) + force_cpu=True, + ) init_inputs = self.embedding_fn(self.start_tokens) return init_inputs, init_finished @@ -2054,12 +2265,14 @@ class SampleEmbeddingHelper(GreedyEmbeddingHelper): decoder=decoder, inits=decoder_cell.get_initial_states(encoder_output)) """ - def __init__(self, - embedding_fn, - start_tokens, - end_token, - softmax_temperature=None, - seed=None): + def __init__( + self, + embedding_fn, + start_tokens, + end_token, + softmax_temperature=None, + seed=None, + ): r""" Constructor of SampleEmbeddingHelper. @@ -2086,11 +2299,16 @@ class SampleEmbeddingHelper(GreedyEmbeddingHelper): structure of) tensor variable[s], and `finished` is a tensor with \ bool data type. 
""" - super(SampleEmbeddingHelper, self).__init__(embedding_fn, start_tokens, - end_token) - self.softmax_temperature = tensor.fill_constant( - shape=[1], dtype="float32", value=softmax_temperature - ) if softmax_temperature is not None else None + super(SampleEmbeddingHelper, self).__init__( + embedding_fn, start_tokens, end_token + ) + self.softmax_temperature = ( + tensor.fill_constant( + shape=[1], dtype="float32", value=softmax_temperature + ) + if softmax_temperature is not None + else None + ) self.seed = seed def sample(self, time, outputs, states): @@ -2112,16 +2330,19 @@ class SampleEmbeddingHelper(GreedyEmbeddingHelper): Variable: An `int64` tensor with shape `[batch_size]`, representing \ the sampled ids. """ - logits = (outputs / self.softmax_temperature - ) if self.softmax_temperature is not None else outputs + logits = ( + (outputs / self.softmax_temperature) + if self.softmax_temperature is not None + else outputs + ) probs = nn.softmax(logits) # TODO: remove this stop_gradient. The stop_gradient of sample_ids can # not pass to probs, since sampling_id op does not have corresponding # grad op and thus can not pass. probs.stop_gradient = True - sample_ids = nn.sampling_id(probs, - seed=self.seed, - dtype=self.start_tokens.dtype) + sample_ids = nn.sampling_id( + probs, seed=self.seed, dtype=self.start_tokens.dtype + ) return sample_ids @@ -2201,12 +2422,13 @@ class BasicDecoder(Decoder): return initial_inputs, initial_cell_states, initial_finished class OutputWrapper( - collections.namedtuple("OutputWrapper", - ("cell_outputs", "sample_ids"))): + collections.namedtuple("OutputWrapper", ("cell_outputs", "sample_ids")) + ): """ The structure for the returned value `outputs` of `decoder.step`. A namedtuple includes cell_outputs, sample_ids as fields. """ + pass def step(self, time, inputs, states, **kwargs): @@ -2248,32 +2470,35 @@ class BasicDecoder(Decoder): cell_outputs, cell_states = self.cell(inputs, states, **kwargs) if self.output_fn is not None: cell_outputs = self.output_fn(cell_outputs) - sample_ids = self.helper.sample(time=time, - outputs=cell_outputs, - states=cell_states) + sample_ids = self.helper.sample( + time=time, outputs=cell_outputs, states=cell_states + ) sample_ids.stop_gradient = True - (finished, next_inputs, - next_states) = self.helper.next_inputs(time=time, - outputs=cell_outputs, - states=cell_states, - sample_ids=sample_ids) + (finished, next_inputs, next_states) = self.helper.next_inputs( + time=time, + outputs=cell_outputs, + states=cell_states, + sample_ids=sample_ids, + ) outputs = self.OutputWrapper(cell_outputs, sample_ids) return (outputs, next_states, next_inputs, finished) -def dynamic_lstm(input, - size, - h_0=None, - c_0=None, - param_attr=None, - bias_attr=None, - use_peepholes=True, - is_reverse=False, - gate_activation='sigmoid', - cell_activation='tanh', - candidate_activation='tanh', - dtype='float32', - name=None): +def dynamic_lstm( + input, + size, + h_0=None, + c_0=None, + param_attr=None, + bias_attr=None, + use_peepholes=True, + is_reverse=False, + gate_activation='sigmoid', + cell_activation='tanh', + candidate_activation='tanh', + dtype='float32', + name=None, +): r""" :api_attr: Static Graph @@ -2373,35 +2598,40 @@ def dynamic_lstm(input, forward.shape # (-1, 512) cell.shape # (-1, 512) """ - assert _non_static_mode( - ) is not True, "please use lstm instead of dynamic_lstm in dygraph mode!" - assert bias_attr is not False, "bias_attr should not be False in dynamic_lstm." 
- - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'dynamic_lstm') + assert ( + _non_static_mode() is not True + ), "please use lstm instead of dynamic_lstm in dygraph mode!" + assert ( + bias_attr is not False + ), "bias_attr should not be False in dynamic_lstm." + + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'dynamic_lstm' + ) check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_lstm') if isinstance(h_0, Variable): - check_variable_and_dtype(h_0, 'h_0', ['float32', 'float64'], - 'dynamic_lstm') + check_variable_and_dtype( + h_0, 'h_0', ['float32', 'float64'], 'dynamic_lstm' + ) check_type(c_0, 'c_0', (Variable, type(None)), 'dynamic_lstm') if isinstance(c_0, Variable): - check_variable_and_dtype(c_0, 'c_0', ['float32', 'float64'], - 'dynamic_lstm') + check_variable_and_dtype( + c_0, 'c_0', ['float32', 'float64'], 'dynamic_lstm' + ) helper = LayerHelper('lstm', **locals()) size = size // 4 - weight = helper.create_parameter(attr=helper.param_attr, - shape=[size, 4 * size], - dtype=dtype) + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype + ) bias_size = [1, 7 * size] if not use_peepholes: bias_size[1] = 4 * size - bias = helper.create_parameter(attr=helper.bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=True) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True + ) hidden = helper.create_variable_for_type_inference(dtype) cell = helper.create_variable_for_type_inference(dtype) @@ -2410,47 +2640,55 @@ def dynamic_lstm(input, inputs = {'Input': input, 'Weight': weight, 'Bias': bias} batch_size = input.shape[0] if h_0: - assert h_0.shape == (batch_size, size), \ + assert h_0.shape == (batch_size, size), ( 'The shape of h0 should be (batch_size, %d)' % size + ) inputs['H0'] = h_0 if c_0: - assert c_0.shape == (batch_size, size), \ + assert c_0.shape == (batch_size, size), ( 'The shape of c0 should be (batch_size, %d)' % size + ) inputs['C0'] = c_0 - helper.append_op(type='lstm', - inputs=inputs, - outputs={ - 'Hidden': hidden, - 'Cell': cell, - 'BatchGate': batch_gate, - 'BatchCellPreAct': batch_cell_pre_act - }, - attrs={ - 'use_peepholes': use_peepholes, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'cell_activation': cell_activation, - 'candidate_activation': candidate_activation - }) + helper.append_op( + type='lstm', + inputs=inputs, + outputs={ + 'Hidden': hidden, + 'Cell': cell, + 'BatchGate': batch_gate, + 'BatchCellPreAct': batch_cell_pre_act, + }, + attrs={ + 'use_peepholes': use_peepholes, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation, + }, + ) return hidden, cell -@deprecated(since='2.0.0', - update_to='paddle.nn.LSTM', - reason="This API may occur CUDNN errors.") -def lstm(input, - init_h, - init_c, - max_len, - hidden_size, - num_layers, - dropout_prob=0.0, - is_bidirec=False, - is_test=False, - name=None, - default_initializer=None, - seed=-1): +@deprecated( + since='2.0.0', + update_to='paddle.nn.LSTM', + reason="This API may occur CUDNN errors.", +) +def lstm( + input, + init_h, + init_c, + max_len, + hidden_size, + num_layers, + dropout_prob=0.0, + is_bidirec=False, + is_test=False, + name=None, + default_initializer=None, + seed=-1, +): r""" :api_attr: Static Graph @@ -2577,63 +2815,71 @@ def lstm(input, weight_size += input_weight_size + hidden_weight_size weight_size += hidden_size * 8 * 
num_dirrection - weight = helper.create_parameter(attr=helper.param_attr, - shape=[weight_size], - dtype=dtype, - default_initializer=default_initializer) + weight = helper.create_parameter( + attr=helper.param_attr, + shape=[weight_size], + dtype=dtype, + default_initializer=default_initializer, + ) out = helper.create_variable_for_type_inference(dtype) last_h = helper.create_variable_for_type_inference(dtype) last_c = helper.create_variable_for_type_inference(dtype) reserve = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) state_out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) state_out.persistable = True - helper.append_op(type='cudnn_lstm', - inputs={ - 'Input': input, - 'InitH': init_h, - 'InitC': init_c, - 'W': weight, - }, - outputs={ - 'Out': out, - 'LastH': last_h, - 'LastC': last_c, - 'Reserve': reserve, - 'StateOut': state_out, - }, - attrs={ - 'is_bidirec': is_bidirec, - 'input_size': input_size, - 'hidden_size': hidden_size, - 'num_layers': num_layers, - 'is_test': is_test, - 'dropout_prob': dropout_prob, - 'seed': seed, - }) + helper.append_op( + type='cudnn_lstm', + inputs={ + 'Input': input, + 'InitH': init_h, + 'InitC': init_c, + 'W': weight, + }, + outputs={ + 'Out': out, + 'LastH': last_h, + 'LastC': last_c, + 'Reserve': reserve, + 'StateOut': state_out, + }, + attrs={ + 'is_bidirec': is_bidirec, + 'input_size': input_size, + 'hidden_size': hidden_size, + 'num_layers': num_layers, + 'is_test': is_test, + 'dropout_prob': dropout_prob, + 'seed': seed, + }, + ) return out, last_h, last_c -def dynamic_lstmp(input, - size, - proj_size, - param_attr=None, - bias_attr=None, - use_peepholes=True, - is_reverse=False, - gate_activation='sigmoid', - cell_activation='tanh', - candidate_activation='tanh', - proj_activation='tanh', - dtype='float32', - name=None, - h_0=None, - c_0=None, - cell_clip=None, - proj_clip=None): +def dynamic_lstmp( + input, + size, + proj_size, + param_attr=None, + bias_attr=None, + use_peepholes=True, + is_reverse=False, + gate_activation='sigmoid', + cell_activation='tanh', + candidate_activation='tanh', + proj_activation='tanh', + dtype='float32', + name=None, + h_0=None, + c_0=None, + cell_clip=None, + proj_clip=None, +): r""" :api_attr: Static Graph @@ -2756,39 +3002,44 @@ def dynamic_lstmp(input, last_c.shape # (-1, 512) """ - assert _non_static_mode( - ) is not True, "please use lstm instead of dynamic_lstmp in dygraph mode!" + assert ( + _non_static_mode() is not True + ), "please use lstm instead of dynamic_lstmp in dygraph mode!" - assert bias_attr is not False, "bias_attr should not be False in dynamic_lstmp." + assert ( + bias_attr is not False + ), "bias_attr should not be False in dynamic_lstmp." 
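# Self-contained sketch (plain dicts rather than a Paddle append_op call) of
# how the nested inputs/outputs/attrs arguments end up laid out in the
# cudnn_lstm hunk above: one entry per line with a trailing comma after the
# last element, at every nesting level.
def describe_op(op_type, inputs, outputs, attrs):
    return {
        "type": op_type,
        "inputs": inputs,
        "outputs": outputs,
        "attrs": attrs,
    }


op = describe_op(
    op_type="cudnn_lstm",
    inputs={
        "Input": "input",
        "InitH": "init_h",
        "InitC": "init_c",
    },
    outputs={
        "Out": "out",
        "LastH": "last_h",
        "LastC": "last_c",
    },
    attrs={
        "is_bidirec": False,
        "num_layers": 2,
        "dropout_prob": 0.0,
    },
)
print(op["attrs"]["num_layers"])  # 2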
- check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'dynamic_lstmp') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'dynamic_lstmp' + ) check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_lstmp') if isinstance(h_0, Variable): - check_variable_and_dtype(h_0, 'h_0', ['float32', 'float64'], - 'dynamic_lstmp') + check_variable_and_dtype( + h_0, 'h_0', ['float32', 'float64'], 'dynamic_lstmp' + ) check_type(c_0, 'c_0', (Variable, type(None)), 'dynamic_lstmp') if isinstance(c_0, Variable): - check_variable_and_dtype(c_0, 'c_0', ['float32', 'float64'], - 'dynamic_lstmp') + check_variable_and_dtype( + c_0, 'c_0', ['float32', 'float64'], 'dynamic_lstmp' + ) helper = LayerHelper('lstmp', **locals()) size = size // 4 - weight = helper.create_parameter(attr=helper.param_attr, - shape=[proj_size, 4 * size], - dtype=dtype) - proj_weight = helper.create_parameter(attr=helper.param_attr, - shape=[size, proj_size], - dtype=dtype) + weight = helper.create_parameter( + attr=helper.param_attr, shape=[proj_size, 4 * size], dtype=dtype + ) + proj_weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, proj_size], dtype=dtype + ) bias_size = [1, 7 * size] if not use_peepholes: bias_size[1] = 4 * size - bias = helper.create_parameter(attr=helper.bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=True) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True + ) projection = helper.create_variable_for_type_inference(dtype) cell = helper.create_variable_for_type_inference(dtype) @@ -2800,16 +3051,18 @@ def dynamic_lstmp(input, 'Input': input, 'Weight': weight, 'ProjWeight': proj_weight, - 'Bias': bias + 'Bias': bias, } batch_size = input.shape[0] if h_0: - assert h_0.shape == (batch_size, proj_size), \ + assert h_0.shape == (batch_size, proj_size), ( 'The shape of h0 should be (batch_size, %d)' % proj_size + ) inputs['H0'] = h_0 if c_0: - assert c_0.shape == (batch_size, size), \ + assert c_0.shape == (batch_size, size), ( 'The shape of c0 should be (batch_size, %d)' % size + ) inputs['C0'] = c_0 if cell_clip: @@ -2817,37 +3070,41 @@ def dynamic_lstmp(input, if proj_clip: assert proj_clip >= 0, "proj_clip should not be negative." 
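# Sketch (hypothetical checks) of the two assert rewraps used in the
# dynamic_lstmp hunk above: when the condition is the long part it is wrapped
# in parentheses ahead of the message; when the message is the long part, the
# message is parenthesized instead of using a backslash continuation.
def check_lstmp_inputs(
    non_static_mode_enabled, h_0_shape, batch_size, proj_size
):
    assert (
        non_static_mode_enabled is not True
    ), "please use an RNN layer class instead of this API in dygraph mode!"
    assert h_0_shape == (batch_size, proj_size), (
        'The shape of h0 should be (batch_size, %d)' % proj_size
    )


check_lstmp_inputs(False, (32, 128), 32, 128)  # both checks pass silently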
- helper.append_op(type='lstmp', - inputs=inputs, - outputs={ - 'Projection': projection, - 'Cell': cell, - 'BatchHidden': batch_hidden, - 'BatchGate': batch_gate, - 'BatchCellPreAct': batch_cell_pre_act - }, - attrs={ - 'use_peepholes': use_peepholes, - 'cell_clip': cell_clip, - 'proj_clip': proj_clip, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'cell_activation': cell_activation, - 'candidate_activation': candidate_activation, - 'proj_activation': proj_activation - }) + helper.append_op( + type='lstmp', + inputs=inputs, + outputs={ + 'Projection': projection, + 'Cell': cell, + 'BatchHidden': batch_hidden, + 'BatchGate': batch_gate, + 'BatchCellPreAct': batch_cell_pre_act, + }, + attrs={ + 'use_peepholes': use_peepholes, + 'cell_clip': cell_clip, + 'proj_clip': proj_clip, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation, + 'proj_activation': proj_activation, + }, + ) return projection, cell -def dynamic_gru(input, - size, - param_attr=None, - bias_attr=None, - is_reverse=False, - gate_activation='sigmoid', - candidate_activation='tanh', - h_0=None, - origin_mode=False): +def dynamic_gru( + input, + size, + param_attr=None, + bias_attr=None, + is_reverse=False, + gate_activation='sigmoid', + candidate_activation='tanh', + h_0=None, + origin_mode=False, +): r""" :api_attr: Static Graph @@ -2953,33 +3210,35 @@ def dynamic_gru(input, hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim) """ - assert _non_static_mode( - ) is not True, "please use gru instead of dynamic_gru in dygraph mode!" + assert ( + _non_static_mode() is not True + ), "please use gru instead of dynamic_gru in dygraph mode!" - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'dynamic_gru') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'dynamic_gru' + ) check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_gru') if isinstance(h_0, Variable): - check_variable_and_dtype(h_0, 'h_0', ['float32', 'float64'], - 'dynamic_gru') + check_variable_and_dtype( + h_0, 'h_0', ['float32', 'float64'], 'dynamic_gru' + ) helper = LayerHelper('gru', **locals()) dtype = helper.input_dtype() - weight = helper.create_parameter(attr=helper.param_attr, - shape=[size, 3 * size], - dtype=dtype) - bias = helper.create_parameter(attr=helper.bias_attr, - shape=[1, 3 * size], - dtype=dtype, - is_bias=True) + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype + ) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=[1, 3 * size], dtype=dtype, is_bias=True + ) batch_size = input.shape[0] inputs = {'Input': input, 'Weight': weight, 'Bias': bias} if h_0: - assert h_0.shape == ( - batch_size, - size), 'The shape of h0 should be(batch_size, %d)' % size + assert h_0.shape == (batch_size, size), ( + 'The shape of h0 should be(batch_size, %d)' % size + ) inputs['H0'] = h_0 hidden = helper.create_variable_for_type_inference(dtype) @@ -2987,31 +3246,35 @@ def dynamic_gru(input, batch_reset_hidden_prev = helper.create_variable_for_type_inference(dtype) batch_hidden = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='gru', - inputs=inputs, - outputs={ - 'Hidden': hidden, - 'BatchGate': batch_gate, - 'BatchResetHiddenPrev': batch_reset_hidden_prev, - 'BatchHidden': batch_hidden - }, - attrs={ - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'activation': candidate_activation, - 'origin_mode': 
origin_mode - }) + helper.append_op( + type='gru', + inputs=inputs, + outputs={ + 'Hidden': hidden, + 'BatchGate': batch_gate, + 'BatchResetHiddenPrev': batch_reset_hidden_prev, + 'BatchHidden': batch_hidden, + }, + attrs={ + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'activation': candidate_activation, + 'origin_mode': origin_mode, + }, + ) return hidden -def gru_unit(input, - hidden, - size, - param_attr=None, - bias_attr=None, - activation='tanh', - gate_activation='sigmoid', - origin_mode=False): +def gru_unit( + input, + hidden, + size, + param_attr=None, + bias_attr=None, + activation='tanh', + gate_activation='sigmoid', + origin_mode=False, +): r""" :api_attr: Static Graph @@ -3113,8 +3376,9 @@ def gru_unit(input, """ check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'gru_unit') - check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'], - 'gru_unit') + check_variable_and_dtype( + hidden, 'hidden', ['float32', 'float64'], 'gru_unit' + ) check_type(size, 'size', (int), 'gru_unit') activation_dict = dict( identity=0, @@ -3130,9 +3394,9 @@ def gru_unit(input, size = size // 3 # create weight - weight = helper.create_parameter(attr=helper.param_attr, - shape=[size, 3 * size], - dtype=dtype) + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype + ) gate = helper.create_variable_for_type_inference(dtype) reset_hidden_pre = helper.create_variable_for_type_inference(dtype) @@ -3141,10 +3405,9 @@ def gru_unit(input, # create bias if helper.bias_attr: bias_size = [1, 3 * size] - bias = helper.create_parameter(attr=helper.bias_attr, - shape=bias_size, - dtype=dtype, - is_bias=True) + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True + ) inputs['Bias'] = bias helper.append_op( @@ -3158,22 +3421,25 @@ def gru_unit(input, attrs={ 'activation': 2, # tanh 'gate_activation': 1, # sigmoid - 'origin_mode': origin_mode - }) + 'origin_mode': origin_mode, + }, + ) return updated_hidden, reset_hidden_pre, gate -def beam_search(pre_ids, - pre_scores, - ids, - scores, - beam_size, - end_id, - level=0, - is_accumulated=True, - name=None, - return_parent_idx=False): +def beam_search( + pre_ids, + pre_scores, + ids, + scores, + beam_size, + end_id, + level=0, + is_accumulated=True, + name=None, + return_parent_idx=False, +): r""" Beam search is a classical algorithm for selecting candidate words in a @@ -3280,11 +3546,13 @@ def beam_search(pre_ids, end_id=end_id) """ check_variable_and_dtype(pre_ids, 'pre_ids', ['int64'], 'beam_search') - check_variable_and_dtype(pre_scores, 'pre_scores', ['float32', 'float64'], - 'beam_search') + check_variable_and_dtype( + pre_scores, 'pre_scores', ['float32', 'float64'], 'beam_search' + ) check_type(ids, 'ids', (Variable, type(None)), 'beam_search') - check_variable_and_dtype(scores, 'scores', ['float32', 'float64'], - 'beam_search') + check_variable_and_dtype( + scores, 'scores', ['float32', 'float64'], 'beam_search' + ) helper = LayerHelper('beam_search', **locals()) score_type = pre_scores.dtype id_type = pre_ids.dtype @@ -3294,7 +3562,8 @@ def beam_search(pre_ids, inputs["ids"] = ids selected_scores = helper.create_variable_for_type_inference( - dtype=score_type) + dtype=score_type + ) selected_ids = helper.create_variable_for_type_inference(dtype=id_type) # parent_idx is a tensor used to gather cell states at the next time # step. 
Though lod in selected_ids can also be used to gather by @@ -3308,7 +3577,7 @@ def beam_search(pre_ids, outputs={ 'selected_ids': selected_ids, 'selected_scores': selected_scores, - 'parent_idx': parent_idx + 'parent_idx': parent_idx, }, attrs={ # TODO(ChunweiYan) to assure other value support @@ -3316,7 +3585,8 @@ def beam_search(pre_ids, 'beam_size': beam_size, 'end_id': end_id, 'is_accumulated': is_accumulated, - }) + }, + ) if return_parent_idx: return selected_ids, selected_scores, parent_idx else: @@ -3381,37 +3651,37 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None): ids, scores, beam_size=5, end_id=0) """ check_variable_and_dtype(ids, 'ids', ['int64'], 'beam_search_encode') - check_variable_and_dtype(scores, 'scores', ['float32'], - 'beam_search_encode') + check_variable_and_dtype( + scores, 'scores', ['float32'], 'beam_search_encode' + ) helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_variable_for_type_inference(dtype=ids.dtype) sentence_scores = helper.create_variable_for_type_inference( - dtype=scores.dtype) - - helper.append_op(type="beam_search_decode", - inputs={ - "Ids": ids, - "Scores": scores - }, - outputs={ - "SentenceIds": sentence_ids, - "SentenceScores": sentence_scores - }, - attrs={ - "beam_size": beam_size, - "end_id": end_id - }) + dtype=scores.dtype + ) + + helper.append_op( + type="beam_search_decode", + inputs={"Ids": ids, "Scores": scores}, + outputs={ + "SentenceIds": sentence_ids, + "SentenceScores": sentence_scores, + }, + attrs={"beam_size": beam_size, "end_id": end_id}, + ) return sentence_ids, sentence_scores -def lstm_unit(x_t, - hidden_t_prev, - cell_t_prev, - forget_bias=0.0, - param_attr=None, - bias_attr=None, - name=None): +def lstm_unit( + x_t, + hidden_t_prev, + cell_t_prev, + forget_bias=0.0, + param_attr=None, + bias_attr=None, + name=None, +): r""" :api_attr: Static Graph @@ -3496,10 +3766,12 @@ def lstm_unit(x_t, """ helper = LayerHelper('lstm_unit', **locals()) check_variable_and_dtype(x_t, 'x_t', ['float32', 'float64'], 'lstm_unit') - check_variable_and_dtype(hidden_t_prev, 'hidden_t_prev', - ['float32', 'float64'], 'lstm_unit') - check_variable_and_dtype(cell_t_prev, 'cell_t_prev', ['float32', 'float64'], - 'lstm_unit') + check_variable_and_dtype( + hidden_t_prev, 'hidden_t_prev', ['float32', 'float64'], 'lstm_unit' + ) + check_variable_and_dtype( + cell_t_prev, 'cell_t_prev', ['float32', 'float64'], 'lstm_unit' + ) if len(x_t.shape) != 2: raise ValueError("Rank of x_t must be 2.") @@ -3509,37 +3781,41 @@ def lstm_unit(x_t, if len(cell_t_prev.shape) != 2: raise ValueError("Rank of cell_t_prev must be 2.") - if x_t.shape[0] != hidden_t_prev.shape[0] or x_t.shape[ - 0] != cell_t_prev.shape[0]: - raise ValueError("The 1st dimensions of x_t, hidden_t_prev and " - "cell_t_prev must be the same.") + if ( + x_t.shape[0] != hidden_t_prev.shape[0] + or x_t.shape[0] != cell_t_prev.shape[0] + ): + raise ValueError( + "The 1st dimensions of x_t, hidden_t_prev and " + "cell_t_prev must be the same." + ) if hidden_t_prev.shape[1] != cell_t_prev.shape[1]: - raise ValueError("The 2nd dimensions of hidden_t_prev and " - "cell_t_prev must be the same.") + raise ValueError( + "The 2nd dimensions of hidden_t_prev and " + "cell_t_prev must be the same." 
+ ) if bias_attr is None: bias_attr = ParamAttr() size = cell_t_prev.shape[1] concat_out = nn.concat(input=[x_t, hidden_t_prev], axis=1) - fc_out = nn.fc(input=concat_out, - size=4 * size, - param_attr=param_attr, - bias_attr=bias_attr) + fc_out = nn.fc( + input=concat_out, + size=4 * size, + param_attr=param_attr, + bias_attr=bias_attr, + ) dtype = x_t.dtype c = helper.create_variable_for_type_inference(dtype) h = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='lstm_unit', - inputs={ - "X": fc_out, - "C_prev": cell_t_prev - }, - outputs={ - "C": c, - "H": h - }, - attrs={"forget_bias": forget_bias}) + helper.append_op( + type='lstm_unit', + inputs={"X": fc_out, "C_prev": cell_t_prev}, + outputs={"C": c, "H": h}, + attrs={"forget_bias": forget_bias}, + ) return h, c diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py index 5deac6ff952b83593d2266f0ffb866c8c2f87276..1ea5e2cf5e5ef24de19249f0bc0e5903b49b85c7 100644 --- a/python/paddle/fluid/layers/sequence_lod.py +++ b/python/paddle/fluid/layers/sequence_lod.py @@ -14,7 +14,14 @@ import paddle from .layer_function_generator import templatedoc -from ..framework import core, Variable, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, convert_np_dtype_to_dtype_ +from ..framework import ( + core, + Variable, + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, + convert_np_dtype_to_dtype_, +) from ..layer_helper import LayerHelper from ..data_feeder import check_variable_and_dtype, check_type, check_dtype from ..core import VarDesc @@ -41,20 +48,22 @@ __all__ = [ @templatedoc() -def sequence_conv(input, - num_filters, - filter_size=3, - filter_stride=1, - padding=True, - padding_start=None, - bias_attr=None, - param_attr=None, - act=None, - name=None): +def sequence_conv( + input, + num_filters, + filter_size=3, + filter_stride=1, + padding=True, + padding_start=None, + bias_attr=None, + param_attr=None, + act=None, + name=None, +): r""" Note: - Only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ). + Only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ). This operator receives input sequences with variable length and other convolutional configuration parameters(num_filters, filter_size) to apply the convolution operation. @@ -146,31 +155,35 @@ def sequence_conv(input, x_conved = paddle.static.nn.sequence_conv(input=x, num_filters=2, filter_size=3, padding_start=-1) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'sequence_conv') + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
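Aside for readers of this hunk: the lstm_unit helper above wires concat([x_t, hidden_t_prev]) into an fc of width 4 * size and feeds the result to the lstm_unit op together with cell_t_prev and forget_bias. A rough NumPy sketch of the cell arithmetic such a composition computes; the i/f/candidate/o gate ordering and the exact place forget_bias enters are illustrative assumptions, not read off this diff.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_unit_sketch(fc_out, c_prev, forget_bias=0.0):
    # fc_out: [batch, 4 * size], produced by fc(concat([x_t, hidden_t_prev]))
    i, f, cand, o = np.split(fc_out, 4, axis=1)   # assumed gate order
    i = sigmoid(i)
    f = sigmoid(f + forget_bias)                  # forget_bias shifts the forget gate
    o = sigmoid(o)
    c = f * c_prev + i * np.tanh(cand)            # new cell state
    h = o * np.tanh(c)                            # new hidden state
    return h, c

h, c = lstm_unit_sketch(np.random.randn(2, 4 * 3), np.zeros((2, 3)))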
+ check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'sequence_conv' + ) helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() filter_shape = [filter_size * input.shape[1], num_filters] - filter_param = helper.create_parameter(attr=helper.param_attr, - shape=filter_shape, - dtype=dtype) + filter_param = helper.create_parameter( + attr=helper.param_attr, shape=filter_shape, dtype=dtype + ) pre_bias = helper.create_variable_for_type_inference(dtype) if padding_start is None: padding_start = -int(filter_size // 2) - helper.append_op(type='sequence_conv', - inputs={ - 'X': [input], - 'Filter': [filter_param], - }, - outputs={"Out": pre_bias}, - attrs={ - 'contextStride': filter_stride, - 'contextStart': padding_start, - 'contextLength': filter_size, - }) + helper.append_op( + type='sequence_conv', + inputs={ + 'X': [input], + 'Filter': [filter_param], + }, + outputs={"Out": pre_bias}, + attrs={ + 'contextStride': filter_stride, + 'contextStart': padding_start, + 'contextLength': filter_size, + }, + ) pre_act = helper.append_bias_op(pre_bias) return helper.append_activation(pre_act) @@ -244,17 +257,21 @@ def sequence_softmax(input, use_cudnn=False, name=None): dtype='float32', lod_level=1) x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_softmax', **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'sequence_softmax') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'sequence_softmax' + ) dtype = helper.input_dtype() softmax_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="sequence_softmax", - inputs={"X": input}, - outputs={"Out": softmax_out}, - attrs={"use_cudnn": use_cudnn}) + helper.append_op( + type="sequence_softmax", + inputs={"X": input}, + outputs={"Out": softmax_out}, + attrs={"use_cudnn": use_cudnn}, + ) return softmax_out @@ -345,26 +362,27 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): last_x = paddle.static.nn.sequence_pool(input=x, pool_type='last') first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first') """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'sequence_pool') + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
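For context, sequence_pool collapses every LoD sequence into a single row (the docstring above shows pool_type 'last' and 'first'). A small NumPy sketch over explicit LoD offsets; treating pad_value as the fill for empty sequences is an assumption based on the attribute name.

import numpy as np

def sequence_pool_sketch(x, lod_offsets, pool_type='last', pad_value=0.0):
    # x: [total_steps, dim]; lod_offsets like [0, 2, 5, 9] delimit the sequences
    rows = []
    for start, end in zip(lod_offsets[:-1], lod_offsets[1:]):
        if start == end:                      # empty sequence -> pad_value (assumed)
            rows.append(np.full(x.shape[1], pad_value))
        elif pool_type == 'first':
            rows.append(x[start])
        elif pool_type == 'last':
            rows.append(x[end - 1])
        elif pool_type == 'sum':
            rows.append(x[start:end].sum(axis=0))
        else:
            raise ValueError(pool_type)
    return np.stack(rows)

x = np.arange(18, dtype='float32').reshape(9, 2)
print(sequence_pool_sketch(x, [0, 2, 5, 9], 'last'))
# -> [[ 2.  3.] [ 8.  9.] [16. 17.]]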
+ check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'sequence_pool' + ) helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) max_index = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="sequence_pool", - inputs={"X": input}, - outputs={ - "Out": pool_out, - "MaxIndex": max_index - }, - attrs={ - "pooltype": pool_type.upper(), - "is_test": is_test, - "pad_value": pad_value - }) + helper.append_op( + type="sequence_pool", + inputs={"X": input}, + outputs={"Out": pool_out, "MaxIndex": max_index}, + attrs={ + "pooltype": pool_type.upper(), + "is_test": is_test, + "pad_value": pad_value, + }, + ) # when pool_type is max, variable max_index is initialized, # so we stop the gradient explicitly here @@ -422,20 +440,24 @@ def sequence_concat(input, name=None): y = paddle.static.data(name='y', shape=[-1, 10], dtype='float32', lod_level=1) out = paddle.static.nn.sequence_concat(input=[x, y]) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_concat', **locals()) check_type(input, 'input', list, 'fluid.layers.sequence_concat') for i, input_x in enumerate(input): - check_variable_and_dtype(input_x, 'input[' + str(i) + ']', - ['int64', 'float32', 'float64'], - 'fluid.layers.sequence_concat') + check_variable_and_dtype( + input_x, + 'input[' + str(i) + ']', + ['int64', 'float32', 'float64'], + 'fluid.layers.sequence_concat', + ) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op(type='sequence_concat', - inputs={'X': input}, - outputs={'Out': [out]}) + helper.append_op( + type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]} + ) return out @@ -491,8 +513,9 @@ def sequence_first_step(input): x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1) x_first_step = paddle.static.nn.sequence_first_step(input=x) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'sequence_first_step') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'sequence_first_step' + ) return sequence_pool(input=input, pool_type="first") @@ -549,8 +572,9 @@ def sequence_last_step(input): x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1) x_last_step = paddle.static.nn.sequence_last_step(input=x) """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'sequence_last_step') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'sequence_last_step' + ) return sequence_pool(input=input, pool_type="last") @@ -615,17 +639,23 @@ def sequence_slice(input, offset, length, name=None): subseqs = paddle.static.nn.sequence_slice(input=seqs, offset=offset, length=length) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
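The sequence_slice example above picks a sub-sequence out of every sequence via a per-sequence offset and length. A NumPy sketch of that behaviour, assuming offsets are counted inside each sequence:

import numpy as np

def sequence_slice_sketch(x, lod_offsets, offset, length):
    # x: [total_steps, dim]; offset[i]/length[i] select a piece of sequence i
    pieces, new_offsets = [], [0]
    for i, (start, end) in enumerate(zip(lod_offsets[:-1], lod_offsets[1:])):
        s = start + offset[i]
        e = s + length[i]
        assert e <= end, "slice must stay inside its sequence"
        pieces.append(x[s:e])
        new_offsets.append(new_offsets[-1] + length[i])
    return np.concatenate(pieces), new_offsets

x = np.arange(20, dtype='float32').reshape(10, 2)
out, lod = sequence_slice_sketch(x, [0, 4, 10], offset=[1, 2], length=[2, 3])
print(out.shape, lod)   # (5, 2) [0, 2, 5]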
helper = LayerHelper("sequence_slice", **locals()) - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], - 'sequence_slice') - check_variable_and_dtype(offset, 'offset', ['int32', 'int64'], - 'sequence_slice') - check_variable_and_dtype(length, 'length', ['int32', 'int64'], - 'sequence_slice') + check_variable_and_dtype( + input, + 'input', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_slice', + ) + check_variable_and_dtype( + offset, 'offset', ['int32', 'int64'], 'sequence_slice' + ) + check_variable_and_dtype( + length, 'length', ['int32', 'int64'], 'sequence_slice' + ) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) @@ -633,13 +663,11 @@ def sequence_slice(input, offset, length, name=None): offset.stop_gradient = True length.stop_gradient = True - helper.append_op(type="sequence_slice", - inputs={ - "X": input, - "Offset": offset, - "Length": length - }, - outputs={"Out": out}) + helper.append_op( + type="sequence_slice", + inputs={"X": input, "Offset": offset, "Length": length}, + outputs={"Out": out}, + ) return out @@ -765,20 +793,21 @@ def sequence_expand(x, y, ref_level=-1, name=None): # dtype: float # data: [1 2 1 2 3 4 3 4] """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'sequence_expand') + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand' + ) helper = LayerHelper('sequence_expand', **locals()) dtype = helper.input_dtype(input_param_name='x') tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='sequence_expand', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': tmp}, - attrs={'ref_level': ref_level}) + helper.append_op( + type='sequence_expand', + inputs={'X': x, 'Y': y}, + outputs={'Out': tmp}, + attrs={'ref_level': ref_level}, + ) return tmp @@ -886,20 +915,19 @@ def sequence_expand_as(x, y, name=None): # dtype: float # data: [1 1 1 2 2 2 3 4] """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'sequence_expand_as') + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as' + ) check_type(y, 'y', Variable, 'sequence_expand_as') helper = LayerHelper('sequence_expand_as', **locals()) dtype = helper.input_dtype(input_param_name='x') tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='sequence_expand_as', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': tmp}) + helper.append_op( + type='sequence_expand_as', inputs={'X': x, 'Y': y}, outputs={'Out': tmp} + ) return tmp @@ -990,14 +1018,22 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): out = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
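sequence_expand_as, shown above with output data [1 1 1 2 2 2 3 4], repeats each row of x to the length of the matching sequence in y. The lengths [3, 3, 1, 1] in this sketch are assumed so that the docstring output is reproduced:

import numpy as np

def sequence_expand_as_sketch(x, y_seq_lens):
    # x: [num_seqs, dim]; row i is repeated y_seq_lens[i] times
    return np.repeat(x, y_seq_lens, axis=0)

x = np.array([[1], [2], [3], [4]], dtype='float32')
print(sequence_expand_as_sketch(x, [3, 3, 1, 1]).ravel())
# -> [1. 1. 1. 2. 2. 2. 3. 4.], matching the docstring output above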
helper = LayerHelper('sequence_pad', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'fluid.layers.sequence_pad') - check_variable_and_dtype(pad_value, 'pad_value', - ['float32', 'float64', 'int32', 'int64'], - 'fluid.layers.sequence_pad') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'fluid.layers.sequence_pad', + ) + check_variable_and_dtype( + pad_value, + 'pad_value', + ['float32', 'float64', 'int32', 'int64'], + 'fluid.layers.sequence_pad', + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) length = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) @@ -1007,16 +1043,12 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): if maxlen is None: maxlen = -1 - helper.append_op(type='sequence_pad', - inputs={ - 'X': x, - 'PadValue': pad_value - }, - outputs={ - 'Out': out, - 'Length': length - }, - attrs={'padded_length': maxlen}) + helper.append_op( + type='sequence_pad', + inputs={'X': x, 'PadValue': pad_value}, + outputs={'Out': out, 'Length': length}, + attrs={'padded_length': maxlen}, + ) return out, length @@ -1030,22 +1062,22 @@ def sequence_unpad(x, length, name=None): .. code-block:: text - Case 1: + Case 1: - Given input Variable **x**: - x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 13.0, 14.0, 15.0]], + Given input Variable **x**: + x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0], + [ 6.0, 7.0, 8.0, 9.0, 10.0], + [11.0, 12.0, 13.0, 14.0, 15.0]], - in which there are 3 sequences padded to length 5, and the actual length - specified by input Variable **length**: + in which there are 3 sequences padded to length 5, and the actual length + specified by input Variable **length**: - length.data = [2, 3, 4], + length.data = [2, 3, 4], - after unpadding, the output Variable will be: + after unpadding, the output Variable will be: - out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]] - out.lod = [[0, 2, 5, 9]] + out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]] + out.lod = [[0, 2, 5, 9]] Args: x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2. @@ -1075,24 +1107,29 @@ def sequence_unpad(x, length, name=None): unpad_data = paddle.static.nn.sequence_unpad(x=pad_data, length=len) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
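The Case 1 walk-through in the sequence_unpad docstring above can be reproduced with plain NumPy: cut each row back to its real length, concatenate, and take the running sum of the lengths as the LoD offsets.

import numpy as np

def sequence_unpad_sketch(x, length):
    pieces = [row[:n] for row, n in zip(x, length)]
    out = np.concatenate(pieces)
    lod = np.concatenate([[0], np.cumsum(length)]).tolist()
    return out, [lod]

x = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
              [6.0, 7.0, 8.0, 9.0, 10.0],
              [11.0, 12.0, 13.0, 14.0, 15.0]])
out, lod = sequence_unpad_sketch(x, [2, 3, 4])
print(out)   # [ 1.  2.  6.  7.  8. 11. 12. 13. 14.]
print(lod)   # [[0, 2, 5, 9]]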
helper = LayerHelper('sequence_unpad', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'fluid.layers.sequence_unpad') - check_variable_and_dtype(length, 'length', ['int64'], - 'fluid.layers.sequence_unpad') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'fluid.layers.sequence_unpad', + ) + check_variable_and_dtype( + length, 'length', ['int64'], 'fluid.layers.sequence_unpad' + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) length.stop_gradient = True - helper.append_op(type='sequence_unpad', - inputs={ - 'X': x, - 'Length': length - }, - outputs={'Out': out}) + helper.append_op( + type='sequence_unpad', + inputs={'X': x, 'Length': length}, + outputs={'Out': out}, + ) return out @@ -1145,17 +1182,23 @@ def sequence_reshape(input, new_dim): x = paddle.static.data(name='x', shape=[None, 16], dtype='float32', lod_level=1) x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_reshape', **locals()) - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], - 'fluid.layers.sequence_reshape') + check_variable_and_dtype( + input, + 'input', + ['float32', 'float64', 'int32', 'int64'], + 'fluid.layers.sequence_reshape', + ) out = helper.create_variable_for_type_inference(helper.input_dtype()) - helper.append_op(type='sequence_reshape', - inputs={'X': [input]}, - outputs={'Out': [out]}, - attrs={'new_dim': new_dim}) + helper.append_op( + type='sequence_reshape', + inputs={'X': [input]}, + outputs={'Out': [out]}, + attrs={'new_dim': new_dim}, + ) return out @@ -1224,28 +1267,34 @@ def sequence_scatter(input, index, updates, name=None): output = paddle.static.nn.sequence_scatter(input, index, updates) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
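For the sequence_enumerate call above (win_size=3, pad_value=0), a pure-Python sketch of window enumeration with padding. The exact semantics are inferred from the parameter names and are an assumption here, since the op's full docstring is not visible in this hunk.

def sequence_enumerate_sketch(seqs, win_size, pad_value=0):
    # seqs: list of id lists, one per LoD sequence
    out = []
    for seq in seqs:
        for i in range(len(seq)):
            window = seq[i:i + win_size]
            window += [pad_value] * (win_size - len(window))
            out.append(window)
    return out

print(sequence_enumerate_sketch([[1, 2, 3], [4, 5]], win_size=2))
# -> [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]]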
helper = LayerHelper('sequence_scatter', **locals()) - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], - 'sequence_scatter') - check_variable_and_dtype(index, 'index', ['int32', 'int64'], - 'sequence_scatter') - check_variable_and_dtype(updates, 'updates', - ['float32', 'float64', 'int32', 'int64'], - 'sequence_scatter') + check_variable_and_dtype( + input, + 'input', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_scatter', + ) + check_variable_and_dtype( + index, 'index', ['int32', 'int64'], 'sequence_scatter' + ) + check_variable_and_dtype( + updates, + 'updates', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_scatter', + ) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="sequence_scatter", - inputs={ - "X": input, - "Ids": index, - "Updates": updates - }, - outputs={"Out": out}) + helper.append_op( + type="sequence_scatter", + inputs={"X": input, "Ids": index, "Updates": updates}, + outputs={"Out": out}, + ) return out @@ -1300,20 +1349,22 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): x = paddle.static.data(name='x', shape=[-1, 1], dtype='int32', lod_level=1) out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") - check_variable_and_dtype(input, 'input', ['int32', 'int64'], - 'sequence_enumerate') + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." + check_variable_and_dtype( + input, 'input', ['int32', 'int64'], 'sequence_enumerate' + ) helper = LayerHelper('sequence_enumerate', **locals()) - out = helper.create_variable_for_type_inference(helper.input_dtype(), - stop_gradient=True) - helper.append_op(type='sequence_enumerate', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'win_size': win_size, - 'pad_value': pad_value - }) + out = helper.create_variable_for_type_inference( + helper.input_dtype(), stop_gradient=True + ) + helper.append_op( + type='sequence_enumerate', + inputs={'X': input}, + outputs={'Out': out}, + attrs={'win_size': win_size, 'pad_value': pad_value}, + ) return out @@ -1427,16 +1478,22 @@ def sequence_reverse(x, name=None): x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1) x_reversed = paddle.static.nn.sequence_reverse(x) """ - assert not _non_static_mode(), ( - "sequence layer is not supported in dygraph mode yet.") + assert ( + not _non_static_mode() + ), "sequence layer is not supported in dygraph mode yet." 
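sequence_reverse, as its name and the LoD input suggest, flips each sequence on its own rather than the whole batch. A compact NumPy sketch:

import numpy as np

def sequence_reverse_sketch(x, lod_offsets):
    out = x.copy()
    for start, end in zip(lod_offsets[:-1], lod_offsets[1:]):
        out[start:end] = x[start:end][::-1]
    return out

x = np.arange(5).reshape(5, 1)
print(sequence_reverse_sketch(x, [0, 3, 5]).ravel())   # [2 1 0 4 3]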
helper = LayerHelper("sequence_reverse", **locals()) - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'int8', 'int32', 'int64'], - 'fluid.layers.sequence_reverse') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int8', 'int32', 'int64'], + 'fluid.layers.sequence_reverse', + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="sequence_reverse", - inputs={"X": x}, - outputs={"Y": out}, - attrs=dict()) + helper.append_op( + type="sequence_reverse", + inputs={"X": x}, + outputs={"Y": out}, + attrs=dict(), + ) return out diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 5e82bef749bb332d9fda08fbcf35ab774f793491..a282fae027f24035b11cd9568cbf092427d5679e 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -19,14 +19,28 @@ import warnings from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..initializer import Initializer -from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode, _get_paddle_place +from ..framework import ( + _current_expected_place, + convert_np_dtype_to_dtype_, + _non_static_mode, + _varbase_creator, + device_guard, + _in_legacy_dygraph, + in_dygraph_mode, + _get_paddle_place, +) from ..framework import Variable from ..initializer import Constant from ..core import VarDesc from .. import core from .layer_function_generator import templatedoc from . import utils -from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from ..data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + convert_dtype, +) from paddle.utils import deprecated from .utils import check_shape @@ -83,24 +97,32 @@ def create_tensor(dtype, name=None, persistable=False): import paddle.fluid as fluid tensor = fluid.layers.create_tensor(dtype='float32') """ - check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int32', - 'int64' - ], 'create_tensor') + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int32', + 'int32', + 'int64', + ], + 'create_tensor', + ) helper = LayerHelper("create_tensor", **locals()) - return helper.create_variable(name=helper.name, - dtype=dtype, - persistable=persistable) + return helper.create_variable( + name=helper.name, dtype=dtype, persistable=persistable + ) -def create_parameter(shape, - dtype, - name=None, - attr=None, - is_bias=False, - default_initializer=None): +def create_parameter( + shape, dtype, name=None, attr=None, is_bias=False, default_initializer=None +): """ - :api_attr: Static Graph + :api_attr: Static Graph This function creates a parameter. The parameter is a learnable variable, which can have gradient, and can be optimized. 
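Usage sketch for the two creation helpers being reformatted here (a learnable parameter versus a plain persistable global); the shapes and names are illustrative, and the paddle.static entry points follow the style of the create_global_var docstring example below.

import paddle

paddle.enable_static()

# A learnable weight: trainable, receives gradients.
w = paddle.static.create_parameter(shape=[784, 200], dtype='float32')

# A plain global tensor: persistable, no gradient, e.g. a step counter.
step = paddle.static.create_global_var(
    shape=[1], value=0.0, dtype='float32', persistable=True, name='step'
)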
@@ -132,31 +154,55 @@ def create_parameter(shape, """ check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter') for item in shape: - check_type(item, 'item of shape', - (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32, - numpy.int64), 'create_parameter') - - check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', - 'int64', 'uint8' - ], 'create_parameter') + check_type( + item, + 'item of shape', + ( + int, + numpy.uint8, + numpy.int8, + numpy.int16, + numpy.int32, + numpy.int64, + ), + 'create_parameter', + ) + + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + ], + 'create_parameter', + ) check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter') - check_type(default_initializer, 'default_initializer', - (type(None), Initializer), 'create_parameter') + check_type( + default_initializer, + 'default_initializer', + (type(None), Initializer), + 'create_parameter', + ) helper = LayerHelper("create_parameter", **locals()) if attr is None: attr = ParamAttr(name=name) - return helper.create_parameter(attr, shape, convert_dtype(dtype), is_bias, - default_initializer) + return helper.create_parameter( + attr, shape, convert_dtype(dtype), is_bias, default_initializer + ) -def create_global_var(shape, - value, - dtype, - persistable=False, - force_cpu=False, - name=None): +def create_global_var( + shape, value, dtype, persistable=False, force_cpu=False, name=None +): """ This function creates a new tensor variable with value in the global block(block 0). @@ -183,35 +229,53 @@ def create_global_var(shape, var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32', persistable=True, force_cpu=True, name='new_var') """ - check_type(shape, 'shape', (list, tuple, numpy.ndarray), - 'create_global_var') + check_type( + shape, 'shape', (list, tuple, numpy.ndarray), 'create_global_var' + ) for item in shape: - check_type(item, 'item of shape', - (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32, - numpy.int64), 'create_global_var') - - check_dtype(dtype, 'dtype', [ - 'bool', - 'float16', - 'float32', - 'float64', - 'int8', - 'int16', - 'int32', - 'int64', - 'uint8', - 'uint16', - ], 'create_global_var') + check_type( + item, + 'item of shape', + ( + int, + numpy.uint8, + numpy.int8, + numpy.int16, + numpy.int32, + numpy.int64, + ), + 'create_global_var', + ) + + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'create_global_var', + ) helper = LayerHelper("global_var", **locals()) - var = helper.create_global_variable(dtype=dtype, - shape=shape, - persistable=persistable, - name=name, - stop_gradient=True) - helper.set_variable_initializer(var, - initializer=Constant(value=float(value), - force_cpu=force_cpu)) + var = helper.create_global_variable( + dtype=dtype, + shape=shape, + persistable=persistable, + name=name, + stop_gradient=True, + ) + helper.set_variable_initializer( + var, initializer=Constant(value=float(value), force_cpu=force_cpu) + ) return var @@ -251,25 +315,50 @@ def cast(x, dtype): out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', - 'uint8', 'uint16' - ], 'cast') - check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'int8', 
'int16', 'int32', - 'int64', 'uint8', 'uint16' - ], 'cast') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) helper = LayerHelper('cast', **locals()) out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=x.stop_gradient) - helper.append_op(type='cast', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': out.dtype - }) + dtype=dtype, stop_gradient=x.stop_gradient + ) + helper.append_op( + type='cast', + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype}, + ) return out @@ -344,9 +433,11 @@ def concat(input, axis=0, name=None): if not isinstance(input, Variable): for id, x in enumerate(input): check_variable_and_dtype( - x, 'input[' + str(id) + ']', + x, + 'input[' + str(id) + ']', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'concat') + 'concat', + ) if x.dtype != input[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type." @@ -357,8 +448,11 @@ def concat(input, axis=0, name=None): if isinstance(axis, Variable): check_dtype( - axis.dtype, 'axis', ['int32', 'int64'], 'concat', - "The data type of axis must be int32 or int64 when axis is a Tensor" + axis.dtype, + 'axis', + ['int32', 'int64'], + 'concat', + "The data type of axis must be int32 or int64 when axis is a Tensor", ) helper = LayerHelper('concat', **locals()) @@ -369,19 +463,17 @@ def concat(input, axis=0, name=None): # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0] # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode. - assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \ - "number of the elements must be 1, but received %s." % len(input) + assert len(input) == 1, ( + "If the elements of 'input' in concat are Variable(LoDTensorArray), " + "number of the elements must be 1, but received %s." 
% len(input) + ) out_index = helper.create_variable_for_type_inference(dtype="int32") - helper.append_op(type='tensor_array_to_tensor', - inputs={'X': input[0]}, - outputs={ - 'Out': [out], - 'OutIndex': [out_index] - }, - attrs={ - 'axis': axis, - 'use_stack': False - }) + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': input[0]}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': False}, + ) else: inputs = {'X': input} attrs = {} @@ -389,10 +481,9 @@ def concat(input, axis=0, name=None): axis.stop_gradient = True attrs['axis'] = axis - helper.append_op(type='concat', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs + ) return out @@ -478,33 +569,36 @@ def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False): """ if _non_static_mode(): assert isinstance( - input, list), "The 'input' in tensor_array_to_tensor must be list" + input, list + ), "The 'input' in tensor_array_to_tensor must be list" from .nn import stack, concat from ..dygraph import to_variable + op = stack if use_stack else concat res = op(input, axis=axis) sizes = to_variable( - numpy.array(list(map(lambda x: int(x.shape[axis]), input)))) + numpy.array(list(map(lambda x: int(x.shape[axis]), input))) + ) return res, sizes check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor') if isinstance(input, list): for i, input_x in enumerate(input): - check_type(input_x, 'input[' + str(i) + ']', Variable, - 'tensor_array_to_tensor') + check_type( + input_x, + 'input[' + str(i) + ']', + Variable, + 'tensor_array_to_tensor', + ) helper = LayerHelper('tensor_array_to_tensor', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) out_index = helper.create_variable_for_type_inference(dtype="int32") - helper.append_op(type='tensor_array_to_tensor', - inputs={'X': input}, - outputs={ - 'Out': [out], - 'OutIndex': [out_index] - }, - attrs={ - 'axis': axis, - 'use_stack': use_stack - }) + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': input}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': use_stack}, + ) return out, out_index @@ -561,25 +655,36 @@ def sums(input, out=None): check_type(input, 'input', (Variable, tuple, list), 'sums') if isinstance(input, list) or isinstance(input, tuple): for input_section in input: - check_variable_and_dtype(input_section, "input", \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums') + check_variable_and_dtype( + input_section, + "input", + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'sums', + ) else: - check_variable_and_dtype(input, "input", \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums') + check_variable_and_dtype( + input, + "input", + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'sums', + ) helper = LayerHelper('sum', **locals()) if out is None: out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) else: - check_variable_and_dtype(out, "out", - ['float32', 'float64', 'int32', 'int64'], - 'sums') - - helper.append_op(type='sum', - inputs={'X': input}, - outputs={'Out': out}, - attrs={'use_mkldnn': False}) + check_variable_and_dtype( + out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums' + ) + + helper.append_op( + type='sum', + inputs={'X': input}, + outputs={'Out': out}, + attrs={'use_mkldnn': False}, + ) return out @@ -614,9 
+719,12 @@ def assign(input, output=None): result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] """ helper = LayerHelper('assign', **locals()) - check_type(input, 'input', - (Variable, numpy.ndarray, list, tuple, float, int, bool), - 'assign') + check_type( + input, + 'input', + (Variable, numpy.ndarray, list, tuple, float, int, bool), + 'assign', + ) is_inplace = True if output is not None else False if numpy.isscalar(input) and not isinstance(input, str): @@ -642,16 +750,29 @@ def assign(input, output=None): output = core.eager.Tensor() _legacy_C_ops.assign(input, output) else: - check_dtype(input.dtype, 'input', [ - 'float16', 'uint16', 'float32', 'float64', 'int32', 'int64', - 'uint8', 'bool' - ], 'assign', '(When the type of input in assign is Variable.)') + check_dtype( + input.dtype, + 'input', + [ + 'float16', + 'uint16', + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', + 'bool', + ], + 'assign', + '(When the type of input in assign is Variable.)', + ) if output is None: output = helper.create_variable_for_type_inference( - dtype=input.dtype) - helper.append_op(type='assign', - inputs={'X': [input]}, - outputs={'Out': [output]}) + dtype=input.dtype + ) + helper.append_op( + type='assign', inputs={'X': [input]}, outputs={'Out': [output]} + ) elif isinstance(input, numpy.ndarray): # Not support [var, var, ...] currently. if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input): @@ -665,7 +786,8 @@ def assign(input, output=None): warnings.warn( "paddle.assign doesn't support float64 input now due " "to current platform protobuf data limitation, we convert " - "it to float32") + "it to float32" + ) dtype = VarDesc.VarType.FP32 if dtype == VarDesc.VarType.BOOL: value_name = "bool_values" @@ -683,31 +805,49 @@ def assign(input, output=None): raise TypeError( "When the type of 'input' in assign is numpy.ndarray, " "the data type of 'input' must be bool, float32, int32 or int64, but " - "received %s." % convert_dtype(dtype)) + "received %s." % convert_dtype(dtype) + ) if input.size > 1024 * 1024: - raise ValueError("The size of input is too big. Please consider " - "saving it to file and 'load_op' to load it") + raise ValueError( + "The size of input is too big. 
Please consider " + "saving it to file and 'load_op' to load it" + ) if in_dygraph_mode(): if output is None: output = zeros(list(input.shape), dtype) - _C_ops.assign_value_(output, list(input.shape), dtype, values, - _current_expected_place()) + _C_ops.assign_value_( + output, + list(input.shape), + dtype, + values, + _current_expected_place(), + ) elif _in_legacy_dygraph(): if output is None: output = core.VarBase() - _legacy_C_ops.assign_value(output, 'shape', list(input.shape), - 'dtype', dtype, value_name, values) + _legacy_C_ops.assign_value( + output, + 'shape', + list(input.shape), + 'dtype', + dtype, + value_name, + values, + ) else: if output is None: output = helper.create_variable_for_type_inference( - dtype=input.dtype) - helper.append_op(type='assign_value', - outputs={'Out': [output]}, - attrs={ - 'dtype': dtype, - 'shape': list(input.shape), - value_name: values - }) + dtype=input.dtype + ) + helper.append_op( + type='assign_value', + outputs={'Out': [output]}, + attrs={ + 'dtype': dtype, + 'shape': list(input.shape), + value_name: values, + }, + ) if is_inplace and _non_static_mode(): output._bump_inplace_version() @@ -783,7 +923,11 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): shape = list( map( lambda x: x.numpy().flat[0] - if isinstance(x, Variable) else x, shape)) + if isinstance(x, Variable) + else x, + shape, + ) + ) break if not isinstance(dtype, core.VarDesc.VarType): @@ -811,9 +955,19 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): else: attrs['str_value'] = str(float(value.numpy().item(0))) - _legacy_C_ops.fill_constant(out, 'value', float(value), 'force_cpu', - force_cpu, 'dtype', out.dtype, 'str_value', - attrs['str_value'], 'shape', shape) + _legacy_C_ops.fill_constant( + out, + 'value', + float(value), + 'force_cpu', + force_cpu, + 'dtype', + out.dtype, + 'str_value', + attrs['str_value'], + 'shape', + shape, + ) out.stop_gradient = True return out @@ -825,43 +979,60 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): inputs['ValueTensor'] = value check_shape(shape) - check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32', - 'int64', 'complex64', 'complex128' - ], 'fill_constant') + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'uint8', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'fill_constant', + ) check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant') if out is not None: - check_variable_and_dtype(out, 'out', [convert_dtype(dtype)], - 'fill_constant') + check_variable_and_dtype( + out, 'out', [convert_dtype(dtype)], 'fill_constant' + ) helper = LayerHelper("fill_constant", **locals()) - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='fill_constant') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant' + ) if out is None: out = helper.create_variable_for_type_inference(dtype=dtype) attrs['dtype'] = out.dtype - helper.append_op(type='fill_constant', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs, - stop_gradient=True) + helper.append_op( + type='fill_constant', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + stop_gradient=True, + ) out.stop_gradient = True return out @deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant") @templatedoc() -def fill_constant_batch_size_like(input, - shape, - dtype, - value, - 
input_dim_idx=0, - output_dim_idx=0, - force_cpu=False): +def fill_constant_batch_size_like( + input, + shape, + dtype, + value, + input_dim_idx=0, + output_dim_idx=0, + force_cpu=False, +): """ This OP creates a Tesnor according the shape and dtype, and initializes the Tensor with the constants provided in ``value``. When the input is LoDTensor @@ -903,8 +1074,9 @@ def fill_constant_batch_size_like(input, place = _current_expected_place() if force_cpu: place = core.CPUPlace() - out = _C_ops.full_batch_size_like(input, shape, dtype, value, - input_dim_idx, output_dim_idx, place) + out = _C_ops.full_batch_size_like( + input, shape, dtype, value, input_dim_idx, output_dim_idx, place + ) out.stop_gradient = True return out @@ -916,25 +1088,27 @@ def fill_constant_batch_size_like(input, 'value': float(value), 'input_dim_idx': input_dim_idx, 'output_dim_idx': output_dim_idx, - 'force_cpu': force_cpu + 'force_cpu': force_cpu, } if convert_dtype(dtype) in ['int64', 'int32']: attrs['str_value'] = str(int(value)) else: attrs['str_value'] = str(float(value)) - helper.append_op(type='fill_constant_batch_size_like', - inputs={'Input': input}, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='fill_constant_batch_size_like', + inputs={'Input': input}, + outputs={'Out': [out]}, + attrs=attrs, + ) out.stop_gradient = True return out def argmin(x, axis=0): """ - :alias_main: paddle.argmin - :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin - :old_api: paddle.fluid.layers.argmin + :alias_main: paddle.argmin + :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin + :old_api: paddle.fluid.layers.argmin **argmin** @@ -984,14 +1158,19 @@ def argmin(x, axis=0): # [1 0 2]] """ check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], - 'argmin') + x, + 'x', + ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], + 'argmin', + ) helper = LayerHelper("arg_min", **locals()) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) - helper.append_op(type='arg_min', - inputs={'X': x}, - outputs={'Out': [out]}, - attrs={'axis': axis}) + helper.append_op( + type='arg_min', + inputs={'X': x}, + outputs={'Out': [out]}, + attrs={'axis': axis}, + ) out.stop_gradient = True return out @@ -1046,23 +1225,28 @@ def argmax(x, axis=0): # [0 3 1]] """ check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], - 'argmax') + x, + 'x', + ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], + 'argmax', + ) helper = LayerHelper("arg_max", **locals()) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) - helper.append_op(type='arg_max', - inputs={'X': x}, - outputs={'Out': [out]}, - attrs={'axis': axis}) + helper.append_op( + type='arg_max', + inputs={'X': x}, + outputs={'Out': [out]}, + attrs={'axis': axis}, + ) out.stop_gradient = True return out def argsort(input, axis=-1, descending=False, name=None): """ - :alias_main: paddle.argsort - :alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort - :old_api: paddle.fluid.layers.argsort + :alias_main: paddle.argsort + :alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort + :old_api: paddle.fluid.layers.argsort This OP sorts the input along the given axis, and returns sorted output data Varibale and its corresponding index Variable with the same shape as @@ -1133,23 +1317,24 @@ def argsort(input, axis=-1, descending=False, name=None): # [5. 7. 7. 
9.]]] """ check_variable_and_dtype( - input, 'input', - ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort') + input, + 'input', + ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], + 'argsort', + ) helper = LayerHelper("argsort", **locals()) - out = helper.create_variable_for_type_inference(dtype=input.dtype, - stop_gradient=True) - ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64, - stop_gradient=True) - helper.append_op(type='argsort', - inputs={'X': input}, - outputs={ - 'Out': out, - 'Indices': ids - }, - attrs={ - 'axis': axis, - 'descending': descending - }) + out = helper.create_variable_for_type_inference( + dtype=input.dtype, stop_gradient=True + ) + ids = helper.create_variable_for_type_inference( + VarDesc.VarType.INT64, stop_gradient=True + ) + helper.append_op( + type='argsort', + inputs={'X': input}, + outputs={'Out': out, 'Indices': ids}, + attrs={'axis': axis, 'descending': descending}, + ) return out, ids @@ -1215,9 +1400,9 @@ def zeros(shape, dtype, force_cpu=False, name=None): def reverse(x, axis): """ - :alias_main: paddle.reverse - :alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse - :old_api: paddle.fluid.layers.reverse + :alias_main: paddle.reverse + :alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse + :old_api: paddle.fluid.layers.reverse The OP reverses the tensor :attr:`x` along the given :attr:`axis`. @@ -1275,9 +1460,9 @@ def reverse(x, axis): reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]} """ - check_variable_and_dtype(x, 'x', - ('float32', 'float64', 'int32', 'int64', 'uint8'), - 'reverse') + check_variable_and_dtype( + x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse' + ) check_type(axis, 'axis', (int, tuple, list, Variable), 'reverse') if isinstance(axis, int): axis = [axis] @@ -1285,10 +1470,12 @@ def reverse(x, axis): return _C_ops.reverse(x, axis) helper = LayerHelper("reverse", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reverse', - inputs={'X': x}, - outputs={'Out': [out]}, - attrs={'axis': axis}) + helper.append_op( + type='reverse', + inputs={'X': x}, + outputs={'Out': [out]}, + attrs={'axis': axis}, + ) return out @@ -1304,13 +1491,12 @@ def save(x, file_path, overwrite=True): error will be thrown. """ helper = LayerHelper("save", **locals()) - helper.append_op(type="save", - inputs={"input": x}, - outputs={}, - args={ - "file_path": file_path, - "overwrite": overwrite - }) + helper.append_op( + type="save", + inputs={"input": x}, + outputs={}, + args={"file_path": file_path, "overwrite": overwrite}, + ) def save_combine(x, file_path, overwrite=True): @@ -1342,13 +1528,12 @@ def save_combine(x, file_path, overwrite=True): normed = fluid.layers.save_combine([v1, v2], file_path="output") """ helper = LayerHelper("save_combine", **locals()) - helper.append_op(type="save_combine", - inputs={"input": x}, - outputs={}, - args={ - "file_path": file_path, - "overwrite": overwrite - }) + helper.append_op( + type="save_combine", + inputs={"input": x}, + outputs={}, + args={"file_path": file_path, "overwrite": overwrite}, + ) def load_combine(out, file_path): @@ -1360,10 +1545,12 @@ def load_combine(out, file_path): file_path(str): The path of the disk file. 
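The argsort layer in this hunk returns both the sorted values and their indices and honours a descending flag; NumPy can mirror that behaviour in a few lines:

import numpy as np

def argsort_sketch(x, axis=-1, descending=False):
    ids = np.argsort(-x if descending else x, axis=axis)
    out = np.take_along_axis(x, ids, axis=axis)
    return out, ids

x = np.array([[5.0, 8.0, 9.0, 5.0], [0.0, 0.0, 1.0, 7.0]])
out, ids = argsort_sketch(x, axis=-1)
print(out)   # [[5. 5. 8. 9.] [0. 0. 1. 7.]]
print(ids)   # [[0 3 1 2] [0 1 2 3]]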
""" helper = LayerHelper("load_combine", **locals()) - helper.append_op(type="load_combine", - inputs={}, - output={"Out": out}, - args={"file_path": file_path}) + helper.append_op( + type="load_combine", + inputs={}, + output={"Out": out}, + args={"file_path": file_path}, + ) def has_inf(x): @@ -1447,8 +1634,9 @@ def isfinite(x): print(y) """ - check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], - "isfinite") + check_variable_and_dtype( + x, "x", ["float32", "float64", "int32", "int64"], "isfinite" + ) helper = LayerHelper("isfinite", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') @@ -1506,8 +1694,11 @@ def range(start, end, step, dtype, name=None): """ out_shape = None - if not isinstance(start, Variable) and not isinstance( - end, Variable) and not isinstance(step, Variable): + if ( + not isinstance(start, Variable) + and not isinstance(end, Variable) + and not isinstance(step, Variable) + ): out_shape = [int(math.ceil((end - start) / step))] if not isinstance(dtype, core.VarDesc.VarType): @@ -1539,17 +1730,16 @@ def range(start, end, step, dtype, name=None): out.stop_gradient = True return out - check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], - 'range/arange') + check_dtype( + dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'range/arange' + ) helper = LayerHelper('range', **locals()) out = helper.create_variable_for_type_inference(dtype, shape=out_shape) - helper.append_op(type='range', - inputs={ - 'Start': start, - 'End': end, - 'Step': step - }, - outputs={'Out': out}) + helper.append_op( + type='range', + inputs={'Start': start, 'End': end, 'Step': step}, + outputs={'Out': out}, + ) out.stop_gradient = True if out_shape is not None: out.desc.set_shape(out_shape) @@ -1604,52 +1794,70 @@ def linspace(start, stop, num, dtype=None, name=None): with device_guard("cpu"): tensor_num = fill_constant([1], 'int32', num) if in_dygraph_mode(): - return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype, - _current_expected_place()) + return _C_ops.linspace( + tensor_start, + tensor_stop, + tensor_num, + dtype, + _current_expected_place(), + ) if _in_legacy_dygraph(): - return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num, - 'dtype', dtype) + return _legacy_C_ops.linspace( + tensor_start, tensor_stop, tensor_num, 'dtype', dtype + ) helper = LayerHelper("linspace", **locals()) start_dtype = convert_dtype(tensor_start.dtype) stop_dtype = convert_dtype(tensor_stop.dtype) out_dtype = convert_dtype(dtype) if isinstance(start, Variable): - check_dtype(start.dtype, 'start', - ['float32', 'float64', 'int32', 'int64'], 'linspace') + check_dtype( + start.dtype, + 'start', + ['float32', 'float64', 'int32', 'int64'], + 'linspace', + ) else: check_type(start, 'start', (int, float), 'linspace') if isinstance(stop, Variable): - check_dtype(stop.dtype, 'stop', - ['float32', 'float64', 'int32', 'int64'], 'linspace') + check_dtype( + stop.dtype, + 'stop', + ['float32', 'float64', 'int32', 'int64'], + 'linspace', + ) else: check_type(stop, 'stop', (int, float), 'linspace') if isinstance(num, Variable): check_dtype(num.dtype, 'num', ['int32'], 'linspace') - check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], - 'linspace') - if ((stop_dtype == "float64" or start_dtype == "float64") - and out_dtype in ["float32", "int32"]) or ( - (stop_dtype == "int64" or start_dtype == "int64") - and out_dtype == "int32"): + check_dtype( + dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'linspace' + 
) + if ( + (stop_dtype == "float64" or start_dtype == "float64") + and out_dtype in ["float32", "int32"] + ) or ( + (stop_dtype == "int64" or start_dtype == "int64") + and out_dtype == "int32" + ): raise ValueError( "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, " - "which may cause data type overflows. Please reset attr(dtype) of linspace." - .format(start_dtype, stop_dtype, dtype)) + "which may cause data type overflows. Please reset attr(dtype) of linspace.".format( + start_dtype, stop_dtype, dtype + ) + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='linspace', - inputs={ - 'Start': tensor_start, - 'Stop': tensor_stop, - 'Num': tensor_num - }, - attrs={'dtype': dtype}, - outputs={'Out': [out]}) + helper.append_op( + type='linspace', + inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num}, + attrs={'dtype': dtype}, + outputs={'Out': [out]}, + ) if isinstance(num, int): - out.desc.set_shape((num, )) + out.desc.set_shape((num,)) return out @@ -1679,23 +1887,25 @@ def zeros_like(x, out=None): data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0] """ - check_variable_and_dtype(x, "x", - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'zeros_like') + check_variable_and_dtype( + x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like' + ) helper = LayerHelper("zeros_like", **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: check_variable_and_dtype( - out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'], - 'zeros_like') - helper.append_op(type='fill_any_like', - inputs={'X': [x]}, - attrs={ - 'value': 0, - "dtype": x.dtype - }, - outputs={'Out': [out]}) + out, + "out", + ['bool', 'float32', 'float64', 'int32', 'int64'], + 'zeros_like', + ) + helper.append_op( + type='fill_any_like', + inputs={'X': [x]}, + attrs={'value': 0, "dtype": x.dtype}, + outputs={'Out': [out]}, + ) out.stop_gradient = True return out @@ -1732,8 +1942,12 @@ def diag(diagonal): """ check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag') - check_dtype(diagonal.dtype, 'diagonal', - ['float32', 'float64', 'int32', 'int64'], 'diag') + check_dtype( + diagonal.dtype, + 'diagonal', + ['float32', 'float64', 'int32', 'int64'], + 'diag', + ) helper = LayerHelper("diag", **locals()) if not isinstance(diagonal, Variable): @@ -1741,19 +1955,17 @@ def diag(diagonal): out = helper.create_variable_for_type_inference(dtype=diagonal.dtype) - helper.append_op(type='diag', - inputs={'Diagonal': [diagonal]}, - outputs={'Out': [out]}) + helper.append_op( + type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]} + ) out.stop_gradient = True return out -def eye(num_rows, - num_columns=None, - batch_shape=None, - dtype='float32', - name=None): +def eye( + num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None +): """ This function constructs a or a batch of 2-D tensor with ones on the diagonal and zeros elsewhere. 
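The eye helper below builds the plain num_rows x num_columns identity first and then reshapes and expands it across batch_shape; roughly the same thing in NumPy, with the tile standing in for the reshape + expand pair:

import numpy as np

def eye_sketch(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    out = np.eye(num_rows, num_columns, dtype=dtype)
    if batch_shape is not None:
        # repeat the identity once per batch entry, as the layer's expand does
        out = np.tile(out, list(batch_shape) + [1, 1])
    return out

print(eye_sketch(2, 3, batch_shape=[2]).shape)   # (2, 2, 3)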
@@ -1806,25 +2018,33 @@ def eye(num_rows, num_columns = num_rows if in_dygraph_mode(): - out = _C_ops.eye(num_rows, num_columns, dtype, - _current_expected_place()) + out = _C_ops.eye( + num_rows, num_columns, dtype, _current_expected_place() + ) elif _in_legacy_dygraph(): - out = _legacy_C_ops.eye('dtype', dtype, 'num_rows', num_rows, - 'num_columns', num_columns) + out = _legacy_C_ops.eye( + 'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns + ) else: helper = LayerHelper("eye", **locals()) - check_dtype(dtype, 'dtype', - ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye') + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'eye', + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='eye', - inputs={}, - outputs={'Out': [out]}, - attrs={ - 'num_rows': num_rows, - 'num_columns': num_columns, - 'dtype': dtype - }, - stop_gradient=True) + helper.append_op( + type='eye', + inputs={}, + outputs={'Out': [out]}, + attrs={ + 'num_rows': num_rows, + 'num_columns': num_columns, + 'dtype': dtype, + }, + stop_gradient=True, + ) if batch_shape is not None: re_shape = [1] * len(batch_shape) @@ -1836,11 +2056,12 @@ def eye(num_rows, if not isinstance(batch_shape, list): raise TypeError("batch_shape should be a list") - for batch_val in (batch_shape): + for batch_val in batch_shape: if batch_val <= 0: raise TypeError("batch_shape should be a positive int list") from .nn import reshape, expand + out = reshape(x=out, shape=re_shape) out = expand(x=out, expand_times=expand_times) @@ -1871,25 +2092,31 @@ def ones_like(x, out=None): data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0] """ - check_variable_and_dtype(x, "x", - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'ones_like') + check_variable_and_dtype( + x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like' + ) helper = LayerHelper("ones_like", **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: check_variable_and_dtype( - out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'], - 'ones_like') - helper.append_op(type='fill_any_like', - inputs={'X': [x]}, - attrs={'value': 1.0}, - outputs={'Out': [out]}) + out, + "out", + ['bool', 'float32', 'float64', 'int32', 'int64'], + 'ones_like', + ) + helper.append_op( + type='fill_any_like', + inputs={'X': [x]}, + attrs={'value': 1.0}, + outputs={'Out': [out]}, + ) return out @deprecated(since="2.0.0", update_to="paddle.triu") def triu(input, diagonal=0, name=None): import paddle + return paddle.tensor.triu(x=input, diagonal=diagonal, name=name) diff --git a/python/paddle/fluid/layers/utils.py b/python/paddle/fluid/layers/utils.py index 28d00161b26eb8840e0f4c142ee2676b473c951d..e96e105c141df977af53c94072b6b987268e399d 100644 --- a/python/paddle/fluid/layers/utils.py +++ b/python/paddle/fluid/layers/utils.py @@ -17,9 +17,15 @@ import copy import six import numpy as np from ..framework import Block, Variable, _non_static_mode -from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from ..layer_helper import LayerHelper from sys import version_info + try: from collections.abc import Sequence except: @@ -54,25 +60,45 @@ def convert_to_list(value, n, name, dtype=int): try: value_list = list(value) except TypeError: - raise ValueError("The " + name + - "'s type must be list or tuple. 
Received: " + - str(value)) + raise ValueError( + "The " + + name + + "'s type must be list or tuple. Received: " + + str(value) + ) if len(value_list) != n: - raise ValueError("The " + name + "'s length must be " + str(n) + - ". Received: " + str(value)) + raise ValueError( + "The " + + name + + "'s length must be " + + str(n) + + ". Received: " + + str(value) + ) for single_value in value_list: - assert not isinstance( - single_value, Variable - ), "Required numerical type with '%s', but received Tensor." % dtype + assert not isinstance(single_value, Variable), ( + "Required numerical type with '%s', but received Tensor." + % dtype + ) try: dtype(single_value) except (ValueError, TypeError): - raise ValueError("The " + name + - "'s type must be a list or tuple of " + - str(n) + " " + str(dtype) + " . Received: " + - str(value) + " " - "including element " + str(single_value) + - " of type" + " " + str(type(single_value))) + raise ValueError( + "The " + + name + + "'s type must be a list or tuple of " + + str(n) + + " " + + str(dtype) + + " . Received: " + + str(value) + + " " + "including element " + + str(single_value) + + " of type" + + " " + + str(type(single_value)) + ) return value_list @@ -82,7 +108,7 @@ def is_sequence(seq): """ if isinstance(seq, dict): return True - return (isinstance(seq, Sequence) and not isinstance(seq, str)) + return isinstance(seq, Sequence) and not isinstance(seq, str) def _hash_with_id(*args): @@ -91,7 +117,7 @@ def _hash_with_id(*args): """ assert len(args) > 0 info = tuple([id(v) for v in args]) - return hash(info) & 0xfffffff + return hash(info) & 0xFFFFFFF def _sorted(dict_): @@ -136,9 +162,9 @@ def to_sequence(nest): def flatten(nest): """ - :alias_main: paddle.flatten - :alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten - :old_api: paddle.fluid.layers.flatten + :alias_main: paddle.flatten + :alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten + :old_api: paddle.fluid.layers.flatten Traverse all entries in the nested structure and put them into an list. """ @@ -160,9 +186,12 @@ def _sequence_like(instance, args): # corresponding `OrderedDict` to pack it back). result = dict(zip(_sorted(instance), args)) return type(instance)((key, result[key]) for key in instance.keys()) - elif (isinstance(instance, tuple) and hasattr(instance, "_fields") - and isinstance(instance._fields, Sequence) - and all(isinstance(f, str) for f in instance._fields)): + elif ( + isinstance(instance, tuple) + and hasattr(instance, "_fields") + and isinstance(instance._fields, Sequence) + and all(isinstance(f, str) for f in instance._fields) + ): # This is a namedtuple return type(instance)(*args) else: @@ -195,15 +224,22 @@ def pack_sequence_as(structure, flat_sequence): if not is_sequence(structure): if len(flat_sequence) != 1: raise ValueError( - "Structure is a scalar but len(flat_sequence) == %d > 1" % - len(flat_sequence)) + "Structure is a scalar but len(flat_sequence) == %d > 1" + % len(flat_sequence) + ) return flat_sequence[0] flat_structure = flatten(structure) if len(flat_structure) != len(flat_sequence): raise ValueError( "Could not pack sequence. Structure had %d elements, but flat_sequence " - "had %d elements. Structure: %s, flat_sequence: %s." % - (len(flat_structure), len(flat_sequence), structure, flat_sequence)) + "had %d elements. Structure: %s, flat_sequence: %s." 
+ % ( + len(flat_structure), + len(flat_sequence), + structure, + flat_sequence, + ) + ) _, packed = _packed_nest_with_indices(structure, flat_sequence, 0) return _sequence_like(structure, packed) @@ -243,7 +279,8 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): if is_sequence_nest1 != is_sequence(nest2): raise ValueError( "The two structures don't have the same nested structure.\n\n" - "First structure: %s\n\nSecond structure: %s." % (nest1, nest2)) + "First structure: %s\n\nSecond structure: %s." % (nest1, nest2) + ) if not is_sequence_nest1: return # finished checking if check_types: @@ -252,16 +289,19 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): if type_nest1 != type_nest2: raise TypeError( "The two structures don't have the same sequence type. First " - "structure has type %s, while second structure has type %s." % - (type_nest1, type_nest2)) + "structure has type %s, while second structure has type %s." + % (type_nest1, type_nest2) + ) if isinstance(nest1, dict): keys1 = set(nest1.keys()) keys2 = set(nest2.keys()) if keys1 != keys2: raise ValueError( "The two dictionaries don't have the same set of keys. First " - "structure has keys {}, while second structure has keys {}." - .format(keys1, keys2)) + "structure has keys {}, while second structure has keys {}.".format( + keys1, keys2 + ) + ) nest1_as_sequence = [n for n in _yield_value(nest1)] nest2_as_sequence = [n for n in _yield_value(nest2)] for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence): @@ -269,16 +309,16 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): def padding_to_same_structure(nest1, nest2, obj=None): - def _padding_to_same_structure_single(value, obj): - def change_none_to_obj(x): - if x is None: return obj + if x is None: + return obj return x if is_sequence(value): value = pack_sequence_as( - value, [change_none_to_obj(item) for item in flatten(value)]) + value, [change_none_to_obj(item) for item in flatten(value)] + ) else: value = change_none_to_obj(value) return value @@ -295,10 +335,12 @@ def assert_same_structure(nest1, nest2, check_types=True): len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1 len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1 if len_nest1 != len_nest2: - raise ValueError("The two structures don't have the same number of " - "elements.\n\nFirst structure (%i elements): %s\n\n" - "Second structure (%i elements): %s" % - (len_nest1, nest1, len_nest2, nest2)) + raise ValueError( + "The two structures don't have the same number of " + "elements.\n\nFirst structure (%i elements): %s\n\n" + "Second structure (%i elements): %s" + % (len_nest1, nest1, len_nest2, nest2) + ) _recursive_assert_same_structure(nest1, nest2, check_types) @@ -343,9 +385,12 @@ def get_shape_tensor_inputs(inputs, attrs, shape, op_type): if isinstance(dim, Variable): dim.stop_gradient = True check_dtype( - dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'], + dim.dtype, + 'shape[' + str(idx) + ']', + ['int32', 'int64'], op_type, - '(When type of shape in' + op_type + 'is list or tuple.)') + '(When type of shape in' + op_type + 'is list or tuple.)', + ) if convert_dtype(dim.dtype) == 'int64': dim = cast(x=dim, dtype='int32') shape_tensor_list.append(dim) @@ -356,9 +401,14 @@ def get_shape_tensor_inputs(inputs, attrs, shape, op_type): if isinstance(shape, Variable): shape.stop_gradient = True - check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant', - '(When type of shape in' + op_type + ' is Variable.)') - if 
(convert_dtype(shape.dtype) == 'int64'): + check_dtype( + shape.dtype, + 'shape', + ['int32', 'int64'], + 'fill_constant', + '(When type of shape in' + op_type + ' is Variable.)', + ) + if convert_dtype(shape.dtype) == 'int64': shape = cast(shape, 'int32') inputs["ShapeTensor"] = shape elif isinstance(shape, (list, tuple)): @@ -374,6 +424,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"): Converts all elements of a list to Variable. """ from .tensor import fill_constant + new_list_tensor = [] for ele in old_list: @@ -393,8 +444,11 @@ def convert_shape_to_list(shape): """ if isinstance(shape, (list, tuple)): shape = list( - map(lambda x: x.numpy().flat[0] - if isinstance(x, Variable) else x, shape)) + map( + lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x, + shape, + ) + ) else: shape = shape.numpy().astype(int).tolist() return shape @@ -462,7 +516,8 @@ def try_get_constant_shape_from_tensor(shape_tensor): generate_op = shape_tensor.op if generate_op.type == 'shape': var = shape_tensor.block.vars[ - generate_op.input_arg_names[0]] + generate_op.input_arg_names[0] + ] return var.shape except: return None @@ -476,9 +531,11 @@ def get_inputs_outputs_in_block(block): created in this block. """ assert isinstance( - block, - Block), "input non-Block argument for get_inputs_outputs_in_block." - assert block.parent_idx != -1, "input block should be a sub-block, not main block." + block, Block + ), "input non-Block argument for get_inputs_outputs_in_block." + assert ( + block.parent_idx != -1 + ), "input block should be a sub-block, not main block." # Find input/output var names of all ops in block inner_inputs = set() diff --git a/python/paddle/fluid/lazy_init.py b/python/paddle/fluid/lazy_init.py index 8fc175ae3efebf31948f7dd84c2fb6dbc9dbed99..d2118259d03cf902d687199ba272b0501533507a 100644 --- a/python/paddle/fluid/lazy_init.py +++ b/python/paddle/fluid/lazy_init.py @@ -36,7 +36,8 @@ class LazyInitHelper(object): """ if self._state: return - assert framework._non_static_mode( + assert ( + framework._non_static_mode() ), "LazyInit.enable() is only available in dygraph mode." self._state = True @@ -56,7 +57,8 @@ class LazyInitHelper(object): dygraph mode into static mode. """ self.enable() - if self._in_guard: return + if self._in_guard: + return self._tracer = framework._dygraph_tracer_ framework._dygraph_tracer_ = None self._in_guard = True @@ -66,7 +68,8 @@ class LazyInitHelper(object): Exit from lazy mode and recover _dygraph_tracer_. 
""" self.disable() - if not self._in_guard: return + if not self._in_guard: + return assert self._tracer is not None framework._dygraph_tracer_ = self._tracer self._tracer = None diff --git a/python/paddle/fluid/lod_tensor.py b/python/paddle/fluid/lod_tensor.py index 1b1e74f1fec99d5721f3910bd568c4b9fe497e07..d099536fcd784e673b878e51f21eb6cb0b9ee5a6 100644 --- a/python/paddle/fluid/lod_tensor.py +++ b/python/paddle/fluid/lod_tensor.py @@ -72,10 +72,12 @@ def create_lod_tensor(data, recursive_seq_lens, place): elif isinstance(data, list): # dtype and shape are not important here, # we only want to reuse code of DataToLoDTensorConverter - converter = DataToLoDTensorConverter(place=place, - lod_level=len(recursive_seq_lens), - shape=[], - dtype=core.VarDesc.VarType.FP32) + converter = DataToLoDTensorConverter( + place=place, + lod_level=len(recursive_seq_lens), + shape=[], + dtype=core.VarDesc.VarType.FP32, + ) new_recursive_seq_lens = [] for seq in data: @@ -91,7 +93,7 @@ def create_lod_tensor(data, recursive_seq_lens, place): # FIXME(zjl): the original logic of create_lod_tensor would append # 1 to the shape. Maybe it is not a right way? Currently, we only # follow the previous logic - arr = arr.reshape(arr.shape + (1, )) + arr = arr.reshape(arr.shape + (1,)) tensor = core.LoDTensor() tensor.set(arr, place) tensor.set_recursive_sequence_lengths(recursive_seq_lens) @@ -100,18 +102,21 @@ def create_lod_tensor(data, recursive_seq_lens, place): tensor = core.LoDTensor() tensor.set(data, place) tensor.set_recursive_sequence_lengths(recursive_seq_lens) - assert tensor.has_valid_recursive_sequence_lengths( + assert ( + tensor.has_valid_recursive_sequence_lengths() ), "the provided lod info is invalid" return tensor else: raise TypeError( - "data should be either a LoDTensor, a Numpy array or a list") + "data should be either a LoDTensor, a Numpy array or a list" + ) -def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, - high): +def create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low, high +): """ - :api_attr: Static Graph + :api_attr: Static Graph Create a LoDTensor containing random integers. diff --git a/python/paddle/fluid/memory_analysis.py b/python/paddle/fluid/memory_analysis.py index de9a260ada89e57a730759a79b8abbeece9b4d69..9d33927b98bbe48c23dc2e16a4c9486661402bb3 100644 --- a/python/paddle/fluid/memory_analysis.py +++ b/python/paddle/fluid/memory_analysis.py @@ -19,8 +19,9 @@ import numpy as np def get_var_and_memory_size(block, var_name, batch_size=None): var = block._find_var_recursive(var_name) assert var is not None, "Variable {} cannot be found".format(var_name) - assert var.type == core.VarDesc.VarType.LOD_TENSOR, "Variable {} is not Tensor".format( - var_name) + assert ( + var.type == core.VarDesc.VarType.LOD_TENSOR + ), "Variable {} is not Tensor".format(var_name) shape = list(var.shape) if not shape: return var, 0 @@ -31,8 +32,9 @@ def get_var_and_memory_size(block, var_name, batch_size=None): assert not has_none shape[i] = batch_size has_none = True - assert all([s >= 0 - for s in shape]), "shape {} is not deterministic".format(shape) + assert all([s >= 0 for s in shape]), "shape {} is not deterministic".format( + shape + ) mem_size = int(np.prod(shape)) * core.size_of_dtype(var.dtype) return var, mem_size @@ -46,7 +48,9 @@ def pre_allocate_memory(size, place): # NOTE: does not consider inplace yet. 
def get_max_memory_info(program, batch_size=None): - assert program.num_blocks == 1, "only support to analysis program with only one block" + assert ( + program.num_blocks == 1 + ), "only support to analysis program with only one block" cur_tmp_mem = 0 max_tmp_mem = 0 max_persistable_mem = 0 diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index f14692f79ebb73b655e25153cb376a590e74f807..8f15508e699c10a09efd80b9743159927cb4a4fa 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -43,8 +43,12 @@ def _is_numpy_(var): def _is_number_(var): - return isinstance(var, int) or isinstance(var, np.int64) or isinstance( - var, float) or (isinstance(var, np.ndarray) and var.shape == (1, )) + return ( + isinstance(var, int) + or isinstance(var, np.int64) + or isinstance(var, float) + or (isinstance(var, np.ndarray) and var.shape == (1,)) + ) def _is_number_or_matrix_(var): @@ -116,13 +120,14 @@ class MetricBase(object): """ states = { attr: value - for attr, value in self.__dict__.items() if not attr.startswith("_") + for attr, value in self.__dict__.items() + if not attr.startswith("_") } for attr, value in states.items(): if isinstance(value, int): setattr(self, attr, 0) elif isinstance(value, float): - setattr(self, attr, .0) + setattr(self, attr, 0.0) elif isinstance(value, (np.ndarray, np.generic)): setattr(self, attr, np.zeros_like(value)) else: @@ -144,7 +149,8 @@ class MetricBase(object): """ states = { attr: value - for attr, value in self.__dict__.items() if not attr.startswith("_") + for attr, value in self.__dict__.items() + if not attr.startswith("_") } config = {} config.update({"name": self._name, "states": copy.deepcopy(states)}) @@ -170,7 +176,8 @@ class MetricBase(object): """ raise NotImplementedError( - "Should not use it directly, please extend it.") + "Should not use it directly, please extend it." + ) def eval(self): """ @@ -187,7 +194,8 @@ class MetricBase(object): float|list(float)|numpy.array: the metrics via Python. """ raise NotImplementedError( - "Should not use it directly, please extend it.") + "Should not use it directly, please extend it." + ) class CompositeMetric(MetricBase): @@ -340,7 +348,7 @@ class Precision(MetricBase): float: Results of the calculated Precision. Scalar output with float dtype. """ ap = self.tp + self.fp - return float(self.tp) / ap if ap != 0 else .0 + return float(self.tp) / ap if ap != 0 else 0.0 class Recall(MetricBase): @@ -423,7 +431,7 @@ class Recall(MetricBase): float: results of the calculated Recall. Scalar output with float dtype. """ recall = self.tp + self.fn - return float(self.tp) / recall if recall != 0 else .0 + return float(self.tp) / recall if recall != 0 else 0.0 class Accuracy(MetricBase): @@ -465,8 +473,8 @@ class Accuracy(MetricBase): def __init__(self, name=None): super(Accuracy, self).__init__(name) - self.value = .0 - self.weight = .0 + self.value = 0.0 + self.weight = 0.0 def update(self, value, weight): r""" @@ -482,7 +490,8 @@ class Accuracy(MetricBase): """ if not _is_number_or_matrix_(value): raise ValueError( - "The 'value' must be a number(int, float) or a numpy ndarray.") + "The 'value' must be a number(int, float) or a numpy ndarray." + ) if not _is_number_(weight): raise ValueError("The 'weight' must be a number(int, float).") if _is_number_(weight) and weight < 0: @@ -499,8 +508,10 @@ class Accuracy(MetricBase): """ if self.weight == 0: - raise ValueError("There is no data in Accuracy Metrics. 
\ - Please check layers.accuracy output has added to Accuracy.") + raise ValueError( + "There is no data in Accuracy Metrics. \ + Please check layers.accuracy output has added to Accuracy." + ) return self.value / self.weight @@ -592,13 +603,21 @@ class ChunkEvaluator(MetricBase): float: mean precision, recall and f1 score. """ - precision = float( - self.num_correct_chunks - ) / self.num_infer_chunks if self.num_infer_chunks else 0 - recall = float(self.num_correct_chunks - ) / self.num_label_chunks if self.num_label_chunks else 0 - f1_score = float(2 * precision * recall) / ( - precision + recall) if self.num_correct_chunks else 0 + precision = ( + float(self.num_correct_chunks) / self.num_infer_chunks + if self.num_infer_chunks + else 0 + ) + recall = ( + float(self.num_correct_chunks) / self.num_label_chunks + if self.num_label_chunks + else 0 + ) + f1_score = ( + float(2 * precision * recall) / (precision + recall) + if self.num_correct_chunks + else 0 + ) return precision, recall, f1_score @@ -653,7 +672,7 @@ class EditDistance(MetricBase): def __init__(self, name): super(EditDistance, self).__init__(name) - self.total_distance = .0 + self.total_distance = 0.0 self.seq_num = 0 self.instance_error = 0 @@ -789,11 +808,14 @@ class Auc(MetricBase): tot_neg_prev = tot_neg tot_pos += self._stat_pos[idx] tot_neg += self._stat_neg[idx] - auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos, - tot_pos_prev) + auc += self.trapezoid_area( + tot_neg, tot_neg_prev, tot_pos, tot_pos_prev + ) idx -= 1 - return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0 + return ( + auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0 + ) class DetectionMAP(object): @@ -879,16 +901,18 @@ class DetectionMAP(object): """ - def __init__(self, - input, - gt_label, - gt_box, - gt_difficult=None, - class_num=None, - background_label=0, - overlap_threshold=0.5, - evaluate_difficult=True, - ap_version='integral'): + def __init__( + self, + input, + gt_label, + gt_box, + gt_difficult=None, + class_num=None, + background_label=0, + overlap_threshold=0.5, + evaluate_difficult=True, + ap_version='integral', + ): self.helper = LayerHelper('map_eval') gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype) @@ -899,30 +923,36 @@ class DetectionMAP(object): label = layers.concat([gt_label, gt_box], axis=1) # calculate mean average precision (mAP) of current mini-batch - map = detection.detection_map(input, - label, - class_num, - background_label, - overlap_threshold=overlap_threshold, - evaluate_difficult=evaluate_difficult, - ap_version=ap_version) + map = detection.detection_map( + input, + label, + class_num, + background_label, + overlap_threshold=overlap_threshold, + evaluate_difficult=evaluate_difficult, + ap_version=ap_version, + ) states = [] states.append( - self._create_state(dtype='int32', - shape=None, - suffix='accum_pos_count')) + self._create_state( + dtype='int32', shape=None, suffix='accum_pos_count' + ) + ) states.append( - self._create_state(dtype='float32', - shape=None, - suffix='accum_true_pos')) + self._create_state( + dtype='float32', shape=None, suffix='accum_true_pos' + ) + ) states.append( - self._create_state(dtype='float32', - shape=None, - suffix='accum_false_pos')) + self._create_state( + dtype='float32', shape=None, suffix='accum_false_pos' + ) + ) var = self._create_state(dtype='int32', shape=[1], suffix='has_state') - self.helper.set_variable_initializer(var, - initializer=Constant(value=int(0))) + self.helper.set_variable_initializer( + var, 
initializer=Constant(value=int(0)) + ) self.has_state = var # calculate accumulative mAP @@ -936,12 +966,15 @@ class DetectionMAP(object): has_state=self.has_state, input_states=states, out_states=states, - ap_version=ap_version) + ap_version=ap_version, + ) - layers.fill_constant(shape=self.has_state.shape, - value=1, - dtype=self.has_state.dtype, - out=self.has_state) + layers.fill_constant( + shape=self.has_state.shape, + value=1, + dtype=self.has_state.dtype, + out=self.has_state, + ) self.cur_map = map self.accum_map = accum_map @@ -955,11 +988,12 @@ class DetectionMAP(object): shape(tuple|list): the shape of state Returns: State variable """ - state = self.helper.create_variable(name="_".join( - [unique_name.generate(self.helper.name), suffix]), - persistable=True, - dtype=dtype, - shape=shape) + state = self.helper.create_variable( + name="_".join([unique_name.generate(self.helper.name), suffix]), + persistable=True, + dtype=dtype, + shape=shape, + ) return state def get_map_var(self): @@ -981,19 +1015,20 @@ class DetectionMAP(object): def _clone_var_(block, var): assert isinstance(var, Variable) - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=var.persistable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=var.persistable, + ) if reset_program is None: reset_program = Program() with program_guard(main_program=reset_program): var = _clone_var_(reset_program.current_block(), self.has_state) - layers.fill_constant(shape=var.shape, - value=0, - dtype=var.dtype, - out=var) + layers.fill_constant( + shape=var.shape, value=0, dtype=var.dtype, out=var + ) executor.run(reset_program) diff --git a/python/paddle/fluid/multiprocess_utils.py b/python/paddle/fluid/multiprocess_utils.py index 73bba0069cdd21f1235a97313d613c1b87b1a51b..deca95457e61c87b1495e33c50ac2c59aed91db1 100644 --- a/python/paddle/fluid/multiprocess_utils.py +++ b/python/paddle/fluid/multiprocess_utils.py @@ -23,7 +23,7 @@ import queue # multi-process worker check indices queue interval, avoid # hanging in subprocess data loading -MP_STATUS_CHECK_INTERVAL = 5. +MP_STATUS_CHECK_INTERVAL = 5.0 # NOTE: [ mmap files clear ] If there is still data in the multiprocess queue when the main process finishes reading, # the data in the queue needs to be popped. Then the LoDTensor read by the main process @@ -56,7 +56,7 @@ def _cleanup_mmap(): # NOTE used for register a function to be executed at interpreter exit. 
-class CleanupFuncRegistrar(): +class CleanupFuncRegistrar: # Record the cleanup functions that have been executed _executed_func_set = set() # Record the cleanup functions that have been registered @@ -64,7 +64,6 @@ class CleanupFuncRegistrar(): @classmethod def register(cls, function, signals=[]): - def _func_exectuor(): if function not in cls._executed_func_set: try: @@ -93,8 +92,10 @@ class CleanupFuncRegistrar(): for sig in signals: orig_handler = signal.signal(sig, _signal_handler) if orig_handler not in (signal.SIG_DFL, signal.SIG_IGN): - if (sig == signal.SIGINT - and orig_handler is signal.default_int_handler): + if ( + sig == signal.SIGINT + and orig_handler is signal.default_int_handler + ): continue if orig_handler not in cls._registered_func_set: atexit.register(orig_handler) diff --git a/python/paddle/fluid/net_drawer.py b/python/paddle/fluid/net_drawer.py index b6a4c28b499533ec10217026aba9141206d99500..cf5cbf60ea3d80e0e9436edf8d3d077a5cb2620b 100644 --- a/python/paddle/fluid/net_drawer.py +++ b/python/paddle/fluid/net_drawer.py @@ -30,16 +30,19 @@ except ImportError: 'Cannot import graphviz, which is required for drawing a network. This ' 'can usually be installed in python with "pip install graphviz". Also, ' 'pydot requires graphviz to convert dot files to pdf: in ubuntu, this ' - 'can usually be installed with "sudo apt-get install graphviz".') - print('net_drawer will not run correctly. Please install the correct ' - 'dependencies.') + 'can usually be installed with "sudo apt-get install graphviz".' + ) + print( + 'net_drawer will not run correctly. Please install the correct ' + 'dependencies.' + ) exit(0) OP_STYLE = { 'shape': 'oval', 'color': '#0F9D58', 'style': 'filled', - 'fontcolor': '#FFFFFF' + 'fontcolor': '#FFFFFF', } VAR_STYLE = {} @@ -52,7 +55,6 @@ GRAPH_ID = 0 def unique_id(): - def generator(): GRAPH_ID += 1 return GRAPH_ID @@ -85,7 +87,8 @@ def parse_graph(program, graph, var_dict, **kwargs): temp_id = 0 proto = framework_pb2.ProgramDesc.FromString( - program.desc.serialize_to_string()) + program.desc.serialize_to_string() + ) for block in proto.blocks: for op in block.ops: op.type = op.type + "_" + str(temp_id) @@ -113,12 +116,14 @@ def draw_graph(startup_program, main_program, **kwargs): filename = kwargs.get("filename") if filename == None: filename = str(graph_id) + ".gv" - g = Graph(name=str(graph_id), - filename=filename, - graph_attr=GRAPH_STYLE, - node_attr=OP_STYLE, - edge_attr=VAR_STYLE, - **kwargs) + g = Graph( + name=str(graph_id), + filename=filename, + graph_attr=GRAPH_STYLE, + node_attr=OP_STYLE, + edge_attr=VAR_STYLE, + **kwargs + ) var_dict = {} parse_graph(startup_program, g, var_dict) diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index a61487ee71d4c84b55e358caaef6b20429e8e2a4..bfff9c12e92413cb9fd948efda0ce6a626242ccc 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -25,24 +25,26 @@ __all__ = [ ] -def simple_img_conv_pool(input, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - param_attr=None, - bias_attr=None, - act=None, - use_cudnn=True): +def simple_img_conv_pool( + input, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + param_attr=None, + bias_attr=None, + act=None, + use_cudnn=True, +): r""" - 
:api_attr: Static Graph + :api_attr: Static Graph The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` . @@ -115,42 +117,48 @@ def simple_img_conv_pool(input, pool_stride=2, act="relu") """ - conv_out = layers.conv2d(input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=param_attr, - bias_attr=bias_attr, - act=act, - use_cudnn=use_cudnn) - - pool_out = layers.pool2d(input=conv_out, - pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + conv_out = layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=param_attr, + bias_attr=bias_attr, + act=act, + use_cudnn=use_cudnn, + ) + + pool_out = layers.pool2d( + input=conv_out, + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) return pool_out -def img_conv_group(input, - conv_num_filter, - pool_size, - conv_padding=1, - conv_filter_size=3, - conv_act=None, - param_attr=None, - conv_with_batchnorm=False, - conv_batchnorm_drop_rate=0.0, - pool_stride=1, - pool_type="max", - use_cudnn=True): +def img_conv_group( + input, + conv_num_filter, + pool_size, + conv_padding=1, + conv_filter_size=3, + conv_act=None, + param_attr=None, + conv_with_batchnorm=False, + conv_batchnorm_drop_rate=0.0, + pool_stride=1, + pool_type="max", + use_cudnn=True, +): """ - :api_attr: Static Graph + :api_attr: Static Graph The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut, and Pool2D. 
According to the input arguments, img_conv_group will do serials of @@ -210,8 +218,9 @@ def img_conv_group(input, pool_stride=2) """ tmp = input - assert isinstance(conv_num_filter, list) or \ - isinstance(conv_num_filter, tuple) + assert isinstance(conv_num_filter, list) or isinstance( + conv_num_filter, tuple + ) def __extend_list__(obj): if not hasattr(obj, '__len__'): @@ -231,13 +240,15 @@ def img_conv_group(input, if conv_with_batchnorm[i]: local_conv_act = None - tmp = layers.conv2d(input=tmp, - num_filters=conv_num_filter[i], - filter_size=conv_filter_size[i], - padding=conv_padding[i], - param_attr=param_attr[i], - act=local_conv_act, - use_cudnn=use_cudnn) + tmp = layers.conv2d( + input=tmp, + num_filters=conv_num_filter[i], + filter_size=conv_filter_size[i], + padding=conv_padding[i], + param_attr=param_attr[i], + act=local_conv_act, + use_cudnn=use_cudnn, + ) if conv_with_batchnorm[i]: tmp = layers.batch_norm(input=tmp, act=conv_act) @@ -245,23 +256,27 @@ def img_conv_group(input, if abs(drop_rate) > 1e-5: tmp = layers.dropout(x=tmp, dropout_prob=drop_rate) - pool_out = layers.pool2d(input=tmp, - pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - use_cudnn=use_cudnn) + pool_out = layers.pool2d( + input=tmp, + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + use_cudnn=use_cudnn, + ) return pool_out -def sequence_conv_pool(input, - num_filters, - filter_size, - param_attr=None, - act="sigmoid", - pool_type="max", - bias_attr=None): +def sequence_conv_pool( + input, + num_filters, + filter_size, + param_attr=None, + act="sigmoid", + pool_type="max", + bias_attr=None, +): """ - :api_attr: Static Graph + :api_attr: Static Graph **This api takes input as an LoDTensor. If input is a Tensor, please use** :ref:`api_fluid_nets_simple_img_conv_pool` **instead** @@ -315,12 +330,14 @@ def sequence_conv_pool(input, """ check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input') - conv_out = layers.sequence_conv(input=input, - num_filters=num_filters, - filter_size=filter_size, - param_attr=param_attr, - bias_attr=bias_attr, - act=act) + conv_out = layers.sequence_conv( + input=input, + num_filters=num_filters, + filter_size=filter_size, + param_attr=param_attr, + bias_attr=bias_attr, + act=act, + ) pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type) return pool_out @@ -329,7 +346,7 @@ def sequence_conv_pool(input, @deprecated(since="2.0.0", update_to="paddle.nn.functional.glu") def glu(input, dim=-1): r""" - :api_attr: Static Graph + :api_attr: Static Graph The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` , :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` . 
@@ -366,19 +383,18 @@ def glu(input, dim=-1): # shape of output: [-1, 3, 3, 9] output = fluid.nets.glu(input=data, dim=1) """ - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - "glu") + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], "glu" + ) a, b = layers.split(input, num_or_sections=2, dim=dim) act_b = layers.sigmoid(x=b) out = layers.elementwise_mul(x=a, y=act_b) return out -def scaled_dot_product_attention(queries, - keys, - values, - num_heads=1, - dropout_rate=0.): +def scaled_dot_product_attention( + queries, keys, values, num_heads=1, dropout_rate=0.0 +): r""" :api_attr: Static Graph @@ -449,46 +465,63 @@ def scaled_dot_product_attention(queries, contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) contexts.shape # [3, 5, 10] """ - check_variable_and_dtype(queries, 'queries', ['float32', 'float64'], - "scaled_dot_product_attention") - check_variable_and_dtype(keys, 'keys', ['float32', 'float64'], - "scaled_dot_product_attention") - check_variable_and_dtype(values, 'values', ['float32', 'float64'], - "scaled_dot_product_attention") + check_variable_and_dtype( + queries, + 'queries', + ['float32', 'float64'], + "scaled_dot_product_attention", + ) + check_variable_and_dtype( + keys, 'keys', ['float32', 'float64'], "scaled_dot_product_attention" + ) + check_variable_and_dtype( + values, 'values', ['float32', 'float64'], "scaled_dot_product_attention" + ) if not (queries.dtype == keys.dtype == values.dtype): raise TypeError( "The dtype of keys, values and queries should be the same." "But received queries.dtype = %s, " - " keys.dtype = %s, values.dtype) = %s." % - (convert_dtype(queries.dtype), convert_dtype( - keys.dtype), convert_dtype(values.dtype))) + " keys.dtype = %s, values.dtype) = %s." + % ( + convert_dtype(queries.dtype), + convert_dtype(keys.dtype), + convert_dtype(values.dtype), + ) + ) if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): raise ValueError( "Inputs queries, keys and values should all be 3-D tensors." "But received len(queries.shape) = %d, " - "len(keys.shape) = %d, len(values.shape) = %d." % - (len(queries.shape), len(keys.shape), len(values.shape))) + "len(keys.shape) = %d, len(values.shape) = %d." + % (len(queries.shape), len(keys.shape), len(values.shape)) + ) if queries.shape[-1] != keys.shape[-1]: raise ValueError( "The hidden size of queries and keys should be the same." "But received queries' hidden size = %d and keys' hidden size = %d." - % (queries.shape[-1], keys.shape[-1])) + % (queries.shape[-1], keys.shape[-1]) + ) if keys.shape[-2] != values.shape[-2]: raise ValueError( "The max sequence length in value batch and in key batch " "should be the same. But received max sequence length in value batch " - "= %d, in key batch = %d." % (values.shape[-2], keys.shape[-2])) + "= %d, in key batch = %d." % (values.shape[-2], keys.shape[-2]) + ) if keys.shape[-1] % num_heads != 0: - raise ValueError("The hidden size of keys (%d) must be divisible " - "by the number of attention heads (%d)." % - (keys.shape[-1], num_heads)) + raise ValueError( + "The hidden size of keys (%d) must be divisible " + "by the number of attention heads (%d)." + % (keys.shape[-1], num_heads) + ) if values.shape[-1] % num_heads != 0: - raise ValueError("The hidden size of values (%d) must be divisible " - "by the number of attention heads (%d)." 
% - (values.shape[-1], num_heads)) + raise ValueError( + "The hidden size of values (%d) must be divisible " + "by the number of attention heads (%d)." + % (values.shape[-1], num_heads) + ) def __compute_qkv(queries, keys, values, num_heads): """ @@ -535,9 +568,10 @@ def scaled_dot_product_attention(queries, # reshape the 3-D input: [batch_size, max_sequence_length, hidden_dim] # into a 4-D output: # [batch_size, max_sequence_length, num_heads, hidden_size_per_head]. - reshaped = layers.reshape(x=x, - shape=list(x.shape[:-1]) + - [num_heads, hidden_size // num_heads]) + reshaped = layers.reshape( + x=x, + shape=list(x.shape[:-1]) + [num_heads, hidden_size // num_heads], + ) # permute the dimensions into: # [batch_size, num_heads, max_sequence_len, hidden_size_per_head] @@ -557,17 +591,25 @@ def scaled_dot_product_attention(queries, [bs, max_sequence_length, num_heads * hidden_dim]. """ - if len(x.shape) == 3: return x + if len(x.shape) == 3: + return x if len(x.shape) != 4: raise ValueError("Input(x) should be a 4-D Tensor.") trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) - return layers.reshape(x=trans_x, - shape=list( - map(int, [ - trans_x.shape[0], trans_x.shape[1], - trans_x.shape[2] * trans_x.shape[3] - ]))) + return layers.reshape( + x=trans_x, + shape=list( + map( + int, + [ + trans_x.shape[0], + trans_x.shape[1], + trans_x.shape[2] * trans_x.shape[3], + ], + ) + ), + ) q, k, v = __compute_qkv(queries, keys, values, num_heads) @@ -579,13 +621,15 @@ def scaled_dot_product_attention(queries, scaled_q = layers.scale(x=q, scale=key_dim_per_head**-0.5) product = layers.matmul(x=scaled_q, y=k, transpose_y=True) - weights = layers.reshape(x=layers.reshape(x=product, - shape=[-1, product.shape[-1]], - act="softmax"), - shape=product.shape) + weights = layers.reshape( + x=layers.reshape( + x=product, shape=[-1, product.shape[-1]], act="softmax" + ), + shape=product.shape, + ) if dropout_rate: - weights = layers.dropout(weights, - dropout_prob=dropout_rate, - is_test=False) + weights = layers.dropout( + weights, dropout_prob=dropout_rate, is_test=False + ) ctx_multiheads = layers.matmul(weights, v) return __combine_heads(ctx_multiheads) diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index bdcdad1218e41791817da40635e86d6c94280984..c8b118127cc13b6a3b017716151f68c4b36a82ed 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -47,7 +47,8 @@ class OpDescCreationMethod(object): def __init__(self, op_proto): if not isinstance(op_proto, framework_pb2.OpProto): raise TypeError( - "Type of op_proto should be OpProto in PaddlePaddle.") + "Type of op_proto should be OpProto in PaddlePaddle." + ) self.__op_proto__ = op_proto self.__extra_attrs__ = core.get_op_extra_attrs(op_proto.type) @@ -67,8 +68,9 @@ class OpDescCreationMethod(object): if not input_parameter.duplicable and len(input_arguments) > 1: raise ValueError( - "Input %s expects only one input, but %d are given." % - (input_parameter.name, len(input_arguments))) + "Input %s expects only one input, but %d are given." + % (input_parameter.name, len(input_arguments)) + ) ipt = op_desc.inputs.add() ipt.parameter = input_parameter.name @@ -81,8 +83,9 @@ class OpDescCreationMethod(object): if not output_parameter.duplicable and len(output_arguments) > 1: raise ValueError( - "Output %s expects only one output, but %d are given." % - (output_parameter.name, len(output_arguments))) + "Output %s expects only one output, but %d are given." 
+ % (output_parameter.name, len(output_arguments)) + ) out = op_desc.outputs.add() out.parameter = output_parameter.name @@ -126,13 +129,14 @@ class OpDescCreationMethod(object): new_attr.float64 = user_defined_attr else: raise NotImplementedError( - "A not supported attribute type: %s." % - (str(attr.type))) + "A not supported attribute type: %s." % (str(attr.type)) + ) for attr_name, defalut_val in self.__extra_attrs__.items(): user_defined_attr = kwargs.get(attr_name, None) if user_defined_attr is not None: attr_type = int( - core.get_attrtibute_type(op_desc.type, attr_name)) + core.get_attrtibute_type(op_desc.type, attr_name) + ) new_attr = op_desc.attrs.add() new_attr.name = attr_name new_attr.type = attr_type @@ -160,8 +164,8 @@ class OpDescCreationMethod(object): new_attr.longs.extend(user_defined_attr) else: raise NotImplementedError( - "A not supported attribute type: %s." % - (str(attr_type))) + "A not supported attribute type: %s." % (str(attr_type)) + ) return op_desc @@ -178,7 +182,6 @@ class OpDescCreationMethod(object): class OpInfo(object): - def __init__(self, name, method, inputs, outputs, attrs, extra_attrs): self.name = name self.method = method @@ -200,18 +203,17 @@ def create_op_creation_method(op_proto): extra_attrs_map = core.get_op_extra_attrs(op_proto.type) - return OpInfo(method=__impl__, - name=op_proto.type, - inputs=[(var.name, var.duplicable) - for var in op_proto.inputs], - outputs=[(var.name, var.duplicable) - for var in op_proto.outputs], - attrs=[attr.name for attr in op_proto.attrs], - extra_attrs=[item for item in extra_attrs_map.keys()]) + return OpInfo( + method=__impl__, + name=op_proto.type, + inputs=[(var.name, var.duplicable) for var in op_proto.inputs], + outputs=[(var.name, var.duplicable) for var in op_proto.outputs], + attrs=[attr.name for attr in op_proto.attrs], + extra_attrs=[item for item in extra_attrs_map.keys()], + ) class OperatorFactory(object): - def __init__(self): self.op_methods = dict() @@ -224,13 +226,15 @@ class OperatorFactory(object): if len(args) != 0: raise ValueError( "Except the argument \"type\"," - "all of the other arguments should be keyword arguments.") + "all of the other arguments should be keyword arguments." + ) t = kwargs.pop("type") else: if len(args) != 1: raise ValueError( "Except the argument \"type\"," - "all of the other arguments should be keyword arguments.") + "all of the other arguments should be keyword arguments." + ) t = args[0] return self.get_op_info(t).method(**kwargs) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 4471eb9dcbc7550b3d0fd535ad446bdc4e12945e..ce58c0cfe02fbce2bbf08b8083603de6a8ddd1eb 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -19,20 +19,42 @@ from collections import defaultdict import paddle from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table -from paddle.fluid.framework import Program, Variable, Parameter, name_scope, default_main_program, default_startup_program, device_guard +from paddle.fluid.framework import ( + Program, + Variable, + Parameter, + name_scope, + default_main_program, + default_startup_program, + device_guard, +) from . import framework from . import layers from . 
import unique_name -from .backward import append_backward, _some_in_set_, _append_grad_suffix_, _get_no_grad_set_name -from .clip import GradientClipBase, GradientClipByNorm, error_clip_callback, append_gradient_clip_ops, ClipGradByGlobalNorm +from .backward import ( + append_backward, + _some_in_set_, + _append_grad_suffix_, + _get_no_grad_set_name, +) +from .clip import ( + GradientClipBase, + GradientClipByNorm, + error_clip_callback, + append_gradient_clip_ops, + ClipGradByGlobalNorm, +) from .framework import program_guard from .initializer import Constant from .layer_helper import LayerHelper from .layers import ops from .dygraph import base as imperative_base from .dygraph import no_grad -from .dygraph.learning_rate_scheduler import LearningRateDecay, _LearningRateEpochDecay +from .dygraph.learning_rate_scheduler import ( + LearningRateDecay, + _LearningRateEpochDecay, +) from paddle.fluid import core from paddle.fluid.layers import tensor from functools import reduce @@ -41,16 +63,40 @@ from .wrapped_decorator import signature_safe_contextmanager from .. import compat as cpt import warnings from paddle import _C_ops, _legacy_C_ops -from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _current_expected_place +from ..fluid.framework import ( + _in_legacy_dygraph, + in_dygraph_mode, + _current_expected_place, +) __all__ = [ - 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'Dpsgd', 'DecayedAdagrad', - 'Ftrl', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', - 'AdamOptimizer', 'AdamaxOptimizer', 'DpsgdOptimizer', - 'DecayedAdagradOptimizer', 'RMSPropOptimizer', 'FtrlOptimizer', 'Adadelta', - 'AdadeltaOptimizer', 'ModelAverage', 'LarsMomentum', - 'LarsMomentumOptimizer', 'LambOptimizer', 'ExponentialMovingAverage', - 'PipelineOptimizer', 'LookaheadOptimizer', 'RecomputeOptimizer' + 'SGD', + 'Momentum', + 'Adagrad', + 'Adam', + 'Adamax', + 'Dpsgd', + 'DecayedAdagrad', + 'Ftrl', + 'SGDOptimizer', + 'MomentumOptimizer', + 'AdagradOptimizer', + 'AdamOptimizer', + 'AdamaxOptimizer', + 'DpsgdOptimizer', + 'DecayedAdagradOptimizer', + 'RMSPropOptimizer', + 'FtrlOptimizer', + 'Adadelta', + 'AdadeltaOptimizer', + 'ModelAverage', + 'LarsMomentum', + 'LarsMomentumOptimizer', + 'LambOptimizer', + 'ExponentialMovingAverage', + 'PipelineOptimizer', + 'LookaheadOptimizer', + 'RecomputeOptimizer', ] @@ -63,14 +109,16 @@ class Optimizer(object): """ @imperative_base.no_grad - def __init__(self, - learning_rate, - parameter_list=None, - regularization=None, - grad_clip=None, - flatten_param_grads=False, - align_size=-1, - name=None): + def __init__( + self, + learning_rate, + parameter_list=None, + regularization=None, + grad_clip=None, + flatten_param_grads=False, + align_size=-1, + name=None, + ): """ Args: flatten_param_grads (bool, optional): Whether to flatten all the parameters and grads. 
@@ -79,15 +127,19 @@ class Optimizer(object): """ # Because of the loop import, so place it in the function body from paddle.optimizer.lr import LRScheduler - self._parameter_list = list( - parameter_list) if parameter_list is not None else None + + self._parameter_list = ( + list(parameter_list) if parameter_list is not None else None + ) self._name = name if framework._non_static_mode(): - if not isinstance(learning_rate, - (float, LearningRateDecay, LRScheduler)): + if not isinstance( + learning_rate, (float, LearningRateDecay, LRScheduler) + ): raise TypeError( "learning rate should be float or LRScheduler, got %s here" - % type(learning_rate)) + % type(learning_rate) + ) if self._parameter_list is None: raise AttributeError( "parameter_list argument given to the Optimizer should not be None in dygraph mode." @@ -98,14 +150,17 @@ class Optimizer(object): logging.info( "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. " "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" - % regularization.__str__()) + % regularization.__str__() + ) break else: - if not isinstance(learning_rate, - (float, framework.Variable, LRScheduler)): + if not isinstance( + learning_rate, (float, framework.Variable, LRScheduler) + ): raise TypeError( "learning rate should be float or LRScheduler, got %s here" - % type(learning_rate)) + % type(learning_rate) + ) if grad_clip is not None: if not isinstance(grad_clip, GradientClipBase): @@ -128,7 +183,8 @@ class Optimizer(object): self._learning_rate_map = dict() if isinstance(self._learning_rate, framework.Variable): self._learning_rate_map[ - framework.default_main_program()] = self._learning_rate + framework.default_main_program() + ] = self._learning_rate # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. 
@@ -169,6 +225,7 @@ class Optimizer(object): ''' from paddle.optimizer.lr import LRScheduler + state_dict = {} for k, v in self._accumulators.items(): for para_name, var_tmp in v.items(): @@ -184,14 +241,13 @@ class Optimizer(object): if not isinstance(self._learning_rate, _LearningRateEpochDecay): var_tmp = None - var_temp = framework._varbase_creator(None, - name='global_step', - dtype='int32') + var_temp = framework._varbase_creator( + None, name='global_step', dtype='int32' + ) - tensor.fill_constant([1], - "int32", - self._learning_rate.step_num, - out=var_temp) + tensor.fill_constant( + [1], "int32", self._learning_rate.step_num, out=var_temp + ) state_dict['global_step'] = var_temp return state_dict @@ -230,6 +286,7 @@ class Optimizer(object): para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy") ''' from paddle.optimizer.lr import LRScheduler + if isinstance(self._learning_rate, LRScheduler): self._learning_rate.set_dict(state_dict["LR_Scheduler"]) @@ -237,25 +294,33 @@ class Optimizer(object): self._learning_rate.set_dict(state_dict["LR_Scheduler"]) if not isinstance(self._learning_rate, _LearningRateEpochDecay): - assert 'global_step' in state_dict, \ - 'Global step not in state dict, Dygraph use LearningRateDecay, global_step must in state_dict' + assert ( + 'global_step' in state_dict + ), 'Global step not in state dict, Dygraph use LearningRateDecay, global_step must in state_dict' global_step = state_dict['global_step'] if isinstance(global_step, Variable): step_np = global_step step_np = np.array(step_np.value().get_tensor()) - assert step_np.shape == (1,), \ - "global step shape is (1,), the shape is {}".format( step_np.shape ) + assert step_np.shape == ( + 1, + ), "global step shape is (1,), the shape is {}".format( + step_np.shape + ) self._learning_rate.step_num = int(step_np[0]) elif isinstance(global_step, np.ndarray): - assert global_step.shape == (1,), \ - "global step shape is (1,), the shape is {}".format( global_step.shape ) + assert global_step.shape == ( + 1, + ), "global step shape is (1,), the shape is {}".format( + global_step.shape + ) self._learning_rate.step_num = global_step[0] else: raise RuntimeError( "Type not supprt, value in state dict must be [VarBase, Variable, numpy], the type is ", - type(global_step)) + type(global_step), + ) def _load_state_para(state_dict, param): var = param.value() @@ -269,29 +334,36 @@ class Optimizer(object): elif isinstance(load_para, np.ndarray): load_para_np = load_para else: - raise RuntimeError("State dict type {} not supprt".format( - str(type(load_para)))) + raise RuntimeError( + "State dict type {} not supprt".format(str(type(load_para))) + ) - assert model_np.shape == load_para_np.shape, \ - "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format( - param.name, model_np.shape, load_para_np.shape) + assert ( + model_np.shape == load_para_np.shape + ), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format( + param.name, model_np.shape, load_para_np.shape + ) - assert model_np.dtype == load_para_np.dtype, \ - "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( - param.name, model_np.dtype, load_para_np.dtype) + assert ( + model_np.dtype == load_para_np.dtype + ), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( + param.name, model_np.dtype, 
load_para_np.dtype + ) tensor.set(load_para_np, framework._current_expected_place()) self._accumulators_holder = state_dict for k, v in self._accumulators.items(): for para_name, var_tmp in v.items(): - assert var_tmp.name in state_dict, \ - "optimizer variable {} not found".format( var_tmp.name ) + assert ( + var_tmp.name in state_dict + ), "optimizer variable {} not found".format(var_tmp.name) _load_state_para(state_dict, var_tmp) for k, v in self._global_accumulators.items(): - assert v.name in state_dict, \ - "optimizer variable {} not found".format( v.name ) + assert ( + v.name in state_dict + ), "optimizer variable {} not found".format(v.name) _load_state_para(state_dict, v) # [aliases] Compatible with old method names @@ -311,6 +383,7 @@ class Optimizer(object): def _create_global_learning_rate(self): from paddle.optimizer.lr import LRScheduler + if isinstance(self._learning_rate, LRScheduler): lr_var = self._global_learning_rate() # only create global lr_var once @@ -322,16 +395,19 @@ class Optimizer(object): shape=[1], persistable=True, stop_gradient=True, - dtype='float32' if self._dtype is None else self._dtype) + dtype='float32' if self._dtype is None else self._dtype, + ) main_prog = framework.default_main_program() main_prog.lr_sheduler = self._learning_rate main_prog.lr_var = lr_var self._learning_rate_map[ - framework.default_main_program()] = lr_var + framework.default_main_program() + ] = lr_var lr_value = float(self._learning_rate()) self.helper.set_variable_initializer( - lr_var, initializer=Constant(value=lr_value)) + lr_var, initializer=Constant(value=lr_value) + ) return if imperative_base.enabled(): @@ -342,17 +418,20 @@ class Optimizer(object): if isinstance(lr, framework.Variable): return else: - self._learning_rate_map[framework.default_main_program( - )] = layers.create_global_var( + self._learning_rate_map[ + framework.default_main_program() + ] = layers.create_global_var( name=unique_name.generate("learning_rate"), shape=[1], value=float(self._learning_rate), dtype='float32' if self._dtype is None else self._dtype, - persistable=True) + persistable=True, + ) # get learning rate Variable from LearningRateDecay elif isinstance(self._learning_rate, LearningRateDecay): self._learning_rate_map[ - framework.default_main_program()] = self._learning_rate() + framework.default_main_program() + ] = self._learning_rate() else: raise TypeError( "optimizer's learning rate must be float or LearningRateDecay" @@ -371,12 +450,14 @@ class Optimizer(object): # create learning rate in the current main program self._learning_rate_map[ - framework.default_main_program()] = layers.create_global_var( - name=unique_name.generate("learning_rate"), - shape=[1], - value=float(self._learning_rate), - dtype='float32' if self._dtype is None else self._dtype, - persistable=True) + framework.default_main_program() + ] = layers.create_global_var( + name=unique_name.generate("learning_rate"), + shape=[1], + value=float(self._learning_rate), + dtype='float32' if self._dtype is None else self._dtype, + persistable=True, + ) @framework.dygraph_only def set_lr(self, value): @@ -431,7 +512,8 @@ class Optimizer(object): if not isinstance(value, (framework.Variable, float)): raise TypeError( "The type of 'value' in optimizer.set_lr must be (float, Variable), but received %s." - % (type(value))) + % (type(value)) + ) if isinstance(self._learning_rate, LearningRateDecay): raise RuntimeError( "optimizer's learning rate can't be LearningRateDecay when invoke this API, because this will lead to conflict." 
@@ -442,28 +524,42 @@ class Optimizer(object): if current_lr is not None: if in_dygraph_mode(): place = _current_expected_place() - _C_ops.full_(current_lr, list(current_lr.shape), - float(value), current_lr.dtype, place) + _C_ops.full_( + current_lr, + list(current_lr.shape), + float(value), + current_lr.dtype, + place, + ) elif _in_legacy_dygraph(): - _legacy_C_ops.fill_constant(current_lr, 'value', - float(value), 'dtype', - current_lr.dtype, 'shape', - list(current_lr.shape)) + _legacy_C_ops.fill_constant( + current_lr, + 'value', + float(value), + 'dtype', + current_lr.dtype, + 'shape', + list(current_lr.shape), + ) else: - global_block = framework.default_main_program( - ).global_block() - global_block.append_op(type='fill_constant', - outputs={'Out': [current_lr]}, - attrs={ - 'dtype': current_lr.dtype, - 'shape': list(current_lr.shape), - 'value': float(value) - }, - stop_gradient=True) + global_block = ( + framework.default_main_program().global_block() + ) + global_block.append_op( + type='fill_constant', + outputs={'Out': [current_lr]}, + attrs={ + 'dtype': current_lr.dtype, + 'shape': list(current_lr.shape), + 'value': float(value), + }, + stop_gradient=True, + ) else: - assert len(value.shape) == 1 and value.shape[ - 0] == 1, "optimizer's learning rate must be 1-D Tensor with shape[1]" + assert ( + len(value.shape) == 1 and value.shape[0] == 1 + ), "optimizer's learning rate must be 1-D Tensor with shape[1]" self._learning_rate_map[framework.default_main_program()] = value @framework.dygraph_only @@ -540,8 +636,7 @@ class Optimizer(object): return self._learning_rate_map.get(program, None) def _append_optimize_op(self, block, param_and_grad): - """ append optimize operator to block and return all the added optimize_op - """ + """append optimize operator to block and return all the added optimize_op""" raise NotImplementedError() def _create_param_lr(self, param_and_grad): @@ -555,8 +650,8 @@ class Optimizer(object): return self._global_learning_rate() else: with default_main_program()._lr_schedule_guard( - is_with_opt=True), framework.name_scope( - 'scale_with_param_lr'): + is_with_opt=True + ), framework.name_scope('scale_with_param_lr'): return self._global_learning_rate() * param_lr def _create_accumulators(self, block, parameters): @@ -581,14 +676,16 @@ class Optimizer(object): """ pass - def _add_accumulator(self, - name, - param, - dtype=None, - fill_value=0.0, - shape=None, - type=None, - device=None): + def _add_accumulator( + self, + name, + param, + dtype=None, + fill_value=0.0, + shape=None, + type=None, + device=None, + ): """Utility function to add an accumulator for a parameter Args: @@ -600,13 +697,17 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name in self._accumulators - and param.name in self._accumulators[name]): + if ( + name in self._accumulators + and param.name in self._accumulators[name] + ): if framework._non_static_mode(): return self._accumulators[name][param.name] raise Exception( "Accumulator {} already exists for parameter {}".format( - name, param.name)) + name, param.name + ) + ) if shape == None: shape = param.shape assert isinstance(self.helper, LayerHelper) @@ -620,32 +721,39 @@ class Optimizer(object): persistable=True, dtype=dtype or param.dtype, type=core.VarDesc.VarType.LOD_TENSOR - if framework._non_static_mode() else - (param.type if type is None else type), + if framework._non_static_mode() + else (param.type if type is None else type), shape=shape, - belong_to_optimizer=True) + 
belong_to_optimizer=True, + ) if device is None: device = self._get_device_for_param(param.name) with device_guard(device): self.helper.set_variable_initializer( - var, initializer=Constant(value=float(fill_value))) + var, initializer=Constant(value=float(fill_value)) + ) if framework._non_static_mode(): if len(self._accumulators_holder) > 0: - assert var_name in self._accumulators_holder, \ - "Optimizer set error, {} should in state dict".format( var_name ) + assert ( + var_name in self._accumulators_holder + ), "Optimizer set error, {} should in state dict".format( + var_name + ) var.set_value(self._accumulators_holder[var_name]) self._accumulators[name][param.name] = var return var - def _add_global_accumulator(self, - name, - dtype=None, - fill_value=0.0, - shape=None, - type=None, - device=None): + def _add_global_accumulator( + self, + name, + dtype=None, + fill_value=0.0, + shape=None, + type=None, + device=None, + ): """Utility function to add a global accumulator for all parameters in the model Args: @@ -659,7 +767,7 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name in self._global_accumulators): + if name in self._global_accumulators: if framework._non_static_mode(): return self._global_accumulators[name] raise Exception("Global accumulator {} already exists".format(name)) @@ -677,17 +785,22 @@ class Optimizer(object): dtype=dtype if dtype else self._dtype, type=type, shape=shape, - belong_to_optimizer=True) + belong_to_optimizer=True, + ) if device is None: device = 'cpu' with device_guard(device): self.helper.set_variable_initializer( - var, initializer=Constant(value=float(fill_value))) + var, initializer=Constant(value=float(fill_value)) + ) if framework._non_static_mode(): if len(self._accumulators_holder) > 0: - assert var_name in self._accumulators_holder, \ - "Optimizer set error, {} should in state dict".format( var_name ) + assert ( + var_name in self._accumulators_holder + ), "Optimizer set error, {} should in state dict".format( + var_name + ) var.set_value(self._accumulators_holder[var_name]) self._global_accumulators[name] = var @@ -705,11 +818,15 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name not in self._accumulators - or param.name not in self._accumulators[name]): + if ( + name not in self._accumulators + or param.name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, param.name)) + name, param.name + ) + ) return self._accumulators[name][param.name] def _get_global_accumulator(self, name): @@ -723,7 +840,7 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name not in self._global_accumulators): + if name not in self._global_accumulators: raise Exception("Global accumulator {} does not exist".format(name)) return self._global_accumulators[name] @@ -732,13 +849,15 @@ class Optimizer(object): if param_and_grad[0].trainable is True: param_name = param_and_grad[0].name ops = target_block.ops - device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName( + device_attr_name = ( + core.op_proto_and_checker_maker.kOpDeviceAttrName() ) for op in ops: input_arg_names = op.input_arg_names if param_name in input_arg_names: self._param_device_map[param_name] = op.attr( - device_attr_name) + device_attr_name + ) break def _get_device_for_param(self, param_name): @@ -775,17 +894,19 @@ class Optimizer(object): target_block = global_block current_block = 
framework.default_main_program().current_block() if current_block.idx != global_block.idx: - assert current_block.backward_block_idx != -1, \ - "current block is not global_block, but it doesn't have backward block." + assert ( + current_block.backward_block_idx != -1 + ), "current block is not global_block, but it doesn't have backward block." target_block = framework.default_main_program().blocks[ - current_block.backward_block_idx] + current_block.backward_block_idx + ] start = len(target_block.ops) self._update_param_device_map(parameters_and_grads, target_block) self._create_accumulators( - target_block, - [p[0] for p in parameters_and_grads if p[0].trainable]) + target_block, [p[0] for p in parameters_and_grads if p[0].trainable] + ) self._create_global_learning_rate() if framework._non_static_mode(): @@ -799,13 +920,16 @@ class Optimizer(object): if param_and_grad[1] is None: continue with param_and_grad[0].block.program._optimized_guard( - param_and_grad), name_scope("optimizer"): + param_and_grad + ), name_scope("optimizer"): if param_and_grad[0].trainable is True: device = self._get_device_for_param( - param_and_grad[0].name) + param_and_grad[0].name + ) with device_guard(device): optimize_op = self._append_optimize_op( - target_block, param_and_grad) + target_block, param_and_grad + ) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies @@ -834,7 +958,8 @@ class Optimizer(object): if p.name == table_name: if table_param is not None: raise RuntimeError( - "multi dist table var found, only support one now!") + "multi dist table var found, only support one now!" + ) table_param = p table_grad = g else: @@ -842,8 +967,9 @@ class Optimizer(object): sgd_op = None if table_param is not None: param_and_grad = [table_param, table_grad] - with table_param.block.program._optimized_guard(param_and_grad), \ - framework.name_scope("optimizer"): + with table_param.block.program._optimized_guard( + param_and_grad + ), framework.name_scope("optimizer"): self._create_global_learning_rate() # create the optimize op sgd_op = global_block.append_op( @@ -851,17 +977,20 @@ class Optimizer(object): inputs={ "Param": table_param, "Grad": table_grad, - "LearningRate": self._create_param_lr(param_and_grad) + "LearningRate": self._create_param_lr(param_and_grad), }, - outputs={"ParamOut": param_and_grad[0]}) + outputs={"ParamOut": param_and_grad[0]}, + ) return new_param_grads, (table_param, table_grad), sgd_op - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ The first part of ``minimize``, do auto-diff to append backward operations for the current program. @@ -897,8 +1026,9 @@ class Optimizer(object): self._dtype = loss.dtype if framework._non_static_mode(): - parameter_list = parameter_list if parameter_list \ - else self._parameter_list + parameter_list = ( + parameter_list if parameter_list else self._parameter_list + ) params_grads = [] for param in parameter_list: @@ -912,29 +1042,36 @@ class Optimizer(object): if callbacks is None: callbacks = [error_clip_callback] else: - assert (isinstance(callbacks, list)) + assert isinstance(callbacks, list) program = loss.block.program - assert len(loss.shape) == 1 and loss.shape[0] == 1, \ - "The loss.shape should be (1L,), but the current loss.shape is {}. 
" \ + assert len(loss.shape) == 1 and loss.shape[0] == 1, ( + "The loss.shape should be (1L,), but the current loss.shape is {}. " "Maybe that you should call paddle.mean to process the current loss.".format( - loss.shape) - parameter_list = parameter_list if parameter_list \ - else self._parameter_list + loss.shape + ) + ) + parameter_list = ( + parameter_list if parameter_list else self._parameter_list + ) with program_guard(program, startup_program): - params_grads = append_backward(loss, parameter_list, - act_no_grad_set, callbacks) + params_grads = append_backward( + loss, parameter_list, act_no_grad_set, callbacks + ) return params_grads def _create_regularization_of_grad(self, param, grad, regularization=None): - """ Create and add backward regularization Operators + """Create and add backward regularization Operators Function helper of append_regularization_ops. """ # If no gradient or no regularization is specified, then we don't need to do anything if grad is None or ( - (not hasattr(param, 'regularizer') or - (hasattr(param, 'regularizer') and param.regularizer is None)) - and regularization is None): + ( + not hasattr(param, 'regularizer') + or (hasattr(param, 'regularizer') and param.regularizer is None) + ) + and regularization is None + ): return grad regularization_term = None if hasattr(param, 'regularizer') and param.regularizer is not None: @@ -959,7 +1096,8 @@ class Optimizer(object): dtype=param.dtype, shape=param.shape, lod_level=param.lod_level, - type=core.VarDesc.VarType.LOD_TENSOR) + type=core.VarDesc.VarType.LOD_TENSOR, + ) inputs = {"X": [grad, regularization_term]} outputs = {"Out": [new_grad]} @@ -967,9 +1105,9 @@ class Optimizer(object): return new_grad - def append_regularization_ops(self, - parameters_and_grads, - regularization=None): + def append_regularization_ops( + self, parameters_and_grads, regularization=None + ): r"""Create and add backward regularization Operators Creates and adds backward regularization operators in the BlockDesc. @@ -994,23 +1132,28 @@ class Optimizer(object): if framework._non_static_mode(): for param, grad in parameters_and_grads: new_grad = self._create_regularization_of_grad( - param, grad, regularization) + param, grad, regularization + ) params_and_grads.append((param, new_grad)) else: repeate_regularizer = False with framework.name_scope('regularization'): for param, grad in parameters_and_grads: - if not repeate_regularizer and getattr( - param, 'regularizer', - None) is not None and regularization is not None: + if ( + not repeate_regularizer + and getattr(param, 'regularizer', None) is not None + and regularization is not None + ): repeate_regularizer = True logging.info( "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. " "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" 
- % regularization.__str__()) + % regularization.__str__() + ) with param.block.program._optimized_guard([param, grad]): new_grad = self._create_regularization_of_grad( - param, grad, regularization) + param, grad, regularization + ) params_and_grads.append((param, new_grad)) return params_and_grads @@ -1021,11 +1164,14 @@ class Optimizer(object): if g is None: continue g.persistable = True - if getattr(p, 'need_clip', True) is False or getattr( - p, 'regularizer', None) is not None: + if ( + getattr(p, 'need_clip', True) is False + or getattr(p, 'regularizer', None) is not None + ): warnings.warn( "flatten_param_grads=True will be discarded since paramter '{}''s need_clip is False or " - "the regularizer is set".format(p.name)) + "the regularizer is set".format(p.name) + ) self._flatten_param_grads = False return params_grads @@ -1040,7 +1186,8 @@ class Optimizer(object): persistable=True, dtype=need_flatten_params[0].dtype, shape=[np.sum(shape)], - belong_to_optimizer=True) + belong_to_optimizer=True, + ) flatten_param.trainable = True flatten_param.optimize_attr = need_flatten_params[0].optimize_attr @@ -1051,41 +1198,48 @@ class Optimizer(object): persistable=True, dtype=need_flatten_grads[0].dtype, shape=[np.sum(shape)], - belong_to_optimizer=True) + belong_to_optimizer=True, + ) with program_guard(default_main_program()): - block.append_op(type="coalesce_tensor", - inputs={"Input": need_flatten_params}, - outputs={ - "Output": need_flatten_params, - "FusedOutput": flatten_param - }, - attrs={ - "copy_data": True, - "use_align": True, - "align_size": self._align_size, - "dtype": need_flatten_params[0].dtype - }) - - block.append_op(type="coalesce_tensor", - inputs={"Input": need_flatten_grads}, - outputs={ - "Output": need_flatten_grads, - "FusedOutput": flatten_grad - }, - attrs={ - "copy_data": True, - "use_align": True, - "align_size": self._align_size, - "dtype": need_flatten_grads[0].dtype - }) + block.append_op( + type="coalesce_tensor", + inputs={"Input": need_flatten_params}, + outputs={ + "Output": need_flatten_params, + "FusedOutput": flatten_param, + }, + attrs={ + "copy_data": True, + "use_align": True, + "align_size": self._align_size, + "dtype": need_flatten_params[0].dtype, + }, + ) + + block.append_op( + type="coalesce_tensor", + inputs={"Input": need_flatten_grads}, + outputs={ + "Output": need_flatten_grads, + "FusedOutput": flatten_grad, + }, + attrs={ + "copy_data": True, + "use_align": True, + "align_size": self._align_size, + "dtype": need_flatten_grads[0].dtype, + }, + ) - #NOTE(zhiqiu): the initializer should be set after coalesce_tensor op, + # NOTE(zhiqiu): the initializer should be set after coalesce_tensor op, # so the shape of flatten_param and flatten_grad will be inferred. - self.helper.set_variable_initializer(flatten_param, - initializer=Constant(0.0)) - self.helper.set_variable_initializer(flatten_grad, - initializer=Constant(0.0)) + self.helper.set_variable_initializer( + flatten_param, initializer=Constant(0.0) + ) + self.helper.set_variable_initializer( + flatten_grad, initializer=Constant(0.0) + ) return [(flatten_param, flatten_grad)] @@ -1115,8 +1269,9 @@ class Optimizer(object): # NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization. 
if self._flatten_param_grads and self.regularization is None: - if self._grad_clip == None or isinstance(self._grad_clip, - ClipGradByGlobalNorm): + if self._grad_clip == None or isinstance( + self._grad_clip, ClipGradByGlobalNorm + ): params_grads = self.flatten_param_grads(params_grads) # 'optimizer(grad_clip)' or 'set_gradient_clip' @@ -1126,8 +1281,9 @@ class Optimizer(object): params_grads = append_gradient_clip_ops(params_grads) # Add regularization if any - params_grads = self.append_regularization_ops(params_grads, - self.regularization) + params_grads = self.append_regularization_ops( + params_grads, self.regularization + ) optimize_ops = self._create_optimization_pass(params_grads) return optimize_ops @@ -1145,12 +1301,15 @@ class Optimizer(object): list: A list of operators appended to the current program. """ if framework._non_static_mode(): - with program_guard(framework.default_main_program(), - framework.default_startup_program()): + with program_guard( + framework.default_main_program(), + framework.default_startup_program(), + ): if self._grad_clip is not None: params_grads = self._grad_clip(params_grads) params_grads = self.append_regularization_ops( - params_grads, self.regularization) + params_grads, self.regularization + ) optimize_ops = self._create_optimization_pass(params_grads) else: program = loss.block.program @@ -1162,7 +1321,8 @@ class Optimizer(object): no_grad_set = _get_no_grad_set_name(no_grad_set) parameters = loss.block.program.global_block().all_parameters() param_no_trainable = set( - [param.name for param in parameters if param.trainable is False]) + [param.name for param in parameters if param.trainable is False] + ) # If the parameter is no trainable, it should not have a gradient. no_grad_set.update(param_no_trainable) @@ -1202,11 +1362,9 @@ class Optimizer(object): p.clear_gradient() @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): """ Add operations to minimize ``loss`` by updating ``parameter_list``. @@ -1234,17 +1392,20 @@ class Optimizer(object): """ assert isinstance(loss, Variable), "The loss should be an Variable." 
- parameter_list = parameter_list if parameter_list \ - else self._parameter_list + parameter_list = ( + parameter_list if parameter_list else self._parameter_list + ) - params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) - optimize_ops = self.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + optimize_ops = self.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads @@ -1305,19 +1466,23 @@ class SGDOptimizer(Optimizer): """ - def __init__(self, - learning_rate, - parameter_list=None, - regularization=None, - grad_clip=None, - multi_precision=False, - name=None): + def __init__( + self, + learning_rate, + parameter_list=None, + regularization=None, + grad_clip=None, + multi_precision=False, + name=None, + ): assert learning_rate is not None - super(SGDOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(SGDOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "sgd" self._use_mkldnn = False self._multi_precision = multi_precision @@ -1331,19 +1496,23 @@ class SGDOptimizer(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -1357,7 +1526,10 @@ class SGDOptimizer(Optimizer): if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16: master_p = self._create_master_weight(p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Adam optimizer." 
@@ -1366,19 +1538,35 @@ class SGDOptimizer(Optimizer): @no_grad def _append_optimize_op(self, block, param_and_grad): - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) lr = self._create_param_lr(param_and_grad) if in_dygraph_mode(): - _C_ops.sgd_(param_and_grad[0], lr, param_and_grad[1], master_weight, - find_master) + _C_ops.sgd_( + param_and_grad[0], + lr, + param_and_grad[1], + master_weight, + find_master, + ) return None if _in_legacy_dygraph(): - _legacy_C_ops.sgd(param_and_grad[0], lr, param_and_grad[1], - master_weight, param_and_grad[0], master_weight) + _legacy_C_ops.sgd( + param_and_grad[0], + lr, + param_and_grad[1], + master_weight, + param_and_grad[0], + master_weight, + ) return None assert isinstance(block, framework.Block) @@ -1386,7 +1574,7 @@ class SGDOptimizer(Optimizer): inputs = { "Param": param_and_grad[0], "Grad": param_and_grad[1], - "LearningRate": lr + "LearningRate": lr, } outputs = {"ParamOut": param_and_grad[0]} @@ -1397,11 +1585,13 @@ class SGDOptimizer(Optimizer): inputs["MasterParam"] = master_weight outputs["MasterParamOut"] = master_weight - sgd_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + sgd_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return sgd_op @@ -1478,21 +1668,25 @@ class MomentumOptimizer(Optimizer): """ _velocity_acc_str = "velocity" - def __init__(self, - learning_rate, - momentum, - parameter_list=None, - use_nesterov=False, - regularization=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + momentum, + parameter_list=None, + use_nesterov=False, + regularization=None, + grad_clip=None, + name=None, + ): assert learning_rate is not None assert momentum is not None - super(MomentumOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(MomentumOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "momentum" self._momentum = momentum self._use_nesterov = bool(use_nesterov) @@ -1506,15 +1700,26 @@ class MomentumOptimizer(Optimizer): def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - velocity_acc = self._get_accumulator(self._velocity_acc_str, - param_and_grad[0]) + velocity_acc = self._get_accumulator( + self._velocity_acc_str, param_and_grad[0] + ) lr = self._create_param_lr(param_and_grad) master_weight = None if framework._non_static_mode(): _, _, _ = _legacy_C_ops.momentum( - param_and_grad[0], param_and_grad[1], velocity_acc, lr, - master_weight, param_and_grad[0], velocity_acc, master_weight, - 'mu', self._momentum, 'use_nesterov', self._use_nesterov) + param_and_grad[0], + param_and_grad[1], + velocity_acc, + lr, + master_weight, + param_and_grad[0], + velocity_acc, + master_weight, + 'mu', + self._momentum, + 'use_nesterov', + self._use_nesterov, + ) return None attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov} @@ -1522,19 +1727,21 @@ class 
MomentumOptimizer(Optimizer): "Param": [param_and_grad[0]], "Grad": [param_and_grad[1]], "Velocity": [velocity_acc], - "LearningRate": [lr] + "LearningRate": [lr], } outputs = { "ParamOut": [param_and_grad[0]], - "VelocityOut": [velocity_acc] + "VelocityOut": [velocity_acc], } # create the momentum optimize op - momentum_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + momentum_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return momentum_op @@ -1607,32 +1814,36 @@ class DGCMomentumOptimizer(Optimizer): _u_velocity_acc_str = "_dgc_u_" _v_velocity_acc_str = "_dgc_v_" - def __init__(self, - learning_rate, - momentum, - rampup_begin_step, - rampup_step=1, - sparsity=[0.999], - parameter_list=None, - use_nesterov=False, - num_trainers=None, - regularization=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + momentum, + rampup_begin_step, + rampup_step=1, + sparsity=[0.999], + parameter_list=None, + use_nesterov=False, + num_trainers=None, + regularization=None, + grad_clip=None, + name=None, + ): if framework._non_static_mode(): raise Exception("In dygraph, don't support DGCMomentumOptimizer.") - assert core.is_compiled_with_cuda(), \ - "Paddle is not compiled with CUDA. DGC is only support GPU for now." + assert ( + core.is_compiled_with_cuda() + ), "Paddle is not compiled with CUDA. DGC is only support GPU for now." assert learning_rate is not None assert momentum is not None - super(DGCMomentumOptimizer, - self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(DGCMomentumOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "dgc_momentum" self._momentum = momentum self._use_nesterov = bool(use_nesterov) @@ -1651,17 +1862,20 @@ class DGCMomentumOptimizer(Optimizer): raise TypeError( "The type of grad_clip should be 'GradientClipByNorm', because DGCMomentumOptimizer only support GradientClipByNorm" ) - assert isinstance( - num_trainers, int - ), "The type of num_trainers should be 'int', but received %s" % type( - num_trainers) - assert num_trainers > 0, "The value of num_trainers should be greater than 0!" + assert isinstance(num_trainers, int), ( + "The type of num_trainers should be 'int', but received %s" + % type(num_trainers) + ) + assert ( + num_trainers > 0 + ), "The value of num_trainers should be greater than 0!" 
self._num_trainers = num_trainers self._dgc_clip_norm = grad_clip.clip_norm * (num_trainers**-0.5) self.regular_type, self.regular_coeff = self._get_regularization_param( - self.regularization) + self.regularization + ) def _get_regularization_param(self, regularization): regular_type = 0 @@ -1670,6 +1884,7 @@ class DGCMomentumOptimizer(Optimizer): if regularization is not None: regular_coeff = regularization._regularization_coeff from .regularizer import L1Decay, L2Decay + if isinstance(regularization, L1Decay): regular_type = 1 elif isinstance(regularization, L2Decay): @@ -1680,17 +1895,20 @@ class DGCMomentumOptimizer(Optimizer): def _is_use_dgc(self, param_var, grad_var): var_numel = abs(reduce(lambda x, y: x * y, param_var.shape)) - if var_numel < 16384 or \ - param_var.type == core.VarDesc.VarType.SELECTED_ROWS or \ - grad_var.type == core.VarDesc.VarType.SELECTED_ROWS or \ - param_var.dtype != core.VarDesc.VarType.FP32 : + if ( + var_numel < 16384 + or param_var.type == core.VarDesc.VarType.SELECTED_ROWS + or grad_var.type == core.VarDesc.VarType.SELECTED_ROWS + or param_var.dtype != core.VarDesc.VarType.FP32 + ): return False return True def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - velocity_acc = self._get_accumulator(self._u_velocity_acc_str, - param_and_grad[0]) + velocity_acc = self._get_accumulator( + self._u_velocity_acc_str, param_and_grad[0] + ) assert velocity_acc is not None inputs = { @@ -1709,36 +1927,42 @@ class DGCMomentumOptimizer(Optimizer): type = "momentum" else: type = "dgc_momentum" - inputs.update({ - "current_step": self._global_step_var, - "nranks": self._nranks_var - }) + inputs.update( + { + "current_step": self._global_step_var, + "nranks": self._nranks_var, + } + ) outputs.update({'Grad_out': param_and_grad[1]}) attrs.update({"rampup_begin_step": float(self._rampup_begin_step)}) # create the dgc momentum optimize op - dgc_momentum_op = block.append_op(type=type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + dgc_momentum_op = block.append_op( + type=type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return dgc_momentum_op def _add_auto_increment_var(self, counter_name, begin, step=1): helper = LayerHelper('global_step_counter') counter, is_new_var = helper.create_or_get_global_variable( - name=counter_name, dtype='float32', shape=[1], persistable=True) + name=counter_name, dtype='float32', shape=[1], persistable=True + ) if is_new_var: - helper.set_variable_initializer(counter, - initializer=Constant( - value=float(begin - 1), - force_cpu=True)) + helper.set_variable_initializer( + counter, + initializer=Constant(value=float(begin - 1), force_cpu=True), + ) helper.main_program.global_block()._prepend_op( type='increment', inputs={'X': [counter]}, outputs={'Out': [counter]}, attrs={'step': float(step)}, - stop_gradient=True) + stop_gradient=True, + ) counter.stop_gradient = True return counter @@ -1746,12 +1970,13 @@ class DGCMomentumOptimizer(Optimizer): def _add_nranks_var(self, name, value=-1): helper = LayerHelper('global_step_counter') counter, is_new_var = helper.create_or_get_global_variable( - name=name, dtype='float32', shape=[1], persistable=True) + name=name, dtype='float32', shape=[1], persistable=True + ) if is_new_var: - helper.set_variable_initializer(counter, - initializer=Constant( - value=float(value), - force_cpu=True)) + helper.set_variable_initializer( + counter, + initializer=Constant(value=float(value), force_cpu=True), + ) 
counter.stop_gradient = True return counter @@ -1762,10 +1987,12 @@ class DGCMomentumOptimizer(Optimizer): # step counter self._global_step_var = self._add_auto_increment_var( - counter_name=core.dgc.kDGCCounterName(), begin=0) + counter_name=core.dgc.kDGCCounterName(), begin=0 + ) - self._nranks_var = self._add_nranks_var(name=core.dgc.kDGCNRanksName(), - value=-1) + self._nranks_var = self._add_nranks_var( + name=core.dgc.kDGCNRanksName(), value=-1 + ) # rampup begin step var for all_reduce_op_handle self._rampup_begin_step_var = tensor.create_global_var( @@ -1774,7 +2001,8 @@ class DGCMomentumOptimizer(Optimizer): persistable=True, name=core.dgc.kDGCRampUpBeginStepName(), value=self._rampup_begin_step * 1.0, - force_cpu=True) + force_cpu=True, + ) self.helper = LayerHelper(self.__class__.__name__) @@ -1787,29 +2015,32 @@ class DGCMomentumOptimizer(Optimizer): v_var = self._add_accumulator(self._v_velocity_acc_str, param_var) - k_var = tensor.create_global_var(shape=[1], - dtype=param_var.dtype, - persistable=True, - name=param_var.name + - core.dgc.kDGCKName(), - value=0.0, - force_cpu=True) - - encoded_var = tensor.create_global_var(shape=[1], - dtype=param_var.dtype, - persistable=True, - name=param_var.name + - core.dgc.kDGCEncodedName(), - value=0.0, - force_cpu=False) - - gather_var = tensor.create_global_var(shape=[1], - dtype=param_var.dtype, - persistable=True, - name=param_var.name + - core.dgc.kDGCGatherName(), - value=0.0, - force_cpu=False) + k_var = tensor.create_global_var( + shape=[1], + dtype=param_var.dtype, + persistable=True, + name=param_var.name + core.dgc.kDGCKName(), + value=0.0, + force_cpu=True, + ) + + encoded_var = tensor.create_global_var( + shape=[1], + dtype=param_var.dtype, + persistable=True, + name=param_var.name + core.dgc.kDGCEncodedName(), + value=0.0, + force_cpu=False, + ) + + gather_var = tensor.create_global_var( + shape=[1], + dtype=param_var.dtype, + persistable=True, + name=param_var.name + core.dgc.kDGCGatherName(), + value=0.0, + force_cpu=False, + ) # del back oprolevarname op_maker = core.op_proto_and_checker_maker @@ -1832,14 +2063,23 @@ class DGCMomentumOptimizer(Optimizer): clip_var = grad_var if self._dgc_clip_norm is not None: clip_var = self._append_clip_norm(grad_var, self._dgc_clip_norm) - self._dgc_op(param_var, clip_var, grad_var, u_var, v_var, k_var, - encoded_var, gather_var) + self._dgc_op( + param_var, + clip_var, + grad_var, + u_var, + v_var, + k_var, + encoded_var, + gather_var, + ) def _is_the_backward_op(self, op): op_maker = core.op_proto_and_checker_maker backward = core.op_proto_and_checker_maker.OpRole.Backward - if op_maker.kOpRoleVarAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward): + if op_maker.kOpRoleVarAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(backward): return True return False @@ -1849,34 +2089,42 @@ class DGCMomentumOptimizer(Optimizer): helper = LayerHelper("dgc_clip_by_norm_op", **args) if name is None: - name = unique_name.generate_with_ignorable_key(".".join( - [helper.name, 'tmp'])) - - out = helper.create_variable(type=x.type, - name=name, - dtype=x.dtype, - persistable=False) - - helper.append_op(type="dgc_clip_by_norm", - inputs={ - "X": x, - "current_step": self._global_step_var - }, - attrs={ - "max_norm": max_norm, - "rampup_begin_step": float(self._rampup_begin_step) - }, - outputs={"Out": out}) + name = unique_name.generate_with_ignorable_key( + ".".join([helper.name, 'tmp']) + ) + + out = 
helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False + ) + + helper.append_op( + type="dgc_clip_by_norm", + inputs={"X": x, "current_step": self._global_step_var}, + attrs={ + "max_norm": max_norm, + "rampup_begin_step": float(self._rampup_begin_step), + }, + outputs={"Out": out}, + ) return out def _append_clip_norm(self, grad_var, clip_norm): with grad_var.block.program._backward_role_guard(): - return self._clip_by_norm(x=grad_var, - max_norm=clip_norm, - name=grad_var.name) + return self._clip_by_norm( + x=grad_var, max_norm=clip_norm, name=grad_var.name + ) - def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var, - encoded_var, gather_var): + def _dgc_op( + self, + param_var, + clip_var, + grad_var, + u_var, + v_var, + k_var, + encoded_var, + gather_var, + ): block = framework.default_main_program().global_block() op_maker = core.op_proto_and_checker_maker @@ -1885,47 +2133,44 @@ class DGCMomentumOptimizer(Optimizer): # The regularizer of the Parameters have higher priority if param_var.regularizer is not None: regular_type, regular_coeff = self._get_regularization_param( - param_var.regularizer) - - dgc_op = block.append_op(type="dgc", - inputs={ - "U": u_var, - "V": v_var, - "Grad": clip_var, - "Param": param_var, - "current_step": self._global_step_var, - "nranks": self._nranks_var, - }, - outputs={ - "U_out": u_var, - "V_out": v_var, - "EncodeGrad": encoded_var, - "k": k_var, - "Grad_out": grad_var, - "GatherBuff": gather_var, - }, - attrs={ - "m": - self._momentum, - "sparsity": - self._sparsity, - "use_nesterov": - self._use_nesterov, - "rampup_begin_step": - float(self._rampup_begin_step), - "rampup_step": - float(self._rampup_step), - "regular_coeff": - float(regular_coeff), - "regular_type": - int(regular_type), - }, - stop_gradient=True) + param_var.regularizer + ) + + dgc_op = block.append_op( + type="dgc", + inputs={ + "U": u_var, + "V": v_var, + "Grad": clip_var, + "Param": param_var, + "current_step": self._global_step_var, + "nranks": self._nranks_var, + }, + outputs={ + "U_out": u_var, + "V_out": v_var, + "EncodeGrad": encoded_var, + "k": k_var, + "Grad_out": grad_var, + "GatherBuff": gather_var, + }, + attrs={ + "m": self._momentum, + "sparsity": self._sparsity, + "use_nesterov": self._use_nesterov, + "rampup_begin_step": float(self._rampup_begin_step), + "rampup_step": float(self._rampup_step), + "regular_coeff": float(regular_coeff), + "regular_type": int(regular_type), + }, + stop_gradient=True, + ) backward = op_maker.OpRole.Backward dgc_op._set_attr(op_maker.kOpRoleAttrName(), backward) - dgc_op._set_attr(op_maker.kOpRoleVarAttrName(), - [param_var.name, grad_var.name]) + dgc_op._set_attr( + op_maker.kOpRoleVarAttrName(), [param_var.name, grad_var.name] + ) @imperative_base.no_grad def apply_gradients(self, params_grads): @@ -1935,8 +2180,11 @@ class DGCMomentumOptimizer(Optimizer): self._append_dgc_ops(params_grads) params_grads = sorted(params_grads, key=lambda x: x[0].name) - params_grads, table_param_and_grad, table_optimize_op = \ - self._process_distribute_lookuptable(params_grads) + ( + params_grads, + table_param_and_grad, + table_optimize_op, + ) = self._process_distribute_lookuptable(params_grads) not_dgc_params_grads = [] dgc_params_grads = [] @@ -1952,10 +2200,12 @@ class DGCMomentumOptimizer(Optimizer): not_dgc_params_grads = self._grad_clip(not_dgc_params_grads) else: not_dgc_params_grads = append_gradient_clip_ops( - not_dgc_params_grads) + not_dgc_params_grads + ) not_dgc_params_grads = 
self.append_regularization_ops( - not_dgc_params_grads, self.regularization) + not_dgc_params_grads, self.regularization + ) params_grads = not_dgc_params_grads + dgc_params_grads params_grads = sorted(params_grads, key=lambda x: x[0].name) @@ -2031,27 +2281,30 @@ class LarsMomentumOptimizer(Optimizer): """ _velocity_acc_str = "velocity" - def __init__(self, - learning_rate, - momentum, - lars_coeff=0.001, - lars_weight_decay=0.0005, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None, - exclude_from_weight_decay=None, - epsilon=0, - multi_precision=False, - rescale_grad=1.0): + def __init__( + self, + learning_rate, + momentum, + lars_coeff=0.001, + lars_weight_decay=0.0005, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + exclude_from_weight_decay=None, + epsilon=0, + multi_precision=False, + rescale_grad=1.0, + ): assert learning_rate is not None assert momentum is not None - super(LarsMomentumOptimizer, - self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(LarsMomentumOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "lars_momentum" self._momentum = momentum self._lars_coeff = float(lars_coeff) @@ -2073,19 +2326,23 @@ class LarsMomentumOptimizer(Optimizer): var_name = param.name + '_fp32_master' var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -2099,15 +2356,22 @@ class LarsMomentumOptimizer(Optimizer): """ if self._name is not None: name = self._name + "_" + name - find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _create_accumulators(self, block, parameters): @@ -2118,7 +2382,10 @@ class LarsMomentumOptimizer(Optimizer): master_p = self._create_master_weight(p) self._add_accumulator(self._velocity_acc_str, master_p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." 
"Consider using multi_precision=True option of the Lars optimizer." @@ -2135,14 +2402,20 @@ class LarsMomentumOptimizer(Optimizer): _lars_weight_decay = 0.0 break - velocity_acc = self._get_accumulator(self._velocity_acc_str, - param_and_grad[0]) + velocity_acc = self._get_accumulator( + self._velocity_acc_str, param_and_grad[0] + ) lr = self._create_param_lr(param_and_grad) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) attrs = { "mu": self._momentum, @@ -2150,14 +2423,14 @@ class LarsMomentumOptimizer(Optimizer): "lars_weight_decay": [_lars_weight_decay], "multi_precision": find_master, "epsilon": self._epsilon, - "rescale_grad": self._rescale_grad + "rescale_grad": self._rescale_grad, } inputs = { "Param": param_and_grad[0], "Grad": param_and_grad[1], "Velocity": velocity_acc, - "LearningRate": lr + "LearningRate": lr, } outputs = {"ParamOut": param_and_grad[0], "VelocityOut": velocity_acc} @@ -2168,18 +2441,34 @@ class LarsMomentumOptimizer(Optimizer): if framework._non_static_mode(): tmp, tmp2 = _legacy_C_ops.lars_momentum( - [param_and_grad[0]], [param_and_grad[1]], [velocity_acc], [lr], - [param_and_grad[0]], [velocity_acc], "mu", self._momentum, - "lars_coeff", self._lars_coeff, "lars_weight_decay", - [_lars_weight_decay], "multi_precision", find_master, "epsilon", - self._epsilon, "rescale_grad", self._rescale_grad) + [param_and_grad[0]], + [param_and_grad[1]], + [velocity_acc], + [lr], + [param_and_grad[0]], + [velocity_acc], + "mu", + self._momentum, + "lars_coeff", + self._lars_coeff, + "lars_weight_decay", + [_lars_weight_decay], + "multi_precision", + find_master, + "epsilon", + self._epsilon, + "rescale_grad", + self._rescale_grad, + ) else: # create the momentum optimize op - momentum_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + momentum_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return momentum_op @@ -2249,21 +2538,25 @@ class AdagradOptimizer(Optimizer): """ _moment_acc_str = "moment" - def __init__(self, - learning_rate, - epsilon=1.0e-6, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None, - initial_accumulator_value=0.0): + def __init__( + self, + learning_rate, + epsilon=1.0e-6, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + initial_accumulator_value=0.0, + ): assert learning_rate is not None assert epsilon is not None - super(AdagradOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(AdagradOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "adagrad" self._epsilon = epsilon self.initial_accumulator_value = initial_accumulator_value @@ -2272,26 +2565,38 @@ class AdagradOptimizer(Optimizer): assert isinstance(block, framework.Block) for p in parameters: - self._add_accumulator(self._moment_acc_str, - p, - fill_value=self.initial_accumulator_value) + self._add_accumulator( + self._moment_acc_str, + p, 
+ fill_value=self.initial_accumulator_value, + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - moment_acc = self._get_accumulator(self._moment_acc_str, - param_and_grad[0]) + moment_acc = self._get_accumulator( + self._moment_acc_str, param_and_grad[0] + ) if in_dygraph_mode(): - _C_ops.adagrad_(param_and_grad[0], param_and_grad[1], moment_acc, - self._create_param_lr(param_and_grad), - self._epsilon) + _C_ops.adagrad_( + param_and_grad[0], + param_and_grad[1], + moment_acc, + self._create_param_lr(param_and_grad), + self._epsilon, + ) return None elif _in_legacy_dygraph(): - _legacy_C_ops.adagrad(param_and_grad[0], param_and_grad[1], - moment_acc, - self._create_param_lr(param_and_grad), - param_and_grad[0], moment_acc, "epsilon", - self._epsilon) + _legacy_C_ops.adagrad( + param_and_grad[0], + param_and_grad[1], + moment_acc, + self._create_param_lr(param_and_grad), + param_and_grad[0], + moment_acc, + "epsilon", + self._epsilon, + ) return None else: # Create the adagrad optimizer op @@ -2301,14 +2606,15 @@ class AdagradOptimizer(Optimizer): "Param": param_and_grad[0], "Grad": param_and_grad[1], "Moment": moment_acc, - "LearningRate": self._create_param_lr(param_and_grad) + "LearningRate": self._create_param_lr(param_and_grad), }, outputs={ "ParamOut": param_and_grad[0], - "MomentOut": moment_acc + "MomentOut": moment_acc, }, attrs={"epsilon": self._epsilon}, - stop_gradient=True) + stop_gradient=True, + ) return adagrad_op @@ -2476,31 +2782,34 @@ class AdamOptimizer(Optimizer): _beta1_pow_acc_str = "beta1_pow_acc" _beta2_pow_acc_str = "beta2_pow_acc" - def __init__(self, - learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None, - lazy_mode=False, - use_global_beta_pow=False, - flatten_param_grads=False, - align_size=-1): + def __init__( + self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + lazy_mode=False, + use_global_beta_pow=False, + flatten_param_grads=False, + align_size=-1, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamOptimizer, - self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - flatten_param_grads=flatten_param_grads, - align_size=align_size, - name=name) + super(AdamOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + flatten_param_grads=flatten_param_grads, + align_size=align_size, + name=name, + ) self.type = "adam" self._beta1 = beta1 self._beta2 = beta2 @@ -2519,65 +2828,109 @@ class AdamOptimizer(Optimizer): self._add_accumulator( name=self._beta1_pow_acc_str, param=p, - fill_value=0.9 if isinstance(self._beta1, Variable) \ - else self._beta1, + fill_value=0.9 + if isinstance(self._beta1, Variable) + else self._beta1, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) self._add_accumulator( name=self._beta2_pow_acc_str, param=p, - fill_value=0.999 if isinstance(self._beta2, Variable) \ - else self._beta2, + fill_value=0.999 + if isinstance(self._beta2, Variable) + else self._beta2, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) if 
self._use_global_beta_pow: self._add_global_accumulator( name=self._beta1_pow_acc_str, - fill_value=0.9 if isinstance(self._beta1, Variable) \ - else self._beta1, + fill_value=0.9 + if isinstance(self._beta1, Variable) + else self._beta1, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) self._add_global_accumulator( name=self._beta2_pow_acc_str, - fill_value=0.999 if isinstance(self._beta2, Variable) \ - else self._beta2, + fill_value=0.999 + if isinstance(self._beta2, Variable) + else self._beta2, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) - moment2 = self._get_accumulator(self._moment2_acc_str, - param_and_grad[0]) + moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) + moment2 = self._get_accumulator( + self._moment2_acc_str, param_and_grad[0] + ) if self._use_global_beta_pow: beta1_pow_acc = self._get_global_accumulator( - self._beta1_pow_acc_str) + self._beta1_pow_acc_str + ) beta2_pow_acc = self._get_global_accumulator( - self._beta2_pow_acc_str) + self._beta2_pow_acc_str + ) else: - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) lr = self._create_param_lr(param_and_grad) # create the adam optimize op if framework._non_static_mode(): - _beta1 = self._beta1 if not isinstance( - self._beta1, Variable) else self._beta1.numpy().item(0) - _beta2 = self._beta2 if not isinstance( - self._beta2, Variable) else self._beta2.numpy().item(0) + _beta1 = ( + self._beta1 + if not isinstance(self._beta1, Variable) + else self._beta1.numpy().item(0) + ) + _beta2 = ( + self._beta2 + if not isinstance(self._beta2, Variable) + else self._beta2.numpy().item(0) + ) master_weight = None _, _, _, _, _, _ = _legacy_C_ops.adam( - param_and_grad[0], param_and_grad[1], lr, moment1, moment2, - beta1_pow_acc, beta2_pow_acc, master_weight, param_and_grad[0], - moment1, moment2, beta1_pow_acc, beta2_pow_acc, master_weight, - 'epsilon', self._epsilon, 'lazy_mode', self._lazy_mode, - 'min_row_size_to_use_multithread', 1000, 'beta1', _beta1, - 'beta2', _beta2, 'use_global_beta_pow', - self._use_global_beta_pow) + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + param_and_grad[0], + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + 'epsilon', + self._epsilon, + 'lazy_mode', + self._lazy_mode, + 'min_row_size_to_use_multithread', + 1000, + 'beta1', + _beta1, + 'beta2', + _beta2, + 'use_global_beta_pow', + self._use_global_beta_pow, + ) return None @@ -2588,7 +2941,7 @@ class AdamOptimizer(Optimizer): "Moment1": [moment1], "Moment2": [moment2], "Beta1Pow": [beta1_pow_acc], - "Beta2Pow": [beta2_pow_acc] + "Beta2Pow": [beta2_pow_acc], } # Pass found_inf to adam, to skip update for not only param, but also momentum and beta_pow @@ -2607,7 +2960,7 @@ class AdamOptimizer(Optimizer): attrs = { "lazy_mode": self._lazy_mode, "min_row_size_to_use_multithread": 1000, - 
'use_global_beta_pow': self._use_global_beta_pow + 'use_global_beta_pow': self._use_global_beta_pow, } if isinstance(self._beta1, Variable): @@ -2623,23 +2976,26 @@ class AdamOptimizer(Optimizer): else: attrs['epsilon'] = self._epsilon - adam_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + adam_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return adam_op def _finish_update(self, block, parameters_and_grads): - r"""Update beta1_pow and beta2_pow accumulator - """ + r"""Update beta1_pow and beta2_pow accumulator""" assert isinstance(block, framework.Block) if self._use_global_beta_pow: beta1_pow_acc = self._get_global_accumulator( - self._beta1_pow_acc_str) + self._beta1_pow_acc_str + ) beta2_pow_acc = self._get_global_accumulator( - self._beta2_pow_acc_str) + self._beta2_pow_acc_str + ) with block.program._optimized_guard([]): inputs = {"X": beta1_pow_acc} @@ -2648,18 +3004,22 @@ class AdamOptimizer(Optimizer): if isinstance(self._beta1, Variable): inputs["Y"] = self._beta1 # use elementwise_mul for better performance - block.append_op(type="elementwise_mul", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + block.append_op( + type="elementwise_mul", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) else: attrs['scale'] = self._beta1 - block.append_op(type="scale", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + block.append_op( + type="scale", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) inputs = {"X": beta2_pow_acc} outputs = {"Out": beta2_pow_acc} @@ -2667,18 +3027,22 @@ class AdamOptimizer(Optimizer): if isinstance(self._beta2, Variable): inputs["Y"] = self._beta2 # use elementwise_mul for better performance - block.append_op(type="elementwise_mul", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + block.append_op( + type="elementwise_mul", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) else: attrs['scale'] = self._beta2 - block.append_op(type="scale", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + block.append_op( + type="scale", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) class AdamaxOptimizer(Optimizer): @@ -2766,24 +3130,28 @@ class AdamaxOptimizer(Optimizer): _inf_norm_acc_str = "inf_norm" _beta1_pow_acc_str = "beta1_pow_acc" - def __init__(self, - learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamaxOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(AdamaxOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "adamax" self._beta1 = beta1 self._beta2 = beta2 @@ -2794,31 +3162,54 @@ class AdamaxOptimizer(Optimizer): for p in parameters: self._add_accumulator(self._moment_acc_str, p) 
self._add_accumulator(self._inf_norm_acc_str, p) - self._add_accumulator(name=self._beta1_pow_acc_str, - param=p, - fill_value=self._beta1, - shape=[1]) + self._add_accumulator( + name=self._beta1_pow_acc_str, + param=p, + fill_value=self._beta1, + shape=[1], + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) - inf_norm = self._get_accumulator(self._inf_norm_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) + inf_norm = self._get_accumulator( + self._inf_norm_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) if framework.in_dygraph_mode(): - _C_ops.adamax_(param_and_grad[0], param_and_grad[1], - self._create_param_lr(param_and_grad), moment, - inf_norm, beta1_pow_acc, self._beta1, self._beta2, - self._epsilon) + _C_ops.adamax_( + param_and_grad[0], + param_and_grad[1], + self._create_param_lr(param_and_grad), + moment, + inf_norm, + beta1_pow_acc, + self._beta1, + self._beta2, + self._epsilon, + ) elif framework._in_legacy_dygraph(): - _legacy_C_ops.adamax(param_and_grad[0], param_and_grad[1], - self._create_param_lr(param_and_grad), moment, - inf_norm, beta1_pow_acc, param_and_grad[0], - moment, inf_norm, "beta1", self._beta1, - "beta2", self._beta2, "epsilon", self._epsilon) + _legacy_C_ops.adamax( + param_and_grad[0], + param_and_grad[1], + self._create_param_lr(param_and_grad), + moment, + inf_norm, + beta1_pow_acc, + param_and_grad[0], + moment, + inf_norm, + "beta1", + self._beta1, + "beta2", + self._beta2, + "epsilon", + self._epsilon, + ) else: # create the adamax optimize op adamax_op = block.append_op( @@ -2829,47 +3220,53 @@ class AdamaxOptimizer(Optimizer): "LearningRate": self._create_param_lr(param_and_grad), "Moment": moment, "InfNorm": inf_norm, - "Beta1Pow": beta1_pow_acc + "Beta1Pow": beta1_pow_acc, }, outputs={ "ParamOut": param_and_grad[0], "MomentOut": moment, - "InfNormOut": inf_norm + "InfNormOut": inf_norm, }, attrs={ "beta1": self._beta1, "beta2": self._beta2, - "epsilon": self._epsilon + "epsilon": self._epsilon, }, - stop_gradient=True) + stop_gradient=True, + ) return adamax_op def _finish_update(self, block, parameters_and_grads): - """Update Beta1 Power accumulator - """ + """Update Beta1 Power accumulator""" assert isinstance(block, framework.Block) for param, grad in parameters_and_grads: if grad is None or param.trainable is False: continue - with param.block.program._optimized_guard([param, grad - ]), name_scope('adamx'): - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param) + with param.block.program._optimized_guard( + [param, grad] + ), name_scope('adamx'): + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param + ) if framework._non_static_mode(): if framework.in_dygraph_mode(): - tmp = _C_ops.scale(beta1_pow_acc, self._beta1, 0.0, - True) + tmp = _C_ops.scale( + beta1_pow_acc, self._beta1, 0.0, True + ) else: - tmp = _legacy_C_ops.scale(beta1_pow_acc, "scale", - self._beta1) + tmp = _legacy_C_ops.scale( + beta1_pow_acc, "scale", self._beta1 + ) beta1_pow_acc.copy_(tmp, False) else: - block.append_op(type="scale", - inputs={"X": beta1_pow_acc}, - outputs={"Out": beta1_pow_acc}, - attrs={"scale": self._beta1}, - stop_gradient=True) + block.append_op( + type="scale", + inputs={"X": beta1_pow_acc}, + outputs={"Out": beta1_pow_acc}, + attrs={"scale": self._beta1}, + 
stop_gradient=True, + ) class DpsgdOptimizer(Optimizer): @@ -2917,18 +3314,21 @@ class DpsgdOptimizer(Optimizer): Currently, DpsgdOptimizer doesn't support sparse parameter optimization. """ - def __init__(self, - learning_rate=0.001, - clip=0.9, - batch_size=0.999, - sigma=1e-8, - parameter_list=None): + def __init__( + self, + learning_rate=0.001, + clip=0.9, + batch_size=0.999, + sigma=1e-8, + parameter_list=None, + ): assert learning_rate is not None assert clip is not None assert batch_size is not None assert sigma is not None - super(DpsgdOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list) + super(DpsgdOptimizer, self).__init__( + learning_rate=learning_rate, parameter_list=parameter_list + ) self.type = "dpsgd" self._clip = clip self._batch_size = batch_size @@ -2949,29 +3349,37 @@ class DpsgdOptimizer(Optimizer): self._seed = 0 if framework._non_static_mode(): - _legacy_C_ops.dpsgd(param_and_grad[0], param_and_grad[1], - self._create_param_lr(param_and_grad), - param_and_grad[0], "clip", self._clip, - "batch_size", self._batch_size, "sigma", - self._sigma, "seed", self._seed) + _legacy_C_ops.dpsgd( + param_and_grad[0], + param_and_grad[1], + self._create_param_lr(param_and_grad), + param_and_grad[0], + "clip", + self._clip, + "batch_size", + self._batch_size, + "sigma", + self._sigma, + "seed", + self._seed, + ) else: - dpsgd_op = block.append_op(type=self.type, - inputs={ - "Param": - param_and_grad[0], - "Grad": - param_and_grad[1], - "LearningRate": - self._create_param_lr(param_and_grad) - }, - outputs={"ParamOut": param_and_grad[0]}, - attrs={ - "clip": self._clip, - "batch_size": self._batch_size, - "sigma": self._sigma, - "seed": self._seed - }, - stop_gradient=True) + dpsgd_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={"ParamOut": param_and_grad[0]}, + attrs={ + "clip": self._clip, + "batch_size": self._batch_size, + "sigma": self._sigma, + "seed": self._seed, + }, + stop_gradient=True, + ) return dpsgd_op @@ -3034,24 +3442,27 @@ class DecayedAdagradOptimizer(Optimizer): """ _moment_acc_str = "moment" - def __init__(self, - learning_rate, - decay=0.95, - epsilon=1.0e-6, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + decay=0.95, + epsilon=1.0e-6, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + ): assert learning_rate is not None assert decay is not None assert epsilon is not None - super(DecayedAdagradOptimizer, - self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(DecayedAdagradOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "decayed_adagrad" self._decay = decay self._epsilon = epsilon @@ -3065,16 +3476,23 @@ class DecayedAdagradOptimizer(Optimizer): def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) - moment_acc = self._get_accumulator(self._moment_acc_str, - param_and_grad[0]) + moment_acc = self._get_accumulator( + self._moment_acc_str, param_and_grad[0] + ) if framework._non_static_mode(): - _legacy_C_ops.decayed_adagrad(param_and_grad[0], param_and_grad[1], - moment_acc, - self._create_param_lr(param_and_grad), - 
param_and_grad[0], moment_acc, - "epsilon", self._epsilon, "decay", - self._decay) + _legacy_C_ops.decayed_adagrad( + param_and_grad[0], + param_and_grad[1], + moment_acc, + self._create_param_lr(param_and_grad), + param_and_grad[0], + moment_acc, + "epsilon", + self._epsilon, + "decay", + self._decay, + ) else: # Create the decayed adagrad optimizer op decayed_adagrad_op = block.append_op( @@ -3083,17 +3501,15 @@ class DecayedAdagradOptimizer(Optimizer): "Param": param_and_grad[0], "Grad": param_and_grad[1], "Moment": moment_acc, - "LearningRate": self._create_param_lr(param_and_grad) + "LearningRate": self._create_param_lr(param_and_grad), }, outputs={ "ParamOut": param_and_grad[0], - "MomentOut": moment_acc + "MomentOut": moment_acc, }, - attrs={ - "epsilon": self._epsilon, - "decay": self._decay - }, - stop_gradient=True) + attrs={"epsilon": self._epsilon, "decay": self._decay}, + stop_gradient=True, + ) return decayed_adagrad_op @@ -3155,25 +3571,29 @@ class AdadeltaOptimizer(Optimizer): _avg_squared_grad_acc_str = "_avg_squared_grad" _avg_squared_update_acc_str = "_avg_squared_update" - def __init__(self, - learning_rate, - epsilon=1.0e-6, - rho=0.95, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + epsilon=1.0e-6, + rho=0.95, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + ): if learning_rate is None: raise ValueError("learning_rate is not set.") if epsilon is None: raise ValueError("epsilon is not set.") if rho is None: raise ValueError("rho is not set.") - super(AdadeltaOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + super(AdadeltaOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) self.type = "adadelta" self._epsilon = epsilon self._rho = rho @@ -3191,46 +3611,53 @@ class AdadeltaOptimizer(Optimizer): raise TypeError("block is not instance of framework.Block.") avg_squared_grad_acc = self._get_accumulator( - self._avg_squared_grad_acc_str, param_and_grad[0]) + self._avg_squared_grad_acc_str, param_and_grad[0] + ) avg_squared_update_acc = self._get_accumulator( - self._avg_squared_update_acc_str, param_and_grad[0]) + self._avg_squared_update_acc_str, param_and_grad[0] + ) if framework.in_dygraph_mode(): - _C_ops.adadelta_(param_and_grad[0], param_and_grad[1], - avg_squared_grad_acc, avg_squared_update_acc, - self._rho, self._epsilon) + _C_ops.adadelta_( + param_and_grad[0], + param_and_grad[1], + avg_squared_grad_acc, + avg_squared_update_acc, + self._rho, + self._epsilon, + ) elif framework._in_legacy_dygraph(): - _legacy_C_ops.adadelta(param_and_grad[0], param_and_grad[1], - avg_squared_grad_acc, avg_squared_update_acc, - param_and_grad[0], avg_squared_grad_acc, - avg_squared_update_acc, "epsilon", - self._epsilon, "rho", self._rho) + _legacy_C_ops.adadelta( + param_and_grad[0], + param_and_grad[1], + avg_squared_grad_acc, + avg_squared_update_acc, + param_and_grad[0], + avg_squared_grad_acc, + avg_squared_update_acc, + "epsilon", + self._epsilon, + "rho", + self._rho, + ) else: # Create the adadelta optimizer op - adadelta_op = block.append_op(type=self.type, - inputs={ - "Param": - param_and_grad[0], - "Grad": - param_and_grad[1], - "AvgSquaredGrad": - avg_squared_grad_acc, - "AvgSquaredUpdate": - avg_squared_update_acc - }, - outputs={ - "ParamOut": - 
param_and_grad[0], - "AvgSquaredGradOut": - avg_squared_grad_acc, - "AvgSquaredUpdateOut": - avg_squared_update_acc - }, - attrs={ - "epsilon": self._epsilon, - "rho": self._rho - }, - stop_gradient=True) + adadelta_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "AvgSquaredGrad": avg_squared_grad_acc, + "AvgSquaredUpdate": avg_squared_update_acc, + }, + outputs={ + "ParamOut": param_and_grad[0], + "AvgSquaredGradOut": avg_squared_grad_acc, + "AvgSquaredUpdateOut": avg_squared_update_acc, + }, + attrs={"epsilon": self._epsilon, "rho": self._rho}, + stop_gradient=True, + ) return adadelta_op @@ -3346,21 +3773,25 @@ class RMSPropOptimizer(Optimizer): _mean_square_acc_str = "mean_square" _mean_grad_acc_str = "mean_grad" - def __init__(self, - learning_rate, - rho=0.95, - epsilon=1.0e-6, - momentum=0.0, - centered=False, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None): - super(RMSPropOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + def __init__( + self, + learning_rate, + rho=0.95, + epsilon=1.0e-6, + momentum=0.0, + centered=False, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + ): + super(RMSPropOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) if learning_rate is None: raise ValueError("learning_rate is not set.") if rho is None: @@ -3389,27 +3820,49 @@ class RMSPropOptimizer(Optimizer): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") - momentum_acc = self._get_accumulator(self._momentum_acc_str, - param_and_grad[0]) - mean_square_acc = self._get_accumulator(self._mean_square_acc_str, - param_and_grad[0]) - mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str, - param_and_grad[0]) + momentum_acc = self._get_accumulator( + self._momentum_acc_str, param_and_grad[0] + ) + mean_square_acc = self._get_accumulator( + self._mean_square_acc_str, param_and_grad[0] + ) + mean_grad_acc = self._get_accumulator( + self._mean_grad_acc_str, param_and_grad[0] + ) if in_dygraph_mode(): - _C_ops.rmsprop_(param_and_grad[0], mean_square_acc, - param_and_grad[1], momentum_acc, - self._create_param_lr(param_and_grad), - mean_grad_acc, self._epsilon, self._rho, - self._momentum, self._centered) + _C_ops.rmsprop_( + param_and_grad[0], + mean_square_acc, + param_and_grad[1], + momentum_acc, + self._create_param_lr(param_and_grad), + mean_grad_acc, + self._epsilon, + self._rho, + self._momentum, + self._centered, + ) return None elif _in_legacy_dygraph(): - _legacy_C_ops.rmsprop(param_and_grad[0], mean_square_acc, - self._create_param_lr(param_and_grad), - param_and_grad[1], momentum_acc, - param_and_grad[0], momentum_acc, - mean_square_acc, mean_grad_acc, "epsilon", - self._epsilon, "decay", self._rho, "momentum", - self._momentum, "centered", self._centered) + _legacy_C_ops.rmsprop( + param_and_grad[0], + mean_square_acc, + self._create_param_lr(param_and_grad), + param_and_grad[1], + momentum_acc, + param_and_grad[0], + momentum_acc, + mean_square_acc, + mean_grad_acc, + "epsilon", + self._epsilon, + "decay", + self._rho, + "momentum", + self._momentum, + "centered", + self._centered, + ) return None else: rmsprop_op = block.append_op( @@ -3426,15 +3879,16 @@ class RMSPropOptimizer(Optimizer): "ParamOut": 
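# Illustrative sketch only: the (optionally centered) RMSProp update that the
# mean_square / mean_grad / momentum accumulators above track (NumPy; names are
# illustrative, not Paddle API).
import numpy as np

def rmsprop_step(p, g, mean_sq, mean_g, mom, lr,
                 rho=0.95, eps=1e-6, momentum=0.0, centered=False):
    mean_sq = rho * mean_sq + (1 - rho) * g ** 2
    if centered:
        mean_g = rho * mean_g + (1 - rho) * g
        denom = np.sqrt(mean_sq - mean_g ** 2 + eps)
    else:
        denom = np.sqrt(mean_sq + eps)
    mom = momentum * mom + lr * g / denom
    return p - mom, mean_sq, mean_g, mom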
param_and_grad[0], "MomentOut": momentum_acc, "MeanSquareOut": mean_square_acc, - "MeanGradOut": mean_grad_acc + "MeanGradOut": mean_grad_acc, }, attrs={ "epsilon": self._epsilon, "decay": self._rho, "momentum": self._momentum, - "centered": self._centered + "centered": self._centered, }, - stop_gradient=True) + stop_gradient=True, + ) return rmsprop_op @@ -3537,20 +3991,24 @@ class FtrlOptimizer(Optimizer): _squared_acc_str = "squared" _linear_acc_str = "linear" - def __init__(self, - learning_rate, - l1=0.0, - l2=0.0, - lr_power=-0.5, - parameter_list=None, - regularization=None, - grad_clip=None, - name=None): - super(FtrlOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - name=name) + def __init__( + self, + learning_rate, + l1=0.0, + l2=0.0, + lr_power=-0.5, + parameter_list=None, + regularization=None, + grad_clip=None, + name=None, + ): + super(FtrlOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + name=name, + ) if learning_rate is None: raise ValueError("learning_rate is not set.") @@ -3571,43 +4029,52 @@ class FtrlOptimizer(Optimizer): if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") - squared_acc = self._get_accumulator(self._squared_acc_str, - param_and_grad[0]) - linear_acc = self._get_accumulator(self._linear_acc_str, - param_and_grad[0]) + squared_acc = self._get_accumulator( + self._squared_acc_str, param_and_grad[0] + ) + linear_acc = self._get_accumulator( + self._linear_acc_str, param_and_grad[0] + ) if framework._non_static_mode(): - _legacy_C_ops.ftrl(param_and_grad[0], squared_acc, linear_acc, - param_and_grad[1], - self._create_param_lr(param_and_grad), - param_and_grad[0], squared_acc, linear_acc, "l1", - self._l1, "l2", self._l2, "lr_power", - self._lr_power) + _legacy_C_ops.ftrl( + param_and_grad[0], + squared_acc, + linear_acc, + param_and_grad[1], + self._create_param_lr(param_and_grad), + param_and_grad[0], + squared_acc, + linear_acc, + "l1", + self._l1, + "l2", + self._l2, + "lr_power", + self._lr_power, + ) else: - ftrl_op = block.append_op(type=self.type, - inputs={ - "Param": - param_and_grad[0], - "Grad": - param_and_grad[1], - "SquaredAccumulator": - squared_acc, - "LinearAccumulator": - linear_acc, - "LearningRate": - self._create_param_lr(param_and_grad), - }, - outputs={ - "ParamOut": param_and_grad[0], - "SquaredAccumOut": squared_acc, - "LinearAccumOut": linear_acc - }, - attrs={ - "l1": self._l1, - "l2": self._l2, - "lr_power": self._lr_power - }, - stop_gradient=True) + ftrl_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "SquaredAccumulator": squared_acc, + "LinearAccumulator": linear_acc, + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={ + "ParamOut": param_and_grad[0], + "SquaredAccumOut": squared_acc, + "LinearAccumOut": linear_acc, + }, + attrs={ + "l1": self._l1, + "l2": self._l2, + "lr_power": self._lr_power, + }, + stop_gradient=True, + ) return ftrl_op @@ -3690,30 +4157,34 @@ class LambOptimizer(AdamOptimizer): _beta1_pow_acc_str = "beta1_pow_acc" _beta2_pow_acc_str = "beta2_pow_acc" - def __init__(self, - learning_rate=0.001, - lamb_weight_decay=0.01, - beta1=0.9, - beta2=0.999, - epsilon=1e-6, - parameter_list=None, - regularization=None, - grad_clip=None, - exclude_from_weight_decay_fn=None, - name=None): + 
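# Illustrative sketch only: the textbook FTRL-Proximal update behind the
# SquaredAccumulator / LinearAccumulator state above; with lr_power = -0.5 the
# power terms reduce to square roots.  NumPy, not the Paddle kernel, and names
# are illustrative.
import numpy as np

def ftrl_step(w, g, sq_acc, lin_acc, lr, l1=0.0, l2=0.0, lr_power=-0.5):
    new_sq = sq_acc + g ** 2
    lin_acc = lin_acc + g - (new_sq ** -lr_power - sq_acc ** -lr_power) / lr * w
    quad = new_sq ** -lr_power / lr + 2.0 * l2
    w = np.where(np.abs(lin_acc) > l1,
                 (np.sign(lin_acc) * l1 - lin_acc) / quad, 0.0)
    return w, new_sq, lin_acc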
def __init__( + self, + learning_rate=0.001, + lamb_weight_decay=0.01, + beta1=0.9, + beta2=0.999, + epsilon=1e-6, + parameter_list=None, + regularization=None, + grad_clip=None, + exclude_from_weight_decay_fn=None, + name=None, + ): assert learning_rate is not None assert lamb_weight_decay is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(LambOptimizer, self).__init__(learning_rate=learning_rate, - parameter_list=parameter_list, - regularization=regularization, - grad_clip=grad_clip, - beta1=beta1, - beta2=beta2, - epsilon=epsilon, - name=name) + super(LambOptimizer, self).__init__( + learning_rate=learning_rate, + parameter_list=parameter_list, + regularization=regularization, + grad_clip=grad_clip, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + name=name, + ) self.type = "lamb" self._weight_decay = lamb_weight_decay self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn @@ -3722,57 +4193,82 @@ class LambOptimizer(AdamOptimizer): assert isinstance(block, framework.Block) block.program._use_lamb = True - moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) - moment2 = self._get_accumulator(self._moment2_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) - - if self._exclude_from_weight_decay_fn is not None \ - and self._exclude_from_weight_decay_fn(param_and_grad[0]): + moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) + moment2 = self._get_accumulator( + self._moment2_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) + + if ( + self._exclude_from_weight_decay_fn is not None + and self._exclude_from_weight_decay_fn(param_and_grad[0]) + ): weight_decay = 0.0 else: weight_decay = self._weight_decay lr = self._create_param_lr(param_and_grad) master_weight = None if framework._non_static_mode(): - _legacy_C_ops.lamb(param_and_grad[0], param_and_grad[1], lr, - moment1, moment2, beta1_pow_acc, beta2_pow_acc, - master_weight, param_and_grad[0], moment1, - moment2, beta1_pow_acc, beta2_pow_acc, - master_weight, 'beta1', self._beta1, 'beta2', - self._beta2, 'epsilon', self._epsilon, - 'weight_decay', weight_decay) + _legacy_C_ops.lamb( + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + param_and_grad[0], + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + 'beta1', + self._beta1, + 'beta2', + self._beta2, + 'epsilon', + self._epsilon, + 'weight_decay', + weight_decay, + ) return None # create the lamb optimize op - lamb_op = block.append_op(type=self.type, - inputs={ - "Param": param_and_grad[0], - "Grad": param_and_grad[1], - "LearningRate": lr, - "Moment1": moment1, - "Moment2": moment2, - "Beta1Pow": beta1_pow_acc, - "Beta2Pow": beta2_pow_acc - }, - outputs={ - "ParamOut": param_and_grad[0], - "Moment1Out": moment1, - "Moment2Out": moment2, - "Beta1PowOut": beta1_pow_acc, - "Beta2PowOut": beta2_pow_acc - }, - attrs={ - "beta1": self._beta1, - "beta2": self._beta2, - "epsilon": self._epsilon, - "weight_decay": weight_decay - }, - stop_gradient=True) + lamb_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "LearningRate": 
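# Illustrative sketch only: the LAMB trust-ratio update fed by the
# Moment1/Moment2 accumulators, Beta*Pow factors and weight_decay attribute
# above (NumPy; names are illustrative, not Paddle API).
import numpy as np

def lamb_step(w, g, m, v, t, lr, beta1, beta2, eps, weight_decay):
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g ** 2
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    r = m_hat / (np.sqrt(v_hat) + eps) + weight_decay * w
    w_norm, r_norm = np.linalg.norm(w), np.linalg.norm(r)
    trust = w_norm / r_norm if w_norm > 0 and r_norm > 0 else 1.0
    return w - lr * trust * r, m, v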
lr, + "Moment1": moment1, + "Moment2": moment2, + "Beta1Pow": beta1_pow_acc, + "Beta2Pow": beta2_pow_acc, + }, + outputs={ + "ParamOut": param_and_grad[0], + "Moment1Out": moment1, + "Moment2Out": moment2, + "Beta1PowOut": beta1_pow_acc, + "Beta2PowOut": beta2_pow_acc, + }, + attrs={ + "beta1": self._beta1, + "beta2": self._beta2, + "epsilon": self._epsilon, + "weight_decay": weight_decay, + }, + stop_gradient=True, + ) return lamb_op @@ -3886,38 +4382,44 @@ class ModelAverage(Optimizer): fetch_list=[loss.name]) """ - def __init__(self, - average_window_rate, - min_average_window=10000, - max_average_window=10000, - regularization=None, - name=None): + def __init__( + self, + average_window_rate, + min_average_window=10000, + max_average_window=10000, + regularization=None, + name=None, + ): if framework._non_static_mode(): raise Exception("In dygraph, don't support ModelAverage.") - super(ModelAverage, self).__init__(0.0, - regularization=regularization, - name=name) + super(ModelAverage, self).__init__( + 0.0, regularization=regularization, name=name + ) self.average_window = average_window_rate self.min_average_window = min_average_window self.max_average_window = max_average_window self.params_grads = [] - for param in framework.default_main_program().global_block( - ).all_parameters(): + for param in ( + framework.default_main_program().global_block().all_parameters() + ): if param.do_model_average != False: grad = param.block.create_var( - name=unique_name.generate_with_ignorable_key(".".join( - [param.name, 'tmp'])), + name=unique_name.generate_with_ignorable_key( + ".".join([param.name, 'tmp']) + ), dtype=param.dtype, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) self.params_grads.append((param, grad)) for param, grad in self.params_grads: if grad is None: continue with param.block.program._optimized_guard( - [param, grad]), name_scope('move_average'): + [param, grad] + ), name_scope('move_average'): self._append_average_accumulate_op(param) self.apply_program = Program() @@ -3939,20 +4441,25 @@ class ModelAverage(Optimizer): sum_2 = block._clone_variable(self._get_accumulator('sum_2', param)) sum_3 = block._clone_variable(self._get_accumulator('sum_3', param)) num_accumulates = block._clone_variable( - self._get_accumulator('num_accumulates', param)) + self._get_accumulator('num_accumulates', param) + ) old_num_accumulates = block._clone_variable( - self._get_accumulator('old_num_accumulates', param)) + self._get_accumulator('old_num_accumulates', param) + ) num_updates = block._clone_variable( - self._get_accumulator('num_updates', param)) + self._get_accumulator('num_updates', param) + ) # backup param value to grad layers.assign(input=param, output=grad) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) tmp = layers.cast( - x=tmp, dtype='float32' if self._dtype == None else self._dtype) + x=tmp, dtype='float32' if self._dtype == None else self._dtype + ) sum = layers.cast( - x=sum, dtype='float32' if self._dtype == None else self._dtype) + x=sum, dtype='float32' if self._dtype == None else self._dtype + ) ops._elementwise_div(x=sum, y=tmp, out=param) def _add_average_restore_op(self, block, param_grad): @@ -3965,44 +4472,42 @@ class ModelAverage(Optimizer): sum_1 = self._add_accumulator('sum_1', param) sum_2 = self._add_accumulator('sum_2', param) sum_3 = self._add_accumulator('sum_3', param) - num_accumulates = 
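# Minimal restatement of the apply path above: the averaged parameter is the
# three running sums divided by the total number of accumulated updates
# (helper name is illustrative only).
def averaged_param(sum_1, sum_2, sum_3, num_accumulates, old_num_accumulates):
    return (sum_1 + sum_2 + sum_3) / float(num_accumulates + old_num_accumulates)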
self._add_accumulator('num_accumulates', - param, - dtype='int64', - shape=[1]) - old_num_accumulates = self._add_accumulator('old_num_accumulates', - param, - dtype='int64', - shape=[1]) - num_updates = self._add_accumulator('num_updates', - param, - dtype='int64', - shape=[1]) - - self.helper.append_op(type='average_accumulates', - inputs={ - "param": param, - "in_sum_1": sum_1, - "in_sum_2": sum_2, - "in_sum_3": sum_3, - "in_num_accumulates": num_accumulates, - "in_old_num_accumulates": old_num_accumulates, - "in_num_updates": num_updates - }, - outputs={ - "out_sum_1": sum_1, - "out_sum_2": sum_2, - "out_sum_3": sum_3, - "out_num_accumulates": num_accumulates, - "out_old_num_accumulates": - old_num_accumulates, - "out_num_updates": num_updates, - }, - attrs={ - "average_window": self.average_window, - "min_average_window": self.min_average_window, - "max_average_window": self.max_average_window, - }, - stop_gradient=True) + num_accumulates = self._add_accumulator( + 'num_accumulates', param, dtype='int64', shape=[1] + ) + old_num_accumulates = self._add_accumulator( + 'old_num_accumulates', param, dtype='int64', shape=[1] + ) + num_updates = self._add_accumulator( + 'num_updates', param, dtype='int64', shape=[1] + ) + + self.helper.append_op( + type='average_accumulates', + inputs={ + "param": param, + "in_sum_1": sum_1, + "in_sum_2": sum_2, + "in_sum_3": sum_3, + "in_num_accumulates": num_accumulates, + "in_old_num_accumulates": old_num_accumulates, + "in_num_updates": num_updates, + }, + outputs={ + "out_sum_1": sum_1, + "out_sum_2": sum_2, + "out_sum_3": sum_3, + "out_num_accumulates": num_accumulates, + "out_old_num_accumulates": old_num_accumulates, + "out_num_updates": num_updates, + }, + attrs={ + "average_window": self.average_window, + "min_average_window": self.min_average_window, + "max_average_window": self.max_average_window, + }, + stop_gradient=True, + ) @signature_safe_contextmanager def apply(self, executor, need_restore=True): @@ -4117,7 +4622,7 @@ class ModelAverage(Optimizer): class ExponentialMovingAverage(object): r""" - :api_attr: Static Graph + :api_attr: Static Graph Compute the moving average of parameters with exponential decay. Given a parameter :math:`\\theta`, its exponential moving average (EMA) @@ -4127,7 +4632,7 @@ class ExponentialMovingAverage(object): \\text{EMA}_0 & = 0 - \\text{EMA}_t & = \\text{decay} * \\text{EMA}_{t-1} + (1 - \\text{decay}) * \\theta_t + \\text{EMA}_t & = \\text{decay} * \\text{EMA}_{t-1} + (1 - \\text{decay}) * \\theta_t The average results calculated by **update()** method will be saved in temporary variables which are created and maintained by the object, and can @@ -4215,7 +4720,8 @@ class ExponentialMovingAverage(object): def __init__(self, decay=0.999, thres_steps=None, name=None): if framework._non_static_mode(): raise Exception( - "In dygraph, don't support ExponentialMovingAverage.") + "In dygraph, don't support ExponentialMovingAverage." 
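# Minimal restatement of the EMA recurrence from the docstring above, together
# with the bias correction the apply path performs below by dividing out
# 1 - decay**t (plain Python; helper names are illustrative only).
def ema_update(ema, theta, decay):
    return decay * ema + (1.0 - decay) * theta

def ema_debiased(ema, decay, t):
    # starting from EMA_0 = 0, early steps are biased toward zero
    return ema / (1.0 - decay ** t)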
+ ) self._decay = decay self._thres_steps = thres_steps self._name = name if name is not None else '' @@ -4225,17 +4731,21 @@ class ExponentialMovingAverage(object): self._params_tmps = [] for param in default_main_program().global_block().all_parameters(): if param.do_model_average != False: - tmp = param.block.create_var(name=unique_name.generate(".".join( - [self._name + param.name, 'ema_tmp'])), - dtype=param.dtype, - persistable=False, - stop_gradient=True) + tmp = param.block.create_var( + name=unique_name.generate( + ".".join([self._name + param.name, 'ema_tmp']) + ), + dtype=param.dtype, + persistable=False, + stop_gradient=True, + ) self._params_tmps.append((param, tmp)) self._ema_vars = {} for param, tmp in self._params_tmps: - with param.block.program._optimized_guard( - [param, tmp]), name_scope('moving_average'): + with param.block.program._optimized_guard([param, tmp]), name_scope( + 'moving_average' + ): self._ema_vars[param.name] = self._create_ema_vars(param) self.apply_program = Program() @@ -4250,8 +4760,9 @@ class ExponentialMovingAverage(object): # bias correction with layers.control_flow.Switch() as switch: with switch.case(global_step > 0): - layers.assign(output=param, - input=ema / (1.0 - decay_pow)) + layers.assign( + output=param, input=ema / (1.0 - decay_pow) + ) with switch.default(): layers.assign(output=param, input=ema) @@ -4270,7 +4781,8 @@ class ExponentialMovingAverage(object): value=self._decay, dtype='float32', persistable=True, - name="scheduled_ema_decay_rate") + name="scheduled_ema_decay_rate", + ) if self._thres_steps is not None: decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0) @@ -4279,16 +4791,18 @@ class ExponentialMovingAverage(object): layers.tensor.assign(decay_t, decay_var) with switch.default(): layers.tensor.assign( - np.array([self._decay], dtype=np.float32), - decay_var) + np.array([self._decay], dtype=np.float32), decay_var + ) return decay_var def _get_decay_pow(self, block): - global_step = layers.create_global_var(name=self._step_counter_name, - shape=[1], - value=0, - dtype='int64', - persistable=True) + global_step = layers.create_global_var( + name=self._step_counter_name, + shape=[1], + value=0, + dtype='int64', + persistable=True, + ) global_step = layers.cast(global_step, "float32") decay_var = block._clone_variable(self._decay_var) decay_pow_acc = layers.elementwise_pow(decay_var, global_step) @@ -4300,7 +4814,8 @@ class ExponentialMovingAverage(object): shape=param.shape, value=0.0, dtype=param.dtype, - persistable=True) + persistable=True, + ) return param_ema @@ -4310,18 +4825,21 @@ class ExponentialMovingAverage(object): train program. 
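# Minimal restatement of the scheduled decay above: when thres_steps is given,
# the effective decay ramps up as (t + 1) / (t + 10) and is capped by the
# configured decay rate (helper name is illustrative only).
def scheduled_decay(decay, thres_steps):
    return min(decay, (thres_steps + 1.0) / (thres_steps + 10.0))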
""" global_step = layers.autoincreased_step_counter( - counter_name=self._step_counter_name) + counter_name=self._step_counter_name + ) param_master_emas = [] for param, tmp in self._params_tmps: - with param.block.program._optimized_guard( - [param, tmp]), name_scope('moving_average'): + with param.block.program._optimized_guard([param, tmp]), name_scope( + 'moving_average' + ): param_ema = self._ema_vars[param.name] if param.name + '.master' in self._ema_vars: master_ema = self._ema_vars[param.name + '.master'] param_master_emas.append([param_ema, master_ema]) else: ema_t = param_ema * self._decay_var + param * ( - 1 - self._decay_var) + 1 - self._decay_var + ) layers.assign(input=ema_t, output=param_ema) # for fp16 params @@ -4332,8 +4850,9 @@ class ExponentialMovingAverage(object): outputs={"Out": param_ema}, attrs={ "in_dtype": master_ema.dtype, - "out_dtype": param_ema.dtype - }) + "out_dtype": param_ema.dtype, + }, + ) @signature_safe_contextmanager def apply(self, executor, need_restore=True): @@ -4363,7 +4882,7 @@ class ExponentialMovingAverage(object): class PipelineOptimizer(object): """ - :api_attr: Static Graph + :api_attr: Static Graph Pipeline Optimizer: Make a program to run as pipeline, that is splitting a program into multiple sections (sub-programs) and each section run on a @@ -4426,14 +4945,19 @@ class PipelineOptimizer(object): self._device = "gpu" if framework._non_static_mode(): raise Exception("In dygraph, don't support PipelineOptimizer.") - valid_optimizers = (Optimizer, paddle.optimizer.Optimizer, - paddle.fluid.contrib.mixed_precision.decorator. - OptimizerWithMixedPrecision) + valid_optimizers = ( + Optimizer, + paddle.optimizer.Optimizer, + paddle.fluid.contrib.mixed_precision.decorator.OptimizerWithMixedPrecision, + ) if not isinstance(optimizer, valid_optimizers): - raise ValueError("The 'optimizer' parameter for " - "PipelineOptimizer must be an instance of " - "{}, but the given type is {}.".format( - valid_optimizers, type(optimizer))) + raise ValueError( + "The 'optimizer' parameter for " + "PipelineOptimizer must be an instance of " + "{}, but the given type is {}.".format( + valid_optimizers, type(optimizer) + ) + ) self._optimizer = optimizer # Get the original optimizer defined by users, such as SGD @@ -4441,11 +4965,13 @@ class PipelineOptimizer(object): while hasattr(self._origin_optimizer, "inner_opt"): self._origin_optimizer = self._origin_optimizer.inner_opt - assert num_microbatches >= 1, ( - "num_microbatches must be a positive value.") + assert ( + num_microbatches >= 1 + ), "num_microbatches must be a positive value." self._num_microbatches = num_microbatches - assert start_cpu_core_id >= 0, ( - "start_cpu_core_id must be a non-negative integer.") + assert ( + start_cpu_core_id >= 0 + ), "start_cpu_core_id must be a non-negative integer." 
self._start_cpu_core_id = start_cpu_core_id self._place_list = None op_maker = core.op_proto_and_checker_maker @@ -4473,41 +4999,47 @@ class PipelineOptimizer(object): if op.type == "reduce_any": # cast the bool var to int32 to use allreduce_max op temp_var_name = unique_name.generate(out_name + "_cast_int32") - temp_var = block.create_var(name=temp_var_name, - shape=[1], - dtype="int32") - block._insert_op(op_idx + 1 + offset, - type='cast', - inputs={'X': out_var}, - outputs={'Out': temp_var}, - attrs={ - 'in_dtype': out_var.dtype, - 'out_dtype': temp_var.dtype, - self._op_role_key: self._op_role.Optimize - }) + temp_var = block.create_var( + name=temp_var_name, shape=[1], dtype="int32" + ) + block._insert_op( + op_idx + 1 + offset, + type='cast', + inputs={'X': out_var}, + outputs={'Out': temp_var}, + attrs={ + 'in_dtype': out_var.dtype, + 'out_dtype': temp_var.dtype, + self._op_role_key: self._op_role.Optimize, + }, + ) offset += 1 block._insert_op( op_idx + 1 + offset, type='c_allreduce_max' - if op.type == "reduce_any" else 'c_allreduce_sum', + if op.type == "reduce_any" + else 'c_allreduce_sum', inputs={'X': temp_var if op.type == "reduce_any" else out_var}, outputs={'Out': temp_var if op.type == "reduce_any" else out_var}, attrs={ 'ring_id': self.global_ring_id, self._op_role_key: self._op_role.Optimize, - 'use_calc_stream': True - }) + 'use_calc_stream': True, + }, + ) offset += 1 if op.type == "reduce_any": - block._insert_op(op_idx + 1 + offset, - type='cast', - inputs={'X': temp_var}, - outputs={'Out': out_var}, - attrs={ - 'in_dtype': temp_var.dtype, - 'out_dtype': out_var.dtype, - self._op_role_key: self._op_role.Optimize - }) + block._insert_op( + op_idx + 1 + offset, + type='cast', + inputs={'X': temp_var}, + outputs={'Out': out_var}, + attrs={ + 'in_dtype': temp_var.dtype, + 'out_dtype': out_var.dtype, + self._op_role_key: self._op_role.Optimize, + }, + ) offset += 1 return offset @@ -4563,13 +5095,15 @@ class PipelineOptimizer(object): if var in used_var_set or "_blocking_queue" in var: continue used_var_set.add(var) - if block._find_var_recursive(str(var)): continue + if block._find_var_recursive(str(var)): + continue source_var = ori_block._var_recursive(str(var)) if source_var.type == core.VarDesc.VarType.READER: dest_var = block.create_var( name=var, type=core.VarDesc.VarType.READER, - persistable=source_var.persistable) + persistable=source_var.persistable, + ) elif isinstance(source_var, Parameter): dest_var = block.create_parameter( name=source_var.name, @@ -4581,14 +5115,16 @@ class PipelineOptimizer(object): trainable=source_var.trainable, optimize_attr=source_var.optimize_attr, regularizer=source_var.regularizer, - error_clip=source_var.error_clip) + error_clip=source_var.error_clip, + ) else: dest_var = block._clone_variable(source_var, False) self._clone_var_attr(dest_var, source_var) # When use with sharding, allreduce_sum and allreduce_max # used for global gradient clip and amp will be added by sharding. 
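# The cast / c_allreduce_max / cast pattern above emulates a boolean "any"
# across ranks: the max over {0, 1} integer casts is 1 exactly when at least
# one rank observed True.  Sketch of the equivalent local computation
# (helper name is illustrative only).
def allreduce_any(local_flags):
    return max(int(flag) for flag in local_flags) == 1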
op_idx += 1 - if self.use_sharding or not should_insert: continue + if self.use_sharding or not should_insert: + continue inserted_ops = self._insert_allreduce_op(op_idx - 1, block) added_op_num += inserted_ops op_idx += inserted_ops @@ -4598,15 +5134,18 @@ class PipelineOptimizer(object): assert self._op_role_key in op.attr_names op_role = int(op.attr(self._op_role_key)) return op_role & int(self._op_role.Backward) and op_role & int( - self._op_role.Loss) + self._op_role.Loss + ) def _is_forward_op(self, op): - return self._op_role_key in op.attr_names and (int( - op.attr(self._op_role_key)) == int(self._op_role.Forward)) + return self._op_role_key in op.attr_names and ( + int(op.attr(self._op_role_key)) == int(self._op_role.Forward) + ) def _is_backward_op(self, op): return self._op_role_key in op.attr_names and ( - int(op.attr(self._op_role_key)) & int(self._op_role.Backward)) + int(op.attr(self._op_role_key)) & int(self._op_role.Backward) + ) def _is_loss_op(self, op): assert self._op_role_key in op.attr_names @@ -4614,11 +5153,15 @@ class PipelineOptimizer(object): def _is_optimize_op(self, op): return self._op_role_key in op.attr_names and ( - int(op.attr(self._op_role_key)) & int(self._op_role.Optimize)) + int(op.attr(self._op_role_key)) & int(self._op_role.Optimize) + ) def _is_update_op(self, op): - return 'Param' in op.input_names and 'Grad' in op.input_names and ( - "LearningRate" in op.input_names) + return ( + 'Param' in op.input_names + and 'Grad' in op.input_names + and ("LearningRate" in op.input_names) + ) def _split_program(self, main_program, devices): """ @@ -4665,10 +5208,11 @@ class PipelineOptimizer(object): get the real op_device attribute of the fill_constant as the device where the corresponding parameters on. """ - assert "beta1_pow_acc" in var_name or "beta2_pow_acc" in var_name, \ - 'For accumulators for Adam, the name must contain beta1_pow_acc ' \ + assert "beta1_pow_acc" in var_name or "beta2_pow_acc" in var_name, ( + 'For accumulators for Adam, the name must contain beta1_pow_acc ' 'or beta2_pow_acc.' - param_name = var_name[0:var_name.index('_beta')] + ) + param_name = var_name[0 : var_name.index('_beta')] device = self._param_device_map[param_name] return device @@ -4680,7 +5224,8 @@ class PipelineOptimizer(object): if device == "cpu": assert op.type == "fill_constant", ( "For ops in startup program with the op_device attribute " - "of cpu, they must be of type fill_constant.") + "of cpu, they must be of type fill_constant." + ) output_var = op.output_arg_names[0] device = self._get_op_device_for_startup_program(output_var) @@ -4689,7 +5234,8 @@ class PipelineOptimizer(object): else: # LR related ops device = None - if device and device_index != device_id: continue + if device and device_index != device_id: + continue op_desc = op.desc ap_op = new_startup_program.global_block().desc.append_op() ap_op.copy_from(op_desc) @@ -4709,7 +5255,8 @@ class PipelineOptimizer(object): var_name = var_name.replace('.cast_fp16', '') post_ops = self.input_var_to_op[var_name] - if post_ops == None: return None + if post_ops == None: + return None result_op = None for post_op, post_idx in reversed(post_ops): if post_idx > index: @@ -4723,7 +5270,8 @@ class PipelineOptimizer(object): variable named var_name. 
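# Sketch of the accumulator-to-parameter mapping used above for the startup
# program: strip everything from '_beta' onward to recover the owning
# parameter, then look up that parameter's pipeline device.  The example
# variable name below is hypothetical.
def accumulator_param_name(var_name):
    assert "beta1_pow_acc" in var_name or "beta2_pow_acc" in var_name
    return var_name[: var_name.index("_beta")]

# e.g. accumulator_param_name("fc_0.w_0_beta1_pow_acc_0") -> "fc_0.w_0"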
""" prev_ops = self.output_var_to_op[var_name] - if prev_ops == None: return None + if prev_ops == None: + return None result_op = None for prev_op, prev_idx in reversed(prev_ops): if prev_idx < index: @@ -4749,7 +5297,8 @@ class PipelineOptimizer(object): lod_level=ref_var.lod_level, persistable=ref_var.persistable, is_data=ref_var.is_data, - need_check_feed=ref_var.desc.need_check_feed()) + need_check_feed=ref_var.desc.need_check_feed(), + ) self._clone_var_attr(new_var, ref_var) return new_var @@ -4775,11 +5324,16 @@ class PipelineOptimizer(object): """ Get the op_device attribute of a op. """ - device = op.attr(self._op_device_key) \ - if op.has_attr(self._op_device_key) else None + device = ( + op.attr(self._op_device_key) + if op.has_attr(self._op_device_key) + else None + ) if device: - assert device[0:3] == 'gpu' or device[0:3] == 'npu', "Now, only gpu and npu devices are " \ + assert device[0:3] == 'gpu' or device[0:3] == 'npu', ( + "Now, only gpu and npu devices are " "supported in pipeline parallemism." + ) return device def _add_op_device_attr_for_op(self, op, idx, block): @@ -4798,40 +5352,48 @@ class PipelineOptimizer(object): elif op.type == "sum" and self._is_backward_op(op): # For sum ops that compute the sum of @RENAMED@ vars for name in op.desc.input_arg_names(): - assert '@RENAME@' in name, \ - "The op must be sum used to accumulate renamed vars." + assert ( + '@RENAME@' in name + ), "The op must be sum used to accumulate renamed vars." assert len(op.desc.output_arg_names()) == 1 out_name = op.desc.output_arg_names()[0] post_op = self._find_post_op(idx, out_name) assert post_op.has_attr( - 'op_device'), "{} has no op_device attr for var {}".format( - post_op.type, out_name) + 'op_device' + ), "{} has no op_device attr for var {}".format( + post_op.type, out_name + ) device = post_op.attr(self._op_device_key) assert device, "The post op must have op_device set." op._set_attr(self._op_device_key, device) - elif (op.type == "cast" - or op.type == "scale") and self._is_backward_op(op): + elif (op.type == "cast" or op.type == "scale") and self._is_backward_op( + op + ): prev_op = self._find_prev_op(idx, op.desc.input("X")[0]) op._set_attr(self._op_device_key, prev_op.attr(self._op_device_key)) elif op.type == "memcpy" and not self._is_optimize_op(op): # for checkpoint offloading - assert len(op.input_arg_names) == 1 and len( - op.output_arg_names) == 1 + assert ( + len(op.input_arg_names) == 1 and len(op.output_arg_names) == 1 + ) input_name = op.input_arg_names[0] output_name = op.output_arg_names[0] if '@Fetch' in output_name: post_op = self._find_post_op(idx, output_name) - op._set_attr(self._op_device_key, - post_op.attr(self._op_device_key)) + op._set_attr( + self._op_device_key, post_op.attr(self._op_device_key) + ) else: prev_op = self._find_prev_op(idx, op.desc.input("X")[0]) - op._set_attr(self._op_device_key, - prev_op.attr(self._op_device_key)) + op._set_attr( + self._op_device_key, prev_op.attr(self._op_device_key) + ) elif self._is_loss_op(op): # For loss * loss_scaling op added by AMP offset = 1 - while (not block.ops[idx + offset].has_attr(self._op_device_key) - or not block.ops[idx + offset].attr(self._op_device_key)): + while not block.ops[idx + offset].has_attr( + self._op_device_key + ) or not block.ops[idx + offset].attr(self._op_device_key): offset += 1 device = block.ops[idx + offset].attr(self._op_device_key) assert device, "Please put you program within device_guard scope." 
@@ -4847,19 +5409,26 @@ class PipelineOptimizer(object): elif self._is_gradient_clip_op(op) or self._is_regularization_op(op): # For gradient clip and regularization ops, we set their op_device # attribute to the device where their corresponding parameters on. - assert self._op_role_var_key in op.attr_names, "gradient_clip " \ + assert self._op_role_var_key in op.attr_names, ( + "gradient_clip " "and regularization ops must have op_role_var attribute." + ) op_role_var = op.attr(self._op_role_var_key) - assert len(op_role_var) == 2, "op_role_var for gradient_clip " \ + assert len(op_role_var) == 2, ( + "op_role_var for gradient_clip " "regularization ops must have two elements." + ) param_name = op_role_var[0] device = self._param_device_map[param_name] # For sum op added by global gradient clip, it must be # put on all devices - if (op.type == 'sum' or op.type == 'sqrt' - or op.type == 'fill_constant' - or op.type == 'elementwise_max' - or op.type == 'elementwise_div'): + if ( + op.type == 'sum' + or op.type == 'sqrt' + or op.type == 'fill_constant' + or op.type == 'elementwise_max' + or op.type == 'elementwise_div' + ): device = f"{self._device}:all" op._set_attr(self._op_device_key, device) elif op.type == "alloc_float_status" or op.type == "clear_float_status": @@ -4876,12 +5445,18 @@ class PipelineOptimizer(object): float_status_var.persistable = True else: other_known_ops = [ - 'update_loss_scaling', 'reduce_any', 'concat', 'sum', - 'check_finite_and_unscale', 'memcpy' + 'update_loss_scaling', + 'reduce_any', + 'concat', + 'sum', + 'check_finite_and_unscale', + 'memcpy', ] - assert op.type in other_known_ops, "For other ops without " \ - "op_device set, they must be one of {}, but it " \ + assert op.type in other_known_ops, ( + "For other ops without " + "op_device set, they must be one of {}, but it " "is {}".format(other_known_ops, op.type) + ) assert self._is_optimize_op(op) op._set_attr(self._op_device_key, f"{self._device}:all") @@ -4891,8 +5466,11 @@ class PipelineOptimizer(object): not that attribute set. """ for idx, op in enumerate(list(block.ops)): - if (op.type == "create_py_reader" or op.type == "read" - or op.type == "create_double_buffer_reader"): + if ( + op.type == "create_py_reader" + or op.type == "read" + or op.type == "create_double_buffer_reader" + ): # Copy read related ops to all section to make them exit # after each epoch. # We use "gpu:all" to represent the op should be put on all @@ -4901,7 +5479,8 @@ class PipelineOptimizer(object): op._set_attr(self._op_device_key, f"{self._device}:all") continue # op_device attribute has been set - if self._get_op_device_attr(op): continue + if self._get_op_device_attr(op): + continue self._add_op_device_attr_for_op(op, idx, block) def _check_validation(self, block): @@ -4922,33 +5501,42 @@ class PipelineOptimizer(object): ] for op in block.ops: if not op._has_kernel(op.type): - assert op.type == "conditional_block" and (op.attr( - self._op_role_key) == int(self._op_role.LRSched)), ( - "Now, the only supported op without kernel is " - "conditional_block, and its op role must be LRSched.") + assert op.type == "conditional_block" and ( + op.attr(self._op_role_key) == int(self._op_role.LRSched) + ), ( + "Now, the only supported op without kernel is " + "conditional_block, and its op role must be LRSched." 
+ ) assert op.has_attr( - self._op_role_key), ("op ({}) has no {} attribute.".format( - op.type, self._op_role_key)) + self._op_role_key + ), "op ({}) has no {} attribute.".format(op.type, self._op_role_key) op_role = op.attr(self._op_role_key) - assert int(op_role) in valid_op_role_value, \ - "op_role {} for op {} must be one of {}".format( - op_role, - op.type, - valid_op_role_value) + assert ( + int(op_role) in valid_op_role_value + ), "op_role {} for op {} must be one of {}".format( + op_role, op.type, valid_op_role_value + ) assert op.has_attr( - self._op_device_key), ("op ({}) has no {} attribute.".format( - op.type, self._op_device_key)) + self._op_device_key + ), "op ({}) has no {} attribute.".format( + op.type, self._op_device_key + ) device = op.attr(self._op_device_key) - assert device, ("op_device attribute for op " - "{} has not been set.".format(op.type)) - if device == f"{self._device}:all": continue + assert ( + device + ), "op_device attribute for op " "{} has not been set.".format( + op.type + ) + if device == f"{self._device}:all": + continue dev_type = device.split(':')[0] assert dev_type == "gpu" or dev_type == 'npu', ( "Now only gpu and npu devices are supported " - "for pipeline parallelism.") + "for pipeline parallelism." + ) if device not in device_list: device_list.append(device) @@ -4971,16 +5559,18 @@ class PipelineOptimizer(object): break extra_index_info = { 'index': 0, - 'first_optimize_index': first_optimize_index + 'first_optimize_index': first_optimize_index, } for index, op in enumerate(list(block.ops)): cur_device = op.attr(self._op_device_key) - if cur_device == f"{self._device}:all": continue + if cur_device == f"{self._device}:all": + continue for var_name in op.input_arg_names: var = block.var(var_name) # skip data var - if var.is_data: continue + if var.is_data: + continue prev_device = None prev_op = self._find_prev_op(index, var_name) @@ -4990,13 +5580,15 @@ class PipelineOptimizer(object): prev_device = self._param_device_map[var_name] if not prev_device: - prev_device = prev_op.attr(self._op_device_key) \ - if prev_op else None + prev_device = ( + prev_op.attr(self._op_device_key) if prev_op else None + ) if prev_device is None or prev_device == f"{self._device}:all": continue - if prev_device == cur_device: continue + if prev_device == cur_device: + continue if var_name not in input_var_to_device: input_var_to_device[var_name] = [] @@ -5009,20 +5601,25 @@ class PipelineOptimizer(object): # check send/recv stage valid is_forward = self._is_forward_op(op) is_backward = self._is_backward_op(op) - assert is_forward or is_backward, \ - 'send/recv in pipeline should only be inserted in forward or backward,' \ + assert is_forward or is_backward, ( + 'send/recv in pipeline should only be inserted in forward or backward,' 'please check the op_role of op={}'.format(op) + ) if is_forward: - assert prev_id < cur_id, \ - "In forward, send/recv can only be passed forward, but now " \ + assert prev_id < cur_id, ( + "In forward, send/recv can only be passed forward, but now " "prev_stage={} great than cur_stage={}, please check op_device of op={}".format( - prev_id, cur_id, op) + prev_id, cur_id, op + ) + ) elif is_backward: - assert prev_id > cur_id, \ - "In backward, send/recv can only be passed backward, but now " \ + assert prev_id > cur_id, ( + "In backward, send/recv can only be passed backward, but now " "prev_stage={} less than cur_stage={}, please check op_device of op={}".format( - prev_id, cur_id, op) + prev_id, cur_id, op + ) + ) def 
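# Sketch of the direction rule enforced by _check_stage above: forward
# send/recv must move activations to a later stage, backward send/recv must
# move gradients to an earlier stage (helper name is illustrative only).
def check_stage(prev_id, cur_id, is_forward):
    if is_forward:
        assert prev_id < cur_id, "forward send/recv must target a later stage"
    else:
        assert prev_id > cur_id, "backward send/recv must target an earlier stage"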
_insert_send_recv(cur_id, prev_id): cur_dev = device_type + str(cur_id) @@ -5034,13 +5631,15 @@ class PipelineOptimizer(object): _insert_send_recv(cur_id - 1, prev_id) _insert_send_recv(cur_id, cur_id - 1) input_var_to_device[var_name].append( - (cur_dev, prev_dev)) + (cur_dev, prev_dev) + ) return elif cur_id - prev_id < -1: _insert_send_recv(cur_id + 1, prev_id) _insert_send_recv(cur_id, cur_id + 1) input_var_to_device[var_name].append( - (cur_dev, prev_dev)) + (cur_dev, prev_dev) + ) return assert abs(cur_id - prev_id) == 1 @@ -5069,12 +5668,16 @@ class PipelineOptimizer(object): self._op_role_key: op_role, 'use_calc_stream': True, 'peer': 1, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) extra_index_info['index'] += 1 var_shape = list(var.shape) - var_shape[0] = self.micro_batch_size if var_shape[ - 0] < 0 else var_shape[0] + var_shape[0] = ( + self.micro_batch_size + if var_shape[0] < 0 + else var_shape[0] + ) block._insert_op_without_sync( index=index + extra_index_info['index'], type='recv_v2', @@ -5086,17 +5689,22 @@ class PipelineOptimizer(object): self._op_role_key: op_role, 'use_calc_stream': True, 'peer': 0, - 'ring_id': ring_id - }) + 'ring_id': ring_id, + }, + ) extra_index_info['index'] += 1 elif self.schedule_mode == '1F1B': # 1F1B var_shape = list(var.shape) - var_shape[0] = self.micro_batch_size if var_shape[ - 0] < 0 else var_shape[0] + var_shape[0] = ( + self.micro_batch_size + if var_shape[0] < 0 + else var_shape[0] + ) numel = np.prod(var_shape) - use_mp = (self.mp_degree > 1) and (numel % - self.mp_degree == 0) + use_mp = (self.mp_degree > 1) and ( + numel % self.mp_degree == 0 + ) if 'subprog' in var.name: # For recompute, if the checkpoints var is layer_norm_6.tmp_2 @@ -5119,32 +5727,34 @@ class PipelineOptimizer(object): self._op_device_key: cur_dev, self._op_role_key: op_role, 'use_calc_stream': True, - }) + }, + ) extra_index_info['index'] += 1 return _check_stage(cur_id, prev_id) - block._insert_op_without_sync(index=index + - extra_index_info['index'], - type='c_sync_calc_stream', - inputs={'X': [var]}, - outputs={'Out': [var]}, - attrs={ - self._op_device_key: - prev_dev, - self._op_role_key: - op_role, - }) + block._insert_op_without_sync( + index=index + extra_index_info['index'], + type='c_sync_calc_stream', + inputs={'X': [var]}, + outputs={'Out': [var]}, + attrs={ + self._op_device_key: prev_dev, + self._op_role_key: op_role, + }, + ) extra_index_info['index'] += 1 prefix_name = var.name.split('@')[0] prefix_var = block.var(prefix_name) - is_param = True if isinstance(prefix_var, - Parameter) else False + is_param = ( + True if isinstance(prefix_var, Parameter) else False + ) block._insert_op_without_sync( index=index + extra_index_info['index'], type='send_v2' - if not use_mp or is_param else 'partial_send', + if not use_mp or is_param + else 'partial_send', inputs={'X': var}, attrs={ self._op_device_key: prev_dev, @@ -5155,12 +5765,14 @@ class PipelineOptimizer(object): # if send_v2, num&id attr is not in op_attrs, will not insert 'num': self.mp_degree, 'id': self.mp_rank, - }) + }, + ) extra_index_info['index'] += 1 insert_index = None if int(op_role) == int(self._op_role.Backward): insert_index = extra_index_info[ - 'first_optimize_index'] + 'first_optimize_index' + ] new_op_role = self._op_role.Optimize else: insert_index = index @@ -5174,14 +5786,16 @@ class PipelineOptimizer(object): self._op_device_key: prev_dev, self._op_role_key: new_op_role, 'ring_id': ring_id, - }) + }, + ) if int(op_role) == int(self._op_role.Forward): 
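# The recursion in _insert_send_recv above breaks a transfer between
# non-adjacent pipeline stages into unit-stride hops, so every emitted
# send/recv pair connects neighbouring devices.  Equivalent hop list
# (helper name is illustrative only).
def stage_hops(prev_id, cur_id):
    step = 1 if cur_id > prev_id else -1
    return [(s, s + step) for s in range(prev_id, cur_id, step)]

# stage_hops(0, 3) -> [(0, 1), (1, 2), (2, 3)]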
sync_comm_op._set_attr('pipeline_flag', '') extra_index_info['index'] += 1 block._insert_op_without_sync( index=index + extra_index_info['index'], type='recv_v2' - if not use_mp or is_param else 'partial_recv', + if not use_mp or is_param + else 'partial_recv', outputs={'Out': [var]}, attrs={ 'out_shape': var_shape, @@ -5194,7 +5808,8 @@ class PipelineOptimizer(object): # if recv_v2, num&id attr is not in op_attrs, will not insert 'num': self.mp_degree, 'id': self.mp_rank, - }) + }, + ) extra_index_info['index'] += 1 if use_mp and not is_param: block._insert_op_without_sync( @@ -5210,27 +5825,33 @@ class PipelineOptimizer(object): # if recv_v2, num&id attr is not in op_attrs, will not insert 'nranks': self.mp_degree, 'rank': self.mp_rank, - }) + }, + ) extra_index_info['index'] += 1 else: raise ValueError( "Now only 'F-then-B' and '1F1B' are supported." - "The given value is {}.".format(self.schedule_mode)) + "The given value is {}.".format(self.schedule_mode) + ) - _insert_send_recv(int(cur_device.split(':')[1]), - int(prev_device.split(':')[1])) + _insert_send_recv( + int(cur_device.split(':')[1]), + int(prev_device.split(':')[1]), + ) block._sync_with_cpp() def _insert_loss_scale(self, block): """ Scale the loss corresponding to number of micro-batches. """ - if self._num_microbatches == 1: return + if self._num_microbatches == 1: + return for index, op in reversed(tuple(enumerate(list(block.ops)))): if self._is_loss_grad_op(op): - assert op.type == 'fill_constant', \ - "loss_grad_op must be fill_constant op, " \ + assert op.type == 'fill_constant', ( + "loss_grad_op must be fill_constant op, " "but this op is {}".format(op.type) + ) assert op.has_attr('value') loss_scale = float(op.attr('value')) loss_scale = loss_scale / self._num_microbatches @@ -5239,24 +5860,25 @@ class PipelineOptimizer(object): def _rename_gradient_var_name(self, block): for index, op in enumerate(block.ops): - if not self._is_optimize_op(op): continue + if not self._is_optimize_op(op): + continue input_names = op.input_arg_names output_names = op.output_arg_names in_out_names = input_names + output_names - if op.type == 'cast' or op.type == "c_sync_comm_stream": continue + if op.type == 'cast' or op.type == "c_sync_comm_stream": + continue # append "MERGED" to the names of parameter gradients, # and mofify the op_role_var attribute (by rename_arg func). for name in in_out_names: - if not core.grad_var_suffix() in name: continue + if not core.grad_var_suffix() in name: + continue param_name = name.strip(core.grad_var_suffix()) new_grad_name = name + "@MERGED" self._rename_arg(op, name, new_grad_name) - def _accumulate_gradients(self, - block, - pp_allreduce_in_optimize=False, - strategy=None, - shard=None): + def _accumulate_gradients( + self, block, pp_allreduce_in_optimize=False, strategy=None, shard=None + ): """ Create a new merged gradient for each parameter and accumulate the corresponding gradient to it. 
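# The loss-scale adjustment above keeps gradient accumulation an average rather
# than a sum: the fill_constant value that seeds the loss gradient is divided
# by the number of micro-batches, so the merged gradient matches the mini-batch
# gradient (helper name is illustrative only).
def scaled_loss_grad(loss_scale, num_microbatches):
    return loss_scale / num_microbatches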
@@ -5264,7 +5886,8 @@ class PipelineOptimizer(object): fp16_allreduce = strategy.fp16_allreduce if strategy else False if strategy and strategy.fuse_grad_merge: fused_gradient_names = self._accumulate_gradients_with_fuse( - block, fp16_allreduce, strategy.fuse_grad_size_in_MB, shard) + block, fp16_allreduce, strategy.fuse_grad_size_in_MB, shard + ) return fused_gradient_names merged_gradient_names = [] @@ -5288,22 +5911,30 @@ class PipelineOptimizer(object): # maybe have no optimize # if first_opt_op_idx == len(block.ops): return - if self._is_backward_op(op) and (self._op_role_var_key - in op.attr_names): + if self._is_backward_op(op) and ( + self._op_role_var_key in op.attr_names + ): op_role_var = op.attr(self._op_role_var_key) - if len(op_role_var) == 0: continue + if len(op_role_var) == 0: + continue assert len(op_role_var) % 2 == 0 for i in range(0, len(op_role_var), 2): offset = 0 param_name = op_role_var[i] - if not block.has_var(param_name): continue - if '@BroadCast' in param_name: continue + if not block.has_var(param_name): + continue + if '@BroadCast' in param_name: + continue param_grad_name = param_name + core.grad_var_suffix() merged_param_grad_name = param_grad_name + merged_suffix if not block.has_var(merged_param_grad_name): - self._create_var(block, block.vars[param_name], - merged_param_grad_name, dtype) + self._create_var( + block, + block.vars[param_name], + merged_param_grad_name, + dtype, + ) assert block.has_var(merged_param_grad_name) param_grad_var = block.var(param_grad_name) @@ -5315,22 +5946,19 @@ class PipelineOptimizer(object): inputs={}, outputs={'Out': [merged_param_grad_var]}, attrs={ - 'shape': - merged_param_grad_var.shape, - 'dtype': - merged_param_grad_var.dtype, - 'value': - float(0), + 'shape': merged_param_grad_var.shape, + 'dtype': merged_param_grad_var.dtype, + 'value': float(0), # a trick to run this op once per mini-batch - self._op_role_key: - self._op_role.Optimize.LRSched, - }) + self._op_role_key: self._op_role.Optimize.LRSched, + }, + ) offset += 1 grad_name = op_role_var[i + 1] grad_var = block.vars[grad_name] is_fp16_grad = 'cast_fp16' in grad_name - need_cast = (is_fp16_grad is not fp16_allreduce) + need_cast = is_fp16_grad is not fp16_allreduce if need_cast: # if fp16_allreduce: @@ -5339,20 +5967,20 @@ class PipelineOptimizer(object): # cast grad to fp32 to accumulate to merged gradient cast_grad_var_name = param_grad_name + '@TMP' cast_grad_var = self._create_var( - block, param_grad_var, cast_grad_var_name, dtype) + block, param_grad_var, cast_grad_var_name, dtype + ) cast_grad_var.persistable = False - block._insert_op(index=first_opt_op_idx + offset, - type='cast', - inputs={'X': grad_var}, - outputs={'Out': cast_grad_var}, - attrs={ - 'in_dtype': - grad_var.dtype, - 'out_dtype': - cast_grad_var.dtype, - self._op_role_key: - self._op_role.Backward, - }) + block._insert_op( + index=first_opt_op_idx + offset, + type='cast', + inputs={'X': grad_var}, + outputs={'Out': cast_grad_var}, + attrs={ + 'in_dtype': grad_var.dtype, + 'out_dtype': cast_grad_var.dtype, + self._op_role_key: self._op_role.Backward, + }, + ) offset += 1 grad_var = cast_grad_var @@ -5363,11 +5991,13 @@ class PipelineOptimizer(object): outputs={'Out': merged_param_grad_var}, attrs={ self._op_role_key: self._op_role.Backward, - }) + }, + ) offset += 1 merged_gradient_names.append(merged_param_grad_name) - if not fp16_allreduce: return merged_gradient_names + if not fp16_allreduce: + return merged_gradient_names first_opt_op_idx = None for index, op in 
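# Restatement of the cast decision above: a gradient is cast before being
# accumulated exactly when its precision disagrees with the precision chosen
# for allreduce (helper name is illustrative only).
def needs_cast(grad_name, fp16_allreduce):
    is_fp16_grad = "cast_fp16" in grad_name
    return is_fp16_grad is not fp16_allreduce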
reversed(tuple(enumerate(list(block.ops)))): @@ -5391,28 +6021,31 @@ class PipelineOptimizer(object): grad_var = block.var(grad_name) grad_var.persistable = False - block._insert_op(index=first_opt_op_idx, - type='cast', - inputs={'X': fp16_grad_var}, - outputs={'Out': grad_var}, - attrs={ - 'in_dtype': fp16_grad_var.dtype, - 'out_dtype': grad_var.dtype, - self._op_role_key: self._op_role.Optimize, - }) + block._insert_op( + index=first_opt_op_idx, + type='cast', + inputs={'X': fp16_grad_var}, + outputs={'Out': grad_var}, + attrs={ + 'in_dtype': fp16_grad_var.dtype, + 'out_dtype': grad_var.dtype, + self._op_role_key: self._op_role.Optimize, + }, + ) return merged_gradient_names - def _insert_accumulate_gradients_with_fuse(self, main_block, fp16, - fused_size, grad_param_pairs, - first_opt_op_idx): + def _insert_accumulate_gradients_with_fuse( + self, main_block, fp16, fused_size, grad_param_pairs, first_opt_op_idx + ): grad_param_pairs = self._sort_grad_param_by_dtype( - main_block, grad_param_pairs) + main_block, grad_param_pairs + ) grad_param_segments = [] merged_suffix = '@MERGED@FP16' if fp16 else '@MERGED' dtype = paddle.float16 if fp16 else paddle.float32 - cur_size = 0. + cur_size = 0.0 last_dtype = None # split the grad based on dtype and fused size for grad, param in grad_param_pairs: @@ -5423,7 +6056,8 @@ class PipelineOptimizer(object): dtype=dtype, shape=real_grad.shape, persistable=True, - stop_gradient=False) + stop_gradient=False, + ) real_param = main_block.var(param) if hasattr(real_param, 'is_distributed'): merged_grad_var.is_distributed = real_param.is_distributed @@ -5431,13 +6065,16 @@ class PipelineOptimizer(object): # two strategies for splitting the grad # 1. the current segment's size reach the user defined grad_size_in_MB # 2. the upcoming grad holds different dtype compared with grads in current segment - if len(grad_param_segments) == 0 \ - or cur_size + tmp_size > fused_size \ - or real_grad.dtype != last_dtype: + if ( + len(grad_param_segments) == 0 + or cur_size + tmp_size > fused_size + or real_grad.dtype != last_dtype + ): grad_param_segments.append( - ([real_grad], [real_param], [merged_grad_var])) + ([real_grad], [real_param], [merged_grad_var]) + ) last_dtype = real_grad.dtype - cur_size = 0. + cur_size = 0.0 else: grad_param_segments[-1][0].append(real_grad) grad_param_segments[-1][1].append(real_param) @@ -5450,21 +6087,28 @@ class PipelineOptimizer(object): for grad_param_segment in grad_param_segments: grad_segment = grad_param_segment[0] merged_grad_segment = grad_param_segment[2] - fused_grad = main_block.create_var(name='FusedGrad_{}'.format( - grad_segment[0].name), - dtype=grad_segment[0].dtype, - persistable=False, - stop_gradient=False) + fused_grad = main_block.create_var( + name='FusedGrad_{}'.format(grad_segment[0].name), + dtype=grad_segment[0].dtype, + persistable=False, + stop_gradient=False, + ) # keep the '.cast_fp16' info in the fuse var name - fused_merged_grad_name_prefix = 'FusedMergedGrad.cast_fp16.' if \ - merged_grad_segment[0].dtype == paddle.float16 else 'FusedMergedGrad' - fused_merged_grad_name = fused_merged_grad_name_prefix + '_{}'.format( - merged_grad_segment[0].name) + fused_merged_grad_name_prefix = ( + 'FusedMergedGrad.cast_fp16.' 
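# Sketch of the segmentation rule used above when fusing gradients: a new
# segment starts whenever the accumulated size would exceed the configured
# budget (in MB) or the dtype changes (helper names are illustrative only).
def split_segments(grads, fused_size_mb, size_mb_of, dtype_of):
    segments, cur_size, last_dtype = [], 0.0, None
    for g in grads:
        if (not segments or cur_size + size_mb_of(g) > fused_size_mb
                or dtype_of(g) != last_dtype):
            segments.append([g])
            cur_size, last_dtype = 0.0, dtype_of(g)
        else:
            segments[-1].append(g)
        cur_size += size_mb_of(g)
    return segments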
+ if merged_grad_segment[0].dtype == paddle.float16 + else 'FusedMergedGrad' + ) + fused_merged_grad_name = ( + fused_merged_grad_name_prefix + + '_{}'.format(merged_grad_segment[0].name) + ) fused_merged_grad = main_block.create_var( name=fused_merged_grad_name, dtype=merged_grad_segment[0].dtype, persistable=True, - stop_gradient=False) + stop_gradient=False, + ) fused_gradients.append(fused_grad) fused_merged_gradients.append(fused_merged_grad) @@ -5490,10 +6134,7 @@ class PipelineOptimizer(object): first_back_op_idx + offset, type="coalesce_tensor", inputs={"Input": params}, - outputs={ - "Output": grads, - "FusedOutput": fused_grad - }, + outputs={"Output": grads, "FusedOutput": fused_grad}, attrs={ # Explanation of user_defined_size_of_dtype: # In coalesce op, the align size is 256 bytes @@ -5517,7 +6158,8 @@ class PipelineOptimizer(object): # To avoid these problematic triggers, set constant is needed for npu "set_constant": core.is_compiled_with_npu(), "constant": float(0.0), - }) + }, + ) offset += 1 # For the gradient_merged_fused_var, given a init value during the coalesce op # this will remove a problematic fill_constant op. This op role of this coalesce @@ -5528,7 +6170,7 @@ class PipelineOptimizer(object): inputs={"Input": params}, outputs={ "Output": merged_grads, - "FusedOutput": fused_merged_grad + "FusedOutput": fused_merged_grad, }, attrs={ "user_defined_size_of_dtype": 2, @@ -5537,8 +6179,9 @@ class PipelineOptimizer(object): "copy_data": False, "use_align": True, "dtype": merged_grads[0].dtype, - self._op_role_key: self._op_role.Optimize.LRSched - }) + self._op_role_key: self._op_role.Optimize.LRSched, + }, + ) offset += 1 # insert gradient merge relating ops @@ -5548,25 +6191,28 @@ class PipelineOptimizer(object): fused_grad = fused_gradients[i] fused_merged_grad = fused_merged_gradients[i] is_fp16_grad = 'cast_fp16' in fused_grad.name - need_cast = (is_fp16_grad is not fp16) + need_cast = is_fp16_grad is not fp16 if need_cast: # for fp16 allreduce, cast fp32 grad to fp16 # for fp32 allreduce, cast fp16 grad to fp32 cast_grad_var_name = fused_grad.name + '@TMP' - cast_grad_var = main_block.create_var(name=cast_grad_var_name, - dtype=dtype, - persistable=False, - stop_gradient=False) - main_block._insert_op(index=first_opt_op_idx + offset, - type='cast', - inputs={'X': fused_grad}, - outputs={'Out': cast_grad_var}, - attrs={ - 'in_dtype': fused_grad.dtype, - 'out_dtype': cast_grad_var.dtype, - self._op_role_key: - self._op_role.Backward, - }) + cast_grad_var = main_block.create_var( + name=cast_grad_var_name, + dtype=dtype, + persistable=False, + stop_gradient=False, + ) + main_block._insert_op( + index=first_opt_op_idx + offset, + type='cast', + inputs={'X': fused_grad}, + outputs={'Out': cast_grad_var}, + attrs={ + 'in_dtype': fused_grad.dtype, + 'out_dtype': cast_grad_var.dtype, + self._op_role_key: self._op_role.Backward, + }, + ) offset += 1 fused_grad = cast_grad_var main_block._insert_op( @@ -5574,7 +6220,8 @@ class PipelineOptimizer(object): type='sum', inputs={'X': [fused_merged_grad, fused_grad]}, outputs={'Out': fused_merged_grad}, - attrs={self._op_role_key: self._op_role.Backward}) + attrs={self._op_role_key: self._op_role.Backward}, + ) offset += 1 if fp16: @@ -5585,21 +6232,24 @@ class PipelineOptimizer(object): assert main_block.has_var(fp16_grad_name) fp16_grad = main_block.var(fp16_grad_name) fp32_grad_name = param + core.grad_var_suffix() + '@MERGED' - fp32_grad = main_block.create_var(name=fp32_grad_name, - dtype=paddle.float32, - 
shape=real_grad.shape, - persistable=False, - stop_gradient=False) - main_block._insert_op(index=first_opt_op_idx + offset, - type='cast', - inputs={'X': fp16_grad}, - outputs={'Out': fp32_grad}, - attrs={ - 'in_dtype': paddle.float16, - 'out_dtype': paddle.float32, - self._op_role_key: - self._op_role.Optimize, - }) + fp32_grad = main_block.create_var( + name=fp32_grad_name, + dtype=paddle.float32, + shape=real_grad.shape, + persistable=False, + stop_gradient=False, + ) + main_block._insert_op( + index=first_opt_op_idx + offset, + type='cast', + inputs={'X': fp16_grad}, + outputs={'Out': fp32_grad}, + attrs={ + 'in_dtype': paddle.float16, + 'out_dtype': paddle.float32, + self._op_role_key: self._op_role.Optimize, + }, + ) offset += 1 # replace the var with it's name, which will be used for inserting allreduce @@ -5608,11 +6258,9 @@ class PipelineOptimizer(object): return fused_merged_gradients, first_opt_op_idx - def _accumulate_gradients_with_fuse(self, - main_block, - fp16, - fused_size, - shard=None): + def _accumulate_gradients_with_fuse( + self, main_block, fp16, fused_size, shard=None + ): first_opt_op_idx = None grad_param_pairs = [] # obtain all param/grad pairs that needed to be fused @@ -5632,8 +6280,9 @@ class PipelineOptimizer(object): if first_opt_op_idx == len(main_block.ops): return - if self._is_backward_op(op) and (self._op_role_var_key - in op.attr_names): + if self._is_backward_op(op) and ( + self._op_role_var_key in op.attr_names + ): op_role_var = op.attr(self._op_role_var_key) if len(op_role_var) == 0: continue @@ -5645,7 +6294,8 @@ class PipelineOptimizer(object): if '@BroadCast' in param_name: continue grad_param_pairs.append( - (op_role_var[i + 1], op_role_var[i])) + (op_role_var[i + 1], op_role_var[i]) + ) if len(grad_param_pairs) == 0: return @@ -5659,9 +6309,12 @@ class PipelineOptimizer(object): all_fused_merged_gradients = [] for pairs in device_to_pairs: - fused_merged_gradients, first_opt_op_idx = \ - self._insert_accumulate_gradients_with_fuse( - main_block, fp16, fused_size, pairs, first_opt_op_idx) + ( + fused_merged_gradients, + first_opt_op_idx, + ) = self._insert_accumulate_gradients_with_fuse( + main_block, fp16, fused_size, pairs, first_opt_op_idx + ) all_fused_merged_gradients += fused_merged_gradients main_block._sync_with_cpp() @@ -5697,8 +6350,12 @@ class PipelineOptimizer(object): core.VarDesc.VarType.UINT8: 1, } assert -1 not in var.shape - return reduce(lambda x, y: x * y, - var.shape) * dtype_to_size[var.dtype] / 1024.0 / 1024.0 + return ( + reduce(lambda x, y: x * y, var.shape) + * dtype_to_size[var.dtype] + / 1024.0 + / 1024.0 + ) def _add_sub_blocks(self, main_block, program_list): main_program = main_block.program @@ -5719,12 +6376,14 @@ class PipelineOptimizer(object): def _get_device_info(self, block): for op in block.ops: - if not op._has_kernel(op.type): continue + if not op._has_kernel(op.type): + continue op_device = op.attr(self._op_device_key) return op_device - def _process_persistable_vars_in_multi_sections(self, main_program, - startup_prog, program_list): + def _process_persistable_vars_in_multi_sections( + self, main_program, startup_prog, program_list + ): """ Special Case: process persistable vars that exist in multiple sections, e.g., shared weight @@ -5735,9 +6394,11 @@ class PipelineOptimizer(object): for prog in program_list: block = prog.block(0) for var_name in block.vars: - if var_name == "double_buffer_0": continue + if var_name == "double_buffer_0": + continue var = block.var(var_name) - if not var.persistable: 
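# Restatement of the size estimate used for the fuse budget above: element
# count times element width in bytes, reported in MB (helper name is
# illustrative only).
from functools import reduce

def var_size_mb(shape, dtype_bytes):
    assert -1 not in shape  # dynamic dimensions cannot be sized
    return reduce(lambda x, y: x * y, shape) * dtype_bytes / 1024.0 / 1024.0

# var_size_mb([1024, 1024], 4) == 4.0  (a 1024 x 1024 float32 tensor)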
continue + if not var.persistable: + continue if not var_name in var_info: var_info[var_name] = [] if not prog in var_info[var_name]: @@ -5753,23 +6414,30 @@ class PipelineOptimizer(object): for prog in var_info[var_name]: block = prog.block(0) for op in block.ops: - if op.type == "recv_v2" or op.type == "create_py_reader" or \ - op.type == "read" or op.type == "update_loss_scaling": + if ( + op.type == "recv_v2" + or op.type == "create_py_reader" + or op.type == "read" + or op.type == "update_loss_scaling" + ): continue # We have processed lr related vars if op.attr(self._op_role_key) == int( - self._op_role.Optimize.LRSched): + self._op_role.Optimize.LRSched + ): continue if var_name in op.desc.output_arg_names(): assert var_name not in write_info, ( "two sections write the same var({}): second " - "op {}.".format(var_name, op)) + "op {}.".format(var_name, op) + ) write_info[var_name] = prog break for var_name in var_info.keys(): # Case 1: read only variables, no special process - if not var_name in write_info: continue + if not var_name in write_info: + continue # Case 2: one write multiple reads write_prog = write_info[var_name] @@ -5778,7 +6446,8 @@ class PipelineOptimizer(object): write_dev_index = int(write_device.split(':')[1]) all_progs = var_info[var_name] for prog in all_progs: - if prog == write_prog: continue + if prog == write_prog: + continue read_block = prog.block(0) read_device = self._get_device_info(read_block) read_dev_index = int(read_device.split(':')[1]) @@ -5799,69 +6468,60 @@ class PipelineOptimizer(object): 'X': write_block.var(var_name), }, attrs={ - self._op_device_key: - write_device, - 'use_calc_stream': - False, + self._op_device_key: write_device, + 'use_calc_stream': False, # A trick to make the role LRSched to avoid copy every # microbatch - self._op_role_key: - self._op_role.LRSched, - 'peer': - read_dev_index, - 'ring_id': - ring_id - }) + self._op_role_key: self._op_role.LRSched, + 'peer': read_dev_index, + 'ring_id': ring_id, + }, + ) read_block._insert_op( index=0, type='recv_v2', outputs={'Out': [read_block.var(var_name)]}, attrs={ - 'out_shape': - read_block.var(var_name).shape, - 'dtype': - read_block.var(var_name).dtype, - self._op_device_key: - read_device, - 'use_calc_stream': - False, + 'out_shape': read_block.var(var_name).shape, + 'dtype': read_block.var(var_name).dtype, + self._op_device_key: read_device, + 'use_calc_stream': False, # A trick to make the role LRSched to avoid copy every # microbatch - self._op_role_key: - self._op_role.LRSched, - 'peer': - write_dev_index, - 'ring_id': - ring_id - }) + self._op_role_key: self._op_role.LRSched, + 'peer': write_dev_index, + 'ring_id': ring_id, + }, + ) read_block._insert_op( index=1, type='c_sync_comm_stream', inputs={'X': [read_block.var(var_name)]}, outputs={'Out': [read_block.var(var_name)]}, attrs={ - self._op_device_key: - read_device, + self._op_device_key: read_device, # A trick to make the role LRSched to avoid copy every # microbatch - self._op_role_key: - self._op_role.LRSched, - 'ring_id': - ring_id - }) + self._op_role_key: self._op_role.LRSched, + 'ring_id': ring_id, + }, + ) def _is_gradient_clip_op(self, op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/gradient_clip") + return op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/gradient_clip") def _is_regularization_op(self, op): - return op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/regularization") + return 
op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + ).startswith("/regularization") def _is_weight_decay_op(self, op): # in AdamW namescope is /optimizer_*/weight decay/ - return op.desc.has_attr("op_namescope") \ - and 'weight decay' in op.desc.attr("op_namescope") + return op.desc.has_attr( + "op_namescope" + ) and 'weight decay' in op.desc.attr("op_namescope") def _get_input_output_info(self, block): ''' @@ -5884,7 +6544,8 @@ class PipelineOptimizer(object): """ optimize forward send's sync_comm_stream schedule """ - if self.schedule_mode != '1F1B': return + if self.schedule_mode != '1F1B': + return block = program.block(0) @@ -5896,11 +6557,13 @@ class PipelineOptimizer(object): break # last pipeline stage - if backward_recv_index is None: return + if backward_recv_index is None: + return offset = 0 for index, op in enumerate(list(block.ops)): - if index >= backward_recv_index: break + if index >= backward_recv_index: + break if op.type == 'c_sync_comm_stream' and op.has_attr('pipeline_flag'): var_name = op.input_arg_names[0] var = block.var(var_name) @@ -5917,7 +6580,8 @@ class PipelineOptimizer(object): type='nop', inputs={'X': [var]}, outputs={'Out': [var]}, - attrs={self._op_role_key: self._op_role.Backward}) + attrs={self._op_role_key: self._op_role.Backward}, + ) block._sync_with_cpp() def _mv_head_recv(self, program): @@ -5933,10 +6597,17 @@ class PipelineOptimizer(object): insert_index = None op = program.global_block().ops[i] op_role = int(op.attr(self._op_role_key)) - if op_role == int( - self._op_role.Backward) and backward_insert_index is None: + if ( + op_role == int(self._op_role.Backward) + and backward_insert_index is None + ): backward_insert_index = i - if op.type != "partial_recv" and op.type != "partial_allgather" and op.type != "nop" and op.type != "recv_v2": + if ( + op.type != "partial_recv" + and op.type != "partial_allgather" + and op.type != "nop" + and op.type != "recv_v2" + ): continue if op_role == int(self._op_role.Forward): if i == forward_insert_index: @@ -5956,11 +6627,13 @@ class PipelineOptimizer(object): op_outputs = dict() for name in op.output_names: op_outputs[name] = op.output(name) - block._insert_op_without_sync(index=insert_index, - type=op.type, - inputs=op_inputs, - outputs=op_outputs, - attrs=op.all_attrs()) + block._insert_op_without_sync( + index=insert_index, + type=op.type, + inputs=op_inputs, + outputs=op_outputs, + attrs=op.all_attrs(), + ) block._remove_op(i + 1) if op_role == int(self._op_role.Forward): forward_insert_index += 1 @@ -5995,13 +6668,12 @@ class PipelineOptimizer(object): "However, some backward op don't need this var(NoNeedBufferVars), " "there will be no error at this time.\n" "So please check these persistable vars which changed in " - "forward and used in backward:\n{}".format(used_in_backward)) + "forward and used in backward:\n{}".format(used_in_backward) + ) - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): main_block = loss.block self.origin_main_block = main_block main_program = main_block.program @@ -6021,8 +6693,9 @@ class PipelineOptimizer(object): 'mp_rank', ] for key in required_keys: - assert key in pipeline_opt, \ - 'Please use pipeline with fleet to use {}.'.format(key) + assert ( + key in pipeline_opt + ), 'Please use pipeline with fleet to use {}.'.format(key) self.local_rank = pipeline_opt['local_rank'] self.schedule_mode = 
pipeline_opt['schedule_mode'] self.micro_batch_size = pipeline_opt['micro_batch_size'] @@ -6036,11 +6709,14 @@ class PipelineOptimizer(object): assert 0 <= self.mp_rank < self.mp_degree optimize_ops, params_grads = self._optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set) + loss, startup_program, parameter_list, no_grad_set + ) self._param_device_map = self._origin_optimizer._param_device_map - self.output_var_to_op, self.input_var_to_op = \ - self._get_input_output_info(main_block) + ( + self.output_var_to_op, + self.input_var_to_op, + ) = self._get_input_output_info(main_block) # Step1: add default op_device attribute for ops. self._add_op_device_attr(main_block) device_list = self._check_validation(main_block) @@ -6058,7 +6734,8 @@ class PipelineOptimizer(object): sorted_device_list = sorted(device_list, key=cmp_to_key(device_cmp)) assert sorted_device_list == device_list, ( "With pipeline parallelism, you must use gpu devices one after " - "another in the order of their ids.") + "another in the order of their ids." + ) # Step2: add send and recv ops between section boundaries self._insert_sendrecv_ops_for_boundaries(main_block) @@ -6074,7 +6751,8 @@ class PipelineOptimizer(object): assert self.local_rank < len(device_list), ( "Manually specified " "pipeline stage must be less than total number of pipeline " - "stages.") + "stages." + ) else: self.local_rank %= len(device_list) # Step3.5: optimize forward send sync_comm to overlap send and recv @@ -6099,7 +6777,8 @@ class PipelineOptimizer(object): # Step6: Split startup program new_startup_program = self._split_startup_program( - startup_program, self.local_rank) + startup_program, self.local_rank + ) startup_program._pipeline_opt = { "startup_program": new_startup_program, @@ -6141,12 +6820,18 @@ class PipelineOptimizer(object): "num_microbatches": self._num_microbatches, "start_cpu_core_id": self._start_cpu_core_id, } - return optimize_ops, params_grads, program_list, self._pipeline_pair, self._pp_ring_map + return ( + optimize_ops, + params_grads, + program_list, + self._pipeline_pair, + self._pp_ring_map, + ) class RecomputeOptimizer(Optimizer): """ - :api_attr: Static Graph + :api_attr: Static Graph Recompute Optimizer Wrapper @@ -6225,8 +6910,8 @@ class RecomputeOptimizer(Optimizer): checkpoints, list ), "_checkpoints should be a list of Variable or a list of String" for ckpt in checkpoints: - assert ( - isinstance(ckpt, str) or isinstance(ckpt, Variable) + assert isinstance(ckpt, str) or isinstance( + ckpt, Variable ), "_checkpoints should be a list of Variable or a list of String" self._checkpoints = checkpoints @@ -6237,7 +6922,7 @@ class RecomputeOptimizer(Optimizer): @framework.deprecate_stat_dict def load(self, state_dict): """ - :api_attr: Static Graph + :api_attr: Static Graph load function is not supported by Recompute Optimizer for now. 
:return: None @@ -6273,7 +6958,8 @@ class RecomputeOptimizer(Optimizer): print(cpt.get_exception_message(e)) """ raise NotImplementedError( - "load function is not supported by Recompute Optimizer for now") + "load function is not supported by Recompute Optimizer for now" + ) def apply_gradients(self, params_grads): """ @@ -6331,14 +7017,16 @@ class RecomputeOptimizer(Optimizer): shape=self.checkpoint_shape, dtype=self._main_program.global_block().var(varname).dtype, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) fetch_var = self._main_program.global_block().create_var( name=fetched_var_name, shape=self.checkpoint_shape, dtype=self._main_program.global_block().var(varname).dtype, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) return pinned_var_name, fetched_var_name @@ -6363,21 +7051,25 @@ class RecomputeOptimizer(Optimizer): shape=self.checkpoint_shape, dtype=self._main_program.global_block().var(var.name).dtype, persistable=False, - stop_gradient=True) - block.append_op(type='fill_constant', - outputs={'Out': varname}, - attrs={ - "shape": var.shape, - "dtype": var.dtype, - "value": 0.0, - "place_type": 2, - OP_ROLE_KEY: op_role, - }) + stop_gradient=True, + ) + block.append_op( + type='fill_constant', + outputs={'Out': varname}, + attrs={ + "shape": var.shape, + "dtype": var.dtype, + "value": 0.0, + "place_type": 2, + OP_ROLE_KEY: op_role, + }, + ) return - def _insert_async_memcpy_op(self, insert_idx, src_varname, dst_varname, - op_role, dst_place_type): + def _insert_async_memcpy_op( + self, insert_idx, src_varname, dst_varname, op_role, dst_place_type + ): OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName() self.block._insert_op_without_sync( insert_idx, @@ -6386,22 +7078,26 @@ class RecomputeOptimizer(Optimizer): outputs={ 'Out': [self._main_program.global_block().var(dst_varname)] }, - attrs={ - "dst_place_type": int(dst_place_type), - OP_ROLE_KEY: op_role - }) + attrs={"dst_place_type": int(dst_place_type), OP_ROLE_KEY: op_role}, + ) def _insert_fetch_op(self, idx, varname): - assert varname in self.checkpoint_name2pinned_name, "Try to fetch {} from Pinned Memory, but it is NOT a checkpoint".format( - varname) + assert ( + varname in self.checkpoint_name2pinned_name + ), "Try to fetch {} from Pinned Memory, but it is NOT a checkpoint".format( + varname + ) pinned_varname = self.checkpoint_name2pinned_name[varname] fetch_varname = self.checkpoint_name2fetch_name[varname] self._insert_async_memcpy_op(idx, pinned_varname, fetch_varname, 1, 1) def _insert_offload_op(self, idx, varname): - assert varname in self.checkpoint_name2pinned_name, "Try to offload {} to Pinned Memory, but it is NOT a checkpoint".format( - varname) + assert ( + varname in self.checkpoint_name2pinned_name + ), "Try to offload {} to Pinned Memory, but it is NOT a checkpoint".format( + varname + ) pinned_varname = self.checkpoint_name2pinned_name[varname] self._insert_async_memcpy_op(idx, varname, pinned_varname, 0, 2) @@ -6410,8 +7106,9 @@ class RecomputeOptimizer(Optimizer): pass def _record_fetch_op(self, idx): - assert len(self.un_fetch_checkpoint_names - ) > 0, "Could NOT found checkpoint to fetch" + assert ( + len(self.un_fetch_checkpoint_names) > 0 + ), "Could NOT found checkpoint to fetch" checkpoint_name = self.un_fetch_checkpoint_names.pop(-1) logging.debug("Record fetch [{}]".format(checkpoint_name)) self.idx2insertions[idx] = ("fetch", checkpoint_name) @@ -6420,14 +7117,18 @@ class RecomputeOptimizer(Optimizer): def _record_offload_op(self, idx, 
checkpoint_name): expected_checkpoint_name = self.un_offload_checkpoint_names.pop(0) - assert checkpoint_name == expected_checkpoint_name, "expected to offload [{}] but got [{}]".format( - expected_checkpoint_name, checkpoint_name) + assert ( + checkpoint_name == expected_checkpoint_name + ), "expected to offload [{}] but got [{}]".format( + expected_checkpoint_name, checkpoint_name + ) logging.debug("Record offload [{}]".format(checkpoint_name)) self.idx2insertions[idx] = ("offload", checkpoint_name) def _record_sync_op(self, idx, checkpoint_name): - assert checkpoint_name not in self.synced_checkpoints, "Try to sync the checkpoint [{}] twice".format( - checkpoint_name) + assert ( + checkpoint_name not in self.synced_checkpoints + ), "Try to sync the checkpoint [{}] twice".format(checkpoint_name) self.synced_checkpoints.add(checkpoint_name) logging.debug("Record offload sync [{}]".format(checkpoint_name)) self.idx2insertions[idx] = ("sync", checkpoint_name) @@ -6450,14 +7151,16 @@ class RecomputeOptimizer(Optimizer): break assert self.bw_strart_op_idx < len( - self.block.ops), "Could NOT found backword op in prog" + self.block.ops + ), "Could NOT found backword op in prog" # fetch second to last checkpoint at the beginning of BW fetched_checkpoint_varname = self._record_fetch_op( - self.bw_strart_op_idx) + self.bw_strart_op_idx + ) last_last_fetch_checkpoint = None - for i, op in enumerate(self.block.ops[self.bw_strart_op_idx:]): + for i, op in enumerate(self.block.ops[self.bw_strart_op_idx :]): idx = self.bw_strart_op_idx + i input_vars = op.desc.input_arg_names() @@ -6467,28 +7170,39 @@ class RecomputeOptimizer(Optimizer): # fetch the offloade checkpoint when the first usage of its previous one if self.checkpoint_usage_count[input_var] == 0: # TODO (JZ-LIANG) sync memcpy_stream if extra stream for memcpy - second_to_last_fetch_checkpoint = fetched_checkpoint_varname + second_to_last_fetch_checkpoint = ( + fetched_checkpoint_varname + ) # there is NO fetch ahead the first checkpoint if input_var != self.sorted_checkpoint_names[0]: - fetched_checkpoint_varname = self._record_fetch_op( - idx) + fetched_checkpoint_varname = ( + self._record_fetch_op(idx) + ) # should check the current used checkpoint is ths last fetch one - assert second_to_last_fetch_checkpoint == input_var, "Current recompute segment should use [{}] BUT got [{}]".format( - second_to_last_fetch_checkpoint, input_var) + assert ( + second_to_last_fetch_checkpoint == input_var + ), "Current recompute segment should use [{}] BUT got [{}]".format( + second_to_last_fetch_checkpoint, input_var + ) # rename self.block.ops[idx]._rename_input( input_var, - self.checkpoint_name2fetch_name[input_var]) + self.checkpoint_name2fetch_name[input_var], + ) self.checkpoint_usage_count[input_var] += 1 else: raise ValueError( "use checkpoint [{}] before fetch in BW".format( - input_var)) + input_var + ) + ) - assert len(self.un_fetch_checkpoint_names - ) == 0, "{} checkpoints have NOT been Recorded".format( - self.un_fetch_checkpoint_names) + assert ( + len(self.un_fetch_checkpoint_names) == 0 + ), "{} checkpoints have NOT been Recorded".format( + self.un_fetch_checkpoint_names + ) def _update_backward(self): if len(self.idx2insertions) == 0: @@ -6500,15 +7214,18 @@ class RecomputeOptimizer(Optimizer): if operation == "fetch": self._insert_fetch_op(op_idx, checkpoint_name) logging.debug( - "Insert [{}] fetch op.".format(checkpoint_name)) + "Insert [{}] fetch op.".format(checkpoint_name) + ) del self.idx2insertions[op_idx] elif operation == 
"sync": self._insert_sync_op(op_idx, checkpoint_name) logging.debug("Sync [{}] fetch op.".format(checkpoint_name)) self.block._sync_with_cpp() - assert len( - self.idx2insertions) == 0, "{} checkpoints left un-Fecthed".format( - [ele[1] for ele in self.idx2insertions.values()]) + assert ( + len(self.idx2insertions) == 0 + ), "{} checkpoints left un-Fecthed".format( + [ele[1] for ele in self.idx2insertions.values()] + ) def _parse_forward(self): @@ -6521,7 +7238,7 @@ class RecomputeOptimizer(Optimizer): for checkpoint_name in self.un_offload_checkpoint_names: self.checkpoint_usage_count_and_idx[checkpoint_name] = { 'count': 0, - 'idx': -1 + 'idx': -1, } self.synced_checkpoints = set() self.fw_strart_op_idx = len(self.block.ops) @@ -6531,11 +7248,13 @@ class RecomputeOptimizer(Optimizer): break assert self.fw_strart_op_idx < len( - self.block.ops), "Could NOT found Forward op in prog" + self.block.ops + ), "Could NOT found Forward op in prog" last_offload_checkpoint = None for i, op in enumerate( - self.block.ops[self.fw_strart_op_idx:self.bw_strart_op_idx]): + self.block.ops[self.fw_strart_op_idx : self.bw_strart_op_idx] + ): idx = self.fw_strart_op_idx + i output_vars = op.desc.output_arg_names() @@ -6543,91 +7262,129 @@ class RecomputeOptimizer(Optimizer): for output_var in output_vars: if output_var in need_offload_checkpoint_names: - assert len( - output_vars - ) == 1, "chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format( - output_var, op) + assert ( + len(output_vars) == 1 + ), "chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format( + output_var, op + ) if output_var in self.un_offload_checkpoint_names: # insert sync op if last checkpoint has not been sync if last_offload_checkpoint != None: - if self.checkpoint_usage_count_and_idx[ - last_offload_checkpoint]['count'] == 0: - self._record_sync_op(idx, - last_offload_checkpoint) + if ( + self.checkpoint_usage_count_and_idx[ + last_offload_checkpoint + ]['count'] + == 0 + ): + self._record_sync_op( + idx, last_offload_checkpoint + ) else: - last_usage_idx = self.checkpoint_usage_count_and_idx[ - last_offload_checkpoint]['idx'] - assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should large than 0".format( - last_offload_checkpoint) - self._record_sync_op(last_usage_idx + 1, - last_offload_checkpoint) + last_usage_idx = ( + self.checkpoint_usage_count_and_idx[ + last_offload_checkpoint + ]['idx'] + ) + assert ( + last_usage_idx > 0 + ), "last_usage_idx of checkpoint [{}] should large than 0".format( + last_offload_checkpoint + ) + self._record_sync_op( + last_usage_idx + 1, last_offload_checkpoint + ) # insert offload op after the checkpoint's generation op self._record_offload_op(idx + 1, output_var) last_offload_checkpoint = output_var else: raise ValueError( - "There should be just ONE op that output checkpoint [{}]" - .format(output_var)) + "There should be just ONE op that output checkpoint [{}]".format( + output_var + ) + ) # need to sync the last need to offload checkpoint before the last checkpoint as output op if output_var == last_checkpoint: - assert len( - output_vars - ) == 1, "chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format( - output_var, op) - assert last_offload_checkpoint == self.sorted_checkpoint_names[ - -2], "the last offload chekpoint before [{}] is suppose to be [{}], but got [{}]".format( - last_checkpoint, self.sorted_checkpoint_names[-2], - last_offload_checkpoint) + assert ( + len(output_vars) == 1 + ), 
"chekpoint should be the only Output of a certain op, but [{}] is from [{}]".format( + output_var, op + ) + assert ( + last_offload_checkpoint + == self.sorted_checkpoint_names[-2] + ), "the last offload chekpoint before [{}] is suppose to be [{}], but got [{}]".format( + last_checkpoint, + self.sorted_checkpoint_names[-2], + last_offload_checkpoint, + ) # sync if last checkpoint has not been sync - if self.checkpoint_usage_count_and_idx[ - last_offload_checkpoint]['idx'] == 0: + if ( + self.checkpoint_usage_count_and_idx[ + last_offload_checkpoint + ]['idx'] + == 0 + ): self._record_sync_op(idx, last_offload_checkpoint) else: last_usage_idx = self.checkpoint_usage_count_and_idx[ - last_offload_checkpoint]['idx'] - assert last_usage_idx > 0, "last_usage_idx of checkpoint [{}] should large than 0".format( - last_offload_checkpoint) - self._record_sync_op(last_usage_idx + 1, - last_offload_checkpoint) + last_offload_checkpoint + ]['idx'] + assert ( + last_usage_idx > 0 + ), "last_usage_idx of checkpoint [{}] should large than 0".format( + last_offload_checkpoint + ) + self._record_sync_op( + last_usage_idx + 1, last_offload_checkpoint + ) # record checkpoint usage for input_var in input_vars: if input_var in need_offload_checkpoint_names: - assert input_var not in self.synced_checkpoints, "checkpoint [{}] used after sync".format( - input_var) + assert ( + input_var not in self.synced_checkpoints + ), "checkpoint [{}] used after sync".format(input_var) self.checkpoint_usage_count_and_idx[input_var]['count'] += 1 self.checkpoint_usage_count_and_idx[input_var]['idx'] = idx - assert len(self.un_offload_checkpoint_names - ) == 0, "{} checkpoints have NOT been Recorded".format( - self.un_fetch_checkpoint_names) + assert ( + len(self.un_offload_checkpoint_names) == 0 + ), "{} checkpoints have NOT been Recorded".format( + self.un_fetch_checkpoint_names + ) assert len(self.synced_checkpoints) == len( need_offload_checkpoint_names ), "{} checkpoints have NOT been Recorded".format( - set(need_offload_checkpoint_names) - set(self.synced_checkpoints)) + set(need_offload_checkpoint_names) - set(self.synced_checkpoints) + ) def _update_forward(self): if len(self.idx2insertions) == 0: return for op_idx in reversed( - range(self.fw_strart_op_idx, self.bw_strart_op_idx)): + range(self.fw_strart_op_idx, self.bw_strart_op_idx) + ): if op_idx in self.idx2insertions: operation, checkpoint_name = self.idx2insertions[op_idx] if operation == "offload": self._insert_offload_op(op_idx, checkpoint_name) logging.debug( - "Insert [{}] offload op.".format(checkpoint_name)) + "Insert [{}] offload op.".format(checkpoint_name) + ) del self.idx2insertions[op_idx] elif operation == "sync": self._insert_sync_op(op_idx, checkpoint_name) logging.debug( - "Insert [{}] offload_sync op.".format(checkpoint_name)) + "Insert [{}] offload_sync op.".format(checkpoint_name) + ) del self.idx2insertions[op_idx] self.block._sync_with_cpp() - assert len(self.idx2insertions - ) == 0, "{} checkpoints left un-Offloaded".format( - [ele[1] for ele in self.idx2insertions.values()]) + assert ( + len(self.idx2insertions) == 0 + ), "{} checkpoints left un-Offloaded".format( + [ele[1] for ele in self.idx2insertions.values()] + ) def _check_offload_fetch(self): # TODO(JZ-LIANG) the single stream offload need no sync @@ -6647,21 +7404,28 @@ class RecomputeOptimizer(Optimizer): startup_program = paddle.static.default_startup_program() with program_guard(self._main_program, startup_program): - assert len(self.checkpoint_shape) > 0, ( - "checkpoints shape 
{} should be an non empty list like: [12, 512, 1024]" - .format(self.checkpoint_shape)) - assert all([ele > 0 for ele in self.checkpoint_shape]), ( - "all ele in checkpoints shape {} should be a determined integer larger than 0" - .format(self.checkpoint_shape)) + assert ( + len(self.checkpoint_shape) > 0 + ), "checkpoints shape {} should be an non empty list like: [12, 512, 1024]".format( + self.checkpoint_shape + ) + assert all( + [ele > 0 for ele in self.checkpoint_shape] + ), "all ele in checkpoints shape {} should be a determined integer larger than 0".format( + self.checkpoint_shape + ) self.checkpoint_name2pinned_name = dict() self.checkpoint_name2fetch_name = dict() for checkpoint_varname in self.sorted_checkpoint_names: pinned_var_name, fetch_var_name = self._creat_vars( - checkpoint_varname) + checkpoint_varname + ) self.checkpoint_name2pinned_name[ - checkpoint_varname] = pinned_var_name + checkpoint_varname + ] = pinned_var_name self.checkpoint_name2fetch_name[ - checkpoint_varname] = fetch_var_name + checkpoint_varname + ] = fetch_var_name self._append_fill_constant_ops(startup_program) # TODO (JZ-LIANG) to provide two offload stragtegy in future # step 2. parse & update FW: rename, offload, sync @@ -6675,12 +7439,14 @@ class RecomputeOptimizer(Optimizer): return - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): """ call append_backward with checkpoints. @@ -6722,12 +7488,14 @@ class RecomputeOptimizer(Optimizer): no_grad_set=None) print("Finished backward") """ - assert (self._checkpoints - is not None), "You should call _set_checkpoints first" + assert ( + self._checkpoints is not None + ), "You should call _set_checkpoints first" if framework._non_static_mode(): raise NotImplementedError( - "DyGraph current does not support recompute") + "DyGraph current does not support recompute" + ) self._dtype = loss.dtype program = loss.block.program @@ -6745,12 +7513,15 @@ class RecomputeOptimizer(Optimizer): loss, parameter_list, no_grad_set, - checkpoints=checkpoint_vars) + checkpoints=checkpoint_vars, + ) else: - params_grads = append_backward(loss, - parameter_list, - no_grad_set, - checkpoints=checkpoint_vars) + params_grads = append_backward( + loss, + parameter_list, + no_grad_set, + checkpoints=checkpoint_vars, + ) if self.enable_offload: self.sorted_checkpoint_names = sorted_checkpoint_names @@ -6797,39 +7568,43 @@ class RecomputeOptimizer(Optimizer): print("Finished apply_optimize") """ - func = self._optimizer.apply_optimize if hasattr( - self._optimizer, - 'apply_optimize') else self._optimizer._apply_optimize - return func(loss, - startup_program=startup_program, - params_grads=params_grads) - - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + func = ( + self._optimizer.apply_optimize + if hasattr(self._optimizer, 'apply_optimize') + else self._optimizer._apply_optimize + ) + return func( + loss, startup_program=startup_program, params_grads=params_grads + ) + + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): assert isinstance(loss, Variable), "The loss should be an Variable." 
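The RecomputeOptimizer machinery reformatted above records where each user-marked checkpoint is produced, optionally offloads it to pinned host memory with async memcpy ops, fetches it back at the start of the corresponding backward segment, and recomputes the activations in between. The following framework-free sketch is not part of the patch; the toy `segments` list of NumPy ops and the `offloaded` dict are assumptions standing in for the real program rewriting, and they only show the basic trade: keep the checkpoints, rebuild everything else on demand.

```python
# Illustrative sketch only: recompute-with-checkpoints on a toy chain of ops.
# Only the segment inputs (checkpoints) are stored; intermediate activations
# are re-created in the backward pass. The 'offloaded' dict plays the role of
# the pinned-memory copies inserted by the memcpy ops above.
import numpy as np

segments = [
    [np.tanh, np.exp],  # segment 0: checkpoint its input, recompute inside
    [np.sin, np.tanh],  # segment 1
]


def forward(x):
    offloaded = {}  # checkpoint name -> host copy
    for i, segment in enumerate(segments):
        offloaded['ckpt_%d' % i] = x.copy()  # "offload" the segment input
        for op in segment:  # intermediates are NOT kept
            x = op(x)
    return x, offloaded


def backward(grad_out, offloaded):
    # walk segments in reverse; rebuild each segment's intermediates on demand
    for i in reversed(range(len(segments))):
        x = offloaded['ckpt_%d' % i]  # "fetch" the checkpoint back
        acts = [x]
        for op in segments[i]:  # recompute the dropped activations
            acts.append(op(acts[-1]))
        # (a real implementation would now backprop through `acts`;
        #  here we only show that they are available again)
        grad_out = grad_out * np.ones_like(acts[-1])
    return grad_out


y, ckpts = forward(np.linspace(-1.0, 1.0, 8))
_ = backward(np.ones(8), ckpts)
```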
- assert (self._checkpoints - is not None), "You should call _set_checkpoints first" + assert ( + self._checkpoints is not None + ), "You should call _set_checkpoints first" if framework._non_static_mode(): raise NotImplementedError( - "DyGraph current does not support recompute") - params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + "DyGraph current does not support recompute" + ) + params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) - optimize_ops = self.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + optimize_ops = self.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads class LookaheadOptimizer(object): r""" - :api_attr: Static Graph + :api_attr: Static Graph This implements the Lookahead optimizer of the paper : https://arxiv.org/abs/1907.08610. @@ -6843,7 +7618,7 @@ class LookaheadOptimizer(object): slow\_param_t &= slow\_param_{t-1} + \\alpha * (fast\_param_{t-1} - slow\_param_{t-1}) - fast\_param_t &= slow\_param_t + fast\_param_t &= slow\_param_t Args: inner_optimizer (Optimizer): The optimizer that update fast params step by step. @@ -6892,11 +7667,11 @@ class LookaheadOptimizer(object): if framework._non_static_mode(): raise Exception("In dygraph, don't support LookaheadOptimizer.") - assert (inner_optimizer is not None), "inner optimizer can not be None" + assert inner_optimizer is not None, "inner optimizer can not be None" assert ( 0.0 <= alpha <= 1.0 ), "alpha should be larger or equal to 0.0, and less or equal than 1.0" - assert (isinstance(k, int) and k > 0), "k should be a positive integer" + assert isinstance(k, int) and k > 0, "k should be a positive integer" self.inner_optimizer = inner_optimizer self.alpha = alpha @@ -6907,7 +7682,8 @@ class LookaheadOptimizer(object): # Apply inner optimizer to the main_program mini_out = self.inner_optimizer.minimize( - loss, startup_program=startup_program) + loss, startup_program=startup_program + ) # Get startup_program and main_program if startup_program is None: @@ -6919,58 +7695,68 @@ class LookaheadOptimizer(object): param_to_slow = {} for param in params: fast_var = main_block.var(param) - assert (fast_var is not None) - slow_var = main_block.create_var(name=param + "@SLOW", - shape=fast_var.shape, - dtype=fast_var.dtype, - persistable=True) + assert fast_var is not None + slow_var = main_block.create_var( + name=param + "@SLOW", + shape=fast_var.shape, + dtype=fast_var.dtype, + persistable=True, + ) param_to_slow[param] = slow_var # add some vars to the startup_program startup_block = startup_program.global_block() for param in params: fast_var = startup_block.var(param) - assert (fast_var is not None) - slow_var = startup_block.create_var(name=param + "@SLOW", - shape=fast_var.shape, - dtype=fast_var.dtype, - persistable=True) + assert fast_var is not None + slow_var = startup_block.create_var( + name=param + "@SLOW", + shape=fast_var.shape, + dtype=fast_var.dtype, + persistable=True, + ) - startup_block.append_op(type="assign", - inputs={"X": fast_var}, - outputs={"Out": slow_var}) + startup_block.append_op( + type="assign", inputs={"X": fast_var}, outputs={"Out": slow_var} + ) with framework.program_guard(main_block.program, startup_program): # Add Var k to main prog and startup prog - k = layers.create_global_var(name="lookahead_k", - shape=[1], - 
value=int(self.k), - dtype='int32', - persistable=True) + k = layers.create_global_var( + name="lookahead_k", + shape=[1], + value=int(self.k), + dtype='int32', + persistable=True, + ) # Add Var alpha to main prog and startup prog - alpha = layers.create_global_var(name="lookahead_alpha", - shape=[1], - value=float(self.alpha), - dtype='float32', - persistable=True) + alpha = layers.create_global_var( + name="lookahead_alpha", + shape=[1], + value=float(self.alpha), + dtype='float32', + persistable=True, + ) # Add Var step - step = layers.create_global_var(name="lookahead_step", - shape=[1], - value=int(0), - dtype='int32', - persistable=True) + step = layers.create_global_var( + name="lookahead_step", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + ) layers.increment(x=step, value=1.0, in_place=True) # lookahead - zero_var = layers.fill_constant(shape=[1], - dtype='float32', - value=0.0) + zero_var = layers.fill_constant( + shape=[1], dtype='float32', value=0.0 + ) - one_var = layers.fill_constant(shape=[1], - dtype='float32', - value=1.0) + one_var = layers.fill_constant( + shape=[1], dtype='float32', value=1.0 + ) mod = layers.elementwise_mod(step, k) with layers.control_flow.Switch() as switch: @@ -6986,8 +7772,9 @@ class LookaheadOptimizer(object): tmp_var = layers.elementwise_add( layers.elementwise_mul(fast_var, alpha), layers.elementwise_mul( - slow_var, - layers.elementwise_sub(one_var, alpha))) + slow_var, layers.elementwise_sub(one_var, alpha) + ), + ) layers.assign(input=tmp_var, output=slow_var) layers.assign(input=tmp_var, output=fast_var) with switch.default(): @@ -7057,11 +7844,13 @@ class GradientMergeOptimizer(object): raise Exception( "In dygraph, we don't support GradientMergeOptimizer." "You can do Gradient merge by yourself with k-times forward + backward, " - "and one-time optimizer.minimize()") + "and one-time optimizer.minimize()" + ) - assert (inner_optimizer is not None), "inner optimizer can not be None" - assert (isinstance(k_steps, int) - and k_steps > 0), "k_steps should be a positive integer" + assert inner_optimizer is not None, "inner optimizer can not be None" + assert ( + isinstance(k_steps, int) and k_steps > 0 + ), "k_steps should be a positive integer" self.inner_optimizer = inner_optimizer self.k_steps = k_steps @@ -7075,12 +7864,14 @@ class GradientMergeOptimizer(object): def _set_avg(self, avg): self.avg = avg - def backward(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ): assert isinstance(loss, Variable), "The loss should be an Variable." 
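The LookaheadOptimizer hunks above implement the rule stated in the class docstring: the inner optimizer updates the fast weights every step, and every `k` steps the slow weights move a fraction `alpha` toward the fast weights, after which the fast weights are reset to the slow ones. A framework-free sketch of that schedule follows; it is not part of the patch, and the quadratic toy loss and plain SGD inner step are assumptions for illustration.

```python
# Illustrative sketch only: the Lookahead schedule on a toy quadratic loss,
# with plain SGD standing in for the wrapped inner optimizer.
import numpy as np


def lookahead_sgd(x0, lr=0.1, alpha=0.5, k=5, steps=50):
    fast = np.array(x0, dtype=np.float64)
    slow = fast.copy()
    for step in range(1, steps + 1):
        grad = 2.0 * fast  # d/dx of loss(x) = x^2
        fast = fast - lr * grad  # inner (fast) update, every step
        if step % k == 0:  # every k steps:
            slow = slow + alpha * (fast - slow)  # slow_t = slow_{t-1} + alpha*(fast - slow_{t-1})
            fast = slow.copy()  # fast_t = slow_t
    return slow


print(lookahead_sgd([2.0, -3.0]))  # converges toward the minimum at 0
```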
assert ( parameter_list is None @@ -7090,7 +7881,8 @@ class GradientMergeOptimizer(object): ), "The no_grad_set should be None when using GradientMergeOptimizer" params_grads = self.inner_optimizer.backward( - loss, startup_program=startup_program) + loss, startup_program=startup_program + ) return params_grads def apply_optimize(self, loss, startup_program, params_grads): @@ -7102,26 +7894,33 @@ class GradientMergeOptimizer(object): def _is_the_backward_op(self, op): op_maker = core.op_proto_and_checker_maker backward = core.op_proto_and_checker_maker.OpRole.Backward - if op_maker.kOpRoleVarAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward): + if op_maker.kOpRoleVarAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(backward): return True return False def _remove_op_role_var(self, param, grad): op_maker = core.op_proto_and_checker_maker op = grad.op - assert self._is_the_backward_op(op), \ - 'grad.op={} is not the backward op which produces the grad={}' \ - .format(op, grad.name) + assert self._is_the_backward_op( + op + ), 'grad.op={} is not the backward op which produces the grad={}'.format( + op, grad.name + ) block = grad.block var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()] - assert param.name in var_attr, \ - 'when using GradientMergeOptimizer, param={} must be in var_attr={}' \ - .format(param.name, var_attr) - assert grad.name in var_attr, \ - 'when using GradientMergeOptimizer, grad={} must be in var_attr={}' \ - .format(param.name, var_attr) + assert ( + param.name in var_attr + ), 'when using GradientMergeOptimizer, param={} must be in var_attr={}'.format( + param.name, var_attr + ) + assert ( + grad.name in var_attr + ), 'when using GradientMergeOptimizer, grad={} must be in var_attr={}'.format( + param.name, var_attr + ) # remove (param, grad) from op_role_var var_attr.remove(param.name) @@ -7148,53 +7947,54 @@ class GradientMergeOptimizer(object): def _get_gm_cond_var(self, main_block): # Add const var - k_step_var = layers.create_global_var(name="gradient_merge_k", - shape=[1], - value=int(self.k_steps), - dtype='int32', - persistable=True, - force_cpu=True) - - zero_var = layers.create_global_var(name="gradient_merge_zero", - shape=[1], - value=int(0), - dtype='int32', - persistable=True, - force_cpu=True) + k_step_var = layers.create_global_var( + name="gradient_merge_k", + shape=[1], + value=int(self.k_steps), + dtype='int32', + persistable=True, + force_cpu=True, + ) + + zero_var = layers.create_global_var( + name="gradient_merge_zero", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + force_cpu=True, + ) # Add step var & cond var - step_var = layers.create_global_var(name="gradient_merge_step", - shape=[1], - value=int(0), - dtype='int32', - persistable=True, - force_cpu=True) + step_var = layers.create_global_var( + name="gradient_merge_step", + shape=[1], + value=int(0), + dtype='int32', + persistable=True, + force_cpu=True, + ) - cond_var = main_block.create_var(name="gradient_merge_cond", - shape=[1], - dtype='bool') + cond_var = main_block.create_var( + name="gradient_merge_cond", shape=[1], dtype='bool' + ) with device_guard("cpu"): # step_var = (step_var + 1) % k_step layers.increment(x=step_var, value=1.0, in_place=True) - main_block.append_op(type='elementwise_mod', - inputs={ - 'X': step_var, - 'Y': k_step_var - }, - outputs={'Out': step_var}, - attrs={ - 'axis': -1, - 'use_mkldnn': False - }) + main_block.append_op( + type='elementwise_mod', 
+ inputs={'X': step_var, 'Y': k_step_var}, + outputs={'Out': step_var}, + attrs={'axis': -1, 'use_mkldnn': False}, + ) # cond_var = (step_var == 0) - main_block.append_op(type='equal', - inputs={ - 'X': step_var, - 'Y': zero_var - }, - outputs={'Out': cond_var}) + main_block.append_op( + type='equal', + inputs={'X': step_var, 'Y': zero_var}, + outputs={'Out': cond_var}, + ) return cond_var @@ -7206,7 +8006,7 @@ class GradientMergeOptimizer(object): cond = self._get_gm_cond_var(main_block) - #TODO(mapingshuo) support sparse embedding + # TODO(mapingshuo) support sparse embedding # step1: remove grad.op's op_role_var for param, grad in params_grads: assert ( @@ -7225,41 +8025,41 @@ class GradientMergeOptimizer(object): for param, grad in params_grads: param_name = param.name param_var = main_block.var(param_name) - assert (param_var is not None) - gradient_merge_var = main_block.create_var(name=param_name + - "@GRAD@GradientMerge", - shape=param_var.shape, - dtype=param_var.dtype, - persistable=True) + assert param_var is not None + gradient_merge_var = main_block.create_var( + name=param_name + "@GRAD@GradientMerge", + shape=param_var.shape, + dtype=param_var.dtype, + persistable=True, + ) param_to_gradient_merge[param_name] = gradient_merge_var startup_gradient_merge_var = startup_block.create_var( name=param_name + "@GRAD@GradientMerge", shape=param_var.shape, dtype=param_var.dtype, - persistable=True) - startup_block.append_op(type="fill_constant", - outputs={"Out": startup_gradient_merge_var}, - attrs={ - "shape": param_var.shape, - "dtype": param_var.dtype, - "value": float(0), - }) + persistable=True, + ) + startup_block.append_op( + type="fill_constant", + outputs={"Out": startup_gradient_merge_var}, + attrs={ + "shape": param_var.shape, + "dtype": param_var.dtype, + "value": float(0), + }, + ) # grad_merge += grad new_grad_op = main_block.append_op( type="elementwise_add", - inputs={ - 'X': grad, - 'Y': gradient_merge_var - }, + inputs={'X': grad, 'Y': gradient_merge_var}, outputs={'Out': gradient_merge_var}, - attrs={ - 'axis': -1, - 'use_mkldnn': False - }) - self._add_gm_op_role_var(new_grad_op, param, gradient_merge_var, - cond) + attrs={'axis': -1, 'use_mkldnn': False}, + ) + self._add_gm_op_role_var( + new_grad_op, param, gradient_merge_var, cond + ) new_params_grads.append([param, gradient_merge_var]) def true_apply_gradient(): @@ -7273,16 +8073,19 @@ class GradientMergeOptimizer(object): if self.avg: for param, new_grad in new_params_grads: # grad /= k_steps - cur_block.append_op(type='scale', - inputs={'X': new_grad}, - outputs={'Out': new_grad}, - attrs={ - 'scale': 1.0 / self.k_steps, - 'bias': 0.0, - 'bias_after_scale': False - }) - new_grad.op._set_attr(op_maker.kOpRoleAttrName(), - op_maker.OpRole.Backward) + cur_block.append_op( + type='scale', + inputs={'X': new_grad}, + outputs={'Out': new_grad}, + attrs={ + 'scale': 1.0 / self.k_steps, + 'bias': 0.0, + 'bias_after_scale': False, + }, + ) + new_grad.op._set_attr( + op_maker.kOpRoleAttrName(), op_maker.OpRole.Backward + ) for param, new_grad in new_params_grads: # NOTE. 
regularization will append ops to grad.block, @@ -7292,36 +8095,40 @@ class GradientMergeOptimizer(object): new_grad.block = cur_block self._optimize_ops = self.inner_optimizer.apply_gradients( - new_params_grads) + new_params_grads + ) # clear gradient_merge_vars for param, new_grad in new_params_grads: - layers.fill_constant(shape=new_grad.shape, - dtype=new_grad.dtype, - value=0.0, - out=new_grad) - new_grad.op._set_attr(op_maker.kOpRoleAttrName(), - op_maker.OpRole.Optimize) + layers.fill_constant( + shape=new_grad.shape, + dtype=new_grad.dtype, + value=0.0, + out=new_grad, + ) + new_grad.op._set_attr( + op_maker.kOpRoleAttrName(), op_maker.OpRole.Optimize + ) # step3. apply gradient layers.cond(cond, true_fn=true_apply_gradient, false_fn=None) return self._optimize_ops - def minimize(self, - loss, - startup_program=None, - parameter_list=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameter_list=None, no_grad_set=None + ): assert isinstance(loss, Variable), "The loss should be an Variable." - params_grads = self.backward(loss, - startup_program=startup_program, - parameter_list=parameter_list, - no_grad_set=no_grad_set) + params_grads = self.backward( + loss, + startup_program=startup_program, + parameter_list=parameter_list, + no_grad_set=no_grad_set, + ) - optimize_ops = self.apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + optimize_ops = self.apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index fe8a2b2610bb2d1c91ca7fb9cb80fe651f649e0a..c0a45b041e6a5d65c1deb18567d928dd72902236 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -27,7 +27,7 @@ BuildStrategy = core.ParallelExecutor.BuildStrategy class ParallelExecutor(object): """ - :api_attr: Static Graph + :api_attr: Static Graph The ParallelExecutor is an upgraded version of :code:`paddle.static.Executor` that supports multi-node model training and testing based on the data-parallel mode. 
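The GradientMergeOptimizer code reformatted above keeps a CPU-side counter, computes `step = (step + 1) % k` and `cond = (step == 0)`, adds each step's gradient into a persistent `@GRAD@GradientMerge` buffer, and only runs the inner optimizer (optionally after dividing the merged gradient by `k`) when the condition fires, zeroing the buffer afterwards. A compact sketch of that control flow in plain Python follows; it is not part of the patch, and the `GradientMerge` class and the lambda SGD step are illustrative assumptions.

```python
# Illustrative sketch only: apply gradients every k steps, as the
# gradient-merge condition variable arranges in the static graph above.
import numpy as np


class GradientMerge:
    def __init__(self, k_steps, avg=True):
        self.k, self.avg, self.step = k_steps, avg, 0
        self.merged = None  # plays the role of the @GRAD@GradientMerge buffer

    def step_update(self, param, grad, inner_apply, lr=0.1):
        if self.merged is None:
            self.merged = np.zeros_like(grad)
        self.merged += grad  # grad_merge += grad
        self.step = (self.step + 1) % self.k  # elementwise_mod
        if self.step == 0:  # equal -> cond
            g = self.merged / self.k if self.avg else self.merged
            param = inner_apply(param, g, lr)  # inner optimizer fires here
            self.merged[...] = 0.0  # clear the merge buffer
        return param


sgd = lambda p, g, lr: p - lr * g
gm = GradientMerge(k_steps=4)
p = np.ones(3)
for _ in range(8):
    p = gm.step_update(p, grad=2.0 * p, inner_apply=sgd)
```

The effect is one optimizer update per `k` micro-steps with an (optionally averaged) accumulated gradient, which is what the `layers.cond(cond, true_fn=...)` branch above expresses in graph form.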
In data-parallel mode, @@ -147,16 +147,18 @@ class ParallelExecutor(object): """ - def __init__(self, - use_cuda, - loss_name=None, - main_program=None, - share_vars_from=None, - exec_strategy=None, - build_strategy=None, - num_trainers=1, - trainer_id=0, - scope=None): + def __init__( + self, + use_cuda, + loss_name=None, + main_program=None, + share_vars_from=None, + exec_strategy=None, + build_strategy=None, + num_trainers=1, + trainer_id=0, + scope=None, + ): if build_strategy is None: build_strategy = BuildStrategy() @@ -164,22 +166,28 @@ class ParallelExecutor(object): if num_trainers != 1 and build_strategy.num_trainers != num_trainers: sys.stderr.write( 'The value of build_strategy.num_trainers[%d] is overwritten ' - 'by the passed num_trainers[%d].\n' % - (build_strategy.num_trainers, num_trainers)) + 'by the passed num_trainers[%d].\n' + % (build_strategy.num_trainers, num_trainers) + ) build_strategy.num_trainers = num_trainers if trainer_id != 0 and build_strategy.trainer_id != trainer_id: sys.stderr.write( 'The value of build_strategy.trainer_id[%d] is overwritten ' - 'by the passed trainer_id[%d].\n' % - (build_strategy.trainer_id, trainer_id)) + 'by the passed trainer_id[%d].\n' + % (build_strategy.trainer_id, trainer_id) + ) build_strategy.trainer_id = trainer_id - self._places = framework.cuda_places( - ) if use_cuda else framework.cpu_places() + self._places = ( + framework.cuda_places() if use_cuda else framework.cpu_places() + ) self._scope = scope if scope is not None else executor.global_scope() - main_program = main_program if main_program is not None \ + main_program = ( + main_program + if main_program is not None else framework.default_main_program() + ) self._compiled_program = compiler.CompiledProgram(main_program) if share_vars_from: @@ -192,7 +200,9 @@ class ParallelExecutor(object): build_strategy=build_strategy, exec_strategy=exec_strategy, share_vars_from=share_vars_from._compiled_program - if share_vars_from else None) + if share_vars_from + else None, + ) self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace() self._exe = executor.Executor(self._place) @@ -299,11 +309,13 @@ class ParallelExecutor(object): fetch_list=[loss.name]) """ - return self._exe.run(program=self._compiled_program, - scope=self._scope, - feed=feed, - fetch_list=fetch_list, - return_numpy=return_numpy) + return self._exe.run( + program=self._compiled_program, + scope=self._scope, + feed=feed, + fetch_list=fetch_list, + return_numpy=return_numpy, + ) @property def device_count(self): @@ -363,14 +375,20 @@ class ParallelExecutor(object): parallel_exe.drop_local_exe_scopes() """ - check_type(self._compiled_program._executor, - "the Executor of compiled program", core.ParallelExecutor, - "ParallelExecutor.drop_local_exe_scopes") + check_type( + self._compiled_program._executor, + "the Executor of compiled program", + core.ParallelExecutor, + "ParallelExecutor.drop_local_exe_scopes", + ) self._compiled_program._executor.drop_local_exe_scopes() # This API is used to check whether DropLocalExeScopes can work. 
def _need_create_local_exe_scopes(self): - check_type(self._compiled_program._executor, - "the Executor of compiled program", core.ParallelExecutor, - "ParallelExecutor._need_create_local_exe_scopes") + check_type( + self._compiled_program._executor, + "the Executor of compiled program", + core.ParallelExecutor, + "ParallelExecutor._need_create_local_exe_scopes", + ) return self._compiled_program._executor._need_create_local_exe_scopes() diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index b43625759e37a6d46648213bdb5435aac77050c8..ba7bead304b95857f83ac88ea50259f6b4fc7b8c 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -71,24 +71,31 @@ class ParamAttr(object): paddle.nn.Linear(3, 4, weight_attr=weight_attr) """ - def __init__(self, - name=None, - initializer=None, - learning_rate=1.0, - regularizer=None, - trainable=True, - do_model_average=True, - need_clip=True): + def __init__( + self, + name=None, + initializer=None, + learning_rate=1.0, + regularizer=None, + trainable=True, + do_model_average=True, + need_clip=True, + ): check_type(name, "name", (str, type(None)), "ParamAttr") check_type(learning_rate, "learning_rate", (float, int), "ParamAttr") check_type(trainable, "trainable", (bool), "ParamAttr") check_type(do_model_average, "do_model_average", (bool), "ParamAttr") check_type(need_clip, "need_clip", (bool), "ParamAttr") - check_type(initializer, "initializer", (Initializer, type(None)), - "ParamAttr") - check_type(regularizer, "regularizer", - (WeightDecayRegularizer, type(None)), "ParamAttr") + check_type( + initializer, "initializer", (Initializer, type(None)), "ParamAttr" + ) + check_type( + regularizer, + "regularizer", + (WeightDecayRegularizer, type(None)), + "ParamAttr", + ) self.name = name if self.name == "": @@ -191,13 +198,11 @@ class ParamAttr(object): """ kwargs = { 'name': self.name, - 'optimize_attr': { - 'learning_rate': self.learning_rate - }, + 'optimize_attr': {'learning_rate': self.learning_rate}, 'regularizer': self.regularizer, 'trainable': self.trainable, 'do_model_average': self.do_model_average, - 'need_clip': self.need_clip + 'need_clip': self.need_clip, } if with_initializer: kwargs['initializer'] = self.initializer @@ -277,21 +282,24 @@ class WeightNormParamAttr(ParamAttr): # these paramters for inference. 
params_with_weight_norm = [] - def __init__(self, - dim=None, - name=None, - initializer=None, - learning_rate=1.0, - regularizer=None, - trainable=True, - do_model_average=False, - need_clip=True): - super(WeightNormParamAttr, - self).__init__(name=name, - initializer=initializer, - learning_rate=learning_rate, - regularizer=regularizer, - trainable=trainable, - do_model_average=do_model_average, - need_clip=need_clip) + def __init__( + self, + dim=None, + name=None, + initializer=None, + learning_rate=1.0, + regularizer=None, + trainable=True, + do_model_average=False, + need_clip=True, + ): + super(WeightNormParamAttr, self).__init__( + name=name, + initializer=initializer, + learning_rate=learning_rate, + regularizer=regularizer, + trainable=trainable, + do_model_average=do_model_average, + need_clip=need_clip, + ) self.dim = dim diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py index 827c8b42a5b520824a90b782cb0ddd9ca8e7dde6..6ebb2f9a08863836a56199c9ef3aeadc6c10b38d 100644 --- a/python/paddle/fluid/profiler.py +++ b/python/paddle/fluid/profiler.py @@ -20,8 +20,11 @@ import sys from paddle.utils.deprecated import deprecated __all__ = [ - 'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler', - 'stop_profiler' + 'cuda_profiler', + 'reset_profiler', + 'profiler', + 'start_profiler', + 'stop_profiler', ] NVPROF_CONFIG = [ @@ -39,8 +42,8 @@ NVPROF_CONFIG = [ since="2.3.0", update_to="paddle.profiler.Profiler", level=1, - reason= - "Please use new profiler tool, this profiler tool is no longer maintained.") + reason="Please use new profiler tool, this profiler tool is no longer maintained.", +) @signature_safe_contextmanager def cuda_profiler(output_file, output_mode=None, config=None): """ @@ -118,8 +121,8 @@ def npu_profiler(output_file, config=None): since="2.3.0", update_to="paddle.profiler.Profiler", level=1, - reason= - "Please use new profiler tool, this profiler tool is no longer maintained.") + reason="Please use new profiler tool, this profiler tool is no longer maintained.", +) def reset_profiler(): """ Clear the previous time record. It works for @@ -146,8 +149,8 @@ def reset_profiler(): since="2.3.0", update_to="paddle.profiler.Profiler", level=1, - reason= - "Please use new profiler tool, this profiler tool is no longer maintained.") + reason="Please use new profiler tool, this profiler tool is no longer maintained.", +) def start_profiler(state, tracer_option='Default'): """ Enable the profiler. Uers can use `fluid.profiler.start_profiler` and @@ -204,7 +207,8 @@ def start_profiler(state, tracer_option='Default'): if tracer_option not in ['Default', 'OpDetail', 'AllOpDetail']: raise ValueError( - "tracer option must be 'Default', 'OpDetail', 'AllOpDetail'.") + "tracer option must be 'Default', 'OpDetail', 'AllOpDetail'." + ) if tracer_option == "Default": prof_tracer_option = core.TracerOption.kDefault elif tracer_option == "OpDetail": @@ -220,8 +224,8 @@ def start_profiler(state, tracer_option='Default'): since="2.3.0", update_to="paddle.profiler.Profiler", level=1, - reason= - "Please use new profiler tool, this profiler tool is no longer maintained.") + reason="Please use new profiler tool, this profiler tool is no longer maintained.", +) def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): """ Stop the profiler. 
Uers can use `fluid.profiler.start_profiler` and @@ -264,8 +268,10 @@ def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): return sorted_key = 'default' if sorted_key is None else sorted_key if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']: - raise ValueError("The sorted_key must be None or in 'calls', 'total', " - "'max', 'min' and 'ave'") + raise ValueError( + "The sorted_key must be None or in 'calls', 'total', " + "'max', 'min' and 'ave'" + ) key_map = { 'default': core.EventSortingKey.kDefault, 'calls': core.EventSortingKey.kCalls, @@ -283,13 +289,12 @@ def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): since="2.3.0", update_to="paddle.profiler.Profiler", level=1, - reason= - "Please use new profiler tool, this profiler tool is no longer maintained.") + reason="Please use new profiler tool, this profiler tool is no longer maintained.", +) @signature_safe_contextmanager -def profiler(state, - sorted_key=None, - profile_path='/tmp/profile', - tracer_option='Default'): +def profiler( + state, sorted_key=None, profile_path='/tmp/profile', tracer_option='Default' +): """ The profiler interface. This profiler can be used to profile both CPU and GPU program. diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index 07dd03a2bc69e39b6d0b00ca78b4493bf7f44511..5c2e92ae458e79ba7857b03adb4ce3d81a66cd81 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -21,14 +21,39 @@ import paddle import time import copy -from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, _non_static_mode, cpu_places, _current_expected_place, _in_eager_without_dygraph_check +from .framework import ( + Program, + Variable, + program_guard, + default_main_program, + default_startup_program, + _non_static_mode, + cpu_places, + _current_expected_place, + _in_eager_without_dygraph_check, +) from .executor import global_scope from .data_feeder import DataFeeder, BatchedTensorProvider -from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler +from .multiprocess_utils import ( + multiprocess_queue_set, + CleanupFuncRegistrar, + _cleanup_mmap, + _cleanup, + _set_SIGCHLD_handler, +) from .dataloader import BatchSampler, Dataset, IterableDataset, Subset -from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn +from .dataloader.dataloader_iter import ( + _DataLoaderIterSingleProcess, + _DataLoaderIterMultiProcess, + _DatasetKind, + default_collate_fn, +) from .dataloader.batch_sampler import _InfiniteIterableSampler -from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer +from .layers.io import ( + monkey_patch_reader_methods, + _copy_reader_var_, + double_buffer, +) from .unique_name import UniqueNameGenerator from .framework import _get_paddle_place, _get_paddle_place_list from paddle.fluid.framework import _set_expected_place, _current_expected_place @@ -39,8 +64,10 @@ import warnings import os import multiprocessing import signal + # NOTE: queue has a different name in python2 and python3 import queue + # NOTE: [ avoid hanging & failed quickly ] These value is used in getting data from another process QUEUE_GET_TIMEOUT = 60 @@ -120,7 +147,6 @@ def _reader_process_loop(batch_reader, data_queue): class DataLoaderBase(object): - def __init__(self): self._places = None @@ -151,12 +177,12 @@ class 
DataLoaderBase(object): "this means the input data contains nested lists with different lengths. " "\n\t* Check the reader function passed to 'decorate_batch_generator'" " to locate the data causes this issue.\n\t* Please consider using " - "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.") + "'fluid.create_lod_tensor' to convert it to a LoD-Tensor." + ) return arr class AuToTune(object): - def __init__(self, loader): self.loader = loader self.max_num_worker = multiprocessing.cpu_count() / 2 @@ -174,12 +200,14 @@ class AuToTune(object): # pick the best num_workers auto_tune_start = time.time() logging.debug("========= DataLoader Auto Tune =========") - logging.debug("User config for DataLoader: " + - str(self.loader.num_workers)) + logging.debug( + "User config for DataLoader: " + str(self.loader.num_workers) + ) best_num_workers = 0 min_cost = float("inf") - logging.debug("Tuning Range for num_workers: 0 ~ " + - str(self.max_num_worker)) + logging.debug( + "Tuning Range for num_workers: 0 ~ " + str(self.max_num_worker) + ) num_workers = 0 while num_workers < self.max_num_worker: auto_tune_loader.num_workers = num_workers @@ -188,25 +216,37 @@ class AuToTune(object): min_cost = avg_cost best_num_workers = num_workers else: - update_num = self.is_best(auto_tune_loader, best_num_workers, - min_cost, self.max_num_worker) + update_num = self.is_best( + auto_tune_loader, + best_num_workers, + min_cost, + self.max_num_worker, + ) if update_num == best_num_workers: break else: best_num_workers = update_num - logging.debug("num_workers: " + str(num_workers) + " avg_cost: " + - str(avg_cost)) + logging.debug( + "num_workers: " + + str(num_workers) + + " avg_cost: " + + str(avg_cost) + ) num_workers += 2 - logging.info("auto_tune dataLoader best_num_workers: " + - str(best_num_workers)) - logging.debug("AutoTuning Cost for DataLoader: " + - str(time.time() - auto_tune_start) + ' seconds') + logging.info( + "auto_tune dataLoader best_num_workers: " + str(best_num_workers) + ) + logging.debug( + "AutoTuning Cost for DataLoader: " + + str(time.time() - auto_tune_start) + + ' seconds' + ) # tune the default loader's num_workers return best_num_workers def need_autotune(self): - if (sys.platform == 'darwin' or sys.platform == 'win32'): + if sys.platform == 'darwin' or sys.platform == 'win32': return False else: return True @@ -219,8 +259,9 @@ class AuToTune(object): def get_autotune_loader(self): loader = copy.copy(self.loader) batch_size = self.loader.batch_sampler.batch_size - if isinstance(self.loader.batch_sampler, - paddle.io.DistributedBatchSampler): + if isinstance( + self.loader.batch_sampler, paddle.io.DistributedBatchSampler + ): dataset = self.loader.batch_sampler.dataset sub_dataset = self.get_sub_dataset(dataset, batch_size) loader.batch_sampler = paddle.io.DistributedBatchSampler( @@ -229,14 +270,16 @@ class AuToTune(object): num_replicas=self.loader.batch_sampler.nranks, rank=self.loader.batch_sampler.local_rank, shuffle=self.loader.batch_sampler.shuffle, - drop_last=self.loader.batch_sampler.drop_last) + drop_last=self.loader.batch_sampler.drop_last, + ) elif isinstance(self.loader.batch_sampler, paddle.io.BatchSampler): dataset = self.loader.batch_sampler.sampler.data_source sub_dataset = self.get_sub_dataset(dataset, batch_size) loader.batch_sampler = paddle.io.BatchSampler( dataset=sub_dataset, batch_size=batch_size, - drop_last=self.loader.batch_sampler.drop_last) + drop_last=self.loader.batch_sampler.drop_last, + ) else: loader = None return loader @@ -261,10 +304,14 @@ 
class AuToTune(object): while num_workers < num_work_boundary and step < 5: self.loader.num_workers = num_workers time = self.evaluate_reader_cost(reader) - logging.debug("for back num_workers: " + str(num_workers) + - " avg_cost: " + str(time)) + logging.debug( + "for back num_workers: " + + str(num_workers) + + " avg_cost: " + + str(time) + ) step += 1 - if (time < best_time * 0.70 * boundary): + if time < best_time * 0.70 * boundary: return num_workers else: num_workers += 1 @@ -442,23 +489,25 @@ class DataLoader(object): """ - def __init__(self, - dataset, - feed_list=None, - places=None, - return_list=True, - batch_sampler=None, - batch_size=1, - shuffle=False, - drop_last=False, - collate_fn=None, - num_workers=0, - use_buffer_reader=True, - prefetch_factor=2, - use_shared_memory=True, - timeout=0, - worker_init_fn=None, - persistent_workers=False): + def __init__( + self, + dataset, + feed_list=None, + places=None, + return_list=True, + batch_sampler=None, + batch_size=1, + shuffle=False, + drop_last=False, + collate_fn=None, + num_workers=0, + use_buffer_reader=True, + prefetch_factor=2, + use_shared_memory=True, + timeout=0, + worker_init_fn=None, + persistent_workers=False, + ): self.return_list = return_list self.collate_fn = collate_fn self.use_buffer_reader = use_buffer_reader @@ -468,8 +517,9 @@ class DataLoader(object): self.dataset = dataset if not return_list and not _non_static_mode(): - assert feed_list is not None, \ - "feed_list should be set when return_list=False" + assert ( + feed_list is not None + ), "feed_list should be set when return_list=False" self.feed_list = feed_list if places is None: @@ -481,11 +531,13 @@ class DataLoader(object): self.places = _convert_places(places) assert num_workers >= 0, "num_workers should be a non-negative value" - if num_workers > 0 and (sys.platform == 'darwin' - or sys.platform == 'win32'): + if num_workers > 0 and ( + sys.platform == 'darwin' or sys.platform == 'win32' + ): warnings.warn( - "DataLoader with multi-process mode is not supported on MacOs and Windows currently." \ - " Please use signle-process mode with num_workers = 0 instead") + "DataLoader with multi-process mode is not supported on MacOs and Windows currently." + " Please use signle-process mode with num_workers = 0 instead" + ) num_workers = 0 self.num_workers = num_workers @@ -502,44 +554,53 @@ class DataLoader(object): self.dataset_kind = _DatasetKind.ITER if shuffle: raise ValueError( - "IterableDataset not support shuffle, but got shuffle={}". 
- format(shuffle)) + "IterableDataset not support shuffle, but got shuffle={}".format( + shuffle + ) + ) if batch_sampler is not None: raise ValueError( - "IterableDataset expect unspecified batch_sampler") + "IterableDataset expect unspecified batch_sampler" + ) else: self.dataset_kind = _DatasetKind.MAP if batch_sampler is not None: - assert batch_size == 1 and not shuffle and not drop_last, \ - "batch_size/shuffle/drop_last should not be set when " \ + assert batch_size == 1 and not shuffle and not drop_last, ( + "batch_size/shuffle/drop_last should not be set when " "batch_sampler is given" + ) self.batch_sampler = batch_sampler self.batch_size = None elif batch_size is None: self.batch_sampler = None self.batch_size = None else: - assert batch_size > 0, \ - "batch_size should be None or a positive value when " \ + assert batch_size > 0, ( + "batch_size should be None or a positive value when " "batch_sampler is not given" + ) self.batch_size = batch_size if isinstance(dataset, IterableDataset): self.batch_sampler = _InfiniteIterableSampler( - dataset, batch_size) + dataset, batch_size + ) else: - self.batch_sampler = BatchSampler(dataset=dataset, - batch_size=batch_size, - shuffle=shuffle, - drop_last=drop_last) + self.batch_sampler = BatchSampler( + dataset=dataset, + batch_size=batch_size, + shuffle=shuffle, + drop_last=drop_last, + ) self.drop_last = drop_last self.auto_collate_batch = self.batch_sampler is not None self.pin_memory = False if _non_static_mode(): - self.pin_memory = True if use_pinned_memory( - ) is None else use_pinned_memory() + self.pin_memory = ( + True if use_pinned_memory() is None else use_pinned_memory() + ) self._persistent_workers = persistent_workers self._iterator = None @@ -570,13 +631,15 @@ class DataLoader(object): return self.__iter__() @staticmethod - def from_generator(feed_list=None, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=False, - use_multiprocess=False, - drop_last=True): + def from_generator( + feed_list=None, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=False, + use_multiprocess=False, + drop_last=True, + ): """ .. warning:: This API will be deprecated in the future, it is recommended to use @@ -888,12 +951,23 @@ class DataLoader(object): print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0] """ if _non_static_mode(): - return DygraphGeneratorLoader(feed_list, capacity, - use_double_buffer, iterable, - return_list, use_multiprocess) + return DygraphGeneratorLoader( + feed_list, + capacity, + use_double_buffer, + iterable, + return_list, + use_multiprocess, + ) else: - return GeneratorLoader(feed_list, capacity, use_double_buffer, - iterable, return_list, drop_last) + return GeneratorLoader( + feed_list, + capacity, + use_double_buffer, + iterable, + return_list, + drop_last, + ) @staticmethod def from_dataset(dataset, places, drop_last=True): @@ -950,13 +1024,15 @@ class DygraphGeneratorLoader(DataLoaderBase): static graph GeneratorLoader, Separate implementation to keep code readable. 
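As the `from_generator` hunk above shows, the dygraph branch returns a `DygraphGeneratorLoader` while the static-graph branch returns a `GeneratorLoader`. A hedged usage sketch for the dygraph branch follows; the random batch generator is illustrative, and it assumes the dygraph loader exposes `set_batch_generator` in the same way the static one further down does:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.disable_static()                 # make sure the dygraph branch is taken

    def batch_generator():
        for _ in range(4):
            # one ndarray per feed slot; a single slot here
            yield [np.random.rand(8, 10).astype('float32')]

    loader = fluid.io.DataLoader.from_generator(capacity=4, return_list=True)
    loader.set_batch_generator(batch_generator, places=fluid.CPUPlace())

    for (batch,) in loader():
        print(batch.shape)                  # [8, 10]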
""" - def __init__(self, - feed_list=None, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=False): + def __init__( + self, + feed_list=None, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=False, + ): self._batch_reader = None self._places = None self._feed_list = feed_list @@ -979,8 +1055,9 @@ class DygraphGeneratorLoader(DataLoaderBase): # NOTE: the multiprocessing in different platform is incompatible, we will solve it later self._use_multiprocess = use_multiprocess - if self._use_multiprocess and (sys.platform == 'darwin' - or sys.platform == 'win32'): + if self._use_multiprocess and ( + sys.platform == 'darwin' or sys.platform == 'win32' + ): warnings.warn( "NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows." ) @@ -999,8 +1076,9 @@ class DygraphGeneratorLoader(DataLoaderBase): # mode, this thread is used to get next batch data from self._batch_reader, then # push it into self._blocking_queue self._thread = None - self._pin_memory = True if use_pinned_memory( - ) is None else use_pinned_memory() + self._pin_memory = ( + True if use_pinned_memory() is None else use_pinned_memory() + ) @property def queue(self): @@ -1042,14 +1120,20 @@ class DygraphGeneratorLoader(DataLoaderBase): self._dtypes = [] self._need_check_feed = [] self._blocking_queue = core.init_lod_tensor_blocking_queue( - core.Variable(), self._capacity, False) + core.Variable(), self._capacity, False + ) self._reader = None - self._reader = core.create_py_reader(self.queue, self._var_names, - self._shapes, self._dtypes, - self._need_check_feed, - self._places, - self._use_double_buffer, True, - self._pin_memory) + self._reader = core.create_py_reader( + self.queue, + self._var_names, + self._shapes, + self._dtypes, + self._need_check_feed, + self._places, + self._use_double_buffer, + True, + self._pin_memory, + ) def _start(self): if self._use_multiprocess: @@ -1060,9 +1144,10 @@ class DygraphGeneratorLoader(DataLoaderBase): # add _data_queue into global queue set global multiprocess_queue_set multiprocess_queue_set.add(self._data_queue) - self._process = multiprocessing.Process(target=_reader_process_loop, - args=(self._batch_reader, - self._data_queue)) + self._process = multiprocessing.Process( + target=_reader_process_loop, + args=(self._batch_reader, self._data_queue), + ) self._process.daemon = True self._process.start() @@ -1079,13 +1164,15 @@ class DygraphGeneratorLoader(DataLoaderBase): self._thread_done_event = threading.Event() self._thread = threading.Thread( target=self._reader_thread_loop_for_multiprocess, - args=(_current_expected_place(), )) + args=(_current_expected_place(),), + ) self._thread.daemon = True self._thread.start() else: self._thread = threading.Thread( target=self._reader_thread_loop_for_singleprocess, - args=(_current_expected_place(), )) + args=(_current_expected_place(),), + ) self._thread.daemon = True self._thread.start() @@ -1097,8 +1184,9 @@ class DygraphGeneratorLoader(DataLoaderBase): def __iter__(self): assert self.iterable, "DataLoader is not iterable" - assert self._batch_reader is not None, \ - "Data source of DataLoader has not set yet" + assert ( + self._batch_reader is not None + ), "Data source of DataLoader has not set yet" self._init_iterable() self._start() @@ -1108,7 +1196,8 @@ class DygraphGeneratorLoader(DataLoaderBase): try: if _in_eager_without_dygraph_check(): return core.eager.read_next_tensor_list( - 
self._reader.read_next_list()[0]) + self._reader.read_next_list()[0] + ) else: return self._reader.read_next_var_list() except StopIteration: @@ -1193,23 +1282,22 @@ class DygraphGeneratorLoader(DataLoaderBase): self._blocking_queue.kill() self._thread = None logging.warning( - "DygraphDataLoader reader thread raised an exception.") + "DygraphDataLoader reader thread raised an exception." + ) six.reraise(*sys.exc_info()) - def set_sample_generator(self, - reader, - batch_size, - drop_last=True, - places=None): + def set_sample_generator( + self, reader, batch_size, drop_last=True, places=None + ): assert batch_size > 0, "batch_size must be larger than 0" if isinstance(places, (list, tuple)): places = _get_paddle_place_list(places) else: places = _get_paddle_place(places) - self.set_sample_list_generator(paddle.batch(reader, - batch_size=batch_size, - drop_last=drop_last), - places=places) + self.set_sample_list_generator( + paddle.batch(reader, batch_size=batch_size, drop_last=drop_last), + places=places, + ) return self def set_sample_list_generator(self, reader, places=None): @@ -1241,20 +1329,22 @@ class DygraphGeneratorLoader(DataLoaderBase): if places is None: places = _current_expected_place() self._places = _convert_places(places) - assert len(self._places) == 1, \ - "Number of places must be 1 in imperative mode" + assert ( + len(self._places) == 1 + ), "Number of places must be 1 in imperative mode" return self class GeneratorLoader(DataLoaderBase): - - def __init__(self, - feed_list=None, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=False, - drop_last=True): + def __init__( + self, + feed_list=None, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=False, + drop_last=True, + ): self._tensor_reader = None self._places = None self._thread = None @@ -1291,14 +1381,20 @@ class GeneratorLoader(DataLoaderBase): v.desc.need_check_feed() for v in self._feed_list ] self._queue = core.init_lod_tensor_blocking_queue( - core.Variable(), self._capacity, self._keep_order) + core.Variable(), self._capacity, self._keep_order + ) self._reader = None - self._reader = core.create_py_reader(self.queue, self._var_names, - self._shapes, self._dtypes, - self._need_check_feed, - self._places, - self._use_double_buffer, - self._drop_last, False) + self._reader = core.create_py_reader( + self.queue, + self._var_names, + self._shapes, + self._dtypes, + self._need_check_feed, + self._places, + self._use_double_buffer, + self._drop_last, + False, + ) def _init_non_iterable(self): lod_levels = [] @@ -1317,13 +1413,15 @@ class GeneratorLoader(DataLoaderBase): need_check_feed.append(int(feed_data.desc.need_check_feed())) queue_name = data_loader_unique_name_generator( - 'lod_tensor_blocking_queue') + 'lod_tensor_blocking_queue' + ) reader_name = data_loader_unique_name_generator('create_py_reader') double_buffer_name = data_loader_unique_name_generator('double_buffer') var = global_scope().var(queue_name) self._queue = core.init_lod_tensor_blocking_queue( - var, self._capacity, self._keep_order) + var, self._capacity, self._keep_order + ) if self._keep_order: block = default_main_program().current_block() @@ -1333,16 +1431,18 @@ class GeneratorLoader(DataLoaderBase): reader_var = block.create_var(name=reader_name) dtype_int = [int(t) for t in dtypes] - block.append_op(type='create_py_reader', - inputs={'blocking_queue': [queue_name]}, - outputs={'Out': [reader_var]}, - attrs={ - 'shape_concat': shape_concat, - 'lod_levels': lod_levels, - 'dtypes': dtype_int, 
- 'need_check_feed': need_check_feed, - 'ranks': ranks - }) + block.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [reader_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'dtypes': dtype_int, + 'need_check_feed': need_check_feed, + 'ranks': ranks, + }, + ) reader_var.desc.set_dtypes(dtypes) reader_var.persistable = True @@ -1354,7 +1454,8 @@ class GeneratorLoader(DataLoaderBase): reader.reset = self._queue.reset else: main_prog_var = _copy_reader_var_( - default_main_program().current_block(), reader_var) + default_main_program().current_block(), reader_var + ) main_prog_var.stop_gradient = True main_prog_var.persistable = True @@ -1362,8 +1463,9 @@ class GeneratorLoader(DataLoaderBase): reader = monkey_patch_reader_methods(main_prog_var) if self._use_double_buffer: - double_buffer_reader = double_buffer(reader, - name=double_buffer_name) + double_buffer_reader = double_buffer( + reader, name=double_buffer_name + ) # we return a double buffer reader. However, the reset method comes from # py_reader. double_buffer_reader.reset = reader.reset @@ -1375,7 +1477,8 @@ class GeneratorLoader(DataLoaderBase): type='read', inputs={'Reader': [self._reader]}, outputs={'Out': self._feed_list}, - attrs={'drop_last': self._drop_last}) + attrs={'drop_last': self._drop_last}, + ) @property def queue(self): @@ -1387,8 +1490,9 @@ class GeneratorLoader(DataLoaderBase): def __iter__(self): assert self.iterable, "DataLoader is not iterable" - assert self._tensor_reader is not None, \ - "Data source of DataLoader has not set yet" + assert ( + self._tensor_reader is not None + ), "Data source of DataLoader has not set yet" self._init_iterable() self._start() @@ -1409,15 +1513,18 @@ class GeneratorLoader(DataLoaderBase): six.reraise(*sys.exc_info()) def start(self): - assert not self._iterable, "start() cannot be called when DataLoader is iterable" + assert ( + not self._iterable + ), "start() cannot be called when DataLoader is iterable" self._start() def reset(self): - assert not self._iterable, "reset() cannot be called when DataLoader is iterable" + assert ( + not self._iterable + ), "reset() cannot be called when DataLoader is iterable" self._reset() def _start(self): - def __thread_main__(legacy_expected_place): try: # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here. 
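The `start()`/`reset()` asserts and the feeding thread reformatted above only apply to the non-iterable, static-graph loader. A minimal sketch of that usage pattern, assuming a trivial random generator and a CPU executor (the same `EOFException`-then-`reset()` loop appears in the `PyReader` docstring further down):

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    image = fluid.data(name='image', shape=[None, 784], dtype='float32')
    loader = fluid.io.DataLoader.from_generator(
        feed_list=[image], capacity=4, iterable=False
    )

    def batch_generator():
        for _ in range(4):
            yield [np.random.rand(8, 784).astype('float32')]

    loader.set_batch_generator(batch_generator)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    loader.start()                          # allowed only because iterable=False
    try:
        while True:
            exe.run(fetch_list=[image])
    except fluid.core.EOFException:
        loader.reset()                      # rearm the blocking queue for the next pass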
@@ -1450,8 +1557,9 @@ class GeneratorLoader(DataLoaderBase): logging.warning('Your reader has raised an exception!') six.reraise(*sys.exc_info()) - self._thread = threading.Thread(target=__thread_main__, - args=(_current_expected_place(), )) + self._thread = threading.Thread( + target=__thread_main__, args=(_current_expected_place(),) + ) self._thread.daemon = True self._thread.start() @@ -1465,11 +1573,9 @@ class GeneratorLoader(DataLoaderBase): self._exited = False self._reader.reset() - def set_sample_generator(self, - reader, - batch_size, - drop_last=True, - places=None): + def set_sample_generator( + self, reader, batch_size, drop_last=True, places=None + ): assert batch_size > 0, "batch_size must be larger than 0" if isinstance(places, (list, tuple)): places = _get_paddle_place_list(places) @@ -1482,16 +1588,20 @@ class GeneratorLoader(DataLoaderBase): break if has_lod: - self.set_sample_list_generator(paddle.batch(reader, - batch_size=batch_size, - drop_last=drop_last), - places=places) + self.set_sample_list_generator( + paddle.batch( + reader, batch_size=batch_size, drop_last=drop_last + ), + places=places, + ) else: - reader = BatchedTensorProvider(feed_list=self._feed_list, - place=core.CPUPlace(), - batch_size=batch_size, - generator=reader, - drop_last=drop_last) + reader = BatchedTensorProvider( + feed_list=self._feed_list, + place=core.CPUPlace(), + batch_size=batch_size, + generator=reader, + drop_last=drop_last, + ) self.set_batch_generator(reader, places=places) return self @@ -1501,8 +1611,9 @@ class GeneratorLoader(DataLoaderBase): else: places = _get_paddle_place(places) with program_guard(Program(), Program()): - feeder = DataFeeder(feed_list=self._feed_list, - place=core.CPUPlace()) + feeder = DataFeeder( + feed_list=self._feed_list, place=core.CPUPlace() + ) paddle_reader = feeder.decorate_reader(reader, multi_devices=False) def __tensor_reader_impl__(): @@ -1519,12 +1630,15 @@ class GeneratorLoader(DataLoaderBase): places = _get_paddle_place(places) self._tensor_reader = reader if self._iterable: - assert places is not None, "Places cannot be None when DataLoader is iterable" + assert ( + places is not None + ), "Places cannot be None when DataLoader is iterable" self._places = _convert_places(places) else: if places is not None: logging.info( - 'places would be ommited when DataLoader is not iterable') + 'places would be ommited when DataLoader is not iterable' + ) return self @@ -1696,15 +1810,17 @@ class PyReader(DataLoaderBase): relu = fluid.layers.relu(image) """ - def __init__(self, - feed_list=None, - capacity=None, - use_double_buffer=True, - iterable=True, - return_list=False): - self._loader = DataLoader.from_generator(feed_list, capacity, - use_double_buffer, iterable, - return_list) + def __init__( + self, + feed_list=None, + capacity=None, + use_double_buffer=True, + iterable=True, + return_list=False, + ): + self._loader = DataLoader.from_generator( + feed_list, capacity, use_double_buffer, iterable, return_list + ) @property def queue(self): @@ -1725,8 +1841,8 @@ class PyReader(DataLoaderBase): Start the data feeding thread. Can only call when the reader object is not iterable. - Example: - .. code-block:: python + Example: + .. 
code-block:: python import paddle import paddle.fluid as fluid @@ -1754,7 +1870,7 @@ class PyReader(DataLoaderBase): reader.reset() break - ''' + ''' self._loader.start() def reset(self): @@ -1794,11 +1910,9 @@ class PyReader(DataLoaderBase): ''' self._loader.reset() - def decorate_sample_generator(self, - sample_generator, - batch_size, - drop_last=True, - places=None): + def decorate_sample_generator( + self, sample_generator, batch_size, drop_last=True, places=None + ): ''' Set the data source of the PyReader object. @@ -1861,8 +1975,9 @@ class PyReader(DataLoaderBase): executor.run(feed=data, fetch_list=[loss]) ''' - self._loader.set_sample_generator(sample_generator, batch_size, - drop_last, places) + self._loader.set_sample_generator( + sample_generator, batch_size, drop_last, places + ) def decorate_sample_list_generator(self, reader, places=None): ''' @@ -1987,11 +2102,12 @@ class PyReader(DataLoaderBase): class DatasetLoader(DataLoaderBase): - def __init__(self, dataset, places, drop_last): - assert isinstance(dataset, paddle.distributed.fleet.dataset.DatasetBase - ), "dataset must be type of DatasetBase" - assert not _non_static_mode( + assert isinstance( + dataset, paddle.distributed.fleet.dataset.DatasetBase + ), "dataset must be type of DatasetBase" + assert ( + not _non_static_mode() ), "DatasetLoader is not supported in dygraph mode yet" if isinstance(places, (list, tuple)): places = _get_paddle_place_list(places) @@ -2000,32 +2116,48 @@ class DatasetLoader(DataLoaderBase): thread_num = len(places) - assert len(dataset.filelist) >= thread_num, \ - "Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num) + assert ( + len(dataset.filelist) >= thread_num + ), "Filelist number of dataset {} must be not less than place number {}".format( + len(dataset.filelist), thread_num + ) if dataset.thread_num != 0 and dataset.thread_num != thread_num: logging.warn( 'thread_num {} which is set in Dataset is ignored'.format( - dataset.thread_num)) + dataset.thread_num + ) + ) dataset._set_thread(thread_num) - if isinstance(dataset, paddle.distributed.fleet.dataset.InMemoryDataset - ) and dataset.queue_num > thread_num: + if ( + isinstance( + dataset, paddle.distributed.fleet.dataset.InMemoryDataset + ) + and dataset.queue_num > thread_num + ): logging.warn( "queue_num {} which is set in Dataset is ignored".format( - dataset.queue_num)) + dataset.queue_num + ) + ) dataset._set_queue_num(thread_num) self._dataset = dataset use_slots = [ - slot.name for slot in dataset.proto_desc.multi_slot_desc.slots + slot.name + for slot in dataset.proto_desc.multi_slot_desc.slots if slot.is_used ] self._iterable_dataset = core.IterableDatasetWrapper( - dataset.dataset, use_slots, _convert_places(places), - dataset.proto_desc.batch_size, drop_last) + dataset.dataset, + use_slots, + _convert_places(places), + dataset.proto_desc.batch_size, + drop_last, + ) def __iter__(self): self._dataset._finish_to_run() diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index ebf3230984eb7eae8a515863c0e8a95af47450a6..00240a1d3f4dd5ee5de37fea8809ce0f70f636a2 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -37,13 +37,11 @@ class WeightDecayRegularizer(object): pass def __call__(self, param, grad, block): - """Add corresponding weight decay operations to the network - """ + """Add corresponding weight decay operations to the network""" raise NotImplementedError() def __str__(self): - """Debug 
string - """ + """Debug string""" raise NotImplementedError() @@ -134,21 +132,25 @@ class L2DecayRegularizer(WeightDecayRegularizer): if framework._non_static_mode(): if framework.in_dygraph_mode(): - return _C_ops.scale(param, self._regularization_coeff, 0.0, - True) + return _C_ops.scale( + param, self._regularization_coeff, 0.0, True + ) else: - return _legacy_C_ops.scale(param, "scale", - self._regularization_coeff) + return _legacy_C_ops.scale( + param, "scale", self._regularization_coeff + ) else: - decay = block.create_var(dtype=param.dtype, - shape=param.shape, - lod_level=param.lod_level) + decay = block.create_var( + dtype=param.dtype, shape=param.shape, lod_level=param.lod_level + ) # Append Op to calculate decay - block.append_op(type='scale', - inputs={"X": param}, - outputs={"Out": decay}, - attrs={"scale": self._regularization_coeff}) + block.append_op( + type='scale', + inputs={"X": param}, + outputs={"Out": decay}, + attrs={"scale": self._regularization_coeff}, + ) return decay @@ -245,12 +247,12 @@ class L1DecayRegularizer(WeightDecayRegularizer): sign = block.create_var(dtype=param.dtype, shape=param.shape) decay = block.create_var(dtype=param.dtype, shape=param.shape) else: - sign = block.create_var(dtype=param.dtype, - shape=param.shape, - lod_level=param.lod_level) - decay = block.create_var(dtype=param.dtype, - shape=param.shape, - lod_level=param.lod_level) + sign = block.create_var( + dtype=param.dtype, shape=param.shape, lod_level=param.lod_level + ) + decay = block.create_var( + dtype=param.dtype, shape=param.shape, lod_level=param.lod_level + ) if in_dygraph_mode(): sign = _C_ops.sign(param) return _C_ops.scale(sign, self._regularization_coeff, 0.0, True) @@ -259,10 +261,12 @@ class L1DecayRegularizer(WeightDecayRegularizer): block.append_op(type='sign', inputs={"X": param}, outputs={"Out": sign}) # Append scale op to the output of sign op - block.append_op(type='scale', - inputs={"X": sign}, - outputs={"Out": decay}, - attrs={"scale": self._regularization_coeff}) + block.append_op( + type='scale', + inputs={"X": sign}, + outputs={"Out": decay}, + attrs={"scale": self._regularization_coeff}, + ) return decay diff --git a/python/paddle/fluid/tests/book/notest_understand_sentiment.py b/python/paddle/fluid/tests/book/notest_understand_sentiment.py index f8e49fafac4d91bcde8df3aa33e185c9455b0c66..02a17b9f92c99c73e58c17c8ca3638f638379f84 100644 --- a/python/paddle/fluid/tests/book/notest_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/notest_understand_sentiment.py @@ -22,43 +22,41 @@ import sys import os -def convolution_net(data, - label, - input_dim, - class_dim=2, - emb_dim=32, - hid_dim=32): - emb = fluid.layers.embedding(input=data, - size=[input_dim, emb_dim], - is_sparse=True) - conv_3 = fluid.nets.sequence_conv_pool(input=emb, - num_filters=hid_dim, - filter_size=3, - act="tanh", - pool_type="sqrt") - conv_4 = fluid.nets.sequence_conv_pool(input=emb, - num_filters=hid_dim, - filter_size=4, - act="tanh", - pool_type="sqrt") - prediction = fluid.layers.fc(input=[conv_3, conv_4], - size=class_dim, - act="softmax") +def convolution_net( + data, label, input_dim, class_dim=2, emb_dim=32, hid_dim=32 +): + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True + ) + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt", + ) + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt", + 
) + prediction = fluid.layers.fc( + input=[conv_3, conv_4], size=class_dim, act="softmax" + ) cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = paddle.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction -def dyn_rnn_lstm(data, - label, - input_dim, - class_dim=2, - emb_dim=32, - lstm_size=128): - emb = fluid.layers.embedding(input=data, - size=[input_dim, emb_dim], - is_sparse=True) +def dyn_rnn_lstm( + data, label, input_dim, class_dim=2, emb_dim=32, lstm_size=128 +): + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True + ) sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh') rnn = fluid.layers.DynamicRNN() @@ -73,13 +71,17 @@ def dyn_rnn_lstm(data, return gate0 + gate1 forget_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) + x=gate_common(word, prev_hidden, lstm_size) + ) input_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) + x=gate_common(word, prev_hidden, lstm_size) + ) output_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) + x=gate_common(word, prev_hidden, lstm_size) + ) cell_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) + x=gate_common(word, prev_hidden, lstm_size) + ) cell = forget_gate * prev_cell + input_gate * cell_gate hidden = output_gate * fluid.layers.tanh(x=cell) @@ -95,18 +97,14 @@ def dyn_rnn_lstm(data, return avg_cost, accuracy, prediction -def stacked_lstm_net(data, - label, - input_dim, - class_dim=2, - emb_dim=128, - hid_dim=512, - stacked_num=3): +def stacked_lstm_net( + data, label, input_dim, class_dim=2, emb_dim=128, hid_dim=512, stacked_num=3 +): assert stacked_num % 2 == 1 - emb = fluid.layers.embedding(input=data, - size=[input_dim, emb_dim], - is_sparse=True) + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True + ) # add bias attr # TODO(qijun) linear act @@ -117,54 +115,57 @@ def stacked_lstm_net(data, for i in range(2, stacked_num + 1): fc = fluid.layers.fc(input=inputs, size=hid_dim) - lstm, cell = fluid.layers.dynamic_lstm(input=fc, - size=hid_dim, - is_reverse=(i % 2) == 0) + lstm, cell = fluid.layers.dynamic_lstm( + input=fc, size=hid_dim, is_reverse=(i % 2) == 0 + ) inputs = [fc, lstm] fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') - prediction = fluid.layers.fc(input=[fc_last, lstm_last], - size=class_dim, - act='softmax') + prediction = fluid.layers.fc( + input=[fc_last, lstm_last], size=class_dim, act='softmax' + ) cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = paddle.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction -def train(word_dict, - net_method, - use_cuda, - parallel=False, - save_dirname=None, - is_local=True): +def train( + word_dict, + net_method, + use_cuda, + parallel=False, + save_dirname=None, + is_local=True, +): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) class_dim = 2 - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") if not parallel: - cost, acc_out, prediction = net_method(data, - label, - input_dim=dict_dim, - class_dim=class_dim) + cost, acc_out, prediction = 
net_method( + data, label, input_dim=dict_dim, class_dim=class_dim + ) else: raise NotImplementedError() adagrad = fluid.optimizer.Adagrad(learning_rate=0.002) adagrad.minimize(cost) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=1000), - batch_size=BATCH_SIZE) + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=1000 + ), + batch_size=BATCH_SIZE, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[data, label], place=place) @@ -174,19 +175,23 @@ def train(word_dict, for pass_id in range(PASS_NUM): for data in train_data(): - cost_val, acc_val = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[cost, acc_out]) + cost_val, acc_val = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[cost, acc_out], + ) print("cost=" + str(cost_val) + " acc=" + str(acc_val)) if cost_val < 0.4 and acc_val > 0.8: if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, ["words"], - prediction, exe) + fluid.io.save_inference_model( + save_dirname, ["words"], prediction, exe + ) return if math.isnan(float(cost_val)): sys.exit("got NaN loss, training failed.") - raise AssertionError("Cost is too large for {0}".format( - net_method.__name__)) + raise AssertionError( + "Cost is too large for {0}".format(net_method.__name__) + ) if is_local: train_loop(fluid.default_main_program()) @@ -205,8 +210,9 @@ def train(word_dict, t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -226,8 +232,11 @@ def infer(word_dict, use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) word_dict_len = len(word_dict) @@ -244,19 +253,19 @@ def infer(word_dict, use_cuda, save_dirname=None): recursive_seq_lens = [[3, 4, 2]] base_shape = [1] # The range of random integers is [low, high] - tensor_words = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) + tensor_words = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
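For readers unfamiliar with the LoD arguments being re-wrapped here: `recursive_seq_lens=[[3, 4, 2]]` describes a mini-batch of three variable-length sequences and `base_shape=[1]` makes each element a single integer id. A small sketch of what that produces (the `high=100` bound is an arbitrary stand-in for `word_dict_len - 1`):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    words = fluid.create_random_int_lodtensor(
        [[3, 4, 2]], [1], place, low=0, high=100
    )
    print(words.recursive_sequence_lengths())   # [[3, 4, 2]]
    print(np.array(words).shape)                # (9, 1): 3 + 4 + 2 ids packed together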
assert feed_target_names[0] == "words" - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_words}, - fetch_list=fetch_targets, - return_numpy=False) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False, + ) print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) @@ -267,16 +276,17 @@ def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - train(word_dict, - net_method, - use_cuda, - parallel=parallel, - save_dirname=save_dirname) + train( + word_dict, + net_method, + use_cuda, + parallel=parallel, + save_dirname=save_dirname, + ) infer(word_dict, use_cuda, save_dirname) class TestUnderstandSentiment(unittest.TestCase): - @classmethod def setUpClass(cls): cls.word_dict = paddle.dataset.imdb.word_dict() @@ -292,17 +302,21 @@ class TestUnderstandSentiment(unittest.TestCase): def test_conv_cpu(self): with self.new_program_scope(): - main(self.word_dict, - net_method=convolution_net, - use_cuda=False, - save_dirname="understand_sentiment_conv.inference.model") + main( + self.word_dict, + net_method=convolution_net, + use_cuda=False, + save_dirname="understand_sentiment_conv.inference.model", + ) def test_conv_cpu_parallel(self): with self.new_program_scope(): - main(self.word_dict, - net_method=convolution_net, - use_cuda=False, - parallel=True) + main( + self.word_dict, + net_method=convolution_net, + use_cuda=False, + parallel=True, + ) @unittest.skip(reason="make CI faster") def test_stacked_lstm_cpu(self): @@ -311,29 +325,35 @@ class TestUnderstandSentiment(unittest.TestCase): self.word_dict, net_method=stacked_lstm_net, use_cuda=False, - save_dirname="understand_sentiment_stacked_lstm.inference.model" + save_dirname="understand_sentiment_stacked_lstm.inference.model", ) def test_stacked_lstm_cpu_parallel(self): with self.new_program_scope(): - main(self.word_dict, - net_method=stacked_lstm_net, - use_cuda=False, - parallel=True) + main( + self.word_dict, + net_method=stacked_lstm_net, + use_cuda=False, + parallel=True, + ) def test_conv_gpu(self): with self.new_program_scope(): - main(self.word_dict, - net_method=convolution_net, - use_cuda=True, - save_dirname="understand_sentiment_conv.inference.model") + main( + self.word_dict, + net_method=convolution_net, + use_cuda=True, + save_dirname="understand_sentiment_conv.inference.model", + ) def test_conv_gpu_parallel(self): with self.new_program_scope(): - main(self.word_dict, - net_method=convolution_net, - use_cuda=True, - parallel=True) + main( + self.word_dict, + net_method=convolution_net, + use_cuda=True, + parallel=True, + ) @unittest.skip(reason="make CI faster") def test_stacked_lstm_gpu(self): @@ -342,30 +362,36 @@ class TestUnderstandSentiment(unittest.TestCase): self.word_dict, net_method=stacked_lstm_net, use_cuda=True, - save_dirname="understand_sentiment_stacked_lstm.inference.model" + save_dirname="understand_sentiment_stacked_lstm.inference.model", ) def test_stacked_lstm_gpu_parallel(self): with self.new_program_scope(): - main(self.word_dict, - net_method=stacked_lstm_net, - use_cuda=True, - parallel=True) + main( + self.word_dict, + net_method=stacked_lstm_net, + use_cuda=True, + parallel=True, + ) @unittest.skip(reason='make CI faster') def test_dynrnn_lstm_gpu(self): with self.new_program_scope(): - main(self.word_dict, - net_method=dyn_rnn_lstm, - use_cuda=True, - 
parallel=False) + main( + self.word_dict, + net_method=dyn_rnn_lstm, + use_cuda=True, + parallel=False, + ) def test_dynrnn_lstm_gpu_parallel(self): with self.new_program_scope(): - main(self.word_dict, - net_method=dyn_rnn_lstm, - use_cuda=True, - parallel=True) + main( + self.word_dict, + net_method=dyn_rnn_lstm, + use_cuda=True, + parallel=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 788e8d12d836a76656d1c9c9b100c163c5abbb13..338008c48d576be00e68aeb5aea7428990f98acd 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -32,7 +32,8 @@ def convert_uint16_to_float(in_list): in_list = numpy.asarray(in_list) out = numpy.vectorize( lambda x: struct.unpack('> 16)) + numpy.uint16(struct.unpack('> 16) + ) out = numpy.reshape(out, in_list.shape).view(numpy.uint16) return out @@ -73,15 +75,18 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16): sgd_optimizer, amp_lists=amp.bf16.AutoMixedPrecisionListsBF16(), use_bf16_guard=False, - use_pure_bf16=pure_bf16) - sgd_optimizer.minimize(avg_cost, - startup_program=fluid.default_startup_program()) + use_pure_bf16=pure_bf16, + ) + sgd_optimizer.minimize( + avg_cost, startup_program=fluid.default_startup_program() + ) BATCH_SIZE = 20 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) @@ -91,29 +96,33 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16): exe.run(fluid.default_startup_program()) test_prog = main_program.clone(for_test=True) if pure_bf16: - sgd_optimizer.amp_init(exe.place, - test_program=test_prog, - use_bf16_test=True) + sgd_optimizer.amp_init( + exe.place, test_program=test_prog, use_bf16_test=True + ) PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): - avg_loss_value, = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + (avg_loss_value,) = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost] + ) if avg_loss_value.dtype == numpy.uint16: avg_loss_value = convert_uint16_to_float(avg_loss_value) if avg_loss_value[0] < 10.0: if save_dirname is not None: - paddle.static.save_inference_model(save_dirname, [x], - [y_predict], - exe, - clip_extra=False) + paddle.static.save_inference_model( + save_dirname, + [x], + [y_predict], + exe, + clip_extra=False, + ) return if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") - raise AssertionError("Fit a line cost is too large, {0:2.2}".format( - avg_loss_value[0])) + raise AssertionError( + "Fit a line cost is too large, {0:2.2}".format(avg_loss_value[0]) + ) if is_local: train_loop(fluid.default_main_program()) @@ -132,8 +141,9 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -153,30 +163,38 @@ 
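The uint16 helpers reformatted above appear partially garbled in this copy of the patch (the `'<f'`/`'<I'` struct format strings were stripped); they rely on bfloat16 being the upper half of the IEEE-754 float32 bit pattern. A standalone scalar sketch of that round trip, with illustrative names and an arbitrary sample value:

.. code-block:: python

    import struct
    import numpy as np

    def float_to_uint16(x):
        # keep only the high 16 bits of the float32 representation
        bits = struct.unpack('<I', struct.pack('<f', float(x)))[0]
        return np.uint16(bits >> 16)

    def uint16_to_float(u):
        # shift the stored bits back into float32 position
        return struct.unpack('<f', struct.pack('<I', int(u) << 16))[0]

    u = float_to_uint16(3.1415926)
    print(u, uint16_to_float(u))            # round trip drops mantissa bits: ~3.140625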
def infer(use_cuda, save_dirname=None, use_bf16=False): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = paddle.static.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(save_dirname, exe) # The input's dimension should be 2-D and the second dim is 13 # The input data should be >= 0 batch_size = 10 - test_reader = paddle.batch(paddle.dataset.uci_housing.test(), - batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size + ) test_data = next(test_reader()) - test_feat = numpy.array([data[0] - for data in test_data]).astype("float32") + test_feat = numpy.array([data[0] for data in test_data]).astype( + "float32" + ) if use_bf16: test_feat = convert_float_to_uint16(test_feat) - test_label = numpy.array([data[1] - for data in test_data]).astype("float32") + test_label = numpy.array([data[1] for data in test_data]).astype( + "float32" + ) assert feed_target_names[0] == 'x' - results = exe.run(inference_program, - feed={feed_target_names[0]: numpy.array(test_feat)}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: numpy.array(test_feat)}, + fetch_list=fetch_targets, + ) if results[0].dtype == numpy.uint16: results[0] = convert_uint16_to_float(results[0]) print("infer shape: ", results[0].shape) @@ -201,7 +219,6 @@ def main(use_cuda, is_local=True, use_bf16=False, pure_bf16=False): class TestFitALineBase(unittest.TestCase): - @contextlib.contextmanager def program_scope_guard(self): prog = fluid.Program() @@ -213,7 +230,6 @@ class TestFitALineBase(unittest.TestCase): class TestFitALine(TestFitALineBase): - def test_cpu(self): with self.program_scope_guard(): main(use_cuda=False) @@ -223,10 +239,10 @@ class TestFitALine(TestFitALineBase): main(use_cuda=True) -@unittest.skipIf(not fluid.core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not fluid.core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestFitALineBF16(TestFitALineBase): - def test_bf16(self): with self.program_scope_guard(): main(use_cuda=False, use_bf16=True) diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index c8a4861370a65fd4f689d2f19a9e245a1e9d9d60..ac4dbaec22682f9f79d589c8ee78b8eab3fe3c16 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -26,21 +26,18 @@ paddle.enable_static() def resnet_cifar10(input, depth=32): - - def conv_bn_layer(input, - ch_out, - filter_size, - stride, - padding, - act='relu', - bias_attr=False): - tmp = fluid.layers.conv2d(input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=bias_attr) + def conv_bn_layer( + input, ch_out, filter_size, stride, padding, act='relu', bias_attr=False + ): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr, + ) return fluid.layers.batch_norm(input=tmp, act=act) def shortcut(input, ch_in, ch_out, stride): @@ -63,33 +60,31 @@ def resnet_cifar10(input, depth=32): assert 
(depth - 2) % 6 == 0 n = (depth - 2) // 6 - conv1 = conv_bn_layer(input=input, - ch_out=16, - filter_size=3, - stride=1, - padding=1) + conv1 = conv_bn_layer( + input=input, ch_out=16, filter_size=3, stride=1, padding=1 + ) res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d(input=res3, - pool_size=8, - pool_type='avg', - pool_stride=1) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1 + ) return pool def vgg16_bn_drop(input): - def conv_block(input, num_filter, groups, dropouts): - return fluid.nets.img_conv_group(input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') + return fluid.nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max', + ) conv1 = conv_block(input, 64, 2, [0.3, 0]) conv2 = conv_block(conv1, 128, 2, [0.4, 0]) @@ -135,12 +130,16 @@ def train(net_type, use_cuda, save_dirname, is_local): BATCH_SIZE = 128 PASS_NUM = 1 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=128 * 10 + ), + batch_size=BATCH_SIZE, + ) - test_reader = paddle.batch(paddle.dataset.cifar.test10(), - batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) @@ -157,9 +156,11 @@ def train(net_type, use_cuda, save_dirname, is_local): acc_list = [] avg_loss_list = [] for tid, test_data in enumerate(test_reader()): - loss_t, acc_t = exe.run(program=test_program, - feed=feeder.feed(test_data), - fetch_list=[avg_cost, acc]) + loss_t, acc_t = exe.run( + program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost, acc], + ) if math.isnan(float(loss_t)): sys.exit("got NaN loss, training failed.") acc_list.append(float(acc_t)) @@ -170,13 +171,18 @@ def train(net_type, use_cuda, save_dirname, is_local): avg_loss_value = numpy.array(avg_loss_list).mean() print( - 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}' - .format(pass_id, batch_id + 1, float(avg_loss_value), - float(acc_value))) + 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.format( + pass_id, + batch_id + 1, + float(avg_loss_value), + float(acc_value), + ) + ) if acc_value > 0.01: # Low threshold for speeding up CI - fluid.io.save_inference_model(save_dirname, ["pixel"], - [predict], exe) + fluid.io.save_inference_model( + save_dirname, ["pixel"], [predict], exe + ) return if is_local: @@ -196,8 +202,9 @@ def train(net_type, use_cuda, save_dirname, is_local): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -217,8 +224,11 @@ def infer(use_cuda, 
save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [0, 1.0]. @@ -227,14 +237,21 @@ def infer(use_cuda, save_dirname=None): # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) print("infer results: ", results[0]) - fluid.io.save_inference_model(save_dirname, feed_target_names, - fetch_targets, exe, inference_program) + fluid.io.save_inference_model( + save_dirname, + feed_target_names, + fetch_targets, + exe, + inference_program, + ) def main(net_type, use_cuda, is_local=True): @@ -244,7 +261,8 @@ def main(net_type, use_cuda, is_local=True): # Directory for saving the trained model temp_dir = tempfile.TemporaryDirectory() save_dirname = os.path.join( - temp_dir.name, "image_classification_" + net_type + ".inference.model") + temp_dir.name, "image_classification_" + net_type + ".inference.model" + ) train(net_type, use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) @@ -252,7 +270,6 @@ def main(net_type, use_cuda, is_local=True): class TestImageClassification(unittest.TestCase): - def test_vgg_cuda(self): with self.scope_prog_guard(): main('vgg', use_cuda=True) diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index 16e4a2827779d14562dea9829d806cc3787c586a..69585b64c895145e7598a6d913ccdee415ba433a 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -50,26 +50,32 @@ def load_parameter(file_name, h, w): return np.fromfile(f, dtype=np.float32).reshape(h, w) -def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, - **ignored): +def db_lstm( + word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, **ignored +): # 8 features - predicate_embedding = fluid.layers.embedding(input=predicate, - size=[pred_dict_len, word_dim], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='vemb') - - mark_embedding = fluid.layers.embedding(input=mark, - size=[mark_dict_len, mark_dim], - dtype='float32', - is_sparse=IS_SPARSE) + predicate_embedding = fluid.layers.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='vemb', + ) + + mark_embedding = fluid.layers.embedding( + input=mark, + size=[mark_dict_len, mark_dim], + dtype='float32', + is_sparse=IS_SPARSE, + ) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ - fluid.layers.embedding(size=[word_dict_len, word_dim], - input=x, - param_attr=fluid.ParamAttr(name=embedding_name, - trainable=False)) + fluid.layers.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr(name=embedding_name, trainable=False), + ) for x in 
word_input ] emb_layers.append(predicate_embedding) @@ -81,141 +87,172 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, hidden_0 = fluid.layers.sums(input=hidden_0_layers) - lstm_0 = fluid.layers.dynamic_lstm(input=hidden_0, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid') + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + ) # stack L-LSTM and R-LSTM with direct edges input_tmp = [hidden_0, lstm_0] for i in range(1, depth): - mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim) - ]) - - lstm = fluid.layers.dynamic_lstm(input=mix_hidden, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid', - is_reverse=((i % 2) == 1)) + mix_hidden = fluid.layers.sums( + input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim), + ] + ) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1), + ) input_tmp = [mix_hidden, lstm] - feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') - ]) + feature_out = fluid.layers.sums( + input=[ + fluid.layers.fc( + input=input_tmp[0], size=label_dict_len, act='tanh' + ), + fluid.layers.fc( + input=input_tmp[1], size=label_dict_len, act='tanh' + ), + ] + ) return feature_out def train(use_cuda, save_dirname=None, is_local=True): # define network topology - word = fluid.layers.data(name='word_data', - shape=[1], - dtype='int64', - lod_level=1) - predicate = fluid.layers.data(name='verb_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_n2 = fluid.layers.data(name='ctx_n2_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_n1 = fluid.layers.data(name='ctx_n1_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_0 = fluid.layers.data(name='ctx_0_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_p1 = fluid.layers.data(name='ctx_p1_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_p2 = fluid.layers.data(name='ctx_p2_data', - shape=[1], - dtype='int64', - lod_level=1) - mark = fluid.layers.data(name='mark_data', - shape=[1], - dtype='int64', - lod_level=1) + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1 + ) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1 + ) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1 + ) feature_out = db_lstm(**locals()) - target = fluid.layers.data(name='target', - shape=[1], - dtype='int64', - lod_level=1) - crf_cost = fluid.layers.linear_chain_crf(input=feature_out, - 
label=target, - param_attr=fluid.ParamAttr( - name='crfw', - learning_rate=mix_hidden_lr)) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1 + ) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr(name='crfw', learning_rate=mix_hidden_lr), + ) avg_cost = paddle.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay(learning_rate=0.01, - decay_steps=100000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True, + ) + ) sgd_optimizer.minimize(avg_cost) # TODO(qiao) # add dependency track and move this config before optimizer crf_decode = fluid.layers.crf_decoding( - input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + input=feature_out, param_attr=fluid.ParamAttr(name='crfw') + ) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.conll05.test(), buf_size=8192), - batch_size=BATCH_SIZE) + train_data = paddle.batch( + paddle.reader.shuffle(paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=[ - word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target - ], - place=place) + feeder = fluid.DataFeeder( + feed_list=[ + word, + ctx_n2, + ctx_n1, + ctx_0, + ctx_p1, + ctx_p2, + predicate, + mark, + target, + ], + place=place, + ) exe = fluid.Executor(place) def train_loop(main_program): exe.run(fluid.default_startup_program()) - embedding_param = fluid.global_scope().find_var( - embedding_name).get_tensor() + embedding_param = ( + fluid.global_scope().find_var(embedding_name).get_tensor() + ) embedding_param.set( load_parameter(conll05.get_embedding(), word_dict_len, word_dim), - place) + place, + ) start_time = time.time() batch_id = 0 for pass_id in range(PASS_NUM): for data in train_data(): - cost = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + cost = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost] + ) cost = cost[0] if batch_id % 10 == 0: print("avg_cost:" + str(cost)) if batch_id != 0: - print("second per batch: " + - str((time.time() - start_time) / batch_id)) + print( + "second per batch: " + + str((time.time() - start_time) / batch_id) + ) # Set the threshold low to speed up the CI test if float(cost) < 80.0: if save_dirname is not None: # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model( - save_dirname, [ - 'word_data', 'verb_data', 'ctx_n2_data', - 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', - 'ctx_p2_data', 'mark_data' - ], [feature_out], exe) + save_dirname, + [ + 'word_data', + 'verb_data', + 'ctx_n2_data', + 'ctx_n1_data', + 'ctx_0_data', + 'ctx_p1_data', + 'ctx_p2_data', + 'mark_data', + ], + [feature_out], + exe, + ) return batch_id = batch_id + 1 @@ -241,8 +278,9 @@ def train(use_cuda, save_dirname=None, is_local=True): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -262,8 +300,11 @@ def 
infer(use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) # Setup input by creating LoDTensor to represent sequence of words. # Here each word is the basic element of the LoDTensor and the shape of @@ -278,46 +319,30 @@ def infer(use_cuda, save_dirname=None): recursive_seq_lens = [[3, 4, 2]] base_shape = [1] # The range of random integers is [low, high] - word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - pred = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=pred_dict_len - 1) - ctx_n2 = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - ctx_n1 = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - ctx_0 = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - ctx_p1 = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - ctx_p2 = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=word_dict_len - 1) - mark = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=mark_dict_len - 1) + word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + pred = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=pred_dict_len - 1 + ) + ctx_n2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + ctx_n1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + ctx_0 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + ctx_p1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + ctx_p2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 + ) + mark = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=mark_dict_len - 1 + ) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
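The eight feeds built above must be passed under the exact names in `feed_target_names`, which the next hunk does index by index. An equivalent, slightly terser fragment, assuming the tensors were created in the same order as the names:

.. code-block:: python

    # `word` ... `mark`, `exe`, `inference_program` and `fetch_targets` are the
    # objects defined in the surrounding test; this fragment only restructures the feed.
    inputs = [word, pred, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark]
    feed = dict(zip(feed_target_names, inputs))
    results = exe.run(
        inference_program,
        feed=feed,
        fetch_list=fetch_targets,
        return_numpy=False,
    )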
@@ -330,19 +355,21 @@ def infer(use_cuda, save_dirname=None): assert feed_target_names[6] == 'ctx_p2_data' assert feed_target_names[7] == 'mark_data' - results = exe.run(inference_program, - feed={ - feed_target_names[0]: word, - feed_target_names[1]: pred, - feed_target_names[2]: ctx_n2, - feed_target_names[3]: ctx_n1, - feed_target_names[4]: ctx_0, - feed_target_names[5]: ctx_p1, - feed_target_names[6]: ctx_p2, - feed_target_names[7]: mark - }, - fetch_list=fetch_targets, - return_numpy=False) + results = exe.run( + inference_program, + feed={ + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark, + }, + fetch_list=fetch_targets, + return_numpy=False, + ) print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) @@ -354,8 +381,9 @@ def main(use_cuda, is_local=True): temp_dir = tempfile.TemporaryDirectory() # Directory for saving the trained model - save_dirname = os.path.join(temp_dir.name, - "label_semantic_roles.inference.model") + save_dirname = os.path.join( + temp_dir.name, "label_semantic_roles.inference.model" + ) train(use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) @@ -364,7 +392,6 @@ def main(use_cuda, is_local=True): class TestLabelSemanticRoles(unittest.TestCase): - def test_cuda(self): with self.scope_prog_guard(): main(use_cuda=True) diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index d97b474d6abc8e90e442351a825d35f373e08f43..4ae6462f02f1061bdc8a675870cf7af360b3d2f8 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -40,15 +40,16 @@ decoder_size = hidden_dim def encoder(is_sparse): # encoder - src_word_id = pd.data(name="src_word_id", - shape=[1], - dtype='int64', - lod_level=1) - src_embedding = pd.embedding(input=src_word_id, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=is_sparse, - param_attr=fluid.ParamAttr(name='vemb')) + src_word_id = pd.data( + name="src_word_id", shape=[1], dtype='int64', lod_level=1 + ) + src_embedding = pd.embedding( + input=src_word_id, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb'), + ) fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4) @@ -58,27 +59,28 @@ def encoder(is_sparse): def decoder_train(context, is_sparse): # decoder - trg_language_word = pd.data(name="target_language_word", - shape=[1], - dtype='int64', - lod_level=1) - trg_embedding = pd.embedding(input=trg_language_word, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=is_sparse, - param_attr=fluid.ParamAttr(name='vemb')) + trg_language_word = pd.data( + name="target_language_word", shape=[1], dtype='int64', lod_level=1 + ) + trg_embedding = pd.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb'), + ) rnn = pd.DynamicRNN() with rnn.block(): current_word = rnn.step_input(trg_embedding) pre_state = rnn.memory(init=context) - current_state = pd.fc(input=[current_word, pre_state], - size=decoder_size, - act='tanh') + current_state = pd.fc( + input=[current_word, 
pre_state], size=decoder_size, act='tanh' + ) - current_score = pd.fc(input=current_state, - size=target_dict_dim, - act='softmax') + current_score = pd.fc( + input=current_state, size=target_dict_dim, act='softmax' + ) rnn.update_memory(pre_state, current_state) rnn.output(current_score) @@ -99,10 +101,9 @@ def decoder_decode(context, is_sparse): scores_array = pd.create_array('float32') init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2) - init_scores = pd.data(name="init_scores", - shape=[1], - dtype="float32", - lod_level=2) + init_scores = pd.data( + name="init_scores", shape=[1], dtype="float32", lod_level=2 + ) pd.array_write(init_ids, array=ids_array, i=counter) pd.array_write(init_scores, array=scores_array, i=counter) @@ -118,32 +119,38 @@ def decoder_decode(context, is_sparse): # expand the recursive_sequence_lengths of pre_state to be the same with pre_score pre_state_expanded = pd.sequence_expand(pre_state, pre_score) - pre_ids_emb = pd.embedding(input=pre_ids, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=is_sparse) + pre_ids_emb = pd.embedding( + input=pre_ids, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + ) # use rnn unit to update rnn - current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], - size=decoder_size, - act='tanh') + current_state = pd.fc( + input=[pre_state_expanded, pre_ids_emb], + size=decoder_size, + act='tanh', + ) current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score) # use score to do beam search - current_score = pd.fc(input=current_state_with_lod, - size=target_dict_dim, - act='softmax') + current_score = pd.fc( + input=current_state_with_lod, size=target_dict_dim, act='softmax' + ) topk_scores, topk_indices = pd.topk(current_score, k=beam_size) # calculate accumulated scores after topk to reduce computation cost - accu_scores = pd.elementwise_add(x=pd.log(topk_scores), - y=pd.reshape(pre_score, shape=[-1]), - axis=0) - selected_ids, selected_scores = pd.beam_search(pre_ids, - pre_score, - topk_indices, - accu_scores, - beam_size, - end_id=10, - level=0) + accu_scores = pd.elementwise_add( + x=pd.log(topk_scores), y=pd.reshape(pre_score, shape=[-1]), axis=0 + ) + selected_ids, selected_scores = pd.beam_search( + pre_ids, + pre_score, + topk_indices, + accu_scores, + beam_size, + end_id=10, + level=0, + ) pd.increment(x=counter, value=1, in_place=True) @@ -159,7 +166,8 @@ def decoder_decode(context, is_sparse): pd.logical_and(x=length_cond, y=finish_cond, out=cond) translation_ids, translation_scores = pd.beam_search_decode( - ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10) + ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10 + ) # return init_ids, init_scores @@ -173,25 +181,31 @@ def train_main(use_cuda, is_sparse, is_local=True): context = encoder(is_sparse) rnn_out = decoder_train(context, is_sparse) - label = pd.data(name="target_language_next_word", - shape=[1], - dtype='int64', - lod_level=1) + label = pd.data( + name="target_language_next_word", shape=[1], dtype='int64', lod_level=1 + ) cost = pd.cross_entropy(input=rnn_out, label=label) avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad( learning_rate=1e-4, regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) optimizer.minimize(avg_cost) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) + train_data = paddle.batch( + 
paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000 + ), + batch_size=batch_size, + ) feed_order = [ - 'src_word_id', 'target_language_word', 'target_language_next_word' + 'src_word_id', + 'target_language_word', + 'target_language_next_word', ] exe = Executor(place) @@ -207,12 +221,18 @@ def train_main(use_cuda, is_sparse, is_local=True): batch_id = 0 for pass_id in range(1): for data in train_data(): - outs = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + outs = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost] + ) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print( + 'pass_id=' + + str(pass_id) + + ' batch=' + + str(batch_id) + + " avg_cost=" + + str(avg_cost_val) + ) if batch_id > 3: break batch_id += 1 @@ -234,8 +254,9 @@ def train_main(use_cuda, is_sparse, is_local=True): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -254,21 +275,27 @@ def decode_main(use_cuda, is_sparse): exe.run(framework.default_startup_program()) init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64') - init_scores_data = np.array([1. for _ in range(batch_size)], - dtype='float32') + init_scores_data = np.array( + [1.0 for _ in range(batch_size)], dtype='float32' + ) init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) init_recursive_seq_lens = [1] * batch_size init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens] - init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens, - place) - init_scores = fluid.create_lod_tensor(init_scores_data, - init_recursive_seq_lens, place) + init_ids = fluid.create_lod_tensor( + init_ids_data, init_recursive_seq_lens, place + ) + init_scores = fluid.create_lod_tensor( + init_scores_data, init_recursive_seq_lens, place + ) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000 + ), + batch_size=batch_size, + ) feed_order = ['src_word_id'] feed_list = [ @@ -286,7 +313,8 @@ def decode_main(use_cuda, is_sparse): framework.default_main_program(), feed=feed_dict, fetch_list=[translation_ids, translation_scores], - return_numpy=False) + return_numpy=False, + ) print(result_ids.recursive_sequence_lengths()) break @@ -306,8 +334,9 @@ def scope_prog_guard(): def inject_test_train(use_cuda, is_sparse): - f_name = 'test_{0}_{1}_train'.format('cuda' if use_cuda else 'cpu', - 'sparse' if is_sparse else 'dense') + f_name = 'test_{0}_{1}_train'.format( + 'cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 'dense' + ) def f(*args): with scope_prog_guard(): @@ -317,8 +346,9 @@ def inject_test_train(use_cuda, is_sparse): def inject_test_decode(use_cuda, is_sparse, decorator=None): - f_name = 'test_{0}_{1}_decode'.format('cuda' if use_cuda else 'cpu', - 'sparse' if is_sparse else 'dense') + f_name = 'test_{0}_{1}_decode'.format( + 'cuda' if use_cuda else 'cpu', 'sparse' if is_sparse else 
'dense' + ) def f(*args): with scope_prog_guard(): @@ -340,11 +370,12 @@ for _use_cuda_ in (False, True): _decorator_ = None if _use_cuda_: _decorator_ = unittest.skip( - reason='Beam Search does not support CUDA!') + reason='Beam Search does not support CUDA!' + ) - inject_test_decode(is_sparse=_is_sparse_, - use_cuda=_use_cuda_, - decorator=_decorator_) + inject_test_decode( + is_sparse=_is_sparse_, use_cuda=_use_cuda_, decorator=_decorator_ + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index f2afcdefa0626b7e2d1da451adc57031a8e31afd..fd99eb04d99fc5b46ce344c0afb4b69698120172 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -43,30 +43,36 @@ def mlp(img, label): def conv_net(img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) return loss_net(conv_pool_2, label) -def train(nn_type, - use_cuda, - parallel, - save_dirname=None, - save_full_dirname=None, - model_filename=None, - params_filename=None, - is_local=True): +def train( + nn_type, + use_cuda, + parallel, + save_dirname=None, + save_full_dirname=None, + model_filename=None, + params_filename=None, + is_local=True, +): if use_cuda and not fluid.core.is_compiled_with_cuda(): return img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') @@ -91,11 +97,13 @@ def train(nn_type, exe = fluid.Executor(place) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=BATCH_SIZE) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE, + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE + ) feeder = fluid.DataFeeder(feed_list=[img, label], place=place) def train_loop(main_program): @@ -113,7 +121,8 @@ def train(nn_type, acc_np, avg_loss_np = exe.run( program=test_program, feed=feeder.feed(test_data), - fetch_list=[acc, avg_loss]) + fetch_list=[acc, avg_loss], + ) acc_set.append(float(acc_np)) avg_loss_set.append(float(avg_loss_np)) # get test acc and loss @@ -123,23 +132,33 @@ def train(nn_type, # Smaller value to increase CI speed if save_dirname is not None: fluid.io.save_inference_model( - save_dirname, ["img"], [prediction], + save_dirname, + ["img"], + [prediction], exe, model_filename=model_filename, - params_filename=params_filename) + params_filename=params_filename, + ) if save_full_dirname is not None: fluid.io.save_inference_model( - save_full_dirname, [], [], + save_full_dirname, + [], + [], exe, model_filename=model_filename, params_filename=params_filename, - export_for_deployment=False) + export_for_deployment=False, + ) return else: print( - 'PassID {0:1}, BatchID {1:04}, Test 
Loss {2:2.2}, Acc {3:2.2}' - .format(pass_id, batch_id + 1, float(avg_loss_val), - float(acc_val))) + 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.format( + pass_id, + batch_id + 1, + float(avg_loss_val), + float(acc_val), + ) + ) if math.isnan(float(avg_loss_val)): sys.exit("got NaN loss, training failed.") raise AssertionError("Loss of recognize digits is too large") @@ -161,18 +180,18 @@ def train(nn_type, t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": train_loop(t.get_trainer_program()) -def infer(use_cuda, - save_dirname=None, - model_filename=None, - params_filename=None): +def infer( + use_cuda, save_dirname=None, model_filename=None, params_filename=None +): if save_dirname is None: return @@ -185,22 +204,28 @@ def infer(use_cuda, # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe, - model_filename, - params_filename) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + save_dirname, exe, model_filename, params_filename + ) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. batch_size = 1 tensor_img = numpy.random.uniform( - -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32") + -1.0, 1.0, [batch_size, 1, 28, 28] + ).astype("float32") # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
- results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) print("infer results: ", results[0]) @@ -217,17 +242,21 @@ def main(use_cuda, parallel, nn_type, combine): params_filename = "__params_combined__" # call train() with is_local argument to run distributed train - train(nn_type=nn_type, - use_cuda=use_cuda, - parallel=parallel, - save_dirname=save_dirname, - save_full_dirname=save_full_dirname, - model_filename=model_filename, - params_filename=params_filename) - infer(use_cuda=use_cuda, - save_dirname=save_dirname, - model_filename=model_filename, - params_filename=params_filename) + train( + nn_type=nn_type, + use_cuda=use_cuda, + parallel=parallel, + save_dirname=save_dirname, + save_full_dirname=save_full_dirname, + model_filename=model_filename, + params_filename=params_filename, + ) + infer( + use_cuda=use_cuda, + save_dirname=save_dirname, + model_filename=model_filename, + params_filename=params_filename, + ) class TestRecognizeDigits(unittest.TestCase): @@ -235,7 +264,6 @@ class TestRecognizeDigits(unittest.TestCase): def inject_test_method(use_cuda, parallel, nn_type, combine): - def __impl__(self): prog = fluid.Program() startup_prog = fluid.Program() @@ -244,9 +272,12 @@ def inject_test_method(use_cuda, parallel, nn_type, combine): with fluid.program_guard(prog, startup_prog): main(use_cuda, parallel, nn_type, combine) - fn = 'test_{0}_{1}_{2}_{3}'.format(nn_type, 'cuda' if use_cuda else 'cpu', - 'parallel' if parallel else 'normal', - 'combine' if combine else 'separate') + fn = 'test_{0}_{1}_{2}_{3}'.format( + nn_type, + 'cuda' if use_cuda else 'cpu', + 'parallel' if parallel else 'normal', + 'combine' if combine else 'separate', + ) setattr(TestRecognizeDigits, fn, __impl__) @@ -255,7 +286,7 @@ def inject_all_tests(): for use_cuda in (False, True): if use_cuda and not core.is_compiled_with_cuda(): continue - for parallel in (False, ): + for parallel in (False,): for nn_type in ('mlp', 'conv'): inject_test_method(use_cuda, parallel, nn_type, True) diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 3d9e14a7a1869912f5c3deeefe92e130d543ea3a..132ee1b5214e21f181f8f2442eed32ca462b8f66 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -40,11 +40,13 @@ def get_usr_combined_features(): uid = layers.data(name='user_id', shape=[1], dtype='int64') - usr_emb = layers.embedding(input=uid, - dtype='float32', - size=[USR_DICT_SIZE, 32], - param_attr='user_table', - is_sparse=IS_SPARSE) + usr_emb = layers.embedding( + input=uid, + dtype='float32', + size=[USR_DICT_SIZE, 32], + param_attr='user_table', + is_sparse=IS_SPARSE, + ) usr_fc = layers.fc(input=usr_emb, size=32) @@ -52,35 +54,42 @@ def get_usr_combined_features(): usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64') - usr_gender_emb = layers.embedding(input=usr_gender_id, - size=[USR_GENDER_DICT_SIZE, 16], - param_attr='gender_table', - is_sparse=IS_SPARSE) + usr_gender_emb = layers.embedding( + input=usr_gender_id, + size=[USR_GENDER_DICT_SIZE, 16], + param_attr='gender_table', + is_sparse=IS_SPARSE, + ) usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) usr_age_id = layers.data(name='age_id', shape=[1], 
dtype="int64") - usr_age_emb = layers.embedding(input=usr_age_id, - size=[USR_AGE_DICT_SIZE, 16], - is_sparse=IS_SPARSE, - param_attr='age_table') + usr_age_emb = layers.embedding( + input=usr_age_id, + size=[USR_AGE_DICT_SIZE, 16], + is_sparse=IS_SPARSE, + param_attr='age_table', + ) usr_age_fc = layers.fc(input=usr_age_emb, size=16) USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64") - usr_job_emb = layers.embedding(input=usr_job_id, - size=[USR_JOB_DICT_SIZE, 16], - param_attr='job_table', - is_sparse=IS_SPARSE) + usr_job_emb = layers.embedding( + input=usr_job_id, + size=[USR_JOB_DICT_SIZE, 16], + param_attr='job_table', + is_sparse=IS_SPARSE, + ) usr_job_fc = layers.fc(input=usr_job_emb, size=16) concat_embed = layers.concat( - input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) + input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1 + ) usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") @@ -93,47 +102,51 @@ def get_mov_combined_features(): mov_id = layers.data(name='movie_id', shape=[1], dtype='int64') - mov_emb = layers.embedding(input=mov_id, - dtype='float32', - size=[MOV_DICT_SIZE, 32], - param_attr='movie_table', - is_sparse=IS_SPARSE) + mov_emb = layers.embedding( + input=mov_id, + dtype='float32', + size=[MOV_DICT_SIZE, 32], + param_attr='movie_table', + is_sparse=IS_SPARSE, + ) mov_fc = layers.fc(input=mov_emb, size=32) CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) - category_id = layers.data(name='category_id', - shape=[1], - dtype='int64', - lod_level=1) + category_id = layers.data( + name='category_id', shape=[1], dtype='int64', lod_level=1 + ) - mov_categories_emb = layers.embedding(input=category_id, - size=[CATEGORY_DICT_SIZE, 32], - is_sparse=IS_SPARSE) + mov_categories_emb = layers.embedding( + input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE + ) - mov_categories_hidden = layers.sequence_pool(input=mov_categories_emb, - pool_type="sum") + mov_categories_hidden = layers.sequence_pool( + input=mov_categories_emb, pool_type="sum" + ) MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) - mov_title_id = layers.data(name='movie_title', - shape=[1], - dtype='int64', - lod_level=1) + mov_title_id = layers.data( + name='movie_title', shape=[1], dtype='int64', lod_level=1 + ) - mov_title_emb = layers.embedding(input=mov_title_id, - size=[MOV_TITLE_DICT_SIZE, 32], - is_sparse=IS_SPARSE) + mov_title_emb = layers.embedding( + input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE + ) - mov_title_conv = nets.sequence_conv_pool(input=mov_title_emb, - num_filters=32, - filter_size=3, - act="tanh", - pool_type="sum") + mov_title_conv = nets.sequence_conv_pool( + input=mov_title_emb, + num_filters=32, + filter_size=3, + act="tanh", + pool_type="sum", + ) concat_embed = layers.concat( - input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) + input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1 + ) # FIXME(dzh) : need tanh operator mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") @@ -169,15 +182,23 @@ def train(use_cuda, save_dirname, is_local=True): exe = Executor(place) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.movielens.train(), buf_size=8192), - batch_size=BATCH_SIZE) - test_reader = paddle.batch(paddle.dataset.movielens.test(), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + 
paddle.reader.shuffle(paddle.dataset.movielens.train(), buf_size=8192), + batch_size=BATCH_SIZE, + ) + test_reader = paddle.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE + ) feed_order = [ - 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', - 'movie_title', 'score' + 'user_id', + 'gender_id', + 'age_id', + 'job_id', + 'movie_id', + 'category_id', + 'movie_title', + 'score', ] def train_loop(main_program): @@ -192,16 +213,20 @@ def train(use_cuda, save_dirname, is_local=True): for pass_id in range(PASS_NUM): for batch_id, data in enumerate(train_reader()): # train a mini-batch - outs = exe.run(program=main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + outs = exe.run( + program=main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost], + ) out = np.array(outs[0]) if (batch_id + 1) % 10 == 0: avg_cost_set = [] for test_data in test_reader(): - avg_cost_np = exe.run(program=test_program, - feed=feeder.feed(test_data), - fetch_list=[avg_cost]) + avg_cost_np = exe.run( + program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost], + ) avg_cost_set.append(avg_cost_np[0]) break # test only 1 segment for speeding up CI @@ -211,10 +236,19 @@ def train(use_cuda, save_dirname, is_local=True): # if avg_cost less than 6.0, we think our code is good. if save_dirname is not None: fluid.io.save_inference_model( - save_dirname, [ - "user_id", "gender_id", "age_id", "job_id", - "movie_id", "category_id", "movie_title" - ], [scale_infer], exe) + save_dirname, + [ + "user_id", + "gender_id", + "age_id", + "job_id", + "movie_id", + "category_id", + "movie_title", + ], + [scale_infer], + exe, + ) return if math.isnan(float(out[0])): @@ -237,8 +271,9 @@ def train(use_cuda, save_dirname, is_local=True): t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -258,8 +293,11 @@ def infer(use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) # Use the first data from paddle.dataset.movielens.test() as input assert feed_target_names[0] == "user_id" @@ -288,27 +326,32 @@ def infer(use_cuda, save_dirname=None): assert feed_target_names[5] == "category_id" category_id = fluid.create_lod_tensor( - [np.array([10, 8, 9], dtype='int64')], [[3]], place) + [np.array([10, 8, 9], dtype='int64')], [[3]], place + ) assert feed_target_names[6] == "movie_title" movie_title = fluid.create_lod_tensor( - [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], [[5]], - place) + [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], + [[5]], + place, + ) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
- results = exe.run(inference_program, - feed={ - feed_target_names[0]: user_id, - feed_target_names[1]: gender_id, - feed_target_names[2]: age_id, - feed_target_names[3]: job_id, - feed_target_names[4]: movie_id, - feed_target_names[5]: category_id, - feed_target_names[6]: movie_title - }, - fetch_list=fetch_targets, - return_numpy=False) + results = exe.run( + inference_program, + feed={ + feed_target_names[0]: user_id, + feed_target_names[1]: gender_id, + feed_target_names[2]: age_id, + feed_target_names[3]: job_id, + feed_target_names[4]: movie_id, + feed_target_names[5]: category_id, + feed_target_names[6]: movie_title, + }, + fetch_list=fetch_targets, + return_numpy=False, + ) print("inferred score: ", np.array(results[0])) @@ -318,8 +361,9 @@ def main(use_cuda): # Directory for saving the inference model temp_dir = tempfile.TemporaryDirectory() - save_dirname = os.path.join(temp_dir.name, - "recommender_system.inference.model") + save_dirname = os.path.join( + temp_dir.name, "recommender_system.inference.model" + ) train(use_cuda, save_dirname) infer(use_cuda, save_dirname) diff --git a/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py index b7de6f5da90cf0c16cbb28d790b73a022a4a378f..f896e2449759912b0861025625e0b917dadf3b49 100644 --- a/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py @@ -41,19 +41,23 @@ USE_PEEPHOLES = False def bi_lstm_encoder(input_seq, hidden_size): - input_forward_proj = fluid.layers.fc(input=input_seq, - size=hidden_size * 4, - bias_attr=True) - forward, _ = fluid.layers.dynamic_lstm(input=input_forward_proj, - size=hidden_size * 4, - use_peepholes=USE_PEEPHOLES) - input_backward_proj = fluid.layers.fc(input=input_seq, - size=hidden_size * 4, - bias_attr=True) - backward, _ = fluid.layers.dynamic_lstm(input=input_backward_proj, - size=hidden_size * 4, - is_reverse=True, - use_peepholes=USE_PEEPHOLES) + input_forward_proj = fluid.layers.fc( + input=input_seq, size=hidden_size * 4, bias_attr=True + ) + forward, _ = fluid.layers.dynamic_lstm( + input=input_forward_proj, + size=hidden_size * 4, + use_peepholes=USE_PEEPHOLES, + ) + input_backward_proj = fluid.layers.fc( + input=input_seq, size=hidden_size * 4, bias_attr=True + ) + backward, _ = fluid.layers.dynamic_lstm( + input=input_backward_proj, + size=hidden_size * 4, + is_reverse=True, + use_peepholes=USE_PEEPHOLES, + ) forward_last = fluid.layers.sequence_last_step(input=forward) backward_first = fluid.layers.sequence_first_step(input=backward) @@ -63,7 +67,6 @@ def bi_lstm_encoder(input_seq, hidden_size): # FIXME(peterzhang2029): Replace this function with the lstm_unit_op. 
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size): - def linear(inputs): return fluid.layers.fc(input=inputs, size=size, bias_attr=True) @@ -72,26 +75,28 @@ def lstm_step(x_t, hidden_t_prev, cell_t_prev, size): output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t])) cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t])) - cell_t = fluid.layers.sums(input=[ - fluid.layers.elementwise_mul(x=forget_gate, y=cell_t_prev), - fluid.layers.elementwise_mul(x=input_gate, y=cell_tilde) - ]) + cell_t = fluid.layers.sums( + input=[ + fluid.layers.elementwise_mul(x=forget_gate, y=cell_t_prev), + fluid.layers.elementwise_mul(x=input_gate, y=cell_tilde), + ] + ) - hidden_t = fluid.layers.elementwise_mul(x=output_gate, - y=fluid.layers.tanh(x=cell_t)) + hidden_t = fluid.layers.elementwise_mul( + x=output_gate, y=fluid.layers.tanh(x=cell_t) + ) return hidden_t, cell_t -def lstm_decoder_without_attention(target_embedding, decoder_boot, context, - decoder_size): +def lstm_decoder_without_attention( + target_embedding, decoder_boot, context, decoder_size +): rnn = fluid.layers.DynamicRNN() cell_init = fluid.layers.fill_constant_batch_size_like( - input=decoder_boot, - value=0.0, - shape=[-1, decoder_size], - dtype='float32') + input=decoder_boot, value=0.0, shape=[-1, decoder_size], dtype='float32' + ) cell_init.stop_gradient = False with rnn.block(): @@ -100,15 +105,15 @@ def lstm_decoder_without_attention(target_embedding, decoder_boot, context, hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True) cell_mem = rnn.memory(init=cell_init) - decoder_inputs = fluid.layers.concat(input=[context, current_word], - axis=1) + decoder_inputs = fluid.layers.concat( + input=[context, current_word], axis=1 + ) h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size) rnn.update_memory(hidden_mem, h) rnn.update_memory(cell_mem, c) - out = fluid.layers.fc(input=h, - size=target_dict_dim, - bias_attr=True, - act='softmax') + out = fluid.layers.fc( + input=h, size=target_dict_dim, bias_attr=True, act='softmax' + ) rnn.output(out) return rnn() @@ -116,43 +121,44 @@ def lstm_decoder_without_attention(target_embedding, decoder_boot, context, def seq_to_seq_net(): """Construct a seq2seq network.""" - src_word_idx = fluid.layers.data(name='source_sequence', - shape=[1], - dtype='int64', - lod_level=1) + src_word_idx = fluid.layers.data( + name='source_sequence', shape=[1], dtype='int64', lod_level=1 + ) src_embedding = fluid.layers.embedding( input=src_word_idx, size=[source_dict_dim, embedding_dim], - dtype='float32') + dtype='float32', + ) src_forward_last, src_backward_first = bi_lstm_encoder( - input_seq=src_embedding, hidden_size=encoder_size) + input_seq=src_embedding, hidden_size=encoder_size + ) encoded_vector = fluid.layers.concat( - input=[src_forward_last, src_backward_first], axis=1) + input=[src_forward_last, src_backward_first], axis=1 + ) - decoder_boot = fluid.layers.fc(input=src_backward_first, - size=decoder_size, - bias_attr=False, - act='tanh') + decoder_boot = fluid.layers.fc( + input=src_backward_first, size=decoder_size, bias_attr=False, act='tanh' + ) - trg_word_idx = fluid.layers.data(name='target_sequence', - shape=[1], - dtype='int64', - lod_level=1) + trg_word_idx = fluid.layers.data( + name='target_sequence', shape=[1], dtype='int64', lod_level=1 + ) trg_embedding = fluid.layers.embedding( input=trg_word_idx, size=[target_dict_dim, embedding_dim], - dtype='float32') - - prediction = lstm_decoder_without_attention(trg_embedding, decoder_boot, - encoded_vector, 
decoder_size) - label = fluid.layers.data(name='label_sequence', - shape=[1], - dtype='int64', - lod_level=1) + dtype='float32', + ) + + prediction = lstm_decoder_without_attention( + trg_embedding, decoder_boot, encoded_vector, decoder_size + ) + label = fluid.layers.data( + name='label_sequence', shape=[1], dtype='int64', lod_level=1 + ) cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = paddle.mean(cost) @@ -165,9 +171,12 @@ def train(use_cuda, save_dirname=None): optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000 + ), + batch_size=batch_size, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) @@ -183,20 +192,31 @@ def train(use_cuda, save_dirname=None): batch_id = 0 for pass_id in range(2): for data in train_data(): - outs = exe.run(framework.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost]) + outs = exe.run( + framework.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost], + ) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print( + 'pass_id=' + + str(pass_id) + + ' batch=' + + str(batch_id) + + " avg_cost=" + + str(avg_cost_val) + ) if math.isnan(float(avg_cost_val[0])): sys.exit("got NaN loss, training failed.") if batch_id > 3: if save_dirname is not None: fluid.io.save_inference_model( - save_dirname, ['source_sequence', 'target_sequence'], - [prediction], exe) + save_dirname, + ['source_sequence', 'target_sequence'], + [prediction], + exe, + ) return batch_id += 1 @@ -215,8 +235,11 @@ def infer(use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) # Setup input by creating LoDTensor to represent sequence of words. # Here each word is the basic element of the LoDTensor and the shape of @@ -231,28 +254,26 @@ def infer(use_cuda, save_dirname=None): recursive_seq_lens = [[4, 6]] base_shape = [1] # The range of random integers is [low, high] - word_data = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=1) - trg_word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=1) + word_data = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=1 + ) + trg_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=1 + ) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
assert feed_target_names[0] == 'source_sequence' assert feed_target_names[1] == 'target_sequence' - results = exe.run(inference_program, - feed={ - feed_target_names[0]: word_data, - feed_target_names[1]: trg_word, - }, - fetch_list=fetch_targets, - return_numpy=False) + results = exe.run( + inference_program, + feed={ + feed_target_names[0]: word_data, + feed_target_names[1]: trg_word, + }, + fetch_list=fetch_targets, + return_numpy=False, + ) print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference shape: ", np_data.shape) @@ -265,8 +286,9 @@ def main(use_cuda): # Directory for saving the trained model temp_dir = tempfile.TemporaryDirectory() - save_dirname = os.path.join(temp_dir.name, - "rnn_encoder_decoder.inference.model") + save_dirname = os.path.join( + temp_dir.name, "rnn_encoder_decoder.inference.model" + ) train(use_cuda, save_dirname) infer(use_cuda, save_dirname) @@ -274,7 +296,6 @@ def main(use_cuda): class TestRnnEncoderDecoder(unittest.TestCase): - def test_cuda(self): with self.scope_prog_guard(): main(use_cuda=True) diff --git a/python/paddle/fluid/tests/book/test_word2vec_book.py b/python/paddle/fluid/tests/book/test_word2vec_book.py index ec7ab9cc8370955acc0f9b287c0a50669d5681cd..69cef4021c906c53fa0de8f9df0b65fa87461673 100644 --- a/python/paddle/fluid/tests/book/test_word2vec_book.py +++ b/python/paddle/fluid/tests/book/test_word2vec_book.py @@ -33,17 +33,21 @@ def get_place(target): return fluid.CPUPlace() else: raise ValueError( - "Target `{0}` is not on the support list: `cuda`, `xpu` and `cpu`.". - format(target)) - - -def train(target, - is_sparse, - is_parallel, - save_dirname, - is_local=True, - use_bf16=False, - pure_bf16=False): + "Target `{0}` is not on the support list: `cuda`, `xpu` and `cpu`.".format( + target + ) + ) + + +def train( + target, + is_sparse, + is_parallel, + save_dirname, + is_local=True, + use_bf16=False, + pure_bf16=False, +): PASS_NUM = 100 EMBED_SIZE = 32 HIDDEN_SIZE = 256 @@ -52,35 +56,44 @@ def train(target, IS_SPARSE = is_sparse def __network__(words): - embed_first = fluid.layers.embedding(input=words[0], - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_second = fluid.layers.embedding(input=words[1], - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_third = fluid.layers.embedding(input=words[2], - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - embed_forth = fluid.layers.embedding(input=words[3], - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') + embed_first = fluid.layers.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w', + ) + embed_second = fluid.layers.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w', + ) + embed_third = fluid.layers.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w', + ) + embed_forth = fluid.layers.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr='shared_w', + ) concat_embed = fluid.layers.concat( - input=[embed_first, embed_second, embed_third, embed_forth], axis=1) - hidden1 = fluid.layers.fc(input=concat_embed, - size=HIDDEN_SIZE, - act='sigmoid') - predict_word = 
fluid.layers.fc(input=hidden1, - size=dict_size, - act='softmax') + input=[embed_first, embed_second, embed_third, embed_forth], axis=1 + ) + hidden1 = fluid.layers.fc( + input=concat_embed, size=HIDDEN_SIZE, act='sigmoid' + ) + predict_word = fluid.layers.fc( + input=hidden1, size=dict_size, act='softmax' + ) cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) avg_cost = paddle.mean(cost) return avg_cost, predict_word @@ -96,7 +109,8 @@ def train(target, if not is_parallel: avg_cost, predict_word = __network__( - [first_word, second_word, third_word, forth_word, next_word]) + [first_word, second_word, third_word, forth_word, next_word] + ) else: raise NotImplementedError() @@ -105,20 +119,24 @@ def train(target, sgd_optimizer = paddle.static.amp.bf16.decorate_bf16( sgd_optimizer, amp_lists=paddle.static.amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'softmax', 'concat'}, ), + custom_fp32_list={'softmax', 'concat'}, + ), use_bf16_guard=False, - use_pure_bf16=pure_bf16) + use_pure_bf16=pure_bf16, + ) sgd_optimizer.minimize(avg_cost, fluid.default_startup_program()) - train_reader = paddle.batch(paddle.dataset.imikolov.train(word_dict, N), - BATCH_SIZE) + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE + ) place = get_place(target) exe = fluid.Executor(place) feeder = fluid.DataFeeder( feed_list=[first_word, second_word, third_word, forth_word, next_word], - place=place) + place=place, + ) def train_loop(main_program): exe.run(fluid.default_startup_program()) @@ -127,15 +145,17 @@ def train(target, for pass_id in range(PASS_NUM): for data in train_reader(): - avg_cost_np = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + avg_cost_np = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost] + ) if avg_cost_np[0] < 5.0: if save_dirname is not None and not pure_bf16: fluid.io.save_inference_model( save_dirname, ['firstw', 'secondw', 'thirdw', 'forthw'], - [predict_word], exe) + [predict_word], + exe, + ) return if math.isnan(float(avg_cost_np[0])): sys.exit("got NaN loss, training failed.") @@ -159,8 +179,9 @@ def train(target, t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) + pserver_startup = t.get_startup_program( + current_endpoint, pserver_prog + ) exe.run(pserver_startup) exe.run(pserver_prog) elif training_role == "TRAINER": @@ -179,8 +200,11 @@ def infer(target, save_dirname=None): # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). 
- [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(save_dirname, exe) word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) @@ -195,26 +219,18 @@ def infer(target, save_dirname=None): recursive_seq_lens = [[1]] base_shape = [1] # The range of random integers is [low, high] - first_word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=dict_size - 1) - second_word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=dict_size - 1) - third_word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=dict_size - 1) - fourth_word = fluid.create_random_int_lodtensor(recursive_seq_lens, - base_shape, - place, - low=0, - high=dict_size - 1) + first_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 + ) + second_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 + ) + third_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 + ) + fourth_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 + ) assert feed_target_names[0] == 'firstw' assert feed_target_names[1] == 'secondw' @@ -223,15 +239,17 @@ def infer(target, save_dirname=None): # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={ - feed_target_names[0]: first_word, - feed_target_names[1]: second_word, - feed_target_names[2]: third_word, - feed_target_names[3]: fourth_word - }, - fetch_list=fetch_targets, - return_numpy=False) + results = exe.run( + inference_program, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word, + }, + fetch_list=fetch_targets, + return_numpy=False, + ) def to_infer_tensor(lod_tensor): infer_tensor = fluid.core.PaddleTensor() @@ -282,18 +300,26 @@ def main(target, is_sparse, is_parallel, use_bf16, pure_bf16): # so only inference is turned on. 
train("cpu", is_sparse, is_parallel, save_dirname) else: - train(target, - is_sparse, - is_parallel, - save_dirname, - use_bf16=use_bf16, - pure_bf16=pure_bf16) + train( + target, + is_sparse, + is_parallel, + save_dirname, + use_bf16=use_bf16, + pure_bf16=pure_bf16, + ) infer(target, save_dirname) temp_dir.cleanup() -FULL_TEST = os.getenv('FULL_TEST', - '0').lower() in ['true', '1', 't', 'y', 'yes', 'on'] +FULL_TEST = os.getenv('FULL_TEST', '0').lower() in [ + 'true', + '1', + 't', + 'y', + 'yes', + 'on', +] SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster" @@ -301,15 +327,15 @@ class W2VTest(unittest.TestCase): pass -def inject_test_method(target, - is_sparse, - is_parallel, - use_bf16=False, - pure_bf16=False): +def inject_test_method( + target, is_sparse, is_parallel, use_bf16=False, pure_bf16=False +): fn_name = "test_{0}_{1}_{2}{3}".format( - target, "sparse" if is_sparse else "dense", + target, + "sparse" if is_sparse else "dense", "parallel" if is_parallel else "normal", - "_purebf16" if pure_bf16 else "_bf16" if use_bf16 else "") + "_purebf16" if pure_bf16 else "_bf16" if use_bf16 else "", + ) def __impl__(*args, **kwargs): prog = fluid.Program() @@ -319,20 +345,22 @@ def inject_test_method(target, with fluid.program_guard(prog, startup_prog): main(target, is_sparse, is_parallel, use_bf16, pure_bf16) - if (not fluid.core.is_compiled_with_cuda() - or target == "cuda") and is_sparse: + if ( + not fluid.core.is_compiled_with_cuda() or target == "cuda" + ) and is_sparse: fn = __impl__ else: # skip the other test when on CI server - fn = unittest.skipUnless(condition=FULL_TEST, - reason=SKIP_REASON)(__impl__) + fn = unittest.skipUnless(condition=FULL_TEST, reason=SKIP_REASON)( + __impl__ + ) setattr(W2VTest, fn_name, fn) for target in ("cuda", "cpu", "xpu"): for is_sparse in (False, True): - for is_parallel in (False, ): + for is_parallel in (False,): inject_test_method(target, is_sparse, is_parallel) inject_test_method("cpu", False, False, True) inject_test_method("cpu", False, False, True, True) diff --git a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py index 39d47d6f4483b99f29190084ddc6976875065730..34110b6395c8d1648cb40901103c2ee7252dd93c 100644 --- a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py +++ b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py @@ -24,7 +24,6 @@ from setuptools.command.build_ext import build_ext # cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid # for C/ObjC but not for C++ class BuildExt(build_ext): - def build_extensions(self): if '-Wstrict-prototypes' in self.compiler.compiler_so: self.compiler.compiler_so.remove('-Wstrict-prototypes') @@ -48,8 +47,9 @@ paddle_custom_kernel_include = [ os.path.join(site_packages_path, 'paddle', 'include'), ] # include path third_party -compile_third_party_path = os.path.join(os.environ['PADDLE_BINARY_DIR'], - 'third_party') +compile_third_party_path = os.path.join( + os.environ['PADDLE_BINARY_DIR'], 'third_party' +) paddle_custom_kernel_include += [ os.path.join(compile_third_party_path, 'install/gflags/include'), # gflags os.path.join(compile_third_party_path, 'install/glog/include'), # glog @@ -69,10 +69,13 @@ custom_kernel_dot_module = Extension( include_dirs=paddle_custom_kernel_include, library_dirs=paddle_custom_kernel_library_dir, libraries=libs, - extra_compile_args=paddle_extra_compile_args) + 
extra_compile_args=paddle_extra_compile_args,
+)
 
-setup(name='custom_kernel_dot_c',
-      version='1.0',
-      description='custom kernel fot compiling',
-      cmdclass={'build_ext': BuildExt},
-      ext_modules=[custom_kernel_dot_module])
+setup(
+    name='custom_kernel_dot_c',
+    version='1.0',
+    description='custom kernel fot compiling',
+    cmdclass={'build_ext': BuildExt},
+    ext_modules=[custom_kernel_dot_module],
+)
diff --git a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_setup.py b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_setup.py
index 8b953ca87251ac01564127befd92177f2d6e8c06..0ed5610cda4c2840b5e3366a8bd73c491fbb7d26 100644
--- a/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_setup.py
+++ b/python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_setup.py
@@ -24,7 +24,6 @@ from setuptools.command.build_ext import build_ext
 # cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid
 # for C/ObjC but not for C++
 class BuildExt(build_ext):
-
     def build_extensions(self):
         if '-Wstrict-prototypes' in self.compiler.compiler_so:
             self.compiler.compiler_so.remove('-Wstrict-prototypes')
@@ -45,12 +44,15 @@ if core.is_compiled_with_npu():
 # include path
 site_packages_path = site.getsitepackages()
 paddle_custom_kernel_include = list(
-    map(lambda path: os.path.join(path, 'paddle', 'include'),
-        site_packages_path))
+    map(
+        lambda path: os.path.join(path, 'paddle', 'include'), site_packages_path
+    )
+)
 
 # include path third_party
-compile_third_party_path = os.path.join(os.environ['PADDLE_BINARY_DIR'],
-                                        'third_party')
+compile_third_party_path = os.path.join(
+    os.environ['PADDLE_BINARY_DIR'], 'third_party'
+)
 paddle_custom_kernel_include += [
     os.path.join(compile_third_party_path, 'install/gflags/include'),  # gflags
     os.path.join(compile_third_party_path, 'install/glog/include'),  # glog
@@ -58,7 +60,8 @@ paddle_custom_kernel_include += [
 
 # libs path
 paddle_custom_kernel_library_dir = list(
-    map(lambda path: os.path.join(path, 'paddle', 'fluid'), site_packages_path))
+    map(lambda path: os.path.join(path, 'paddle', 'fluid'), site_packages_path)
+)
 
 # libs
 libs = [':libpaddle.so']
@@ -69,10 +72,13 @@ custom_kernel_dot_module = Extension(
     include_dirs=paddle_custom_kernel_include,
     library_dirs=paddle_custom_kernel_library_dir,
     libraries=libs,
-    extra_compile_args=paddle_extra_compile_args)
+    extra_compile_args=paddle_extra_compile_args,
+)
 
-setup(name='custom_kernel_dot',
-      version='1.0',
-      description='custom kernel fot compiling',
-      cmdclass={'build_ext': BuildExt},
-      ext_modules=[custom_kernel_dot_module])
+setup(
+    name='custom_kernel_dot',
+    version='1.0',
+    description='custom kernel fot compiling',
+    cmdclass={'build_ext': BuildExt},
+    ext_modules=[custom_kernel_dot_module],
+)
diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py
index 8badd5dfbde81215485dfa6ce937bb6f2e05cc3a..6d941699dd9b37b423fb84f16a244ff65599ea38 100644
--- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py
+++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py
@@ -20,14 +20,16 @@ import numpy as np
 
 # use dot as test case.
class TestCustomKernelDot(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) # --inplace to place output so file to current dir - cmd = 'cd {} && {} custom_kernel_dot_setup.py build_ext --inplace'.format( - cur_dir, sys.executable) + cmd = ( + 'cd {} && {} custom_kernel_dot_setup.py build_ext --inplace'.format( + cur_dir, sys.executable + ) + ) os.system(cmd) def test_custom_kernel_dot_run(self): @@ -37,6 +39,7 @@ class TestCustomKernelDot(unittest.TestCase): result = np.sum(x_data * y_data, axis=1).reshape([2, 1]) import paddle + paddle.set_device('cpu') x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) @@ -46,18 +49,20 @@ class TestCustomKernelDot(unittest.TestCase): out.numpy(), result, err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result)) + out.numpy(), result + ), + ) class TestCustomKernelDotC(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) # --inplace to place output so file to current dir cmd = 'cd {} && {} custom_kernel_dot_c_setup.py build_ext --inplace'.format( - cur_dir, sys.executable) + cur_dir, sys.executable + ) os.system(cmd) def test_custom_kernel_dot_run(self): @@ -67,6 +72,7 @@ class TestCustomKernelDotC(unittest.TestCase): result = np.sum(x_data * y_data, axis=1).reshape([2, 1]) import paddle + paddle.set_device('cpu') x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) @@ -76,7 +82,9 @@ class TestCustomKernelDotC(unittest.TestCase): out.numpy(), result, err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result)) + out.numpy(), result + ), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py index ff7ff3e04a88e569af7b97266d41381554b912d3..1ebeb6a4585f1a3df07450c11546fd912bdd6018 100644 --- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py +++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py @@ -20,22 +20,25 @@ import numpy as np class TestCustomKernelLoad(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) # --inplace to place output so file to current dir - cmd = 'cd {} && {} custom_kernel_dot_setup.py build_ext --inplace'.format( - cur_dir, sys.executable) + cmd = ( + 'cd {} && {} custom_kernel_dot_setup.py build_ext --inplace'.format( + cur_dir, sys.executable + ) + ) os.system(cmd) # get paddle lib path and place so paddle_lib_path = '' - site_dirs = site.getsitepackages() if hasattr( - site, 'getsitepackages') else [ - x for x in sys.path if 'site-packages' in x - ] + site_dirs = ( + site.getsitepackages() + if hasattr(site, 'getsitepackages') + else [x for x in sys.path if 'site-packages' in x] + ) for site_dir in site_dirs: lib_dir = os.path.sep.join([site_dir, 'paddle', 'libs']) if os.path.exists(lib_dir): @@ -47,10 +50,12 @@ class TestCustomKernelLoad(unittest.TestCase): if os.path.exists(lib_dir): paddle_lib_path = lib_dir self.default_path = os.path.sep.join( - [paddle_lib_path, '..', '..', 'paddle-plugins']) + [paddle_lib_path, '..', '..', 'paddle-plugins'] + ) # copy so to default path - cmd = 'mkdir -p {} && cp ./*.so {}'.format(self.default_path, - self.default_path) + cmd = 'mkdir -p {} && cp ./*.so {}'.format( + self.default_path, self.default_path + ) os.system(cmd) # wait def 
test_custom_kernel_dot_load(self): @@ -60,6 +65,7 @@ class TestCustomKernelLoad(unittest.TestCase): result = np.sum(x_data * y_data, axis=1).reshape([2, 1]) import paddle + paddle.set_device('cpu') x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) @@ -69,7 +75,9 @@ class TestCustomKernelLoad(unittest.TestCase): out.numpy(), result, err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( - out.numpy(), result)) + out.numpy(), result + ), + ) def tearDown(self): cmd = 'rm -rf {}'.format(self.default_path) diff --git a/python/paddle/fluid/tests/custom_op/custom_raw_op_kernel_op_setup.py b/python/paddle/fluid/tests/custom_op/custom_raw_op_kernel_op_setup.py index a1ae927997200c9bb796d60bf84510988409c213..c29bf7014f847039b6874cfc6474165c9730480c 100644 --- a/python/paddle/fluid/tests/custom_op/custom_raw_op_kernel_op_setup.py +++ b/python/paddle/fluid/tests/custom_op/custom_raw_op_kernel_op_setup.py @@ -41,9 +41,13 @@ if core.is_compiled_with_nccl(): macros.append(("THRUST_IGNORE_CUB_VERSION_CHECK", None)) include_dirs = list(paddle_includes) + [cwd] -setup(name=os.getenv("MODULE_NAME", "custom_raw_op_kernel_op_setup"), - ext_modules=extension(sources=sources, - include_dirs=include_dirs, - extra_compile_args=extra_compile_args, - _compile_dir=compile_dir, - define_macros=macros)) +setup( + name=os.getenv("MODULE_NAME", "custom_raw_op_kernel_op_setup"), + ext_modules=extension( + sources=sources, + include_dirs=include_dirs, + extra_compile_args=extra_compile_args, + _compile_dir=compile_dir, + define_macros=macros, + ), +) diff --git a/python/paddle/fluid/tests/custom_op/custom_relu_setup.py b/python/paddle/fluid/tests/custom_op/custom_relu_setup.py index 7dd3dd7aa7e38743fb108be948d0f052b451f60d..e71c5584e672957dc39c720b0d5cc2728cc4780e 100644 --- a/python/paddle/fluid/tests/custom_op/custom_relu_setup.py +++ b/python/paddle/fluid/tests/custom_op/custom_relu_setup.py @@ -30,4 +30,6 @@ setup( sources=sources, # test for multi ops include_dirs=paddle_includes, extra_compile_args=extra_compile_args, - verbose=True)) + verbose=True, + ), +) diff --git a/python/paddle/fluid/tests/custom_op/test_check_abi.py b/python/paddle/fluid/tests/custom_op/test_check_abi.py index 39bcf1ae628c6df7119053b99c02f5ca7d9b60af..ab37412362550f5c80783b02c56f7eec1a46dc81 100644 --- a/python/paddle/fluid/tests/custom_op/test_check_abi.py +++ b/python/paddle/fluid/tests/custom_op/test_check_abi.py @@ -20,7 +20,6 @@ import paddle.utils.cpp_extension.extension_utils as utils class TestABIBase(unittest.TestCase): - def test_environ(self): compiler_list = ['gcc', 'cl'] for compiler in compiler_list: @@ -35,7 +34,6 @@ class TestABIBase(unittest.TestCase): class TestCheckCompiler(TestABIBase): - def test_expected_compiler(self): if utils.OS_NAME.startswith('linux'): gt = ['gcc', 'g++', 'gnu-c++', 'gnu-cc'] @@ -73,7 +71,8 @@ class TestCheckCompiler(TestABIBase): # check Compiler Compatibility WARNING self.assertTrue(len(error) == 1) self.assertTrue( - "Compiler Compatibility WARNING" in str(error[0].message)) + "Compiler Compatibility WARNING" in str(error[0].message) + ) def test_exception_windows(self): # clear environ @@ -86,8 +85,10 @@ class TestCheckCompiler(TestABIBase): self.assertFalse(flag) # check ABI Compatibility WARNING self.assertTrue(len(error) == 1) - self.assertTrue("Failed to check compiler version for" in str( - error[0].message)) + self.assertTrue( + "Failed to check compiler version for" + in str(error[0].message) + ) def test_exception_linux(self): # clear environ @@ -107,8 +108,10 @@ class 
TestCheckCompiler(TestABIBase): self.assertFalse(flag) # check ABI Compatibility WARNING self.assertTrue(len(error) == 1) - self.assertTrue("Failed to check compiler version for" in str( - error[0].message)) + self.assertTrue( + "Failed to check compiler version for" + in str(error[0].message) + ) # restore utils._expected_compiler_current_platform = raw_func @@ -137,7 +140,6 @@ class TestCheckCompiler(TestABIBase): class TestRunCMDException(unittest.TestCase): - def test_exception(self): for verbose in [True, False]: with self.assertRaisesRegexp(RuntimeError, "Failed to run command"): diff --git a/python/paddle/fluid/tests/custom_op/test_context_pool.py b/python/paddle/fluid/tests/custom_op/test_context_pool.py index 69b8b18559ef93bec526c8465426188228b35e6d..0c4829fb90fdb256d48f8ecce3e6468b50318a2f 100644 --- a/python/paddle/fluid/tests/custom_op/test_context_pool.py +++ b/python/paddle/fluid/tests/custom_op/test_context_pool.py @@ -25,7 +25,8 @@ from paddle.fluid.framework import _test_eager_guard # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. file = '{}\\context_pool_jit\\context_pool_jit.pyd'.format( - get_build_directory()) + get_build_directory() +) if os.name == 'nt' and os.path.isfile(file): cmd = 'del {}'.format(file) run_cmd(cmd, True) @@ -37,11 +38,11 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cflags extra_cuda_cflags=extra_nvcc_args, # test for cflags - verbose=True) + verbose=True, +) class TestContextPool(unittest.TestCase): - def setUp(self): self.devices = ['cpu'] if paddle.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py b/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py index a0be75c0a41ebe85b7fcf0294b1d4ca4a9d4a27f..7109d957384c56ac1181223fdfd3d964a3fdd9d2 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py @@ -25,7 +25,8 @@ from paddle.fluid.framework import _test_eager_guard # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. 
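The comment above describes the recurring first step in these JIT custom-op tests: on Windows the previously built shared library already sits in the JIT build cache, so each test deletes it to force recompilation. A minimal sketch of that step, assuming an illustrative module name (each test in this diff substitutes its own):

import os

from paddle.utils.cpp_extension import get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd

# The JIT build cache lives under get_build_directory(); removing the cached
# .pyd makes the subsequent `load(...)` call recompile the operator instead of
# silently reusing a stale binary. 'example_op' is a placeholder name.
cached_lib = '{}\\example_op\\example_op.pyd'.format(get_build_directory())
if os.name == 'nt' and os.path.isfile(cached_lib):
    run_cmd('del {}'.format(cached_lib), True)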
file = '{}\\custom_attrs_jit\\custom_attrs_jit.pyd'.format( - get_build_directory()) + get_build_directory() +) if os.name == 'nt' and os.path.isfile(file): cmd = 'del {}'.format(file) run_cmd(cmd, True) @@ -37,11 +38,11 @@ custom_attrs = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cflags extra_cuda_cflags=extra_nvcc_args, # test for cflags - verbose=True) + verbose=True, +) class TestJitCustomAttrs(unittest.TestCase): - def setUp(self): paddle.set_device('cpu') # prepare test value @@ -58,11 +59,18 @@ class TestJitCustomAttrs(unittest.TestCase): def func_attr_value(self): x = paddle.ones([2, 2], dtype='float32') x.stop_gradient = False - out = custom_attrs.attr_test(x, self.bool_attr, self.int_attr, - self.float_attr, self.int64_attr, - self.str_attr, self.int_vec_attr, - self.float_vec_attr, self.int64_vec_attr, - self.str_vec_attr) + out = custom_attrs.attr_test( + x, + self.bool_attr, + self.int_attr, + self.float_attr, + self.int64_attr, + self.str_attr, + self.int_vec_attr, + self.float_vec_attr, + self.int64_vec_attr, + self.str_vec_attr, + ) out.stop_gradient = False out.backward() @@ -76,12 +84,18 @@ class TestJitCustomAttrs(unittest.TestCase): def func_const_attr_value(self): x = paddle.ones([2, 2], dtype='float32') x.stop_gradient = False - out = custom_attrs.const_attr_test(x, self.bool_attr, self.int_attr, - self.float_attr, self.int64_attr, - self.str_attr, self.int_vec_attr, - self.float_vec_attr, - self.int64_vec_attr, - self.str_vec_attr) + out = custom_attrs.const_attr_test( + x, + self.bool_attr, + self.int_attr, + self.float_attr, + self.int64_attr, + self.str_attr, + self.int_vec_attr, + self.float_vec_attr, + self.int64_vec_attr, + self.str_vec_attr, + ) out.stop_gradient = False out.backward() diff --git a/python/paddle/fluid/tests/custom_op/test_custom_concat.py b/python/paddle/fluid/tests/custom_op/test_custom_concat.py index ae3022411b18efd41080c9f7c1201a7c3ccf5e11..a0744ce0870dfdab8042953b0ee9659cd140397e 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_concat.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_concat.py @@ -42,7 +42,8 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) def concat_dynamic(func, dtype, np_inputs, axis_v, with_attr=False): @@ -85,29 +86,29 @@ def concat_static(func, dtype, np_inputs, axis_v, with_attr=False): if with_attr: feed_dict = { "x1": np_inputs[0].astype(dtype), - "x2": np_inputs[1].astype(dtype) + "x2": np_inputs[1].astype(dtype), } else: feed_dict = { "x1": np_inputs[0].astype(dtype), "x2": np_inputs[1].astype(dtype), - "axis": axis + "axis": axis, } out_v, x1_grad_v, x2_grad_v = exe.run( static.default_main_program(), feed=feed_dict, - fetch_list=[out.name, x1.name + "@GRAD", x2.name + "@GRAD"]) + fetch_list=[out.name, x1.name + "@GRAD", x2.name + "@GRAD"], + ) paddle.disable_static() return out_v, x1_grad_v, x2_grad_v class TestCustomConcatDynamicAxisJit(unittest.TestCase): - def setUp(self): self.dtypes = ['float32', 'float64', 'int32', 'int64'] self.np_inputs = [ np.array([[1, 2, 3], [4, 5, 6]]), - np.array([[11, 12, 13], [14, 15, 16]]) + np.array([[11, 12, 13], [14, 15, 16]]), ] self.axises = [0, 1] @@ -116,15 +117,19 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase): out, pd_out, err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, 
pd_out)) + name, out, name, pd_out + ), + ) def func_dynamic(self): for dtype in self.dtypes: for axis in self.axises: - out, grad_inputs = concat_dynamic(custom_ops.custom_concat, - dtype, self.np_inputs, axis) - pd_out, pd_grad_inputs = concat_dynamic(paddle.concat, dtype, - self.np_inputs, axis) + out, grad_inputs = concat_dynamic( + custom_ops.custom_concat, dtype, self.np_inputs, axis + ) + pd_out, pd_grad_inputs = concat_dynamic( + paddle.concat, dtype, self.np_inputs, axis + ) self.check_output(out, pd_out, "out") for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs): @@ -138,11 +143,12 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase): def test_static(self): for dtype in self.dtypes: for axis in self.axises: - out, x1_grad, x2_grad = concat_static(custom_ops.custom_concat, - dtype, self.np_inputs, - axis) + out, x1_grad, x2_grad = concat_static( + custom_ops.custom_concat, dtype, self.np_inputs, axis + ) pd_out, pd_x1_grad, pd_x2_grad = concat_static( - paddle.concat, dtype, self.np_inputs, axis) + paddle.concat, dtype, self.np_inputs, axis + ) self.check_output(out, pd_out, "out") self.check_output(x1_grad, pd_x1_grad, "x1_grad") @@ -152,11 +158,15 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase): for dtype in self.dtypes: for axis in self.axises: out, grad_inputs = concat_dynamic( - custom_ops.custom_concat_with_attr, dtype, self.np_inputs, - axis, True) - pd_out, pd_grad_inputs = concat_dynamic(paddle.concat, dtype, - self.np_inputs, axis, - True) + custom_ops.custom_concat_with_attr, + dtype, + self.np_inputs, + axis, + True, + ) + pd_out, pd_grad_inputs = concat_dynamic( + paddle.concat, dtype, self.np_inputs, axis, True + ) self.check_output(out, pd_out, "out") for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs): @@ -171,10 +181,15 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase): for dtype in self.dtypes: for axis in self.axises: out, x1_grad, x2_grad = concat_static( - custom_ops.custom_concat_with_attr, dtype, self.np_inputs, - axis, True) + custom_ops.custom_concat_with_attr, + dtype, + self.np_inputs, + axis, + True, + ) pd_out, pd_x1_grad, pd_x2_grad = concat_static( - paddle.concat, dtype, self.np_inputs, axis, True) + paddle.concat, dtype, self.np_inputs, axis, True + ) self.check_output(out, pd_out, "out") self.check_output(x1_grad, pd_x1_grad, "x1_grad") diff --git a/python/paddle/fluid/tests/custom_op/test_custom_conj.py b/python/paddle/fluid/tests/custom_op/test_custom_conj.py index a389a72df7350ad301c0a40664508eaab1825485..a9990facd79f6763a6458d0bcbe36cf1c9d7bf66 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_conj.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_conj.py @@ -36,12 +36,15 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) def is_complex(dtype): - return dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX64 or \ - dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX128 + return ( + dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX64 + or dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX128 + ) def to_complex(dtype): @@ -83,15 +86,16 @@ def conj_static(func, shape, dtype, np_input): exe = static.Executor() exe.run(static.default_startup_program()) - out_v, x_grad_v = exe.run(static.default_main_program(), - feed={"x": np_input}, - fetch_list=[out.name, x.name + "@GRAD"]) + out_v, x_grad_v = exe.run( + 
static.default_main_program(), + feed={"x": np_input}, + fetch_list=[out.name, x.name + "@GRAD"], + ) paddle.disable_static() return out_v, x_grad_v class TestCustomConjJit(unittest.TestCase): - def setUp(self): self.dtypes = ['float32', 'float64'] self.shape = [2, 20, 2, 3] @@ -101,7 +105,9 @@ class TestCustomConjJit(unittest.TestCase): out, pd_out, err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out)) + name, out, name, pd_out + ), + ) def run_dynamic(self, dtype, np_input): out, x_grad = conj_dynamic(custom_ops.custom_conj, dtype, np_input) @@ -111,10 +117,12 @@ class TestCustomConjJit(unittest.TestCase): self.check_output(x_grad, pd_x_grad, "x's grad") def run_static(self, dtype, np_input): - out, x_grad = conj_static(custom_ops.custom_conj, self.shape, dtype, - np_input) - pd_out, pd_x_grad = conj_static(paddle.conj, self.shape, dtype, - np_input) + out, x_grad = conj_static( + custom_ops.custom_conj, self.shape, dtype, np_input + ) + pd_out, pd_x_grad = conj_static( + paddle.conj, self.shape, dtype, np_input + ) self.check_output(out, pd_out, "out") self.check_output(x_grad, pd_x_grad, "x's grad") @@ -138,7 +146,8 @@ class TestCustomConjJit(unittest.TestCase): def test_complex_dynamic(self): for dtype in self.dtypes: np_input = np.random.random(self.shape).astype( - dtype) + 1j * np.random.random(self.shape).astype(dtype) + dtype + ) + 1j * np.random.random(self.shape).astype(dtype) self.run_dynamic(to_complex(dtype), np_input) diff --git a/python/paddle/fluid/tests/custom_op/test_custom_linear.py b/python/paddle/fluid/tests/custom_op/test_custom_linear.py index 3ae650ee9494d64d405fc9fd471f00627cac3c2f..bb7bc0358f4e89e00c8259b2e0a78ff032cc6a05 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_linear.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_linear.py @@ -37,7 +37,8 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) def linear_dynamic(func, device, dtype, np_x, np_weight, np_bias): @@ -56,9 +57,9 @@ def linear_static(func, device, dtype, np_x, np_weight, np_bias): with static.scope_guard(static.Scope()): with static.program_guard(static.Program()): x = static.data(name="x", shape=[None, np_x.shape[1]], dtype=dtype) - weight = static.data(name="weight", - shape=np_weight.shape, - dtype=dtype) + weight = static.data( + name="weight", shape=np_weight.shape, dtype=dtype + ) bias = static.data(name="bias", shape=np_bias.shape, dtype=dtype) x.stop_gradient = False weight.stop_gradient = False @@ -75,18 +76,20 @@ def linear_static(func, device, dtype, np_x, np_weight, np_bias): feed={ "x": np_x.astype(dtype), "weight": np_weight.astype(dtype), - "bias": np_bias.astype(dtype) + "bias": np_bias.astype(dtype), }, fetch_list=[ - out.name, x.name + "@GRAD", weight.name + "@GRAD", - bias.name + "@GRAD" - ]) + out.name, + x.name + "@GRAD", + weight.name + "@GRAD", + bias.name + "@GRAD", + ], + ) paddle.disable_static() return out_v, x_grad_v, weight_grad_v, bias_grad_v class TestCustomLinearJit(unittest.TestCase): - def setUp(self): self.dtypes = ['float32', 'float64'] self.devices = ['cpu'] @@ -101,36 +104,75 @@ class TestCustomLinearJit(unittest.TestCase): out, pd_out, err_msg='custom op {}: {},\n paddle api {}: {}'.format( - name, out, name, pd_out)) + name, out, name, pd_out + ), + ) def test_static(self): for device in self.devices: for dtype in self.dtypes: - 
phi_out, phi_x_grad, phi_weight_grad, phi_bias_grad = linear_static( - custom_ops.phi_linear, device, dtype, self.np_x, - self.np_weight, self.np_bias) + ( + phi_out, + phi_x_grad, + phi_weight_grad, + phi_bias_grad, + ) = linear_static( + custom_ops.phi_linear, + device, + dtype, + self.np_x, + self.np_weight, + self.np_bias, + ) pd_out, pd_x_grad, pd_weight_grad, pd_bias_grad = linear_static( - F.linear, device, dtype, self.np_x, self.np_weight, - self.np_bias) + F.linear, + device, + dtype, + self.np_x, + self.np_weight, + self.np_bias, + ) self.check_output(phi_out, pd_out, "out") self.check_output(phi_x_grad, pd_x_grad, "x_grad") - self.check_output(phi_weight_grad, pd_weight_grad, - "weight_grad") + self.check_output( + phi_weight_grad, pd_weight_grad, "weight_grad" + ) self.check_output(phi_bias_grad, pd_bias_grad, "bias_grad") def func_dynamic(self): for device in self.devices: for dtype in self.dtypes: - phi_out, phi_x_grad, phi_weight_grad, phi_bias_grad = linear_dynamic( - custom_ops.phi_linear, device, dtype, self.np_x, - self.np_weight, self.np_bias) - pd_out, pd_x_grad, pd_weight_grad, pd_bias_grad = linear_dynamic( - F.linear, device, dtype, self.np_x, self.np_weight, - self.np_bias) + ( + phi_out, + phi_x_grad, + phi_weight_grad, + phi_bias_grad, + ) = linear_dynamic( + custom_ops.phi_linear, + device, + dtype, + self.np_x, + self.np_weight, + self.np_bias, + ) + ( + pd_out, + pd_x_grad, + pd_weight_grad, + pd_bias_grad, + ) = linear_dynamic( + F.linear, + device, + dtype, + self.np_x, + self.np_weight, + self.np_bias, + ) self.check_output(phi_out, pd_out, "phi_out") self.check_output(phi_x_grad, pd_x_grad, "x_grad") - self.check_output(phi_weight_grad, pd_weight_grad, - "weight_grad") + self.check_output( + phi_weight_grad, pd_weight_grad, "weight_grad" + ) self.check_output(phi_bias_grad, pd_bias_grad, "bias_grad") def test_dynamic(self): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py index 3cd550c95f00efe511551c2b576d50365e450129..33f98c219d84a4d955e974132f6aec646ff3516f 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py @@ -36,8 +36,9 @@ def prepare_module_path(): else: site_dir = site.getsitepackages()[0] custom_egg_path = [x for x in os.listdir(site_dir) if MODULE_NAME in x] - assert len(custom_egg_path - ) == 1, "Matched egg number is %d." % len(custom_egg_path) + assert len(custom_egg_path) == 1, "Matched egg number is %d." % len( + custom_egg_path + ) sys.path.append(os.path.join(site_dir, custom_egg_path[0])) @@ -46,7 +47,6 @@ def prepare_module_path(): # temporarily. 
@unittest.skipIf(os.name == "nt", "Windows does not support yet.") class TestCustomRawReluOp(unittest.TestCase): - @classmethod def setUpClass(cls): path = os.path.dirname(os.path.abspath(__file__)) @@ -78,11 +78,14 @@ class TestCustomRawReluOp(unittest.TestCase): exe = paddle.static.Executor() exe.run(paddle.static.default_startup_program()) - x_np = np.random.uniform(low=-1.0, high=1.0, size=[2, - 3]).astype('float32') - y1_value, y2_value = exe.run(paddle.static.default_main_program(), - feed={x.name: x_np}, - fetch_list=[y1, y2]) + x_np = np.random.uniform(low=-1.0, high=1.0, size=[2, 3]).astype( + 'float32' + ) + y1_value, y2_value = exe.run( + paddle.static.default_main_program(), + feed={x.name: x_np}, + fetch_list=[y1, y2], + ) np.testing.assert_array_equal(y1_value, y2_value) paddle.disable_static() diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py index 762a16b61ff829fa714783c07feee9bf7898d24a..f445410e906dfb720a57c846edcd8e35146707ba 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py @@ -28,7 +28,8 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. file = '{}\\custom_relu_for_model_jit\\custom_relu_for_model_jit.pyd'.format( - get_build_directory()) + get_build_directory() +) if os.name == 'nt' and os.path.isfile(file): cmd = 'del {}'.format(file) run_cmd(cmd, True) @@ -47,7 +48,8 @@ custom_module = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) class Net(nn.Layer): @@ -59,7 +61,9 @@ class Net(nn.Layer): super(Net, self).__init__() self.fc1 = nn.Linear(in_dim, in_dim) self.fc2 = nn.Linear(in_dim, out_dim) - self.relu_act = custom_module.custom_relu if use_custom_op else nn.functional.relu + self.relu_act = ( + custom_module.custom_relu if use_custom_op else nn.functional.relu + ) def forward(self, x): out = self.fc1(x) @@ -73,7 +77,6 @@ class Net(nn.Layer): class TestDygraphModel(unittest.TestCase): - def tearDown(self): self.temp_dir.cleanup() @@ -85,8 +88,9 @@ class TestDygraphModel(unittest.TestCase): self.batch_num = 10 self.batch_size = 4 self.datas = [ - np.random.uniform( - size=[self.batch_size, self.in_dim]).astype('float32') + np.random.uniform(size=[self.batch_size, self.in_dim]).astype( + 'float32' + ) for i in range(self.batch_num) ] self.labels = [ @@ -100,14 +104,16 @@ class TestDygraphModel(unittest.TestCase): self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'infer_model') self.model_path_template = os.path.join( - self.model_save_dir, 'custom_relu_dygaph_model_{}.pdparams') + self.model_save_dir, 'custom_relu_dygaph_model_{}.pdparams' + ) self.model_dy2stat_path = os.path.join( - self.model_save_dir, 'infer_model/custom_relu_model_dy2sta') + self.model_save_dir, 'infer_model/custom_relu_model_dy2sta' + ) # for dy2stat - self.x_spec = paddle.static.InputSpec(shape=[None, self.in_dim], - dtype='float32', - name='x') + self.x_spec = paddle.static.InputSpec( + shape=[None, self.in_dim], dtype='float32', name='x' + ) def func_train_eval(self): for device in self.devices: @@ -120,24 +126,30 @@ class 
TestDygraphModel(unittest.TestCase): # open this when dy2stat is ready for eager if _in_legacy_dygraph(): custom_relu_dy2stat_train_out = self.train_model( - use_custom_op=True, dy2stat=True) # for to_static - np.testing.assert_array_equal(origin_relu_train_out, - custom_relu_dy2stat_train_out) + use_custom_op=True, dy2stat=True + ) # for to_static + np.testing.assert_array_equal( + origin_relu_train_out, custom_relu_dy2stat_train_out + ) - np.testing.assert_array_equal(origin_relu_train_out, - custom_relu_train_out) + np.testing.assert_array_equal( + origin_relu_train_out, custom_relu_train_out + ) # for eval origin_relu_eval_out = self.eval_model(use_custom_op=False) custom_relu_eval_out = self.eval_model(use_custom_op=True) if _in_legacy_dygraph(): custom_relu_dy2stat_eval_out = self.eval_model( - use_custom_op=True, dy2stat=True) # for to_static - np.testing.assert_array_equal(origin_relu_eval_out, - custom_relu_dy2stat_eval_out) + use_custom_op=True, dy2stat=True + ) # for to_static + np.testing.assert_array_equal( + origin_relu_eval_out, custom_relu_dy2stat_eval_out + ) - np.testing.assert_array_equal(origin_relu_eval_out, - custom_relu_eval_out) + np.testing.assert_array_equal( + origin_relu_eval_out, custom_relu_eval_out + ) def test_train_eval(self): with _test_eager_guard(): @@ -154,8 +166,9 @@ class TestDygraphModel(unittest.TestCase): if dy2stat: net = paddle.jit.to_static(net, input_spec=[self.x_spec]) mse_loss = paddle.nn.MSELoss() - sgd = paddle.optimizer.SGD(learning_rate=0.1, - parameters=net.parameters()) + sgd = paddle.optimizer.SGD( + learning_rate=0.1, parameters=net.parameters() + ) for batch_id in range(self.batch_num): x = paddle.to_tensor(self.datas[batch_id]) @@ -173,8 +186,9 @@ class TestDygraphModel(unittest.TestCase): if dy2stat: paddle.jit.save(net, self.model_dy2stat_path) else: - paddle.save(net.state_dict(), - self.model_path_template.format(use_custom_op)) + paddle.save( + net.state_dict(), self.model_path_template.format(use_custom_op) + ) return out.numpy() @@ -185,7 +199,8 @@ class TestDygraphModel(unittest.TestCase): net = paddle.jit.load(self.model_dy2stat_path) else: state_dict = paddle.load( - self.model_path_template.format(use_custom_op)) + self.model_path_template.format(use_custom_op) + ) net.set_state_dict(state_dict) sample_x = paddle.to_tensor(self.datas[0]) @@ -196,7 +211,6 @@ class TestDygraphModel(unittest.TestCase): class TestStaticModel(unittest.TestCase): - def setUp(self): self.seed = 2021 self.in_dim = 10 @@ -204,8 +218,9 @@ class TestStaticModel(unittest.TestCase): self.batch_num = 10 self.batch_size = 8 self.datas = [ - np.random.uniform( - size=[self.batch_size, self.in_dim]).astype('float32') + np.random.uniform(size=[self.batch_size, self.in_dim]).astype( + 'float32' + ) for i in range(self.batch_num) ] self.labels = [ @@ -219,7 +234,8 @@ class TestStaticModel(unittest.TestCase): self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'infer_model') self.model_path_template = os.path.join( - self.model_save_dir, 'custom_relu_static_model_{}_{}') + self.model_save_dir, 'custom_relu_static_model_{}_{}' + ) paddle.enable_static() @@ -230,38 +246,44 @@ class TestStaticModel(unittest.TestCase): def test_train_eval(self): for device in self.devices: # for train - original_relu_train_out = self.train_model(device, - use_custom_op=False) + original_relu_train_out = self.train_model( + device, use_custom_op=False + ) custom_relu_train_out = self.train_model(device, use_custom_op=True) # using 
PE - original_relu_train_pe_out = self.train_model(device, - use_custom_op=False, - use_pe=True) - custom_relu_train_pe_out = self.train_model(device, - use_custom_op=True, - use_pe=True) - - np.testing.assert_array_equal(original_relu_train_out, - custom_relu_train_out) - np.testing.assert_array_equal(original_relu_train_pe_out, - custom_relu_train_pe_out) + original_relu_train_pe_out = self.train_model( + device, use_custom_op=False, use_pe=True + ) + custom_relu_train_pe_out = self.train_model( + device, use_custom_op=True, use_pe=True + ) + + np.testing.assert_array_equal( + original_relu_train_out, custom_relu_train_out + ) + np.testing.assert_array_equal( + original_relu_train_pe_out, custom_relu_train_pe_out + ) # for eval - original_relu_eval_out = self.eval_model(device, - use_custom_op=False) + original_relu_eval_out = self.eval_model( + device, use_custom_op=False + ) custom_relu_eval_out = self.eval_model(device, use_custom_op=True) # using PE - original_relu_eval_pe_out = self.eval_model(device, - use_custom_op=False, - use_pe=True) - custom_relu_eval_pe_out = self.eval_model(device, - use_custom_op=True, - use_pe=True) - - np.testing.assert_array_equal(original_relu_eval_out, - custom_relu_eval_out) - np.testing.assert_array_equal(original_relu_eval_pe_out, - custom_relu_eval_pe_out) + original_relu_eval_pe_out = self.eval_model( + device, use_custom_op=False, use_pe=True + ) + custom_relu_eval_pe_out = self.eval_model( + device, use_custom_op=True, use_pe=True + ) + + np.testing.assert_array_equal( + original_relu_eval_out, custom_relu_eval_out + ) + np.testing.assert_array_equal( + original_relu_eval_pe_out, custom_relu_eval_pe_out + ) def train_model(self, device, use_custom_op=False, use_pe=False): # reset random seed @@ -271,14 +293,15 @@ class TestStaticModel(unittest.TestCase): paddle.set_device(device) with paddle.static.scope_guard(paddle.static.Scope()): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - x = paddle.static.data(shape=[None, self.in_dim], - name='x', - dtype='float32') - y = paddle.static.data(shape=[None, 1], - name='y', - dtype='float32') + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + x = paddle.static.data( + shape=[None, self.in_dim], name='x', dtype='float32' + ) + y = paddle.static.data( + shape=[None, 1], name='y', dtype='float32' + ) net = Net(self.in_dim, self.out_dim, use_custom_op) out = net(x) @@ -292,12 +315,14 @@ class TestStaticModel(unittest.TestCase): # For PE if use_pe: - places = paddle.static.cpu_places( - ) if device == 'cpu' else paddle.static.cuda_places() + places = ( + paddle.static.cpu_places() + if device == 'cpu' + else paddle.static.cuda_places() + ) main_program = paddle.static.CompiledProgram( - paddle.static.default_main_program( - )).with_data_parallel(loss_name=loss.name, - places=places) + paddle.static.default_main_program() + ).with_data_parallel(loss_name=loss.name, places=places) else: main_program = paddle.static.default_main_program() @@ -305,17 +330,19 @@ class TestStaticModel(unittest.TestCase): x_data = self.datas[batch_id] y_data = self.labels[batch_id] - res = exe.run(main_program, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[out]) + res = exe.run( + main_program, + feed={'x': x_data, 'y': y_data}, + fetch_list=[out], + ) # save model paddle.static.save_inference_model( - self.model_path_template.format(use_custom_op, use_pe), [x], - [out], exe) + self.model_path_template.format(use_custom_op, use_pe), + [x], 
+ [out], + exe, + ) return res[0] @@ -326,15 +353,20 @@ class TestStaticModel(unittest.TestCase): with paddle.static.program_guard(paddle.static.Program()): exe = paddle.static.Executor() - [inference_program, feed_target_names, - fetch_targets] = paddle.static.load_inference_model( - self.model_path_template.format(use_custom_op, use_pe), - exe) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model( + self.model_path_template.format(use_custom_op, use_pe), exe + ) x_data = self.datas[0] - results = exe.run(inference_program, - feed={feed_target_names[0]: x_data}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: x_data}, + fetch_list=fetch_targets, + ) return results[0] diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py index b1255bc3879b04809c99fbea8485daf11271afa7..00b2860987b4dc7453bbeb0e11b10adbd4125b7f 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py @@ -21,10 +21,12 @@ from paddle.utils.cpp_extension.extension_utils import run_cmd from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static from paddle.fluid.framework import _test_eager_guard + # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. file = '{}\\custom_relu_module_jit\\custom_relu_module_jit.pyd'.format( - get_build_directory()) + get_build_directory() +) if os.name == 'nt' and os.path.isfile(file): cmd = 'del {}'.format(file) run_cmd(cmd, True) @@ -43,16 +45,17 @@ custom_module = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) class TestJITLoad(unittest.TestCase): - def setUp(self): self.custom_ops = [ - custom_module.custom_relu, custom_module.custom_relu_dup, + custom_module.custom_relu, + custom_module.custom_relu_dup, custom_module.custom_relu_no_x_in_backward, - custom_module.custom_relu_out + custom_module.custom_relu_out, ] self.dtypes = ['float32', 'float64'] if paddle.is_compiled_with_cuda(): @@ -69,13 +72,16 @@ class TestJITLoad(unittest.TestCase): x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) for custom_op in self.custom_ops: out = custom_relu_static(custom_op, device, dtype, x) - pd_out = custom_relu_static(custom_op, device, dtype, x, - False) + pd_out = custom_relu_static( + custom_op, device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'. - format(out, pd_out)) + err_msg='custom op out: {},\n paddle api out: {}'.format( + out, pd_out + ), + ) def func_dynamic(self): for device in self.devices: @@ -84,20 +90,26 @@ class TestJITLoad(unittest.TestCase): continue x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) for custom_op in self.custom_ops: - out, x_grad = custom_relu_dynamic(custom_op, device, dtype, - x) + out, x_grad = custom_relu_dynamic( + custom_op, device, dtype, x + ) pd_out, pd_x_grad = custom_relu_dynamic( - custom_op, device, dtype, x, False) + custom_op, device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'. 
- format(out, pd_out)) + err_msg='custom op out: {},\n paddle api out: {}'.format( + out, pd_out + ), + ) np.testing.assert_array_equal( x_grad, pd_x_grad, - err_msg='custom op x grad: {},\n paddle api x grad: {}'. - format(x_grad, pd_x_grad)) + err_msg='custom op x grad: {},\n paddle api x grad: {}'.format( + x_grad, pd_x_grad + ), + ) def test_dynamic(self): with _test_eager_guard(): @@ -141,7 +153,8 @@ class TestJITLoad(unittest.TestCase): extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, + ) custom_conj = custom_module.custom_conj self.assertIsNotNone(custom_conj) diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py index 32e34091d38a6999a7768bd83e03946f4d3d39fb..0f73b4a212413074a9dd1428e36516391256d244 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py @@ -41,12 +41,9 @@ def custom_relu_dynamic(func, device, dtype, np_x, use_func=True): return out.numpy(), t.grad.numpy() -def custom_relu_static(func, - device, - dtype, - np_x, - use_func=True, - test_infer=False): +def custom_relu_static( + func, device, dtype, np_x, use_func=True, test_infer=False +): paddle.enable_static() paddle.set_device(device) @@ -60,9 +57,11 @@ def custom_relu_static(func, exe = static.Executor() exe.run(static.default_startup_program()) # in static mode, x data has been covered by out - out_v = exe.run(static.default_main_program(), - feed={'X': np_x}, - fetch_list=[out.name]) + out_v = exe.run( + static.default_main_program(), + feed={'X': np_x}, + fetch_list=[out.name], + ) paddle.disable_static() return out_v @@ -85,11 +84,11 @@ def custom_relu_static_pe(func, device, dtype, np_x, use_func=True): # in static mode, x data has been covered by out compiled_prog = static.CompiledProgram( - static.default_main_program()).with_data_parallel( - loss_name=out.name, places=places) - out_v = exe.run(compiled_prog, - feed={'X': np_x}, - fetch_list=[out.name]) + static.default_main_program() + ).with_data_parallel(loss_name=out.name, places=places) + out_v = exe.run( + compiled_prog, feed={'X': np_x}, fetch_list=[out.name] + ) paddle.disable_static() return out_v @@ -101,9 +100,9 @@ def custom_relu_static_inference(func, device, np_data, np_label, path_prefix): with static.scope_guard(static.Scope()): with static.program_guard(static.Program()): # simple module - data = static.data(name='data', - shape=[None, 1, 28, 28], - dtype='float32') + data = static.data( + name='data', shape=[None, 1, 28, 28], dtype='float32' + ) label = static.data(name='label', shape=[None, 1], dtype='int64') hidden = static.nn.fc(data, size=128) @@ -122,23 +121,21 @@ def custom_relu_static_inference(func, device, np_data, np_label, path_prefix): # train for i in range(4): - avg_loss_v = exe.run(static.default_main_program(), - feed={ - 'data': np_data, - 'label': np_label - }, - fetch_list=[avg_loss]) + avg_loss_v = exe.run( + static.default_main_program(), + feed={'data': np_data, 'label': np_label}, + fetch_list=[avg_loss], + ) # save inference model static.save_inference_model(path_prefix, [data], [predict], exe) # get train predict value - predict_v = exe.run(static.default_main_program(), - feed={ - 'data': np_data, - 'label': np_label - }, - fetch_list=[predict]) + predict_v = exe.run( + 
static.default_main_program(), + feed={'data': np_data, 'label': np_label}, + fetch_list=[predict], + ) return predict_v @@ -151,10 +148,9 @@ def custom_relu_double_grad_dynamic(func, device, dtype, np_x, use_func=True): out = func(t) if use_func else paddle.nn.functional.relu(t) out.stop_gradient = False - dx = paddle.grad(outputs=[out], - inputs=[t], - create_graph=True, - retain_graph=True) + dx = paddle.grad( + outputs=[out], inputs=[t], create_graph=True, retain_graph=True + ) dx[0].backward() @@ -163,16 +159,17 @@ def custom_relu_double_grad_dynamic(func, device, dtype, np_x, use_func=True): class TestNewCustomOpSetUpInstall(unittest.TestCase): - def setUp(self): cur_dir = os.path.dirname(os.path.abspath(__file__)) # compile, install the custom op egg into site-packages under background if os.name == 'nt': cmd = 'cd /d {} && python custom_relu_setup.py install'.format( - cur_dir) + cur_dir + ) else: cmd = 'cd {} && {} custom_relu_setup.py install'.format( - cur_dir, sys.executable) + cur_dir, sys.executable + ) run_cmd(cmd) # NOTE(Aurelius84): Normally, it's no need to add following codes for users. @@ -188,16 +185,18 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): custom_egg_path = [ x for x in os.listdir(site_dir) if 'custom_relu_module_setup' in x ] - assert len(custom_egg_path - ) == 1, "Matched egg number is %d." % len(custom_egg_path) + assert len(custom_egg_path) == 1, "Matched egg number is %d." % len( + custom_egg_path + ) sys.path.append(os.path.join(site_dir, custom_egg_path[0])) # usage: import the package directly import custom_relu_module_setup + # `custom_relu_dup` is same as `custom_relu_dup` self.custom_ops = [ custom_relu_module_setup.custom_relu, - custom_relu_module_setup.custom_relu_dup + custom_relu_module_setup.custom_relu_dup, ] self.dtypes = ['float32', 'float64'] @@ -220,13 +219,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) for custom_op in self.custom_ops: out = custom_relu_static(custom_op, device, dtype, x) - pd_out = custom_relu_static(custom_op, device, dtype, x, - False) + pd_out = custom_relu_static( + custom_op, device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'. - format(out, pd_out)) + err_msg='custom op out: {},\n paddle api out: {}'.format( + out, pd_out + ), + ) def test_static_pe(self): for device in self.devices: @@ -236,13 +238,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) for custom_op in self.custom_ops: out = custom_relu_static_pe(custom_op, device, dtype, x) - pd_out = custom_relu_static_pe(custom_op, device, dtype, x, - False) + pd_out = custom_relu_static_pe( + custom_op, device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'. 
- format(out, pd_out)) + err_msg='custom op out: {},\n paddle api out: {}'.format( + out, pd_out + ), + ) def func_dynamic(self): for device in self.devices: @@ -251,20 +256,26 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): continue x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) for custom_op in self.custom_ops: - out, x_grad = custom_relu_dynamic(custom_op, device, dtype, - x) + out, x_grad = custom_relu_dynamic( + custom_op, device, dtype, x + ) pd_out, pd_x_grad = custom_relu_dynamic( - custom_op, device, dtype, x, False) + custom_op, device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, - err_msg='custom op out: {},\n paddle api out: {}'. - format(out, pd_out)) + err_msg='custom op out: {},\n paddle api out: {}'.format( + out, pd_out + ), + ) np.testing.assert_array_equal( x_grad, pd_x_grad, - err_msg='custom op x grad: {},\n paddle api x grad: {}'. - format(x_grad, pd_x_grad)) + err_msg='custom op x grad: {},\n paddle api x grad: {}'.format( + x_grad, pd_x_grad + ), + ) def test_dynamic(self): with _test_eager_guard(): @@ -277,22 +288,29 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): np_label = np.random.random((1, 1)).astype("int64") path_prefix = "custom_op_inference/custom_relu" for device in self.devices: - predict = custom_relu_static_inference(self.custom_ops[0], device, - np_data, np_label, - path_prefix) + predict = custom_relu_static_inference( + self.custom_ops[0], device, np_data, np_label, path_prefix + ) # load inference model with static.scope_guard(static.Scope()): exe = static.Executor() - [inference_program, feed_target_names, - fetch_targets] = static.load_inference_model(path_prefix, exe) - predict_infer = exe.run(inference_program, - feed={feed_target_names[0]: np_data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = static.load_inference_model(path_prefix, exe) + predict_infer = exe.run( + inference_program, + feed={feed_target_names[0]: np_data}, + fetch_list=fetch_targets, + ) np.testing.assert_array_equal( predict, predict_infer, - err_msg='custom op predict: {},\n custom op infer predict: {}' - .format(predict, predict_infer)) + err_msg='custom op predict: {},\n custom op infer predict: {}'.format( + predict, predict_infer + ), + ) paddle.disable_static() def test_static_save_and_run_inference_predictor(self): @@ -302,26 +320,32 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): path_prefix = "custom_op_inference/custom_relu" from paddle.inference import Config from paddle.inference import create_predictor + for device in self.devices: - predict = custom_relu_static_inference(self.custom_ops[0], device, - np_data, np_label, - path_prefix) + predict = custom_relu_static_inference( + self.custom_ops[0], device, np_data, np_label, path_prefix + ) # load inference model - config = Config(path_prefix + ".pdmodel", - path_prefix + ".pdiparams") + config = Config( + path_prefix + ".pdmodel", path_prefix + ".pdiparams" + ) predictor = create_predictor(config) input_tensor = predictor.get_input_handle( - predictor.get_input_names()[0]) + predictor.get_input_names()[0] + ) input_tensor.reshape(np_data.shape) input_tensor.copy_from_cpu(np_data.copy()) predictor.run() output_tensor = predictor.get_output_handle( - predictor.get_output_names()[0]) + predictor.get_output_names()[0] + ) predict_infer = output_tensor.copy_to_cpu() self.assertTrue( np.isclose(predict, predict_infer, rtol=5e-5).any(), "custom op predict: {},\n custom op infer predict: {}".format( - predict, 
predict_infer)) + predict, predict_infer + ), + ) paddle.disable_static() def test_func_double_grad_dynamic(self): @@ -331,33 +355,43 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): continue x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) out, dx_grad = custom_relu_double_grad_dynamic( - self.custom_ops[0], device, dtype, x) + self.custom_ops[0], device, dtype, x + ) pd_out, pd_dx_grad = custom_relu_double_grad_dynamic( - self.custom_ops[0], device, dtype, x, False) + self.custom_ops[0], device, dtype, x, False + ) np.testing.assert_array_equal( out, pd_out, err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out)) + out, pd_out + ), + ) np.testing.assert_array_equal( dx_grad, pd_dx_grad, - err_msg='custom op dx grad: {},\n paddle api dx grad: {}'. - format(dx_grad, pd_dx_grad)) + err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.format( + dx_grad, pd_dx_grad + ), + ) def test_with_dataloader(self): for device in self.devices: paddle.set_device(device) # data loader transform = Compose( - [Normalize(mean=[127.5], std=[127.5], data_format='CHW')]) - train_dataset = paddle.vision.datasets.MNIST(mode='train', - transform=transform) - train_loader = paddle.io.DataLoader(train_dataset, - batch_size=64, - shuffle=True, - drop_last=True, - num_workers=0) + [Normalize(mean=[127.5], std=[127.5], data_format='CHW')] + ) + train_dataset = paddle.vision.datasets.MNIST( + mode='train', transform=transform + ) + train_loader = paddle.io.DataLoader( + train_dataset, + batch_size=64, + shuffle=True, + drop_last=True, + num_workers=0, + ) for batch_id, (image, _) in enumerate(train_loader()): out = self.custom_ops[0](image) @@ -366,7 +400,9 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): out, pd_out, err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out)) + out, pd_out + ), + ) if batch_id == 5: break diff --git a/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py b/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py index e7d60fd4296eb92a1bd1097dddbe5a59f3f032d9..b67b09131fa0c34ddacd60fd5f3ad0586eef3ce6 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py @@ -25,7 +25,8 @@ from paddle.fluid.framework import _test_eager_guard # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. 
file = '{}\\custom_simple_slice\\custom_simple_slice.pyd'.format( - get_build_directory()) + get_build_directory() +) if os.name == 'nt' and os.path.isfile(file): cmd = 'del {}'.format(file) run_cmd(cmd, True) @@ -36,11 +37,11 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) class TestCustomSimpleSliceJit(unittest.TestCase): - def func_slice_output(self): np_x = np.random.random((5, 2)).astype("float32") x = paddle.to_tensor(np_x) @@ -49,8 +50,10 @@ class TestCustomSimpleSliceJit(unittest.TestCase): np.testing.assert_array_equal( custom_op_out, np_out, - err_msg='custom op: {},\n numpy: {}'.format(np_out, - custom_op_out.numpy())) + err_msg='custom op: {},\n numpy: {}'.format( + np_out, custom_op_out.numpy() + ), + ) def test_slice_output(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py b/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py index fb65422758f38884ebbe578e01210c1ebb1e10d6..d3fec61f4bdda0a9913252c9355fd0bed146fe26 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py @@ -36,7 +36,8 @@ custom_ops = load( extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cc flags extra_cuda_cflags=extra_nvcc_args, # test for nvcc flags - verbose=True) + verbose=True, +) def custom_tanh_double_grad_dynamic(func, device, dtype, np_x): @@ -48,10 +49,9 @@ def custom_tanh_double_grad_dynamic(func, device, dtype, np_x): out = func(t) out.stop_gradient = False - dx = paddle.grad(outputs=[out], - inputs=[t], - create_graph=True, - retain_graph=True) + dx = paddle.grad( + outputs=[out], inputs=[t], create_graph=True, retain_graph=True + ) dx[0].backward() @@ -62,7 +62,6 @@ def custom_tanh_double_grad_dynamic(func, device, dtype, np_x): class TestCustomTanhDoubleGradJit(unittest.TestCase): - def setUp(self): paddle.set_device('cpu') self.dtypes = ['float32', 'float64'] @@ -73,27 +72,35 @@ class TestCustomTanhDoubleGradJit(unittest.TestCase): for dtype in self.dtypes: x = np.random.uniform(-1, 1, [4, 8]).astype(dtype) out, dx_grad, dout = custom_tanh_double_grad_dynamic( - custom_ops.custom_tanh, device, dtype, x) + custom_ops.custom_tanh, device, dtype, x + ) pd_out, pd_dx_grad, pd_dout = custom_tanh_double_grad_dynamic( - paddle.tanh, device, dtype, x) + paddle.tanh, device, dtype, x + ) np.testing.assert_allclose( out, pd_out, rtol=1e-05, err_msg='custom op out: {},\n paddle api out: {}'.format( - out, pd_out)) + out, pd_out + ), + ) np.testing.assert_allclose( dx_grad, pd_dx_grad, rtol=1e-05, - err_msg='custom op dx grad: {},\n paddle api dx grad: {}'. - format(dx_grad, pd_dx_grad)) + err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.format( + dx_grad, pd_dx_grad + ), + ) np.testing.assert_allclose( dout, pd_dout, rtol=1e-05, - err_msg='custom op out grad: {},\n paddle api out grad: {}'. 
- format(dout, pd_dout)) + err_msg='custom op out grad: {},\n paddle api out grad: {}'.format( + dout, pd_dout + ), + ) def test_func_double_grad_dynamic(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py b/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py index ff5192db7aafcc14b3bda045be3b3bcc852023b7..a5bbb405fa4099a3a0749cf5b9ce48508f40d3f8 100644 --- a/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py @@ -20,6 +20,7 @@ from paddle.utils.cpp_extension import load, get_build_directory from utils import paddle_includes, extra_cc_args from paddle.utils.cpp_extension.extension_utils import run_cmd from paddle.fluid.framework import _test_eager_guard + # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. file = '{}\\dispatch_op\\dispatch_op.pyd'.format(get_build_directory()) @@ -32,11 +33,11 @@ dispatch_op = load( sources=['dispatch_test_op.cc'], extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, - verbose=True) + verbose=True, +) class TestJitDispatch(unittest.TestCase): - def setUp(self): paddle.set_device('cpu') @@ -50,7 +51,8 @@ class TestJitDispatch(unittest.TestCase): np.testing.assert_array_equal( np_x, np_out, - err_msg='custom op x: {},\n custom op out: {}'.format(np_x, np_out)) + err_msg='custom op x: {},\n custom op out: {}'.format(np_x, np_out), + ) def run_dispatch_test(self, func, dtype): with _test_eager_guard(): @@ -69,32 +71,49 @@ class TestJitDispatch(unittest.TestCase): def test_dispatch_float_and_integer(self): dtypes = [ - "float32", "float64", "int32", "int64", "int8", "uint8", "int16" + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "int16", ] for dtype in dtypes: - self.run_dispatch_test(dispatch_op.dispatch_test_float_and_integer, - dtype) + self.run_dispatch_test( + dispatch_op.dispatch_test_float_and_integer, dtype + ) def test_dispatch_float_and_complex(self): dtypes = ["float32", "float64", "complex64", "complex128"] for dtype in dtypes: - self.run_dispatch_test(dispatch_op.dispatch_test_float_and_complex, - dtype) + self.run_dispatch_test( + dispatch_op.dispatch_test_float_and_complex, dtype + ) def test_dispatch_float_and_integer_and_complex(self): dtypes = [ - "float32", "float64", "int32", "int64", "int8", "uint8", "int16", - "complex64", "complex128" + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "int16", + "complex64", + "complex128", ] for dtype in dtypes: self.run_dispatch_test( - dispatch_op.dispatch_test_float_and_integer_and_complex, dtype) + dispatch_op.dispatch_test_float_and_integer_and_complex, dtype + ) def test_dispatch_float_and_half(self): dtypes = ["float32", "float64", "float16"] for dtype in dtypes: - self.run_dispatch_test(dispatch_op.dispatch_test_float_and_half, - dtype) + self.run_dispatch_test( + dispatch_op.dispatch_test_float_and_half, dtype + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py b/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py index eb9b9e4677fd2f02193153900fbcf70d80f618d1..7b6c30cabac360023e0ac05f49c08f145bb24aee 100644 --- a/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py @@ -22,6 +22,7 @@ from paddle.utils.cpp_extension import load, 
get_build_directory from paddle.utils.cpp_extension.extension_utils import run_cmd from utils import paddle_includes, extra_cc_args from paddle.fluid.framework import _test_eager_guard + # Because Windows don't use docker, the shared lib already exists in the # cache dir, it will not be compiled again unless the shared lib is removed. file = '{}\\multi_out_jit\\multi_out_jit.pyd'.format(get_build_directory()) @@ -35,11 +36,11 @@ multi_out_module = load( sources=['multi_out_test_op.cc'], extra_include_paths=paddle_includes, # add for Coverage CI extra_cxx_cflags=extra_cc_args, # test for cflags - verbose=True) + verbose=True, +) class TestMultiOutputDtypes(unittest.TestCase): - def setUp(self): self.custom_op = multi_out_module.multi_out self.dtypes = ['float32', 'float64'] @@ -56,9 +57,11 @@ class TestMultiOutputDtypes(unittest.TestCase): exe = paddle.static.Executor() exe.run(paddle.static.default_startup_program()) - res = exe.run(paddle.static.default_main_program(), - feed={'X': x_data}, - fetch_list=outs) + res = exe.run( + paddle.static.default_main_program(), + feed={'X': x_data}, + fetch_list=outs, + ) return res @@ -69,12 +72,14 @@ class TestMultiOutputDtypes(unittest.TestCase): one_int32 = one_int32.numpy() # Fake_float64 self.assertTrue('float64' in str(zero_float64.dtype)) - np.testing.assert_array_equal(zero_float64, - np.zeros([4, 8]).astype('float64')) + np.testing.assert_array_equal( + zero_float64, np.zeros([4, 8]).astype('float64') + ) # ZFake_int32 self.assertTrue('int32' in str(one_int32.dtype)) - np.testing.assert_array_equal(one_int32, - np.ones([4, 8]).astype('int32')) + np.testing.assert_array_equal( + one_int32, np.ones([4, 8]).astype('int32') + ) def test_static(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/custom_op/test_sysconfig.py b/python/paddle/fluid/tests/custom_op/test_sysconfig.py index 89ef36931f3ff899d5e70514773154bfc2a19a30..78c0cdf0316ea901d613d6ff34f58504d3f493b0 100644 --- a/python/paddle/fluid/tests/custom_op/test_sysconfig.py +++ b/python/paddle/fluid/tests/custom_op/test_sysconfig.py @@ -18,7 +18,6 @@ import paddle class SysConfigTest(unittest.TestCase): - def test_include(self): inc_dir = paddle.sysconfig.get_include() inc_dirs = inc_dir.split(os.sep) diff --git a/python/paddle/fluid/tests/custom_op/utils.py b/python/paddle/fluid/tests/custom_op/utils.py index adce68cd0f969f983b74b19b4a21f34e32b2d1c2..c21a1bb1624f4cb98c97a595b4d58e153443909c 100644 --- a/python/paddle/fluid/tests/custom_op/utils.py +++ b/python/paddle/fluid/tests/custom_op/utils.py @@ -26,7 +26,7 @@ site_packages_path = get_python_lib() # PaddlePaddle whl. So here we specific `include_dirs` to avoid errors in CI. 
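The `paddle_includes` list defined just below is what the tests earlier in this diff pass to `load(...)`; a minimal sketch of that plumbing, with an illustrative operator name and source file (the flag lists come from this same utils module):

from paddle.utils.cpp_extension import load
from utils import paddle_includes, extra_cc_args, extra_nvcc_args

custom_ops = load(
    name='example_op',                    # illustrative name
    sources=['example_op.cc'],            # illustrative source file
    extra_include_paths=paddle_includes,  # headers from the installed wheel
    extra_cxx_cflags=extra_cc_args,       # cc flags exercised by the tests
    extra_cuda_cflags=extra_nvcc_args,    # nvcc flags exercised by the tests
    verbose=True,
)

The exploded argument list with a trailing comma reflects the black style this change applies throughout.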
paddle_includes = [ os.path.join(site_packages_path, 'paddle', 'include'), - os.path.join(site_packages_path, 'paddle', 'include', 'third_party') + os.path.join(site_packages_path, 'paddle', 'include', 'third_party'), ] # Test for extra compile args diff --git a/python/paddle/fluid/tests/custom_runtime/custom_device_multi_process_collective.py b/python/paddle/fluid/tests/custom_runtime/custom_device_multi_process_collective.py index 21e987104269145c5e7aff37a2b23abfe96509cd..b658b885176f3789f9d4b2e6fd037edf644c41a3 100644 --- a/python/paddle/fluid/tests/custom_runtime/custom_device_multi_process_collective.py +++ b/python/paddle/fluid/tests/custom_runtime/custom_device_multi_process_collective.py @@ -27,12 +27,21 @@ def train(prefix): device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS") current_device_id = os.getenv("PADDLE_LOCAL_DEVICE_IDS") - details = "selected_accelerators:{} selected_custom_devices:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{} device_ids:{} device_id:{}"\ - .format(selected_accelerators, selected_custom_devices, worker_endpoints, trainers_num, current_endpoint,trainer_id,device_ids, current_device_id) + details = "selected_accelerators:{} selected_custom_devices:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{} device_ids:{} device_id:{}".format( + selected_accelerators, + selected_custom_devices, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + device_ids, + current_device_id, + ) print(details) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(details) diff --git a/python/paddle/fluid/tests/custom_runtime/process_group_xccl.py b/python/paddle/fluid/tests/custom_runtime/process_group_xccl.py index b830d0eccf526ad83f0d98c29afeaedacbbff2a0..201b2d3df8657565f3edbee304dc848248aaaf52 100644 --- a/python/paddle/fluid/tests/custom_runtime/process_group_xccl.py +++ b/python/paddle/fluid/tests/custom_runtime/process_group_xccl.py @@ -29,15 +29,16 @@ def init_process_group(strategy=None): is_master = True if rank == 0 else False store = paddle.fluid.core.TCPStore("127.0.0.1", 6173, is_master, nranks) pg_group = core.ProcessGroupCustom( - store, rank, nranks, - paddle.CustomPlace(ParallelEnv().device_type, - ParallelEnv().device_id)) + store, + rank, + nranks, + paddle.CustomPlace(ParallelEnv().device_type, ParallelEnv().device_id), + ) return pg_group class TestProcessGroupFp32(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) @@ -50,8 +51,9 @@ class TestProcessGroupFp32(unittest.TestCase): def test_create_process_group_xccl(self): with _test_eager_guard(): - paddle.set_device('custom_cpu:%d' % - paddle.distributed.ParallelEnv().dev_id) + paddle.set_device( + 'custom_cpu:%d' % paddle.distributed.ParallelEnv().dev_id + ) pg = init_process_group() @@ -147,8 +149,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) - out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], - [out_shape[0]]) + out_2 = paddle.slice( + tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] + ) # assert np.array_equal(tensor_x, out_1) # assert np.array_equal(tensor_y, out_2) print("test allgather api ok\n") @@ -163,10 +166,12 @@ class TestProcessGroupFp32(unittest.TestCase): tensor_y = paddle.to_tensor(y) tensor_out1 = paddle.to_tensor(out1) 
tensor_out2 = paddle.to_tensor(out2) - raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2], - [self.shape[0]]) - raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0], - [self.shape[0] // 2]) + raw_tensor_x_2 = paddle.slice( + tensor_x, [0], [self.shape[0] // 2], [self.shape[0]] + ) + raw_tensor_y_1 = paddle.slice( + tensor_y, [0], [0], [self.shape[0] // 2] + ) if pg.rank() == 0: task = pg.alltoall(tensor_x, tensor_out1) task.wait() @@ -176,8 +181,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = pg.alltoall(tensor_y, tensor_out2) task.wait() # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) - out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2], - [self.shape[0]]) + out1_2 = paddle.slice( + tensor_out1, [0], [self.shape[0] // 2], [self.shape[0]] + ) out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2]) # if pg.rank() == 0: # assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy()) @@ -223,8 +229,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) - out2 = paddle.slice(tensor_x, [0], [self.shape[0]], - [self.shape[0] * 2]) + out2 = paddle.slice( + tensor_x, [0], [self.shape[0]], [self.shape[0] * 2] + ) # if pg.rank() == 0: # assert np.array_equal(tensor_y, out1) # else: diff --git a/python/paddle/fluid/tests/custom_runtime/test_collective_process_group_xccl.py b/python/paddle/fluid/tests/custom_runtime/test_collective_process_group_xccl.py index 1127352d85d9981da93694691f4e426fb1d006ed..586e0e322ab388a74865efcb964f2ee37b15a79a 100644 --- a/python/paddle/fluid/tests/custom_runtime/test_collective_process_group_xccl.py +++ b/python/paddle/fluid/tests/custom_runtime/test_collective_process_group_xccl.py @@ -20,19 +20,26 @@ import time import tempfile -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - eager_mode=True, - log_dir=None): - from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc # noqa: F401 +def start_local_trainers( + cluster, + pod, + training_script, + training_script_args, + eager_mode=True, + log_dir=None, +): + from paddle.distributed.utils.launch_utils import ( # noqa: F401 + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, + ) current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. - #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. 
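The comment above explains why `start_local_trainers` strips proxy settings from the inherited environment before spawning trainer processes; a minimal sketch of that launch path, with illustrative rank/endpoint values and command line (not taken from the diff):

import copy
import os
import subprocess

# Copy the parent environment and drop proxy variables so the socket used to
# broadcast ncclUniqueId between trainers stays reachable.
current_env = copy.copy(os.environ.copy())
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)

# Per-trainer settings are merged in before launch; the values here are
# illustrative only.
current_env.update({
    "PADDLE_TRAINER_ID": "0",
    "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:6170",
})
proc = subprocess.Popen(
    ["python", "process_group_xccl.py"], env=current_env
)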
current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -41,8 +48,8 @@ def start_local_trainers(cluster, os.system("rm -rf log && mkdir -p log") for idx, t in enumerate(pod.trainers): proc_env = { - "FLAGS_selected_custom_cpus": - "%s" % ",".join([str(g) for g in t.gpus]), + "FLAGS_selected_custom_cpus": "%s" + % ",".join([str(g) for g in t.gpus]), "PADDLE_TRAINER_ID": "%d" % t.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), @@ -65,10 +72,9 @@ def start_local_trainers(cluster, print("start trainer proc:{} env:{}".format(cmd, proc_env)) fn = open("workerlog.%d" % idx, "a") - proc = subprocess.Popen(cmd.split(" "), - env=current_env, - stdout=fn, - stderr=fn) + proc = subprocess.Popen( + cmd.split(" "), env=current_env, stdout=fn, stderr=fn + ) tp = TrainerProc() tp.proc = proc @@ -82,7 +88,12 @@ def start_local_trainers(cluster, def get_cluster_from_args(selected_gpus): - from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc # noqa: F401 + from paddle.distributed.utils.launch_utils import ( # noqa: F401 + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, + ) cluster_node_ips = '127.0.0.1' node_ip = '127.0.0.1' @@ -104,9 +115,13 @@ def get_cluster_from_args(selected_gpus): class TestMultipleCustomCPU(unittest.TestCase): - def run_mnist_2custom_cpu(self, target_file_name, eager_mode=True): - from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc # noqa: F401 + from paddle.distributed.utils.launch_utils import ( # noqa: F401 + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, + ) selected_devices = [0, 1] cluster = None @@ -114,11 +129,13 @@ class TestMultipleCustomCPU(unittest.TestCase): cluster, pod = get_cluster_from_args(selected_devices) - procs = start_local_trainers(cluster, - pod, - eager_mode=eager_mode, - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers( + cluster, + pod, + eager_mode=eager_mode, + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_endpoints()) @@ -130,7 +147,6 @@ class TestMultipleCustomCPU(unittest.TestCase): class TestProcessGroup(TestMultipleCustomCPU): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) @@ -142,15 +158,18 @@ class TestProcessGroup(TestMultipleCustomCPU): && git checkout {} -b dev \ && cd backends/custom_cpu \ && mkdir build && cd build && cmake .. 
&& make -j8'.format( - self.temp_dir.name, os.getenv('PLUGIN_URL'), - os.getenv('PLUGIN_TAG')) + self.temp_dir.name, os.getenv('PLUGIN_URL'), os.getenv('PLUGIN_TAG') + ) os.system(cmd) # set environment for loading and registering compiled custom kernels # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( - cur_dir, '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name)) + cur_dir, + '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( + self.temp_dir.name + ), + ) os.environ['FLAGS_selected_custom_cpus'] = '0,1' os.environ['CUSTOM_CPU_VISIBLE_DEVICES'] = '0,1' os.environ['PADDLE_XCCL_BACKEND'] = 'custom_cpu' @@ -159,7 +178,12 @@ class TestProcessGroup(TestMultipleCustomCPU): self.temp_dir.cleanup() def test_process_group_xccl(self): - from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc # noqa: F401 + from paddle.distributed.utils.launch_utils import ( # noqa: F401 + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, + ) self.run_mnist_2custom_cpu('process_group_xccl.py') diff --git a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py index f1cfa8ef7392a7ff404c150d1a75ff9024b2f911..5d0e5ccc475bdb240ecdc361d70d2a847cc50318 100644 --- a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py +++ b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py @@ -20,7 +20,6 @@ import tempfile class TestCustomCPUPlugin(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) @@ -32,15 +31,18 @@ class TestCustomCPUPlugin(unittest.TestCase): && git checkout {} -b dev \ && cd backends/custom_cpu \ && mkdir build && cd build && cmake .. 
&& make -j8'.format( - self.temp_dir.name, os.getenv('PLUGIN_URL'), - os.getenv('PLUGIN_TAG')) + self.temp_dir.name, os.getenv('PLUGIN_URL'), os.getenv('PLUGIN_TAG') + ) os.system(cmd) # set environment for loading and registering compiled custom kernels # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( - cur_dir, '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name)) + cur_dir, + '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( + self.temp_dir.name + ), + ) def tearDown(self): self.temp_dir.cleanup() @@ -65,16 +67,18 @@ class TestCustomCPUPlugin(unittest.TestCase): paddle.set_device('custom_cpu') dataset = paddle.vision.datasets.MNIST( mode='test', - transform=paddle.vision.transforms.Compose([ - paddle.vision.transforms.CenterCrop(20), - paddle.vision.transforms.RandomResizedCrop(14), - paddle.vision.transforms.Normalize(), - paddle.vision.transforms.ToTensor() - ])) - loader = paddle.io.DataLoader(dataset, - batch_size=32, - num_workers=1, - shuffle=True) + transform=paddle.vision.transforms.Compose( + [ + paddle.vision.transforms.CenterCrop(20), + paddle.vision.transforms.RandomResizedCrop(14), + paddle.vision.transforms.Normalize(), + paddle.vision.transforms.ToTensor(), + ] + ), + ) + loader = paddle.io.DataLoader( + dataset, batch_size=32, num_workers=1, shuffle=True + ) for image, label in loader: self.assertTrue(image.place.is_custom_place()) self.assertTrue(label.place.is_custom_place()) @@ -84,13 +88,13 @@ class TestCustomCPUPlugin(unittest.TestCase): import paddle class MNIST(paddle.nn.Layer): - def __init__(self): super(MNIST, self).__init__() self.shape = 1 * 28 * 28 self.size = 10 self.output_weight = self.create_parameter( - [self.shape, self.size]) + [self.shape, self.size] + ) self.accuracy = paddle.metric.Accuracy() def forward(self, inputs, label=None): @@ -110,15 +114,17 @@ class TestCustomCPUPlugin(unittest.TestCase): dataset = paddle.vision.datasets.MNIST( mode='train', transform=paddle.vision.transforms.Compose( - [paddle.vision.transforms.ToTensor()])) - loader = paddle.io.DataLoader(dataset, - batch_size=64, - num_workers=1, - shuffle=True) + [paddle.vision.transforms.ToTensor()] + ), + ) + loader = paddle.io.DataLoader( + dataset, batch_size=64, num_workers=1, shuffle=True + ) mnist = MNIST() - sgd = paddle.optimizer.SGD(learning_rate=0.01, - parameters=mnist.parameters()) + sgd = paddle.optimizer.SGD( + learning_rate=0.01, parameters=mnist.parameters() + ) data = next(loader()) img = data[0] @@ -139,6 +145,7 @@ class TestCustomCPUPlugin(unittest.TestCase): grad = np.ones([2, 2]).astype("float32") import paddle + paddle.set_device('custom_cpu') paddle.device.get_available_device() x_tensor = paddle.to_tensor(x, stop_gradient=False) @@ -153,18 +160,21 @@ class TestCustomCPUPlugin(unittest.TestCase): def _test_eager_copy_to(self): import paddle + x = np.random.random([2, 2]).astype("float32") # cpu -> custom - cpu_tensor = paddle.to_tensor(x, - dtype='float32', - place=paddle.CPUPlace()) + cpu_tensor = paddle.to_tensor( + x, dtype='float32', place=paddle.CPUPlace() + ) custom_cpu_tensor = cpu_tensor._copy_to( - paddle.CustomPlace('custom_cpu', 0), True) + paddle.CustomPlace('custom_cpu', 0), True + ) np.testing.assert_array_equal(custom_cpu_tensor, x) self.assertTrue(custom_cpu_tensor.place.is_custom_place()) # custom -> custom another_custom_cpu_tensor = custom_cpu_tensor._copy_to( - paddle.CustomPlace('custom_cpu', 0), True) + paddle.CustomPlace('custom_cpu', 0), True + ) 
np.testing.assert_array_equal(another_custom_cpu_tensor, x) self.assertTrue(another_custom_cpu_tensor.place.is_custom_place()) # custom -> cpu @@ -173,13 +183,15 @@ class TestCustomCPUPlugin(unittest.TestCase): self.assertTrue(another_cpu_tensor.place.is_cpu_place()) # custom -> custom self another_custom_cpu_tensor = another_custom_cpu_tensor._copy_to( - paddle.CustomPlace('custom_cpu', 0), True) + paddle.CustomPlace('custom_cpu', 0), True + ) np.testing.assert_array_equal(another_custom_cpu_tensor, x) self.assertTrue(another_custom_cpu_tensor.place.is_custom_place()) def _test_fallback_kernel(self): # using (custom_cpu, add, int16) which is not registered import paddle + r = np.array([6, 6, 6], 'int16') x = paddle.to_tensor([5, 4, 3], 'int16') y = paddle.to_tensor([1, 2, 3], 'int16') @@ -188,8 +200,10 @@ class TestCustomCPUPlugin(unittest.TestCase): def _test_scalar(self): import paddle - data_1 = paddle.to_tensor([[[[1.0, 4.0, 5.0, 7.0], [3.0, 4.0, 5.0, - 6.0]]]]) + + data_1 = paddle.to_tensor( + [[[[1.0, 4.0, 5.0, 7.0], [3.0, 4.0, 5.0, 6.0]]]] + ) k_t = paddle.to_tensor([3], dtype="int32") value_1, indices_1 = paddle.topk(data_1, k=k_t) diff --git a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_profiler_plugin.py b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_profiler_plugin.py index 2e307fbb826b530686e45ce529a8218f71a7553c..82c2ddbbe73e7e46822a029d51c192ff9677492a 100644 --- a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_profiler_plugin.py +++ b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_profiler_plugin.py @@ -19,7 +19,6 @@ import tempfile class TestCustomCPUProfilerPlugin(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) @@ -31,15 +30,18 @@ class TestCustomCPUProfilerPlugin(unittest.TestCase): && git checkout {} -b dev \ && cd backends/custom_cpu \ && mkdir build && cd build && cmake .. 
&& make -j8'.format( - self.temp_dir.name, os.getenv('PLUGIN_URL'), - os.getenv('PLUGIN_TAG')) + self.temp_dir.name, os.getenv('PLUGIN_URL'), os.getenv('PLUGIN_TAG') + ) os.system(cmd) # set environment for loading and registering compiled custom kernels # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( - cur_dir, '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name)) + cur_dir, + '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( + self.temp_dir.name + ), + ) def tearDown(self): self.temp_dir.cleanup() @@ -47,6 +49,7 @@ class TestCustomCPUProfilerPlugin(unittest.TestCase): def test_custom_device(self): import paddle + with paddle.fluid.framework._test_eager_guard(): self._test_custom_profiler() @@ -57,9 +60,12 @@ class TestCustomCPUProfilerPlugin(unittest.TestCase): paddle.set_device('custom_cpu') x = paddle.to_tensor([1, 2, 3]) - p = profiler.Profiler(targets=[ - profiler.ProfilerTarget.CPU, profiler.ProfilerTarget.CUSTOM_DEVICE - ]) + p = profiler.Profiler( + targets=[ + profiler.ProfilerTarget.CPU, + profiler.ProfilerTarget.CUSTOM_DEVICE, + ] + ) p.start() for iter in range(10): x = x + 1 diff --git a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_to_static.py b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_to_static.py index e5e9638a0b5314d562627185aa0f863e5e08e18f..2d8796152b58889cfc8d3cf11481e5acaea59cb6 100644 --- a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_to_static.py +++ b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_to_static.py @@ -35,8 +35,11 @@ def train_func_base(epoch_id, train_loader, model, cost, optimizer): loss.backward() optimizer.step() optimizer.clear_grad() - print("Epoch [{}/{}], Step [{}/{}], Loss: {}".format( - epoch_id + 1, EPOCH_NUM, batch_id + 1, total_step, loss.numpy())) + print( + "Epoch [{}/{}], Step [{}/{}], Loss: {}".format( + epoch_id + 1, EPOCH_NUM, batch_id + 1, total_step, loss.numpy() + ) + ) epoch_end = time.time() print( f"Epoch ID: {epoch_id+1}, FP32 train epoch time: {(epoch_end - epoch_start) * 1000} ms" @@ -51,8 +54,9 @@ def train_func_ampo1(epoch_id, train_loader, model, cost, optimizer, scaler): for batch_id, (images, labels) in enumerate(train_loader()): # forward with paddle.amp.auto_cast( - custom_black_list={"flatten_contiguous_range", "greater_than"}, - level='O1'): + custom_black_list={"flatten_contiguous_range", "greater_than"}, + level='O1', + ): outputs = model(images) loss = cost(outputs, labels) # backward and optimize @@ -60,8 +64,11 @@ def train_func_ampo1(epoch_id, train_loader, model, cost, optimizer, scaler): scaled.backward() scaler.minimize(optimizer, scaled) optimizer.clear_grad() - print("Epoch [{}/{}], Step [{}/{}], Loss: {}".format( - epoch_id + 1, EPOCH_NUM, batch_id + 1, total_step, loss.numpy())) + print( + "Epoch [{}/{}], Step [{}/{}], Loss: {}".format( + epoch_id + 1, EPOCH_NUM, batch_id + 1, total_step, loss.numpy() + ) + ) epoch_end = time.time() print( f"Epoch ID: {epoch_id+1}, AMPO1 train epoch time: {(epoch_end - epoch_start) * 1000} ms" @@ -90,7 +97,6 @@ def test_func(epoch_id, test_loader, model, cost): class TestCustomCPUPlugin(unittest.TestCase): - def setUp(self): # compile so and set to current path cur_dir = os.path.dirname(os.path.abspath(__file__)) @@ -102,15 +108,18 @@ class TestCustomCPUPlugin(unittest.TestCase): && git checkout {} -b dev \ && cd backends/custom_cpu \ && mkdir build && cd build && cmake .. 
&& make -j8'.format( - self.temp_dir.name, os.getenv('PLUGIN_URL'), - os.getenv('PLUGIN_TAG')) + self.temp_dir.name, os.getenv('PLUGIN_URL'), os.getenv('PLUGIN_TAG') + ) os.system(cmd) # set environment for loading and registering compiled custom kernels # only valid in current process os.environ['CUSTOM_DEVICE_ROOT'] = os.path.join( - cur_dir, '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( - self.temp_dir.name)) + cur_dir, + '{}/PaddleCustomDevice/backends/custom_cpu/build'.format( + self.temp_dir.name + ), + ) def tearDown(self): self.temp_dir.cleanup() @@ -123,7 +132,6 @@ class TestCustomCPUPlugin(unittest.TestCase): import paddle class LeNet5(paddle.nn.Layer): - def __init__(self): super(LeNet5, self).__init__() self.fc = paddle.nn.Linear(in_features=1024, out_features=10) @@ -145,35 +153,44 @@ class TestCustomCPUPlugin(unittest.TestCase): # cost and optimizer cost = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) # convert to static model build_strategy = paddle.static.BuildStrategy() mnist = paddle.jit.to_static(model, build_strategy=build_strategy) # data loader - transform = paddle.vision.transforms.Compose([ - paddle.vision.transforms.Resize((32, 32)), - paddle.vision.transforms.ToTensor(), - paddle.vision.transforms.Normalize(mean=(0.1307, ), std=(0.3081, )) - ]) - train_dataset = paddle.vision.datasets.MNIST(mode='train', - transform=transform, - download=True) - test_dataset = paddle.vision.datasets.MNIST(mode='test', - transform=transform, - download=True) - train_loader = paddle.io.DataLoader(train_dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) - test_loader = paddle.io.DataLoader(test_dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) + transform = paddle.vision.transforms.Compose( + [ + paddle.vision.transforms.Resize((32, 32)), + paddle.vision.transforms.ToTensor(), + paddle.vision.transforms.Normalize( + mean=(0.1307,), std=(0.3081,) + ), + ] + ) + train_dataset = paddle.vision.datasets.MNIST( + mode='train', transform=transform, download=True + ) + test_dataset = paddle.vision.datasets.MNIST( + mode='test', transform=transform, download=True + ) + train_loader = paddle.io.DataLoader( + train_dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) + test_loader = paddle.io.DataLoader( + test_dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) # train and eval for epoch_id in range(EPOCH_NUM): @@ -184,7 +201,6 @@ class TestCustomCPUPlugin(unittest.TestCase): import paddle class LeNet5(paddle.nn.Layer): - def __init__(self): super(LeNet5, self).__init__() self.fc = paddle.nn.Linear(in_features=1024, out_features=10) @@ -206,42 +222,52 @@ class TestCustomCPUPlugin(unittest.TestCase): # cost and optimizer cost = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) # convert to static model scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - model, optimizer = paddle.amp.decorate(models=model, - optimizers=optimizer, - level='O1') + model, optimizer = paddle.amp.decorate( + models=model, optimizers=optimizer, level='O1' + ) # data loader - transform = paddle.vision.transforms.Compose([ - 
paddle.vision.transforms.Resize((32, 32)), - paddle.vision.transforms.ToTensor(), - paddle.vision.transforms.Normalize(mean=(0.1307, ), std=(0.3081, )) - ]) - train_dataset = paddle.vision.datasets.MNIST(mode='train', - transform=transform, - download=True) - test_dataset = paddle.vision.datasets.MNIST(mode='test', - transform=transform, - download=True) - train_loader = paddle.io.DataLoader(train_dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) - test_loader = paddle.io.DataLoader(test_dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) + transform = paddle.vision.transforms.Compose( + [ + paddle.vision.transforms.Resize((32, 32)), + paddle.vision.transforms.ToTensor(), + paddle.vision.transforms.Normalize( + mean=(0.1307,), std=(0.3081,) + ), + ] + ) + train_dataset = paddle.vision.datasets.MNIST( + mode='train', transform=transform, download=True + ) + test_dataset = paddle.vision.datasets.MNIST( + mode='test', transform=transform, download=True + ) + train_loader = paddle.io.DataLoader( + train_dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) + test_loader = paddle.io.DataLoader( + test_dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) # train and eval for epoch_id in range(EPOCH_NUM): - train_func_ampo1(epoch_id, train_loader, model, cost, optimizer, - scaler) + train_func_ampo1( + epoch_id, train_loader, model, cost, optimizer, scaler + ) test_func(epoch_id, test_loader, model, cost) diff --git a/python/paddle/fluid/tests/test_beam_search_decoder.py b/python/paddle/fluid/tests/test_beam_search_decoder.py index 66cd8e89d3237ba1572362af99b06e8c8794c450..3a5c8604648bcc3ca501d6d2c1ee27e197adc791 100644 --- a/python/paddle/fluid/tests/test_beam_search_decoder.py +++ b/python/paddle/fluid/tests/test_beam_search_decoder.py @@ -22,7 +22,12 @@ import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as layers from paddle.fluid.executor import Executor -from paddle.fluid.contrib.decoder.beam_search_decoder import BeamSearchDecoder, InitState, StateCell, TrainingDecoder +from paddle.fluid.contrib.decoder.beam_search_decoder import ( + BeamSearchDecoder, + InitState, + StateCell, + TrainingDecoder, +) import unittest paddle.enable_static() @@ -43,14 +48,15 @@ beam_size = 2 def encoder(): # encoder - src_word = layers.data(name="src_word", - shape=[1], - dtype='int64', - lod_level=1) - src_embedding = layers.embedding(input=src_word, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=IS_SPARSE) + src_word = layers.data( + name="src_word", shape=[1], dtype='int64', lod_level=1 + ) + src_embedding = layers.embedding( + input=src_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + ) fc1 = layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) @@ -67,9 +73,9 @@ def decoder_state_cell(context): current_word = state_cell.get_input('x') prev_h = state_cell.get_state('h') # make sure lod of h heritted from prev_h - h = layers.fc(input=[prev_h, current_word], - size=decoder_size, - act='tanh') + h = layers.fc( + input=[prev_h, current_word], size=decoder_size, act='tanh' + ) state_cell.set_state('h', h) return state_cell @@ -77,23 +83,26 @@ def decoder_state_cell(context): def decoder_train(state_cell): # decoder - trg_language_word = layers.data(name="target_word", - shape=[1], - dtype='int64', - 
lod_level=1) - trg_embedding = layers.embedding(input=trg_language_word, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=IS_SPARSE) + trg_language_word = layers.data( + name="target_word", shape=[1], dtype='int64', lod_level=1 + ) + trg_embedding = layers.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + ) decoder = TrainingDecoder(state_cell) with decoder.block(): current_word = decoder.step_input(trg_embedding) decoder.state_cell.compute_state(inputs={'x': current_word}) - current_score = layers.fc(input=decoder.state_cell.get_state('h'), - size=target_dict_dim, - act='softmax') + current_score = layers.fc( + input=decoder.state_cell.get_state('h'), + size=target_dict_dim, + act='softmax', + ) decoder.state_cell.update_states() decoder.output(current_score) @@ -101,27 +110,27 @@ def decoder_train(state_cell): def decoder_decode(state_cell): - init_ids = layers.data(name="init_ids", - shape=[1], - dtype="int64", - lod_level=2) - init_scores = layers.data(name="init_scores", - shape=[1], - dtype="float32", - lod_level=2) - - decoder = BeamSearchDecoder(state_cell=state_cell, - init_ids=init_ids, - init_scores=init_scores, - target_dict_dim=target_dict_dim, - word_dim=word_dim, - input_var_dict={}, - topk_size=topk_size, - sparse_emb=IS_SPARSE, - max_len=max_length, - beam_size=beam_size, - end_id=1, - name=None) + init_ids = layers.data( + name="init_ids", shape=[1], dtype="int64", lod_level=2 + ) + init_scores = layers.data( + name="init_scores", shape=[1], dtype="float32", lod_level=2 + ) + + decoder = BeamSearchDecoder( + state_cell=state_cell, + init_ids=init_ids, + init_scores=init_scores, + target_dict_dim=target_dict_dim, + word_dim=word_dim, + input_var_dict={}, + topk_size=topk_size, + sparse_emb=IS_SPARSE, + max_len=max_length, + beam_size=beam_size, + end_id=1, + name=None, + ) decoder.decode() translation_ids, translation_scores = decoder() @@ -136,19 +145,21 @@ def train_main(use_cuda): context = encoder() state_cell = decoder_state_cell(context) rnn_out = decoder_train(state_cell) - label = layers.data(name="target_next_word", - shape=[1], - dtype='int64', - lod_level=1) + label = layers.data( + name="target_next_word", shape=[1], dtype='int64', lod_level=1 + ) cost = layers.cross_entropy(input=rnn_out, label=label) avg_cost = paddle.mean(x=cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-3) optimizer.minimize(avg_cost) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000 + ), + batch_size=batch_size, + ) feed_order = ['src_word', 'target_word', 'target_next_word'] exe = Executor(place) @@ -163,12 +174,18 @@ def train_main(use_cuda): for pass_id in range(1): for batch_id, data in enumerate(train_reader()): - outs = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost]) + outs = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[avg_cost] + ) avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) + print( + 'pass_id=' + + str(pass_id) + + ' batch=' + + str(batch_id) + + " avg_cost=" + + str(avg_cost_val) + ) if batch_id > 3: break @@ -188,8 +205,9 @@ def decode_main(use_cuda): exe.run(framework.default_startup_program()) init_ids_data = np.array([0 for _ in range(batch_size)], dtype='int64') - 
init_scores_data = np.array([1. for _ in range(batch_size)], - dtype='float32') + init_scores_data = np.array( + [1.0 for _ in range(batch_size)], dtype='float32' + ) init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) init_lod = [1] * batch_size @@ -198,9 +216,12 @@ def decode_main(use_cuda): init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000 + ), + batch_size=batch_size, + ) feed_order = ['src_word'] feed_list = [ @@ -218,7 +239,8 @@ def decode_main(use_cuda): framework.default_main_program(), feed=feed_dict, fetch_list=[translation_ids, translation_scores], - return_numpy=False) + return_numpy=False, + ) print(result_ids.lod()) diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 5f97a250d7537735b730f2047873df157ff19f50..515bf44c75518bb414203eef08849d1cb6be2505 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDataFeeder(unittest.TestCase): - def test_lod_level_0_converter(self): img = fluid.layers.data(name='image', shape=[1, 28, 28]) label = fluid.layers.data(name='label', shape=[1], dtype='int64') @@ -41,45 +40,48 @@ class TestDataFeeder(unittest.TestCase): def test_lod_level_1_converter(self): # lod_level = 1 # each sentence has a different number of words - sentences = fluid.layers.data(name='sentences', - shape=[1], - dtype='int64', - lod_level=1) + sentences = fluid.layers.data( + name='sentences', shape=[1], dtype='int64', lod_level=1 + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace()) # lod = [[0, 3, 5, 9]] # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]] # label = [1] * len(data) - result = feeder.feed([([1, 2, 3], [1]), ([4, 5], [1]), - ([6, 7, 8, 9], [1])]) + result = feeder.feed( + [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])] + ) self.assertEqual(result['sentences'].shape(), [9, 1]) self.assertEqual(result['label'].shape(), [3, 1]) - self.assertEqual(result['sentences'].recursive_sequence_lengths(), - [[3, 2, 4]]) + self.assertEqual( + result['sentences'].recursive_sequence_lengths(), [[3, 2, 4]] + ) self.assertEqual(result['label'].recursive_sequence_lengths(), []) def test_lod_level_2_converter(self): # lod_level = 2 # paragraphs -> sentences -> words - paragraphs = fluid.layers.data(name='paragraphs', - shape=[1], - dtype='int64', - lod_level=2) + paragraphs = fluid.layers.data( + name='paragraphs', shape=[1], dtype='int64', lod_level=2 + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace()) # lod = [[0, 2, 3], [0, 3, 5, 9]] # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]] # label = [1] * len(data) - result = feeder.feed([([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, - 9]], [1])]) + result = feeder.feed( + [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])] + ) self.assertEqual(result['paragraphs'].shape(), [9, 1]) self.assertEqual(result['label'].shape(), [2, 1]) - self.assertEqual(result['paragraphs'].recursive_sequence_lengths(), - [[2, 1], [3, 2, 4]]) + 
self.assertEqual( + result['paragraphs'].recursive_sequence_lengths(), + [[2, 1], [3, 2, 4]], + ) self.assertEqual(result['label'].recursive_sequence_lengths(), []) diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index bd3b8df7385981432554086b95bfb8bdf358c4b8..baa80a73aa2458f8757a598a2309e4aaf247c159 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -28,7 +28,6 @@ paddle.enable_static() class LayerTest(unittest.TestCase): - @classmethod def setUpClass(cls): cls.seed = 111 @@ -53,57 +52,66 @@ class LayerTest(unittest.TestCase): fluid.default_main_program().random_seed = self.seed yield - def get_static_graph_result(self, - feed, - fetch_list, - with_lod=False, - force_to_use_cpu=False): + def get_static_graph_result( + self, feed, fetch_list, with_lod=False, force_to_use_cpu=False + ): exe = fluid.Executor(self._get_place(force_to_use_cpu)) exe.run(fluid.default_startup_program()) - return exe.run(fluid.default_main_program(), - feed=feed, - fetch_list=fetch_list, - return_numpy=(not with_lod)) + return exe.run( + fluid.default_main_program(), + feed=feed, + fetch_list=fetch_list, + return_numpy=(not with_lod), + ) @contextlib.contextmanager def dynamic_graph(self, force_to_use_cpu=False): with fluid.dygraph.guard( - self._get_place(force_to_use_cpu=force_to_use_cpu)): + self._get_place(force_to_use_cpu=force_to_use_cpu) + ): fluid.default_startup_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed yield class TestDetection(unittest.TestCase): - def test_detection_output(self): program = Program() with program_guard(program): - pb = layers.data(name='prior_box', - shape=[10, 4], - append_batch_size=False, - dtype='float32') - pbv = layers.data(name='prior_box_var', - shape=[10, 4], - append_batch_size=False, - dtype='float32') - loc = layers.data(name='target_box', - shape=[2, 10, 4], - append_batch_size=False, - dtype='float32') - scores = layers.data(name='scores', - shape=[2, 10, 20], - append_batch_size=False, - dtype='float32') - out = layers.detection_output(scores=scores, - loc=loc, - prior_box=pb, - prior_box_var=pbv) - out2, index = layers.detection_output(scores=scores, - loc=loc, - prior_box=pb, - prior_box_var=pbv, - return_index=True) + pb = layers.data( + name='prior_box', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) + pbv = layers.data( + name='prior_box_var', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) + loc = layers.data( + name='target_box', + shape=[2, 10, 4], + append_batch_size=False, + dtype='float32', + ) + scores = layers.data( + name='scores', + shape=[2, 10, 20], + append_batch_size=False, + dtype='float32', + ) + out = layers.detection_output( + scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv + ) + out2, index = layers.detection_output( + scores=scores, + loc=loc, + prior_box=pb, + prior_box_var=pbv, + return_index=True, + ) self.assertIsNotNone(out) self.assertIsNotNone(out2) self.assertIsNotNone(index) @@ -115,10 +123,12 @@ class TestDetection(unittest.TestCase): with program_guard(program): x = layers.data(name='x', shape=[4], dtype='float32') y = layers.data(name='z', shape=[4], dtype='float32', lod_level=1) - bcoder = layers.box_coder(prior_box=x, - prior_box_var=[0.1, 0.2, 0.1, 0.2], - target_box=y, - code_type='encode_center_size') + bcoder = layers.box_coder( + prior_box=x, + prior_box_var=[0.1, 0.2, 0.1, 0.2], + target_box=y, + 
code_type='encode_center_size', + ) self.assertIsNotNone(bcoder) print(str(program)) @@ -126,28 +136,30 @@ class TestDetection(unittest.TestCase): program = Program() with program_guard(program): x1 = fluid.data(name='x1', shape=[10, 4], dtype='int32') - y1 = fluid.data(name='y1', - shape=[10, 4], - dtype='float32', - lod_level=1) + y1 = fluid.data( + name='y1', shape=[10, 4], dtype='float32', lod_level=1 + ) x2 = fluid.data(name='x2', shape=[10, 4], dtype='float32') - y2 = fluid.data(name='y2', - shape=[10, 4], - dtype='int32', - lod_level=1) - - self.assertRaises(TypeError, - layers.box_coder, - prior_box=x1, - prior_box_var=[0.1, 0.2, 0.1, 0.2], - target_box=y1, - code_type='encode_center_size') - self.assertRaises(TypeError, - layers.box_coder, - prior_box=x2, - prior_box_var=[0.1, 0.2, 0.1, 0.2], - target_box=y2, - code_type='encode_center_size') + y2 = fluid.data( + name='y2', shape=[10, 4], dtype='int32', lod_level=1 + ) + + self.assertRaises( + TypeError, + layers.box_coder, + prior_box=x1, + prior_box_var=[0.1, 0.2, 0.1, 0.2], + target_box=y1, + code_type='encode_center_size', + ) + self.assertRaises( + TypeError, + layers.box_coder, + prior_box=x2, + prior_box_var=[0.1, 0.2, 0.1, 0.2], + target_box=y2, + code_type='encode_center_size', + ) def test_detection_api(self): program = Program() @@ -156,10 +168,12 @@ class TestDetection(unittest.TestCase): y = layers.data(name='y', shape=[4], dtype='float32') z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1) iou = layers.iou_similarity(x=x, y=y) - bcoder = layers.box_coder(prior_box=x, - prior_box_var=y, - target_box=z, - code_type='encode_center_size') + bcoder = layers.box_coder( + prior_box=x, + prior_box_var=y, + target_box=z, + code_type='encode_center_size', + ) self.assertIsNotNone(iou) self.assertIsNotNone(bcoder) @@ -167,23 +181,21 @@ class TestDetection(unittest.TestCase): self.assertIsNotNone(matched_indices) self.assertIsNotNone(matched_dist) - gt = layers.data(name='gt', - shape=[1, 1], - dtype='int32', - lod_level=1) - trg, trg_weight = layers.target_assign(gt, - matched_indices, - mismatch_value=0) + gt = layers.data( + name='gt', shape=[1, 1], dtype='int32', lod_level=1 + ) + trg, trg_weight = layers.target_assign( + gt, matched_indices, mismatch_value=0 + ) self.assertIsNotNone(trg) self.assertIsNotNone(trg_weight) - gt2 = layers.data(name='gt2', - shape=[10, 4], - dtype='float32', - lod_level=1) - trg, trg_weight = layers.target_assign(gt2, - matched_indices, - mismatch_value=0) + gt2 = layers.data( + name='gt2', shape=[10, 4], dtype='float32', lod_level=1 + ) + trg, trg_weight = layers.target_assign( + gt2, matched_indices, mismatch_value=0 + ) self.assertIsNotNone(trg) self.assertIsNotNone(trg_weight) @@ -192,24 +204,26 @@ class TestDetection(unittest.TestCase): def test_ssd_loss(self): program = Program() with program_guard(program): - pb = layers.data(name='prior_box', - shape=[10, 4], - append_batch_size=False, - dtype='float32') - pbv = layers.data(name='prior_box_var', - shape=[10, 4], - append_batch_size=False, - dtype='float32') + pb = layers.data( + name='prior_box', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) + pbv = layers.data( + name='prior_box_var', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) loc = layers.data(name='target_box', shape=[10, 4], dtype='float32') scores = layers.data(name='scores', shape=[10, 21], dtype='float32') - gt_box = layers.data(name='gt_box', - shape=[4], - lod_level=1, - dtype='float32') - gt_label = 
layers.data(name='gt_label', - shape=[1], - lod_level=1, - dtype='int32') + gt_box = layers.data( + name='gt_box', shape=[4], lod_level=1, dtype='float32' + ) + gt_label = layers.data( + name='gt_label', shape=[1], lod_level=1, dtype='int32' + ) loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv) self.assertIsNotNone(loss) self.assertEqual(loss.shape[-1], 1) @@ -217,73 +231,75 @@ class TestDetection(unittest.TestCase): class TestPriorBox(unittest.TestCase): - def test_prior_box(self): program = Program() with program_guard(program): data_shape = [3, 224, 224] - images = fluid.layers.data(name='pixel', - shape=data_shape, - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32' + ) conv1 = fluid.layers.conv2d(images, 3, 3, 2) - box, var = layers.prior_box(input=conv1, - image=images, - min_sizes=[100.0], - aspect_ratios=[1.], - flip=True, - clip=True) + box, var = layers.prior_box( + input=conv1, + image=images, + min_sizes=[100.0], + aspect_ratios=[1.0], + flip=True, + clip=True, + ) assert len(box.shape) == 4 assert box.shape == var.shape assert box.shape[3] == 4 class TestPriorBox2(unittest.TestCase): - def test_prior_box(self): program = Program() with program_guard(program): data_shape = [None, 3, None, None] images = fluid.data(name='pixel', shape=data_shape, dtype='float32') conv1 = fluid.layers.conv2d(images, 3, 3, 2) - box, var = layers.prior_box(input=conv1, - image=images, - min_sizes=[100.0], - aspect_ratios=[1.], - flip=True, - clip=True) + box, var = layers.prior_box( + input=conv1, + image=images, + min_sizes=[100.0], + aspect_ratios=[1.0], + flip=True, + clip=True, + ) assert len(box.shape) == 4 assert box.shape == var.shape assert box.shape[3] == 4 class TestDensityPriorBox(unittest.TestCase): - def test_density_prior_box(self): program = Program() with program_guard(program): data_shape = [3, 224, 224] - images = fluid.layers.data(name='pixel', - shape=data_shape, - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32' + ) conv1 = fluid.layers.conv2d(images, 3, 3, 2) - box, var = layers.density_prior_box(input=conv1, - image=images, - densities=[3, 4], - fixed_sizes=[50., 60.], - fixed_ratios=[1.0], - clip=True) + box, var = layers.density_prior_box( + input=conv1, + image=images, + densities=[3, 4], + fixed_sizes=[50.0, 60.0], + fixed_ratios=[1.0], + clip=True, + ) assert len(box.shape) == 4 assert box.shape == var.shape assert box.shape[-1] == 4 class TestAnchorGenerator(unittest.TestCase): - def test_anchor_generator(self): data_shape = [3, 224, 224] - images = fluid.layers.data(name='pixel', - shape=data_shape, - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32' + ) conv1 = fluid.layers.conv2d(images, 3, 3, 2) anchor, var = fluid.layers.anchor_generator( input=conv1, @@ -291,14 +307,14 @@ class TestAnchorGenerator(unittest.TestCase): aspect_ratios=[0.5, 1.0, 2.0], variance=[0.1, 0.1, 0.2, 0.2], stride=[16.0, 16.0], - offset=0.5) + offset=0.5, + ) assert len(anchor.shape) == 4 assert anchor.shape == var.shape assert anchor.shape[3] == 4 class TestGenerateProposalLabels(unittest.TestCase): - def check_out(self, outs): rois = outs[0] labels_int32 = outs[1] @@ -320,27 +336,22 @@ class TestGenerateProposalLabels(unittest.TestCase): def test_generate_proposal_labels(self): program = Program() with program_guard(program): - rpn_rois = fluid.data(name='rpn_rois', - shape=[4, 4], - dtype='float32', - lod_level=1) - gt_classes = 
fluid.data(name='gt_classes', - shape=[6], - dtype='int32', - lod_level=1) - is_crowd = fluid.data(name='is_crowd', - shape=[6], - dtype='int32', - lod_level=1) - gt_boxes = fluid.data(name='gt_boxes', - shape=[6, 4], - dtype='float32', - lod_level=1) + rpn_rois = fluid.data( + name='rpn_rois', shape=[4, 4], dtype='float32', lod_level=1 + ) + gt_classes = fluid.data( + name='gt_classes', shape=[6], dtype='int32', lod_level=1 + ) + is_crowd = fluid.data( + name='is_crowd', shape=[6], dtype='int32', lod_level=1 + ) + gt_boxes = fluid.data( + name='gt_boxes', shape=[6, 4], dtype='float32', lod_level=1 + ) im_info = fluid.data(name='im_info', shape=[1, 3], dtype='float32') - max_overlap = fluid.data(name='max_overlap', - shape=[4], - dtype='float32', - lod_level=1) + max_overlap = fluid.data( + name='max_overlap', shape=[4], dtype='float32', lod_level=1 + ) self.class_nums = 5 outs = fluid.layers.generate_proposal_labels( rpn_rois=rpn_rois, @@ -354,7 +365,8 @@ class TestGenerateProposalLabels(unittest.TestCase): bg_thresh_hi=0.5, bg_thresh_lo=0.0, bbox_reg_weights=[0.1, 0.1, 0.2, 0.2], - class_nums=self.class_nums) + class_nums=self.class_nums, + ) outs_1 = fluid.layers.generate_proposal_labels( rpn_rois=rpn_rois, gt_classes=gt_classes, @@ -370,7 +382,8 @@ class TestGenerateProposalLabels(unittest.TestCase): class_nums=self.class_nums, is_cascade_rcnn=True, max_overlap=max_overlap, - return_max_overlap=True) + return_max_overlap=True, + ) self.check_out(outs) self.check_out(outs_1) @@ -378,57 +391,69 @@ class TestGenerateProposalLabels(unittest.TestCase): class TestGenerateMaskLabels(unittest.TestCase): - def test_generate_mask_labels(self): program = Program() with program_guard(program): - im_info = layers.data(name='im_info', - shape=[1, 3], - dtype='float32', - lod_level=1, - append_batch_size=False) - gt_classes = layers.data(name='gt_classes', - shape=[2, 1], - dtype='int32', - lod_level=1, - append_batch_size=False) - is_crowd = layers.data(name='is_crowd', - shape=[2, 1], - dtype='int32', - lod_level=1, - append_batch_size=False) - gt_segms = layers.data(name='gt_segms', - shape=[20, 2], - dtype='float32', - lod_level=3, - append_batch_size=False) - rois = layers.data(name='rois', - shape=[4, 4], - dtype='float32', - lod_level=1, - append_batch_size=False) - labels_int32 = layers.data(name='labels_int32', - shape=[4, 1], - dtype='int32', - lod_level=1, - append_batch_size=False) + im_info = layers.data( + name='im_info', + shape=[1, 3], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) + gt_classes = layers.data( + name='gt_classes', + shape=[2, 1], + dtype='int32', + lod_level=1, + append_batch_size=False, + ) + is_crowd = layers.data( + name='is_crowd', + shape=[2, 1], + dtype='int32', + lod_level=1, + append_batch_size=False, + ) + gt_segms = layers.data( + name='gt_segms', + shape=[20, 2], + dtype='float32', + lod_level=3, + append_batch_size=False, + ) + rois = layers.data( + name='rois', + shape=[4, 4], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) + labels_int32 = layers.data( + name='labels_int32', + shape=[4, 1], + dtype='int32', + lod_level=1, + append_batch_size=False, + ) num_classes = 5 resolution = 14 - outs = fluid.layers.generate_mask_labels(im_info=im_info, - gt_classes=gt_classes, - is_crowd=is_crowd, - gt_segms=gt_segms, - rois=rois, - labels_int32=labels_int32, - num_classes=num_classes, - resolution=resolution) + outs = fluid.layers.generate_mask_labels( + im_info=im_info, + gt_classes=gt_classes, + is_crowd=is_crowd, + 
gt_segms=gt_segms, + rois=rois, + labels_int32=labels_int32, + num_classes=num_classes, + resolution=resolution, + ) mask_rois, roi_has_mask_int32, mask_int32 = outs assert mask_rois.shape[1] == 4 assert mask_int32.shape[1] == num_classes * resolution * resolution class TestMultiBoxHead(unittest.TestCase): - def test_multi_box_head(self): data_shape = [3, 224, 224] mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape) @@ -439,9 +464,9 @@ class TestMultiBoxHead(unittest.TestCase): assert mbox_locs.shape[1] == mbox_confs.shape[1] def multi_box_head_output(self, data_shape): - images = fluid.layers.data(name='pixel', - shape=data_shape, - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32' + ) conv1 = fluid.layers.conv2d(images, 3, 3, 2) conv2 = fluid.layers.conv2d(conv1, 3, 3, 2) conv3 = fluid.layers.conv2d(conv2, 3, 3, 2) @@ -454,37 +479,47 @@ class TestMultiBoxHead(unittest.TestCase): num_classes=21, min_ratio=20, max_ratio=90, - aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + aspect_ratios=[ + [2.0], + [2.0, 3.0], + [2.0, 3.0], + [2.0, 3.0], + [2.0], + [2.0], + ], base_size=300, offset=0.5, flip=True, - clip=True) + clip=True, + ) return mbox_locs, mbox_confs, box, var class TestDetectionMAP(unittest.TestCase): - def test_detection_map(self): program = Program() with program_guard(program): - detect_res = layers.data(name='detect_res', - shape=[10, 6], - append_batch_size=False, - dtype='float32') - label = layers.data(name='label', - shape=[10, 6], - append_batch_size=False, - dtype='float32') + detect_res = layers.data( + name='detect_res', + shape=[10, 6], + append_batch_size=False, + dtype='float32', + ) + label = layers.data( + name='label', + shape=[10, 6], + append_batch_size=False, + dtype='float32', + ) map_out = detection.detection_map(detect_res, label, 21) self.assertIsNotNone(map_out) - self.assertEqual(map_out.shape, (1, )) + self.assertEqual(map_out.shape, (1,)) print(str(program)) class TestRpnTargetAssign(unittest.TestCase): - def test_rpn_target_assign(self): program = Program() with program_guard(program): @@ -492,49 +527,62 @@ class TestRpnTargetAssign(unittest.TestCase): cls_logits_shape = [10, 50, 2] anchor_shape = [50, 4] - bbox_pred = layers.data(name='bbox_pred', - shape=bbox_pred_shape, - append_batch_size=False, - dtype='float32') - cls_logits = layers.data(name='cls_logits', - shape=cls_logits_shape, - append_batch_size=False, - dtype='float32') - anchor_box = layers.data(name='anchor_box', - shape=anchor_shape, - append_batch_size=False, - dtype='float32') - anchor_var = layers.data(name='anchor_var', - shape=anchor_shape, - append_batch_size=False, - dtype='float32') - gt_boxes = layers.data(name='gt_boxes', - shape=[4], - lod_level=1, - dtype='float32') - is_crowd = layers.data(name='is_crowd', - shape=[1, 10], - dtype='int32', - lod_level=1, - append_batch_size=False) - im_info = layers.data(name='im_info', - shape=[1, 3], - dtype='float32', - lod_level=1, - append_batch_size=False) - outs = layers.rpn_target_assign(bbox_pred=bbox_pred, - cls_logits=cls_logits, - anchor_box=anchor_box, - anchor_var=anchor_var, - gt_boxes=gt_boxes, - is_crowd=is_crowd, - im_info=im_info, - rpn_batch_size_per_im=256, - rpn_straddle_thresh=0.0, - rpn_fg_fraction=0.5, - rpn_positive_overlap=0.7, - rpn_negative_overlap=0.3, - use_random=False) + bbox_pred = layers.data( + name='bbox_pred', + shape=bbox_pred_shape, + append_batch_size=False, + dtype='float32', + ) + cls_logits = layers.data( + 
name='cls_logits', + shape=cls_logits_shape, + append_batch_size=False, + dtype='float32', + ) + anchor_box = layers.data( + name='anchor_box', + shape=anchor_shape, + append_batch_size=False, + dtype='float32', + ) + anchor_var = layers.data( + name='anchor_var', + shape=anchor_shape, + append_batch_size=False, + dtype='float32', + ) + gt_boxes = layers.data( + name='gt_boxes', shape=[4], lod_level=1, dtype='float32' + ) + is_crowd = layers.data( + name='is_crowd', + shape=[1, 10], + dtype='int32', + lod_level=1, + append_batch_size=False, + ) + im_info = layers.data( + name='im_info', + shape=[1, 3], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) + outs = layers.rpn_target_assign( + bbox_pred=bbox_pred, + cls_logits=cls_logits, + anchor_box=anchor_box, + anchor_var=anchor_var, + gt_boxes=gt_boxes, + is_crowd=is_crowd, + im_info=im_info, + rpn_batch_size_per_im=256, + rpn_straddle_thresh=0.0, + rpn_fg_fraction=0.5, + rpn_positive_overlap=0.7, + rpn_negative_overlap=0.3, + use_random=False, + ) pred_scores = outs[0] pred_loc = outs[1] tgt_lbl = outs[2] @@ -553,29 +601,29 @@ class TestRpnTargetAssign(unittest.TestCase): class TestGenerateProposals(LayerTest): - def test_generate_proposals(self): scores_np = np.random.rand(2, 3, 4, 4).astype('float32') bbox_deltas_np = np.random.rand(2, 12, 4, 4).astype('float32') im_info_np = np.array([[8, 8, 0.5], [6, 6, 0.5]]).astype('float32') - anchors_np = np.reshape(np.arange(4 * 4 * 3 * 4), - [4, 4, 3, 4]).astype('float32') + anchors_np = np.reshape(np.arange(4 * 4 * 3 * 4), [4, 4, 3, 4]).astype( + 'float32' + ) variances_np = np.ones((4, 4, 3, 4)).astype('float32') with self.static_graph(): - scores = fluid.data(name='scores', - shape=[2, 3, 4, 4], - dtype='float32') - bbox_deltas = fluid.data(name='bbox_deltas', - shape=[2, 12, 4, 4], - dtype='float32') + scores = fluid.data( + name='scores', shape=[2, 3, 4, 4], dtype='float32' + ) + bbox_deltas = fluid.data( + name='bbox_deltas', shape=[2, 12, 4, 4], dtype='float32' + ) im_info = fluid.data(name='im_info', shape=[2, 3], dtype='float32') - anchors = fluid.data(name='anchors', - shape=[4, 4, 3, 4], - dtype='float32') - variances = fluid.data(name='var', - shape=[4, 4, 3, 4], - dtype='float32') + anchors = fluid.data( + name='anchors', shape=[4, 4, 3, 4], dtype='float32' + ) + variances = fluid.data( + name='var', shape=[4, 4, 3, 4], dtype='float32' + ) rois, roi_probs, rois_num = fluid.layers.generate_proposals( scores, bbox_deltas, @@ -584,17 +632,23 @@ class TestGenerateProposals(LayerTest): variances, pre_nms_top_n=10, post_nms_top_n=5, - return_rois_num=True) - rois_stat, roi_probs_stat, rois_num_stat = self.get_static_graph_result( + return_rois_num=True, + ) + ( + rois_stat, + roi_probs_stat, + rois_num_stat, + ) = self.get_static_graph_result( feed={ 'scores': scores_np, 'bbox_deltas': bbox_deltas_np, 'im_info': im_info_np, 'anchors': anchors_np, - 'var': variances_np + 'var': variances_np, }, fetch_list=[rois, roi_probs, rois_num], - with_lod=False) + with_lod=False, + ) with self.dynamic_graph(): scores_dy = base.to_variable(scores_np) @@ -610,7 +664,8 @@ class TestGenerateProposals(LayerTest): variances_dy, pre_nms_top_n=10, post_nms_top_n=5, - return_rois_num=True) + return_rois_num=True, + ) rois_dy = rois.numpy() roi_probs_dy = roi_probs.numpy() rois_num_dy = rois_num.numpy() @@ -621,7 +676,6 @@ class TestGenerateProposals(LayerTest): class TestYoloDetection(unittest.TestCase): - def test_yolov3_loss(self): program = Program() with program_guard(program): @@ -629,14 
+683,18 @@ class TestYoloDetection(unittest.TestCase): gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32') gt_label = layers.data(name='gt_label', shape=[10], dtype='int32') gt_score = layers.data(name='gt_score', shape=[10], dtype='float32') - loss = layers.yolov3_loss(x, - gt_box, - gt_label, [10, 13, 30, 13], [0, 1], - 10, - 0.7, - 32, - gt_score=gt_score, - use_label_smooth=False) + loss = layers.yolov3_loss( + x, + gt_box, + gt_label, + [10, 13, 30, 13], + [0, 1], + 10, + 0.7, + 32, + gt_score=gt_score, + use_label_smooth=False, + ) self.assertIsNotNone(loss) @@ -645,8 +703,9 @@ class TestYoloDetection(unittest.TestCase): with program_guard(program): x = layers.data(name='x', shape=[30, 7, 7], dtype='float32') img_size = layers.data(name='img_size', shape=[2], dtype='int32') - boxes, scores = layers.yolo_box(x, img_size, [10, 13, 30, 13], 10, - 0.01, 32) + boxes, scores = layers.yolo_box( + x, img_size, [10, 13, 30, 13], 10, 0.01, 32 + ) self.assertIsNotNone(boxes) self.assertIsNotNone(scores) @@ -657,15 +716,19 @@ class TestYoloDetection(unittest.TestCase): gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32') gt_label = layers.data(name='gt_label', shape=[10], dtype='int32') gt_score = layers.data(name='gt_score', shape=[10], dtype='float32') - loss = layers.yolov3_loss(x, - gt_box, - gt_label, [10, 13, 30, 13], [0, 1], - 10, - 0.7, - 32, - gt_score=gt_score, - use_label_smooth=False, - scale_x_y=1.2) + loss = layers.yolov3_loss( + x, + gt_box, + gt_label, + [10, 13, 30, 13], + [0, 1], + 10, + 0.7, + 32, + gt_score=gt_score, + use_label_smooth=False, + scale_x_y=1.2, + ) self.assertIsNotNone(loss) @@ -674,38 +737,32 @@ class TestYoloDetection(unittest.TestCase): with program_guard(program): x = layers.data(name='x', shape=[30, 7, 7], dtype='float32') img_size = layers.data(name='img_size', shape=[2], dtype='int32') - boxes, scores = layers.yolo_box(x, - img_size, [10, 13, 30, 13], - 10, - 0.01, - 32, - scale_x_y=1.2) + boxes, scores = layers.yolo_box( + x, img_size, [10, 13, 30, 13], 10, 0.01, 32, scale_x_y=1.2 + ) self.assertIsNotNone(boxes) self.assertIsNotNone(scores) class TestBoxClip(unittest.TestCase): - def test_box_clip(self): program = Program() with program_guard(program): - input_box = layers.data(name='input_box', - shape=[7, 4], - dtype='float32', - lod_level=1) + input_box = layers.data( + name='input_box', shape=[7, 4], dtype='float32', lod_level=1 + ) im_info = layers.data(name='im_info', shape=[3], dtype='float32') out = layers.box_clip(input_box, im_info) self.assertIsNotNone(out) class TestMulticlassNMS(unittest.TestCase): - def test_multiclass_nms(self): program = Program() with program_guard(program): - bboxes = layers.data(name='bboxes', - shape=[-1, 10, 4], - dtype='float32') + bboxes = layers.data( + name='bboxes', shape=[-1, 10, 4], dtype='float32' + ) scores = layers.data(name='scores', shape=[-1, 10], dtype='float32') output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7) self.assertIsNotNone(output) @@ -713,57 +770,56 @@ class TestMulticlassNMS(unittest.TestCase): def test_multiclass_nms_error(self): program = Program() with program_guard(program): - bboxes1 = fluid.data(name='bboxes1', - shape=[10, 10, 4], - dtype='int32') - scores1 = fluid.data(name='scores1', - shape=[10, 10], - dtype='float32') - bboxes2 = fluid.data(name='bboxes2', - shape=[10, 10, 4], - dtype='float32') + bboxes1 = fluid.data( + name='bboxes1', shape=[10, 10, 4], dtype='int32' + ) + scores1 = fluid.data( + name='scores1', shape=[10, 10], 
dtype='float32' + ) + bboxes2 = fluid.data( + name='bboxes2', shape=[10, 10, 4], dtype='float32' + ) scores2 = fluid.data(name='scores2', shape=[10, 10], dtype='int32') - self.assertRaises(TypeError, - layers.multiclass_nms, - bboxes=bboxes1, - scores=scores1, - score_threshold=0.5, - nms_top_k=400, - keep_top_k=200) - self.assertRaises(TypeError, - layers.multiclass_nms, - bboxes=bboxes2, - scores=scores2, - score_threshold=0.5, - nms_top_k=400, - keep_top_k=200) + self.assertRaises( + TypeError, + layers.multiclass_nms, + bboxes=bboxes1, + scores=scores1, + score_threshold=0.5, + nms_top_k=400, + keep_top_k=200, + ) + self.assertRaises( + TypeError, + layers.multiclass_nms, + bboxes=bboxes2, + scores=scores2, + score_threshold=0.5, + nms_top_k=400, + keep_top_k=200, + ) class TestMulticlassNMS2(unittest.TestCase): - def test_multiclass_nms2(self): program = Program() with program_guard(program): - bboxes = layers.data(name='bboxes', - shape=[-1, 10, 4], - dtype='float32') + bboxes = layers.data( + name='bboxes', shape=[-1, 10, 4], dtype='float32' + ) scores = layers.data(name='scores', shape=[-1, 10], dtype='float32') - output = fluid.contrib.multiclass_nms2(bboxes, scores, 0.3, 400, - 200, 0.7) - output2, index = fluid.contrib.multiclass_nms2(bboxes, - scores, - 0.3, - 400, - 200, - 0.7, - return_index=True) + output = fluid.contrib.multiclass_nms2( + bboxes, scores, 0.3, 400, 200, 0.7 + ) + output2, index = fluid.contrib.multiclass_nms2( + bboxes, scores, 0.3, 400, 200, 0.7, return_index=True + ) self.assertIsNotNone(output) self.assertIsNotNone(output2) self.assertIsNotNone(index) class TestCollectFpnPropsals(LayerTest): - def test_collect_fpn_proposals(self): multi_bboxes_np = [] multi_scores_np = [] @@ -781,17 +837,21 @@ class TestCollectFpnPropsals(LayerTest): multi_scores = [] rois_num_per_level = [] for i in range(4): - bboxes = fluid.data(name='rois' + str(i), - shape=[5, 4], - dtype='float32', - lod_level=1) - scores = fluid.data(name='scores' + str(i), - shape=[5, 1], - dtype='float32', - lod_level=1) - rois_num = fluid.data(name='rois_num' + str(i), - shape=[None], - dtype='int32') + bboxes = fluid.data( + name='rois' + str(i), + shape=[5, 4], + dtype='float32', + lod_level=1, + ) + scores = fluid.data( + name='scores' + str(i), + shape=[5, 1], + dtype='float32', + lod_level=1, + ) + rois_num = fluid.data( + name='rois_num' + str(i), shape=[None], dtype='int32' + ) multi_bboxes.append(bboxes) multi_scores.append(scores) @@ -803,14 +863,16 @@ class TestCollectFpnPropsals(LayerTest): 2, 5, 10, - rois_num_per_level=rois_num_per_level) + rois_num_per_level=rois_num_per_level, + ) feed = {} for i in range(4): feed['rois' + str(i)] = multi_bboxes_np[i] feed['scores' + str(i)] = multi_scores_np[i] feed['rois_num' + str(i)] = rois_num_per_level_np[i] fpn_rois_stat, rois_num_stat = self.get_static_graph_result( - feed=feed, fetch_list=[fpn_rois, rois_num], with_lod=True) + feed=feed, fetch_list=[fpn_rois, rois_num], with_lod=True + ) fpn_rois_stat = np.array(fpn_rois_stat) rois_num_stat = np.array(rois_num_stat) @@ -831,7 +893,8 @@ class TestCollectFpnPropsals(LayerTest): 2, 5, 10, - rois_num_per_level=rois_num_per_level_dy) + rois_num_per_level=rois_num_per_level_dy, + ) fpn_rois_dy = fpn_rois_dy.numpy() rois_num_dy = rois_num_dy.numpy() @@ -839,72 +902,80 @@ class TestCollectFpnPropsals(LayerTest): np.testing.assert_array_equal(rois_num_stat, rois_num_dy) def test_collect_fpn_proposals_error(self): - def generate_input(bbox_type, score_type, name): multi_bboxes = [] multi_scores = 
[] for i in range(4): - bboxes = fluid.data(name='rois' + name + str(i), - shape=[10, 4], - dtype=bbox_type, - lod_level=1) - scores = fluid.data(name='scores' + name + str(i), - shape=[10, 1], - dtype=score_type, - lod_level=1) + bboxes = fluid.data( + name='rois' + name + str(i), + shape=[10, 4], + dtype=bbox_type, + lod_level=1, + ) + scores = fluid.data( + name='scores' + name + str(i), + shape=[10, 1], + dtype=score_type, + lod_level=1, + ) multi_bboxes.append(bboxes) multi_scores.append(scores) return multi_bboxes, multi_scores program = Program() with program_guard(program): - bbox1 = fluid.data(name='rois', - shape=[5, 10, 4], - dtype='float32', - lod_level=1) - score1 = fluid.data(name='scores', - shape=[5, 10, 1], - dtype='float32', - lod_level=1) + bbox1 = fluid.data( + name='rois', shape=[5, 10, 4], dtype='float32', lod_level=1 + ) + score1 = fluid.data( + name='scores', shape=[5, 10, 1], dtype='float32', lod_level=1 + ) bbox2, score2 = generate_input('int32', 'float32', '2') - self.assertRaises(TypeError, - layers.collect_fpn_proposals, - multi_rois=bbox1, - multi_scores=score1, - min_level=2, - max_level=5, - post_nms_top_n=2000) - self.assertRaises(TypeError, - layers.collect_fpn_proposals, - multi_rois=bbox2, - multi_scores=score2, - min_level=2, - max_level=5, - post_nms_top_n=2000) + self.assertRaises( + TypeError, + layers.collect_fpn_proposals, + multi_rois=bbox1, + multi_scores=score1, + min_level=2, + max_level=5, + post_nms_top_n=2000, + ) + self.assertRaises( + TypeError, + layers.collect_fpn_proposals, + multi_rois=bbox2, + multi_scores=score2, + min_level=2, + max_level=5, + post_nms_top_n=2000, + ) class TestDistributeFpnProposals(LayerTest): - def test_distribute_fpn_proposals(self): rois_np = np.random.rand(10, 4).astype('float32') rois_num_np = np.array([4, 6]).astype('int32') with self.static_graph(): rois = fluid.data(name='rois', shape=[10, 4], dtype='float32') rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32') - multi_rois, restore_ind, rois_num_per_level = layers.distribute_fpn_proposals( + ( + multi_rois, + restore_ind, + rois_num_per_level, + ) = layers.distribute_fpn_proposals( fpn_rois=rois, min_level=2, max_level=5, refer_level=4, refer_scale=224, - rois_num=rois_num) + rois_num=rois_num, + ) fetch_list = multi_rois + [restore_ind] + rois_num_per_level - output_stat = self.get_static_graph_result(feed={ - 'rois': rois_np, - 'rois_num': rois_num_np - }, - fetch_list=fetch_list, - with_lod=True) + output_stat = self.get_static_graph_result( + feed={'rois': rois_np, 'rois_num': rois_num_np}, + fetch_list=fetch_list, + with_lod=True, + ) output_stat_np = [] for output in output_stat: output_np = np.array(output) @@ -914,13 +985,18 @@ class TestDistributeFpnProposals(LayerTest): with self.dynamic_graph(): rois_dy = base.to_variable(rois_np) rois_num_dy = base.to_variable(rois_num_np) - multi_rois_dy, restore_ind_dy, rois_num_per_level_dy = layers.distribute_fpn_proposals( + ( + multi_rois_dy, + restore_ind_dy, + rois_num_per_level_dy, + ) = layers.distribute_fpn_proposals( fpn_rois=rois_dy, min_level=2, max_level=5, refer_level=4, refer_scale=224, - rois_num=rois_num_dy) + rois_num=rois_num_dy, + ) print(type(multi_rois_dy)) output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy output_dy_np = [] @@ -935,83 +1011,93 @@ class TestDistributeFpnProposals(LayerTest): def test_distribute_fpn_proposals_error(self): program = Program() with program_guard(program): - fpn_rois = fluid.data(name='data_error', - shape=[10, 4], - 
dtype='int32', - lod_level=1) - self.assertRaises(TypeError, - layers.distribute_fpn_proposals, - fpn_rois=fpn_rois, - min_level=2, - max_level=5, - refer_level=4, - refer_scale=224) + fpn_rois = fluid.data( + name='data_error', shape=[10, 4], dtype='int32', lod_level=1 + ) + self.assertRaises( + TypeError, + layers.distribute_fpn_proposals, + fpn_rois=fpn_rois, + min_level=2, + max_level=5, + refer_level=4, + refer_scale=224, + ) class TestBoxDecoderAndAssign(unittest.TestCase): - def test_box_decoder_and_assign(self): program = Program() with program_guard(program): pb = fluid.data(name='prior_box', shape=[None, 4], dtype='float32') pbv = fluid.data(name='prior_box_var', shape=[4], dtype='float32') - loc = fluid.data(name='target_box', - shape=[None, 4 * 81], - dtype='float32') - scores = fluid.data(name='scores', - shape=[None, 81], - dtype='float32') - decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign( - pb, pbv, loc, scores, 4.135) + loc = fluid.data( + name='target_box', shape=[None, 4 * 81], dtype='float32' + ) + scores = fluid.data( + name='scores', shape=[None, 81], dtype='float32' + ) + ( + decoded_box, + output_assign_box, + ) = fluid.layers.box_decoder_and_assign(pb, pbv, loc, scores, 4.135) self.assertIsNotNone(decoded_box) self.assertIsNotNone(output_assign_box) def test_box_decoder_and_assign_error(self): - def generate_input(pb_type, pbv_type, loc_type, score_type, name): - pb = fluid.data(name='prior_box' + name, - shape=[None, 4], - dtype=pb_type) - pbv = fluid.data(name='prior_box_var' + name, - shape=[4], - dtype=pbv_type) - loc = fluid.data(name='target_box' + name, - shape=[None, 4 * 81], - dtype=loc_type) - scores = fluid.data(name='scores' + name, - shape=[None, 81], - dtype=score_type) + pb = fluid.data( + name='prior_box' + name, shape=[None, 4], dtype=pb_type + ) + pbv = fluid.data( + name='prior_box_var' + name, shape=[4], dtype=pbv_type + ) + loc = fluid.data( + name='target_box' + name, shape=[None, 4 * 81], dtype=loc_type + ) + scores = fluid.data( + name='scores' + name, shape=[None, 81], dtype=score_type + ) return pb, pbv, loc, scores program = Program() with program_guard(program): - pb1, pbv1, loc1, scores1 = generate_input('int32', 'float32', - 'float32', 'float32', '1') - pb2, pbv2, loc2, scores2 = generate_input('float32', 'float32', - 'int32', 'float32', '2') - pb3, pbv3, loc3, scores3 = generate_input('float32', 'float32', - 'float32', 'int32', '3') - self.assertRaises(TypeError, - layers.box_decoder_and_assign, - prior_box=pb1, - prior_box_var=pbv1, - target_box=loc1, - box_score=scores1, - box_clip=4.0) - self.assertRaises(TypeError, - layers.box_decoder_and_assign, - prior_box=pb2, - prior_box_var=pbv2, - target_box=loc2, - box_score=scores2, - box_clip=4.0) - self.assertRaises(TypeError, - layers.box_decoder_and_assign, - prior_box=pb3, - prior_box_var=pbv3, - target_box=loc3, - box_score=scores3, - box_clip=4.0) + pb1, pbv1, loc1, scores1 = generate_input( + 'int32', 'float32', 'float32', 'float32', '1' + ) + pb2, pbv2, loc2, scores2 = generate_input( + 'float32', 'float32', 'int32', 'float32', '2' + ) + pb3, pbv3, loc3, scores3 = generate_input( + 'float32', 'float32', 'float32', 'int32', '3' + ) + self.assertRaises( + TypeError, + layers.box_decoder_and_assign, + prior_box=pb1, + prior_box_var=pbv1, + target_box=loc1, + box_score=scores1, + box_clip=4.0, + ) + self.assertRaises( + TypeError, + layers.box_decoder_and_assign, + prior_box=pb2, + prior_box_var=pbv2, + target_box=loc2, + box_score=scores2, + box_clip=4.0, + ) + 
self.assertRaises( + TypeError, + layers.box_decoder_and_assign, + prior_box=pb3, + prior_box_var=pbv3, + target_box=loc3, + box_score=scores3, + box_clip=4.0, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index eac41b818c2622a12a7baf9511a34279f23157dc..f6e1122e04562b36e806a01c3da587dbe438cd97 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -36,12 +36,14 @@ with fluid.program_guard(main_program=prog): prog_clip = prog.clone() prog_clip.block(0).var(hidden1.name)._set_error_clip( - fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN)) + fluid.clip.ErrorClipByValue(max=CLIP_MAX, min=CLIP_MIN) +) avg_cost_clip = prog_clip.block(0).var(avg_cost.name) fluid.backward.append_backward(loss=avg_cost) -fluid.backward.append_backward(loss=avg_cost_clip, - callbacks=[fluid.clip.error_clip_callback]) +fluid.backward.append_backward( + loss=avg_cost_clip, callbacks=[fluid.clip.error_clip_callback] +) hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD") hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD") @@ -49,9 +51,10 @@ hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD") hidden2_grad = prog.block(0).var(hidden2.name + "@GRAD") hidden2_grad_clip = prog_clip.block(0).var(hidden2.name + "@GRAD") -train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.mnist.train(), - buf_size=8192), - batch_size=BATCH_SIZE) +train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=8192), + batch_size=BATCH_SIZE, +) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -63,15 +66,18 @@ for data in train_reader(): count += 1 if count > 5: break - out1, out2 = exe.run(prog, - feed=feeder.feed(data), - fetch_list=[hidden1_grad, hidden2_grad]) + out1, out2 = exe.run( + prog, feed=feeder.feed(data), fetch_list=[hidden1_grad, hidden2_grad] + ) out1_clip, out2_clip = exe.run( prog_clip, feed=feeder.feed(data), - fetch_list=[hidden1_grad_clip, hidden2_grad_clip]) - if not ((out1.clip(min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() and - (out2 == out2_clip).all()): + fetch_list=[hidden1_grad_clip, hidden2_grad_clip], + ) + if not ( + (out1.clip(min=CLIP_MIN, max=CLIP_MAX) == out1_clip).all() + and (out2 == out2_clip).all() + ): exit(1) exit(0) diff --git a/python/paddle/fluid/tests/test_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py index cc9f4996410698f163382f5aecb16b5acfec0019..aa4aca0724c704ca0bfa1a09e1d52447d1782f77 100644 --- a/python/paddle/fluid/tests/test_if_else_op.py +++ b/python/paddle/fluid/tests/test_if_else_op.py @@ -59,19 +59,19 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = layers.fc(input=hidden, size=10, act='softmax') layers.assign(input=prob, output=false_out) - prob = merge_lod_tensor(in_true=true_out, - in_false=false_out, - mask=cond, - x=image) + prob = merge_lod_tensor( + in_true=true_out, in_false=false_out, mask=cond, x=image + ) loss = layers.cross_entropy(input=prob, label=label) avg_loss = paddle.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), - batch_size=10) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=8192), + batch_size=10, + ) place = core.CPUPlace() exe = Executor(place) @@ -84,12 +84,9 @@ class 
TestMNISTIfElseOp(unittest.TestCase): y_data = np.array([x[1] for x in data]).astype("int64") y_data = np.expand_dims(y_data, axis=1) - outs = exe.run(prog, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[avg_loss]) + outs = exe.run( + prog, feed={'x': x_data, 'y': y_data}, fetch_list=[avg_loss] + ) print(outs[0]) if outs[0] < 1.0: return @@ -126,9 +123,10 @@ class TestMNISTIfElseOp(unittest.TestCase): optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), - batch_size=200) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=8192), + batch_size=200, + ) place = core.CPUPlace() exe = Executor(place) @@ -141,12 +139,9 @@ class TestMNISTIfElseOp(unittest.TestCase): y_data = np.array([x[1] for x in data]).astype("int64") y_data = y_data.reshape((y_data.shape[0], 1)) - outs = exe.run(prog, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[avg_loss]) + outs = exe.run( + prog, feed={'x': x_data, 'y': y_data}, fetch_list=[avg_loss] + ) print(outs[0]) if outs[0] < 1.0: return @@ -154,7 +149,6 @@ class TestMNISTIfElseOp(unittest.TestCase): class TestIfElse(unittest.TestCase): - def set_test_case(self): # condiction is: self.data < self.cond_value self.cond_value = 0.5 @@ -174,9 +168,9 @@ class TestIfElse(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): src = layers.data(name='data', shape=[1], dtype='float32') - cond = layers.fill_constant([1], - dtype='float32', - value=self.cond_value) + cond = layers.fill_constant( + [1], dtype='float32', value=self.cond_value + ) ifcond = layers.less_than(x=src, y=cond) ie = layers.IfElse(ifcond) with ie.true_block(): @@ -194,9 +188,11 @@ class TestIfElse(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) fetch_list = [out] - o1, = exe.run(fluid.default_main_program(), - feed={'data': self.data}, - fetch_list=[out]) + (o1,) = exe.run( + fluid.default_main_program(), + feed={'data': self.data}, + fetch_list=[out], + ) o2 = self.numpy_cal() np.testing.assert_allclose( @@ -216,31 +212,28 @@ class TestIfElse(unittest.TestCase): class TestIfElseTrueBranch(TestIfElse): - def set_test_case(self): # condiction is: self.data < self.cond_value - self.cond_value = 10. + self.cond_value = 10.0 self.data = np.random.rand(25, 1).astype(np.float32) class TestIfElseFalseBranch(TestIfElse): - def set_test_case(self): # condiction is: self.data < self.cond_value - self.cond_value = -10. 
+ self.cond_value = -10.0 self.data = np.random.rand(25, 1).astype(np.float32) class TestIfElseError(unittest.TestCase): - def test_input_type_error(self): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): src = layers.data(name='data', shape=[1], dtype='float32') - const_value = layers.fill_constant([1], - dtype='float32', - value=123.0) + const_value = layers.fill_constant( + [1], dtype='float32', value=123.0 + ) ifcond = layers.less_than(x=src, y=const_value) with self.assertRaises(TypeError): ie = layers.IfElse(set()) diff --git a/python/paddle/fluid/tests/test_lod_tensor.py b/python/paddle/fluid/tests/test_lod_tensor.py index 4650c3ec2dfacc6c52ed5f38f677468a191fa4f0..b84a11dbdc674e0ef7b1431d863f52294e3e6269 100644 --- a/python/paddle/fluid/tests/test_lod_tensor.py +++ b/python/paddle/fluid/tests/test_lod_tensor.py @@ -14,28 +14,33 @@ import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor +from paddle.fluid.lod_tensor import ( + create_lod_tensor, + create_random_int_lodtensor, +) import numpy as np import unittest class TestLoDTensor(unittest.TestCase): - def test_pybind_recursive_seq_lens(self): tensor = fluid.LoDTensor() recursive_seq_lens = [] tensor.set_recursive_sequence_lengths(recursive_seq_lens) recursive_seq_lens = [[], [1], [3]] - self.assertRaises(Exception, tensor.set_recursive_sequence_lengths, - recursive_seq_lens) + self.assertRaises( + Exception, tensor.set_recursive_sequence_lengths, recursive_seq_lens + ) recursive_seq_lens = [[0], [2], [3]] - self.assertRaises(Exception, tensor.set_recursive_sequence_lengths, - recursive_seq_lens) + self.assertRaises( + Exception, tensor.set_recursive_sequence_lengths, recursive_seq_lens + ) recursive_seq_lens = [[1, 2, 3]] tensor.set_recursive_sequence_lengths(recursive_seq_lens) - self.assertEqual(tensor.recursive_sequence_lengths(), - recursive_seq_lens) + self.assertEqual( + tensor.recursive_sequence_lengths(), recursive_seq_lens + ) tensor.set(np.random.random([6, 1]), fluid.CPUPlace()) self.assertTrue(tensor.has_valid_recursive_sequence_lengths()) tensor.set(np.random.random([9, 1]), fluid.CPUPlace()) @@ -45,8 +50,9 @@ class TestLoDTensor(unittest.TestCase): # Moreover, last level's sum should be equal to the tensor height recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]] tensor.set_recursive_sequence_lengths(recursive_seq_lens) - self.assertEqual(tensor.recursive_sequence_lengths(), - recursive_seq_lens) + self.assertEqual( + tensor.recursive_sequence_lengths(), recursive_seq_lens + ) tensor.set(np.random.random([8, 1]), fluid.CPUPlace()) self.assertFalse(tensor.has_valid_recursive_sequence_lengths()) recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]] @@ -57,40 +63,54 @@ class TestLoDTensor(unittest.TestCase): def test_create_lod_tensor(self): # Create LoDTensor from a list - data = [[np.int64(1), np.int64(2), - np.int64(3)], [np.int64(3), np.int64(4)]] + data = [ + [np.int64(1), np.int64(2), np.int64(3)], + [np.int64(3), np.int64(4)], + ] wrong_recursive_seq_lens = [[2, 2]] correct_recursive_seq_lens = [[3, 2]] - self.assertRaises(AssertionError, create_lod_tensor, data, - wrong_recursive_seq_lens, fluid.CPUPlace()) - tensor = create_lod_tensor(data, correct_recursive_seq_lens, - fluid.CPUPlace()) - self.assertEqual(tensor.recursive_sequence_lengths(), - correct_recursive_seq_lens) + self.assertRaises( + AssertionError, + create_lod_tensor, + data, + wrong_recursive_seq_lens, + 
fluid.CPUPlace(), + ) + tensor = create_lod_tensor( + data, correct_recursive_seq_lens, fluid.CPUPlace() + ) + self.assertEqual( + tensor.recursive_sequence_lengths(), correct_recursive_seq_lens + ) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT64) self.assertEqual(tensor.shape(), [5, 1]) np.testing.assert_array_equal( np.array(tensor), - np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype('int64')) + np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype('int64'), + ) # Create LoDTensor from numpy array data = np.random.random([10, 1]).astype('float64') recursive_seq_lens = [[2, 1], [3, 3, 4]] tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace()) - self.assertEqual(tensor.recursive_sequence_lengths(), - recursive_seq_lens) + self.assertEqual( + tensor.recursive_sequence_lengths(), recursive_seq_lens + ) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP64) self.assertEqual(tensor.shape(), [10, 1]) np.testing.assert_array_equal(np.array(tensor), data) # Create LoDTensor from another LoDTensor, they are differnt instances new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]] - new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens, - fluid.CPUPlace()) - self.assertEqual(tensor.recursive_sequence_lengths(), - recursive_seq_lens) - self.assertEqual(new_tensor.recursive_sequence_lengths(), - new_recursive_seq_lens) + new_tensor = create_lod_tensor( + tensor, new_recursive_seq_lens, fluid.CPUPlace() + ) + self.assertEqual( + tensor.recursive_sequence_lengths(), recursive_seq_lens + ) + self.assertEqual( + new_tensor.recursive_sequence_lengths(), new_recursive_seq_lens + ) def test_create_random_int_lodtensor(self): # The shape of a word, commonly used in speech and NLP problem, is [1] @@ -99,10 +119,12 @@ class TestLoDTensor(unittest.TestCase): dict_size = 10000 low = 0 high = dict_size - 1 - tensor = create_random_int_lodtensor(recursive_seq_lens, shape, - fluid.CPUPlace(), low, high) - self.assertEqual(tensor.recursive_sequence_lengths(), - recursive_seq_lens) + tensor = create_random_int_lodtensor( + recursive_seq_lens, shape, fluid.CPUPlace(), low, high + ) + self.assertEqual( + tensor.recursive_sequence_lengths(), recursive_seq_lens + ) self.assertEqual(tensor.shape(), [10, 1]) def test_print_lodtensor(self): @@ -111,43 +133,53 @@ class TestLoDTensor(unittest.TestCase): dict_size = 100 low = 0 high = dict_size - 1 - tensor = create_random_int_lodtensor(recursive_seq_lens, shape, - fluid.CPUPlace(), low, high) + tensor = create_random_int_lodtensor( + recursive_seq_lens, shape, fluid.CPUPlace(), low, high + ) print(tensor) self.assertTrue(isinstance(str(tensor), str)) if core.is_compiled_with_cuda(): - gtensor = create_random_int_lodtensor(recursive_seq_lens, shape, - fluid.CUDAPlace(0), low, high) + gtensor = create_random_int_lodtensor( + recursive_seq_lens, shape, fluid.CUDAPlace(0), low, high + ) print(gtensor) self.assertTrue(isinstance(str(gtensor), str)) def test_dlpack_support(self): tensor = fluid.create_lod_tensor( - np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace()) + np.array([[1], [2], [3], [4]]).astype('int'), + [[1, 3]], + fluid.CPUPlace(), + ) dltensor = tensor._to_dlpack() tensor_from_dlpack = fluid.core.from_dlpack(dltensor) self.assertTrue(isinstance(tensor_from_dlpack, fluid.core.Tensor)) np.testing.assert_array_equal( np.array(tensor_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int')) + np.array([[1], [2], [3], [4]]).astype('int'), + ) # when build with cuda if 
core.is_compiled_with_cuda(): gtensor = fluid.create_lod_tensor( - np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CUDAPlace(0)) + np.array([[1], [2], [3], [4]]).astype('int'), + [[1, 3]], + fluid.CUDAPlace(0), + ) gdltensor = gtensor._to_dlpack() gtensor_from_dlpack = fluid.core.from_dlpack(gdltensor) self.assertTrue(isinstance(gtensor_from_dlpack, fluid.core.Tensor)) np.testing.assert_array_equal( np.array(gtensor_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int')) + np.array([[1], [2], [3], [4]]).astype('int'), + ) def test_as_type(self): tensor = fluid.create_lod_tensor( - np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace()) + np.array([[1], [2], [3], [4]]).astype('int'), + [[1, 3]], + fluid.CPUPlace(), + ) fp32_tensor = tensor._as_type(core.VarDesc.VarType.FP32) print(fp32_tensor) diff --git a/python/paddle/fluid/tests/test_python_operator_overriding.py b/python/paddle/fluid/tests/test_python_operator_overriding.py index 7fded67c9bac31af29a04abf892fd87e2cf94e1e..562b8cef5ff6aa58d507839dc3b986102b104d1c 100644 --- a/python/paddle/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/fluid/tests/test_python_operator_overriding.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestPythonOperatorOverride(unittest.TestCase): - def check_result(self, fn, place, dtype): shape = [9, 10] @@ -33,27 +32,22 @@ class TestPythonOperatorOverride(unittest.TestCase): y_data = np.random.random(size=shape).astype(dtype) python_out = fn(x_data, y_data) - x_var = layers.create_global_var(name='x', - shape=shape, - value=0.0, - dtype=dtype, - persistable=True) - y_var = layers.create_global_var(name='y', - shape=shape, - value=0.0, - dtype=dtype, - persistable=True) + x_var = layers.create_global_var( + name='x', shape=shape, value=0.0, dtype=dtype, persistable=True + ) + y_var = layers.create_global_var( + name='y', shape=shape, value=0.0, dtype=dtype, persistable=True + ) out = fn(x_var, y_var) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - fluid_out = exe.run(fluid.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[out]) + fluid_out = exe.run( + fluid.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[out], + ) np.testing.assert_array_equal(python_out, fluid_out[0]) @@ -79,8 +73,9 @@ class TestPythonOperatorOverride(unittest.TestCase): for place in places: for dtype in dtypes: for compare_fn in compare_fns: - with framework.program_guard(framework.Program(), - framework.Program()): + with framework.program_guard( + framework.Program(), framework.Program() + ): self.check_result(compare_fn, place, dtype) diff --git a/python/paddle/fluid/tests/test_sequential.py b/python/paddle/fluid/tests/test_sequential.py index 09cfbcdd7e378b53323103eae63792bcedab33fe..7446bb83841aa7aec71690641292af5001b18ce3 100644 --- a/python/paddle/fluid/tests/test_sequential.py +++ b/python/paddle/fluid/tests/test_sequential.py @@ -17,7 +17,6 @@ import paddle class TestDataFeeder(unittest.TestCase): - def test_lod_level_1_converter(self): sequential = paddle.nn.Sequential() diff --git a/python/paddle/fluid/tests/unittests/__init__.py b/python/paddle/fluid/tests/unittests/__init__.py index e427eb512474f8f9b2036fd674a6d53175b2c964..126cbeb4938870659f09b1412125e6d0c868093e 100644 --- a/python/paddle/fluid/tests/unittests/__init__.py +++ b/python/paddle/fluid/tests/unittests/__init__.py @@ -17,8 +17,10 @@ # please refer to https://stackoverflow.com/questions/8953844/import-module-from-subfolder 
import os + if os.name == 'nt': import sys + dirname, filename = os.path.split(os.path.abspath(__file__)) sys.path.insert(0, dirname) print(sys.path) diff --git a/python/paddle/fluid/tests/unittests/ascend_group.py b/python/paddle/fluid/tests/unittests/ascend_group.py index 32b42a00fc657104d95c421432827454ee925ae0..b0928f409319e61c16d8403c157892cb68a7c387 100644 --- a/python/paddle/fluid/tests/unittests/ascend_group.py +++ b/python/paddle/fluid/tests/unittests/ascend_group.py @@ -35,8 +35,9 @@ role = fleet.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) -def init_communicator(startup_program, main_program, current_endpoint, - endpoints, ring_id): +def init_communicator( + startup_program, main_program, current_endpoint, endpoints, ring_id +): nranks = len(endpoints) other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) @@ -44,55 +45,64 @@ def init_communicator(startup_program, main_program, current_endpoint, assert group_rank >= 0 block = startup_program.global_block() - nccl_id_var = block.create_var(name=unique_name.generate('nccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': nccl_id_var}, - attrs={ - 'rank': group_rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - OP_ROLE_KEY: OpRole.Forward, - }) - block.append_op(type='c_comm_init', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': group_rank, - 'ring_id': ring_id, - OP_ROLE_KEY: OpRole.Forward, - }) + nccl_id_var = block.create_var( + name=unique_name.generate('nccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': nccl_id_var}, + attrs={ + 'rank': group_rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + OP_ROLE_KEY: OpRole.Forward, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': group_rank, + 'ring_id': ring_id, + OP_ROLE_KEY: OpRole.Forward, + }, + ) # add input op for test fill_var_name = "tensor@Filled" - fill_var = block.create_var(name=fill_var_name, - shape=[10, 10], - dtype='float32', - persistable=False, - stop_gradient=True) - block.append_op(type="fill_constant", - outputs={"Out": fill_var_name}, - attrs={ - "shape": [10, 10], - "dtype": fill_var.dtype, - "value": 1.0, - "place_type": 1 - }) + fill_var = block.create_var( + name=fill_var_name, + shape=[10, 10], + dtype='float32', + persistable=False, + stop_gradient=True, + ) + block.append_op( + type="fill_constant", + outputs={"Out": fill_var_name}, + attrs={ + "shape": [10, 10], + "dtype": fill_var.dtype, + "value": 1.0, + "place_type": 1, + }, + ) with fluid.program_guard(main_program): op_type = "c_allreduce_sum" data = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.5) helper = LayerHelper(op_type, **locals()) - helper.append_op(type=op_type, - inputs={'X': [data]}, - outputs={'Out': [data]}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True - }) + helper.append_op( + type=op_type, + inputs={'X': [data]}, + outputs={'Out': [data]}, + attrs={'ring_id': ring_id, 'use_calc_stream': True}, + ) print("startup program:", startup_program) print("main program:", main_program) @@ -102,7 +112,7 @@ def train(world_endpoints, world_device_ids, local_device_ids, local_rank): startup_programs = [] main_programs = [] - 
#trainer_endpoints=["127.0.0.1:6071","127.0.0.1:6072","127.0.0.1:6073","127.0.0.1:6074"] + # trainer_endpoints=["127.0.0.1:6071","127.0.0.1:6072","127.0.0.1:6073","127.0.0.1:6074"] trainer_endpoints = world_endpoints groups = [[], [], []] groups[0] = [trainer_endpoints[0], trainer_endpoints[1]] @@ -132,10 +142,12 @@ def train(world_endpoints, world_device_ids, local_device_ids, local_rank): main_program = main_programs[local_rank] loss = Loss(Block(main_program)) optimizer = ascend_optimizer.AscendOptimizer(None, fetch_list=[]) - optimizer.minimize(loss, - startup_program, - auto_dp=True, - rank_table_file=os.getenv("RANK_TABLE_FILE", None)) + optimizer.minimize( + loss, + startup_program, + auto_dp=True, + rank_table_file=os.getenv("RANK_TABLE_FILE", None), + ) exe = paddle.static.Executor(paddle.CPUPlace()) exe.run(startup_program) diff --git a/python/paddle/fluid/tests/unittests/ascend_multi_process_collective.py b/python/paddle/fluid/tests/unittests/ascend_multi_process_collective.py index 1853e529d51dc0ad3e055fd62792506cf56d932d..113c4286f352ecbee31692b59e209fae315207d6 100644 --- a/python/paddle/fluid/tests/unittests/ascend_multi_process_collective.py +++ b/python/paddle/fluid/tests/unittests/ascend_multi_process_collective.py @@ -27,12 +27,21 @@ def train(prefix): device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS") current_device_id = os.getenv("PADDLE_LOCAL_DEVICE_IDS") - details = "selected_accelerators:{} selected_npus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{} device_ids:{} device_id:{}"\ - .format(selected_accelerators, selected_npus, worker_endpoints, trainers_num, current_endpoint,trainer_id,device_ids, current_device_id) + details = "selected_accelerators:{} selected_npus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{} device_ids:{} device_id:{}".format( + selected_accelerators, + selected_npus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + device_ids, + current_device_id, + ) print(details) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(details) diff --git a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py index 6e9603e6761798e3ea3fa1cf6758362c7b3bfd4e..e21d5f598ebf5fef2a25ebf82d624928fae3c2c8 100644 --- a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py +++ b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py @@ -24,21 +24,18 @@ paddle.enable_static() class TestASPHelperPruningBase(unittest.TestCase): - def setUp(self): self.main_program = fluid.Program() self.startup_program = fluid.Program() def build_model(): - img = fluid.data(name='img', - shape=[None, 3, 32, 32], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 3, 32, 32], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') - hidden = fluid.layers.conv2d(input=img, - num_filters=4, - filter_size=3, - padding=2, - act="relu") + hidden = fluid.layers.conv2d( + input=img, num_filters=4, filter_size=3, padding=2, act="relu" + ) hidden = fluid.layers.fc(input=hidden, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') return img, label, prediction @@ -46,23 +43,26 @@ class TestASPHelperPruningBase(unittest.TestCase): with fluid.program_guard(self.main_program, self.startup_program): self.img, self.label, self.predict = build_model() - 
def run_inference_pruning_test(self, get_mask_gen_func, - get_mask_check_func): + def run_inference_pruning_test( + self, get_mask_gen_func, get_mask_check_func + ): place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) exe = fluid.Executor(place) - self.__pruning_and_checking(exe, place, get_mask_gen_func, - get_mask_check_func, False) + self.__pruning_and_checking( + exe, place, get_mask_gen_func, get_mask_check_func, False + ) def run_training_pruning_test(self, get_mask_gen_func, get_mask_check_func): with fluid.program_guard(self.main_program, self.startup_program): loss = paddle.mean( - fluid.layers.cross_entropy(input=self.predict, - label=self.label)) + fluid.layers.cross_entropy(input=self.predict, label=self.label) + ) optimizer = paddle.incubate.asp.decorate( - fluid.optimizer.SGD(learning_rate=0.01)) + fluid.optimizer.SGD(learning_rate=0.01) + ) optimizer.minimize(loss, self.startup_program) place = paddle.CPUPlace() @@ -70,19 +70,24 @@ class TestASPHelperPruningBase(unittest.TestCase): place = paddle.CUDAPlace(0) exe = fluid.Executor(place) - self.__pruning_and_checking(exe, place, get_mask_gen_func, - get_mask_check_func, True) + self.__pruning_and_checking( + exe, place, get_mask_gen_func, get_mask_check_func, True + ) - def __pruning_and_checking(self, exe, place, mask_func_name, - check_func_name, with_mask): + def __pruning_and_checking( + self, exe, place, mask_func_name, check_func_name, with_mask + ): exe.run(self.startup_program) - paddle.incubate.asp.prune_model(self.main_program, - mask_algo=mask_func_name, - with_mask=with_mask) + paddle.incubate.asp.prune_model( + self.main_program, mask_algo=mask_func_name, with_mask=with_mask + ) for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) self.assertTrue( paddle.fluid.contrib.sparsity.check_sparsity( - mat.T, func_name=check_func_name, n=2, m=4)) + mat.T, func_name=check_func_name, n=2, m=4 + ) + ) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py index fde68b39e7b091b2ce3f2ea8d13f51db69d5210c..cdccd1071421d315deb3e816500489b166853e78 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py @@ -19,12 +19,13 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.contrib import sparsity -from paddle.fluid.contrib.sparsity.supported_layer_list import supported_layers_and_prune_func_map +from paddle.fluid.contrib.sparsity.supported_layer_list import ( + supported_layers_and_prune_func_map, +) from paddle.fluid.dygraph.layers import Layer, _convert_camel_to_snake class MyOwnLayer(Layer): - def __init__(self): super(MyOwnLayer, self).__init__() @@ -47,7 +48,6 @@ def my_own_pruning(tensor, m, n, mask_algo, param_name): class TestASPAddSupportedLayer(unittest.TestCase): - def test_add_supported_layer_via_name(self): sparsity.add_supported_layer("test_supported_1") sparsity.add_supported_layer("test_supported_2", my_own_pruning) @@ -55,37 +55,41 @@ class TestASPAddSupportedLayer(unittest.TestCase): my_own_layer_name = _convert_camel_to_snake(MyOwnLayer.__name__) self.assertTrue( - "test_supported_1" in 
supported_layers_and_prune_func_map) + "test_supported_1" in supported_layers_and_prune_func_map + ) + self.assertTrue( + "test_supported_2" in supported_layers_and_prune_func_map + ) self.assertTrue( - "test_supported_2" in supported_layers_and_prune_func_map) + "test_supported_2" in supported_layers_and_prune_func_map + ) self.assertTrue( - "test_supported_2" in supported_layers_and_prune_func_map) - self.assertTrue(supported_layers_and_prune_func_map["test_supported_2"] - == my_own_pruning) + supported_layers_and_prune_func_map["test_supported_2"] + == my_own_pruning + ) self.assertTrue( - my_own_layer_name in supported_layers_and_prune_func_map) + my_own_layer_name in supported_layers_and_prune_func_map + ) class TestASPDynamicCustomerizedPruneFunc(unittest.TestCase): - def setUp(self): paddle.disable_static() class CustomerLayer(paddle.nn.Layer): - def __init__(self): super(CustomerLayer, self).__init__() - self.weight = self.create_parameter(shape=[32, 32], - attr=None, - dtype='float32', - is_bias=False) + self.weight = self.create_parameter( + shape=[32, 32], attr=None, dtype='float32', is_bias=False + ) self.linear1 = paddle.nn.Linear(32, 32) self.linear2 = paddle.nn.Linear(32, 10) def forward(self, input_): - hidden = paddle.nn.functional.linear(x=input_, - weight=self.weight) + hidden = paddle.nn.functional.linear( + x=input_, weight=self.weight + ) hidden = self.linear1(hidden) out = self.linear2(hidden) return out @@ -93,8 +97,11 @@ class TestASPDynamicCustomerizedPruneFunc(unittest.TestCase): sparsity.add_supported_layer(CustomerLayer, my_own_pruning) self.layer = CustomerLayer() - self.customer_prefix = paddle.fluid.dygraph.layers._convert_camel_to_snake( - CustomerLayer.__name__) + self.customer_prefix = ( + paddle.fluid.dygraph.layers._convert_camel_to_snake( + CustomerLayer.__name__ + ) + ) self.supported_layer_count_ref = 3 def test_inference_pruning(self): @@ -106,23 +113,28 @@ class TestASPDynamicCustomerizedPruneFunc(unittest.TestCase): mat = param.numpy() if sparsity.asp.ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): supported_layer_count += 1 - if (self.customer_prefix in param.name): + if self.customer_prefix in param.name: self.assertLessEqual( - np.sum(mat.flatten() - static_tensor.flatten()), 1e-4) + np.sum(mat.flatten() - static_tensor.flatten()), 1e-4 + ) else: self.assertTrue( sparsity.check_sparsity( mat.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertEqual(supported_layer_count, self.supported_layer_count_ref) def test_training_pruning(self): - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=self.layer.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=self.layer.parameters() + ) optimizer = sparsity.decorate(optimizer) sparsity.prune_model(self.layer, mask_algo="mask_1d", with_mask=True) @@ -132,37 +144,49 @@ class TestASPDynamicCustomerizedPruneFunc(unittest.TestCase): mat = param.numpy() if sparsity.asp.ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): - mat_mask = sparsity.asp.ASPHelper._get_program_asp_info( - paddle.static.default_main_program()).mask_vars[ - param.name].numpy() + mat_mask = ( + sparsity.asp.ASPHelper._get_program_asp_info( + paddle.static.default_main_program() + ) + .mask_vars[param.name] + .numpy() + ) supported_layer_count += 1 - if (self.customer_prefix in 
param.name): + if self.customer_prefix in param.name: self.assertLessEqual( - np.sum(mat.flatten() - static_tensor.flatten()), 1e-4) + np.sum(mat.flatten() - static_tensor.flatten()), 1e-4 + ) self.assertLessEqual( - np.sum(mat_mask.flatten() - - static_tensor_mask.flatten()), 1e-4) + np.sum( + mat_mask.flatten() - static_tensor_mask.flatten() + ), + 1e-4, + ) else: self.assertTrue( sparsity.check_sparsity( mat.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertTrue( sparsity.check_sparsity( mat_mask.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertEqual(supported_layer_count, self.supported_layer_count_ref) class TestASPStaticCustomerizedPruneFunc(unittest.TestCase): - def setUp(self): paddle.enable_static() @@ -172,23 +196,19 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase): self.customer_prefix = "customer_layer" def build_model(): - img = fluid.data(name='img', - shape=[None, 3, 32, 32], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 3, 32, 32], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') - hidden = fluid.layers.conv2d(input=img, - num_filters=4, - filter_size=3, - padding=2, - act="relu") - hidden = fluid.layers.fc(input=hidden, - size=32, - act='relu', - name=self.customer_prefix) - hidden = fluid.layers.fc(input=hidden, - size=32, - act='relu', - name=self.customer_prefix) + hidden = fluid.layers.conv2d( + input=img, num_filters=4, filter_size=3, padding=2, act="relu" + ) + hidden = fluid.layers.fc( + input=hidden, size=32, act='relu', name=self.customer_prefix + ) + hidden = fluid.layers.fc( + input=hidden, size=32, act='relu', name=self.customer_prefix + ) hidden = fluid.layers.fc(input=hidden, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') return img, label, prediction @@ -207,89 +227,110 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase): def test_inference_pruning(self): self.exe.run(self.startup_program) - sparsity.prune_model(self.main_program, - mask_algo="mask_1d", - with_mask=False) + sparsity.prune_model( + self.main_program, mask_algo="mask_1d", with_mask=False + ) supported_layer_count = 0 for param in self.main_program.global_block().all_parameters(): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) if sparsity.asp.ASPHelper._is_supported_layer( - self.main_program, param.name): + self.main_program, param.name + ): supported_layer_count += 1 - if (self.customer_prefix in param.name): + if self.customer_prefix in param.name: self.assertLessEqual( - np.sum(mat.flatten() - static_tensor.flatten()), 1e-4) + np.sum(mat.flatten() - static_tensor.flatten()), 1e-4 + ) else: - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( sparsity.check_sparsity( mat.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertEqual(supported_layer_count, self.supported_layer_count_ref) def test_training_pruning(self): with fluid.program_guard(self.main_program, self.startup_program): loss = paddle.mean( - 
fluid.layers.cross_entropy(input=self.predict, - label=self.label)) + fluid.layers.cross_entropy(input=self.predict, label=self.label) + ) optimizer = sparsity.decorate( - fluid.optimizer.SGD(learning_rate=0.01)) + fluid.optimizer.SGD(learning_rate=0.01) + ) optimizer.minimize(loss, self.startup_program) self.exe.run(self.startup_program) - sparsity.prune_model(self.main_program, - mask_algo="mask_1d", - with_mask=True) + sparsity.prune_model( + self.main_program, mask_algo="mask_1d", with_mask=True + ) supported_layer_count = 0 for param in self.main_program.global_block().all_parameters(): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) if sparsity.asp.ASPHelper._is_supported_layer( - self.main_program, param.name): - mat_mask = np.array(fluid.global_scope().find_var( - sparsity.asp.ASPHelper._get_mask_name( - param.name)).get_tensor()) + self.main_program, param.name + ): + mat_mask = np.array( + fluid.global_scope() + .find_var(sparsity.asp.ASPHelper._get_mask_name(param.name)) + .get_tensor() + ) supported_layer_count += 1 - if (self.customer_prefix in param.name): + if self.customer_prefix in param.name: self.assertLessEqual( - np.sum(mat.flatten() - static_tensor.flatten()), 1e-4) + np.sum(mat.flatten() - static_tensor.flatten()), 1e-4 + ) self.assertLessEqual( - np.sum(mat_mask.flatten() - - static_tensor_mask.flatten()), 1e-4) + np.sum( + mat_mask.flatten() - static_tensor_mask.flatten() + ), + 1e-4, + ) else: - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - sparsity.check_sparsity(mat.T, n=2, m=4)) + sparsity.check_sparsity(mat.T, n=2, m=4) + ) self.assertFalse( - sparsity.check_sparsity(mat_mask.T, n=2, m=4)) + sparsity.check_sparsity(mat_mask.T, n=2, m=4) + ) else: self.assertTrue( sparsity.check_sparsity( mat.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertTrue( sparsity.check_sparsity( mat_mask.T, func_name=sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) self.assertEqual(supported_layer_count, self.supported_layer_count_ref) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py index a501ed427947495373ecad7dc0af02a079b00133..389645139c5783fbdba816043d78bbbc9cb07784 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py @@ -21,13 +21,11 @@ import numpy as np class MyLayer(paddle.nn.Layer): - def __init__(self): super(MyLayer, self).__init__() - self.conv1 = paddle.nn.Conv2D(in_channels=3, - out_channels=2, - kernel_size=3, - padding=2) + self.conv1 = paddle.nn.Conv2D( + in_channels=3, out_channels=2, kernel_size=3, padding=2 + ) self.linear1 = paddle.nn.Linear(1352, 32) self.linear2 = paddle.nn.Linear(32, 32) self.linear3 = paddle.nn.Linear(32, 10) @@ -42,7 +40,6 @@ class MyLayer(paddle.nn.Layer): class TestASPDynamicOptimize(unittest.TestCase): - def setUp(self): self.layer = MyLayer() @@ -52,41 +49,84 @@ class TestASPDynamicOptimize(unittest.TestCase): self.place = paddle.CUDAPlace(0) self.optimizer = paddle.optimizer.SGD( - learning_rate=0.01, parameters=self.layer.parameters()) + learning_rate=0.01, parameters=self.layer.parameters() + ) def 
test_is_supported_layers(self): program = paddle.static.default_main_program() names = [ - 'embedding_0.w_0', 'fack_layer_0.w_0', 'conv2d_0.w_0', - 'conv2d_0.b_0', 'conv2d_1.w_0', 'conv2d_1.b_0', 'fc_0.w_0', - 'fc_0.b_0', 'fc_1.w_0', 'fc_1.b_0', 'linear_2.w_0', 'linear_2.b_0' + 'embedding_0.w_0', + 'fack_layer_0.w_0', + 'conv2d_0.w_0', + 'conv2d_0.b_0', + 'conv2d_1.w_0', + 'conv2d_1.b_0', + 'fc_0.w_0', + 'fc_0.b_0', + 'fc_1.w_0', + 'fc_1.b_0', + 'linear_2.w_0', + 'linear_2.b_0', ] ref = [ - False, False, True, False, True, False, True, False, True, False, - True, False + False, + False, + True, + False, + True, + False, + True, + False, + True, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) paddle.incubate.asp.set_excluded_layers(['fc_1', 'conv2d_0']) ref = [ - False, False, False, False, True, False, True, False, False, False, - True, False + False, + False, + False, + False, + True, + False, + True, + False, + False, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) paddle.incubate.asp.reset_excluded_layers() ref = [ - False, False, True, False, True, False, True, False, True, False, - True, False + False, + False, + True, + False, + True, + False, + True, + False, + True, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) def test_decorate(self): param_names = [param.name for param in self.layer.parameters()] @@ -96,7 +136,8 @@ class TestASPDynamicOptimize(unittest.TestCase): for name in param_names: mask_var = ASPHelper._get_program_asp_info(program).mask_vars.get( - name, None) + name, None + ) if ASPHelper._is_supported_layer(program, name): self.assertTrue(mask_var is not None) else: @@ -107,14 +148,18 @@ class TestASPDynamicOptimize(unittest.TestCase): paddle.incubate.asp.prune_model(self.layer) - imgs = paddle.to_tensor(np.random.randn(32, 3, 24, 24), - dtype='float32', - place=self.place, - stop_gradient=False) - labels = paddle.to_tensor(np.random.randint(10, size=(32, 1)), - dtype='float32', - place=self.place, - stop_gradient=False) + imgs = paddle.to_tensor( + np.random.randn(32, 3, 24, 24), + dtype='float32', + place=self.place, + stop_gradient=False, + ) + labels = paddle.to_tensor( + np.random.randint(10, size=(32, 1)), + dtype='float32', + place=self.place, + stop_gradient=False, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') @@ -126,34 +171,41 @@ class TestASPDynamicOptimize(unittest.TestCase): for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) def 
test_asp_training_with_amp(self): self.optimizer = paddle.incubate.asp.decorate(self.optimizer) paddle.incubate.asp.prune_model(self.layer) - imgs = paddle.to_tensor(np.random.randn(32, 3, 24, 24), - dtype='float32', - place=self.place, - stop_gradient=False) - labels = paddle.to_tensor(np.random.randint(10, size=(32, 1)), - dtype='float32', - place=self.place, - stop_gradient=False) + imgs = paddle.to_tensor( + np.random.randn(32, 3, 24, 24), + dtype='float32', + place=self.place, + stop_gradient=False, + ) + labels = paddle.to_tensor( + np.random.randint(10, size=(32, 1)), + dtype='float32', + place=self.place, + stop_gradient=False, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) @@ -168,21 +220,24 @@ class TestASPDynamicOptimize(unittest.TestCase): for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py index b689a40f88fa731d4b86bcfdbecb0b3c459fa32c..b64763ebe7c93bd9e187f2953768bfe78678ba8e 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py @@ -24,21 +24,18 @@ paddle.enable_static() class TestASPStaticOptimize(unittest.TestCase): - def setUp(self): self.main_program = fluid.Program() self.startup_program = fluid.Program() def build_model(): - img = fluid.data(name='img', - shape=[None, 3, 24, 24], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 3, 24, 24], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') - hidden = fluid.layers.conv2d(input=img, - num_filters=4, - filter_size=3, - padding=2, - act="relu") + hidden = fluid.layers.conv2d( + input=img, num_filters=4, filter_size=3, padding=2, act="relu" + ) hidden = fluid.layers.fc(input=hidden, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') return img, label, prediction @@ -46,11 +43,11 @@ class TestASPStaticOptimize(unittest.TestCase): with fluid.program_guard(self.main_program, self.startup_program): self.img, self.label, predict = build_model() self.loss = paddle.mean( - fluid.layers.cross_entropy(input=predict, label=self.label)) + fluid.layers.cross_entropy(input=predict, label=self.label) + ) self.optimizer = fluid.optimizer.SGD(learning_rate=0.01) def test_get_not_ASP_relevant_vars(self): - def check_params(params, params_from_asp): if len(params_from_asp) != len(params): return False @@ -62,61 +59,112 @@ class TestASPStaticOptimize(unittest.TestCase): params = self.main_program.global_block().all_parameters() params_from_asp = ASPHelper._get_not_ASP_relevant_vars( - self.main_program) + self.main_program + ) 
self.assertTrue(check_params(params, params_from_asp)) with fluid.program_guard(self.main_program, self.startup_program): - ASPHelper._minimize(self.optimizer, self.loss, self.main_program, - self.startup_program) + ASPHelper._minimize( + self.optimizer, + self.loss, + self.main_program, + self.startup_program, + ) params_from_asp_after_opt = ASPHelper._get_not_ASP_relevant_vars( - self.main_program) + self.main_program + ) self.assertTrue(check_params(params, params_from_asp_after_opt)) def test_is_supported_layers(self): program = paddle.static.default_main_program() names = [ - 'embedding_0.w_0', 'fack_layer_0.w_0', 'conv2d_0.w_0', - 'conv2d_0.b_0', 'conv2d_1.w_0', 'conv2d_1.b_0', 'fc_0.w_0', - 'fc_0.b_0', 'fc_1.w_0', 'fc_1.b_0', 'linear_2.w_0', 'linear_2.b_0' + 'embedding_0.w_0', + 'fack_layer_0.w_0', + 'conv2d_0.w_0', + 'conv2d_0.b_0', + 'conv2d_1.w_0', + 'conv2d_1.b_0', + 'fc_0.w_0', + 'fc_0.b_0', + 'fc_1.w_0', + 'fc_1.b_0', + 'linear_2.w_0', + 'linear_2.b_0', ] ref = [ - False, False, True, False, True, False, True, False, True, False, - True, False + False, + False, + True, + False, + True, + False, + True, + False, + True, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) paddle.incubate.asp.set_excluded_layers(['fc_1', 'conv2d_0'], program) ref = [ - False, False, False, False, True, False, True, False, False, False, - True, False + False, + False, + False, + False, + True, + False, + True, + False, + False, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) paddle.incubate.asp.reset_excluded_layers(program) ref = [ - False, False, True, False, True, False, True, False, True, False, - True, False + False, + False, + True, + False, + True, + False, + True, + False, + True, + False, + True, + False, ] for i, name in enumerate(names): self.assertTrue( - ref[i] == ASPHelper._is_supported_layer(program, name)) + ref[i] == ASPHelper._is_supported_layer(program, name) + ) def test_decorate(self): param_names = self.__get_param_names( - self.main_program.global_block().all_parameters()) + self.main_program.global_block().all_parameters() + ) with fluid.program_guard(self.main_program, self.startup_program): self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) param_names_after_minimize = self.__get_param_names( - self.main_program.global_block().all_parameters()) + self.main_program.global_block().all_parameters() + ) - self.__check_mask_variables_and_ops(param_names, - param_names_after_minimize) + self.__check_mask_variables_and_ops( + param_names, param_names_after_minimize + ) def test_asp_training(self): with fluid.program_guard(self.main_program, self.startup_program): @@ -132,63 +180,77 @@ class TestASPStaticOptimize(unittest.TestCase): exe.run(self.startup_program) paddle.incubate.asp.prune_model(self.main_program) - data = (np.random.randn(32, 3, 24, - 24), np.random.randint(10, size=(32, 1))) + data = ( + np.random.randn(32, 3, 24, 24), + np.random.randint(10, size=(32, 1)), + ) exe.run(self.main_program, feed=feeder.feed([data])) for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): - mat = np.array(fluid.global_scope().find_var( - 
param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) def test_asp_training_with_amp(self): if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with fluid.program_guard(self.main_program, self.startup_program): - self.optimizer = fluid.contrib.mixed_precision.decorator.decorate( - self.optimizer) + self.optimizer = ( + fluid.contrib.mixed_precision.decorator.decorate( + self.optimizer + ) + ) self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[self.img, self.label], - place=place) + feeder = fluid.DataFeeder( + feed_list=[self.img, self.label], place=place + ) exe.run(self.startup_program) paddle.incubate.asp.prune_model(self.main_program) - data = (np.random.randn(32, 3, 24, - 24), np.random.randint(10, size=(32, 1))) + data = ( + np.random.randn(32, 3, 24, 24), + np.random.randint(10, size=(32, 1)), + ) exe.run(self.main_program, feed=feeder.feed([data])) for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) def __get_param_names(self, params): param_names = [] @@ -196,11 +258,15 @@ class TestASPStaticOptimize(unittest.TestCase): param_names.append(p.name) return param_names - def __check_mask_variables_and_ops(self, param_names, - param_names_after_minimize): + def __check_mask_variables_and_ops( + self, param_names, param_names_after_minimize + ): for n in param_names: - self.assertFalse(ASPHelper._is_supported_layer(self.main_program, n) and \ - ASPHelper._get_mask_name(n) not in param_names_after_minimize) + self.assertFalse( + ASPHelper._is_supported_layer(self.main_program, n) + and ASPHelper._get_mask_name(n) + not in param_names_after_minimize + ) mask_names = [] for n in param_names: @@ -209,8 +275,7 @@ class TestASPStaticOptimize(unittest.TestCase): masking_ops = [] for op in self.main_program.global_block().ops: - if op.type == 'elementwise_mul' and \ - op.input('Y')[0] in mask_names: + if op.type == 'elementwise_mul' and op.input('Y')[0] in mask_names: masking_ops.append(op.input('Y')[0]) self.assertTrue(len(masking_ops) == len(mask_names)) diff --git 
a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_dynamic.py b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_dynamic.py index 74f27d74b210c897dfb9424eba735d934bfa3004..ff86ba9e782224b5a3798db250655f0d5236b0e4 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_dynamic.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_dynamic.py @@ -22,13 +22,11 @@ from paddle.fluid.contrib.sparsity.asp import ASPHelper class MyLayer(paddle.nn.Layer): - def __init__(self): super(MyLayer, self).__init__() - self.conv1 = paddle.nn.Conv2D(in_channels=3, - out_channels=2, - kernel_size=3, - padding=2) + self.conv1 = paddle.nn.Conv2D( + in_channels=3, out_channels=2, kernel_size=3, padding=2 + ) self.linear1 = paddle.nn.Linear(1352, 32) self.linear2 = paddle.nn.Linear(32, 10) @@ -41,7 +39,6 @@ class MyLayer(paddle.nn.Layer): class TestASPDynamicPruningBase(unittest.TestCase): - def setUp(self): self.layer = MyLayer() @@ -49,72 +46,82 @@ class TestASPDynamicPruningBase(unittest.TestCase): if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - self.img = paddle.to_tensor(np.random.uniform(low=-0.5, - high=0.5, - size=(32, 3, 24, 24)), - dtype=np.float32, - place=place, - stop_gradient=False) + self.img = paddle.to_tensor( + np.random.uniform(low=-0.5, high=0.5, size=(32, 3, 24, 24)), + dtype=np.float32, + place=place, + stop_gradient=False, + ) self.set_config() def set_config(self): self.mask_gen_func = 'mask_1d' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + ) def test_inference_pruning(self): self.__pruning_and_checking(False) def test_training_pruning(self): - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=self.layer.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=self.layer.parameters() + ) optimizer = paddle.incubate.asp.decorate(optimizer) self.__pruning_and_checking(True) def __pruning_and_checking(self, with_mask): - paddle.incubate.asp.prune_model(self.layer, - mask_algo=self.mask_gen_func, - with_mask=with_mask) + paddle.incubate.asp.prune_model( + self.layer, mask_algo=self.mask_gen_func, with_mask=with_mask + ) for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( paddle.fluid.contrib.sparsity.check_sparsity( - mat.T, func_name=self.mask_check_func, n=2, m=4)) + mat.T, func_name=self.mask_check_func, n=2, m=4 + ) + ) class TestASPDynamicPruning1D(TestASPDynamicPruningBase): - def set_config(self): self.mask_gen_func = 'mask_1d' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + ) class TestASPDynamicPruning2DBest(TestASPDynamicPruningBase): - def set_config(self): self.mask_gen_func = 'mask_2d_best' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + self.mask_check_func = ( + 
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + ) class TestASPDynamicPruning2DGreedy(TestASPDynamicPruningBase): - def set_config(self): self.mask_gen_func = 'mask_2d_greedy' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py index 084fbeef2d3dd0ab3a5e0a0539518e3ae4a067fb..f312797f43a873de7261cf563ce63b9d83f02568 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py @@ -24,21 +24,18 @@ paddle.enable_static() class TestASPStaticPruningBase(unittest.TestCase): - def setUp(self): self.main_program = fluid.Program() self.startup_program = fluid.Program() def build_model(): - img = fluid.data(name='img', - shape=[None, 3, 24, 24], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 3, 24, 24], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') - hidden = fluid.layers.conv2d(input=img, - num_filters=2, - filter_size=3, - padding=2, - act="relu") + hidden = fluid.layers.conv2d( + input=img, num_filters=2, filter_size=3, padding=2, act="relu" + ) hidden = fluid.layers.fc(input=hidden, size=32, act='softmax') hidden = fluid.layers.fc(input=hidden, size=3, act='softmax') prediction = fluid.layers.fc(input=hidden, size=3, act='softmax') @@ -51,7 +48,9 @@ class TestASPStaticPruningBase(unittest.TestCase): def set_config(self): self.mask_gen_func = 'mask_1d' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + ) def test_inference_pruning(self): place = paddle.CPUPlace() @@ -64,10 +63,11 @@ class TestASPStaticPruningBase(unittest.TestCase): def test_training_pruning(self): with fluid.program_guard(self.main_program, self.startup_program): loss = paddle.mean( - fluid.layers.cross_entropy(input=self.predict, - label=self.label)) + fluid.layers.cross_entropy(input=self.predict, label=self.label) + ) optimizer = paddle.incubate.asp.decorate( - fluid.optimizer.SGD(learning_rate=0.01)) + fluid.optimizer.SGD(learning_rate=0.01) + ) optimizer.minimize(loss, self.startup_program) place = paddle.CPUPlace() @@ -79,45 +79,52 @@ class TestASPStaticPruningBase(unittest.TestCase): def __pruning_and_checking(self, exe, place, with_mask): exe.run(self.startup_program) - paddle.incubate.asp.prune_model(self.main_program, - mask_algo=self.mask_gen_func, - with_mask=with_mask) + paddle.incubate.asp.prune_model( + self.main_program, mask_algo=self.mask_gen_func, with_mask=with_mask + ) for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( 
paddle.fluid.contrib.sparsity.check_sparsity( - mat.T, func_name=self.mask_check_func, n=2, m=4)) + mat.T, func_name=self.mask_check_func, n=2, m=4 + ) + ) class TestASPStaticPruning1D(TestASPStaticPruningBase): - def set_config(self): self.mask_gen_func = 'mask_1d' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D + ) class TestASPStaticPruning2DBest(TestASPStaticPruningBase): - def set_config(self): self.mask_gen_func = 'mask_2d_best' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + ) class TestASPStaticPruning2DGreedy(TestASPStaticPruningBase): - def set_config(self): self.mask_gen_func = 'mask_2d_greedy' - self.mask_check_func = paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + self.mask_check_func = ( + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py index c059703f0cc0339e5c11077878d5eecfa17b2647..4347371c311d28cb0ecb9c9dd56bf3e8cbfc9048 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py @@ -22,13 +22,11 @@ import numpy as np class MyLayer(paddle.nn.Layer): - def __init__(self): super(MyLayer, self).__init__() - self.conv1 = paddle.nn.Conv2D(in_channels=3, - out_channels=4, - kernel_size=3, - padding=2) + self.conv1 = paddle.nn.Conv2D( + in_channels=3, out_channels=4, kernel_size=3, padding=2 + ) self.linear1 = paddle.nn.Linear(4624, 32) self.linear2 = paddle.nn.Linear(32, 32) self.linear3 = paddle.nn.Linear(32, 10) @@ -43,7 +41,6 @@ class MyLayer(paddle.nn.Layer): class TestASPDynamicOptimize(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -54,7 +51,8 @@ class TestASPDynamicOptimize(unittest.TestCase): self.place = paddle.CUDAPlace(0) self.optimizer = paddle.optimizer.SGD( - learning_rate=0.01, parameters=self.layer.parameters()) + learning_rate=0.01, parameters=self.layer.parameters() + ) self.optimizer = paddle.incubate.asp.decorate(self.optimizer) paddle.incubate.asp.prune_model(self.layer) @@ -67,11 +65,13 @@ class TestASPDynamicOptimize(unittest.TestCase): paddle.save(self.optimizer.state_dict(), opt_path) asp_info = ASPHelper._get_program_asp_info( - paddle.static.default_main_program()) + paddle.static.default_main_program() + ) for param_name in asp_info.mask_vars: mask = asp_info.mask_vars[param_name] asp_info.update_mask_vars( - param_name, paddle.ones(shape=mask.shape, dtype=mask.dtype)) + param_name, paddle.ones(shape=mask.shape, dtype=mask.dtype) + ) asp_info.update_masks(param_name, np.ones(shape=mask.shape)) net_state_dict = paddle.load(net_path) @@ -80,14 +80,18 @@ class TestASPDynamicOptimize(unittest.TestCase): self.layer.set_state_dict(net_state_dict) self.optimizer.set_state_dict(opt_state_dict) - imgs = paddle.to_tensor(np.random.randn(64, 3, 32, 32), - dtype='float32', - place=self.place, - stop_gradient=False) - labels = paddle.to_tensor(np.random.randint(10, size=(64, 1)), - dtype='float32', - place=self.place, - stop_gradient=False) + imgs = paddle.to_tensor( + np.random.randn(64, 3, 32, 32), + dtype='float32', + place=self.place, + stop_gradient=False, + ) + labels = paddle.to_tensor( + np.random.randint(10, size=(64, 1)), + dtype='float32', + place=self.place, + 
stop_gradient=False, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') @@ -99,24 +103,26 @@ class TestASPDynamicOptimize(unittest.TestCase): for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) class TestASPStaticOptimize(unittest.TestCase): - def setUp(self): paddle.enable_static() @@ -124,15 +130,13 @@ class TestASPStaticOptimize(unittest.TestCase): self.startup_program = fluid.Program() def build_model(): - img = fluid.data(name='img', - shape=[None, 3, 32, 32], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 3, 32, 32], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') - hidden = fluid.layers.conv2d(input=img, - num_filters=4, - filter_size=3, - padding=2, - act="relu") + hidden = fluid.layers.conv2d( + input=img, num_filters=4, filter_size=3, padding=2, act="relu" + ) hidden = fluid.layers.fc(input=hidden, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') return img, label, prediction @@ -140,7 +144,8 @@ class TestASPStaticOptimize(unittest.TestCase): with fluid.program_guard(self.main_program, self.startup_program): self.img, self.label, predict = build_model() self.loss = paddle.mean( - fluid.layers.cross_entropy(input=predict, label=self.label)) + fluid.layers.cross_entropy(input=predict, label=self.label) + ) self.optimizer = fluid.optimizer.SGD(learning_rate=0.01) self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) @@ -166,29 +171,35 @@ class TestASPStaticOptimize(unittest.TestCase): state_dict = paddle.load(param_path) prog.set_state_dict(state_dict) - feeder = fluid.DataFeeder(feed_list=[self.img, self.label], - place=self.place) + feeder = fluid.DataFeeder( + feed_list=[self.img, self.label], place=self.place + ) - data = (np.random.randn(64, 3, 32, - 32), np.random.randint(10, size=(64, 1))) + data = ( + np.random.randn(64, 3, 32, 32), + np.random.randint(10, size=(64, 1)), + ) self.exe.run(prog, feed=feeder.feed([data])) for param in prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(prog, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) if __name__ == '__main__': diff 
--git a/python/paddle/fluid/tests/unittests/asp/test_asp_utils.py b/python/paddle/fluid/tests/unittests/asp/test_asp_utils.py index a94520662ef05f5068010ad3f1c3773b22495872..a55ba05a75d0ef87c2f2c9a32a683be52a222e27 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_utils.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_utils.py @@ -20,33 +20,50 @@ import numpy as np class TestASPUtils(unittest.TestCase): - def test_get_check_method(self): self.assertEqual( paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method( - paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D), - paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D) + paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D + ), + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D, + ) self.assertEqual( paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method( - paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY), - paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D) + paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY + ), + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D, + ) self.assertEqual( paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method( - paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST), - paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D) + paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST + ), + paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D, + ) def test_density(self): - x = np.array([[1.0, 1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0], - [1.0, 0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0], - [0.0, 1.0, 0.0, 0.0, 1.0]]) + x = np.array( + [ + [1.0, 1.0, 1.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 1.0], + [1.0, 0.0, 0.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 0.0, 1.0], + ] + ) self.assertEqual(paddle.incubate.asp.calculate_density(x), 0.56) x[:, 0] = 0.0 self.assertEqual(paddle.incubate.asp.calculate_density(x), 0.4) def test_check_mask_1d(self): - x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0], - [1.0, 1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0], - [0.0, 1.0, 0.0, 0.0, 1.0]]) + x = np.array( + [ + [1.0, 0.0, 0.0, 1.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 0.0, 1.0], + ] + ) self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4)) self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 4)) self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 5)) @@ -58,18 +75,26 @@ class TestASPUtils(unittest.TestCase): for _ in range(10): x = np.random.randint(10, size=(5, 5)) x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4) + ) x = np.random.randn(5, 4) x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4) + ) def test_check_mask_2d(self): - x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0, 1.0]]) + x = np.array( + [ + [1.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 1.0], + ] + ) self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4)) self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 4)) 
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 5)) @@ -81,28 +106,31 @@ class TestASPUtils(unittest.TestCase): for _ in range(10): x = np.random.randint(10, size=(5, 5)) x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4) + ) x = np.random.randn(5, 4) x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4) + ) def test_get_mask_2d_best(self): for _ in range(10): x = np.random.randint(10, size=(5, 5)) x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4) + ) x = np.random.randn(5, 4) x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4) - self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d( - x, 2, 4)) + self.assertTrue( + paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4) + ) def test_threadsafe_valid_2d_patterns(self): - def get_reference(m=4, n=2): from itertools import permutations @@ -112,17 +140,20 @@ class TestASPUtils(unittest.TestCase): patterns = patterns + patterns patterns = np.asarray(list(set(permutations(patterns, m)))) - valid = ((patterns.sum(axis=1) <= n).sum( - axis=1) == m).nonzero()[0].reshape(-1) + valid = ( + ((patterns.sum(axis=1) <= n).sum(axis=1) == m) + .nonzero()[0] + .reshape(-1) + ) valid_patterns = np.empty((valid.shape[0], m, m)) valid_patterns[:] = patterns[valid[:]] return valid_patterns for _ in range(4): computing_thread = threading.Thread( - target=paddle.fluid.contrib.sparsity.utils. 
- _compute_valid_2d_patterns, - args=(2, 4)) + target=paddle.fluid.contrib.sparsity.utils._compute_valid_2d_patterns, + args=(2, 4), + ) computing_thread.start() time.sleep(3) patterns_map = paddle.fluid.contrib.sparsity.utils._valid_2d_patterns @@ -132,7 +163,8 @@ class TestASPUtils(unittest.TestCase): self.assertTrue(reference_key in patterns_map) self.assertTrue(len(patterns_map) == 1) self.assertTrue( - (reference_patterns == patterns_map[reference_key]).all()) + (reference_patterns == patterns_map[reference_key]).all() + ) def test_check_sparsity(self): for _ in range(10): @@ -173,49 +205,64 @@ class TestASPUtils(unittest.TestCase): mask, func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D, n=2, - m=4), paddle.fluid.contrib.sparsity.check_mask_1d(mask, 2, 4)) + m=4, + ), + paddle.fluid.contrib.sparsity.check_mask_1d(mask, 2, 4), + ) mask = paddle.fluid.contrib.sparsity.get_mask_2d_best(x_2d, 2, 4) self.assertEqual( paddle.fluid.contrib.sparsity.check_sparsity( mask, func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D, n=2, - m=4), paddle.fluid.contrib.sparsity.check_mask_2d(mask, 2, 4)) + m=4, + ), + paddle.fluid.contrib.sparsity.check_mask_2d(mask, 2, 4), + ) def __test_1D_2D_sparse_mask_generation_methods(self, x): mask = paddle.fluid.contrib.sparsity.create_mask( x, func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D, n=2, - m=4) + m=4, + ) self.assertTrue( paddle.fluid.contrib.sparsity.check_sparsity( mask, func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D, n=2, - m=4)) + m=4, + ) + ) mask = paddle.fluid.contrib.sparsity.create_mask( x, func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY, n=2, - m=4) + m=4, + ) self.assertTrue( paddle.fluid.contrib.sparsity.check_sparsity( mask, func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D, n=2, - m=4)) + m=4, + ) + ) mask = paddle.fluid.contrib.sparsity.create_mask( x, func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST, n=2, - m=4) + m=4, + ) self.assertTrue( paddle.fluid.contrib.sparsity.check_sparsity( mask, func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D, n=2, - m=4)) + m=4, + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_dynamic.py b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_dynamic.py index d132b319f569af41d141a85b0b6c36e357fe7a7a..a2116283a333d163041b12052dd0487d5b4db35f 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_dynamic.py +++ b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_dynamic.py @@ -29,7 +29,6 @@ else: class MyLayer(paddle.nn.Layer): - def __init__(self): super(MyLayer, self).__init__() self.linear1 = paddle.nn.Linear(32, 32) @@ -42,7 +41,6 @@ class MyLayer(paddle.nn.Layer): class TestFleetWithASPDynamic(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -56,7 +54,8 @@ class TestFleetWithASPDynamic(unittest.TestCase): self.place = paddle.CUDAPlace(0) self.optimizer = paddle.optimizer.SGD( - learning_rate=0.01, parameters=self.layer.parameters()) + learning_rate=0.01, parameters=self.layer.parameters() + ) def test_with_asp(self): fleet.init(is_collective=True) @@ -67,14 +66,18 @@ class TestFleetWithASPDynamic(unittest.TestCase): self.optimizer = fleet.distributed_optimizer(self.optimizer) self.layer = fleet.distributed_model(self.layer) - imgs = paddle.to_tensor(np.random.randn(64, 32), - dtype='float32', - 
place=self.place, - stop_gradient=False) - labels = paddle.to_tensor(np.random.randint(10, size=(64, 1)), - dtype='float32', - place=self.place, - stop_gradient=False) + imgs = paddle.to_tensor( + np.random.randn(64, 32), + dtype='float32', + place=self.place, + stop_gradient=False, + ) + labels = paddle.to_tensor( + np.random.randint(10, size=(64, 1)), + dtype='float32', + place=self.place, + stop_gradient=False, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') @@ -86,24 +89,26 @@ class TestFleetWithASPDynamic(unittest.TestCase): for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) class TestFleetWithASPAMPDynamic(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -117,7 +122,8 @@ class TestFleetWithASPAMPDynamic(unittest.TestCase): self.place = paddle.CUDAPlace(0) self.optimizer = paddle.optimizer.SGD( - learning_rate=0.01, parameters=self.layer.parameters()) + learning_rate=0.01, parameters=self.layer.parameters() + ) def test_with_asp(self): fleet.init(is_collective=True) @@ -128,14 +134,18 @@ class TestFleetWithASPAMPDynamic(unittest.TestCase): self.optimizer = fleet.distributed_optimizer(self.optimizer) self.layer = fleet.distributed_model(self.layer) - imgs = paddle.to_tensor(np.random.randn(64, 32), - dtype='float32', - place=self.place, - stop_gradient=False) - labels = paddle.to_tensor(np.random.randint(10, size=(64, 1)), - dtype='float32', - place=self.place, - stop_gradient=False) + imgs = paddle.to_tensor( + np.random.randn(64, 32), + dtype='float32', + place=self.place, + stop_gradient=False, + ) + labels = paddle.to_tensor( + np.random.randint(10, size=(64, 1)), + dtype='float32', + place=self.place, + stop_gradient=False, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) @@ -150,20 +160,23 @@ class TestFleetWithASPAMPDynamic(unittest.TestCase): for param in self.layer.parameters(): if ASPHelper._is_supported_layer( - paddle.static.default_main_program(), param.name): + paddle.static.default_main_program(), param.name + ): mat = param.numpy() - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_sharding.py 
b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_sharding.py index e5ff5f839e7be8f8befb456f48ade4f46ac601a6..9e74e10fbc5057d04422c8a821c855fbd9d7a618 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_sharding.py +++ b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_sharding.py @@ -32,7 +32,6 @@ paddle.enable_static() class TestFleetWithASPSharding(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -49,9 +48,9 @@ class TestFleetWithASPSharding(unittest.TestCase): def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): - input_x = paddle.static.data(name="x", - shape=[-1, 32], - dtype='float32') + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' + ) input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') @@ -71,7 +70,7 @@ class TestFleetWithASPSharding(unittest.TestCase): "sharding_degree": 8, "mp_degree": 1, "hybrid_dp": False, - "gradient_merge_acc_step": 1 + "gradient_merge_acc_step": 1, } dist_strategy.nccl_comm_num = 1 dist_strategy.asp = True @@ -80,18 +79,21 @@ class TestFleetWithASPSharding(unittest.TestCase): def test_with_asp_sharding(self): fleet.init(is_collective=True) train_prog, startup_prog = fluid.Program(), fluid.Program() - avg_cost, strategy, input_x, input_y = self.net(train_prog, - startup_prog) + avg_cost, strategy, input_x, input_y = self.net( + train_prog, startup_prog + ) with fluid.program_guard(train_prog, startup_prog): optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) if paddle.fluid.is_compiled_with_cuda(): place = fluid.CUDAPlace( - int(os.environ.get('FLAGS_selected_gpus', 0))) + int(os.environ.get('FLAGS_selected_gpus', 0)) + ) else: place = fluid.CPUPlace() @@ -106,20 +108,23 @@ class TestFleetWithASPSharding(unittest.TestCase): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_static.py b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_static.py index f205610179108f395071684897373759c42705f7..aa4b0208847aa87f1a4ccda55d60ece64872c64a 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_static.py +++ b/python/paddle/fluid/tests/unittests/asp/test_fleet_with_asp_static.py @@ -32,7 +32,6 @@ paddle.enable_static() class TestFleetWithASPStatic(unittest.TestCase): - def setUp(self): 
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -41,9 +40,9 @@ class TestFleetWithASPStatic(unittest.TestCase): def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): - input_x = paddle.static.data(name="x", - shape=[-1, 32], - dtype='float32') + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' + ) input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') @@ -58,17 +57,22 @@ class TestFleetWithASPStatic(unittest.TestCase): def test_with_asp(self): fleet.init(is_collective=True) train_prog, startup_prog = fluid.Program(), fluid.Program() - avg_cost, strategy, input_x, input_y = self.net(train_prog, - startup_prog) + avg_cost, strategy, input_x, input_y = self.net( + train_prog, startup_prog + ) with fluid.program_guard(train_prog, startup_prog): optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) - place = fluid.CUDAPlace( - 0) if paddle.fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if paddle.fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) @@ -81,24 +85,26 @@ class TestFleetWithASPStatic(unittest.TestCase): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) class TestFleetWithASPAMPStatic(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -107,9 +113,9 @@ class TestFleetWithASPAMPStatic(unittest.TestCase): def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): - input_x = paddle.static.data(name="x", - shape=[-1, 32], - dtype='float32') + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' + ) input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') @@ -124,18 +130,23 @@ class TestFleetWithASPAMPStatic(unittest.TestCase): def test_with_asp_and_amp(self): fleet.init(is_collective=True) train_prog, startup_prog = fluid.Program(), fluid.Program() - avg_cost, strategy, input_x, input_y = self.net(train_prog, - startup_prog) + avg_cost, strategy, input_x, input_y = self.net( + train_prog, startup_prog + ) strategy.amp = True with fluid.program_guard(train_prog, startup_prog): optimizer = paddle.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - 
strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) - place = fluid.CUDAPlace( - 0) if paddle.fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if paddle.fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) @@ -150,41 +161,49 @@ class TestFleetWithASPAMPStatic(unittest.TestCase): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) def test_with_asp_and_pure_fp16(self): fleet.init(is_collective=True) train_prog, startup_prog = fluid.Program(), fluid.Program() with paddle.static.amp.fp16_guard(): - avg_cost, strategy, \ - input_x, input_y = self.net(train_prog, - startup_prog) + avg_cost, strategy, input_x, input_y = self.net( + train_prog, startup_prog + ) strategy.amp = True strategy.amp_configs = {'use_pure_fp16': True} with fluid.program_guard(train_prog, startup_prog): with paddle.static.amp.fp16_guard(): optimizer = optimizer = paddle.optimizer.Momentum( - learning_rate=0.01, multi_precision=True) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + learning_rate=0.01, multi_precision=True + ) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) - place = fluid.CUDAPlace( - 0) if paddle.fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if paddle.fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) @@ -199,20 +218,23 @@ class TestFleetWithASPAMPStatic(unittest.TestCase): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): - mat = np.array(fluid.global_scope().find_var( - param.name).get_tensor()) - if (len(param.shape) == 4 - and param.shape[1] < 4) or (len(param.shape) == 2 - and param.shape[0] < 4): + mat = np.array( + fluid.global_scope().find_var(param.name).get_tensor() + ) + if (len(param.shape) == 4 and param.shape[1] < 4) or ( + len(param.shape) == 2 and param.shape[0] < 4 + ): self.assertFalse( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) else: self.assertTrue( - paddle.fluid.contrib.sparsity.check_sparsity(mat.T, - n=2, - m=4)) + paddle.fluid.contrib.sparsity.check_sparsity( + mat.T, n=2, m=4 + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py index 4303c56675b21396f63d026cb41c396785403df9..a9eba03ae5687deb117128827c3a889bca119d75 
100644 --- a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py +++ b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py @@ -25,7 +25,7 @@ import numpy as np BATCH_NUM = 4 BATCH_SIZE = 1 -#IMAGE_SIZE = 128 +# IMAGE_SIZE = 128 CLASS_NUM = 2 USE_GPU = False # whether use GPU to run model @@ -47,7 +47,6 @@ def get_random_images_and_labels(image_shape, label_shape): def sample_list_generator_creator(): - def __reader__(): for _ in range(BATCH_NUM): sample_list = [] @@ -61,21 +60,17 @@ def sample_list_generator_creator(): class AutoCheckpointBase(unittest.TestCase): - - def _init_env(self, - exe, - main_prog, - startup_prog, - minimize=True, - iterable=True): - + def _init_env( + self, exe, main_prog, startup_prog, minimize=True, iterable=True + ): def simple_net(): image = fluid.data(name='image', shape=[-1, 4, 4], dtype='float32') label = fluid.data(name='label', shape=[-1, 1], dtype='int64') fc_tmp = fluid.layers.fc(image, size=CLASS_NUM) cross_entropy = fluid.layers.softmax_with_cross_entropy( - fc_tmp, label) + fc_tmp, label + ) loss = fluid.layers.reduce_mean(cross_entropy) sgd = fluid.optimizer.SGD(learning_rate=1e-3) if minimize: @@ -87,17 +82,20 @@ class AutoCheckpointBase(unittest.TestCase): if minimize: compiled = fluid.CompiledProgram(main_prog).with_data_parallel( - loss_name=loss.name) + loss_name=loss.name + ) else: compiled = None loader = fluid.io.DataLoader.from_generator( feed_list=[image, label], capacity=64, use_double_buffer=True, - iterable=iterable) + iterable=iterable, + ) - loader.set_sample_list_generator(sample_list_generator_creator(), - places[0]) + loader.set_sample_list_generator( + sample_list_generator_creator(), places[0] + ) if minimize: exe.run(startup_prog) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/amp_pass_unittest.py b/python/paddle/fluid/tests/unittests/auto_parallel/amp_pass_unittest.py index c00a3367a986dec2873eed89fb0a3e64e0e59842..5d41e1cc484c5f81612d1a6756b6d65243591f2f 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/amp_pass_unittest.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/amp_pass_unittest.py @@ -31,7 +31,9 @@ def apply_pass(use_amp=False, level=None): amp.enable = True amp.custom_white_list = ['softmax', 'layer_norm', 'gelu'] amp.custom_black_list = [ - 'c_softmax_with_cross_entropy', 'elementwise_div', 'reduce_sum' + 'c_softmax_with_cross_entropy', + 'elementwise_div', + 'reduce_sum', ] amp.init_loss_scaling = 32768 amp.use_fp16_guard = False @@ -47,7 +49,6 @@ def reset_prog(): class TestAMPPass(unittest.TestCase): - def setUp(self): self.rtol = 1e-5 self.atol = 1e-8 @@ -82,7 +83,9 @@ class TestAMPPass(unittest.TestCase): rtol=rtol or self.rtol, atol=atol or self.atol, err_msg='pass {} has wrong results!, \nu={}\nv={}\ndiff={}'.format( - __class__, ref_losses, check_losses, ref_losses - check_losses)) + __class__, ref_losses, check_losses, ref_losses - check_losses + ), + ) def test_amp_pass(self): # mp2 training diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_model.py b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_model.py index bccf0963ce1030c9393c64a7f781ddc411d320a3..2c03dab50ea51c880232efbc75b973844d231d0f 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_model.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_model.py @@ -36,39 +36,39 @@ def get_random_inputs_and_labels(input_shape, label_shape): def batch_generator_creator(): - def 
__reader__(): for _ in range(batch_size): batch_input, batch_label = get_random_inputs_and_labels( [batch_size, sequence_len, hidden_size], - [batch_size, sequence_len, 1]) + [batch_size, sequence_len, 1], + ) yield batch_input, batch_label return __reader__ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -85,29 +85,34 @@ class MLPLayer(nn.Layer): def mlp_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) auto.shard_tensor(input, _global_process_mesh, [None, None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) loss = paddle.mean(error_cost) - loader = paddle.io.DataLoader.from_generator(feed_list=[input, label], - capacity=4 * batch_size, - iterable=True) + loader = paddle.io.DataLoader.from_generator( + feed_list=[input, label], capacity=4 * batch_size, iterable=True + ) return loss, train_program, start_program, loader @@ -125,17 +130,24 @@ def train(): train_program = static.Program() start_program = static.Program() loss, train_program, start_program, loader = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = 
optimizer.minimize(loss, start_program) places = static.cuda_places() loader.set_batch_generator(batch_generator_creator(), places=places) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py index 3634f4258a085792e9f25455fe3debd38238343b..06b5a808cf41db1840caf70be4b558a2c653b94b 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py @@ -21,51 +21,58 @@ import numpy as np sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) -def get_gpt_model(train_program, start_program, place, batch_size, sequence_len, - vocab_size): +def get_gpt_model( + train_program, start_program, place, batch_size, sequence_len, vocab_size +): modeling.init_global() with static.program_guard(train_program, start_program): - tokens = paddle.static.data(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.data( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = paddle.static.data(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] - gpt = GPTModel(vocab_size=1000, - hidden_size=64, - num_hidden_layers=2, - num_attention_heads=8, - intermediate_size=256, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3) - - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + gpt = GPTModel( + vocab_size=1000, + hidden_size=64, + num_hidden_layers=2, + num_attention_heads=8, + intermediate_size=256, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + ) + + model = GPTForPretraining( + gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) preds = model(tokens, position_ids, attention_mask) criterion = GPTPretrainingCriterion() loss = criterion(preds, labels, loss_mask) @@ -102,17 +109,28 @@ def train(): sequence_len = 512 vocab_size = 1000 train_program, start_program, loss, gen_data = get_gpt_model( - train_program, start_program, place, batch_size, sequence_len, - vocab_size) - - optimizer = 
paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + train_program, + start_program, + place, + batch_size, + sequence_len, + vocab_size, + ) + + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = optimizer.minimize(loss, start_program) places = static.cuda_places() exe = paddle.static.Executor(places[0]) @@ -121,25 +139,29 @@ def train(): for step in range(10): tokens, position_ids, attention_mask, labels, loss_mask = gen_data() if loss.name in distributed_main_program.global_block().vars: - loss_print, = exe.run(distributed_main_program, - feed={ - "tokens": tokens, - "position_ids": position_ids, - "attention_mask": attention_mask, - "labels": labels, - "loss_mask": loss_mask - }, - fetch_list=[loss]) + (loss_print,) = exe.run( + distributed_main_program, + feed={ + "tokens": tokens, + "position_ids": position_ids, + "attention_mask": attention_mask, + "labels": labels, + "loss_mask": loss_mask, + }, + fetch_list=[loss], + ) print("step: %s, loss: %f" % (step, loss_print[0])) else: - exe.run(distributed_main_program, - feed={ - "tokens": tokens, - "position_ids": position_ids, - "attention_mask": attention_mask, - "labels": labels, - "loss_mask": loss_mask - }) + exe.run( + distributed_main_program, + feed={ + "tokens": tokens, + "position_ids": position_ids, + "attention_mask": attention_mask, + "labels": labels, + "loss_mask": loss_mask, + }, + ) print("step: %s, loss: %s" % (step, "None")) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_planner.py b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_planner.py index b40a61ed34cb7c4f527dcd0ed992085d874305d8..3111611895a900399247ad16b7d3808740de68e3 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_planner.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/auto_parallel_relaunch_with_planner.py @@ -17,12 +17,15 @@ import paddle.static as static from paddle.distributed import fleet from paddle.distributed.auto_parallel.cost import CostEstimator from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) def train(): from auto_parallel_relaunch_model import mlp_pretrain_forward from auto_parallel_relaunch_model import batch_generator_creator + dist_strategy = fleet.DistributedStrategy() # init parallel optimizer dist_strategy.auto_search = True @@ -30,17 +33,24 @@ def train(): train_program = static.Program() start_program = static.Program() loss, train_program, start_program, loader = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, 
distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = optimizer.minimize(loss, start_program) # add cost estimator dist_context = get_default_distributed_context() @@ -51,11 +61,18 @@ def train(): dims_mapping = dist_op.dist_attr.get_input_dims_mapping(var_name) if dims_mapping is None: dist_op.dist_attr.set_input_dims_mapping( - var_name, [ - -1 for i in range( - len(train_program.global_block().vars[var_name]. - shape)) - ]) + var_name, + [ + -1 + for i in range( + len( + train_program.global_block() + .vars[var_name] + .shape + ) + ) + ], + ) cluster.gen_default_config_cluster(device_count=2) cost_estimator = CostEstimator(train_program, cluster) global_cost = cost_estimator.estimate(dist_context) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/clip_grad_by_global_norm.py b/python/paddle/fluid/tests/unittests/auto_parallel/clip_grad_by_global_norm.py index ba255fba279126f0860a07bb725bcd196c169de2..d25a8bcd4c5f8181c9e86b58efe3e746410d2b35 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/clip_grad_by_global_norm.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/clip_grad_by_global_norm.py @@ -61,7 +61,6 @@ def reset_prog(): class TestGradientClipByGlobalNorm(unittest.TestCase): - def setUp(self): self.batch_size = 2 self.batch_num = 1 @@ -94,9 +93,10 @@ class TestGradientClipByGlobalNorm(unittest.TestCase): sharding_p, rtol=1e-05, atol=1e-08, - err_msg= - 'gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}' - .format(dp_p, sharding_p, dp_p - sharding_p)) + err_msg='gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}'.format( + dp_p, sharding_p, dp_p - sharding_p + ), + ) def test_grad_clip(self): # dp2 training @@ -108,7 +108,8 @@ class TestGradientClipByGlobalNorm(unittest.TestCase): sharding_engine = self.get_engine(True) sharding_engine.fit(self.dataset, 3, batch_size=self.batch_size) sharding_param_values = get_parameter_value( - sharding_engine.main_program) + sharding_engine.main_program + ) self.check_result(dp_param_values, sharding_param_values) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/converter.py b/python/paddle/fluid/tests/unittests/auto_parallel/converter.py index 291dae6612c7612993e1b525b963c0c9ba2369fd..5e0506c3785db53e869cb0dda622c97997c84b0e 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/converter.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/converter.py @@ -28,21 +28,21 @@ def test_convert(): tensor_name: { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [-1, -1] + "dims_mapping": [-1, -1], } } row_strategy = { tensor_name: { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [0, -1] + "dims_mapping": [0, -1], } } col_strategy = { tensor_name: { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [-1, 0] + "dims_mapping": [-1, 0], } } @@ -70,7 +70,7 @@ def test_convert(): new_name: { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [0, -1] + "dims_mapping": [0, -1], } } converter = Converter(tensor_dict, col_strategy, row_strategy) @@ -84,14 +84,14 @@ def test_convert(): "tensor_2": { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [-1, -1] + "dims_mapping": [-1, -1], } } row_strategy = { "tensor_2": { "process_shape": [2], "process_group": [0, 1], - "dims_mapping": [0, -1] + "dims_mapping": [0, -1], } } tensor_dict = {"tensor_2": [complete_tensor]} diff --git 
a/python/paddle/fluid/tests/unittests/auto_parallel/engine_api.py b/python/paddle/fluid/tests/unittests/auto_parallel/engine_api.py index 38287e98a219a672fa7ba4429c2bdae31a9e800b..c09edf044256652f20d072a9f507a171efa8b650 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/engine_api.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/engine_api.py @@ -45,7 +45,6 @@ my_feed_vars = [] class MyDataset(Dataset): - def __init__(self, num_samples): super(MyDataset, self).__init__() self.num_samples = num_samples @@ -66,38 +65,38 @@ def get_random_inputs_and_labels(image_shape, label_shape): def batch_generator_creator(): - def __reader__(): for _ in range(batch_num): batch_input, batch_label = get_random_inputs_and_labels( - [batch_size, image_size], [batch_size, 1]) + [batch_size, image_size], [batch_size, 1] + ) yield batch_input, batch_label return __reader__ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -121,16 +120,20 @@ class MLPLayer(nn.Layer): def train_high_level(fetch): global is_fetch is_fetch = fetch - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() strategy = auto.Strategy() @@ -142,11 +145,13 @@ def train_high_level(fetch): train_dataset = MyDataset(batch_num * batch_size) eval_dataset1 = MyDataset(5 * batch_size) - history = engine.fit(train_data=train_dataset, - epochs=2, - batch_size=batch_size, - valid_data=eval_dataset1, - log_freq=1) + history = engine.fit( + train_data=train_dataset, + epochs=2, + batch_size=batch_size, + valid_data=eval_dataset1, + log_freq=1, + ) # eval eval_dataset2 = MyDataset(batch_size) @@ -165,16 +170,20 @@ def train_high_level(fetch): def train_low_level(): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = 
paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() strategy = auto.Strategy() @@ -189,18 +198,18 @@ def train_low_level(): # Build normal normal dataloader # train train_dataset = MyDataset(batch_num * batch_size) - train_dataloader = engine.dataloader(train_dataset, - batch_size=batch_size, - mode="train") + train_dataloader = engine.dataloader( + train_dataset, batch_size=batch_size, mode="train" + ) engine.prepare(mode="train") for data in train_dataloader: outs = engine.run(data, feed=feed_dict, mode="train") # eval eval_dataset2 = MyDataset(batch_size) - eval_dataloader = engine.dataloader(eval_dataset2, - batch_size=batch_size, - mode="eval") + eval_dataloader = engine.dataloader( + eval_dataset2, batch_size=batch_size, mode="eval" + ) engine.prepare(mode="eval") for data in eval_dataloader: outs = engine.run(data, feed=feed_dict, mode="eval") @@ -223,9 +232,9 @@ def train_low_level(): # Build dataloader from generator # train train_dataset = MyDataset(batch_num * batch_size) - train_dataloader = engine.dataloader_from_generator(train_dataset, - batch_size=batch_size, - mode="train") + train_dataloader = engine.dataloader_from_generator( + train_dataset, batch_size=batch_size, mode="train" + ) engine.prepare(mode="train") for data in train_dataloader: outs = engine.run(data, feed=feed_dict, mode="train") @@ -233,17 +242,18 @@ def train_low_level(): # eval engine.to_mode("eval") eval_dataset2 = MyDataset(batch_size) - eval_dataloader = engine.dataloader_from_generator(eval_dataset2, - batch_size=batch_size) + eval_dataloader = engine.dataloader_from_generator( + eval_dataset2, batch_size=batch_size + ) engine.prepare() for data in eval_dataloader: outs = engine.run(data, feed=feed_dict) # predict test_dataset = MyDataset(batch_size) - predict_dataloader = engine.dataloader_from_generator(test_dataset, - batch_size=batch_size, - mode="predict") + predict_dataloader = engine.dataloader_from_generator( + test_dataset, batch_size=batch_size, mode="predict" + ) engine.prepare(mode="predict") for data in predict_dataloader: outs = engine.run(data, feed=feed_dict, mode="predict") @@ -257,16 +267,20 @@ def train_low_level(): def train_builtin_data_vars(): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() strategy = auto.Strategy() @@ -284,9 +298,9 @@ def train_builtin_data_vars(): with static.program_guard(engine.main_program, engine.startup_program): feed_list = engine.inputs + engine.labels print(feed_list) - loader = paddle.io.DataLoader.from_generator(feed_list=feed_list, - capacity=4 * batch_size, - iterable=False) + loader = paddle.io.DataLoader.from_generator( + feed_list=feed_list, capacity=4 * batch_size, iterable=False + ) places = static.cuda_places() loader.set_batch_generator(batch_generator_creator(), places=places) @@ -297,36 +311,40 @@ def 
train_builtin_data_vars(): while True: engine.run() except paddle.fluid.core.EOFException: - loader.reset( - ) # call DataLoader.reset() after catching EOFException + loader.reset() # call DataLoader.reset() after catching EOFException def train_non_builtin_data_vars(): main_program = static.Program() startup_program = static.Program() - with static.program_guard(main_program, - startup_program), utils.unique_name.guard(): - input = static.data(name="input", - shape=[batch_size, image_size], - dtype='float32') + with static.program_guard( + main_program, startup_program + ), utils.unique_name.guard(): + input = static.data( + name="input", shape=[batch_size, image_size], dtype='float32' + ) label = static.data(name="label", shape=[batch_size, 1], dtype='int64') - loader = paddle.io.DataLoader.from_generator(feed_list=[input, label], - capacity=4 * batch_size, - iterable=False) + loader = paddle.io.DataLoader.from_generator( + feed_list=[input, label], capacity=4 * batch_size, iterable=False + ) places = static.cuda_places() loader.set_batch_generator(batch_generator_creator(), places=places) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() predict = mlp(input) loss_var = loss(predict, label) @@ -334,53 +352,58 @@ def train_non_builtin_data_vars(): strategy = auto.Strategy() strategy.auto_mode = "semi" - engine = auto.Engine(loss=loss_var, - optimizer=optimizer, - metrics=metric, - strategy=strategy) + engine = auto.Engine( + loss=loss_var, optimizer=optimizer, metrics=metric, strategy=strategy + ) # train engine.to_mode("train") - engine.prepare(inputs=[input], - labels=[label], - main_program=main_program, - startup_program=startup_program) + engine.prepare( + inputs=[input], + labels=[label], + main_program=main_program, + startup_program=startup_program, + ) for _ in range(epoch_num): loader.start() # call DataLoader.start() before each epoch starts try: while True: engine.run() except paddle.fluid.core.EOFException: - loader.reset( - ) # call DataLoader.reset() after catching EOFException + loader.reset() # call DataLoader.reset() after catching EOFException def get_cost(): main_program = static.default_main_program() startup_program = static.default_startup_program() - with static.program_guard(main_program, - startup_program), utils.unique_name.guard(): - input = static.data(name="input", - shape=[batch_size, image_size], - dtype='float32') + with static.program_guard( + main_program, startup_program + ), utils.unique_name.guard(): + input = static.data( + name="input", shape=[batch_size, image_size], dtype='float32' + ) label = static.data(name="label", shape=[batch_size, 1], dtype='int64') - loader = paddle.io.DataLoader.from_generator(feed_list=[input, label], - capacity=4 * batch_size, - iterable=False) + loader = paddle.io.DataLoader.from_generator( + feed_list=[input, label], capacity=4 * batch_size, iterable=False + ) places = static.cuda_places() loader.set_batch_generator(batch_generator_creator(), places=places) - mlp = MLPLayer(hidden_size=hidden_size, 
- intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() predict = mlp(input) loss_var = loss(predict, label) @@ -388,24 +411,27 @@ def get_cost(): strategy = auto.Strategy() strategy.auto_mode = "semi" - engine = auto.Engine(loss=loss_var, - optimizer=optimizer, - metrics=metric, - strategy=strategy) + engine = auto.Engine( + loss=loss_var, optimizer=optimizer, metrics=metric, strategy=strategy + ) engine.cost() def get_cost_by_spec(): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) metric = paddle.metric.Accuracy() strategy = auto.Strategy() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/engine_api_dp.py b/python/paddle/fluid/tests/unittests/auto_parallel/engine_api_dp.py index deb084a79fe59054adc34c9f336a707f03c277d8..4a04f9c5a6a2575f1b251eab5f7b217e05e8b986 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/engine_api_dp.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/engine_api_dp.py @@ -34,7 +34,6 @@ paddle.seed(44) class MyDataset(Dataset): - def __init__(self, num_samples): super(MyDataset, self).__init__() self.num_samples = num_samples @@ -49,27 +48,27 @@ class MyDataset(Dataset): class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -87,26 +86,28 @@ class MLPLayer(nn.Layer): def train(fetch): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + 
initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) dist_strategy = auto.Strategy() dist_strategy.auto_mode = "semi" # init engine - engine = auto.Engine(mlp, - loss, - optimizer, - paddle.metric.Accuracy(), - strategy=dist_strategy) + engine = auto.Engine( + mlp, loss, optimizer, paddle.metric.Accuracy(), strategy=dist_strategy + ) # train train_dataset = MyDataset(batch_num * batch_size) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/get_gpt_model.py b/python/paddle/fluid/tests/unittests/auto_parallel/get_gpt_model.py index 318773c71e09eb28e1a3a8423b738c6696b91fc7..178ba60f814234c6e43b2e6e4ddfc17b8fab85a6 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/get_gpt_model.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/get_gpt_model.py @@ -21,14 +21,17 @@ from paddle.distributed.fleet import auto sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) sequence_len = 512 vocab_size = 1000 class FakeDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples self.sequence_len = sequence_len @@ -40,8 +43,11 @@ class FakeDataset(paddle.io.Dataset): random.seed(2021) tokens = np.random.randint(self.vocab_size, size=self.sequence_len) position_ids = np.arange(self.sequence_len) - attention_mask = np.tril(np.ones(self.sequence_len)).reshape( - (1, self.sequence_len, self.sequence_len)).astype(np.float32) + attention_mask = ( + np.tril(np.ones(self.sequence_len)) + .reshape((1, self.sequence_len, self.sequence_len)) + .astype(np.float32) + ) labels = np.random.randint(self.vocab_size, size=self.sequence_len) loss_mask = np.ones(self.sequence_len).astype(np.float32) return tokens, position_ids, attention_mask, labels, loss_mask @@ -51,30 +57,32 @@ class FakeDataset(paddle.io.Dataset): def create_data_holder(batch_size): - tokens = paddle.static.InputSpec(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.InputSpec(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.InputSpec( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.InputSpec( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.InputSpec( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.InputSpec(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = paddle.static.InputSpec(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.InputSpec( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.InputSpec( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) return [tokens, position_ids, attention_mask], [labels, loss_mask] def generate_model(strategy): modeling.init_global() ranks = list(range(paddle.distributed.get_world_size())) - modeling._global_process_mesh = 
auto.ProcessMesh(mesh=ranks, - dim_names=["x"]) + modeling._global_process_mesh = auto.ProcessMesh( + mesh=ranks, dim_names=["x"] + ) if strategy == "serial": modeling._global_parallel_strategy = "serial" elif strategy == "mp": @@ -84,24 +92,25 @@ def generate_model(strategy): else: raise ValueError("Only support serial, mp2 and dp2.") - gpt = GPTModel(vocab_size=1000, - hidden_size=64, - num_hidden_layers=2, - num_attention_heads=8, - intermediate_size=256, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3) - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + gpt = GPTModel( + vocab_size=1000, + hidden_size=64, + num_hidden_layers=2, + num_attention_heads=8, + intermediate_size=256, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + ) + model = GPTForPretraining( + gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) criterion = GPTPretrainingCriterion() return model, criterion diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/gradient_merge_pass_unittest.py b/python/paddle/fluid/tests/unittests/auto_parallel/gradient_merge_pass_unittest.py index bdce36be605c1ba2938768c17a2cb513f1c8a57f..8ed91061fd4e3835ed96631c3f053beffbf0cab4 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/gradient_merge_pass_unittest.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/gradient_merge_pass_unittest.py @@ -43,7 +43,6 @@ def reset_prog(): class TestGradientMergePass(unittest.TestCase): - def setUp(self): self.rtol = 1e-5 self.atol = 1e-8 @@ -78,23 +77,23 @@ class TestGradientMergePass(unittest.TestCase): rtol=self.rtol, atol=self.atol, err_msg='pass {} has wrong results!, \nu={}\nv={}\ndiff={}'.format( - __class__, ref_losses, check_losses, ref_losses - check_losses)) + __class__, ref_losses, check_losses, ref_losses - check_losses + ), + ) def test_gradient_merge_pass(self): # dp2 training dp_engine = self.get_engine() - history = dp_engine.fit(self.dataset, - 3, - batch_size=self.batch_size, - log_freq=1) + history = dp_engine.fit( + self.dataset, 3, batch_size=self.batch_size, log_freq=1 + ) dp_losses = np.array(history.history["loss"]) # dp2 gradient merge training gm_engine = self.get_engine(True) - history = gm_engine.fit(self.dataset, - 3, - batch_size=self.batch_size, - log_freq=1) + history = gm_engine.fit( + self.dataset, 3, batch_size=self.batch_size, log_freq=1 + ) gm_losses = np.array(history.history["loss"]) # avg_loss = 0 diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/high_order_grad.py b/python/paddle/fluid/tests/unittests/auto_parallel/high_order_grad.py index 8e148719a569179b69dd79d2b3abee2280184223..1ba31ad2e1d0e96b728c9723bebaa9f486f39721 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/high_order_grad.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/high_order_grad.py @@ -22,7 +22,6 @@ paddle.seed(1234) class FCNet: - def __init__(self, num_ins, num_outs, num_layers, hidden_size): self.num_ins = num_ins self.num_outs = num_outs @@ -43,12 +42,12 @@ class FCNet: lsize = self.hidden_size rsize = self.hidden_size - w = paddle.static.create_parameter(shape=[lsize, rsize], - dtype="float32", - 
is_bias=False) - b = paddle.static.create_parameter(shape=[rsize], - dtype="float32", - is_bias=True) + w = paddle.static.create_parameter( + shape=[lsize, rsize], dtype="float32", is_bias=False + ) + b = paddle.static.create_parameter( + shape=[rsize], dtype="float32", is_bias=True + ) self.weights.append(w) self.biases.append(b) @@ -62,13 +61,14 @@ class FCNet: class LaplaceModel(paddle.nn.Layer): - def __init__(self, num_ins=2, num_outs=1, num_layers=5, hidden_size=20): super(LaplaceModel, self).__init__() - self.net = FCNet(num_ins=num_ins, - num_outs=num_outs, - num_layers=num_layers, - hidden_size=hidden_size) + self.net = FCNet( + num_ins=num_ins, + num_outs=num_outs, + num_layers=num_layers, + hidden_size=hidden_size, + ) def forward(self, inputs, bc_index): inputs.stop_gradient = False @@ -82,7 +82,6 @@ class LaplaceModel(paddle.nn.Layer): class LaplaceDataset(paddle.io.Dataset): - def __init__(self, num_sample): self.num_sample = num_sample @@ -126,10 +125,9 @@ def main(): dist_strategy = auto.Strategy() dist_strategy.auto_mode = "semi" - engine = auto.Engine(laplace, - loss=loss_func, - optimizer=optimizer, - strategy=dist_strategy) + engine = auto.Engine( + laplace, loss=loss_func, optimizer=optimizer, strategy=dist_strategy + ) engine.fit(train_dataset, train_sample_split=2, batch_size=None) dist_context = engine.dist_context diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/iterable_dataset.py b/python/paddle/fluid/tests/unittests/auto_parallel/iterable_dataset.py index d5eb88bc6b55ac3ee03e5a2037fa7123a449e337..b953471eef813305289249e3fa9ccafd3f3cb216 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/iterable_dataset.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/iterable_dataset.py @@ -36,7 +36,6 @@ paddle.seed(44) class MyDataset(paddle.io.IterableDataset): - def __init__(self, num_samples): self.num_samples = num_samples @@ -48,17 +47,18 @@ class MyDataset(paddle.io.IterableDataset): class MyDataset1(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples self.data = [] for i in range(self.num_samples): input1 = np.random.uniform(size=image_size).astype("float32") - label1 = np.array(np.random.randint(0, class_num - 1, - dtype="int64")) + label1 = np.array( + np.random.randint(0, class_num - 1, dtype="int64") + ) input2 = np.random.uniform(size=image_size).astype("float32") - label2 = np.array(np.random.randint(0, class_num - 1, - dtype="int64")) + label2 = np.array( + np.random.randint(0, class_num - 1, dtype="int64") + ) input = np.stack((input1, input2)) label = np.stack((label1, label2)) self.data.append((input, label)) @@ -71,27 +71,27 @@ class MyDataset1(paddle.io.Dataset): class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + 
self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -108,27 +108,29 @@ class MLPLayer(nn.Layer): def train(fetch): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) dist_strategy = auto.Strategy() dist_strategy.auto_mode = "semi" dist_strategy.split_data = True # init engine - engine = auto.Engine(mlp, - loss, - optimizer, - paddle.metric.Accuracy(), - strategy=dist_strategy) + engine = auto.Engine( + mlp, loss, optimizer, paddle.metric.Accuracy(), strategy=dist_strategy + ) # train train_dataset = MyDataset(batch_num * batch_size) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/optimization_tuner_api.py b/python/paddle/fluid/tests/unittests/auto_parallel/optimization_tuner_api.py index 3cc24a538fe081703010c9a9ad0c58c165a0a3d6..b05e27d4c543376588e1edd512651e8bb0447665 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/optimization_tuner_api.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/optimization_tuner_api.py @@ -31,27 +31,27 @@ paddle.seed(44) class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -68,16 +68,20 @@ class MLPLayer(nn.Layer): def train(fetch): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) dist_strategy = auto.Strategy() dist_strategy.auto_mode = "semi" @@ -97,15 
+101,13 @@ def train(fetch): tuning.verbose = True dataset = MyDataset(batch_num * batch_size) - engine = auto.Engine(mlp, - loss, - optimizer, - paddle.metric.Accuracy(), - strategy=dist_strategy) + engine = auto.Engine( + mlp, loss, optimizer, paddle.metric.Accuracy(), strategy=dist_strategy + ) engine._tune(dataset, batch_size=batch_size) # check tuned - assert (engine._dist_contexts['train'].strategy.sharding.stage != 3) + assert engine._dist_contexts['train'].strategy.sharding.stage != 3 if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/recompute_pass_unittest.py b/python/paddle/fluid/tests/unittests/auto_parallel/recompute_pass_unittest.py index 9c17af12d73de2eaf3722f983b0a06f7dd2ea6b1..1aa83f1a8c97864c0bdf1cb7fc53d71b30dec54e 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/recompute_pass_unittest.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/recompute_pass_unittest.py @@ -38,7 +38,6 @@ def reset_prog(): class TestRecomputePass(unittest.TestCase): - def setUp(self): self.rtol = 1e-6 self.atol = 1e-8 @@ -73,7 +72,9 @@ class TestRecomputePass(unittest.TestCase): rtol=self.rtol, atol=self.atol, err_msg='pass {} has wrong results!, \nu={}\nv={}\ndiff={}'.format( - __class__, ref_losses, check_losses, ref_losses - check_losses)) + __class__, ref_losses, check_losses, ref_losses - check_losses + ), + ) def test_recompute_pass(self): # mp2 training diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/sharding_pass_unittest.py b/python/paddle/fluid/tests/unittests/auto_parallel/sharding_pass_unittest.py index a549cd8e6d8eb5e02706f9e52b6f167e86969d81..80c2b74ade22610ea58658f0bba772083b953a8a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/sharding_pass_unittest.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/sharding_pass_unittest.py @@ -43,7 +43,6 @@ def reset_prog(): class TestShardingPass(unittest.TestCase): - def setUp(self): self.rtol = 1e-6 self.atol = 1e-8 @@ -78,7 +77,9 @@ class TestShardingPass(unittest.TestCase): rtol=self.rtol, atol=self.atol, err_msg='pass {} has wrong results!, \nu={}\nv={}\ndiff={}'.format( - __class__, ref_losses, check_losses, ref_losses - check_losses)) + __class__, ref_losses, check_losses, ref_losses - check_losses + ), + ) def test_sharding_pass(self): # dp2 training @@ -88,25 +89,25 @@ class TestShardingPass(unittest.TestCase): # sharding2 stage1 training sharding1_engine = self.get_engine(True, 1) - history = sharding1_engine.fit(self.dataset, - 3, - batch_size=self.batch_size) + history = sharding1_engine.fit( + self.dataset, 3, batch_size=self.batch_size + ) sharding1_losses = np.array(history.history["loss"]) self.check_results(dp_losses, sharding1_losses) # sharding2 stage2 training sharding2_engine = self.get_engine(True, 2) - history = sharding2_engine.fit(self.dataset, - 3, - batch_size=self.batch_size) + history = sharding2_engine.fit( + self.dataset, 3, batch_size=self.batch_size + ) sharding2_losses = np.array(history.history["loss"]) self.check_results(dp_losses, sharding2_losses) # sharding2 stage3 training sharding3_engine = self.get_engine(True, 3) - history = sharding3_engine.fit(self.dataset, - 3, - batch_size=self.batch_size) + history = sharding3_engine.fit( + self.dataset, 3, batch_size=self.batch_size + ) sharding3_losses = np.array(history.history["loss"]) self.check_results(dp_losses, sharding3_losses) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py 
b/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py index 53734e21d9a35e7320f9ef277485a28da5506274..42d309866c396384eb3d709b4acad1042ba0b4a6 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py @@ -93,7 +93,6 @@ mapping_josn = """ class TestAutoParallelReLaunch(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -101,10 +100,12 @@ class TestAutoParallelReLaunch(unittest.TestCase): self.temp_dir.cleanup() def test_relaunch(self): - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") - mapping_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_rank_mapping.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) + mapping_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_rank_mapping.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: @@ -115,20 +116,32 @@ class TestAutoParallelReLaunch(unittest.TestCase): json.dump(mapping_josn_object, mapping_josn_file) file_dir = os.path.dirname(os.path.abspath(__file__)) - launch_model_path = os.path.join(file_dir, - "auto_parallel_relaunch_model.py") + launch_model_path = os.path.join( + file_dir, "auto_parallel_relaunch_model.py" + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", "--branch", "-p"] else: coverage_args = [] - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--log_dir", self.temp_dir.name, - "--cluster_topo_path", cluster_json_path, "--rank_mapping_path", - mapping_json_path, "--enable_auto_mapping", "True", - launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--log_dir", + self.temp_dir.name, + "--cluster_topo_path", + cluster_json_path, + "--rank_mapping_path", + mapping_json_path, + "--enable_auto_mapping", + "True", + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() self.assertEqual(process.returncode, 0) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_base_cost.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_base_cost.py index 745351f3d7e470520959adf5afda51d1dd70f6c5..b03f3408d1f12be42f94a0163f2987411082ff2d 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_base_cost.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_base_cost.py @@ -28,10 +28,18 @@ from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed import fleet from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost.base_cost import build_comp_desc_from_dist_op -from paddle.distributed.auto_parallel.cost.base_cost import build_comm_desc_from_dist_op -from paddle.distributed.auto_parallel.cost.base_cost import build_comm_costs_from_descs -from paddle.distributed.auto_parallel.cost.base_cost import build_comp_costs_from_descs +from paddle.distributed.auto_parallel.cost.base_cost import ( + build_comp_desc_from_dist_op, +) +from paddle.distributed.auto_parallel.cost.base_cost import ( + build_comm_desc_from_dist_op, +) +from paddle.distributed.auto_parallel.cost.base_cost import ( + build_comm_costs_from_descs, +) +from 
paddle.distributed.auto_parallel.cost.base_cost import ( + build_comp_costs_from_descs, +) from paddle.distributed.auto_parallel.cost.base_cost import build_dp_costs from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost from paddle.distributed.auto_parallel.cost import _g_op_cost_factory @@ -39,33 +47,34 @@ from test_cluster import cluster_json paddle.enable_static() _global_parallel_strategy = "dp_mp_pp" -_global_process_mesh = auto.ProcessMesh([[[0, 1], [4, 5]], [[2, 3], [6, 7]]], - dim_names=["x", "y", "z"]) +_global_process_mesh = auto.ProcessMesh( + [[[0, 1], [4, 5]], [[2, 3], [6, 7]]], dim_names=["x", "y", "z"] +) PP_MESH_0 = auto.ProcessMesh([[0, 1], [4, 5]], dim_names=["x", "y"]) PP_MESH_1 = auto.ProcessMesh([[2, 3], [6, 7]], dim_names=["x", "y"]) class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -81,29 +90,33 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) fill_constant_out = paddle.fluid.layers.fill_constant_batch_size_like( - input=input, shape=[batch_size], value=1, dtype="int32") + input=input, shape=[batch_size], value=1, dtype="int32" + ) embedding = paddle.nn.Embedding(10, hidden_size, sparse=True) embedding_out = embedding(fill_constant_out) auto.shard_tensor(input, PP_MESH_0, ["x", None]) auto.shard_tensor(label, PP_MESH_1, ["x", None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(embedding_out) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -115,8 +128,9 @@ def mlp_forward(train_program, start_program): def get_prog(train_program, startup_program, dist_context, rank_id): global _global_process_mesh dist_context.process_mesh = _global_process_mesh - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = 
fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -126,19 +140,21 @@ def get_prog(train_program, startup_program, dist_context, rank_id): # serial forward & backward completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) return train_program, startup_program, params_grads class TestBaseCost(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -147,8 +163,9 @@ class TestBaseCost(unittest.TestCase): def test_base_cost(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -160,7 +177,8 @@ class TestBaseCost(unittest.TestCase): dist_context = DistributedContext() rank_id = 2 train_program, startup_program, params_grads = get_prog( - train_program, startup_program, dist_context, rank_id) + train_program, startup_program, dist_context, rank_id + ) for op in train_program.global_block().ops: dist_op = dist_context.get_dist_op_for_program(op) @@ -171,13 +189,15 @@ class TestBaseCost(unittest.TestCase): var_names = None if op.input_arg_names: var_names = op.input_arg_names[0] - comm_descs = build_comm_desc_from_dist_op("c_allreduce_sum", - dist_op, - dist_context, - var_names, - attrs=None, - parallel_axis=0, - group_ranks=None) + comm_descs = build_comm_desc_from_dist_op( + "c_allreduce_sum", + dist_op, + dist_context, + var_names, + attrs=None, + parallel_axis=0, + group_ranks=None, + ) self.assertTrue(isinstance(comm_descs, dict) and comm_descs) comm_descs = build_comm_desc_from_dist_op( "c_allreduce_sum", @@ -186,22 +206,38 @@ class TestBaseCost(unittest.TestCase): var_names, attrs=None, parallel_axis=None, - group_ranks=processes) + group_ranks=processes, + ) self.assertTrue(isinstance(comm_descs, dict) and comm_descs) comm_costs = build_comm_costs_from_descs( - AllreduceSumOpCost, dist_context, processes, comm_descs, - cluster) + AllreduceSumOpCost, + dist_context, + processes, + comm_descs, + cluster, + ) self.assertTrue(comm_costs) comp_costs = build_comp_costs_from_descs( - _g_op_cost_factory[op.type], dist_context, processes, - comp_descs, cluster) + _g_op_cost_factory[op.type], + dist_context, + processes, + comp_descs, + cluster, + ) self.assertTrue(comp_costs) result = [] - build_dp_costs(result, dist_op, dist_context, var_names[0], - None, 0, cluster) + build_dp_costs( + result, + dist_op, + dist_context, + var_names[0], + None, + 0, + cluster, + ) self.assertTrue(result) # Remove unnecessary files diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster.py index 1aeed0c72ef7b427a3b0c15558506af4d05828f8..f5c4d53c060166ef75ebb6c52846a51d2ddd533f 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster.py +++ 
b/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster.py @@ -1968,7 +1968,6 @@ multi_cluster_json = """{ class TestCluster(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -1977,8 +1976,9 @@ class TestCluster(unittest.TestCase): def test_single_machine(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster_single.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster_single.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: @@ -2003,8 +2003,9 @@ class TestCluster(unittest.TestCase): def test_multi_machine(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster_multi.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster_multi.json" + ) cluster_json_object = json.loads(multi_cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster_v2.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster_v2.py index acadc4842a2136176ccc2eee4a3dbb05ac753598..832cdf75f92242251994b36cfb7d405f9fc19e6d 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster_v2.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_cluster_v2.py @@ -19,7 +19,6 @@ from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh class TestDeviceMesh(unittest.TestCase): - def test_device_mesh(self): name = "my_device_mesh" mesh = [[0, 1, 2], [3, 4, 5]] @@ -77,11 +76,11 @@ class TestDeviceMesh(unittest.TestCase): self.assertEqual(device_mesh.machine(0).device(2), dev2) self.assertEqual(device_mesh.machine(1).link(3, 4), link2) self.assertEqual( - device_mesh.machine(0).devices, - device_mesh.machine(0).devices) + device_mesh.machine(0).devices, device_mesh.machine(0).devices + ) self.assertEqual( - device_mesh.machine(0).links, - device_mesh.machine(0).links) + device_mesh.machine(0).links, device_mesh.machine(0).links + ) self.assertEqual(device_mesh.device_type, "GPU") self.assertEqual(device_mesh.devices, device_mesh.devices) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py index 5744cf6d39206b70e71f3e22fb48576ff367877f..32b180ffdc16a8011b5980f8d0e58ee3a20d734c 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py @@ -32,7 +32,6 @@ from test_cluster import cluster_json, multi_cluster_json class TestCommOpCost(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -41,8 +40,9 @@ class TestCommOpCost(unittest.TestCase): def test_comm_cost(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster0.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster0.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -55,54 +55,72 @@ class TestCommOpCost(unittest.TestCase): comm_context = CommContext(cluster) # Check AllreduceSumCost 128MB ring cost - allreduce_sum_op_desc = build_comm_desc("c_allreduce_sum", - [0, 1, 2, 3, 4, 5, 6, 7], - paddle.float32, - [1, 32 * (10**6)]) + 
allreduce_sum_op_desc = build_comm_desc( + "c_allreduce_sum", + [0, 1, 2, 3, 4, 5, 6, 7], + paddle.float32, + [1, 32 * (10**6)], + ) allreduce_sum_op_cost = AllreduceSumOpCost( - op_desc=allreduce_sum_op_desc, comm_context=comm_context) + op_desc=allreduce_sum_op_desc, comm_context=comm_context + ) # Check AllgatherOpCost cost - allgather_op_desc = build_comm_desc("c_allgather", - [0, 1, 2, 3, 4, 5, 6, 7], - paddle.float32, [1, 32 * (10**6)]) - allgather_op_cost = AllgatherOpCost(op_desc=allgather_op_desc, - comm_context=comm_context) + allgather_op_desc = build_comm_desc( + "c_allgather", + [0, 1, 2, 3, 4, 5, 6, 7], + paddle.float32, + [1, 32 * (10**6)], + ) + allgather_op_cost = AllgatherOpCost( + op_desc=allgather_op_desc, comm_context=comm_context + ) self.assertTrue(allgather_op_cost.time > 0) # Check BroadcastOpCost cost - broadcast_op_desc = build_comm_desc("c_broadcast", - [0, 1, 2, 3, 4, 5, 6, 7], - paddle.float32, [1, 32 * (10**6)]) - broadcast_op_cost = BroadcastOpCost(op_desc=broadcast_op_desc, - comm_context=comm_context) + broadcast_op_desc = build_comm_desc( + "c_broadcast", + [0, 1, 2, 3, 4, 5, 6, 7], + paddle.float32, + [1, 32 * (10**6)], + ) + broadcast_op_cost = BroadcastOpCost( + op_desc=broadcast_op_desc, comm_context=comm_context + ) self.assertTrue(broadcast_op_cost.time > 0) # Check SendOpCost cost - send_op_desc = build_comm_desc("send_v2", [0, 1], paddle.float32, - [1, 32 * (10**6)]) - send_op_cost = SendOpCost(op_desc=send_op_desc, - comm_context=comm_context) + send_op_desc = build_comm_desc( + "send_v2", [0, 1], paddle.float32, [1, 32 * (10**6)] + ) + send_op_cost = SendOpCost( + op_desc=send_op_desc, comm_context=comm_context + ) self.assertTrue(send_op_cost.time > 0) # Check RecvOpCost cost - recv_op_desc = build_comm_desc("recv_v2", [0, 1], paddle.float32, - [1, 32 * (10**6)]) - recv_op_cost = RecvOpCost(op_desc=recv_op_desc, - comm_context=comm_context) + recv_op_desc = build_comm_desc( + "recv_v2", [0, 1], paddle.float32, [1, 32 * (10**6)] + ) + recv_op_cost = RecvOpCost( + op_desc=recv_op_desc, comm_context=comm_context + ) self.assertTrue(recv_op_cost.time > 0) # Check IdentityOpCost cost - identity_op_desc = build_comm_desc("c_identity", [0, 1], paddle.float32, - [1, 32 * (10**6)]) - identity_op_cost = IdentityOpCost(op_desc=identity_op_desc, - comm_context=comm_context) + identity_op_desc = build_comm_desc( + "c_identity", [0, 1], paddle.float32, [1, 32 * (10**6)] + ) + identity_op_cost = IdentityOpCost( + op_desc=identity_op_desc, comm_context=comm_context + ) self.assertTrue(identity_op_cost.time >= 0) def test_cross_machine_comm_cost(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster1.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster1.json" + ) cluster_json_object = json.loads(multi_cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -118,40 +136,53 @@ class TestCommOpCost(unittest.TestCase): allreduce_sum_op_desc = build_comm_desc( "c_allreduce_sum", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - paddle.float32, [1, 32 * (10**6)]) + paddle.float32, + [1, 32 * (10**6)], + ) allreduce_sum_op_cost = AllreduceSumOpCost( - op_desc=allreduce_sum_op_desc, comm_context=comm_context) + op_desc=allreduce_sum_op_desc, comm_context=comm_context + ) # Check AllgatherOpCost cost allgather_op_desc = build_comm_desc( "c_allgather", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - 
paddle.float32, [1, 32 * (10**6)]) - allgather_op_cost = AllgatherOpCost(op_desc=allgather_op_desc, - comm_context=comm_context) + paddle.float32, + [1, 32 * (10**6)], + ) + allgather_op_cost = AllgatherOpCost( + op_desc=allgather_op_desc, comm_context=comm_context + ) self.assertTrue(allgather_op_cost.time > 0) # Check BroadcastOpCost cost broadcast_op_desc = build_comm_desc( "c_broadcast", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - paddle.float32, [1, 32 * (10**6)]) - broadcast_op_cost = BroadcastOpCost(op_desc=broadcast_op_desc, - comm_context=comm_context) + paddle.float32, + [1, 32 * (10**6)], + ) + broadcast_op_cost = BroadcastOpCost( + op_desc=broadcast_op_desc, comm_context=comm_context + ) self.assertTrue(broadcast_op_cost.time > 0) # Check SendOpCost cost - send_op_desc = build_comm_desc("send_v2", [0, 1], paddle.float32, - [1, 32 * (10**6)]) - send_op_cost = SendOpCost(op_desc=send_op_desc, - comm_context=comm_context) + send_op_desc = build_comm_desc( + "send_v2", [0, 1], paddle.float32, [1, 32 * (10**6)] + ) + send_op_cost = SendOpCost( + op_desc=send_op_desc, comm_context=comm_context + ) self.assertTrue(send_op_cost.time > 0) # Check RecvOpCost cost - recv_op_desc = build_comm_desc("recv_v2", [0, 1], paddle.float32, - [1, 32 * (10**6)]) - recv_op_cost = RecvOpCost(op_desc=recv_op_desc, - comm_context=comm_context) + recv_op_desc = build_comm_desc( + "recv_v2", [0, 1], paddle.float32, [1, 32 * (10**6)] + ) + recv_op_cost = RecvOpCost( + op_desc=recv_op_desc, comm_context=comm_context + ) self.assertTrue(recv_op_cost.time > 0) # Remove unnecessary files diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py index f57b7d8631115f9b39b190ead523825ddfddd4f9..8520654f37e932bba5c981746c2648f80a62a24e 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_comp_cost.py @@ -20,56 +20,100 @@ from paddle.distributed.auto_parallel.cluster import Cluster from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignValueOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchDecodeOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + BeamSearchDecodeOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import CastOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import ConcatOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseAddOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseAddGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseDivOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseDivGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseMulOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseMulGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ElementwiseSubOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseAddOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseAddGradOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseDivOpCost, +) +from 
paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseDivGradOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseMulOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseMulGradOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ElementwiseSubOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import FillConstantOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import FillConstantBatchSizeLikeOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + EmbeddingGradOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + FillConstantOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + FillConstantBatchSizeLikeOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import GatherOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import GreaterEqualOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + GreaterEqualOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import IncrementOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import IsEmptyOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormGradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + LayerNormGradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import LessThanOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalNotOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalAndOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import LodResetOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import LogOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2OpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2GradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + LookupTableV2OpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + LookupTableV2GradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2OpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2GradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + MatmulV2GradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import MemcpyOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import MulOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import MulGradOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import OneHotOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ReadFromArrayOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ReadFromArrayOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceSumOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceSumGradOpCost +from 
paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ReduceSumGradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import Reshape2OpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import Reshape2GradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + Reshape2GradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceMeanOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceMeanGradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + ReduceMeanGradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import SamplingIdOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import ScaleOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import SliceOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxWithCrossEntropyOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxWithCrossEntropyGradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + SoftmaxWithCrossEntropyOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + SoftmaxWithCrossEntropyGradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import SplitOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import Squeeze2OpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareOpCost @@ -77,18 +121,25 @@ from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareGradOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import SumOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import TopKOpCost from paddle.distributed.auto_parallel.cost.comp_op_cost import Transpose2OpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import Transpose2GradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + Transpose2GradOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import Unsqueeze2OpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import WriteToArrayOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + WriteToArrayOpCost, +) from paddle.distributed.auto_parallel.cost.comp_op_cost import DropoutGradOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import FusedSoftmaxMaskUpperTriangleOpCost -from paddle.distributed.auto_parallel.cost.comp_op_cost import FusedSoftmaxMaskUpperTriangleGradOpCost +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + FusedSoftmaxMaskUpperTriangleOpCost, +) +from paddle.distributed.auto_parallel.cost.comp_op_cost import ( + FusedSoftmaxMaskUpperTriangleGradOpCost, +) from test_cluster import cluster_json class TestCompOpCost(unittest.TestCase): - def test_comp_cost(self): # Build cluster file_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_conditional_block_reshard.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_conditional_block_reshard.py index 86371cbae64366c65dc549976cec0c22554661b0..e7ecdece944e1bd1bdee4105efe954aa4c2adfcb 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_conditional_block_reshard.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_conditional_block_reshard.py @@ -23,36 +23,44 @@ from 
paddle.distributed.fleet import auto class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=64, - intermediate_size=4 * 64, - initializer_range=0.02): + def __init__( + self, hidden_size=64, intermediate_size=4 * 64, initializer_range=0.02 + ): super(MLPLayer, self).__init__() self.norm = nn.LayerNorm(hidden_size, epsilon=1e-5) self.linear0 = nn.Linear( hidden_size, intermediate_size, - paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range)), - bias_attr=None) + paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ) + ), + bias_attr=None, + ) self.linear1 = nn.Linear( intermediate_size, hidden_size, - paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range)), - bias_attr=None) + paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ) + ), + bias_attr=None, + ) def forward(self, input): out = self.norm(input) - auto.shard_tensor(self.linear0.weight, auto.ProcessMesh([0, 1], "x"), - [None, "x"]) + auto.shard_tensor( + self.linear0.weight, auto.ProcessMesh([0, 1], "x"), [None, "x"] + ) out = self.linear0(out) out = F.gelu(out, approximate=True) - auto.shard_tensor(self.linear1.weight, auto.ProcessMesh([0, 1], "x"), - ["x", None]) + auto.shard_tensor( + self.linear1.weight, auto.ProcessMesh([0, 1], "x"), ["x", None] + ) out = self.linear1(out) if paddle.mean(out) < 2: @@ -75,7 +83,6 @@ def loss_fn(predict, label): class TestSubblock(unittest.TestCase): - def test_subblock(self): mlp = MLPLayer() @@ -87,9 +94,9 @@ class TestSubblock(unittest.TestCase): input_sepc = InputSpec([4, 64], 'float32', 'input') label_spec = InputSpec([4, 1], 'float32', 'label') - engine.prepare(inputs_spec=[input_sepc], - labels_spec=[label_spec], - mode="predict") + engine.prepare( + inputs_spec=[input_sepc], labels_spec=[label_spec], mode="predict" + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_converter.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_converter.py index 7c838513d798716e83dcd8b316170b180a6d77f9..4b8b964ed43364c43edb93e4fe6f5ef838cea68a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_converter.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_converter.py @@ -21,7 +21,6 @@ from paddle.distributed.auto_parallel.converter import Converter class TestConverter(unittest.TestCase): - def test_converter(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "converter.py") @@ -32,10 +31,19 @@ class TestConverter(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() @@ -57,7 +65,7 @@ class TestConverter(unittest.TestCase): 'tmp_0': { "process_shape": [1], "process_group": [0], - "dims_mapping": [-1] + "dims_mapping": [-1], } } with self.assertRaises(TypeError): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_assign.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_assign.py index b21dd606d8cb7a41eaf18d829118b3c700170e27..39d146c2e7f888f09c799d04da07adeeb76a7dae 100644 --- 
a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_assign.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_assign.py @@ -26,8 +26,9 @@ def make_program(): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') y = paddle.static.data(name='y', shape=[4, 4, 8], dtype='float32') - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["d"]), - [None, "d", None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["d"]), [None, "d", None] + ) z = paddle.add(x, y) paddle.assign(x, output=z) @@ -48,14 +49,14 @@ def parallelizer(program_func, rank): dist_context.block_state.parse_forward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistAssign(unittest.TestCase): - def test_dist_assign(self): dist_main_prog, dist_context = parallelizer(make_program, 0) @@ -72,9 +73,11 @@ class TestDistAssign(unittest.TestCase): dist_out = dist_context.get_dist_tensor_for_program(out_var) x_dims_mapping = dist_op.dist_attr.get_input_dims_mapping( - x_name) + x_name + ) out_dims_mapping = dist_op.dist_attr.get_output_dims_mapping( - out_name) + out_name + ) assert x_dims_mapping == out_dims_mapping assert out_dims_mapping == dist_out.dist_attr.dims_mapping diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_attr_v2.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_attr_v2.py index 9977fb4654aa8719fd07f359307db3cb4fa708ba..acff242fd8c34e414ca5af040fd213d3c8dc405f 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_attr_v2.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_attr_v2.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestDistAttr(unittest.TestCase): - def test_tensor_dist_attr_ctor(self): train_program = static.Program() start_program = static.Program() @@ -40,8 +39,9 @@ class TestDistAttr(unittest.TestCase): dist_attr.dims_mapping = [0, -1] dist_attr.batch_dim = 1 dist_attr.dynamic_dims = [1, 1] - self.assertEqual(dist_attr.process_mesh, - ProcessMesh([[0, 1, 2], [3, 4, 5]])) + self.assertEqual( + dist_attr.process_mesh, ProcessMesh([[0, 1, 2], [3, 4, 5]]) + ) self.assertEqual(dist_attr.dims_mapping, [0, -1]) self.assertEqual(dist_attr.batch_dim, 1) self.assertEqual(dist_attr.dynamic_dims, [1, 1]) @@ -59,16 +59,18 @@ class TestDistAttr(unittest.TestCase): dist_attr.dims_mapping = [0, -1] dist_attr.batch_dim = 1 dist_attr.dynamic_dims = [1, 1] - self.assertEqual(input.dist_attr.process_mesh, - ProcessMesh([[0, 1, 2], [3, 4, 5]])) + self.assertEqual( + input.dist_attr.process_mesh, ProcessMesh([[0, 1, 2], [3, 4, 5]]) + ) self.assertEqual(input.dist_attr.dims_mapping, [0, -1]) self.assertEqual(input.dist_attr.batch_dim, 1) self.assertEqual(input.dist_attr.dynamic_dims, [1, 1]) self.assertTrue(input.dist_attr.verify()) input1.dist_attr = dist_attr - self.assertEqual(input1.dist_attr.process_mesh, - ProcessMesh([[0, 1, 2], [3, 4, 5]])) + self.assertEqual( + input1.dist_attr.process_mesh, ProcessMesh([[0, 1, 2], [3, 4, 5]]) + ) self.assertEqual(input1.dist_attr.dims_mapping, [0, -1]) self.assertEqual(input1.dist_attr.batch_dim, 1) self.assertEqual(input1.dist_attr.dynamic_dims, [1, 1]) @@ -100,19 +102,24 @@ class TestDistAttr(unittest.TestCase): op_dist_attr.set_output_dist_attr(output.name, output_dist_attr) 
self.assertEqual(op_dist_attr.process_mesh, process_mesh) self.assertEqual( - op_dist_attr.input_dist_attr(input.name).process_mesh, process_mesh) + op_dist_attr.input_dist_attr(input.name).process_mesh, process_mesh + ) self.assertEqual( - op_dist_attr.input_dist_attr(input1.name).process_mesh, - process_mesh) + op_dist_attr.input_dist_attr(input1.name).process_mesh, process_mesh + ) self.assertEqual( op_dist_attr.output_dist_attr(output.name).process_mesh, - process_mesh) + process_mesh, + ) self.assertEqual( - op_dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1]) + op_dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1] + ) self.assertEqual( - op_dist_attr.input_dist_attr(input1.name).dims_mapping, [-1, 1]) + op_dist_attr.input_dist_attr(input1.name).dims_mapping, [-1, 1] + ) self.assertEqual( - op_dist_attr.output_dist_attr(output.name).dims_mapping, [0, 1]) + op_dist_attr.output_dist_attr(output.name).dims_mapping, [0, 1] + ) self.assertTrue(op_dist_attr.verify()) self.assertTrue(str(op_dist_attr), str(op_dist_attr)) @@ -164,19 +171,23 @@ class TestDistAttr(unittest.TestCase): self.assertEqual(op.desc.dist_attr.process_mesh, process_mesh) self.assertEqual( - op.dist_attr.input_dist_attr(input.name).process_mesh, process_mesh) + op.dist_attr.input_dist_attr(input.name).process_mesh, process_mesh + ) self.assertEqual( - op.dist_attr.input_dist_attr(input1.name).process_mesh, - process_mesh) + op.dist_attr.input_dist_attr(input1.name).process_mesh, process_mesh + ) self.assertEqual( - op.dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1]) + op.dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1] + ) self.assertEqual( - op.dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1]) + op.dist_attr.input_dist_attr(input.name).dims_mapping, [0, -1] + ) self.assertEqual( - op.desc.dist_attr.input_dist_attr(input1.name).dims_mapping, - [-1, 1]) + op.desc.dist_attr.input_dist_attr(input1.name).dims_mapping, [-1, 1] + ) self.assertEqual( - op.dist_attr.output_dist_attr(output.name).dims_mapping, [0, 1]) + op.dist_attr.output_dist_attr(output.name).dims_mapping, [0, 1] + ) self.assertTrue(op.desc.dist_attr.verify()) self.assertTrue(str(op_dist_attr), str(op_dist_attr)) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_context.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_context.py index ea24f65479561e9274771d1f25f16eae5d754a2d..c872f9e9d0c4ff7f20bb4c80753efc2fb137955a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_context.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_context.py @@ -32,7 +32,7 @@ hidden_size = 1024 sequence_len = 512 _g_process_mesh = [ auto.ProcessMesh([0, 1], dim_names=["x"]), - auto.ProcessMesh([2, 3], dim_names=["x"]) + auto.ProcessMesh([2, 3], dim_names=["x"]), ] @@ -43,41 +43,45 @@ def get_random_inputs_and_labels(input_shape, label_shape): def batch_generator_creator(): - def __reader__(): for _ in range(batch_size): batch_input, batch_label = get_random_inputs_and_labels( [batch_size, sequence_len, hidden_size], - [batch_size, sequence_len, 1]) + [batch_size, sequence_len, 1], + ) yield batch_input, batch_label return __reader__ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = 
hidden_size dim_feedforward = intermediate_size - param_initializer = nn.initializer.Normal(mean=0.0, - std=initializer_range) + param_initializer = nn.initializer.Normal( + mean=0.0, std=initializer_range + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.linear0 = nn.Linear( d_model, dim_feedforward, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) self.linear1 = nn.Linear( dim_feedforward, d_model, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) def forward(self, input): out = self.norm(input) @@ -99,78 +103,106 @@ def get_program(): start_program = static.Program() with static.program_guard(train_program, start_program): # input - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) data_holder = [input, label] # dataloader - dataloader = paddle.io.DataLoader.from_generator(feed_list=data_holder, - capacity=4 * - batch_size, - iterable=False) - dataloader.set_batch_generator(batch_generator_creator(), - places=paddle.static.cuda_places()) + dataloader = paddle.io.DataLoader.from_generator( + feed_list=data_holder, capacity=4 * batch_size, iterable=False + ) + dataloader.set_batch_generator( + batch_generator_creator(), places=paddle.static.cuda_places() + ) # data dist_attr auto.shard_tensor(input, _g_process_mesh[0], ["x", None, None]) auto.shard_tensor(label, _g_process_mesh[0], ["x", None, None]) - mlp_start = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_start = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_start(input) - mlp_mid = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_mid = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_mid(pred) - mlp_end = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_end = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_end(pred) error_cost = paddle.nn.functional.square_error_cost(pred, label) loss = paddle.mean(error_cost) - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) feed_vars = {"inputs": [input], "labels": [label]} fetch_vars = {"loss": [loss]} - return train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars + return ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + fetch_vars, + ) class TestDistributedContext(unittest.TestCase): - def test_backup_restore(self): - train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program( + ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + 
fetch_vars, + ) = get_program() + dist_context = DistributedContext( + train_program, start_program, optimizer, loss, feed_vars, fetch_vars ) - dist_context = DistributedContext(train_program, start_program, - optimizer, loss, feed_vars, - fetch_vars) dist_context.initialize() dist_context._backup(serial=True, dist=True) - dist_context._restore(serial=True, - serial_mode="to_backup", - dist=True, - dist_mode="to_backup") + dist_context._restore( + serial=True, + serial_mode="to_backup", + dist=True, + dist_mode="to_backup", + ) dist_context._backup(serial=True, dist=True) - dist_context._restore(serial=True, - serial_mode="to_original", - dist=True, - dist_mode="to_original") + dist_context._restore( + serial=True, + serial_mode="to_original", + dist=True, + dist_mode="to_original", + ) dist_context._backup(serial=True, dist=True) dist_context._restore(serial=True, dist=True, dist_mode="to_default") @@ -179,25 +211,44 @@ class TestDistributedContext(unittest.TestCase): dist_context._restore(serial=True, dist=True, dist_mode="to_nothing") def test_deepcopy(self): - train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program( + ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + fetch_vars, + ) = get_program() + dist_context = DistributedContext( + train_program, start_program, optimizer, loss, feed_vars, fetch_vars ) - dist_context = DistributedContext(train_program, start_program, - optimizer, loss, feed_vars, - fetch_vars) dist_context.initialize() copy_dist_context = copy.deepcopy(dist_context) copy_list = [ - "_original_serial_main_program", "_original_serial_startup_program", \ - "_serial_main_program", "_serial_startup_program", "_serial_graph", \ - "_dist_main_programs", "_dist_startup_programs", \ - "_serial_ordered_nodes", "_serial_ordered_tensor_nodes", \ - "_serial_ordered_op_nodes", "_original_serial_loss", \ - "_original_serial_feed_vars", "_original_serial_fetch_vars", \ - "_serial_loss", "_serial_feed_vars", "_serial_fetch_vars", "_serial_optimizer", \ - "_backup_serial_main_program_stack", "_backup_serial_startup_program_stack", \ - "_pass_context"] + "_original_serial_main_program", + "_original_serial_startup_program", + "_serial_main_program", + "_serial_startup_program", + "_serial_graph", + "_dist_main_programs", + "_dist_startup_programs", + "_serial_ordered_nodes", + "_serial_ordered_tensor_nodes", + "_serial_ordered_op_nodes", + "_original_serial_loss", + "_original_serial_feed_vars", + "_original_serial_fetch_vars", + "_serial_loss", + "_serial_feed_vars", + "_serial_fetch_vars", + "_serial_optimizer", + "_backup_serial_main_program_stack", + "_backup_serial_startup_program_stack", + "_pass_context", + ] for i in range(len(copy_list)): copy_obj = "copy_dist_context." 
+ copy_list[i] diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_embedding.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_embedding.py index 034a2cf1892355530c0fa7af5f3f8d872e6957a0..c302cb7e8179d558f9af46bdbcfa73cb8fbf61b4 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_embedding.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_embedding.py @@ -27,36 +27,41 @@ def make_program_lookup_table_v1_mp_dp(): block = main_program.global_block() with paddle.static.program_guard(main_program, start_program): - src_ids = paddle.static.data(name='src_ids', - shape=[12, 512, 1], - dtype='int64') + src_ids = paddle.static.data( + name='src_ids', shape=[12, 512, 1], dtype='int64' + ) src_ids.stop_gradient = True emb_out = paddle.fluid.layers.embedding( input=src_ids, size=[64, 128], param_attr=paddle.fluid.ParamAttr(name="emb_weight"), dtype="float32", - is_sparse=False) + is_sparse=False, + ) loss = paddle.fluid.layers.reduce_mean(emb_out) auto.shard_tensor( - src_ids, auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), - ["x", None, None]) + src_ids, + auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), + ["x", None, None], + ) emb_weight = block.vars["emb_weight"] auto.shard_tensor( - emb_weight, auto.ProcessMesh([[0, 1], [2, 3]], - dim_names=["x", "y"]), ["y", None]) + emb_weight, + auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), + ["y", None], + ) return main_program, start_program, loss class TestDistPNorm(unittest.TestCase): - def test_lookup_table_v1_mp_dp(self): for rank in range(4): dist_main_prog, dist_context = parallelizer( - make_program_lookup_table_v1_mp_dp, rank) + make_program_lookup_table_v1_mp_dp, rank + ) ops = dist_main_prog.global_block().ops op_types = [] @@ -64,9 +69,16 @@ class TestDistPNorm(unittest.TestCase): op_types.append(op.type) assert op_types == [ - 'reshape2', 'c_embedding', 'c_allreduce_sum', 'reduce_mean', - 'fill_constant', 'reduce_mean_grad', 'c_identity', - 'c_embedding_grad', 'c_allreduce_sum', 'scale' + 'reshape2', + 'c_embedding', + 'c_allreduce_sum', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'c_identity', + 'c_embedding_grad', + 'c_allreduce_sum', + 'scale', ] diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_matmul.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_matmul.py index 6ca0619800e93dc8dbfbf6cdbf5b73a7feb1f174..c53113ae76cf8e4742227ebaf01d8811f182fed4 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_matmul.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_matmul.py @@ -84,10 +84,9 @@ def matmul_dp2mp2(init_x, init_y, trans_x, trans_y): y = init_y(trans_y) x.stop_gradient = False y.stop_gradient = False - out = paddle.fluid.layers.matmul(x, - y, - transpose_x=trans_x, - transpose_y=trans_y) + out = paddle.fluid.layers.matmul( + x, y, transpose_x=trans_x, transpose_y=trans_y + ) loss = paddle.mean(out) return main_program, start_program, loss @@ -123,19 +122,23 @@ def parallelizer(program_func, *args, **kwargs): dist_context.block_state.parse_backward_blocks(main_program) partitioner = Partitioner(dist_context, 0) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistMatmul(unittest.TestCase): - def check_col_program(self, main_program, dist_ctx): # [0, -1] * [-1, 1] --> [0, 1] ref_ops 
= [ - "c_identity", "matmul", "reduce_mean", "fill_constant", - "reduce_mean_grad", "matmul_grad" + "c_identity", + "matmul", + "reduce_mean", + "fill_constant", + "reduce_mean_grad", + "matmul_grad", ] ops = [] block = main_program.global_block() @@ -148,10 +151,12 @@ class TestDistMatmul(unittest.TestCase): assert op_dist_attr.impl_idx == 0 assert op_dist_attr.impl_type == "matmul" out_dims_mapping = op_dist_attr.get_output_dims_mapping( - out_name) + out_name + ) assert out_dims_mapping == [0, 1] tensor_dist_attr = dist_ctx.get_tensor_dist_attr_for_program( - out_var) + out_var + ) assert tensor_dist_attr.dims_mapping == [0, 1] if op.type == "matmul_grad": op_dist_attr = dist_ctx.get_op_dist_attr_for_program(op) @@ -163,8 +168,12 @@ class TestDistMatmul(unittest.TestCase): def check_row_program(self, main_program, dist_ctx): # [0, -1, 1] * [1, -1] --> [0, -1, -1] ref_ops = [ - "matmul", "c_allreduce_sum", "reduce_mean", "fill_constant", - "reduce_mean_grad", "matmul_grad" + "matmul", + "c_allreduce_sum", + "reduce_mean", + "fill_constant", + "reduce_mean_grad", + "matmul_grad", ] ops = [] block = main_program.global_block() @@ -177,10 +186,12 @@ class TestDistMatmul(unittest.TestCase): assert op_dist_attr.impl_idx == 1 assert op_dist_attr.impl_type == "matmul" out_dims_mapping = op_dist_attr.get_output_dims_mapping( - out_name) + out_name + ) assert out_dims_mapping == [0, -1, -1] tensor_dist_attr = dist_ctx.get_tensor_dist_attr_for_program( - out_var) + out_var + ) assert tensor_dist_attr.dims_mapping == [0, -1, -1] if op.type == "matmul_grad": op_dist_attr = dist_ctx.get_op_dist_attr_for_program(op) @@ -190,10 +201,10 @@ class TestDistMatmul(unittest.TestCase): class TestDistMatmulCol(TestDistMatmul): - def init(self, trans_x, trans_y): - dist_main_prog, dist_ctx = parallelizer(matmul_dp2mp2, init_x_col, - init_y_col, trans_x, trans_y) + dist_main_prog, dist_ctx = parallelizer( + matmul_dp2mp2, init_x_col, init_y_col, trans_x, trans_y + ) return dist_main_prog, dist_ctx def test_matmul_col(self): @@ -214,10 +225,10 @@ class TestDistMatmulCol(TestDistMatmul): class TestDistMatmulRow(TestDistMatmul): - def init(self, trans_x, trans_y): - dist_main_prog, dist_ctx = parallelizer(matmul_dp2mp2, init_x_row, - init_y_row, trans_x, trans_y) + dist_main_prog, dist_ctx = parallelizer( + matmul_dp2mp2, init_x_row, init_y_row, trans_x, trans_y + ) return dist_main_prog, dist_ctx def test_matmul_row(self): @@ -238,12 +249,15 @@ class TestDistMatmulRow(TestDistMatmul): class TestDistMatmulV2(unittest.TestCase): - def check_col_program(self, main_program, dist_ctx): # [0, -1] * [-1, 1] --> [0, 1] ref_ops = [ - "c_identity", "matmul_v2", "reduce_mean", "fill_constant", - "reduce_mean_grad", "matmul_v2_grad" + "c_identity", + "matmul_v2", + "reduce_mean", + "fill_constant", + "reduce_mean_grad", + "matmul_v2_grad", ] ops = [] block = main_program.global_block() @@ -256,10 +270,12 @@ class TestDistMatmulV2(unittest.TestCase): assert op_dist_attr.impl_idx == 0 assert op_dist_attr.impl_type == "matmul_v2" out_dims_mapping = op_dist_attr.get_output_dims_mapping( - out_name) + out_name + ) assert out_dims_mapping == [0, 1] tensor_dist_attr = dist_ctx.get_tensor_dist_attr_for_program( - out_var) + out_var + ) assert tensor_dist_attr.dims_mapping == [0, 1] if op.type == "matmul_v2_grad": op_dist_attr = dist_ctx.get_op_dist_attr_for_program(op) @@ -271,8 +287,12 @@ class TestDistMatmulV2(unittest.TestCase): def check_row_program(self, main_program, dist_ctx): # [0, -1, 1] * [1, -1] --> [0, -1, -1] ref_ops = [ 
- "matmul_v2", "c_allreduce_sum", "reduce_mean", "fill_constant", - "reduce_mean_grad", "matmul_v2_grad" + "matmul_v2", + "c_allreduce_sum", + "reduce_mean", + "fill_constant", + "reduce_mean_grad", + "matmul_v2_grad", ] ops = [] block = main_program.global_block() @@ -285,10 +305,12 @@ class TestDistMatmulV2(unittest.TestCase): assert op_dist_attr.impl_idx == 1 assert op_dist_attr.impl_type == "matmul_v2" out_dims_mapping = op_dist_attr.get_output_dims_mapping( - out_name) + out_name + ) assert out_dims_mapping == [0, -1, -1] tensor_dist_attr = dist_ctx.get_tensor_dist_attr_for_program( - out_var) + out_var + ) assert tensor_dist_attr.dims_mapping == [0, -1, -1] if op.type == "matmul_v2_grad": op_dist_attr = dist_ctx.get_op_dist_attr_for_program(op) @@ -298,10 +320,10 @@ class TestDistMatmulV2(unittest.TestCase): class TestDistMatmulV2Col(TestDistMatmulV2): - def init(self, trans_x, trans_y): - dist_main_prog, dist_ctx = parallelizer(matmulv2_dp2mp2, init_x_col, - init_y_col, trans_x, trans_y) + dist_main_prog, dist_ctx = parallelizer( + matmulv2_dp2mp2, init_x_col, init_y_col, trans_x, trans_y + ) return dist_main_prog, dist_ctx def test_matmul_col(self): @@ -322,10 +344,10 @@ class TestDistMatmulV2Col(TestDistMatmulV2): class TestDistMatmulV2Row(TestDistMatmulV2): - def init(self, trans_x, trans_y): - dist_main_prog, dist_ctx = parallelizer(matmulv2_dp2mp2, init_x_row, - init_y_row, trans_x, trans_y) + dist_main_prog, dist_ctx = parallelizer( + matmulv2_dp2mp2, init_x_row, init_y_row, trans_x, trans_y + ) return dist_main_prog, dist_ctx def test_matmul_row(self): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_op_cost.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_op_cost.py index d7bdbfd7774737493d62e2518db4a25cbc98563c..517debaa58842cc33cc5f99ace8fc28cbaef31e2 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_op_cost.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_op_cost.py @@ -18,7 +18,10 @@ import copy import paddle from paddle.distributed.fleet import auto from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container, is_elementwise_op +from paddle.distributed.auto_parallel.operators.common import ( + get_distributed_operator_impl_container, + is_elementwise_op, +) from paddle.fluid import program_guard from paddle.fluid.backward import append_backward @@ -42,7 +45,8 @@ def parallelizer(program_func, rank): # generate backward and complete backward with paddle.static.program_guard(main_program, startup_program): params_grads = append_backward( - loss, None, None, None, distop_context=dist_context.dist_op_context) + loss, None, None, None, distop_context=dist_context.dist_op_context + ) completer.complete_backward_annotation(main_program) dist_context.block_state.parse_backward_blocks(main_program) @@ -57,23 +61,23 @@ def parallelizer(program_func, rank): class TestDistOpCost(unittest.TestCase): - def test_dist_op_cost_part1(self): - def make_program(): main_program = paddle.static.Program() start_program = paddle.static.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 8], dtype='float32') x.stop_gradient = True - label = paddle.static.data(name="label", - shape=[4, 1], - dtype='float32') + label = paddle.static.data( + name="label", shape=[4, 1], dtype='float32' + ) label.stop_gradient = True - auto.shard_tensor(x, 
auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x", None] + ) tmp = paddle.fluid.layers.fill_constant_batch_size_like( - input=x, shape=[2, 8], value=1, dtype='float32') + input=x, shape=[2, 8], value=1, dtype='float32' + ) weight_attr = paddle.ParamAttr() linear = paddle.nn.Linear(8, 1, weight_attr=weight_attr) linear_out = linear(x) @@ -89,43 +93,53 @@ class TestDistOpCost(unittest.TestCase): cluster = Cluster() cluster.gen_default_config_cluster(device_count=2) for idx, op in enumerate(ops): - if op.type != "matmul_v2" and op.type != "matmul_v2_grad" and op.type != "sgd": + if ( + op.type != "matmul_v2" + and op.type != "matmul_v2_grad" + and op.type != "sgd" + ): dist_op = dist_context.get_dist_op_for_program(op) op_dist_attr = dist_op.dist_attr processes = op_dist_attr.process_mesh.processes if is_elementwise_op(op.type): container = get_distributed_operator_impl_container( - "elementwise") + "elementwise" + ) else: container = get_distributed_operator_impl_container( - op_dist_attr.impl_type) + op_dist_attr.impl_type + ) dist_impl = container.impls[op_dist_attr.impl_idx] - dist_op_cost = dist_impl.calc_cost(op.attr('op_role'), dist_op, - dist_context, cluster) + dist_op_cost = dist_impl.calc_cost( + op.attr('op_role'), dist_op, dist_context, cluster + ) self.assertTrue(dist_op_cost) def test_dist_op_cost_part2(self): - def make_program(): main_program = paddle.static.Program() start_program = paddle.static.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4], dtype='float32') x.stop_gradient = True - label = paddle.static.data(name="label", - shape=[8, 1], - dtype='float32') + label = paddle.static.data( + name="label", shape=[8, 1], dtype='float32' + ) label.stop_gradient = True - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x"]) - - auto.shard_tensor(label, - auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x"] + ) + + auto.shard_tensor( + label, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) # embedding tmp = paddle.fluid.layers.fill_constant_batch_size_like( - input=x, shape=[4], value=1, dtype='int32') + input=x, shape=[4], value=1, dtype='int32' + ) embedding = paddle.nn.Embedding(10, 8) out = embedding(tmp) # row parallel embedding @@ -133,47 +147,64 @@ class TestDistOpCost(unittest.TestCase): if op.type == "lookup_table_v2": W = main_program.global_block().vars[op.input("W")[0]] auto.shard_tensor( - W, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) - out = paddle.fluid.layers.transpose(out, - [1, 0]) # [8, 2] [-1, 0] + W, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) + out = paddle.fluid.layers.transpose( + out, [1, 0] + ) # [8, 2] [-1, 0] # matmul param1 = paddle.fluid.layers.create_parameter( - [4, 8], paddle.float32) # [2, 8] [0, -1] - auto.shard_tensor(param1, - auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + [4, 8], paddle.float32 + ) # [2, 8] [0, -1] + auto.shard_tensor( + param1, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) param2 = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 4] [-1, 0] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, "x"]) - out1 = paddle.fluid.layers.matmul(out, - param1) # [8, 8] [-1, -1] + [8, 8], paddle.float32 + ) # [8, 4] [-1, 0] + auto.shard_tensor( + param2, + 
auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, "x"], + ) + out1 = paddle.fluid.layers.matmul( + out, param1 + ) # [8, 8] [-1, -1] tmp_param = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 8] [-1, -1] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, None]) + [8, 8], paddle.float32 + ) # [8, 8] [-1, -1] + auto.shard_tensor( + param2, + auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, None], + ) tmp_out = paddle.fluid.layers.matmul(out1, tmp_param) - out2 = paddle.fluid.layers.matmul(tmp_out, - param2) # [8, 4] [-1, 0] + out2 = paddle.fluid.layers.matmul( + tmp_out, param2 + ) # [8, 4] [-1, 0] - out8 = paddle.fluid.layers.transpose(out2, - [1, 0]) # [4, 8] [0, -1] + out8 = paddle.fluid.layers.transpose( + out2, [1, 0] + ) # [4, 8] [0, -1] # reshape out9 = paddle.reshape(out8, [8, 2, 4]) # [4, 2, 4] [0, -1, -1] tmp_reshape_out = paddle.reshape(out9, [8, 4, 2]) - out10 = paddle.reshape(tmp_reshape_out, - [8, 8]) # [4, 8] [0, -1] + out10 = paddle.reshape( + tmp_reshape_out, [8, 8] + ) # [4, 8] [0, -1] # softmax softmax = paddle.nn.Softmax() out11 = softmax(out10) error_cost = paddle.nn.functional.square_error_cost( - out11, label) + out11, label + ) loss = paddle.mean(error_cost) return main_program, start_program, loss @@ -187,37 +218,43 @@ class TestDistOpCost(unittest.TestCase): processes = op_dist_attr.process_mesh.processes if is_elementwise_op(op.type): container = get_distributed_operator_impl_container( - "elementwise") + "elementwise" + ) else: container = get_distributed_operator_impl_container( - op_dist_attr.impl_type) + op_dist_attr.impl_type + ) dist_impl = container.impls[op_dist_attr.impl_idx] - dist_op_cost = dist_impl.calc_cost(op.attr('op_role'), dist_op, - dist_context, cluster) + dist_op_cost = dist_impl.calc_cost( + op.attr('op_role'), dist_op, dist_context, cluster + ) self.assertTrue(dist_op_cost) def test_dist_op_cost_part3(self): - def make_program(): main_program = paddle.static.Program() start_program = paddle.static.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4], dtype='float32') x.stop_gradient = True - label = paddle.static.data(name="label", - shape=[8, 1], - dtype='float32') + label = paddle.static.data( + name="label", shape=[8, 1], dtype='float32' + ) label.stop_gradient = True - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x"]) - - auto.shard_tensor(label, - auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x"] + ) + + auto.shard_tensor( + label, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) # embedding tmp = paddle.fluid.layers.fill_constant_batch_size_like( - input=x, shape=[4], value=1, dtype='int32') + input=x, shape=[4], value=1, dtype='int32' + ) embedding = paddle.nn.Embedding(10, 8) out = embedding(tmp) # row parallel embedding @@ -225,46 +262,61 @@ class TestDistOpCost(unittest.TestCase): if op.type == "lookup_table_v2": W = main_program.global_block().vars[op.input("W")[0]] auto.shard_tensor( - W, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) - out = paddle.fluid.layers.transpose(out, - [1, 0]) # [8, 2] [-1, 0] + W, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) + out = paddle.fluid.layers.transpose( + out, [1, 0] + ) # [8, 2] [-1, 0] # matmul_v2 param1 = paddle.fluid.layers.create_parameter( - [4, 8], paddle.float32) # [2, 8] [0, -1] - auto.shard_tensor(param1, - 
auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + [4, 8], paddle.float32 + ) # [2, 8] [0, -1] + auto.shard_tensor( + param1, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) param2 = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 4] [-1, 0] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, "x"]) + [8, 8], paddle.float32 + ) # [8, 4] [-1, 0] + auto.shard_tensor( + param2, + auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, "x"], + ) out1 = paddle.matmul(out, param1) # [8, 8] [-1, -1] tmp_param = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 8] [-1, -1] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, None]) + [8, 8], paddle.float32 + ) # [8, 8] [-1, -1] + auto.shard_tensor( + param2, + auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, None], + ) tmp_out = paddle.matmul(out1, tmp_param) out2 = paddle.matmul(tmp_out, param2) # [8, 4] [-1, 0] - out8 = paddle.fluid.layers.transpose(out2, - [1, 0]) # [4, 8] [0, -1] + out8 = paddle.fluid.layers.transpose( + out2, [1, 0] + ) # [4, 8] [0, -1] # reshape out9 = paddle.reshape(out8, [8, 2, 4]) # [4, 2, 4] [0, -1, -1] tmp_reshape_out = paddle.reshape(out9, [8, 4, 2]) - out10 = paddle.reshape(tmp_reshape_out, - [8, 8]) # [4, 8] [0, -1] + out10 = paddle.reshape( + tmp_reshape_out, [8, 8] + ) # [4, 8] [0, -1] # softmax softmax = paddle.nn.Softmax() out11 = softmax(out10) error_cost = paddle.nn.functional.square_error_cost( - out11, label) + out11, label + ) loss = paddle.mean(error_cost) return main_program, start_program, loss @@ -278,36 +330,42 @@ class TestDistOpCost(unittest.TestCase): processes = op_dist_attr.process_mesh.processes if is_elementwise_op(op.type): container = get_distributed_operator_impl_container( - "elementwise") + "elementwise" + ) else: container = get_distributed_operator_impl_container( - op_dist_attr.impl_type) + op_dist_attr.impl_type + ) dist_impl = container.impls[op_dist_attr.impl_idx] - dist_op_cost = dist_impl.calc_cost(op.attr('op_role'), dist_op, - dist_context, cluster) + dist_op_cost = dist_impl.calc_cost( + op.attr('op_role'), dist_op, dist_context, cluster + ) self.assertTrue(dist_op_cost) def test_dist_op_cost_part4(self): - def make_program(): main_program = paddle.static.Program() start_program = paddle.static.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4], dtype='float32') x.stop_gradient = True - label = paddle.static.data(name="label", - shape=[8, 1], - dtype='float32') + label = paddle.static.data( + name="label", shape=[8, 1], dtype='float32' + ) label.stop_gradient = True - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x"]) - auto.shard_tensor(label, - auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x"] + ) + auto.shard_tensor( + label, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) # embedding tmp = paddle.fluid.layers.fill_constant_batch_size_like( - input=x, shape=[4], value=1, dtype='int32') + input=x, shape=[4], value=1, dtype='int32' + ) embedding = paddle.nn.Embedding(10, 8) out = embedding(tmp) # row parallel embedding @@ -315,48 +373,64 @@ class TestDistOpCost(unittest.TestCase): if op.type == "lookup_table_v2": W = main_program.global_block().vars[op.input("W")[0]] auto.shard_tensor( - W, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) - out = 
paddle.fluid.layers.transpose(out, - [1, 0]) # [8, 2] [-1, 0] + W, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) + out = paddle.fluid.layers.transpose( + out, [1, 0] + ) # [8, 2] [-1, 0] # mul param1 = paddle.fluid.layers.create_parameter( - [4, 8], paddle.float32) # [2, 8] [0, -1] - auto.shard_tensor(param1, - auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None]) + [4, 8], paddle.float32 + ) # [2, 8] [0, -1] + auto.shard_tensor( + param1, + auto.ProcessMesh([0, 1], dim_names=["x"]), + ["x", None], + ) param2 = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 4] [-1, 0] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, "x"]) + [8, 8], paddle.float32 + ) # [8, 4] [-1, 0] + auto.shard_tensor( + param2, + auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, "x"], + ) out1 = paddle.fluid.layers.mul(out, param1) # [8, 8] [-1, -1] tmp_param = paddle.fluid.layers.create_parameter( - [8, 8], paddle.float32) # [8, 8] [-1, -1] - auto.shard_tensor(param2, - auto.ProcessMesh([0, 1], dim_names=["x"]), - [None, None]) + [8, 8], paddle.float32 + ) # [8, 8] [-1, -1] + auto.shard_tensor( + param2, + auto.ProcessMesh([0, 1], dim_names=["x"]), + [None, None], + ) tmp_out = paddle.fluid.layers.mul(out1, tmp_param) - out2 = paddle.fluid.layers.mul(tmp_out, - param2) # [8, 4] [-1, 0] + out2 = paddle.fluid.layers.mul( + tmp_out, param2 + ) # [8, 4] [-1, 0] - out8 = paddle.fluid.layers.transpose(out2, - [1, 0]) # [4, 8] [0, -1] + out8 = paddle.fluid.layers.transpose( + out2, [1, 0] + ) # [4, 8] [0, -1] # reshape out9 = paddle.reshape(out8, [8, 2, 4]) # [4, 2, 4] [0, -1, -1] tmp_reshape_out = paddle.reshape(out9, [8, 4, 2]) - out10 = paddle.reshape(tmp_reshape_out, - [8, 8]) # [4, 8] [0, -1] + out10 = paddle.reshape( + tmp_reshape_out, [8, 8] + ) # [4, 8] [0, -1] # softmax softmax = paddle.nn.Softmax() out11 = softmax(out10) error_cost = paddle.nn.functional.square_error_cost( - out11, label) + out11, label + ) loss = paddle.mean(error_cost) return main_program, start_program, loss @@ -370,14 +444,17 @@ class TestDistOpCost(unittest.TestCase): processes = op_dist_attr.process_mesh.processes if is_elementwise_op(op.type): container = get_distributed_operator_impl_container( - "elementwise") + "elementwise" + ) else: container = get_distributed_operator_impl_container( - op_dist_attr.impl_type) + op_dist_attr.impl_type + ) dist_impl = container.impls[op_dist_attr.impl_idx] - dist_op_cost = dist_impl.calc_cost(op.attr('op_role'), dist_op, - dist_context, cluster) + dist_op_cost = dist_impl.calc_cost( + op.attr('op_role'), dist_op, dist_context, cluster + ) self.assertTrue(dist_op_cost) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py index 9f54f2a774afa8a76eef1dcf802c47491980a5dd..66ce40e4e33cb0275c0b058f02876542eef720f4 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py @@ -28,8 +28,9 @@ def make_program_dp2(): with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x", None, None] + ) tmp_0 = paddle.norm(x, p=2) return main_program, start_program, tmp_0 @@ -40,8 +41,9 @@ def 
make_program_serial(): with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False - auto.shard_tensor(x, auto.ProcessMesh([0], dim_names=["x"]), - [None, None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0], dim_names=["x"]), [None, None, None] + ) tmp_0 = paddle.norm(x, p=2) return main_program, start_program, tmp_0 @@ -60,19 +62,20 @@ def parallelizer(program_func, rank): with program_guard(main_program, start_program): params_grads = append_backward( - loss, distop_context=dist_context.dist_op_context) + loss, distop_context=dist_context.dist_op_context + ) completer.complete_backward_annotation(main_program) dist_context.block_state.parse_backward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistPNorm(unittest.TestCase): - def test_dist_pnorm_dp2(self): for rank in range(2): @@ -102,7 +105,11 @@ class TestDistPNorm(unittest.TestCase): assert output_attr.dims_mapping[0] == 0 assert set(output_attr.dims_mapping[1:]) == set([-1]) assert op_types == [ - "c_allgather", "p_norm", "fill_constant", "p_norm_grad", "slice" + "c_allgather", + "p_norm", + "fill_constant", + "p_norm_grad", + "slice", ] def test_dist_pnorm_serial(self): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_reshape.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_reshape.py index 02797d690f23ce0512818a467641c9ec83c9b34e..035fd23562440772a1bd14cbe564f3432ed701ec 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_reshape.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_reshape.py @@ -25,8 +25,9 @@ def make_program_dp2(): with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') x.stop_gradient = False - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x", None, None] + ) tmp_0 = paddle.reshape(x, shape=[0, 0, 4, 2]) tmp_1 = paddle.reshape(tmp_0, shape=[0, 0, 8]) @@ -47,14 +48,14 @@ def parallelizer(program_func, rank): dist_context.block_state.parse_forward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistReshape(unittest.TestCase): - def test_dist_reshape_mp2(self): for rank in range(2): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_shape.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_shape.py index 5e18b7d90c519d8fa94c8cda543928d05d6ed1b7..767a4cbccb22f4062ac49958df083186e609f0c7 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_shape.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_shape.py @@ -25,8 +25,9 @@ def make_program(): with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') x.stop_gradient = False - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], 
dim_names=["x"]), ["x", None, None] + ) shape = paddle.shape(x) return main_program, start_program @@ -44,14 +45,14 @@ def parallelizer(program_func, rank): dist_context.block_state.parse_forward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistShape(unittest.TestCase): - def test_dist_shape(self): dist_main_prog, dist_context = parallelizer(make_program, 0) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_slice.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_slice.py index 89261e6c9bf829b65ad21eb802942070adbc1e53..7e4d5eaee90dd90e046da922a20e2b0cfba0f7f8 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_slice.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_slice.py @@ -24,8 +24,9 @@ def make_program_dp2(): start_program = paddle.fluid.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x", None, None] + ) tmp_0 = x[0] tmp_1 = x[:, 0, :] @@ -39,8 +40,9 @@ def make_program_serial(): start_program = paddle.fluid.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') - auto.shard_tensor(x, auto.ProcessMesh([0], dim_names=["x"]), - [None, None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0], dim_names=["x"]), [None, None, None] + ) tmp_0 = x[0] tmp_1 = x[:, 0, :] @@ -64,14 +66,14 @@ def parallelizer(program_func, rank): dist_context.block_state.parse_forward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, start_program, [] + ) return dist_main_prog, dist_context class TestDistSlice(unittest.TestCase): - def test_dist_slice_dp2(self): for rank in range(2): dist_main_prog, dist_context = parallelizer(make_program_dp2, rank) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py index 7d7c60f4bdd484100a412baf3101be01acf4c452..18e79b8cf95d065d042d75fc96768290bc7ab2e0 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_dist_split.py @@ -25,8 +25,9 @@ def make_program_dp2(): with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 12, 16], dtype='float32') x.stop_gradient = False - auto.shard_tensor(x, auto.ProcessMesh([0, 1], dim_names=["x"]), - ["x", None, None]) + auto.shard_tensor( + x, auto.ProcessMesh([0, 1], dim_names=["x"]), ["x", None, None] + ) out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1) return main_program, start_program @@ -44,14 +45,14 @@ def parallelizer(program_func, rank): dist_context.block_state.parse_forward_blocks(main_program) partitioner = Partitioner(dist_context, rank) - dist_main_prog, _, _ = partitioner.partition(main_program, start_program, - []) + dist_main_prog, _, _ = partitioner.partition( + main_program, 
start_program, [] + ) return dist_main_prog, dist_context class TestDistSplit(unittest.TestCase): - def test_dist_split_dp2(self): for rank in range(2): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api.py index c8aba02719cbe33c6a770c7f016b2c5c22f39de0..880f0e582bb9fa7d699402009cafba8bf069baff 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api.py @@ -20,7 +20,6 @@ import subprocess class TestEngineAPI(unittest.TestCase): - def test_engine_api(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "engine_api.py") @@ -31,10 +30,19 @@ class TestEngineAPI(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api_dp.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api_dp.py index fab8db0182ea75fa0e6b88bb462878dd6dd0ebd6..b3dad0551c2735fb1091b394165e8d523a43032a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api_dp.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_api_dp.py @@ -20,7 +20,6 @@ import subprocess class TestEngineAPI(unittest.TestCase): - def test_engine_api(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "engine_api_dp.py") @@ -31,10 +30,19 @@ class TestEngineAPI(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_callbacks.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_callbacks.py index 9baaee353f71536fcbb15bdf26d7e4dfae16c7fd..5c7b2f1d1777cd4a9b568155bd54f42822dd8f7d 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_callbacks.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_engine_callbacks.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestCallbacks(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() @@ -51,14 +50,16 @@ class TestCallbacks(unittest.TestCase): engine = auto.Engine(LeNet(), strategy=strategy) engine.prepare(inputs_spec, mode="predict") - cbks = config_callbacks(engine=engine, - batch_size=128, - epochs=epochs, - steps=steps, - log_freq=freq, - verbose=self.verbose, - metrics=['loss', 'acc'], - save_dir=self.save_dir) + cbks = config_callbacks( + engine=engine, + batch_size=128, + epochs=epochs, + steps=steps, + log_freq=freq, + verbose=self.verbose, + metrics=['loss', 'acc'], + save_dir=self.save_dir, + ) cbks.on_begin('train') logs = {'loss': 
50.341673, 'acc': 0.00256} @@ -119,7 +120,6 @@ class TestCallbacks(unittest.TestCase): class TestCallbacksEngine(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) @@ -136,37 +136,39 @@ class TestCallbacksEngine(unittest.TestCase): base_lr = 1e-3 boundaries = [5, 8] values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)] - lr = paddle.optimizer.lr.PiecewiseDecay(boundaries=boundaries, - values=values, - verbose=False) - optimizer = paddle.optimizer.Adam(learning_rate=lr, - parameters=model.parameters()) + lr = paddle.optimizer.lr.PiecewiseDecay( + boundaries=boundaries, values=values, verbose=False + ) + optimizer = paddle.optimizer.Adam( + learning_rate=lr, parameters=model.parameters() + ) auto.fetch(model.parameters()[0], "param0", logging=True) metrics = paddle.metric.Accuracy(topk=(1, 2)) self.engine = auto.Engine(model, loss, optimizer, metrics) def test_fit_eval(self): - history = self.engine.fit(train_data=self.train_dataset, - valid_data=self.test_dataset, - batch_size=128, - steps_per_epoch=60, - valid_steps=40, - log_freq=20, - save_dir=self.save_dir, - save_freq=1) + history = self.engine.fit( + train_data=self.train_dataset, + valid_data=self.test_dataset, + batch_size=128, + steps_per_epoch=60, + valid_steps=40, + log_freq=20, + save_dir=self.save_dir, + save_freq=1, + ) print(history.history) def test_eval(self): - self.engine.evaluate(valid_data=self.test_dataset, - batch_size=128, - steps=40, - log_freq=10) + self.engine.evaluate( + valid_data=self.test_dataset, batch_size=128, steps=40, log_freq=10 + ) def test_predict(self): logger_cbks = paddle.callbacks.ProgBarLogger() - self.engine.predict(test_data=self.test_dataset, - batch_size=128, - callbacks=[logger_cbks]) + self.engine.predict( + test_data=self.test_dataset, batch_size=128, callbacks=[logger_cbks] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_high_order_grad.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_high_order_grad.py index 7c3629182faae022918dad598ce154d38ac9296c..7e2728e8e97b6c80a15c5db3349299731de597ab 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_high_order_grad.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_high_order_grad.py @@ -20,7 +20,6 @@ import subprocess class TestHighOrderGrad(unittest.TestCase): - def test_dp2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "high_order_grad.py") @@ -31,10 +30,19 @@ class TestHighOrderGrad(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_interface.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_interface.py index a36c142dc863acb317630bc89faa3e34ad4d17aa..332cb134a542c9e46074f7155df04aa4eefd2ef7 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_interface.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_interface.py @@ -18,7 +18,9 @@ import paddle.nn as nn import 
paddle.nn.functional as F import paddle.static as static from paddle.distributed.fleet import auto -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) from paddle.distributed.auto_parallel.process_mesh import ProcessMesh paddle.enable_static() @@ -27,71 +29,83 @@ batch_size = 4 epoch_num = 10 hidden_size = 1024 sequence_len = 512 -process_mesh1 = ProcessMesh(mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], - dim_names=["x", "y"]) +process_mesh1 = ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["x", "y"] +) process_mesh2 = ProcessMesh(mesh=[0, 1, 2, 3], dim_names=["x"]) class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size - param_initializer = nn.initializer.Normal(mean=0.0, - std=initializer_range) + param_initializer = nn.initializer.Normal( + mean=0.0, std=initializer_range + ) self.linear0 = nn.Linear( d_model, dim_feedforward, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) self.linear1 = nn.Linear( dim_feedforward, d_model, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) def forward(self, input): auto.shard_tensor(self.linear0.weight, process_mesh1[0], [None, "y"]) - linear0 = auto.shard_op(self.linear0, process_mesh1, - [["y", None, None]], [[None, "x", None]]) + linear0 = auto.shard_op( + self.linear0, + process_mesh1, + [["y", None, None]], + [[None, "x", None]], + ) linear0_out = linear0(input) gelu = auto.shard_op(F.gelu, process_mesh1, [["y", "x", None], None]) gelu_out = gelu(linear0_out, approximate=True) auto.shard_tensor(self.linear1.weight, shard_spec=["y", None]) - linear1 = auto.shard_op(self.linear1, - process_mesh1[1], - out_shard_specs=[["y", None, None]]) + linear1 = auto.shard_op( + self.linear1, process_mesh1[1], out_shard_specs=[["y", None, None]] + ) linear1_out = linear1(gelu_out) return self.linear0, self.linear1, linear0_out, gelu_out, linear1_out class TestAutoParallelAPI(unittest.TestCase): - def test_api(self): # input - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) auto.shard_tensor(input, process_mesh1, ["x", None, None]) auto.shard_tensor(label, process_mesh1, ["y", None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) with ProcessMesh(process_mesh1.mesh, process_mesh1.dim_names): linear0, linear1, linear0_out, gelu_out, linear1_out = mlp(input) @@ -124,32 +138,42 @@ class TestAutoParallelAPI(unittest.TestCase): self.assertTrue(dist_input.dist_attr.is_annotated("dims_mapping")) dist_linear0_weight = default_dist_context.get_dist_tensor_for_program( 
- linear0.weight) - self.assertEqual(dist_linear0_weight.dist_attr.process_mesh, - process_mesh1[0]) + linear0.weight + ) + self.assertEqual( + dist_linear0_weight.dist_attr.process_mesh, process_mesh1[0] + ) self.assertEqual(dist_linear0_weight.dist_attr.dims_mapping, [-1, 0]) self.assertTrue( - dist_linear0_weight.dist_attr.is_annotated("process_mesh")) + dist_linear0_weight.dist_attr.is_annotated("process_mesh") + ) self.assertTrue( - dist_linear0_weight.dist_attr.is_annotated("dims_mapping")) + dist_linear0_weight.dist_attr.is_annotated("dims_mapping") + ) dist_linear1_weight = default_dist_context.get_dist_tensor_for_program( - linear1.weight) - self.assertEqual(dist_linear1_weight.dist_attr.process_mesh, - process_mesh1) + linear1.weight + ) + self.assertEqual( + dist_linear1_weight.dist_attr.process_mesh, process_mesh1 + ) self.assertEqual(dist_linear1_weight.dist_attr.dims_mapping, [1, -1]) self.assertTrue( - dist_linear1_weight.dist_attr.is_annotated("process_mesh")) + dist_linear1_weight.dist_attr.is_annotated("process_mesh") + ) self.assertTrue( - dist_linear1_weight.dist_attr.is_annotated("dims_mapping")) + dist_linear1_weight.dist_attr.is_annotated("dims_mapping") + ) dist_linear1_out = default_dist_context.get_dist_tensor_for_program( - linear1_out) + linear1_out + ) self.assertEqual(dist_linear1_out.dist_attr.process_mesh, process_mesh1) self.assertEqual(dist_linear1_out.dist_attr.dims_mapping, [-1, -1, -1]) self.assertTrue(dist_linear1_out.dist_attr.is_annotated("process_mesh")) self.assertFalse( - dist_linear1_out.dist_attr.is_annotated("dims_mapping")) + dist_linear1_out.dist_attr.is_annotated("dims_mapping") + ) dist_op = default_dist_context.get_dist_op_for_program(matmul0) self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1) @@ -167,7 +191,8 @@ class TestAutoParallelAPI(unittest.TestCase): self.assertEqual(dist_op.dist_attr.impl_type, "default") self.assertEqual(dist_op.dist_attr.impl_idx, 0) tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr( - linear0_out.name) + linear0_out.name + ) self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1) self.assertEqual(tensor_dist_attr.dims_mapping, [-1, 0, -1]) self.assertTrue(tensor_dist_attr.is_annotated("process_mesh")) @@ -180,7 +205,8 @@ class TestAutoParallelAPI(unittest.TestCase): self.assertEqual(dist_op.dist_attr.impl_idx, 0) self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh")) tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr( - linear0_out.name) + linear0_out.name + ) self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1) self.assertEqual(tensor_dist_attr.dims_mapping, [1, 0, -1]) self.assertTrue(tensor_dist_attr.is_annotated("process_mesh")) @@ -208,7 +234,8 @@ class TestAutoParallelAPI(unittest.TestCase): self.assertEqual(dist_op.dist_attr.impl_idx, 0) self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh")) tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr( - linear1_out.name) + linear1_out.name + ) self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1[1]) self.assertEqual(tensor_dist_attr.dims_mapping, [0, -1, -1]) self.assertTrue(tensor_dist_attr.is_annotated("process_mesh")) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_iterable_dataset.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_iterable_dataset.py index 8a7d9d576024db5faa4683974a9be1f7664a49f4..f431878ae20a9bcf780f8a453e8f35935467127f 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_iterable_dataset.py +++ 
b/python/paddle/fluid/tests/unittests/auto_parallel/test_iterable_dataset.py @@ -20,7 +20,6 @@ import subprocess class TestEngineAPI(unittest.TestCase): - def test_engine_api(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "iterable_dataset.py") @@ -31,10 +30,19 @@ class TestEngineAPI(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_lr_grad_clip.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_lr_grad_clip.py index fc5240a369da1e7cca39f6b7ad967c996c416611..49ff195a50ff402eb5506472910f0fd6a599be16 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_lr_grad_clip.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_lr_grad_clip.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestEngineBase(unittest.TestCase): - def setUp(self): self.batch_size = 4 self.batch_num = 5 @@ -35,15 +34,18 @@ class TestEngineBase(unittest.TestCase): self.init_engine() def init_model(self): - self.mlp = MLPLayer(hidden_size=self.hidden_size, - intermediate_size=4 * self.hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + self.mlp = MLPLayer( + hidden_size=self.hidden_size, + intermediate_size=4 * self.hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) self.loss = paddle.nn.CrossEntropyLoss() def init_optimizer(self): - self.optimizer = paddle.optimizer.SGD(learning_rate=0.00001, - parameters=self.mlp.parameters()) + self.optimizer = paddle.optimizer.SGD( + learning_rate=0.00001, parameters=self.mlp.parameters() + ) def init_dataset(self): self.dataset = MyDataset(self.batch_num * self.batch_size) @@ -52,17 +54,19 @@ class TestEngineBase(unittest.TestCase): # inputs = InputSpec([self.batch_size, self.hidden_size], 'float32', 'x') # labels = InputSpec([self.batch_size], 'int64', 'label') - self.engine = auto.Engine(model=self.mlp, - loss=self.loss, - optimizer=self.optimizer, - metrics=paddle.metric.Accuracy()) + self.engine = auto.Engine( + model=self.mlp, + loss=self.loss, + optimizer=self.optimizer, + metrics=paddle.metric.Accuracy(), + ) class TestLRScheduler(TestEngineBase): - def init_optimizer(self): scheduler = paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=0.00001, T_max=10) + learning_rate=0.00001, T_max=10 + ) self.optimizer = paddle.optimizer.SGD(learning_rate=scheduler) def test_lr_scheduler(self): @@ -73,11 +77,11 @@ class TestLRScheduler(TestEngineBase): class TestGradClipByGlobalNorm(TestEngineBase): - def init_optimizer(self): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - self.optimizer = paddle.optimizer.SGD(learning_rate=0.00001, - grad_clip=clip) + self.optimizer = paddle.optimizer.SGD( + learning_rate=0.00001, grad_clip=clip + ) def test_grad_clip(self): @@ -89,19 +93,20 @@ class TestGradClipByGlobalNorm(TestEngineBase): ops = self.engine.main_program.global_block().ops has_grad_clip = False for op in ops: - if op.desc.has_attr("op_namescope") \ - and op.desc.attr("op_namescope").startswith("/gradient_clip"): + if op.desc.has_attr("op_namescope") and op.desc.attr( + "op_namescope" + 
).startswith("/gradient_clip"): has_grad_clip = True break assert has_grad_clip is True class TestGradClipByNorm(TestGradClipByGlobalNorm): - def init_optimizer(self): clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) - self.optimizer = paddle.optimizer.SGD(learning_rate=0.00001, - grad_clip=clip) + self.optimizer = paddle.optimizer.SGD( + learning_rate=0.00001, grad_clip=clip + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_new_cost_model.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_new_cost_model.py index 59db55596af0902a585c14295658da768bc4b07d..963d16d239e0f91b8651729556400c5e0a84360c 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_new_cost_model.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_new_cost_model.py @@ -20,9 +20,15 @@ import tempfile import paddle import paddle.distributed.auto_parallel.cost as cost_model -from paddle.distributed.auto_parallel.cost.base_cost import build_comp_desc_from_op -from paddle.distributed.auto_parallel.cost.base_cost import build_comp_desc_str_for_predict -from paddle.distributed.auto_parallel.cost.base_cost import calc_time_by_modeling +from paddle.distributed.auto_parallel.cost.base_cost import ( + build_comp_desc_from_op, +) +from paddle.distributed.auto_parallel.cost.base_cost import ( + build_comp_desc_str_for_predict, +) +from paddle.distributed.auto_parallel.cost.base_cost import ( + calc_time_by_modeling, +) from paddle.distributed.auto_parallel.cluster import Cluster from paddle.distributed.auto_parallel.cost import CommContext from test_cluster import cluster_json @@ -37,7 +43,6 @@ def check_cost(cost): class TestCost(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -60,7 +65,8 @@ class TestCost(unittest.TestCase): matmul_v2_op = op break matmul_v2_cost = cost_model._g_op_cost_factory["matmul_v2"]( - op=matmul_v2_op) + op=matmul_v2_op + ) desc = build_comp_desc_from_op(op=matmul_v2_op) desc_str = build_comp_desc_str_for_predict(desc) self.assertIsNotNone(desc_str) @@ -73,8 +79,9 @@ class TestCost(unittest.TestCase): def test_comm_cost(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -90,7 +97,8 @@ class TestCost(unittest.TestCase): desc["inputs"] = {"X": [(paddle.float32, [100, 200])]} desc["group_ranks"] = [0, 1] allreduce_cost = cost_model._g_op_cost_factory["c_allreduce_sum"]( - op_desc=desc, comm_context=CommContext(cluster)) + op_desc=desc, comm_context=CommContext(cluster) + ) self.assertTrue(check_cost(allreduce_cost.cost)) # Remove unnecessary files @@ -99,8 +107,9 @@ class TestCost(unittest.TestCase): def test_cost_estimator(self): # Build cluster - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -108,8 +117,9 @@ class TestCost(unittest.TestCase): cluster.build_from_file(cluster_json_path) train_program = paddle.static.Program() - cost_estimator = cost_model.CostEstimator(train_program, - cluster=cluster) + 
cost_estimator = cost_model.CostEstimator( + train_program, cluster=cluster + ) self.assertIsNotNone(cost_estimator) # Remove unnecessary files diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_optimization_tuner_api.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_optimization_tuner_api.py index 68f2376a83400ebfe41ca9ecd4bf6c56b8194e44..a8be44683338e1ec5a8ed0d2702786803c6114a3 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_optimization_tuner_api.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_optimization_tuner_api.py @@ -21,7 +21,6 @@ import subprocess class TestOptimizationTunerAPI(unittest.TestCase): - def test_engine_api(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "optimization_tuner_api.py") @@ -32,10 +31,19 @@ class TestOptimizationTunerAPI(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "launch", "--gpus", "0,1", "--log_dir", tmp_dir.name, - launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "launch", + "--gpus", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner.py index ab48e2838f9b99d598388363ead31273381268d2..ea3bd45c0350757280cb6c5ca8a303185c6e19f0 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner.py @@ -19,14 +19,21 @@ import paddle.static as static from paddle.distributed import fleet from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import DistributedContext, set_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + DistributedContext, + set_default_distributed_context, +) from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner from paddle.distributed.auto_parallel.process_mesh import ProcessMesh import sys sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) paddle.enable_static() @@ -36,7 +43,7 @@ hidden_size = 1024 sequence_len = 512 _g_process_mesh = [ ProcessMesh([0, 1], dim_names=["x"]), - ProcessMesh([2, 3], dim_names=["x"]) + ProcessMesh([2, 3], dim_names=["x"]), ] @@ -59,76 +66,100 @@ def get_program_v3(): # ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]) # ] with static.program_guard(train_program, start_program): - tokens = paddle.static.data(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.data( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - 
loss_mask = paddle.static.data(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] - gpt = GPTModel(vocab_size=1000, - hidden_size=1024, - num_hidden_layers=2, - num_attention_heads=16, - intermediate_size=4 * 1024, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3, - pp_degree=1) - - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + gpt = GPTModel( + vocab_size=1000, + hidden_size=1024, + num_hidden_layers=2, + num_attention_heads=16, + intermediate_size=4 * 1024, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + pp_degree=1, + ) + + model = GPTForPretraining( + gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) preds = model(tokens, position_ids, attention_mask) criterion = GPTPretrainingCriterion() loss = criterion(preds, labels, loss_mask) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) feed_vars = { "inputs": [tokens, position_ids, attention_mask, loss_mask], - "labels": [labels] + "labels": [labels], } fetch_vars = {"loss": [loss]} - return train_program, start_program, None, loss, optimizer, feed_vars, fetch_vars + return ( + train_program, + start_program, + None, + loss, + optimizer, + feed_vars, + fetch_vars, + ) class TestParallelTunerTrain(unittest.TestCase): - def test_tune_with_train(self): flag = False set_default_distributed_context(DistributedContext()) - train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program_v3( - ) + ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + fetch_vars, + ) = get_program_v3() cluster = Cluster() cluster.gen_default_config_cluster(node_count=1, device_count=8) - dist_context = DistributedContext(train_program, start_program, - optimizer, loss, feed_vars, - fetch_vars, cluster) + dist_context = DistributedContext( + train_program, + start_program, + optimizer, + loss, + feed_vars, + fetch_vars, + cluster, + ) dist_context.initialize() parallel_tuner = ParallelTuner(dist_context, max_trials=3, mode="train") parallel_tuner.tune() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_full.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_full.py index 27833a6a185009ca56f9d753a223708eeca2470e..4a7aa6fd208c7362c7be8549b7047daa39ed9113 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_full.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_full.py @@ -19,7 +19,10 @@ import paddle.static as static from paddle.distributed import fleet from paddle.distributed.auto_parallel.cluster import Cluster -from 
paddle.distributed.auto_parallel.dist_context import DistributedContext, set_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + DistributedContext, + set_default_distributed_context, +) from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner from paddle.distributed.auto_parallel.process_mesh import ProcessMesh from paddle.distributed.auto_parallel.planner_v2 import Planner @@ -28,7 +31,11 @@ import sys sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) paddle.enable_static() @@ -38,7 +45,7 @@ hidden_size = 1024 sequence_len = 512 _g_process_mesh = [ ProcessMesh([0, 1], dim_names=["x"]), - ProcessMesh([2, 3], dim_names=["x"]) + ProcessMesh([2, 3], dim_names=["x"]), ] @@ -58,86 +65,111 @@ def get_program_v3(): modeling._global_parallel_strategy = "dp_mp_pp" modeling.DPMPPP_MESH_LIST = [ ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), - ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]) + ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]), ] with static.program_guard(train_program, start_program): - tokens = paddle.static.data(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.data( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = paddle.static.data(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] - gpt = GPTModel(vocab_size=1000, - hidden_size=1024, - num_hidden_layers=2, - num_attention_heads=16, - intermediate_size=4 * 1024, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3, - pp_degree=len(modeling.DPMPPP_MESH_LIST)) - - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + gpt = GPTModel( + vocab_size=1000, + hidden_size=1024, + num_hidden_layers=2, + num_attention_heads=16, + intermediate_size=4 * 1024, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + pp_degree=len(modeling.DPMPPP_MESH_LIST), + ) + + model = GPTForPretraining( + gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) preds = model(tokens, position_ids, attention_mask) criterion = GPTPretrainingCriterion() loss = criterion(preds, labels, loss_mask) - optimizer = 
paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) feed_vars = { "inputs": [tokens, position_ids, attention_mask, loss_mask], - "labels": [labels] + "labels": [labels], } fetch_vars = {"loss": [loss]} - return train_program, start_program, None, loss, optimizer, feed_vars, fetch_vars + return ( + train_program, + start_program, + None, + loss, + optimizer, + feed_vars, + fetch_vars, + ) class TestParallelTunerFull(unittest.TestCase): - def test_tune_with_planner(self): flag = False set_default_distributed_context(DistributedContext()) - train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program_v3( - ) + ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + fetch_vars, + ) = get_program_v3() cluster = Cluster() cluster.gen_default_config_cluster(node_count=1, device_count=8) strategy = Strategy() strategy.auto_mode = "full" - dist_context = DistributedContext(train_program, start_program, - optimizer, loss, feed_vars, - fetch_vars, cluster, strategy) + dist_context = DistributedContext( + train_program, + start_program, + optimizer, + loss, + feed_vars, + fetch_vars, + cluster, + strategy, + ) dist_context.initialize() planner = Planner("train", dist_context) - planner._parallel_tuner = ParallelTuner(planner._dist_context, - mode=planner._mode, - max_trials=3) + planner._parallel_tuner = ParallelTuner( + planner._dist_context, mode=planner._mode, max_trials=3 + ) planner.plan() flag = True self.assertTrue(flag) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_predict.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_predict.py index 2d7a2c10579a7aa4093f7bfd1581d1ef4af8d64a..30e7eabcc11565a908e2f0fef77a0457ff1414be 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_predict.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_parallel_tuner_predict.py @@ -19,14 +19,21 @@ import paddle.static as static from paddle.distributed import fleet from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import DistributedContext, set_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + DistributedContext, + set_default_distributed_context, +) from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner from paddle.distributed.auto_parallel.process_mesh import ProcessMesh import sys sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) paddle.enable_static() @@ -36,7 +43,7 @@ hidden_size = 1024 sequence_len = 512 _g_process_mesh = [ ProcessMesh([0, 1], dim_names=["x"]), - ProcessMesh([2, 3], dim_names=["x"]) + ProcessMesh([2, 3], dim_names=["x"]), ] @@ -56,84 +63,108 @@ def get_program_v3(): modeling._global_parallel_strategy = "dp_mp_pp" modeling.DPMPPP_MESH_LIST = [ ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), - ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]) + ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]), ] with static.program_guard(train_program, start_program): - tokens = 
paddle.static.data(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.data( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = paddle.static.data(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] - gpt = GPTModel(vocab_size=1000, - hidden_size=1024, - num_hidden_layers=2, - num_attention_heads=16, - intermediate_size=4 * 1024, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3, - pp_degree=len(modeling.DPMPPP_MESH_LIST)) - - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + gpt = GPTModel( + vocab_size=1000, + hidden_size=1024, + num_hidden_layers=2, + num_attention_heads=16, + intermediate_size=4 * 1024, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + pp_degree=len(modeling.DPMPPP_MESH_LIST), + ) + + model = GPTForPretraining( + gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) preds = model(tokens, position_ids, attention_mask) criterion = GPTPretrainingCriterion() loss = criterion(preds, labels, loss_mask) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) feed_vars = { "inputs": [tokens, position_ids, attention_mask, loss_mask], - "labels": [labels] + "labels": [labels], } fetch_vars = {"loss": [loss]} - return train_program, start_program, None, loss, optimizer, feed_vars, fetch_vars + return ( + train_program, + start_program, + None, + loss, + optimizer, + feed_vars, + fetch_vars, + ) class TestParallelTunerPredict(unittest.TestCase): - def test_tune_predict(self): flag = False set_default_distributed_context(DistributedContext()) - train_program, start_program, dataloader, loss, optimizer, feed_vars, fetch_vars = get_program_v3( - ) + ( + train_program, + start_program, + dataloader, + loss, + optimizer, + feed_vars, + fetch_vars, + ) = get_program_v3() cluster = Cluster() cluster.gen_default_config_cluster(node_count=1, device_count=8) - dist_context = DistributedContext(train_program, start_program, - optimizer, loss, feed_vars, - fetch_vars, cluster) + dist_context = DistributedContext( + train_program, + start_program, + optimizer, + loss, + feed_vars, + fetch_vars, + cluster, + ) 
dist_context.initialize() - parallel_tuner = ParallelTuner(dist_context, - max_trials=3, - mode="predict") + parallel_tuner = ParallelTuner( + dist_context, max_trials=3, mode="predict" + ) parallel_tuner.tune() flag = True diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_amp.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_amp.py index 09de1d18c1ac5643b4dc05db15d1fbab5a2ecb4a..2d8f3c3a8e3ca1d57da375ae1651389d317d6864 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_amp.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_amp.py @@ -20,7 +20,6 @@ import subprocess class TestAMPPass(unittest.TestCase): - def test_mp2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "amp_pass_unittest.py") @@ -31,10 +30,19 @@ class TestAMPPass(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_grad_clip.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_grad_clip.py index bfc5eb18a065d3a65d4fe5b49bd286fec455d57b..6febed424282e5773d50f3067996344c53f7050a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_grad_clip.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_grad_clip.py @@ -20,11 +20,11 @@ import subprocess class TestGradientClip(unittest.TestCase): - def test_dp2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) - launch_model_path = os.path.join(file_dir, - "clip_grad_by_global_norm.py") + launch_model_path = os.path.join( + file_dir, "clip_grad_by_global_norm.py" + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", "--branch", "-p"] @@ -32,10 +32,19 @@ class TestGradientClip(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_gradient_merge.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_gradient_merge.py index 7ce62fa1389f480e6cdac04dfe0cc535eaa846f6..0ca2cae2e1bd34cad61842eeda2481cd14c99662 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_gradient_merge.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_gradient_merge.py @@ -20,11 +20,11 @@ import subprocess class TestGradientMergePass(unittest.TestCase): - def test_dp2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) - launch_model_path = os.path.join(file_dir, - "gradient_merge_pass_unittest.py") + launch_model_path = os.path.join( + file_dir, "gradient_merge_pass_unittest.py" + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", 
"--branch", "-p"] @@ -32,10 +32,19 @@ class TestGradientMergePass(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_quantization.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_quantization.py index e8df938d6228200b6a7af06254da6b05644276a7..1181f090cba803894df0831b4bf246bb691d7fe6 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_quantization.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_quantization.py @@ -34,7 +34,6 @@ def apply_pass(): class TestQuantizationPass(unittest.TestCase): - def test_qat_pass(self): batch_size = 8 diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_recompute.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_recompute.py index c6062faa260781b572f9cbb6a7dd8201f22ab0c3..ff873bec4f5b6f65b41477a0e9682ae35c94bc68 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_recompute.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_recompute.py @@ -20,7 +20,6 @@ import subprocess class TestRecomputePass(unittest.TestCase): - def test_mp2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "recompute_pass_unittest.py") @@ -31,10 +30,19 @@ class TestRecomputePass(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_sharding.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_sharding.py index 31467b6d505ed1dc88fce97cf7659ecd61cf0b72..eab74b451305f2e21f4b07fbf5fc55c8a1e28768 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_sharding.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_pass_sharding.py @@ -20,7 +20,6 @@ import subprocess class TestShardingPass(unittest.TestCase): - def test_dp2sharding2(self): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join(file_dir, "sharding_pass_unittest.py") @@ -31,10 +30,19 @@ class TestShardingPass(unittest.TestCase): coverage_args = [] tmp_dir = tempfile.TemporaryDirectory() - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--devices", "0,1", "--log_dir", - tmp_dir.name, launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--devices", + "0,1", + "--log_dir", + tmp_dir.name, + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py 
index 028d2b820a52b263c522cb9c85adb8ceb3e0b8a5..2c22f3fb1c134cdece683da59b92e7f158c90090 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py @@ -22,7 +22,10 @@ from paddle.distributed.fleet import auto from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.partitioner import Partitioner from paddle.distributed.auto_parallel.utils import set_var_dist_attr -from paddle.distributed.auto_parallel.dist_context import DistributedContext, get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + DistributedContext, + get_default_distributed_context, +) paddle.enable_static() enable_prim() @@ -31,60 +34,68 @@ rank = 0 class TestPrimDistOp(unittest.TestCase): - def setUp(self): self.main_program = paddle.static.Program() self.startup_program = paddle.static.Program() self.layer_help = LayerHelper('TestPrimDistOp') - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + self.main_program, self.startup_program + ): self.init_prog() def init_prog(self): # block = self.main_program.global_block() # block = self.main_program.global_block() - self.w = self.layer_help.create_parameter(dtype="float", - shape=[20], - attr=None) - self.w_grad = paddle.static.data(name='w_grad', - shape=[20], - dtype='float') + self.w = self.layer_help.create_parameter( + dtype="float", shape=[20], attr=None + ) + self.w_grad = paddle.static.data( + name='w_grad', shape=[20], dtype='float' + ) self.tmp1 = paddle.static.data(name='tmp1', shape=[20], dtype='float') self.tmp2 = paddle.static.data(name='tmp2', shape=[20], dtype='float') - self.batch_reduced = paddle.static.data(name='batch_reduced', - shape=[1], - dtype='float') + self.batch_reduced = paddle.static.data( + name='batch_reduced', shape=[1], dtype='float' + ) self.attrs = {} default_dist_context = get_default_distributed_context() _global_process_mesh = auto.ProcessMesh(list(range(nranks))) - tensor_dist_attr = set_var_dist_attr(default_dist_context, - self.tmp1, [-1], - _global_process_mesh, - mark_annotated=True) - tensor_dist_attr = set_var_dist_attr(default_dist_context, - self.tmp1, [-1], - _global_process_mesh, - mark_annotated=True) - - op = self.layer_help.append_op(type="add_p", - inputs={ - 'X': self.tmp1, - 'Y': self.w - }, - outputs={'Z': self.w_grad}, - attrs=self.attrs) - - op = self.layer_help.append_op(type="reduce_sum_p", - inputs={'X': self.tmp2}, - outputs={'Y': self.batch_reduced}, - attrs={"axis": [0]}) + tensor_dist_attr = set_var_dist_attr( + default_dist_context, + self.tmp1, + [-1], + _global_process_mesh, + mark_annotated=True, + ) + tensor_dist_attr = set_var_dist_attr( + default_dist_context, + self.tmp1, + [-1], + _global_process_mesh, + mark_annotated=True, + ) + + op = self.layer_help.append_op( + type="add_p", + inputs={'X': self.tmp1, 'Y': self.w}, + outputs={'Z': self.w_grad}, + attrs=self.attrs, + ) + + op = self.layer_help.append_op( + type="reduce_sum_p", + inputs={'X': self.tmp2}, + outputs={'Y': self.batch_reduced}, + attrs={"axis": [0]}, + ) def test_loss_and_grad_allreduce(self): - dist_context = DistributedContext(self.main_program, - self.startup_program) + dist_context = DistributedContext( + self.main_program, self.startup_program + ) completer = Completer(dist_context) completer.complete_prim_annotation(self.main_program) 
dist_context.block_state.parse_forward_blocks(self.main_program) @@ -95,7 +106,8 @@ class TestPrimDistOp(unittest.TestCase): dist_context.data_parallel_group = list(range(nranks)) partitioner = Partitioner(dist_context, rank) dist_main_prog, dist_startup_prog, _ = partitioner.partition( - self.main_program, self.startup_program, [(self.w, self.w_grad)]) + self.main_program, self.startup_program, [(self.w, self.w_grad)] + ) ops = dist_main_prog.global_block().ops self.assertTrue(ops[1].type == "c_allreduce_sum") diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh.py index 6b83c4da24596440225407b1579db279057df24e..2e698efc416fd357b137002b68c5c63a166e35d9 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh.py @@ -19,7 +19,9 @@ import paddle.nn as nn import paddle.nn.functional as F import paddle.static as static from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) paddle.enable_static() @@ -30,29 +32,33 @@ sequence_len = 512 class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size - param_initializer = nn.initializer.Normal(mean=0.0, - std=initializer_range) + param_initializer = nn.initializer.Normal( + mean=0.0, std=initializer_range + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.linear0 = nn.Linear( d_model, dim_feedforward, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) self.linear1 = nn.Linear( dim_feedforward, d_model, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) def forward(self, input): out = self.norm(input) @@ -63,7 +69,6 @@ class MLPLayer(nn.Layer): class TestProcessMesh(unittest.TestCase): - def test_construction(self): mesh = [[0, 1, 2], [3, 4, 5]] process_mesh = ProcessMesh(mesh, dim_names=["x", "y"]) @@ -106,17 +111,21 @@ class TestProcessMesh(unittest.TestCase): def test_context_manager(self): mesh = np.array([1, 2, 3, 4]) - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') - - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) + + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) with ProcessMesh(mesh, "d"): out = mlp(input) @@ -127,15 +136,18 @@ class TestProcessMesh(unittest.TestCase): for block in default_program.blocks: for tensor in block.vars.values(): dist_tensor = default_dist_context.get_dist_tensor_for_program( - tensor) + tensor + ) if 
dist_tensor is not None: - self.assertEqual(dist_tensor.dist_attr.process_mesh, - ProcessMesh(mesh)) + self.assertEqual( + dist_tensor.dist_attr.process_mesh, ProcessMesh(mesh) + ) for op in block.ops: dist_op = default_dist_context.get_dist_op_for_program(op) if dist_op is not None: - self.assertEqual(dist_op.dist_attr.process_mesh, - ProcessMesh(mesh)) + self.assertEqual( + dist_op.dist_attr.process_mesh, ProcessMesh(mesh) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh_v2.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh_v2.py index 3c58f9e8cd393a40fd62e152a71217464ed3165f..7a813e703056a905e4a5664b2dff4aaf773ddc9a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh_v2.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_process_mesh_v2.py @@ -14,11 +14,13 @@ import unittest from paddle.distributed.auto_parallel.process_mesh_v2 import ( - ProcessMesh, compute_compatible_process_mesh, merge_process_mesh) + ProcessMesh, + compute_compatible_process_mesh, + merge_process_mesh, +) class TestProcessMesh(unittest.TestCase): - def test_process_mesh(self): mesh = [[0, 1, 2], [3, 4, 5]] mesh2 = [[0, 1], [2, 3]] @@ -41,34 +43,41 @@ class TestProcessMesh(unittest.TestCase): self.assertEqual(str(process_mesh), str(process_mesh)) def test_compute_compatible_process_mesh(self): - process_mesh1 = ProcessMesh([[0, 1, 2], [3, 4, 5]], - dim_names=["x", "y"]) + process_mesh1 = ProcessMesh( + [[0, 1, 2], [3, 4, 5]], dim_names=["x", "y"] + ) compatible_process_mesh = compute_compatible_process_mesh( - [process_mesh1, None]) + [process_mesh1, None] + ) self.assertEqual(compatible_process_mesh, process_mesh1) compatible_process_mesh = compute_compatible_process_mesh( - [None, process_mesh1]) + [None, process_mesh1] + ) self.assertEqual(compatible_process_mesh, process_mesh1) process_mesh2 = ProcessMesh([[0, 1, 2], [3, 4, 5]]) compatible_process_mesh = compute_compatible_process_mesh( - [process_mesh1, process_mesh2]) + [process_mesh1, process_mesh2] + ) self.assertEqual(compatible_process_mesh, process_mesh1) self.assertEqual(compatible_process_mesh, process_mesh2) process_mesh2 = ProcessMesh([[0, 1, 2, 3, 4, 5]]) compatible_process_mesh = compute_compatible_process_mesh( - [process_mesh1, process_mesh2]) + [process_mesh1, process_mesh2] + ) self.assertEqual(compatible_process_mesh, process_mesh1) process_mesh2 = ProcessMesh([[0, 1, 2]]) compatible_process_mesh = compute_compatible_process_mesh( - [process_mesh1, process_mesh2]) + [process_mesh1, process_mesh2] + ) self.assertEqual(compatible_process_mesh, process_mesh1) def test_merge_process_mesh(self): - process_mesh1 = ProcessMesh([[0, 1, 2], [3, 4, 5]], - dim_names=["x", "y"]) + process_mesh1 = ProcessMesh( + [[0, 1, 2], [3, 4, 5]], dim_names=["x", "y"] + ) merged_process_mesh = merge_process_mesh([process_mesh1, None]) print(merged_process_mesh) self.assertEqual(merged_process_mesh, ProcessMesh([0, 1, 2, 3, 4, 5])) @@ -85,8 +94,9 @@ class TestProcessMesh(unittest.TestCase): process_mesh2 = ProcessMesh([[6, 7]]) merged_process_mesh = merge_process_mesh([process_mesh1, process_mesh2]) - self.assertEqual(merged_process_mesh, - ProcessMesh([0, 1, 2, 3, 4, 5, 6, 7])) + self.assertEqual( + merged_process_mesh, ProcessMesh([0, 1, 2, 3, 4, 5, 6, 7]) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_recorder.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_recorder.py index 
d9594b951983f36ab5d9257acfadb5ea7f6cab2e..5f07a71176896c53becaebb2ba599da6c73177b7 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_recorder.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_recorder.py @@ -19,7 +19,6 @@ from paddle.distributed.auto_parallel.tuner import recorder as rd class TestRecorder(unittest.TestCase): - def test_register(self): recorder = rd.MetricsRecorder() recorder.register("metric") @@ -35,8 +34,9 @@ class TestRecorder(unittest.TestCase): recorder = rd.MetricsRecorder() recorder.update("metric", 4, 1000) self.assertEqual(recorder.records["metric"].direction, "min") - self.assertEqual(recorder.get_records("metric"), - [rd.MetricRecord(4, 1000)]) + self.assertEqual( + recorder.get_records("metric"), [rd.MetricRecord(4, 1000)] + ) def test_get_records(self): recorder = rd.MetricsRecorder() @@ -44,12 +44,15 @@ class TestRecorder(unittest.TestCase): recorder.update("metric", 2, step=1) recorder.update("metric", 3, step=2) recorder.update("metric", 4, step=3) - self.assertEqual(recorder.get_records("metric"), [ - rd.MetricRecord(1, 0), - rd.MetricRecord(2, 1), - rd.MetricRecord(3, 2), - rd.MetricRecord(4, 3), - ]) + self.assertEqual( + recorder.get_records("metric"), + [ + rd.MetricRecord(1, 0), + rd.MetricRecord(2, 1), + rd.MetricRecord(3, 2), + rd.MetricRecord(4, 3), + ], + ) def test_set_records(self): recorder = rd.MetricsRecorder() @@ -62,12 +65,15 @@ class TestRecorder(unittest.TestCase): rd.MetricRecord(4, 3), ], ) - self.assertEqual(recorder.get_records("metric"), [ - rd.MetricRecord(1, 0), - rd.MetricRecord(2, 1), - rd.MetricRecord(3, 2), - rd.MetricRecord(4, 3), - ]) + self.assertEqual( + recorder.get_records("metric"), + [ + rd.MetricRecord(1, 0), + rd.MetricRecord(2, 1), + rd.MetricRecord(3, 2), + rd.MetricRecord(4, 3), + ], + ) def test_get_best_value(self): recorder = rd.MetricsRecorder() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_gpt_planner.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_gpt_planner.py index bde0615a41dcd8983ce2d7d058eff81fa27feed3..0ffe09bfd1ee47512bb29c7cd91f866217971743 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_gpt_planner.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_gpt_planner.py @@ -21,7 +21,6 @@ import subprocess class TestPlannerReLaunch(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -31,10 +30,12 @@ class TestPlannerReLaunch(unittest.TestCase): def test_relaunch_with_planner(self): from test_auto_parallel_relaunch import cluster_json, mapping_josn - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") - mapping_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_rank_mapping.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) + mapping_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_rank_mapping.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: @@ -46,19 +47,31 @@ class TestPlannerReLaunch(unittest.TestCase): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join( - file_dir, "auto_parallel_relaunch_with_gpt_planner.py") + file_dir, "auto_parallel_relaunch_with_gpt_planner.py" + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", "--branch", "-p"] else: coverage_args = [] - cmd = 
[sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--log_dir", self.temp_dir.name, - "--cluster_topo_path", cluster_json_path, "--rank_mapping_path", - mapping_json_path, "--enable_auto_mapping", "True", - launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--log_dir", + self.temp_dir.name, + "--cluster_topo_path", + cluster_json_path, + "--rank_mapping_path", + mapping_json_path, + "--enable_auto_mapping", + "True", + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() self.assertEqual(process.returncode, 0) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_planner.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_planner.py index 3c17b6d126e58d39dfec2fe01749949521331094..b8469c1a407827d63279385f1a04dbb01b98558e 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_planner.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_relaunch_with_planner.py @@ -21,7 +21,6 @@ import subprocess class TestPlannerReLaunch(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -31,10 +30,12 @@ class TestPlannerReLaunch(unittest.TestCase): def test_relaunch_with_planner(self): from test_auto_parallel_relaunch import cluster_json, mapping_josn - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") - mapping_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_rank_mapping.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) + mapping_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_rank_mapping.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: @@ -46,19 +47,31 @@ class TestPlannerReLaunch(unittest.TestCase): file_dir = os.path.dirname(os.path.abspath(__file__)) launch_model_path = os.path.join( - file_dir, "auto_parallel_relaunch_with_planner.py") + file_dir, "auto_parallel_relaunch_with_planner.py" + ) if os.environ.get("WITH_COVERAGE", "OFF") == "ON": coverage_args = ["-m", "coverage", "run", "--branch", "-p"] else: coverage_args = [] - cmd = [sys.executable, "-u"] + coverage_args + [ - "-m", "paddle.distributed.launch", "--log_dir", self.temp_dir.name, - "--cluster_topo_path", cluster_json_path, "--rank_mapping_path", - mapping_json_path, "--enable_auto_mapping", "True", - launch_model_path - ] + cmd = ( + [sys.executable, "-u"] + + coverage_args + + [ + "-m", + "paddle.distributed.launch", + "--log_dir", + self.temp_dir.name, + "--cluster_topo_path", + cluster_json_path, + "--rank_mapping_path", + mapping_json_path, + "--enable_auto_mapping", + "True", + launch_model_path, + ] + ) process = subprocess.Popen(cmd) process.wait() self.assertEqual(process.returncode, 0) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py index 08cd1dee0e6819a673cb8cb1914e517ef174628b..54b11c4934cd0e50f946c76459e2d3013d6fb050 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py @@ -18,7 +18,6 @@ from paddle.distributed.fleet import auto class TestStrategy(unittest.TestCase): - def test_default_config(self): strategy = auto.Strategy() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py 
b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py index e751ef64be2d95b80d3b5bea1e535d1844bd4c44..3c8b71e71394e7a459ccb4da14bc4f224e03f095 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py @@ -34,7 +34,6 @@ class_num = 10 class MyDataset(Dataset): - def __init__(self, num_samples): super(MyDataset, self).__init__() self.num_samples = num_samples @@ -49,26 +48,26 @@ class MyDataset(Dataset): class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) - - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=None) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=None) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) + + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=None + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=None + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=None) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -85,17 +84,19 @@ class MLPLayer(nn.Layer): class TestWholeProgram(unittest.TestCase): - def test_apply_optimzier(self): paddle.disable_static() - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) metrics = paddle.metric.Accuracy() loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.SGD(learning_rate=0.00001, - parameters=mlp.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.00001, parameters=mlp.parameters() + ) inputs = InputSpec([batch_size, hidden_size], 'float32', 'x') labels = InputSpec([batch_size], 'int64', 'label') @@ -114,7 +115,8 @@ class TestWholeProgram(unittest.TestCase): optimize_ops, _ = program_helper.apply_optimizer(optimizer) all_ops = program_helper.main_program.block(0).ops sgd_ops = [ - op for op in program_helper.main_program.block(0).ops + op + for op in program_helper.main_program.block(0).ops if op.type == 'sgd' ] self.assertEqual(len(all_ops), 37) @@ -124,16 +126,18 @@ class TestWholeProgram(unittest.TestCase): class TestToStatic(unittest.TestCase): - def test_to_static(self): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() - optimizer = paddle.optimizer.SGD(learning_rate=0.00001, - parameters=mlp.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.00001, parameters=mlp.parameters() + ) dataset = MyDataset(batch_num * batch_size) @@ -141,11 +145,13 @@ class TestToStatic(unittest.TestCase): # labels = InputSpec([batch_size], 'int64', 'label') assert _non_static_mode() == True - engine = 
auto.Engine(model=mlp, - loss=loss, - optimizer=optimizer, - metrics=paddle.metric.Accuracy(), - strategy=None) + engine = auto.Engine( + model=mlp, + loss=loss, + optimizer=optimizer, + metrics=paddle.metric.Accuracy(), + strategy=None, + ) engine.fit(dataset, batch_size=batch_size) engine.evaluate(dataset, batch_size=batch_size) engine.predict(dataset, batch_size=batch_size) @@ -153,14 +159,15 @@ class TestToStatic(unittest.TestCase): class TestLazyInit(unittest.TestCase): - def test_lazy_init(self): with LazyGuard(): - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) loss = paddle.nn.CrossEntropyLoss() metrics = paddle.metric.Accuracy() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_trial.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_trial.py index e39991fcaa51485a9c60a1e7740145095e0682b7..fc52d1c394effc223a609ae5db73ea89a25c298b 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_trial.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_trial.py @@ -19,7 +19,6 @@ from paddle.distributed.auto_parallel.tuner import trial as tr class TestTiral(unittest.TestCase): - def test_trial(self): space = ts.TunableSpace() space.choice("choice", [0, 1, 2, 3], default=2) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_space.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_space.py index 58ff36aba09dba968b9097dcdea096de3cf6cf8d..badc90275fd38ad06c2fc1310b87218bdf4aad89 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_space.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_space.py @@ -18,7 +18,6 @@ from paddle.distributed.auto_parallel.tuner import tunable_space as ts class TestTunableSpace(unittest.TestCase): - def test_fixed(self): space = ts.TunableSpace() fixed = space.fixed("fixed", default=4) @@ -73,10 +72,9 @@ class TestTunableSpace(unittest.TestCase): def test_float_range(self): space = ts.TunableSpace() - float_range = space.float_range("float_range", - start=0.4, - stop=4.4, - default=2.0) + float_range = space.float_range( + "float_range", start=0.4, stop=4.4, default=2.0 + ) self.assertEqual(space.values["float_range"], 2.0) self.assertEqual(len(space.variables), 1) self.assertEqual(space.variables["float_range"].name, "float_range") diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_variable.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_variable.py index ce0a076c83e7e12f7f9b8806d27b6a584c6b20ea..641f7b4347e36f70681ef176acb6468e70aec972 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_variable.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_tunable_variable.py @@ -18,7 +18,6 @@ from paddle.distributed.auto_parallel.tuner import tunable_variable as tv class TestTunableVariable(unittest.TestCase): - def test_fixed(self): fixed = tv.Fixed("fixed", True) fixed = tv.Fixed.from_state(fixed.get_state()) @@ -64,12 +63,9 @@ class TestTunableVariable(unittest.TestCase): self.assertIn(int_range.random(1234), [1, 2, 3, 4]) self.assertNotEqual(int_range.default, 4) - int_range = tv.IntRange("int_range", - start=1, - stop=8, - step=2, - default=3, - endpoint=True) + int_range = tv.IntRange( + "int_range", start=1, stop=8, step=2, default=3, endpoint=True 
+ ) int_range = tv.IntRange.from_state(int_range.get_state()) self.assertEqual(int_range.default, 3) self.assertIn(int_range.random(), [1, 3, 5, 7]) @@ -77,10 +73,9 @@ class TestTunableVariable(unittest.TestCase): self.assertNotEqual(int_range.default, 2) def test_float_range(self): - float_range = tv.FloatRange("float_range", - start=0.4, - stop=4.4, - default=2.0) + float_range = tv.FloatRange( + "float_range", start=0.4, stop=4.4, default=2.0 + ) float_range = tv.FloatRange.from_state(float_range.get_state()) self.assertEqual(float_range.default, 2.0) self.assertGreaterEqual(float_range.random(), 0.4) @@ -88,12 +83,14 @@ class TestTunableVariable(unittest.TestCase): self.assertNotAlmostEqual(float_range.random(), 1) self.assertNotAlmostEqual(float_range.random(), 4.4) - float_range = tv.FloatRange("float_range", - start=0.4, - stop=8.4, - step=2.0, - default=3.0, - endpoint=True) + float_range = tv.FloatRange( + "float_range", + start=0.4, + stop=8.4, + step=2.0, + default=3.0, + endpoint=True, + ) float_range = tv.FloatRange.from_state(float_range.get_state()) self.assertEqual(float_range.default, 3.0) self.assertGreaterEqual(float_range.random(), 0.4) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_completion.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_completion.py index bba1e2ba6f19fc9cef11f2f2ae96d709248a6874..9a8b9e1a8f36e25ea38314c28cbc113e02882919 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_completion.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_completion.py @@ -40,50 +40,56 @@ def get_random_inputs_and_labels(input_shape, label_shape): def batch_generator_creator(): - def __reader__(): for _ in range(batch_size): batch_input, batch_label = get_random_inputs_and_labels( [batch_size, sequence_len, hidden_size], - [batch_size, sequence_len, 1]) + [batch_size, sequence_len, 1], + ) yield batch_input, batch_label return __reader__ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size - param_initializer = nn.initializer.Normal(mean=0.0, - std=initializer_range) + param_initializer = nn.initializer.Normal( + mean=0.0, std=initializer_range + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.linear0 = nn.Linear( d_model, dim_feedforward, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) self.linear1 = nn.Linear( dim_feedforward, d_model, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) def forward(self, input): out = self.norm(input) - auto.shard_tensor(self.linear0.weight, _g_process_mesh[:, 0], - [None, 'x']) + auto.shard_tensor( + self.linear0.weight, _g_process_mesh[:, 0], [None, 'x'] + ) out = self.linear0(out) out = F.gelu(out, approximate=True) - auto.shard_tensor(self.linear1.weight, _g_process_mesh[:, 1], - ['x', None]) + auto.shard_tensor( + self.linear1.weight, _g_process_mesh[:, 1], ['x', None] + ) out = self.linear1(out) return out @@ -95,15 +101,19 @@ def loop_cond(i, loop_len, input_array): def loop_body(i, loop_len, input_array): pre_input = paddle.tensor.array_read(array=input_array, i=i) - mlp_while0 = MLPLayer(hidden_size=hidden_size, - 
intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) - - mlp_while1 = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_while0 = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) + + mlp_while1 = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) output = mlp_while0(pre_input) cur_pred = mlp_while1(output) @@ -128,41 +138,46 @@ def get_program(): loop_len = paddle.full(shape=[1], fill_value=epoch_num, dtype='int64') # input - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) data_holder = [input, label] # dataloader - dataloader = paddle.io.DataLoader.from_generator(feed_list=data_holder, - capacity=4 * - batch_size, - iterable=False) - dataloader.set_batch_generator(batch_generator_creator(), - places=paddle.static.cuda_places()) + dataloader = paddle.io.DataLoader.from_generator( + feed_list=data_holder, capacity=4 * batch_size, iterable=False + ) + dataloader.set_batch_generator( + batch_generator_creator(), places=paddle.static.cuda_places() + ) # data dist_attr auto.shard_tensor(input, _g_process_mesh[:, 0], [None, None, None]) auto.shard_tensor(label, _g_process_mesh[:, 0], [None, None, None]) - mlp_start = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_start = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_start(input) input_array = paddle.tensor.array_write(pred, i) i, loop_len, input_array = static.nn.while_loop( - cond=loop_cond, - body=loop_body, - loop_vars=[i, loop_len, input_array]) + cond=loop_cond, body=loop_body, loop_vars=[i, loop_len, input_array] + ) end_pred = paddle.tensor.array_read(array=input_array, i=i) - mlp_end = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_end = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_end(end_pred) error_cost = paddle.nn.functional.square_error_cost(pred, label) @@ -172,13 +187,13 @@ def get_program(): class TestMLP(unittest.TestCase): - def test_completer(self): train_program, start_program, dataloader, i, loss = get_program() dist_context = DistributedContext() completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) # print_program_with_dist_attr(complete_train_program, dist_context) def test_completer_by_dist_op(self): @@ -186,7 +201,8 @@ class TestMLP(unittest.TestCase): dist_context = DistributedContext() completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) complete_train_program = completer._complete_tensor_dist_attr_by_op() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py 
b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py index 2e3f1d936338af766d30aa1a629b3e60bf017690..4bb9272748bf2d498e30c194ad1ec00ed03fd890 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py @@ -25,7 +25,9 @@ from paddle.distributed import fleet from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.partitioner import Partitioner from paddle.distributed.auto_parallel.utils import make_data_unshard -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) paddle.enable_static() @@ -43,41 +45,45 @@ def get_random_inputs_and_labels(input_shape, label_shape): def batch_generator_creator(): - def __reader__(): for _ in range(batch_size): batch_input, batch_label = get_random_inputs_and_labels( [batch_size, sequence_len, hidden_size], - [batch_size, sequence_len, 1]) + [batch_size, sequence_len, 1], + ) yield batch_input, batch_label return __reader__ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size - param_initializer = nn.initializer.Normal(mean=0.0, - std=initializer_range) + param_initializer = nn.initializer.Normal( + mean=0.0, std=initializer_range + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.linear0 = nn.Linear( d_model, dim_feedforward, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) self.linear1 = nn.Linear( dim_feedforward, d_model, weight_attr=paddle.ParamAttr(initializer=param_initializer), - bias_attr=None) + bias_attr=None, + ) def forward(self, input): @@ -114,41 +120,46 @@ def get_program(): auto.shard_tensor(i, _g_process_mesh, [None]) # 循环次数 - loop_len = fluid.layers.fill_constant(shape=[1], - dtype='int64', - value=epoch_num) + loop_len = fluid.layers.fill_constant( + shape=[1], dtype='int64', value=epoch_num + ) auto.shard_tensor(loop_len, _g_process_mesh, [None]) # input - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) data_holder = [input, label] # dataloader - dataloader = paddle.io.DataLoader.from_generator(feed_list=data_holder, - capacity=4 * - batch_size, - iterable=False) - dataloader.set_batch_generator(batch_generator_creator(), - places=paddle.static.cuda_places()) + dataloader = paddle.io.DataLoader.from_generator( + feed_list=data_holder, capacity=4 * batch_size, iterable=False + ) + dataloader.set_batch_generator( + batch_generator_creator(), places=paddle.static.cuda_places() + ) # data dist_attr auto.shard_tensor(input, _g_process_mesh, [None, None, None]) auto.shard_tensor(label, _g_process_mesh, [None, None, None]) # fill constant bsz like tmp = paddle.fluid.layers.fill_constant_batch_size_like( - input=input, 
shape=[-1, 16, 0, 48], dtype='float32', value=0) + input=input, shape=[-1, 16, 0, 48], dtype='float32', value=0 + ) auto.shard_tensor(tmp, _g_process_mesh, [None, 'x', None, None]) # model - mlp_start = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_start = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_start(input) input_array = fluid.layers.array_write(pred, i) @@ -168,10 +179,12 @@ def get_program(): pre_input = fluid.layers.array_read(array=input_array, i=i) auto.shard_tensor(pre_input, _g_process_mesh, [None, None, None]) - mlp_while = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_while = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) cur_pred = mlp_while(pre_input) # 更新循环条件 @@ -182,10 +195,12 @@ def get_program(): end_pred = fluid.layers.array_read(array=input_array, i=i) auto.shard_tensor(end_pred, _g_process_mesh, [None, None, None]) - mlp_end = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp_end = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) pred = mlp_end(end_pred) error_cost = paddle.nn.functional.square_error_cost(pred, label) @@ -345,24 +360,25 @@ def partition(train_program, start_program, dist_context): rank = paddle.distributed.get_rank() partitioner = Partitioner(dist_context, rank) dist_main_prog, dist_startup_prog, _ = partitioner.partition( - train_program, start_program, []) + train_program, start_program, [] + ) return dist_main_prog, dist_startup_prog class TestMLP(unittest.TestCase): - def test_partitioner(self): train_program, start_program, dataloader, i, loss = get_program() dist_context = get_default_distributed_context() - train_program, start_program = completion(train_program, start_program, - dist_context) + train_program, start_program = completion( + train_program, start_program, dist_context + ) dist_context.block_state.parse_forward_blocks(train_program) - dist_main_prog, dist_startup_prog = partition(train_program, - start_program, - dist_context) + dist_main_prog, dist_startup_prog = partition( + train_program, start_program, dist_context + ) global_block_ops = dist_main_prog.blocks[0].ops fill_op = None diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py index b8c64c0af5454b3138831cea35e3430a5982c931..f2ced04fe56afa64902898e163a3c34a61b0252a 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py @@ -26,9 +26,19 @@ from paddle.distributed.fleet import auto from paddle.distributed import fleet from paddle.fluid.initializer import NumpyArrayInitializer -from paddle.distributed.auto_parallel.utils import save_distributed_checkpoint, load_distributed_checkpoint, load_checkpoint_into_program -from paddle.distributed.auto_parallel.utils import get_dist_attr, merge_and_slice_parameter, load_parameter_into_program -from paddle.distributed.auto_parallel.dist_context import set_default_distributed_context +from paddle.distributed.auto_parallel.utils import ( + save_distributed_checkpoint, + 
load_distributed_checkpoint, + load_checkpoint_into_program, +) +from paddle.distributed.auto_parallel.utils import ( + get_dist_attr, + merge_and_slice_parameter, + load_parameter_into_program, +) +from paddle.distributed.auto_parallel.dist_context import ( + set_default_distributed_context, +) paddle.enable_static() _global_parallel_strategy = None @@ -38,11 +48,9 @@ PP_MESH_1 = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=64, - intermediate_size=4 * 64, - initializer_range=0.02): + def __init__( + self, hidden_size=64, intermediate_size=4 * 64, initializer_range=0.02 + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size @@ -52,14 +60,12 @@ class MLPLayer(nn.Layer): weight_attr0 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr0)) weight_attr1 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr1)) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr0, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr1, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -67,15 +73,19 @@ class MLPLayer(nn.Layer): auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None]) auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None]) elif _global_parallel_strategy == "mp": - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, "x"]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - ["x", None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, "x"] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, ["x", None] + ) elif _global_parallel_strategy == "dp": - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, None]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, None] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, None] + ) out = self.norm(input) out = self.linear0(out) @@ -85,16 +95,17 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program,start_program), \ - utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 64 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) if _global_parallel_strategy == "pp": auto.shard_tensor(input, PP_MESH_0, [None, None]) @@ -104,9 +115,11 @@ def mlp_forward(train_program, start_program): elif _global_parallel_strategy == "mp": auto.shard_tensor(input, _global_process_mesh, [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) loss = paddle.mean(error_cost) @@ -119,28 
+132,35 @@ def get_distributed_program(): dist_strategy = fleet.DistributedStrategy() dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) optimizer = paddle.fluid.optimizer.SGDOptimizer(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer) _, _, dist_startup_prog, dist_main_prog = optimizer.minimize( - loss, startup_program) + loss, startup_program + ) return dist_main_prog, dist_startup_prog, loss class TestMLPAutoConvert(unittest.TestCase): - def setUp(self): paddle.seed(2021) random.seed(2021) np.random.seed(2021) def tearDown(self): - os.remove("./model_state_rank{}.pdmodel".format( - str(paddle.distributed.get_rank()))) - os.remove("./dist_attr_rank{}.pdattr".format( - str(paddle.distributed.get_rank()))) + os.remove( + "./model_state_rank{}.pdmodel".format( + str(paddle.distributed.get_rank()) + ) + ) + os.remove( + "./dist_attr_rank{}.pdattr".format( + str(paddle.distributed.get_rank()) + ) + ) def test_mlp_mp2pp(self): set_default_distributed_context(None) @@ -159,16 +179,18 @@ class TestMLPAutoConvert(unittest.TestCase): for step in range(20): if step == 10: - save_distributed_checkpoint(dist_main_prog, - ".", - dist_attr_path=".") - - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + save_distributed_checkpoint( + dist_main_prog, ".", dist_attr_path="." + ) + + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) last_res = res[0] set_default_distributed_context(None) @@ -179,50 +201,65 @@ class TestMLPAutoConvert(unittest.TestCase): global PP_MESH_1 PP_MESH_1 = auto.ProcessMesh(mesh=[1], dim_names=["pp1"]) - dist_main_prog_load, dist_start_prog_load, loss_load = get_distributed_program( - ) + ( + dist_main_prog_load, + dist_start_prog_load, + loss_load, + ) = get_distributed_program() place = paddle.set_device("gpu") exe = paddle.static.Executor(place) exe.run(dist_start_prog_load) ckpt_path = [ - "./model_state_rank0.pdmodel", "./model_state_rank1.pdmodel" + "./model_state_rank0.pdmodel", + "./model_state_rank1.pdmodel", ] dist_attr_path = [ - "./dist_attr_rank0.pdattr", "./dist_attr_rank1.pdattr" + "./dist_attr_rank0.pdattr", + "./dist_attr_rank1.pdattr", ] - load_checkpoint_into_program(ckpt_path, dist_attr_path, - dist_main_prog_load) + load_checkpoint_into_program( + ckpt_path, dist_attr_path, dist_main_prog_load + ) for step in range(10, 20): if paddle.distributed.get_rank() in [0]: - res = exe.run(dist_main_prog_load, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }) + res = exe.run( + dist_main_prog_load, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + ) else: - res = exe.run(dist_main_prog_load, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss_load]) + res = exe.run( + dist_main_prog_load, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss_load], + ) if paddle.distributed.get_rank() in [1]: self.assertEqual(last_res, res[0]) class 
TestMLPAutoConvert2(unittest.TestCase): - def setUp(self): paddle.seed(2021) random.seed(2021) np.random.seed(2021) def tearDown(self): - os.remove("./model_state_rank{}.pdmodel".format( - str(paddle.distributed.get_rank()))) - os.remove("./dist_attr_rank{}.pdattr".format( - str(paddle.distributed.get_rank()))) + os.remove( + "./model_state_rank{}.pdmodel".format( + str(paddle.distributed.get_rank()) + ) + ) + os.remove( + "./dist_attr_rank{}.pdattr".format( + str(paddle.distributed.get_rank()) + ) + ) def test_mlp_pp2mp(self): set_default_distributed_context(None) @@ -247,18 +284,22 @@ class TestMLPAutoConvert2(unittest.TestCase): save_distributed_checkpoint(dist_main_prog, ".", ".", add_info) if paddle.distributed.get_rank() in [0]: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + ) else: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) if paddle.distributed.get_rank() in [1]: last_res = res[0] @@ -266,41 +307,49 @@ class TestMLPAutoConvert2(unittest.TestCase): _global_parallel_strategy = "mp" _global_process_mesh = auto.ProcessMesh([0, 1], dim_names=["x"]) - dist_main_prog_load, dist_start_prog_load, loss_load = get_distributed_program( - ) + ( + dist_main_prog_load, + dist_start_prog_load, + loss_load, + ) = get_distributed_program() place = paddle.set_device("gpu") exe = paddle.static.Executor(place) exe.run(dist_start_prog_load) ckpt_path = [ - "./model_state_rank0.pdmodel", "./model_state_rank1.pdmodel" + "./model_state_rank0.pdmodel", + "./model_state_rank1.pdmodel", ] dist_attr_path = [ - "./dist_attr_rank0.pdattr", "./dist_attr_rank1.pdattr" + "./dist_attr_rank0.pdattr", + "./dist_attr_rank1.pdattr", ] param_dict, pre_dist_attr, add_info = load_distributed_checkpoint( - ckpt_path, dist_attr_path) + ckpt_path, dist_attr_path + ) batch = add_info["batch"] batch_size = add_info["batch_size"] start_index = batch * batch_size input = input[start_index:, :] label = label[start_index:, :] cur_dist_attr = get_dist_attr(dist_main_prog_load) - sliced_param_dict = merge_and_slice_parameter(param_dict, pre_dist_attr, - cur_dist_attr) + sliced_param_dict = merge_and_slice_parameter( + param_dict, pre_dist_attr, cur_dist_attr + ) load_parameter_into_program(sliced_param_dict, dist_main_prog_load) for step in range(10): - res = exe.run(dist_main_prog_load, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss_load]) + res = exe.run( + dist_main_prog_load, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss_load], + ) if paddle.distributed.get_rank() in [1]: self.assertEqual(last_res, res[0]) class TestMLPAutoConvertInvalid(unittest.TestCase): - def setUp(self): paddle.seed(2021) random.seed(2021) @@ -314,24 +363,32 @@ class TestMLPAutoConvertInvalid(unittest.TestCase): _global_process_mesh = auto.ProcessMesh([0, 1], dim_names=["x"]) dist_main_prog, _, _ = get_distributed_program() with self.assertRaises(TypeError): - 
save_distributed_checkpoint(dist_main_prog, [""], [""], - addition_info=[0]) + save_distributed_checkpoint( + dist_main_prog, [""], [""], addition_info=[0] + ) with self.assertRaises(ValueError): - save_distributed_checkpoint(dist_main_prog, [""], [""], - addition_info={"step": 0}) + save_distributed_checkpoint( + dist_main_prog, [""], [""], addition_info={"step": 0} + ) with self.assertRaises(ValueError): - save_distributed_checkpoint(dist_main_prog, [""], [""], - addition_info={"batch": 0.0}) + save_distributed_checkpoint( + dist_main_prog, [""], [""], addition_info={"batch": 0.0} + ) with self.assertRaises(ValueError): - load_checkpoint_into_program(["./model_state_rank.pdmodel"], - ["./dist_attr_rank.pdattr"], - dist_main_prog) + load_checkpoint_into_program( + ["./model_state_rank.pdmodel"], + ["./dist_attr_rank.pdattr"], + dist_main_prog, + ) with self.assertRaises(ValueError): - load_distributed_checkpoint(["./model_state_rank.pdmodel"], - ["./dist_attr_rank.pdattr"]) + load_distributed_checkpoint( + ["./model_state_rank.pdmodel"], ["./dist_attr_rank.pdattr"] + ) with self.assertRaises(TypeError): - load_distributed_checkpoint({"0": "./model_state_rank.pdmodel"}, - {"1": "./dist_attr_rank.pdattr"}) + load_distributed_checkpoint( + {"0": "./model_state_rank.pdmodel"}, + {"1": "./dist_attr_rank.pdattr"}, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_data_unshard.py b/python/paddle/fluid/tests/unittests/auto_parallel_data_unshard.py index e08739039c8acad934c0ca933b43b6022bc862db..53a1250f1912ed881df650cae849b5d4aafdb642 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_data_unshard.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_data_unshard.py @@ -28,9 +28,7 @@ paddle.distributed.init_parallel_env() class TestDataUnshard(unittest.TestCase): - def test_dp2pp1mp1(self): - def create_model(train_program, start_program): with paddle.static.program_guard(train_program, start_program): @@ -39,7 +37,8 @@ class TestDataUnshard(unittest.TestCase): label = paddle.static.data(name='label', shape=[2, 8]) weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=0.02)) + initializer=nn.initializer.Normal(mean=0.0, std=0.02) + ) linear0 = nn.Linear(8, 8, weight_attr) linear1 = nn.Linear(8, 8, weight_attr) @@ -52,7 +51,8 @@ class TestDataUnshard(unittest.TestCase): gelu_out = F.gelu(linear0_out) linear1_out = linear1(gelu_out) error_cost = paddle.nn.functional.square_error_cost( - linear1_out, label) + linear1_out, label + ) loss = paddle.mean(error_cost) return train_program, start_program, loss, input, label @@ -60,20 +60,27 @@ class TestDataUnshard(unittest.TestCase): start_program = paddle.static.Program() # serial program train_program, start_program, loss, input, label = create_model( - train_program, start_program) + train_program, start_program + ) dist_strategy = fleet.DistributedStrategy() dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = 
optimizer.minimize(loss, start_program) worker_index = paddle.distributed.get_rank() paddle.seed(worker_index + 2021) @@ -87,20 +94,20 @@ class TestDataUnshard(unittest.TestCase): input_data = np.array(range(2 * 8)).reshape([2, 8]).astype("float32") label_data = np.random.randint(0, 10, [2, 8]).astype("float32") - fetchs = [loss.name, 'split@RESHARD.tmp_0'] if worker_index == 0 else [ - loss.name, 'split@RESHARD.tmp_1' - ] - loss_np, shard_data_np = exe.run(distributed_main_program, - feed={ - "input": input_data, - "label": label_data - }, - fetch_list=fetchs) + fetchs = ( + [loss.name, 'split@RESHARD.tmp_0'] + if worker_index == 0 + else [loss.name, 'split@RESHARD.tmp_1'] + ) + loss_np, shard_data_np = exe.run( + distributed_main_program, + feed={"input": input_data, "label": label_data}, + fetch_list=fetchs, + ) desired = input_data[worker_index].reshape(shard_data_np.shape) np.testing.assert_allclose(shard_data_np, desired) def dp1pp1mp2(self): - def create_model(train_program, start_program): with paddle.static.program_guard(train_program, start_program): @@ -109,7 +116,8 @@ class TestDataUnshard(unittest.TestCase): label = paddle.static.data(name='label', shape=[8, 8]) weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=0.02)) + initializer=nn.initializer.Normal(mean=0.0, std=0.02) + ) linear0 = nn.Linear(8, 8, weight_attr) linear1 = nn.Linear(8, 8, weight_attr) @@ -124,7 +132,8 @@ class TestDataUnshard(unittest.TestCase): linear1_out = linear1(gelu_out) error_cost = paddle.nn.functional.square_error_cost( - linear1_out, label) + linear1_out, label + ) loss = paddle.mean(error_cost) return train_program, start_program, loss, input, label @@ -132,20 +141,27 @@ class TestDataUnshard(unittest.TestCase): start_program = paddle.static.Program() # serial program train_program, start_program, loss, input, label = create_model( - train_program, start_program) + train_program, start_program + ) dist_strategy = fleet.DistributedStrategy() dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = optimizer.minimize(loss, start_program) worker_index = paddle.distributed.get_rank() paddle.seed(worker_index + 2021) @@ -160,12 +176,11 @@ class TestDataUnshard(unittest.TestCase): label_data = np.random.randint(0, 10, [8, 8]).astype("float32") fetchs = [loss.name, 'input'] - loss_np, shard_data_np = exe.run(distributed_main_program, - feed={ - "input": input_data, - "label": label_data - }, - fetch_list=fetchs) + loss_np, shard_data_np = exe.run( + distributed_main_program, + feed={"input": input_data, "label": label_data}, + fetch_list=fetchs, + ) desired = input_data.reshape(shard_data_np.shape) np.testing.assert_allclose(shard_data_np, desired) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py index 3940aa0170ba78572c59331525aeb6af1eda6a63..c4dc52ebfd2f9638b7b817562ff635427f41f917 100644 --- 
a/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_gpt_model.py @@ -41,20 +41,23 @@ class MultiHeadAttention(nn.Layer): Multi-Head Attention performs multiple parallel attention to jointly attending to information from different representation subspaces. """ + Cache = collections.namedtuple("Cache", ["k", "v"]) StaticCache = collections.namedtuple("StaticCache", ["k", "v"]) - def __init__(self, - embed_dim, - num_heads, - dropout=0., - kdim=None, - vdim=None, - need_weights=False, - weight_attr=None, - bias_attr=None, - fuse=False, - mesh_idx=None): + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + kdim=None, + vdim=None, + need_weights=False, + weight_attr=None, + bias_attr=None, + fuse=False, + mesh_idx=None, + ): super(MultiHeadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim @@ -65,36 +68,43 @@ class MultiHeadAttention(nn.Layer): self.fuse = fuse self.mesh_idx = mesh_idx self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" if self.fuse: assert self.kdim == embed_dim assert self.vdim == embed_dim - self.qkv_proj = nn.Linear(embed_dim, - 3 * embed_dim, - weight_attr, - bias_attr=bias_attr) + self.qkv_proj = nn.Linear( + embed_dim, 3 * embed_dim, weight_attr, bias_attr=bias_attr + ) else: - self.q_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr=weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - embed_dim, - weight_attr=weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - embed_dim, - weight_attr=weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr=weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + embed_dim, + embed_dim, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) + self.k_proj = nn.Linear( + self.kdim, + embed_dim, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) + self.v_proj = nn.Linear( + self.vdim, + embed_dim, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) + self.out_proj = nn.Linear( + embed_dim, embed_dim, weight_attr=weight_attr, bias_attr=bias_attr + ) def _fuse_prepare_qkv(self, query): mix_layer = self.qkv_proj(query) - mix_layer = paddle.reshape_(mix_layer, - [0, 0, self.num_heads, 3 * self.head_dim]) + mix_layer = paddle.reshape_( + mix_layer, [0, 0, self.num_heads, 3 * self.head_dim] + ) mix_layer = paddle.transpose(mix_layer, [0, 2, 1, 3]) q, k, v = paddle.split(mix_layer, num_or_sections=3, axis=-1) return q, k, v @@ -107,17 +117,21 @@ class MultiHeadAttention(nn.Layer): """ q = self.q_proj(query) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.q_proj.weight, _global_process_mesh, - [None, "x"]) + auto.shard_tensor( + self.q_proj.weight, _global_process_mesh, [None, "x"] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.q_proj.weight, _global_process_mesh, - [None, "y"]) + auto.shard_tensor( + self.q_proj.weight, _global_process_mesh, [None, "y"] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.q_proj.weight, MPPP_MESH_LIST[self.mesh_idx], - [None, "x"]) + auto.shard_tensor( + self.q_proj.weight, MPPP_MESH_LIST[self.mesh_idx], [None, "x"] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.q_proj.weight, - 
DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"]) + auto.shard_tensor( + self.q_proj.weight, DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"] + ) q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) q = tensor.transpose(x=q, perm=[0, 2, 1, 3]) @@ -146,30 +160,38 @@ class MultiHeadAttention(nn.Layer): """ k = self.k_proj(key) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.k_proj.weight, _global_process_mesh, - [None, "x"]) + auto.shard_tensor( + self.k_proj.weight, _global_process_mesh, [None, "x"] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.k_proj.weight, _global_process_mesh, - [None, "y"]) + auto.shard_tensor( + self.k_proj.weight, _global_process_mesh, [None, "y"] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.k_proj.weight, MPPP_MESH_LIST[self.mesh_idx], - [None, "x"]) + auto.shard_tensor( + self.k_proj.weight, MPPP_MESH_LIST[self.mesh_idx], [None, "x"] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.k_proj.weight, - DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"]) + auto.shard_tensor( + self.k_proj.weight, DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"] + ) v = self.v_proj(value) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.v_proj.weight, _global_process_mesh, - [None, "x"]) + auto.shard_tensor( + self.v_proj.weight, _global_process_mesh, [None, "x"] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.v_proj.weight, _global_process_mesh, - [None, "y"]) + auto.shard_tensor( + self.v_proj.weight, _global_process_mesh, [None, "y"] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.v_proj.weight, MPPP_MESH_LIST[self.mesh_idx], - [None, "x"]) + auto.shard_tensor( + self.v_proj.weight, MPPP_MESH_LIST[self.mesh_idx], [None, "x"] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.v_proj.weight, - DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"]) + auto.shard_tensor( + self.v_proj.weight, DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"] + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) v = tensor.reshape(x=v, shape=[0, 0, self.num_heads, self.head_dim]) @@ -190,24 +212,22 @@ class MultiHeadAttention(nn.Layer): input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) v = layers.fill_constant_batch_size_like( input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) return self.Cache(k, v) else: # incremental_state with initial value, mainly for usage like UniLM return self.Cache(key, value) - def forward(self, - query, - key, - value, - attn_mask=None, - use_cache=False, - cache=None): + def forward( + self, query, key, value, attn_mask=None, use_cache=False, cache=None + ): """ Applies multi-head attention to map queries and a set of key-value pairs to outputs. 
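The dominant transformation in these hunks is black's bracket splitting: when a call or signature exceeds the configured line length, black first tries to keep all arguments on a single indented line inside the brackets, and only if that still does not fit does it explode the call to one argument per line and append a trailing comma; yapf under the removed .style.yapf (pep8 style, 80 columns) instead aligned continuation arguments under the opening parenthesis. Both resulting shapes appear throughout the hunks above. A minimal runnable sketch of the two shapes, using a hypothetical make_layer function that merely stands in for the constructors touched by this patch:

def make_layer(name, hidden_size, dropout_ratio, initializer_range):
    # Hypothetical stand-in for the layer constructors reformatted in this patch.
    return (name, hidden_size, dropout_ratio, initializer_range)

# The call is longer than the configured line length, but its arguments still fit
# on a single indented line, so black packs them inside the brackets and adds no
# trailing comma:
packed = make_layer(
    "decoder", hidden_size=1024, dropout_ratio=0.1, initializer_range=0.02
)

# Here even the packed form is too long, so black explodes the call to one
# argument per line and appends a trailing comma:
exploded = make_layer(
    "transformer_decoder_layer",
    hidden_size=1024,
    dropout_ratio=0.1,
    initializer_range=0.02,
)

print(packed[0], exploded[0])

Once that trailing comma is present, black treats it as a "magic" trailing comma and keeps the call in the exploded layout on subsequent runs, which is why the one-argument-per-line form is stable across the reformatted files.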
@@ -221,20 +241,22 @@ class MultiHeadAttention(nn.Layer): else: q, k, v = self._prepare_qkv(query, key, value, use_cache, cache) else: - q, k, v, cache = self._prepare_qkv(query, key, value, use_cache, - cache) - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + q, k, v, cache = self._prepare_qkv( + query, key, value, use_cache, cache + ) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if attn_mask is not None: product = product + attn_mask weights = F.softmax(product) if self.dropout: - weights = F.dropout(weights, - self.dropout, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) # combine heads out = tensor.transpose(out, perm=[0, 2, 1, 3]) @@ -242,17 +264,23 @@ class MultiHeadAttention(nn.Layer): # project to output out = self.out_proj(out) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.out_proj.weight, _global_process_mesh, - ["x", None]) + auto.shard_tensor( + self.out_proj.weight, _global_process_mesh, ["x", None] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.out_proj.weight, _global_process_mesh, - ["y", None]) + auto.shard_tensor( + self.out_proj.weight, _global_process_mesh, ["y", None] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.out_proj.weight, - MPPP_MESH_LIST[self.mesh_idx], ["x", None]) + auto.shard_tensor( + self.out_proj.weight, MPPP_MESH_LIST[self.mesh_idx], ["x", None] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.out_proj.weight, - DPMPPP_MESH_LIST[self.mesh_idx], ["y", None]) + auto.shard_tensor( + self.out_proj.weight, + DPMPPP_MESH_LIST[self.mesh_idx], + ["y", None], + ) outs = [out] if self.need_weights: @@ -279,13 +307,15 @@ class TransformerDecoder(nn.Layer): raise ValueError("Only support LayerNorm") self.checkpoints = [] - def forward(self, - tgt, - memory, - tgt_mask=None, - memory_mask=None, - use_cache=False, - cache=None): + def forward( + self, + tgt, + memory, + tgt_mask=None, + memory_mask=None, + use_cache=False, + cache=None, + ): """ Applies a stack of N Transformer decoder layers on inputs. 
If `norm` is provided, also applies layer normalization on the output of last decoder @@ -295,134 +325,170 @@ class TransformerDecoder(nn.Layer): new_caches = [] self.checkpoints = [] if _global_parallel_strategy == "pp": - auto.shard_tensor(output, PP_MESH_LIST[0], - [None for i in range(len(output.shape))]) + auto.shard_tensor( + output, + PP_MESH_LIST[0], + [None for i in range(len(output.shape))], + ) if _global_parallel_strategy == "dp_pp": - auto.shard_tensor(output, DPPP_MESH_LIST[0], ["x"] + - [None for i in range(len(output.shape) - 1)]) + auto.shard_tensor( + output, + DPPP_MESH_LIST[0], + ["x"] + [None for i in range(len(output.shape) - 1)], + ) if _global_parallel_strategy == "mp_pp": - auto.shard_tensor(output, MPPP_MESH_LIST[0], - [None for i in range(len(output.shape))]) + auto.shard_tensor( + output, + MPPP_MESH_LIST[0], + [None for i in range(len(output.shape))], + ) if _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(output, DPMPPP_MESH_LIST[0], ["x"] + - [None for i in range(len(output.shape) - 1)]) + auto.shard_tensor( + output, + DPMPPP_MESH_LIST[0], + ["x"] + [None for i in range(len(output.shape) - 1)], + ) for i, mod in enumerate(self.layers): if cache is None: if use_cache: if _global_parallel_strategy == "pp": output, new_cache = auto.shard_op( - mod, PP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, PP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, PP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + output, + PP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_pp": output, new_cache = auto.shard_op( - mod, DPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, DPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPPP_MESH_LIST[mod.mesh_idx], ["x"] + - [None for i in range(len(output.shape) - 1)]) + output, + DPPP_MESH_LIST[mod.mesh_idx], + ["x"] + + [None for i in range(len(output.shape) - 1)], + ) elif _global_parallel_strategy == "mp_pp": output, new_cache = auto.shard_op( - mod, MPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, MPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, MPPP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + output, + MPPP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_mp_pp": output, new_cache = auto.shard_op( - mod, - DPMPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, use_cache, - cache) + mod, DPMPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPMPPP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + output, + DPMPPP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) else: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) new_caches.append(new_cache) else: if _global_parallel_strategy == "pp": output = auto.shard_op(mod, PP_MESH_LIST[mod.mesh_idx])( - output, memory, tgt_mask, use_cache, cache) + output, memory, tgt_mask, use_cache, cache + ) auto.shard_tensor( - output, PP_MESH_LIST[mod.mesh_idx], - [None for i in 
range(len(output.shape))]) + output, + PP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_pp": output = auto.shard_op( - mod, DPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, DPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPPP_MESH_LIST[mod.mesh_idx], ["x"] + - [None for i in range(len(output.shape) - 1)]) + output, + DPPP_MESH_LIST[mod.mesh_idx], + ["x"] + + [None for i in range(len(output.shape) - 1)], + ) elif _global_parallel_strategy == "mp_pp": output = auto.shard_op( - mod, MPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, MPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, MPPP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + output, + MPPP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_mp_pp": - output = auto.shard_op(mod, - DPMPPP_MESH_LIST[mod.mesh_idx])( - output, memory, tgt_mask, - use_cache, cache) + output = auto.shard_op( + mod, DPMPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPMPPP_MESH_LIST[mod.mesh_idx], ["x"] + - [None for i in range(len(output.shape) - 1)]) + output, + DPMPPP_MESH_LIST[mod.mesh_idx], + ["x"] + + [None for i in range(len(output.shape) - 1)], + ) else: - output = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) else: if _global_parallel_strategy == "pp": output, new_cache = auto.shard_op( - mod, - PP_MESH_LIST[mod.mesh_idx])(output, memory, tgt_mask, - use_cache, cache) - auto.shard_tensor(output, PP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + mod, PP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) + auto.shard_tensor( + output, + PP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_pp": output, new_cache = auto.shard_op( - mod, - DPPP_MESH_LIST[mod.mesh_idx])(output, memory, tgt_mask, - use_cache, cache) + mod, DPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPPP_MESH_LIST[mod.mesh_idx], - ["x"] + [None for i in range(len(output.shape) - 1)]) + output, + DPPP_MESH_LIST[mod.mesh_idx], + ["x"] + [None for i in range(len(output.shape) - 1)], + ) elif _global_parallel_strategy == "mp_pp": output, new_cache = auto.shard_op( - mod, - MPPP_MESH_LIST[mod.mesh_idx])(output, memory, tgt_mask, - use_cache, cache) - auto.shard_tensor(output, MPPP_MESH_LIST[mod.mesh_idx], - [None for i in range(len(output.shape))]) + mod, MPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) + auto.shard_tensor( + output, + MPPP_MESH_LIST[mod.mesh_idx], + [None for i in range(len(output.shape))], + ) elif _global_parallel_strategy == "dp_mp_pp": output, new_cache = auto.shard_op( - mod, DPMPPP_MESH_LIST[mod.mesh_idx])(output, memory, - tgt_mask, - use_cache, cache) + mod, DPMPPP_MESH_LIST[mod.mesh_idx] + )(output, memory, tgt_mask, use_cache, cache) auto.shard_tensor( - output, DPMPPP_MESH_LIST[mod.mesh_idx], - ["x"] + [None for i in range(len(output.shape) - 1)]) + output, + DPMPPP_MESH_LIST[mod.mesh_idx], + ["x"] + [None for i in range(len(output.shape) - 1)], + ) else: - 
output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache[i]) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache[i], + ) new_caches.append(new_cache) self.checkpoints.append(output.name) if self.norm is not None: @@ -436,7 +502,7 @@ class TransformerDecoder(nn.Layer): produced by `TransformerDecoderLayer.gen_cache`. See `TransformerDecoderLayer.gen_cache` for more details. If `do_zip` is True, apply `zip` on these tuples to get a list with two elements. - """ + """ cache = [layer.gen_cache(memory) for layer in self.layers] if do_zip: cache = list(zip(*cache)) @@ -449,18 +515,20 @@ class TransformerDecoderLayer(nn.Layer): It contains multiheadattention and some linear layers. """ - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout=0.1, - activation="gelu", - attn_dropout=None, - act_dropout=None, - normalize_before=True, - weight_attr=None, - bias_attr=None, - mesh_idx=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout=0.1, + activation="gelu", + attn_dropout=None, + act_dropout=None, + normalize_before=True, + weight_attr=None, + bias_attr=None, + mesh_idx=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 @@ -471,20 +539,20 @@ class TransformerDecoderLayer(nn.Layer): self.normalize_before = normalize_before weight_attrs = _convert_param_attr_to_list(weight_attr, 3) bias_attrs = _convert_param_attr_to_list(bias_attr, 3) - self.self_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[0], - bias_attr=bias_attrs[0], - mesh_idx=self.mesh_idx) - self.linear1 = nn.Linear(d_model, - dim_feedforward, - weight_attrs[2], - bias_attr=bias_attrs[2]) - self.linear2 = nn.Linear(dim_feedforward, - d_model, - weight_attrs[2], - bias_attr=bias_attrs[2]) + self.self_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[0], + bias_attr=bias_attrs[0], + mesh_idx=self.mesh_idx, + ) + self.linear1 = nn.Linear( + d_model, dim_feedforward, weight_attrs[2], bias_attr=bias_attrs[2] + ) + self.linear2 = nn.Linear( + dim_feedforward, d_model, weight_attrs[2], bias_attr=bias_attrs[2] + ) self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train") @@ -498,8 +566,9 @@ class TransformerDecoderLayer(nn.Layer): if use_cache is False: tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, use_cache, cache) else: - tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask, - use_cache, cache) + tgt, incremental_cache = self.self_attn( + tgt, tgt, tgt, tgt_mask, use_cache, cache + ) tgt = residual + self.dropout1(tgt) if not self.normalize_before: tgt = self.norm1(tgt) @@ -507,40 +576,54 @@ class TransformerDecoderLayer(nn.Layer): if self.normalize_before: tgt = self.norm2(tgt) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, "x"]) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, "x"] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, "y"]) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, "y"] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.linear1.weight, - MPPP_MESH_LIST[self.mesh_idx], [None, "x"]) + auto.shard_tensor( + self.linear1.weight, 
MPPP_MESH_LIST[self.mesh_idx], [None, "x"] + ) if _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.linear1.weight, - DPMPPP_MESH_LIST[self.mesh_idx], [None, "y"]) + auto.shard_tensor( + self.linear1.weight, + DPMPPP_MESH_LIST[self.mesh_idx], + [None, "y"], + ) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.linear2.weight, _global_process_mesh, - ["x", None]) + auto.shard_tensor( + self.linear2.weight, _global_process_mesh, ["x", None] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.linear2.weight, _global_process_mesh, - ["y", None]) + auto.shard_tensor( + self.linear2.weight, _global_process_mesh, ["y", None] + ) elif _global_parallel_strategy == "mp_pp": - auto.shard_tensor(self.linear2.weight, - MPPP_MESH_LIST[self.mesh_idx], ["x", None]) + auto.shard_tensor( + self.linear2.weight, MPPP_MESH_LIST[self.mesh_idx], ["x", None] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.linear2.weight, - DPMPPP_MESH_LIST[self.mesh_idx], ["y", None]) + auto.shard_tensor( + self.linear2.weight, + DPMPPP_MESH_LIST[self.mesh_idx], + ["y", None], + ) tgt = self.dropout2( - self.linear2(F.gelu(self.linear1(tgt), approximate=True))) + self.linear2(F.gelu(self.linear1(tgt), approximate=True)) + ) tgt = residual + tgt if not self.normalize_before: tgt = self.norm2(tgt) return tgt if use_cache is False else (tgt, incremental_cache) def gen_cache(self, memory): - incremental_cache = self.self_attn.gen_cache(memory, - type=self.self_attn.Cache) + incremental_cache = self.self_attn.gen_cache( + memory, type=self.self_attn.Cache + ) return incremental_cache @@ -549,26 +632,36 @@ class GPTEmbeddings(nn.Layer): Include embeddings from word, position and token_type embeddings """ - def __init__(self, - vocab_size, - hidden_size=768, - hidden_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02): + def __init__( + self, + vocab_size, + hidden_size=768, + hidden_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + ): super(GPTEmbeddings, self).__init__() self.word_embeddings = nn.Embedding( vocab_size, hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.position_embeddings = nn.Embedding( max_position_embeddings, hidden_size, - weight_attr=paddle.ParamAttr(name="pos_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="pos_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.dropout = nn.Dropout(hidden_dropout_prob) def forward(self, input_ids, position_ids=None): @@ -578,17 +671,21 @@ class GPTEmbeddings(nn.Layer): position_ids = seq_length - ones input_embedings = self.word_embeddings(input_ids) if _global_parallel_strategy == "mp": - auto.shard_tensor(self.word_embeddings.weight, _global_process_mesh, - ["x", None]) + auto.shard_tensor( + self.word_embeddings.weight, _global_process_mesh, ["x", None] + ) elif _global_parallel_strategy == "dp_mp": - auto.shard_tensor(self.word_embeddings.weight, _global_process_mesh, - ["y", None]) + auto.shard_tensor( + self.word_embeddings.weight, _global_process_mesh, ["y", None] + ) elif _global_parallel_strategy == "mp_pp": - 
auto.shard_tensor(self.word_embeddings.weight, MPPP_MESH_LIST[0], - ["x", None]) + auto.shard_tensor( + self.word_embeddings.weight, MPPP_MESH_LIST[0], ["x", None] + ) elif _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.word_embeddings.weight, DPMPPP_MESH_LIST[0], - ["y", None]) + auto.shard_tensor( + self.word_embeddings.weight, DPMPPP_MESH_LIST[0], ["y", None] + ) position_embeddings = self.position_embeddings(position_ids) embeddings = input_embedings + position_embeddings @@ -601,36 +698,42 @@ class GPTModel(nn.Layer): The base model of gpt. """ - def __init__(self, - vocab_size=50304, - hidden_size=1024, - num_hidden_layers=24, - num_attention_heads=16, - intermediate_size=4096, - hidden_act="gelu", - hidden_dropout_prob=0., - attention_probs_dropout_prob=0., - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3, - pp_degree=None): + def __init__( + self, + vocab_size=50304, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + pp_degree=None, + ): super(GPTModel, self).__init__() self.pad_token_id = pad_token_id self.initializer_range = initializer_range self.hidden_size = hidden_size self.vocab_size = vocab_size self.layer_per_stage = None - self.pipline_mode = (pp_degree is not None and pp_degree > 1) + self.pipline_mode = pp_degree is not None and pp_degree > 1 if self.pipline_mode: self.layer_per_stage = num_hidden_layers // pp_degree - self.embeddings = GPTEmbeddings(vocab_size, hidden_size, - hidden_dropout_prob, - max_position_embeddings, - type_vocab_size, self.initializer_range) + self.embeddings = GPTEmbeddings( + vocab_size, + hidden_size, + hidden_dropout_prob, + max_position_embeddings, + type_vocab_size, + self.initializer_range, + ) decoder_layers = nn.LayerList() for i in range(num_hidden_layers): mesh_index = None @@ -638,59 +741,82 @@ class GPTModel(nn.Layer): if self.layer_per_stage is not None: mesh_index = i // self.layer_per_stage decoder_layers.append( - DecoderLayer(d_model=hidden_size, - nhead=num_attention_heads, - dim_feedforward=intermediate_size, - dropout=hidden_dropout_prob, - activation=hidden_act, - attn_dropout=attention_probs_dropout_prob, - act_dropout=hidden_dropout_prob, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)), - bias_attr=None, - mesh_idx=mesh_index)) + DecoderLayer( + d_model=hidden_size, + nhead=num_attention_heads, + dim_feedforward=intermediate_size, + dropout=hidden_dropout_prob, + activation=hidden_act, + attn_dropout=attention_probs_dropout_prob, + act_dropout=hidden_dropout_prob, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ), + bias_attr=None, + mesh_idx=mesh_index, + ) + ) Decoder = TransformerDecoder - self.decoder = Decoder(decoder_layers, - num_hidden_layers, - norm="LayerNorm", - hidden_size=hidden_size) + self.decoder = Decoder( + decoder_layers, + num_hidden_layers, + norm="LayerNorm", + hidden_size=hidden_size, + ) self.checkpoints = [] - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - use_cache=False, - cache=None): + def forward( + self, + input_ids, + position_ids=None, + 
attention_mask=None, + use_cache=False, + cache=None, + ): self.checkpoints = [] if position_ids is None: past_length = 0 if cache is not None: past_length = paddle.shape(cache[0].k)[-2] - position_ids = paddle.arange(past_length, - paddle.shape(input_ids)[-1] + - past_length, - dtype='int64') + position_ids = paddle.arange( + past_length, + paddle.shape(input_ids)[-1] + past_length, + dtype='int64', + ) position_ids = position_ids.unsqueeze(0) position_ids = paddle.fluid.layers.expand_as( - position_ids, input_ids) - embedding_output = self.embeddings(input_ids=input_ids, - position_ids=position_ids) + position_ids, input_ids + ) + embedding_output = self.embeddings( + input_ids=input_ids, position_ids=position_ids + ) if _global_parallel_strategy == "pp": - auto.shard_tensor(input_ids, PP_MESH_LIST[0], - [None for i in range(len(input_ids.shape))]) + auto.shard_tensor( + input_ids, + PP_MESH_LIST[0], + [None for i in range(len(input_ids.shape))], + ) if _global_parallel_strategy == "dp_pp": - auto.shard_tensor(input_ids, DPPP_MESH_LIST[0], ["x"] + - [None for i in range(len(input_ids.shape) - 1)]) + auto.shard_tensor( + input_ids, + DPPP_MESH_LIST[0], + ["x"] + [None for i in range(len(input_ids.shape) - 1)], + ) if _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(input_ids, DPMPPP_MESH_LIST[0], ["x"] + - [None for i in range(len(input_ids.shape) - 1)]) - encoder_outputs = self.decoder(embedding_output, - memory=None, - tgt_mask=attention_mask, - use_cache=use_cache, - cache=cache) + auto.shard_tensor( + input_ids, + DPMPPP_MESH_LIST[0], + ["x"] + [None for i in range(len(input_ids.shape) - 1)], + ) + encoder_outputs = self.decoder( + embedding_output, + memory=None, + tgt_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) self.checkpoints.extend(self.decoder.checkpoints) return encoder_outputs @@ -711,22 +837,26 @@ class GPTForPretraining(nn.Layer): super(GPTForPretraining, self).__init__() self.gpt = gpt - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - masked_positions=None, - use_cache=False, - cache=None): + def forward( + self, + input_ids, + position_ids=None, + attention_mask=None, + masked_positions=None, + use_cache=False, + cache=None, + ): input_ids.stop_gradient = True position_ids.stop_gradient = True attention_mask.stop_gradient = True - outputs = self.gpt(input_ids, - position_ids=position_ids, - attention_mask=attention_mask, - use_cache=use_cache, - cache=cache) + outputs = self.gpt( + input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) if use_cache: encoder_outputs, cached_kvs = outputs[:2] else: @@ -766,8 +896,9 @@ class GPTForPretraining(nn.Layer): w_dims_mapping = ["y"] + [None for i in range(len(w.shape) - 1)] if mesh: - matmul = auto.shard_op(paddle.matmul, mesh, - [x_dims_mapping, w_dims_mapping, None]) + matmul = auto.shard_op( + paddle.matmul, mesh, [x_dims_mapping, w_dims_mapping, None] + ) logits = matmul(x, w, transpose_y=True) else: logits = paddle.matmul(x, w, transpose_y=True) @@ -795,26 +926,31 @@ class GPTPretrainingCriterion(nn.Layer): mesh = None if _global_parallel_strategy == "dp": mesh = _global_process_mesh - dims_mapping = ["x" - ] + [None for i in range(len(loss_mask.shape) - 1)] + dims_mapping = ["x"] + [ + None for i in range(len(loss_mask.shape) - 1) + ] elif _global_parallel_strategy == "dp_mp": mesh = _global_process_mesh - dims_mapping = ["x" - ] + [None for i in range(len(loss_mask.shape) - 1)] + dims_mapping = ["x"] + [ + 
None for i in range(len(loss_mask.shape) - 1) + ] elif _global_parallel_strategy == "dp_pp": mesh = DPPP_MESH_LIST[-1] - dims_mapping = ["x" - ] + [None for i in range(len(loss_mask.shape) - 1)] + dims_mapping = ["x"] + [ + None for i in range(len(loss_mask.shape) - 1) + ] elif _global_parallel_strategy == "dp_mp_pp": mesh = DPMPPP_MESH_LIST[-1] - dims_mapping = ["x" - ] + [None for i in range(len(loss_mask.shape) - 1)] + dims_mapping = ["x"] + [ + None for i in range(len(loss_mask.shape) - 1) + ] if mesh: auto.shard_tensor(loss_mask, mesh, dims_mapping) - masked_lm_loss = self.loss_func(prediction_scores, - masked_lm_labels.unsqueeze(2)) + masked_lm_loss = self.loss_func( + prediction_scores, masked_lm_labels.unsqueeze(2) + ) loss_mask = loss_mask.reshape([-1]) masked_lm_loss = paddle.sum(masked_lm_loss.reshape([-1]) * loss_mask) total_loss = masked_lm_loss / loss_mask.sum() diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py index 0fc773cc9bc00356acb783fab183b31539bc5334..ce8bc690a95b840ba687c91ee7289463ee4c32bd 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py @@ -27,7 +27,10 @@ from paddle.distributed.fleet import auto from paddle.distributed import fleet from paddle.fluid.initializer import NumpyArrayInitializer -from paddle.distributed.auto_parallel.utils import save_distributed_checkpoint, load_checkpoint_into_program +from paddle.distributed.auto_parallel.utils import ( + save_distributed_checkpoint, + load_checkpoint_into_program, +) paddle.enable_static() _global_parallel_strategy = None @@ -37,11 +40,9 @@ PP_MESH_1 = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=64, - intermediate_size=4 * 64, - initializer_range=0.02): + def __init__( + self, hidden_size=64, intermediate_size=4 * 64, initializer_range=0.02 + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size @@ -50,14 +51,12 @@ class MLPLayer(nn.Layer): weight_attr = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr)) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -65,15 +64,19 @@ class MLPLayer(nn.Layer): auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None]) auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None]) elif _global_parallel_strategy == "mp": - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, "x"]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - ["x", None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, "x"] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, ["x", None] + ) elif _global_parallel_strategy == "dp": - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, None]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, None] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, None] + ) out = self.norm(input) out 
= self.linear0(out) @@ -84,17 +87,18 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program,start_program), \ - utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 64 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) if _global_parallel_strategy == "pp": auto.shard_tensor(input, PP_MESH_0, [None, None]) @@ -104,9 +108,11 @@ def mlp_forward(train_program, start_program): elif _global_parallel_strategy == "mp": auto.shard_tensor(input, _global_process_mesh, [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -123,19 +129,20 @@ def get_distributed_program(): dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) optimizer = paddle.fluid.optimizer.SGDOptimizer(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer) _, _, dist_startup_prog, dist_main_prog = optimizer.minimize( - loss, startup_program) + loss, startup_program + ) return dist_main_prog, dist_startup_prog, loss class TestMLPSaveLoad(unittest.TestCase): - def setUp(self): paddle.seed(2021) random.seed(2021) @@ -160,30 +167,34 @@ class TestMLPSaveLoad(unittest.TestCase): os.makedirs(path, exist_ok=True) save_distributed_checkpoint(dist_main_prog, path, path) - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) last_res = res[0] ckpt_path = [ "./output_dp0/model_state_rank0.pdmodel", - "./output_dp1/model_state_rank1.pdmodel" + "./output_dp1/model_state_rank1.pdmodel", ] dist_attr_path = [ "./output_dp0/dist_attr_rank0.pdattr", - "./output_dp1/dist_attr_rank1.pdattr" + "./output_dp1/dist_attr_rank1.pdattr", ] load_checkpoint_into_program(ckpt_path, dist_attr_path, dist_main_prog) for step in range(10, 20): - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) self.assertEqual(last_res, res[0]) shutil.rmtree("./output_dp{}".format(paddle.distributed.get_rank())) @@ -208,30 +219,34 @@ class TestMLPSaveLoad(unittest.TestCase): os.makedirs(path, exist_ok=True) save_distributed_checkpoint(dist_main_prog, path, path) - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) 
+ res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) last_res = res[0] ckpt_path = [ "./output_mp0/model_state_rank0.pdmodel", - "./output_mp1/model_state_rank1.pdmodel" + "./output_mp1/model_state_rank1.pdmodel", ] dist_attr_path = [ "./output_mp0/dist_attr_rank0.pdattr", - "./output_mp1/dist_attr_rank1.pdattr" + "./output_mp1/dist_attr_rank1.pdattr", ] load_checkpoint_into_program(ckpt_path, dist_attr_path, dist_main_prog) for step in range(10, 20): - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) self.assertEqual(last_res, res[0]) shutil.rmtree("./output_mp{}".format(paddle.distributed.get_rank())) @@ -261,45 +276,53 @@ class TestMLPSaveLoad(unittest.TestCase): save_distributed_checkpoint(dist_main_prog, path, path) if paddle.distributed.get_rank() in [0]: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + ) else: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) if paddle.distributed.get_rank() in [1]: last_res = res[0] ckpt_path = [ "./output_pp0/model_state_rank0.pdmodel", - "./output_pp1/model_state_rank1.pdmodel" + "./output_pp1/model_state_rank1.pdmodel", ] dist_attr_path = [ "./output_pp0/dist_attr_rank0.pdattr", - "./output_pp1/dist_attr_rank1.pdattr" + "./output_pp1/dist_attr_rank1.pdattr", ] load_checkpoint_into_program(ckpt_path, dist_attr_path, dist_main_prog) for step in range(10, 20): if paddle.distributed.get_rank() in [0]: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + ) else: - res = exe.run(dist_main_prog, - feed={ - "input": input[step * 4:(step + 1) * 4, :], - "label": label[step * 4:(step + 1) * 4, :] - }, - fetch_list=[loss]) + res = exe.run( + dist_main_prog, + feed={ + "input": input[step * 4 : (step + 1) * 4, :], + "label": label[step * 4 : (step + 1) * 4, :], + }, + fetch_list=[loss], + ) if paddle.distributed.get_rank() in [1]: self.assertEqual(last_res, res[0]) diff --git a/python/paddle/fluid/tests/unittests/autograd/config.py b/python/paddle/fluid/tests/unittests/autograd/config.py index 311ca49d39555f64bbe76fa9e8a4e5589f5fae38..ff2d64a43bbc92b57625e6e20fca7716815b0656 100644 --- a/python/paddle/fluid/tests/unittests/autograd/config.py +++ b/python/paddle/fluid/tests/unittests/autograd/config.py @@ -23,27 +23,11 @@ DEFAULT_DTYPE = 'float64' # derivative. It's a empirical value provided by Paddle Science team. 
TOLERANCE = { "float32": { - "first_order_grad": { - "rtol": 1e-3, - "atol": 1e-3, - "eps": 1e-4 - }, - "second_order_grad": { - "rtol": 1e-2, - "atol": 1e-2, - "eps": 1e-2 - } + "first_order_grad": {"rtol": 1e-3, "atol": 1e-3, "eps": 1e-4}, + "second_order_grad": {"rtol": 1e-2, "atol": 1e-2, "eps": 1e-2}, }, "float64": { - "first_order_grad": { - "rtol": 1e-7, - "atol": 1e-7, - "eps": 1e-7 - }, - "second_order_grad": { - "rtol": 1e-5, - "atol": 1e-5, - "eps": 1e-5 - } - } + "first_order_grad": {"rtol": 1e-7, "atol": 1e-7, "eps": 1e-7}, + "second_order_grad": {"rtol": 1e-5, "atol": 1e-5, "eps": 1e-5}, + }, } diff --git a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py index 054edf2ea28c1a6b79d43311ceefad7183fdd991..81f1bdede6f41abf8b2e5cd9aacb7894c16b871b 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py @@ -34,7 +34,6 @@ def make_v(f, inputs): class TestAutogradFunctional(unittest.TestCase): - @classmethod def setUpClass(cls): cls.RAW_INPUTS = { @@ -52,8 +51,9 @@ class TestAutogradFunctional(unittest.TestCase): def gen_input(self, inp, stop_gradient=False): if isinstance(inp, paddle.Tensor): return inp - return paddle.to_tensor(self.RAW_INPUTS[inp], - stop_gradient=stop_gradient) + return paddle.to_tensor( + self.RAW_INPUTS[inp], stop_gradient=stop_gradient + ) def gen_inputs(self, inputs): if isinstance(inputs, list): @@ -62,13 +62,9 @@ class TestAutogradFunctional(unittest.TestCase): inputs = [self.gen_input(inputs)] return inputs - def gen_test_pairs(self, - func, - inputs, - v=None, - create_graph=False, - allow_unused=False): - + def gen_test_pairs( + self, func, inputs, v=None, create_graph=False, allow_unused=False + ): def vjp_test(): nonlocal v xs = self.gen_inputs(inputs) @@ -86,27 +82,27 @@ class TestAutogradFunctional(unittest.TestCase): v = self.gen_inputs(v) outputs = func(*xs) if v is not None: - inputs_grad = paddle.grad(outputs, - xs, - v, - create_graph=create_graph, - allow_unused=allow_unused) + inputs_grad = paddle.grad( + outputs, + xs, + v, + create_graph=create_graph, + allow_unused=allow_unused, + ) else: - inputs_grad = paddle.grad(outputs, - xs, - create_graph=create_graph, - allow_unused=allow_unused) + inputs_grad = paddle.grad( + outputs, + xs, + create_graph=create_graph, + allow_unused=allow_unused, + ) return outputs, inputs_grad return vjp_test, grad_test - def gen_jvp_tests(self, - func, - inputs, - v=None, - create_graph=False, - allow_unused=False): - + def gen_jvp_tests( + self, func, inputs, v=None, create_graph=False, allow_unused=False + ): def jvp_test(): nonlocal v xs = self.gen_inputs(inputs) @@ -117,13 +113,15 @@ class TestAutogradFunctional(unittest.TestCase): xs, v, create_graph=create_graph, - allow_unused=allow_unused) + allow_unused=allow_unused, + ) else: outputs, outputs_grad = paddle.incubate.autograd.jvp( func, xs, create_graph=create_graph, - allow_unused=allow_unused) + allow_unused=allow_unused, + ) return outputs, outputs_grad return jvp_test @@ -144,7 +142,6 @@ class TestAutogradFunctional(unittest.TestCase): class TestVJP(TestAutogradFunctional): - def func_vjp_i1o1(self): test_cases = [ [reduce, 'A'], # noqa @@ -222,20 +219,29 @@ class TestVJP(TestAutogradFunctional): def test_input_single_tensor(self): self.assertIsInstance( paddle.incubate.autograd.vjp(paddle.tanh, paddle.rand((3, 
4)))[1], - paddle.fluid.framework.Variable) + paddle.fluid.framework.Variable, + ) @utils.place(config.DEVICES) @utils.parameterize( (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'expected_exception'), - (('v_shape_not_equal_ys', utils.square, np.random.rand(3), - np.random.rand(1), RuntimeError), )) + ( + ( + 'v_shape_not_equal_ys', + utils.square, + np.random.rand(3), + np.random.rand(1), + RuntimeError, + ), + ), +) class TestVJPException(unittest.TestCase): - def func_vjp(self): with self.assertRaises(self.expected_exception): - paddle.incubate.autograd.vjp(self.fun, paddle.to_tensor(self.xs), - paddle.to_tensor(self.v)) + paddle.incubate.autograd.vjp( + self.fun, paddle.to_tensor(self.xs), paddle.to_tensor(self.v) + ) def test_all_cases(self): with _test_eager_guard(): @@ -245,7 +251,8 @@ class TestVJPException(unittest.TestCase): def jac(grad_fn, f, inputs): assert grad_fn in [ - paddle.incubate.autograd.vjp, paddle.incubate.autograd.jvp + paddle.incubate.autograd.vjp, + paddle.incubate.autograd.jvp, ] if grad_fn is paddle.incubate.autograd.jvp: vs = [paddle.zeros_like(x) for x in inputs] @@ -274,7 +281,6 @@ def jac(grad_fn, f, inputs): class TestJVP(TestAutogradFunctional): - def func_jvp_i1o1(self): test_cases = [ [reduce, 'A'], # noqa @@ -330,36 +336,58 @@ class TestJVP(TestAutogradFunctional): @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'func', 'xs'), ( - ('1d_in_1d_out', utils.square, np.array([2., 3.])), - ('3d_in_3d_out', utils.square, np.random.rand(2, 3, 4)), - ('single_in_single_out', utils.square, np.random.rand(2, 3)), - ('multi_in_single_out', paddle.matmul, - (np.random.rand(2, 2), np.random.rand(2, 2))), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'func', 'xs'), + ( + ('1d_in_1d_out', utils.square, np.array([2.0, 3.0])), + ('3d_in_3d_out', utils.square, np.random.rand(2, 3, 4)), + ('single_in_single_out', utils.square, np.random.rand(2, 3)), + ( + 'multi_in_single_out', + paddle.matmul, + (np.random.rand(2, 2), np.random.rand(2, 2)), + ), + ), +) class TestJacobianNoBatch(unittest.TestCase): - def setUp(self): - self._dtype = self.xs[0].dtype if isinstance( - self.xs, typing.Sequence) else self.xs.dtype - self._eps = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("eps") - self._rtol = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("rtol") - self._atol = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("atol") + self._dtype = ( + self.xs[0].dtype + if isinstance(self.xs, typing.Sequence) + else self.xs.dtype + ) + self._eps = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("eps") + ) + self._rtol = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("rtol") + ) + self._atol = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("atol") + ) def func_jacobian(self): - xs = [paddle.to_tensor(x) for x in self.xs] if isinstance( - self.xs, typing.Sequence) else paddle.to_tensor(self.xs) + xs = ( + [paddle.to_tensor(x) for x in self.xs] + if isinstance(self.xs, typing.Sequence) + else paddle.to_tensor(self.xs) + ) self._actual = paddle.incubate.autograd.Jacobian(self.func, xs, False) self._expected = self._get_expected() Index = collections.namedtuple('Index', ('type', 'value')) - indexes = (Index('all', (slice(0, None, None), slice(0, None, None))), - Index('row', (0, slice(0, None, None))), - Index('col', (slice(0, None, None), 0)), - Index('multi-row', (slice(0, 2, 1), slice(0, None, None)))) + 
indexes = ( + Index('all', (slice(0, None, None), slice(0, None, None))), + Index('row', (0, slice(0, None, None))), + Index('col', (slice(0, None, None), 0)), + Index('multi-row', (slice(0, 2, 1), slice(0, None, None))), + ) self.assertEqual(self._actual[:].numpy().dtype, self._expected.dtype) for index in indexes: np.testing.assert_allclose( @@ -367,15 +395,18 @@ class TestJacobianNoBatch(unittest.TestCase): self._expected.__getitem__(index.value), rtol=self._rtol, atol=self._atol, - err_msg= - f'Testcase {index.type} index not passed, value is {index.value}' + err_msg=f'Testcase {index.type} index not passed, value is {index.value}', ) def _get_expected(self): - xs = [paddle.to_tensor(x) for x in self.xs] if isinstance( - self.xs, typing.Sequence) else paddle.to_tensor(self.xs) - jac = utils._compute_numerical_jacobian(self.func, xs, self._eps, - self._dtype) + xs = ( + [paddle.to_tensor(x) for x in self.xs] + if isinstance(self.xs, typing.Sequence) + else paddle.to_tensor(self.xs) + ) + jac = utils._compute_numerical_jacobian( + self.func, xs, self._eps, self._dtype + ) return utils._np_concat_matrix_sequence(jac, utils.MatrixFormat.NM) def test_all_cases(self): @@ -385,42 +416,71 @@ class TestJacobianNoBatch(unittest.TestCase): @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'func', 'xs'), ( - ('1d_in_1d_out', utils.square, np.array([[1., 2., 3.], [3., 4., 3.]])), - ('3d_in_3d_out', utils.square, np.random.rand(2, 3, 4)), - ('multi_in_single_out', utils.square, np.random.rand(2, 3)), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'func', 'xs'), + ( + ( + '1d_in_1d_out', + utils.square, + np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 3.0]]), + ), + ('3d_in_3d_out', utils.square, np.random.rand(2, 3, 4)), + ('multi_in_single_out', utils.square, np.random.rand(2, 3)), + ), +) class TestJacobianBatchFirst(unittest.TestCase): - def setUp(self): - self._dtype = self.xs[0].dtype if isinstance( - self.xs, typing.Sequence) else self.xs.dtype - self._eps = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("eps") - self._rtol = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("rtol") - self._atol = config.TOLERANCE.get(str( - self._dtype)).get("first_order_grad").get("atol") + self._dtype = ( + self.xs[0].dtype + if isinstance(self.xs, typing.Sequence) + else self.xs.dtype + ) + self._eps = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("eps") + ) + self._rtol = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("rtol") + ) + self._atol = ( + config.TOLERANCE.get(str(self._dtype)) + .get("first_order_grad") + .get("atol") + ) def func_jacobian(self): - xs = [paddle.to_tensor(x) for x in self.xs] if isinstance( - self.xs, typing.Sequence) else paddle.to_tensor(self.xs) + xs = ( + [paddle.to_tensor(x) for x in self.xs] + if isinstance(self.xs, typing.Sequence) + else paddle.to_tensor(self.xs) + ) self._actual = paddle.incubate.autograd.Jacobian(self.func, xs, True) self._expected = self._get_expected() Index = collections.namedtuple('Index', ('type', 'value')) - indexes = (Index( - 'all', - (slice(0, None, None), slice(0, None, None), slice(0, None, None))), - Index('row', - (slice(0, None, None), 0, slice(0, None, None))), - Index('col', - (slice(0, None, None), slice(0, None, None), 0)), - Index('batch', (slice(0, 2, None), slice( - 0, None, None), slice(0, None, None))), - Index('multi_row', - (slice(0, 1, None), slice(0, 2, 1), slice( - 0, None, None)))) + indexes = ( + Index( 
+ 'all', + ( + slice(0, None, None), + slice(0, None, None), + slice(0, None, None), + ), + ), + Index('row', (slice(0, None, None), 0, slice(0, None, None))), + Index('col', (slice(0, None, None), slice(0, None, None), 0)), + Index( + 'batch', + (slice(0, 2, None), slice(0, None, None), slice(0, None, None)), + ), + Index( + 'multi_row', + (slice(0, 1, None), slice(0, 2, 1), slice(0, None, None)), + ), + ) self.assertEqual(self._actual[:].numpy().dtype, self._expected.dtype) for index in indexes: np.testing.assert_allclose( @@ -428,18 +488,22 @@ class TestJacobianBatchFirst(unittest.TestCase): self._expected.__getitem__(index.value), rtol=self._rtol, atol=self._atol, - err_msg= - f'Testcase {index.type} index not passed, value is {index.value}' + err_msg=f'Testcase {index.type} index not passed, value is {index.value}', ) def _get_expected(self): - xs = [paddle.to_tensor(x) for x in self.xs] if isinstance( - self.xs, typing.Sequence) else paddle.to_tensor(self.xs) - jac = utils._compute_numerical_batch_jacobian(self.func, xs, self._eps, - self._dtype, False) + xs = ( + [paddle.to_tensor(x) for x in self.xs] + if isinstance(self.xs, typing.Sequence) + else paddle.to_tensor(self.xs) + ) + jac = utils._compute_numerical_batch_jacobian( + self.func, xs, self._eps, self._dtype, False + ) jac = utils._np_concat_matrix_sequence(jac, utils.MatrixFormat.NBM) - return utils._np_transpose_matrix_format(jac, utils.MatrixFormat.NBM, - utils.MatrixFormat.BNM) + return utils._np_transpose_matrix_format( + jac, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM + ) def test_all_cases(self): with _test_eager_guard(): @@ -448,64 +512,74 @@ class TestJacobianBatchFirst(unittest.TestCase): class TestHessianNoBatch(unittest.TestCase): - @classmethod def setUpClass(self): self.shape = (2, 2) self.dtype = 'float32' self.np_dtype = np.float32 - self.numerical_delta = config.TOLERANCE.get( - self.dtype).get("second_order_grad").get("eps") - self.rtol = config.TOLERANCE.get( - self.dtype).get("second_order_grad").get("rtol") - self.atol = config.TOLERANCE.get( - self.dtype).get("second_order_grad").get("atol") + self.numerical_delta = ( + config.TOLERANCE.get(self.dtype).get("second_order_grad").get("eps") + ) + self.rtol = ( + config.TOLERANCE.get(self.dtype) + .get("second_order_grad") + .get("rtol") + ) + self.atol = ( + config.TOLERANCE.get(self.dtype) + .get("second_order_grad") + .get("atol") + ) self.x = paddle.rand(shape=self.shape, dtype=self.dtype) self.y = paddle.rand(shape=self.shape, dtype=self.dtype) def func_single_input(self): - def func(x): return paddle.sum(paddle.matmul(x, x)) numerical_hessian = utils._compute_numerical_hessian( - func, self.x, self.numerical_delta, self.np_dtype) + func, self.x, self.numerical_delta, self.np_dtype + ) numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False hessian = paddle.incubate.autograd.Hessian(func, self.x) - np.testing.assert_allclose(hessian[:].numpy(), numerical_hessian, - self.rtol, self.atol) + np.testing.assert_allclose( + hessian[:].numpy(), numerical_hessian, self.rtol, self.atol + ) def func_multi_input(self): - def func(x, y): return paddle.sum(paddle.matmul(x, y)) numerical_hessian = utils._compute_numerical_hessian( - func, [self.x, self.y], self.numerical_delta, self.np_dtype) + func, [self.x, self.y], self.numerical_delta, self.np_dtype + ) numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False self.y.stop_gradient = False hessian = 
paddle.incubate.autograd.Hessian(func, [self.x, self.y]) - np.testing.assert_allclose(hessian[:].numpy(), - numerical_hessian, - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + hessian[:].numpy(), + numerical_hessian, + rtol=self.rtol, + atol=self.atol, + ) def func_allow_unused_true(self): - def func(x, y): return paddle.sum(paddle.matmul(x, x)) numerical_hessian = utils._compute_numerical_hessian( - func, [self.x, self.y], self.numerical_delta, self.np_dtype) + func, [self.x, self.y], self.numerical_delta, self.np_dtype + ) numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False self.y.stop_gradient = False hessian = paddle.incubate.autograd.Hessian(func, [self.x, self.y]) - np.testing.assert_allclose(hessian[:].numpy(), numerical_hessian, - self.rtol, self.atol) + np.testing.assert_allclose( + hessian[:].numpy(), numerical_hessian, self.rtol, self.atol + ) def func_create_graph_true(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -514,17 +588,18 @@ class TestHessianNoBatch(unittest.TestCase): return paddle.sum(F.sigmoid(x)) numerical_hessian = utils._compute_numerical_hessian( - func, self.x, self.numerical_delta, self.np_dtype) + func, self.x, self.numerical_delta, self.np_dtype + ) numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False hessian = paddle.incubate.autograd.Hessian(func, self.x) assert hessian[:].stop_gradient == False - np.testing.assert_allclose(hessian[:].numpy(), numerical_hessian, - self.rtol, self.atol) + np.testing.assert_allclose( + hessian[:].numpy(), numerical_hessian, self.rtol, self.atol + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_out_not_single(self): - def func(x): return x * x @@ -548,7 +623,6 @@ class TestHessianNoBatch(unittest.TestCase): class TestHessianBatchFirst(unittest.TestCase): - @classmethod def setUpClass(self): self.x_shape = (5, 2) @@ -557,108 +631,117 @@ class TestHessianBatchFirst(unittest.TestCase): self.nbatch, self.nrow = 5, 2 self.dtype = 'float32' self.np_dtype = np.float32 - self.numerical_delta = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('eps') - self.rtol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('rtol') - self.atol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('atol') + self.numerical_delta = ( + config.TOLERANCE.get(self.dtype).get('second_order_grad').get('eps') + ) + self.rtol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('rtol') + ) + self.atol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('atol') + ) self.x = paddle.rand(shape=self.x_shape, dtype=self.dtype) self.weight = paddle.rand(shape=self.weight_shape, dtype=self.dtype) self.y = paddle.rand(shape=self.y_shape, dtype=self.dtype) def func_single_input(self): - def func(x): return paddle.matmul(x * x, self.weight)[:, 0:1] expected = utils._compute_numerical_batch_hessian( - func, self.x, self.numerical_delta, self.np_dtype) + func, self.x, self.numerical_delta, self.np_dtype + ) H = paddle.incubate.autograd.Hessian(func, self.x, is_batched=True) - actual = utils._np_transpose_matrix_format(H[:].numpy(), - utils.MatrixFormat.BNM, - utils.MatrixFormat.NBM) + actual = utils._np_transpose_matrix_format( + H[:].numpy(), utils.MatrixFormat.BNM, utils.MatrixFormat.NBM + ) actual = actual.reshape((H.shape[1], -1)) np.testing.assert_allclose(actual, expected, self.rtol, self.atol) def 
func_multi_input(self): - def func(x, y): return paddle.matmul(x * x * y * y, self.weight)[:, 0:1] xs_len = 2 expected = utils._compute_numerical_batch_hessian( - func, [self.x, self.y], self.numerical_delta, self.np_dtype) + func, [self.x, self.y], self.numerical_delta, self.np_dtype + ) expected = np.reshape( np.array(expected), - (xs_len, xs_len, self.nrow, self.nbatch, self.nrow)) + (xs_len, xs_len, self.nrow, self.nbatch, self.nrow), + ) expected = [[n for n in row] for row in expected] expected = utils._np_concat_matrix_sequence(expected) self.x.stop_gradient = False self.y.stop_gradient = False - H = paddle.incubate.autograd.Hessian(func, [self.x, self.y], - is_batched=True) - actual = utils._np_transpose_matrix_format(H[:].numpy(), - utils.MatrixFormat.BNM, - utils.MatrixFormat.NBM) + H = paddle.incubate.autograd.Hessian( + func, [self.x, self.y], is_batched=True + ) + actual = utils._np_transpose_matrix_format( + H[:].numpy(), utils.MatrixFormat.BNM, utils.MatrixFormat.NBM + ) np.testing.assert_allclose(actual, expected, self.rtol, self.atol) def func_allow_unused(self): - def func(x, y): return paddle.matmul(x * x, self.weight)[:, 0:1] xs_len = 2 expected = utils._compute_numerical_batch_hessian( - func, [self.x, self.y], self.numerical_delta, self.np_dtype) + func, [self.x, self.y], self.numerical_delta, self.np_dtype + ) expected = np.reshape( np.array(expected), - (xs_len, xs_len, self.nrow, self.nbatch, self.nrow)) + (xs_len, xs_len, self.nrow, self.nbatch, self.nrow), + ) expected = [[n for n in row] for row in expected] expected = utils._np_concat_matrix_sequence(expected) - expected = utils._np_transpose_matrix_format(expected, - utils.MatrixFormat.NBM, - utils.MatrixFormat.BNM) + expected = utils._np_transpose_matrix_format( + expected, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM + ) - actual = paddle.incubate.autograd.Hessian(func, [self.x, self.y], - is_batched=True)[:] + actual = paddle.incubate.autograd.Hessian( + func, [self.x, self.y], is_batched=True + )[:] - np.testing.assert_allclose(actual, - expected, - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + actual, expected, rtol=self.rtol, atol=self.atol + ) def func_stop_gradient(self): - def func(x): return paddle.matmul(x * x, self.weight)[:, 0:1] expected = utils._compute_numerical_batch_hessian( - func, self.x, self.numerical_delta, self.np_dtype) + func, self.x, self.numerical_delta, self.np_dtype + ) x = self.x.clone() x.stop_gradient = True H = paddle.incubate.autograd.Hessian(func, self.x, is_batched=True)[:] - actual = utils._np_transpose_matrix_format(H[:].numpy(), - utils.MatrixFormat.BNM, - utils.MatrixFormat.NBM) + actual = utils._np_transpose_matrix_format( + H[:].numpy(), utils.MatrixFormat.BNM, utils.MatrixFormat.NBM + ) actual = actual.reshape((H.shape[1], -1)) np.testing.assert_allclose(actual, expected, self.rtol, self.atol) def func_out_not_single(self): - def func(x): - return (x * x) + return x * x with self.assertRaises(RuntimeError): - paddle.incubate.autograd.Hessian(func, - paddle.ones((3, 3)), - is_batched=True) + paddle.incubate.autograd.Hessian( + func, paddle.ones((3, 3)), is_batched=True + ) def test_all_cases(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_prim.py b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_prim.py index de12fd1ef901a739dcf10ac44c470753291a2b86..70fb6b1bbfae65798a73c9c270f7dc504c56f599 100644 --- 
a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_prim.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_prim.py @@ -22,23 +22,35 @@ import utils @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), ( - ('unary_float32', paddle.tanh, (np.random.rand(2, 3), ), 'float32'), - ('binary_float32', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float32'), - ('unary_float64', paddle.tanh, (np.random.rand(2, 3), ), 'float64'), - ('binary_float64', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float64'), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), + ( + ('unary_float32', paddle.tanh, (np.random.rand(2, 3),), 'float32'), + ( + 'binary_float32', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float32', + ), + ('unary_float64', paddle.tanh, (np.random.rand(2, 3),), 'float64'), + ( + 'binary_float64', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float64', + ), + ), +) class TestJacobianPrim(unittest.TestCase): - @classmethod def setUpClass(cls): cls.args = [arg.astype(cls.dtype) for arg in cls.args] - cls._rtol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('rtol') - cls._atol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('atol') + cls._rtol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('rtol') + ) + cls._atol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('atol') + ) def setUp(self): paddle.enable_static() @@ -49,7 +61,6 @@ class TestJacobianPrim(unittest.TestCase): paddle.disable_static() def test_jacobian_prim(self): - def wrapper(fun, args): mp = paddle.static.Program() sp = paddle.static.Program() @@ -65,10 +76,11 @@ class TestJacobianPrim(unittest.TestCase): paddle.incubate.autograd.prim2orig() exe = paddle.static.Executor() exe.run(sp) - [jac] = exe.run(mp, - feed={f'arg{i}': arg - for i, arg in enumerate(args)}, - fetch_list=[jac]) + [jac] = exe.run( + mp, + feed={f'arg{i}': arg for i, arg in enumerate(args)}, + fetch_list=[jac], + ) return jac paddle.incubate.autograd.enable_prim() @@ -76,30 +88,41 @@ class TestJacobianPrim(unittest.TestCase): paddle.incubate.autograd.disable_prim() orig_jac = wrapper(self.fun, self.args) - np.testing.assert_allclose(orig_jac, - prim_jac, - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + orig_jac, prim_jac, rtol=self._rtol, atol=self._atol + ) @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), ( - ('unary_float32', paddle.tanh, (np.random.rand(1), ), 'float32'), - ('binary_float32', paddle.multiply, - (np.random.rand(1), np.random.rand(1)), 'float32'), - ('unary_float64', paddle.tanh, (np.random.rand(1), ), 'float64'), - ('binary_float64', paddle.multiply, - (np.random.rand(1), np.random.rand(1)), 'float64'), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), + ( + ('unary_float32', paddle.tanh, (np.random.rand(1),), 'float32'), + ( + 'binary_float32', + paddle.multiply, + (np.random.rand(1), np.random.rand(1)), + 'float32', + ), + ('unary_float64', paddle.tanh, (np.random.rand(1),), 'float64'), + ( + 'binary_float64', + paddle.multiply, + (np.random.rand(1), np.random.rand(1)), + 'float64', + ), + ), +) class TestHessianPrim(unittest.TestCase): - @classmethod def setUpClass(cls): cls.args = [arg.astype(cls.dtype) for arg in cls.args] - cls._rtol = config.TOLERANCE.get( - 
cls.dtype).get('second_order_grad').get('rtol') - cls._atol = config.TOLERANCE.get( - cls.dtype).get('second_order_grad').get('atol') + cls._rtol = ( + config.TOLERANCE.get(cls.dtype).get('second_order_grad').get('rtol') + ) + cls._atol = ( + config.TOLERANCE.get(cls.dtype).get('second_order_grad').get('atol') + ) def setUp(self): paddle.enable_static() @@ -110,7 +133,6 @@ class TestHessianPrim(unittest.TestCase): paddle.disable_static() def test_jacobian_prim(self): - def wrapper(fun, args): mp = paddle.static.Program() sp = paddle.static.Program() @@ -126,11 +148,11 @@ class TestHessianPrim(unittest.TestCase): paddle.incubate.autograd.prim2orig() exe = paddle.static.Executor() exe.run(sp) - [hessian - ] = exe.run(mp, - feed={f'arg{i}': arg - for i, arg in enumerate(args)}, - fetch_list=[hessian]) + [hessian] = exe.run( + mp, + feed={f'arg{i}': arg for i, arg in enumerate(args)}, + fetch_list=[hessian], + ) return hessian paddle.incubate.autograd.enable_prim() @@ -138,30 +160,41 @@ class TestHessianPrim(unittest.TestCase): paddle.incubate.autograd.disable_prim() orig_jac = wrapper(self.fun, self.args) - np.testing.assert_allclose(orig_jac, - prim_jac, - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + orig_jac, prim_jac, rtol=self._rtol, atol=self._atol + ) @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), ( - ('unary_float32', paddle.tanh, (np.random.rand(2, 3), ), 'float32'), - ('binary_float32', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float32'), - ('unary_float64', paddle.tanh, (np.random.rand(2, 3), ), 'float64'), - ('binary_float64', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float64'), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), + ( + ('unary_float32', paddle.tanh, (np.random.rand(2, 3),), 'float32'), + ( + 'binary_float32', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float32', + ), + ('unary_float64', paddle.tanh, (np.random.rand(2, 3),), 'float64'), + ( + 'binary_float64', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float64', + ), + ), +) class TestJvpPrim(unittest.TestCase): - @classmethod def setUpClass(cls): cls.args = [arg.astype(cls.dtype) for arg in cls.args] - cls._rtol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('rtol') - cls._atol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('atol') + cls._rtol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('rtol') + ) + cls._atol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('atol') + ) def setUp(self): paddle.enable_static() @@ -172,7 +205,6 @@ class TestJvpPrim(unittest.TestCase): paddle.disable_static() def test_jacobian_prim(self): - def wrapper(fun, args): mp = paddle.static.Program() sp = paddle.static.Program() @@ -190,9 +222,9 @@ class TestJvpPrim(unittest.TestCase): exe.run(sp) jvp_res = exe.run( mp, - feed={f'arg{i}': arg - for i, arg in enumerate(args)}, - fetch_list=[jvp_res]) + feed={f'arg{i}': arg for i, arg in enumerate(args)}, + fetch_list=[jvp_res], + ) return jvp_res paddle.incubate.autograd.enable_prim() @@ -200,30 +232,41 @@ class TestJvpPrim(unittest.TestCase): paddle.incubate.autograd.disable_prim() orig_jvp = wrapper(self.fun, self.args) - np.testing.assert_allclose(orig_jvp, - prim_jvp, - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + orig_jvp, prim_jvp, rtol=self._rtol, atol=self._atol + ) @utils.place(config.DEVICES) 
-@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), ( - ('unary_float32', paddle.tanh, (np.random.rand(2, 3), ), 'float32'), - ('binary_float32', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float32'), - ('unary_float64', paddle.tanh, (np.random.rand(2, 3), ), 'float64'), - ('binary_float64', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), 'float64'), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'args', 'dtype'), + ( + ('unary_float32', paddle.tanh, (np.random.rand(2, 3),), 'float32'), + ( + 'binary_float32', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float32', + ), + ('unary_float64', paddle.tanh, (np.random.rand(2, 3),), 'float64'), + ( + 'binary_float64', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + 'float64', + ), + ), +) class TestVjpPrim(unittest.TestCase): - @classmethod def setUpClass(cls): cls.args = [arg.astype(cls.dtype) for arg in cls.args] - cls._rtol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('rtol') - cls._atol = config.TOLERANCE.get( - cls.dtype).get('first_order_grad').get('atol') + cls._rtol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('rtol') + ) + cls._atol = ( + config.TOLERANCE.get(cls.dtype).get('first_order_grad').get('atol') + ) def setUp(self): paddle.enable_static() @@ -234,7 +277,6 @@ class TestVjpPrim(unittest.TestCase): paddle.disable_static() def test_jacobian_prim(self): - def wrapper(fun, args): mp = paddle.static.Program() sp = paddle.static.Program() @@ -252,9 +294,9 @@ class TestVjpPrim(unittest.TestCase): exe.run(sp) vjp_res = exe.run( mp, - feed={f'arg{i}': arg - for i, arg in enumerate(args)}, - fetch_list=[vjp_res]) + feed={f'arg{i}': arg for i, arg in enumerate(args)}, + fetch_list=[vjp_res], + ) return vjp_res paddle.incubate.autograd.enable_prim() @@ -263,10 +305,9 @@ class TestVjpPrim(unittest.TestCase): orig_vjp = wrapper(self.fun, self.args) for orig, prim in zip(orig_vjp, prim_vjp): - np.testing.assert_allclose(orig, - prim, - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + orig, prim, rtol=self._rtol, atol=self._atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_static.py b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_static.py index 2102cd81ca720a64d643e04390580570efc03f50..fd3d7d9458ae13fd18f9bd8c054ab6067d15eb54 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_static.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_static.py @@ -26,29 +26,71 @@ paddle.enable_static() @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'stop_gradient'), ( - ('tensor_input', utils.reduce, np.random.rand(2, 3), None, False), - ('tensor_sequence_input', utils.reduce, np.random.rand(2, 3), None, False), - ('v_not_none', utils.reduce, np.random.rand(2, - 3), np.random.rand(1), False), - ('xs_stop_gradient', utils.reduce, np.random.rand( - 2, 3), np.random.rand(1), True), - ('func_mutmul', utils.matmul, - (np.random.rand(3, 2), np.random.rand(2, 3)), None, False), - ('func_mul', utils.mul, - (np.random.rand(3, 3), np.random.rand(3, 3)), None, False), - ('func_out_two', utils.o2, - (np.random.rand(10), np.random.rand(10)), None, False), -)) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'stop_gradient'), + ( + ('tensor_input', utils.reduce, np.random.rand(2, 3), None, False), + ( + 
'tensor_sequence_input', + utils.reduce, + np.random.rand(2, 3), + None, + False, + ), + ( + 'v_not_none', + utils.reduce, + np.random.rand(2, 3), + np.random.rand(1), + False, + ), + ( + 'xs_stop_gradient', + utils.reduce, + np.random.rand(2, 3), + np.random.rand(1), + True, + ), + ( + 'func_mutmul', + utils.matmul, + (np.random.rand(3, 2), np.random.rand(2, 3)), + None, + False, + ), + ( + 'func_mul', + utils.mul, + (np.random.rand(3, 3), np.random.rand(3, 3)), + None, + False, + ), + ( + 'func_out_two', + utils.o2, + (np.random.rand(10), np.random.rand(10)), + None, + False, + ), + ), +) class TestVJP(unittest.TestCase): - def setUp(self): - self.dtype = str(self.xs[0].dtype) if isinstance( - self.xs, typing.Sequence) else str(self.xs.dtype) - self._rtol = config.TOLERANCE.get(str( - self.dtype)).get("first_order_grad").get("rtol") - self._atol = config.TOLERANCE.get(str( - self.dtype)).get("first_order_grad").get("atol") + self.dtype = ( + str(self.xs[0].dtype) + if isinstance(self.xs, typing.Sequence) + else str(self.xs.dtype) + ) + self._rtol = ( + config.TOLERANCE.get(str(self.dtype)) + .get("first_order_grad") + .get("rtol") + ) + self._atol = ( + config.TOLERANCE.get(str(self.dtype)) + .get("first_order_grad") + .get("atol") + ) def _vjp(self): exe = paddle.static.Executor() @@ -56,9 +98,11 @@ class TestVJP(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=self.stop_gradient) - ys, xs_grads = paddle.incubate.autograd.vjp(self.fun, static_xs, - static_v) + self.xs, self.v, stop_gradient=self.stop_gradient + ) + ys, xs_grads = paddle.incubate.autograd.vjp( + self.fun, static_xs, static_v + ) exe.run(sp) return exe.run(mp, feed=feed, fetch_list=[ys, xs_grads]) @@ -68,9 +112,13 @@ class TestVJP(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) xs_grads = paddle.static.gradients(ys, static_xs, static_v) exe.run(sp) return exe.run(mp, feed=feed, fetch_list=[ys, xs_grads]) @@ -80,19 +128,25 @@ class TestVJP(unittest.TestCase): expected = self._expected_vjp() self.assertEqual(len(actual), len(expected)) for i in range(len(actual)): - np.testing.assert_allclose(actual[i], - expected[i], - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + actual[i], expected[i], rtol=self._rtol, atol=self._atol + ) @utils.place(config.DEVICES) @utils.parameterize( (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'expected_exception'), - (('v_shape_not_equal_ys', utils.square, np.random.rand(3), - np.random.rand(1), RuntimeError), )) + ( + ( + 'v_shape_not_equal_ys', + utils.square, + np.random.rand(3), + np.random.rand(1), + RuntimeError, + ), + ), +) class TestVJPException(unittest.TestCase): - def setUp(self): self.exe = paddle.static.Executor() @@ -101,9 +155,11 @@ class TestVJPException(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v) - ys, xs_grads = paddle.incubate.autograd.vjp(self.fun, static_xs, - static_v) + self.xs, self.v + ) + ys, xs_grads = paddle.incubate.autograd.vjp( + 
self.fun, static_xs, static_v + ) self.exe.run(sp) return self.exe.run(mp, feed, fetch_list=[ys, xs_grads]) @@ -176,11 +232,11 @@ def make_tensors(inps): all_data_shapes = { - 'A': [[1., 2.]], - 'B': [[1., 2.], [2., 1.]], - 'C': [[2., 2.], [2., 1.]], - 'D': [[[2., 2.], [2., 1.]], [[1., 2.], [2., 1.]]], - 'E': [[[3., 4.], [2., 3.]], [[2., 1.], [1., 3.]]], + 'A': [[1.0, 2.0]], + 'B': [[1.0, 2.0], [2.0, 1.0]], + 'C': [[2.0, 2.0], [2.0, 1.0]], + 'D': [[[2.0, 2.0], [2.0, 1.0]], [[1.0, 2.0], [2.0, 1.0]]], + 'E': [[[3.0, 4.0], [2.0, 3.0]], [[2.0, 1.0], [1.0, 3.0]]], } @@ -190,7 +246,6 @@ def prepare_data(test, input_shapes, dtype): class TestJacobianFloat32(unittest.TestCase): - @classmethod def setUpClass(self): paddle.enable_static() @@ -201,8 +256,9 @@ class TestJacobianFloat32(unittest.TestCase): self.dtype = 'float32' self.np_dtype = np.float32 prepare_data(self, all_data_shapes, self.dtype) - self.eps = config.TOLERANCE.get( - self.dtype).get('first_order_grad').get('eps') + self.eps = ( + config.TOLERANCE.get(self.dtype).get('first_order_grad').get('eps') + ) # self.rtol = config.TOLERANCE.get(self.dtype).get('first_order_grad').get('rtol') # self.atol = config.TOLERANCE.get(self.dtype).get('first_order_grad').get('atol') # Do't use tolerance in config, which will cause this test case failed. @@ -227,17 +283,17 @@ class TestJacobianFloat32(unittest.TestCase): else: feeds = {'x': inps} pd_jacobians = exe.run(main, feed=feeds, fetch_list=[full_jacobian])[0] - np_jacobians = approx_jacobian(np_f, - inps, - self.dtype, - self.eps, - batch=batch) + np_jacobians = approx_jacobian( + np_f, inps, self.dtype, self.eps, batch=batch + ) if batch: np_jacobians = utils._np_transpose_matrix_format( - np_jacobians, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM) + np_jacobians, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM + ) - np.testing.assert_allclose(pd_jacobians, np_jacobians, self.rtol, - self.atol) + np.testing.assert_allclose( + pd_jacobians, np_jacobians, self.rtol, self.atol + ) def run_test_by_rows(self, pd_f, np_f, inps, batch=False): main = fluid.Program() @@ -261,8 +317,9 @@ class TestJacobianFloat32(unittest.TestCase): pd_jac = exe.run(main, feed=feeds, fetch_list=[rows]) np_jac = approx_jacobian(np_f, inps, self.dtype, self.eps, batch=batch) for i in range(nrow): - np.testing.assert_allclose(pd_jac[i], np_jac[i], self.rtol, - self.atol) + np.testing.assert_allclose( + pd_jac[i], np_jac[i], self.rtol, self.atol + ) def run_test_by_entries(self, pd_f, np_f, inps, batch=False): main = fluid.Program() @@ -293,7 +350,6 @@ class TestJacobianFloat32(unittest.TestCase): np.testing.assert_allclose(pd_entry, np_entry, self.rtol, self.atol) def test_square(self): - def pd_f(x): return paddle.multiply(x, x) @@ -305,7 +361,6 @@ class TestJacobianFloat32(unittest.TestCase): self.run_test_by_entries(pd_f, np_f, self.A) def test_mul(self): - def pd_f(x, y): return paddle.multiply(x, y) @@ -322,7 +377,6 @@ class TestJacobianFloat32(unittest.TestCase): self.run_test_by_entries(pd_f, np_f, [self.B, self.C]) def test_matmul(self): - def pd_f(x, y): return paddle.matmul(x, y) @@ -335,7 +389,6 @@ class TestJacobianFloat32(unittest.TestCase): self.run_test_by_entries(pd_f, np_f, [self.B, self.C]) def test_batch_matmul(self): - def pd_f(x, y): return paddle.matmul(x, y) @@ -349,7 +402,6 @@ class TestJacobianFloat32(unittest.TestCase): class TestJacobianFloat64(TestJacobianFloat32): - @classmethod def setUpClass(self): paddle.enable_static() @@ -359,16 +411,18 @@ class TestJacobianFloat64(TestJacobianFloat32): 
self.place = fluid.CPUPlace() self.dtype = 'float64' prepare_data(self, all_data_shapes, self.dtype) - self.eps = config.TOLERANCE.get( - self.dtype).get('first_order_grad').get('eps') - self.rtol = config.TOLERANCE.get( - self.dtype).get('first_order_grad').get('rtol') - self.atol = config.TOLERANCE.get( - self.dtype).get('first_order_grad').get('atol') + self.eps = ( + config.TOLERANCE.get(self.dtype).get('first_order_grad').get('eps') + ) + self.rtol = ( + config.TOLERANCE.get(self.dtype).get('first_order_grad').get('rtol') + ) + self.atol = ( + config.TOLERANCE.get(self.dtype).get('first_order_grad').get('atol') + ) class TestHessianFloat32(unittest.TestCase): - @classmethod def setUpClass(self): paddle.enable_static() @@ -378,12 +432,19 @@ class TestHessianFloat32(unittest.TestCase): self.place = fluid.CPUPlace() self.dtype = 'float32' prepare_data(self, all_data_shapes, self.dtype) - self.eps = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('eps') - self.rtol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('rtol') - self.atol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('atol') + self.eps = ( + config.TOLERANCE.get(self.dtype).get('second_order_grad').get('eps') + ) + self.rtol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('rtol') + ) + self.atol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('atol') + ) def run_test_by_fullmatrix(self, pd_f, inps, np_hess, batch=False): main = fluid.Program() @@ -403,7 +464,6 @@ class TestHessianFloat32(unittest.TestCase): np.testing.assert_allclose(pd_hess, np_hess, self.rtol, self.atol) def test_square(self): - def pd_f(x): """Input is a square matrix.""" return paddle.matmul(x, x.T).flatten().sum() @@ -418,7 +478,6 @@ class TestHessianFloat32(unittest.TestCase): class TestHessianFloat64(TestHessianFloat32): - @classmethod def setUpClass(self): paddle.enable_static() @@ -428,12 +487,19 @@ class TestHessianFloat64(TestHessianFloat32): self.place = fluid.CPUPlace() self.dtype = 'float64' prepare_data(self, all_data_shapes, self.dtype) - self.eps = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('eps') - self.rtol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('rtol') - self.atol = config.TOLERANCE.get( - self.dtype).get('second_order_grad').get('atol') + self.eps = ( + config.TOLERANCE.get(self.dtype).get('second_order_grad').get('eps') + ) + self.rtol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('rtol') + ) + self.atol = ( + config.TOLERANCE.get(self.dtype) + .get('second_order_grad') + .get('atol') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py b/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py index f345f583945752afdd7a4d3f0c6674708048f1b6..e90d6871c300e7af4acc659d6195809c982419b6 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py @@ -24,14 +24,14 @@ paddle.enable_static() ############################ Test linearize rules ############################ class TestAddPJVPAndTranspose(unittest.TestCase): - def setUp(self): self.main_program = paddle.static.Program() self.startup_program = paddle.static.Program() self.layer_help = LayerHelper('TestPrim2Orig') - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + 
self.main_program, self.startup_program + ): self.init_data() def init_data(self): @@ -41,8 +41,9 @@ class TestAddPJVPAndTranspose(unittest.TestCase): Y = paddle.static.data(name='Y', shape=[2, 2], dtype='float') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -67,12 +68,15 @@ class TestAddPJVPAndTranspose(unittest.TestCase): ] def test_op(self): - with paddle.static.program_guard(self.main_program, - self.startup_program): - op = self.layer_help.append_op(type=self.op_type, - inputs=self.prim_input, - outputs=self.prim_output, - attrs=self.prim_attrs) + with paddle.static.program_guard( + self.main_program, self.startup_program + ): + op = self.layer_help.append_op( + type=self.op_type, + inputs=self.prim_input, + outputs=self.prim_output, + attrs=self.prim_attrs, + ) jvp_out = _jvp(op, *self.jvp_args) jvp_out = flatten(jvp_out) @@ -91,7 +95,6 @@ class TestAddPJVPAndTranspose(unittest.TestCase): class TestSubPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'sub_p' @@ -99,8 +102,9 @@ class TestSubPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[5, 6], dtype='int64') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -123,12 +127,11 @@ class TestSubPJVPAndTranspose(TestAddPJVPAndTranspose): 'sub_p', # transpose op: 'fill_constant_p', - 'sub_p' + 'sub_p', ] class TestMulPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'mul_p' @@ -136,8 +139,9 @@ class TestMulPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[5, 6], dtype='int64') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -163,12 +167,11 @@ class TestMulPJVPAndTranspose(TestAddPJVPAndTranspose): 'mul_p', 'add_p', # transpose op: - 'mul_p' + 'mul_p', ] class TestDivPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'div_p' @@ -176,8 +179,9 @@ class TestDivPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[5, 6], dtype='int64') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -205,12 +209,11 @@ class TestDivPJVPAndTranspose(TestAddPJVPAndTranspose): 'mul_p', 'sub_p', # transpose op: - 'div_p' + 'div_p', ] class TestSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'sqrt_p' @@ -219,14 +222,15 @@ class TestSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: 
self.prim_output['Y']} self.all_ops = [ @@ -242,7 +246,6 @@ class TestSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): class TestRSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'rsqrt_p' @@ -251,14 +254,15 @@ class TestRSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -275,7 +279,6 @@ class TestRSqrtPJVPAndTranspose(TestAddPJVPAndTranspose): class TestTanhPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'tanh_p' @@ -284,14 +287,15 @@ class TestTanhPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -307,7 +311,6 @@ class TestTanhPJVPAndTranspose(TestAddPJVPAndTranspose): class TestSinPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'sin_p' @@ -316,14 +319,15 @@ class TestSinPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -337,7 +341,6 @@ class TestSinPJVPAndTranspose(TestAddPJVPAndTranspose): class TestCosPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'cos_p' @@ -346,14 +349,15 @@ class TestCosPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -369,7 +373,6 @@ class TestCosPJVPAndTranspose(TestAddPJVPAndTranspose): class TestExpPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'exp_p' @@ -378,14 +381,15 @@ class TestExpPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -398,7 +402,6 @@ class TestExpPJVPAndTranspose(TestAddPJVPAndTranspose): class 
TestErfPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'erf_p' @@ -407,14 +410,15 @@ class TestErfPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -434,7 +438,6 @@ class TestErfPJVPAndTranspose(TestAddPJVPAndTranspose): class TestAbsPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'abs_p' @@ -443,14 +446,15 @@ class TestAbsPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -467,7 +471,6 @@ class TestAbsPJVPAndTranspose(TestAddPJVPAndTranspose): class TestCastPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'cast_p' @@ -476,14 +479,15 @@ class TestCastPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'dtype': paddle.float64} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose @@ -498,12 +502,11 @@ class TestCastPJVPAndTranspose(TestAddPJVPAndTranspose): # jvp op: 'cast_p', # transpose op: - 'cast_p' + 'cast_p', ] class TestLogPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'log_p' @@ -512,14 +515,15 @@ class TestLogPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} self.all_ops = [ @@ -532,7 +536,6 @@ class TestLogPJVPAndTranspose(TestAddPJVPAndTranspose): class TestReshapePJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'reshape_p' @@ -541,14 +544,15 @@ class TestReshapePJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'shape': [2, 32]} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[8, 8], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose @@ -570,7 +574,6 @@ class TestReshapePJVPAndTranspose(TestAddPJVPAndTranspose): class 
TestBroadcastPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'broadcast_p' @@ -579,21 +582,22 @@ class TestBroadcastPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'shape': [2, 10, 7]} # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[10, 7], dtype='int64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose check_dot = lambda v: v is X - Y_BAR = paddle.static.data(name='Y_BAR', - shape=[2, 10, 7], - dtype='int64') + Y_BAR = paddle.static.data( + name='Y_BAR', shape=[2, 10, 7], dtype='int64' + ) self.transpose_args = (check_dot, Y_BAR) self.transpose_out_shape_map = { 0: X, @@ -606,12 +610,11 @@ class TestBroadcastPJVPAndTranspose(TestAddPJVPAndTranspose): 'broadcast_p', # transpose op: 'reduce_sum_p', - 'reshape_p' + 'reshape_p', ] class TestTransposePJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'transpose_p' @@ -620,23 +623,24 @@ class TestTransposePJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'axis': [0, 2, 3, 1]} # Set JVP - X_DOT = paddle.static.data(name='X_DOT', - shape=[2, 3, 4, 5], - dtype='int64') - self.jvp_args = (X_DOT, ) + X_DOT = paddle.static.data( + name='X_DOT', shape=[2, 3, 4, 5], dtype='int64' + ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose check_dot = lambda v: v is X - Y_BAR = paddle.static.data(name='Y_BAR', - shape=[2, 4, 5, 3], - dtype='int64') + Y_BAR = paddle.static.data( + name='Y_BAR', shape=[2, 4, 5, 3], dtype='int64' + ) self.transpose_args = (check_dot, Y_BAR) self.transpose_out_shape_map = { 0: X, @@ -653,7 +657,6 @@ class TestTransposePJVPAndTranspose(TestAddPJVPAndTranspose): class TestSplitPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'split_p' @@ -664,16 +667,18 @@ class TestSplitPJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_output = { 'YS': [ self.layer_help.create_variable_for_type_inference( - dtype=X.dtype) for i in range(4) + dtype=X.dtype + ) + for i in range(4) ] } self.prim_attrs = {'num_or_sections': [2, 3, 4, 1], 'axis': 2} # Set JVP - X_DOT = paddle.static.data(name='X_DOT', - shape=[2, 7, 10], - dtype='int64') - self.jvp_args = (X_DOT, ) + X_DOT = paddle.static.data( + name='X_DOT', shape=[2, 7, 10], dtype='int64' + ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = { 0: self.prim_output['YS'][0], 1: self.prim_output['YS'][1], @@ -705,7 +710,6 @@ class TestSplitPJVPAndTranspose(TestAddPJVPAndTranspose): class TestConcatPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'concat_p' @@ -716,8 +720,9 @@ class TestConcatPJVPAndTranspose(TestAddPJVPAndTranspose): 'XS': [X, Y, Z], } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'axis': 1} @@ -727,14 +732,14 @@ class TestConcatPJVPAndTranspose(TestAddPJVPAndTranspose): paddle.static.data(name='X_DOT2', shape=[3, 2, 5], 
dtype='float64'), paddle.static.data(name='X_DOT3', shape=[3, 3, 5], dtype='float64'), ] - self.jvp_args = (XS_DOT, ) + self.jvp_args = (XS_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose check_dot = lambda v: v is X or v is Y or v is Z - Y_BAR = paddle.static.data(name='Y_BAR', - shape=[3, 14, 5], - dtype='float64') + Y_BAR = paddle.static.data( + name='Y_BAR', shape=[3, 14, 5], dtype='float64' + ) self.transpose_args = (check_dot, Y_BAR) self.transpose_out_shape_map = { 0: X, @@ -753,30 +758,30 @@ class TestConcatPJVPAndTranspose(TestAddPJVPAndTranspose): class TestReduceSumPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'reduce_sum_p' X = paddle.static.data(name='X', shape=[2, 3, 4, 5], dtype='float64') self.prim_input = {'X': X} self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'axis': [2], 'keepdim': False} # Set JVP - X_DOT = paddle.static.data(name='X_DOT1', - shape=[2, 3, 4, 5], - dtype='float64') - self.jvp_args = (X_DOT, ) + X_DOT = paddle.static.data( + name='X_DOT1', shape=[2, 3, 4, 5], dtype='float64' + ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose check_dot = lambda v: v is X - Y_BAR = paddle.static.data(name='Y_BAR', - shape=[2, 3, 5], - dtype='float64') + Y_BAR = paddle.static.data( + name='Y_BAR', shape=[2, 3, 5], dtype='float64' + ) self.transpose_args = (check_dot, Y_BAR) self.transpose_out_shape_map = { 0: X, @@ -794,7 +799,6 @@ class TestReduceSumPJVPAndTranspose(TestAddPJVPAndTranspose): class TestMatmulPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'matmul_p' @@ -802,8 +806,9 @@ class TestMatmulPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[3, 4], dtype='float64') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -835,7 +840,6 @@ class TestMatmulPJVPAndTranspose(TestAddPJVPAndTranspose): class TestSliceSelectPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'slice_select_p' @@ -844,19 +848,20 @@ class TestSliceSelectPJVPAndTranspose(TestAddPJVPAndTranspose): 'X': X, } self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = { 'axis': [1], 'starts': [0], 'ends': [20], - 'strides': [2] + 'strides': [2], } # Set JVP X_DOT = paddle.static.data(name='X_DOT', shape=[3, 20], dtype='float64') - self.jvp_args = (X_DOT, ) + self.jvp_args = (X_DOT,) self.jvp_out_shape_map = {0: self.prim_output['Y']} # Set transpose @@ -879,7 +884,6 @@ class TestSliceSelectPJVPAndTranspose(TestAddPJVPAndTranspose): class TestSliceAssignPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'slice_assign_p' @@ -887,14 +891,15 @@ class TestSliceAssignPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[3, 5], dtype='float64') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + 
dtype=X.dtype + ) } self.prim_attrs = { 'axis': [1], 'starts': [0], 'ends': [10], - 'strides': [2] + 'strides': [2], } # Set JVP @@ -917,23 +922,23 @@ class TestSliceAssignPJVPAndTranspose(TestAddPJVPAndTranspose): # transpose op: 'slice_assign_p', 'slice_select_p', - 'fill_constant_p' + 'fill_constant_p', ] class TestGatherPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'gather_p' X = paddle.static.data(name='X', shape=[9, 5], dtype='float64') - IndexTensor = paddle.static.data(name='IndexTensor', - shape=[3], - dtype='int32') + IndexTensor = paddle.static.data( + name='IndexTensor', shape=[3], dtype='int32' + ) self.prim_input = {'X': X, 'IndexTensor': IndexTensor} self.prim_output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'axis': 1} @@ -965,19 +970,19 @@ class TestGatherPJVPAndTranspose(TestAddPJVPAndTranspose): class TestScatterAddPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'scatter_add_p' X = paddle.static.data(name='X', shape=[9, 5], dtype='float64') Y = paddle.static.data(name='Y', shape=[9, 3], dtype='float64') - IndexTensor = paddle.static.data(name='IndexTensor', - shape=[3], - dtype='int32') + IndexTensor = paddle.static.data( + name='IndexTensor', shape=[3], dtype='int32' + ) self.prim_input = {'X': X, 'Y': Y, 'IndexTensor': IndexTensor} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {'axis': 1} @@ -1001,12 +1006,11 @@ class TestScatterAddPJVPAndTranspose(TestAddPJVPAndTranspose): # transpose op: 'scatter_add_p', 'fill_constant_p', - 'gather_p' + 'gather_p', ] class TestSelectPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'select_p' @@ -1016,15 +1020,16 @@ class TestSelectPJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_input = {'Condition': Cond, 'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} # Set JVP - Cond_DOT = paddle.static.data(name='Cond_DOT', - shape=[9, 5], - dtype='float64') + Cond_DOT = paddle.static.data( + name='Cond_DOT', shape=[9, 5], dtype='float64' + ) X_DOT = paddle.static.data(name='X_DOT', shape=[9, 5], dtype='float64') Y_DOT = paddle.static.data(name='Y_DOT', shape=[9, 5], dtype='float64') self.jvp_args = (Cond_DOT, X_DOT, Y_DOT) @@ -1051,7 +1056,6 @@ class TestSelectPJVPAndTranspose(TestAddPJVPAndTranspose): class TestEqPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'eq_p' @@ -1060,8 +1064,9 @@ class TestEqPJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -1081,7 +1086,6 @@ class TestEqPJVPAndTranspose(TestAddPJVPAndTranspose): class TestGtPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'gt_p' @@ -1090,8 +1094,9 @@ class TestGtPJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - 
self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -1111,7 +1116,6 @@ class TestGtPJVPAndTranspose(TestAddPJVPAndTranspose): class TestGePJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'ge_p' @@ -1120,8 +1124,9 @@ class TestGePJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -1141,7 +1146,6 @@ class TestGePJVPAndTranspose(TestAddPJVPAndTranspose): class TestNePJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'ne_p' @@ -1150,8 +1154,9 @@ class TestNePJVPAndTranspose(TestAddPJVPAndTranspose): self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -1171,7 +1176,6 @@ class TestNePJVPAndTranspose(TestAddPJVPAndTranspose): class TestPowPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'pow_p' @@ -1179,8 +1183,9 @@ class TestPowPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[5, 6], dtype='float32') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} @@ -1211,7 +1216,6 @@ class TestPowPJVPAndTranspose(TestAddPJVPAndTranspose): class TestMaxPJVPAndTranspose(TestAddPJVPAndTranspose): - def init_data(self): # Set prim op self.op_type = 'max_p' @@ -1219,8 +1223,9 @@ class TestMaxPJVPAndTranspose(TestAddPJVPAndTranspose): Y = paddle.static.data(name='Y', shape=[5, 6], dtype='float32') self.prim_input = {'X': X, 'Y': Y} self.prim_output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.prim_attrs = {} diff --git a/python/paddle/fluid/tests/unittests/autograd/test_minimize.py b/python/paddle/fluid/tests/unittests/autograd/test_minimize.py index 10259802c6933392a318df8a7a5f90d1eac1ae2a..e3de0f24e0d522ed85d227aee7e9f16231524408 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_minimize.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_minimize.py @@ -17,14 +17,16 @@ import unittest import numpy as np import paddle from paddle.incubate.autograd.primx import prim2orig -from paddle.incubate.autograd.utils import (disable_prim, enable_prim, - prim_enabled) +from paddle.incubate.autograd.utils import ( + disable_prim, + enable_prim, + prim_enabled, +) paddle.enable_static() class TestMinimize(unittest.TestCase): - def model(self, x, w, bias, opt): paddle.seed(0) place = paddle.CPUPlace() @@ -36,12 +38,12 @@ class TestMinimize(unittest.TestCase): with paddle.static.program_guard(main, startup): input_x = paddle.static.data('x', x.shape, dtype=x.dtype) input_x.stop_gradient = False - params_w = paddle.static.create_parameter(shape=w.shape, - dtype=w.dtype, - is_bias=False) - params_bias = paddle.static.create_parameter(shape=bias.shape, - dtype=bias.dtype, - is_bias=True) + params_w = 
paddle.static.create_parameter( + shape=w.shape, dtype=w.dtype, is_bias=False + ) + params_bias = paddle.static.create_parameter( + shape=bias.shape, dtype=bias.dtype, is_bias=True + ) y = paddle.tanh(paddle.matmul(input_x, params_w) + params_bias) loss = paddle.norm(y, p=2) opt = opt @@ -49,13 +51,9 @@ class TestMinimize(unittest.TestCase): if prim_enabled(): prim2orig(main.block(0)) exe.run(startup) - grads = exe.run(main, - feed={ - 'x': x, - 'w': w, - 'bias': bias - }, - fetch_list=grads) + grads = exe.run( + main, feed={'x': x, 'w': w, 'bias': bias}, fetch_list=grads + ) return grads def test_adam(self): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py b/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py index 914ea38fa9ad79251f5814d70a51715545f9c407..69161d89a1162f46adb1b6b6aabb6c2c2a9187d8 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py @@ -24,14 +24,14 @@ paddle.enable_static() ############################ Test orig2prim rules ############################ class TestElementWiseAddOrig2Prim(unittest.TestCase): - def setUp(self): self.main_program = paddle.static.Program() self.startup_program = paddle.static.Program() self.layer_help = LayerHelper('TestOrig2Prim') - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + self.main_program, self.startup_program + ): self.init_data() def init_data(self): @@ -41,8 +41,9 @@ class TestElementWiseAddOrig2Prim(unittest.TestCase): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -52,12 +53,15 @@ class TestElementWiseAddOrig2Prim(unittest.TestCase): self.out_map = {0: self.output['Out']} def test_op(self): - with paddle.static.program_guard(self.main_program, - self.startup_program): - op = self.layer_help.append_op(type=self.op_type, - inputs=self.input, - outputs=self.output, - attrs=self.attrs) + with paddle.static.program_guard( + self.main_program, self.startup_program + ): + op = self.layer_help.append_op( + type=self.op_type, + inputs=self.input, + outputs=self.output, + attrs=self.attrs, + ) prim_out = _orig2prim(op, *self.orig2prim_args) all_ops = [op.type for op in self.main_program.block(0).ops] @@ -69,7 +73,6 @@ class TestElementWiseAddOrig2Prim(unittest.TestCase): class TestSqrtOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'sqrt' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -78,19 +81,19 @@ class TestSqrtOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['sqrt', 'sqrt_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestElementWiseMulOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'elementwise_mul' X = paddle.static.data(name='X', shape=[8, 8], dtype='float') @@ -98,8 +101,9 @@ class TestElementWiseMulOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': 
self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -110,7 +114,6 @@ class TestElementWiseMulOrig2Prim(TestElementWiseAddOrig2Prim): class TestElementWiseDivOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'elementwise_div' X = paddle.static.data(name='X', shape=[8, 8], dtype='float') @@ -118,8 +121,9 @@ class TestElementWiseDivOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -130,7 +134,6 @@ class TestElementWiseDivOrig2Prim(TestElementWiseAddOrig2Prim): class TestMatmulV2Orig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'matmul_v2' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -138,8 +141,9 @@ class TestMatmulV2Orig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'trans_x': True, 'trans_y': True} @@ -149,7 +153,6 @@ class TestMatmulV2Orig2Prim(TestElementWiseAddOrig2Prim): class TestTanhOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'tanh' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -158,18 +161,18 @@ class TestTanhOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['tanh', 'tanh_p'] self.out_map = {0: self.output['Out']} class TestSinOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'sin' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -178,18 +181,18 @@ class TestSinOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['sin', 'sin_p'] self.out_map = {0: self.output['Out']} class TestCosOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'cos' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -198,18 +201,18 @@ class TestCosOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['cos', 'cos_p'] self.out_map = {0: self.output['Out']} class TestExpOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'exp' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -218,18 +221,18 @@ class TestExpOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['exp', 
'exp_p'] self.out_map = {0: self.output['Out']} class TestErfOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'erf' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -238,18 +241,18 @@ class TestErfOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['erf', 'erf_p'] self.out_map = {0: self.output['Out']} class TestAbsOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'abs' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -258,18 +261,18 @@ class TestAbsOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['abs', 'abs_p'] self.out_map = {0: self.output['Out']} class TestLogOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'log' X = paddle.static.data(name='X', shape=[3, 4], dtype='float') @@ -278,18 +281,18 @@ class TestLogOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['log', 'log_p'] self.out_map = {0: self.output['Out']} class TestReshape2Orig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'reshape2' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -298,10 +301,10 @@ class TestReshape2Orig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - X, - 'XShape': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': X, + 'XShape': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ), } self.attrs = {'shape': [6, 5]} @@ -316,7 +319,6 @@ class TestReshape2Orig2Prim(TestElementWiseAddOrig2Prim): class TestConcatOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'concat' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -326,8 +328,9 @@ class TestConcatOrig2Prim(TestElementWiseAddOrig2Prim): 'X': [X, Y], } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'axis': 0} @@ -340,7 +343,6 @@ class TestConcatOrig2Prim(TestElementWiseAddOrig2Prim): class TestSliceOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'slice' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -349,8 +351,9 @@ class TestSliceOrig2Prim(TestElementWiseAddOrig2Prim): 'Input': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = { 'axes': [0], @@ -364,7 +367,6 @@ class TestSliceOrig2Prim(TestElementWiseAddOrig2Prim): class TestFillZerosLikeOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'fill_zeros_like' X = paddle.static.data(name='X', shape=[5, 6], 
dtype='int64') @@ -373,18 +375,18 @@ class TestFillZerosLikeOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['fill_zeros_like', 'fill_constant_p'] self.out_map = {0: self.output['Out']} class TestFillAnyLikeOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'fill_any_like' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -393,18 +395,18 @@ class TestFillAnyLikeOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['fill_any_like', 'fill_constant_p'] self.out_map = {0: self.output['Out']} class TestFillAnyLikeOrig2Prim2(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'fill_any_like' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -413,18 +415,18 @@ class TestFillAnyLikeOrig2Prim2(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'dtype': paddle.float32, 'value': 5} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['fill_any_like', 'fill_constant_p'] self.out_map = {0: self.output['Out']} class TestSumOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'sum' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -432,18 +434,18 @@ class TestSumOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = ((X, Y), ) + self.orig2prim_args = ((X, Y),) self.all_ops = ['sum', 'add_p'] self.out_map = {0: self.output['Out']} class TestPNormOrig2Prim1(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'p_norm' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -452,21 +454,21 @@ class TestPNormOrig2Prim1(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = { 'porder': 1, 'asvector': True, } - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['p_norm', 'reshape_p', 'abs_p', 'reduce_sum_p'] self.out_map = {0: self.output['Out']} class TestPNormOrig2Prim2(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'p_norm' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -475,23 +477,27 @@ class TestPNormOrig2Prim2(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = { 'porder': 2, 'asvector': True, } - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = [ - 'p_norm', 'reshape_p', 'sqrt_p', 'reduce_sum_p', 
'mul_p' + 'p_norm', + 'reshape_p', + 'sqrt_p', + 'reduce_sum_p', + 'mul_p', ] self.out_map = {0: self.output['Out']} class TestIndexSelectOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'index_select' X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') @@ -499,8 +505,9 @@ class TestIndexSelectOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Index': Index} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = { 'dim': 0, @@ -515,7 +522,6 @@ class TestIndexSelectOrig2Prim(TestElementWiseAddOrig2Prim): class TestElementwiseSubOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'elementwise_sub' X = paddle.static.data(name='X', shape=[5, 6], dtype='int32') @@ -523,8 +529,9 @@ class TestElementwiseSubOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = { 'dim': 0, @@ -539,7 +546,6 @@ class TestElementwiseSubOrig2Prim(TestElementWiseAddOrig2Prim): class TestScaleOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'scale' X = paddle.static.data(name='X', shape=[10, 7], dtype='int32') @@ -548,8 +554,9 @@ class TestScaleOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'scale': 2.0, 'bias': 1.0, 'bias_after_scale': True} @@ -558,13 +565,16 @@ class TestScaleOrig2Prim(TestElementWiseAddOrig2Prim): X, ) self.all_ops = [ - 'scale', 'fill_constant_p', 'fill_constant_p', 'mul_p', 'add_p' + 'scale', + 'fill_constant_p', + 'fill_constant_p', + 'mul_p', + 'add_p', ] self.out_map = {0: self.output['Out']} class TestAssignOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'assign' X = paddle.static.data(name='X', shape=[10, 7], dtype='int32') @@ -573,18 +583,18 @@ class TestAssignOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['assign', 'fill_constant_p', 'add_p'] self.out_map = {0: self.output['Out']} class TestWhereOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'where' Cond = paddle.static.data(name='Condition', shape=[5, 6], dtype='bool') @@ -593,8 +603,9 @@ class TestWhereOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'Condition': Cond, 'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} self.orig2prim_args = (Cond, X, Y) @@ -603,7 +614,6 @@ class TestWhereOrig2Prim(TestElementWiseAddOrig2Prim): class TestEqualOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'equal' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -611,8 +621,9 @@ class TestEqualOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - 
self.layer_help.create_variable_for_type_inference(dtype='bool') + 'Out': self.layer_help.create_variable_for_type_inference( + dtype='bool' + ) } self.attrs = {} self.orig2prim_args = (X, Y) @@ -622,7 +633,6 @@ class TestEqualOrig2Prim(TestElementWiseAddOrig2Prim): class TestNeOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'not_equal' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -630,8 +640,9 @@ class TestNeOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype='bool') + 'Out': self.layer_help.create_variable_for_type_inference( + dtype='bool' + ) } self.attrs = {} self.orig2prim_args = (X, Y) @@ -641,7 +652,6 @@ class TestNeOrig2Prim(TestElementWiseAddOrig2Prim): class TestGtOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'greater_than' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -649,8 +659,9 @@ class TestGtOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype='bool') + 'Out': self.layer_help.create_variable_for_type_inference( + dtype='bool' + ) } self.attrs = {} self.orig2prim_args = (X, Y) @@ -660,7 +671,6 @@ class TestGtOrig2Prim(TestElementWiseAddOrig2Prim): class TestGeOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'greater_equal' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -668,8 +678,9 @@ class TestGeOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype='bool') + 'Out': self.layer_help.create_variable_for_type_inference( + dtype='bool' + ) } self.attrs = {} self.orig2prim_args = (X, Y) @@ -679,7 +690,6 @@ class TestGeOrig2Prim(TestElementWiseAddOrig2Prim): class TestPowOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'elementwise_pow' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -687,8 +697,9 @@ class TestPowOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -699,7 +710,6 @@ class TestPowOrig2Prim(TestElementWiseAddOrig2Prim): class TestMaxOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'elementwise_max' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -707,8 +717,9 @@ class TestMaxOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X, 'Y': Y} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -719,97 +730,119 @@ class TestMaxOrig2Prim(TestElementWiseAddOrig2Prim): class TestGeluOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'gelu' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'approximate': False} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = [ - 'gelu', 'add_p', 'erf_p', 'fill_constant_p', 
'fill_constant_p', - 'fill_constant_p', 'mul_p', 'mul_p', 'mul_p' + 'gelu', + 'add_p', + 'erf_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'mul_p', + 'mul_p', + 'mul_p', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestGeluApproximateOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'gelu' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'approximate': True} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = [ - 'add_p', 'add_p', 'fill_constant_p', 'fill_constant_p', - 'fill_constant_p', 'fill_constant_p', 'fill_constant_p', 'gelu', - 'mul_p', 'mul_p', 'mul_p', 'mul_p', 'pow_p', 'tanh_p' + 'add_p', + 'add_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'gelu', + 'mul_p', + 'mul_p', + 'mul_p', + 'mul_p', + 'pow_p', + 'tanh_p', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestDropoutOrig2PrimCase1(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'dropout' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Mask': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.uint8), - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype), + 'Mask': self.layer_help.create_variable_for_type_inference( + dtype=paddle.uint8 + ), + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ), } self.attrs = { 'dropout_prob': 0.5, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.orig2prim_args = (None, X) self.all_ops = [ - 'bernoulli_p', 'mul_p', 'fill_constant_p', 'div_p', 'cast_p', - 'dropout' + 'bernoulli_p', + 'mul_p', + 'fill_constant_p', + 'div_p', + 'cast_p', + 'dropout', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Mask'], 1: self.output['Out']} class TestDropoutOrig2PrimCase2(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'dropout' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Mask': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.uint8), - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype), + 'Mask': self.layer_help.create_variable_for_type_inference( + dtype=paddle.uint8 + ), + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ), } self.attrs = { 'dropout_prob': 0.5, 'is_test': False, - 'dropout_implementation': 'downgrade_in_infer' + 'dropout_implementation': 'downgrade_in_infer', } self.orig2prim_args = (None, X) @@ -819,23 +852,23 @@ class TestDropoutOrig2PrimCase2(TestElementWiseAddOrig2Prim): class TestDropoutOrig2PrimCase3(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'dropout' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Mask': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.uint8), - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype), + 'Mask': self.layer_help.create_variable_for_type_inference( + dtype=paddle.uint8 + ), + 'Out': 
self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ), } self.attrs = { 'dropout_prob': 0.5, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.orig2prim_args = (None, X) @@ -845,35 +878,38 @@ class TestDropoutOrig2PrimCase3(TestElementWiseAddOrig2Prim): class TestDropoutOrig2PrimCase4(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'dropout' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Mask': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.uint8), - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype), + 'Mask': self.layer_help.create_variable_for_type_inference( + dtype=paddle.uint8 + ), + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ), } self.attrs = { 'dropout_prob': 0.5, 'is_test': True, - 'dropout_implementation': 'downgrade_in_infer' + 'dropout_implementation': 'downgrade_in_infer', } self.orig2prim_args = (None, X) self.all_ops = [ - 'bernoulli_p', 'fill_constant_p', 'mul_p', 'cast_p', 'dropout' + 'bernoulli_p', + 'fill_constant_p', + 'mul_p', + 'cast_p', + 'dropout', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Mask'], 1: self.output['Out']} class TestReduceSumOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'reduce_sum' @@ -881,87 +917,90 @@ class TestReduceSumOrig2Prim(TestElementWiseAddOrig2Prim): self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'axis': [0, 1], 'keep_dim': False} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['reduce_sum', 'reduce_sum_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestReduceMeanOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'reduce_mean' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'axis': [0, 1], 'keep_dim': False} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = [ - 'reduce_mean', 'reduce_sum_p', 'fill_constant_p', 'div_p' + 'reduce_mean', + 'reduce_sum_p', + 'fill_constant_p', + 'div_p', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestSizeOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'size' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'Input': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.int64) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=paddle.int64 + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['size', 'fill_constant_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestCastOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'cast' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': 
self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {'in_dtype': X.dtype, 'out_dtype': paddle.float64} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['cast', 'cast_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestPowScalarOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'pow' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } - self.attrs = {'factor': 2.} + self.attrs = {'factor': 2.0} self.orig2prim_args = (None, X) self.all_ops = ['pow', 'pow_p', 'fill_constant_p'] # { prim_op_output_index: orig_op_output_var } @@ -969,25 +1008,24 @@ class TestPowScalarOrig2Prim(TestElementWiseAddOrig2Prim): class TestSquareOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'square' X = paddle.static.data(name='X', shape=[5, 8], dtype='float') self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['square', 'pow_p', 'fill_constant_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestRSqrtOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'rsqrt' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -996,19 +1034,19 @@ class TestRSqrtOrig2Prim(TestElementWiseAddOrig2Prim): 'X': X, } self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = ['rsqrt', 'rsqrt_p'] # { prim_op_output_index: orig_op_output_var } self.out_map = {0: self.output['Out']} class TestBatchnormOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'batch_norm' x = paddle.static.data(name='X', shape=[5, 8], dtype='float') @@ -1022,20 +1060,23 @@ class TestBatchnormOrig2Prim(TestElementWiseAddOrig2Prim): "Scale": [w], "Bias": [b], "Mean": [m], - "Variance": [v] + "Variance": [v], } saved_variance = self.layer_help.create_variable_for_type_inference( - dtype=x.dtype, stop_gradient=True) + dtype=x.dtype, stop_gradient=True + ) batch_norm_out = self.layer_help.create_variable_for_type_inference( - x.dtype) + x.dtype + ) saved_mean = self.layer_help.create_variable_for_type_inference( - dtype=x.dtype, stop_gradient=True) + dtype=x.dtype, stop_gradient=True + ) self.output = { "Y": [batch_norm_out], "MeanOut": [m], "VarianceOut": [v], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } self.attrs = { @@ -1050,31 +1091,60 @@ class TestBatchnormOrig2Prim(TestElementWiseAddOrig2Prim): } self.orig2prim_args = (b, m, None, w, v, x) self.all_ops = [ - 'add_p', 'add_p', 'add_p', 'add_p', 'batch_norm', 'broadcast_p', - 'broadcast_p', 'broadcast_p', 'broadcast_p', 'broadcast_p', 'div_p', - 'div_p', 'div_p', 'fill_constant_p', 'fill_constant_p', - 'fill_constant_p', 'fill_constant_p', 'fill_constant_p', - 'fill_constant_p', 'fill_constant_p', 'fill_constant_p', - 'fill_constant_p', 'mul_p', 
'mul_p', 'mul_p', 'mul_p', 'mul_p', - 'pow_p', 'reduce_sum_p', 'reduce_sum_p', 'reshape_p', 'reshape_p', - 'reshape_p', 'reshape_p', 'sqrt_p', 'sub_p', 'sub_p', 'sub_p', - 'sub_p' + 'add_p', + 'add_p', + 'add_p', + 'add_p', + 'batch_norm', + 'broadcast_p', + 'broadcast_p', + 'broadcast_p', + 'broadcast_p', + 'broadcast_p', + 'div_p', + 'div_p', + 'div_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'mul_p', + 'mul_p', + 'mul_p', + 'mul_p', + 'mul_p', + 'pow_p', + 'reduce_sum_p', + 'reduce_sum_p', + 'reshape_p', + 'reshape_p', + 'reshape_p', + 'reshape_p', + 'sqrt_p', + 'sub_p', + 'sub_p', + 'sub_p', + 'sub_p', ] # { prim_op_output_index: orig_op_output_var } self.out_map = {} class TestFillConstantOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'fill_constant' - self.attrs = {'value': 1., 'shape': (2, 3), 'dtype': paddle.float32} + self.attrs = {'value': 1.0, 'shape': (2, 3), 'dtype': paddle.float32} self.input = {} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.float32) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=paddle.float32 + ) } self.orig2prim_args = (None, None, None) @@ -1084,14 +1154,13 @@ class TestFillConstantOrig2Prim(TestElementWiseAddOrig2Prim): class TestUniformRandomOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'uniform_random' self.input = {} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.float32) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=paddle.float32 + ) } self.attrs = {'shape': [1, 2]} @@ -1101,7 +1170,6 @@ class TestUniformRandomOrig2Prim(TestElementWiseAddOrig2Prim): class TestSigmoidOrig2Prim(TestElementWiseAddOrig2Prim): - def init_data(self): self.op_type = 'sigmoid' X = paddle.static.data(name='X', shape=[3], dtype='float32') @@ -1109,15 +1177,21 @@ class TestSigmoidOrig2Prim(TestElementWiseAddOrig2Prim): self.attrs = {} self.input = {'X': X} self.output = { - 'Out': - self.layer_help.create_variable_for_type_inference( - dtype=paddle.float32) + 'Out': self.layer_help.create_variable_for_type_inference( + dtype=paddle.float32 + ) } - self.orig2prim_args = (X, ) + self.orig2prim_args = (X,) self.all_ops = [ - 'sigmoid', 'div_p', 'fill_constant_p', 'add_p', 'fill_constant_p', - 'exp_p', 'fill_constant_p', 'sub_p' + 'sigmoid', + 'div_p', + 'fill_constant_p', + 'add_p', + 'fill_constant_p', + 'exp_p', + 'fill_constant_p', + 'sub_p', ] self.out_map = {0: self.output['Out']} diff --git a/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py b/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py index 13423690ed86994748d2cc7c9f3c4bd4d9eb608a..b864bdaa45f03f76b96f8ec5d0d304d155bf78ad 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py @@ -24,14 +24,14 @@ paddle.enable_static() ############################ Test prim2orig rules ############################ class TestAddPPrim2Orig(unittest.TestCase): - def setUp(self): self.main_program = paddle.static.Program() self.startup_program = paddle.static.Program() self.layer_help = LayerHelper('TestPrim2Orig') - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + self.main_program, self.startup_program + ): 
self.init_data() def init_data(self): @@ -41,8 +41,9 @@ class TestAddPPrim2Orig(unittest.TestCase): self.input = {'X': X, 'Y': Y} self.output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -52,12 +53,15 @@ class TestAddPPrim2Orig(unittest.TestCase): self.out_map = {self.output['Z']: 0} def test_op(self): - with paddle.static.program_guard(self.main_program, - self.startup_program): - op = self.layer_help.append_op(type=self.op_type, - inputs=self.input, - outputs=self.output, - attrs=self.attrs) + with paddle.static.program_guard( + self.main_program, self.startup_program + ): + op = self.layer_help.append_op( + type=self.op_type, + inputs=self.input, + outputs=self.output, + attrs=self.attrs, + ) orig_out = _prim2orig(op, *self.prim2orig_args) all_ops = [op.type for op in self.main_program.block(0).ops] @@ -68,7 +72,6 @@ class TestAddPPrim2Orig(unittest.TestCase): class TestSubPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'sub_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -76,8 +79,9 @@ class TestSubPPrim2Orig(TestAddPPrim2Orig): self.input = {'X': X, 'Y': Y} self.output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -87,7 +91,6 @@ class TestSubPPrim2Orig(TestAddPPrim2Orig): class TestMulPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'mul_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -95,8 +98,9 @@ class TestMulPPrim2Orig(TestAddPPrim2Orig): self.input = {'X': X, 'Y': Y} self.output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -106,7 +110,6 @@ class TestMulPPrim2Orig(TestAddPPrim2Orig): class TestDivPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'div_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -114,8 +117,9 @@ class TestDivPPrim2Orig(TestAddPPrim2Orig): self.input = {'X': X, 'Y': Y} self.output = { - 'Z': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Z': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} @@ -125,7 +129,6 @@ class TestDivPPrim2Orig(TestAddPPrim2Orig): class TestSqrtPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'sqrt_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -134,18 +137,18 @@ class TestSqrtPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['sqrt_p', 'sqrt'] self.out_map = {self.output['Y']: 0} class TestTanhPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'tanh_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -154,18 +157,18 @@ class TestTanhPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = 
['tanh_p', 'tanh'] self.out_map = {self.output['Y']: 0} class TestSinPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'sin_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -174,18 +177,18 @@ class TestSinPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['sin_p', 'sin'] self.out_map = {self.output['Y']: 0} class TestCosPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'cos_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -194,18 +197,18 @@ class TestCosPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['cos_p', 'cos'] self.out_map = {self.output['Y']: 0} class TestExpPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'exp_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -214,18 +217,18 @@ class TestExpPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['exp_p', 'exp'] self.out_map = {self.output['Y']: 0} class TestErfPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'erf_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -234,18 +237,18 @@ class TestErfPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['erf_p', 'erf'] self.out_map = {self.output['Y']: 0} class TestAbsPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'abs_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -254,18 +257,18 @@ class TestAbsPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['abs_p', 'abs'] self.out_map = {self.output['Y']: 0} class TestLogPPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'log_p' X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') @@ -274,18 +277,18 @@ class TestLogPPrim2Orig(TestAddPPrim2Orig): 'X': X, } self.output = { - 'Y': - self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + 'Y': self.layer_help.create_variable_for_type_inference( + dtype=X.dtype + ) } self.attrs = {} - self.prim2orig_args = (X, ) + self.prim2orig_args = (X,) self.all_ops = ['log_p', 'log'] self.out_map = {self.output['Y']: 0} class TestReshapePPrim2Orig(TestAddPPrim2Orig): - def init_data(self): self.op_type = 'reshape_p' X = paddle.static.data(name='X', shape=[2, 8], dtype='float64') @@ -294,18 +297,18 @@ class 
TestReshapePPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'shape': [4, 4]}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['reshape_p', 'reshape2']
        self.out_map = {self.output['Y']: 0}
class TestBroadcastPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'broadcast_p'
        X = paddle.static.data(name='X', shape=[2, 8], dtype='float64')
@@ -314,18 +317,18 @@ class TestBroadcastPPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'shape': [10, 2, 8]}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['broadcast_p', 'expand_v2']
        self.out_map = {self.output['Y']: 0}
class TestTransposePPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'transpose_p'
        X = paddle.static.data(name='X', shape=[7, 8, 9, 10], dtype='float64')
@@ -334,18 +337,18 @@ class TestTransposePPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'axis': [1, 2, 0, 3]}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['transpose_p', 'transpose2']
        self.out_map = {self.output['Y']: 0}
class TestSplitPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'split_p'
        X = paddle.static.data(name='X', shape=[3, 9, 5], dtype='float64')
@@ -356,12 +359,14 @@ class TestSplitPPrim2Orig(TestAddPPrim2Orig):
        self.output = {
            'YS': [
                self.layer_help.create_variable_for_type_inference(
-                    dtype=X.dtype) for i in range(3)
+                    dtype=X.dtype
+                )
+                for i in range(3)
            ]
        }
        self.attrs = {'num_or_sections': [2, 3, 4], 'axis': 1}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['split_p', 'split']
        self.out_map = {
            self.output['YS'][0]: 0,
@@ -371,7 +376,6 @@ class TestSplitPPrim2Orig(TestAddPPrim2Orig):
class TestConcatPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'concat_p'
        X = paddle.static.data(name='X', shape=[3, 9, 5], dtype='float64')
@@ -382,36 +386,36 @@ class TestConcatPPrim2Orig(TestAddPPrim2Orig):
            'XS': [X, Y, Z],
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'axis': 0}
-        self.prim2orig_args = ((X, Y, Z), )
+        self.prim2orig_args = ((X, Y, Z),)
        self.all_ops = ['concat_p', 'concat']
        self.out_map = {self.output['Y']: 0}
class TestReducePPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'reduce_sum_p'
        X = paddle.static.data(name='X', shape=[3, 9, 5], dtype='float64')
        self.input = {'X': X}
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'axis': [1], 'keepdim': True}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['reduce_sum_p', 'reduce_sum']
        self.out_map = {self.output['Y']: 0}
class TestMatmulPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'matmul_p'
        X = paddle.static.data(name='X', shape=[9, 5], dtype='float64')
@@ -419,8 +423,9 @@ class TestMatmulPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {}
@@ -430,7 +435,6 @@ class TestMatmulPPrim2Orig(TestAddPPrim2Orig):
class TestSliceSelectPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'slice_select_p'
        X = paddle.static.data(name='X', shape=[9, 5], dtype='float64')
@@ -439,18 +443,18 @@ class TestSliceSelectPPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'axis': [0], 'starts': [1], 'ends': [8], 'strides': [2]}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['slice_select_p', 'strided_slice']
        self.out_map = {self.output['Y']: 0}
class TestSliceAssignPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'slice_assign_p'
        X = paddle.static.data(name='X', shape=[9, 5], dtype='float64')
@@ -458,8 +462,9 @@ class TestSliceAssignPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'axis': [1], 'starts': [0], 'ends': [3], 'strides': [1]}
@@ -469,18 +474,18 @@ class TestSliceAssignPPrim2Orig(TestAddPPrim2Orig):
class TestGatherPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'gather_p'
        X = paddle.static.data(name='X', shape=[9, 5], dtype='float64')
-        IndexTensor = paddle.static.data(name='IndexTensor',
-                                         shape=[3],
-                                         dtype='int32')
+        IndexTensor = paddle.static.data(
+            name='IndexTensor', shape=[3], dtype='int32'
+        )
        self.input = {'X': X, 'IndexTensor': IndexTensor}
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {
            'axis': 0,
@@ -495,19 +500,19 @@ class TestGatherPPrim2Orig(TestAddPPrim2Orig):
class TestScatterAddPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'scatter_add_p'
        X = paddle.static.data(name='X', shape=[9, 5], dtype='float64')
        Y = paddle.static.data(name='Y', shape=[3, 5], dtype='float64')
-        IndexTensor = paddle.static.data(name='IndexTensor',
-                                         shape=[3],
-                                         dtype='int32')
+        IndexTensor = paddle.static.data(
+            name='IndexTensor', shape=[3], dtype='int32'
+        )
        self.input = {'X': X, 'Y': Y, 'IndexTensor': IndexTensor}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {
            'axis': 0,
@@ -515,20 +520,23 @@ class TestScatterAddPPrim2Orig(TestAddPPrim2Orig):
        self.prim2orig_args = (IndexTensor, X, Y)
        self.all_ops = [
-            'scatter_add_p', 'fill_any_like', 'scatter', 'elementwise_add'
+            'scatter_add_p',
+            'fill_any_like',
+            'scatter',
+            'elementwise_add',
        ]
        self.out_map = {self.output['Z']: 0}
class TestFillConstantPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'fill_constant_p'
        self.input = {}
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(paddle.int32)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                paddle.int32
+            )
        }
        self.attrs = {'value': 10, 'shape': [5, 5], 'dtype': paddle.int32}
@@ -538,7 +546,6 @@ class TestFillConstantPPrim2Orig(TestAddPPrim2Orig):
class TestSelectPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'select_p'
        Cond = paddle.static.data(name='Condition', shape=[5, 6], dtype='bool')
@@ -547,8 +554,9 @@ class TestSelectPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'Condition': Cond, 'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {}
        self.prim2orig_args = (Cond, X, Y)
@@ -557,7 +565,6 @@ class TestSelectPPrim2Orig(TestAddPPrim2Orig):
class TestEqPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'eq_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -565,8 +572,9 @@ class TestEqPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype='bool')
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype='bool'
+            )
        }
        self.attrs = {}
@@ -576,7 +584,6 @@ class TestEqPPrim2Orig(TestAddPPrim2Orig):
class TestNePPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'ne_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -584,8 +591,9 @@ class TestNePPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype='bool')
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype='bool'
+            )
        }
        self.attrs = {}
@@ -595,7 +603,6 @@ class TestNePPrim2Orig(TestAddPPrim2Orig):
class TestGtPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'gt_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -603,8 +610,9 @@ class TestGtPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype='bool')
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype='bool'
+            )
        }
        self.attrs = {}
@@ -614,7 +622,6 @@ class TestGtPPrim2Orig(TestAddPPrim2Orig):
class TestGePPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'ge_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -622,8 +629,9 @@ class TestGePPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype='bool')
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype='bool'
+            )
        }
        self.attrs = {}
@@ -633,7 +641,6 @@ class TestGePPrim2Orig(TestAddPPrim2Orig):
class TestPowPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'pow_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -641,8 +648,9 @@ class TestPowPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {}
@@ -652,7 +660,6 @@ class TestPowPPrim2Orig(TestAddPPrim2Orig):
class TestMaxPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'max_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -660,8 +667,9 @@ class TestMaxPPrim2Orig(TestAddPPrim2Orig):
        self.input = {'X': X, 'Y': Y}
        self.output = {
-            'Z':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Z': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {}
@@ -671,15 +679,14 @@ class TestMaxPPrim2Orig(TestAddPPrim2Orig):
class TestBernoulliPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'bernoulli_p'
        self.input = {}
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(
-                dtype=paddle.float64)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=paddle.float64
+            )
        }
        self.attrs = {'shape': [7, 8], 'dtype': paddle.float64, 'p': 0.5}
@@ -689,7 +696,6 @@ class TestBernoulliPPrim2Orig(TestAddPPrim2Orig):
class TestCastPPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'cast_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -698,18 +704,18 @@ class TestCastPPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {'dtype': paddle.int64}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['cast_p', 'cast']
        self.out_map = {self.output['Y']: 0}
class TestRsqrtPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'rsqrt_p'
        X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
@@ -718,33 +724,33 @@ class TestRsqrtPrim2Orig(TestAddPPrim2Orig):
            'X': X,
        }
        self.output = {
-            'Y':
-            self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
+            'Y': self.layer_help.create_variable_for_type_inference(
+                dtype=X.dtype
+            )
        }
        self.attrs = {}
-        self.prim2orig_args = (X, )
+        self.prim2orig_args = (X,)
        self.all_ops = ['rsqrt_p', 'rsqrt']
        self.out_map = {self.output['Y']: 0}
class TestUniformRandomPrim2Orig(TestAddPPrim2Orig):
-
    def init_data(self):
        self.op_type = 'uniform_random_p'
        self.input = {}
        self.output = {
-            'Out':
-            self.layer_help.create_variable_for_type_inference(
-                dtype=paddle.float64)
+            'Out': self.layer_help.create_variable_for_type_inference(
+                dtype=paddle.float64
+            )
        }
        self.attrs = {
            'shape': [1, 2, 3],
            'min': -1.0,
            'max': 1.0,
            'seed': 0,
-            'dtype': paddle.float64
+            'dtype': paddle.float64,
        }
        self.prim2orig_args = ()
diff --git a/python/paddle/fluid/tests/unittests/autograd/test_primapi.py b/python/paddle/fluid/tests/unittests/autograd/test_primapi.py
index 2451ed4190c9e12daebbac1286a266f15f293fbf..7f1a06c9240ba3d93e655107025310131ad34f10 100644
--- a/python/paddle/fluid/tests/unittests/autograd/test_primapi.py
+++ b/python/paddle/fluid/tests/unittests/autograd/test_primapi.py
@@ -27,13 +27,30 @@ from paddle.incubate.autograd import primx
@utils.place(config.DEVICES)
-@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'xs', 'dtype'), (
-    ('uniform_random',
-     lambda: paddle.uniform([1, 2, 3], dtype='float32', min=0, max=1.0, seed=1),
-     (), 'int32'), ('sigmoid', paddle.nn.functional.sigmoid,
-                    (np.random.rand(5, ), ), 'float32')))
+@utils.parameterize(
+    (utils.TEST_CASE_NAME, 'fun', 'xs', 'dtype'),
+    (
+        (
+            'uniform_random',
+            lambda: paddle.uniform(
+                [1, 2, 3], dtype='float32', min=0, max=1.0, seed=1
+            ),
+            (),
+            'int32',
+        ),
+        (
+            'sigmoid',
+            paddle.nn.functional.sigmoid,
+            (
+                np.random.rand(
+                    5,
+                ),
+            ),
+            'float32',
+        ),
+    ),
+)
class TestFowardApi(unittest.TestCase):
-
    @classmethod
    def setUpClass(cls):
        cls.xs = tuple(x.astype(cls.dtype) for x in cls.xs)
@@ -47,14 +64,14 @@ class TestFowardApi(unittest.TestCase):
        paddle.disable_static()

    def test_grad(self):
-
        def expected():
            paddle.incubate.autograd.disable_prim()
            sp = paddle.static.Program()
            mp = paddle.static.Program()
            with paddle.static.program_guard(mp, sp):
                feed,
static_xs = utils.gen_static_inputs_and_feed( - self.xs, stop_gradient=False) + self.xs, stop_gradient=False + ) out = self.fun(*static_xs) exe = paddle.static.Executor() exe.run(sp) @@ -68,7 +85,8 @@ class TestFowardApi(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs = utils.gen_static_inputs_and_feed( - self.xs, stop_gradient=False) + self.xs, stop_gradient=False + ) out = self.fun(*static_xs) primx.orig2prim(mp.block(0)) primx.prim2orig(mp.block(0)) @@ -86,18 +104,32 @@ class TestFowardApi(unittest.TestCase): @utils.place(config.DEVICES) -@utils.parameterize((utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'dtype'), - (('dropout', paddle.nn.functional.dropout, - (np.random.rand(5000, 5000), ), None, 'float32'), )) +@utils.parameterize( + (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'dtype'), + ( + ( + 'dropout', + paddle.nn.functional.dropout, + (np.random.rand(5000, 5000),), + None, + 'float32', + ), + ), +) class TestDropoutGrad(unittest.TestCase): - @classmethod def setUpClass(cls): cls.xs = tuple(x.astype(cls.dtype) for x in cls.xs) - cls._rtol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("rtol") - cls._atol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("atol") + cls._rtol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("rtol") + ) + cls._atol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("atol") + ) def setUp(self): paddle.enable_static() @@ -108,16 +140,17 @@ class TestDropoutGrad(unittest.TestCase): paddle.disable_static() def test_grad(self): - def expected(): paddle.incubate.autograd.disable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) + self.xs, self.v, stop_gradient=False + ) _, ys_grad = paddle.incubate.autograd.vjp( - self.fun, static_xs, static_v) + self.fun, static_xs, static_v + ) exe = paddle.static.Executor() exe.run(sp) out = exe.run(mp, feed=feed, fetch_list=ys_grad) @@ -130,9 +163,13 @@ class TestDropoutGrad(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.grad(ys, static_xs, static_v) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() @@ -151,17 +188,30 @@ class TestDropoutGrad(unittest.TestCase): @utils.place(config.DEVICES) @utils.parameterize( (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'dtype'), - (('matmul', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), None, 'float32'), )) + ( + ( + 'matmul', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + None, + 'float32', + ), + ), +) class TestWithoutProgramGuard(unittest.TestCase): - @classmethod def setUpClass(cls): cls.xs = tuple(x.astype(cls.dtype) for x in cls.xs) - cls._rtol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("rtol") - cls._atol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("atol") + cls._rtol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + 
.get("rtol") + ) + cls._atol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("atol") + ) def setUp(self): paddle.enable_static() @@ -172,18 +222,22 @@ class TestWithoutProgramGuard(unittest.TestCase): paddle.disable_static() def test_forward_grad_without_program_guard(self): - def with_program_guard(): paddle.incubate.autograd.enable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.forward_grad( - ys, static_xs, static_v) + ys, static_xs, static_v + ) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() exe.run(sp) @@ -194,11 +248,16 @@ class TestWithoutProgramGuard(unittest.TestCase): def without_program_guard(): paddle.incubate.autograd.enable_prim() feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.forward_grad( - ys, static_xs, static_v) + ys, static_xs, static_v + ) sp = paddle.fluid.framework.default_startup_program() mp = paddle.fluid.framework.default_main_program() exe = paddle.static.Executor() @@ -210,22 +269,27 @@ class TestWithoutProgramGuard(unittest.TestCase): expected = with_program_guard() actual = without_program_guard() self.assertEqual(type(actual), type(expected)) - np.testing.assert_allclose(np.concatenate(actual), - np.concatenate(expected), - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + np.concatenate(actual), + np.concatenate(expected), + rtol=self._rtol, + atol=self._atol, + ) def test_grad_without_program_guard(self): - def with_program_guard(): paddle.incubate.autograd.enable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) xs_grad = paddle.incubate.autograd.grad(ys, static_xs, static_v) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() @@ -237,9 +301,13 @@ class TestWithoutProgramGuard(unittest.TestCase): def without_program_guard(): paddle.incubate.autograd.enable_prim() feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) xs_grad = paddle.incubate.autograd.grad(ys, static_xs, static_v) sp = paddle.fluid.framework.default_startup_program() mp = paddle.fluid.framework.default_main_program() @@ -253,43 +321,91 
@@ class TestWithoutProgramGuard(unittest.TestCase): actual = without_program_guard() for i, j in zip(actual, expected): self.assertEqual(type(i), type(j)) - np.testing.assert_allclose(np.concatenate(i), - np.concatenate(j), - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + np.concatenate(i), + np.concatenate(j), + rtol=self._rtol, + atol=self._atol, + ) @utils.place(config.DEVICES) @utils.parameterize( (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'dtype'), - (('matmul', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), None, 'float32'), - ('multiply', paddle.multiply, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float64'), - ('add', paddle.add, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float32'), - ('input_not_sequence', paddle.tanh, - (np.random.rand(5, 5), ), None, 'float64'), - ('input_gradients_not_none', paddle.matmul, - (np.random.rand(3, 3), np.random.rand(3, 3)), - (np.random.rand(3, 3), np.random.rand(3, 3)), 'float64'), - ('log', paddle.log, (np.random.rand(3, 4), ), None, 'float32'), - ('abs', paddle.abs, (np.random.uniform(-10, 10, - (10, 10)), ), None, 'float32'), - ('rsqrt', paddle.rsqrt, (np.random.rand(100, 200), ), None, 'float32'), - ('sigmoid', paddle.nn.functional.sigmoid, - (np.random.rand(5, ), ), None, 'float32'))) + ( + ( + 'matmul', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + None, + 'float32', + ), + ( + 'multiply', + paddle.multiply, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 'float64', + ), + ( + 'add', + paddle.add, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 'float32', + ), + ( + 'input_not_sequence', + paddle.tanh, + (np.random.rand(5, 5),), + None, + 'float64', + ), + ( + 'input_gradients_not_none', + paddle.matmul, + (np.random.rand(3, 3), np.random.rand(3, 3)), + (np.random.rand(3, 3), np.random.rand(3, 3)), + 'float64', + ), + ('log', paddle.log, (np.random.rand(3, 4),), None, 'float32'), + ( + 'abs', + paddle.abs, + (np.random.uniform(-10, 10, (10, 10)),), + None, + 'float32', + ), + ('rsqrt', paddle.rsqrt, (np.random.rand(100, 200),), None, 'float32'), + ( + 'sigmoid', + paddle.nn.functional.sigmoid, + ( + np.random.rand( + 5, + ), + ), + None, + 'float32', + ), + ), +) # paddle.where, paddle.pow, paddle.maximum has no double grad definition, # can not compute forward grad use double trick class TestForwardGrad(unittest.TestCase): - @classmethod def setUpClass(cls): cls.xs = tuple(x.astype(cls.dtype) for x in cls.xs) - cls._rtol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("rtol") - cls._atol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("atol") + cls._rtol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("rtol") + ) + cls._atol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("atol") + ) def setUp(self): paddle.enable_static() @@ -300,16 +416,17 @@ class TestForwardGrad(unittest.TestCase): paddle.disable_static() def test_forward_grad(self): - def expected(): paddle.incubate.autograd.disable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) + self.xs, self.v, stop_gradient=False + ) _, ys_grad = paddle.incubate.autograd.jvp( - self.fun, static_xs, static_v) + self.fun, static_xs, static_v + ) exe = paddle.static.Executor() exe.run(sp) out = exe.run(mp, feed=feed, fetch_list=ys_grad) @@ 
-322,11 +439,16 @@ class TestForwardGrad(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.forward_grad( - ys, static_xs, static_v) + ys, static_xs, static_v + ) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() exe.run(sp) @@ -337,10 +459,12 @@ class TestForwardGrad(unittest.TestCase): actual = actual() expected = expected() self.assertEqual(type(actual), type(expected)) - np.testing.assert_allclose(np.concatenate(actual), - np.concatenate(expected), - rtol=self._rtol, - atol=self._atol) + np.testing.assert_allclose( + np.concatenate(actual), + np.concatenate(expected), + rtol=self._rtol, + atol=self._atol, + ) def test_prim_disabled(self): paddle.incubate.autograd.disable_prim() @@ -349,11 +473,16 @@ class TestForwardGrad(unittest.TestCase): with self.assertRaises(RuntimeError): with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.forward_grad( - ys, static_xs, static_v) + ys, static_xs, static_v + ) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() exe.run(sp) @@ -364,11 +493,13 @@ class TestForwardGrad(unittest.TestCase): paddle.incubate.autograd.enable_prim() with self.assertRaises(TypeError): paddle.incubate.autograd.forward_grad( - 1, paddle.static.data('inputs', shape=[1])) + 1, paddle.static.data('inputs', shape=[1]) + ) with self.assertRaises(TypeError): paddle.incubate.autograd.forward_grad( - paddle.static.data('targets', shape=[1]), 1) + paddle.static.data('targets', shape=[1]), 1 + ) paddle.incubate.autograd.disable_prim() @@ -379,107 +510,247 @@ where_wrap = lambda x, y: paddle.where(paddle.eye(3, 4) == 1, x, y) @utils.parameterize( (utils.TEST_CASE_NAME, 'fun', 'xs', 'v', 'dtype'), ( - ('matmul', paddle.matmul, - (np.random.rand(2, 3), np.random.rand(3, 2)), None, 'float32'), - ('multiply', paddle.multiply, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float64'), - ('div', paddle.divide, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float64'), - ('add', paddle.add, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float32'), - ('input_not_sequence', paddle.tanh, - (np.random.rand(5, 5), ), None, 'float64'), - ('input_gradients_not_none', paddle.matmul, - (np.random.rand(3, 3), np.random.rand(3, 3)), - (np.random.rand(3, 3), ), 'float64'), - ('sin', paddle.sin, (np.random.rand(100, 200), ), None, 'float32'), - ('rsqrt', paddle.rsqrt, (np.random.rand(100, 200), ), None, 'float32'), - ('cos', paddle.cos, (np.random.rand(200, 90), ), None, 'float32'), - ('exp', paddle.exp, (np.random.rand(299, 320), ), None, 'float32'), + ( + 'matmul', + paddle.matmul, + (np.random.rand(2, 3), np.random.rand(3, 2)), + None, + 'float32', + ), + ( + 'multiply', + paddle.multiply, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 
'float64', + ), + ( + 'div', + paddle.divide, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 'float64', + ), + ( + 'add', + paddle.add, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 'float32', + ), + ( + 'input_not_sequence', + paddle.tanh, + (np.random.rand(5, 5),), + None, + 'float64', + ), + ( + 'input_gradients_not_none', + paddle.matmul, + (np.random.rand(3, 3), np.random.rand(3, 3)), + (np.random.rand(3, 3),), + 'float64', + ), + ('sin', paddle.sin, (np.random.rand(100, 200),), None, 'float32'), + ('rsqrt', paddle.rsqrt, (np.random.rand(100, 200),), None, 'float32'), + ('cos', paddle.cos, (np.random.rand(200, 90),), None, 'float32'), + ('exp', paddle.exp, (np.random.rand(299, 320),), None, 'float32'), # In where op, grad of condition computed by paddle.static.gradients is None, # and paddle.incubate.autograd.grad will replace None with zeros while transpose # will just return None because cond_dot is unused, that is a diff. - ('select', where_wrap, - (np.random.rand(3, 4), np.random.rand(3, 4)), None, 'float32'), + ( + 'select', + where_wrap, + (np.random.rand(3, 4), np.random.rand(3, 4)), + None, + 'float32', + ), # pow_p and pow has diff when compute z_dot of 0^0 - ('pow', paddle.pow, - (np.array([1, 2, 3]), np.array([0, 2, 7])), None, 'float32'), + ( + 'pow', + paddle.pow, + (np.array([1, 2, 3]), np.array([0, 2, 7])), + None, + 'float32', + ), # To make max_p consistent with paddle.maximum, be sure x.grad = 0 and y.grad = 1 when x==y. - ('max', paddle.maximum, ( - np.array([1, 2, 3]), - np.array([2, 2, 2]), - ), None, 'float32'), - ('erf', paddle.erf, (np.random.rand(300, 288), ), None, 'float32'), - ('gelu', paddle.nn.functional.gelu, - (np.random.rand(200, 189), ), None, 'float32'), - ('gelu_approximate', lambda x: paddle.nn.functional.gelu(x, True), - (np.random.rand(200, 189), ), None, 'float32'), - ('sum', paddle.sum, (np.random.rand(200, 345), ), None, 'float32'), - ('sigmoid', paddle.nn.functional.sigmoid, - (np.random.rand(5, ), ), None, 'float32'), - ('sum_with_axis', lambda x: paddle.sum(x, axis=1), - (np.random.rand(200, 345), ), None, 'float32'), - ('sum_with_keepdim', lambda x: paddle.sum(x, keepdim=True), - (np.random.rand(200, 345), ), None, 'float32'), - ('mean', paddle.mean, (np.random.rand(200, 345), ), None, 'float32'), - ('mean_with_axis', lambda x: paddle.mean(x, axis=1), - (np.random.rand(200, 345), ), None, 'float32'), - ('mean_with_keepdim', lambda x: paddle.mean(x, keepdim=True), - (np.random.rand(200, 345), ), None, 'float32'), - ('mean_with_axis_keepdim', - lambda x: paddle.mean(x, axis=0, keepdim=True), - (np.random.rand(200, 345), ), None, 'float32'), - ('abs', paddle.abs, (np.random.uniform(-10, 10, - (200, 345)), ), None, 'float32'), - ('cast_float', lambda x: paddle.cast(x, paddle.float64), - (np.random.rand(10, 20), ), None, 'float32'), - ('cast_int', lambda x: paddle.cast(x, paddle.int32), - (np.random.rand(10, 20), ), None, 'float32'), - ('square', paddle.square, (np.random.rand(100), ), None, 'float32'), - ('pow_scalar', lambda x: paddle.pow(x, 2), - (np.random.rand(20, 30), ), None, 'float32'), - ('var', paddle.var, (np.random.rand(200, 324), ), None, 'float32'), - ('var_with_axis', lambda x: paddle.var(x, axis=1), - (np.random.rand(10, 20, 30), ), None, 'float32'), - ('var_without_unbiased', - lambda x: paddle.var(x, axis=1, unbiased=False), - (np.random.rand(10, 20, 30), ), None, 'float32'), - ('var_with_keepdim', lambda x: paddle.var(x, axis=1, keepdim=True), - (np.random.rand(10, 20, 30), ), None, 'float32'), - ('bn', 
lambda x, w, b: paddle.nn.functional.batch_norm( - x, paddle.ones((10, )), paddle.ones( - (10, )), w, b), (np.random.rand(10, 10), np.random.rand(10), - np.random.rand(10)), None, 'float32'), - ('bn_train', lambda x, w, b: paddle.nn.functional.batch_norm( - x, paddle.ones((10, )), paddle.ones((10, )), w, b, training=True), - (np.random.rand( - 10, 10), np.random.rand(10), np.random.rand(10)), None, 'float32'), - ('bn_nhwc', lambda x, w, b: paddle.nn.functional.batch_norm( - x, - paddle.ones((10, )) + 1, - paddle.ones((10, )), - w, - b, - training=True, - data_format='NHWC', - ), (np.random.rand( - 10, 10), np.random.rand(10), np.random.rand(10)), None, 'float32'), - ('bn_global_stat', - lambda x, w, b: paddle.nn.functional.batch_norm(x, - paddle.ones( - (10, )) + 3.2, - paddle.ones( - (10, )) + 6.7, - w, - b, - training=True, - data_format='NHWC', - use_global_stats=True), - (np.random.rand( - 10, 10), np.random.rand(10), np.random.rand(10)), None, 'float32'), - )) + ( + 'max', + paddle.maximum, + ( + np.array([1, 2, 3]), + np.array([2, 2, 2]), + ), + None, + 'float32', + ), + ('erf', paddle.erf, (np.random.rand(300, 288),), None, 'float32'), + ( + 'gelu', + paddle.nn.functional.gelu, + (np.random.rand(200, 189),), + None, + 'float32', + ), + ( + 'gelu_approximate', + lambda x: paddle.nn.functional.gelu(x, True), + (np.random.rand(200, 189),), + None, + 'float32', + ), + ('sum', paddle.sum, (np.random.rand(200, 345),), None, 'float32'), + ( + 'sigmoid', + paddle.nn.functional.sigmoid, + ( + np.random.rand( + 5, + ), + ), + None, + 'float32', + ), + ( + 'sum_with_axis', + lambda x: paddle.sum(x, axis=1), + (np.random.rand(200, 345),), + None, + 'float32', + ), + ( + 'sum_with_keepdim', + lambda x: paddle.sum(x, keepdim=True), + (np.random.rand(200, 345),), + None, + 'float32', + ), + ('mean', paddle.mean, (np.random.rand(200, 345),), None, 'float32'), + ( + 'mean_with_axis', + lambda x: paddle.mean(x, axis=1), + (np.random.rand(200, 345),), + None, + 'float32', + ), + ( + 'mean_with_keepdim', + lambda x: paddle.mean(x, keepdim=True), + (np.random.rand(200, 345),), + None, + 'float32', + ), + ( + 'mean_with_axis_keepdim', + lambda x: paddle.mean(x, axis=0, keepdim=True), + (np.random.rand(200, 345),), + None, + 'float32', + ), + ( + 'abs', + paddle.abs, + (np.random.uniform(-10, 10, (200, 345)),), + None, + 'float32', + ), + ( + 'cast_float', + lambda x: paddle.cast(x, paddle.float64), + (np.random.rand(10, 20),), + None, + 'float32', + ), + ( + 'cast_int', + lambda x: paddle.cast(x, paddle.int32), + (np.random.rand(10, 20),), + None, + 'float32', + ), + ('square', paddle.square, (np.random.rand(100),), None, 'float32'), + ( + 'pow_scalar', + lambda x: paddle.pow(x, 2), + (np.random.rand(20, 30),), + None, + 'float32', + ), + ('var', paddle.var, (np.random.rand(200, 324),), None, 'float32'), + ( + 'var_with_axis', + lambda x: paddle.var(x, axis=1), + (np.random.rand(10, 20, 30),), + None, + 'float32', + ), + ( + 'var_without_unbiased', + lambda x: paddle.var(x, axis=1, unbiased=False), + (np.random.rand(10, 20, 30),), + None, + 'float32', + ), + ( + 'var_with_keepdim', + lambda x: paddle.var(x, axis=1, keepdim=True), + (np.random.rand(10, 20, 30),), + None, + 'float32', + ), + ( + 'bn', + lambda x, w, b: paddle.nn.functional.batch_norm( + x, paddle.ones((10,)), paddle.ones((10,)), w, b + ), + (np.random.rand(10, 10), np.random.rand(10), np.random.rand(10)), + None, + 'float32', + ), + ( + 'bn_train', + lambda x, w, b: paddle.nn.functional.batch_norm( + x, paddle.ones((10,)), 
paddle.ones((10,)), w, b, training=True + ), + (np.random.rand(10, 10), np.random.rand(10), np.random.rand(10)), + None, + 'float32', + ), + ( + 'bn_nhwc', + lambda x, w, b: paddle.nn.functional.batch_norm( + x, + paddle.ones((10,)) + 1, + paddle.ones((10,)), + w, + b, + training=True, + data_format='NHWC', + ), + (np.random.rand(10, 10), np.random.rand(10), np.random.rand(10)), + None, + 'float32', + ), + ( + 'bn_global_stat', + lambda x, w, b: paddle.nn.functional.batch_norm( + x, + paddle.ones((10,)) + 3.2, + paddle.ones((10,)) + 6.7, + w, + b, + training=True, + data_format='NHWC', + use_global_stats=True, + ), + (np.random.rand(10, 10), np.random.rand(10), np.random.rand(10)), + None, + 'float32', + ), + ), +) class TestGrad(unittest.TestCase): - def setUp(self): paddle.enable_static() paddle.incubate.autograd.enable_prim() @@ -491,22 +762,29 @@ class TestGrad(unittest.TestCase): @classmethod def setUpClass(cls): cls.xs = tuple(x.astype(cls.dtype) for x in cls.xs) - cls._rtol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("rtol") - cls._atol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("atol") + cls._rtol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("rtol") + ) + cls._atol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("atol") + ) def test_grad(self): - def expected(): paddle.incubate.autograd.disable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) + self.xs, self.v, stop_gradient=False + ) _, ys_grad = paddle.incubate.autograd.vjp( - self.fun, static_xs, static_v) + self.fun, static_xs, static_v + ) exe = paddle.static.Executor() exe.run(sp) out = exe.run(mp, feed=feed, fetch_list=ys_grad) @@ -519,9 +797,13 @@ class TestGrad(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.grad(ys, static_xs, static_v) paddle.incubate.autograd.prim2orig(mp.block(0)) exe = paddle.static.Executor() @@ -540,24 +822,29 @@ class TestGrad(unittest.TestCase): paddle.incubate.autograd.enable_prim() with self.assertRaises(TypeError): paddle.incubate.autograd.grad( - 1, paddle.static.data('inputs', shape=[1])) + 1, paddle.static.data('inputs', shape=[1]) + ) with self.assertRaises(TypeError): paddle.incubate.autograd.grad( - paddle.static.data('targets', shape=[1]), 1) + paddle.static.data('targets', shape=[1]), 1 + ) paddle.incubate.autograd.disable_prim() def test_disable_prim(self): - def expected(): paddle.incubate.autograd.disable_prim() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.incubate.autograd.grad(ys, 
static_xs, static_v) exe = paddle.static.Executor() exe.run(sp) @@ -571,9 +858,13 @@ class TestGrad(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun(static_xs) + ) ys_grad = paddle.static.gradients(ys, static_xs, static_v) exe = paddle.static.Executor() exe.run(sp) @@ -599,7 +890,7 @@ multiply_ag = lambda xs: xs[0] * xs[0] * xs[0] * xs[0] * xs[0] sin_ag = lambda xs: anp.sin(xs[0]) cos_ag = lambda xs: anp.cos(xs[0]) exp_ag = lambda xs: anp.exp(xs[0]) -pow_ag = lambda xs: xs[0]**xs[1] +pow_ag = lambda xs: xs[0] ** xs[1] log_ag = lambda xs: anp.log(xs[0]) erf_ag = lambda xs: ascipy.special.erf(xs[0]) sigmoid_ag = lambda xs: 1.0 / (1 + anp.exp(-xs[0])) @@ -616,28 +907,63 @@ def gelu_ag(x, approximate=False): @utils.place(config.DEVICES) @utils.parameterize( - (utils.TEST_CASE_NAME, 'fun_pd', 'fun_ag', 'xs', 'v', 'dtype'), ( - ('multiply', multiply_pd, multiply_ag, - (np.random.rand(3, 5), ), None, 'float32'), - ('sin', paddle.sin, sin_ag, (np.random.rand(2, 3), ), None, 'float32'), - ('cos', paddle.cos, cos_ag, (np.random.rand(3, 4), ), None, 'float32'), - ('exp', paddle.exp, exp_ag, (np.random.rand(2, 3), ), None, 'float32'), - ('pow', paddle.pow, pow_ag, - (np.random.rand(2, 3), np.random.rand(2, 3)), None, 'float32'), - ('log', paddle.log, log_ag, (np.random.rand(3, 8), ), None, 'float32'), - ('erf', paddle.erf, erf_ag, - (np.random.rand(100, 200), ), None, 'float32'), - ('gelu', paddle.nn.functional.gelu, lambda xs: gelu_ag(xs[0]), - (np.random.rand(10, 20, 30), ), None, 'float32'), - ('gelu_approximate', - lambda x: paddle.nn.functional.gelu(x, approximate=True), - lambda xs: gelu_ag(xs[0], approximate=True), - (np.random.rand(10, 20, 30), ), None, 'float32'), - ('sigmoid', paddle.nn.functional.sigmoid, sigmoid_ag, - (np.random.rand(10, 20), ), None, 'float32'), - )) + (utils.TEST_CASE_NAME, 'fun_pd', 'fun_ag', 'xs', 'v', 'dtype'), + ( + ( + 'multiply', + multiply_pd, + multiply_ag, + (np.random.rand(3, 5),), + None, + 'float32', + ), + ('sin', paddle.sin, sin_ag, (np.random.rand(2, 3),), None, 'float32'), + ('cos', paddle.cos, cos_ag, (np.random.rand(3, 4),), None, 'float32'), + ('exp', paddle.exp, exp_ag, (np.random.rand(2, 3),), None, 'float32'), + ( + 'pow', + paddle.pow, + pow_ag, + (np.random.rand(2, 3), np.random.rand(2, 3)), + None, + 'float32', + ), + ('log', paddle.log, log_ag, (np.random.rand(3, 8),), None, 'float32'), + ( + 'erf', + paddle.erf, + erf_ag, + (np.random.rand(100, 200),), + None, + 'float32', + ), + ( + 'gelu', + paddle.nn.functional.gelu, + lambda xs: gelu_ag(xs[0]), + (np.random.rand(10, 20, 30),), + None, + 'float32', + ), + ( + 'gelu_approximate', + lambda x: paddle.nn.functional.gelu(x, approximate=True), + lambda xs: gelu_ag(xs[0], approximate=True), + (np.random.rand(10, 20, 30),), + None, + 'float32', + ), + ( + 'sigmoid', + paddle.nn.functional.sigmoid, + sigmoid_ag, + (np.random.rand(10, 20),), + None, + 'float32', + ), + ), +) class TestGradWithHigherOrder(unittest.TestCase): - def setUp(self): paddle.enable_static() paddle.incubate.autograd.enable_prim() @@ -649,13 +975,18 @@ class TestGradWithHigherOrder(unittest.TestCase): @classmethod def setUpClass(cls): cls.xs = 
tuple(x.astype(cls.dtype) for x in cls.xs) - cls._rtol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("rtol") - cls._atol = config.TOLERANCE.get(str( - cls.dtype)).get("first_order_grad").get("atol") + cls._rtol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("rtol") + ) + cls._atol = ( + config.TOLERANCE.get(str(cls.dtype)) + .get("first_order_grad") + .get("atol") + ) def test_grad(self): - def expected(): egrad = autograd.elementwise_grad grad_3 = egrad(egrad(egrad(self.fun_ag)))(self.xs) @@ -671,9 +1002,13 @@ class TestGradWithHigherOrder(unittest.TestCase): startup = paddle.static.Program() with paddle.static.program_guard(main, startup): feed, static_xs, static_v = utils.gen_static_data_and_feed( - self.xs, self.v, stop_gradient=False) - ys = self.fun_pd(*static_xs) if isinstance( - static_xs, typing.Sequence) else self.fun_pd(static_xs) + self.xs, self.v, stop_gradient=False + ) + ys = ( + self.fun_pd(*static_xs) + if isinstance(static_xs, typing.Sequence) + else self.fun_pd(static_xs) + ) grad1 = paddle_grad(ys, static_xs, static_v) grad2 = paddle_grad(grad1, static_xs, static_v) diff --git a/python/paddle/fluid/tests/unittests/autograd/test_primops.py b/python/paddle/fluid/tests/unittests/autograd/test_primops.py index 677c064bb786cd6c0ddad20d0481b5988ec1be05..592229aa3dc2239f84a4982308f93e4388dbdbc7 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_primops.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_primops.py @@ -27,8 +27,14 @@ paddle.enable_static() @utils.place(config.DEVICES) @utils.parameterize( - (utils.TEST_CASE_NAME, 'op', 'args', 'kwargs', 'expected_shape', - 'expected_dtype'), + ( + utils.TEST_CASE_NAME, + 'op', + 'args', + 'kwargs', + 'expected_shape', + 'expected_dtype', + ), ( ('add', primops.add, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), ('sub', primops.sub, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), @@ -43,73 +49,144 @@ paddle.enable_static() ('erf', primops.erf, randn(2, 3), {}, (2, 3), 'float64'), ('abs', primops.abs, randn(2, 3), {}, (2, 3), 'float64'), ('log', primops.log, randn(2, 3), {}, (2, 3), 'float64'), - ('cast', primops.cast, randn(2, 3), { - 'dtype': paddle.int64 - }, (2, 3), 'int64'), - ('reshape', primops.reshape, randn(2, 3), { - 'shape': (3, 2) - }, (3, 2), 'float64'), - ('broadcast', primops.broadcast, randn(2), { - 'shape': (3, 2) - }, (3, 2), 'float64'), - ('transpose', primops.transpose, randn(2, 3), { - 'axis': (1, 0) - }, (3, 2), 'float64'), - ('concat_axis0', primops.concat, ((randn(2, 3), randn(2, 3)), ), { - 'axis': 0 - }, (4, 3), 'float64'), - ('concat_axis1', primops.concat, ((randn(2, 3), randn(2, 3)), ), { - 'axis': 1 - }, (2, 6), 'float64'), - ('reduce_axis1', primops.reduce_sum, randn(2, 3), { - 'axis': (1, ) - }, (2, ), 'float64'), - ('reduce_axis01', primops.reduce_sum, randn(2, 3), { - 'axis': (0, 1) - }, (1, ), 'float64'), - ('split', primops.split, randn(2, 3), { - 'num_or_sections': [1, 2], - 'axis': 1 - }, ((2, 1), (2, 2)), ('float64', 'float64')), - ('matmul', primops.matmul, (randn(2, 3), randn(3, 2)), {}, - (2, 2), 'float64'), - ('slice_select', primops.slice_select, randn(3, 2), { - 'axis': [0], - 'starts': [0], - 'ends': [2], - 'strides': [1] - }, (2, 2), 'float64'), - ('slice_assign', primops.slice_assign, (randn(2, 3), randn(2, 2)), { - 'axis': [1], - 'starts': [1], - 'ends': [3], - 'strides': [1] - }, (2, 3), 'float64'), - ('gather', primops.gather, (randn(3, 2), randint(0, 2, - (5, ), np.int32)), { - 'axis': 0 - }, (5, 
2), 'float64'), - ('scatter_add', primops.scatter_add, - (randn(3, 2), randn(5, 2), randint(0, 2, (5, ), np.int32)), { - 'axis': 0 - }, (3, 2), 'float64'), - ('fill_const', primops.fill_const, (), { - 'value': 10, - 'shape': (3, 2), - 'dtype': paddle.float32 - }, (3, 2), 'float32'), + ( + 'cast', + primops.cast, + randn(2, 3), + {'dtype': paddle.int64}, + (2, 3), + 'int64', + ), + ( + 'reshape', + primops.reshape, + randn(2, 3), + {'shape': (3, 2)}, + (3, 2), + 'float64', + ), + ( + 'broadcast', + primops.broadcast, + randn(2), + {'shape': (3, 2)}, + (3, 2), + 'float64', + ), + ( + 'transpose', + primops.transpose, + randn(2, 3), + {'axis': (1, 0)}, + (3, 2), + 'float64', + ), + ( + 'concat_axis0', + primops.concat, + ((randn(2, 3), randn(2, 3)),), + {'axis': 0}, + (4, 3), + 'float64', + ), + ( + 'concat_axis1', + primops.concat, + ((randn(2, 3), randn(2, 3)),), + {'axis': 1}, + (2, 6), + 'float64', + ), + ( + 'reduce_axis1', + primops.reduce_sum, + randn(2, 3), + {'axis': (1,)}, + (2,), + 'float64', + ), + ( + 'reduce_axis01', + primops.reduce_sum, + randn(2, 3), + {'axis': (0, 1)}, + (1,), + 'float64', + ), + ( + 'split', + primops.split, + randn(2, 3), + {'num_or_sections': [1, 2], 'axis': 1}, + ((2, 1), (2, 2)), + ('float64', 'float64'), + ), + ( + 'matmul', + primops.matmul, + (randn(2, 3), randn(3, 2)), + {}, + (2, 2), + 'float64', + ), + ( + 'slice_select', + primops.slice_select, + randn(3, 2), + {'axis': [0], 'starts': [0], 'ends': [2], 'strides': [1]}, + (2, 2), + 'float64', + ), + ( + 'slice_assign', + primops.slice_assign, + (randn(2, 3), randn(2, 2)), + {'axis': [1], 'starts': [1], 'ends': [3], 'strides': [1]}, + (2, 3), + 'float64', + ), + ( + 'gather', + primops.gather, + (randn(3, 2), randint(0, 2, (5,), np.int32)), + {'axis': 0}, + (5, 2), + 'float64', + ), + ( + 'scatter_add', + primops.scatter_add, + (randn(3, 2), randn(5, 2), randint(0, 2, (5,), np.int32)), + {'axis': 0}, + (3, 2), + 'float64', + ), + ( + 'fill_const', + primops.fill_const, + (), + {'value': 10, 'shape': (3, 2), 'dtype': paddle.float32}, + (3, 2), + 'float32', + ), ('neg', primops.neg, randn(2, 3), {}, (2, 3), 'float64'), - ('select', primops.select, - (randn(2, 3) > 0, randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), + ( + 'select', + primops.select, + (randn(2, 3) > 0, randn(2, 3), randn(2, 3)), + {}, + (2, 3), + 'float64', + ), ('eq', primops.eq, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('ne', primops.ne, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('gt', primops.gt, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('ge', primops.ge, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('pow', primops.pow, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), ('max', primops.max, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), - )) + ), +) class TestPrimops(unittest.TestCase): - @classmethod def setUpClass(cls): paddle.enable_static() @@ -136,15 +213,18 @@ class TestPrimops(unittest.TestCase): """convert numpy ndarray to paddle Variable recursively.""" return [ paddle.static.data(f'x{uuid.uuid4()}', v.shape, v.dtype) - if isinstance(v, np.ndarray) else self.arr2var(v) for v in arr + if isinstance(v, np.ndarray) + else self.arr2var(v) + for v in arr ] def _as_tuple(self, input): if isinstance(input, (tuple, list)) and len(input) == 0: return input if not isinstance(input, (tuple, list)) or all( - isinstance(i, int) for i in input): - return (input, ) + isinstance(i, int) for i in input + ): + return (input,) return input diff --git 
a/python/paddle/fluid/tests/unittests/autograd/test_transform.py b/python/paddle/fluid/tests/unittests/autograd/test_transform.py index f29b0d9b7164c2c938c77a2ab92ba3626540b311..3498f217bc6678de71886b86a01eddaa30fdfe08 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_transform.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_transform.py @@ -22,13 +22,13 @@ paddle.enable_static() class TestAutoGradTransformForAdd(unittest.TestCase): - def setUp(self): self.main_program = paddle.static.Program() self.startup_program = paddle.static.Program() - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + self.main_program, self.startup_program + ): self.init_data() def init_data(self): @@ -36,13 +36,13 @@ class TestAutoGradTransformForAdd(unittest.TestCase): self.xs_shape_map = {0: (20, 40), 1: (20, 40)} # { output_index: output_shape } self.ys_shape_map = {0: (20, 40)} - X0 = paddle.static.data(name='X0', - shape=self.xs_shape_map[0], - dtype='float32') + X0 = paddle.static.data( + name='X0', shape=self.xs_shape_map[0], dtype='float32' + ) X0.stop_gradient = False - X1 = paddle.static.data(name='X1', - shape=self.xs_shape_map[1], - dtype='float32') + X1 = paddle.static.data( + name='X1', shape=self.xs_shape_map[1], dtype='float32' + ) X1.stop_gradient = False A = paddle.tanh(X0) @@ -94,28 +94,56 @@ class TestAutoGradTransformForAdd(unittest.TestCase): 'fill_constant_p', # transposed op 'mul_p', - 'mul_p' + 'mul_p', ] self.prim2orig_ops_with_blacklist = [ - 'tanh', 'tanh', 'add_p', 'fill_constant', 'fill_constant', - 'fill_constant', 'elementwise_mul', 'sub_p', 'fill_constant', - 'elementwise_mul', 'sub_p', 'fill_constant', 'elementwise_mul', - 'elementwise_mul', 'rsqrt', 'fill_constant', 'elementwise_div', - 'elementwise_div', 'elementwise_mul' + 'tanh', + 'tanh', + 'add_p', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'elementwise_mul', + 'sub_p', + 'fill_constant', + 'elementwise_mul', + 'sub_p', + 'fill_constant', + 'elementwise_mul', + 'elementwise_mul', + 'rsqrt', + 'fill_constant', + 'elementwise_div', + 'elementwise_div', + 'elementwise_mul', ] self.prim2orig_ops = [ - 'tanh', 'tanh', 'elementwise_add', 'fill_constant', 'fill_constant', - 'fill_constant', 'elementwise_mul', 'elementwise_sub', - 'fill_constant', 'elementwise_mul', 'elementwise_sub', - 'fill_constant', 'elementwise_mul', 'elementwise_mul', 'rsqrt', - 'fill_constant', 'elementwise_div', 'elementwise_div', - 'elementwise_mul' + 'tanh', + 'tanh', + 'elementwise_add', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'elementwise_mul', + 'elementwise_sub', + 'fill_constant', + 'elementwise_mul', + 'elementwise_sub', + 'fill_constant', + 'elementwise_mul', + 'elementwise_mul', + 'rsqrt', + 'fill_constant', + 'elementwise_div', + 'elementwise_div', + 'elementwise_mul', ] def test_run(self): # Must using with program_guard(), otherwise prim ops will append other block - with paddle.static.program_guard(self.main_program, - self.startup_program): + with paddle.static.program_guard( + self.main_program, self.startup_program + ): ad = Transform(self.main_program.block(0)) orig_ops = [op.type for op in self.main_program.block(0).ops] self.assertEqual(sorted(orig_ops), sorted(self.orig_ops)) @@ -150,11 +178,13 @@ class TestAutoGradTransformForAdd(unittest.TestCase): self.assertEqual(flatten_ys_bar[k].shape, v) # Test prim2orig with blacklist - prim2orig(block=self.main_program.block(0), - blacklist=['add_p', 'sub_p']) + prim2orig( 
+ block=self.main_program.block(0), blacklist=['add_p', 'sub_p'] + ) prim2orig_ops = [op.type for op in self.main_program.block(0).ops] - self.assertEqual(sorted(prim2orig_ops), - sorted(self.prim2orig_ops_with_blacklist)) + self.assertEqual( + sorted(prim2orig_ops), sorted(self.prim2orig_ops_with_blacklist) + ) # Test prim2orig prim2orig(block=self.main_program.block(0)) @@ -163,19 +193,18 @@ class TestAutoGradTransformForAdd(unittest.TestCase): class TestAutoGradTransformForMatmul(TestAutoGradTransformForAdd): - def init_data(self): # { input_index: input_shape } self.xs_shape_map = {0: (100, 2), 1: (5, 2)} # { output_index: output_shape } self.ys_shape_map = {0: (100, 5)} - X0 = paddle.static.data('X0', - shape=self.xs_shape_map[0], - dtype='float32') + X0 = paddle.static.data( + 'X0', shape=self.xs_shape_map[0], dtype='float32' + ) X0.stop_gradient = False - X1 = paddle.static.data('X1', - shape=self.xs_shape_map[1], - dtype='float32') + X1 = paddle.static.data( + 'X1', shape=self.xs_shape_map[1], dtype='float32' + ) X1.stop_gradient = False A = paddle.reshape(X1, [2, 5]) @@ -189,8 +218,13 @@ class TestAutoGradTransformForMatmul(TestAutoGradTransformForAdd): self.orig_ops = ['reshape2', 'scale', 'matmul_v2'] self.orig2prim_ops = [ - 'reshape_p', 'fill_constant_p', 'fill_constant_p', - 'fill_constant_p', 'mul_p', 'add_p', 'matmul_p' + 'reshape_p', + 'fill_constant_p', + 'fill_constant_p', + 'fill_constant_p', + 'mul_p', + 'add_p', + 'matmul_p', ] self.linearize_ops = self.orig2prim_ops + [ # call fill_const() in linearize() function @@ -204,7 +238,7 @@ class TestAutoGradTransformForMatmul(TestAutoGradTransformForAdd): # 'add_p', 'matmul_p', 'matmul_p', - 'add_p' + 'add_p', ] self.transpose_ops = self.orig2prim_ops + [ # call fill_const() in transpose() function @@ -264,24 +298,23 @@ class TestAutoGradTransformForMatmul(TestAutoGradTransformForAdd): class TestAutoGradTransformForIndexSelect(TestAutoGradTransformForAdd): - def init_data(self): # { input_index: input_shape } - self.xs_shape_map = {0: (7, 8, 9), 1: (8, 1), 2: (7, 8, 9), 3: (3, )} + self.xs_shape_map = {0: (7, 8, 9), 1: (8, 1), 2: (7, 8, 9), 3: (3,)} # { output_index: output_shape } self.ys_shape_map = {0: (3, 16, 9)} - X0 = paddle.static.data('X0', - shape=self.xs_shape_map[0], - dtype='float32') + X0 = paddle.static.data( + 'X0', shape=self.xs_shape_map[0], dtype='float32' + ) X0.stop_gradient = False - X1 = paddle.static.data('X1', - shape=self.xs_shape_map[1], - dtype='float32') + X1 = paddle.static.data( + 'X1', shape=self.xs_shape_map[1], dtype='float32' + ) X1.stop_gradient = False - X2 = paddle.static.data('X2', - shape=self.xs_shape_map[2], - dtype='float32') + X2 = paddle.static.data( + 'X2', shape=self.xs_shape_map[2], dtype='float32' + ) X2.stop_gradient = False X3 = paddle.static.data('X3', shape=self.xs_shape_map[3], dtype='int32') X3.stop_gradient = False @@ -297,12 +330,23 @@ class TestAutoGradTransformForIndexSelect(TestAutoGradTransformForAdd): Y, ] self.orig_ops = [ - 'elementwise_add', 'p_norm', 'elementwise_sub', 'concat', - 'index_select' + 'elementwise_add', + 'p_norm', + 'elementwise_sub', + 'concat', + 'index_select', ] self.orig2prim_ops = [ - 'broadcast_p', 'add_p', 'reshape_p', 'mul_p', 'reduce_sum_p', - 'sqrt_p', 'broadcast_p', 'sub_p', 'concat_p', 'gather_p' + 'broadcast_p', + 'add_p', + 'reshape_p', + 'mul_p', + 'reduce_sum_p', + 'sqrt_p', + 'broadcast_p', + 'sub_p', + 'concat_p', + 'gather_p', ] self.linearize_ops = self.orig2prim_ops + [ # call fill_const() in linearize() function @@ -324,7 
+368,7 @@ class TestAutoGradTransformForIndexSelect(TestAutoGradTransformForAdd): 'broadcast_p', 'sub_p', 'concat_p', - 'gather_p' + 'gather_p', ] self.transpose_ops = self.orig2prim_ops + [ # call fill_const() in transpose() function @@ -357,26 +401,81 @@ class TestAutoGradTransformForIndexSelect(TestAutoGradTransformForAdd): ] self.prim2orig_ops_with_blacklist = [ - 'expand_v2', 'add_p', 'reshape2', 'elementwise_mul', 'reduce_sum', - 'sqrt', 'expand_v2', 'sub_p', 'concat', 'gather', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'elementwise_mul', 'reduce_sum', 'reshape2', - 'reshape2', 'elementwise_mul', 'elementwise_mul', 'reshape2', - 'expand_v2', 'elementwise_div', 'reduce_sum', 'reshape2', - 'fill_constant', 'sub_p', 'split', 'fill_constant', 'fill_any_like', - 'add_p', 'scatter', 'elementwise_add', 'add_p' + 'expand_v2', + 'add_p', + 'reshape2', + 'elementwise_mul', + 'reduce_sum', + 'sqrt', + 'expand_v2', + 'sub_p', + 'concat', + 'gather', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'elementwise_mul', + 'reduce_sum', + 'reshape2', + 'reshape2', + 'elementwise_mul', + 'elementwise_mul', + 'reshape2', + 'expand_v2', + 'elementwise_div', + 'reduce_sum', + 'reshape2', + 'fill_constant', + 'sub_p', + 'split', + 'fill_constant', + 'fill_any_like', + 'add_p', + 'scatter', + 'elementwise_add', + 'add_p', ] self.prim2orig_ops = [ - 'expand_v2', 'elementwise_add', 'reshape2', 'elementwise_mul', - 'reduce_sum', 'sqrt', 'expand_v2', 'elementwise_sub', 'concat', - 'gather', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', - 'elementwise_mul', 'reduce_sum', 'reshape2', 'reshape2', - 'elementwise_mul', 'elementwise_mul', 'reshape2', 'expand_v2', - 'elementwise_div', 'reduce_sum', 'reshape2', 'fill_constant', - 'elementwise_sub', 'split', 'fill_constant', 'fill_any_like', - 'elementwise_add', 'scatter', 'elementwise_add', 'elementwise_add' + 'expand_v2', + 'elementwise_add', + 'reshape2', + 'elementwise_mul', + 'reduce_sum', + 'sqrt', + 'expand_v2', + 'elementwise_sub', + 'concat', + 'gather', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'elementwise_mul', + 'reduce_sum', + 'reshape2', + 'reshape2', + 'elementwise_mul', + 'elementwise_mul', + 'reshape2', + 'expand_v2', + 'elementwise_div', + 'reduce_sum', + 'reshape2', + 'fill_constant', + 'elementwise_sub', + 'split', + 'fill_constant', + 'fill_any_like', + 'elementwise_add', + 'scatter', + 'elementwise_add', + 'elementwise_add', ] diff --git a/python/paddle/fluid/tests/unittests/autograd/utils.py b/python/paddle/fluid/tests/unittests/autograd/utils.py index bbbce4eb22f5b9b41d6fa652acd619eba0382e9b..abcc71dd43f1fcb70bbfbf2f57b414ac3a54d4ef 100644 --- a/python/paddle/fluid/tests/unittests/autograd/utils.py +++ b/python/paddle/fluid/tests/unittests/autograd/utils.py @@ -32,20 +32,22 @@ def _product(t): def _get_item(t, idx): assert isinstance( - t, - paddle.fluid.framework.Variable), "The first argument t must be Tensor." - assert isinstance(idx, - int), "The second argument idx must be an int number." + t, paddle.fluid.framework.Variable + ), "The first argument t must be Tensor." + assert isinstance( + idx, int + ), "The second argument idx must be an int number." 
flat_t = paddle.reshape(t, [-1]) return flat_t.__getitem__(idx) def _set_item(t, idx, value): assert isinstance( - t, - paddle.fluid.framework.Variable), "The first argument t must be Tensor." - assert isinstance(idx, - int), "The second argument idx must be an int number." + t, paddle.fluid.framework.Variable + ), "The first argument t must be Tensor." + assert isinstance( + idx, int + ), "The second argument idx must be an int number." flat_t = paddle.reshape(t, [-1]) flat_t.__setitem__(idx, value) return paddle.reshape(flat_t, t.shape) @@ -60,8 +62,9 @@ def _compute_numerical_jacobian(func, xs, delta, np_dtype): for i in range(fout_size): jac_i = list([] for _ in range(fin_size)) for j in range(fin_size): - jac_i[j] = np.zeros((_product(ys[i].shape), _product(xs[j].shape)), - dtype=np_dtype) + jac_i[j] = np.zeros( + (_product(ys[i].shape), _product(xs[j].shape)), dtype=np_dtype + ) jacobian[i] = jac_i for j in range(fin_size): @@ -81,7 +84,7 @@ def _compute_numerical_jacobian(func, xs, delta, np_dtype): for p in range(_product(ys[i].shape)): y_pos = _get_item(ys_pos[i], p) y_neg = _get_item(ys_neg[i], p) - jacobian[i][j][p][q] = (y_pos - y_neg) / delta / 2. + jacobian[i][j][p][q] = (y_pos - y_neg) / delta / 2.0 return jacobian @@ -94,7 +97,8 @@ def _compute_numerical_hessian(func, xs, delta, np_dtype): hessian_i = list([] for _ in range(fin_size)) for j in range(fin_size): hessian_i[j] = np.zeros( - (_product(xs[i].shape), _product(xs[j].shape)), dtype=np_dtype) + (_product(xs[i].shape), _product(xs[j].shape)), dtype=np_dtype + ) hessian[i] = hessian_i for i in range(fin_size): @@ -105,15 +109,19 @@ def _compute_numerical_hessian(func, xs, delta, np_dtype): x_pos = orig + delta xs[j] = _set_item(xs[j], q, x_pos) jacobian_pos = _compute_numerical_jacobian( - func, xs, delta, np_dtype) + func, xs, delta, np_dtype + ) x_neg = orig - delta xs[j] = _set_item(xs[j], q, x_neg) jacobian_neg = _compute_numerical_jacobian( - func, xs, delta, np_dtype) + func, xs, delta, np_dtype + ) xs[j] = _set_item(xs[j], q, orig) hessian[i][j][p][q] = ( - jacobian_pos[0][i][0][p] - - jacobian_neg[0][i][0][p]) / delta / 2. 
+ (jacobian_pos[0][i][0][p] - jacobian_neg[0][i][0][p]) + / delta + / 2.0 + ) return hessian @@ -125,11 +133,9 @@ def concat_to_matrix(xs, is_batched=False): return np.concatenate(rows, 1) if is_batched else np.concatenate(rows, 0) -def _compute_numerical_batch_jacobian(func, - xs, - delta, - np_dtype, - merge_batch=True): +def _compute_numerical_batch_jacobian( + func, xs, delta, np_dtype, merge_batch=True +): no_batch_jacobian = _compute_numerical_jacobian(func, xs, delta, np_dtype) xs = list(as_tensors(xs)) ys = list(as_tensors(func(*xs))) @@ -192,7 +198,8 @@ def _compute_numerical_batch_hessian(func, xs, delta, np_dtype): mid = len(hessian_res) // 2 for i in range(mid): hessian_result.append( - np.stack((hessian_res[i], hessian_res[mid + i]), axis=0)) + np.stack((hessian_res[i], hessian_res[mid + i]), axis=0) + ) return hessian_result @@ -205,8 +212,9 @@ def _compute_numerical_vjp(func, xs, v, delta, np_dtype): vjp = [np.zeros((_product(x.shape)), dtype=np_dtype) for x in xs] for j in range(len(xs)): for q in range(_product(xs[j].shape)): - vjp[j][q] = np.sum(jacobian[:, j, :, q].reshape(flat_v.shape) * - flat_v) + vjp[j][q] = np.sum( + jacobian[:, j, :, q].reshape(flat_v.shape) * flat_v + ) vjp = [vjp[j].reshape(xs[j].shape) for j in range(len(xs))] return vjp @@ -218,8 +226,9 @@ def _compute_numerical_vhp(func, xs, v, delta, np_dtype): vhp = [np.zeros((_product(x.shape)), dtype=np_dtype) for x in xs] for j in range(len(xs)): for q in range(_product(xs[j].shape)): - vhp[j][q] = np.sum(hessian[:, j, :, q].reshape(flat_v.shape) * - flat_v) + vhp[j][q] = np.sum( + hessian[:, j, :, q].reshape(flat_v.shape) * flat_v + ) vhp = [vhp[j].reshape(xs[j].shape) for j in range(len(xs))] return vhp @@ -256,7 +265,6 @@ def unuse(x, y): def nested(x): - def inner(y): return x * y @@ -286,8 +294,7 @@ def place(devices, key='place'): def decorate(cls): module = sys.modules[cls.__module__].__dict__ raw_classes = { - k: v - for k, v in module.items() if k.startswith(cls.__name__) + k: v for k, v in module.items() if k.startswith(cls.__name__) } for raw_name, raw_cls in raw_classes.items(): @@ -295,7 +302,7 @@ def place(devices, key='place'): test_cls = dict(raw_cls.__dict__) test_cls.update({key: d}) new_name = raw_name + '.' + d.__class__.__name__ - module[new_name] = type(new_name, (raw_cls, ), test_cls) + module[new_name] = type(new_name, (raw_cls,), test_cls) del module[raw_name] return cls @@ -324,10 +331,13 @@ def parameterize(fields, values=None): } test_cls.update(values) name = cls.__name__ + str(i) - name = name + '.' + \ - values.get('suffix') if values.get('suffix') else name + name = ( + name + '.' 
+ values.get('suffix') + if values.get('suffix') + else name + ) - test_cls_module[name] = type(name, (cls, ), test_cls) + test_cls_module[name] = type(name, (cls,), test_cls) for m in list(cls.__dict__): if m.startswith("test"): @@ -375,8 +385,12 @@ def _np_concat_matrix_sequence(src, src_format=MatrixFormat.NM): else: return np.concatenate(xs, axis=1) - supported_format = (MatrixFormat.NBM, MatrixFormat.BNM, MatrixFormat.NMB, - MatrixFormat.NM) + supported_format = ( + MatrixFormat.NBM, + MatrixFormat.BNM, + MatrixFormat.NMB, + MatrixFormat.NM, + ) if src_format not in supported_format: raise ValueError( f"Supported Jacobian format is {supported_format}, but got {src_format}" diff --git a/python/paddle/fluid/tests/unittests/benchmark.py b/python/paddle/fluid/tests/unittests/benchmark.py index 3a27fca61efee6320edc6539f21c663acf7d6776..d6fa01e398cbd2ee8c3ab41544685f8ef6aaf515 100644 --- a/python/paddle/fluid/tests/unittests/benchmark.py +++ b/python/paddle/fluid/tests/unittests/benchmark.py @@ -20,7 +20,6 @@ from op_test import OpTest class BenchmarkSuite(OpTest): - def timeit_function(self, callback, iters, *args, **kwargs): assert iters != 0, "Iters should >= 1" start = time.time() @@ -30,20 +29,23 @@ class BenchmarkSuite(OpTest): return elapse / iters def _assert_cpu_gpu_same(self, cpu_outs, gpu_outs, fetch_list, atol): - for item_cpu_out, item_gpu_out, variable in zip(cpu_outs, gpu_outs, - fetch_list): + for item_cpu_out, item_gpu_out, variable in zip( + cpu_outs, gpu_outs, fetch_list + ): # the cpu version is baseline, expect gpu version keep same with cpu version. expect = item_cpu_out expect_t = np.array(item_cpu_out) actual = item_gpu_out actual_t = np.array(item_gpu_out) var_name = variable if isinstance(variable, str) else variable.name - np.testing.assert_allclose(actual_t, - expect_t, - rtol=1e-05, - atol=atol) - self.assertListEqual(actual.lod(), expect.lod(), - "Output (" + var_name + ") has different lod") + np.testing.assert_allclose( + actual_t, expect_t, rtol=1e-05, atol=atol + ) + self.assertListEqual( + actual.lod(), + expect.lod(), + "Output (" + var_name + ") has different lod", + ) def _get_input_names(self): inputs = [] @@ -83,18 +85,23 @@ class BenchmarkSuite(OpTest): for place in places: elapses.append(self.timeit_output_with_place(place, iters)) for place, elapse in zip(places, elapses): - print("One pass of ({2}_op) at {0} cost {1}".format( - str(place), elapse, self.op_type)) + print( + "One pass of ({2}_op) at {0} cost {1}".format( + str(place), elapse, self.op_type + ) + ) def timeit_grad_with_place(self, place, iters=100): inputs_to_check = self._get_input_names() output_names = self._get_output_names() - return self.timeit_function(self._get_gradient, - iters, - inputs_to_check, - place, - output_names, - no_grad_set=None) + return self.timeit_function( + self._get_gradient, + iters, + inputs_to_check, + place, + output_names, + no_grad_set=None, + ) def timeit_grad(self, iters=100): places = self._get_places() @@ -102,5 +109,8 @@ class BenchmarkSuite(OpTest): for place in places: elapses.append(self.timeit_grad_with_place(place, iters)) for place, elapse in zip(places, elapses): - print("One pass of ({2}_grad_op) at {0} cost {1}".format( - str(place), elapse, self.op_type)) + print( + "One pass of ({2}_grad_op) at {0} cost {1}".format( + str(place), elapse, self.op_type + ) + ) diff --git a/python/paddle/fluid/tests/unittests/benchmark_sum_op.py b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py index 
d3d909e17e49d9d8c2125fbcd432722a14397750..e8cb02f16cb1ae251f016cc75662d7a5953ff4a2 100644 --- a/python/paddle/fluid/tests/unittests/benchmark_sum_op.py +++ b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py @@ -21,7 +21,6 @@ from benchmark import BenchmarkSuite class TestSumOp(BenchmarkSuite): - def setUp(self): self.op_type = "sum" self.customize_testcase() diff --git a/python/paddle/fluid/tests/unittests/c_embedding_op_base.py b/python/paddle/fluid/tests/unittests/c_embedding_op_base.py index ef3c942d2b09681d66ae25cce84cdf7f1bde1a3e..e9b1ca4d8aa801c8903db81984ca60e759eccc73 100644 --- a/python/paddle/fluid/tests/unittests/c_embedding_op_base.py +++ b/python/paddle/fluid/tests/unittests/c_embedding_op_base.py @@ -32,7 +32,6 @@ def get_c_embedding(start, end, table, ids): class TestCEmbeddingCPU(OpTest): - def setUp(self): self.init_dtype() self.initcase() @@ -46,8 +45,9 @@ class TestCEmbeddingCPU(OpTest): def initcase(self): self.op_type = "c_embedding" table = np.random.random((17, 64)).astype(self.dtype) - ids = np.random.randint(low=0, high=17 * 2, - size=(2, 4)).astype(self.ids_dtype) + ids = np.random.randint(low=0, high=17 * 2, size=(2, 4)).astype( + self.ids_dtype + ) self.start_index = 10 self.end_index = self.start_index + 17 @@ -72,7 +72,6 @@ class TestCEmbeddingCPU(OpTest): class TestCEmbeddingOpBase(TestCEmbeddingCPU): - def setUp(self): self.init_dtype() self.initcase() @@ -106,7 +105,6 @@ class TestCEmbeddingOpBase(TestCEmbeddingCPU): class TestCEmbeddingOpFP32(TestCEmbeddingOpBase): - def setUp(self): self.init_dtype() self.initcase() @@ -114,8 +112,9 @@ class TestCEmbeddingOpFP32(TestCEmbeddingOpBase): def initcase(self): self.op_type = "c_embedding" table = np.random.random((17, 64)).astype(self.dtype) - ids = np.random.randint(low=0, high=17 * 2, - size=(2, 4)).astype(self.ids_dtype) + ids = np.random.randint(low=0, high=17 * 2, size=(2, 4)).astype( + self.ids_dtype + ) self.start_index = 10 ids[0][1] = 12 ids[0][2] = 12 diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py index 1c4bfd387c42c17987ea1f542b05b6d98ec3e20d..e1319378728ad585e05142f1a9ef04c45996dde5 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py @@ -30,8 +30,9 @@ np.random.seed(0) def generator(): batch_size = 5 for i in range(5): - curr_train_x = np.random.randint(batch_size, - size=(batch_size, 3)).astype("float32") + curr_train_x = np.random.randint( + batch_size, size=(batch_size, 3) + ).astype("float32") if i >= 2: curr_train_x[0, :] = np.nan curr_train_x[-1, :] = np.inf @@ -62,7 +63,8 @@ def net(): hidden = fluid.layers.fc(input=hidden, size=3, act=None) cost, y_predict = fluid.layers.softmax_with_cross_entropy( - hidden, y, return_softmax=True) + hidden, y, return_softmax=True + ) acc_top1 = fluid.layers.accuracy(input=y_predict, label=y, k=1) avg_cost = paddle.mean(cost) @@ -88,14 +90,15 @@ def check(use_cuda): for train_data, y_label in generator(): outs = exe.run( main, - feed={ - 'x': train_data, - 'y': y_label - }, - fetch_list=[y_predict.name, avg_cost.name, acc_top1.name]) + feed={'x': train_data, 'y': y_label}, + fetch_list=[y_predict.name, avg_cost.name, acc_top1.name], + ) step += 1 - print('iter={:.0f},cost={},acc1={}'.format( - step, outs[1][0], outs[2][0])) + print( + 'iter={:.0f},cost={},acc1={}'.format( + step, outs[1][0], outs[2][0] + ) + ) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py index 9622ee4e9a92e1f0b06b07f9355cd4de77c26218..233cfee3087c5198ca8ad72dee866de8deaaa430 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py @@ -28,8 +28,9 @@ np.random.seed(0) def generator(): batch_size = 5 for i in range(5): - curr_train_x = np.random.randint(batch_size, - size=(batch_size, 3)).astype("float32") + curr_train_x = np.random.randint( + batch_size, size=(batch_size, 3) + ).astype("float32") if i >= 2: curr_train_x[0, :] = np.nan curr_train_x[-1, :] = np.inf @@ -42,7 +43,6 @@ def generator(): class TestLayer(nn.Layer): - def __init__(self): super(TestLayer, self).__init__() self.linear1 = nn.Linear(3, 400) @@ -82,8 +82,11 @@ def check(use_cuda): acc_top1 = paddle.metric.accuracy(input=y_pred, label=y, k=1) - print('iter={:.0f}, cost={}, acc1={}'.format(step, avg_cost.numpy(), - acc_top1.numpy())) + print( + 'iter={:.0f}, cost={}, acc1={}'.format( + step, avg_cost.numpy(), acc_top1.numpy() + ) + ) sgd.step() sgd.clear_grad() diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py index a69e6c59e0edc3de31f3b00d37f00c369c04de4f..94be00887a2e98a1a817baf5dc99ad328bacb22a 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -47,29 +46,30 @@ class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase): if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace( - device_id) #if args.use_gpu else fluid.CPUPlace() + device_id + ) # if args.use_gpu else fluid.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) place = fluid.XPUPlace(device_id) else: place = fluid.CPUPlace() - indata = test_base.create_test_data(shape=(10, 1000), - dtype=args["dtype"], - seed=os.getpid()) - assert args[ - 'static_mode'] == 1, "collective_allgather_api only support static mode" - result = self.get_model(train_prog, - startup_prog, - rank, - dtype=args["dtype"]) + indata = test_base.create_test_data( + shape=(10, 1000), dtype=args["dtype"], seed=os.getpid() + ) + assert ( + args['static_mode'] == 1 + ), "collective_allgather_api only support static mode" + result = self.get_model( + train_prog, startup_prog, rank, dtype=args["dtype"] + ) exe = fluid.Executor(place) exe.run(startup_prog) fetch_list = [] for elem in result: fetch_list.append(elem.name) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=fetch_list) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=fetch_list + ) sys.stdout.buffer.write(pickle.dumps(out)) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api_dygraph.py index 4d5f82e2882203012b8bc28761f152fe07525163..4477b663a5832f58161d9273e33be4ad63b69894 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api_dygraph.py +++ 
b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allgather_object_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_allgather_object_api_dygraph.py index 76610a84041352f1605e1f9b02a52750c4ee8871..03771e30d7e670672414fab0836143db37cfe814 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allgather_object_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allgather_object_api_dygraph.py @@ -18,7 +18,6 @@ import test_collective_api_base as test_base class TestCollectiveAllgatherObjectAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py index 9c86afc5f4b0a4fa05e8980aac17198050376f0c..a28f8b175da21c08268cfc1ec9d6c2617deb0415 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py @@ -21,15 +21,14 @@ paddle.enable_static() class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.all_reduce(tindata) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api_dygraph.py index 9bdbaa18177e1ce7fca86daed4975be15aa19322..30cb1e5a377fa08a3f25968a8bb587b9a0720aad 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveAllreduceAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py index 5eb983c890583cd62ebeda678555a58406b164e8..b3258e858a20bdee5d6b36e663db05da8e41c04e 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py @@ -21,15 +21,14 @@ paddle.enable_static() class TestCollectiveAllreduceNewGroupAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) gp = paddle.distributed.new_group([0, 1]) paddle.distributed.all_reduce(tindata, group=gp, sync_op=True) return [tindata] diff --git 
a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py index a88b73ce04aacf533ad0a16b55fdb5faf82ae003..69072d40aa8923634352bd8bac494ee688597331 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py @@ -22,30 +22,34 @@ paddle.enable_static() class TestCollectiveAllreduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofallreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_allreduce_sum", - inputs={'X': tindata}, - attrs={'ring_id': ring_id}, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_allreduce_sum", + inputs={'X': tindata}, + attrs={'ring_id': ring_id}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py index 10b867df69ac9c7eedc79a0fef136151df638274..c700d49415bf3b317f5b2c31bf831de933a6c1c4 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py @@ -22,22 +22,22 @@ paddle.enable_static() class TestCollectiveAllreduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofallreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) # tout = tin + tin - tin = tin if True: @@ -58,21 +58,27 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase): outputs={'Out': toutdata}, ) - main_prog.global_block().append_op(type='c_wait_compute', - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + main_prog.global_block().append_op( + type='c_wait_compute', + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) - main_prog.global_block().append_op(type="c_allreduce_sum", - inputs={'X': toutdata}, - attrs={'ring_id': ring_id}, - outputs={'Out': toutdata}, - attr={'use_calc_stream': False}) + main_prog.global_block().append_op( + type="c_allreduce_sum", + inputs={'X': toutdata}, + attrs={'ring_id': ring_id}, + outputs={'Out': toutdata}, + attr={'use_calc_stream': False}, + ) - 
main_prog.global_block().append_op(type="c_wait_comm", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + main_prog.global_block().append_op( + type="c_wait_comm", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) # tout = tin + tout - tin = tout if True: diff --git a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py index 57fbc69429a9ab2c45d44909adecee6a2c049df5..e3f23f0cd01181e18d719572db5d8c3f5f3eebb4 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py @@ -21,15 +21,14 @@ paddle.enable_static() class TestCollectiveAllToAllAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) tindata = paddle.split(tindata, 2, axis=0) tout_data = [] paddle.distributed.alltoall(tindata, tout_data) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api_dygraph.py index eb19cadb11426ea0ac24b2365a87874da6251ba1..560fa8162c5112597d87a5b429b9f23c8248d0db 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveAllToAllAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single.py b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single.py index e0e0fc74d3767079bd1c4b8323a350742c85f80c..0c9a1253393df3372fb01f52cba779dc4722d092 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single.py @@ -20,13 +20,14 @@ import paddle.distributed as dist class TestCollectiveAllToAllSingle(unittest.TestCase): - def setUp(self): - assert not paddle.distributed.is_initialized(), \ - "The distributed environment has not been initialized." + assert ( + not paddle.distributed.is_initialized() + ), "The distributed environment has not been initialized." dist.init_parallel_env() - assert paddle.distributed.is_initialized(), \ - "The distributed environment has been initialized." + assert ( + paddle.distributed.is_initialized() + ), "The distributed environment has been initialized." 
paddle.fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -38,7 +39,8 @@ class TestCollectiveAllToAllSingle(unittest.TestCase): input = paddle.ones([size, size], dtype='int64') * rank output = paddle.empty([size, size], dtype='int64') expected_output = paddle.concat( - [paddle.ones([1, size], dtype='int64') * i for i in range(size)]) + [paddle.ones([1, size], dtype='int64') * i for i in range(size)] + ) group = dist.new_group([0, 1]) dist.alltoall_single(input, output, group=group) @@ -52,18 +54,22 @@ class TestCollectiveAllToAllSingle(unittest.TestCase): input = paddle.ones([sum(in_split_sizes), size], dtype='float32') * rank output = paddle.empty([(rank + 1) * size, size], dtype='float32') - expected_output = paddle.concat([ - paddle.ones([rank + 1, size], dtype='float32') * i - for i in range(size) - ]) + expected_output = paddle.concat( + [ + paddle.ones([rank + 1, size], dtype='float32') * i + for i in range(size) + ] + ) group = dist.new_group([0, 1]) - task = dist.alltoall_single(input, - output, - in_split_sizes, - out_split_sizes, - sync_op=False, - group=group) + task = dist.alltoall_single( + input, + output, + in_split_sizes, + out_split_sizes, + sync_op=False, + group=group, + ) task.wait() np.testing.assert_allclose(output.numpy(), expected_output.numpy()) @@ -71,8 +77,9 @@ class TestCollectiveAllToAllSingle(unittest.TestCase): def tearDown(self): dist.destroy_process_group() - assert not paddle.distributed.is_initialized(), \ - "The distributed environment has been deinitialized." + assert ( + not paddle.distributed.is_initialized() + ), "The distributed environment has been deinitialized." if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single_api_dygraph.py index f66b3a74bfd2129006a61112b9fc1ee4758029ae..0b5bf5ddde59d89593dd901a46ac2a8ce0dd7d4c 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_single_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveAllToAllSingleAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_barrier_api.py b/python/paddle/fluid/tests/unittests/collective/collective_barrier_api.py index 43fcca0155b3ff9ae66eeb53d001d204a00dbfa1..95054f678a398d45b311fa67427b23315ef9a585 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_barrier_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_barrier_api.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestCollectiveBarrierAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_batch_isend_irecv.py b/python/paddle/fluid/tests/unittests/collective/collective_batch_isend_irecv.py index 2226d302291ca3a57592fbd513d07dc7f938ac54..6757e61fd0712e0a4ac01c8111d4036bb99149b1 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_batch_isend_irecv.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_batch_isend_irecv.py @@ -20,7 +20,6 @@ import paddle.distributed as dist class TestCollectiveBatchIsendIrecv(unittest.TestCase): - def setUp(self): dist.init_parallel_env() paddle.fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": 
True}) @@ -33,8 +32,9 @@ class TestCollectiveBatchIsendIrecv(unittest.TestCase): # paddle.tensor([1, 2]) # Rank-1 recv_t = paddle.empty(shape=[2], dtype=send_t.dtype) send_op = dist.P2POp(dist.isend, send_t, (rank + 1) % world_size) - recv_op = dist.P2POp(dist.irecv, recv_t, - (rank - 1 + world_size) % world_size) + recv_op = dist.P2POp( + dist.irecv, recv_t, (rank - 1 + world_size) % world_size + ) tasks = dist.batch_isend_irecv([send_op, recv_op]) for task in tasks: diff --git a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py index 0a14a53660955098e6e20e11b71daa1c5dfbe9d8..d7f07e440d8f23aa4f9946986ea52c63d93c99fe 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py @@ -21,15 +21,14 @@ paddle.enable_static() class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.broadcast(tindata, src=1) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api_dygraph.py index 9004d27d56183c7d4f5bd04f5fca8df75b894134..82bc998a249670b7315e64bbe0bac491e5ee32b2 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveBroadcastAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py index ff99792c9428af6a1c1488d084fec091beb32461..902e49b2648fdcd1662c8d01b620f8ff478dac73 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveBroadcast(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,26 +29,28 @@ class TestCollectiveBroadcast(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofbroadcast", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_broadcast", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'root': rootid - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_broadcast", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'root': rootid}, + outputs={'Out': 
toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py b/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py index 34d49aeb284d3b3909059cb2834f9731f9c0d6c6..533357fc44d7f474419fb0472e9ab8faa4bdb363 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveConcat(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,23 +29,22 @@ class TestCollectiveConcat(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofconcat", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_concat", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'rank': self.rank, - 'nranks': nranks - }, - outputs={'Out': toutdata}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_concat", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'rank': self.rank, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_global_gather.py b/python/paddle/fluid/tests/unittests/collective/collective_global_gather.py index 4403e06681b644ebac348fef8dc0cccacadf8d71..6aa268b50a3509a8849741cdfb8d7ecdff023236 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_global_gather.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_global_gather.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -38,19 +37,19 @@ class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): n_expert = 2 world_size = 2 tot_expert = n_expert * world_size - local_input_buf = paddle.static.data(name="local_input_buf", - shape=[-1, in_feat], - dtype="float32") - local_expert_count = paddle.static.data(name="local_expert_count", - shape=[tot_expert], - dtype="int64") - global_expert_count = paddle.static.data(name="global_expert_count", - shape=[tot_expert], - dtype="int64") + local_input_buf = paddle.static.data( + name="local_input_buf", shape=[-1, in_feat], dtype="float32" + ) + local_expert_count = paddle.static.data( + name="local_expert_count", shape=[tot_expert], dtype="int64" + ) + global_expert_count = paddle.static.data( + name="global_expert_count", shape=[tot_expert], dtype="int64" + ) - output = moe_utils.global_gather(local_input_buf, - local_expert_count, - global_expert_count) + output = moe_utils.global_gather( + local_input_buf, local_expert_count, global_expert_count + ) return [output] @@ -65,7 +64,8 @@ class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace( - device_id) #if args.use_gpu else fluid.CPUPlace() + device_id + ) # if args.use_gpu else fluid.CPUPlace() elif args['backend'] == 'bkcl': device_id 
= int(os.getenv("FLAGS_selected_xpus", "0")) place = fluid.XPUPlace(device_id) @@ -81,19 +81,22 @@ class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): # Call paddle.distributed.alltoall() under legacy dygraph _enable_legacy_dygraph() np.random.seed(os.getpid()) - local_expert_count = np.random.randint(1, 4, - size=tot_expert).astype("int64") + local_expert_count = np.random.randint(1, 4, size=tot_expert).astype( + "int64" + ) local_expert_count = paddle.to_tensor(local_expert_count) global_expert_count = [] - paddle.distributed.alltoall(paddle.split(local_expert_count, 2, axis=0), - global_expert_count) + paddle.distributed.alltoall( + paddle.split(local_expert_count, 2, axis=0), global_expert_count + ) global_expert_count = paddle.concat(global_expert_count, axis=0) global_expert_count = global_expert_count.numpy() local_expert_count = local_expert_count.numpy() fwd_expert_count = sum(global_expert_count) np.random.seed(os.getpid()) - local_input_buf = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) paddle.enable_static() if args['static_mode']: @@ -103,13 +106,15 @@ class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): fetch_list = [] for elem in result: fetch_list.append(elem.name) - out = exe.run(train_prog, - feed={ - 'local_expert_count': local_expert_count, - 'global_expert_count': global_expert_count, - 'local_input_buf': local_input_buf - }, - fetch_list=fetch_list) + out = exe.run( + train_prog, + feed={ + 'local_expert_count': local_expert_count, + 'global_expert_count': global_expert_count, + 'local_input_buf': local_input_buf, + }, + fetch_list=fetch_list, + ) sys.stdout.buffer.write(pickle.dumps(out)) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_global_gather_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_global_gather_dygraph.py index acba43268f2d4d85e9cd28bae38a92de2b7e8c19..fb8d831f0450d94e9bdd18a5fd493b4621e3b4ce 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_global_gather_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_global_gather_dygraph.py @@ -21,7 +21,6 @@ import paddle.distributed.utils.moe_utils as moe_utils class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -34,22 +33,24 @@ class TestCollectiveGlobalGatherAPI(TestCollectiveAPIRunnerBase): world_size = 2 tot_expert = n_expert * world_size local_expert_count = np.random.randint( - 1, 4, size=tot_expert).astype("int") + 1, 4, size=tot_expert + ).astype("int") local_expert_count = paddle.to_tensor(local_expert_count) global_expert_count = [] paddle.distributed.alltoall( - paddle.split(local_expert_count, 2, axis=0), - global_expert_count) + paddle.split(local_expert_count, 2, axis=0), global_expert_count + ) global_expert_count = paddle.concat(global_expert_count, axis=0) fwd_expert_count = sum(global_expert_count) np.random.seed(seed) - local_input_buf = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) local_input_buf = paddle.to_tensor(local_input_buf) local_input_buf.stop_gradient = False - output = moe_utils.global_gather(local_input_buf, - local_expert_count, - global_expert_count) + output = moe_utils.global_gather( + local_input_buf, local_expert_count, global_expert_count + ) output.stop_gradient = False c = output * output 
c.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/collective/collective_global_scatter.py b/python/paddle/fluid/tests/unittests/collective/collective_global_scatter.py index a7103d3c641866e9464ba662d0a503244776796a..7fb0dc6ff717ac978ed03acc47fa431d6083a4e5 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_global_scatter.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_global_scatter.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -37,20 +36,20 @@ class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): n_expert = 2 world_size = 2 tot_expert = n_expert * world_size - local_input_buf = paddle.static.data(name="local_input_buf", - shape=[-1, in_feat], - dtype="float32") - local_expert_count = paddle.static.data(name="local_expert_count", - shape=[tot_expert], - dtype="int64") + local_input_buf = paddle.static.data( + name="local_input_buf", shape=[-1, in_feat], dtype="float32" + ) + local_expert_count = paddle.static.data( + name="local_expert_count", shape=[tot_expert], dtype="int64" + ) global_expert_count = [] paddle.distributed.alltoall( - paddle.split(local_expert_count, 2, axis=0), - global_expert_count) + paddle.split(local_expert_count, 2, axis=0), global_expert_count + ) global_expert_count = paddle.concat(global_expert_count, axis=0) - output = moe_utils.global_scatter(local_input_buf, - local_expert_count, - global_expert_count) + output = moe_utils.global_scatter( + local_input_buf, local_expert_count, global_expert_count + ) return [output] def run_trainer(self, args): @@ -64,7 +63,8 @@ class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace( - device_id) #if args.use_gpu else fluid.CPUPlace() + device_id + ) # if args.use_gpu else fluid.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) place = fluid.XPUPlace(device_id) @@ -75,11 +75,13 @@ class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): n_expert = 2 world_size = 2 tot_expert = n_expert * world_size - local_expert_count = np.random.randint(1, 4, - size=tot_expert).astype("int64") + local_expert_count = np.random.randint(1, 4, size=tot_expert).astype( + "int64" + ) fwd_expert_count = sum(local_expert_count) - local_input_buf = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) if args['static_mode']: result = self.get_model(train_prog, startup_prog, rank) exe = fluid.Executor(place) @@ -87,12 +89,14 @@ class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): fetch_list = [] for elem in result: fetch_list.append(elem.name) - out = exe.run(train_prog, - feed={ - 'local_expert_count': local_expert_count, - 'local_input_buf': local_input_buf - }, - fetch_list=fetch_list) + out = exe.run( + train_prog, + feed={ + 'local_expert_count': local_expert_count, + 'local_input_buf': local_input_buf, + }, + fetch_list=fetch_list, + ) sys.stdout.buffer.write(pickle.dumps(out)) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_global_scatter_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_global_scatter_dygraph.py index b90905d5d397e707ae1df459ed5b06c8366e5398..324f079ee8cccf89f9b584b61d2b64fa1f5415c1 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/collective_global_scatter_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_global_scatter_dygraph.py @@ -21,7 +21,6 @@ import paddle.distributed.utils.moe_utils as moe_utils class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -34,21 +33,23 @@ class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase): world_size = 2 tot_expert = n_expert * world_size local_expert_count = np.random.randint( - 1, 4, size=tot_expert).astype("int") + 1, 4, size=tot_expert + ).astype("int") fwd_expert_count = sum(local_expert_count) - local_input_buf = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) local_expert_count = paddle.to_tensor(local_expert_count) local_input_buf = paddle.to_tensor(local_input_buf) global_expert_count = [] paddle.distributed.alltoall( - paddle.split(local_expert_count, 2, axis=0), - global_expert_count) + paddle.split(local_expert_count, 2, axis=0), global_expert_count + ) global_expert_count = paddle.concat(global_expert_count, axis=0) local_input_buf.stop_gradient = False - output = moe_utils.global_scatter(local_input_buf, - local_expert_count, - global_expert_count) + output = moe_utils.global_scatter( + local_input_buf, local_expert_count, global_expert_count + ) output.stop_gradient = False c = output * output c.backward() diff --git a/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py b/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py index 0486ea616421a09ad6aa825e9000f342825ef795..8fc2449f4f2325d2625377d5a5b8688acae76d49 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveIdentity(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,22 +29,22 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_identity", - inputs={'X': tindata}, - outputs={'Out': toutdata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_identity", + inputs={'X': tindata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_isend_irecv_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_isend_irecv_api_dygraph.py index 37a38b218c5dc13f5b53c7838290131dab62b575..d76e48f5ba5bcec69ebcd33cf8c75c28d1c85af6 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_isend_irecv_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_isend_irecv_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveIsendIrecvAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 
0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py index 83f4e3458624ae9ba03fb2520ca01ad190e60985..81e6d7daa2770b29a5b0c18c456e0d8ff2ddd2a9 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py @@ -21,15 +21,14 @@ paddle.enable_static() class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.reduce(tindata, dst=0) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api_dygraph.py index 5e9dfc8265ea1f997dba4399ce108e7d6b772311..5486e317d897ac440217d3d54004c6ee9b173066 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveReduceAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py index d30b845098ce51c36ae2c20f63d7d5888cddec89..a0b34247a8de4e8a09c3f631caff15547983239a 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveReduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,26 +29,28 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_reduce_sum", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'root_id': rootid - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_reduce_sum", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'root_id': rootid}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py index ab3fe725df849ba7cf37e8cad8a66b08b0f02232..c7fe247d455853b2c899016a3a47f5a6fcb814f1 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveReduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,27 +29,32 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_reduce_sum", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True, - 'root_id': rootid - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_reduce_sum", + inputs={'X': tindata}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': True, + 'root_id': rootid, + }, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter.py index d7c67e60a46ab404a3249a41b6512a1450e3a110..a8a7c67d96ee87b41535fad5b5e996777e70a03d 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter.py @@ -20,7 +20,6 @@ import paddle.distributed as dist class TestCollectiveReduceScatter(unittest.TestCase): - def setUp(self): dist.init_parallel_env() paddle.fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -76,9 +75,9 @@ class TestCollectiveReduceScatter(unittest.TestCase): # [1, 2, 3, 4] # Rank-1 output = paddle.empty(shape=[2], dtype=input.dtype) - task = paddle.distributed.collective._reduce_scatter_base(output, - input, - sync_op=False) + task = paddle.distributed.collective._reduce_scatter_base( + output, input, sync_op=False + ) task.wait() diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter_api_dygraph.py index c9df2459a78e0f242beb954f4add467e83e9ec6b..617f9889873f4d684f8576e207a9a4af32fd0ae7 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_scatter_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveReduceScatterAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py index f198ae467d560ed68d78160fc5790abfaa395f6a..ec2ca037bbfdf969f970305d8bbefd5e914776a1 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py +++ 
b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py @@ -21,19 +21,20 @@ paddle.enable_static() class TestCollectiveScatterAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32', - append_batch_size=False) - toutdata = layers.fill_constant(shape=[5, 1000], - dtype='float32', - value=1.0) + tindata = layers.data( + name="tindata", + shape=[10, 1000], + dtype='float32', + append_batch_size=False, + ) + toutdata = layers.fill_constant( + shape=[5, 1000], dtype='float32', value=1.0 + ) tensor_list = None if rank == 1: tensor_list = paddle.split(tindata, 2, axis=0) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api_dygraph.py index 8f27f84a32d5201e51877e66630016b3520d790a..9061a1b5a4f06d0b86df651394ef652fcf42f5f0 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveScatterAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -32,9 +31,9 @@ class TestCollectiveScatterAPI(test_base.TestCollectiveAPIRunnerBase): if rank == 0: dist.scatter(subdata1, src=1) else: - dist.scatter(subdata1, - tensor_list=[subdata1, subdata2], - src=1) + dist.scatter( + subdata1, tensor_list=[subdata1, subdata2], src=1 + ) return [subdata1.cast("float32").numpy()] else: tindata = paddle.to_tensor(indata) @@ -42,9 +41,9 @@ class TestCollectiveScatterAPI(test_base.TestCollectiveAPIRunnerBase): if rank == 0: dist.scatter(subdata1, src=1) else: - dist.scatter(subdata1, - tensor_list=[subdata1, subdata2], - src=1) + dist.scatter( + subdata1, tensor_list=[subdata1, subdata2], src=1 + ) return [subdata1.numpy()] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py index 4a8a1acfe5f8cfb456dc8aae34f3e80b5f1dfc08..29cfe40117bc7c6effda5a73d8848e444c470167 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveScatter(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,27 +29,28 @@ class TestCollectiveScatter(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_scatter", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'root': rootid, - 'nranks': 2 - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_scatter", + 
inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'root': rootid, 'nranks': 2}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py index d22080e8cdf535da71bb799887411fd95ca85a79..b8c233051ee6b9b8074bb9e5d6bcd1d1a6ef3359 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py @@ -21,16 +21,17 @@ paddle.enable_static() class TestCollectiveSendRecvAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32', - append_batch_size=False) + tindata = layers.data( + name="tindata", + shape=[10, 1000], + dtype='float32', + append_batch_size=False, + ) if rank == 0: paddle.distributed.send(tindata, dst=1) else: diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api_dygraph.py index b4bf24ffbfaa962e3936b933800c7807fbbf2d91..701c8c8158293a61fdccd6a51adbc82a67c042e1 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api_dygraph.py @@ -19,7 +19,6 @@ import test_collective_api_base as test_base class TestCollectiveSendRecvAPI(test_base.TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py index 6324e619f9ea53afa56b8933380a0cee368eed18..708326a3f49685441acda5f8fdb360a3cc9d143a 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py @@ -21,36 +21,40 @@ paddle.enable_static() class TestCollectiveSendRecv(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float64', - append_batch_size=False) + tindata = layers.data( + name="tindata", + shape=[10, 1000], + dtype='float64', + append_batch_size=False, + ) if self.rank == 0: - main_prog.global_block().append_op(type="send_v2", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'peer': 1, - 'use_calc_stream': True - }) + main_prog.global_block().append_op( + type="send_v2", + inputs={'X': tindata}, + attrs={ + 'ring_id': ring_id, + 'peer': 1, + 'use_calc_stream': True, + }, + ) else: - main_prog.global_block().append_op(type="recv_v2", - outputs={'Out': tindata}, - attrs={ - 'peer': 0, - 'ring_id': ring_id, - 'dtype': tindata.dtype, - 'out_shape': - tindata.shape, - 'use_calc_stream': True, - }) + main_prog.global_block().append_op( + type="recv_v2", + outputs={'Out': tindata}, + attrs={ + 'peer': 0, + 'ring_id': ring_id, + 'dtype': tindata.dtype, + 'out_shape': tindata.shape, + 'use_calc_stream': 
True, + }, + ) return tindata diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py index ec12db6f5be04bd3ec939d879ca64e402bd34031..eea4ef4615f8537de732ea1bcb0d35bd1c1c2d67 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py @@ -22,39 +22,46 @@ paddle.enable_static() class TestCollectiveSendRecv(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float64', - append_batch_size=False) + tindata = layers.data( + name="tindata", + shape=[10, 1000], + dtype='float64', + append_batch_size=False, + ) if self.rank == 0: data1 = fluid.layers.assign( - np.array([[0, 1, 2]], dtype='float32')) + np.array([[0, 1, 2]], dtype='float32') + ) data2 = fluid.layers.assign( - np.array([[3, 4, 5]], dtype='float32')) + np.array([[3, 4, 5]], dtype='float32') + ) elif self.rank == 1: data1 = fluid.layers.assign( - np.array([[3, 4, 5]], dtype='float32')) + np.array([[3, 4, 5]], dtype='float32') + ) data2 = fluid.layers.assign( - np.array([[0, 1, 2]], dtype='float32')) + np.array([[0, 1, 2]], dtype='float32') + ) tensor_array = fluid.layers.create_array(dtype='float32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) fluid.layers.array_write(data1, i, tensor_array) fluid.layers.array_write(data2, i + 1, tensor_array) if self.rank == 0: - main_prog.global_block().append_op(type="send_v2", - inputs={'X': tensor_array}, - attrs={ - 'ring_id': ring_id, - 'peer': 1, - 'use_calc_stream': True - }) + main_prog.global_block().append_op( + type="send_v2", + inputs={'X': tensor_array}, + attrs={ + 'ring_id': ring_id, + 'peer': 1, + 'use_calc_stream': True, + }, + ) else: main_prog.global_block().append_op( type="recv_v2", @@ -65,7 +72,8 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase): 'dtype': data1.dtype, 'out_shape': [1, 3], 'use_calc_stream': True, - }) + }, + ) return tensor_array diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py index da6ef74c11460b5560d5f42bb57a7c770181e349..28804d1570fbb348138a64fb5b90585cd36e34ca 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py @@ -21,41 +21,46 @@ paddle.enable_static() class TestCollectiveSendRecvDynamicShape(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float64', - append_batch_size=False) + tindata = layers.data( + name="tindata", + shape=[10, 1000], + dtype='float64', + append_batch_size=False, + ) if self.rank == 0: - main_prog.global_block().append_op(type="send_v2", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'peer': 1, - 'use_calc_stream': True, - 'dynamic_shape': True - }) + main_prog.global_block().append_op( + type="send_v2", + inputs={'X': tindata}, + attrs={ + 
'ring_id': ring_id, + 'peer': 1, + 'use_calc_stream': True, + 'dynamic_shape': True, + }, + ) else: - main_prog.global_block().append_op(type="recv_v2", - outputs={'Out': tindata}, - attrs={ - 'peer': 0, - 'ring_id': ring_id, - 'dtype': tindata.dtype, - 'out_shape': - tindata.shape, - 'use_calc_stream': True, - 'dynamic_shape': True - }) + main_prog.global_block().append_op( + type="recv_v2", + outputs={'Out': tindata}, + attrs={ + 'peer': 0, + 'ring_id': ring_id, + 'dtype': tindata.dtype, + 'out_shape': tindata.shape, + 'use_calc_stream': True, + 'dynamic_shape': True, + }, + ) return tindata if __name__ == "__main__": - runtime_main(TestCollectiveSendRecvDynamicShape, "sendrecv_dynamic_shape", - 0) + runtime_main( + TestCollectiveSendRecvDynamicShape, "sendrecv_dynamic_shape", 0 + ) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_split_op.py b/python/paddle/fluid/tests/unittests/collective/collective_split_op.py index 2e71ccf304939533cb77cd57c9976d0cc2c71377..57a8aa4fcc5d120278235f78e0fb71dde05b62d1 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_split_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_split_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveAllGather(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,23 +29,22 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofsplit", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_split", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'rank': self.rank, - 'nranks': nranks - }, - outputs={'Out': toutdata}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_split", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'rank': self.rank, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective/column_parallel_linear_api.py b/python/paddle/fluid/tests/unittests/collective/column_parallel_linear_api.py index 5f6178dbfd65204bc2ced4e7b37cc937fd294605..accecd08c10472d6b4b78ae54dbcdbeb49494a14 100644 --- a/python/paddle/fluid/tests/unittests/collective/column_parallel_linear_api.py +++ b/python/paddle/fluid/tests/unittests/collective/column_parallel_linear_api.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestColumnParallelLinearAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -32,18 +31,22 @@ class TestColumnParallelLinearAPI(TestCollectiveAPIRunnerBase): np.random.seed(2020) np_array = np.random.rand(1000, 16) - data = paddle.static.data(name='tindata', - shape=[10, 1000], - dtype="float32") + data = paddle.static.data( + name='tindata', shape=[10, 1000], dtype="float32" + ) paddle.distributed.broadcast(data, src=0) if rank == 0: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[:, 0:8]), ) + np_array[:, 0:8] + ), + ) else: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[:, 8:16]), ) + np_array[:, 8:16] + ), + ) linear_out = paddle.distributed.split( data, diff --git 
a/python/paddle/fluid/tests/unittests/collective/communication_stream_allgather_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_allgather_api_dygraph.py index f4ff1e5b7b37cc8bf9e94a636f8d9c8be40f5cf7..a04e30dfae1403f781d72d5af50bf75b6b778a00 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_allgather_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_allgather_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamAllgatherTestCase(): - +class StreamAllgatherTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamAllgatherTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,47 +39,53 @@ class StreamAllgatherTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) # case 1: pass an empty tensor list empty_tensor_list = [] - task = dist.stream.all_gather(empty_tensor_list, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.all_gather( + empty_tensor_list, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() - assert np.allclose(empty_tensor_list, - test_data_list, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + empty_tensor_list, test_data_list, rtol=1e-05, atol=1e-05 + ) # case 2: pass a pre-sized tensor list full_tensor_list = [paddle.empty_like(tensor) for _ in test_data_list] - task = dist.stream.all_gather(full_tensor_list, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.all_gather( + full_tensor_list, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() - assert np.allclose(full_tensor_list, - test_data_list, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + full_tensor_list, test_data_list, rtol=1e-05, atol=1e-05 + ) # case 3: pass a pre-sized tensor result_tensor = paddle.concat( - [paddle.to_tensor(data) for data in test_data_list]) + [paddle.to_tensor(data) for data in test_data_list] + ) out_tensor = paddle.empty_like(result_tensor) - task = dist.stream.all_gather(out_tensor, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.all_gather( + out_tensor, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() assert np.allclose(out_tensor, result_tensor, rtol=1e-05, atol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_allreduce_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_allreduce_api_dygraph.py index abf455203517d86a181b58cc95c7c0e0c10e293d..be8841345e2f593b6301dd00a3e5e4b9cb55fca1 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/communication_stream_allreduce_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_allreduce_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamAllReduceTestCase(): - +class StreamAllReduceTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamAllReduceTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,15 +39,16 @@ class StreamAllReduceTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) - task = dist.stream.all_reduce(tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.all_reduce( + tensor, sync_op=self._sync_op, use_calc_stream=self._use_calc_stream + ) if not self._sync_op: task.wait() diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_api_dygraph.py index 8842bcd08b640dc88076cbbe1c6904825f0bb1dc..07693342c6b4ae95488e58a5a8e22eb419b85d22 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamAllToAllTestCase(): - +class StreamAllToAllTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamAllToAllTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." 
+ ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,17 +39,23 @@ class StreamAllToAllTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) nranks = len(test_data_list) data1 = test_data_list[0] data2 = test_data_list[1] result1 = np.vstack( - [data1[0:data1.shape[0] // 2, :], data2[0:data2.shape[0] // 2, :]]) + [ + data1[0 : data1.shape[0] // 2, :], + data2[0 : data2.shape[0] // 2, :], + ] + ) result2 = np.vstack( - [data1[data1.shape[0] // 2:, :], data2[data2.shape[0] // 2:, :]]) + [data1[data1.shape[0] // 2 :, :], data2[data2.shape[0] // 2 :, :]] + ) rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) @@ -57,48 +63,52 @@ class StreamAllToAllTestCase(): # case 1: pass an empty tensor list empty_tensor_list = [] - task = dist.stream.alltoall(empty_tensor_list, [t1, t2], - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.alltoall( + empty_tensor_list, + [t1, t2], + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() result_tensor_list = np.vstack(empty_tensor_list) if rank == 0: - assert np.allclose(result_tensor_list, - result1, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + result_tensor_list, result1, rtol=1e-05, atol=1e-05 + ) else: - assert np.allclose(result_tensor_list, - result2, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + result_tensor_list, result2, rtol=1e-05, atol=1e-05 + ) # case 2: pass a pre-sized tensor list full_tensor_list = [paddle.empty_like(t1) for _ in test_data_list] - task = dist.stream.alltoall(full_tensor_list, [t1, t2], - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.alltoall( + full_tensor_list, + [t1, t2], + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() result_tensor_list = np.vstack(full_tensor_list) if rank == 0: - assert np.allclose(result_tensor_list, - result1, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + result_tensor_list, result1, rtol=1e-05, atol=1e-05 + ) else: - assert np.allclose(result_tensor_list, - result2, - rtol=1e-05, - atol=1e-05) + assert np.allclose( + result_tensor_list, result2, rtol=1e-05, atol=1e-05 + ) # case 3: pass a pre-sized tensor out_tensor = paddle.empty_like(tensor) - task = dist.stream.alltoall(out_tensor, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.alltoall( + out_tensor, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == 0: diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_single_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_single_api_dygraph.py index bb36693b0fbc9c951f3ac454a684d5b375784c4f..cb235dd9d1172223b233106dc3dcd91ba8350d62 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_single_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_alltoall_single_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamAllToAllSingleTestCase(): - +class StreamAllToAllSingleTestCase: def __init__(self): 
self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamAllToAllSingleTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,17 +39,23 @@ class StreamAllToAllSingleTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) nranks = len(test_data_list) data1 = paddle.to_tensor(test_data_list[0]) data2 = paddle.to_tensor(test_data_list[1]) result1 = np.vstack( - (data1[0:data1.shape[0] // 2, :], data2[0:data2.shape[0] // 2, :])) + ( + data1[0 : data1.shape[0] // 2, :], + data2[0 : data2.shape[0] // 2, :], + ) + ) result2 = np.vstack( - (data1[data1.shape[0] // 2:, :], data2[data2.shape[0] // 2:, :])) + (data1[data1.shape[0] // 2 :, :], data2[data2.shape[0] // 2 :, :]) + ) rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) @@ -59,7 +65,8 @@ class StreamAllToAllSingleTestCase(): out_tensor, tensor, sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == 0: diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_broadcast_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_broadcast_api_dygraph.py index 487dfd6ae68942c751a8ad6a5806e08ec10622f8..68d496a11e48de8231a3f9adf3d02d53b1a49f86 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_broadcast_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_broadcast_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamBroadcastTestCase(): - +class StreamBroadcastTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamBroadcastTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." 
+ ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -38,12 +38,15 @@ class StreamBroadcastTestCase(): src_rank = 1 result = test_collective_base.create_test_data( - shape=self._shape, dtype=self._dtype, seed=self._seeds[src_rank]) + shape=self._shape, dtype=self._dtype, seed=self._seeds[src_rank] + ) tensor = paddle.to_tensor(result) - task = dist.stream.broadcast(tensor, - src=src_rank, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.broadcast( + tensor, + src=src_rank, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_api_dygraph.py index a487eac566ab5e29398f6738f915b3133486a628..444390d20c949c156f17e318af17bdd9d7006ae3 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamReduceTestCase(): - +class StreamReduceTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamReduceTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,16 +39,19 @@ class StreamReduceTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) - task = dist.stream.reduce(tensor, - dst=1, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.reduce( + tensor, + dst=1, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() @@ -56,10 +59,9 @@ class StreamReduceTestCase(): if rank == 1: assert np.allclose(tensor, result, rtol=1e-05, atol=1e-05) else: - assert np.allclose(tensor, - test_data_list[rank], - rtol=1e-05, - atol=1e-05) + assert np.allclose( + tensor, test_data_list[rank], rtol=1e-05, atol=1e-05 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_scatter_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_scatter_api_dygraph.py index effaf1cb6c99a46e0c2e11e4773da2abc716c2e7..d65d99ba8e55b2f59bdc7f7038f49b1592b77a84 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_scatter_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_reduce_scatter_api_dygraph.py @@ -17,11 +17,12 @@ import numpy as np import paddle import paddle.distributed as dist import test_collective_api_base as test_collective_base -from paddle.distributed.communication.stream.reduce_scatter import _reduce_scatter_base +from paddle.distributed.communication.stream.reduce_scatter import ( + 
_reduce_scatter_base, +) -class StreamReduceScatterTestCase(): - +class StreamReduceScatterTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -31,7 +32,8 @@ class StreamReduceScatterTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -40,12 +42,13 @@ class StreamReduceScatterTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) reduce_result = sum(test_data_list) - result1 = reduce_result[0:reduce_result.shape[0] // 2] - result2 = reduce_result[reduce_result.shape[0] // 2:] + result1 = reduce_result[0 : reduce_result.shape[0] // 2] + result2 = reduce_result[reduce_result.shape[0] // 2 :] rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) @@ -53,9 +56,12 @@ class StreamReduceScatterTestCase(): # case 1: pass a pre-sized tensor list t1, t2 = paddle.split(tensor, 2, axis=0) result_tensor = paddle.empty_like(t1) - task = dist.stream.reduce_scatter(result_tensor, [t1, t2], - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.reduce_scatter( + result_tensor, + [t1, t2], + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == 0: @@ -65,10 +71,12 @@ class StreamReduceScatterTestCase(): # case 2: pass a pre-sized tensor result_tensor = paddle.empty_like(t1) - task = dist.stream.reduce_scatter(result_tensor, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.reduce_scatter( + result_tensor, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == 0: @@ -78,10 +86,12 @@ class StreamReduceScatterTestCase(): # case 3: test the legacy API result_tensor = paddle.empty_like(t1) - task = _reduce_scatter_base(result_tensor, - tensor, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = _reduce_scatter_base( + result_tensor, + tensor, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == 0: diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_scatter_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_scatter_api_dygraph.py index 6060e5050ca09bdb0e04b2eae073c4a47f170006..b4f01798a6677173e8a8daf3792e93ae481e7727 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_scatter_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_scatter_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamScatterTestCase(): - +class StreamScatterTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamScatterTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the 
backend for now.") + "Only support nccl and gloo as the backend for now." + ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,24 +39,28 @@ class StreamScatterTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) src_rank = 1 src_data = test_data_list[src_rank] - result1 = src_data[0:src_data.shape[0] // 2] - result2 = src_data[src_data.shape[0] // 2:] + result1 = src_data[0 : src_data.shape[0] // 2] + result2 = src_data[src_data.shape[0] // 2 :] rank = dist.get_rank() # case 1: pass a pre-sized tensor list tensor = paddle.to_tensor(test_data_list[rank]) t1, t2 = paddle.split(tensor, 2, axis=0) - task = dist.stream.scatter(t1, [t1, t2], - src=src_rank, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.scatter( + t1, + [t1, t2], + src=src_rank, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == src_rank: @@ -67,11 +71,13 @@ class StreamScatterTestCase(): # case 2: pass a pre-sized tensor tensor = paddle.to_tensor(src_data) t1 = paddle.empty_like(t1) - task = dist.stream.scatter(t1, - tensor, - src=src_rank, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.scatter( + t1, + tensor, + src=src_rank, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() if rank == src_rank: diff --git a/python/paddle/fluid/tests/unittests/collective/communication_stream_sendrecv_api_dygraph.py b/python/paddle/fluid/tests/unittests/collective/communication_stream_sendrecv_api_dygraph.py index f9b2806fc1b840350691490f6d0fd6787d0f963f..aa7891a7ab7526e4a690f8310d49c651e574163b 100644 --- a/python/paddle/fluid/tests/unittests/collective/communication_stream_sendrecv_api_dygraph.py +++ b/python/paddle/fluid/tests/unittests/collective/communication_stream_sendrecv_api_dygraph.py @@ -19,8 +19,7 @@ import paddle.distributed as dist import test_collective_api_base as test_collective_base -class StreamSendRecvTestCase(): - +class StreamSendRecvTestCase: def __init__(self): self._sync_op = eval(os.getenv("sync_op")) self._use_calc_stream = eval(os.getenv("use_calc_stream")) @@ -30,7 +29,8 @@ class StreamSendRecvTestCase(): self._seeds = eval(os.getenv("seeds")) if self._backend not in ["nccl", "gloo"]: raise NotImplementedError( - "Only support nccl and gloo as the backend for now.") + "Only support nccl and gloo as the backend for now." 
+ ) os.environ["PADDLE_DISTRI_BACKEND"] = self._backend def run_test_case(self): @@ -39,9 +39,10 @@ class StreamSendRecvTestCase(): test_data_list = [] for seed in self._seeds: test_data_list.append( - test_collective_base.create_test_data(shape=self._shape, - dtype=self._dtype, - seed=seed)) + test_collective_base.create_test_data( + shape=self._shape, dtype=self._dtype, seed=seed + ) + ) src_rank = 0 dst_rank = 1 @@ -49,15 +50,19 @@ class StreamSendRecvTestCase(): rank = dist.get_rank() tensor = paddle.to_tensor(test_data_list[rank]) if rank == 0: - task = dist.stream.send(tensor, - dst=dst_rank, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.send( + tensor, + dst=dst_rank, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) else: - task = dist.stream.recv(tensor, - src=src_rank, - sync_op=self._sync_op, - use_calc_stream=self._use_calc_stream) + task = dist.stream.recv( + tensor, + src=src_rank, + sync_op=self._sync_op, + use_calc_stream=self._use_calc_stream, + ) if not self._sync_op: task.wait() diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/auto_parallel_parallelizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/auto_parallel_parallelizer.py index 185e2c23fed953d0090922c3e1b1d73d5bf86ff5..d87cacb21389f6e4da664a95a6990aba436fe412 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/auto_parallel_parallelizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/auto_parallel_parallelizer.py @@ -30,27 +30,27 @@ _global_process_mesh = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") @@ -67,24 +67,29 @@ class MLPLayer(nn.Layer): def mlp_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, sequence_len, 1], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + label = static.data( + name="label", shape=[batch_size, sequence_len, 1], dtype='float32' + ) auto.shard_tensor(input, _global_process_mesh, [None, None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - 
intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) predict = mlp(input) @@ -95,7 +100,6 @@ def mlp_pretrain_forward(train_program, start_program): class TestMLPAutoParallelizer(unittest.TestCase): - def test_mlp_serial(self): global _global_process_mesh @@ -114,17 +118,24 @@ class TestMLPAutoParallelizer(unittest.TestCase): train_program = static.Program() start_program = static.Program() loss, train_program, start_program = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, start_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = optimizer.minimize(loss, start_program) suffix = core.kAutoParallelSuffix() for block in distributed_main_program.blocks: for op in block.ops: diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/c_comm_init_op.py b/python/paddle/fluid/tests/unittests/collective/fleet/c_comm_init_op.py index 4c981156ebb74b86efc0942e58c0f1b13b3b3ac9..a90c7f1baae68cb7da7d0fde32b505b7893ffa17 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/c_comm_init_op.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/c_comm_init_op.py @@ -15,14 +15,15 @@ import unittest import os import paddle.fluid as fluid -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) import paddle paddle.enable_static() class TestCCommInitOp(unittest.TestCase): - def setUp(self): self.endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',') self.current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT") @@ -42,24 +43,29 @@ class TestCCommInitOp(unittest.TestCase): nccl_id_var = block.create_var( name=fluid.unique_name.generate('nccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': nccl_id_var}, - attrs={ - 'rank': self.rank, - 'endpoint': self.current_endpoint, - 'other_endpoints': self.other_endpoints - }) - block.append_op(type='c_comm_init', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'nranks': self.nranks, - 'rank': self.rank, - 'ring_id': 0, - 'device_id': self.gpu_id - }) + type=fluid.core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': nccl_id_var}, + attrs={ + 'rank': self.rank, + 'endpoint': self.current_endpoint, + 'other_endpoints': self.other_endpoints, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'nranks': self.nranks, + 'rank': self.rank, + 'ring_id': 0, + 'device_id': self.gpu_id, + }, + ) self.exe.run(program) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py index dc144f997d4646746174a734c5cfa8d6330d8962..9e2e2da3876bccac0b7930de37967fe905874675 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py @@ -26,7 +26,6 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -39,23 +38,33 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = fluid.optimizer.GradientMergeOptimizer(opt, 2) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py index ff31a7016a673fe13863a17463e4988658053be1..f4de5280d0f7d2606126e48d664052dccf089ff2 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py @@ -23,7 +23,6 @@ from dist_mnist import cnn_model class TestDistMnistGradientMergeRawOptimizer(TestDistRunnerBase): - def get_model(self, batch_size=2, single_device=False): paddle.enable_static() paddle.seed(1) @@ -54,9 +53,9 @@ class TestDistMnistGradientMergeRawOptimizer(TestDistRunnerBase): strategy.without_graph_optimization = True fleet.init(is_collective=True, strategy=strategy) - image = paddle.static.data(name='image', - shape=[None, 1, 28, 28], - dtype="float32") + image = paddle.static.data( + name='image', shape=[None, 1, 28, 28], dtype="float32" + ) label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') predict = cnn_model(image) acc = paddle.metric.accuracy(predict, label) @@ -68,7 +67,8 @@ class TestDistMnistGradientMergeRawOptimizer(TestDistRunnerBase): optimizer = fluid.optimizer.GradientMergeOptimizer( optimizer, k_steps=strategy.gradient_merge_configs["k_steps"], - avg=strategy.gradient_merge_configs["avg"]) + avg=strategy.gradient_merge_configs["avg"], + ) world_size = 1 else: optimizer = fleet.distributed_optimizer(optimizer) @@ -88,10 +88,12 @@ class TestDistMnistGradientMergeRawOptimizer(TestDistRunnerBase): else: assert start_allreduce_idx == 1 - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), 
batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) return test_program, cost, train_reader, test_reader, acc, predict diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api.py index bac023a8e7d95d621512a31a52c0049f10f31e27..134503249340997539a5e163301b8ec774552cdf 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api.py @@ -21,7 +21,10 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.sharding import group_sharded_parallel, save_group_sharded_model +from paddle.distributed.sharding import ( + group_sharded_parallel, + save_group_sharded_model, +) epoch = 10 paddle.seed(2022) @@ -33,7 +36,6 @@ batch_size = 100 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -49,7 +51,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -62,45 +63,46 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_multi_precision, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) optimizer = paddle.optimizer.Momentum( - parameters=[{ - "params": list(model.parameters()) - }] if opt_group else list(model.parameters()), + parameters=[{"params": list(model.parameters())}] + if opt_group + else list(model.parameters()), learning_rate=0.001, weight_decay=0.00001, grad_clip=clip, - multi_precision=use_multi_precision) + multi_precision=use_multi_precision, + ) return optimizer -def train_mlp(model, - shard_level, - use_multi_precision, - output_dir, - amp_level='O1'): +def train_mlp( + model, shard_level, use_multi_precision, output_dir, amp_level='O1' +): group = paddle.distributed.new_group([0, 1]) - optimizer = optimizer_setting(model=model, - use_multi_precision=use_multi_precision) - model = paddle.amp.decorate(models=model, - level=amp_level, - save_dtype='float32') + optimizer = optimizer_setting( + model=model, use_multi_precision=use_multi_precision + ) + model = paddle.amp.decorate( + models=model, level=amp_level, save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) - model, optimizer, scaler = group_sharded_parallel(model=model, - optimizer=optimizer, - level=shard_level, - scaler=scaler) - - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + model, optimizer, scaler = group_sharded_parallel( + model=model, optimizer=optimizer, level=shard_level, scaler=scaler + ) + + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -111,8 +113,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level=amp_level): out = 
model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if not use_multi_precision: @@ -138,44 +141,56 @@ def test_sharding_api(): output_dir = tempfile.mkdtemp() # fp16 - stage2_params = train_mlp(mlp1, - shard_level="os_g", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O2') - stage3_params = train_mlp(mlp2, - shard_level="p_g_os", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O2') + stage2_params = train_mlp( + mlp1, + shard_level="os_g", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O2', + ) + stage3_params = train_mlp( + mlp2, + shard_level="p_g_os", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O2', + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) # AMP mlp3, mlp4 = MLP(), MLP() mlp3.set_state_dict(state_dict) mlp4.set_state_dict(state_dict) - stage2_params = train_mlp(mlp3, - shard_level="os_g", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O1') - stage3_params = train_mlp(mlp4, - shard_level="p_g_os", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O1') + stage2_params = train_mlp( + mlp3, + shard_level="os_g", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O1', + ) + stage3_params = train_mlp( + mlp4, + shard_level="p_g_os", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O1', + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) shutil.rmtree(output_dir) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api_eager.py index f57dfd7c98d7aca3581c51da66fa2978e219dc04..2f07007c66a0b8828d4159414f31df52c6068bfb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_api_eager.py @@ -19,7 +19,10 @@ import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.sharding import group_sharded_parallel, save_group_sharded_model +from paddle.distributed.sharding import ( + group_sharded_parallel, + save_group_sharded_model, +) epoch = 10 paddle.seed(2022) @@ -31,7 +34,6 @@ batch_size = 100 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -47,7 +49,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -60,47 +61,55 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_multi_precision, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) optimizer = paddle.optimizer.Momentum( - parameters=[{ - "params": list(model.parameters()) - }] if opt_group else list(model.parameters()), + 
parameters=[{"params": list(model.parameters())}] + if opt_group + else list(model.parameters()), learning_rate=0.001, weight_decay=0.00001, grad_clip=clip, - multi_precision=use_multi_precision) + multi_precision=use_multi_precision, + ) return optimizer -def train_mlp(model, - shard_level, - use_multi_precision, - output_dir, - amp_level='O1', - sync_buffers=False, - dp_group=None): - optimizer = optimizer_setting(model=model, - use_multi_precision=use_multi_precision) - model = paddle.amp.decorate(models=model, - level=amp_level, - save_dtype='float32') +def train_mlp( + model, + shard_level, + use_multi_precision, + output_dir, + amp_level='O1', + sync_buffers=False, + dp_group=None, +): + optimizer = optimizer_setting( + model=model, use_multi_precision=use_multi_precision + ) + model = paddle.amp.decorate( + models=model, level=amp_level, save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) - model, optimizer, scaler = group_sharded_parallel(model=model, - optimizer=optimizer, - level=shard_level, - scaler=scaler, - sync_buffers=sync_buffers, - dp_group=dp_group) - - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + model, optimizer, scaler = group_sharded_parallel( + model=model, + optimizer=optimizer, + level=shard_level, + scaler=scaler, + sync_buffers=sync_buffers, + dp_group=dp_group, + ) + + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -111,8 +120,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level=amp_level): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if not use_multi_precision: @@ -138,51 +148,64 @@ def test_sharding_api(): output_dir = tempfile.mkdtemp() - #test sharding + dp, just for test + # test sharding + dp, just for test dp_group = paddle.distributed.new_group( - list(range(paddle.distributed.get_world_size()))) - - stage2_dp_params = train_mlp(mlp1, - shard_level="os_g", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O2', - sync_buffers=True, - dp_group=dp_group) + list(range(paddle.distributed.get_world_size())) + ) + + stage2_dp_params = train_mlp( + mlp1, + shard_level="os_g", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O2', + sync_buffers=True, + dp_group=dp_group, + ) # fp16 - stage2_params = train_mlp(mlp1, - shard_level="os_g", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O2') - stage3_params = train_mlp(mlp2, - shard_level="p_g_os", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O2') + stage2_params = train_mlp( + mlp1, + shard_level="os_g", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O2', + ) + stage3_params = train_mlp( + mlp2, + shard_level="p_g_os", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O2', + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - 
stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) # AMP mlp3, mlp4 = MLP(), MLP() mlp3.set_state_dict(state_dict) mlp4.set_state_dict(state_dict) - stage2_params = train_mlp(mlp3, - shard_level="os_g", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O1') - stage3_params = train_mlp(mlp4, - shard_level="p_g_os", - use_multi_precision=True, - output_dir=output_dir, - amp_level='O1') + stage2_params = train_mlp( + mlp3, + shard_level="os_g", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O1', + ) + stage3_params = train_mlp( + mlp4, + shard_level="p_g_os", + use_multi_precision=True, + output_dir=output_dir, + amp_level='O1', + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2.py index 40767d6fa86fcd6c97466b746d75730d79bff390..970f7a5b03774f85cb5a72883fbff31e8278a4fe 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2.py @@ -23,8 +23,12 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) seed = 2022 epoch = 2 @@ -35,7 +39,6 @@ paddle.seed(seed) class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -51,7 +54,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -63,42 +65,50 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters(), - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[ + { + "params": model.parameters(), + } + ] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - batch_size=100, - use_pure_fp16=False, - accumulate_grad=False, - opt_group=False, - save_model=False, - test_minimize=False): +def train_mlp( + model, + sharding_stage, + batch_size=100, + use_pure_fp16=False, + accumulate_grad=False, + opt_group=False, + save_model=False, + test_minimize=False, +): if sharding_stage != "dp": group = paddle.distributed.new_group([0, 1], backend="nccl") if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: 
optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if sharding_stage == 2: optimizer = GroupShardedOptimizerStage2( - params=optimizer._parameter_list, optim=optimizer, group=group) + params=optimizer._parameter_list, optim=optimizer, group=group + ) - model = GroupShardedStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + model = GroupShardedStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) else: model = paddle.DataParallel(model) @@ -108,18 +118,21 @@ def train_mlp(model, optimizer.minimize() except: print( - "====== Find sharding_stage2_optimizer.minimize() error ======") + "====== Find sharding_stage2_optimizer.minimize() error ======" + ) return - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) if sharding_stage == 2: @@ -174,50 +187,50 @@ def test_dp_stage2(): mlp7.set_state_dict(state_dict) # DP VS stage2 - dp_params = train_mlp(mlp1, - sharding_stage="dp", - use_pure_fp16=False, - opt_group=False) - stage2_params = train_mlp(mlp2, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) + dp_params = train_mlp( + mlp1, sharding_stage="dp", use_pure_fp16=False, opt_group=False + ) + stage2_params = train_mlp( + mlp2, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # stage2 accumulate grad stage2_params = train_mlp(mlp3, sharding_stage=2, accumulate_grad=True) - stage2_accumulate_grad = train_mlp(mlp4, - sharding_stage=2, - batch_size=20, - accumulate_grad=True) + stage2_accumulate_grad = train_mlp( + mlp4, sharding_stage=2, batch_size=20, accumulate_grad=True + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage2_accumulate_grad[i].numpy(), - rtol=1e-5, - atol=1e-5) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage2_accumulate_grad[i].numpy(), + rtol=1e-5, + atol=1e-5, + ) # stage2 param list VS param group - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=False, - opt_group=True) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=False, opt_group=True + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage2, optimizer_stage2 = train_mlp(mlp6, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage2, optimizer_stage2 = train_mlp( + mlp6, + sharding_stage=2, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage2.state_dict(), model_file) paddle.save(optimizer_stage2.state_dict(), 
optimizer_file) m_state_dict = paddle.load(model_file) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_comm_overlap.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_comm_overlap.py index 7f16d926f537585b2be766f4659fa021515066ce..56c90009da673f50bb5fe85c454736712960ea7e 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_comm_overlap.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_comm_overlap.py @@ -23,8 +23,12 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) seed = 2022 epoch = 2 @@ -35,7 +39,6 @@ paddle.seed(seed) class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -51,7 +54,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -63,42 +65,50 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters(), - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[ + { + "params": model.parameters(), + } + ] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - batch_size=100, - use_pure_fp16=False, - accumulate_grad=False, - opt_group=False, - save_model=False, - test_minimize=False): +def train_mlp( + model, + sharding_stage, + batch_size=100, + use_pure_fp16=False, + accumulate_grad=False, + opt_group=False, + save_model=False, + test_minimize=False, +): if sharding_stage != "dp": group = paddle.distributed.new_group([0, 1], backend="nccl") if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if sharding_stage == 2: origin_model = model optimizer = GroupShardedOptimizerStage2( - params=optimizer._parameter_list, optim=optimizer, group=group) - model = GroupShardedStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + params=optimizer._parameter_list, optim=optimizer, group=group + ) + model = GroupShardedStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) model._set_reduce_overlap(True) optimizer._set_broadcast_overlap(True, model) else: @@ -110,18 +120,21 @@ def train_mlp(model, optimizer.minimize() except: print( - "====== Find sharding_stage2_optimizer.minimize() error ======") + "====== Find 
sharding_stage2_optimizer.minimize() error ======" + ) return - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) if sharding_stage == 2: @@ -178,50 +191,50 @@ def test_dp_stage2(): mlp7.set_state_dict(state_dict) # DP VS stage2 - dp_params = train_mlp(mlp1, - sharding_stage="dp", - use_pure_fp16=False, - opt_group=False) - stage2_params = train_mlp(mlp2, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) + dp_params = train_mlp( + mlp1, sharding_stage="dp", use_pure_fp16=False, opt_group=False + ) + stage2_params = train_mlp( + mlp2, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # stage2 accumulate grad stage2_params = train_mlp(mlp3, sharding_stage=2, accumulate_grad=True) - stage2_accumulate_grad = train_mlp(mlp4, - sharding_stage=2, - batch_size=20, - accumulate_grad=True) + stage2_accumulate_grad = train_mlp( + mlp4, sharding_stage=2, batch_size=20, accumulate_grad=True + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage2_accumulate_grad[i].numpy(), - rtol=1e-5, - atol=1e-5) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage2_accumulate_grad[i].numpy(), + rtol=1e-5, + atol=1e-5, + ) # stage2 param list VS param group - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=False, - opt_group=True) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=False, opt_group=True + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage2, optimizer_stage2 = train_mlp(mlp6, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage2, optimizer_stage2 = train_mlp( + mlp6, + sharding_stage=2, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage2.state_dict(), model_file) paddle.save(optimizer_stage2.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_offload.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_offload.py index 3420114d3f9bebff5176114d9b6c159115e955c9..710dc90fcf53b6a554e0a04ab3e42cae444e1361 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_offload.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage2_offload.py @@ -18,11 +18,21 @@ import numpy as np import paddle from paddle.fluid.framework import _test_eager_guard -from 
paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler - -from dygraph_group_sharded_stage2 import MLP, reader_decorator, optimizer_setting +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( + GroupShardedScaler, +) + +from dygraph_group_sharded_stage2 import ( + MLP, + reader_decorator, + optimizer_setting, +) seed = 2021 epoch = 2 @@ -40,20 +50,22 @@ def train_mlp(model, offload=False): scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = GroupShardedScaler(scaler) - optimizer = GroupShardedOptimizerStage2(params=optimizer._parameter_list, - optim=optimizer, - offload=offload) + optimizer = GroupShardedOptimizerStage2( + params=optimizer._parameter_list, optim=optimizer, offload=offload + ) model = GroupShardedStage2(model, optimizer, buffer_max_size=2**21) - train_reader = paddle.batch(reader_decorator(linear_size), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(linear_size), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -66,8 +78,9 @@ def train_mlp(model, offload=False): with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) scaler.scale(avg_loss).backward() @@ -93,10 +106,12 @@ def test_sharding_stage2_offload(): mlp_offload_params = train_mlp(mlp_offload, offload=True) for i in range(len(mlp_params)): - np.testing.assert_allclose(mlp_params[i].numpy(), - mlp_offload_params[i].numpy(), - rtol=5e-3, - atol=5e-3) + np.testing.assert_allclose( + mlp_params[i].numpy(), + mlp_offload_params[i].numpy(), + rtol=5e-3, + atol=5e-3, + ) return diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3.py index fe629541376d21261ed8449fccbecfed2071dc0a..d6f8ef5efb77294c351cdb91ddfabaaf52cdd876 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3.py @@ -23,10 +23,18 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 -from 
paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import GroupShardedStage3 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( + GroupShardedStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( + GroupShardedScaler, +) epoch = 10 paddle.seed(2022) @@ -37,7 +45,6 @@ l2_decay = 1e-4 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -53,7 +60,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -66,53 +72,58 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) optimizer = paddle.optimizer.Momentum( - parameters=[{ - "params": list(model.parameters()) - }] if opt_group else list(model.parameters()), + parameters=[{"params": list(model.parameters())}] + if opt_group + else list(model.parameters()), learning_rate=0.001, weight_decay=0.00001, grad_clip=clip, - multi_precision=use_pure_fp16) + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - use_pure_fp16=False, - accumulate_grad=False, - batch_size=100, - opt_group=False, - sync_comm=False, - test_minimize=False, - save_model=False): +def train_mlp( + model, + sharding_stage, + use_pure_fp16=False, + accumulate_grad=False, + batch_size=100, + opt_group=False, + sync_comm=False, + test_minimize=False, + save_model=False, +): group = paddle.distributed.new_group([0, 1]) if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if use_pure_fp16: - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) scaler = GroupShardedScaler(scaler) if sharding_stage == 2: optimizer = GroupShardedOptimizerStage2( - params=optimizer._parameter_list, optim=optimizer, group=group) - model = GroupShardedStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + params=optimizer._parameter_list, optim=optimizer, group=group + ) + model = GroupShardedStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) elif sharding_stage == 3: - model = GroupShardedStage3(model, - optimizer=optimizer, - group=group, - sync_comm=sync_comm, - segment_size=2**15) + model = GroupShardedStage3( + model, + optimizer=optimizer, + group=group, + sync_comm=sync_comm, + segment_size=2**15, + ) # check optimizer.minimize() error if test_minimize: @@ -120,18 +131,21 @@ def train_mlp(model, optimizer.minimize() except: print( - "====== Find sharding_stage3_optimizer.minimize() error ======") + "====== Find sharding_stage3_optimizer.minimize() error ======" + ) return - train_reader = paddle.batch(reader_decorator(), - 
batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -142,8 +156,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if batch_size == 20: @@ -178,8 +193,19 @@ def train_mlp(model, def test_stage2_stage3(): paddle.distributed.init_parallel_env() - mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = MLP( - ), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP() + mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = ( + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + ) state_dict = mlp.state_dict() mlp1.set_state_dict(state_dict) mlp2.set_state_dict(state_dict) @@ -193,78 +219,87 @@ def test_stage2_stage3(): mlp10.set_state_dict(state_dict) # fp32 - stage2_params = train_mlp(mlp1, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) - stage3_params = train_mlp(mlp2, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False) + stage2_params = train_mlp( + mlp1, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) + stage3_params = train_mlp( + mlp2, sharding_stage=3, use_pure_fp16=False, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-6, - atol=1e-6) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-6, + atol=1e-6, + ) # fp32 accumulate grad - stage3_params = train_mlp(mlp3, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - opt_group=True) - stage3_params_add = train_mlp(mlp4, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - batch_size=20, - opt_group=True) + stage3_params = train_mlp( + mlp3, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + opt_group=True, + ) + stage3_params_add = train_mlp( + mlp4, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + batch_size=20, + opt_group=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_add[i].numpy(), - rtol=1e-6, - atol=1e-4) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_add[i].numpy(), + rtol=1e-6, + atol=1e-4, + ) # fp16 - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=True, - opt_group=False) - stage3_params = train_mlp(mlp6, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=True, opt_group=False + ) + stage3_params = train_mlp( + mlp6, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), 
+ stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) # fp16 sync_comm - stage3_params = train_mlp(mlp7, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) - stage3_params_re = train_mlp(mlp8, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False, - sync_comm=True) + stage3_params = train_mlp( + mlp7, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) + stage3_params_re = train_mlp( + mlp8, + sharding_stage=3, + use_pure_fp16=True, + opt_group=False, + sync_comm=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_re[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + stage3_params[i].numpy(), stage3_params_re[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage3, optimizer_stage3 = train_mlp(mlp9, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage3, optimizer_stage3 = train_mlp( + mlp9, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage3.state_dict(), model_file) paddle.save(optimizer_stage3.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) @@ -274,11 +309,13 @@ def test_stage2_stage3(): shutil.rmtree(output_dir) # check optimizer.minimize() error - train_mlp(mlp10, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - test_minimize=True) + train_mlp( + mlp10, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + test_minimize=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3_offload.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3_offload.py index 2c586b56946f29612d3cc6d198146eb011f4335c..0c63d2e1d8a301ee832889674d6f67c41d06cfa2 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3_offload.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_group_sharded_stage3_offload.py @@ -20,8 +20,12 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import GroupShardedStage3 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( + GroupShardedStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( + GroupShardedScaler, +) epoch = 10 paddle.seed(2022) @@ -32,7 +36,6 @@ l2_decay = 1e-4 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -48,7 +51,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -60,48 +62,56 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters() - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[{"params": 
model.parameters()}] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - use_pure_fp16=False, - accumulate_grad=False, - offload=False, - batch_size=100, - convert2cpu=False): +def train_mlp( + model, + use_pure_fp16=False, + accumulate_grad=False, + offload=False, + batch_size=100, + convert2cpu=False, +): group = paddle.distributed.new_group([0, 1]) optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if use_pure_fp16: - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) scaler = GroupShardedScaler(scaler) - model = GroupShardedStage3(model, - optimizer=optimizer, - group=group, - offload=offload, - segment_size=2**15) - - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + model = GroupShardedStage3( + model, + optimizer=optimizer, + group=group, + offload=offload, + segment_size=2**15, + ) + + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -112,8 +122,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if accumulate_grad: @@ -147,8 +158,15 @@ def train_mlp(model, def test_stage3_offload(): paddle.distributed.init_parallel_env() - mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6 = MLP(), MLP(), MLP(), MLP(), MLP( - ), MLP(), MLP() + mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6 = ( + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + ) state_dict = mlp.state_dict() mlp1.set_state_dict(state_dict) mlp2.set_state_dict(state_dict) @@ -161,36 +179,43 @@ def test_stage3_offload(): stage3_params = train_mlp(mlp1, use_pure_fp16=False) stage3_params_offload = train_mlp(mlp2, use_pure_fp16=False, offload=True) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-6, - atol=1e-8) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-6, + atol=1e-8, + ) # fp16 offload stage3_params = train_mlp(mlp3, use_pure_fp16=True) stage3_params_offload = train_mlp(mlp4, use_pure_fp16=True, offload=True) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-2, - atol=1e-2) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-2, + atol=1e-2, + ) # fp32 accumulate grad offload - stage3_params = train_mlp(mlp5, - use_pure_fp16=False, - batch_size=20, - accumulate_grad=True) - stage3_params_offload = train_mlp(mlp6, - use_pure_fp16=False, - accumulate_grad=True, - offload=True, - batch_size=20, - 
convert2cpu=True) + stage3_params = train_mlp( + mlp5, use_pure_fp16=False, batch_size=20, accumulate_grad=True + ) + stage3_params_offload = train_mlp( + mlp6, + use_pure_fp16=False, + accumulate_grad=True, + offload=True, + batch_size=20, + convert2cpu=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-6, - atol=1e-8) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-6, + atol=1e-8, + ) return diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_optimizer_stage2.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_optimizer_stage2.py index 2f5a73830d7067f297975b8c7192b7c693d2e823..78dbd9b2ccdd7eb23728441378b42d65e798211f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_optimizer_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_optimizer_stage2.py @@ -22,7 +22,9 @@ from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard from paddle.distributed.fleet.utils.internal_storage import GradStorage -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) base_lr = 0.1 momentum_rate = 0.9 @@ -34,7 +36,6 @@ class_dim = 102 class MLP(fluid.Layer): - def __init__(self, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -48,7 +49,6 @@ class MLP(fluid.Layer): def reader_decorator(): - def __reader__(): for _ in range(100): img = np.random.rand(10).astype('float32') @@ -63,7 +63,8 @@ def optimizer_setting(parameter_list=None): learning_rate=base_lr, momentum=momentum_rate, weight_decay=paddle.regularizer.L2Decay(l2_decay), - parameters=parameter_list) + parameters=parameter_list, + ) return optimizer @@ -74,18 +75,20 @@ def train_mlp(): mlp = MLP() optimizer = optimizer_setting(parameter_list=mlp.parameters()) - oss_optimizer = ShardingOptimizerStage2(params=mlp.parameters(), - optim=optimizer, - group=group) + oss_optimizer = ShardingOptimizerStage2( + params=mlp.parameters(), optim=optimizer, group=group + ) # cover grad_storage code trainable_param2align = dict() for p in mlp.parameters(): trainable_param2align[p.name] = 0 - grad_storage = GradStorage(10000, - dtype=paddle.float32, - device="gpu", - destination=0, - parm2align=trainable_param2align) + grad_storage = GradStorage( + 10000, + dtype=paddle.float32, + device="gpu", + destination=0, + parm2align=trainable_param2align, + ) for p in mlp.parameters(): grad_storage.can_add_grad_view(p, trainable_param2align[p.name]) grad_storage.add_grad(p, trainable_param2align[p.name]) @@ -93,15 +96,17 @@ def train_mlp(): grad_storage.rebuild() grad_storage.reset_checked_in() - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): 
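For orientation, a minimal sketch of the stage-2 group-sharded setup that the reformatted tests above exercise; every import, call, and argument is taken directly from those test files, and it assumes the same two-GPU NCCL launch (e.g. via paddle.distributed.launch) that the tests rely on, so it is not a standalone single-process script.

# Illustrative sketch only: mirrors the train_mlp() setup used in the
# group-sharded stage-2 tests above (assumes a 2-GPU distributed launch).
import paddle
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import (
    GroupShardedOptimizerStage2,
)
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
    GroupShardedStage2,
)

# MLP and optimizer_setting are the helpers defined in
# dygraph_group_sharded_stage2.py, as the offload test also imports them.
from dygraph_group_sharded_stage2 import MLP, optimizer_setting


def build_sharded_stage2(use_pure_fp16=False):
    # Initialize the dynamic-graph parallel environment and a 2-rank group,
    # exactly as the tests do before wrapping the model.
    paddle.distributed.init_parallel_env()
    group = paddle.distributed.new_group([0, 1], backend="nccl")

    model = MLP()
    optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16)

    # Shard the optimizer state across the group, then wrap the model so that
    # gradient reduction feeds the sharded optimizer.
    optimizer = GroupShardedOptimizerStage2(
        params=optimizer._parameter_list, optim=optimizer, group=group
    )
    model = GroupShardedStage2(
        model, optimizer, group=group, buffer_max_size=2**21
    )
    return model, optimizer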
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2.py index a025030af2d6cf8f0e214a61f8f74b1a33b7af45..2303efccd02e1220d92d3e63d8997aab3c139bcb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2.py @@ -24,8 +24,12 @@ from paddle.fluid.dygraph.nn import Linear from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2 +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ( + ShardingStage2, +) seed = 2022 epoch = 2 @@ -36,7 +40,7 @@ strategy.hybrid_configs = { "dp_degree": 2, "mp_degree": 1, "pp_degree": 1, - "sharding_degree": 1 + "sharding_degree": 1, } np.random.seed(seed) @@ -44,7 +48,6 @@ paddle.seed(seed) class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -60,7 +63,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -72,58 +74,63 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters() - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[{"params": model.parameters()}] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - batch_size=100, - use_pure_fp16=False, - accumulate_grad=False, - opt_group=False, - save_model=False): +def train_mlp( + model, + sharding_stage, + batch_size=100, + use_pure_fp16=False, + accumulate_grad=False, + opt_group=False, + save_model=False, +): if sharding_stage == "dp": hcg = fleet.get_hybrid_communicate_group() group = hcg.get_check_parallel_group() else: group = paddle.distributed.new_group([0, 1]) if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if sharding_stage == 2: - optimizer = ShardingOptimizerStage2(params=model.parameters(), - optim=optimizer, - group=group) - - model = ShardingStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + optimizer = ShardingOptimizerStage2( + params=model.parameters(), optim=optimizer, group=group + ) + + model = ShardingStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) else: optimizer = fleet.distributed_optimizer(optimizer) model = fleet.distributed_model(model) - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - 
train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) if sharding_stage == 2: @@ -175,50 +182,50 @@ def test_dp_stage2(): mlp6.set_state_dict(state_dict) # DP VS stage2 - dp_params = train_mlp(mlp1, - sharding_stage="dp", - use_pure_fp16=False, - opt_group=False) - stage2_params = train_mlp(mlp2, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) + dp_params = train_mlp( + mlp1, sharding_stage="dp", use_pure_fp16=False, opt_group=False + ) + stage2_params = train_mlp( + mlp2, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # stage2 accumulate grad stage2_params = train_mlp(mlp3, sharding_stage=2, accumulate_grad=True) - stage2_accumulate_grad = train_mlp(mlp4, - sharding_stage=2, - batch_size=20, - accumulate_grad=True) + stage2_accumulate_grad = train_mlp( + mlp4, sharding_stage=2, batch_size=20, accumulate_grad=True + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage2_accumulate_grad[i].numpy(), - rtol=1e-5, - atol=1e-5) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage2_accumulate_grad[i].numpy(), + rtol=1e-5, + atol=1e-5, + ) # stage2 param list VS param group - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=False, - opt_group=True) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=False, opt_group=True + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage2, optimizer_stage2 = train_mlp(mlp6, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage2, optimizer_stage2 = train_mlp( + mlp6, + sharding_stage=2, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage2.state_dict(), model_file) paddle.save(optimizer_stage2.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2_offload.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2_offload.py index 7469591c9ed5ba24d01b36bf5ea7888b57ec5375..ce795de881c7b9ea10ce4e8b73bd20a55b33fdac 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2_offload.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage2_offload.py @@ -19,9 +19,15 @@ import paddle from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 -from 
paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ( + ShardingStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ( + ShardingScaler, +) from dygraph_sharding_stage2 import MLP, reader_decorator, optimizer_setting @@ -35,7 +41,7 @@ strategy.hybrid_configs = { "dp_degree": 2, "mp_degree": 1, "pp_degree": 1, - "sharding_degree": 1 + "sharding_degree": 1, } np.random.seed(seed) @@ -49,20 +55,22 @@ def train_mlp(model, offload=False): scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = ShardingScaler(scaler) - optimizer = ShardingOptimizerStage2(params=model.parameters(), - optim=optimizer, - offload=offload) + optimizer = ShardingOptimizerStage2( + params=model.parameters(), optim=optimizer, offload=offload + ) model = ShardingStage2(model, optimizer, buffer_max_size=2**21) - train_reader = paddle.batch(reader_decorator(linear_size), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(linear_size), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -75,8 +83,9 @@ def train_mlp(model, offload=False): with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) scaler.scale(avg_loss).backward() @@ -101,10 +110,12 @@ def test_sharding_stage2_offload(): mlp_offload_params = train_mlp(mlp_offload, offload=True) for i in range(len(mlp_params)): - np.testing.assert_allclose(mlp_params[i].numpy(), - mlp_offload_params[i].numpy(), - rtol=5e-3, - atol=5e-3) + np.testing.assert_allclose( + mlp_params[i].numpy(), + mlp_offload_params[i].numpy(), + rtol=5e-3, + atol=5e-3, + ) return diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3.py index 4e9bc7a1cace6093ec1ea7b95323391bc87ea0ce..c5d58934060f65be38f7615cd15fa6658a933821 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3.py @@ -24,10 +24,18 @@ from paddle.fluid.dygraph.nn import Linear from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ShardingStage3 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler +from 
paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ( + ShardingStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ( + ShardingStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ( + ShardingScaler, +) epoch = 10 paddle.seed(2021) @@ -38,7 +46,6 @@ l2_decay = 1e-4 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -54,7 +61,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -67,53 +73,54 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) optimizer = paddle.optimizer.Momentum( - parameters=[{ - "params": list(model.parameters()) - }] if opt_group else list(model.parameters()), + parameters=[{"params": list(model.parameters())}] + if opt_group + else list(model.parameters()), learning_rate=0.001, weight_decay=0.00001, grad_clip=clip, - multi_precision=use_pure_fp16) + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - use_pure_fp16=False, - accumulate_grad=False, - batch_size=100, - opt_group=False, - sync_comm=False, - test_minimize=False, - save_model=False): +def train_mlp( + model, + sharding_stage, + use_pure_fp16=False, + accumulate_grad=False, + batch_size=100, + opt_group=False, + sync_comm=False, + test_minimize=False, + save_model=False, +): group = paddle.distributed.new_group([0, 1]) if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if use_pure_fp16: - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) scaler = ShardingScaler(scaler) if sharding_stage == 2: - optimizer = ShardingOptimizerStage2(params=model.parameters(), - optim=optimizer, - group=group) - model = ShardingStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + optimizer = ShardingOptimizerStage2( + params=model.parameters(), optim=optimizer, group=group + ) + model = ShardingStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) elif sharding_stage == 3: - model = ShardingStage3(model, - optimizer=optimizer, - group=group, - sync_comm=sync_comm) + model = ShardingStage3( + model, optimizer=optimizer, group=group, sync_comm=sync_comm + ) # check optimizer.minimize() error if test_minimize: @@ -121,18 +128,21 @@ def train_mlp(model, optimizer.minimize() except: print( - "====== Find sharding_stage3_optimizer.minimize() error ======") + "====== Find sharding_stage3_optimizer.minimize() error ======" + ) return - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, 
drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -143,8 +153,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if batch_size == 20: @@ -178,8 +189,19 @@ def train_mlp(model, def test_stage2_stage3(): - mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = MLP( - ), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP() + mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = ( + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + ) state_dict = mlp.state_dict() mlp1.set_state_dict(state_dict) mlp2.set_state_dict(state_dict) @@ -193,78 +215,87 @@ def test_stage2_stage3(): mlp10.set_state_dict(state_dict) # fp32 - stage2_params = train_mlp(mlp1, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) - stage3_params = train_mlp(mlp2, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False) + stage2_params = train_mlp( + mlp1, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) + stage3_params = train_mlp( + mlp2, sharding_stage=3, use_pure_fp16=False, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-6, - atol=1e-6) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-6, + atol=1e-6, + ) # fp32 accumulate grad - stage3_params = train_mlp(mlp3, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - opt_group=True) - stage3_params_add = train_mlp(mlp4, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - batch_size=20, - opt_group=True) + stage3_params = train_mlp( + mlp3, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + opt_group=True, + ) + stage3_params_add = train_mlp( + mlp4, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + batch_size=20, + opt_group=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_add[i].numpy(), - rtol=1e-6, - atol=1e-4) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_add[i].numpy(), + rtol=1e-6, + atol=1e-4, + ) # fp16 - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=True, - opt_group=False) - stage3_params = train_mlp(mlp6, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=True, opt_group=False + ) + stage3_params = train_mlp( + mlp6, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) # fp16 sync_comm - stage3_params = train_mlp(mlp7, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) - stage3_params_re = train_mlp(mlp8, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False, - sync_comm=True) + stage3_params = 
train_mlp( + mlp7, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) + stage3_params_re = train_mlp( + mlp8, + sharding_stage=3, + use_pure_fp16=True, + opt_group=False, + sync_comm=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_re[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + stage3_params[i].numpy(), stage3_params_re[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage3, optimizer_stage3 = train_mlp(mlp9, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage3, optimizer_stage3 = train_mlp( + mlp9, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage3.state_dict(), model_file) paddle.save(optimizer_stage3.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) @@ -274,11 +305,13 @@ def test_stage2_stage3(): shutil.rmtree(output_dir) # check optimizer.minimize() error - train_mlp(mlp10, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - test_minimize=True) + train_mlp( + mlp10, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + test_minimize=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3_offload.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3_offload.py index d56ec33694b074d73c1d67c0c10c14b521b9cea4..f7eee14046d7b3eea276a8b4b7566668319798ce 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3_offload.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_sharding_stage3_offload.py @@ -21,8 +21,12 @@ from paddle.fluid.dygraph.nn import Linear from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ShardingStage3 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ShardingScaler +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import ( + ShardingStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import ( + ShardingScaler, +) epoch = 10 paddle.seed(2022) @@ -33,7 +37,6 @@ l2_decay = 1e-4 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -49,7 +52,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -61,47 +63,52 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters() - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[{"params": model.parameters()}] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - use_pure_fp16=False, - accumulate_grad=False, - offload=False, - batch_size=100, - convert2cpu=False): +def train_mlp( + model, + 
use_pure_fp16=False, + accumulate_grad=False, + offload=False, + batch_size=100, + convert2cpu=False, +): group = paddle.distributed.new_group([0, 1]) optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if use_pure_fp16: - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) scaler = ShardingScaler(scaler) - model = ShardingStage3(model, - optimizer=optimizer, - group=group, - offload=offload) - - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + model = ShardingStage3( + model, optimizer=optimizer, group=group, offload=offload + ) + + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -112,8 +119,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if accumulate_grad: @@ -146,8 +154,15 @@ def train_mlp(model, def test_stage3_offload(): - mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6 = MLP(), MLP(), MLP(), MLP(), MLP( - ), MLP(), MLP() + mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6 = ( + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + ) state_dict = mlp.state_dict() mlp1.set_state_dict(state_dict) mlp2.set_state_dict(state_dict) @@ -160,36 +175,43 @@ def test_stage3_offload(): stage3_params = train_mlp(mlp1, use_pure_fp16=False) stage3_params_offload = train_mlp(mlp2, use_pure_fp16=False, offload=True) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-6, - atol=1e-8) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-6, + atol=1e-8, + ) # fp16 offload stage3_params = train_mlp(mlp3, use_pure_fp16=True) stage3_params_offload = train_mlp(mlp4, use_pure_fp16=True, offload=True) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-2, - atol=1e-2) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-2, + atol=1e-2, + ) # fp32 accumulate grad offload - stage3_params = train_mlp(mlp5, - use_pure_fp16=False, - batch_size=20, - accumulate_grad=True) - stage3_params_offload = train_mlp(mlp6, - use_pure_fp16=False, - accumulate_grad=True, - offload=True, - batch_size=20, - convert2cpu=True) + stage3_params = train_mlp( + mlp5, use_pure_fp16=False, batch_size=20, accumulate_grad=True + ) + stage3_params_offload = train_mlp( + mlp6, + use_pure_fp16=False, + accumulate_grad=True, + offload=True, + batch_size=20, + convert2cpu=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_offload[i].numpy(), - rtol=1e-6, - atol=1e-8) + 
np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_offload[i].numpy(), + rtol=1e-6, + atol=1e-8, + ) return diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py index 0e81b08a7ebd78743e28bafd7da44f6facd5ba88..0c6bf6e9deccef5df5341d260c1cc7afbab3f886 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_communicate_group.py @@ -18,11 +18,11 @@ from paddle.distributed import fleet class TestNewGroupAPI(object): - def __init__(self): paddle.distributed.init_parallel_env() - topo = fleet.CommunicateTopology(["data", "model", "sharding", "pipe"], - [2, 1, 1, 1]) + topo = fleet.CommunicateTopology( + ["data", "model", "sharding", "pipe"], [2, 1, 1, 1] + ) self.hcg = fleet.HybridCommunicateGroup(topo) d1 = np.array([1, 2, 3]) @@ -49,10 +49,13 @@ class TestNewGroupAPI(object): tmp = np.array([0, 0, 0]) result = paddle.to_tensor(tmp) - paddle.distributed.scatter(result, [self.tensor2, self.tensor1], - src=dp_src_rank, - group=dp_gp, - sync_op=True) + paddle.distributed.scatter( + result, + [self.tensor2, self.tensor1], + src=dp_src_rank, + group=dp_gp, + sync_op=True, + ) if dp_rank == 0: assert np.array_equal(result, self.tensor2) elif dp_rank == 1: @@ -63,13 +66,13 @@ class TestNewGroupAPI(object): assert np.array_equal(result, self.tensor1) print("test broadcast api ok") - paddle.distributed.reduce(result, - dst=dp_src_rank, - group=dp_gp, - sync_op=True) + paddle.distributed.reduce( + result, dst=dp_src_rank, group=dp_gp, sync_op=True + ) if dp_rank == 0: - assert np.array_equal(result, paddle.add(self.tensor1, - self.tensor1)) + assert np.array_equal( + result, paddle.add(self.tensor1, self.tensor1) + ) elif dp_rank == 1: assert np.array_equal(result, self.tensor1) print("test reduce api ok") @@ -77,7 +80,8 @@ class TestNewGroupAPI(object): paddle.distributed.all_reduce(result, sync_op=True) assert np.array_equal( result, - paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1)) + paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1), + ) print("test all_reduce api ok") paddle.distributed.wait(result, dp_gp, use_calc_stream=True) @@ -85,10 +89,9 @@ class TestNewGroupAPI(object): print("test wait api ok") result = [] - paddle.distributed.all_gather(result, - self.tensor1, - group=dp_gp, - sync_op=True) + paddle.distributed.all_gather( + result, self.tensor1, group=dp_gp, sync_op=True + ) assert np.array_equal(result[0], self.tensor1) assert np.array_equal(result[1], self.tensor1) print("test all_gather api ok") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py index 83b0aaf76c888d616b255b696de762d42811c8ee..e746f30fa57a2a8337cef257987638f6fda680a1 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py @@ -19,7 +19,9 @@ import paddle import numpy as np import paddle.fluid.layers as layers import paddle.distributed.fleet as fleet -from paddle.distributed.fleet.utils.hybrid_parallel_inference import HybridParallelInferenceHelper +from paddle.distributed.fleet.utils.hybrid_parallel_inference import ( + 
HybridParallelInferenceHelper, +) paddle.enable_static() @@ -40,7 +42,6 @@ def numpy_while(x, w1=1.0, w2=2.0, max_len=2): class TestHybridParallelInferenceHelperClass(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() fleet.init(is_collective=True, strategy=strategy) @@ -59,29 +60,27 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): with paddle.static.program_guard(main_program, startup_program): with paddle.fluid.device_guard(f'{device}:0'): - X = paddle.static.data(name='X', - shape=[None, 2], - dtype='float32') + X = paddle.static.data( + name='X', shape=[None, 2], dtype='float32' + ) with paddle.fluid.device_guard(f'{device}:all'): - max_len = layers.fill_constant(shape=[1], - dtype="int64", - value=2, - force_cpu=False, - name="n") - step_idx = layers.fill_constant(shape=[1], - dtype="int64", - value=0, - force_cpu=False, - name="i") + max_len = layers.fill_constant( + shape=[1], dtype="int64", value=2, force_cpu=False, name="n" + ) + step_idx = layers.fill_constant( + shape=[1], dtype="int64", value=0, force_cpu=False, name="i" + ) data = layers.array_write(X, step_idx) - cond_int = layers.fill_constant(shape=[1], - dtype="int64", - value=0, - force_cpu=False, - name="cond_int") + cond_int = layers.fill_constant( + shape=[1], + dtype="int64", + value=0, + force_cpu=False, + name="cond_int", + ) print(cond_int.shape) cond = layers.less_than(x=step_idx, y=max_len) while_op = layers.While(cond, is_test=True) @@ -94,20 +93,26 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): with paddle.fluid.device_guard(f'{device}:0'): param_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(1.0)) - weight1 = paddle.static.create_parameter(shape=[2, 5], - dtype='float32', - attr=param_attr, - is_bias=False) + initializer=paddle.nn.initializer.Constant(1.0) + ) + weight1 = paddle.static.create_parameter( + shape=[2, 5], + dtype='float32', + attr=param_attr, + is_bias=False, + ) hidden1 = paddle.matmul(input, weight1) with paddle.fluid.device_guard(f'{device}:1'): param_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(2.0)) - weight2 = paddle.static.create_parameter(shape=[5, 2], - dtype='float32', - attr=param_attr, - is_bias=False) + initializer=paddle.nn.initializer.Constant(2.0) + ) + weight2 = paddle.static.create_parameter( + shape=[5, 2], + dtype='float32', + attr=param_attr, + is_bias=False, + ) hidden2 = paddle.matmul(hidden1, weight2) layers.array_write(hidden2, i=step_idx, array=data) @@ -136,18 +141,20 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): num_pp=2, init_comm=nranks > 1, ) - helper.gen_infer_program(['array_write_0.out'], ['cond_int.tmp_0'], - debug=True) + helper.gen_infer_program( + ['array_write_0.out'], ['cond_int.tmp_0'], debug=True + ) exe = paddle.static.Executor(paddle.CUDAPlace(dev_id)) exe.run(startup_program) for step in range(2): - init_data = np.random.uniform(low=0.0, high=1.0, - size=[2, 2]).astype('float32') - [res] = exe.run(main_program, - feed={"X": init_data}, - fetch_list=[out]) + init_data = np.random.uniform( + low=0.0, high=1.0, size=[2, 2] + ).astype('float32') + [res] = exe.run( + main_program, feed={"X": init_data}, fetch_list=[out] + ) res_np = numpy_while(init_data) assert len(res) == len(res_np) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_amp.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_amp.py index 
4906a1c86cac5285b3438c77019fc2ee8a8acb8f..172753b2fbc2272537886b47c077f04fcbaed4a0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_amp.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_amp.py @@ -19,22 +19,22 @@ import unittest class TestMPClipGrad(TestDistMPTraning): - def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0) - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.001, - gamma=0.999, - verbose=True) - optimizer = paddle.optimizer.SGD(scheduler, - grad_clip=grad_clip, - parameters=[{ - 'params': - model.parameters(), - 'weight_decay': - 0.001, - 'learning_rate': - 0.1 - }]) + scheduler = paddle.optimizer.lr.ExponentialDecay( + learning_rate=0.001, gamma=0.999, verbose=True + ) + optimizer = paddle.optimizer.SGD( + scheduler, + grad_clip=grad_clip, + parameters=[ + { + 'params': model.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + } + ], + ) return optimizer def train_batch(self, batch, model, optimizer, is_mp): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_clip_grad.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_clip_grad.py index c9bff4a261d20bbe27a864363f7a2d62d92068fc..91d9789bfa06bc377495be56b23b595da844bc8c 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_clip_grad.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_clip_grad.py @@ -16,20 +16,19 @@ import paddle from hybrid_parallel_mp_model import TestDistMPTraning import unittest -#log = logging.getLogger("HybridParallel") -#log.setLevel(logging.WARNING) +# log = logging.getLogger("HybridParallel") +# log.setLevel(logging.WARNING) class TestMPClipGrad(TestDistMPTraning): - def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0) - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.001, - gamma=0.999, - verbose=True) - optimizer = paddle.optimizer.SGD(scheduler, - grad_clip=grad_clip, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.ExponentialDecay( + learning_rate=0.001, gamma=0.999, verbose=True + ) + optimizer = paddle.optimizer.SGD( + scheduler, grad_clip=grad_clip, parameters=model.parameters() + ) return optimizer diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_fp16.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_fp16.py index 611e85ed4a8f548c510234ebdbd1ff12edc7fb99..6a28316c1f485146dc5da73ddcc82c41718a244c 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_fp16.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_fp16.py @@ -19,20 +19,18 @@ import unittest class TestMPFP16(TestDistMPTraning): - def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(1.0) - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.001, - gamma=0.999, - verbose=True) - optimizer = paddle.optimizer.SGD(scheduler, - grad_clip=grad_clip, - parameters=model.parameters()) - - model, optimizer = paddle.amp.decorate(models=model, - optimizers=optimizer, - level='O2', - save_dtype='float32') + scheduler = paddle.optimizer.lr.ExponentialDecay( + learning_rate=0.001, gamma=0.999, verbose=True + ) + optimizer = paddle.optimizer.SGD( + scheduler, grad_clip=grad_clip, parameters=model.parameters() + ) + + model, optimizer = paddle.amp.decorate( + models=model, 
optimizers=optimizer, level='O2', save_dtype='float32' + ) return optimizer diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_layers.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_layers.py index 5f20209da4af8779ed3c8f3750a0ac40df3b2f98..9a3917712717349c850b6144bd71d7a36cc36c9e 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_layers.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_layers.py @@ -31,7 +31,6 @@ def set_random_seed(seed): class ColumnLinearNet(fluid.dygraph.Layer): - def __init__(self, input_size, output_size, global_dtype): super(ColumnLinearNet, self).__init__() self.parallel_linear = fleet.meta_parallel.ColumnParallelLinear( @@ -40,7 +39,8 @@ class ColumnLinearNet(fluid.dygraph.Layer): weight_attr=None, has_bias=True, gather_output=True, - name="test_column_linear") + name="test_column_linear", + ) def forward(self, x): output = self.parallel_linear(x) @@ -48,7 +48,6 @@ class ColumnLinearNet(fluid.dygraph.Layer): class RowLinearNet(fluid.dygraph.Layer): - def __init__(self, input_size, output_size): super(RowLinearNet, self).__init__() self.parallel_linear = fleet.meta_parallel.RowParallelLinear( @@ -56,7 +55,8 @@ class RowLinearNet(fluid.dygraph.Layer): out_features=output_size, has_bias=True, input_is_parallel=False, - name="test_row_linear") + name="test_row_linear", + ) def forward(self, x): output = self.parallel_linear(x) @@ -64,11 +64,11 @@ class RowLinearNet(fluid.dygraph.Layer): class EmbeddingNet(fluid.dygraph.Layer): - def __init__(self, vocab_size, hidden_size): super(EmbeddingNet, self).__init__() self.embedding = fleet.meta_parallel.VocabParallelEmbedding( - vocab_size, hidden_size) + vocab_size, hidden_size + ) def forward(self, x): output = self.embedding(x) @@ -76,19 +76,22 @@ class EmbeddingNet(fluid.dygraph.Layer): class SimpleMatmul(fluid.dygraph.Layer): - def __init__(self, weight, output_size, global_dtype): super(SimpleMatmul, self).__init__() self.weight = paddle.create_parameter( shape=weight.shape, dtype=global_dtype, attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Assign(weight))) + initializer=paddle.nn.initializer.Assign(weight) + ), + ) self.bias = self.create_parameter( shape=[output_size], dtype=global_dtype, attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) def forward(self, x): output = paddle.matmul(x, self.weight) + self.bias @@ -96,7 +99,6 @@ class SimpleMatmul(fluid.dygraph.Layer): class SimpleEmbedding(fluid.dygraph.Layer): - def __init__(self, vocab_size, hidden_size, weight): super(SimpleEmbedding, self).__init__() self.embedding = paddle.nn.Embedding( @@ -104,7 +106,9 @@ class SimpleEmbedding(fluid.dygraph.Layer): hidden_size, weight_attr=paddle.framework.ParamAttr( name="origin_embedding", - initializer=paddle.nn.initializer.Assign(weight))) + initializer=paddle.nn.initializer.Assign(weight), + ), + ) def forward(self, x): output = self.embedding(x) @@ -112,14 +116,13 @@ class SimpleEmbedding(fluid.dygraph.Layer): class TestDistTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 strategy.hybrid_configs = { "dp_degree": 1, "mp_degree": self.model_parallel_size, - "pp_degree": 1 + "pp_degree": 1, } fleet.init(is_collective=True, strategy=strategy) @@ -144,10 +147,12 @@ class TestDistTraning(unittest.TestCase): model_b = SimpleMatmul(integral_w, 
output_size, global_dtype) - optimizer_a = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_a.parameters()) - optimizer_b = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_b.parameters()) + optimizer_a = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_a.parameters() + ) + optimizer_b = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_b.parameters() + ) for idx in range(5): input = paddle.randn([batch_size, input_size], global_dtype) input.stop_gradient = True @@ -192,11 +197,13 @@ class TestDistTraning(unittest.TestCase): model_b = SimpleMatmul(integral_w, output_size, global_dtype) - optimizer_a = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_a.parameters()) + optimizer_a = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_a.parameters() + ) - optimizer_b = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_b.parameters()) + optimizer_b = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_b.parameters() + ) for idx in range(5): input = paddle.randn([batch_size, input_size], global_dtype) @@ -213,9 +220,9 @@ class TestDistTraning(unittest.TestCase): optimizer_a.step() optimizer_b.step() - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=5e-6) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=5e-6 + ) def test_parallel_embedding(self): batch_size = 17 @@ -240,21 +247,25 @@ class TestDistTraning(unittest.TestCase): for idx in range(len(integral_w)): tmp = paddle.gather( integral_w[idx], - paddle.to_tensor(list(range(vocab_size_per_card)))) + paddle.to_tensor(list(range(vocab_size_per_card))), + ) result_w.append(tmp) integral_w = paddle.concat(result_w, axis=0) model_b = SimpleEmbedding(vocab_size, hidden_size, integral_w) - optimizer_a = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_a.parameters()) + optimizer_a = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_a.parameters() + ) - optimizer_b = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model_b.parameters()) + optimizer_b = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model_b.parameters() + ) for _ in range(5): - np_input_data = np.random.randint(0, vocab_size, - (batch_size, seq_length)) + np_input_data = np.random.randint( + 0, vocab_size, (batch_size, seq_length) + ) input_data = paddle.to_tensor(np_input_data, dtype="int32") output_a = model_a(input_data) @@ -292,21 +303,23 @@ class TestDistTraning(unittest.TestCase): np.random.seed(seed) for _ in range(5): - np_label = np.random.randint(0, vocab_size, - (batch_size, seq_length)) + np_label = np.random.randint( + 0, vocab_size, (batch_size, seq_length) + ) label = paddle.to_tensor(np_label, dtype="int64") data = paddle.randn( shape=[batch_size, seq_length, class_size_per_card], - dtype='float32') + dtype='float32', + ) data.stop_gradient = False check_group = dist.new_group(list(range(self.model_parallel_size))) integral_data = [] partial_data = data.clone().detach() - paddle.distributed.all_gather(integral_data, - partial_data, - group=check_group) + paddle.distributed.all_gather( + integral_data, partial_data, group=check_group + ) integral_data = paddle.concat(integral_data, axis=-1) integral_data = integral_data.detach().clone() integral_data.stop_gradient = False @@ -315,23 +328,23 @@ class TestDistTraning(unittest.TestCase): loss_b = model_b(integral_data, label).sum() / batch_size print("loss_a: ", loss_a.numpy(), "loss_b: ", loss_b.numpy()) - np.testing.assert_allclose(loss_a.numpy(), - 
loss_b.numpy(), - rtol=1e-6) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=1e-6 + ) loss_a.backward() loss_b.backward() integral_grad = [] partial_grad = data.grad.clone().detach() - paddle.distributed.all_gather(integral_grad, - partial_grad, - group=check_group) + paddle.distributed.all_gather( + integral_grad, partial_grad, group=check_group + ) integral_grad = paddle.concat(integral_grad, axis=-1) - np.testing.assert_allclose(integral_data.grad.numpy(), - integral_grad.numpy(), - rtol=1e-6) + np.testing.assert_allclose( + integral_data.grad.numpy(), integral_grad.numpy(), rtol=1e-6 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_model.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_model.py index 707bd95a4902e09a607021040ca667b56d1bf8fc..b43d0663ac7ea505baaae7e0671b6a4e7715f0b3 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_model.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_model.py @@ -44,7 +44,8 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): if world_size > 1: input_parallel = paddle.distributed.collective._c_identity( - lm_output, group=model_parallel_group) + lm_output, group=model_parallel_group + ) logits = paddle.matmul(input_parallel, logit_weights, transpose_y=True) @@ -52,53 +53,69 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): return logits return paddle.distributed.collective._c_concat( - logits, group=model_parallel_group) + logits, group=model_parallel_group + ) else: logits = paddle.matmul(lm_output, logit_weights, transpose_y=True) return logits class SimpleMPNet(fluid.dygraph.Layer): - - def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2, mp_id): + def __init__( + self, + vocab_size, + hidden_size, + inner_size, + output_size, + np_fc1, + np_fc2, + mp_id, + ): super(SimpleMPNet, self).__init__() if mp_id == 0: - init_fc1_data = np_fc1[:, :(inner_size // 2)] - init_fc2_data = np_fc2[:(inner_size // 2), :] + init_fc1_data = np_fc1[:, : (inner_size // 2)] + init_fc2_data = np_fc2[: (inner_size // 2), :] else: - init_fc1_data = np_fc1[:, (inner_size // 2):] - init_fc2_data = np_fc2[(inner_size // 2):, :] + init_fc1_data = np_fc1[:, (inner_size // 2) :] + init_fc2_data = np_fc2[(inner_size // 2) :, :] self.linear1 = fleet.meta_parallel.ColumnParallelLinear( hidden_size, inner_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc1_data)), + initializer=paddle.nn.initializer.Assign(init_fc1_data) + ), gather_output=False, - has_bias=True) + has_bias=True, + ) self.linear2 = fleet.meta_parallel.RowParallelLinear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc2_data)), + initializer=paddle.nn.initializer.Assign(init_fc2_data) + ), input_is_parallel=True, - has_bias=True) + has_bias=True, + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = fleet.meta_parallel.VocabParallelEmbedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=0.5)) + 
weight_attr=paddle.nn.initializer.Constant(value=0.5), + ) def forward(self, x): x = self.embedding(x) @@ -110,39 +127,49 @@ class SimpleMPNet(fluid.dygraph.Layer): class SimpleDPNet(fluid.dygraph.Layer): - - def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2): + def __init__( + self, vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ): super(SimpleDPNet, self).__init__() self.linear1 = paddle.nn.Linear( hidden_size, inner_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc1)), + initializer=paddle.nn.initializer.Assign(np_fc1) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear2 = paddle.nn.Linear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc2)), + initializer=paddle.nn.initializer.Assign(np_fc2) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = paddle.nn.Embedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=0.5)) + weight_attr=paddle.nn.initializer.Constant(value=0.5), + ) def forward(self, x): x = self.embedding(x) @@ -154,7 +181,6 @@ class SimpleDPNet(fluid.dygraph.Layer): class TestDistMPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 @@ -162,7 +188,7 @@ class TestDistMPTraning(unittest.TestCase): strategy.hybrid_configs = { "dp_degree": self.data_parallel_size, "mp_degree": self.model_parallel_size, - "pp_degree": 1 + "pp_degree": 1, } fleet.init(is_collective=True, strategy=strategy) @@ -175,8 +201,9 @@ class TestDistMPTraning(unittest.TestCase): return loss def build_optimizer(self, model): - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return optimizer def build_model_optimizer(self): @@ -190,34 +217,50 @@ class TestDistMPTraning(unittest.TestCase): np_fc1 = np.random.random_sample((hidden_size, inner_size)) np_fc2 = np.random.random_sample((inner_size, hidden_size)) - model_a = SimpleMPNet(vocab_size, hidden_size, inner_size, output_size, - np_fc1, np_fc2, mp_id) + model_a = SimpleMPNet( + vocab_size, + hidden_size, + inner_size, + output_size, + np_fc1, + np_fc2, + mp_id, + ) optimizer_a = self.build_optimizer(model_a) model_a = fleet.distributed_model(model_a) optimizer_a = fleet.distributed_optimizer(optimizer_a) - model_b = SimpleDPNet(vocab_size, hidden_size, inner_size, output_size, - np_fc1, np_fc2) + model_b = SimpleDPNet( + vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ) optimizer_b = self.build_optimizer(model_b) return model_a, optimizer_a, model_b, optimizer_b def test_mp_model(self): - model_a, optimizer_a, model_b, optimizer_b = self.build_model_optimizer( - ) + ( + model_a, + optimizer_a, + model_b, + optimizer_b, + ) = self.build_model_optimizer() for _ in range(5): - 
np_data = np.random.randint(0, vocab_size, ( - batch_size, - seq_length, - )) + np_data = np.random.randint( + 0, + vocab_size, + ( + batch_size, + seq_length, + ), + ) batch = paddle.to_tensor(np_data) loss_a = self.train_batch(batch, model_a, optimizer_a, True) loss_b = self.train_batch(batch, model_b, optimizer_b, False) - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=1e-6) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=1e-6 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py index f779b3298b8dcadbbf7d846adafe260e6bb99497..96e76b37c7aac8eae47515476a235f046c08253a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py @@ -20,14 +20,13 @@ import paddle.distributed.fleet as fleet class TestDistTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 strategy.hybrid_configs = { "dp_degree": 1, "mp_degree": self.model_parallel_size, - "pp_degree": 1 + "pp_degree": 1, } fleet.init(is_collective=True, strategy=strategy) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_amp.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_amp.py index 41bd79b58b5423f78312c4a0aed98440f08906a8..af4e68be8f67b5d8f9085b4776ff33aca492c063 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_amp.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_amp.py @@ -34,7 +34,6 @@ micro_batch_size = 2 class TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -47,7 +46,7 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -61,14 +60,16 @@ class TestDistPPTraning(unittest.TestCase): grad_clip = paddle.nn.ClipGradByGlobalNorm(1.0) - #construct model a + # construct model a model_a = AlexNet(10) - scheduler_a = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer_a = paddle.optimizer.SGD(learning_rate=scheduler_a, - grad_clip=grad_clip, - parameters=model_a.parameters()) + scheduler_a = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer_a = paddle.optimizer.SGD( + learning_rate=scheduler_a, + grad_clip=grad_clip, + parameters=model_a.parameters(), + ) scaler_a = paddle.amp.GradScaler(init_loss_scaling=2**5) @@ -79,12 +80,14 @@ class TestDistPPTraning(unittest.TestCase): # construct model b model_b = AlexNetPipeDesc(num_stages=self.pipeline_parallel_size) - scheduler_b = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer_b = paddle.optimizer.SGD(learning_rate=scheduler_b, - grad_clip=grad_clip, - parameters=model_b.parameters()) + scheduler_b = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer_b = paddle.optimizer.SGD( + learning_rate=scheduler_b, + grad_clip=grad_clip, + parameters=model_b.parameters(), + ) model_b = 
fleet.distributed_model(model_b) optimizer_b = fleet.distributed_optimizer(optimizer_b) scaler_b = paddle.amp.GradScaler(init_loss_scaling=2**5) @@ -94,15 +97,21 @@ class TestDistPPTraning(unittest.TestCase): param.set_value(parameters[idx + pp_id * (param_len // 2)]) # construct reader - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size, drop_last=True + ) for step_id, data in enumerate(train_reader()): - x_data = np.array([x[0] for x in data]).astype('float32').reshape( - batch_size, 1, 28, 28) - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + x_data = ( + np.array([x[0] for x in data]) + .astype('float32') + .reshape(batch_size, 1, 28, 28) + ) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) img.stop_gradient = True @@ -120,15 +129,14 @@ class TestDistPPTraning(unittest.TestCase): scheduler_a.step() with paddle.amp.auto_cast(): - loss_b = model_b.train_batch([img, label], - optimizer_b, - scheduler_b, - scaler=scaler_b) + loss_b = model_b.train_batch( + [img, label], optimizer_b, scheduler_b, scaler=scaler_b + ) print("loss: ", loss_a.numpy(), loss_b.numpy()) - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=5e-5) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=5e-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_clip_grad.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_clip_grad.py index 8f1d8e930e2b19538f10364b28982cc04a9c86b1..379a5f6417a251a509ed12168ad66ccf9b9b2af1 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_clip_grad.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_clip_grad.py @@ -18,31 +18,30 @@ from hybrid_parallel_pp_alexnet import TestDistPPTraning class TestPPClipGrad(TestDistPPTraning): - def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(0.5) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - grad_clip=grad_clip, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, + grad_clip=grad_clip, + parameters=model.parameters(), + ) return scheduler, optimizer class TestPPClipGradParamGroup(TestDistPPTraning): - def build_optimizer(self, model): grad_clip = paddle.nn.ClipGradByGlobalNorm(0.5) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.Momentum(learning_rate=scheduler, - grad_clip=grad_clip, - parameters=[{ - "params": - model.parameters() - }]) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.Momentum( + learning_rate=scheduler, + grad_clip=grad_clip, + parameters=[{"params": model.parameters()}], + ) return scheduler, optimizer diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py index 
d55f989054cec5e610ea08abf29bf7d09c36f11a..1c14eb7f67c6a1a307c054cdea810974ef38789c 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py @@ -40,29 +40,29 @@ hidden_size = 8 class SimpleNet(Layer): - def __init__(self): super(SimpleNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) self.softmax_weight = self.create_parameter( - shape=[hidden_size, vocab_size]) - self.softmax_bias = self.create_parameter(shape=[vocab_size], - is_bias=False) + shape=[hidden_size, vocab_size] + ) + self.softmax_bias = self.create_parameter( + shape=[vocab_size], is_bias=False + ) def forward(self, x1, x2, y1): x_emb = self.word_embeddings(x1) fc = fluid.layers.matmul(x_emb, self.softmax_weight) fc = fluid.layers.elementwise_add(fc, self.softmax_bias) projection = fluid.layers.reshape(fc, shape=[-1, vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=y1, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=y1, soft_label=False + ) return loss.mean() class EmbeddingNet(Layer): - def __init__(self): super(EmbeddingNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) @@ -74,11 +74,11 @@ class EmbeddingNet(Layer): class MatmulNet(Layer): - def __init__(self): super(MatmulNet, self).__init__() self.softmax_weight = self.create_parameter( - shape=[hidden_size, vocab_size]) + shape=[hidden_size, vocab_size] + ) def forward(self, args): x1, x2 = args @@ -88,7 +88,6 @@ class MatmulNet(Layer): class BiasNet(Layer): - def __init__(self): super(BiasNet, self).__init__() self.softmax_bias = self.create_parameter(shape=[vocab_size]) @@ -101,20 +100,18 @@ class BiasNet(Layer): class LossNet(Layer): - def __init__(self): super(LossNet, self).__init__() def forward(self, args, y1): projection, x2 = args - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=y1[0], - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=y1[0], soft_label=False + ) return loss.mean() class SimpleNetPipe(Layer): - def __init__(self): super(SimpleNetPipe, self).__init__() self.features = Sequential(EmbeddingNet(), MatmulNet(), BiasNet()) @@ -125,7 +122,6 @@ class SimpleNetPipe(Layer): class TestDistEmbeddingTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -138,7 +134,7 @@ class TestDistEmbeddingTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -150,22 +146,28 @@ class TestDistEmbeddingTraning(unittest.TestCase): rank_id = dist.get_rank() set_random_seed(1024, dp_id, rank_id) - #construct model a + # construct model a model_a = SimpleNet() scheduler_a = paddle.optimizer.lr.PiecewiseDecay( - boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True) - optimizer_a = paddle.optimizer.SGD(learning_rate=scheduler_a, - parameters=model_a.parameters()) + boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True + ) + optimizer_a = paddle.optimizer.SGD( + learning_rate=scheduler_a, parameters=model_a.parameters() + ) init_net = SimpleNetPipe() - model_b = PipelineLayer(layers=init_net.to_layers(), - num_stages=self.pipeline_parallel_size, - 
loss_fn=LossNet()) + model_b = PipelineLayer( + layers=init_net.to_layers(), + num_stages=self.pipeline_parallel_size, + loss_fn=LossNet(), + ) scheduler_b = paddle.optimizer.lr.PiecewiseDecay( - boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True) - optimizer_b = paddle.optimizer.SGD(learning_rate=scheduler_b, - parameters=model_b.parameters()) + boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True + ) + optimizer_b = paddle.optimizer.SGD( + learning_rate=scheduler_b, parameters=model_b.parameters() + ) model_b = fleet.distributed_model(model_b) optimizer_b = fleet.distributed_optimizer(optimizer_b) @@ -202,8 +204,9 @@ class TestDistEmbeddingTraning(unittest.TestCase): optimizer_a.clear_grad() scheduler_a.step() - loss_b = model_b.train_batch([(x1, x2), (y1, )], optimizer_b, - scheduler_b) + loss_b = model_b.train_batch( + [(x1, x2), (y1,)], optimizer_b, scheduler_b + ) print("loss", loss_a.numpy(), loss_b.numpy()) np.testing.assert_allclose(loss_a.numpy(), loss_b.numpy()) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_fp16.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_fp16.py index 06813a340ebd9c48fdea2dedca8dbd9958cf84d5..b0429167036e59c0aca4ba74ae49c63823acf87a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_fp16.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_fp16.py @@ -34,7 +34,6 @@ micro_batch_size = 2 class TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -47,7 +46,7 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -61,25 +60,29 @@ class TestDistPPTraning(unittest.TestCase): grad_clip = paddle.nn.ClipGradByGlobalNorm(1.0) - #construct model a + # construct model a model_a = AlexNet(10) - scheduler_a = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer_a = paddle.optimizer.SGD(learning_rate=scheduler_a, - grad_clip=grad_clip, - parameters=model_a.parameters()) + scheduler_a = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer_a = paddle.optimizer.SGD( + learning_rate=scheduler_a, + grad_clip=grad_clip, + parameters=model_a.parameters(), + ) scaler_a = paddle.amp.GradScaler(init_loss_scaling=2**5) # construct model b model_b = AlexNetPipeDesc(num_stages=self.pipeline_parallel_size) - scheduler_b = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer_b = paddle.optimizer.SGD(learning_rate=scheduler_b, - grad_clip=grad_clip, - parameters=model_b.parameters()) + scheduler_b = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer_b = paddle.optimizer.SGD( + learning_rate=scheduler_b, + grad_clip=grad_clip, + parameters=model_b.parameters(), + ) param_len = len(model_a.parameters()) parameters = [] @@ -89,14 +92,18 @@ class TestDistPPTraning(unittest.TestCase): for idx, param in enumerate(model_b.parameters()): param.set_value(parameters[idx + pp_id * (param_len // 2)]) - model_a, optimizer_a = paddle.amp.decorate(models=model_a, - optimizers=optimizer_a, - level='O2', - save_dtype='float32') - model_b, optimizer_b = 
paddle.amp.decorate(models=model_b, - optimizers=optimizer_b, - level='O2', - save_dtype='float32') + model_a, optimizer_a = paddle.amp.decorate( + models=model_a, + optimizers=optimizer_a, + level='O2', + save_dtype='float32', + ) + model_b, optimizer_b = paddle.amp.decorate( + models=model_b, + optimizers=optimizer_b, + level='O2', + save_dtype='float32', + ) model_b = fleet.distributed_model(model_b) optimizer_b = fleet.distributed_optimizer(optimizer_b) @@ -104,15 +111,21 @@ class TestDistPPTraning(unittest.TestCase): scaler_b = fleet.distributed_scaler(scaler_b) # construct reader - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size, drop_last=True + ) for step_id, data in enumerate(train_reader()): - x_data = np.array([x[0] for x in data]).astype('float32').reshape( - batch_size, 1, 28, 28) - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + x_data = ( + np.array([x[0] for x in data]) + .astype('float32') + .reshape(batch_size, 1, 28, 28) + ) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) img.stop_gradient = True @@ -129,15 +142,14 @@ class TestDistPPTraning(unittest.TestCase): scheduler_a.step() with paddle.amp.auto_cast(enable=True, level='O2'): - loss_b = model_b.train_batch([img, label], - optimizer_b, - scheduler_b, - scaler=scaler_b) + loss_b = model_b.train_batch( + [img, label], optimizer_b, scheduler_b, scaler=scaler_b + ) print("loss: ", loss_a.numpy(), loss_b.numpy()) - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=5e-3) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=5e-3 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_layer_with_virtual_stage.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_layer_with_virtual_stage.py index 73807abdc18c8b429a2a92831dd0349312c85203..f23df47ae4b5660345f30646ec6480b81d2aa972 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_layer_with_virtual_stage.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_layer_with_virtual_stage.py @@ -17,12 +17,15 @@ import paddle from paddle.distributed import fleet import paddle.nn as nn from paddle.fluid.dygraph.layers import Layer -from paddle.distributed.fleet.meta_parallel import LayerDesc, PipelineLayer, PipelineParallelWithInterleave +from paddle.distributed.fleet.meta_parallel import ( + LayerDesc, + PipelineLayer, + PipelineParallelWithInterleave, +) import paddle.nn.functional as F class ReshapeHelp(Layer): - def __init__(self, shape): super(ReshapeHelp, self).__init__() self.shape = shape @@ -32,7 +35,6 @@ class ReshapeHelp(Layer): class MLPForVirtualStageLayerTest(PipelineLayer): - def __init__(self, num_classes=10, **kwargs): self.num_classes = num_classes decs = [ @@ -45,21 +47,19 @@ class MLPForVirtualStageLayerTest(PipelineLayer): LayerDesc(nn.Linear, 2, self.num_classes), LayerDesc(nn.Linear, self.num_classes, 2), ] - super(MLPForVirtualStageLayerTest, - self).__init__(layers=decs, - loss_fn=nn.CrossEntropyLoss(), - **kwargs) + super(MLPForVirtualStageLayerTest, self).__init__( + layers=decs, loss_fn=nn.CrossEntropyLoss(), **kwargs + ) class TestPipeLayerAPI(unittest.TestCase): - def setUp(self): strategy = 
fleet.DistributedStrategy() self.pipeline_parallel_size = 2 strategy.hybrid_configs = { "dp_degree": 1, "mp_degree": 1, - "pp_degree": self.pipeline_parallel_size + "pp_degree": self.pipeline_parallel_size, } fleet.init(is_collective=True, strategy=strategy) self.rank = fleet.worker_index() @@ -74,8 +74,9 @@ class TestPipeLayerAPI(unittest.TestCase): recompute_ctx={ "mp_group": self.hcg.get_model_parallel_group(), "offload": False, - "partition": False - }) + "partition": False, + }, + ) assert len(pipe_model.parameters()) > 0 model_chunks = pipe_model.get_model_chunks() assert model_chunks is not None @@ -84,14 +85,14 @@ class TestPipeLayerAPI(unittest.TestCase): optimizer = paddle.optimizer.SGD(parameters=pipe_model.parameters()) try: - model_chunks[0](paddle.to_tensor([1., 2.])) + model_chunks[0](paddle.to_tensor([1.0, 2.0])) raise NotImplementedError except PermissionError: pass # fake call for the forward function of virtual pipeline layer for i in range(len(model_chunks)): - out = pipe_model(paddle.to_tensor([1., 2.]), chunk_id=i) + out = pipe_model(paddle.to_tensor([1.0, 2.0]), chunk_id=i) assert list(out.shape) == [2] out = F.relu(out) loss = paddle.mean(out) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py index fde0572d3d8ebd309bfe32879de9500720ce2b72..e9443b511e05c0aa27e8751fc005d9f12ca68220 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py @@ -42,7 +42,6 @@ dim_feedforward = 4 * d_model class EmbeddingNet(Layer): - def __init__(self): super(EmbeddingNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) @@ -56,7 +55,6 @@ class EmbeddingNet(Layer): class TransformerNet(Layer): - def __init__(self): super(TransformerNet, self).__init__() self.linear1 = nn.Linear(d_model, dim_feedforward) @@ -72,7 +70,9 @@ class TransformerNet(Layer): q = self.q_proj(x) k = self.k_proj(x) v = self.v_proj(x) - product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_model**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=d_model**-0.5 + ) weights = F.softmax(product) weights = F.dropout(weights, 0.2) @@ -86,20 +86,17 @@ class TransformerNet(Layer): class EmbeddingPipe(EmbeddingNet): - def forward(self, x): return super().forward(x) class TransformerNetPipe(TransformerNet): - def forward(self, x): output = super().forward(x) return output class CriterionPipe(Layer): - def __init__(self): super(CriterionPipe, self).__init__() @@ -109,7 +106,6 @@ class CriterionPipe(Layer): class ModelPipe(PipelineLayer): - def __init__(self, hcg): self.descs = [] self.descs.append(LayerDesc(EmbeddingPipe)) @@ -118,20 +114,21 @@ class ModelPipe(PipelineLayer): for x in range(2): self.descs.append(LayerDesc(TransformerNetPipe)) - super().__init__(layers=self.descs, - loss_fn=CriterionPipe(), - topology=self.hcg.topology(), - seg_method="layer:TransformerNetPipe", - recompute_interval=1, - recompute_ctx={ - "mp_group": self.hcg.get_model_parallel_group(), - "offload": False, - "partition": False - }) + super().__init__( + layers=self.descs, + loss_fn=CriterionPipe(), + topology=self.hcg.topology(), + seg_method="layer:TransformerNetPipe", + recompute_interval=1, + recompute_ctx={ + "mp_group": self.hcg.get_model_parallel_group(), + "offload": False, + "partition": False, + }, + ) class 
TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -144,7 +141,7 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -158,11 +155,12 @@ class TestDistPPTraning(unittest.TestCase): set_random_seed(1024, dp_id, rank_id) model = ModelPipe(hcg) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load.py index 5d52093a86f20778b314bdd638f137f27f21e4d2..b7990ff0237803411b965844cfef9c414c51310b 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load.py @@ -29,7 +29,6 @@ vocab_size = 128 class TestDistPPSaveLoadTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -42,7 +41,7 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -56,11 +55,12 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): set_random_seed(1024, dp_id, rank_id) model = ModelPipe(topology) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) @@ -74,14 +74,16 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): loss = model.train_batch([x, x], optimizer, scheduler) model._layers.save_state_dict(output_dir) - paddle.save(optimizer.state_dict(), - os.path.join(output_dir, "model_state.pdopt")) + paddle.save( + optimizer.state_dict(), + os.path.join(output_dir, "model_state.pdopt"), + ) # construct data test_steps = 5 - np_data = np.random.randint(0, - vocab_size, - size=[test_steps, batch_size, length]) + np_data = np.random.randint( + 0, vocab_size, size=[test_steps, batch_size, length] + ) origin_loss = [] for step_id in range(5): @@ -101,8 +103,12 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): x = paddle.to_tensor(x_data) x.stop_gradient = True loss = model.train_batch([x, x], optimizer, scheduler) - print("origin loss: ", origin_loss[step_id], "current loss: ", - loss.numpy()) + print( + "origin loss: ", + origin_loss[step_id], + "current loss: ", + loss.numpy(), + ) 
np.testing.assert_allclose(loss.numpy(), origin_loss[step_id]) # finally, remove the model/optimizer path diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load_with_virtual_stage.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load_with_virtual_stage.py index f71e2511a2023f9a67d7be7d044dffff55978ff8..dfe4ecae40392f4be7584e7e146badbd863e1cb7 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load_with_virtual_stage.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_save_load_with_virtual_stage.py @@ -20,7 +20,10 @@ import shutil import tempfile import paddle.distributed as dist import paddle.distributed.fleet as fleet -from hybrid_parallel_pp_transformer_with_virtual_stage import ModelPipe, set_random_seed +from hybrid_parallel_pp_transformer_with_virtual_stage import ( + ModelPipe, + set_random_seed, +) batch_size = 8 length = 8 @@ -29,7 +32,6 @@ vocab_size = 128 class TestDistPPSaveLoadTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -42,7 +44,7 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -56,11 +58,12 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): set_random_seed(1024, dp_id, rank_id) model = ModelPipe(topology) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) @@ -74,14 +77,16 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): loss = model.train_batch([x, x], optimizer, scheduler) model._layers.save_state_dict(output_dir) - paddle.save(optimizer.state_dict(), - os.path.join(output_dir, "model_state.pdopt")) + paddle.save( + optimizer.state_dict(), + os.path.join(output_dir, "model_state.pdopt"), + ) # construct data test_steps = 5 - np_data = np.random.randint(0, - vocab_size, - size=[test_steps, batch_size, length]) + np_data = np.random.randint( + 0, vocab_size, size=[test_steps, batch_size, length] + ) origin_loss = [] for step_id in range(5): @@ -101,8 +106,12 @@ class TestDistPPSaveLoadTraning(unittest.TestCase): x = paddle.to_tensor(x_data) x.stop_gradient = True loss = model.train_batch([x, x], optimizer, scheduler) - print("origin loss: ", origin_loss[step_id], "current loss: ", - loss.numpy()) + print( + "origin loss: ", + origin_loss[step_id], + "current loss: ", + loss.numpy(), + ) np.testing.assert_allclose(loss.numpy(), origin_loss[step_id]) # finally, remove the model/optimizer path diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer.py index de0475018d24fe7af3841427b059abe216b25752..e1f4aa1762e12295416239fdb99f947f099183be 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer.py @@ -42,15 +42,15 @@ dim_feedforward = 4 * d_model class EmbeddingNet(Layer): - def __init__(self): super(EmbeddingNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) self.position_embeddings = nn.Embedding(vocab_size, hidden_size) def forward(self, x): - attention_mask = paddle.tensor.triu((paddle.ones( - (length, length), dtype="float32") * -1e9), 1) + attention_mask = paddle.tensor.triu( + (paddle.ones((length, length), dtype="float32") * -1e9), 1 + ) no_used = paddle.ones((3, 3), dtype="int32") @@ -65,7 +65,6 @@ class EmbeddingNet(Layer): class TransformerNet(Layer): - def __init__(self): super(TransformerNet, self).__init__() self.linear1 = nn.Linear(d_model, dim_feedforward) @@ -81,7 +80,9 @@ class TransformerNet(Layer): q = self.q_proj(x) k = self.k_proj(x) v = self.v_proj(x) - product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_model**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=d_model**-0.5 + ) weights = F.softmax(product + mask) # TODO(shenliang03) For save/load in PipeLineParallel, can’t support dropout temporarily. @@ -96,13 +97,11 @@ class TransformerNet(Layer): class EmbeddingPipe(EmbeddingNet): - def forward(self, x): return super().forward(x) class TransformerNetPipe(TransformerNet): - def forward(self, args): x, mask, no_used, p_emb = args[0], args[1], args[2], args[3] @@ -113,7 +112,6 @@ class TransformerNetPipe(TransformerNet): class CriterionPipe(Layer): - def __init__(self): super(CriterionPipe, self).__init__() @@ -123,7 +121,6 @@ class CriterionPipe(Layer): class ModelPipe(PipelineLayer): - def __init__(self, topology): self.descs = [] self.descs.append(LayerDesc(EmbeddingPipe)) @@ -133,14 +130,15 @@ class ModelPipe(PipelineLayer): self.descs.append(lambda x: x[0]) - super().__init__(layers=self.descs, - loss_fn=CriterionPipe(), - topology=topology, - seg_method="layer:TransformerNetPipe") + super().__init__( + layers=self.descs, + loss_fn=CriterionPipe(), + topology=topology, + seg_method="layer:TransformerNetPipe", + ) class TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -153,7 +151,7 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -167,11 +165,12 @@ class TestDistPPTraning(unittest.TestCase): set_random_seed(1024, dp_id, rank_id) model = ModelPipe(topology) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer_with_virtual_stage.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer_with_virtual_stage.py index edad5e5bc99bd5d67e955ebb79ed242ab8ba2c76..89e0410e2ca7f3cd167cfaf51e9ca39c2903fcfb 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer_with_virtual_stage.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_transformer_with_virtual_stage.py @@ -43,15 +43,15 @@ dim_feedforward = 4 * d_model class EmbeddingNet(Layer): - def __init__(self): super(EmbeddingNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) self.position_embeddings = nn.Embedding(vocab_size, hidden_size) def forward(self, x): - attention_mask = paddle.tensor.triu((paddle.ones( - (length, length), dtype="float32") * -1e9), 1) + attention_mask = paddle.tensor.triu( + (paddle.ones((length, length), dtype="float32") * -1e9), 1 + ) no_used = paddle.ones((3, 3), dtype="int32") @@ -66,7 +66,6 @@ class EmbeddingNet(Layer): class TransformerNet(Layer): - def __init__(self): super(TransformerNet, self).__init__() self.linear1 = nn.Linear(d_model, dim_feedforward) @@ -82,7 +81,9 @@ class TransformerNet(Layer): q = self.q_proj(x) k = self.k_proj(x) v = self.v_proj(x) - product = layers.matmul(x=q, y=k, transpose_y=True, alpha=d_model**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=d_model**-0.5 + ) weights = F.softmax(product + mask) tgt = layers.matmul(weights, v) @@ -95,13 +96,11 @@ class TransformerNet(Layer): class EmbeddingPipe(EmbeddingNet): - def forward(self, x): return super().forward(x) class TransformerNetPipe(TransformerNet): - def forward(self, args): x, mask, no_used, p_emb = args[0], args[1], args[2], args[3] @@ -112,7 +111,6 @@ class TransformerNetPipe(TransformerNet): class CriterionPipe(Layer): - def __init__(self): super(CriterionPipe, self).__init__() @@ -122,7 +120,6 @@ class CriterionPipe(Layer): class ModelPipe(PipelineLayer): - def __init__(self, topology): self.descs = [] self.descs.append(LayerDesc(EmbeddingPipe)) @@ -137,11 +134,11 @@ class ModelPipe(PipelineLayer): loss_fn=CriterionPipe(), topology=topology, num_virtual_pipeline_stages=num_virtual_pipeline_stages, - seg_method="layer:TransformerNetPipe") + seg_method="layer:TransformerNetPipe", + ) class TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -154,7 +151,7 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -168,11 +165,12 @@ class TestDistPPTraning(unittest.TestCase): set_random_seed(1024, dp_id, rank_id) model = ModelPipe(topology) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_qat.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_qat.py index 03e357993493f44928c34eb487d013ae9e6eb96c..80e7b8b4e695cadffb70bb733b30ed95474f5308 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_qat.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_qat.py @@ -79,7 +79,8 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): if world_size > 1: input_parallel = paddle.distributed.collective._c_identity( - lm_output, group=model_parallel_group) + lm_output, group=model_parallel_group + ) logits = paddle.matmul(input_parallel, logit_weights, transpose_y=True) @@ -87,22 +88,23 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): return logits return paddle.distributed.collective._c_concat( - logits, group=model_parallel_group) + logits, group=model_parallel_group + ) else: logits = paddle.matmul(lm_output, logit_weights, transpose_y=True) return logits class PACT(nn.Layer): - def __init__(self, init_value=20): super(PACT, self).__init__() alpha_attr = paddle.ParamAttr( name=self.full_name() + ".pact", - initializer=paddle.nn.initializer.Constant(value=init_value)) - self.alpha = self.create_parameter(shape=[1], - attr=alpha_attr, - dtype='float32') + initializer=paddle.nn.initializer.Constant(value=init_value), + ) + self.alpha = self.create_parameter( + shape=[1], attr=alpha_attr, dtype='float32' + ) def forward(self, x): out_left = paddle.nn.functional.relu(x - self.alpha) @@ -112,46 +114,61 @@ class PACT(nn.Layer): class SimpleMPNet(nn.Layer): - - def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2, mp_id): + def __init__( + self, + vocab_size, + hidden_size, + inner_size, + output_size, + np_fc1, + np_fc2, + mp_id, + ): super(SimpleMPNet, self).__init__() if mp_id == 0: - init_fc1_data = np_fc1[:, :(inner_size // 2)] - init_fc2_data = np_fc2[:(inner_size // 2), :] + init_fc1_data = np_fc1[:, : (inner_size // 2)] + init_fc2_data = np_fc2[: (inner_size // 2), :] else: - init_fc1_data = np_fc1[:, (inner_size // 2):] - init_fc2_data = np_fc2[(inner_size // 2):, :] + init_fc1_data = np_fc1[:, (inner_size // 2) :] + init_fc2_data = np_fc2[(inner_size // 2) :, :] self.linear1 = fleet.meta_parallel.ColumnParallelLinear( hidden_size, inner_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc1_data)), + initializer=paddle.nn.initializer.Assign(init_fc1_data) + ), gather_output=False, - has_bias=True) + has_bias=True, + ) self.linear2 = fleet.meta_parallel.RowParallelLinear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc2_data)), + initializer=paddle.nn.initializer.Assign(init_fc2_data) + ), input_is_parallel=True, - has_bias=True) + has_bias=True, + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = fleet.meta_parallel.VocabParallelEmbedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=1.)) + weight_attr=paddle.nn.initializer.Constant(value=1.0), + ) def forward(self, x): x = self.embedding(x) @@ -163,53 +180,62 @@ class SimpleMPNet(nn.Layer): class SimpleDPNet(nn.Layer): - - def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2): + def __init__( + self, vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ): super(SimpleDPNet, self).__init__() self.linear1 = paddle.nn.Linear( hidden_size, inner_size, 
weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc1)), + initializer=paddle.nn.initializer.Assign(np_fc1) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear2 = paddle.nn.Linear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc2)), + initializer=paddle.nn.initializer.Assign(np_fc2) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = paddle.nn.Embedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=1.)) + weight_attr=paddle.nn.initializer.Constant(value=1.0), + ) def forward(self, x): x = self.embedding(x) x = self.linear1(x) x = self.linear2(x) x = self.linear3(x) - x = paddle.matmul(x, - get_attr(self.embedding, "weight"), - transpose_y=True) + x = paddle.matmul( + x, get_attr(self.embedding, "weight"), transpose_y=True + ) return x class TestDistMPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 @@ -217,7 +243,7 @@ class TestDistMPTraning(unittest.TestCase): strategy.hybrid_configs = { "dp_degree": self.data_parallel_size, "mp_degree": self.model_parallel_size, - "pp_degree": 1 + "pp_degree": 1, } fleet.init(is_collective=True, strategy=strategy) self.onnx_format = False @@ -234,14 +260,14 @@ class TestDistMPTraning(unittest.TestCase): return loss def build_optimizer(self, model): - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return optimizer - def build_model_optimizer(self, - weight_quantize_type, - activation_quantize_type, - use_pact=False): + def build_model_optimizer( + self, weight_quantize_type, activation_quantize_type, use_pact=False + ): hcg = fleet.get_hybrid_communicate_group() word_size = hcg.get_model_parallel_world_size() mp_id = hcg.get_model_parallel_rank() @@ -251,23 +277,32 @@ class TestDistMPTraning(unittest.TestCase): weight_quantize_type=weight_quantize_type, activation_quantize_type=activation_quantize_type, fuse_conv_bn=self.fuse_conv_bn, - act_preprocess_layer=PACT if use_pact else None) + act_preprocess_layer=PACT if use_pact else None, + ) set_random_seed(1024, dp_id, rank_id) np_fc1 = np.ones((hidden_size, inner_size)) np_fc2 = np.ones( - (inner_size, - hidden_size)) #np.random.random_sample((inner_size, hidden_size)) + (inner_size, hidden_size) + ) # np.random.random_sample((inner_size, hidden_size)) - model_a = SimpleMPNet(vocab_size, hidden_size, inner_size, output_size, - np_fc1, np_fc2, mp_id) + model_a = SimpleMPNet( + vocab_size, + hidden_size, + inner_size, + output_size, + np_fc1, + np_fc2, + mp_id, + ) model_a = imperative_qat.quantize(model_a) optimizer_a = self.build_optimizer(model_a) model_a = fleet.distributed_model(model_a) optimizer_a = fleet.distributed_optimizer(optimizer_a) - model_b = SimpleDPNet(vocab_size, hidden_size, inner_size, 
output_size, - np_fc1, np_fc2) + model_b = SimpleDPNet( + vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ) model_b = imperative_qat.quantize(model_b) optimizer_b = self.build_optimizer(model_b) @@ -277,21 +312,27 @@ class TestDistMPTraning(unittest.TestCase): for epoch in range(5): - np_data = np.random.randint(0, vocab_size, ( - batch_size, - seq_length, - )) + np_data = np.random.randint( + 0, + vocab_size, + ( + batch_size, + seq_length, + ), + ) batch = paddle.to_tensor(np_data) loss_a = self.train_batch(batch, model_a, optimizer_a, True) loss_b = self.train_batch(batch, model_b, optimizer_b, False) - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=1e-6) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=1e-6 + ) def test_mp_model_1(self): - if not fluid.core.is_compiled_with_cuda( - ) or fluid.core.get_cuda_device_count() == 0: + if ( + not fluid.core.is_compiled_with_cuda() + or fluid.core.get_cuda_device_count() == 0 + ): return selected_gpus = get_gpus('0,1') cluster = None @@ -299,12 +340,15 @@ class TestDistMPTraning(unittest.TestCase): model_a, optimizer_a, model_b, optimizer_b = self.build_model_optimizer( weight_quantize_type='abs_max', - activation_quantize_type='moving_average_abs_max') + activation_quantize_type='moving_average_abs_max', + ) self.train(model_a, optimizer_a, model_b, optimizer_b) def test_mp_model_2(self): - if not fluid.core.is_compiled_with_cuda( - ) or fluid.core.get_cuda_device_count() == 0: + if ( + not fluid.core.is_compiled_with_cuda() + or fluid.core.get_cuda_device_count() == 0 + ): return selected_gpus = get_gpus('0,1') cluster = None @@ -313,7 +357,8 @@ class TestDistMPTraning(unittest.TestCase): model_a, optimizer_a, model_b, optimizer_b = self.build_model_optimizer( weight_quantize_type='channel_wise_abs_max', activation_quantize_type='moving_average_abs_max', - use_pact=True) + use_pact=True, + ) self.train(model_a, optimizer_a, model_b, optimizer_b) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py index 96a7825dc801cf290cdf85df2fa7cf7e0096603e..147ba1e494c6ad82dcf3ddca3d3398c40863e865 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py @@ -18,7 +18,9 @@ import random import paddle.distributed as dist import paddle.fluid as fluid import paddle.distributed.fleet as fleet -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.dygraph_sharding_optimizer import DygraphShardingOptimizer +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.dygraph_sharding_optimizer import ( + DygraphShardingOptimizer, +) import unittest vocab_size = 20 @@ -38,7 +40,8 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): if world_size > 1: input_parallel = paddle.distributed.collective._c_identity( - lm_output, group=model_parallel_group) + lm_output, group=model_parallel_group + ) logits = paddle.matmul(input_parallel, logit_weights, transpose_y=True) @@ -46,53 +49,69 @@ def parallel_matmul(lm_output, logit_weights, parallel_output): return logits return paddle.distributed.collective._c_concat( - logits, group=model_parallel_group) + logits, group=model_parallel_group + ) else: logits = paddle.matmul(lm_output, logit_weights, transpose_y=True) return logits class SimpleMPNet(fluid.dygraph.Layer): - 
- def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2, mp_id): + def __init__( + self, + vocab_size, + hidden_size, + inner_size, + output_size, + np_fc1, + np_fc2, + mp_id, + ): super(SimpleMPNet, self).__init__() if mp_id == 0: - init_fc1_data = np_fc1[:, :(inner_size // 2)] - init_fc2_data = np_fc2[:(inner_size // 2), :] + init_fc1_data = np_fc1[:, : (inner_size // 2)] + init_fc2_data = np_fc2[: (inner_size // 2), :] else: - init_fc1_data = np_fc1[:, (inner_size // 2):] - init_fc2_data = np_fc2[(inner_size // 2):, :] + init_fc1_data = np_fc1[:, (inner_size // 2) :] + init_fc2_data = np_fc2[(inner_size // 2) :, :] self.linear1 = fleet.meta_parallel.ColumnParallelLinear( hidden_size, inner_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc1_data)), + initializer=paddle.nn.initializer.Assign(init_fc1_data) + ), gather_output=False, - has_bias=True) + has_bias=True, + ) self.linear2 = fleet.meta_parallel.RowParallelLinear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(init_fc2_data)), + initializer=paddle.nn.initializer.Assign(init_fc2_data) + ), input_is_parallel=True, - has_bias=True) + has_bias=True, + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = fleet.meta_parallel.VocabParallelEmbedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=0.5)) + weight_attr=paddle.nn.initializer.Constant(value=0.5), + ) def forward(self, x): x = self.embedding(x) @@ -104,39 +123,49 @@ class SimpleMPNet(fluid.dygraph.Layer): class SimpleDPNet(fluid.dygraph.Layer): - - def __init__(self, vocab_size, hidden_size, inner_size, output_size, np_fc1, - np_fc2): + def __init__( + self, vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ): super(SimpleDPNet, self).__init__() self.linear1 = paddle.nn.Linear( hidden_size, inner_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc1)), + initializer=paddle.nn.initializer.Assign(np_fc1) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear2 = paddle.nn.Linear( inner_size, hidden_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Assign(np_fc2)), + initializer=paddle.nn.initializer.Assign(np_fc2) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.linear3 = paddle.nn.Linear( hidden_size, output_size, weight_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0) + ), bias_attr=paddle.framework.ParamAttr( - initializer=paddle.nn.initializer.Constant(0.0))) + initializer=paddle.nn.initializer.Constant(0.0) + ), + ) self.embedding = paddle.nn.Embedding( vocab_size, hidden_size, - weight_attr=paddle.nn.initializer.Constant(value=0.5)) + weight_attr=paddle.nn.initializer.Constant(value=0.5), + ) def forward(self, x): x = self.embedding(x) @@ -148,7 +177,6 @@ class SimpleDPNet(fluid.dygraph.Layer): class 
TestDistMPTraning(unittest.TestCase): - def setUp(self): random.seed(2021) np.random.seed(2021) @@ -163,10 +191,15 @@ class TestDistMPTraning(unittest.TestCase): } fleet.init(is_collective=True, strategy=self.strategy) self.data = [ - np.random.randint(0, vocab_size, ( - batch_size, - seq_length, - )) for _ in range(STEPS) + np.random.randint( + 0, + vocab_size, + ( + batch_size, + seq_length, + ), + ) + for _ in range(STEPS) ] def train_batch(self, batch, model, optimizer): @@ -178,11 +211,9 @@ class TestDistMPTraning(unittest.TestCase): optimizer.clear_grad() return loss - def build_optimizer(self, - model, - strategy=None, - is_sharding=True, - Optimizer="adam"): + def build_optimizer( + self, model, strategy=None, is_sharding=True, Optimizer="adam" + ): clip = paddle.nn.ClipGradByGlobalNorm(0.5) if Optimizer == "adam": if is_sharding: @@ -193,13 +224,15 @@ class TestDistMPTraning(unittest.TestCase): inner_optimizer_class=paddle.optimizer.AdamW, learning_rate=0.001, weight_decay=0.00001, - grad_clip=clip) + grad_clip=clip, + ) else: optimizer = paddle.optimizer.AdamW( parameters=model.parameters(), learning_rate=0.001, weight_decay=0.00001, - grad_clip=clip) + grad_clip=clip, + ) else: if is_sharding: optimizer = DygraphShardingOptimizer( @@ -208,12 +241,14 @@ class TestDistMPTraning(unittest.TestCase): params=model.parameters(), inner_optimizer_class=paddle.optimizer.Momentum, learning_rate=0.001, - grad_clip=clip) + grad_clip=clip, + ) else: optimizer = paddle.optimizer.Momentum( learning_rate=0.001, parameters=model.parameters(), - grad_clip=clip) + grad_clip=clip, + ) return optimizer def build_model_optimizer(self, Optimizer="adam"): @@ -226,37 +261,48 @@ class TestDistMPTraning(unittest.TestCase): np_fc1 = np.random.random_sample((hidden_size, inner_size)) np_fc2 = np.random.random_sample((inner_size, hidden_size)) - model_a = SimpleDPNet(vocab_size, hidden_size, inner_size, output_size, - np_fc1, np_fc2) - optimizer_a = self.build_optimizer(model_a, - strategy=self.strategy, - is_sharding=True, - Optimizer=Optimizer) + model_a = SimpleDPNet( + vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ) + optimizer_a = self.build_optimizer( + model_a, + strategy=self.strategy, + is_sharding=True, + Optimizer=Optimizer, + ) model_a = fleet.distributed_model(model_a) optimizer_a = fleet.distributed_optimizer(optimizer_a) - model_b = SimpleDPNet(vocab_size, hidden_size, inner_size, output_size, - np_fc1, np_fc2) - optimizer_b = self.build_optimizer(model_b, - strategy=self.strategy, - is_sharding=False, - Optimizer=Optimizer) + model_b = SimpleDPNet( + vocab_size, hidden_size, inner_size, output_size, np_fc1, np_fc2 + ) + optimizer_b = self.build_optimizer( + model_b, + strategy=self.strategy, + is_sharding=False, + Optimizer=Optimizer, + ) return model_a, optimizer_a, model_b, optimizer_b def sharding_model(self, Optimizer, sharded_accumulators): model_a, optimizer_a, model_b, optimizer_b = self.build_model_optimizer( - Optimizer=Optimizer) + Optimizer=Optimizer + ) self.assertTrue( - isinstance(optimizer_a._inner_opt, DygraphShardingOptimizer)) + isinstance(optimizer_a._inner_opt, DygraphShardingOptimizer) + ) for idx in range(STEPS): if idx == 2 and paddle.distributed.get_rank() == 0: self.assertTrue( - set(optimizer_a._inner_opt._inner_optimizer.state_dict(). 
- keys()) == sharded_accumulators) + set( + optimizer_a._inner_opt._inner_optimizer.state_dict().keys() + ) + == sharded_accumulators + ) if paddle.distributed.get_rank() == 0: batch_sharding = paddle.to_tensor(self.data[idx][:2]) @@ -268,31 +314,49 @@ class TestDistMPTraning(unittest.TestCase): loss_b = self.train_batch(batch_single, model_b, optimizer_b) for j in range(len(model_a.parameters())): - np.testing.assert_allclose(model_a.parameters()[j].numpy(), - model_b.parameters()[j].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + model_a.parameters()[j].numpy(), + model_b.parameters()[j].numpy(), + rtol=1e-6, + ) def test_sharding_adam(self): - sharded_accumulators = set([ - 'linear_0.w_0_moment1_0', 'linear_1.b_0_moment1_0', - 'linear_2.b_0_moment1_0', 'embedding_0.w_0_moment1_0', - 'linear_0.w_0_moment2_0', 'linear_1.b_0_moment2_0', - 'linear_2.b_0_moment2_0', 'embedding_0.w_0_moment2_0', - 'linear_0.w_0_beta1_pow_acc_0', 'linear_1.b_0_beta1_pow_acc_0', - 'linear_2.b_0_beta1_pow_acc_0', 'embedding_0.w_0_beta1_pow_acc_0', - 'linear_0.w_0_beta2_pow_acc_0', 'linear_1.b_0_beta2_pow_acc_0', - 'linear_2.b_0_beta2_pow_acc_0', 'embedding_0.w_0_beta2_pow_acc_0' - ]) - self.sharding_model(Optimizer="adam", - sharded_accumulators=sharded_accumulators) + sharded_accumulators = set( + [ + 'linear_0.w_0_moment1_0', + 'linear_1.b_0_moment1_0', + 'linear_2.b_0_moment1_0', + 'embedding_0.w_0_moment1_0', + 'linear_0.w_0_moment2_0', + 'linear_1.b_0_moment2_0', + 'linear_2.b_0_moment2_0', + 'embedding_0.w_0_moment2_0', + 'linear_0.w_0_beta1_pow_acc_0', + 'linear_1.b_0_beta1_pow_acc_0', + 'linear_2.b_0_beta1_pow_acc_0', + 'embedding_0.w_0_beta1_pow_acc_0', + 'linear_0.w_0_beta2_pow_acc_0', + 'linear_1.b_0_beta2_pow_acc_0', + 'linear_2.b_0_beta2_pow_acc_0', + 'embedding_0.w_0_beta2_pow_acc_0', + ] + ) + self.sharding_model( + Optimizer="adam", sharded_accumulators=sharded_accumulators + ) def test_sharding_momentum(self): - sharded_accumulators = set([ - 'linear_6.w_0_velocity_0', 'linear_7.b_0_velocity_0', - 'linear_8.b_0_velocity_0', 'embedding_2.w_0_velocity_0' - ]) - self.sharding_model(Optimizer="Momentum", - sharded_accumulators=sharded_accumulators) + sharded_accumulators = set( + [ + 'linear_6.w_0_velocity_0', + 'linear_7.b_0_velocity_0', + 'linear_8.b_0_velocity_0', + 'embedding_2.w_0_velocity_0', + ] + ) + self.sharding_model( + Optimizer="Momentum", sharded_accumulators=sharded_accumulators + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py index 1696b966d7c52d76d86d151cd414478703397682..1176d70504b7c49095e1379d01f0fc87b6a318db 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py @@ -44,15 +44,16 @@ hidden_size = 16 class SimpleNet(Layer): - def __init__(self): super(SimpleNet, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) self.softmax_weight = self.create_parameter( - shape=[hidden_size, vocab_size]) - self.softmax_bias = self.create_parameter(shape=[vocab_size], - is_bias=False) + shape=[hidden_size, vocab_size] + ) + self.softmax_bias = self.create_parameter( + shape=[vocab_size], is_bias=False + ) def forward(self, x1, x2, y1): x_emb = self.word_embeddings(x1) @@ -62,14 +63,13 @@ class SimpleNet(Layer): projection = paddle.matmul(projection, 
self.word_embeddings.weight) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=y1, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=y1, soft_label=False + ) return loss.mean() class EmbeddingPipe(Layer): - def __init__(self): super(EmbeddingPipe, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, hidden_size) @@ -85,11 +85,11 @@ class EmbeddingPipe(Layer): class MatmulNet(Layer): - def __init__(self): super(MatmulNet, self).__init__() self.softmax_weight = self.create_parameter( - shape=[hidden_size, vocab_size]) + shape=[hidden_size, vocab_size] + ) def forward(self, args): x1, x2 = args @@ -99,7 +99,6 @@ class MatmulNet(Layer): class BiasNet(Layer): - def __init__(self): super(BiasNet, self).__init__() self.softmax_bias = self.create_parameter(shape=[vocab_size]) @@ -112,26 +111,25 @@ class BiasNet(Layer): class LossNet(Layer): - def __init__(self): super(LossNet, self).__init__() def forward(self, args, y1): projection = args - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=y1[0], - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=y1[0], soft_label=False + ) return loss.mean() class SimpleNetPipe(PipelineLayer): - def __init__(self, **kwargs): self.descs = [] self.descs.append( - SharedLayerDesc('embed', - EmbeddingPipe, - shared_weight_attr='embedding_weight')) + SharedLayerDesc( + 'embed', EmbeddingPipe, shared_weight_attr='embedding_weight' + ) + ) self.descs.append(LayerDesc(MatmulNet)) self.descs.append(LayerDesc(BiasNet)) @@ -140,18 +138,20 @@ class SimpleNetPipe(PipelineLayer): return paddle.matmul(output[0], embedding.embedding_weight) self.descs.append( - SharedLayerDesc('embed', - EmbeddingPipe, - forward_func=_logits_helper, - shared_weight_attr='embedding_weight')) + SharedLayerDesc( + 'embed', + EmbeddingPipe, + forward_func=_logits_helper, + shared_weight_attr='embedding_weight', + ) + ) - super(SimpleNetPipe, self).__init__(layers=self.descs, - loss_fn=LossNet(), - **kwargs) + super(SimpleNetPipe, self).__init__( + layers=self.descs, loss_fn=LossNet(), **kwargs + ) class TestDistEmbeddingTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -164,7 +164,7 @@ class TestDistEmbeddingTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) @@ -176,19 +176,23 @@ class TestDistEmbeddingTraning(unittest.TestCase): rank_id = dist.get_rank() set_random_seed(1024, dp_id, rank_id) - #construct model a + # construct model a model_a = SimpleNet() scheduler_a = paddle.optimizer.lr.PiecewiseDecay( - boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True) - optimizer_a = paddle.optimizer.SGD(learning_rate=scheduler_a, - parameters=model_a.parameters()) + boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True + ) + optimizer_a = paddle.optimizer.SGD( + learning_rate=scheduler_a, parameters=model_a.parameters() + ) model_b = SimpleNetPipe(topology=hcg.topology()) scheduler_b = paddle.optimizer.lr.PiecewiseDecay( - boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True) - optimizer_b = paddle.optimizer.SGD(learning_rate=scheduler_b, - parameters=model_b.parameters()) + boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04], verbose=True + ) + 
optimizer_b = paddle.optimizer.SGD( + learning_rate=scheduler_b, parameters=model_b.parameters() + ) model_b = fleet.distributed_model(model_b) optimizer_b = fleet.distributed_optimizer(optimizer_b) @@ -228,8 +232,9 @@ class TestDistEmbeddingTraning(unittest.TestCase): optimizer_a.clear_grad() scheduler_a.step() - loss_b = model_b.train_batch([(x1, x2), (y1, )], optimizer_b, - scheduler_b) + loss_b = model_b.train_batch( + [(x1, x2), (y1,)], optimizer_b, scheduler_b + ) print("loss", loss_a.numpy(), loss_b.numpy()) np.testing.assert_allclose(loss_a.numpy(), loss_b.numpy()) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py index b6e537681b21354765f24fb987aad2f376c832b8..9fa469ea5e234aff2e60b3d5019ac544a9e82aad 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/new_group.py @@ -17,7 +17,6 @@ import paddle class TestNewGroupAPI(object): - def __init__(self): paddle.distributed.init_parallel_env() d1 = np.array([1, 2, 3]) @@ -32,10 +31,9 @@ class TestNewGroupAPI(object): tmp = np.array([0, 0, 0]) result = paddle.to_tensor(tmp) - paddle.distributed.scatter(result, [self.tensor2, self.tensor1], - src=0, - group=gp, - sync_op=True) + paddle.distributed.scatter( + result, [self.tensor2, self.tensor1], src=0, group=gp, sync_op=True + ) if gp.rank == 0: assert np.array_equal(result, self.tensor2) elif gp.rank == 1: @@ -48,8 +46,9 @@ class TestNewGroupAPI(object): paddle.distributed.reduce(result, dst=0, group=gp, sync_op=True) if gp.rank == 0: - assert np.array_equal(result, paddle.add(self.tensor1, - self.tensor1)) + assert np.array_equal( + result, paddle.add(self.tensor1, self.tensor1) + ) elif gp.rank == 1: assert np.array_equal(result, self.tensor1) print("test reduce api ok") @@ -57,7 +56,8 @@ class TestNewGroupAPI(object): paddle.distributed.all_reduce(result, sync_op=True) assert np.array_equal( result, - paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1)) + paddle.add(paddle.add(self.tensor1, self.tensor1), self.tensor1), + ) print("test all_reduce api ok") paddle.distributed.wait(result, gp, use_calc_stream=True) @@ -65,10 +65,9 @@ class TestNewGroupAPI(object): print("test wait api ok") result = [] - paddle.distributed.all_gather(result, - self.tensor1, - group=gp, - sync_op=True) + paddle.distributed.all_gather( + result, self.tensor1, group=gp, sync_op=True + ) assert np.array_equal(result[0], self.tensor1) assert np.array_equal(result[1], self.tensor1) print("test all_gather api ok") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_class_center_sample.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_class_center_sample.py index ef7f54409872a3e5e6364561850322d5334c4c1e..bc75ede1d7e885e9b77faf1b9f2b7aaeafd24b98 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_class_center_sample.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_class_center_sample.py @@ -37,10 +37,13 @@ def class_center_sample_numpy(label, classes_list, num_samples): unique_label_per_device = [] for i in range(nranks): - index = np.logical_and(unique_label >= class_interval[i], - unique_label < class_interval[i + 1]) - pos_class_center_per_device.append(unique_label[index] - - class_interval[i]) + index = np.logical_and( + unique_label >= class_interval[i], + unique_label < class_interval[i + 1], + ) + pos_class_center_per_device.append( 
+ unique_label[index] - class_interval[i] + ) unique_label_per_device.append(unique_label[index]) num_samples_per_device = [] @@ -50,8 +53,9 @@ def class_center_sample_numpy(label, classes_list, num_samples): remapped_dict = {} for i in range(nranks): - for idx, v in enumerate(unique_label_per_device[i], - sampled_class_interval[i]): + for idx, v in enumerate( + unique_label_per_device[i], sampled_class_interval[i] + ): remapped_dict[v] = idx remapped_label = [] @@ -62,7 +66,6 @@ def class_center_sample_numpy(label, classes_list, num_samples): class TestParallelClassCenterSampleOp(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() fleet.init(is_collective=True, strategy=strategy) @@ -83,24 +86,35 @@ class TestParallelClassCenterSampleOp(unittest.TestCase): for dtype in ('int32', 'int64'): for _ in range(5): - classes_list = np.random.randint(10, 15, (nranks, )) + classes_list = np.random.randint(10, 15, (nranks,)) num_class = np.sum(classes_list) - np_label = np.random.randint(0, - num_class, (batch_size, ), - dtype=dtype) + np_label = np.random.randint( + 0, num_class, (batch_size,), dtype=dtype + ) label = paddle.to_tensor(np_label, dtype=dtype) - np_remapped_label, np_sampled_class_center_per_device = class_center_sample_numpy( - np_label, classes_list, num_samples) - remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample( - label, classes_list[rank_id], num_samples) - np.testing.assert_allclose(remapped_label.numpy(), - np_remapped_label) + ( + np_remapped_label, + np_sampled_class_center_per_device, + ) = class_center_sample_numpy( + np_label, classes_list, num_samples + ) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, classes_list[rank_id], num_samples + ) + np.testing.assert_allclose( + remapped_label.numpy(), np_remapped_label + ) np_sampled_class_index = np_sampled_class_center_per_device[ - rank_id] + rank_id + ] np.testing.assert_allclose( - sampled_class_index.numpy()[:len(np_sampled_class_index)], - np_sampled_class_index) + sampled_class_index.numpy()[: len(np_sampled_class_index)], + np_sampled_class_index, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_different.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_different.py index 24a29f2a71149769b27418b281cf67b53e24f2e4..203396b0fd00829a1a0c859fd90adfa5826d7fce 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_different.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_different.py @@ -25,14 +25,15 @@ np.random.seed(2021) class SimpleNet(fluid.Layer): - def __init__(self, hidden_size, vocab_size, is_sparse=False): super(SimpleNet, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size - self.embedding = Embedding(size=[self.vocab_size, self.hidden_size], - dtype='float32', - is_sparse=is_sparse) + self.embedding = Embedding( + size=[self.vocab_size, self.hidden_size], + dtype='float32', + is_sparse=is_sparse, + ) self.lin_a = paddle.nn.Linear(self.hidden_size, self.vocab_size) self.lin_b = paddle.nn.Linear(self.vocab_size, 1) @@ -57,10 +58,9 @@ class SimpleNet(fluid.Layer): projection = paddle.reshape(projection, shape=[-1, 1]) output = paddle.gather(projection, emb_mask_inds) target = paddle.gather(label, emb_mask_inds) - loss_box = F.smooth_l1_loss(output, - target, - reduction='sum', - 
delta=1.0) + loss_box = F.smooth_l1_loss( + output, target, reduction='sum', delta=1.0 + ) loss_box = loss_box / len(conf) return loss_box @@ -72,36 +72,60 @@ batch_num = 2000 hidden_size = 5 vocab_size = 100 -conf_dataset = [[0], [0], [0], [0], [1], [0], [1], [0], [0], [1], [0], [1], [1], - [1], [1], [1], [1], [1], [1], [1], [1], [0], [0], [1]] +conf_dataset = [ + [0], + [0], + [0], + [0], + [1], + [0], + [1], + [0], + [0], + [1], + [0], + [1], + [1], + [1], + [1], + [1], + [1], + [1], + [1], + [1], + [1], + [0], + [0], + [1], +] def fake_sample_reader(): - def __reader__(): for i in range(batch_num): x_data = np.random.randint(0, vocab_size) - y_data = np.random.random_sample((1, )).astype('float32') - conf_data = np.array( - conf_dataset[i % len(conf_dataset)]).astype('int64') + y_data = np.random.random_sample((1,)).astype('float32') + conf_data = np.array(conf_dataset[i % len(conf_dataset)]).astype( + 'int64' + ) yield x_data, y_data, conf_data return __reader__ class TestSimpleNet(TestParallelDyGraphRunnerBase): - def get_model(self): - model = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - is_sparse=False) + model = SimpleNet( + hidden_size=hidden_size, vocab_size=vocab_size, is_sparse=False + ) - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_same.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_same.py index 8b9aaf5294369d877400d2e58685634ea65200f3..f9097fc8461fc92571d7ce9fc2c59888015fe7e8 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_same.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_control_flow_same.py @@ -29,15 +29,18 @@ batch_num = 1000 class SimpleNet(fluid.Layer): - def __init__(self): super(SimpleNet, self).__init__() - self.net_a = paddle.nn.Sequential(paddle.nn.Linear(10, 20), - paddle.nn.Linear(20, 20), - paddle.nn.Linear(20, 5)) - self.net_b = paddle.nn.Sequential(paddle.nn.Linear(10, 20), - paddle.nn.Linear(20, 20), - paddle.nn.Linear(20, 5)) + self.net_a = paddle.nn.Sequential( + paddle.nn.Linear(10, 20), + paddle.nn.Linear(20, 20), + paddle.nn.Linear(20, 5), + ) + self.net_b = paddle.nn.Sequential( + paddle.nn.Linear(10, 20), + paddle.nn.Linear(20, 20), + paddle.nn.Linear(20, 5), + ) self.net_unused = Linear(10, 20) self.step = 0 @@ -51,24 +54,23 @@ class SimpleNet(fluid.Layer): def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ class TestSimpleNet(TestParallelDyGraphRunnerBase): - def get_model(self): model = SimpleNet() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, 
train_reader, optimizer def run_one_loop(self, model, optimizer, batch): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync.py index b00639202a684e2a6b3d3e02a42e24493ba76759..633cdbcb0af3b49c972f19a5741cce39f48b9ad5 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync.py @@ -20,7 +20,12 @@ import paddle import paddle.fluid as fluid import paddle.distributed as dist from paddle.fluid.dygraph.nn import Linear -from test_dist_base import print_to_err, print_to_out, runtime_main, TestParallelDyGraphRunnerBase +from test_dist_base import ( + print_to_err, + print_to_out, + runtime_main, + TestParallelDyGraphRunnerBase, +) seed = 90 RUN_STEP = 20 @@ -29,7 +34,6 @@ batch_num = 1000 class SimpleNet(fluid.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.net_a = Linear(input_dim=10, output_dim=20) @@ -44,14 +48,14 @@ class SimpleNet(fluid.Layer): class TestNoSync(TestParallelDyGraphRunnerBase): - def get_model(self): model = SimpleNet() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, optimizer, batch): @@ -67,7 +71,7 @@ class TestNoSync(TestParallelDyGraphRunnerBase): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace(device_id) else: - assert ("Only support CUDAPlace for now.") + assert "Only support CUDAPlace for now." 
with fluid.dygraph.guard(place): fluid.default_startup_program().random_seed = seed @@ -80,9 +84,11 @@ class TestNoSync(TestParallelDyGraphRunnerBase): dist.init_parallel_env() print_to_err( type(self).__name__, - "begin to prepare context in dygraph with nccl2") + "begin to prepare context in dygraph with nccl2", + ) model = paddle.DataParallel( - model, find_unused_parameters=args.find_unused_parameters) + model, find_unused_parameters=args.find_unused_parameters + ) print_to_err(type(self).__name__, "model built in dygraph") out_losses = self.model_train(args, model, opt, train_reader) print_to_out(out_losses) @@ -109,7 +115,8 @@ class TestNoSync(TestParallelDyGraphRunnerBase): model, train_reader, opt = self.get_model() if args.update_method in ["nccl2", "gloo"]: model = paddle.DataParallel( - model, find_unused_parameters=args.find_unused_parameters) + model, find_unused_parameters=args.find_unused_parameters + ) out_losses = self.model_train(args, model, opt, train_reader) print_to_out(out_losses) @@ -139,10 +146,9 @@ class TestNoSync(TestParallelDyGraphRunnerBase): def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_control_flow.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_control_flow.py index afbb4cf01229b233270de1fea95d677b2076cb2b..efe27d461ad6d4be2f341e495c5ad278ac31334b 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_control_flow.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_control_flow.py @@ -27,7 +27,6 @@ batch_num = 1000 class SimpleNetControlFlow(fluid.Layer): - def __init__(self): super(SimpleNetControlFlow, self).__init__() self.net_a = Linear(input_dim=10, output_dim=20) @@ -46,14 +45,14 @@ class SimpleNetControlFlow(fluid.Layer): class TestNoSyncControlFlow(TestNoSync): - def get_model(self): model = SimpleNetControlFlow() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, optimizer, batch): @@ -66,10 +65,9 @@ class TestNoSyncControlFlow(TestNoSync): def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_gradient_check.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_gradient_check.py index 51f5a3650ce50c91a9b4c466a7b705f8da85df95..43c36ab491b019d1b25ea5a5d3aaf62525d88e90 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_gradient_check.py @@ -29,17 +29,19 @@ out_dim = 20 class SimpleNet(fluid.Layer): - def __init__(self, train_id): super(SimpleNet, self).__init__() - self.w1 = 
self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") - self.w2 = self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") + self.w1 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) + self.w2 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) self.share_net = Linear(out_dim, 1) - self.unused_param = self.create_parameter(shape=[out_dim, in_dim], - dtype="float32") + self.unused_param = self.create_parameter( + shape=[out_dim, in_dim], dtype="float32" + ) # just for test sync_params_buffers self.register_buffer("queue", paddle.randn([10, 5])) @@ -49,9 +51,10 @@ class SimpleNet(fluid.Layer): self.trainer_id = train_id def forward(self, x): - is_use = (paddle.equal_all( - x, paddle.ones(shape=(batch, in_dim))).numpy()[0] - and self.trainer_id == 1) + is_use = ( + paddle.equal_all(x, paddle.ones(shape=(batch, in_dim))).numpy()[0] + and self.trainer_id == 1 + ) if is_use: tmp = paddle.matmul(x, self.w1) @@ -62,7 +65,6 @@ class SimpleNet(fluid.Layer): class TestDistTraning(unittest.TestCase): - def test_multiple_gpus(self): self.trainer_id = dist.get_rank() dist.init_parallel_env() @@ -85,11 +87,13 @@ class TestDistTraning(unittest.TestCase): if step_id % 5 != 0: with model_a.no_sync(): - self.dp_layer(step_id, model_a, model_b, random_input, - ones_input) + self.dp_layer( + step_id, model_a, model_b, random_input, ones_input + ) else: - self.dp_layer(step_id, model_a, model_b, random_input, - ones_input) + self.dp_layer( + step_id, model_a, model_b, random_input, ones_input + ) self.check_gradient(model_a.parameters()) self.check_gradient(model_b.parameters()) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_unused_params.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_unused_params.py index bd8e598baf165435f565a4bf010233d1db0af357..38804f11b2a87d80cb6ea1307f62179dcb51a609 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_unused_params.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_no_sync_unused_params.py @@ -27,7 +27,6 @@ batch_num = 1000 class SimpleNetUnusedParam(fluid.Layer): - def __init__(self): super(SimpleNetUnusedParam, self).__init__() self.net_a = Linear(input_dim=10, output_dim=20) @@ -45,14 +44,14 @@ class SimpleNetUnusedParam(fluid.Layer): class TestNoSyncUnusedParam(TestNoSync): - def get_model(self): model = SimpleNetUnusedParam() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, optimizer, batch): @@ -65,10 +64,9 @@ class TestNoSyncUnusedParam(TestNoSync): def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py index 09878cec229748e7ac4e89a7a3484dfd6fdf4c4b..0a0759f4380a1b703fc8c2b417619b4c78e1fb0d 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py @@ -33,12 +33,12 @@ train_parameters = { "name": "cosine_decay", "batch_size": batch_size, "epochs": [40, 80, 100], - "steps": [0.1, 0.01, 0.001, 0.0001] + "steps": [0.1, 0.01, 0.001, 0.0001], }, "batch_size": batch_size, "lr": 0.0125, "total_images": 6149, - "num_epochs": 200 + "num_epochs": 200, } @@ -56,42 +56,47 @@ def optimizer_setting(params, parameter_list=None): num_epochs = params["num_epochs"] if fluid._non_static_mode(): optimizer = fluid.optimizer.Momentum( - learning_rate=fluid.layers.cosine_decay(learning_rate=lr, - step_each_epoch=step, - epochs=num_epochs), + learning_rate=fluid.layers.cosine_decay( + learning_rate=lr, step_each_epoch=step, epochs=num_epochs + ), momentum=momentum_rate, regularization=fluid.regularizer.L2Decay(l2_decay), - parameter_list=parameter_list) + parameter_list=parameter_list, + ) else: optimizer = fluid.optimizer.Momentum( - learning_rate=fluid.layers.cosine_decay(learning_rate=lr, - step_each_epoch=step, - epochs=num_epochs), + learning_rate=fluid.layers.cosine_decay( + learning_rate=lr, step_each_epoch=step, epochs=num_epochs + ), momentum=momentum_rate, - regularization=fluid.regularizer.L2Decay(l2_decay)) + regularization=fluid.regularizer.L2Decay(l2_decay), + ) return optimizer class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=False) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False, + ) # disable BatchNorm in multi-card. 
disable LayerNorm because of complex input_shape # self._batch_norm = BatchNorm(num_filters, act=act) @@ -104,7 +109,6 @@ class ConvBNLayer(fluid.dygraph.Layer): class SqueezeExcitation(fluid.dygraph.Layer): - def __init__(self, num_channels, reduction_ratio): super(SqueezeExcitation, self).__init__() @@ -115,15 +119,19 @@ class SqueezeExcitation(fluid.dygraph.Layer): num_channels, num_channels // reduction_ratio, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv)), - act='relu') + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + act='relu', + ) stdv = 1.0 / math.sqrt(num_channels / 16.0 * 1.0) self._excitation = Linear( num_channels // reduction_ratio, num_channels, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv)), - act='sigmoid') + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + act='sigmoid', + ) def forward(self, input): y = self._pool(input) @@ -135,39 +143,49 @@ class SqueezeExcitation(fluid.dygraph.Layer): class BottleneckBlock(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - stride, - cardinality, - reduction_ratio, - shortcut=True): + def __init__( + self, + num_channels, + num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + ): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act="relu") - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - groups=cardinality, - act="relu") - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 2, - filter_size=1, - act=None) - - self.scale = SqueezeExcitation(num_channels=num_filters * 2, - reduction_ratio=reduction_ratio) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + groups=cardinality, + act="relu", + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2, + filter_size=1, + act=None, + ) + + self.scale = SqueezeExcitation( + num_channels=num_filters * 2, reduction_ratio=reduction_ratio + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 2, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut @@ -189,67 +207,76 @@ class BottleneckBlock(fluid.dygraph.Layer): class SeResNeXt(fluid.dygraph.Layer): - def __init__(self, layers=50, class_dim=102): super(SeResNeXt, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 6, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, 
pool_padding=1, pool_type='max' + ) elif layers == 101: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 23, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) elif layers == 152: cardinality = 64 reduction_ratio = 16 depth = [3, 8, 36, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=3, - stride=2, - act='relu') - self.conv1 = ConvBNLayer(num_channels=64, - num_filters=64, - filter_size=3, - stride=1, - act='relu') - self.conv2 = ConvBNLayer(num_channels=64, - num_filters=128, - filter_size=3, - stride=1, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] num_channels = 64 @@ -258,19 +285,22 @@ class SeResNeXt(fluid.dygraph.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - cardinality=cardinality, - reduction_ratio=reduction_ratio, - shortcut=shortcut)) + BottleneckBlock( + num_channels=num_channels, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio, + shortcut=shortcut, + ), + ) num_channels = bottleneck_block._num_channels_out self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) stdv = 1.0 / math.sqrt(2048 * 1.0) self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1 @@ -279,7 +309,9 @@ class SeResNeXt(fluid.dygraph.Layer): self.pool2d_avg_output, class_dim, param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + ) def forward(self, inputs): if self.layers == 50 or self.layers == 101: @@ -300,20 +332,23 @@ class SeResNeXt(fluid.dygraph.Layer): class TestSeResNeXt(TestParallelDyGraphRunnerBase): - def get_model(self): model = SeResNeXt() - train_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False), - batch_size=train_parameters["batch_size"], - drop_last=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=model.parameters()) + train_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), + batch_size=train_parameters["batch_size"], + drop_last=True, + ) + optimizer = optimizer_setting( + train_parameters, parameter_list=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, opt, data): bs = len(data) - dy_x_data = 
np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') + dy_x_data = np.array([x[0].reshape(3, 224, 224) for x in data]).astype( + 'float32' + ) dy_x_data = dy_x_data / 255.0 y_data = np.array([x[1] for x in data]).astype('int64').reshape(bs, 1) img = to_variable(dy_x_data) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_sync_batch_norm.py index 6d1f100252ca445f8da07b24e013bac56827df54..475eb1fa6d1d321b487a77a6e6a4e7b807cb23b1 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_sync_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_sync_batch_norm.py @@ -23,37 +23,42 @@ from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase class TestLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(TestLayer, self).__init__() - self._conv = Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) self._sync_batch_norm = SyncBatchNorm(num_filters) - self._conv2 = Conv2D(in_channels=num_filters, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv2 = Conv2D( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) - self._sync_batch_norm2 = SyncBatchNorm(num_filters, - weight_attr=False, - bias_attr=False) + self._sync_batch_norm2 = SyncBatchNorm( + num_filters, weight_attr=False, bias_attr=False + ) def forward(self, inputs): y = self._conv(inputs) @@ -65,20 +70,23 @@ class TestLayer(fluid.dygraph.Layer): class TestSyncBatchNorm(TestParallelDyGraphRunnerBase): - def get_model(self): model = TestLayer(3, 64, 7) - train_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False), - batch_size=32, - drop_last=True) - opt = fluid.optimizer.Adam(learning_rate=1e-3, - parameter_list=model.parameters()) + train_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), + batch_size=32, + drop_last=True, + ) + opt = fluid.optimizer.Adam( + learning_rate=1e-3, parameter_list=model.parameters() + ) return model, train_reader, opt def run_one_loop(self, model, opt, data): batch_size = len(data) - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') + dy_x_data = np.array([x[0].reshape(3, 224, 224) for x in data]).astype( + 'float32' + ) img = to_variable(dy_x_data) img.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py index afe7799632d3a7a5dbf1701a2c2551c1d796649d..97805222f801162e4b2e2d08ae688855d84273ee 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_transformer.py @@ -16,10 +16,17 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer +from paddle.fluid.dygraph import ( + Embedding, + LayerNorm, + Linear, + to_variable, + Layer, +) from paddle.optimizer.lr import NoamDecay from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase + """ Note(chenweihang): To compare loss of single-card and multi-card in our dist test framework, two parameters need to be adjusted: @@ -33,6 +40,7 @@ class TrainTaskConfig(object): """ TrainTaskConfig """ + # the epoch number to train. pass_num = 20 # the number of sequences contained in a mini-batch. @@ -117,12 +125,17 @@ input_descs = { # encoder. # The actual data shape of src_slf_attn_bias is: # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch] - "src_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "src_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # The actual data shape of trg_word is: # [batch_size, max_trg_len_in_batch, 1] - "trg_word": [(batch_size, seq_len, 1), "int64", - 2], # lod_level is only used in fast decoder. + "trg_word": [ + (batch_size, seq_len, 1), + "int64", + 2, + ], # lod_level is only used in fast decoder. # The actual data shape of trg_pos is: # [batch_size, max_trg_len_in_batch, 1] "trg_pos": [(batch_size, seq_len, 1), "int64"], @@ -130,14 +143,18 @@ input_descs = { # subsequent words in the decoder. # The actual data shape of trg_slf_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch] - "trg_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used to remove attention weights on paddings of the source # input in the encoder-decoder attention. # The actual data shape of trg_src_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch] - "trg_src_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_src_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used in independent decoder program for inference. # The actual data shape of enc_output is: # [batch_size, max_src_len_in_batch, d_model] @@ -153,7 +170,7 @@ input_descs = { "init_score": [(batch_size, 1), "float32", 2], # This input is used in beam-search decoder for the first gather # (cell states updation) - "init_idx": [(batch_size, ), "int32"], + "init_idx": [(batch_size,), "int32"], } # Names of word embedding table which might be reused for weight sharing. 
@@ -200,26 +217,30 @@ def position_encoding_init(n_position, d_pos_vec): channels = d_pos_vec position = np.arange(n_position) num_timescales = channels // 2 - log_timescale_increment = (np.log(float(1e4) / float(1)) / - (num_timescales - 1)) - inv_timescales = np.exp( - np.arange(num_timescales)) * -log_timescale_increment + log_timescale_increment = np.log(float(1e4) / float(1)) / ( + num_timescales - 1 + ) + inv_timescales = ( + np.exp(np.arange(num_timescales)) * -log_timescale_increment + ) scaled_time = np.expand_dims(position, 1) * np.expand_dims( - inv_timescales, 0) + inv_timescales, 0 + ) signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant') position_enc = signal return position_enc.astype("float32") -pos_inp1 = position_encoding_init(ModelHyperParams.max_length, - ModelHyperParams.d_model) -pos_inp2 = position_encoding_init(ModelHyperParams.max_length, - ModelHyperParams.d_model) +pos_inp1 = position_encoding_init( + ModelHyperParams.max_length, ModelHyperParams.d_model +) +pos_inp2 = position_encoding_init( + ModelHyperParams.max_length, ModelHyperParams.d_model +) class PrePostProcessLayer(Layer): - def __init__(self, d_model, process_cmd, shape_len=None): super(PrePostProcessLayer, self).__init__() for cmd in process_cmd: @@ -227,11 +248,14 @@ class PrePostProcessLayer(Layer): self._layer_norm = LayerNorm( normalized_shape=d_model, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.)), + initializer=fluid.initializer.Constant(1.0) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.))) + initializer=fluid.initializer.Constant(0.0) + ), + ) - def forward(self, prev_out, out, process_cmd, dropout_rate=0.): + def forward(self, prev_out, out, process_cmd, dropout_rate=0.0): for cmd in process_cmd: if cmd == "a": # add residual connection out = out + prev_out if prev_out is not None else out @@ -243,12 +267,12 @@ class PrePostProcessLayer(Layer): out, dropout_prob=dropout_rate, seed=ModelHyperParams.dropout_seed, - is_test=False) + is_test=False, + ) return out class PositionwiseFeedForwardLayer(Layer): - def __init__(self, d_inner_hid, d_hid, dropout_rate): super(PositionwiseFeedForwardLayer, self).__init__() self._i2h = Linear(d_hid, d_inner_hid, act="relu") @@ -258,25 +282,28 @@ class PositionwiseFeedForwardLayer(Layer): def forward(self, x): hidden = self._i2h(x) if self._dropout_rate: - hidden = fluid.layers.dropout(hidden, - dropout_prob=self._dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) + hidden = fluid.layers.dropout( + hidden, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) out = self._h2o(hidden) return out class MultiHeadAttentionLayer(Layer): - - def __init__(self, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0., - cache=None, - gather_idx=None, - static_kv=False): + def __init__( + self, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.0, + cache=None, + gather_idx=None, + static_kv=False, + ): super(MultiHeadAttentionLayer, self).__init__() self._n_head = n_head self._d_key = d_key @@ -299,20 +326,25 @@ class MultiHeadAttentionLayer(Layer): # split head reshaped_q = fluid.layers.reshape( - x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False) + x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False + ) transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3]) reshaped_k = fluid.layers.reshape( - x=k, shape=[0, 0, 
self._n_head, self._d_key], inplace=False) + x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False + ) transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3]) reshaped_v = fluid.layers.reshape( - x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False) + x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False + ) transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) # scale dot product attention - product = fluid.layers.matmul(x=transpose_q, - y=transpose_k, - transpose_y=True, - alpha=self._d_model**-0.5) + product = fluid.layers.matmul( + x=transpose_q, + y=transpose_k, + transpose_y=True, + alpha=self._d_model**-0.5, + ) if attn_bias is not None: product += attn_bias weights = fluid.layers.softmax(product) @@ -321,7 +353,8 @@ class MultiHeadAttentionLayer(Layer): weights, dropout_prob=self._dropout_rate, seed=ModelHyperParams.dropout_seed, - is_test=False) + is_test=False, + ) out = fluid.layers.matmul(weights_droped, transpose_v) else: out = fluid.layers.matmul(weights, transpose_v) @@ -333,7 +366,8 @@ class MultiHeadAttentionLayer(Layer): final_out = fluid.layers.reshape( x=trans_x, shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]], - inplace=False) + inplace=False, + ) # fc to output proj_out = self._proj_fc(final_out) @@ -341,118 +375,150 @@ class MultiHeadAttentionLayer(Layer): class EncoderSubLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(EncoderSubLayer, self).__init__() self._preprocess_cmd = preprocess_cmd self._postprocess_cmd = postprocess_cmd self._prepostprocess_dropout = prepostprocess_dropout - self._preprocess_layer = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + self._preprocess_layer = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) self._multihead_attention_layer = MultiHeadAttentionLayer( - d_key, d_value, d_model, n_head, attention_dropout) - self._postprocess_layer = PrePostProcessLayer(d_model, - self._postprocess_cmd, - None) - self._preprocess_layer2 = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + d_key, d_value, d_model, n_head, attention_dropout + ) + self._postprocess_layer = PrePostProcessLayer( + d_model, self._postprocess_cmd, None + ) + self._preprocess_layer2 = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) self._positionwise_feed_forward = PositionwiseFeedForwardLayer( - d_inner_hid, d_model, relu_dropout) - self._postprocess_layer2 = PrePostProcessLayer(d_model, - self._postprocess_cmd, - None) + d_inner_hid, d_model, relu_dropout + ) + self._postprocess_layer2 = PrePostProcessLayer( + d_model, self._postprocess_cmd, None + ) def forward(self, enc_input, attn_bias): pre_process_multihead = self._preprocess_layer( - None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout) - attn_output = self._multihead_attention_layer(pre_process_multihead, - None, None, attn_bias) - attn_output = self._postprocess_layer(enc_input, attn_output, - self._postprocess_cmd, - self._prepostprocess_dropout) + None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout + ) + attn_output = self._multihead_attention_layer( + pre_process_multihead, None, None, attn_bias + ) + attn_output = self._postprocess_layer( + 
enc_input, + attn_output, + self._postprocess_cmd, + self._prepostprocess_dropout, + ) pre_process2_output = self._preprocess_layer2( - None, attn_output, self._preprocess_cmd, - self._prepostprocess_dropout) + None, + attn_output, + self._preprocess_cmd, + self._prepostprocess_dropout, + ) ffd_output = self._positionwise_feed_forward(pre_process2_output) - return self._postprocess_layer2(attn_output, ffd_output, - self._postprocess_cmd, - self._prepostprocess_dropout) + return self._postprocess_layer2( + attn_output, + ffd_output, + self._postprocess_cmd, + self._prepostprocess_dropout, + ) class EncoderLayer(Layer): - - def __init__(self, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(EncoderLayer, self).__init__() self._preprocess_cmd = preprocess_cmd self._encoder_sublayers = list() self._prepostprocess_dropout = prepostprocess_dropout self._n_layer = n_layer - self._preprocess_layer = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + self._preprocess_layer = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) for i in range(n_layer): self._encoder_sublayers.append( self.add_sublayer( 'esl_%d' % i, - EncoderSubLayer(n_head, d_key, d_value, d_model, - d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, - preprocess_cmd, postprocess_cmd))) + EncoderSubLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ), + ) + ) def forward(self, enc_input, attn_bias): for i in range(self._n_layer): enc_output = self._encoder_sublayers[i](enc_input, attn_bias) enc_input = enc_output - return self._preprocess_layer(None, enc_output, self._preprocess_cmd, - self._prepostprocess_dropout) + return self._preprocess_layer( + None, enc_output, self._preprocess_cmd, self._prepostprocess_dropout + ) class PrepareEncoderDecoderLayer(Layer): - - def __init__(self, - src_vocab_size, - src_emb_dim, - src_max_len, - dropout_rate, - is_sparse=False, - word_emb_param_name=None, - pos_enc_param_name=None): + def __init__( + self, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate, + is_sparse=False, + word_emb_param_name=None, + pos_enc_param_name=None, + ): super(PrepareEncoderDecoderLayer, self).__init__() self._src_max_len = src_max_len self._src_emb_dim = src_emb_dim self._src_vocab_size = src_vocab_size self._dropout_rate = dropout_rate - self._input_emb = Embedding(size=[src_vocab_size, src_emb_dim], - is_sparse=is_sparse, - padding_idx=0, - param_attr=fluid.ParamAttr( - name=word_emb_param_name, - initializer=fluid.initializer.Normal( - 0., src_emb_dim**-0.5))) + self._input_emb = Embedding( + size=[src_vocab_size, src_emb_dim], + is_sparse=is_sparse, + padding_idx=0, + param_attr=fluid.ParamAttr( + name=word_emb_param_name, + initializer=fluid.initializer.Normal(0.0, src_emb_dim**-0.5), + ), + ) if pos_enc_param_name is pos_enc_param_names[0]: pos_inp = pos_inp1 @@ -464,41 +530,50 @@ class PrepareEncoderDecoderLayer(Layer): param_attr=fluid.ParamAttr( name=pos_enc_param_name, initializer=fluid.initializer.NumpyArrayInitializer(pos_inp), - trainable=False)) + trainable=False, + ), + ) def forward(self, src_word, 
src_pos): src_word_emb = self._input_emb(src_word) - src_word_emb = fluid.layers.scale(x=src_word_emb, - scale=self._src_emb_dim**0.5) + src_word_emb = fluid.layers.scale( + x=src_word_emb, scale=self._src_emb_dim**0.5 + ) # # TODO change this to fit dynamic length input src_pos_emb = self._pos_emb(src_pos) src_pos_emb.stop_gradient = True enc_input = src_word_emb + src_pos_emb - return fluid.layers.dropout( - enc_input, - dropout_prob=self._dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) if self._dropout_rate else enc_input + return ( + fluid.layers.dropout( + enc_input, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) + if self._dropout_rate + else enc_input + ) class WrapEncoderLayer(Layer): - - def __init__(self, - src_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=False): + def __init__( + self, + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=False, + ): """ The wrapper assembles together all needed layers for the encoder. """ @@ -511,11 +586,21 @@ class WrapEncoderLayer(Layer): prepostprocess_dropout, is_sparse=is_sparse, word_emb_param_name=word_emb_param_names[0], - pos_enc_param_name=pos_enc_param_names[0]) - self._encoder = EncoderLayer(n_layer, n_head, d_key, d_value, d_model, - d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, - preprocess_cmd, postprocess_cmd) + pos_enc_param_name=pos_enc_param_names[0], + ) + self._encoder = EncoderLayer( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ) def forward(self, enc_inputs): src_word, src_pos, src_slf_attn_bias = enc_inputs @@ -525,26 +610,28 @@ class WrapEncoderLayer(Layer): class DecoderSubLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - cache=None, - gather_idx=None): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None, + gather_idx=None, + ): super(DecoderSubLayer, self).__init__() self._postprocess_cmd = postprocess_cmd self._preprocess_cmd = preprocess_cmd self._prepostprcess_dropout = prepostprocess_dropout - self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + self._pre_process_layer = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._multihead_attention_layer = MultiHeadAttentionLayer( d_key, d_value, @@ -552,11 +639,14 @@ class DecoderSubLayer(Layer): n_head, attention_dropout, cache=cache, - gather_idx=gather_idx) - self._post_process_layer = PrePostProcessLayer(d_model, postprocess_cmd, - None) - self._pre_process_layer2 = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + gather_idx=gather_idx, + ) + self._post_process_layer = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) + self._pre_process_layer2 = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._multihead_attention_layer2 = MultiHeadAttentionLayer( d_key, d_value, @@ 
-565,63 +655,86 @@ class DecoderSubLayer(Layer): attention_dropout, cache=cache, gather_idx=gather_idx, - static_kv=True) - self._post_process_layer2 = PrePostProcessLayer(d_model, - postprocess_cmd, None) - self._pre_process_layer3 = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + static_kv=True, + ) + self._post_process_layer2 = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) + self._pre_process_layer3 = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer( - d_inner_hid, d_model, relu_dropout) - self._post_process_layer3 = PrePostProcessLayer(d_model, - postprocess_cmd, None) + d_inner_hid, d_model, relu_dropout + ) + self._post_process_layer3 = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias): - pre_process_rlt = self._pre_process_layer(None, dec_input, - self._preprocess_cmd, - self._prepostprcess_dropout) + pre_process_rlt = self._pre_process_layer( + None, dec_input, self._preprocess_cmd, self._prepostprcess_dropout + ) slf_attn_output = self._multihead_attention_layer( - pre_process_rlt, None, None, slf_attn_bias) + pre_process_rlt, None, None, slf_attn_bias + ) slf_attn_output_pp = self._post_process_layer( - dec_input, slf_attn_output, self._postprocess_cmd, - self._prepostprcess_dropout) - pre_process_rlt2 = self._pre_process_layer2(None, slf_attn_output_pp, - self._preprocess_cmd, - self._prepostprcess_dropout) + dec_input, + slf_attn_output, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) + pre_process_rlt2 = self._pre_process_layer2( + None, + slf_attn_output_pp, + self._preprocess_cmd, + self._prepostprcess_dropout, + ) enc_attn_output_pp = self._multihead_attention_layer2( - pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias) - enc_attn_output = self._post_process_layer2(slf_attn_output_pp, - enc_attn_output_pp, - self._postprocess_cmd, - self._prepostprcess_dropout) - pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output, - self._preprocess_cmd, - self._prepostprcess_dropout) + pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias + ) + enc_attn_output = self._post_process_layer2( + slf_attn_output_pp, + enc_attn_output_pp, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) + pre_process_rlt3 = self._pre_process_layer3( + None, + enc_attn_output, + self._preprocess_cmd, + self._prepostprcess_dropout, + ) ffd_output = self._positionwise_feed_forward_layer(pre_process_rlt3) - dec_output = self._post_process_layer3(enc_attn_output, ffd_output, - self._postprocess_cmd, - self._prepostprcess_dropout) + dec_output = self._post_process_layer3( + enc_attn_output, + ffd_output, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) return dec_output class DecoderLayer(Layer): - - def __init__(self, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - caches=None, - gather_idx=None): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=None, + gather_idx=None, + ): super(DecoderLayer, self).__init__() - self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + self._pre_process_layer = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._decoder_sub_layers = list() self._n_layer = 
n_layer self._preprocess_cmd = preprocess_cmd @@ -630,53 +743,62 @@ class DecoderLayer(Layer): self._decoder_sub_layers.append( self.add_sublayer( 'dsl_%d' % i, - DecoderSubLayer(n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - cache=None if caches is None else caches[i], - gather_idx=gather_idx))) - - def forward(self, dec_input, enc_output, dec_slf_attn_bias, - dec_enc_attn_bias): + DecoderSubLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None if caches is None else caches[i], + gather_idx=gather_idx, + ), + ) + ) + + def forward( + self, dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias + ): for i in range(self._n_layer): - tmp_dec_output = self._decoder_sub_layers[i](dec_input, enc_output, - dec_slf_attn_bias, - dec_enc_attn_bias) + tmp_dec_output = self._decoder_sub_layers[i]( + dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias + ) dec_input = tmp_dec_output - dec_output = self._pre_process_layer(None, tmp_dec_output, - self._preprocess_cmd, - self._prepostprocess_dropout) + dec_output = self._pre_process_layer( + None, + tmp_dec_output, + self._preprocess_cmd, + self._prepostprocess_dropout, + ) return dec_output class WrapDecoderLayer(Layer): - - def __init__(self, - trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - caches=None, - gather_idx=None, - is_sparse=False): + def __init__( + self, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + caches=None, + gather_idx=None, + is_sparse=False, + ): """ The wrapper assembles together all needed layers for the encoder. 
""" @@ -689,20 +811,23 @@ class WrapDecoderLayer(Layer): prepostprocess_dropout, is_sparse=is_sparse, word_emb_param_name=word_emb_param_names[1], - pos_enc_param_name=pos_enc_param_names[1]) - self._decoder_layer = DecoderLayer(n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - caches=caches, - gather_idx=gather_idx) + pos_enc_param_name=pos_enc_param_names[1], + ) + self._decoder_layer = DecoderLayer( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=caches, + gather_idx=gather_idx, + ) self._weight_sharing = weight_sharing if not weight_sharing: self._fc = Linear(d_model, trg_vocab_size, bias_attr=False) @@ -710,17 +835,20 @@ class WrapDecoderLayer(Layer): def forward(self, dec_inputs=None, enc_output=None): trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs dec_input = self._prepare_decoder_layer(trg_word, trg_pos) - dec_output = self._decoder_layer(dec_input, enc_output, - trg_slf_attn_bias, trg_src_attn_bias) + dec_output = self._decoder_layer( + dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias + ) dec_output_reshape = fluid.layers.reshape( - dec_output, shape=[-1, dec_output.shape[-1]], inplace=False) + dec_output, shape=[-1, dec_output.shape[-1]], inplace=False + ) if self._weight_sharing: predict = fluid.layers.matmul( x=dec_output_reshape, y=self._prepare_decoder_layer._input_emb.weight, - transpose_y=True) + transpose_y=True, + ) else: predict = self._fc(dec_output_reshape) @@ -732,81 +860,91 @@ class WrapDecoderLayer(Layer): class TransFormer(Layer): - - def __init__(self, - src_vocab_size, - trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - label_smooth_eps, - use_py_reader=False, - is_test=False, - is_sparse=False): + def __init__( + self, + src_vocab_size, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + label_smooth_eps, + use_py_reader=False, + is_test=False, + is_sparse=False, + ): super(TransFormer, self).__init__() self._label_smooth_eps = label_smooth_eps self._trg_vocab_size = trg_vocab_size if weight_sharing: - assert src_vocab_size == trg_vocab_size, ( - "Vocabularies in source and target should be same for weight sharing." - ) - self._wrap_encoder_layer = WrapEncoderLayer(src_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=is_sparse) - self._wrap_decoder_layer = WrapDecoderLayer(trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=is_sparse) + assert ( + src_vocab_size == trg_vocab_size + ), "Vocabularies in source and target should be same for weight sharing." 
+ self._wrap_encoder_layer = WrapEncoderLayer( + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=is_sparse, + ) + self._wrap_decoder_layer = WrapDecoderLayer( + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=is_sparse, + ) if weight_sharing: - self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight + self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = ( + self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight + ) def forward(self, enc_inputs, dec_inputs, label, weights): enc_output = self._wrap_encoder_layer(enc_inputs) predict = self._wrap_decoder_layer(dec_inputs, enc_output) if self._label_smooth_eps: label_out = fluid.layers.label_smooth( - label=fluid.layers.one_hot(input=label, - depth=self._trg_vocab_size), - epsilon=self._label_smooth_eps) + label=fluid.layers.one_hot( + input=label, depth=self._trg_vocab_size + ), + epsilon=self._label_smooth_eps, + ) cost = fluid.layers.softmax_with_cross_entropy( logits=predict, label=label_out, - soft_label=True if self._label_smooth_eps else False) + soft_label=True if self._label_smooth_eps else False, + ) weighted_cost = cost * weights sum_cost = fluid.layers.reduce_sum(weighted_cost) token_num = fluid.layers.reduce_sum(weights) @@ -820,47 +958,54 @@ batch_num = 5 def fake_data_reader(): - def __reader__(): iteration = TrainTaskConfig.batch_size * batch_num for _ in range(iteration): # random data np.random.seed = 90 - src_word_np = np.arange(1, seq_len + 1).reshape([seq_len - ]).astype('int64') - src_pos_np = np.random.randint(1, - seq_len, - size=(seq_len), - dtype='int64') - src_slf_attn_bias_np = np.random.randn(ModelHyperParams.n_head, - seq_len, - seq_len).astype('float32') - - trg_word_np = np.arange(1, seq_len + 1).reshape([seq_len - ]).astype('int64') - trg_pos_np = np.random.randint(1, - seq_len, - size=(seq_len), - dtype='int64') - trg_slf_attn_bias_np = np.random.randn(ModelHyperParams.n_head, - seq_len, - seq_len).astype('float32') - trg_src_attn_bias_np = np.random.randn(ModelHyperParams.n_head, - seq_len, - seq_len).astype('float32') - - lbl_word_np = np.random.randint(1, - ModelHyperParams.src_vocab_size - 1, - size=(seq_len, 1), - dtype='int64') + src_word_np = ( + np.arange(1, seq_len + 1).reshape([seq_len]).astype('int64') + ) + src_pos_np = np.random.randint( + 1, seq_len, size=(seq_len), dtype='int64' + ) + src_slf_attn_bias_np = np.random.randn( + ModelHyperParams.n_head, seq_len, seq_len + ).astype('float32') + + trg_word_np = ( + np.arange(1, seq_len + 1).reshape([seq_len]).astype('int64') + ) + trg_pos_np = np.random.randint( + 1, seq_len, size=(seq_len), dtype='int64' + ) + trg_slf_attn_bias_np = np.random.randn( + ModelHyperParams.n_head, seq_len, seq_len + ).astype('float32') + trg_src_attn_bias_np = np.random.randn( + ModelHyperParams.n_head, seq_len, seq_len + ).astype('float32') + + lbl_word_np = np.random.randint( + 1, + ModelHyperParams.src_vocab_size - 1, + size=(seq_len, 1), + dtype='int64', + ) # Note(chenweihang): weight will introduce diff, so use constant here lbl_weight_np = np.ones((seq_len, 1)).astype('int64') data_inputs = [ - src_word_np, 
src_pos_np, src_slf_attn_bias_np, trg_word_np, - trg_pos_np, trg_slf_attn_bias_np, trg_src_attn_bias_np, - lbl_word_np, lbl_weight_np + src_word_np, + src_pos_np, + src_slf_attn_bias_np, + trg_word_np, + trg_pos_np, + trg_slf_attn_bias_np, + trg_src_attn_bias_np, + lbl_word_np, + lbl_weight_np, ] yield data_inputs @@ -884,20 +1029,30 @@ def np_to_variable(data): lbl_weight_np = lbl_weight_np.reshape(batch_size * seq_len, 1) data_inputs = [ - src_word_np, src_pos_np, src_slf_attn_bias_np, trg_word_np, trg_pos_np, - trg_slf_attn_bias_np, trg_src_attn_bias_np, lbl_word_np, lbl_weight_np + src_word_np, + src_pos_np, + src_slf_attn_bias_np, + trg_word_np, + trg_pos_np, + trg_slf_attn_bias_np, + trg_src_attn_bias_np, + lbl_word_np, + lbl_weight_np, ] var_inputs = [] - for i, field in enumerate(encoder_data_input_fields + - decoder_data_input_fields[:-1] + - label_data_input_fields): + for i, field in enumerate( + encoder_data_input_fields + + decoder_data_input_fields[:-1] + + label_data_input_fields + ): var_inputs.append(to_variable(data_inputs[i], name=field)) - enc_inputs = var_inputs[0:len(encoder_data_input_fields)] - dec_inputs = var_inputs[len(encoder_data_input_fields - ):len(encoder_data_input_fields) + - len(decoder_data_input_fields[:-1])] + enc_inputs = var_inputs[0 : len(encoder_data_input_fields)] + dec_inputs = var_inputs[ + len(encoder_data_input_fields) : len(encoder_data_input_fields) + + len(decoder_data_input_fields[:-1]) + ] label = var_inputs[-2] weights = var_inputs[-1] @@ -908,38 +1063,45 @@ naive_optimize = True class TestTransformer(TestParallelDyGraphRunnerBase): - def get_model(self): - model = TransFormer(ModelHyperParams.src_vocab_size, - ModelHyperParams.trg_vocab_size, - ModelHyperParams.max_length + 1, - ModelHyperParams.n_layer, - ModelHyperParams.n_head, - ModelHyperParams.d_key, - ModelHyperParams.d_value, - ModelHyperParams.d_model, - ModelHyperParams.d_inner_hid, - ModelHyperParams.prepostprocess_dropout, - ModelHyperParams.attention_dropout, - ModelHyperParams.relu_dropout, - ModelHyperParams.preprocess_cmd, - ModelHyperParams.postprocess_cmd, - ModelHyperParams.weight_sharing, - TrainTaskConfig.label_smooth_eps, - is_sparse=True) - train_reader = paddle.batch(fake_data_reader(), - TrainTaskConfig.batch_size) + model = TransFormer( + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.prepostprocess_dropout, + ModelHyperParams.attention_dropout, + ModelHyperParams.relu_dropout, + ModelHyperParams.preprocess_cmd, + ModelHyperParams.postprocess_cmd, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + is_sparse=True, + ) + train_reader = paddle.batch( + fake_data_reader(), TrainTaskConfig.batch_size + ) if naive_optimize: - optimizer = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=model.parameters()) + optimizer = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=model.parameters() + ) else: - optimizer = fluid.optimizer.Adam(learning_rate=NoamDecay( - ModelHyperParams.d_model, TrainTaskConfig.warmup_steps, - TrainTaskConfig.learning_rate), - beta1=TrainTaskConfig.beta1, - beta2=TrainTaskConfig.beta2, - epsilon=TrainTaskConfig.eps, - parameter_list=model.parameters()) + optimizer = fluid.optimizer.Adam( + learning_rate=NoamDecay( + ModelHyperParams.d_model, + 
TrainTaskConfig.warmup_steps, + TrainTaskConfig.learning_rate, + ), + beta1=TrainTaskConfig.beta1, + beta2=TrainTaskConfig.beta2, + epsilon=TrainTaskConfig.eps, + parameter_list=model.parameters(), + ) return model, train_reader, optimizer @@ -947,7 +1109,8 @@ class TestTransformer(TestParallelDyGraphRunnerBase): enc_inputs, dec_inputs, label, weights = np_to_variable(batch) dy_sum_cost, dy_avg_cost, dy_predict, dy_token_num = model( - enc_inputs, dec_inputs, label, weights) + enc_inputs, dec_inputs, label, weights + ) return dy_avg_cost diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_margin_cross_entropy.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_margin_cross_entropy.py index 9d852785f6db0c52c6dc22162ef28859d9243e64..343cce6f4bb3b1955e343e7adc0adb3c56f74b50 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_margin_cross_entropy.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_margin_cross_entropy.py @@ -30,7 +30,6 @@ def set_random_seed(seed): class TestParallelMarginSoftmaxCrossEntropyOp(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() fleet.init(is_collective=True, strategy=strategy) @@ -60,57 +59,71 @@ class TestParallelMarginSoftmaxCrossEntropyOp(unittest.TestCase): num_class = np.sum(num_class_per_card) for margin1, margin2, margin3, scale in zip( - margin1s, margin2s, margin3s, scales): + margin1s, margin2s, margin3s, scales + ): for _ in range(5): - np_label = np.random.randint(0, num_class, - (batch_size, )) + np_label = np.random.randint( + 0, num_class, (batch_size,) + ) label = paddle.to_tensor(np_label, dtype="int64") - input = paddle.randn(shape=[batch_size, feature_length], - dtype=dtype) + input = paddle.randn( + shape=[batch_size, feature_length], dtype=dtype + ) input.stop_gradient = False input_l2 = paddle.sqrt( - paddle.sum(paddle.square(input), - axis=1, - keepdim=True)) + paddle.sum( + paddle.square(input), axis=1, keepdim=True + ) + ) norm_input = paddle.divide(input, input_l2) weight = paddle.randn( shape=[feature_length, num_class_per_card[rank_id]], - dtype=dtype) + dtype=dtype, + ) weight.stop_gradient = False weight_l2 = paddle.sqrt( - paddle.sum(paddle.square(weight), - axis=0, - keepdim=True)) + paddle.sum( + paddle.square(weight), axis=0, keepdim=True + ) + ) norm_weight = paddle.divide(weight, weight_l2) data = paddle.matmul(norm_input, norm_weight) data.stop_gradient = False - sta = np.sum( - num_class_per_card[:rank_id]) if rank_id > 0 else 0 - end = np.sum(num_class_per_card[:rank_id + 1]) - - integral_data = np.zeros((batch_size, num_class), - dtype=dtype) - integral_data[:, - sta:end] = data.clone().detach().numpy() - integral_data = paddle.to_tensor(integral_data, - dtype=dtype) + sta = ( + np.sum(num_class_per_card[:rank_id]) + if rank_id > 0 + else 0 + ) + end = np.sum(num_class_per_card[: rank_id + 1]) + + integral_data = np.zeros( + (batch_size, num_class), dtype=dtype + ) + integral_data[:, sta:end] = ( + data.clone().detach().numpy() + ) + integral_data = paddle.to_tensor( + integral_data, dtype=dtype + ) paddle.distributed.all_reduce( integral_data, op=paddle.distributed.ReduceOp.SUM, - group=check_group) + group=check_group, + ) integral_data = integral_data.detach().clone() integral_data.stop_gradient = False # add arcface margin to logit theta = paddle.acos(integral_data) one_hot_label = paddle.nn.functional.one_hot( - label, num_classes=num_class) + label, num_classes=num_class + ) one_hot_label.stop_gradient = False 
if margin1 != 1.0: @@ -123,7 +136,10 @@ class TestParallelMarginSoftmaxCrossEntropyOp(unittest.TestCase): diff = one_hot_label * (margin_cos - integral_data) arc_data = (integral_data + diff) * scale - loss_a, softmax_a = paddle.nn.functional.margin_cross_entropy( + ( + loss_a, + softmax_a, + ) = paddle.nn.functional.margin_cross_entropy( data, label, margin1=margin1, @@ -132,54 +148,69 @@ class TestParallelMarginSoftmaxCrossEntropyOp(unittest.TestCase): scale=scale, group=check_group, return_softmax=True, - reduction=None) - loss_b, softmax_b = paddle.nn.functional.softmax_with_cross_entropy( + reduction=None, + ) + ( + loss_b, + softmax_b, + ) = paddle.nn.functional.softmax_with_cross_entropy( logits=arc_data, label=paddle.reshape(label, (-1, 1)), - return_softmax=True) - - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=1e-5, - atol=1e-7) - - integral_prob = np.zeros((batch_size, num_class), - dtype=dtype) - integral_prob[:, sta:end] = softmax_a.clone().detach( - ).numpy() - integral_prob = paddle.to_tensor(integral_prob, - dtype=dtype) + return_softmax=True, + ) + + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=1e-5, atol=1e-7 + ) + + integral_prob = np.zeros( + (batch_size, num_class), dtype=dtype + ) + integral_prob[:, sta:end] = ( + softmax_a.clone().detach().numpy() + ) + integral_prob = paddle.to_tensor( + integral_prob, dtype=dtype + ) paddle.distributed.all_reduce( integral_prob, op=paddle.distributed.ReduceOp.SUM, - group=check_group) + group=check_group, + ) integral_prob = integral_prob.detach().clone() integral_prob.stop_gradient = False - np.testing.assert_allclose(integral_prob.numpy(), - softmax_b.numpy(), - rtol=1e-5, - atol=1e-6) + np.testing.assert_allclose( + integral_prob.numpy(), + softmax_b.numpy(), + rtol=1e-5, + atol=1e-6, + ) loss_a = loss_a.sum() / batch_size loss_b = loss_b.sum() / batch_size loss_a.backward() loss_b.backward() - integral_grad = np.zeros((batch_size, num_class), - dtype=dtype) + integral_grad = np.zeros( + (batch_size, num_class), dtype=dtype + ) integral_grad[:, sta:end] = data.grad.clone().detach() - integral_grad = paddle.to_tensor(integral_grad, - dtype=dtype) + integral_grad = paddle.to_tensor( + integral_grad, dtype=dtype + ) paddle.distributed.all_reduce( integral_grad, op=paddle.distributed.ReduceOp.SUM, - group=check_group) - - np.testing.assert_allclose(integral_data.grad.numpy(), - integral_grad.numpy(), - rtol=1e-5, - atol=1e-7) + group=check_group, + ) + + np.testing.assert_allclose( + integral_data.grad.numpy(), + integral_grad.numpy(), + rtol=1e-5, + atol=1e-7, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py index bd4e761a7a1d4d13adbfd9532db52afa2579f5b5..3e173c790ea3fcb2a345554a5b3ea07de32d73d9 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py @@ -36,8 +36,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -45,13 +47,15 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + 
param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 with fluid.device_guard("gpu:1"): predict = fluid.layers.fc( @@ -59,26 +63,29 @@ def cnn_model(data): size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) # To cover @RENAMED@GRADIENT predict2 = fluid.layers.fc( input=conv_pool_1, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) predict += predict2 return predict class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data with fluid.device_guard("gpu:0"): - images = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype=DTYPE) + images = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype=DTYPE + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: @@ -86,7 +93,8 @@ class TestDistMnist2x2(TestDistRunnerBase): feed_list=[images, label], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) # Train program predict = cnn_model(images) with fluid.device_guard("gpu:1"): @@ -96,9 +104,9 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator with fluid.device_guard("gpu:1"): batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() base_lr = self.lr @@ -110,15 +118,18 @@ class TestDistMnist2x2(TestDistRunnerBase): opt = paddle.optimizer.AdamW( learning_rate=lr_val, - grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0), + ) acc_steps = 2 # accumulated steps for pipeline if dist_strategy: # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) fleet.init(is_collective=True) strategy = fleet.DistributedStrategy() strategy.pipeline = True @@ -126,23 +137,41 @@ class TestDistMnist2x2(TestDistRunnerBase): strategy.pipeline_configs = { 'micro_batch_size': batch_size, 'schedule_mode': '1F1B', - 'accumulate_steps': acc_steps + 'accumulate_steps': acc_steps, } - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size * acc_steps) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size * acc_steps) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size * acc_steps + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size * 
acc_steps + ) if dist_strategy: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + data_loader, + ) else: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py index 1933b55f023a83d86a8b5cddeb8741780e5703f5..e7f8a82f166b72229348bc0062508bea929d6cd4 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py @@ -36,8 +36,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -45,13 +47,15 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 with fluid.device_guard("gpu:1"): predict = fluid.layers.fc( @@ -59,26 +63,29 @@ def cnn_model(data): size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) # To cover @RENAMED@GRADIENT predict2 = fluid.layers.fc( input=conv_pool_1, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) predict += predict2 return predict class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data with fluid.device_guard("gpu:0"): - images = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype=DTYPE) + images = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype=DTYPE + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: @@ -86,7 +93,8 @@ class TestDistMnist2x2(TestDistRunnerBase): feed_list=[images, label], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) # Train program predict = cnn_model(images) with fluid.device_guard("gpu:1"): @@ -96,9 +104,9 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator with fluid.device_guard("gpu:1"): batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() base_lr = self.lr @@ -110,15 +118,18 @@ class TestDistMnist2x2(TestDistRunnerBase): opt = fluid.optimizer.Momentum( learning_rate=lr_val, momentum=0.9, - 
grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0), + ) acc_steps = 2 # accumulated steps for pipeline if dist_strategy: # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) fleet.init(is_collective=True) strategy = fleet.DistributedStrategy() strategy.pipeline = True @@ -126,23 +137,41 @@ class TestDistMnist2x2(TestDistRunnerBase): strategy.pipeline_configs = { 'micro_batch_size': batch_size, 'schedule_mode': 'F-then-B', - 'accumulate_steps': acc_steps + 'accumulate_steps': acc_steps, } - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size * acc_steps) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size * acc_steps) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size * acc_steps + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size * acc_steps + ) if dist_strategy: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + data_loader, + ) else: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py index 33964a6400e245421766acae87f75d1de52bf694..899cc24fdbad62216b564a5660548b5e6de52e74 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py @@ -36,8 +36,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -45,34 +47,37 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return predict class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, 
dist_strategy=None): # Input data device_id = 0 if dist_strategy: fleet.init(is_collective=True) with fluid.device_guard("gpu:0"): - images = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype=DTYPE) + images = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype=DTYPE + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: @@ -80,7 +85,8 @@ class TestDistMnist2x2(TestDistRunnerBase): feed_list=[images, label], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) # Train program predict = cnn_model(images) with fluid.device_guard("gpu:0"): @@ -90,9 +96,9 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator with fluid.device_guard("gpu:0"): batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() base_lr = self.lr @@ -104,28 +110,46 @@ class TestDistMnist2x2(TestDistRunnerBase): opt = fluid.optimizer.Momentum(learning_rate=lr_val, momentum=0.9) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) if dist_strategy: strategy = fleet.DistributedStrategy() strategy.pipeline = True strategy.pipeline_configs = { 'schedule_mode': 'F-then-B', - 'micro_batch_size': batch_size + 'micro_batch_size': batch_size, } - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) if dist_strategy: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + data_loader, + ) else: - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py index 40ec79b9844f1a6cda62c48b1810e4361d2a487f..cd874ae42b09ddc6e0d4adf6de3f7bbf90fd2d5d 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py @@ -27,60 +27,63 @@ IN_SIZE = 2 * MODEL_PARALLEL_SIZE OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -#fluid.default_startup_program().random_seed = 1 -#fluid.default_main_program().random_seed = 1 +# fluid.default_startup_program().random_seed = 1 +# fluid.default_main_program().random_seed = 1 def get_param_attr(weight, bias): weight_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(weight)) + initializer=fluid.initializer.NumpyArrayInitializer(weight) + ) bias_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(bias)) + initializer=fluid.initializer.NumpyArrayInitializer(bias) + ) return weight_attr, 
bias_attr def create_model(data, rank): np.random.seed(2021) np_weight = np.random.uniform(-1, 1, size=(IN_SIZE, OUT_SIZE)).astype(DTYPE) - np_bias = np.random.uniform(-1, 1, size=(OUT_SIZE, )).astype(DTYPE) + np_bias = np.random.uniform(-1, 1, size=(OUT_SIZE,)).astype(DTYPE) if rank is not None: start_col = 0 if rank == 0 else OUT_SIZE // 2 - np_weight_part = np_weight[:, start_col:start_col + OUT_SIZE // 2] - np_bias_part = np_bias[start_col:start_col + OUT_SIZE // 2] + np_weight_part = np_weight[:, start_col : start_col + OUT_SIZE // 2] + np_bias_part = np_bias[start_col : start_col + OUT_SIZE // 2] weight_attr, bias_attr = get_param_attr(np_weight_part, np_bias_part) - result = paddle.distributed.split(data, - size=(IN_SIZE, OUT_SIZE), - operation='linear', - axis=1, - num_partitions=MODEL_PARALLEL_SIZE, - weight_attr=weight_attr, - bias_attr=bias_attr) + result = paddle.distributed.split( + data, + size=(IN_SIZE, OUT_SIZE), + operation='linear', + axis=1, + num_partitions=MODEL_PARALLEL_SIZE, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) else: weight_attr, bias_attr = get_param_attr(np_weight, np_bias) - result = fluid.layers.fc(data, - size=OUT_SIZE, - param_attr=weight_attr, - bias_attr=bias_attr) + result = fluid.layers.fc( + data, size=OUT_SIZE, param_attr=weight_attr, bias_attr=bias_attr + ) predict = paddle.sum(result) return predict class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data(name='data_in', - shape=[batch_size, IN_SIZE], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -93,8 +96,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py index c14dd021729c34d1dbe346f4188a2cbe12c83222..44b0858712ae15d4d7a656f69c9bb829e3f8164d 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py @@ -27,61 +27,67 @@ IN_SIZE = 2 * MODEL_PARALLEL_SIZE OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -#fluid.default_startup_program().random_seed = 1 -#fluid.default_main_program().random_seed = 1 +# fluid.default_startup_program().random_seed = 1 +# fluid.default_main_program().random_seed = 1 def get_param_attr(weight, bias): weight_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(weight)) + initializer=fluid.initializer.NumpyArrayInitializer(weight) + ) bias_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(bias)) + initializer=fluid.initializer.NumpyArrayInitializer(bias) + ) return weight_attr, bias_attr def create_model(data, rank): np.random.seed(2021) np_weight = np.random.uniform(-1, 1, size=(IN_SIZE, OUT_SIZE)).astype(DTYPE) - np_bias = np.random.uniform(-1, 1, size=(OUT_SIZE, 
)).astype(DTYPE) + np_bias = np.random.uniform(-1, 1, size=(OUT_SIZE,)).astype(DTYPE) if rank is not None: start_row = 0 if rank == 0 else IN_SIZE // 2 - np_weight_part = np_weight[start_row:start_row + IN_SIZE // 2, :] + np_weight_part = np_weight[start_row : start_row + IN_SIZE // 2, :] weight_attr, bias_attr = get_param_attr(np_weight_part, np_bias) - result = paddle.distributed.split(data, - size=(IN_SIZE, OUT_SIZE), - operation='linear', - axis=0, - num_partitions=MODEL_PARALLEL_SIZE, - weight_attr=weight_attr, - bias_attr=bias_attr) + result = paddle.distributed.split( + data, + size=(IN_SIZE, OUT_SIZE), + operation='linear', + axis=0, + num_partitions=MODEL_PARALLEL_SIZE, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) else: weight_attr, bias_attr = get_param_attr(np_weight, np_bias) result = fluid.layers.fc( data, size=OUT_SIZE, param_attr=paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(np_weight)), - bias_attr=bias_attr) + initializer=fluid.initializer.NumpyArrayInitializer(np_weight) + ), + bias_attr=bias_attr, + ) predict = paddle.sum(result) return predict class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data(name='data_in', - shape=[batch_size, IN_SIZE], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -94,8 +100,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py index f147190a5d22c43f64a73c593071071f3b0e7484..832cad58eb8b69f4ed256a12fac4de8aad556224 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py @@ -27,8 +27,8 @@ IN_SIZE = 2 * MODEL_PARALLEL_SIZE OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -#fluid.default_startup_program().random_seed = 1 -#fluid.default_main_program().random_seed = 1 +# fluid.default_startup_program().random_seed = 1 +# fluid.default_main_program().random_seed = 1 def create_model(data, rank): @@ -36,15 +36,18 @@ def create_model(data, rank): np_weight = np.random.uniform(-1, 1, size=(IN_SIZE, OUT_SIZE)).astype(DTYPE) if rank is not None: start_row = 0 if rank == 0 else IN_SIZE // 2 - np_weight_part = np_weight[start_row:start_row + IN_SIZE // 2, :] + np_weight_part = np_weight[start_row : start_row + IN_SIZE // 2, :] result = paddle.distributed.split( data, size=(IN_SIZE, OUT_SIZE), operation='linear', axis=0, num_partitions=MODEL_PARALLEL_SIZE, - weight_attr=paddle.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(np_weight_part)), + weight_attr=paddle.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + np_weight_part + ) + ), bias_attr=False, ) else: @@ -52,7 +55,8 @@ def create_model(data, rank): data, size=OUT_SIZE, param_attr=paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(np_weight)), + initializer=fluid.initializer.NumpyArrayInitializer(np_weight) + ), bias_attr=False, ) @@ -61,19 +65,19 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data(name='data_in', - shape=[batch_size, IN_SIZE], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -86,8 +90,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py index 3a06bc29d0f110f280684324739b70299ad2b584..cb61e2c9a8ab292d7c54722aedbe7d8ee21e03ec 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py @@ -20,14 +20,16 @@ from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel -from paddle.fluid.tests.unittests.auto_checkpoint_utils import AutoCheckpointBase, get_logger +from paddle.fluid.tests.unittests.auto_checkpoint_utils import ( + AutoCheckpointBase, + get_logger, +) paddle.enable_static() logger = get_logger() class AutoCheckPointACLBase(AutoCheckpointBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -44,7 +46,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) @@ -66,7 +68,8 @@ class AutoCheckPointACLBase(AutoCheckpointBase): logger.info("begin _run_normal") compiled, data_loader, optimizer, loss, image, label = self._init_env( - exe, main_prog, startup_prog) + exe, main_prog, startup_prog + ) for i in range(3): self.assertEqual(acp._get_train_epoch_range(), None) self.assertEqual(acp.g_acp_type, None) @@ -91,8 +94,9 @@ class AutoCheckPointACLBase(AutoCheckpointBase): logger.info("begin _not_use_train") exe, main_prog, startup_prog = self._generate() - compiled, data_loader, optimizer, loss, image, label = \ - self._init_env(exe, main_prog, startup_prog) + compiled, data_loader, optimizer, loss, image, label = self._init_env( + exe, main_prog, startup_prog + ) epochs = [] for i in acp.train_epoch_range(3, 0): @@ -111,8 +115,9 @@ class AutoCheckPointACLBase(AutoCheckpointBase): exe, main_prog, startup_prog = self._generate() - compiled, 
data_loader, optimizer, loss, image, label = \ - self._init_env(exe, main_prog, startup_prog) + compiled, data_loader, optimizer, loss, image, label = self._init_env( + exe, main_prog, startup_prog + ) o = None i = 0 @@ -149,7 +154,8 @@ class AutoCheckPointACLBase(AutoCheckpointBase): fs.delete(save_dir) compiled, data_loader, optimizer, loss, image, label = self._init_env( - exe, main_prog, startup_prog) + exe, main_prog, startup_prog + ) o = None i = 0 @@ -195,7 +201,6 @@ class AutoCheckPointACLBase(AutoCheckpointBase): class AutoCheckpointTest(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -212,7 +217,7 @@ class AutoCheckpointTest(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_0", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_0", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py index 2db7b1e8f80682670d2ed4cf48d6df45479f95a9..583f84430624d67a1607a84662ef8f563bd2e085 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py @@ -24,7 +24,6 @@ logger = get_logger() class AutoCheckpointTest1(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -41,7 +40,7 @@ class AutoCheckpointTest1(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_1", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_1", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint2.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint2.py index 96b538ee9b846eb8af59a337e30795b7973bef71..3c0cc24c6efb117d4c21518eb47fafb9627cfdcb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint2.py @@ -24,7 +24,6 @@ logger = get_logger() class AutoCheckpointTest2(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -41,7 +40,7 @@ class AutoCheckpointTest2(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_2", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_2", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint3.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint3.py index 0e75f8e0c477380d62a8c17b64f6eb970265d898..beb19fa0b1eb458c0b1bba4843a0e878b1118d21 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint3.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint3.py @@ -24,7 +24,6 @@ logger = get_logger() class AutoCheckpointTest3(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -41,7 +40,7 @@ class AutoCheckpointTest3(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_3", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": 
".auto_checkpoint_test_3", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py index 331f6b104509ea81cfe7b36db4fe474705e701d7..702c3eb24a3fc99ae3c7be86461b87337d91f34a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py @@ -30,7 +30,6 @@ logger = get_logger() class AutoCheckpointTestDist(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -47,7 +46,7 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_dist_basic", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_dist_basic", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) @@ -65,8 +64,9 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase): # basic exe, main_prog, startup_prog = self._generate() - compiled, data_loader, optimizer, loss, image, label = \ - self._init_env(exe, main_prog, startup_prog, minimize=False) + compiled, data_loader, optimizer, loss, image, label = self._init_env( + exe, main_prog, startup_prog, minimize=False + ) # fleet os.environ["TRAINING_ROLE"] = "TRAINER" @@ -91,9 +91,9 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase): logger.info("_run_save_0 name:{} epoch_no:{}".format(o.name, i)) for data in data_loader(): - fetch = exe.run(fleet.main_program, - feed=data, - fetch_list=[loss]) + fetch = exe.run( + fleet.main_program, feed=data, fetch_list=[loss] + ) self.assertEqual(len(o._exe_status), 1) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py index 61e0733aa84a1cc1017f8d2cab7a7bda01a24f20..6bb59c5d2aa0c43336e13e240f8ef6c1ca572393 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py @@ -27,7 +27,6 @@ logger = get_logger() class AutoCheckpointTestMul(AutoCheckPointACLBase): - def setUp(self): get_logger() logger.info("enter tests") @@ -44,7 +43,7 @@ class AutoCheckpointTestMul(AutoCheckPointACLBase): "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_dist_multiple", "PADDLE_EDL_ONLY_FOR_CE_TEST": "1", "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_dist_multiple", - "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0" + "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0", } os.environ.update(proc_env) @@ -62,11 +61,23 @@ class AutoCheckpointTestMul(AutoCheckPointACLBase): exe, main_prog1, startup_prog1 = self._generate() _, main_prog2, startup_prog2 = self._generate() - compiled1, data_loader1, optimizer1, loss1, image1, label1 = \ - self._init_env(exe, main_prog1, startup_prog1) - - compiled2, data_loader2, optimizer2, loss2, image2, label2 = \ - self._init_env(exe, main_prog2, startup_prog2) + ( + compiled1, + data_loader1, + optimizer1, + loss1, + image1, + label1, + ) = self._init_env(exe, main_prog1, startup_prog1) + + ( + compiled2, + data_loader2, + optimizer2, + loss2, + image2, + label2, + ) = self._init_env(exe, main_prog2, startup_prog2) o = None epochs = [] diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py index ab5bd396d2c2ccea1647ebc3c6d2f7e7987fbbc3..fb27d8608801aafc694f32e25a0bbf11057defbc 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase): - def net(self): x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) @@ -38,7 +37,6 @@ class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase): return avg_cost, x, y def fake_reader(self): - def reader(): for i in range(10000): x = numpy.random.random((1, 13)).astype('float32') @@ -74,9 +72,11 @@ class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase): feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) for batch_id, data in enumerate(train_reader()): - exe.run(paddle.static.default_main_program(), - feed=feeder.feed(data), - fetch_list=[]) + exe.run( + paddle.static.default_main_program(), + feed=feeder.feed(data), + fetch_list=[], + ) fleet.stop_worker() @@ -89,9 +89,11 @@ class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase): role = role_maker.UserDefinedRoleMaker( current_id=0, role=role_maker.Role.WORKER - if training_role == "TRAINER" else role_maker.Role.SERVER, + if training_role == "TRAINER" + else role_maker.Role.SERVER, worker_num=1, - server_endpoints=["127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6002"], + ) if training_role == "TRAINER": self.run_trainer(role, strategy) @@ -137,9 +139,11 @@ half_run_server.run_ut() _python = sys.executable ps_cmd = "{} {}".format(_python, server_file) - ps_proc = subprocess.Popen(ps_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) os.environ["http_proxy"] = "" os.environ["https_proxy"] = "" diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py index 16cba174192b2b9eaddeaee06292caaae6c99b1f..f938079beb2353bfe0f5d18d5e4159867983dffe 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py @@ -27,7 +27,6 @@ import paddle.distributed.fleet as fleet class TestCommunicator(unittest.TestCase): - def net(self): x = fluid.layers.data(name='x', shape=[1], dtype='float32') y = fluid.layers.data(name='y', shape=[1], dtype='float32') @@ -43,8 +42,9 @@ class TestCommunicator(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" fleet.init(role_maker.PaddleCloudRoleMaker()) avg_cost = self.net() diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_momentum_op.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_momentum_op.py index 0bc6f6daeca0a1427876b88036640720f25a5f10..763df5bf73d7a8779bf8d6673d1a59253df09dbd 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_momentum_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid class TestDGCMomentumOp1(unittest.TestCase): - def get_tensor(self, name, value, place=None): tensor = self.scope.var(name).get_tensor() tensor.set(value, self.place if place is None else place) @@ -49,13 +48,17 @@ class TestDGCMomentumOp1(unittest.TestCase): self.param_name, self.param_tensor = self.get_tensor('Param', param) self.grad_name, self.grad_tensor = self.get_tensor('Grad', grad) self.velocity_name, self.velocity_tensor = self.get_tensor( - 'Velocity', velocity) + 'Velocity', velocity + ) self.learning_rate_name, self.learning_rate_tensor = self.get_tensor( - 'LearningRate', learning_rate) + 'LearningRate', learning_rate + ) self.current_step_name, self.current_step_tensor = self.get_tensor( - 'current_step', current_step, core.CPUPlace()) + 'current_step', current_step, core.CPUPlace() + ) self.nranks_name, self.nranks_tensor = self.get_tensor( - 'nranks', nranks, core.CPUPlace()) + 'nranks', nranks, core.CPUPlace() + ) self.kwargs = { # inputs @@ -65,12 +68,10 @@ class TestDGCMomentumOp1(unittest.TestCase): 'LearningRate': self.learning_rate_name, 'current_step': self.current_step_name, 'nranks': self.nranks_name, - # attrs 'mu': mu, 'use_nesterov': use_nesterov, 'rampup_begin_step': rampup_begin_step, - # outputs 'ParamOut': self.param_name, 'VelocityOut': self.velocity_name, @@ -79,8 +80,9 @@ class TestDGCMomentumOp1(unittest.TestCase): velocity_out = mu * velocity + grad / nranks if use_nesterov: - param_out = param - grad * learning_rate - \ - velocity_out * mu * learning_rate + param_out = ( + param - grad * learning_rate - velocity_out * mu * learning_rate + ) else: param_out = param - learning_rate * velocity_out @@ -89,7 +91,7 @@ class TestDGCMomentumOp1(unittest.TestCase): self.outputs = { 'ParamOut': param_out, 'VelocityOut': velocity_out, - 'SGDOut': sgd_out + 'SGDOut': sgd_out, } def check(self, actual_t, expect_t, place, out_name, atol=1e-5): @@ -98,8 +100,16 @@ class TestDGCMomentumOp1(unittest.TestCase): expect_t, rtol=1e-05, atol=atol, - err_msg='Output (' + out_name + ') has diff at ' + str(place) + - '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) + err_msg='Output (' + + out_name + + ') has diff at ' + + str(place) + + '\nExpect ' + + str(expect_t) + + '\n' + + 'But Got' + + str(actual_t), + ) def check_momentum_step(self, place): self.setup(place=place) @@ -107,11 +117,19 @@ class TestDGCMomentumOp1(unittest.TestCase): dgc_momentum_op = Operator(self.op_type, **self.kwargs) dgc_momentum_op.run(self.scope, self.place) - self.check(np.array(self.param_tensor), self.outputs['ParamOut'], - self.place, self.param_name) + self.check( + np.array(self.param_tensor), + self.outputs['ParamOut'], + self.place, + self.param_name, + ) - self.check(np.array(self.velocity_tensor), self.outputs['VelocityOut'], - self.place, self.velocity_name) + self.check( + np.array(self.velocity_tensor), + self.outputs['VelocityOut'], + self.place, + self.velocity_name, + ) def check_sgd_step(self, place): self.setup(place=place, step=15.0) @@ -119,8 +137,12 @@ class TestDGCMomentumOp1(unittest.TestCase): dgc_momentum_op = Operator(self.op_type, **self.kwargs) dgc_momentum_op.run(self.scope, self.place) - self.check(np.array(self.param_tensor), self.outputs['SGDOut'], - self.place, self.param_name) + self.check( + np.array(self.param_tensor), + 
self.outputs['SGDOut'], + self.place, + self.param_name, + ) def test_cuda_place(self): if not core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_op.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_op.py index 78d22325cf8e6accbdb764994a2981338cb324f4..6df74578aacadc5b5c6ac27faa3addd17f5e5de0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_op.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_op.py @@ -23,7 +23,6 @@ g_array_size = 102400 class TestDGCOp(unittest.TestCase): - def setup(self, place, array_size=g_array_size): size = array_size np.random.seed(5) # fix seed @@ -72,20 +71,23 @@ class TestDGCOp(unittest.TestCase): self.param_tensor.set(self.param, place) self.current_step_tensor = self.scope.var( - self.current_step_name).get_tensor() + self.current_step_name + ).get_tensor() self.current_step_tensor.set(self.current_step, core.CPUPlace()) self.nranks_tensor = self.scope.var(self.nranks_name).get_tensor() self.nranks_tensor.set(self.nranks, core.CPUPlace()) self.encode_grad_tensor = self.scope.var( - self.encode_grad_name).get_tensor() + self.encode_grad_name + ).get_tensor() self.k_tensor = self.scope.var(self.k_name).get_tensor() self.k_tensor.set(self.k, core.CPUPlace()) self.gather_buff_tensor = self.scope.var( - self.gather_buff_name).get_tensor() + self.gather_buff_name + ).get_tensor() def check(self, actual_t, expect_t, place, out_name, atol=1e-5): np.testing.assert_allclose( @@ -93,8 +95,16 @@ class TestDGCOp(unittest.TestCase): expect_t, rtol=1e-05, atol=atol, - err_msg='Output (' + out_name + ') has diff at ' + str(place) + - '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) + err_msg='Output (' + + out_name + + ') has diff at ' + + str(place) + + '\nExpect ' + + str(expect_t) + + '\n' + + 'But Got' + + str(actual_t), + ) def test_run_and_check(self): self.setup(place=core.CUDAPlace(0)) @@ -106,7 +116,6 @@ class TestDGCOp(unittest.TestCase): 'Param': self.param_name, 'current_step': self.current_step_name, 'nranks': self.nranks_name, - # outputs 'U_out': self.u_name, 'V_out': self.v_name, @@ -114,7 +123,6 @@ class TestDGCOp(unittest.TestCase): 'Grad_out': self.grad_name, 'k': self.k_name, 'GatherBuff': self.gather_buff_name, - # attrs 'm': 0.9, 'sparsity': [0.75, 0.9375, 0.984375, 0.996, 0.999], @@ -127,7 +135,7 @@ class TestDGCOp(unittest.TestCase): dgc_op = Operator('dgc', **kwargs) - #atol = 1e-6 + # atol = 1e-6 dgc_op.run(self.scope, self.place) u_out = np.array(self.u_tensor) @@ -144,7 +152,7 @@ class TestDGCOp(unittest.TestCase): self.assertEqual(k, int(g_array_size * 0.25)) index = encode_grad_out[0:k].view(dtype=np.int32) - value = encode_grad_out[k:2 * k] + value = encode_grad_out[k : 2 * k] acl = 1e-7 diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_optimizer.py index 50f1b389a0a4706721b1e01f60fb637cf020e169..0da05a377b36d6a2c6381ec33d635701eb9c92ee 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_optimizer.py @@ -24,20 +24,20 @@ paddle.enable_static() class TestDGCMomentumOptimizer(unittest.TestCase): - class MockDGCMomentum(optimizer.DGCMomentumOptimizer): - def get_accumulators(self): return self._accumulators def get_velocity_str(self): return self._u_velocity_acc_str - def check_dgc_momentum_optimizer(self, - dims=[5, 
10, 8], - name="momentum", - regularization=None, - use_recompute=False): + def check_dgc_momentum_optimizer( + self, + dims=[5, 10, 8], + name="momentum", + regularization=None, + use_recompute=False, + ): init_program = framework.Program() program = framework.Program() block = program.global_block() @@ -47,23 +47,25 @@ class TestDGCMomentumOptimizer(unittest.TestCase): lod_level=0, name="mul.x", optimize_attr={'learning_rate': 1.1}, - regularizer=None if regularization is not None else - regularizer.L2DecayRegularizer(2e-4)) - mul_y = block.create_var(dtype="float32", - shape=[dims[1], dims[2]], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[dims[0], dims[2]], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) + regularizer=None + if regularization is not None + else regularizer.L2DecayRegularizer(2e-4), + ) + mul_y = block.create_var( + dtype="float32", shape=[dims[1], dims[2]], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", + shape=[dims[0], dims[2]], + lod_level=0, + name="mul.out", + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) learning_rate = 0.01 dgc_momentum_optimizer = self.MockDGCMomentum( @@ -72,33 +74,40 @@ class TestDGCMomentumOptimizer(unittest.TestCase): rampup_begin_step=0, num_trainers=2, regularization=regularization, - grad_clip=clip.GradientClipByNorm(1.0)) + grad_clip=clip.GradientClipByNorm(1.0), + ) if use_recompute: dgc_momentum_optimizer = optimizer.RecomputeOptimizer( - dgc_momentum_optimizer) + dgc_momentum_optimizer + ) dgc_momentum_optimizer._set_checkpoints([]) - dgc_momentum_optimizer.get_accumulators = dgc_momentum_optimizer._optimizer.get_accumulators - dgc_momentum_optimizer.get_velocity_str = dgc_momentum_optimizer._optimizer.get_velocity_str - - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + dgc_momentum_optimizer.get_accumulators = ( + dgc_momentum_optimizer._optimizer.get_accumulators + ) + dgc_momentum_optimizer.get_velocity_str = ( + dgc_momentum_optimizer._optimizer.get_velocity_str + ) + + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) # params_grads = append_backward(mean_out) params_grads = dgc_momentum_optimizer.backward( - mean_out, startup_program=init_program) + mean_out, startup_program=init_program + ) with framework.program_guard(program, init_program): opts = dgc_momentum_optimizer.apply_gradients(params_grads) accumulator_count = 1 if name == "momentum" else 2 self.assertEqual(len(params_grads), 1) - self.assertEqual(len(dgc_momentum_optimizer.get_accumulators()), - accumulator_count) + self.assertEqual( + len(dgc_momentum_optimizer.get_accumulators()), accumulator_count + ) self.assertEqual(len(opts), 2) sgd_op = opts[-1] @@ -109,7 +118,8 @@ class TestDGCMomentumOptimizer(unittest.TestCase): accumulators = dgc_momentum_optimizer.get_accumulators() self.assertEqual(len(accumulators), accumulator_count) self.assertTrue( - dgc_momentum_optimizer.get_velocity_str() in accumulators) + dgc_momentum_optimizer.get_velocity_str() in accumulators + ) velocity_acc = 
accumulators[dgc_momentum_optimizer.get_velocity_str()] self.assertEqual(len(velocity_acc), 1) self.assertTrue(mul_x.name in velocity_acc) @@ -139,22 +149,26 @@ class TestDGCMomentumOptimizer(unittest.TestCase): momentum=0.2, rampup_begin_step=0, num_trainers=2, - grad_clip=clip.GradientClipByGlobalNorm(1.0)) + grad_clip=clip.GradientClipByGlobalNorm(1.0), + ) def test_momentum_without_dgc(self): self.check_dgc_momentum_optimizer( - regularization=regularizer.L1Decay(1e-4)) + regularization=regularizer.L1Decay(1e-4) + ) def test_momentum_with_dgc(self): # 16 * 1024 = 16384, use dgc momentum self.check_dgc_momentum_optimizer( dims=[16, 1024, 8], name="dgc_momentum", - regularization=regularizer.L2Decay(1e-4)) + regularization=regularizer.L2Decay(1e-4), + ) # check param.regularizer in dgc - self.check_dgc_momentum_optimizer(dims=[16, 1024, 8], - name="dgc_momentum") + self.check_dgc_momentum_optimizer( + dims=[16, 1024, 8], name="dgc_momentum" + ) def test_momentum_with_dgc_recompute(self): # 16 * 1024 = 16384, use dgc momentum @@ -162,7 +176,8 @@ class TestDGCMomentumOptimizer(unittest.TestCase): dims=[16, 1024, 8], name="dgc_momentum", regularization=regularizer.L2Decay(1e-4), - use_recompute=True) + use_recompute=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_dgc_nccl.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_dgc_nccl.py index 8a4fdfcd07bb8e8aaa09f404c460f02f7b4f2841..7a2b2425f2489e555d0a7fc43406ec3361519823 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_dgc_nccl.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_dgc_nccl.py @@ -27,7 +27,11 @@ def count_of_sparse_all_reduce_calls(file_name): # NOTE(Aurelius84): The log file contains some binary contents that causes error # while `grep`. So we add `-a` to fix it. # -a, --text equivalent to --binary-files=text, make binaries equivalent to text. 
- cmd = 'grep -a sparse_all_reduce_op_handle ' + file_name + ' | grep in_numel | wc -l' + cmd = ( + 'grep -a sparse_all_reduce_op_handle ' + + file_name + + ' | grep in_numel | wc -l' + ) child = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) result = child.communicate()[0] print('test_info: result = ' + str(result)) @@ -37,7 +41,6 @@ def count_of_sparse_all_reduce_calls(file_name): class TestDistMnistNCCL2DGC(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -47,17 +50,22 @@ class TestDistMnistNCCL2DGC(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place(os.path.abspath("../../dist_mnist.py"), - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + os.path.abspath("../../dist_mnist.py"), + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) def tearDown(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - log_file = os.path.join(self.temp_dir.name, - 'test_dist_mnist_dgc_nccl_tr0_err.log') + log_file = os.path.join( + self.temp_dir.name, 'test_dist_mnist_dgc_nccl_tr0_err.log' + ) result = count_of_sparse_all_reduce_calls(log_file) # only 1 layer use dgc now, run_step=5, rampup_begin_step=2, so 1 * (5 - 2) = 3 @@ -68,7 +76,6 @@ class TestDistMnistNCCL2DGC(TestDistBase): class TestDistMnistNCCL2DGCMultiCards(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -78,19 +85,23 @@ class TestDistMnistNCCL2DGCMultiCards(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place_multi_cards( os.path.abspath("../../dist_mnist.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) def tearDown(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): log_file = os.path.join( self.temp_dir.name, - 'test_dist_mnist_dgc_nccl_dgc_2cards_local.log') + 'test_dist_mnist_dgc_nccl_dgc_2cards_local.log', + ) result = count_of_sparse_all_reduce_calls(log_file) # same as above, but use two cards self.assertEqual(result, 6) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_gradient_merge.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_gradient_merge.py index 54c9d6b26c370c8f7b836fbbbc85629598754344..c0b978560e18c1fdb636bcdcc6235025381528eb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_gradient_merge.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_mnist_gradient_merge.py @@ -21,7 +21,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistMnistGradMerge(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -29,14 +28,15 @@ class TestDistMnistGradMerge(TestDistBase): def test_dist_train(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_mnist_gradient_merge.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist_gradient_merge.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestDistMnistGradMergeNoFuse(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -45,14 +45,15 @@ class TestDistMnistGradMergeNoFuse(TestDistBase): def test_dist_train(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_mnist_gradient_merge.py", - delta=1e-5, 
- check_error_log=True, - log_name=flag_name + "_no_fuse") + self.check_with_place( + "dist_mnist_gradient_merge.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name + "_no_fuse", + ) class TestDistMnistGradMergeRawOptimizerBase(TestDistBase): - def _setup_config(self): self._use_reader_alloc = False self._nccl2_mode = True @@ -66,19 +67,21 @@ class TestDistMnistGradMergeRawOptimizerBase(TestDistBase): if fluid.core.is_compiled_with_cuda(): avg = str(self.enable_avg()) log_name = flag_name + "_raw_optimizer_gm_avg_" + avg - self.check_with_place("dist_mnist_gradient_merge_raw_optimizer.py", - delta=1e-5, - check_error_log=True, - log_name=log_name, - need_envs={ - 'FLAGS_apply_pass_to_program': '1', - 'enable_gm_avg': avg, - }) + self.check_with_place( + "dist_mnist_gradient_merge_raw_optimizer.py", + delta=1e-5, + check_error_log=True, + log_name=log_name, + need_envs={ + 'FLAGS_apply_pass_to_program': '1', + 'enable_gm_avg': avg, + }, + ) class TestDistMnistGradMergeRawOptimizerAvg( - TestDistMnistGradMergeRawOptimizerBase): - + TestDistMnistGradMergeRawOptimizerBase +): def enable_avg(self): return True diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_se_resnext_dgc.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_se_resnext_dgc.py index 28936e36845142ac453c76b0f811f2c688c02104..7020e63248e8680ac699547e988c6b49c1c3ef70 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_se_resnext_dgc.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dist_se_resnext_dgc.py @@ -20,7 +20,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistSeResnetNCCL2DGC(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -31,11 +30,14 @@ class TestDistSeResnetNCCL2DGC(TestDistBase): @unittest.skip(reason="Skip unstable ci") def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place(os.path.abspath("../../dist_se_resnext.py"), - delta=30, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + os.path.abspath("../../dist_se_resnext.py"), + delta=30, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py index 92a6715a6424e3c6a46d812bcc36eb0ee50f906b..140a910c336ee4927f2ed2a590b30efa3833a3cd 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py @@ -15,15 +15,21 @@ import unittest import paddle import paddle.fluid as fluid -from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig, ServerRuntimeConfig -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet +from paddle.fluid.transpiler.distribute_transpiler import ( + DistributeTranspilerConfig, + ServerRuntimeConfig, +) +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, +) +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) import paddle.fluid.incubate.fleet.base.role_maker as role_maker import os class TestStrategyFactor(unittest.TestCase): - 
def test_sync_strategy(self): os.environ['CPU_NUM'] = "2" strategy = StrategyFactory.create_sync_strategy() @@ -48,19 +54,23 @@ class TestStrategyFactor(unittest.TestCase): # test set_program_config exception program_config_dict['unknown'] = None - self.assertRaises(Exception, strategy.set_program_config, - program_config_dict) + self.assertRaises( + Exception, strategy.set_program_config, program_config_dict + ) program_config_illegal = None - self.assertRaises(Exception, strategy.set_program_config, - program_config_illegal) + self.assertRaises( + Exception, strategy.set_program_config, program_config_illegal + ) trainer_runtime_config = strategy.get_trainer_runtime_config() trainer_runtime_config.runtime_configs[ - 'communicator_send_queue_size'] = '50' + 'communicator_send_queue_size' + ] = '50' runtime_configs = trainer_runtime_config.get_communicator_flags() self.assertIn('communicator_send_queue_size', runtime_configs) - self.assertNotIn('communicator_independent_recv_thread', - runtime_configs) + self.assertNotIn( + 'communicator_independent_recv_thread', runtime_configs + ) self.assertEqual(runtime_configs['communicator_send_queue_size'], '2') def test_geo_strategy(self): @@ -87,19 +97,22 @@ class TestStrategyFactor(unittest.TestCase): # test set_build_strategy exception build_strategy_dict['unknown'] = None - self.assertRaises(Exception, strategy.set_build_strategy, - build_strategy_dict) + self.assertRaises( + Exception, strategy.set_build_strategy, build_strategy_dict + ) build_strategy_illegal = None - self.assertRaises(Exception, strategy.set_build_strategy, - build_strategy_illegal) + self.assertRaises( + Exception, strategy.set_build_strategy, build_strategy_illegal + ) os.environ["CPU_NUM"] = '100' trainer_runtime_config = strategy.get_trainer_runtime_config() runtime_configs = trainer_runtime_config.get_communicator_flags() self.assertIn('communicator_thread_pool_size', runtime_configs) self.assertIn('communicator_send_wait_times', runtime_configs) - self.assertNotIn('communicator_independent_recv_thread', - runtime_configs) + self.assertNotIn( + 'communicator_independent_recv_thread', runtime_configs + ) def test_async_strategy(self): os.environ["CPU_NUM"] = '100' @@ -111,28 +124,40 @@ class TestStrategyFactor(unittest.TestCase): trainer_runtime_config = strategy.get_trainer_runtime_config() self.assertEqual( - trainer_runtime_config. 
- runtime_configs['communicator_send_queue_size'], '100') + trainer_runtime_config.runtime_configs[ + 'communicator_send_queue_size' + ], + '100', + ) # test set_trainer_runtime_config using dict trainer_runtime_config_dict = dict() trainer_runtime_config_dict['communicator_send_queue_size'] = '20' strategy.set_trainer_runtime_config(trainer_runtime_config_dict) trainer_runtime_config = strategy.get_trainer_runtime_config() - trainer_communicator_flags = trainer_runtime_config.get_communicator_flags( + trainer_communicator_flags = ( + trainer_runtime_config.get_communicator_flags() + ) + self.assertIn( + 'communicator_send_queue_size', trainer_communicator_flags ) - self.assertIn('communicator_send_queue_size', - trainer_communicator_flags) self.assertEqual( - trainer_communicator_flags['communicator_send_queue_size'], '20') + trainer_communicator_flags['communicator_send_queue_size'], '20' + ) # test set_trainer_runtime_config exception trainer_runtime_config_dict['unknown'] = None - self.assertRaises(Exception, strategy.set_trainer_runtime_config, - trainer_runtime_config_dict) + self.assertRaises( + Exception, + strategy.set_trainer_runtime_config, + trainer_runtime_config_dict, + ) trainer_runtime_config_illegal = None - self.assertRaises(Exception, strategy.set_trainer_runtime_config, - trainer_runtime_config_illegal) + self.assertRaises( + Exception, + strategy.set_trainer_runtime_config, + trainer_runtime_config_illegal, + ) # test set_execute_strategy using fluid.ExecutionStrategy exec_strategy_class = fluid.ExecutionStrategy() @@ -150,11 +175,13 @@ class TestStrategyFactor(unittest.TestCase): # test set_execute_strategy exception exec_strategy_dict['unknown'] = None - self.assertRaises(Exception, strategy.set_execute_strategy, - exec_strategy_dict) + self.assertRaises( + Exception, strategy.set_execute_strategy, exec_strategy_dict + ) exec_strategy_illegal = None - self.assertRaises(Exception, strategy.set_execute_strategy, - exec_strategy_illegal) + self.assertRaises( + Exception, strategy.set_execute_strategy, exec_strategy_illegal + ) def test_half_async_strategy(self): strategy = StrategyFactory.create_half_async_strategy() @@ -178,31 +205,39 @@ class TestStrategyFactor(unittest.TestCase): # test set_server_runtime_config exception server_runtime_config_dict['unknown'] = None - self.assertRaises(Exception, strategy.set_server_runtime_config, - server_runtime_config_dict) + self.assertRaises( + Exception, + strategy.set_server_runtime_config, + server_runtime_config_dict, + ) server_runtime_config_illegal = None - self.assertRaises(Exception, strategy.set_server_runtime_config, - server_runtime_config_illegal) + self.assertRaises( + Exception, + strategy.set_server_runtime_config, + server_runtime_config_illegal, + ) os.environ["CPU_NUM"] = '100' trainer_runtime_config = strategy.get_trainer_runtime_config() trainer_runtime_config.runtime_configs[ - 'communicator_send_queue_size'] = '50' + 'communicator_send_queue_size' + ] = '50' runtime_configs = trainer_runtime_config.get_communicator_flags() self.assertIn('communicator_send_queue_size', runtime_configs) - self.assertNotIn('communicator_independent_recv_thread', - runtime_configs) + self.assertNotIn( + 'communicator_independent_recv_thread', runtime_configs + ) self.assertEqual(runtime_configs['communicator_send_queue_size'], '100') class TestCreateDefaultStrategy(unittest.TestCase): - def test_default_strategy(self): role = role_maker.UserDefinedRoleMaker( current_id=0, role=role_maker.Role.WORKER, worker_num=2, - 
server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"], + ) fleet.init(role) def type_error_optimizer(): @@ -213,13 +248,13 @@ class TestCreateDefaultStrategy(unittest.TestCase): class TestHalfAsyncStrategy(unittest.TestCase): - def test_half_async_strategy(self): role = role_maker.UserDefinedRoleMaker( current_id=0, role=role_maker.Role.WORKER, worker_num=2, - server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"], + ) fleet.init(role) half_async_config = DistributeTranspilerConfig() @@ -233,7 +268,6 @@ class TestHalfAsyncStrategy(unittest.TestCase): class TestDebugInfo(unittest.TestCase): - def test_debug_info(self): x = fluid.layers.data(name='x', shape=[1], dtype='float32') y = fluid.layers.data(name='y', shape=[1], dtype='float32') @@ -245,16 +279,19 @@ class TestDebugInfo(unittest.TestCase): current_id=0, role=role_maker.Role.WORKER, worker_num=2, - server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"], + ) fleet.init(role) optimizer = fluid.optimizer.SGD(0.0001) strategy = StrategyFactory.create_sync_strategy() - strategy.set_debug_opt({ - "dump_param": ["fc_0.tmp_0"], - "dump_fields": ["fc_0.tmp_0", "fc_0.tmp_0@GRAD"], - "dump_fields_path": "dump_text/" - }) + strategy.set_debug_opt( + { + "dump_param": ["fc_0.tmp_0"], + "dump_fields": ["fc_0.tmp_0", "fc_0.tmp_0@GRAD"], + "dump_fields_path": "dump_text/", + } + ) optimizer = fleet.distributed_optimizer(optimizer, strategy) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute.py index afd619a390b7bbd80ee13d0e7ac9b89c2042d13c..9cadbf4d7b88ba286409f29f75887b8cd724e8c4 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute.py @@ -24,35 +24,41 @@ from paddle.incubate.distributed.fleet import recompute_sequential def get_fc_block(block_idx, input_size, is_last=False): block_name = "block_" + str(block_idx) block = paddle.nn.Sequential( - (block_name + "_fc_0", - paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_0", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_dropout", paddle.nn.Dropout(p=0.5)), (block_name + "_relu_1", paddle.nn.ReLU()), - (block_name + "_fc_1", - paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_1", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_relu_2", paddle.nn.ReLU()), ) if is_last: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, 1, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, 1, bias_attr=False), + ) # add sublayer else: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, - input_size, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ) # add sublayer return block class Naive_fc_net(paddle.nn.Layer): - - def __init__(self, - input_size=10, - recompute_blocks=[1, 3], - use_fleet_sq=False, - segments=1, - use_raw_recompute=False, - recompute_kwargs={}): + def __init__( + self, + input_size=10, + recompute_blocks=[1, 3], + use_fleet_sq=False, + segments=1, + 
use_raw_recompute=False, + recompute_kwargs={}, + ): super(Naive_fc_net, self).__init__() self.recompute_blocks = recompute_blocks self.recompute_kwargs = recompute_kwargs @@ -67,28 +73,37 @@ class Naive_fc_net(paddle.nn.Layer): self.runfunc4 = get_fc_block(4, input_size, is_last=True) if self.use_fleet_sq and not use_raw_recompute: - self.runfuncs = paddle.nn.Sequential(self.runfunc0, self.runfunc1, - self.runfunc2, self.runfunc3, - self.runfunc4) + self.runfuncs = paddle.nn.Sequential( + self.runfunc0, + self.runfunc1, + self.runfunc2, + self.runfunc3, + self.runfunc4, + ) self.layers = [ - self.runfunc0, self.runfunc1, self.runfunc2, self.runfunc3, - self.runfunc4 + self.runfunc0, + self.runfunc1, + self.runfunc2, + self.runfunc3, + self.runfunc4, ] # default segments = 2 if use_raw_recompute: self.layers = [ paddle.nn.Sequential(self.runfunc0, self.runfunc1), - paddle.nn.Sequential(self.runfunc2, self.runfunc3, - self.runfunc4) + paddle.nn.Sequential( + self.runfunc2, self.runfunc3, self.runfunc4 + ), ] def forward(self, inputs): if self.use_fleet_sq and not self.use_raw_recompute: - return recompute_sequential({"segments": self.segments}, - self.runfuncs, inputs) + return recompute_sequential( + {"segments": self.segments}, self.runfuncs, inputs + ) if self.use_raw_recompute: inputs = recompute(self.layers[0], inputs) @@ -96,36 +111,42 @@ class Naive_fc_net(paddle.nn.Layer): for i in range(len(self.layers)): if i in self.recompute_blocks: - inputs = recompute(self.layers[i], inputs, - **self.recompute_kwargs) + inputs = recompute( + self.layers[i], inputs, **self.recompute_kwargs + ) else: inputs = self.layers[i](inputs) return inputs -def run_model(recompute_block=[], - recompute_kwargs={}, - use_fleet_sq=False, - use_raw_recompute=False, - segments=1, - enable_autocast=False, - pure_fp16=False): +def run_model( + recompute_block=[], + recompute_kwargs={}, + use_fleet_sq=False, + use_raw_recompute=False, + segments=1, + enable_autocast=False, + pure_fp16=False, +): gen = paddle.seed(10) gen.manual_seed(10) np.random.seed(10) random.seed(10) batch_size, input_size = 1, 10 - model = Naive_fc_net(input_size, - recompute_blocks=recompute_block, - use_fleet_sq=use_fleet_sq, - use_raw_recompute=use_raw_recompute, - segments=segments, - recompute_kwargs=recompute_kwargs) + model = Naive_fc_net( + input_size, + recompute_blocks=recompute_block, + use_fleet_sq=use_fleet_sq, + use_raw_recompute=use_raw_recompute, + segments=segments, + recompute_kwargs=recompute_kwargs, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) if enable_autocast: scaler = paddle.amp.GradScaler() @@ -158,9 +179,7 @@ def run_model(recompute_block=[], class TestPyLayer(unittest.TestCase): - def test_base_case(self, enable_autocast=False, pure_fp16=False): - def check_identical(loss_ref, param_ref, grad_ref, loss, param, grad): self.assertEqual(loss_ref, loss) self.assertEqual(param_ref, param) @@ -170,43 +189,56 @@ class TestPyLayer(unittest.TestCase): loss_ref, param_ref, grad_ref = run_model( recompute_block=[], enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + pure_fp16=pure_fp16, + ) # recompute second block - loss, param, grad = run_model(recompute_block=[1], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) 
check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute fourth block - loss, param, grad = run_model(recompute_block=[3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute second to fourth block - loss, param, grad = run_model(recompute_block=[1, 2, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 2, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute second & fourth block - loss, param, grad = run_model(recompute_block=[1, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute second & fourth block using fleet - loss, param, grad = run_model(recompute_block=[1, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute using recompute_sequential, segments=1 - loss, param, grad = run_model(recompute_block=[], - use_fleet_sq=True, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[], + use_fleet_sq=True, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # with base recompute, and segments=2 @@ -214,14 +246,17 @@ class TestPyLayer(unittest.TestCase): recompute_block=[], enable_autocast=enable_autocast, use_raw_recompute=True, - pure_fp16=pure_fp16) + pure_fp16=pure_fp16, + ) # recompute using recompute_sequential, segments=2 - loss, param, grad = run_model(recompute_block=[], - use_fleet_sq=True, - segments=2, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[], + use_fleet_sq=True, + segments=2, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) def test_fc_net_with_dropout(self): @@ -237,8 +272,9 @@ class TestPyLayer(unittest.TestCase): paddle.set_device("gpu") kwargs = {"is_test": False} with self.assertRaises(TypeError): - loss_ref, param_ref, grad_ref = run_model(recompute_block=[2], - recompute_kwargs=kwargs) + loss_ref, param_ref, grad_ref = run_model( + recompute_block=[2], recompute_kwargs=kwargs + ) def test_recompute_cpu_rng(self): paddle.set_device("cpu") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute_for_eager.py index d69c52144f5f932a268817b5ed58a722a3cd4a92..9683f681451b6834bfae5653123f73b6e903c297 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_recompute_for_eager.py @@ -27,35 +27,41 @@ import random def get_fc_block(block_idx, input_size, is_last=False): block_name = "block_" + str(block_idx) block = paddle.nn.Sequential( - (block_name + "_fc_0", - 
paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_0", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_dropout", paddle.nn.Dropout(p=0.5)), (block_name + "_relu_1", paddle.nn.ReLU()), - (block_name + "_fc_1", - paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_1", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_relu_2", paddle.nn.ReLU()), ) if is_last: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, 1, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, 1, bias_attr=False), + ) # add sublayer else: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, - input_size, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ) # add sublayer return block class Naive_fc_net(paddle.nn.Layer): - - def __init__(self, - input_size=10, - recompute_blocks=[1, 3], - use_fleet_sq=False, - segments=1, - use_raw_recompute=False, - recompute_kwargs={}): + def __init__( + self, + input_size=10, + recompute_blocks=[1, 3], + use_fleet_sq=False, + segments=1, + use_raw_recompute=False, + recompute_kwargs={}, + ): super(Naive_fc_net, self).__init__() self.recompute_blocks = recompute_blocks self.recompute_kwargs = recompute_kwargs @@ -70,28 +76,37 @@ class Naive_fc_net(paddle.nn.Layer): self.runfunc4 = get_fc_block(4, input_size, is_last=True) if self.use_fleet_sq and not use_raw_recompute: - self.runfuncs = paddle.nn.Sequential(self.runfunc0, self.runfunc1, - self.runfunc2, self.runfunc3, - self.runfunc4) + self.runfuncs = paddle.nn.Sequential( + self.runfunc0, + self.runfunc1, + self.runfunc2, + self.runfunc3, + self.runfunc4, + ) self.layers = [ - self.runfunc0, self.runfunc1, self.runfunc2, self.runfunc3, - self.runfunc4 + self.runfunc0, + self.runfunc1, + self.runfunc2, + self.runfunc3, + self.runfunc4, ] # default segments = 2 if use_raw_recompute: self.layers = [ paddle.nn.Sequential(self.runfunc0, self.runfunc1), - paddle.nn.Sequential(self.runfunc2, self.runfunc3, - self.runfunc4) + paddle.nn.Sequential( + self.runfunc2, self.runfunc3, self.runfunc4 + ), ] def forward(self, inputs): if self.use_fleet_sq and not self.use_raw_recompute: return paddle.incubate.distributed.fleet.recompute_sequential( - {"segments": self.segments}, self.runfuncs, inputs) + {"segments": self.segments}, self.runfuncs, inputs + ) if self.use_raw_recompute: inputs = recompute(self.layers[0], inputs) @@ -99,36 +114,42 @@ class Naive_fc_net(paddle.nn.Layer): for i in range(len(self.layers)): if i in self.recompute_blocks: - inputs = recompute(self.layers[i], inputs, - **self.recompute_kwargs) + inputs = recompute( + self.layers[i], inputs, **self.recompute_kwargs + ) else: inputs = self.layers[i](inputs) return inputs -def run_model(recompute_block=[], - recompute_kwargs={}, - use_fleet_sq=False, - use_raw_recompute=False, - segments=1, - enable_autocast=False, - pure_fp16=False): +def run_model( + recompute_block=[], + recompute_kwargs={}, + use_fleet_sq=False, + use_raw_recompute=False, + segments=1, + enable_autocast=False, + pure_fp16=False, +): gen = paddle.seed(10) gen.manual_seed(10) np.random.seed(10) random.seed(10) batch_size, input_size = 1, 10 - model = Naive_fc_net(input_size, - recompute_blocks=recompute_block, - use_fleet_sq=use_fleet_sq, - use_raw_recompute=use_raw_recompute, - 
segments=segments, - recompute_kwargs=recompute_kwargs) + model = Naive_fc_net( + input_size, + recompute_blocks=recompute_block, + use_fleet_sq=use_fleet_sq, + use_raw_recompute=use_raw_recompute, + segments=segments, + recompute_kwargs=recompute_kwargs, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) if enable_autocast: scaler = paddle.amp.GradScaler() @@ -161,9 +182,7 @@ def run_model(recompute_block=[], class TestPyLayer(unittest.TestCase): - def test_base_case(self, enable_autocast=False, pure_fp16=False): - def check_identical(loss_ref, param_ref, grad_ref, loss, param, grad): self.assertEqual(loss_ref, loss) self.assertEqual(param_ref, param) @@ -173,37 +192,48 @@ class TestPyLayer(unittest.TestCase): loss_ref, param_ref, grad_ref = run_model( recompute_block=[], enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + pure_fp16=pure_fp16, + ) # recompute second block - loss, param, grad = run_model(recompute_block=[1], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute fourth block - loss, param, grad = run_model(recompute_block=[3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute second to fourth block - loss, param, grad = run_model(recompute_block=[1, 2, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 2, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute second & fourth block - loss, param, grad = run_model(recompute_block=[1, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # recompute_sequential with segments=1 using fleet - loss, param, grad = run_model(recompute_block=[], - use_fleet_sq=True, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[], + use_fleet_sq=True, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # with base recompute, and segments=2 @@ -211,14 +241,17 @@ class TestPyLayer(unittest.TestCase): recompute_block=[], enable_autocast=enable_autocast, use_raw_recompute=True, - pure_fp16=pure_fp16) + pure_fp16=pure_fp16, + ) # recompute using paddle.incubate.distributed.fleet.recompute_sequential, segments=2 - loss, param, grad = run_model(recompute_block=[], - use_fleet_sq=True, - segments=2, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[], + use_fleet_sq=True, + segments=2, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) def test_fc_net_with_dropout(self): @@ -228,7 +261,8 @@ class TestPyLayer(unittest.TestCase): loss_ref, param_ref, grad_ref = 
run_model( recompute_block=[2], recompute_kwargs={"preserve_rng_state": False}, - enable_autocast=True) + enable_autocast=True, + ) def test_fc_net_with_amp(self): self.test_base_case(enable_autocast=True) @@ -240,8 +274,9 @@ class TestPyLayer(unittest.TestCase): paddle.set_device("gpu") kwargs = {"is_test": False} with self.assertRaises(TypeError): - loss_ref, param_ref, grad_ref = run_model(recompute_block=[2], - recompute_kwargs=kwargs) + loss_ref, param_ref, grad_ref = run_model( + recompute_block=[2], recompute_kwargs=kwargs + ) def test_recompute_cpu_rng(self): paddle.set_device("cpu") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_optimizer_stage2.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_optimizer_stage2.py index f4d3f9728ef6d79edf2bce6e2001e7bcce9d2ce4..8087fc98b6a2b4338cdb16e1042a2694270c09e6 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_optimizer_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_optimizer_stage2.py @@ -21,8 +21,9 @@ class TestDygraphShardingOptimizerStage2(TestMultipleGpus): # check sharding logic as well as the accuracy with single mode def test_dygraph_sharding_optimizer_stage2(self): - self.run_mnist_2gpu('dygraph_sharding_optimizer_stage2.py', - eager_mode=False) + self.run_mnist_2gpu( + 'dygraph_sharding_optimizer_stage2.py', eager_mode=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage2.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage2.py index 07a9b1296e43c4a677069bac8515cf4ba4abf490..0dd1d4961b3799b5c3a475f72936d8d86e142461 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage2.py @@ -27,8 +27,9 @@ class TestDygraphShardingStage2(TestMultipleGpus): def test_dygraph_sharding_stage2_offload(self): self.run_mnist_2gpu('dygraph_group_sharded_stage2_offload.py') - self.run_mnist_2gpu('dygraph_sharding_stage2_offload.py', - eager_mode=False) + self.run_mnist_2gpu( + 'dygraph_sharding_stage2_offload.py', eager_mode=False + ) def test_dygraph_sharding_stage2_with_comm_overlap(self): self.run_mnist_2gpu('dygraph_group_sharded_stage2_comm_overlap.py') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage3.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage3.py index 82e7b8e12bfc9fc669df8e68fc6935eb79aa2946..ea4edbd34934fb3aeae52cb57340fb9579b0fe4b 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage3.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_dygraph_sharding_stage3.py @@ -28,8 +28,9 @@ class TestDygraphShardingStage3(TestMultipleGpus): self.run_mnist_2gpu('dygraph_sharding_stage3.py', eager_mode=False) def test_dygraph_sharding_stage3_offload(self): - self.run_mnist_2gpu('dygraph_sharding_stage3_offload.py', - eager_mode=False) + self.run_mnist_2gpu( + 'dygraph_sharding_stage3_offload.py', eager_mode=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_init.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_init.py index a7df64c1d92111fa7c51658badf8a83f410263dc..83e09ba98e02a02ca437e805b93220fc451c36d7 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_init.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_init.py @@ -26,23 +26,22 @@ paddle.enable_static() def gen_data(): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def mlp(input_x, input_y, hid_dim=128, label_dim=2): fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh') - prediction = paddle.static.nn.fc(x=[fc_2], - size=label_dim, - activation='softmax') + prediction = paddle.static.nn.fc( + x=[fc_2], size=label_dim, activation='softmax' + ) cost = F.cross_entropy(input=prediction, label=input_y) avg_cost = paddle.mean(x=cost) return avg_cost class TestFleetAMPInit(unittest.TestCase): - def test_fleet_amp_init(self): if not fluid.core.is_compiled_with_cuda(): return @@ -54,19 +53,20 @@ class TestFleetAMPInit(unittest.TestCase): fleet.init(role) with paddle.static.program_guard(main_program, startup_program): - input_x = paddle.static.data(name="x", - shape=[None, 32], - dtype='float32') - input_y = paddle.static.data(name="y", - shape=[None, 1], - dtype='int64') + input_x = paddle.static.data( + name="x", shape=[None, 32], dtype='float32' + ) + input_y = paddle.static.data( + name="y", shape=[None, 1], dtype='int64' + ) cost = mlp(input_x, input_y) optimizer = paddle.optimizer.Momentum( learning_rate=0.001, momentum=0.9, weight_decay=fluid.regularizer.L2Decay(1e-4), - multi_precision=True) + multi_precision=True, + ) optimizer = paddle.static.amp.decorate(optimizer) optimizer = fleet.distributed_optimizer(optimizer) @@ -82,9 +82,9 @@ class TestFleetAMPInit(unittest.TestCase): step = 1 for i in range(step): - cost_val = exe.run(program=main_program, - feed=gen_data(), - fetch_list=[cost.name]) + cost_val = exe.run( + program=main_program, feed=gen_data(), fetch_list=[cost.name] + ) def test_fleet_amp_meta_optimizer_init(self): if not fluid.core.is_compiled_with_cuda(): @@ -97,19 +97,20 @@ class TestFleetAMPInit(unittest.TestCase): fleet.init(role) with paddle.static.program_guard(main_program, startup_program): - input_x = paddle.static.data(name="x", - shape=[None, 32], - dtype='float32') - input_y = paddle.static.data(name="y", - shape=[None, 1], - dtype='int64') + input_x = paddle.static.data( + name="x", shape=[None, 32], dtype='float32' + ) + input_y = paddle.static.data( + name="y", shape=[None, 1], dtype='int64' + ) cost = mlp(input_x, input_y) optimizer = paddle.optimizer.Momentum( learning_rate=0.001, momentum=0.9, weight_decay=fluid.regularizer.L2Decay(1e-4), - multi_precision=True) + multi_precision=True, + ) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.amp = True @@ -131,9 +132,9 @@ class TestFleetAMPInit(unittest.TestCase): step = 3 for i in range(step): - cost_val = exe.run(program=main_program, - feed=gen_data(), - fetch_list=[cost.name]) + cost_val = exe.run( + program=main_program, feed=gen_data(), fetch_list=[cost.name] + ) print(cost_val) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_meta_optimizer.py index 7eb235a549933cfd0dc425c30c7704baa0579977..b2afb900c97c435651f1a40a81dcd261c82b45a5 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_meta_optimizer.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_amp_meta_optimizer.py @@ -24,14 +24,14 @@ paddle.enable_static() class TestFleetAMPOptimizer(TestFleetMetaOptimizer): - def test_amp_optimizer_backward(self): - """ test amp optimizer backward """ + """test amp optimizer backward""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = AMPOptimizer(opt) self.set_strategy(strategy, 'amp') @@ -44,12 +44,13 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertNotIn('check_finite_and_unscale', ops) def test_amp_optimizer_backward_gradients(self): - """ test amp optimizer backward + gradients""" + """test amp optimizer backward + gradients""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = AMPOptimizer(opt) self.set_strategy(strategy, 'amp') @@ -64,12 +65,13 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertIn('check_finite_and_unscale', ops) def test_amp_optimizer_backward_optimize(self): - """ test amp optimizer backward + optimizer """ + """test amp optimizer backward + optimizer""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = AMPOptimizer(opt) self.set_strategy(strategy, 'amp') @@ -83,7 +85,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertIn('check_finite_and_unscale', ops) def test_amp_optimizer(self): - """ test amp """ + """test amp""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -94,7 +96,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertIn('check_finite_and_unscale', ops) def test_pure_fp16_optimizer(self): - """ test pure fp16 """ + """test pure fp16""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'pure_fp16') @@ -109,7 +111,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertIn('check_finite_and_unscale', ops) def test_amp_distributed_optimizer(self): - """ test amp when distributed """ + """test amp when distributed""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -125,7 +127,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertEqual(check_count, len(train_prog.all_parameters())) def test_amp_recompute_optimizer(self): - """ test amp + recompute """ + """test amp + recompute""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -145,7 +147,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): self.assertIn('subprog', ''.join(outs)) def test_amp_recompute_lars_optimizer(self): - """ test amp + recompute """ + """test amp + recompute""" train_prog, startup_prog = 
fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py index ef091343204f37c991b73da7e0e8a5fd8e62ee59..a9c92849b0127ebbf511076c917a1799f84b1058 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py @@ -26,7 +26,6 @@ from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver class FleetTest(unittest.TestCase): - def _test_checkpoint(self, fs, dir_path): file_name = "persistables" @@ -39,8 +38,9 @@ class FleetTest(unittest.TestCase): image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') - feeder = fluid.DataFeeder(feed_list=[image, label], - place=fluid.CPUPlace()) + feeder = fluid.DataFeeder( + feed_list=[image, label], place=fluid.CPUPlace() + ) predict = fluid.layers.fc(input=image, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=predict, label=label) avg_loss = paddle.mean(loss) @@ -54,26 +54,24 @@ class FleetTest(unittest.TestCase): status = ExeTrainStatus() status.epoch_no = 2 - _, n1 = fleet.save_checkpoint(exe, - dir_path, - trainer_id=0, - train_status=status, - fs=fs) + _, n1 = fleet.save_checkpoint( + exe, dir_path, trainer_id=0, train_status=status, fs=fs + ) status2 = ExeTrainStatus() - fleet.load_checkpoint(exe, - dir_path, - trainer_id=0, - fs=fs, - train_status=status2) + fleet.load_checkpoint( + exe, dir_path, trainer_id=0, fs=fs, train_status=status2 + ) self.assertEqual(status2, status) - _, n2 = fleet.save_checkpoint(exe, - dir_path, - trainer_id=0, - train_status=status, - fs=fs, - remain_all_checkpoint=False) + _, n2 = fleet.save_checkpoint( + exe, + dir_path, + trainer_id=0, + train_status=status, + fs=fs, + remain_all_checkpoint=False, + ) self.assertEqual(n2, n1 + 1) c = CheckpointSaver(fs) @@ -82,36 +80,42 @@ class FleetTest(unittest.TestCase): # unnormal # test remain_all_checkpoint - fleet.save_checkpoint(exe, - dir_path, - trainer_id=0, - train_status=status, - fs=fs, - remain_all_checkpoint=False) + fleet.save_checkpoint( + exe, + dir_path, + trainer_id=0, + train_status=status, + fs=fs, + remain_all_checkpoint=False, + ) # can't save under a file fs = LocalFS() cache_path = "./.load_cache" fs.touch(cache_path) try: - fleet.save_checkpoint(exe, - dir_path, - trainer_id=0, - train_status=status, - fs=fs, - cache_path=cache_path) + fleet.save_checkpoint( + exe, + dir_path, + trainer_id=0, + train_status=status, + fs=fs, + cache_path=cache_path, + ) self.assertFalse(True) except: pass # can't load under a file try: - fleet.load_checkpoint(exe, - dir_path, - trainer_id=0, - train_status=status2, - fs=fs, - cache_path=cache_path) + fleet.load_checkpoint( + exe, + dir_path, + trainer_id=0, + train_status=status2, + fs=fs, + cache_path=cache_path, + ) self.assertFalse(True) except: pass diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_dgc_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_dgc_meta_optimizer.py index 522b563bc568377b845a763426893019501e3543..e75771e5c00972269777186e83d291ec21675f60 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_dgc_meta_optimizer.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_dgc_meta_optimizer.py @@ -25,15 +25,15 @@ paddle.enable_static() class TestFleetDGCOptimizer(TestFleetMetaOptimizer): - def test_dgc_optimizer_backward(self): - """ test dgc optimizer backward """ + """test dgc optimizer backward""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) dgc_opt = DGCOptimizer(opt) role = role_maker.PaddleCloudRoleMaker(is_collective=True) dgc_opt._set_basic_info(avg_cost, role, opt, strategy) @@ -43,13 +43,14 @@ class TestFleetDGCOptimizer(TestFleetMetaOptimizer): self.assertNotIn('dgc', ops) def test_dgc_optimizer_gradients(self): - """ test dgc optimizer backward + gradients """ + """test dgc optimizer backward + gradients""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) dgc_opt = DGCOptimizer(opt) role = role_maker.PaddleCloudRoleMaker(is_collective=True) dgc_opt._set_basic_info(avg_cost, role, opt, strategy) @@ -62,13 +63,14 @@ class TestFleetDGCOptimizer(TestFleetMetaOptimizer): self.assertIn('dgc_momentum', ops) def test_dgc_optimizer_optimize(self): - """ test dgc optimizer backward + optimize """ + """test dgc optimizer backward + optimize""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) dgc_opt = DGCOptimizer(opt) role = role_maker.PaddleCloudRoleMaker(is_collective=True) dgc_opt._set_basic_info(avg_cost, role, opt, strategy) @@ -130,8 +132,8 @@ class TestFleetDGCOptimizer(TestFleetMetaOptimizer): self.assertIn('subprog', ''.join(outs)) def test_amp_recompute_lars_dgc_not_apply_optimizer(self): - """ test amp + recompute + lars + dgc, - amp -/-> dgc, max_path is amp-->recompute-->lars + """test amp + recompute + lars + dgc, + amp -/-> dgc, max_path is amp-->recompute-->lars """ train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_distributed_strategy.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_distributed_strategy.py index d2b57d9a8d0bc2303203019cf0fc18e752340610..51105cbcb1028accdd23ec10c98bbe3e4711a0a2 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_distributed_strategy.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_distributed_strategy.py @@ -17,7 +17,6 @@ import paddle class TestStrategyConfig(unittest.TestCase): - def test_amp(self): strategy = paddle.distributed.fleet.DistributedStrategy() strategy.amp = True @@ -35,7 +34,7 @@ class TestStrategyConfig(unittest.TestCase): "incr_every_n_steps": 1000, "incr_ratio": 2.0, "use_dynamic_loss_scaling": True, - "decr_ratio": 0.5 + "decr_ratio": 0.5, } strategy.amp_configs = configs self.assertEqual(strategy.amp_configs["init_loss_scaling"], 
32768) @@ -78,7 +77,7 @@ class TestStrategyConfig(unittest.TestCase): strategy.hybrid_configs = { "dp_degree": 1, "mp_degree": 2, - "pp_degree": 4 + "pp_degree": 4, } self.assertEqual(strategy.hybrid_configs["dp_degree"], 1) self.assertEqual(strategy.hybrid_configs["mp_degree"], 2) @@ -259,19 +258,26 @@ class TestStrategyConfig(unittest.TestCase): strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = { - "table_parameters.emb.accessor.embed_sgd_param.adagrad.learning_rate": - 0.05, + "table_parameters.emb.accessor.embed_sgd_param.adagrad.learning_rate": 0.05, "table_parameters.emb.accessor.table_accessor_save_param.num": 2, - "table_parameters.emb.accessor.table_accessor_save_param.param": - [1, 2] + "table_parameters.emb.accessor.table_accessor_save_param.param": [ + 1, + 2, + ], } strategy.sparse_table_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adagrad. - learning_rate, 0.05) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adagrad.learning_rate, + 0.05, + ) self.assertEqual( - strategy.sparse_table_configs[0].accessor. - table_accessor_save_param[0].param, 1) + strategy.sparse_table_configs[0] + .accessor.table_accessor_save_param[0] + .param, + 1, + ) strategy.adam_d2sum = True self.assertEqual(strategy.adam_d2sum, True) @@ -279,7 +285,7 @@ class TestStrategyConfig(unittest.TestCase): "uri": "123", "user": "456", "passwd": "789", - "hadoop_bin": "hadoop" + "hadoop_bin": "hadoop", } self.assertEqual(strategy.fs_client_param.user, "456") @@ -289,68 +295,90 @@ class TestStrategyConfig(unittest.TestCase): configs['emb'] = {"sparse_optimizer": "adagrad"} strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adagrad. - learning_rate, 0.05) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adagrad.learning_rate, + 0.05, + ) strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = {"sparse_optimizer": "naive"} strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.naive. - learning_rate, 0.05) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.naive.learning_rate, + 0.05, + ) strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = {"sparse_optimizer": "adam"} strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adam. - beta1_decay_rate, 0.9) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adam.beta1_decay_rate, + 0.9, + ) strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = { "sparse_accessor_class": "DownpourUnitAccessor", - "embed_sparse_optimizer": "std_adagrad" + "embed_sparse_optimizer": "std_adagrad", } strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.ctr_accessor_param. - show_scale, False) + strategy.sparse_table_configs[ + 0 + ].accessor.ctr_accessor_param.show_scale, + False, + ) self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adagrad. 
- initial_range, 0) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adagrad.initial_range, + 0, + ) strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = { "sparse_accessor_class": "DownpourCtrDoubleAccessor", - "embed_sparse_optimizer": "std_adagrad" + "embed_sparse_optimizer": "std_adagrad", } strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adagrad. - initial_range, 0.0001) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adagrad.initial_range, + 0.0001, + ) strategy = paddle.distributed.fleet.DistributedStrategy() configs = {} configs['emb'] = {"sparse_optimizer": "shared_adam"} strategy.fleet_desc_configs = configs self.assertEqual( - strategy.sparse_table_configs[0].accessor.embed_sgd_param.adam. - beta1_decay_rate, 0.9) + strategy.sparse_table_configs[ + 0 + ].accessor.embed_sgd_param.adam.beta1_decay_rate, + 0.9, + ) def test_trainer_desc_configs(self): strategy = paddle.distributed.fleet.DistributedStrategy() configs = { "dump_fields_path": "dump_data", "dump_fields": ["xxx", "yyy"], - "dump_param": ['zzz'] + "dump_param": ['zzz'], } strategy.trainer_desc_configs = configs - self.assertEqual(strategy.trainer_desc_configs["dump_fields_path"], - "dump_data") + self.assertEqual( + strategy.trainer_desc_configs["dump_fields_path"], "dump_data" + ) self.assertEqual(len(strategy.trainer_desc_configs["dump_fields"]), 2) self.assertEqual(len(strategy.trainer_desc_configs["dump_param"]), 1) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py index 363843dd5e83993a431706f844a41e0505f3f18b..e170c9567ef28fa4717a21f65f3f07c42e56798f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py @@ -23,27 +23,27 @@ paddle.enable_static() class TestFleetFP16CompressOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" def net(self, main_prog, startup_prog, dtype='float32'): with fluid.program_guard(main_prog, startup_prog): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype=dtype) - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype=dtype + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -62,7 +62,8 @@ class TestFleetFP16CompressOptimizer(unittest.TestCase): ops = [op.type for op in avg_cost.block.ops] cast_out = [ - op.output('Out')[0] for op in avg_cost.block.ops + op.output('Out')[0] + for op in avg_cost.block.ops if op.type == 'cast' ] diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py index 99bc614b14ee1a0dac8e3b0ccca84ce03e2021b0..459a3233148d96bc7aaac1314c497f629ad3dd3f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py @@ -21,9 +21,10 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): - def test_gradient_merge_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -33,7 +34,9 @@ class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('@GradientMerge', ''.join(vars)) def test_recom_gm_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -45,7 +48,9 @@ class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('subprog', ''.join(vars)) def test_gm_amp_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -57,7 +62,9 @@ class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('cast', ''.join(vars)) def test_gm_pure_fp16_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -67,8 +74,9 @@ class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): params = train_prog.all_parameters() for param in train_prog.all_parameters(): - self.assertEqual(param.dtype, - paddle.fluid.core.VarDesc.VarType.FP16) + self.assertEqual( + param.dtype, paddle.fluid.core.VarDesc.VarType.FP16 + ) vars = [x.name for x in train_prog.list_vars()] self.assertIn('@GradientMerge', ''.join(vars)) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py index bc6a554f84d8ff494fdcdb7accc0d793c137027b..a146800d0cfaa5523c3f49de8429471efadafc19 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): - def setUp(self): try: self._dist_ut_port_0 = int(os.environ["PADDLE_DIST_UT_PORT"]) @@ -34,58 +33,53 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): port_a = self._dist_ut_port_0 port_b = self._dist_ut_port_1 node_a = { - "PADDLE_TRAINER_ID": - "0", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_a), - 
"PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "0", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_a), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } node_b = { - "PADDLE_TRAINER_ID": - "1", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_b), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "1", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_b), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } def node_func(): import paddle.distributed.fleet as fleet + fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace()) @@ -102,60 +96,55 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): port_b = self._dist_ut_port_1 + 2 node_a = { - "PADDLE_TRAINER_ID": - "0", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_a), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "0", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_a), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } node_b = { - "PADDLE_TRAINER_ID": - "1", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_b), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "1", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_b), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } def node_func(): import paddle.distributed.fleet as fleet + fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') 
+ input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.nccl_comm_num = 2 strategy.sync_nccl_allreduce = True optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace()) exe.run(paddle.fluid.default_startup_program()) @@ -165,7 +154,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): def gen_data(): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } for i in range(10): @@ -182,58 +171,53 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): port_a = self._dist_ut_port_0 + 4 port_b = self._dist_ut_port_1 + 4 node_a = { - "PADDLE_TRAINER_ID": - "0", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_a), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "0", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_a), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } node_b = { - "PADDLE_TRAINER_ID": - "1", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_b), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "1", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_b), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } def node_func(): import paddle.distributed.fleet as fleet + fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() optimizer = 
paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace()) @@ -249,60 +233,55 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): port_a = self._dist_ut_port_0 + 6 port_b = self._dist_ut_port_1 + 6 node_a = { - "PADDLE_TRAINER_ID": - "0", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_a), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "0", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_a), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } node_b = { - "PADDLE_TRAINER_ID": - "1", - "PADDLE_CURRENT_ENDPOINT": - "127.0.0.1:{}".format(port_b), - "PADDLE_TRAINERS_NUM": - "2", - "PADDLE_TRAINER_ENDPOINTS": - "127.0.0.1:{},127.0.0.1:{}".format(port_a, port_b), - "http_proxy": - "", - "https_proxy": - "" + "PADDLE_TRAINER_ID": "1", + "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:{}".format(port_b), + "PADDLE_TRAINERS_NUM": "2", + "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:{},127.0.0.1:{}".format( + port_a, port_b + ), + "http_proxy": "", + "https_proxy": "", } def node_func(): import paddle.distributed.fleet as fleet + fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.nccl_comm_num = 2 strategy.sync_nccl_allreduce = True optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace()) exe.run(paddle.fluid.default_startup_program()) @@ -312,7 +291,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): def gen_data(): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } for i in range(10): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py index 3d30baba9a3f61765797232df76dcefe947b5f12..0a75eeac8cb4c2d931e1389adf3087b87a6fc2b0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py +++ 
b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py @@ -21,7 +21,6 @@ from launch_function_helper import launch_func class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): - def test_graph_execution_optimizer(self): node_a = { "PADDLE_TRAINER_ID": "0", @@ -30,7 +29,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:36001,127.0.0.1:36002", "http_proxy": "", "https_proxy": "", - "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1" + "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1", } node_b = { @@ -40,34 +39,36 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:36001,127.0.0.1:36002", "http_proxy": "", "https_proxy": "", - "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1" + "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1", } def node_func(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.nccl_comm_num = 2 strategy.sync_nccl_allreduce = True optimizer = paddle.optimizer.SGD(learning_rate=0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) exe = paddle.fluid.Executor(place=paddle.fluid.CPUPlace()) exe.run(paddle.fluid.default_startup_program()) @@ -77,7 +78,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): def gen_data(): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } for i in range(5): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_hybrid_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_hybrid_meta_optimizer.py index 3062812223d6451406283be7258721459581be9e..5933b3a96a4c855e303d316e37f29a3df1aa464f 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_hybrid_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_hybrid_meta_optimizer.py @@ -24,11 +24,11 @@ paddle.enable_static() class TestFleetHybridOptimizer(TestFleetMetaOptimizer): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "3" - os.environ["PADDLE_TRAINER_ENDPOINTS"] = \ - "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" + os.environ[ + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" # pre-assigned ring id self.mp_ring_id = 0 self.sharding_ring_id = 1 @@ -65,40 +65,122 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] 
# global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'softmax', 'cross_entropy2', 'reduce_mean', - 'fill_constant', 'reduce_mean_grad', 'cross_entropy_grad2', - 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_sync_comm_stream', 'momentum', 'momentum', - 'momentum', 'momentum', 'momentum', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op 
in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.dp_ring_id, created_ring_ids) @@ -107,16 +189,20 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group pp_group_waiting_prots = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and \ - op.desc.output_arg_names()[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_prots = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_prots, ['127.0.0.1:36003']) # check correctness of sharding group dp_group_waiting_ports = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" \ - and op.desc.output_arg_names()[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) @@ -147,34 +233,109 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'softmax', 'cross_entropy2', 'reduce_mean', - 'fill_constant', 'reduce_mean_grad', 'cross_entropy_grad2', - 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'coalesce_tensor', 'c_reduce_sum', 'coalesce_tensor', - 'c_reduce_sum', 'c_sync_comm_stream', 'momentum', 'momentum', - 'momentum', 'momentum', 'momentum', 'coalesce_tensor', - 'c_broadcast', 'coalesce_tensor', 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 
'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'coalesce_tensor', + 'c_reduce_sum', + 'coalesce_tensor', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'coalesce_tensor', + 'c_broadcast', + 'coalesce_tensor', + 'c_broadcast', + ], + ) def test_opt_sharding_with_pp_amp_gclip(self): train_prog, startup_prog = static.Program(), static.Program() @@ -194,11 +355,9 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): strategy.fuse_grad_size_in_MB = 32 clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] self.debug_program(train_prog, startup_prog) @@ -211,46 +370,169 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'softmax', - 'cast', 'cross_entropy2', 'reduce_mean', 'elementwise_mul', - 'fill_constant', 'elementwise_mul_grad', 'reduce_mean_grad', - 'cross_entropy_grad2', 'cast', 'softmax_grad', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'c_sync_calc_stream', - 'send_v2', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'coalesce_tensor', 'c_reduce_sum', 'coalesce_tensor', - 'c_reduce_sum', 'c_sync_comm_stream', 'check_finite_and_unscale', - 'cast', 'c_allreduce_max', 'c_allreduce_max', 'cast', - 'update_loss_scaling', 'squared_l2_norm', 'squared_l2_norm', - 'squared_l2_norm', 'squared_l2_norm', 'squared_l2_norm', 
'sum', - 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', 'fill_constant', - 'elementwise_max', 'elementwise_div', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'coalesce_tensor', 'c_broadcast', 'coalesce_tensor', - 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'coalesce_tensor', + 'c_reduce_sum', + 'coalesce_tensor', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'coalesce_tensor', + 'c_broadcast', + 'coalesce_tensor', + 'c_broadcast', + ], + ) def test_opt_sharding_with_pp_amp_gclip_fuse_gm(self): train_prog, startup_prog = static.Program(), static.Program() @@ -271,11 +553,9 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): strategy.fuse_grad_merge = True clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] self.debug_program(train_prog, startup_prog) @@ -288,43 +568,151 @@ class 
TestFleetHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast', - 'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'softmax', - 'cast', 'cross_entropy2', 'reduce_mean', 'elementwise_mul', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'coalesce_tensor', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cross_entropy_grad2', 'cast', 'softmax_grad', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'cast', 'c_sync_calc_stream', - 'send_v2', 'cast', 'sum', 'cast', 'sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_sync_comm_stream', 'check_finite_and_unscale', - 'cast', 'c_allreduce_max', 'c_allreduce_max', 'cast', - 'update_loss_scaling', 'squared_l2_norm', 'squared_l2_norm', - 'squared_l2_norm', 'squared_l2_norm', 'squared_l2_norm', 'sum', - 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', 'fill_constant', - 'elementwise_max', 'elementwise_div', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'coalesce_tensor', 'c_broadcast', 'coalesce_tensor', - 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', 
+ 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'c_sync_calc_stream', + 'send_v2', + 'cast', + 'sum', + 'cast', + 'sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'coalesce_tensor', + 'c_broadcast', + 'coalesce_tensor', + 'c_broadcast', + ], + ) def test_opt_sharding_with_pp_amp_ckp_fuse_gm_optcast(self): train_prog, startup_prog = static.Program(), static.Program() @@ -337,8 +725,12 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): } strategy.recompute = True strategy.recompute_configs = { - "checkpoints": - ["fc_0.tmp_2", "fc_1.tmp_2", "fc_2.tmp_2", "fc_3.tmp_2"] + "checkpoints": [ + "fc_0.tmp_2", + "fc_1.tmp_2", + "fc_2.tmp_2", + "fc_3.tmp_2", + ] } strategy.sharding = True @@ -368,49 +760,158 @@ class TestFleetHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', - 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', - 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', - 'cast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'mul', 'elementwise_add', 'cast', 'tanh', 'cast', - 'mul', 'elementwise_add', 'cast', 'tanh', 'cast', 'mul', - 'elementwise_add', 'cast', 'tanh', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'softmax', 'cast', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'coalesce_tensor', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'coalesce_tensor', 'coalesce_tensor', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'cast', 'softmax_grad', 'cast', 'elementwise_add_grad', 'cast', - 'mul_grad', 'cast', 'tanh_grad', 'cast', 'elementwise_add_grad', - 'mul_grad', 'cast', 'tanh_grad', 'cast', 'elementwise_add_grad', - 'mul_grad', 'cast', 'cast', 'mul', 'elementwise_add', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'c_sync_calc_stream', 'send_v2', 'cast', 'sum', 'sum', 'cast', - 'sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'c_allreduce_max', 'cast', 'update_loss_scaling', - 'momentum', 'cast', 'momentum', 'cast', 'momentum', 'cast', - 'momentum', 'momentum', 'cast', 'coalesce_tensor', 'c_broadcast', - 
'c_broadcast', 'coalesce_tensor', 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'cast', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'c_sync_calc_stream', + 'send_v2', + 'cast', + 'sum', + 'sum', + 'cast', + 'sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'momentum', + 'cast', + 'coalesce_tensor', + 'c_broadcast', + 'c_broadcast', + 'coalesce_tensor', + 'c_broadcast', + ], + ) class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "3" - os.environ["PADDLE_TRAINER_ENDPOINTS"] = \ - "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" + os.environ[ + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" # pre-assigned ring id self.mp_ring_id = 0 self.sharding_ring_id = 1 @@ -440,11 +941,9 @@ class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer): strategy.fuse_grad_size_in_MB = 32 clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] self.debug_program(train_prog, startup_prog) @@ -461,32 +960,74 @@ class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer): if is_loss_grad_op(op): self.assertEqual(op.type, 'fill_constant') self.assertTrue(op.has_attr('value')) - scale = strategy.pipeline_configs[ - 'accumulate_steps'] * strategy.sharding_configs['dp_degree'] + scale = ( + strategy.pipeline_configs['accumulate_steps'] + * 
strategy.sharding_configs['dp_degree'] + ) loss_scale = 1.0 / scale self.assertAlmostEqual(float(op.attr('value')), loss_scale) # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'matmul', 'cast', 'reduce_mean', - 'elementwise_mul', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cast', 'matmul_grad', 'c_sync_calc_stream', - 'send_v2', 'fill_constant', 'cast', 'sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'c_allreduce_max', 'cast', 'update_loss_scaling', - 'fill_constant', 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', - 'fill_constant', 'elementwise_max', 'elementwise_div', 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'matmul', + 'cast', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cast', + 'matmul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'fill_constant', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'c_broadcast', + ], + ) def test_opt_sharding_with_pp_amp_gclip_boundary_card1(self): - """ test optimizer sharding without parameter in card0 """ + """test optimizer sharding without parameter in card0""" os.environ["PADDLE_TRAINER_ID"] = "1" train_prog, startup_prog = static.Program(), static.Program() avg_cost, strategy = self.boundary_net(train_prog, startup_prog) @@ -504,11 +1045,9 @@ class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer): strategy.fuse_grad_size_in_MB = 32 clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] self.debug_program(train_prog, startup_prog) @@ -521,24 +1060,67 @@ class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'matmul', 'cast', 'reduce_mean', - 'elementwise_mul', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cast', 'matmul_grad', 'c_sync_calc_stream', - 'send_v2', 'fill_constant', 'cast', 'sum', 
'c_reduce_sum', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'c_allreduce_max', 'cast', 'update_loss_scaling', - 'squared_l2_norm', 'sum', 'c_allreduce_sum', 'c_allreduce_sum', - 'sqrt', 'fill_constant', 'elementwise_max', 'elementwise_div', - 'elementwise_mul', 'momentum', 'c_broadcast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'matmul', + 'cast', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cast', + 'matmul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'squared_l2_norm', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'momentum', + 'c_broadcast', + ], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py index 1c20d2e45be03a815bb58f41d6914d1271e077e7..a8fa8c7107b4aca41136b03541d5d296f6258440 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py @@ -23,31 +23,32 @@ paddle.enable_static() class TestFleetLambMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') - - fc_1 = paddle.fluid.layers.fc(input=input_x, - size=64, - act='tanh') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) + + fc_1 = paddle.fluid.layers.fc( + input=input_x, size=64, act='tanh' + ) fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -78,8 +79,9 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): startup_prog = fluid.Program() train_prog = fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.1, - momentum=0.9) + optimizer = paddle.fluid.optimizer.Momentum( + learning_rate=0.1, momentum=0.9 + ) optimizer = 
fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) @@ -101,7 +103,8 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): optimizer.minimize(avg_cost) ops_without_wd = [ - op for op in avg_cost.block.ops + op + for op in avg_cost.block.ops if op.type == 'lamb' and op.attr('op_role_var')[0].endswith('.b_0') ] for op in ops_without_wd: @@ -110,16 +113,17 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): def test_lamb_apply_with_amp(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py index b560cdaa66ef47c164e690b311ba7a95754558df..0a7298631cf9d3935632ee12d3e3089b96d6acd4 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py @@ -23,31 +23,32 @@ paddle.enable_static() class TestFleetLarsMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') - - fc_1 = paddle.fluid.layers.fc(input=input_x, - size=64, - act='tanh') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) + + fc_1 = paddle.fluid.layers.fc( + input=input_x, size=64, act='tanh' + ) fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -67,8 +68,9 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): startup_prog = fluid.Program() train_prog = fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.01, - momentum=0.9) + optimizer = paddle.fluid.optimizer.Momentum( + learning_rate=0.01, momentum=0.9 + ) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) @@ -94,16 +96,21 @@ 
class TestFleetLarsMetaOptimizer(unittest.TestCase): startup_prog = fluid.Program() train_prog = fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) - optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.01, - momentum=0.9) + optimizer = paddle.fluid.optimizer.Momentum( + learning_rate=0.01, momentum=0.9 + ) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) ops_without_wd = [ - op for op in avg_cost.block.ops - if op.type == 'lars_momentum' and ("batch_norm" in op.attr( - 'op_role_var')[0] or ".b" in op.attr('op_role_var')[0]) + op + for op in avg_cost.block.ops + if op.type == 'lars_momentum' + and ( + "batch_norm" in op.attr('op_role_var')[0] + or ".b" in op.attr('op_role_var')[0] + ) ] for op in ops_without_wd: self.assertEqual(op.attr('lars_weight_decay')[0], 0) @@ -111,16 +118,17 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): def test_lars_apply_with_amp(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -143,8 +151,9 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): "exclude_from_weight_decay": ["batch_norm", ".b"], } - optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.01, - momentum=0.9) + optimizer = paddle.fluid.optimizer.Momentum( + learning_rate=0.01, momentum=0.9 + ) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_localsgd_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_localsgd_meta_optimizer.py index 2765b8c3eaea00242cc4b5373b18d5a0a412927c..a3711c6f84180e5f32001dfa949ba33801b339b9 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_localsgd_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_localsgd_meta_optimizer.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestFleetLocalSGDMetaOptimizer(TestFleetMetaOptimizer): - def test_localsgd_optimizer(self): train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) @@ -32,7 +31,8 @@ class TestFleetLocalSGDMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] outs = [ - ''.join(op.output('Out')) for op in avg_cost.block.ops + ''.join(op.output('Out')) + for op in avg_cost.block.ops if op.type == 'conditional_block' ] @@ -48,7 +48,8 @@ class TestFleetLocalSGDMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] outs = [ - ''.join(op.output('Out')) for op in avg_cost.block.ops + ''.join(op.output('Out')) + for op in avg_cost.block.ops if op.type == 'conditional_block' ] @@ -61,7 +62,6 @@ class TestFleetLocalSGDMetaOptimizer(TestFleetMetaOptimizer): class 
TestFleetAdaptiveLocalSGDMetaOptimizer(TestFleetMetaOptimizer): - def test_adaptive_localsgd_optimizer(self): train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) @@ -70,7 +70,8 @@ class TestFleetAdaptiveLocalSGDMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] outs = [ - ''.join(op.output('Out')) for op in avg_cost.block.ops + ''.join(op.output('Out')) + for op in avg_cost.block.ops if op.type == 'conditional_block' ] @@ -86,7 +87,8 @@ class TestFleetAdaptiveLocalSGDMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] outs = [ - ''.join(op.output('Out')) for op in avg_cost.block.ops + ''.join(op.output('Out')) + for op in avg_cost.block.ops if op.type == 'conditional_block' ] diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_log.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_log.py index 985905bdc815f13bafa5b4fc544d0642e5995778..93027dfb8b7dd2d577c0224d58d227123722d837 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_log.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_log.py @@ -19,7 +19,6 @@ import unittest class TestFleetLog(unittest.TestCase): - def setUp(self): fleet.init(log_level="DEBUG") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py index 0c8db67dcb5f47b5ce1d4556a3383d4fb27b44ee..f127e072dd11c110d5348dbdecc0bf0c7713ceb7 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py @@ -17,40 +17,44 @@ import paddle from paddle import fluid import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker -from paddle.distributed.fleet.meta_optimizers.meta_optimizer_base import MetaOptimizerBase +from paddle.distributed.fleet.meta_optimizers.meta_optimizer_base import ( + MetaOptimizerBase, +) class TestFleetMetaOptimizerBase(unittest.TestCase): - def net(main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): with fluid.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') - - fc_1 = paddle.fluid.layers.fc(input=input_x, - size=64, - act='tanh') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) + + fc_1 = paddle.fluid.layers.fc( + input=input_x, size=64, act='tanh' + ) fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) opt = MetaOptimizerBase(optimizer) opt_ops, params_grads = opt.minimize(avg_cost) - opt.apply_optimize(avg_cost, - paddle.static.default_startup_program(), - params_grads) + opt.apply_optimize( + 
avg_cost, + paddle.static.default_startup_program(), + params_grads, + ) return None net(fluid.default_startup_program(), fluid.default_main_program()) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py index 279a2e21f70ef0f6198c04208aecf374f3aac888..bee864e1f24f83ac62de649c8af0d57048d002a4 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py @@ -24,23 +24,23 @@ paddle.enable_static() class TestFleetMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def net(self): with static.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') - input_z = paddle.fluid.layers.data(name="z", - shape=[1], - dtype="float32") + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) + input_z = paddle.fluid.layers.data( + name="z", shape=[1], dtype="float32" + ) with static.device_guard("gpu:all"): input_z = input_z * 1.0 input_z.stop_gradient = True @@ -52,11 +52,12 @@ class TestFleetMetaOptimizer(unittest.TestCase): # for pipeline check_pipeline_persist_var coverage fc_2.persistable = True fc_2 = fc_2 * input_z - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) return avg_cost @@ -68,7 +69,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.pipeline = True strategy.pipeline_configs = { 'micro_batch_size': 1, - 'accumulate_steps': 2 + 'accumulate_steps': 2, } train_prog, startup_prog = static.Program(), static.Program() @@ -77,12 +78,13 @@ class TestFleetMetaOptimizer(unittest.TestCase): avg_cost = self.net() optimizer = paddle.fluid.optimizer.Adam(0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) def test_pipeline_amp_optimizer(self): - """ test pipeline& with device:all """ + """test pipeline& with device:all""" role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) @@ -91,7 +93,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.pipeline = True strategy.pipeline_configs = { 'micro_batch_size': 1, - 'accumulate_steps': 2 + 'accumulate_steps': 2, } train_prog, startup_prog = static.Program(), static.Program() @@ -100,8 +102,9 @@ class TestFleetMetaOptimizer(unittest.TestCase): avg_cost = self.net() optimizer = paddle.fluid.optimizer.Adam(0.01) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) ops = train_prog._pipeline_opt['section_program'].global_block().ops diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py index c45c81c35b42b4fae18cca465279721e87c6e8f4..8ce94b61a633483627cf64b6b6dbf27589a1e754 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py @@ -20,24 +20,25 @@ paddle.enable_static() class TestFleetMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def test_pipeline_optimizer(self): import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker + role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) with paddle.fluid.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') fc_3 = paddle.fluid.layers.fc(input=fc_2, size=64, act='tanh') @@ -47,11 +48,12 @@ class TestFleetMetaOptimizer(unittest.TestCase): with paddle.fluid.device_guard("gpu:1"): fc_7 = paddle.fluid.layers.fc(input=fc_6, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_7], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_7], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -59,7 +61,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.pipeline_configs = { 'micro_batch_size': 1, 'accumulate_steps': 2, - 'schedule_mode': '1F1B' + 'schedule_mode': '1F1B', } checkpoints = ['fc_5.tmp_0', 'fc_7.tmp_0'] @@ -67,7 +69,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.recompute_configs = { "checkpoints": checkpoints, "enable_offload": False, - "checkpoint_shape": [] + "checkpoint_shape": [], } optimizer = paddle.fluid.optimizer.Adam(0.01) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_private_function.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_private_function.py index a4854c695f8ccc7cb112e633095bbba96ad2ce05..94c8437167e81c3c0796cb5c6f76663c8d76fcb8 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_private_function.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_private_function.py @@ -18,11 +18,10 @@ import threading class TestFleetPrivateFunction(unittest.TestCase): - def test_wait_port(self): - def init_server(port): import time + time.sleep(5) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("127.0.0.1", port)) @@ -33,10 +32,11 @@ class TestFleetPrivateFunction(unittest.TestCase): c.close() break - thr = threading.Thread(target=init_server, args=(9292, )) + thr = 
threading.Thread(target=init_server, args=(9292,)) thr.start() import paddle.distributed.fleet as fleet + ep = ["127.0.0.1:9292"] fleet.base.private_helper_function.wait_server_ready(ep) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py index 3fde52958d3532ba136c2dedc2d0dc4bff5e419d..e985415d943ccb04e23c204fa054a6dc7a746a41 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py @@ -20,27 +20,29 @@ paddle.enable_static() class TestFleetMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def test_pipeline_optimizer(self): import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker + role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_recompute_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_recompute_meta_optimizer.py index 0045f7311deeed830aa0ea45152988ff81ced521..9de35f8ca35a195ade854e965c412c0bff01345a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_recompute_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_recompute_meta_optimizer.py @@ -22,15 +22,15 @@ paddle.enable_static() class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer): - def test_recompute_optimizer_backward(self): - """ test recompute optimizer backward """ + """test recompute optimizer backward""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) @@ -41,13 +41,14 @@ class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('subprog', ''.join(outs)) def test_recompute_optimizer_backward_gradients(self): - """ test recompute optimizer backward + gradients """ + """test recompute optimizer backward + gradients""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') - opt = 
fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) @@ -60,13 +61,14 @@ class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('subprog', ''.join(outs)) def test_recompute_optimizer_backward_optimize(self): - """ test recompute optimizer backward + optimize """ + """test recompute optimizer backward + optimize""" train_prog, startup_prog = fluid.Program(), fluid.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) @@ -126,7 +128,8 @@ class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer): self.optimizer(avg_cost, strategy, train_prog, startup_prog) ops = [op.type for op in avg_cost.block.ops] outs = [ - op.output('Out')[0] for op in avg_cost.block.ops + op.output('Out')[0] + for op in avg_cost.block.ops if op.type == 'memcpy' ] self.assertIn('memcpy', ops) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py index 18ef1f786332ee4e578cb6f43d06573958bbd0f2..18c2487b66a95d66f24b44d52fa5cbeade41c741 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py @@ -59,9 +59,11 @@ class TestCloudRoleMaker(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ["POD_IP"] = "127.0.0.1" def test_tr_rolemaker(self): @@ -103,8 +105,9 @@ class TestCloudRoleMaker(unittest.TestCase): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_PORT"] = "36001" - ro = role_maker.PaddleCloudRoleMaker(is_collective=False, - init_gloo=False) + ro = role_maker.PaddleCloudRoleMaker( + is_collective=False, init_gloo=False + ) self.assertEqual(ro._server_index(), 0) self.assertFalse(ro._is_worker()) self.assertTrue(ro._is_server()) @@ -139,7 +142,8 @@ class TestUserDefinedRoleMaker(unittest.TestCase): server_endpoints=["127.0.0.1:36001", "127.0.0.1:36001"], role=role_maker.Role.SERVER, current_id=0, - worker_num=2) + worker_num=2, + ) self.assertEqual(ro._server_num(), 2) ro._generate_role() self.assertTrue(ro._is_server()) @@ -152,7 +156,8 @@ class TestUserDefinedRoleMaker(unittest.TestCase): server_endpoints=["127.0.0.1:36001", "127.0.0.1:36001"], role=role_maker.Role.WORKER, current_id=0, - worker_num=2) + worker_num=2, + ) self.assertIn("127.0.0.1:36001", ro._get_pserver_endpoints()) self.assertTrue(ro._is_worker()) @@ -160,7 +165,6 @@ class TestUserDefinedRoleMaker(unittest.TestCase): class TestGlooWithCloudRoleMaker(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINERS_NUM"] = "1" os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001" @@ -287,6 
+291,7 @@ class TestGlooWithCloudRoleMaker(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) role._generate_role() import time + time.sleep(3) def test_fs_gloo5(self): @@ -443,8 +448,9 @@ class TestGlooWithCloudRoleMaker(unittest.TestCase): x = paddle.fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = paddle.fluid.layers.fc(input=x, size=1, act=None) y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') - cost = paddle.fluid.layers.square_error_cost(input=y_predict, - label=y) + cost = paddle.fluid.layers.square_error_cost( + input=y_predict, label=y + ) avg_cost = paddle.mean(cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py index 6c14b49278c5a446b7397bd170d71a23d2e73ec7..57a199c133395fb66a38d4f97df2487d34db0472 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py @@ -25,9 +25,10 @@ paddle.enable_static() class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): - def test_sharding_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -40,28 +41,74 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('@BroadCast', ''.join(vars)) self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0" - ])) - - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'momentum', 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + "fc_2.w_0_velocity_0", + "learning_rate_0", + ] + ), + ) + + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 
'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_amp_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -77,33 +124,101 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('check_finite_and_unscale', ops) self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0", - "loss_scaling_0", "num_bad_steps_0", "num_good_steps_0" - ])) - - self.assertEqual(ops, [ - 'cast', 'cast', 'cast', 'fill_constant', 'fill_constant', - 'fill_constant', 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_sync_comm_stream', 'cast', 'mul', 'elementwise_add', 'cast', - 'tanh', 'cast', 'mul', 'elementwise_add', 'cast', 'tanh', 'cast', - 'mul', 'elementwise_add', 'softmax', 'cast', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'cast', 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'cast', 'cast', 'cast', - 'check_finite_and_unscale', 'cast', 'c_allreduce_max', 'cast', - 'update_loss_scaling', 'momentum', 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + "fc_2.w_0_velocity_0", + "learning_rate_0", + "loss_scaling_0", + "num_bad_steps_0", + "num_good_steps_0", + ] + ), + ) + + self.assertEqual( + ops, + [ + 'cast', + 'cast', + 'cast', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'cast', + 'cast', + 'cast', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_recompute_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -120,29 +235,78 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): 
self.assertIn('subprog', ''.join(vars)) self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0" - ])) - - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'mul', 'elementwise_add', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'mul', 'elementwise_add', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'momentum', 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + "fc_2.w_0_velocity_0", + "learning_rate_0", + ] + ), + ) + + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'mul', + 'elementwise_add', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'mul', + 'elementwise_add', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_amp_recompute_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -163,36 +327,115 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0", - "loss_scaling_0", "num_bad_steps_0", "num_good_steps_0" - ])) - self.assertEqual(ops, [ - 'cast', 'cast', 'cast', 'cast', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'cast', 'mul', 'elementwise_add', 'cast', 'tanh', 'cast', 'mul', - 'elementwise_add', 'cast', 'tanh', 'cast', 'mul', 'elementwise_add', - 'softmax', 'cast', 'cross_entropy2', 'reduce_mean', - 'elementwise_mul', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cross_entropy_grad2', 'cast', 'softmax_grad', - 'elementwise_add_grad', 'mul_grad', 'cast', 'cast', 'mul', - 'elementwise_add', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 
'mul_grad', 'cast', 'mul', - 'elementwise_add', 'cast', 'tanh_grad', 'cast', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_sync_comm_stream', 'cast', - 'cast', 'cast', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + "fc_2.w_0_velocity_0", + "learning_rate_0", + "loss_scaling_0", + "num_bad_steps_0", + "num_good_steps_0", + ] + ), + ) + self.assertEqual( + ops, + [ + 'cast', + 'cast', + 'cast', + 'cast', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'cast', + 'cast', + 'cast', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_amp_asp_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -212,44 +455,115 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertEqual( set(parameters), - set([ - 'fc_2.b_0', 'num_good_steps_0', 'fc_2.w_0', 'loss_scaling_0', - 'num_bad_steps_0', 'fc_2.w_0_velocity_0', 'fc_2.w_0.asp_mask', - 'learning_rate_0', 'fc_1.b_0', 'fc_1.w_0.asp_mask', - 'fc_0.w_0.asp_mask', 'fc_1.b_0_velocity_0', - 'fc_2.b_0_velocity_0' - ])) - self.assertEqual(ops, [ - 'cast', 'cast', 'cast', 'fill_constant', 'fill_constant', - 'fill_constant', 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_sync_comm_stream', 'cast', 'mul', 'elementwise_add', 'cast', - 'tanh', 'cast', 'mul', 'elementwise_add', 'cast', 'tanh', 'cast', - 'mul', 'elementwise_add', 'softmax', 'cast', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'cast', 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 
'c_reduce_sum', - 'c_sync_comm_stream', 'cast', 'cast', 'cast', - 'check_finite_and_unscale', 'cast', 'c_allreduce_max', 'cast', - 'update_loss_scaling', 'momentum', 'momentum', 'momentum', - 'elementwise_mul' - ]) + set( + [ + 'fc_2.b_0', + 'num_good_steps_0', + 'fc_2.w_0', + 'loss_scaling_0', + 'num_bad_steps_0', + 'fc_2.w_0_velocity_0', + 'fc_2.w_0.asp_mask', + 'learning_rate_0', + 'fc_1.b_0', + 'fc_1.w_0.asp_mask', + 'fc_0.w_0.asp_mask', + 'fc_1.b_0_velocity_0', + 'fc_2.b_0_velocity_0', + ] + ), + ) + self.assertEqual( + ops, + [ + 'cast', + 'cast', + 'cast', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'mul', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'cast', + 'cast', + 'cast', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + 'elementwise_mul', + ], + ) def test_sharding_weight_decay(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') regularization = paddle.fluid.regularizer.L2Decay(0.0001) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - regularization=regularization) + self.optimizer( + avg_cost, + strategy, + train_prog, + startup_prog, + regularization=regularization, + ) parameters = [ x.name for x in train_prog.list_vars() if x.persistable == True ] @@ -258,38 +572,87 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('@BroadCast', ''.join(vars)) self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0" - ])) - - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'scale', 'sum', 'scale', 'sum', 'scale', - 'sum', 'momentum', 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + 
"fc_2.w_0_velocity_0", + "learning_rate_0", + ] + ), + ) + + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'scale', + 'sum', + 'scale', + 'sum', + 'scale', + 'sum', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_gradient_clip(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') clip = paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) parameters = [ x.name for x in train_prog.list_vars() if x.persistable == True ] @@ -298,32 +661,86 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.assertIn('@BroadCast', ''.join(vars)) self.assertEqual( set(parameters), - set([ - "fc_1.b_0", "fc_2.b_0", "fc_2.w_0", "fc_1.b_0_velocity_0", - "fc_2.b_0_velocity_0", "fc_2.w_0_velocity_0", "learning_rate_0" - ])) - - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'squared_l2_norm', 'squared_l2_norm', - 'squared_l2_norm', 'sum', 'c_allreduce_sum', 'sqrt', - 'fill_constant', 'elementwise_max', 'elementwise_div', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', 'momentum', - 'momentum', 'momentum' - ]) + set( + [ + "fc_1.b_0", + "fc_2.b_0", + "fc_2.w_0", + "fc_1.b_0_velocity_0", + "fc_2.b_0_velocity_0", + "fc_2.w_0_velocity_0", + "learning_rate_0", + ] + ), + ) + + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 
'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'squared_l2_norm', + 'squared_l2_norm', + 'squared_l2_norm', + 'sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_clone_for_test(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -334,21 +751,41 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): sharding.utils.add_sync_comm(test_prog, 1) ops = [op.type for op in test_prog.global_block().ops] - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', 'reduce_mean' - ]) + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + ], + ) class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "3" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" # pre-assigned ring id self.mp_ring_id = 0 @@ -359,7 +796,9 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): def test_sharding_with_mp(self): # NOTE(JZ-LIANG) MP parallelism need user to build model with MP API - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -371,7 +810,7 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "sharding_degree": 2, "hybrid_dp": False, "gradient_merge_acc_step": 1, - "mp_degree": 2 + "mp_degree": 2, } self.optimizer(avg_cost, strategy, train_prog, startup_prog) startup_prog_ops = startup_prog.global_block().ops @@ -379,7 +818,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # should has ring id for MP created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.mp_ring_id, created_ring_ids) @@ -387,8 +827,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of MP group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and 
op.desc.output_arg_names()[0] == "comm_id_0" + ): sharding_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(sharding_group_waiting_ports, ['127.0.0.1:36003']) @@ -396,14 +838,18 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_sharding_hybrid_dp(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -416,7 +862,7 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "dp_degree": 2, "hybrid_dp": True, "gradient_merge_acc_step": 1, - "mp_degree": 1 + "mp_degree": 1, } strategy.fuse_all_reduce_ops = False @@ -426,7 +872,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check ring id for outter dp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.dp_ring_id, created_ring_ids) @@ -434,8 +881,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): sharding_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(sharding_group_waiting_ports, ['127.0.0.1:36003']) @@ -443,8 +892,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of dp group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) @@ -453,32 +904,74 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): if is_loss_grad_op(op): self.assertEqual(op.type, 'fill_constant') self.assertTrue(op.has_attr('value')) - scale = strategy.sharding_configs[ - 'sharding_degree'] * strategy.sharding_configs['dp_degree'] + scale = ( + strategy.sharding_configs['sharding_degree'] + * strategy.sharding_configs['dp_degree'] + ) loss_scale = 1.0 / scale self.assertAlmostEqual(float(op.attr('value')), loss_scale) # check program (allreudce) ops = [op.type for op in main_prog_ops] - self.assertEqual(ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 
'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'c_allreduce_sum', 'c_allreduce_sum', - 'c_allreduce_sum', 'c_sync_comm_stream', 'momentum', 'momentum', - 'momentum' - ]) + self.assertEqual( + ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_sharding_hybrid_dp_gm(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -491,7 +984,7 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "dp_degree": 2, "hybrid_dp": True, "gradient_merge_acc_step": 4, - "mp_degree": 1 + "mp_degree": 1, } self.optimizer(avg_cost, strategy, train_prog, startup_prog) startup_prog_ops = startup_prog.global_block().ops @@ -499,7 +992,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check ring id for outter dp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.dp_ring_id, created_ring_ids) @@ -507,8 +1001,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): sharding_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(sharding_group_waiting_ports, ['127.0.0.1:36003']) @@ -516,35 +1012,87 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of dp group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) # check program fw_bw_ops = [op.type for op in train_prog.blocks[0].ops] opt_ops = [op.type for op in train_prog.blocks[2].ops] - self.assertEqual(fw_bw_ops, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream', - 'mul', 'elementwise_add', 'tanh', 'mul', 
'elementwise_add', 'tanh', - 'mul', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'fill_constant', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_sync_comm_stream', 'elementwise_add', 'elementwise_add', - 'elementwise_add', 'increment', 'elementwise_mod', 'equal', - 'conditional_block' - ]) - self.assertEqual(opt_ops, [ - 'c_allreduce_sum', 'c_allreduce_sum', 'c_allreduce_sum', 'scale', - 'scale', 'scale', 'momentum', 'momentum', 'momentum', - 'fill_constant', 'fill_constant', 'fill_constant' - ]) + self.assertEqual( + fw_bw_ops, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'elementwise_add', + 'elementwise_add', + 'elementwise_add', + 'increment', + 'elementwise_mod', + 'equal', + 'conditional_block', + ], + ) + self.assertEqual( + opt_ops, + [ + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'scale', + 'scale', + 'scale', + 'momentum', + 'momentum', + 'momentum', + 'fill_constant', + 'fill_constant', + 'fill_constant', + ], + ) # # check loss scale for gradient merge scale_ = -1 @@ -554,7 +1102,9 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): self.assertEqual(scale_, 0.25) def test_sharding_with_pp(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True @@ -565,7 +1115,7 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "hybrid_dp": False, "gradient_merge_acc_step": 4, "mp_degree": 1, - "pp_degree": 2 + "pp_degree": 2, } strategy.pipeline = True strategy.pipeline_configs = { @@ -585,39 +1135,112 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] print(startup_prog_op_types) # global, sharding, pp_send, pp_recv - self.assertEqual(startup_prog_op_types, [ - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init' - ]) - - self.assertEqual(main_prog_op_types, [ - 'fill_constant', 'fill_constant', 'fill_constant', - 'c_sync_calc_stream', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_sync_comm_stream', 'recv_v2', 'mul', - 'elementwise_add', 
'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'softmax', - 'cross_entropy2', 'reduce_mean', 'fill_constant', - 'reduce_mean_grad', 'cross_entropy_grad2', 'softmax_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'c_sync_calc_stream', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', - 'c_reduce_sum', 'c_reduce_sum', 'c_sync_comm_stream', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'c_sync_comm_stream', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_sync_calc_stream', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_sync_comm_stream', + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'c_sync_calc_stream', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_reduce_sum', + 'c_sync_comm_stream', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'c_sync_comm_stream', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.sharding_ring_id, created_ring_ids) @@ -626,8 +1249,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): sharding_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(sharding_group_waiting_ports, ['127.0.0.1:36003']) @@ -635,14 +1260,18 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == 
"c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_sharding_dp_with_allreduce_fuse(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -655,7 +1284,7 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "dp_degree": 2, "hybrid_dp": True, "gradient_merge_acc_step": 1, - "mp_degree": 1 + "mp_degree": 1, } strategy.fuse_all_reduce_ops = True strategy.fuse_grad_size_in_MB = 2 @@ -672,7 +1301,9 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): assert 'FusedGrad' in op.input_arg_names[0] def test_hybrid_with_mp_pp_amp_gclip(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -690,11 +1321,9 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "accumulate_steps": 4, } clip = paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip) + self.optimizer( + avg_cost, strategy, train_prog, startup_prog, grad_clip=clip + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] @@ -706,45 +1335,166 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'partial_recv', 'partial_allgather', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'softmax', 'cast', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'cast', 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'c_sync_calc_stream', 'partial_send', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 
'cast', 'sum', 'c_sync_comm_stream', - 'check_finite_and_unscale', 'cast', 'c_allreduce_max', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'fill_constant', - 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', 'fill_constant', - 'elementwise_max', 'elementwise_div', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum', 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'partial_recv', + 'partial_allgather', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'c_sync_calc_stream', + 'partial_send', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'fill_constant', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) # pp + mp, partial send recv self.assertIn('partial_recv', main_prog_op_types) @@ -759,7 +1509,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.mp_ring_id, created_ring_ids) @@ -768,8 +1519,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group 
sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): mp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(mp_group_waiting_ports, ['127.0.0.1:36003']) @@ -777,14 +1530,18 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_mp_pp_amp_gclip_for_optimizer(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -802,12 +1559,14 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): "accumulate_steps": 4, } clip = paddle.fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) - self.optimizer(avg_cost, - strategy, - train_prog, - startup_prog, - grad_clip=clip, - name="adamw") + self.optimizer( + avg_cost, + strategy, + train_prog, + startup_prog, + grad_clip=clip, + name="adamw", + ) train_prog = train_prog._pipeline_opt['section_program'] startup_prog = startup_prog._pipeline_opt['startup_program'] @@ -819,51 +1578,191 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'partial_recv', 'partial_allgather', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'cast', 'tanh', 'cast', 'cast', 'mul', 'cast', - 'elementwise_add', 'softmax', 'cast', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'cast', 'softmax_grad', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 
'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'tanh_grad', 'cast', 'elementwise_add_grad', 'mul_grad', 'cast', - 'c_sync_calc_stream', 'partial_send', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast', - 'sum', 'fill_constant', 'cast', 'sum', 'c_sync_comm_stream', - 'check_finite_and_unscale', 'cast', 'c_allreduce_max', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'memcpy', - 'fill_constant', 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', - 'fill_constant', 'elementwise_max', 'elementwise_div', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'elementwise_mul', - 'elementwise_mul', 'elementwise_mul', 'adamw', 'adamw', 'adamw', - 'adamw', 'adamw', 'adamw', 'adamw', 'adamw' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'partial_recv', + 'partial_allgather', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'cast', + 'tanh', + 'cast', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cast', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'cast', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'tanh_grad', + 'cast', + 'elementwise_add_grad', + 'mul_grad', + 'cast', + 'c_sync_calc_stream', + 'partial_send', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'cast', + 'sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'memcpy', + 'fill_constant', + 'c_allreduce_sum', + 'c_allreduce_sum', + 
'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'elementwise_mul', + 'adamw', + 'adamw', + 'adamw', + 'adamw', + 'adamw', + 'adamw', + 'adamw', + 'adamw', + ], + ) # pp + mp, partial send recv self.assertIn('partial_recv', main_prog_op_types) @@ -878,7 +1777,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.mp_ring_id, created_ring_ids) @@ -887,8 +1787,10 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): mp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(mp_group_waiting_ports, ['127.0.0.1:36003']) @@ -896,14 +1798,18 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group sharding_group_waiting_port = None for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_fp16allreduce(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -936,46 +1842,145 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', - 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', - 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', 'elementwise_add', - 'softmax', 'cross_entropy2', 'reduce_mean', 'elementwise_mul', - 'fill_constant', 'elementwise_mul_grad', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'cast', 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'fill_constant', 'cast', 'sum', 'fill_constant', 'sum', - 
'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'coalesce_tensor', 'c_allreduce_sum', - 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'momentum', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'coalesce_tensor', + 'c_allreduce_sum', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) # amp check_finite_and_unscale, allreduce(pp) self.assertEqual(main_prog_op_types.count('c_allreduce_max'), 1) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.pp_pair_ring_id, created_ring_ids) @@ -983,22 +1988,28 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003']) # check correctness of dp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): 
dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_sharding_pp_amp_fp16allreduce_in_optimize(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1033,14 +2044,33 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: sharding, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + ], + ) # FIXME(wangxi): some bug in sharding+pp with pp_allreduce_in_optimize # self.assertEqual(main_prog_op_types, []) @@ -1050,7 +2080,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.sharding_ring_id, created_ring_ids) @@ -1058,22 +2089,28 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of sharding group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): sharding_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(sharding_group_waiting_ports, ['127.0.0.1:36003']) # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_1": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_1" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1107,48 +2144,152 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 
'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'cast', - 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', - 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', - 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'cast', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'softmax_grad', 'elementwise_add_grad', 'cast', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'fill_constant', 'cast', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'coalesce_tensor', 'c_allreduce_sum', - 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'cast', 'momentum', 'cast', 'momentum', 'cast', 'momentum', 'cast', - 'momentum', 'cast', 'momentum', 'cast', 'momentum', 'momentum', - 'cast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'coalesce_tensor', + 'c_allreduce_sum', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 
'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'momentum', + 'cast', + ], + ) # amp check_finite_and_unscale, allreduce(pp) self.assertEqual(main_prog_op_types.count('c_allreduce_max'), 1) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.pp_pair_ring_id, created_ring_ids) @@ -1156,22 +2297,28 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003']) # check correctness of dp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_offload(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1205,51 +2352,173 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'cast', 'memcpy', - 'c_broadcast', 'cast', 'memcpy', 'c_broadcast', 'cast', 'memcpy', - 'c_broadcast', 'cast', 'memcpy', 'c_broadcast', 'cast', 'memcpy', - 'c_broadcast', 'cast', 'memcpy', 'c_broadcast', 'cast', 'memcpy', - 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'cast', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'fill_constant', - 'elementwise_mul_grad', 'reduce_mean_grad', 'cross_entropy_grad2', - 'softmax_grad', 'elementwise_add_grad', 'cast', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'fill_constant', 'cast', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant', - 'sum', 'fill_constant', 'sum', 'fill_constant', 'sum', - 'fill_constant', 'sum', 'coalesce_tensor', 'c_allreduce_sum', - 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', - 'c_sync_comm_stream', 
'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'memcpy', - 'momentum', 'cast', 'memcpy', 'memcpy', 'momentum', 'cast', - 'memcpy', 'memcpy', 'momentum', 'cast', 'memcpy', 'memcpy', - 'momentum', 'cast', 'memcpy', 'memcpy', 'momentum', 'cast', - 'memcpy', 'memcpy', 'momentum', 'cast', 'memcpy', 'momentum', - 'memcpy', 'momentum', 'cast', 'memcpy' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + 'cast', + 'memcpy', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'fill_constant', + 'cast', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'fill_constant', + 'sum', + 'coalesce_tensor', + 'c_allreduce_sum', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + 'momentum', + 'memcpy', + 'momentum', + 'cast', + 'memcpy', + ], + ) # amp check_finite_and_unscale, allreduce(pp) self.assertEqual(main_prog_op_types.count('c_allreduce_max'), 1) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.pp_pair_ring_id, created_ring_ids) @@ -1257,23 +2526,30 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003']) 
# check correctness of dp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast_with_gradient_fuse( - self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + self, + ): + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1308,46 +2584,142 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'cast', - 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', - 'c_broadcast', 'cast', 'c_broadcast', 'cast', 'c_broadcast', 'cast', - 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'cast', 'elementwise_add', 'softmax', 'cross_entropy2', - 'reduce_mean', 'elementwise_mul', 'coalesce_tensor', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'fill_constant', 'elementwise_mul_grad', 'reduce_mean_grad', - 'cross_entropy_grad2', 'softmax_grad', 'elementwise_add_grad', - 'cast', 'mul_grad', 'tanh_grad', 'elementwise_add_grad', 'mul_grad', - 'tanh_grad', 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'sum', 'cast', 'sum', 'c_allreduce_sum', 'c_allreduce_sum', 'cast', - 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', 'cast', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'cast', 'momentum', 'cast', 'momentum', 'cast', 'momentum', 'cast', - 'momentum', 'cast', 'momentum', 'cast', 'momentum', 'momentum', - 'cast' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + 'cast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', 
+ 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'sum', + 'cast', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'cast', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'cast', + 'momentum', + 'momentum', + 'cast', + ], + ) # amp check_finite_and_unscale, allreduce(pp) self.assertEqual(main_prog_op_types.count('c_allreduce_max'), 1) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.pp_pair_ring_id, created_ring_ids) @@ -1355,22 +2727,28 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003']) # check correctness of dp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_with_gradient_fuse(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1403,44 +2781,127 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): main_prog_op_types = [op.type for op in main_prog_ops] # ring: mp, pp_group, pp_pair, pp_pair - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', - 'mul', 'cast', 'elementwise_add', 
'tanh', 'cast', 'mul', 'cast', - 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', 'elementwise_add', - 'softmax', 'cross_entropy2', 'reduce_mean', 'elementwise_mul', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'coalesce_tensor', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cross_entropy_grad2', 'softmax_grad', - 'elementwise_add_grad', 'cast', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'cast', 'sum', 'sum', 'c_allreduce_sum', 'c_allreduce_sum', - 'c_sync_comm_stream', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'momentum', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'cast', + 'sum', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_sync_comm_stream', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) # amp check_finite_and_unscale, allreduce(pp) self.assertEqual(main_prog_op_types.count('c_allreduce_max'), 1) # should has ring id for pp created_ring_ids = [ - op.desc.attr("ring_id") for op in startup_prog_ops + op.desc.attr("ring_id") + for op in startup_prog_ops if op.type == "c_comm_init" ] self.assertIn(self.pp_pair_ring_id, created_ring_ids) @@ -1448,22 +2909,28 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): # check correctness of pp group for op in startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_0": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_0" + ): pp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003']) # check correctness of dp group for op in 
startup_prog_ops: - if op.type == "c_gen_nccl_id" and op.desc.output_arg_names( - )[0] == "comm_id_3": + if ( + op.type == "c_gen_nccl_id" + and op.desc.output_arg_names()[0] == "comm_id_3" + ): dp_group_waiting_ports = op.desc.attr("other_endpoints") self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002']) def test_hybrid_with_pp_dp_amp_with_gradient_fuse_and_avg_after_sum(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1481,11 +2948,11 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): strategy.pipeline_configs = { "schedule_mode": "1F1B", "micro_batch_size": 2, - "accumulate_steps": 4 + "accumulate_steps": 4, } strategy.gradient_scale_configs = { 'scale_strategy': 'avg', - 'scale_gradient': True + 'scale_gradient': True, } strategy.fuse_grad_merge = True self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -1499,40 +2966,125 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): startup_prog_op_types = [op.type for op in startup_prog_ops] main_prog_op_types = [op.type for op in main_prog_ops] - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', - 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', - 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', 'elementwise_add', - 'softmax', 'cross_entropy2', 'reduce_mean', 'elementwise_mul', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'coalesce_tensor', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cross_entropy_grad2', 'softmax_grad', - 'elementwise_add_grad', 'cast', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'cast', 'sum', 'sum', 'c_allreduce_sum', 'c_allreduce_sum', - 'c_sync_comm_stream', 'scale', 'check_finite_and_unscale', 'cast', - 'c_allreduce_max', 'cast', 'update_loss_scaling', 'momentum', - 'momentum', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 
'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'cast', + 'sum', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_sync_comm_stream', + 'scale', + 'check_finite_and_unscale', + 'cast', + 'c_allreduce_max', + 'cast', + 'update_loss_scaling', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_hybrid_with_pp_dp_with_gradient_fuse_and_avg_after_sum(self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True @@ -1546,11 +3098,11 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): strategy.pipeline_configs = { "schedule_mode": "1F1B", "micro_batch_size": 2, - "accumulate_steps": 4 + "accumulate_steps": 4, } strategy.gradient_scale_configs = { 'scale_strategy': 'avg', - 'scale_gradient': True + 'scale_gradient': True, } strategy.fuse_grad_merge = True self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -1564,36 +3116,103 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): startup_prog_op_types = [op.type for op in startup_prog_ops] main_prog_op_types = [op.type for op in main_prog_ops] - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', - 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul', - 'elementwise_add', 'softmax', 'cross_entropy2', 'reduce_mean', - 'coalesce_tensor', 'coalesce_tensor', 'fill_constant', - 'reduce_mean_grad', 'cross_entropy_grad2', 'softmax_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'sum', 'c_allreduce_sum', 'c_sync_comm_stream', 'scale', 'momentum', - 'momentum', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum' - ]) + self.assertEqual( + 
startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'tanh', + 'mul', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'sum', + 'c_allreduce_sum', + 'c_sync_comm_stream', + 'scale', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) def test_hybrid_with_pp_dp_with_amp_no_dynamic_gradient_fuse_and_avg_after_sum( - self): - train_prog, startup_prog = paddle.fluid.Program(), paddle.fluid.Program( + self, + ): + train_prog, startup_prog = ( + paddle.fluid.Program(), + paddle.fluid.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True @@ -1606,17 +3225,17 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): strategy.amp = True strategy.amp_configs = { 'custom_black_varnames': ['fc_6.b_0'], - 'use_dynamic_loss_scaling': False + 'use_dynamic_loss_scaling': False, } strategy.pipeline = True strategy.pipeline_configs = { "schedule_mode": "1F1B", "micro_batch_size": 2, - "accumulate_steps": 4 + "accumulate_steps": 4, } strategy.gradient_scale_configs = { 'scale_strategy': 'avg', - 'scale_gradient': True + 'scale_gradient': True, } strategy.fuse_grad_merge = True self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -1630,35 +3249,115 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer): startup_prog_op_types = [op.type for op in startup_prog_ops] main_prog_op_types = [op.type for op in main_prog_ops] - self.assertEqual(startup_prog_op_types, [ - 'uniform_random', 'fill_constant', 'uniform_random', - 'fill_constant', 'uniform_random', 'fill_constant', - 'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast' - ]) - - self.assertEqual(main_prog_op_types, [ - 'recv_v2', 'cast', 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', - 'mul', 'cast', 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', - 'elementwise_add', 'tanh', 'cast', 'mul', 'cast', 'elementwise_add', - 'softmax', 'cross_entropy2', 'reduce_mean', 
'elementwise_mul', - 'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor', - 'coalesce_tensor', 'fill_constant', 'elementwise_mul_grad', - 'reduce_mean_grad', 'cross_entropy_grad2', 'softmax_grad', - 'elementwise_add_grad', 'cast', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'tanh_grad', - 'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2', - 'cast', 'sum', 'sum', 'c_allreduce_sum', 'c_allreduce_sum', - 'c_sync_comm_stream', 'scale', 'scale', 'check_finite_and_unscale', - 'momentum', 'momentum', 'momentum', 'momentum', 'momentum', - 'momentum', 'momentum', 'momentum' - ]) + self.assertEqual( + startup_prog_op_types, + [ + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'uniform_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + ], + ) + + self.assertEqual( + main_prog_op_types, + [ + 'recv_v2', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'tanh', + 'cast', + 'mul', + 'cast', + 'elementwise_add', + 'softmax', + 'cross_entropy2', + 'reduce_mean', + 'elementwise_mul', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'coalesce_tensor', + 'fill_constant', + 'elementwise_mul_grad', + 'reduce_mean_grad', + 'cross_entropy_grad2', + 'softmax_grad', + 'elementwise_add_grad', + 'cast', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'tanh_grad', + 'elementwise_add_grad', + 'mul_grad', + 'c_sync_calc_stream', + 'send_v2', + 'cast', + 'sum', + 'sum', + 'c_allreduce_sum', + 'c_allreduce_sum', + 'c_sync_comm_stream', + 'scale', + 'scale', + 'check_finite_and_unscale', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + 'momentum', + ], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_static_mp_layers.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_static_mp_layers.py index 19f926748cf1dc27589869a571c3f76c6932b2c3..fe583a4cbc6fb7928d7b25adbb0dc542391b1993 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_static_mp_layers.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_static_mp_layers.py @@ -23,7 +23,6 @@ paddle.enable_static() class ColumnLinearNet(fluid.dygraph.Layer): - def __init__(self, input_size, output_size): super(ColumnLinearNet, self).__init__() self.parallel_linear = fleet.meta_parallel.ColumnParallelLinear( @@ -32,7 +31,8 @@ class ColumnLinearNet(fluid.dygraph.Layer): weight_attr=None, has_bias=True, gather_output=True, - name="test_column_linear") + name="test_column_linear", + ) def forward(self, x): output = self.parallel_linear(x) @@ -40,7 +40,6 @@ class ColumnLinearNet(fluid.dygraph.Layer): class RowLinearNet(fluid.dygraph.Layer): - def __init__(self, input_size, output_size): super(RowLinearNet, 
self).__init__() self.parallel_linear = fleet.meta_parallel.RowParallelLinear( @@ -48,7 +47,8 @@ class RowLinearNet(fluid.dygraph.Layer): out_features=output_size, has_bias=True, input_is_parallel=False, - name="test_row_linear") + name="test_row_linear", + ) def forward(self, x): output = self.parallel_linear(x) @@ -56,11 +56,11 @@ class RowLinearNet(fluid.dygraph.Layer): class EmbeddingNet(fluid.dygraph.Layer): - def __init__(self, vocab_size, hidden_size): super(EmbeddingNet, self).__init__() self.embedding = fleet.meta_parallel.VocabParallelEmbedding( - vocab_size, hidden_size) + vocab_size, hidden_size + ) def forward(self, x): output = self.embedding(x) @@ -68,11 +68,11 @@ class EmbeddingNet(fluid.dygraph.Layer): class TestDistTraning(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "2" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004" strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 @@ -97,19 +97,22 @@ class TestDistTraning(unittest.TestCase): x = paddle.static.data(name='x', shape=[None, input_size]) y = model_a(x) - #print(main_program) + # print(main_program) ops = main_program.global_block().ops ops = [op.type for op in ops] self.assertEqual( - ops, ['c_identity', 'matmul_v2', 'elementwise_add', 'c_concat']) + ops, ['c_identity', 'matmul_v2', 'elementwise_add', 'c_concat'] + ) weight = model_a.parallel_linear.weight bias = model_a.parallel_linear.bias self.assertEqual( weight.shape, - (input_size, output_size // self.model_parallel_size)) - self.assertEqual(bias.shape, - (output_size // self.model_parallel_size, )) + (input_size, output_size // self.model_parallel_size), + ) + self.assertEqual( + bias.shape, (output_size // self.model_parallel_size,) + ) def test_row_parallel_layer(self): main_program, startup_program = self.get_program() @@ -120,19 +123,21 @@ class TestDistTraning(unittest.TestCase): x = paddle.static.data(name='x', shape=[None, input_size]) y = model_a(x) - #print(main_program) + # print(main_program) ops = main_program.global_block().ops ops = [op.type for op in ops] self.assertEqual( ops, - ['c_split', 'matmul_v2', 'c_allreduce_sum', 'elementwise_add']) + ['c_split', 'matmul_v2', 'c_allreduce_sum', 'elementwise_add'], + ) weight = model_a.parallel_linear.weight bias = model_a.parallel_linear.bias self.assertEqual( weight.shape, - (input_size // self.model_parallel_size, output_size)) - self.assertEqual(bias.shape, (output_size, )) + (input_size // self.model_parallel_size, output_size), + ) + self.assertEqual(bias.shape, (output_size,)) def test_parallel_embedding(self): main_program, startup_program = self.get_program() @@ -143,12 +148,12 @@ class TestDistTraning(unittest.TestCase): # model_a model_a = EmbeddingNet(vocab_size, hidden_size) - x = paddle.static.data(name='x', - shape=[None, seq_len], - dtype='int64') + x = paddle.static.data( + name='x', shape=[None, seq_len], dtype='int64' + ) y = model_a(x) - #print(main_program) + # print(main_program) ops = main_program.global_block().ops ops = [op.type for op in ops] self.assertEqual(ops, ['c_embedding', 'c_allreduce_sum']) @@ -156,7 +161,8 @@ class TestDistTraning(unittest.TestCase): weight = model_a.embedding.weight self.assertEqual( weight.shape, - (vocab_size // self.model_parallel_size, hidden_size)) + (vocab_size // self.model_parallel_size, hidden_size), + ) def test_parallel_cross_entropy(self): 
main_program, startup_program = self.get_program() @@ -170,17 +176,19 @@ class TestDistTraning(unittest.TestCase): model_a = fleet.meta_parallel.ParallelCrossEntropy() x = paddle.static.data( - name='x', shape=[batch_size, seq_length, class_size_per_card]) - label = paddle.static.data(name='label', - shape=[batch_size, seq_length], - dtype='int64') + name='x', shape=[batch_size, seq_length, class_size_per_card] + ) + label = paddle.static.data( + name='label', shape=[batch_size, seq_length], dtype='int64' + ) loss_a = model_a(x, label) - #print(main_program) + # print(main_program) ops = main_program.global_block().ops ops = [op.type for op in ops] - self.assertEqual(ops, - ['unsqueeze2', 'c_softmax_with_cross_entropy']) + self.assertEqual( + ops, ['unsqueeze2', 'c_softmax_with_cross_entropy'] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_utils.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_utils.py index d7266cf29ab789787342a46c3df891fb59a5bdca..6fc6a6d9a2f215e9406df6e620c091bc4e3dd814 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_utils.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_utils.py @@ -31,8 +31,9 @@ class TestFleetUtils(unittest.TestCase): train_dir = os.path.join("fleet_util_data", "train_program") def download_files(self): - path = download(self.proto_data_url, self.module_name, - self.proto_data_md5) + path = download( + self.proto_data_url, self.module_name, self.proto_data_md5 + ) print('data is downloaded at ' + path) tar = tarfile.open(path) unzip_folder = tempfile.mkdtemp() @@ -50,29 +51,35 @@ class TestFleetUtils(unittest.TestCase): text_program = "pruned_main_program.pbtxt" binary_program = "pruned_main_program.bin" fleet_util = FleetUtil() - text_to_binary = fleet_util.program_type_trans(program_dir, - text_program, True) - binary_to_text = fleet_util.program_type_trans(program_dir, - binary_program, False) + text_to_binary = fleet_util.program_type_trans( + program_dir, text_program, True + ) + binary_to_text = fleet_util.program_type_trans( + program_dir, binary_program, False + ) self.assertTrue( - os.path.exists(os.path.join(program_dir, text_to_binary))) + os.path.exists(os.path.join(program_dir, text_to_binary)) + ) self.assertTrue( - os.path.exists(os.path.join(program_dir, binary_to_text))) + os.path.exists(os.path.join(program_dir, binary_to_text)) + ) def test_parse_program_proto(self): data_dir = self.download_files() parse_program_file_path = os.path.join( - data_dir, os.path.join(self.pruned_dir, - "pruned_main_program.pbtxt")) + data_dir, os.path.join(self.pruned_dir, "pruned_main_program.pbtxt") + ) is_text_parse_program = True parse_output_dir = os.path.join(data_dir, self.pruned_dir) fleet_util = FleetUtil() - fleet_util.parse_program_proto(parse_program_file_path, - is_text_parse_program, parse_output_dir) + fleet_util.parse_program_proto( + parse_program_file_path, is_text_parse_program, parse_output_dir + ) ops_log = os.path.join(parse_output_dir, "ops.log") vars_log = os.path.join(parse_output_dir, "vars_all.log") - vars_persistable = os.path.join(parse_output_dir, - "vars_persistable.log") + vars_persistable = os.path.join( + parse_output_dir, "vars_persistable.log" + ) self.assertTrue(os.path.exists(ops_log)) self.assertTrue(os.path.exists(vars_log)) self.assertTrue(os.path.exists(vars_persistable)) @@ -89,7 +96,7 @@ class TestFleetUtils(unittest.TestCase): feed_config.feeded_vars_types = [np.float32, 
np.float32] feed_config.feeded_vars_filelist = [ os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_1")), - os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_2")) + os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_2")), ] fetch_config = config() @@ -106,7 +113,9 @@ class TestFleetUtils(unittest.TestCase): fleet_util = FleetUtil() # test saved var's shape - conf.dump_program_filename = "pruned_main_program.save_var_shape_not_match" + conf.dump_program_filename = ( + "pruned_main_program.save_var_shape_not_match" + ) self.assertRaises(Exception, fleet_util.check_vars_and_dump, conf) # test program.proto without feed_op and fetch_op @@ -114,10 +123,13 @@ class TestFleetUtils(unittest.TestCase): results = fleet_util.check_vars_and_dump(conf) self.assertTrue(len(results) == 1) np.testing.assert_array_almost_equal( - results[0], np.array([[3.0590223e-07]], dtype=np.float32)) + results[0], np.array([[3.0590223e-07]], dtype=np.float32) + ) # test feed_var's shape - conf.dump_program_filename = "pruned_main_program.feed_var_shape_not_match" + conf.dump_program_filename = ( + "pruned_main_program.feed_var_shape_not_match" + ) self.assertRaises(Exception, fleet_util.check_vars_and_dump, conf) # test correct case with feed_vars_filelist @@ -125,7 +137,8 @@ class TestFleetUtils(unittest.TestCase): results = fleet_util.check_vars_and_dump(conf) self.assertTrue(len(results) == 1) np.testing.assert_array_almost_equal( - results[0], np.array([[3.0590223e-07]], dtype=np.float32)) + results[0], np.array([[3.0590223e-07]], dtype=np.float32) + ) # test correct case without feed_vars_filelist conf.feed_config.feeded_vars_filelist = None @@ -145,14 +158,17 @@ class TestFleetUtils(unittest.TestCase): conf = config() conf.train_prog_path = os.path.join( - data_dir, os.path.join(self.train_dir, "join_main_program.pbtxt")) + data_dir, os.path.join(self.train_dir, "join_main_program.pbtxt") + ) conf.is_text_train_program = True # test not match conf.pruned_prog_path = os.path.join( data_dir, - os.path.join(self.pruned_dir, - "pruned_main_program.save_var_shape_not_match")) + os.path.join( + self.pruned_dir, "pruned_main_program.save_var_shape_not_match" + ), + ) conf.is_text_pruned_program = True conf.draw = False fleet_util = FleetUtil() @@ -161,8 +177,8 @@ class TestFleetUtils(unittest.TestCase): # test match conf.pruned_prog_path = os.path.join( - data_dir, os.path.join(self.pruned_dir, - "pruned_main_program.pbtxt")) + data_dir, os.path.join(self.pruned_dir, "pruned_main_program.pbtxt") + ) if sys.platform == 'win32' or sys.platform == 'sys.platform': conf.draw = False else: @@ -177,29 +193,39 @@ class TestFleetUtils(unittest.TestCase): else: data_dir = self.download_files() program_path = os.path.join( - data_dir, os.path.join(self.train_dir, - "join_main_program.pbtxt")) + data_dir, + os.path.join(self.train_dir, "join_main_program.pbtxt"), + ) is_text = True program = utils.load_program(program_path, is_text) output_dir = os.path.join(data_dir, self.train_dir) output_filename_1 = "draw_prog_1" output_filename_2 = "draw_prog_2" fleet_util = FleetUtil() - fleet_util.draw_from_program_file(program_path, is_text, output_dir, - output_filename_1) + fleet_util.draw_from_program_file( + program_path, is_text, output_dir, output_filename_1 + ) fleet_util.draw_from_program(program, output_dir, output_filename_2) self.assertTrue( os.path.exists( - os.path.join(output_dir, output_filename_1 + ".dot"))) + os.path.join(output_dir, output_filename_1 + ".dot") + ) + ) self.assertTrue( 
os.path.exists( - os.path.join(output_dir, output_filename_1 + ".pdf"))) + os.path.join(output_dir, output_filename_1 + ".pdf") + ) + ) self.assertTrue( os.path.exists( - os.path.join(output_dir, output_filename_2 + ".dot"))) + os.path.join(output_dir, output_filename_2 + ".dot") + ) + ) self.assertTrue( os.path.exists( - os.path.join(output_dir, output_filename_2 + ".pdf"))) + os.path.join(output_dir, output_filename_2 + ".pdf") + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py index 694cfe0cacc1f6ee49385baa1a8b183a4d8b37b1..b141b1ed65bc62e70a81b75b26c33f9f01db3b7a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py @@ -23,12 +23,10 @@ java_home = os.environ["JAVA_HOME"] class FSTest1(FSTestBase): - def test_timeout(self): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=6 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", None, time_out=6 * 1000, sleep_inter=100 + ) src = "hdfs_test_timeout" dst = "new_hdfs_test_timeout" fs.delete(dst) @@ -40,7 +38,8 @@ class FSTest1(FSTestBase): try: fs.mv(src, dst, test_exists=False) self.assertFalse( - 1, "can't execute cmd:{} output:{}".format(cmd, output)) + 1, "can't execute cmd:{} output:{}".format(cmd, output) + ) except FSTimeOut as e: print("execute mv {} to {} timeout".format(src, dst)) @@ -49,10 +48,9 @@ class FSTest1(FSTestBase): print("second mv ret:{} output:{}".format(ret, output)) def test_is_dir(self): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=6 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", None, time_out=6 * 1000, sleep_inter=100 + ) self.assertFalse(fs.is_dir("./test_hdfs.py")) s = """ java.io.IOException: Input/output error @@ -67,23 +65,24 @@ java.io.IOException: Input/output error at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65) at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79) at org.apache.hadoop.fs.FsShell.main(FsShell.java:2353) - """ + """ # fmt: off, avoid remove tabs in string print("split lines:", s.splitlines()) self.assertTrue(fs._test_match(s.splitlines()) != None) def test_config(self): config = {"fs.default.name": "hdfs://xxx", "hadoop.job.ugi": "ugi"} - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - config, - time_out=6 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", + config, + time_out=6 * 1000, + sleep_inter=100, + ) def test_exists(self): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=6 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", None, time_out=6 * 1000, sleep_inter=100 + ) self.assertFalse(fs.is_exist(os.path.abspath("./xxxx"))) self.assertFalse(fs.is_dir(os.path.abspath("./xxxx"))) self.assertTrue(fs.is_dir(os.path.abspath("./xxx/.."))) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs2.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs2.py index 682036a681a22fc568872a90757fed0190b88bd6..787a7c933449c2db635521eb1d9e34cfa707a241 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs2.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs2.py @@ -22,12 +22,10 @@ java_home = os.environ["JAVA_HOME"] class FSTest2(FSTestBase): - def test_hdfs(self): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=5 * 
1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", None, time_out=5 * 1000, sleep_inter=100 + ) self._test_rm(fs) self._test_touch(fs) self._test_dirs(fs) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs3.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs3.py index 6bf304dc1e79678efb130eb305ca58470c4a377d..9aa0f30e99cfc83b4ff55cb0631177c5088786b0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs3.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs3.py @@ -22,12 +22,10 @@ java_home = os.environ["JAVA_HOME"] class FSTest3(FSTestBase): - def test_hdfs(self): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=5 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", None, time_out=5 * 1000, sleep_inter=100 + ) self._test_mkdirs(fs) self._test_list_dir(fs) self._test_try_upload(fs) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_hybrid_parallel_inference_helper.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_hybrid_parallel_inference_helper.py index 06b3ec02de0c7be0a6baff10f663316fa2e707b6..b0d07f8e92b87142e3f1ba18cc78940818d98270 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_hybrid_parallel_inference_helper.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_hybrid_parallel_inference_helper.py @@ -18,7 +18,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestHybridParallelInferenceHelper(TestMultipleGpus): - def test_hybrid_parallel_inference_helper(self): self.run_mnist_2gpu('hybrid_parallel_inference_helper.py') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py index 026fa8fb9d44a0fe63135acbadac15e979b6de1c..916a21359a4f8e32ad12ceb876354ee99f1791c8 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py @@ -32,31 +32,33 @@ if fluid.core.is_compiled_with_cuda(): class SimpleConv(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(SimpleConv, self).__init__() - self._conv = fluid.dygraph.Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=None, - use_cudnn=True) + self._conv = fluid.dygraph.Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=None, + use_cudnn=True, + ) def forward(self, inputs): return self._conv(inputs) class TestAutoCast(unittest.TestCase): - def amp_guard_white_op(self): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') with fluid.dygraph.guard(): @@ -93,30 +95,37 @@ class TestAutoCast(unittest.TestCase): tracer = fluid.framework._dygraph_tracer() base_white_list = fluid.dygraph.amp.auto_cast.WHITE_LIST base_black_list = fluid.dygraph.amp.auto_cast.BLACK_LIST - with fluid.dygraph.amp_guard(custom_white_list=["log"], - custom_black_list=["conv2d"]): + with 
fluid.dygraph.amp_guard( + custom_white_list=["log"], custom_black_list=["conv2d"] + ): white_list, black_list = tracer._get_amp_op_list() self.assertTrue( - set(white_list) == (set(base_white_list) | {"log"}) - - {"conv2d"}) + set(white_list) + == (set(base_white_list) | {"log"}) - {"conv2d"} + ) self.assertTrue( - set(black_list) == (set(base_black_list) - {"log"}) - | {"conv2d"}) + set(black_list) + == (set(base_black_list) - {"log"}) | {"conv2d"} + ) base_white_list = fluid.dygraph.amp.auto_cast.PURE_FP16_WHITE_LIST base_black_list = fluid.dygraph.amp.auto_cast.PURE_FP16_BLACK_LIST - with fluid.dygraph.amp_guard(custom_white_list=["log"], - custom_black_list=["conv2d"], - level='O2'): + with fluid.dygraph.amp_guard( + custom_white_list=["log"], + custom_black_list=["conv2d"], + level='O2', + ): white_list, black_list = tracer._get_amp_op_list() self.assertTrue( - set(white_list) == (set(base_white_list) | {"log"}) - - {"conv2d"}) + set(white_list) + == (set(base_white_list) | {"log"}) - {"conv2d"} + ) self.assertTrue( - set(black_list) == (set(base_black_list) - {"log"}) - | {"conv2d"}) + set(black_list) + == (set(base_black_list) - {"log"}) | {"conv2d"} + ) def test_custom_op_list(self): self.custom_op_list() @@ -126,13 +135,16 @@ class TestAutoCast(unittest.TestCase): def func(): with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - with fluid.dygraph.amp_guard(custom_white_list=["conv2d"], - custom_black_list=["conv2d"]): + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + with fluid.dygraph.amp_guard( + custom_white_list=["conv2d"], custom_black_list=["conv2d"] + ): inp = fluid.dygraph.to_variable(inp_np) out = model(inp) @@ -149,35 +161,34 @@ class TestAutoCast(unittest.TestCase): with fluid.dygraph.amp_guard(True): out_amp_fp16 = conv2d(data) out_amp_fp32 = paddle.expand_as( - out_amp_fp16, - out_amp_fp16) # expand_as_v2 has no fp16 kernel + out_amp_fp16, out_amp_fp16 + ) # expand_as_v2 has no fp16 kernel with fluid.dygraph.amp_guard(True, level='O2'): out_purefp16_fp16 = conv2d(data) out_purefp16_fp32 = paddle.expand_as( - out_purefp16_fp16, - out_purefp16_fp16) # expand_as_v2 has no fp16 kernel + out_purefp16_fp16, out_purefp16_fp16 + ) # expand_as_v2 has no fp16 kernel self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32) self.assertTrue(out_amp_fp16.dtype == fluid.core.VarDesc.VarType.FP16) self.assertTrue(out_amp_fp32.dtype == fluid.core.VarDesc.VarType.FP32) self.assertTrue( - out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16) + out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16 + ) self.assertTrue( - out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32) + out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32 + ) def test_amp_guard_upsupported_fp16_op(self): self.amp_guard_upsupported_fp16_op() def mode_exception(self): - def func(): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') with fluid.dygraph.guard(): - conv2d = fluid.dygraph.Conv2D(3, - 2, - 3, - bias_attr=False, - act=None) + conv2d = fluid.dygraph.Conv2D( + 3, 2, 3, bias_attr=False, act=None + ) data = fluid.dygraph.to_variable(data) with fluid.dygraph.amp_guard(level='O'): out = conv2d(data) @@ -189,15 +200,14 @@ class TestAutoCast(unittest.TestCase): class TestAmpScaler(unittest.TestCase): - def scale(self): with fluid.dygraph.guard(): data = paddle.rand([10, 1024]) scaler = paddle.fluid.dygraph.AmpScaler(init_loss_scaling=1024) 
scaled_data = scaler.scale(data) self.assertEqual( - np.array_equal(scaled_data.numpy(), - data.numpy() * 1024), True) + np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True + ) def test_scale(self): self.scale() @@ -209,13 +219,16 @@ class TestAmpScaler(unittest.TestCase): paddle.seed(10) paddle.framework.random._manual_program_seed(10) with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) optimizer = fluid.optimizer.SGDOptimizer( - learning_rate=0.01, parameter_list=model.parameters()) + learning_rate=0.01, parameter_list=model.parameters() + ) scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -226,7 +239,8 @@ class TestAmpScaler(unittest.TestCase): scaled_loss = scaler.scale(loss) scaled_loss.backward() optimize_ops, params_grads = scaler.minimize( - optimizer, scaled_loss) + optimizer, scaled_loss + ) else: print('use no scaler') loss.backward() @@ -236,19 +250,25 @@ class TestAmpScaler(unittest.TestCase): outs_with_scaler = run_simple_conv(inp_np, use_scaler=True) outs_no_scaler = run_simple_conv(inp_np, use_scaler=False) - self.assertEqual(outs_with_scaler[0], - []) # optimize_ops is [] in dygraph mode - self.assertEqual(outs_no_scaler[0], - []) # optimize_ops is [] in dygraph mode + self.assertEqual( + outs_with_scaler[0], [] + ) # optimize_ops is [] in dygraph mode + self.assertEqual( + outs_no_scaler[0], [] + ) # optimize_ops is [] in dygraph mode for i in range(len(outs_with_scaler[1])): # check each grad - np.testing.assert_allclose(outs_with_scaler[1][i][1].numpy(), - outs_no_scaler[1][i][1].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[1][i][1].numpy(), + outs_no_scaler[1][i][1].numpy(), + rtol=1e-05, + ) # check each parameter - np.testing.assert_allclose(outs_with_scaler[1][i][0].numpy(), - outs_no_scaler[1][i][0].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[1][i][0].numpy(), + outs_no_scaler[1][i][0].numpy(), + rtol=1e-05, + ) def test_minimize(self): self.minimize() @@ -260,13 +280,16 @@ class TestAmpScaler(unittest.TestCase): paddle.seed(10) paddle.framework.random._manual_program_seed(10) with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -289,9 +312,11 @@ class TestAmpScaler(unittest.TestCase): for i in range(len(outs_with_scaler)): # check each parameter - np.testing.assert_allclose(outs_with_scaler[i].numpy(), - outs_no_scaler[i].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[i].numpy(), + outs_no_scaler[i].numpy(), + rtol=1e-05, + ) def test_step(self): self.step() @@ -300,16 +325,19 @@ class TestAmpScaler(unittest.TestCase): inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32) inp_np[0][1][2][3] = np.nan with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') + model = SimpleConv( + num_channels=3, + num_filters=64, + 
filter_size=7, + stride=2, + act='relu', + ) params_init = {} for param in model.parameters(): params_init[param.name] = param.numpy() optimizer = fluid.optimizer.SGDOptimizer( - learning_rate=0.01, parameter_list=model.parameters()) + learning_rate=0.01, parameter_list=model.parameters() + ) scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -322,18 +350,19 @@ class TestAmpScaler(unittest.TestCase): for param in model.parameters(): # param not update when tensor contains nan or inf - np.testing.assert_array_equal(param.numpy(), - params_init[param.name]) + np.testing.assert_array_equal( + param.numpy(), params_init[param.name] + ) def test_nan_inf(self): self.nan_inf() def step_update_exception(self): - def func1(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -347,8 +376,9 @@ class TestAmpScaler(unittest.TestCase): def func2(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -362,8 +392,9 @@ class TestAmpScaler(unittest.TestCase): def func3(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -380,13 +411,15 @@ class TestAmpScaler(unittest.TestCase): def test_get_and_set(self): with fluid.dygraph.guard(): - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) + scaler = paddle.amp.GradScaler( + enable=True, + init_loss_scaling=1024, + incr_ratio=2.0, + decr_ratio=0.5, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=2, + use_dynamic_loss_scaling=True, + ) self.assertEqual(scaler.is_enable() == True, True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) @@ -407,13 +440,15 @@ class TestAmpScaler(unittest.TestCase): def test_state_dict_and_load_state_dict(self): with fluid.dygraph.guard(): - scaler1 = paddle.amp.GradScaler(enable=True, - init_loss_scaling=14, - incr_ratio=233.0, - decr_ratio=0.523, - incr_every_n_steps=1090, - decr_every_n_nan_or_inf=20, - use_dynamic_loss_scaling=True) + scaler1 = paddle.amp.GradScaler( + enable=True, + init_loss_scaling=14, + incr_ratio=233.0, + decr_ratio=0.523, + incr_every_n_steps=1090, + decr_every_n_nan_or_inf=20, + use_dynamic_loss_scaling=True, + ) scaler_state = scaler1.state_dict() scaler2 = paddle.amp.GradScaler(enable=True) scaler2.load_state_dict(scaler_state) @@ -428,7 +463,6 @@ class TestAmpScaler(unittest.TestCase): self.assertEqual(scaler3.is_enable() == False, True) def test_state_dict_and_load_state_dict_error(self): - def test_error(): state_empty = {} scaler = paddle.amp.GradScaler(enable=True) @@ -438,7 
+472,6 @@ class TestAmpScaler(unittest.TestCase): def reader_decorator(reader): - def __reader__(): for item in reader(): img = np.array(item[0]).astype('float32').reshape(3, 224, 224) @@ -449,11 +482,9 @@ def reader_decorator(reader): class TestGradScalerStateDict(unittest.TestCase): - - def train_resnet(self, - enable_amp=True, - use_data_loader=True, - use_save_load=True): + def train_resnet( + self, enable_amp=True, use_data_loader=True, use_save_load=True + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -463,30 +494,35 @@ class TestGradScalerStateDict(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader @@ -496,13 +532,19 @@ class TestGradScalerStateDict(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -525,8 +567,9 @@ class TestGradScalerStateDict(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -543,43 +586,37 @@ class TestGradScalerStateDict(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_with_state_dict(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_use_state_dict = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=True) - out_no_state_dict = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=False) + out_use_state_dict = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=True + ) + out_no_state_dict = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=False + ) 
print('save_load:', out_use_state_dict[0], out_no_state_dict[0]) - np.testing.assert_allclose(out_use_state_dict[0], - out_no_state_dict[0], - rtol=1e-05) + np.testing.assert_allclose( + out_use_state_dict[0], out_no_state_dict[0], rtol=1e-05 + ) func_isinstance() class TestAmpDecorator(unittest.TestCase): - def test_mode_exception(self): - def func(): with fluid.dygraph.guard(): model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) opt = paddle.optimizer.SGD(parameters=model.parameters()) - model, opt = paddle.amp.decorate(models=model, - optimizers=opt, - level='O') + model, opt = paddle.amp.decorate( + models=model, optimizers=opt, level='O' + ) self.assertRaises(ValueError, func) def test_input_type_exception(self): - def test_error_model(): - class MyModel(object): - def __init__(self): print("A fake Model") @@ -598,9 +635,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): - def __init__(self): print("A fake Optimizer") @@ -613,48 +648,59 @@ class TestAmpDecorator(unittest.TestCase): def test_set_master_weight(self): model1 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt1 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model1.parameters(), - multi_precision=True) + opt1 = paddle.optimizer.Adam( + learning_rate=0.0001, + parameters=model1.parameters(), + multi_precision=True, + ) model2 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt2 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model2.parameters(), - multi_precision=False) - - model1, opt1 = paddle.amp.decorate(models=model1, - optimizers=opt1, - level='O2', - master_weight=None) + opt2 = paddle.optimizer.Adam( + learning_rate=0.0001, + parameters=model2.parameters(), + multi_precision=False, + ) + + model1, opt1 = paddle.amp.decorate( + models=model1, optimizers=opt1, level='O2', master_weight=None + ) self.assertEqual(opt1._multi_precision, True) - models, opt2 = paddle.amp.decorate(models=[model1, model2], - optimizers=opt2, - level='O2', - master_weight=None) + models, opt2 = paddle.amp.decorate( + models=[model1, model2], + optimizers=opt2, + level='O2', + master_weight=None, + ) self.assertEqual(opt2._multi_precision, True) model3 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt3 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model3.parameters()) + opt3 = paddle.optimizer.Adam( + learning_rate=0.0001, parameters=model3.parameters() + ) model4 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt4 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model4.parameters()) - - model3, opts = paddle.amp.decorate(models=model3, - optimizers=[opt3, opt4], - level='O2', - master_weight=True) + opt4 = paddle.optimizer.Adam( + learning_rate=0.0001, parameters=model4.parameters() + ) + + model3, opts = paddle.amp.decorate( + models=model3, + optimizers=[opt3, opt4], + level='O2', + master_weight=True, + ) self.assertEqual(opts[0]._multi_precision, True) self.assertEqual(opts[1]._multi_precision, True) models = [model3, model4] optimizers = [opt3, opt4] - models, optimizers = paddle.amp.decorate(models=models, - optimizers=optimizers, - level='O2', - master_weight=False) + models, optimizers = paddle.amp.decorate( + models=models, + optimizers=optimizers, + level='O2', + master_weight=False, + ) self.assertEqual(optimizers[0]._multi_precision, False) self.assertEqual(optimizers[1]._multi_precision, False) @@ 
-689,20 +735,19 @@ class TestAmpDecorator(unittest.TestCase): buffer = paddle.to_tensor(np.array([5]).astype("int32")) model.register_buffer("buffer_name", buffer, persistable=True) model = paddle.amp.decorate(models=model, level='O2') - self.assertEqual((model._buffers["buffer_name"].dtype == paddle.int32), - True) + self.assertEqual( + (model._buffers["buffer_name"].dtype == paddle.int32), True + ) class TestStateDictHookForAMP(unittest.TestCase): - def test_state_dict_hook(self): - def func_isinstance(): paddle.seed(100) model = paddle.nn.Linear(2, 4) - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) param_value_ori = {} for param in model.parameters(): param_value_ori[param.name] = param.numpy() @@ -723,7 +768,6 @@ class TestStateDictHookForAMP(unittest.TestCase): class TestPureFp16SaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -731,22 +775,19 @@ class TestPureFp16SaveLoad(unittest.TestCase): self.temp_dir.cleanup() def test_save_dtype_exception(self): - def func(): paddle.disable_static() model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) opt = paddle.optimizer.SGD(parameters=model.parameters()) - paddle.amp.decorate(models=model, - optimizers=opt, - level='O2', - save_dtype='int') + paddle.amp.decorate( + models=model, optimizers=opt, level='O2', save_dtype='int' + ) self.assertRaises(ValueError, func) - def train_resnet(self, - enable_amp=True, - use_data_loader=True, - use_save_load=True): + def train_resnet( + self, enable_amp=True, use_data_loader=True, use_save_load=True + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -756,38 +797,45 @@ class TestPureFp16SaveLoad(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader if enable_amp: - resnet, optimizer = paddle.amp.decorate(models=resnet, - optimizers=optimizer, - level='O2', - save_dtype='float32') + resnet, optimizer = paddle.amp.decorate( + models=resnet, + optimizers=optimizer, + level='O2', + save_dtype='float32', + ) for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: @@ -795,13 +843,19 @@ class TestPureFp16SaveLoad(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = 
np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -825,8 +879,9 @@ class TestPureFp16SaveLoad(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -839,7 +894,7 @@ class TestPureFp16SaveLoad(unittest.TestCase): obj = { 'model': resnet.state_dict(), 'opt': optimizer.state_dict(), - 'scaler': scaler.state_dict() + 'scaler': scaler.state_dict(), } path = os.path.join(self.temp_dir.name, 'model.pdparams') paddle.save(obj, path) @@ -847,39 +902,40 @@ class TestPureFp16SaveLoad(unittest.TestCase): obj_load = paddle.load(path) resnet = ResNet(use_cudnn=True) optimizer = optimizer_setting( - train_parameters, parameter_list=resnet.parameters()) + train_parameters, parameter_list=resnet.parameters() + ) resnet.set_state_dict(obj_load['model']) optimizer.set_state_dict(obj_load['opt']) scaler.load_state_dict(obj_load['scaler']) - resnet, optimizer = paddle.amp.decorate(models=resnet, - optimizers=optimizer, - level='O2', - save_dtype='float32') + resnet, optimizer = paddle.amp.decorate( + models=resnet, + optimizers=optimizer, + level='O2', + save_dtype='float32', + ) if use_data_loader: train_reader._reset() return dy_out, dy_param_value, dy_grad_value def test_with_save_load(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_use_save_load = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=True) - out_no_save_load = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=False) + out_use_save_load = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=True + ) + out_no_save_load = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=False + ) print('save_load:', out_use_save_load[0], out_no_save_load[0]) - np.testing.assert_allclose(out_use_save_load[0], - out_no_save_load[0], - rtol=1e-05) + np.testing.assert_allclose( + out_use_save_load[0], out_no_save_load[0], rtol=1e-05 + ) func_isinstance() class TestPureFp16InferenceSaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -895,21 +951,20 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, - (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype( + 'int64' + ) return image, label def __len__(self): return self.num_samples class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -920,10 +975,12 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): def train(layer, loader, 
loss_fn, opt): for epoch_id in range(EPOCH_NUM): for batch_id, (image, label) in enumerate(loader()): - with paddle.amp.auto_cast(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O2'): + with paddle.amp.auto_cast( + enable=True, + custom_white_list=None, + custom_black_list=None, + level='O2', + ): out = layer(image) loss = loss_fn(out, label) loss.backward() @@ -932,27 +989,31 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # train layer = LinearNet() - adam = paddle.optimizer.Adam(learning_rate=0.001, - parameters=layer.parameters(), - multi_precision=True) + adam = paddle.optimizer.Adam( + learning_rate=0.001, + parameters=layer.parameters(), + multi_precision=True, + ) loss_fn = nn.CrossEntropyLoss() - layer, adam = paddle.amp.decorate(models=layer, - optimizers=adam, - save_dtype='float32') + layer, adam = paddle.amp.decorate( + models=layer, optimizers=adam, save_dtype='float32' + ) dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) - loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) + loader = paddle.io.DataLoader( + dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) train(layer, loader, loss_fn, adam) # save path = os.path.join(self.temp_dir.name, 'example_model/linear') - paddle.jit.save(layer, - path, - input_spec=[InputSpec(shape=[IMAGE_SIZE], name='x')]) + paddle.jit.save( + layer, path, input_spec=[InputSpec(shape=[IMAGE_SIZE], name='x')] + ) # jit.load loaded_layer = paddle.jit.load(path) @@ -966,12 +1027,17 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # load_inference_model paddle.enable_static() exe = paddle.static.Executor() - [inference_program, feed_target_names, - fetch_targets] = (paddle.static.load_inference_model(path, exe)) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(path, exe) tensor_img = x - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) print("pred.numpy()", pred.numpy()) print("result", results[0]) np.testing.assert_array_equal(pred.numpy(), results[0]) @@ -986,11 +1052,13 @@ class TestResnet2(unittest.TestCase): Use paddle-2.0 API """ - def train_resnet(self, - enable_amp=True, - level='O1', - use_data_loader=False, - use_param_group=False): + def train_resnet( + self, + enable_amp=True, + level='O1', + use_data_loader=False, + use_param_group=False, + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -1014,43 +1082,42 @@ class TestResnet2(unittest.TestCase): # NOTE(zhiqiu): The Membership test operations(in / not in) calls "is" and "equal", # see details: https://docs.python.org/3/reference/expressions.html#membership-test-operations. 
# So do not use other_params = [p for p in resnet.parameters() if p not in conv_params] - optimizer = paddle.optimizer.Momentum(parameters=[{ - 'params': - conv_params, - 'learning_rate': - 0.01 - }, { - 'params': - other_params, - 'learning_rate': - 0.001 - }], - multi_precision=True) + optimizer = paddle.optimizer.Momentum( + parameters=[ + {'params': conv_params, 'learning_rate': 0.01}, + {'params': other_params, 'learning_rate': 0.001}, + ], + multi_precision=True, + ) else: optimizer = paddle.optimizer.SGD(parameters=resnet.parameters()) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader @@ -1063,13 +1130,19 @@ class TestResnet2(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -1094,8 +1167,9 @@ class TestResnet2(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -1107,70 +1181,65 @@ class TestResnet2(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_resnet(self): - def func_isinstance(): with fluid.dygraph.guard(): out_fp32 = self.train_resnet(enable_amp=False) out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() def test_with_data_loader(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_fp32 = self.train_resnet(enable_amp=False, - use_data_loader=True) - out_amp = self.train_resnet(enable_amp=True, - 
use_data_loader=True) - out_pure_fp16 = self.train_resnet(enable_amp=True, - use_data_loader=True, - level='O2') + out_fp32 = self.train_resnet( + enable_amp=False, use_data_loader=True + ) + out_amp = self.train_resnet( + enable_amp=True, use_data_loader=True + ) + out_pure_fp16 = self.train_resnet( + enable_amp=True, use_data_loader=True, level='O2' + ) print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() def test_param_group(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_fp32 = self.train_resnet(enable_amp=False, - use_data_loader=True, - use_param_group=True) - out_amp = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_param_group=True) - out_pure_fp16 = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_param_group=True, - level='O2') + out_fp32 = self.train_resnet( + enable_amp=False, use_data_loader=True, use_param_group=True + ) + out_amp = self.train_resnet( + enable_amp=True, use_data_loader=True, use_param_group=True + ) + out_pure_fp16 = self.train_resnet( + enable_amp=True, + use_data_loader=True, + use_param_group=True, + level='O2', + ) print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() @@ -1191,42 +1260,54 @@ class TestResnet(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) optimizer = paddle.optimizer.Momentum( - parameters=resnet.parameters(), multi_precision=True) + parameters=resnet.parameters(), multi_precision=True + ) np.random.seed(seed) train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.fluid.dygraph.AmpScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.fluid.dygraph.AmpScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if enable_amp and (level == 'O2'): resnet, optimizer = paddle.fluid.dygraph.amp_decorate( - models=resnet, optimizers=optimizer, level='O2') + models=resnet, optimizers=optimizer, level='O2' + ) for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: break - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in 
data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(dy_x_data) label = fluid.dygraph.to_variable(y_data) label.stop_gradient = True - with paddle.fluid.dygraph.amp_guard(enable=enable_amp, - level=level): + with paddle.fluid.dygraph.amp_guard( + enable=enable_amp, level=level + ): out = resnet(img) loss = fluid.layers.cross_entropy(input=out, label=label) @@ -1243,9 +1324,11 @@ class TestResnet(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -1256,31 +1339,27 @@ class TestResnet(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_resnet(self): - def func_isinstance(): out_fp32 = self.train_resnet(enable_amp=False) out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=0.01) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=0.01 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.1 + ) func_isinstance() class TestLayerNormFp16(unittest.TestCase): - r''' layer_norm and batch_norm support mixed inputs, i.e., only input x is fp16 + r'''layer_norm and batch_norm support mixed inputs, i.e., only input x is fp16 and other params are fp32. ''' def test_layer_norm_fp16(self): - def func_isinstance(): if fluid.is_compiled_with_cuda(): with fluid.dygraph.guard(fluid.CUDAPlace(0)): @@ -1290,7 +1369,8 @@ class TestLayerNormFp16(unittest.TestCase): out = layer_norm(x) self.assertTrue( - out.dtype == fluid.core.VarDesc.VarType.FP16) + out.dtype == fluid.core.VarDesc.VarType.FP16 + ) func_isinstance() @@ -1298,7 +1378,8 @@ class TestLayerNormFp16(unittest.TestCase): @unittest.skipIf( paddle.is_compiled_with_cuda() and not core.is_bfloat16_supported(core.CUDAPlace(0)), - "skip bf16 test if cuda is in use but bf16 is not supported by gpu arch.") + "skip bf16 test if cuda is in use but bf16 is not supported by gpu arch.", +) class TestBf16(unittest.TestCase): ''' test amp for BF16 @@ -1306,43 +1387,37 @@ class TestBf16(unittest.TestCase): def train(self, enable_amp=True, amp_level='O1'): paddle.seed(100) - input = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) 
+ input = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv = paddle.nn.Conv2D(4, 6, (3, 3)) if amp_level == 'O2': - conv = paddle.amp.decorate(models=conv, - level=amp_level, - dtype='bfloat16') - with paddle.amp.auto_cast(enable=enable_amp, - level=amp_level, - dtype='bfloat16'): + conv = paddle.amp.decorate( + models=conv, level=amp_level, dtype='bfloat16' + ) + with paddle.amp.auto_cast( + enable=enable_amp, level=amp_level, dtype='bfloat16' + ): output = conv(input) output = output.cast('float32') return output.numpy() def test_bf16(self): - def func_isinstance(): out_fp32 = self.train(enable_amp=False) out_bf16_O1 = self.train(enable_amp=True, amp_level='O1') out_bf16_O2 = self.train(enable_amp=True, amp_level='O2') - np.testing.assert_allclose(out_fp32, - out_bf16_O1, - rtol=0.001, - atol=0.1) - np.testing.assert_allclose(out_fp32, - out_bf16_O2, - rtol=0.001, - atol=0.1) + np.testing.assert_allclose( + out_fp32, out_bf16_O1, rtol=0.001, atol=0.1 + ) + np.testing.assert_allclose( + out_fp32, out_bf16_O2, rtol=0.001, atol=0.1 + ) func_isinstance() class TestAmpWithPyLyer(unittest.TestCase): - def test_pylayer(self): - class MyMM(PyLayer): - @staticmethod def forward(ctx, a, b): ctx.save_for_backward(a, b) @@ -1367,9 +1442,7 @@ class TestAmpWithPyLyer(unittest.TestCase): class TestAmpWithHook(unittest.TestCase): - def test_hook_change_dtype(self): - def func_isinstance(): with paddle.fluid.dygraph.guard(): v = paddle.rand([3, 3]) @@ -1390,7 +1463,6 @@ class TestAmpWithHook(unittest.TestCase): func_isinstance() def test_hook_change_place(self): - def func_isinstance(): with paddle.fluid.dygraph.guard(): v = paddle.rand([3, 3]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 7ff434987a9c1391099d7133c41fe217b5711b37..83c9462a89e9d20d827d0046d026ead3cd507cd1 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -31,31 +31,33 @@ if fluid.core.is_compiled_with_cuda(): class SimpleConv(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(SimpleConv, self).__init__() - self._conv = fluid.dygraph.Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=None, - use_cudnn=True) + self._conv = fluid.dygraph.Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=None, + use_cudnn=True, + ) def forward(self, inputs): return self._conv(inputs) class TestAutoCast(unittest.TestCase): - def amp_guard_white_op(self): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') with fluid.dygraph.guard(): @@ -92,30 +94,37 @@ class TestAutoCast(unittest.TestCase): tracer = fluid.framework._dygraph_tracer() base_white_list = fluid.dygraph.amp.auto_cast.WHITE_LIST base_black_list = fluid.dygraph.amp.auto_cast.BLACK_LIST - with fluid.dygraph.amp_guard(custom_white_list=["log"], - 
custom_black_list=["conv2d"]): + with fluid.dygraph.amp_guard( + custom_white_list=["log"], custom_black_list=["conv2d"] + ): white_list, black_list = tracer._get_amp_op_list() self.assertTrue( - set(white_list) == (set(base_white_list) | {"log"}) - - {"conv2d"}) + set(white_list) + == (set(base_white_list) | {"log"}) - {"conv2d"} + ) self.assertTrue( - set(black_list) == (set(base_black_list) - {"log"}) - | {"conv2d"}) + set(black_list) + == (set(base_black_list) - {"log"}) | {"conv2d"} + ) base_white_list = fluid.dygraph.amp.auto_cast.PURE_FP16_WHITE_LIST base_black_list = fluid.dygraph.amp.auto_cast.PURE_FP16_BLACK_LIST - with fluid.dygraph.amp_guard(custom_white_list=["log"], - custom_black_list=["conv2d"], - level='O2'): + with fluid.dygraph.amp_guard( + custom_white_list=["log"], + custom_black_list=["conv2d"], + level='O2', + ): white_list, black_list = tracer._get_amp_op_list() self.assertTrue( - set(white_list) == (set(base_white_list) | {"log"}) - - {"conv2d"}) + set(white_list) + == (set(base_white_list) | {"log"}) - {"conv2d"} + ) self.assertTrue( - set(black_list) == (set(base_black_list) - {"log"}) - | {"conv2d"}) + set(black_list) + == (set(base_black_list) - {"log"}) | {"conv2d"} + ) def test_custom_op_list(self): self.custom_op_list() @@ -125,13 +134,16 @@ class TestAutoCast(unittest.TestCase): def func(): with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - with fluid.dygraph.amp_guard(custom_white_list=["conv2d"], - custom_black_list=["conv2d"]): + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + with fluid.dygraph.amp_guard( + custom_white_list=["conv2d"], custom_black_list=["conv2d"] + ): inp = fluid.dygraph.to_variable(inp_np) out = model(inp) @@ -148,35 +160,34 @@ class TestAutoCast(unittest.TestCase): with fluid.dygraph.amp_guard(True): out_amp_fp16 = conv2d(data) out_amp_fp32 = paddle.expand_as( - out_amp_fp16, - out_amp_fp16) # expand_as_v2 has no fp16 kernel + out_amp_fp16, out_amp_fp16 + ) # expand_as_v2 has no fp16 kernel with fluid.dygraph.amp_guard(True, level='O2'): out_purefp16_fp16 = conv2d(data) out_purefp16_fp32 = paddle.expand_as( - out_purefp16_fp16, - out_purefp16_fp16) # expand_as_v2 has no fp16 kernel + out_purefp16_fp16, out_purefp16_fp16 + ) # expand_as_v2 has no fp16 kernel self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32) self.assertTrue(out_amp_fp16.dtype == fluid.core.VarDesc.VarType.FP16) self.assertTrue(out_amp_fp32.dtype == fluid.core.VarDesc.VarType.FP32) self.assertTrue( - out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16) + out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16 + ) self.assertTrue( - out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32) + out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32 + ) def test_amp_guard_upsupported_fp16_op(self): self.amp_guard_upsupported_fp16_op() def mode_exception(self): - def func(): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') with fluid.dygraph.guard(): - conv2d = fluid.dygraph.Conv2D(3, - 2, - 3, - bias_attr=False, - act=None) + conv2d = fluid.dygraph.Conv2D( + 3, 2, 3, bias_attr=False, act=None + ) data = fluid.dygraph.to_variable(data) with fluid.dygraph.amp_guard(level='O'): out = conv2d(data) @@ -188,15 +199,14 @@ class TestAutoCast(unittest.TestCase): class TestAmpScaler(unittest.TestCase): - def scale(self): with fluid.dygraph.guard(): data = paddle.rand([10, 1024]) scaler = 
paddle.fluid.dygraph.AmpScaler(init_loss_scaling=1024) scaled_data = scaler.scale(data) self.assertEqual( - np.array_equal(scaled_data.numpy(), - data.numpy() * 1024), True) + np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True + ) def test_scale(self): self.scale() @@ -208,13 +218,16 @@ class TestAmpScaler(unittest.TestCase): paddle.seed(10) paddle.framework.random._manual_program_seed(10) with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) optimizer = fluid.optimizer.SGDOptimizer( - learning_rate=0.01, parameter_list=model.parameters()) + learning_rate=0.01, parameter_list=model.parameters() + ) scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -225,7 +238,8 @@ class TestAmpScaler(unittest.TestCase): scaled_loss = scaler.scale(loss) scaled_loss.backward() optimize_ops, params_grads = scaler.minimize( - optimizer, scaled_loss) + optimizer, scaled_loss + ) else: print('use no scaler') loss.backward() @@ -235,19 +249,25 @@ class TestAmpScaler(unittest.TestCase): outs_with_scaler = run_simple_conv(inp_np, use_scaler=True) outs_no_scaler = run_simple_conv(inp_np, use_scaler=False) - self.assertEqual(outs_with_scaler[0], - []) # optimize_ops is [] in dygraph mode - self.assertEqual(outs_no_scaler[0], - []) # optimize_ops is [] in dygraph mode + self.assertEqual( + outs_with_scaler[0], [] + ) # optimize_ops is [] in dygraph mode + self.assertEqual( + outs_no_scaler[0], [] + ) # optimize_ops is [] in dygraph mode for i in range(len(outs_with_scaler[1])): # check each grad - np.testing.assert_allclose(outs_with_scaler[1][i][1].numpy(), - outs_no_scaler[1][i][1].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[1][i][1].numpy(), + outs_no_scaler[1][i][1].numpy(), + rtol=1e-05, + ) # check each parameter - np.testing.assert_allclose(outs_with_scaler[1][i][0].numpy(), - outs_no_scaler[1][i][0].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[1][i][0].numpy(), + outs_no_scaler[1][i][0].numpy(), + rtol=1e-05, + ) def test_minimize(self): self.minimize() @@ -259,13 +279,16 @@ class TestAmpScaler(unittest.TestCase): paddle.seed(10) paddle.framework.random._manual_program_seed(10) with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + model = SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -288,9 +311,11 @@ class TestAmpScaler(unittest.TestCase): for i in range(len(outs_with_scaler)): # check each parameter - np.testing.assert_allclose(outs_with_scaler[i].numpy(), - outs_no_scaler[i].numpy(), - rtol=1e-05) + np.testing.assert_allclose( + outs_with_scaler[i].numpy(), + outs_no_scaler[i].numpy(), + rtol=1e-05, + ) def test_step(self): self.step() @@ -299,16 +324,19 @@ class TestAmpScaler(unittest.TestCase): inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32) inp_np[0][1][2][3] = np.nan with fluid.dygraph.guard(): - model = SimpleConv(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') + model = 
SimpleConv( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) params_init = {} for param in model.parameters(): params_init[param.name] = param.numpy() optimizer = fluid.optimizer.SGDOptimizer( - learning_rate=0.01, parameter_list=model.parameters()) + learning_rate=0.01, parameter_list=model.parameters() + ) scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024) data = fluid.dygraph.to_variable(inp_np) @@ -321,18 +349,19 @@ class TestAmpScaler(unittest.TestCase): for param in model.parameters(): # param not update when tensor contains nan or inf - np.testing.assert_array_equal(param.numpy(), - params_init[param.name]) + np.testing.assert_array_equal( + param.numpy(), params_init[param.name] + ) def test_nan_inf(self): self.nan_inf() def step_update_exception(self): - def func1(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -346,8 +375,9 @@ class TestAmpScaler(unittest.TestCase): def func2(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -361,8 +391,9 @@ class TestAmpScaler(unittest.TestCase): def func3(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) conv = model(data) @@ -379,13 +410,15 @@ class TestAmpScaler(unittest.TestCase): def test_get_and_set(self): with fluid.dygraph.guard(): - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) + scaler = paddle.amp.GradScaler( + enable=True, + init_loss_scaling=1024, + incr_ratio=2.0, + decr_ratio=0.5, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=2, + use_dynamic_loss_scaling=True, + ) self.assertEqual(scaler.is_enable() == True, True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) @@ -406,13 +439,15 @@ class TestAmpScaler(unittest.TestCase): def test_state_dict_and_load_state_dict(self): with fluid.dygraph.guard(): - scaler1 = paddle.amp.GradScaler(enable=True, - init_loss_scaling=14, - incr_ratio=233.0, - decr_ratio=0.523, - incr_every_n_steps=1090, - decr_every_n_nan_or_inf=20, - use_dynamic_loss_scaling=True) + scaler1 = paddle.amp.GradScaler( + enable=True, + init_loss_scaling=14, + incr_ratio=233.0, + decr_ratio=0.523, + incr_every_n_steps=1090, + decr_every_n_nan_or_inf=20, + use_dynamic_loss_scaling=True, + ) scaler_state = scaler1.state_dict() scaler2 = paddle.amp.GradScaler(enable=True) scaler2.load_state_dict(scaler_state) @@ -427,7 +462,6 @@ class TestAmpScaler(unittest.TestCase): self.assertEqual(scaler3.is_enable() == False, True) def test_state_dict_and_load_state_dict_error(self): - def test_error(): state_empty = {} 
scaler = paddle.amp.GradScaler(enable=True) @@ -437,7 +471,6 @@ class TestAmpScaler(unittest.TestCase): def reader_decorator(reader): - def __reader__(): for item in reader(): img = np.array(item[0]).astype('float32').reshape(3, 224, 224) @@ -448,11 +481,9 @@ def reader_decorator(reader): class TestGradScalerStateDict(unittest.TestCase): - - def train_resnet(self, - enable_amp=True, - use_data_loader=True, - use_save_load=True): + def train_resnet( + self, enable_amp=True, use_data_loader=True, use_save_load=True + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -462,30 +493,35 @@ class TestGradScalerStateDict(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader @@ -495,13 +531,19 @@ class TestGradScalerStateDict(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -524,8 +566,9 @@ class TestGradScalerStateDict(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -542,43 +585,37 @@ class TestGradScalerStateDict(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_with_state_dict(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_use_state_dict = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=True) - out_no_state_dict = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=False) + out_use_state_dict = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=True + ) + out_no_state_dict = self.train_resnet( + enable_amp=True, 
use_data_loader=True, use_save_load=False + ) print('save_load:', out_use_state_dict[0], out_no_state_dict[0]) - np.testing.assert_allclose(out_use_state_dict[0], - out_no_state_dict[0], - rtol=1e-05) + np.testing.assert_allclose( + out_use_state_dict[0], out_no_state_dict[0], rtol=1e-05 + ) func_isinstance() class TestAmpDecorator(unittest.TestCase): - def test_mode_exception(self): - def func(): with fluid.dygraph.guard(): model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) opt = paddle.optimizer.SGD(parameters=model.parameters()) - model, opt = paddle.amp.decorate(models=model, - optimizers=opt, - level='O') + model, opt = paddle.amp.decorate( + models=model, optimizers=opt, level='O' + ) self.assertRaises(ValueError, func) def test_input_type_exception(self): - def test_error_model(): - class MyModel(object): - def __init__(self): print("A fake Model") @@ -597,9 +634,7 @@ class TestAmpDecorator(unittest.TestCase): self.assertRaises(RuntimeError, test_error_distributed_model) def test_error_optimizer(): - class MyOptimizer(object): - def __init__(self): print("A fake Optimizer") @@ -612,48 +647,59 @@ class TestAmpDecorator(unittest.TestCase): def test_set_master_weight(self): model1 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt1 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model1.parameters(), - multi_precision=True) + opt1 = paddle.optimizer.Adam( + learning_rate=0.0001, + parameters=model1.parameters(), + multi_precision=True, + ) model2 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt2 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model2.parameters(), - multi_precision=False) - - model1, opt1 = paddle.amp.decorate(models=model1, - optimizers=opt1, - level='O2', - master_weight=None) + opt2 = paddle.optimizer.Adam( + learning_rate=0.0001, + parameters=model2.parameters(), + multi_precision=False, + ) + + model1, opt1 = paddle.amp.decorate( + models=model1, optimizers=opt1, level='O2', master_weight=None + ) self.assertEqual(opt1._multi_precision, True) - models, opt2 = paddle.amp.decorate(models=[model1, model2], - optimizers=opt2, - level='O2', - master_weight=None) + models, opt2 = paddle.amp.decorate( + models=[model1, model2], + optimizers=opt2, + level='O2', + master_weight=None, + ) self.assertEqual(opt2._multi_precision, True) model3 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt3 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model3.parameters()) + opt3 = paddle.optimizer.Adam( + learning_rate=0.0001, parameters=model3.parameters() + ) model4 = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) - opt4 = paddle.optimizer.Adam(learning_rate=0.0001, - parameters=model4.parameters()) - - model3, opts = paddle.amp.decorate(models=model3, - optimizers=[opt3, opt4], - level='O2', - master_weight=True) + opt4 = paddle.optimizer.Adam( + learning_rate=0.0001, parameters=model4.parameters() + ) + + model3, opts = paddle.amp.decorate( + models=model3, + optimizers=[opt3, opt4], + level='O2', + master_weight=True, + ) self.assertEqual(opts[0]._multi_precision, True) self.assertEqual(opts[1]._multi_precision, True) models = [model3, model4] optimizers = [opt3, opt4] - models, optimizers = paddle.amp.decorate(models=models, - optimizers=optimizers, - level='O2', - master_weight=False) + models, optimizers = paddle.amp.decorate( + models=models, + optimizers=optimizers, + level='O2', + master_weight=False, + ) self.assertEqual(optimizers[0]._multi_precision, False) 
self.assertEqual(optimizers[1]._multi_precision, False) @@ -685,15 +731,13 @@ class TestAmpDecorator(unittest.TestCase): class TestStateDictHookForAMP(unittest.TestCase): - def test_state_dict_hook(self): - def func_isinstance(): paddle.seed(100) model = paddle.nn.Linear(2, 4) - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) param_value_ori = {} for param in model.parameters(): param_value_ori[param.name] = param.numpy() @@ -714,7 +758,6 @@ class TestStateDictHookForAMP(unittest.TestCase): class TestPureFp16SaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -722,22 +765,19 @@ class TestPureFp16SaveLoad(unittest.TestCase): self.temp_dir.cleanup() def test_save_dtype_exception(self): - def func(): paddle.disable_static() model = fluid.dygraph.Conv2D(3, 2, 3, bias_attr=False, act=None) opt = paddle.optimizer.SGD(parameters=model.parameters()) - paddle.amp.decorate(models=model, - optimizers=opt, - level='O2', - save_dtype='int') + paddle.amp.decorate( + models=model, optimizers=opt, level='O2', save_dtype='int' + ) self.assertRaises(ValueError, func) - def train_resnet(self, - enable_amp=True, - use_data_loader=True, - use_save_load=True): + def train_resnet( + self, enable_amp=True, use_data_loader=True, use_save_load=True + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -747,38 +787,45 @@ class TestPureFp16SaveLoad(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader if enable_amp: - resnet, optimizer = paddle.amp.decorate(models=resnet, - optimizers=optimizer, - level='O2', - save_dtype='float32') + resnet, optimizer = paddle.amp.decorate( + models=resnet, + optimizers=optimizer, + level='O2', + save_dtype='float32', + ) for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: @@ -786,13 +833,19 @@ class TestPureFp16SaveLoad(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in 
data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -816,8 +869,9 @@ class TestPureFp16SaveLoad(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -830,7 +884,7 @@ class TestPureFp16SaveLoad(unittest.TestCase): obj = { 'model': resnet.state_dict(), 'opt': optimizer.state_dict(), - 'scaler': scaler.state_dict() + 'scaler': scaler.state_dict(), } path = os.path.join(self.temp_dir.name, 'model.pdparams') paddle.save(obj, path) @@ -838,39 +892,40 @@ class TestPureFp16SaveLoad(unittest.TestCase): obj_load = paddle.load(path) resnet = ResNet(use_cudnn=True) optimizer = optimizer_setting( - train_parameters, parameter_list=resnet.parameters()) + train_parameters, parameter_list=resnet.parameters() + ) resnet.set_state_dict(obj_load['model']) optimizer.set_state_dict(obj_load['opt']) scaler.load_state_dict(obj_load['scaler']) - resnet, optimizer = paddle.amp.decorate(models=resnet, - optimizers=optimizer, - level='O2', - save_dtype='float32') + resnet, optimizer = paddle.amp.decorate( + models=resnet, + optimizers=optimizer, + level='O2', + save_dtype='float32', + ) if use_data_loader: train_reader._reset() return dy_out, dy_param_value, dy_grad_value def test_with_save_load(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_use_save_load = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=True) - out_no_save_load = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_save_load=False) + out_use_save_load = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=True + ) + out_no_save_load = self.train_resnet( + enable_amp=True, use_data_loader=True, use_save_load=False + ) print('save_load:', out_use_save_load[0], out_no_save_load[0]) - np.testing.assert_allclose(out_use_save_load[0], - out_no_save_load[0], - rtol=1e-05) + np.testing.assert_allclose( + out_use_save_load[0], out_no_save_load[0], rtol=1e-05 + ) func_isinstance() class TestPureFp16InferenceSaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -886,21 +941,20 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, - (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype( + 'int64' + ) return image, label def __len__(self): return self.num_samples class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -911,10 +965,12 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): def train(layer, loader, loss_fn, opt): for epoch_id in range(EPOCH_NUM): for batch_id, (image, label) in enumerate(loader()): - with paddle.amp.auto_cast(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O2'): + with paddle.amp.auto_cast( + enable=True, + custom_white_list=None, 
+ custom_black_list=None, + level='O2', + ): out = layer(image) loss = loss_fn(out, label) loss.backward() @@ -923,27 +979,31 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # train layer = LinearNet() - adam = paddle.optimizer.Adam(learning_rate=0.001, - parameters=layer.parameters(), - multi_precision=True) + adam = paddle.optimizer.Adam( + learning_rate=0.001, + parameters=layer.parameters(), + multi_precision=True, + ) loss_fn = nn.CrossEntropyLoss() - layer, adam = paddle.amp.decorate(models=layer, - optimizers=adam, - save_dtype='float32') + layer, adam = paddle.amp.decorate( + models=layer, optimizers=adam, save_dtype='float32' + ) dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) - loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) + loader = paddle.io.DataLoader( + dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) train(layer, loader, loss_fn, adam) # save path = os.path.join(self.temp_dir.name, 'example_model/linear') - paddle.jit.save(layer, - path, - input_spec=[InputSpec(shape=[IMAGE_SIZE], name='x')]) + paddle.jit.save( + layer, path, input_spec=[InputSpec(shape=[IMAGE_SIZE], name='x')] + ) # jit.load loaded_layer = paddle.jit.load(path) @@ -957,12 +1017,17 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): # load_inference_model paddle.enable_static() exe = paddle.static.Executor() - [inference_program, feed_target_names, - fetch_targets] = (paddle.static.load_inference_model(path, exe)) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(path, exe) tensor_img = x - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) print("pred.numpy()", pred.numpy()) print("result", results[0]) np.testing.assert_array_equal(pred.numpy(), results[0]) @@ -977,11 +1042,13 @@ class TestResnet2(unittest.TestCase): Use paddle-2.0 API """ - def train_resnet(self, - enable_amp=True, - level='O1', - use_data_loader=False, - use_param_group=False): + def train_resnet( + self, + enable_amp=True, + level='O1', + use_data_loader=False, + use_param_group=False, + ): seed = 90 batch_size = train_parameters["batch_size"] @@ -1005,43 +1072,42 @@ class TestResnet2(unittest.TestCase): # NOTE(zhiqiu): The Membership test operations(in / not in) calls "is" and "equal", # see details: https://docs.python.org/3/reference/expressions.html#membership-test-operations. 
# So do not use other_params = [p for p in resnet.parameters() if p not in conv_params] - optimizer = paddle.optimizer.Momentum(parameters=[{ - 'params': - conv_params, - 'learning_rate': - 0.01 - }, { - 'params': - other_params, - 'learning_rate': - 0.001 - }], - multi_precision=True) + optimizer = paddle.optimizer.Momentum( + parameters=[ + {'params': conv_params, 'learning_rate': 0.01}, + {'params': other_params, 'learning_rate': 0.001}, + ], + multi_precision=True, + ) else: optimizer = paddle.optimizer.SGD(parameters=resnet.parameters()) np.random.seed(seed) train_reader = paddle.batch( - paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size) + paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.amp.GradScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.amp.GradScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if use_data_loader: - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, - return_list=True) + return_list=True, + ) train_loader.set_sample_list_generator(train_reader) train_reader = train_loader @@ -1054,13 +1120,19 @@ class TestResnet2(unittest.TestCase): if use_data_loader: img, label = data else: - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -1085,8 +1157,9 @@ class TestResnet2(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -1098,70 +1171,65 @@ class TestResnet2(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_resnet(self): - def func_isinstance(): with fluid.dygraph.guard(): out_fp32 = self.train_resnet(enable_amp=False) out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() def test_with_data_loader(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_fp32 = self.train_resnet(enable_amp=False, - use_data_loader=True) - out_amp = self.train_resnet(enable_amp=True, - 
use_data_loader=True) - out_pure_fp16 = self.train_resnet(enable_amp=True, - use_data_loader=True, - level='O2') + out_fp32 = self.train_resnet( + enable_amp=False, use_data_loader=True + ) + out_amp = self.train_resnet( + enable_amp=True, use_data_loader=True + ) + out_pure_fp16 = self.train_resnet( + enable_amp=True, use_data_loader=True, level='O2' + ) print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() def test_param_group(self): - def func_isinstance(): with fluid.dygraph.guard(): - out_fp32 = self.train_resnet(enable_amp=False, - use_data_loader=True, - use_param_group=True) - out_amp = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_param_group=True) - out_pure_fp16 = self.train_resnet(enable_amp=True, - use_data_loader=True, - use_param_group=True, - level='O2') + out_fp32 = self.train_resnet( + enable_amp=False, use_data_loader=True, use_param_group=True + ) + out_amp = self.train_resnet( + enable_amp=True, use_data_loader=True, use_param_group=True + ) + out_pure_fp16 = self.train_resnet( + enable_amp=True, + use_data_loader=True, + use_param_group=True, + level='O2', + ) print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.01 + ) func_isinstance() @@ -1182,42 +1250,54 @@ class TestResnet(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) optimizer = paddle.optimizer.Momentum( - parameters=resnet.parameters(), multi_precision=True) + parameters=resnet.parameters(), multi_precision=True + ) np.random.seed(seed) train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) dy_param_init_value = {} for param in resnet.parameters(): dy_param_init_value[param.name] = param.numpy() program = None - scaler = paddle.fluid.dygraph.AmpScaler(enable=enable_amp, - init_loss_scaling=2.**10) + scaler = paddle.fluid.dygraph.AmpScaler( + enable=enable_amp, init_loss_scaling=2.0**10 + ) if enable_amp and (level == 'O2'): resnet, optimizer = paddle.fluid.dygraph.amp_decorate( - models=resnet, optimizers=optimizer, level='O2') + models=resnet, optimizers=optimizer, level='O2' + ) for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: break - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - if len(np.array([x[1] - for x in data]).astype('int64')) != batch_size: + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + if ( + len(np.array([x[1] for x in data]).astype('int64')) + != batch_size + ): continue - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + y_data = ( + np.array([x[1] for x in 
data]) + .astype('int64') + .reshape(-1, 1) + ) img = fluid.dygraph.to_variable(dy_x_data) label = fluid.dygraph.to_variable(y_data) label.stop_gradient = True - with paddle.fluid.dygraph.amp_guard(enable=enable_amp, - level=level): + with paddle.fluid.dygraph.amp_guard( + enable=enable_amp, level=level + ): out = resnet(img) loss = fluid.layers.cross_entropy(input=out, label=label) @@ -1234,9 +1314,11 @@ class TestResnet(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - fluid.core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + fluid.core.grad_var_suffix() + ] = np_array resnet.clear_gradients() @@ -1247,31 +1329,27 @@ class TestResnet(unittest.TestCase): return dy_out, dy_param_value, dy_grad_value def test_resnet(self): - def func_isinstance(): out_fp32 = self.train_resnet(enable_amp=False) out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - np.testing.assert_allclose(out_fp32[0], - out_amp[0], - rtol=1e-05, - atol=0.01) - np.testing.assert_allclose(out_fp32[0], - out_pure_fp16[0], - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + out_fp32[0], out_amp[0], rtol=1e-05, atol=0.01 + ) + np.testing.assert_allclose( + out_fp32[0], out_pure_fp16[0], rtol=1e-05, atol=0.1 + ) func_isinstance() class TestLayerNormFp16(unittest.TestCase): - r''' layer_norm and batch_norm support mixed inputs, i.e., only input x is fp16 + r'''layer_norm and batch_norm support mixed inputs, i.e., only input x is fp16 and other params are fp32. ''' def test_layer_norm_fp16(self): - def func_isinstance(): if fluid.is_compiled_with_cuda(): with fluid.dygraph.guard(fluid.CUDAPlace(0)): @@ -1281,7 +1359,8 @@ class TestLayerNormFp16(unittest.TestCase): out = layer_norm(x) self.assertTrue( - out.dtype == fluid.core.VarDesc.VarType.FP16) + out.dtype == fluid.core.VarDesc.VarType.FP16 + ) func_isinstance() @@ -1293,45 +1372,41 @@ class TestBf16(unittest.TestCase): def train(self, enable_amp=True, amp_level='O1'): paddle.seed(100) - input = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) 
+ input = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv = paddle.nn.Conv2D(4, 6, (3, 3)) if amp_level == 'O2': - conv = paddle.amp.decorate(models=conv, - level=amp_level, - dtype='bfloat16') - with paddle.amp.auto_cast(enable=enable_amp, - level=amp_level, - dtype='bfloat16'): + conv = paddle.amp.decorate( + models=conv, level=amp_level, dtype='bfloat16' + ) + with paddle.amp.auto_cast( + enable=enable_amp, level=amp_level, dtype='bfloat16' + ): output = conv(input) output = output.cast('float32') return output.numpy() def test_bf16(self): - def func_isinstance(): - if fluid.core.is_compiled_with_cuda( - ) and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)): + if ( + fluid.core.is_compiled_with_cuda() + and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)) + ): out_fp32 = self.train(enable_amp=False) out_bf16_O1 = self.train(enable_amp=True, amp_level='O1') out_bf16_O2 = self.train(enable_amp=True, amp_level='O2') - np.testing.assert_allclose(out_fp32, - out_bf16_O1, - rtol=0.001, - atol=0.1) - np.testing.assert_allclose(out_fp32, - out_bf16_O2, - rtol=0.001, - atol=0.1) + np.testing.assert_allclose( + out_fp32, out_bf16_O1, rtol=0.001, atol=0.1 + ) + np.testing.assert_allclose( + out_fp32, out_bf16_O2, rtol=0.001, atol=0.1 + ) func_isinstance() class TestAmpWithPyLyer(unittest.TestCase): - def test_pylayer(self): - class MyMM(PyLayer): - @staticmethod def forward(ctx, a, b): ctx.save_for_backward(a, b) @@ -1356,9 +1431,7 @@ class TestAmpWithPyLyer(unittest.TestCase): class TestAmpWithHook(unittest.TestCase): - def test_hook_change_dtype(self): - def func_isinstance(): with paddle.fluid.dygraph.guard(): v = paddle.rand([3, 3]) @@ -1379,7 +1452,6 @@ class TestAmpWithHook(unittest.TestCase): func_isinstance() def test_hook_change_place(self): - def func_isinstance(): with paddle.fluid.dygraph.guard(): v = paddle.rand([3, 3]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_ir_pass_pipeline.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_ir_pass_pipeline.py index 1b445f8f9873f33d9cd8e44a8e8b0df4bf8ff243..7d11c03a1f177b493192a7dbc151d88250c62931 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_ir_pass_pipeline.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_ir_pass_pipeline.py @@ -17,7 +17,6 @@ import test_pipeline class TestPipelineWithIRPass(test_pipeline.TestPipeline): - def need_envs(self): return {'FLAGS_apply_pass_to_program': '1'} diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_mixed_precision.py index 37fcf7c6509ba43eeac5662514f110b7df08aadc..46f12447b1d8538bea388babfc41d8fa091a861a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_mixed_precision.py @@ -22,7 +22,6 @@ paddle.enable_static() class SimpleNet(nn.Layer): - def __init__(self, input_size, output_size): super(SimpleNet, self).__init__() self.linear1 = nn.Linear(input_size, output_size) @@ -36,16 +35,15 @@ class SimpleNet(nn.Layer): x = self.linear1(x) # currently, paddle's relu may hide nan/inf, relu(nan) = 0, relu(inf)= inf # so, do not use it here. 
- #x = self.relu1(x) + # x = self.relu1(x) x = self.linear2(x) - #x = self.relu2(x) + # x = self.relu2(x) x = self.linear3(x) return x class AMPTest(unittest.TestCase): - def setUp(self): self.place = paddle.CUDAPlace(0) @@ -61,10 +59,11 @@ class AMPTest(unittest.TestCase): loss = mse(out, label) opt = paddle.fluid.optimizer.Adam( - learning_rate=0.0001, parameter_list=model.parameters()) # define the optimizer + learning_rate=0.0001, parameter_list=model.parameters() + ) # define the optimizer + opt = paddle.static.amp.decorate( + opt, init_loss_scaling=128.0, use_dynamic_loss_scaling=True + ) - opt = paddle.static.amp.decorate(opt, - init_loss_scaling=128.0, - use_dynamic_loss_scaling=True) opt.minimize(loss) return model, loss, opt @@ -79,11 +78,17 @@ class AMPTest(unittest.TestCase): model, loss, opt = self.net() weight = model.linear1.weight moment1 = opt._optimizer._get_accumulator( - opt._optimizer._moment1_acc_str, weight) + opt._optimizer._moment1_acc_str, weight + ) beta_pow1 = opt._optimizer._get_accumulator( - opt._optimizer._beta1_pow_acc_str, weight) + opt._optimizer._beta1_pow_acc_str, weight + ) fetch_list = [ - loss, weight, moment1, beta_pow1, 'find_infinite_scale.tmp_0' + loss, + weight, + moment1, + beta_pow1, + 'find_infinite_scale.tmp_0', ] exe = paddle.static.Executor(self.place) @@ -98,20 +103,24 @@ class AMPTest(unittest.TestCase): ] weight_, moment1_, beta_pow1_ = exe.run( - startup_prog, fetch_list=[weight, moment1, beta_pow1]) - pre_weight_, pre_moment1_, pre_beta_pow1_ = weight_, moment1_, beta_pow1_ + startup_prog, fetch_list=[weight, moment1, beta_pow1] + ) + pre_weight_, pre_moment1_, pre_beta_pow1_ = ( + weight_, + moment1_, + beta_pow1_, + ) for i in range(nums_batch): if i % 2: train_data[i][10] = np.inf loss_, weight_, moment1_, beta_pow1_, found_inf = exe.run( main_prog, - feed={ - "X": train_data[i], - "Y": labels[i] - }, - fetch_list=fetch_list) - print(loss_, weight_[0][0], moment1_[0][0], beta_pow1_, - found_inf) + feed={"X": train_data[i], "Y": labels[i]}, + fetch_list=fetch_list, + ) + print( + loss_, weight_[0][0], moment1_[0][0], beta_pow1_, found_inf + ) if i % 2: self.assertTrue(found_inf) np.testing.assert_array_equal(weight_, pre_weight_) @@ -122,7 +131,11 @@ class AMPTest(unittest.TestCase): self.assertFalse(np.array_equal(weight_, pre_weight_)) self.assertFalse(np.array_equal(moment1_, pre_moment1_)) self.assertFalse(np.array_equal(beta_pow1_, pre_beta_pow1_)) - pre_weight_, pre_moment1_, pre_beta_pow1_ = weight_, moment1_, beta_pow1_ + pre_weight_, pre_moment1_, pre_beta_pow1_ = ( + weight_, + moment1_, + beta_pow1_, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py index 0d22628c8aae5729b8dbfc59450fa22167e62203..a583afda701d74084486bd8181705656060694db 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_class_center_sample.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestParallelClassCenterSample(TestMultipleGpus): - def test_parallel_class_center_sample(self): self.run_mnist_2gpu('parallel_class_center_sample.py') self.run_mnist_2gpu('parallel_class_center_sample.py', eager_mode=False) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_control_flow.py 
b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_control_flow.py index c991818a5fbbde2dd58856749ab72d71b37473ef..9bbccc25e734307a98e6035371873405a3c00732 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_control_flow.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_control_flow.py @@ -22,7 +22,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDygraphControlFlowSame(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -31,14 +30,15 @@ class TestDygraphControlFlowSame(TestDistBase): def test_net(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_control_flow_same.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_control_flow_same.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestFleetDygraphControlFlowSame(TestDygraphControlFlowSame): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -48,7 +48,6 @@ class TestFleetDygraphControlFlowSame(TestDygraphControlFlowSame): class TestFleetDygraphControlFlowSameAccGrad(TestDygraphControlFlowSame): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -58,7 +57,6 @@ class TestFleetDygraphControlFlowSameAccGrad(TestDygraphControlFlowSame): class TestDygraphControlFlowDiff(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -67,14 +65,15 @@ class TestDygraphControlFlowDiff(TestDistBase): def test_net(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_control_flow_different.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_control_flow_different.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestFleetDygraphControlFlowDiff(TestDygraphControlFlowDiff): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -84,7 +83,6 @@ class TestFleetDygraphControlFlowDiff(TestDygraphControlFlowDiff): class TestFleetDygraphControlFlowDiffAccGrad(TestDygraphControlFlowDiff): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mnist.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mnist.py index 9d5fc508727122db739205a521bc046c241e9a35..a0933061bce8214698529dd33abc85cc0eb90b1c 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mnist.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mnist.py @@ -25,7 +25,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphMnist(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -38,14 +37,14 @@ class TestParallelDygraphMnist(TestDistBase): os.path.abspath("../../parallel_dygraph_mnist.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) -#TODO(liuyuhui): Multi-Card Baidu Kunlun XPU training has accuracy problems -#it is difficult to find out immediately where the problem is, -#and we will work with frameworkers' help to fix it. +# TODO(liuyuhui): Multi-Card Baidu Kunlun XPU training has accuracy problems +# it is difficult to find out immediately where the problem is, +# and we will work with frameworkers' help to fix it. 
class TestParallelDygraphMnistXPU(TestDistBase): - def _setup_config(self): self._sync_mode = False self._bkcl_mode = True @@ -58,18 +57,17 @@ class TestParallelDygraphMnistXPU(TestDistBase): os.path.abspath("../../parallel_dygraph_mnist.py"), delta=1e-4, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) class TestParallelDygraphMnistSpawn(TestDistSpawnRunner): - def test_mnist_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): self.check_dist_result_with_spawn(test_class=TestMnist, delta=1e-5) class TestParallelDygraphMnistAccGrad(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -84,11 +82,11 @@ class TestParallelDygraphMnistAccGrad(TestDistBase): os.path.abspath("../../parallel_dygraph_mnist.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) class TestFleetDygraphMnistXPU(TestDistBase): - def _setup_config(self): self._sync_mode = False self._bkcl_mode = True @@ -102,7 +100,8 @@ class TestFleetDygraphMnistXPU(TestDistBase): os.path.abspath("../../parallel_dygraph_mnist.py"), delta=1e-4, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py index 21aa82db3dc99e7045d9191b371a8ef8985a7f04..aeee6a4667678fdc39070565cb441cb947b0958e 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_mp_layers.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestModelParallelLayer(TestMultipleGpus): - def test_hybrid_parallel_mp_layer(self): self.run_mnist_2gpu('hybrid_parallel_mp_layers.py') self.run_mnist_2gpu('hybrid_parallel_mp_layers.py', eager_mode=False) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync.py index 8a1c1e7a0c58dbca6cf3ed63297a1233d8a4ab88..c44af13dc4ea97fdc49607ba208813eb60429ea5 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync.py @@ -27,7 +27,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphNoSync(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -36,14 +35,15 @@ class TestParallelDygraphNoSync(TestDistBase): def test_no_sync(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_no_sync.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_no_sync.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphNoSyncUnusedParam(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -52,14 +52,15 @@ class TestParallelDygraphNoSyncUnusedParam(TestDistBase): def test_no_sync_ununsed_param(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_no_sync_unused_params.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_no_sync_unused_params.py", + delta=1e-5, + check_error_log=True, + 
log_name=flag_name, + ) class TestParallelDygraphNoSyncControlFlow(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -68,39 +69,40 @@ class TestParallelDygraphNoSyncControlFlow(TestDistBase): def test_no_sync_control_flow(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_no_sync_control_flow.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_no_sync_control_flow.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphNoSyncSpawn(TestDistSpawnRunner): - def test_no_sync_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): self.check_dist_result_with_spawn(test_class=TestNoSync, delta=1e-5) class TestParallelDygraphNoSyncUnusedParamSpawn(TestDistSpawnRunner): - def _args_config(self, args): args.find_unused_parameters = True def test_no_sync_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): - self.check_dist_result_with_spawn(test_class=TestNoSyncUnusedParam, - delta=1e-5) + self.check_dist_result_with_spawn( + test_class=TestNoSyncUnusedParam, delta=1e-5 + ) class TestParallelDygraphNoSyncControlFlowSpawn(TestDistSpawnRunner): - def _args_config(self, args): args.find_unused_parameters = True def test_no_sync_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): - self.check_dist_result_with_spawn(test_class=TestNoSyncControlFlow, - delta=1e-5) + self.check_dist_result_with_spawn( + test_class=TestNoSyncControlFlow, delta=1e-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py index d170cd0ed54dfe97bc71444e015942895ec082df..e6467e07815a19ec68089c771ab296c911dc80aa 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_no_sync_gradient_check.py @@ -19,11 +19,11 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestDataParallelLayer(TestMultipleGpus): - def test_parallel_dygraph_dataparallel_no_sync(self): self.run_mnist_2gpu('parallel_dygraph_no_sync_gradient_check.py') - self.run_mnist_2gpu('parallel_dygraph_no_sync_gradient_check.py', - eager_mode=False) + self.run_mnist_2gpu( + 'parallel_dygraph_no_sync_gradient_check.py', eager_mode=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py index ebd1096fdc296cf70e07ec23a783cca728ce8dde..f45104de32c8a233534917ce60f3af322037ab7d 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel.py @@ -19,13 +19,14 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestHybridPipeParallel(TestMultipleGpus): - def test_hybrid_parallel_pp_layer(self): self.run_mnist_2gpu( - os.path.abspath('../../hybrid_parallel_pp_layer.py')) + os.path.abspath('../../hybrid_parallel_pp_layer.py') + ) self.run_mnist_2gpu( os.path.abspath('../../hybrid_parallel_pp_layer.py'), - 
eager_mode=False) + eager_mode=False, + ) def test_hybrid_parallel_pp_tuple_inputs(self): self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py') @@ -33,8 +34,9 @@ class TestHybridPipeParallel(TestMultipleGpus): def test_hybrid_parallel_shared_weight(self): self.run_mnist_2gpu('hybrid_parallel_shared_weight.py') - self.run_mnist_2gpu('hybrid_parallel_shared_weight.py', - eager_mode=False) + self.run_mnist_2gpu( + 'hybrid_parallel_shared_weight.py', eager_mode=False + ) def test_pipeline_parallel_amp(self): self.run_mnist_2gpu('hybrid_parallel_pp_amp.py') @@ -46,8 +48,9 @@ class TestHybridPipeParallel(TestMultipleGpus): def test_hybrid_parallel_transformer(self): self.run_mnist_2gpu('hybrid_parallel_pp_transformer.py') - self.run_mnist_2gpu('hybrid_parallel_pp_transformer.py', - eager_mode=False) + self.run_mnist_2gpu( + 'hybrid_parallel_pp_transformer.py', eager_mode=False + ) def test_hybrid_parallel_save_load(self): self.run_mnist_2gpu('hybrid_parallel_pp_save_load.py') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel_with_virtual_stage.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel_with_virtual_stage.py index 7af7d6dba1a07ef0cc10cdff3e1b6275bbf26e74..d09daa0764cb070649ddc71d50519de3c3efddc6 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel_with_virtual_stage.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_pipeline_parallel_with_virtual_stage.py @@ -19,17 +19,18 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestHybridPipeParallelWithVirtualStage(TestMultipleGpus): - def test_hybrid_parallel_pp_layer_with_virtual_stage(self): self.run_mnist_2gpu('hybrid_parallel_pp_layer_with_virtual_stage.py') def test_hybrid_parallel_pp_transformer_with_virtual_stage(self): self.run_mnist_2gpu( - 'hybrid_parallel_pp_transformer_with_virtual_stage.py') + 'hybrid_parallel_pp_transformer_with_virtual_stage.py' + ) def test_hybrid_parallel_save_load_with_virtual_stage(self): self.run_mnist_2gpu( - 'hybrid_parallel_pp_save_load_with_virtual_stage.py') + 'hybrid_parallel_pp_save_load_with_virtual_stage.py' + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_qat.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_qat.py index 842f7166118346bb5999b3e5da20ac98b5eb330c..4ea7ebc9162b59695f200d1aec8934eb9a8e7699 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_qat.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_qat.py @@ -19,7 +19,12 @@ import copy import os import subprocess -from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc +from paddle.distributed.utils.launch_utils import ( + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, +) def get_cluster_from_args(selected_gpus): @@ -47,17 +52,19 @@ def get_gpus(selected_gpus): return selected_gpus -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - eager_mode=True, - log_dir=None): +def start_local_trainers( + cluster, + pod, + training_script, + training_script_args, + eager_mode=True, + log_dir=None, +): current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. 
- #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -68,7 +75,7 @@ def start_local_trainers(cluster, "PADDLE_TRAINER_ID": "%d" % t.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), } if not eager_mode: @@ -101,10 +108,11 @@ def start_local_trainers(cluster, class TestMultipleGpus(unittest.TestCase): - def run_2gpu(self, target_file_name, eager_mode=True): - if not fluid.core.is_compiled_with_cuda( - ) or fluid.core.get_cuda_device_count() == 0: + if ( + not fluid.core.is_compiled_with_cuda() + or fluid.core.get_cuda_device_count() == 0 + ): return selected_gpus = get_gpus('0,1') @@ -113,11 +121,13 @@ class TestMultipleGpus(unittest.TestCase): cluster, pod = get_cluster_from_args(selected_gpus) - procs = start_local_trainers(cluster, - pod, - eager_mode=eager_mode, - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers( + cluster, + pod, + eager_mode=eager_mode, + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_endpoints()) @@ -129,7 +139,6 @@ class TestMultipleGpus(unittest.TestCase): class TestDataParallelQAT(TestMultipleGpus): - def test_multiple_gpus_qat(self): self.run_2gpu('hybrid_parallel_qat.py') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_se_resnext.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_se_resnext.py index 0bdbb20787e2d5bbc174bcfcf5fdadfbbac19800..68fb9468f4baa82e05d0a9b81ee59b2eddd0066d 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_se_resnext.py @@ -25,7 +25,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSeResNeXt(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -33,18 +32,20 @@ class TestParallelDygraphSeResNeXt(TestDistBase): def test_se_resnext(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_se_resnext.py", - delta=0.01, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_se_resnext.py", + delta=0.01, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphSeResNeXtSpawn(TestDistSpawnRunner): - def test_se_resnext_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): - self.check_dist_result_with_spawn(test_class=TestSeResNeXt, - delta=0.01) + self.check_dist_result_with_spawn( + test_class=TestSeResNeXt, delta=0.01 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py index f6c6e3378e43c377e1d350d58ebd637e50f3ce83..d0b17e86257704c914a00e16470fca9b7250f0d7 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py 
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sharding_parallel.py @@ -23,8 +23,9 @@ class TestHybridParallel(TestMultipleGpus): # check sharding logic as well as the accuracy with single mode def test_hybrid_parallel_sharding_logic(self): self.run_mnist_2gpu('hybrid_parallel_sharding_model.py') - self.run_mnist_2gpu('hybrid_parallel_sharding_model.py', - eager_mode=False) + self.run_mnist_2gpu( + 'hybrid_parallel_sharding_model.py', eager_mode=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding.py index 2f8ea8feddd2e4a7435aed4f7b98efd84836aca8..b1d304350cf6493d6b46b705d9e68a173f3cefce 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding.py @@ -25,7 +25,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSparseEmdedding(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -37,11 +36,11 @@ class TestParallelDygraphSparseEmdedding(TestDistBase): os.path.abspath("../../parallel_dygraph_sparse_embedding.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) class TestParallelDygraphSparseEmdeddingFP64(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -49,19 +48,22 @@ class TestParallelDygraphSparseEmdeddingFP64(TestDistBase): def test_sparse_embedding_fp64(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place(os.path.abspath( - "../../parallel_dygraph_sparse_embedding_fp64.py"), - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + os.path.abspath( + "../../parallel_dygraph_sparse_embedding_fp64.py" + ), + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphSparseEmdeddingSpawn(TestDistSpawnRunner): - def test_sparse_embedding_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): - self.check_dist_result_with_spawn(test_class=TestSparseEmbedding, - delta=1e-5) + self.check_dist_result_with_spawn( + test_class=TestSparseEmbedding, delta=1e-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py index b082da1b910d9ac45fd068f5a720e795f8f37534..b82bce4e1e19a91a7dbc0070f8da3b0f52e58679 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py @@ -19,13 +19,14 @@ import unittest import paddle.fluid as fluid from test_dist_base import TestDistBase from spawn_runner_base import TestDistSpawnRunner -from parallel_dygraph_sparse_embedding_over_height import TestSparseEmbeddingOverHeight +from parallel_dygraph_sparse_embedding_over_height import ( + TestSparseEmbeddingOverHeight, +) flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSparseEmdeddingOverHeight(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -33,19 +34,22 @@ class 
TestParallelDygraphSparseEmdeddingOverHeight(TestDistBase): def test_sparse_embedding(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place(os.path.abspath( - "../../parallel_dygraph_sparse_embedding_over_height.py"), - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + os.path.abspath( + "../../parallel_dygraph_sparse_embedding_over_height.py" + ), + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphSparseEmdeddingOverHeightSpawn(TestDistSpawnRunner): - def test_sparse_embedding_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): self.check_dist_result_with_spawn( - test_class=TestSparseEmbeddingOverHeight, delta=1e-5) + test_class=TestSparseEmbeddingOverHeight, delta=1e-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sync_batch_norm.py index 26e62bc75ec94d1285b2a8bee053b43af41b434f..1dd0ae278495bbd2d5ed2f55e38222cd329ea141 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sync_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_sync_batch_norm.py @@ -22,7 +22,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphMnist(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -30,10 +29,12 @@ class TestParallelDygraphMnist(TestDistBase): def test_mnist(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_sync_batch_norm.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_sync_batch_norm.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py index c2b1b17fc52cf643f41828560db9063f41ae50e6..c01d6ed6f080d9bcd0519be11ba0006c8dbdb3a0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_tensor_parallel.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestHybridParallel(TestMultipleGpus): - def test_hybrid_parallel_mp_random(self): self.run_mnist_2gpu('hybrid_parallel_mp_random.py') self.run_mnist_2gpu('hybrid_parallel_mp_random.py', eager_mode=False) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_transformer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_transformer.py index 8d12dfdce5891b6f19dcfa06b7804521fc773be2..58ad40894fefcea22773dee097103775292d6828 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_transformer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_transformer.py @@ -22,7 +22,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphTransformer(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -30,14 +29,15 @@ class TestParallelDygraphTransformer(TestDistBase): def test_transformer(self): if fluid.core.is_compiled_with_cuda(): - 
self.check_with_place("parallel_dygraph_transformer.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_transformer.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphTransformerAccGrad(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -47,10 +47,12 @@ class TestParallelDygraphTransformerAccGrad(TestDistBase): def test_transformer(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_transformer.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_transformer.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_unused_variables.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_unused_variables.py index 0a740b13c4eddb73c1fd30d81162b222cdc0b11a..a36eae2ccda01fc98b1737cea3e28ff7046df0b3 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_unused_variables.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_dygraph_unused_variables.py @@ -25,7 +25,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphUnusedVar(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -37,11 +36,11 @@ class TestParallelDygraphUnusedVar(TestDistBase): os.path.abspath("../../parallel_dygraph_unused_variables.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) class TestFleetDygraphUnusedVar(TestParallelDygraphUnusedVar): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -50,15 +49,14 @@ class TestFleetDygraphUnusedVar(TestParallelDygraphUnusedVar): class TestSparseEmbeddingUnusedVarsSpawn(TestDistSpawnRunner): - def test_mnist_with_spawn(self): if fluid.core.is_compiled_with_cuda() and sys.version_info >= (3, 4): self.check_dist_result_with_spawn( - test_class=TestSparseEmbeddingUnusedVars, delta=1e-5) + test_class=TestSparseEmbeddingUnusedVars, delta=1e-5 + ) class TestParallelDygraphNoVar(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -70,11 +68,11 @@ class TestParallelDygraphNoVar(TestDistBase): os.path.abspath("../../parallel_dygraph_none_var.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) class TestParallelDygraphSharedUnusedVariables(TestDistBase): - def _setup_config(self): self._sync_mode = False self._nccl2_mode = True @@ -86,7 +84,8 @@ class TestParallelDygraphSharedUnusedVariables(TestDistBase): os.path.abspath("../../parallel_dygraph_shared_unused_var.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py index b983be50f7309e9f4abc5810c48224836de30682..b4f7cfb39d8387a1feea4a75eaa3015af343d84a 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_parallel_margin_cross_entropy.py @@ -19,11 +19,11 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class 
TestParallelMarginSoftmaxWithCrossEntropy(TestMultipleGpus): - def test_parallel_margin_cross_entropy(self): self.run_mnist_2gpu('parallel_margin_cross_entropy.py') - self.run_mnist_2gpu('parallel_margin_cross_entropy.py', - eager_mode=False) + self.run_mnist_2gpu( + 'parallel_margin_cross_entropy.py', eager_mode=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_pipeline.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_pipeline.py index e9fd934f2869dd04dec1a7ce89e9f8282f54bea6..a495ac13c77f08736a3d29c22227918d46809bd8 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_pipeline.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_pipeline.py @@ -24,7 +24,6 @@ flag_name = os.path.splitext(__file__)[0] class TestPipeline(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -41,19 +40,23 @@ class TestPipeline(TestDistBase): # Now pipeline only gets the loss value of the last # microbatch, so it is not consistable with the # non-pipeline one. - self.check_with_place("pipeline_mnist.py", - delta=1e0, - check_error_log=True, - log_name=flag_name, - need_envs=self.need_envs()) + self.check_with_place( + "pipeline_mnist.py", + delta=1e0, + check_error_log=True, + log_name=flag_name, + need_envs=self.need_envs(), + ) def test_dist_train_multi_device(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("pipeline_mnist_multi_device.py", - check_error_log=True, - delta=1e0, - log_name=flag_name, - need_envs=self.need_envs()) + self.check_with_place( + "pipeline_mnist_multi_device.py", + check_error_log=True, + delta=1e0, + log_name=flag_name, + need_envs=self.need_envs(), + ) def test_dist_train_one_device(self): if fluid.core.is_compiled_with_cuda(): @@ -61,7 +64,8 @@ class TestPipeline(TestDistBase): "pipeline_mnist_one_device.py", check_error_log=True, log_name=flag_name, - need_envs={"PADDLE_MANUAL_PIPELINE_STAGE": "0"}) + need_envs={"PADDLE_MANUAL_PIPELINE_STAGE": "0"}, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_recv_save_op.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_recv_save_op.py index 927b02d74eb3ef4c6409af27b2db3fbf7a5db882..c75aadd450a7518c049f747aea61d42c853c17c9 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_recv_save_op.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_recv_save_op.py @@ -46,20 +46,18 @@ def run_pserver(pserver_id): param.set(param_array, place) optimize_block = program._create_block(program.global_block().idx) - program.global_block().append_op(type="listen_and_serv", - inputs={'X': []}, - outputs={}, - attrs={ - "optimize_blocks": - [optimize_block], - "endpoint": - '127.0.0.1:0', - "Fanin": - 1, - "distributed_mode": - DistributedMode.SYNC, - "grad_to_block_id": [] - }) + program.global_block().append_op( + type="listen_and_serv", + inputs={'X': []}, + outputs={}, + attrs={ + "optimize_blocks": [optimize_block], + "endpoint": '127.0.0.1:0', + "Fanin": 1, + "distributed_mode": DistributedMode.SYNC, + "grad_to_block_id": [], + }, + ) exe = fluid.Executor(place) exe.run(program) @@ -67,12 +65,11 @@ def run_pserver(pserver_id): @unittest.skip("do not need currently") class TestListenAndServOp(unittest.TestCase): - def setUp(self): self.ps_timeout = 5 def _start_pserver(self, pserver_id, pserver_func): - p = Process(target=pserver_func, args=(pserver_id, )) + p = Process(target=pserver_func, 
args=(pserver_id,)) p.daemon = True p.start() return p @@ -104,15 +101,17 @@ class TestListenAndServOp(unittest.TestCase): emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)] # create and run recv and save operator - remote_recv_op = Operator("recv_save", - trainer_id=0, - shape=[10, 8], - slice_shapes=["5,8", "5,8"], - slice_varnames=["table", "table"], - remote_varnames=['table', 'table'], - is_sparse=False, - endpoints=emaps, - file_path=model_file) + remote_recv_op = Operator( + "recv_save", + trainer_id=0, + shape=[10, 8], + slice_shapes=["5,8", "5,8"], + slice_varnames=["table", "table"], + remote_varnames=['table', 'table'], + is_sparse=False, + endpoints=emaps, + file_path=model_file, + ) remote_recv_op.run(scope, place) @@ -125,44 +124,53 @@ class TestListenAndServOp(unittest.TestCase): type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[10, 8], dtype="float32", - persistable=True) + persistable=True, + ) slice0 = load_block.create_var( name="var.slice0", type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[3, 8], dtype="float32", - persistable=True) + persistable=True, + ) slice1 = load_block.create_var( name="var.slice1", type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[5, 8], dtype="float32", - persistable=True) - - load_block.append_op(type='load', - inputs={}, - outputs={'Out': [origin]}, - attrs={'file_path': model_file}) - - load_block.append_op(type='load', - inputs={}, - outputs={'Out': [slice0]}, - attrs={ - 'file_path': model_file, - 'seek': 2 * 8, - 'shape': slice0.shape - }) - - load_block.append_op(type='load', - inputs={}, - outputs={'Out': [slice1]}, - attrs={ - 'file_path': model_file, - 'seek': 5 * 8, - 'shape': slice1.shape - }) + persistable=True, + ) + + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [origin]}, + attrs={'file_path': model_file}, + ) + + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [slice0]}, + attrs={ + 'file_path': model_file, + 'seek': 2 * 8, + 'shape': slice0.shape, + }, + ) + + load_block.append_op( + type='load', + inputs={}, + outputs={'Out': [slice1]}, + attrs={ + 'file_path': model_file, + 'seek': 5 * 8, + 'shape': slice1.shape, + }, + ) exe = fluid.Executor(place=fluid.CPUPlace()) exe.run(load_prog) @@ -179,8 +187,9 @@ class TestListenAndServOp(unittest.TestCase): np.testing.assert_equal(origin[5:10], slice1) def _save_by_io_persistables(self, place, port0, port1, dirname, var_name): - self._run_nce_op_two_pserver(place, port0, port1, - os.path.join(dirname, var_name)) + self._run_nce_op_two_pserver( + place, port0, port1, os.path.join(dirname, var_name) + ) def test_recv_save_op_remote(self): # run pserver on CPU in sync mode @@ -198,8 +207,9 @@ class TestListenAndServOp(unittest.TestCase): param_name = "table" for place in places: - self._save_by_io_persistables(place, port0, port1, param_dir, - param_name) + self._save_by_io_persistables( + place, port0, port1, param_dir, param_name + ) # raise SIGTERM to pserver os.kill(p0.pid, signal.SIGINT) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_rnn_dp.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_rnn_dp.py index 8976f7919d294e74c4e2d37b0c8f876afb4b34c5..9d116d74b257c89f836d4afd52691cd09c961737 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_rnn_dp.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_rnn_dp.py @@ -24,27 +24,30 @@ paddle.enable_static() class RNNEncoder(nn.Layer): - - def __init__(self, - input_size, - hidden_size, - num_layers=1, - 
direction="forward", - dropout=0.0, - pooling_type=None, - **kwargs): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + dropout=0.0, + pooling_type=None, + **kwargs + ): super().__init__() self._input_size = input_size self._hidden_size = hidden_size self._direction = direction self._pooling_type = pooling_type - self.rnn_layer = nn.SimpleRNN(input_size=input_size, - hidden_size=hidden_size, - num_layers=num_layers, - direction=direction, - dropout=dropout, - **kwargs) + self.rnn_layer = nn.SimpleRNN( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + direction=direction, + dropout=dropout, + **kwargs + ) def get_input_dim(self): return self._input_size @@ -57,34 +60,40 @@ class RNNEncoder(nn.Layer): def forward(self, inputs, sequence_length): encoded_text, last_hidden = self.rnn_layer( - inputs, sequence_length=sequence_length) + inputs, sequence_length=sequence_length + ) output = paddle.max(encoded_text, axis=1) return output class RNNModel(nn.Layer): - - def __init__(self, - vocab_size, - num_classes, - emb_dim=128, - padding_idx=0, - rnn_hidden_size=198, - direction='forward', - rnn_layers=1, - dropout_rate=0.0, - pooling_type=None, - fc_hidden_size=96): + def __init__( + self, + vocab_size, + num_classes, + emb_dim=128, + padding_idx=0, + rnn_hidden_size=198, + direction='forward', + rnn_layers=1, + dropout_rate=0.0, + pooling_type=None, + fc_hidden_size=96, + ): super().__init__() - self.embedder = nn.Embedding(num_embeddings=vocab_size, - embedding_dim=emb_dim, - padding_idx=padding_idx) - self.rnn_encoder = RNNEncoder(emb_dim, - rnn_hidden_size, - num_layers=rnn_layers, - direction=direction, - dropout=dropout_rate, - pooling_type=pooling_type) + self.embedder = nn.Embedding( + num_embeddings=vocab_size, + embedding_dim=emb_dim, + padding_idx=padding_idx, + ) + self.rnn_encoder = RNNEncoder( + emb_dim, + rnn_hidden_size, + num_layers=rnn_layers, + direction=direction, + dropout=dropout_rate, + pooling_type=pooling_type, + ) self.fc = nn.Linear(self.rnn_encoder.get_output_dim(), fc_hidden_size) self.output_layer = nn.Linear(fc_hidden_size, num_classes) @@ -97,26 +106,30 @@ class RNNModel(nn.Layer): def rnn_pretrain_forward(train_program, start_program, topo=None): - with static.program_guard(train_program, - start_program), paddle.utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), paddle.utils.unique_name.guard(): batch_size = 1 - tokens = static.data(name="tokens", - shape=[batch_size, -1], - dtype="int64") + tokens = static.data( + name="tokens", shape=[batch_size, -1], dtype="int64" + ) seq_len = static.data(name="ids", shape=[batch_size], dtype="int64") labels = static.data(name="labels", shape=[batch_size], dtype="int64") data_holders = [tokens, seq_len, labels] vocab_size = 10 num_classes = 2 pad_token_id = 0 - model = RNNModel(vocab_size, - num_classes, - direction='forward', - padding_idx=pad_token_id, - pooling_type='max') - - optimizer = paddle.optimizer.Adam(parameters=model.parameters(), - learning_rate=0.001) + model = RNNModel( + vocab_size, + num_classes, + direction='forward', + padding_idx=pad_token_id, + pooling_type='max', + ) + + optimizer = paddle.optimizer.Adam( + parameters=model.parameters(), learning_rate=0.001 + ) criterion = paddle.nn.CrossEntropyLoss() preds = model(tokens, seq_len) loss = criterion(preds, labels) @@ -125,23 +138,30 @@ def rnn_pretrain_forward(train_program, start_program, topo=None): class 
TestFleetMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" def test_rnn_raw_optimizer(self): import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker + role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) train_program = static.Program() start_program = static.Program() - train_program, start_program, loss, optimizer, data_holders = \ - rnn_pretrain_forward(train_program, start_program) + ( + train_program, + start_program, + loss, + optimizer, + data_holders, + ) = rnn_pretrain_forward(train_program, start_program) with paddle.static.program_guard( - train_program, start_program), paddle.utils.unique_name.guard(): + train_program, start_program + ), paddle.utils.unique_name.guard(): strategy = fleet.DistributedStrategy() strategy.without_graph_optimization = True strategy.fuse_all_reduce_ops = True diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_static_model_parallel.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_static_model_parallel.py index 06a9b0568227045325aceb30998c237c1a5961d4..6f51af27952813ce18b7ccbc0ff35621cf97a7f0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_static_model_parallel.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_static_model_parallel.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestStaticModelParallel(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -33,27 +32,36 @@ class TestStaticModelParallel(TestDistBase): def test_dist_static_model_parallel(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("static_model_parallel_by_row.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "static_model_parallel_by_row.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) def test_dist_static_model_parallel2(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("static_model_parallel_by_col.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "static_model_parallel_by_col.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) def test_dist_static_model_parallel3(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("static_model_parallel_embedding.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "static_model_parallel_embedding.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_tcp_store.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_tcp_store.py index ff5cff2c9cb9fc2ff87c1104e279199701d71ae6..01a39bc6241dbc061a10a297de7fa5474fea0c1f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_tcp_store.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_tcp_store.py @@ -18,7 +18,6 @@ import os class TestTCPStore(unittest.TestCase): - def test_tcp_store(self): dist_port = int(os.getenv("PADDLE_DIST_UT_PORT", 6170)) print("get dist_port:", dist_port) diff --git a/python/paddle/fluid/tests/unittests/collective/init_process_group.py 
b/python/paddle/fluid/tests/unittests/collective/init_process_group.py index dfb9dcf556c2177fc4fb967089e5aba03e5c9ab3..f45e4004831efc3e87413921431f1c9125f1314c 100644 --- a/python/paddle/fluid/tests/unittests/collective/init_process_group.py +++ b/python/paddle/fluid/tests/unittests/collective/init_process_group.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import _test_eager_guard class TestProcessGroupFp32(unittest.TestCase): - def setUp(self): self.config() diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/common.py b/python/paddle/fluid/tests/unittests/collective/multinode/common.py index cb2ff2ea09e6ca0048d8bfe60e6076264234571f..7849b6575c0880d3cf78a35fd19b7566358d6038 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/common.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/common.py @@ -17,8 +17,8 @@ from paddle.distributed import fleet def init_parallel_env(mode, global_batch_size, seed=1024): ''' - Args: - mode:(str) DP1-MP1-PP1-SH1-O1 + Args: + mode:(str) DP1-MP1-PP1-SH1-O1 ''' def parse_mode(mode): @@ -42,7 +42,7 @@ def init_parallel_env(mode, global_batch_size, seed=1024): "dp_degree": DP, "mp_degree": MP, "pp_degree": PP, - "sharding_degree": SH + "sharding_degree": SH, } accumulate_steps = 1 @@ -50,7 +50,7 @@ def init_parallel_env(mode, global_batch_size, seed=1024): if PP > 1: strategy.pipeline_configs = { "accumulate_steps": accumulate_steps, - "micro_batch_size": global_batch_size // DP // accumulate_steps + "micro_batch_size": global_batch_size // DP // accumulate_steps, } # set control in tensor parallel diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dp.py b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dp.py index 5583041db8b6710f80e5e3c17065cf41bdf063fa..bb904b3d6bd8e79a7cdf53aff55925f94d43c3e3 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dp.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dp.py @@ -12,31 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from test_collective_multi_nodes import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_multi_nodes import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) class TestDygrapgHybridDP(TestCollectiveAPIRunnerBase): - def __init__(self): pass def check_pass(self, *args, **kwargs): from common import init_parallel_env import paddle + hcg = init_parallel_env("DP16-MP1-PP1-SH1-O1", 2) import numpy as np + dp_group = hcg.get_data_parallel_group() np.random.seed(1024) data = np.random.random((10 * dp_group.nranks, 100)).reshape( - (dp_group.nranks, -1, 100)) + (dp_group.nranks, -1, 100) + ) data_part = paddle.to_tensor(data[dp_group.rank]) paddle.distributed.collective.all_reduce(data_part) data_reduced = data_part data_sumed = np.sum(data, axis=0) - assert np.allclose(data_sumed, - data_reduced.numpy(), - rtol=1e-8, - atol=1e-8) + assert np.allclose( + data_sumed, data_reduced.numpy(), rtol=1e-8, atol=1e-8 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dpppmp.py b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dpppmp.py index 6de0954b0e7e4dd448389c1348962ffd00767f80..055e3e24fbd00c59556f78edff144202ae5b9ada 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dpppmp.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_dpppmp.py @@ -15,7 +15,10 @@ import numpy as np import paddle import paddle.distributed.fleet as fleet -from test_collective_multi_nodes import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_multi_nodes import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) from paddle import nn import numpy as np @@ -28,15 +31,14 @@ def weight_init(mp, shape, col=True, seed=1024): else: if col: step = shape[1] // mp.nranks - _w = w[:, mp.rank * step:mp.rank * step + step] + _w = w[:, mp.rank * step : mp.rank * step + step] else: step = shape[0] // mp.nranks - _w = w[mp.rank * step:mp.rank * step + step, :] + _w = w[mp.rank * step : mp.rank * step + step, :] return paddle.fluid.initializer.NumpyArrayInitializer(_w) class Criterion(nn.Layer): - def __init__(self): super(Criterion, self).__init__() self.loss_func = nn.MSELoss(reduction="mean") @@ -47,7 +49,6 @@ class Criterion(nn.Layer): class ModelPipeline(fleet.meta_parallel.PipelineLayer): - def __init__(self, hcg): paddle.seed(1024) dp_linear = nn.Linear(32, 128) @@ -62,35 +63,38 @@ class ModelPipeline(fleet.meta_parallel.PipelineLayer): 512, weight_attr=weight_init(mp, (128, 512), True, 1204 + i), has_bias=True, - gather_output=False) + gather_output=False, + ) mp_linear_2 = fleet.meta_parallel.RowParallelLinear( 512, 128, weight_attr=weight_init(mp, (512, 128), False, 2012 + i), has_bias=True, - input_is_parallel=True) + input_is_parallel=True, + ) else: - mp_linear_1 = nn.Linear(128, - 512, - weight_attr=weight_init( - None, (128, 512), True, 1204 + i)) - mp_linear_2 = nn.Linear(512, - 128, - weight_attr=weight_init( - None, (512, 128), True, 2012 + i)) + mp_linear_1 = nn.Linear( + 128, + 512, + weight_attr=weight_init(None, (128, 512), True, 1204 + i), + ) + mp_linear_2 = nn.Linear( + 512, + 128, + weight_attr=weight_init(None, (512, 128), True, 2012 + i), + ) act = nn.ReLU6() layer_seq = nn.Sequential(mp_linear_1, mp_linear_2, act) self.layers_pp.append(layer_seq) out = nn.Linear(128, 32) self.layers_pp.append(out) - super(ModelPipeline, self).__init__(layers=self.layers_pp, - loss_fn=Criterion(), - topology=self.topology) + super(ModelPipeline, 
self).__init__( + layers=self.layers_pp, loss_fn=Criterion(), topology=self.topology + ) class Model(nn.Layer): - def __init__(self, hcg): super(Model, self).__init__() paddle.seed(1024) @@ -105,22 +109,26 @@ class Model(nn.Layer): 512, weight_attr=weight_init(mp, (128, 512), True, 1204 + i), has_bias=True, - gather_output=False) + gather_output=False, + ) mp_linear_2 = fleet.meta_parallel.RowParallelLinear( 512, 128, weight_attr=weight_init(mp, (512, 128), False, 2012 + i), has_bias=True, - input_is_parallel=True) + input_is_parallel=True, + ) else: - mp_linear_1 = nn.Linear(128, - 512, - weight_attr=weight_init( - None, (128, 512), True, 1204 + i)) - mp_linear_2 = nn.Linear(512, - 128, - weight_attr=weight_init( - None, (512, 128), True, 2012 + i)) + mp_linear_1 = nn.Linear( + 128, + 512, + weight_attr=weight_init(None, (128, 512), True, 1204 + i), + ) + mp_linear_2 = nn.Linear( + 512, + 128, + weight_attr=weight_init(None, (512, 128), True, 2012 + i), + ) act = nn.ReLU6() layer_seq = nn.Sequential(mp_linear_1, mp_linear_2, act) self.layers_pp.append(layer_seq) @@ -134,7 +142,6 @@ class Model(nn.Layer): class TestDygrapgHybridDPPPMP(TestCollectiveAPIRunnerBase): - def __init__(self): pass @@ -143,9 +150,11 @@ class TestDygrapgHybridDPPPMP(TestCollectiveAPIRunnerBase): from common import init_parallel_env import paddle from paddle.distributed import fleet + hcg = init_parallel_env("DP4-MP2-PP2-SH1-O1", 64) pp_degree = hcg.get_pipe_parallel_world_size() import numpy as np + crit = Criterion() if pp_degree <= 1: model = Model(hcg) @@ -154,10 +163,12 @@ class TestDygrapgHybridDPPPMP(TestCollectiveAPIRunnerBase): model_base = Model(None) - optimizer = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) optimizer_base = paddle.optimizer.Adam( - learning_rate=0.01, parameters=model_base.parameters()) + learning_rate=0.01, parameters=model_base.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_fp16.py b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_fp16.py index 0c4eddffeecec4269732d893af116d10b98789cb..04eacbc7fc445afb8e2c5d9858d43483b964734b 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_fp16.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_fp16.py @@ -15,7 +15,10 @@ import numpy as np import paddle import paddle.distributed.fleet as fleet -from test_collective_multi_nodes import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_multi_nodes import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) from paddle import nn import numpy as np @@ -28,15 +31,14 @@ def weight_init(mp, shape, col=True, seed=1024): else: if col: step = shape[1] // mp.nranks - _w = w[:, mp.rank * step:mp.rank * step + step] + _w = w[:, mp.rank * step : mp.rank * step + step] else: step = shape[0] // mp.nranks - _w = w[mp.rank * step:mp.rank * step + step, :] + _w = w[mp.rank * step : mp.rank * step + step, :] return paddle.fluid.initializer.NumpyArrayInitializer(_w) class Criterion(nn.Layer): - def __init__(self): super(Criterion, self).__init__() self.loss_func = nn.MSELoss(reduction="mean") @@ -47,7 +49,6 @@ class Criterion(nn.Layer): class ModelPipeline(fleet.meta_parallel.PipelineLayer): - def __init__(self, hcg): paddle.seed(1024) dp_linear = 
nn.Linear(32, 128) @@ -62,35 +63,38 @@ class ModelPipeline(fleet.meta_parallel.PipelineLayer): 512, weight_attr=weight_init(mp, (128, 512), True, 1204 + i), has_bias=True, - gather_output=False) + gather_output=False, + ) mp_linear_2 = fleet.meta_parallel.RowParallelLinear( 512, 128, weight_attr=weight_init(mp, (512, 128), False, 2012 + i), has_bias=True, - input_is_parallel=True) + input_is_parallel=True, + ) else: - mp_linear_1 = nn.Linear(128, - 512, - weight_attr=weight_init( - None, (128, 512), True, 1204 + i)) - mp_linear_2 = nn.Linear(512, - 128, - weight_attr=weight_init( - None, (512, 128), True, 2012 + i)) + mp_linear_1 = nn.Linear( + 128, + 512, + weight_attr=weight_init(None, (128, 512), True, 1204 + i), + ) + mp_linear_2 = nn.Linear( + 512, + 128, + weight_attr=weight_init(None, (512, 128), True, 2012 + i), + ) act = nn.ReLU6() layer_seq = nn.Sequential(mp_linear_1, mp_linear_2, act) self.layers_pp.append(layer_seq) out = nn.Linear(128, 32) self.layers_pp.append(out) - super(ModelPipeline, self).__init__(layers=self.layers_pp, - loss_fn=Criterion(), - topology=self.topology) + super(ModelPipeline, self).__init__( + layers=self.layers_pp, loss_fn=Criterion(), topology=self.topology + ) class Model(nn.Layer): - def __init__(self, hcg): super(Model, self).__init__() paddle.seed(1024) @@ -105,22 +109,26 @@ class Model(nn.Layer): 512, weight_attr=weight_init(mp, (128, 512), True, 1204 + i), has_bias=True, - gather_output=False) + gather_output=False, + ) mp_linear_2 = fleet.meta_parallel.RowParallelLinear( 512, 128, weight_attr=weight_init(mp, (512, 128), False, 2012 + i), has_bias=True, - input_is_parallel=True) + input_is_parallel=True, + ) else: - mp_linear_1 = nn.Linear(128, - 512, - weight_attr=weight_init( - None, (128, 512), True, 1204 + i)) - mp_linear_2 = nn.Linear(512, - 128, - weight_attr=weight_init( - None, (512, 128), True, 2012 + i)) + mp_linear_1 = nn.Linear( + 128, + 512, + weight_attr=weight_init(None, (128, 512), True, 1204 + i), + ) + mp_linear_2 = nn.Linear( + 512, + 128, + weight_attr=weight_init(None, (512, 128), True, 2012 + i), + ) act = nn.ReLU6() layer_seq = nn.Sequential(mp_linear_1, mp_linear_2, act) self.layers_pp.append(layer_seq) @@ -134,7 +142,6 @@ class Model(nn.Layer): class TestDygraphHybridFp16(TestCollectiveAPIRunnerBase): - def __init__(self): pass @@ -143,9 +150,11 @@ class TestDygraphHybridFp16(TestCollectiveAPIRunnerBase): from common import init_parallel_env import paddle from paddle.distributed import fleet + hcg = init_parallel_env("DP4-MP2-PP2-SH1-O1", 64) pp_degree = hcg.get_pipe_parallel_world_size() import numpy as np + crit = Criterion() if pp_degree <= 1: model = Model(hcg) @@ -154,17 +163,20 @@ class TestDygraphHybridFp16(TestCollectiveAPIRunnerBase): model_base = Model(None) - optimizer = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters(), - multi_precision=True) + optimizer = paddle.optimizer.Adam( + learning_rate=0.01, + parameters=model.parameters(), + multi_precision=True, + ) optimizer_base = paddle.optimizer.Adam( - learning_rate=0.01, parameters=model_base.parameters()) + learning_rate=0.01, parameters=model_base.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=4096) scaler = fleet.distributed_scaler(scaler) - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) @@ -177,9 +189,9 @@ 
class TestDygraphHybridFp16(TestCollectiveAPIRunnerBase): for _ in range(2): if pp_degree > 1: with paddle.amp.auto_cast(True, level='O2'): - loss = model.train_batch([x, y], - optimizer=optimizer, - scaler=scaler) + loss = model.train_batch( + [x, y], optimizer=optimizer, scaler=scaler + ) else: with paddle.amp.auto_cast(True, level='O2'): output = model(x) diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_recompute.py b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_recompute.py index 16e6d1602f39eefc2369c50f391a5509175e8153..f0441f4eab838e51b0154b7a54b8082dbe2d5878 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/dygraph_hybrid_recompute.py @@ -15,7 +15,10 @@ import numpy as np import paddle import paddle.distributed.fleet as fleet -from test_collective_multi_nodes import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_multi_nodes import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) from paddle import nn import numpy as np @@ -30,15 +33,14 @@ def weight_init(mp, shape, col=True, seed=1024): else: if col: step = shape[1] // mp.nranks - _w = w[:, mp.rank * step:mp.rank * step + step] + _w = w[:, mp.rank * step : mp.rank * step + step] else: step = shape[0] // mp.nranks - _w = w[mp.rank * step:mp.rank * step + step, :] + _w = w[mp.rank * step : mp.rank * step + step, :] return paddle.fluid.initializer.NumpyArrayInitializer(_w) class Criterion(nn.Layer): - def __init__(self): super(Criterion, self).__init__() self.loss_func = nn.MSELoss(reduction="mean") @@ -49,7 +51,6 @@ class Criterion(nn.Layer): class RecomputeMatmulBlock(nn.Layer): - def __init__(self, mp, seed, m, n, k): super(RecomputeMatmulBlock, self).__init__() self.mp = mp @@ -59,22 +60,22 @@ class RecomputeMatmulBlock(nn.Layer): n, weight_attr=weight_init(mp, (m, n), True, seed), has_bias=True, - gather_output=False) + gather_output=False, + ) mp_linear_2 = fleet.meta_parallel.RowParallelLinear( n, k, weight_attr=weight_init(mp, (n, k), False, seed + 1), has_bias=True, - input_is_parallel=True) + input_is_parallel=True, + ) else: - mp_linear_1 = nn.Linear(m, - n, - weight_attr=weight_init( - None, (m, n), True, seed)) - mp_linear_2 = nn.Linear(n, - k, - weight_attr=weight_init( - None, (n, k), True, seed + 1)) + mp_linear_1 = nn.Linear( + m, n, weight_attr=weight_init(None, (m, n), True, seed) + ) + mp_linear_2 = nn.Linear( + n, k, weight_attr=weight_init(None, (n, k), True, seed + 1) + ) self.layers = nn.Sequential(mp_linear_1, mp_linear_2) def forward(self, x): @@ -88,7 +89,6 @@ RecomputeBlock = RecomputeMatmulBlock class ModelPipeline(fleet.meta_parallel.PipelineLayer): - def __init__(self, hcg): paddle.seed(1024) dp_linear = nn.Linear(32, 64) @@ -104,13 +104,12 @@ class ModelPipeline(fleet.meta_parallel.PipelineLayer): out = nn.Linear(64, 32) self.layers_pp.append(out) - super(ModelPipeline, self).__init__(layers=self.layers_pp, - loss_fn=Criterion(), - topology=self.topology) + super(ModelPipeline, self).__init__( + layers=self.layers_pp, loss_fn=Criterion(), topology=self.topology + ) class Model(nn.Layer): - def __init__(self, hcg): super(Model, self).__init__() paddle.seed(1024) @@ -133,7 +132,6 @@ class Model(nn.Layer): class TestDygrapgHybridRecompute(TestCollectiveAPIRunnerBase): - def __init__(self): pass @@ -142,9 +140,11 @@ class TestDygrapgHybridRecompute(TestCollectiveAPIRunnerBase): from common import init_parallel_env 
import paddle from paddle.distributed import fleet + hcg = init_parallel_env("DP4-MP2-PP2-SH1-O1", 64) pp_degree = hcg.get_pipe_parallel_world_size() import numpy as np + crit = Criterion() if pp_degree <= 1: model = Model(hcg) @@ -153,10 +153,12 @@ class TestDygrapgHybridRecompute(TestCollectiveAPIRunnerBase): model_base = Model(None) - optimizer = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) optimizer_base = paddle.optimizer.Adam( - learning_rate=0.01, parameters=model_base.parameters()) + learning_rate=0.01, parameters=model_base.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_group_sharded_stage3.py b/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_group_sharded_stage3.py index 9c432eefa8a297bde0064ca57745d935a81f7962..081cd04afb38936d72090bb1649152a972b9adca 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_group_sharded_stage3.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_group_sharded_stage3.py @@ -23,10 +23,18 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import GroupShardedOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import GroupShardedStage2 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import GroupShardedStage3 -from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( + GroupShardedOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import ( + GroupShardedStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( + GroupShardedStage3, +) +from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( + GroupShardedScaler, +) epoch = 10 paddle.seed(2022) @@ -37,7 +45,6 @@ l2_decay = 1e-4 class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -57,7 +64,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -70,54 +76,60 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) optimizer = paddle.optimizer.Momentum( - parameters=[{ - "params": list(model.parameters()) - }] if opt_group else list(model.parameters()), + parameters=[{"params": list(model.parameters())}] + if opt_group + else list(model.parameters()), learning_rate=0.001, weight_decay=0.00001, grad_clip=clip, - multi_precision=use_pure_fp16) + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - use_pure_fp16=False, - accumulate_grad=False, - batch_size=100, - opt_group=False, - sync_comm=False, - test_minimize=False, - save_model=False): +def train_mlp( + model, + sharding_stage, + use_pure_fp16=False, + accumulate_grad=False, + batch_size=100, + opt_group=False, + sync_comm=False, + 
test_minimize=False, + save_model=False, +): group = paddle.distributed.new_group( - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if use_pure_fp16: - model = paddle.amp.decorate(models=model, - level='O2', - save_dtype='float32') + model = paddle.amp.decorate( + models=model, level='O2', save_dtype='float32' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=32768) scaler = GroupShardedScaler(scaler) if sharding_stage == 2: optimizer = GroupShardedOptimizerStage2( - params=optimizer._parameter_list, optim=optimizer, group=group) - model = GroupShardedStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + params=optimizer._parameter_list, optim=optimizer, group=group + ) + model = GroupShardedStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) elif sharding_stage == 3: - model = GroupShardedStage3(model, - optimizer=optimizer, - group=group, - sync_comm=sync_comm, - segment_size=2**15) + model = GroupShardedStage3( + model, + optimizer=optimizer, + group=group, + sync_comm=sync_comm, + segment_size=2**15, + ) # check optimizer.minimize() error if test_minimize: @@ -125,18 +137,21 @@ def train_mlp(model, optimizer.minimize() except: print( - "====== Find sharding_stage3_optimizer.minimize() error ======") + "====== Find sharding_stage3_optimizer.minimize() error ======" + ) return - train_reader = paddle.batch(reader_decorator(), - batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) for eop in range(epoch): @@ -147,8 +162,9 @@ def train_mlp(model, img.stop_gradient = True with paddle.amp.auto_cast(True, level='O2'): out = model(img) - loss = paddle.nn.functional.cross_entropy(input=out, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label + ) avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32)) if batch_size == 20: @@ -183,8 +199,19 @@ def train_mlp(model, def test_stage2_stage3(): paddle.distributed.init_parallel_env() - mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = MLP( - ), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP(), MLP() + mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6, mlp7, mlp8, mlp9, mlp10 = ( + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + MLP(), + ) state_dict = mlp.state_dict() mlp1.set_state_dict(state_dict) mlp2.set_state_dict(state_dict) @@ -198,79 +225,88 @@ def test_stage2_stage3(): mlp10.set_state_dict(state_dict) # fp32 - stage2_params = train_mlp(mlp1, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) - stage3_params = train_mlp(mlp2, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False) + stage2_params = train_mlp( + mlp1, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) + stage3_params = 
train_mlp( + mlp2, sharding_stage=3, use_pure_fp16=False, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-6, - atol=1e-6) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-6, + atol=1e-6, + ) # fp32 accumulate grad - stage3_params = train_mlp(mlp3, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - opt_group=True) - stage3_params_add = train_mlp(mlp4, - sharding_stage=3, - use_pure_fp16=False, - accumulate_grad=True, - batch_size=20, - opt_group=True) + stage3_params = train_mlp( + mlp3, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + opt_group=True, + ) + stage3_params_add = train_mlp( + mlp4, + sharding_stage=3, + use_pure_fp16=False, + accumulate_grad=True, + batch_size=20, + opt_group=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_add[i].numpy(), - rtol=1e-6, - atol=1e-4) + np.testing.assert_allclose( + stage3_params[i].numpy(), + stage3_params_add[i].numpy(), + rtol=1e-6, + atol=1e-4, + ) # fp16 - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=True, - opt_group=False) - stage3_params = train_mlp(mlp6, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=True, opt_group=False + ) + stage3_params = train_mlp( + mlp6, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage3_params[i].numpy(), - rtol=1e-4, - atol=1e-3) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage3_params[i].numpy(), + rtol=1e-4, + atol=1e-3, + ) # fp16 sync_comm - stage3_params = train_mlp(mlp7, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False) - stage3_params_re = train_mlp(mlp8, - sharding_stage=3, - use_pure_fp16=True, - opt_group=False, - sync_comm=True) + stage3_params = train_mlp( + mlp7, sharding_stage=3, use_pure_fp16=True, opt_group=False + ) + stage3_params_re = train_mlp( + mlp8, + sharding_stage=3, + use_pure_fp16=True, + opt_group=False, + sync_comm=True, + ) for i in range(len(stage3_params)): - np.testing.assert_allclose(stage3_params[i].numpy(), - stage3_params_re[i].numpy(), - rtol=1e-6) + np.testing.assert_allclose( + stage3_params[i].numpy(), stage3_params_re[i].numpy(), rtol=1e-6 + ) # save/load model output_dir = tempfile.mkdtemp() try: model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage3, optimizer_stage3 = train_mlp(mlp9, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage3, optimizer_stage3 = train_mlp( + mlp9, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage3.state_dict(), model_file) paddle.save(optimizer_stage3.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) @@ -284,11 +320,13 @@ def test_stage2_stage3(): shutil.rmtree(output_dir) # check optimizer.minimize() error - train_mlp(mlp10, - sharding_stage=3, - use_pure_fp16=False, - opt_group=False, - test_minimize=True) + train_mlp( + mlp10, + sharding_stage=3, + use_pure_fp16=False, + opt_group=False, + test_minimize=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_sharding_stage2.py 
b/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_sharding_stage2.py index affb6d31f2ae5527bd6bee2cedfa05917dfb0dc6..eeff08c3557ce4eeeb22ecb64900e2391b7abe0e 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_sharding_stage2.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/mn_dygraph_sharding_stage2.py @@ -24,8 +24,12 @@ from paddle.fluid.dygraph.nn import Linear from paddle.distributed import fleet from paddle.fluid.framework import _test_eager_guard -from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ShardingOptimizerStage2 -from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ShardingStage2 +from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimizer_stage2 import ( + ShardingOptimizerStage2, +) +from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import ( + ShardingStage2, +) seed = 2022 epoch = 2 @@ -36,7 +40,7 @@ strategy.hybrid_configs = { "dp_degree": 16, "mp_degree": 1, "pp_degree": 1, - "sharding_degree": 1 + "sharding_degree": 1, } np.random.seed(seed) @@ -44,7 +48,6 @@ paddle.seed(seed) class MLP(fluid.Layer): - def __init__(self, linear_size=1000, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -64,7 +67,6 @@ class MLP(fluid.Layer): def reader_decorator(linear_size=1000): - def __reader__(): for _ in range(100): img = np.random.rand(linear_size).astype('float32') @@ -76,59 +78,65 @@ def reader_decorator(linear_size=1000): def optimizer_setting(model, use_pure_fp16, opt_group=False): clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - optimizer = paddle.optimizer.AdamW(parameters=[{ - "params": model.parameters() - }] if opt_group else model.parameters(), - learning_rate=0.001, - weight_decay=0.00001, - grad_clip=clip, - multi_precision=use_pure_fp16) + optimizer = paddle.optimizer.AdamW( + parameters=[{"params": model.parameters()}] + if opt_group + else model.parameters(), + learning_rate=0.001, + weight_decay=0.00001, + grad_clip=clip, + multi_precision=use_pure_fp16, + ) return optimizer -def train_mlp(model, - sharding_stage, - batch_size=100, - use_pure_fp16=False, - accumulate_grad=False, - opt_group=False, - save_model=False): +def train_mlp( + model, + sharding_stage, + batch_size=100, + use_pure_fp16=False, + accumulate_grad=False, + opt_group=False, + save_model=False, +): if sharding_stage == "dp": hcg = fleet.get_hybrid_communicate_group() group = hcg.get_check_parallel_group() else: group = paddle.distributed.new_group( - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + ) if opt_group: - optimizer = optimizer_setting(model=model, - use_pure_fp16=use_pure_fp16, - opt_group=opt_group) + optimizer = optimizer_setting( + model=model, use_pure_fp16=use_pure_fp16, opt_group=opt_group + ) else: optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16) if sharding_stage == 2: - optimizer = ShardingOptimizerStage2(params=model.parameters(), - optim=optimizer, - group=group) - - model = ShardingStage2(model, - optimizer, - group=group, - buffer_max_size=2**21) + optimizer = ShardingOptimizerStage2( + params=model.parameters(), optim=optimizer, group=group + ) + + model = ShardingStage2( + model, optimizer, group=group, buffer_max_size=2**21 + ) else: optimizer = fleet.distributed_optimizer(optimizer) model = fleet.distributed_model(model) - train_reader = paddle.batch(reader_decorator(), - 
batch_size=batch_size, - drop_last=True) - - train_loader = paddle.io.DataLoader.from_generator(capacity=32, - use_double_buffer=True, - iterable=True, - return_list=True, - use_multiprocess=True) + train_reader = paddle.batch( + reader_decorator(), batch_size=batch_size, drop_last=True + ) + + train_loader = paddle.io.DataLoader.from_generator( + capacity=32, + use_double_buffer=True, + iterable=True, + return_list=True, + use_multiprocess=True, + ) train_loader.set_sample_list_generator(train_reader) if sharding_stage == 2: @@ -180,53 +188,51 @@ def test_dp_stage2(): mlp6.set_state_dict(state_dict) # DP VS stage2 - dp_params = train_mlp(mlp1, - sharding_stage="dp", - use_pure_fp16=False, - opt_group=False) - stage2_params = train_mlp(mlp2, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False) + dp_params = train_mlp( + mlp1, sharding_stage="dp", use_pure_fp16=False, opt_group=False + ) + stage2_params = train_mlp( + mlp2, sharding_stage=2, use_pure_fp16=False, opt_group=False + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6, - atol=5e-4) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6, atol=5e-4 + ) # stage2 accumulate grad stage2_params = train_mlp(mlp3, sharding_stage=2, accumulate_grad=True) - stage2_accumulate_grad = train_mlp(mlp4, - sharding_stage=2, - batch_size=20, - accumulate_grad=True) + stage2_accumulate_grad = train_mlp( + mlp4, sharding_stage=2, batch_size=20, accumulate_grad=True + ) for i in range(len(stage2_params)): - np.testing.assert_allclose(stage2_params[i].numpy(), - stage2_accumulate_grad[i].numpy(), - rtol=1e-5, - atol=1e-5) + np.testing.assert_allclose( + stage2_params[i].numpy(), + stage2_accumulate_grad[i].numpy(), + rtol=1e-5, + atol=1e-5, + ) # stage2 param list VS param group - stage2_params = train_mlp(mlp5, - sharding_stage=2, - use_pure_fp16=False, - opt_group=True) + stage2_params = train_mlp( + mlp5, sharding_stage=2, use_pure_fp16=False, opt_group=True + ) for i in range(len(dp_params)): - np.testing.assert_allclose(dp_params[i].numpy(), - stage2_params[i].numpy(), - rtol=1e-6, - atol=5e-4) + np.testing.assert_allclose( + dp_params[i].numpy(), stage2_params[i].numpy(), rtol=1e-6, atol=5e-4 + ) # save/load model output_dir = tempfile.mkdtemp() try: model_file = os.path.join(output_dir, "model.pdmodel") optimizer_file = os.path.join(output_dir, "model.pdopt") - model_stage2, optimizer_stage2 = train_mlp(mlp6, - sharding_stage=2, - use_pure_fp16=False, - opt_group=False, - save_model=True) + model_stage2, optimizer_stage2 = train_mlp( + mlp6, + sharding_stage=2, + use_pure_fp16=False, + opt_group=False, + save_model=True, + ) paddle.save(model_stage2.state_dict(), model_file) paddle.save(optimizer_stage2.state_dict(), optimizer_file) m_state_dict = paddle.load(model_file) diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py index c7ad96c061ef794773def975b0a0ab4aa11fbab1..868585f81a0bede0809ac4a13336be3146b17dc7 100644 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_collective_multi_nodes.py @@ -20,10 +20,10 @@ import tempfile class TestCollectiveAPIRunnerBase(object): - def check_pass(self, *args, **kwargs): raise NotImplementedError( - "get model should be implemented by child 
class.") + "get model should be implemented by child class." + ) def run_trainer(self, *args, **kwargs): self.check_pass(*args, **kwargs) @@ -37,7 +37,6 @@ def runtime_main(test_class, col_type=None): class TestDistBase(unittest.TestCase): - def setUp(self): self._trainers = 4 self._init_env() @@ -46,15 +45,17 @@ class TestDistBase(unittest.TestCase): self._python_interp = sys.executable self.temp_dir = tempfile.TemporaryDirectory() - def check_with_place(self, - model_file, - backend="nccl", - static_mode=False, - check_error_log=False, - need_envs={}, - eager_mode=True, - args=[], - kwargs={}): + def check_with_place( + self, + model_file, + backend="nccl", + static_mode=False, + check_error_log=False, + need_envs={}, + eager_mode=True, + args=[], + kwargs={}, + ): required_envs = { "FLAGS_fraction_of_gpu_memory_to_use": "0.15", "FLAGS_eager_delete_tensor_gb": "0.0", @@ -68,7 +69,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_WITH_GLOO": "0", "BACKEND": backend, "PADDLE_DISTRI_BACKEND": backend, - "PADDLE_USE_GPU": "1" + "PADDLE_USE_GPU": "1", } required_envs.update(need_envs) if check_error_log: @@ -87,15 +88,18 @@ class TestDistBase(unittest.TestCase): filted_envs = dict() for k in envs.keys(): if "PADDLE_" == k[:7] and k not in [ - "PADDLE_NNODES", "PADDLE_MASTER" + "PADDLE_NNODES", + "PADDLE_MASTER", ]: continue filted_envs[k] = envs[k] - launcher = subprocess.Popen(run_cluster_process.strip().split(), - stdout=sys.stderr, - stderr=sys.stdout, - env=filted_envs) + launcher = subprocess.Popen( + run_cluster_process.strip().split(), + stdout=sys.stderr, + stderr=sys.stdout, + env=filted_envs, + ) launcher.communicate(timeout=240) if launcher.poll() is None: diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dp.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dp.py index 05124e0ddd5dd3501f5df367822b59d7855e2d8f..80973d735555cf3d67101bd6f155685a6457cfaa 100755 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dp.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dp.py @@ -22,15 +22,14 @@ import os class TestDYgraphDPMode(TestDistBase): - def setUp(self): self._trainers = 16 self._init_env() def test_col_parallel_linear(self): - self.check_with_place("dygraph_hybrid_dp.py", - backend="nccl", - need_envs=os.environ) + self.check_with_place( + "dygraph_hybrid_dp.py", backend="nccl", need_envs=os.environ + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dpppmp.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dpppmp.py index aa2a01454ae9b654077c3485e84f8756e9706065..1435dd8ef0e1da7bfa05b32d179e76142084f978 100755 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dpppmp.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_hybrid_dpppmp.py @@ -22,25 +22,24 @@ import os class TestDYgraphHybrid(TestDistBase): - def setUp(self): self._trainers = 16 self._init_env() def test_hybrid_dpppmp(self): - self.check_with_place("dygraph_hybrid_dpppmp.py", - backend="nccl", - need_envs=os.environ) + self.check_with_place( + "dygraph_hybrid_dpppmp.py", backend="nccl", need_envs=os.environ + ) def test_hybrid_recompute(self): - self.check_with_place("dygraph_hybrid_recompute.py", - backend="nccl", - need_envs=os.environ) + 
self.check_with_place( + "dygraph_hybrid_recompute.py", backend="nccl", need_envs=os.environ + ) def test_hybrid_fp16(self): - self.check_with_place("dygraph_hybrid_fp16.py", - backend="nccl", - need_envs=os.environ) + self.check_with_place( + "dygraph_hybrid_fp16.py", backend="nccl", need_envs=os.environ + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_sharding.py b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_sharding.py index f0da628a4c8a3c8982e6d9a95b26d864f17b3535..d1689330d4aa90c36fe494112af7c89b8571e304 100755 --- a/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_sharding.py +++ b/python/paddle/fluid/tests/unittests/collective/multinode/test_multinode_dygraph_sharding.py @@ -22,20 +22,23 @@ import os class TestDYgrapShardingDP(TestDistBase): - def setUp(self): self._trainers = 16 self._init_env() def test_hybrid_sharding_stage2(self): - self.check_with_place("mn_dygraph_sharding_stage2.py", - backend="nccl", - need_envs=os.environ) + self.check_with_place( + "mn_dygraph_sharding_stage2.py", + backend="nccl", + need_envs=os.environ, + ) def test_hybrid_sharding_stage3(self): - self.check_with_place("mn_dygraph_group_sharded_stage3.py", - backend="nccl", - need_envs=os.environ) + self.check_with_place( + "mn_dygraph_group_sharded_stage3.py", + backend="nccl", + need_envs=os.environ, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/orthogonal_strategy.py b/python/paddle/fluid/tests/unittests/collective/orthogonal_strategy.py index 5dbc624b79bb0521b34b0a6316046fed98f2e970..7d39b6772336ce2a2c5637ef11ef042028430ad1 100644 --- a/python/paddle/fluid/tests/unittests/collective/orthogonal_strategy.py +++ b/python/paddle/fluid/tests/unittests/collective/orthogonal_strategy.py @@ -14,27 +14,37 @@ import unittest import paddle.distributed as dist -from paddle.distributed.fleet.base.strategy_group import DPGroup, ShardingGroup, MPGroup, PPGroup +from paddle.distributed.fleet.base.strategy_group import ( + DPGroup, + ShardingGroup, + MPGroup, + PPGroup, +) from paddle.distributed.fleet.base.orthogonal_strategy import OrthogonalStrategy class TestOrthogonalStrategyAPI(unittest.TestCase): - def setUp(self): self._num_of_ranks = 2 dist.init_parallel_env() self._global_rank = dist.get_rank() self._strategy = OrthogonalStrategy( - [("dp", 2, DPGroup), ("mp", 1, MPGroup), - ("sharding", 1, ShardingGroup), ("pp", 1, PPGroup)], - fused_strategy_dict={"checkness": ["mp", "sharding", "pp"]}) + [ + ("dp", 2, DPGroup), + ("mp", 1, MPGroup), + ("sharding", 1, ShardingGroup), + ("pp", 1, PPGroup), + ], + fused_strategy_dict={"checkness": ["mp", "sharding", "pp"]}, + ) def test_orthogonal_strategy(self): dp_group = self._strategy.strategy_group("dp") self.assertEqual(dp_group.world_size, self._num_of_ranks) self.assertEqual(dp_group.group.nranks, self._num_of_ranks) - self.assertEqual(self._strategy.rank_in_strategy("dp"), - self._global_rank) + self.assertEqual( + self._strategy.rank_in_strategy("dp"), self._global_rank + ) fused_group = self._strategy.fused_strategy_group("checkness") self.assertEqual(fused_group.world_size, 1) diff --git a/python/paddle/fluid/tests/unittests/collective/parallel_embedding_api.py b/python/paddle/fluid/tests/unittests/collective/parallel_embedding_api.py index 8b6ab746ce858abb8cb4406db6ffc29d5d7bb474..0c118441b65942731c559f597d48a8dc630473d3 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/parallel_embedding_api.py +++ b/python/paddle/fluid/tests/unittests/collective/parallel_embedding_api.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestParallelEmbeddingAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -36,24 +35,30 @@ class TestParallelEmbeddingAPI(TestCollectiveAPIRunnerBase): paddle.seed(2020) data_in = paddle.randint(0, size[0], shape=(10, 4)) - data = paddle.static.data(name='tindata', - shape=[10, 1000], - dtype="float32") + data = paddle.static.data( + name='tindata', shape=[10, 1000], dtype="float32" + ) per_part_size = size[0] // 2 if rank == 0: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[0:per_part_size, :]), ) + np_array[0:per_part_size, :] + ), + ) else: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[per_part_size:size[0], :]), ) + np_array[per_part_size : size[0], :] + ), + ) - emb_out = paddle.distributed.split(data_in, - size, - operation="embedding", - num_partitions=2, - weight_attr=param_attr) + emb_out = paddle.distributed.split( + data_in, + size, + operation="embedding", + num_partitions=2, + weight_attr=param_attr, + ) return [data_in, emb_out] diff --git a/python/paddle/fluid/tests/unittests/collective/process_group_gloo.py b/python/paddle/fluid/tests/unittests/collective/process_group_gloo.py index 4e678ef955e59e19964c25e83d6d0836825eb236..3ace517991322174397e3767101fd0e929425299 100644 --- a/python/paddle/fluid/tests/unittests/collective/process_group_gloo.py +++ b/python/paddle/fluid/tests/unittests/collective/process_group_gloo.py @@ -24,7 +24,6 @@ from paddle.fluid.dygraph.parallel import ParallelEnv class TestProcessGroupFp32(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) @@ -40,8 +39,9 @@ class TestProcessGroupFp32(unittest.TestCase): nranks = ParallelEnv().nranks rank = ParallelEnv().local_rank is_master = True if rank == 0 else False - store = paddle.fluid.core.TCPStore("127.0.0.1", 6272, is_master, - nranks, 30) + store = paddle.fluid.core.TCPStore( + "127.0.0.1", 6272, is_master, nranks, 30 + ) place = paddle.fluid.core.CPUPlace() pg = paddle.fluid.core.ProcessGroupGloo(store, rank, nranks, place) @@ -135,8 +135,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = pg.all_gather(tensor_y, tensor_out) task.wait() out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) - out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], - [out_shape[0]]) + out_2 = paddle.slice( + tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] + ) assert np.array_equal(tensor_x, out_1) assert np.array_equal(tensor_y, out_2) print("test allgather api ok\n") @@ -175,8 +176,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = pg.scatter(tensor_x, tensor_y, 0) task.wait() out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) - out2 = paddle.slice(tensor_x, [0], [self.shape[0]], - [self.shape[0] * 2]) + out2 = paddle.slice( + tensor_x, [0], [self.shape[0]], [self.shape[0] * 2] + ) if pg.rank() == 0: assert np.array_equal(tensor_y, out1) else: diff --git a/python/paddle/fluid/tests/unittests/collective/process_group_mpi.py b/python/paddle/fluid/tests/unittests/collective/process_group_mpi.py index f3ca19ebe7eae154746f088cbc4cd06eab5c98f3..998e323bfb1164cb39ad0da002993fd1102e31af 100644 --- a/python/paddle/fluid/tests/unittests/collective/process_group_mpi.py +++ 
b/python/paddle/fluid/tests/unittests/collective/process_group_mpi.py @@ -42,12 +42,14 @@ def init_process_group(strategy=None): place = core.CPUPlace() _set_expected_place(place) - group = Group(rank, - world_size, - id=0, - ranks=list(range(world_size)), - pg=pg, - name=_default_group_name) + group = Group( + rank, + world_size, + id=0, + ranks=list(range(world_size)), + pg=pg, + name=_default_group_name, + ) _set_group_map_by_name(_default_group_name, group) _set_group_map(gid, group) _set_group_map_backend(group, "mpi") @@ -84,15 +86,15 @@ def test_allreduce_max(pg, shape, dtype): max_result = paddle.maximum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.MAX, - use_calc_stream=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.MAX, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, max_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.MAX, - use_calc_stream=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.MAX, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_y, max_result) print("test allreduce max api ok") @@ -109,15 +111,15 @@ def test_allreduce_min(pg, shape, dtype): min_result = paddle.minimum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.MIN, - use_calc_stream=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.MIN, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, min_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.MIN, - use_calc_stream=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.MIN, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_y, min_result) print("test allreduce min api ok") @@ -134,15 +136,15 @@ def test_allreduce_prod(pg, shape, dtype): prod_result = np.multiply(x, y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.PROD, - use_calc_stream=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.PROD, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, prod_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.PROD, - use_calc_stream=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.PROD, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_y, prod_result) print("test allreduce prod api ok") @@ -196,7 +198,7 @@ def test_allgather(pg, shape, dtype): else: tensor_out_list = [ paddle.empty_like(tensor_x), - paddle.empty_like(tensor_x) + paddle.empty_like(tensor_x), ] task = dist.all_gather(tensor_out_list, tensor_y, use_calc_stream=False) tensor_out = paddle.concat(tensor_out_list) @@ -309,17 +311,15 @@ def test_reduce_max(pg, shape, dtype): max_result = paddle.maximum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.MAX, - use_calc_stream=False) + task = dist.reduce( + tensor_x, 0, dist.ReduceOp.MAX, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, max_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.MAX, - use_calc_stream=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.MAX, use_calc_stream=False + ) task.wait() print("test reduce max api ok") @@ -335,17 +335,15 @@ def test_reduce_min(pg, shape, dtype): min_result = paddle.minimum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.MIN, - use_calc_stream=False) + task = dist.reduce( + tensor_x, 0, dist.ReduceOp.MIN, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, 
min_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.MIN, - use_calc_stream=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.MIN, use_calc_stream=False + ) task.wait() print("test reduce min api ok") @@ -361,17 +359,15 @@ def test_reduce_prod(pg, shape, dtype): prod_result = np.multiply(x, y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.PROD, - use_calc_stream=False) + task = dist.reduce( + tensor_x, 0, dist.ReduceOp.PROD, use_calc_stream=False + ) task.wait() assert np.array_equal(tensor_x, prod_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.PROD, - use_calc_stream=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.PROD, use_calc_stream=False + ) task.wait() print("test reduce prod api ok") @@ -436,7 +432,6 @@ def test_send_recv(pg, sub_group, shape, dtype): class TestProcessGroup(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) diff --git a/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py b/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py index aa3a9575f42ffcda41c323ab674e5554e5e1d587..ff949d8f14cf477d235efb78afd552bdab617abb 100644 --- a/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py +++ b/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py @@ -32,7 +32,6 @@ def init_process_group(strategy=None): class TestProcessGroupFp32(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) @@ -45,8 +44,9 @@ class TestProcessGroupFp32(unittest.TestCase): def test_create_process_group_nccl(self): with _test_eager_guard(): - paddle.set_device('gpu:%d' % - paddle.distributed.ParallelEnv().dev_id) + paddle.set_device( + 'gpu:%d' % paddle.distributed.ParallelEnv().dev_id + ) pg = init_process_group() print("rank:", pg.rank(), "size:", pg.size(), "name:", pg.name()) @@ -81,15 +81,15 @@ class TestProcessGroupFp32(unittest.TestCase): max_result = paddle.maximum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.MAX, - sync_op=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.MAX, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, max_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.MAX, - sync_op=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.MAX, sync_op=False + ) task.wait() assert np.array_equal(tensor_y, max_result) @@ -106,15 +106,15 @@ class TestProcessGroupFp32(unittest.TestCase): min_result = paddle.minimum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.MIN, - sync_op=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.MIN, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, min_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.MIN, - sync_op=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.MIN, sync_op=False + ) task.wait() assert np.array_equal(tensor_y, min_result) @@ -131,15 +131,15 @@ class TestProcessGroupFp32(unittest.TestCase): prod_result = np.multiply(x, y) if pg.rank() == 0: - task = dist.all_reduce(tensor_x, - dist.ReduceOp.PROD, - sync_op=False) + task = dist.all_reduce( + tensor_x, dist.ReduceOp.PROD, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, prod_result) else: - task = dist.all_reduce(tensor_y, - dist.ReduceOp.PROD, - sync_op=False) + task = dist.all_reduce( + tensor_y, dist.ReduceOp.PROD, sync_op=False + ) task.wait() assert np.array_equal(tensor_y, prod_result) @@ -196,14 +196,15 @@ class 
TestProcessGroupFp32(unittest.TestCase): else: tensor_out_list = [ paddle.empty_like(tensor_x), - paddle.empty_like(tensor_x) + paddle.empty_like(tensor_x), ] task = dist.all_gather(tensor_out_list, tensor_y, sync_op=False) paddle.device.cuda.synchronize() tensor_out = paddle.concat(tensor_out_list) out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) - out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], - [out_shape[0]]) + out_2 = paddle.slice( + tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] + ) assert np.array_equal(tensor_x, out_1) assert np.array_equal(tensor_y, out_2) print("test allgather api ok\n") @@ -219,8 +220,9 @@ class TestProcessGroupFp32(unittest.TestCase): paddle.device.cuda.synchronize() tensor_out = paddle.concat(tensor_out_list) out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) - out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], - [out_shape[0]]) + out_2 = paddle.slice( + tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] + ) assert np.array_equal(tensor_x, out_1) assert np.array_equal(tensor_y, out_2) print("test allgather api2 ok\n") @@ -235,10 +237,12 @@ class TestProcessGroupFp32(unittest.TestCase): tensor_y = paddle.to_tensor(y) tensor_out1 = paddle.to_tensor(out1) tensor_out2 = paddle.to_tensor(out2) - raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2], - [self.shape[0]]) - raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0], - [self.shape[0] // 2]) + raw_tensor_x_2 = paddle.slice( + tensor_x, [0], [self.shape[0] // 2], [self.shape[0]] + ) + raw_tensor_y_1 = paddle.slice( + tensor_y, [0], [0], [self.shape[0] // 2] + ) if pg.rank() == 0: task = pg.alltoall(tensor_x, tensor_out1) task.wait() @@ -250,8 +254,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = dist.alltoall([in_1, in_2], out_tensor_list) paddle.device.cuda.synchronize() tensor_out2 = paddle.concat(out_tensor_list) - out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2], - [self.shape[0]]) + out1_2 = paddle.slice( + tensor_out1, [0], [self.shape[0] // 2], [self.shape[0]] + ) out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2]) if pg.rank() == 0: assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy()) @@ -267,10 +272,12 @@ class TestProcessGroupFp32(unittest.TestCase): tensor_y = paddle.to_tensor(y) tensor_out1 = paddle.to_tensor(out1) tensor_out2 = paddle.to_tensor(out2) - raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2], - [self.shape[0]]) - raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0], - [self.shape[0] // 2]) + raw_tensor_x_2 = paddle.slice( + tensor_x, [0], [self.shape[0] // 2], [self.shape[0]] + ) + raw_tensor_y_1 = paddle.slice( + tensor_y, [0], [0], [self.shape[0] // 2] + ) if pg.rank() == 0: task = pg.alltoall(tensor_x, tensor_out1) task.wait() @@ -282,8 +289,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = dist.alltoall([in_1, in_2], out_tensor_list) paddle.device.cuda.synchronize() tensor_out2 = paddle.concat(out_tensor_list) - out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2], - [self.shape[0]]) + out1_2 = paddle.slice( + tensor_out1, [0], [self.shape[0] // 2], [self.shape[0]] + ) out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2]) if pg.rank() == 0: assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy()) @@ -321,17 +329,15 @@ class TestProcessGroupFp32(unittest.TestCase): max_result = paddle.maximum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.MAX, - sync_op=False) + task = dist.reduce( + 
tensor_x, 0, dist.ReduceOp.MAX, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, max_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.MAX, - sync_op=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.MAX, sync_op=False + ) task.wait() print("test reduce max api ok") @@ -347,17 +353,15 @@ class TestProcessGroupFp32(unittest.TestCase): min_result = paddle.minimum(tensor_x, tensor_y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.MIN, - sync_op=False) + task = dist.reduce( + tensor_x, 0, dist.ReduceOp.MIN, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, min_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.MIN, - sync_op=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.MIN, sync_op=False + ) task.wait() print("test reduce min api ok") @@ -373,17 +377,15 @@ class TestProcessGroupFp32(unittest.TestCase): prod_result = np.multiply(x, y) if pg.rank() == 0: - task = dist.reduce(tensor_x, - 0, - dist.ReduceOp.PROD, - sync_op=False) + task = dist.reduce( + tensor_x, 0, dist.ReduceOp.PROD, sync_op=False + ) task.wait() assert np.array_equal(tensor_x, prod_result) else: - task = dist.reduce(tensor_y, - 0, - dist.ReduceOp.PROD, - sync_op=False) + task = dist.reduce( + tensor_y, 0, dist.ReduceOp.PROD, sync_op=False + ) task.wait() print("test reduce prod api ok") @@ -398,7 +400,7 @@ class TestProcessGroupFp32(unittest.TestCase): if pg.rank() == 0: in_1, in_2 = paddle.split(tensor_x, 2) task = dist.scatter(tensor_y, [in_1, in_2], 0, sync_op=True) - #task.wait() + # task.wait() paddle.device.cuda.synchronize() # rank 1 else: @@ -406,8 +408,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() paddle.device.cuda.synchronize() out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) - out2 = paddle.slice(tensor_x, [0], [self.shape[0]], - [self.shape[0] * 2]) + out2 = paddle.slice( + tensor_x, [0], [self.shape[0]], [self.shape[0] * 2] + ) if pg.rank() == 0: assert np.array_equal(tensor_y, out1) else: @@ -450,7 +453,6 @@ class TestProcessGroupFp32(unittest.TestCase): class TestProcessGroupFp16(TestProcessGroupFp32): - def setUp(self): paddle.seed(2022) random.seed(2022) diff --git a/python/paddle/fluid/tests/unittests/collective/row_parallel_linear_api.py b/python/paddle/fluid/tests/unittests/collective/row_parallel_linear_api.py index 854ef9cb69e0d6e3e1a898d74017897340e4b7e9..e20f57f39ae120acf89075d098b1c17cc892b108 100644 --- a/python/paddle/fluid/tests/unittests/collective/row_parallel_linear_api.py +++ b/python/paddle/fluid/tests/unittests/collective/row_parallel_linear_api.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestRowParallelLinearAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -32,19 +31,23 @@ class TestRowParallelLinearAPI(TestCollectiveAPIRunnerBase): np.random.seed(2020) np_array = np.random.rand(1000, 16) - data = paddle.static.data(name='tindata', - shape=[10, 1000], - dtype="float32") + data = paddle.static.data( + name='tindata', shape=[10, 1000], dtype="float32" + ) paddle.distributed.broadcast(data, src=0) data = paddle.split(data, 2, axis=1)[rank] if rank == 0: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[0:500, :]), ) + np_array[0:500, :] + ), + ) else: param_attr = paddle.fluid.ParamAttr( initializer=paddle.fluid.initializer.NumpyArrayInitializer( - np_array[500:1000, :]), ) + np_array[500:1000, :] + ), + ) linear_out = paddle.distributed.split( data, diff --git 
a/python/paddle/fluid/tests/unittests/collective/strategy_group.py b/python/paddle/fluid/tests/unittests/collective/strategy_group.py index 247a232aecc436f826c08cb021d54d30fdedd48a..75d2995d62a9936c9f10b0b7fe1acfbffecaffd0 100644 --- a/python/paddle/fluid/tests/unittests/collective/strategy_group.py +++ b/python/paddle/fluid/tests/unittests/collective/strategy_group.py @@ -16,7 +16,13 @@ import unittest import numpy as np import paddle import paddle.distributed as dist -from paddle.distributed.fleet.base.strategy_group import StrategyGroupBase, DPGroup, MPGroup, PPGroup, ShardingGroup +from paddle.distributed.fleet.base.strategy_group import ( + StrategyGroupBase, + DPGroup, + MPGroup, + PPGroup, + ShardingGroup, +) def _check_using_all_reduce(group): @@ -39,7 +45,6 @@ def _check_using_recv(group, src): class TestStrategyGroupAPI(unittest.TestCase): - def setUp(self): self._num_of_ranks = 2 self._list_of_rank = [[0, 1]] @@ -74,7 +79,12 @@ class TestStrategyGroupAPI(unittest.TestCase): def test_pipeline_parallel_group(self): pp_group = PPGroup(self._list_of_rank) - send_next_group, send_prev_group, recv_next_group, recv_prev_group = pp_group.p2p_groups + ( + send_next_group, + send_prev_group, + recv_next_group, + recv_prev_group, + ) = pp_group.p2p_groups if self._global_rank == 0: self.assertEqual(pp_group.rank_of_next_stage, 1) self.assertEqual(pp_group.rank_of_prev_stage, 1) diff --git a/python/paddle/fluid/tests/unittests/collective/test_allreduce.py b/python/paddle/fluid/tests/unittests/collective/test_allreduce.py index d137f0f3ec4524c62b6e5d391109af6fcc07d189..19763579b1a42c4112d71fe65e984ff1388533fe 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_allreduce.py +++ b/python/paddle/fluid/tests/unittests/collective/test_allreduce.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestAllReduceOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_broadcast.py b/python/paddle/fluid/tests/unittests/collective/test_broadcast.py index ff171d084d3f65cdb000e474aaba6af1eff7dd1c..8aa7be89d15e7059eb1007faaefb24eb0cab8e4e 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_broadcast.py +++ b/python/paddle/fluid/tests/unittests/collective/test_broadcast.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCBroadcastOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_c_concat.py b/python/paddle/fluid/tests/unittests/collective/test_c_concat.py index bc241c1e556f5dd5c496ac29a33a215cb8fa808c..c28e313744aafe359bfdeea482fc6f151b7e623e 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_c_concat.py +++ b/python/paddle/fluid/tests/unittests/collective/test_c_concat.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestConcatOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_c_identity.py b/python/paddle/fluid/tests/unittests/collective/test_c_identity.py index 936e2961c57c2d84a691bdd4159b8143873968ef..585c0aff0e4d4a931a602cdbec865fe017f00dd6 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_c_identity.py +++ b/python/paddle/fluid/tests/unittests/collective/test_c_identity.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestIdentityOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_c_split.py b/python/paddle/fluid/tests/unittests/collective/test_c_split.py index 
43dc766ad24b9a6b7c93f91d20d290792787194d..b75398ca6bd76cde7b93bf1ad8d25225ef151d17 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_c_split.py +++ b/python/paddle/fluid/tests/unittests/collective/test_c_split.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestSplitOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_api.py index 9040564ce1206c2e246c867259056ecff7166484..f0e9b3bba2f6090b50ed7440023f5c149c0b6837 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_api.py @@ -21,59 +21,96 @@ paddle.enable_static() class TestCollectiveAllgatherAPI(TestDistBase): - def _setup_config(self): pass def test_allgather_nccl(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "complex64", "complex128" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "complex64", + "complex128", ] for dtype in dtypes_to_test: - self.check_with_place("collective_allgather_api.py", - "allgather", - "nccl", - dtype=dtype) + self.check_with_place( + "collective_allgather_api.py", "allgather", "nccl", dtype=dtype + ) def test_allgather_gloo(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "complex64", "complex128" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "complex64", + "complex128", ] for dtype in dtypes_to_test: - self.check_with_place("collective_allgather_api.py", - "allgather", - "gloo", - "3", - dtype=dtype) + self.check_with_place( + "collective_allgather_api.py", + "allgather", + "gloo", + "3", + dtype=dtype, + ) def test_allgatther_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "complex64", "complex128" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "complex64", + "complex128", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_allgather_api_dygraph.py", - "allgather", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_allgather_api_dygraph.py", + "allgather", + "nccl", + static_mode="0", + dtype=dtype, + ) def test_allgather_gloo_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "bfloat16", "complex64", "complex128" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "bfloat16", + "complex64", + "complex128", ] for dtype in dtypes_to_test: - self.check_with_place("collective_allgather_api_dygraph.py", - "allgather", - "gloo", - "3", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_allgather_api_dygraph.py", + "allgather", + "gloo", + "3", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_object_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_object_api.py index 90539e89c0b0e6afe17fe5f80ae3d3a2b5c53d7d..4a174010c51029a71e6d15b69d51b7d0f3534993 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_object_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_allgather_object_api.py @@ -17,35 +17,42 @@ import test_collective_api_base as test_base class TestCollectiveAllgatherObjectAPI(test_base.TestDistBase): - def _setup_config(self): pass def test_allgather_nccl(self): - self.check_with_place("collective_allgather_object_api_dygraph.py", - "allgather_object", - "nccl", - static_mode="0", - dtype="pylist") - self.check_with_place("collective_allgather_object_api_dygraph.py", - "allgather_object", - "nccl", - static_mode="0", - dtype="pydict") + self.check_with_place( + "collective_allgather_object_api_dygraph.py", + "allgather_object", + "nccl", + static_mode="0", + dtype="pylist", + ) + self.check_with_place( + "collective_allgather_object_api_dygraph.py", + "allgather_object", + "nccl", + static_mode="0", + dtype="pydict", + ) def test_allgather_gloo_dygraph(self): - self.check_with_place("collective_allgather_object_api_dygraph.py", - "allgather_object", - "gloo", - "3", - static_mode="0", - dtype="pylist") - self.check_with_place("collective_allgather_object_api_dygraph.py", - "allgather_object", - "gloo", - "3", - static_mode="0", - dtype="pydict") + self.check_with_place( + "collective_allgather_object_api_dygraph.py", + "allgather_object", + "gloo", + "3", + static_mode="0", + dtype="pylist", + ) + self.check_with_place( + "collective_allgather_object_api_dygraph.py", + "allgather_object", + "gloo", + "3", + static_mode="0", + dtype="pydict", + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_allreduce_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_allreduce_api.py index a5080f78bcee2dd68d566cbf35fb5d6881cc786f..6b2b9c0ade8f401cadd40af0f299bfff8fc34f09 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_allreduce_api.py @@ -21,50 +21,69 @@ paddle.enable_static() class TestCollectiveAllreduceAPI(TestDistBase): - def _setup_config(self): pass def test_allreduce_nccl(self): if paddle.fluid.core.is_compiled_with_cuda(): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "nccl") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "nccl" + ) def test_allreduce_bkcl(self): if paddle.fluid.core.is_compiled_with_xpu(): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "bkcl") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "bkcl" + ) def test_allreduce_gloo(self): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "gloo", "2") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "gloo", "2" + ) def test_allreduce_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_allreduce_api_dygraph.py", - "allreduce", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_allreduce_api_dygraph.py", + "allreduce", + "nccl", + static_mode="0", + dtype=dtype, + ) def test_allreduce_gloo_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - 
"bool", "bfloat16" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "bfloat16", ] for dtype in dtypes_to_test: - self.check_with_place("collective_allreduce_api_dygraph.py", - "allreduce", - "gloo", - "2", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_allreduce_api_dygraph.py", + "allreduce", + "gloo", + "2", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_api.py index 1edb06ae512d69e8f88e678d38cfde7f8cc246d9..acbbdec2ca89f9d4f1cd785ec6d751824aa49143 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_api.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCollectiveAllToAllAPI(TestDistBase): - def _setup_config(self): pass @@ -30,17 +29,25 @@ class TestCollectiveAllToAllAPI(TestDistBase): def test_alltoall_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_alltoall_api_dygraph.py", - "alltoall", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_alltoall_api_dygraph.py", + "alltoall", + "nccl", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single.py b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single.py index 9ef0a7639e18b449d4e8bf2079b5026294361691..6915f07e8fa3ae16b9e8970f034699ee5c1d58d0 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestCollectiveAllToAllSingle(TestMultipleGpus): - def test_collective_alltoall_single(self): self.run_mnist_2gpu('collective_alltoall_single.py', eager_mode=True) diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single_api.py index e3ef3f302f33e8e52d10e66352728c2ccbf89789..26917d8aba5db6d535d88998ad42aec9e126fd17 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_alltoall_single_api.py @@ -17,23 +17,30 @@ import test_collective_api_base as test_base class TestCollectiveAllToAllSingleAPI(test_base.TestDistBase): - def _setup_config(self): pass def test_alltooall_single_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_alltoall_single_api_dygraph.py", - "alltoall", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_alltoall_single_api_dygraph.py", + "alltoall", + 
"nccl", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_barrier_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_barrier_api.py index 8a883ce989c184cc09c8b137c1774f63daa079e9..daa23ff7a57dcb5231fb86e589ea11826f6c9602 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_barrier_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_barrier_api.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCollectiveBarrierAPI(TestDistBase): - def _setup_config(self): pass @@ -29,8 +28,9 @@ class TestCollectiveBarrierAPI(TestDistBase): self.check_with_place("collective_barrier_api.py", "barrier", "nccl") def test_barrier_gloo(self): - self.check_with_place("collective_barrier_api.py", "barrier", "gloo", - "5") + self.check_with_place( + "collective_barrier_api.py", "barrier", "gloo", "5" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_batch_isend_irecv.py b/python/paddle/fluid/tests/unittests/collective/test_collective_batch_isend_irecv.py index e9fad9aa1c1f695f3cd09cd00958a24d8f5030cc..892e813660e284f2fc35e330d908fde8cb625fd3 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_batch_isend_irecv.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_batch_isend_irecv.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestCollectiveBatchIsendIrecv(TestMultipleGpus): - def test_collective_batch_isend_irecv(self): self.run_mnist_2gpu('collective_batch_isend_irecv.py', eager_mode=True) diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_broadcast_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_broadcast_api.py index 8f4e747b622eba8fe8857b4c76f76744a91a9990..045d1eb31762865d4f48e221279c983a3e1b9561 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_broadcast_api.py @@ -21,44 +21,62 @@ paddle.enable_static() class TestCollectiveBroadcastAPI(TestDistBase): - def _setup_config(self): pass def test_broadcast_nccl(self): - self.check_with_place("collective_broadcast_api.py", "broadcast", - "nccl") + self.check_with_place( + "collective_broadcast_api.py", "broadcast", "nccl" + ) def test_broadcast_gloo(self): - self.check_with_place("collective_broadcast_api.py", "broadcast", - "gloo", "0") + self.check_with_place( + "collective_broadcast_api.py", "broadcast", "gloo", "0" + ) def test_broadcast_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_broadcast_api_dygraph.py", - "broadcast", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_broadcast_api_dygraph.py", + "broadcast", + "nccl", + static_mode="0", + dtype=dtype, + ) def test_broadcast_gloo_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "bfloat16" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "bfloat16", ] for dtype in dtypes_to_test: - 
self.check_with_place("collective_broadcast_api_dygraph.py", - "broadcast", - "gloo", - "0", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_broadcast_api_dygraph.py", + "broadcast", + "gloo", + "0", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_cpu_barrier_with_gloo.py b/python/paddle/fluid/tests/unittests/collective/test_collective_cpu_barrier_with_gloo.py index 157e7106308ab1fd138e7ccf920b061204c615d3..6049ccbd5d1cf2c1812a4711704fd242ba897045 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_cpu_barrier_with_gloo.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_cpu_barrier_with_gloo.py @@ -26,12 +26,11 @@ paddle.enable_static() class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): - def find_free_port(self): - def _free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -43,8 +42,9 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): def barrier_func(self, id, rank_num, server_endpoint, out_dict, sleep_time): try: - paddle.distributed.gloo_init_parallel_env(id, rank_num, - server_endpoint) + paddle.distributed.gloo_init_parallel_env( + id, rank_num, server_endpoint + ) # 1st barrier # Run barrier to synchronize processes after starting paddle.distributed.gloo_barrier() @@ -52,7 +52,7 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): # Let rank 0 sleep for one second and check that all processes # saw that artificial delay through the barrier start = time.time() - if (id == 0): + if id == 0: time.sleep(sleep_time) paddle.distributed.gloo_barrier() end = time.time() @@ -66,8 +66,9 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): try: main_prog = fluid.Program() startup_prog = fluid.Program() - paddle.distributed.gloo_init_parallel_env(id, rank_num, - server_endpoint) + paddle.distributed.gloo_init_parallel_env( + id, rank_num, server_endpoint + ) place = fluid.CPUPlace() with fluid.program_guard(main_prog, startup_prog): paddle.distributed.barrier() @@ -77,7 +78,7 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): # Let rank 0 sleep for one second and check that all processes # saw that artificial delay through the barrier start = time.time() - if (id == 0): + if id == 0: time.sleep(sleep_time) exe.run(main_prog) end = time.time() @@ -97,9 +98,10 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): procs_out_dict = manager.dict() jobs = [] for id in range(num_of_ranks): - p = multiprocessing.Process(target=self.barrier_func, - args=(id, num_of_ranks, ep_str, - procs_out_dict, sleep_time)) + p = multiprocessing.Process( + target=self.barrier_func, + args=(id, num_of_ranks, ep_str, procs_out_dict, sleep_time), + ) jobs.append(p) p.start() for proc in jobs: @@ -117,9 +119,10 @@ class CollectiveCPUBarrierWithGlooTest(unittest.TestCase): procs_out_dict = manager.dict() jobs = [] for id in range(num_of_ranks): - p = multiprocessing.Process(target=self.barrier_op, - args=(id, num_of_ranks, ep_str, - procs_out_dict, sleep_time)) + p = multiprocessing.Process( + target=self.barrier_op, + args=(id, num_of_ranks, ep_str, procs_out_dict, sleep_time), + ) jobs.append(p) p.start() for proc in jobs: diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py 
b/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py index 85406dd00bb5a9024b8c448ab4d75e3629ed53ec..cc9dbfc10e38b15d05811b8979a4f32174f02420 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_global_gather.py @@ -19,28 +19,32 @@ from test_collective_api_base import TestDistBase class TestCollectiveGlobalGatherAPI(TestDistBase): - def _setup_config(self): pass def test_global_gather_nccl(self): paddle.enable_static() - self.check_with_place("collective_global_gather.py", "global_gather", - "nccl") + self.check_with_place( + "collective_global_gather.py", "global_gather", "nccl" + ) def test_global_gather_nccl_dygraph(self): - self.check_with_place("collective_global_gather_dygraph.py", - "global_gather", - "nccl", - static_mode="0", - eager_mode=False) + self.check_with_place( + "collective_global_gather_dygraph.py", + "global_gather", + "nccl", + static_mode="0", + eager_mode=False, + ) def test_global_gather_nccl_dygraph_eager(self): - self.check_with_place("collective_global_gather_dygraph.py", - "global_gather", - "nccl", - static_mode="0", - eager_mode=True) + self.check_with_place( + "collective_global_gather_dygraph.py", + "global_gather", + "nccl", + static_mode="0", + eager_mode=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py b/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py index c2668c62047def3d060b57577773f33329ba441f..2b83f136c1e94afe7bf41361e347c9f50bee6fdb 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_global_scatter.py @@ -19,28 +19,32 @@ from test_collective_api_base import TestDistBase class TestCollectiveSelectScatterAPI(TestDistBase): - def _setup_config(self): pass def test_global_scatter_nccl(self): paddle.enable_static() - self.check_with_place("collective_global_scatter.py", "global_scatter", - "nccl") + self.check_with_place( + "collective_global_scatter.py", "global_scatter", "nccl" + ) def test_global_scatter_nccl_dygraph(self): - self.check_with_place("collective_global_scatter_dygraph.py", - "global_scatter", - "nccl", - static_mode="0", - eager_mode=False) + self.check_with_place( + "collective_global_scatter_dygraph.py", + "global_scatter", + "nccl", + static_mode="0", + eager_mode=False, + ) def test_global_scatter_nccl_dygraph_eager(self): - self.check_with_place("collective_global_scatter_dygraph.py", - "global_scatter", - "nccl", - static_mode="0", - eager_mode=True) + self.check_with_place( + "collective_global_scatter_dygraph.py", + "global_scatter", + "nccl", + static_mode="0", + eager_mode=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_isend_irecv_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_isend_irecv_api.py index 2b0727cae0c8e9c82906de31abf392c170309733..d313fc5a35eb6d6d0fa42aac32aeace7133c3244 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_isend_irecv_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_isend_irecv_api.py @@ -17,23 +17,30 @@ import test_collective_api_base as test_base class TestCollectiveIsendIrecvAPI(test_base.TestDistBase): - def _setup_config(self): pass def test_isend_irecv_nccl_dygraph(self): dtypes_to_test = [ - 
"float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_isend_irecv_api_dygraph.py", - "sendrecv", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_isend_irecv_api_dygraph.py", + "sendrecv", + "nccl", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py b/python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py index 3e4920f8f4f9e8dbb0ae0db9632eb60e4528d1a9..98c4b10466dcd7d600875c4125b3d2daedd3becb 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py @@ -26,11 +26,13 @@ import unittest import paddle.fluid as fluid -from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, DistributedStrategy +from paddle.fluid.incubate.fleet.collective import ( + CollectiveOptimizer, + DistributedStrategy, +) class CollectiveOptimizerTest(unittest.TestCase): - def test_ds_as_None(self): optimizer = fluid.optimizer.AdamOptimizer() dist_optimizer = CollectiveOptimizer(optimizer, strategy=None) @@ -40,8 +42,9 @@ class CollectiveOptimizerTest(unittest.TestCase): dist_strategy = DistributedStrategy() dist_strategy.forward_recompute = True dist_strategy.recompute_checkpoints = "NoneListTest" - self.assertRaises(ValueError, CollectiveOptimizer, optimizer, - dist_strategy) + self.assertRaises( + ValueError, CollectiveOptimizer, optimizer, dist_strategy + ) dist_strategy.recompute_checkpoints = [] dist_optimizer = CollectiveOptimizer(optimizer, dist_strategy) self.assertRaises(ValueError, dist_optimizer.minimize, None) @@ -58,7 +61,8 @@ class CollectiveOptimizerTest(unittest.TestCase): def test_amp_strategy(self): optimizer = fluid.optimizer.AdamOptimizer() optimizer = fluid.contrib.mixed_precision.decorate( - optimizer, init_loss_scaling=1.0, use_dynamic_loss_scaling=True) + optimizer, init_loss_scaling=1.0, use_dynamic_loss_scaling=True + ) dist_strategy = DistributedStrategy() dist_strategy.use_amp = True dist_optimizer = CollectiveOptimizer(optimizer, strategy=dist_strategy) diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_process_group.py b/python/paddle/fluid/tests/unittests/collective/test_collective_process_group.py index 8fe1090e1fff79bea22cbe5a5a8e7ec8424cfae3..fdefe6bda4c8efaa96c4ffb1494d76678448c28b 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_process_group.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_process_group.py @@ -17,7 +17,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestProcessGroup(TestMultipleGpus): - def test_process_group_nccl(self): self.run_mnist_2gpu('process_group_nccl.py') diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce.py b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce.py index e5cc5441097507eba46ce057ddacce1e7fcc21a4..96033e2e30763f6d226d982f89a37ddf05775d4e 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce.py @@ -21,7 +21,6 @@ paddle.enable_static() class 
TestCReduceOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_api.py index 35bff97f91619ee463d30dedc116a1ed0db27615..ae946d99750168f514a7be5e442af7f4a1852676 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_api.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCollectiveReduceAPI(TestDistBase): - def _setup_config(self): pass @@ -38,30 +37,47 @@ class TestCollectiveReduceAPI(TestDistBase): def test_reduce_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_reduce_api_dygraph.py", - "reduce", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_reduce_api_dygraph.py", + "reduce", + "nccl", + static_mode="0", + dtype=dtype, + ) def test_reduce_gloo_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "bfloat16" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "bfloat16", ] for dtype in dtypes_to_test: - self.check_with_place("collective_reduce_api_dygraph.py", - "reduce", - "gloo", - "1", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_reduce_api_dygraph.py", + "reduce", + "gloo", + "1", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter.py b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter.py index a9151e1466539509b3a2895ff501975145df329b..634a7ecee0c3adfe95dc63ce6553f6acd45938af 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter.py @@ -19,7 +19,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestCollectiveReduceScatter(TestMultipleGpus): - def test_collective_reduce_scatter(self): self.run_mnist_2gpu('collective_reduce_scatter.py', eager_mode=True) diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter_api.py index 669478f58a37dd2c7dbc0f457c1ab414fe00ccda..26dfbd2fe13bbc73ac193d708ef4263c0ad201d0 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_reduce_scatter_api.py @@ -17,23 +17,30 @@ import test_collective_api_base as test_base class TestCollectiveReduceScatterAPI(test_base.TestDistBase): - def _setup_config(self): pass def test_reduce_scatter_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_reduce_scatter_api_dygraph.py", - "reduce_scatter", - "nccl", - 
static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_reduce_scatter_api_dygraph.py", + "reduce_scatter", + "nccl", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_scatter.py b/python/paddle/fluid/tests/unittests/collective/test_collective_scatter.py index e4d26459967dc78e94f32b682fb6856c2d42193c..1f36e5928ed8c8f6e66080939aaace42c81b3671 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_scatter.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_scatter.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCScatterOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_scatter_api.py index ab7de7975feed8252322dbead74cb40b6b053cdd..f421e2ccccf3b7d154a58c46381a69b557a9611d 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_scatter_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_scatter_api.py @@ -21,43 +21,60 @@ paddle.enable_static() class TestCollectiveScatterAPI(TestDistBase): - def _setup_config(self): pass def test_scatter_gloo(self): - self.check_with_place("collective_scatter_api.py", "scatter", "gloo", - "4") + self.check_with_place( + "collective_scatter_api.py", "scatter", "gloo", "4" + ) def test_scatter_nccl(self): self.check_with_place("collective_scatter_api.py", "scatter", "nccl") def test_scatter_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_scatter_api_dygraph.py", - "scatter", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_scatter_api_dygraph.py", + "scatter", + "nccl", + static_mode="0", + dtype=dtype, + ) def test_scatter_gloo_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool", "bfloat16" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", + "bfloat16", ] for dtype in dtypes_to_test: - self.check_with_place("collective_scatter_api_dygraph.py", - "scatter", - "gloo", - "4", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_scatter_api_dygraph.py", + "scatter", + "gloo", + "4", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv.py b/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv.py index 8f1467cf652032bedd4ba87ec11bbd5744147a60..aa558a73a201e231d101bb1fd05f51ca026a5ab5 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestSendRecvOp(TestDistBase): - def _setup_config(self): pass @@ -29,12 +28,14 @@ class TestSendRecvOp(TestDistBase): self.check_with_place("collective_sendrecv_op.py", "sendrecv") def test_sendrecv_dynamic_shape(self): - self.check_with_place("collective_sendrecv_op_dynamic_shape.py", - "sendrecv_dynamic_shape") + self.check_with_place( + 
"collective_sendrecv_op_dynamic_shape.py", "sendrecv_dynamic_shape" + ) def test_sendrecv_array(self): - self.check_with_place("collective_sendrecv_op_array.py", - "sendrecv_array") + self.check_with_place( + "collective_sendrecv_op_array.py", "sendrecv_array" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv_api.py b/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv_api.py index 3db6df5d46e19f9011d885238beb7cecf86cf392..72bd44666ff081fee5f9a7fc07ba08588fcf8546 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_sendrecv_api.py @@ -21,28 +21,35 @@ paddle.enable_static() class TestCollectiveSendRecvAPI(TestDistBase): - def _setup_config(self): pass - #def test_sendrecv_nccl(self): + # def test_sendrecv_nccl(self): # if paddle.fluid.core.is_compiled_with_cuda(): # self.check_with_place("collective_sendrecv_api.py", "sendrecv", # "nccl") def test_sendrecv_nccl_dygraph(self): dtypes_to_test = [ - "float16", "float32", "float64", "int32", "int64", "int8", "uint8", - "bool" + "float16", + "float32", + "float64", + "int32", + "int64", + "int8", + "uint8", + "bool", ] if self._nccl_version >= 2100: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - self.check_with_place("collective_sendrecv_api_dygraph.py", - "sendrecv", - "nccl", - static_mode="0", - dtype=dtype) + self.check_with_place( + "collective_sendrecv_api_dygraph.py", + "sendrecv", + "nccl", + static_mode="0", + dtype=dtype, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_split_col_linear.py b/python/paddle/fluid/tests/unittests/collective/test_collective_split_col_linear.py index c2983d3ea79930d7f36bcd5b36c61f9f843c7d3b..ee5b334bf2f7973565f6508d51dd6becb05cecb3 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_split_col_linear.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_split_col_linear.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestColParallelLinearAPI(TestDistBase): - def _setup_config(self): pass def test_col_parallel_linear(self): - self.check_with_place("column_parallel_linear_api.py", - "column_parallel_linear", "nccl") + self.check_with_place( + "column_parallel_linear_api.py", "column_parallel_linear", "nccl" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding.py b/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding.py index fbab3e68cea4e28efb2e0f67c6dd20efa0ddbd93..5b7e6dc7d73607c336bcc28ff824275821291b08 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestParallelEmbeddingAPI(TestDistBase): - def _setup_config(self): pass def test_parallel_embedding(self): - self.check_with_place("parallel_embedding_api.py", "parallel_embedding", - "nccl") + self.check_with_place( + "parallel_embedding_api.py", "parallel_embedding", "nccl" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding_none_divisible.py b/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding_none_divisible.py index 
b5888390bcae3aaf723dd678b10335af2bc05b7c..4cf5a59f153352f40f8a968694cfb6015d80a4ff 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding_none_divisible.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_split_embedding_none_divisible.py @@ -20,15 +20,14 @@ paddle.enable_static() class TestCollectiveSplitAssert(unittest.TestCase): - def network(self): fleet.init() - data = paddle.static.data(name='tindata', - shape=[10, 1000], - dtype="float32") - emb_out = paddle.distributed.split(data, (7, 8), - operation="embedding", - num_partitions=2) + data = paddle.static.data( + name='tindata', shape=[10, 1000], dtype="float32" + ) + emb_out = paddle.distributed.split( + data, (7, 8), operation="embedding", num_partitions=2 + ) def test_assert(self): with self.assertRaises(AssertionError): diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_split_row_linear.py b/python/paddle/fluid/tests/unittests/collective/test_collective_split_row_linear.py index c89b1cc7a065db2adcbd0825cff3bd721bb488d9..8253208e8e57ba00a40bf14555f5f27a1b0b7b6c 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_split_row_linear.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_split_row_linear.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestRowParallelLinearAPI(TestDistBase): - def _setup_config(self): pass def test_row_parallel_linear(self): - self.check_with_place("row_parallel_linear_api.py", - "row_parallel_linear", "nccl") + self.check_with_place( + "row_parallel_linear_api.py", "row_parallel_linear", "nccl" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_collective_wait.py b/python/paddle/fluid/tests/unittests/collective/test_collective_wait.py index cf4bcd98fa0bc002a2814243e1bb558757537b21..934279b7622b44f0e5e66943056ad0607150ce67 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_collective_wait.py +++ b/python/paddle/fluid/tests/unittests/collective/test_collective_wait.py @@ -21,14 +21,13 @@ paddle.enable_static() class TestCWaitOp(TestDistBase): - def _setup_config(self): pass def test_allreduce_wait(self): - self.check_with_place("collective_allreduce_op_wait.py", - "allreduce", - check_error_log=True) + self.check_with_place( + "collective_allreduce_op_wait.py", "allreduce", check_error_log=True + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_api_base.py b/python/paddle/fluid/tests/unittests/collective/test_communication_api_base.py index 22edac0015535060f897cd5b1b451d99aef94a64..ee4601972f0dd725b7548860ad130fc5c98bfcca 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_api_base.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_api_base.py @@ -22,7 +22,6 @@ import shutil class CommunicationTestDistBase(unittest.TestCase): - def setUp(self, save_log_dir=None, num_of_devices=2, timeout=120): self._python_interp = sys.executable self._save_log_dir = save_log_dir @@ -41,18 +40,24 @@ class CommunicationTestDistBase(unittest.TestCase): start_command_list = start_command.strip().split() try: - self._launcher = subprocess.run(start_command_list, - env=runtime_envs, - timeout=self._timeout, - check=True) + self._launcher = subprocess.run( + start_command_list, + env=runtime_envs, + timeout=self._timeout, + check=True, + ) except subprocess.TimeoutExpired as err: raise TimeoutError( - "Timeout while running command 
{}, try to set a longer period, {} is not enough." - .format(err.cmd, err.timeout)) + "Timeout while running command {}, try to set a longer period, {} is not enough.".format( + err.cmd, err.timeout + ) + ) except subprocess.CalledProcessError as err: raise RuntimeError( - "Error occurs when running this test case. The return code of command {} is {}" - .format(err.cmd, err.returncode)) + "Error occurs when running this test case. The return code of command {} is {}".format( + err.cmd, err.returncode + ) + ) def tearDown(self): if self._save_log_dir: @@ -63,7 +68,8 @@ class CommunicationTestDistBase(unittest.TestCase): shutil.copytree(self._log_dir.name, dir_name) else: raise RuntimeError( - "Directory {} exists, failed to save log.".format(dir_name)) + "Directory {} exists, failed to save log.".format(dir_name) + ) def gen_product_envs_list(default_envs, changeable_envs): diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allgather_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allgather_api.py index 306248a03354249f17ddf4624861e62c4c52dc17..d9d22dca3d161a72fae8e04aa3c1cf6aa7602e3a 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allgather_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamAllgatherAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamAllgatherAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamAllgatherAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_allgather_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_allgather_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_allgather_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamAllgatherAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allreduce_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allreduce_api.py index 7fe30a5d50fb34dc82ecee843c2e86f11c8baada..8823466b282ea7b438ec954614ac5e3240c4370a 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_allreduce_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamAllreduceAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamAllreduceAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamAllreduceAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": 
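Note: the launcher in CommunicationTestDistBase above reduces to one pattern: run the distributed start command as a subprocess with the per-case environment, enforce a timeout, and translate the two failure modes into clearer exceptions. A self-contained sketch of that pattern follows; the command in the usage line is a placeholder, not the real launch invocation used by the tests.

import os
import subprocess
import sys


def run_case(start_command, runtime_envs, timeout=120):
    start_command_list = start_command.strip().split()
    try:
        return subprocess.run(
            start_command_list,
            env=runtime_envs,
            timeout=timeout,
            check=True,
        )
    except subprocess.TimeoutExpired as err:
        raise TimeoutError(
            "Timeout while running command {}, try to set a longer period, {} is not enough.".format(
                err.cmd, err.timeout
            )
        )
    except subprocess.CalledProcessError as err:
        raise RuntimeError(
            "Error occurs when running this test case. The return code of command {} is {}".format(
                err.cmd, err.returncode
            )
        )


if __name__ == "__main__":
    # Placeholder command; the real tests launch the dygraph scripts listed above.
    run_case(
        "{} --version".format(sys.executable),
        runtime_envs=dict(os.environ, backend="nccl"),
        timeout=30,
    )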
str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_allreduce_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_allreduce_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_allreduce_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamAllreduceAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_api.py index b6094d69321767983d45ac88ff3a931f194b0743..0169825e11451413f8d724d166026756be0c7cd4 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamAllToAllAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamAllToAllAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamAllToAllAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_alltoall_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_alltoall_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_alltoall_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamAllToAllAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_single_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_single_api.py index 81836dbc1313829979629a5957d378e1e442d1d3..6a322b511375a3354a2ff8509a4a000b6d79b97e 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_single_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_alltoall_single_api.py @@ -17,31 +17,34 @@ import test_communication_api_base as test_base class TestCommunicationStreamAllToAllSingleAPI( - test_base.CommunicationTestDistBase): - + test_base.CommunicationTestDistBase +): def setUp(self): - super(TestCommunicationStreamAllToAllSingleAPI, - self).setUp(num_of_devices=2, timeout=120) + super(TestCommunicationStreamAllToAllSingleAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": 
["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_alltoall_single_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue self.run_test_case( "communication_stream_alltoall_single_api_dygraph.py", - user_defined_envs=envs) + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamAllToAllSingleAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_broadcast_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_broadcast_api.py index 693ef95a204e74888a5906e8da8b5f0c4c527b8c..0bdf7e6b9044e3a8bc5f2e2613e6dc2cb6b10985 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_broadcast_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamBroadcastAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamBroadcastAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamBroadcastAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_broadcast_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_broadcast_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_broadcast_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamBroadcastAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_api.py index e0b50f6056fef5bf8409f87af27d5a047a5738ee..0fc539ba1a770ac65a7fa4381c8e2417c8d700bb 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamReduceAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamReduceAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamReduceAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_reduce_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - 
self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_reduce_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_reduce_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamReduceAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_scatter_api.py index 1f6522d4362250318e792f972f517b528d16575e..bb8b9237622597727f64b60eb7b2151bfc4e5fb4 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_scatter_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_reduce_scatter_api.py @@ -17,31 +17,34 @@ import test_communication_api_base as test_base class TestCommunicationStreamReduceScatterAPI( - test_base.CommunicationTestDistBase): - + test_base.CommunicationTestDistBase +): def setUp(self): - super(TestCommunicationStreamReduceScatterAPI, - self).setUp(num_of_devices=2, timeout=120) + super(TestCommunicationStreamReduceScatterAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_reduce_scatter_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue self.run_test_case( "communication_stream_reduce_scatter_api_dygraph.py", - user_defined_envs=envs) + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamReduceScatterAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_scatter_api.py index 2de99b40e73f19dd95403d1037e3f76bb91cb1fa..5219085bf3b0d51b573d5437fa9134a612c8d609 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_scatter_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_scatter_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamScatterAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamScatterAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamScatterAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_reduce_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if 
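Note: each stream-API test above feeds gen_product_envs_list a fixed default_envs dict plus a changeable_envs dict of candidate values, then skips the one unsupported combination (use_calc_stream="True" with sync_op="False"). The helper's body is not shown in this hunk; the sketch below is an assumed equivalent built on itertools.product and is only meant to illustrate the iteration.

import itertools


def gen_product_envs_list(default_envs, changeable_envs):
    # Assumed behaviour: cartesian product of the changeable values, each
    # combination merged on top of the defaults.
    keys = list(changeable_envs)
    envs_list = []
    for values in itertools.product(*(changeable_envs[k] for k in keys)):
        envs = dict(default_envs)
        envs.update(zip(keys, values))
        envs_list.append(envs)
    return envs_list


default_envs = {"backend": "nccl", "shape": "(100, 200)", "dtype": "float32"}
changeable_envs = {
    "sync_op": ["True", "False"],
    "use_calc_stream": ["True", "False"],
}
for envs in gen_product_envs_list(default_envs, changeable_envs):
    # Same guard as the tests: use_calc_stream without sync_op is skipped.
    if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]):
        continue
    print(envs)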
eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_scatter_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_scatter_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamScatterAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_sendrecv_api.py b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_sendrecv_api.py index b32ceb2a95229d1e115b6dcd0c02683c4f989f99..5bd668aa4b89346d92c25f9bab86dd49c7e110bf 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_communication_stream_sendrecv_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_communication_stream_sendrecv_api.py @@ -17,29 +17,32 @@ import test_communication_api_base as test_base class TestCommunicationStreamSendRecvAPI(test_base.CommunicationTestDistBase): - def setUp(self): - super(TestCommunicationStreamSendRecvAPI, self).setUp(num_of_devices=2, - timeout=120) + super(TestCommunicationStreamSendRecvAPI, self).setUp( + num_of_devices=2, timeout=120 + ) self._default_envs = { "backend": "nccl", "shape": "(100, 200)", "dtype": "float32", - "seeds": str(self._seeds) + "seeds": str(self._seeds), } self._changeable_envs = { "sync_op": ["True", "False"], - "use_calc_stream": ["True", "False"] + "use_calc_stream": ["True", "False"], } def test_sendrecv_stream(self): - envs_list = test_base.gen_product_envs_list(self._default_envs, - self._changeable_envs) + envs_list = test_base.gen_product_envs_list( + self._default_envs, self._changeable_envs + ) for envs in envs_list: if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]): continue - self.run_test_case("communication_stream_sendrecv_api_dygraph.py", - user_defined_envs=envs) + self.run_test_case( + "communication_stream_sendrecv_api_dygraph.py", + user_defined_envs=envs, + ) def tearDown(self): super(TestCommunicationStreamSendRecvAPI, self).tearDown() diff --git a/python/paddle/fluid/tests/unittests/collective/test_eager_dist_api.py b/python/paddle/fluid/tests/unittests/collective/test_eager_dist_api.py index 8fe1090e1fff79bea22cbe5a5a8e7ec8424cfae3..fdefe6bda4c8efaa96c4ffb1494d76678448c28b 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_eager_dist_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_eager_dist_api.py @@ -17,7 +17,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestProcessGroup(TestMultipleGpus): - def test_process_group_nccl(self): self.run_mnist_2gpu('process_group_nccl.py') diff --git a/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py b/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py index 80afdd393f0ec7db3a4d52be6c33295779011fe9..4656c97e67023304d423d44459f0e645d4e6160d 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py +++ b/python/paddle/fluid/tests/unittests/collective/test_gen_nccl_id_op.py @@ -34,30 +34,35 @@ def run_gen_ncc_id(attr): with paddle.static.program_guard(main_program, startup_program): nccl_id_var = startup_program.global_block().create_var( - name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) + name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW + ) for i in range(1, nccl_comm_num): startup_program.global_block().create_var( name="NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) if 
use_hallreduce: for i in range(0, nccl_comm_num): startup_program.global_block().create_var( name="Hierarchical_inter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().create_var( name="Hierarchical_exter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().append_op( type="gen_nccl_id", inputs={}, outputs={"NCCLID": nccl_id_var}, - attrs=attr) + attrs=attr, + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -65,7 +70,6 @@ def run_gen_ncc_id(attr): class TestGenNcclIdOp(unittest.TestCase): - def setUp(self): try: self._dist_ut_port_0 = int(os.environ["PADDLE_DIST_UT_PORT"]) @@ -98,7 +102,7 @@ class TestGenNcclIdOp(unittest.TestCase): for i in range(nranks): attr['trainer_id'] = i # NOTE: multiprocessing cannot be covered by coverage - p = Process(target=run_gen_ncc_id, args=(attr, )) + p = Process(target=run_gen_ncc_id, args=(attr,)) p.start() procs.append(p) diff --git a/python/paddle/fluid/tests/unittests/collective/test_new_group_api.py b/python/paddle/fluid/tests/unittests/collective/test_new_group_api.py index dfa0fb2402fd95f09510b3c636cd38558765379b..e70f7073c09d26f0de58c0dd59be89c7600eb033 100644 --- a/python/paddle/fluid/tests/unittests/collective/test_new_group_api.py +++ b/python/paddle/fluid/tests/unittests/collective/test_new_group_api.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestCollectiveAllreduceAPI(TestDistBase): - def _setup_config(self): pass def test_allreduce_nccl(self): - self.check_with_place("collective_allreduce_new_group_api.py", - "allreduce", "nccl") + self.check_with_place( + "collective_allreduce_new_group_api.py", "allreduce", "nccl" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/collective/world_size_and_rank.py b/python/paddle/fluid/tests/unittests/collective/world_size_and_rank.py index 5916370f7533c37a5526a44167e5a0669a11d963..f6ae8444ef4f871bc9679381c10fb32ec4349c8f 100644 --- a/python/paddle/fluid/tests/unittests/collective/world_size_and_rank.py +++ b/python/paddle/fluid/tests/unittests/collective/world_size_and_rank.py @@ -17,7 +17,6 @@ import paddle.distributed as dist class TestWorldSizeAndRankAPI(unittest.TestCase): - def setUp(self): self._num_of_ranks = 2 self._subgroup_ranks = [0, 1] @@ -33,9 +32,11 @@ class TestWorldSizeAndRankAPI(unittest.TestCase): self.assertEqual(dist.get_world_size(self._subgroup), world_size) def test_given_group_rank(self): - rank = self._subgroup_ranks.index( - self._global_rank - ) if self._global_rank in self._subgroup_ranks else -1 + rank = ( + self._subgroup_ranks.index(self._global_rank) + if self._global_rank in self._subgroup_ranks + else -1 + ) self.assertEqual(dist.get_rank(self._subgroup), rank) diff --git a/python/paddle/fluid/tests/unittests/collective_allgather_op.py b/python/paddle/fluid/tests/unittests/collective_allgather_op.py index e897e1638852fc78dcb02b190b65d08a1376c258..ab39b834fdbc7922d22758bce9d855c8ca33d089 100644 --- a/python/paddle/fluid/tests/unittests/collective_allgather_op.py +++ b/python/paddle/fluid/tests/unittests/collective_allgather_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveAllGather(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,26 +29,28 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = 
layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_allgather", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_allgather", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective_reducescatter.py b/python/paddle/fluid/tests/unittests/collective_reducescatter.py index 480f9aeb3805fd55d03d0884b3495d9857093d72..53f53046321d5765544e4c9a899763e0cb30f8f4 100644 --- a/python/paddle/fluid/tests/unittests/collective_reducescatter.py +++ b/python/paddle/fluid/tests/unittests/collective_reducescatter.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestCollectiveReduceScatter(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -29,9 +28,9 @@ class TestCollectiveReduceScatter(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = fluid.layers.collective._c_reducescatter(tindata, nranks) toutdata = fluid.layers.collective._c_sync_comm_stream(toutdata, 0) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py b/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py index 042817f8c3f2ca66cc4c7ff97a465dacec293de6..1605c16183c73839cf6702001348cca37269aea9 100644 --- a/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py +++ b/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveReduceScatter(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,26 +29,28 @@ class TestCollectiveReduceScatter(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofrs", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_reducescatter", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_reducescatter", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) + 
main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py index 981fe0f0ef53d821f1cfc9802452f7eb5f39143a..5cc96cf1c4e58cc757b9baf61f14e012eb4ea2f0 100644 --- a/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py +++ b/python/paddle/fluid/tests/unittests/ctr_dataset_reader.py @@ -61,12 +61,10 @@ def load_lr_input_record(sent): class CtrReader(object): - def __init__(self): pass def _reader_creator(self, filelist): - def get_rand(low=0.0, high=1.0): return random.random() @@ -85,9 +83,7 @@ class CtrReader(object): class DatasetCtrReader(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def get_rand(low=0.0, high=1.0): return random.random() @@ -97,9 +93,10 @@ class DatasetCtrReader(fleet.MultiSlotDataGenerator): dnn_input = load_dnn_input_record(fs[0]) lr_input = load_lr_input_record(fs[1]) click = [int(fs[2])] - yield ("dnn_data", dnn_input), \ - ("lr_data", lr_input), \ - ("click", click) + yield ("dnn_data", dnn_input), ("lr_data", lr_input), ( + "click", + click, + ) return iter @@ -115,7 +112,9 @@ def prepare_data(): lines = f.readlines() err_info = "wrong meta format" assert len(lines) == 2, err_info - assert 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1], err_info + assert ( + 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1] + ), err_info res = map(int, [_.split(':')[1] for _ in lines]) res = list(res) dnn_input_dim = res[0] @@ -126,10 +125,9 @@ def prepare_data(): return dnn_input_dim, lr_input_dim, train_file_path -def gen_fake_line(dnn_data_num=7, - dnn_data_range=1e5, - lr_data_num=5, - lr_data_range=1e5): +def gen_fake_line( + dnn_data_num=7, dnn_data_range=1e5, lr_data_num=5, lr_data_range=1e5 +): line = "" # for deep data @@ -188,16 +186,17 @@ def prepare_fake_data(file_nums=4, file_lines=500): warnings.warn("Fake data write in {}".format(file_dir)) for file_index in range(file_nums): with open( - os.path.join(file_dir, - "ctr_train_data_part_{}".format(file_index)), - 'w+') as fin: + os.path.join(file_dir, "ctr_train_data_part_{}".format(file_index)), + 'w+', + ) as fin: file_str = "" file_str += gen_zero_line() for line_index in range(file_lines - 1): file_str += gen_fake_line() fin.write(file_str) warnings.warn( - "Write done ctr_train_data_part_{}".format(file_index)) + "Write done ctr_train_data_part_{}".format(file_index) + ) file_list = [os.path.join(file_dir, x) for x in os.listdir(file_dir)] assert len(file_list) == file_nums diff --git a/python/paddle/fluid/tests/unittests/decorator_helper.py b/python/paddle/fluid/tests/unittests/decorator_helper.py index 0d7887d9189d13b410ba30102b9fbec5f291760a..d1165e2a9199454dbcc1fda411afad20449bcc92 100644 --- a/python/paddle/fluid/tests/unittests/decorator_helper.py +++ b/python/paddle/fluid/tests/unittests/decorator_helper.py @@ -18,9 +18,7 @@ __all__ = ['many_times', 'prog_scope'] def many_times(times): - def __impl__(fn): - def __fn__(*args, **kwargs): for _ in range(times): fn(*args, **kwargs) @@ -31,9 +29,7 @@ def many_times(times): def prog_scope(): - def __impl__(fn): - def __fn__(*args, **kwargs): prog = fluid.Program() startup_prog = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/detected_gpu.py b/python/paddle/fluid/tests/unittests/detected_gpu.py index 
47287278ed96d4693270924f66fc9b418ad6d396..e77c6f5c8ffe2d9721115b29d26aade322dcd8e0 100644 --- a/python/paddle/fluid/tests/unittests/detected_gpu.py +++ b/python/paddle/fluid/tests/unittests/detected_gpu.py @@ -18,8 +18,10 @@ import paddle.fluid as fluid print("compile with cuda:", fluid.core.is_compiled_with_cuda()) print("get_cuda_device_count:", fluid.core.get_cuda_device_count()) -if fluid.core.is_compiled_with_cuda( -) and fluid.core.get_cuda_device_count() > 0: +if ( + fluid.core.is_compiled_with_cuda() + and fluid.core.get_cuda_device_count() > 0 +): sys.exit(0) else: sys.exit(1) diff --git a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py index 49ef5d64bc73b67452b0fc010fa28a6c8c9befaa..af769963158c2e136b7880302f9d8f28bb853e04 100644 --- a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py @@ -35,8 +35,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -44,25 +46,28 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return predict class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, single_device=False): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -75,17 +80,19 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) # Optimization # TODO(typhoonzero): fix distributed adam optimizer @@ -104,7 +111,14 @@ class TestDistMnist2x2(TestDistRunnerBase): data_parallel_param_grads.append([p, grad_reduce]) opt.apply_gradients(data_parallel_param_grads) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_ctr.py b/python/paddle/fluid/tests/unittests/dist_ctr.py index 
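Note: as elsewhere in this PR, the mechanical change in these training scripts is black's wrapping rule: once a call, argument list, or return tuple no longer fits on one line, each element moves to its own line and gains a trailing comma, replacing yapf's column-aligned continuations. A tiny self-contained illustration of the two styles; build_model is a hypothetical helper, not a function from the test files.

# yapf (pep8 style, 80 columns) aligned continuation lines under the open paren:
#
#     predict = build_model(data="conv_pool_2", size=10, act="softmax",
#                           initializer_value=0.01)
#
# black explodes the call one argument per line with a trailing comma:
def build_model(data, size, act, initializer_value):
    # Hypothetical helper used only to make the snippet runnable.
    return {"data": data, "size": size, "act": act, "init": initializer_value}


predict = build_model(
    data="conv_pool_2",
    size=10,
    act="softmax",
    initializer_value=0.01,
)
print(predict)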
b5578eca6ad081a7af025614b9bc5ffd4a176dbc..ab10ca2d639972371079ee40ecbc133d6878ae70 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr.py @@ -28,26 +28,31 @@ fluid.default_main_program().random_seed = 1 class TestDistCTR2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta() """ network definition """ - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) # build dnn model dnn_layer_dims = [128, 64, 32, 1] @@ -57,10 +62,13 @@ class TestDistCTR2x2(TestDistRunnerBase): size=[dnn_input_dim, dnn_layer_dims[0]], param_attr=fluid.ParamAttr( name="deep_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=IS_SPARSE) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=IS_SPARSE, + ) + dnn_pool = fluid.layers.sequence_pool( + input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool for i, dim in enumerate(dnn_layer_dims[1:]): fc = fluid.layers.fc( @@ -68,8 +76,10 @@ class TestDistCTR2x2(TestDistRunnerBase): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc # build lr model @@ -79,16 +89,19 @@ class TestDistCTR2x2(TestDistRunnerBase): size=[lr_input_dim, 1], param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=IS_SPARSE) + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=IS_SPARSE, + ) lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum") merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax') acc = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, - label=label) + auc_var, batch_auc_var, auc_states = fluid.layers.auc( + input=predict, label=label + ) cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(x=cost) @@ -98,24 +111,35 @@ class TestDistCTR2x2(TestDistRunnerBase): use_l2_decay = bool(os.getenv('USE_L2_DECAY', 0)) if use_l2_decay: regularization = fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-1) + regularization_coeff=1e-1 + ) use_lr_decay = bool(os.getenv('LR_DECAY', 0)) lr = 0.0001 if use_lr_decay: - lr = fluid.layers.exponential_decay(learning_rate=0.0001, - decay_steps=10000, - decay_rate=0.999, - staircase=True) - - sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr, - regularization=regularization) + lr = fluid.layers.exponential_decay( + learning_rate=0.0001, + 
decay_steps=10000, + decay_rate=0.999, + staircase=True, + ) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=lr, regularization=regularization + ) sgd_optimizer.minimize(avg_cost) dataset = dist_ctr_reader.Dataset() train_reader = paddle.batch(dataset.train(), batch_size=batch_size) test_reader = paddle.batch(dataset.test(), batch_size=batch_size) - return inference_program, avg_cost, train_reader, test_reader, None, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + None, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py index 4bc231e4eafe6224c755d19505167e295ed5a09b..fafb5f19c5f7d062759a84e3cebed4b5e5a4bcbd 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py @@ -109,7 +109,6 @@ feeding_index = {'dnn_input': 0, 'lr_input': 1, 'click': 2} class Dataset(object): - def train(self): ''' Load trainset. @@ -164,7 +163,9 @@ def load_data_meta(): lines = read_data('data.meta.txt') err_info = "wrong meta format" assert len(lines) == 2, err_info - assert 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1], err_info + assert ( + 'dnn_input_dim:' in lines[0] and 'lr_input_dim:' in lines[1] + ), err_info res = map(int, [_.split(':')[1] for _ in lines]) res = list(res) logger.info('dnn input dim: %d' % res[0]) diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py index 53ef00064bfca9f63127c029822711062b2217ba..dd01938556f14316caf84e838b798c6423140b10 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py @@ -35,7 +35,6 @@ fluid.default_main_program().random_seed = 1 def fake_ctr_reader(): - def reader(): for _ in range(1000): deep = np.random.random_integers(0, 1e5 - 1, size=16).tolist() @@ -63,38 +62,47 @@ class TestDistCTR2x2(FleetDistRunnerBase): """ dnn_input_dim, lr_input_dim = int(1e5), int(1e5) - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] if args.reader == "pyreader": if is_train: - self.reader = fluid.io.PyReader(feed_list=datas, - capacity=64, - iterable=False, - use_double_buffer=False) + self.reader = fluid.io.PyReader( + feed_list=datas, + capacity=64, + iterable=False, + use_double_buffer=False, + ) else: - self.test_reader = fluid.io.PyReader(feed_list=datas, - capacity=64, - iterable=False, - use_double_buffer=False) - - -# build dnn model + self.test_reader = fluid.io.PyReader( + feed_list=datas, + capacity=64, + iterable=False, + use_double_buffer=False, + ) + + # build dnn model dnn_layer_dims = [128, 128, 64, 32, 1] dnn_embedding = fluid.layers.embedding( is_distributed=False, @@ 
-102,11 +110,14 @@ class TestDistCTR2x2(FleetDistRunnerBase): size=[dnn_input_dim, dnn_layer_dims[0]], param_attr=fluid.ParamAttr( name="deep_embedding", - initializer=fluid.initializer.Constant(value=0.01)), + initializer=fluid.initializer.Constant(value=0.01), + ), is_sparse=True, - padding_idx=0) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + padding_idx=0, + ) + dnn_pool = fluid.layers.sequence_pool( + input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool for i, dim in enumerate(dnn_layer_dims[1:]): fc = fluid.layers.fc( @@ -114,8 +125,10 @@ class TestDistCTR2x2(FleetDistRunnerBase): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc # build lr model @@ -125,9 +138,11 @@ class TestDistCTR2x2(FleetDistRunnerBase): size=[lr_input_dim, 1], param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01)), + initializer=fluid.initializer.Constant(value=0.01), + ), is_sparse=True, - padding_idx=0) + padding_idx=0, + ) lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum") merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) @@ -135,8 +150,9 @@ class TestDistCTR2x2(FleetDistRunnerBase): predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax') acc = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, - label=label) + auc_var, batch_auc_var, auc_states = fluid.layers.auc( + input=predict, label=label + ) cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(x=cost) @@ -176,11 +192,14 @@ class TestDistCTR2x2(FleetDistRunnerBase): try: while True: batch_idx += 1 - loss_val = exe.run(program=paddle.static.default_main_program(), - fetch_list=[self.avg_cost.name]) + loss_val = exe.run( + program=paddle.static.default_main_program(), + fetch_list=[self.avg_cost.name], + ) loss_val = np.mean(loss_val) message = "TEST ---> batch_idx: {} loss: {}\n".format( - batch_idx, loss_val) + batch_idx, loss_val + ) fleet.util.print_on_rank(message, 0) except fluid.core.EOFException: self.test_reader.reset() @@ -208,8 +227,10 @@ class TestDistCTR2x2(FleetDistRunnerBase): try: pass_start = time.time() while True: - loss_val = exe.run(program=fluid.default_main_program(), - fetch_list=[self.avg_cost.name]) + loss_val = exe.run( + program=fluid.default_main_program(), + fetch_list=[self.avg_cost.name], + ) loss_val = np.mean(loss_val) # TODO(randomly fail) # reduce_output = fleet.util.all_reduce( @@ -217,7 +238,8 @@ class TestDistCTR2x2(FleetDistRunnerBase): # loss_all_trainer = fleet.util.all_gather(float(loss_val)) # loss_val = float(reduce_output) / len(loss_all_trainer) message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val) + epoch_id, loss_val + ) fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start @@ -229,9 +251,9 @@ class TestDistCTR2x2(FleetDistRunnerBase): fleet.save_persistables(exe, dirname=dirname) model_dir = tempfile.mkdtemp() - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost + ) if fleet.is_first_worker(): self.check_model_right(model_dir) shutil.rmtree(model_dir) @@ -251,29 +273,36 @@ class 
TestDistCTR2x2(FleetDistRunnerBase): dataset = paddle.distributed.QueueDataset() pipe_command = 'python ctr_dataset_reader.py' - dataset.init(batch_size=batch_size, - use_var=self.feeds, - pipe_command=pipe_command, - thread_num=thread_num) + dataset.init( + batch_size=batch_size, + use_var=self.feeds, + pipe_command=pipe_command, + thread_num=thread_num, + ) dataset.set_filelist(filelist) for epoch_id in range(1): pass_start = time.time() dataset.set_filelist(filelist) - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - fetch_list=[self.avg_cost], - fetch_info=["cost"], - print_period=2, - debug=int(os.getenv("Debug", "0"))) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0")), + ) pass_time = time.time() - pass_start if os.getenv("SAVE_MODEL") == "1": model_dir = tempfile.mkdtemp() - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, + model_dir, + [feed.name for feed in self.feeds], + self.avg_cost, + ) if fleet.is_first_worker(): self.check_model_right(model_dir) shutil.rmtree(model_dir) @@ -311,20 +340,25 @@ class TestDistCTR2x2(FleetDistRunnerBase): for epoch_id in range(1): pass_start = time.time() - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - fetch_list=[self.avg_cost], - fetch_info=["cost"], - print_period=2, - debug=int(os.getenv("Debug", "0"))) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0")), + ) pass_time = time.time() - pass_start dataset.release_memory() if os.getenv("SAVE_MODEL") == "1": model_dir = tempfile.mkdtemp() - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, + model_dir, + [feed.name for feed in self.feeds], + self.avg_cost, + ) fleet.load_inference_model(model_dir, mode=0) if fleet.is_first_worker(): self.check_model_right(model_dir) @@ -341,9 +375,12 @@ class TestDistCTR2x2(FleetDistRunnerBase): dense_param_dirname = os.getenv("SAVE_DENSE_PARAM_DIRNAME", None) if dense_param_dirname: - fleet.save_dense_params(exe, dense_param_dirname, - fluid.global_scope(), - fluid.default_main_program()) + fleet.save_dense_params( + exe, + dense_param_dirname, + fluid.global_scope(), + fluid.default_main_program(), + ) save_one_table_dirname = os.getenv("SAVE_ONE_TABLE_DIRNAME", None) if save_one_table_dirname: @@ -355,5 +392,6 @@ class TestDistCTR2x2(FleetDistRunnerBase): fleet.save_persistables(exe, patch_dirname, None, 5) fleet.check_save_pre_patch_done() + if __name__ == "__main__": runtime_main(TestDistCTR2x2) diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py index 13ec262697e5d61d4af5aa04b954be1ee8b767f7..62b41a3f455488293b67270f0b077b8e6857f31e 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py @@ -70,15 +70,19 @@ class TestDistGpuPsCTR2x2(TestDistCTR2x2): try: pass_start = time.time() while True: - loss_val = exe.run(program=fleet.main_program, - fetch_list=[self.avg_cost.name]) + loss_val = exe.run( + program=fleet.main_program, + fetch_list=[self.avg_cost.name], + ) loss_val = 
np.mean(loss_val) - reduce_output = fleet.util.all_reduce(np.array(loss_val), - mode="sum") + reduce_output = fleet.util.all_reduce( + np.array(loss_val), mode="sum" + ) loss_all_trainer = fleet.util.all_gather(float(loss_val)) loss_val = float(reduce_output) / len(loss_all_trainer) message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val) + epoch_id, loss_val + ) fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start @@ -86,9 +90,9 @@ class TestDistGpuPsCTR2x2(TestDistCTR2x2): self.reader.reset() model_dir = tempfile.mkdtemp() - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost + ) if fleet.is_first_worker(): self.check_model_right(model_dir) if fleet.is_first_worker(): @@ -96,8 +100,11 @@ class TestDistGpuPsCTR2x2(TestDistCTR2x2): shutil.rmtree(model_dir) def do_dataset_training(self, fleet): - dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data( - ) + ( + dnn_input_dim, + lr_input_dim, + train_file_path, + ) = ctr_dataset_reader.prepare_data() device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace(device_id) @@ -125,19 +132,24 @@ class TestDistGpuPsCTR2x2(TestDistCTR2x2): for epoch_id in range(1): pass_start = time.time() dataset.set_filelist(filelist) - exe.train_from_dataset(program=fleet.main_program, - dataset=dataset, - fetch_list=[self.avg_cost], - fetch_info=["cost"], - print_period=2, - debug=int(os.getenv("Debug", "0"))) + exe.train_from_dataset( + program=fleet.main_program, + dataset=dataset, + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0")), + ) pass_time = time.time() - pass_start if os.getenv("SAVE_MODEL") == "1": model_dir = tempfile.mkdtemp() - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, + model_dir, + [feed.name for feed in self.feeds], + self.avg_cost, + ) if fleet.is_first_worker(): self.check_model_right(model_dir) if fleet.is_first_worker(): diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_debug_gloo.py b/python/paddle/fluid/tests/unittests/dist_fleet_debug_gloo.py index 82f7a2bc672edf22920382e5367ab48e971501aa..5b0c66538bbd70f58910a9bf652e74bac555bcb1 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_debug_gloo.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_debug_gloo.py @@ -13,21 +13,24 @@ # limitations under the License. 
import logging -#import paddle.fluid.incubate.fleet.base.role_maker as role_maker + +# import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.distributed.fleet.base.role_maker as role_maker -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger("fluid") logger.setLevel(logging.INFO) -#role = role_maker.GeneralRoleMaker( -#init_timeout_seconds=100, -#run_timeout_seconds=100, -#http_ip_port="127.0.0.1:26001") +# role = role_maker.GeneralRoleMaker( +# init_timeout_seconds=100, +# run_timeout_seconds=100, +# http_ip_port="127.0.0.1:26001") -#role = role_maker.PaddleCloudRoleMaker(http_ip_port="127.0.0.1:26001") +# role = role_maker.PaddleCloudRoleMaker(http_ip_port="127.0.0.1:26001") -#role = role_maker.GeneralRoleMaker(path="./tmp4") +# role = role_maker.GeneralRoleMaker(path="./tmp4") logger.info("Begin") res = [0, 0] @@ -37,7 +40,7 @@ role = role_maker.PaddleCloudRoleMaker(path="./tmp4") fleet.init(role) print("init wancheng") # -#if fleet.is_worker(): +# if fleet.is_worker(): # import time # time.sleep(3) @@ -49,7 +52,7 @@ if fleet.worker_index() == 0: elif fleet.worker_index() == 1: role._all_reduce(role._node_type_comm, b) -#logger.info(res) -#print("res ", res) +# logger.info(res) +# print("res ", res) -#role._barrier_all() +# role._barrier_all() diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py index c4e78ba16e05e83a21dbce9864b42db8e449ccf0..5f3aaf4efdc8e676dc47792bba23fb8998efafc4 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py @@ -49,21 +49,27 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): dnn_input_dim, lr_input_dim = int(1e5), int(1e5) with fluid.device_guard("cpu"): - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="float32", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="float32", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] @@ -75,10 +81,13 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): size=[dnn_input_dim, dnn_layer_dims[0]], param_attr=fluid.ParamAttr( name="deep_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) + dnn_pool = fluid.layers.sequence_pool( + input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool # build lr model @@ -88,10 +97,13 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): size=[lr_input_dim, 1], param_attr=fluid.ParamAttr( name="wide_embedding", - 
initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) - lr_pool = fluid.layers.sequence_pool(input=lr_embbding, - pool_type="sum") + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) + lr_pool = fluid.layers.sequence_pool( + input=lr_embbding, pool_type="sum" + ) with fluid.device_guard("gpu"): for i, dim in enumerate(dnn_layer_dims[1:]): @@ -100,8 +112,10 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc with fluid.device_guard("cpu"): @@ -135,8 +149,9 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): train_file_list = ctr_dataset_reader.prepare_fake_data() exe = fluid.Executor(fluid.CPUPlace()) - real_program = fluid.default_main_program( - )._heter_pipeline_opt["section_program"] + real_program = fluid.default_main_program()._heter_pipeline_opt[ + "section_program" + ] print(real_program) exe.run(fluid.default_startup_program()) @@ -161,12 +176,14 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): for epoch_id in range(1): pass_start = time.time() dataset.set_filelist(filelist) - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - fetch_list=[self.avg_cost], - fetch_info=["cost"], - print_period=2, - debug=int(os.getenv("Debug", "0"))) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0")), + ) pass_time = time.time() - pass_start print("do_dataset_training done. using time {}".format(pass_time)) exe.close() @@ -176,24 +193,27 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): exe = fluid.Executor() exe.run(fluid.default_startup_program()) fleet.init_worker() - real_program = fluid.default_main_program( - )._heter_pipeline_opt["section_program"] + real_program = fluid.default_main_program()._heter_pipeline_opt[ + "section_program" + ] print(real_program) thread_num = int(os.getenv("CPU_NUM", 2)) batch_size = 128 pass_start = time.time() - exe.train_from_dataset(program=fluid.default_main_program(), - fetch_list=[self.avg_cost], - fetch_info=["cost"], - print_period=2, - debug=int(os.getenv("Debug", "0"))) + exe.train_from_dataset( + program=fluid.default_main_program(), + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0")), + ) exe.close() pass_time = time.time() - pass_start print("do_dataset_heter_training done. 
using time {}".format(pass_time)) - #for epoch_id in range(1): + # for epoch_id in range(1): # pass_start = time.time() # dataset.set_filelist(filelist) # exe.train_from_dataset( diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py index 54c37822257776516cd65664151442c4ed5b8572..fabf762f63b1db6e4da6cffd70c2a14f7fd9f1c3 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py @@ -37,8 +37,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -46,25 +48,28 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return predict class TestFleetMetaOptimizerPrecision(TestDistRunnerBase): - def get_model(self, batch_size=2, single_device=False): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -77,17 +82,19 @@ class TestFleetMetaOptimizerPrecision(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) test_program = fluid.default_main_program().clone(for_test=True) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) optimizer = paddle.fluid.optimizer.Adam(0.01) if single_device: @@ -97,11 +104,19 @@ class TestFleetMetaOptimizerPrecision(TestDistRunnerBase): fleet.init(role) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.without_graph_optimization = True - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) - return test_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py index 5db0377b36a0e9a1417559c2a1bc70d5b856478a..cf6a1415bcb7621154348bda9e4ac8901f8731a4 100644 --- 
a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py @@ -37,8 +37,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -46,25 +48,28 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return predict class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase): - def get_model(self, batch_size=2, single_device=False): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -77,17 +82,19 @@ class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) test_program = fluid.default_main_program().clone(for_test=True) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) optimizer = paddle.fluid.optimizer.Adam(0.01) if single_device: @@ -100,11 +107,19 @@ class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase): strategy.fuse_all_reduce_ops = True strategy._calc_comm_same_stream = False strategy.fuse_grad_size_in_num = 8 - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) - return test_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py index 84687999d9c5051f53f61342d6e68be4d54fe643..f9d926ad1c73356b846005076abb61c98b8eb00e 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py @@ -41,7 +41,6 @@ fluid.default_main_program().random_seed = 1 def fake_simnet_reader(): - def reader(): for _ in range(1000): q = np.random.random_integers(0, 1500 - 1, size=1).tolist() @@ -57,62 +56,66 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = 
fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost -def train_network(batch_size, - is_distributed=False, - is_sparse=False, - is_self_contained_lr=False, - is_pyreader=False): +def train_network( + batch_size, + is_distributed=False, + is_sparse=False, + is_self_contained_lr=False, + is_pyreader=False, +): # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) datas = [q, label, pt, nt] reader = None if is_pyreader: - reader = fluid.io.PyReader(feed_list=datas, - capacity=64, - iterable=False, - use_double_buffer=False) + reader = fluid.io.PyReader( + feed_list=datas, + capacity=64, + iterable=False, + use_double_buffer=False, + ) # embedding q_emb = fluid.embedding( @@ -120,8 +123,10 @@ def train_network(batch_size, is_distributed=is_distributed, size=[dict_dim, emb_dim], param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01), name="__emb__"), - is_sparse=is_sparse) + initializer=fluid.initializer.Constant(value=0.01), name="__emb__" + ), + is_sparse=is_sparse, + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -133,7 +138,8 @@ def train_network(batch_size, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr), + learning_rate=base_lr, + ), ) # embedding @@ -144,8 +150,10 @@ def train_network(batch_size, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -155,8 +163,10 @@ def train_network(batch_size, input=pt_ss, size=hid_dim, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01), 
name="__fc__"), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + initializer=fluid.initializer.Constant(value=0.01), name="__fc__" + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # embedding nt_emb = fluid.embedding( @@ -164,8 +174,10 @@ def train_network(batch_size, is_distributed=is_distributed, size=[dict_dim, emb_dim], param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01), name="__emb__"), - is_sparse=is_sparse) + initializer=fluid.initializer.Constant(value=0.01), name="__emb__" + ), + is_sparse=is_sparse, + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -175,8 +187,10 @@ def train_network(batch_size, input=nt_ss, size=hid_dim, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01), name="__fc__"), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + initializer=fluid.initializer.Constant(value=0.01), name="__fc__" + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -192,9 +206,13 @@ class TestDistSimnetBow2x2(FleetDistRunnerBase): """ def net(self, args, batch_size=4, lr=0.01): - avg_cost, _, predict, self.reader = \ - train_network(batch_size=batch_size, is_distributed=False, - is_sparse=True, is_self_contained_lr=False, is_pyreader=(args.reader == "pyreader")) + avg_cost, _, predict, self.reader = train_network( + batch_size=batch_size, + is_distributed=False, + is_sparse=True, + is_self_contained_lr=False, + is_pyreader=(args.reader == "pyreader"), + ) self.avg_cost = avg_cost self.predict = predict @@ -229,11 +247,14 @@ class TestDistSimnetBow2x2(FleetDistRunnerBase): try: pass_start = time.time() while True: - loss_val = exe.run(program=fluid.default_main_program(), - fetch_list=[self.avg_cost.name]) + loss_val = exe.run( + program=fluid.default_main_program(), + fetch_list=[self.avg_cost.name], + ) loss_val = np.mean(loss_val) message = "TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val) + epoch_id, loss_val + ) fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py index c6a976b089b4ab52077dedc8c20064c0e3d3a2c2..c1410348e0fc401bda417447732fed1c96a18eed 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py @@ -26,7 +26,6 @@ from test_dist_fleet_base import runtime_main, FleetDistRunnerBase def fake_ctr_reader(): - def reader(): for _ in range(1000): deep = np.random.random_integers(0, 1e10, size=16).tolist() @@ -54,29 +53,37 @@ class TestDistCTR2x2(FleetDistRunnerBase): """ dnn_input_dim, lr_input_dim = 10, 10 - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + 
name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] if args.reader == "pyreader": - self.reader = fluid.io.PyReader(feed_list=datas, - capacity=64, - iterable=False, - use_double_buffer=False) + self.reader = fluid.io.PyReader( + feed_list=datas, + capacity=64, + iterable=False, + use_double_buffer=False, + ) # build dnn model initializer = int(os.getenv("INITIALIZER", "0")) @@ -98,9 +105,11 @@ class TestDistCTR2x2(FleetDistRunnerBase): size=[dnn_input_dim, dnn_layer_dims[0]], is_test=inference, entry=entry, - param_attr=fluid.ParamAttr(name="deep_embedding", initializer=init)) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + param_attr=fluid.ParamAttr(name="deep_embedding", initializer=init), + ) + dnn_pool = fluid.layers.sequence_pool( + input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool for i, dim in enumerate(dnn_layer_dims[1:]): fc = fluid.layers.fc( @@ -108,8 +117,10 @@ class TestDistCTR2x2(FleetDistRunnerBase): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc # build lr model @@ -120,7 +131,9 @@ class TestDistCTR2x2(FleetDistRunnerBase): entry=entry, param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01), + ), + ) lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum") merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) @@ -159,19 +172,27 @@ class TestDistCTR2x2(FleetDistRunnerBase): self.reader.start() try: while True: - loss_val = exe.run(program=fluid.default_main_program(), - fetch_list=[self.avg_cost.name]) + loss_val = exe.run( + program=fluid.default_main_program(), + fetch_list=[self.avg_cost.name], + ) loss_val = np.mean(loss_val) - print("TRAIN ---> pass: {} loss: {}\n".format( - epoch_id, loss_val)) + print( + "TRAIN ---> pass: {} loss: {}\n".format( + epoch_id, loss_val + ) + ) except fluid.core.EOFException: self.reader.reset() model_dir = os.getenv("MODEL_DIR", None) if model_dir: - fleet.save_inference_model(exe, model_dir, - [feed.name for feed in self.feeds], - self.avg_cost) + fleet.save_inference_model( + exe, + model_dir, + [feed.name for feed in self.feeds], + self.avg_cost, + ) fleet.load_model(model_dir, mode=1) diff --git a/python/paddle/fluid/tests/unittests/dist_mnist.py b/python/paddle/fluid/tests/unittests/dist_mnist.py index 1a0f61411a3ace7bebc24d74d4f8fd08be213305..856ac1b930bbf445e9e6418212f0b1138b9ff6a1 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist.py @@ -36,8 +36,10 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, @@ -45,25 +47,28 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) SIZE = 10 input_shape = conv_pool_2.shape param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale 
= (2.0 / (param_shape[0]**2 * SIZE))**0.5 + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return predict class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -76,9 +81,9 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization @@ -88,24 +93,34 @@ class TestDistMnist2x2(TestDistRunnerBase): if not use_dgc: opt = fluid.optimizer.Momentum(learning_rate=self.lr, momentum=0.9) else: - opt = fluid.optimizer.DGCMomentumOptimizer(learning_rate=self.lr, - momentum=0.9, - rampup_begin_step=2) + opt = fluid.optimizer.DGCMomentumOptimizer( + learning_rate=self.lr, momentum=0.9, rampup_begin_step=2 + ) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=dist_strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=dist_strategy + ) _, param_grads = dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py index 67981130fc265ee7f4fc48973cd7dfba84b9ab85..1e022a1c44518b39a73550c9b6f6df9baae7bf6c 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py @@ -35,7 +35,6 @@ def test_merge_reader(repeat_batch_size=8): class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -48,9 +47,9 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization @@ -58,10 +57,18 @@ class TestDistMnist2x2(TestDistRunnerBase): # Reader train_reader = paddle.batch(test_merge_reader, batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + 
return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py index c9d37247b776714b1d13e1e251938b09ed6215a6..c0d92d01e6c6aba6d20fecb70d808eaa499325ed 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py @@ -14,7 +14,9 @@ import paddle import paddle.fluid as fluid -from paddle.distributed.fleet.meta_optimizers import FP16AllReduceOptimizer as FP16AllReduce +from paddle.distributed.fleet.meta_optimizers import ( + FP16AllReduceOptimizer as FP16AllReduce, +) from test_dist_base import TestDistRunnerBase, runtime_main from dist_mnist import cnn_model @@ -27,7 +29,6 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -40,23 +41,33 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization - opt = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.MomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) opt = FP16AllReduce(opt) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py index b93dfb0594eda43f29b635fcd2632f43730d1bbe..c117efb84f8308b3bd44a14390eb6e1793103f79 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py @@ -26,7 +26,6 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): # Input data images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) @@ -39,22 +38,32 @@ class TestDistMnist2x2(TestDistRunnerBase): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization - opt = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, - momentum=0.9) + opt = fluid.optimizer.LarsMomentumOptimizer( + learning_rate=0.001, momentum=0.9 + ) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) - test_reader = 
paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_save_load.py b/python/paddle/fluid/tests/unittests/dist_save_load.py index 8d0083aaffea9963bf8d813f6005f7db88b7d149..eb36010ea6fac02eb622def3a4c5dd508f131ce8 100644 --- a/python/paddle/fluid/tests/unittests/dist_save_load.py +++ b/python/paddle/fluid/tests/unittests/dist_save_load.py @@ -28,9 +28,7 @@ from dist_simnet_bow import TestDistSimnetBow2x2, DATA_URL, DATA_MD5 class TestDistSaveLoad2x2(TestDistSimnetBow2x2): - def _load_persistable_vars(self, executor, dirname, program): - def _is_checkpoint_var(var): """ the checkpoint will not save or load all the variables. @@ -38,9 +36,11 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): : param var(Variable) """ - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.RAW: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.RAW + ): return False # @GRAD are named for gradient variables, checkpoint will not save it. if "@GRAD" in var.name: @@ -58,21 +58,30 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): return var.persistable - io.load_vars(executor, - dirname=dirname, - main_program=program, - predicate=_is_checkpoint_var, - filename=None) + io.load_vars( + executor, + dirname=dirname, + main_program=program, + predicate=_is_checkpoint_var, + filename=None, + ) def run_pserver(self, args): self.get_model(batch_size=2) # NOTE: pserver should not call memory optimize - t = self.get_transpiler(args.trainer_id, fluid.default_main_program(), - args.endpoints, args.trainers, args.sync_mode, - False, args.current_endpoint) + t = self.get_transpiler( + args.trainer_id, + fluid.default_main_program(), + args.endpoints, + args.trainers, + args.sync_mode, + False, + args.current_endpoint, + ) pserver_prog = t.get_pserver_program(args.current_endpoint) - startup_prog = t.get_startup_program(args.current_endpoint, - pserver_prog) + startup_prog = t.get_startup_program( + args.current_endpoint, pserver_prog + ) need_load = bool(int(os.getenv("LOAD", "0"))) model_dir = os.getenv("MODEL_DIR", "") @@ -87,14 +96,23 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): exe.run(pserver_prog) def run_trainer(self, args): - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=2) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model(batch_size=2) if args.update_method == "pserver": - t = self.get_transpiler(args.trainer_id, - fluid.default_main_program(), - args.endpoints, args.trainers, - args.sync_mode) + t = self.get_transpiler( + args.trainer_id, + fluid.default_main_program(), + args.endpoints, + args.trainers, + args.sync_mode, + ) trainer_prog = t.get_trainer_program() else: @@ -114,17 +132,24 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): build_stra = fluid.BuildStrategy() if args.use_reduce: - 
build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + build_stra.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) else: - build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + build_stra.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.AllReduce + ) - exe = fluid.ParallelExecutor(args.use_cuda, - loss_name=avg_cost.name, - exec_strategy=strategy, - build_strategy=build_stra) + exe = fluid.ParallelExecutor( + args.use_cuda, + loss_name=avg_cost.name, + exec_strategy=strategy, + build_strategy=build_stra, + ) feed_var_list = [ - var for var in trainer_prog.global_block().vars.values() + var + for var in trainer_prog.global_block().vars.values() if var.is_data ] @@ -149,13 +174,15 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): if save_mode == "LOCAL": if need_save: for _ in range(RUN_STEP): - loss, = exe.run(fetch_list=[avg_cost.name], - feed=feeder.feed(get_data())) + (loss,) = exe.run( + fetch_list=[avg_cost.name], feed=feeder.feed(get_data()) + ) if need_save and model_dir: io.save_persistables(startup_exe, model_dir, trainer_prog) var = np.array( - fluid.global_scope().find_var('__fc_b__').get_tensor()) + fluid.global_scope().find_var('__fc_b__').get_tensor() + ) sys.stdout.buffer.write(pickle.dumps(np.ravel(var).tolist())) elif save_mode == "DIST": @@ -163,18 +190,26 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2): loss = None if need_save: for idx in range(8): - loss, = exe.run(fetch_list=[avg_cost.name], - feed=feeder.feed(get_data())) - if need_save and model_dir and idx == skip_steps and args.trainer_id == 0: - io.save_persistables(startup_exe, model_dir, - trainer_prog) + (loss,) = exe.run( + fetch_list=[avg_cost.name], feed=feeder.feed(get_data()) + ) + if ( + need_save + and model_dir + and idx == skip_steps + and args.trainer_id == 0 + ): + io.save_persistables( + startup_exe, model_dir, trainer_prog + ) else: for idx in range(8): data = get_data() if idx <= skip_steps: continue - loss, = exe.run(fetch_list=[avg_cost.name], - feed=feeder.feed(data)) + (loss,) = exe.run( + fetch_list=[avg_cost.name], feed=feeder.feed(data) + ) sys.stdout.buffer.write(pickle.dumps(loss.tolist())) else: raise Exception("save_mode must be LOCAL or DIST") diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 5061bb8595615e234ca90d2cb54aad8480b6666a..0d8ed873f0398fac67d50ad0c0b3f91e7804cf2c 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -31,13 +31,12 @@ train_parameters = { "learning_strategy": { "name": "piecewise_decay", "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] - } + "steps": [0.1, 0.01, 0.001, 0.0001], + }, } -class SE_ResNeXt(): - +class SE_ResNeXt: def __init__(self, layers=50): self.params = train_parameters self.layers = layers @@ -45,64 +44,65 @@ class SE_ResNeXt(): def net(self, input, class_dim=1000): layers = self.layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 6, 3] num_filters = [128, 256, 512, 1024] - conv = self.conv_bn_layer(input=input, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - conv = 
fluid.layers.pool2d(input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + conv = self.conv_bn_layer( + input=input, num_filters=64, filter_size=7, stride=2, act='relu' + ) + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max', + ) elif layers == 101: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 23, 3] num_filters = [128, 256, 512, 1024] - conv = self.conv_bn_layer(input=input, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - conv = fluid.layers.pool2d(input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + conv = self.conv_bn_layer( + input=input, num_filters=64, filter_size=7, stride=2, act='relu' + ) + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max', + ) elif layers == 152: cardinality = 64 reduction_ratio = 16 depth = [3, 8, 36, 3] num_filters = [128, 256, 512, 1024] - conv = self.conv_bn_layer(input=input, - num_filters=64, - filter_size=3, - stride=2, - act='relu') - conv = self.conv_bn_layer(input=conv, - num_filters=64, - filter_size=3, - stride=1, - act='relu') - conv = self.conv_bn_layer(input=conv, - num_filters=128, - filter_size=3, - stride=1, - act='relu') + conv = self.conv_bn_layer( + input=input, num_filters=64, filter_size=3, stride=2, act='relu' + ) + conv = self.conv_bn_layer( + input=conv, num_filters=64, filter_size=3, stride=1, act='relu' + ) + conv = self.conv_bn_layer( + input=conv, num_filters=128, filter_size=3, stride=1, act='relu' + ) conv = fluid.layers.pool2d( - input=conv, pool_size=3, pool_stride=2, pool_padding=1, \ - pool_type='max') + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max', + ) for block in range(len(depth)): for i in range(depth[block]): @@ -111,20 +111,22 @@ class SE_ResNeXt(): num_filters=num_filters[block], stride=2 if i == 0 and block != 0 else 1, cardinality=cardinality, - reduction_ratio=reduction_ratio) + reduction_ratio=reduction_ratio, + ) - pool = fluid.layers.pool2d(input=conv, - pool_size=7, - pool_type='avg', - global_pooling=True) + pool = fluid.layers.pool2d( + input=conv, pool_size=7, pool_type='avg', global_pooling=True + ) drop = fluid.layers.dropout(x=pool, dropout_prob=0.2) stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0) out = fluid.layers.fc( input=drop, size=class_dim, act='softmax', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + ) return out def shortcut(self, input, ch_out, stride): @@ -135,37 +137,36 @@ class SE_ResNeXt(): else: return input - def bottleneck_block(self, input, num_filters, stride, cardinality, - reduction_ratio): - conv0 = self.conv_bn_layer(input=input, - num_filters=num_filters, - filter_size=1, - act='relu') - conv1 = self.conv_bn_layer(input=conv0, - num_filters=num_filters, - filter_size=3, - stride=stride, - groups=cardinality, - act='relu') - conv2 = self.conv_bn_layer(input=conv1, - num_filters=num_filters * 2, - filter_size=1, - act=None) - scale = self.squeeze_excitation(input=conv2, - num_channels=num_filters * 2, - reduction_ratio=reduction_ratio) + def bottleneck_block( + self, input, num_filters, stride, cardinality, reduction_ratio + ): + conv0 = self.conv_bn_layer( + input=input, num_filters=num_filters, filter_size=1, act='relu' + ) + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + stride=stride, + 
groups=cardinality, + act='relu', + ) + conv2 = self.conv_bn_layer( + input=conv1, num_filters=num_filters * 2, filter_size=1, act=None + ) + scale = self.squeeze_excitation( + input=conv2, + num_channels=num_filters * 2, + reduction_ratio=reduction_ratio, + ) short = self.shortcut(input, num_filters * 2, stride) return fluid.layers.elementwise_add(x=short, y=scale, act='relu') - def conv_bn_layer(self, - input, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def conv_bn_layer( + self, input, num_filters, filter_size, stride=1, groups=1, act=None + ): conv = fluid.layers.conv2d( input=input, num_filters=num_filters, @@ -175,41 +176,45 @@ class SE_ResNeXt(): groups=groups, act=None, # avoid pserver CPU init differs from GPU - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05)), - bias_attr=False) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + bias_attr=False, + ) return fluid.layers.batch_norm(input=conv, act=act) def squeeze_excitation(self, input, num_channels, reduction_ratio): - pool = fluid.layers.pool2d(input=input, - pool_size=0, - pool_type='avg', - global_pooling=True) + pool = fluid.layers.pool2d( + input=input, pool_size=0, pool_type='avg', global_pooling=True + ) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) squeeze = fluid.layers.fc( input=pool, size=num_channels // reduction_ratio, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05)), - act='relu') + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + act='relu', + ) stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0) excitation = fluid.layers.fc( input=squeeze, size=num_channels, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05)), - act='sigmoid') + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + act='sigmoid', + ) scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) return scale class DistSeResneXt2x2(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False): # Input data - image = fluid.layers.data(name="data", - shape=[3, 224, 224], - dtype='float32') + image = fluid.layers.data( + name="data", shape=[3, 224, 224], dtype='float32' + ) label = fluid.layers.data(name="int64", shape=[1], dtype='int64') # Train program @@ -235,24 +240,30 @@ class DistSeResneXt2x2(TestDistRunnerBase): if not use_dgc: optimizer = fluid.optimizer.Momentum( - learning_rate=fluid.layers.piecewise_decay(boundaries=bd, - values=lr), + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr + ), momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) else: optimizer = fluid.optimizer.DGCMomentumOptimizer( - learning_rate=fluid.layers.piecewise_decay(boundaries=bd, - values=lr), + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr + ), momentum=0.9, rampup_begin_step=0, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) optimizer.minimize(avg_cost) # Reader - train_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size + 
) return test_program, avg_cost, train_reader, test_reader, acc_top1, out diff --git a/python/paddle/fluid/tests/unittests/dist_sharding_save.py b/python/paddle/fluid/tests/unittests/dist_sharding_save.py index b4733b61af85a8fcd9389306374af0ce49414dc2..d816d1b93abb27c00ee653f3915bb109536e0748 100755 --- a/python/paddle/fluid/tests/unittests/dist_sharding_save.py +++ b/python/paddle/fluid/tests/unittests/dist_sharding_save.py @@ -37,20 +37,21 @@ def runtime_main(): fleet.init(role) with fluid.program_guard(train_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -61,10 +62,12 @@ def runtime_main(): "sharding_degree": 2, } - optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.01, - momentum=0.9) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = paddle.fluid.optimizer.Momentum( + learning_rate=0.01, momentum=0.9 + ) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(avg_cost) # execution @@ -73,17 +76,16 @@ def runtime_main(): exe = fluid.Executor(place) exe.run(startup_prog) dirname = "./ut_sharding_save_model" - sharding.utils.save_persistables(exe, - dirname, - main_program=train_prog, - filename=None) + sharding.utils.save_persistables( + exe, dirname, main_program=train_prog, filename=None + ) out_losses = [] sys.stdout.buffer.write(pickle.dumps(out_losses)) if __name__ == "__main__": - #NOTE(liangjianzhong): dist unittest should be imlpement using runtime_main in test_dist_base.py + # NOTE(liangjianzhong): dist unittest should be imlpement using runtime_main in test_dist_base.py # but the runtime_main in test_dist_base.py use the fleet, DistributedStrategy from # paddle.fluid.incubate.fleet.collective which is not support by sharding (paddle.distributed.fleet). # this should be update in future. 
diff --git a/python/paddle/fluid/tests/unittests/dist_text_classification.py b/python/paddle/fluid/tests/unittests/dist_text_classification.py index c8315161f0fd942d9b730c0e5f34992c2ae351e7..417ff66e0cb7eb034c7555c7be78d41d8040e56d 100644 --- a/python/paddle/fluid/tests/unittests/dist_text_classification.py +++ b/python/paddle/fluid/tests/unittests/dist_text_classification.py @@ -43,19 +43,23 @@ def get_worddict(dict_path): return word_dict, dict_dim -def conv_net(input, - dict_dim, - emb_dim=128, - window_size=3, - num_filters=128, - fc0_dim=96, - class_dim=2): +def conv_net( + input, + dict_dim, + emb_dim=128, + window_size=3, + num_filters=128, + fc0_dim=96, + class_dim=2, +): emb = fluid.layers.embedding( input=input, size=[dict_dim, emb_dim], is_sparse=False, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -63,30 +67,35 @@ def conv_net(input, filter_size=window_size, act="tanh", pool_type="max", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) fc_0 = fluid.layers.fc( input=[conv_3], size=fc0_dim, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) prediction = fluid.layers.fc( input=[fc_0], size=class_dim, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.01))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.01) + ), + ) return prediction def inference_network(dict_dim): - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) out = conv_net(data, dict_dim) return out @@ -107,17 +116,16 @@ def get_optimizer(learning_rate): class TestDistTextClassification2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): - vocab = os.path.join(paddle.dataset.common.DATA_HOME, - "text_classification", "imdb.vocab") + vocab = os.path.join( + paddle.dataset.common.DATA_HOME, "text_classification", "imdb.vocab" + ) word_dict, dict_dim = get_worddict(vocab) # Input data - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') # Train program @@ -134,7 +142,14 @@ class TestDistTextClassification2x2(TestDistRunnerBase): # Reader train_reader, test_reader = get_reader(word_dict, batch_size) - return inference_program, avg_cost, train_reader, test_reader, acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + acc, + predict, + ) def tokenize(pattern): @@ -143,8 +158,10 @@ def tokenize(pattern): """ with tarfile.open( - paddle.dataset.common.download(DATA_URL, 'text_classification', - DATA_MD5)) as tarf: + paddle.dataset.common.download( + DATA_URL, 'text_classification', DATA_MD5 + ) + ) as tarf: # Note that we should use tarfile.next(), which does # sequential access of member files, other than # tarfile.extractfile, which does random access and might @@ -154,7 +171,8 @@ def tokenize(pattern): if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc 
tokenization. yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate( - None, string.punctuation.encode('latin-1')).lower().split() + None, string.punctuation.encode('latin-1') + ).lower().split() tf = tarf.next() @@ -188,8 +206,11 @@ def train(word_idx): :return: Training reader creator :rtype: callable """ - return reader_creator(re.compile(r"train/pos/.*\.txt$"), - re.compile(r"train/neg/.*\.txt$"), word_idx) + return reader_creator( + re.compile(r"train/pos/.*\.txt$"), + re.compile(r"train/neg/.*\.txt$"), + word_idx, + ) def test(word_idx): @@ -204,8 +225,11 @@ def test(word_idx): :return: Test reader creator :rtype: callable """ - return reader_creator(re.compile(r"test/pos/.*\.txt$"), - re.compile(r"test/neg/.*\.txt$"), word_idx) + return reader_creator( + re.compile(r"test/pos/.*\.txt$"), + re.compile(r"test/neg/.*\.txt$"), + word_idx, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index 23bcd1957e0f43ce84159d95532ac55b57b8a8e9..4765ee7d82bf53c09f80c9087cc9700801484b87 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -35,7 +35,7 @@ fluid.default_startup_program().random_seed = 1 fluid.default_main_program().random_seed = 1 -#from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list +# from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list class TrainTaskConfig(object): # only support GPU currently use_gpu = True @@ -72,7 +72,8 @@ class TrainTaskConfig(object): check_acc = True data_path = expanduser("~") + ( - "/.cache/paddle/dataset/test_dist_transformer/") + "/.cache/paddle/dataset/test_dist_transformer/" + ) src_vocab_fpath = data_path + "vocab.bpe.32000" trg_vocab_fpath = data_path + "vocab.bpe.32000" train_file_pattern = data_path + "train.tok.clean.bpe.32000.en-de" @@ -179,12 +180,17 @@ input_descs = { # encoder. # The actual data shape of src_slf_attn_bias is: # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch] - "src_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "src_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # The actual data shape of trg_word is: # [batch_size * max_trg_len_in_batch, 1] - "trg_word": [(batch_size, seq_len, 1), "int64", - 2], # lod_level is only used in fast decoder. + "trg_word": [ + (batch_size, seq_len, 1), + "int64", + 2, + ], # lod_level is only used in fast decoder. # The actual data shape of trg_pos is: # [batch_size * max_trg_len_in_batch, 1] "trg_pos": [(batch_size, seq_len, 1), "int64"], @@ -192,14 +198,18 @@ input_descs = { # subsequent words in the decoder. # The actual data shape of trg_slf_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch] - "trg_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used to remove attention weights on paddings of the source # input in the encoder-decoder attention. 
# The actual data shape of trg_src_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch] - "trg_src_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_src_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used in independent decoder program for inference. # The actual data shape of enc_output is: # [batch_size, max_src_len_in_batch, d_model] @@ -212,8 +222,8 @@ input_descs = { # [batch_size * max_trg_len_in_batch, 1] "lbl_weight": [(batch_size * seq_len, 1), "float32"], # These inputs are used to change the shape tensor in beam-search decoder. - "trg_slf_attn_pre_softmax_shape_delta": [(2, ), "int32"], - "trg_slf_attn_post_softmax_shape_delta": [(4, ), "int32"], + "trg_slf_attn_pre_softmax_shape_delta": [(2,), "int32"], + "trg_slf_attn_post_softmax_shape_delta": [(4,), "int32"], "init_score": [(batch_size, 1), "float32"], } @@ -257,7 +267,7 @@ fast_decoder_data_input_fields = ( # "trg_slf_attn_post_softmax_shape_delta", ) -#from optim import LearningRateScheduler +# from optim import LearningRateScheduler class LearningRateScheduler(object): """ Wrapper for learning rate scheduling as described in the Transformer paper. @@ -265,12 +275,14 @@ class LearningRateScheduler(object): learning rate will be fed into the main_program as input data. """ - def __init__(self, - d_model, - warmup_steps, - learning_rate=0.001, - current_steps=0, - name="learning_rate"): + def __init__( + self, + d_model, + warmup_steps, + learning_rate=0.001, + current_steps=0, + name="learning_rate", + ): self.current_steps = current_steps self.warmup_steps = warmup_steps self.d_model = d_model @@ -280,68 +292,91 @@ class LearningRateScheduler(object): shape=[1], value=float(learning_rate), dtype="float32", - persistable=True) + persistable=True, + ) def update_learning_rate(self): self.current_steps += 1 - lr_value = np.power(self.d_model, -0.5) * np.min([ - np.power(self.current_steps, -0.5), - np.power(self.warmup_steps, -1.5) * self.current_steps - ]) * self.static_lr + lr_value = ( + np.power(self.d_model, -0.5) + * np.min( + [ + np.power(self.current_steps, -0.5), + np.power(self.warmup_steps, -1.5) * self.current_steps, + ] + ) + * self.static_lr + ) return np.array([lr_value], dtype="float32") -#from transformer_train import train_loop -def pad_batch_data(insts, - pad_idx, - n_head, - is_target=False, - is_label=False, - return_attn_bias=True, - return_max_len=True, - return_num_token=False): +# from transformer_train import train_loop +def pad_batch_data( + insts, + pad_idx, + n_head, + is_target=False, + is_label=False, + return_attn_bias=True, + return_max_len=True, + return_num_token=False, +): """ Pad the instances to the max sequence length in batch, and generate the corresponding position data and attention bias. """ return_list = [] max_len = max(len(inst) for inst in insts) - num_token = functools.reduce(lambda x, y: x + y, - [len(inst) - for inst in insts]) if return_num_token else 0 + num_token = ( + functools.reduce(lambda x, y: x + y, [len(inst) for inst in insts]) + if return_num_token + else 0 + ) # Any token included in dict can be used to pad, since the paddings' loss # will be masked out by weights and make no effect on parameter gradients. 
inst_data = np.array( - [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) + [inst + [pad_idx] * (max_len - len(inst)) for inst in insts] + ) return_list += [inst_data.astype("int64").reshape([-1, 1])] if is_label: # label weight - inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst)) - for inst in insts]) + inst_weight = np.array( + [ + [1.0] * len(inst) + [0.0] * (max_len - len(inst)) + for inst in insts + ] + ) return_list += [inst_weight.astype("float32").reshape([-1, 1])] else: # position data - inst_pos = np.array([ - list(range(1, - len(inst) + 1)) + [0] * (max_len - len(inst)) - for inst in insts - ]) + inst_pos = np.array( + [ + list(range(1, len(inst) + 1)) + [0] * (max_len - len(inst)) + for inst in insts + ] + ) return_list += [inst_pos.astype("int64").reshape([-1, 1])] if return_attn_bias: if is_target: # This is used to avoid attention on paddings and subsequent # words. slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len)) - slf_attn_bias_data = np.triu(slf_attn_bias_data, - 1).reshape([-1, 1, max_len, max_len]) - slf_attn_bias_data = np.tile(slf_attn_bias_data, - [1, n_head, 1, 1]) * [-1e9] + slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( + [-1, 1, max_len, max_len] + ) + slf_attn_bias_data = np.tile( + slf_attn_bias_data, [1, n_head, 1, 1] + ) * [-1e9] else: # This is used to avoid attention on paddings. - slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * - (max_len - len(inst)) - for inst in insts]) + slf_attn_bias_data = np.array( + [ + [0] * len(inst) + [-1e9] * (max_len - len(inst)) + for inst in insts + ] + ) slf_attn_bias_data = np.tile( slf_attn_bias_data.reshape([-1, 1, 1, max_len]), - [1, n_head, max_len, 1]) + [1, n_head, max_len, 1], + ) return_list += [slf_attn_bias_data.astype("float32")] if return_max_len: return_list += [max_len] @@ -350,22 +385,26 @@ def pad_batch_data(insts, return return_list if len(return_list) > 1 else return_list[0] -def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx, - n_head, d_model): +def prepare_batch_input( + insts, data_input_names, src_pad_idx, trg_pad_idx, n_head, d_model +): """ Put all padded data needed by training into a dict. 
""" src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data( - [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False) + [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False + ) src_word = src_word.reshape(-1, src_max_len, 1) src_pos = src_pos.reshape(-1, src_max_len, 1) trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data( - [inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True) + [inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True + ) trg_word = trg_word.reshape(-1, trg_max_len, 1) trg_pos = trg_pos.reshape(-1, trg_max_len, 1) - trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], - [1, 1, trg_max_len, 1]).astype("float32") + trg_src_attn_bias = np.tile( + src_slf_attn_bias[:, :, ::src_max_len, :], [1, 1, trg_max_len, 1] + ).astype("float32") lbl_word, lbl_weight, num_token = pad_batch_data( [inst[2] for inst in insts], @@ -375,14 +414,27 @@ def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx, is_label=True, return_attn_bias=False, return_max_len=False, - return_num_token=True) + return_num_token=True, + ) data_input_dict = dict( list( - zip(data_input_names, [ - src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos, - trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight - ]))) + zip( + data_input_names, + [ + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + lbl_word, + lbl_weight, + ], + ) + ) + ) return data_input_dict, np.asarray([num_token], dtype="float32") @@ -407,7 +459,7 @@ def read_multiple(reader, count, clip_last=True): if len(data) > count: inst_num_per_part = len(data) // count yield [ - data[inst_num_per_part * i:inst_num_per_part * (i + 1)] + data[inst_num_per_part * i : inst_num_per_part * (i + 1)] for i in range(count) ] @@ -423,21 +475,28 @@ def split_data(data, num_part): data = data[0] inst_num_per_part = len(data) // num_part return [ - data[inst_num_per_part * i:inst_num_per_part * (i + 1)] + data[inst_num_per_part * i : inst_num_per_part * (i + 1)] for i in range(num_part) ] -def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names, - sum_cost, token_num): +def test_context( + test_program, + avg_cost, + train_exe, + dev_count, + data_input_names, + sum_cost, + token_num, +): val_data = DataReader( src_vocab_fpath=TrainTaskConfig.src_vocab_fpath, trg_vocab_fpath=TrainTaskConfig.trg_vocab_fpath, fpattern=TrainTaskConfig.val_file_pattern, token_delimiter=TrainTaskConfig.token_delimiter, use_token_batch=TrainTaskConfig.use_token_batch, - batch_size=TrainTaskConfig.batch_size * - (1 if TrainTaskConfig.use_token_batch else dev_count), + batch_size=TrainTaskConfig.batch_size + * (1 if TrainTaskConfig.use_token_batch else dev_count), pool_size=TrainTaskConfig.pool_size, sort_type=TrainTaskConfig.sort_type, start_mark=TrainTaskConfig.special_token[0], @@ -447,37 +506,47 @@ def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names, max_length=ModelHyperParams.max_length - 2, clip_last_batch=False, shuffle=False, - shuffle_batch=False) + shuffle_batch=False, + ) build_strategy = fluid.BuildStrategy() strategy = fluid.ExecutionStrategy() strategy.num_threads = 1 - test_exe = fluid.ParallelExecutor(use_cuda=TrainTaskConfig.use_gpu, - main_program=test_program, - share_vars_from=train_exe, - build_strategy=build_strategy, - exec_strategy=strategy) + test_exe = fluid.ParallelExecutor( + use_cuda=TrainTaskConfig.use_gpu, + main_program=test_program, + 
share_vars_from=train_exe, + build_strategy=build_strategy, + exec_strategy=strategy, + ) def test(exe=test_exe): test_total_cost = 0 test_total_token = 0 test_data = read_multiple( reader=val_data.batch_generator, - count=dev_count if TrainTaskConfig.use_token_batch else 1) + count=dev_count if TrainTaskConfig.use_token_batch else 1, + ) for batch_id, data in enumerate(test_data()): feed_list = [] for place_id, data_buffer in enumerate( - split_data(data, num_part=dev_count)): + split_data(data, num_part=dev_count) + ): data_input_dict, _ = prepare_batch_input( - data_buffer, data_input_names, ModelHyperParams.eos_idx, - ModelHyperParams.eos_idx, ModelHyperParams.n_head, - ModelHyperParams.d_model) + data_buffer, + data_input_names, + ModelHyperParams.eos_idx, + ModelHyperParams.eos_idx, + ModelHyperParams.n_head, + ModelHyperParams.d_model, + ) feed_list.append(data_input_dict) - outs = exe.run(feed=feed_list, - fetch_list=[sum_cost.name, token_num.name]) + outs = exe.run( + feed=feed_list, fetch_list=[sum_cost.name, token_num.name] + ) sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1]) test_total_cost += sum_cost_val.sum() test_total_token += token_num_val.sum() @@ -488,8 +557,17 @@ def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names, return test -def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler, - token_num, predict, test_program): +def train_loop( + exe, + train_progm, + dev_count, + sum_cost, + avg_cost, + lr_scheduler, + token_num, + predict, + test_program, +): # Initialize the parameters. if TrainTaskConfig.ckpt_path: lr_scheduler.current_steps = TrainTaskConfig.start_step @@ -502,8 +580,8 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler, fpattern=TrainTaskConfig.train_file_pattern, token_delimiter=TrainTaskConfig.token_delimiter, use_token_batch=TrainTaskConfig.use_token_batch, - batch_size=TrainTaskConfig.batch_size * - (1 if TrainTaskConfig.use_token_batch else dev_count), + batch_size=TrainTaskConfig.batch_size + * (1 if TrainTaskConfig.use_token_batch else dev_count), pool_size=TrainTaskConfig.pool_size, sort_type=TrainTaskConfig.sort_type, shuffle=TrainTaskConfig.shuffle, @@ -513,39 +591,60 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler, unk_mark=TrainTaskConfig.special_token[2], # count start and end tokens out max_length=ModelHyperParams.max_length - 2, - clip_last_batch=False) + clip_last_batch=False, + ) train_data = read_multiple( reader=train_data.batch_generator, - count=dev_count if TrainTaskConfig.use_token_batch else 1) + count=dev_count if TrainTaskConfig.use_token_batch else 1, + ) build_strategy = fluid.BuildStrategy() # Since the token number differs among devices, customize gradient scale to # use token average cost among multi-devices. and the gradient scale is # `1 / token_number` for average cost. 
- build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized + build_strategy.gradient_scale_strategy = ( + fluid.BuildStrategy.GradientScaleStrategy.Customized + ) strategy = fluid.ExecutionStrategy() strategy.num_threads = 1 - train_exe = fluid.ParallelExecutor(use_cuda=TrainTaskConfig.use_gpu, - loss_name=sum_cost.name, - main_program=train_progm, - build_strategy=build_strategy, - exec_strategy=strategy) + train_exe = fluid.ParallelExecutor( + use_cuda=TrainTaskConfig.use_gpu, + loss_name=sum_cost.name, + main_program=train_progm, + build_strategy=build_strategy, + exec_strategy=strategy, + ) - data_input_names = encoder_data_input_fields + decoder_data_input_fields[: - -1] + label_data_input_fields + data_input_names = ( + encoder_data_input_fields + + decoder_data_input_fields[:-1] + + label_data_input_fields + ) if TrainTaskConfig.val_file_pattern is not None: - test = test_context(test_program, avg_cost, train_exe, dev_count, - data_input_names, sum_cost, token_num) + test = test_context( + test_program, + avg_cost, + train_exe, + dev_count, + data_input_names, + sum_cost, + token_num, + ) # the best cross-entropy value with label smoothing - loss_normalizer = -((1. - TrainTaskConfig.label_smooth_eps) * np.log( - (1. - TrainTaskConfig.label_smooth_eps)) + - TrainTaskConfig.label_smooth_eps * - np.log(TrainTaskConfig.label_smooth_eps / - (ModelHyperParams.trg_vocab_size - 1) + 1e-20)) + loss_normalizer = -( + (1.0 - TrainTaskConfig.label_smooth_eps) + * np.log((1.0 - TrainTaskConfig.label_smooth_eps)) + + TrainTaskConfig.label_smooth_eps + * np.log( + TrainTaskConfig.label_smooth_eps + / (ModelHyperParams.trg_vocab_size - 1) + + 1e-20 + ) + ) init = False for pass_id in range(TrainTaskConfig.pass_num): pass_start_time = time.time() @@ -560,36 +659,44 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler, lr_rate = lr_scheduler.update_learning_rate() for place_id, data_buffer in enumerate( - split_data(data, num_part=dev_count)): + split_data(data, num_part=dev_count) + ): data_input_dict, num_token = prepare_batch_input( - data_buffer, data_input_names, ModelHyperParams.eos_idx, - ModelHyperParams.eos_idx, ModelHyperParams.n_head, - ModelHyperParams.d_model) + data_buffer, + data_input_names, + ModelHyperParams.eos_idx, + ModelHyperParams.eos_idx, + ModelHyperParams.n_head, + ModelHyperParams.d_model, + ) total_num_token += num_token feed_kv_pairs = list(data_input_dict.items()) if TrainTaskConfig.local: feed_kv_pairs += list( - {lr_scheduler.learning_rate.name: lr_rate}.items()) + {lr_scheduler.learning_rate.name: lr_rate}.items() + ) feed_list.append(dict(feed_kv_pairs)) if not init: for pos_enc_param_name in pos_enc_param_names: pos_enc = position_encoding_init( ModelHyperParams.max_length + 1, - ModelHyperParams.d_model) + ModelHyperParams.d_model, + ) feed_list[place_id][pos_enc_param_name] = pos_enc if not TrainTaskConfig.check_acc: for feed_dict in feed_list: - feed_dict[sum_cost.name + "@GRAD"] = 1. / total_num_token + feed_dict[sum_cost.name + "@GRAD"] = 1.0 / total_num_token else: b = 100 * TrainTaskConfig.batch_size a = np.asarray([b], dtype="float32") for feed_dict in feed_list: - feed_dict[sum_cost.name + "@GRAD"] = 1. 
/ a + feed_dict[sum_cost.name + "@GRAD"] = 1.0 / a - outs = train_exe.run(fetch_list=[sum_cost.name, token_num.name], - feed=feed_list) + outs = train_exe.run( + fetch_list=[sum_cost.name, token_num.name], feed=feed_list + ) sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1]) total_sum_cost = sum_cost_val.sum() @@ -603,10 +710,10 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler, val_avg_cost, val_ppl = test() print("[%f]" % val_avg_cost) else: - assert (False) + assert False -#import transformer_reader as reader +# import transformer_reader as reader class SortType(object): GLOBAL = 'global' POOL = 'pool' @@ -614,7 +721,6 @@ class SortType(object): class Converter(object): - def __init__(self, vocab, beg, end, unk, delimiter): self._vocab = vocab self._beg = beg @@ -623,14 +729,17 @@ class Converter(object): self._delimiter = delimiter def __call__(self, sentence): - return [self._beg] + [ - self._vocab.get(w, self._unk) - for w in sentence.split(self._delimiter) - ] + [self._end] + return ( + [self._beg] + + [ + self._vocab.get(w, self._unk) + for w in sentence.split(self._delimiter) + ] + + [self._end] + ) class ComposedConverter(object): - def __init__(self, converters): self._converters = converters @@ -642,7 +751,6 @@ class ComposedConverter(object): class SentenceBatchCreator(object): - def __init__(self, batch_size): self.batch = [] self._batch_size = batch_size @@ -656,7 +764,6 @@ class SentenceBatchCreator(object): class TokenBatchCreator(object): - def __init__(self, batch_size): self.batch = [] self.max_len = -1 @@ -676,7 +783,6 @@ class TokenBatchCreator(object): class SampleInfo(object): - def __init__(self, i, max_len, min_len): self.i = i self.min_len = min_len @@ -684,7 +790,6 @@ class SampleInfo(object): class MinMaxFilter(object): - def __init__(self, max_len, min_len, underlying_creator): self._min_len = min_len self._max_len = max_len @@ -774,26 +879,28 @@ class DataReader(object): :type seed: int """ - def __init__(self, - src_vocab_fpath, - trg_vocab_fpath, - fpattern, - batch_size, - pool_size, - sort_type=SortType.GLOBAL, - clip_last_batch=True, - tar_fname=None, - min_length=0, - max_length=100, - shuffle=True, - shuffle_batch=False, - use_token_batch=False, - field_delimiter="\t", - token_delimiter=" ", - start_mark="", - end_mark="", - unk_mark="", - seed=0): + def __init__( + self, + src_vocab_fpath, + trg_vocab_fpath, + fpattern, + batch_size, + pool_size, + sort_type=SortType.GLOBAL, + clip_last_batch=True, + tar_fname=None, + min_length=0, + max_length=100, + shuffle=True, + shuffle_batch=False, + use_token_batch=False, + field_delimiter="\t", + token_delimiter=" ", + start_mark="", + end_mark="", + unk_mark="", + seed=0, + ): self._src_vocab = self.load_dict(src_vocab_fpath) self._only_src = True if trg_vocab_fpath is not None: @@ -810,26 +917,33 @@ class DataReader(object): self._max_length = max_length self._field_delimiter = field_delimiter self._token_delimiter = token_delimiter - self.load_src_trg_ids(end_mark, fpattern, start_mark, tar_fname, - unk_mark) + self.load_src_trg_ids( + end_mark, fpattern, start_mark, tar_fname, unk_mark + ) self._random = random.Random(x=seed) - def load_src_trg_ids(self, end_mark, fpattern, start_mark, tar_fname, - unk_mark): + def load_src_trg_ids( + self, end_mark, fpattern, start_mark, tar_fname, unk_mark + ): converters = [ - Converter(vocab=self._src_vocab, - beg=self._src_vocab[start_mark], - end=self._src_vocab[end_mark], - unk=self._src_vocab[unk_mark], - 
delimiter=self._token_delimiter) + Converter( + vocab=self._src_vocab, + beg=self._src_vocab[start_mark], + end=self._src_vocab[end_mark], + unk=self._src_vocab[unk_mark], + delimiter=self._token_delimiter, + ) ] if not self._only_src: converters.append( - Converter(vocab=self._trg_vocab, - beg=self._trg_vocab[start_mark], - end=self._trg_vocab[end_mark], - unk=self._trg_vocab[unk_mark], - delimiter=self._token_delimiter)) + Converter( + vocab=self._trg_vocab, + beg=self._trg_vocab[start_mark], + end=self._trg_vocab[end_mark], + unk=self._trg_vocab[unk_mark], + delimiter=self._token_delimiter, + ) + ) converters = ComposedConverter(converters) @@ -857,9 +971,9 @@ class DataReader(object): for line in f.extractfile(tar_fname): line = line.decode() fields = line.strip("\n").split(self._field_delimiter) - if (not self._only_src - and len(fields) == 2) or (self._only_src - and len(fields) == 1): + if (not self._only_src and len(fields) == 2) or ( + self._only_src and len(fields) == 1 + ): yield fields else: for fpath in fpaths: @@ -870,9 +984,9 @@ class DataReader(object): for line in f: line = line.decode() fields = line.strip("\n").split(self._field_delimiter) - if (not self._only_src - and len(fields) == 2) or (self._only_src - and len(fields) == 1): + if (not self._only_src and len(fields) == 2) or ( + self._only_src and len(fields) == 1 + ): yield fields @staticmethod @@ -890,9 +1004,9 @@ class DataReader(object): def batch_generator(self): # global sort or global shuffle if self._sort_type == SortType.GLOBAL: - infos = sorted(self._sample_infos, - key=lambda x: x.max_len, - reverse=True) + infos = sorted( + self._sample_infos, key=lambda x: x.max_len, reverse=True + ) else: if self._shuffle: infos = self._sample_infos @@ -902,16 +1016,20 @@ class DataReader(object): if self._sort_type == SortType.POOL: for i in range(0, len(infos), self._pool_size): - infos[i:i + self._pool_size] = sorted( - infos[i:i + self._pool_size], key=lambda x: x.max_len) + infos[i : i + self._pool_size] = sorted( + infos[i : i + self._pool_size], key=lambda x: x.max_len + ) # concat batch batches = [] - batch_creator = TokenBatchCreator( - self._batch_size - ) if self._use_token_batch else SentenceBatchCreator(self._batch_size) - batch_creator = MinMaxFilter(self._max_length, self._min_length, - batch_creator) + batch_creator = ( + TokenBatchCreator(self._batch_size) + if self._use_token_batch + else SentenceBatchCreator(self._batch_size) + ) + batch_creator = MinMaxFilter( + self._max_length, self._min_length, batch_creator + ) for info in infos: batch = batch_creator.append(info) @@ -930,34 +1048,49 @@ class DataReader(object): if self._only_src: yield [[self._src_seq_ids[idx]] for idx in batch_ids] else: - yield [(self._src_seq_ids[idx], self._trg_seq_ids[idx][:-1], - self._trg_seq_ids[idx][1:]) for idx in batch_ids] + yield [ + ( + self._src_seq_ids[idx], + self._trg_seq_ids[idx][:-1], + self._trg_seq_ids[idx][1:], + ) + for idx in batch_ids + ] -#from transformer_model import transformer +# from transformer_model import transformer def position_encoding_init(n_position, d_pos_vec): """ Generate the initial values for the sinusoid position encoding table. 
""" - position_enc = np.array([[ - pos / np.power(10000, 2 * (j // 2) / d_pos_vec) - for j in range(d_pos_vec) - ] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)]) + position_enc = np.array( + [ + [ + pos / np.power(10000, 2 * (j // 2) / d_pos_vec) + for j in range(d_pos_vec) + ] + if pos != 0 + else np.zeros(d_pos_vec) + for pos in range(n_position) + ] + ) position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1 return position_enc.astype("float32") -def multi_head_attention(queries, - keys, - values, - attn_bias, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0., - cache=None): +def multi_head_attention( + queries, + keys, + values, + attn_bias, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.0, + cache=None, +): """ Multi-Head Attention. Note that attn_bias is added to the logit before computing softmax activiation to mask certain selected positions so that @@ -965,27 +1098,34 @@ def multi_head_attention(queries, """ if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): raise ValueError( - "Inputs: queries, keys and values should all be 3-D tensors.") + "Inputs: queries, keys and values should all be 3-D tensors." + ) def __compute_qkv(queries, keys, values, n_head, d_key, d_value): """ Add linear projection to queries, keys, and values. """ - q = layers.fc(input=queries, - size=d_key * n_head, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) - k = layers.fc(input=keys, - size=d_key * n_head, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) - v = layers.fc(input=values, - size=d_value * n_head, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) + q = layers.fc( + input=queries, + size=d_key * n_head, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) + k = layers.fc( + input=keys, + size=d_key * n_head, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) + v = layers.fc( + input=values, + size=d_value * n_head, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) return q, k, v def __split_heads(x, n_head): @@ -1001,8 +1141,9 @@ def multi_head_attention(queries, hidden_size = x.shape[-1] # The value 0 in shape attr means copying the corresponding dimension # size of the input as the output dimension size. - reshaped = layers.reshape(x=x, - shape=[0, 0, n_head, hidden_size // n_head]) + reshaped = layers.reshape( + x=x, shape=[0, 0, n_head, hidden_size // n_head] + ) # permute the dimensions into: # [batch_size, n_head, max_sequence_len, hidden_size_per_head] @@ -1013,7 +1154,8 @@ def multi_head_attention(queries, Transpose and then reshape the last two dimensions of input tensor x so that it becomes one dimension, which is reverse to __split_heads. """ - if len(x.shape) == 3: return x + if len(x.shape) == 3: + return x if len(x.shape) != 4: raise ValueError("Input(x) should be a 4-D Tensor.") @@ -1022,7 +1164,8 @@ def multi_head_attention(queries, # size of the input as the output dimension size. 
return layers.reshape( x=trans_x, - shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]]))) + shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])), + ) def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate): """ @@ -1034,10 +1177,12 @@ def multi_head_attention(queries, product += attn_bias weights = layers.softmax(product) if dropout_rate: - weights = layers.dropout(weights, - dropout_prob=dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) + weights = layers.dropout( + weights, + dropout_prob=dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) out = layers.matmul(weights, v) return out @@ -1051,17 +1196,20 @@ def multi_head_attention(queries, k = __split_heads(k, n_head) v = __split_heads(v, n_head) - ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model, - dropout_rate) + ctx_multiheads = scaled_dot_product_attention( + q, k, v, attn_bias, d_model, dropout_rate + ) out = __combine_heads(ctx_multiheads) # Project back to the model size. - proj_out = layers.fc(input=out, - size=d_model, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) + proj_out = layers.fc( + input=out, + size=d_model, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) return proj_out @@ -1071,21 +1219,25 @@ def positionwise_feed_forward(x, d_inner_hid, d_hid): This module consists of two linear transformations with a ReLU activation in between, which is applied to each position separately and identically. """ - hidden = layers.fc(input=x, - size=d_inner_hid, - num_flatten_dims=2, - act="relu", - param_attr=const_para_attr, - bias_attr=const_bias_attr) - out = layers.fc(input=hidden, - size=d_hid, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) + hidden = layers.fc( + input=x, + size=d_inner_hid, + num_flatten_dims=2, + act="relu", + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) + out = layers.fc( + input=hidden, + size=d_hid, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) return out -def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.): +def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.0): """ Add residual connection, layer normalization and droput to the out tensor optionally according to the value of process_cmd. 
@@ -1096,16 +1248,20 @@ def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.): if cmd == "a": # add residual connection out = out + prev_out if prev_out else out elif cmd == "n": # add layer normalization - out = layers.layer_norm(out, - begin_norm_axis=len(out.shape) - 1, - param_attr=fluid.initializer.Constant(1.), - bias_attr=fluid.initializer.Constant(0.)) + out = layers.layer_norm( + out, + begin_norm_axis=len(out.shape) - 1, + param_attr=fluid.initializer.Constant(1.0), + bias_attr=fluid.initializer.Constant(0.0), + ) elif cmd == "d": # add dropout if dropout_rate: - out = layers.dropout(out, - dropout_prob=dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) + out = layers.dropout( + out, + dropout_prob=dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) return out @@ -1113,14 +1269,16 @@ pre_process_layer = partial(pre_post_process_layer, None) post_process_layer = pre_post_process_layer -def prepare_encoder(src_word, - src_pos, - src_vocab_size, - src_emb_dim, - src_max_len, - dropout_rate=0., - word_emb_param_name=None, - pos_enc_param_name=None): +def prepare_encoder( + src_word, + src_pos, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate=0.0, + word_emb_param_name=None, + pos_enc_param_name=None, +): """Add word embeddings and position encodings. The output tensor has a shape of: [batch_size, max_src_length_in_batch, d_model]. @@ -1132,14 +1290,18 @@ def prepare_encoder(src_word, size=[src_vocab_size, src_emb_dim], param_attr=fluid.ParamAttr( name=word_emb_param_name, - initializer=fluid.initializer.ConstantInitializer(0.001))) + initializer=fluid.initializer.ConstantInitializer(0.001), + ), + ) else: src_word_emb = layers.embedding( src_word, size=[src_vocab_size, src_emb_dim], - param_attr=fluid.ParamAttr(name=word_emb_param_name, - initializer=fluid.initializer.Normal( - 0., src_emb_dim**-0.5))) + param_attr=fluid.ParamAttr( + name=word_emb_param_name, + initializer=fluid.initializer.Normal(0.0, src_emb_dim**-0.5), + ), + ) src_word_emb = layers.scale(x=src_word_emb, scale=src_emb_dim**0.5) src_pos_enc = layers.embedding( @@ -1148,76 +1310,109 @@ def prepare_encoder(src_word, param_attr=fluid.ParamAttr( name=pos_enc_param_name, trainable=False, - initializer=fluid.initializer.ConstantInitializer(0.001))) + initializer=fluid.initializer.ConstantInitializer(0.001), + ), + ) src_pos_enc.stop_gradient = True enc_input = src_word_emb + src_pos_enc - return layers.dropout(enc_input, - dropout_prob=dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) if dropout_rate else enc_input - - -prepare_encoder = partial(prepare_encoder, - pos_enc_param_name=pos_enc_param_names[0]) -prepare_decoder = partial(prepare_encoder, - pos_enc_param_name=pos_enc_param_names[1]) - - -def encoder_layer(enc_input, - attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0.): + return ( + layers.dropout( + enc_input, + dropout_prob=dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) + if dropout_rate + else enc_input + ) + + +prepare_encoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[0] +) +prepare_decoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[1] +) + + +def encoder_layer( + enc_input, + attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): """The encoder layers that can be stacked to form a deep encoder. 
This module consits of a multi-head (self) attention followed by position-wise feed-forward networks and both the two components companied with the post_process_layer to add residual connection, layer normalization and droput. """ - attn_output = multi_head_attention(enc_input, enc_input, enc_input, - attn_bias, d_key, d_value, d_model, - n_head, dropout_rate) - attn_output = post_process_layer(enc_input, attn_output, "dan", - dropout_rate) + attn_output = multi_head_attention( + enc_input, + enc_input, + enc_input, + attn_bias, + d_key, + d_value, + d_model, + n_head, + dropout_rate, + ) + attn_output = post_process_layer( + enc_input, attn_output, "dan", dropout_rate + ) ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model) return post_process_layer(attn_output, ffd_output, "dan", dropout_rate) -def encoder(enc_input, +def encoder( + enc_input, + attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): + """ + The encoder is composed of a stack of identical layers returned by calling + encoder_layer. + """ + for i in range(n_layer): + enc_output = encoder_layer( + enc_input, attn_bias, - n_layer, n_head, d_key, d_value, d_model, d_inner_hid, - dropout_rate=0.): - """ - The encoder is composed of a stack of identical layers returned by calling - encoder_layer. - """ - for i in range(n_layer): - enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value, - d_model, d_inner_hid, dropout_rate) + dropout_rate, + ) enc_input = enc_output return enc_output -def decoder_layer(dec_input, - enc_output, - slf_attn_bias, - dec_enc_attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0., - cache=None): - """ The layer to be stacked in decoder part. +def decoder_layer( + dec_input, + enc_output, + slf_attn_bias, + dec_enc_attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, + cache=None, +): + """The layer to be stacked in decoder part. The structure of this module is similar to that in the encoder part except a multi-head attention is added to implement encoder-decoder attention. """ @@ -1270,18 +1465,20 @@ def decoder_layer(dec_input, return dec_output -def decoder(dec_input, - enc_output, - dec_slf_attn_bias, - dec_enc_attn_bias, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0., - caches=None): +def decoder( + dec_input, + enc_output, + dec_slf_attn_bias, + dec_enc_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, + caches=None, +): """ The decoder is composed of a stack of identical decoder_layer layers. 
""" @@ -1290,17 +1487,19 @@ def decoder(dec_input, if caches is not None: cache = caches[i] - dec_output = decoder_layer(dec_input, - enc_output, - dec_slf_attn_bias, - dec_enc_attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate, - cache=cache) + dec_output = decoder_layer( + dec_input, + enc_output, + dec_slf_attn_bias, + dec_enc_attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + cache=cache, + ) dec_input = dec_output return dec_output @@ -1311,12 +1510,15 @@ def make_all_inputs(input_fields): """ inputs = [] for input_field in input_fields: - input_var = layers.data(name=input_field, - shape=input_descs[input_field][0], - dtype=input_descs[input_field][1], - lod_level=input_descs[input_field][2] - if len(input_descs[input_field]) == 3 else 0, - append_batch_size=False) + input_var = layers.data( + name=input_field, + shape=input_descs[input_field][0], + dtype=input_descs[input_field][1], + lod_level=input_descs[input_field][2] + if len(input_descs[input_field]) == 3 + else 0, + append_batch_size=False, + ) inputs.append(input_var) return inputs @@ -1336,9 +1538,9 @@ def transformer( label_smooth_eps, ): if weight_sharing: - assert src_vocab_size == src_vocab_size, ( - "Vocabularies in source and target should be same for weight sharing." - ) + assert ( + src_vocab_size == src_vocab_size + ), "Vocabularies in source and target should be same for weight sharing." enc_inputs = make_all_inputs(encoder_data_input_fields) enc_output = wrap_encoder( @@ -1376,14 +1578,16 @@ def transformer( # cancel padding index in calculating the loss. label, weights = make_all_inputs(label_data_input_fields) if label_smooth_eps: - label = layers.label_smooth(label=layers.one_hot(input=label, - depth=trg_vocab_size), - epsilon=label_smooth_eps) + label = layers.label_smooth( + label=layers.one_hot(input=label, depth=trg_vocab_size), + epsilon=label_smooth_eps, + ) cost = layers.softmax_with_cross_entropy( logits=layers.reshape(predict, shape=[-1, trg_vocab_size]), label=label, - soft_label=True if label_smooth_eps else False) + soft_label=True if label_smooth_eps else False, + ) weighted_cost = cost * weights sum_cost = layers.reduce_sum(weighted_cost) token_num = layers.reduce_sum(weights) @@ -1392,95 +1596,122 @@ def transformer( return sum_cost, avg_cost, predict, token_num -def wrap_encoder(src_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate, - weight_sharing, - enc_inputs=None): +def wrap_encoder( + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + weight_sharing, + enc_inputs=None, +): """ The wrapper assembles together all needed layers for the encoder. """ if enc_inputs is None: # This is used to implement independent encoder program in inference. 
- src_word, src_pos, src_slf_attn_bias = \ - make_all_inputs(encoder_data_input_fields) + src_word, src_pos, src_slf_attn_bias = make_all_inputs( + encoder_data_input_fields + ) else: - src_word, src_pos, src_slf_attn_bias = \ - enc_inputs - enc_input = prepare_encoder(src_word, - src_pos, - src_vocab_size, - d_model, - max_length, - dropout_rate, - word_emb_param_name=word_emb_param_names[0]) - enc_output = encoder(enc_input, src_slf_attn_bias, n_layer, n_head, d_key, - d_value, d_model, d_inner_hid, dropout_rate) + src_word, src_pos, src_slf_attn_bias = enc_inputs + enc_input = prepare_encoder( + src_word, + src_pos, + src_vocab_size, + d_model, + max_length, + dropout_rate, + word_emb_param_name=word_emb_param_names[0], + ) + enc_output = encoder( + enc_input, + src_slf_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + ) return enc_output -def wrap_decoder(trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate, - weight_sharing, - dec_inputs=None, - enc_output=None, - caches=None): +def wrap_decoder( + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + weight_sharing, + dec_inputs=None, + enc_output=None, + caches=None, +): """ The wrapper assembles together all needed layers for the decoder. """ if dec_inputs is None: # This is used to implement independent decoder program in inference. - trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, \ - enc_output = make_all_inputs( - decoder_data_input_fields) + ( + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + enc_output, + ) = make_all_inputs(decoder_data_input_fields) else: trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs - dec_input = prepare_decoder(trg_word, - trg_pos, - trg_vocab_size, - d_model, - max_length, - dropout_rate, - word_emb_param_name=word_emb_param_names[0] - if weight_sharing else word_emb_param_names[1]) - dec_output = decoder(dec_input, - enc_output, - trg_slf_attn_bias, - trg_src_attn_bias, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate, - caches=caches) + dec_input = prepare_decoder( + trg_word, + trg_pos, + trg_vocab_size, + d_model, + max_length, + dropout_rate, + word_emb_param_name=word_emb_param_names[0] + if weight_sharing + else word_emb_param_names[1], + ) + dec_output = decoder( + dec_input, + enc_output, + trg_slf_attn_bias, + trg_src_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + caches=caches, + ) # Return logits for training and probs for inference. if weight_sharing: - predict = layers.matmul(x=dec_output, - y=fluid.framework._get_var( - word_emb_param_names[0]), - transpose_y=True) + predict = layers.matmul( + x=dec_output, + y=fluid.framework._get_var(word_emb_param_names[0]), + transpose_y=True, + ) else: - predict = layers.fc(input=dec_output, - size=trg_vocab_size, - num_flatten_dims=2, - param_attr=const_para_attr, - bias_attr=const_bias_attr) + predict = layers.fc( + input=dec_output, + size=trg_vocab_size, + num_flatten_dims=2, + param_attr=const_para_attr, + bias_attr=const_bias_attr, + ) if dec_inputs is None: predict = layers.softmax(predict) return predict @@ -1506,86 +1737,108 @@ def fast_decode( Use beam search to decode. Caches will be used to store states of history steps which can make the decoding faster. 
""" - enc_output = wrap_encoder(src_vocab_size, max_in_len, n_layer, n_head, - d_key, d_value, d_model, d_inner_hid, - dropout_rate, weight_sharing) - start_tokens, init_scores, trg_src_attn_bias = \ - make_all_inputs(fast_decoder_data_input_fields ) + enc_output = wrap_encoder( + src_vocab_size, + max_in_len, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + weight_sharing, + ) + start_tokens, init_scores, trg_src_attn_bias = make_all_inputs( + fast_decoder_data_input_fields + ) def beam_search(): - max_len = layers.fill_constant(shape=[1], - dtype=start_tokens.dtype, - value=max_out_len) - step_idx = layers.fill_constant(shape=[1], - dtype=start_tokens.dtype, - value=0) + max_len = layers.fill_constant( + shape=[1], dtype=start_tokens.dtype, value=max_out_len + ) + step_idx = layers.fill_constant( + shape=[1], dtype=start_tokens.dtype, value=0 + ) cond = layers.less_than(x=step_idx, y=max_len) while_op = layers.While(cond) # array states will be stored for each step. - ids = layers.array_write(layers.reshape(start_tokens, (-1, 1)), - step_idx) + ids = layers.array_write( + layers.reshape(start_tokens, (-1, 1)), step_idx + ) scores = layers.array_write(init_scores, step_idx) # cell states will be overwrited at each step. # caches contains states of history steps to reduce redundant # computation in decoder. - caches = [{ - "k": - layers.fill_constant_batch_size_like(input=start_tokens, - shape=[-1, 0, d_model], - dtype=enc_output.dtype, - value=0), - "v": - layers.fill_constant_batch_size_like(input=start_tokens, - shape=[-1, 0, d_model], - dtype=enc_output.dtype, - value=0) - } for i in range(n_layer)] + caches = [ + { + "k": layers.fill_constant_batch_size_like( + input=start_tokens, + shape=[-1, 0, d_model], + dtype=enc_output.dtype, + value=0, + ), + "v": layers.fill_constant_batch_size_like( + input=start_tokens, + shape=[-1, 0, d_model], + dtype=enc_output.dtype, + value=0, + ), + } + for i in range(n_layer) + ] with while_op.block(): pre_ids = layers.array_read(array=ids, i=step_idx) pre_ids = layers.reshape(pre_ids, (-1, 1, 1)) pre_scores = layers.array_read(array=scores, i=step_idx) # sequence_expand can gather sequences according to lod thus can be # used in beam search to sift states corresponding to selected ids. 
- pre_src_attn_bias = layers.sequence_expand(x=trg_src_attn_bias, - y=pre_scores) + pre_src_attn_bias = layers.sequence_expand( + x=trg_src_attn_bias, y=pre_scores + ) pre_enc_output = layers.sequence_expand(x=enc_output, y=pre_scores) - pre_caches = [{ - "k": - layers.sequence_expand(x=cache["k"], y=pre_scores), - "v": - layers.sequence_expand(x=cache["v"], y=pre_scores), - } for cache in caches] + pre_caches = [ + { + "k": layers.sequence_expand(x=cache["k"], y=pre_scores), + "v": layers.sequence_expand(x=cache["v"], y=pre_scores), + } + for cache in caches + ] pre_pos = layers.elementwise_mul( x=layers.fill_constant_batch_size_like( - input= - pre_enc_output, # can't use pre_ids here since it has lod + input=pre_enc_output, # can't use pre_ids here since it has lod value=1, shape=[-1, 1, 1], - dtype=pre_ids.dtype), + dtype=pre_ids.dtype, + ), y=layers.increment(x=step_idx, value=1.0, in_place=False), - axis=0) - logits = wrap_decoder(trg_vocab_size, - max_in_len, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate, - weight_sharing, - dec_inputs=(pre_ids, pre_pos, None, - pre_src_attn_bias), - enc_output=pre_enc_output, - caches=pre_caches) + axis=0, + ) + logits = wrap_decoder( + trg_vocab_size, + max_in_len, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate, + weight_sharing, + dec_inputs=(pre_ids, pre_pos, None, pre_src_attn_bias), + enc_output=pre_enc_output, + caches=pre_caches, + ) logits = layers.reshape(logits, (-1, trg_vocab_size)) topk_scores, topk_indices = layers.topk( - input=layers.softmax(logits), k=beam_size) - accu_scores = layers.elementwise_add(x=layers.log(topk_scores), - y=layers.reshape(pre_scores, - shape=[-1]), - axis=0) + input=layers.softmax(logits), k=beam_size + ) + accu_scores = layers.elementwise_add( + x=layers.log(topk_scores), + y=layers.reshape(pre_scores, shape=[-1]), + axis=0, + ) # beam_search op uses lod to distinguish branches. 
topk_indices = layers.lod_reset(topk_indices, pre_ids) selected_ids, selected_scores = layers.beam_search( @@ -1594,7 +1847,8 @@ def fast_decode( ids=topk_indices, scores=accu_scores, beam_size=beam_size, - end_id=eos_idx) + end_id=eos_idx, + ) layers.increment(x=step_idx, value=1.0, in_place=True) # update states @@ -1610,7 +1864,8 @@ def fast_decode( layers.logical_and(x=length_cond, y=finish_cond, out=cond) finished_ids, finished_scores = layers.beam_search_decode( - ids, scores, beam_size=beam_size, end_id=eos_idx) + ids, scores, beam_size=beam_size, end_id=eos_idx + ) return finished_ids, finished_scores finished_ids, finished_scores = beam_search() @@ -1619,16 +1874,25 @@ def fast_decode( def get_model(is_dist, is_async): sum_cost, avg_cost, predict, token_num = transformer( - ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, - ModelHyperParams.max_length + 1, ModelHyperParams.n_layer, - ModelHyperParams.n_head, ModelHyperParams.d_key, - ModelHyperParams.d_value, ModelHyperParams.d_model, - ModelHyperParams.d_inner_hid, ModelHyperParams.dropout, - ModelHyperParams.weight_sharing, TrainTaskConfig.label_smooth_eps) - - local_lr_scheduler = LearningRateScheduler(ModelHyperParams.d_model, - TrainTaskConfig.warmup_steps, - TrainTaskConfig.learning_rate) + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.dropout, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + ) + + local_lr_scheduler = LearningRateScheduler( + ModelHyperParams.d_model, + TrainTaskConfig.warmup_steps, + TrainTaskConfig.learning_rate, + ) # Context to do validation. 
test_program = fluid.default_main_program().clone(for_test=True) @@ -1637,24 +1901,33 @@ def get_model(is_dist, is_async): learning_rate=local_lr_scheduler.learning_rate, beta1=TrainTaskConfig.beta1, beta2=TrainTaskConfig.beta2, - epsilon=TrainTaskConfig.eps) + epsilon=TrainTaskConfig.eps, + ) optimizer.minimize(sum_cost) elif is_async: optimizer = fluid.optimizer.SGD(0.003) optimizer.minimize(sum_cost) else: - lr_decay = fluid.layers\ - .learning_rate_scheduler\ - .noam_decay(ModelHyperParams.d_model, - TrainTaskConfig.warmup_steps) - - optimizer = fluid.optimizer.Adam(learning_rate=lr_decay, - beta1=TrainTaskConfig.beta1, - beta2=TrainTaskConfig.beta2, - epsilon=TrainTaskConfig.eps) + lr_decay = fluid.layers.learning_rate_scheduler.noam_decay( + ModelHyperParams.d_model, TrainTaskConfig.warmup_steps + ) + + optimizer = fluid.optimizer.Adam( + learning_rate=lr_decay, + beta1=TrainTaskConfig.beta1, + beta2=TrainTaskConfig.beta2, + epsilon=TrainTaskConfig.eps, + ) optimizer.minimize(sum_cost) - return sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program + return ( + sum_cost, + avg_cost, + predict, + token_num, + local_lr_scheduler, + test_program, + ) def update_args(): @@ -1662,24 +1935,33 @@ def update_args(): trg_dict = DataReader.load_dict(TrainTaskConfig.trg_vocab_fpath) dict_args = [ "src_vocab_size", - str(len(src_dict)), "trg_vocab_size", - str(len(trg_dict)), "bos_idx", - str(src_dict[TrainTaskConfig.special_token[0]]), "eos_idx", - str(src_dict[TrainTaskConfig.special_token[1]]), "unk_idx", - str(src_dict[TrainTaskConfig.special_token[2]]) + str(len(src_dict)), + "trg_vocab_size", + str(len(trg_dict)), + "bos_idx", + str(src_dict[TrainTaskConfig.special_token[0]]), + "eos_idx", + str(src_dict[TrainTaskConfig.special_token[1]]), + "unk_idx", + str(src_dict[TrainTaskConfig.special_token[2]]), ] merge_cfg_from_list(dict_args, [TrainTaskConfig, ModelHyperParams]) class DistTransformer2x2(TestDistRunnerBase): - def run_pserver(self, args): get_model(True, not args.sync_mode) - t = self.get_transpiler(args.trainer_id, fluid.default_main_program(), - args.endpoints, args.trainers, args.sync_mode) + t = self.get_transpiler( + args.trainer_id, + fluid.default_main_program(), + args.endpoints, + args.trainers, + args.sync_mode, + ) pserver_prog = t.get_pserver_program(args.current_endpoint) - startup_prog = t.get_startup_program(args.current_endpoint, - pserver_prog) + startup_prog = t.get_startup_program( + args.current_endpoint, pserver_prog + ) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -1688,18 +1970,31 @@ class DistTransformer2x2(TestDistRunnerBase): def run_trainer(self, args): TrainTaskConfig.use_gpu = args.use_cuda - sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program = get_model( - args.is_dist, not args.sync_mode) + ( + sum_cost, + avg_cost, + predict, + token_num, + local_lr_scheduler, + test_program, + ) = get_model(args.is_dist, not args.sync_mode) if args.is_dist: - t = self.get_transpiler(args.trainer_id, - fluid.default_main_program(), - args.endpoints, args.trainers, - args.sync_mode) + t = self.get_transpiler( + args.trainer_id, + fluid.default_main_program(), + args.endpoints, + args.trainers, + args.sync_mode, + ) trainer_prog = t.get_trainer_program() TrainTaskConfig.batch_size = 10 - TrainTaskConfig.train_file_pattern = TrainTaskConfig.data_path + "train.tok.clean.bpe.32000.en-de.train_{}".format( - args.trainer_id) + TrainTaskConfig.train_file_pattern = ( + TrainTaskConfig.data_path + + 
"train.tok.clean.bpe.32000.en-de.train_{}".format( + args.trainer_id + ) + ) else: TrainTaskConfig.batch_size = 20 trainer_prog = fluid.default_main_program() @@ -1713,8 +2008,17 @@ class DistTransformer2x2(TestDistRunnerBase): TrainTaskConfig.local = not args.is_dist - train_loop(startup_exe, trainer_prog, 1, sum_cost, avg_cost, - local_lr_scheduler, token_num, predict, test_program) + train_loop( + startup_exe, + trainer_prog, + 1, + sum_cost, + avg_cost, + local_lr_scheduler, + token_num, + predict, + test_program, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dist_word2vec.py b/python/paddle/fluid/tests/unittests/dist_word2vec.py index ca322d30cdc62d971f0fb4d5dcfa711634025c60..0941dabcc5e5b86bbb0e476be321fe39798f7e2c 100644 --- a/python/paddle/fluid/tests/unittests/dist_word2vec.py +++ b/python/paddle/fluid/tests/unittests/dist_word2vec.py @@ -28,7 +28,6 @@ fluid.default_main_program().random_seed = 1 class TestDistWord2vec2x2(TestDistRunnerBase): - def get_model(self, batch_size=2): BATCH_SIZE = batch_size @@ -40,7 +39,9 @@ class TestDistWord2vec2x2(TestDistRunnerBase): is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr( name='shared_w', - initializer=fluid.initializer.Constant(value=0.1))) + initializer=fluid.initializer.Constant(value=0.1), + ), + ) embed_second = fluid.layers.embedding( input=words[1], size=[dict_size, EMBED_SIZE], @@ -48,7 +49,9 @@ class TestDistWord2vec2x2(TestDistRunnerBase): is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr( name='shared_w', - initializer=fluid.initializer.Constant(value=0.1))) + initializer=fluid.initializer.Constant(value=0.1), + ), + ) embed_third = fluid.layers.embedding( input=words[2], size=[dict_size, EMBED_SIZE], @@ -56,7 +59,9 @@ class TestDistWord2vec2x2(TestDistRunnerBase): is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr( name='shared_w', - initializer=fluid.initializer.Constant(value=0.1))) + initializer=fluid.initializer.Constant(value=0.1), + ), + ) embed_forth = fluid.layers.embedding( input=words[3], size=[dict_size, EMBED_SIZE], @@ -64,25 +69,33 @@ class TestDistWord2vec2x2(TestDistRunnerBase): is_sparse=IS_SPARSE, param_attr=fluid.ParamAttr( name='shared_w', - initializer=fluid.initializer.Constant(value=0.1))) + initializer=fluid.initializer.Constant(value=0.1), + ), + ) concat_embed = fluid.layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], - axis=1) + axis=1, + ) hidden1 = fluid.layers.fc( input=concat_embed, size=HIDDEN_SIZE, act='sigmoid', param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.1))) + initializer=fluid.initializer.Constant(value=0.1) + ), + ) predict_word = fluid.layers.fc( input=hidden1, size=dict_size, act='softmax', param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.1))) - cost = fluid.layers.cross_entropy(input=predict_word, - label=words[4]) + initializer=fluid.initializer.Constant(value=0.1) + ), + ) + cost = fluid.layers.cross_entropy( + input=predict_word, label=words[4] + ) avg_cost = paddle.mean(cost) return avg_cost, predict_word @@ -90,30 +103,41 @@ class TestDistWord2vec2x2(TestDistRunnerBase): dict_size = len(word_dict) first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') - second_word = fluid.layers.data(name='secondw', - shape=[1], - dtype='int64') + second_word = fluid.layers.data( + name='secondw', shape=[1], dtype='int64' + ) third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') 
next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') avg_cost, predict_word = __network__( - [first_word, second_word, third_word, forth_word, next_word]) + [first_word, second_word, third_word, forth_word, next_word] + ) inference_program = paddle.fluid.default_main_program().clone() sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) - train_reader = paddle.batch(paddle.dataset.imikolov.train(word_dict, N), - BATCH_SIZE) - test_reader = paddle.batch(paddle.dataset.imikolov.test(word_dict, N), - BATCH_SIZE) + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE + ) + test_reader = paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE + ) - return inference_program, avg_cost, train_reader, test_reader, None, predict_word + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + None, + predict_word, + ) if __name__ == "__main__": import os + os.environ['CPU_NUM'] = '1' os.environ['USE_CUDA'] = "FALSE" runtime_main(TestDistWord2vec2x2) diff --git a/python/paddle/fluid/tests/unittests/distributed_fused_lamb_test_base.py b/python/paddle/fluid/tests/unittests/distributed_fused_lamb_test_base.py index 230272a1cff77a3cb3dbc445de3b2f6406c25d26..b2ef0976d62e74ce9980fa099c10c48dc0a1ee1d 100644 --- a/python/paddle/fluid/tests/unittests/distributed_fused_lamb_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_fused_lamb_test_base.py @@ -73,7 +73,6 @@ def prune_fwd_bwd_ops(program, start_idx): class GradClipDecorator(ClipGradBase): - def __init__(self, clip, clip_after_allreduce): self.clip = clip self.clip_after_allreduce = clip_after_allreduce @@ -89,17 +88,18 @@ class GradClipDecorator(ClipGradBase): scale = 1.0 / world_size # scale = 1.0 for p, g in params_grads: - block.append_op(type='c_allreduce_sum', - inputs={'X': [g]}, - outputs={'Out': [g]}, - attrs={ - 'ring_id': 0, - 'use_calc_stream': True - }) - block.append_op(type='scale', - inputs={'X': [g]}, - outputs={'Out': [g]}, - attrs={'scale': scale}) + block.append_op( + type='c_allreduce_sum', + inputs={'X': [g]}, + outputs={'Out': [g]}, + attrs={'ring_id': 0, 'use_calc_stream': True}, + ) + block.append_op( + type='scale', + inputs={'X': [g]}, + outputs={'Out': [g]}, + attrs={'scale': scale}, + ) def _static_clip(self, params_grads): if self.clip_after_allreduce: @@ -112,7 +112,6 @@ class GradClipDecorator(ClipGradBase): class IdentityGradClip(ClipGradBase): - def _dygraph_clip(self, params_grads): return params_grads @@ -129,12 +128,14 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): with paddle.static.program_guard(main, startup): with paddle.fluid.unique_name.guard(): with paddle.static.amp.fp16_guard(): - image = paddle.static.data(name='image', - shape=[None, 3, 224, 224], - dtype=paddle.float32) - label = paddle.static.data(name='label', - shape=[None, 1], - dtype=paddle.int64) + image = paddle.static.data( + name='image', + shape=[None, 3, 224, 224], + dtype=paddle.float32, + ) + label = paddle.static.data( + name='label', shape=[None, 1], dtype=paddle.int64 + ) model = resnet() pred = model(image) loss_fn = paddle.nn.loss.CrossEntropyLoss() @@ -160,18 +161,24 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): kwargs.pop('clip_after_allreduce', None) kwargs.pop('alignment', None) kwargs.pop('use_master_acc_grad', None) - base_clip = grad_clip if grad_clip is not None else IdentityGradClip( + base_clip = ( + grad_clip if grad_clip 
is not None else IdentityGradClip() + ) + kwargs['grad_clip'] = GradClipDecorator( + base_clip, clip_after_allreduce ) - kwargs['grad_clip'] = GradClipDecorator(base_clip, - clip_after_allreduce) kwargs.pop('gradient_accumulation_steps', None) optimizer = optimizer_class(**kwargs) get_parameter = optimizer._get_parameter amp_list = paddle.static.amp.AutoMixedPrecisionLists( custom_white_list=[ - 'batch_norm', 'batch_norm_grad', 'conv2d', 'conv2d_grad' - ]) + 'batch_norm', + 'batch_norm_grad', + 'conv2d', + 'conv2d_grad', + ] + ) if use_fp16: if not use_distributed_lamb: optimizer._multi_precision = True @@ -182,14 +189,16 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): init_loss_scaling=1.0, use_dynamic_loss_scaling=False, use_pure_fp16=use_fp16, - use_fp16_guard=use_fp16) + use_fp16_guard=use_fp16, + ) amp_init = optimizer.amp_init else: amp_init = None if gm_steps > 1 and not use_distributed_lamb: optimizer = paddle.fluid.optimizer.GradientMergeOptimizer( - optimizer, k_steps=gm_steps, avg=False) + optimizer, k_steps=gm_steps, avg=False + ) params_grads = optimizer.backward(loss, startup) op_num = len(main.global_block().ops) @@ -222,8 +231,9 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): def reader(): for _ in range(6): - yield dict([(grad.name, gen_random_grad_tensor(grad)) - for grad in grads]) + yield dict( + [(grad.name, gen_random_grad_tensor(grad)) for grad in grads] + ) scope = paddle.static.Scope() fetch_list = params @@ -253,7 +263,6 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): class TestDistributedFusedLamb(unittest.TestCase): - @classmethod def setUpClass(cls): if not paddle.is_compiled_with_cuda(): @@ -266,28 +275,28 @@ class TestDistributedFusedLamb(unittest.TestCase): def config(self): clip_after_allreduce = bool( - distutils.util.strtobool(os.getenv('CLIP_AFTER_ALLREDUCE', 'True'))) + distutils.util.strtobool(os.getenv('CLIP_AFTER_ALLREDUCE', 'True')) + ) max_global_norm = float(os.getenv('MAX_GLOBAL_NORM', -1.0)) gm_steps = int(os.getenv('GRADIENT_MERGE_STEPS', 1)) use_master_acc_grad = bool(int(os.getenv('USE_MASTER_ACC_GRAD', '1'))) - print('clip_after_allreduce = {}, max_global_norm = {}'.format( - clip_after_allreduce, max_global_norm)) + print( + 'clip_after_allreduce = {}, max_global_norm = {}'.format( + clip_after_allreduce, max_global_norm + ) + ) return { - 'clip_after_allreduce': - clip_after_allreduce, - 'gradient_accumulation_steps': - gm_steps, - 'grad_clip': - paddle.nn.ClipGradByGlobalNorm(max_global_norm) - if max_global_norm > 0 else None, - 'use_master_acc_grad': - use_master_acc_grad, + 'clip_after_allreduce': clip_after_allreduce, + 'gradient_accumulation_steps': gm_steps, + 'grad_clip': paddle.nn.ClipGradByGlobalNorm(max_global_norm) + if max_global_norm > 0 + else None, + 'use_master_acc_grad': use_master_acc_grad, } - def run_main(self, - use_fp16, - use_master_param_norm=True, - use_master_acc_grad=True): + def run_main( + self, use_fp16, use_master_param_norm=True, use_master_acc_grad=True + ): if not paddle.is_compiled_with_cuda(): return @@ -316,7 +325,8 @@ class TestDistributedFusedLamb(unittest.TestCase): for ret1, ret2 in zip(result1, result2): max_diff = np.max(np.abs(ret1 - ret2)) msg = 'max_diff = {} atol = {} when use_fp16 = {} , use_master_param_norm = {}'.format( - max_diff, atol, use_fp16, use_master_param_norm) + max_diff, atol, use_fp16, use_master_param_norm + ) self.assertTrue(max_diff < atol, msg) print(msg) diff --git 
a/python/paddle/fluid/tests/unittests/distributed_passes/auto_parallel_pass_test_base.py b/python/paddle/fluid/tests/unittests/distributed_passes/auto_parallel_pass_test_base.py index 4028bae08a45af638ec96f4a30b1d4fcb235e44b..5b9bd4faf38452beb115a84a341ce1d24cf669b4 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/auto_parallel_pass_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/auto_parallel_pass_test_base.py @@ -25,11 +25,14 @@ from paddle.distributed.fleet import auto sys.path.append("..") import auto_parallel_gpt_model as modeling -from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion +from auto_parallel_gpt_model import ( + GPTModel, + GPTForPretraining, + GPTPretrainingCriterion, +) class AutoPallelPassTestBase(DistPassTestBase): - def setUp(self): paddle.enable_static() seed = int(os.environ.get('SEED', -1)) @@ -60,14 +63,12 @@ class AutoPallelPassTestBase(DistPassTestBase): fleet.init(is_collective=True, strategy=dist_strategy) def check_main(self, gpus=None, **kwargs): - no_pass_rets = self._distributed_launch(model=None, - apply_pass=False, - gpus=gpus, - **kwargs) - pass_rets = self._distributed_launch(model=None, - apply_pass=True, - gpus=gpus, - **kwargs) + no_pass_rets = self._distributed_launch( + model=None, apply_pass=False, gpus=gpus, **kwargs + ) + pass_rets = self._distributed_launch( + model=None, apply_pass=True, gpus=gpus, **kwargs + ) self.check_results(no_pass_rets, pass_rets) def _run_gpu_main(self, model, apply_pass, dump_file, **kwargs): @@ -78,12 +79,18 @@ class AutoPallelPassTestBase(DistPassTestBase): self.apply_passes() else: self.apply_no_passes() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): with paddle.static.scope_guard(scope): with paddle.fluid.unique_name.guard(): - main_prog, startup_prog, inputs, outputs, data_loader = self.get_model( - place, **kwargs) + ( + main_prog, + startup_prog, + inputs, + outputs, + data_loader, + ) = self.get_model(place, **kwargs) inputs = self._to_var_names(inputs) outputs = self._to_var_names(outputs) @@ -98,8 +105,9 @@ class AutoPallelPassTestBase(DistPassTestBase): fetch_values = exe.run(main_prog, fetch_list=outputs) if paddle.distributed.get_rank() == 0: output_dict = OrderedDict(zip(outputs, fetch_values)) - print('batch {}, outputs {}'.format( - batch_id, output_dict)) + print( + 'batch {}, outputs {}'.format(batch_id, output_dict) + ) all_fetch_values.append(fetch_values) batch_id += 1 except paddle.fluid.core.EOFException: @@ -108,9 +116,9 @@ class AutoPallelPassTestBase(DistPassTestBase): with open(dump_file, "wb") as f: pickle.dump(all_fetch_values, f) - def get_gpt_model(self, strategy, place, batch_size, sequence_len, - vocab_size, **kwargs): - + def get_gpt_model( + self, strategy, place, batch_size, sequence_len, vocab_size, **kwargs + ): def gen_data(): np.random.seed(2021) for _ in range(10): @@ -121,14 +129,19 @@ class AutoPallelPassTestBase(DistPassTestBase): loss_mask = [] for _ in range(batch_size): tokens.append( - np.random.randint(vocab_size, - size=sequence_len).astype("int64")) + np.random.randint(vocab_size, size=sequence_len).astype( + "int64" + ) + ) position_ids.append(np.arange(sequence_len).astype("int64")) attention_mask.append( - [np.tril(np.ones(sequence_len)).astype("float32")]) + [np.tril(np.ones(sequence_len)).astype("float32")] + ) labels.append( - 
np.random.randint(vocab_size, - size=sequence_len).astype("int64")) + np.random.randint(vocab_size, size=sequence_len).astype( + "int64" + ) + ) loss_mask.append(np.ones(sequence_len).astype("float32")) yield tokens, position_ids, attention_mask, labels, loss_mask @@ -136,65 +149,74 @@ class AutoPallelPassTestBase(DistPassTestBase): modeling.init_global() if strategy == "dp": modeling._global_parallel_strategy = "dp" - modeling._global_process_mesh = auto.ProcessMesh(mesh=[0, 1], - dim_names=["x"]) + modeling._global_process_mesh = auto.ProcessMesh( + mesh=[0, 1], dim_names=["x"] + ) elif strategy == "mp": modeling._global_parallel_strategy = "mp" - modeling._global_process_mesh = auto.ProcessMesh(mesh=[0, 1], - dim_names=["x"]) + modeling._global_process_mesh = auto.ProcessMesh( + mesh=[0, 1], dim_names=["x"] + ) else: raise ValueError("'get_gpt_model' only support dp and mp.") - tokens = paddle.static.data(name="tokens", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = paddle.static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + tokens = paddle.static.data( + name="tokens", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = paddle.static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = paddle.static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float32') - labels = paddle.static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = paddle.static.data(name="loss_mask", - shape=[batch_size, sequence_len], - dtype='float32') + dtype='float32', + ) + labels = paddle.static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = paddle.static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float32' + ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] data_loader = paddle.fluid.io.DataLoader.from_generator( - feed_list=data_holder, capacity=70, iterable=False) + feed_list=data_holder, capacity=70, iterable=False + ) data_loader.set_batch_generator(gen_data, paddle.static.cuda_places()) if modeling._global_parallel_strategy == "dp": - auto.shard_tensor(tokens, modeling._global_process_mesh, - ["x", None]) + auto.shard_tensor( + tokens, modeling._global_process_mesh, ["x", None] + ) elif modeling._global_parallel_strategy == "pp": auto.shard_tensor(tokens, modeling.PP_MESH_LIST[0], [None, None]) - auto.shard_tensor(attention_mask, modeling.PP_MESH_LIST[0], - [None, None, None, None]) - - gpt = GPTModel(vocab_size=1000, - hidden_size=64, - num_hidden_layers=2, - num_attention_heads=8, - intermediate_size=256, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=1024, - type_vocab_size=1, - initializer_range=0.02, - pad_token_id=0, - eos_token_id=7, - bos_token_id=0, - eol_token_id=3) - - model = GPTForPretraining(gpt, - vocab_size=1000, - hidden_size=64, - initializer_range=0.02) + auto.shard_tensor( + attention_mask, + modeling.PP_MESH_LIST[0], + [None, None, None, None], + ) + + gpt = GPTModel( + vocab_size=1000, + hidden_size=64, + num_hidden_layers=2, + num_attention_heads=8, + intermediate_size=256, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=1024, + type_vocab_size=1, + initializer_range=0.02, + pad_token_id=0, + eos_token_id=7, + bos_token_id=0, + eol_token_id=3, + ) + + model = GPTForPretraining( + 
gpt, vocab_size=1000, hidden_size=64, initializer_range=0.02 + ) preds = model(tokens, position_ids, attention_mask) criterion = GPTPretrainingCriterion() loss = criterion(preds, labels, loss_mask) @@ -202,17 +224,26 @@ class AutoPallelPassTestBase(DistPassTestBase): clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) if kwargs.get('optimizer', None) == "LarsMomentum": optimizer = paddle.fluid.optimizer.LarsMomentumOptimizer( - learning_rate=0.001, momentum=0.9) + learning_rate=0.001, momentum=0.9 + ) else: - optimizer = paddle.optimizer.Adam(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=clip) + optimizer = paddle.optimizer.Adam( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=clip, + ) optimizer = fleet.distributed_optimizer(optimizer) startup_program = paddle.static.default_startup_program() _, _, dist_startup_prog, dist_main_prog = optimizer.minimize( - loss, startup_program) - - return dist_main_prog, dist_startup_prog, data_holder, [loss - ], data_loader + loss, startup_program + ) + + return ( + dist_main_prog, + dist_startup_prog, + data_holder, + [loss], + data_loader, + ) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/check_pass_conflict_example.py b/python/paddle/fluid/tests/unittests/distributed_passes/check_pass_conflict_example.py index ffb8ea8e381c3324b5b5da0adb7d52d77456626d..1223fbc0c19362cd6d9a0aa79e2082f535e3c4be 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/check_pass_conflict_example.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/check_pass_conflict_example.py @@ -19,7 +19,6 @@ from model_zoo import resnet_model class CheckPassConflictTest1(PassConflictChecker): - def pass_config(self): return [ new_pass("fuse_all_reduce", {"max_memory_size": 1024 * 1024}), @@ -31,7 +30,6 @@ class CheckPassConflictTest1(PassConflictChecker): class CheckPassConflictTest2(PassConflictChecker): - def pass_config(self): return [ new_pass("fuse_elewise_add_act"), diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py index 6b0d53d2d00b6262a9c531f0b4d67a00bf44f36d..5f62faeb4228f7f740ee30a2471768b10f94a101 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py @@ -42,7 +42,7 @@ def prepare_python_path_and_return_module(path): python_path = dirname os.environ[env_name] = python_path print('GLOG_v=', os.environ.get('GLOG_v', None), flush=1) - return filename[:-len(py_suffix)] + return filename[: -len(py_suffix)] def remove_path_if_exists(path): @@ -57,7 +57,6 @@ def remove_path_if_exists(path): # NOTE: only support GPU now class DistPassTestBase(unittest.TestCase): - def setUp(self): paddle.enable_static() if paddle.is_compiled_with_cuda(): @@ -86,30 +85,31 @@ class DistPassTestBase(unittest.TestCase): raise NotImplementedError() def check_main(self, model=None, gpus=None, **kwargs): - pass_rets = self._distributed_launch(model=model, - apply_pass=True, - gpus=gpus, - **kwargs) - no_pass_rets = self._distributed_launch(model=model, - apply_pass=False, - gpus=gpus, - **kwargs) + pass_rets = self._distributed_launch( + model=model, apply_pass=True, gpus=gpus, **kwargs + ) + no_pass_rets = self._distributed_launch( + model=model, apply_pass=False, gpus=gpus, **kwargs + ) self.check_results(no_pass_rets, pass_rets) def check_results(self, 
no_pass_rets, pass_rets): self.assertEqual(len(no_pass_rets), len(pass_rets)) for no_pass_ret, pass_ret in zip(no_pass_rets, pass_rets): self.assertEqual(len(no_pass_ret), len(pass_ret)) - for i, (out_var_no_pass, - out_var_pass) in enumerate(zip(no_pass_ret, pass_ret)): + for i, (out_var_no_pass, out_var_pass) in enumerate( + zip(no_pass_ret, pass_ret) + ): if out_var_no_pass is None: self.assertTrue(out_var_pass is None) else: - np.testing.assert_allclose(out_var_no_pass, - out_var_pass, - rtol=self.rtol, - atol=self.atol, - equal_nan=self.equal_nan) + np.testing.assert_allclose( + out_var_no_pass, + out_var_pass, + rtol=self.rtol, + atol=self.atol, + equal_nan=self.equal_nan, + ) @classmethod def _to_var_names(cls, names_or_vars): @@ -129,12 +129,14 @@ class DistPassTestBase(unittest.TestCase): scope = paddle.static.Scope() if model is None: model = self.get_model - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): with paddle.static.scope_guard(scope): with paddle.fluid.unique_name.guard(): main_prog, startup_prog, inputs, outputs, reader = model( - place, **kwargs) + place, **kwargs + ) inputs = self._to_var_names(inputs) outputs = self._to_var_names(outputs) if apply_pass: @@ -146,7 +148,8 @@ class DistPassTestBase(unittest.TestCase): exe.run(startup_prog) for batch_id, input_data in enumerate(reader()): assert len(input_data) == len(inputs), "{} vs {}".format( - len(input_data), len(inputs)) + len(input_data), len(inputs) + ) feed = dict(zip(inputs, input_data)) fetch_values = exe.run(main_prog, feed=feed, fetch_list=outputs) if paddle.distributed.get_rank() == 0: @@ -204,26 +207,30 @@ class DistPassTestBase(unittest.TestCase): with open(model_dump_file, 'wb') as f: pickle.dump(model, f) - cmd = [ - sys.executable, - "-u", - ] + coverage_args + [ - "-m", - "launch", - "--log_dir", - output_dir, - "--gpus", - gpus, - os.path.join(file_dir, "pass_run_main.py"), - "--file_path", - inspect.getfile(type(self)), - "--class_name", - type(self).__name__, - "--input_file", - input_dump_file, - "--output_dir", - output_dir, - ] + cmd = ( + [ + sys.executable, + "-u", + ] + + coverage_args + + [ + "-m", + "launch", + "--log_dir", + output_dir, + "--gpus", + gpus, + os.path.join(file_dir, "pass_run_main.py"), + "--file_path", + inspect.getfile(type(self)), + "--class_name", + type(self).__name__, + "--input_file", + input_dump_file, + "--output_dir", + output_dir, + ] + ) if apply_pass: cmd += ["--apply_pass"] if model is not None: @@ -232,17 +239,22 @@ class DistPassTestBase(unittest.TestCase): prepare_python_path_and_return_module(__file__) exitcode = os.system(' '.join(cmd)) self.assertEqual( - exitcode, 0, - "Pass test failed with apply_pass = {}, please view log in {}". 
- format(apply_pass, output_dir)) + exitcode, + 0, + "Pass test failed with apply_pass = {}, please view log in {}".format( + apply_pass, output_dir + ), + ) results = [] for i in range(num_gpus): dump_file = '{0}/{1}.bin'.format(output_dir, i) self.assertTrue( os.path.exists(dump_file), - "Pass test failed with apply_pass = {}, please view log in {}" - .format(apply_pass, output_dir)) + "Pass test failed with apply_pass = {}, please view log in {}".format( + apply_pass, output_dir + ), + ) with open(dump_file, "rb") as f: results.append(pickle.load(f)) return results @@ -252,7 +264,6 @@ class DistPassTestBase(unittest.TestCase): class PassConflictChecker(DistPassTestBase): - def setUp(self): os.environ['DEBUG'] = '1' # to save the debug directory super(PassConflictChecker, self).setUp() @@ -270,14 +281,20 @@ class PassConflictChecker(DistPassTestBase): auto_pass_manager = PassManager(passes, auto_solve_conflict=True) new_passes = auto_pass_manager.passes self.assertEqual( - len(passes), len(new_passes), + len(passes), + len(new_passes), "After solving conflicts, the left passes are: {}".format( - auto_pass_manager.names)) + auto_pass_manager.names + ), + ) for i, (p1, p2) in enumerate(zip(passes, new_passes)): self.assertEqual( - id(p1), id(p2), - "After solving conflicts, the {}-th pass is different: {} vs {}" - .format(i, p1.name, p2.name)) + id(p1), + id(p2), + "After solving conflicts, the {}-th pass is different: {} vs {}".format( + i, p1.name, p2.name + ), + ) auto_pass_manager.apply([main_prog], [startup_prog]) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/model_zoo.py b/python/paddle/fluid/tests/unittests/distributed_passes/model_zoo.py index 9a48d117bb12873b9825dd764a114f8b8f9fcf07..a0bff06e29cd9489d57cf33181ca0d8d6e18585c 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/model_zoo.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/model_zoo.py @@ -28,16 +28,15 @@ def get_seed_from_env(): return int(os.environ.get("SEED", 0)) -def resnet_model(place, - batch_size, - image_shape=[3, 224, 224], - num_classes=1000): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') - label = paddle.static.data(shape=[batch_size, 1], - dtype='int64', - name='label') +def resnet_model( + place, batch_size, image_shape=[3, 224, 224], num_classes=1000 +): + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) + label = paddle.static.data( + shape=[batch_size, 1], dtype='int64', name='label' + ) model = resnet(pretrained=False) loss_fn = nn.loss.CrossEntropyLoss() pred_out = model(image) @@ -58,9 +57,9 @@ def resnet_model(place, np.random.seed(seed + rank) for _ in range(10): image_np = np.random.random(size=image.shape).astype('float32') - label_np = np.random.randint(low=0, - high=num_classes, - size=label.shape).astype('int64') + label_np = np.random.randint( + low=0, high=num_classes, size=label.shape + ).astype('int64') yield image_np, label_np main_program = paddle.static.default_main_program() @@ -69,12 +68,12 @@ def resnet_model(place, def simple_net(place, batch_size, image_shape=[784], num_classes=10): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') - label = paddle.static.data(shape=[batch_size, 1], - dtype='int64', - name='label') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) + label = paddle.static.data( + shape=[batch_size, 1], dtype='int64', 
name='label' + ) linears = [nn.Linear(784, 784) for _ in range(3)] hidden = image for linear in linears: @@ -98,9 +97,9 @@ def simple_net(place, batch_size, image_shape=[784], num_classes=10): np.random.seed(seed + rank) for _ in range(10): image_np = np.random.random(size=image.shape).astype('float32') - label_np = np.random.randint(low=0, - high=num_classes, - size=label.shape).astype('int64') + label_np = np.random.randint( + low=0, high=num_classes, size=label.shape + ).astype('int64') yield image_np, label_np main_program = paddle.static.default_main_program() diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/pass_run_main.py b/python/paddle/fluid/tests/unittests/distributed_passes/pass_run_main.py index cd98d39a09ead34415803252ccbb54bb2274c4b2..cfea9a05c4e3ec9fd3e83e5f5d8ca8d2cb0bad1f 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/pass_run_main.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/pass_run_main.py @@ -18,35 +18,43 @@ import pickle import importlib import os from paddle.distributed.fleet.launch_utils import run_with_coverage -from dist_pass_test_base import prepare_python_path_and_return_module, DistPassTestBase +from dist_pass_test_base import ( + prepare_python_path_and_return_module, + DistPassTestBase, +) def parse_args(): parser = argparse.ArgumentParser( - description='arguments for distributed pass tests') + description='arguments for distributed pass tests' + ) parser.add_argument('--file_path', type=str, help='The test file path.') parser.add_argument( '--class_name', type=str, - help= - 'The test class name. It is the class name that inherits the DistPassTestBase class.' + help='The test class name. It is the class name that inherits the DistPassTestBase class.', + ) + parser.add_argument( + '--apply_pass', + default=False, + action="store_true", + help='Whether to apply distributed passes.', ) - parser.add_argument('--apply_pass', - default=False, - action="store_true", - help='Whether to apply distributed passes.') parser.add_argument( '--input_file', type=str, - help='The input file which contains the dumped input arguments.') + help='The input file which contains the dumped input arguments.', + ) parser.add_argument( '--output_dir', type=str, - help='The output directory to save the logs and output results.') + help='The output directory to save the logs and output results.', + ) parser.add_argument( '--model_file', type=str, - help='The input model file which contains the dumped model function.') + help='The input model file which contains the dumped model function.', + ) return parser.parse_args() diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/ps_pass_test_base.py b/python/paddle/fluid/tests/unittests/distributed_passes/ps_pass_test_base.py index bd2fa6b01984811084e2f6063df4ceb563b030b9..87f275c21c6f3bfe5ece1b647f53cb6a19682441 100755 --- a/python/paddle/fluid/tests/unittests/distributed_passes/ps_pass_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/ps_pass_test_base.py @@ -17,11 +17,12 @@ import sys import shlex import unittest from paddle.fluid.tests.unittests.distributed_passes.dist_pass_test_base import ( # noqa: F401 - prepare_python_path_and_return_module, remove_path_if_exists) + prepare_python_path_and_return_module, + remove_path_if_exists, +) class PsPassTestBase(unittest.TestCase): - def init(self): self.config = {} self.config['ps_mode_config'] = "" @@ -46,30 +47,43 @@ class PsPassTestBase(unittest.TestCase): if ps_mode == "cpu-ps" or ps_mode == 
'heter-ps': os.environ['WITH_DISTRIBUTE'] = 'ON' - cmd = [ - sys.executable, - "-u", - ] + [ - "-m", "launch", "--log_dir", self.config['log_dir'], - "--worker_num", self.config['worker_num'], "--server_num", - self.config['server_num'] + cmd = [sys.executable, "-u",] + [ + "-m", + "launch", + "--log_dir", + self.config['log_dir'], + "--worker_num", + self.config['worker_num'], + "--server_num", + self.config['server_num'], ] if ps_mode == 'heter-ps': os.environ['FLAGS_START_PORT'] = '12004' cmd += [ - '--heter_worker_num', self.config['heter_worker_num'], - '--heter_devices', self.config['heter_devices'] + '--heter_worker_num', + self.config['heter_worker_num'], + '--heter_devices', + self.config['heter_devices'], ] cmd += [ - "../ps/ps_dnn_trainer.py", "-m", self.config['ps_mode_config'], - "--run_minimize", self.config['run_minimize'], - "--run_single_pass", self.config['run_single_pass'], - "--run_the_one_ps", self.config['run_the_one_ps'], - "--debug_new_pass", self.config['debug_new_pass'], - "--debug_new_minimize", self.config['debug_new_minimize'], - "--applied_pass_name", self.config['applied_pass_name'], - "--debug_the_one_ps", self.config['debug_the_one_ps'] + "../ps/ps_dnn_trainer.py", + "-m", + self.config['ps_mode_config'], + "--run_minimize", + self.config['run_minimize'], + "--run_single_pass", + self.config['run_single_pass'], + "--run_the_one_ps", + self.config['run_the_one_ps'], + "--debug_new_pass", + self.config['debug_new_pass'], + "--debug_new_minimize", + self.config['debug_new_minimize'], + "--applied_pass_name", + self.config['applied_pass_name'], + "--debug_the_one_ps", + self.config['debug_the_one_ps'], ] elif ps_mode == "gpu-ps": os.environ['FLAGS_LAUNCH_BARRIER'] = '0' @@ -87,15 +101,25 @@ class PsPassTestBase(unittest.TestCase): os.environ['PADDLE_TRAINER_ID'] = '0' cmd = [ - sys.executable, "-u", "../ps/ps_dnn_trainer.py", "-m", - self.config['ps_mode_config'], "--run_minimize", - self.config['run_minimize'], "--run_single_pass", - self.config['run_single_pass'], "--run_the_one_ps", - self.config['run_the_one_ps'], "--debug_new_pass", - self.config['debug_new_pass'], "--debug_new_minimize", - self.config['debug_new_minimize'], "--applied_pass_name", - self.config['applied_pass_name'], "--debug_the_one_ps", - self.config['debug_the_one_ps'] + sys.executable, + "-u", + "../ps/ps_dnn_trainer.py", + "-m", + self.config['ps_mode_config'], + "--run_minimize", + self.config['run_minimize'], + "--run_single_pass", + self.config['run_single_pass'], + "--run_the_one_ps", + self.config['run_the_one_ps'], + "--debug_new_pass", + self.config['debug_new_pass'], + "--debug_new_minimize", + self.config['debug_new_minimize'], + "--applied_pass_name", + self.config['applied_pass_name'], + "--debug_the_one_ps", + self.config['debug_the_one_ps'], ] cmd = [shlex.quote(c) for c in cmd] diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_amp_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_amp_pass.py index 0d404d1732d428a9f66056be43c2260f3f868057..e4173e11dbcf728bcb9fbf9df65ccc0b9168a5e5 100755 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_amp_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_amp_pass.py @@ -22,7 +22,6 @@ from auto_parallel_pass_test_base import AutoPallelPassTestBase class TestAMPPass(AutoPallelPassTestBase): - def init(self): if paddle.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_cudnn_deterministic': 1}) @@ -51,14 
+50,14 @@ class TestAMPPass(AutoPallelPassTestBase): fleet.init(is_collective=True, strategy=dist_strategy) def test_bs_8(self): - self.check_main(gpus=[0, 1], - batch_size=8, - sequence_len=512, - vocab_size=1000) + self.check_main( + gpus=[0, 1], batch_size=8, sequence_len=512, vocab_size=1000 + ) def get_model(self, place, batch_size, sequence_len, vocab_size): - return self.get_gpt_model("mp", place, batch_size, sequence_len, - vocab_size) + return self.get_gpt_model( + "mp", place, batch_size, sequence_len, vocab_size + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py index 7da95ddf8d21d04f8544120939b74316839a0a7d..530d4b2661e63a4f89af0971d099264ea654d68e 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py @@ -19,7 +19,9 @@ import numpy as np import unittest import paddle import paddle.distributed.fleet as fleet -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) from paddle.distributed.passes import PassContext, new_pass from auto_parallel_pass_test_base import AutoPallelPassTestBase @@ -27,7 +29,6 @@ sys.path.append("..") class TestDataParallelPassWithScale1(AutoPallelPassTestBase): - def init(self): if paddle.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_cudnn_deterministic': 1}) @@ -55,24 +56,29 @@ class TestDataParallelPassWithScale1(AutoPallelPassTestBase): self._apply_pass = False def test_bs_8(self): - self.check_main(gpus=[0, 1], - batch_size=8, - sequence_len=512, - vocab_size=1000) + self.check_main( + gpus=[0, 1], batch_size=8, sequence_len=512, vocab_size=1000 + ) # test scaling with fillconstant def get_model(self, place, batch_size, sequence_len, vocab_size): - dist_main_prog, dist_startup_prog, data_holder, [ - loss - ], gen_data = self.get_gpt_model('dp', place, batch_size, sequence_len, - vocab_size) + ( + dist_main_prog, + dist_startup_prog, + data_holder, + [loss], + gen_data, + ) = self.get_gpt_model( + 'dp', place, batch_size, sequence_len, vocab_size + ) if self._apply_pass: config = {} config["dist_context"] = get_default_distributed_context() config["global_rank"] = paddle.distributed.get_rank() - dp_pass = new_pass("auto_parallel_data_parallel_optimization", - config) + dp_pass = new_pass( + "auto_parallel_data_parallel_optimization", config + ) dp_pass.apply([dist_main_prog], [dist_startup_prog], PassContext()) return dist_main_prog, dist_startup_prog, data_holder, [loss], gen_data @@ -83,20 +89,27 @@ class TestDataParallelPassWithScale2(TestDataParallelPassWithScale1): # test scaling with optimizer rescale_grad def get_model(self, place, batch_size, sequence_len, vocab_size): - dist_main_prog, dist_startup_prog, data_holder, [ - loss - ], gen_data = self.get_gpt_model('dp', - place, - batch_size, - sequence_len, - vocab_size, - optimizer='LarsMomentum') + ( + dist_main_prog, + dist_startup_prog, + data_holder, + [loss], + gen_data, + ) = self.get_gpt_model( + 'dp', + place, + batch_size, + sequence_len, + vocab_size, + optimizer='LarsMomentum', + ) if self._apply_pass: config = {} config["dist_context"] = 
get_default_distributed_context() config["global_rank"] = paddle.distributed.get_rank() - dp_pass = new_pass("auto_parallel_data_parallel_optimization", - config) + dp_pass = new_pass( + "auto_parallel_data_parallel_optimization", config + ) dp_pass.apply([dist_main_prog], [dist_startup_prog], PassContext()) return dist_main_prog, dist_startup_prog, data_holder, [loss], gen_data diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_fp16_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_fp16_pass.py index 2dfe965fc01b8d6c0f270d7afc80b9e9fd759b4b..b3b2857e2920b3c10c4c427f8866ac966925293a 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_fp16_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_fp16_pass.py @@ -22,7 +22,6 @@ from auto_parallel_pass_test_base import AutoPallelPassTestBase class TestPF16Pass(AutoPallelPassTestBase): - def init(self): if paddle.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_cudnn_deterministic': 1}) @@ -42,29 +41,28 @@ class TestPF16Pass(AutoPallelPassTestBase): 'layer_norm', 'gelu', ], - "custom_black_list": - ['c_softmax_with_cross_entropy', 'elementwise_div', 'reduce_sum'], - "init_loss_scaling": - 32768, - "use_dynamic_loss_scaling": - True, - "use_pure_fp16": - True, - "use_fp16_guard": - False + "custom_black_list": [ + 'c_softmax_with_cross_entropy', + 'elementwise_div', + 'reduce_sum', + ], + "init_loss_scaling": 32768, + "use_dynamic_loss_scaling": True, + "use_pure_fp16": True, + "use_fp16_guard": False, } dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) def test_bs_8(self): - self.check_main(gpus=[0, 1], - batch_size=8, - sequence_len=512, - vocab_size=1000) + self.check_main( + gpus=[0, 1], batch_size=8, sequence_len=512, vocab_size=1000 + ) def get_model(self, place, batch_size, sequence_len, vocab_size): - return self.get_gpt_model("mp", place, batch_size, sequence_len, - vocab_size) + return self.get_gpt_model( + "mp", place, batch_size, sequence_len, vocab_size + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_gradient_merge_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_gradient_merge_pass.py index 3b6eb35937eb6623730bb0ca52bc0b475cd02154..ddb4c3cea056fdd1132c07616a8598bfac1b5386 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_gradient_merge_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_gradient_merge_pass.py @@ -33,11 +33,9 @@ paddle.enable_static() class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=128, - intermediate_size=4 * 128, - initializer_range=0.02): + def __init__( + self, hidden_size=128, intermediate_size=4 * 128, initializer_range=0.02 + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size @@ -47,30 +45,24 @@ class MLPLayer(nn.Layer): weight_attr0 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr0)) weight_attr1 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr1)) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr0, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr1, - bias_attr=bias_attr) - self.linear2 = nn.Linear(d_model, - dim_feedforward, - weight_attr0, - bias_attr=bias_attr) - self.linear3 = nn.Linear(dim_feedforward, - 
d_model, - weight_attr1, - bias_attr=bias_attr) - self.linear4 = nn.Linear(d_model, - dim_feedforward, - weight_attr0, - bias_attr=bias_attr) - self.linear5 = nn.Linear(dim_feedforward, - d_model, - weight_attr1, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr + ) + self.linear2 = nn.Linear( + d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr + ) + self.linear3 = nn.Linear( + dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr + ) + self.linear4 = nn.Linear( + d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr + ) + self.linear5 = nn.Linear( + dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr + ) self.norm0 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5) @@ -94,11 +86,14 @@ class MLPLayer(nn.Layer): def mlp_forward(input, label, hidden_size): - auto.shard_tensor(input, auto.ProcessMesh([0], dim_names=["x"]), - [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + auto.shard_tensor( + input, auto.ProcessMesh([0], dim_names=["x"]), [None, None] + ) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) loss = paddle.mean(error_cost) @@ -106,7 +101,6 @@ def mlp_forward(input, label, hidden_size): class TestGradientMergePass(AutoPallelPassTestBase): - def init(self): paddle.seed(2022) random.seed(2022) @@ -120,18 +114,22 @@ class TestGradientMergePass(AutoPallelPassTestBase): fleet.init(is_collective=True, strategy=dist_strategy) def test_result(self): - no_pass_rets = self._distributed_launch(model=None, - apply_pass=False, - gpus=[0], - batch_size=32, - hidden_size=128, - max_step=2) - pass_rets = self._distributed_launch(model=None, - apply_pass=True, - gpus=[0], - batch_size=8, - hidden_size=128, - max_step=8) + no_pass_rets = self._distributed_launch( + model=None, + apply_pass=False, + gpus=[0], + batch_size=32, + hidden_size=128, + max_step=2, + ) + pass_rets = self._distributed_launch( + model=None, + apply_pass=True, + gpus=[0], + batch_size=8, + hidden_size=128, + max_step=8, + ) # avg loss for gradient_merge pass avg_loss = 0 pass_avg_ret_list = [] @@ -146,49 +144,63 @@ class TestGradientMergePass(AutoPallelPassTestBase): for no_pass_ret, pass_ret in zip(no_pass_rets[0], pass_avg_ret_list): print(f"no_pass_ret={no_pass_ret}, pass_ret={pass_ret}") self.assertTrue( - np.isclose(no_pass_ret, - pass_ret, - rtol=self.rtol, - atol=self.atol, - equal_nan=self.equal_nan)) + np.isclose( + no_pass_ret, + pass_ret, + rtol=self.rtol, + atol=self.atol, + equal_nan=self.equal_nan, + ) + ) def get_model(self, place, batch_size, hidden_size, max_step): - def gen_data(): for i in range(max_step): - x_data = input_data[i * batch_size:(i + 1) * batch_size, :] - y_data = label_data[i * batch_size:(i + 1) * batch_size, :] + x_data = input_data[i * batch_size : (i + 1) * batch_size, :] + y_data = label_data[i * batch_size : (i + 1) * batch_size, :] yield x_data, y_data train_program = static.Program() startup_program = static.Program() - with static.program_guard(train_program, startup_program), \ - utils.unique_name.guard(): - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - 
label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + with static.program_guard( + train_program, startup_program + ), utils.unique_name.guard(): + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) input.stop_gradient = False data_holder = [input, label] data_loader = paddle.fluid.io.DataLoader.from_generator( - feed_list=data_holder, capacity=70, iterable=False) - data_loader.set_batch_generator(gen_data, - paddle.static.cuda_places()) + feed_list=data_holder, capacity=70, iterable=False + ) + data_loader.set_batch_generator( + gen_data, paddle.static.cuda_places() + ) loss = mlp_forward(input, label, hidden_size) optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer) - _, self._params_grads, dist_startup_prog, dist_main_prog = optimizer.minimize( - loss, startup_program) + ( + _, + self._params_grads, + dist_startup_prog, + dist_main_prog, + ) = optimizer.minimize(loss, startup_program) input_data = np.random.random(size=(128, hidden_size)).astype('float32') label_data = np.random.random(size=(128, 1)).astype('float32') - return dist_main_prog, dist_startup_prog, [input, - label], [loss], data_loader + return ( + dist_main_prog, + dist_startup_prog, + [input, label], + [loss], + data_loader, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_recompute_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_recompute_pass.py index fa5fd193aba0e633f185561159d3dff7fa62d73a..ddea56b2a1464fe5bb1a2e59f7d17a26d822dbec 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_recompute_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_recompute_pass.py @@ -22,7 +22,6 @@ from auto_parallel_pass_test_base import AutoPallelPassTestBase class TestRecomputePass(AutoPallelPassTestBase): - def init(self): if paddle.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_cudnn_deterministic': 1}) @@ -42,21 +41,21 @@ class TestRecomputePass(AutoPallelPassTestBase): fleet.init(is_collective=True, strategy=dist_strategy) def test_bs_8(self): - self.check_main(gpus=[0, 1], - batch_size=8, - sequence_len=512, - vocab_size=1000) + self.check_main( + gpus=[0, 1], batch_size=8, sequence_len=512, vocab_size=1000 + ) def get_model(self, place, batch_size, sequence_len, vocab_size): - return self.get_gpt_model("mp", place, batch_size, sequence_len, - vocab_size) + return self.get_gpt_model( + "mp", place, batch_size, sequence_len, vocab_size + ) class TestRecomputePassDP(TestRecomputePass): - def get_model(self, place, batch_size, sequence_len, vocab_size): - return self.get_gpt_model("dp", place, batch_size, sequence_len, - vocab_size) + return self.get_gpt_model( + "dp", place, batch_size, sequence_len, vocab_size + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_sharding_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_sharding_pass.py index 21a9a81aa08826bf471c46c7b00fe90a5b08155a..83e4667a1a9c2b0a78bdb4c8ed14cff9b25a0f5a 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_sharding_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_auto_parallel_sharding_pass.py @@ -25,7 +25,6 @@ 
sys.path.append("..") class TestShardingPass(AutoPallelPassTestBase): - def init(self): if paddle.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_cudnn_deterministic': 1}) @@ -55,14 +54,14 @@ class TestShardingPass(AutoPallelPassTestBase): fleet.init(is_collective=True, strategy=dist_strategy) def test_bs_8(self): - self.check_main(gpus=[0, 1], - batch_size=8, - sequence_len=512, - vocab_size=1000) + self.check_main( + gpus=[0, 1], batch_size=8, sequence_len=512, vocab_size=1000 + ) def get_model(self, place, batch_size, sequence_len, vocab_size): - return self.get_gpt_model('dp', place, batch_size, sequence_len, - vocab_size) + return self.get_gpt_model( + 'dp', place, batch_size, sequence_len, vocab_size + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py index c1cdd0b7336e30f4c50a2b29ce4e6d5edd665739..b570786f20c3ad4f228b9793df19341d0f231ab9 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class DemoNet(nn.Layer): - def __init__(self): super(DemoNet, self).__init__() @@ -42,15 +41,14 @@ class DemoNet(nn.Layer): class TestFuseAdamPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = DemoNet() pred_out = model(image) @@ -86,10 +84,14 @@ class TestFuseAdamPass(DistPassTestBase): for op in main_prog.global_block().ops: op_type.append(op.type) if op.type == "adam": - self.assertTrue("@FUSEDVAR@_adam_Param_batch_norm2d_0.b_0" in - op.input("Param")) - self.assertTrue("@FUSEDVAR@_adam_Grad_batch_norm2d_0.b_0@GRAD" - in op.input("Grad")) + self.assertTrue( + "@FUSEDVAR@_adam_Param_batch_norm2d_0.b_0" + in op.input("Param") + ) + self.assertTrue( + "@FUSEDVAR@_adam_Grad_batch_norm2d_0.b_0@GRAD" + in op.input("Grad") + ) self.assertTrue("coalesce_tensor" in op_type) def test_fuse_adam(self): diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_all_reduce_pass.py index 06cd2ac6da49eadc604141bfe3b24647113ffa4f..46754a1193c10cd66eb4c75af0e9e39dc13790a1 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_all_reduce_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_all_reduce_pass.py @@ -19,16 +19,17 @@ from model_zoo import resnet_model class TestFuseAllReducePass(DistPassTestBase): - def init(self): self.atol = 0.0 self.rtol = 0.0 def apply_passes(self, main_prog, startup_prog): - pass_manager = PassManager([ - new_pass("fuse_elewise_add_act"), - new_pass("fuse_all_reduce", {"max_memory_size": 1024 * 1024}) - ]) + pass_manager = PassManager( + [ + new_pass("fuse_elewise_add_act"), + new_pass("fuse_all_reduce", {"max_memory_size": 1024 * 1024}), + ] + ) pass_manager.apply([main_prog], [startup_prog]) print(pass_manager.names) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_act_pass.py 
b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_act_pass.py index 12aeae57b09aad8c05b6d052516a41600704e9f2..cf00549e52f731391cbb1ecd578fc34a02b0bad9 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_act_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class BatchNormActNet(nn.Layer): - def __init__(self): super(BatchNormActNet, self).__init__() @@ -42,15 +41,14 @@ class BatchNormActNet(nn.Layer): class TestFuseBatchNormActPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = BatchNormActNet() pred_out = model(image) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py index 72dfd60d4a0bd9766da17d8bd5cde9e584fd6ab8..715dad4dc5bea2e1ecfbc20abdb9e0119c1556f8 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_bn_add_act_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class BatchNormAddActNet(nn.Layer): - def __init__(self): super(BatchNormAddActNet, self).__init__() @@ -46,15 +45,14 @@ class BatchNormAddActNet(nn.Layer): class TestFuseBatchNormAddActPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = BatchNormAddActNet() pred_out = model(image) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_momentum_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_momentum_pass.py index c725a9b91569c2282f6d9849345945d458eb9ebb..4642be5839e894103cc84a80ebc377e066c1d56f 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_momentum_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_momentum_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class DemoNet(nn.Layer): - def __init__(self): super(DemoNet, self).__init__() @@ -42,15 +41,14 @@ class DemoNet(nn.Layer): class TestFuseAdamPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = DemoNet() pred_out = model(image) @@ -86,11 +84,14 @@ class TestFuseAdamPass(DistPassTestBase): for op in main_prog.global_block().ops: op_type.append(op.type) if op.type == "momentum": - self.assertTrue("@FUSEDVAR@_momentum_Param_batch_norm2d_0.b_0" - in op.input("Param")) self.assertTrue( - "@FUSEDVAR@_momentum_Grad_batch_norm2d_0.b_0@GRAD" in - op.input("Grad")) + "@FUSEDVAR@_momentum_Param_batch_norm2d_0.b_0" + in op.input("Param") + ) + self.assertTrue( + 
"@FUSEDVAR@_momentum_Grad_batch_norm2d_0.b_0@GRAD" + in op.input("Grad") + ) self.assertTrue("coalesce_tensor" in op_type) def test_fuse_adam(self): diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_relu_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_relu_depthwise_conv_pass.py index 61897b37ea7c58603cf85acceb91dd417d2dd731..c2c1404a967f1831b6edd72e964b82bc65e6edb2 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_relu_depthwise_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_relu_depthwise_conv_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class ReluDepthwiseConvNet(nn.Layer): - def __init__(self): super(ReluDepthwiseConvNet, self).__init__() @@ -42,15 +41,14 @@ class ReluDepthwiseConvNet(nn.Layer): class TestFuseReluDepthwiseConvPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[3, 224, 224]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = ReluDepthwiseConvNet() pred_out = model(image) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_sgd_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_sgd_pass.py index ab752c60ed34294cfaa52ea9c95e752f52d9c82a..68ca472f6d4363a586e82f87bcc1a9ce3c4dd9dc 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_sgd_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_sgd_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class DemoNet(nn.Layer): - def __init__(self): super(DemoNet, self).__init__() @@ -42,15 +41,14 @@ class DemoNet(nn.Layer): class TestFuseAdamPass(DistPassTestBase): - def init(self): self.atol = 1e-4 self.rtol = 1e-4 def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = DemoNet() pred_out = model(image) @@ -86,10 +84,14 @@ class TestFuseAdamPass(DistPassTestBase): for op in main_prog.global_block().ops: op_type.append(op.type) if op.type == "sgd": - self.assertTrue("@FUSEDVAR@_sgd_Param_batch_norm2d_0.b_0" in - op.input("Param")) - self.assertTrue("@FUSEDVAR@_sgd_Grad_batch_norm2d_0.b_0@GRAD" in - op.input("Grad")) + self.assertTrue( + "@FUSEDVAR@_sgd_Param_batch_norm2d_0.b_0" + in op.input("Param") + ) + self.assertTrue( + "@FUSEDVAR@_sgd_Grad_batch_norm2d_0.b_0@GRAD" + in op.input("Grad") + ) self.assertTrue("coalesce_tensor" in op_type) def test_fuse_adam(self): diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_inplace_addto_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_inplace_addto_pass.py index 0431c53c11c27c64a79034e379ca4bc00dc4cea6..e6f2adfee538d1fece83aa3f614dc9f6931fcf90 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_inplace_addto_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_dist_inplace_addto_pass.py @@ -25,7 +25,6 @@ paddle.enable_static() class DemoNet(nn.Layer): - def __init__(self): super(DemoNet, self).__init__() @@ -43,16 +42,15 @@ class DemoNet(nn.Layer): class 
TestInplaceAddtoPass(DistPassTestBase): - def init(self): self.atol = 0.0 self.rtol = 0.0 paddle.fluid.set_flags({"FLAGS_max_inplace_grad_add": 8}) def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): - image = paddle.static.data(shape=[batch_size] + image_shape, - dtype='float32', - name='image') + image = paddle.static.data( + shape=[batch_size] + image_shape, dtype='float32', name='image' + ) model = DemoNet() pred_out = model(image) @@ -83,7 +81,8 @@ class TestInplaceAddtoPass(DistPassTestBase): def apply_passes(self, main_prog, startup_prog): pass_manager = PassManager( - [new_pass("inplace_addto_op", {"use_cuda": True})]) + [new_pass("inplace_addto_op", {"use_cuda": True})] + ) pass_manager.apply([main_prog], [startup_prog]) print(pass_manager.names) diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_server_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_server_pass.py index e4844b664c2ed1df254a439a336b1c74e01ae1d1..b0725c2bf305e7cc37e600e22198a1230278ea0b 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_server_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_server_pass.py @@ -17,7 +17,6 @@ from ps_pass_test_base import PsPassTestBase class TestPsServerPass(PsPassTestBase): - def init(self): pass diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_trainer_pass.py b/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_trainer_pass.py index 9b7f7820c8f2c4326a53d69d7d98eb62ed9f3352..d71a5b07b3079b5b695eb909f29869c9ef692904 100755 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_trainer_pass.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_ps_trainer_pass.py @@ -19,7 +19,6 @@ from paddle.distributed.ps.utils.public import logger, ps_log_root_dir class TestPsTrainerPass(PsPassTestBase): - def setUp(self): pass @@ -142,7 +141,7 @@ class TestPsTrainerPass(PsPassTestBase): self.config['debug_new_minimize'] = '0' self.config['log_dir'] = ps_log_root_dir + "gpubox_log_old_minimize" remove_path_if_exists(self.config['log_dir']) - #self.ps_launch("gpu-ps") + # self.ps_launch("gpu-ps") self.config['debug_new_minimize'] = '1' self.config['log_dir'] = ps_log_root_dir + "gpubox_log_new_minimize" @@ -163,19 +162,25 @@ class TestPsTrainerPass(PsPassTestBase): self.config['applied_pass_name'] = "append_send_ops_pass" self.config['debug_new_pass'] = '0' - self.config['log_dir'] = ps_log_root_dir + "log_old_" + self.config[ - 'applied_pass_name'] + self.config['log_dir'] = ( + ps_log_root_dir + "log_old_" + self.config['applied_pass_name'] + ) remove_path_if_exists(self.config['log_dir']) self.ps_launch("cpu-ps") self.config['debug_new_pass'] = '1' - self.config['log_dir'] = ps_log_root_dir + "log_new_" + self.config[ - 'applied_pass_name'] + self.config['log_dir'] = ( + ps_log_root_dir + "log_new_" + self.config['applied_pass_name'] + ) remove_path_if_exists(self.config['log_dir']) self.ps_launch("cpu-ps") - file1 = './ps_log/async_append_send_ops_pass_debug:_0_worker_main.prototxt' - file2 = './ps_log/async_append_send_ops_pass_debug:_1_worker_main.prototxt' + file1 = ( + './ps_log/async_append_send_ops_pass_debug:_0_worker_main.prototxt' + ) + file2 = ( + './ps_log/async_append_send_ops_pass_debug:_1_worker_main.prototxt' + ) if self.check(file1, file2): logger.info('test_append_send_ops_pass passed!') else: diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/test_white_lists.py 
b/python/paddle/fluid/tests/unittests/distributed_passes/test_white_lists.py index 645fa38099df15450fd4056e3b6bfbd5fa054d67..307b41d027ca4eefc15fdc836a61f95828f7c15b 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/test_white_lists.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/test_white_lists.py @@ -13,12 +13,17 @@ # limitations under the License. import unittest -from paddle.distributed.passes.pass_base import register_pass, PassBase, new_pass -from paddle.distributed.passes.pass_base import _make_rule_from_white_lists_dict as make_white_lists_rule +from paddle.distributed.passes.pass_base import ( + register_pass, + PassBase, + new_pass, +) +from paddle.distributed.passes.pass_base import ( + _make_rule_from_white_lists_dict as make_white_lists_rule, +) class TestConcretePass(PassBase): - def __init__(self): super(TestConcretePass, self).__init__() @@ -34,41 +39,35 @@ class TestConcretePass(PassBase): @register_pass("A") class A(TestConcretePass): - def __init__(self): super(A, self).__init__() @register_pass("B") class B(TestConcretePass): - def __init__(self): super(B, self).__init__() @register_pass("C") class C(TestConcretePass): - def __init__(self): super(C, self).__init__() @register_pass("D") class D(TestConcretePass): - def __init__(self): super(D, self).__init__() @register_pass("E") class E(TestConcretePass): - def __init__(self): super(E, self).__init__() class TestMakeWhiteListsRule(unittest.TestCase): - def test_main(self): before_white_lists = {"A": ["B", "C"]} after_white_lists = {"D": ["C"]} diff --git a/python/paddle/fluid/tests/unittests/distribution/config.py b/python/paddle/fluid/tests/unittests/distribution/config.py index aee76250e5d142c02a08cd494b65435c06be9747..29a27890bad2c53db65c6fc6640effa2096f9894 100644 --- a/python/paddle/fluid/tests/unittests/distribution/config.py +++ b/python/paddle/fluid/tests/unittests/distribution/config.py @@ -26,6 +26,6 @@ RTOL = { 'float32': 1e-03, 'complex64': 1e-3, 'float64': 1e-5, - 'complex128': 1e-5 + 'complex128': 1e-5, } ATOL = {'float32': 0.0, 'complex64': 0, 'float64': 0.0, 'complex128': 0} diff --git a/python/paddle/fluid/tests/unittests/distribution/mock_data.py b/python/paddle/fluid/tests/unittests/distribution/mock_data.py index 60299505f218a6f7e9a63092e35ca27767f09bcf..db08fe1c886882976ce51c3fa60b660ae47bd41d 100644 --- a/python/paddle/fluid/tests/unittests/distribution/mock_data.py +++ b/python/paddle/fluid/tests/unittests/distribution/mock_data.py @@ -17,8 +17,9 @@ import paddle class Exponential(paddle.distribution.ExponentialFamily): """mock exponential distribution, which support computing entropy and - kl use bregman divergence + kl use bregman divergence """ + _mean_carrier_measure = 0 def __init__(self, rate): @@ -34,15 +35,14 @@ class Exponential(paddle.distribution.ExponentialFamily): @property def _natural_parameters(self): - return (-self._rate, ) + return (-self._rate,) def _log_normalizer(self, x): return -paddle.log(-x) class DummyExpFamily(paddle.distribution.ExponentialFamily): - """dummy class extend from exponential family - """ + """dummy class extend from exponential family""" def __init__(self, *args): pass @@ -52,7 +52,7 @@ class DummyExpFamily(paddle.distribution.ExponentialFamily): @property def _natural_parameters(self): - return (1.0, ) + return (1.0,) def _log_normalizer(self, x): return -paddle.log(-x) diff --git a/python/paddle/fluid/tests/unittests/distribution/parameterize.py b/python/paddle/fluid/tests/unittests/distribution/parameterize.py index 
72c9ac03325a56ef454320f626a0bf1f3b85e4ec..9c3341e34ce72038c4c076425c2c11b080046a5b 100644 --- a/python/paddle/fluid/tests/unittests/distribution/parameterize.py +++ b/python/paddle/fluid/tests/unittests/distribution/parameterize.py @@ -25,16 +25,14 @@ TEST_CASE_NAME = 'suffix' def xrand(shape=(10, 10, 10), dtype=config.DEFAULT_DTYPE, min=1.0, max=10.0): - return ((np.random.rand(*shape).astype(dtype)) * (max - min) + min) + return (np.random.rand(*shape).astype(dtype)) * (max - min) + min def place(devices, key='place'): - def decorate(cls): module = sys.modules[cls.__module__].__dict__ raw_classes = { - k: v - for k, v in module.items() if k.startswith(cls.__name__) + k: v for k, v in module.items() if k.startswith(cls.__name__) } for raw_name, raw_cls in raw_classes.items(): @@ -42,7 +40,7 @@ def place(devices, key='place'): test_cls = dict(raw_cls.__dict__) test_cls.update({key: d}) new_name = raw_name + '.' + d.__class__.__name__ - module[new_name] = type(new_name, (raw_cls, ), test_cls) + module[new_name] = type(new_name, (raw_cls,), test_cls) del module[raw_name] return cls @@ -61,7 +59,7 @@ def parameterize_cls(fields, values=None): name = cls.__name__ + str(k) name = name + '.' + v.get('suffix') if v.get('suffix') else name - test_cls_module[name] = type(name, (cls, ), test_cls) + test_cls_module[name] = type(name, (cls,), test_cls) for m in list(cls.__dict__): if m.startswith("test"): @@ -71,10 +69,9 @@ def parameterize_cls(fields, values=None): return decorate -def parameterize_func(input, - name_func=None, - doc_func=None, - skip_on_empty=False): +def parameterize_func( + input, name_func=None, doc_func=None, skip_on_empty=False +): doc_func = doc_func or default_doc_func name_func = name_func or default_name_func @@ -88,13 +85,15 @@ def parameterize_func(input, raise ValueError( "Parameters iterable is empty (hint: use " "`parameterized.expand([], skip_on_empty=True)` to skip " - "this test when the input is empty)") + "this test when the input is empty)" + ) return wraps(f)(skip_on_empty_helper) digits = len(str(len(parameters) - 1)) for num, p in enumerate(parameters): - name = name_func(f, "{num:0>{digits}}".format(digits=digits, - num=num), p) + name = name_func( + f, "{num:0>{digits}}".format(digits=digits, num=num), p + ) # If the original function has patches applied by 'mock.patch', # re-construct all patches on the just former decoration layer # of param_as_standalone_func so as not to share @@ -113,9 +112,7 @@ def parameterize_func(input, def reapply_patches_if_need(func): - def dummy_wrapper(orgfunc): - @wraps(orgfunc) def dummy_func(*args, **kwargs): return orgfunc(*args, **kwargs) @@ -138,7 +135,7 @@ def delete_patches_if_need(func): def default_name_func(func, num, p): base_name = func.__name__ - name_suffix = "_%s" % (num, ) + name_suffix = "_%s" % (num,) if len(p.args) > 0 and isinstance(p.args[0], str): name_suffix += "_" + to_safe_name(p.args[0]) @@ -167,7 +164,6 @@ def default_doc_func(func, num, p): def param_as_standalone_func(p, func, name): - @functools.wraps(func) def standalone_func(*a): return func(*(a + p.args), **p.kwargs) @@ -209,36 +205,35 @@ _param = collections.namedtuple("param", "args kwargs") class param(_param): - def __new__(cls, *args, **kwargs): return _param.__new__(cls, args, kwargs) @classmethod def explicit(cls, args=None, kwargs=None): - """ Creates a ``param`` by explicitly specifying ``args`` and - ``kwargs``:: - >>> param.explicit([1,2,3]) - param(*(1, 2, 3)) - >>> param.explicit(kwargs={"foo": 42}) - param(*(), **{"foo": "42"}) - 
""" + """Creates a ``param`` by explicitly specifying ``args`` and + ``kwargs``:: + >>> param.explicit([1,2,3]) + param(*(1, 2, 3)) + >>> param.explicit(kwargs={"foo": 42}) + param(*(), **{"foo": "42"}) + """ args = args or () kwargs = kwargs or {} return cls(*args, **kwargs) @classmethod def from_decorator(cls, args): - """ Returns an instance of ``param()`` for ``@parameterized`` argument - ``args``:: - >>> param.from_decorator((42, )) - param(args=(42, ), kwargs={}) - >>> param.from_decorator("foo") - param(args=("foo", ), kwargs={}) - """ + """Returns an instance of ``param()`` for ``@parameterized`` argument + ``args``:: + >>> param.from_decorator((42, )) + param(args=(42, ), kwargs={}) + >>> param.from_decorator("foo") + param(args=("foo", ), kwargs={}) + """ if isinstance(args, param): return args elif isinstance(args, str): - args = (args, ) + args = (args,) try: return cls(*args) except TypeError as e: @@ -246,7 +241,8 @@ class param(_param): raise raise TypeError( "Parameters must be tuples, but %r is not (hint: use '(%r, )')" - % (args, args), ) + % (args, args), + ) def __repr__(self): return "param(*%r, **%r)" % self diff --git a/python/paddle/fluid/tests/unittests/distribution/test_dirichlet_op.py b/python/paddle/fluid/tests/unittests/distribution/test_dirichlet_op.py index 90209d9529b2360db0fc82b7995a8c23c6934cd1..9b1183a96ebd883688a7eba5de41b6b8bd6b96c5 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_dirichlet_op.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_dirichlet_op.py @@ -30,7 +30,7 @@ class TestDirichletOp(OpTest): def setUp(self): self.op_type = "dirichlet" - self.alpha = np.array((1., 2.)) + self.alpha = np.array((1.0, 2.0)) self.sample_shape = (100000, 2) self.inputs = {'Alpha': np.broadcast_to(self.alpha, self.sample_shape)} @@ -47,5 +47,7 @@ class TestDirichletOp(OpTest): scipy.stats.kstest( outs[0][:, 0], # scipy dirichlet have not cdf, use beta to replace it. 
- scipy.stats.beta(a=self.alpha[0], b=self.alpha[1]).cdf)[0], - 0.01) + scipy.stats.beta(a=self.alpha[0], b=self.alpha[1]).cdf, + )[0], + 0.01, + ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution.py index 30024c10fee38fdf06497d0fe59cd418be878f51..8d592683aba61e0b37f500f727bbb74f2266d64a 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution.py @@ -25,8 +25,7 @@ import parameterize paddle.enable_static() -class DistributionNumpy(): - +class DistributionNumpy: def sample(self): raise NotImplementedError @@ -44,9 +43,8 @@ class DistributionNumpy(): class DistributionTestName(unittest.TestCase): - def get_prefix(self, string): - return (string.split('.')[0]) + return string.split('.')[0] def test_normal_name(self): name = 'test_normal' @@ -136,15 +134,18 @@ class DistributionTestName(unittest.TestCase): @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'batch_shape', 'event_shape'), - [('test-tuple', (10, 20), (10, 20)), - ('test-list', [100, 100], [100, 200, 300]), - ('test-null-eventshape', (100, 100), ())]) + [ + ('test-tuple', (10, 20), (10, 20)), + ('test-list', [100, 100], [100, 200, 300]), + ('test-null-eventshape', (100, 100), ()), + ], +) class TestDistributionShape(unittest.TestCase): - def setUp(self): paddle.disable_static() self.dist = paddle.distribution.Distribution( - batch_shape=self.batch_shape, event_shape=self.event_shape) + batch_shape=self.batch_shape, event_shape=self.event_shape + ) def tearDown(self): paddle.enable_static() @@ -162,15 +163,15 @@ class TestDistributionShape(unittest.TestCase): self.dist.prob(paddle.to_tensor(parameterize.xrand())) def test_extend_shape(self): - shapes = [(34, 20), (56, ), ()] + shapes = [(34, 20), (56,), ()] for shape in shapes: self.assertTrue( self.dist._extend_shape(shape), - shape + self.dist.batch_shape + self.dist.event_shape) + shape + self.dist.batch_shape + self.dist.event_shape, + ) class TestDistributionException(unittest.TestCase): - def setUp(self): self._d = paddle.distribution.Distribution() diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta.py index 18315440a21dc7598d81c92554d7bd270119d602..d6febc7ac4d46c866dbc48260cbac9701997c4d6 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta.py @@ -25,11 +25,15 @@ np.random.seed(2022) @place(DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'alpha', 'beta'), - [('test-scale', 1.0, 2.0), ('test-tensor', xrand(), xrand()), - ('test-broadcast', xrand((2, 1)), xrand((2, 5)))]) +@parameterize_cls( + (TEST_CASE_NAME, 'alpha', 'beta'), + [ + ('test-scale', 1.0, 2.0), + ('test-tensor', xrand(), xrand()), + ('test-broadcast', xrand((2, 1)), xrand((2, 5))), + ], +) class TestBeta(unittest.TestCase): - def setUp(self): # scale no need convert to tensor for scale input unittest alpha, beta = self.alpha, self.beta @@ -46,7 +50,8 @@ class TestBeta(unittest.TestCase): self._paddle_beta.mean, scipy.stats.beta.mean(self.alpha, self.beta), rtol=RTOL.get(str(self._paddle_beta.alpha.numpy().dtype)), - atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype))) + atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype)), + ) def 
test_variance(self): with paddle.fluid.dygraph.guard(self.place): @@ -54,7 +59,8 @@ class TestBeta(unittest.TestCase): self._paddle_beta.variance, scipy.stats.beta.var(self.alpha, self.beta), rtol=RTOL.get(str(self._paddle_beta.alpha.numpy().dtype)), - atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype))) + atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype)), + ) def test_prob(self): value = [np.random.rand(*self._paddle_beta.alpha.shape)] @@ -65,7 +71,8 @@ class TestBeta(unittest.TestCase): self._paddle_beta.prob(paddle.to_tensor(v)), scipy.stats.beta.pdf(v, self.alpha, self.beta), rtol=RTOL.get(str(self._paddle_beta.alpha.numpy().dtype)), - atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype))) + atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype)), + ) def test_log_prob(self): value = [np.random.rand(*self._paddle_beta.alpha.shape)] @@ -76,7 +83,8 @@ class TestBeta(unittest.TestCase): self._paddle_beta.log_prob(paddle.to_tensor(v)), scipy.stats.beta.logpdf(v, self.alpha, self.beta), rtol=RTOL.get(str(self._paddle_beta.alpha.numpy().dtype)), - atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype))) + atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype)), + ) def test_entropy(self): with paddle.fluid.dygraph.guard(self.place): @@ -84,23 +92,26 @@ class TestBeta(unittest.TestCase): self._paddle_beta.entropy(), scipy.stats.beta.entropy(self.alpha, self.beta), rtol=RTOL.get(str(self._paddle_beta.alpha.numpy().dtype)), - atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype))) + atol=ATOL.get(str(self._paddle_beta.alpha.numpy().dtype)), + ) def test_sample_shape(self): cases = [ { 'input': [], - 'expect': [] + paddle.squeeze(self._paddle_beta.alpha).shape + 'expect': [] + paddle.squeeze(self._paddle_beta.alpha).shape, }, { 'input': [2, 3], - 'expect': [2, 3] + paddle.squeeze(self._paddle_beta.alpha).shape + 'expect': [2, 3] + + paddle.squeeze(self._paddle_beta.alpha).shape, }, ] for case in cases: self.assertTrue( - self._paddle_beta.sample(case.get('input')).shape == case.get( - 'expect')) + self._paddle_beta.sample(case.get('input')).shape + == case.get('expect') + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta_static.py index 8976293e36c0945f72ac5a33bed07d4148698751..d6976f8b685ba3cc6215182f46ed556f39be1c71 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_beta_static.py @@ -27,100 +27,119 @@ paddle.enable_static() @param.place(config.DEVICES) -@param.parameterize_cls((param.TEST_CASE_NAME, 'alpha', 'beta'), - [('test-tensor', xrand((10, 10)), xrand((10, 10))), - ('test-broadcast', xrand((2, 1)), xrand((2, 5))), - ('test-larger-data', xrand((10, 20)), xrand( - (10, 20)))]) +@param.parameterize_cls( + (param.TEST_CASE_NAME, 'alpha', 'beta'), + [ + ('test-tensor', xrand((10, 10)), xrand((10, 10))), + ('test-broadcast', xrand((2, 1)), xrand((2, 5))), + ('test-larger-data', xrand((10, 20)), xrand((10, 20))), + ], +) class TestBeta(unittest.TestCase): - def setUp(self): self.program = paddle.static.Program() self.executor = paddle.static.Executor(self.place) with paddle.static.program_guard(self.program): # scale no need convert to tensor for scale input unittest - alpha = paddle.static.data('alpha', self.alpha.shape, - self.alpha.dtype) + alpha = paddle.static.data( + 'alpha', self.alpha.shape, 
self.alpha.dtype + ) beta = paddle.static.data('beta', self.beta.shape, self.beta.dtype) self._paddle_beta = paddle.distribution.Beta(alpha, beta) self.feeds = {'alpha': self.alpha, 'beta': self.beta} def test_mean(self): with paddle.static.program_guard(self.program): - [mean] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=[self._paddle_beta.mean]) - np.testing.assert_allclose(mean, - scipy.stats.beta.mean( - self.alpha, self.beta), - rtol=RTOL.get(str(self.alpha.dtype)), - atol=ATOL.get(str(self.alpha.dtype))) + [mean] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_beta.mean], + ) + np.testing.assert_allclose( + mean, + scipy.stats.beta.mean(self.alpha, self.beta), + rtol=RTOL.get(str(self.alpha.dtype)), + atol=ATOL.get(str(self.alpha.dtype)), + ) def test_variance(self): with paddle.static.program_guard(self.program): - [variance - ] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=[self._paddle_beta.variance]) - np.testing.assert_allclose(variance, - scipy.stats.beta.var( - self.alpha, self.beta), - rtol=RTOL.get(str(self.alpha.dtype)), - atol=ATOL.get(str(self.alpha.dtype))) + [variance] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_beta.variance], + ) + np.testing.assert_allclose( + variance, + scipy.stats.beta.var(self.alpha, self.beta), + rtol=RTOL.get(str(self.alpha.dtype)), + atol=ATOL.get(str(self.alpha.dtype)), + ) def test_prob(self): with paddle.static.program_guard(self.program): - value = paddle.static.data('value', self._paddle_beta.alpha.shape, - self._paddle_beta.alpha.dtype) + value = paddle.static.data( + 'value', + self._paddle_beta.alpha.shape, + self._paddle_beta.alpha.dtype, + ) prob = self._paddle_beta.prob(value) random_number = np.random.rand(*self._paddle_beta.alpha.shape) feeds = dict(self.feeds, value=random_number) - [prob] = self.executor.run(self.program, - feed=feeds, - fetch_list=[prob]) - np.testing.assert_allclose(prob, - scipy.stats.beta.pdf( - random_number, self.alpha, - self.beta), - rtol=RTOL.get(str(self.alpha.dtype)), - atol=ATOL.get(str(self.alpha.dtype))) + [prob] = self.executor.run( + self.program, feed=feeds, fetch_list=[prob] + ) + np.testing.assert_allclose( + prob, + scipy.stats.beta.pdf(random_number, self.alpha, self.beta), + rtol=RTOL.get(str(self.alpha.dtype)), + atol=ATOL.get(str(self.alpha.dtype)), + ) def test_log_prob(self): with paddle.static.program_guard(self.program): - value = paddle.static.data('value', self._paddle_beta.alpha.shape, - self._paddle_beta.alpha.dtype) + value = paddle.static.data( + 'value', + self._paddle_beta.alpha.shape, + self._paddle_beta.alpha.dtype, + ) prob = self._paddle_beta.log_prob(value) random_number = np.random.rand(*self._paddle_beta.alpha.shape) feeds = dict(self.feeds, value=random_number) - [prob] = self.executor.run(self.program, - feed=feeds, - fetch_list=[prob]) - np.testing.assert_allclose(prob, - scipy.stats.beta.logpdf( - random_number, self.alpha, - self.beta), - rtol=RTOL.get(str(self.alpha.dtype)), - atol=ATOL.get(str(self.alpha.dtype))) + [prob] = self.executor.run( + self.program, feed=feeds, fetch_list=[prob] + ) + np.testing.assert_allclose( + prob, + scipy.stats.beta.logpdf(random_number, self.alpha, self.beta), + rtol=RTOL.get(str(self.alpha.dtype)), + atol=ATOL.get(str(self.alpha.dtype)), + ) def test_entropy(self): with paddle.static.program_guard(self.program): - [entropy - ] = self.executor.run(self.program, - feed=self.feeds, - 
fetch_list=[self._paddle_beta.entropy()]) - np.testing.assert_allclose(entropy, - scipy.stats.beta.entropy( - self.alpha, self.beta), - rtol=RTOL.get(str(self.alpha.dtype)), - atol=ATOL.get(str(self.alpha.dtype))) + [entropy] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_beta.entropy()], + ) + np.testing.assert_allclose( + entropy, + scipy.stats.beta.entropy(self.alpha, self.beta), + rtol=RTOL.get(str(self.alpha.dtype)), + atol=ATOL.get(str(self.alpha.dtype)), + ) def test_sample(self): with paddle.static.program_guard(self.program): - [data] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=self._paddle_beta.sample()) - self.assertTrue(data.shape, - np.broadcast_arrays(self.alpha, self.beta)[0].shape) + [data] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=self._paddle_beta.sample(), + ) + self.assertTrue( + data.shape, np.broadcast_arrays(self.alpha, self.beta)[0].shape + ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py index 4be500b6084dbb744d29bdfe5dc15f263640de66..0ccfb1e0769fd2df4b6c004e04b81bdb16bc42b5 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py @@ -26,7 +26,6 @@ np.random.seed(2022) class CategoricalNumpy(DistributionNumpy): - def __init__(self, logits): self.logits = np.array(logits).astype('float32') @@ -35,25 +34,26 @@ class CategoricalNumpy(DistributionNumpy): e_logits = np.exp(logits) z = np.sum(e_logits, axis=-1, keepdims=True) prob = e_logits / z - return -1. * np.sum(prob * (logits - np.log(z)), axis=-1) + return -1.0 * np.sum(prob * (logits - np.log(z)), axis=-1) def kl_divergence(self, other): logits = self.logits - np.max(self.logits, axis=-1, keepdims=True) other_logits = other.logits - np.max( - other.logits, axis=-1, keepdims=True) + other.logits, axis=-1, keepdims=True + ) e_logits = np.exp(logits) other_e_logits = np.exp(other_logits) z = np.sum(e_logits, axis=-1, keepdims=True) other_z = np.sum(other_e_logits, axis=-1, keepdims=True) prob = e_logits / z - return np.sum(prob * - (logits - np.log(z) - other_logits + np.log(other_z)), - axis=-1, - keepdims=True) + return np.sum( + prob * (logits - np.log(z) - other_logits + np.log(other_z)), + axis=-1, + keepdims=True, + ) class CategoricalTest(unittest.TestCase): - def setUp(self, use_gpu=False, batch_size=3, dims=5): self.use_gpu = use_gpu if not use_gpu: @@ -79,8 +79,9 @@ class CategoricalTest(unittest.TestCase): # input logtis is 2-D Tensor # value used in probs and log_prob method is 1-D Tensor self.logits_np = np.random.rand(batch_size, dims).astype('float32') - self.other_logits_np = np.random.rand(batch_size, - dims).astype('float32') + self.other_logits_np = np.random.rand(batch_size, dims).astype( + 'float32' + ) self.value_np = np.array([2, 1, 3]).astype('int64') self.logits_shape = [batch_size, dims] @@ -103,15 +104,15 @@ class CategoricalTest(unittest.TestCase): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.logits_static = fluid.data(name='logits', - shape=self.logits_shape, - dtype='float32') - self.other_logits_static = fluid.data(name='other_logits', - shape=self.logits_shape, - dtype='float32') - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.logits_static = 
fluid.data( + name='logits', shape=self.logits_shape, dtype='float32' + ) + self.other_logits_static = fluid.data( + name='other_logits', shape=self.logits_shape, dtype='float32' + ) + self.value_static = fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) def get_numpy_selected_probs(self, probability): np_probs = np.zeros(self.dist_shape + self.value_shape) @@ -124,36 +125,33 @@ class CategoricalTest(unittest.TestCase): sample, entropy, kl, probs, log_prob = fetch_list log_tolerance = 1e-4 - np.testing.assert_equal(sample.shape, - self.sample_shape + self.dist_shape) + np.testing.assert_equal( + sample.shape, self.sample_shape + self.dist_shape + ) np_categorical = CategoricalNumpy(self.logits_np) np_other_categorical = CategoricalNumpy(self.other_logits_np) np_entropy = np_categorical.entropy() np_kl = np_categorical.kl_divergence(np_other_categorical) - np.testing.assert_allclose(entropy, - np_entropy, - rtol=log_tolerance, - atol=log_tolerance) - np.testing.assert_allclose(kl, - np_kl, - rtol=log_tolerance, - atol=log_tolerance) + np.testing.assert_allclose( + entropy, np_entropy, rtol=log_tolerance, atol=log_tolerance + ) + np.testing.assert_allclose( + kl, np_kl, rtol=log_tolerance, atol=log_tolerance + ) sum_dist = np.sum(self.logits_np, axis=-1, keepdims=True) probability = self.logits_np / sum_dist np_probs = self.get_numpy_selected_probs(probability) np_log_prob = np.log(np_probs) - np.testing.assert_allclose(probs, - np_probs, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(log_prob, - np_log_prob, - rtol=tolerance, - atol=tolerance) + np.testing.assert_allclose( + probs, np_probs, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + log_prob, np_log_prob, rtol=tolerance, atol=tolerance + ) def test_categorical_distribution_dygraph(self, tolerance=1e-6): paddle.disable_static(self.place) @@ -186,25 +184,25 @@ class CategoricalTest(unittest.TestCase): feed_vars = { 'logits': self.logits_np, 'other_logits': self.other_logits_np, - 'value': self.value_np + 'value': self.value_np, } self.executor.run(fluid.default_startup_program()) - fetch_list = self.executor.run(program=self.test_program, - feed=feed_vars, - fetch_list=fetch_list) + fetch_list = self.executor.run( + program=self.test_program, feed=feed_vars, fetch_list=fetch_list + ) self.compare_with_numpy(fetch_list) class CategoricalTest2(CategoricalTest): - def init_numpy_data(self, batch_size, dims): # input logtis is 2-D Tensor with dtype Float64 # value used in probs and log_prob method is 1-D Tensor self.logits_np = np.random.rand(batch_size, dims).astype('float64') - self.other_logits_np = np.random.rand(batch_size, - dims).astype('float64') + self.other_logits_np = np.random.rand(batch_size, dims).astype( + 'float64' + ) self.value_np = np.array([2, 1, 3]).astype('int64') self.logits_shape = [batch_size, dims] @@ -214,19 +212,18 @@ class CategoricalTest2(CategoricalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.logits_static = fluid.data(name='logits', - shape=self.logits_shape, - dtype='float64') - self.other_logits_static = fluid.data(name='other_logits', - shape=self.logits_shape, - dtype='float64') - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.logits_static = fluid.data( + name='logits', shape=self.logits_shape, dtype='float64' + ) + self.other_logits_static = fluid.data( + name='other_logits', shape=self.logits_shape, dtype='float64' + ) + self.value_static = 
fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) class CategoricalTest3(CategoricalTest): - def init_dynamic_data(self, batch_size, dims): # input logtis is 2-D numpy.ndarray with dtype Float32 # value used in probs and log_prob method is 1-D Tensor @@ -238,19 +235,19 @@ class CategoricalTest3(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.value_static = fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) class CategoricalTest4(CategoricalTest): - def init_numpy_data(self, batch_size, dims): # input logtis is 2-D numpy.ndarray with dtype Float64 # value used in probs and log_prob method is 1-D Tensor self.logits_np = np.random.rand(batch_size, dims).astype('float64') - self.other_logits_np = np.random.rand(batch_size, - dims).astype('float64') + self.other_logits_np = np.random.rand(batch_size, dims).astype( + 'float64' + ) self.value_np = np.array([2, 1, 3]).astype('int64') self.logits_shape = [batch_size, dims] @@ -267,14 +264,13 @@ class CategoricalTest4(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.value_static = fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) # test shape of logits and value used in probs and log_prob method class CategoricalTest5(CategoricalTest): - def init_numpy_data(self, batch_size, dims): # input logtis is 1-D Tensor # value used in probs and log_prob method is 1-D Tensor @@ -295,7 +291,6 @@ class CategoricalTest5(CategoricalTest): class CategoricalTest6(CategoricalTest): - def init_numpy_data(self, batch_size, dims): # input logtis is 2-D Tensor # value used in probs and log_prob method has the same number of batches with input @@ -317,7 +312,6 @@ class CategoricalTest6(CategoricalTest): class CategoricalTest7(CategoricalTest): - def init_numpy_data(self, batch_size, dims): # input logtis is 3-D Tensor # value used in probs and log_prob method has the same number of distribuions with input @@ -340,7 +334,6 @@ class CategoricalTest7(CategoricalTest): class CategoricalTest8(CategoricalTest): - def init_dynamic_data(self, batch_size, dims): # input logtis is 2-D list # value used in probs and log_prob method is 1-D Tensor @@ -352,13 +345,12 @@ class CategoricalTest8(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np.tolist() self.other_logits_static = self.other_logits_np.tolist() - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.value_static = fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) class CategoricalTest9(CategoricalTest): - def init_dynamic_data(self, batch_size, dims): # input logtis is 2-D tuple # value used in probs and log_prob method is 1-D Tensor @@ -370,13 +362,12 @@ class CategoricalTest9(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = tuple(self.logits_np.tolist()) self.other_logits_static = tuple(self.other_logits_np.tolist()) - self.value_static = fluid.data(name='value', - shape=self.value_shape, - dtype='int64') + self.value_static = fluid.data( + name='value', shape=self.value_shape, dtype='int64' + ) class 
DistributionTestError(unittest.TestCase): - def test_distribution_error(self): distribution = Distribution() @@ -384,13 +375,15 @@ class DistributionTestError(unittest.TestCase): self.assertRaises(NotImplementedError, distribution.entropy) normal = Normal(0.0, 1.0) - self.assertRaises(NotImplementedError, distribution.kl_divergence, - normal) + self.assertRaises( + NotImplementedError, distribution.kl_divergence, normal + ) value_npdata = np.array([0.8], dtype="float32") value_tensor = layers.create_tensor(dtype="float32") - self.assertRaises(NotImplementedError, distribution.log_prob, - value_tensor) + self.assertRaises( + NotImplementedError, distribution.log_prob, value_tensor + ) self.assertRaises(NotImplementedError, distribution.probs, value_tensor) def test_normal_error(self): @@ -456,8 +449,9 @@ class DistributionTestError(unittest.TestCase): categorical_other = Uniform(1.0, 2.0) # type of other must be an instance of Categorical - self.assertRaises(TypeError, categorical.kl_divergence, - categorical_other) + self.assertRaises( + TypeError, categorical.kl_divergence, categorical_other + ) def test_shape_not_match_error(): # shape of value must match shape of logits diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_constraint.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_constraint.py index cbf0431d9f34ab611b7ff51fc03c63cb89bfd3f4..3ef352d54bf37cc2c1cec7f56b41aad523d08537 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_constraint.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_constraint.py @@ -23,10 +23,10 @@ import parameterize as param np.random.seed(2022) -@param.param_cls((param.TEST_CASE_NAME, 'value'), - [('NotImplement', np.random.rand(2, 3))]) +@param.param_cls( + (param.TEST_CASE_NAME, 'value'), [('NotImplement', np.random.rand(2, 3))] +) class TestConstraint(unittest.TestCase): - def setUp(self): self._constraint = constraint.Constraint() @@ -35,10 +35,10 @@ class TestConstraint(unittest.TestCase): self._constraint(self.value) -@param.param_cls((param.TEST_CASE_NAME, 'value', 'expect'), - [('real', 1., True)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'value', 'expect'), [('real', 1.0, True)] +) class TestReal(unittest.TestCase): - def setUp(self): self._constraint = constraint.Real() @@ -46,10 +46,11 @@ class TestReal(unittest.TestCase): self.assertEqual(self._constraint(self.value), self.expect) -@param.param_cls((param.TEST_CASE_NAME, 'lower', 'upper', 'value', 'expect'), - [('in_range', 0, 1, 0.5, True), ('out_range', 0, 1, 2, False)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'lower', 'upper', 'value', 'expect'), + [('in_range', 0, 1, 0.5, True), ('out_range', 0, 1, 2, False)], +) class TestRange(unittest.TestCase): - def setUp(self): self._constraint = constraint.Range(self.lower, self.upper) @@ -57,10 +58,11 @@ class TestRange(unittest.TestCase): self.assertEqual(self._constraint(self.value), self.expect) -@param.param_cls((param.TEST_CASE_NAME, 'value', 'expect'), - [('positive', 1, True), ('negative', -1, False)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'value', 'expect'), + [('positive', 1, True), ('negative', -1, False)], +) class TestPositive(unittest.TestCase): - def setUp(self): self._constraint = constraint.Positive() @@ -68,11 +70,14 @@ class TestPositive(unittest.TestCase): self.assertEqual(self._constraint(self.value), self.expect) -@param.param_cls((param.TEST_CASE_NAME, 'value', 'expect'), - [('simplex', paddle.to_tensor([0.5, 
0.5]), True), - ('non_simplex', paddle.to_tensor([-0.5, 0.5]), False)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'value', 'expect'), + [ + ('simplex', paddle.to_tensor([0.5, 0.5]), True), + ('non_simplex', paddle.to_tensor([-0.5, 0.5]), False), + ], +) class TestSimplex(unittest.TestCase): - def setUp(self): self._constraint = constraint.Simplex() diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet.py index cb5534edcd54163686a0c49a153d7f4765a6fd1d..399fe66b0a06788f00b8efb4eb2a05728e5a346b 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet.py @@ -28,14 +28,15 @@ np.random.seed(2022) @param.param_cls( (param.TEST_CASE_NAME, 'concentration'), [ - ('test-one-dim', param.xrand((89, ))), + ('test-one-dim', param.xrand((89,))), # ('test-multi-dim', config.xrand((10, 20, 30))) - ]) + ], +) class TestDirichlet(unittest.TestCase): - def setUp(self): self._paddle_diric = paddle.distribution.Dirichlet( - paddle.to_tensor(self.concentration)) + paddle.to_tensor(self.concentration) + ) def test_mean(self): with paddle.fluid.dygraph.guard(self.place): @@ -43,7 +44,8 @@ class TestDirichlet(unittest.TestCase): self._paddle_diric.mean, scipy.stats.dirichlet.mean(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_variance(self): with paddle.fluid.dygraph.guard(self.place): @@ -51,7 +53,8 @@ class TestDirichlet(unittest.TestCase): self._paddle_diric.variance, scipy.stats.dirichlet.var(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_prob(self): value = [np.random.rand(*self.concentration.shape)] @@ -63,7 +66,8 @@ class TestDirichlet(unittest.TestCase): self._paddle_diric.prob(paddle.to_tensor(v)), scipy.stats.dirichlet.pdf(v, self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_log_prob(self): value = [np.random.rand(*self.concentration.shape)] @@ -75,7 +79,8 @@ class TestDirichlet(unittest.TestCase): self._paddle_diric.log_prob(paddle.to_tensor(v)), scipy.stats.dirichlet.logpdf(v, self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_entropy(self): with paddle.fluid.dygraph.guard(self.place): @@ -83,28 +88,35 @@ class TestDirichlet(unittest.TestCase): self._paddle_diric.entropy(), scipy.stats.dirichlet.entropy(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_natural_parameters(self): self.assertTrue( - isinstance(self._paddle_diric._natural_parameters, tuple)) + isinstance(self._paddle_diric._natural_parameters, tuple) + ) def test_log_normalizer(self): self.assertTrue( np.all( self._paddle_diric._log_normalizer( - paddle.to_tensor(param.xrand((100, 100, - 100)))).numpy() < 0.0)) + paddle.to_tensor(param.xrand((100, 100, 100))) + ).numpy() + < 0.0 + ) + ) @param.place(DEVICES) - @param.param_cls((param.TEST_CASE_NAME, 'concentration'), - 
[('test-zero-dim', np.array(1.0))]) + @param.param_cls( + (param.TEST_CASE_NAME, 'concentration'), + [('test-zero-dim', np.array(1.0))], + ) class TestDirichletException(unittest.TestCase): - def TestInit(self): with self.assertRaises(ValueError): - paddle.distribution.Dirichlet(paddle.squeeze( - self.concentration)) + paddle.distribution.Dirichlet( + paddle.squeeze(self.concentration) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py index 8b0d847ad16e7f4f3fcfe91da2b46d5e75567038..edf66a26e02193f6a47d0472d624c25a69d9e68c 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py @@ -26,82 +26,97 @@ paddle.enable_static() @place(DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'concentration'), - [('test-one-dim', np.random.rand(89) + 5.0)]) +@parameterize_cls( + (TEST_CASE_NAME, 'concentration'), + [('test-one-dim', np.random.rand(89) + 5.0)], +) class TestDirichlet(unittest.TestCase): - def setUp(self): self.program = paddle.static.Program() self.executor = paddle.static.Executor() with paddle.static.program_guard(self.program): - conc = paddle.static.data('conc', self.concentration.shape, - self.concentration.dtype) + conc = paddle.static.data( + 'conc', self.concentration.shape, self.concentration.dtype + ) self._paddle_diric = paddle.distribution.Dirichlet(conc) self.feeds = {'conc': self.concentration} def test_mean(self): with paddle.static.program_guard(self.program): - [out] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=[self._paddle_diric.mean]) + [out] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_diric.mean], + ) np.testing.assert_allclose( out, scipy.stats.dirichlet.mean(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_variance(self): with paddle.static.program_guard(self.program): - [out] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=[self._paddle_diric.variance]) + [out] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_diric.variance], + ) np.testing.assert_allclose( out, scipy.stats.dirichlet.var(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_prob(self): with paddle.static.program_guard(self.program): random_number = np.random.rand(*self.concentration.shape) random_number = random_number / random_number.sum() feeds = dict(self.feeds, value=random_number) - value = paddle.static.data('value', random_number.shape, - random_number.dtype) + value = paddle.static.data( + 'value', random_number.shape, random_number.dtype + ) out = self._paddle_diric.prob(value) - [out] = self.executor.run(self.program, - feed=feeds, - fetch_list=[out]) + [out] = self.executor.run( + self.program, feed=feeds, fetch_list=[out] + ) np.testing.assert_allclose( out, scipy.stats.dirichlet.pdf(random_number, self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_log_prob(self): with paddle.static.program_guard(self.program): 
random_number = np.random.rand(*self.concentration.shape) random_number = random_number / random_number.sum() feeds = dict(self.feeds, value=random_number) - value = paddle.static.data('value', random_number.shape, - random_number.dtype) + value = paddle.static.data( + 'value', random_number.shape, random_number.dtype + ) out = self._paddle_diric.log_prob(value) - [out] = self.executor.run(self.program, - feed=feeds, - fetch_list=[out]) + [out] = self.executor.run( + self.program, feed=feeds, fetch_list=[out] + ) np.testing.assert_allclose( out, scipy.stats.dirichlet.logpdf(random_number, self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) def test_entropy(self): with paddle.static.program_guard(self.program): - [out] = self.executor.run(self.program, - feed=self.feeds, - fetch_list=[self._paddle_diric.entropy()]) + [out] = self.executor.run( + self.program, + feed=self.feeds, + fetch_list=[self._paddle_diric.entropy()], + ) np.testing.assert_allclose( out, scipy.stats.dirichlet.entropy(self.concentration), rtol=RTOL.get(str(self.concentration.dtype)), - atol=ATOL.get(str(self.concentration.dtype))) + atol=ATOL.get(str(self.concentration.dtype)), + ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py index ca54c978ca4eeab65559d5638cac85bba42d2163..001696385e0bf285a2323f665c50d4d97cc1a5bc 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily.py @@ -27,30 +27,46 @@ np.random.seed(2022) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'dist'), - [('test-mock-exp', - mock.Exponential( - rate=paddle.rand([100, 200, 99], dtype=config.DEFAULT_DTYPE)))]) + [ + ( + 'test-mock-exp', + mock.Exponential( + rate=paddle.rand([100, 200, 99], dtype=config.DEFAULT_DTYPE) + ), + ) + ], +) class TestExponentialFamily(unittest.TestCase): - def test_entropy(self): np.testing.assert_allclose( self.dist.entropy(), paddle.distribution.ExponentialFamily.entropy(self.dist), rtol=config.RTOL.get(config.DEFAULT_DTYPE), - atol=config.ATOL.get(config.DEFAULT_DTYPE)) + atol=config.ATOL.get(config.DEFAULT_DTYPE), + ) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( (config.TEST_CASE_NAME, 'dist'), - [('test-dummy', mock.DummyExpFamily(0.5, 0.5)), - ('test-dirichlet', - paddle.distribution.Dirichlet(paddle.to_tensor(parameterize.xrand()))), - ('test-beta', - paddle.distribution.Beta(paddle.to_tensor(parameterize.xrand()), - paddle.to_tensor(parameterize.xrand())))]) + [ + ('test-dummy', mock.DummyExpFamily(0.5, 0.5)), + ( + 'test-dirichlet', + paddle.distribution.Dirichlet( + paddle.to_tensor(parameterize.xrand()) + ), + ), + ( + 'test-beta', + paddle.distribution.Beta( + paddle.to_tensor(parameterize.xrand()), + paddle.to_tensor(parameterize.xrand()), + ), + ), + ], +) class TestExponentialFamilyException(unittest.TestCase): - def test_entropy_exception(self): with self.assertRaises(NotImplementedError): paddle.distribution.ExponentialFamily.entropy(self.dist) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily_static.py index 
2d1493a4e52f199a27593e594126c3426aaebe78..7c2fe5b5a4df0960c21cd6fd20699214da1fc799 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_expfamily_static.py @@ -27,7 +27,6 @@ paddle.enable_static() @parameterize.place(config.DEVICES) class TestExponentialFamily(unittest.TestCase): - def setUp(self): self.program = paddle.static.Program() self.executor = paddle.static.Executor() @@ -45,17 +44,21 @@ class TestExponentialFamily(unittest.TestCase): fetch_list=[ self.mock_dist.entropy(), paddle.distribution.ExponentialFamily.entropy( - self.mock_dist) - ]) + self.mock_dist + ), + ], + ) np.testing.assert_allclose( out1, out2, rtol=config.RTOL.get(config.DEFAULT_DTYPE), - atol=config.ATOL.get(config.DEFAULT_DTYPE)) + atol=config.ATOL.get(config.DEFAULT_DTYPE), + ) def test_entropy_exception(self): with paddle.static.program_guard(self.program): with self.assertRaises(NotImplementedError): paddle.distribution.ExponentialFamily.entropy( - mock.DummyExpFamily(0.5, 0.5)) + mock.DummyExpFamily(0.5, 0.5) + ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel.py index 3f3cfd8e8bb1a01a7455abf267aff7abff0da1b7..3aa7c5e6436fcf4fa104e121210f694325a09cb3 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel.py @@ -24,47 +24,58 @@ from paddle.distribution.gumbel import Gumbel @parameterize.place(config.DEVICES) -@parameterize.parameterize_cls((parameterize.TEST_CASE_NAME, 'loc', 'scale'), [ - ('one-dim', parameterize.xrand((4, )), parameterize.xrand((4, ))), - ('multi-dim', parameterize.xrand((5, 3)), parameterize.xrand((5, 3))), -]) +@parameterize.parameterize_cls( + (parameterize.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('one-dim', parameterize.xrand((4,)), parameterize.xrand((4,))), + ('multi-dim', parameterize.xrand((5, 3)), parameterize.xrand((5, 3))), + ], +) class TestGumbel(unittest.TestCase): - def setUp(self): - self._dist = Gumbel(loc=paddle.to_tensor(self.loc), - scale=paddle.to_tensor(self.scale)) + self._dist = Gumbel( + loc=paddle.to_tensor(self.loc), scale=paddle.to_tensor(self.scale) + ) def test_mean(self): mean = self._dist.mean self.assertEqual(mean.numpy().dtype, self._np_mean().dtype) - np.testing.assert_allclose(mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_variance(self): var = self._dist.variance self.assertEqual(var.numpy().dtype, self._np_variance().dtype) - np.testing.assert_allclose(var, - self._np_variance(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + var, + self._np_variance(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_stddev(self): stddev = self._dist.stddev self.assertEqual(stddev.numpy().dtype, self._np_stddev().dtype) - np.testing.assert_allclose(stddev, - self._np_stddev(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + stddev, + self._np_stddev(), + 
rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): entropy = self._dist.entropy() self.assertEqual(entropy.numpy().dtype, self._np_entropy().dtype) - np.testing.assert_allclose(entropy, - self._np_entropy(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + entropy, + self._np_entropy(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_sample(self): @@ -73,16 +84,18 @@ class TestGumbel(unittest.TestCase): sample_values = samples.numpy() self.assertEqual(sample_values.dtype, self.scale.dtype) - np.testing.assert_allclose(sample_values.mean(axis=0), - scipy.stats.gumbel_r.mean(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.loc.dtype))) - np.testing.assert_allclose(sample_values.var(axis=0), - scipy.stats.gumbel_r.var(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + sample_values.mean(axis=0), + scipy.stats.gumbel_r.mean(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.loc.dtype)), + ) + np.testing.assert_allclose( + sample_values.var(axis=0), + scipy.stats.gumbel_r.var(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_rsample(self): @@ -91,16 +104,18 @@ class TestGumbel(unittest.TestCase): sample_values = samples.numpy() self.assertEqual(sample_values.dtype, self.scale.dtype) - np.testing.assert_allclose(sample_values.mean(axis=0), - scipy.stats.gumbel_r.mean(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.loc.dtype))) - np.testing.assert_allclose(sample_values.var(axis=0), - scipy.stats.gumbel_r.var(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + sample_values.mean(axis=0), + scipy.stats.gumbel_r.mean(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.loc.dtype)), + ) + np.testing.assert_allclose( + sample_values.var(axis=0), + scipy.stats.gumbel_r.var(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.loc.dtype)), + ) def _np_mean(self): return self.loc + self.scale * np.euler_gamma @@ -110,7 +125,8 @@ class TestGumbel(unittest.TestCase): def _np_variance(self): return np.divide( - np.multiply(np.power(self.scale, 2), np.power(np.pi, 2)), 6) + np.multiply(np.power(self.scale, 2), np.power(np.pi, 2)), 6 + ) def _np_entropy(self): return np.log(self.scale) + 1 + np.euler_gamma @@ -118,39 +134,52 @@ class TestGumbel(unittest.TestCase): @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), [ - ('value-float', np.array([0.1, 0.4]), np.array([1., 4. 
- ]), np.array([3., 7.])), + (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), + [ + ( + 'value-float', + np.array([0.1, 0.4]), + np.array([1.0, 4.0]), + np.array([3.0, 7.0]), + ), ('value-int', np.array([0.1, 0.4]), np.array([1, 4]), np.array([3, 7])), - ('value-multi-dim', np.array([0.1, 0.4]), np.array( - [1, 4]), np.array([[5., 4], [6, 2]])), - ]) + ( + 'value-multi-dim', + np.array([0.1, 0.4]), + np.array([1, 4]), + np.array([[5.0, 4], [6, 2]]), + ), + ], +) class TestGumbelPDF(unittest.TestCase): - def setUp(self): - self._dist = Gumbel(loc=paddle.to_tensor(self.loc), - scale=paddle.to_tensor(self.scale)) + self._dist = Gumbel( + loc=paddle.to_tensor(self.loc), scale=paddle.to_tensor(self.scale) + ) def test_prob(self): np.testing.assert_allclose( self._dist.prob(paddle.to_tensor(self.value)), scipy.stats.gumbel_r.pdf(self.value, self.loc, self.scale), rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_log_prob(self): np.testing.assert_allclose( self._dist.log_prob(paddle.to_tensor(self.value)), scipy.stats.gumbel_r.logpdf(self.value, self.loc, self.scale), rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_cdf(self): - np.testing.assert_allclose(self._dist.cdf(paddle.to_tensor(self.value)), - scipy.stats.gumbel_r.cdf( - self.value, self.loc, self.scale), - rtol=0.02, - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self._dist.cdf(paddle.to_tensor(self.value)), + scipy.stats.gumbel_r.cdf(self.value, self.loc, self.scale), + rtol=0.02, + atol=config.ATOL.get(str(self.loc.dtype)), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel_static.py index 1c39da5fccf378eb19ad8f677e469591909c48ad..7e0176dee561ced3def871d4f2ab92eb00a61a7f 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_gumbel_static.py @@ -27,20 +27,23 @@ paddle.enable_static() @parameterize.place(config.DEVICES) -@parameterize.parameterize_cls((parameterize.TEST_CASE_NAME, 'loc', 'scale'), [ - ('one-dim', parameterize.xrand((4, )), parameterize.xrand((4, ))), - ('multi-dim', parameterize.xrand((5, 3)), parameterize.xrand((5, 3))), -]) +@parameterize.parameterize_cls( + (parameterize.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('one-dim', parameterize.xrand((4,)), parameterize.xrand((4,))), + ('multi-dim', parameterize.xrand((5, 3)), parameterize.xrand((5, 3))), + ], +) class TestGumbel(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) self._dist = Gumbel(loc=loc, scale=scale) self.sample_shape = [50000] mean = self._dist.mean @@ -52,50 +55,63 @@ class TestGumbel(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale} executor.run(startup_program) - [self.mean, self.var, self.stddev, self.entropy, - self.samples] = 
executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [ + self.mean, + self.var, + self.stddev, + self.entropy, + self.samples, + ] = executor.run(main_program, feed=self.feeds, fetch_list=fetch_list) def test_mean(self): self.assertEqual(str(self.mean.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_variance(self): self.assertEqual(str(self.var.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.var, - self._np_variance(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.var, + self._np_variance(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_stddev(self): self.assertEqual( - str(self.stddev.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.stddev, - self._np_stddev(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + str(self.stddev.dtype).split('.')[-1], self.scale.dtype + ) + np.testing.assert_allclose( + self.stddev, + self._np_stddev(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): self.assertEqual( - str(self.entropy.dtype).split('.')[-1], self.scale.dtype) + str(self.entropy.dtype).split('.')[-1], self.scale.dtype + ) def test_sample(self): self.assertEqual(self.samples.dtype, self.scale.dtype) - np.testing.assert_allclose(self.samples.mean(axis=0), - scipy.stats.gumbel_r.mean(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.scale.dtype))) - np.testing.assert_allclose(self.samples.var(axis=0), - scipy.stats.gumbel_r.var(self.loc, - scale=self.scale), - rtol=0.1, - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.samples.mean(axis=0), + scipy.stats.gumbel_r.mean(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.scale.dtype)), + ) + np.testing.assert_allclose( + self.samples.var(axis=0), + scipy.stats.gumbel_r.var(self.loc, scale=self.scale), + rtol=0.1, + atol=config.ATOL.get(str(self.scale.dtype)), + ) def _np_mean(self): return self.loc + self.scale * np.euler_gamma @@ -105,7 +121,8 @@ class TestGumbel(unittest.TestCase): def _np_variance(self): return np.divide( - np.multiply(np.power(self.scale, 2), np.power(np.pi, 2)), 6) + np.multiply(np.power(self.scale, 2), np.power(np.pi, 2)), 6 + ) def _np_entropy(self): return np.log(self.scale) + 1 + np.euler_gamma @@ -113,15 +130,24 @@ class TestGumbel(unittest.TestCase): @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), [ - ('value-float', np.array([0.1, 0.4]), np.array([1., 4. 
- ]), np.array([3., 7.])), + (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), + [ + ( + 'value-float', + np.array([0.1, 0.4]), + np.array([1.0, 4.0]), + np.array([3.0, 7.0]), + ), ('value-int', np.array([0.1, 0.4]), np.array([1, 4]), np.array([3, 7])), - ('value-multi-dim', np.array([0.1, 0.4]), np.array( - [1, 4]), np.array([[5., 4], [6, 2]])), - ]) + ( + 'value-multi-dim', + np.array([0.1, 0.4]), + np.array([1, 4]), + np.array([[5.0, 4], [6, 2]]), + ), + ], +) class TestGumbelPDF(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() @@ -129,10 +155,12 @@ class TestGumbelPDF(unittest.TestCase): with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) - value = paddle.static.data('value', self.value.shape, - self.value.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) + value = paddle.static.data( + 'value', self.value.shape, self.value.dtype + ) self._dist = Gumbel(loc=loc, scale=scale) prob = self._dist.prob(value) log_prob = self._dist.log_prob(value) @@ -141,31 +169,33 @@ class TestGumbelPDF(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale, 'value': self.value} executor.run(startup_program) - [self.prob, self.log_prob, - self.cdf] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [self.prob, self.log_prob, self.cdf] = executor.run( + main_program, feed=self.feeds, fetch_list=fetch_list + ) def test_prob(self): - np.testing.assert_allclose(self.prob, - scipy.stats.gumbel_r.pdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.prob, + scipy.stats.gumbel_r.pdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_log_prob(self): - np.testing.assert_allclose(self.log_prob, - scipy.stats.gumbel_r.logpdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.log_prob, + scipy.stats.gumbel_r.logpdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_cdf(self): - np.testing.assert_allclose(self.cdf, - scipy.stats.gumbel_r.cdf( - self.value, self.loc, self.scale), - rtol=0.3, - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.cdf, + scipy.stats.gumbel_r.cdf(self.value, self.loc, self.scale), + rtol=0.3, + atol=config.ATOL.get(str(self.loc.dtype)), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent.py index eef5b5a6e442cbcb4f25d2c6c042af3d811a2b83..c4c83ecb12b3c6ca051635e5a9f619e17d2f1774 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent.py @@ -25,35 +25,45 @@ np.random.seed(2022) @param.place(config.DEVICES) @param.param_cls( (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank'), - [('base_beta', - paddle.distribution.Beta(paddle.rand([1, 2]), paddle.rand([1, 2])), 1)]) + [ + ( + 'base_beta', 
+ paddle.distribution.Beta(paddle.rand([1, 2]), paddle.rand([1, 2])), + 1, + ) + ], +) class TestIndependent(unittest.TestCase): - def setUp(self): - self._t = paddle.distribution.Independent(self.base, - self.reinterpreted_batch_rank) + self._t = paddle.distribution.Independent( + self.base, self.reinterpreted_batch_rank + ) def test_mean(self): np.testing.assert_allclose( self.base.mean, self._t.mean, rtol=config.RTOL.get(str(self.base.alpha.numpy().dtype)), - atol=config.ATOL.get(str(self.base.alpha.numpy().dtype))) + atol=config.ATOL.get(str(self.base.alpha.numpy().dtype)), + ) def test_variance(self): np.testing.assert_allclose( self.base.variance, self._t.variance, rtol=config.RTOL.get(str(self.base.alpha.numpy().dtype)), - atol=config.ATOL.get(str(self.base.alpha.numpy().dtype))) + atol=config.ATOL.get(str(self.base.alpha.numpy().dtype)), + ) def test_entropy(self): np.testing.assert_allclose( - self._np_sum_rightmost(self.base.entropy().numpy(), - self.reinterpreted_batch_rank), + self._np_sum_rightmost( + self.base.entropy().numpy(), self.reinterpreted_batch_rank + ), self._t.entropy(), rtol=config.RTOL.get(str(self.base.alpha.numpy().dtype)), - atol=config.ATOL.get(str(self.base.alpha.numpy().dtype))) + atol=config.ATOL.get(str(self.base.alpha.numpy().dtype)), + ) def _np_sum_rightmost(self, value, n): return np.sum(value, tuple(range(-n, 0))) if n > 0 else value @@ -63,10 +73,12 @@ class TestIndependent(unittest.TestCase): np.testing.assert_allclose( self._np_sum_rightmost( self.base.log_prob(paddle.to_tensor(value)).numpy(), - self.reinterpreted_batch_rank), + self.reinterpreted_batch_rank, + ), self._t.log_prob(paddle.to_tensor(value)).numpy(), rtol=config.RTOL.get(str(self.base.alpha.numpy().dtype)), - atol=config.ATOL.get(str(self.base.alpha.numpy().dtype))) + atol=config.ATOL.get(str(self.base.alpha.numpy().dtype)), + ) # TODO(cxxly): Add Kolmogorov-Smirnov test for sample result. 
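The entropy and log_prob assertions in this test reduce the base Beta distribution's per-element values over the reinterpreted batch dimensions before comparing them with Independent. A minimal NumPy sketch of that reduction, mirroring the tests' _np_sum_rightmost helper (the toy array shape here is an illustrative assumption):

import numpy as np

def sum_rightmost(value, n):
    # Sum away the last n axes; with n == 0 the input is returned unchanged.
    return value.sum(axis=tuple(range(-n, 0))) if n > 0 else value

# Per-element log-probabilities with batch_shape (1, 2); reinterpreted_batch_rank=1
# folds the trailing batch axis into the event, so log_prob collapses to shape (1,).
elementwise_log_prob = np.log(np.random.rand(1, 2))
print(sum_rightmost(elementwise_log_prob, 1).shape)  # prints (1,)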
def test_sample(self): @@ -79,16 +91,28 @@ class TestIndependent(unittest.TestCase): @param.place(config.DEVICES) @param.param_cls( - (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', - 'expected_exception'), - [('base_not_transform', '', 1, TypeError), - ('rank_less_than_zero', paddle.distribution.Transform(), -1, ValueError)]) + ( + param.TEST_CASE_NAME, + 'base', + 'reinterpreted_batch_rank', + 'expected_exception', + ), + [ + ('base_not_transform', '', 1, TypeError), + ( + 'rank_less_than_zero', + paddle.distribution.Transform(), + -1, + ValueError, + ), + ], +) class TestIndependentException(unittest.TestCase): - def test_init(self): with self.assertRaises(self.expected_exception): paddle.distribution.IndependentTransform( - self.base, self.reinterpreted_batch_rank) + self.base, self.reinterpreted_batch_rank + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent_static.py index 47bf1021e1a3a5ccf6f1285018c426b5910cef76..06881bdcdc244692e64f68cad72712a5f8666849 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_independent_static.py @@ -26,10 +26,17 @@ paddle.enable_static() @param.place(config.DEVICES) @param.param_cls( (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', 'alpha', 'beta'), - [('base_beta', paddle.distribution.Beta, 1, np.random.rand( - 1, 2), np.random.rand(1, 2))]) + [ + ( + 'base_beta', + paddle.distribution.Beta, + 1, + np.random.rand(1, 2), + np.random.rand(1, 2), + ) + ], +) class TestIndependent(unittest.TestCase): - def setUp(self): value = np.random.rand(1) self.dtype = value.dtype @@ -37,12 +44,14 @@ class TestIndependent(unittest.TestCase): sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): - alpha = paddle.static.data('alpha', self.alpha.shape, - self.alpha.dtype) + alpha = paddle.static.data( + 'alpha', self.alpha.shape, self.alpha.dtype + ) beta = paddle.static.data('beta', self.beta.shape, self.beta.dtype) self.base = self.base(alpha, beta) - t = paddle.distribution.Independent(self.base, - self.reinterpreted_batch_rank) + t = paddle.distribution.Independent( + self.base, self.reinterpreted_batch_rank + ) mean = t.mean variance = t.variance entropy = t.entropy() @@ -55,50 +64,69 @@ class TestIndependent(unittest.TestCase): base_log_prob = self.base.log_prob(static_value) fetch_list = [ - mean, variance, entropy, log_prob, base_mean, base_variance, - base_entropy, base_log_prob + mean, + variance, + entropy, + log_prob, + base_mean, + base_variance, + base_entropy, + base_log_prob, ] exe.run(sp) [ - self.mean, self.variance, self.entropy, self.log_prob, - self.base_mean, self.base_variance, self.base_entropy, - self.base_log_prob - ] = exe.run(mp, - feed={ - 'value': value, - 'alpha': self.alpha, - 'beta': self.beta - }, - fetch_list=fetch_list) + self.mean, + self.variance, + self.entropy, + self.log_prob, + self.base_mean, + self.base_variance, + self.base_entropy, + self.base_log_prob, + ] = exe.run( + mp, + feed={'value': value, 'alpha': self.alpha, 'beta': self.beta}, + fetch_list=fetch_list, + ) def test_mean(self): - np.testing.assert_allclose(self.mean, - self.base_mean, - rtol=config.RTOL.get(str(self.dtype)), - atol=config.ATOL.get(str(self.dtype))) + np.testing.assert_allclose( + self.mean, + 
self.base_mean, + rtol=config.RTOL.get(str(self.dtype)), + atol=config.ATOL.get(str(self.dtype)), + ) def test_variance(self): - np.testing.assert_allclose(self.variance, - self.base_variance, - rtol=config.RTOL.get(str(self.dtype)), - atol=config.ATOL.get(str(self.dtype))) + np.testing.assert_allclose( + self.variance, + self.base_variance, + rtol=config.RTOL.get(str(self.dtype)), + atol=config.ATOL.get(str(self.dtype)), + ) def test_entropy(self): - np.testing.assert_allclose(self._np_sum_rightmost( - self.base_entropy, self.reinterpreted_batch_rank), - self.entropy, - rtol=config.RTOL.get(str(self.dtype)), - atol=config.ATOL.get(str(self.dtype))) + np.testing.assert_allclose( + self._np_sum_rightmost( + self.base_entropy, self.reinterpreted_batch_rank + ), + self.entropy, + rtol=config.RTOL.get(str(self.dtype)), + atol=config.ATOL.get(str(self.dtype)), + ) def _np_sum_rightmost(self, value, n): return np.sum(value, tuple(range(-n, 0))) if n > 0 else value def test_log_prob(self): - np.testing.assert_allclose(self._np_sum_rightmost( - self.base_log_prob, self.reinterpreted_batch_rank), - self.log_prob, - rtol=config.RTOL.get(str(self.dtype)), - atol=config.ATOL.get(str(self.dtype))) + np.testing.assert_allclose( + self._np_sum_rightmost( + self.base_log_prob, self.reinterpreted_batch_rank + ), + self.log_prob, + rtol=config.RTOL.get(str(self.dtype)), + atol=config.ATOL.get(str(self.dtype)), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace.py index 867fc1846a34d58ec3fd06d27d33364c447d228e..f93e3fac71a36989e7a5f7d9d9eb4711ab3bee76 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace.py @@ -23,42 +23,47 @@ import parameterize @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc', 'scale'), [ - ('one-dim', parameterize.xrand((2, )),\ - parameterize.xrand((2, ))), - ('multi-dim', parameterize.xrand((5, 5)),\ - parameterize.xrand((5, 5))), - ]) + (parameterize.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('one-dim', parameterize.xrand((2,)), parameterize.xrand((2,))), + ('multi-dim', parameterize.xrand((5, 5)), parameterize.xrand((5, 5))), + ], +) class TestLaplace(unittest.TestCase): - def setUp(self): - self._dist = paddle.distribution.Laplace(loc=paddle.to_tensor(self.loc), - scale=paddle.to_tensor(\ - self.scale)) + self._dist = paddle.distribution.Laplace( + loc=paddle.to_tensor(self.loc), scale=paddle.to_tensor(self.scale) + ) def test_mean(self): mean = self._dist.mean self.assertEqual(mean.numpy().dtype, self.scale.dtype) - np.testing.assert_allclose(mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_variance(self): var = self._dist.variance self.assertEqual(var.numpy().dtype, self.scale.dtype) - np.testing.assert_allclose(var, - self._np_variance(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + var, + self._np_variance(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_stddev(self): stddev = self._dist.stddev 
self.assertEqual(stddev.numpy().dtype, self.scale.dtype) - np.testing.assert_allclose(stddev, - self._np_stddev(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + stddev, + self._np_stddev(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): entropy = self._dist.entropy() @@ -66,27 +71,30 @@ class TestLaplace(unittest.TestCase): def test_sample(self): - sample_shape = (50000, ) + sample_shape = (50000,) samples = self._dist.sample(sample_shape) sample_values = samples.numpy() self.assertEqual(samples.numpy().dtype, self.scale.dtype) - self.assertEqual(tuple(samples.shape), - tuple(self._dist._extend_shape(sample_shape))) + self.assertEqual( + tuple(samples.shape), tuple(self._dist._extend_shape(sample_shape)) + ) self.assertEqual(samples.shape, list(sample_shape + self.loc.shape)) self.assertEqual(sample_values.shape, sample_shape + self.loc.shape) - np.testing.assert_allclose(sample_values.mean(axis=0), - scipy.stats.laplace.mean(self.loc, - scale=self.scale), - rtol=0.2, - atol=0.) - np.testing.assert_allclose(sample_values.var(axis=0), - scipy.stats.laplace.var(self.loc, - scale=self.scale), - rtol=0.1, - atol=0.) + np.testing.assert_allclose( + sample_values.mean(axis=0), + scipy.stats.laplace.mean(self.loc, scale=self.scale), + rtol=0.2, + atol=0.0, + ) + np.testing.assert_allclose( + sample_values.var(axis=0), + scipy.stats.laplace.var(self.loc, scale=self.scale), + rtol=0.1, + atol=0.0, + ) def _np_mean(self): return self.loc @@ -103,18 +111,20 @@ class TestLaplace(unittest.TestCase): @parameterize.place(config.DEVICES) -@parameterize.parameterize_cls((parameterize.TEST_CASE_NAME, 'loc', 'scale'), [ - ('float', 1., 2.), - ('int', 3, 4), -]) +@parameterize.parameterize_cls( + (parameterize.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('float', 1.0, 2.0), + ('int', 3, 4), + ], +) class TestLaplaceKS(unittest.TestCase): - def setUp(self): self._dist = paddle.distribution.Laplace(loc=self.loc, scale=self.scale) def test_sample(self): - sample_shape = (20000, ) + sample_shape = (20000,) samples = self._dist.sample(sample_shape) sample_values = samples.numpy() self.assertTrue(self._kstest(self.loc, self.scale, sample_values)) @@ -122,8 +132,8 @@ class TestLaplaceKS(unittest.TestCase): def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. 
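The _kstest helper defined here uses scipy.stats.kstest and treats a Kolmogorov-Smirnov statistic below 0.02 as a good fit. A standalone sketch of the same check, with NumPy draws standing in for paddle.distribution.Laplace.sample (the loc, scale and sample count are illustrative assumptions):

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
loc, scale = 1.0, 2.0
# Stand-in for paddle.distribution.Laplace(loc, scale).sample((20000,)).
samples = rng.laplace(loc, scale, size=20000)
ks, p_value = scipy.stats.kstest(samples, scipy.stats.laplace(loc, scale=scale).cdf)
print(ks < 0.02)  # the unit test's acceptance criterion

The comparison is made against the fitted distribution's CDF callable, so no binning or histogramming is needed.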
ks, p_value = scipy.stats.kstest( - samples, - scipy.stats.laplace(loc, scale=scale).cdf) + samples, scipy.stats.laplace(loc, scale=scale).cdf + ) return ks < 0.02 @@ -131,78 +141,101 @@ class TestLaplaceKS(unittest.TestCase): @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), [ - ('value-float', np.array([0.2, 0.3]),\ - np.array([2., 3.]), np.array([2., 5.])), - ('value-int', np.array([0.2, 0.3]),\ - np.array([2., 3.]), np.array([2, 5])), - ('value-multi-dim', np.array([0.2, 0.3]), np.array([2., 3.]),\ - np.array([[4., 6], [8, 2]])), - ]) + ( + 'value-float', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([2.0, 5.0]), + ), + ( + 'value-int', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([2, 5]), + ), + ( + 'value-multi-dim', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([[4.0, 6], [8, 2]]), + ), + ], +) class TestLaplacePDF(unittest.TestCase): - def setUp(self): - self._dist = paddle.distribution.Laplace(loc=paddle.to_tensor(self.loc), - scale=paddle.to_tensor(\ - self.scale)) + self._dist = paddle.distribution.Laplace( + loc=paddle.to_tensor(self.loc), scale=paddle.to_tensor(self.scale) + ) def test_prob(self): np.testing.assert_allclose( self._dist.prob(paddle.to_tensor(self.value)), scipy.stats.laplace.pdf(self.value, self.loc, self.scale), rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_log_prob(self): np.testing.assert_allclose( self._dist.log_prob(paddle.to_tensor(self.value)), scipy.stats.laplace.logpdf(self.value, self.loc, self.scale), rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_cdf(self): - np.testing.assert_allclose(self._dist.cdf(paddle.to_tensor(self.value)), - scipy.stats.laplace.cdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self._dist.cdf(paddle.to_tensor(self.value)), + scipy.stats.laplace.cdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_icdf(self): np.testing.assert_allclose( self._dist.icdf(paddle.to_tensor(self.value)), scipy.stats.laplace.ppf(self.value, self.loc, self.scale), rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + atol=config.ATOL.get(str(self.loc.dtype)), + ) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc1', 'scale1',\ - 'loc2', 'scale2'), [ - ('kl', np.array([0.0]), np.array([1.0]), \ - np.array([1.0]), np.array([0.5])) - ]) + (parameterize.TEST_CASE_NAME, 'loc1', 'scale1', 'loc2', 'scale2'), + [ + ( + 'kl', + np.array([0.0]), + np.array([1.0]), + np.array([1.0]), + np.array([0.5]), + ) + ], +) class TestLaplaceAndLaplaceKL(unittest.TestCase): - def setUp(self): - self._dist_1 = paddle.distribution.Laplace(loc=paddle.to_tensor(self.loc1), - scale=paddle.to_tensor(\ - self.scale1)) - self._dist_2 = paddle.distribution.Laplace(loc=paddle.to_tensor(self.loc2), - scale=paddle.to_tensor(\ - self.scale2)) + self._dist_1 = paddle.distribution.Laplace( + loc=paddle.to_tensor(self.loc1), scale=paddle.to_tensor(self.scale1) + ) + self._dist_2 = paddle.distribution.Laplace( + loc=paddle.to_tensor(self.loc2), scale=paddle.to_tensor(self.scale2) + ) def test_kl_divergence(self): - 
np.testing.assert_allclose(paddle.distribution.kl_divergence( - self._dist_1, self._dist_2), - self._np_kl(), - atol=0, - rtol=0.50) + np.testing.assert_allclose( + paddle.distribution.kl_divergence(self._dist_1, self._dist_2), + self._np_kl(), + atol=0, + rtol=0.50, + ) def _np_kl(self): - x = np.linspace(scipy.stats.laplace.ppf(0.01),\ - scipy.stats.laplace.ppf(0.99), 1000) - d1 = scipy.stats.laplace.pdf(x, loc=0., scale=1.) - d2 = scipy.stats.laplace.pdf(x, loc=1., scale=0.5) + x = np.linspace( + scipy.stats.laplace.ppf(0.01), scipy.stats.laplace.ppf(0.99), 1000 + ) + d1 = scipy.stats.laplace.pdf(x, loc=0.0, scale=1.0) + d2 = scipy.stats.laplace.pdf(x, loc=1.0, scale=0.5) return scipy.stats.entropy(d1, d2) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace_static.py index 62fe225849c43bdf6a1f529a0f2cc8739768a3d7..7c8ecf1609b5041cf25e3482b809ad1236ae77cc 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_laplace_static.py @@ -26,24 +26,24 @@ paddle.enable_static() @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc', 'scale'), [ - ('one-dim', parameterize.xrand((2, )),\ - parameterize.xrand((2, ))), - ('multi-dim', parameterize.xrand((5, 5)),\ - parameterize.xrand((5, 5))), - ]) + (parameterize.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('one-dim', parameterize.xrand((2,)), parameterize.xrand((2,))), + ('multi-dim', parameterize.xrand((5, 5)), parameterize.xrand((5, 5))), + ], +) class TestLaplace(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) self._dist = paddle.distribution.Laplace(loc=loc, scale=scale) - self.sample_shape = (50000, ) + self.sample_shape = (50000,) mean = self._dist.mean var = self._dist.variance stddev = self._dist.stddev @@ -53,56 +53,71 @@ class TestLaplace(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale} executor.run(startup_program) - [self.mean, self.var, self.stddev, self.entropy, - self.samples] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [ + self.mean, + self.var, + self.stddev, + self.entropy, + self.samples, + ] = executor.run(main_program, feed=self.feeds, fetch_list=fetch_list) def test_mean(self): self.assertEqual(str(self.mean.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_variance(self): self.assertEqual(str(self.var.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.var, - self._np_variance(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.var, + self._np_variance(), + 
rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_stddev(self): self.assertEqual( - str(self.stddev.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.stddev, - self._np_stddev(), - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + str(self.stddev.dtype).split('.')[-1], self.scale.dtype + ) + np.testing.assert_allclose( + self.stddev, + self._np_stddev(), + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): self.assertEqual( - str(self.entropy.dtype).split('.')[-1], self.scale.dtype) + str(self.entropy.dtype).split('.')[-1], self.scale.dtype + ) def test_sample(self): self.assertEqual(self.samples.dtype, self.scale.dtype) - self.assertEqual(tuple(self.samples.shape), - tuple(self._dist._extend_shape(self.sample_shape))) + self.assertEqual( + tuple(self.samples.shape), + tuple(self._dist._extend_shape(self.sample_shape)), + ) self.assertEqual(self.samples.shape, self.sample_shape + self.loc.shape) self.assertEqual(self.samples.shape, self.sample_shape + self.loc.shape) - np.testing.assert_allclose(self.samples.mean(axis=0), - scipy.stats.laplace.mean(self.loc, - scale=self.scale), - rtol=0.2, - atol=0.) - np.testing.assert_allclose(self.samples.var(axis=0), - scipy.stats.laplace.var(self.loc, - scale=self.scale), - rtol=0.1, - atol=0.) + np.testing.assert_allclose( + self.samples.mean(axis=0), + scipy.stats.laplace.mean(self.loc, scale=self.scale), + rtol=0.2, + atol=0.0, + ) + np.testing.assert_allclose( + self.samples.var(axis=0), + scipy.stats.laplace.var(self.loc, scale=self.scale), + rtol=0.1, + atol=0.0, + ) def _np_mean(self): return self.loc @@ -122,15 +137,27 @@ class TestLaplace(unittest.TestCase): @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'loc', 'scale', 'value'), [ - ('value-float', np.array([0.2, 0.3]),\ - np.array([2., 3.]), np.array([2., 5.])), - ('value-int', np.array([0.2, 0.3]),\ - np.array([2., 3.]), np.array([2, 5])), - ('value-multi-dim', np.array([0.2, 0.3]), np.array([2., 3.]),\ - np.array([[4., 6], [8, 2]])), - ]) + ( + 'value-float', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([2.0, 5.0]), + ), + ( + 'value-int', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([2, 5]), + ), + ( + 'value-multi-dim', + np.array([0.2, 0.3]), + np.array([2.0, 3.0]), + np.array([[4.0, 6], [8, 2]]), + ), + ], +) class TestLaplacePDF(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() @@ -138,10 +165,12 @@ class TestLaplacePDF(unittest.TestCase): with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) - value = paddle.static.data('value', self.value.shape, - self.value.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) + value = paddle.static.data( + 'value', self.value.shape, self.value.dtype + ) self._dist = paddle.distribution.Laplace(loc=loc, scale=scale) prob = self._dist.prob(value) log_prob = self._dist.log_prob(value) @@ -151,49 +180,57 @@ class TestLaplacePDF(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale, 'value': self.value} executor.run(startup_program) - [self.prob, self.log_prob, self.cdf, - self.icdf] = executor.run(main_program, - feed=self.feeds, - 
fetch_list=fetch_list) + [self.prob, self.log_prob, self.cdf, self.icdf] = executor.run( + main_program, feed=self.feeds, fetch_list=fetch_list + ) def test_prob(self): - np.testing.assert_allclose(self.prob, - scipy.stats.laplace.pdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.prob, + scipy.stats.laplace.pdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_log_prob(self): - np.testing.assert_allclose(self.log_prob, - scipy.stats.laplace.logpdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.log_prob, + scipy.stats.laplace.logpdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_cdf(self): - np.testing.assert_allclose(self.cdf, - scipy.stats.laplace.cdf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.cdf, + scipy.stats.laplace.cdf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_icdf(self): - np.testing.assert_allclose(self.icdf, - scipy.stats.laplace.ppf( - self.value, self.loc, self.scale), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + np.testing.assert_allclose( + self.icdf, + scipy.stats.laplace.ppf(self.value, self.loc, self.scale), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'loc1', 'scale1',\ - 'loc2', 'scale2'), [ - ('kl', np.array([0.0]), np.array([1.0]), \ - np.array([1.0]), np.array([0.5])) - ]) + (parameterize.TEST_CASE_NAME, 'loc1', 'scale1', 'loc2', 'scale2'), + [ + ( + 'kl', + np.array([0.0]), + np.array([1.0]), + np.array([1.0]), + np.array([0.5]), + ) + ], +) class TestLaplaceAndLaplaceKL(unittest.TestCase): - def setUp(self): self.mp = paddle.static.Program() self.sp = paddle.static.Program() @@ -201,34 +238,37 @@ class TestLaplaceAndLaplaceKL(unittest.TestCase): with paddle.static.program_guard(self.mp, self.sp): loc1 = paddle.static.data('loc1', self.loc1.shape, self.loc1.dtype) - scale1 = paddle.static.data('scale1', self.scale1.shape, - self.scale1.dtype) + scale1 = paddle.static.data( + 'scale1', self.scale1.shape, self.scale1.dtype + ) loc2 = paddle.static.data('loc2', self.loc2.shape, self.loc2.dtype) - scale2 = paddle.static.data('scale2', self.scale2.shape, - self.scale2.dtype) + scale2 = paddle.static.data( + 'scale2', self.scale2.shape, self.scale2.dtype + ) self._dist_1 = paddle.distribution.Laplace(loc=loc1, scale=scale1) self._dist_2 = paddle.distribution.Laplace(loc=loc2, scale=scale2) self.feeds = { 'loc1': self.loc1, 'scale1': self.scale1, 'loc2': self.loc2, - 'scale2': self.scale2 + 'scale2': self.scale2, } def test_kl_divergence(self): with paddle.static.program_guard(self.mp, self.sp): out = paddle.distribution.kl_divergence(self._dist_1, self._dist_2) self.executor.run(self.sp) - [out] = self.executor.run(self.mp, - feed=self.feeds, - fetch_list=[out]) + [out] = self.executor.run( + self.mp, feed=self.feeds, fetch_list=[out] + ) 
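Both the dygraph and static Laplace KL tests validate paddle.distribution.kl_divergence against a coarse numerical estimate (hence the loose rtol=0.50) rather than a closed form. The shared _np_kl helper boils down to this self-contained computation:

import numpy as np
import scipy.stats

# Numerical KL(Laplace(0, 1) || Laplace(1, 0.5)) on a 1000-point grid, as in _np_kl.
x = np.linspace(scipy.stats.laplace.ppf(0.01), scipy.stats.laplace.ppf(0.99), 1000)
d1 = scipy.stats.laplace.pdf(x, loc=0.0, scale=1.0)
d2 = scipy.stats.laplace.pdf(x, loc=1.0, scale=0.5)
# scipy.stats.entropy(p, q) normalizes both vectors and returns sum(p * log(p / q)).
print(scipy.stats.entropy(d1, d2))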
np.testing.assert_allclose(out, self._np_kl(), atol=0, rtol=0.50) def _np_kl(self): - x = np.linspace(scipy.stats.laplace.ppf(0.01),\ - scipy.stats.laplace.ppf(0.99), 1000) - d1 = scipy.stats.laplace.pdf(x, loc=0., scale=1.) - d2 = scipy.stats.laplace.pdf(x, loc=1., scale=0.5) + x = np.linspace( + scipy.stats.laplace.ppf(0.01), scipy.stats.laplace.ppf(0.99), 1000 + ) + d1 = scipy.stats.laplace.pdf(x, loc=0.0, scale=1.0) + d2 = scipy.stats.laplace.pdf(x, loc=1.0, scale=0.5) return scipy.stats.entropy(d1, d2) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal.py index a7c97047505c333c96775259240f0548cdb452ff..be51e1ecf0aa40477d9c5fe7ba40562e8a628c01 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal.py @@ -28,7 +28,6 @@ from test_distribution import DistributionNumpy class LogNormalNumpy(DistributionNumpy): - def __init__(self, loc, scale): self.loc = np.array(loc) self.scale = np.array(scale) @@ -49,67 +48,86 @@ class LogNormalNumpy(DistributionNumpy): def log_prob(self, value): var = self.scale * self.scale log_scale = np.log(self.scale) - return -( - (np.log(value) - self.loc) * - (np.log(value) - self.loc)) / (2. * var) - log_scale - math.log( - math.sqrt(2. * math.pi)) - np.log(value) + return ( + -((np.log(value) - self.loc) * (np.log(value) - self.loc)) + / (2.0 * var) + - log_scale + - math.log(math.sqrt(2.0 * math.pi)) + - np.log(value) + ) def probs(self, value): var = self.scale * self.scale return np.exp( - -1. * ((np.log(value) - self.loc) * (np.log(value) - self.loc)) / - (2. * var)) / (math.sqrt(2 * math.pi) * self.scale * value) + -1.0 + * ((np.log(value) - self.loc) * (np.log(value) - self.loc)) + / (2.0 * var) + ) / (math.sqrt(2 * math.pi) * self.scale * value) def entropy(self): - return 0.5 + self.loc + 0.5 * np.log( - np.array(2. 
* math.pi).astype(self.loc.dtype)) + np.log(self.scale) + return ( + 0.5 + + self.loc + + 0.5 * np.log(np.array(2.0 * math.pi).astype(self.loc.dtype)) + + np.log(self.scale) + ) def kl_divergence(self, other): - var_ratio = (self.scale / other.scale) + var_ratio = self.scale / other.scale var_ratio = var_ratio * var_ratio - t1 = ((self.loc - other.loc) / other.scale) - t1 = (t1 * t1) + t1 = (self.loc - other.loc) / other.scale + t1 = t1 * t1 return 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio)) @place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale', 'value'), - [('one-dim', xrand((2, )), xrand((2, )), xrand((2, ))), - ('multi-dim', xrand((3, 3)), xrand((3, 3)), xrand((3, 3)))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale', 'value'), + [ + ('one-dim', xrand((2,)), xrand((2,)), xrand((2,))), + ('multi-dim', xrand((3, 3)), xrand((3, 3)), xrand((3, 3))), + ], +) class LogNormalTest(unittest.TestCase): - def setUp(self): paddle.disable_static() - self.paddle_lognormal = LogNormal(loc=paddle.to_tensor(self.loc), - scale=paddle.to_tensor(self.scale)) + self.paddle_lognormal = LogNormal( + loc=paddle.to_tensor(self.loc), scale=paddle.to_tensor(self.scale) + ) self.np_lognormal = LogNormalNumpy(self.loc, self.scale) def test_mean(self): mean = self.paddle_lognormal.mean np_mean = self.np_lognormal.mean self.assertEqual(mean.numpy().dtype, np_mean.dtype) - np.testing.assert_allclose(mean, - np_mean, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + mean, + np_mean, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_variance(self): var = self.paddle_lognormal.variance np_var = self.np_lognormal.variance self.assertEqual(var.numpy().dtype, np_var.dtype) - np.testing.assert_allclose(var, - np_var, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + var, + np_var, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): entropy = self.paddle_lognormal.entropy() np_entropy = self.np_lognormal.entropy() self.assertEqual(entropy.numpy().dtype, np_entropy.dtype) - np.testing.assert_allclose(entropy, - np_entropy, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + entropy, + np_entropy, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_probs(self): with paddle.fluid.dygraph.guard(self.place): @@ -119,94 +137,110 @@ class LogNormalTest(unittest.TestCase): probs, np_probs, rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_log_prob(self): with paddle.fluid.dygraph.guard(self.place): log_prob = self.paddle_lognormal.log_prob( - paddle.to_tensor(self.value)) + paddle.to_tensor(self.value) + ) np_log_prob = self.np_lognormal.log_prob(self.value) np.testing.assert_allclose( log_prob, np_log_prob, rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + atol=config.ATOL.get(str(self.scale.dtype)), + ) @place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand( - (4, )), xrand((4, ), min=0, max=1))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale'), + [('sample', xrand((4,)), xrand((4,), min=0, max=1))], +) class 
TestLogNormalSample(unittest.TestCase): - def setUp(self): paddle.disable_static() self.paddle_lognormal = LogNormal(loc=self.loc, scale=self.scale) n = 100000 - self.sample_shape = (n, ) - self.rsample_shape = (n, ) + self.sample_shape = (n,) + self.rsample_shape = (n,) self.samples = self.paddle_lognormal.sample(self.sample_shape) self.rsamples = self.paddle_lognormal.rsample(self.rsample_shape) def test_sample(self): samples_mean = self.samples.mean(axis=0) samples_var = self.samples.var(axis=0) - np.testing.assert_allclose(samples_mean, - self.paddle_lognormal.mean, - rtol=0.1, - atol=0) - np.testing.assert_allclose(samples_var, - self.paddle_lognormal.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + samples_mean, self.paddle_lognormal.mean, rtol=0.1, atol=0 + ) + np.testing.assert_allclose( + samples_var, self.paddle_lognormal.variance, rtol=0.1, atol=0 + ) rsamples_mean = self.rsamples.mean(axis=0) rsamples_var = self.rsamples.var(axis=0) - np.testing.assert_allclose(rsamples_mean, - self.paddle_lognormal.mean, - rtol=0.1, - atol=0) - np.testing.assert_allclose(rsamples_var, - self.paddle_lognormal.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + rsamples_mean, self.paddle_lognormal.mean, rtol=0.1, atol=0 + ) + np.testing.assert_allclose( + rsamples_var, self.paddle_lognormal.variance, rtol=0.1, atol=0 + ) batch_shape = (self.loc + self.scale).shape - self.assertEqual(self.samples.shape, - list(self.sample_shape + batch_shape)) - self.assertEqual(self.rsamples.shape, - list(self.rsample_shape + batch_shape)) + self.assertEqual( + self.samples.shape, list(self.sample_shape + batch_shape) + ) + self.assertEqual( + self.rsamples.shape, list(self.rsample_shape + batch_shape) + ) for i in range(len(self.scale)): self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.samples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.samples[:, i]) + ) self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i]) + ) def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. 
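The sampling test and its _kstest helper rely on SciPy's log-normal parameterization: scipy.stats.lognorm(s=sigma, scale=np.exp(mu)) matches LogNormal(loc=mu, scale=sigma), where loc and scale are the mean and standard deviation of the underlying normal. A small sketch of that mapping, with NumPy draws standing in for the paddle samples (mu, sigma and the sample size are illustrative assumptions):

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
mu, sigma = 0.5, 0.8
# LogNormal(mu, sigma) samples are the exponential of Normal(mu, sigma) draws.
samples = np.exp(rng.normal(mu, sigma, size=100000))
ks, _ = scipy.stats.kstest(samples, scipy.stats.lognorm(s=sigma, scale=np.exp(mu)).cdf)
print(ks < 0.02)  # same threshold as the _kstest helpers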
ks, _ = scipy.stats.kstest( - samples, - scipy.stats.lognorm(s=scale, scale=np.exp(loc)).cdf) + samples, scipy.stats.lognorm(s=scale, scale=np.exp(loc)).cdf + ) return ks < 0.02 @place(config.DEVICES) @parameterize_cls( (TEST_CASE_NAME, 'loc1', 'scale1', 'loc2', 'scale2'), - [('one-dim', xrand((2, )), xrand((2, )), xrand((2, )), xrand((2, ))), - ('multi-dim', xrand((2, 2)), xrand((2, 2)), xrand((2, 2)), xrand((2, 2)))]) + [ + ('one-dim', xrand((2,)), xrand((2,)), xrand((2,)), xrand((2,))), + ( + 'multi-dim', + xrand((2, 2)), + xrand((2, 2)), + xrand((2, 2)), + xrand((2, 2)), + ), + ], +) class TestLogNormalKL(unittest.TestCase): - def setUp(self): paddle.disable_static() - self.ln_a = LogNormal(loc=paddle.to_tensor(self.loc1), - scale=paddle.to_tensor(self.scale1)) - self.ln_b = LogNormal(loc=paddle.to_tensor(self.loc2), - scale=paddle.to_tensor(self.scale2)) - self.normal_a = Normal(loc=paddle.to_tensor(self.loc1), - scale=paddle.to_tensor(self.scale1)) - self.normal_b = Normal(loc=paddle.to_tensor(self.loc2), - scale=paddle.to_tensor(self.scale2)) + self.ln_a = LogNormal( + loc=paddle.to_tensor(self.loc1), scale=paddle.to_tensor(self.scale1) + ) + self.ln_b = LogNormal( + loc=paddle.to_tensor(self.loc2), scale=paddle.to_tensor(self.scale2) + ) + self.normal_a = Normal( + loc=paddle.to_tensor(self.loc1), scale=paddle.to_tensor(self.scale1) + ) + self.normal_b = Normal( + loc=paddle.to_tensor(self.loc2), scale=paddle.to_tensor(self.scale2) + ) def test_kl_divergence(self): kl0 = self.ln_a.kl_divergence(self.ln_b) @@ -216,28 +250,34 @@ class TestLogNormalKL(unittest.TestCase): self.assertEqual(tuple(kl0.shape), self.scale1.shape) self.assertEqual(tuple(kl1.shape), self.scale1.shape) - np.testing.assert_allclose(kl0, - kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) - np.testing.assert_allclose(kl1, - kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) - np.testing.assert_allclose(kl_normal, - kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) + np.testing.assert_allclose( + kl0, + kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) + np.testing.assert_allclose( + kl1, + kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) + np.testing.assert_allclose( + kl_normal, + kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) def _kl(self, dist1, dist2): loc1 = np.array(dist1.loc) loc2 = np.array(dist2.loc) scale1 = np.array(dist1.scale) scale2 = np.array(dist2.scale) - var_ratio = (scale1 / scale2) + var_ratio = scale1 / scale2 var_ratio = var_ratio * var_ratio - t1 = ((loc1 - loc2) / scale2) - t1 = (t1 * t1) + t1 = (loc1 - loc2) / scale2 + t1 = t1 * t1 return 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio)) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal_static.py index 75a9e497f34b7f263510aab39dc1d490c105a4b8..9e5cab2da375128bfe62192ea1c5e3ead39b01cd 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_lognormal_static.py @@ -27,11 +27,14 @@ from test_distribution_lognormal import LogNormalNumpy 
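LogNormalNumpy, the reference implementation imported here, uses the closed-form entropy 0.5 + loc + 0.5 * log(2 * pi) + log(scale). A quick scalar sanity check of that formula against SciPy (the particular loc and scale values are arbitrary):

import math
import scipy.stats

mu, sigma = 0.3, 0.7
entropy = 0.5 + mu + 0.5 * math.log(2.0 * math.pi) + math.log(sigma)
# SciPy's frozen lognorm exposes the same differential entropy.
reference = float(scipy.stats.lognorm(s=sigma, scale=math.exp(mu)).entropy())
print(abs(entropy - reference) < 1e-6)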
@place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale', 'value'), - [('one-dim', xrand((2, )), xrand((2, )), xrand((2, ))), - ('multi-dim', xrand((3, 3)), xrand((3, 3)), xrand((3, 3)))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale', 'value'), + [ + ('one-dim', xrand((2,)), xrand((2,)), xrand((2,))), + ('multi-dim', xrand((3, 3)), xrand((3, 3)), xrand((3, 3))), + ], +) class TestLogNormal(unittest.TestCase): - def setUp(self): paddle.enable_static() startup_program = paddle.static.Program() @@ -39,10 +42,12 @@ class TestLogNormal(unittest.TestCase): executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) - value = paddle.static.data('value', self.value.shape, - self.value.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) + value = paddle.static.data( + 'value', self.value.shape, self.value.dtype + ) self.paddle_lognormal = LogNormal(loc=loc, scale=scale) self.np_lognormal = LogNormalNumpy(loc=self.loc, scale=self.scale) mean = self.paddle_lognormal.mean @@ -54,56 +59,71 @@ class TestLogNormal(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale, 'value': self.value} executor.run(startup_program) - [self.mean, self.var, self.entropy, self.probs, - self.log_prob] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [ + self.mean, + self.var, + self.entropy, + self.probs, + self.log_prob, + ] = executor.run(main_program, feed=self.feeds, fetch_list=fetch_list) def test_mean(self): np_mean = self.np_lognormal.mean self.assertEqual(str(self.mean.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.mean, - np_mean, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.mean, + np_mean, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_var(self): np_var = self.np_lognormal.variance self.assertEqual(str(self.var.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.var, - np_var, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.var, + np_var, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_entropy(self): np_entropy = self.np_lognormal.entropy() self.assertEqual( - str(self.entropy.dtype).split('.')[-1], self.scale.dtype) - np.testing.assert_allclose(self.entropy, - np_entropy, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + str(self.entropy.dtype).split('.')[-1], self.scale.dtype + ) + np.testing.assert_allclose( + self.entropy, + np_entropy, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_probs(self): np_probs = self.np_lognormal.probs(self.value) - np.testing.assert_allclose(self.probs, - np_probs, - rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.probs, + np_probs, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) def test_log_prob(self): np_log_prob = self.np_lognormal.log_prob(self.value) - np.testing.assert_allclose(self.log_prob, - np_log_prob, - 
rtol=config.RTOL.get(str(self.scale.dtype)), - atol=config.ATOL.get(str(self.scale.dtype))) + np.testing.assert_allclose( + self.log_prob, + np_log_prob, + rtol=config.RTOL.get(str(self.scale.dtype)), + atol=config.ATOL.get(str(self.scale.dtype)), + ) @place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand( - (4, )), xrand((4, ), min=0, max=1))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale'), + [('sample', xrand((4,)), xrand((4,), min=0, max=1))], +) class TestLogNormalSample(unittest.TestCase): - def setUp(self): paddle.enable_static() startup_program = paddle.static.Program() @@ -111,11 +131,12 @@ class TestLogNormalSample(unittest.TestCase): executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) n = 100000 - self.sample_shape = (n, ) - self.rsample_shape = (n, ) + self.sample_shape = (n,) + self.rsample_shape = (n,) self.paddle_lognormal = LogNormal(loc=loc, scale=scale) mean = self.paddle_lognormal.mean variance = self.paddle_lognormal.variance @@ -125,10 +146,9 @@ class TestLogNormalSample(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale} executor.run(startup_program) - [self.mean, self.variance, self.samples, - self.rsamples] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [self.mean, self.variance, self.samples, self.rsamples] = executor.run( + main_program, feed=self.feeds, fetch_list=fetch_list + ) def test_sample(self): samples_mean = self.samples.mean(axis=0) @@ -139,10 +159,9 @@ class TestLogNormalSample(unittest.TestCase): rsamples_mean = self.rsamples.mean(axis=0) rsamples_var = self.rsamples.var(axis=0) np.testing.assert_allclose(rsamples_mean, self.mean, rtol=0.1, atol=0) - np.testing.assert_allclose(rsamples_var, - self.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + rsamples_var, self.variance, rtol=0.1, atol=0 + ) batch_shape = (self.loc + self.scale).shape self.assertEqual(self.samples.shape, self.sample_shape + batch_shape) @@ -150,25 +169,35 @@ class TestLogNormalSample(unittest.TestCase): for i in range(len(self.scale)): self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.samples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.samples[:, i]) + ) self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i]) + ) def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. 
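The KL tests that close out both LogNormal files check the LogNormal and Normal KL implementations against the _kl helper, which implements the Gaussian closed form 0.5 * (var_ratio + t1 - 1 - log(var_ratio)); the KL divergence between two log-normals equals that between their underlying normals, which is why one formula serves both. A numerical cross-check of the closed form (the parameter values and integration grid are illustrative assumptions):

import numpy as np
import scipy.stats

mu1, s1, mu2, s2 = 0.3, 1.2, -0.5, 0.8
var_ratio = (s1 / s2) ** 2
t1 = ((mu1 - mu2) / s2) ** 2
closed_form = 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio))

# Brute-force KL(N(mu1, s1^2) || N(mu2, s2^2)): integrate p * log(p / q) on a fine grid.
x = np.linspace(mu1 - 10 * s1, mu1 + 10 * s1, 200001)
p = scipy.stats.norm.pdf(x, mu1, s1)
q = scipy.stats.norm.pdf(x, mu2, s2)
numeric = float(np.sum(p * np.log(p / q)) * (x[1] - x[0]))
print(np.isclose(closed_form, numeric, rtol=1e-3))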
ks, _ = scipy.stats.kstest( - samples, - scipy.stats.lognorm(s=scale, scale=np.exp(loc)).cdf) + samples, scipy.stats.lognorm(s=scale, scale=np.exp(loc)).cdf + ) return ks < 0.02 @place(config.DEVICES) @parameterize_cls( (TEST_CASE_NAME, 'loc1', 'scale1', 'loc2', 'scale2'), - [('one-dim', xrand((2, )), xrand((2, )), xrand((2, )), xrand((2, ))), - ('multi-dim', xrand((2, 2)), xrand((2, 2)), xrand((2, 2)), xrand((2, 2)))]) + [ + ('one-dim', xrand((2,)), xrand((2,)), xrand((2,)), xrand((2,))), + ( + 'multi-dim', + xrand((2, 2)), + xrand((2, 2)), + xrand((2, 2)), + xrand((2, 2)), + ), + ], +) class TestLogNormalKL(unittest.TestCase): - def setUp(self): paddle.enable_static() startup_program = paddle.static.Program() @@ -176,11 +205,13 @@ class TestLogNormalKL(unittest.TestCase): executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc1 = paddle.static.data('loc1', self.loc1.shape, self.loc1.dtype) - scale1 = paddle.static.data('scale1', self.scale1.shape, - self.scale1.dtype) + scale1 = paddle.static.data( + 'scale1', self.scale1.shape, self.scale1.dtype + ) loc2 = paddle.static.data('loc2', self.loc2.shape, self.loc2.dtype) - scale2 = paddle.static.data('scale2', self.scale2.shape, - self.scale2.dtype) + scale2 = paddle.static.data( + 'scale2', self.scale2.shape, self.scale2.dtype + ) self.ln_a = LogNormal(loc=loc1, scale=scale1) self.ln_b = LogNormal(loc=loc2, scale=scale2) @@ -197,40 +228,45 @@ class TestLogNormalKL(unittest.TestCase): 'loc1': self.loc1, 'scale1': self.scale1, 'loc2': self.loc2, - 'scale2': self.scale2 + 'scale2': self.scale2, } executor.run(startup_program) - [self.kl0, self.kl1, self.kl_normal, - self.kl_formula] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [self.kl0, self.kl1, self.kl_normal, self.kl_formula] = executor.run( + main_program, feed=self.feeds, fetch_list=fetch_list + ) def test_kl_divergence(self): - np.testing.assert_allclose(self.kl0, - self.kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) - - np.testing.assert_allclose(self.kl1, - self.kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) - - np.testing.assert_allclose(self.kl_normal, - self.kl_formula, - rtol=config.RTOL.get(str(self.scale1.dtype)), - atol=config.ATOL.get(str(self.scale1.dtype))) + np.testing.assert_allclose( + self.kl0, + self.kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) + + np.testing.assert_allclose( + self.kl1, + self.kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) + + np.testing.assert_allclose( + self.kl_normal, + self.kl_formula, + rtol=config.RTOL.get(str(self.scale1.dtype)), + atol=config.ATOL.get(str(self.scale1.dtype)), + ) def _kl(self, dist1, dist2): loc1 = dist1.loc loc2 = dist2.loc - scale1 = (dist1.scale) - scale2 = (dist2.scale) - var_ratio = (scale1 / scale2) + scale1 = dist1.scale + scale2 = dist2.scale + var_ratio = scale1 / scale2 var_ratio = var_ratio * var_ratio - t1 = ((loc1 - loc2) / scale2) - t1 = (t1 * t1) + t1 = (loc1 - loc2) / scale2 + t1 = t1 * t1 return 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio)) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial.py index 
0bec1c5a58cd975c048b59a2c4301606efebc3d4..2c18469936cb59208fc7ecb4fdfffeca1ebcbc36 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial.py @@ -24,41 +24,49 @@ import parameterize @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), [ - ('one-dim', 10, parameterize.xrand((3, ))), + (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), + [ + ('one-dim', 10, parameterize.xrand((3,))), ('multi-dim', 9, parameterize.xrand((10, 20))), ('prob-sum-one', 10, np.array([0.5, 0.2, 0.3])), - ('prob-sum-non-one', 10, np.array([2., 3., 5.])), - ]) + ('prob-sum-non-one', 10, np.array([2.0, 3.0, 5.0])), + ], +) class TestMultinomial(unittest.TestCase): - def setUp(self): self._dist = paddle.distribution.Multinomial( - total_count=self.total_count, probs=paddle.to_tensor(self.probs)) + total_count=self.total_count, probs=paddle.to_tensor(self.probs) + ) def test_mean(self): mean = self._dist.mean self.assertEqual(mean.numpy().dtype, self.probs.dtype) - np.testing.assert_allclose(mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_variance(self): var = self._dist.variance self.assertEqual(var.numpy().dtype, self.probs.dtype) - np.testing.assert_allclose(var, - self._np_variance(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + var, + self._np_variance(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_entropy(self): entropy = self._dist.entropy() self.assertEqual(entropy.numpy().dtype, self.probs.dtype) - np.testing.assert_allclose(entropy, - self._np_entropy(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + entropy, + self._np_entropy(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_sample(self): sample_shape = () @@ -66,26 +74,28 @@ class TestMultinomial(unittest.TestCase): self.assertEqual(samples.numpy().dtype, self.probs.dtype) self.assertEqual( tuple(samples.shape), - sample_shape + self._dist.batch_shape + self._dist.event_shape) + sample_shape + self._dist.batch_shape + self._dist.event_shape, + ) - sample_shape = (6, ) + sample_shape = (6,) samples = self._dist.sample(sample_shape) self.assertEqual(samples.numpy().dtype, self.probs.dtype) self.assertEqual( tuple(samples.shape), - sample_shape + self._dist.batch_shape + self._dist.event_shape) + sample_shape + self._dist.batch_shape + self._dist.event_shape, + ) self.assertTrue( - np.all(samples.sum(-1).numpy() == self._dist.total_count)) + np.all(samples.sum(-1).numpy() == self._dist.total_count) + ) - sample_shape = (5000, ) + sample_shape = (5000,) samples = self._dist.sample(sample_shape) sample_mean = samples.mean(axis=0) # Tolerance value 0.2 is empirical value which is consistent with # TensorFlow - np.testing.assert_allclose(sample_mean, - self._dist.mean, - atol=0, - rtol=0.20) + np.testing.assert_allclose( + sample_mean, self._dist.mean, atol=0, rtol=0.20 + ) def _np_variance(self): probs = self.probs / self.probs.sum(-1, keepdims=True) @@ 
-104,40 +114,54 @@ class TestMultinomial(unittest.TestCase): @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'total_count', 'probs', 'value'), [ - ('value-float', 10, np.array([0.2, 0.3, 0.5]), np.array([2., 3., 5.])), + ( + 'value-float', + 10, + np.array([0.2, 0.3, 0.5]), + np.array([2.0, 3.0, 5.0]), + ), ('value-int', 10, np.array([0.2, 0.3, 0.5]), np.array([2, 3, 5])), - ('value-multi-dim', 10, np.array([[0.3, 0.7], [0.5, 0.5] - ]), np.array([[4., 6], [8, 2]])), + ( + 'value-multi-dim', + 10, + np.array([[0.3, 0.7], [0.5, 0.5]]), + np.array([[4.0, 6], [8, 2]]), + ), # ('value-sum-non-n', 10, np.array([0.5, 0.2, 0.3]), np.array([4,5,2])), - ]) + ], +) class TestMultinomialPmf(unittest.TestCase): - def setUp(self): self._dist = paddle.distribution.Multinomial( - total_count=self.total_count, probs=paddle.to_tensor(self.probs)) + total_count=self.total_count, probs=paddle.to_tensor(self.probs) + ) def test_prob(self): np.testing.assert_allclose( self._dist.prob(paddle.to_tensor(self.value)), - scipy.stats.multinomial.pmf(self.value, self.total_count, - self.probs), + scipy.stats.multinomial.pmf( + self.value, self.total_count, self.probs + ), rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + atol=config.ATOL.get(str(self.probs.dtype)), + ) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (config.TEST_CASE_NAME, 'total_count', 'probs'), [ + (config.TEST_CASE_NAME, 'total_count', 'probs'), + [ ('total_count_le_one', 0, np.array([0.3, 0.7])), ('total_count_float', np.array([0.3, 0.7])), ('probs_zero_dim', np.array(0)), - ]) + ], +) class TestMultinomialException(unittest.TestCase): - def TestInit(self): with self.assertRaises(ValueError): - paddle.distribution.Multinomial(self.total_count, - paddle.to_tensor(self.probs)) + paddle.distribution.Multinomial( + self.total_count, paddle.to_tensor(self.probs) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial_static.py index 56341d7fc0ef814e486c7532680c99a2c69c35e8..90efb815dea598acb0b310e2687912b95552389f 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_multinomial_static.py @@ -26,61 +26,74 @@ paddle.enable_static() @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), [ - ('one-dim', 5, parameterize.xrand((3, ))), + (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), + [ + ('one-dim', 5, parameterize.xrand((3,))), ('multi-dim', 9, parameterize.xrand((2, 3))), ('prob-sum-one', 5, np.array([0.5, 0.2, 0.3])), - ('prob-sum-non-one', 5, np.array([2., 3., 5.])), - ]) + ('prob-sum-non-one', 5, np.array([2.0, 3.0, 5.0])), + ], +) class TestMultinomial(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): - probs = paddle.static.data('probs', self.probs.shape, - self.probs.dtype) + probs = paddle.static.data( + 'probs', self.probs.shape, self.probs.dtype + ) dist = paddle.distribution.Multinomial(self.total_count, probs) mean = dist.mean var = dist.variance entropy = dist.entropy() - mini_samples = dist.sample(shape=(6, )) - large_samples = 
dist.sample(shape=(5000, )) + mini_samples = dist.sample(shape=(6,)) + large_samples = dist.sample(shape=(5000,)) fetch_list = [mean, var, entropy, mini_samples, large_samples] feed = {'probs': self.probs} executor.run(startup_program) [ - self.mean, self.var, self.entropy, self.mini_samples, - self.large_samples + self.mean, + self.var, + self.entropy, + self.mini_samples, + self.large_samples, ] = executor.run(main_program, feed=feed, fetch_list=fetch_list) def test_mean(self): self.assertEqual(str(self.mean.dtype).split('.')[-1], self.probs.dtype) - np.testing.assert_allclose(self.mean, - self._np_mean(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + self.mean, + self._np_mean(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_variance(self): self.assertEqual(str(self.var.dtype).split('.')[-1], self.probs.dtype) - np.testing.assert_allclose(self.var, - self._np_variance(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + self.var, + self._np_variance(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_entropy(self): self.assertEqual( - str(self.entropy.dtype).split('.')[-1], self.probs.dtype) - np.testing.assert_allclose(self.entropy, - self._np_entropy(), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + str(self.entropy.dtype).split('.')[-1], self.probs.dtype + ) + np.testing.assert_allclose( + self.entropy, + self._np_entropy(), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) def test_sample(self): self.assertEqual( - str(self.mini_samples.dtype).split('.')[-1], self.probs.dtype) + str(self.mini_samples.dtype).split('.')[-1], self.probs.dtype + ) self.assertTrue(np.all(self.mini_samples.sum(-1) == self.total_count)) sample_mean = self.large_samples.mean(axis=0) @@ -103,60 +116,75 @@ class TestMultinomial(unittest.TestCase): @parameterize.parameterize_cls( (parameterize.TEST_CASE_NAME, 'total_count', 'probs', 'value'), [ - ('value-float', 5, np.array([0.2, 0.3, 0.5]), np.array([1., 1., 3.])), + ( + 'value-float', + 5, + np.array([0.2, 0.3, 0.5]), + np.array([1.0, 1.0, 3.0]), + ), ('value-int', 5, np.array([0.2, 0.3, 0.5]), np.array([2, 2, 1])), - ('value-multi-dim', 5, np.array([[0.3, 0.7], [0.5, 0.5] - ]), np.array([[1., 4.], [2., 3.]])), + ( + 'value-multi-dim', + 5, + np.array([[0.3, 0.7], [0.5, 0.5]]), + np.array([[1.0, 4.0], [2.0, 3.0]]), + ), # ('value-sum-non-n', 10, np.array([0.5, 0.2, 0.3]), np.array([4,5,2])), - ]) + ], +) class TestMultinomialPmf(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() main_program = paddle.static.Program() executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): - probs = paddle.static.data('probs', self.probs.shape, - self.probs.dtype) - value = paddle.static.data('value', self.value.shape, - self.value.dtype) + probs = paddle.static.data( + 'probs', self.probs.shape, self.probs.dtype + ) + value = paddle.static.data( + 'value', self.value.shape, self.value.dtype + ) dist = paddle.distribution.Multinomial(self.total_count, probs) pmf = dist.prob(value) feed = {'probs': self.probs, 'value': self.value} fetch_list = [pmf] executor.run(startup_program) - [self.pmf] = executor.run(main_program, - 
feed=feed, - fetch_list=fetch_list) + [self.pmf] = executor.run( + main_program, feed=feed, fetch_list=fetch_list + ) def test_prob(self): - np.testing.assert_allclose(self.pmf, - scipy.stats.multinomial.pmf( - self.value, self.total_count, - self.probs), - rtol=config.RTOL.get(str(self.probs.dtype)), - atol=config.ATOL.get(str(self.probs.dtype))) + np.testing.assert_allclose( + self.pmf, + scipy.stats.multinomial.pmf( + self.value, self.total_count, self.probs + ), + rtol=config.RTOL.get(str(self.probs.dtype)), + atol=config.ATOL.get(str(self.probs.dtype)), + ) @parameterize.place(config.DEVICES) @parameterize.parameterize_cls( - (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), [ + (parameterize.TEST_CASE_NAME, 'total_count', 'probs'), + [ ('total_count_le_one', 0, np.array([0.3, 0.7])), ('total_count_float', np.array([0.3, 0.7])), ('probs_zero_dim', np.array(0)), - ]) + ], +) class TestMultinomialException(unittest.TestCase): - def setUp(self): startup_program = paddle.static.Program() self.main_program = paddle.static.Program() self.executor = paddle.static.Executor(self.place) with paddle.static.program_guard(self.main_program, startup_program): - probs = paddle.static.data('probs', self.probs.shape, - self.probs.dtype) + probs = paddle.static.data( + 'probs', self.probs.shape, self.probs.dtype + ) dist = paddle.distribution.Multinomial(self.total_count, probs) self.feed = {'probs': self.probs} diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py index d90405599554d5cc34e6b2523c15a7e7bdd3e9a4..3f75a8c78c2ce36a97295fdbe07feb07a2dad0ed 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py @@ -29,7 +29,6 @@ np.random.seed(2022) class NormalNumpy(DistributionNumpy): - def __init__(self, loc, scale): self.loc = np.array(loc) self.scale = np.array(scale) @@ -44,29 +43,34 @@ class NormalNumpy(DistributionNumpy): def log_prob(self, value): var = self.scale * self.scale log_scale = np.log(self.scale) - return -((value - self.loc) * - (value - self.loc)) / (2. * var) - log_scale - math.log( - math.sqrt(2. * math.pi)) + return ( + -((value - self.loc) * (value - self.loc)) / (2.0 * var) + - log_scale + - math.log(math.sqrt(2.0 * math.pi)) + ) def probs(self, value): var = self.scale * self.scale - return np.exp(-1. * ((value - self.loc) * (value - self.loc)) / - (2. * var)) / (math.sqrt(2 * math.pi) * self.scale) + return np.exp( + -1.0 * ((value - self.loc) * (value - self.loc)) / (2.0 * var) + ) / (math.sqrt(2 * math.pi) * self.scale) def entropy(self): - return 0.5 + 0.5 * np.log( - np.array(2. 
* math.pi).astype(self.loc.dtype)) + np.log(self.scale) + return ( + 0.5 + + 0.5 * np.log(np.array(2.0 * math.pi).astype(self.loc.dtype)) + + np.log(self.scale) + ) def kl_divergence(self, other): - var_ratio = (self.scale / other.scale) + var_ratio = self.scale / other.scale var_ratio = var_ratio * var_ratio - t1 = ((self.loc - other.loc) / other.scale) - t1 = (t1 * t1) + t1 = (self.loc - other.loc) / other.scale + t1 = t1 * t1 return 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio)) class NormalTest(unittest.TestCase): - def setUp(self, use_gpu=False, batch_size=2, dims=3): self.use_gpu = use_gpu if not use_gpu: @@ -112,9 +116,9 @@ class NormalTest(unittest.TestCase): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[], dtype='float32' + ) def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6): sample, entropy, log_prob, probs, kl = fetch_list @@ -134,22 +138,18 @@ class NormalTest(unittest.TestCase): # So set the tolerance from 1e-6 to 1e-4. log_tolerance = 1e-4 np.testing.assert_equal(sample.shape, np_sample.shape) - np.testing.assert_allclose(entropy, - np_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(log_prob, - np_lp, - rtol=log_tolerance, - atol=log_tolerance) - np.testing.assert_allclose(probs, - np_p, - rtol=log_tolerance, - atol=log_tolerance) - np.testing.assert_allclose(kl, - np_kl, - rtol=log_tolerance, - atol=log_tolerance) + np.testing.assert_allclose( + entropy, np_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + log_prob, np_lp, rtol=log_tolerance, atol=log_tolerance + ) + np.testing.assert_allclose( + probs, np_p, rtol=log_tolerance, atol=log_tolerance + ) + np.testing.assert_allclose( + kl, np_kl, rtol=log_tolerance, atol=log_tolerance + ) def test_normal_distribution_dygraph(self, sample_shape=7, tolerance=1e-6): paddle.disable_static(self.place) @@ -174,8 +174,9 @@ class NormalTest(unittest.TestCase): entropy = normal.entropy() log_prob = normal.log_prob(self.static_values) probs = normal.probs(self.static_values) - other_normal = Normal(self.static_other_loc, - self.static_other_scale) + other_normal = Normal( + self.static_other_loc, self.static_other_scale + ) kl = normal.kl_divergence(other_normal) fetch_list = [sample, entropy, log_prob, probs, kl] @@ -185,19 +186,18 @@ class NormalTest(unittest.TestCase): 'scale': self.scale_np, 'values': self.values_np, 'other_loc': self.other_loc_np, - 'other_scale': self.other_scale_np + 'other_scale': self.other_scale_np, } self.executor.run(fluid.default_startup_program()) - fetch_list = self.executor.run(program=self.test_program, - feed=feed_vars, - fetch_list=fetch_list) + fetch_list = self.executor.run( + program=self.test_program, feed=feed_vars, fetch_list=fetch_list + ) self.compare_with_numpy(fetch_list) class NormalTest2(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc ans scale are 'int' self.loc_np = int((np.random.ranf() - 0.5) * 8) @@ -213,7 +213,6 @@ class NormalTest2(NormalTest): class NormalTest3(NormalTest): - def init_numpy_data(self, batch_size, dims): # test broadcast: loc is float, scale is numpy.ndarray with dtype 'float32'. 
self.loc_np = (np.random.ranf() - 0.5) * 4 @@ -223,11 +222,13 @@ class NormalTest3(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = (np.random.ranf() - 0.5) * 4 - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) def init_static_data(self, batch_size, dims): self.static_loc = self.loc_np @@ -235,13 +236,12 @@ class NormalTest3(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class NormalTest4(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are numpy.ndarray with dtype 'float32'. self.loc_np = np.random.randn(batch_size, dims).astype('float32') @@ -251,11 +251,13 @@ class NormalTest4(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = np.random.randn(batch_size, dims).astype('float32') - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) def init_static_data(self, batch_size, dims): self.static_loc = self.loc_np @@ -263,13 +265,12 @@ class NormalTest4(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class NormalTest5(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are numpy.ndarray with dtype 'float64'. 
self.loc_np = np.random.randn(batch_size, dims).astype('float64') @@ -279,11 +280,13 @@ class NormalTest5(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float64') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = np.random.randn(batch_size, dims).astype('float64') - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) def init_dynamic_data(self, batch_size, dims): self.dynamic_loc = self.loc_np @@ -298,13 +301,12 @@ class NormalTest5(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float64') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float64' + ) class NormalTest6(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are Tensor with dtype 'VarType.FP32'. self.loc_np = np.random.randn(batch_size, dims).astype('float32') @@ -314,11 +316,13 @@ class NormalTest6(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = np.random.randn(batch_size, dims).astype('float32') - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) def init_dynamic_data(self, batch_size, dims): self.dynamic_loc = paddle.to_tensor(self.loc_np) @@ -329,25 +333,24 @@ class NormalTest6(NormalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data(name='loc', - shape=[dims], - dtype='float32') - self.static_scale = layers.data(name='scale', - shape=[dims], - dtype='float32') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') - self.static_other_loc = layers.data(name='other_loc', - shape=[dims], - dtype='float32') - self.static_other_scale = layers.data(name='other_scale', - shape=[dims], - dtype='float32') + self.static_loc = layers.data( + name='loc', shape=[dims], dtype='float32' + ) + self.static_scale = layers.data( + name='scale', shape=[dims], dtype='float32' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) + self.static_other_loc = layers.data( + name='other_loc', shape=[dims], dtype='float32' + ) + self.static_other_scale = layers.data( + name='other_scale', shape=[dims], dtype='float32' + ) class NormalTest7(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are Tensor with dtype 'VarType.FP64'. 
self.loc_np = np.random.randn(batch_size, dims).astype('float64') @@ -357,42 +360,45 @@ class NormalTest7(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float64') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = np.random.randn(batch_size, dims).astype('float64') - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) def init_dynamic_data(self, batch_size, dims): self.dynamic_loc = paddle.to_tensor(self.loc_np, dtype='float64') self.dynamic_scale = paddle.to_tensor(self.scale_np, dtype='float64') self.dynamic_values = paddle.to_tensor(self.values_np, dtype='float64') - self.dynamic_other_loc = paddle.to_tensor(self.other_loc_np, - dtype='float64') - self.dynamic_other_scale = paddle.to_tensor(self.other_scale_np, - dtype='float64') + self.dynamic_other_loc = paddle.to_tensor( + self.other_loc_np, dtype='float64' + ) + self.dynamic_other_scale = paddle.to_tensor( + self.other_scale_np, dtype='float64' + ) def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data(name='loc', - shape=[dims], - dtype='float64') - self.static_scale = layers.data(name='scale', - shape=[dims], - dtype='float64') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float64') - self.static_other_loc = layers.data(name='other_loc', - shape=[dims], - dtype='float64') - self.static_other_scale = layers.data(name='other_scale', - shape=[dims], - dtype='float64') + self.static_loc = layers.data( + name='loc', shape=[dims], dtype='float64' + ) + self.static_scale = layers.data( + name='scale', shape=[dims], dtype='float64' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float64' + ) + self.static_other_loc = layers.data( + name='other_loc', shape=[dims], dtype='float64' + ) + self.static_other_scale = layers.data( + name='other_scale', shape=[dims], dtype='float64' + ) class NormalTest8(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are Tensor with dtype 'VarType.FP64'. value's dtype is 'VarType.FP32'. 
self.loc_np = np.random.randn(batch_size, dims).astype('float64') @@ -402,59 +408,66 @@ class NormalTest8(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = np.random.randn(batch_size, dims).astype('float64') - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) while not np.all(self.scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float64') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float64' + ) def init_dynamic_data(self, batch_size, dims): self.dynamic_loc = paddle.to_tensor(self.loc_np, dtype='float64') self.dynamic_scale = paddle.to_tensor(self.scale_np, dtype='float64') self.dynamic_values = paddle.to_tensor(self.values_np) - self.dynamic_other_loc = paddle.to_tensor(self.other_loc_np, - dtype='float64') - self.dynamic_other_scale = paddle.to_tensor(self.other_scale_np, - dtype='float64') + self.dynamic_other_loc = paddle.to_tensor( + self.other_loc_np, dtype='float64' + ) + self.dynamic_other_scale = paddle.to_tensor( + self.other_scale_np, dtype='float64' + ) def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data(name='loc', - shape=[dims], - dtype='float64') - self.static_scale = layers.data(name='scale', - shape=[dims], - dtype='float64') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') - self.static_other_loc = layers.data(name='other_loc', - shape=[dims], - dtype='float64') - self.static_other_scale = layers.data(name='other_scale', - shape=[dims], - dtype='float64') + self.static_loc = layers.data( + name='loc', shape=[dims], dtype='float64' + ) + self.static_scale = layers.data( + name='scale', shape=[dims], dtype='float64' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) + self.static_other_loc = layers.data( + name='other_loc', shape=[dims], dtype='float64' + ) + self.static_other_scale = layers.data( + name='other_scale', shape=[dims], dtype='float64' + ) class NormalTest9(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are list. 
- self.loc_np = np.random.randn(batch_size, - dims).astype('float32').tolist() + self.loc_np = ( + np.random.randn(batch_size, dims).astype('float32').tolist() + ) self.scale_np = np.random.randn(batch_size, dims).astype('float32') while not np.all(self.scale_np > 0): self.scale_np = np.random.randn(batch_size, dims).astype('float32') self.scale_np = self.scale_np.tolist() self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence - self.other_loc_np = np.random.randn(batch_size, - dims).astype('float32').tolist() - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_loc_np = ( + np.random.randn(batch_size, dims).astype('float32').tolist() + ) + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) while not np.all(self.other_scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) self.other_scale_np = self.other_scale_np.tolist() def init_static_data(self, batch_size, dims): @@ -463,17 +476,17 @@ class NormalTest9(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class NormalTest10(NormalTest): - def init_numpy_data(self, batch_size, dims): # loc and scale are tuple. self.loc_np = tuple( - np.random.randn(batch_size, dims).astype('float32').tolist()) + np.random.randn(batch_size, dims).astype('float32').tolist() + ) self.scale_np = np.random.randn(batch_size, dims).astype('float32') while not np.all(self.scale_np > 0): self.scale_np = np.random.randn(batch_size, dims).astype('float32') @@ -481,12 +494,15 @@ class NormalTest10(NormalTest): self.values_np = np.random.randn(batch_size, dims).astype('float32') # used to construct another Normal object to calculate kl_divergence self.other_loc_np = tuple( - np.random.randn(batch_size, dims).astype('float32').tolist()) - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + np.random.randn(batch_size, dims).astype('float32').tolist() + ) + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) while not np.all(self.other_scale_np > 0): - self.other_scale_np = np.random.randn(batch_size, - dims).astype('float32') + self.other_scale_np = np.random.randn(batch_size, dims).astype( + 'float32' + ) self.other_scale_np = tuple(self.other_scale_np.tolist()) def init_static_data(self, batch_size, dims): @@ -495,72 +511,73 @@ class NormalTest10(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) @place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand( - (4, )), xrand((4, )))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand((4,)), xrand((4,)))] +) class TestNormalSampleDygraph(unittest.TestCase): - def setUp(self): paddle.disable_static() self.paddle_normal = Normal(loc=self.loc, scale=self.scale) n = 100000 - self.sample_shape = (n, ) - self.rsample_shape = (n, ) 
+ self.sample_shape = (n,) + self.rsample_shape = (n,) self.samples = self.paddle_normal.sample(self.sample_shape) self.rsamples = self.paddle_normal.rsample(self.rsample_shape) def test_sample(self): samples_mean = self.samples.mean(axis=0) samples_var = self.samples.var(axis=0) - np.testing.assert_allclose(samples_mean, - self.paddle_normal.mean, - rtol=0.1, - atol=0) - np.testing.assert_allclose(samples_var, - self.paddle_normal.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + samples_mean, self.paddle_normal.mean, rtol=0.1, atol=0 + ) + np.testing.assert_allclose( + samples_var, self.paddle_normal.variance, rtol=0.1, atol=0 + ) rsamples_mean = self.rsamples.mean(axis=0) rsamples_var = self.rsamples.var(axis=0) - np.testing.assert_allclose(rsamples_mean, - self.paddle_normal.mean, - rtol=0.1, - atol=0) - np.testing.assert_allclose(rsamples_var, - self.paddle_normal.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + rsamples_mean, self.paddle_normal.mean, rtol=0.1, atol=0 + ) + np.testing.assert_allclose( + rsamples_var, self.paddle_normal.variance, rtol=0.1, atol=0 + ) batch_shape = (self.loc + self.scale).shape - self.assertEqual(self.samples.shape, - list(self.sample_shape + batch_shape)) - self.assertEqual(self.rsamples.shape, - list(self.rsample_shape + batch_shape)) + self.assertEqual( + self.samples.shape, list(self.sample_shape + batch_shape) + ) + self.assertEqual( + self.rsamples.shape, list(self.rsample_shape + batch_shape) + ) for i in range(len(self.scale)): self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.samples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.samples[:, i]) + ) self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i]) + ) def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. 
- ks, _ = scipy.stats.kstest(samples, - scipy.stats.norm(loc=loc, scale=scale).cdf) + ks, _ = scipy.stats.kstest( + samples, scipy.stats.norm(loc=loc, scale=scale).cdf + ) return ks < 0.02 @place(config.DEVICES) -@parameterize_cls((TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand( - (4, )), xrand((4, )))]) +@parameterize_cls( + (TEST_CASE_NAME, 'loc', 'scale'), [('sample', xrand((4,)), xrand((4,)))] +) class TestNormalSampleStaic(unittest.TestCase): - def setUp(self): paddle.enable_static() startup_program = paddle.static.Program() @@ -568,11 +585,12 @@ class TestNormalSampleStaic(unittest.TestCase): executor = paddle.static.Executor(self.place) with paddle.static.program_guard(main_program, startup_program): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) n = 100000 - self.sample_shape = (n, ) - self.rsample_shape = (n, ) + self.sample_shape = (n,) + self.rsample_shape = (n,) self.paddle_normal = Normal(loc=loc, scale=scale) mean = self.paddle_normal.mean variance = self.paddle_normal.variance @@ -582,10 +600,9 @@ class TestNormalSampleStaic(unittest.TestCase): self.feeds = {'loc': self.loc, 'scale': self.scale} executor.run(startup_program) - [self.mean, self.variance, self.samples, - self.rsamples] = executor.run(main_program, - feed=self.feeds, - fetch_list=fetch_list) + [self.mean, self.variance, self.samples, self.rsamples] = executor.run( + main_program, feed=self.feeds, fetch_list=fetch_list + ) def test_sample(self): samples_mean = self.samples.mean(axis=0) @@ -596,10 +613,9 @@ class TestNormalSampleStaic(unittest.TestCase): rsamples_mean = self.rsamples.mean(axis=0) rsamples_var = self.rsamples.var(axis=0) np.testing.assert_allclose(rsamples_mean, self.mean, rtol=0.1, atol=0) - np.testing.assert_allclose(rsamples_var, - self.variance, - rtol=0.1, - atol=0) + np.testing.assert_allclose( + rsamples_var, self.variance, rtol=0.1, atol=0 + ) batch_shape = (self.loc + self.scale).shape self.assertEqual(self.samples.shape, self.sample_shape + batch_shape) @@ -607,14 +623,17 @@ class TestNormalSampleStaic(unittest.TestCase): for i in range(len(self.scale)): self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.samples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.samples[:, i]) + ) self.assertTrue( - self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i])) + self._kstest(self.loc[i], self.scale[i], self.rsamples[:, i]) + ) def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. 
- ks, _ = scipy.stats.kstest(samples, - scipy.stats.norm(loc=loc, scale=scale).cdf) + ks, _ = scipy.stats.kstest( + samples, scipy.stats.norm(loc=loc, scale=scale).cdf + ) return ks < 0.02 diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py index 4f997d40bb0942804fa6717ccbc2f2ef2f115a32..f3f89068b351097aab49c980ab85fd0804431e85 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py @@ -28,22 +28,33 @@ paddle.seed(2022) @param.place(config.DEVICES) class TestTransform(unittest.TestCase): - def setUp(self): self._t = transform.Transform() - @param.param_func([(paddle.distribution.Distribution(), - paddle.distribution.TransformedDistribution), - (paddle.distribution.ExpTransform(), - paddle.distribution.ChainTransform)]) + @param.param_func( + [ + ( + paddle.distribution.Distribution(), + paddle.distribution.TransformedDistribution, + ), + ( + paddle.distribution.ExpTransform(), + paddle.distribution.ChainTransform, + ), + ] + ) def test_call(self, input, expected_type): t = transform.Transform() self.assertIsInstance(t(input), expected_type) - @param.param_func([(transform.Type.BIJECTION, True), - (transform.Type.INJECTION, True), - (transform.Type.SURJECTION, False), - (transform.Type.OTHER, False)]) + @param.param_func( + [ + (transform.Type.BIJECTION, True), + (transform.Type.INJECTION, True), + (transform.Type.SURJECTION, False), + (transform.Type.OTHER, False), + ] + ) def test_is_injective(self, type, expected): transform.Transform._type = type self.assertEqual(self._t._is_injective(), expected) @@ -54,26 +65,30 @@ class TestTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Real)) - @param.param_func([(0, TypeError), (paddle.rand( - (2, 3)), NotImplementedError)]) + @param.param_func( + [(0, TypeError), (paddle.rand((2, 3)), NotImplementedError)] + ) def test_forward(self, input, expected): with self.assertRaises(expected): self._t.forward(input) - @param.param_func([(0, TypeError), (paddle.rand( - (2, 3)), NotImplementedError)]) + @param.param_func( + [(0, TypeError), (paddle.rand((2, 3)), NotImplementedError)] + ) def test_inverse(self, input, expected): with self.assertRaises(expected): self._t.inverse(input) - @param.param_func([(0, TypeError), (paddle.rand( - (2, 3)), NotImplementedError)]) + @param.param_func( + [(0, TypeError), (paddle.rand((2, 3)), NotImplementedError)] + ) def test_forward_log_det_jacobian(self, input, expected): with self.assertRaises(expected): self._t.forward_log_det_jacobian(input) - @param.param_func([(0, TypeError), (paddle.rand( - (2, 3)), NotImplementedError)]) + @param.param_func( + [(0, TypeError), (paddle.rand((2, 3)), NotImplementedError)] + ) def test_inverse_log_det_jacobian(self, input, expected): with self.assertRaises(expected): self._t.inverse_log_det_jacobian(input) @@ -91,7 +106,6 @@ class TestTransform(unittest.TestCase): @param.place(config.DEVICES) class TestAbsTransform(unittest.TestCase): - def setUp(self): self._t = transform.AbsTransform() @@ -108,48 +122,66 @@ class TestAbsTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([-1., 1., 0.]), np.array([1., 1., 0.])), - (np.array([[1., -1., -0.1], [-3., -0.1, 0]]), - 
np.array([[1., 1., 0.1], [3., 0.1, 0]]))]) + @param.param_func( + [ + (np.array([-1.0, 1.0, 0.0]), np.array([1.0, 1.0, 0.0])), + ( + np.array([[1.0, -1.0, -0.1], [-3.0, -0.1, 0]]), + np.array([[1.0, 1.0, 0.1], [3.0, 0.1, 0]]), + ), + ] + ) def test_forward(self, input, expected): - np.testing.assert_allclose(self._t.forward( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) - @param.param_func([(np.array(1.), (-np.array(1.), np.array(1.)))]) + @param.param_func([(np.array(1.0), (-np.array(1.0), np.array(1.0)))]) def test_inverse(self, input, expected): actual0, actual1 = self._t.inverse(paddle.to_tensor(input)) expected0, expected1 = expected - np.testing.assert_allclose(actual0.numpy(), - expected0, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - np.testing.assert_allclose(actual1.numpy(), - expected1, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + actual0.numpy(), + expected0, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + np.testing.assert_allclose( + actual1.numpy(), + expected1, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def test_forward_log_det_jacobian(self): with self.assertRaises(NotImplementedError): - self._t.forward_log_det_jacobian(paddle.rand((10, ))) + self._t.forward_log_det_jacobian(paddle.rand((10,))) - @param.param_func([ - (np.array(1.), (np.array(0.), np.array(0.))), - ]) + @param.param_func( + [ + (np.array(1.0), (np.array(0.0), np.array(0.0))), + ] + ) def test_inverse_log_det_jacobian(self, input, expected): actual0, actual1 = self._t.inverse_log_det_jacobian( - paddle.to_tensor(input)) + paddle.to_tensor(input) + ) expected0, expected1 = expected - np.testing.assert_allclose(actual0.numpy(), - expected0, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - np.testing.assert_allclose(actual1.numpy(), - expected1, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + actual0.numpy(), + expected0, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + np.testing.assert_allclose( + actual1.numpy(), + expected1, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -161,20 +193,25 @@ class TestAbsTransform(unittest.TestCase): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'loc', 'scale'), [ - ('normal', np.random.rand(8, 10), np.random.rand(8, 10)), - ('broadcast', np.random.rand(2, 10), np.random.rand(10)), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('normal', np.random.rand(8, 10), np.random.rand(8, 10)), + ('broadcast', np.random.rand(2, 10), np.random.rand(10)), + ], +) class TestAffineTransform(unittest.TestCase): - def setUp(self): - self._t = transform.AffineTransform(paddle.to_tensor(self.loc), - paddle.to_tensor(self.scale)) + self._t = transform.AffineTransform( + paddle.to_tensor(self.loc), paddle.to_tensor(self.scale) + ) - @param.param_func([ - (paddle.rand([1]), 0, TypeError), - (0, 
paddle.rand([1]), TypeError), - ]) + @param.param_func( + [ + (paddle.rand([1]), 0, TypeError), + (0, paddle.rand([1]), TypeError), + ] + ) def test_init_exception(self, loc, scale, exc): with self.assertRaises(exc): paddle.distribution.AffineTransform(loc, scale) @@ -204,7 +241,8 @@ class TestAffineTransform(unittest.TestCase): self._t.forward(paddle.to_tensor(x)).numpy(), self._np_forward(x), rtol=config.RTOL.get(str(self._t.loc.numpy().dtype)), - atol=config.ATOL.get(str(self._t.loc.numpy().dtype))) + atol=config.ATOL.get(str(self._t.loc.numpy().dtype)), + ) def test_inverse(self): y = np.random.random(self.loc.shape) @@ -212,7 +250,8 @@ class TestAffineTransform(unittest.TestCase): self._t.inverse(paddle.to_tensor(y)).numpy(), self._np_inverse(y), rtol=config.RTOL.get(str(self._t.loc.numpy().dtype)), - atol=config.ATOL.get(str(self._t.loc.numpy().dtype))) + atol=config.ATOL.get(str(self._t.loc.numpy().dtype)), + ) def _np_forward(self, x): return self.loc + self.scale * x @@ -232,7 +271,8 @@ class TestAffineTransform(unittest.TestCase): self._t.inverse_log_det_jacobian(paddle.to_tensor(y)).numpy(), self._np_inverse_jacobian(y), rtol=config.RTOL.get(str(self._t.loc.numpy().dtype)), - atol=config.ATOL.get(str(self._t.loc.numpy().dtype))) + atol=config.ATOL.get(str(self._t.loc.numpy().dtype)), + ) def test_forward_log_det_jacobian(self): x = np.random.random(self.scale.shape) @@ -240,24 +280,26 @@ class TestAffineTransform(unittest.TestCase): self._t.forward_log_det_jacobian(paddle.to_tensor(x)).numpy(), self._np_forward_jacobian(x), rtol=config.RTOL.get(str(self._t.loc.numpy().dtype)), - atol=config.ATOL.get(str(self._t.loc.numpy().dtype))) + atol=config.ATOL.get(str(self._t.loc.numpy().dtype)), + ) def test_forward_shape(self): shape = self.loc.shape self.assertEqual( tuple(self._t.forward_shape(shape)), - np.broadcast(np.random.random(shape), self.loc, self.scale).shape) + np.broadcast(np.random.random(shape), self.loc, self.scale).shape, + ) def test_inverse_shape(self): shape = self.scale.shape self.assertEqual( tuple(self._t.forward_shape(shape)), - np.broadcast(np.random.random(shape), self.loc, self.scale).shape) + np.broadcast(np.random.random(shape), self.loc, self.scale).shape, + ) @param.place(config.DEVICES) class TestExpTransform(unittest.TestCase): - def setUp(self): self._t = transform.ExpTransform() @@ -274,48 +316,73 @@ class TestExpTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([0., 1., 2., - 3.]), np.exp(np.array([0., 1., 2., 3.]))), - (np.array([[0., 1., 2., 3.], [-5., 6., 7., 8.]]), - np.exp(np.array([[0., 1., 2., 3.], [-5., 6., 7., - 8.]])))]) + @param.param_func( + [ + ( + np.array([0.0, 1.0, 2.0, 3.0]), + np.exp(np.array([0.0, 1.0, 2.0, 3.0])), + ), + ( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]), + np.exp(np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]])), + ), + ] + ) def test_forward(self, input, expected): - np.testing.assert_allclose(self._t.forward( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), np.log(np.array([1., 2., 3.]))), - (np.array([[1., 2., 3.], [6., 7., 8.]]), - np.log(np.array([[1., 2., 3.], [6., 7., 8.]])))]) + np.testing.assert_allclose( + self._t.forward(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + 
atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]), np.log(np.array([1.0, 2.0, 3.0]))), + ( + np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]), + np.log(np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]])), + ), + ] + ) def test_inverse(self, input, expected): - np.testing.assert_allclose(self._t.inverse( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + np.testing.assert_allclose( + self._t.inverse(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_forward_log_det_jacobian(self, input): - np.testing.assert_allclose(self._t.forward_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - self._np_forward_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward_log_det_jacobian(paddle.to_tensor(input)).numpy(), + self._np_forward_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward_jacobian(self, x): return x - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_inverse_log_det_jacobian(self, input): - np.testing.assert_allclose(self._t.inverse_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - self._np_inverse_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.inverse_log_det_jacobian(paddle.to_tensor(input)).numpy(), + self._np_inverse_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_inverse_jacobian(self, y): return -self._np_forward_jacobian(np.log(y)) @@ -331,127 +398,248 @@ class TestExpTransform(unittest.TestCase): @param.place(config.DEVICES) class TestChainTransform(unittest.TestCase): - - @param.param_func([(paddle.distribution.Transform, TypeError), - ([0], TypeError)]) + @param.param_func( + [(paddle.distribution.Transform, TypeError), ([0], TypeError)] + ) def test_init_exception(self, transforms, exception): with self.assertRaises(exception): paddle.distribution.ChainTransform(transforms) - @param.param_func(((transform.ChainTransform( - (transform.AbsTransform(), - transform.AffineTransform(paddle.rand([1]), paddle.rand([1])))), - False), (transform.ChainTransform(( - transform.AffineTransform(paddle.rand([1]), - paddle.rand([1])), - transform.ExpTransform(), - )), True))) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.AbsTransform(), + transform.AffineTransform( + paddle.rand([1]), paddle.rand([1]) + ), + ) + ), + False, + ), + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.rand([1]), paddle.rand([1]) + ), + transform.ExpTransform(), + ) + ), + True, + ), + ) + ) def test_is_injective(self, chain, expected): self.assertEqual(chain._is_injective(), expected) - @param.param_func(((transform.ChainTransform( - (transform.IndependentTransform(transform.ExpTransform(), 1), - transform.IndependentTransform(transform.ExpTransform(), 
10), - transform.IndependentTransform(transform.ExpTransform(), 8))), - variable.Independent(variable.real, 10)), )) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.IndependentTransform( + transform.ExpTransform(), 1 + ), + transform.IndependentTransform( + transform.ExpTransform(), 10 + ), + transform.IndependentTransform( + transform.ExpTransform(), 8 + ), + ) + ), + variable.Independent(variable.real, 10), + ), + ) + ) def test_domain(self, input, expected): self.assertIsInstance(input._domain, type(expected)) self.assertEqual(input._domain.event_rank, expected.event_rank) self.assertEqual(input._domain.is_discrete, expected.is_discrete) - @param.param_func(((transform.ChainTransform( - (transform.IndependentTransform(transform.ExpTransform(), 9), - transform.IndependentTransform(transform.ExpTransform(), 4), - transform.IndependentTransform(transform.ExpTransform(), 5))), - variable.Independent(variable.real, 9)), )) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.IndependentTransform( + transform.ExpTransform(), 9 + ), + transform.IndependentTransform( + transform.ExpTransform(), 4 + ), + transform.IndependentTransform( + transform.ExpTransform(), 5 + ), + ) + ), + variable.Independent(variable.real, 9), + ), + ) + ) def test_codomain(self, input, expected): self.assertIsInstance(input._codomain, variable.Independent) self.assertEqual(input._codomain.event_rank, expected.event_rank) self.assertEqual(input._codomain.is_discrete, expected.is_discrete) - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.to_tensor(0.0), - paddle.to_tensor(1.0)), - transform.ExpTransform())), np.array([0., 1., 2., 3.]), - np.exp(np.array([0., 1., 2., 3.]) * 1.0)), - (transform.ChainTransform( - (transform.ExpTransform(), transform.TanhTransform())), - np.array([[0., -1., 2., -3.], [-5., 6., 7., -8.]]), - np.tanh(np.exp(np.array([[0., -1., 2., -3.], [-5., 6., 7., -8.]])))) - ]) + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.to_tensor(0.0), paddle.to_tensor(1.0) + ), + transform.ExpTransform(), + ) + ), + np.array([0.0, 1.0, 2.0, 3.0]), + np.exp(np.array([0.0, 1.0, 2.0, 3.0]) * 1.0), + ), + ( + transform.ChainTransform( + (transform.ExpTransform(), transform.TanhTransform()) + ), + np.array([[0.0, -1.0, 2.0, -3.0], [-5.0, 6.0, 7.0, -8.0]]), + np.tanh( + np.exp( + np.array( + [[0.0, -1.0, 2.0, -3.0], [-5.0, 6.0, 7.0, -8.0]] + ) + ) + ), + ), + ] + ) def test_forward(self, chain, input, expected): - np.testing.assert_allclose(chain.forward( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.to_tensor(0.0), - paddle.to_tensor(-1.0)), - transform.ExpTransform())), np.array([0., 1., 2., 3.]), - np.log(np.array([0., 1., 2., 3.])) / (-1.0)), - (transform.ChainTransform( - (transform.ExpTransform(), transform.TanhTransform())), - np.array([[0., 1., 2., 3.], [5., 6., 7., 8.]]), - np.log(np.arctanh(np.array([[0., 1., 2., 3.], [5., 6., 7., 8.]])))) - ]) + np.testing.assert_allclose( + chain.forward(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.to_tensor(0.0), paddle.to_tensor(-1.0) + ), + transform.ExpTransform(), + ) + ), + 
np.array([0.0, 1.0, 2.0, 3.0]), + np.log(np.array([0.0, 1.0, 2.0, 3.0])) / (-1.0), + ), + ( + transform.ChainTransform( + (transform.ExpTransform(), transform.TanhTransform()) + ), + np.array([[0.0, 1.0, 2.0, 3.0], [5.0, 6.0, 7.0, 8.0]]), + np.log( + np.arctanh( + np.array([[0.0, 1.0, 2.0, 3.0], [5.0, 6.0, 7.0, 8.0]]) + ) + ), + ), + ] + ) def test_inverse(self, chain, input, expected): - np.testing.assert_allclose(chain.inverse( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.to_tensor(0.0), - paddle.to_tensor(-1.0)), - transform.PowerTransform(paddle.to_tensor(2.0)))), - np.array([1., 2., 3.]), np.log(2. * np.array([1., 2., 3.]))), - ]) + np.testing.assert_allclose( + chain.inverse(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.to_tensor(0.0), paddle.to_tensor(-1.0) + ), + transform.PowerTransform(paddle.to_tensor(2.0)), + ) + ), + np.array([1.0, 2.0, 3.0]), + np.log(2.0 * np.array([1.0, 2.0, 3.0])), + ), + ] + ) def test_forward_log_det_jacobian(self, chain, input, expected): - np.testing.assert_allclose(chain.forward_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.to_tensor(0.0), - paddle.to_tensor(-1.0)), - transform.ExpTransform())), (2, 3, 5), (2, 3, 5)), - ]) + np.testing.assert_allclose( + chain.forward_log_det_jacobian(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.to_tensor(0.0), paddle.to_tensor(-1.0) + ), + transform.ExpTransform(), + ) + ), + (2, 3, 5), + (2, 3, 5), + ), + ] + ) def test_forward_shape(self, chain, shape, expected_shape): self.assertEqual(chain.forward_shape(shape), expected_shape) - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.to_tensor(0.0), - paddle.to_tensor(-1.0)), - transform.ExpTransform())), (2, 3, 5), (2, 3, 5)), - ]) + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.to_tensor(0.0), paddle.to_tensor(-1.0) + ), + transform.ExpTransform(), + ) + ), + (2, 3, 5), + (2, 3, 5), + ), + ] + ) def test_inverse_shape(self, chain, shape, expected_shape): self.assertEqual(chain.inverse_shape(shape), expected_shape) @param.place(config.DEVICES) @param.param_cls( - (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', 'x'), [ - ('rank-over-zero', transform.ExpTransform(), 2, np.random.rand(2, 3, - 3)), - ]) + (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', 'x'), + [ + ( + 'rank-over-zero', + transform.ExpTransform(), + 2, + np.random.rand(2, 3, 3), + ), + ], +) class TestIndependentTransform(unittest.TestCase): - def setUp(self): - self._t = transform.IndependentTransform(self.base, - self.reinterpreted_batch_rank) + self._t = transform.IndependentTransform( + self.base, self.reinterpreted_batch_rank + ) - @param.param_func([(0, 0, TypeError), - (paddle.distribution.Transform(), -1, ValueError)]) + @param.param_func( + [(0, 0, 
TypeError), (paddle.distribution.Transform(), -1, ValueError)] + ) def test_init_exception(self, base, rank, exc): with self.assertRaises(exc): paddle.distribution.IndependentTransform(base, rank) @@ -463,42 +651,52 @@ class TestIndependentTransform(unittest.TestCase): self.assertTrue(isinstance(self._t._domain, variable.Independent)) self.assertEqual( self._t._domain.event_rank, - self.base._domain.event_rank + self.reinterpreted_batch_rank) - self.assertEqual(self._t._domain.is_discrete, - self.base._domain.is_discrete) + self.base._domain.event_rank + self.reinterpreted_batch_rank, + ) + self.assertEqual( + self._t._domain.is_discrete, self.base._domain.is_discrete + ) def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Independent)) self.assertEqual( self._t._codomain.event_rank, - self.base._codomain.event_rank + self.reinterpreted_batch_rank) - self.assertEqual(self._t._codomain.is_discrete, - self.base._codomain.is_discrete) + self.base._codomain.event_rank + self.reinterpreted_batch_rank, + ) + self.assertEqual( + self._t._codomain.is_discrete, self.base._codomain.is_discrete + ) def test_forward(self): np.testing.assert_allclose( self._t.forward(paddle.to_tensor(self.x)).numpy(), self.base.forward(paddle.to_tensor(self.x)).numpy(), rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + atol=config.ATOL.get(str(self.x.dtype)), + ) def test_inverse(self): np.testing.assert_allclose( self._t.inverse(paddle.to_tensor(self.x)).numpy(), self.base.inverse(paddle.to_tensor(self.x)).numpy(), rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + atol=config.ATOL.get(str(self.x.dtype)), + ) def test_forward_log_det_jacobian(self): actual = self._t.forward_log_det_jacobian(paddle.to_tensor(self.x)) - self.assertEqual(tuple(actual.shape), - self.x.shape[:-self.reinterpreted_batch_rank]) - expected = self.base.forward_log_det_jacobian(paddle.to_tensor( - self.x)).sum(list(range(-self.reinterpreted_batch_rank, 0))) - np.testing.assert_allclose(actual.numpy(), - expected.numpy(), - rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + self.assertEqual( + tuple(actual.shape), self.x.shape[: -self.reinterpreted_batch_rank] + ) + expected = self.base.forward_log_det_jacobian( + paddle.to_tensor(self.x) + ).sum(list(range(-self.reinterpreted_batch_rank, 0))) + np.testing.assert_allclose( + actual.numpy(), + expected.numpy(), + rtol=config.RTOL.get(str(self.x.dtype)), + atol=config.ATOL.get(str(self.x.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -511,13 +709,12 @@ class TestIndependentTransform(unittest.TestCase): @param.place(config.DEVICES) class TestPowerTransform(unittest.TestCase): - def setUp(self): - self._t = transform.PowerTransform(paddle.to_tensor(2.)) + self._t = transform.PowerTransform(paddle.to_tensor(2.0)) def test_init(self): with self.assertRaises(TypeError): - transform.PowerTransform(1.) 
+ transform.PowerTransform(1.0) def test_is_injective(self): self.assertTrue(self._t._is_injective()) @@ -532,34 +729,50 @@ class TestPowerTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([2.]), np.array([0., -1., 2.]), - np.power(np.array([0., -1., 2.]), 2.)), - (np.array([[0.], [3.]]), np.array([[1., 0.], [5., 6.]]), - np.power(np.array([[1., 0.], [5., 6.]]), - np.array([[0.], [3.]])))]) + @param.param_func( + [ + ( + np.array([2.0]), + np.array([0.0, -1.0, 2.0]), + np.power(np.array([0.0, -1.0, 2.0]), 2.0), + ), + ( + np.array([[0.0], [3.0]]), + np.array([[1.0, 0.0], [5.0, 6.0]]), + np.power( + np.array([[1.0, 0.0], [5.0, 6.0]]), np.array([[0.0], [3.0]]) + ), + ), + ] + ) def test_forward(self, power, x, y): t = transform.PowerTransform(paddle.to_tensor(power)) - np.testing.assert_allclose(t.forward(paddle.to_tensor(x)).numpy(), - y, - rtol=config.RTOL.get(str(x.dtype)), - atol=config.ATOL.get(str(x.dtype))) + np.testing.assert_allclose( + t.forward(paddle.to_tensor(x)).numpy(), + y, + rtol=config.RTOL.get(str(x.dtype)), + atol=config.ATOL.get(str(x.dtype)), + ) - @param.param_func([(np.array([2.]), np.array([4.]), np.array([2.]))]) + @param.param_func([(np.array([2.0]), np.array([4.0]), np.array([2.0]))]) def test_inverse(self, power, y, x): t = transform.PowerTransform(paddle.to_tensor(power)) - np.testing.assert_allclose(t.inverse(paddle.to_tensor(y)).numpy(), - x, - rtol=config.RTOL.get(str(x.dtype)), - atol=config.ATOL.get(str(x.dtype))) + np.testing.assert_allclose( + t.inverse(paddle.to_tensor(y)).numpy(), + x, + rtol=config.RTOL.get(str(x.dtype)), + atol=config.ATOL.get(str(x.dtype)), + ) - @param.param_func(((np.array([2.]), np.array([3., 1.4, 0.8])), )) + @param.param_func(((np.array([2.0]), np.array([3.0, 1.4, 0.8])),)) def test_forward_log_det_jacobian(self, power, x): t = transform.PowerTransform(paddle.to_tensor(power)) - np.testing.assert_allclose(t.forward_log_det_jacobian( - paddle.to_tensor(x)).numpy(), - self._np_forward_jacobian(power, x), - rtol=config.RTOL.get(str(x.dtype)), - atol=config.ATOL.get(str(x.dtype))) + np.testing.assert_allclose( + t.forward_log_det_jacobian(paddle.to_tensor(x)).numpy(), + self._np_forward_jacobian(power, x), + rtol=config.RTOL.get(str(x.dtype)), + atol=config.ATOL.get(str(x.dtype)), + ) def _np_forward_jacobian(self, alpha, x): return np.abs(np.log(alpha * np.power(x, alpha - 1))) @@ -575,7 +788,6 @@ class TestPowerTransform(unittest.TestCase): @param.place(config.DEVICES) class TestTanhTransform(unittest.TestCase): - def setUp(self): self._t = transform.TanhTransform() @@ -594,57 +806,83 @@ class TestTanhTransform(unittest.TestCase): self.assertEqual(self._t._codomain._constraint._lower, -1) self.assertEqual(self._t._codomain._constraint._upper, 1) - @param.param_func([(np.array([0., 1., 2., - 3.]), np.tanh(np.array([0., 1., 2., 3.]))), - (np.array([[0., 1., 2., 3.], [-5., 6., 7., 8.]]), - np.tanh(np.array([[0., 1., 2., 3.], [-5., 6., 7., - 8.]])))]) + @param.param_func( + [ + ( + np.array([0.0, 1.0, 2.0, 3.0]), + np.tanh(np.array([0.0, 1.0, 2.0, 3.0])), + ), + ( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]), + np.tanh( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]) + ), + ), + ] + ) def test_forward(self, input, expected): - np.testing.assert_allclose(self._t.forward( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - 
atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., - 3.]), np.arctanh(np.array([1., 2., 3.]))), - (np.array([[1., 2., 3.], [6., 7., 8.]]), - np.arctanh(np.array([[1., 2., 3.], [6., 7., 8.]])))]) + np.testing.assert_allclose( + self._t.forward(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]), np.arctanh(np.array([1.0, 2.0, 3.0]))), + ( + np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]), + np.arctanh(np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]])), + ), + ] + ) def test_inverse(self, input, expected): - np.testing.assert_allclose(self._t.inverse( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + np.testing.assert_allclose( + self._t.inverse(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_forward_log_det_jacobian(self, input): - np.testing.assert_allclose(self._t.forward_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - self._np_forward_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward_log_det_jacobian(paddle.to_tensor(input)).numpy(), + self._np_forward_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward_jacobian(self, x): - return 2. * (np.log(2.) - x - self._np_softplus(-2. * x)) + return 2.0 * (np.log(2.0) - x - self._np_softplus(-2.0 * x)) - def _np_softplus(self, x, beta=1., threshold=20.): + def _np_softplus(self, x, beta=1.0, threshold=20.0): if np.any(beta * x > threshold): return x - return 1. 
/ beta * np.log1p(np.exp(beta * x)) + return 1.0 / beta * np.log1p(np.exp(beta * x)) def _np_inverse_jacobian(self, y): return -self._np_forward_jacobian(np.arctanh(y)) - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_inverse_log_det_jacobian(self, input): - np.testing.assert_allclose(self._t.inverse_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - self._np_inverse_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.inverse_log_det_jacobian(paddle.to_tensor(input)).numpy(), + self._np_inverse_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -656,20 +894,24 @@ class TestTanhTransform(unittest.TestCase): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'in_event_shape', 'out_event_shape'), [ - ('regular_shape', (2, 3), (3, 2)), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'in_event_shape', 'out_event_shape'), + [ + ('regular_shape', (2, 3), (3, 2)), + ], +) class TestReshapeTransform(unittest.TestCase): - def setUp(self): - self._t = transform.ReshapeTransform(self.in_event_shape, - self.out_event_shape) + self._t = transform.ReshapeTransform( + self.in_event_shape, self.out_event_shape + ) @param.param_func([(0, 0, TypeError), ((1, 2), (1, 3), ValueError)]) def test_init_exception(self, in_event_shape, out_event_shape, exc): with self.assertRaises(exc): - paddle.distribution.ReshapeTransform(in_event_shape, - out_event_shape) + paddle.distribution.ReshapeTransform( + in_event_shape, out_event_shape + ) def test_is_injective(self): self.assertTrue(self._t._is_injective()) @@ -682,24 +924,30 @@ class TestReshapeTransform(unittest.TestCase): def test_forward(self): x = paddle.ones(self.in_event_shape) - np.testing.assert_allclose(self._t.forward(x), - paddle.ones(self.out_event_shape), - rtol=config.RTOL.get(str(x.numpy().dtype)), - atol=config.ATOL.get(str(x.numpy().dtype))) + np.testing.assert_allclose( + self._t.forward(x), + paddle.ones(self.out_event_shape), + rtol=config.RTOL.get(str(x.numpy().dtype)), + atol=config.ATOL.get(str(x.numpy().dtype)), + ) def test_inverse(self): x = paddle.ones(self.out_event_shape) - np.testing.assert_allclose(self._t.inverse(x).numpy(), - paddle.ones(self.in_event_shape).numpy(), - rtol=config.RTOL.get(str(x.numpy().dtype)), - atol=config.ATOL.get(str(x.numpy().dtype))) + np.testing.assert_allclose( + self._t.inverse(x).numpy(), + paddle.ones(self.in_event_shape).numpy(), + rtol=config.RTOL.get(str(x.numpy().dtype)), + atol=config.ATOL.get(str(x.numpy().dtype)), + ) def test_forward_log_det_jacobian(self): x = paddle.ones(self.in_event_shape) - np.testing.assert_allclose(self._t.forward_log_det_jacobian(x).numpy(), - paddle.zeros([1]).numpy(), - rtol=config.RTOL.get(str(x.numpy().dtype)), - atol=config.ATOL.get(str(x.numpy().dtype))) + np.testing.assert_allclose( + self._t.forward_log_det_jacobian(x).numpy(), + paddle.zeros([1]).numpy(), + rtol=config.RTOL.get(str(x.numpy().dtype)), + atol=config.ATOL.get(str(x.numpy().dtype)), + ) def test_in_event_shape(self): self.assertEqual(self._t.in_event_shape, self.in_event_shape) @@ -718,14 +966,13 @@ class TestReshapeTransform(unittest.TestCase): 
self._t.inverse_shape(shape) -def _np_softplus(x, beta=1., threshold=20.): +def _np_softplus(x, beta=1.0, threshold=20.0): if np.any(beta * x > threshold): return x - return 1. / beta * np.log1p(np.exp(beta * x)) + return 1.0 / beta * np.log1p(np.exp(beta * x)) class TestSigmoidTransform(unittest.TestCase): - def setUp(self): self._t = transform.SigmoidTransform() @@ -738,32 +985,43 @@ class TestSigmoidTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Variable)) - @param.param_func(((np.ones( - (5, 10)), 1 / (1 + np.exp(-np.ones((5, 10))))), )) + @param.param_func( + ((np.ones((5, 10)), 1 / (1 + np.exp(-np.ones((5, 10))))),) + ) def test_forward(self, input, expected): - np.testing.assert_allclose(self._t.forward(paddle.to_tensor(input)), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward(paddle.to_tensor(input)), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func( - ((np.ones(10), np.log(np.ones(10)) - np.log1p(-np.ones(10))), )) + ((np.ones(10), np.log(np.ones(10)) - np.log1p(-np.ones(10))),) + ) def test_inverse(self, input, expected): - np.testing.assert_allclose(self._t.inverse( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.inverse(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func( - ((np.ones(10), - -_np_softplus(-np.ones(10)) - _np_softplus(np.ones(10))), )) + ( + ( + np.ones(10), + -_np_softplus(-np.ones(10)) - _np_softplus(np.ones(10)), + ), + ) + ) def test_forward_log_det_jacobian(self, input, expected): - np.testing.assert_allclose(self._t.forward_log_det_jacobian( - paddle.to_tensor(input)).numpy(), - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward_log_det_jacobian(paddle.to_tensor(input)).numpy(), + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -775,7 +1033,6 @@ class TestSigmoidTransform(unittest.TestCase): class TestSoftmaxTransform(unittest.TestCase): - def setUp(self): self._t = transform.SoftmaxTransform() @@ -788,19 +1045,23 @@ class TestSoftmaxTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Variable)) - @param.param_func(((np.random.random((5, 10)), ), )) + @param.param_func(((np.random.random((5, 10)),),)) def test_forward(self, input): - np.testing.assert_allclose(self._t.forward(paddle.to_tensor(input)), - self._np_forward(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.forward(paddle.to_tensor(input)), + self._np_forward(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) - @param.param_func(((np.random.random(10), ), )) + @param.param_func(((np.random.random(10),),)) def test_inverse(self, input): - np.testing.assert_allclose(self._t.inverse(paddle.to_tensor(input)), - self._np_inverse(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + 
np.testing.assert_allclose( + self._t.inverse(paddle.to_tensor(input)), + self._np_inverse(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward(self, x): x = np.exp(x - np.max(x, -1, keepdims=True)[0]) @@ -833,7 +1094,6 @@ class TestSoftmaxTransform(unittest.TestCase): class TestStickBreakingTransform(unittest.TestCase): - def setUp(self): self._t = transform.StickBreakingTransform() @@ -846,13 +1106,14 @@ class TestStickBreakingTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Variable)) - @param.param_func(((np.random.random((10)), ), )) + @param.param_func(((np.random.random((10)),),)) def test_forward(self, input): - np.testing.assert_allclose(self._t.inverse( - self._t.forward(paddle.to_tensor(input))), - input, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + self._t.inverse(self._t.forward(paddle.to_tensor(input))), + input, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((2, 3, 5), (2, 3, 6))]) def test_forward_shape(self, shape, expected_shape): @@ -862,19 +1123,22 @@ class TestStickBreakingTransform(unittest.TestCase): def test_inverse_shape(self, shape, expected_shape): self.assertEqual(self._t.inverse_shape(shape), expected_shape) - @param.param_func(((np.random.random((10)), ), )) + @param.param_func(((np.random.random((10)),),)) def test_forward_log_det_jacobian(self, x): self.assertEqual( - self._t.forward_log_det_jacobian(paddle.to_tensor(x)).shape, [1]) + self._t.forward_log_det_jacobian(paddle.to_tensor(x)).shape, [1] + ) # Todo @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'transforms', 'axis'), [ - ('simple_one_transform', [transform.ExpTransform()], 0), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'transforms', 'axis'), + [ + ('simple_one_transform', [transform.ExpTransform()], 0), + ], +) class TestStackTransform(unittest.TestCase): - def setUp(self): self._t = transform.StackTransform(self.transforms, self.axis) @@ -887,25 +1151,42 @@ class TestStackTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Stack)) - @param.param_func([(np.array([[0., 1., 2., 3.]]), ), - (np.array([[-5., 6., 7., 8.]]), )]) + @param.param_func( + [ + (np.array([[0.0, 1.0, 2.0, 3.0]]),), + (np.array([[-5.0, 6.0, 7.0, 8.0]]),), + ] + ) def test_forward(self, input): - self.assertEqual(tuple(self._t.forward(paddle.to_tensor(input)).shape), - input.shape) + self.assertEqual( + tuple(self._t.forward(paddle.to_tensor(input)).shape), input.shape + ) - @param.param_func([(np.array([[1., 2., 3.]]), ), - (np.array([[6., 7., 8.]], ), )]) + @param.param_func( + [ + (np.array([[1.0, 2.0, 3.0]]),), + ( + np.array( + [[6.0, 7.0, 8.0]], + ), + ), + ] + ) def test_inverse(self, input): - self.assertEqual(tuple(self._t.inverse(paddle.to_tensor(input)).shape), - input.shape) + self.assertEqual( + tuple(self._t.inverse(paddle.to_tensor(input)).shape), input.shape + ) - @param.param_func([(np.array([[1., 2., 3.]]), ), (np.array([[6., 7., - 8.]]), )]) + @param.param_func( + [(np.array([[1.0, 2.0, 3.0]]),), (np.array([[6.0, 7.0, 8.0]]),)] + ) def test_forward_log_det_jacobian(self, input): self.assertEqual( tuple( - self._t.forward_log_det_jacobian( - paddle.to_tensor(input)).shape), input.shape) + self._t.forward_log_det_jacobian(paddle.to_tensor(input)).shape + ), + input.shape, + ) 
@param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -918,9 +1199,13 @@ class TestStackTransform(unittest.TestCase): def test_axis(self): self.assertEqual(self._t.axis, self.axis) - @param.param_func([(0, 0, TypeError), ([0], 0, TypeError), - ([paddle.distribution.ExpTransform()], 'axis', TypeError) - ]) + @param.param_func( + [ + (0, 0, TypeError), + ([0], 0, TypeError), + ([paddle.distribution.ExpTransform()], 'axis', TypeError), + ] + ) def test_init_exception(self, transforms, axis, exc): with self.assertRaises(exc): paddle.distribution.StackTransform(transforms, axis) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform_static.py index 4af17de382e251b8f71cadb1665d57a0a13506e4..c5131468faa99eb32f37fbfbc4987cf156b25f22 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform_static.py @@ -28,14 +28,17 @@ paddle.enable_static() @param.place(config.DEVICES) class TestTransform(unittest.TestCase): - def setUp(self): self._t = transform.Transform() - @param.param_func([(transform.Type.BIJECTION, True), - (transform.Type.INJECTION, True), - (transform.Type.SURJECTION, False), - (transform.Type.OTHER, False)]) + @param.param_func( + [ + (transform.Type.BIJECTION, True), + (transform.Type.INJECTION, True), + (transform.Type.SURJECTION, False), + (transform.Type.OTHER, False), + ] + ) def test_is_injective(self, type, expected): transform.Transform._type = type self.assertEqual(self._t._is_injective(), expected) @@ -46,8 +49,12 @@ class TestTransform(unittest.TestCase): def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Real)) - @param.param_func([(np.array(0), NotImplementedError), - (np.random.random((2, 3)), NotImplementedError)]) + @param.param_func( + [ + (np.array(0), NotImplementedError), + (np.random.random((2, 3)), NotImplementedError), + ] + ) def test_forward(self, input, expected): with self.assertRaises(expected): exe = paddle.static.Executor() @@ -55,14 +62,19 @@ class TestTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): t = transform.Transform() - static_input = paddle.static.data('input', input.shape, - input.dtype) + static_input = paddle.static.data( + 'input', input.shape, input.dtype + ) output = t.forward(static_input) exe.run(sp) exe.run(mp, feed={'input': input}, fetch_list=[output]) - @param.param_func([(np.array(0), NotImplementedError), - (np.random.random((2, 3)), NotImplementedError)]) + @param.param_func( + [ + (np.array(0), NotImplementedError), + (np.random.random((2, 3)), NotImplementedError), + ] + ) def test_inverse(self, input, expected): with self.assertRaises(expected): exe = paddle.static.Executor() @@ -70,14 +82,19 @@ class TestTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): t = transform.Transform() - static_input = paddle.static.data('input', input.shape, - input.dtype) + static_input = paddle.static.data( + 'input', input.shape, input.dtype + ) output = t.inverse(static_input) exe.run(sp) exe.run(mp, feed={'input': input}, fetch_list=[output]) - @param.param_func([(np.array(0), NotImplementedError), - (paddle.rand((2, 3)), NotImplementedError)]) + @param.param_func( + [ + (np.array(0), NotImplementedError), + 
(paddle.rand((2, 3)), NotImplementedError), + ] + ) def test_forward_log_det_jacobian(self, input, expected): with self.assertRaises(expected): exe = paddle.static.Executor() @@ -85,14 +102,19 @@ class TestTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): t = transform.Transform() - static_input = paddle.static.data('input', input.shape, - input.dtype) + static_input = paddle.static.data( + 'input', input.shape, input.dtype + ) output = t.forward_log_det_jacobian(static_input) exe.run(sp) exe.run(mp, feed={'input': input}, fetch_list=[output]) - @param.param_func([(np.array(0), NotImplementedError), - (paddle.rand((2, 3)), NotImplementedError)]) + @param.param_func( + [ + (np.array(0), NotImplementedError), + (paddle.rand((2, 3)), NotImplementedError), + ] + ) def test_inverse_log_det_jacobian(self, input, expected): with self.assertRaises(expected): exe = paddle.static.Executor() @@ -100,8 +122,9 @@ class TestTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): t = transform.Transform() - static_input = paddle.static.data('input', input.shape, - input.dtype) + static_input = paddle.static.data( + 'input', input.shape, input.dtype + ) output = t.inverse_log_det_jacobian(static_input) exe.run(sp) exe.run(mp, feed={'input': input}, fetch_list=[output]) @@ -119,7 +142,6 @@ class TestTransform(unittest.TestCase): @param.place(config.DEVICES) class TestAbsTransform(unittest.TestCase): - def setUp(self): self._t = transform.AbsTransform() @@ -136,9 +158,15 @@ class TestAbsTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([-1., 1., 0.]), np.array([1., 1., 0.])), - (np.array([[1., -1., -0.1], [-3., -0.1, 0]]), - np.array([[1., 1., 0.1], [3., 0.1, 0]]))]) + @param.param_func( + [ + (np.array([-1.0, 1.0, 0.0]), np.array([1.0, 1.0, 0.0])), + ( + np.array([[1.0, -1.0, -0.1], [-3.0, -0.1, 0]]), + np.array([[1.0, 1.0, 0.1], [3.0, 0.1, 0]]), + ), + ] + ) def test_forward(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -149,12 +177,14 @@ class TestAbsTransform(unittest.TestCase): output = t.forward(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1.]), (-np.array([1.]), np.array([1.])))]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func([(np.array([1.0]), (-np.array([1.0]), np.array([1.0])))]) def test_inverse(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -164,36 +194,43 @@ class TestAbsTransform(unittest.TestCase): static_input = paddle.static.data('input', input.shape, input.dtype) actual0, actual1 = t.inverse(static_input) exe.run(sp) - [actual0, actual1] = exe.run(mp, - feed={'input': input}, - fetch_list=[actual0, actual1]) + [actual0, actual1] = exe.run( + mp, feed={'input': input}, fetch_list=[actual0, actual1] + ) expected0, expected1 = expected - np.testing.assert_allclose(actual0, - expected0, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - np.testing.assert_allclose(actual1, - expected1, - rtol=config.RTOL.get(str(input.dtype)), - 
atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + actual0, + expected0, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + np.testing.assert_allclose( + actual1, + expected1, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def test_forward_log_det_jacobian(self): - input = np.random.random((10, )) + input = np.random.random((10,)) with self.assertRaises(NotImplementedError): exe = paddle.static.Executor() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): t = transform.AbsTransform() - static_input = paddle.static.data('input', input.shape, - input.dtype) + static_input = paddle.static.data( + 'input', input.shape, input.dtype + ) output = t.forward_log_det_jacobian(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - @param.param_func([ - (np.array([1.]), (np.array([0.]), np.array([0.]))), - ]) + @param.param_func( + [ + (np.array([1.0]), (np.array([0.0]), np.array([0.0]))), + ] + ) def test_inverse_log_det_jacobian(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -203,18 +240,22 @@ class TestAbsTransform(unittest.TestCase): static_input = paddle.static.data('input', input.shape, input.dtype) actual0, actual1 = t.inverse_log_det_jacobian(static_input) exe.run(sp) - [actual0, actual1] = exe.run(mp, - feed={'input': input}, - fetch_list=[actual0, actual1]) + [actual0, actual1] = exe.run( + mp, feed={'input': input}, fetch_list=[actual0, actual1] + ) expected0, expected1 = expected - np.testing.assert_allclose(actual0, - expected0, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - np.testing.assert_allclose(actual1, - expected1, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + actual0, + expected0, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + np.testing.assert_allclose( + actual1, + expected1, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -226,19 +267,22 @@ class TestAbsTransform(unittest.TestCase): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'loc', 'scale'), [ - ('normal', np.random.rand(8, 10), np.random.rand(8, 10)), - ('broadcast', np.random.rand(2, 10), np.random.rand(10)), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'loc', 'scale'), + [ + ('normal', np.random.rand(8, 10), np.random.rand(8, 10)), + ('broadcast', np.random.rand(2, 10), np.random.rand(10)), + ], +) class TestAffineTransform(unittest.TestCase): - def setUp(self): sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) self._t = transform.AffineTransform(loc, scale) def test_is_injective(self): @@ -261,24 +305,26 @@ class TestAffineTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', 
self.scale.shape, self.scale.dtype + ) t = transform.AffineTransform(loc, scale) - static_input = paddle.static.data('input', self.loc.shape, - self.loc.dtype) + static_input = paddle.static.data( + 'input', self.loc.shape, self.loc.dtype + ) output = t.forward(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'loc': self.loc, - 'scale': self.scale - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_forward(input), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + [output] = exe.run( + mp, + feed={'input': input, 'loc': self.loc, 'scale': self.scale}, + fetch_list=[output], + ) + np.testing.assert_allclose( + output, + self._np_forward(input), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_inverse(self): input = np.random.random(self.loc.shape) @@ -287,24 +333,26 @@ class TestAffineTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) t = transform.AffineTransform(loc, scale) - static_input = paddle.static.data('input', self.loc.shape, - self.loc.dtype) + static_input = paddle.static.data( + 'input', self.loc.shape, self.loc.dtype + ) output = t.inverse(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'loc': self.loc, - 'scale': self.scale - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_inverse(input), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + [output] = exe.run( + mp, + feed={'input': input, 'loc': self.loc, 'scale': self.scale}, + fetch_list=[output], + ) + np.testing.assert_allclose( + output, + self._np_inverse(input), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def _np_forward(self, x): return self.loc + self.scale * x @@ -325,23 +373,24 @@ class TestAffineTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) t = transform.AffineTransform(loc, scale) static_input = paddle.static.data('input', input.shape, input.dtype) output = t.inverse_log_det_jacobian(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'loc': self.loc, - 'scale': self.scale - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_inverse_jacobian(input), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + [output] = exe.run( + mp, + feed={'input': input, 'loc': self.loc, 'scale': self.scale}, + fetch_list=[output], + ) + np.testing.assert_allclose( + output, + self._np_inverse_jacobian(input), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_forward_log_det_jacobian(self): input = np.random.random(self.scale.shape) @@ -350,40 +399,42 @@ class TestAffineTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): loc = paddle.static.data('loc', self.loc.shape, self.loc.dtype) - scale = 
paddle.static.data('scale', self.scale.shape, - self.scale.dtype) + scale = paddle.static.data( + 'scale', self.scale.shape, self.scale.dtype + ) t = transform.AffineTransform(loc, scale) static_input = paddle.static.data('input', input.shape, input.dtype) output = t.forward_log_det_jacobian(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'loc': self.loc, - 'scale': self.scale - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_forward_jacobian(input), - rtol=config.RTOL.get(str(self.loc.dtype)), - atol=config.ATOL.get(str(self.loc.dtype))) + [output] = exe.run( + mp, + feed={'input': input, 'loc': self.loc, 'scale': self.scale}, + fetch_list=[output], + ) + np.testing.assert_allclose( + output, + self._np_forward_jacobian(input), + rtol=config.RTOL.get(str(self.loc.dtype)), + atol=config.ATOL.get(str(self.loc.dtype)), + ) def test_forward_shape(self): shape = self.loc.shape self.assertEqual( tuple(self._t.forward_shape(shape)), - np.broadcast(np.random.random(shape), self.loc, self.scale).shape) + np.broadcast(np.random.random(shape), self.loc, self.scale).shape, + ) def test_inverse_shape(self): shape = self.scale.shape self.assertEqual( tuple(self._t.forward_shape(shape)), - np.broadcast(np.random.random(shape), self.loc, self.scale).shape) + np.broadcast(np.random.random(shape), self.loc, self.scale).shape, + ) @param.place(config.DEVICES) class TestExpTransform(unittest.TestCase): - def setUp(self): self._t = transform.ExpTransform() @@ -400,11 +451,18 @@ class TestExpTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([0., 1., 2., - 3.]), np.exp(np.array([0., 1., 2., 3.]))), - (np.array([[0., 1., 2., 3.], [-5., 6., 7., 8.]]), - np.exp(np.array([[0., 1., 2., 3.], [-5., 6., 7., - 8.]])))]) + @param.param_func( + [ + ( + np.array([0.0, 1.0, 2.0, 3.0]), + np.exp(np.array([0.0, 1.0, 2.0, 3.0])), + ), + ( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]), + np.exp(np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]])), + ), + ] + ) def test_forward(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -415,14 +473,22 @@ class TestExpTransform(unittest.TestCase): output = t.forward(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), np.log(np.array([1., 2., 3.]))), - (np.array([[1., 2., 3.], [6., 7., 8.]]), - np.log(np.array([[1., 2., 3.], [6., 7., 8.]])))]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]), np.log(np.array([1.0, 2.0, 3.0]))), + ( + np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]), + np.log(np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]])), + ), + ] + ) def test_inverse(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -433,13 +499,19 @@ class TestExpTransform(unittest.TestCase): output = t.inverse(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), ), - 
(np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_forward_log_det_jacobian(self, input): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -450,16 +522,22 @@ class TestExpTransform(unittest.TestCase): output = t.forward_log_det_jacobian(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_forward_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + output, + self._np_forward_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward_jacobian(self, x): return x - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_inverse_log_det_jacobian(self, input): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -470,10 +548,12 @@ class TestExpTransform(unittest.TestCase): output = t.inverse_log_det_jacobian(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_inverse_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + output, + self._np_inverse_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_inverse_jacobian(self, y): return -self._np_forward_jacobian(np.log(y)) @@ -489,44 +569,102 @@ class TestExpTransform(unittest.TestCase): @param.place(config.DEVICES) class TestChainTransform(unittest.TestCase): - - @param.param_func(((transform.ChainTransform( - (transform.AbsTransform(), - transform.AffineTransform(paddle.rand([1]), paddle.rand([1])))), - False), (transform.ChainTransform(( - transform.AffineTransform(paddle.rand([1]), - paddle.rand([1])), - transform.ExpTransform(), - )), True))) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.AbsTransform(), + transform.AffineTransform( + paddle.rand([1]), paddle.rand([1]) + ), + ) + ), + False, + ), + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.rand([1]), paddle.rand([1]) + ), + transform.ExpTransform(), + ) + ), + True, + ), + ) + ) def test_is_injective(self, chain, expected): self.assertEqual(chain._is_injective(), expected) - @param.param_func(((transform.ChainTransform( - (transform.IndependentTransform(transform.ExpTransform(), 1), - transform.IndependentTransform(transform.ExpTransform(), 10), - transform.IndependentTransform(transform.ExpTransform(), 8))), - variable.Independent(variable.real, 10)), )) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.IndependentTransform( + transform.ExpTransform(), 1 + ), + transform.IndependentTransform( + transform.ExpTransform(), 10 + ), + transform.IndependentTransform( + transform.ExpTransform(), 8 + ), + ) + ), + variable.Independent(variable.real, 10), + ), + ) + ) def test_domain(self, input, expected): self.assertIsInstance(input._domain, type(expected)) self.assertEqual(input._domain.event_rank, expected.event_rank) 
self.assertEqual(input._domain.is_discrete, expected.is_discrete) - @param.param_func(((transform.ChainTransform( - (transform.IndependentTransform(transform.ExpTransform(), 9), - transform.IndependentTransform(transform.ExpTransform(), 4), - transform.IndependentTransform(transform.ExpTransform(), 5))), - variable.Independent(variable.real, 9)), )) + @param.param_func( + ( + ( + transform.ChainTransform( + ( + transform.IndependentTransform( + transform.ExpTransform(), 9 + ), + transform.IndependentTransform( + transform.ExpTransform(), 4 + ), + transform.IndependentTransform( + transform.ExpTransform(), 5 + ), + ) + ), + variable.Independent(variable.real, 9), + ), + ) + ) def test_codomain(self, input, expected): self.assertIsInstance(input._codomain, variable.Independent) self.assertEqual(input._codomain.event_rank, expected.event_rank) self.assertEqual(input._codomain.is_discrete, expected.is_discrete) - @param.param_func([ - (transform.ChainTransform( - (transform.ExpTransform(), transform.TanhTransform())), - np.array([[0., -1., 2., -3.], [-5., 6., 7., -8.]]), - np.tanh(np.exp(np.array([[0., -1., 2., -3.], [-5., 6., 7., -8.]])))) - ]) + @param.param_func( + [ + ( + transform.ChainTransform( + (transform.ExpTransform(), transform.TanhTransform()) + ), + np.array([[0.0, -1.0, 2.0, -3.0], [-5.0, 6.0, 7.0, -8.0]]), + np.tanh( + np.exp( + np.array( + [[0.0, -1.0, 2.0, -3.0], [-5.0, 6.0, 7.0, -8.0]] + ) + ) + ), + ) + ] + ) def test_forward(self, chain, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -537,17 +675,28 @@ class TestChainTransform(unittest.TestCase): output = t.forward(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([ - (transform.ChainTransform( - (transform.ExpTransform(), transform.TanhTransform())), - np.array([[0., 1., 2., 3.], [5., 6., 7., 8.]]), - np.log(np.arctanh(np.array([[0., 1., 2., 3.], [5., 6., 7., 8.]])))) - ]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + ( + transform.ChainTransform( + (transform.ExpTransform(), transform.TanhTransform()) + ), + np.array([[0.0, 1.0, 2.0, 3.0], [5.0, 6.0, 7.0, 8.0]]), + np.log( + np.arctanh( + np.array([[0.0, 1.0, 2.0, 3.0], [5.0, 6.0, 7.0, 8.0]]) + ) + ), + ) + ] + ) def test_inverse(self, chain, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -558,41 +707,69 @@ class TestChainTransform(unittest.TestCase): output = t.inverse(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.full([1], 0.0), - paddle.full([1], -1.0)), - transform.ExpTransform())), (2, 3, 5), (2, 3, 5)), - ]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.full([1], 0.0), paddle.full([1], -1.0) + ), + transform.ExpTransform(), + ) + ), + (2, 3, 5), + (2, 3, 5), + ), + ] + ) def test_forward_shape(self, chain, shape, 
expected_shape): self.assertEqual(chain.forward_shape(shape), expected_shape) - @param.param_func([ - (transform.ChainTransform( - (transform.AffineTransform(paddle.full([1], 0.0), - paddle.full([1], -1.0)), - transform.ExpTransform())), (2, 3, 5), (2, 3, 5)), - ]) + @param.param_func( + [ + ( + transform.ChainTransform( + ( + transform.AffineTransform( + paddle.full([1], 0.0), paddle.full([1], -1.0) + ), + transform.ExpTransform(), + ) + ), + (2, 3, 5), + (2, 3, 5), + ), + ] + ) def test_inverse_shape(self, chain, shape, expected_shape): self.assertEqual(chain.forward_shape(shape), expected_shape) @param.place(config.DEVICES) @param.param_cls( - (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', 'x'), [ - ('rank-over-zero', transform.ExpTransform(), 2, np.random.rand(2, 3, - 3)), - ]) + (param.TEST_CASE_NAME, 'base', 'reinterpreted_batch_rank', 'x'), + [ + ( + 'rank-over-zero', + transform.ExpTransform(), + 2, + np.random.rand(2, 3, 3), + ), + ], +) class TestIndependentTransform(unittest.TestCase): - def setUp(self): - self._t = transform.IndependentTransform(self.base, - self.reinterpreted_batch_rank) + self._t = transform.IndependentTransform( + self.base, self.reinterpreted_batch_rank + ) def test_is_injective(self): self.assertEqual(self._t._is_injective(), self.base._is_injective()) @@ -601,81 +778,98 @@ class TestIndependentTransform(unittest.TestCase): self.assertTrue(isinstance(self._t._domain, variable.Independent)) self.assertEqual( self._t._domain.event_rank, - self.base._domain.event_rank + self.reinterpreted_batch_rank) - self.assertEqual(self._t._domain.is_discrete, - self.base._domain.is_discrete) + self.base._domain.event_rank + self.reinterpreted_batch_rank, + ) + self.assertEqual( + self._t._domain.is_discrete, self.base._domain.is_discrete + ) def test_codomain(self): self.assertTrue(isinstance(self._t._codomain, variable.Independent)) self.assertEqual( self._t._codomain.event_rank, - self.base._codomain.event_rank + self.reinterpreted_batch_rank) - self.assertEqual(self._t._codomain.is_discrete, - self.base._codomain.is_discrete) + self.base._codomain.event_rank + self.reinterpreted_batch_rank, + ) + self.assertEqual( + self._t._codomain.is_discrete, self.base._codomain.is_discrete + ) def test_forward(self): exe = paddle.static.Executor() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): - t = transform.IndependentTransform(self.base, - self.reinterpreted_batch_rank) - static_input = paddle.static.data('input', self.x.shape, - self.x.dtype) + t = transform.IndependentTransform( + self.base, self.reinterpreted_batch_rank + ) + static_input = paddle.static.data( + 'input', self.x.shape, self.x.dtype + ) output = t.forward(static_input) expected = self.base.forward(static_input) exe.run(sp) - [output, expected] = exe.run(mp, - feed={'input': self.x}, - fetch_list=[output, expected]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + [output, expected] = exe.run( + mp, feed={'input': self.x}, fetch_list=[output, expected] + ) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(self.x.dtype)), + atol=config.ATOL.get(str(self.x.dtype)), + ) def test_inverse(self): exe = paddle.static.Executor() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): - t = transform.IndependentTransform(self.base, - self.reinterpreted_batch_rank) - static_input = 
paddle.static.data('input', self.x.shape, - self.x.dtype) + t = transform.IndependentTransform( + self.base, self.reinterpreted_batch_rank + ) + static_input = paddle.static.data( + 'input', self.x.shape, self.x.dtype + ) output = t.inverse(static_input) expected = self.base.inverse(static_input) exe.run(sp) - [output, expected] = exe.run(mp, - feed={'input': self.x}, - fetch_list=[output, expected]) - np.testing.assert_allclose(expected, - output, - rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + [output, expected] = exe.run( + mp, feed={'input': self.x}, fetch_list=[output, expected] + ) + np.testing.assert_allclose( + expected, + output, + rtol=config.RTOL.get(str(self.x.dtype)), + atol=config.ATOL.get(str(self.x.dtype)), + ) def test_forward_log_det_jacobian(self): exe = paddle.static.Executor() sp = paddle.static.Program() mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): - t = transform.IndependentTransform(self.base, - self.reinterpreted_batch_rank) - static_input = paddle.static.data('input', self.x.shape, - self.x.dtype) + t = transform.IndependentTransform( + self.base, self.reinterpreted_batch_rank + ) + static_input = paddle.static.data( + 'input', self.x.shape, self.x.dtype + ) output = t.forward_log_det_jacobian(static_input) expected = self.base.forward_log_det_jacobian( - static_input.sum(list(range(-self.reinterpreted_batch_rank, - 0)))) + static_input.sum(list(range(-self.reinterpreted_batch_rank, 0))) + ) exe.run(sp) - [actual, expected] = exe.run(mp, - feed={'input': self.x}, - fetch_list=[output, expected]) - self.assertEqual(tuple(actual.shape), - self.x.shape[:-self.reinterpreted_batch_rank]) - np.testing.assert_allclose(actual, - expected, - rtol=config.RTOL.get(str(self.x.dtype)), - atol=config.ATOL.get(str(self.x.dtype))) + [actual, expected] = exe.run( + mp, feed={'input': self.x}, fetch_list=[output, expected] + ) + self.assertEqual( + tuple(actual.shape), self.x.shape[: -self.reinterpreted_batch_rank] + ) + np.testing.assert_allclose( + actual, + expected, + rtol=config.RTOL.get(str(self.x.dtype)), + atol=config.ATOL.get(str(self.x.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -688,13 +882,12 @@ class TestIndependentTransform(unittest.TestCase): @param.place(config.DEVICES) class TestPowerTransform(unittest.TestCase): - def setUp(self): - self._t = transform.PowerTransform(paddle.full([1], 2.)) + self._t = transform.PowerTransform(paddle.full([1], 2.0)) def test_init(self): with self.assertRaises(TypeError): - transform.PowerTransform(1.) 
+ transform.PowerTransform(1.0) def test_is_injective(self): self.assertTrue(self._t._is_injective()) @@ -709,11 +902,22 @@ class TestPowerTransform(unittest.TestCase): self.assertEqual(self._t._codomain.event_rank, 0) self.assertEqual(self._t._codomain.is_discrete, False) - @param.param_func([(np.array([2.]), np.array([0., -1., 2.]), - np.power(np.array([0., -1., 2.]), 2.)), - (np.array([[0.], [3.]]), np.array([[1., 0.], [5., 6.]]), - np.power(np.array([[1., 0.], [5., 6.]]), - np.array([[0.], [3.]])))]) + @param.param_func( + [ + ( + np.array([2.0]), + np.array([0.0, -1.0, 2.0]), + np.power(np.array([0.0, -1.0, 2.0]), 2.0), + ), + ( + np.array([[0.0], [3.0]]), + np.array([[1.0, 0.0], [5.0, 6.0]]), + np.power( + np.array([[1.0, 0.0], [5.0, 6.0]]), np.array([[0.0], [3.0]]) + ), + ), + ] + ) def test_forward(self, power, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -724,18 +928,17 @@ class TestPowerTransform(unittest.TestCase): static_input = paddle.static.data('input', input.shape, input.dtype) output = t.forward(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'power': power - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([2.]), np.array([4.]), np.array([2.]))]) + [output] = exe.run( + mp, feed={'input': input, 'power': power}, fetch_list=[output] + ) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func([(np.array([2.0]), np.array([4.0]), np.array([2.0]))]) def test_inverse(self, power, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -746,18 +949,17 @@ class TestPowerTransform(unittest.TestCase): static_input = paddle.static.data('input', input.shape, input.dtype) output = t.inverse(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'power': power - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func(((np.array([2.]), np.array([3., 1.4, 0.8])), )) + [output] = exe.run( + mp, feed={'input': input, 'power': power}, fetch_list=[output] + ) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func(((np.array([2.0]), np.array([3.0, 1.4, 0.8])),)) def test_forward_log_det_jacobian(self, power, input): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -768,16 +970,15 @@ class TestPowerTransform(unittest.TestCase): static_input = paddle.static.data('input', input.shape, input.dtype) output = t.forward_log_det_jacobian(static_input) exe.run(sp) - [output] = exe.run(mp, - feed={ - 'input': input, - 'power': power - }, - fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_forward_jacobian(power, input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + [output] = exe.run( + mp, feed={'input': input, 'power': power}, fetch_list=[output] + ) + np.testing.assert_allclose( + output, + self._np_forward_jacobian(power, input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward_jacobian(self, alpha, x): return np.abs(np.log(alpha * np.power(x, alpha - 1))) @@ -793,7 +994,6 @@ class 
TestPowerTransform(unittest.TestCase): @param.place(config.DEVICES) class TestTanhTransform(unittest.TestCase): - def setUp(self): self._t = transform.TanhTransform() @@ -812,11 +1012,20 @@ class TestTanhTransform(unittest.TestCase): self.assertEqual(self._t._codomain._constraint._lower, -1) self.assertEqual(self._t._codomain._constraint._upper, 1) - @param.param_func([(np.array([0., 1., 2., - 3.]), np.tanh(np.array([0., 1., 2., 3.]))), - (np.array([[0., 1., 2., 3.], [-5., 6., 7., 8.]]), - np.tanh(np.array([[0., 1., 2., 3.], [-5., 6., 7., - 8.]])))]) + @param.param_func( + [ + ( + np.array([0.0, 1.0, 2.0, 3.0]), + np.tanh(np.array([0.0, 1.0, 2.0, 3.0])), + ), + ( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]), + np.tanh( + np.array([[0.0, 1.0, 2.0, 3.0], [-5.0, 6.0, 7.0, 8.0]]) + ), + ), + ] + ) def test_forward(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -827,15 +1036,22 @@ class TestTanhTransform(unittest.TestCase): output = t.forward(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., - 3.]), np.arctanh(np.array([1., 2., 3.]))), - (np.array([[1., 2., 3.], [6., 7., 8.]]), - np.arctanh(np.array([[1., 2., 3.], [6., 7., 8.]])))]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]), np.arctanh(np.array([1.0, 2.0, 3.0]))), + ( + np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]), + np.arctanh(np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]])), + ), + ] + ) def test_inverse(self, input, expected): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -846,13 +1062,19 @@ class TestTanhTransform(unittest.TestCase): output = t.inverse(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) - - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) + + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_forward_log_det_jacobian(self, input): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -863,24 +1085,30 @@ class TestTanhTransform(unittest.TestCase): output = t.forward_log_det_jacobian(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_forward_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + output, + self._np_forward_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) def _np_forward_jacobian(self, x): - return 2. * (np.log(2.) - x - self._np_softplus(-2. * x)) + return 2.0 * (np.log(2.0) - x - self._np_softplus(-2.0 * x)) - def _np_softplus(self, x, beta=1., threshold=20.): + def _np_softplus(self, x, beta=1.0, threshold=20.0): if np.any(beta * x > threshold): return x - return 1. 
/ beta * np.log1p(np.exp(beta * x)) + return 1.0 / beta * np.log1p(np.exp(beta * x)) def _np_inverse_jacobian(self, y): return -self._np_forward_jacobian(np.arctanh(y)) - @param.param_func([(np.array([1., 2., 3.]), ), - (np.array([[1., 2., 3.], [6., 7., 8.]]), )]) + @param.param_func( + [ + (np.array([1.0, 2.0, 3.0]),), + (np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0]]),), + ] + ) def test_inverse_log_det_jacobian(self, input): exe = paddle.static.Executor() sp = paddle.static.Program() @@ -891,10 +1119,12 @@ class TestTanhTransform(unittest.TestCase): output = t.inverse_log_det_jacobian(static_input) exe.run(sp) [output] = exe.run(mp, feed={'input': input}, fetch_list=[output]) - np.testing.assert_allclose(output, - self._np_inverse_jacobian(input), - rtol=config.RTOL.get(str(input.dtype)), - atol=config.ATOL.get(str(input.dtype))) + np.testing.assert_allclose( + output, + self._np_inverse_jacobian(input), + rtol=config.RTOL.get(str(input.dtype)), + atol=config.ATOL.get(str(input.dtype)), + ) @param.param_func([((), ()), ((2, 3, 5), (2, 3, 5))]) def test_forward_shape(self, shape, expected_shape): @@ -906,14 +1136,17 @@ class TestTanhTransform(unittest.TestCase): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'in_event_shape', 'out_event_shape'), [ - ('regular_shape', (2, 3), (3, 2)), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'in_event_shape', 'out_event_shape'), + [ + ('regular_shape', (2, 3), (3, 2)), + ], +) class TestReshapeTransform(unittest.TestCase): - def setUp(self): - self._t = transform.ReshapeTransform(self.in_event_shape, - self.out_event_shape) + self._t = transform.ReshapeTransform( + self.in_event_shape, self.out_event_shape + ) def test_is_injective(self): self.assertTrue(self._t._is_injective()) @@ -930,16 +1163,19 @@ class TestReshapeTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): x = paddle.ones(self.in_event_shape) - t = transform.ReshapeTransform(self.in_event_shape, - self.out_event_shape) + t = transform.ReshapeTransform( + self.in_event_shape, self.out_event_shape + ) output = self._t.forward(x) exe.run(sp) [output] = exe.run(mp, feed={}, fetch_list=[output]) expected = np.ones(self.out_event_shape) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(expected.dtype)), - atol=config.ATOL.get(str(expected.dtype))) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(expected.dtype)), + atol=config.ATOL.get(str(expected.dtype)), + ) def test_inverse(self): exe = paddle.static.Executor() @@ -947,17 +1183,20 @@ class TestReshapeTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): x = paddle.ones(self.out_event_shape) - t = transform.ReshapeTransform(self.in_event_shape, - self.out_event_shape) + t = transform.ReshapeTransform( + self.in_event_shape, self.out_event_shape + ) output = self._t.inverse(x) exe.run(sp) [output] = exe.run(mp, feed={}, fetch_list=[output]) expected = np.ones(self.in_event_shape) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(expected.dtype)), - atol=config.ATOL.get(str(expected.dtype))) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(expected.dtype)), + atol=config.ATOL.get(str(expected.dtype)), + ) def test_forward_log_det_jacobian(self): exe = paddle.static.Executor() @@ -965,16 +1204,19 @@ class TestReshapeTransform(unittest.TestCase): mp = paddle.static.Program() with paddle.static.program_guard(mp, sp): x 
= paddle.ones(self.in_event_shape) - t = transform.ReshapeTransform(self.in_event_shape, - self.out_event_shape) + t = transform.ReshapeTransform( + self.in_event_shape, self.out_event_shape + ) output = self._t.forward_log_det_jacobian(x) exe.run(sp) [output] = exe.run(mp, feed={}, fetch_list=[output]) expected = np.zeros([1]) - np.testing.assert_allclose(output, - expected, - rtol=config.RTOL.get(str(expected.dtype)), - atol=config.ATOL.get(str(expected.dtype))) + np.testing.assert_allclose( + output, + expected, + rtol=config.RTOL.get(str(expected.dtype)), + atol=config.ATOL.get(str(expected.dtype)), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution.py index c1aebf7e0d66f7876f0b86003218b9ab96956534..3c38c8cb4dfa399163e8ac03b6067339c1f34ef5 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution.py @@ -21,14 +21,21 @@ import parameterize as param @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'base', 'transforms'), - [('base_normal', paddle.distribution.Normal( - 0., 1.), [paddle.distribution.ExpTransform()])]) +@param.param_cls( + (param.TEST_CASE_NAME, 'base', 'transforms'), + [ + ( + 'base_normal', + paddle.distribution.Normal(0.0, 1.0), + [paddle.distribution.ExpTransform()], + ) + ], +) class TestIndependent(unittest.TestCase): - def setUp(self): self._t = paddle.distribution.TransformedDistribution( - self.base, self.transforms) + self.base, self.transforms + ) def _np_sum_rightmost(self, value, n): return np.sum(value, tuple(range(-n, 0))) if n > 0 else value @@ -39,7 +46,8 @@ class TestIndependent(unittest.TestCase): self.simple_log_prob(value, self.base, self.transforms), self._t.log_prob(value), rtol=config.RTOL.get(str(value.numpy().dtype)), - atol=config.ATOL.get(str(value.numpy().dtype))) + atol=config.ATOL.get(str(value.numpy().dtype)), + ) def simple_log_prob(self, value, base, transforms): log_prob = 0.0 diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution_static.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution_static.py index b9f54498f64d8aa05a52d9d0a9787a7e79ee7b91..f9c7ec6ef749f9b783b030054ebb1a651ee7a796 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transformed_distribution_static.py @@ -23,15 +23,21 @@ paddle.enable_static() @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'base', 'transforms'), - [('base_normal', paddle.distribution.Normal, - [paddle.distribution.ExpTransform()])]) +@param.param_cls( + (param.TEST_CASE_NAME, 'base', 'transforms'), + [ + ( + 'base_normal', + paddle.distribution.Normal, + [paddle.distribution.ExpTransform()], + ) + ], +) class TestIndependent(unittest.TestCase): - def setUp(self): value = np.array([0.5]) - loc = np.array([0.]) - scale = np.array([1.]) + loc = np.array([0.0]) + scale = np.array([1.0]) shape = [5, 10, 8] self.dtype = value.dtype exe = paddle.static.Executor() @@ -43,28 +49,32 @@ class TestIndependent(unittest.TestCase): static_scale = paddle.static.data('scale', scale.shape, scale.dtype) self.base = 
self.base(static_loc, static_scale) self._t = paddle.distribution.TransformedDistribution( - self.base, self.transforms) + self.base, self.transforms + ) actual_log_prob = self._t.log_prob(static_value) expected_log_prob = self.transformed_log_prob( - static_value, self.base, self.transforms) + static_value, self.base, self.transforms + ) sample_data = self._t.sample(shape) exe.run(sp) - [self.actual_log_prob, self.expected_log_prob, - self.sample_data] = exe.run( - mp, - feed={ - 'value': value, - 'loc': loc, - 'scale': scale - }, - fetch_list=[actual_log_prob, expected_log_prob, sample_data]) + [ + self.actual_log_prob, + self.expected_log_prob, + self.sample_data, + ] = exe.run( + mp, + feed={'value': value, 'loc': loc, 'scale': scale}, + fetch_list=[actual_log_prob, expected_log_prob, sample_data], + ) def test_log_prob(self): - np.testing.assert_allclose(self.actual_log_prob, - self.expected_log_prob, - rtol=config.RTOL.get(str(self.dtype)), - atol=config.ATOL.get(str(self.dtype))) + np.testing.assert_allclose( + self.actual_log_prob, + self.expected_log_prob, + rtol=config.RTOL.get(str(self.dtype)), + atol=config.ATOL.get(str(self.dtype)), + ) def transformed_log_prob(self, value, base, transforms): log_prob = 0.0 diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py index 6aafa9cb356ce4f723e7d673537386019a30cfbc..522e9bebce417ea850819d46fba4320b2d1142f7 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py @@ -27,7 +27,6 @@ np.random.seed(2022) class UniformNumpy(DistributionNumpy): - def __init__(self, low, high): self.low = np.array(low) self.high = np.array(high) @@ -37,8 +36,9 @@ class UniformNumpy(DistributionNumpy): def sample(self, shape): shape = tuple(shape) + (self.low + self.high).shape - return self.low + (np.random.uniform(size=shape) * - (self.high - self.low)) + return self.low + ( + np.random.uniform(size=shape) * (self.high - self.low) + ) def log_prob(self, value): lb = np.less(self.low, value).astype(self.low.dtype) @@ -55,7 +55,6 @@ class UniformNumpy(DistributionNumpy): class UniformTest(unittest.TestCase): - def setUp(self, use_gpu=False, batch_size=5, dims=6): self.use_gpu = use_gpu if not use_gpu: @@ -90,9 +89,9 @@ class UniformTest(unittest.TestCase): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[], dtype='float32' + ) def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6): sample, entropy, log_prob, probs = fetch_list @@ -104,14 +103,12 @@ class UniformTest(unittest.TestCase): np_p = np_uniform.probs(self.values_np) np.testing.assert_equal(sample.shape, np_sample.shape) - np.testing.assert_allclose(entropy, - np_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(log_prob, - np_lp, - rtol=tolerance, - atol=tolerance) + np.testing.assert_allclose( + entropy, np_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + log_prob, np_lp, rtol=tolerance, atol=tolerance + ) np.testing.assert_allclose(probs, np_p, rtol=tolerance, atol=tolerance) def test_uniform_distribution_static(self, sample_shape=7, tolerance=1e-6): @@ -127,13 +124,13 @@ class 
UniformTest(unittest.TestCase): feed_vars = { 'low': self.low_np, 'high': self.high_np, - 'values': self.values_np + 'values': self.values_np, } self.executor.run(fluid.default_startup_program()) - fetch_list = self.executor.run(program=self.test_program, - feed=feed_vars, - fetch_list=fetch_list) + fetch_list = self.executor.run( + program=self.test_program, feed=feed_vars, fetch_list=fetch_list + ) self.compare_with_numpy(fetch_list) @@ -157,7 +154,6 @@ class UniformTest(unittest.TestCase): class UniformTest2(UniformTest): - def init_numpy_data(self, batch_size, dims): # low ans high are 'int' self.low_np = int(np.random.uniform(-2, 1)) @@ -166,48 +162,48 @@ class UniformTest2(UniformTest): class UniformTest3(UniformTest): - def init_numpy_data(self, batch_size, dims): # test broadcast: low is float, high is numpy.ndarray with dtype 'float32'. self.low_np = np.random.uniform(-2, 1) - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float32') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float32' + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest4(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are numpy.ndarray with dtype 'float32'. self.low_np = np.random.randn(batch_size, dims).astype('float32') - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float32') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float32' + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest5(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are numpy.ndarray with dtype 'float64'. self.low_np = np.random.randn(batch_size, dims).astype('float64') - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float64') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float64' + ) self.values_np = np.random.randn(batch_size, dims).astype('float64') def init_dynamic_data(self, batch_size, dims): @@ -219,18 +215,18 @@ class UniformTest5(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float64') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float64' + ) class UniformTest6(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are Tensor with dtype 'VarType.FP32'. 
self.low_np = np.random.randn(batch_size, dims).astype('float32') - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float32') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float32' + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_dynamic_data(self, batch_size, dims): @@ -240,24 +236,24 @@ class UniformTest6(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data(name='low', - shape=[dims], - dtype='float32') - self.static_high = layers.data(name='high', - shape=[dims], - dtype='float32') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_low = layers.data( + name='low', shape=[dims], dtype='float32' + ) + self.static_high = layers.data( + name='high', shape=[dims], dtype='float32' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest7(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are Tensor with dtype 'VarType.FP64'. self.low_np = np.random.randn(batch_size, dims).astype('float64') - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float64') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float64' + ) self.values_np = np.random.randn(batch_size, dims).astype('float64') def init_dynamic_data(self, batch_size, dims): @@ -267,24 +263,24 @@ class UniformTest7(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data(name='low', - shape=[dims], - dtype='float64') - self.static_high = layers.data(name='high', - shape=[dims], - dtype='float64') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float64') + self.static_low = layers.data( + name='low', shape=[dims], dtype='float64' + ) + self.static_high = layers.data( + name='high', shape=[dims], dtype='float64' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float64' + ) class UniformTest8(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are Tensor with dtype 'VarType.FP64'. value's dtype is 'VarType.FP32'. self.low_np = np.random.randn(batch_size, dims).astype('float64') - self.high_np = np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float64') + self.high_np = np.random.uniform(5.0, 15.0, (batch_size, dims)).astype( + 'float64' + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_dynamic_data(self, batch_size, dims): @@ -294,77 +290,81 @@ class UniformTest8(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data(name='low', - shape=[dims], - dtype='float64') - self.static_high = layers.data(name='high', - shape=[dims], - dtype='float64') - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_low = layers.data( + name='low', shape=[dims], dtype='float64' + ) + self.static_high = layers.data( + name='high', shape=[dims], dtype='float64' + ) + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest9(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are numpy.ndarray with dtype 'float32'. # high < low. 
self.low_np = np.random.randn(batch_size, dims).astype('float32') - self.high_np = np.random.uniform(-10.0, -5.0, - (batch_size, dims)).astype('float32') + self.high_np = np.random.uniform( + -10.0, -5.0, (batch_size, dims) + ).astype('float32') self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest10(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are list. - self.low_np = np.random.randn(batch_size, - dims).astype('float32').tolist() - self.high_np = np.random.uniform( - 5.0, 15.0, (batch_size, dims)).astype('float32').tolist() + self.low_np = ( + np.random.randn(batch_size, dims).astype('float32').tolist() + ) + self.high_np = ( + np.random.uniform(5.0, 15.0, (batch_size, dims)) + .astype('float32') + .tolist() + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTest11(UniformTest): - def init_numpy_data(self, batch_size, dims): # low and high are tuple. self.low_np = tuple( - np.random.randn(batch_size, dims).astype('float32').tolist()) + np.random.randn(batch_size, dims).astype('float32').tolist() + ) self.high_np = tuple( - np.random.uniform(5.0, 15.0, - (batch_size, dims)).astype('float32').tolist()) + np.random.uniform(5.0, 15.0, (batch_size, dims)) + .astype('float32') + .tolist() + ) self.values_np = np.random.randn(batch_size, dims).astype('float32') def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data(name='values', - shape=[dims], - dtype='float32') + self.static_values = layers.data( + name='values', shape=[dims], dtype='float32' + ) class UniformTestSample(unittest.TestCase): - def setUp(self): self.init_param() @@ -382,7 +382,6 @@ class UniformTestSample(unittest.TestCase): class UniformTestSample2(UniformTestSample): - def init_param(self): self.low = -5.0 self.high = 2.0 diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_variable.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_variable.py index 982a47f5e180656d2ffce44845f7ff1cd92a98d5..ead60fa9740b83320b8595ccf28931e338190eca 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_variable.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_variable.py @@ -25,47 +25,52 @@ paddle.seed(2022) @param.param_cls( (param.TEST_CASE_NAME, 'is_discrete', 'event_rank', 'constraint'), - [('NotImplement', False, 0, constraint.Constraint())]) + [('NotImplement', False, 0, constraint.Constraint())], +) class TestVariable(unittest.TestCase): - def setUp(self): - self._var = variable.Variable(self.is_discrete, self.event_rank, - self.constraint) + self._var = variable.Variable( + self.is_discrete, self.event_rank, self.constraint + ) - @param.param_func([(1, )]) + 
@param.param_func([(1,)]) def test_costraint(self, value): with self.assertRaises(NotImplementedError): self._var.constraint(value) -@param.param_cls((param.TEST_CASE_NAME, 'base', 'rank'), - [('real_base', variable.real, 10)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'base', 'rank'), [('real_base', variable.real, 10)] +) class TestIndependent(unittest.TestCase): - def setUp(self): self._var = variable.Independent(self.base, self.rank) - @param.param_func([ - (paddle.rand([2, 3, 4]), ValueError), - ]) + @param.param_func( + [ + (paddle.rand([2, 3, 4]), ValueError), + ] + ) def test_costraint(self, value, expect): with self.assertRaises(expect): self._var.constraint(value) -@param.param_cls((param.TEST_CASE_NAME, 'vars', 'axis'), - [('real_base', [variable.real], 10)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'vars', 'axis'), [('real_base', [variable.real], 10)] +) class TestStack(unittest.TestCase): - def setUp(self): self._var = variable.Stack(self.vars, self.axis) def test_is_discrete(self): self.assertEqual(self._var.is_discrete, False) - @param.param_func([ - (paddle.rand([2, 3, 4]), ValueError), - ]) + @param.param_func( + [ + (paddle.rand([2, 3, 4]), ValueError), + ] + ) def test_costraint(self, value, expect): with self.assertRaises(expect): self._var.constraint(value) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_kl.py b/python/paddle/fluid/tests/unittests/distribution/test_kl.py index 582a9f5942f903b6ae3ff173919d468e903a59e0..5ce6d025422105eea0208694fdb7fd0736a0b832 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_kl.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_kl.py @@ -30,19 +30,26 @@ paddle.set_default_dtype('float64') @param.place(config.DEVICES) -@param.parameterize_cls((param.TEST_CASE_NAME, 'a1', 'b1', 'a2', 'b2'), [ - ('test_regular_input', 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random((4, 5)) + 1e-4), -]) +@param.parameterize_cls( + (param.TEST_CASE_NAME, 'a1', 'b1', 'a2', 'b2'), + [ + ( + 'test_regular_input', + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + ), + ], +) class TestKLBetaBeta(unittest.TestCase): - def setUp(self): - self.p = paddle.distribution.Beta(paddle.to_tensor(self.a1), - paddle.to_tensor(self.b1)) - self.q = paddle.distribution.Beta(paddle.to_tensor(self.a2), - paddle.to_tensor(self.b2)) + self.p = paddle.distribution.Beta( + paddle.to_tensor(self.a1), paddle.to_tensor(self.b1) + ) + self.q = paddle.distribution.Beta( + paddle.to_tensor(self.a2), paddle.to_tensor(self.b2) + ) def test_kl_divergence(self): with paddle.fluid.dygraph.guard(self.place): @@ -50,22 +57,31 @@ class TestKLBetaBeta(unittest.TestCase): paddle.distribution.kl_divergence(self.p, self.q), self.scipy_kl_beta_beta(self.a1, self.b1, self.a2, self.b2), rtol=config.RTOL.get(str(self.a1.dtype)), - atol=config.ATOL.get(str(self.a1.dtype))) + atol=config.ATOL.get(str(self.a1.dtype)), + ) def scipy_kl_beta_beta(self, a1, b1, a2, b2): - return (scipy.special.betaln(a2, b2) - scipy.special.betaln(a1, b1) + - (a1 - a2) * scipy.special.digamma(a1) + - (b1 - b2) * scipy.special.digamma(b1) + - (a2 - a1 + b2 - b1) * scipy.special.digamma(a1 + b1)) + return ( + scipy.special.betaln(a2, b2) + - scipy.special.betaln(a1, b1) + + (a1 - a2) * scipy.special.digamma(a1) + + (b1 - b2) * scipy.special.digamma(b1) + + (a2 - a1 
+ b2 - b1) * scipy.special.digamma(a1 + b1) + ) @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'conc1', 'conc2'), [ - ('test-regular-input', np.random.random( - (5, 7, 8, 10)), np.random.random((5, 7, 8, 10))), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'conc1', 'conc2'), + [ + ( + 'test-regular-input', + np.random.random((5, 7, 8, 10)), + np.random.random((5, 7, 8, 10)), + ), + ], +) class TestKLDirichletDirichlet(unittest.TestCase): - def setUp(self): self.p = paddle.distribution.Dirichlet(paddle.to_tensor(self.conc1)) self.q = paddle.distribution.Dirichlet(paddle.to_tensor(self.conc2)) @@ -76,17 +92,25 @@ class TestKLDirichletDirichlet(unittest.TestCase): paddle.distribution.kl_divergence(self.p, self.q), self.scipy_kl_diric_diric(self.conc1, self.conc2), rtol=config.RTOL.get(str(self.conc1.dtype)), - atol=config.ATOL.get(str(self.conc1.dtype))) + atol=config.ATOL.get(str(self.conc1.dtype)), + ) def scipy_kl_diric_diric(self, conc1, conc2): return ( - scipy.special.gammaln(np.sum(conc1, -1)) - - scipy.special.gammaln(np.sum(conc2, -1)) - np.sum( - scipy.special.gammaln(conc1) - scipy.special.gammaln(conc2), -1) + scipy.special.gammaln(np.sum(conc1, -1)) + - scipy.special.gammaln(np.sum(conc2, -1)) + - np.sum( + scipy.special.gammaln(conc1) - scipy.special.gammaln(conc2), -1 + ) + np.sum( - (conc1 - conc2) * - (scipy.special.digamma(conc1) - - scipy.special.digamma(np.sum(conc1, -1, keepdims=True))), -1)) + (conc1 - conc2) + * ( + scipy.special.digamma(conc1) + - scipy.special.digamma(np.sum(conc1, -1, keepdims=True)) + ), + -1, + ) + ) class DummyDistribution(paddle.distribution.Distribution): @@ -94,10 +118,11 @@ class DummyDistribution(paddle.distribution.Distribution): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'p', 'q'), - [('test-unregister', DummyDistribution(), DummyDistribution)]) +@param.param_cls( + (param.TEST_CASE_NAME, 'p', 'q'), + [('test-unregister', DummyDistribution(), DummyDistribution)], +) class TestDispatch(unittest.TestCase): - def test_dispatch_with_unregister(self): with self.assertRaises(NotImplementedError): paddle.distribution.kl_divergence(self.p, self.q) @@ -106,18 +131,27 @@ class TestDispatch(unittest.TestCase): @param.place(config.DEVICES) @param.param_cls( (param.TEST_CASE_NAME, 'p', 'q'), - [('test-diff-dist', mock.Exponential(paddle.rand((100, 200, 100)) + 1.0), - mock.Exponential(paddle.rand((100, 200, 100)) + 2.0)), - ('test-same-dist', mock.Exponential( - paddle.to_tensor(1.0)), mock.Exponential(paddle.to_tensor(1.0)))]) + [ + ( + 'test-diff-dist', + mock.Exponential(paddle.rand((100, 200, 100)) + 1.0), + mock.Exponential(paddle.rand((100, 200, 100)) + 2.0), + ), + ( + 'test-same-dist', + mock.Exponential(paddle.to_tensor(1.0)), + mock.Exponential(paddle.to_tensor(1.0)), + ), + ], +) class TestKLExpfamilyExpFamily(unittest.TestCase): - def test_kl_expfamily_expfamily(self): - np.testing.assert_allclose(paddle.distribution.kl_divergence( - self.p, self.q), - kl._kl_expfamily_expfamily(self.p, self.q), - rtol=config.RTOL.get(config.DEFAULT_DTYPE), - atol=config.ATOL.get(config.DEFAULT_DTYPE)) + np.testing.assert_allclose( + paddle.distribution.kl_divergence(self.p, self.q), + kl._kl_expfamily_expfamily(self.p, self.q), + rtol=config.RTOL.get(config.DEFAULT_DTYPE), + atol=config.ATOL.get(config.DEFAULT_DTYPE), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/distribution/test_kl_static.py b/python/paddle/fluid/tests/unittests/distribution/test_kl_static.py index 
f5e354e85de58a6e2241ce35a104843c92f81005..ea1f8a4aab23928977fc72d0b2894dea17f55553 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_kl_static.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_kl_static.py @@ -30,14 +30,19 @@ paddle.enable_static() @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'a1', 'b1', 'a2', 'b2'), [ - ('test_regular_input', 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random( - (4, 5)) + 1e-4, 6.0 * np.random.random((4, 5)) + 1e-4), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'a1', 'b1', 'a2', 'b2'), + [ + ( + 'test_regular_input', + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + 6.0 * np.random.random((4, 5)) + 1e-4, + ), + ], +) class TestKLBetaBeta(unittest.TestCase): - def setUp(self): self.mp = paddle.static.Program() self.sp = paddle.static.Program() @@ -55,46 +60,57 @@ class TestKLBetaBeta(unittest.TestCase): 'a1': self.a1, 'b1': self.b1, 'a2': self.a2, - 'b2': self.b2 + 'b2': self.b2, } def test_kl_divergence(self): with paddle.static.program_guard(self.mp, self.sp): out = paddle.distribution.kl_divergence(self.p, self.q) self.executor.run(self.sp) - [out] = self.executor.run(self.mp, - feed=self.feeds, - fetch_list=[out]) + [out] = self.executor.run( + self.mp, feed=self.feeds, fetch_list=[out] + ) - np.testing.assert_allclose(out, - self.scipy_kl_beta_beta( - self.a1, self.b1, self.a2, self.b2), - rtol=config.RTOL.get(str(self.a1.dtype)), - atol=config.ATOL.get(str(self.a1.dtype))) + np.testing.assert_allclose( + out, + self.scipy_kl_beta_beta(self.a1, self.b1, self.a2, self.b2), + rtol=config.RTOL.get(str(self.a1.dtype)), + atol=config.ATOL.get(str(self.a1.dtype)), + ) def scipy_kl_beta_beta(self, a1, b1, a2, b2): - return (scipy.special.betaln(a2, b2) - scipy.special.betaln(a1, b1) + - (a1 - a2) * scipy.special.digamma(a1) + - (b1 - b2) * scipy.special.digamma(b1) + - (a2 - a1 + b2 - b1) * scipy.special.digamma(a1 + b1)) + return ( + scipy.special.betaln(a2, b2) + - scipy.special.betaln(a1, b1) + + (a1 - a2) * scipy.special.digamma(a1) + + (b1 - b2) * scipy.special.digamma(b1) + + (a2 - a1 + b2 - b1) * scipy.special.digamma(a1 + b1) + ) @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'conc1', 'conc2'), [ - ('test-regular-input', np.random.random( - (5, 7, 8, 10)), np.random.random((5, 7, 8, 10))), -]) +@param.param_cls( + (param.TEST_CASE_NAME, 'conc1', 'conc2'), + [ + ( + 'test-regular-input', + np.random.random((5, 7, 8, 10)), + np.random.random((5, 7, 8, 10)), + ), + ], +) class TestKLDirichletDirichlet(unittest.TestCase): - def setUp(self): self.mp = paddle.static.Program() self.sp = paddle.static.Program() self.executor = paddle.static.Executor(self.place) with paddle.static.program_guard(self.mp, self.sp): - conc1 = paddle.static.data('conc1', self.conc1.shape, - self.conc1.dtype) - conc2 = paddle.static.data('conc2', self.conc2.shape, - self.conc2.dtype) + conc1 = paddle.static.data( + 'conc1', self.conc1.shape, self.conc1.dtype + ) + conc2 = paddle.static.data( + 'conc2', self.conc2.shape, self.conc2.dtype + ) self.p = paddle.distribution.Dirichlet(conc1) self.q = paddle.distribution.Dirichlet(conc2) self.feeds = {'conc1': self.conc1, 'conc2': self.conc2} @@ -104,24 +120,32 @@ class TestKLDirichletDirichlet(unittest.TestCase): with paddle.static.program_guard(self.mp, self.sp): out = paddle.distribution.kl_divergence(self.p, self.q) 
self.executor.run(self.sp) - [out] = self.executor.run(self.mp, - feed=self.feeds, - fetch_list=[out]) + [out] = self.executor.run( + self.mp, feed=self.feeds, fetch_list=[out] + ) np.testing.assert_allclose( out, self.scipy_kl_diric_diric(self.conc1, self.conc2), rtol=config.RTOL.get(str(self.conc1.dtype)), - atol=config.ATOL.get(str(self.conc1.dtype))) + atol=config.ATOL.get(str(self.conc1.dtype)), + ) def scipy_kl_diric_diric(self, conc1, conc2): return ( - scipy.special.gammaln(np.sum(conc1, -1)) - - scipy.special.gammaln(np.sum(conc2, -1)) - np.sum( - scipy.special.gammaln(conc1) - scipy.special.gammaln(conc2), -1) + scipy.special.gammaln(np.sum(conc1, -1)) + - scipy.special.gammaln(np.sum(conc2, -1)) + - np.sum( + scipy.special.gammaln(conc1) - scipy.special.gammaln(conc2), -1 + ) + np.sum( - (conc1 - conc2) * - (scipy.special.digamma(conc1) - - scipy.special.digamma(np.sum(conc1, -1, keepdims=True))), -1)) + (conc1 - conc2) + * ( + scipy.special.digamma(conc1) + - scipy.special.digamma(np.sum(conc1, -1, keepdims=True)) + ), + -1, + ) + ) class DummyDistribution(paddle.distribution.Distribution): @@ -129,10 +153,10 @@ class DummyDistribution(paddle.distribution.Distribution): @param.place(config.DEVICES) -@param.param_cls((param.TEST_CASE_NAME, 'p', 'q'), - [('test-dispatch-exception')]) +@param.param_cls( + (param.TEST_CASE_NAME, 'p', 'q'), [('test-dispatch-exception')] +) class TestDispatch(unittest.TestCase): - def setUp(self): self.mp = paddle.static.Program() self.sp = paddle.static.Program() @@ -150,23 +174,29 @@ class TestDispatch(unittest.TestCase): @param.place(config.DEVICES) -@param.param_cls((config.TEST_CASE_NAME, 'rate1', 'rate2'), - [('test-diff-dist', np.random.rand(100, 200, 100) + 1.0, - np.random.rand(100, 200, 100) + 2.0), - ('test-same-dist', np.array([1.0]), np.array([1.0]))]) +@param.param_cls( + (config.TEST_CASE_NAME, 'rate1', 'rate2'), + [ + ( + 'test-diff-dist', + np.random.rand(100, 200, 100) + 1.0, + np.random.rand(100, 200, 100) + 2.0, + ), + ('test-same-dist', np.array([1.0]), np.array([1.0])), + ], +) class TestKLExpfamilyExpFamily(unittest.TestCase): - def setUp(self): self.mp = paddle.static.Program() self.sp = paddle.static.Program() self.executor = paddle.static.Executor(self.place) with paddle.static.program_guard(self.mp, self.sp): - rate1 = paddle.static.data('rate1', - shape=self.rate1.shape, - dtype=self.rate1.dtype) - rate2 = paddle.static.data('rate2', - shape=self.rate2.shape, - dtype=self.rate2.dtype) + rate1 = paddle.static.data( + 'rate1', shape=self.rate1.shape, dtype=self.rate1.dtype + ) + rate2 = paddle.static.data( + 'rate2', shape=self.rate2.shape, dtype=self.rate2.dtype + ) self.p = mock.Exponential(rate1) self.q = mock.Exponential(rate2) self.feeds = {'rate1': self.rate1, 'rate2': self.rate2} @@ -176,15 +206,16 @@ class TestKLExpfamilyExpFamily(unittest.TestCase): out1 = paddle.distribution.kl_divergence(self.p, self.q) out2 = kl._kl_expfamily_expfamily(self.p, self.q) self.executor.run(self.sp) - [out1, out2] = self.executor.run(self.mp, - feed=self.feeds, - fetch_list=[out1, out2]) + [out1, out2] = self.executor.run( + self.mp, feed=self.feeds, fetch_list=[out1, out2] + ) np.testing.assert_allclose( out1, out2, rtol=config.RTOL.get(config.DEFAULT_DTYPE), - atol=config.ATOL.get(config.DEFAULT_DTYPE)) + atol=config.ATOL.get(config.DEFAULT_DTYPE), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_fleet_api.py b/python/paddle/fluid/tests/unittests/dygraph_fleet_api.py index 
bf6139ffae680cf2d24f819714e76f432a724502..a2834516d7166416529e109491db7b5b64a93535 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_fleet_api.py +++ b/python/paddle/fluid/tests/unittests/dygraph_fleet_api.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDygraphFleetAPI(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) @@ -35,12 +34,14 @@ class TestDygraphFleetAPI(unittest.TestCase): def test_dygraph_fleet_api(self): import paddle.distributed.fleet as fleet import paddle.distributed as dist + strategy = fleet.DistributedStrategy() strategy.amp = True strategy.recompute = True fleet.init(is_collective=True, strategy=strategy) - net = paddle.nn.Sequential(paddle.nn.Linear(10, 1), - paddle.nn.Linear(1, 2)) + net = paddle.nn.Sequential( + paddle.nn.Linear(10, 1), paddle.nn.Linear(1, 2) + ) net = dist.fleet.distributed_model(net) data = np.random.uniform(-1, 1, [30, 10]).astype('float32') data = paddle.to_tensor(data) diff --git a/python/paddle/fluid/tests/unittests/dygraph_recompute_hybrid.py b/python/paddle/fluid/tests/unittests/dygraph_recompute_hybrid.py index 105621498fed6fb424468b8afd0b6175b8449f88..7dbd6b6d7ef35ee6b68686292466f162f569968d 100755 --- a/python/paddle/fluid/tests/unittests/dygraph_recompute_hybrid.py +++ b/python/paddle/fluid/tests/unittests/dygraph_recompute_hybrid.py @@ -24,34 +24,40 @@ from paddle.distributed import fleet def get_fc_block(block_idx, input_size, is_last=False): block_name = "block_" + str(block_idx) block = paddle.nn.Sequential( - (block_name + "_fc_0", - paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_0", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_dropout", paddle.nn.Dropout(p=0.5)), (block_name + "_relu_1", paddle.nn.ReLU()), - (block_name + "_fc_1", - paddle.nn.Linear(input_size, input_size, bias_attr=False)), + ( + block_name + "_fc_1", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ), (block_name + "_relu_2", paddle.nn.ReLU()), ) if is_last: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, 1, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, 1, bias_attr=False), + ) # add sublayer else: - block.add_sublayer(block_name + "_fc_2", - paddle.nn.Linear(input_size, - input_size, - bias_attr=False)) # add sublayer + block.add_sublayer( + block_name + "_fc_2", + paddle.nn.Linear(input_size, input_size, bias_attr=False), + ) # add sublayer return block class Naive_fc_net(paddle.nn.Layer): - - def __init__(self, - input_size=10, - recompute_blocks=[1, 3], - offload=False, - partition=False, - recompute_kwargs={}): + def __init__( + self, + input_size=10, + recompute_blocks=[1, 3], + offload=False, + partition=False, + recompute_kwargs={}, + ): super(Naive_fc_net, self).__init__() self.recompute_blocks = recompute_blocks self.recompute_kwargs = recompute_kwargs @@ -65,8 +71,11 @@ class Naive_fc_net(paddle.nn.Layer): self.runfunc4 = get_fc_block(4, input_size, is_last=True) self.layers = [ - self.runfunc0, self.runfunc1, self.runfunc2, self.runfunc3, - self.runfunc4 + self.runfunc0, + self.runfunc1, + self.runfunc2, + self.runfunc3, + self.runfunc4, ] def forward(self, inputs): @@ -76,34 +85,43 @@ class Naive_fc_net(paddle.nn.Layer): { "mp_group": fleet.fleet._hcg.get_model_parallel_group(), "offload": self.offload, - "partition": self.partition - }, self.layers[i], inputs, **self.recompute_kwargs) + "partition": 
self.partition, + }, + self.layers[i], + inputs, + **self.recompute_kwargs + ) else: inputs = self.layers[i](inputs) return inputs -def run_model(recompute_block=[], - recompute_kwargs={}, - offload=False, - partition=False, - enable_autocast=False, - pure_fp16=False): +def run_model( + recompute_block=[], + recompute_kwargs={}, + offload=False, + partition=False, + enable_autocast=False, + pure_fp16=False, +): gen = paddle.seed(10) gen.manual_seed(10) np.random.seed(10) random.seed(10) batch_size, input_size = 1, 10 - model = Naive_fc_net(input_size, - recompute_blocks=recompute_block, - offload=offload, - partition=partition, - recompute_kwargs=recompute_kwargs) + model = Naive_fc_net( + input_size, + recompute_blocks=recompute_block, + offload=offload, + partition=partition, + recompute_kwargs=recompute_kwargs, + ) loss_fn = paddle.nn.MSELoss(reduction='mean') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, parameters=model.parameters() + ) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) @@ -140,7 +158,6 @@ def run_model(recompute_block=[], class TestPyLayer(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 2 @@ -154,7 +171,6 @@ class TestPyLayer(unittest.TestCase): fleet.init(is_collective=True, strategy=strategy) def test_base_case(self, enable_autocast=False, pure_fp16=False): - def check_identical(loss_ref, param_ref, grad_ref, loss, param, grad): self.assertEqual(loss_ref, loss) self.assertEqual(param_ref, param) @@ -164,34 +180,43 @@ class TestPyLayer(unittest.TestCase): loss_ref, param_ref, grad_ref = run_model( recompute_block=[], enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + pure_fp16=pure_fp16, + ) # with recompute, offload=False, partition=False - loss, param, grad = run_model(recompute_block=[1, 3], - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 3], + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # with recompute, offload=True, partition=False - loss, param, grad = run_model(recompute_block=[1, 2, 3], - offload=True, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 2, 3], + offload=True, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # with recompute, offload=False, partition=True - loss, param, grad = run_model(recompute_block=[1], - partition=True, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1], + partition=True, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) # with recompute, offload=True, partition=True - loss, param, grad = run_model(recompute_block=[1, 3, 4], - offload=True, - partition=True, - enable_autocast=enable_autocast, - pure_fp16=pure_fp16) + loss, param, grad = run_model( + recompute_block=[1, 3, 4], + offload=True, + partition=True, + enable_autocast=enable_autocast, + pure_fp16=pure_fp16, + ) check_identical(loss_ref, param_ref, grad_ref, loss, param, grad) def test_fc_net_with_dropout(self): @@ -207,8 +232,9 @@ class TestPyLayer(unittest.TestCase): paddle.set_device("gpu") kwargs = {"is_test": False} with 
self.assertRaises(TypeError): - loss_ref, param_ref, grad_ref = run_model(recompute_block=[2], - recompute_kwargs=kwargs) + loss_ref, param_ref, grad_ref = run_model( + recompute_block=[2], recompute_kwargs=kwargs + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py index 60c2a4ef0670c2650df564bfa5ff4987afb40b6d..d9ccc587f7c055e6e54bb2ede62c1ebfe68b116c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_dygraph_model.py @@ -21,89 +21,105 @@ from transformer_dygraph_model import MultiHeadAttention, PrePostProcessLayer class PositionwiseFeedForwardLayer(Layer): - - def __init__(self, - hidden_act, - d_inner_hid, - d_model, - dropout_rate, - param_initializer=None, - name=""): + def __init__( + self, + hidden_act, + d_inner_hid, + d_model, + dropout_rate, + param_initializer=None, + name="", + ): super(PositionwiseFeedForwardLayer, self).__init__() - self._i2h = Linear(input_dim=d_model, - output_dim=d_inner_hid, - param_attr=fluid.ParamAttr( - name=name + '_fc_0.w_0', - initializer=param_initializer), - bias_attr=name + '_fc_0.b_0', - act=hidden_act) - - self._h2o = Linear(input_dim=d_inner_hid, - output_dim=d_model, - param_attr=fluid.ParamAttr( - name=name + '_fc_1.w_0', - initializer=param_initializer), - bias_attr=name + '_fc_1.b_0') + self._i2h = Linear( + input_dim=d_model, + output_dim=d_inner_hid, + param_attr=fluid.ParamAttr( + name=name + '_fc_0.w_0', initializer=param_initializer + ), + bias_attr=name + '_fc_0.b_0', + act=hidden_act, + ) + + self._h2o = Linear( + input_dim=d_inner_hid, + output_dim=d_model, + param_attr=fluid.ParamAttr( + name=name + '_fc_1.w_0', initializer=param_initializer + ), + bias_attr=name + '_fc_1.b_0', + ) self._dropout_rate = dropout_rate def forward(self, x): hidden = self._i2h(x) if self._dropout_rate: - hidden = fluid.layers.dropout(hidden, - dropout_prob=self._dropout_rate, - is_test=False) + hidden = fluid.layers.dropout( + hidden, dropout_prob=self._dropout_rate, is_test=False + ) out = self._h2o(hidden) return out class EncoderSubLayer(Layer): - - def __init__(self, - hidden_act, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da", - param_initializer=None, - name=""): + def __init__( + self, + hidden_act, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + param_initializer=None, + name="", + ): super(EncoderSubLayer, self).__init__() self.name = name self._preprocess_cmd = preprocess_cmd self._postprocess_cmd = postprocess_cmd self._prepostprocess_dropout = prepostprocess_dropout - self._preprocess_layer = PrePostProcessLayer(self._preprocess_cmd, - d_model, - prepostprocess_dropout) + self._preprocess_layer = PrePostProcessLayer( + self._preprocess_cmd, d_model, prepostprocess_dropout + ) self._multihead_attention_layer = MultiHeadAttention( - d_key, d_value, d_model, n_head, attention_dropout, - param_initializer) + d_key, + d_value, + d_model, + n_head, + attention_dropout, + param_initializer, + ) self._postprocess_layer = PrePostProcessLayer( - self._postprocess_cmd, d_model, self._prepostprocess_dropout) + self._postprocess_cmd, d_model, self._prepostprocess_dropout + ) 
self._preprocess_layer2 = PrePostProcessLayer( - self._preprocess_cmd, d_model, self._prepostprocess_dropout) + self._preprocess_cmd, d_model, self._prepostprocess_dropout + ) self._positionwise_feed_forward = PositionwiseFeedForwardLayer( hidden_act, d_inner_hid, d_model, relu_dropout, param_initializer, - name=name + "_ffn") + name=name + "_ffn", + ) self._postprocess_layer2 = PrePostProcessLayer( - self._postprocess_cmd, d_model, self._prepostprocess_dropout) + self._postprocess_cmd, d_model, self._prepostprocess_dropout + ) def forward(self, enc_input, attn_bias): pre_process_multihead = self._preprocess_layer(enc_input) - attn_output = self._multihead_attention_layer(pre_process_multihead, - None, None, attn_bias) + attn_output = self._multihead_attention_layer( + pre_process_multihead, None, None, attn_bias + ) attn_output = self._postprocess_layer(attn_output, enc_input) pre_process2_output = self._preprocess_layer2(attn_output) ffd_output = self._positionwise_feed_forward(pre_process2_output) @@ -111,22 +127,23 @@ class EncoderSubLayer(Layer): class EncoderLayer(Layer): - - def __init__(self, - hidden_act, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da", - param_initializer=None, - name=""): + def __init__( + self, + hidden_act, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + param_initializer=None, + name="", + ): super(EncoderLayer, self).__init__() self._preprocess_cmd = preprocess_cmd @@ -135,25 +152,30 @@ class EncoderLayer(Layer): self._n_layer = n_layer self._hidden_act = hidden_act self._preprocess_layer = PrePostProcessLayer( - self._preprocess_cmd, 3, self._prepostprocess_dropout) + self._preprocess_cmd, 3, self._prepostprocess_dropout + ) for i in range(n_layer): self._encoder_sublayers.append( self.add_sublayer( 'esl_%d' % i, - EncoderSubLayer(hidden_act, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - param_initializer, - name=name + '_layer_' + str(i)))) + EncoderSubLayer( + hidden_act, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + param_initializer, + name=name + '_layer_' + str(i), + ), + ) + ) def forward(self, enc_input, attn_bias): for i in range(self._n_layer): @@ -164,7 +186,6 @@ class EncoderLayer(Layer): class BertModelLayer(Layer): - def __init__(self, config, return_pooled_out=True, use_fp16=False): super(BertModelLayer, self).__init__() @@ -185,36 +206,46 @@ class BertModelLayer(Layer): self._dtype = "float16" if use_fp16 else "float32" self._param_initializer = fluid.initializer.TruncatedNormal( - scale=config['initializer_range']) + scale=config['initializer_range'] + ) - self._src_emb = Embedding(size=[self._voc_size, self._emb_size], - param_attr=fluid.ParamAttr( - name=self._word_emb_name, - initializer=self._param_initializer), - dtype=self._dtype) + self._src_emb = Embedding( + size=[self._voc_size, self._emb_size], + param_attr=fluid.ParamAttr( + name=self._word_emb_name, initializer=self._param_initializer + ), + dtype=self._dtype, + ) self._pos_emb = Embedding( size=[self._max_position_seq_len, self._emb_size], - param_attr=fluid.ParamAttr(name=self._pos_emb_name, - 
initializer=self._param_initializer), - dtype=self._dtype) - - self._sent_emb = Embedding(size=[self._sent_types, self._emb_size], - param_attr=fluid.ParamAttr( - name=self._sent_emb_name, - initializer=self._param_initializer), - dtype=self._dtype) - - self.pooled_fc = Linear(input_dim=self._emb_size, - output_dim=self._emb_size, - param_attr=fluid.ParamAttr( - name="pooled_fc.w_0", - initializer=self._param_initializer), - bias_attr="pooled_fc.b_0", - act="tanh") + param_attr=fluid.ParamAttr( + name=self._pos_emb_name, initializer=self._param_initializer + ), + dtype=self._dtype, + ) + + self._sent_emb = Embedding( + size=[self._sent_types, self._emb_size], + param_attr=fluid.ParamAttr( + name=self._sent_emb_name, initializer=self._param_initializer + ), + dtype=self._dtype, + ) + + self.pooled_fc = Linear( + input_dim=self._emb_size, + output_dim=self._emb_size, + param_attr=fluid.ParamAttr( + name="pooled_fc.w_0", initializer=self._param_initializer + ), + bias_attr="pooled_fc.b_0", + act="tanh", + ) self.pre_process_layer = PrePostProcessLayer( - "nd", self._emb_size, self._prepostprocess_dropout) + "nd", self._emb_size, self._prepostprocess_dropout + ) self._encoder = EncoderLayer( hidden_act=self._hidden_act, @@ -229,7 +260,8 @@ class BertModelLayer(Layer): relu_dropout=0, preprocess_cmd="", postprocess_cmd="dan", - param_initializer=self._param_initializer) + param_initializer=self._param_initializer, + ) def forward(self, src_ids, position_ids, sentence_ids, input_mask): src_emb = self._src_emb(src_ids) @@ -241,16 +273,15 @@ class BertModelLayer(Layer): emb_out = self.pre_process_layer(emb_out) - self_attn_mask = fluid.layers.matmul(x=input_mask, - y=input_mask, - transpose_y=True) - self_attn_mask = fluid.layers.scale(x=self_attn_mask, - scale=10000.0, - bias=-1.0, - bias_after_scale=False) - n_head_self_attn_mask = fluid.layers.stack(x=[self_attn_mask] * - self._n_head, - axis=1) + self_attn_mask = fluid.layers.matmul( + x=input_mask, y=input_mask, transpose_y=True + ) + self_attn_mask = fluid.layers.scale( + x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False + ) + n_head_self_attn_mask = fluid.layers.stack( + x=[self_attn_mask] * self._n_head, axis=1 + ) n_head_self_attn_mask.stop_gradient = True enc_output = self._encoder(emb_out, n_head_self_attn_mask) @@ -258,26 +289,27 @@ class BertModelLayer(Layer): # TODO(zhhsplendid): uncomment this in next PR which we support various # length of early return # - #if not self.return_pooled_out: + # if not self.return_pooled_out: # return enc_output - next_sent_feat = fluid.layers.slice(input=enc_output, - axes=[1], - starts=[0], - ends=[1]) + next_sent_feat = fluid.layers.slice( + input=enc_output, axes=[1], starts=[0], ends=[1] + ) next_sent_feat = self.pooled_fc(next_sent_feat) - next_sent_feat = fluid.layers.reshape(next_sent_feat, - shape=[-1, self._emb_size]) + next_sent_feat = fluid.layers.reshape( + next_sent_feat, shape=[-1, self._emb_size] + ) return enc_output, next_sent_feat class PretrainModelLayer(Layer): - - def __init__(self, - config, - return_pooled_out=True, - weight_sharing=False, - use_fp16=False): + def __init__( + self, + config, + return_pooled_out=True, + weight_sharing=False, + use_fp16=False, + ): super(PretrainModelLayer, self).__init__() self.config = config self._voc_size = config['vocab_size'] @@ -287,84 +319,113 @@ class PretrainModelLayer(Layer): self._word_emb_name = "word_embedding" self._param_initializer = fluid.initializer.TruncatedNormal( - scale=config['initializer_range']) + 
scale=config['initializer_range'] + ) self._weight_sharing = weight_sharing self.use_fp16 = use_fp16 self._dtype = "float16" if use_fp16 else "float32" - self.bert_layer = BertModelLayer(config=self.config, - return_pooled_out=True, - use_fp16=self.use_fp16) + self.bert_layer = BertModelLayer( + config=self.config, return_pooled_out=True, use_fp16=self.use_fp16 + ) self.pre_process_layer = PrePostProcessLayer( - "n", self._emb_size, self._prepostprocess_dropout) - - self.pooled_fc = Linear(input_dim=self._emb_size, - output_dim=self._emb_size, - param_attr=fluid.ParamAttr( - name="mask_lm_trans_fc.w_0", - initializer=self._param_initializer), - bias_attr="mask_lm_trans_fc.b_0", - act="tanh") + "n", self._emb_size, self._prepostprocess_dropout + ) + + self.pooled_fc = Linear( + input_dim=self._emb_size, + output_dim=self._emb_size, + param_attr=fluid.ParamAttr( + name="mask_lm_trans_fc.w_0", initializer=self._param_initializer + ), + bias_attr="mask_lm_trans_fc.b_0", + act="tanh", + ) self.mask_lm_out_bias_attr = fluid.ParamAttr( name="mask_lm_out_fc.b_0", - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0), + ) if not self._weight_sharing: - self.out_fc = Linear(input_dim=self._emb_size, - output_dim=self._voc_size, - param_attr=fluid.ParamAttr( - name="mask_lm_out_fc.w_0", - initializer=self._param_initializer), - bias_attr=self.mask_lm_out_bias_attr) + self.out_fc = Linear( + input_dim=self._emb_size, + output_dim=self._voc_size, + param_attr=fluid.ParamAttr( + name="mask_lm_out_fc.w_0", + initializer=self._param_initializer, + ), + bias_attr=self.mask_lm_out_bias_attr, + ) else: self.fc_create_params = self.create_parameter( shape=[self._voc_size], dtype=self._dtype, attr=self.mask_lm_out_bias_attr, - is_bias=True) - - self.next_sent_fc = Linear(input_dim=self._emb_size, - output_dim=2, - param_attr=fluid.ParamAttr( - name="next_sent_fc.w_0", - initializer=self._param_initializer), - bias_attr="next_sent_fc.b_0") + is_bias=True, + ) + + self.next_sent_fc = Linear( + input_dim=self._emb_size, + output_dim=2, + param_attr=fluid.ParamAttr( + name="next_sent_fc.w_0", initializer=self._param_initializer + ), + bias_attr="next_sent_fc.b_0", + ) @declarative - def forward(self, src_ids, position_ids, sentence_ids, input_mask, - mask_label, mask_pos, labels): + def forward( + self, + src_ids, + position_ids, + sentence_ids, + input_mask, + mask_label, + mask_pos, + labels, + ): mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32') - enc_output, next_sent_feat = self.bert_layer(src_ids, position_ids, - sentence_ids, input_mask) - reshaped_emb_out = fluid.layers.reshape(x=enc_output, - shape=[-1, self._emb_size]) + enc_output, next_sent_feat = self.bert_layer( + src_ids, position_ids, sentence_ids, input_mask + ) + reshaped_emb_out = fluid.layers.reshape( + x=enc_output, shape=[-1, self._emb_size] + ) mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos) mask_trans_feat = self.pooled_fc(mask_feat) mask_trans_feat = self.pre_process_layer(mask_trans_feat) if self._weight_sharing: - fc_out = fluid.layers.matmul(x=mask_trans_feat, - y=self.bert_layer._src_emb._w, - transpose_y=True) + fc_out = fluid.layers.matmul( + x=mask_trans_feat, + y=self.bert_layer._src_emb._w, + transpose_y=True, + ) fc_out += self.fc_create_params else: fc_out = self.out_fc(mask_trans_feat) - mask_lm_loss = fluid.layers.softmax_with_cross_entropy(logits=fc_out, - label=mask_label) + mask_lm_loss = fluid.layers.softmax_with_cross_entropy( + 
logits=fc_out, label=mask_label + ) mean_mask_lm_loss = paddle.mean(mask_lm_loss) next_sent_fc_out = self.next_sent_fc(next_sent_feat) - next_sent_loss, next_sent_softmax = fluid.layers.softmax_with_cross_entropy( - logits=next_sent_fc_out, label=labels, return_softmax=True) + ( + next_sent_loss, + next_sent_softmax, + ) = fluid.layers.softmax_with_cross_entropy( + logits=next_sent_fc_out, label=labels, return_softmax=True + ) - next_sent_acc = fluid.layers.accuracy(input=next_sent_softmax, - label=labels) + next_sent_acc = fluid.layers.accuracy( + input=next_sent_softmax, label=labels + ) mean_next_sent_loss = paddle.mean(next_sent_loss) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py index 3fb23049f1b8189b9289ca20bd4f570686200e90..0cc22162d202c10b5cc4d889a017cf2e69c0953c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/bert_utils.py @@ -35,7 +35,7 @@ def get_bert_config(): "pooler_size_per_head": 8, "pooler_type": "first_token_transform", "type_vocab_size": 2, - "vocab_size": 21128 + "vocab_size": 21128, } return bert_config @@ -87,8 +87,9 @@ def mask(batch_tokens, total_token_num, vocab_size, CLS=1, SEP=2, MASK=3): # ensure at least mask one word in a sentence while not mask_flag: - token_index = int(self_random.randint(1, high=len(sent) - 1, - size=1)) + token_index = int( + self_random.randint(1, high=len(sent) - 1, size=1) + ) if sent[token_index] != SEP and sent[token_index] != CLS: mask_label.append(sent[token_index]) sent[token_index] = MASK @@ -99,12 +100,14 @@ def mask(batch_tokens, total_token_num, vocab_size, CLS=1, SEP=2, MASK=3): return batch_tokens, mask_label, mask_pos -def pad_batch_data(insts, - pad_idx=0, - return_pos=False, - return_input_mask=False, - return_max_len=False, - return_num_token=False): +def pad_batch_data( + insts, + pad_idx=0, + return_pos=False, + return_input_mask=False, + return_max_len=False, + return_num_token=False, +): """ Pad the instances to the max sequence length in batch, and generate the corresponding position data and input mask. @@ -114,24 +117,27 @@ def pad_batch_data(insts, # Any token included in dict can be used to pad, since the paddings' loss # will be masked out by weights and make no effect on parameter gradients. - inst_data = np.array([ - list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in insts - ]) + inst_data = np.array( + [list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in insts] + ) return_list += [inst_data.astype("int64").reshape([-1, max_len])] # position data if return_pos: - inst_pos = np.array([ - list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst)) - for inst in insts - ]) + inst_pos = np.array( + [ + list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst)) + for inst in insts + ] + ) return_list += [inst_pos.astype("int64").reshape([-1, max_len])] if return_input_mask: # This is used to avoid attention on paddings. 
input_mask_data = np.array( - [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts]) + [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts] + ) input_mask_data = np.expand_dims(input_mask_data, axis=-1) return_list += [input_mask_data.astype("float32")] @@ -147,16 +153,18 @@ def pad_batch_data(insts, return return_list if len(return_list) > 1 else return_list[0] -def prepare_batch_data(insts, - total_token_num, - voc_size=0, - pad_id=None, - cls_id=None, - sep_id=None, - mask_id=None, - return_input_mask=True, - return_max_len=True, - return_num_token=False): +def prepare_batch_data( + insts, + total_token_num, + voc_size=0, + pad_id=None, + cls_id=None, + sep_id=None, + mask_id=None, + return_input_mask=True, + return_max_len=True, + return_num_token=False, +): """ 1. generate Tensor of data 2. generate Tensor of position @@ -175,30 +183,38 @@ def prepare_batch_data(insts, # First step: do mask without padding if mask_id >= 0: - out, mask_label, mask_pos = mask(batch_src_ids, - total_token_num, - vocab_size=voc_size, - CLS=cls_id, - SEP=sep_id, - MASK=mask_id) + out, mask_label, mask_pos = mask( + batch_src_ids, + total_token_num, + vocab_size=voc_size, + CLS=cls_id, + SEP=sep_id, + MASK=mask_id, + ) else: out = batch_src_ids # Second step: padding - src_id, self_input_mask = pad_batch_data(out, - pad_idx=pad_id, - return_input_mask=True) - pos_id = pad_batch_data(batch_pos_ids, - pad_idx=pad_id, - return_pos=False, - return_input_mask=False) - sent_id = pad_batch_data(batch_sent_ids, - pad_idx=pad_id, - return_pos=False, - return_input_mask=False) + src_id, self_input_mask = pad_batch_data( + out, pad_idx=pad_id, return_input_mask=True + ) + pos_id = pad_batch_data( + batch_pos_ids, pad_idx=pad_id, return_pos=False, return_input_mask=False + ) + sent_id = pad_batch_data( + batch_sent_ids, + pad_idx=pad_id, + return_pos=False, + return_input_mask=False, + ) if mask_id >= 0: return_list = [ - src_id, pos_id, sent_id, self_input_mask, mask_label, mask_pos + src_id, + pos_id, + sent_id, + self_input_mask, + mask_label, + mask_pos, ] + labels_list else: return_list = [src_id, pos_id, sent_id, self_input_mask] + labels_list @@ -208,16 +224,17 @@ def prepare_batch_data(insts, class DataReader(object): - - def __init__(self, - batch_size=4096, - in_tokens=True, - max_seq_len=512, - shuffle_files=False, - epoch=100, - voc_size=0, - is_test=False, - generate_neg_sample=False): + def __init__( + self, + batch_size=4096, + in_tokens=True, + max_seq_len=512, + shuffle_files=False, + epoch=100, + voc_size=0, + is_test=False, + generate_neg_sample=False, + ): self.batch_size = batch_size self.in_tokens = in_tokens @@ -237,8 +254,10 @@ class DataReader(object): self.is_test = is_test self.generate_neg_sample = generate_neg_sample if self.in_tokens: - assert self.batch_size >= self.max_seq_len, "The number of " \ - "tokens in batch should not be smaller than max seq length." + assert self.batch_size >= self.max_seq_len, ( + "The number of " + "tokens in batch should not be smaller than max seq length." 
+ ) if self.is_test: self.epoch = 1 @@ -253,21 +272,22 @@ class DataReader(object): sent0_len = self_random.randint(50, 100) sent1_len = self_random.randint(50, 100) - token_ids = [1] \ - + [self_random.randint(0, 10000) for i in range(sent0_len-1)] \ - + [self_random.randint(0, 10000) for i in range(sent1_len-1)] \ - + [2] + token_ids = ( + [1] + + [self_random.randint(0, 10000) for i in range(sent0_len - 1)] + + [self_random.randint(0, 10000) for i in range(sent1_len - 1)] + + [2] + ) - sent_ids = [0 for i in range(sent0_len) - ] + [1 for i in range(sent1_len)] + sent_ids = [0 for i in range(sent0_len)] + [ + 1 for i in range(sent1_len) + ] pos_ids = [i for i in range(sent0_len + sent1_len)] label = 1 yield token_ids, sent_ids, pos_ids, label def data_generator(self): - def wrapper(): - def reader(): for epoch in range(self.epoch): self.current_epoch = epoch + 1 @@ -291,25 +311,30 @@ class DataReader(object): total_token_num += len(token_ids) else: yield batch, total_token_num - batch, total_token_num, max_len = [ - parsed_line - ], len(token_ids), len(token_ids) + batch, total_token_num, max_len = ( + [parsed_line], + len(token_ids), + len(token_ids), + ) if len(batch) > 0: yield batch, total_token_num for batch_data, total_token_num in batch_reader( - reader, self.batch_size, self.in_tokens): - yield prepare_batch_data(batch_data, - total_token_num, - voc_size=self.voc_size, - pad_id=self.pad_id, - cls_id=self.cls_id, - sep_id=self.sep_id, - mask_id=self.mask_id, - return_input_mask=True, - return_max_len=False, - return_num_token=False) + reader, self.batch_size, self.in_tokens + ): + yield prepare_batch_data( + batch_data, + total_token_num, + voc_size=self.voc_size, + pad_id=self.pad_id, + cls_id=self.cls_id, + sep_id=self.sep_id, + mask_id=self.mask_id, + return_input_mask=True, + return_max_len=False, + return_num_token=False, + ) return wrapper @@ -324,11 +349,13 @@ class ModelHyperParams(object): def get_feed_data_reader(bert_config): args = ModelHyperParams() - data_reader = DataReader(batch_size=args.batch_size, - in_tokens=args.in_tokens, - voc_size=bert_config['vocab_size'], - epoch=args.epoch, - max_seq_len=args.max_seq_len, - generate_neg_sample=args.generate_neg_sample) + data_reader = DataReader( + batch_size=args.batch_size, + in_tokens=args.in_tokens, + voc_size=bert_config['vocab_size'], + epoch=args.epoch, + max_seq_len=args.max_seq_len, + generate_neg_sample=args.generate_neg_sample, + ) return data_reader diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py index d188c693baa0d0bb50cb69fbbe35fcba98006f8b..ea72368fedb625999a25034d44bebc234cad20c6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py @@ -1,16 +1,16 @@ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import paddle.fluid as fluid from paddle.fluid.param_attr import ParamAttr @@ -20,35 +20,44 @@ from paddle.fluid.dygraph.nn import Conv2D, BatchNorm class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - ch_in, - ch_out, - filter_size=3, - stride=1, - groups=1, - padding=0, - act="leaky", - is_test=True): + def __init__( + self, + ch_in, + ch_out, + filter_size=3, + stride=1, + groups=1, + padding=0, + act="leaky", + is_test=True, + ): super(ConvBNLayer, self).__init__() - self.conv = Conv2D(num_channels=ch_in, - num_filters=ch_out, - filter_size=filter_size, - stride=stride, - padding=padding, - groups=groups, - param_attr=ParamAttr( - initializer=fluid.initializer.Normal(0., 0.02)), - bias_attr=False, - act=None) + self.conv = Conv2D( + num_channels=ch_in, + num_filters=ch_out, + filter_size=filter_size, + stride=stride, + padding=padding, + groups=groups, + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(0.0, 0.02) + ), + bias_attr=False, + act=None, + ) self.batch_norm = BatchNorm( num_channels=ch_out, is_test=is_test, - param_attr=ParamAttr(initializer=fluid.initializer.Normal(0., 0.02), - regularizer=L2Decay(0.)), - bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), - regularizer=L2Decay(0.))) + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(0.0, 0.02), + regularizer=L2Decay(0.0), + ), + bias_attr=ParamAttr( + initializer=fluid.initializer.Constant(0.0), + regularizer=L2Decay(0.0), + ), + ) self.act = act @@ -61,23 +70,20 @@ class ConvBNLayer(fluid.dygraph.Layer): class DownSample(fluid.dygraph.Layer): - - def __init__(self, - ch_in, - ch_out, - filter_size=3, - stride=2, - padding=1, - is_test=True): + def __init__( + self, ch_in, ch_out, filter_size=3, stride=2, padding=1, is_test=True + ): super(DownSample, self).__init__() - self.conv_bn_layer = ConvBNLayer(ch_in=ch_in, - ch_out=ch_out, - filter_size=filter_size, - stride=stride, - padding=padding, - is_test=is_test) + self.conv_bn_layer = ConvBNLayer( + ch_in=ch_in, + ch_out=ch_out, + filter_size=filter_size, + stride=stride, + padding=padding, + is_test=is_test, + ) self.ch_out = ch_out def forward(self, inputs): @@ -86,22 +92,25 @@ class DownSample(fluid.dygraph.Layer): class BasicBlock(fluid.dygraph.Layer): - def __init__(self, ch_in, ch_out, is_test=True): super(BasicBlock, self).__init__() - self.conv1 = ConvBNLayer(ch_in=ch_in, - ch_out=ch_out, - filter_size=1, - stride=1, - padding=0, - is_test=is_test) - self.conv2 = ConvBNLayer(ch_in=ch_out, - ch_out=ch_out * 2, - filter_size=3, - stride=1, - padding=1, - is_test=is_test) + self.conv1 = ConvBNLayer( + ch_in=ch_in, + ch_out=ch_out, + filter_size=1, + stride=1, + padding=0, + is_test=is_test, + ) + self.conv2 = ConvBNLayer( + ch_in=ch_out, + ch_out=ch_out * 2, + filter_size=3, + stride=1, + padding=1, + is_test=is_test, + ) def forward(self, inputs): conv1 = self.conv1(inputs) @@ -111,7 +120,6 @@ class BasicBlock(fluid.dygraph.Layer): class LayerWarp(fluid.dygraph.Layer): - def __init__(self, ch_in, ch_out, count, is_test=True): super(LayerWarp, self).__init__() @@ -120,7 +128,8 @@ class 
LayerWarp(fluid.dygraph.Layer): for i in range(1, count): res_out = self.add_sublayer( "basic_block_%d" % (i), - BasicBlock(ch_out * 2, ch_out, is_test=is_test)) + BasicBlock(ch_out * 2, ch_out, is_test=is_test), + ) self.res_out_list.append(res_out) self.ch_out = ch_out @@ -135,18 +144,19 @@ DarkNet_cfg = {53: ([1, 2, 8, 8, 4])} class DarkNet53_conv_body(fluid.dygraph.Layer): - def __init__(self, ch_in=3, is_test=True): super(DarkNet53_conv_body, self).__init__() self.stages = DarkNet_cfg[53] self.stages = self.stages[0:5] - self.conv0 = ConvBNLayer(ch_in=ch_in, - ch_out=32, - filter_size=3, - stride=1, - padding=1, - is_test=is_test) + self.conv0 = ConvBNLayer( + ch_in=ch_in, + ch_out=32, + filter_size=3, + stride=1, + padding=1, + is_test=is_test, + ) self.downsample0 = DownSample(ch_in=32, ch_out=32 * 2, is_test=is_test) self.darknet53_conv_block_list = [] @@ -155,14 +165,18 @@ class DarkNet53_conv_body(fluid.dygraph.Layer): for i, stage in enumerate(self.stages): conv_block = self.add_sublayer( "stage_%d" % (i), - LayerWarp(int(ch_in[i]), 32 * (2**i), stage, is_test=is_test)) + LayerWarp(int(ch_in[i]), 32 * (2**i), stage, is_test=is_test), + ) self.darknet53_conv_block_list.append(conv_block) for i in range(len(self.stages) - 1): downsample = self.add_sublayer( "stage_%d_downsample" % i, - DownSample(ch_in=32 * (2**(i + 1)), - ch_out=32 * (2**(i + 2)), - is_test=is_test)) + DownSample( + ch_in=32 * (2 ** (i + 1)), + ch_out=32 * (2 ** (i + 2)), + is_test=is_test, + ), + ) self.downsample_list.append(downsample) def forward(self, inputs): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/decos.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/decos.py index ee566fbcb33486eb0f57e892cd2d96ff37235369..73bbfffcabf8c7563b29ace9a000fd206c78fa1d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/decos.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/decos.py @@ -18,7 +18,6 @@ from functools import wraps def deco1(fun): - @wraps(fun) def inner(*args, **kwargs): print('in decos.deco1, added 1') @@ -30,9 +29,7 @@ def deco1(fun): def deco2(x=0): - def inner_deco(func): - @wraps(func) def inner(*args, **kwargs): print('in decos.deco2, added {}'.format(x)) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py index d8f2a379d617d10b69e97cc2c0f34352c8278afd..bbd0f25c05f1245a59d19ff487418863fe297470 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py @@ -135,7 +135,8 @@ def dyfunc_with_if_else_early_return2(): def dyfunc_with_if_else_with_list_geneator(x): if 10 > 5: y = paddle.add_n( - [paddle.full(shape=[2], fill_value=v) for v in range(5)]) + [paddle.full(shape=[2], fill_value=v) for v in range(5)] + ) else: y = x return y @@ -162,9 +163,9 @@ def nested_if_else(x_v): if paddle.mean(y).numpy()[0] < batch_size: y = fluid.layers.abs(y) else: - tmp = fluid.layers.fill_constant(y.shape, - dtype='float32', - value=-1) + tmp = fluid.layers.fill_constant( + y.shape, dtype='float32', value=-1 + ) y = y - tmp else: y = x_v - bias @@ -180,15 +181,15 @@ def nested_if_else_2(x): x_shape_0 = x.shape[0] if x_shape_0 < 1: if fluid.layers.shape(y).numpy()[0] < 1: - res = fluid.layers.fill_constant(value=2, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=2, shape=x.shape, dtype="int32" + ) # `z` is a new 
var here. z = y + 1 else: - res = fluid.layers.fill_constant(value=3, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=3, shape=x.shape, dtype="int32" + ) else: res = x return res @@ -213,33 +214,35 @@ def nested_if_else_3(x): else: y_shape = fluid.layers.shape(y) if y_shape.numpy()[0] < 1: - res = fluid.layers.fill_constant(value=2, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=2, shape=x.shape, dtype="int32" + ) # `z` is created in above code block. z = y + 1 else: - res = fluid.layers.fill_constant(value=3, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=3, shape=x.shape, dtype="int32" + ) # `out` is a new var. out = x + 1 return res class NetWithControlFlowIf(fluid.dygraph.Layer): - def __init__(self, hidden_dim=16): super(NetWithControlFlowIf, self).__init__() self.hidden_dim = hidden_dim self.fc = fluid.dygraph.Linear( input_dim=hidden_dim, output_dim=5, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5))) - self.alpha = 10. + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), + ) + self.alpha = 10.0 self.constant_vars = {} @paddle.jit.to_static @@ -247,18 +250,21 @@ class NetWithControlFlowIf(fluid.dygraph.Layer): hidden_dim = input.shape[-1] if hidden_dim != self.hidden_dim: raise ValueError( - "hidden_dim {} of input is not equal to FC.weight[0]: {}". - format(hidden_dim, self.hidden_dim)) - - self.constant_vars['bias'] = fluid.layers.fill_constant([5], - dtype='float32', - value=1) + "hidden_dim {} of input is not equal to FC.weight[0]: {}".format( + hidden_dim, self.hidden_dim + ) + ) + + self.constant_vars['bias'] = fluid.layers.fill_constant( + [5], dtype='float32', value=1 + ) # Control flow `if` statement fc_out = self.fc(input) if paddle.mean(fc_out).numpy()[0] < 0: y = fc_out + self.constant_vars['bias'] self.constant_vars['w'] = fluid.layers.fill_constant( - [5], dtype='float32', value=10) + [5], dtype='float32', value=10 + ) if y.numpy()[0] < self.alpha: # Create new var, but is not used. 
x = 10 @@ -268,12 +274,13 @@ class NetWithControlFlowIf(fluid.dygraph.Layer): if y.numpy()[-1] < self.alpha: # Modify variable of class self.constant_vars['w'] = fluid.layers.fill_constant( - [hidden_dim], dtype='float32', value=9) + [hidden_dim], dtype='float32', value=9 + ) y = fluid.layers.abs(y) else: - tmp = fluid.layers.fill_constant(y.shape, - dtype='float32', - value=-1) + tmp = fluid.layers.fill_constant( + y.shape, dtype='float32', value=-1 + ) y = y - tmp else: y = fc_out - self.constant_vars['bias'] @@ -284,8 +291,12 @@ class NetWithControlFlowIf(fluid.dygraph.Layer): def if_with_and_or(x_v, label=None): batch_size = fluid.layers.shape(x_v) - if x_v is not None and (paddle.mean(x_v).numpy()[0] > 0 or label - is not None) and batch_size[0] > 1 and True: + if ( + x_v is not None + and (paddle.mean(x_v).numpy()[0] > 0 or label is not None) + and batch_size[0] > 1 + and True + ): x_v = x_v - 1 else: x_v = x_v + 1 @@ -317,8 +328,12 @@ def if_with_and_or_2(x, y=None): def if_with_and_or_3(x, y=None): batch_size = fluid.layers.shape(x) mean_res = paddle.mean(x) - if x is not None and batch_size[0] > 1 and y is not None and mean_res.numpy( - )[0] > 0: + if ( + x is not None + and batch_size[0] > 1 + and y is not None + and mean_res.numpy()[0] > 0 + ): x = x + 1 if mean_res.numpy()[0] > 0 and (x is not None and batch_size[0] > 1) and y: x = x - 1 @@ -328,19 +343,19 @@ def if_with_and_or_3(x, y=None): def if_with_and_or_4(x, y=None): batch_size = fluid.layers.shape(x) mean_res = paddle.mean(x) - if (x is not None and batch_size[0] > 1) or (y is not None - and mean_res.numpy()[0] > 0): + if (x is not None and batch_size[0] > 1) or ( + y is not None and mean_res.numpy()[0] > 0 + ): x = x + 1 - if (x is not None or batch_size[0] > 1) and (y is not None - or mean_res.numpy()[0] > 0): + if (x is not None or batch_size[0] > 1) and ( + y is not None or mean_res.numpy()[0] > 0 + ): x = x - 1 return x def if_with_class_var(x, y=None): - class Foo(object): - def __init__(self): self.a = 1 self.b = 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py index 2759177e9780bce3f6bf867bb864126223bdc6a5..554cf95e82483ac4b8bdc56d0284f3e6a0171ded 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/predictor_utils.py @@ -43,7 +43,8 @@ class PredictorTools(object): if os.path.exists(os.path.join(self.model_path, self.params_file)): config = AnalysisConfig( os.path.join(self.model_path, self.model_file), - os.path.join(self.model_path, self.params_file)) + os.path.join(self.model_path, self.params_file), + ) else: config = AnalysisConfig(os.path.join(self.model_path)) @@ -73,7 +74,7 @@ class PredictorTools(object): tensor_shapes = predictor.get_input_tensor_shape() names = predictor.get_input_names() for i, name in enumerate(names): - #assert name in self.feeds_var, '{} not in feeded dict'.format(name) + # assert name in self.feeds_var, '{} not in feeded dict'.format(name) shape = tensor_shapes[name] tensor = predictor.get_input_tensor(name) feed_data = self.feeds_var[i] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py index 5af9b2ab94ba5574b2d48accf4a4926d36594e71..266bcf4e7b786c86260789e4afe4617a7c811fc2 100644 --- 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py @@ -24,24 +24,26 @@ from paddle.fluid.dygraph.jit import declarative from paddle.fluid.dygraph.nn import Embedding from seq2seq_utils import Seq2SeqModelHyperParams as args -INF = 1. * 1e5 +INF = 1.0 * 1e5 alpha = 0.6 -uniform_initializer = lambda x: fluid.initializer.UniformInitializer(low=-x, - high=x) +uniform_initializer = lambda x: fluid.initializer.UniformInitializer( + low=-x, high=x +) zero_constant = fluid.initializer.Constant(0.0) class BasicLSTMUnit(Layer): - - def __init__(self, - hidden_size, - input_size, - param_attr=None, - bias_attr=None, - gate_activation=None, - activation=None, - forget_bias=1.0, - dtype='float32'): + def __init__( + self, + hidden_size, + input_size, + param_attr=None, + bias_attr=None, + gate_activation=None, + activation=None, + forget_bias=1.0, + dtype='float32', + ): super(BasicLSTMUnit, self).__init__(dtype) self._hiden_size = hidden_size @@ -56,12 +58,15 @@ class BasicLSTMUnit(Layer): self._weight = self.create_parameter( attr=self._param_attr, shape=[self._input_size + self._hiden_size, 4 * self._hiden_size], - dtype=self._dtype) + dtype=self._dtype, + ) - self._bias = self.create_parameter(attr=self._bias_attr, - shape=[4 * self._hiden_size], - dtype=self._dtype, - is_bias=True) + self._bias = self.create_parameter( + attr=self._bias_attr, + shape=[4 * self._hiden_size], + dtype=self._dtype, + is_bias=True, + ) def forward(self, input, pre_hidden, pre_cell): concat_input_hidden = layers.concat([input, pre_hidden], 1) @@ -70,9 +75,11 @@ class BasicLSTMUnit(Layer): gate_input = layers.elementwise_add(gate_input, self._bias) i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) new_cell = layers.elementwise_add( - layers.elementwise_mul(pre_cell, - layers.sigmoid(f + self._forget_bias)), - layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j))) + layers.elementwise_mul( + pre_cell, layers.sigmoid(f + self._forget_bias) + ), + layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)), + ) new_hidden = layers.tanh(new_cell) * layers.sigmoid(o) @@ -80,20 +87,21 @@ class BasicLSTMUnit(Layer): class BaseModel(fluid.dygraph.Layer): - - def __init__(self, - hidden_size, - src_vocab_size, - tar_vocab_size, - batch_size, - num_layers=1, - init_scale=0.1, - dropout=None, - beam_size=1, - beam_start_token=1, - beam_end_token=2, - beam_max_step_num=2, - mode='train'): + def __init__( + self, + hidden_size, + src_vocab_size, + tar_vocab_size, + batch_size, + num_layers=1, + init_scale=0.1, + dropout=None, + beam_size=1, + beam_start_token=1, + beam_end_token=2, + beam_max_step_num=2, + mode='train', + ): super(BaseModel, self).__init__() self.hidden_size = hidden_size self.src_vocab_size = src_vocab_size @@ -116,40 +124,54 @@ class BaseModel(fluid.dygraph.Layer): self.src_embeder = Embedding( size=[self.src_vocab_size, self.hidden_size], param_attr=fluid.ParamAttr( - initializer=uniform_initializer(init_scale))) + initializer=uniform_initializer(init_scale) + ), + ) self.tar_embeder = Embedding( size=[self.tar_vocab_size, self.hidden_size], is_sparse=False, param_attr=fluid.ParamAttr( - initializer=uniform_initializer(init_scale))) + initializer=uniform_initializer(init_scale) + ), + ) self.enc_units = [] for i in range(num_layers): self.enc_units.append( self.add_sublayer( "enc_units_%d" % i, - BasicLSTMUnit(hidden_size=self.hidden_size, - input_size=self.hidden_size, - 
param_attr=param_attr, - bias_attr=bias_attr, - forget_bias=forget_bias))) + BasicLSTMUnit( + hidden_size=self.hidden_size, + input_size=self.hidden_size, + param_attr=param_attr, + bias_attr=bias_attr, + forget_bias=forget_bias, + ), + ) + ) self.dec_units = [] for i in range(num_layers): self.dec_units.append( self.add_sublayer( "dec_units_%d" % i, - BasicLSTMUnit(hidden_size=self.hidden_size, - input_size=self.hidden_size, - param_attr=param_attr, - bias_attr=bias_attr, - forget_bias=forget_bias))) - - self.fc = fluid.dygraph.nn.Linear(self.hidden_size, - self.tar_vocab_size, - param_attr=param_attr, - bias_attr=False) + BasicLSTMUnit( + hidden_size=self.hidden_size, + input_size=self.hidden_size, + param_attr=param_attr, + bias_attr=bias_attr, + forget_bias=forget_bias, + ), + ) + ) + + self.fc = fluid.dygraph.nn.Linear( + self.hidden_size, + self.tar_vocab_size, + param_attr=param_attr, + bias_attr=False, + ) def _transpose_batch_time(self, x): return fluid.layers.transpose(x, [1, 0] + list(range(2, len(x.shape)))) @@ -168,8 +190,9 @@ class BaseModel(fluid.dygraph.Layer): return x def _real_state(self, state, new_state, step_mask): - new_state = fluid.layers.elementwise_mul(new_state, step_mask, axis=0) - \ - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0) + new_state = fluid.layers.elementwise_mul( + new_state, step_mask, axis=0 + ) - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0) return new_state def _gather(self, x, indices, batch_pos): @@ -187,26 +210,28 @@ class BaseModel(fluid.dygraph.Layer): # NOTE: modify model code about `enc_hidden` and `enc_cell` to transforme dygraph code successfully. # Because nested list can't be transformed now. enc_hidden_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) enc_cell_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) zero = fluid.layers.zeros(shape=[1], dtype="int64") enc_hidden = fluid.layers.create_array(dtype="float32") enc_cell = fluid.layers.create_array(dtype="float32") for i in range(self.num_layers): index = zero + i - enc_hidden = fluid.layers.array_write(enc_hidden_0, - index, - array=enc_hidden) - enc_cell = fluid.layers.array_write(enc_cell_0, - index, - array=enc_cell) + enc_hidden = fluid.layers.array_write( + enc_hidden_0, index, array=enc_hidden + ) + enc_cell = fluid.layers.array_write( + enc_cell_0, index, array=enc_cell + ) max_seq_len = src_emb.shape[0] - enc_len_mask = fluid.layers.sequence_mask(src_sequence_length, - maxlen=max_seq_len, - dtype="float32") + enc_len_mask = fluid.layers.sequence_mask( + src_sequence_length, maxlen=max_seq_len, dtype="float32" + ) enc_len_mask = fluid.layers.transpose(enc_len_mask, [1, 0]) # TODO: Because diff exits if call while_loop in static graph. 
@@ -218,21 +243,24 @@ class BaseModel(fluid.dygraph.Layer): step_mask = enc_len_mask[k] new_enc_hidden, new_enc_cell = [], [] for i in range(self.num_layers): - enc_new_hidden, enc_new_cell = self.enc_units[i](enc_step_input, - enc_hidden[i], - enc_cell[i]) + enc_new_hidden, enc_new_cell = self.enc_units[i]( + enc_step_input, enc_hidden[i], enc_cell[i] + ) if self.dropout != None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: enc_step_input = enc_new_hidden new_enc_hidden.append( - self._real_state(enc_hidden[i], enc_new_hidden, step_mask)) + self._real_state(enc_hidden[i], enc_new_hidden, step_mask) + ) new_enc_cell.append( - self._real_state(enc_cell[i], enc_new_cell, step_mask)) + self._real_state(enc_cell[i], enc_new_cell, step_mask) + ) enc_hidden, enc_cell = new_enc_hidden, new_enc_cell @@ -245,30 +273,31 @@ class BaseModel(fluid.dygraph.Layer): step_input = tar_emb[j] new_dec_hidden, new_dec_cell = [], [] for i in range(self.num_layers): - new_hidden, new_cell = self.dec_units[i](step_input, - dec_hidden[i], - dec_cell[i]) + new_hidden, new_cell = self.dec_units[i]( + step_input, dec_hidden[i], dec_cell[i] + ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) if self.dropout != None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: step_input = new_hidden dec_output.append(step_input) dec_output = fluid.layers.stack(dec_output) dec_output = self.fc(self._transpose_batch_time(dec_output)) - loss = fluid.layers.softmax_with_cross_entropy(logits=dec_output, - label=label, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=dec_output, label=label, soft_label=False + ) loss = fluid.layers.squeeze(loss, axes=[2]) max_tar_seq_len = fluid.layers.shape(tar)[1] - tar_mask = fluid.layers.sequence_mask(tar_sequence_length, - maxlen=max_tar_seq_len, - dtype='float32') + tar_mask = fluid.layers.sequence_mask( + tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32' + ) loss = loss * tar_mask loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -283,26 +312,28 @@ class BaseModel(fluid.dygraph.Layer): src_emb = self.src_embeder(self._transpose_batch_time(src)) enc_hidden_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) enc_cell_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) zero = fluid.layers.zeros(shape=[1], dtype="int64") enc_hidden = fluid.layers.create_array(dtype="float32") enc_cell = fluid.layers.create_array(dtype="float32") for j in range(self.num_layers): index = zero + j - enc_hidden = fluid.layers.array_write(enc_hidden_0, - index, - array=enc_hidden) - enc_cell = fluid.layers.array_write(enc_cell_0, - index, - array=enc_cell) + enc_hidden = fluid.layers.array_write( + enc_hidden_0, index, array=enc_hidden + ) + enc_cell = fluid.layers.array_write( + enc_cell_0, index, array=enc_cell + ) max_seq_len = src_emb.shape[0] - enc_len_mask = fluid.layers.sequence_mask(src_sequence_length, - maxlen=max_seq_len, - dtype="float32") + enc_len_mask = fluid.layers.sequence_mask( + src_sequence_length, 
maxlen=max_seq_len, dtype="float32" + ) enc_len_mask = fluid.layers.transpose(enc_len_mask, [1, 0]) for k in range(args.max_seq_len): @@ -312,40 +343,50 @@ class BaseModel(fluid.dygraph.Layer): new_enc_hidden, new_enc_cell = [], [] for i in range(self.num_layers): - enc_new_hidden, enc_new_cell = self.enc_units[i](enc_step_input, - enc_hidden[i], - enc_cell[i]) + enc_new_hidden, enc_new_cell = self.enc_units[i]( + enc_step_input, enc_hidden[i], enc_cell[i] + ) if self.dropout != None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: enc_step_input = enc_new_hidden new_enc_hidden.append( - self._real_state(enc_hidden[i], enc_new_hidden, step_mask)) + self._real_state(enc_hidden[i], enc_new_hidden, step_mask) + ) new_enc_cell.append( - self._real_state(enc_cell[i], enc_new_cell, step_mask)) + self._real_state(enc_cell[i], enc_new_cell, step_mask) + ) enc_hidden, enc_cell = new_enc_hidden, new_enc_cell # beam search batch_beam_shape = (self.batch_size, self.beam_size) - vocab_size_tensor = to_variable(np.full( - (1), self.tar_vocab_size)).astype("int64") + vocab_size_tensor = to_variable( + np.full((1), self.tar_vocab_size) + ).astype("int64") start_token_tensor = to_variable( - np.full(batch_beam_shape, self.beam_start_token, dtype='int64')) + np.full(batch_beam_shape, self.beam_start_token, dtype='int64') + ) end_token_tensor = to_variable( - np.full(batch_beam_shape, self.beam_end_token, dtype='int64')) + np.full(batch_beam_shape, self.beam_end_token, dtype='int64') + ) step_input = self.tar_embeder(start_token_tensor) beam_finished = to_variable( - np.full(batch_beam_shape, 0, dtype='float32')) + np.full(batch_beam_shape, 0, dtype='float32') + ) beam_state_log_probs = to_variable( - np.array([[0.] 
+ [-self.kinf] * (self.beam_size - 1)], - dtype="float32")) - beam_state_log_probs = fluid.layers.expand(beam_state_log_probs, - [self.batch_size, 1]) + np.array( + [[0.0] + [-self.kinf] * (self.beam_size - 1)], dtype="float32" + ) + ) + beam_state_log_probs = fluid.layers.expand( + beam_state_log_probs, [self.batch_size, 1] + ) dec_hidden, dec_cell = enc_hidden, enc_cell dec_hidden = [self._expand_to_beam_size(ele) for ele in dec_hidden] dec_cell = [self._expand_to_beam_size(ele) for ele in dec_cell] @@ -353,7 +394,10 @@ class BaseModel(fluid.dygraph.Layer): batch_pos = fluid.layers.expand( fluid.layers.unsqueeze( to_variable(np.arange(0, self.batch_size, 1, dtype="int64")), - [1]), [1, self.beam_size]) + [1], + ), + [1, self.beam_size], + ) predicted_ids = [] parent_ids = [] @@ -369,16 +413,17 @@ class BaseModel(fluid.dygraph.Layer): dec_cell = [self._merge_batch_beams(state) for state in dec_cell] for i in range(self.num_layers): - new_hidden, new_cell = self.dec_units[i](step_input, - dec_hidden[i], - dec_cell[i]) + new_hidden, new_cell = self.dec_units[i]( + step_input, dec_hidden[i], dec_cell[i] + ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) if self.dropout != None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: step_input = new_hidden @@ -386,28 +431,40 @@ class BaseModel(fluid.dygraph.Layer): cell_outputs = self.fc(cell_outputs) step_log_probs = fluid.layers.log( - fluid.layers.softmax(cell_outputs)) + fluid.layers.softmax(cell_outputs) + ) noend_array = [-self.kinf] * self.tar_vocab_size noend_array[self.beam_end_token] = 0 noend_mask_tensor = to_variable( - np.array(noend_array, dtype='float32')) + np.array(noend_array, dtype='float32') + ) step_log_probs = fluid.layers.elementwise_mul( - fluid.layers.expand(fluid.layers.unsqueeze(beam_finished, [2]), [1, 1, self.tar_vocab_size]), - noend_mask_tensor, axis=-1) - \ - fluid.layers.elementwise_mul(step_log_probs, (beam_finished - 1), axis=0) - log_probs = fluid.layers.elementwise_add(x=step_log_probs, - y=beam_state_log_probs, - axis=0) + fluid.layers.expand( + fluid.layers.unsqueeze(beam_finished, [2]), + [1, 1, self.tar_vocab_size], + ), + noend_mask_tensor, + axis=-1, + ) - fluid.layers.elementwise_mul( + step_log_probs, (beam_finished - 1), axis=0 + ) + log_probs = fluid.layers.elementwise_add( + x=step_log_probs, y=beam_state_log_probs, axis=0 + ) scores = fluid.layers.reshape( - log_probs, [-1, self.beam_size * self.tar_vocab_size]) - topk_scores, topk_indices = fluid.layers.topk(input=scores, - k=self.beam_size) + log_probs, [-1, self.beam_size * self.tar_vocab_size] + ) + topk_scores, topk_indices = fluid.layers.topk( + input=scores, k=self.beam_size + ) beam_indices = fluid.layers.elementwise_floordiv( - topk_indices, vocab_size_tensor) + topk_indices, vocab_size_tensor + ) token_indices = fluid.layers.elementwise_mod( - topk_indices, vocab_size_tensor) + topk_indices, vocab_size_tensor + ) next_log_probs = self._gather(scores, topk_indices, batch_pos) x = 0 @@ -434,7 +491,8 @@ class BaseModel(fluid.dygraph.Layer): next_finished = fluid.layers.cast(next_finished, "bool") next_finished = fluid.layers.logical_or( next_finished, - fluid.layers.equal(token_indices, end_token_tensor)) + fluid.layers.equal(token_indices, end_token_tensor), + ) next_finished = fluid.layers.cast(next_finished, "float32") dec_hidden, dec_cell = new_dec_hidden, new_dec_cell @@ 
-452,20 +510,21 @@ class BaseModel(fluid.dygraph.Layer): class AttentionModel(fluid.dygraph.Layer): - - def __init__(self, - hidden_size, - src_vocab_size, - tar_vocab_size, - batch_size, - num_layers=1, - init_scale=0.1, - dropout=None, - beam_size=1, - beam_start_token=1, - beam_end_token=2, - beam_max_step_num=2, - mode='train'): + def __init__( + self, + hidden_size, + src_vocab_size, + tar_vocab_size, + batch_size, + num_layers=1, + init_scale=0.1, + dropout=None, + beam_size=1, + beam_start_token=1, + beam_end_token=2, + beam_max_step_num=2, + mode='train', + ): super(AttentionModel, self).__init__() self.hidden_size = hidden_size self.src_vocab_size = src_vocab_size @@ -489,25 +548,33 @@ class AttentionModel(fluid.dygraph.Layer): size=[self.src_vocab_size, self.hidden_size], param_attr=fluid.ParamAttr( name='source_embedding', - initializer=uniform_initializer(init_scale))) + initializer=uniform_initializer(init_scale), + ), + ) self.tar_embeder = Embedding( size=[self.tar_vocab_size, self.hidden_size], is_sparse=False, param_attr=fluid.ParamAttr( name='target_embedding', - initializer=uniform_initializer(init_scale))) + initializer=uniform_initializer(init_scale), + ), + ) self.enc_units = [] for i in range(num_layers): self.enc_units.append( self.add_sublayer( "enc_units_%d" % i, - BasicLSTMUnit(hidden_size=self.hidden_size, - input_size=self.hidden_size, - param_attr=param_attr, - bias_attr=bias_attr, - forget_bias=forget_bias))) + BasicLSTMUnit( + hidden_size=self.hidden_size, + input_size=self.hidden_size, + param_attr=param_attr, + bias_attr=bias_attr, + forget_bias=forget_bias, + ), + ) + ) self.dec_units = [] for i in range(num_layers): @@ -515,50 +582,67 @@ class AttentionModel(fluid.dygraph.Layer): self.dec_units.append( self.add_sublayer( "dec_units_%d" % i, - BasicLSTMUnit(hidden_size=self.hidden_size, - input_size=self.hidden_size * 2, - param_attr=ParamAttr( - name="dec_units_%d" % i, - initializer=uniform_initializer( - self.init_scale)), - bias_attr=bias_attr, - forget_bias=forget_bias))) + BasicLSTMUnit( + hidden_size=self.hidden_size, + input_size=self.hidden_size * 2, + param_attr=ParamAttr( + name="dec_units_%d" % i, + initializer=uniform_initializer( + self.init_scale + ), + ), + bias_attr=bias_attr, + forget_bias=forget_bias, + ), + ) + ) else: self.dec_units.append( self.add_sublayer( "dec_units_%d" % i, - BasicLSTMUnit(hidden_size=self.hidden_size, - input_size=self.hidden_size, - param_attr=ParamAttr( - name="dec_units_%d" % i, - initializer=uniform_initializer( - self.init_scale)), - bias_attr=bias_attr, - forget_bias=forget_bias))) + BasicLSTMUnit( + hidden_size=self.hidden_size, + input_size=self.hidden_size, + param_attr=ParamAttr( + name="dec_units_%d" % i, + initializer=uniform_initializer( + self.init_scale + ), + ), + bias_attr=bias_attr, + forget_bias=forget_bias, + ), + ) + ) self.attn_fc = fluid.dygraph.nn.Linear( self.hidden_size, self.hidden_size, - param_attr=ParamAttr(name="self_attn_fc", - initializer=uniform_initializer( - self.init_scale)), - bias_attr=False) + param_attr=ParamAttr( + name="self_attn_fc", + initializer=uniform_initializer(self.init_scale), + ), + bias_attr=False, + ) self.concat_fc = fluid.dygraph.nn.Linear( 2 * self.hidden_size, self.hidden_size, - param_attr=ParamAttr(name="self_concat_fc", - initializer=uniform_initializer( - self.init_scale)), - bias_attr=False) - - self.fc = fluid.dygraph.nn.Linear(self.hidden_size, - self.tar_vocab_size, - param_attr=ParamAttr( - name="self_fc", - initializer=uniform_initializer( - 
self.init_scale)), - bias_attr=False) + param_attr=ParamAttr( + name="self_concat_fc", + initializer=uniform_initializer(self.init_scale), + ), + bias_attr=False, + ) + + self.fc = fluid.dygraph.nn.Linear( + self.hidden_size, + self.tar_vocab_size, + param_attr=ParamAttr( + name="self_fc", initializer=uniform_initializer(self.init_scale) + ), + bias_attr=False, + ) def _transpose_batch_time(self, x): return fluid.layers.transpose(x, [1, 0] + list(range(2, len(x.shape)))) @@ -571,16 +655,16 @@ class AttentionModel(fluid.dygraph.Layer): expand_times = [1] * len(x.shape) expand_times[1] = self.beam_size x = fluid.layers.expand(x, expand_times) # [batch_size, beam_size, ...] - x = fluid.layers.transpose(x, - list(range(2, len(x.shape))) + - [0, 1]) # [..., batch_size, beam_size] + x = fluid.layers.transpose( + x, list(range(2, len(x.shape))) + [0, 1] + ) # [..., batch_size, beam_size] # use 0 to copy to avoid wrong shape - x = fluid.layers.reshape(x, shape=[0] * (len(x.shape) - 2) + - [-1]) # [..., batch_size * beam_size] + x = fluid.layers.reshape( + x, shape=[0] * (len(x.shape) - 2) + [-1] + ) # [..., batch_size * beam_size] x = fluid.layers.transpose( - x, [len(x.shape) - 1] + - list(range(0, - len(x.shape) - 1))) # [batch_size * beam_size, ...] + x, [len(x.shape) - 1] + list(range(0, len(x.shape) - 1)) + ) # [batch_size * beam_size, ...] return x def _split_batch_beams(self, x): @@ -594,8 +678,9 @@ class AttentionModel(fluid.dygraph.Layer): return x def _real_state(self, state, new_state, step_mask): - new_state = fluid.layers.elementwise_mul(new_state, step_mask, axis=0) - \ - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0) + new_state = fluid.layers.elementwise_mul( + new_state, step_mask, axis=0 + ) - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0) return new_state def _gather(self, x, indices, batch_pos): @@ -635,29 +720,31 @@ class AttentionModel(fluid.dygraph.Layer): # NOTE: modify model code about `enc_hidden` and `enc_cell` to transforme dygraph code successfully. # Because nested list can't be transformed now. 
enc_hidden_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) enc_hidden_0.stop_gradient = True enc_cell_0 = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) enc_hidden_0.stop_gradient = True zero = fluid.layers.zeros(shape=[1], dtype="int64") enc_hidden = fluid.layers.create_array(dtype="float32") enc_cell = fluid.layers.create_array(dtype="float32") for i in range(self.num_layers): index = zero + i - enc_hidden = fluid.layers.array_write(enc_hidden_0, - index, - array=enc_hidden) - enc_cell = fluid.layers.array_write(enc_cell_0, - index, - array=enc_cell) + enc_hidden = fluid.layers.array_write( + enc_hidden_0, index, array=enc_hidden + ) + enc_cell = fluid.layers.array_write( + enc_cell_0, index, array=enc_cell + ) max_seq_len = src_emb.shape[0] - enc_len_mask = fluid.layers.sequence_mask(src_sequence_length, - maxlen=max_seq_len, - dtype="float32") - enc_padding_mask = (enc_len_mask - 1.0) + enc_len_mask = fluid.layers.sequence_mask( + src_sequence_length, maxlen=max_seq_len, dtype="float32" + ) + enc_padding_mask = enc_len_mask - 1.0 enc_len_mask = fluid.layers.transpose(enc_len_mask, [1, 0]) enc_outputs = [] @@ -670,21 +757,24 @@ class AttentionModel(fluid.dygraph.Layer): step_mask = enc_len_mask[k] new_enc_hidden, new_enc_cell = [], [] for i in range(self.num_layers): - enc_new_hidden, enc_new_cell = self.enc_units[i](enc_step_input, - enc_hidden[i], - enc_cell[i]) + enc_new_hidden, enc_new_cell = self.enc_units[i]( + enc_step_input, enc_hidden[i], enc_cell[i] + ) if self.dropout != None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: enc_step_input = enc_new_hidden new_enc_hidden.append( - self._real_state(enc_hidden[i], enc_new_hidden, step_mask)) + self._real_state(enc_hidden[i], enc_new_hidden, step_mask) + ) new_enc_cell.append( - self._real_state(enc_cell[i], enc_new_cell, step_mask)) + self._real_state(enc_cell[i], enc_new_cell, step_mask) + ) enc_outputs.append(enc_step_input) enc_hidden, enc_cell = new_enc_hidden, new_enc_cell @@ -693,7 +783,8 @@ class AttentionModel(fluid.dygraph.Layer): # train input_feed = to_variable( - np.zeros((self.batch_size, self.hidden_size), dtype='float32')) + np.zeros((self.batch_size, self.hidden_size), dtype='float32') + ) # NOTE: set stop_gradient here, otherwise grad var is null input_feed.stop_gradient = True dec_hidden, dec_cell = enc_hidden, enc_cell @@ -707,16 +798,17 @@ class AttentionModel(fluid.dygraph.Layer): step_input = fluid.layers.concat([step_input, input_feed], 1) new_dec_hidden, new_dec_cell = [], [] for i in range(self.num_layers): - new_hidden, new_cell = self.dec_units[i](step_input, - dec_hidden[i], - dec_cell[i]) + new_hidden, new_cell = self.dec_units[i]( + step_input, dec_hidden[i], dec_cell[i] + ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) if self.dropout != None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) else: step_input = new_hidden dec_att = self.attention(step_input, enc_outputs, enc_padding_mask) @@ -729,14 +821,14 @@ class AttentionModel(fluid.dygraph.Layer): dec_output = 
fluid.layers.stack(dec_output) dec_output = self.fc(self._transpose_batch_time(dec_output)) - loss = fluid.layers.softmax_with_cross_entropy(logits=dec_output, - label=label, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=dec_output, label=label, soft_label=False + ) loss = fluid.layers.squeeze(loss, axes=[2]) max_tar_seq_len = fluid.layers.shape(tar)[1] - tar_mask = fluid.layers.sequence_mask(tar_sequence_length, - maxlen=max_tar_seq_len, - dtype='float32') + tar_mask = fluid.layers.sequence_mask( + tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32' + ) loss = loss * tar_mask loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py index 23e25a974eb7c1b61b9dc33b8d355b9822a00d61..44a55c79c92e33ead837a5c1eae771213587596b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_utils.py @@ -40,7 +40,7 @@ def get_data_iter(batch_size, mode='train', cache_num=20): mask = np.zeros((bs), dtype='int32') for i, ele in enumerate(data): - ids[i, :len(ele)] = ele + ids[i, : len(ele)] = ele if not source: mask[i] = len(ele) - 1 else: @@ -61,7 +61,7 @@ def get_data_iter(batch_size, mode='train', cache_num=20): new_cache = sorted(b_src, key=lambda k: len(k[0])) for i in range(cache_num): - batch_data = new_cache[i * batch_size:(i + 1) * batch_size] + batch_data = new_cache[i * batch_size : (i + 1) * batch_size] src_cache = [w[0] for w in batch_data] tar_cache = [w[1] for w in batch_data] src_ids, src_mask = to_pad_np(src_cache, source=True) @@ -83,7 +83,7 @@ def get_data_iter(batch_size, mode='train', cache_num=20): for i in range(cache_num): batch_end = min(len(new_cache), (i + 1) * batch_size) - batch_data = new_cache[i * batch_size:batch_end] + batch_data = new_cache[i * batch_size : batch_end] src_cache = [w[0] for w in batch_data] tar_cache = [w[1] for w in batch_data] src_ids, src_mask = to_pad_np(src_cache, source=True) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py index a18da2173feab117047946a54631f46067d8ddc4..b9a32c6e52a0d18c14c1700e9eb0fdd4c6c12271 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py @@ -41,12 +41,14 @@ class EmbeddingLayer(object): """ # TODO(huihuangzheng): The original code set the is_sparse=True, but it # causes crush in dy2stat. Set it to True after fixing it. 
- emb = Embedding(size=[self.dict_size, self.emb_dim], - is_sparse=True, - padding_idx=self.padding_idx, - param_attr=attr.ParamAttr( - name=self.name, - initializer=fluid.initializer.Xavier())) + emb = Embedding( + size=[self.dict_size, self.emb_dim], + is_sparse=True, + padding_idx=self.padding_idx, + param_attr=attr.ParamAttr( + name=self.name, initializer=fluid.initializer.Xavier() + ), + ) return emb @@ -68,10 +70,12 @@ class FCLayer(object): """ operation """ - fc = FC(size=self.fc_dim, - param_attr=attr.ParamAttr(name="%s.w" % self.name), - bias_attr=attr.ParamAttr(name="%s.b" % self.name), - act=self.act) + fc = FC( + size=self.fc_dim, + param_attr=attr.ParamAttr(name="%s.w" % self.name), + bias_attr=attr.ParamAttr(name="%s.b" % self.name), + act=self.act, + ) return fc @@ -307,14 +311,16 @@ class FC(Layer): conv = fc(data) """ - def __init__(self, - size, - num_flatten_dims=1, - param_attr=None, - bias_attr=None, - act=None, - is_test=False, - dtype="float32"): + def __init__( + self, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + is_test=False, + dtype="float32", + ): super(FC, self).__init__(dtype) self._size = size @@ -328,27 +334,32 @@ class FC(Layer): def _build_once(self, input): i = 0 for inp, param in self._helper.iter_inputs_and_params( - input, self._param_attr): + input, self._param_attr + ): input_shape = inp.shape param_shape = [ - reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], - 1) + reduce( + lambda a, b: a * b, input_shape[self._num_flatten_dims :], 1 + ) ] + [self._size] self.__w.append( self.add_parameter( '_w%d' % i, - self.create_parameter(attr=param, - shape=param_shape, - dtype=self._dtype, - is_bias=False))) + self.create_parameter( + attr=param, + shape=param_shape, + dtype=self._dtype, + is_bias=False, + ), + ) + ) i += 1 size = list([self._size]) - self._b = self.create_parameter(attr=self._bias_attr, - shape=size, - dtype=self._dtype, - is_bias=True) + self._b = self.create_parameter( + attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True + ) # TODO(songyouwei): We should remove _w property @property @@ -384,18 +395,18 @@ class FC(Layer): mul_results = list() i = 0 for inp, param in self._helper.iter_inputs_and_params( - input, self._param_attr): + input, self._param_attr + ): tmp = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type="mul", - inputs={ - "X": inp, - "Y": self.__w[i] - }, - outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": self._num_flatten_dims, - "y_num_col_dims": 1 - }) + self._helper.append_op( + type="mul", + inputs={"X": inp, "Y": self.__w[i]}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": self._num_flatten_dims, + "y_num_col_dims": 1, + }, + ) i += 1 mul_results.append(tmp) @@ -403,22 +414,25 @@ class FC(Layer): pre_bias = mul_results[0] else: pre_bias = self._helper.create_variable_for_type_inference( - self._dtype) - self._helper.append_op(type="sum", - inputs={"X": mul_results}, - outputs={"Out": pre_bias}, - attrs={"use_mkldnn": False}) + self._dtype + ) + self._helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": False}, + ) if self._b is not None: pre_activation = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self._b] - }, - outputs={'Out': [pre_activation]}, - attrs={'axis': self._num_flatten_dims}) + dtype=self._dtype + ) + 
self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self._b]}, + outputs={'Out': [pre_activation]}, + attrs={'axis': self._num_flatten_dims}, + ) else: pre_activation = pre_bias # Currently, we don't support inplace in dygraph mode @@ -450,7 +464,10 @@ class HingeLoss(object): constant.ops(neg, neg.shape, "float32", 0.0), elementwise_add.ops( elementwise_sub.ops(neg, pos), - constant.ops(neg, neg.shape, "float32", self.margin)))) + constant.ops(neg, neg.shape, "float32", self.margin), + ), + ) + ) return loss @@ -469,8 +486,9 @@ class BOW(Layer): self.emb_dim = conf_dict["net"]["emb_dim"] self.bow_dim = conf_dict["net"]["bow_dim"] self.seq_len = conf_dict["seq_len"] - self.emb_layer = EmbeddingLayer(self.dict_size, self.emb_dim, - "emb").ops() + self.emb_layer = EmbeddingLayer( + self.dict_size, self.emb_dim, "emb" + ).ops() self.bow_layer = Linear(self.bow_dim, self.bow_dim) self.bow_layer_po = FCLayer(self.bow_dim, None, "fc").ops() self.softmax_layer = FCLayer(2, "softmax", "cos_sim").ops() @@ -484,10 +502,12 @@ class BOW(Layer): # embedding layer left_emb = self.emb_layer(left) right_emb = self.emb_layer(right) - left_emb = fluid.layers.reshape(left_emb, - shape=[-1, self.seq_len, self.bow_dim]) - right_emb = fluid.layers.reshape(right_emb, - shape=[-1, self.seq_len, self.bow_dim]) + left_emb = fluid.layers.reshape( + left_emb, shape=[-1, self.seq_len, self.bow_dim] + ) + right_emb = fluid.layers.reshape( + right_emb, shape=[-1, self.seq_len, self.bow_dim] + ) bow_left = fluid.layers.reduce_sum(left_emb, dim=1) bow_right = fluid.layers.reduce_sum(right_emb, dim=1) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py index b82a7b663bd516cc6f04570d6edb633a48a277c0..cc8f90594e4371db1331ec89424ab4398a29ca76 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py @@ -43,7 +43,9 @@ class EmbeddingLayer(object): padding_idx=self.padding_idx, param_attr=paddle.ParamAttr( name=self.name, - initializer=paddle.nn.initializer.XavierUniform())) + initializer=paddle.nn.initializer.XavierUniform(), + ), + ) return emb @@ -65,10 +67,12 @@ class FCLayer(object): """ operation """ - fc = FC(size=self.fc_dim, - param_attr=paddle.ParamAttr(name="%s.w" % self.name), - bias_attr=paddle.ParamAttr(name="%s.b" % self.name), - act=self.act) + fc = FC( + size=self.fc_dim, + param_attr=paddle.ParamAttr(name="%s.w" % self.name), + bias_attr=paddle.ParamAttr(name="%s.b" % self.name), + act=self.act, + ) return fc @@ -293,14 +297,16 @@ class FC(paddle.nn.Layer): """ - def __init__(self, - size, - num_flatten_dims=1, - param_attr=None, - bias_attr=None, - act=None, - is_test=False, - dtype="float32"): + def __init__( + self, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + is_test=False, + dtype="float32", + ): super(FC, self).__init__(dtype) self._size = size @@ -314,27 +320,32 @@ class FC(paddle.nn.Layer): def _build_once(self, input): i = 0 for inp, param in self._helper.iter_inputs_and_params( - input, self._param_attr): + input, self._param_attr + ): input_shape = inp.shape param_shape = [ - reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], - 1) + reduce( + lambda a, b: a * b, input_shape[self._num_flatten_dims :], 1 + ) ] + [self._size] self.__w.append( self.add_parameter( '_w%d' % i, - 
self.create_parameter(attr=param, - shape=param_shape, - dtype=self._dtype, - is_bias=False))) + self.create_parameter( + attr=param, + shape=param_shape, + dtype=self._dtype, + is_bias=False, + ), + ) + ) i += 1 size = list([self._size]) - self._b = self.create_parameter(attr=self._bias_attr, - shape=size, - dtype=self._dtype, - is_bias=True) + self._b = self.create_parameter( + attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True + ) # TODO(songyouwei): We should remove _w property @property @@ -370,18 +381,18 @@ class FC(paddle.nn.Layer): mul_results = list() i = 0 for inp, param in self._helper.iter_inputs_and_params( - input, self._param_attr): + input, self._param_attr + ): tmp = self._helper.create_variable_for_type_inference(self._dtype) - self._helper.append_op(type="mul", - inputs={ - "X": inp, - "Y": self.__w[i] - }, - outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": self._num_flatten_dims, - "y_num_col_dims": 1 - }) + self._helper.append_op( + type="mul", + inputs={"X": inp, "Y": self.__w[i]}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": self._num_flatten_dims, + "y_num_col_dims": 1, + }, + ) i += 1 mul_results.append(tmp) @@ -389,22 +400,25 @@ class FC(paddle.nn.Layer): pre_bias = mul_results[0] else: pre_bias = self._helper.create_variable_for_type_inference( - self._dtype) - self._helper.append_op(type="sum", - inputs={"X": mul_results}, - outputs={"Out": pre_bias}, - attrs={"use_mkldnn": False}) + self._dtype + ) + self._helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": False}, + ) if self._b is not None: pre_activation = self._helper.create_variable_for_type_inference( - dtype=self._dtype) - self._helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [self._b] - }, - outputs={'Out': [pre_activation]}, - attrs={'axis': self._num_flatten_dims}) + dtype=self._dtype + ) + self._helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [self._b]}, + outputs={'Out': [pre_activation]}, + attrs={'axis': self._num_flatten_dims}, + ) else: pre_activation = pre_bias # Currently, we don't support inplace in dygraph mode @@ -436,7 +450,10 @@ class HingeLoss(object): constant.ops(neg, neg.shape, "float32", 0.0), elementwise_add.ops( elementwise_sub.ops(neg, pos), - constant.ops(neg, neg.shape, "float32", self.margin)))) + constant.ops(neg, neg.shape, "float32", self.margin), + ), + ) + ) return loss @@ -455,10 +472,12 @@ class BOW(paddle.nn.Layer): self.emb_dim = conf_dict["net"]["emb_dim"] self.bow_dim = conf_dict["net"]["bow_dim"] self.seq_len = conf_dict["seq_len"] - self.emb_layer = EmbeddingLayer(self.dict_size, self.emb_dim, - "emb").ops() - self.bow_layer = paddle.nn.Linear(in_features=self.bow_dim, - out_features=self.bow_dim) + self.emb_layer = EmbeddingLayer( + self.dict_size, self.emb_dim, "emb" + ).ops() + self.bow_layer = paddle.nn.Linear( + in_features=self.bow_dim, out_features=self.bow_dim + ) self.bow_layer_po = FCLayer(self.bow_dim, None, "fc").ops() self.softmax_layer = FCLayer(2, "softmax", "cos_sim").ops() @@ -471,10 +490,12 @@ class BOW(paddle.nn.Layer): # embedding layer left_emb = self.emb_layer(left) right_emb = self.emb_layer(right) - left_emb = paddle.reshape(left_emb, - shape=[-1, self.seq_len, self.bow_dim]) - right_emb = paddle.reshape(right_emb, - shape=[-1, self.seq_len, self.bow_dim]) + left_emb = paddle.reshape( + left_emb, shape=[-1, self.seq_len, self.bow_dim] + ) + right_emb = paddle.reshape( + right_emb, shape=[-1, 
self.seq_len, self.bow_dim] + ) bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1) bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_assert.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_assert.py index 05f11c090de02eb9b6b3cfbe87127df9672d59a8..c975a50bffd654146a8f900660fa24425311f501 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_assert.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_assert.py @@ -33,7 +33,6 @@ def dyfunc_assert_non_variable(x=True): class TestAssertVariable(unittest.TestCase): - def _run(self, func, x, with_exception, to_static): ProgramTranslator().enable(to_static) if with_exception: @@ -49,28 +48,28 @@ class TestAssertVariable(unittest.TestCase): self._run(func, x, with_exception, False) def test_non_variable(self): - self._run_dy_static(dyfunc_assert_non_variable, - x=False, - with_exception=True) - self._run_dy_static(dyfunc_assert_non_variable, - x=True, - with_exception=False) + self._run_dy_static( + dyfunc_assert_non_variable, x=False, with_exception=True + ) + self._run_dy_static( + dyfunc_assert_non_variable, x=True, with_exception=False + ) def test_bool_variable(self): - self._run_dy_static(dyfunc_assert_variable, - x=numpy.array([False]), - with_exception=True) - self._run_dy_static(dyfunc_assert_variable, - x=numpy.array([True]), - with_exception=False) + self._run_dy_static( + dyfunc_assert_variable, x=numpy.array([False]), with_exception=True + ) + self._run_dy_static( + dyfunc_assert_variable, x=numpy.array([True]), with_exception=False + ) def test_int_variable(self): - self._run_dy_static(dyfunc_assert_variable, - x=numpy.array([0]), - with_exception=True) - self._run_dy_static(dyfunc_assert_variable, - x=numpy.array([1]), - with_exception=False) + self._run_dy_static( + dyfunc_assert_variable, x=numpy.array([0]), with_exception=True + ) + self._run_dy_static( + dyfunc_assert_variable, x=numpy.array([1]), with_exception=False + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ast_util.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ast_util.py index dbbbae2732e97fd5c102a92d17e41d8324e74ec7..8315caf3e32c563a34e23ea6236b2cb91246927d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ast_util.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ast_util.py @@ -21,7 +21,11 @@ import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func -from ifelse_simple_func import dyfunc_with_if_else, dyfunc_with_if_else2, nested_if_else +from ifelse_simple_func import ( + dyfunc_with_if_else, + dyfunc_with_if_else2, + nested_if_else, +) class TestAST2Func(unittest.TestCase): @@ -37,7 +41,6 @@ class TestAST2Func(unittest.TestCase): return transformed_func def test_ast2func(self): - def func(x, y): return x + y @@ -55,7 +58,6 @@ class TestAST2Func(unittest.TestCase): self.assertTrue((true_ret == test_ret).all()) def test_ast2func_static(self): - def func(x): y = fluid.layers.relu(x) loss = paddle.mean(y) @@ -74,8 +76,9 @@ class TestAST2Func(unittest.TestCase): def test_ast2func_error(self): with self.assertRaises(Exception) as e: self.assertRaises(TypeError, ast_to_func("x = a + b", 'foo')) - self.assertTrue("Type of ast_root should be gast.AST or ast.AST" in str( - e.exception)) + self.assertTrue( + "Type of ast_root should be gast.AST or ast.AST" in str(e.exception) 
+ ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py index 9074c84e086e9270b54e237042e834ebd2d4cd13..656d2768dcac2b66ed21f5b6a3639c2d15377e1b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py @@ -70,16 +70,22 @@ def dyfunc_bool_to_tensor(x): class TestDygraphBasicApi_ToVariable(unittest.TestCase): - def setUp(self): self.input = np.ones(5).astype("int32") self.test_funcs = [ - dyfunc_to_tensor, dyfunc_bool_to_tensor, dyfunc_int_to_tensor, - dyfunc_float_to_tensor, dyfunc_to_variable, dyfunc_to_variable_2, - dyfunc_to_variable_3 + dyfunc_to_tensor, + dyfunc_bool_to_tensor, + dyfunc_int_to_tensor, + dyfunc_float_to_tensor, + dyfunc_to_variable, + dyfunc_to_variable_2, + dyfunc_to_variable_3, ] - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def get_dygraph_output(self): with fluid.dygraph.guard(): @@ -111,13 +117,18 @@ def dyfunc_BilinearTensorProduct(layer1, layer2): input1_dim=5, input2_dim=4, output_dim=1000, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), + ) - res = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1), - fluid.dygraph.base.to_variable(layer2)) + res = bilinearTensorProduct( + fluid.dygraph.base.to_variable(layer1), + fluid.dygraph.base.to_variable(layer2), + ) return res @@ -126,10 +137,12 @@ def dyfunc_Conv2D(input): num_channels=3, num_filters=2, filter_size=3, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) res = conv2d(input) return res @@ -140,10 +153,12 @@ def dyfunc_Conv3D(input): num_channels=3, num_filters=2, filter_size=3, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) res = conv3d(input) return res @@ -155,10 +170,12 @@ def dyfunc_Conv2DTranspose(input): num_filters=12, filter_size=12, use_cudnn=False, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) ret = conv2dTranspose(input) return ret @@ -170,10 +187,12 @@ def dyfunc_Conv3DTranspose(input): num_filters=12, filter_size=12, use_cudnn=False, - 
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) ret = conv3dTranspose(input) return ret @@ -184,24 +203,24 @@ def dyfunc_Linear(input): input_dim=10, output_dim=5, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) res = fc(input) return res def dyfunc_Pool2D(input): - fluid.dygraph.Pool2D(pool_size=2, - pool_type='avg', - pool_stride=1, - global_pooling=False) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='avg', - pool_stride=1, - global_pooling=False) + fluid.dygraph.Pool2D( + pool_size=2, pool_type='avg', pool_stride=1, global_pooling=False + ) + pool2d = fluid.dygraph.Pool2D( + pool_size=2, pool_type='avg', pool_stride=1, global_pooling=False + ) res = pool2d(input) return res @@ -209,7 +228,8 @@ def dyfunc_Pool2D(input): def dyfunc_Prelu(input): prelu0 = fluid.PRelu( mode='all', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0))) + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)), + ) res = prelu0(input=input) return res @@ -252,7 +272,6 @@ class TestDygraphBasicApi(unittest.TestCase): class TestDygraphBasicApi_BilinearTensorProduct(TestDygraphBasicApi): - def setUp(self): self.input1 = np.random.random((5, 5)).astype('float32') self.input2 = np.random.random((5, 4)).astype('float32') @@ -271,8 +290,9 @@ class TestDygraphBasicApi_BilinearTensorProduct(TestDygraphBasicApi): main_program = fluid.Program() main_program.random_seed = SEED with fluid.program_guard(main_program, startup_program): - static_out = dygraph_to_static_func(self.dygraph_func)(self.input1, - self.input2) + static_out = dygraph_to_static_func(self.dygraph_func)( + self.input1, self.input2 + ) exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) @@ -281,42 +301,36 @@ class TestDygraphBasicApi_BilinearTensorProduct(TestDygraphBasicApi): class TestDygraphBasicApi_Conv2D(TestDygraphBasicApi): - def setUp(self): self.input = np.random.random((1, 3, 3, 5)).astype('float32') self.dygraph_func = dyfunc_Conv2D class TestDygraphBasicApi_Conv3D(TestDygraphBasicApi): - def setUp(self): self.input = np.random.random((1, 3, 3, 3, 5)).astype('float32') self.dygraph_func = dyfunc_Conv3D class TestDygraphBasicApi_Conv2DTranspose(TestDygraphBasicApi): - def setUp(self): self.input = np.random.random((5, 3, 32, 32)).astype('float32') self.dygraph_func = dyfunc_Conv2DTranspose class TestDygraphBasicApi_Conv3DTranspose(TestDygraphBasicApi): - def setUp(self): self.input = np.random.random((5, 3, 12, 32, 32)).astype('float32') self.dygraph_func = dyfunc_Conv3DTranspose class TestDygraphBasicApi_Linear(TestDygraphBasicApi): - def setUp(self): self.input = np.random.random((4, 3, 10)).astype('float32') self.dygraph_func = dyfunc_Linear class TestDygraphBasicApi_Prelu(TestDygraphBasicApi): - def setUp(self): self.input = np.ones([5, 20, 10, 10]).astype('float32') self.dygraph_func = dyfunc_Prelu @@ -325,39 +339,36 @@ class TestDygraphBasicApi_Prelu(TestDygraphBasicApi): # 2. 
test Apis that inherit from LearningRateDecay def dyfunc_CosineDecay(): base_lr = 0.1 - CosineDecay = fluid.dygraph.CosineDecay(learning_rate=base_lr, - step_each_epoch=10000, - epochs=120) + CosineDecay = fluid.dygraph.CosineDecay( + learning_rate=base_lr, step_each_epoch=10000, epochs=120 + ) lr = CosineDecay() return lr def dyfunc_ExponentialDecay(): base_lr = 0.1 - exponential_decay = fluid.dygraph.ExponentialDecay(learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True) + exponential_decay = fluid.dygraph.ExponentialDecay( + learning_rate=base_lr, decay_steps=10000, decay_rate=0.5, staircase=True + ) lr = exponential_decay() return lr def dyfunc_InverseTimeDecay(): base_lr = 0.1 - inverse_time_decay = fluid.dygraph.InverseTimeDecay(learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True) + inverse_time_decay = fluid.dygraph.InverseTimeDecay( + learning_rate=base_lr, decay_steps=10000, decay_rate=0.5, staircase=True + ) lr = inverse_time_decay() return lr def dyfunc_NaturalExpDecay(): base_lr = 0.1 - natural_exp_decay = fluid.dygraph.NaturalExpDecay(learning_rate=base_lr, - decay_steps=10000, - decay_rate=0.5, - staircase=True) + natural_exp_decay = fluid.dygraph.NaturalExpDecay( + learning_rate=base_lr, decay_steps=10000, decay_rate=0.5, staircase=True + ) lr = natural_exp_decay() return lr @@ -386,7 +397,6 @@ def dyfunc_PolynomialDecay(): class TestDygraphBasicApi_CosineDecay(unittest.TestCase): - def setUp(self): self.dygraph_func = dyfunc_CosineDecay @@ -417,43 +427,38 @@ class TestDygraphBasicApi_CosineDecay(unittest.TestCase): class TestDygraphBasicApi_ExponentialDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_ExponentialDecay class TestDygraphBasicApi_InverseTimeDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_InverseTimeDecay class TestDygraphBasicApi_NaturalExpDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_NaturalExpDecay class TestDygraphBasicApi_NoamDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_NoamDecay class TestDygraphBasicApi_PiecewiseDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_PiecewiseDecay class TestDygraphBasicApi_PolynomialDecay(TestDygraphBasicApi_CosineDecay): - def setUp(self): self.dygraph_func = dyfunc_PolynomialDecay def _dygraph_fn(): import paddle.fluid as fluid + x = np.random.random((1, 3)).astype('float32') with fluid.dygraph.guard(): fluid.dygraph.to_variable(x) @@ -461,7 +466,6 @@ def _dygraph_fn(): class TestDygraphApiRecognition(unittest.TestCase): - def setUp(self): self.src = inspect.getsource(_dygraph_fn) self.root = gast.parse(self.src) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py index 1929df30fe30722bb545b607137d37deaf6633f2..7fb9eca260636d31728942d0c122be1f2706e4b3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py @@ -29,15 +29,15 @@ from bert_utils import get_bert_config, get_feed_data_reader from predictor_utils import PredictorTools program_translator = ProgramTranslator() -place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) SEED = 2020 STEP_NUM = 10 PRINT_STEP = 2 class 
TestBert(unittest.TestCase): - def setUp(self): self.bert_config = get_bert_config() self.data_reader = get_feed_data_reader(self.bert_config) @@ -46,8 +46,9 @@ class TestBert(unittest.TestCase): self.model_save_prefix = os.path.join(self.model_save_dir, 'bert') self.model_filename = 'bert' + INFER_MODEL_SUFFIX self.params_filename = 'bert' + INFER_PARAMS_SUFFIX - self.dy_state_dict_save_path = os.path.join(self.temp_dir.name, - 'bert.dygraph') + self.dy_state_dict_save_path = os.path.join( + self.temp_dir.name, 'bert.dygraph' + ) def tearDown(self): self.temp_dir.cleanup() @@ -57,20 +58,30 @@ class TestBert(unittest.TestCase): fluid.default_main_program().random_seed = SEED fluid.default_startup_program().random_seed = SEED - data_loader = fluid.io.DataLoader.from_generator(capacity=50, - iterable=True) - data_loader.set_batch_generator(data_reader.data_generator(), - places=place) + data_loader = fluid.io.DataLoader.from_generator( + capacity=50, iterable=True + ) + data_loader.set_batch_generator( + data_reader.data_generator(), places=place + ) - bert = PretrainModelLayer(config=bert_config, - weight_sharing=False, - use_fp16=False) + bert = PretrainModelLayer( + config=bert_config, weight_sharing=False, use_fp16=False + ) optimizer = fluid.optimizer.Adam(parameter_list=bert.parameters()) step_idx = 0 speed_list = [] for input_data in data_loader(): - src_ids, pos_ids, sent_ids, input_mask, mask_label, mask_pos, labels = input_data + ( + src_ids, + pos_ids, + sent_ids, + input_mask, + mask_label, + mask_pos, + labels, + ) = input_data next_sent_acc, mask_lm_loss, total_loss = bert( src_ids=src_ids, position_ids=pos_ids, @@ -78,7 +89,8 @@ class TestBert(unittest.TestCase): input_mask=input_mask, mask_label=mask_label, mask_pos=mask_pos, - labels=labels) + labels=labels, + ) total_loss.backward() optimizer.minimize(total_loss) bert.clear_gradients() @@ -89,15 +101,18 @@ class TestBert(unittest.TestCase): if step_idx % PRINT_STEP == 0: if step_idx == 0: - print("Step: %d, loss: %f, ppl: %f, next_sent_acc: %f" % - (step_idx, loss, ppl, acc)) + print( + "Step: %d, loss: %f, ppl: %f, next_sent_acc: %f" + % (step_idx, loss, ppl, acc) + ) avg_batch_time = time.time() else: speed = PRINT_STEP / (time.time() - avg_batch_time) speed_list.append(speed) print( "Step: %d, loss: %f, ppl: %f, next_sent_acc: %f, speed: %.3f steps/s" - % (step_idx, loss, ppl, acc, speed)) + % (step_idx, loss, ppl, acc, speed) + ) avg_batch_time = time.time() step_idx += 1 @@ -105,8 +120,9 @@ class TestBert(unittest.TestCase): if to_static: fluid.dygraph.jit.save(bert, self.model_save_prefix) else: - fluid.dygraph.save_dygraph(bert.state_dict(), - self.dy_state_dict_save_path) + fluid.dygraph.save_dygraph( + bert.state_dict(), self.dy_state_dict_save_path + ) break return loss, ppl @@ -122,38 +138,56 @@ class TestBert(unittest.TestCase): paddle.enable_static() exe = fluid.Executor(place) # load inference model - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) - pred_res = exe.run(inference_program, - feed=dict(zip(feed_target_names, data)), - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) + pred_res = exe.run( + inference_program, + feed=dict(zip(feed_target_names, 
data)), + fetch_list=fetch_targets, + ) return pred_res def predict_dygraph(self, bert_config, data): program_translator.enable(False) with fluid.dygraph.guard(place): - bert = PretrainModelLayer(config=bert_config, - weight_sharing=False, - use_fp16=False) + bert = PretrainModelLayer( + config=bert_config, weight_sharing=False, use_fp16=False + ) model_dict, _ = fluid.dygraph.load_dygraph( - self.dy_state_dict_save_path) + self.dy_state_dict_save_path + ) bert.set_dict(model_dict) bert.eval() input_vars = [fluid.dygraph.to_variable(x) for x in data] - src_ids, pos_ids, sent_ids, input_mask, mask_label, mask_pos, labels = input_vars - pred_res = bert(src_ids=src_ids, - position_ids=pos_ids, - sentence_ids=sent_ids, - input_mask=input_mask, - mask_label=mask_label, - mask_pos=mask_pos, - labels=labels) + ( + src_ids, + pos_ids, + sent_ids, + input_mask, + mask_label, + mask_pos, + labels, + ) = input_vars + pred_res = bert( + src_ids=src_ids, + position_ids=pos_ids, + sentence_ids=sent_ids, + input_mask=input_mask, + mask_label=mask_label, + mask_pos=mask_pos, + labels=labels, + ) pred_res = [var.numpy() for var in pred_res] return pred_res @@ -163,24 +197,42 @@ class TestBert(unittest.TestCase): bert = fluid.dygraph.jit.load(self.model_save_prefix) bert.eval() - src_ids, pos_ids, sent_ids, input_mask, mask_label, mask_pos, labels = data - pred_res = bert(src_ids, pos_ids, sent_ids, input_mask, mask_label, - mask_pos, labels) + ( + src_ids, + pos_ids, + sent_ids, + input_mask, + mask_label, + mask_pos, + labels, + ) = data + pred_res = bert( + src_ids, + pos_ids, + sent_ids, + input_mask, + mask_label, + mask_pos, + labels, + ) pred_res = [var.numpy() for var in pred_res] return pred_res def predict_analysis_inference(self, data): - output = PredictorTools(self.model_save_dir, self.model_filename, - self.params_filename, data) + output = PredictorTools( + self.model_save_dir, self.model_filename, self.params_filename, data + ) out = output() return out def test_train(self): - static_loss, static_ppl = self.train_static(self.bert_config, - self.data_reader) - dygraph_loss, dygraph_ppl = self.train_dygraph(self.bert_config, - self.data_reader) + static_loss, static_ppl = self.train_static( + self.bert_config, self.data_reader + ) + dygraph_loss, dygraph_ppl = self.train_dygraph( + self.bert_config, self.data_reader + ) np.testing.assert_allclose(static_loss, dygraph_loss, rtol=1e-05) np.testing.assert_allclose(static_ppl, dygraph_ppl, rtol=1e-05) @@ -194,29 +246,38 @@ class TestBert(unittest.TestCase): predictor_pred_res = self.predict_analysis_inference(data) for dy_res, st_res, dy_jit_res, predictor_res in zip( - dygraph_pred_res, static_pred_res, dygraph_jit_pred_res, - predictor_pred_res): + dygraph_pred_res, + static_pred_res, + dygraph_jit_pred_res, + predictor_pred_res, + ): np.testing.assert_allclose( st_res, dy_res, rtol=1e-05, err_msg='dygraph_res: {},\n static_res: {}'.format( dy_res[~np.isclose(st_res, dy_res)], - st_res[~np.isclose(st_res, dy_res)])) + st_res[~np.isclose(st_res, dy_res)], + ), + ) np.testing.assert_allclose( st_res, dy_jit_res, rtol=1e-05, err_msg='dygraph_jit_res: {},\n static_res: {}'.format( dy_jit_res[~np.isclose(st_res, dy_jit_res)], - st_res[~np.isclose(st_res, dy_jit_res)])) + st_res[~np.isclose(st_res, dy_jit_res)], + ), + ) np.testing.assert_allclose( st_res, predictor_res, rtol=1e-05, err_msg='dygraph_jit_res: {},\n static_res: {}'.format( predictor_res[~np.isclose(st_res, predictor_res)], - st_res[~np.isclose(st_res, predictor_res)])) + 
st_res[~np.isclose(st_res, predictor_res)], + ), + ) break diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index e54c6274502ba5a6b5d4904b7abb6414d4608546..0f25b6ec71a7f90f19621834b4549720a47eff7d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -38,9 +38,10 @@ if fluid.is_compiled_with_cuda(): fluid.set_flags({'FLAGS_cudnn_deterministic': True}) -def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample, - num_sample_perbin): - """ generate sample mask for each point in Boundary-Matching Map """ +def get_interp1d_mask( + tscale, dscale, prop_boundary_ratio, num_sample, num_sample_perbin +): + """generate sample mask for each point in Boundary-Matching Map""" mask_mat = [] for start_index in range(tscale): mask_mat_vector = [] @@ -51,9 +52,13 @@ def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample, center_len = float(p_xmax - p_xmin) + 1 sample_xmin = p_xmin - center_len * prop_boundary_ratio sample_xmax = p_xmax + center_len * prop_boundary_ratio - p_mask = _get_interp1d_bin_mask(sample_xmin, sample_xmax, - tscale, num_sample, - num_sample_perbin) + p_mask = _get_interp1d_bin_mask( + sample_xmin, + sample_xmax, + tscale, + num_sample, + num_sample_perbin, + ) else: p_mask = np.zeros([tscale, num_sample]) mask_mat_vector.append(p_mask) @@ -66,9 +71,10 @@ def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample, return sample_mask -def _get_interp1d_bin_mask(seg_xmin, seg_xmax, tscale, num_sample, - num_sample_perbin): - """ generate sample mask for a boundary-matching pair """ +def _get_interp1d_bin_mask( + seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin +): + """generate sample mask for a boundary-matching pair""" plen = float(seg_xmax - seg_xmin) plen_sample = plen / (num_sample * num_sample_perbin - 1.0) total_samples = [ @@ -77,8 +83,9 @@ def _get_interp1d_bin_mask(seg_xmin, seg_xmax, tscale, num_sample, ] p_mask = [] for idx in range(num_sample): - bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * - num_sample_perbin] + bin_samples = total_samples[ + idx * num_sample_perbin : (idx + 1) * num_sample_perbin + ] bin_vector = np.zeros([tscale]) for sample in bin_samples: sample_upper = math.ceil(sample) @@ -94,34 +101,39 @@ def _get_interp1d_bin_mask(seg_xmin, seg_xmax, tscale, num_sample, class Conv1D(fluid.dygraph.Layer): - - def __init__(self, - prefix, - num_channels=256, - num_filters=256, - size_k=3, - padding=1, - groups=1, - act="relu"): + def __init__( + self, + prefix, + num_channels=256, + num_filters=256, + size_k=3, + padding=1, + groups=1, + act="relu", + ): super(Conv1D, self).__init__() fan_in = num_channels * size_k * 1 - k = 1. 
/ math.sqrt(fan_in) - param_attr = ParamAttr(name=prefix + "_w", - initializer=fluid.initializer.Uniform(low=-k, - high=k)) - bias_attr = ParamAttr(name=prefix + "_b", - initializer=fluid.initializer.Uniform(low=-k, - high=k)) - - self._conv2d = fluid.dygraph.Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=(1, size_k), - stride=1, - padding=(0, padding), - groups=groups, - act=act, - param_attr=param_attr, - bias_attr=bias_attr) + k = 1.0 / math.sqrt(fan_in) + param_attr = ParamAttr( + name=prefix + "_w", + initializer=fluid.initializer.Uniform(low=-k, high=k), + ) + bias_attr = ParamAttr( + name=prefix + "_b", + initializer=fluid.initializer.Uniform(low=-k, high=k), + ) + + self._conv2d = fluid.dygraph.Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=(1, size_k), + stride=1, + padding=(0, padding), + groups=groups, + act=act, + param_attr=param_attr, + bias_attr=bias_attr, + ) def forward(self, x): x = fluid.layers.unsqueeze(input=x, axes=[2]) @@ -131,7 +143,6 @@ class Conv1D(fluid.dygraph.Layer): class BMN(fluid.dygraph.Layer): - def __init__(self, cfg): super(BMN, self).__init__() @@ -146,55 +157,65 @@ class BMN(fluid.dygraph.Layer): self.hidden_dim_3d = 512 # Base Module - self.b_conv1 = Conv1D(prefix="Base_1", - num_channels=cfg.feat_dim, - num_filters=self.hidden_dim_1d, - size_k=3, - padding=1, - groups=4, - act="relu") - self.b_conv2 = Conv1D(prefix="Base_2", - num_filters=self.hidden_dim_1d, - size_k=3, - padding=1, - groups=4, - act="relu") + self.b_conv1 = Conv1D( + prefix="Base_1", + num_channels=cfg.feat_dim, + num_filters=self.hidden_dim_1d, + size_k=3, + padding=1, + groups=4, + act="relu", + ) + self.b_conv2 = Conv1D( + prefix="Base_2", + num_filters=self.hidden_dim_1d, + size_k=3, + padding=1, + groups=4, + act="relu", + ) # Temporal Evaluation Module - self.ts_conv1 = Conv1D(prefix="TEM_s1", - num_filters=self.hidden_dim_1d, - size_k=3, - padding=1, - groups=4, - act="relu") - self.ts_conv2 = Conv1D(prefix="TEM_s2", - num_filters=1, - size_k=1, - padding=0, - act="sigmoid") - self.te_conv1 = Conv1D(prefix="TEM_e1", - num_filters=self.hidden_dim_1d, - size_k=3, - padding=1, - groups=4, - act="relu") - self.te_conv2 = Conv1D(prefix="TEM_e2", - num_filters=1, - size_k=1, - padding=0, - act="sigmoid") - - #Proposal Evaluation Module - self.p_conv1 = Conv1D(prefix="PEM_1d", - num_filters=self.hidden_dim_2d, - size_k=3, - padding=1, - act="relu") + self.ts_conv1 = Conv1D( + prefix="TEM_s1", + num_filters=self.hidden_dim_1d, + size_k=3, + padding=1, + groups=4, + act="relu", + ) + self.ts_conv2 = Conv1D( + prefix="TEM_s2", num_filters=1, size_k=1, padding=0, act="sigmoid" + ) + self.te_conv1 = Conv1D( + prefix="TEM_e1", + num_filters=self.hidden_dim_1d, + size_k=3, + padding=1, + groups=4, + act="relu", + ) + self.te_conv2 = Conv1D( + prefix="TEM_e2", num_filters=1, size_k=1, padding=0, act="sigmoid" + ) + + # Proposal Evaluation Module + self.p_conv1 = Conv1D( + prefix="PEM_1d", + num_filters=self.hidden_dim_2d, + size_k=3, + padding=1, + act="relu", + ) # init to speed up - sample_mask = get_interp1d_mask(self.tscale, self.dscale, - self.prop_boundary_ratio, - self.num_sample, self.num_sample_perbin) + sample_mask = get_interp1d_mask( + self.tscale, + self.dscale, + self.prop_boundary_ratio, + self.num_sample, + self.num_sample_perbin, + ) self.sample_mask = fluid.dygraph.base.to_variable(sample_mask) self.sample_mask.stop_gradient = True @@ -206,7 +227,8 @@ class BMN(fluid.dygraph.Layer): padding=0, act="relu", 
param_attr=ParamAttr(name="PEM_3d1_w"), - bias_attr=ParamAttr(name="PEM_3d1_b")) + bias_attr=ParamAttr(name="PEM_3d1_b"), + ) self.p_conv2d1 = fluid.dygraph.Conv2D( num_channels=512, @@ -216,7 +238,8 @@ class BMN(fluid.dygraph.Layer): padding=0, act="relu", param_attr=ParamAttr(name="PEM_2d1_w"), - bias_attr=ParamAttr(name="PEM_2d1_b")) + bias_attr=ParamAttr(name="PEM_2d1_b"), + ) self.p_conv2d2 = fluid.dygraph.Conv2D( num_channels=128, num_filters=self.hidden_dim_2d, @@ -225,7 +248,8 @@ class BMN(fluid.dygraph.Layer): padding=1, act="relu", param_attr=ParamAttr(name="PEM_2d2_w"), - bias_attr=ParamAttr(name="PEM_2d2_b")) + bias_attr=ParamAttr(name="PEM_2d2_b"), + ) self.p_conv2d3 = fluid.dygraph.Conv2D( num_channels=128, num_filters=self.hidden_dim_2d, @@ -234,7 +258,8 @@ class BMN(fluid.dygraph.Layer): padding=1, act="relu", param_attr=ParamAttr(name="PEM_2d3_w"), - bias_attr=ParamAttr(name="PEM_2d3_b")) + bias_attr=ParamAttr(name="PEM_2d3_b"), + ) self.p_conv2d4 = fluid.dygraph.Conv2D( num_channels=128, num_filters=2, @@ -243,7 +268,8 @@ class BMN(fluid.dygraph.Layer): padding=0, act="sigmoid", param_attr=ParamAttr(name="PEM_2d4_w"), - bias_attr=ParamAttr(name="PEM_2d4_b")) + bias_attr=ParamAttr(name="PEM_2d4_b"), + ) @to_static def forward(self, x): @@ -263,8 +289,9 @@ class BMN(fluid.dygraph.Layer): xp = self.p_conv1(x) # BM layer xp = fluid.layers.matmul(xp, self.sample_mask) - xp = fluid.layers.reshape(xp, - shape=[0, 0, -1, self.dscale, self.tscale]) + xp = fluid.layers.reshape( + xp, shape=[0, 0, -1, self.dscale, self.tscale] + ) xp = self.p_conv3d1(xp) xp = fluid.layers.squeeze(xp, axes=[2]) @@ -275,51 +302,54 @@ class BMN(fluid.dygraph.Layer): return xp, xs, xe -def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, - cfg): - +def bmn_loss_func( + pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, cfg +): def _get_mask(cfg): dscale = cfg.dscale tscale = cfg.tscale bm_mask = [] for idx in range(dscale): - mask_vector = [1 for i in range(tscale - idx) - ] + [0 for i in range(idx)] + mask_vector = [1 for i in range(tscale - idx)] + [ + 0 for i in range(idx) + ] bm_mask.append(mask_vector) bm_mask = np.array(bm_mask, dtype=np.float32) - self_bm_mask = fluid.layers.create_global_var(shape=[dscale, tscale], - value=0, - dtype=DATATYPE, - persistable=True) + self_bm_mask = fluid.layers.create_global_var( + shape=[dscale, tscale], value=0, dtype=DATATYPE, persistable=True + ) fluid.layers.assign(bm_mask, self_bm_mask) self_bm_mask.stop_gradient = True return self_bm_mask def tem_loss_func(pred_start, pred_end, gt_start, gt_end): - def bi_loss(pred_score, gt_label): - pred_score = fluid.layers.reshape(x=pred_score, - shape=[-1], - inplace=False) - gt_label = fluid.layers.reshape(x=gt_label, - shape=[-1], - inplace=False) + pred_score = fluid.layers.reshape( + x=pred_score, shape=[-1], inplace=False + ) + gt_label = fluid.layers.reshape( + x=gt_label, shape=[-1], inplace=False + ) gt_label.stop_gradient = True pmask = fluid.layers.cast(x=(gt_label > 0.5), dtype=DATATYPE) - num_entries = fluid.layers.cast(fluid.layers.shape(pmask), - dtype=DATATYPE) - num_positive = fluid.layers.cast(fluid.layers.reduce_sum(pmask), - dtype=DATATYPE) + num_entries = fluid.layers.cast( + fluid.layers.shape(pmask), dtype=DATATYPE + ) + num_positive = fluid.layers.cast( + fluid.layers.reduce_sum(pmask), dtype=DATATYPE + ) ratio = num_entries / num_positive coef_0 = 0.5 * ratio / (ratio - 1) coef_1 = 0.5 * ratio epsilon = 0.000001 # temp = fluid.layers.log(pred_score + epsilon) 
loss_pos = fluid.layers.elementwise_mul( - fluid.layers.log(pred_score + epsilon), pmask) + fluid.layers.log(pred_score + epsilon), pmask + ) loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos) loss_neg = fluid.layers.elementwise_mul( - fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask)) + fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask) + ) loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg) loss = -1 * (loss_pos + loss_neg) return loss @@ -336,39 +366,47 @@ def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE) u_mmask = fluid.layers.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3) u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE) - u_lmask = fluid.layers.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.) + u_lmask = fluid.layers.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0) u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE) u_lmask = fluid.layers.elementwise_mul(u_lmask, mask) - num_h = fluid.layers.cast(fluid.layers.reduce_sum(u_hmask), - dtype=DATATYPE) - num_m = fluid.layers.cast(fluid.layers.reduce_sum(u_mmask), - dtype=DATATYPE) - num_l = fluid.layers.cast(fluid.layers.reduce_sum(u_lmask), - dtype=DATATYPE) + num_h = fluid.layers.cast( + fluid.layers.reduce_sum(u_hmask), dtype=DATATYPE + ) + num_m = fluid.layers.cast( + fluid.layers.reduce_sum(u_mmask), dtype=DATATYPE + ) + num_l = fluid.layers.cast( + fluid.layers.reduce_sum(u_lmask), dtype=DATATYPE + ) r_m = num_h / num_m u_smmask = fluid.layers.assign( local_random.uniform( - 0., 1., - [gt_iou_map.shape[1], gt_iou_map.shape[2]]).astype(DATATYPE)) + 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]] + ).astype(DATATYPE) + ) u_smmask = fluid.layers.elementwise_mul(u_mmask, u_smmask) - u_smmask = fluid.layers.cast(x=(u_smmask > (1. - r_m)), dtype=DATATYPE) + u_smmask = fluid.layers.cast(x=(u_smmask > (1.0 - r_m)), dtype=DATATYPE) r_l = num_h / num_l u_slmask = fluid.layers.assign( local_random.uniform( - 0., 1., - [gt_iou_map.shape[1], gt_iou_map.shape[2]]).astype(DATATYPE)) + 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]] + ).astype(DATATYPE) + ) u_slmask = fluid.layers.elementwise_mul(u_lmask, u_slmask) - u_slmask = fluid.layers.cast(x=(u_slmask > (1. 
- r_l)), dtype=DATATYPE) + u_slmask = fluid.layers.cast(x=(u_slmask > (1.0 - r_l)), dtype=DATATYPE) weights = u_hmask + u_smmask + u_slmask weights.stop_gradient = True loss = fluid.layers.square_error_cost(pred_score, gt_iou_map) loss = fluid.layers.elementwise_mul(loss, weights) - loss = 0.5 * fluid.layers.reduce_sum(loss) / fluid.layers.reduce_sum( - weights) + loss = ( + 0.5 + * fluid.layers.reduce_sum(loss) + / fluid.layers.reduce_sum(weights) + ) return loss @@ -386,24 +424,22 @@ def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, coef_1 = 0.5 * ratio epsilon = 0.000001 loss_pos = fluid.layers.elementwise_mul( - fluid.layers.log(pred_score + epsilon), pmask) + fluid.layers.log(pred_score + epsilon), pmask + ) loss_pos = coef_1 * fluid.layers.reduce_sum(loss_pos) loss_neg = fluid.layers.elementwise_mul( - fluid.layers.log(1.0 - pred_score + epsilon), nmask) + fluid.layers.log(1.0 - pred_score + epsilon), nmask + ) loss_neg = coef_0 * fluid.layers.reduce_sum(loss_neg) loss = -1 * (loss_pos + loss_neg) / num_entries return loss - pred_bm_reg = fluid.layers.squeeze(fluid.layers.slice(pred_bm, - axes=[1], - starts=[0], - ends=[1]), - axes=[1]) - pred_bm_cls = fluid.layers.squeeze(fluid.layers.slice(pred_bm, - axes=[1], - starts=[1], - ends=[2]), - axes=[1]) + pred_bm_reg = fluid.layers.squeeze( + fluid.layers.slice(pred_bm, axes=[1], starts=[0], ends=[1]), axes=[1] + ) + pred_bm_cls = fluid.layers.squeeze( + fluid.layers.slice(pred_bm, axes=[1], starts=[1], ends=[2]), axes=[1] + ) bm_mask = _get_mask(cfg) @@ -446,36 +482,35 @@ def optimizer(cfg, parameter_list): fluid.layers.piecewise_decay(boundaries=bd, values=lr), parameter_list=parameter_list, regularization=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=l2_weight_decay)) + regularization_coeff=l2_weight_decay + ), + ) return optimizer def fake_data_reader(args, mode='train'): - def iou_with_anchors(anchors_min, anchors_max, box_min, box_max): - """Compute jaccard score between a box and the anchors. - """ + """Compute jaccard score between a box and the anchors.""" len_anchors = anchors_max - anchors_min int_xmin = np.maximum(anchors_min, box_min) int_xmax = np.minimum(anchors_max, box_max) - inter_len = np.maximum(int_xmax - int_xmin, 0.) + inter_len = np.maximum(int_xmax - int_xmin, 0.0) union_len = len_anchors - inter_len + box_max - box_min jaccard = np.divide(inter_len, union_len) return jaccard def ioa_with_anchors(anchors_min, anchors_max, box_min, box_max): - """Compute intersection between score a box and the anchors. - """ + """Compute intersection between score a box and the anchors.""" len_anchors = anchors_max - anchors_min int_xmin = np.maximum(anchors_min, box_min) int_xmax = np.minimum(anchors_max, box_max) - inter_len = np.maximum(int_xmax - int_xmin, 0.) + inter_len = np.maximum(int_xmax - int_xmin, 0.0) scores = np.divide(inter_len, len_anchors) return scores def get_match_map(tscale): match_map = [] - tgap = 1. 
/ tscale + tgap = 1.0 / tscale for idx in range(tscale): tmp_match_window = [] xmin = tgap * idx @@ -499,17 +534,21 @@ def fake_data_reader(args, mode='train'): gt_bbox = [] gt_iou_map = [] for idx in range(label_num): - duration = local_random.uniform(video_second * 0.4, - video_second * 0.8) - start_t = local_random.uniform(0.1 * video_second, - video_second - duration) + duration = local_random.uniform( + video_second * 0.4, video_second * 0.8 + ) + start_t = local_random.uniform( + 0.1 * video_second, video_second - duration + ) tmp_start = max(min(1, start_t / video_second), 0) tmp_end = max(min(1, (start_t + duration) / video_second), 0) gt_bbox.append([tmp_start, tmp_end]) - tmp_gt_iou_map = iou_with_anchors(match_map[:, 0], match_map[:, 1], - tmp_start, tmp_end) - tmp_gt_iou_map = np.reshape(tmp_gt_iou_map, - [args.dscale, args.tscale]) + tmp_gt_iou_map = iou_with_anchors( + match_map[:, 0], match_map[:, 1], tmp_start, tmp_end + ) + tmp_gt_iou_map = np.reshape( + tmp_gt_iou_map, [args.dscale, args.tscale] + ) gt_iou_map.append(tmp_gt_iou_map) gt_iou_map = np.array(gt_iou_map) gt_iou_map = np.max(gt_iou_map, axis=0) @@ -517,25 +556,38 @@ def fake_data_reader(args, mode='train'): gt_bbox = np.array(gt_bbox) gt_xmins = gt_bbox[:, 0] gt_xmaxs = gt_bbox[:, 1] - gt_len_small = 3. / args.tscale + gt_len_small = 3.0 / args.tscale gt_start_bboxs = np.stack( - (gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1) + (gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1 + ) gt_end_bboxs = np.stack( - (gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1) + (gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1 + ) match_score_start = [] for jdx in range(len(anchor_xmin)): match_score_start.append( np.max( - ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], - gt_start_bboxs[:, 0], gt_start_bboxs[:, - 1]))) + ioa_with_anchors( + anchor_xmin[jdx], + anchor_xmax[jdx], + gt_start_bboxs[:, 0], + gt_start_bboxs[:, 1], + ) + ) + ) match_score_end = [] for jdx in range(len(anchor_xmin)): match_score_end.append( np.max( - ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], - gt_end_bboxs[:, 0], gt_end_bboxs[:, 1]))) + ioa_with_anchors( + anchor_xmin[jdx], + anchor_xmax[jdx], + gt_end_bboxs[:, 0], + gt_end_bboxs[:, 1], + ) + ) + ) gt_start = np.array(match_score_start) gt_end = np.array(match_score_end) @@ -548,18 +600,22 @@ def fake_data_reader(args, mode='train'): for video_idx in range(iter_num): video_feat = local_random.random_sample( - [args.feat_dim, args.tscale]).astype('float32') + [args.feat_dim, args.tscale] + ).astype('float32') gt_iou_map, gt_start, gt_end = get_video_label( - match_map, anchor_xmin, anchor_xmax) + match_map, anchor_xmin, anchor_xmax + ) if mode == 'train' or mode == 'valid': batch_out.append((video_feat, gt_iou_map, gt_start, gt_end)) elif mode == 'test': batch_out.append( - (video_feat, gt_iou_map, gt_start, gt_end, video_idx)) + (video_feat, gt_iou_map, gt_start, gt_end, video_idx) + ) else: raise NotImplementedError( - 'mode {} not implemented'.format(mode)) + 'mode {} not implemented'.format(mode) + ) if len(batch_out) == args.batch_size: yield batch_out batch_out = [] @@ -589,20 +645,26 @@ def val_bmn(model, args): pred_bm, pred_start, pred_end = model(x_data) loss, tem_loss, pem_reg_loss, pem_cls_loss = bmn_loss_func( - pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, args) + pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, args + ) avg_loss = paddle.mean(loss) loss_data += [ 
avg_loss.numpy()[0], tem_loss.numpy()[0], pem_reg_loss.numpy()[0], - pem_cls_loss.numpy()[0] + pem_cls_loss.numpy()[0], ] - print('[VALID] iter {} '.format(batch_id) - + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format( - '%f' % avg_loss.numpy()[0], '%f' % tem_loss.numpy()[0], \ - '%f' % pem_reg_loss.numpy()[0], '%f' % pem_cls_loss.numpy()[0])) + print( + '[VALID] iter {} '.format(batch_id) + + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format( + '%f' % avg_loss.numpy()[0], + '%f' % tem_loss.numpy()[0], + '%f' % pem_reg_loss.numpy()[0], + '%f' % pem_cls_loss.numpy()[0], + ) + ) if batch_id == args.valid_batch_num: break @@ -610,11 +672,13 @@ def val_bmn(model, args): class TestTrain(unittest.TestCase): - def setUp(self): self.args = Args() - self.place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda() \ + self.place = ( + fluid.CPUPlace() + if not fluid.is_compiled_with_cuda() else fluid.CUDAPlace(0) + ) self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'inference') @@ -643,14 +707,18 @@ class TestTrain(unittest.TestCase): for epoch in range(args.epoch): for batch_id, data in enumerate(train_reader()): - video_feat = np.array([item[0] - for item in data]).astype(DATATYPE) - gt_iou_map = np.array([item[1] - for item in data]).astype(DATATYPE) - gt_start = np.array([item[2] - for item in data]).astype(DATATYPE) - gt_end = np.array([item[3] - for item in data]).astype(DATATYPE) + video_feat = np.array([item[0] for item in data]).astype( + DATATYPE + ) + gt_iou_map = np.array([item[1] for item in data]).astype( + DATATYPE + ) + gt_start = np.array([item[2] for item in data]).astype( + DATATYPE + ) + gt_end = np.array([item[3] for item in data]).astype( + DATATYPE + ) x_data = to_variable(video_feat) gt_iou_map = to_variable(gt_iou_map) @@ -663,8 +731,14 @@ class TestTrain(unittest.TestCase): pred_bm, pred_start, pred_end = bmn(x_data) loss, tem_loss, pem_reg_loss, pem_cls_loss = bmn_loss_func( - pred_bm, pred_start, pred_end, gt_iou_map, gt_start, - gt_end, args) + pred_bm, + pred_start, + pred_end, + gt_iou_map, + gt_start, + gt_end, + args, + ) avg_loss = paddle.mean(loss) avg_loss.backward() @@ -675,15 +749,21 @@ class TestTrain(unittest.TestCase): avg_loss.numpy()[0], tem_loss.numpy()[0], pem_reg_loss.numpy()[0], - pem_cls_loss.numpy()[0] + pem_cls_loss.numpy()[0], ] - if args.log_interval > 0 and (batch_id % args.log_interval - == 0): - print('[TRAIN] Epoch {}, iter {} '.format(epoch, batch_id) - + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format( - '%f' % avg_loss.numpy()[0], '%f' % tem_loss.numpy()[0], \ - '%f' % pem_reg_loss.numpy()[0], '%f' % pem_cls_loss.numpy()[0])) + if args.log_interval > 0 and ( + batch_id % args.log_interval == 0 + ): + print( + '[TRAIN] Epoch {}, iter {} '.format(epoch, batch_id) + + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format( + '%f' % avg_loss.numpy()[0], + '%f' % tem_loss.numpy()[0], + '%f' % pem_reg_loss.numpy()[0], + '%f' % pem_cls_loss.numpy()[0], + ) + ) # validation if batch_id % args.valid_interval == 0 and batch_id > 0: @@ -696,8 +776,9 @@ class TestTrain(unittest.TestCase): if to_static: fluid.dygraph.jit.save(bmn, self.model_save_prefix) else: - fluid.dygraph.save_dygraph(bmn.state_dict(), - self.dy_param_path) + fluid.dygraph.save_dygraph( + bmn.state_dict(), self.dy_param_path + ) break return np.array(loss_data) @@ -711,8 +792,10 @@ class 
TestTrain(unittest.TestCase): rtol=1e-05, err_msg='dygraph_res: {},\n static_res: {}'.format( dygraph_res[~np.isclose(dygraph_res, static_res)], - static_res[~np.isclose(dygraph_res, static_res)]), - atol=1e-8) + static_res[~np.isclose(dygraph_res, static_res)], + ), + atol=1e-8, + ) # Prediction needs trained models, so put `test_predict` at last of `test_train` self.verify_predict() @@ -729,32 +812,41 @@ class TestTrain(unittest.TestCase): predictor_pred_res = self.predict_analysis_inference(video_data) for dy_res, st_res, dy_jit_res, predictor_res in zip( - dygraph_pred_res, static_pred_res, dygraph_jit_pred_res, - predictor_pred_res): + dygraph_pred_res, + static_pred_res, + dygraph_jit_pred_res, + predictor_pred_res, + ): np.testing.assert_allclose( st_res, dy_res, rtol=1e-05, err_msg='dygraph_res: {},\n static_res: {}'.format( dy_res[~np.isclose(st_res, dy_res)], - st_res[~np.isclose(st_res, dy_res)]), - atol=1e-8) + st_res[~np.isclose(st_res, dy_res)], + ), + atol=1e-8, + ) np.testing.assert_allclose( st_res, dy_jit_res, rtol=1e-05, err_msg='dygraph_jit_res: {},\n static_res: {}'.format( dy_jit_res[~np.isclose(st_res, dy_jit_res)], - st_res[~np.isclose(st_res, dy_jit_res)]), - atol=1e-8) + st_res[~np.isclose(st_res, dy_jit_res)], + ), + atol=1e-8, + ) np.testing.assert_allclose( st_res, predictor_res, rtol=1e-05, err_msg='dygraph_jit_res: {},\n static_res: {}'.format( predictor_res[~np.isclose(st_res, predictor_res)], - st_res[~np.isclose(st_res, predictor_res)]), - atol=1e-8) + st_res[~np.isclose(st_res, predictor_res)], + ), + atol=1e-8, + ) break def predict_dygraph(self, data): @@ -776,14 +868,21 @@ class TestTrain(unittest.TestCase): paddle.enable_static() exe = fluid.Executor(self.place) # load inference model - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) - pred_res = exe.run(inference_program, - feed={feed_target_names[0]: data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: data}, + fetch_list=fetch_targets, + ) return pred_res @@ -799,8 +898,12 @@ class TestTrain(unittest.TestCase): return pred_res def predict_analysis_inference(self, data): - output = PredictorTools(self.model_save_dir, self.model_filename, - self.params_filename, [data]) + output = PredictorTools( + self.model_save_dir, + self.model_filename, + self.params_filename, + [data], + ) out = output() return out diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py index b5840bc45e3e626c62f632a62af07ebdf59fb8fd..45b149617d2bfc614d0ad9466a375c73e5e5d2f7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py @@ -17,7 +17,9 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.jit import declarative -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, +) from 
paddle.fluid.dygraph.dygraph_to_static.utils import Dygraph2StaticException SEED = 2020 @@ -25,7 +27,6 @@ np.random.seed(SEED) class TestDy2staticException(unittest.TestCase): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = None @@ -159,9 +160,7 @@ def test_for_in_else(x): def while_loop_class_var(x): - class Foo(object): - def __init__(self): self.a = 3 self.b = 4 @@ -205,11 +204,13 @@ def test_optim_break_in_while(x): class TestContinueInFor(unittest.TestCase): - def setUp(self): self.input = np.zeros((1)).astype('int64') - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.init_dygraph_func() def init_dygraph_func(self): @@ -233,59 +234,52 @@ class TestContinueInFor(unittest.TestCase): static_res, rtol=1e-05, err_msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) class TestContinueInForAtEnd(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_continue_in_for_at_end class TestBreakInFor(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_break_in_for class TestBreakInForAtEnd(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_break_in_for_at_end class TestBreakContinueInFor(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_break_continue_in_for class TestForInElse(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_for_in_else class TestContinueInWhile(TestContinueInFor): - def init_dygraph_func(self): self.dygraph_func = test_continue_in_while class TestBreakInWhile(TestContinueInWhile): - def init_dygraph_func(self): self.dygraph_func = test_break_in_while class TestWhileLoopClassVar(TestContinueInWhile): - def init_dygraph_func(self): self.dygraph_func = while_loop_class_var class TestOptimBreakInFor(TestDy2staticException): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = test_optim_break_in_for @@ -293,7 +287,6 @@ class TestOptimBreakInFor(TestDy2staticException): class TestOptimBreakInWhile(TestContinueInWhile): - def init_dygraph_func(self): self.dygraph_func = test_optim_break_in_while diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py index f23d9ba783c30e31dfba38b98b9fd246fbb5d5a5..7df7c6333123581c40c7a05bfc9d65f3341caaac 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py @@ -26,7 +26,6 @@ from test_fetch_feed import Pool2D, Linear class TestCacheProgram(unittest.TestCase): - def setUp(self): self.batch_num = 5 self.dygraph_class = Pool2D @@ -44,26 +43,35 @@ class TestCacheProgram(unittest.TestCase): cur_out = out # Check forward ops prev_ops = cur_ops - cur_ops = Counter([ - op.type for op in fluid.default_main_program().block(0).ops - ]) + cur_ops = Counter( + [ + op.type + for op in fluid.default_main_program().block(0).ops + ] + ) if batch_id > 0: - prev_out_numpy = prev_out[0].numpy() if isinstance( - prev_out, (tuple, list)) else prev_out.numpy() - cur_out_numpy = cur_out[0].numpy() if isinstance( - cur_out, (tuple, list)) else cur_out.numpy() + prev_out_numpy = ( + prev_out[0].numpy() + if isinstance(prev_out, (tuple, list)) + else 
prev_out.numpy() + ) + cur_out_numpy = ( + cur_out[0].numpy() + if isinstance(cur_out, (tuple, list)) + else cur_out.numpy() + ) np.testing.assert_allclose( prev_out_numpy, cur_out_numpy, rtol=1e-05, - err_msg= - 'Output in previous batch is {}\n Output in current batch is \n{}' - .format(prev_out_numpy, cur_out_numpy)) + err_msg='Output in previous batch is {}\n Output in current batch is \n{}'.format( + prev_out_numpy, cur_out_numpy + ), + ) self.assertEqual(prev_ops, cur_ops) class TestCacheProgram2(TestCacheProgram): - def setUp(self): self.batch_num = 5 self.dygraph_class = Linear @@ -71,7 +79,6 @@ class TestCacheProgram2(TestCacheProgram): class TestCacheProgramWithOptimizer(unittest.TestCase): - def setUp(self): self.dygraph_class = Linear self.data = np.random.random((4, 10)).astype('float32') @@ -90,7 +97,8 @@ class TestCacheProgramWithOptimizer(unittest.TestCase): with fluid.dygraph.guard(fluid.CPUPlace()): dygraph_net = self.dygraph_class() adam = fluid.optimizer.AdamOptimizer( - learning_rate=0.001, parameter_list=dygraph_net.parameters()) + learning_rate=0.001, parameter_list=dygraph_net.parameters() + ) loss_data = [] for batch_id in range(self.batch_num): input = fluid.dygraph.to_variable(self.data) @@ -111,7 +119,9 @@ class TestCacheProgramWithOptimizer(unittest.TestCase): static_loss, rtol=1e-05, err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + dygraph_loss, static_loss + ), + ) def simple_func(x): @@ -121,7 +131,6 @@ def simple_func(x): class TestConvertWithCache(unittest.TestCase): - def test_cache(self): static_func = convert_to_static(simple_func) # Get transformed function from cache. @@ -152,7 +161,6 @@ def sum_under_while(limit): class TestToOutputWithCache(unittest.TestCase): - def test_output(self): with fluid.dygraph.guard(): ret = sum_even_until_limit(80, 10) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py index fc233ffbc02fea733455a47aa2951bb7233b863f..17f598f0f17e833e6d50ed6a0eb4e75885a156b6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py @@ -59,19 +59,23 @@ def test_mix_cast(x): class TestCastBase(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.prepare() self.set_func() def prepare(self): self.input_shape = (16, 32) self.input_dtype = 'float32' - self.input = np.random.binomial( - 4, 0.3, size=np.product(self.input_shape)).reshape( - self.input_shape).astype(self.input_dtype) + self.input = ( + np.random.binomial(4, 0.3, size=np.product(self.input_shape)) + .reshape(self.input_shape) + .astype(self.input_dtype) + ) self.cast_dtype = 'bool' def set_func(self): @@ -87,24 +91,29 @@ class TestCastBase(unittest.TestCase): self.assertTrue( res.dtype == self.cast_dtype, msg='The target dtype is {}, but the casted dtype is {}.'.format( - self.cast_dtype, res.dtype)) + self.cast_dtype, res.dtype + ), + ) ref_val = self.input.astype(self.cast_dtype) np.testing.assert_allclose( res, ref_val, rtol=1e-05, err_msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val)) + res, ref_val + ), + ) class TestIntCast(TestCastBase): - def prepare(self): - self.input_shape = (1, ) + self.input_shape = (1,) self.input_dtype = 'float32' - 
self.input = np.random.normal( - loc=6, scale=10, size=np.product(self.input_shape)).reshape( - self.input_shape).astype(self.input_dtype) + self.input = ( + np.random.normal(loc=6, scale=10, size=np.product(self.input_shape)) + .reshape(self.input_shape) + .astype(self.input_dtype) + ) self.cast_dtype = 'int32' def set_func(self): @@ -112,13 +121,14 @@ class TestIntCast(TestCastBase): class TestFloatCast(TestCastBase): - def prepare(self): self.input_shape = (8, 16) self.input_dtype = 'bool' - self.input = np.random.binomial( - 2, 0.5, size=np.product(self.input_shape)).reshape( - self.input_shape).astype(self.input_dtype) + self.input = ( + np.random.binomial(2, 0.5, size=np.product(self.input_shape)) + .reshape(self.input_shape) + .astype(self.input_dtype) + ) self.cast_dtype = 'float32' def set_func(self): @@ -126,13 +136,14 @@ class TestFloatCast(TestCastBase): class TestMixCast(TestCastBase): - def prepare(self): self.input_shape = (8, 32) self.input_dtype = 'float32' - self.input = np.random.normal( - loc=6, scale=10, size=np.product(self.input_shape)).reshape( - self.input_shape).astype(self.input_dtype) + self.input = ( + np.random.normal(loc=6, scale=10, size=np.product(self.input_shape)) + .reshape(self.input_shape) + .astype(self.input_dtype) + ) self.cast_int = 'int' self.cast_float = 'float32' self.cast_bool = 'bool' @@ -146,19 +157,26 @@ class TestMixCast(TestCastBase): self.assertTrue( res.dtype == self.cast_dtype, msg='The target dtype is {}, but the casted dtype is {}.'.format( - self.cast_dtype, res.dtype)) - ref_val = self.input.astype(self.cast_int).astype( - self.cast_float).astype(self.cast_bool).astype(self.cast_dtype) + self.cast_dtype, res.dtype + ), + ) + ref_val = ( + self.input.astype(self.cast_int) + .astype(self.cast_float) + .astype(self.cast_bool) + .astype(self.cast_dtype) + ) np.testing.assert_allclose( res, ref_val, rtol=1e-05, err_msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val)) + res, ref_val + ), + ) class TestNotVarCast(TestCastBase): - def prepare(self): self.input = 3.14 self.cast_dtype = 'int' @@ -173,7 +191,9 @@ class TestNotVarCast(TestCastBase): self.assertTrue( res == ref_val, msg='The casted value is {}.\nThe correct value is {}.'.format( - res, ref_val)) + res, ref_val + ), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py index 846576400d7f5b2e510c2268e0bce5fd50d9ce75..8712950e01b05f89574efb0529f459aa8f6b1061 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py @@ -15,7 +15,9 @@ import unittest import paddle -from paddle.fluid.dygraph.dygraph_to_static.utils import FunctionNameLivenessAnalysis +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + FunctionNameLivenessAnalysis, +) from paddle.utils import gast import inspect from numpy import append @@ -24,7 +26,6 @@ global_a = [] class JudgeVisitor(gast.NodeVisitor): - def __init__(self, ans, mod): self.ans = ans self.mod = mod @@ -34,27 +35,30 @@ class JudgeVisitor(gast.NodeVisitor): expected = self.ans.get(node.name, set()) exp_mod = self.mod.get(node.name, set()) assert scope.existed_vars() == expected, "Not Equals." - assert scope.modified_vars( - ) == exp_mod, "Not Equals in function:{} . 
expect {} , but get {}".format( - node.name, exp_mod, scope.modified_vars()) + assert ( + scope.modified_vars() == exp_mod + ), "Not Equals in function:{} . expect {} , but get {}".format( + node.name, exp_mod, scope.modified_vars() + ) self.generic_visit(node) class JudgePushPopVisitor(gast.NodeVisitor): - def __init__(self, push_pop_vars): self.pp_var = push_pop_vars def visit_FunctionDef(self, node): scope = node.pd_scope expected = self.pp_var.get(node.name, set()) - assert scope.push_pop_vars == expected, "Not Equals in function:{} . expect {} , but get {}".format( - node.name, expected, scope.push_pop_vars) + assert ( + scope.push_pop_vars == expected + ), "Not Equals in function:{} . expect {} , but get {}".format( + node.name, expected, scope.push_pop_vars + ) self.generic_visit(node) def test_normal_0(x): - def func(): if True: i = 1 @@ -103,8 +107,7 @@ def test_nonlocal(x, *args, **kargs): def test_push_pop_1(x, *args, **kargs): - """ push_pop_vars in main_function is : `l`, `k` - """ + """push_pop_vars in main_function is : `l`, `k`""" l = [] k = [] for i in range(10): @@ -114,8 +117,7 @@ def test_push_pop_1(x, *args, **kargs): def test_push_pop_2(x, *args, **kargs): - """ push_pop_vars in main_function is : `k` - """ + """push_pop_vars in main_function is : `k`""" l = [] k = [] @@ -128,10 +130,10 @@ def test_push_pop_2(x, *args, **kargs): def test_push_pop_3(x, *args, **kargs): - """ push_pop_vars in main_function is : `k` - NOTE: One may expect `k` and `l` because l - is nonlocal. Name bind analysis is - not implemented yet. + """push_pop_vars in main_function is : `k` + NOTE: One may expect `k` and `l` because l + is nonlocal. Name bind analysis is + not implemented yet. """ l = [] k = [] @@ -146,8 +148,7 @@ def test_push_pop_3(x, *args, **kargs): def test_push_pop_4(x, *args, **kargs): - """ push_pop_vars in main_function is : `k` - """ + """push_pop_vars in main_function is : `k`""" l = [] k = [] for i in range(10): @@ -160,20 +161,19 @@ def test_push_pop_4(x, *args, **kargs): class TestClosureAnalysis(unittest.TestCase): - def setUp(self): self.judge_type = "var and w_vars" self.init_dygraph_func() def init_dygraph_func(self): self.all_dygraph_funcs = [ - test_nonlocal, test_global, test_normal_0, test_normal_argument + test_nonlocal, + test_global, + test_normal_0, + test_normal_argument, ] self.answer = [ - { - 'func': set('k'), - 'test_nonlocal': set('i') - }, + {'func': set('k'), 'test_nonlocal': set('i')}, { 'func': set({'i'}), }, @@ -186,34 +186,27 @@ class TestClosureAnalysis(unittest.TestCase): ] self.modified_var = [ - { - 'func': set('ki'), - 'test_nonlocal': set('i') - }, - { - 'func': set({'i'}), - 'test_global': set({"t"}) - }, + {'func': set('ki'), 'test_nonlocal': set('i')}, + {'func': set({'i'}), 'test_global': set({"t"})}, { 'func': set('i'), }, - { - 'func': set('i'), - 'test_normal_argument': set('x') - }, + {'func': set('i'), 'test_normal_argument': set('x')}, ] def test_main(self): if self.judge_type == 'push_pop_vars': - for push_pop_vars, func in zip(self.push_pop_vars, - self.all_dygraph_funcs): + for push_pop_vars, func in zip( + self.push_pop_vars, self.all_dygraph_funcs + ): test_func = inspect.getsource(func) gast_root = gast.parse(test_func) name_visitor = FunctionNameLivenessAnalysis(gast_root) JudgePushPopVisitor(push_pop_vars).visit(gast_root) else: - for mod, ans, func in zip(self.modified_var, self.answer, - self.all_dygraph_funcs): + for mod, ans, func in zip( + self.modified_var, self.answer, self.all_dygraph_funcs + ): test_func = 
inspect.getsource(func) gast_root = gast.parse(test_func) name_visitor = FunctionNameLivenessAnalysis(gast_root) @@ -227,41 +220,48 @@ def TestClosureAnalysis_Attribute_func(): class TestClosureAnalysis_Attribute(TestClosureAnalysis): - def init_dygraph_func(self): self.all_dygraph_funcs = [TestClosureAnalysis_Attribute_func] self.answer = [{"TestClosureAnalysis_Attribute_func": set({'i'})}] - self.modified_var = [{ - "TestClosureAnalysis_Attribute_func": - set({'i', 'self.current.function'}) - }] + self.modified_var = [ + { + "TestClosureAnalysis_Attribute_func": set( + {'i', 'self.current.function'} + ) + } + ] class TestClosureAnalysis_PushPop(TestClosureAnalysis): - def init_dygraph_func(self): self.judge_type = "push_pop_vars" self.all_dygraph_funcs = [ - test_push_pop_1, test_push_pop_2, test_push_pop_3, test_push_pop_4 + test_push_pop_1, + test_push_pop_2, + test_push_pop_3, + test_push_pop_4, + ] + self.push_pop_vars = [ + { + "test_push_pop_1": set({'l', 'k'}), + }, + { + "test_push_pop_2": set({'k'}), + "func": set("l"), + }, + { + "test_push_pop_3": set({'k'}), + "func": set("l"), + }, + { + "test_push_pop_4": set({'k', 'l'}), + }, ] - self.push_pop_vars = [{ - "test_push_pop_1": set({'l', 'k'}), - }, { - "test_push_pop_2": set({'k'}), - "func": set("l"), - }, { - "test_push_pop_3": set({'k'}), - "func": set("l"), - }, { - "test_push_pop_4": set({'k', 'l'}), - }] class TestPushPopTrans(unittest.TestCase): - def test(self): - def vlist_of_dict(x): ma = {'a': []} for i in range(3): @@ -299,7 +299,6 @@ class TestPushPopTrans(unittest.TestCase): print(paddle.jit.to_static(vlist_of_dict)(x)) def test4(self): - def vlist_of_dict(x): a = np.array([1, 2, 3]) for i in range(3): @@ -311,7 +310,6 @@ class TestPushPopTrans(unittest.TestCase): print(paddle.jit.to_static(vlist_of_dict)(x)) def test5(self): - def vlist_of_dict(x): a = np.array([1, 2, 3]) for i in range(3): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py index 73f0262daa1b9c4c4e57da430c2597f8e4b295b1..f22ca8776165aab5e264629272609a94556ef622 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py @@ -20,14 +20,13 @@ import tempfile class BufferLayers(paddle.nn.Layer): - def __init__(self, out_channel): super(BufferLayers, self).__init__() self.out_channel = out_channel def forward(self, x): mean = paddle.mean(x) - if mean < 0.: + if mean < 0.0: x = x * self._mask() out = x - mean @@ -38,13 +37,13 @@ class BufferLayers(paddle.nn.Layer): class SequentialNet(paddle.nn.Layer): - def __init__(self, sub_layer, in_channel, out_channel): super(SequentialNet, self).__init__() self.layer = paddle.nn.Sequential( ('l1', paddle.nn.Linear(in_channel, in_channel)), ('l2', paddle.nn.Linear(in_channel, out_channel)), - ('l3', sub_layer(out_channel))) + ('l3', sub_layer(out_channel)), + ) def forward(self, x): out = self.layer(x) @@ -52,7 +51,6 @@ class SequentialNet(paddle.nn.Layer): class NestSequentialNet(paddle.nn.Layer): - def __init__(self): super().__init__() group1 = paddle.nn.Sequential( @@ -70,7 +68,6 @@ class NestSequentialNet(paddle.nn.Layer): class TestSequential(unittest.TestCase): - def setUp(self): paddle.set_device('cpu') self.seed = 2021 @@ -100,7 +97,8 @@ class TestSequential(unittest.TestCase): load_out, out, rtol=1e-05, - err_msg='load_out is {}\\st_out is {}'.format(load_out, out)) + err_msg='load_out is 
{}\\st_out is {}'.format(load_out, out), + ) return out @@ -113,7 +111,9 @@ class TestSequential(unittest.TestCase): st_out, rtol=1e-05, err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out)) + dy_out, st_out + ), + ) def _test_load(self, net, x): paddle.jit.save(net, self.model_path) @@ -123,11 +123,11 @@ class TestSequential(unittest.TestCase): class TestNestSequential(TestSequential): - def _init_config(self): self.net = NestSequentialNet() - self.model_path = os.path.join(self.temp_dir.name, - 'nested_sequential_net') + self.model_path = os.path.join( + self.temp_dir.name, 'nested_sequential_net' + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py index 2e0983ad5ee1f0f1011d5a239aa51187975adba5..241df1517e2dba74edc96a93a41c25923b694a0d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py @@ -20,7 +20,9 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import ProgramTranslator -from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import CONVERSION_OPTIONS +from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ( + CONVERSION_OPTIONS, +) from test_program_translator import get_source_code import paddle.jit.dy2static as _jst @@ -64,7 +66,6 @@ def dyfunc_with_third_library_logging(x_v): class A: - @staticmethod def add(a, b): """ @@ -81,11 +82,13 @@ def dyfunc_with_staticmethod(x_v): class TestRecursiveCall1(unittest.TestCase): - def setUp(self): self.input = np.random.random([10, 16]).astype('float32') - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.init_test_func() def init_test_func(self): @@ -111,24 +114,28 @@ class TestRecursiveCall1(unittest.TestCase): static_res, rtol=1e-05, err_msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) lambda_fun = lambda x: x class MyConvLayer(fluid.dygraph.Layer): - def __init__(self): super(MyConvLayer, self).__init__() self._conv = fluid.dygraph.Conv2D( num_channels=3, num_filters=2, filter_size=3, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), + ) @paddle.jit.to_static def forward(self, inputs): @@ -144,7 +151,6 @@ class MyConvLayer(fluid.dygraph.Layer): class MyLayer(fluid.dygraph.Layer): - def __init__(self): super(MyLayer, self).__init__() @@ -153,10 +159,13 @@ class MyLayer(fluid.dygraph.Layer): input_dim=5, output_dim=1, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), + ) @paddle.jit.to_static def forward(self, inputs): @@ -166,11 +175,13 @@ class MyLayer(fluid.dygraph.Layer): class 
TestRecursiveCall2(unittest.TestCase): - def setUp(self): self.input = np.random.random((1, 3, 3, 5)).astype('float32') - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.set_func() def set_func(self): @@ -198,13 +209,11 @@ class TestRecursiveCall2(unittest.TestCase): class TestThirdPartyLibrary(TestRecursiveCall2): - def set_func(self): self.dygraph_func = dyfunc_with_third_library_logging class TestStaticMethod(TestRecursiveCall2): - def set_func(self): self.dygraph_func = dyfunc_with_staticmethod @@ -230,7 +239,6 @@ def func_convert_then_not_to_static(x): class TestClass(paddle.nn.Layer): - @paddle.jit.not_to_static def called_member(self, x): return paddle.sum(x) @@ -242,7 +250,6 @@ class TestClass(paddle.nn.Layer): class TestNotToConvert(TestRecursiveCall2): - def set_func(self): self.dygraph_func = func_not_to_static @@ -253,19 +260,16 @@ class TestNotToConvert(TestRecursiveCall2): class TestNotToConvert2(TestRecursiveCall2): - def set_func(self): self.dygraph_func = func_convert_then_not_to_static class TestNotToConvert3(TestRecursiveCall2): - def set_func(self): self.dygraph_func = TestClass() class TestDynamicToStaticCode(unittest.TestCase): - def setUp(self): self.set_func() self.set_answer_func() @@ -274,9 +278,7 @@ class TestDynamicToStaticCode(unittest.TestCase): self.func = func_not_to_static def set_answer_func(self): - - class StaticCode(): - + class StaticCode: @paddle.jit.not_to_static def func_not_to_static(x): res = func_sum(x) @@ -298,18 +300,17 @@ class TestDynamicToStaticCode(unittest.TestCase): answer_code, transformed_code, msg="\ntransformed_code : \n{}\nanswer_code : \n{}".format( - transformed_code, answer_code)) + transformed_code, answer_code + ), + ) class TestDynamicToStaticCode2(TestDynamicToStaticCode): - def set_func(self): self.func = func_convert_then_not_to_static def set_answer_func(self): - - class StaticCode(): - + class StaticCode: def func_convert_then_not_to_static(x): __return_value_0 = None y = _jst.Call(func_not_to_static)(x) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call_generator.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call_generator.py index 9a417cfe5853be95352272f019b916050e0547f2..a938d679158544c8dc0ddabca5b4b803a5ba845c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call_generator.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call_generator.py @@ -24,14 +24,12 @@ def dyfunc_generator(): def main_func(): - """ Error will raise, but we only report a warning not intercept - """ + """Error will raise, but we only report a warning not intercept""" for i in dyfunc_generator(): print(i) class TestConvertGenerator(unittest.TestCase): - def test_raise_error(self): with self.assertRaises(Exception): to_static(main_func)() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py index 65aec5ca8dd0e40b88eee0b61c3287751d259b4c..284febe07cf4dea650643fe58f849ea610b83c90 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py @@ -18,14 +18,12 @@ import unittest class CallNotExist(paddle.nn.Layer): - def __call__(self): # call a non-exist API 
to trigger exception return paddle.nn.not_exist_api class ForwardNotExist(paddle.nn.Layer): - def forward(self): return 0 @@ -35,9 +33,7 @@ setattr(net, "forward", "A string so that convert forward will fail") class TestConvertCall(unittest.TestCase): - def test_class_exception(self): - @paddle.jit.to_static def call_not_exist(): net = CallNotExist() @@ -55,16 +51,17 @@ class TestConvertCall(unittest.TestCase): class TestConvertShapeCompare(unittest.TestCase): - def test_non_variable(self): - self.assertEqual(paddle.jit.dy2static.convert_shape_compare(1, "<", 2), - True) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "<=", 3), - True) + paddle.jit.dy2static.convert_shape_compare(1, "<", 2), True + ) + self.assertEqual( + paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "<=", 3), True + ) self.assertEqual( paddle.jit.dy2static.convert_shape_compare(1, ">", 2, "<=", 3), - False) + False, + ) def error_func(): """ @@ -73,77 +70,108 @@ class TestConvertShapeCompare(unittest.TestCase): raise ValueError("Used for test") self.assertEqual( - paddle.jit.dy2static.convert_shape_compare(1, ">", 2, "<=", - lambda: error_func()), - False) + paddle.jit.dy2static.convert_shape_compare( + 1, ">", 2, "<=", lambda: error_func() + ), + False, + ) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "in", - [1, 2, 3]), True) + paddle.jit.dy2static.convert_shape_compare( + 1, "<", 2, "in", [1, 2, 3] + ), + True, + ) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "not in", - [1, 2, 3]), False) + paddle.jit.dy2static.convert_shape_compare( + 1, "<", 2, "not in", [1, 2, 3] + ), + False, + ) self.assertEqual( paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "is", 3), - False) + False, + ) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare(1, "<", 2, "is not", - [1, 2, 3]), True) + paddle.jit.dy2static.convert_shape_compare( + 1, "<", 2, "is not", [1, 2, 3] + ), + True, + ) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare([1, 2], "==", [1, 2], - "!=", [1, 2, 3]), True) + paddle.jit.dy2static.convert_shape_compare( + [1, 2], "==", [1, 2], "!=", [1, 2, 3] + ), + True, + ) self.assertEqual( - paddle.jit.dy2static.convert_shape_compare([1, 2], "!=", [1, 2, 3], - "==", [1, 2]), False) + paddle.jit.dy2static.convert_shape_compare( + [1, 2], "!=", [1, 2, 3], "==", [1, 2] + ), + False, + ) def test_variable(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.static.data(name='x', shape=[3, 2], dtype='float32') y = paddle.static.data(name='y', shape=[3, 2], dtype='float32') self.assertEqual( paddle.jit.dy2static.convert_shape_compare( - x, "is", x, "is not", y), True) + x, "is", x, "is not", y + ), + True, + ) self.assertEqual( paddle.jit.dy2static.convert_shape_compare( - x, "is not", x, "is not", y), False) + x, "is not", x, "is not", y + ), + False, + ) self.assertEqual( paddle.jit.dy2static.convert_shape_compare(x, "is", x, "is", y), - False) + False, + ) eq_out = paddle.jit.dy2static.convert_shape_compare(x, "==", y) not_eq_out = paddle.jit.dy2static.convert_shape_compare(x, "!=", y) long_eq_out = paddle.jit.dy2static.convert_shape_compare( - x, "==", x, "!=", y) - - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + x, "==", x, "!=", y + ) + + place = ( + paddle.CUDAPlace(0) + if 
paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) - x_y_eq_out = exe.run(feed={ - "x": np.ones([3, 2]).astype(np.float32), - "y": np.ones([3, 2]).astype(np.float32) - }, - fetch_list=[eq_out, not_eq_out, long_eq_out]) - np.testing.assert_array_equal(np.array(x_y_eq_out), - np.array([[True], [False], [False]])) + x_y_eq_out = exe.run( + feed={ + "x": np.ones([3, 2]).astype(np.float32), + "y": np.ones([3, 2]).astype(np.float32), + }, + fetch_list=[eq_out, not_eq_out, long_eq_out], + ) + np.testing.assert_array_equal( + np.array(x_y_eq_out), np.array([[True], [False], [False]]) + ) set_a_zero = np.ones([3, 2]).astype(np.float32) set_a_zero[0][0] = 0.0 x_y_not_eq_out = exe.run( - feed={ - "x": np.ones([3, 2]).astype(np.float32), - "y": set_a_zero - }, - fetch_list=[eq_out, not_eq_out, long_eq_out]) - np.testing.assert_array_equal(np.array(x_y_not_eq_out), - np.array([[False], [True], [True]])) + feed={"x": np.ones([3, 2]).astype(np.float32), "y": set_a_zero}, + fetch_list=[eq_out, not_eq_out, long_eq_out], + ) + np.testing.assert_array_equal( + np.array(x_y_not_eq_out), np.array([[False], [True], [True]]) + ) paddle.disable_static() class ShapeLayer(paddle.nn.Layer): - def __init__(self): super(ShapeLayer, self).__init__() @@ -158,7 +186,6 @@ class ShapeLayer(paddle.nn.Layer): class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase): - def test_tensor_shape(self): x = paddle.zeros(shape=[4, 1], dtype='float32') net = ShapeLayer() @@ -168,7 +195,6 @@ class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase): class TestIfElseNoValue(unittest.TestCase): - def test_else_ret_none(self): input_x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py index 53149f758baff700fecd2954882bd3d7ae8bdd10..60f7f70ad4bd7e1bdc59ab8d1b490205244b4bc0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py @@ -18,9 +18,7 @@ import numpy as np class TestCpuCuda(unittest.TestCase): - def test_cpu_cuda(self): - def func(x): x = paddle.to_tensor([1, 2, 3, 4]) x = x.cuda() @@ -33,9 +31,7 @@ class TestCpuCuda(unittest.TestCase): class TestToTensor(unittest.TestCase): - def test_to_tensor_with_variable_list(self): - def func(x): ones = paddle.to_tensor([1]) twos = paddle.to_tensor([2]) @@ -44,15 +40,15 @@ class TestToTensor(unittest.TestCase): x = paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), - np.array([1, 2, 3, 4]), - rtol=1e-05) + np.testing.assert_allclose( + paddle.jit.to_static(func)(x).numpy(), + np.array([1, 2, 3, 4]), + rtol=1e-05, + ) class TestToTensor1(unittest.TestCase): - def test_to_tensor_with_variable_list(self): - def func(x): ones = paddle.to_tensor([1]) twos = paddle.to_tensor([2]) @@ -64,24 +60,26 @@ class TestToTensor1(unittest.TestCase): x = paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), - np.array([1, 2, 3, 4]), - rtol=1e-05) + np.testing.assert_allclose( + paddle.jit.to_static(func)(x).numpy(), + np.array([1, 2, 3, 4]), + rtol=1e-05, + ) class TestToTensor2(unittest.TestCase): - def test_to_tensor_with_variable_list(self): - def func(x): x = paddle.to_tensor([[1], [2], [3], [4]]) return x x = 
paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), - np.array([[1], [2], [3], [4]]), - rtol=1e-05) + np.testing.assert_allclose( + paddle.jit.to_static(func)(x).numpy(), + np.array([[1], [2], [3], [4]]), + rtol=1e-05, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py index c5dd0ac064b0e73cb7d621a90b85d695f19a4f95..03db89350795c339470eeb5415080680066b2996 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py @@ -30,6 +30,7 @@ import numpy as np from PIL import Image, ImageOps import os + # Use GPU:0 to elimate the influence of other tasks. os.environ["CUDA_VISIBLE_DEVICES"] = "1" @@ -61,19 +62,22 @@ program_translator = ProgramTranslator() class Cycle_Gan(fluid.dygraph.Layer): - def __init__(self, input_channel, istrain=True): super(Cycle_Gan, self).__init__() self.build_generator_resnet_9blocks_a = build_generator_resnet_9blocks( - input_channel) + input_channel + ) self.build_generator_resnet_9blocks_b = build_generator_resnet_9blocks( - input_channel) + input_channel + ) if istrain: self.build_gen_discriminator_a = build_gen_discriminator( - input_channel) + input_channel + ) self.build_gen_discriminator_b = build_gen_discriminator( - input_channel) + input_channel + ) @declarative def forward(self, input_A, input_B): @@ -86,9 +90,11 @@ class Cycle_Gan(fluid.dygraph.Layer): cyc_B = self.build_generator_resnet_9blocks_a(fake_A) diff_A = fluid.layers.abs( - fluid.layers.elementwise_sub(x=input_A, y=cyc_A)) + fluid.layers.elementwise_sub(x=input_A, y=cyc_A) + ) diff_B = fluid.layers.abs( - fluid.layers.elementwise_sub(x=input_B, y=cyc_B)) + fluid.layers.elementwise_sub(x=input_B, y=cyc_B) + ) cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B cyc_loss = cyc_A_loss + cyc_B_loss @@ -100,17 +106,41 @@ class Cycle_Gan(fluid.dygraph.Layer): g_B_loss = fluid.layers.reduce_mean(fluid.layers.square(fake_rec_B - 1)) G = g_A_loss + g_B_loss idt_A = self.build_generator_resnet_9blocks_a(input_B) - idt_loss_A = fluid.layers.reduce_mean( - fluid.layers.abs(fluid.layers.elementwise_sub( - x=input_B, y=idt_A))) * lambda_B * lambda_identity + idt_loss_A = ( + fluid.layers.reduce_mean( + fluid.layers.abs( + fluid.layers.elementwise_sub(x=input_B, y=idt_A) + ) + ) + * lambda_B + * lambda_identity + ) idt_B = self.build_generator_resnet_9blocks_b(input_A) - idt_loss_B = fluid.layers.reduce_mean( - fluid.layers.abs(fluid.layers.elementwise_sub( - x=input_A, y=idt_B))) * lambda_A * lambda_identity + idt_loss_B = ( + fluid.layers.reduce_mean( + fluid.layers.abs( + fluid.layers.elementwise_sub(x=input_A, y=idt_B) + ) + ) + * lambda_A + * lambda_identity + ) idt_loss = fluid.layers.elementwise_add(idt_loss_A, idt_loss_B) g_loss = cyc_loss + G + idt_loss - return fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss + return ( + fake_A, + fake_B, + cyc_A, + cyc_B, + g_A_loss, + g_B_loss, + idt_loss_A, + idt_loss_B, + cyc_A_loss, + cyc_B_loss, + g_loss, + ) @declarative def discriminatorA(self, input_A, input_B): @@ -134,23 +164,26 @@ class Cycle_Gan(fluid.dygraph.Layer): class build_resnet_block(fluid.dygraph.Layer): - def __init__(self, dim, use_bias=False): 
super(build_resnet_block, self).__init__() - self.conv0 = conv2d(num_channels=dim, - num_filters=dim, - filter_size=3, - stride=1, - stddev=0.02, - use_bias=False) - self.conv1 = conv2d(num_channels=dim, - num_filters=dim, - filter_size=3, - stride=1, - stddev=0.02, - relu=False, - use_bias=False) + self.conv0 = conv2d( + num_channels=dim, + num_filters=dim, + filter_size=3, + stride=1, + stddev=0.02, + use_bias=False, + ) + self.conv1 = conv2d( + num_channels=dim, + num_filters=dim, + filter_size=3, + stride=1, + stddev=0.02, + relu=False, + use_bias=False, + ) self.dim = dim def forward(self, inputs): @@ -163,33 +196,39 @@ class build_resnet_block(fluid.dygraph.Layer): class build_generator_resnet_9blocks(fluid.dygraph.Layer): - def __init__(self, input_channel): super(build_generator_resnet_9blocks, self).__init__() - self.conv0 = conv2d(num_channels=input_channel, - num_filters=32, - filter_size=7, - stride=1, - padding=0, - stddev=0.02) - self.conv1 = conv2d(num_channels=32, - num_filters=64, - filter_size=3, - stride=2, - padding=1, - stddev=0.02) - self.conv2 = conv2d(num_channels=64, - num_filters=128, - filter_size=3, - stride=2, - padding=1, - stddev=0.02) + self.conv0 = conv2d( + num_channels=input_channel, + num_filters=32, + filter_size=7, + stride=1, + padding=0, + stddev=0.02, + ) + self.conv1 = conv2d( + num_channels=32, + num_filters=64, + filter_size=3, + stride=2, + padding=1, + stddev=0.02, + ) + self.conv2 = conv2d( + num_channels=64, + num_filters=128, + filter_size=3, + stride=2, + padding=1, + stddev=0.02, + ) self.build_resnet_block_list = [] dim = 128 for i in range(9): - Build_Resnet_Block = self.add_sublayer("generator_%d" % (i + 1), - build_resnet_block(dim)) + Build_Resnet_Block = self.add_sublayer( + "generator_%d" % (i + 1), build_resnet_block(dim) + ) self.build_resnet_block_list.append(Build_Resnet_Block) self.deconv0 = DeConv2D( num_channels=dim, @@ -200,22 +239,26 @@ class build_generator_resnet_9blocks(fluid.dygraph.Layer): padding=[1, 1], outpadding=[0, 1, 0, 1], ) - self.deconv1 = DeConv2D(num_channels=32 * 2, - num_filters=32, - filter_size=3, - stride=2, - stddev=0.02, - padding=[1, 1], - outpadding=[0, 1, 0, 1]) - self.conv3 = conv2d(num_channels=32, - num_filters=input_channel, - filter_size=7, - stride=1, - stddev=0.02, - padding=0, - relu=False, - norm=False, - use_bias=True) + self.deconv1 = DeConv2D( + num_channels=32 * 2, + num_filters=32, + filter_size=3, + stride=2, + stddev=0.02, + padding=[1, 1], + outpadding=[0, 1, 0, 1], + ) + self.conv3 = conv2d( + num_channels=32, + num_filters=input_channel, + filter_size=7, + stride=1, + stddev=0.02, + padding=0, + relu=False, + norm=False, + use_bias=True, + ) def forward(self, inputs): pad_input = fluid.layers.pad2d(inputs, [3, 3, 3, 3], mode="reflect") @@ -233,49 +276,58 @@ class build_generator_resnet_9blocks(fluid.dygraph.Layer): class build_gen_discriminator(fluid.dygraph.Layer): - def __init__(self, input_channel): super(build_gen_discriminator, self).__init__() - self.conv0 = conv2d(num_channels=input_channel, - num_filters=64, - filter_size=4, - stride=2, - stddev=0.02, - padding=1, - norm=False, - use_bias=True, - relufactor=0.2) - self.conv1 = conv2d(num_channels=64, - num_filters=128, - filter_size=4, - stride=2, - stddev=0.02, - padding=1, - relufactor=0.2) - self.conv2 = conv2d(num_channels=128, - num_filters=IMAGE_SIZE, - filter_size=4, - stride=2, - stddev=0.02, - padding=1, - relufactor=0.2) - self.conv3 = conv2d(num_channels=IMAGE_SIZE, - num_filters=512, - filter_size=4, - 
stride=1, - stddev=0.02, - padding=1, - relufactor=0.2) - self.conv4 = conv2d(num_channels=512, - num_filters=1, - filter_size=4, - stride=1, - stddev=0.02, - padding=1, - norm=False, - relu=False, - use_bias=True) + self.conv0 = conv2d( + num_channels=input_channel, + num_filters=64, + filter_size=4, + stride=2, + stddev=0.02, + padding=1, + norm=False, + use_bias=True, + relufactor=0.2, + ) + self.conv1 = conv2d( + num_channels=64, + num_filters=128, + filter_size=4, + stride=2, + stddev=0.02, + padding=1, + relufactor=0.2, + ) + self.conv2 = conv2d( + num_channels=128, + num_filters=IMAGE_SIZE, + filter_size=4, + stride=2, + stddev=0.02, + padding=1, + relufactor=0.2, + ) + self.conv3 = conv2d( + num_channels=IMAGE_SIZE, + num_filters=512, + filter_size=4, + stride=1, + stddev=0.02, + padding=1, + relufactor=0.2, + ) + self.conv4 = conv2d( + num_channels=512, + num_filters=1, + filter_size=4, + stride=1, + stddev=0.02, + padding=1, + norm=False, + relu=False, + use_bias=True, + ) def forward(self, inputs): y = self.conv0(inputs) @@ -289,35 +341,42 @@ class build_gen_discriminator(fluid.dygraph.Layer): class conv2d(fluid.dygraph.Layer): """docstring for Conv2D""" - def __init__(self, - num_channels, - num_filters=64, - filter_size=7, - stride=1, - stddev=0.02, - padding=0, - norm=True, - relu=True, - relufactor=0.0, - use_bias=False): + def __init__( + self, + num_channels, + num_filters=64, + filter_size=7, + stride=1, + stddev=0.02, + padding=0, + norm=True, + relu=True, + relufactor=0.0, + use_bias=False, + ): super(conv2d, self).__init__() if use_bias == False: con_bias_attr = False else: con_bias_attr = fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.0)) - - self.conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=padding, - use_cudnn=use_cudnn, - param_attr=fluid.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=stddev)), - bias_attr=con_bias_attr) + initializer=fluid.initializer.Constant(0.0) + ) + + self.conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + use_cudnn=use_cudnn, + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=stddev + ) + ), + bias_attr=con_bias_attr, + ) # Note(Aurelius84): The calculation of GPU kernel in BN is non-deterministic, # failure rate is 1/100 in Dev but seems incremental in CE platform. # If on GPU, we disable BN temporarily. 
@@ -328,10 +387,13 @@ class conv2d(fluid.dygraph.Layer): use_global_stats=True, # set True to use deterministic algorithm num_channels=num_filters, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.NormalInitializer(1.0, 0.02)), + initializer=fluid.initializer.NormalInitializer(1.0, 0.02) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.0)), - trainable_statistics=True) + initializer=fluid.initializer.Constant(0.0) + ), + trainable_statistics=True, + ) self.relufactor = relufactor self.use_bias = use_bias @@ -348,26 +410,28 @@ class conv2d(fluid.dygraph.Layer): class DeConv2D(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters=64, - filter_size=7, - stride=1, - stddev=0.02, - padding=[0, 0], - outpadding=[0, 0, 0, 0], - relu=True, - norm=True, - relufactor=0.0, - use_bias=False): + def __init__( + self, + num_channels, + num_filters=64, + filter_size=7, + stride=1, + stddev=0.02, + padding=[0, 0], + outpadding=[0, 0, 0, 0], + relu=True, + norm=True, + relufactor=0.0, + use_bias=False, + ): super(DeConv2D, self).__init__() if use_bias == False: de_bias_attr = False else: de_bias_attr = fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.0)) + initializer=fluid.initializer.Constant(0.0) + ) self._deconv = Conv2DTranspose( num_channels, @@ -377,9 +441,12 @@ class DeConv2D(fluid.dygraph.Layer): padding=padding, use_cudnn=use_cudnn, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.NormalInitializer(loc=0.0, - scale=stddev)), - bias_attr=de_bias_attr) + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=stddev + ) + ), + bias_attr=de_bias_attr, + ) if fluid.is_compiled_with_cuda(): norm = False if norm: @@ -387,10 +454,13 @@ class DeConv2D(fluid.dygraph.Layer): use_global_stats=True, # set True to use deterministic algorithm num_channels=num_filters, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.NormalInitializer(1.0, 0.02)), + initializer=fluid.initializer.NormalInitializer(1.0, 0.02) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.0)), - trainable_statistics=True) + initializer=fluid.initializer.Constant(0.0) + ), + trainable_statistics=True, + ) self.outpadding = outpadding self.relufactor = relufactor @@ -400,10 +470,9 @@ class DeConv2D(fluid.dygraph.Layer): def forward(self, inputs): conv = self._deconv(inputs) - conv = fluid.layers.pad2d(conv, - paddings=self.outpadding, - mode='constant', - pad_value=0.0) + conv = fluid.layers.pad2d( + conv, paddings=self.outpadding, mode='constant', pad_value=0.0 + ) if self.norm: conv = self.bn(conv) @@ -413,7 +482,6 @@ class DeConv2D(fluid.dygraph.Layer): class ImagePool(object): - def __init__(self, pool_size=50): self.pool = [] self.count = 0 @@ -436,11 +504,11 @@ class ImagePool(object): def reader_creater(): - def reader(): while True: fake_image = np.uint8( - np.random.random((IMAGE_SIZE + 30, IMAGE_SIZE + 30, 3)) * 255) + np.random.random((IMAGE_SIZE + 30, IMAGE_SIZE + 30, 3)) * 255 + ) image = Image.fromarray(fake_image) # Resize image = image.resize((286, 286), Image.BICUBIC) @@ -474,20 +542,29 @@ class Args(object): def optimizer_setting(parameters): lr = 0.0002 - optimizer = fluid.optimizer.Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=[ - 100 * step_per_epoch, 120 * step_per_epoch, 140 * step_per_epoch, - 160 * step_per_epoch, 180 * step_per_epoch - ], - values=[lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1]), - parameter_list=parameters, - beta1=0.5) + optimizer = fluid.optimizer.Adam( 
+ learning_rate=fluid.layers.piecewise_decay( + boundaries=[ + 100 * step_per_epoch, + 120 * step_per_epoch, + 140 * step_per_epoch, + 160 * step_per_epoch, + 180 * step_per_epoch, + ], + values=[lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1], + ), + parameter_list=parameters, + beta1=0.5, + ) return optimizer def train(args, to_static): - place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ + place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + ) program_translator.enable(to_static) @@ -507,8 +584,10 @@ def train(args, to_static): cycle_gan = Cycle_Gan(input_channel=data_shape[1], istrain=True) t_time = 0 - vars_G = cycle_gan.build_generator_resnet_9blocks_a.parameters( - ) + cycle_gan.build_generator_resnet_9blocks_b.parameters() + vars_G = ( + cycle_gan.build_generator_resnet_9blocks_a.parameters() + + cycle_gan.build_generator_resnet_9blocks_b.parameters() + ) vars_da = cycle_gan.build_gen_discriminator_a.parameters() vars_db = cycle_gan.build_gen_discriminator_b.parameters() @@ -525,17 +604,28 @@ def train(args, to_static): s_time = time.time() data_A = np.array( - [data_A[0].reshape(3, IMAGE_SIZE, - IMAGE_SIZE)]).astype("float32") + [data_A[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)] + ).astype("float32") data_B = np.array( - [data_B[0].reshape(3, IMAGE_SIZE, - IMAGE_SIZE)]).astype("float32") + [data_B[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)] + ).astype("float32") data_A = to_variable(data_A) data_B = to_variable(data_B) # optimize the g_A network - fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss = cycle_gan( - data_A, data_B) + ( + fake_A, + fake_B, + cyc_A, + cyc_B, + g_A_loss, + g_B_loss, + idt_loss_A, + idt_loss_B, + cyc_A_loss, + cyc_B_loss, + g_loss, + ) = cycle_gan(data_A, data_B) g_loss.backward() optimizer1.minimize(g_loss) @@ -543,21 +633,24 @@ def train(args, to_static): fake_pool_B = B_pool.pool_image(fake_B).numpy() fake_pool_B = np.array( - [fake_pool_B[0].reshape(3, IMAGE_SIZE, - IMAGE_SIZE)]).astype("float32") + [fake_pool_B[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)] + ).astype("float32") fake_pool_B = to_variable(fake_pool_B) fake_pool_A = A_pool.pool_image(fake_A).numpy() fake_pool_A = np.array( - [fake_pool_A[0].reshape(3, IMAGE_SIZE, - IMAGE_SIZE)]).astype("float32") + [fake_pool_A[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)] + ).astype("float32") fake_pool_A = to_variable(fake_pool_A) # optimize the d_A network rec_B, fake_pool_rec_B = cycle_gan.discriminatorA( - data_B, fake_pool_B) - d_loss_A = (fluid.layers.square(fake_pool_rec_B) + - fluid.layers.square(rec_B - 1)) / 2.0 + data_B, fake_pool_B + ) + d_loss_A = ( + fluid.layers.square(fake_pool_rec_B) + + fluid.layers.square(rec_B - 1) + ) / 2.0 d_loss_A = fluid.layers.reduce_mean(d_loss_A) d_loss_A.backward() @@ -566,9 +659,12 @@ def train(args, to_static): # optimize the d_B network rec_A, fake_pool_rec_A = cycle_gan.discriminatorB( - data_A, fake_pool_A) - d_loss_B = (fluid.layers.square(fake_pool_rec_A) + - fluid.layers.square(rec_A - 1)) / 2.0 + data_A, fake_pool_A + ) + d_loss_B = ( + fluid.layers.square(fake_pool_rec_A) + + fluid.layers.square(rec_A - 1) + ) / 2.0 d_loss_B = fluid.layers.reduce_mean(d_loss_B) d_loss_B.backward() @@ -578,8 +674,15 @@ def train(args, to_static): # Log generator loss and discriminator loss cur_batch_loss = [ - g_loss, d_loss_A, d_loss_B, g_A_loss, cyc_A_loss, - idt_loss_A, g_B_loss, cyc_B_loss, idt_loss_B + g_loss, + d_loss_A, + d_loss_B, + g_A_loss, + cyc_A_loss, + idt_loss_A, + 
g_B_loss, + cyc_B_loss, + idt_loss_B, ] cur_batch_loss = [x.numpy()[0] for x in cur_batch_loss] @@ -587,8 +690,10 @@ def train(args, to_static): t_time += batch_time if batch_id % args.log_step == 0: print( - "batch: {}\t Batch_time_cost: {}\n g_loss: {}\t d_A_loss: {}\t d_B_loss:{}\n g_A_loss: {}\t g_A_cyc_loss: {}\t g_A_idt_loss: {}\n g_B_loss: {}\t g_B_cyc_loss: {}\t g_B_idt_loss: {}" - .format(batch_id, batch_time, *cur_batch_loss)) + "batch: {}\t Batch_time_cost: {}\n g_loss: {}\t d_A_loss: {}\t d_B_loss:{}\n g_A_loss: {}\t g_A_cyc_loss: {}\t g_A_idt_loss: {}\n g_B_loss: {}\t g_B_cyc_loss: {}\t g_B_idt_loss: {}".format( + batch_id, batch_time, *cur_batch_loss + ) + ) if batch_id > args.train_step: break @@ -598,7 +703,6 @@ def train(args, to_static): class TestCycleGANModel(unittest.TestCase): - def setUp(self): self.args = Args() @@ -617,8 +721,10 @@ class TestCycleGANModel(unittest.TestCase): if not fluid.is_compiled_with_cuda(): assert_func = np.array_equal - self.assertTrue(assert_func(dy_out, st_out), - msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out)) + self.assertTrue( + assert_func(dy_out, st_out), + msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py index 947cfa35d434e277a8a9fdfd642d91e38c8d78c8..6ea76d7592588602cec73041aff7ac2b5645f043 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py @@ -19,8 +19,17 @@ import tempfile import paddle import paddle.fluid as fluid from paddle.static import InputSpec -from paddle.fluid.dygraph import to_variable, declarative, ProgramTranslator, Layer, jit -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ConcreteProgram, StaticFunction +from paddle.fluid.dygraph import ( + to_variable, + declarative, + ProgramTranslator, + Layer, + jit, +) +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ConcreteProgram, + StaticFunction, +) from test_basic_api_transformation import dyfunc_to_variable @@ -28,7 +37,6 @@ program_trans = ProgramTranslator() class SimpleNet(Layer): - def __init__(self): super(SimpleNet, self).__init__() self.linear = fluid.dygraph.Linear(10, 3) @@ -55,10 +63,9 @@ class SimpleNet(Layer): z = z + int_val return z - @declarative(input_spec=[{ - 'x': InputSpec([None, 10]), - 'y': InputSpec([None, 10]) - }]) + @declarative( + input_spec=[{'x': InputSpec([None, 10]), 'y': InputSpec([None, 10])}] + ) def func_with_dict(self, d): x = d['x'] y = d['y'] @@ -66,12 +73,14 @@ class SimpleNet(Layer): return z - @declarative(input_spec=[[ - InputSpec([None]), { - 'x': InputSpec([None, 10]), - 'y': InputSpec([None, 10]) - } - ]]) + @declarative( + input_spec=[ + [ + InputSpec([None]), + {'x': InputSpec([None, 10]), 'y': InputSpec([None, 10])}, + ] + ] + ) def func_with_list_dict(self, dl): bias = dl[0] x = dl[1]['x'] @@ -84,7 +93,6 @@ class SimpleNet(Layer): class TestStaticFunctionInstance(unittest.TestCase): - def test_instance_same_class(self): with fluid.dygraph.guard(fluid.CPUPlace()): net_1 = SimpleNet() @@ -102,7 +110,6 @@ class TestStaticFunctionInstance(unittest.TestCase): class TestInputSpec(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.model_path = os.path.join(self.temp_dir.name, 'simple_net') @@ -114,7 +121,7 @@ class 
TestInputSpec(unittest.TestCase): with fluid.dygraph.guard(fluid.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) - int_val = 4. + int_val = 4.0 net = SimpleNet() @@ -150,7 +157,7 @@ class TestInputSpec(unittest.TestCase): with fluid.dygraph.guard(fluid.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) - int_val = 4. + int_val = 4.0 net = SimpleNet() @@ -160,26 +167,28 @@ class TestInputSpec(unittest.TestCase): # 2. requires len(input_spec) <= len(args) with self.assertRaises(ValueError): - net.add_func = declarative(net.add_func, - input_spec=[ - InputSpec([-1, 10]), - InputSpec([-1, 10]), - InputSpec([10]) - ]) + net.add_func = declarative( + net.add_func, + input_spec=[ + InputSpec([-1, 10]), + InputSpec([-1, 10]), + InputSpec([10]), + ], + ) net.add_func(x, y) def test_concrete_program(self): with fluid.dygraph.guard(fluid.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) - int_val = 4. + int_val = 4.0 net = SimpleNet() # We can get concrete_program by specificing InputSpec information. Faking input is no need. net.add_func = declarative( net.add_func, - input_spec=[InputSpec([-1, 10]), - InputSpec([-1, 10], name='y')]) + input_spec=[InputSpec([-1, 10]), InputSpec([-1, 10], name='y')], + ) cp1 = net.add_func.concrete_program self.assertTrue(cp1.inputs[-1].shape == (-1, 10)) self.assertTrue(cp1.inputs[-1].name == 'y') @@ -187,10 +196,10 @@ class TestInputSpec(unittest.TestCase): # generate another program net.add_func = declarative( net.add_func, - input_spec=[InputSpec([10]), - InputSpec([10], name='label')]) + input_spec=[InputSpec([10]), InputSpec([10], name='label')], + ) cp2 = net.add_func.concrete_program - self.assertTrue(cp2.inputs[-1].shape == (10, )) + self.assertTrue(cp2.inputs[-1].shape == (10,)) self.assertTrue(cp2.inputs[-1].name == 'label') # Note(Aurelius84): New instance will be returned if we use `declarative(foo)` every time. # So number of cache program is 1. 
@@ -204,7 +213,6 @@ def foo_func(a, b, c=1, d=2): class TestDifferentInputSpecCacheProgram(unittest.TestCase): - def setUp(self): program_trans.enable(True) @@ -218,33 +226,33 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): # [16, 10] + [10] (varbase) out_1 = foo(to_variable(x_data), to_variable(y_data)) - np.testing.assert_allclose(x_data + y_data, - out_1.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_data + y_data, out_1.numpy(), rtol=1e-05 + ) self.assertTrue(len(foo.program_cache) == 1) self.assertTrue(len(foo.program_cache.concrete_programs()) == 1) first_program = foo.program_cache.last() # [16, 10] + [10] (numpy) out_2 = foo(to_variable(x_data), y_data) - np.testing.assert_allclose(x_data + y_data, - out_2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_data + y_data, out_2.numpy(), rtol=1e-05 + ) self.assertTrue(len(foo.program_cache) == 1) # [16, 10] + [10] (numpy) out_3 = foo(to_variable(x_data), z_data) - np.testing.assert_allclose(x_data + z_data, - out_3.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_data + z_data, out_3.numpy(), rtol=1e-05 + ) # hit cache program self.assertTrue(len(foo.program_cache) == 1) # [16, 10] + [10] (numpy) with other different arguments (c=3) out_4 = foo(to_variable(x_data), z_data, 3) - np.testing.assert_allclose(x_data + z_data, - out_4.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_data + z_data, out_4.numpy(), rtol=1e-05 + ) # create a new program self.assertTrue(len(foo.program_cache) == 2) @@ -258,26 +266,29 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): foo = declarative(foo_func) # 1. specific InputSpec for `x`/`y` - concrete_program_1 = foo.get_concrete_program(InputSpec([None, 10]), - InputSpec([10])) + concrete_program_1 = foo.get_concrete_program( + InputSpec([None, 10]), InputSpec([10]) + ) self.assertTrue(len(foo.program_cache) == 1) # 2. specific `c`/`d` explicitly with same default value - concrete_program_2 = foo.get_concrete_program(InputSpec([None, 10]), - InputSpec([10]), 1, 2) + concrete_program_2 = foo.get_concrete_program( + InputSpec([None, 10]), InputSpec([10]), 1, 2 + ) self.assertTrue(concrete_program_2 == concrete_program_1) self.assertTrue(len(foo.program_cache) == 1) # 3. specific `c` = 2 - concrete_program_3 = foo.get_concrete_program(InputSpec([None, 10]), - InputSpec([10]), - c=2) + concrete_program_3 = foo.get_concrete_program( + InputSpec([None, 10]), InputSpec([10]), c=2 + ) self.assertTrue(concrete_program_3 != concrete_program_1) self.assertTrue(len(foo.program_cache) == 2) # 4. specific x.shape = [10] - concrete_program_4 = foo.get_concrete_program(InputSpec([10]), - InputSpec([10])) + concrete_program_4 = foo.get_concrete_program( + InputSpec([10]), InputSpec([10]) + ) self.assertTrue(concrete_program_4 != concrete_program_1) self.assertTrue(len(foo.program_cache) == 3) @@ -287,19 +298,21 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): # 6. 
specific unknown kwargs `e`=4 with self.assertRaises(TypeError): - concrete_program_5 = foo.get_concrete_program(InputSpec([10]), - InputSpec([10]), - e=4) + concrete_program_5 = foo.get_concrete_program( + InputSpec([10]), InputSpec([10]), e=4 + ) def test_concrete_program(self): with fluid.dygraph.guard(fluid.CPUPlace()): # usage 1 - foo_1 = paddle.jit.to_static(foo_func, - input_spec=[ - InputSpec([10], name='x'), - InputSpec([10], name='y') - ]) + foo_1 = paddle.jit.to_static( + foo_func, + input_spec=[ + InputSpec([10], name='x'), + InputSpec([10], name='y'), + ], + ) self.assertTrue(isinstance(foo_1.concrete_program, ConcreteProgram)) # usage 2 @@ -314,7 +327,6 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): class TestInputDefaultName(unittest.TestCase): - def setUp(self): paddle.disable_static() self.net = SimpleNet() @@ -339,7 +351,6 @@ class TestInputDefaultName(unittest.TestCase): class TestDeclarativeAPI(unittest.TestCase): - def test_error(self): func = declarative(dyfunc_to_variable) @@ -358,7 +369,6 @@ class TestDeclarativeAPI(unittest.TestCase): class TestDecorateModelDirectly(unittest.TestCase): - def setUp(self): paddle.disable_static() program_trans.enable(True) @@ -385,27 +395,28 @@ class TestDecorateModelDirectly(unittest.TestCase): class TestErrorWithInitFromStaticMode(unittest.TestCase): - def test_raise_error(self): # disable imperative paddle.enable_static() net = SimpleNet() - with self.assertRaisesRegexp(RuntimeError, - "only available in dynamic mode"): + with self.assertRaisesRegexp( + RuntimeError, "only available in dynamic mode" + ): net.forward.concrete_program - with self.assertRaisesRegexp(RuntimeError, - "only available in dynamic mode"): + with self.assertRaisesRegexp( + RuntimeError, "only available in dynamic mode" + ): net.forward.inputs - with self.assertRaisesRegexp(RuntimeError, - "only available in dynamic mode"): + with self.assertRaisesRegexp( + RuntimeError, "only available in dynamic mode" + ): net.forward.outputs class CallNonForwardFuncNet(paddle.nn.Layer): - def __init__(self): super(CallNonForwardFuncNet, self).__init__() self.sub = CallNonForwardFuncSubNet() @@ -416,7 +427,6 @@ class CallNonForwardFuncNet(paddle.nn.Layer): class CallNonForwardFuncSubNet(paddle.nn.Layer): - def __init__(self): super(CallNonForwardFuncSubNet, self).__init__() self.a = paddle.to_tensor([1, 2]) @@ -427,7 +437,6 @@ class CallNonForwardFuncSubNet(paddle.nn.Layer): class TestCallNonForwardFunc(unittest.TestCase): - def test_call_non_forward(self): paddle.disable_static() net = CallNonForwardFuncNet() @@ -437,7 +446,6 @@ class TestCallNonForwardFunc(unittest.TestCase): class SetBuffersNet1(paddle.nn.Layer): - def __init__(self): super(SetBuffersNet1, self).__init__() self.a = paddle.to_tensor([1]) @@ -449,7 +457,6 @@ class SetBuffersNet1(paddle.nn.Layer): class SetBuffersNet2(paddle.nn.Layer): - def __init__(self): super(SetBuffersNet2, self).__init__() self.b = paddle.to_tensor([2]) @@ -462,7 +469,6 @@ class SetBuffersNet2(paddle.nn.Layer): class TestSetBuffers(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.model_path = os.path.join(self.temp_dir.name, 'SetBuffersNet1') @@ -487,13 +493,11 @@ class TestSetBuffers(unittest.TestCase): class ClassNoInheritLayer: - def func(self, x): return x + 1 class TestClassNoInheritLayer(unittest.TestCase): - def test_to_static(self): paddle.disable_static() net = ClassNoInheritLayer() diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py index bda090fc4c9edd3a4b424dd27c95c4822dfeae9b..2b73a1075e5a31699c34fc6bbb5410e1514ea1e3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py @@ -22,12 +22,11 @@ from contextlib import contextmanager def deco1(func): - @wraps(func) def inner(*args, **kwargs): print('in deco1, added 1') _x = 2 - if (_x < 1): + if _x < 1: _x += 1 else: _x -= 1 @@ -39,7 +38,6 @@ def deco1(func): def deco2(fun): - @wraps(fun) def inner(*args, **kwargs): print('in deco2, added 2') @@ -51,9 +49,7 @@ def deco2(fun): def deco3(x=3): - def inner_deco(func): - @wraps(func) def inner(*args, **kwargs): print('in deco3, added {}'.format(x)) @@ -67,9 +63,7 @@ def deco3(x=3): def deco4(func=None, x=0): - def decorated(pyfunc): - @wraps(pyfunc) def inner_deco(*args, **kwargs): print('in deco4, added {}'.format(x)) @@ -187,7 +181,6 @@ def deco_with_paddle_api(): class TestDecoratorTransform(unittest.TestCase): - def test_deco_transform(self): outs = forward() np.testing.assert_allclose(outs[0], np.array(3), rtol=1e-05) @@ -206,9 +199,11 @@ class TestDecoratorTransform(unittest.TestCase): warn1() flag = False for warn in w: - if (issubclass(warn.category, UserWarning) - ) and "A context manager decorator is used" in str( - warn.message): + if ( + issubclass(warn.category, UserWarning) + ) and "A context manager decorator is used" in str( + warn.message + ): flag = True break self.assertTrue(flag) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py index ecb2d97fa4482a0a0355768dce3e43c720d31696..a4823be643029df4f33c6215d397ee631520a0e7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py @@ -15,14 +15,15 @@ import unittest import paddle import numpy as np -from paddle.fluid.dygraph.dygraph_to_static.program_translator import StaticFunction +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + StaticFunction, +) from test_rollback import Net, foo from copy import deepcopy class TestDeepCopy(unittest.TestCase): - def test_net(self): net = Net() net = paddle.jit.to_static(net) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py index f6b71eb72654269eaa2d926adbbb80a9992c964b..fcb986045b8760779403a1416ea7f7001f97bc90 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py @@ -18,32 +18,41 @@ import unittest import paddle import paddle.fluid as fluid from paddle.jit import to_static -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, +) -PLACE = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +PLACE = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) class SubNetWithDict(fluid.dygraph.Layer): - def __init__(self, hidden_size=16, output_size=16): super(SubNetWithDict, self).__init__() - init_weight = lambda x: 
fluid.ParamAttr(initializer=fluid.initializer. - Constant(x)) - - self.q_fc = fluid.dygraph.Linear(input_dim=hidden_size, - output_dim=output_size, - bias_attr=False, - param_attr=init_weight(0.6)) - self.k_fc = fluid.dygraph.Linear(input_dim=hidden_size, - output_dim=output_size, - bias_attr=False, - param_attr=init_weight(0.5)) - self.v_fc = fluid.dygraph.Linear(input_dim=hidden_size, - output_dim=output_size, - bias_attr=False, - param_attr=init_weight(0.2)) + init_weight = lambda x: fluid.ParamAttr( + initializer=fluid.initializer.Constant(x) + ) + + self.q_fc = fluid.dygraph.Linear( + input_dim=hidden_size, + output_dim=output_size, + bias_attr=False, + param_attr=init_weight(0.6), + ) + self.k_fc = fluid.dygraph.Linear( + input_dim=hidden_size, + output_dim=output_size, + bias_attr=False, + param_attr=init_weight(0.5), + ) + self.v_fc = fluid.dygraph.Linear( + input_dim=hidden_size, + output_dim=output_size, + bias_attr=False, + param_attr=init_weight(0.2), + ) def forward(self, input, cache=None): input = fluid.dygraph.to_variable(input) @@ -66,7 +75,6 @@ class SubNetWithDict(fluid.dygraph.Layer): class MainNetWithDict(fluid.dygraph.Layer): - def __init__(self, batch_size=64, hidden_size=16, output_size=16): super(MainNetWithDict, self).__init__() self.batch_size = batch_size @@ -78,16 +86,16 @@ class MainNetWithDict(fluid.dygraph.Layer): def forward(self, input, max_len=4): input = fluid.dygraph.to_variable(input) cache = { - "k": - fluid.layers.fill_constant( + "k": fluid.layers.fill_constant( shape=[self.batch_size, self.output_size], dtype='float32', - value=0), - "v": - fluid.layers.fill_constant( + value=0, + ), + "v": fluid.layers.fill_constant( shape=[self.batch_size, self.output_size], dtype='float32', - value=0), + value=0, + ), } # TODO(Aurelius84): The following code will be converted into: # max_len = layers.cond(layers.shape(input)[0] != max_len, @@ -165,11 +173,13 @@ def test_dic_pop_2(x): class TestDictPop(unittest.TestCase): - def setUp(self): self.input = np.random.random((3)).astype('int32') - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self._set_test_func() def _set_test_func(self): @@ -197,17 +207,17 @@ class TestDictPop(unittest.TestCase): static_res, rtol=1e-05, err_msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) class TestDictPop2(TestDictPop): - def _set_test_func(self): self.dygraph_func = test_dic_pop_2 class NetWithDictPop(paddle.nn.Layer): - def __init__(self): super(NetWithDictPop, self).__init__() @@ -224,7 +234,6 @@ class NetWithDictPop(paddle.nn.Layer): class TestDictPop3(TestNetWithDict): - def setUp(self): self.x = np.array([2, 2]).astype('float32') @@ -240,15 +249,16 @@ class TestDictPop3(TestNetWithDict): dygraph_result = self._run_dygraph() static_result = self._run_static() - self.assertTrue((dygraph_result == static_result).all(), - msg="dygraph result: {}\nstatic result: {}".format( - dygraph_result, static_result)) + self.assertTrue( + (dygraph_result == static_result).all(), + msg="dygraph result: {}\nstatic result: {}".format( + dygraph_result, static_result + ), + ) class TestDictCmpInFor(unittest.TestCase): - def test_with_for(self): - def func(): pos = [1, 3] neg = [-1, -3] @@ -257,15 +267,14 @@ class TestDictCmpInFor(unittest.TestCase): for (x, y) in zip(pos, neg): val = x - y dict_val.update( - {k: val + 
dict_val[k] - for k, v in dict_val.items()}) + {k: val + dict_val[k] for k, v in dict_val.items()} + ) return dict_val self.assertEqual(paddle.jit.to_static(func)()['minus'], 8) def test_with_for_enumerate(self): - def func(): pos = [1, 3] neg = [-1, -3] @@ -274,8 +283,8 @@ class TestDictCmpInFor(unittest.TestCase): for i, (x, y) in enumerate(zip(pos, neg)): val = x - y dict_val.update( - {k: val + dict_val[k] - for k, v in dict_val.items()}) + {k: val + dict_val[k] for k, v in dict_val.items()} + ) return dict_val diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py index 201da70b01770316b26534504927247c47e212d7..c5eb3d7567efe200f4ce22ab8aa32bbec62ad84d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py @@ -26,7 +26,6 @@ def drop_path(x, training=False): class DropPath(paddle.nn.Layer): - def __init__(self): super(DropPath, self).__init__() @@ -36,7 +35,6 @@ class DropPath(paddle.nn.Layer): class TestTrainEval(unittest.TestCase): - def setUp(self): self.model = DropPath() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_duplicate_output.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_duplicate_output.py index 9534d1dc615c853f1ad000783a85c76efdff2047..625051d9364e5c9fef4b6abf94aaed3cbae1eed0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_duplicate_output.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_duplicate_output.py @@ -26,14 +26,12 @@ else: class SimpleNet(paddle.nn.Layer): - def __init__(self): super().__init__() self._linear = paddle.nn.Linear(1, 1) def forward(self, x): - """ forward with duplicate outputs. 
- """ + """forward with duplicate outputs.""" x = self._linear(x) return x, x diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py index 4eada80aec87b8e5b9a7beeeb6107ffc56fcb009..6e7389cc132d9076e298f17c9648c3349de1d38e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py @@ -23,13 +23,11 @@ SEED = 2020 class Pool2D(fluid.dygraph.Layer): - def __init__(self): super(Pool2D, self).__init__() - self.pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='avg', - pool_stride=1, - global_pooling=False) + self.pool2d = fluid.dygraph.Pool2D( + pool_size=2, pool_type='avg', pool_stride=1, global_pooling=False + ) @declarative def forward(self, x): @@ -42,17 +40,19 @@ class Pool2D(fluid.dygraph.Layer): class Linear(fluid.dygraph.Layer): - def __init__(self, input_dim=10, output_dim=5): super(Linear, self).__init__() self.fc = fluid.dygraph.Linear( input_dim, output_dim, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), + ) @declarative def forward(self, x): @@ -62,7 +62,6 @@ class Linear(fluid.dygraph.Layer): class TestPool2D(unittest.TestCase): - def setUp(self): self.dygraph_class = Pool2D self.data = np.random.random((1, 2, 4, 4)).astype('float32') @@ -95,11 +94,12 @@ class TestPool2D(unittest.TestCase): static_res, rtol=1e-05, err_msg='dygraph_res is {}\n static_res is \n{}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) class TestLinear(TestPool2D): - def setUp(self): self.dygraph_class = Linear self.data = np.random.random((4, 10)).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py index 6b399b5e9be7d087c059eb9b8b4af5aebefdb24e..ff4e3e8433046caa710c5f6b19a0055ebfbcd54f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py @@ -290,7 +290,6 @@ def for_tuple_as_enumerate_value(x_array): # 20. test for function in a class class ForwardContainsForLayer(paddle.nn.Layer): - def __init__(self): super(ForwardContainsForLayer, self).__init__() self.high = 5 @@ -326,8 +325,8 @@ def for_original_tuple(): # 23. for zip error @paddle.jit.to_static( - input_spec=[InputSpec(shape=[None, 10]), - InputSpec(shape=[None, 10])]) + input_spec=[InputSpec(shape=[None, 10]), InputSpec(shape=[None, 10])] +) def for_zip_error(x, y): for i, j in zip(x, y): a = i + j @@ -336,8 +335,8 @@ def for_zip_error(x, y): # 24. 
for zip @paddle.jit.to_static( - input_spec=[InputSpec(shape=[2, 10]), - InputSpec(shape=[2, 10])]) + input_spec=[InputSpec(shape=[2, 10]), InputSpec(shape=[2, 10])] +) def for_zip(x, y): for i, j in zip(x, y): a = i + j @@ -345,10 +344,12 @@ def for_zip(x, y): class TestTransformBase(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.set_input() self.set_test_func() @@ -357,7 +358,8 @@ class TestTransformBase(unittest.TestCase): def set_test_func(self): raise NotImplementedError( - "For Enumerate test should implement set_test_func") + "For Enumerate test should implement set_test_func" + ) def _run(self, to_static): program_translator.enable(to_static) @@ -372,22 +374,20 @@ class TestTransformBase(unittest.TestCase): class TestTransform(TestTransformBase): - def transformed_result_compare(self): dy_outs = self.get_dygraph_output() if not isinstance(dy_outs, (tuple, list)): - dy_outs = (dy_outs, ) + dy_outs = (dy_outs,) st_outs = self.get_static_output() if not isinstance(st_outs, (tuple, list)): - st_outs = (st_outs, ) + st_outs = (st_outs,) for x, y in zip(dy_outs, st_outs): np.testing.assert_allclose(x.numpy(), y.numpy(), rtol=1e-05) class TestTransformForOriginalList(TestTransform): - def _run(self, to_static): program_translator.enable(to_static) with fluid.dygraph.guard(): @@ -395,7 +395,6 @@ class TestTransformForOriginalList(TestTransform): class TestTransformError(TestTransformBase): - def transformed_error(self, etype): with self.assertRaises(etype): dy_out = self.get_dygraph_output() @@ -403,7 +402,6 @@ class TestTransformError(TestTransformBase): class TestForInRange(TestTransform): - def set_input(self): self.input = np.array([5]) @@ -415,7 +413,6 @@ class TestForInRange(TestTransform): class TestForIterList(TestTransform): - def set_test_func(self): self.dygraph_func = for_iter_list @@ -424,19 +421,16 @@ class TestForIterList(TestTransform): class TestForEnumerateSimple(TestForIterList): - def set_test_func(self): self.dygraph_func = for_enumerate_list class TestForInRangeWithBreak(TestForInRange): - def set_test_func(self): self.dygraph_func = for_in_range_with_break class TestForIterVarNumpy(TestTransform): - def set_input(self): self.input = np.array([1, 2, 3, 4, 5]) @@ -448,103 +442,86 @@ class TestForIterVarNumpy(TestTransform): class TestForEnumerateVarNumpy(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy class TestForEnumerateVarNumpyWithStart(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy_with_start class TestForEnumerateVarNumpyWithBreak(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy_with_break class TestForEnumerateVarNumpyWithContinue(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy_with_continue class TestForEnumerateVarNumpyWithStartAndBreak(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy_with_start_break class TestForEnumerateVarNumpyWithStartAndContinue(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_numpy_with_start_continue class TestForIterVar(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_iter_var class TestForIterVarIdx(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = 
for_iter_var_idx class TestForEnumerateVar(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var class TestForEnumerateVarWithNestedRange(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_enumerate_var_with_nested_range class TestForIterVarList(TestForInRange): - def set_test_func(self): self.dygraph_func = for_iter_var_list class TestForEnumerateVarList(TestForInRange): - def set_test_func(self): self.dygraph_func = for_enumerate_var_list class TestForTupleAsIterVar(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_tuple_as_iter_var class TestForTupleAsEnumerateIter(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_tuple_as_enumerate_iter class TestForTupleAsEnumerateValue(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = for_tuple_as_enumerate_value class TestForwardContainsForLayer(TestForIterVarNumpy): - def set_test_func(self): self.dygraph_func = ForwardContainsForLayer() class TestForOriginalList(TestTransformForOriginalList): - def set_test_func(self): self.dygraph_func = for_original_list @@ -553,7 +530,6 @@ class TestForOriginalList(TestTransformForOriginalList): class TestForOriginalTuple(TestTransformForOriginalList): - def set_test_func(self): self.dygraph_func = for_original_tuple @@ -562,7 +538,6 @@ class TestForOriginalTuple(TestTransformForOriginalList): class TestForZip(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py index 7a79eaf28db5b5359a65d75bad6ccc379e794c15..a245b607f881dd1e3e5b5853d0e1bb1617cb95e9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py @@ -45,7 +45,6 @@ def decorated_call_decorated(x): class DoubleDecorated(object): - @classmethod @declarative def double_decorated_func1(self, x): @@ -58,20 +57,19 @@ class DoubleDecorated(object): class TestFullNameDecorator(unittest.TestCase): - def test_run_success(self): x = np.ones([1, 2]).astype("float32") answer = np.zeros([1, 2]).astype("float32") with fluid.dygraph.guard(): - np.testing.assert_allclose(dygraph_decorated_func(x).numpy(), - answer, - rtol=1e-05) - np.testing.assert_allclose(jit_decorated_func(x).numpy(), - answer, - rtol=1e-05) - np.testing.assert_allclose(decorated_call_decorated(x).numpy(), - answer, - rtol=1e-05) + np.testing.assert_allclose( + dygraph_decorated_func(x).numpy(), answer, rtol=1e-05 + ) + np.testing.assert_allclose( + jit_decorated_func(x).numpy(), answer, rtol=1e-05 + ) + np.testing.assert_allclose( + decorated_call_decorated(x).numpy(), answer, rtol=1e-05 + ) with self.assertRaises(NotImplementedError): DoubleDecorated().double_decorated_func1(x) with self.assertRaises(NotImplementedError): @@ -79,12 +77,13 @@ class TestFullNameDecorator(unittest.TestCase): class TestImportProgramTranslator(unittest.TestCase): - def test_diff_pkg_same_cls(self): dygraph_prog_trans = fluid.dygraph.ProgramTranslator() - dy_to_stat_prog_trans = fluid.dygraph.dygraph_to_static.ProgramTranslator( + dy_to_stat_prog_trans = ( + fluid.dygraph.dygraph_to_static.ProgramTranslator() ) - full_pkg_prog_trans = fluid.dygraph.dygraph_to_static.program_translator.ProgramTranslator( + full_pkg_prog_trans = ( + 
fluid.dygraph.dygraph_to_static.program_translator.ProgramTranslator() ) self.assertEqual(dygraph_prog_trans, dy_to_stat_prog_trans) self.assertEqual(dygraph_prog_trans, full_pkg_prog_trans) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py index 9fdb6e7c6d36d7f842c1f2c321138a2aeeeac49f..37bc3f5dc12c7cf1887824fad720d0d70c394f2b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestFunctionSpec(unittest.TestCase): - def test_constructor(self): foo_spec = FunctionSpec(foo_func) args_name = foo_spec.args_name @@ -51,11 +50,9 @@ class TestFunctionSpec(unittest.TestCase): self.assertTrue(len(kwargs) == 0) # case 2: foo(a=10, b=20, d=4) - args, kwargs = foo_spec.unified_args_and_kwargs([], { - 'a': 10, - 'b': 20, - 'd': 4 - }) + args, kwargs = foo_spec.unified_args_and_kwargs( + [], {'a': 10, 'b': 20, 'd': 4} + ) self.assertTupleEqual(args, (10, 20, 1, 4)) self.assertTrue(len(kwargs) == 0) @@ -83,7 +80,8 @@ class TestFunctionSpec(unittest.TestCase): # case 1 foo_spec = FunctionSpec(foo_func, input_spec=[a_spec, b_spec]) input_with_spec, _ = foo_spec.args_to_input_spec( - (a_tensor, b_tensor, 1, 2), {}) + (a_tensor, b_tensor, 1, 2), {} + ) self.assertTrue(len(input_with_spec) == 4) self.assertTrue(input_with_spec[0] == a_spec) # a @@ -93,8 +91,9 @@ class TestFunctionSpec(unittest.TestCase): # case 2 foo_spec = FunctionSpec(foo_func, input_spec=[a_spec]) - input_with_spec, _ = foo_spec.args_to_input_spec((a_tensor, b_tensor), - {}) + input_with_spec, _ = foo_spec.args_to_input_spec( + (a_tensor, b_tensor), {} + ) self.assertTrue(len(input_with_spec) == 2) self.assertTrue(input_with_spec[0] == a_spec) # a self.assertTupleEqual(input_with_spec[1].shape, (4, 10)) # b.shape @@ -104,14 +103,15 @@ class TestFunctionSpec(unittest.TestCase): # assert kwargs is None if set `input_spec` foo_spec = FunctionSpec(foo_func, input_spec=[a_spec]) with self.assertRaises(ValueError): - input_with_spec = foo_spec.args_to_input_spec((a_tensor, b_tensor), - {'c': 4}) + input_with_spec = foo_spec.args_to_input_spec( + (a_tensor, b_tensor), {'c': 4} + ) # case 4 # assert len(args) >= len(self._input_spec) foo_spec = FunctionSpec(foo_func, input_spec=[a_spec, b_spec]) with self.assertRaises(ValueError): - input_with_spec = foo_spec.args_to_input_spec((a_tensor, ), {}) + input_with_spec = foo_spec.args_to_input_spec((a_tensor,), {}) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py index ae484bd72c68cc83233c8b095d17196b1e8bd936..38d93e3fba357f5fa4bf365dd3a7597ab7eac783 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py @@ -20,7 +20,6 @@ import tempfile class GradLayer(paddle.nn.Layer): - def __init__(self): super(GradLayer, self).__init__() @@ -33,7 +32,6 @@ class GradLayer(paddle.nn.Layer): class GradLinearLayer(paddle.nn.Layer): - def __init__(self): super(GradLinearLayer, self).__init__() self.linear = paddle.nn.Linear(5, 5, bias_attr=False) @@ -45,15 +43,13 @@ class GradLinearLayer(paddle.nn.Layer): for i in range(10): tmp = self.linear(tmp) out = tmp - dx = paddle.grad([out], [x], - None, - create_graph=True, - 
allow_unused=False)[0] + dx = paddle.grad( + [out], [x], None, create_graph=True, allow_unused=False + )[0] return dx class NoGradLinearLayer(paddle.nn.Layer): - def __init__(self): super(NoGradLinearLayer, self).__init__() self.linear = paddle.nn.Linear(5, 5, bias_attr=False) @@ -70,7 +66,6 @@ class NoGradLinearLayer(paddle.nn.Layer): class TestGrad(unittest.TestCase): - def setUp(self): self.func = GradLayer() self.x = paddle.ones(shape=[10, 2, 5], dtype='float32') @@ -90,17 +85,18 @@ class TestGrad(unittest.TestCase): class TestGradLinear(TestGrad): - def setUp(self): self.func = GradLinearLayer() self.x = paddle.ones(shape=[10, 2, 5], dtype='float32') self.x.stop_gradient = False self.temp_dir = tempfile.TemporaryDirectory() - self.infer_model_path = os.path.join(self.temp_dir.name, - 'double_grad_infer_model') - self.train_model_path = os.path.join(self.temp_dir.name, - 'double_grad_train_model') + self.infer_model_path = os.path.join( + self.temp_dir.name, 'double_grad_infer_model' + ) + self.train_model_path = os.path.join( + self.temp_dir.name, 'double_grad_train_model' + ) def tearDown(self): self.temp_dir.cleanup() @@ -118,9 +114,11 @@ class TestGradLinear(TestGrad): def test_save_train_program(self): grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, - grad_clip=grad_clip, - parameters=self.func.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.01, + grad_clip=grad_clip, + parameters=self.func.parameters(), + ) for i in range(10): out = self.func(self.x) avg_loss = paddle.mean(paddle.abs(out - 1)) @@ -138,17 +136,18 @@ class TestGradLinear(TestGrad): class TestNoGradLinear(TestGradLinear): - def setUp(self): self.func = NoGradLinearLayer() self.x = paddle.ones(shape=[10, 2, 5], dtype='float32') self.x.stop_gradient = False self.temp_dir = tempfile.TemporaryDirectory() - self.infer_model_path = os.path.join(self.temp_dir.name, - 'no_grad_infer_model') - self.train_model_path = os.path.join(self.temp_dir.name, - 'no_grad_train_model') + self.infer_model_path = os.path.join( + self.temp_dir.name, 'no_grad_infer_model' + ) + self.train_model_path = os.path.join( + self.temp_dir.name, 'no_grad_train_model' + ) def tearDown(self): self.temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py index 67232aa9a83bdbb58f9b380e2334b9c38f294e8e..b30338ce0e4067ba3662811a92d7425a07f4a70b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py @@ -22,7 +22,6 @@ np.random.seed(SEED) class SimpleNet(paddle.nn.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.linear1 = paddle.nn.Linear(10, 3) @@ -32,25 +31,30 @@ class SimpleNet(paddle.nn.Layer): out1 = self.linear1(x) out2 = self.linear2(out1) return [out1, out2] # 梯度为0 - #return [out1] # 梯度正常 - #return [out2, out1] # 梯度正常 + # return [out1] # 梯度正常 + # return [out2, out1] # 梯度正常 class TestGradientAggregationInDy2Static(unittest.TestCase): - def test_to_static(self): - def simplenet_grad(inp, to_static=False): net = SimpleNet() - if to_static: net = paddle.jit.to_static(net) + if to_static: + net = paddle.jit.to_static(net) loss = net(inp) loss[0].backward() return net.linear1.weight.grad - inp = paddle.to_tensor(np.random.randn(10, )).astype("float32") - 
np.testing.assert_allclose(simplenet_grad(inp, True).numpy(), - simplenet_grad(inp, False).numpy(), - rtol=1e-05) + inp = paddle.to_tensor( + np.random.randn( + 10, + ) + ).astype("float32") + np.testing.assert_allclose( + simplenet_grad(inp, True).numpy(), + simplenet_grad(inp, False).numpy(), + rtol=1e-05, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grid_generator.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grid_generator.py index caf349ecf8ebe7121b3263d91a74309cb3f71861..3f80130737f4255311cfb6d6c6c841f811aff6cf 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grid_generator.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grid_generator.py @@ -23,7 +23,6 @@ paddle.seed(2020) class GridGenerator(nn.Layer): - def __init__(self, in_channels, num_fiducial): super(GridGenerator, self).__init__() self.eps = 1e-6 @@ -32,15 +31,16 @@ class GridGenerator(nn.Layer): initializer = nn.initializer.Constant(value=0.0) param_attr = ParamAttr(learning_rate=0.0, initializer=initializer) bias_attr = ParamAttr(learning_rate=0.0, initializer=initializer) - self.fc = nn.Linear(in_channels, - 6, - weight_attr=param_attr, - bias_attr=bias_attr) - - @paddle.jit.to_static(input_spec=[ - paddle.static.InputSpec(shape=[None, 3, 32, 100], dtype='float32'), - paddle.static.InputSpec(shape=[32, 100], dtype='float32') - ]) + self.fc = nn.Linear( + in_channels, 6, weight_attr=param_attr, bias_attr=bias_attr + ) + + @paddle.jit.to_static( + input_spec=[ + paddle.static.InputSpec(shape=[None, 3, 32, 100], dtype='float32'), + paddle.static.InputSpec(shape=[32, 100], dtype='float32'), + ] + ) def forward(self, batch_C_prime, I_r_size): """ Generate the grid for the grid_sampler. 
@@ -54,7 +54,7 @@ class GridGenerator(nn.Layer): return C def build_C_paddle(self): - """ Return coordinates of fiducial points in I_r; C """ + """Return coordinates of fiducial points in I_r; C""" F = self.F ctrl_pts_x = paddle.linspace(-1.0, 1.0, int(F / 2)) ctrl_pts_y_top = -1 * paddle.ones([int(F / 2)]) @@ -68,16 +68,18 @@ class GridGenerator(nn.Layer): I_r_width, I_r_height = I_r_size I_r_grid_x = paddle.divide( (paddle.arange(-I_r_width, I_r_width, 2).astype('float32') + 1.0), - paddle.to_tensor(I_r_width).astype('float32')) + paddle.to_tensor(I_r_width).astype('float32'), + ) I_r_grid_y = paddle.divide( (paddle.arange(-I_r_height, I_r_height, 2).astype('float32') + 1.0), - paddle.to_tensor(I_r_height).astype('float32')) + paddle.to_tensor(I_r_height).astype('float32'), + ) P = paddle.stack(paddle.meshgrid(I_r_grid_x, I_r_grid_y), axis=2) P = paddle.transpose(P, perm=[1, 0, 2]) return P.reshape([-1, 2]) def build_inv_delta_C_paddle(self, C): - """ Return inv_delta_C which is needed to calculate T """ + """Return inv_delta_C which is needed to calculate T""" F = self.F hat_C = paddle.zeros((F, F), dtype='float32') for i in range(0, F): @@ -89,16 +91,19 @@ class GridGenerator(nn.Layer): hat_C[i, j] = r hat_C[j, i] = r hat_C = (hat_C**2) * paddle.log(hat_C) - delta_C = paddle.concat([ - paddle.concat([paddle.ones((F, 1)), C, hat_C], axis=1), - paddle.concat( - [paddle.zeros((2, 3)), - paddle.transpose(C, perm=[1, 0])], - axis=1), - paddle.concat([paddle.zeros( - (1, 3)), paddle.ones((1, F))], axis=1) - ], - axis=0) + delta_C = paddle.concat( + [ + paddle.concat([paddle.ones((F, 1)), C, hat_C], axis=1), + paddle.concat( + [paddle.zeros((2, 3)), paddle.transpose(C, perm=[1, 0])], + axis=1, + ), + paddle.concat( + [paddle.zeros((1, 3)), paddle.ones((1, F))], axis=1 + ), + ], + axis=0, + ) inv_delta_C = paddle.inverse(delta_C) return inv_delta_C @@ -111,8 +116,9 @@ class GridGenerator(nn.Layer): P_diff = P_tile - C_tile rbf_norm = paddle.norm(P_diff, p=2, axis=2, keepdim=False) - rbf = paddle.multiply(paddle.square(rbf_norm), - paddle.log(rbf_norm + eps)) + rbf = paddle.multiply( + paddle.square(rbf_norm), paddle.log(rbf_norm + eps) + ) P_hat = paddle.concat([paddle.ones((n, 1)), P, rbf], axis=1) return P_hat @@ -125,7 +131,6 @@ class GridGenerator(nn.Layer): class TestGridGenerator(unittest.TestCase): - def setUp(self): self.x = paddle.uniform(shape=[1, 20, 2], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py index d40c37f3fd666f329944cc52b224e401d867ad58..da69243cbe91a6165b77cad01fa86327a93b5e4e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py @@ -17,11 +17,37 @@ import unittest import paddle from paddle.fluid.dygraph.jit import declarative -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, +) from paddle.fluid.dygraph.dygraph_to_static.utils import Dygraph2StaticException import paddle.fluid.core as core -from ifelse_simple_func import NetWithControlFlowIf, add_fn, dyfunc_empty_nonlocal, dyfunc_ifelse_ret_int1, dyfunc_ifelse_ret_int2, dyfunc_ifelse_ret_int3, dyfunc_ifelse_ret_int4, dyfunc_with_if_else, dyfunc_with_if_else2, dyfunc_with_if_else3, dyfunc_with_if_else_with_list_geneator, fluid, if_tensor_case, if_with_and_or, 
if_with_and_or_1, if_with_and_or_2, if_with_and_or_3, if_with_and_or_4, if_with_class_var, loss_fn, nested_if_else, nested_if_else_2, nested_if_else_3 +from ifelse_simple_func import ( + NetWithControlFlowIf, + add_fn, + dyfunc_empty_nonlocal, + dyfunc_ifelse_ret_int1, + dyfunc_ifelse_ret_int2, + dyfunc_ifelse_ret_int3, + dyfunc_ifelse_ret_int4, + dyfunc_with_if_else, + dyfunc_with_if_else2, + dyfunc_with_if_else3, + dyfunc_with_if_else_with_list_geneator, + fluid, + if_tensor_case, + if_with_and_or, + if_with_and_or_1, + if_with_and_or_2, + if_with_and_or_3, + if_with_and_or_4, + if_with_class_var, + loss_fn, + nested_if_else, + nested_if_else_2, + nested_if_else_3, +) np.random.seed(1) @@ -32,7 +58,6 @@ else: class TestDy2staticException(unittest.TestCase): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = None @@ -75,49 +100,42 @@ class TestDygraphIfElse(unittest.TestCase): class TestDygraphIfElse2(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_with_if_else2 class TestDygraphIfElse3(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_with_if_else3 class TestDygraphIfElse4(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_empty_nonlocal class TestDygraphIfElseWithListGenerator(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_with_if_else_with_list_geneator class TestDygraphNestedIfElse(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = nested_if_else class TestDygraphNestedIfElse2(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = nested_if_else_2 class TestDygraphNestedIfElse3(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = nested_if_else_3 @@ -150,7 +168,6 @@ def dyfunc_ifExp_with_while(x): class TestDygraphIfElse6(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_ifExp_with_while @@ -175,56 +192,48 @@ def dyfunc_ifExp(x): class TestDygraphIfElse7(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = dyfunc_ifExp class TestDygraphIfElseWithAndOr(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_and_or class TestDygraphIfElseWithAndOr1(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_and_or_1 class TestDygraphIfElseWithAndOr2(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_and_or_2 class TestDygraphIfElseWithAndOr3(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_and_or_3 class TestDygraphIfElseWithAndOr4(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_and_or_4 class TestDygraphIfElseWithClassVar(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_with_class_var class TestDygraphIfTensor(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = if_tensor_case @@ -279,14 +288,12 @@ def 
call_external_func(x, label=None): class TestAst2FuncWithExternalFunc(TestDygraphIfElse): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.dyfunc = call_external_func class NetWithExternalFunc(fluid.dygraph.Layer): - @declarative def forward(self, x, label=None): if paddle.mean(x) < 0: @@ -307,14 +314,12 @@ def softmax(x): class TestNetWithExternalFunc(TestDygraphIfElseNet): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.Net = NetWithExternalFunc class DiffModeNet1(paddle.nn.Layer): - def __init__(self, mode): super(DiffModeNet1, self).__init__() self.mode = mode @@ -331,7 +336,6 @@ class DiffModeNet1(paddle.nn.Layer): class DiffModeNet2(paddle.nn.Layer): - def __init__(self, mode): super(DiffModeNet2, self).__init__() self.mode = mode @@ -371,27 +375,28 @@ class TestDiffModeNet(unittest.TestCase): def test_train_mode(self): self.assertTrue( - (self._run(mode='train', - to_static=True) == self._run(mode='train', - to_static=False)).all()) + ( + self._run(mode='train', to_static=True) + == self._run(mode='train', to_static=False) + ).all() + ) def test_infer_mode(self): self.assertTrue( - (self._run(mode='infer', - to_static=True) == self._run(mode='infer', - to_static=False)).all()) + ( + self._run(mode='infer', to_static=True) + == self._run(mode='infer', to_static=False) + ).all() + ) class TestDiffModeNet2(TestDiffModeNet): - def init_net(self): self.Net = DiffModeNet2 class TestNewVarCreateInOneBranch(unittest.TestCase): - def test_var_used_in_another_for(self): - def case_func(training): # targets and targets_list is dynamically defined by training if training: @@ -414,7 +419,6 @@ class TestNewVarCreateInOneBranch(unittest.TestCase): class TestDy2StIfElseRetInt1(unittest.TestCase): - def setUp(self): self.x = np.random.random([5]).astype('float32') self.dyfunc = dyfunc_ifelse_ret_int1 @@ -433,7 +437,6 @@ class TestDy2StIfElseRetInt1(unittest.TestCase): class TestDy2StIfElseRetInt2(TestDy2staticException): - def setUp(self): self.x = np.random.random([5]).astype('float32') self.error = "Your if/else have different number of return value." 
@@ -441,7 +444,6 @@ class TestDy2StIfElseRetInt2(TestDy2staticException): class TestDy2StIfElseRetInt3(TestDy2StIfElseRetInt1): - def setUp(self): self.x = np.random.random([5]).astype('float32') self.dyfunc = dyfunc_ifelse_ret_int3 @@ -452,7 +454,6 @@ class TestDy2StIfElseRetInt3(TestDy2StIfElseRetInt1): class TestDy2StIfElseRetInt4(TestDy2StIfElseRetInt1): - def setUp(self): self.x = np.random.random([5]).astype('float32') self.dyfunc = dyfunc_ifelse_ret_int4 @@ -472,12 +473,11 @@ class TestDy2StIfElseRetInt4(TestDy2StIfElseRetInt1): class IfElseNet(paddle.nn.Layer): - def __init__(self): super(IfElseNet, self).__init__() - self.param = self.create_parameter(shape=[3, 2], - dtype='float32', - is_bias=False) + self.param = self.create_parameter( + shape=[3, 2], dtype='float32', is_bias=False + ) @paddle.jit.to_static def forward(self, a, b, c): @@ -493,7 +493,6 @@ class IfElseNet(paddle.nn.Layer): class TestDy2StIfElseBackward(unittest.TestCase): - def test_run_backward(self): a = paddle.randn((4, 3), dtype='float32') a.stop_gradient = False @@ -506,9 +505,9 @@ class TestDy2StIfElseBackward(unittest.TestCase): net.train() out = net(a, b, c) out.backward() - np.testing.assert_allclose((b + net.param).numpy(), - out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + (b + net.param).numpy(), out.numpy(), rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py index dc3b4dd74a60ae76e1062850aa7be79f60337a11..3b46602cac2ab5e880ac50e37a54bbb4cc4f0581 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py @@ -31,13 +31,11 @@ import paddle.nn as nn class SimpleReturnLayer(nn.Layer): - def forward(self, x): return x class AddAttrLayer(nn.Layer): - def __init__(self): super(AddAttrLayer, self).__init__() self.attr = None @@ -48,21 +46,19 @@ class AddAttrLayer(nn.Layer): class IsInstanceLayer(nn.Layer): - def __init__(self, layer): super(IsInstanceLayer, self).__init__() self.layer = layer @paddle.jit.to_static def forward(self, x): - if isinstance(self.layer, (AddAttrLayer, )): + if isinstance(self.layer, (AddAttrLayer,)): self.layer.attr = x res = self.layer(x) return res class SequentialLayer(nn.Layer): - def __init__(self, layers): super(SequentialLayer, self).__init__() self.layers = nn.LayerList(layers) @@ -88,7 +84,6 @@ def train(model, to_static): class TestIsinstance(unittest.TestCase): - def test_isinstance_simple_return_layer(self): model = IsInstanceLayer(SimpleReturnLayer()) self._test_model(model) @@ -108,11 +103,12 @@ class TestIsinstance(unittest.TestCase): def _test_model(self, model): st_out = train(model, to_static=True) dy_out = train(model, to_static=False) - np.testing.assert_allclose(dy_out, - st_out, - rtol=1e-05, - err_msg='dy_out:\n {}\n st_out:\n{}'.format( - dy_out, st_out)) + np.testing.assert_allclose( + dy_out, + st_out, + rtol=1e-05, + err_msg='dy_out:\n {}\n st_out:\n{}'.format(dy_out, st_out), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_jit_property_save.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_jit_property_save.py index 481d30b8ba2001cb3bfa8cc9ef60a139ab648c1c..0d26ab51e1a51ad3a98e2575a6983c4e4bdf52e9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_jit_property_save.py +++ 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_jit_property_save.py @@ -17,8 +17,7 @@ import paddle class TestPropertySave(unittest.TestCase): - """test jit property save - """ + """test jit property save""" def setUp(self): a = paddle.framework.core.Property() @@ -44,8 +43,7 @@ class TestPropertySave(unittest.TestCase): self.a.get_float(1) def test_set(self): - """test propety set. - """ + """test propety set.""" try: a = paddle.framework.core.Property() a.set_float('float', 10.0) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 7ffbfbb6533fd059e42a307391e07dd564b5882c..15d18fe838a055fc99cce7d0b4ec8ce0a55f8c14 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -38,30 +38,33 @@ program_translator = ProgramTranslator() input_specs = [ paddle.static.InputSpec([None, None], 'int64'), paddle.static.InputSpec([None, None], 'int64'), - paddle.static.InputSpec([None], 'int64') + paddle.static.InputSpec([None], 'int64'), ] class DynamicGRU(fluid.dygraph.Layer): - - def __init__(self, - size, - h_0=None, - param_attr=None, - bias_attr=None, - is_reverse=False, - gate_activation='sigmoid', - candidate_activation='tanh', - origin_mode=False, - init_size=None): + def __init__( + self, + size, + h_0=None, + param_attr=None, + bias_attr=None, + is_reverse=False, + gate_activation='sigmoid', + candidate_activation='tanh', + origin_mode=False, + init_size=None, + ): super(DynamicGRU, self).__init__() - self.gru_unit = GRUUnit(size * 3, - param_attr=param_attr, - bias_attr=bias_attr, - activation=candidate_activation, - gate_activation=gate_activation, - origin_mode=origin_mode) + self.gru_unit = GRUUnit( + size * 3, + param_attr=param_attr, + bias_attr=bias_attr, + activation=candidate_activation, + gate_activation=gate_activation, + origin_mode=origin_mode, + ) self.size = size self.h_0 = h_0 @@ -81,15 +84,16 @@ class DynamicGRU(fluid.dygraph.Layer): j = i # input_ = inputs[:, j:j+1, :] # original code - input_ = fluid.layers.slice(inputs, - axes=[1], - starts=[j], - ends=[j + 1]) - input_ = fluid.layers.reshape(input_, [-1, input_.shape[2]], - inplace=False) + input_ = fluid.layers.slice( + inputs, axes=[1], starts=[j], ends=[j + 1] + ) + input_ = fluid.layers.reshape( + input_, [-1, input_.shape[2]], inplace=False + ) hidden, reset, gate = self.gru_unit(input_, hidden) - hidden_ = fluid.layers.reshape(hidden, [-1, 1, hidden.shape[1]], - inplace=False) + hidden_ = fluid.layers.reshape( + hidden, [-1, 1, hidden.shape[1]], inplace=False + ) res.append(hidden_) if self.is_reverse: @@ -99,7 +103,6 @@ class DynamicGRU(fluid.dygraph.Layer): class BiGRU(fluid.dygraph.Layer): - def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None): super(BiGRU, self).__init__() @@ -107,38 +110,54 @@ class BiGRU(fluid.dygraph.Layer): input_dim=input_dim, output_dim=grnn_hidden_dim * 3, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(low=-init_bound, - high=init_bound), + initializer=fluid.initializer.Uniform( + low=-init_bound, high=init_bound + ), regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4))) + regularization_coeff=1e-4 + ), + ), + ) self.gru = DynamicGRU( size=grnn_hidden_dim, h_0=h_0, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(low=-init_bound, - high=init_bound), + initializer=fluid.initializer.Uniform( + 
low=-init_bound, high=init_bound + ), regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4))) + regularization_coeff=1e-4 + ), + ), + ) self.pre_gru_r = Linear( input_dim=input_dim, output_dim=grnn_hidden_dim * 3, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(low=-init_bound, - high=init_bound), + initializer=fluid.initializer.Uniform( + low=-init_bound, high=init_bound + ), regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4))) + regularization_coeff=1e-4 + ), + ), + ) self.gru_r = DynamicGRU( size=grnn_hidden_dim, is_reverse=True, h_0=h_0, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(low=-init_bound, - high=init_bound), + initializer=fluid.initializer.Uniform( + low=-init_bound, high=init_bound + ), regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4))) + regularization_coeff=1e-4 + ), + ), + ) def forward(self, input_feature): res_pre_gru = self.pre_gru(input_feature) @@ -152,7 +171,6 @@ class BiGRU(fluid.dygraph.Layer): class LinearChainCRF(fluid.dygraph.Layer): - def __init__(self, param_attr, size=None, is_test=False, dtype='float32'): super(LinearChainCRF, self).__init__() @@ -163,7 +181,8 @@ class LinearChainCRF(fluid.dygraph.Layer): self._transition = self.create_parameter( attr=self._param_attr, shape=[self._size + 2, self._size], - dtype=self._dtype) + dtype=self._dtype, + ) @property def weight(self): @@ -176,41 +195,46 @@ class LinearChainCRF(fluid.dygraph.Layer): def forward(self, input, label, length=None): if _non_static_mode(): _, _, _, log_likelihood = _legacy_C_ops.linear_chain_crf( - input, self._transition, label, length, "is_test", - self._is_test) + input, self._transition, label, length, "is_test", self._is_test + ) return log_likelihood alpha = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) emission_exps = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) transition_exps = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) log_likelihood = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) this_inputs = { "Emission": [input], "Transition": self._transition, - "Label": [label] + "Label": [label], } if length is not None: this_inputs['Length'] = [length] - self._helper.append_op(type='linear_chain_crf', - inputs=this_inputs, - outputs={ - "Alpha": [alpha], - "EmissionExps": [emission_exps], - "TransitionExps": transition_exps, - "LogLikelihood": log_likelihood - }, - attrs={ - "is_test": self._is_test, - }) + self._helper.append_op( + type='linear_chain_crf', + inputs=this_inputs, + outputs={ + "Alpha": [alpha], + "EmissionExps": [emission_exps], + "TransitionExps": transition_exps, + "LogLikelihood": log_likelihood, + }, + attrs={ + "is_test": self._is_test, + }, + ) return log_likelihood class CRFDecoding(fluid.dygraph.Layer): - def __init__(self, param_attr, size=None, is_test=False, dtype='float32'): super(CRFDecoding, self).__init__() @@ -221,7 +245,8 @@ class CRFDecoding(fluid.dygraph.Layer): self._transition = self.create_parameter( attr=self._param_attr, shape=[self._size + 2, self._size], - dtype=self._dtype) + dtype=self._dtype, + ) @property def weight(self): @@ -233,33 +258,35 @@ class CRFDecoding(fluid.dygraph.Layer): def forward(self, input, label=None, length=None): if _non_static_mode(): - return _legacy_C_ops.crf_decoding(input, self._transition, label, - 
length, "is_test", self._is_test) + return _legacy_C_ops.crf_decoding( + input, self._transition, label, length, "is_test", self._is_test + ) viterbi_path = self._helper.create_variable_for_type_inference( - dtype=self._dtype) + dtype=self._dtype + ) this_inputs = { "Emission": [input], "Transition": self._transition, - "Label": label + "Label": label, } if length is not None: this_inputs['Length'] = [length] - self._helper.append_op(type='crf_decoding', - inputs=this_inputs, - outputs={"ViterbiPath": [viterbi_path]}, - attrs={ - "is_test": self._is_test, - }) + self._helper.append_op( + type='crf_decoding', + inputs=this_inputs, + outputs={"ViterbiPath": [viterbi_path]}, + attrs={ + "is_test": self._is_test, + }, + ) return viterbi_path class ChunkEval(fluid.dygraph.Layer): - - def __init__(self, - num_chunk_types, - chunk_scheme, - excluded_chunk_types=None): + def __init__( + self, num_chunk_types, chunk_scheme, excluded_chunk_types=None + ): super(ChunkEval, self).__init__() self.num_chunk_types = num_chunk_types self.chunk_scheme = chunk_scheme @@ -267,54 +294,69 @@ class ChunkEval(fluid.dygraph.Layer): def forward(self, input, label, seq_length=None): if _non_static_mode(): - return _legacy_C_ops.chunk_eval(input, label, seq_length, - "num_chunk_types", - self.num_chunk_types, - "chunk_scheme", self.chunk_scheme, - "excluded_chunk_types", - self.excluded_chunk_types or []) + return _legacy_C_ops.chunk_eval( + input, + label, + seq_length, + "num_chunk_types", + self.num_chunk_types, + "chunk_scheme", + self.chunk_scheme, + "excluded_chunk_types", + self.excluded_chunk_types or [], + ) precision = self._helper.create_variable_for_type_inference( - dtype="float32") + dtype="float32" + ) recall = self._helper.create_variable_for_type_inference( - dtype="float32") + dtype="float32" + ) f1_score = self._helper.create_variable_for_type_inference( - dtype="float32") + dtype="float32" + ) num_infer_chunks = self._helper.create_variable_for_type_inference( - dtype="int64") + dtype="int64" + ) num_label_chunks = self._helper.create_variable_for_type_inference( - dtype="int64") + dtype="int64" + ) num_correct_chunks = self._helper.create_variable_for_type_inference( - dtype="int64") + dtype="int64" + ) this_input = {"Inference": [input], "Label": [label]} if seq_length is not None: this_input["SeqLength"] = [seq_length] - self._helper.append_op(type='chunk_eval', - inputs=this_input, - outputs={ - "Precision": [precision], - "Recall": [recall], - "F1-Score": [f1_score], - "NumInferChunks": [num_infer_chunks], - "NumLabelChunks": [num_label_chunks], - "NumCorrectChunks": [num_correct_chunks] - }, - attrs={ - "num_chunk_types": - self.num_chunk_types, - "chunk_scheme": - self.chunk_scheme, - "excluded_chunk_types": - self.excluded_chunk_types or [] - }) - return (precision, recall, f1_score, num_infer_chunks, num_label_chunks, - num_correct_chunks) + self._helper.append_op( + type='chunk_eval', + inputs=this_input, + outputs={ + "Precision": [precision], + "Recall": [recall], + "F1-Score": [f1_score], + "NumInferChunks": [num_infer_chunks], + "NumLabelChunks": [num_label_chunks], + "NumCorrectChunks": [num_correct_chunks], + }, + attrs={ + "num_chunk_types": self.num_chunk_types, + "chunk_scheme": self.chunk_scheme, + "excluded_chunk_types": self.excluded_chunk_types or [], + }, + ) + return ( + precision, + recall, + f1_score, + num_infer_chunks, + num_label_chunks, + num_correct_chunks, + ) class LexNet(fluid.dygraph.Layer): - def __init__(self, args, length=None): super(LexNet, 
self).__init__() """ @@ -330,21 +372,26 @@ class LexNet(fluid.dygraph.Layer): self.vocab_size = args.vocab_size self.num_labels = args.num_labels self.grnn_hidden_dim = args.grnn_hidden_dim - self.emb_lr = args.emb_learning_rate if 'emb_learning_rate' in dir( - args) else 1.0 - self.crf_lr = args.emb_learning_rate if 'crf_learning_rate' in dir( - args) else 1.0 + self.emb_lr = ( + args.emb_learning_rate if 'emb_learning_rate' in dir(args) else 1.0 + ) + self.crf_lr = ( + args.emb_learning_rate if 'crf_learning_rate' in dir(args) else 1.0 + ) self.bigru_num = args.bigru_num self.init_bound = 0.1 self.word_embedding = Embedding( size=[self.vocab_size, self.word_emb_dim], dtype='float32', - param_attr=fluid.ParamAttr(learning_rate=self.emb_lr, - name="word_emb", - initializer=fluid.initializer.Uniform( - low=-self.init_bound, - high=self.init_bound))) + param_attr=fluid.ParamAttr( + learning_rate=self.emb_lr, + name="word_emb", + initializer=fluid.initializer.Uniform( + low=-self.init_bound, high=self.init_bound + ), + ), + ) h_0 = np.zeros((args.batch_size, self.grnn_hidden_dim), dtype="float32") h_0 = to_variable(h_0) @@ -355,34 +402,51 @@ class LexNet(fluid.dygraph.Layer): self.bigru_units.append( self.add_sublayer( "bigru_units%d" % i, - BiGRU(self.grnn_hidden_dim, - self.grnn_hidden_dim, - self.init_bound, - h_0=h_0))) + BiGRU( + self.grnn_hidden_dim, + self.grnn_hidden_dim, + self.init_bound, + h_0=h_0, + ), + ) + ) else: self.bigru_units.append( self.add_sublayer( "bigru_units%d" % i, - BiGRU(self.grnn_hidden_dim * 2, - self.grnn_hidden_dim, - self.init_bound, - h_0=h_0))) - - self.fc = Linear(input_dim=self.grnn_hidden_dim * 2, - output_dim=self.num_labels, - param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform( - low=-self.init_bound, high=self.init_bound), - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=1e-4))) - - self.linear_chain_crf = LinearChainCRF(param_attr=fluid.ParamAttr( - name='linear_chain_crfw', learning_rate=self.crf_lr), - size=self.num_labels) - - self.crf_decoding = CRFDecoding(param_attr=fluid.ParamAttr( - name='crfw', learning_rate=self.crf_lr), - size=self.num_labels) + BiGRU( + self.grnn_hidden_dim * 2, + self.grnn_hidden_dim, + self.init_bound, + h_0=h_0, + ), + ) + ) + + self.fc = Linear( + input_dim=self.grnn_hidden_dim * 2, + output_dim=self.num_labels, + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Uniform( + low=-self.init_bound, high=self.init_bound + ), + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=1e-4 + ), + ), + ) + + self.linear_chain_crf = LinearChainCRF( + param_attr=fluid.ParamAttr( + name='linear_chain_crfw', learning_rate=self.crf_lr + ), + size=self.num_labels, + ) + + self.crf_decoding = CRFDecoding( + param_attr=fluid.ParamAttr(name='crfw', learning_rate=self.crf_lr), + size=self.num_labels, + ) # share weight self.crf_decoding.weight = self.linear_chain_crf.weight @@ -400,9 +464,9 @@ class LexNet(fluid.dygraph.Layer): emission = self.fc(bigru_output) - crf_cost = self.linear_chain_crf(input=emission, - label=target, - length=length) + crf_cost = self.linear_chain_crf( + input=emission, label=target, length=length + ) avg_cost = paddle.mean(x=crf_cost) crf_decode = self.crf_decoding(input=emission, length=length) return avg_cost, crf_decode @@ -429,10 +493,16 @@ def get_random_input_data(batch_size, vocab_size, num_labels, max_seq_len=64): batch, init_lens = [], [] for i in range(iter_num * batch_size): cur_len = local_random.randint(3, max_seq_len) - 
word_ids = local_random.randint(0, vocab_size, - [cur_len]).astype('int64').tolist() - label_ids = local_random.randint( - 0, num_labels, [cur_len]).astype('int64').tolist() + word_ids = ( + local_random.randint(0, vocab_size, [cur_len]) + .astype('int64') + .tolist() + ) + label_ids = ( + local_random.randint(0, num_labels, [cur_len]) + .astype('int64') + .tolist() + ) batch.append((word_ids, label_ids)) init_lens.append(cur_len) if len(batch) == batch_size: @@ -457,9 +527,9 @@ def get_random_input_data(batch_size, vocab_size, num_labels, max_seq_len=64): def create_dataloader(reader, place): - data_loader = fluid.io.DataLoader.from_generator(capacity=16, - use_double_buffer=True, - iterable=True) + data_loader = fluid.io.DataLoader.from_generator( + capacity=16, use_double_buffer=True, iterable=True + ) data_loader.set_sample_list_generator(reader, places=place) @@ -467,11 +537,13 @@ def create_dataloader(reader, place): class TestLACModel(unittest.TestCase): - def setUp(self): self.args = Args() - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'inference') self.model_save_prefix = os.path.join(self.model_save_dir, 'lac') @@ -481,22 +553,28 @@ class TestLACModel(unittest.TestCase): def train(self, args, to_static): program_translator.enable(to_static) - place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - reader = get_random_input_data(args.batch_size, args.vocab_size, - args.num_labels) + reader = get_random_input_data( + args.batch_size, args.vocab_size, args.num_labels + ) train_loader = create_dataloader(reader, place) model = LexNet(args) optimizer = fluid.optimizer.AdamOptimizer( learning_rate=args.base_learning_rate, - parameter_list=model.parameters()) - chunk_eval = ChunkEval(int(math.ceil((args.num_labels - 1) / 2.0)), - "IOB") + parameter_list=model.parameters(), + ) + chunk_eval = ChunkEval( + int(math.ceil((args.num_labels - 1) / 2.0)), "IOB" + ) step = 0 chunk_evaluator = fluid.metrics.ChunkEvaluator() @@ -517,11 +595,16 @@ class TestLACModel(unittest.TestCase): end_time = time.time() if step % args.print_steps == 0: - (precision, recall, f1_score, num_infer_chunks, - num_label_chunks, - num_correct_chunks) = chunk_eval(input=crf_decode, - label=targets, - seq_length=length) + ( + precision, + recall, + f1_score, + num_infer_chunks, + num_label_chunks, + num_correct_chunks, + ) = chunk_eval( + input=crf_decode, label=targets, seq_length=length + ) outputs = [avg_cost, precision, recall, f1_score] avg_cost, precision, recall, f1_score = [ np.mean(x.numpy()) for x in outputs @@ -529,8 +612,15 @@ class TestLACModel(unittest.TestCase): print( "[train] step = %d, loss = %f, P: %f, R: %f, F1: %f, elapsed time %f" - % (step, avg_cost, precision, recall, f1_score, - end_time - start_time)) + % ( + step, + avg_cost, + precision, + recall, + f1_score, + end_time - start_time, + ) + ) step += 1 # save inference model @@ -539,10 +629,12 @@ class TestLACModel(unittest.TestCase): layer=model, path=self.model_save_prefix, input_spec=[input_specs[0], input_specs[-1]], - output_spec=[crf_decode]) + 
output_spec=[crf_decode], + ) else: - fluid.dygraph.save_dygraph(model.state_dict(), - self.dy_param_path) + fluid.dygraph.save_dygraph( + model.state_dict(), self.dy_param_path + ) return np.array(loss_data) @@ -554,14 +646,16 @@ class TestLACModel(unittest.TestCase): st_out, rtol=1e-05, err_msg='dygraph output:\n{},\nstatic output:\n {}.'.format( - dy_out, st_out)) + dy_out, st_out + ), + ) # Prediction needs trained models, so put `test_predict` at last of `test_train` # self.verify_predict() def verify_predict(self): - reader = get_random_input_data(self.args.batch_size, - self.args.vocab_size, - self.args.num_labels) + reader = get_random_input_data( + self.args.batch_size, self.args.vocab_size, self.args.num_labels + ) for batch in reader(): batch = [np.vstack(var) for var in zip(*batch)] dy_pre = self.predict_dygraph(batch) @@ -580,8 +674,9 @@ class TestLACModel(unittest.TestCase): model.set_dict(model_dict) model.eval() - _, pred_res = model(to_variable(words), to_variable(targets), - to_variable(length)) + _, pred_res = model( + to_variable(words), to_variable(targets), to_variable(length) + ) return pred_res.numpy() @@ -593,19 +688,23 @@ class TestLACModel(unittest.TestCase): paddle.enable_static() exe = fluid.Executor(self.place) # load inference model - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) words, targets, length = batch - pred_res = exe.run(inference_program, - feed={ - feed_target_names[0]: words, - feed_target_names[1]: length - }, - fetch_list=fetch_targets) + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: words, feed_target_names[1]: length}, + fetch_list=fetch_targets, + ) return pred_res[0] def predict_dygraph_jit(self, batch): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py index 121954edf98654b7c4c0c965d5c3510fb155e434..d0b45a050bc164b96ed3ef426624d0c80e27bae3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py @@ -79,18 +79,23 @@ def call_lambda_with_ifExpr2(x): class TestLambda(unittest.TestCase): - def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.x = np.array([1, 3]).astype('float32') - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.init_func() def init_func(self): self.dyfuncs = [ - call_lambda_as_func, call_lambda_directly, call_lambda_in_func, - call_lambda_with_ifExpr, call_lambda_with_ifExpr2 + call_lambda_as_func, + call_lambda_directly, + call_lambda_in_func, + call_lambda_with_ifExpr, + call_lambda_with_ifExpr2, ] def run_static(self, func): @@ -109,7 +114,8 @@ class TestLambda(unittest.TestCase): def test_ast_to_func(self): for func in self.dyfuncs: self.assertTrue( - (self.run_dygraph(func) == self.run_static(func)).all()) + (self.run_dygraph(func) == self.run_static(func)).all() + ) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py index b2a23b7d86faab72b918bf5132636822847b957b..ad4aa8e173f61c56eec3dafed75ed9b2711bef99 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py @@ -24,13 +24,14 @@ def forward_post_hook1(layer, input, output): def forward_pre_hook1(layer, input): - input_return = (input[0] * 2, ) + input_return = (input[0] * 2,) return input_return class SimpleNet(paddle.nn.Layer): - - def __init__(self, ): + def __init__( + self, + ): super(SimpleNet, self).__init__() self.fc1 = paddle.nn.Linear(10, 10) # sublayer1 register post hook @@ -53,7 +54,6 @@ class SimpleNet(paddle.nn.Layer): class TestNestLayerHook(unittest.TestCase): - def setUp(self): paddle.seed(2022) self.x = paddle.randn([4, 10]) @@ -90,12 +90,15 @@ class TestNestLayerHook(unittest.TestCase): dy_out, rtol=1e-05, err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out)) + dy_out, st_out + ), + ) np.testing.assert_allclose( st_out, load_out, rtol=1e-05, - err_msg='load_out is {}\nstatic_res is {}'.format(load_out, st_out)) + err_msg='load_out is {}\nstatic_res is {}'.format(load_out, st_out), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py index c2a7fb2d1b9df26d016e623f5453a2d56071dc3e..01e5f1e1074cc828a0755bb282d8f95156f42954 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py @@ -42,10 +42,12 @@ def len_with_lod_tensor_array(x): class TestLen(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.x_data = np.random.random([10, 16]).astype('float32') self.init_func() @@ -70,7 +72,6 @@ class TestLen(unittest.TestCase): class TestLenWithTensorArray(TestLen): - def init_func(self): self.func = len_with_lod_tensor_array @@ -80,11 +81,13 @@ class TestLenWithTensorArray(TestLen): def len_with_selected_rows(place): block = fluid.default_main_program().global_block() # create selected_rows variable - var = block.create_var(name="X", - dtype="float32", - shape=[-1], - persistable=True, - type=fluid.core.VarDesc.VarType.SELECTED_ROWS) + var = block.create_var( + name="X", + dtype="float32", + shape=[-1], + persistable=True, + type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + ) # y is Variable(SelectedRows) y = fluid.layers.merge_selected_rows(var) y_len = convert_call(len)(y) @@ -110,14 +113,17 @@ def len_with_selected_rows(place): class TestLenWithSelectedRows(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def test_len(self): selected_rows_var_len, var_tensor_len = len_with_selected_rows( - self.place) + self.place + ) self.assertEqual(selected_rows_var_len, var_tensor_len) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py index 
a1f681193e515b6ff72b44a5116bd49619eb7e24..58ef815b82c713e63c7bd570069dbb8a7ec7d7a1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py @@ -42,7 +42,8 @@ def test_list_append_in_if(x): a.append(x) else: a.append( - fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int64")) + fluid.layers.fill_constant(shape=[1, 2], value=9, dtype="int64") + ) # TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray. return a[0] @@ -98,9 +99,9 @@ def test_list_append_in_for_loop_with_concat(x, iter_num): def test_list_append_in_while_loop(x, iter_num): x = fluid.dygraph.to_variable(x) - iter_num = fluid.layers.fill_constant(shape=[1], - value=iter_num, - dtype="int32") + iter_num = fluid.layers.fill_constant( + shape=[1], value=iter_num, dtype="int32" + ) a = [] i = 0 while i < iter_num: @@ -111,9 +112,9 @@ def test_list_append_in_while_loop(x, iter_num): def test_list_append_in_while_loop_with_stack(x, iter_num): x = fluid.dygraph.to_variable(x) - iter_num = fluid.layers.fill_constant(shape=[1], - value=iter_num, - dtype="int32") + iter_num = fluid.layers.fill_constant( + shape=[1], value=iter_num, dtype="int32" + ) a = [] i = 0 while i < iter_num.numpy()[0]: @@ -180,9 +181,9 @@ def test_list_pop_in_for_loop(x, iter_num): def test_list_pop_in_while_loop(x, iter_num): x = fluid.dygraph.to_variable(x) - iter_num = fluid.layers.fill_constant(shape=[1], - value=iter_num, - dtype="int32") + iter_num = fluid.layers.fill_constant( + shape=[1], value=iter_num, dtype="int32" + ) a = [] b = [x] b.append(x) @@ -199,10 +200,12 @@ def test_list_pop_in_while_loop(x, iter_num): class TestListWithoutControlFlow(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.init_data() self.init_dygraph_func() @@ -252,24 +255,25 @@ class TestListWithoutControlFlow(unittest.TestCase): dy_res, rtol=1e-05, err_msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_res, stat_res)) + dy_res, stat_res + ), + ) class TestListInIf(TestListWithoutControlFlow): - def init_dygraph_func(self): self.all_dygraph_funcs = [test_list_append_in_if] class TestListInWhileLoop(TestListWithoutControlFlow): - def init_data(self): self.input = np.random.random((3)).astype('int32') self.iter_num = 3 def init_dygraph_func(self): self.all_dygraph_funcs = [ - test_list_append_in_while_loop, test_list_pop_in_while_loop + test_list_append_in_while_loop, + test_list_pop_in_while_loop, ] def train(self, to_static=False): @@ -284,21 +288,19 @@ class TestListInWhileLoop(TestListWithoutControlFlow): class TestListInWhileLoopWithStack(TestListInWhileLoop): - def init_dygraph_func(self): self.all_dygraph_funcs = [test_list_append_in_while_loop_with_stack] class TestListInForLoop(TestListInWhileLoop): - def init_dygraph_func(self): self.all_dygraph_funcs = [ - test_list_append_in_for_loop, test_list_pop_in_for_loop + test_list_append_in_for_loop, + test_list_pop_in_for_loop, ] class TestListInForLoopWithConcat(TestListInWhileLoopWithStack): - def init_dygraph_func(self): self.all_dygraph_funcs = [ test_list_append_in_for_loop_with_concat, @@ -306,11 +308,10 @@ class TestListInForLoopWithConcat(TestListInWhileLoopWithStack): class TestListInForLoopWithSubscript(TestListWithoutControlFlow): - def init_dygraph_func(self): self.all_dygraph_funcs = [ 
test_list_append_in_for_subscript, - test_list_append_in_while_loop_subscript + test_list_append_in_while_loop_subscript, ] def init_data(self): @@ -318,7 +319,6 @@ class TestListInForLoopWithSubscript(TestListWithoutControlFlow): class ListWithCondNet(paddle.nn.Layer): - def __init__(self): super(ListWithCondNet, self).__init__() @@ -342,13 +342,12 @@ class ListWithCondNet(paddle.nn.Layer): class TestListWithCondGradInferVarType(unittest.TestCase): - def test_to_static(self): net = ListWithCondNet() x = paddle.to_tensor([2, 3, 4], dtype='float32') index = paddle.to_tensor([1]) res = net(x, index) - self.assertEqual(res[0], 48.) + self.assertEqual(res[0], 48.0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logging_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logging_utils.py index d563758cdd61e28d8d0d76461155a9536bdd513c..cdc63ca353ee28ea33ffa90b9a4eb2612105cbc8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logging_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logging_utils.py @@ -26,7 +26,6 @@ from unittest import mock class TestLoggingUtils(unittest.TestCase): - def setUp(self): self.verbosity_level = 1 self.code_level = 3 @@ -50,19 +49,23 @@ class TestLoggingUtils(unittest.TestCase): def test_also_to_stdout(self): logging_utils._TRANSLATOR_LOGGER.need_to_echo_log_to_stdout = None self.assertEqual( - logging_utils._TRANSLATOR_LOGGER.need_to_echo_log_to_stdout, False) + logging_utils._TRANSLATOR_LOGGER.need_to_echo_log_to_stdout, False + ) paddle.jit.set_verbosity(also_to_stdout=False) self.assertEqual( - logging_utils._TRANSLATOR_LOGGER.need_to_echo_log_to_stdout, False) + logging_utils._TRANSLATOR_LOGGER.need_to_echo_log_to_stdout, False + ) logging_utils._TRANSLATOR_LOGGER.need_to_echo_node_to_stdout = None self.assertEqual( - logging_utils._TRANSLATOR_LOGGER.need_to_echo_code_to_stdout, False) + logging_utils._TRANSLATOR_LOGGER.need_to_echo_code_to_stdout, False + ) paddle.jit.set_code_level(also_to_stdout=True) self.assertEqual( - logging_utils._TRANSLATOR_LOGGER.need_to_echo_code_to_stdout, True) + logging_utils._TRANSLATOR_LOGGER.need_to_echo_code_to_stdout, True + ) with self.assertRaises(AssertionError): paddle.jit.set_verbosity(also_to_stdout=1) @@ -99,8 +102,9 @@ class TestLoggingUtils(unittest.TestCase): logging_utils.set_code_level(1, True) logging_utils.log_transformed_code(1, ast_code, "TestTransformer") logging_utils.set_code_level(logging_utils.LOG_AllTransformer, True) - logging_utils.log_transformed_code(logging_utils.LOG_AllTransformer, - ast_code, "TestTransformer") + logging_utils.log_transformed_code( + logging_utils.LOG_AllTransformer, ast_code, "TestTransformer" + ) def test_log_message(self): stream = io.StringIO() @@ -121,7 +125,8 @@ class TestLoggingUtils(unittest.TestCase): logging_utils.log(2, log_msg_2) result_msg = '\n'.join( - [warn_msg, error_msg, "(Level 1) " + log_msg_1, ""]) + [warn_msg, error_msg, "(Level 1) " + log_msg_1, ""] + ) self.assertEqual(result_msg, stream.getvalue()) def test_log_transformed_code(self): @@ -135,12 +140,14 @@ class TestLoggingUtils(unittest.TestCase): with mock.patch.object(sys, 'stdout', stream): paddle.jit.set_code_level(1) - logging_utils.log_transformed_code(1, ast_code, - "BasicApiTransformer") + logging_utils.log_transformed_code( + 1, ast_code, "BasicApiTransformer" + ) paddle.jit.set_code_level() - logging_utils.log_transformed_code(logging_utils.LOG_AllTransformer, - ast_code, "All 
Transformers") + logging_utils.log_transformed_code( + logging_utils.LOG_AllTransformer, ast_code, "All Transformers" + ) self.assertIn(source_code, stream.getvalue()) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py index a88047839048e9db11b6286562d5f4e9b2e028b1..4ab65c47660706cdea4a53fb9b58cd245aeedfab 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py @@ -23,7 +23,9 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import ProgramTranslator -from paddle.fluid.dygraph.dygraph_to_static.logical_transformer import cmpop_node_to_str +from paddle.fluid.dygraph.dygraph_to_static.logical_transformer import ( + cmpop_node_to_str, +) program_translator = ProgramTranslator() @@ -171,16 +173,19 @@ def test_shape_not_equal(x): class TestLogicalBase(unittest.TestCase): - def setUp(self): self.input = np.array([3]).astype('int32') - self.place = paddle.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self._set_test_func() def _set_test_func(self): raise NotImplementedError( - "Method 'set_test_func' should be implemented.") + "Method 'set_test_func' should be implemented." + ) def _run(self, to_static): program_translator.enable(to_static) @@ -196,7 +201,6 @@ class TestLogicalBase(unittest.TestCase): class TestLogicalNot(TestLogicalBase): - def _set_test_func(self): self.dygraph_func = test_logical_not @@ -208,11 +212,12 @@ class TestLogicalNot(TestLogicalBase): static_res, rtol=1e-05, err_msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) class TestLogicalNot2(TestLogicalBase): - def _set_test_func(self): self.dygraph_func = test_logical_not_2 @@ -224,55 +229,49 @@ class TestLogicalNot2(TestLogicalBase): static_res, rtol=1e-05, err_msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res)) + dygraph_res, static_res + ), + ) class TestLogicalAnd(TestLogicalNot): - def _set_test_func(self): self.dygraph_func = test_logical_and class TestLogicalAnd2(TestLogicalNot): - def _set_test_func(self): self.dygraph_func = test_logical_and_2 class TestLogicalOr(TestLogicalNot): - def _set_test_func(self): self.dygraph_func = test_logical_or class TestLogicalOr2(TestLogicalNot): - def _set_test_func(self): self.dygraph_func = test_logical_or_2 class TestLogicalNotAndOr(TestLogicalNot): - def _set_test_func(self): self.dygraph_func = test_logical_not_and_or class TestShapeEqual(TestLogicalNot): - def _set_test_func(self): self.input = np.ones([1, 2, 3]).astype('float32') self.dygraph_func = test_shape_equal class TestShapeNotEqual(TestLogicalNot): - def _set_test_func(self): self.input = np.ones([1, 2, 3]).astype('float32') self.dygraph_func = test_shape_not_equal class TestCmpopNodeToStr(unittest.TestCase): - def test_exception(self): with self.assertRaises(KeyError): cmpop_node_to_str(gast.Or()) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py index 478d573558e402d96b9c19dd0ce37cd8e3f24e90..6bc237f3781b62b952e9818aa376337f698bba49 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py +++ 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py @@ -63,12 +63,15 @@ def while_loop_dyfun_with_conflict_var(x): def while_loop_dyfunc_with_none(x): - i = fluid.dygraph.to_variable(x)\ - if x is not None \ - else fluid.dygraph.to_variable(x+1) + i = ( + fluid.dygraph.to_variable(x) + if x is not None + else fluid.dygraph.to_variable(x + 1) + ) # Use `to_variable` so that static analysis can analyze the type of X is Tensor x = fluid.dygraph.to_variable( - x) # TODO(liym27): Delete it if the type of parameter x can be resolved + x + ) # TODO(liym27): Delete it if the type of parameter x can be resolved flag = 1 while x < 10: i = i + x if flag is not None else x + i @@ -150,9 +153,7 @@ def while_loop_bool_op2(x): def while_loop_class_var(x): - class Foo(object): - def __init__(self): self.a = 3 self.b = 4 @@ -178,9 +179,7 @@ def loop_var_contains_property(x): def for_loop_class_var(max_len): - class Foo(object): - def __init__(self): self.a = 3 self.b = 4 @@ -189,9 +188,9 @@ def for_loop_class_var(max_len): foo = Foo() # Use `to_variable` so that static analysis can analyze the type of X is Tensor - max_len = fluid.layers.fill_constant(shape=[1], - value=max_len, - dtype="int32") + max_len = fluid.layers.fill_constant( + shape=[1], value=max_len, dtype="int32" + ) for i in range(max_len): foo.b = fluid.layers.zeros(shape=[1], dtype='float32') @@ -230,17 +229,18 @@ def for_loop_dufunc_with_listcomp(array): class TestNameVisitor(unittest.TestCase): - def setUp(self): self.loop_funcs = [ - while_loop_dyfunc, for_loop_dyfunc, while_loop_dyfunc_with_none, - for_loop_dufunc_with_listcomp + while_loop_dyfunc, + for_loop_dyfunc, + while_loop_dyfunc_with_none, + for_loop_dufunc_with_listcomp, ] self.loop_var_names = [ set(["i", "x"]), set(["i", "ret", "max_len"]), set(["i", "x"]), - set(["j", "array", "res", "x"]) + set(["j", "array", "res", "x"]), ] self.create_var_names = [set(), set(["ret"]), set(), set(["res", "x"])] @@ -254,8 +254,10 @@ class TestNameVisitor(unittest.TestCase): name_visitor = NameVisitor(gast_root) for node in gast.walk(gast_root): if isinstance(node, (gast.While, gast.For)): - loop_var_names, create_var_names = name_visitor.get_loop_var_names( - node) + ( + loop_var_names, + create_var_names, + ) = name_visitor.get_loop_var_names(node) self.assertEqual(loop_var_names, self.loop_var_names[i]) self.assertEqual(create_var_names, self.create_var_names[i]) @@ -268,34 +270,41 @@ class TestNameVisitor(unittest.TestCase): self.loop_var_names = [ set(["j", "two"]), set(["i", "three", "b"]), - set(["i"]) + set(["i"]), ] self.create_var_names = [set(), set(["b"]), set()] i = 0 for node in gast.walk(gast_root): if isinstance(node, (gast.While, gast.For)): - loop_var_names, create_var_names = name_visitor.get_loop_var_names( - node) + ( + loop_var_names, + create_var_names, + ) = name_visitor.get_loop_var_names(node) self.assertEqual( loop_var_names, self.loop_var_names[i], - msg="loop_var_names : {}, \nexpected loop_var_names : {}". 
- format(loop_var_names, self.loop_var_names[i])) + msg="loop_var_names : {}, \nexpected loop_var_names : {}".format( + loop_var_names, self.loop_var_names[i] + ), + ) self.assertEqual( create_var_names, self.create_var_names[i], - msg= - "i = {}\ncreate_var_names : {}, \nexpected create_var_names : {}" - .format(i, create_var_names, self.create_var_names[i])) + msg="i = {}\ncreate_var_names : {}, \nexpected create_var_names : {}".format( + i, create_var_names, self.create_var_names[i] + ), + ) i += 1 class TestTransformWhileLoop(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.x = np.zeros(shape=(1), dtype=np.int32) self._init_dyfunc() @@ -329,58 +338,52 @@ class TestTransformWhileLoop(unittest.TestCase): class TestTransformWhileLoopWithoutTensor(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_dyfunc_without_tensor class TestTransformWhileLoopWithConflicVar(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_dyfun_with_conflict_var class TestTransformWhileLoopWithNone(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_dyfunc_with_none class TestForBreakSingleReturn(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = for_break_single_return class TestWhileLoopBoolOp(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_bool_op class TestWhileLoopBoolOp2(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_bool_op2 class TestWhileLoopClassVar(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = while_loop_class_var class TestLoopVarContainsProperty(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = loop_var_contains_property class TestTransformForLoop(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.len = 100 self._init_dyfunc() @@ -402,43 +405,37 @@ class TestTransformForLoop(unittest.TestCase): return ret.numpy() def test_ast_to_func(self): - np.testing.assert_allclose(self._run_dygraph(), - self._run_static(), - rtol=1e-05) + np.testing.assert_allclose( + self._run_dygraph(), self._run_static(), rtol=1e-05 + ) class TestTransformForLoop2(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = for_loop_dyfunc2 class TestTransformForLoop3(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = for_loop_dyfunc3 class TestTransformForLoop4(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = for_loop_dyfunc4 class TestClassVarInForLoop(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = for_loop_class_var class TestVarCreateInForLoop(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = var_create_in_for_loop class TestErrorInForLoop(TestTransformForLoop): - def _init_dyfunc(self): self.dyfunc = for_loop_dyfunc_not_support diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py index d47445c05e98f1bfdbacfa99a630ae316750cfb1..b4bbfd789c09f911ee84126fe5fd311361b720ab 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py @@ -21,13 
+21,11 @@ import tempfile class LSTMLayer(nn.Layer): - def __init__(self, in_channels, hidden_size): super(LSTMLayer, self).__init__() - self.cell = nn.LSTM(in_channels, - hidden_size, - direction='bidirectional', - num_layers=2) + self.cell = nn.LSTM( + in_channels, hidden_size, direction='bidirectional', num_layers=2 + ) def forward(self, x): x, _ = self.cell(x) @@ -35,7 +33,6 @@ class LSTMLayer(nn.Layer): class Net(nn.Layer): - def __init__(self, in_channels, hidden_size): super(Net, self).__init__() self.lstm = LSTMLayer(in_channels, hidden_size) @@ -46,7 +43,6 @@ class Net(nn.Layer): class TestLstm(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -79,15 +75,17 @@ class TestLstm(unittest.TestCase): x.stop_gradient = False dygraph_out = net(x) loss = paddle.mean(dygraph_out) - sgd = paddle.optimizer.SGD(learning_rate=0.001, - parameters=net.parameters()) + sgd = paddle.optimizer.SGD( + learning_rate=0.001, parameters=net.parameters() + ) loss.backward() sgd.step() # switch eval mode firstly net.eval() x = paddle.randn((2, 10, 12)) net = paddle.jit.to_static( - net, input_spec=[paddle.static.InputSpec(shape=[-1, 10, 12])]) + net, input_spec=[paddle.static.InputSpec(shape=[-1, 10, 12])] + ) model_path = os.path.join(self.temp_dir.name, 'simple_lstm') paddle.jit.save(net, model_path) @@ -101,7 +99,9 @@ class TestLstm(unittest.TestCase): static_out.numpy(), rtol=1e-05, err_msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, static_out)) + dygraph_out, static_out + ), + ) # switch back into train mode. net.train() train_out = net(x) @@ -110,14 +110,15 @@ class TestLstm(unittest.TestCase): train_out.numpy(), rtol=1e-05, err_msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, train_out)) + dygraph_out, train_out + ), + ) def test_save_without_training(self): self.test_save_in_eval(with_training=False) class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self.fc = nn.Linear(10, 12) @@ -131,7 +132,6 @@ class LinearNet(nn.Layer): class TestSaveInEvalMode(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -145,15 +145,17 @@ class TestSaveInEvalMode(unittest.TestCase): x.stop_gradient = False dygraph_out = net(x) loss = paddle.mean(dygraph_out) - sgd = paddle.optimizer.SGD(learning_rate=0.001, - parameters=net.parameters()) + sgd = paddle.optimizer.SGD( + learning_rate=0.001, parameters=net.parameters() + ) loss.backward() sgd.step() # switch eval mode firstly net.eval() # save directly net = paddle.jit.to_static( - net, input_spec=[paddle.static.InputSpec(shape=[-1, 10])]) + net, input_spec=[paddle.static.InputSpec(shape=[-1, 10])] + ) model_path = os.path.join(self.temp_dir.name, 'linear_net') paddle.jit.save(net, model_path) @@ -169,11 +171,12 @@ class TestSaveInEvalMode(unittest.TestCase): infer_out.numpy(), rtol=1e-05, err_msg='eval_out is {}\n infer_out is \n{}'.format( - eval_out, infer_out)) + eval_out, infer_out + ), + ) class TestEvalAfterSave(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -186,8 +189,9 @@ class TestEvalAfterSave(unittest.TestCase): x.stop_gradient = False dy_out = net(x) loss = paddle.mean(dy_out) - sgd = paddle.optimizer.SGD(learning_rate=0.001, - parameters=net.parameters()) + sgd = paddle.optimizer.SGD( + learning_rate=0.001, parameters=net.parameters() + ) loss.backward() sgd.step() x = paddle.randn((2, 10, 12)).astype('float32') diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py index 7d4074954026ca58ef507d9588ae06886e8e26ec..bc4ce83114278bf91f775c757ac4ad8fedc8606c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py @@ -37,44 +37,49 @@ if paddle.fluid.is_compiled_with_cuda(): class SimpleImgConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=True, - param_attr=None, - bias_attr=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=True, + param_attr=None, + bias_attr=None, + ): super(SimpleImgConvPool, self).__init__() - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=None, - bias_attr=None, - act=act, - use_cudnn=use_cudnn) - - self._pool2d = Pool2D(pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + act=act, + use_cudnn=use_cudnn, + ) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -83,33 +88,30 @@ class SimpleImgConvPool(fluid.dygraph.Layer): class MNIST(fluid.dygraph.Layer): - def __init__(self): super(MNIST, self).__init__() - self._simple_img_conv_pool_1 = SimpleImgConvPool(1, - 20, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu" + ) - self._simple_img_conv_pool_2 = SimpleImgConvPool(20, - 50, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu" + ) self.pool_2_shape = 50 * 4 * 4 SIZE = 10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 - self._fc = Linear(self.pool_2_shape, - 10, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale)), - act="softmax") + scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5 + self._fc = Linear( + self.pool_2_shape, + 10, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), + act="softmax", + ) def forward(self, inputs, label=None): x = self.inference(inputs) @@ -131,15 +133,19 @@ class MNIST(fluid.dygraph.Layer): class TestMNIST(unittest.TestCase): - def setUp(self): self.epoch_num = 1 self.batch_size = 64 - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() - self.train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=self.batch_size, - drop_last=True) + 
self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + self.train_reader = paddle.batch( + paddle.dataset.mnist.train(), + batch_size=self.batch_size, + drop_last=True, + ) self.temp_dir = tempfile.TemporaryDirectory() def tearDown(self): @@ -167,7 +173,9 @@ class TestMNISTWithToStatic(TestMNIST): static_loss, rtol=1e-05, err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + dygraph_loss, static_loss + ), + ) with _test_eager_guard(): dygraph_loss = self.train_dygraph() static_loss = self.train_static() @@ -176,7 +184,9 @@ class TestMNISTWithToStatic(TestMNIST): static_loss, rtol=1e-05, err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + dygraph_loss, static_loss + ), + ) def test_mnist_declarative_cpu_vs_mkldnn(self): dygraph_loss_cpu = self.train_dygraph() @@ -190,7 +200,9 @@ class TestMNISTWithToStatic(TestMNIST): dygraph_loss_mkldnn, rtol=1e-05, err_msg='cpu dygraph is {}\n mkldnn dygraph is \n{}'.format( - dygraph_loss_cpu, dygraph_loss_mkldnn)) + dygraph_loss_cpu, dygraph_loss_mkldnn + ), + ) def train(self, to_static=False): @@ -201,17 +213,21 @@ class TestMNISTWithToStatic(TestMNIST): mnist = MNIST() if to_static: mnist = paddle.jit.to_static(mnist) - adam = AdamOptimizer(learning_rate=0.001, - parameter_list=mnist.parameters()) + adam = AdamOptimizer( + learning_rate=0.001, parameter_list=mnist.parameters() + ) for epoch in range(self.epoch_num): start = time() for batch_id, data in enumerate(self.train_reader()): - dy_x_data = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + dy_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) @@ -226,66 +242,87 @@ class TestMNISTWithToStatic(TestMNIST): mnist.clear_gradients() if batch_id % 10 == 0: print( - "Loss at epoch {} step {}: loss: {:}, acc: {}, cost: {}" - .format(epoch, batch_id, avg_loss.numpy(), - acc.numpy(), - time() - start)) + "Loss at epoch {} step {}: loss: {:}, acc: {}, cost: {}".format( + epoch, + batch_id, + avg_loss.numpy(), + acc.numpy(), + time() - start, + ) + ) start = time() if batch_id == 50: mnist.eval() prediction, acc, avg_loss = mnist(img, label) loss_data.append(avg_loss.numpy()[0]) # new save load check - self.check_jit_save_load(mnist, [dy_x_data], [img], - to_static, prediction) + self.check_jit_save_load( + mnist, [dy_x_data], [img], to_static, prediction + ) break return loss_data def check_jit_save_load(self, model, inputs, input_spec, to_static, gt_out): if to_static: infer_model_path = os.path.join( - self.temp_dir.name, 'test_mnist_inference_model_by_jit_save') + self.temp_dir.name, 'test_mnist_inference_model_by_jit_save' + ) model_save_dir = os.path.join(self.temp_dir.name, 'inference') model_save_prefix = os.path.join(model_save_dir, 'mnist') model_filename = "mnist" + INFER_MODEL_SUFFIX params_filename = "mnist" + INFER_PARAMS_SUFFIX - fluid.dygraph.jit.save(layer=model, - path=model_save_prefix, - input_spec=input_spec, - output_spec=[gt_out]) + fluid.dygraph.jit.save( + layer=model, + path=model_save_prefix, + input_spec=input_spec, + output_spec=[gt_out], + ) # load in static mode static_infer_out = self.jit_load_and_run_inference_static( - model_save_dir, model_filename, params_filename, inputs) - 
np.testing.assert_allclose(gt_out.numpy(), - static_infer_out, - rtol=1e-05) + model_save_dir, model_filename, params_filename, inputs + ) + np.testing.assert_allclose( + gt_out.numpy(), static_infer_out, rtol=1e-05 + ) # load in dygraph mode dygraph_infer_out = self.jit_load_and_run_inference_dygraph( - model_save_prefix, inputs) - np.testing.assert_allclose(gt_out.numpy(), - dygraph_infer_out, - rtol=1e-05) + model_save_prefix, inputs + ) + np.testing.assert_allclose( + gt_out.numpy(), dygraph_infer_out, rtol=1e-05 + ) # load in Paddle-Inference - predictor_infer_out = self.predictor_load_and_run_inference_analysis( - model_save_dir, model_filename, params_filename, inputs) - np.testing.assert_allclose(gt_out.numpy(), - predictor_infer_out, - rtol=1e-05) + predictor_infer_out = ( + self.predictor_load_and_run_inference_analysis( + model_save_dir, model_filename, params_filename, inputs + ) + ) + np.testing.assert_allclose( + gt_out.numpy(), predictor_infer_out, rtol=1e-05 + ) @switch_to_static_graph - def jit_load_and_run_inference_static(self, model_path, model_filename, - params_filename, inputs): + def jit_load_and_run_inference_static( + self, model_path, model_filename, params_filename, inputs + ): paddle.enable_static() exe = fluid.Executor(self.place) - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(dirname=model_path, - executor=exe, - model_filename=model_filename, - params_filename=params_filename) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + dirname=model_path, + executor=exe, + model_filename=model_filename, + params_filename=params_filename, + ) assert len(inputs) == len(feed_target_names) - results = exe.run(inference_program, - feed=dict(zip(feed_target_names, inputs)), - fetch_list=fetch_targets) + results = exe.run( + inference_program, + feed=dict(zip(feed_target_names, inputs)), + fetch_list=fetch_targets, + ) return np.array(results[0]) @@ -294,12 +331,13 @@ class TestMNISTWithToStatic(TestMNIST): pred = infer_net(inputs[0]) return pred.numpy() - def predictor_load_and_run_inference_analysis(self, model_path, - model_filename, - params_filename, inputs): - output = PredictorTools(model_path, model_filename, params_filename, - inputs) - out, = output() + def predictor_load_and_run_inference_analysis( + self, model_path, model_filename, params_filename, inputs + ): + output = PredictorTools( + model_path, model_filename, params_filename, inputs + ) + (out,) = output() return out diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py index 56b55766af64b0183f2efb5e587aba018e0155fa..3610e72b9539b914d079c422719155fcf9a8a77c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py @@ -24,7 +24,6 @@ if paddle.fluid.is_compiled_with_cuda(): class TestAMP(TestMNIST): - def train_static(self): return self.train(to_static=True) @@ -43,7 +42,9 @@ class TestAMP(TestMNIST): rtol=1e-05, atol=0.001, err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + dygraph_loss, static_loss + ), + ) def train(self, to_static=False): paddle.seed(SEED) @@ -53,8 +54,9 @@ class TestAMP(TestMNIST): print("Successfully to apply @to_static.") mnist = paddle.jit.to_static(mnist) - adam = AdamOptimizer(learning_rate=0.001, - parameter_list=mnist.parameters()) + adam = 
AdamOptimizer( + learning_rate=0.001, parameter_list=mnist.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) @@ -62,10 +64,14 @@ class TestAMP(TestMNIST): for epoch in range(self.epoch_num): start = time() for batch_id, data in enumerate(self.train_reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + dy_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) @@ -83,9 +89,14 @@ class TestAMP(TestMNIST): mnist.clear_gradients() if batch_id % 10 == 0: print( - "Loss at epoch {} step {}: loss: {:}, acc: {}, cost: {}" - .format(epoch, batch_id, avg_loss.numpy(), acc.numpy(), - time() - start)) + "Loss at epoch {} step {}: loss: {:}, acc: {}, cost: {}".format( + epoch, + batch_id, + avg_loss.numpy(), + acc.numpy(), + time() - start, + ) + ) start = time() if batch_id == 50: break diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py index beca9d3f9335761b4e5351f9f7d8b6d4ad5429cd..8bdf81537429bf52fcb7214eca53751425bac439 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py @@ -23,7 +23,6 @@ if paddle.fluid.is_compiled_with_cuda(): class TestPureFP16(TestMNIST): - def train_static(self): return self.train(to_static=True) @@ -41,7 +40,9 @@ class TestPureFP16(TestMNIST): rtol=1e-05, atol=0.001, err_msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + dygraph_loss, static_loss + ), + ) def train(self, to_static=False): np.random.seed(SEED) @@ -58,33 +59,39 @@ class TestPureFP16(TestMNIST): build_strategy.enable_inplace = False mnist = paddle.jit.to_static(mnist, build_strategy=build_strategy) - optimizer = paddle.optimizer.Adam(learning_rate=0.001, - parameters=mnist.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.001, parameters=mnist.parameters() + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - mnist, optimizer = paddle.amp.decorate(models=mnist, - optimizers=optimizer, - level='O2', - save_dtype='float32') + mnist, optimizer = paddle.amp.decorate( + models=mnist, optimizers=optimizer, level='O2', save_dtype='float32' + ) loss_data = [] for epoch in range(self.epoch_num): start = time() for batch_id, data in enumerate(self.train_reader()): - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(-1, 1) + dy_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(-1, 1) + ) img = paddle.to_tensor(dy_x_data) label = paddle.to_tensor(y_data) label.stop_gradient = True - with paddle.amp.auto_cast(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O2'): + with paddle.amp.auto_cast( + enable=True, + custom_white_list=None, + custom_black_list=None, + level='O2', + ): prediction, acc, avg_loss = mnist(img, label=label) scaled = scaler.scale(avg_loss) @@ -96,9 +103,14 @@ class TestPureFP16(TestMNIST): mnist.clear_gradients() if batch_id % 2 == 0: print( - "Loss at epoch {} step 
{}: loss: {:}, acc: {}, cost: {}" - .format(epoch, batch_id, avg_loss.numpy(), acc.numpy(), - time() - start)) + "Loss at epoch {} step {}: loss: {:}, acc: {}, cost: {}".format( + epoch, + batch_id, + avg_loss.numpy(), + acc.numpy(), + time() - start, + ) + ) start = time() if batch_id == 10: break diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py index ca9a78f5f308df4a292eb160c7197acbf6e13c64..e13a41db89a22726decc71b426357fd6d1761772 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py @@ -39,32 +39,35 @@ program_translator = ProgramTranslator() class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - filter_size, - num_filters, - stride, - padding, - channels=None, - num_groups=1, - act='relu', - use_cudnn=True, - name=None): + def __init__( + self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + act='relu', + use_cudnn=True, + name=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=padding, - groups=num_groups, - act=None, - use_cudnn=use_cudnn, - param_attr=ParamAttr(initializer=MSRA(), - name=self.full_name() + - "_weights"), - bias_attr=False) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + act=None, + use_cudnn=use_cudnn, + param_attr=ParamAttr( + initializer=MSRA(), name=self.full_name() + "_weights" + ), + bias_attr=False, + ) self._batch_norm = BatchNorm( num_filters, @@ -72,7 +75,8 @@ class ConvBNLayer(fluid.dygraph.Layer): param_attr=ParamAttr(name=self.full_name() + "_bn" + "_scale"), bias_attr=ParamAttr(name=self.full_name() + "_bn" + "_offset"), moving_mean_name=self.full_name() + "_bn" + '_mean', - moving_variance_name=self.full_name() + "_bn" + '_variance') + moving_variance_name=self.full_name() + "_bn" + '_variance', + ) def forward(self, inputs, if_act=False): y = self._conv(inputs) @@ -83,32 +87,35 @@ class ConvBNLayer(fluid.dygraph.Layer): class DepthwiseSeparable(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters1, - num_filters2, - num_groups, - stride, - scale, - name=None): + def __init__( + self, + num_channels, + num_filters1, + num_filters2, + num_groups, + stride, + scale, + name=None, + ): super(DepthwiseSeparable, self).__init__() - self._depthwise_conv = ConvBNLayer(num_channels=num_channels, - num_filters=int(num_filters1 * - scale), - filter_size=3, - stride=stride, - padding=1, - num_groups=int(num_groups * scale), - use_cudnn=True) + self._depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=3, + stride=stride, + padding=1, + num_groups=int(num_groups * scale), + use_cudnn=True, + ) self._pointwise_conv = ConvBNLayer( num_channels=int(num_filters1 * scale), filter_size=1, num_filters=int(num_filters2 * scale), stride=1, - padding=0) + padding=0, + ) def forward(self, inputs): y = self._depthwise_conv(inputs) @@ -117,118 +124,148 @@ class DepthwiseSeparable(fluid.dygraph.Layer): class MobileNetV1(fluid.dygraph.Layer): - def __init__(self, scale=1.0, class_dim=1000): super(MobileNetV1, self).__init__() self.scale = scale self.dwsl = [] - 
self.conv1 = ConvBNLayer(num_channels=3, - filter_size=3, - channels=3, - num_filters=int(32 * scale), - stride=2, - padding=1) - - dws21 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 32 * scale), - num_filters1=32, - num_filters2=64, - num_groups=32, - stride=1, - scale=scale), - name="conv2_1") + self.conv1 = ConvBNLayer( + num_channels=3, + filter_size=3, + channels=3, + num_filters=int(32 * scale), + stride=2, + padding=1, + ) + + dws21 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(32 * scale), + num_filters1=32, + num_filters2=64, + num_groups=32, + stride=1, + scale=scale, + ), + name="conv2_1", + ) self.dwsl.append(dws21) - dws22 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 64 * scale), - num_filters1=64, - num_filters2=128, - num_groups=64, - stride=2, - scale=scale), - name="conv2_2") + dws22 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(64 * scale), + num_filters1=64, + num_filters2=128, + num_groups=64, + stride=2, + scale=scale, + ), + name="conv2_2", + ) self.dwsl.append(dws22) - dws31 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 128 * scale), - num_filters1=128, - num_filters2=128, - num_groups=128, - stride=1, - scale=scale), - name="conv3_1") + dws31 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=128, + num_groups=128, + stride=1, + scale=scale, + ), + name="conv3_1", + ) self.dwsl.append(dws31) - dws32 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 128 * scale), - num_filters1=128, - num_filters2=256, - num_groups=128, - stride=2, - scale=scale), - name="conv3_2") + dws32 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=256, + num_groups=128, + stride=2, + scale=scale, + ), + name="conv3_2", + ) self.dwsl.append(dws32) - dws41 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 256 * scale), - num_filters1=256, - num_filters2=256, - num_groups=256, - stride=1, - scale=scale), - name="conv4_1") + dws41 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=256, + num_groups=256, + stride=1, + scale=scale, + ), + name="conv4_1", + ) self.dwsl.append(dws41) - dws42 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 256 * scale), - num_filters1=256, - num_filters2=512, - num_groups=256, - stride=2, - scale=scale), - name="conv4_2") + dws42 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=512, + num_groups=256, + stride=2, + scale=scale, + ), + name="conv4_2", + ) self.dwsl.append(dws42) for i in range(5): - tmp = self.add_sublayer(sublayer=DepthwiseSeparable( + tmp = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=512, + num_groups=512, + stride=1, + scale=scale, + ), + name="conv5_" + str(i + 1), + ) + self.dwsl.append(tmp) + + dws56 = self.add_sublayer( + sublayer=DepthwiseSeparable( num_channels=int(512 * scale), num_filters1=512, - num_filters2=512, + num_filters2=1024, num_groups=512, - stride=1, - scale=scale), - name="conv5_" + str(i + 1)) - self.dwsl.append(tmp) - - dws56 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 512 * scale), - num_filters1=512, - num_filters2=1024, - num_groups=512, - stride=2, - scale=scale), - name="conv5_6") + 
stride=2, + scale=scale, + ), + name="conv5_6", + ) self.dwsl.append(dws56) - dws6 = self.add_sublayer(sublayer=DepthwiseSeparable(num_channels=int( - 1024 * scale), - num_filters1=1024, - num_filters2=1024, - num_groups=1024, - stride=1, - scale=scale), - name="conv6") + dws6 = self.add_sublayer( + sublayer=DepthwiseSeparable( + num_channels=int(1024 * scale), + num_filters1=1024, + num_filters2=1024, + num_groups=1024, + stride=1, + scale=scale, + ), + name="conv6", + ) self.dwsl.append(dws6) self.pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) - self.out = Linear(int(1024 * scale), - class_dim, - param_attr=ParamAttr(initializer=MSRA(), - name=self.full_name() + - "fc7_weights"), - bias_attr=ParamAttr(name="fc7_offset")) + self.out = Linear( + int(1024 * scale), + class_dim, + param_attr=ParamAttr( + initializer=MSRA(), name=self.full_name() + "fc7_weights" + ), + bias_attr=ParamAttr(name="fc7_offset"), + ) @declarative def forward(self, inputs): @@ -242,7 +279,6 @@ class MobileNetV1(fluid.dygraph.Layer): class InvertedResidualUnit(fluid.dygraph.Layer): - def __init__( self, num_channels, @@ -255,30 +291,36 @@ class InvertedResidualUnit(fluid.dygraph.Layer): ): super(InvertedResidualUnit, self).__init__() num_expfilter = int(round(num_in_filter * expansion_factor)) - self._expand_conv = ConvBNLayer(num_channels=num_channels, - num_filters=num_expfilter, - filter_size=1, - stride=1, - padding=0, - act=None, - num_groups=1) - - self._bottleneck_conv = ConvBNLayer(num_channels=num_expfilter, - num_filters=num_expfilter, - filter_size=filter_size, - stride=stride, - padding=padding, - num_groups=num_expfilter, - act=None, - use_cudnn=True) - - self._linear_conv = ConvBNLayer(num_channels=num_expfilter, - num_filters=num_filters, - filter_size=1, - stride=1, - padding=0, - act=None, - num_groups=1) + self._expand_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + act=None, + num_groups=1, + ) + + self._bottleneck_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + act=None, + use_cudnn=True, + ) + + self._linear_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + act=None, + num_groups=1, + ) def forward(self, inputs, ifshortcut): y = self._expand_conv(inputs, if_act=True) @@ -290,29 +332,33 @@ class InvertedResidualUnit(fluid.dygraph.Layer): class InvresiBlocks(fluid.dygraph.Layer): - def __init__(self, in_c, t, c, n, s): super(InvresiBlocks, self).__init__() - self._first_block = InvertedResidualUnit(num_channels=in_c, - num_in_filter=in_c, - num_filters=c, - stride=s, - filter_size=3, - padding=1, - expansion_factor=t) + self._first_block = InvertedResidualUnit( + num_channels=in_c, + num_in_filter=in_c, + num_filters=c, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + ) self._inv_blocks = [] for i in range(1, n): - tmp = self.add_sublayer(sublayer=InvertedResidualUnit( - num_channels=c, - num_in_filter=c, - num_filters=c, - stride=1, - filter_size=3, - padding=1, - expansion_factor=t), - name=self.full_name() + "_" + str(i + 1)) + tmp = self.add_sublayer( + sublayer=InvertedResidualUnit( + num_channels=c, + num_in_filter=c, + num_filters=c, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + ), + name=self.full_name() + "_" + str(i + 1), + ) self._inv_blocks.append(tmp) def forward(self, 
inputs): @@ -323,7 +369,6 @@ class InvresiBlocks(fluid.dygraph.Layer): class MobileNetV2(fluid.dygraph.Layer): - def __init__(self, class_dim=1000, scale=1.0): super(MobileNetV2, self).__init__() self.scale = scale @@ -339,48 +384,54 @@ class MobileNetV2(fluid.dygraph.Layer): (6, 320, 1, 1), ] - #1. conv1 - self._conv1 = ConvBNLayer(num_channels=3, - num_filters=int(32 * scale), - filter_size=3, - stride=2, - act=None, - padding=1) - - #2. bottleneck sequences + # 1. conv1 + self._conv1 = ConvBNLayer( + num_channels=3, + num_filters=int(32 * scale), + filter_size=3, + stride=2, + act=None, + padding=1, + ) + + # 2. bottleneck sequences self._invl = [] i = 1 in_c = int(32 * scale) for layer_setting in bottleneck_params_list: t, c, n, s = layer_setting i += 1 - tmp = self.add_sublayer(sublayer=InvresiBlocks(in_c=in_c, - t=t, - c=int(c * scale), - n=n, - s=s), - name='conv' + str(i)) + tmp = self.add_sublayer( + sublayer=InvresiBlocks( + in_c=in_c, t=t, c=int(c * scale), n=n, s=s + ), + name='conv' + str(i), + ) self._invl.append(tmp) in_c = int(c * scale) - #3. last_conv + # 3. last_conv self._out_c = int(1280 * scale) if scale > 1.0 else 1280 - self._conv9 = ConvBNLayer(num_channels=in_c, - num_filters=self._out_c, - filter_size=1, - stride=1, - act=None, - padding=0) - - #4. pool + self._conv9 = ConvBNLayer( + num_channels=in_c, + num_filters=self._out_c, + filter_size=1, + stride=1, + act=None, + padding=0, + ) + + # 4. pool self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True) - #5. fc + # 5. fc tmp_param = ParamAttr(name=self.full_name() + "fc10_weights") - self._fc = Linear(self._out_c, - class_dim, - param_attr=tmp_param, - bias_attr=ParamAttr(name="fc10_offset")) + self._fc = Linear( + self._out_c, + class_dim, + param_attr=tmp_param, + bias_attr=ParamAttr(name="fc10_offset"), + ) @declarative def forward(self, inputs): @@ -399,7 +450,8 @@ def create_optimizer(args, parameter_list): learning_rate=args.lr, momentum=args.momentum_rate, regularization=fluid.regularizer.L2Decay(args.l2_decay), - parameter_list=parameter_list) + parameter_list=parameter_list, + ) return optimizer @@ -430,8 +482,11 @@ class Args(object): class_dim = 50 print_step = 1 train_step = 10 - place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) model_save_dir = None model_save_prefix = None model_filename = None @@ -477,8 +532,9 @@ def train_mobilenet(args, to_static): t_end = time.time() softmax_out = fluid.layers.softmax(out, use_cudnn=False) - loss = fluid.layers.cross_entropy(input=softmax_out, - label=label) + loss = fluid.layers.cross_entropy( + input=softmax_out, label=label + ) avg_loss = paddle.mean(x=loss) acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1) acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) @@ -493,17 +549,29 @@ def train_mobilenet(args, to_static): t2 = time.time() train_batch_elapse = t2 - t1 if batch_id % args.print_step == 0: - print("epoch id: %d, batch step: %d, avg_loss %0.5f acc_top1 %0.5f acc_top5 %0.5f %2.4f sec net_t:%2.4f back_t:%2.4f read_t:%2.4f" % \ - (eop, batch_id, avg_loss.numpy(), acc_top1.numpy(), acc_top5.numpy(), train_batch_elapse, - t_end - t_start, t_end_back - t_start_back, t1 - t_last)) + print( + "epoch id: %d, batch step: %d, avg_loss %0.5f acc_top1 %0.5f acc_top5 %0.5f %2.4f sec net_t:%2.4f back_t:%2.4f read_t:%2.4f" + % ( + eop, + batch_id, + avg_loss.numpy(), + acc_top1.numpy(), + acc_top5.numpy(), + 
train_batch_elapse, + t_end - t_start, + t_end_back - t_start_back, + t1 - t_last, + ) + ) batch_id += 1 t_last = time.time() if batch_id > args.train_step: if to_static: fluid.dygraph.jit.save(net, args.model_save_prefix) else: - fluid.dygraph.save_dygraph(net.state_dict(), - args.dy_state_dict_save_path) + fluid.dygraph.save_dygraph( + net.state_dict(), args.dy_state_dict_save_path + ) break return np.array(loss_data) @@ -514,15 +582,22 @@ def predict_static(args, data): exe = fluid.Executor(args.place) # load inference model - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(args.model_save_dir, - executor=exe, - model_filename=args.model_filename, - params_filename=args.params_filename) - - pred_res = exe.run(inference_program, - feed={feed_target_names[0]: data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + args.model_save_dir, + executor=exe, + model_filename=args.model_filename, + params_filename=args.params_filename, + ) + + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: data}, + fetch_list=fetch_targets, + ) return pred_res[0] @@ -554,51 +629,57 @@ def predict_dygraph_jit(args, data): def predict_analysis_inference(args, data): - output = PredictorTools(args.model_save_dir, args.model_filename, - args.params_filename, [data]) - out, = output() + output = PredictorTools( + args.model_save_dir, args.model_filename, args.params_filename, [data] + ) + (out,) = output() return out class TestMobileNet(unittest.TestCase): - def setUp(self): self.args = Args() self.temp_dir = tempfile.TemporaryDirectory() - self.args.model_save_dir = os.path.join(self.temp_dir.name, - "./inference") + self.args.model_save_dir = os.path.join( + self.temp_dir.name, "./inference" + ) def tearDown(self): self.temp_dir.cleanup() def train(self, model_name, to_static): self.args.model = model_name - self.args.model_save_prefix = os.path.join(self.temp_dir.name, - "./inference/" + model_name) + self.args.model_save_prefix = os.path.join( + self.temp_dir.name, "./inference/" + model_name + ) self.args.model_filename = model_name + INFER_MODEL_SUFFIX self.args.params_filename = model_name + INFER_PARAMS_SUFFIX self.args.dy_state_dict_save_path = os.path.join( - self.temp_dir.name, model_name + ".dygraph") + self.temp_dir.name, model_name + ".dygraph" + ) out = train_mobilenet(self.args, to_static) return out def assert_same_loss(self, model_name): dy_out = self.train(model_name, to_static=False) st_out = self.train(model_name, to_static=True) - np.testing.assert_allclose(dy_out, - st_out, - rtol=1e-05, - err_msg='dy_out: {}, st_out: {}'.format( - dy_out, st_out)) + np.testing.assert_allclose( + dy_out, + st_out, + rtol=1e-05, + err_msg='dy_out: {}, st_out: {}'.format(dy_out, st_out), + ) def assert_same_predict(self, model_name): self.args.model = model_name - self.args.model_save_prefix = os.path.join(self.temp_dir.name, - "./inference/" + model_name) + self.args.model_save_prefix = os.path.join( + self.temp_dir.name, "./inference/" + model_name + ) self.args.model_filename = model_name + INFER_MODEL_SUFFIX self.args.params_filename = model_name + INFER_PARAMS_SUFFIX self.args.dy_state_dict_save_path = os.path.join( - self.temp_dir.name, model_name + ".dygraph") + self.temp_dir.name, model_name + ".dygraph" + ) local_random = np.random.RandomState(SEED) image = local_random.random_sample([1, 3, 224, 224]).astype('float32') dy_pre = predict_dygraph(self.args, image) 
@@ -609,20 +690,25 @@ class TestMobileNet(unittest.TestCase): dy_pre, st_pre, rtol=1e-05, - err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre)) + err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre), + ) np.testing.assert_allclose( dy_jit_pre, st_pre, rtol=1e-05, err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre)) + dy_jit_pre, st_pre + ), + ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, atol=1e-05, err_msg='inference_pred_res:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre)) + predictor_pre, st_pre + ), + ) def test_mobile_net(self): # MobileNet-V1 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_multi_forward.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_multi_forward.py index 3b81cd0749fc63a8092a9c2aa5835f3cfc72fc12..f320e9f010cfc3209332a570c773a76c20c0d943 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_multi_forward.py @@ -18,20 +18,20 @@ import unittest class MyLayer(paddle.nn.Layer): - def __init__(self): super().__init__() self.linear = paddle.nn.Linear(1, 1) - @paddle.jit.to_static(input_spec=[ - paddle.static.InputSpec(shape=[None, None], dtype=paddle.float32) - ]) + @paddle.jit.to_static( + input_spec=[ + paddle.static.InputSpec(shape=[None, None], dtype=paddle.float32) + ] + ) def forward(self, x): return self.linear(x) class TestBackward(unittest.TestCase): - def test_order_0(self): """ loss = 1 * w * 1 + 2 * w * 2 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_op_attr.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_op_attr.py index 9bdc6cfcb2c85040ca1548f035d4652c0bd0ae9f..8bfc7a83dfec8768e138870c5fa2cc3e132c7a52 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_op_attr.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_op_attr.py @@ -19,7 +19,6 @@ from paddle.static import InputSpec class MySub(paddle.nn.Layer): - def __init__(self): super(MySub, self).__init__() @@ -28,7 +27,6 @@ class MySub(paddle.nn.Layer): class NetWithOpAttr(paddle.nn.Layer): - def __init__(self, in_num, out_num): super(NetWithOpAttr, self).__init__() @@ -44,7 +42,7 @@ class NetWithOpAttr(paddle.nn.Layer): @paddle.jit.to_static(input_spec=[InputSpec([10, 16])]) def with_cond(self, x): - if paddle.mean(x) > 0.: + if paddle.mean(x) > 0.0: out = self.linear(x) else: out = self.sub(x, x) @@ -53,7 +51,6 @@ class NetWithOpAttr(paddle.nn.Layer): class CheckOpAttr(unittest.TestCase): - def setUp(self): self.in_num = 16 self.out_num = 16 @@ -65,7 +62,7 @@ class CheckOpAttr(unittest.TestCase): "int_val": 10, "int_vals": [10, 20], "float_val": 3.8, - "float_vals": [3.8, -0.2] + "float_vals": [3.8, -0.2], } self.bn_attrs = {"bool_val": True, "bool_vals": [True, False]} self.sub_attrs = {"int_vals": [10, 20], "bool_vals": [True, False]} @@ -75,7 +72,7 @@ class CheckOpAttr(unittest.TestCase): 'elementwise_add': self.fc_attrs, 'batch_norm': self.bn_attrs, 'tanh': self.bn_attrs, - 'elementwise_sub': self.sub_attrs + 'elementwise_sub': self.sub_attrs, } def test_set_op_attrs(self): @@ -89,8 +86,9 @@ class CheckOpAttr(unittest.TestCase): self.assertEqual(len(net.linear._forward_pre_hooks), 1) self.assertEqual(len(net.linear._forward_post_hooks), 1) # to_static - net = paddle.jit.to_static(net, - input_spec=[InputSpec.from_tensor(self.x)]) + net = paddle.jit.to_static( + net, input_spec=[InputSpec.from_tensor(self.x)] + ) # 
assert attrs have be set. self.check_op_attrs(net.forward.concrete_program.main_program) @@ -103,7 +101,8 @@ class CheckOpAttr(unittest.TestCase): for cur_block in main_program.blocks: ops = cur_block.ops for op in ops: - if op.type not in self.infos: continue + if op.type not in self.infos: + continue for attr_name, expect_vals in self.infos[op.type].items(): op_vals = op.desc.attr(attr_name) if not isinstance(expect_vals, list): @@ -120,8 +119,9 @@ class CheckOpAttr(unittest.TestCase): def test_set_op_attrs_with_sub_block(self): net = NetWithOpAttr(self.in_num, self.out_num) # set attrs - net.linear._set_op_attrs({"int_vals": [0, - 0]}) # test overwrite behavior + net.linear._set_op_attrs( + {"int_vals": [0, 0]} + ) # test overwrite behavior net.linear._set_op_attrs(self.fc_attrs) net.bn._set_op_attrs(self.bn_attrs) net.sub._set_op_attrs(self.sub_attrs) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py index 8247787b51572715c2fcbae355ad02cd8d89bb29..90444ede53c54f3504a7c654f0cc4a70b9a4534c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py @@ -20,7 +20,6 @@ from paddle.jit import to_static, ProgramTranslator class NetWithParameterList(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(NetWithParameterList, self).__init__() weight = self.create_parameter([in_size, out_size]) @@ -36,7 +35,6 @@ class NetWithParameterList(paddle.nn.Layer): class NetWithParameterListIter(NetWithParameterList): - def __init__(self, in_size, out_size): super(NetWithParameterListIter, self).__init__(in_size, out_size) @@ -51,7 +49,6 @@ class NetWithParameterListIter(NetWithParameterList): class TestParameterList(unittest.TestCase): - def setUp(self): self.seed = 2021 self.iter_num = 5 @@ -89,13 +86,14 @@ class TestParameterList(unittest.TestCase): class NetWithRawParamList(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(NetWithRawParamList, self).__init__() - weight = self.add_parameter('w', - self.create_parameter([in_size, out_size])) + weight = self.add_parameter( + 'w', self.create_parameter([in_size, out_size]) + ) bias = self.add_parameter( - 'b', self.create_parameter([out_size], is_bias=True)) + 'b', self.create_parameter([out_size], is_bias=True) + ) self.params = [weight] self.bias_dict = {'b': bias} @@ -108,7 +106,6 @@ class NetWithRawParamList(paddle.nn.Layer): class TestRawParameterList(unittest.TestCase): - def setUp(self): self.seed = 2021 self.iter_num = 5 @@ -142,7 +139,6 @@ class TestRawParameterList(unittest.TestCase): class NetWithSubLayerParamList(paddle.nn.Layer): - def __init__(self, sub_layer): super(NetWithSubLayerParamList, self).__init__() self.sub_layer = sub_layer @@ -158,7 +154,6 @@ class NetWithSubLayerParamList(paddle.nn.Layer): class TestSubLayerParameterList(TestRawParameterList): - def init_net(self): fc = paddle.nn.Linear(10, 3) self.net = NetWithSubLayerParamList(fc) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_params_no_grad.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_params_no_grad.py index f44faa642a00e2c7eb87533ac5600d450204b5f6..24ff71b066d1e5d18f0b91275dca8fb10fbdd87d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_params_no_grad.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_params_no_grad.py @@ -20,7 +20,6 @@ import 
unittest class Net(nn.Layer): - def __init__(self): super(Net, self).__init__() self.emb1 = nn.Embedding(100, 16) @@ -54,10 +53,11 @@ def train(): class TestParamsNoGrad(unittest.TestCase): - def test_two_card(self): - if paddle.is_compiled_with_cuda() and len( - paddle.static.cuda_places()) > 1: + if ( + paddle.is_compiled_with_cuda() + and len(paddle.static.cuda_places()) > 1 + ): dist.spawn(train, nprocs=2, gpus='0,1') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py index 72dca36d2101ab793022683fff433a28b972e3bc..d11f4e9f15c6f85422593582328637eeafaf1318 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py @@ -54,7 +54,6 @@ def fake_data(shape): class TestWithNestedInput(unittest.TestCase): - def setUp(self): self.x = None self.y = None @@ -62,15 +61,14 @@ class TestWithNestedInput(unittest.TestCase): def fake_input(self): self.x = fake_data([10, 16]) self.y = [ - fake_data([10, 16]), "preprocess_cmd", 64, { - 'z': [fake_data([10, 12]), - fake_data([10, 12])], + fake_data([10, 16]), + "preprocess_cmd", + 64, + { + 'z': [fake_data([10, 12]), fake_data([10, 12])], 'c': fake_data([10, 10]), - 'd': { - 'da': 12, - 'dc': fake_data([10, 10]) - } - } + 'd': {'da': 12, 'dc': fake_data([10, 10])}, + }, ] def _run(self, to_static): @@ -92,7 +90,6 @@ class TestWithNestedInput(unittest.TestCase): class TestWithNestedOutput(unittest.TestCase): - def setUp(self): self.x = None self.y = None @@ -120,17 +117,17 @@ class TestWithNestedOutput(unittest.TestCase): self.assertTrue(len(dygraph_res) == len(static_res)) for dy_var, st_var in zip(dygraph_res, static_res): - if isinstance(dy_var, - (fluid.core.VarBase, fluid.core.eager.Tensor)): - np.testing.assert_allclose(dy_var.numpy(), - st_var.numpy(), - rtol=1e-05) + if isinstance( + dy_var, (fluid.core.VarBase, fluid.core.eager.Tensor) + ): + np.testing.assert_allclose( + dy_var.numpy(), st_var.numpy(), rtol=1e-05 + ) else: self.assertTrue(dy_var, st_var) class TestWithTrainAndEval(unittest.TestCase): - def test_switch_eval_and_train(self): program_translator = ProgramTranslator() @@ -142,25 +139,27 @@ class TestWithTrainAndEval(unittest.TestCase): _, train_partial_layer = linear_net.forward.program_cache.last()[-1] # check default mode is for training - self.assertEqual(train_partial_layer.program, - train_partial_layer._train_program) + self.assertEqual( + train_partial_layer.program, train_partial_layer._train_program + ) # switch to run test program after `eval()` linear_net.eval() linear_net(x) _, eval_partial_layer = linear_net.forward.program_cache.last()[-1] - self.assertEqual(eval_partial_layer.program, - eval_partial_layer._infer_program) + self.assertEqual( + eval_partial_layer.program, eval_partial_layer._infer_program + ) # switch back into training linear_net.train() linear_net(x) - self.assertEqual(train_partial_layer.program, - train_partial_layer._train_program) + self.assertEqual( + train_partial_layer.program, train_partial_layer._train_program + ) class TestWithNoGrad(unittest.TestCase): - def test_with_no_grad(self): with fluid.dygraph.guard(): linear_net = Linear() @@ -171,18 +170,19 @@ class TestWithNoGrad(unittest.TestCase): linear_net.train() linear_net(x) _, partial_layer = linear_net.forward.program_cache.last()[-1] - self.assertEqual(partial_layer.program, - partial_layer._train_program) 
+ self.assertEqual( + partial_layer.program, partial_layer._train_program + ) class GPT2LMHeadModel(fluid.dygraph.Layer): - def __init__(self): super(GPT2LMHeadModel, self).__init__() self.embedding0 = paddle.nn.Embedding(20, 16) self.embedding1 = paddle.nn.Embedding(20, 32) self.lm_head_weight = paddle.to_tensor( - np.random.rand(2, 3).astype('float32')) + np.random.rand(2, 3).astype('float32') + ) @declarative def forward(self, x): @@ -192,7 +192,6 @@ class GPT2LMHeadModel(fluid.dygraph.Layer): class TestPruneUnusedParamInProgram(unittest.TestCase): - def test_prune(self): input_ids = np.array([[15, 11, 6, 3, 18, 13]]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py index 96cffc32a19f6bdccba6b5fa1ee002d65cf2fa6e..58e8e7b6728f5d98255cb1058df8f819860d9514 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py @@ -17,7 +17,6 @@ import unittest class TestPlace(unittest.TestCase): - def test_place(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_print.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_print.py index 90737695aaff3771b90f6a862b272ea141a8272e..3603cac199ed5b0cdf9cfd90f0f0e8c5b18f3ccc 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_print.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_print.py @@ -152,11 +152,13 @@ def dyfunc_print_continue_vars(x): class TestPrintBase(unittest.TestCase): - def setUp(self): self.input = numpy.ones(5).astype("int32") - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.set_test_func() def set_test_func(self): @@ -176,7 +178,6 @@ class TestPrintBase(unittest.TestCase): class TestPrintVariable(TestPrintBase): - def set_test_func(self): self.dygraph_func = dyfunc_print_variable @@ -186,37 +187,31 @@ class TestPrintVariable(TestPrintBase): class TestPrintNdArray(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_ndarray class TestPrintWithFormat(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_with_format class TestPrintWithFormat2(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_with_format2 class TestPrintWithIfElse(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_with_ifelse class TestPrintMultipleVar(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_multi_vars class TestPrintContinueVar(TestPrintVariable): - def set_test_func(self): self.dygraph_func = dyfunc_print_continue_vars diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py index 7bd415fdc8a7b93c786a54362a603e1ac0ecbc23..762836a937be7a0f97dc1212e550ed5e9e1dfea9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py @@ -26,7 +26,11 @@ from paddle.fluid.dygraph.jit import declarative from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code import paddle.jit.dy2static as _jst -from ifelse_simple_func import 
dyfunc_with_if_else, dyfunc_with_if_else_early_return1, dyfunc_with_if_else_early_return2 +from ifelse_simple_func import ( + dyfunc_with_if_else, + dyfunc_with_if_else_early_return1, + dyfunc_with_if_else_early_return2, +) np.random.seed(0) @@ -60,8 +64,7 @@ def get_source_code(func): return source_code -class StaticCode1(): - +class StaticCode1: def dyfunc_with_if_else(x_v, label=None): loss = _jst.UndefinedVar('loss') __return_1 = _jst.UndefinedVar('__return_1') @@ -70,11 +73,11 @@ class StaticCode1(): def get_args_0(): nonlocal x_v - return x_v, + return (x_v,) def set_args_0(__args): nonlocal x_v - x_v, = __args + (x_v,) = __args def true_fn_0(): nonlocal x_v @@ -86,12 +89,15 @@ class StaticCode1(): x_v = x_v + 1 return - _jst.IfElse(paddle.mean(x_v)[0] > 5, - true_fn_0, - false_fn_0, - get_args_0, - set_args_0, ('x_v', ), - push_pop_names=None) + _jst.IfElse( + paddle.mean(x_v)[0] > 5, + true_fn_0, + false_fn_0, + get_args_0, + set_args_0, + ('x_v',), + push_pop_names=None, + ) def get_args_1(): nonlocal __return_0, __return_1, __return_value_0, loss @@ -114,17 +120,19 @@ class StaticCode1(): __return_value_0 = x_v return - _jst.IfElse(label is not None, - true_fn_1, - false_fn_1, - get_args_1, - set_args_1, - ('__return_0', '__return_1', '__return_value_0', 'loss'), - push_pop_names=None) + _jst.IfElse( + label is not None, + true_fn_1, + false_fn_1, + get_args_1, + set_args_1, + ('__return_0', '__return_1', '__return_value_0', 'loss'), + push_pop_names=None, + ) return __return_value_0 -class StaticCode2(): +class StaticCode2: # TODO: Transform return statement def dyfunc_with_if_else(x_v, label=None): loss = _jst.UndefinedVar('loss') @@ -134,11 +142,11 @@ class StaticCode2(): def get_args_2(): nonlocal x_v - return x_v, + return (x_v,) def set_args_2(__args): nonlocal x_v - x_v, = __args + (x_v,) = __args def true_fn_2(): nonlocal x_v @@ -150,12 +158,15 @@ class StaticCode2(): x_v = x_v + 1 return - _jst.IfElse(paddle.mean(x_v)[0] > 5, - true_fn_2, - false_fn_2, - get_args_2, - set_args_2, ('x_v', ), - push_pop_names=None) + _jst.IfElse( + paddle.mean(x_v)[0] > 5, + true_fn_2, + false_fn_2, + get_args_2, + set_args_2, + ('x_v',), + push_pop_names=None, + ) def get_args_3(): nonlocal __return_2, __return_3, __return_value_1, loss @@ -178,18 +189,19 @@ class StaticCode2(): __return_value_1 = x_v return - _jst.IfElse(label is not None, - true_fn_3, - false_fn_3, - get_args_3, - set_args_3, - ('__return_2', '__return_3', '__return_value_1', 'loss'), - push_pop_names=None) + _jst.IfElse( + label is not None, + true_fn_3, + false_fn_3, + get_args_3, + set_args_3, + ('__return_2', '__return_3', '__return_value_1', 'loss'), + push_pop_names=None, + ) return __return_value_1 class NetWithError(fluid.dygraph.layers.Layer): - @declarative def forward(self, x): linear = fluid.dygraph.Linear(32, 64) @@ -198,7 +210,6 @@ class NetWithError(fluid.dygraph.layers.Layer): class TestDygraphToStaticCode(unittest.TestCase): - def setUp(self): # set to print all string diff when assertEqual fails self.maxDiff = None @@ -210,7 +221,8 @@ class TestDygraphToStaticCode(unittest.TestCase): answer = get_source_code(StaticCode1.dyfunc_with_if_else) self.assertEqual( answer.replace('\n', '').replace(' ', ''), - code.replace('\n', '').replace(' ', '')) + code.replace('\n', '').replace(' ', ''), + ) def test_program_translator(self): answer = get_source_code(StaticCode2.dyfunc_with_if_else) @@ -219,11 +231,11 @@ class TestDygraphToStaticCode(unittest.TestCase): print(code) self.assertEqual( answer.replace('\n', 
'').replace(' ', ''), - code.replace('\n', '').replace(' ', '')) + code.replace('\n', '').replace(' ', ''), + ) class TestEnableDeclarative(unittest.TestCase): - def setUp(self): self.x = np.random.randn(30, 10, 32).astype('float32') self.weight = np.random.randn(32, 64).astype('float32') @@ -240,16 +252,20 @@ class TestEnableDeclarative(unittest.TestCase): self.program_translator.enable(True) with fluid.dygraph.guard(): static_output = self.program_translator.get_output( - simple_func, self.x, self.weight) + simple_func, self.x, self.weight + ) self.program_translator.enable(False) with fluid.dygraph.guard(): dygraph_output = self.program_translator.get_output( - simple_func, self.x, self.weight) - np.testing.assert_allclose(static_output.numpy(), - dygraph_output.numpy(), - rtol=1e-05, - atol=1e-4) + simple_func, self.x, self.weight + ) + np.testing.assert_allclose( + static_output.numpy(), + dygraph_output.numpy(), + rtol=1e-05, + atol=1e-4, + ) def test_enable_disable_get_func(self): @@ -266,14 +282,18 @@ class TestEnableDeclarative(unittest.TestCase): self.assertTrue(callable(dygraph_func)) dygraph_output = dygraph_func(self.x, self.weight) self.assertTrue( - isinstance(dygraph_output, - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + dygraph_output, + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) def test_enable_disable_get_program(self): self.program_translator.enable(True) static_output = self.program_translator.get_program( - simple_func, self.x, self.weight) + simple_func, self.x, self.weight + ) self.assertTrue(isinstance(static_output, tuple)) self.assertEqual(len(static_output), 4) self.assertTrue(isinstance(static_output[0], fluid.Program)) @@ -288,10 +308,14 @@ class TestEnableDeclarative(unittest.TestCase): self.program_translator.enable(False) with fluid.dygraph.guard(): dygraph_output = self.program_translator.get_program( - simple_func, self.x, self.weight) + simple_func, self.x, self.weight + ) self.assertTrue( - isinstance(dygraph_output, - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + dygraph_output, + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) def test_enable_disable_declarative(self): @@ -302,14 +326,15 @@ class TestEnableDeclarative(unittest.TestCase): self.program_translator.enable(False) with fluid.dygraph.guard(): dygraph_output = decorated_simple_func(self.x, self.weight) - np.testing.assert_allclose(static_output.numpy(), - dygraph_output.numpy(), - rtol=1e-05, - atol=1e-4) + np.testing.assert_allclose( + static_output.numpy(), + dygraph_output.numpy(), + rtol=1e-05, + atol=1e-4, + ) class Net(fluid.dygraph.layers.Layer): - def __init__(self): super(Net, self).__init__() @@ -318,7 +343,6 @@ class Net(fluid.dygraph.layers.Layer): class TestErrorWithInitFromStaticMode(unittest.TestCase): - def setUp(self): self.program_translator = ProgramTranslator() self.x = np.random.randn(10, 32).astype('float32') @@ -329,17 +353,18 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase): net = Net() self.program_translator.enable(True) - with self.assertRaisesRegexp(RuntimeError, - "only available in dynamic mode"): + with self.assertRaisesRegexp( + RuntimeError, "only available in dynamic mode" + ): self.program_translator.get_output(net.forward, self.x) - with self.assertRaisesRegexp(RuntimeError, - "only available in dynamic mode"): + with self.assertRaisesRegexp( + RuntimeError, "only available in dynamic mode" + ): self.program_translator.get_program(net.forward, self.x) class SwitchModeNet(paddle.nn.Layer): - 
def __init__(self): super(SwitchModeNet, self).__init__() @@ -358,7 +383,6 @@ def switch_mode_funciton(): class TestFunctionTrainEvalMode(unittest.TestCase): - def test_switch_mode(self): paddle.disable_static() switch_mode_funciton.eval() @@ -388,7 +412,6 @@ class TestFunctionTrainEvalMode(unittest.TestCase): class TestIfElseEarlyReturn(unittest.TestCase): - def test_ifelse_early_return1(self): answer = np.zeros([2, 2]) + 1 static_func = paddle.jit.to_static(dyfunc_with_if_else_early_return1) @@ -403,7 +426,6 @@ class TestIfElseEarlyReturn(unittest.TestCase): class TestRemoveCommentInDy2St(unittest.TestCase): - def func_with_comment(self): # Comment1 x = paddle.to_tensor([1, 2, 3]) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py index b37d4af6b39e9c9d72aa55ea4031eb4f48063cff..12150a3d5a0380c3b638e8852598c087165cab60 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py @@ -32,13 +32,9 @@ program_translator = ProgramTranslator() class SimpleLSTMRNN(fluid.Layer): - - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -57,19 +53,26 @@ class SimpleLSTMRNN(fluid.Layer): weight_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): @@ -93,11 +96,12 @@ class SimpleLSTMRNN(fluid.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split(gate_input, - num_or_sections=4, - dim=-1) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1 + ) c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * fluid.layers.tanh(j) + i + ) * fluid.layers.tanh(j) m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) hidden_array[k] = m cell_array[k] = c @@ -107,31 +111,36 @@ class SimpleLSTMRNN(fluid.Layer): step_input = fluid.layers.dropout( step_input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append(step_input) real_res = fluid.layers.concat(res, 1) real_res = fluid.layers.reshape( - real_res, [-1, self._num_steps, self._hidden_size]) + real_res, [-1, self._num_steps, self._hidden_size] + ) last_hidden = 
fluid.layers.concat(hidden_array, 1) last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = fluid.layers.concat(cell_array, 1) last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): + def __init__( + self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -139,11 +148,13 @@ class PtbModel(fluid.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = Embedding( size=[vocab_size, hidden_size], dtype='float32', @@ -151,19 +162,26 @@ class PtbModel(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def build_once(self, input, label, init_hidden, init_cell): pass @@ -172,29 +190,34 @@ class PtbModel(fluid.Layer): def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) x_emb = self.embedding(input) x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = fluid.layers.dropout( x_emb, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = 
fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -221,15 +244,18 @@ def train(place): with fluid.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - dropout=dropout) - - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + dropout=dropout, + ) + + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=ptb_model.parameters() + ) for epoch_id in range(max_epoch): @@ -237,10 +263,12 @@ def train(place): iters = 0.0 total_sample = 0 - init_hidden_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + init_hidden_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) @@ -256,7 +284,8 @@ def train(place): y = to_variable(y_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) out_loss = dy_loss.numpy() dy_loss.backward() @@ -269,15 +298,21 @@ def train(place): if step_id % PRINT_STEP == 0: if step_id == 0: logging.info( - "epoch %d | step %d, loss %0.3f" % - (epoch_id, step_id, total_loss / total_sample)) + "epoch %d | step %d, loss %0.3f" + % (epoch_id, step_id, total_loss / total_sample) + ) avg_batch_time = time.time() else: speed = PRINT_STEP / (time.time() - avg_batch_time) logging.info( "epoch %d | step %d, loss %0.3f, speed %.3f steps/s" - % (epoch_id, step_id, total_loss / total_sample, - speed)) + % ( + epoch_id, + step_id, + total_loss / total_sample, + speed, + ) + ) avg_batch_time = time.time() return out_loss, last_hidden.numpy(), last_cell.numpy() @@ -294,10 +329,12 @@ def train_static(place): class TestPtb(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + ) def test_check_result(self): loss_1, hidden_1, cell_1 = train_static(self.place) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py index ac3c40b599afacc9e7a0c4b4da74fb37b0b272fc..56b0a44bdb538ea75abdd4af4879a6baba1180d9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py @@ -26,13 +26,9 @@ program_translator = paddle.jit.ProgramTranslator() class SimpleLSTMRNN(paddle.nn.Layer): - - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -49,19 +45,28 @@ class SimpleLSTMRNN(paddle.nn.Layer): for i in range(self._num_layers): weight_1 = self.create_parameter( - 
attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform( - low=-self._init_scale, high=self._init_scale)), + attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.Uniform( + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( - attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform( - low=-self._init_scale, high=self._init_scale)), + attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.Uniform( + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=paddle.nn.initializer.Constant(0.0)) + default_initializer=paddle.nn.initializer.Constant(0.0), + ) self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): @@ -85,11 +90,12 @@ class SimpleLSTMRNN(paddle.nn.Layer): gate_input = paddle.matmul(x=nn, y=weight_1) gate_input = paddle.add(x=gate_input, y=bias) - i, j, f, o = paddle.split(x=gate_input, - num_or_sections=4, - axis=-1) + i, j, f, o = paddle.split( + x=gate_input, num_or_sections=4, axis=-1 + ) c = pre_cell * paddle.nn.functional.sigmoid( - f) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j) + f + ) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j) m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o) hidden_array[k] = m cell_array[k] = c @@ -99,31 +105,36 @@ class SimpleLSTMRNN(paddle.nn.Layer): step_input = paddle.nn.functional.dropout( step_input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append(step_input) real_res = paddle.concat(x=res, axis=1) - real_res = paddle.reshape(real_res, - [-1, self._num_steps, self._hidden_size]) + real_res = paddle.reshape( + real_res, [-1, self._num_steps, self._hidden_size] + ) last_hidden = paddle.concat(x=hidden_array, axis=1) last_hidden = paddle.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = paddle.concat(x=cell_array, axis=1) last_cell = paddle.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(paddle.nn.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): + def __init__( + self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -131,31 +142,40 @@ class PtbModel(paddle.nn.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = paddle.fluid.dygraph.nn.Embedding( size=[vocab_size, 
hidden_size], dtype='float32', is_sparse=False, param_attr=paddle.ParamAttr( name='embedding_para', - initializer=paddle.nn.initializer.Uniform(low=-init_scale, - high=init_scale))) + initializer=paddle.nn.initializer.Uniform( + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def build_once(self, input, label, init_hidden, init_cell): pass @@ -163,29 +183,35 @@ class PtbModel(paddle.nn.Layer): @paddle.jit.to_static def forward(self, input, label, init_hidden, init_cell): - init_h = paddle.reshape(init_hidden, - shape=[self.num_layers, -1, self.hidden_size]) + init_h = paddle.reshape( + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) - init_c = paddle.reshape(init_cell, - shape=[self.num_layers, -1, self.hidden_size]) + init_c = paddle.reshape( + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) x_emb = self.embedding(input) - x_emb = paddle.reshape(x_emb, - shape=[-1, self.num_steps, self.hidden_size]) + x_emb = paddle.reshape( + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = paddle.nn.functional.dropout( x_emb, dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) projection = paddle.matmul(x=rnn_out, y=self.softmax_weight) projection = paddle.add(x=projection, y=self.softmax_bias) loss = paddle.nn.functional.softmax_with_cross_entropy( - logits=projection, label=label, soft_label=False) + logits=projection, label=label, soft_label=False + ) loss = paddle.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.mean(loss, axis=[0]) loss = paddle.fluid.layers.reduce_sum(loss) @@ -212,15 +238,18 @@ def train(place): paddle.disable_static(place) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - dropout=dropout) - - sgd = paddle.optimizer.SGD(learning_rate=1e-3, - parameters=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + dropout=dropout, + ) + + sgd = paddle.optimizer.SGD( + learning_rate=1e-3, parameters=ptb_model.parameters() + ) for epoch_id in range(max_epoch): @@ -228,19 +257,19 @@ def train(place): iters = 0.0 total_sample = 0 - init_hidden_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') - - init_hidden = paddle.to_tensor(data=init_hidden_data, - dtype=None, - place=None, - stop_gradient=True) - init_cell = paddle.to_tensor(data=init_cell_data, - dtype=None, - place=None, - stop_gradient=True) + init_hidden_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) + 
init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) + + init_hidden = paddle.to_tensor( + data=init_hidden_data, dtype=None, place=None, stop_gradient=True + ) + init_cell = paddle.to_tensor( + data=init_cell_data, dtype=None, place=None, stop_gradient=True + ) for step_id in range(batch_num): x_data = np.arange(12).reshape(4, 3).astype('int64') y_data = np.arange(1, 13).reshape(4, 3).astype('int64') @@ -249,17 +278,16 @@ def train(place): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, num_steps, 1)) - x = paddle.to_tensor(data=x_data, - dtype=None, - place=None, - stop_gradient=True) - y = paddle.to_tensor(data=y_data, - dtype=None, - place=None, - stop_gradient=True) - - dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, - init_cell) + x = paddle.to_tensor( + data=x_data, dtype=None, place=None, stop_gradient=True + ) + y = paddle.to_tensor( + data=y_data, dtype=None, place=None, stop_gradient=True + ) + + dy_loss, last_hidden, last_cell = ptb_model( + x, y, init_hidden, init_cell + ) out_loss = dy_loss.numpy() dy_loss.backward() @@ -271,14 +299,17 @@ def train(place): total_sample += 1 if step_id % PRINT_STEP == 0: if step_id == 0: - logging.info("epoch %d | step %d, loss %0.3f" % - (epoch_id, step_id, total_loss / total_sample)) + logging.info( + "epoch %d | step %d, loss %0.3f" + % (epoch_id, step_id, total_loss / total_sample) + ) avg_batch_time = time.time() else: speed = PRINT_STEP / (time.time() - avg_batch_time) logging.info( - "epoch %d | step %d, loss %0.3f, speed %.3f steps/s" % - (epoch_id, step_id, total_loss / total_sample, speed)) + "epoch %d | step %d, loss %0.3f, speed %.3f steps/s" + % (epoch_id, step_id, total_loss / total_sample, speed) + ) avg_batch_time = time.time() ret = out_loss, last_hidden.numpy(), last_cell.numpy() @@ -297,10 +328,12 @@ def train_static(place): class TestPtb(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_check_result(self): loss_1, hidden_1, cell_1 = train_static(self.place) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 99361fa3949b3a4e43d0c1ffa35c2ccd90bcbff9..d4bcd98e6449f6763ba6c0d0cd24b25439004a72 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -29,7 +29,6 @@ program_translator = ProgramTranslator() class Policy(Layer): - def __init__(self): super(Policy, self).__init__() @@ -74,17 +73,18 @@ def train(args, place, to_static): eps = np.finfo(np.float32).eps.item() optimizer = fluid.optimizer.AdamaxOptimizer( - learning_rate=1e-2, parameter_list=policy.parameters()) + learning_rate=1e-2, parameter_list=policy.parameters() + ) def get_mean_and_std(values=[]): - n = 0. - s = 0. + n = 0.0 + s = 0.0 for val in values: s += val n += 1 mean = s / n - std = 0. + std = 0.0 for val in values: std += (val - mean) * (val - mean) std /= n @@ -99,14 +99,14 @@ def train(args, place, to_static): while idx < len(probs) and sample > probs[idx]: sample -= probs[idx] idx += 1 - mask = [0.] * len(probs) - mask[idx] = 1. 
+ mask = [0.0] * len(probs) + mask[idx] = 1.0 return idx, np.array([mask]).astype("float32") def choose_best_action(probs): idx = 0 if probs[0] > probs[1] else 1 - mask = [1., 0.] if idx == 0 else [0., 1.] + mask = [1.0, 0.0] if idx == 0 else [0.0, 1.0] return idx, np.array([mask]).astype("float32") @@ -189,9 +189,10 @@ def train(args, place, to_static): running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward if i_episode % args.log_interval == 0: print( - 'Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}\t loss_probs: {}' - .format(i_episode, ep_reward, running_reward, - loss.numpy()[0])) + 'Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}\t loss_probs: {}'.format( + i_episode, ep_reward, running_reward, loss.numpy()[0] + ) + ) if i_episode > args.train_step: break @@ -200,10 +201,12 @@ def train(args, place, to_static): class TestDeclarative(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + ) self.args = Args() def test_train(self): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py index 86d215faadd320eebca34b7fc65b57c707aa5dbc..a1f85bd3da3f1546997dfcec19c00f08e64a56de 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py @@ -36,8 +36,9 @@ l2_decay = 1e-4 # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. batch_size = 2 epoch_num = 1 -place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ - else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) program_translator = ProgramTranslator() @@ -50,30 +51,34 @@ def optimizer_setting(parameter_list=None): learning_rate=base_lr, momentum=momentum_rate, regularization=fluid.regularizer.L2Decay(l2_decay), - parameter_list=parameter_list) + parameter_list=parameter_list, + ) return optimizer class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=False) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False, + ) self._batch_norm = BatchNorm(num_filters, act=act) @@ -85,29 +90,36 @@ class ConvBNLayer(fluid.dygraph.Layer): class BottleneckBlock(fluid.dygraph.Layer): - def __init__(self, num_channels, num_filters, stride, shortcut=True): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act='relu') - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - act='relu') - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 4, - filter_size=1, - act=None) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + 
act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 4, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut @@ -125,20 +137,23 @@ class BottleneckBlock(fluid.dygraph.Layer): y = fluid.layers.elementwise_add(x=short, y=conv2) - layer_helper = fluid.layer_helper.LayerHelper(self.full_name(), - act='relu') + layer_helper = fluid.layer_helper.LayerHelper( + self.full_name(), act='relu' + ) return layer_helper.append_activation(y) class ResNet(fluid.dygraph.Layer): - def __init__(self, layers=50, class_dim=102): super(ResNet, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: depth = [3, 4, 6, 3] @@ -149,15 +164,12 @@ class ResNet(fluid.dygraph.Layer): num_channels = [64, 256, 512, 1024] num_filters = [64, 128, 256, 512] - self.conv = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool2d_max = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv = ConvBNLayer( + num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu' + ) + self.pool2d_max = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] for block in range(len(depth)): @@ -165,17 +177,21 @@ class ResNet(fluid.dygraph.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut)) + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 + else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + ), + ) self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1 @@ -186,7 +202,9 @@ class ResNet(fluid.dygraph.Layer): class_dim, act='softmax', param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + ) def forward(self, inputs): y = self.conv(inputs) @@ -201,7 +219,6 @@ class ResNet(fluid.dygraph.Layer): def reader_decorator(reader): - def __reader__(): for item in reader(): img = np.array(item[0]).astype('float32').reshape(3, 224, 224) @@ -212,15 +229,15 @@ def reader_decorator(reader): class ResNetHelper: - def __init__(self): self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'inference') self.model_save_prefix = os.path.join(self.model_save_dir, 'resnet') self.model_filename = 
'resnet' + INFER_MODEL_SUFFIX self.params_filename = 'resnet' + INFER_PARAMS_SUFFIX - self.dy_state_dict_save_path = os.path.join(self.temp_dir.name, - 'resnet.dygraph') + self.dy_state_dict_save_path = os.path.join( + self.temp_dir.name, 'resnet.dygraph' + ) def __del__(self): self.temp_dir.cleanup() @@ -234,18 +251,21 @@ class ResNetHelper: paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) - data_loader = fluid.io.DataLoader.from_generator(capacity=5, - iterable=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) + data_loader = fluid.io.DataLoader.from_generator( + capacity=5, iterable=True + ) data_loader.set_sample_list_generator(train_reader) resnet = ResNet() if to_static: - resnet = paddle.jit.to_static(resnet, - build_strategy=build_strategy) + resnet = paddle.jit.to_static( + resnet, build_strategy=build_strategy + ) optimizer = optimizer_setting(parameter_list=resnet.parameters()) for epoch in range(epoch_num): @@ -261,12 +281,12 @@ class ResNetHelper: pred = resnet(img) loss = fluid.layers.cross_entropy(input=pred, label=label) avg_loss = paddle.mean(x=loss) - acc_top1 = fluid.layers.accuracy(input=pred, - label=label, - k=1) - acc_top5 = fluid.layers.accuracy(input=pred, - label=label, - k=5) + acc_top1 = fluid.layers.accuracy( + input=pred, label=label, k=1 + ) + acc_top5 = fluid.layers.accuracy( + input=pred, label=label, k=5 + ) avg_loss.backward() optimizer.minimize(avg_loss) @@ -279,17 +299,27 @@ class ResNetHelper: end_time = time.time() if batch_id % 2 == 0: - print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \ - ( epoch, batch_id, total_loss.numpy() / total_sample, \ - total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time)) + print( + "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" + % ( + epoch, + batch_id, + total_loss.numpy() / total_sample, + total_acc1.numpy() / total_sample, + total_acc5.numpy() / total_sample, + end_time - start_time, + ) + ) if batch_id == 10: if to_static: - fluid.dygraph.jit.save(resnet, - self.model_save_prefix) + fluid.dygraph.jit.save( + resnet, self.model_save_prefix + ) else: fluid.dygraph.save_dygraph( resnet.state_dict(), - self.dy_state_dict_save_path) + self.dy_state_dict_save_path, + ) # avoid dataloader throw abort signaal data_loader._reset() break @@ -302,7 +332,8 @@ class ResNetHelper: resnet = ResNet() model_dict, _ = fluid.dygraph.load_dygraph( - self.dy_state_dict_save_path) + self.dy_state_dict_save_path + ) resnet.set_dict(model_dict) resnet.eval() @@ -313,15 +344,22 @@ class ResNetHelper: def predict_static(self, data): paddle.enable_static() exe = fluid.Executor(place) - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) - - pred_res = exe.run(inference_program, - feed={feed_target_names[0]: data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) + + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: data}, + 
fetch_list=fetch_targets, + ) return pred_res[0] @@ -335,14 +373,17 @@ class ResNetHelper: return pred_res.numpy() def predict_analysis_inference(self, data): - output = PredictorTools(self.model_save_dir, self.model_filename, - self.params_filename, [data]) - out, = output() + output = PredictorTools( + self.model_save_dir, + self.model_filename, + self.params_filename, + [data], + ) + (out,) = output() return out class TestResnet(unittest.TestCase): - def setUp(self): self.resnet_helper = ResNetHelper() @@ -360,19 +401,24 @@ class TestResnet(unittest.TestCase): dy_pre, st_pre, rtol=1e-05, - err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre)) + err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre), + ) np.testing.assert_allclose( dy_jit_pre, st_pre, rtol=1e-05, err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre)) + dy_jit_pre, st_pre + ), + ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre)) + predictor_pre, st_pre + ), + ) def test_resnet(self): static_loss = self.train(to_static=True) @@ -382,7 +428,9 @@ class TestResnet(unittest.TestCase): dygraph_loss, rtol=1e-05, err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss)) + static_loss, dygraph_loss + ), + ) self.verify_predict() def test_in_static_mode_mkldnn(self): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_amp.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_amp.py index c535b7223db47d4292a8625fd4702c27f19cdfff..aa53f663deb1f5dcb64c95b12cf08f2268c30f28 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_amp.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_amp.py @@ -25,8 +25,9 @@ from test_resnet import ResNet, optimizer_setting, SEED # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. 
batch_size = 2 epoch_num = 1 -place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ - else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) program_translator = ProgramTranslator() @@ -58,10 +59,13 @@ def train(to_static, build_strategy=None): for batch_id in range(100): start_time = time.time() img = paddle.to_tensor( - np.random.random([batch_size, 3, 224, - 224]).astype('float32')) + np.random.random([batch_size, 3, 224, 224]).astype( + 'float32' + ) + ) label = paddle.to_tensor( - np.random.randint(0, 100, [batch_size, 1], dtype='int64')) + np.random.randint(0, 100, [batch_size, 1], dtype='int64') + ) img.stop_gradient = True label.stop_gradient = True @@ -87,9 +91,17 @@ def train(to_static, build_strategy=None): end_time = time.time() if batch_id % 2 == 0: - print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \ - ( epoch, batch_id, total_loss.numpy() / total_sample, \ - total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time)) + print( + "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" + % ( + epoch, + batch_id, + total_loss.numpy() / total_sample, + total_acc1.numpy() / total_sample, + total_acc5.numpy() / total_sample, + end_time - start_time, + ) + ) if batch_id == 10: break @@ -97,7 +109,6 @@ def train(to_static, build_strategy=None): class TestResnet(unittest.TestCase): - def train(self, to_static): program_translator.enable(to_static) return train(to_static) @@ -110,7 +121,9 @@ class TestResnet(unittest.TestCase): dygraph_loss, rtol=1e-05, err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss)) + static_loss, dygraph_loss + ), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_pure_fp16.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_pure_fp16.py index f4d724826ab7d7e2a7bd3d5aea558067a1b3472b..9951d67c2b1a7a05fe00a2ee61e314815ccccf9c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_pure_fp16.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_pure_fp16.py @@ -46,10 +46,9 @@ def train(to_static, build_strategy=None): optimizer = optimizer_setting(parameter_list=resnet.parameters()) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - resnet, optimizer = paddle.amp.decorate(models=resnet, - optimizers=optimizer, - level='O2', - save_dtype='float32') + resnet, optimizer = paddle.amp.decorate( + models=resnet, optimizers=optimizer, level='O2', save_dtype='float32' + ) for epoch in range(epoch_num): loss_data = [] @@ -61,16 +60,20 @@ def train(to_static, build_strategy=None): for batch_id in range(100): start_time = time.time() img = paddle.to_tensor( - np.random.random([batch_size, 3, 224, 224]).astype('float32')) + np.random.random([batch_size, 3, 224, 224]).astype('float32') + ) label = paddle.to_tensor( - np.random.randint(0, 100, [batch_size, 1], dtype='int64')) + np.random.randint(0, 100, [batch_size, 1], dtype='int64') + ) img.stop_gradient = True label.stop_gradient = True - with paddle.amp.auto_cast(enable=True, - custom_white_list=None, - custom_black_list=None, - level='O2'): + with paddle.amp.auto_cast( + enable=True, + custom_white_list=None, + custom_black_list=None, + level='O2', + ): pred = resnet(img) loss = fluid.layers.cross_entropy(input=pred, label=label) avg_loss = paddle.mean(x=pred) @@ -90,9 +93,17 @@ def train(to_static, build_strategy=None): 
end_time = time.time() if batch_id % 2 == 0: - print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \ - ( epoch, batch_id, total_loss.numpy() / total_sample, \ - total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time)) + print( + "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" + % ( + epoch, + batch_id, + total_loss.numpy() / total_sample, + total_acc1.numpy() / total_sample, + total_acc5.numpy() / total_sample, + end_time - start_time, + ) + ) if batch_id == 10: break @@ -100,7 +111,6 @@ def train(to_static, build_strategy=None): class TestResnet(unittest.TestCase): - def train(self, to_static): program_translator.enable(to_static) build_strategy = paddle.static.BuildStrategy() @@ -120,7 +130,9 @@ class TestResnet(unittest.TestCase): rtol=1e-05, atol=0.001, err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss)) + static_loss, dygraph_loss + ), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py index 301da29d6d9e6340ee035dc52c1367eb76184fae..f4f2e8154494c552469d1ec9b29d463b733bcb3c 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py @@ -34,8 +34,9 @@ l2_decay = 1e-4 # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. batch_size = 2 epoch_num = 1 -place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ - else paddle.CPUPlace() +place = ( + paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() +) program_translator = paddle.jit.ProgramTranslator() @@ -48,29 +49,33 @@ def optimizer_setting(parameter_list=None): learning_rate=base_lr, momentum=momentum_rate, weight_decay=paddle.regularizer.L2Decay(l2_decay), - parameters=parameter_list) + parameters=parameter_list, + ) return optimizer class ConvBNLayer(paddle.nn.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = paddle.nn.Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv = paddle.nn.Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) self._batch_norm = paddle.nn.BatchNorm(num_filters, act=act) @@ -82,29 +87,36 @@ class ConvBNLayer(paddle.nn.Layer): class BottleneckBlock(paddle.nn.Layer): - def __init__(self, num_channels, num_filters, stride, shortcut=True): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act='relu') - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - act='relu') - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 4, - filter_size=1, - act=None) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + 
filter_size=3, + stride=stride, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 4, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut @@ -122,20 +134,23 @@ class BottleneckBlock(paddle.nn.Layer): y = paddle.add(x=short, y=conv2) - layer_helper = paddle.fluid.layer_helper.LayerHelper(self.full_name(), - act='relu') + layer_helper = paddle.fluid.layer_helper.LayerHelper( + self.full_name(), act='relu' + ) return layer_helper.append_activation(y) class ResNet(paddle.nn.Layer): - def __init__(self, layers=50, class_dim=102): super(ResNet, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: depth = [3, 4, 6, 3] @@ -146,15 +161,12 @@ class ResNet(paddle.nn.Layer): num_channels = [64, 256, 512, 1024] num_filters = [64, 128, 256, 512] - self.conv = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool2d_max = paddle.fluid.dygraph.Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv = ConvBNLayer( + num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu' + ) + self.pool2d_max = paddle.fluid.dygraph.Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] for block in range(len(depth)): @@ -162,17 +174,21 @@ class ResNet(paddle.nn.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut)) + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 + else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + ), + ) self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = paddle.fluid.dygraph.Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = paddle.fluid.dygraph.Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1 @@ -182,7 +198,9 @@ class ResNet(paddle.nn.Layer): in_features=self.pool2d_avg_output, out_features=class_dim, weight_attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Uniform(-stdv, stdv))) + initializer=paddle.nn.initializer.Uniform(-stdv, stdv) + ), + ) @paddle.jit.to_static def forward(self, inputs): @@ -199,7 +217,6 @@ class ResNet(paddle.nn.Layer): def reader_decorator(reader): - def __reader__(): for item in reader(): img = np.array(item[0]).astype('float32').reshape(3, 224, 224) @@ -210,17 +227,22 @@ def reader_decorator(reader): class TestResnet(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, "./inference") - self.model_save_prefix = os.path.join(self.temp_dir.name, - "./inference/resnet_v2") - 
self.model_filename = "resnet_v2" + paddle.fluid.dygraph.io.INFER_MODEL_SUFFIX - self.params_filename = "resnet_v2" + paddle.fluid.dygraph.io.INFER_PARAMS_SUFFIX - self.dy_state_dict_save_path = os.path.join(self.temp_dir.name, - "./resnet_v2.dygraph") + self.model_save_prefix = os.path.join( + self.temp_dir.name, "./inference/resnet_v2" + ) + self.model_filename = ( + "resnet_v2" + paddle.fluid.dygraph.io.INFER_MODEL_SUFFIX + ) + self.params_filename = ( + "resnet_v2" + paddle.fluid.dygraph.io.INFER_PARAMS_SUFFIX + ) + self.dy_state_dict_save_path = os.path.join( + self.temp_dir.name, "./resnet_v2.dygraph" + ) def tearDown(self): self.temp_dir.cleanup() @@ -234,12 +256,14 @@ class TestResnet(unittest.TestCase): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - train_reader = paddle.batch(reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True) - data_loader = paddle.io.DataLoader.from_generator(capacity=5, - iterable=True) + train_reader = paddle.batch( + reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), + batch_size=batch_size, + drop_last=True, + ) + data_loader = paddle.io.DataLoader.from_generator( + capacity=5, iterable=True + ) data_loader.set_sample_list_generator(train_reader) resnet = ResNet() @@ -256,8 +280,9 @@ class TestResnet(unittest.TestCase): img, label = data pred = resnet(img) - loss = paddle.nn.functional.cross_entropy(input=pred, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=pred, label=label + ) avg_loss = paddle.mean(x=loss) acc_top1 = paddle.metric.accuracy(input=pred, label=label, k=1) acc_top5 = paddle.metric.accuracy(input=pred, label=label, k=5) @@ -273,15 +298,24 @@ class TestResnet(unittest.TestCase): end_time = time.time() if batch_id % 2 == 0: - print( "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" % \ - ( epoch, batch_id, total_loss.numpy() / total_sample, \ - total_acc1.numpy() / total_sample, total_acc5.numpy() / total_sample, end_time-start_time)) + print( + "epoch %d | batch step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, time %f" + % ( + epoch, + batch_id, + total_loss.numpy() / total_sample, + total_acc1.numpy() / total_sample, + total_acc5.numpy() / total_sample, + end_time - start_time, + ) + ) if batch_id == 10: if to_static: paddle.jit.save(resnet, self.model_save_prefix) else: paddle.fluid.dygraph.save_dygraph( - resnet.state_dict(), self.dy_state_dict_save_path) + resnet.state_dict(), self.dy_state_dict_save_path + ) # avoid dataloader throw abort signaal data_loader._reset() break @@ -295,15 +329,16 @@ class TestResnet(unittest.TestCase): resnet = ResNet() model_dict, _ = paddle.fluid.dygraph.load_dygraph( - self.dy_state_dict_save_path) + self.dy_state_dict_save_path + ) resnet.set_dict(model_dict) resnet.eval() pred_res = resnet( - paddle.to_tensor(data=data, - dtype=None, - place=None, - stop_gradient=True)) + paddle.to_tensor( + data=data, dtype=None, place=None, stop_gradient=True + ) + ) ret = pred_res.numpy() paddle.enable_static() @@ -311,16 +346,22 @@ class TestResnet(unittest.TestCase): def predict_static(self, data): exe = paddle.static.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = paddle.static.load_inference_model( - self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) - - pred_res = exe.run(inference_program, - feed={feed_target_names[0]: data}, - fetch_list=fetch_targets) + [ + inference_program, + 
feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) + + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: data}, + fetch_list=fetch_targets, + ) return pred_res[0] @@ -336,9 +377,13 @@ class TestResnet(unittest.TestCase): return ret def predict_analysis_inference(self, data): - output = PredictorTools(self.model_save_dir, self.model_filename, - self.params_filename, [data]) - out, = output() + output = PredictorTools( + self.model_save_dir, + self.model_filename, + self.params_filename, + [data], + ) + (out,) = output() return out def train(self, to_static): @@ -355,19 +400,24 @@ class TestResnet(unittest.TestCase): dy_pre, st_pre, rtol=1e-05, - err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre)) + err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre), + ) np.testing.assert_allclose( dy_jit_pre, st_pre, rtol=1e-05, err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre)) + dy_jit_pre, st_pre + ), + ) np.testing.assert_allclose( predictor_pre, st_pre, rtol=1e-05, err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format( - predictor_pre, st_pre)) + predictor_pre, st_pre + ), + ) def test_resnet(self): static_loss = self.train(to_static=True) @@ -377,7 +427,9 @@ class TestResnet(unittest.TestCase): dygraph_loss, rtol=1e-05, err_msg='static_loss: {} \n dygraph_loss: {}'.format( - static_loss, dygraph_loss)) + static_loss, dygraph_loss + ), + ) self.verify_predict() def test_in_static_mode_mkldnn(self): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py index e3323dd084a6a7024be9f8903506907cd725bea1..a194cede9c0dfa80d4baa94fc422f7381ff6f4b9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py @@ -168,7 +168,7 @@ def test_return_list_many_values(x): def test_return_tuple_one_value(x): x = fluid.dygraph.to_variable(x) x += 1 - return (x, ) + return (x,) @to_static @@ -250,7 +250,6 @@ def test_return_in_for_2(x): @to_static def test_return_nested(x): - def func(): rr = 0 if True: @@ -266,11 +265,13 @@ def test_return_nested(x): class TestReturnBase(unittest.TestCase): - def setUp(self): self.input = np.ones((1)).astype('int32') - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.init_dygraph_func() self.program_translator = ProgramTranslator() @@ -294,9 +295,9 @@ class TestReturnBase(unittest.TestCase): self.assertTrue(isinstance(static_res, tuple)) self.assertEqual(len(dygraph_res), len(static_res)) for i in range(len(dygraph_res)): - np.testing.assert_allclose(dygraph_res[i], - static_res[i], - rtol=1e-05) + np.testing.assert_allclose( + dygraph_res[i], static_res[i], rtol=1e-05 + ) elif isinstance(dygraph_res, np.ndarray): np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) else: @@ -311,134 +312,113 @@ class TestReturnBase(unittest.TestCase): class TestInsideFuncBase(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_inside_func_base class TestReturnIf(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_if class TestReturnOnlyIf(TestReturnBase): - def init_dygraph_func(self): 
self.dygraph_func = test_return_if_else_2 class TestReturnInFor(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_in_for class TestReturnInWhile(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_in_while class TestReturnIfDiff(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_diff_return class TestReturnIfElse(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_if_else class TestReturnInWhile2(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_in_while_2 self.error = "Found return statement in While or For body and loop" class TestReturnInFor2(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_in_for_2 self.error = "Found return statement in While or For body and loop" class TestRecursiveReturn(TestReturnBase): - def init_dygraph_func(self): self.input = self.input.astype(np.float32) self.dygraph_func = test_recursive_return class TestReturnDifferentLengthIfBody(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_different_length_if_body self.error = "Your if/else have different number of return value." class TestReturnDifferentLengthElse(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_different_length_else self.error = "Your if/else have different number of return value." class TestNoReturn(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_no_return class TestReturnNone(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_none self.error = "Your if/else have different number of return value." class TestReturnNoVariable(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_no_variable self.error = "Your if/else have different number of return value." 
class TestReturnListOneValue(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_list_one_value class TestReturnListManyValue(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_list_many_values class TestReturnTupleOneValue(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_tuple_one_value class TestReturnTupleManyValue(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_tuple_many_values class TestReturnNested(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_nested class TestReturnSpecial(TestReturnBase): - def init_dygraph_func(self): self.dygraph_func = test_return_without_paddle_cond diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py index f949e9c0da78de8800d66f33b8d0694648ac481e..90c017ee9e7698707f5ab2284a2e24e6fcefc7bf 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py @@ -16,11 +16,12 @@ import unittest import paddle import numpy as np from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.program_translator import StaticFunction +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + StaticFunction, +) class Net(paddle.nn.Layer): - def __init__(self): super(Net, self).__init__() self.sub = SubNet() @@ -38,7 +39,6 @@ class Net(paddle.nn.Layer): class SubNet(paddle.nn.Layer): - def __init__(self): super(SubNet, self).__init__() @@ -59,15 +59,14 @@ class SubNet(paddle.nn.Layer): def foo(x, flag=False): if flag: - out = x * 2. + out = x * 2.0 else: - out = x / 2. 
+ out = x / 2.0 return out class TestRollBackPlainFunction(unittest.TestCase): - def setUp(self): paddle.set_device("cpu") @@ -86,7 +85,6 @@ class TestRollBackPlainFunction(unittest.TestCase): class TestRollBackNet(unittest.TestCase): - def setUp(self): paddle.set_device("cpu") @@ -118,8 +116,9 @@ class TestRollBackNet(unittest.TestCase): self.assertFalse(isinstance(net.infer, StaticFunction)) self.assertFalse("true_fn" in func_to_source_code(net.sub.forward)) dy_infer_out = net.infer(x) - np.testing.assert_array_equal(st_infer_out.numpy(), - dy_infer_out.numpy()) + np.testing.assert_array_equal( + st_infer_out.numpy(), dy_infer_out.numpy() + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py index 60aa64abe66f4932aed838956413ee2502e750ae..f1c3bd4c1b617d47eca6f0766064e86e7255cd50 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py @@ -21,20 +21,22 @@ import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator from paddle.fluid.dygraph.jit import declarative -from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from +from paddle.fluid.dygraph.dygraph_to_static.partial_program import ( + partial_program_from, +) from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX SEED = 2020 np.random.seed(SEED) -place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) program_translator = ProgramTranslator() class SimpleFcLayer(fluid.dygraph.Layer): - def __init__(self, fc_size): super(SimpleFcLayer, self).__init__() self._linear = fluid.dygraph.Linear(fc_size, fc_size) @@ -48,7 +50,6 @@ class SimpleFcLayer(fluid.dygraph.Layer): class TestDyToStaticSaveInferenceModel(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -64,8 +65,9 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase): x = fluid.dygraph.to_variable(x_data) layer = SimpleFcLayer(fc_size) - adam = fluid.optimizer.SGD(learning_rate=0.1, - parameter_list=layer.parameters()) + adam = fluid.optimizer.SGD( + learning_rate=0.1, parameter_list=layer.parameters() + ) for i in range(5): loss, pred = layer(x) @@ -74,65 +76,78 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase): layer.clear_gradients() # test for saving model in dygraph.guard infer_model_prefix = os.path.join( - self.temp_dir.name, "test_dy2stat_inference_in_guard/model") - infer_model_dir = os.path.join(self.temp_dir.name, - "test_dy2stat_inference_in_guard") - fluid.dygraph.jit.save(layer=layer, - path=infer_model_prefix, - input_spec=[x], - output_spec=[pred]) + self.temp_dir.name, "test_dy2stat_inference_in_guard/model" + ) + infer_model_dir = os.path.join( + self.temp_dir.name, "test_dy2stat_inference_in_guard" + ) + fluid.dygraph.jit.save( + layer=layer, + path=infer_model_prefix, + input_spec=[x], + output_spec=[pred], + ) # Check the correctness of the inference dygraph_out, _ = layer(x) self.check_save_inference_model(layer, [x_data], dygraph_out.numpy()) - self.check_save_inference_model(layer, [x_data], - dygraph_out.numpy(), - fetch=[loss]) - self.check_save_inference_model(layer, [x_data], - dygraph_out.numpy(), - 
feed=[x]) - - def check_save_inference_model(self, - model, - inputs, - gt_out, - feed=None, - fetch=None): + self.check_save_inference_model( + layer, [x_data], dygraph_out.numpy(), fetch=[loss] + ) + self.check_save_inference_model( + layer, [x_data], dygraph_out.numpy(), feed=[x] + ) + + def check_save_inference_model( + self, model, inputs, gt_out, feed=None, fetch=None + ): expected_persistable_vars = set([p.name for p in model.parameters()]) - infer_model_prefix = os.path.join(self.temp_dir.name, - "test_dy2stat_inference/model") - infer_model_dir = os.path.join(self.temp_dir.name, - "test_dy2stat_inference") + infer_model_prefix = os.path.join( + self.temp_dir.name, "test_dy2stat_inference/model" + ) + infer_model_dir = os.path.join( + self.temp_dir.name, "test_dy2stat_inference" + ) model_filename = "model" + INFER_MODEL_SUFFIX params_filename = "model" + INFER_PARAMS_SUFFIX - fluid.dygraph.jit.save(layer=model, - path=infer_model_prefix, - input_spec=feed if feed else None, - output_spec=fetch if fetch else None) + fluid.dygraph.jit.save( + layer=model, + path=infer_model_prefix, + input_spec=feed if feed else None, + output_spec=fetch if fetch else None, + ) # Check the correctness of the inference - infer_out = self.load_and_run_inference(infer_model_dir, model_filename, - params_filename, inputs) + infer_out = self.load_and_run_inference( + infer_model_dir, model_filename, params_filename, inputs + ) np.testing.assert_allclose(gt_out, infer_out, rtol=1e-05) - def load_and_run_inference(self, model_path, model_filename, - params_filename, inputs): + def load_and_run_inference( + self, model_path, model_filename, params_filename, inputs + ): paddle.enable_static() exe = fluid.Executor(place) - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(dirname=model_path, - executor=exe, - model_filename=model_filename, - params_filename=params_filename) - results = exe.run(inference_program, - feed=dict(zip(feed_target_names, inputs)), - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + dirname=model_path, + executor=exe, + model_filename=model_filename, + params_filename=params_filename, + ) + results = exe.run( + inference_program, + feed=dict(zip(feed_target_names, inputs)), + fetch_list=fetch_targets, + ) return np.array(results[0]) class TestPartialProgramRaiseError(unittest.TestCase): - def test_param_type(self): program_translator = ProgramTranslator() program_translator.enable(True) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py index 42be11182c0828ea51faad2392e1ae6f2f2756c5..0898b72730441b0cf0de41d5f3875ba3cef17934 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py @@ -25,16 +25,17 @@ from test_fetch_feed import Linear np.random.seed(2020) -place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) class TestDyToStaticSaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "test_dy2stat_save_load") + self.model_path = os.path.join( + self.temp_dir.name, "test_dy2stat_save_load" + ) def tearDown(self): self.temp_dir.cleanup() @@ -49,8 +50,9 
@@ class TestDyToStaticSaveLoad(unittest.TestCase): program_translator.enable(True) x = fluid.dygraph.to_variable(x_data) net = Linear(32, 64) - adam = AdamOptimizer(learning_rate=0.1, - parameter_list=net.parameters()) + adam = AdamOptimizer( + learning_rate=0.1, parameter_list=net.parameters() + ) for i in range(batch_num): static_out, static_loss = net(x) @@ -81,12 +83,12 @@ class TestDyToStaticSaveLoad(unittest.TestCase): program_translator.enable(False) dygraph_out, dygraph_loss = dygraph_net(x) - np.testing.assert_allclose(dygraph_out.numpy(), - static_out.numpy(), - rtol=1e-05) - np.testing.assert_allclose(dygraph_loss.numpy(), - static_loss.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + dygraph_out.numpy(), static_out.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + dygraph_loss.numpy(), static_loss.numpy(), rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py index 2a86c3f77a1f60507587aa573b522041c045dbab..74f6bdb2d61017749dfb5eaf64d8441cff863da7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py @@ -38,8 +38,9 @@ EPOCH_NUM = 1 PRINT_STEP = 2 STEP_NUM = 10 -place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ - else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, @@ -52,7 +53,7 @@ train_parameters = { "name": "cosine_decay", "batch_size": BATCH_SIZE, "epochs": [40, 80, 100], - "steps": [0.1, 0.01, 0.001, 0.0001] + "steps": [0.1, 0.01, 0.001, 0.0001], }, "lr": 0.0125, "total_images": 6149, @@ -78,35 +79,39 @@ def optimizer_setting(params, parameter_list): lr = params["lr"] num_epochs = params["num_epochs"] optimizer = fluid.optimizer.Momentum( - learning_rate=fluid.layers.cosine_decay(learning_rate=lr, - step_each_epoch=step, - epochs=num_epochs), + learning_rate=fluid.layers.cosine_decay( + learning_rate=lr, step_each_epoch=step, epochs=num_epochs + ), momentum=momentum_rate, regularization=fluid.regularizer.L2Decay(l2_decay), - parameter_list=parameter_list) + parameter_list=parameter_list, + ) return optimizer class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=False) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False, + ) self._batch_norm = BatchNorm(num_filters, act=act) @@ -118,7 +123,6 @@ class ConvBNLayer(fluid.dygraph.Layer): class SqueezeExcitation(fluid.dygraph.Layer): - def __init__(self, num_channels, reduction_ratio): super(SqueezeExcitation, self).__init__() @@ -129,15 +133,19 @@ class SqueezeExcitation(fluid.dygraph.Layer): num_channels, num_channels // reduction_ratio, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv)), - 
act='relu') + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + act='relu', + ) stdv = 1.0 / math.sqrt(num_channels / 16.0 * 1.0) self._excitation = Linear( num_channels // reduction_ratio, num_channels, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv)), - act='sigmoid') + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + act='sigmoid', + ) def forward(self, input): y = self._pool(input) @@ -149,39 +157,49 @@ class SqueezeExcitation(fluid.dygraph.Layer): class BottleneckBlock(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - stride, - cardinality, - reduction_ratio, - shortcut=True): + def __init__( + self, + num_channels, + num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + ): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act="relu") - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - groups=cardinality, - act="relu") - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 2, - filter_size=1, - act=None) - - self.scale = SqueezeExcitation(num_channels=num_filters * 2, - reduction_ratio=reduction_ratio) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act="relu", + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + groups=cardinality, + act="relu", + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 2, + filter_size=1, + act=None, + ) + + self.scale = SqueezeExcitation( + num_channels=num_filters * 2, reduction_ratio=reduction_ratio + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 2, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 2, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut @@ -203,67 +221,76 @@ class BottleneckBlock(fluid.dygraph.Layer): class SeResNeXt(fluid.dygraph.Layer): - def __init__(self, layers=50, class_dim=102): super(SeResNeXt, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 6, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) elif layers == 101: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 23, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, 
pool_stride=2, pool_padding=1, pool_type='max' + ) elif layers == 152: cardinality = 64 reduction_ratio = 16 depth = [3, 8, 36, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=3, - stride=2, - act='relu') - self.conv1 = ConvBNLayer(num_channels=64, - num_filters=64, - filter_size=3, - stride=1, - act='relu') - self.conv2 = ConvBNLayer(num_channels=64, - num_filters=128, - filter_size=3, - stride=1, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=64, + num_filters=64, + filter_size=3, + stride=1, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] num_channels = 64 @@ -274,19 +301,22 @@ class SeResNeXt(fluid.dygraph.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - cardinality=cardinality, - reduction_ratio=reduction_ratio, - shortcut=shortcut)) + BottleneckBlock( + num_channels=num_channels, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio, + shortcut=shortcut, + ), + ) num_channels = bottleneck_block._num_channels_out self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) stdv = 1.0 / math.sqrt(2048 * 1.0) self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1 @@ -295,7 +325,9 @@ class SeResNeXt(fluid.dygraph.Layer): self.pool2d_avg_output, class_dim, param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + ) @declarative def forward(self, inputs, label): @@ -326,21 +358,23 @@ class SeResNeXt(fluid.dygraph.Layer): class TestSeResnet(unittest.TestCase): - def setUp(self): - self.train_reader = paddle.batch(paddle.dataset.flowers.train( - use_xmap=False, cycle=True), - batch_size=BATCH_SIZE, - drop_last=True) + self.train_reader = paddle.batch( + paddle.dataset.flowers.train(use_xmap=False, cycle=True), + batch_size=BATCH_SIZE, + drop_last=True, + ) self.temp_dir = tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, "inference") - self.model_save_prefix = os.path.join(self.temp_dir.name, - "inference/se_resnet") + self.model_save_prefix = os.path.join( + self.temp_dir.name, "inference/se_resnet" + ) self.model_filename = "se_resnet" + INFER_MODEL_SUFFIX self.params_filename = "se_resnet" + INFER_PARAMS_SUFFIX - self.dy_state_dict_save_path = os.path.join(self.temp_dir.name, - "se_resnet.dygraph") + self.dy_state_dict_save_path = os.path.join( + self.temp_dir.name, "se_resnet.dygraph" + ) def tearDown(self): self.temp_dir.cleanup() @@ -355,8 +389,9 @@ class TestSeResnet(unittest.TestCase): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) se_resnext = SeResNeXt() - optimizer = optimizer_setting(train_parameters, - 
se_resnext.parameters()) + optimizer = optimizer_setting( + train_parameters, se_resnext.parameters() + ) for epoch_id in range(EPOCH_NUM): total_loss = 0.0 @@ -366,12 +401,14 @@ class TestSeResnet(unittest.TestCase): step_idx = 0 speed_list = [] for step_id, data in enumerate(train_reader()): - dy_x_data = np.array([ - x[0].reshape(3, 224, 224) for x in data - ]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape( - BATCH_SIZE, 1) + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(BATCH_SIZE, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) @@ -392,32 +429,54 @@ class TestSeResnet(unittest.TestCase): total_sample += 1 if step_id % PRINT_STEP == 0: if step_id == 0: - logging.info( "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f" % \ - ( epoch_id, step_id, total_loss / total_sample, \ - total_acc1 / total_sample, total_acc5 / total_sample)) + logging.info( + "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f" + % ( + epoch_id, + step_id, + total_loss / total_sample, + total_acc1 / total_sample, + total_acc5 / total_sample, + ) + ) avg_batch_time = time.time() else: speed = PRINT_STEP / (time.time() - avg_batch_time) speed_list.append(speed) - logging.info( "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, speed %.3f steps/s" % \ - ( epoch_id, step_id, total_loss / total_sample, \ - total_acc1 / total_sample, total_acc5 / total_sample, speed)) + logging.info( + "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, speed %.3f steps/s" + % ( + epoch_id, + step_id, + total_loss / total_sample, + total_acc1 / total_sample, + total_acc5 / total_sample, + speed, + ) + ) avg_batch_time = time.time() step_idx += 1 if step_idx == STEP_NUM: if to_static: - fluid.dygraph.jit.save(se_resnext, - self.model_save_prefix, - [img], - output_spec=[pred]) + fluid.dygraph.jit.save( + se_resnext, + self.model_save_prefix, + [img], + output_spec=[pred], + ) else: fluid.dygraph.save_dygraph( se_resnext.state_dict(), - self.dy_state_dict_save_path) + self.dy_state_dict_save_path, + ) break - return pred.numpy(), avg_loss.numpy(), acc_top1.numpy( - ), acc_top5.numpy() + return ( + pred.numpy(), + avg_loss.numpy(), + acc_top1.numpy(), + acc_top5.numpy(), + ) def predict_dygraph(self, data): program_translator = ProgramTranslator() @@ -426,7 +485,8 @@ class TestSeResnet(unittest.TestCase): se_resnext = SeResNeXt() model_dict, _ = fluid.dygraph.load_dygraph( - self.dy_state_dict_save_path) + self.dy_state_dict_save_path + ) se_resnext.set_dict(model_dict) se_resnext.eval() @@ -440,15 +500,22 @@ class TestSeResnet(unittest.TestCase): def predict_static(self, data): paddle.enable_static() exe = fluid.Executor(place) - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.model_save_dir, - executor=exe, - model_filename=self.model_filename, - params_filename=self.params_filename) - - pred_res = exe.run(inference_program, - feed={feed_target_names[0]: data}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.model_save_dir, + executor=exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + ) + + pred_res = exe.run( + inference_program, + feed={feed_target_names[0]: data}, + fetch_list=fetch_targets, + ) return pred_res[0] @@ -462,8 +529,12 @@ class TestSeResnet(unittest.TestCase): return 
pred_res.numpy() def predict_analysis_inference(self, data): - output = PredictorTools(self.model_save_dir, self.model_filename, - self.params_filename, [data]) + output = PredictorTools( + self.model_save_dir, + self.model_filename, + self.params_filename, + [data], + ) out = output() return out @@ -477,13 +548,16 @@ class TestSeResnet(unittest.TestCase): dy_pre, st_pre, rtol=1e-05, - err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre)) + err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre), + ) np.testing.assert_allclose( dy_jit_pre, st_pre, rtol=1e-05, err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( - dy_jit_pre, st_pre)) + dy_jit_pre, st_pre + ), + ) flat_st_pre = st_pre.flatten() flat_predictor_pre = np.array(predictor_pre).flatten() @@ -494,34 +568,42 @@ class TestSeResnet(unittest.TestCase): flat_st_pre[i], delta=1e-6, msg="predictor_pre:\n {}\n, st_pre: \n{}.".format( - flat_predictor_pre[i], flat_st_pre[i])) + flat_predictor_pre[i], flat_st_pre[i] + ), + ) def test_check_result(self): - pred_1, loss_1, acc1_1, acc5_1 = self.train(self.train_reader, - to_static=False) - pred_2, loss_2, acc1_2, acc5_2 = self.train(self.train_reader, - to_static=True) + pred_1, loss_1, acc1_1, acc5_1 = self.train( + self.train_reader, to_static=False + ) + pred_2, loss_2, acc1_2, acc5_2 = self.train( + self.train_reader, to_static=True + ) np.testing.assert_allclose( pred_1, pred_2, rtol=1e-05, - err_msg='static pred: {} \ndygraph pred: {}'.format(pred_1, pred_2)) + err_msg='static pred: {} \ndygraph pred: {}'.format(pred_1, pred_2), + ) np.testing.assert_allclose( loss_1, loss_2, rtol=1e-05, - err_msg='static loss: {} \ndygraph loss: {}'.format(loss_1, loss_2)) + err_msg='static loss: {} \ndygraph loss: {}'.format(loss_1, loss_2), + ) np.testing.assert_allclose( acc1_1, acc1_2, rtol=1e-05, - err_msg='static acc1: {} \ndygraph acc1: {}'.format(acc1_1, acc1_2)) + err_msg='static acc1: {} \ndygraph acc1: {}'.format(acc1_1, acc1_2), + ) np.testing.assert_allclose( acc5_1, acc5_2, rtol=1e-05, - err_msg='static acc5: {} \ndygraph acc5: {}'.format(acc5_1, acc5_2)) + err_msg='static acc5: {} \ndygraph acc5: {}'.format(acc5_1, acc5_2), + ) self.verify_predict() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index f581f45ca59ab29e3c2863446bc530c829343bae..aab18c98ab477a95cd5b01198bacf38c8fa1ddc0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -33,21 +33,24 @@ if fluid.is_compiled_with_cuda(): class SimpleConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - use_cudnn=True, - batch_size=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + use_cudnn=True, + batch_size=None, + ): super(SimpleConvPool, self).__init__() self.batch_size = batch_size - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - padding=[1, 1], - use_cudnn=use_cudnn, - act='tanh') + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + padding=[1, 1], + use_cudnn=use_cudnn, + act='tanh', + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -57,7 +60,6 @@ class SimpleConvPool(fluid.dygraph.Layer): class CNN(fluid.dygraph.Layer): - def __init__(self, dict_dim, batch_size, seq_len): super(CNN, self).__init__() 
self.dict_dim = dict_dim @@ -69,29 +71,37 @@ class CNN(fluid.dygraph.Layer): self.win_size = [3, self.hid_dim] self.batch_size = batch_size self.seq_len = seq_len - self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim], - dtype='float32', - is_sparse=False) - self._simple_conv_pool_1 = SimpleConvPool(self.channels, - self.hid_dim, - self.win_size, - batch_size=self.batch_size) - self._fc1 = Linear(input_dim=self.hid_dim * self.seq_len, - output_dim=self.fc_hid_dim, - act="softmax") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") + self.embedding = Embedding( + size=[self.dict_dim + 1, self.emb_dim], + dtype='float32', + is_sparse=False, + ) + self._simple_conv_pool_1 = SimpleConvPool( + self.channels, + self.hid_dim, + self.win_size, + batch_size=self.batch_size, + ) + self._fc1 = Linear( + input_dim=self.hid_dim * self.seq_len, + output_dim=self.fc_hid_dim, + act="softmax", + ) + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, output_dim=self.class_dim, act="softmax" + ) @declarative def forward(self, inputs, label=None): emb = self.embedding(inputs) - o_np_mask = (fluid.layers.reshape(inputs, [-1, 1]) != - self.dict_dim).astype(dtype='float32') + o_np_mask = ( + fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim + ).astype(dtype='float32') mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) emb = emb * mask_emb emb = fluid.layers.reshape( - emb, shape=[-1, self.channels, self.seq_len, self.hid_dim]) + emb, shape=[-1, self.channels, self.seq_len, self.hid_dim] + ) conv_3 = self._simple_conv_pool_1(emb) fc_1 = self._fc1(conv_3) prediction = self._fc_prediction(fc_1) @@ -103,7 +113,6 @@ class CNN(fluid.dygraph.Layer): class BOW(fluid.dygraph.Layer): - def __init__(self, dict_dim, batch_size, seq_len): super(BOW, self).__init__() self.dict_dim = dict_dim @@ -113,24 +122,27 @@ class BOW(fluid.dygraph.Layer): self.class_dim = 2 self.batch_size = batch_size self.seq_len = seq_len - self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim], - dtype='float32', - is_sparse=False) - self._fc1 = Linear(input_dim=self.hid_dim, - output_dim=self.hid_dim, - act="tanh") - self._fc2 = Linear(input_dim=self.hid_dim, - output_dim=self.fc_hid_dim, - act="tanh") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") + self.embedding = Embedding( + size=[self.dict_dim + 1, self.emb_dim], + dtype='float32', + is_sparse=False, + ) + self._fc1 = Linear( + input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh" + ) + self._fc2 = Linear( + input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh" + ) + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, output_dim=self.class_dim, act="softmax" + ) @declarative def forward(self, inputs, label=None): emb = self.embedding(inputs) - o_np_mask = (fluid.layers.reshape(inputs, [-1, 1]) != - self.dict_dim).astype(dtype='float32') + o_np_mask = ( + fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim + ).astype(dtype='float32') mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) emb = emb * mask_emb emb = fluid.layers.reshape(emb, shape=[-1, self.seq_len, self.hid_dim]) @@ -147,7 +159,6 @@ class BOW(fluid.dygraph.Layer): class GRU(fluid.dygraph.Layer): - def __init__(self, dict_dim, batch_size, seq_len): super(GRU, self).__init__() self.dict_dim = dict_dim @@ -157,30 +168,34 @@ class GRU(fluid.dygraph.Layer): self.class_dim = 2 self.batch_size = batch_size self.seq_len = seq_len - self.embedding = 
Embedding(size=[self.dict_dim + 1, self.emb_dim], - dtype='float32', - param_attr=fluid.ParamAttr(learning_rate=30), - is_sparse=False) + self.embedding = Embedding( + size=[self.dict_dim + 1, self.emb_dim], + dtype='float32', + param_attr=fluid.ParamAttr(learning_rate=30), + is_sparse=False, + ) h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32") h_0 = to_variable(h_0) self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3) - self._fc2 = Linear(input_dim=self.hid_dim, - output_dim=self.fc_hid_dim, - act="tanh") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") + self._fc2 = Linear( + input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh" + ) + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, output_dim=self.class_dim, act="softmax" + ) self._gru = DynamicGRU(size=self.hid_dim, h_0=h_0) @declarative def forward(self, inputs, label=None): emb = self.embedding(inputs) - o_np_mask = (fluid.layers.reshape(inputs, [-1, 1]) != - self.dict_dim).astype('float32') + o_np_mask = ( + fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim + ).astype('float32') mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) emb = emb * mask_emb - emb = fluid.layers.reshape(emb, - shape=[self.batch_size, -1, self.hid_dim]) + emb = fluid.layers.reshape( + emb, shape=[self.batch_size, -1, self.hid_dim] + ) fc_1 = self._fc1(emb) gru_hidden = self._gru(fc_1) gru_hidden = fluid.layers.reduce_max(gru_hidden, dim=1) @@ -195,7 +210,6 @@ class GRU(fluid.dygraph.Layer): class BiGRU(fluid.dygraph.Layer): - def __init__(self, dict_dim, batch_size, seq_len): super(BiGRU, self).__init__() self.dict_dim = dict_dim @@ -205,42 +219,47 @@ class BiGRU(fluid.dygraph.Layer): self.class_dim = 2 self.batch_size = batch_size self.seq_len = seq_len - self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim], - dtype='float32', - param_attr=fluid.ParamAttr(learning_rate=30), - is_sparse=False) + self.embedding = Embedding( + size=[self.dict_dim + 1, self.emb_dim], + dtype='float32', + param_attr=fluid.ParamAttr(learning_rate=30), + is_sparse=False, + ) h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32") h_0 = to_variable(h_0) self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3) - self._fc2 = Linear(input_dim=self.hid_dim * 2, - output_dim=self.fc_hid_dim, - act="tanh") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") - self._gru_forward = DynamicGRU(size=self.hid_dim, - h_0=h_0, - is_reverse=False) - self._gru_backward = DynamicGRU(size=self.hid_dim, - h_0=h_0, - is_reverse=True) + self._fc2 = Linear( + input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh" + ) + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, output_dim=self.class_dim, act="softmax" + ) + self._gru_forward = DynamicGRU( + size=self.hid_dim, h_0=h_0, is_reverse=False + ) + self._gru_backward = DynamicGRU( + size=self.hid_dim, h_0=h_0, is_reverse=True + ) @declarative def forward(self, inputs, label=None): emb = self.embedding(inputs) - o_np_mask = (fluid.layers.reshape(inputs, [-1, 1]) != - self.dict_dim).astype('float32') + o_np_mask = ( + fluid.layers.reshape(inputs, [-1, 1]) != self.dict_dim + ).astype('float32') mask_emb = fluid.layers.expand(o_np_mask, [1, self.hid_dim]) emb = emb * mask_emb - emb = fluid.layers.reshape(emb, - shape=[self.batch_size, -1, self.hid_dim]) + emb = fluid.layers.reshape( + emb, shape=[self.batch_size, -1, 
self.hid_dim] + ) fc_1 = self._fc1(emb) gru_forward = self._gru_forward(fc_1) gru_backward = self._gru_backward(fc_1) gru_forward_tanh = fluid.layers.tanh(gru_forward) gru_backward_tanh = fluid.layers.tanh(gru_backward) encoded_vector = fluid.layers.concat( - input=[gru_forward_tanh, gru_backward_tanh], axis=2) + input=[gru_forward_tanh, gru_backward_tanh], axis=2 + ) encoded_vector = fluid.layers.reduce_max(encoded_vector, dim=1) fc_2 = self._fc2(encoded_vector) prediction = self._fc_prediction(fc_2) @@ -261,11 +280,13 @@ def fake_data_reader(class_num, vocab_size, batch_size, padding_size): batch_data = [] while True: label = local_random.randint(0, class_num) - seq_len = local_random.randint(padding_size // 2, - int(padding_size * 1.2)) + seq_len = local_random.randint( + padding_size // 2, int(padding_size * 1.2) + ) word_ids = local_random.randint(0, vocab_size, [seq_len]).tolist() - word_ids = word_ids[:padding_size] + [vocab_size - ] * (padding_size - seq_len) + word_ids = word_ids[:padding_size] + [vocab_size] * ( + padding_size - seq_len + ) batch_data.append((word_ids, [label], seq_len)) if len(batch_data) == batch_size: yield batch_data @@ -287,16 +308,20 @@ class Args(object): def train(args, to_static): program_translator.enable(to_static) - place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \ + place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - train_reader = fake_data_reader(args.class_num, args.vocab_size, - args.batch_size, args.padding_size) + train_reader = fake_data_reader( + args.class_num, args.vocab_size, args.batch_size, args.padding_size + ) train_loader = fluid.io.DataLoader.from_generator(capacity=24) train_loader.set_sample_list_generator(train_reader) @@ -309,7 +334,8 @@ def train(args, to_static): elif args.model_type == 'bigru_net': model = BiGRU(args.vocab_size, args.batch_size, args.padding_size) sgd_optimizer = fluid.optimizer.Adagrad( - learning_rate=args.lr, parameter_list=model.parameters()) + learning_rate=args.lr, parameter_list=model.parameters() + ) loss_data = [] for eop in range(args.epoch): @@ -333,9 +359,14 @@ def train(args, to_static): # used_time may be 0.0, cause zero division error if used_time < 1e-5: used_time = 1e-5 - print("step: %d, ave loss: %f, speed: %f steps/s" % - (batch_id, avg_cost.numpy()[0], - args.log_step / used_time)) + print( + "step: %d, ave loss: %f, speed: %f steps/s" + % ( + batch_id, + avg_cost.numpy()[0], + args.log_step / used_time, + ) + ) time_begin = time.time() if batch_id == args.train_step: @@ -345,7 +376,6 @@ def train(args, to_static): class TestSentiment(unittest.TestCase): - def setUp(self): self.args = Args() @@ -353,11 +383,12 @@ class TestSentiment(unittest.TestCase): self.args.model_type = model_type st_out = train(self.args, True) dy_out = train(self.args, False) - np.testing.assert_allclose(dy_out, - st_out, - rtol=1e-05, - err_msg='dy_out:\n {}\n st_out:\n {}'.format( - dy_out, st_out)) + np.testing.assert_allclose( + dy_out, + st_out, + rtol=1e-05, + err_msg='dy_out:\n {}\n st_out:\n {}'.format(dy_out, st_out), + ) def test_train(self): model_types = ['cnn_net', 'bow_net', 'gru_net', 'bigru_net'] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_seq2seq.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_seq2seq.py index 
7ed2d12f5a810c2709308b0c9ffd57365d25b02f..d4932b710c39186c9cbbff1ff450c34cf9639b22 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_seq2seq.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_seq2seq.py @@ -26,8 +26,9 @@ from seq2seq_dygraph_model import BaseModel, AttentionModel from seq2seq_utils import Seq2SeqModelHyperParams from seq2seq_utils import get_data_iter -place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) program_translator = ProgramTranslator() STEP_NUM = 10 PRINT_STEP = 2 @@ -51,26 +52,32 @@ def train(args, attn_model=False): fluid.default_main_program().random_seed = 2020 if attn_model: - model = AttentionModel(args.hidden_size, - args.src_vocab_size, - args.tar_vocab_size, - args.batch_size, - num_layers=args.num_layers, - init_scale=args.init_scale, - dropout=args.dropout) + model = AttentionModel( + args.hidden_size, + args.src_vocab_size, + args.tar_vocab_size, + args.batch_size, + num_layers=args.num_layers, + init_scale=args.init_scale, + dropout=args.dropout, + ) else: - model = BaseModel(args.hidden_size, - args.src_vocab_size, - args.tar_vocab_size, - args.batch_size, - num_layers=args.num_layers, - init_scale=args.init_scale, - dropout=args.dropout) + model = BaseModel( + args.hidden_size, + args.src_vocab_size, + args.tar_vocab_size, + args.batch_size, + num_layers=args.num_layers, + init_scale=args.init_scale, + dropout=args.dropout, + ) gloabl_norm_clip = GradientClipByGlobalNorm(args.max_grad_norm) - optimizer = fluid.optimizer.SGD(args.learning_rate, - parameter_list=model.parameters(), - grad_clip=gloabl_norm_clip) + optimizer = fluid.optimizer.SGD( + args.learning_rate, + parameter_list=model.parameters(), + grad_clip=gloabl_norm_clip, + ) model.train() train_data_iter = get_data_iter(args.batch_size) @@ -96,8 +103,15 @@ def train(args, attn_model=False): if batch_id % PRINT_STEP == 0: print( "Batch:[%d]; Time: %.5f s; loss: %.5f; total_loss: %.5f; word num: %.5f; ppl: %.5f" - % (batch_id, batch_time, loss.numpy(), total_loss.numpy(), - word_count, np.exp(total_loss.numpy() / word_count))) + % ( + batch_id, + batch_time, + loss.numpy(), + total_loss.numpy(), + word_count, + np.exp(total_loss.numpy() / word_count), + ) + ) if attn_model: # NOTE: Please see code of AttentionModel. 
@@ -108,7 +122,9 @@ def train(args, attn_model=False): if batch_id + 1 >= STEP_NUM: break - model_path = args.attn_model_path if attn_model else args.base_model_path + model_path = ( + args.attn_model_path if attn_model else args.base_model_path + ) model_dir = os.path.join(model_path) if not os.path.exists(model_dir): @@ -121,27 +137,33 @@ def infer(args, attn_model=False): with fluid.dygraph.guard(place): if attn_model: - model = AttentionModel(args.hidden_size, - args.src_vocab_size, - args.tar_vocab_size, - args.batch_size, - beam_size=args.beam_size, - num_layers=args.num_layers, - init_scale=args.init_scale, - dropout=0.0, - mode='beam_search') + model = AttentionModel( + args.hidden_size, + args.src_vocab_size, + args.tar_vocab_size, + args.batch_size, + beam_size=args.beam_size, + num_layers=args.num_layers, + init_scale=args.init_scale, + dropout=0.0, + mode='beam_search', + ) else: - model = BaseModel(args.hidden_size, - args.src_vocab_size, - args.tar_vocab_size, - args.batch_size, - beam_size=args.beam_size, - num_layers=args.num_layers, - init_scale=args.init_scale, - dropout=0.0, - mode='beam_search') - - model_path = args.attn_model_path if attn_model else args.base_model_path + model = BaseModel( + args.hidden_size, + args.src_vocab_size, + args.tar_vocab_size, + args.batch_size, + beam_size=args.beam_size, + num_layers=args.num_layers, + init_scale=args.init_scale, + dropout=0.0, + mode='beam_search', + ) + + model_path = ( + args.attn_model_path if attn_model else args.base_model_path + ) state_dict, _ = fluid.dygraph.load_dygraph(model_path) model.set_dict(state_dict) model.eval() @@ -158,16 +180,18 @@ def infer(args, attn_model=False): class TestSeq2seq(unittest.TestCase): - def setUp(self): self.args = Seq2SeqModelHyperParams self.temp_dir = tempfile.TemporaryDirectory() - self.args.base_model_path = os.path.join(self.temp_dir.name, - self.args.base_model_path) - self.args.attn_model_path = os.path.join(self.temp_dir.name, - self.args.attn_model_path) - self.args.reload_model = os.path.join(self.temp_dir.name, - self.args.reload_model) + self.args.base_model_path = os.path.join( + self.temp_dir.name, self.args.base_model_path + ) + self.args.attn_model_path = os.path.join( + self.temp_dir.name, self.args.attn_model_path + ) + self.args.reload_model = os.path.join( + self.temp_dir.name, self.args.reload_model + ) def tearDown(self): self.temp_dir.cleanup() @@ -190,17 +214,23 @@ class TestSeq2seq(unittest.TestCase): dygraph_loss = self.run_dygraph(mode="train", attn_model=attn_model) static_loss = self.run_static(mode="train", attn_model=attn_model) result = np.allclose(dygraph_loss, static_loss) - self.assertTrue(result, - msg="\ndygraph_loss = {} \nstatic_loss = {}".format( - dygraph_loss, static_loss)) + self.assertTrue( + result, + msg="\ndygraph_loss = {} \nstatic_loss = {}".format( + dygraph_loss, static_loss + ), + ) def _test_predict(self, attn_model=False): pred_dygraph = self.run_dygraph(mode="test", attn_model=attn_model) pred_static = self.run_static(mode="test", attn_model=attn_model) result = np.allclose(pred_static, pred_dygraph) - self.assertTrue(result, - msg="\npred_dygraph = {} \npred_static = {}".format( - pred_dygraph, pred_static)) + self.assertTrue( + result, + msg="\npred_dygraph = {} \npred_static = {}".format( + pred_dygraph, pred_static + ), + ) def test_base_model(self): self._test_train(attn_model=False) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_setter_helper.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_setter_helper.py index 0a5d567996d92e90b64e37a8574649489a8d3265..405eb089d0c51bfca942c1f40f451b0264962f63 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_setter_helper.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_setter_helper.py @@ -28,10 +28,10 @@ def setter(values): class TestGetterSetterHelper(unittest.TestCase): - def test_1(self): - helper = GetterSetterHelper(getter, setter, ['a', 'b', 'e'], - ['d', 'f', 'e']) + helper = GetterSetterHelper( + getter, setter, ['a', 'b', 'e'], ['d', 'f', 'e'] + ) print(helper.union()) expect_union = ['a', 'b', 'd', 'e', 'f'] assert helper.union() == expect_union diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py index 2bc344ae95a6b2a69ef3baf0e29ad8c87d8c41f6..466c6affcb846fcbc82422d44d822eca2b7d17c7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py @@ -36,22 +36,24 @@ def create_conf_dict(): def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument("--batch_size", - type=int, - default=32, - help="Total examples' number in batch for training.") - parser.add_argument("--seq_len", - type=int, - default=32, - help="The length of each sentence.") - parser.add_argument("--epoch", - type=int, - default=1, - help="The number of training epoch.") - parser.add_argument("--fake_sample_size", - type=int, - default=128, - help="The number of samples of fake data.") + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="Total examples' number in batch for training.", + ) + parser.add_argument( + "--seq_len", type=int, default=32, help="The length of each sentence." + ) + parser.add_argument( + "--epoch", type=int, default=1, help="The number of training epoch." 
+ ) + parser.add_argument( + "--fake_sample_size", + type=int, + default=128, + help="The number of samples of fake data.", + ) args = parser.parse_args([]) return args @@ -72,7 +74,6 @@ vocab = fake_vocabulary() class FakeReaderProcessor(object): - def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len @@ -83,10 +84,10 @@ class FakeReaderProcessor(object): pos_title = query[:] neg_title = [26 - q for q in query] self.data_samples.append( - np.array([query, pos_title, neg_title]).astype(np.int64)) + np.array([query, pos_title, neg_title]).astype(np.int64) + ) def get_reader(self, mode, epoch=0): - def reader_with_pairwise(): if mode == "train": for i in range(self.sample_size): @@ -125,7 +126,8 @@ def train(conf_dict, to_static): beta1=0.9, beta2=0.999, epsilon=1e-08, - parameter_list=net.parameters()) + parameter_list=net.parameters(), + ) metric = fluid.metrics.Auc(name="auc") @@ -133,14 +135,14 @@ def train(conf_dict, to_static): losses = [] train_loader = fluid.io.DataLoader.from_generator( - capacity=16, - return_list=True, - iterable=True, - use_double_buffer=True) - get_train_examples = simnet_process.get_reader("train", - epoch=args.epoch) + capacity=16, return_list=True, iterable=True, use_double_buffer=True + ) + get_train_examples = simnet_process.get_reader( + "train", epoch=args.epoch + ) train_loader.set_sample_list_generator( - paddle.batch(get_train_examples, batch_size=args.batch_size), place) + paddle.batch(get_train_examples, batch_size=args.batch_size), place + ) for left, pos_right, neg_right in train_loader(): left = fluid.layers.reshape(left, shape=[-1, 1]) @@ -160,7 +162,6 @@ def train(conf_dict, to_static): class TestSimnet(unittest.TestCase): - def test_dygraph_static_same_loss(self): if fluid.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py index f2c72e9932ea7029bb1d99370e6cb4b6432b4d34..bbb408e48c3281fdca73cfdab24d2fdcb42e9945 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet_v2.py @@ -34,22 +34,24 @@ def create_conf_dict(): def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument("--batch_size", - type=int, - default=32, - help="Total examples' number in batch for training.") - parser.add_argument("--seq_len", - type=int, - default=32, - help="The length of each sentence.") - parser.add_argument("--epoch", - type=int, - default=1, - help="The number of training epoch.") - parser.add_argument("--fake_sample_size", - type=int, - default=128, - help="The number of samples of fake data.") + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="Total examples' number in batch for training.", + ) + parser.add_argument( + "--seq_len", type=int, default=32, help="The length of each sentence." + ) + parser.add_argument( + "--epoch", type=int, default=1, help="The number of training epoch." 
+ ) + parser.add_argument( + "--fake_sample_size", + type=int, + default=128, + help="The number of samples of fake data.", + ) args = parser.parse_args([]) return args @@ -70,7 +72,6 @@ vocab = fake_vocabulary() class FakeReaderProcessor(object): - def __init__(self, args, vocab): self.vocab = vocab self.seq_len = args.seq_len @@ -81,10 +82,10 @@ class FakeReaderProcessor(object): pos_title = query[:] neg_title = [26 - q for q in query] self.data_samples.append( - np.array([query, pos_title, neg_title]).astype(np.int64)) + np.array([query, pos_title, neg_title]).astype(np.int64) + ) def get_reader(self, mode, epoch=0): - def reader_with_pairwise(): if mode == "train": for i in range(self.sample_size): @@ -118,24 +119,26 @@ def train(conf_dict, to_static): net = BOW(conf_dict) loss = HingeLoss(conf_dict) - optimizer = paddle.optimizer.Adam(learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - parameters=net.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + parameters=net.parameters(), + ) metric = paddle.metric.Auc(name="auc") global_step = 0 losses = [] - train_loader = paddle.io.DataLoader.from_generator(capacity=16, - return_list=True, - iterable=True, - use_double_buffer=True) + train_loader = paddle.io.DataLoader.from_generator( + capacity=16, return_list=True, iterable=True, use_double_buffer=True + ) get_train_examples = simnet_process.get_reader("train", epoch=args.epoch) train_loader.set_sample_list_generator( - paddle.batch(get_train_examples, batch_size=args.batch_size), place) + paddle.batch(get_train_examples, batch_size=args.batch_size), place + ) for left, pos_right, neg_right in train_loader(): left = paddle.reshape(left, shape=[-1, 1]) @@ -156,7 +159,6 @@ def train(conf_dict, to_static): class TestSimnet(unittest.TestCase): - def test_dygraph_static_same_loss(self): if paddle.is_compiled_with_cuda(): paddle.fluid.set_flags({"FLAGS_cudnn_deterministic": True}) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py index 96bcc6d016e0c78a32c9b6424844cd81ed8f6f88..ffd3e8879334dc7933c6fb02f6f6c96b1e65a938 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py @@ -97,7 +97,6 @@ def test_set_value(x): class LayerWithSetValue(paddle.nn.Layer): - def __init__(self, input_dim, hidden): super(LayerWithSetValue, self).__init__() self.linear = paddle.nn.Linear(input_dim, hidden) @@ -110,11 +109,13 @@ class LayerWithSetValue(paddle.nn.Layer): class TestSliceWithoutControlFlow(unittest.TestCase): - def setUp(self): self.init_input() - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.init_dygraph_func() paddle.disable_static() @@ -142,25 +143,21 @@ class TestSliceWithoutControlFlow(unittest.TestCase): class TestSliceInIf(TestSliceWithoutControlFlow): - def init_dygraph_func(self): self.dygraph_func = test_slice_in_if class TestSliceInWhileLoop(TestSliceWithoutControlFlow): - def init_dygraph_func(self): self.dygraph_func = test_slice_in_while_loop class TestSliceInForLoop(TestSliceWithoutControlFlow): - def init_dygraph_func(self): self.dygraph_func = test_slice_in_for_loop class TestSetValue(TestSliceWithoutControlFlow): - def init_input(self): self.input = 
np.full([3, 4, 5], 5).astype('float32') @@ -169,11 +166,11 @@ class TestSetValue(TestSliceWithoutControlFlow): class TestSetValueWithLayerAndSave(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "layer_use_set_value") + self.model_path = os.path.join( + self.temp_dir.name, "layer_use_set_value" + ) def tearDown(self): self.temp_dir.cleanup() @@ -182,10 +179,9 @@ class TestSetValueWithLayerAndSave(unittest.TestCase): prog_trans.enable(True) model = LayerWithSetValue(input_dim=10, hidden=1) x = paddle.full(shape=[5, 10], fill_value=5.0, dtype="float32") - paddle.jit.save(layer=model, - path=self.model_path, - input_spec=[x], - output_spec=None) + paddle.jit.save( + layer=model, path=self.model_path, input_spec=[x], output_spec=None + ) class TestSliceSupplementSpecialCase(unittest.TestCase): @@ -218,18 +214,20 @@ class TestSliceSupplementSpecialCase(unittest.TestCase): return inps[::2], inps[::-2] origin_result = func(inps) - sfunc = paddle.jit.to_static(func, - input_spec=[InputSpec(shape=[None, 4, 4])]) + sfunc = paddle.jit.to_static( + func, input_spec=[InputSpec(shape=[None, 4, 4])] + ) static_result = sfunc(inps) - np.testing.assert_array_equal(origin_result[0].numpy(), - static_result[0].numpy()) - np.testing.assert_array_equal(origin_result[1].numpy(), - static_result[1].numpy()) + np.testing.assert_array_equal( + origin_result[0].numpy(), static_result[0].numpy() + ) + np.testing.assert_array_equal( + origin_result[1].numpy(), static_result[1].numpy() + ) class TestPaddleStridedSlice(unittest.TestCase): - def test_compare_paddle_strided_slice_with_numpy(self): paddle.disable_static() array = np.arange(5) @@ -238,19 +236,21 @@ class TestPaddleStridedSlice(unittest.TestCase): s1 = 3 e1 = 1 stride1 = -2 - sl = paddle.strided_slice(pt, - axes=[ - 0, - ], - starts=[ - s1, - ], - ends=[ - e1, - ], - strides=[ - stride1, - ]) + sl = paddle.strided_slice( + pt, + axes=[ + 0, + ], + starts=[ + s1, + ], + ends=[ + e1, + ], + strides=[ + stride1, + ], + ) self.assertTrue(array[s1:e1:stride1], sl) @@ -259,27 +259,27 @@ class TestPaddleStridedSlice(unittest.TestCase): s2 = [8, -1] e2 = [1, -5] stride2 = [-2, -3] - sl = paddle.strided_slice(pt, - axes=[0, 1], - starts=s2, - ends=e2, - strides=stride2) + sl = paddle.strided_slice( + pt, axes=[0, 1], starts=s2, ends=e2, strides=stride2 + ) np.testing.assert_array_equal( - sl.numpy(), array[s2[0]:e2[0]:stride2[0], s2[1]:e2[1]:stride2[1]]) + sl.numpy(), + array[s2[0] : e2[0] : stride2[0], s2[1] : e2[1] : stride2[1]], + ) array = np.arange(6 * 7 * 8).reshape((6, 7, 8)) pt = paddle.to_tensor(array) s2 = [7, -1] e2 = [2, -5] stride2 = [-2, -3] - sl = paddle.strided_slice(pt, - axes=[0, 2], - starts=s2, - ends=e2, - strides=stride2) + sl = paddle.strided_slice( + pt, axes=[0, 2], starts=s2, ends=e2, strides=stride2 + ) - array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]] + array_slice = array[ + s2[0] : e2[0] : stride2[0], ::, s2[1] : e2[1] : stride2[1] + ] np.testing.assert_array_equal(sl.numpy(), array_slice) @@ -289,7 +289,6 @@ def slice_zero_shape_tensor(x): class TestSliceZeroShapeTensor(unittest.TestCase): - def test_slice(self): paddle.disable_static() x = paddle.ones([0, 0, 0, 0]) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_spec_names.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_spec_names.py index cfbf47a318ffa5149d2da5985b3dc7118de79a61..8ee2fcd0d593e41d9229d1353fea6a76c0b31c73 100644 --- 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_spec_names.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_spec_names.py @@ -18,7 +18,6 @@ import unittest class Net(Layer): - def __init__(self): super(Net, self).__init__() self.fc = paddle.nn.Linear(16, 3) @@ -36,7 +35,6 @@ class Net(Layer): class TestArgsSpecName(unittest.TestCase): - def read_from_dataset(self): self.x = paddle.randn([4, 2, 8]) self.y = paddle.randn([4, 2, 8]) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py index 8f06b96122d2532ca29ed22bfef0e9d84ff2c78a..7a2e0e09b21455257d5eedc345f5295a724066f6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py @@ -19,7 +19,10 @@ import paddle import paddle.fluid as fluid import unittest -from paddle.fluid.dygraph.dygraph_to_static import NodeVarType, StaticAnalysisVisitor +from paddle.fluid.dygraph.dygraph_to_static import ( + NodeVarType, + StaticAnalysisVisitor, +) def func_to_test1(a, b): @@ -71,7 +74,7 @@ result_var_type3 = { 'i': {NodeVarType.BOOLEAN}, 'j': {NodeVarType.UNKNOWN}, 'k': {NodeVarType.FLOAT}, - 'l': {NodeVarType.PADDLE_RETURN_TYPES} + 'l': {NodeVarType.PADDLE_RETURN_TYPES}, } @@ -87,12 +90,11 @@ result_var_type4 = { 'a': {NodeVarType.NUMPY_NDARRAY}, 'b': {NodeVarType.NUMPY_NDARRAY}, 'c': {NodeVarType.TENSOR}, - 'd': {NodeVarType.TENSOR} + 'd': {NodeVarType.TENSOR}, } def func_to_test5(): - def inner_int_func(): return 1 @@ -139,7 +141,7 @@ result_var_type6 = { 'i': {NodeVarType.INT}, 'x': {NodeVarType.INT}, 'y': {NodeVarType.INT}, - 'add': {NodeVarType.INT} + 'add': {NodeVarType.INT}, } @@ -156,21 +158,30 @@ result_var_type7 = { 'd': {NodeVarType.STRING}, 'e': {NodeVarType.PADDLE_RETURN_TYPES}, 'f': {NodeVarType.PADDLE_RETURN_TYPES}, - 'g': {NodeVarType.TENSOR} + 'g': {NodeVarType.TENSOR}, } test_funcs = [ - func_to_test1, func_to_test2, func_to_test3, func_to_test4, func_to_test5, - func_to_test6, func_to_test7 + func_to_test1, + func_to_test2, + func_to_test3, + func_to_test4, + func_to_test5, + func_to_test6, + func_to_test7, ] result_var_type = [ - result_var_type1, result_var_type2, result_var_type3, result_var_type4, - result_var_type5, result_var_type6, result_var_type7 + result_var_type1, + result_var_type2, + result_var_type3, + result_var_type4, + result_var_type5, + result_var_type6, + result_var_type7, ] class TestStaticAnalysis(unittest.TestCase): - def _check_wrapper(self, wrapper, node_to_wrapper_map): self.assertEqual(node_to_wrapper_map[wrapper.node], wrapper) if wrapper.parent is not None: diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py index 3d88a03247ce99eb0d652f88f1e10780fd3ae1e2..ecda3427e7ea203b84c28f2798d91c77a30a150d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py @@ -25,7 +25,6 @@ def tensor_clone(x): class TestTensorClone(unittest.TestCase): - def _run(self, to_static): prog_trans = paddle.jit.ProgramTranslator() prog_trans.enable(to_static) @@ -46,7 +45,6 @@ def tensor_numpy(x): class TestTensorDygraphOnlyMethodError(unittest.TestCase): - def _run(self, to_static): prog_trans = paddle.jit.ProgramTranslator() 
prog_trans.enable(to_static) @@ -68,7 +66,6 @@ def tensor_item(x): class TestTensorItem(unittest.TestCase): - def _run(self, to_static): prog_trans = paddle.jit.ProgramTranslator() prog_trans.enable(to_static) @@ -92,7 +89,6 @@ def tensor_size(x): class TestTensorSize(unittest.TestCase): - def _run(self, to_static): prog_trans = paddle.jit.ProgramTranslator() prog_trans.enable(to_static) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py index 55412fb0c49a5597e48a8816a0026a171ef5ab7f..5d06d1c694ec61670c14a984e46bb0ce271b778a 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py @@ -109,13 +109,13 @@ def dyfunc_with_if_1(x): # `res.shape[0]` is transformed into # `paddle.jit.dy2static.convert_var_shape(res)[0]` if res.shape[0] > 1: - res = fluid.layers.fill_constant(value=2, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=2, shape=x.shape, dtype="int32" + ) else: - res = fluid.layers.fill_constant(value=3, - shape=x.shape, - dtype="int32") + res = fluid.layers.fill_constant( + value=3, shape=x.shape, dtype="int32" + ) return res @@ -231,11 +231,13 @@ def dyfunc_dict_assign_shape(): # 1. Basic tests without control flow class TestTensorShapeBasic(unittest.TestCase): - def setUp(self): self.input = np.ones(5).astype("int32") - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self._set_input_spec() self._set_expected_op_num() self.init_test_func() @@ -277,9 +279,11 @@ class TestTensorShapeBasic(unittest.TestCase): for block in program.blocks: self.shape_op_num += len( - [op for op in block.ops if op.type == "shape"]) + [op for op in block.ops if op.type == "shape"] + ) self.slice_op_num += len( - [op for op in block.ops if op.type == "slice"]) + [op for op in block.ops if op.type == "slice"] + ) def test_op_num(self): static_layer = paddle.jit.to_static(self.dygraph_func, self.input_spec) @@ -291,7 +295,6 @@ class TestTensorShapeBasic(unittest.TestCase): class TestTensorShapeBasic2(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_tensor_shape_2 @@ -302,19 +305,16 @@ class TestTensorShapeBasic2(TestTensorShapeBasic): class TestTensorShapeBasic3(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_tensor_shape_3 class TestTensorShapeBasic4(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_tensor_shape_4 class TestTensorShapeBasic5(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_tensor_shape_5 @@ -325,7 +325,6 @@ class TestTensorShapeBasic5(TestTensorShapeBasic): class TestTensorShapeBasic6(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_tensor_shape_6 @@ -336,7 +335,6 @@ class TestTensorShapeBasic6(TestTensorShapeBasic): class TestTupleShape1(TestTensorShapeBasic): - def init_test_func(self): self.input = np.ones((5, 7)).astype("int32") self.input_spec = [ @@ -351,7 +349,6 @@ class TestTupleShape1(TestTensorShapeBasic): class TestTupleShape2(TestTensorShapeBasic): - def init_test_func(self): self.input = np.ones((5, 7)).astype("int32") self.input_spec = [ @@ -366,7 +363,6 @@ class TestTupleShape2(TestTensorShapeBasic): class TestTupleShape3(TestTensorShapeBasic): 
- def init_test_func(self): self.input = np.ones((5, 7)).astype("int32") self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")] @@ -379,7 +375,6 @@ class TestTupleShape3(TestTensorShapeBasic): class TestPaddleShapeApi(TestTensorShapeBasic): - def init_test_func(self): self.input = np.ones((5, 7)).astype("int32") self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")] @@ -393,7 +388,6 @@ class TestPaddleShapeApi(TestTensorShapeBasic): # 2. Tests with control flow if class TestTensorShapeInIf1(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_with_if_1 @@ -404,7 +398,6 @@ class TestTensorShapeInIf1(TestTensorShapeBasic): class TestTensorShapeInIf2(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_with_if_2 @@ -416,7 +409,6 @@ class TestTensorShapeInIf2(TestTensorShapeBasic): # 3. Tests with control flow for loop class TestTensorShapeInFor1(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_with_for_1 @@ -427,7 +419,6 @@ class TestTensorShapeInFor1(TestTensorShapeBasic): class TestTensorShapeInFor2(TestTensorShapeInFor1): - def init_test_func(self): self.dygraph_func = dyfunc_with_for_2 @@ -438,7 +429,6 @@ class TestTensorShapeInFor2(TestTensorShapeInFor1): class TestTensorShapeInFor3(TestTensorShapeInFor1): - def init_test_func(self): self.dygraph_func = dyfunc_with_for_3 @@ -450,7 +440,6 @@ class TestTensorShapeInFor3(TestTensorShapeInFor1): # 4. Tests with control flow while loop class TestTensorShapeInWhile1(TestTensorShapeInFor1): - def init_test_func(self): self.dygraph_func = dyfunc_with_while_1 @@ -461,7 +450,6 @@ class TestTensorShapeInWhile1(TestTensorShapeInFor1): class TestTensorShapeInWhile2(TestTensorShapeInFor1): - def init_test_func(self): self.dygraph_func = dyfunc_with_while_2 @@ -472,7 +460,6 @@ class TestTensorShapeInWhile2(TestTensorShapeInFor1): class TestTensorShapeInWhile3(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_with_while_3 @@ -483,7 +470,6 @@ class TestTensorShapeInWhile3(TestTensorShapeBasic): class TestTensorShapeInWhile4(TestTensorShapeBasic): - def init_test_func(self): self.dygraph_func = dyfunc_with_while_4 @@ -495,7 +481,6 @@ class TestTensorShapeInWhile4(TestTensorShapeBasic): # 5. 
Test op num for negetive dim class TestOpNumBasicWithTensorShape(unittest.TestCase): - def setUp(self): self._set_input_spec() self._set_test_func() @@ -521,9 +506,11 @@ class TestOpNumBasicWithTensorShape(unittest.TestCase): for block in program.blocks: self.shape_op_num += len( - [op for op in block.ops if op.type == "shape"]) + [op for op in block.ops if op.type == "shape"] + ) self.slice_op_num += len( - [op for op in block.ops if op.type == "slice"]) + [op for op in block.ops if op.type == "slice"] + ) def test_op_num(self): static_layer = paddle.jit.to_static(self.dygraph_func, self.input_spec) @@ -536,7 +523,6 @@ class TestOpNumBasicWithTensorShape(unittest.TestCase): class TestOpNumBasicWithTensorShape4(TestOpNumBasicWithTensorShape): - def _set_test_func(self): self.dygraph_func = dyfunc_tensor_shape_4 @@ -547,7 +533,6 @@ class TestOpNumBasicWithTensorShape4(TestOpNumBasicWithTensorShape): class TestOpNumWithTensorShapeTuple1(TestOpNumBasicWithTensorShape): - def _set_test_func(self): self.dygraph_func = dyfunc_tuple_shape_1 @@ -558,7 +543,6 @@ class TestOpNumWithTensorShapeTuple1(TestOpNumBasicWithTensorShape): class TestOpNumWithTensorShapeInIf1(TestOpNumBasicWithTensorShape): - def _set_test_func(self): self.dygraph_func = dyfunc_with_if_1 @@ -569,7 +553,6 @@ class TestOpNumWithTensorShapeInIf1(TestOpNumBasicWithTensorShape): class TestOpNumWithTensorShapeInFor1(TestOpNumBasicWithTensorShape): - def _set_test_func(self): self.dygraph_func = dyfunc_with_for_1 @@ -580,7 +563,6 @@ class TestOpNumWithTensorShapeInFor1(TestOpNumBasicWithTensorShape): class TestOpNumWithTensorShapeInWhile1(TestOpNumBasicWithTensorShape): - def _set_test_func(self): self.dygraph_func = dyfunc_with_while_1 @@ -591,7 +573,6 @@ class TestOpNumWithTensorShapeInWhile1(TestOpNumBasicWithTensorShape): class TestChangeShapeAfterAssign(TestTensorShapeBasic): - def init_test_func(self): self.input = np.ones((2, 3)).astype("int32") self.input_spec = [ @@ -613,15 +594,14 @@ def dyfunc_with_static_convert_var_shape(x): else: # Test for correctly to find `batch_size__static_convert_var_shape_suffix_0` in # deeply nested scope. 
- res = fluid.layers.fill_constant(value=8, - shape=[batch_size], - dtype="int32") + res = fluid.layers.fill_constant( + value=8, shape=[batch_size], dtype="int32" + ) return res class TestFindStatiConvertVarShapeSuffixVar(unittest.TestCase): - def test(self): x_spec = paddle.static.InputSpec(shape=[None, 10]) func = paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec]) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_to_tensor.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_to_tensor.py index af543a5d97bb1f50a9326184f913ac75ed737ba4..2e8e9f06a1b05a7959f2537670d10dbf8ebe6853 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_to_tensor.py @@ -37,10 +37,9 @@ def case2(x): place = paddle.CUDAPlace(0) else: place = paddle.CPUPlace() - a = paddle.to_tensor([1.0, 2.0, 3.0], - place=place, - dtype="int64", - stop_gradient=False) + a = paddle.to_tensor( + [1.0, 2.0, 3.0], place=place, dtype="int64", stop_gradient=False + ) return a @@ -84,7 +83,6 @@ def case6(x): class TestToTensorReturnVal(unittest.TestCase): - def test_to_tensor_badreturn(self): paddle.disable_static() x = paddle.to_tensor([3]) @@ -133,7 +131,6 @@ class TestToTensorReturnVal(unittest.TestCase): class TestStatic(unittest.TestCase): - def test_static(self): paddle.enable_static() main_prog = Program() @@ -144,10 +141,12 @@ class TestStatic(unittest.TestCase): else: place = paddle.CPUPlace() - x = paddle.to_tensor(paddle.randn([5, 2]), - dtype='float64', - stop_gradient=False, - place=place) + x = paddle.to_tensor( + paddle.randn([5, 2]), + dtype='float64', + stop_gradient=False, + place=place, + ) out = paddle.static.nn.fc(x, 1) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py index f7b16c06cb8a4f388bd12117eb8e005115efa6be..d9eb890394f8003ac7f84af0be27ddd6e066402e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py @@ -23,11 +23,16 @@ import paddle import paddle.fluid as fluid import transformer_util as util -from transformer_dygraph_model import CrossEntropyCriterion, Transformer, position_encoding_init +from transformer_dygraph_model import ( + CrossEntropyCriterion, + Transformer, + position_encoding_init, +) trainer_count = 1 -place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +place = ( + fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() +) SEED = 10 STEP_NUM = 10 @@ -42,46 +47,73 @@ def train_static(args, batch_generator): with fluid.program_guard(train_prog, startup_prog): with fluid.unique_name.guard(): # define input and reader - input_field_names = util.encoder_data_input_fields + \ - util.decoder_data_input_fields[:-1] + util.label_data_input_fields + input_field_names = ( + util.encoder_data_input_fields + + util.decoder_data_input_fields[:-1] + + util.label_data_input_fields + ) input_descs = util.get_input_descs(args) - input_slots = [{ - "name": name, - "shape": input_descs[name][0], - "dtype": input_descs[name][1] - } for name in input_field_names] + input_slots = [ + { + "name": name, + "shape": input_descs[name][0], + "dtype": input_descs[name][1], + } + for name in input_field_names + ] input_field = util.InputField(input_slots) # Define DataLoader data_loader = 
fluid.io.DataLoader.from_generator( - input_field.feed_list, capacity=60) + input_field.feed_list, capacity=60 + ) data_loader.set_batch_generator(batch_generator, places=place) # define model transformer = Transformer( - args.src_vocab_size, args.trg_vocab_size, args.max_length + 1, - args.n_layer, args.n_head, args.d_key, args.d_value, - args.d_model, args.d_inner_hid, args.prepostprocess_dropout, - args.attention_dropout, args.relu_dropout, args.preprocess_cmd, - args.postprocess_cmd, args.weight_sharing, args.bos_idx, - args.eos_idx) + args.src_vocab_size, + args.trg_vocab_size, + args.max_length + 1, + args.n_layer, + args.n_head, + args.d_key, + args.d_value, + args.d_model, + args.d_inner_hid, + args.prepostprocess_dropout, + args.attention_dropout, + args.relu_dropout, + args.preprocess_cmd, + args.postprocess_cmd, + args.weight_sharing, + args.bos_idx, + args.eos_idx, + ) logits = transformer(*input_field.feed_list[:7]) # define loss criterion = CrossEntropyCriterion(args.label_smooth_eps) lbl_word, lbl_weight = input_field.feed_list[7:] - sum_cost, avg_cost, token_num = criterion(logits, lbl_word, - lbl_weight) + sum_cost, avg_cost, token_num = criterion( + logits, lbl_word, lbl_weight + ) # define optimizer learning_rate = fluid.layers.learning_rate_scheduler.noam_decay( - args.d_model, args.warmup_steps, args.learning_rate) - optimizer = fluid.optimizer.Adam(learning_rate=learning_rate, - beta1=args.beta1, - beta2=args.beta2, - epsilon=float(args.eps)) + args.d_model, args.warmup_steps, args.learning_rate + ) + optimizer = fluid.optimizer.Adam( + learning_rate=learning_rate, + beta1=args.beta1, + beta2=args.beta2, + epsilon=float(args.eps), + ) optimizer.minimize(avg_cost) # the best cross-entropy value with label smoothing - loss_normalizer = -((1. - args.label_smooth_eps) * np.log( - (1. 
- args.label_smooth_eps)) + args.label_smooth_eps * - np.log(args.label_smooth_eps / - (args.trg_vocab_size - 1) + 1e-20)) + loss_normalizer = -( + (1.0 - args.label_smooth_eps) + * np.log((1.0 - args.label_smooth_eps)) + + args.label_smooth_eps + * np.log( + args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20 + ) + ) step_idx = 0 total_batch_num = 0 avg_loss = [] @@ -90,12 +122,15 @@ def train_static(args, batch_generator): for pass_id in range(args.epoch): batch_id = 0 for feed_dict in data_loader: - outs = exe.run(program=train_prog, - feed=feed_dict, - fetch_list=[sum_cost.name, token_num.name]) + outs = exe.run( + program=train_prog, + feed=feed_dict, + fetch_list=[sum_cost.name, token_num.name], + ) if step_idx % args.print_step == 0: sum_cost_val, token_num_val = np.array(outs[0]), np.array( - outs[1]) + outs[1] + ) total_sum_cost = sum_cost_val.sum() total_token_num = token_num_val.sum() total_avg_cost = total_sum_cost / total_token_num @@ -103,27 +138,40 @@ def train_static(args, batch_generator): if step_idx == 0: logging.info( "step_idx: %d, epoch: %d, batch: %d, avg loss: %f, " - "normalized loss: %f, ppl: %f" % - (step_idx, pass_id, batch_id, total_avg_cost, - total_avg_cost - loss_normalizer, - np.exp([min(total_avg_cost, 100)]))) + "normalized loss: %f, ppl: %f" + % ( + step_idx, + pass_id, + batch_id, + total_avg_cost, + total_avg_cost - loss_normalizer, + np.exp([min(total_avg_cost, 100)]), + ) + ) avg_batch_time = time.time() else: logging.info( "step_idx: %d, epoch: %d, batch: %d, avg loss: %f, " - "normalized loss: %f, ppl: %f, speed: %.2f steps/s" % - (step_idx, pass_id, batch_id, total_avg_cost, - total_avg_cost - loss_normalizer, - np.exp([min(total_avg_cost, 100)]), args.print_step / - (time.time() - avg_batch_time))) + "normalized loss: %f, ppl: %f, speed: %.2f steps/s" + % ( + step_idx, + pass_id, + batch_id, + total_avg_cost, + total_avg_cost - loss_normalizer, + np.exp([min(total_avg_cost, 100)]), + args.print_step / (time.time() - avg_batch_time), + ) + ) avg_batch_time = time.time() batch_id += 1 step_idx += 1 total_batch_num = total_batch_num + 1 if step_idx == STEP_NUM: if args.save_dygraph_model_path: - model_path = os.path.join(args.save_static_model_path, - "transformer") + model_path = os.path.join( + args.save_static_model_path, "transformer" + ) fluid.save(train_prog, model_path) break return np.array(avg_loss) @@ -139,30 +187,45 @@ def train_dygraph(args, batch_generator): train_loader.set_batch_generator(batch_generator, places=place) # define model transformer = Transformer( - args.src_vocab_size, args.trg_vocab_size, args.max_length + 1, - args.n_layer, args.n_head, args.d_key, args.d_value, args.d_model, - args.d_inner_hid, args.prepostprocess_dropout, - args.attention_dropout, args.relu_dropout, args.preprocess_cmd, - args.postprocess_cmd, args.weight_sharing, args.bos_idx, - args.eos_idx) + args.src_vocab_size, + args.trg_vocab_size, + args.max_length + 1, + args.n_layer, + args.n_head, + args.d_key, + args.d_value, + args.d_model, + args.d_inner_hid, + args.prepostprocess_dropout, + args.attention_dropout, + args.relu_dropout, + args.preprocess_cmd, + args.postprocess_cmd, + args.weight_sharing, + args.bos_idx, + args.eos_idx, + ) # define loss criterion = CrossEntropyCriterion(args.label_smooth_eps) # define optimizer learning_rate = fluid.layers.learning_rate_scheduler.noam_decay( - args.d_model, args.warmup_steps, args.learning_rate) + args.d_model, args.warmup_steps, args.learning_rate + ) # define optimizer optimizer = 
fluid.optimizer.Adam( learning_rate=learning_rate, beta1=args.beta1, beta2=args.beta2, epsilon=float(args.eps), - parameter_list=transformer.parameters()) + parameter_list=transformer.parameters(), + ) # the best cross-entropy value with label smoothing loss_normalizer = -( - (1. - args.label_smooth_eps) * np.log( - (1. - args.label_smooth_eps)) + - args.label_smooth_eps * np.log(args.label_smooth_eps / - (args.trg_vocab_size - 1) + 1e-20)) + (1.0 - args.label_smooth_eps) + * np.log((1.0 - args.label_smooth_eps)) + + args.label_smooth_eps + * np.log(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20) + ) ce_time = [] ce_ppl = [] avg_loss = [] @@ -171,14 +234,29 @@ def train_dygraph(args, batch_generator): pass_start_time = time.time() batch_id = 0 for input_data in train_loader(): - (src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos, - trg_slf_attn_bias, trg_src_attn_bias, lbl_word, - lbl_weight) = input_data - logits = transformer(src_word, src_pos, src_slf_attn_bias, - trg_word, trg_pos, trg_slf_attn_bias, - trg_src_attn_bias) + ( + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + lbl_word, + lbl_weight, + ) = input_data + logits = transformer( + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + ) sum_cost, avg_cost, token_num = criterion( - logits, lbl_word, lbl_weight) + logits, lbl_word, lbl_weight + ) avg_cost.backward() optimizer.minimize(avg_cost) transformer.clear_gradients() @@ -188,20 +266,32 @@ def train_dygraph(args, batch_generator): if step_idx == 0: logging.info( "step_idx: %d, epoch: %d, batch: %d, avg loss: %f, " - "normalized loss: %f, ppl: %f" % - (step_idx, pass_id, batch_id, total_avg_cost, - total_avg_cost - loss_normalizer, - np.exp([min(total_avg_cost, 100)]))) + "normalized loss: %f, ppl: %f" + % ( + step_idx, + pass_id, + batch_id, + total_avg_cost, + total_avg_cost - loss_normalizer, + np.exp([min(total_avg_cost, 100)]), + ) + ) avg_batch_time = time.time() else: logging.info( "step_idx: %d, epoch: %d, batch: %d, avg loss: %f, " "normalized loss: %f, ppl: %f, speed: %.2f steps/s" - % (step_idx, pass_id, batch_id, total_avg_cost, - total_avg_cost - loss_normalizer, - np.exp([min(total_avg_cost, 100) - ]), args.print_step / - (time.time() - avg_batch_time))) + % ( + step_idx, + pass_id, + batch_id, + total_avg_cost, + total_avg_cost - loss_normalizer, + np.exp([min(total_avg_cost, 100)]), + args.print_step + / (time.time() - avg_batch_time), + ) + ) ce_ppl.append(np.exp([min(total_avg_cost, 100)])) avg_batch_time = time.time() batch_id += 1 @@ -213,10 +303,12 @@ def train_dygraph(args, batch_generator): os.makedirs(model_dir) fluid.save_dygraph( transformer.state_dict(), - os.path.join(model_dir, "transformer")) + os.path.join(model_dir, "transformer"), + ) fluid.save_dygraph( optimizer.state_dict(), - os.path.join(model_dir, "transformer")) + os.path.join(model_dir, "transformer"), + ) break time_consumed = time.time() - pass_start_time ce_time.append(time_consumed) @@ -234,22 +326,37 @@ def predict_dygraph(args, batch_generator): # define model transformer = Transformer( - args.src_vocab_size, args.trg_vocab_size, args.max_length + 1, - args.n_layer, args.n_head, args.d_key, args.d_value, args.d_model, - args.d_inner_hid, args.prepostprocess_dropout, - args.attention_dropout, args.relu_dropout, args.preprocess_cmd, - args.postprocess_cmd, args.weight_sharing, args.bos_idx, - args.eos_idx) + args.src_vocab_size, + 
args.trg_vocab_size, + args.max_length + 1, + args.n_layer, + args.n_head, + args.d_key, + args.d_value, + args.d_model, + args.d_inner_hid, + args.prepostprocess_dropout, + args.attention_dropout, + args.relu_dropout, + args.preprocess_cmd, + args.postprocess_cmd, + args.weight_sharing, + args.bos_idx, + args.eos_idx, + ) # load the trained model model_dict, _ = util.load_dygraph( - os.path.join(args.save_dygraph_model_path, "transformer")) + os.path.join(args.save_dygraph_model_path, "transformer") + ) # to avoid a longer length than training, reset the size of position # encoding to max_length model_dict["encoder.pos_encoder.weight"] = position_encoding_init( - args.max_length + 1, args.d_model) + args.max_length + 1, args.d_model + ) model_dict["decoder.pos_encoder.weight"] = position_encoding_init( - args.max_length + 1, args.d_model) + args.max_length + 1, args.d_model + ) transformer.load_dict(model_dict) # set evaluate mode @@ -258,8 +365,13 @@ def predict_dygraph(args, batch_generator): step_idx = 0 speed_list = [] for input_data in test_loader(): - (src_word, src_pos, src_slf_attn_bias, trg_word, - trg_src_attn_bias) = input_data + ( + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_src_attn_bias, + ) = input_data seq_ids, seq_scores = transformer.beam_search( src_word, src_pos, @@ -269,28 +381,32 @@ def predict_dygraph(args, batch_generator): bos_id=args.bos_idx, eos_id=args.eos_idx, beam_size=args.beam_size, - max_len=args.max_out_len) + max_len=args.max_out_len, + ) seq_ids = seq_ids.numpy() seq_scores = seq_scores.numpy() if step_idx % args.print_step == 0: if step_idx == 0: logging.info( "Dygraph Predict: step_idx: %d, 1st seq_id: %d, 1st seq_score: %.2f" - % (step_idx, seq_ids[0][0][0], seq_scores[0][0])) + % (step_idx, seq_ids[0][0][0], seq_scores[0][0]) + ) avg_batch_time = time.time() else: speed = args.print_step / (time.time() - avg_batch_time) speed_list.append(speed) logging.info( "Dygraph Predict: step_idx: %d, 1st seq_id: %d, 1st seq_score: %.2f, speed: %.3f steps/s" - % (step_idx, seq_ids[0][0][0], seq_scores[0][0], speed)) + % (step_idx, seq_ids[0][0][0], seq_scores[0][0], speed) + ) avg_batch_time = time.time() step_idx += 1 if step_idx == STEP_NUM: break - logging.info("Dygraph Predict: avg_speed: %.4f steps/s" % - (np.mean(speed_list))) + logging.info( + "Dygraph Predict: avg_speed: %.4f steps/s" % (np.mean(speed_list)) + ) return seq_ids, seq_scores @@ -301,33 +417,53 @@ def predict_static(args, batch_generator): paddle.framework.random._manual_program_seed(SEED) # define input and reader - input_field_names = util.encoder_data_input_fields + util.fast_decoder_data_input_fields + input_field_names = ( + util.encoder_data_input_fields + util.fast_decoder_data_input_fields + ) input_descs = util.get_input_descs(args, 'test') - input_slots = [{ - "name": name, - "shape": input_descs[name][0], - "dtype": input_descs[name][1] - } for name in input_field_names] + input_slots = [ + { + "name": name, + "shape": input_descs[name][0], + "dtype": input_descs[name][1], + } + for name in input_field_names + ] input_field = util.InputField(input_slots) feed_list = input_field.feed_list - loader = fluid.io.DataLoader.from_generator(feed_list=feed_list, - capacity=10) + loader = fluid.io.DataLoader.from_generator( + feed_list=feed_list, capacity=10 + ) # define model transformer = Transformer( - args.src_vocab_size, args.trg_vocab_size, args.max_length + 1, - args.n_layer, args.n_head, args.d_key, args.d_value, args.d_model, - args.d_inner_hid, 
args.prepostprocess_dropout, - args.attention_dropout, args.relu_dropout, args.preprocess_cmd, - args.postprocess_cmd, args.weight_sharing, args.bos_idx, - args.eos_idx) - - out_ids, out_scores = transformer.beam_search(*feed_list, - bos_id=args.bos_idx, - eos_id=args.eos_idx, - beam_size=args.beam_size, - max_len=args.max_out_len) + args.src_vocab_size, + args.trg_vocab_size, + args.max_length + 1, + args.n_layer, + args.n_head, + args.d_key, + args.d_value, + args.d_model, + args.d_inner_hid, + args.prepostprocess_dropout, + args.attention_dropout, + args.relu_dropout, + args.preprocess_cmd, + args.postprocess_cmd, + args.weight_sharing, + args.bos_idx, + args.eos_idx, + ) + + out_ids, out_scores = transformer.beam_search( + *feed_list, + bos_id=args.bos_idx, + eos_id=args.eos_idx, + beam_size=args.beam_size, + max_len=args.max_out_len + ) # This is used here to set dropout to the test mode. test_prog = test_prog.clone(for_test=True) @@ -335,8 +471,9 @@ def predict_static(args, batch_generator): # define the executor and program for training exe = fluid.Executor(place) - util.load(test_prog, os.path.join(args.save_static_model_path, - "transformer"), exe) + util.load( + test_prog, os.path.join(args.save_static_model_path, "transformer"), exe + ) loader.set_batch_generator(batch_generator, places=place) @@ -347,32 +484,35 @@ def predict_static(args, batch_generator): test_prog, feed=feed_dict, fetch_list=[out_ids.name, out_scores.name], - return_numpy=True) + return_numpy=True, + ) if step_idx % args.print_step == 0: if step_idx == 0: logging.info( "Static Predict: step_idx: %d, 1st seq_id: %d, 1st seq_score: %.2f," - % (step_idx, seq_ids[0][0][0], seq_scores[0][0])) + % (step_idx, seq_ids[0][0][0], seq_scores[0][0]) + ) avg_batch_time = time.time() else: speed = args.print_step / (time.time() - avg_batch_time) speed_list.append(speed) logging.info( "Static Predict: step_idx: %d, 1st seq_id: %d, 1st seq_score: %.2f, speed: %.3f steps/s" - % (step_idx, seq_ids[0][0][0], seq_scores[0][0], speed)) + % (step_idx, seq_ids[0][0][0], seq_scores[0][0], speed) + ) avg_batch_time = time.time() step_idx += 1 if step_idx == STEP_NUM: break - logging.info("Static Predict: avg_speed: %.4f steps/s" % - (np.mean(speed_list))) + logging.info( + "Static Predict: avg_speed: %.4f steps/s" % (np.mean(speed_list)) + ) return seq_ids, seq_scores class TestTransformer(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -382,11 +522,14 @@ class TestTransformer(unittest.TestCase): def prepare(self, mode='train'): args = util.ModelHyperParams() args.save_dygraph_model_path = os.path.join( - self.temp_dir.name, args.save_dygraph_model_path) - args.save_static_model_path = os.path.join(self.temp_dir.name, - args.save_static_model_path) - args.inference_model_dir = os.path.join(self.temp_dir.name, - args.inference_model_dir) + self.temp_dir.name, args.save_dygraph_model_path + ) + args.save_static_model_path = os.path.join( + self.temp_dir.name, args.save_static_model_path + ) + args.inference_model_dir = os.path.join( + self.temp_dir.name, args.inference_model_dir + ) args.output_file = os.path.join(self.temp_dir.name, args.output_file) batch_generator = util.get_feed_data_reader(args, mode) return args, batch_generator @@ -395,9 +538,9 @@ class TestTransformer(unittest.TestCase): args, batch_generator = self.prepare(mode='train') static_avg_loss = train_static(args, batch_generator) dygraph_avg_loss = train_dygraph(args, batch_generator) - 
np.testing.assert_allclose(static_avg_loss, - dygraph_avg_loss, - rtol=1e-05) + np.testing.assert_allclose( + static_avg_loss, dygraph_avg_loss, rtol=1e-05 + ) def _test_predict(self): args, batch_generator = self.prepare(mode='test') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 7561e7aa13798acb6cbb2977f363a0c75d0adb33..36232b627fdfaa257c56fe0025689aca58fb9c3d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -1,16 +1,16 @@ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import argparse import numpy as np @@ -30,43 +30,52 @@ np.random.seed(0) def parse_args(): parser = argparse.ArgumentParser("Paddle Video train script") - parser.add_argument('--config', - type=str, - default='tsm.yaml', - help='path to config file of model') - parser.add_argument('--use_gpu', - type=bool, - default=fluid.is_compiled_with_cuda(), - help='default use gpu.') + parser.add_argument( + '--config', + type=str, + default='tsm.yaml', + help='path to config file of model', + ) + parser.add_argument( + '--use_gpu', + type=bool, + default=fluid.is_compiled_with_cuda(), + help='default use gpu.', + ) args = parser.parse_args(['--config', 'tsm.yaml']) return args class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=None, - act=None, - param_attr=fluid.param_attr.ParamAttr(), - bias_attr=False) - - self._batch_norm = BatchNorm(num_filters, - act=act, - param_attr=fluid.param_attr.ParamAttr(), - bias_attr=fluid.param_attr.ParamAttr()) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=None, + act=None, + param_attr=fluid.param_attr.ParamAttr(), + bias_attr=False, + ) + + self._batch_norm = BatchNorm( + num_filters, + act=act, + param_attr=fluid.param_attr.ParamAttr(), + bias_attr=fluid.param_attr.ParamAttr(), + ) def forward(self, inputs): y = self._conv(inputs) @@ 
-76,34 +85,38 @@ class ConvBNLayer(fluid.dygraph.Layer): class BottleneckBlock(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - stride, - shortcut=True, - seg_num=8): + def __init__( + self, num_channels, num_filters, stride, shortcut=True, seg_num=8 + ): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act='relu') - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - act='relu') - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 4, - filter_size=1, - act=None) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 4, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut self.seg_num = seg_num self._num_channels_out = int(num_filters * 4) @@ -122,7 +135,6 @@ class BottleneckBlock(fluid.dygraph.Layer): class TSM_ResNet(fluid.dygraph.Layer): - def __init__(self, name_scope, config, mode): super(TSM_ResNet, self).__init__(name_scope) @@ -130,8 +142,9 @@ class TSM_ResNet(fluid.dygraph.Layer): self.seg_num = config.MODEL.seg_num self.class_dim = config.MODEL.num_classes self.reshape_list = [ - config.MODEL.seglen * 3, config[mode.upper()]['target_size'], - config[mode.upper()]['target_size'] + config.MODEL.seglen * 3, + config[mode.upper()]['target_size'], + config[mode.upper()]['target_size'], ] if self.layers == 50: @@ -140,15 +153,12 @@ class TSM_ResNet(fluid.dygraph.Layer): raise NotImplementedError num_filters = [64, 128, 256, 512] - self.conv = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool2d_max = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv = ConvBNLayer( + num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu' + ) + self.pool2d_max = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] num_channels = 64 @@ -158,19 +168,23 @@ class TSM_ResNet(fluid.dygraph.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - seg_num=self.seg_num)) + BottleneckBlock( + num_channels=num_channels, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + seg_num=self.seg_num, + ), + ) num_channels = int(bottleneck_block._num_channels_out) self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) import math + stdv = 1.0 / math.sqrt(2048 * 1.0) self.out = Linear( @@ -178,9 +192,12 @@ class TSM_ResNet(fluid.dygraph.Layer): self.class_dim, act="softmax", param_attr=fluid.param_attr.ParamAttr( - 
initializer=fluid.initializer.Uniform(-stdv, stdv)), + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), bias_attr=fluid.param_attr.ParamAttr( - learning_rate=2.0, regularizer=fluid.regularizer.L2Decay(0.))) + learning_rate=2.0, regularizer=fluid.regularizer.L2Decay(0.0) + ), + ) @declarative def forward(self, inputs): @@ -199,7 +216,6 @@ class TSM_ResNet(fluid.dygraph.Layer): class FakeDataReader(object): - def __init__(self, mode, cfg): self.format = cfg.MODEL.format self.num_classes = cfg.MODEL.num_classes @@ -207,13 +223,18 @@ class FakeDataReader(object): self.seglen = cfg.MODEL.seglen self.target_size = cfg[mode.upper()]['target_size'] - self.img_mean = np.array(cfg.MODEL.image_mean).reshape( - [3, 1, 1]).astype(np.float32) - self.img_std = np.array(cfg.MODEL.image_std).reshape([3, 1, 1]).astype( - np.float32) - - self.batch_size = 1 if sys.platform == 'darwin' or os.name == 'nt' else cfg[ - mode.upper()]['batch_size'] + self.img_mean = ( + np.array(cfg.MODEL.image_mean).reshape([3, 1, 1]).astype(np.float32) + ) + self.img_std = ( + np.array(cfg.MODEL.image_std).reshape([3, 1, 1]).astype(np.float32) + ) + + self.batch_size = ( + 1 + if sys.platform == 'darwin' or os.name == 'nt' + else cfg[mode.upper()]['batch_size'] + ) self.generator_out = [] self.total_iter = 3 for i in range(self.total_iter): @@ -222,15 +243,20 @@ class FakeDataReader(object): label = np.int64(random.randint(0, self.num_classes - 1)) random_mean = self.img_mean[0][0][0] random_std = self.img_std[0][0][0] - imgs = np.random.normal(random_mean, random_std, [ - self.seg_num, self.seglen * 3, self.target_size, - self.target_size - ]).astype(np.float32) + imgs = np.random.normal( + random_mean, + random_std, + [ + self.seg_num, + self.seglen * 3, + self.target_size, + self.target_size, + ], + ).astype(np.float32) batch_out.append((imgs, label)) self.generator_out.append(batch_out) def create_reader(self): - def batch_reader(): for i in range(self.total_iter): yield self.generator_out[i] @@ -240,7 +266,9 @@ class FakeDataReader(object): def create_optimizer(cfg, params): total_videos = cfg.total_videos - batch_size = 1 if sys.platform == 'darwin' or os.name == 'nt' else cfg.batch_size + batch_size = ( + 1 if sys.platform == 'darwin' or os.name == 'nt' else cfg.batch_size + ) step = int(total_videos / batch_size + 1) bd = [e * step for e in cfg.decay_epochs] base_lr = cfg.learning_rate @@ -253,7 +281,8 @@ def create_optimizer(cfg, params): learning_rate=fluid.layers.piecewise_decay(boundaries=bd, values=lr), momentum=momentum, regularization=fluid.regularizer.L2Decay(l2_weight_decay), - parameter_list=params) + parameter_list=params, + ) return optimizer @@ -277,8 +306,9 @@ def train(args, fake_data_reader, to_static): video_model = TSM_ResNet("TSM", train_config, 'Train') - optimizer = create_optimizer(train_config.TRAIN, - video_model.parameters()) + optimizer = create_optimizer( + train_config.TRAIN, video_model.parameters() + ) train_reader = fake_data_reader.create_reader() @@ -297,16 +327,16 @@ def train(args, fake_data_reader, to_static): labels = to_variable(y_data) labels.stop_gradient = True outputs = video_model(imgs) - loss = fluid.layers.cross_entropy(input=outputs, - label=labels, - ignore_index=-1) + loss = fluid.layers.cross_entropy( + input=outputs, label=labels, ignore_index=-1 + ) avg_loss = paddle.mean(loss) - acc_top1 = fluid.layers.accuracy(input=outputs, - label=labels, - k=1) - acc_top5 = fluid.layers.accuracy(input=outputs, - label=labels, - k=5) + acc_top1 = fluid.layers.accuracy( + 
input=outputs, label=labels, k=1 + ) + acc_top5 = fluid.layers.accuracy( + input=outputs, label=labels, k=5 + ) avg_loss.backward() optimizer.minimize(avg_loss) @@ -317,26 +347,35 @@ def train(args, fake_data_reader, to_static): total_acc5 += acc_top5.numpy()[0] total_sample += 1 - print('TRAIN Epoch {}, iter {}, loss = {}, acc1 {}, acc5 {}'. - format(epoch, batch_id, - avg_loss.numpy()[0], - acc_top1.numpy()[0], - acc_top5.numpy()[0])) - ret.extend([ - avg_loss.numpy()[0], - acc_top1.numpy()[0], - acc_top5.numpy()[0] - ]) + print( + 'TRAIN Epoch {}, iter {}, loss = {}, acc1 {}, acc5 {}'.format( + epoch, + batch_id, + avg_loss.numpy()[0], + acc_top1.numpy()[0], + acc_top5.numpy()[0], + ) + ) + ret.extend( + [ + avg_loss.numpy()[0], + acc_top1.numpy()[0], + acc_top5.numpy()[0], + ] + ) print( - 'TRAIN End, Epoch {}, avg_loss= {}, avg_acc1= {}, avg_acc5= {}'. - format(epoch, total_loss / total_sample, - total_acc1 / total_sample, total_acc5 / total_sample)) + 'TRAIN End, Epoch {}, avg_loss= {}, avg_acc1= {}, avg_acc5= {}'.format( + epoch, + total_loss / total_sample, + total_acc1 / total_sample, + total_acc5 / total_sample, + ) + ) return ret class TestTsm(unittest.TestCase): - def test_dygraph_static_same_loss(self): if fluid.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py index b8addd53d5b1804f127658c46ac431facc0821b4..206ab40d0bc9b0621a392325031f0fa4544e0859 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typehint.py @@ -32,10 +32,12 @@ def function(x: A) -> A: class TestTransformWhileLoop(unittest.TestCase): - def setUp(self): - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.x = np.zeros(shape=(1), dtype=np.int32) self._init_dyfunc() @@ -69,7 +71,6 @@ class TestTransformWhileLoop(unittest.TestCase): class TestTypeHint(TestTransformWhileLoop): - def _init_dyfunc(self): self.dyfunc = function diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py index b55fb242199f47b77c1ec9de53ef45cb43a26544..e4dea9d6abf84348b75ef7134ba381237edf57c3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py @@ -20,7 +20,6 @@ from typing import Dict, List, Tuple class BaseLayer(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(BaseLayer, self).__init__() self._linear = paddle.nn.Linear(in_size, out_size) @@ -32,7 +31,6 @@ class BaseLayer(paddle.nn.Layer): class LinearNetWithTuple(BaseLayer): - def __init__(self, in_size, out_size): super(LinearNetWithTuple, self).__init__(in_size, out_size) @@ -42,7 +40,6 @@ class LinearNetWithTuple(BaseLayer): class LinearNetWithTuple2(BaseLayer): - def __init__(self, in_size, out_size): super(LinearNetWithTuple2, self).__init__(in_size, out_size) @@ -52,7 +49,6 @@ class LinearNetWithTuple2(BaseLayer): class LinearNetWithList(BaseLayer): - def __init__(self, in_size, out_size): super(LinearNetWithList, self).__init__(in_size, out_size) @@ -62,7 +58,6 @@ class LinearNetWithList(BaseLayer): class LinearNetWithDict(BaseLayer): - def 
__init__(self, in_size, out_size): super(LinearNetWithDict, self).__init__(in_size, out_size) @@ -72,7 +67,6 @@ class LinearNetWithDict(BaseLayer): class TestTyping(unittest.TestCase): - def setUp(self): self.in_num = 16 self.out_num = 16 @@ -105,7 +99,6 @@ class TestTyping(unittest.TestCase): class TestTypingTuple(TestTyping): - def build_net(self): return LinearNetWithTuple2(self.in_num, self.out_num) @@ -116,7 +109,6 @@ class TestTypingTuple(TestTyping): class TestTypingList(TestTyping): - def build_net(self): return LinearNetWithList(self.in_num, self.out_num) @@ -126,7 +118,6 @@ class TestTypingList(TestTyping): class TestTypingDict(TestTyping): - def build_net(self): return LinearNetWithDict(self.in_num, self.out_num) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_utils.py index 30300ddf2e4b1839cf2fb9a14926b65f2aea3e96..4edb81504458c435cd9914c7b8369db761929e53 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_utils.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_utils.py @@ -20,7 +20,6 @@ from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_func class TestIndexInList(unittest.TestCase): - def test_index_in_list(self): list_to_test = [1, 2, 3, 4, 5] self.assertEqual(index_in_list(list_to_test, 4), 3) @@ -37,8 +36,7 @@ def dyfunc_assign(input): [x, y] = m, n = z -class StaticCode(): - +class StaticCode: def dyfunc_assign(input): b = 1 a = b @@ -53,7 +51,6 @@ class StaticCode(): class TestIsPaddle(unittest.TestCase): - def fake_module(self): return types.ModuleType('paddlenlp') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py index 901778916934a602a9b4c7916bcd63a728cda8ac..353a5e8b79738afce5438a7cf2f19bcc81e3cdac 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py @@ -15,29 +15,33 @@ import unittest from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node +from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + create_fill_constant_node, +) class TestVariableTransFunc(unittest.TestCase): - def test_create_fill_constant_node(self): node = create_fill_constant_node("a", 1.0) source = "a = paddle.full(shape=[1], dtype='float64', fill_value=1.0, name='a')" self.assertEqual( ast_to_source_code(node).replace('\n', '').replace(' ', ''), - source.replace(' ', '')) + source.replace(' ', ''), + ) node = create_fill_constant_node("b", True) source = "b = paddle.full(shape=[1], dtype='bool', fill_value=True, name='b')" self.assertEqual( ast_to_source_code(node).replace('\n', '').replace(' ', ''), - source.replace(' ', '')) + source.replace(' ', ''), + ) node = create_fill_constant_node("c", 4293) source = "c = paddle.full(shape=[1], dtype='int64', fill_value=4293, name='c')" self.assertEqual( ast_to_source_code(node).replace('\n', '').replace(' ', ''), - source.replace(' ', '')) + source.replace(' ', ''), + ) self.assertIsNone(create_fill_constant_node("e", None)) self.assertIsNone(create_fill_constant_node("e", [])) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py index 846cb9e7aa15e124ee58d74b2ec6bf33f740a73f..a232c70fa71728e9923be3bf90a76f1f6d07c0cf 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py @@ -37,7 +37,6 @@ def false_fn(): class TestReturnNoneInIfelse(unittest.TestCase): - def test_dy2static_warning(self): paddle.disable_static() with warnings.catch_warnings(record=True) as w: @@ -46,9 +45,10 @@ class TestReturnNoneInIfelse(unittest.TestCase): flag = False for warn in w: if ( - issubclass(warn.category, UserWarning) + issubclass(warn.category, UserWarning) ) and "Set var to 'None' in ifelse block might lead to error." in str( - warn.message): + warn.message + ): flag = True break self.assertTrue(flag) @@ -63,9 +63,10 @@ class TestReturnNoneInIfelse(unittest.TestCase): flag = False for warn in w: if ( - issubclass(warn.category, UserWarning) + issubclass(warn.category, UserWarning) ) and "Set var to 'None' in ifelse block might lead to error." in str( - warn.message): + warn.message + ): flag = True break self.assertTrue(flag) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py index fbabe1bc36bbb6b5761682835f67dd9b146f061f..a597e33518787a6f095451f0b6c2d894bb6dc109 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py @@ -55,15 +55,15 @@ def build_dict(corpus, min_freq=3): word_freq_dict[word] = 0 word_freq_dict[word] += 1 - word_freq_dict = sorted(word_freq_dict.items(), - key=lambda x: x[1], - reverse=True) + word_freq_dict = sorted( + word_freq_dict.items(), key=lambda x: x[1], reverse=True + ) word2id_dict = dict() word2id_freq = dict() id2word_dict = dict() - word2id_freq[0] = 1. 
+ word2id_freq[0] = 1.0 word2id_dict['[oov]'] = 0 id2word_dict[0] = '[oov]' @@ -85,8 +85,10 @@ word2id_freq, word2id_dict, id2word_dict = build_dict(corpus) vocab_size = len(word2id_freq) print("there are totoally %d different words in the corpus" % vocab_size) for _, (word, word_id) in zip(range(50), word2id_dict.items()): - print("word %s, its id %d, its word freq %d" % - (word, word_id, word2id_freq[word_id])) + print( + "word %s, its id %d, its word freq %d" + % (word, word_id, word2id_freq[word_id]) + ) def convert_corpus_to_id(corpus, word2id_dict): @@ -94,7 +96,9 @@ def convert_corpus_to_id(corpus, word2id_dict): for line in corpus: new_line = [ word2id_dict[word] - if word in word2id_dict else word2id_dict['[oov]'] for word in line + if word in word2id_dict + else word2id_dict['[oov]'] + for word in line ] new_corpus.append(new_line) return new_corpus @@ -104,10 +108,10 @@ corpus = convert_corpus_to_id(corpus, word2id_dict) def subsampling(corpus, word2id_freq): - def keep(word_id): return random.uniform(0, 1) < math.sqrt( - 1e-4 / word2id_freq[word_id] * len(corpus)) + 1e-4 / word2id_freq[word_id] * len(corpus) + ) new_corpus = [] for line in corpus: @@ -119,11 +123,13 @@ def subsampling(corpus, word2id_freq): corpus = subsampling(corpus, word2id_freq) -def build_data(corpus, - word2id_dict, - word2id_freq, - max_window_size=3, - negative_sample_num=10): +def build_data( + corpus, + word2id_dict, + word2id_freq, + max_window_size=3, + negative_sample_num=10, +): dataset = [] @@ -132,13 +138,15 @@ def build_data(corpus, window_size = random.randint(1, max_window_size) center_word = line[center_word_idx] - positive_word_range = (max(0, center_word_idx - window_size), - min( - len(line) - 1, - center_word_idx + window_size)) + positive_word_range = ( + max(0, center_word_idx - window_size), + min(len(line) - 1, center_word_idx + window_size), + ) positive_word_candidates = [ - line[idx] for idx in range(positive_word_range[0], - positive_word_range[1] + 1) + line[idx] + for idx in range( + positive_word_range[0], positive_word_range[1] + 1 + ) if idx != center_word_idx and line[idx] != line[center_word_idx] ] @@ -161,8 +169,10 @@ def build_data(corpus, dataset = build_data(corpus, word2id_dict, word2id_freq) for _, (center_word, target_word, label) in zip(range(50), dataset): - print("center_word %s, target %s, label %d" % - (id2word_dict[center_word], id2word_dict[target_word], label)) + print( + "center_word %s, target %s, label %d" + % (id2word_dict[center_word], id2word_dict[target_word], label) + ) def build_batch(dataset, batch_size, epoch_num): @@ -185,9 +195,14 @@ def build_batch(dataset, batch_size, epoch_num): if len(center_word_batch) == batch_size: yield np.array(center_word_batch).astype("int64"), np.array( - target_word_batch).astype("int64"), np.array( - label_batch).astype("float32"), np.array( - eval_word_batch).astype("int64") + target_word_batch + ).astype("int64"), np.array(label_batch).astype( + "float32" + ), np.array( + eval_word_batch + ).astype( + "int64" + ) center_word_batch = [] target_word_batch = [] label_batch = [] @@ -195,12 +210,15 @@ def build_batch(dataset, batch_size, epoch_num): if len(center_word_batch) > 0: yield np.array(center_word_batch).astype("int64"), np.array( - target_word_batch).astype("int64"), np.array(label_batch).astype( - "float32"), np.array(eval_word_batch).astype("int64") + target_word_batch + ).astype("int64"), np.array(label_batch).astype("float32"), np.array( + eval_word_batch + ).astype( + "int64" + ) class 
SkipGram(fluid.dygraph.Layer): - def __init__(self, name_scope, vocab_size, embedding_size, init_scale=0.1): super(SkipGram, self).__init__(name_scope) self.vocab_size = vocab_size @@ -213,7 +231,10 @@ class SkipGram(fluid.dygraph.Layer): name='embedding_para', initializer=fluid.initializer.UniformInitializer( low=-0.5 / self.embedding_size, - high=0.5 / self.embedding_size))) + high=0.5 / self.embedding_size, + ), + ), + ) self.embedding_out = Embedding( size=[self.vocab_size, self.embedding_size], @@ -222,7 +243,10 @@ class SkipGram(fluid.dygraph.Layer): name='embedding_out_para', initializer=fluid.initializer.UniformInitializer( low=-0.5 / self.embedding_size, - high=0.5 / self.embedding_size))) + high=0.5 / self.embedding_size, + ), + ), + ) @declarative def forward(self, center_words, target_words, label): @@ -231,8 +255,9 @@ class SkipGram(fluid.dygraph.Layer): # center_words_emb = [batch_size, embedding_size] # target_words_emb = [batch_size, embedding_size] - word_sim = fluid.layers.elementwise_mul(center_words_emb, - target_words_emb) + word_sim = fluid.layers.elementwise_mul( + center_words_emb, target_words_emb + ) word_sim = fluid.layers.reduce_sum(word_sim, dim=-1) pred = fluid.layers.sigmoid(word_sim) @@ -257,27 +282,34 @@ def train(to_static): random.seed(0) np.random.seed(0) - place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): fluid.default_startup_program().random_seed = 1000 fluid.default_main_program().random_seed = 1000 - skip_gram_model = SkipGram("skip_gram_model", vocab_size, - embedding_size) + skip_gram_model = SkipGram( + "skip_gram_model", vocab_size, embedding_size + ) adam = fluid.optimizer.AdamOptimizer( learning_rate=learning_rate, - parameter_list=skip_gram_model.parameters()) + parameter_list=skip_gram_model.parameters(), + ) step = 0 ret = [] for center_words, target_words, label, eval_words in build_batch( - dataset, batch_size, epoch_num): + dataset, batch_size, epoch_num + ): center_words_var = fluid.dygraph.to_variable(center_words) target_words_var = fluid.dygraph.to_variable(target_words) label_var = fluid.dygraph.to_variable(label) - pred, loss = skip_gram_model(center_words_var, target_words_var, - label_var) + pred, loss = skip_gram_model( + center_words_var, target_words_var, label_var + ) loss.backward() adam.minimize(loss) @@ -291,7 +323,6 @@ def train(to_static): class TestWord2Vec(unittest.TestCase): - def test_dygraph_static_same_loss(self): dygraph_loss = train(to_static=False) static_loss = train(to_static=True) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py index 0fb791a3a28ca6d08ced265f322eba8d6924613b..eecd6806fa540b2616dd4624c3e69a3277f33f5d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py @@ -47,27 +47,26 @@ class SmoothedValue(object): class FakeDataReader(object): - def __init__(self): self.generator_out = [] self.total_iter = cfg.max_iter for i in range(self.total_iter): batch_out = [] for j in range(cfg.batch_size): - img = np.random.normal(0.485, 0.229, - [3, cfg.input_size, cfg.input_size]) + img = np.random.normal( + 0.485, 0.229, [3, cfg.input_size, cfg.input_size] + ) point1 = cfg.input_size / 4 point2 = cfg.input_size / 2 gt_boxes = np.array([[point1, 
point1, point2, point2]]) - gt_labels = np.random.randint(low=0, - high=cfg.class_num, - size=[1]) + gt_labels = np.random.randint( + low=0, high=cfg.class_num, size=[1] + ) gt_scores = np.zeros([1]) batch_out.append([img, gt_boxes, gt_labels, gt_scores]) self.generator_out.append(batch_out) def reader(self): - def generator(): for i in range(self.total_iter): yield self.generator_out[i] @@ -97,9 +96,9 @@ def train(to_static): learning_rate = cfg.learning_rate values = [learning_rate * (gamma**i) for i in range(step_num + 1)] - lr = fluid.dygraph.PiecewiseDecay(boundaries=boundaries, - values=values, - begin=0) + lr = fluid.dygraph.PiecewiseDecay( + boundaries=boundaries, values=values, begin=0 + ) lr = fluid.layers.linear_lr_warmup( learning_rate=lr, @@ -112,7 +111,8 @@ def train(to_static): learning_rate=lr, regularization=fluid.regularizer.L2Decay(cfg.weight_decay), momentum=cfg.momentum, - parameter_list=model.parameters()) + parameter_list=model.parameters(), + ) start_time = time.time() snapshot_loss = 0 @@ -150,9 +150,13 @@ def train(to_static): snapshot_time += start_time - prev_start_time total_sample += 1 - print("Iter {:d}, loss {:.6f}, time {:.5f}".format( - iter_id, smoothed_loss.get_mean_value(), - start_time - prev_start_time)) + print( + "Iter {:d}, loss {:.6f}, time {:.5f}".format( + iter_id, + smoothed_loss.get_mean_value(), + start_time - prev_start_time, + ) + ) ret.append(smoothed_loss.get_mean_value()) loss.backward() @@ -164,14 +168,12 @@ def train(to_static): class TestYolov3(unittest.TestCase): - def test_dygraph_static_same_loss(self): dygraph_loss = train(to_static=False) static_loss = train(to_static=True) - np.testing.assert_allclose(dygraph_loss, - static_loss, - rtol=0.001, - atol=1e-05) + np.testing.assert_allclose( + dygraph_loss, static_loss, rtol=0.001, atol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py index 4c33f88469ceb4fe27bc0fd7c2eb72f52b76d49a..27139811c0abf7bbe6457a4b3fdd51b8c161c342 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py @@ -16,7 +16,13 @@ import numpy as np import paddle.fluid as fluid import paddle.fluid.layers as layers -from paddle.fluid.dygraph import Embedding, Layer, LayerNorm, Linear, to_variable +from paddle.fluid.dygraph import ( + Embedding, + Layer, + LayerNorm, + Linear, + to_variable, +) from paddle.fluid.dygraph.jit import dygraph_to_static_func from paddle.fluid.layers.utils import map_structure import paddle @@ -29,12 +35,15 @@ def position_encoding_init(n_position, d_pos_vec): channels = d_pos_vec position = np.arange(n_position) num_timescales = channels // 2 - log_timescale_increment = (np.log(float(1e4) / float(1)) / - (num_timescales - 1)) - inv_timescales = np.exp( - np.arange(num_timescales)) * -log_timescale_increment + log_timescale_increment = np.log(float(1e4) / float(1)) / ( + num_timescales - 1 + ) + inv_timescales = ( + np.exp(np.arange(num_timescales)) * -log_timescale_increment + ) scaled_time = np.expand_dims(position, 1) * np.expand_dims( - inv_timescales, 0) + inv_timescales, 0 + ) signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant') position_enc = signal @@ -42,7 +51,6 @@ def 
position_encoding_init(n_position, d_pos_vec): class PrePostProcessLayer(Layer): - def __init__(self, process_cmd, d_model, dropout_rate): super(PrePostProcessLayer, self).__init__() self.process_cmd = process_cmd @@ -53,18 +61,24 @@ class PrePostProcessLayer(Layer): elif cmd == "n": # add layer normalization self.functors.append( self.add_sublayer( - "layer_norm_%d" % - len([layer for layer in self.children()]), + "layer_norm_%d" + % len([layer for layer in self.children()]), LayerNorm( normalized_shape=d_model, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.)), + initializer=fluid.initializer.Constant(1.0) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.))))) + initializer=fluid.initializer.Constant(0.0) + ), + ), + ) + ) elif cmd == "d": # add dropout if dropout_rate: self.functors.append( - lambda x: layers.dropout(x, dropout_prob=dropout_rate)) + lambda x: layers.dropout(x, dropout_prob=dropout_rate) + ) def forward(self, x, residual=None): for i, cmd in enumerate(self.process_cmd): @@ -76,14 +90,15 @@ class PrePostProcessLayer(Layer): class MultiHeadAttention(Layer): - - def __init__(self, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0., - param_initializer=None): + def __init__( + self, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.0, + param_initializer=None, + ): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.d_key = d_key @@ -94,22 +109,26 @@ class MultiHeadAttention(Layer): input_dim=d_model, output_dim=d_key * n_head, bias_attr=False, - param_attr=fluid.ParamAttr(initializer=param_initializer)) + param_attr=fluid.ParamAttr(initializer=param_initializer), + ) self.k_fc = Linear( input_dim=d_model, output_dim=d_key * n_head, bias_attr=False, - param_attr=fluid.ParamAttr(initializer=param_initializer)) + param_attr=fluid.ParamAttr(initializer=param_initializer), + ) self.v_fc = Linear( input_dim=d_model, output_dim=d_value * n_head, bias_attr=False, - param_attr=fluid.ParamAttr(initializer=param_initializer)) + param_attr=fluid.ParamAttr(initializer=param_initializer), + ) self.proj_fc = Linear( input_dim=d_value * n_head, output_dim=d_model, bias_attr=False, - param_attr=fluid.ParamAttr(initializer=param_initializer)) + param_attr=fluid.ParamAttr(initializer=param_initializer), + ) def forward(self, queries, keys, values, attn_bias, cache=None): # compute q ,k ,v @@ -132,10 +151,9 @@ class MultiHeadAttention(Layer): v = layers.concat([cache_v, v], axis=2) cache["k"], cache["v"] = k, v # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.d_model**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.d_model**-0.5 + ) if attn_bias is not None: product += attn_bias weights = layers.softmax(product) @@ -149,7 +167,6 @@ class MultiHeadAttention(Layer): class FFN(Layer): - def __init__(self, d_inner_hid, d_model, dropout_rate): super(FFN, self).__init__() self.dropout_rate = dropout_rate @@ -165,37 +182,44 @@ class FFN(Layer): class EncoderLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(EncoderLayer, self).__init__() - self.preprocesser1 = 
PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) - self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, - attention_dropout) - self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, - prepostprocess_dropout) + self.preprocesser1 = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) + self.self_attn = MultiHeadAttention( + d_key, d_value, d_model, n_head, attention_dropout + ) + self.postprocesser1 = PrePostProcessLayer( + postprocess_cmd, d_model, prepostprocess_dropout + ) - self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) + self.preprocesser2 = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) - self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, - prepostprocess_dropout) + self.postprocesser2 = PrePostProcessLayer( + postprocess_cmd, d_model, prepostprocess_dropout + ) def forward(self, enc_input, attn_bias): - attn_output = self.self_attn(self.preprocesser1(enc_input), None, None, - attn_bias) + attn_output = self.self_attn( + self.preprocesser1(enc_input), None, None, attn_bias + ) attn_output = self.postprocesser1(attn_output, enc_input) ffn_output = self.ffn(self.preprocesser2(attn_output)) ffn_output = self.postprocesser2(ffn_output, attn_output) @@ -203,19 +227,20 @@ class EncoderLayer(Layer): class Encoder(Layer): - - def __init__(self, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(Encoder, self).__init__() @@ -224,12 +249,23 @@ class Encoder(Layer): self.encoder_layers.append( self.add_sublayer( "layer_%d" % i, - EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, - prepostprocess_dropout, attention_dropout, - relu_dropout, preprocess_cmd, - postprocess_cmd))) - self.processer = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) + EncoderLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ), + ) + ) + self.processer = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) def forward(self, enc_input, attn_bias): for encoder_layer in self.encoder_layers: @@ -240,14 +276,15 @@ class Encoder(Layer): class Embedder(Layer): - def __init__(self, vocab_size, emb_dim, bos_idx=0): super(Embedder, self).__init__() self.word_embedder = Embedding( size=[vocab_size, emb_dim], padding_idx=bos_idx, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Normal(0., emb_dim**-0.5))) + initializer=fluid.initializer.Normal(0.0, emb_dim**-0.5) + ), + ) def forward(self, word): word_emb = self.word_embedder(word) @@ -255,11 +292,23 @@ class Embedder(Layer): class WrapEncoder(Layer): - - def __init__(self, src_vocab_size, max_length, n_layer, n_head, d_key, - d_value, d_model, d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, preprocess_cmd, - postprocess_cmd, word_embedder): + def __init__( + self, + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + 
postprocess_cmd, + word_embedder, + ): super(WrapEncoder, self).__init__() self.emb_dropout = prepostprocess_dropout self.emb_dim = d_model @@ -268,12 +317,24 @@ class WrapEncoder(Layer): size=[max_length, self.emb_dim], param_attr=fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - position_encoding_init(max_length, self.emb_dim)), - trainable=False)) - self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model, - d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, preprocess_cmd, - postprocess_cmd) + position_encoding_init(max_length, self.emb_dim) + ), + trainable=False, + ), + ) + self.encoder = Encoder( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ) def forward(self, src_word, src_pos, src_slf_attn_bias): word_emb = self.word_embedder(src_word) @@ -281,71 +342,96 @@ class WrapEncoder(Layer): pos_enc = self.pos_encoder(src_pos) pos_enc.stop_gradient = True emb = word_emb + pos_enc - enc_input = layers.dropout( - emb, - dropout_prob=self.emb_dropout, - ) if self.emb_dropout else emb + enc_input = ( + layers.dropout( + emb, + dropout_prob=self.emb_dropout, + ) + if self.emb_dropout + else emb + ) enc_output = self.encoder(enc_input, src_slf_attn_bias) return enc_output class DecoderLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(DecoderLayer, self).__init__() - self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) - self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, - attention_dropout) - self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, - prepostprocess_dropout) - self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) - self.cross_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, - attention_dropout) - self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, - prepostprocess_dropout) - self.preprocesser3 = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) + self.preprocesser1 = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) + self.self_attn = MultiHeadAttention( + d_key, d_value, d_model, n_head, attention_dropout + ) + self.postprocesser1 = PrePostProcessLayer( + postprocess_cmd, d_model, prepostprocess_dropout + ) + self.preprocesser2 = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) + self.cross_attn = MultiHeadAttention( + d_key, d_value, d_model, n_head, attention_dropout + ) + self.postprocesser2 = PrePostProcessLayer( + postprocess_cmd, d_model, prepostprocess_dropout + ) + self.preprocesser3 = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) - self.postprocesser3 = PrePostProcessLayer(postprocess_cmd, d_model, - prepostprocess_dropout) + self.postprocesser3 = PrePostProcessLayer( + postprocess_cmd, d_model, prepostprocess_dropout + ) - def forward(self, - dec_input, - enc_output, - self_attn_bias, - cross_attn_bias, - cache=None): - self_attn_output = 
self.self_attn(self.preprocesser1(dec_input), None, - None, self_attn_bias, cache) + def forward( + self, dec_input, enc_output, self_attn_bias, cross_attn_bias, cache=None + ): + self_attn_output = self.self_attn( + self.preprocesser1(dec_input), None, None, self_attn_bias, cache + ) self_attn_output = self.postprocesser1(self_attn_output, dec_input) cross_attn_output = self.cross_attn( - self.preprocesser2(self_attn_output), enc_output, enc_output, - cross_attn_bias) - cross_attn_output = self.postprocesser2(cross_attn_output, - self_attn_output) + self.preprocesser2(self_attn_output), + enc_output, + enc_output, + cross_attn_bias, + ) + cross_attn_output = self.postprocesser2( + cross_attn_output, self_attn_output + ) ffn_output = self.ffn(self.preprocesser3(cross_attn_output)) ffn_output = self.postprocesser3(ffn_output, cross_attn_output) return ffn_output class Decoder(Layer): - - def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, - prepostprocess_dropout, attention_dropout, relu_dropout, - preprocess_cmd, postprocess_cmd): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ): super(Decoder, self).__init__() self.decoder_layers = list() @@ -353,33 +439,63 @@ class Decoder(Layer): self.decoder_layers.append( self.add_sublayer( "layer_%d" % i, - DecoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, - prepostprocess_dropout, attention_dropout, - relu_dropout, preprocess_cmd, - postprocess_cmd))) - self.processer = PrePostProcessLayer(preprocess_cmd, d_model, - prepostprocess_dropout) - - def forward(self, + DecoderLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ), + ) + ) + self.processer = PrePostProcessLayer( + preprocess_cmd, d_model, prepostprocess_dropout + ) + + def forward( + self, + dec_input, + enc_output, + self_attn_bias, + cross_attn_bias, + caches=None, + ): + for i, decoder_layer in enumerate(self.decoder_layers): + dec_output = decoder_layer( dec_input, enc_output, self_attn_bias, cross_attn_bias, - caches=None): - for i, decoder_layer in enumerate(self.decoder_layers): - dec_output = decoder_layer(dec_input, enc_output, self_attn_bias, - cross_attn_bias, - None if caches is None else caches[i]) + None if caches is None else caches[i], + ) dec_input = dec_output return self.processer(dec_output) class WrapDecoder(Layer): - - def __init__(self, trg_vocab_size, max_length, n_layer, n_head, d_key, - d_value, d_model, d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, preprocess_cmd, - postprocess_cmd, share_input_output_embed, word_embedder): + def __init__( + self, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + share_input_output_embed, + word_embedder, + ): super(WrapDecoder, self).__init__() self.emb_dropout = prepostprocess_dropout @@ -389,40 +505,58 @@ class WrapDecoder(Layer): size=[max_length, self.emb_dim], param_attr=fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - position_encoding_init(max_length, self.emb_dim)), - trainable=False)) - self.decoder = Decoder(n_layer, n_head, d_key, d_value, d_model, - d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, preprocess_cmd, - 
postprocess_cmd) + position_encoding_init(max_length, self.emb_dim) + ), + trainable=False, + ), + ) + self.decoder = Decoder( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ) if share_input_output_embed: - self.linear = lambda x: layers.matmul(x=x, - y=self.word_embedder. - word_embedder.weight, - transpose_y=True) + self.linear = lambda x: layers.matmul( + x=x, y=self.word_embedder.word_embedder.weight, transpose_y=True + ) else: - self.linear = Linear(input_dim=d_model, - output_dim=trg_vocab_size, - bias_attr=False) - - def forward(self, - trg_word, - trg_pos, - trg_slf_attn_bias, - trg_src_attn_bias, - enc_output, - caches=None): + self.linear = Linear( + input_dim=d_model, output_dim=trg_vocab_size, bias_attr=False + ) + + def forward( + self, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + enc_output, + caches=None, + ): word_emb = self.word_embedder(trg_word) word_emb = layers.scale(x=word_emb, scale=self.emb_dim**0.5) pos_enc = self.pos_encoder(trg_pos) pos_enc.stop_gradient = True emb = word_emb + pos_enc - dec_input = layers.dropout( - emb, - dropout_prob=self.emb_dropout, - ) if self.emb_dropout else emb - dec_output = self.decoder(dec_input, enc_output, trg_slf_attn_bias, - trg_src_attn_bias, caches) + dec_input = ( + layers.dropout( + emb, + dropout_prob=self.emb_dropout, + ) + if self.emb_dropout + else emb + ) + dec_output = self.decoder( + dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias, caches + ) dec_output = layers.reshape( dec_output, shape=[-1, dec_output.shape[-1]], @@ -432,20 +566,21 @@ class WrapDecoder(Layer): class CrossEntropyCriterion(object): - def __init__(self, label_smooth_eps): self.label_smooth_eps = label_smooth_eps def __call__(self, predict, label, weights): if self.label_smooth_eps: - label_out = layers.label_smooth(label=layers.one_hot( - input=label, depth=predict.shape[-1]), - epsilon=self.label_smooth_eps) + label_out = layers.label_smooth( + label=layers.one_hot(input=label, depth=predict.shape[-1]), + epsilon=self.label_smooth_eps, + ) cost = layers.softmax_with_cross_entropy( logits=predict, label=label_out, - soft_label=True if self.label_smooth_eps else False) + soft_label=True if self.label_smooth_eps else False, + ) weighted_cost = cost * weights sum_cost = layers.reduce_sum(weighted_cost) token_num = layers.reduce_sum(weights) @@ -455,49 +590,72 @@ class CrossEntropyCriterion(object): class Transformer(Layer): - - def __init__(self, - src_vocab_size, - trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - bos_id=0, - eos_id=1): + def __init__( + self, + src_vocab_size, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + bos_id=0, + eos_id=1, + ): super(Transformer, self).__init__() - src_word_embedder = Embedder(vocab_size=src_vocab_size, - emb_dim=d_model, - bos_idx=bos_id) - self.encoder = WrapEncoder(src_vocab_size, max_length, n_layer, n_head, - d_key, d_value, d_model, d_inner_hid, - prepostprocess_dropout, attention_dropout, - relu_dropout, preprocess_cmd, - postprocess_cmd, src_word_embedder) + src_word_embedder = Embedder( + vocab_size=src_vocab_size, 
emb_dim=d_model, bos_idx=bos_id + ) + self.encoder = WrapEncoder( + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + src_word_embedder, + ) if weight_sharing: - assert src_vocab_size == trg_vocab_size, ( - "Vocabularies in source and target should be same for weight sharing." - ) + assert ( + src_vocab_size == trg_vocab_size + ), "Vocabularies in source and target should be same for weight sharing." trg_word_embedder = src_word_embedder else: - trg_word_embedder = Embedder(vocab_size=trg_vocab_size, - emb_dim=d_model, - bos_idx=bos_id) - self.decoder = WrapDecoder(trg_vocab_size, max_length, n_layer, n_head, - d_key, d_value, d_model, d_inner_hid, - prepostprocess_dropout, attention_dropout, - relu_dropout, preprocess_cmd, - postprocess_cmd, weight_sharing, - trg_word_embedder) + trg_word_embedder = Embedder( + vocab_size=trg_vocab_size, emb_dim=d_model, bos_idx=bos_id + ) + self.decoder = WrapDecoder( + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + trg_word_embedder, + ) self.trg_vocab_size = trg_vocab_size self.n_layer = n_layer @@ -506,28 +664,39 @@ class Transformer(Layer): self.d_value = d_value @dygraph_to_static_func - def forward(self, src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos, - trg_slf_attn_bias, trg_src_attn_bias): + def forward( + self, + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + ): enc_output = self.encoder(src_word, src_pos, src_slf_attn_bias) - predict = self.decoder(trg_word, trg_pos, trg_slf_attn_bias, - trg_src_attn_bias, enc_output) + predict = self.decoder( + trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, enc_output + ) return predict @dygraph_to_static_func - def beam_search(self, - src_word, - src_pos, - src_slf_attn_bias, - trg_word, - trg_src_attn_bias, - bos_id=0, - eos_id=1, - beam_size=4, - max_len=256): - + def beam_search( + self, + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_src_attn_bias, + bos_id=0, + eos_id=1, + beam_size=4, + max_len=256, + ): def expand_to_beam_size(tensor, beam_size): - tensor = layers.reshape(tensor, [tensor.shape[0], 1] + - list(tensor.shape[1:])) + tensor = layers.reshape( + tensor, [tensor.shape[0], 1] + list(tensor.shape[1:]) + ) tile_dims = [1] * len(tensor.shape) tile_dims[1] = beam_size return layers.expand(tensor, tile_dims) @@ -536,44 +705,60 @@ class Transformer(Layer): var_dim_in_state = 2 # count in beam dim tensor = layers.transpose( tensor, - list(range(var_dim_in_state, len(tensor.shape))) + - list(range(0, var_dim_in_state))) + list(range(var_dim_in_state, len(tensor.shape))) + + list(range(0, var_dim_in_state)), + ) - tensor = layers.reshape(tensor, [0] * - (len(tensor.shape) - var_dim_in_state) + - [batch_size * beam_size]) + tensor = layers.reshape( + tensor, + [0] * (len(tensor.shape) - var_dim_in_state) + + [batch_size * beam_size], + ) res = layers.transpose( tensor, list( - range((len(tensor.shape) + 1 - var_dim_in_state), - len(tensor.shape))) + - list(range(0, (len(tensor.shape) + 1 - var_dim_in_state)))) + range( + (len(tensor.shape) + 1 - var_dim_in_state), + len(tensor.shape), + ) + ) + + list(range(0, (len(tensor.shape) + 1 - var_dim_in_state))), + ) return res def 
split_batch_beams(tensor): var_dim_in_state = 1 tensor = layers.transpose( tensor, - list(range(var_dim_in_state, len(tensor.shape))) + - list(range(0, var_dim_in_state))) - tensor = layers.reshape(tensor, [0] * - (len(tensor.shape) - var_dim_in_state) + - [batch_size, beam_size]) + list(range(var_dim_in_state, len(tensor.shape))) + + list(range(0, var_dim_in_state)), + ) + tensor = layers.reshape( + tensor, + [0] * (len(tensor.shape) - var_dim_in_state) + + [batch_size, beam_size], + ) res = layers.transpose( tensor, list( - range((len(tensor.shape) - 1 - var_dim_in_state), - len(tensor.shape))) + - list(range(0, (len(tensor.shape) - 1 - var_dim_in_state)))) + range( + (len(tensor.shape) - 1 - var_dim_in_state), + len(tensor.shape), + ) + ) + + list(range(0, (len(tensor.shape) - 1 - var_dim_in_state))), + ) return res def mask_probs(probs, finished, noend_mask_tensor): finished = layers.cast(finished, dtype=probs.dtype) - probs = layers.elementwise_mul(layers.expand( - layers.unsqueeze(finished, [2]), [1, 1, self.trg_vocab_size]), - noend_mask_tensor, - axis=-1) - layers.elementwise_mul( - probs, (finished - 1), axis=0) + probs = layers.elementwise_mul( + layers.expand( + layers.unsqueeze(finished, [2]), [1, 1, self.trg_vocab_size] + ), + noend_mask_tensor, + axis=-1, + ) - layers.elementwise_mul(probs, (finished - 1), axis=0) return probs def gather(input, indices, batch_pos): @@ -585,87 +770,108 @@ class Transformer(Layer): batch_size = enc_output.shape[0] # constant number - inf = float(1. * 1e7) + inf = float(1.0 * 1e7) max_len = (enc_output.shape[1] + 20) if max_len is None else max_len - vocab_size_tensor = layers.fill_constant(shape=[1], - dtype="int64", - value=self.trg_vocab_size) + vocab_size_tensor = layers.fill_constant( + shape=[1], dtype="int64", value=self.trg_vocab_size + ) end_token_tensor = to_variable( - np.full([batch_size, beam_size], eos_id, dtype="int64")) + np.full([batch_size, beam_size], eos_id, dtype="int64") + ) noend_array = [-inf] * self.trg_vocab_size noend_array[eos_id] = 0 noend_mask_tensor = to_variable(np.array(noend_array, dtype="float32")) batch_pos = layers.expand( layers.unsqueeze( - to_variable(np.arange(0, batch_size, 1, dtype="int64")), [1]), - [1, beam_size]) + to_variable(np.arange(0, batch_size, 1, dtype="int64")), [1] + ), + [1, beam_size], + ) predict_ids = [] parent_ids = [] ### initialize states of beam search ### log_probs = to_variable( - np.array([[0.] 
+ [-inf] * (beam_size - 1)] * batch_size, - dtype="float32")) + np.array( + [[0.0] + [-inf] * (beam_size - 1)] * batch_size, dtype="float32" + ) + ) - finished = to_variable(np.full([batch_size, beam_size], 0, - dtype="bool")) + finished = to_variable( + np.full([batch_size, beam_size], 0, dtype="bool") + ) - trg_word = layers.fill_constant(shape=[batch_size * beam_size, 1], - dtype="int64", - value=bos_id) + trg_word = layers.fill_constant( + shape=[batch_size * beam_size, 1], dtype="int64", value=bos_id + ) trg_src_attn_bias = merge_batch_beams( - expand_to_beam_size(trg_src_attn_bias, beam_size)) + expand_to_beam_size(trg_src_attn_bias, beam_size) + ) enc_output = merge_batch_beams( - expand_to_beam_size(enc_output, beam_size)) + expand_to_beam_size(enc_output, beam_size) + ) # init states (caches) for transformer, need to be updated according to selected beam - caches = [{ - "k": - layers.fill_constant( - shape=[batch_size, beam_size, self.n_head, 0, self.d_key], - dtype=enc_output.dtype, - value=0), - "v": - layers.fill_constant( - shape=[batch_size, beam_size, self.n_head, 0, self.d_value], - dtype=enc_output.dtype, - value=0), - } for i in range(self.n_layer)] + caches = [ + { + "k": layers.fill_constant( + shape=[batch_size, beam_size, self.n_head, 0, self.d_key], + dtype=enc_output.dtype, + value=0, + ), + "v": layers.fill_constant( + shape=[batch_size, beam_size, self.n_head, 0, self.d_value], + dtype=enc_output.dtype, + value=0, + ), + } + for i in range(self.n_layer) + ] for i in range(paddle.to_tensor(max_len)): - trg_pos = layers.fill_constant(shape=trg_word.shape, - dtype="int64", - value=i) - caches = map_structure(merge_batch_beams, - caches) # TODO: modified for dygraph2static - logits = self.decoder(trg_word, trg_pos, None, trg_src_attn_bias, - enc_output, caches) + trg_pos = layers.fill_constant( + shape=trg_word.shape, dtype="int64", value=i + ) + caches = map_structure( + merge_batch_beams, caches + ) # TODO: modified for dygraph2static + logits = self.decoder( + trg_word, trg_pos, None, trg_src_attn_bias, enc_output, caches + ) caches = map_structure(split_batch_beams, caches) step_log_probs = split_batch_beams( - fluid.layers.log(fluid.layers.softmax(logits))) - - step_log_probs = mask_probs(step_log_probs, finished, - noend_mask_tensor) - log_probs = layers.elementwise_add(x=step_log_probs, - y=log_probs, - axis=0) - log_probs = layers.reshape(log_probs, - [-1, beam_size * self.trg_vocab_size]) + fluid.layers.log(fluid.layers.softmax(logits)) + ) + + step_log_probs = mask_probs( + step_log_probs, finished, noend_mask_tensor + ) + log_probs = layers.elementwise_add( + x=step_log_probs, y=log_probs, axis=0 + ) + log_probs = layers.reshape( + log_probs, [-1, beam_size * self.trg_vocab_size] + ) scores = log_probs - topk_scores, topk_indices = fluid.layers.topk(input=scores, - k=beam_size) + topk_scores, topk_indices = fluid.layers.topk( + input=scores, k=beam_size + ) beam_indices = fluid.layers.elementwise_floordiv( - topk_indices, vocab_size_tensor) + topk_indices, vocab_size_tensor + ) token_indices = fluid.layers.elementwise_mod( - topk_indices, vocab_size_tensor) + topk_indices, vocab_size_tensor + ) # update states - caches = map_structure(lambda x: gather(x, beam_indices, batch_pos), - caches) + caches = map_structure( + lambda x: gather(x, beam_indices, batch_pos), caches + ) log_probs = gather(log_probs, topk_indices, batch_pos) finished = gather(finished, beam_indices, batch_pos) finished = layers.logical_or( - finished, layers.equal(token_indices, 
end_token_tensor)) + finished, layers.equal(token_indices, end_token_tensor) + ) trg_word = layers.reshape(token_indices, [-1, 1]) predict_ids.append(token_indices) @@ -677,7 +883,8 @@ class Transformer(Layer): predict_ids = layers.stack(predict_ids, axis=0) parent_ids = layers.stack(parent_ids, axis=0) finished_seq = layers.transpose( - layers.gather_tree(predict_ids, parent_ids), [1, 2, 0]) + layers.gather_tree(predict_ids, parent_ids), [1, 2, 0] + ) finished_scores = topk_scores return finished_seq, finished_scores diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py index 3b4d1438c5f007d71f940949c2ad15054be9a0ed..7e77f1ad033db1ef6a5d18ac3db95c1421cc7395 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py @@ -31,36 +31,45 @@ def get_input_descs(args, mode="train"): input_descs_train = { "src_word": [(batch_size, seq_len), "int64", 2], "src_pos": [(batch_size, seq_len), "int64"], - "src_slf_attn_bias": [(batch_size, n_head, seq_len, seq_len), - "float32"], + "src_slf_attn_bias": [ + (batch_size, n_head, seq_len, seq_len), + "float32", + ], "trg_word": [(batch_size, seq_len), "int64", 2], "trg_pos": [(batch_size, seq_len), "int64"], - "trg_slf_attn_bias": [(batch_size, n_head, seq_len, seq_len), - "float32"], - "trg_src_attn_bias": - [(batch_size, n_head, seq_len, seq_len), - "float32"], # TODO: 1 for predict, seq_len for train + "trg_slf_attn_bias": [ + (batch_size, n_head, seq_len, seq_len), + "float32", + ], + "trg_src_attn_bias": [ + (batch_size, n_head, seq_len, seq_len), + "float32", + ], # TODO: 1 for predict, seq_len for train "enc_output": [(batch_size, seq_len, d_model), "float32"], "lbl_word": [(None, 1), "int64"], "lbl_weight": [(None, 1), "float32"], "init_score": [(batch_size, 1), "float32", 2], - "init_idx": [(batch_size, ), "int32"], + "init_idx": [(batch_size,), "int32"], } input_descs_predict = { "src_word": [(batch_size, seq_len), "int64", 2], "src_pos": [(batch_size, seq_len), "int64"], - "src_slf_attn_bias": [(batch_size, n_head, seq_len, seq_len), - "float32"], + "src_slf_attn_bias": [ + (batch_size, n_head, seq_len, seq_len), + "float32", + ], "trg_word": [(batch_size, seq_len), "int64", 2], "trg_pos": [(batch_size, seq_len), "int64"], - "trg_slf_attn_bias": [(batch_size, n_head, seq_len, seq_len), - "float32"], + "trg_slf_attn_bias": [ + (batch_size, n_head, seq_len, seq_len), + "float32", + ], "trg_src_attn_bias": [(batch_size, n_head, 1, seq_len), "float32"], "enc_output": [(batch_size, seq_len, d_model), "float32"], "lbl_word": [(None, 1), "int64"], "lbl_weight": [(None, 1), "float32"], "init_score": [(batch_size, 1), "float32", 2], - "init_idx": [(batch_size, ), "int32"], + "init_idx": [(batch_size,), "int32"], } return input_descs_train if mode == "train" else input_descs_predict @@ -125,43 +134,58 @@ class ModelHyperParams(object): weight_sharing = True -def pad_batch_data(insts, - pad_idx, - n_head, - is_target=False, - is_label=False, - return_attn_bias=True, - return_max_len=True, - return_num_token=False): +def pad_batch_data( + insts, + pad_idx, + n_head, + is_target=False, + is_label=False, + return_attn_bias=True, + return_max_len=True, + return_num_token=False, +): return_list = [] max_len = max(len(inst) for inst in insts) inst_data = np.array( - [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) + [inst + [pad_idx] * 
(max_len - len(inst)) for inst in insts] + ) return_list += [inst_data.astype("int64").reshape([-1, 1])] if is_label: # label weight - inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst)) - for inst in insts]) + inst_weight = np.array( + [ + [1.0] * len(inst) + [0.0] * (max_len - len(inst)) + for inst in insts + ] + ) return_list += [inst_weight.astype("float32").reshape([-1, 1])] else: # position data - inst_pos = np.array([ - list(range(0, len(inst))) + [0] * (max_len - len(inst)) - for inst in insts - ]) + inst_pos = np.array( + [ + list(range(0, len(inst))) + [0] * (max_len - len(inst)) + for inst in insts + ] + ) return_list += [inst_pos.astype("int64").reshape([-1, 1])] if return_attn_bias: if is_target: slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len)) - slf_attn_bias_data = np.triu(slf_attn_bias_data, - 1).reshape([-1, 1, max_len, max_len]) - slf_attn_bias_data = np.tile(slf_attn_bias_data, - [1, n_head, 1, 1]) * [-1e9] + slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( + [-1, 1, max_len, max_len] + ) + slf_attn_bias_data = np.tile( + slf_attn_bias_data, [1, n_head, 1, 1] + ) * [-1e9] else: - slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * - (max_len - len(inst)) - for inst in insts]) + slf_attn_bias_data = np.array( + [ + [0] * len(inst) + [-1e9] * (max_len - len(inst)) + for inst in insts + ] + ) slf_attn_bias_data = np.tile( slf_attn_bias_data.reshape([-1, 1, 1, max_len]), - [1, n_head, max_len, 1]) + [1, n_head, max_len, 1], + ) return_list += [slf_attn_bias_data.astype("float32")] if return_max_len: return_list += [max_len] @@ -175,16 +199,19 @@ def pad_batch_data(insts, def prepare_train_input(insts, src_pad_idx, trg_pad_idx, n_head): src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data( - [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False) + [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False + ) src_word = src_word.reshape(-1, src_max_len) src_pos = src_pos.reshape(-1, src_max_len) trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data( - [inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True) + [inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True + ) trg_word = trg_word.reshape(-1, trg_max_len) trg_pos = trg_pos.reshape(-1, trg_max_len) - trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], - [1, 1, trg_max_len, 1]).astype("float32") + trg_src_attn_bias = np.tile( + src_slf_attn_bias[:, :, ::src_max_len, :], [1, 1, trg_max_len, 1] + ).astype("float32") lbl_word, lbl_weight, num_token = pad_batch_data( [inst[2] for inst in insts], @@ -194,13 +221,21 @@ def prepare_train_input(insts, src_pad_idx, trg_pad_idx, n_head): is_label=True, return_attn_bias=False, return_max_len=False, - return_num_token=True) + return_num_token=True, + ) lbl_word = lbl_word.reshape(-1, 1) lbl_weight = lbl_weight.reshape(-1, 1) data_inputs = [ - src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos, - trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_pos, + trg_slf_attn_bias, + trg_src_attn_bias, + lbl_word, + lbl_weight, ] return data_inputs @@ -208,55 +243,66 @@ def prepare_train_input(insts, src_pad_idx, trg_pad_idx, n_head): def prepare_infer_input(insts, src_pad_idx, bos_idx, n_head): src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data( - [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False) + [inst[0] for inst in insts], src_pad_idx, n_head, 
is_target=False + ) # start tokens trg_word = np.asarray([[bos_idx]] * len(insts), dtype="int64") - trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], - [1, 1, 1, 1]).astype("float32") + trg_src_attn_bias = np.tile( + src_slf_attn_bias[:, :, ::src_max_len, :], [1, 1, 1, 1] + ).astype("float32") trg_word = trg_word.reshape(-1, 1) src_word = src_word.reshape(-1, src_max_len) src_pos = src_pos.reshape(-1, src_max_len) data_inputs = [ - src_word, src_pos, src_slf_attn_bias, trg_word, trg_src_attn_bias + src_word, + src_pos, + src_slf_attn_bias, + trg_word, + trg_src_attn_bias, ] return data_inputs def get_feed_data_reader(args, mode='train'): - def __for_train__(): - train_reader = paddle.batch(wmt16.train(args.src_vocab_size, - args.trg_vocab_size), - batch_size=args.batch_size) + train_reader = paddle.batch( + wmt16.train(args.src_vocab_size, args.trg_vocab_size), + batch_size=args.batch_size, + ) for batch in train_reader(): - tensors = prepare_train_input(batch, args.eos_idx, args.eos_idx, - args.n_head) + tensors = prepare_train_input( + batch, args.eos_idx, args.eos_idx, args.n_head + ) yield tensors def __for_test__(): - test_reader = paddle.batch(wmt16.test(args.src_vocab_size, - args.trg_vocab_size), - batch_size=args.batch_size) + test_reader = paddle.batch( + wmt16.test(args.src_vocab_size, args.trg_vocab_size), + batch_size=args.batch_size, + ) for batch in test_reader(): - tensors = prepare_infer_input(batch, args.eos_idx, args.eos_idx, - args.n_head) + tensors = prepare_infer_input( + batch, args.eos_idx, args.eos_idx, args.n_head + ) yield tensors return __for_train__ if mode == 'train' else __for_test__ class InputField(object): - def __init__(self, input_slots): self.feed_list = [] for slot in input_slots: self.feed_list.append( - fluid.layers.data(name=slot['name'], - shape=slot['shape'], - dtype=slot['dtype'], - lod_level=slot.get('lod_level', 0), - append_batch_size=False)) + fluid.layers.data( + name=slot['name'], + shape=slot['shape'], + dtype=slot['dtype'], + lod_level=slot.get('lod_level', 0), + append_batch_size=False, + ) + ) def load(program, model_path, executor=None, var_list=None): @@ -269,7 +315,8 @@ def load(program, model_path, executor=None, var_list=None): warnings.warn( "An UnicodeDecodeError is catched, which might be caused by loading " "a python2 saved model. Encoding of pickle.load would be set and " - "load again automatically.") + "load again automatically." + ) load_bak = pickle.load pickle.load = partial(load_bak, encoding="latin1") fluid.load(program, model_path, executor, var_list) @@ -282,16 +329,19 @@ def load_dygraph(model_path, keep_name_table=False): """ try: para_dict, opti_dict = fluid.load_dygraph( - model_path, keep_name_table=keep_name_table) + model_path, keep_name_table=keep_name_table + ) return para_dict, opti_dict except UnicodeDecodeError: warnings.warn( "An UnicodeDecodeError is catched, which might be caused by loading " "a python2 saved model. Encoding of pickle.load would be set and " - "load again automatically.") + "load again automatically." 
+        )
         load_bak = pickle.load
         pickle.load = partial(load_bak, encoding="latin1")
         para_dict, opti_dict = fluid.load_dygraph(
-            model_path, keep_name_table=keep_name_table)
+            model_path, keep_name_table=keep_name_table
+        )
         pickle.load = load_bak
         return para_dict, opti_dict
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/tsm_config_utils.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/tsm_config_utils.py
index 3169311710e4f8efc0838abdb4ce97eeba51d79d..1332e0cb86bf6e82479edc2d4f602f8fda382c3b 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/tsm_config_utils.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/tsm_config_utils.py
@@ -1,16 +1,16 @@
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
 #
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import logging
@@ -25,7 +25,6 @@ CONFIG_SECS = [
 class AttrDict(dict):
-
     def __getattr__(self, key):
         return self[key]
@@ -39,6 +38,7 @@ class AttrDict(dict):
 def parse_config(cfg_file):
     """Load a config file into AttrDict"""
     import yaml
+
     with open(cfg_file, 'r') as fopen:
         yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.Loader))
     create_attr_dict(yaml_config)
@@ -47,6 +47,7 @@ def parse_config(cfg_file):
 def create_attr_dict(yaml_config):
     from ast import literal_eval
+
     for key, value in yaml_config.items():
         if type(value) is dict:
             yaml_config[key] = value = AttrDict(value)
@@ -78,7 +79,8 @@ def merge_configs(cfg, sec, args_dict):
 def print_configs(cfg, mode):
     logger.info(
-        "---------------- {:>5} Arguments ----------------".format(mode))
+        "---------------- {:>5} Arguments ----------------".format(mode)
+    )
     for sec, sec_items in cfg.items():
         logger.info("{}:".format(sec))
         for k, v in sec_items.items():
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
index e502d786430c4851036453ad0de7a7b017eda538..0a50751b1e78151f3e69ecefc09394c5652705e3 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/yolov3.py
@@ -1,16 +1,16 @@
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
 #
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os import sys @@ -26,7 +26,6 @@ from darknet import ConvBNLayer class AttrDict(dict): - def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) @@ -78,12 +77,29 @@ cfg.pixel_means = [0.485, 0.456, 0.406] cfg.pixel_stds = [0.229, 0.224, 0.225] # anchors box weight and height cfg.anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] # anchor mask of each yolo layer cfg.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] # IoU threshold to ignore objectness loss of pred box -cfg.ignore_thresh = .7 +cfg.ignore_thresh = 0.7 # # SOLVER options # @@ -97,7 +113,7 @@ cfg.max_iter = 20 if fluid.is_compiled_with_cuda() else 1 cfg.no_mixup_iter = 10 if fluid.is_compiled_with_cuda() else 1 # warm up to learning rate cfg.warm_up_iter = 10 if fluid.is_compiled_with_cuda() else 1 -cfg.warm_up_factor = 0. +cfg.warm_up_factor = 0.0 # lr steps_with_decay cfg.lr_steps = [400000, 450000] cfg.lr_gamma = 0.1 @@ -115,49 +131,61 @@ cfg.class_num = 80 class YoloDetectionBlock(fluid.dygraph.Layer): - def __init__(self, ch_in, channel, is_test=True): super(YoloDetectionBlock, self).__init__() - assert channel % 2 == 0, \ - "channel {} cannot be divided by 2".format(channel) - - self.conv0 = ConvBNLayer(ch_in=ch_in, - ch_out=channel, - filter_size=1, - stride=1, - padding=0, - is_test=is_test) - self.conv1 = ConvBNLayer(ch_in=channel, - ch_out=channel * 2, - filter_size=3, - stride=1, - padding=1, - is_test=is_test) - self.conv2 = ConvBNLayer(ch_in=channel * 2, - ch_out=channel, - filter_size=1, - stride=1, - padding=0, - is_test=is_test) - self.conv3 = ConvBNLayer(ch_in=channel, - ch_out=channel * 2, - filter_size=3, - stride=1, - padding=1, - is_test=is_test) - self.route = ConvBNLayer(ch_in=channel * 2, - ch_out=channel, - filter_size=1, - stride=1, - padding=0, - is_test=is_test) - self.tip = ConvBNLayer(ch_in=channel, - ch_out=channel * 2, - filter_size=3, - stride=1, - padding=1, - is_test=is_test) + assert channel % 2 == 0, "channel {} cannot be divided by 2".format( + channel + ) + + self.conv0 = ConvBNLayer( + ch_in=ch_in, + ch_out=channel, + filter_size=1, + stride=1, + padding=0, + is_test=is_test, + ) + self.conv1 = ConvBNLayer( + ch_in=channel, + ch_out=channel * 2, + filter_size=3, + stride=1, + padding=1, + is_test=is_test, + ) + self.conv2 = ConvBNLayer( + ch_in=channel * 2, + ch_out=channel, + filter_size=1, + stride=1, + padding=0, + is_test=is_test, + ) + self.conv3 = ConvBNLayer( + ch_in=channel, + ch_out=channel * 2, + filter_size=3, + stride=1, + padding=1, + is_test=is_test, + ) + self.route = ConvBNLayer( + ch_in=channel * 2, + ch_out=channel, + filter_size=1, + stride=1, + padding=0, + 
is_test=is_test, + ) + self.tip = ConvBNLayer( + ch_in=channel, + ch_out=channel * 2, + filter_size=3, + stride=1, + padding=1, + is_test=is_test, + ) def forward(self, inputs): out = self.conv0(inputs) @@ -170,7 +198,6 @@ class YoloDetectionBlock(fluid.dygraph.Layer): class Upsample(fluid.dygraph.Layer): - def __init__(self, scale=2): super(Upsample, self).__init__() self.scale = scale @@ -178,24 +205,22 @@ class Upsample(fluid.dygraph.Layer): def forward(self, inputs): # get dynamic upsample output shape shape_nchw = fluid.layers.shape(inputs) - shape_hw = fluid.layers.slice(shape_nchw, - axes=[0], - starts=[2], - ends=[4]) + shape_hw = fluid.layers.slice( + shape_nchw, axes=[0], starts=[2], ends=[4] + ) shape_hw.stop_gradient = True in_shape = fluid.layers.cast(shape_hw, dtype='int32') out_shape = in_shape * self.scale out_shape.stop_gradient = True # reisze by actual_shape - out = fluid.layers.resize_nearest(input=inputs, - scale=self.scale, - actual_shape=out_shape) + out = fluid.layers.resize_nearest( + input=inputs, scale=self.scale, actual_shape=out_shape + ) return out class YOLOv3(fluid.dygraph.Layer): - def __init__(self, ch_in, is_train=True, use_random=False): super(YOLOv3, self).__init__() @@ -210,47 +235,60 @@ class YOLOv3(fluid.dygraph.Layer): for i in range(3): yolo_block = self.add_sublayer( "yolo_detecton_block_%d" % (i), - YoloDetectionBlock(ch_in_list[i], - channel=512 // (2**i), - is_test=not self.is_train)) + YoloDetectionBlock( + ch_in_list[i], + channel=512 // (2**i), + is_test=not self.is_train, + ), + ) self.yolo_blocks.append(yolo_block) num_filters = len(cfg.anchor_masks[i]) * (cfg.class_num + 5) block_out = self.add_sublayer( "block_out_%d" % (i), - Conv2D(num_channels=1024 // (2**i), - num_filters=num_filters, - filter_size=1, - stride=1, - padding=0, - act=None, - param_attr=ParamAttr( - initializer=fluid.initializer.Normal(0., 0.02)), - bias_attr=ParamAttr( - initializer=fluid.initializer.Constant(0.0), - regularizer=L2Decay(0.)))) + Conv2D( + num_channels=1024 // (2**i), + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + act=None, + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(0.0, 0.02) + ), + bias_attr=ParamAttr( + initializer=fluid.initializer.Constant(0.0), + regularizer=L2Decay(0.0), + ), + ), + ) self.block_outputs.append(block_out) if i < 2: route = self.add_sublayer( "route2_%d" % i, - ConvBNLayer(ch_in=512 // (2**i), - ch_out=256 // (2**i), - filter_size=1, - stride=1, - padding=0, - is_test=(not self.is_train))) + ConvBNLayer( + ch_in=512 // (2**i), + ch_out=256 // (2**i), + filter_size=1, + stride=1, + padding=0, + is_test=(not self.is_train), + ), + ) self.route_blocks_2.append(route) self.upsample = Upsample() @declarative - def forward(self, - inputs, - gtbox=None, - gtlabel=None, - gtscore=None, - im_id=None, - im_shape=None): + def forward( + self, + inputs, + gtbox=None, + gtlabel=None, + gtscore=None, + im_id=None, + im_shape=None, + ): self.outputs = [] self.boxes = [] self.scores = [] @@ -287,7 +325,8 @@ class YOLOv3(fluid.dygraph.Layer): class_num=cfg.class_num, ignore_thresh=cfg.ignore_thresh, downsample_ratio=self.downsample, - use_label_smooth=cfg.label_smooth) + use_label_smooth=cfg.label_smooth, + ) self.losses.append(fluid.layers.reduce_mean(loss)) else: @@ -302,10 +341,12 @@ class YOLOv3(fluid.dygraph.Layer): class_num=cfg.class_num, conf_thresh=cfg.valid_thresh, downsample_ratio=self.downsample, - name="yolo_box" + str(i)) + name="yolo_box" + str(i), + ) self.boxes.append(boxes) 
self.scores.append( - fluid.layers.transpose(scores, perm=[0, 2, 1])) + fluid.layers.transpose(scores, perm=[0, 2, 1]) + ) self.downsample //= 2 if not self.is_train: @@ -313,13 +354,15 @@ class YOLOv3(fluid.dygraph.Layer): yolo_boxes = fluid.layers.concat(self.boxes, axis=1) yolo_scores = fluid.layers.concat(self.scores, axis=2) - pred = fluid.layers.multiclass_nms(bboxes=yolo_boxes, - scores=yolo_scores, - score_threshold=cfg.valid_thresh, - nms_top_k=cfg.nms_topk, - keep_top_k=cfg.nms_posk, - nms_threshold=cfg.nms_thresh, - background_label=-1) + pred = fluid.layers.multiclass_nms( + bboxes=yolo_boxes, + scores=yolo_scores, + score_threshold=cfg.valid_thresh, + nms_top_k=cfg.nms_topk, + keep_top_k=cfg.nms_posk, + nms_threshold=cfg.nms_thresh, + background_label=-1, + ) return pred else: return sum(self.losses) diff --git a/python/paddle/fluid/tests/unittests/elastic_demo.py b/python/paddle/fluid/tests/unittests/elastic_demo.py index af26abd0d8878986b16ea4a0becb31fd3775ccc9..37a1c93bddd325334a008c6de59a540cc07181cb 100644 --- a/python/paddle/fluid/tests/unittests/elastic_demo.py +++ b/python/paddle/fluid/tests/unittests/elastic_demo.py @@ -15,10 +15,16 @@ import os, sys import time -sys.stderr.write("{}-DISTRIBUTED_TRAINER_ENDPOINTS={}\n".format( - os.environ['PADDLE_TRAINER_ID'], - os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'])) -sys.stderr.write("{}-PADDLE_TRAINERS={}\n".format( - os.environ['PADDLE_TRAINER_ID'], os.environ['PADDLE_TRAINERS'])) +sys.stderr.write( + "{}-DISTRIBUTED_TRAINER_ENDPOINTS={}\n".format( + os.environ['PADDLE_TRAINER_ID'], + os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'], + ) +) +sys.stderr.write( + "{}-PADDLE_TRAINERS={}\n".format( + os.environ['PADDLE_TRAINER_ID'], os.environ['PADDLE_TRAINERS'] + ) +) time.sleep(600) diff --git a/python/paddle/fluid/tests/unittests/fake_reader.py b/python/paddle/fluid/tests/unittests/fake_reader.py index b1e90d8b1879eb303344ea2ae7c59419be728876..997e9f0fc73a321fbb86945bc48c6076810ed441 100644 --- a/python/paddle/fluid/tests/unittests/fake_reader.py +++ b/python/paddle/fluid/tests/unittests/fake_reader.py @@ -15,23 +15,24 @@ import numpy as np -def fake_imdb_reader(word_dict_size, - sample_num, - lower_seq_len=100, - upper_seq_len=200, - class_dim=2): - +def fake_imdb_reader( + word_dict_size, + sample_num, + lower_seq_len=100, + upper_seq_len=200, + class_dim=2, +): def __reader__(): for _ in range(sample_num): - length = np.random.random_integers(low=lower_seq_len, - high=upper_seq_len, - size=[1])[0] - ids = np.random.random_integers(low=0, - high=word_dict_size - 1, - size=[length]).astype('int64') - label = np.random.random_integers(low=0, - high=class_dim - 1, - size=[1]).astype('int64')[0] + length = np.random.random_integers( + low=lower_seq_len, high=upper_seq_len, size=[1] + )[0] + ids = np.random.random_integers( + low=0, high=word_dict_size - 1, size=[length] + ).astype('int64') + label = np.random.random_integers( + low=0, high=class_dim - 1, size=[1] + ).astype('int64')[0] yield ids, label return __reader__ diff --git a/python/paddle/fluid/tests/unittests/feed_data_reader.py b/python/paddle/fluid/tests/unittests/feed_data_reader.py index 6d7e73ac3c09a5df350ac332dfd6d0f1883c2d61..9a1f15ddff5ec7dea2f1cae7b9160e705d71fec0 100644 --- a/python/paddle/fluid/tests/unittests/feed_data_reader.py +++ b/python/paddle/fluid/tests/unittests/feed_data_reader.py @@ -17,7 +17,6 @@ from paddle.fluid.framework import Variable def cyclic_reader(reader): - def __reader__(): while True: for data in reader(): @@ -27,7 +26,6 @@ def 
cyclic_reader(reader): class FeedDataReader(object): - def __init__(self, feed_list, reader): self._feed_list = [] for var in feed_list: @@ -62,8 +60,11 @@ class FeedDataReader(object): if program._is_data_parallel: use_executor = False if program._places is None: - device_num = len(fluid.cuda_places()) if use_cuda else len( - fluid.cpu_places()) + device_num = ( + len(fluid.cuda_places()) + if use_cuda + else len(fluid.cpu_places()) + ) else: device_num = len(program._places) else: diff --git a/python/paddle/fluid/tests/unittests/fft/spectral_op_np.py b/python/paddle/fluid/tests/unittests/fft/spectral_op_np.py index 0dc958f54fcf10eeda5fab9d84c533184512d5eb..65918a0cd59fffb53a81bb85f1767db47aaa2bff 100644 --- a/python/paddle/fluid/tests/unittests/fft/spectral_op_np.py +++ b/python/paddle/fluid/tests/unittests/fft/spectral_op_np.py @@ -34,8 +34,9 @@ def _get_norm_mode(norm, forward): def _get_inv_norm(n, norm_mode): - assert isinstance(norm_mode, - NormMode), "invalid norm_type {}".format(norm_mode) + assert isinstance(norm_mode, NormMode), "invalid norm_type {}".format( + norm_mode + ) if norm_mode == NormMode.none: return 1.0 if norm_mode == NormMode.by_sqrt_n: @@ -69,8 +70,9 @@ def _fftc2r(a, n=None, axis=-1, norm=None, forward=None): if n is None: n = (a.shape[axis] - 1) * 2 inv_norm = _get_inv_norm(n, norm) - output = _raw_fft(a.conj() if forward else a, n, axis, True, False, - inv_norm) + output = _raw_fft( + a.conj() if forward else a, n, axis, True, False, inv_norm + ) return output @@ -145,8 +147,10 @@ def _fft_fill_conj_grad(x, axes, length_to_double): last_fft_axis = axes[-1] shape = x.shape for multi_index in np.ndindex(*shape): - if 0 < multi_index[last_fft_axis] and multi_index[ - last_fft_axis] <= length_to_double: + if ( + 0 < multi_index[last_fft_axis] + and multi_index[last_fft_axis] <= length_to_double + ): x[multi_index] *= 2 return x diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft.py b/python/paddle/fluid/tests/unittests/fft/test_fft.py index e6ea9865ae3c313f90e7121a9ffd2eb1406af516..332a61c07fa607ccd1f48cade13f75166f1af141 100644 --- a/python/paddle/fluid/tests/unittests/fft/test_fft.py +++ b/python/paddle/fluid/tests/unittests/fft/test_fft.py @@ -30,31 +30,28 @@ RTOL = { 'float32': 1e-03, 'complex64': 1e-3, 'float64': 1e-7, - 'complex128': 1e-7 + 'complex128': 1e-7, } ATOL = {'float32': 0.0, 'complex64': 0, 'float64': 0.0, 'complex128': 0} -def rand_x(dims=1, - dtype='float64', - min_dim_len=1, - max_dim_len=10, - complex=False): +def rand_x( + dims=1, dtype='float64', min_dim_len=1, max_dim_len=10, complex=False +): shape = [np.random.randint(min_dim_len, max_dim_len) for i in range(dims)] if complex: - return np.random.randn( - *shape).astype(dtype) + 1.j * np.random.randn(*shape).astype(dtype) + return np.random.randn(*shape).astype(dtype) + 1.0j * np.random.randn( + *shape + ).astype(dtype) else: return np.random.randn(*shape).astype(dtype) def place(devices, key='place'): - def decorate(cls): module = sys.modules[cls.__module__].__dict__ raw_classes = { - k: v - for k, v in module.items() if k.startswith(cls.__name__) + k: v for k, v in module.items() if k.startswith(cls.__name__) } for raw_name, raw_cls in raw_classes.items(): @@ -62,7 +59,7 @@ def place(devices, key='place'): test_cls = dict(raw_cls.__dict__) test_cls.update({key: d}) new_name = raw_name + '.' 
+ d.__class__.__name__ - module[new_name] = type(new_name, (raw_cls, ), test_cls) + module[new_name] = type(new_name, (raw_cls,), test_cls) del module[raw_name] return cls @@ -82,7 +79,7 @@ def parameterize(fields, values=None): name = cls.__name__ + str(k) name = name + '.' + v.get('suffix') if v.get('suffix') else name - test_cls_module[name] = type(name, (cls, ), test_cls) + test_cls_module[name] = type(name, (cls,), test_cls) for m in list(cls.__dict__): if m.startswith("test"): @@ -95,68 +92,107 @@ def parameterize(fields, values=None): @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_x_complex', rand_x(5, complex=True), None, -1, 'backward'), - ('test_n_grater_input_length', rand_x(5, - max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5, complex=True), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ('test_x_complex', rand_x(5, complex=True), None, -1, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5, complex=True), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestFft(unittest.TestCase): - def test_fft(self): - """Test fft with norm condition - """ + """Test fft with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.fft(self.x, self.n, self.axis, - self.norm), - paddle.fft.fft(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.fft(self.x, self.n, self.axis, self.norm), + paddle.fft.fft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_x_complex', rand_x(5, complex=True), None, -1, 'backward'), - ('test_n_grater_input_length', rand_x(5, - max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5, complex=True), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ('test_x_complex', rand_x(5, complex=True), None, -1, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5, complex=True), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestIfft(unittest.TestCase): - def test_fft(self): - """Test ifft with norm condition - """ + """Test ifft with norm condition""" with paddle.fluid.dygraph.guard(self.place): 
- np.testing.assert_allclose(scipy.fft.ifft(self.x, self.n, self.axis, - self.norm), - paddle.fft.ifft(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.ifft(self.x, self.n, self.axis, self.norm), + paddle.fft.ifft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestFftException(unittest.TestCase): - def test_fft(self): """Test fft with buoudary condition Test case include: @@ -166,55 +202,108 @@ class TestFftException(unittest.TestCase): - norm out of range """ with self.assertRaises(self.expect_exception): - paddle.fft.fft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.fft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_x_complex128', rand_x(5, complex=True), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5, complex=True), - (4, 4), (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_x_complex128', + rand_x(5, complex=True), + None, + (0, 1), + 'backward', + ), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5, complex=True), + (4, 4), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), + ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestFft2(unittest.TestCase): - def test_fft2(self): - """Test fft2 with norm condition - """ + """Test fft2 with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.fft2(self.x, self.n, self.axis, - self.norm), - paddle.fft.fft2(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + 
scipy.fft.fft2(self.x, self.n, self.axis, self.norm), + paddle.fft.fft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_x_complex_input', rand_x(2, complex=True), None, - (0, 1), None, ValueError), - ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError), - ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), - ('test_n_len_not_equal_axis', rand_x(5, max_dim_len=5), 11, - (0, 1), 'backward', ValueError), - ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError)]) + [ + ( + 'test_x_complex_input', + rand_x(2, complex=True), + None, + (0, 1), + None, + ValueError, + ), + ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError), + ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), + ( + 'test_n_len_not_equal_axis', + rand_x(5, max_dim_len=5), + 11, + (0, 1), + 'backward', + ValueError, + ), + ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestFft2Exception(unittest.TestCase): - def test_fft2(self): """Test fft2 with buoudary condition Test case include: @@ -227,267 +316,525 @@ class TestFft2Exception(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.fft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.fft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_x_complex128', rand_x(5, complex=True), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (1, 2), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5, complex=True), - (3, 3), (1, 2), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ('test_x_complex128', rand_x(5, complex=True), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (1, 2), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5, complex=True), + (3, 3), + (1, 2), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestFftn(unittest.TestCase): - def test_fftn(self): - """Test fftn with norm condition - """ + """Test fftn with norm condition""" with 
paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.fftn(self.x, self.n, self.axis, - self.norm), - paddle.fft.fftn(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.fftn(self.x, self.n, self.axis, self.norm), + paddle.fft.fftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_x_complex128', rand_x(5, complex=True), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (1, 2), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5, complex=True), - (3, 3), (1, 2), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ('test_x_complex128', rand_x(5, complex=True), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (1, 2), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5, complex=True), + (3, 3), + (1, 2), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestIFftn(unittest.TestCase): - def test_ifftn(self): - """Test ifftn with norm condition - """ + """Test ifftn with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ifftn(self.x, self.n, self.axis, self.norm), - paddle.fft.ifftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.ifftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, -1, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 4, -1, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 2, -1, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, 1, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, 1, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + -1, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 4, + -1, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 2, + -1, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + 1, 
+ "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + 1, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "ortho", + ), + ], +) class TestHfft(unittest.TestCase): - def test_hfft(self): - """Test hfft with norm condition - """ + """Test hfft with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.hfft(self.x, self.n, self.axis, - self.norm), - paddle.fft.hfft(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=1e-5, - atol=0) + np.testing.assert_allclose( + scipy.fft.hfft(self.x, self.n, self.axis, self.norm), + paddle.fft.hfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=1e-5, + atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, -1, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 4, -1, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 2, -1, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + -1, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 4, + -1, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 2, + -1, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "ortho", + ), + ], +) class TestIrfft(unittest.TestCase): - def test_irfft(self): - """Test irfft with norm condition - """ + """Test irfft with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.irfft(self.x, self.n, self.axis, self.norm), - paddle.fft.irfft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.irfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=1e-5, - atol=0) + atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, None, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [4], None, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [2], None, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, 
None, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + None, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [4], + None, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2], + None, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "ortho", + ), + ], +) class TestIrfftn(unittest.TestCase): - def test_irfftn(self): - """Test irfftn with norm condition - """ + """Test irfftn with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.irfftn(self.x, self.n, self.axis, self.norm), - paddle.fft.irfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.irfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=1e-5, - atol=0) + atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, None, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [4], None, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [2], None, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + None, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [4], + None, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2], + None, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "ortho", + ), + ], +) class TestHfftn(unittest.TestCase): - def test_hfftn(self): - """Test hfftn with norm condition - """ + """Test hfftn with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.hfftn(self.x, self.n, self.axis, self.norm), - paddle.fft.hfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.hfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=1e-5, - atol=0) + 
atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, (-2, -1), "backward"), - ('test_with_s', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - [2, 2], (-2, -1), "backward", ValueError), - ('test_axis_not_last', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "backward"), - ('test_norm_forward', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "forward"), - ('test_norm_ortho', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + (-2, -1), + "backward", + ), + ( + 'test_with_s', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2, 2], + (-2, -1), + "backward", + ValueError, + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "ortho", + ), + ], +) class TestHfft2(unittest.TestCase): - def test_hfft2(self): - """Test hfft2 with norm condition - """ + """Test hfft2 with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.hfft2(self.x, self.s, self.axis, self.norm), - paddle.fft.hfft2(paddle.to_tensor(self.x), self.s, self.axis, - self.norm), + paddle.fft.hfft2( + paddle.to_tensor(self.x), self.s, self.axis, self.norm + ), rtol=1e-5, - atol=0) + atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, (-2, -1), "backward"), - ('test_n_equal_input_length', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (4, 6), - (-2, -1), "backward"), - ('test_axis_not_last', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "backward"), - ('test_norm_forward', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "forward"), - ('test_norm_ortho', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + (-2, -1), + "backward", + ), + ( + 'test_n_equal_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (4, 6), + (-2, -1), + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "ortho", + ), + ], +) class TestIrfft2(unittest.TestCase): - def test_irfft2(self): - """Test irfft2 with norm condition - """ + """Test irfft2 with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( 
scipy.fft.irfft2(self.x, self.s, self.axis, self.norm), - paddle.fft.irfft2(paddle.to_tensor(self.x), self.s, self.axis, - self.norm), + paddle.fft.irfft2( + paddle.to_tensor(self.x), self.s, self.axis, self.norm + ), rtol=1e-5, - atol=0) + atol=0, + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, -1, 'backward', RuntimeError), - ('test_n_nagative', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1, - 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (1, 2, 3), -1, 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, 10, 'backward', ValueError), - ('test_axis_with_array', np.random.randn(4) + 1j * np.random.randn(4), None, - (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, -1, 'random', ValueError) -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + -1, + 'backward', + RuntimeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + -1, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + 0, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2, 3), + -1, + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 10, + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + -1, + 'random', + ValueError, + ), + ], +) class TestHfftException(unittest.TestCase): - def test_hfft(self): """Test hfft with buoudary condition Test case include: @@ -500,27 +847,66 @@ class TestHfftException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.hfft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.hfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_n_nagative', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1, - 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (1, 2), -1, 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, 10, 'backward', ValueError), - ('test_axis_with_array', np.random.randn(4) + 1j * np.random.randn(4), None, - (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError) -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_n_nagative', + 
np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + -1, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + 0, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + -1, + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 10, + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfftException(unittest.TestCase): - def test_irfft(self): """ Test irfft with buoudary condition @@ -533,34 +919,84 @@ class TestIrfftException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.irfft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.irfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, (-2, -1), 'backward', RuntimeError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, None, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (1, 2), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, -1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + (-2, -1), + 'backward', + RuntimeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + None, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + -1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestHfft2Exception(unittest.TestCase): - def test_hfft2(self): """ Test hfft2 with buoudary condition @@ -574,34 +1010,82 @@ class TestHfft2Exception(unittest.TestCase): """ with 
paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.hfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.hfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_zero_point', - np.random.randn(4, 4, 1) + 1j * np.random.randn(4, 4, 1), None, - (-2, -1), "backward", ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (1, 2), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_zero_point', + np.random.randn(4, 4, 1) + 1j * np.random.randn(4, 4, 1), + None, + (-2, -1), + "backward", + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfft2Exception(unittest.TestCase): - def test_irfft2(self): """ Test irfft2 with buoudary condition @@ -615,34 +1099,84 @@ class TestIrfft2Exception(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.irfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.irfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, (-2, -1), 'backward', RuntimeError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), 
- ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (10, 20), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + (-2, -1), + 'backward', + RuntimeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (10, 20), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestHfftnException(unittest.TestCase): - def test_hfftn(self): """Test hfftn with buoudary condition Test case include: @@ -655,31 +1189,74 @@ class TestHfftnException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.hfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.hfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (10, 20), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (10, 20), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + 
np.random.randn(4) + 1j * np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfftnException(unittest.TestCase): - def test_irfftn(self): """Test irfftn with buoudary condition Test case include: @@ -691,46 +1268,75 @@ class TestIrfftnException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.irfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.irfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_n_grater_than_input_length', rand_x( - 5, max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ( + 'test_n_grater_than_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestRfft(unittest.TestCase): - def test_rfft(self): - """Test rfft with norm condition - """ + """Test rfft with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.rfft(self.x, self.n, self.axis, - self.norm), - paddle.fft.rfft(paddle.to_tensor(self.x), - self.n, self.axis, - self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.rfft(self.x, self.n, self.axis, self.norm), + paddle.fft.rfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestRfftException(unittest.TestCase): - def test_rfft(self): """Test rfft with buoudary condition Test case include: @@ -741,51 +1347,93 @@ class TestRfftException(unittest.TestCase): - the dimensions of n and axis are different """ with self.assertRaises(self.expect_exception): - paddle.fft.rfft(paddle.to_tensor(self.x), self.n, self.axis, - 
self.norm) + paddle.fft.rfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5), (4, 4), - (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + (4, 4), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), + ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestRfft2(unittest.TestCase): - def test_rfft2(self): - """Test rfft2 with norm condition - """ + """Test rfft2 with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfft2(self.x, self.n, self.axis, self.norm), - paddle.fft.rfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.rfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex_input', rand_x(2, complex=True), None, - (0, 1), 'backward', RuntimeError), - ('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex_input', + rand_x(2, complex=True), + None, + (0, 1), + 'backward', + RuntimeError, + ), + ('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError), + ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestRfft2Exception(unittest.TestCase): - def test_rfft2(self): """Test rfft2 with buoudary condition Test case include: @@ -798,47 +1446,83 @@ class TestRfft2Exception(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - 
paddle.fft.rfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.rfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (1, 2), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5), (3, 3), - (1, 2), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (1, 2), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5), + (3, 3), + (1, 2), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestRfftn(unittest.TestCase): - def test_rfftn(self): - """Test rfftn with norm condition - """ + """Test rfftn with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfftn(self.x, self.n, self.axis, self.norm), - paddle.fft.rfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.rfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex', rand_x( - 4, complex=True), None, None, 'backward', RuntimeError), - ('test_n_nagative', rand_x(4), (-1, -1), (1, 2), 'backward', ValueError), - ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), - ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError), - ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError) -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex', + rand_x(4, complex=True), + None, + None, + 'backward', + RuntimeError, + ), + ( + 'test_n_nagative', + rand_x(4), + (-1, -1), + (1, 2), + 'backward', + ValueError, + ), + ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), + ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(1), + None, + [0, 1], + 'backward', + ValueError, + ), + ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestRfftnException(unittest.TestCase): - def test_rfftn(self): """Test rfftn with buoudary condition Test case include: @@ -849,45 +1533,75 @@ class TestRfftnException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.rfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.rfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_n_grater_than_input_length', rand_x( - 5, max_dim_len=5), 11, -1, 
'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ( + 'test_n_grater_than_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestIhfft(unittest.TestCase): - def test_ihfft(self): - """Test ihfft with norm condition - """ + """Test ihfft with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfft(self.x, self.n, self.axis, self.norm), - paddle.fft.ihfft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.ihfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestIhfftException(unittest.TestCase): - def test_ihfft(self): """Test ihfft with buoudary condition Test case include: @@ -897,53 +1611,101 @@ class TestIhfftException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.ihfft(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.ihfft( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (11, 11), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5), (1, 1), - (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (11, 11), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + (1, 1), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), 
None, (1, 2), 'backward'), + ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestIhfft2(unittest.TestCase): - def test_ihfft2(self): - """Test ihfft2 with norm condition - """ + """Test ihfft2 with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfft2(self.x, self.n, self.axis, self.norm), - paddle.fft.ihfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.ihfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_x_complex_input', rand_x(2, complex=True), None, - (0, 1), None, ValueError), - ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError), - ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), - ('test_n_len_not_equal_axis', rand_x(5, max_dim_len=5), 11, - (0, 1), 'backward', ValueError), - ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError)]) + [ + ( + 'test_x_complex_input', + rand_x(2, complex=True), + None, + (0, 1), + None, + ValueError, + ), + ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError), + ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), + ( + 'test_n_len_not_equal_axis', + rand_x(5, max_dim_len=5), + 11, + (0, 1), + 'backward', + ValueError, + ), + ('test_n_zero', rand_x(2), (0, 0), (0, 1), 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestIhfft2Exception(unittest.TestCase): - def test_ihfft2(self): """Test ihfft2 with buoudary condition Test case include: @@ -956,46 +1718,75 @@ class TestIhfft2Exception(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.ihfft2(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.ihfft2( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (11, 11), - (0, 1), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5), (1, 1), - (0, 1), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (11, 11), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5), + (1, 
1), + (0, 1), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestIhfftn(unittest.TestCase): - def test_ihfftn(self): - """Test ihfftn with norm condition - """ + """Test ihfftn with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm), - paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm), + paddle.fft.ihfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ), rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex', rand_x( - 4, complex=True), None, None, 'backward', RuntimeError), - ('test_n_nagative', rand_x(4), -1, None, 'backward', ValueError), - ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError), - ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError) -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex', + rand_x(4, complex=True), + None, + None, + 'backward', + RuntimeError, + ), + ('test_n_nagative', rand_x(4), -1, None, 'backward', ValueError), + ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(1), + None, + [0, 1], + 'backward', + ValueError, + ), + ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestIhfftnException(unittest.TestCase): - def test_ihfftn(self): """Test ihfftn with buoudary condition Test case include: @@ -1006,88 +1797,107 @@ class TestIhfftnException(unittest.TestCase): """ with paddle.fluid.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): - paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n, self.axis, - self.norm) + paddle.fft.ihfftn( + paddle.to_tensor(self.x), self.n, self.axis, self.norm + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'n', 'd', 'dtype'), [ - ('test_without_d', 20, 1, 'float32'), - ('test_with_d', 20, 0.5, 'float32'), -]) +@parameterize( + (TEST_CASE_NAME, 'n', 'd', 'dtype'), + [ + ('test_without_d', 20, 1, 'float32'), + ('test_with_d', 20, 0.5, 'float32'), + ], +) class TestFftFreq(unittest.TestCase): - def test_fftfreq(self): - """Test fftfreq with norm condition - """ + """Test fftfreq with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fftfreq(self.n, self.d).astype(self.dtype), paddle.fft.fftfreq(self.n, self.d, self.dtype).numpy(), rtol=RTOL.get(str(self.dtype)), - atol=ATOL.get(str(self.dtype))) + atol=ATOL.get(str(self.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'n', 'd', 'dtype'), [ - ('test_without_d', 20, 1, 'float32'), - ('test_with_d', 20, 0.5, 'float32'), -]) +@parameterize( + (TEST_CASE_NAME, 'n', 'd', 'dtype'), + [ + ('test_without_d', 20, 1, 'float32'), + ('test_with_d', 20, 0.5, 'float32'), + ], +) class TestRfftFreq(unittest.TestCase): - def test_rfftfreq(self): - """Test rfftfreq with norm condition - """ + """Test rfftfreq with norm condition""" with paddle.fluid.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfftfreq(self.n, self.d).astype(self.dtype), paddle.fft.rfftfreq(self.n, self.d, 
self.dtype).numpy(), rtol=RTOL.get(str(self.dtype)), - atol=ATOL.get(str(self.dtype))) + atol=ATOL.get(str(self.dtype)), + ) @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'axes', 'dtype'), [ - ('test_1d', np.random.randn(10), (0, ), 'float64'), - ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), - ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), - ('test_2d_odd_with_all_axes', - np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'axes', 'dtype'), + [ + ('test_1d', np.random.randn(10), (0,), 'float64'), + ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), + ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), + ( + 'test_2d_odd_with_all_axes', + np.random.randn(5, 5) + 1j * np.random.randn(5, 5), + None, + 'complex128', + ), + ], +) class TestFftShift(unittest.TestCase): - def test_fftshift(self): - """Test fftshift with norm condition - """ + """Test fftshift with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.fftshift(self.x, self.axes), - paddle.fft.fftshift( - paddle.to_tensor(self.x), - self.axes).numpy(), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.fftshift(self.x, self.axes), + paddle.fft.fftshift( + paddle.to_tensor(self.x), self.axes + ).numpy(), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'axes'), - [('test_1d', np.random.randn(10), (0, ), 'float64'), - ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), - ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), - ('test_2d_odd_with_all_axes', - np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128')]) + [ + ('test_1d', np.random.randn(10), (0,), 'float64'), + ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), + ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), + ( + 'test_2d_odd_with_all_axes', + np.random.randn(5, 5) + 1j * np.random.randn(5, 5), + None, + 'complex128', + ), + ], +) class TestIfftShift(unittest.TestCase): - def test_ifftshift(self): - """Test ifftshift with norm condition - """ + """Test ifftshift with norm condition""" with paddle.fluid.dygraph.guard(self.place): - np.testing.assert_allclose(scipy.fft.ifftshift(self.x, self.axes), - paddle.fft.ifftshift( - paddle.to_tensor(self.x), - self.axes).numpy(), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + np.testing.assert_allclose( + scipy.fft.ifftshift(self.x, self.axes), + paddle.fft.ifftshift( + paddle.to_tensor(self.x), self.axes + ).numpy(), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py index 7b286c5086c7352a9a8863e9125d80b627625a58..35828ed160664c627ec072a6f5c6720b9eac07ea 100644 --- a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py +++ b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py @@ -19,8 +19,15 @@ import numpy as np import paddle import scipy.fft -from test_fft import (ATOL, DEVICES, RTOL, TEST_CASE_NAME, parameterize, place, - rand_x) +from test_fft import ( + ATOL, + DEVICES, + RTOL, + TEST_CASE_NAME, + parameterize, + place, + rand_x, +) @contextlib.contextmanager @@ -40,70 +47,126 @@ def 
stgraph(func, place, x, n, axes, norm): @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_x_complex64', rand_x(5, np.float64, - complex=True), None, -1, 'backward'), - ('test_n_grater_than_input_length', rand_x( - 5, max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ( + 'test_x_complex64', + rand_x(5, np.float64, complex=True), + None, + -1, + 'backward', + ), + ( + 'test_n_grater_than_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestFft(unittest.TestCase): - def test_static_rfft(self): - with stgraph(paddle.fft.fft, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.fft(self.x, self.n, self.axis, - self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.fft, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.fft(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestFftException(unittest.TestCase): - def test_fft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.fft, self.place, self.x, self.n, self.axis, - self.norm) as y: + with stgraph( + paddle.fft.fft, self.place, self.x, self.n, self.axis, self.norm + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_x_complex128', rand_x(5, complex=True), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5), (4, 4), - (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), 
None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_x_complex128', + rand_x(5, complex=True), + None, + (0, 1), + 'backward', + ), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + (4, 4), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), + ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestFft2(unittest.TestCase): - def test_static_fft2(self): - with stgraph(paddle.fft.fft2, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.fft2(self.x, self.n, self.axis, - self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.fft2, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.fft2(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -114,274 +177,593 @@ class TestFft2(unittest.TestCase): ('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError), ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), ('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, - (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', - ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError) - ]) + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestFft2Exception(unittest.TestCase): - def test_static_fft2(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.fft2, self.place, self.x, self.n, self.axis, - self.norm) as y: + with stgraph( + paddle.fft.fft2, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_x_complex128', rand_x(5, np.float64, - complex=True), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (1, 2), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5), (3, 3), - (1, 2), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ( + 'test_x_complex128', + rand_x(5, np.float64, complex=True), + None, + None, + 'backward', + ), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (1, 2), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5), + (3, 3), + (1, 2), + 'backward', + ), + 
('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestFftn(unittest.TestCase): - def test_static_fftn(self): - with stgraph(paddle.fft.fftn, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.fftn(self.x, self.n, self.axis, - self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex', rand_x(4, - complex=True), None, None, 'backward', TypeError), - ('test_n_nagative', rand_x(4), (-1, -1), (1, 2), 'backward', ValueError), - ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), - ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError), - ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError) -]) -class TestRfftnException(unittest.TestCase): + with stgraph( + paddle.fft.fftn, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.fftn(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex', + rand_x(4, complex=True), + None, + None, + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + rand_x(4), + (-1, -1), + (1, 2), + 'backward', + ValueError, + ), + ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), + ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(1), + None, + [0, 1], + 'backward', + ValueError, + ), + ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) +class TestRfftnException(unittest.TestCase): def test_static_rfftn(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.rfftn, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.rfftn, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, -1, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 4, -1, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 2, -1, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, 1, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, 1, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "ortho"), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + -1, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 4, + -1, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 2, + -1, + "backward", + ), + ( + 
'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + 1, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + 1, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "ortho", + ), + ], +) class TestHfft(unittest.TestCase): - """Test hfft with norm condition - """ + """Test hfft with norm condition""" def test_hfft(self): - with stgraph(paddle.fft.hfft, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.hfft(self.x, self.n, self.axis, - self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, -1, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 4, -1, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 2, -1, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, -1, "ortho"), -]) + with stgraph( + paddle.fft.hfft, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.hfft(self.x, self.n, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + -1, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 4, + -1, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 2, + -1, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + -1, + "ortho", + ), + ], +) class TestIrfft(unittest.TestCase): - """Test irfft with norm condition - """ + """Test irfft with norm condition""" def test_irfft(self): - with stgraph(paddle.fft.irfft, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.irfft(self.x, self.n, - self.axis, self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, None, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [4], None, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [2], None, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, 
"forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "ortho"), -]) + with stgraph( + paddle.fft.irfft, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.irfft(self.x, self.n, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + None, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [4], + None, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2], + None, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "ortho", + ), + ], +) class Testirfftn(unittest.TestCase): - """Test irfftn with norm condition - """ + """Test irfftn with norm condition""" def test_static_irfftn(self): - with stgraph(paddle.fft.irfftn, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.irfftn(self.x, self.n, - self.axis, self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, None, "backward"), - ('test_n_grater_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [4], None, "backward"), - ('test_n_smaller_than_input_length', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), [2], None, "backward"), - ('test_axis_not_last', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "backward"), - ('test_norm_forward', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "forward"), - ('test_norm_ortho', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), None, None, "ortho"), -]) + with stgraph( + paddle.fft.irfftn, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.irfftn(self.x, self.n, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + None, + "backward", + ), + ( + 'test_n_grater_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [4], + None, + "backward", + ), + ( + 'test_n_smaller_than_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2], + None, + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + None, + "ortho", + ), + ], +) class Testhfftn(unittest.TestCase): - """Test hfftn with norm condition - """ + """Test hfftn with 
norm condition""" def test_static_hfftn(self): - with stgraph(paddle.fft.hfftn, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.hfftn(self.x, self.n, - self.axis, self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, (-2, -1), "backward"), - ('test_n_grater_input_length', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [4, 8], - (-2, -1), "backward"), - ('test_n_smaller_input_length', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), [2, 4], - (-2, -1), "backward"), - ('test_axis_not_last', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "backward"), - ('test_norm_forward', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "forward"), - ('test_norm_ortho', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "ortho"), -]) + with stgraph( + paddle.fft.hfftn, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.hfftn(self.x, self.n, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), + [ + ( + 'test_x_complex128', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + (-2, -1), + "backward", + ), + ( + 'test_n_grater_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [4, 8], + (-2, -1), + "backward", + ), + ( + 'test_n_smaller_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + [2, 4], + (-2, -1), + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "ortho", + ), + ], +) class Testhfft2(unittest.TestCase): - """Test hfft2 with norm condition - """ + """Test hfft2 with norm condition""" def test_static_hfft2(self): - with stgraph(paddle.fft.hfft2, self.place, self.x, self.s, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.hfft2(self.x, self.s, - self.axis, self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), [ - ('test_x_complex128', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.complex128), None, (-2, -1), "backward"), - ('test_n_equal_input_length', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (2, 4), - (-2, -1), "backward"), - ('test_axis_not_last', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "backward"), - ('test_norm_forward', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "forward"), - ('test_norm_ortho', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), None, - (-2, -1), "ortho"), -]) + with stgraph( + paddle.fft.hfft2, self.place, self.x, self.s, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.hfft2(self.x, self.s, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 's', 'axis', 'norm'), + [ + ( + 'test_x_complex128', 
+ (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.complex128 + ), + None, + (-2, -1), + "backward", + ), + ( + 'test_n_equal_input_length', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (2, 4), + (-2, -1), + "backward", + ), + ( + 'test_axis_not_last', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "backward", + ), + ( + 'test_norm_forward', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "forward", + ), + ( + 'test_norm_ortho', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + None, + (-2, -1), + "ortho", + ), + ], +) class TestIrfft2(unittest.TestCase): - """Test irfft2 with norm condition - """ + """Test irfft2 with norm condition""" def test_static_irfft2(self): - with stgraph(paddle.fft.irfft2, self.place, self.x, self.s, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.irfft2(self.x, self.s, - self.axis, self.norm), - y, - rtol=1e-5, - atol=0) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_input_dtype', np.random.randn(4, 4, - 4), None, -1, 'backward', TypeError), - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, -1, 'backward', TypeError), - ('test_n_nagative', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1, - 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (1, 2, 3), -1, 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, 10, 'backward', ValueError), - ('test_axis_with_array', np.random.randn(4) + 1j * np.random.randn(4), None, - (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, -1, 'random', ValueError) -]) + with stgraph( + paddle.fft.irfft2, self.place, self.x, self.s, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.irfft2(self.x, self.s, self.axis, self.norm), + y, + rtol=1e-5, + atol=0, + ) + + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + -1, + 'backward', + TypeError, + ), + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + -1, + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + -1, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + 0, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2, 3), + -1, + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 10, + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + -1, + 'random', + ValueError, + ), + ], +) class TestHfftException(unittest.TestCase): '''Test hfft with buoudary condition Test case include: @@ -393,31 +775,89 @@ class TestHfftException(unittest.TestCase): def 
test_static_hfft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.hfft, self.place, self.x, self.n, self.axis, - self.norm) as y: + with stgraph( + paddle.fft.hfft, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_input_dtype', np.random.randn(4, 4, - 4), None, -1, 'backward', TypeError), - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, -1, 'backward', TypeError), - ('test_n_nagative', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1, - 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (1, 2), -1, 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, 10, 'backward', ValueError), - ('test_axis_with_array', np.random.randn(4) + 1j * np.random.randn(4), None, - (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError) -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + -1, + 'backward', + TypeError, + ), + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + -1, + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + -1, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + 0, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + -1, + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 10, + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfftException(unittest.TestCase): '''Test Irfft with buoudary condition Test case include: @@ -430,35 +870,97 @@ class TestIrfftException(unittest.TestCase): def test_static_irfft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.irfft, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.irfft, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_input_dtype', np.random.randn( - 4, 4, 4), None, None, 'backward', TypeError), - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, (-2, -1), 'backward', TypeError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, None, 'backward', ValueError), - 
('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (1, 2), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, -1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + None, + 'backward', + TypeError, + ), + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + (-2, -1), + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + None, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + -1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestHfft2Exception(unittest.TestCase): '''Test hfft2 with buoudary condition Test case include: @@ -471,35 +973,97 @@ class TestHfft2Exception(unittest.TestCase): def test_static_hfft2(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.hfft2, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.hfft2, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_input_dtype', np.random.randn( - 4, 4, 4), None, None, 'backward', TypeError), - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, (-2, -1), 'backward', TypeError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (1, 2), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + None, + 'backward', + TypeError, + ), + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + (-2, -1), + 'backward', + TypeError, + ), + ( + 
'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfft2Exception(unittest.TestCase): '''Test irfft2 with buoudary condition Test case include: @@ -512,35 +1076,97 @@ class TestIrfft2Exception(unittest.TestCase): def test_static_irfft2(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.irfft2, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.irfft2, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_input_dtype', np.random.randn( - 4, 4, 4), None, None, 'backward', TypeError), - ('test_bool_input', - (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( - np.bool_), None, (-2, -1), 'backward', TypeError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - (0, 0), (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), - 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (10, 20), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, 1, - 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError)]) + [ + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + None, + 'backward', + TypeError, + ), + ( + 'test_bool_input', + (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype( + np.bool_ + ), + None, + (-2, -1), + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (10, 20), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * 
np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestHfftnException(unittest.TestCase): '''Test hfftn with buoudary condition Test case include: @@ -553,8 +1179,14 @@ class TestHfftnException(unittest.TestCase): def test_static_hfftn(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.hfftn, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.hfftn, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @@ -562,29 +1194,75 @@ class TestHfftnException(unittest.TestCase): @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_input_dtype', np.random.randn( - 4, 4, 4), None, None, 'backward', TypeError), + ( + 'test_input_dtype', + np.random.randn(4, 4, 4), + None, + None, + 'backward', + TypeError, + ), # ('test_bool_input', # (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4) # ).astype(np.bool_), None, (-2, -1), 'backward', ValueError), - ('test_n_nagative', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2), - (-2, -1), 'backward', ValueError), - ('test_n_zero', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (0, 0), - (-2, -1), 'backward', ValueError), - ('test_n_type', np.random.randn(4, 4, 4) + - 1j * np.random.randn(4, 4, 4), 3, -1, 'backward', ValueError), - ('test_n_axis_dim', - np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (1, 2), - (-3, -2, -1), 'backward', ValueError), - ('test_axis_out_of_range', np.random.randn(4) + 1j * np.random.randn(4), - None, (10, 20), 'backward', ValueError), - ('test_axis_type', np.random.randn(4) + 1j * np.random.randn(4), None, - 1, 'backward', ValueError), - ('test_norm_not_in_enum_value', np.random.randn(4, 4) + - 1j * np.random.randn(4, 4), None, None, 'random', ValueError) - ]) + ( + 'test_n_nagative', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (-1, -2), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_zero', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (0, 0), + (-2, -1), + 'backward', + ValueError, + ), + ( + 'test_n_type', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + 3, + -1, + 'backward', + ValueError, + ), + ( + 'test_n_axis_dim', + np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), + (1, 2), + (-3, -2, -1), + 'backward', + ValueError, + ), + ( + 'test_axis_out_of_range', + np.random.randn(4) + 1j * np.random.randn(4), + None, + (10, 20), + 'backward', + ValueError, + ), + ( + 'test_axis_type', + np.random.randn(4) + 1j * np.random.randn(4), + None, + 1, + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + np.random.randn(4, 4) + 1j * np.random.randn(4, 4), + None, + None, + 'random', + ValueError, + ), + ], +) class TestIrfftnException(unittest.TestCase): '''Test irfftn with buoudary condition Test case include: @@ -597,291 +1275,545 @@ class TestIrfftnException(unittest.TestCase): def test_static_irfftn(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.irfftn, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.irfftn, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - 
('test_n_grater_than_input_length', rand_x( - 5, max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ( + 'test_n_grater_than_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestRfft(unittest.TestCase): - def test_static_rfft(self): - with stgraph(paddle.fft.rfft, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.rfft(self.x, self.n, self.axis, - self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.rfft, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.rfft(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - [('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestRfftException(unittest.TestCase): - def test_rfft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.rfft, self.place, self.x, self.n, self.axis, - self.norm) as y: + with stgraph( + paddle.fft.rfft, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5), (4, 4), - (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + (4, 4), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), 
+ ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestRfft2(unittest.TestCase): - def test_static_rfft2(self): - with stgraph(paddle.fft.rfft2, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.rfft2(self.x, self.n, - self.axis, self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.rfft2, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.rfft2(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex_input', rand_x(2, complex=True), None, - (0, 1), 'backward', TypeError), + ( + 'test_x_complex_input', + rand_x(2, complex=True), + None, + (0, 1), + 'backward', + TypeError, + ), # ('test_x_not_tensor', [0, 1], None, (0, 1), 'backward', ValueError), ('test_x_1dim_tensor', rand_x(1), None, (0, 1), 'backward', ValueError), ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), ('test_n_zero', rand_x(2), 0, (0, 1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, - (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', - ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError) - ]) + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestRfft2Exception(unittest.TestCase): - def test_static_rfft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.rfft2, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.rfft2, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (6, 6), - (1, 2), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5), (3, 3), - (1, 2), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (6, 6), + (1, 2), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5), + (3, 3), + (1, 2), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestRfftn(unittest.TestCase): - def test_static_rfft(self): - with stgraph(paddle.fft.rfftn, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.rfftn(self.x, self.n, - self.axis, 
self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex', rand_x(4, - complex=True), None, None, 'backward', TypeError), - ('test_n_nagative', rand_x(4), (-1, -1), (1, 2), 'backward', ValueError), - ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), - ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError), - ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError) -]) -class TestRfftnException(unittest.TestCase): + with stgraph( + paddle.fft.rfftn, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.rfftn(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex', + rand_x(4, complex=True), + None, + None, + 'backward', + TypeError, + ), + ( + 'test_n_nagative', + rand_x(4), + (-1, -1), + (1, 2), + 'backward', + ValueError, + ), + ('test_n_not_sequence', rand_x(4), -1, None, 'backward', ValueError), + ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(1), + None, + [0, 1], + 'backward', + ValueError, + ), + ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) +class TestRfftnException(unittest.TestCase): def test_static_rfftn(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.rfftn, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.rfftn, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), - ('test_n_grater_than_input_length', rand_x( - 5, max_dim_len=5), 11, -1, 'backward'), - ('test_n_smaller_than_input_length', rand_x( - 5, min_dim_len=5), 3, -1, 'backward'), - ('test_axis_not_last', rand_x(5), None, 3, 'backward'), - ('test_norm_forward', rand_x(5), None, 3, 'forward'), - ('test_norm_ortho', rand_x(5), None, 3, 'ortho')]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5, np.float64), None, -1, 'backward'), + ( + 'test_n_grater_than_input_length', + rand_x(5, max_dim_len=5), + 11, + -1, + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + 3, + -1, + 'backward', + ), + ('test_axis_not_last', rand_x(5), None, 3, 'backward'), + ('test_norm_forward', rand_x(5), None, 3, 'forward'), + ('test_norm_ortho', rand_x(5), None, 3, 'ortho'), + ], +) class TestIhfft(unittest.TestCase): - def test_static_ihfft(self): - with stgraph(paddle.fft.ihfft, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.ihfft(self.x, self.n, - self.axis, self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.ihfft, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.ihfft(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), - 
[('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), - ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, (0, 1), 'backward', ValueError), - ('test_norm_not_in_enum_value', rand_x(2), None, -1, 'random', ValueError)] + [ + ('test_n_nagative', rand_x(2), -1, -1, 'backward', ValueError), + ('test_n_zero', rand_x(2), 0, -1, 'backward', ValueError), + ('test_axis_out_of_range', rand_x(1), None, 10, 'backward', ValueError), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_norm_not_in_enum_value', + rand_x(2), + None, + -1, + 'random', + ValueError, + ), + ], ) class TestIhfftException(unittest.TestCase): - def test_static_ihfft(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.ihfft, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.ihfft, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), [ - ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (11, 11), - (0, 1), 'backward'), - ('test_n_smaller_than_input_length', rand_x(5, min_dim_len=5), (1, 1), - (0, 1), 'backward'), - ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), - ('test_axis_none', rand_x(5), None, None, 'backward'), - ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), - ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), + [ + ('test_x_float64', rand_x(5), None, (0, 1), 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (11, 11), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_than_input_length', + rand_x(5, min_dim_len=5), + (1, 1), + (0, 1), + 'backward', + ), + ('test_axis_random', rand_x(5), None, (1, 2), 'backward'), + ('test_axis_none', rand_x(5), None, None, 'backward'), + ('test_norm_forward', rand_x(5), None, (0, 1), 'forward'), + ('test_norm_ortho', rand_x(5), None, (0, 1), 'ortho'), + ], +) class TestIhfft2(unittest.TestCase): - def test_static_ihfft2(self): - with stgraph(paddle.fft.ihfft2, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.ihfft2(self.x, self.n, - self.axis, self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) + with stgraph( + paddle.fft.ihfft2, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.ihfft2(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex_input', rand_x(2, complex=True), None, - (0, 1), None, ValueError), + ( + 'test_x_complex_input', + rand_x(2, complex=True), + None, + (0, 1), + None, + ValueError, + ), # ('test_x_not_tensor', [0, 1], None, (0, 1), None, ValueError), ('test_x_1dim_tensor', rand_x(1), None, (0, 1), None, ValueError), ('test_n_nagative', rand_x(2), -1, (0, 1), 'backward', ValueError), - ('test_n_len_not_equal_axis', rand_x(5, max_dim_len=5), 11, - (0, 1), 'backward', ValueError), + ( + 'test_n_len_not_equal_axis', + rand_x(5, max_dim_len=5), + 11, + (0, 1), + 'backward', + ValueError, + ), ('test_n_zero', rand_x(2), (0, 0), (0, 
1), 'backward', ValueError), - ('test_axis_out_of_range', rand_x(2), None, - (0, 1, 2), 'backward', ValueError), - ('test_axis_with_array', rand_x(1), None, - (0, 1), 'backward', ValueError), - ('test_axis_not_sequence', rand_x(5), None, -10, 'backward', - ValueError), - ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError) - ]) + ( + 'test_axis_out_of_range', + rand_x(2), + None, + (0, 1, 2), + 'backward', + ValueError, + ), + ( + 'test_axis_with_array', + rand_x(1), + None, + (0, 1), + 'backward', + ValueError, + ), + ( + 'test_axis_not_sequence', + rand_x(5), + None, + -10, + 'backward', + ValueError, + ), + ('test_norm_not_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) class TestIhfft2Exception(unittest.TestCase): - def test_static_ihfft2(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.ihfft2, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.ihfft2, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm'), - [('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), - ('test_n_grater_input_length', rand_x(5, max_dim_len=5), (11, 11), - (0, 1), 'backward'), - ('test_n_smaller_input_length', rand_x(5, min_dim_len=5), (1, 1), - (0, 1), 'backward'), - ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), - ('test_norm_forward', rand_x(5), None, None, 'forward'), - ('test_norm_ortho', rand_x(5), None, None, 'ortho')]) + [ + ('test_x_float64', rand_x(5, np.float64), None, None, 'backward'), + ( + 'test_n_grater_input_length', + rand_x(5, max_dim_len=5), + (11, 11), + (0, 1), + 'backward', + ), + ( + 'test_n_smaller_input_length', + rand_x(5, min_dim_len=5), + (1, 1), + (0, 1), + 'backward', + ), + ('test_axis_not_default', rand_x(5), None, (1, 2), 'backward'), + ('test_norm_forward', rand_x(5), None, None, 'forward'), + ('test_norm_ortho', rand_x(5), None, None, 'ortho'), + ], +) class TestIhfftn(unittest.TestCase): - def test_static_ihfftn(self): - with stgraph(paddle.fft.ihfftn, self.place, self.x, self.n, self.axis, - self.norm) as y: - np.testing.assert_allclose(scipy.fft.ihfftn(self.x, self.n, - self.axis, self.norm), - y, - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype))) - - -@place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [ - ('test_x_complex', rand_x(4, - complex=True), None, None, 'backward', TypeError), - ('test_n_nagative', rand_x(4), -1, None, 'backward', ValueError), - ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), - ('test_axis_out_of_range', rand_x(1), None, [0, 1], 'backward', ValueError), - ('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError) -]) -class TestIhfftnException(unittest.TestCase): + with stgraph( + paddle.fft.ihfftn, self.place, self.x, self.n, self.axis, self.norm + ) as y: + np.testing.assert_allclose( + scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm), + y, + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype)), + ) + +@place(DEVICES) +@parameterize( + (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), + [ + ( + 'test_x_complex', + rand_x(4, complex=True), + None, + None, + 'backward', + TypeError, + ), + ('test_n_nagative', rand_x(4), -1, None, 'backward', ValueError), + ('test_n_zero', rand_x(4), 0, None, 'backward', ValueError), + ( + 'test_axis_out_of_range', + rand_x(1), + None, + [0, 1], + 'backward', + ValueError, + ), + 
('test_norm_not_in_enum', rand_x(2), None, -1, 'random', ValueError), + ], +) +class TestIhfftnException(unittest.TestCase): def test_static_ihfftn(self): with self.assertRaises(self.expect_exception): - with stgraph(paddle.fft.ihfftn, self.place, self.x, self.n, - self.axis, self.norm) as y: + with stgraph( + paddle.fft.ihfftn, + self.place, + self.x, + self.n, + self.axis, + self.norm, + ) as y: pass @place(DEVICES) -@parameterize((TEST_CASE_NAME, 'x', 'axes', 'dtype'), [ - ('test_1d', np.random.randn(10), (0, ), 'float64'), - ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), - ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), - ('test_2d_odd_with_all_axes', - np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128'), -]) +@parameterize( + (TEST_CASE_NAME, 'x', 'axes', 'dtype'), + [ + ('test_1d', np.random.randn(10), (0,), 'float64'), + ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), + ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), + ( + 'test_2d_odd_with_all_axes', + np.random.randn(5, 5) + 1j * np.random.randn(5, 5), + None, + 'complex128', + ), + ], +) class TestFftShift(unittest.TestCase): - def test_fftshift(self): - """Test fftshift with norm condition - """ + """Test fftshift with norm condition""" paddle.enable_static() mp, sp = paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(mp, sp): @@ -898,16 +1830,21 @@ class TestFftShift(unittest.TestCase): @place(DEVICES) @parameterize( (TEST_CASE_NAME, 'x', 'axes'), - [('test_1d', np.random.randn(10), (0, ), 'float64'), - ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), - ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), - ('test_2d_odd_with_all_axes', - np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128')]) + [ + ('test_1d', np.random.randn(10), (0,), 'float64'), + ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'), + ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'), + ( + 'test_2d_odd_with_all_axes', + np.random.randn(5, 5) + 1j * np.random.randn(5, 5), + None, + 'complex128', + ), + ], +) class TestIfftShift(unittest.TestCase): - def test_ifftshift(self): - """Test ifftshift with norm condition - """ + """Test ifftshift with norm condition""" paddle.enable_static() mp, sp = paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(mp, sp): diff --git a/python/paddle/fluid/tests/unittests/fft/test_spectral_op.py b/python/paddle/fluid/tests/unittests/fft/test_spectral_op.py index bc362d1b7a64368ee18fe873b5552d3f65721f4c..64b6cc111db25f51a56b43993e81cbe98f7c74f0 100644 --- a/python/paddle/fluid/tests/unittests/fft/test_spectral_op.py +++ b/python/paddle/fluid/tests/unittests/fft/test_spectral_op.py @@ -17,7 +17,14 @@ import paddle import re import sys -from spectral_op_np import fft_c2c, fft_r2c, fft_c2r, fft_c2c_backward, fft_r2c_backward, fft_c2r_backward +from spectral_op_np import ( + fft_c2c, + fft_r2c, + fft_c2r, + fft_c2c_backward, + fft_r2c_backward, + fft_c2r_backward, +) from paddle import _C_ops sys.path.append("../") @@ -32,8 +39,11 @@ def parameterize(attrs, input_values=None): if isinstance(attrs, str): attrs = [attrs] - input_dicts = (attrs if input_values is None else - [dict(zip(attrs, vals)) for vals in input_values]) + input_dicts = ( + attrs + if input_values is None + else [dict(zip(attrs, vals)) for vals in input_values] + ) def decorator(base_class): test_class_module = sys.modules[base_class.__module__].__dict__ @@ -43,8 
+53,7 @@ def parameterize(attrs, input_values=None): name = class_name(base_class, idx, input_dict) - test_class_module[name] = type(name, (base_class, ), - test_class_dict) + test_class_module[name] = type(name, (base_class,), test_class_dict) for method_name in list(base_class.__dict__): if method_name.startswith("test"): @@ -60,7 +69,8 @@ def to_safe_name(s): def class_name(cls, num, params_dict): suffix = to_safe_name( - next((v for v in params_dict.values() if isinstance(v, str)), "")) + next((v for v in params_dict.values() if isinstance(v, str)), "") + ) if TEST_CASE_NAME in params_dict: suffix = to_safe_name(params_dict["test_case"]) return "{}_{}{}".format(cls.__name__, num, suffix and "_" + suffix) @@ -80,20 +90,55 @@ def fft_c2r_python_api(x, axes, norm, forward, last_dim_size=0): @parameterize( (TEST_CASE_NAME, 'x', 'axes', 'norm', 'forward'), - [('test_axes_is_sqe_type', (np.random.random( - (12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), [0, 1], 'forward', True), - ('test_axis_not_last', (np.random.random( - (4, 8, 4)) + 1j * np.random.random( - (4, 8, 4))).astype(np.complex128), (0, 1), "backward", False), - ('test_norm_forward', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (0, ), "forward", False), - ('test_norm_backward', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (0, ), "backward", True), - ('test_norm_ortho', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (1, ), "ortho", True)]) + [ + ( + 'test_axes_is_sqe_type', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + [0, 1], + 'forward', + True, + ), + ( + 'test_axis_not_last', + ( + np.random.random((4, 8, 4)) + 1j * np.random.random((4, 8, 4)) + ).astype(np.complex128), + (0, 1), + "backward", + False, + ), + ( + 'test_norm_forward', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (0,), + "forward", + False, + ), + ( + 'test_norm_backward', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (0,), + "backward", + True, + ), + ( + 'test_norm_ortho', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (1,), + "ortho", + True, + ), + ], +) class TestFFTC2COp(OpTest): - def setUp(self): self.op_type = "fft_c2c" self.dtype = self.x.dtype @@ -105,90 +150,173 @@ class TestFFTC2COp(OpTest): self.attrs = { 'axes': self.axes, 'normalization': self.norm, - "forward": self.forward + "forward": self.forward, } self.outputs = {'Out': out} - self.out_grad = (np.random.random(self.x.shape) + - 1j * np.random.random(self.x.shape)).astype( - self.x.dtype) - self.x_grad = fft_c2c_backward(self.out_grad, self.axes, self.norm, - self.forward) + self.out_grad = ( + np.random.random(self.x.shape) + 1j * np.random.random(self.x.shape) + ).astype(self.x.dtype) + self.x_grad = fft_c2c_backward( + self.out_grad, self.axes, self.norm, self.forward + ) def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad("X", - "Out", - user_defined_grads=[self.x_grad], - user_defined_grad_outputs=[self.out_grad], - check_eager=True) + self.check_grad( + "X", + "Out", + user_defined_grads=[self.x_grad], + user_defined_grad_outputs=[self.out_grad], + check_eager=True, + ) @parameterize( (TEST_CASE_NAME, 'x', 'axes', 'norm', 'forward', 'last_dim_size'), - [('test_axes_is_sqe_type', 
(np.random.random( - (12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), [0, 1], 'forward', True, 26), - ('test_axis_not_last', (np.random.random( - (4, 7, 4)) + 1j * np.random.random((4, 7, 4))).astype(np.complex128), - (0, 1), "backward", False, None), - ('test_norm_forward', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (0, ), "forward", False, 22), - ('test_norm_backward', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (0, ), "backward", True, 22), - ('test_norm_ortho', (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(np.complex128), (1, ), "ortho", True, 26)]) + [ + ( + 'test_axes_is_sqe_type', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + [0, 1], + 'forward', + True, + 26, + ), + ( + 'test_axis_not_last', + ( + np.random.random((4, 7, 4)) + 1j * np.random.random((4, 7, 4)) + ).astype(np.complex128), + (0, 1), + "backward", + False, + None, + ), + ( + 'test_norm_forward', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (0,), + "forward", + False, + 22, + ), + ( + 'test_norm_backward', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (0,), + "backward", + True, + 22, + ), + ( + 'test_norm_ortho', + ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(np.complex128), + (1,), + "ortho", + True, + 26, + ), + ], +) class TestFFTC2ROp(OpTest): - def setUp(self): self.op_type = "fft_c2r" self.dtype = self.x.dtype self.python_api = fft_c2r_python_api - out = fft_c2r(self.x, self.axes, self.norm, self.forward, - self.last_dim_size) + out = fft_c2r( + self.x, self.axes, self.norm, self.forward, self.last_dim_size + ) self.inputs = {'X': self.x} self.attrs = { "axes": self.axes, "normalization": self.norm, "forward": self.forward, - "last_dim_size": self.last_dim_size + "last_dim_size": self.last_dim_size, } self.outputs = {'Out': out} self.out_grad = np.random.random(out.shape).astype(out.dtype) - self.x_grad = fft_c2r_backward(self.x, self.out_grad, self.axes, - self.norm, self.forward, - self.last_dim_size) + self.x_grad = fft_c2r_backward( + self.x, + self.out_grad, + self.axes, + self.norm, + self.forward, + self.last_dim_size, + ) def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(["X"], - "Out", - user_defined_grads=[self.x_grad], - user_defined_grad_outputs=[self.out_grad], - check_eager=True) + self.check_grad( + ["X"], + "Out", + user_defined_grads=[self.x_grad], + user_defined_grad_outputs=[self.out_grad], + check_eager=True, + ) @parameterize( (TEST_CASE_NAME, 'x', 'axes', 'norm', 'forward', 'onesided'), - [('test_axes_is_sqe_type', np.random.randn(12, 18).astype(np.float64), - (0, 1), 'forward', True, True), - ('test_axis_not_last', np.random.randn(4, 8, 4).astype(np.float64), - (0, 1), "backward", False, True), - ('test_norm_forward', np.random.randn(12, 18).astype(np.float64), - (0, 1), "forward", False, False), - ('test_norm_backward', np.random.randn(12, 18).astype(np.float64), - (0, ), "backward", True, False), - ('test_norm_ortho', np.random.randn(12, 18).astype(np.float64), - (1, ), "ortho", True, False)]) + [ + ( + 'test_axes_is_sqe_type', + np.random.randn(12, 18).astype(np.float64), + (0, 1), + 'forward', + True, + True, + ), + ( + 'test_axis_not_last', + np.random.randn(4, 8, 4).astype(np.float64), + (0, 1), + "backward", + 
False, + True, + ), + ( + 'test_norm_forward', + np.random.randn(12, 18).astype(np.float64), + (0, 1), + "forward", + False, + False, + ), + ( + 'test_norm_backward', + np.random.randn(12, 18).astype(np.float64), + (0,), + "backward", + True, + False, + ), + ( + 'test_norm_ortho', + np.random.randn(12, 18).astype(np.float64), + (1,), + "ortho", + True, + False, + ), + ], +) class TestFFTR2COp(OpTest): - def setUp(self): self.op_type = "fft_r2c" self.dtype = self.x.dtype @@ -201,20 +329,28 @@ class TestFFTR2COp(OpTest): 'axes': self.axes, 'normalization': self.norm, "forward": self.forward, - 'onesided': self.onesided + 'onesided': self.onesided, } self.outputs = {'Out': out} self.out_grad = np.random.random(out.shape).astype(out.dtype) - self.x_grad = fft_r2c_backward(self.x, self.out_grad, self.axes, - self.norm, self.forward, self.onesided) + self.x_grad = fft_r2c_backward( + self.x, + self.out_grad, + self.axes, + self.norm, + self.forward, + self.onesided, + ) def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad("X", - "Out", - user_defined_grads=[self.x_grad], - user_defined_grad_outputs=[self.out_grad], - check_eager=True) + self.check_grad( + "X", + "Out", + user_defined_grads=[self.x_grad], + user_defined_grad_outputs=[self.out_grad], + check_eager=True, + ) diff --git a/python/paddle/fluid/tests/unittests/find_ports.py b/python/paddle/fluid/tests/unittests/find_ports.py index 5618d4034483186d2b84d3f75b6e6075ce454c24..868efe0ee8e9d0b53135755478d881d9dc56454e 100644 --- a/python/paddle/fluid/tests/unittests/find_ports.py +++ b/python/paddle/fluid/tests/unittests/find_ports.py @@ -24,8 +24,7 @@ def train(): worker_endpoints = worker_endpoints_env trainers_num = len(worker_endpoints.split(',')) - name = "worker_endpoints:{}" \ - .format(worker_endpoints) + name = "worker_endpoints:{}".format(worker_endpoints) print(name) file_name = os.getenv("PADDLE_LAUNCH_LOG") diff --git a/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py b/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py index e99021f282291d81bdcfa6be89fa612ecfd0c5ec..08e1e1d7b24502ec2fd08524b1cddad17397e1c4 100644 --- a/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py +++ b/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py @@ -30,32 +30,38 @@ def get_dataset(inputs): def net(batch_size=4, lr=0.01): """ - network definition - - Args: - batch_size(int): the size of mini-batch for training - lr(float): learning rate of training - Returns: - avg_cost: LoDTensor of cost. - """ + network definition + + Args: + batch_size(int): the size of mini-batch for training + lr(float): learning rate of training + Returns: + avg_cost: LoDTensor of cost. 
+ """ dnn_input_dim, lr_input_dim = int(2), int(2) with fluid.device_guard("cpu"): - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="float32", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="float32", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] @@ -67,10 +73,13 @@ def net(batch_size=4, lr=0.01): size=[dnn_input_dim, dnn_layer_dims[0]], param_attr=fluid.ParamAttr( name="deep_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) + dnn_pool = fluid.layers.sequence_pool( + input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool # build lr model @@ -80,8 +89,10 @@ def net(batch_size=4, lr=0.01): size=[lr_input_dim, 1], param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type="sum") with fluid.device_guard("gpu"): @@ -91,8 +102,10 @@ def net(batch_size=4, lr=0.01): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) @@ -124,22 +137,22 @@ dataset = get_dataset(feeds) if fleet.is_server(): pass - #fleet.init_server() - #fleet.run_server() + # fleet.init_server() + # fleet.run_server() elif fleet.is_heter_worker(): pass - #fleet.init_heter_worker() - #fleet.run_heter_worker(dataset=dataset) + # fleet.init_heter_worker() + # fleet.run_heter_worker(dataset=dataset) fleet.stop_worker() elif fleet.is_worker(): pass - #place = fluid.CPUPlace() - #exe = fluid.Executor(place) - #exe.run(fluid.default_startup_program()) - #fleet.init_worker() - #step = 1 - #for i in range(step): + # place = fluid.CPUPlace() + # exe = fluid.Executor(place) + # exe.run(fluid.default_startup_program()) + # fleet.init_worker() + # step = 1 + # for i in range(step): # exe.train_from_dataset( # program=fluid.default_main_program(), dataset=dataset, debug=False) - #exe.close() - #fleet.stop_worker() + # exe.close() + # fleet.stop_worker() diff --git a/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py b/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py index ebeeb1e272f0918c5d4ab7c5d5575f10bdcdb8d3..ee0f203881f6531b81c901a43857b64bfcdf2f6a 100755 --- a/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py +++ b/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py @@ -22,15 +22,16 @@ import paddle.distributed.fleet.base.role_maker as role_maker class TestFleetMetaOptimizer(unittest.TestCase): - def setUp(self): 
os.environ["PADDLE_TRAINER_ID"] = "1" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.1:36002" self._debug = False def debug_program(self, main_prog, startup_prog): - if not self._debug: return + if not self._debug: + return main_prog_ops = main_prog.global_block().ops startup_prog_ops = startup_prog.global_block().ops @@ -38,8 +39,11 @@ class TestFleetMetaOptimizer(unittest.TestCase): main_prog_op_types = [op.type for op in main_prog_ops] startup_prog_op_types = [op.type for op in startup_prog_ops] - print("=== debug program and ops in func [{}] ===".format( - inspect.stack()[1].function)) + print( + "=== debug program and ops in func [{}] ===".format( + inspect.stack()[1].function + ) + ) print(main_prog) print(main_prog_op_types) print(startup_prog) @@ -50,29 +54,29 @@ class TestFleetMetaOptimizer(unittest.TestCase): with fluid.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') - - fc_1 = paddle.fluid.layers.fc(input=input_x, - size=64, - act='tanh') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) + + fc_1 = paddle.fluid.layers.fc( + input=input_x, size=64, act='tanh' + ) fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[fc_2], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() return avg_cost, strategy def pp_net(self, main_prog, startup_prog, pp_degree=2): - def fc_block(input_x): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') @@ -84,23 +88,24 @@ class TestFleetMetaOptimizer(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) with fluid.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_y = paddle.fluid.layers.data(name="y", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_y = paddle.fluid.layers.data( + name="y", shape=[1], dtype='int64' + ) for stage_idx in range(pp_degree): with fluid.device_guard("gpu:" + str(stage_idx)): input_x = fc_block(input_x) with fluid.device_guard("gpu:" + str(pp_degree - 1)): - prediction = paddle.fluid.layers.fc(input=[input_x], - size=2, - act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.fluid.layers.fc( + input=[input_x], size=2, act='softmax' + ) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -120,14 +125,16 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy = fleet.DistributedStrategy() return avg_cost, strategy - def optimizer(self, - loss, - strategy, - train_prog, - startup_prog, - name='momentum', - regularization=None, - grad_clip=None): 
+ def optimizer( + self, + loss, + strategy, + train_prog, + startup_prog, + name='momentum', + regularization=None, + grad_clip=None, + ): with fluid.program_guard(train_prog, startup_prog): with fluid.unique_name.guard(): if name == 'momentum': @@ -135,18 +142,23 @@ class TestFleetMetaOptimizer(unittest.TestCase): learning_rate=0.01, momentum=0.9, regularization=regularization, - grad_clip=grad_clip) + grad_clip=grad_clip, + ) elif name == 'adam': optimizer = paddle.fluid.optimizer.Adam( learning_rate=0.01, regularization=regularization, - grad_clip=grad_clip) + grad_clip=grad_clip, + ) elif name == 'adamw': - optimizer = paddle.optimizer.AdamW(learning_rate=0.01, - weight_decay=0.01, - grad_clip=grad_clip) - optimizer = fleet.distributed_optimizer(optimizer, - strategy=strategy) + optimizer = paddle.optimizer.AdamW( + learning_rate=0.01, + weight_decay=0.01, + grad_clip=grad_clip, + ) + optimizer = fleet.distributed_optimizer( + optimizer, strategy=strategy + ) optimizer.minimize(loss) def set_strategy(self, strategy, name): @@ -182,7 +194,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.dgc_configs = { "rampup_begin_step": 128, "rampup_step": 100, - "sparsity": [0.996, 0.999] + "sparsity": [0.996, 0.999], } elif name == 'recompute': strategy.recompute = True @@ -230,7 +242,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): strategy.recompute_configs = { "checkpoints": ["fc_0.tmp_2", "fc_1.tmp_2"], "enable_offload": True, - "checkpoint_shape": [256] + "checkpoint_shape": [256], } elif name == "pipeline": strategy.pipeline = True diff --git a/python/paddle/fluid/tests/unittests/fleet_ps_training.py b/python/paddle/fluid/tests/unittests/fleet_ps_training.py index 65fa1ef935ef1e7a6082e52d8cbfdbe7745114ab..d14605a6179f7a956bf0220146563d0faa8e888f 100644 --- a/python/paddle/fluid/tests/unittests/fleet_ps_training.py +++ b/python/paddle/fluid/tests/unittests/fleet_ps_training.py @@ -15,7 +15,9 @@ import paddle.fluid as fluid from utils import gen_data from nets import mlp -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) from paddle.fluid.incubate.fleet.base import role_maker input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') @@ -43,8 +45,10 @@ elif fleet.is_worker(): exe.run(fleet.startup_program) step = 1001 for i in range(step): - cost_val = exe.run(program=fleet.main_program, - feed=gen_data(), - fetch_list=[cost.name]) - print("worker_index: %d, step%d cost = %f" % - (fleet.worker_index(), i, cost_val[0])) + cost_val = exe.run( + program=fleet.main_program, feed=gen_data(), fetch_list=[cost.name] + ) + print( + "worker_index: %d, step%d cost = %f" + % (fleet.worker_index(), i, cost_val[0]) + ) diff --git a/python/paddle/fluid/tests/unittests/gradient_checker.py b/python/paddle/fluid/tests/unittests/gradient_checker.py index d928abe2ca99c7b35ed3b13a14442c30b4194768..6e20037185285b8d1220c50747592ed18d864885 100644 --- a/python/paddle/fluid/tests/unittests/gradient_checker.py +++ b/python/paddle/fluid/tests/unittests/gradient_checker.py @@ -21,6 +21,7 @@ import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.backward import _append_grad_suffix_, _as_list from paddle.fluid.framework import _test_eager_guard + try: from collections.abc import Sequence except: @@ -91,8 +92,11 @@ def make_jacobian(x, y_size, np_dtype): return np.zeros((_product(x.shape), y_size), dtype=np_dtype) elif isinstance(x, Sequence): 
jacobians = list( - filter(lambda t: t is not None, - (make_jacobian(item, y_size, np_dtype) for item in x))) + filter( + lambda t: t is not None, + (make_jacobian(item, y_size, np_dtype) for item in x), + ) + ) return jacobians else: None @@ -151,7 +155,7 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta): _set_item(x_t, i, orig, np_type) for j in range(len(y)): - jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2. + jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.0 return jacobian @@ -180,10 +184,9 @@ def _compute_analytical_jacobian(program, x, y, place, scope): np_type = dtype_to_np_dtype(y.dtype) # create dy Variable in Program - dy = program.global_block().create_var(name=dy_name, - shape=y.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dy_name, shape=y.shape, dtype=np_type, persistable=True + ) # append backward dx = fluid.gradients(y, x, dy) @@ -213,23 +216,26 @@ def _compute_analytical_jacobian(program, x, y, place, scope): if dx_res[j] is not None: jacobian[dx_idx][:, i] = dx_res[j].flatten() else: - jacobian[dx_idx][:, i] = np.zeros(dx[dx_idx].shape, - dtype=np_type).flatten() + jacobian[dx_idx][:, i] = np.zeros( + dx[dx_idx].shape, dtype=np_type + ).flatten() _set_item(dy_t, i, 0, np_type) return jacobian -def grad_check(x, - y, - x_init=None, - place=None, - program=None, - eps=1e-6, - atol=1e-5, - rtol=1e-3, - raise_exception=True): +def grad_check( + x, + y, + x_init=None, + place=None, + program=None, + eps=1e-6, + atol=1e-5, + rtol=1e-3, + raise_exception=True, +): """ Check numerical and analytical gradients for dy/dx. Each Jacobian gradients is a 2-D array with shape [xi_size, yi_size]. @@ -279,8 +285,10 @@ def grad_check(x, # init inputs if x_init is not None if x_init: if len(x_init) != len(x): - raise ValueError('len(x_init) (=%d) is not the same' - ' as len(x) (= %d)' % (len(x_init), len(x))) + raise ValueError( + 'len(x_init) (=%d) is not the same' + ' as len(x) (= %d)' % (len(x_init), len(x)) + ) # init variable in main program for var, arr in zip(x, x_init): assert var.shape == arr.shape @@ -310,31 +318,37 @@ def grad_check(x, clone_x.append(b.var(xi.name)) break analytical.append( - _compute_analytical_jacobian(prog, clone_x, clone_y, place, scope)) + _compute_analytical_jacobian(prog, clone_x, clone_y, place, scope) + ) for i, (x_idx, y_idx) in enumerate( - product(*[range(len(x)), range(len(y))])): + product(*[range(len(x)), range(len(y))]) + ): a = analytical[y_idx][x_idx] n = numerical[x_idx][y_idx] if not np.allclose(a, n, rtol, atol): - msg = 'Jacobian mismatch for output %s ' \ - 'with respect to input %s on %s,\n' \ - 'numerical:%s\nanalytical:%s\n' \ - % (y[y_idx].name, x[x_idx].name, str(place), n, a) + msg = ( + 'Jacobian mismatch for output %s ' + 'with respect to input %s on %s,\n' + 'numerical:%s\nanalytical:%s\n' + % (y[y_idx].name, x[x_idx].name, str(place), n, a) + ) return fail_test(msg) return True -def double_grad_check(x, - y, - x_init=None, - y_grads=None, - place=None, - program=None, - eps=1e-6, - atol=1e-5, - rtol=1e-3, - raise_exception=True): +def double_grad_check( + x, + y, + x_init=None, + y_grads=None, + place=None, + program=None, + eps=1e-6, + atol=1e-5, + rtol=1e-3, + raise_exception=True, +): """ Check gradients of gradients. This function will append backward to the program before second order gradient check. 
@@ -375,10 +389,9 @@ def double_grad_check(x, for yi in y: dyi_name = _append_grad_suffix_(yi.name) np_type = dtype_to_np_dtype(yi.dtype) - dy = program.global_block().create_var(name=dyi_name, - shape=yi.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dyi_name, shape=yi.shape, dtype=np_type, persistable=True + ) dy.stop_gradient = False v = np.random.random(size=yi.shape).astype(np_type) set_var_in_scope(scope, place, dyi_name, v) @@ -407,17 +420,19 @@ def double_grad_check(x, # check triple grad and two outputs of the triple Kernel -def triple_grad_check(x, - y, - x_init=None, - y_grads=None, - x_grads_grads=None, - place=None, - program=None, - eps=1e-6, - atol=1e-5, - rtol=1e-3, - raise_exception=True): +def triple_grad_check( + x, + y, + x_init=None, + y_grads=None, + x_grads_grads=None, + place=None, + program=None, + eps=1e-6, + atol=1e-5, + rtol=1e-3, + raise_exception=True, +): """ Check triple gradients. This function will append backward to the program before third order gradient check. @@ -459,10 +474,9 @@ def triple_grad_check(x, for yi in y: dyi_name = _append_grad_suffix_(yi.name) np_type = dtype_to_np_dtype(yi.dtype) - dy = program.global_block().create_var(name=dyi_name, - shape=yi.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dyi_name, shape=yi.shape, dtype=np_type, persistable=True + ) dy.stop_gradient = False v = np.random.random(size=yi.shape).astype(np_type) set_var_in_scope(scope, place, dyi_name, v) @@ -484,10 +498,9 @@ def triple_grad_check(x, for dxi in target_grads: ddxi_name = _append_grad_suffix_(dxi.name) np_type = dtype_to_np_dtype(dxi.dtype) - ddx = program.global_block().create_var(name=ddxi_name, - shape=dxi.shape, - dtype=np_type, - persistable=True) + ddx = program.global_block().create_var( + name=ddxi_name, shape=dxi.shape, dtype=np_type, persistable=True + ) ddx.stop_gradient = False v = np.random.random(size=dxi.shape).astype(np_type) set_var_in_scope(scope, place, ddxi_name, v) @@ -507,30 +520,30 @@ def triple_grad_check(x, target_grads_grads = fluid.gradients(target_grads, x, x_grads_grads) # filter None in target_grads_grads for Dy/Dx may be None in kernel - filted = [(i, dyi) for i, dyi in enumerate(target_grads_grads) - if dyi is not None] + filted = [ + (i, dyi) for i, dyi in enumerate(target_grads_grads) if dyi is not None + ] filted_idx, filted_target_grads_grads = zip(*filted) x += x_grads_grads x_init += x_grads_grads_init # x <=> [x, dout, ddx] - grad_check(x=x, - y=filted_target_grads_grads, - x_init=x_init, - place=place, - program=program, - eps=eps, - atol=atol, - rtol=rtol) - - -def get_static_double_grad(x, - y, - x_init=None, - dy_init=None, - place=None, - program=None): + grad_check( + x=x, + y=filted_target_grads_grads, + x_init=x_init, + place=place, + program=program, + eps=eps, + atol=atol, + rtol=rtol, + ) + + +def get_static_double_grad( + x, y, x_init=None, dy_init=None, place=None, program=None +): """ Get Double Grad result of static graph. 
@@ -554,10 +567,9 @@ def get_static_double_grad(x, yi = y[i] dyi_name = _append_grad_suffix_(yi.name) np_type = dtype_to_np_dtype(yi.dtype) - dy = program.global_block().create_var(name=dyi_name, - shape=yi.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dyi_name, shape=yi.shape, dtype=np_type, persistable=True + ) dy.stop_gradient = False set_var_in_scope(scope, place, dyi_name, dy_init[i]) y_grads.append(dy) @@ -598,8 +610,10 @@ def get_static_double_grad(x, # init inputs if x_init is not None if x_init: if len(x_init) != len(x): - raise ValueError('len(x_init) (=%d) is not the same' - ' as len(x) (= %d)' % (len(x_init), len(x))) + raise ValueError( + 'len(x_init) (=%d) is not the same' + ' as len(x) (= %d)' % (len(x_init), len(x)) + ) # init variable in main program for var, arr in zip(x, x_init): assert var.shape == arr.shape @@ -611,10 +625,9 @@ def get_static_double_grad(x, np_type = dtype_to_np_dtype(yi.dtype) dy_name = _append_grad_suffix_(yi.name) # create dy Variable in Program - dy = program.global_block().create_var(name=dy_name, - shape=yi.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dy_name, shape=yi.shape, dtype=np_type, persistable=True + ) # init dy tensor in scope value = np.ones(yi.shape, dtype=np_type) dy_t = set_var_in_scope(scope, place, dy_name, value) @@ -633,11 +646,9 @@ def get_static_double_grad(x, return ddx_res -def get_eager_double_grad(func, - x_init=None, - dy_init=None, - place=None, - return_mid_result=False): +def get_eager_double_grad( + func, x_init=None, dy_init=None, place=None, return_mid_result=False +): """ Get Double Grad result of dygraph. @@ -670,11 +681,13 @@ def get_eager_double_grad(func, dys.append(dy_tensor) # calculate first derivative outputs = func(inputs) - d_inputs = paddle.grad(outputs=outputs, - inputs=inputs, - grad_outputs=dys, - create_graph=True, - allow_unused=True) + d_inputs = paddle.grad( + outputs=outputs, + inputs=inputs, + grad_outputs=dys, + create_graph=True, + allow_unused=True, + ) d_inputs = [d_input for d_input in d_inputs if d_input is not None] # calcluate second derivative @@ -691,29 +704,34 @@ def get_eager_double_grad(func, ddy.stop_gradient = False ddys.append(ddy) - dd_inputs = paddle.grad(outputs=d_inputs, - inputs=inputs, - grad_outputs=ddys, - create_graph=create_graph, - allow_unused=True) + dd_inputs = paddle.grad( + outputs=d_inputs, + inputs=inputs, + grad_outputs=ddys, + create_graph=create_graph, + allow_unused=True, + ) if return_mid_result: - return [dd_input for dd_input in dd_inputs - if dd_input is not None], inputs + ddys + return [ + dd_input for dd_input in dd_inputs if dd_input is not None + ], inputs + ddys else: return [ dd_input.numpy() for dd_input in dd_inputs if dd_input is not None ] -def double_grad_check_for_dygraph(func, - x, - y, - x_init=None, - place=None, - atol=1e-5, - rtol=1e-3, - raise_exception=True): +def double_grad_check_for_dygraph( + func, + x, + y, + x_init=None, + place=None, + atol=1e-5, + rtol=1e-3, + raise_exception=True, +): """ Check second order gradients of dygraph. 
This function will compare the second order gradients of dygraph and second order gradients of static graph @@ -755,34 +773,38 @@ def double_grad_check_for_dygraph(func, paddle.disable_static() with _test_eager_guard(): - eager_double_grad = get_eager_double_grad(func, x_init, y_grads_init, - place) + eager_double_grad = get_eager_double_grad( + func, x_init, y_grads_init, place + ) paddle.enable_static() - static_double_grad = get_static_double_grad(x, y, x_init, y_grads_init, - place) + static_double_grad = get_static_double_grad( + x, y, x_init, y_grads_init, place + ) if len(static_double_grad) != len(eager_double_grad): - msg = "The output grad tensor's number of static graph is different with dygraph, " \ + msg = ( + "The output grad tensor's number of static graph is different with dygraph, " "please check the python api unit test used." + ) raise RuntimeError(msg) for i in range(len(static_double_grad)): - if not np.allclose(static_double_grad[i], eager_double_grad[i], rtol, - atol): - msg = 'Check eager double result fail. Mismatch between static_graph double grad ' \ - 'and eager double grad on %s, the output double grad tensor\'s index is : %d \n' \ - 'static:%s\n eager:%s\n' \ + if not np.allclose( + static_double_grad[i], eager_double_grad[i], rtol, atol + ): + msg = ( + 'Check eager double result fail. Mismatch between static_graph double grad ' + 'and eager double grad on %s, the output double grad tensor\'s index is : %d \n' + 'static:%s\n eager:%s\n' % (str(place), i, static_double_grad[i], eager_double_grad[i]) + ) return fail_test(msg) -def get_static_triple_grad(x, - y, - x_init=None, - dy_init=None, - place=None, - program=None): +def get_static_triple_grad( + x, y, x_init=None, dy_init=None, place=None, program=None +): """ Get Triple Grad result of static graph. @@ -805,10 +827,9 @@ def get_static_triple_grad(x, yi = y[i] dyi_name = _append_grad_suffix_(yi.name) np_type = dtype_to_np_dtype(yi.dtype) - dy = program.global_block().create_var(name=dyi_name, - shape=yi.shape, - dtype=np_type, - persistable=True) + dy = program.global_block().create_var( + name=dyi_name, shape=yi.shape, dtype=np_type, persistable=True + ) dy.stop_gradient = False set_var_in_scope(scope, place, dyi_name, dy_init[i]) y_grads.append(dy) @@ -828,19 +849,14 @@ def get_static_triple_grad(x, value = np.ones(dxi.shape, dtype=np_type) x_grads_grads_init.append(value) - return get_static_double_grad(x, - y, - x_init, - dy_init=x_grads_grads_init, - place=place, - program=program) + return get_static_double_grad( + x, y, x_init, dy_init=x_grads_grads_init, place=place, program=program + ) -def get_eager_triple_grad(func, - x_init=None, - dy_init=None, - place=None, - return_mid_result=False): +def get_eager_triple_grad( + func, x_init=None, dy_init=None, place=None, return_mid_result=False +): """ Get triple Grad result of dygraph. 
@@ -853,11 +869,9 @@ def get_eager_triple_grad(func, Returns: A list of numpy array that stores second derivative result calulated by dygraph """ - dd_y, dd_x = get_eager_double_grad(func, - x_init, - dy_init, - place, - return_mid_result=True) + dd_y, dd_x = get_eager_double_grad( + func, x_init, dy_init, place, return_mid_result=True + ) # calcluate third derivative dddys = [] @@ -866,23 +880,24 @@ def get_eager_triple_grad(func, dddy = paddle.ones(shape=dd_yi.shape, dtype=dd_yi.dtype) dddy.stop_gradient = False dddys.append(dddy) - ddd_inputs = paddle.grad(outputs=dd_y, - inputs=dd_x, - grad_outputs=dddys, - allow_unused=True) + ddd_inputs = paddle.grad( + outputs=dd_y, inputs=dd_x, grad_outputs=dddys, allow_unused=True + ) return [ ddd_input.numpy() for ddd_input in ddd_inputs if ddd_input is not None ] -def triple_grad_check_for_dygraph(func, - x, - y, - x_init=None, - place=None, - atol=1e-5, - rtol=1e-3, - raise_exception=True): +def triple_grad_check_for_dygraph( + func, + x, + y, + x_init=None, + place=None, + atol=1e-5, + rtol=1e-3, + raise_exception=True, +): """ Check third order gradients of dygraph. This function will compare the third order gradients of dygraph and third order gradients of static graph @@ -924,23 +939,30 @@ def triple_grad_check_for_dygraph(func, paddle.disable_static() with _test_eager_guard(): - eager_triple_grad = get_eager_triple_grad(func, x_init, y_grads_init, - place) + eager_triple_grad = get_eager_triple_grad( + func, x_init, y_grads_init, place + ) paddle.enable_static() - static_triple_grad = get_static_triple_grad(x, y, x_init, y_grads_init, - place) + static_triple_grad = get_static_triple_grad( + x, y, x_init, y_grads_init, place + ) if len(static_triple_grad) != len(eager_triple_grad): - msg = "The output grad tensor's number of static graph is different with dygraph, " \ + msg = ( + "The output grad tensor's number of static graph is different with dygraph, " "please check the python api unit test used." + ) raise RuntimeError(msg) for i in range(len(static_triple_grad)): - if not np.allclose(static_triple_grad[i], eager_triple_grad[i], rtol, - atol): - msg = 'Check eager double result fail. Mismatch between static_graph double grad ' \ - 'and eager double grad on %s, the output double grad tensor\'s index is : %d \n' \ - 'static:%s\n eager:%s\n' \ + if not np.allclose( + static_triple_grad[i], eager_triple_grad[i], rtol, atol + ): + msg = ( + 'Check eager double result fail. Mismatch between static_graph double grad ' + 'and eager double grad on %s, the output double grad tensor\'s index is : %d \n' + 'static:%s\n eager:%s\n' % (str(place), i, static_triple_grad[i], eager_triple_grad[i]) + ) return fail_test(msg) diff --git a/python/paddle/fluid/tests/unittests/hccl_tools.py b/python/paddle/fluid/tests/unittests/hccl_tools.py index f47fe9962fe6d9bf283da77c544fd0520f3fbbe1..85011b0d237b15dff4c3bd35e0cba91224789ce5 100644 --- a/python/paddle/fluid/tests/unittests/hccl_tools.py +++ b/python/paddle/fluid/tests/unittests/hccl_tools.py @@ -48,24 +48,28 @@ def parse_args(): Examples: >>> parse_args() """ - parser = ArgumentParser(description="mindspore distributed training launch " - "helper utilty that will generate hccl" - " config file") + parser = ArgumentParser( + description="mindspore distributed training launch " + "helper utilty that will generate hccl" + " config file" + ) parser.add_argument( "--device_num", type=str, default="[0,8)", - help= - "The number of the Ascend accelerators used. 
please note that the Ascend accelerators" + help="The number of the Ascend accelerators used. please note that the Ascend accelerators" "used must be continuous, such [0,4) means to use four chips " "0,1,2,3; [0,1) means to use chip 0; The first four chips are" "a group, and the last four chips are a group. In addition to" "the [0,8) chips are allowed, other cross-group such as [3,6)" - "are prohibited.") - parser.add_argument("--visible_devices", - type=str, - default="0,1,2,3,4,5,6,7", - help="will use the visible devices sequentially") + "are prohibited.", + ) + parser.add_argument( + "--visible_devices", + type=str, + default="0,1,2,3,4,5,6,7", + help="will use the visible devices sequentially", + ) parser.add_argument("--server_ip", type=str, default="", help="server ip") args = parser.parse_args() return args @@ -108,20 +112,25 @@ def main(): first_num = int(args.device_num[1]) last_num = int(args.device_num[3]) if first_num < 0 or last_num > 8: - raise ValueError("device num {} must be in range [0,8] !".format( - args.device_num)) + raise ValueError( + "device num {} must be in range [0,8] !".format(args.device_num) + ) if first_num > last_num: raise ValueError( "First num {} of device num {} must less than last num {} !".format( - first_num, args.device_num, last_num)) + first_num, args.device_num, last_num + ) + ) if first_num < 4: if last_num > 4: if first_num == 0 and last_num == 8: pass else: raise ValueError( - "device num {} must be in the same group of [0,4] or [4,8] !" - .format(args.device_num)) + "device num {} must be in the same group of [0,4] or [4,8] !".format( + args.device_num + ) + ) device_num_list = list(range(first_num, last_num)) print("device_num_list:", device_num_list) @@ -146,26 +155,32 @@ def main(): device = { 'device_id': device_id, 'device_ip': device_ip, - 'rank_id': str(rank_id) + 'rank_id': str(rank_id), } - print('rank_id:{}, device_id:{}, device_ip:{}'.format( - rank_id, device_id, device_ip)) + print( + 'rank_id:{}, device_id:{}, device_ip:{}'.format( + rank_id, device_id, device_ip + ) + ) rank_id += 1 device_list.append(device) - hccn_table['server_list'].append({ - 'server_id': server_id, - 'device': device_list, - 'host_nic_ip': 'reserve' - }) + hccn_table['server_list'].append( + { + 'server_id': server_id, + 'device': device_list, + 'host_nic_ip': 'reserve', + } + ) hccn_table['status'] = 'completed' # save hccn_table to file table_path = os.getcwd() table_fn = os.path.join( table_path, - 'hccl_{}p_{}_{}.json'.format(len(device_num_list), - "".join(map(str, - device_num_list)), server_id)) + 'hccl_{}p_{}_{}.json'.format( + len(device_num_list), "".join(map(str, device_num_list)), server_id + ), + ) with open(table_fn, 'w') as table_fp: json.dump(hccn_table, table_fp, indent=4) sys.stdout.flush() diff --git a/python/paddle/fluid/tests/unittests/hdfs_test_utils.py b/python/paddle/fluid/tests/unittests/hdfs_test_utils.py index 0f87e5603360110aabbe98b62743aa0323609190..30d1c6149379fd8b8824b9d3c8b797b0df57ed97 100644 --- a/python/paddle/fluid/tests/unittests/hdfs_test_utils.py +++ b/python/paddle/fluid/tests/unittests/hdfs_test_utils.py @@ -15,13 +15,17 @@ import unittest import os -from paddle.distributed.fleet.utils.fs import FSFileExistsError, FSFileNotExistsError, HDFSClient, LocalFS +from paddle.distributed.fleet.utils.fs import ( + FSFileExistsError, + FSFileNotExistsError, + HDFSClient, + LocalFS, +) java_home = os.environ["JAVA_HOME"] class FSTestBase(unittest.TestCase): - def _test_dirs(self, fs): dir_path = os.path.abspath("./test_dir") 
fs.delete(dir_path) @@ -217,10 +221,12 @@ class FSTestBase(unittest.TestCase): pass def _test_list_dir(self, fs): - fs = HDFSClient("/usr/local/hadoop-2.7.7/", - None, - time_out=15 * 1000, - sleep_inter=100) + fs = HDFSClient( + "/usr/local/hadoop-2.7.7/", + None, + time_out=15 * 1000, + sleep_inter=100, + ) fs.ls_dir("test_not_exists") def _test_touch(self, fs): diff --git a/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_alexnet.py b/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_alexnet.py index 2b85788ae56c620704877df1e7e4b190686738d1..2e02163061fa7a232dc17361d8a4d0696c36eefc 100644 --- a/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_alexnet.py +++ b/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_alexnet.py @@ -34,7 +34,6 @@ micro_batch_size = 2 class TestDistPPTraning(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.model_parallel_size = 1 @@ -47,16 +46,17 @@ class TestDistPPTraning(unittest.TestCase): } strategy.pipeline_configs = { "accumulate_steps": batch_size // micro_batch_size, - "micro_batch_size": micro_batch_size + "micro_batch_size": micro_batch_size, } fleet.init(is_collective=True, strategy=strategy) def build_optimizer(self, model): - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2], - values=[0.001, 0.002], - verbose=True) - optimizer = paddle.optimizer.SGD(learning_rate=scheduler, - parameters=model.parameters()) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=[2], values=[0.001, 0.002], verbose=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=scheduler, parameters=model.parameters() + ) return scheduler, optimizer def test_pp_model(self): @@ -67,7 +67,7 @@ class TestDistPPTraning(unittest.TestCase): rank_id = dist.get_rank() set_random_seed(1024, dp_id, rank_id) - #construct model a + # construct model a model_a = AlexNet(10) scheduler_a, optimizer_a = self.build_optimizer(model_a) @@ -87,15 +87,21 @@ class TestDistPPTraning(unittest.TestCase): param.set_value(parameters[idx + pp_id * (param_len // 2)]) # construct reader - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size, drop_last=True + ) for step_id, data in enumerate(train_reader()): - x_data = np.array([x[0] for x in data]).astype('float32').reshape( - batch_size, 1, 28, 28) - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + x_data = ( + np.array([x[0] for x in data]) + .astype('float32') + .reshape(batch_size, 1, 28, 28) + ) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = paddle.to_tensor(x_data) label = paddle.to_tensor(y_data) img.stop_gradient = True @@ -113,9 +119,9 @@ class TestDistPPTraning(unittest.TestCase): loss_b = model_b.train_batch([img, label], optimizer_b, scheduler_b) print("loss: ", loss_a.numpy(), loss_b.numpy()) - np.testing.assert_allclose(loss_a.numpy(), - loss_b.numpy(), - rtol=5e-5) + np.testing.assert_allclose( + loss_a.numpy(), loss_b.numpy(), rtol=5e-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_layer.py b/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_layer.py index 7cdce186cf8bc76261cb97570f723ecdb47d9e45..13badaf9440025b61877ac8aadc6e4e8244674ce 100644 --- a/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_layer.py +++ b/python/paddle/fluid/tests/unittests/hybrid_parallel_pp_layer.py @@ 
-23,7 +23,6 @@ import paddle.nn.functional as F class ReshapeHelp(Layer): - def __init__(self, shape): super(ReshapeHelp, self).__init__() self.shape = shape @@ -33,7 +32,6 @@ class ReshapeHelp(Layer): class AlexNet(Layer): - def __init__(self, num_classes=10): super(AlexNet, self).__init__() self.features = Sequential( @@ -64,7 +62,6 @@ class AlexNet(Layer): class AlexNetPipe(AlexNet): - def to_layers(self): feat = [self.features[i] for i in range(len(self.features))] loss_fn = [self.reshape_layer, self.classifier] @@ -73,7 +70,6 @@ class AlexNetPipe(AlexNet): class AlexNetPipeDesc(PipelineLayer): - def __init__(self, num_classes=10, **kwargs): self.num_classes = num_classes decs = [ @@ -93,20 +89,19 @@ class AlexNetPipeDesc(PipelineLayer): LayerDesc(ReshapeHelp, shape=[-1, 256]), LayerDesc(nn.Linear, 256, self.num_classes), # classifier ] - super(AlexNetPipeDesc, self).__init__(layers=decs, - loss_fn=nn.CrossEntropyLoss(), - **kwargs) + super(AlexNetPipeDesc, self).__init__( + layers=decs, loss_fn=nn.CrossEntropyLoss(), **kwargs + ) class TestPipeLayerAPI(unittest.TestCase): - def setUp(self): strategy = fleet.DistributedStrategy() self.pipeline_parallel_size = 2 strategy.hybrid_configs = { "dp_degree": 1, "mp_degree": 1, - "pp_degree": self.pipeline_parallel_size + "pp_degree": self.pipeline_parallel_size, } fleet.init(is_collective=True, strategy=strategy) self.hcg = fleet.get_hybrid_communicate_group() @@ -117,9 +112,11 @@ class TestPipeLayerAPI(unittest.TestCase): def test_pipelayer_sequential(self): init_net = AlexNetPipe() - pipe_model = PipelineLayer(layers=init_net.to_layers(), - num_stages=self.pipeline_parallel_size, - loss_fn=nn.CrossEntropyLoss()) + pipe_model = PipelineLayer( + layers=init_net.to_layers(), + num_stages=self.pipeline_parallel_size, + loss_fn=nn.CrossEntropyLoss(), + ) stage_id = self.hcg.get_stage_id() init_parameters = init_net.parameters() pipe_parameters = pipe_model.parameters() diff --git a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py index 5482eda54467f6cc02c2682a0edada0ba0add84b..39dee27fe4e330be2a3fa44db19a4f0c62251995 100644 --- a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py +++ b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py @@ -27,32 +27,28 @@ paddle.enable_static() # and new executor twice and check the result. 
# please override the _get_feeds() and build_prgram() class TestCompatibility(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.iter_run = 4 def _get_feed(self): - """ return the feeds - """ + """return the feeds""" return None def build_program(self): - def true_func(): - return layers.fill_constant(shape=[1, 2], dtype='int32', - value=1), layers.fill_constant( - shape=[2, 3], - dtype='bool', - value=True) + return layers.fill_constant( + shape=[1, 2], dtype='int32', value=1 + ), layers.fill_constant(shape=[2, 3], dtype='bool', value=True) def false_func(): - return layers.fill_constant(shape=[3, 4], dtype='float32', - value=3), layers.fill_constant( - shape=[4, 5], - dtype='int64', - value=2) + return layers.fill_constant( + shape=[3, 4], dtype='float32', value=3 + ), layers.fill_constant(shape=[4, 5], dtype='int64', value=2) main_program = Program() startup_program = Program() @@ -101,14 +97,11 @@ class TestCompatibility(unittest.TestCase): class TestWhile(TestCompatibility): - def _get_feed(self): - """ return the feeds - """ + """return the feeds""" return None def build_program(self): - def cond(i, ten): return i < ten @@ -119,10 +112,12 @@ class TestWhile(TestCompatibility): main_program = paddle.static.default_main_program() startup_program = paddle.static.default_startup_program() with paddle.static.program_guard(main_program, startup_program): - i = paddle.full(shape=[1], fill_value=0, - dtype='int64') # loop counter - ten = paddle.full(shape=[1], fill_value=10, - dtype='int64') # loop length + i = paddle.full( + shape=[1], fill_value=0, dtype='int64' + ) # loop counter + ten = paddle.full( + shape=[1], fill_value=10, dtype='int64' + ) # loop length i, ten = paddle.static.nn.while_loop(cond, body, [i, ten]) exe = paddle.static.Executor(paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py index 9da058dfee6ae4b1925a5dfd7cb67237b9a5537a..f960ad6b049356f51c552ff256dcfd0476c48817 100644 --- a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py +++ b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py @@ -30,10 +30,12 @@ paddle.enable_static() class TestDryRun(unittest.TestCase): - def setUp(self): - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.place = core.Place() self.place.set_place(place) @@ -57,7 +59,8 @@ class TestDryRun(unittest.TestCase): standaloneexecutor = StandaloneExecutor(self.place, main_program.desc) # test for cost_info cost_info = standaloneexecutor.dry_run( - scope, {"a": np.ones([2, 2], dtype="float32")}) + scope, {"a": np.ones([2, 2], dtype="float32")} + ) self.check_cost_info(cost_info) def check_cost_info(self, cost_info): @@ -103,11 +106,13 @@ def build_program(): class ExecutorStatisticsTestCase(unittest.TestCase): - def setUp(self): self.iter_n = 3 - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.perf_path = './perfstat' def test_parallel_executor_statistics(self): @@ -140,7 +145,8 @@ class 
ExecutorStatisticsTestCase(unittest.TestCase): with framework._enable_standalone_executor(enable): exe = paddle.static.Executor(self.place) helper_profiler = profiler.Profiler( - targets=[profiler.ProfilerTarget.CPU], scheduler=(1, 2)) + targets=[profiler.ProfilerTarget.CPU], scheduler=(1, 2) + ) helper_profiler.start() for i in range(self.iter_n): exe.run(main_program, fetch_list=fetch_list) @@ -157,11 +163,13 @@ class ExecutorStatisticsTestCase(unittest.TestCase): class MultiStreamModelTestCase(unittest.TestCase): - def setUp(self): self.iter_n = 2 - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def test_result(self): ground_truths = self.run_test(False) @@ -180,16 +188,19 @@ class MultiStreamModelTestCase(unittest.TestCase): outs = [] for i in range(self.iter_n): outs.append( - exe.run(main_program, scope=scope, fetch_list=fetch_list)) + exe.run(main_program, scope=scope, fetch_list=fetch_list) + ) print(outs) return outs class SwitchExecutorInterfaceWithFeed(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.iter_run = 2 def build_program(self, is_double=False): @@ -205,24 +216,27 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): return main_program, startup_program, [c] - def _run(self, - feed, - use_str=False, - is_double=False, - add_wrong_fetch=False, - use_compiled=False): + def _run( + self, + feed, + use_str=False, + is_double=False, + add_wrong_fetch=False, + use_compiled=False, + ): paddle.seed(2020) main_program, startup_program, fetch_vars = self.build_program( - is_double) + is_double + ) exe = paddle.static.Executor(self.place) exe.run(startup_program) if use_compiled: main_program = paddle.static.CompiledProgram( - main_program).with_data_parallel(fetch_vars[0].name, - places=[self.place]) + main_program + ).with_data_parallel(fetch_vars[0].name, places=[self.place]) if use_str: # test for fetch name fetch_vars = [x.name for x in fetch_vars] @@ -239,15 +253,13 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): def run_raw_executor(self, feed, use_compiled=False): with framework._enable_standalone_executor(False): # run construct program 1 - out1 = self._run(feed, - use_str=False, - is_double=False, - use_compiled=use_compiled) + out1 = self._run( + feed, use_str=False, is_double=False, use_compiled=use_compiled + ) # run construct program 2 with same executor - out2 = self._run(feed, - use_str=True, - is_double=True, - use_compiled=use_compiled) + out2 = self._run( + feed, use_str=True, is_double=True, use_compiled=use_compiled + ) return [out1, out2] @@ -303,7 +315,6 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): class TestException(unittest.TestCase): - def setUp(self): self.place = paddle.CPUPlace() self.fetch_vars = None @@ -315,10 +326,9 @@ class TestException(unittest.TestCase): w = paddle.rand([10, 3]) ids = paddle.static.data(name="id", shape=[5], dtype='int64') data = paddle.static.data(name="data", shape=[3], dtype='float32') - emb = paddle.nn.functional.embedding(x=ids, - weight=w, - sparse=False, - name="embedding") + emb = paddle.nn.functional.embedding( + x=ids, weight=w, sparse=False, name="embedding" + ) emb = emb + data return main_program, startup_program, emb @@ -342,43 +352,52 @@ class 
TestException(unittest.TestCase): return out def test_exception(self): - feed = [{ - 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), - 'data': np.array([1, 2, 3]).astype(np.float32), - }, { - 'id': np.array([1, 2, 3, 4, 11]).astype(np.int64), - 'data': np.array([1, 2, 3]).astype(np.float32), - }] + feed = [ + { + 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), + 'data': np.array([1, 2, 3]).astype(np.float32), + }, + { + 'id': np.array([1, 2, 3, 4, 11]).astype(np.int64), + 'data': np.array([1, 2, 3]).astype(np.float32), + }, + ] self.assertRaises(ValueError, self.run_new_executor, feed) def test_nan(self): flags = {'FLAGS_check_nan_inf': True, 'FLAGS_benchmark': True} paddle.fluid.set_flags(flags) - feed = [{ - 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), - 'data': np.array([1, 2, 3]).astype(np.float32), - }, { - 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), - 'data': np.array([1, 2, 3]).astype(np.float32), - }] + feed = [ + { + 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), + 'data': np.array([1, 2, 3]).astype(np.float32), + }, + { + 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), + 'data': np.array([1, 2, 3]).astype(np.float32), + }, + ] feed[1]['data'][0] = np.nan self.assertRaises(RuntimeError, self.run_new_executor, feed) def test_scope_find_temp_var(self): - feed = [{ - 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), - 'data': np.array([1, 2, 3]).astype(np.float32), - }, { - 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), - 'data': np.array([2, 2, 2]).astype(np.float32), - }] + feed = [ + { + 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), + 'data': np.array([1, 2, 3]).astype(np.float32), + }, + { + 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), + 'data': np.array([2, 2, 2]).astype(np.float32), + }, + ] self.run_new_executor(feed) - self.assertIsNone(paddle.static.global_scope().find_var( - self.fetch_vars.name)) + self.assertIsNone( + paddle.static.global_scope().find_var(self.fetch_vars.name) + ) class TestInplaceApiWithDataTransform(unittest.TestCase): - def test_increment(self): if paddle.fluid.core.is_compiled_with_cuda(): with paddle.fluid.device_guard("gpu:0"): @@ -389,8 +408,9 @@ class TestInplaceApiWithDataTransform(unittest.TestCase): with framework._enable_standalone_executor(): for i in range(10): - a, = exe.run(paddle.static.default_main_program(), - fetch_list=[x]) + (a,) = exe.run( + paddle.static.default_main_program(), fetch_list=[x] + ) self.assertEqual(a[0], 1) diff --git a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_multiply_write.py b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_multiply_write.py index 0a707d1683fd17a16cea4a1d1f0c9effff4f6979..e5e44b3a28b781e49861af04a50c6f231271785a 100644 --- a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_multiply_write.py +++ b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_multiply_write.py @@ -22,19 +22,17 @@ paddle.enable_static() class TestMultiplyWrite(TestCompatibility): - def _get_feed(self): - """ return the feeds - """ + """return the feeds""" return None def build_program(self): main_program = Program() startup_program = Program() with paddle.static.program_guard(main_program, startup_program): - out = paddle.full((1, ), 1) - inp1 = paddle.full((1, ), 2) - inp2 = paddle.full((1, ), 3) + out = paddle.full((1,), 1) + inp1 = paddle.full((1,), 2) + inp2 = paddle.full((1,), 3) paddle.fluid.layers.assign(inp1, out) paddle.fluid.layers.assign(inp2, out) diff --git 
a/python/paddle/fluid/tests/unittests/ipu/custom_ops/deprecated/test_custom_nllloss_ipu.py b/python/paddle/fluid/tests/unittests/ipu/custom_ops/deprecated/test_custom_nllloss_ipu.py index 9ae7b307ca543a6dbd74323f6964dcdffcafa782..55a9b1338ab9b93096d2189c40d32fce57ac0853 100644 --- a/python/paddle/fluid/tests/unittests/ipu/custom_ops/deprecated/test_custom_nllloss_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/custom_ops/deprecated/test_custom_nllloss_ipu.py @@ -22,22 +22,23 @@ import paddle.static from paddle.utils.cpp_extension import load sys.path.append( - os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__))))) + os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +) from op_test_ipu import IPUOpTest def load_custom_ops(): cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="custom_nll_loss", - sources=[f"{cur_dir}/custom_nllloss.cc"], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], - extra_ldflags=['-lpopfloat']) + custom_ops = load( + name="custom_nll_loss", + sources=[f"{cur_dir}/custom_nllloss.cc"], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + extra_ldflags=['-lpopfloat'], + ) return custom_ops class TestBase(IPUOpTest): - def setUp(self): self.load_custom_ops() self.set_atol() @@ -75,12 +76,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) out = self.op(x, label, **self.op_attrs) out = paddle.mean(out) self.fetch_list = [out.name] @@ -96,7 +97,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_test_op(self): self.op = self.custom_ops.custom_nll_loss self.op_attrs = { diff --git a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_checkpointoutput_ipu.py b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_checkpointoutput_ipu.py index 698cef211db66921f6b4547aa6b044b8cb3a5b3b..866c69ffbdd07cfacd237697656a54b507c54c26 100644 --- a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_checkpointoutput_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_checkpointoutput_ipu.py @@ -27,16 +27,17 @@ from op_test_ipu import IPUOpTest def load_custom_ops(): cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="checkpointoutput", - sources=[ - f"{cur_dir}/custom_checkpointoutput.cc", - ], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx']) + custom_ops = load( + name="checkpointoutput", + sources=[ + f"{cur_dir}/custom_checkpointoutput.cc", + ], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + ) return custom_ops class TestCheckpointoutput(IPUOpTest): - def setUp(self): self.load_custom_ops() self.set_atol() @@ -66,9 +67,9 @@ class TestCheckpointoutput(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.add(x, x) x = self.op(x, **self.op_attrs) x = paddle.mean(x) diff --git a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py 
b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py index 70d51940865cfcf6af755ba499d67dbcfa1c7043..bb54c0e40da32a4e0e2047f795ca728606d59507 100644 --- a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py @@ -29,17 +29,18 @@ from op_test_ipu import IPUOpTest, np_dtype_to_fluid_str def load_custom_ops(): # load custom ops cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="custom_jit_ops", - sources=[ - f"{cur_dir}/leaky_relu_cpu.cc", - f"{cur_dir}/leaky_relu_ipu.cc", - ], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx']) + custom_ops = load( + name="custom_jit_ops", + sources=[ + f"{cur_dir}/leaky_relu_cpu.cc", + f"{cur_dir}/leaky_relu_ipu.cc", + ], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + ) return custom_ops class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,8 +50,9 @@ class TestBase(IPUOpTest): def set_feed(self): self.feed = { - "x": np.random.uniform(low=-2, high=2, size=[3, - 5]).astype('float32'), + "x": np.random.uniform(low=-2, high=2, size=[3, 5]).astype( + 'float32' + ), } def set_feed_attr(self): @@ -74,9 +76,11 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) # custom op out = custom_ops.custom_leaky_relu(x, **self.attrs) fetch_list = [out.name] @@ -96,14 +100,16 @@ class TestBase(IPUOpTest): # add name mapping for paddle custom op and popart custom ops # `paddle_op` was defined in leaky_relu_cpu.cc # `popart_op`, `domain` and `version` was defined in leaky_relu_ipu.cc - ipu_strategy.add_custom_op(paddle_op="custom_leaky_relu", - popart_op="LeakyRelu", - domain='custom.ops', - version=1) + ipu_strategy.add_custom_op( + paddle_op="custom_leaky_relu", + popart_op="LeakyRelu", + domain='custom.ops', + version=1, + ) program = paddle.static.IpuCompiledProgram( - main_prog, scope=scope, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, scope=scope, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -114,10 +120,9 @@ class TestBase(IPUOpTest): res0 = self._test_base(False) res1 = self._test_base(True) - np.testing.assert_allclose(res0.flatten(), - res1.flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol + ) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_ops_ipu.py b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_ops_ipu.py index 744ea86663a7c86122daf91a7961c85f58b3e52d..9dc405dca1467fba90ce7e0e3b94420ccab8992d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_ops_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_ops_ipu.py @@ -28,56 +28,67 @@ from op_test_ipu import IPUOpTest # just load one custom-op for the data race issue under parallel mode def load_custom_detach(): cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="custom_detach", - sources=[ - f"{cur_dir}/custom_detach.cc", - ], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], - extra_ldflags=['-lpopfloat']) + custom_ops 
= load( + name="custom_detach", + sources=[ + f"{cur_dir}/custom_detach.cc", + ], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + extra_ldflags=['-lpopfloat'], + ) return custom_ops def load_custom_identity(): cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="custom_identity", - sources=[ - f"{cur_dir}/custom_identity.cc", - ], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], - extra_ldflags=['-lpopfloat']) + custom_ops = load( + name="custom_identity", + sources=[ + f"{cur_dir}/custom_identity.cc", + ], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + extra_ldflags=['-lpopfloat'], + ) return custom_ops def load_custom_nll(): cur_dir = os.path.dirname(os.path.realpath(__file__)) - custom_ops = load(name="custom_nll", - sources=[ - f"{cur_dir}/custom_nll.cc", - ], - extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], - extra_ldflags=['-lpopfloat']) + custom_ops = load( + name="custom_nll", + sources=[ + f"{cur_dir}/custom_nll.cc", + ], + extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'], + extra_ldflags=['-lpopfloat'], + ) return custom_ops def build_ipu_strategy(): ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.add_custom_op(paddle_op="custom_detach", - popart_op="Detach", - domain="ai.graphcore", - version=1) - ipu_strategy.add_custom_op(paddle_op="custom_identity", - popart_op="Identity", - domain="ai.onnx", - version=11) - ipu_strategy.add_custom_op(paddle_op="custom_nll", - popart_op="Nll", - domain="ai.graphcore", - version=1) + ipu_strategy.add_custom_op( + paddle_op="custom_detach", + popart_op="Detach", + domain="ai.graphcore", + version=1, + ) + ipu_strategy.add_custom_op( + paddle_op="custom_identity", + popart_op="Identity", + domain="ai.onnx", + version=11, + ) + ipu_strategy.add_custom_op( + paddle_op="custom_nll", + popart_op="Nll", + domain="ai.graphcore", + version=1, + ) return ipu_strategy class TestBase(IPUOpTest): - def setUp(self): self.load_custom_ops() self.set_atol() @@ -107,9 +118,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = self.op(x, **self.op_attrs) out = paddle.mean(out) self.fetch_list = [out.name] @@ -127,7 +138,6 @@ class TestBase(IPUOpTest): class TestIdentity(TestBase): - def load_custom_ops(self): self.custom_ops = load_custom_identity() @@ -137,7 +147,6 @@ class TestIdentity(TestBase): class TestNll(TestBase): - def load_custom_ops(self): self.custom_ops = load_custom_nll() @@ -159,12 +168,12 @@ class TestNll(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) out = self.op(x, label, **self.op_attrs) out = paddle.mean(out) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_data_parallel_ipu.py b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_data_parallel_ipu.py index 891aa501c50796c05d04208593adcd1706279c7c..dbeed8e4da5abf1101d116b723bf45237cb8312a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_data_parallel_ipu.py 
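For context on the name-mapping pattern being reformatted above: a condensed sketch of how these tests wire a JIT-compiled custom op into an IPU program. It only reuses calls that already appear in the patch (load, IpuStrategy.add_custom_op, IpuCompiledProgram); the source paths and the empty attribute dict are placeholders, and it assumes an IPU-enabled Paddle build plus the Graphcore toolchain.

import paddle
import paddle.static
from paddle.utils.cpp_extension import load

paddle.enable_static()

# JIT-compile the custom op sources shipped next to the test (placeholder paths).
custom_ops = load(
    name="custom_jit_ops",
    sources=["leaky_relu_cpu.cc", "leaky_relu_ipu.cc"],
    extra_cxx_cflags=['-DONNX_NAMESPACE=onnx'],
)

op_attrs = {}  # the real test forwards its own **self.attrs here
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[3, 5], dtype='float32')
    out = custom_ops.custom_leaky_relu(x, **op_attrs)

# Map the Paddle-side op name onto the PopART kernel registered in leaky_relu_ipu.cc.
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(num_ipus=1, is_training=False)
ipu_strategy.add_custom_op(
    paddle_op="custom_leaky_relu",
    popart_op="LeakyRelu",
    domain='custom.ops',
    version=1,
)
program = paddle.static.IpuCompiledProgram(
    main_prog, ipu_strategy=ipu_strategy
).compile([x.name], [out.name])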
+++ b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_data_parallel_ipu.py @@ -27,7 +27,6 @@ mpi_comm = None @unittest.skip('Disable distributed tests on auto CI.') class TestBase(IPUOpTest): - def set_attrs(self, enable_ipu, optimizer, log, onchip=False, rts=False): self.ipu_options = { "enable_pipelining": True, @@ -38,8 +37,8 @@ class TestBase(IPUOpTest): "replicated_graph_count": 2, "location_optimizer": { "on_chip": onchip, - "use_replicated_tensor_sharding": rts - } + "use_replicated_tensor_sharding": rts, + }, } self.cpu_bs = 16 @@ -63,19 +62,17 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name='image', - shape=[bs, 3, 10, 10], - dtype='float32') + image = paddle.static.data( + name='image', shape=[bs, 3, 10, 10], dtype='float32' + ) with paddle.static.ipu_shard_guard(index=0, stage=0): - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) with paddle.static.ipu_shard_guard(index=1, stage=1): - conv2 = paddle.static.nn.conv2d(conv1, - num_filters=3, - filter_size=3, - bias_attr=False) + conv2 = paddle.static.nn.conv2d( + conv1, num_filters=3, filter_size=3, bias_attr=False + ) # should consider influence of bs loss = paddle.mean(conv2) @@ -104,28 +101,35 @@ class TestBase(IPUOpTest): ipu_strategy.set_graph_config( num_ipus=2 * self.ipu_options['replicated_graph_count'], is_training=True, - enable_manual_shard=True) + enable_manual_shard=True, + ) ipu_strategy.set_options(self.ipu_options) - ipu_strategy.set_options({ - "enable_distribution": - True, - "enable_distributed_replicated_graphs": - True, - "global_replica_offset": - int(os.environ.get("PADDLE_TRAINER_ID")) * 2, - "global_replication_factor": - 4 - }) + ipu_strategy.set_options( + { + "enable_distribution": True, + "enable_distributed_replicated_graphs": True, + "global_replica_offset": int( + os.environ.get("PADDLE_TRAINER_ID") + ) + * 2, + "global_replication_factor": 4, + } + ) program = paddle.static.IpuCompiledProgram( - main_prog, ipu_strategy=ipu_strategy).compile( - feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) feed = { - "image": - np.tile(data, [ - self.ipu_options['replicated_graph_count'] * - self.ipu_options['batches_per_step'] * - self.ipu_options['accumulation_factor'], 1, 1, 1 - ]) + "image": np.tile( + data, + [ + self.ipu_options['replicated_graph_count'] + * self.ipu_options['batches_per_step'] + * self.ipu_options['accumulation_factor'], + 1, + 1, + 1, + ], + ) } else: @@ -176,11 +180,13 @@ if __name__ == "__main__": onchip = True if sys.argv[3] == "True" else False rts = True if sys.argv[4] == "True" else False test = TestBase() - test.set_attrs(enable_ipu=True, - optimizer=optimizer, - log=log, - onchip=onchip, - rts=rts) + test.set_attrs( + enable_ipu=True, + optimizer=optimizer, + log=log, + onchip=onchip, + rts=rts, + ) test.test() # Run cpu tests for compare elif len(sys.argv) == 3: diff --git a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_pod128_sample.py b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_pod128_sample.py index f81ed48f04ffdd97361feb9d452a6ef8cab0f3af..a966f4308228b606071bb675c71f746ea3448221 100644 --- a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_pod128_sample.py +++ 
b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_pod128_sample.py @@ -71,43 +71,42 @@ def TestDistTraining(): exe.run(startup_prog) ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=64, - is_training=True, - enable_manual_shard=True) + ipu_strategy.set_graph_config( + num_ipus=64, is_training=True, enable_manual_shard=True + ) ipu_strategy.set_pipelining_config( enable_pipelining=True, batches_per_step=1, enable_gradient_accumulation=True, - accumulation_factor=4) - ipu_strategy.set_options({ - "enable_distribution": - True, - "enable_replicated_graphs": - True, - "replicated_graph_count": - 32, - "enable_distributed_replicated_graphs": - True, - "global_replica_offset": - # Paddle : int(os.environ.get("PADDLE_TRAINER_ID")) * 32 - # PopRun : int(os.environ.get("POPDIST_REPLICA_INDEX_OFFSET")) - int(os.environ.get("PADDLE_TRAINER_ID")) * 32, - "global_replication_factor": - 64, - "location_optimizer": { - "on_chip": False, - "use_replicated_tensor_sharding": True + accumulation_factor=4, + ) + ipu_strategy.set_options( + { + "enable_distribution": True, + "enable_replicated_graphs": True, + "replicated_graph_count": 32, + "enable_distributed_replicated_graphs": True, + "global_replica_offset": + # Paddle : int(os.environ.get("PADDLE_TRAINER_ID")) * 32 + # PopRun : int(os.environ.get("POPDIST_REPLICA_INDEX_OFFSET")) + int(os.environ.get("PADDLE_TRAINER_ID")) * 32, + "global_replication_factor": 64, + "location_optimizer": { + "on_chip": False, + "use_replicated_tensor_sharding": True, + }, } - }) + ) ipu_program = paddle.static.IpuCompiledProgram( - main_prog, ipu_strategy=ipu_strategy) + main_prog, ipu_strategy=ipu_strategy + ) program = ipu_program.compile(feed_list, fetch_list) for i in range(10): - res = exe.run(program, - feed={"x": input_data}, - fetch_list=fetch_list) + res = exe.run( + program, feed={"x": input_data}, fetch_list=fetch_list + ) print("index: {}, result: {}".format(i, res)) diff --git a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_sample.py b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_sample.py index d42977b5962d3d3a4cab782a85038c937a0b7a5d..a3ffc6694a93efeed9dc6d1e5b1bc21d4d940336 100644 --- a/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_sample.py +++ b/python/paddle/fluid/tests/unittests/ipu/distributed/test_dist_sample.py @@ -91,57 +91,73 @@ def Test(use_dist, file_name): if use_dist: ipu_strategy.set_graph_config(num_ipus=2, is_training=True) # Set distributed envs - ipu_strategy.set_options({ - "enable_distribution": - True, - "enable_replicated_graphs": - True, - "replicated_graph_count": - 2, - "enable_distributed_replicated_graphs": - True, - "global_replica_offset": - int(os.environ.get("PADDLE_TRAINER_ID")) * 2, - "global_replication_factor": - 4 - }) + ipu_strategy.set_options( + { + "enable_distribution": True, + "enable_replicated_graphs": True, + "replicated_graph_count": 2, + "enable_distributed_replicated_graphs": True, + "global_replica_offset": int( + os.environ.get("PADDLE_TRAINER_ID") + ) + * 2, + "global_replication_factor": 4, + } + ) else: ipu_strategy.set_graph_config(num_ipus=4, is_training=True) - ipu_strategy.set_options({ - "enable_replicated_graphs": True, - "replicated_graph_count": 4, - }) + ipu_strategy.set_options( + { + "enable_replicated_graphs": True, + "replicated_graph_count": 4, + } + ) ipu_program = paddle.static.IpuCompiledProgram( - main_prog, ipu_strategy=ipu_strategy) + main_prog, ipu_strategy=ipu_strategy + ) program = 
ipu_program.compile(feed_list, fetch_list) if use_dist: if os.environ.get("PADDLE_TRAINER_ID") == "0": - input_data = np.concatenate([ - np.array([[[1], [3]], [[2], [4]], - [[4], [127]]]).astype(np.int32), - np.array([[[1], [3]], [[2], [4]], - [[4], [127]]]).astype(np.int32) - ]) + input_data = np.concatenate( + [ + np.array( + [[[1], [3]], [[2], [4]], [[4], [127]]] + ).astype(np.int32), + np.array( + [[[1], [3]], [[2], [4]], [[4], [127]]] + ).astype(np.int32), + ] + ) else: - input_data = np.concatenate([ - np.array([[[8], [60]], [[50], [77]], - [[90], [13]]]).astype(np.int32), - np.array([[[8], [60]], [[50], [77]], - [[90], [13]]]).astype(np.int32) - ]) + input_data = np.concatenate( + [ + np.array( + [[[8], [60]], [[50], [77]], [[90], [13]]] + ).astype(np.int32), + np.array( + [[[8], [60]], [[50], [77]], [[90], [13]]] + ).astype(np.int32), + ] + ) else: - input_data = np.concatenate([ - np.array([[[1], [3]], [[2], [4]], - [[4], [127]]]).astype(np.int32), - np.array([[[1], [3]], [[2], [4]], - [[4], [127]]]).astype(np.int32), - np.array([[[8], [60]], [[50], [77]], - [[90], [13]]]).astype(np.int32), - np.array([[[8], [60]], [[50], [77]], - [[90], [13]]]).astype(np.int32) - ]) + input_data = np.concatenate( + [ + np.array([[[1], [3]], [[2], [4]], [[4], [127]]]).astype( + np.int32 + ), + np.array([[[1], [3]], [[2], [4]], [[4], [127]]]).astype( + np.int32 + ), + np.array( + [[[8], [60]], [[50], [77]], [[90], [13]]] + ).astype(np.int32), + np.array( + [[[8], [60]], [[50], [77]], [[90], [13]]] + ).astype(np.int32), + ] + ) feed_data = {"x": input_data} for step in range(10): diff --git a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py index 9cb70d59d3e5931091e83f1f76d673f8aa77c11d..d518dec3eb7ab0cf034cd83aef5b67d79ef0bc71 100644 --- a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py @@ -55,7 +55,6 @@ class ExecutionMode(IntEnum): class IPUTest(unittest.TestCase): - @classmethod def setUpClass(cls): # Get random seeds @@ -84,10 +83,10 @@ class IPUTest(unittest.TestCase): return True -@unittest.skipIf(not paddle.is_compiled_with_ipu(), - "core is not compiled with IPU") +@unittest.skipIf( + not paddle.is_compiled_with_ipu(), "core is not compiled with IPU" +) class IPUD2STest(IPUTest): - @classmethod def setUpClass(cls): super().setUpClass() @@ -101,11 +100,11 @@ class IPUD2STest(IPUTest): paddle.framework.core.IpuBackend.get_instance().reset() -@unittest.skipIf(not paddle.is_compiled_with_ipu(), - "core is not compiled with IPU") +@unittest.skipIf( + not paddle.is_compiled_with_ipu(), "core is not compiled with IPU" +) class IPUOpTest(IPUTest): - """Base Class for single op unit tests using static graph on IPU. 
- """ + """Base Class for single op unit tests using static graph on IPU.""" @classmethod def setUpClass(cls): @@ -159,7 +158,6 @@ class IPUOpTest(IPUTest): # Decorator for static graph building def static_graph(builder): - def wrapper(self, *args, **kwargs): self.scope = paddle.static.Scope() self.main_prog = paddle.static.Program() @@ -168,9 +166,11 @@ class IPUOpTest(IPUTest): self.startup_prog.random_seed = self.SEED with paddle.static.scope_guard(self.scope): with paddle.utils.unique_name.guard( - paddle.utils.unique_name.generate('')): - with paddle.static.program_guard(self.main_prog, - self.startup_prog): + paddle.utils.unique_name.generate('') + ): + with paddle.static.program_guard( + self.main_prog, self.startup_prog + ): builder(self, *args, **kwargs) return wrapper @@ -181,11 +181,11 @@ class IPUOpTest(IPUTest): amp_list = paddle.static.amp.CustomOpLists() amp_list.unsupported_list = {'scale'} to_fp16_var_names = paddle.static.amp.cast_model_to_fp16( - main_program, amp_list, use_fp16_guard=False) + main_program, amp_list, use_fp16_guard=False + ) paddle.static.amp.cast_parameters_to_fp16( - paddle.CPUPlace(), - main_program, - to_fp16_var_names=to_fp16_var_names) + paddle.CPUPlace(), main_program, to_fp16_var_names=to_fp16_var_names + ) def run_op_test(self, exec_mode, ipu_strategy=None): # NOTE: some op has no inputs @@ -208,14 +208,12 @@ class IPUOpTest(IPUTest): # TODO(ipu) remove in the future version of popart # keep the log clean, no side effects for tests without profiling ipu_strategy.set_options( - {'engine_options': { - 'debug.retainDebugInformation': 'false' - }}) + {'engine_options': {'debug.retainDebugInformation': 'false'}} + ) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, self.fetch_list) else: program = self.main_prog @@ -226,9 +224,9 @@ class IPUOpTest(IPUTest): if self.is_training: result = [] for _ in range(self.epoch): - loss_res = exe.run(program, - feed=feed, - fetch_list=self.fetch_list) + loss_res = exe.run( + program, feed=feed, fetch_list=self.fetch_list + ) result.append(loss_res) else: result = exe.run(program, feed=feed, fetch_list=self.fetch_list) @@ -253,16 +251,16 @@ class IPUOpTest(IPUTest): for cpu_fp32_res, ipu_fp32_res in zip(cpu_fp32, ipu_fp32): cpu_fp32_res = np.asarray(cpu_fp32_res).astype(np.float32).flatten() ipu_fp32_res = np.asarray(ipu_fp32_res).astype(np.float32).flatten() - pass_check = np.allclose(ipu_fp32_res, - cpu_fp32_res, - rtol=self.rtol, - atol=self.atol) + pass_check = np.allclose( + ipu_fp32_res, cpu_fp32_res, rtol=self.rtol, atol=self.atol + ) if not pass_check: max_atol = np.abs(ipu_fp32_res - cpu_fp32_res).max() cpu_fp32_abs = np.abs(cpu_fp32_res) cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20 - max_rtol = (np.abs(ipu_fp32_res - cpu_fp32_res) / - cpu_fp32_abs).max() + max_rtol = ( + np.abs(ipu_fp32_res - cpu_fp32_res) / cpu_fp32_abs + ).max() raise AssertionError( f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}" ) @@ -274,22 +272,28 @@ class IPUOpTest(IPUTest): ipu_fp16 = output_dict[ExecutionMode.IPU_FP16] if len(cpu_fp32) != len(ipu_fp16): raise ValueError( - "different outputs number between ipu and cpu.") + "different outputs number between ipu and cpu." 
+ ) for cpu_fp32_res, ipu_fp16_res in zip(cpu_fp32, ipu_fp16): - cpu_fp32_res = np.asarray(cpu_fp32_res).astype( - np.float32).flatten() - ipu_fp16_res = np.asarray(ipu_fp16_res).astype( - np.float32).flatten() - pass_check = np.allclose(ipu_fp16_res, - cpu_fp32_res, - rtol=self.rtol_fp16, - atol=self.atol_fp16) + cpu_fp32_res = ( + np.asarray(cpu_fp32_res).astype(np.float32).flatten() + ) + ipu_fp16_res = ( + np.asarray(ipu_fp16_res).astype(np.float32).flatten() + ) + pass_check = np.allclose( + ipu_fp16_res, + cpu_fp32_res, + rtol=self.rtol_fp16, + atol=self.atol_fp16, + ) if not pass_check: max_atol = np.abs(ipu_fp16_res - cpu_fp32_res).max() cpu_fp32_abs = np.abs(cpu_fp32_res) cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20 - max_rtol = (np.abs(ipu_fp16_res - cpu_fp32_res) / - cpu_fp32_abs).max() + max_rtol = ( + np.abs(ipu_fp16_res - cpu_fp32_res) / cpu_fp32_abs + ).max() raise AssertionError( f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}" ) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py index 97ee7a45e001ca118cf7b55d613722cda2f97fd7..672195469d2b23555af9c836442d69a63c2da943 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_activation_ops_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_test_op() @@ -47,9 +46,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = self.op(x, **self.op_attrs) self.fetch_list = [out.name] @@ -65,7 +64,6 @@ class TestBase(IPUOpTest): class TestBReluCase0(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) * 30 self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -78,35 +76,30 @@ class TestBReluCase0(TestBase): class TestBReluCase1(TestBReluCase0): - def set_test_op(self): self.op = paddle.fluid.layers.brelu self.op_attrs = {"t_min": 0.1, 't_max': 10.0} class TestEluCase1(TestBase): - def set_test_op(self): self.op = F.elu self.op_attrs = {"alpha": 0.3} class TestHardShrinkCase0(TestBase): - def set_test_op(self): self.op = F.hardshrink self.op_attrs = {} class TestHardSigmoidCase0(TestBase): - def set_test_op(self): self.op = F.hardsigmoid self.op_attrs = {} class TestHardSigmoidCase1(TestBase): - def set_test_op(self): self.op = F.hardsigmoid self.op_attrs = { @@ -116,126 +109,108 @@ class TestHardSigmoidCase1(TestBase): class TestHardSwishCase0(TestBase): - def set_test_op(self): self.op = F.hardswish self.op_attrs = {} class TestLeakyReluCase0(TestBase): - def set_test_op(self): self.op = F.leaky_relu self.op_attrs = {} class TestLeakyReluCase1(TestBase): - def set_test_op(self): self.op = F.leaky_relu self.op_attrs = {'negative_slope': 0.2333} class TestLog10Case0(TestBase): - def set_test_op(self): self.op = paddle.log10 self.op_attrs = {} class TestLog1pCase0(TestBase): - def set_test_op(self): self.op = paddle.log1p self.op_attrs = {} class TestLog2Case0(TestBase): - def set_test_op(self): self.op = paddle.log2 self.op_attrs = {} class TestLogSigmoidCase0(TestBase): - def set_test_op(self): self.op = F.log_sigmoid self.op_attrs = {} class TestLogSoftmaxCase0(TestBase): - def 
set_test_op(self): self.op = F.log_softmax self.op_attrs = {} class TestMishCase0(TestBase): - def set_test_op(self): self.op = F.mish self.op_attrs = {} class TestRelu6Case0(TestBase): - def set_test_op(self): self.op = F.relu6 self.op_attrs = {} class TestRsqrtCase0(TestBase): - def set_test_op(self): self.op = paddle.rsqrt self.op_attrs = {} class TestSeluCase0(TestBase): - def set_test_op(self): self.op = F.selu self.op_attrs = {} class TestSiluCase0(TestBase): - def set_test_op(self): self.op = F.silu self.op_attrs = {} class TestSoftShrinkCase0(TestBase): - def set_test_op(self): self.op = F.softshrink self.op_attrs = {} class TestSoftShrinkCase1(TestBase): - def set_test_op(self): self.op = F.softshrink self.op_attrs = {'threshold': 0.2333} class TestSquareCase0(TestBase): - def set_test_op(self): self.op = paddle.square self.op_attrs = {} class TestSwishCase0(TestBase): - def set_test_op(self): self.op = F.swish self.op_attrs = {} class TestTanhShrinkCase0(TestBase): - def set_atol(self): super().set_atol() self.atol = 1e-7 @@ -246,14 +221,12 @@ class TestTanhShrinkCase0(TestBase): class TestThresholdedReluCase0(TestBase): - def set_test_op(self): self.op = F.thresholded_relu self.op_attrs = {} class TestThresholdedReluCase1(TestBase): - def set_test_op(self): self.op = F.thresholded_relu self.op_attrs = {'threshold': 0.2333} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py index 836b99099ffe07085438656d61972626434b823c..0e76225c5618d98b958ef488786c8250aa1ad474 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -48,16 +47,18 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - data = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + data = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) input_scale = paddle.fluid.layers.create_parameter( - shape=[self.feed_shape[0][1]], dtype="float32") + shape=[self.feed_shape[0][1]], dtype="float32" + ) input_bias = paddle.fluid.layers.create_parameter( - shape=[self.feed_shape[0][1]], dtype="float32") - out = paddle.fluid.layers.affine_channel(data, - scale=input_scale, - bias=input_bias) + shape=[self.feed_shape[0][1]], dtype="float32" + ) + out = paddle.fluid.layers.affine_channel( + data, scale=input_scale, bias=input_bias + ) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -72,7 +73,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 4, 64, 64]) self.feed_fp32 = {'data': data.astype(np.float32)} @@ -81,7 +81,6 @@ class TestCase1(TestBase): @unittest.skip("Only support NCHW") class TestNHWC(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['data_layout'] = 'NHWC' diff --git a/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py index 078e744ae507d3e8b54b527f2e1e7a6da2607831..181ae2bfbc7590ba33dcd260d34789183239deb7 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py @@ -21,7 +21,6 @@ from 
paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.argmax(x, **self.attrs) self.fetch_list = [out.name] @@ -64,7 +63,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": 0} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_arg_min_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_arg_min_op_ipu.py index 30c604901e877b75c2d3a48d7ac11d7beeff0adc..9c1de175dafd30ceef4453637757d44df08b1f86 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_arg_min_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_arg_min_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.argmin(x, **self.attrs) self.fetch_list = [out.name] @@ -64,7 +63,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": 0} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_argsort_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_argsort_op_ipu.py index 3f19da43c71c35adc102e14ccf1dcb421e8f19b7..0f6c28c82cdb4888f75c455b136f55955d5c2ce2 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_argsort_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_argsort_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -47,9 +46,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out, _ = paddle.fluid.layers.argsort(x, **self.attrs) self.fetch_list = [out.name] @@ -67,7 +66,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'axis': 0, @@ -76,7 +74,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { 'axis': 1, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py index 93cdaf018b400dc8557c70d6f516a3801c1fb2cc..08b40fc23c8032e9360333a5ff81962c0ef3a587 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -39,9 +38,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = 
paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.assign(x) out = paddle.fluid.layers.elementwise_add(x, x) self.fetch_list = [out.name] @@ -58,7 +57,6 @@ class TestBase(IPUOpTest): class TestAssignFp32Value(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -69,16 +67,15 @@ class TestAssignFp32Value(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) assign = paddle.assign(self.assign_fp32) out = paddle.fluid.layers.elementwise_add(x, assign) self.fetch_list = [out.name] class TestAssignBoolValue(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -88,9 +85,9 @@ class TestAssignBoolValue(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.less_than(x, x) assign = paddle.assign(self.assign_bool) x = paddle.logical_and(x, assign) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py index a3be5458ad83f14cfe90e660d53de750d4bd9ef5..1834104606e4c354a144e764aca27ffd6fc00fea 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,25 +44,21 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - x = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) - x = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) - x = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) - x = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) self.fetch_list = [x.name] def run_model(self, exec_mode): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py index 08e5049a790ebfed3f4f543b94f893bd8c6cba41..f8a45e0bd2ab34bb4e5f0bcaeaab12bbe27de0e5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -52,13 +51,12 @@ class TestBase(IPUOpTest): 
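The repeated paddle.static.data rewrites above come down to black's default 88-character line length: a call that fits is joined onto one line, otherwise each argument goes on its own line with a trailing comma. A standalone illustration (variable names here are placeholders, not taken from the patch):

import paddle

paddle.enable_static()

# Fits within 88 columns, so black keeps the arguments on one line.
x = paddle.static.data(name='x', shape=[1, 3, 10, 10], dtype='float32')

# Too long to join (or already carrying a trailing comma), so black expands it
# to one argument per line and appends a trailing comma.
y = paddle.static.data(
    name='a_much_longer_feed_variable_name',
    shape=[1, 3, 10, 10],
    dtype='float32',
)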
@IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - x = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) x = paddle.fluid.layers.batch_norm(x, **self.attrs) self.fetch_list = [x.name] @@ -74,7 +72,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_atol(self): self.atol = 1e-6 self.rtol = 1e-6 @@ -89,7 +86,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_atol(self): self.atol = 1e-6 self.rtol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py index 113412b8341105cb797d3093823ee1cc41269145..caf28126d226941a60e9a0991cc473af1a22a05a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py @@ -22,7 +22,6 @@ import paddle.nn.functional as F class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -35,11 +34,11 @@ class TestBase(IPUOpTest): target = np.random.uniform(size=[3, 4, 2, 2]) self.feed_fp32 = { "x": x.astype(np.float32), - "target": target.astype(np.float32) + "target": target.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), - "target": target.astype(np.float16) + "target": target.astype(np.float16), } def set_feed_attr(self): @@ -53,12 +52,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - target = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + target = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = F.binary_cross_entropy(x, target, **self.attrs) self.fetch_list = [out.name] @@ -74,7 +73,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'reduction': 'sum', @@ -82,7 +80,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { 'reduction': 'none', diff --git a/python/paddle/fluid/tests/unittests/ipu/test_bmm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_bmm_op_ipu.py index 8ea20cebf07a3d6022c6d10a47ffecec6a4f767e..dd1d99a8748c2e3c7575c8e73c85a78754a0010a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_bmm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_bmm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,12 +44,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.bmm(x, 
y, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py index 675489d1e4cd598d5eb0a3b3d2b56c2f8648ff97..45b191afed1ec9aac8cf6c9dac2be07f6277137c 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -48,9 +47,11 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) out = paddle.cast(x, **self.attrs) self.fetch_list = [out.name] @@ -66,7 +67,6 @@ class TestBase(IPUOpTest): class TestEnableFp16(TestBase): - @property def fp16_enabled(self): return True @@ -85,7 +85,6 @@ class TestEnableFp16(TestBase): class TestCase2(TestBase): - def set_atol(self): super().set_atol() self.atol = 1e-3 @@ -102,7 +101,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_data_feed(self): self.feed_fp32 = { "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'), @@ -114,7 +112,6 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_data_feed(self): self.feed_fp32 = { "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'), @@ -126,7 +123,6 @@ class TestCase4(TestBase): class TestCase5(TestBase): - def set_data_feed(self): self.feed_fp32 = { "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'), @@ -138,7 +134,6 @@ class TestCase5(TestBase): class TestCase6(TestBase): - def set_data_feed(self): self.feed_fp32 = { "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'), @@ -151,7 +146,6 @@ class TestCase6(TestBase): @unittest.skip('float64 is not supported') class TestCase7(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['dtype'] = 'float64' @@ -159,7 +153,6 @@ class TestCase7(TestBase): @unittest.skip('skip float16 to float32') class TestCase8(TestBase): - def set_data_feed(self): self.feed_fp32 = { "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'), @@ -172,16 +165,15 @@ class TestCase8(TestBase): @unittest.skip('int32 to int8 is not supported') class TestCase9(TestBase): - def set_atol(self): super().set_atol() self.atol = 1 def set_data_feed(self): self.feed_fp32 = { - "x": - np.random.randint(low=1, high=100, size=[1, 3, 3, - 3]).astype('int32'), + "x": np.random.randint(low=1, high=100, size=[1, 3, 3, 3]).astype( + 'int32' + ), } def set_op_attrs(self): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py index a221ad617671d75d544a9b1240f3678efe35c2d2..a570024eb70246e029846f729c4b720986fd07b6 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -48,9 +47,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], 
shape=self.feed_shape[0], dtype='float32' + ) x = paddle.clip(x, **self.attrs) self.fetch_list = [x.name] @@ -66,111 +65,96 @@ class TestBase(IPUOpTest): class TestNoMin(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['max'] = 3.4 class TestNoMax(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['min'] = 0.1 class TestNoMinNoMax(TestBase): - def set_op_attrs(self): self.attrs = {} class TestMinMaxTensor(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - min = paddle.fluid.layers.fill_constant(name="min", - shape=[1], - dtype='float32', - value=0.1) - max = paddle.fluid.layers.fill_constant(name="max", - shape=[1], - dtype='float32', - value=3.4) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + + min = paddle.fluid.layers.fill_constant( + name="min", shape=[1], dtype='float32', value=0.1 + ) + max = paddle.fluid.layers.fill_constant( + name="max", shape=[1], dtype='float32', value=3.4 + ) x = paddle.clip(x, min=min, max=max) self.fetch_list = [x.name] class TestMinTensor(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - min = paddle.fluid.layers.fill_constant(name="min", - shape=[1], - dtype='float32', - value=0.1) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + + min = paddle.fluid.layers.fill_constant( + name="min", shape=[1], dtype='float32', value=0.1 + ) x = paddle.clip(x, min=min) self.fetch_list = [x.name] class TestMaxTensor(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - max = paddle.fluid.layers.fill_constant(name="max", - shape=[1], - dtype='float32', - value=3.4) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + + max = paddle.fluid.layers.fill_constant( + name="max", shape=[1], dtype='float32', value=3.4 + ) x = paddle.clip(x, max=max) self.fetch_list = [x.name] class TestCombine1(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - min = paddle.fluid.layers.fill_constant(name="min", - shape=[1], - dtype='float32', - value=0.1) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + + min = paddle.fluid.layers.fill_constant( + name="min", shape=[1], dtype='float32', value=0.1 + ) x = paddle.clip(x, min=min, max=3.4) self.fetch_list = [x.name] class TestCombine2(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - max = paddle.fluid.layers.fill_constant(name="max", - shape=[1], - dtype='float32', - value=3.4) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + + max = paddle.fluid.layers.fill_constant( + name="max", shape=[1], dtype='float32', value=3.4 + ) x = paddle.clip(x, min=0.1, max=max) self.fetch_list = [x.name] class TestIntInput(TestBase): - def set_feed(self): data = np.random.uniform(size=[5, 5]) self.feed_fp32 = {'x': data.astype(np.int32)} @@ -180,16 +164,15 @@ class TestIntInput(TestBase): @IPUOpTest.static_graph def build_model(self): - x = 
paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' + ) x = paddle.clip(x, min=0.1, max=3.4) self.fetch_list = [x.name] class TestIntMinMax(TestBase): - def set_feed(self): data = np.random.uniform(size=[5, 5]) self.feed_fp32 = {'x': data.astype(np.int32)} @@ -199,17 +182,15 @@ class TestIntMinMax(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') - min = paddle.fluid.layers.fill_constant(name="min", - shape=[1], - dtype='int32', - value=1) - max = paddle.fluid.layers.fill_constant(name="max", - shape=[1], - dtype='int32', - value=3) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' + ) + min = paddle.fluid.layers.fill_constant( + name="min", shape=[1], dtype='int32', value=1 + ) + max = paddle.fluid.layers.fill_constant( + name="max", shape=[1], dtype='int32', value=3 + ) x = paddle.clip(x, min=min, max=max) self.fetch_list = [x.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py index 733a5291cf50bd52c76c8ffa7ca1d527c569014c..588cae77fec333546881767a877b0f2c16109038 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): data2 = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = { 'x': data1.astype(np.float32), - 'y': data2.astype(np.float32) + 'y': data2.astype(np.float32), } self.feed_fp16 = { 'x': data1.astype(np.float16), - 'y': data2.astype(np.float16) + 'y': data2.astype(np.float16), } def set_feed_attr(self): @@ -50,12 +49,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.concat([x, y], **self.attrs) self.fetch_list = [out.name] @@ -71,7 +70,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": 1} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py index 824b6b628defac6a53520aea06bcc68e0e55741f..af1e74bfd124c6e528269c0d5f2bd3e5e0c68c65 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py @@ -21,7 +21,6 @@ from op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -52,9 +51,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = 
paddle.static.nn.conv2d_transpose(x, **self.attrs) self.fetch_list = [x.name] @@ -70,7 +69,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['stride'] = 2 @@ -78,7 +76,6 @@ class TestCase1(TestBase): @unittest.skip("Only support dilation=1") class TestCase2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['stride'] = 2 @@ -86,21 +83,18 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = 2 class TestCase4(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = "SAME" class TestCase5(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['stride'] = 2 @@ -108,14 +102,12 @@ class TestCase5(TestBase): class TestCase6(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = "VALID" class TestCase7(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = "VALID" @@ -123,7 +115,6 @@ class TestCase7(TestBase): class TestCase8(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['filter_size'] = 4 @@ -149,7 +140,6 @@ class TestCase10(TestBase): class TestCase11(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['groups'] = 3 @@ -157,17 +147,16 @@ class TestCase11(TestBase): # depthwise_conv2d_transpose Op class TestCase12(TestBase): - def set_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) weight = np.random.uniform(size=[3, 1, 3, 3]) self.feed_fp32 = { 'in_0': data.astype(np.float32), - 'in_1': weight.astype(np.float32) + 'in_1': weight.astype(np.float32), } self.feed_fp16 = { 'in_0': data.astype(np.float16), - 'in_1': weight.astype(np.float16) + 'in_1': weight.astype(np.float16), } self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) @@ -178,12 +167,12 @@ class TestCase12(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - weight = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + weight = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) x = paddle.nn.functional.conv2d_transpose(x, weight, **self.attrs) self.fetch_list = [x.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py index 076d51a5e6713de33cadc637e786a7039341a4a9..89474001e781a2766fbe0c3c84411fd13128d71a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -53,9 +52,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.fluid.layers.conv2d(x, **self.attrs) self.fetch_list = [x.name] @@ -71,35 +70,30 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['num_filters'] = 1 
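The feed-dict edits just above (e.g. TestCase12) are black's "magic trailing comma" at work: a trailing comma is added after the last item, and any literal that already has one stays expanded to one item per line. A minimal standalone illustration, reusing only the array shapes from the test:

import numpy as np

data = np.random.uniform(size=[1, 3, 10, 10])
weight = np.random.uniform(size=[3, 1, 3, 3])

# With the trailing comma after 'in_1', black keeps this dict one item per line
# instead of re-joining it onto a single line.
feed_fp32 = {
    'in_0': data.astype(np.float32),
    'in_1': weight.astype(np.float32),
}
feed_fp16 = {k: v.astype(np.float16) for k, v in feed_fp32.items()}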
class TestCase2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['filter_size'] = [3, 3] class TestCase2_1(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['filter_size'] = [3, 2] class TestCase3(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['stride'] = [2, 3] class TestCase4(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['dilation'] = [2, 2] @@ -113,21 +107,18 @@ class TestCase5(TestBase): class TestCase6(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = 2 class TestCase7(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = [2, 3] class TestCase8(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['padding'] = [1, 2, 2, 3] @@ -135,17 +126,16 @@ class TestCase8(TestBase): # depthwise_conv2d Op class TestCase9(TestBase): - def set_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) weight = np.random.uniform(size=[3, 1, 3, 3]) self.feed_fp32 = { 'in_0': data.astype(np.float32), - 'in_1': weight.astype(np.float32) + 'in_1': weight.astype(np.float32), } self.feed_fp16 = { 'in_0': data.astype(np.float16), - 'in_1': weight.astype(np.float16) + 'in_1': weight.astype(np.float16), } self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) @@ -156,12 +146,12 @@ class TestCase9(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - weight = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + weight = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) x = paddle.nn.functional.conv2d(x, weight, **self.attrs) self.fetch_list = [x.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py index 92cf442fe27cc67d9439f3e6ad25a795b0dba060..b7db271775dbde342f2992242e82869c8e6c75f5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): label = np.arange(3).reshape([3, 1]) self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } def set_feed_attr(self): @@ -52,20 +51,20 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) if on_ipu: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) else: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int64') - out = paddle.fluid.layers.cross_entropy(input=x, - label=label, - 
**self.attrs) + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64' + ) + out = paddle.fluid.layers.cross_entropy( + input=x, label=label, **self.attrs + ) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -82,7 +81,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'soft_label': False, @@ -91,23 +89,21 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[30, 70]) label = np.arange(30).reshape([30, 1]) self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } @unittest.skip("soft_label=True is not supported") class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = { 'soft_label': True, @@ -115,32 +111,30 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 5, 7]) label = np.random.randint(0, 7, [3, 5, 1], dtype='int64') self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } class TestCase5(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 5, 6, 7]) label = np.random.randint(0, 7, [3, 5, 6], dtype='int64') self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py index 5f859b064feac5a2774c9cb44ea39682c387d311..d31a78e1230d9b8206bd5ed9bbb537937a939cbb 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,9 +48,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) out = paddle.fluid.layers.cumsum(x, **self.attrs) self.fetch_list = [out.name] @@ -67,49 +66,44 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"exclusive": True, "reverse": False} class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"exclusive": False, "reverse": True} class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {"exclusive": True, "reverse": True} class TestCase4(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[1, 128]) self.feed_fp32 = {"x": x.astype(np.int32)} @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="int32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="int32" + ) out = paddle.fluid.layers.cumsum(x, **self.attrs) self.fetch_list = [out.name] class TestCase5(TestBase): - def set_data_feed(self): x = 
np.random.uniform(size=[1, 128]) self.feed_fp32 = {"x": x.astype(np.int64)} @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="int64") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="int64" + ) out = paddle.fluid.layers.cumsum(x, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py index de84b94bb7ddeac020e5d2df366ea3cd91819382..9013a56ae1c8e5dade7da0dff3c20c677c6f677a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -40,9 +39,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.static.nn.data_norm(input=x, **self.attrs) self.fetch_list = [x.name] @@ -58,15 +57,14 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"in_place": True} @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.static.nn.data_norm(input=x, **self.attrs) x = x + 1 self.fetch_list = [x.name] @@ -74,25 +72,21 @@ class TestCase1(TestBase): @unittest.skip("Do not support in_place=True when test single data_norm Op") class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"in_place": True} class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {"data_layout": "NHWC"} class TestCase4(TestBase): - def set_op_attrs(self): self.attrs = {"epsilon": 0.001} class TestCase5(TestBase): - def set_op_attrs(self): self.attrs = {"do_model_average_for_mean_and_var": True} @@ -101,16 +95,12 @@ class TestCase6(TestBase): # If enable_scale_and_shift=True, it requires to set values of scale and bias in `param_attr` def set_op_attrs(self): self.attrs = { - "param_attr": { - "scale_w": 0.5, - "bias": 0.1 - }, - "enable_scale_and_shift": True + "param_attr": {"scale_w": 0.5, "bias": 0.1}, + "enable_scale_and_shift": True, } class TestCase7(TestBase): - def set_op_attrs(self): self.attrs = { "param_attr": { @@ -118,9 +108,9 @@ class TestCase7(TestBase): "batch_sum": 0.1, "batch_square": 1e3, "scale_w": 0.5, - "bias": 0.1 + "bias": 0.1, }, - "enable_scale_and_shift": True + "enable_scale_and_shift": True, } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py index 5f8db4faba7441cb0a56056f0587301648d0b46b..a029f8a287bae0f15e4ddab861e56fb2d781246c 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py @@ -21,7 +21,6 @@ from op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): data_y = np.random.uniform(size=[7, 1, 5]) self.feed_fp32 = { "x": data_x.astype(np.float32), - "y": 
data_y.astype(np.float32) + "y": data_y.astype(np.float32), } self.feed_fp16 = { "x": data_x.astype(np.float16), - "y": data_y.astype(np.float16) + "y": data_y.astype(np.float16), } def set_feed_attr(self): @@ -51,12 +50,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.dist(x, y, **self.attrs) self.fetch_list = [out.name] @@ -72,19 +71,16 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"p": 0} class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"p": float("inf")} class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {"p": float("-inf")} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dot_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dot_op_ipu.py index ed0c36f53eb0c96c0489d9b491fa5b868ecb8e71..cf32eb09f5a385ca9c8cfe2622b2b3b7f6d57c72 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dot_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dot_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,12 +44,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.dot(x, y, **self.attrs) self.fetch_list = [out.name] @@ -66,7 +65,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[6]) y = np.random.uniform(size=[6]) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py index d104b39c292462e2037ffce268299b97730cc65a..d6b3813a6e6c0ba98eae9fa6b95b80c7650c09d6 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -42,14 +41,14 @@ class TestBase(IPUOpTest): self.attrs = { "dropout_prob": 0.5, "is_test": True, - "dropout_implementation": "downgrade_in_infer" + "dropout_implementation": "downgrade_in_infer", } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.fluid.layers.dropout(x, **self.attrs) out = paddle.fluid.layers.elementwise_add(x, x) self.fetch_list = [out.name] @@ -66,22 +65,20 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "dropout_prob": 0.5, "is_test": True, - 
"dropout_implementation": "upscale_in_train" + "dropout_implementation": "upscale_in_train", } class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "dropout_prob": 0.0, "is_test": False, - "dropout_implementation": "upscale_in_train" + "dropout_implementation": "upscale_in_train", } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py index 098f3d6b2ba330b40b856df0d7c364f576c34508..3450978d427556e1fde902353b2043bc4c88ce20 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py @@ -21,14 +21,12 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUD2STest class SimpleLayer(paddle.nn.Layer): - def __init__(self, use_ipu=False): super(SimpleLayer, self).__init__() self.use_ipu = use_ipu - self.conv = paddle.nn.Conv2D(in_channels=3, - out_channels=1, - kernel_size=2, - stride=1) + self.conv = paddle.nn.Conv2D( + in_channels=3, out_channels=1, kernel_size=2, stride=1 + ) def forward(self, x, target=None): x = self.conv(x) @@ -45,7 +43,6 @@ class SimpleLayer(paddle.nn.Layer): class TestBase(IPUD2STest): - def setUp(self): super().setUp() self.save_path = tempfile.TemporaryDirectory() @@ -59,28 +56,33 @@ class TestBase(IPUD2STest): np.random.seed(self.SEED) model = SimpleLayer(use_ipu) specs = [ - paddle.static.InputSpec(name="x", - shape=[32, 3, 10, 10], - dtype="float32"), + paddle.static.InputSpec( + name="x", shape=[32, 3, 10, 10], dtype="float32" + ), paddle.static.InputSpec(name="target", shape=[32], dtype="int64"), ] model = paddle.jit.to_static(model, input_spec=specs) - optim = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) data = paddle.uniform((32, 3, 10, 10), dtype='float32') label = paddle.randint(0, 10, shape=[32], dtype='int64') model_path = '{}/model_state_dict_{}.pdparams'.format( - self.save_path, 'ipu' if use_ipu else 'cpu') + self.save_path, 'ipu' if use_ipu else 'cpu' + ) optim_path = '{}/optim_state_dict_{}.pdopt'.format( - self.save_path, 'ipu' if use_ipu else 'cpu') + self.save_path, 'ipu' if use_ipu else 'cpu' + ) if use_ipu: paddle.set_device('ipu') ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=1, - is_training=True, - micro_batch_size=1, - enable_manual_shard=False) + ipu_strategy.set_graph_config( + num_ipus=1, + is_training=True, + micro_batch_size=1, + enable_manual_shard=False, + ) ipu_strategy.set_precision_config(enable_fp16=True) ipu_strategy.set_optimizer(optim) data = data.astype(np.float16) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py index 5a7d0836d6f1a9ed3d4dcf35460963c2eddf09d1..d8dd65646bfc34273a27c97fa266b7af9de85d11 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py @@ -17,7 +17,9 @@ import unittest import numpy as np import paddle -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramCache +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramCache, +) from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUD2STest from paddle.jit import to_static from paddle.optimizer.lr import LRScheduler @@ -25,18 +27,18 @@ from functools import partial class 
SimpleLayer(paddle.nn.Layer): - - def __init__(self, - loss_op=None, - use_softmax=True, - use_reduction=True, - use_identity_loss=True): + def __init__( + self, + loss_op=None, + use_softmax=True, + use_reduction=True, + use_identity_loss=True, + ): super(SimpleLayer, self).__init__() self.loss_op = loss_op - self.conv = paddle.nn.Conv2D(in_channels=3, - out_channels=1, - kernel_size=2, - stride=1) + self.conv = paddle.nn.Conv2D( + in_channels=3, out_channels=1, kernel_size=2, stride=1 + ) self.use_softmax = use_softmax self.use_reduction = use_reduction self.use_identity_loss = use_identity_loss @@ -61,7 +63,6 @@ class SimpleLayer(paddle.nn.Layer): class TestBase(IPUD2STest): - def setUp(self): self.set_op_attrs() self.set_data_feed() @@ -74,25 +75,30 @@ class TestBase(IPUD2STest): self.label = paddle.randint(0, 10, shape=[8], dtype='int64') def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=True, - use_reduction=not use_ipu, - use_identity_loss=use_ipu) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=True, + use_reduction=not use_ipu, + use_identity_loss=use_ipu, + ) def _test(self, use_ipu=False): paddle.seed(self.SEED) np.random.seed(self.SEED) model = self.create_model(use_ipu) - optim = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) if use_ipu: paddle.set_device('ipu') ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=1, - is_training=True, - micro_batch_size=1, - enable_manual_shard=False) + ipu_strategy.set_graph_config( + num_ipus=1, + is_training=True, + micro_batch_size=1, + enable_manual_shard=False, + ) ipu_strategy.set_optimizer(optim) epochs = 100 @@ -118,7 +124,6 @@ class TestBase(IPUD2STest): class TestSaveLoad(TestBase): - def setUp(self): super().setUp() self.save_path = tempfile.TemporaryDirectory() @@ -131,20 +136,25 @@ class TestSaveLoad(TestBase): paddle.seed(self.SEED) np.random.seed(self.SEED) model = self.create_model(use_ipu) - optim = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) model_path = '{}/model_state_dict_{}.pdparams'.format( - self.save_path, 'ipu' if use_ipu else 'cpu') + self.save_path, 'ipu' if use_ipu else 'cpu' + ) optim_path = '{}/optim_state_dict_{}.pdopt'.format( - self.save_path, 'ipu' if use_ipu else 'cpu') + self.save_path, 'ipu' if use_ipu else 'cpu' + ) if use_ipu: paddle.set_device('ipu') ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=1, - is_training=True, - micro_batch_size=1, - enable_manual_shard=False) + ipu_strategy.set_graph_config( + num_ipus=1, + is_training=True, + micro_batch_size=1, + enable_manual_shard=False, + ) ipu_strategy.set_optimizer(optim) epochs = 100 @@ -182,7 +192,6 @@ class TestSaveLoad(TestBase): class TestPatch(IPUD2STest): - def setUp(cls): paddle.disable_static() @@ -201,16 +210,16 @@ class TestPatch(IPUD2STest): class TestWithoutIdentityLoss1(TestBase): - def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=True, - use_reduction=True, - use_identity_loss=False) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=True, + use_reduction=True, + use_identity_loss=False, + ) class TestWithoutIdentityLoss2(TestBase): - def set_op_attrs(self): self.loss_op = paddle.fluid.layers.softmax_with_cross_entropy @@ 
-219,14 +228,15 @@ class TestWithoutIdentityLoss2(TestBase): self.label = paddle.randint(0, 10, shape=[8, 1], dtype='int64') def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=False, - use_reduction=True, - use_identity_loss=False) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=False, + use_reduction=True, + use_identity_loss=False, + ) class TestWithoutIdentityLoss3(TestBase): - def set_op_attrs(self): self.loss_op = partial(paddle.fluid.layers.kldiv_loss, reduction="none") @@ -235,14 +245,15 @@ class TestWithoutIdentityLoss3(TestBase): self.label = paddle.rand(shape=[8, 81], dtype='float32') def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=True, - use_reduction=True, - use_identity_loss=False) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=True, + use_reduction=True, + use_identity_loss=False, + ) class TestWithoutIdentityLoss4(TestBase): - def set_op_attrs(self): self.loss_op = paddle.nn.functional.binary_cross_entropy @@ -251,27 +262,31 @@ class TestWithoutIdentityLoss4(TestBase): self.label = paddle.rand(shape=[8, 81], dtype='float32') def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=True, - use_reduction=False, - use_identity_loss=False) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=True, + use_reduction=False, + use_identity_loss=False, + ) class TestWithoutIdentityLoss5(TestBase): - def set_op_attrs(self): self.loss_op = paddle.fluid.layers.sigmoid_cross_entropy_with_logits def set_data_feed(self): self.data = paddle.uniform((8, 3, 10, 10), dtype='float32') - self.label = paddle.randint(0, 10, shape=[8, 81], - dtype='int64').astype('float32') + self.label = paddle.randint(0, 10, shape=[8, 81], dtype='int64').astype( + 'float32' + ) def create_model(self, use_ipu=False): - return SimpleLayer(loss_op=self.loss_op, - use_softmax=True, - use_reduction=True, - use_identity_loss=False) + return SimpleLayer( + loss_op=self.loss_op, + use_softmax=True, + use_reduction=True, + use_identity_loss=False, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py index 9c35e43970e74c813c40981a6cba4d0fd7c05bc9..8dcb3097c2f52a5556af5f2bc4a707a4de30b911 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestMul(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,12 +42,12 @@ class TestMul(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = self.op(x, y, **self.attrs) self.fetch_list = [out.name] @@ -125,43 +124,36 @@ class TestMul(IPUOpTest): class TestAdd(TestMul): - def set_test_op(self): self.op = paddle.fluid.layers.elementwise_add class TestSub(TestMul): - def set_test_op(self): self.op = paddle.fluid.layers.elementwise_sub class TestDiv(TestMul): - def set_test_op(self): self.op = 
paddle.fluid.layers.elementwise_div class TestMin(TestMul): - def set_test_op(self): self.op = paddle.fluid.layers.elementwise_min class TestMax(TestMul): - def set_test_op(self): self.op = paddle.fluid.layers.elementwise_max class TestPow(TestMul): - def set_test_op(self): self.op = paddle.fluid.layers.elementwise_pow class TestMod(TestMul): - def set_atol(self): self.atol = 1e-7 self.rtol = 1e-5 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py index 77a78a7cb78ca175a397359cfe65dbd402e4a0b8..e0ae7fa24676a9b4c48a8f0bc48cc9fa16b768e8 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,12 +49,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.equal(x, y, **self.attrs) self.fetch_list = [out.name] @@ -71,7 +70,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.ones([1, 10]) y = np.ones([1, 10]) @@ -80,7 +78,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_data_feed(self): x = np.ones([1, 10]) y = np.arange(0, 10).reshape([1, 10]) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py index f3b1d227d95a1c5b21fe06c02b85dd183ad977ff..335a78c5982f311ef9a04d311f754488e7de2f29 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -57,25 +56,27 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name='image', - shape=[1, 3, 10, 10], - dtype='float32') - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + image = paddle.static.data( + name='image', shape=[1, 3, 10, 10], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) weight_decay = self.attrs['weight_decay'] - opt = paddle.optimizer.SGD(learning_rate=1e-1, - weight_decay=weight_decay) + opt = paddle.optimizer.SGD( + learning_rate=1e-1, weight_decay=weight_decay + ) if self.attrs['optimizer'] == 'adam': - opt = paddle.optimizer.Adam(learning_rate=1e-1, - weight_decay=weight_decay) + opt = paddle.optimizer.Adam( + learning_rate=1e-1, weight_decay=weight_decay + ) elif self.attrs['optimizer'] == 'lamb': - opt = paddle.optimizer.Lamb(learning_rate=1e-1, - lamb_weight_decay=weight_decay) + opt = paddle.optimizer.Lamb( + learning_rate=1e-1, lamb_weight_decay=weight_decay + ) opt.minimize(loss) if run_ipu: @@ -92,8 +93,8 @@ class 
TestBase(IPUOpTest): ipu_strategy.set_graph_config(is_training=True) ipu_strategy.set_options({"runtime_options.enable_eval": True}) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -102,16 +103,17 @@ class TestBase(IPUOpTest): for epoch in range(200): if epoch == 100: ipu_strategy.set_options( - {"runtime_options.enable_eval": False}) - loss_res = exe.run(program, - feed=self.feed, - fetch_list=[loss]) + {"runtime_options.enable_eval": False} + ) + loss_res = exe.run( + program, feed=self.feed, fetch_list=[loss] + ) result.append(loss_res) else: for epoch in range(100): - loss_res = exe.run(program, - feed=self.feed, - fetch_list=[loss]) + loss_res = exe.run( + program, feed=self.feed, fetch_list=[loss] + ) result.append(loss_res) return np.array(result) @@ -120,10 +122,9 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() self.assertTrue(ipu_loss[0] == ipu_loss[99]) - np.testing.assert_allclose(ipu_loss[100:], - cpu_loss, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + ipu_loss[100:], cpu_loss, rtol=1e-05, atol=self.atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_as_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_as_v2_op_ipu.py index ee68eba5e543f290afdd11a8f5eb532623904335..b66e1a6c5bd627309b45c3b2a01469315af91591 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_expand_as_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_as_v2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -33,11 +32,11 @@ class TestBase(IPUOpTest): data_y = np.random.uniform(size=[2, 2, 3]) self.feed_fp32 = { 'x': data_x.astype(np.float32), - 'y': data_y.astype(np.float32) + 'y': data_y.astype(np.float32), } self.feed_fp16 = { 'x': data_x.astype(np.float16), - 'y': data_y.astype(np.float16) + 'y': data_y.astype(np.float16), } def set_feed_attr(self): @@ -47,12 +46,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype="float32" + ) out = paddle.expand_as(x, y) self.fetch_list = [out.name] @@ -68,33 +67,31 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): data_x = np.random.uniform(size=[2, 3]) data_y = np.random.uniform(size=[2, 4, 2, 3]) self.feed_fp32 = { 'x': data_x.astype(np.float32), - 'y': data_y.astype(np.float32) + 'y': data_y.astype(np.float32), } self.feed_fp16 = { 'x': data_x.astype(np.float16), - 'y': data_y.astype(np.float16) + 'y': data_y.astype(np.float16), } @unittest.skip("corresponding dimensions must have the same value.") class TestCase2(TestBase): - def set_data_feed(self): data_x = np.random.uniform(size=[2, 3]) data_y = np.random.uniform(size=[2, 4, 3, 3]) self.feed_fp32 = { 'x': data_x.astype(np.float32), - 'y': data_y.astype(np.float32) + 'y': data_y.astype(np.float32), } 
self.feed_fp16 = { 'x': data_x.astype(np.float16), - 'y': data_y.astype(np.float16) + 'y': data_y.astype(np.float16), } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py index 843ec0438d74d9af09726443e44bfac96f6658df..784a6a41a41f563d650ba559252739f3c3af54cb 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) out = paddle.fluid.layers.expand(x, **self.attrs) self.fetch_list = [out.name] @@ -62,7 +61,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[2, 2]) self.feed_fp32 = {"x": x.astype(np.float32)} @@ -78,14 +76,15 @@ class TestCase1(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) expand_times = paddle.fluid.layers.fill_constant( - shape=[len(self.feed_shape[0])], dtype="int32", value=2) - out = paddle.fluid.layers.expand(x, - expand_times=expand_times, - **self.attrs) + shape=[len(self.feed_shape[0])], dtype="int32", value=2 + ) + out = paddle.fluid.layers.expand( + x, expand_times=expand_times, **self.attrs + ) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py index 5cb949a1943e76dee8b68d3c87f9fd0916b5ad3a..cb98ed3253f298f89eadde695630754c14336bfa 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_v2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) out = paddle.expand(x, **self.attrs) self.fetch_list = [out.name] @@ -62,13 +61,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_attrs(self): self.attrs = {"shape": [5, 2, 2, 3]} class TestCase2(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 1, 3]) self.feed_fp32 = {'x': data.astype(np.float32)} @@ -80,14 +77,12 @@ class TestCase2(TestBase): @unittest.skip("corresponding dimensions must have the same value.") class TestCase3(TestBase): - def set_attrs(self): self.attrs = {"shape": [5, 2, 4, 3]} @unittest.skip("Do not support `shape` = Tensors.") class TestCase4(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[3, 3]) self.feed_fp32 = {'x': data.astype(np.float32)} @@ -95,9 +90,9 @@ class TestCase4(TestBase): @IPUOpTest.static_graph def build_model(self): - x = 
paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) self.attrs = { 'name': 'y', 'shape': [3], diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py index 74ecba6f18c869e672c79a83fa854953be2fa69d..ae43689dfb499dac047aea479ea7906d5f49f886 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x_fill = paddle.full_like(x, **self.attrs) out = paddle.fluid.layers.elementwise_add(x_fill, x_fill) self.fetch_list = [out.name] @@ -62,13 +61,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {'fill_value': 3, 'dtype': 'int32'} class TestError(TestBase): - @IPUOpTest.static_graph def build_model(self): x = paddle.fluid.data('x', [-1, 3, 13], 'float32') @@ -82,8 +79,9 @@ class TestError(TestBase): def test_error(): self.run_op_test(IPUOpTest.ExecutionMode.IPU_FP32) - self.assertRaisesRegex(Exception, "Please check tensor shape setting", - test_error) + self.assertRaisesRegex( + Exception, "Please check tensor shape setting", test_error + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py index 0e1e17cb65ec9f0da667318c46125f3d88f3002e..71dfb2d409747fc812c6a7ca8b3ce766f7bcd11a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py @@ -20,7 +20,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -63,7 +62,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'name': 'x', diff --git a/python/paddle/fluid/tests/unittests/ipu/test_flatten_contiguous_range_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_flatten_contiguous_range_op_ipu.py index 4723f753fb698155c0c3d9632dd03233c7404986..11bebbe36f70bae5262cd28602f80b9e50a29353 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_flatten_contiguous_range_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_flatten_contiguous_range_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,9 +44,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.flatten(x=x, **self.attrs) self.fetch_list = [out.name] @@ -63,7 +62,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def 
set_op_attrs(self): self.attrs = {} self.attrs['start_axis'] = 0 @@ -71,7 +69,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['start_axis'] = 1 @@ -79,7 +76,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['start_axis'] = 1 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py index d7c1da14e296f9e350b6dbd50e3c9381880b4f54..f363e40c5fdce441457272e77e75ba62314f45e3 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.flatten(x=x, **self.attrs) self.fetch_list = [out.name] @@ -62,14 +61,12 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['axis'] = 0 class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['axis'] = 2 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_flip_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_flip_op_ipu.py index 07e0acb60a12311e2fc7d95f9b487ca462cf9fc0..abdb9d07b3a891a37173915144d83895000180d5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_flip_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_flip_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -51,9 +50,11 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) x = paddle.flip(x, **self.attrs) self.fetch_list = [x.name] @@ -69,7 +70,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_feed(self): data = np.random.randint(0, 10, size=[3, 2, 2]) self.feed_fp32 = {'x': data.astype(np.int32)} @@ -77,7 +77,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_feed(self): data = np.random.randint(0, 2, size=[4, 3, 2, 2]) self.feed_fp32 = {'x': data.astype(np.bool)} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fp16_support_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fp16_support_ipu.py index 708dd0f4054241309e6002e078485d3ec5ac0b64..8433e45f46ce1346baa0746149f67c5e7f0eca8a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fp16_support_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fp16_support_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,22 +49,19 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - conv1 = paddle.static.nn.conv2d(x, - 
num_filters=3, - filter_size=3, - bias_attr=False) - conv2 = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) + conv2 = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) add1 = conv1 + conv2 - conv3 = paddle.static.nn.conv2d(add1, - num_filters=8, - filter_size=8, - bias_attr=False) + conv3 = paddle.static.nn.conv2d( + add1, num_filters=8, filter_size=8, bias_attr=False + ) out = paddle.fluid.layers.relu(conv3, **self.attrs) self.fetch_list = [out.name] @@ -81,7 +77,6 @@ class TestBase(IPUOpTest): class TestIntInput(TestBase): - def set_data_feed(self): embedding = np.random.uniform(size=[10, 20]) indice = np.array([1, 3, 5]).astype(np.int32) @@ -96,12 +91,12 @@ class TestIntInput(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) out = paddle.fluid.layers.gather(x, index=y) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py index 13a48e5a98f1b640ebe157b3e567354a8f5a6683..5913db81f31cf4d287e35125355d3fc01b703cf4 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,12 +43,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) out = paddle.fluid.layers.gather(x, index=y, **self.attrs) self.fetch_list = [out.name] @@ -65,7 +64,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[100]) y = np.array([1, 3, 5]) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py index 2d14621d5fc7e866349aec923403dc901e8a771a..031fd2577724979b6256edac417c1e2c1905cc47 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = 
paddle.fluid.layers.gelu(x, **self.attrs) self.fetch_list = [out.name] @@ -61,7 +60,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_atol(self): self.atol = 1e-10 self.rtol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gradient_clip_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gradient_clip_ipu.py index b63d176ff2791979dbac11feadd676f57a4e5004..d8b1aaf4c4a29752d2367c83035cb180e856f7ff 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_gradient_clip_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_gradient_clip_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -60,13 +59,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - image = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + image = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) self.fetch_list = [loss.name] @@ -74,20 +72,23 @@ class TestBase(IPUOpTest): # Only support ClipGradByGlobalNorm clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) if self.attrs['optimizer'] == 'sgd': - opt = paddle.optimizer.SGD(learning_rate=1e-1, - weight_decay=weight_decay, - grad_clip=clip) + opt = paddle.optimizer.SGD( + learning_rate=1e-1, weight_decay=weight_decay, grad_clip=clip + ) elif self.attrs['optimizer'] == 'adam': - opt = paddle.optimizer.Adam(learning_rate=1e-1, - weight_decay=weight_decay, - grad_clip=clip) + opt = paddle.optimizer.Adam( + learning_rate=1e-1, weight_decay=weight_decay, grad_clip=clip + ) elif self.attrs['optimizer'] == 'lamb': - opt = paddle.optimizer.Lamb(learning_rate=1e-1, - lamb_weight_decay=weight_decay, - grad_clip=clip) + opt = paddle.optimizer.Lamb( + learning_rate=1e-1, + lamb_weight_decay=weight_decay, + grad_clip=clip, + ) else: raise ValueError( - f"Not supported optimizer {self.attrs['optimizer']} for test") + f"Not supported optimizer {self.attrs['optimizer']} for test" + ) opt.minimize(loss) def run_model(self, exec_mode): @@ -102,7 +103,6 @@ class TestBase(IPUOpTest): class TestAdam(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', @@ -111,7 +111,6 @@ class TestAdam(TestBase): class TestLamb(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', diff --git a/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py index 4f2e9a1a94bfcda3fde3104dbb7a6665dc351857..c48117dd40126352ae61c956ab06058943e09125 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestGreaterThan(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -35,12 +34,12 @@ class TestGreaterThan(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], 
dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = self.op(x, y, **self.attrs) self.fetch_list = [out.name] @@ -114,25 +113,21 @@ class TestGreaterThan(IPUOpTest): class TestLessThan(TestGreaterThan): - def set_test_op(self): self.op = paddle.fluid.layers.less_than class TestEqual(TestGreaterThan): - def set_test_op(self): self.op = paddle.fluid.layers.equal class TestGreaterEqual(TestGreaterThan): - def set_test_op(self): self.op = paddle.fluid.layers.greater_equal class TestLessEqual(TestGreaterThan): - def set_test_op(self): self.op = paddle.fluid.layers.less_equal diff --git a/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py index 70f1a6a28e97b870883ca21c76f66d023f4fe6b6..fbffe321b5575decd3dcb12575778c0e0cbc3b62 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -53,30 +52,27 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) if self.is_training: ch = self.feed_shape[0][1] - conv1 = paddle.static.nn.conv2d(x, - num_filters=ch, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + x, num_filters=ch, filter_size=3, bias_attr=False + ) scale = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True) - out = paddle.fluid.layers.nn.group_norm(conv1, - param_attr=scale, - bias_attr=bias, - **self.attrs) + out = paddle.fluid.layers.nn.group_norm( + conv1, param_attr=scale, bias_attr=bias, **self.attrs + ) loss = paddle.mean(out) adam = paddle.optimizer.Adam(learning_rate=1e-2) adam.minimize(loss) self.fetch_list = [loss.name] else: - out = paddle.fluid.layers.nn.group_norm(x, - param_attr=True, - bias_attr=True, - **self.attrs) + out = paddle.fluid.layers.nn.group_norm( + x, param_attr=True, bias_attr=True, **self.attrs + ) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -91,7 +87,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "groups": 4, @@ -101,7 +96,6 @@ class TestCase1(TestBase): class TestTrainCase1(TestBase): - def set_training(self): self.is_training = True self.epoch = 20 @@ -109,7 +103,6 @@ class TestTrainCase1(TestBase): @unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel") class TestTrainCase2(TestBase): - def set_atol(self): self.atol = 7e-4 self.rtol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_huber_loss_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_huber_loss_op_ipu.py index 613fbd944c26a43bdd1fcb4fc9d2381b2c5f2ad0..5030e368083ee0289aba1c9b80a156631553e3f4 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_huber_loss_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_huber_loss_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): target = np.random.uniform(size=[3, 4, 2, 2]) self.feed_fp32 = { "x": 
x.astype(np.float32), - "target": target.astype(np.float32) + "target": target.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), - "target": target.astype(np.float16) + "target": target.astype(np.float16), } def set_feed_attr(self): @@ -52,12 +51,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - target = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + target = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.huber_loss(x, target, **self.attrs) self.fetch_list = [out.name] @@ -73,7 +72,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'delta': 0.5, @@ -81,7 +79,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { 'delta': 0.0, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py index 53baf0de0acce865dc0386c53c33e62fddc2d117..8e2d2110cf1191508e7c57903f4e7851f54498b3 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py @@ -20,14 +20,15 @@ import paddle.fluid as fluid import paddle.fluid.compiler as compiler import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import ( + IPUOpTest, + np_dtype_to_fluid_str, +) paddle.enable_static() class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -41,8 +42,9 @@ class TestBase(IPUOpTest): def set_feed(self): self.feed = { - "x": np.random.uniform(low=-2, high=2, size=[3, - 5]).astype('float32'), + "x": np.random.uniform(low=-2, high=2, size=[3, 5]).astype( + 'float32' + ), } def set_feed_attr(self): @@ -62,9 +64,11 @@ class TestBase(IPUOpTest): with fluid.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) out = self.op(x, reduction) fetch_list = [out.name] @@ -77,7 +81,8 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(num_ipus=1, is_training=False) ipu_compiler = compiler.IpuCompiledProgram( - main_prog, ipu_strategy=ipu_strategy) + main_prog, ipu_strategy=ipu_strategy + ) program = ipu_compiler.compile(feed_list, fetch_list) ipu_res = exe.run(program, self.feed, fetch_list) @@ -92,10 +97,9 @@ class TestBase(IPUOpTest): # none cpu_res = self.feed['x'] - np.testing.assert_allclose(ipu_res[0], - cpu_res, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + ipu_res[0], cpu_res, rtol=1e-05, atol=self.atol + ) def test_base(self): # TODO: use string instead of int for reduction diff --git a/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py index 4a1aa8cba44182f89fc577e2fca35b159ac0ab85..c370debfc4b305cc5c8b0e65e5630ef65dd07756 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -60,19 +59,24 @@ class TestBase(IPUOpTest): startup_prog.random_seed = self.SEED generator = paddle.fluid.unique_name.UniqueNameGenerator() self.full_name = '/'.join( - [self.attrs['path'].name, self.attrs['model_name']]) + [self.attrs['path'].name, self.attrs['model_name']] + ) with paddle.fluid.unique_name.guard(generator): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - conv1 = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False, - name='conv2d') + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype='float32', + ) + conv1 = paddle.static.nn.conv2d( + x, + num_filters=3, + filter_size=3, + bias_attr=False, + name='conv2d', + ) loss = paddle.mean(conv1) if self.attrs['is_training']: @@ -93,24 +97,22 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config( - is_training=self.attrs['is_training']) + is_training=self.attrs['is_training'] + ) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, fetch_list) result = [] for i in range(self.attrs['steps']): - tmp = exe.run(program, - feed=self.feed, - fetch_list=fetch_list) + tmp = exe.run( + program, feed=self.feed, fetch_list=fetch_list + ) result.append(tmp) - paddle.static.save_inference_model(self.full_name, - x, - loss, - exe, - program=program.org_program) + paddle.static.save_inference_model( + self.full_name, x, loss, exe, program=program.org_program + ) def _test_load(self, run_ipu): if run_ipu: @@ -119,8 +121,11 @@ class TestBase(IPUOpTest): place = paddle.CPUPlace() exe = paddle.static.Executor(place) - [inference_program, feed_target_names, fetch_targets - ] = (paddle.static.load_inference_model(self.full_name, exe)) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(self.full_name, exe) if run_ipu: feed_list = feed_target_names @@ -128,8 +133,8 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=False) program = paddle.static.IpuCompiledProgram( - inference_program, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + inference_program, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = inference_program @@ -147,7 +152,6 @@ class TestBase(IPUOpTest): class TestAdam(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['steps'] = 100 @@ -158,7 +162,6 @@ class TestAdam(TestBase): class TestLamb(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['steps'] = 100 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py index b24e4be7ae738131eca9b79983f3220760c436c8..3479899ebda8c7c3b3d2f3ec486e4a7ebe039dd3 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py @@ -21,7 +21,6 
@@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,31 +48,28 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) if self.is_training: ch = self.feed_shape[0][1] - conv1 = paddle.static.nn.conv2d(x, - num_filters=ch, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + x, num_filters=ch, filter_size=3, bias_attr=False + ) scale = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True) - out = paddle.fluid.layers.nn.instance_norm(conv1, - param_attr=scale, - bias_attr=bias, - **self.attrs) + out = paddle.fluid.layers.nn.instance_norm( + conv1, param_attr=scale, bias_attr=bias, **self.attrs + ) loss = paddle.mean(out) adam = paddle.optimizer.Adam(learning_rate=1e-2) adam.minimize(loss) self.fetch_list = [loss.name] else: - out = paddle.fluid.layers.nn.instance_norm(x, - param_attr=True, - bias_attr=True, - **self.attrs) + out = paddle.fluid.layers.nn.instance_norm( + x, param_attr=True, bias_attr=True, **self.attrs + ) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -88,7 +84,6 @@ class TestBase(IPUOpTest): class TestTrainCase1(TestBase): - def set_training(self): self.is_training = True self.epoch = 10 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_interpolate_ops_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_interpolate_ops_ipu.py index 108e953659dc5cece3e79d8ea80725a7ecf2f503..e31320585db1c1bcbe8e14a22b35def47d2bd97d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_interpolate_ops_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_interpolate_ops_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,9 +44,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) out = paddle.nn.functional.interpolate(x, **self.attrs) self.fetch_list = [out.name] @@ -63,14 +62,12 @@ class TestBase(IPUOpTest): class TestCase0(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs["size"] = [3, 4] class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs["scale_factor"] = [2, 1] @@ -78,19 +75,16 @@ class TestCase1(TestBase): @unittest.skip("Only one of size or scale_factor should be defined") class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"size": [12, 12], "scale_factor": [2, 1]} class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {"scale_factor": 2.5} class TestBilinear(TestBase): - @property def fp16_enabled(self): return False @@ -107,7 +101,6 @@ class TestBilinear(TestBase): # Take long time class TestBicubic(TestBase): - @property def fp16_enabled(self): return False @@ -124,7 +117,6 @@ class TestBicubic(TestBase): # Trilinear requires 5-D input class TestTrilinear(TestBase): - @property def fp16_enabled(self): return False @@ -144,13 +136,12 @@ class TestTrilinear(TestBase): self.attrs = { "size": [12, 12, 12], "mode": "trilinear", - "data_format": "NCDHW" + "data_format": "NCDHW", } # Linear 
requires 3-D input class TestLinear(TestBase): - @property def fp16_enabled(self): return False @@ -171,9 +162,9 @@ class TestLinear(TestBase): @unittest.skip( - "Transfer to Pool Op with 2-D ksize, now we only support 1-D ksize.") + "Transfer to Pool Op with 2-D ksize, now we only support 1-D ksize." +) class TestArea(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[2, 3, 6, 6]) self.feed_fp32 = {"x": x.astype(np.float32)} @@ -185,7 +176,6 @@ class TestArea(TestBase): # align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear class TestAlignCorners(TestBase): - @property def fp16_enabled(self): return False @@ -194,13 +184,12 @@ class TestAlignCorners(TestBase): self.attrs = { "size": [12, 12], "align_corners": True, - "mode": "bilinear" + "mode": "bilinear", } # class TestAlignMode(TestBase): - def set_op_attrs(self): self.attrs = {"size": [12, 12], "align_mode": 1} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py index f04a3bb55e5d9dbdcc2ca2a942f644b3553fd972..bb430fe35a97b0741dfa3b2bd2e1873dddfa5f98 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestIpuShard(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() @@ -59,14 +58,12 @@ class TestIpuShard(unittest.TestCase): def test_ipu_shard(self): ipu_index_list = self._test() expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) class TestIpuPipeline(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() @@ -104,10 +101,9 @@ class TestIpuPipeline(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy_ipu.py index ed4b258b75437625ec8c03e82f315da802b93b6a..bd00f9c9a65340604b965089f5a901c2241a1721 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy_ipu.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestIpuStrategy(unittest.TestCase): - def test_set_options(self): ipu_strategy = paddle.static.IpuStrategy() all_option_names = ipu_strategy._ipu_strategy.get_all_option_names() skip_options = [] skip_options.append( - 'mean_accumulation_and_replication_reduction_strategy') + 'mean_accumulation_and_replication_reduction_strategy' + ) skip_options.append('random_seed') for option_name in all_option_names: @@ -48,7 +48,9 @@ class TestIpuStrategy(unittest.TestCase): try: ipu_strategy.set_options({option_name: set_value}) new_value = ipu_strategy.get_option(option_name) - assert new_value == set_value, f"set {option_name} to {set_value} failed" + assert ( + new_value == set_value + ), f"set {option_name} to {set_value} failed" except: raise Exception(f"set {option_name} to {set_value} failed") @@ -70,21 
+72,24 @@ class TestIpuStrategy(unittest.TestCase): options['engine_options'] = { 'debug.allowOutOfMemory': 'true', 'autoReport.directory': 'path', - 'autoReport.all': 'true' + 'autoReport.all': 'true', } options['random_seed'] = 1234 for k, v in options.items(): ipu_strategy.set_options({k: v}) - if (isinstance(v, list)): - assert v.sort() == ipu_strategy.get_option( - k).sort(), f"set {k} to {v} failed " + if isinstance(v, list): + assert ( + v.sort() == ipu_strategy.get_option(k).sort() + ), f"set {k} to {v} failed " else: assert v == ipu_strategy.get_option( - k), f"set {k} to {v} failed " + k + ), f"set {k} to {v} failed " # The custom logger need 2 int as inputs logger = lambda progress, total: print( - f"compile progrss: {progress}/{total}") + f"compile progrss: {progress}/{total}" + ) ipu_strategy.set_options({'compilation_progress_logger': logger}) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py index ef3e173fdeda92881e8c9939f69cebb52af90fd2..d08df5399d939d1517a05f2d182fb61c03b59433 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): target = np.random.uniform(size=[3, 4, 2, 2]) self.feed_fp32 = { "x": x.astype(np.float32), - "target": target.astype(np.float32) + "target": target.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), - "target": target.astype(np.float16) + "target": target.astype(np.float16), } def set_feed_attr(self): @@ -52,12 +51,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - target = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + target = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.kldiv_loss(x, target, **self.attrs) self.fetch_list = [out.name] @@ -73,7 +72,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'reduction': 'sum', @@ -81,7 +79,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { 'reduction': 'none', diff --git a/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py index 9bf457d6f924f376889e6e0b45b02e0bd92a77e0..b6ca7c34714ea9f3a8e31af7be77fa725717985c 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -56,30 +55,27 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) if self.is_training: ch = self.feed_shape[0][1] - conv1 = 
paddle.static.nn.conv2d(x, - num_filters=ch, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + x, num_filters=ch, filter_size=3, bias_attr=False + ) scale = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True) - out = paddle.fluid.layers.nn.layer_norm(conv1, - param_attr=scale, - bias_attr=bias, - **self.attrs) + out = paddle.fluid.layers.nn.layer_norm( + conv1, param_attr=scale, bias_attr=bias, **self.attrs + ) loss = paddle.mean(out) self.fetch_list = [loss.name] else: scale = self.attrs['scale'] bias = self.attrs['shift'] - out = paddle.fluid.layers.nn.layer_norm(x, - param_attr=scale, - bias_attr=bias, - **self.attrs) + out = paddle.fluid.layers.nn.layer_norm( + x, param_attr=scale, bias_attr=bias, **self.attrs + ) self.fetch_list = [out.name] if self.is_training: @@ -89,8 +85,9 @@ class TestBase(IPUOpTest): elif self.optimizer == 'adam': optimizer = paddle.optimizer.Adam(learning_rate=1e-2) elif self.optimizer == 'lamb': - optimizer = paddle.optimizer.Lamb(learning_rate=1e-2, - lamb_weight_decay=0.0) + optimizer = paddle.optimizer.Lamb( + learning_rate=1e-2, lamb_weight_decay=0.0 + ) if optimizer is not None: optimizer.minimize(loss) @@ -107,7 +104,6 @@ class TestBase(IPUOpTest): @unittest.skip('raise error') class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "scale": False, @@ -119,7 +115,6 @@ class TestCase1(TestBase): @unittest.skip('raise error') class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "scale": True, @@ -130,7 +125,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = { "scale": True, @@ -142,13 +136,12 @@ class TestCase3(TestBase): class TestTrainCase1(TestBase): - def set_op_attrs(self): self.attrs = { "scale": True, "shift": True, "begin_norm_axis": 1, - "epsilon": 1e-05 + "epsilon": 1e-05, } self.optimizer = 'sgd' @@ -162,7 +155,6 @@ class TestTrainCase1(TestBase): class TestTrainCase3(TestBase): - def set_atol(self): super().set_atol() self.atol = 5e-3 @@ -172,7 +164,7 @@ class TestTrainCase3(TestBase): "scale": True, "shift": True, "begin_norm_axis": 2, - "epsilon": 1e-05 + "epsilon": 1e-05, } self.optimizer = 'lamb' diff --git a/python/paddle/fluid/tests/unittests/ipu/test_logical_not_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_logical_not_op_ipu.py index d8eaa2f81bcebafc8ce4990dd3570a1986e4c1d4..c75f6faa65b65c3eed8845eabb65858c0d1b9088 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_logical_not_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_logical_not_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -40,9 +39,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="bool") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="bool" + ) out = paddle.fluid.layers.logical_not(x) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_logical_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_logical_x_op_ipu.py index 79c22f47da5c92fe3477fa1d5157fe74f71661f4..a2d3817f7f54baa6438b8532450cd49a62b682dc 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_logical_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_logical_x_op_ipu.py @@ -21,7 +21,6 @@ from 
paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestLogicalAnd(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -39,12 +38,16 @@ class TestLogicalAnd(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) + y = paddle.static.data( + name=self.feed_list[1], + shape=self.feed_shape[1], + dtype=self.feed_dtype[1], + ) out = self.op(x, y, **self.attrs) self.fetch_list = [out.name] @@ -79,7 +82,6 @@ class TestLogicalAnd(IPUOpTest): class TestLogicalOr(TestLogicalAnd): - def set_test_op(self): self.op = paddle.fluid.layers.logical_or diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py index ffcf8a64f53f99a3cf24f71aeb694b8817fc71e4..b2aa05fb73438273e310ae1424cfcd8abd01a9bc 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,14 +44,14 @@ class TestBase(IPUOpTest): "is_sparse": False, "is_distributed": False, "padding_idx": -1, - "dtype": 'float32' + "dtype": 'float32', } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int64') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64' + ) out = paddle.fluid.layers.embedding(x, **self.attrs) if self.is_training: loss = paddle.mean(out) @@ -76,7 +75,6 @@ class TestBase(IPUOpTest): class TestTrainCase1(TestBase): - def set_atol(self): self.atol = 1e-7 self.rtol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_v2_op_ipu.py index 2c8e7159cf217a19211acde4e6958f039d4a2be4..4d8ca5bb03728e2ce475e1435f85e4ed72138e70 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_v2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,14 +44,14 @@ class TestBase(IPUOpTest): "embedding_dim": 16, "sparse": False, "padding_idx": -1, - "weight_attr": None + "weight_attr": None, } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int64') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64' + ) embedding = paddle.nn.Embedding(**self.attrs) out = embedding(x) if self.is_training: @@ -77,7 +76,6 @@ class TestBase(IPUOpTest): class TestTrainCase1(TestBase): - def set_atol(self): self.atol = 1e-7 self.rtol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py index 137f3f61c535861135044a0ce0c7bcd3ecc1eb4f..7fffe224458af78e71906345b24a1011b6b24290 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class LR_New(LRScheduler): - def __init__(self, learning_rate=1e-5, last_epoch=-1, verbose=False): super(LR_New, self).__init__(learning_rate, last_epoch, verbose) @@ -32,16 +31,14 @@ class LR_New(LRScheduler): class TestConvNet(IPUOpTest): - @IPUOpTest.static_graph def build_model(self): - image = paddle.static.data(name='image', - shape=[1, 3, 10, 10], - dtype='float32') - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + image = paddle.static.data( + name='image', shape=[1, 3, 10, 10], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) opt = paddle.optimizer.Lamb(learning_rate=LR_New()) @@ -61,9 +58,8 @@ class TestConvNet(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=True) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, self.fetch_list) else: program = self.main_prog @@ -71,9 +67,9 @@ class TestConvNet(IPUOpTest): for _ in range(100): if hasattr(program, "lr_sheduler"): program.lr_sheduler.step() - loss_res = exe.run(program, - feed=self.feed, - fetch_list=self.fetch_list) + loss_res = exe.run( + program, feed=self.feed, fetch_list=self.fetch_list + ) result.append(loss_res) return np.array(result) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_margin_rank_loss_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_margin_rank_loss_op_ipu.py index e9964156a128ba3861c979e923fcd33cec91e143..5861009fd851825d040129c5f3e264df2fc9d479 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_margin_rank_loss_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_margin_rank_loss_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -55,15 +54,15 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - label = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - left = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - right = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='float32') + label = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + left = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) + right = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32' + ) out = paddle.fluid.layers.margin_rank_loss(label, left, right) self.fetch_list = [out.name] @@ -79,7 +78,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'margin': 0.5, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py index bf2af886959b5230801e2fc7144f2dd1124577fd..ad53cfbae9c2c20ed37199e8170370b6cd6a8ba5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py +++ 
b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,12 +48,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.matmul(x, y, **self.attrs) self.fetch_list = [out.name] @@ -71,7 +70,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_x": True, @@ -81,7 +79,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_x": True, @@ -97,7 +94,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[5, 4, 3, 2]) y = np.random.uniform(size=[5, 4, 2, 3]) @@ -107,7 +103,6 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 3, 2]) y = np.random.uniform(size=[4, 2, 3]) @@ -117,7 +112,6 @@ class TestCase4(TestBase): class TestCase5(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 2, 3]) y = np.random.uniform(size=[3, 2]) @@ -127,7 +121,6 @@ class TestCase5(TestBase): class TestCase6(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3]) @@ -137,7 +130,6 @@ class TestCase6(TestBase): @unittest.skip("not supported") class TestCase6_2(TestCase6): - def set_data_feed(self): x = np.random.uniform(size=[3]) @@ -153,7 +145,6 @@ class TestCase6_2(TestCase6): class TestCase7(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[1, 3, 4, 5]) y = np.random.uniform(size=[1, 3, 4, 5]) @@ -166,7 +157,6 @@ class TestCase7(TestBase): class TestCase8(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 1]) y = np.random.uniform(size=[1, 2]) @@ -177,7 +167,6 @@ class TestCase8(TestBase): @unittest.skip("not supported") class TestCase8_2(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3]) y = np.random.uniform(size=[2]) @@ -195,7 +184,6 @@ class TestCase8_2(TestBase): @unittest.skip("dim > 4 is not supported") class TestCase9(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[6, 5, 4, 2, 3]) @@ -204,7 +192,6 @@ class TestCase9(TestBase): class TestCase10(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_y": True, @@ -219,7 +206,6 @@ class TestCase10(TestBase): class TestCase11(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_x": True, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py index 21aa7c4b992361fc37e8ccfa4a4ee4a15a2ca793..6ee930f2475302aca93cf8875a127ffee95144fa 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py @@ -27,7 +27,6 @@ def set_serialize_factor(serialize_factor): class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -51,12 +50,16 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x 
= paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) + y = paddle.static.data( + name=self.feed_list[1], + shape=self.feed_shape[1], + dtype=self.feed_dtype[1], + ) # decrator maybe the best choice, but need to modify api out = paddle.matmul(x, y, **self.attrs) set_serialize_factor(4) @@ -75,8 +78,8 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=self.is_training) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, self.fetch_list) else: program = self.main_prog result = exe.run(program, feed=self.feed, fetch_list=self.fetch_list) @@ -85,10 +88,9 @@ class TestBase(IPUOpTest): def test_base(self): res0 = self.run_model(False) res1 = self.run_model(True) - np.testing.assert_allclose(res0.flatten(), - res1.flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol + ) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py index 37f575f64bd99dbd1ff1b90c20e76765fd50338c..c71395ed272a038d0cddad4c5fb63632cfe29850 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,12 +44,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.matmul(x, y, **self.attrs) self.fetch_list = [out.name] @@ -66,7 +65,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_x": True, @@ -75,7 +73,6 @@ class TestCase1(TestBase): class TestCase3(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[5, 4, 2, 3]) y = np.random.uniform(size=[5, 4, 3, 2]) @@ -85,7 +82,6 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 2, 3]) y = np.random.uniform(size=[4, 3, 2]) @@ -95,7 +91,6 @@ class TestCase4(TestBase): class TestCase5(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 2, 3]) y = np.random.uniform(size=[3, 2]) @@ -105,7 +100,6 @@ class TestCase5(TestBase): class TestCase6(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3]) y = np.random.uniform(size=[3]) @@ -116,7 +110,6 @@ class TestCase6(TestBase): @unittest.skip("not supported") class TestCase6_2(TestCase6): - def set_data_feed(self): x = np.random.uniform(size=[3]) y = np.random.uniform(size=[3]) @@ -129,7 +122,6 @@ class TestCase6_2(TestCase6): class 
TestCase7(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 1]) y = np.random.uniform(size=[1, 2]) @@ -140,7 +132,6 @@ class TestCase7(TestBase): @unittest.skip("dim > 4 is not supported") class TestCase8(TestBase): - def set_data_feed(self): self.feed = { "x": np.random.uniform(size=[6, 5, 4, 2, 3]).astype('float32'), @@ -149,7 +140,6 @@ class TestCase8(TestBase): class TestCase9(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_y": True, @@ -164,7 +154,6 @@ class TestCase9(TestBase): class TestCase10(TestBase): - def set_op_attrs(self): self.attrs = { "transpose_x": True, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py index 0f60ed2485e7ebb2fb7dbb69e76033b305bd7c4d..7c6b1e29738435bccd431256a469a9c13607f585 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.mean(x) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py index 56242fea3672e0d206a8f3cecc5b5bff4aacaf1a..d23f0eb58ef711ce41947608ca61f8583a5897f6 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -40,11 +39,11 @@ class TestBase(IPUOpTest): data2 = np.random.uniform(size=[20]) self.feed_fp32 = { 'x': data1.astype(np.float32), - 'y': data2.astype(np.float32) + 'y': data2.astype(np.float32), } self.feed_fp16 = { 'x': data1.astype(np.float16), - 'y': data2.astype(np.float16) + 'y': data2.astype(np.float16), } def set_feed_attr(self): @@ -58,12 +57,16 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) + y = paddle.static.data( + name=self.feed_list[1], + shape=self.feed_shape[1], + dtype=self.feed_dtype[1], + ) r1, r2 = paddle.meshgrid(x, y) self.fetch_list = [r1.name, r2.name] @@ -81,7 +84,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_feed(self): data1 = np.random.uniform(size=[10]) data2 = np.random.uniform(size=[20]) @@ -89,41 +91,46 @@ class TestCase1(TestBase): self.feed_fp32 = { 'x': data1.astype(np.float32), 'y': data2.astype(np.float32), - 'z': data3.astype(np.float32) + 'z': data3.astype(np.float32), } self.feed_fp16 = { 'x': data1.astype(np.float16), 'y': data2.astype(np.float16), - 'z': data3.astype(np.float16) + 'z': data3.astype(np.float16), } @IPUOpTest.static_graph def build_model(self): - x = 
paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) - z = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype=self.feed_dtype[2]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) + y = paddle.static.data( + name=self.feed_list[1], + shape=self.feed_shape[1], + dtype=self.feed_dtype[1], + ) + z = paddle.static.data( + name=self.feed_list[2], + shape=self.feed_shape[2], + dtype=self.feed_dtype[2], + ) r1, r2, r3 = paddle.meshgrid(x, y, z) self.fetch_list = [r1.name, r2.name, r3.name] class TestCase2(TestBase): - def set_feed(self): data1 = np.random.uniform(size=[100]) data2 = np.random.uniform(size=[200]) self.feed_fp32 = { 'x': data1.astype(np.int32), - 'y': data2.astype(np.int32) + 'y': data2.astype(np.int32), } self.feed_fp16 = { 'x': data1.astype(np.int32), - 'y': data2.astype(np.int32) + 'y': data2.astype(np.int32), } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py index 7118466a521019fb9e854e862265119645dba487..ab62ec4e2760a61bccabb778c69fd955cd0d1abf 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -51,9 +50,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) # using fp32 x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3) @@ -77,7 +76,8 @@ class TestBase(IPUOpTest): amp_list = paddle.static.amp.CustomOpLists() amp_list.unsupported_list = {} to_fp16_var_names = paddle.static.amp.cast_model_to_fp16( - self.main_prog, amp_list, use_fp16_guard=True) + self.main_prog, amp_list, use_fp16_guard=True + ) if self.is_ipu_mode(exec_mode): place = paddle.CPUPlace() @@ -91,27 +91,29 @@ class TestBase(IPUOpTest): paddle.static.amp.cast_parameters_to_fp16( paddle.CPUPlace(), self.main_prog, - to_fp16_var_names=to_fp16_var_names) + to_fp16_var_names=to_fp16_var_names, + ) if self.is_ipu_mode(exec_mode): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config( is_training=False, num_ipus=self.num_ipus, - enable_manual_shard=self.enable_manual_shard) + enable_manual_shard=self.enable_manual_shard, + ) ipu_strategy.set_pipelining_config( enable_pipelining=self.enable_pipelining, - batches_per_step=self.batches_per_step) + batches_per_step=self.batches_per_step, + ) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, self.fetch_list) else: program = self.main_prog - result = exe.run(program, - feed=self.feed_fp32, - fetch_list=self.fetch_list) + result = exe.run( + program, feed=self.feed_fp32, fetch_list=self.fetch_list + ) self.output_dict[exec_mode] = result[0] def test(self): @@ -122,15 +124,14 @@ class TestBase(IPUOpTest): class 
TestPipline(TestBase): - @IPUOpTest.static_graph def build_model(self, exec_mode): feed_shape = list(self.feed_shape[0]) if self.is_ipu_mode(exec_mode): feed_shape[0] = 1 - x = paddle.static.data(name=self.feed_list[0], - shape=feed_shape, - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=feed_shape, dtype='float32' + ) with paddle.static.ipu_shard_guard(index=0, stage=0): # using fp32 x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3) @@ -140,9 +141,9 @@ class TestPipline(TestBase): with paddle.static.ipu_shard_guard(index=1, stage=1): # using fp16 with paddle.static.amp.fp16_guard(): - x = paddle.static.nn.conv2d(input=x, - num_filters=6, - filter_size=3) + x = paddle.static.nn.conv2d( + input=x, num_filters=6, filter_size=3 + ) x = paddle.static.nn.batch_norm(x, act='relu') x = F.max_pool2d(x, kernel_size=2, stride=2) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py index 51a0e91a29c3bc320eed49e8e136455dc1fc1e75..0ea8d5557f2198183aa59c890ad8c1c76fb0b889 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -57,9 +56,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) # using fp32 x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3) @@ -87,7 +86,8 @@ class TestBase(IPUOpTest): amp_list = paddle.static.amp.CustomOpLists() amp_list.unsupported_list = {} to_fp16_var_names = paddle.static.amp.cast_model_to_fp16( - self.main_prog, amp_list) + self.main_prog, amp_list + ) if self.is_ipu_mode(exec_mode): place = paddle.CPUPlace() @@ -101,29 +101,31 @@ class TestBase(IPUOpTest): paddle.static.amp.cast_parameters_to_fp16( paddle.CPUPlace(), self.main_prog, - to_fp16_var_names=to_fp16_var_names) + to_fp16_var_names=to_fp16_var_names, + ) if self.is_ipu_mode(exec_mode): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config( is_training=self.is_training, num_ipus=self.num_ipus, - enable_manual_shard=self.enable_manual_shard) + enable_manual_shard=self.enable_manual_shard, + ) ipu_strategy.set_pipelining_config( enable_pipelining=self.enable_pipelining, - batches_per_step=self.batches_per_step) + batches_per_step=self.batches_per_step, + ) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, self.fetch_list) else: program = self.main_prog result = [] for _ in range(self.epoch): - out = exe.run(program, - feed=self.feed_fp32, - fetch_list=self.fetch_list) + out = exe.run( + program, feed=self.feed_fp32, fetch_list=self.fetch_list + ) result.append(out) self.output_dict[exec_mode] = result @@ -135,15 +137,14 @@ class TestBase(IPUOpTest): class TestPipline(TestBase): - @IPUOpTest.static_graph def build_model(self, exec_mode): feed_shape = list(self.feed_shape[0]) if self.is_ipu_mode(exec_mode): feed_shape[0] = 1 - x = paddle.static.data(name=self.feed_list[0], 
- shape=feed_shape, - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=feed_shape, dtype='float32' + ) with paddle.static.ipu_shard_guard(index=0, stage=0): # using fp32 @@ -154,9 +155,9 @@ class TestPipline(TestBase): with paddle.static.ipu_shard_guard(index=1, stage=1): # using fp16 with paddle.static.amp.fp16_guard(): - x = paddle.static.nn.conv2d(input=x, - num_filters=6, - filter_size=3) + x = paddle.static.nn.conv2d( + input=x, num_filters=6, filter_size=3 + ) x = paddle.static.nn.batch_norm(x, act='relu') x = F.max_pool2d(x, kernel_size=2, stride=2) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py index 55a6569b7dd1c9add15c844abeacb03498580015..2592d0ddee59c71311c9d7a11f85a8a6ce01a9d2 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -59,19 +58,17 @@ class TestBase(IPUOpTest): bs = self.ipu_bs if run_ipu else self.cpu_bs with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name='image', - shape=[bs, 3, 10, 10], - dtype='float32') + image = paddle.static.data( + name='image', shape=[bs, 3, 10, 10], dtype='float32' + ) with paddle.static.ipu_shard_guard(index=0): - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) with paddle.static.ipu_shard_guard(index=1): - conv2 = paddle.static.nn.conv2d(conv1, - num_filters=3, - filter_size=3, - bias_attr=False) + conv2 = paddle.static.nn.conv2d( + conv1, num_filters=3, filter_size=3, bias_attr=False + ) # should consider influence of bs loss = paddle.mean(conv2) @@ -101,11 +98,12 @@ class TestBase(IPUOpTest): ipu_strategy.set_graph_config( num_ipus=2 * self.ipu_options['replicated_graph_count'], is_training=self.is_training, - enable_manual_shard=True) + enable_manual_shard=True, + ) ipu_strategy.set_options(self.ipu_options) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -126,14 +124,12 @@ class TestBase(IPUOpTest): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True) - np.testing.assert_allclose(cpu_outputs, - ipu_outputs, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + cpu_outputs, ipu_outputs, rtol=1e-05, atol=self.atol + ) class TestReplicaInference(TestBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -150,14 +146,13 @@ class TestReplicaInference(TestBase): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np_image} self.feed_ipu = { - "image": - np.tile(np_image, - [self.ipu_options['replicated_graph_count'], 1, 1, 1]) + "image": np.tile( + np_image, [self.ipu_options['replicated_graph_count'], 1, 1, 1] + ) } class TestReplicaCollectiveInference(TestBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -166,13 +161,11 @@ class TestReplicaCollectiveInference(TestBase): "accumulation_factor": 1, "enable_replicated_graphs": True, "replicated_graph_count": 
2, - "accumulate_outer_fragment": { - 0: [] - }, + "accumulate_outer_fragment": {0: []}, "replicated_collectives_settings": { "prepare_schedule_for_merging_collectives": True, - "merge_all_reduce_collectives": True - } + "merge_all_reduce_collectives": True, + }, } self.cpu_bs = 1 self.ipu_bs = 1 @@ -181,14 +174,13 @@ class TestReplicaCollectiveInference(TestBase): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np_image} self.feed_ipu = { - "image": - np.tile(np_image, - [self.ipu_options['replicated_graph_count'], 1, 1, 1]) + "image": np.tile( + np_image, [self.ipu_options['replicated_graph_count'], 1, 1, 1] + ) } class TestPipelineInference(TestBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 2, @@ -205,13 +197,13 @@ class TestPipelineInference(TestBase): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np_image} self.feed_ipu = { - "image": - np.tile(np_image, [self.ipu_options['batches_per_step'], 1, 1, 1]) + "image": np.tile( + np_image, [self.ipu_options['batches_per_step'], 1, 1, 1] + ) } class TestTrainBase(TestBase): - def set_training(self): self.is_training = True self.epoch = 10 @@ -231,7 +223,6 @@ class TestTrainBase(TestBase): class TestReplicaTrain(TestTrainBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -239,7 +230,7 @@ class TestReplicaTrain(TestTrainBase): "enable_gradient_accumulation": False, "accumulation_factor": 1, "enable_replicated_graphs": True, - "replicated_graph_count": 2 + "replicated_graph_count": 2, } self.cpu_bs = 2 self.ipu_bs = 1 @@ -249,23 +240,21 @@ class TestReplicaTrain(TestTrainBase): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np.tile(np_image, [self.cpu_bs, 1, 1, 1])} self.feed_ipu = { - "image": - np.tile(np_image, - [self.ipu_options['replicated_graph_count'], 1, 1, 1]) + "image": np.tile( + np_image, [self.ipu_options['replicated_graph_count'], 1, 1, 1] + ) } def test(self): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True)[::2] - np.testing.assert_allclose(cpu_outputs, - ipu_outputs, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + cpu_outputs, ipu_outputs, rtol=1e-05, atol=self.atol + ) class TestReplicaCollectiveTrain(TestTrainBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -274,13 +263,11 @@ class TestReplicaCollectiveTrain(TestTrainBase): "accumulation_factor": 1, "enable_replicated_graphs": True, "replicated_graph_count": 2, - "accumulate_outer_fragment": { - 0: [] - }, + "accumulate_outer_fragment": {0: []}, "replicated_collectives_settings": { "prepare_schedule_for_merging_collectives": True, - "merge_all_reduce_collectives": True - } + "merge_all_reduce_collectives": True, + }, } self.cpu_bs = 2 self.ipu_bs = 1 @@ -290,23 +277,21 @@ class TestReplicaCollectiveTrain(TestTrainBase): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np.tile(np_image, [self.cpu_bs, 1, 1, 1])} self.feed_ipu = { - "image": - np.tile(np_image, - [self.ipu_options['replicated_graph_count'], 1, 1, 1]) + "image": np.tile( + np_image, [self.ipu_options['replicated_graph_count'], 1, 1, 1] + ) } def test(self): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True)[::2] - np.testing.assert_allclose(cpu_outputs, - ipu_outputs, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + cpu_outputs, ipu_outputs, rtol=1e-05, atol=self.atol + ) class TestPipelineTrain(TestTrainBase): - 
def set_attrs(self): self.ipu_options = { "batches_per_step": 3, @@ -323,22 +308,22 @@ class TestPipelineTrain(TestTrainBase): def set_data_feed(self): np_image = np.random.rand(1, 3, 10, 10).astype(np.float32) self.feed_cpu = {"image": np.tile(np_image, [self.cpu_bs, 1, 1, 1])} - bps_acc = self.ipu_options['batches_per_step'] * self.ipu_options[ - 'accumulation_factor'] + bps_acc = ( + self.ipu_options['batches_per_step'] + * self.ipu_options['accumulation_factor'] + ) self.feed_ipu = {"image": np.tile(np_image, [bps_acc, 1, 1, 1])} def test(self): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True)[::3] - np.testing.assert_allclose(cpu_outputs, - ipu_outputs, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + cpu_outputs, ipu_outputs, rtol=1e-05, atol=self.atol + ) class TestAdamTrain(TestTrainBase): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -354,7 +339,6 @@ class TestAdamTrain(TestTrainBase): class TestAdamReplicaTrain(TestReplicaTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -370,7 +354,6 @@ class TestAdamReplicaTrain(TestReplicaTrain): class TestAdamPipelineTrain(TestPipelineTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 3, @@ -386,7 +369,6 @@ class TestAdamPipelineTrain(TestPipelineTrain): class TestAdamRecomputationTrain(TestPipelineTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 3, @@ -403,7 +385,6 @@ class TestAdamRecomputationTrain(TestPipelineTrain): class TestLambTrain(TestAdamTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -419,7 +400,6 @@ class TestLambTrain(TestAdamTrain): class TestLambReplicaTrain(TestAdamReplicaTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 1, @@ -435,7 +415,6 @@ class TestLambReplicaTrain(TestAdamReplicaTrain): class TestLambPipelineTrain(TestAdamPipelineTrain): - def set_attrs(self): self.ipu_options = { "batches_per_step": 3, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py index fb5f25619bf96e1b99c5b5a6759453705388d1c8..6ee6b842332b68190d1367d7e61c33c8a54715f1 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_training() self.set_data_feed() @@ -37,29 +36,28 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - image = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + image = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) with paddle.static.ipu_shard_guard(index=0): - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) with paddle.static.ipu_shard_guard(index=1): - conv2 = paddle.static.nn.conv2d(conv1, - num_filters=3, - filter_size=3, - bias_attr=False) + conv2 = paddle.static.nn.conv2d( + conv1, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv2) self.fetch_list = [loss.name] def run_model(self, exec_mode): ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=2, - is_training=False, - enable_manual_shard=True) - 
ipu_strategy.set_pipelining_config(enable_pipelining=True, - batches_per_step=2) + ipu_strategy.set_graph_config( + num_ipus=2, is_training=False, enable_manual_shard=True + ) + ipu_strategy.set_pipelining_config( + enable_pipelining=True, batches_per_step=2 + ) self.run_op_test(exec_mode, ipu_strategy=ipu_strategy) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py index a5ace5f1bf1c9334aa6f52ae687d8a487895905e..c1c2116a413025aa541e33a15ad3886206047d02 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -48,12 +47,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.mul(x, y, **self.attrs) self.fetch_list = [out.name] @@ -69,7 +68,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[1, 2, 5]) y = np.random.uniform(size=[5, 3]) @@ -84,7 +82,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 4, 2, 9]) y = np.random.uniform(size=[3, 6, 1, 2, 3]) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py index c00a60775eb7f3b693de4bc8dbb1117935989d2a..465ed7d06b704bdef9fadf6125057d64245889fe 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,12 +49,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.not_equal(x, y, **self.attrs) self.fetch_list = [out.name] @@ -71,7 +70,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.ones([1, 10]) y = np.ones([1, 10]) @@ -80,7 +78,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_data_feed(self): x = np.ones([1, 10]) y = np.arange(0, 10).reshape([1, 10]) @@ -89,7 +86,6 @@ class TestCase2(TestBase): class TestScalar(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -116,10 +112,10 @@ class TestScalar(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - out = (x != 0.5) + x = 
paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + out = x != 0.5 self.fetch_list = [out.name] def run_model(self, exec_mode): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py index fe5b658426eeea35932be373bd5b9c9323050871..53e8bab2adc1f864e32ef903df25f154e408930b 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' + ) out = paddle.fluid.layers.one_hot(x, **self.attrs) self.fetch_list = [out.name] @@ -62,7 +61,6 @@ class TestBase(IPUOpTest): @unittest.skip('does not support allow_out_of_range=True') class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"depth": 4, "allow_out_of_range": True} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py index e958cfd1f89ba9b71c92d506025ebaacc910e56e..99294f9c5f1091ea62bc7bee3987109b9eeb439b 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' + ) out = paddle.fluid.input.one_hot(x, **self.attrs) self.fetch_list = [out.name] @@ -62,7 +61,6 @@ class TestBase(IPUOpTest): @unittest.skip('does not support allow_out_of_range=True') class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"depth": 4, "allow_out_of_range": True} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py index f14b2174d398e45bfc1db218f60f7b8afbf86ae7..d8b8116078be45f42b6c28edb22219cb60f01c5d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py @@ -20,7 +20,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -57,25 +56,27 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name='image', - shape=[1, 3, 10, 10], - dtype='float32') - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + image = paddle.static.data( + name='image', shape=[1, 3, 10, 10], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) weight_decay = self.attrs['weight_decay'] - opt = 
paddle.optimizer.SGD(learning_rate=1e-1, - weight_decay=weight_decay) + opt = paddle.optimizer.SGD( + learning_rate=1e-1, weight_decay=weight_decay + ) if self.attrs['optimizer'] == 'adam': - opt = paddle.optimizer.Adam(learning_rate=1e-1, - weight_decay=weight_decay) + opt = paddle.optimizer.Adam( + learning_rate=1e-1, weight_decay=weight_decay + ) elif self.attrs['optimizer'] == 'lamb': - opt = paddle.optimizer.Lamb(learning_rate=1e-1, - lamb_weight_decay=weight_decay) + opt = paddle.optimizer.Lamb( + learning_rate=1e-1, lamb_weight_decay=weight_decay + ) opt.minimize(loss) if run_ipu: @@ -91,18 +92,23 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=True) ipu_strategy.set_options( - {'loss_scaling': self.attrs["loss_scaling"]}) + {'loss_scaling': self.attrs["loss_scaling"]} + ) if "use_no_bias_optimizer" in self.attrs.keys(): - ipu_strategy.set_options({ - "use_no_bias_optimizer": - self.attrs["use_no_bias_optimizer"] - }) + ipu_strategy.set_options( + { + "use_no_bias_optimizer": self.attrs[ + "use_no_bias_optimizer" + ] + } + ) if "accl1_type" in self.attrs.keys(): ipu_strategy.set_options( - {"accl1_type": self.attrs["accl1_type"]}) + {"accl1_type": self.attrs["accl1_type"]} + ) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -118,15 +124,13 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() - np.testing.assert_allclose(ipu_loss, - cpu_loss, - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + ipu_loss, cpu_loss, rtol=1e-05, atol=self.atol + ) @unittest.skip('do not support L2 regularization') class TestSGD(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'sgd', @@ -137,7 +141,6 @@ class TestSGD(TestBase): @unittest.skip('do not support L2 regularization') class TestAdamCase1(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', @@ -147,7 +150,6 @@ class TestAdamCase1(TestBase): class TestAdamCase2(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', @@ -158,7 +160,6 @@ class TestAdamCase2(TestBase): @unittest.skip('cpu do not support AdamNoBias') class TestAdamNoBias(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', @@ -170,7 +171,6 @@ class TestAdamNoBias(TestBase): @unittest.skip('cpu do not support FLOAT16') class TestAdamCase3(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', @@ -182,7 +182,6 @@ class TestAdamCase3(TestBase): @unittest.skip('seems cpu output wrong') class TestLambCase1(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', @@ -193,7 +192,6 @@ class TestLambCase1(TestBase): @unittest.skip('seems cpu output wrong') class TestLamb(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', @@ -204,25 +202,23 @@ class TestLamb(TestBase): @unittest.skip('cpu do not support LambNoBias') class TestLambNoBias(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', "weight_decay": 0.1, "loss_scaling": 6.0, - "use_no_bias_optimizer": True + "use_no_bias_optimizer": True, } @unittest.skip('cpu do not support FLOAT16') class TestLambCase2(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', "weight_decay": 0.1, "loss_scaling": 6.0, - "accl1_type": "FLOAT16" + "accl1_type": "FLOAT16", } diff --git 
a/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py index bd6ff58751d3f9e4acafc3d497b945a1600f80dd..3cdcc509e4a53740934bb0d450dfaeb83c662531 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -40,9 +39,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) x = paddle.nn.functional.normalize(x, **self.attrs) self.fetch_list = [x.name] @@ -58,13 +57,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": 1} class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"p": 3.5, "axis": 1, "epsilon": 1e-3} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py index 741fc2189f03e8b26b7a504a9efe755adc9882f7..325b95b68911257cf6ca9e8d1699c708eef3275b 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -40,9 +39,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) pad = paddle.nn.functional.pad(x, **self.attrs) self.fetch_list = [pad.name] @@ -59,15 +58,14 @@ class TestBase(IPUOpTest): @unittest.skip("Do not support `pad` as a tensor") class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {} @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) const_attrs = { 'name': 'y', 'shape': [4], @@ -80,7 +78,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [2, 5], "data_format": "NCL"} @@ -93,7 +90,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [2, 5, 2, 3, 6, 3], "data_format": "NCDHW"} @@ -106,42 +102,36 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [2, 2, 1, 1], "mode": "reflect"} @unittest.skip("replicate mode is not supported") class TestCase5(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [1, 2, 3, 4], "mode": "replicate"} @unittest.skip("circular mode is not supported") class TestCase6(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [1, 2, 3, 4], "mode": "circular"} @unittest.skip("Only support NCL, NCHW, NCDHW") class TestCase7(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [1, 2], "data_format": "NLC"} @unittest.skip("Only support NCL, NCHW, NCDHW") class TestCase7(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [1, 2, 3, 4], "data_format": 
"NHWC"} @unittest.skip("Only support NCL, NCHW, NCDHW") class TestCase7(TestBase): - def set_op_attrs(self): self.attrs = {"pad": [1, 2, 3, 4, 1, 3], "data_format": "NDHWC"} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py index 8a2aa26f1c2d8682f58ab1a2915bd8c404169d50..058e3b30a531539e2fec42261c4e8d3d35151781 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -53,9 +52,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.pool2d(x, **self.attrs) self.fetch_list = [out.name] @@ -71,42 +70,36 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_size'] = 3 class TestCase1_2(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_size'] = [3, 1] class TestCase2(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_stride'] = 2 class TestCase2_2(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_stride'] = [2, 1] class TestCase3(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_padding'] = [1, 1] class TestCase3_2(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_padding'] = [1, 1, 2, 2] @@ -114,7 +107,6 @@ class TestCase3_2(TestBase): @unittest.skip('the results has a positional offset') class TestCase3_3(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_padding'] = [1, 2, 1, 1] @@ -122,7 +114,6 @@ class TestCase3_3(TestBase): @unittest.skip('paddle output has nan') class TestCase3_4(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['pool_size'] = 1 @@ -130,40 +121,36 @@ class TestCase3_4(TestBase): class TestCase4(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['global_pooling'] = True class TestCase5(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['ceil_mode'] = True class TestCase6(TestBase): - def set_attrs(self): super().set_attrs() self.attrs['exclusive'] = False class TestAdaptive(TestBase): - def set_op_attrs(self): self.attrs = { "pool_size": 1, "pool_type": 'avg', - "require_index": False + "require_index": False, } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py index dca1103a0cd9833b6e54d46d187e723a6382934a..aff790a775a9f4745580345fea2965e7f695f220 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() 
self.set_training() @@ -53,9 +52,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.pool2d(x, **self.attrs) self.fetch_list = [out.name] @@ -71,42 +70,36 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_size'] = 3 class TestCase1_2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_size'] = [3, 1] class TestCase2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_stride'] = 2 class TestCase2_2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_stride'] = [2, 1] class TestCase3(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_padding'] = [1, 1] class TestCase3_2(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_padding'] = [1, 1, 2, 2] @@ -114,7 +107,6 @@ class TestCase3_2(TestBase): @unittest.skip('auto_pad is not currently supported') class TestCase3_3(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_padding'] = 'VALID' @@ -122,47 +114,42 @@ class TestCase3_3(TestBase): @unittest.skip('auto_pad is not currently supported') class TestCase3_4(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['pool_padding'] = 'SAME' class TestCase4(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['global_pooling'] = True class TestCase5(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['ceil_mode'] = True class TestCase6(TestBase): - def set_op_attrs(self): super().set_op_attrs() self.attrs['exclusive'] = False class TestAdaptive(TestBase): - def set_op_attrs(self): self.attrs = { "pool_size": 1, "pool_type": 'max', - "require_index": False + "require_index": False, } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py index 8355f5eefde8c82da5e0b56b4eaffc916a93e04b..f61a8b8a24e394901a58849647c2c41d1a4b1841 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.pow(x, **self.attrs) self.fetch_list = [out.name] @@ -62,18 +61,17 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): data1 = np.random.uniform(size=[1, 3, 2, 2]) data2 = np.array([2.0]) self.feed_fp32 = { "x": data1.astype(np.float32), - "y": data2.astype(np.float32) + "y": data2.astype(np.float32), } self.feed_fp16 = { "x": 
data1.astype(np.float16), - "y": data2.astype(np.float16) + "y": data2.astype(np.float16), } def set_op_attrs(self): @@ -81,12 +79,12 @@ class TestCase1(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - factor = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + factor = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py index b80560dccb3f4929719b782a7a71d281176dbdec..a91b18d10ac502d500e9c7a43f3f86326f53afcd 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -46,9 +45,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) array = np.random.uniform(size=[1]).astype(np.float32) result1 = paddle.zeros(shape=[1], dtype='float32') @@ -70,12 +69,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) array = np.random.uniform(size=[3]).astype(np.float32) result1 = paddle.zeros(shape=[3], dtype='float32') weight = paddle.assign(array, result1) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py index 57935c8a657f6ad187a8e79ed0fda418231c57de..70a2eba3c797b72b9246ec5fdc6683d8e867dee2 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py @@ -22,7 +22,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, IPUD2STest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,9 +48,11 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) out = paddle.fluid.layers.conv2d(x, num_filters=3, filter_size=3) out = paddle.fluid.layers.Print(out, **self.attrs) @@ -74,13 +75,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"message": "input_data"} class TestTrainCase1(TestBase): - def set_op_attrs(self): # "forward" : print forward # "backward" : print forward and backward @@ -94,7 +93,6 @@ class TestTrainCase1(TestBase): @unittest.skip("attrs are not supported") class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "first_n": 10, @@ -103,18 +101,16 @@ class TestCase2(TestBase): 
"print_tensor_type": True, "print_tensor_shape": True, "print_tensor_layout": True, - "print_tensor_lod": True + "print_tensor_lod": True, } class SimpleLayer(paddle.nn.Layer): - def __init__(self): super(SimpleLayer, self).__init__() - self.conv = paddle.nn.Conv2D(in_channels=3, - out_channels=1, - kernel_size=2, - stride=1) + self.conv = paddle.nn.Conv2D( + in_channels=3, out_channels=1, kernel_size=2, stride=1 + ) @to_static() def forward(self, x, target=None): @@ -130,7 +126,6 @@ class SimpleLayer(paddle.nn.Layer): class TestD2S(IPUD2STest): - def setUp(self): self.set_data_feed() @@ -142,16 +137,19 @@ class TestD2S(IPUD2STest): paddle.seed(self.SEED) np.random.seed(self.SEED) model = SimpleLayer() - optim = paddle.optimizer.Adam(learning_rate=0.01, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.01, parameters=model.parameters() + ) if use_ipu: paddle.set_device('ipu') ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=1, - is_training=True, - micro_batch_size=1, - enable_manual_shard=False) + ipu_strategy.set_graph_config( + num_ipus=1, + is_training=True, + micro_batch_size=1, + enable_manual_shard=False, + ) ipu_strategy.set_optimizer(optim) result = [] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_rank_loss_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_rank_loss_op_ipu.py index ad3bbde11923ac2329ec7367a52cc4e6125f92c9..bebe0e223277099bdd21eb437279512366dc5d2a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_rank_loss_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_rank_loss_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,15 +48,15 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - label = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - left = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - right = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='float32') + label = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + left = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) + right = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32' + ) out = paddle.fluid.layers.rank_loss(label, left, right) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py index c78165f86e21a89c9cb6d4e51cfb62eae7136af1..9139b1db69af9926fe3cf3400c9f4c4ea121dbde 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestMean(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -37,9 +36,9 @@ class TestMean(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = self.op(x, **self.attrs) self.fetch_list = [out.name] @@ -123,39 +122,34 @@ class TestMean(IPUOpTest): class 
TestMax(TestMean): - def set_test_op(self): self.op = paddle.fluid.layers.reduce_max class TestMin(TestMean): - def set_test_op(self): self.op = paddle.fluid.layers.reduce_min class TestProd(TestMean): - def set_test_op(self): self.op = paddle.fluid.layers.reduce_prod class TestSum(TestMean): - def set_test_op(self): self.op = paddle.fluid.layers.reduce_sum class TestLogsumexp(TestMean): - def set_test_op(self): self.op = paddle.logsumexp @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) if 'dim' in self.attrs: self.attrs['axis'] = self.attrs['dim'] del self.attrs['dim'] @@ -167,7 +161,6 @@ class TestLogsumexp(TestMean): class TestAll(TestMean): - @property def fp16_enabled(self): return False @@ -184,9 +177,9 @@ class TestAll(TestMean): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='bool') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='bool' + ) out = self.op(x, **self.attrs) self.fetch_list = [out.name] @@ -195,7 +188,6 @@ class TestAll(TestMean): class TestAny(TestAll): - def set_test_op(self): self.op = paddle.fluid.layers.reduce_any diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py index 66358d83ee680cb969ab0822af05d61381ded23c..c6016d2ec13db622af4d75602a90d6a2dde92d43 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -47,9 +46,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) add = paddle.fluid.layers.elementwise_add(x, x) out = paddle.fluid.layers.reshape(add, **self.attrs) self.fetch_list = [out.name] @@ -66,7 +65,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_attrs(self): self.attrs = { "shape": [-1, 0, 10], diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py index 2da63cf7330045398a0fc8edcfd0f6f5061a43d7..74bae31111ba18bbba7c541b3813bea4f98a902c 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,9 +44,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.reshape(x=x, **self.attrs) self.fetch_list = [out.name] @@ -63,7 +62,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['shape'] = [2, 3, -1, 2] @@ 
-71,7 +69,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {} self.attrs['shape'] = [-1, 0, 3, 2] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py index 1e8387ac2284f81102f0b163c6c882335c670601..feed2f739be207ea59dc916909c7852f543dcd2d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py @@ -24,7 +24,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -54,14 +53,14 @@ class TestBase(IPUOpTest): def build_model(self): generator = paddle.fluid.unique_name.UniqueNameGenerator() with paddle.fluid.unique_name.guard(generator): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - conv1 = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False, - name='conv2d') + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype='float32', + ) + conv1 = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False, name='conv2d' + ) loss = paddle.mean(conv1) # apply optimizer self.optimizer().minimize(loss) @@ -83,7 +82,8 @@ class TestBase(IPUOpTest): ipu_strategy.set_precision_config(enable_fp16=True) IPUOpTest.cast_model_to_fp16(self.main_prog) ipu_compiler = paddle.static.IpuCompiledProgram( - self.main_prog, ipu_strategy=ipu_strategy) + self.main_prog, ipu_strategy=ipu_strategy + ) program = ipu_compiler.compile(self.feed_list, self.fetch_list) feed = self.feed_fp32 @@ -91,16 +91,19 @@ class TestBase(IPUOpTest): feed = self.feed_fp16 result = [] - run_steps = self.attrs['steps'] if save_otherwise_load \ + run_steps = ( + self.attrs['steps'] + if save_otherwise_load else self.attrs['steps'] - self.attrs['save_at_step'] + ) for i in range(run_steps): tmp = exe.run(program, feed=feed, fetch_list=self.fetch_list) - if save_otherwise_load and \ - i == self.attrs['save_at_step'] - 1: + if save_otherwise_load and i == self.attrs['save_at_step'] - 1: ipu_compiler._backend.weights_to_host() - paddle.static.save(self.main_prog, - self.attrs['model_path'].name) + paddle.static.save( + self.main_prog, self.attrs['model_path'].name + ) if save_otherwise_load and i >= self.attrs['save_at_step']: result.append(tmp) @@ -112,72 +115,61 @@ class TestBase(IPUOpTest): def test_base(self): res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, True) res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, False) - np.testing.assert_allclose(res0.flatten(), - res1.flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol + ) self.attrs['model_path'].cleanup() class TestMomentum(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Momentum, learning_rate=1e-1) class TestAdam(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adam, learning_rate=1e-1) class TestLamb(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Lamb, learning_rate=1e-1) class TestAdamW(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.AdamW, learning_rate=1e-1) class TestAdamax(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adamax, learning_rate=1e-1) class TestAdagrad(TestBase): - def 
set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adagrad, learning_rate=1e-1) class TestAdadelta(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adagrad, learning_rate=1e-1) class TestRMSProp(TestBase): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.RMSProp, learning_rate=1e-1) class TestCenteredRMSProp(TestBase): - def set_optimizer(self): - self.optimizer = partial(paddle.optimizer.RMSProp, - learning_rate=1e-1, - centered=True) + self.optimizer = partial( + paddle.optimizer.RMSProp, learning_rate=1e-1, centered=True + ) @unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel") class TestSGDFP16(TestBase): - def set_attrs(self): self.attrs = {} self.attrs['steps'] = 100 @@ -190,67 +182,57 @@ class TestSGDFP16(TestBase): def test_base(self): res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, True) res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, False) - np.testing.assert_allclose(res0.flatten(), - res1.flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol + ) self.attrs['model_path'].cleanup() class TestMomentumFp16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Momentum, learning_rate=1e-1) class TestAdamFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adam, learning_rate=1e-1) class TestLambFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Lamb, learning_rate=1e-1) class TestAdamWFP16FP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.AdamW, learning_rate=1e-1) class TestAdamaxFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adamax, learning_rate=1e-1) class TestAdagradFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adagrad, learning_rate=1e-1) class TestAdadeltaFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.Adagrad, learning_rate=1e-1) class TestRMSPropFP16(TestSGDFP16): - def set_optimizer(self): self.optimizer = partial(paddle.optimizer.RMSProp, learning_rate=1e-1) class TestCenteredRMSPropFP16(TestSGDFP16): - def set_optimizer(self): - self.optimizer = partial(paddle.optimizer.RMSProp, - learning_rate=1e-1, - centered=True) + self.optimizer = partial( + paddle.optimizer.RMSProp, learning_rate=1e-1, centered=True + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py index 296d365fea6026848ee0dc7fe8fb3e54889c40f8..44ef0aa9fa4beaa68636843c715b61ee4ff60e3e 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -52,9 +51,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.scale(x, **self.attrs) self.fetch_list = [out.name] @@ -70,7 +69,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "scale": 5.0, @@ 
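The save/load test above swaps optimizers per subclass by binding the optimizer class and its fixed keyword arguments with functools.partial and constructing the object only inside build_model via self.optimizer(). A stripped-down sketch of that pattern with a made-up network (assumes paddle is installed; no IPU is needed for this CPU-side construction):

from functools import partial

import paddle

paddle.enable_static()

# Bind the optimizer class and its fixed kwargs now; construct it later.
make_optimizer = partial(paddle.optimizer.Momentum, learning_rate=1e-1)

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[4, 8], dtype='float32')
    loss = paddle.mean(paddle.static.nn.fc(x, size=2))
    # Equivalent to self.optimizer().minimize(loss) in the tests above:
    # the optimizer instance is only created at this point.
    make_optimizer().minimize(loss)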
-80,7 +78,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "scale": 1.0, @@ -90,7 +87,6 @@ class TestCase2(TestBase): class TestCase3(TestBase): - def set_op_attrs(self): self.attrs = { "scale": 5.0, @@ -100,7 +96,6 @@ class TestCase3(TestBase): class TestCase4(TestBase): - def set_op_attrs(self): self.attrs = { "scale": 1.0, @@ -110,7 +105,6 @@ class TestCase4(TestBase): class TestCase5(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 3, 10, 10]) y = np.array([3.0]) @@ -125,12 +119,12 @@ class TestCase5(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.scale(x, scale=y, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py index 3960be248eca9b3a1a861504f2fbbef7cec431ef..ca6d99ab2e50f91451774bbe0e0032e84cc4624a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py @@ -20,7 +20,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,26 +49,27 @@ class TestBase(IPUOpTest): self.attrs = { "optimizer": 'lamb', "weight_decay": 0.0, - "scaled_optimizer_state": True + "scaled_optimizer_state": True, } @IPUOpTest.static_graph def build_model(self): - image = paddle.static.data(name='image', - shape=[1, 3, 10, 10], - dtype='float32') - conv1 = paddle.static.nn.conv2d(image, - num_filters=3, - filter_size=3, - bias_attr=False) + image = paddle.static.data( + name='image', shape=[1, 3, 10, 10], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + image, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) weight_decay = self.attrs['weight_decay'] - opt = paddle.optimizer.Adam(learning_rate=1e-1, - weight_decay=weight_decay) + opt = paddle.optimizer.Adam( + learning_rate=1e-1, weight_decay=weight_decay + ) if self.attrs['optimizer'] == 'lamb': - opt = paddle.optimizer.Lamb(learning_rate=1e-1, - lamb_weight_decay=weight_decay) + opt = paddle.optimizer.Lamb( + learning_rate=1e-1, lamb_weight_decay=weight_decay + ) opt.minimize(loss) self.feed_list = [image.name] self.fetch_list = [loss.name] @@ -79,15 +79,21 @@ class TestBase(IPUOpTest): ipu_strategy.set_graph_config(is_training=self.is_training) if self.is_ipu_mode(exec_mode): if "use_no_bias_optimizer" in self.attrs.keys(): - ipu_strategy.set_options({ - "use_no_bias_optimizer": - self.attrs["use_no_bias_optimizer"] - }) + ipu_strategy.set_options( + { + "use_no_bias_optimizer": self.attrs[ + "use_no_bias_optimizer" + ] + } + ) if "scaled_optimizer_state" in self.attrs.keys(): - ipu_strategy.set_options({ - "scaled_optimizer_state": - self.attrs["scaled_optimizer_state"] - }) + ipu_strategy.set_options( + { + "scaled_optimizer_state": self.attrs[ + "scaled_optimizer_state" + ] + } + ) self.run_op_test(exec_mode, ipu_strategy=ipu_strategy) def test(self): @@ -99,12 +105,11 @@ 
class TestBase(IPUOpTest): class TestScaledAdam(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', "weight_decay": 0.0, - "scaled_optimizer_state": True + "scaled_optimizer_state": True, } def set_atol(self): @@ -115,25 +120,23 @@ class TestScaledAdam(TestBase): @unittest.skip('cpu do not support AdamNoBias') class TestScaledAdamNoBias(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'adam', "weight_decay": 0.0, "use_no_bias_optimizer": True, - "scaled_optimizer_state": True + "scaled_optimizer_state": True, } @unittest.skip('cpu do not support LambNoBias') class TestScaledLambNoBias(TestBase): - def set_attrs(self): self.attrs = { "optimizer": 'lamb', "weight_decay": 0.0, "use_no_bias_optimizer": True, - "scaled_optimizer_state": True + "scaled_optimizer_state": True, } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py index 9bce0b5df73df2d805bd37ffe95d73f661c36c5a..6cd27f24723355047ac35ceac3894872f79844e2 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -49,31 +48,28 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - conv1 = paddle.static.nn.conv2d(x, - num_filters=3, - filter_size=3, - bias_attr=False) - conv2 = paddle.static.nn.conv2d(conv1, - num_filters=3, - filter_size=3, - bias_attr=False) - conv3 = paddle.static.nn.conv2d(conv2, - num_filters=3, - filter_size=3, - bias_attr=False) - conv4 = paddle.static.nn.conv2d(conv3, - num_filters=3, - filter_size=3, - bias_attr=False) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + conv1 = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False + ) + conv2 = paddle.static.nn.conv2d( + conv1, num_filters=3, filter_size=3, bias_attr=False + ) + conv3 = paddle.static.nn.conv2d( + conv2, num_filters=3, filter_size=3, bias_attr=False + ) + conv4 = paddle.static.nn.conv2d( + conv3, num_filters=3, filter_size=3, bias_attr=False + ) self.fetch_list = [conv4.name] def run_model(self, exec_mode): ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training, - micro_batch_size=2) + ipu_strategy.set_graph_config( + is_training=self.is_training, micro_batch_size=2 + ) self.run_op_test(exec_mode, ipu_strategy) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py b/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py index 37423fda1fa9b7ce92b62bb19ea0ae49828500a4..ccd0b6b0d2eeda32c1cc286ab0e580aa7ff690ff 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py @@ -23,7 +23,6 @@ paddle.enable_static() class SimpleNet(paddle.nn.Layer): - def __init__(self, input_size, output_size): super(SimpleNet, self).__init__() self.linear1 = nn.Linear(input_size, output_size) @@ -46,15 +45,14 @@ class SimpleNet(paddle.nn.Layer): class TestSetIpuShard(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog): x = 
paddle.static.data(name='X', shape=[10, 46], dtype='float32') - label = paddle.static.data(name='Y', - shape=[10, 46], - dtype='float32') + label = paddle.static.data( + name='Y', shape=[10, 46], dtype='float32' + ) model = SimpleNet(46, 46) set_ipu_shard(model.linear1, index=1) @@ -74,22 +72,20 @@ class TestSetIpuShard(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) class TestSetIpuPipeline(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog): x = paddle.static.data(name='X', shape=[10, 46], dtype='float32') - label = paddle.static.data(name='Y', - shape=[10, 46], - dtype='float32') + label = paddle.static.data( + name='Y', shape=[10, 46], dtype='float32' + ) model = SimpleNet(46, 46) set_ipu_shard(model.linear1, stage=1) @@ -109,29 +105,27 @@ class TestSetIpuPipeline(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) class TestSetIpuShardAndPipeline(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog): x = paddle.static.data(name='X', shape=[10, 46], dtype='float32') - label = paddle.static.data(name='Y', - shape=[10, 46], - dtype='float32') + label = paddle.static.data( + name='Y', shape=[10, 46], dtype='float32' + ) model = SimpleNet(46, 46) set_ipu_shard(model.linear1, index=1, stage=2) set_ipu_shard(model.relu1, index=2, stage=3) - model.linear_relu2 = set_ipu_shard(model.linear_relu2, - index=3, - stage=4) + model.linear_relu2 = set_ipu_shard( + model.linear_relu2, index=3, stage=4 + ) model.linear3 = set_ipu_shard(model.linear3, index=4, stage=1) out = model(x) @@ -148,25 +142,38 @@ class TestSetIpuShardAndPipeline(unittest.TestCase): def test_set_ipu_shard(self): ipu_index_list = self._test() expected_ipu_index_list = [ - 1, 1, 2, 3, 3, 3, 4, 4, 2, 2, 3, 4, 4, 4, 1, 1 + 1, + 1, + 2, + 3, + 3, + 3, + 4, + 4, + 2, + 2, + 3, + 4, + 4, + 4, + 1, + 1, ] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) class TestSetIpuForModel(unittest.TestCase): - def _test(self): # build graph main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog): x = paddle.static.data(name='X', shape=[10, 46], dtype='float32') - label = paddle.static.data(name='Y', - shape=[10, 46], - dtype='float32') + label = paddle.static.data( + name='Y', shape=[10, 46], dtype='float32' + ) model = SimpleNet(46, 46) set_ipu_shard(model, index=1, stage=2) @@ -185,19 +192,31 @@ class TestSetIpuForModel(unittest.TestCase): def test_set_ipu_shard(self): ipu_index_list = self._test() expected_ipu_index_list = [ - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, ] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) class 
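The exploded integer lists above come from black's "magic trailing comma": when a collection has to be split, black puts one element per line and appends a trailing comma, and once that trailing comma exists it keeps the collection exploded even if it would otherwise fit. A tiny self-contained illustration with arbitrary values (not from this patch):

# Written with a trailing comma, so black keeps one element per line even
# though the list would fit on a single line (the "magic trailing comma"):
expected_ipu_index_list = [
    1,
    1,
    2,
    3,
]
# No trailing comma and it fits within the line limit, so it stays inline:
compact_list = [1, 1, 2, 3]

assert expected_ipu_index_list == compact_list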
TestSetIpuMixedModel(unittest.TestCase): - def setUp(self): - def linear_relu2_mixed(self, x): with paddle.static.ipu_shard_guard(index=2, stage=3): x = self.linear2(x) @@ -216,9 +235,9 @@ class TestSetIpuMixedModel(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog): x = paddle.static.data(name='X', shape=[10, 46], dtype='float32') - label = paddle.static.data(name='Y', - shape=[10, 46], - dtype='float32') + label = paddle.static.data( + name='Y', shape=[10, 46], dtype='float32' + ) model = SimpleNet(46, 46) set_ipu_shard(model.linear1, index=1, stage=2) @@ -239,13 +258,27 @@ class TestSetIpuMixedModel(unittest.TestCase): def test_set_ipu_shard(self): ipu_index_list = self._test() expected_ipu_index_list = [ - 1, 1, 2, 2, 2, 3, 4, 4, 2, 2, 3, 3, 3, 4, 1, 1 + 1, + 1, + 2, + 2, + 2, + 3, + 4, + 4, + 2, + 2, + 3, + 3, + 3, + 4, + 1, + 1, ] - np.testing.assert_allclose(ipu_index_list, - expected_ipu_index_list, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + ipu_index_list, expected_ipu_index_list, rtol=1e-05, atol=0 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_share_data_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_share_data_op_ipu.py index 7d355858f13644b31c10e18323620a8cd5058f44..59acdf921983bfc08a9150df89d4832bb79cd018 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_share_data_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_share_data_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -39,15 +38,15 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - self.main_prog.global_block().append_op(type="share_data", - inputs={"X": x}, - outputs={'Out': y}) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + self.main_prog.global_block().append_op( + type="share_data", inputs={"X": x}, outputs={'Out': y} + ) out = paddle.fluid.layers.elementwise_add(y, y) self.fetch_list = [out.name] @@ -63,7 +62,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -74,15 +72,15 @@ class TestCase1(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - self.main_prog.global_block().append_op(type="share_data", - inputs={"X": x}, - outputs={'Out': y}) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + self.main_prog.global_block().append_op( + type="share_data", inputs={"X": x}, outputs={'Out': y} + ) out = paddle.fluid.layers.elementwise_add(x, y) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_sigmoid_cross_entropy_with_logits_op_ipu.py 
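The sharding annotations these tests check can also be written directly with paddle.static.ipu_shard_guard, which tags every op created in its scope with an IPU device index and pipeline stage. A minimal sketch of that usage; the network here is hypothetical and running it assumes a Paddle build compiled with IPU support:

import paddle
import paddle.nn as nn

paddle.enable_static()

# Requires a Paddle build compiled with IPU support; CPU-only builds reject the guard.
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name='X', shape=[10, 46], dtype='float32')
    linear1 = nn.Linear(46, 46)
    linear2 = nn.Linear(46, 46)
    # Ops created inside each guard are tagged with that IPU index / stage.
    with paddle.static.ipu_shard_guard(index=1, stage=2):
        hidden = paddle.nn.functional.relu(linear1(x))
    with paddle.static.ipu_shard_guard(index=2, stage=3):
        out = linear2(hidden)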
b/python/paddle/fluid/tests/unittests/ipu/test_sigmoid_cross_entropy_with_logits_op_ipu.py index 7a6cf28d443896998d84bef412abb9d9932ae64f..c6b0f6a494d0b679b2006104bff0e364b20bb7e4 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_sigmoid_cross_entropy_with_logits_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_sigmoid_cross_entropy_with_logits_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): label = np.arange(10).reshape([10]) self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.float32) + "label": label.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.float16) + "label": label.astype(np.float16), } def set_feed_attr(self): @@ -52,14 +51,15 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits( - x, label, **self.attrs) + x, label, **self.attrs + ) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -74,7 +74,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'ignore_index': 1, @@ -82,7 +81,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_atol(self): # epsilon is added when normalize is True, use larger atol. 
self.atol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py index 3bcbe417b9861ca356acadb0f5f1e106d109e27f..6b5f6588bd356cbec26ec8cc508719027da8432a 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -48,9 +47,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.slice(x, **self.attrs) self.fetch_list = [out.name] @@ -66,7 +65,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "axes": [0, 1], @@ -77,7 +75,6 @@ class TestCase1(TestBase): @unittest.skip('dynamic graph is not support on IPU') class TestCase2(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 5, 6]) s = np.array([0, 0, 2]) @@ -85,12 +82,12 @@ class TestCase2(TestBase): self.feed_fp32 = { "x": x.astype(np.float32), "starts": s.astype(np.int32), - "ends": e.astype(np.int32) + "ends": e.astype(np.int32), } self.feed_fp16 = { "x": x.astype(np.float16), "starts": s.astype(np.int32), - "ends": e.astype(np.int32) + "ends": e.astype(np.int32), } def set_op_attrs(self): @@ -98,19 +95,18 @@ class TestCase2(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - starts = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') - ends = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='int32') - out = paddle.fluid.layers.slice(x, - starts=starts, - ends=ends, - **self.attrs) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + starts = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) + ends = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32' + ) + out = paddle.fluid.layers.slice( + x, starts=starts, ends=ends, **self.attrs + ) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py index ebc05942b93589d6d1e122be81877aaef04a3259..8c1dfa0427516d53c1008eb8badd9dac0ff4ea68 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_softmax_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.softmax(x, **self.attrs) self.fetch_list = [out.name] @@ -62,7 +61,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": 2} diff --git 
a/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py index d3084154a063e7029eb8de57c53f75531331b388..c5a789942aa9c9fa967019e28782ae5f8a937b29 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py @@ -22,7 +22,6 @@ import paddle.nn.functional as F class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -35,11 +34,11 @@ class TestBase(IPUOpTest): label = np.arange(3).reshape([3, 1]) self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } def set_feed_attr(self): @@ -53,17 +52,17 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) if on_ipu: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) else: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int64') + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64' + ) out = F.softmax_with_cross_entropy(x, label, **self.attrs) self.fetch_list = [out.name] @@ -81,7 +80,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { 'soft_label': False, @@ -90,37 +88,34 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[30, 70]) label = np.arange(30).reshape([30, 1]) self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } class TestCase3(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[3, 5, 7]) label = np.random.randint(0, 7, [3, 5, 1], dtype='int64') self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } class TestCase4(TestBase): - def set_op_attrs(self): self.attrs = { 'soft_label': False, @@ -130,17 +125,17 @@ class TestCase4(TestBase): @IPUOpTest.static_graph def build_model(self, on_ipu): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) if on_ipu: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) else: - label = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int64') + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64' + ) loss, softmax = F.softmax_with_cross_entropy(x, label, **self.attrs) self.fetch_list = [loss.name, softmax.name] 
@@ -158,7 +153,6 @@ class TestCase4(TestBase): class TestCase5(TestCase4): - def set_op_attrs(self): self.attrs = { 'soft_label': False, @@ -172,16 +166,15 @@ class TestCase5(TestCase4): label = np.random.randint(0, 5, [3, 1, 7, 11], dtype='int64') self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } class TestCase6(TestCase4): - def set_op_attrs(self): self.attrs = { 'soft_label': False, @@ -195,11 +188,11 @@ class TestCase6(TestCase4): label = np.random.randint(0, 7, [3, 5, 1, 9, 11], dtype='int64') self.feed_fp32 = { "x": x.astype(np.float32), - "label": label.astype(np.int64) + "label": label.astype(np.int64), } self.feed_fp16 = { "x": x.astype(np.float16), - "label": label.astype(np.int32) + "label": label.astype(np.int32), } diff --git a/python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py index 8d8c5190692dcaa5206a249780cc41934177fecc..dada2225edf8aff590f30c85d8e9c361256056f7 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_split_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -43,9 +42,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.split(x, **self.attrs) self.fetch_list = [fetch.name for fetch in out] @@ -63,7 +62,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"num_or_sections": [2, 8], "axis": 2} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py index 9039dfdb3f00641fdc5468ad9d356f8b894e5359..f3e312c863e51c3ef3cff46f9082aafde85b1862 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_squeeze_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.squeeze(x, **self.attrs) self.fetch_list = [out.name] @@ -62,13 +61,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axes": []} class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"axes": [-2]} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py index fa0a48081b4a4b233be875fda584fcf67a6d4ef4..1f1fbf6d789ae77a7561ec6ec0cd0ff3fb7d651c 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_stack_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class 
TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -36,12 +35,12 @@ class TestBase(IPUOpTest): self.feed_fp32 = { "x": x.astype(np.float32), "y": y.astype(np.float32), - "z": z.astype(np.float32) + "z": z.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), "y": y.astype(np.float16), - "z": z.astype(np.float16) + "z": z.astype(np.float16), } def set_feed_attr(self): @@ -54,15 +53,15 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - z = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) + z = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32' + ) out = paddle.fluid.layers.stack([x, y, z], **self.attrs) self.fetch_list = [out.name] @@ -78,7 +77,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axis": -2} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_strided_slice_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_strided_slice_op_ipu.py index f41ad6074265ae6cf145e01d755f9577f560c4ef..5f1bed2938dbd79dd12f9004a22aa1bb1e6810f8 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_strided_slice_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_strided_slice_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,14 +43,14 @@ class TestBase(IPUOpTest): "axes": [1, 2, 3], "starts": [-3, 0, 2], "ends": [3, 2, 4], - "strides": [1, 1, 1] + "strides": [1, 1, 1], } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.strided_slice(x, **self.attrs) self.fetch_list = [out.name] @@ -67,7 +66,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 4]) self.feed_fp32 = {"in_0": data.astype(np.float32)} @@ -78,13 +76,12 @@ class TestCase1(TestBase): "axes": [0, 1], "starts": [1, 3], "ends": [2, 0], - "strides": [1, -1] + "strides": [1, -1], } @unittest.skip('Only strides of 1 or -1 are supported.') class TestCase2(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[2, 4]) self.feed_fp32 = {"in_0": data.astype(np.float32)} @@ -95,13 +92,12 @@ class TestCase2(TestBase): "axes": [0, 1], "starts": [1, 3], "ends": [-1, 1000], - "strides": [1, 3] + "strides": [1, 3], } @unittest.skip('dynamic graph is not support on IPU') class TestCase3(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 5, 6]) s = np.array([0, 0, 2]) @@ -109,12 +105,12 @@ class TestCase3(TestBase): self.feed_fp32 = { "x": x.astype(np.float32), "starts": s.astype(np.int32), - "ends": e.astype(np.int32) + "ends": e.astype(np.int32), } self.feed_fp16 = { "x": x.astype(np.float16), "starts": s.astype(np.int32), - "ends": e.astype(np.int32) + "ends": e.astype(np.int32), } def set_op_attrs(self): @@ -122,19 
+118,18 @@ class TestCase3(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - starts = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') - ends = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='int32') - out = paddle.fluid.layers.strided_slice(x, - starts=starts, - ends=ends, - **self.attrs) + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + starts = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) + ends = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32' + ) + out = paddle.fluid.layers.strided_slice( + x, starts=starts, ends=ends, **self.attrs + ) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py index 3c4f9ff80d55768f3be8c8db6edbb70ae28cf1b8..9ed200626c1995b1e8d5f0e64b50aa7d611f7191 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_sum_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -45,12 +44,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.fluid.layers.sum([x, y], **self.attrs) self.fetch_list = [out.name] @@ -66,7 +65,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[1, 3, 2, 2]) y = np.random.uniform(size=[1, 3, 2, 2]) @@ -74,25 +72,25 @@ class TestCase1(TestBase): self.feed_fp32 = { "x": x.astype(np.float32), "y": y.astype(np.float32), - "z": z.astype(np.float32) + "z": z.astype(np.float32), } self.feed_fp16 = { "x": x.astype(np.float16), "y": y.astype(np.float16), - "z": z.astype(np.float16) + "z": z.astype(np.float16), } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - z = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) + z = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32' + ) out = paddle.fluid.layers.sum([x, y, z], **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_tile_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_tile_op_ipu.py index ecdbe20f75f1a2bce9a576271b31908bf24df080..becacf6b8be43b54a604aa8d2d8548cad728bb5e 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_tile_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_tile_op_ipu.py @@ -21,7 
+21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -46,9 +45,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.tile(x, **self.attrs) self.fetch_list = [out.name] @@ -64,7 +63,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "repeat_times": [2, 3, 2], @@ -73,7 +71,6 @@ class TestCase1(TestBase): @unittest.skip('dynamic graph is not support on IPU') class TestCase3(TestBase): - def set_data_feed(self): x = np.random.uniform(size=[4, 5, 6]) r = np.array([3, 2, 4]) @@ -91,12 +88,12 @@ class TestCase3(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - r = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + r = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) out = paddle.tile(x, repeat_times=r, **self.attrs) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py index 4194887ab2f053623ffc72451d8239fcf35e9338..bfc31118b4efe186b97e49c0c5901a73134776ea 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_topk_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestTopKOp(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,17 +49,16 @@ class TestTopKOp(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) if not self.use_k_as_const_variable: topk_values, topk_indices = self.op(x, **self.attrs) else: # !important, popart cannot accept non const tensor - K_t = paddle.fluid.layers.fill_constant(shape=[1], - dtype='int32', - value=self.k, - name="in_2") + K_t = paddle.fluid.layers.fill_constant( + shape=[1], dtype='int32', value=self.k, name="in_2" + ) topk_values, topk_indices = self.op(x, K_t, **self.attrs) self.fetch_list = [topk_values.name, topk_indices.name] @@ -83,14 +81,12 @@ class TestTopKOp(IPUOpTest): class TestCase2(TestTopKOp): - def set_test_op(self): self.op = paddle.topk @unittest.skip("Trying to get data as int64 but it is of type int32") class TestCase3(TestTopKOp): - def set_op_attrs(self): self.use_k_as_const_variable = True self.attrs = {} @@ -99,7 +95,6 @@ class TestCase3(TestTopKOp): @unittest.skip("Trying to get data as int64 but it is of type int32") class TestCase4(TestCase3): - def set_test_op(self): self.op = paddle.topk diff --git a/python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py index e8f3cae36739f915d39ffb3567e0b66c4feea933..34e0457c2ded0384f0102cfdd2f02d9f780d6517 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py +++ 
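The topk test above notes that popart cannot accept a non-constant k, so k is materialised with fill_constant before the op is called. A compact sketch of that workaround with placeholder shapes and names; paddle.topk (one of the ops the test parametrises over) stands in for the test's self.op, and this runs on CPU:

import numpy as np
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name='in_0', shape=[3, 5], dtype='float32')
    # Materialise k as a constant tensor instead of passing a Python int.
    k_t = paddle.fluid.layers.fill_constant(
        shape=[1], dtype='int32', value=3, name='in_2'
    )
    values, indices = paddle.topk(x, k_t)

exe = paddle.static.Executor(paddle.CPUPlace())
top_values, top_indices = exe.run(
    main_prog,
    feed={'in_0': np.random.rand(3, 5).astype(np.float32)},
    fetch_list=[values, indices],
)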
b/python/paddle/fluid/tests/unittests/ipu/test_transpose_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.transpose(x, **self.attrs) self.fetch_list = [out.name] @@ -62,13 +61,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"perm": [0, 1, 2, 3]} class TestCase2(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[1, 2, 3, 4, 5]) self.feed_fp32 = {"x": data.astype(np.float32)} @@ -79,7 +76,6 @@ class TestCase2(TestBase): class TestCase_ZeroDim(TestBase): - def set_data_feed(self): data = np.random.uniform(size=[]) self.feed_fp32 = {"x": data.astype(np.float32)} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_unary_ops_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_unary_ops_ipu.py index bbf0f7b6996ed459f2da6b550ce66c6f340d0e69..9670e1da6334d4982a217cf97a2a60128363bb37 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_unary_ops_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_unary_ops_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_test_op() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = self.op(x, **self.op_attrs) self.fetch_list = [out.name] @@ -62,7 +61,6 @@ class TestBase(IPUOpTest): class TestAcos(TestBase): - @property def fp16_enabled(self): return False @@ -77,63 +75,54 @@ class TestAcos(TestBase): class TestAsin(TestAcos): - def set_test_op(self): self.op = paddle.fluid.layers.asin self.op_attrs = {} class TestSinh(TestAcos): - def set_test_op(self): self.op = paddle.fluid.layers.sinh self.op_attrs = {} class TestAtan(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.atan self.op_attrs = {} class TestCeil(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.ceil self.op_attrs = {} class TestCos(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.cos self.op_attrs = {} class TestCosh(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.cosh self.op_attrs = {} class TestErf(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.erf self.op_attrs = {} class TestExp(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.exp self.op_attrs = {} class TestFloor(TestBase): - @property def fp16_enabled(self): return False @@ -144,84 +133,72 @@ class TestFloor(TestBase): class TestLog(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.log self.op_attrs = {} class TestReciprocal(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.reciprocal self.op_attrs = {} class TestRelu(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.relu self.op_attrs = {} class TestRound(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.round self.op_attrs = {} class TestSigmoid(TestBase): - def 
set_test_op(self): self.op = paddle.fluid.layers.sigmoid self.op_attrs = {} class TestSign(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.sign self.op_attrs = {} class TestSin(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.sin self.op_attrs = {} class TestSoftplus(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.softplus self.op_attrs = {} class TestSoftsign(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.softsign self.op_attrs = {} class TestSqrt(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.sqrt self.op_attrs = {} class TestTan(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.tan self.op_attrs = {} class TestTanh(TestBase): - def set_test_op(self): self.op = paddle.fluid.layers.tanh self.op_attrs = {} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py index 3f3b9f4f890627587fd1f79107ca0fb8d0ca20e7..d5f3c5913e4ef7d41fd90774b6643b6e0d835934 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_unsqueeze_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -44,9 +43,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.fluid.layers.unsqueeze(x, **self.attrs) self.fetch_list = [out.name] @@ -62,13 +61,11 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = {"axes": -1} class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = {"axes": [1, 2]} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_unstack_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_unstack_op_ipu.py index 46900011752e70608bb7bb74190b60f93a7a315d..382a70ba012d412afde69c9fd1d7e23b4af4d6e0 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_unstack_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_unstack_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -46,9 +45,9 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) out = paddle.unstack(x, **self.attrs) self.fetch_list = [x.name for x in out] @@ -64,7 +63,6 @@ class TestBase(IPUOpTest): class TestCase1(TestBase): - def set_op_attrs(self): self.attrs = { "axis": 2, @@ -72,7 +70,6 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_op_attrs(self): self.attrs = { "axis": 0, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py index 56e7eef4cb27ff6fde81355428338e6f17c0c588..e42e5b4d54e0b975506fc55a543298e15679fd61 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py @@ -21,7 +21,6 @@ from 
paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -54,9 +53,11 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0], + ) add1 = paddle.fluid.layers.elementwise_add(x, x) reshape = paddle.fluid.layers.reshape(add1, **self.attrs) add2 = paddle.fluid.layers.elementwise_add(reshape, reshape) @@ -75,7 +76,8 @@ class TestBase(IPUOpTest): exe.run(startup_prog) scale1_out = main_prog.global_block().ops[4].output("Out")[0] main_prog.global_block().ops[4]._rename_output( - scale1_out, add2.name) + scale1_out, add2.name + ) main_prog.global_block().ops[5]._rename_input(scale1_out, add2.name) if run_ipu: @@ -83,8 +85,8 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=self.is_training) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -95,10 +97,9 @@ class TestBase(IPUOpTest): res0 = self._test_base(True) res1 = self._test_base(False) - np.testing.assert_allclose(res0.flatten(), - res1.flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol + ) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py index d173ae77e5cbc952bd758f25b12071fb8db63092..1161699893ad4eac93db8c1e23ea3c77cc3b0c62 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -39,20 +38,24 @@ class TestBase(IPUOpTest): self.max_seq_length = 5 self.max_label_length = 3 self.num_classes = 5 - self.logits_length = np.array([self.max_seq_length] * self.batch_size, - dtype=np.int64) - self.labels_length = np.array([self.max_label_length] * self.batch_size, - dtype=np.int64) + self.logits_length = np.array( + [self.max_seq_length] * self.batch_size, dtype=np.int64 + ) + self.labels_length = np.array( + [self.max_label_length] * self.batch_size, dtype=np.int64 + ) self.blank = self.num_classes - 1 self.norm_by_times = False logits = np.random.uniform( - 0.1, 1.0, [self.max_seq_length, self.batch_size, self.num_classes - ]).astype("float32") - labels = np.random.randint(0, - self.num_classes - 1, - [self.batch_size, self.max_label_length], - dtype="int32") + 0.1, 1.0, [self.max_seq_length, self.batch_size, self.num_classes] + ).astype("float32") + labels = np.random.randint( + 0, + self.num_classes - 1, + [self.batch_size, self.max_label_length], + dtype="int32", + ) self.feed_fp32 = { "Logits": logits, @@ -79,26 +82,28 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - data = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - logits = paddle.nn.Linear(self.num_classes, - self.num_classes, - bias_attr=False)(data) 
- labels = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') - input_length = paddle.static.data(name=self.feed_list[2], - shape=self.feed_shape[2], - dtype='int64') - label_length = paddle.static.data(name=self.feed_list[3], - shape=self.feed_shape[3], - dtype='int64') - out = paddle.fluid.layers.warpctc(logits, - labels, - input_length=input_length, - label_length=label_length, - **self.attrs) + data = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32" + ) + logits = paddle.nn.Linear( + self.num_classes, self.num_classes, bias_attr=False + )(data) + labels = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32' + ) + input_length = paddle.static.data( + name=self.feed_list[2], shape=self.feed_shape[2], dtype='int64' + ) + label_length = paddle.static.data( + name=self.feed_list[3], shape=self.feed_shape[3], dtype='int64' + ) + out = paddle.fluid.layers.warpctc( + logits, + labels, + input_length=input_length, + label_length=label_length, + **self.attrs + ) loss = paddle.mean(out) adam = paddle.optimizer.Adam(learning_rate=1e-2) adam.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py index 605942983212a992be2df483ec3d212d6e0e17fe..613032c6262bf49e1ce2ee9ab57b3ee6a742ea93 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py @@ -24,7 +24,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel") class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_data_feed() @@ -57,7 +56,6 @@ class TestBase(IPUOpTest): } def _test_optimizer(self, run_ipu=True): - def exclude_fn(param): return param.name.endswith('.w_0') @@ -70,22 +68,23 @@ class TestBase(IPUOpTest): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name='image', - shape=[1, 3, 10, 10], - dtype='float32') + image = paddle.static.data( + name='image', shape=[1, 3, 10, 10], dtype='float32' + ) bias = paddle.fluid.layers.create_parameter( - shape=[1, 3, 10, 10], is_bias=True, dtype='float32') + shape=[1, 3, 10, 10], is_bias=True, dtype='float32' + ) add1 = image + bias - conv1 = paddle.static.nn.conv2d(add1, - num_filters=3, - filter_size=3, - bias_attr=False) + conv1 = paddle.static.nn.conv2d( + add1, num_filters=3, filter_size=3, bias_attr=False + ) loss = paddle.mean(conv1) opt = paddle.optimizer.Lamb( learning_rate=1e-3, lamb_weight_decay=self.attrs['weight_decay'], - exclude_from_weight_decay_fn=exclude_fn) + exclude_from_weight_decay_fn=exclude_fn, + ) opt.minimize(loss) if run_ipu: @@ -102,10 +101,11 @@ class TestBase(IPUOpTest): ipu_strategy = paddle.static.IpuStrategy() ipu_strategy.set_graph_config(is_training=True) ipu_strategy.set_options( - {'loss_scaling': self.attrs["loss_scaling"]}) + {'loss_scaling': self.attrs["loss_scaling"]} + ) program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + main_prog, ipu_strategy=ipu_strategy + ).compile(feed_list, fetch_list) else: program = main_prog @@ -121,10 +121,9 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() - np.testing.assert_allclose(ipu_loss, - cpu_loss, - rtol=1e-05, - 
atol=self.atol) + np.testing.assert_allclose( + ipu_loss, cpu_loss, rtol=1e-05, atol=self.atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py index 4fe9ef127335759db727be0c3e4538ff308b8ddb..8eb0edf4cfb9ad40f37a55a655888fd3992427aa 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestWeightSharing(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -51,24 +50,27 @@ class TestWeightSharing(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int64') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64' + ) with paddle.static.ipu_shard_guard(index=0, stage=0): y = paddle.fluid.layers.embedding( input=x, size=[768, 768], dtype='float32', param_attr=paddle.fluid.ParamAttr(name='word_embedding'), - is_sparse=False) + is_sparse=False, + ) with paddle.static.ipu_shard_guard(index=1, stage=1): z = paddle.fluid.layers.fc( - input=y, size=768, param_attr=paddle.fluid.ParamAttr(name="fc")) + input=y, size=768, param_attr=paddle.fluid.ParamAttr(name="fc") + ) with paddle.static.ipu_shard_guard(index=0, stage=2): out = paddle.fluid.layers.matmul( x=z, y=self.main_prog.global_block().var('word_embedding'), - transpose_y=True) + transpose_y=True, + ) self.feed_list = [x.name] self.fetch_list = [out.name] @@ -82,15 +84,17 @@ class TestWeightSharing(IPUOpTest): exe.run(self.startup_prog) if run_ipu: ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(num_ipus=2, - is_training=self.is_training, - enable_manual_shard=True) - ipu_strategy.set_pipelining_config(enable_pipelining=True, - batches_per_step=3) + ipu_strategy.set_graph_config( + num_ipus=2, + is_training=self.is_training, + enable_manual_shard=True, + ) + ipu_strategy.set_pipelining_config( + enable_pipelining=True, batches_per_step=3 + ) program = paddle.static.IpuCompiledProgram( - self.main_prog, - ipu_strategy=ipu_strategy).compile(self.feed_list, - self.fetch_list) + self.main_prog, ipu_strategy=ipu_strategy + ).compile(self.feed_list, self.fetch_list) else: program = self.main_prog @@ -102,10 +106,9 @@ class TestWeightSharing(IPUOpTest): res0 = self.run_model(False) res1 = self.run_model(True) - np.testing.assert_allclose(res0.flatten(), - res1[0].flatten(), - rtol=1e-05, - atol=self.atol) + np.testing.assert_allclose( + res0.flatten(), res1[0].flatten(), rtol=1e-05, atol=self.atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_where_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_where_op_ipu.py index 091f5dc960df15fcb56febfa323a334c54bcfa1a..15a815bf5a716c51b67e03499bdf772e6d01381e 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_where_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_where_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -34,11 +33,11 @@ class TestBase(IPUOpTest): data_y = np.random.uniform(size=[4, 5, 6]) self.feed_fp32 = { "in_0": data_x.astype(np.float32), - "in_1": data_y.astype(np.float32) + "in_1": data_y.astype(np.float32), } 
self.feed_fp16 = { "in_0": data_x.astype(np.float16), - "in_1": data_y.astype(np.float16) + "in_1": data_y.astype(np.float16), } def set_feed_attr(self): @@ -51,12 +50,12 @@ class TestBase(IPUOpTest): @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data(name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32' + ) out = paddle.where(x > 1, x, y) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py index 3e19ca2b0c9d2a927a6919e6a38c9e5ca889563f..328e46d430c87f763d2af892d2158e897fa8d887 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_yolo_box_op_ipu.py @@ -21,7 +21,6 @@ from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest class TestBase(IPUOpTest): - def setUp(self): self.set_atol() self.set_training() @@ -50,14 +49,14 @@ class TestBase(IPUOpTest): "class_num": 80, "anchors": [10, 13, 16, 30, 33, 23], "conf_thresh": 0.01, - "downsample_ratio": 32 + "downsample_ratio": 32, } @IPUOpTest.static_graph def build_model(self): - x = paddle.static.data(name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' + ) attrs = { 'name': 'img_size', 'shape': [1, 2], diff --git a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py index ed191d5560378f8b23db0c81554813027801ab6d..d76a17101298f86c8329aa6d84992b720a200c8e 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py @@ -24,7 +24,12 @@ import paddle from paddle.fluid.core import PassVersionChecker import paddle.inference as paddle_infer from typing import Optional, List, Callable, Dict, Any -from program_config import OpConfig, ProgramConfig, create_fake_model, create_quant_model +from program_config import ( + OpConfig, + ProgramConfig, + create_fake_model, + create_quant_model, +) import hypothesis from hypothesis import given, settings @@ -32,22 +37,28 @@ import hypothesis.strategies as st logging.basicConfig(level=logging.INFO, format="%(message)s") -settings.register_profile("ci", - max_examples=100, - suppress_health_check=hypothesis.HealthCheck.all(), - deadline=None, - print_blob=True, - derandomize=True, - report_multiple_bugs=False) -settings.register_profile("dev", - max_examples=1000, - suppress_health_check=hypothesis.HealthCheck.all(), - deadline=None, - print_blob=True, - derandomize=True, - report_multiple_bugs=False) -if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \ - os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci': +settings.register_profile( + "ci", + max_examples=100, + suppress_health_check=hypothesis.HealthCheck.all(), + deadline=None, + print_blob=True, + derandomize=True, + report_multiple_bugs=False, +) +settings.register_profile( + "dev", + max_examples=1000, + suppress_health_check=hypothesis.HealthCheck.all(), + deadline=None, + print_blob=True, + derandomize=True, + report_multiple_bugs=False, 
+) +if ( + float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 + or os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci' +): settings.load_profile("ci") else: settings.load_profile("dev") @@ -69,15 +80,15 @@ SkipReasons = IgnoreReasons class AutoScanTest(unittest.TestCase): - def __init__(self, *args, **kwargs): np.random.seed(1024) paddle.enable_static() super(AutoScanTest, self).__init__(*args, **kwargs) self.ignore_cases = [] abs_dir = os.path.abspath(os.path.dirname(__file__)) - self.cache_dir = os.path.join(abs_dir, - str(self.__module__) + '_cache_dir') + self.cache_dir = os.path.join( + abs_dir, str(self.__module__) + '_cache_dir' + ) self.available_passes_in_framework = set() self.num_ran_programs = 0 self.num_invalid_programs = 0 @@ -97,23 +108,29 @@ class AutoScanTest(unittest.TestCase): raise NotImplementedError @abc.abstractmethod - def add_ignore_check_case(self, teller: [ - Callable[[ProgramConfig, paddle_infer.Config], bool] - ], reason: IgnoreReasons, note: str): + def add_ignore_check_case( + self, + teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]], + reason: IgnoreReasons, + note: str, + ): self.ignore_cases.append((teller, reason, note)) def is_program_valid(self, program_config: ProgramConfig) -> bool: return True - def run_test_config(self, model, params, prog_config, pred_config, - feed_data) -> Dict[str, np.ndarray]: + def run_test_config( + self, model, params, prog_config, pred_config, feed_data + ) -> Dict[str, np.ndarray]: ''' Test a single case. ''' pred_config.set_model_buffer(model, len(model), params, len(params)) predictor = paddle_infer.create_predictor(pred_config) - self.available_passes_in_framework = self.available_passes_in_framework | set( - pred_config.pass_builder().all_passes()) + self.available_passes_in_framework = ( + self.available_passes_in_framework + | set(pred_config.pass_builder().all_passes()) + ) for name, _ in prog_config.inputs.items(): input_tensor = predictor.get_input_handle(name) input_tensor.copy_from_cpu(feed_data[name]['data']) @@ -121,20 +138,28 @@ class AutoScanTest(unittest.TestCase): input_tensor.set_lod(feed_data[name]['lod']) predictor.run() result = {} - for out_name, o_name in zip(prog_config.outputs, - predictor.get_output_names()): + for out_name, o_name in zip( + prog_config.outputs, predictor.get_output_names() + ): result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu() return result @abc.abstractmethod - def assert_tensors_near(self, atol: float, rtol: float, - tensor: Dict[str, np.array], - baseline: Dict[str, np.array]): + def assert_tensors_near( + self, + atol: float, + rtol: float, + tensor: Dict[str, np.array], + baseline: Dict[str, np.array], + ): for key, arr in tensor.items(): self.assertTrue( baseline[key].shape == arr.shape, - "The output shapes are not equal, the baseline shape is " + - str(baseline[key].shape) + ', but got ' + str(arr.shape)) + "The output shapes are not equal, the baseline shape is " + + str(baseline[key].shape) + + ', but got ' + + str(arr.shape), + ) diff = abs(baseline[key] - arr) np.testing.assert_allclose( baseline[key], @@ -142,22 +167,28 @@ class AutoScanTest(unittest.TestCase): rtol=rtol, atol=atol, err_msg='Output has diff, Maximum absolute error: {}'.format( - np.amax(diff))) + np.amax(diff) + ), + ) @abc.abstractmethod def run_test(self, quant=False): raise NotImplementedError - def generate_op_config(self, ops_config: List[Dict[str, - Any]]) -> List[OpConfig]: + def generate_op_config( + self, ops_config: List[Dict[str, Any]] + ) -> 
List[OpConfig]: ops = [] for i in range(len(ops_config)): op_config = ops_config[i] ops.append( - OpConfig(type=op_config['op_type'], - inputs=op_config['op_inputs'], - outputs=op_config['op_outputs'], - attrs=op_config['op_attrs'])) + OpConfig( + type=op_config['op_type'], + inputs=op_config['op_inputs'], + outputs=op_config['op_outputs'], + attrs=op_config['op_attrs'], + ) + ) return ops @abc.abstractmethod @@ -173,11 +204,13 @@ class AutoScanTest(unittest.TestCase): logging.info("SUCCESS: " + msg) @abc.abstractmethod - def create_inference_config(self, - passes: Optional[List[str]] = None, - use_gpu: bool = False, - use_mkldnn: bool = False, - ir_optim: Optional[bool] = None): + def create_inference_config( + self, + passes: Optional[List[str]] = None, + use_gpu: bool = False, + use_mkldnn: bool = False, + ir_optim: Optional[bool] = None, + ): config = paddle_infer.Config() config.switch_ir_debug(True) config.set_optim_cache_dir(self.cache_dir) @@ -195,7 +228,6 @@ class AutoScanTest(unittest.TestCase): class MkldnnAutoScanTest(AutoScanTest): - def __init__(self, *args, **kwargs): super(MkldnnAutoScanTest, self).__init__(*args, **kwargs) @@ -215,7 +247,7 @@ class MkldnnAutoScanTest(AutoScanTest): for name, tensor_config in prog_config.inputs.items(): feed_data[name] = { 'data': tensor_config.data, - 'lod': tensor_config.lod + 'lod': tensor_config.lod, } results: List[Dict[str, np.ndarray]] = [] @@ -223,23 +255,31 @@ class MkldnnAutoScanTest(AutoScanTest): base_config = self.create_inference_config(ir_optim=False) logging.info('RUN program_config: ' + str(prog_config)) results.append( - self.run_test_config(model, params, prog_config, base_config, - feed_data)) + self.run_test_config( + model, params, prog_config, base_config, feed_data + ) + ) self.success_log('RUN_CPU_BASELINE done') - for pred_config, ( - atol, rtol) in self.sample_predictor_configs(prog_config): + for pred_config, (atol, rtol) in self.sample_predictor_configs( + prog_config + ): # skip info ignore_flag = False for ignore_info in self.ignore_cases: if ignore_info[0](prog_config, pred_config): ignore_flag = True - if ignore_info[ - 1] == IgnoreReasons.MKLDNN_ACCURACY_ERROR: + if ( + ignore_info[1] + == IgnoreReasons.MKLDNN_ACCURACY_ERROR + ): self.ignore_log( - "[MKLDNN_ACCURACY_ERROR] " + ignore_info[2] + - ' ' + ' vs ' + - self.inference_config_str(pred_config)) + "[MKLDNN_ACCURACY_ERROR] " + + ignore_info[2] + + ' ' + + ' vs ' + + self.inference_config_str(pred_config) + ) else: raise NotImplementedError break @@ -251,20 +291,26 @@ class MkldnnAutoScanTest(AutoScanTest): try: results.append( - self.run_test_config(model, params, prog_config, - pred_config, feed_data)) - self.assert_tensors_near(atol, rtol, results[-1], - results[0]) + self.run_test_config( + model, params, prog_config, pred_config, feed_data + ) + ) + self.assert_tensors_near( + atol, rtol, results[-1], results[0] + ) except Exception as e: self.fail_log( - self.inference_config_str(pred_config) + - '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) + self.inference_config_str(pred_config) + + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)) + ) if not ignore_flag: status = False continue - self.success_log('RUN predictor_config ' + - self.inference_config_str(pred_config) + - ' done') + self.success_log( + 'RUN predictor_config ' + + self.inference_config_str(pred_config) + + ' done' + ) self.assertTrue(status) @@ -278,7 +324,6 @@ class MkldnnAutoScanTest(AutoScanTest): class PassAutoScanTest(AutoScanTest): - def __init__(self, *args, 
**kwargs): super(PassAutoScanTest, self).__init__(*args, **kwargs) self.passes = [] @@ -299,13 +344,17 @@ class PassAutoScanTest(AutoScanTest): def assert_op_list(self, op_list_after_fusion): if not self.passes: raise ValueError( - "In PassAutoScan you should give a valid pass name.") - last_passed_program = os.path.join(self.cache_dir, - self.passes[-1] + ".pdmodel") + "In PassAutoScan you should give a valid pass name." + ) + last_passed_program = os.path.join( + self.cache_dir, self.passes[-1] + ".pdmodel" + ) if not os.path.exists(last_passed_program): raise ValueError( - "Cannot find file {}, please make sure that your pass name is correct" - .format(last_passed_program)) + "Cannot find file {}, please make sure that your pass name is correct".format( + last_passed_program + ) + ) model_bytes = paddle.static.load_from_file(last_passed_program) pg = paddle.static.deserialize_program(model_bytes) main_block = pg.desc.block(0) @@ -317,16 +366,19 @@ class PassAutoScanTest(AutoScanTest): self.assertTrue( op_list_after_fusion == after_op_list, "Expected operator list after fusion is {}, but now it's {}".format( - op_list_after_fusion, after_op_list), + op_list_after_fusion, after_op_list + ), ) - def run_and_statis(self, - quant=False, - max_examples=100, - reproduce=None, - min_success_num=25, - max_duration=180, - passes=None): + def run_and_statis( + self, + quant=False, + max_examples=100, + reproduce=None, + min_success_num=25, + max_duration=180, + passes=None, + ): if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == "dev": max_examples *= 10 min_success_num *= 10 @@ -343,7 +395,9 @@ class PassAutoScanTest(AutoScanTest): report_multiple_bugs=False, ) settings.load_profile("ci") - assert passes is not None, "Parameter of passes must be defined in function run_and_statis." + assert ( + passes is not None + ), "Parameter of passes must be defined in function run_and_statis." self.passes = passes self.add_ignore_pass_case() @@ -361,32 +415,44 @@ class PassAutoScanTest(AutoScanTest): logging.info("Start to running test of {}".format(type(self))) loop_func() logging.info( - "===================Statistical Information===================") - logging.info("Number of Generated Programs: {}".format( - self.num_ran_programs + self.num_invalid_programs)) - logging.info("Number of Invalid Programs: {}".format( - self.num_invalid_programs)) + "===================Statistical Information===================" + ) + logging.info( + "Number of Generated Programs: {}".format( + self.num_ran_programs + self.num_invalid_programs + ) + ) + logging.info( + "Number of Invalid Programs: {}".format(self.num_invalid_programs) + ) logging.info("Number of Ran Programs: {}".format(self.num_ran_programs)) logging.info("Number of Ignore Tests: {}".format(self.num_ignore_tests)) - successful_ran_programs = int(self.num_ran_programs - - self.num_ignore_tests / - max(self.num_predictor_kinds, 1)) + successful_ran_programs = int( + self.num_ran_programs + - self.num_ignore_tests / max(self.num_predictor_kinds, 1) + ) logging.info( - "Number of successfully ran programs approximately equal to {}". - format(successful_ran_programs)) + "Number of successfully ran programs approximately equal to {}".format( + successful_ran_programs + ) + ) if successful_ran_programs < min_success_num: logging.warning( "satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds" ) logging.error( - "At least {} programs need to ran successfully, but now only about {} programs satisfied." 
- .format(min_success_num, successful_ran_programs)) + "At least {} programs need to ran successfully, but now only about {} programs satisfied.".format( + min_success_num, successful_ran_programs + ) + ) assert False used_time = time.time() - start_time if max_duration > 0 and used_time > max_duration: logging.error( - "The duration exceeds {} seconds, if this is necessary, try to set a larger number for parameter `max_duration`." - .format(max_duration)) + "The duration exceeds {} seconds, if this is necessary, try to set a larger number for parameter `max_duration`.".format( + max_duration + ) + ) assert False def run_test(self, quant=False, prog_configs=None): @@ -406,13 +472,16 @@ class PassAutoScanTest(AutoScanTest): for name, tensor_config in prog_config.inputs.items(): feed_data[name] = { 'data': tensor_config.data, - 'lod': tensor_config.lod + 'lod': tensor_config.lod, } logging.info('RUN program_config: ' + str(prog_config)) self.num_predictor_kinds = 0 - for pred_config, op_list, ( - atol, rtol) in self.sample_predictor_configs(prog_config): + for ( + pred_config, + op_list, + (atol, rtol), + ) in self.sample_predictor_configs(prog_config): self.num_predictor_kinds += 1 # skip info @@ -423,9 +492,12 @@ class PassAutoScanTest(AutoScanTest): self.num_ignore_tests += 1 if ignore_info[1] == IgnoreReasons.PASS_ACCURACY_ERROR: self.ignore_log( - "[PASS_ACCURACY_ERROR] " + ignore_info[2] + - ' ' + ' vs ' + - self.inference_config_str(pred_config)) + "[PASS_ACCURACY_ERROR] " + + ignore_info[2] + + ' ' + + ' vs ' + + self.inference_config_str(pred_config) + ) else: raise NotImplementedError break @@ -437,37 +509,44 @@ class PassAutoScanTest(AutoScanTest): # baseline: no ir_optim run base_config = self.create_inference_config( - ir_optim=False, use_gpu=pred_config.use_gpu()) + ir_optim=False, use_gpu=pred_config.use_gpu() + ) try: # baseline - base_result = self.run_test_config(model, params, - prog_config, base_config, - feed_data) - self.success_log('RUN_BASELINE ' + - self.inference_config_str(base_config) + - ' done') + base_result = self.run_test_config( + model, params, prog_config, base_config, feed_data + ) + self.success_log( + 'RUN_BASELINE ' + + self.inference_config_str(base_config) + + ' done' + ) if os.path.exists(self.cache_dir): shutil.rmtree(self.cache_dir) - pred_result = self.run_test_config(model, params, - prog_config, pred_config, - feed_data) - self.assert_tensors_near(atol, rtol, pred_result, - base_result) + pred_result = self.run_test_config( + model, params, prog_config, pred_config, feed_data + ) + self.assert_tensors_near( + atol, rtol, pred_result, base_result + ) if not ignore_flag: self.assert_op_list(op_list) except Exception as e: self.fail_log( - self.inference_config_str(pred_config) + - '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) + self.inference_config_str(pred_config) + + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)) + ) if not ignore_flag: status = False continue - self.success_log('RUN predictor_config ' + - self.inference_config_str(pred_config) + - ' done') + self.success_log( + 'RUN predictor_config ' + + self.inference_config_str(pred_config) + + ' done' + ) status = self.check_op_version() and status self.assertTrue(status) @@ -502,14 +581,20 @@ class PassAutoScanTest(AutoScanTest): class TrtLayerAutoScanTest(AutoScanTest): - class TensorRTParam: ''' TensorRT subgraph engine parameters. 
''' - def __init__(self, workspace_size, max_batch_size, min_subgraph_size, - precision, use_static, use_calib_mode): + def __init__( + self, + workspace_size, + max_batch_size, + min_subgraph_size, + precision, + use_static, + use_calib_mode, + ): self.workspace_size = workspace_size self.max_batch_size = max_batch_size self.min_subgraph_size = min_subgraph_size @@ -519,11 +604,16 @@ class TrtLayerAutoScanTest(AutoScanTest): class DynamicShapeParam: ''' - Prepare TensorRT subgraph engine dynamic shape parameters. - ''' + Prepare TensorRT subgraph engine dynamic shape parameters. + ''' - def __init__(self, min_input_shape, max_input_shape, opt_input_shape, - disable_trt_plugin_fp16): + def __init__( + self, + min_input_shape, + max_input_shape, + opt_input_shape, + disable_trt_plugin_fp16, + ): self.min_input_shape = min_input_shape self.max_input_shape = max_input_shape self.opt_input_shape = opt_input_shape @@ -537,10 +627,12 @@ class TrtLayerAutoScanTest(AutoScanTest): min_subgraph_size=0, precision=paddle_infer.PrecisionType.Float32, use_static=True, - use_calib_mode=False) + use_calib_mode=False, + ) self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False) self.num_percent_cases = float( - os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) + os.getenv('TEST_NUM_PERCENT_CASES', default='1.0') + ) # Use a seperate random generator for skipping tests self.skip_rng = np.random.default_rng(int(time.strftime("%W"))) @@ -558,31 +650,43 @@ class TrtLayerAutoScanTest(AutoScanTest): min_subgraph_size=self.trt_param.min_subgraph_size, precision_mode=self.trt_param.precision, use_static=self.trt_param.use_static, - use_calib_mode=self.trt_param.use_calib_mode) + use_calib_mode=self.trt_param.use_calib_mode, + ) if self.dynamic_shape.min_input_shape and ( - self.dynamic_shape.min_input_shape.keys() == - self.dynamic_shape.max_input_shape.keys() == - self.dynamic_shape.opt_input_shape.keys()): + self.dynamic_shape.min_input_shape.keys() + == self.dynamic_shape.max_input_shape.keys() + == self.dynamic_shape.opt_input_shape.keys() + ): config.set_trt_dynamic_shape_info( self.dynamic_shape.min_input_shape, self.dynamic_shape.max_input_shape, self.dynamic_shape.opt_input_shape, - self.dynamic_shape.disable_trt_plugin_fp16) + self.dynamic_shape.disable_trt_plugin_fp16, + ) return config - def assert_tensors_near(self, atol: float, rtol: float, - tensor: Dict[str, np.array], - baseline: Dict[str, np.array]): + def assert_tensors_near( + self, + atol: float, + rtol: float, + tensor: Dict[str, np.array], + baseline: Dict[str, np.array], + ): for key, arr in tensor.items(): self.assertEqual( - baseline[key].shape, arr.shape, - 'The output shapes are not equal, the baseline shape is ' + - str(baseline[key].shape) + ', but got ' + str(arr.shape)) + baseline[key].shape, + arr.shape, + 'The output shapes are not equal, the baseline shape is ' + + str(baseline[key].shape) + + ', but got ' + + str(arr.shape), + ) np.testing.assert_allclose(baseline[key], arr, rtol=rtol, atol=atol) def assert_op_size(self, trt_engine_num, paddle_op_num): last_passed_program = os.path.join( - self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel') + self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel' + ) model_bytes = paddle.static.load_from_file(last_passed_program) pg = paddle.static.deserialize_program(model_bytes) main_block = pg.desc.block(0) @@ -593,13 +697,19 @@ class TrtLayerAutoScanTest(AutoScanTest): trt_engine_size = sum(op_types) paddle_op_size = op_size - trt_engine_size self.assertEqual( - 
trt_engine_num, trt_engine_size, + trt_engine_num, + trt_engine_size, 'Expected trt_engine_num is {}, but got {}!'.format( - trt_engine_num, trt_engine_size)) + trt_engine_num, trt_engine_size + ), + ) self.assertEqual( - paddle_op_num, paddle_op_size, + paddle_op_num, + paddle_op_size, 'Expected paddle_op_num is {}, but got {}!'.format( - paddle_op_num, paddle_op_size)) + paddle_op_num, paddle_op_size + ), + ) def inference_config_str(self, config: paddle_infer.Config) -> str: dic = {} @@ -639,21 +749,26 @@ class TrtLayerAutoScanTest(AutoScanTest): for name, tensor_config in prog_config.inputs.items(): feed_data[name] = { 'data': tensor_config.data, - 'lod': tensor_config.lod + 'lod': tensor_config.lod, } results: List[Dict[str, np.ndarray]] = [] if not skip_baseline: - #baseline: gpu run + # baseline: gpu run logging.info('RUN program_config: ' + str(prog_config)) gpu_config = self.create_inference_config(use_trt=False) results.append( - self.run_test_config(model, params, prog_config, gpu_config, - feed_data)) + self.run_test_config( + model, params, prog_config, gpu_config, feed_data + ) + ) self.success_log('RUN_GPU_BASELINE done') - for pred_config, nodes_num, threshold in self.sample_predictor_configs( - prog_config): + for ( + pred_config, + nodes_num, + threshold, + ) in self.sample_predictor_configs(prog_config): if os.path.exists(self.cache_dir): shutil.rmtree(self.cache_dir) @@ -662,17 +777,24 @@ class TrtLayerAutoScanTest(AutoScanTest): atol = threshold rtol = 1e-8 elif isinstance(threshold, list) or isinstance( - threshold, tuple): + threshold, tuple + ): atol = threshold[0] rtol = threshold[1] else: raise NotImplementedError - if pred_config.tensorrt_precision_mode( - ) != paddle_infer.PrecisionType.Int8 and quant: + if ( + pred_config.tensorrt_precision_mode() + != paddle_infer.PrecisionType.Int8 + and quant + ): continue - if pred_config.tensorrt_precision_mode( - ) == paddle_infer.PrecisionType.Int8 and not quant: + if ( + pred_config.tensorrt_precision_mode() + == paddle_infer.PrecisionType.Int8 + and not quant + ): continue ignore_flag = False @@ -682,11 +804,15 @@ class TrtLayerAutoScanTest(AutoScanTest): if reason == IgnoreReasons.TRT_NOT_IMPLEMENTED: self.ignore_log( '[TRT_NOT_IMPLEMENTED] {} vs {}'.format( - note, - self.inference_config_str(pred_config))) + note, self.inference_config_str(pred_config) + ) + ) elif reason == IgnoreReasons.TRT_NOT_SUPPORT: - self.ignore_log('[TRT_NOT_SUPPORT] {} vs {}'.format( - note, self.inference_config_str(pred_config))) + self.ignore_log( + '[TRT_NOT_SUPPORT] {} vs {}'.format( + note, self.inference_config_str(pred_config) + ) + ) else: raise NotImplementedError break @@ -697,30 +823,45 @@ class TrtLayerAutoScanTest(AutoScanTest): try: pred_config_deserialize = paddle_infer.Config(pred_config) results.append( - self.run_test_config(model, params, prog_config, - pred_config, feed_data)) - self.assert_tensors_near(atol, rtol, results[-1], - results[0]) + self.run_test_config( + model, params, prog_config, pred_config, feed_data + ) + ) + self.assert_tensors_near( + atol, rtol, results[-1], results[0] + ) trt_engine_num, paddle_op_num = nodes_num self.assert_op_size(trt_engine_num, paddle_op_num) # deserialize test if trt_engine_num > 0: - self.run_test_config(model, params, prog_config, - pred_config_deserialize, feed_data) - - self.success_log('RUN predictor_config {} done'.format( - self.inference_config_str(pred_config))) + self.run_test_config( + model, + params, + prog_config, + pred_config_deserialize, + feed_data, + ) + + 
self.success_log( + 'RUN predictor_config {} done'.format( + self.inference_config_str(pred_config) + ) + ) except Exception as e: self.fail_log( - self.inference_config_str(pred_config) + - '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) + self.inference_config_str(pred_config) + + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e)) + ) all_passes = False self.assertTrue(all_passes) # TODO(wilber): just for backward compatible - def add_skip_case(self, teller: [ - Callable[[ProgramConfig, paddle_infer.Config], bool] - ], reason: IgnoreReasons, note: str): + def add_skip_case( + self, + teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]], + reason: IgnoreReasons, + note: str, + ): self.ignore_cases.append((teller, reason, note)) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py index 60980fbb732f8b750c6fa752b619057abb0f2534..cc493349037e46d3f269415dfa58bd38a3e3d3fb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py @@ -24,7 +24,6 @@ from paddle.fluid.core import create_paddle_predictor class InferencePassTest(unittest.TestCase): - def __init__(self, methodName='runTest'): paddle.enable_static() super(InferencePassTest, self).__init__(methodName) @@ -49,24 +48,28 @@ class InferencePassTest(unittest.TestCase): def _get_place(self): return set([False, core.is_compiled_with_cuda()]) - def _save_models(self, dirname, feeded_var_names, target_vars, executor, - program, scope): + def _save_models( + self, dirname, feeded_var_names, target_vars, executor, program, scope + ): with fluid.scope_guard(scope): # save models as combined to ensure that # there won't be too many useless files # after finishing a couple of tests. - fluid.io.save_inference_model(dirname, feeded_var_names, - target_vars, executor, program) + fluid.io.save_inference_model( + dirname, feeded_var_names, target_vars, executor, program + ) def _get_paddle_outs(self, executor, program, scope): ''' Return PaddlePaddle outputs. ''' with fluid.scope_guard(scope): - outs = executor.run(program=program, - feed=self.feeds, - fetch_list=self.fetch_list, - return_numpy=False) + outs = executor.run( + program=program, + feed=self.feeds, + fetch_list=self.fetch_list, + return_numpy=False, + ) return outs def _get_inference_outs(self, config): @@ -95,10 +98,9 @@ class InferencePassTest(unittest.TestCase): return outs - def _get_analysis_config(self, - use_gpu=False, - use_trt=False, - use_mkldnn=False): + def _get_analysis_config( + self, use_gpu=False, use_trt=False, use_mkldnn=False + ): ''' Return a new object of AnalysisConfig. 
''' @@ -116,19 +118,22 @@ class InferencePassTest(unittest.TestCase): self.trt_parameters.min_subgraph_size, self.trt_parameters.precision, self.trt_parameters.use_static, - self.trt_parameters.use_calib_mode) + self.trt_parameters.use_calib_mode, + ) if self.trt_parameters.use_inspector: config.enable_tensorrt_inspector() self.assertTrue( config.tensorrt_inspector_enabled(), - "The inspector option is not set correctly.") + "The inspector option is not set correctly.", + ) if self.dynamic_shape_params: config.set_trt_dynamic_shape_info( self.dynamic_shape_params.min_input_shape, self.dynamic_shape_params.max_input_shape, self.dynamic_shape_params.optim_input_shape, - self.dynamic_shape_params.disable_trt_plugin_fp16) + self.dynamic_shape_params.disable_trt_plugin_fp16, + ) if self.enable_tensorrt_varseqlen: config.enable_tensorrt_varseqlen() @@ -145,18 +150,16 @@ class InferencePassTest(unittest.TestCase): or disable TensorRT, enable MKLDNN or disable MKLDNN are all the same. ''' - self.assertFalse(self.feeds is None, - "The inputs of the model is None. ") + self.assertFalse( + self.feeds is None, "The inputs of the model is None. " + ) use_gpu = self._get_place() for place_ in use_gpu: self.check_output_with_option(place_, atol) - def check_output_with_option(self, - use_gpu, - atol=1e-5, - flatten=False, - quant=False, - rtol=1e-5): + def check_output_with_option( + self, use_gpu, atol=1e-5, flatten=False, quant=False, rtol=1e-5 + ): ''' Check whether calculating on CPU and GPU, enable TensorRT or disable TensorRT, enable MKLDNN or disable MKLDNN @@ -168,17 +171,26 @@ class InferencePassTest(unittest.TestCase): device = "GPU" if use_gpu else "CPU" with fluid.scope_guard(scope): executor.run(self.startup_program) - self._save_models(self.path, list(self.feeds.keys()), self.fetch_list, - executor, self.main_program, scope) + self._save_models( + self.path, + list(self.feeds.keys()), + self.fetch_list, + executor, + self.main_program, + scope, + ) paddle_outs = self._get_paddle_outs(executor, self.main_program, scope) inference_outs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu)) + self._get_analysis_config(use_gpu=use_gpu) + ) # Check whether the results calculated on CPU and on GPU are the same. self.assertTrue( len(paddle_outs) == len(inference_outs), - "The number of outputs is different between inference and training forward at {}" - .format(device)) + "The number of outputs is different between inference and training forward at {}".format( + device + ), + ) for out, inference_out in zip(paddle_outs, inference_outs): paddle_out = np.array(out) @@ -191,28 +203,35 @@ class InferencePassTest(unittest.TestCase): inference_out, rtol=1e-05, atol=atol, - err_msg= - 'Output has diff between inference and training forward at {} '. - format(device)) + err_msg='Output has diff between inference and training forward at {} '.format( + device + ), + ) # Check whether the trt results and the GPU results are the same. 
if use_gpu and self.enable_trt: tensorrt_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_trt=self.enable_trt)) + self._get_analysis_config( + use_gpu=use_gpu, use_trt=self.enable_trt + ) + ) if self.trt_parameters.use_static: - #deserialize + # deserialize tensorrt_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_trt=self.enable_trt)) + self._get_analysis_config( + use_gpu=use_gpu, use_trt=self.enable_trt + ) + ) self.assertTrue( len(tensorrt_outputs) == len(paddle_outs), - "The number of outputs is different between GPU and TensorRT. ") + "The number of outputs is different between GPU and TensorRT. ", + ) - for paddle_out, tensorrt_output in zip(paddle_outs, - tensorrt_outputs): + for paddle_out, tensorrt_output in zip( + paddle_outs, tensorrt_outputs + ): paddle_out = np.array(paddle_out) if flatten: paddle_out = paddle_out.flatten() @@ -223,17 +242,21 @@ class InferencePassTest(unittest.TestCase): paddle_out, rtol=rtol, atol=atol, - err_msg='Output has diff between GPU and TensorRT. ') + err_msg='Output has diff between GPU and TensorRT. ', + ) # Check whether the mkldnn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: mkldnn_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_mkldnn=self.enable_mkldnn)) + self._get_analysis_config( + use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn + ) + ) self.assertTrue( len(paddle_outs) == len(mkldnn_outputs), - "The number of outputs is different between CPU and MKLDNN. ") + "The number of outputs is different between CPU and MKLDNN. ", + ) if self.enable_mkldnn_bfloat16: atol = 0.01 @@ -243,21 +266,24 @@ class InferencePassTest(unittest.TestCase): mkldnn_output, rtol=1e-05, atol=atol, - err_msg='Output has diff between CPU and MKLDNN. ') + err_msg='Output has diff between CPU and MKLDNN. ', + ) class TensorRTParam: ''' Prepare TensorRT subgraph engine parameters. ''' - def __init__(self, - workspace_size, - max_batch_size, - min_subgraph_size, - precision, - use_static, - use_calib_mode, - use_inspector=False): + def __init__( + self, + workspace_size, + max_batch_size, + min_subgraph_size, + precision, + use_static, + use_calib_mode, + use_inspector=False, + ): self.workspace_size = workspace_size self.max_batch_size = max_batch_size self.min_subgraph_size = min_subgraph_size @@ -271,8 +297,13 @@ class InferencePassTest(unittest.TestCase): Prepare TensorRT subgraph engine dynamic shape parameters. ''' - def __init__(self, min_input_shape, max_input_shape, optim_input_shape, - disable_trt_plugin_fp16): + def __init__( + self, + min_input_shape, + max_input_shape, + optim_input_shape, + disable_trt_plugin_fp16, + ): self.min_input_shape = min_input_shape self.max_input_shape = max_input_shape self.optim_input_shape = optim_input_shape diff --git a/python/paddle/fluid/tests/unittests/ir/inference/program_config.py b/python/paddle/fluid/tests/unittests/ir/inference/program_config.py index 5ad6f1c885cf95a28faa8f2c7a4cdf161fc9ed27..74375d9367d4e7aa1954bb215993ddab8e0bf70c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/program_config.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/program_config.py @@ -32,10 +32,12 @@ class TensorConfig: A config builder for a input or a weight. 
''' - def __init__(self, - lod: Optional[List[List[int]]] = None, - data_gen: Optional[Callable[..., np.array]] = None, - shape: Optional[List[List[int]]] = None): + def __init__( + self, + lod: Optional[List[List[int]]] = None, + data_gen: Optional[Callable[..., np.array]] = None, + shape: Optional[List[List[int]]] = None, + ): ''' shape: The shape of the tensor. dtype: The data type of the tensor. @@ -48,7 +50,9 @@ class TensorConfig: self.dtype = data_gen().dtype self.shape = data_gen().shape else: - assert shape is not None, "While data_gen is not defined, shape must not be None" + assert ( + shape is not None + ), "While data_gen is not defined, shape must not be None" self.data = np.random.normal(0.0, 1.0, shape).astype(np.float32) self.shape = shape self.dtype = self.data.dtype @@ -64,16 +68,18 @@ class VarType(enum.Enum): class OpConfig: - ''' A config builder for generating a Op. ''' - - def __init__(self, - type: str, - inputs: Dict[str, List[str]], - outputs: Dict[str, List[str]], - attrs: Dict[str, Any] = None, - outputs_var_type: Dict[str, VarType] = None, - outputs_dtype: Dict[str, np.dtype] = None, - **kwargs): + '''A config builder for generating a Op.''' + + def __init__( + self, + type: str, + inputs: Dict[str, List[str]], + outputs: Dict[str, List[str]], + attrs: Dict[str, Any] = None, + outputs_var_type: Dict[str, VarType] = None, + outputs_dtype: Dict[str, np.dtype] = None, + **kwargs, + ): self.type = type self.inputs = inputs self.outputs = outputs @@ -91,26 +97,50 @@ class OpConfig: _OP_WITHOUT_KERNEL_SET = { - 'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad', - 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv', - 'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify', - 'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id', - 'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream', - 'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv', - 'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl', - 'copy_cross_scope' + 'feed', + 'fetch', + 'recurrent', + 'go', + 'rnn_memory_helper_grad', + 'conditional_block', + 'while', + 'send', + 'recv', + 'listen_and_serv', + 'fl_listen_and_serv', + 'ncclInit', + 'select', + 'checkpoint_notify', + 'gen_bkcl_id', + 'c_gen_bkcl_id', + 'gen_nccl_id', + 'c_gen_nccl_id', + 'c_comm_init', + 'c_sync_calc_stream', + 'c_sync_comm_stream', + 'queue_generator', + 'dequeue', + 'enqueue', + 'heter_listen_and_serv', + 'c_wait_comm', + 'c_wait_compute', + 'c_gen_hccl_id', + 'c_comm_init_hccl', + 'copy_cross_scope', } class BlockConfig: - ''' A config builder for generating a Block. 
''' - - def __init__(self, - ops: List[OpConfig], - vars: List[str], - vars_dtype: Dict[str, np.dtype] = None, - vars_var_type: Dict[str, VarType] = None, - vars_lod_level: Dict[str, int] = None): + '''A config builder for generating a Block.''' + + def __init__( + self, + ops: List[OpConfig], + vars: List[str], + vars_dtype: Dict[str, np.dtype] = None, + vars_var_type: Dict[str, VarType] = None, + vars_lod_level: Dict[str, int] = None, + ): self.ops = ops self.vars = vars self.vars_dtype = vars_dtype @@ -121,10 +151,14 @@ class BlockConfig: for name in self.vars: var_desc = block_desc.var(name.encode()) var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) - if self.vars_lod_level is not None and name in self.vars_lod_level.keys( + if ( + self.vars_lod_level is not None + and name in self.vars_lod_level.keys() ): var_desc.set_lod_level(self.vars_lod_level[name]) - if self.vars_var_type is not None and name in self.vars_var_type.keys( + if ( + self.vars_var_type is not None + and name in self.vars_var_type.keys() ): if self.vars_var_type[name] == VarType.LOD_TENSOR_ARRAY: var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY) @@ -134,7 +168,8 @@ class BlockConfig: var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32)) if self.vars_dtype is not None and name in self.vars_dtype.keys(): var_desc.set_dtype( - convert_np_dtype_to_dtype_(self.vars_dtype[name])) + convert_np_dtype_to_dtype_(self.vars_dtype[name]) + ) for op_config in self.ops: op_desc = block_desc.append_op() @@ -150,22 +185,32 @@ class BlockConfig: continue var_desc = block_desc.var(v.encode()) var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) - if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys( + if ( + op_config.outputs_var_type is not None + and v in op_config.outputs_var_type.keys() ): - if op_config.outputs_var_type[ - v] == VarType.LOD_TENSOR_ARRAY: + if ( + op_config.outputs_var_type[v] + == VarType.LOD_TENSOR_ARRAY + ): var_desc.set_type( - core.VarDesc.VarType.LOD_TENSOR_ARRAY) - elif op_config.outputs_var_type[ - v] == VarType.STEP_SCOPES: + core.VarDesc.VarType.LOD_TENSOR_ARRAY + ) + elif ( + op_config.outputs_var_type[v] == VarType.STEP_SCOPES + ): var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES) continue var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32)) - if op_config.outputs_dtype is not None and v in op_config.outputs_dtype.keys( + if ( + op_config.outputs_dtype is not None + and v in op_config.outputs_dtype.keys() ): var_desc.set_dtype( convert_np_dtype_to_dtype_( - op_config.outputs_dtype[v])) + op_config.outputs_dtype[v] + ) + ) if op_config.type not in _OP_WITHOUT_KERNEL_SET: op_desc.infer_var_type(block_desc) op_desc.infer_shape(block_desc) @@ -173,10 +218,15 @@ class BlockConfig: class ProgramConfig: - ''' A config builder for generating a Program. ''' - - def __init__(self, ops: List[OpConfig], weights: Dict[str, TensorConfig], - inputs: Dict[str, TensorConfig], outputs: List[str]): + '''A config builder for generating a Program.''' + + def __init__( + self, + ops: List[OpConfig], + weights: Dict[str, TensorConfig], + inputs: Dict[str, TensorConfig], + outputs: List[str], + ): self.ops = ops # if no weight need to save, we create a place_holder to help seriazlie params. if not weights: @@ -209,7 +259,7 @@ class ProgramConfig: def create_fake_model(program_config): - ''' Create a Paddle model(in memory) according to the given config. 
''' + '''Create a Paddle model(in memory) according to the given config.''' paddle.enable_static() main_program_desc = core.ProgramDesc() util_program = fluid.Program() @@ -249,21 +299,22 @@ def create_fake_model(program_config): shape=tensor_config.shape, type=core.VarDesc.VarType.LOD_TENSOR, name=name, - initializer=NumpyArrayInitializer(tensor_config.data)) + initializer=NumpyArrayInitializer(tensor_config.data), + ) in_vars = [] for name in sorted(save_var_map.keys()): in_vars.append(save_var_map[name]) out_var = util_program.global_block().create_var( - type=core.VarDesc.VarType.RAW, name="out_var_0") + type=core.VarDesc.VarType.RAW, name="out_var_0" + ) out_var.desc.set_persistable(True) - util_program.global_block().append_op(type='save_combine', - inputs={'X': in_vars}, - outputs={'Y': out_var}, - attrs={ - 'file_path': '', - 'save_to_memory': True - }) + util_program.global_block().append_op( + type='save_combine', + inputs={'X': in_vars}, + outputs={'Y': out_var}, + attrs={'file_path': '', 'save_to_memory': True}, + ) for op_config in program_config.ops: op_desc = main_block_desc.append_op() op_desc.set_type(op_config.type) @@ -283,19 +334,26 @@ def create_fake_model(program_config): continue var_desc = main_block_desc.var(v.encode()) var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) - if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys( + if ( + op_config.outputs_var_type is not None + and v in op_config.outputs_var_type.keys() ): - if op_config.outputs_var_type[ - v] == VarType.LOD_TENSOR_ARRAY: + if ( + op_config.outputs_var_type[v] + == VarType.LOD_TENSOR_ARRAY + ): var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY) elif op_config.outputs_var_type[v] == VarType.STEP_SCOPES: var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES) continue var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32)) - if op_config.outputs_dtype is not None and v in op_config.outputs_dtype.keys( + if ( + op_config.outputs_dtype is not None + and v in op_config.outputs_dtype.keys() ): var_desc.set_dtype( - convert_np_dtype_to_dtype_(op_config.outputs_dtype[v])) + convert_np_dtype_to_dtype_(op_config.outputs_dtype[v]) + ) if op_config.type not in _OP_WITHOUT_KERNEL_SET: op_desc.infer_var_type(main_block_desc) op_desc.infer_shape(main_block_desc) @@ -327,19 +385,26 @@ def create_fake_model(program_config): return model, params -def create_quant_model(model, - params, - activation_quantize_type='moving_average_abs_max', - weight_quantize_type='channel_wise_abs_max', - save=False): +def create_quant_model( + model, + params, + activation_quantize_type='moving_average_abs_max', + weight_quantize_type='channel_wise_abs_max', + save=False, +): place = paddle.CUDAPlace(0) scope = global_scope() exe = paddle.static.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = paddle.static.load_inference_model(path_prefix=None, - executor=exe, - model_filename=model, - params_filename=params) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model( + path_prefix=None, + executor=exe, + model_filename=model, + params_filename=params, + ) graph = IrGraph(core.Graph(inference_program.desc), for_test=True) out_scale_op_list = [ @@ -431,11 +496,11 @@ def create_quant_model(model, def _get_op_output_var_names(op): """ """ - assert isinstance(op, (IrNode, Operator)), \ - "The input op should be IrNode or Operator." + assert isinstance( + op, (IrNode, Operator) + ), "The input op should be IrNode or Operator." 
var_names = [] - op_name = op.name() if isinstance(op, IrNode) \ - else op.type + op_name = op.name() if isinstance(op, IrNode) else op.type if op_name not in op_real_in_out_name: return [] @@ -452,7 +517,8 @@ def create_quant_model(model, scope=scope, place=place, activation_quantize_type=activation_quantize_type, - weight_quantize_type=weight_quantize_type) + weight_quantize_type=weight_quantize_type, + ) transform_pass.apply(graph) op_nodes = graph.all_op_nodes() @@ -461,15 +527,18 @@ def create_quant_model(model, var_names = _get_op_output_var_names(op_node) for var_name in var_names: in_node = graph._find_node_by_name(op_node.outputs, var_name) - if in_node.dtype() not in \ - [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + if in_node.dtype() not in [ + core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32, + ]: continue op_node.op()._set_attr("out_threshold", 3.0) # Freeze graph for inference, but the weight of fc/conv is still float type. freeze_pass = QuantizationFreezePass( - scope=scope, place=place, weight_quantize_type=weight_quantize_type) + scope=scope, place=place, weight_quantize_type=weight_quantize_type + ) freeze_pass.apply(graph) main_program = graph.to_program() @@ -487,18 +556,21 @@ def create_quant_model(model, tensor.set(np.ones(tensor.shape(), dtype=np.float32), place) if save: - fluid.io.save_inference_model('test_inference_model', - feed_target_names, - fetch_targets, - exe, - main_program=main_program) + fluid.io.save_inference_model( + 'test_inference_model', + feed_target_names, + fetch_targets, + exe, + main_program=main_program, + ) feed_vars = [ main_program.global_block().var(name) for name in feed_target_names ] - serialized_program = paddle.static.serialize_program(feed_vars, - fetch_targets, - program=main_program) + serialized_program = paddle.static.serialize_program( + feed_vars, fetch_targets, program=main_program + ) serialized_params = paddle.static.serialize_persistables( - feed_vars, fetch_targets, executor=exe, program=main_program) + feed_vars, fetch_targets, executor=exe, program=main_program + ) return serialized_program, serialized_params diff --git a/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py b/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py index c41e505332f85884ea45b38661f38365c870a202..bdbff1a385d7dd32b2084119bfe6b98436bc8bd2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py @@ -24,14 +24,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass from paddle.fluid.contrib.slim.quantization import OutScaleForInferencePass from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass -from paddle.fluid import (core, Program, Variable) +from paddle.fluid import core, Program, Variable from paddle.fluid.io import prepend_feed_ops, append_fetch_ops from paddle.fluid.core import create_paddle_predictor from paddle.fluid.core import AnalysisConfig class QuantDequantTest(unittest.TestCase): - def __init__(self, methodName='runTest'): super(QuantDequantTest, self).__init__(methodName) paddle.enable_static() @@ -60,28 +59,34 @@ class QuantDequantTest(unittest.TestCase): def _normalize_program(self, program, feed_vars, fetch_vars): if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" % - type(program)) + "program 
type must be `fluid.Program`, but received `%s`" + % type(program) + ) if not isinstance(feed_vars, list): feed_vars = [feed_vars] if not all(isinstance(v, Variable) for v in feed_vars): raise TypeError( - "feed_vars type must be a Variable or a list of Variable.") + "feed_vars type must be a Variable or a list of Variable." + ) if not isinstance(fetch_vars, list): fetch_vars = [fetch_vars] if not all(isinstance(v, Variable) for v in fetch_vars): raise TypeError( - "fetch_vars type must be a Variable or a list of Variable.") + "fetch_vars type must be a Variable or a list of Variable." + ) # remind users to set auc_states to 0 if auc op were found. for op in program.global_block().ops: # clear device of Op - device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName( + device_attr_name = ( + core.op_proto_and_checker_maker.kOpDeviceAttrName() ) op._set_attr(device_attr_name, "") if op.type == 'auc': - warnings.warn("Be sure that you have set auc states to 0 " - "before saving inference model.") + warnings.warn( + "Be sure that you have set auc states to 0 " + "before saving inference model." + ) break # serialize program @@ -98,7 +103,8 @@ class QuantDequantTest(unittest.TestCase): feed_var_names = [var.name for var in feed_vars] copy_program = copy_program._prune_with_input( - feeded_var_names=feed_var_names, targets=fetch_vars) + feeded_var_names=feed_var_names, targets=fetch_vars + ) copy_program = copy_program._inference_optimize(prune_read_op=True) fetch_var_names = [var.name for var in fetch_vars] prepend_feed_ops(copy_program, feed_var_names) @@ -106,25 +112,30 @@ class QuantDequantTest(unittest.TestCase): copy_program.desc._set_version() return copy_program - def _save_models(self, dirname, feeded_var_names, target_vars, executor, - program, scope): + def _save_models( + self, dirname, feeded_var_names, target_vars, executor, program, scope + ): with fluid.scope_guard(scope): - fluid.io.save_inference_model(dirname, - feeded_var_names, - target_vars, - executor, - program, - clip_extra=True) + fluid.io.save_inference_model( + dirname, + feeded_var_names, + target_vars, + executor, + program, + clip_extra=True, + ) def _get_paddle_outs(self, feed, fetch_list, executor, program, scope): ''' Return PaddlePaddle outputs. ''' with fluid.scope_guard(scope): - outs = executor.run(program=program, - feed=feed, - fetch_list=fetch_list, - return_numpy=True) + outs = executor.run( + program=program, + feed=feed, + fetch_list=fetch_list, + return_numpy=True, + ) return outs def _get_inference_outs(self, config): @@ -152,10 +163,9 @@ class QuantDequantTest(unittest.TestCase): ] return outs - def _get_analysis_config(self, - use_gpu=False, - use_trt=False, - use_mkldnn=False): + def _get_analysis_config( + self, use_gpu=False, use_trt=False, use_mkldnn=False + ): ''' Return a new object of AnalysisConfig. 
''' @@ -173,14 +183,16 @@ class QuantDequantTest(unittest.TestCase): self.trt_parameters.min_subgraph_size, self.trt_parameters.precision, self.trt_parameters.use_static, - self.trt_parameters.use_calib_mode) + self.trt_parameters.use_calib_mode, + ) if self.dynamic_shape_params: config.set_trt_dynamic_shape_info( self.dynamic_shape_params.min_input_shape, self.dynamic_shape_params.max_input_shape, self.dynamic_shape_params.optim_input_shape, - self.dynamic_shape_params.disable_trt_plugin_fp16) + self.dynamic_shape_params.disable_trt_plugin_fp16, + ) if self.enable_tensorrt_varseqlen: config.enable_tensorrt_varseqlen() @@ -191,12 +203,9 @@ class QuantDequantTest(unittest.TestCase): print('config summary:', config.summary()) return config - def check_output_with_option(self, - use_gpu, - atol=1e-5, - flatten=False, - quant=False, - rtol=1e-5): + def check_output_with_option( + self, use_gpu, atol=1e-5, flatten=False, quant=False, rtol=1e-5 + ): ''' Check whether calculating on CPU and GPU, enable TensorRT or disable TensorRT, enable MKLDNN or disable MKLDNN @@ -211,14 +220,16 @@ class QuantDequantTest(unittest.TestCase): executor.run(self.startup_program) executor.run(self.test_startup_program) main_graph = IrGraph(core.Graph(self.main_program.desc), for_test=False) - test_graph = IrGraph(core.Graph(self.test_main_program.desc), - for_test=True) + test_graph = IrGraph( + core.Graph(self.test_main_program.desc), for_test=True + ) transform_pass = QuantizationTransformPass( scope=scope, place=place, activation_quantize_type=self.activation_quantize_type, - weight_quantize_type=self.weight_quantize_type) + weight_quantize_type=self.weight_quantize_type, + ) transform_pass.apply(main_graph) transform_pass.apply(test_graph) @@ -237,17 +248,19 @@ class QuantDequantTest(unittest.TestCase): iters = 10 batch_size = 1 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) - feeder = fluid.DataFeeder(feed_list=[self.data, self.label], - place=place) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) + feeder = fluid.DataFeeder( + feed_list=[self.data, self.label], place=place + ) with fluid.scope_guard(scope): for _ in range(iters): data = next(train_reader()) - loss_v = executor.run(binary, - feed=feeder.feed(data), - fetch_list=[self.loss]) + loss_v = executor.run( + binary, feed=feeder.feed(data), fetch_list=[self.loss] + ) scale_inference_pass = OutScaleForInferencePass(scope=scope) scale_inference_pass.apply(test_graph) @@ -256,29 +269,40 @@ class QuantDequantTest(unittest.TestCase): freeze_pass = QuantizationFreezePass( scope=scope, place=place, - weight_quantize_type=self.weight_quantize_type) + weight_quantize_type=self.weight_quantize_type, + ) freeze_pass.apply(test_graph) self.main_program = test_graph.to_program() with fluid.scope_guard(scope): - self.main_program = self._normalize_program(self.main_program, - self.data, - self.fetch_list) - - self._save_models(self.path, list(self.feeds.keys()), self.fetch_list, - executor, self.main_program, scope) + self.main_program = self._normalize_program( + self.main_program, self.data, self.fetch_list + ) - paddle_outs = self._get_paddle_outs(self.feeds, self.fetch_list, - executor, self.main_program, scope) + self._save_models( + self.path, + list(self.feeds.keys()), + self.fetch_list, + executor, + self.main_program, + scope, + ) + + paddle_outs = self._get_paddle_outs( + self.feeds, self.fetch_list, 
executor, self.main_program, scope + ) inference_outs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu)) + self._get_analysis_config(use_gpu=use_gpu) + ) # Check whether the results calculated on CPU and on GPU are the same. self.assertTrue( len(paddle_outs) == len(inference_outs), - "The number of outputs is different between inference and training forward at {}" - .format(device)) + "The number of outputs is different between inference and training forward at {}".format( + device + ), + ) for out, inference_out in zip(paddle_outs, inference_outs): paddle_out = np.array(out) @@ -292,28 +316,35 @@ class QuantDequantTest(unittest.TestCase): inference_out, rtol=1e-05, atol=atol, - err_msg= - 'Output has diff between inference and training forward at {} '. - format(device)) + err_msg='Output has diff between inference and training forward at {} '.format( + device + ), + ) # Check whether the trt results and the GPU results are the same. if use_gpu and self.enable_trt: tensorrt_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_trt=self.enable_trt)) + self._get_analysis_config( + use_gpu=use_gpu, use_trt=self.enable_trt + ) + ) if self.trt_parameters.use_static: - #deserialize + # deserialize tensorrt_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_trt=self.enable_trt)) + self._get_analysis_config( + use_gpu=use_gpu, use_trt=self.enable_trt + ) + ) self.assertTrue( len(tensorrt_outputs) == len(paddle_outs), - "The number of outputs is different between GPU and TensorRT. ") + "The number of outputs is different between GPU and TensorRT. ", + ) - for paddle_out, tensorrt_output in zip(paddle_outs, - tensorrt_outputs): + for paddle_out, tensorrt_output in zip( + paddle_outs, tensorrt_outputs + ): paddle_out = np.array(paddle_out) if flatten: @@ -325,17 +356,21 @@ class QuantDequantTest(unittest.TestCase): tensorrt_output, rtol=rtol, atol=atol, - err_msg='Output has diff between GPU and TensorRT. ') + err_msg='Output has diff between GPU and TensorRT. ', + ) # Check whether the mkldnn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: mkldnn_outputs = self._get_inference_outs( - self._get_analysis_config(use_gpu=use_gpu, - use_mkldnn=self.enable_mkldnn)) + self._get_analysis_config( + use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn + ) + ) self.assertTrue( len(paddle_outs) == len(mkldnn_outputs), - "The number of outputs is different between CPU and MKLDNN. ") + "The number of outputs is different between CPU and MKLDNN. ", + ) if self.enable_mkldnn_bfloat16: atol = 0.01 @@ -345,15 +380,23 @@ class QuantDequantTest(unittest.TestCase): mkldnn_output, rtol=1e-05, atol=atol, - err_msg='Output has diff between CPU and MKLDNN. ') + err_msg='Output has diff between CPU and MKLDNN. ', + ) class TensorRTParam: ''' Prepare TensorRT subgraph engine parameters. ''' - def __init__(self, workspace_size, max_batch_size, min_subgraph_size, - precision, use_static, use_calib_mode): + def __init__( + self, + workspace_size, + max_batch_size, + min_subgraph_size, + precision, + use_static, + use_calib_mode, + ): self.workspace_size = workspace_size self.max_batch_size = max_batch_size self.min_subgraph_size = min_subgraph_size @@ -366,8 +409,13 @@ class QuantDequantTest(unittest.TestCase): Prepare TensorRT subgraph engine dynamic shape parameters. 
''' - def __init__(self, min_input_shape, max_input_shape, optim_input_shape, - disable_trt_plugin_fp16): + def __init__( + self, + min_input_shape, + max_input_shape, + optim_input_shape, + disable_trt_plugin_fp16, + ): self.min_input_shape = min_input_shape self.max_input_shape = max_input_shape self.optim_input_shape = optim_input_shape diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_adaptive_pool2d_convert_global_pass_autoscan.py b/python/paddle/fluid/tests/unittests/ir/inference/test_adaptive_pool2d_convert_global_pass_autoscan.py index aa2d29d613710e7df6fb92fc24971034942831c6..c0814dc920b9e545ec34d306bd1f180811ccd279 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_adaptive_pool2d_convert_global_pass_autoscan.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_adaptive_pool2d_convert_global_pass_autoscan.py @@ -21,55 +21,60 @@ import hypothesis.strategies as st class TestAdaptivePool2dConvertGlobalPass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_config(self, draw): x_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=4, max_size=4 + ) + ) pooling_type = draw(st.sampled_from(["max", "avg"])) - data_format = "NCHW" #trt support this format only + data_format = "NCHW" # trt support this format only strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) paddings = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) ceil_mode = draw(st.booleans()) exclusive = draw(st.booleans()) global_pooling = draw(st.booleans()) padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VAILD"])) - pool_op = OpConfig("pool2d", - inputs={"X": ["input_data"]}, - outputs={"Out": ["pool_output"]}, - ksize=[1, 1], - adaptive=True, - pooling_type=pooling_type, - data_format=data_format, - strides=strides, - paddings=paddings, - ceil_mode=ceil_mode, - global_pooling=global_pooling, - padding_algorithm=padding_algorithm, - exclusive=exclusive) + pool_op = OpConfig( + "pool2d", + inputs={"X": ["input_data"]}, + outputs={"Out": ["pool_output"]}, + ksize=[1, 1], + adaptive=True, + pooling_type=pooling_type, + data_format=data_format, + strides=strides, + paddings=paddings, + ceil_mode=ceil_mode, + global_pooling=global_pooling, + padding_algorithm=padding_algorithm, + exclusive=exclusive, + ) ops = [pool_op] - program_config = ProgramConfig(ops=ops, - weights={}, - inputs={ - "input_data": - TensorConfig(shape=x_shape), - }, - outputs=["pool_output"]) + program_config = ProgramConfig( + ops=ops, + weights={}, + inputs={ + "input_data": TensorConfig(shape=x_shape), + }, + outputs=["pool_output"], + ) return program_config @@ -81,14 +86,17 @@ class TestAdaptivePool2dConvertGlobalPass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['pool2d'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["adaptive_pool2d_convert_global_pass"], - min_success_num=40) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["adaptive_pool2d_convert_global_pass"], + min_success_num=40, + ) 
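The auto-scan test just reformatted above builds its program from the config helpers that open this section. A minimal sketch of how those pieces compose is given below; it is illustrative only and not part of this patch. It assumes the program_config module shown earlier is importable (as it is for the tests in this directory) and uses a single relu op purely as a placeholder.

from program_config import (
    OpConfig,
    ProgramConfig,
    TensorConfig,
    create_fake_model,
)

# One op, one input tensor, no persistable weights; ProgramConfig inserts a
# placeholder weight in that case so the params can still be serialized.
relu_op = OpConfig(
    "relu",
    inputs={"X": ["input_data"]},
    outputs={"Out": ["relu_out"]},
)

program = ProgramConfig(
    ops=[relu_op],
    weights={},
    inputs={"input_data": TensorConfig(shape=[1, 3, 32, 32])},
    outputs=["relu_out"],
)

# create_fake_model turns the config into an in-memory model plus params,
# which the pass tests then feed to the inference predictor.
model, params = create_fake_model(program)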
if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py index d619438de4ec08be5a30a07d70e9502536094ece..fdc995fee78993ab6fb32afd4d9b35975d56cce0 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py @@ -48,12 +48,36 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): filter_shape = prog_config.weights["filter"].shape input_shape = prog_config.inputs["input_x"].shape if padding_algorithm == "VALID": - if ((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + (input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) <= 1 or ( + (input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) <= 1: return False if padding_algorithm == "EXPLICIT": - if ((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) <= 1 or ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) <= 1: return False if data_format == "NCHW": if input_shape[1] != filter_shape[1] * groups: @@ -70,9 +94,10 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of conv2d x_shape = draw( - st.lists(st.integers(min_value=5, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=5, max_value=100), min_size=4, max_size=4 + ) + ) x_shape[1] = draw(st.integers(min_value=5, max_value=10)) # 2. Generate legal attr:data_format of conv2d @@ -80,9 +105,10 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of conv2d f_shape = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=4, max_size=4 + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: @@ -90,35 +116,41 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): # 4. Generate legal attr:strides of conv2d strides = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 5. Generate legal attr:padding_algorithm of conv2d padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) # 6. Generate legal attr:padding of conv2d padding = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=4, max_size=4 + ) + ) # 7. Generate legal attr:groups of conv2d groups = draw(st.integers(min_value=1, max_value=3)) # 8. Generate legal attr:dilations of conv2d dilations = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 9. 
Generate legal input:ResidualData of conv2d res_shape = [] if draw(st.booleans()): res_shape = draw( - st.lists(st.integers(min_value=1, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=100), + min_size=4, + max_size=4, + ) + ) # 10. Generate legal shape of input:bias of conv2d conv_bias_shape = [] @@ -135,58 +167,69 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): } weights = { "filter": TensorConfig(shape=f_shape), - "conv_bias": TensorConfig(shape=conv_bias_shape) + "conv_bias": TensorConfig(shape=conv_bias_shape), } use_mkldnn = True else: inputs = { "Input": ["input_x"], "Filter": ["filter"], - "ResidualData": ["residualdata"] + "ResidualData": ["residualdata"], } weights = {"filter": TensorConfig(shape=f_shape)} use_mkldnn = False # 11. Generate legal act type of conv2d act_type = draw( - st.sampled_from(["relu", "leaky_relu", "relu6", "swish"])) - - conv2d_op = OpConfig("conv2d", - inputs=inputs, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format, - use_mkldnn=True) + st.sampled_from(["relu", "leaky_relu", "relu6", "swish"]) + ) + + conv2d_op = OpConfig( + "conv2d", + inputs=inputs, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + use_mkldnn=True, + ) # 11. Generate legal attr of act act_op = None self.passes = ["conv_activation_mkldnn_fuse_pass"] if act_type == "relu6": threshold = draw(st.floats(min_value=1.0, max_value=10.0)) - act_op = OpConfig("relu6", - inputs={"X": ["conv2d_out"]}, - outputs={"Out": ["relu_out"]}, - threshold=threshold) + act_op = OpConfig( + "relu6", + inputs={"X": ["conv2d_out"]}, + outputs={"Out": ["relu_out"]}, + threshold=threshold, + ) elif act_type == "leaky_relu": alpha = draw(st.floats(min_value=0.1, max_value=1.0)) - act_op = OpConfig("leaky_relu", - inputs={"X": ["conv2d_out"]}, - outputs={"Out": ["relu_out"]}, - alpha=alpha) + act_op = OpConfig( + "leaky_relu", + inputs={"X": ["conv2d_out"]}, + outputs={"Out": ["relu_out"]}, + alpha=alpha, + ) elif act_type == "relu": - act_op = OpConfig("relu", - inputs={"X": ["conv2d_out"]}, - outputs={"Out": ["relu_out"]}) + act_op = OpConfig( + "relu", + inputs={"X": ["conv2d_out"]}, + outputs={"Out": ["relu_out"]}, + ) elif act_type == "swish": beta = draw(st.floats(min_value=0.1, max_value=1.0)) - act_op = OpConfig("swish", - inputs={"X": ["conv2d_out"]}, - outputs={"Out": ["swish_out"]}, - beta=beta) + act_op = OpConfig( + "swish", + inputs={"X": ["conv2d_out"]}, + outputs={"Out": ["swish_out"]}, + beta=beta, + ) ops = [conv2d_op, act_op] @@ -195,7 +238,7 @@ class TestConvActMkldnnFusePass(PassAutoScanTest): weights=weights, inputs={ "input_x": TensorConfig(shape=x_shape), - "residualdata": TensorConfig(shape=res_shape) + "residualdata": TensorConfig(shape=res_shape), }, outputs=ops[-1].outputs["Out"], ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py index eb02c6e1c77ca2c45d1c8856e6c273662b3276df..ec7c8db66d03375b3de98ea888d544650602fb71 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py @@ -48,12 +48,36 @@ class 
TestConvBiasMkldnnFusePass(PassAutoScanTest): filter_shape = prog_config.weights["filter"].shape input_shape = prog_config.inputs["input_x"].shape if padding_algorithm == "VALID": - if ((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + (input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) <= 1 or ( + (input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) <= 1: return False if padding_algorithm == "EXPLICIT": - if ((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) <= 1 or ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) <= 1: return False if data_format == "NCHW": if input_shape[1] != filter_shape[1] * groups: @@ -70,9 +94,10 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of conv2d x_shape = draw( - st.lists(st.integers(min_value=5, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=5, max_value=100), min_size=4, max_size=4 + ) + ) x_shape[1] = draw(st.integers(min_value=5, max_value=10)) # 2. Generate legal attr:data_format of conv2d @@ -80,9 +105,10 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of conv2d f_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=4, max_size=4 + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: @@ -90,27 +116,30 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): # 4. Generate legal attr:strides of conv2d strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) # 5. Generate legal attr:padding_algorithm of conv2d padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) # 6. Generate legal attr:padding of conv2d padding = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=4, max_size=4 + ) + ) # 7. Generate legal attr:groups of conv2d groups = draw(st.integers(min_value=1, max_value=3)) # 8. Generate legal attr:dilations of conv2d dilations = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) # 9. 
Generate legal shape of input:bias of elementwise_add bias_shape = [f_shape[0]] @@ -137,7 +166,7 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): weights = { "filter": TensorConfig(shape=f_shape), "bias": TensorConfig(shape=bias_shape), - "conv_bias": TensorConfig(shape=conv_bias_shape) + "conv_bias": TensorConfig(shape=conv_bias_shape), } use_mkldnn = True else: @@ -147,28 +176,29 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): } weights = { "filter": TensorConfig(shape=f_shape), - "bias": TensorConfig(shape=bias_shape) + "bias": TensorConfig(shape=bias_shape), } use_mkldnn = False - conv2d_op = OpConfig("conv2d", - inputs=inputs, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format, - use_mkldnn=use_mkldnn) - - add_op = OpConfig("elementwise_add", - inputs={ - "X": ["conv2d_out"], - "Y": ["bias"] - }, - outputs={"Out": ["add_out"]}, - axis=axis) + conv2d_op = OpConfig( + "conv2d", + inputs=inputs, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + use_mkldnn=use_mkldnn, + ) + + add_op = OpConfig( + "elementwise_add", + inputs={"X": ["conv2d_out"], "Y": ["bias"]}, + outputs={"Out": ["add_out"]}, + axis=axis, + ) ops = [conv2d_op, add_op] @@ -176,13 +206,14 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest): ops=ops, weights=weights, inputs={"input_x": TensorConfig(shape=x_shape)}, - outputs=ops[-1].outputs["Out"]) + outputs=ops[-1].outputs["Out"], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=350, - passes=["conv_bias_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, max_examples=350, passes=["conv_bias_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py index 0657ba51cdc7c2dac5f4eeebe08a4303df7c0a4d..e16fd8b10c2f8afa54f74d9a79518c171b685de5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py @@ -23,7 +23,6 @@ import hypothesis.strategies as st class TestConvBnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -46,24 +45,29 @@ class TestConvBnFusePass(PassAutoScanTest): out_channel = groups * out_channel_factor batch_size = draw(st.integers(min_value=1, max_value=4)) dilations = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) paddings = draw( - st.lists(st.integers(min_value=0, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=2), min_size=2, max_size=2 + ) + ) strides = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) has_bias = draw(st.booleans()) use_mkldnn = draw(st.booleans()) epsilon = draw(st.floats(min_value=0.0, max_value=0.001)) - x_shape = [ - batch_size, in_channel, 64, 64 - ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel] + x_shape = ( + [batch_size, 
in_channel, 64, 64] + if data_format == "NCHW" + else [batch_size, 64, 64, in_channel] + ) w_shape = [out_channel, filter_channel, filter_size, filter_size] scale_shape = [out_channel] bias_shape = [out_channel] @@ -91,41 +95,45 @@ class TestConvBnFusePass(PassAutoScanTest): def generate_bn_Var(): return np.random.random(var_shape).astype(np.float32) - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["conv2d_input"], - "Filter": ["conv2d_weight"], - }, - outputs={"Output": ["conv2d_out"]}, - data_format=data_format, - dilations=dilations, - padding_algorithm=padding_algorithm, - groups=groups, - paddings=paddings, - strides=strides, - use_mkldnn=use_mkldnn, - has_bias=has_bias, - is_test=True) - bn_op = OpConfig("batch_norm", - inputs={ - "X": ["conv2d_out"], - "Scale": ["batch_norm_Scale"], - "Bias": ["batch_norm_Bias"], - "Mean": ["batch_norm_Mean"], - "Variance": ["batch_norm_Variance"], - }, - outputs={ - "Y": ["batch_norm_Y"], - "MeanOut": ["batch_norm_Mean"], - "VarianceOut": ["batch_norm_Variance"], - "SavedMean": ["batch_norm_SavedMean"], - "SavedVariance": ["batch_norm_SavedVariance"], - "ReserveSpace": ["batch_norm_ReserveSpace"], - }, - epsilon=epsilon, - trainable_statistics=False, - data_layout=data_format, - is_test=True) + conv2d_op = OpConfig( + "conv2d", + inputs={ + "Input": ["conv2d_input"], + "Filter": ["conv2d_weight"], + }, + outputs={"Output": ["conv2d_out"]}, + data_format=data_format, + dilations=dilations, + padding_algorithm=padding_algorithm, + groups=groups, + paddings=paddings, + strides=strides, + use_mkldnn=use_mkldnn, + has_bias=has_bias, + is_test=True, + ) + bn_op = OpConfig( + "batch_norm", + inputs={ + "X": ["conv2d_out"], + "Scale": ["batch_norm_Scale"], + "Bias": ["batch_norm_Bias"], + "Mean": ["batch_norm_Mean"], + "Variance": ["batch_norm_Variance"], + }, + outputs={ + "Y": ["batch_norm_Y"], + "MeanOut": ["batch_norm_Mean"], + "VarianceOut": ["batch_norm_Variance"], + "SavedMean": ["batch_norm_SavedMean"], + "SavedVariance": ["batch_norm_SavedVariance"], + "ReserveSpace": ["batch_norm_ReserveSpace"], + }, + epsilon=epsilon, + trainable_statistics=False, + data_layout=data_format, + is_test=True, + ) if has_bias == True: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, bn_op] @@ -133,25 +141,25 @@ class TestConvBnFusePass(PassAutoScanTest): program_config = ProgramConfig( ops=ops, inputs={ - "conv2d_input": - TensorConfig(data_gen=partial(generate_conv2d_Input)), + "conv2d_input": TensorConfig( + data_gen=partial(generate_conv2d_Input) + ), }, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_conv2d_Filter)), - "batch_norm_Scale": - TensorConfig(data_gen=generate_bn_Scale), - "batch_norm_Bias": - TensorConfig(data_gen=generate_bn_Bias), - "batch_norm_Mean": - TensorConfig(data_gen=generate_bn_Mean), - "batch_norm_Variance": - TensorConfig(data_gen=generate_bn_Var), + "conv2d_weight": TensorConfig( + data_gen=partial(generate_conv2d_Filter) + ), + "batch_norm_Scale": TensorConfig(data_gen=generate_bn_Scale), + "batch_norm_Bias": TensorConfig(data_gen=generate_bn_Bias), + "batch_norm_Mean": TensorConfig(data_gen=generate_bn_Mean), + "batch_norm_Variance": TensorConfig(data_gen=generate_bn_Var), }, - outputs=["batch_norm_Y"]) + outputs=["batch_norm_Y"], + ) if has_bias == True: program_config.weights["conv2d_bias"] = TensorConfig( - data_gen=partial(generate_conv2d_Bias)) + data_gen=partial(generate_conv2d_Bias) + ) return program_config def sample_predictor_configs(self, program_config): @@ -174,34 +182,40 @@ class 
TestConvBnFusePass(PassAutoScanTest): min_subgraph_size=1, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) if program_config.ops[0].attrs['has_bias']: yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5) else: # it will enter conv_elementwise_add_fuse_pass yield config, ['conv2d_fusion'], (1e-5, 1e-5) def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): - if program_config.ops[0].attrs[ - 'data_format'] == "NHWC" and not predictor_config.mkldnn_enabled( - ): + if ( + program_config.ops[0].attrs['data_format'] == "NHWC" + and not predictor_config.mkldnn_enabled() + ): return True return False # mkldnn Output has diff with bias! def teller2(program_config, predictor_config): - return predictor_config.mkldnn_enabled( - ) and program_config.ops[0].attrs['has_bias'] == True + return ( + predictor_config.mkldnn_enabled() + and program_config.ops[0].attrs['has_bias'] == True + ) self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The output format of conv2d is wrong when data_format attribute is NHWC" + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The output format of conv2d is wrong when data_format attribute is NHWC", ) self.add_ignore_check_case( - teller2, IgnoreReasons.PASS_ACCURACY_ERROR, - "Currently mkldnn Output has diff with bias!") + teller2, + IgnoreReasons.PASS_ACCURACY_ERROR, + "Currently mkldnn Output has diff with bias!", + ) def test(self): self.run_and_statis( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py index d08da522e1a3ede893c442b207d349632d6aed1f..75d0e445f7aff68d795d30af35b17410ab469dbf 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py @@ -24,21 +24,21 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0' class TestConvElementwiseAdd2ActPass(PassAutoScanTest): r""" - x_var f_var(persistable) - \ / - conv2d - | - conv2d_var y_var(persistable) - \ / - elementwise_add - | - x1_var elementwise_add_out_var - \ / - elementwise_add - | - act - | - act_var + x_var f_var(persistable) + \ / + conv2d + | + conv2d_var y_var(persistable) + \ / + elementwise_add + | + x1_var elementwise_add_out_var + \ / + elementwise_add + | + act + | + act_var """ def sample_predictor_configs(self, program_config): @@ -58,16 +58,66 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): if data_format != "NCHW": return False if padding_algorithm == "VALID": - if int(((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1)) <= 0 or \ - int(((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1)) <= 0: + if ( + int( + ( + ( + input_shape[2] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) + ) + <= 0 + or int( + ( + ( + input_shape[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) + ) + <= 0 + ): return False if padding_algorithm == "EXPLICIT": - if int(((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1)) <= 0 or \ - int(((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1)) <= 0: + if ( + int( + ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - 
(dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) + ) + <= 0 + or int( + ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) + ) + <= 0 + ): return False if padding_algorithm == "SAME": - if int((input_shape[2] + strides[0] - 1) / strides[0]) <= 0 or int( - (input_shape[3] + strides[1] - 1) / strides[1]) <= 0: + if ( + int((input_shape[2] + strides[0] - 1) / strides[0]) <= 0 + or int((input_shape[3] + strides[1] - 1) / strides[1]) <= 0 + ): return False if data_format == "NCHW": if input_shape[1] != filter_shape[1] * groups: @@ -89,9 +139,12 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): while is_not_valid: # 1. Generate shape of input:X of conv2d x_shape = draw( - st.lists(st.integers(min_value=1, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=100), + min_size=4, + max_size=4, + ) + ) x_shape[1] = draw(st.integers(min_value=1, max_value=10)) # 2. Generate legal attr:data_format of conv2d @@ -99,9 +152,12 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): # 3. Generate legal shape of input:Y of conv2d f_shape = draw( - st.lists(st.integers(min_value=1, max_value=7), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=7), + min_size=4, + max_size=4, + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: @@ -109,51 +165,94 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): # 4. Generate legal attr:strides of conv2d strides = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), + min_size=2, + max_size=2, + ) + ) # 5. Generate legal attr:padding_algorithm of conv2d padding_algorithm = draw( - st.sampled_from(["EXPLICIT", "SAME", "VALID"])) + st.sampled_from(["EXPLICIT", "SAME", "VALID"]) + ) # 6. Generate legal attr:padding of conv2d padding = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=5), + min_size=4, + max_size=4, + ) + ) # 7. Generate legal attr:groups of conv2d groups = draw(st.integers(min_value=1, max_value=3)) # 8. Generate legal attr:dilations of conv2d dilations = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), + min_size=2, + max_size=2, + ) + ) # 9. 
Generate legal elemntwise_add: X of conv2d bias_2_dict = dict() bias_2_dict[1] = [ - x_shape[0], f_shape[0], - int(((x_shape[2] + padding[0] + padding[1] - - (dilations[0] * (f_shape[2] - 1) + 1)) / strides[0] + 1)), - int(((x_shape[3] + padding[2] + padding[3] - - (dilations[1] * (f_shape[3] - 1) + 1)) / strides[1] + 1)) + x_shape[0], + f_shape[0], + int( + ( + ( + x_shape[2] + + padding[0] + + padding[1] + - (dilations[0] * (f_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) + ), + int( + ( + ( + x_shape[3] + + padding[2] + + padding[3] + - (dilations[1] * (f_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) + ), ] bias_2_dict[2] = [ - x_shape[0], f_shape[0], + x_shape[0], + f_shape[0], int((x_shape[2] + strides[0] - 1) / strides[0]), - int((x_shape[3] + strides[1] - 1) / strides[1]) + int((x_shape[3] + strides[1] - 1) / strides[1]), ] bias_2_dict[3] = [ - x_shape[0], f_shape[0], - int(((x_shape[2] - (dilations[0] * - (f_shape[2] - 1) + 1)) / strides[0] + 1)), - int(((x_shape[3] - (dilations[1] * - (f_shape[3] - 1) + 1)) / strides[1] + 1)) + x_shape[0], + f_shape[0], + int( + ( + (x_shape[2] - (dilations[0] * (f_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) + ), + int( + ( + (x_shape[3] - (dilations[1] * (f_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) + ), ] bias_index = 1 if padding_algorithm == "SAME": @@ -176,37 +275,34 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): # 12. Generate legal attr:axis of elementwise_add_2 axis_2 = -1 - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["input_x"], - "Filter": ["filter"] - }, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format) - add_1_op = OpConfig("elementwise_add", - inputs={ - "X": ["conv2d_out"], - "Y": ["bias_1"] - }, - outputs={"Out": ["add_1_out"]}, - axis=axis_1) - - add_2_op = OpConfig("elementwise_add", - inputs={ - "X": ["bias_2"], - "Y": ["add_1_out"] - }, - outputs={"Out": ["add_out"]}, - axis=axis_2) - - relu_op = OpConfig("relu", - inputs={"X": ["add_out"]}, - outputs={"Out": ["relu_out"]}) + conv2d_op = OpConfig( + "conv2d", + inputs={"Input": ["input_x"], "Filter": ["filter"]}, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + ) + add_1_op = OpConfig( + "elementwise_add", + inputs={"X": ["conv2d_out"], "Y": ["bias_1"]}, + outputs={"Out": ["add_1_out"]}, + axis=axis_1, + ) + + add_2_op = OpConfig( + "elementwise_add", + inputs={"X": ["bias_2"], "Y": ["add_1_out"]}, + outputs={"Out": ["add_out"]}, + axis=axis_2, + ) + + relu_op = OpConfig( + "relu", inputs={"X": ["add_out"]}, outputs={"Out": ["relu_out"]} + ) ops = [conv2d_op, add_1_op, add_2_op, relu_op] @@ -218,16 +314,18 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): }, inputs={ "input_x": TensorConfig(shape=x_shape), - "bias_2": TensorConfig(shape=bias_2_shape) + "bias_2": TensorConfig(shape=bias_2_shape), }, outputs=ops[-1].outputs["Out"], ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["conv_elementwise_add2_act_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["conv_elementwise_add2_act_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py index 7c1ae063ce84979b0c81b517a4110e51e76e4cae..ca34981951916ac907e46bc4f922999149d21eb2 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py @@ -55,12 +55,36 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): if data_format != "NCHW": return False if padding_algorithm == "VALID": - if ((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + (input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) <= 1 or ( + (input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) <= 1: return False if padding_algorithm == "EXPLICIT": - if ((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) <= 1 or ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) <= 1: return False if data_format == "NCHW": if input_shape[1] != filter_shape[1] * groups: @@ -77,9 +101,10 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of conv2d x_shape = draw( - st.lists(st.integers(min_value=1, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=100), min_size=4, max_size=4 + ) + ) x_shape[1] = draw(st.integers(min_value=1, max_value=10)) # 2. Generate legal attr:data_format of conv2d @@ -87,9 +112,10 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): # 3. Generate legal shape of input:Y of conv2d f_shape = draw( - st.lists(st.integers(min_value=1, max_value=7), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=7), min_size=4, max_size=4 + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: @@ -97,35 +123,41 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): # 4. Generate legal attr:strides of conv2d strides = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 5. Generate legal attr:padding_algorithm of conv2d padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) # 6. Generate legal attr:padding of conv2d padding = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=4, max_size=4 + ) + ) # 7. Generate legal attr:groups of conv2d groups = draw(st.integers(min_value=1, max_value=3)) # 8. Generate legal attr:dilations of conv2d dilations = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 9. 
Generate legal input:ResidualData of conv2d res_shape = [] if draw(st.booleans()): res_shape = draw( - st.lists(st.integers(min_value=1, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=100), + min_size=4, + max_size=4, + ) + ) # 10. Generate legal shape of input:bias of elementwise_add bias_shape = [f_shape[0]] @@ -133,30 +165,31 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): # 11. Generate legal attr:axis of elementwise_add axis = 1 - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["input_x"], - "Filter": ["filter"], - "ResidualData": ["residualdata"] - }, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format) - add_op = OpConfig("elementwise_add", - inputs={ - "X": ["conv2d_out"], - "Y": ["bias"] - }, - outputs={"Out": ["add_out"]}, - axis=axis) - - relu_op = OpConfig("relu", - inputs={"X": ["add_out"]}, - outputs={"Out": ["relu_out"]}) + conv2d_op = OpConfig( + "conv2d", + inputs={ + "Input": ["input_x"], + "Filter": ["filter"], + "ResidualData": ["residualdata"], + }, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + ) + add_op = OpConfig( + "elementwise_add", + inputs={"X": ["conv2d_out"], "Y": ["bias"]}, + outputs={"Out": ["add_out"]}, + axis=axis, + ) + + relu_op = OpConfig( + "relu", inputs={"X": ["add_out"]}, outputs={"Out": ["relu_out"]} + ) ops = [conv2d_op, add_op, relu_op] @@ -168,16 +201,18 @@ class TestConvElementwiseAddActPass(PassAutoScanTest): }, inputs={ "input_x": TensorConfig(shape=x_shape), - "residualdata": TensorConfig(shape=res_shape) + "residualdata": TensorConfig(shape=res_shape), }, outputs=ops[-1].outputs["Out"], ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=400, - passes=["conv_elementwise_add_act_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=400, + passes=["conv_elementwise_add_act_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py index 303c8017e95d207565be15fe8d1c98089ad7cf8e..6501d6c953b6168e6b2d07bfdc829c5a26374afb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py @@ -23,7 +23,6 @@ import hypothesis.strategies as st class TestConvEltwiseAddFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -46,21 +45,26 @@ class TestConvEltwiseAddFusePass(PassAutoScanTest): out_channel = groups * out_channel_factor batch_size = draw(st.integers(min_value=1, max_value=4)) dilations = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) paddings = draw( - st.lists(st.integers(min_value=0, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=2), min_size=2, max_size=2 + ) + ) strides = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + 
st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) - x_shape = [ - batch_size, in_channel, 64, 64 - ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel] + x_shape = ( + [batch_size, in_channel, 64, 64] + if data_format == "NCHW" + else [batch_size, 64, 64, in_channel] + ) w_shape = [out_channel, filter_channel, filter_size, filter_size] scale_shape = [out_channel] bias_shape = [out_channel] @@ -77,26 +81,27 @@ class TestConvEltwiseAddFusePass(PassAutoScanTest): def generate_scale_bias(): return np.random.random(bias_shape).astype(np.float32) - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["input_data"], - "Filter": ["conv2d_weight"], - }, - outputs={"Output": ["conv_output"]}, - data_format=data_format, - dilations=dilations, - padding_algorithm=padding_algorithm, - groups=groups, - paddings=paddings, - strides=strides, - is_test=True) - eltwise_op = OpConfig("elementwise_add", - inputs={ - "X": ["conv_output"], - "Y": ["conv2d_bias"] - }, - outputs={"Out": ["elementwise_output"]}, - axis=axis) + conv2d_op = OpConfig( + "conv2d", + inputs={ + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + outputs={"Output": ["conv_output"]}, + data_format=data_format, + dilations=dilations, + padding_algorithm=padding_algorithm, + groups=groups, + paddings=paddings, + strides=strides, + is_test=True, + ) + eltwise_op = OpConfig( + "elementwise_add", + inputs={"X": ["conv_output"], "Y": ["conv2d_bias"]}, + outputs={"Out": ["elementwise_output"]}, + axis=axis, + ) ops = [conv2d_op, eltwise_op] program_config = ProgramConfig( @@ -105,12 +110,15 @@ class TestConvEltwiseAddFusePass(PassAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input)), }, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight)), - "conv2d_bias": - TensorConfig(data_gen=partial(generate_scale_bias)), + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight) + ), + "conv2d_bias": TensorConfig( + data_gen=partial(generate_scale_bias) + ), }, - outputs=["elementwise_output"]) + outputs=["elementwise_output"], + ) return program_config def sample_predictor_configs(self, program_config): @@ -125,7 +133,8 @@ class TestConvEltwiseAddFusePass(PassAutoScanTest): min_subgraph_size=1, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['conv2d_fusion'], (1e-4, 1e-4) def add_ignore_pass_case(self): @@ -137,10 +146,11 @@ class TestConvEltwiseAddFusePass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, "The output format of conv2d is wrong when data_format attribute is NHWC, \ it will trigger Broadcast dimension mismatch bug \ - when data_format attribute is NHWC and axis of eltwise op is 1 for this pass." 
+ when data_format attribute is NHWC and axis of eltwise op is 1 for this pass.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py index a88150aecbf3b653aac378a41887d1a4cbb1ed05..9a25806c25f293fcd77cd49ae620c6cf7d29552c 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py @@ -25,19 +25,19 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0' class TestConvEltwiseaddBnFusePass(PassAutoScanTest): r""" - x_var f_var(persistable) - \ / - conv2d - | - conv2d_var bias_var(persistable) - \ / - elementwise_add - | - elementwise_add_var Scale(persistable) Bias(persistable) Mean(persistable) Variance(persistable) - | - batch_norm - | - Y MeanOut VarianceOut SavedMeanSavedVariance + x_var f_var(persistable) + \ / + conv2d + | + conv2d_var bias_var(persistable) + \ / + elementwise_add + | + elementwise_add_var Scale(persistable) Bias(persistable) Mean(persistable) Variance(persistable) + | + batch_norm + | + Y MeanOut VarianceOut SavedMeanSavedVariance """ def sample_predictor_configs(self, program_config): @@ -66,12 +66,36 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): if data_format != "NCHW": return False if padding_algorithm == "VALID": - if ((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + (input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) <= 1 or ( + (input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) <= 1: return False if padding_algorithm == "EXPLICIT": - if ((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) <= 1 or ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) <= 1: return False if data_format == "NCHW": @@ -105,9 +129,10 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of conv2d x_shape = draw( - st.lists(st.integers(min_value=10, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=10, max_value=100), min_size=4, max_size=4 + ) + ) x_shape[1] = draw(st.integers(min_value=1, max_value=10)) # 2. Generate legal attr:data_format of conv2d @@ -115,9 +140,10 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): # 2. Generate legal shape of input:Y of conv2d f_shape = draw( - st.lists(st.integers(min_value=1, max_value=7), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=7), min_size=4, max_size=4 + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: @@ -125,35 +151,41 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): # 3. Generate legal attr:strides of conv2d strides = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 4. 
Generate legal attr:padding_algorithm of conv2d padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) # 5. Generate legal attr:padding of conv2d padding = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=4, max_size=4 + ) + ) # 6. Generate legal attr:groups of conv2d groups = draw(st.integers(min_value=1, max_value=3)) # 7. Generate legal attr:dilations of conv2d dilations = draw( - st.lists(st.integers(min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) # 9. Generate legal input:ResidualData of conv2d res_shape = [] if draw(st.booleans()): res_shape = draw( - st.lists(st.integers(min_value=1, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=100), + min_size=4, + max_size=4, + ) + ) # 10. Generate legal shape of input:bias of elementwise_add bias_shape = [f_shape[0]] @@ -177,51 +209,54 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): epsilon = draw(st.floats(min_value=0.00001, max_value=0.001)) def generate_batch_variance(): - return (0.1 + - (1.0 - 0.1) * np.random.random(bn_variance_shape)).astype( - np.float32) - - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["input_x"], - "Filter": ["filter"], - "ResidualData": ["residualdata"] - }, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format) - add_op = OpConfig("elementwise_add", - inputs={ - "X": ["conv2d_out"], - "Y": ["bias"] - }, - outputs={"Out": ["add_out"]}, - axis=axis) - - bn_op = OpConfig("batch_norm", - inputs={ - "X": ["add_out"], - "Scale": ["scale_in"], - "Bias": ["bias_in"], - "Mean": ["mean_in"], - "Variance": ["variance_in"] - }, - outputs={ - "Y": ["y_out"], - "MeanOut": ["mean_in"], - "VarianceOut": ["variance_in"], - "SavedMean": ["SavedMean_out"], - "SavedVariance": ["SavedVariance_out"], - "ReserveSpace": ["ReserveSpace_out"] - }, - epsilon=epsilon, - is_test=True, - trainable_statistics=False, - data_layout=data_format) + return ( + 0.1 + (1.0 - 0.1) * np.random.random(bn_variance_shape) + ).astype(np.float32) + + conv2d_op = OpConfig( + "conv2d", + inputs={ + "Input": ["input_x"], + "Filter": ["filter"], + "ResidualData": ["residualdata"], + }, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + ) + add_op = OpConfig( + "elementwise_add", + inputs={"X": ["conv2d_out"], "Y": ["bias"]}, + outputs={"Out": ["add_out"]}, + axis=axis, + ) + + bn_op = OpConfig( + "batch_norm", + inputs={ + "X": ["add_out"], + "Scale": ["scale_in"], + "Bias": ["bias_in"], + "Mean": ["mean_in"], + "Variance": ["variance_in"], + }, + outputs={ + "Y": ["y_out"], + "MeanOut": ["mean_in"], + "VarianceOut": ["variance_in"], + "SavedMean": ["SavedMean_out"], + "SavedVariance": ["SavedVariance_out"], + "ReserveSpace": ["ReserveSpace_out"], + }, + epsilon=epsilon, + is_test=True, + trainable_statistics=False, + data_layout=data_format, + ) ops = [conv2d_op, add_op, bn_op] @@ -243,15 +278,18 @@ class TestConvEltwiseaddBnFusePass(PassAutoScanTest): }, inputs={ "input_x": TensorConfig(shape=x_shape), - "residualdata": TensorConfig(shape=res_shape) + "residualdata": TensorConfig(shape=res_shape), }, - outputs=outputs) + outputs=outputs, 
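# The conv_eltwiseadd_bn_fuse_pass exercised by this program folds the
# elementwise_add bias and the batch_norm statistics into the convolution at
# inference time. The pass itself is C++; the numpy sketch below only checks
# the per-channel algebra it relies on and is not Paddle code.
import numpy as np

rng = np.random.default_rng(0)
channels, eps = 4, 1e-5
conv_out = rng.standard_normal((1, channels, 8, 8)).astype(np.float32)
bias = rng.standard_normal(channels)
gamma, beta = rng.standard_normal(channels), rng.standard_normal(channels)
mean, var = rng.standard_normal(channels), rng.random(channels) + 0.1


def per_channel(v):
    # Broadcast a length-C vector over an NCHW tensor.
    return v[None, :, None, None]


# Reference path: elementwise_add followed by inference-mode batch_norm.
y = conv_out + per_channel(bias)
reference = per_channel(gamma) * (y - per_channel(mean)) / np.sqrt(
    per_channel(var) + eps
) + per_channel(beta)

# Fused path: a single per-channel scale and shift applied to the conv output.
scale = gamma / np.sqrt(var + eps)
shift = (bias - mean) * scale + beta
fused = conv_out * per_channel(scale) + per_channel(shift)

np.testing.assert_allclose(reference, fused, rtol=1e-5, atol=1e-5)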
+ ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["conv_eltwiseadd_bn_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["conv_eltwiseadd_bn_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py index 90d81b50471c03be0dfbdb3b494336d521b2d7eb..3749479a6cc8648b8b36f5fcac2f4fd49cd288cf 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py @@ -34,10 +34,12 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): ''' def test(self): - self.run_and_statis(quant=False, - max_examples=150, - max_duration=250, - passes=["conv_transpose_bn_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=150, + max_duration=250, + passes=["conv_transpose_bn_fuse_pass"], + ) def sample_program_config(self, draw): # generate random number @@ -47,26 +49,31 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): random_input_dim2 = draw(st.integers(min_value=20, max_value=50)) random_groups = draw(st.integers(min_value=1, max_value=2)) random_dilations = draw( - st.lists(st.integers(min_value=1, max_value=3), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=3), min_size=2, max_size=2 + ) + ) random_strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) random_paddings = draw( - st.lists(st.integers(min_value=0, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=4), min_size=2, max_size=2 + ) + ) random_padding_algorithm = draw( - st.sampled_from(["EXPLICIT", "SAME", "VALID"])) + st.sampled_from(["EXPLICIT", "SAME", "VALID"]) + ) random_data_layout = draw(st.sampled_from(["NCHW", "NHWC"])) random_use_mkldnn = draw(st.booleans()) random_output_size = [] random_filter = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) random_out_channel = draw(st.integers(min_value=10, max_value=25)) random_epsilon = draw(st.floats(min_value=0.0, max_value=0.001)) @@ -87,24 +94,24 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): return np.random.random(shape).astype(np.float32) def generate_batch_norm_Scale(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Bias(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Mean(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Variance(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + 
).astype(np.float32) # define op conv2d_op = OpConfig( @@ -112,7 +119,7 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): inputs={ "Input": ["conv2d_Input"], "Filter": ["conv2d_Filter"], - #"Bias": ["conv2d_Bias"], + # "Bias": ["conv2d_Bias"], }, outputs={ "Output": ["conv2d_Out"], @@ -128,33 +135,34 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): 'output_padding': random_output_size, 'use_mkldnn': random_use_mkldnn, 'is_test': True, - }) - - batch_norm_op = OpConfig(type="batch_norm", - inputs={ - "X": ["conv2d_Out"], - "Scale": ["batch_norm_Scale"], - "Bias": ["batch_norm_Bias"], - "Mean": ["batch_norm_Mean"], - "Variance": ["batch_norm_Variance"], - }, - outputs={ - "Y": ["batch_norm_Y"], - "MeanOut": ["batch_norm_Mean"], - "VarianceOut": ["batch_norm_Variance"], - "SavedMean": ["batch_norm_SavedMean"], - "SavedVariance": - ["batch_norm_SavedVariance"], - "ReserveSpace": - ["batch_norm_ReserveSpace"], - }, - attrs={ - 'epsilon': random_epsilon, - 'is_test': True, - 'trainable_statistics': False, - 'data_layout': random_data_layout, - 'use_mkldnn': random_use_mkldnn, - }) + }, + ) + + batch_norm_op = OpConfig( + type="batch_norm", + inputs={ + "X": ["conv2d_Out"], + "Scale": ["batch_norm_Scale"], + "Bias": ["batch_norm_Bias"], + "Mean": ["batch_norm_Mean"], + "Variance": ["batch_norm_Variance"], + }, + outputs={ + "Y": ["batch_norm_Y"], + "MeanOut": ["batch_norm_Mean"], + "VarianceOut": ["batch_norm_Variance"], + "SavedMean": ["batch_norm_SavedMean"], + "SavedVariance": ["batch_norm_SavedVariance"], + "ReserveSpace": ["batch_norm_ReserveSpace"], + }, + attrs={ + 'epsilon': random_epsilon, + 'is_test': True, + 'trainable_statistics': False, + 'data_layout': random_data_layout, + 'use_mkldnn': random_use_mkldnn, + }, + ) # define model_net model_net = [conv2d_op, batch_norm_op] @@ -165,18 +173,22 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): "conv2d_Input": TensorConfig(data_gen=generate_conv2d_Input), }, weights={ - "conv2d_Filter": - TensorConfig(data_gen=generate_conv2d_Filter), - "batch_norm_Scale": - TensorConfig(data_gen=generate_batch_norm_Scale), - "batch_norm_Bias": - TensorConfig(data_gen=generate_batch_norm_Bias), - "batch_norm_Mean": - TensorConfig(data_gen=generate_batch_norm_Mean), - "batch_norm_Variance": - TensorConfig(data_gen=generate_batch_norm_Variance), + "conv2d_Filter": TensorConfig(data_gen=generate_conv2d_Filter), + "batch_norm_Scale": TensorConfig( + data_gen=generate_batch_norm_Scale + ), + "batch_norm_Bias": TensorConfig( + data_gen=generate_batch_norm_Bias + ), + "batch_norm_Mean": TensorConfig( + data_gen=generate_batch_norm_Mean + ), + "batch_norm_Variance": TensorConfig( + data_gen=generate_batch_norm_Variance + ), }, - outputs=["batch_norm_Y"]) + outputs=["batch_norm_Y"], + ) return program_config @@ -201,7 +213,6 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): return True def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): if program_config.ops[0].attrs['data_format'] == "NHWC": return True @@ -213,9 +224,13 @@ class TestConvTransposeBnFusePass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The output format of conv2d_transpose is wrong when data_format attribute is NHWC" + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The output format of conv2d_transpose is wrong when data_format attribute is NHWC", ) - self.add_ignore_check_case(teller2, IgnoreReasons.PASS_ACCURACY_ERROR, - "there is diff when group >1 in this pass") + 
self.add_ignore_check_case( + teller2, + IgnoreReasons.PASS_ACCURACY_ERROR, + "there is diff when group >1 in this pass", + ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py index 91d9b863e2f9a2065808bae3984dcb1e7f8bf653..8b5bfc91548e60f14dfd3e690ea6dacab7f44506 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py @@ -38,10 +38,12 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): ''' def test(self): - self.run_and_statis(quant=False, - max_examples=150, - max_duration=250, - passes=["conv_transpose_eltwiseadd_bn_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=150, + max_duration=250, + passes=["conv_transpose_eltwiseadd_bn_fuse_pass"], + ) def sample_program_config(self, draw): # generate random number @@ -51,26 +53,31 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): random_input_dim2 = draw(st.integers(min_value=20, max_value=50)) random_groups = draw(st.integers(min_value=1, max_value=2)) random_dilations = draw( - st.lists(st.integers(min_value=1, max_value=3), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=3), min_size=2, max_size=2 + ) + ) random_strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) random_paddings = draw( - st.lists(st.integers(min_value=0, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=4), min_size=2, max_size=2 + ) + ) random_padding_algorithm = draw( - st.sampled_from(["EXPLICIT", "SAME", "VALID"])) + st.sampled_from(["EXPLICIT", "SAME", "VALID"]) + ) random_data_layout = draw(st.sampled_from(["NCHW", "NHWC"])) random_use_mkldnn = draw(st.booleans()) random_output_size = [] random_filter = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) random_out_channel = draw(st.integers(min_value=20, max_value=25)) random_epsilon = draw(st.floats(min_value=0.0, max_value=0.001)) @@ -91,89 +98,93 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): return np.random.random(shape).astype(np.float32) def generate_elementwise_add_Y(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Scale(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Bias(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Mean(): - return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) def generate_batch_norm_Variance(): - 
return np.random.random([ - random_out_channel * random_groups * random_groups - ]).astype(np.float32) + return np.random.random( + [random_out_channel * random_groups * random_groups] + ).astype(np.float32) # define op - conv2d_op = OpConfig(type="conv2d_transpose", - inputs={ - "Input": ["conv2d_Input"], - "Filter": ["conv2d_Filter"], - }, - outputs={ - "Output": ["conv2d_Out"], - }, - attrs={ - 'groups': random_groups, - 'dilations': random_dilations, - 'strides': random_strides, - 'paddings': random_paddings, - 'padding_algorithm': random_padding_algorithm, - 'data_format': random_data_layout, - 'output_size': random_output_size, - 'output_padding': random_output_size, - 'use_mkldnn': random_use_mkldnn, - 'is_test': True, - }) - - elementwise_op = OpConfig(type="elementwise_add", - inputs={ - "X": ["conv2d_Out"], - "Y": ["elementwise_add_Y"], - }, - outputs={ - "Out": ["elementwise_add_Out"], - }, - attrs={ - 'axis': 1, - }) - - batch_norm_op = OpConfig(type="batch_norm", - inputs={ - "X": ["elementwise_add_Out"], - "Scale": ["batch_norm_Scale"], - "Bias": ["batch_norm_Bias"], - "Mean": ["batch_norm_Mean"], - "Variance": ["batch_norm_Variance"], - }, - outputs={ - "Y": ["batch_norm_Y"], - "MeanOut": ["batch_norm_Mean"], - "VarianceOut": ["batch_norm_Variance"], - "SavedMean": ["batch_norm_SavedMean"], - "SavedVariance": - ["batch_norm_SavedVariance"], - "ReserveSpace": - ["batch_norm_ReserveSpace"], - }, - attrs={ - 'epsilon': random_epsilon, - 'is_test': True, - 'trainable_statistics': False, - 'data_layout': random_data_layout, - 'use_mkldnn': random_use_mkldnn, - }) + conv2d_op = OpConfig( + type="conv2d_transpose", + inputs={ + "Input": ["conv2d_Input"], + "Filter": ["conv2d_Filter"], + }, + outputs={ + "Output": ["conv2d_Out"], + }, + attrs={ + 'groups': random_groups, + 'dilations': random_dilations, + 'strides': random_strides, + 'paddings': random_paddings, + 'padding_algorithm': random_padding_algorithm, + 'data_format': random_data_layout, + 'output_size': random_output_size, + 'output_padding': random_output_size, + 'use_mkldnn': random_use_mkldnn, + 'is_test': True, + }, + ) + + elementwise_op = OpConfig( + type="elementwise_add", + inputs={ + "X": ["conv2d_Out"], + "Y": ["elementwise_add_Y"], + }, + outputs={ + "Out": ["elementwise_add_Out"], + }, + attrs={ + 'axis': 1, + }, + ) + + batch_norm_op = OpConfig( + type="batch_norm", + inputs={ + "X": ["elementwise_add_Out"], + "Scale": ["batch_norm_Scale"], + "Bias": ["batch_norm_Bias"], + "Mean": ["batch_norm_Mean"], + "Variance": ["batch_norm_Variance"], + }, + outputs={ + "Y": ["batch_norm_Y"], + "MeanOut": ["batch_norm_Mean"], + "VarianceOut": ["batch_norm_Variance"], + "SavedMean": ["batch_norm_SavedMean"], + "SavedVariance": ["batch_norm_SavedVariance"], + "ReserveSpace": ["batch_norm_ReserveSpace"], + }, + attrs={ + 'epsilon': random_epsilon, + 'is_test': True, + 'trainable_statistics': False, + 'data_layout': random_data_layout, + 'use_mkldnn': random_use_mkldnn, + }, + ) # define model_net model_net = [conv2d_op, elementwise_op, batch_norm_op] @@ -185,20 +196,25 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): "conv2d_Input": TensorConfig(data_gen=generate_conv2d_Input), }, weights={ - "conv2d_Filter": - TensorConfig(data_gen=generate_conv2d_Filter), - "elementwise_add_Y": - TensorConfig(data_gen=generate_elementwise_add_Y), - "batch_norm_Scale": - TensorConfig(data_gen=generate_batch_norm_Scale), - "batch_norm_Bias": - TensorConfig(data_gen=generate_batch_norm_Bias), - "batch_norm_Mean": - 
TensorConfig(data_gen=generate_batch_norm_Mean), - "batch_norm_Variance": - TensorConfig(data_gen=generate_batch_norm_Variance), + "conv2d_Filter": TensorConfig(data_gen=generate_conv2d_Filter), + "elementwise_add_Y": TensorConfig( + data_gen=generate_elementwise_add_Y + ), + "batch_norm_Scale": TensorConfig( + data_gen=generate_batch_norm_Scale + ), + "batch_norm_Bias": TensorConfig( + data_gen=generate_batch_norm_Bias + ), + "batch_norm_Mean": TensorConfig( + data_gen=generate_batch_norm_Mean + ), + "batch_norm_Variance": TensorConfig( + data_gen=generate_batch_norm_Variance + ), }, - outputs=["batch_norm_Y"]) + outputs=["batch_norm_Y"], + ) return program_config @@ -223,7 +239,6 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): return True def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): if program_config.ops[0].attrs['data_format'] == "NHWC": return True @@ -235,8 +250,12 @@ class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The output format of conv2d_transpose is wrong when data_format attribute is NHWC" + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The output format of conv2d_transpose is wrong when data_format attribute is NHWC", + ) + self.add_ignore_check_case( + teller2, + IgnoreReasons.PASS_ACCURACY_ERROR, + "there is diff when group >1 in this pass", ) - self.add_ignore_check_case(teller2, IgnoreReasons.PASS_ACCURACY_ERROR, - "there is diff when group >1 in this pass") diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_delete_c_identity_op_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_delete_c_identity_op_pass.py index 7efe5e97180b9c327f77eb92aeb20bdc57893c01..b008c134f8ece13d291cc79f4f50c4ec057655e1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_delete_c_identity_op_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_delete_c_identity_op_pass.py @@ -20,7 +20,6 @@ import hypothesis.strategies as st class TestDeleteCIdentityPass(PassAutoScanTest): - def sample_predictor_configs(self, program_config): config = self.create_trt_inference_config() config.enable_tensorrt_engine( @@ -29,28 +28,34 @@ class TestDeleteCIdentityPass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['relu'], (1e-5, 1e-5) def sample_program_config(self, draw): n = draw(st.integers(min_value=1, max_value=2)) - relu_op = OpConfig("relu", - inputs={"X": ["relu_x"]}, - outputs={"Out": ["relu_out"]}) - c_identity_op = OpConfig("c_identity", - inputs={"X": ["relu_out"]}, - outputs={"Out": ["id_out"]}) + relu_op = OpConfig( + "relu", inputs={"X": ["relu_x"]}, outputs={"Out": ["relu_out"]} + ) + c_identity_op = OpConfig( + "c_identity", + inputs={"X": ["relu_out"]}, + outputs={"Out": ["id_out"]}, + ) program_config = ProgramConfig( ops=[relu_op, c_identity_op], weights={}, inputs={"relu_x": TensorConfig(shape=[n])}, - outputs=["id_out"]) + outputs=["id_out"], + ) return program_config def test(self): - self.run_and_statis(max_examples=2, - min_success_num=2, - passes=["delete_c_identity_op_pass"]) + self.run_and_statis( + max_examples=2, + min_success_num=2, + passes=["delete_c_identity_op_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py index ce51cda6e45a8fc41994d9e6c60841e36a235be1..3cbd48dea6d4e6e6b559bab7d6632b54a6fd506e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py @@ -24,21 +24,21 @@ import hypothesis.strategies as st class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): r''' - in_var1 emb_var in_var2 emb_var in_var3 emb_var in_var emb_var - | | | | | | | | - lookup_table lookup_table lookup_table ... lookup_table - | | | | - lkt_var lkt_var lkt_var lkt_var - \ / | ... | - elementwise_add | | - \ / | - elementwise_add | - | | - elt_var / - \ / - elementwise_add - | - layer_norm + in_var1 emb_var in_var2 emb_var in_var3 emb_var in_var emb_var + | | | | | | | | + lookup_table lookup_table lookup_table ... lookup_table + | | | | + lkt_var lkt_var lkt_var lkt_var + \ / | ... | + elementwise_add | | + \ / | + elementwise_add | + | | + elt_var / + \ / + elementwise_add + | + layer_norm ''' def is_program_valid(self, program_config: ProgramConfig) -> bool: @@ -54,16 +54,20 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): if program_config.ops[3].attrs['axis'] not in [-1, 2]: return False - if not (program_config.ops[5].attrs['epsilon'] >= 0 - and program_config.ops[5].attrs['epsilon'] <= 0.001): + if not ( + program_config.ops[5].attrs['epsilon'] >= 0 + and program_config.ops[5].attrs['epsilon'] <= 0.001 + ): return False if program_config.ops[5].attrs['begin_norm_axis'] != 2: return False # input check - if program_config.weights['embedding_weight1'].shape[ - 1] != program_config.weights['layer_norm_scale'].shape[0]: + if ( + program_config.weights['embedding_weight1'].shape[1] + != program_config.weights['layer_norm_scale'].shape[0] + ): return False return True @@ -83,17 +87,17 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): def generate_input(attrs): if attrs[0]['op_type'] == 'lookup_table': - return np.random.randint(0, - attrs[3]['weight_size'][0], - size=(attrs[3]['batch_size'], - attrs[3]['input_dim'], - 1)).astype(np.int64) + return np.random.randint( + 0, + attrs[3]['weight_size'][0], + size=(attrs[3]['batch_size'], attrs[3]['input_dim'], 1), + ).astype(np.int64) else: - return np.random.randint(0, - attrs[3]['weight_size'][0], - size=(attrs[3]['batch_size'], - attrs[3]['input_dim'])).astype( - np.int64) + return np.random.randint( + 0, + attrs[3]['weight_size'][0], + size=(attrs[3]['batch_size'], attrs[3]['input_dim']), + ).astype(np.int64) def generate_weight1(attrs): # set embedding weight by attrs @@ -102,116 +106,128 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): def generate_weight2(attrs): # set layernorm weight by attrs if attrs[2]['begin_norm_axis'] == 1: - return np.random.random(attrs[3]['input_dim'] * - attrs[3]['weight_size'][1]).astype( - np.float32) + return np.random.random( + attrs[3]['input_dim'] * attrs[3]['weight_size'][1] + ).astype(np.float32) else: return np.random.random(attrs[3]['weight_size'][1]).astype( - np.float32) + np.float32 + ) - attrs = [{ - 'is_sparse': is_sparse, - 'is_distributed': is_distributed, - 'padding_idx': padding_idx, - 'op_type': op_type - }, { - 'axis': axis - }, { - 'begin_norm_axis': begin_norm_axis, - 'epsilon': epsilon - }, { - 'batch_size': batch_size, - 'input_dim': input_dim, - 'weight_size': weight_size - }] + attrs = [ + { + 'is_sparse': is_sparse, + 
'is_distributed': is_distributed, + 'padding_idx': padding_idx, + 'op_type': op_type, + }, + {'axis': axis}, + {'begin_norm_axis': begin_norm_axis, 'epsilon': epsilon}, + { + 'batch_size': batch_size, + 'input_dim': input_dim, + 'weight_size': weight_size, + }, + ] - emb_op1 = OpConfig(type=attrs[0]['op_type'], - inputs={ - "Ids": ["input_data1"], - "W": ["embedding_weight1"] - }, - outputs={"Out": ["embedding_output1"]}, - attrs={ - 'is_sparse': attrs[0]['is_sparse'], - 'is_distributed': attrs[0]['is_distributed'], - 'padding_idx': attrs[0]['padding_idx'] - }) - emb_op2 = OpConfig(type=attrs[0]['op_type'], - inputs={ - "Ids": ["input_data2"], - "W": ["embedding_weight2"] - }, - outputs={"Out": ["embedding_output2"]}, - attrs={ - 'is_sparse': attrs[0]['is_sparse'], - 'is_distributed': attrs[0]['is_distributed'], - 'padding_idx': attrs[0]['padding_idx'] - }) - emb_op3 = OpConfig(type=attrs[0]['op_type'], - inputs={ - "Ids": ["input_data3"], - "W": ["embedding_weight3"] - }, - outputs={"Out": ["embedding_output3"]}, - attrs={ - 'is_sparse': attrs[0]['is_sparse'], - 'is_distributed': attrs[0]['is_distributed'], - 'padding_idx': attrs[0]['padding_idx'] - }) - add_op1 = OpConfig(type='elementwise_add', - inputs={ - "X": [emb_op2.outputs["Out"][0]], - "Y": [emb_op3.outputs["Out"][0]], - }, - outputs={"Out": ["elementwise_add_output1"]}, - attrs={"axis": attrs[1]['axis']}) - add_op2 = OpConfig(type='elementwise_add', - inputs={ - "X": [add_op1.outputs["Out"][0]], - "Y": [emb_op1.outputs["Out"][0]], - }, - outputs={"Out": ["elementwise_add_output2"]}, - attrs={"axis": attrs[1]['axis']}) - layer_norm_op = OpConfig(type='layer_norm', - inputs={ - "X": [add_op2.outputs["Out"][0]], - "Bias": ["layer_norm_bias"], - "Scale": ["layer_norm_scale"] - }, - outputs={ - "Y": ["layer_norm_output1"], - "Mean": ["layer_norm_output2"], - "Variance": ["layer_norm_output3"] - }, - attrs={ - 'begin_norm_axis': - attrs[2]['begin_norm_axis'], - 'epsilon': attrs[2]['epsilon'] - }) + emb_op1 = OpConfig( + type=attrs[0]['op_type'], + inputs={"Ids": ["input_data1"], "W": ["embedding_weight1"]}, + outputs={"Out": ["embedding_output1"]}, + attrs={ + 'is_sparse': attrs[0]['is_sparse'], + 'is_distributed': attrs[0]['is_distributed'], + 'padding_idx': attrs[0]['padding_idx'], + }, + ) + emb_op2 = OpConfig( + type=attrs[0]['op_type'], + inputs={"Ids": ["input_data2"], "W": ["embedding_weight2"]}, + outputs={"Out": ["embedding_output2"]}, + attrs={ + 'is_sparse': attrs[0]['is_sparse'], + 'is_distributed': attrs[0]['is_distributed'], + 'padding_idx': attrs[0]['padding_idx'], + }, + ) + emb_op3 = OpConfig( + type=attrs[0]['op_type'], + inputs={"Ids": ["input_data3"], "W": ["embedding_weight3"]}, + outputs={"Out": ["embedding_output3"]}, + attrs={ + 'is_sparse': attrs[0]['is_sparse'], + 'is_distributed': attrs[0]['is_distributed'], + 'padding_idx': attrs[0]['padding_idx'], + }, + ) + add_op1 = OpConfig( + type='elementwise_add', + inputs={ + "X": [emb_op2.outputs["Out"][0]], + "Y": [emb_op3.outputs["Out"][0]], + }, + outputs={"Out": ["elementwise_add_output1"]}, + attrs={"axis": attrs[1]['axis']}, + ) + add_op2 = OpConfig( + type='elementwise_add', + inputs={ + "X": [add_op1.outputs["Out"][0]], + "Y": [emb_op1.outputs["Out"][0]], + }, + outputs={"Out": ["elementwise_add_output2"]}, + attrs={"axis": attrs[1]['axis']}, + ) + layer_norm_op = OpConfig( + type='layer_norm', + inputs={ + "X": [add_op2.outputs["Out"][0]], + "Bias": ["layer_norm_bias"], + "Scale": ["layer_norm_scale"], + }, + outputs={ + "Y": ["layer_norm_output1"], + 
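# The layer_norm op configured here uses begin_norm_axis=2 on a
# [batch, seq, hidden] tensor, i.e. it normalizes over the trailing hidden
# axis. A numpy sketch of that formula (illustrative only; this is not the
# fused kernel the pass produces):
import numpy as np

eps = 1e-5
x = np.random.rand(2, 128, 32).astype(np.float32)  # [batch, seq, hidden]
ln_scale = np.random.rand(32).astype(np.float32)
ln_bias = np.random.rand(32).astype(np.float32)

mean = x.mean(axis=-1, keepdims=True)
var = x.var(axis=-1, keepdims=True)
y = (x - mean) / np.sqrt(var + eps) * ln_scale + ln_bias
assert y.shape == x.shape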
"Mean": ["layer_norm_output2"], + "Variance": ["layer_norm_output3"], + }, + attrs={ + 'begin_norm_axis': attrs[2]['begin_norm_axis'], + 'epsilon': attrs[2]['epsilon'], + }, + ) program_config = ProgramConfig( ops=[emb_op1, emb_op2, emb_op3, add_op1, add_op2, layer_norm_op], weights={ - "embedding_weight1": - TensorConfig(data_gen=partial(generate_weight1, attrs[3])), - "embedding_weight2": - TensorConfig(data_gen=partial(generate_weight1, attrs[3])), - "embedding_weight3": - TensorConfig(data_gen=partial(generate_weight1, attrs[3])), - "layer_norm_bias": - TensorConfig(data_gen=partial(generate_weight2, attrs)), - "layer_norm_scale": - TensorConfig(data_gen=partial(generate_weight2, attrs)) + "embedding_weight1": TensorConfig( + data_gen=partial(generate_weight1, attrs[3]) + ), + "embedding_weight2": TensorConfig( + data_gen=partial(generate_weight1, attrs[3]) + ), + "embedding_weight3": TensorConfig( + data_gen=partial(generate_weight1, attrs[3]) + ), + "layer_norm_bias": TensorConfig( + data_gen=partial(generate_weight2, attrs) + ), + "layer_norm_scale": TensorConfig( + data_gen=partial(generate_weight2, attrs) + ), }, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, attrs)), - "input_data2": - TensorConfig(data_gen=partial(generate_input, attrs)), - "input_data3": - TensorConfig(data_gen=partial(generate_input, attrs)) + "input_data1": TensorConfig( + data_gen=partial(generate_input, attrs) + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, attrs) + ), + "input_data3": TensorConfig( + data_gen=partial(generate_input, attrs) + ), }, - outputs=["layer_norm_output1"]) + outputs=["layer_norm_output1"], + ) return program_config @@ -227,7 +243,8 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Half, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['fused_embedding_eltwise_layernorm'], (1e-5, 1e-5) # trt dynamic_shape config = self.create_trt_inference_config() @@ -237,61 +254,71 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Half, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) if program_config.ops[0].type == 'lookup_table': config.set_trt_dynamic_shape_info( { "input_data1": [1, 4, 1], "input_data2": [1, 4, 1], - "input_data3": [1, 4, 1] - }, { + "input_data3": [1, 4, 1], + }, + { "input_data1": [4, 512, 1], "input_data2": [4, 512, 1], - "input_data3": [4, 512, 1] - }, { + "input_data3": [4, 512, 1], + }, + { "input_data1": [2, 128, 1], "input_data2": [2, 128, 1], - "input_data3": [2, 128, 1] - }) + "input_data3": [2, 128, 1], + }, + ) else: config.set_trt_dynamic_shape_info( { "input_data1": [1, 4], "input_data2": [1, 4], - "input_data3": [1, 4] - }, { + "input_data3": [1, 4], + }, + { "input_data1": [4, 512], "input_data2": [4, 512], - "input_data3": [4, 512] - }, { + "input_data3": [4, 512], + }, + { "input_data1": [2, 128], "input_data2": [2, 128], - "input_data3": [2, 128] - }) + "input_data3": [2, 128], + }, + ) yield config, ['fused_embedding_eltwise_layernorm'], (1e-5, 1e-5) def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): - if program_config.ops[3].attrs['axis'] in [ - -1, 2 - ] and program_config.ops[5].attrs[ - 'begin_norm_axis'] == 2 and program_config.weights[ - 'embedding_weight1'].shape in [(64, 32), (64, 64)]: + if ( + program_config.ops[3].attrs['axis'] in [-1, 2] + and 
program_config.ops[5].attrs['begin_norm_axis'] == 2 + and program_config.weights['embedding_weight1'].shape + in [(64, 32), (64, 64)] + ): return True return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The pass output has diff in a specific case. We need to fix it as soon as possible." + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The pass output has diff in a specific case. We need to fix it as soon as possible.", ) def test(self): # this fuse need to fix, now there's no program can ran successfully - self.run_and_statis(quant=False, - max_examples=50, - passes=["embedding_eltwise_layernorm_fuse_pass"], - min_success_num=0) + self.run_and_statis( + quant=False, + max_examples=50, + passes=["embedding_eltwise_layernorm_fuse_pass"], + min_success_num=0, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py index b15fb88d73d6eea9463021c6fc2f61ae1074c38f..5074fd1d1b740ceb7faddc57e6d14e27afa92f47 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class FcElementLayernormFusePassDataGen: - def __init__(self, min_v, max_v, shape, dtype): self.min_v = min_v self.max_v = max_v @@ -29,8 +28,9 @@ class FcElementLayernormFusePassDataGen: self.dtype = dtype def __call__(self): - return np.random.normal(self.min_v, self.max_v, - self.shape).astype(self.dtype) + return np.random.normal(self.min_v, self.max_v, self.shape).astype( + self.dtype + ) class TestFCElementwiseLayerNormFusePass(PassAutoScanTest): @@ -56,18 +56,20 @@ class TestFCElementwiseLayerNormFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of fc x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) x_shape = [2, 1] x_rank = len(x_shape) # 2. Generate attr:in_num_col_dims of fc in_num_col_dims = draw(st.integers(min_value=1, max_value=x_rank - 1)) # 3. Generate legal shape of input:W/bias of fc w_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) w_shape[0] = int(np.prod(x_shape[in_num_col_dims:])) w_shape = [1, 2] fc_bias_shape = [ @@ -84,17 +86,14 @@ class TestFCElementwiseLayerNormFusePass(PassAutoScanTest): axis = draw(st.integers(min_value=-1, max_value=0)) # 5. 
Generate legal shape of layer_norm begin_norm_axis = draw( - st.integers(min_value=1, max_value=len(fc_out_shape) - 1)) + st.integers(min_value=1, max_value=len(fc_out_shape) - 1) + ) layer_norm_shape = [int(np.prod(fc_out_shape[begin_norm_axis:]))] epsilon = 1e-5 fc_op = OpConfig( "fc", - inputs={ - "Input": ["fc_x"], - "W": ["fc_w"], - "Bias": ["fc_bias"] - }, + inputs={"Input": ["fc_x"], "W": ["fc_w"], "Bias": ["fc_bias"]}, outputs={"Out": ["fc_out"]}, in_num_col_dims=in_num_col_dims, padding_weights=False, @@ -104,43 +103,40 @@ class TestFCElementwiseLayerNormFusePass(PassAutoScanTest): ) add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["fc_out"], - "Y": ["add_bias"] - }, + inputs={"X": ["fc_out"], "Y": ["add_bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) - layer_norm_op = OpConfig("layer_norm", - inputs={ - "X": ["add_out"], - "Scale": ["scale"], - "Bias": ["layer_norm_bias"] - }, - outputs={ - "Y": ["layer_norm_out"], - "Mean": ["layer_norm_mean"], - "Variance": ["layer_norm_var"] - }, - begin_norm_axis=begin_norm_axis, - epsilon=epsilon) + layer_norm_op = OpConfig( + "layer_norm", + inputs={ + "X": ["add_out"], + "Scale": ["scale"], + "Bias": ["layer_norm_bias"], + }, + outputs={ + "Y": ["layer_norm_out"], + "Mean": ["layer_norm_mean"], + "Variance": ["layer_norm_var"], + }, + begin_norm_axis=begin_norm_axis, + epsilon=epsilon, + ) ops = [fc_op, add_op, layer_norm_op] program_config = ProgramConfig( ops=ops, weights={ - "fc_w": - TensorConfig(shape=w_shape), - "fc_bias": - TensorConfig(shape=fc_bias_shape), - "add_bias": - TensorConfig(shape=add_bias_shape), - "scale": - TensorConfig(shape=layer_norm_shape, - data_gen=FcElementLayernormFusePassDataGen( - 0.0, 0.5, layer_norm_shape, np.float32)), - "layer_norm_bias": - TensorConfig(shape=layer_norm_shape), + "fc_w": TensorConfig(shape=w_shape), + "fc_bias": TensorConfig(shape=fc_bias_shape), + "add_bias": TensorConfig(shape=add_bias_shape), + "scale": TensorConfig( + shape=layer_norm_shape, + data_gen=FcElementLayernormFusePassDataGen( + 0.0, 0.5, layer_norm_shape, np.float32 + ), + ), + "layer_norm_bias": TensorConfig(shape=layer_norm_shape), }, inputs={ "fc_x": TensorConfig(shape=x_shape), @@ -150,9 +146,11 @@ class TestFCElementwiseLayerNormFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["fc_elementwise_layernorm_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["fc_elementwise_layernorm_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py index 58494fba43c254175ddc3351d976a74ebd3e69e1..73aa1738b846b34ec96466d4827567b186b15f11 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py @@ -50,7 +50,8 @@ class TestFcFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['fc'], (1e-5, 1e-5) def add_ignore_pass_case(self): @@ -74,8 +75,10 @@ class TestFcFusePass(PassAutoScanTest): def teller2(program_config, predictor_config): # TODO fuse has bug while axis != -1 axis = program_config.ops[1].attrs["axis"] - if axis != -1 and axis != program_config.ops[0].attrs[ - "x_num_col_dims"]: + if ( + axis != -1 + and axis != 
program_config.ops[0].attrs["x_num_col_dims"] + ): return True return False @@ -102,18 +105,21 @@ class TestFcFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of mul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=4 + ) + ) # 2. Generate attr:x_num_col_dims/y_num_col_dims of mul x_num_col_dims = draw( - st.integers(min_value=1, max_value=len(x_shape) - 1)) + st.integers(min_value=1, max_value=len(x_shape) - 1) + ) y_num_col_dims = 1 # 3. Generate legal shape of input:Y of mul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = int(np.prod(x_shape[x_num_col_dims:])) # 4. Generate legal attr:axis of elementwise_add mul_out_shape = x_shape[:x_num_col_dims] + y_shape[1:] @@ -122,12 +128,13 @@ class TestFcFusePass(PassAutoScanTest): if axis >= 0: max_bias_rank = x_num_col_dims + 1 - axis bias_rank = draw(st.integers(min_value=1, max_value=max_bias_rank)) - bias_shape = mul_out_shape[axis:axis + bias_rank] + bias_shape = mul_out_shape[axis : axis + bias_rank] else: max_bias_rank = 1 bias_rank = draw( - st.integers(min_value=1, max_value=len(mul_out_shape))) - bias_shape = mul_out_shape[-1 * bias_rank:] + st.integers(min_value=1, max_value=len(mul_out_shape)) + ) + bias_shape = mul_out_shape[-1 * bias_rank :] # 6. Random choose if use broadcast for elementwise_add, e.g [3, 4] -> [1, 4] if draw(st.booleans()): broadcast_dims = draw(st.integers(min_value=1, max_value=bias_rank)) @@ -147,28 +154,22 @@ class TestFcFusePass(PassAutoScanTest): # Use function `add_skip_pass_case` to ignore the programs even if they cause bug while runing mul_op = OpConfig( "mul", - inputs={ - "X": ["mul_x"], - "Y": ["mul_y"] - }, + inputs={"X": ["mul_x"], "Y": ["mul_y"]}, outputs={"Out": ["mul_out"]}, x_num_col_dims=x_num_col_dims, y_num_col_dims=y_num_col_dims, ) add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["mul_out"], - "Y": ["bias"] - }, + inputs={"X": ["mul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) ops = [mul_op, add_op] if has_relu: - relu_op = OpConfig("relu", - inputs={"X": ["add_out"]}, - outputs={"Out": ["relu_out"]}) + relu_op = OpConfig( + "relu", inputs={"X": ["add_out"]}, outputs={"Out": ["relu_out"]} + ) ops.append(relu_op) program_config = ProgramConfig( ops=ops, @@ -184,9 +185,9 @@ class TestFcFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=500, - passes=["fc_fuse_pass"]) + self.run_and_statis( + quant=False, max_examples=500, passes=["fc_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_gru_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_gru_fuse_pass.py index b722fb798c5b70b0e05d1af8384924a782d2df1e..f0c2cbefc0c962113bb5d11557fc59a4b4ea3282 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_gru_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_gru_fuse_pass.py @@ -20,28 +20,29 @@ from paddle.fluid.core import PassVersionChecker class FcGruFusePassTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): dict_dim, emb_dim = 128, 64 - data = fluid.data(name='step_data', - shape=[None], - dtype='int64', - lod_level=1) 
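# The GRU/LSTM fuse-pass tests in this area feed LoDTensor inputs: a flat
# int64 id array plus level-of-detail offsets marking sequence boundaries
# (set_lod([[0, batch]]) above means a single sequence spanning the batch).
# A plain-numpy sketch of that offset convention, independent of fluid:
import numpy as np

ids = np.random.randint(0, 128, size=[9]).astype("int64")  # flat token ids
offsets = [0, 4, 6, 9]  # sequences are ids[0:4], ids[4:6], ids[6:9]

sequences = [ids[offsets[i] : offsets[i + 1]] for i in range(len(offsets) - 1)]
assert [len(s) for s in sequences] == [4, 2, 3]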
+ data = fluid.data( + name='step_data', shape=[None], dtype='int64', lod_level=1 + ) emb = fluid.embedding(input=data, size=[dict_dim, emb_dim]) hidden_dim = 512 x = fluid.layers.fc(input=emb, size=hidden_dim * 3) - hidden = fluid.layers.dynamic_gru(input=x, - size=hidden_dim, - bias_attr=True, - origin_mode=False, - is_reverse=True) + hidden = fluid.layers.dynamic_gru( + input=x, + size=hidden_dim, + bias_attr=True, + origin_mode=False, + is_reverse=True, + ) batch = 16 lod_tensor = fluid.LoDTensor() lod_tensor.set( np.random.randint(0, dict_dim, size=[batch]).astype("int64"), - fluid.CPUPlace()) + fluid.CPUPlace(), + ) lod_tensor.set_lod([[0, batch]]) self.feeds = {"step_data": lod_tensor} self.fetch_list = [hidden] @@ -53,28 +54,29 @@ class FcGruFusePassTest(InferencePassTest): class MulGruFusePassTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): dict_dim, emb_dim = 128, 64 - data = fluid.data(name='step_data', - shape=[None], - dtype='int64', - lod_level=1) + data = fluid.data( + name='step_data', shape=[None], dtype='int64', lod_level=1 + ) emb = fluid.embedding(input=data, size=[dict_dim, emb_dim]) hidden_dim = 512 x = fluid.layers.fc(input=emb, size=hidden_dim * 3, bias_attr=False) - hidden = fluid.layers.dynamic_gru(input=x, - size=hidden_dim, - bias_attr=True, - origin_mode=False, - is_reverse=True) + hidden = fluid.layers.dynamic_gru( + input=x, + size=hidden_dim, + bias_attr=True, + origin_mode=False, + is_reverse=True, + ) batch = 16 lod_tensor = fluid.LoDTensor() lod_tensor.set( np.random.randint(0, dict_dim, size=[batch]).astype("int64"), - fluid.CPUPlace()) + fluid.CPUPlace(), + ) lod_tensor.set_lod([[0, batch]]) self.feeds = {"step_data": lod_tensor} self.fetch_list = [hidden] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_lstm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_lstm_fuse_pass.py index 040cf05b02a44fe98babac09e4770b8aaef9ac9e..acef755d5712529da47c7c7c62ab59942ed8bef6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_lstm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_lstm_fuse_pass.py @@ -20,26 +20,26 @@ from paddle.fluid.core import PassVersionChecker class MulLstmFusePassTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): dict_dim, emb_dim = 128, 64 hidden_dim = 512 - data = fluid.data(name='data', - shape=[1], - dtype='int64', - lod_level=1) + data = fluid.data( + name='data', shape=[1], dtype='int64', lod_level=1 + ) emb = fluid.embedding(input=data, size=[dict_dim, emb_dim]) x = fluid.layers.fc(input=emb, size=hidden_dim * 4, bias_attr=False) - forward, cell = fluid.layers.dynamic_lstm(input=x, - size=hidden_dim * 4) + forward, cell = fluid.layers.dynamic_lstm( + input=x, size=hidden_dim * 4 + ) batch = 16 lod_tensor = fluid.LoDTensor() lod_tensor.set( np.random.randint(0, dict_dim, size=[batch]).astype("int64"), - fluid.CPUPlace()) + fluid.CPUPlace(), + ) lod_tensor.set_lod([[0, batch]]) self.feeds = {"data": lod_tensor} self.fetch_list = [forward, cell] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py index 21e0843a5aab65228b91110da940469915e0279d..2d84561b770f95ba9f789fa89c7b9b4e74500324 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py @@ -44,9 +44,10 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape and attr of flatten2 x_shape = draw( - st.lists(st.integers(min_value=1, max_value=10), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=10), min_size=4, max_size=4 + ) + ) # [a, b, c, d] => [a, b*c*d] flatten_axis = 1 flatten_shape = [x_shape[0], x_shape[1] * x_shape[2] * x_shape[3]] @@ -58,9 +59,10 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of matmul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = flatten_shape[1] # 4. Generate legal attr:axis of elementwise_add @@ -82,17 +84,11 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): "X": ["flatten2_x"], }, axis=flatten_axis, - outputs={ - "Out": ["flatten2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["flatten2_out"], "XShape": ["xshape"]}, ) matmul_op = OpConfig( "matmul", - inputs={ - "X": ["flatten2_out"], - "Y": ["matmul_y"] - }, + inputs={"X": ["flatten2_out"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -107,10 +103,7 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["matmul_out"], - "Y": ["bias"] - }, + inputs={"X": ["matmul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) @@ -143,10 +136,12 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - max_duration=1000, - passes=["gpu_cpu_flatten2_matmul_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=50, + max_duration=1000, + passes=["gpu_cpu_flatten2_matmul_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py index e97eac1652849f48232489336c4c3920bb2f9ac2..2261ba416d7f2676087b796951ca50398d40859e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py @@ -20,7 +20,6 @@ import hypothesis.strategies as st class TestIdentityScaleCleanPass(PassAutoScanTest): - def sample_predictor_configs(self, program_config): config = self.create_trt_inference_config() config.enable_tensorrt_engine( @@ -29,7 +28,8 @@ class TestIdentityScaleCleanPass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['relu'], (1e-5, 1e-5) def sample_program_config(self, draw): @@ -39,25 +39,29 @@ class TestIdentityScaleCleanPass(PassAutoScanTest): h = draw(st.integers(min_value=1, max_value=20)) w = draw(st.integers(min_value=1, max_value=20)) - relu_op = OpConfig("relu", - inputs={"X": ["relu_x"]}, - outputs={"Out": ["relu_out"]}) - scale_op = OpConfig("scale", - inputs={"X": ["relu_out"]}, - outputs={"Out": ["scale_out"]}, - bias=0., - scale=1., - bias_after_scale=True) + relu_op = OpConfig( + "relu", inputs={"X": ["relu_x"]}, outputs={"Out": ["relu_out"]} + ) + scale_op = OpConfig( + "scale", + inputs={"X": ["relu_out"]}, + 
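# identity_scale_op_clean_pass removes scale ops that are numerically a no-op.
# For reference (a sketch of the operator's formula, not Paddle code): with
# bias_after_scale=True the op computes scale * x + bias, so the scale=1.0,
# bias=0.0 configuration used in this test leaves its input unchanged.
import numpy as np

x = np.random.rand(2, 3, 4, 5).astype(np.float32)
scale, bias = 1.0, 0.0
out = scale * x + bias  # bias_after_scale=True ordering
assert np.array_equal(out, x)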
outputs={"Out": ["scale_out"]}, + bias=0.0, + scale=1.0, + bias_after_scale=True, + ) program_config = ProgramConfig( ops=[relu_op, scale_op], weights={}, inputs={"relu_x": TensorConfig(shape=[n, c, h, w])}, - outputs=["scale_out"]) + outputs=["scale_out"], + ) return program_config def test(self): - self.run_and_statis(max_examples=25, - passes=["identity_scale_op_clean_pass"]) + self.run_and_statis( + max_examples=25, passes=["identity_scale_op_clean_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py index 9b928436b59986ca734378b5f2f1e5d84729f694..3c6bf6a1ca9fa5f48de5c72c596b43dfaa58b195 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py @@ -76,15 +76,17 @@ class TestFcFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=4, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=4, max_size=5 + ) + ) x_shape_rank = len(x_shape) # 2. Generate attrs of reduce_mean keep_dim = draw(st.booleans()) reduce_all = False begin_norm_axis = draw( - st.integers(min_value=1, max_value=x_shape_rank - 1)) + st.integers(min_value=1, max_value=x_shape_rank - 1) + ) if begin_norm_axis == x_shape_rank - 1 and draw(st.booleans()): reduce_mean_dim = [-1] else: @@ -108,22 +110,30 @@ class TestFcFusePass(PassAutoScanTest): pow_axis = -1 def generate_pow_data(): - return np.array([ - 2, - ], dtype="float32") + return np.array( + [ + 2, + ], + dtype="float32", + ) # 5. Generate attrs of elementwise_add if keep_dim: add_axis = draw( - st.integers(min_value=-1, max_value=x_shape_rank - 1)) + st.integers(min_value=-1, max_value=x_shape_rank - 1) + ) else: add_axis = draw( - st.integers(min_value=-1, max_value=begin_norm_axis - 1)) + st.integers(min_value=-1, max_value=begin_norm_axis - 1) + ) def generate_epsilon_data(): - return np.array([ - 1e-5, - ], dtype="float32") + return np.array( + [ + 1e-5, + ], + dtype="float32", + ) # 6. 
Generate attrs of elementwise_div div_axis = 0 @@ -151,19 +161,13 @@ class TestFcFusePass(PassAutoScanTest): ) sub_op = OpConfig( "elementwise_sub", - inputs={ - "X": ["x"], - "Y": ["mean_out"] - }, + inputs={"X": ["x"], "Y": ["mean_out"]}, outputs={"Out": ["sub_out"]}, axis=sub_axis, ) pow_op = OpConfig( "elementwise_pow", - inputs={ - "X": ["sub_out"], - "Y": ["pow_y"] - }, + inputs={"X": ["sub_out"], "Y": ["pow_y"]}, outputs={"Out": ["pow_out"]}, axis=pow_axis, ) @@ -179,10 +183,7 @@ class TestFcFusePass(PassAutoScanTest): ) add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["mean_out2"], - "Y": ["epsilon_var"] - }, + inputs={"X": ["mean_out2"], "Y": ["epsilon_var"]}, outputs={"Out": ["add_out"]}, axis=add_axis, ) @@ -195,35 +196,33 @@ class TestFcFusePass(PassAutoScanTest): ) div_op = OpConfig( "elementwise_div", - inputs={ - "X": ["sub_out"], - "Y": ["sqrt_out"] - }, + inputs={"X": ["sub_out"], "Y": ["sqrt_out"]}, outputs={"Out": ["div_out"]}, axis=div_axis, ) mul_op = OpConfig( "elementwise_mul", - inputs={ - "X": ["div_out"], - "Y": ["gamma_var"] - }, + inputs={"X": ["div_out"], "Y": ["gamma_var"]}, outputs={"Out": ["mul_out"]}, axis=mul_axis, ) add_op2 = OpConfig( "elementwise_add", - inputs={ - "X": ["mul_out"], - "Y": ["beta_var"] - }, + inputs={"X": ["mul_out"], "Y": ["beta_var"]}, outputs={"Out": ["add_out2"]}, axis=add_axis2, ) ops = [ - mean_op1, sub_op, pow_op, mean_op2, add_op, sqrt_op, div_op, mul_op, - add_op2 + mean_op1, + sub_op, + pow_op, + mean_op2, + add_op, + sqrt_op, + div_op, + mul_op, + add_op2, ] program_config = ProgramConfig( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_layernorm_shift_partition_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_layernorm_shift_partition_pass.py index aeaff113f58d2157fc80aa317cbbc0b6a2ae099c..56436710f333296b3be876f41ddc7b9447e1aeda 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_layernorm_shift_partition_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_layernorm_shift_partition_pass.py @@ -49,14 +49,19 @@ class TestLayernormShiftPartitionPass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({ - "input_data": [1, 9, 96], - }, { - "input_data": [4, 3136, 768], - }, { - "input_data": [1, 784, 384], - }) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + { + "input_data": [1, 9, 96], + }, + { + "input_data": [4, 3136, 768], + }, + { + "input_data": [1, 784, 384], + }, + ) yield config, ['layernorm_shift_partition'], (1e-5, 1e-5) # trt dynamic_shape @@ -67,14 +72,19 @@ class TestLayernormShiftPartitionPass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Half, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({ - "input_data": [1, 9, 96], - }, { - "input_data": [4, 3136, 768], - }, { - "input_data": [1, 784, 384], - }) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + { + "input_data": [1, 9, 96], + }, + { + "input_data": [4, 3136, 768], + }, + { + "input_data": [1, 784, 384], + }, + ) yield config, ['layernorm_shift_partition'], (1e-3, 1e-3) def sample_program_config(self, draw): @@ -90,95 +100,112 @@ class TestLayernormShiftPartitionPass(PassAutoScanTest): def generate_input(attrs): return np.random.random( - [attrs[1]["batch_size"], - *attrs[1]["input_dim"]]).astype(np.float32) + [attrs[1]["batch_size"], *attrs[1]["input_dim"]] + 
).astype(np.float32) def generate_weight(attrs): return np.random.random(attrs[1]['input_dim'][-1]).astype( - np.float32) + np.float32 + ) - attrs = [{ - 'begin_norm_axis': begin_norm_axis, - 'epsilon': epsilon, - }, { - 'batch_size': batch_size, - 'input_dim': [(window_size * move_shape)**2, dim], - }, { - 'axis': axis, - 'input_resolution': window_size * move_shape, - 'move_shape': move_shape, - 'window_size': window_size, - }] + attrs = [ + { + 'begin_norm_axis': begin_norm_axis, + 'epsilon': epsilon, + }, + { + 'batch_size': batch_size, + 'input_dim': [(window_size * move_shape) ** 2, dim], + }, + { + 'axis': axis, + 'input_resolution': window_size * move_shape, + 'move_shape': move_shape, + 'window_size': window_size, + }, + ] - layer_norm_op = OpConfig(type="layer_norm", - inputs={ - "X": ["input_data"], - "Bias": ["layer_norm_bias"], - "Scale": ["layer_norm_scale"] - }, - outputs={ - "Y": ["layer_norm_output1"], - "Mean": ["layer_norm_output2"], - "Variance": ["layer_norm_output3"] - }, - attrs={ - "begin_norm_axis": - attrs[0]["begin_norm_axis"], - "epsilon": attrs[0]["epsilon"], - }) - reshape_op2 = OpConfig(type="reshape2", - inputs={ - "X": ["layer_norm_output1"], - }, - outputs={ - "Out": ["reshape_output2"], - "XShape": ["reshape_output2_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["input_resolution"], - attrs[2]["input_resolution"], - attrs[1]["input_dim"][-1] - ] - }) - reshape_op3 = OpConfig(type="reshape2", - inputs={ - "X": ["reshape_output2"], - }, - outputs={ - "Out": ["reshape_output3"], - "XShape": ["reshape_output3_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["move_shape"], - attrs[2]["window_size"], - attrs[2]["move_shape"], - attrs[2]["window_size"], - attrs[1]["input_dim"][-1] - ] - }) - transpose_op4 = OpConfig(type='transpose2', - inputs={ - "X": ["reshape_output3"], - }, - outputs={"Out": ["transpose_output4"]}, - attrs={"axis": attrs[2]['axis']}) - reshape_op5 = OpConfig(type="reshape2", - inputs={ - "X": ["transpose_output4"], - }, - outputs={ - "Out": ["reshape_output5"], - "XShape": ["reshape_output5_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["window_size"], - attrs[2]["window_size"], - attrs[1]["input_dim"][-1] - ] - }) + layer_norm_op = OpConfig( + type="layer_norm", + inputs={ + "X": ["input_data"], + "Bias": ["layer_norm_bias"], + "Scale": ["layer_norm_scale"], + }, + outputs={ + "Y": ["layer_norm_output1"], + "Mean": ["layer_norm_output2"], + "Variance": ["layer_norm_output3"], + }, + attrs={ + "begin_norm_axis": attrs[0]["begin_norm_axis"], + "epsilon": attrs[0]["epsilon"], + }, + ) + reshape_op2 = OpConfig( + type="reshape2", + inputs={ + "X": ["layer_norm_output1"], + }, + outputs={ + "Out": ["reshape_output2"], + "XShape": ["reshape_output2_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["input_resolution"], + attrs[2]["input_resolution"], + attrs[1]["input_dim"][-1], + ] + }, + ) + reshape_op3 = OpConfig( + type="reshape2", + inputs={ + "X": ["reshape_output2"], + }, + outputs={ + "Out": ["reshape_output3"], + "XShape": ["reshape_output3_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["move_shape"], + attrs[2]["window_size"], + attrs[2]["move_shape"], + attrs[2]["window_size"], + attrs[1]["input_dim"][-1], + ] + }, + ) + transpose_op4 = OpConfig( + type='transpose2', + inputs={ + "X": ["reshape_output3"], + }, + outputs={"Out": ["transpose_output4"]}, + attrs={"axis": attrs[2]['axis']}, + ) + reshape_op5 = OpConfig( + type="reshape2", + inputs={ + "X": ["transpose_output4"], + }, + outputs={ + "Out": 
["reshape_output5"], + "XShape": ["reshape_output5_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["window_size"], + attrs[2]["window_size"], + attrs[1]["input_dim"][-1], + ] + }, + ) reshape_op6 = OpConfig( type="reshape2", inputs={ @@ -189,35 +216,49 @@ class TestLayernormShiftPartitionPass(PassAutoScanTest): "XShape": ["reshape_output6_xshape"], }, attrs={ - 'shape': - [-1, attrs[2]["window_size"]**2, attrs[1]["input_dim"][-1]] - }) + 'shape': [ + -1, + attrs[2]["window_size"] ** 2, + attrs[1]["input_dim"][-1], + ] + }, + ) program_config = ProgramConfig( ops=[ - layer_norm_op, reshape_op2, reshape_op3, transpose_op4, - reshape_op5, reshape_op6 + layer_norm_op, + reshape_op2, + reshape_op3, + transpose_op4, + reshape_op5, + reshape_op6, ], weights={ - "layer_norm_bias": - TensorConfig(data_gen=partial(generate_weight, attrs)), - "layer_norm_scale": - TensorConfig(data_gen=partial(generate_weight, attrs)) + "layer_norm_bias": TensorConfig( + data_gen=partial(generate_weight, attrs) + ), + "layer_norm_scale": TensorConfig( + data_gen=partial(generate_weight, attrs) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, attrs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, attrs) + ), }, - outputs=["reshape_output6"]) + outputs=["reshape_output6"], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - passes=["layernorm_shift_partition_fuse_pass"], - max_duration=250, - min_success_num=50) + self.run_and_statis( + quant=False, + max_examples=50, + passes=["layernorm_shift_partition_fuse_pass"], + max_duration=250, + min_success_num=50, + ) class TestLayernormShiftPartition2Pass(PassAutoScanTest): @@ -248,14 +289,19 @@ class TestLayernormShiftPartition2Pass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({ - "input_data": [1, 9, 96], - }, { - "input_data": [4, 3136, 768], - }, { - "input_data": [1, 784, 384], - }) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + { + "input_data": [1, 9, 96], + }, + { + "input_data": [4, 3136, 768], + }, + { + "input_data": [1, 784, 384], + }, + ) yield config, ['layernorm_shift_partition'], (1e-5, 1e-5) # trt dynamic_shape @@ -266,14 +312,19 @@ class TestLayernormShiftPartition2Pass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Half, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({ - "input_data": [1, 9, 96], - }, { - "input_data": [4, 3136, 768], - }, { - "input_data": [1, 784, 384], - }) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + { + "input_data": [1, 9, 96], + }, + { + "input_data": [4, 3136, 768], + }, + { + "input_data": [1, 784, 384], + }, + ) yield config, ['layernorm_shift_partition'], (1e-3, 1e-3) def sample_program_config(self, draw): @@ -289,106 +340,124 @@ class TestLayernormShiftPartition2Pass(PassAutoScanTest): def generate_input(attrs): return np.random.random( - [attrs[1]["batch_size"], - *attrs[1]["input_dim"]]).astype(np.float32) + [attrs[1]["batch_size"], *attrs[1]["input_dim"]] + ).astype(np.float32) def generate_weight(attrs): return np.random.random(attrs[1]['input_dim'][-1]).astype( - np.float32) + np.float32 + ) - attrs = [{ - 'begin_norm_axis': begin_norm_axis, - 'epsilon': epsilon, - }, { - 'batch_size': batch_size, - 'input_dim': [(window_size * move_shape)**2, dim], - }, { - 'axis': axis, - 
'input_resolution': window_size * move_shape, - 'move_shape': move_shape, - 'window_size': window_size, - }] + attrs = [ + { + 'begin_norm_axis': begin_norm_axis, + 'epsilon': epsilon, + }, + { + 'batch_size': batch_size, + 'input_dim': [(window_size * move_shape) ** 2, dim], + }, + { + 'axis': axis, + 'input_resolution': window_size * move_shape, + 'move_shape': move_shape, + 'window_size': window_size, + }, + ] - layer_norm_op = OpConfig(type="layer_norm", - inputs={ - "X": ["input_data"], - "Bias": ["layer_norm_bias"], - "Scale": ["layer_norm_scale"] - }, - outputs={ - "Y": ["layer_norm_output1"], - "Mean": ["layer_norm_output2"], - "Variance": ["layer_norm_output3"] - }, - attrs={ - "begin_norm_axis": - attrs[0]["begin_norm_axis"], - "epsilon": attrs[0]["epsilon"], - }) - reshape_op2 = OpConfig(type="reshape2", - inputs={ - "X": ["layer_norm_output1"], - }, - outputs={ - "Out": ["reshape_output2"], - "XShape": ["reshape_output2_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["input_resolution"], - attrs[2]["input_resolution"], - attrs[1]["input_dim"][-1] - ] - }) - roll_op1 = OpConfig(type="roll", - inputs={"X": ["reshape_output2"]}, - outputs={"Out": ["roll_output1"]}, - attrs={ - "axis": [1, 2], - "shifts": [ - -math.floor( - (attrs[2]["window_size"]) / 2.0), - -math.floor((attrs[2]["window_size"]) / 2.0) - ] - }) - reshape_op3 = OpConfig(type="reshape2", - inputs={ - "X": ["roll_output1"], - }, - outputs={ - "Out": ["reshape_output3"], - "XShape": ["reshape_output3_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["move_shape"], - attrs[2]["window_size"], - attrs[2]["move_shape"], - attrs[2]["window_size"], - attrs[1]["input_dim"][-1] - ] - }) - transpose_op4 = OpConfig(type='transpose2', - inputs={ - "X": ["reshape_output3"], - }, - outputs={"Out": ["transpose_output4"]}, - attrs={"axis": attrs[2]['axis']}) - reshape_op5 = OpConfig(type="reshape2", - inputs={ - "X": ["transpose_output4"], - }, - outputs={ - "Out": ["reshape_output5"], - "XShape": ["reshape_output5_xshape"], - }, - attrs={ - 'shape': [ - -1, attrs[2]["window_size"], - attrs[2]["window_size"], - attrs[1]["input_dim"][-1] - ] - }) + layer_norm_op = OpConfig( + type="layer_norm", + inputs={ + "X": ["input_data"], + "Bias": ["layer_norm_bias"], + "Scale": ["layer_norm_scale"], + }, + outputs={ + "Y": ["layer_norm_output1"], + "Mean": ["layer_norm_output2"], + "Variance": ["layer_norm_output3"], + }, + attrs={ + "begin_norm_axis": attrs[0]["begin_norm_axis"], + "epsilon": attrs[0]["epsilon"], + }, + ) + reshape_op2 = OpConfig( + type="reshape2", + inputs={ + "X": ["layer_norm_output1"], + }, + outputs={ + "Out": ["reshape_output2"], + "XShape": ["reshape_output2_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["input_resolution"], + attrs[2]["input_resolution"], + attrs[1]["input_dim"][-1], + ] + }, + ) + roll_op1 = OpConfig( + type="roll", + inputs={"X": ["reshape_output2"]}, + outputs={"Out": ["roll_output1"]}, + attrs={ + "axis": [1, 2], + "shifts": [ + -math.floor((attrs[2]["window_size"]) / 2.0), + -math.floor((attrs[2]["window_size"]) / 2.0), + ], + }, + ) + reshape_op3 = OpConfig( + type="reshape2", + inputs={ + "X": ["roll_output1"], + }, + outputs={ + "Out": ["reshape_output3"], + "XShape": ["reshape_output3_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["move_shape"], + attrs[2]["window_size"], + attrs[2]["move_shape"], + attrs[2]["window_size"], + attrs[1]["input_dim"][-1], + ] + }, + ) + transpose_op4 = OpConfig( + type='transpose2', + inputs={ + "X": ["reshape_output3"], + }, + 
outputs={"Out": ["transpose_output4"]}, + attrs={"axis": attrs[2]['axis']}, + ) + reshape_op5 = OpConfig( + type="reshape2", + inputs={ + "X": ["transpose_output4"], + }, + outputs={ + "Out": ["reshape_output5"], + "XShape": ["reshape_output5_xshape"], + }, + attrs={ + 'shape': [ + -1, + attrs[2]["window_size"], + attrs[2]["window_size"], + attrs[1]["input_dim"][-1], + ] + }, + ) reshape_op6 = OpConfig( type="reshape2", inputs={ @@ -399,35 +468,50 @@ class TestLayernormShiftPartition2Pass(PassAutoScanTest): "XShape": ["reshape_output6_xshape"], }, attrs={ - 'shape': - [-1, attrs[2]["window_size"]**2, attrs[1]["input_dim"][-1]] - }) + 'shape': [ + -1, + attrs[2]["window_size"] ** 2, + attrs[1]["input_dim"][-1], + ] + }, + ) program_config = ProgramConfig( ops=[ - layer_norm_op, reshape_op2, roll_op1, reshape_op3, - transpose_op4, reshape_op5, reshape_op6 + layer_norm_op, + reshape_op2, + roll_op1, + reshape_op3, + transpose_op4, + reshape_op5, + reshape_op6, ], weights={ - "layer_norm_bias": - TensorConfig(data_gen=partial(generate_weight, attrs)), - "layer_norm_scale": - TensorConfig(data_gen=partial(generate_weight, attrs)) + "layer_norm_bias": TensorConfig( + data_gen=partial(generate_weight, attrs) + ), + "layer_norm_scale": TensorConfig( + data_gen=partial(generate_weight, attrs) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, attrs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, attrs) + ), }, - outputs=["reshape_output6"]) + outputs=["reshape_output6"], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - passes=["layernorm_shift_partition_fuse_pass"], - max_duration=250, - min_success_num=50) + self.run_and_statis( + quant=False, + max_examples=50, + passes=["layernorm_shift_partition_fuse_pass"], + max_duration=250, + min_success_num=50, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py index e6db1204897c51c40800a4bfbee7bea4d8586861..138782f00e10aa89f8c7ebf669dee4652815969f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py @@ -21,9 +21,9 @@ import hypothesis.strategies as st class TestMapMatmulToMulPass(PassAutoScanTest): r""" - x_var y_var(persistable) - \ / - matmul + x_var y_var(persistable) + \ / + matmul """ def sample_predictor_configs(self, program_config): @@ -67,19 +67,23 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The pass error on TRT while shape of mul_x > 5.") + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The pass error on TRT while shape of mul_x > 5.", + ) def sample_program_config(self, draw): # 1. 
Generate shape and attr of matmul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = x_shape[-1] alpha = 1.0 transpose_X = False @@ -87,10 +91,7 @@ class TestMapMatmulToMulPass(PassAutoScanTest): matmul_op = OpConfig( "matmul", - inputs={ - "X": ["matmul_x"], - "Y": ["matmul_y"] - }, + inputs={"X": ["matmul_x"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -121,10 +122,12 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=100, - passes=["gpu_cpu_map_matmul_to_mul_pass"], - max_duration=180) + self.run_and_statis( + quant=False, + max_examples=100, + passes=["gpu_cpu_map_matmul_to_mul_pass"], + max_duration=180, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py index 38f86dc71702c6d5bf629cc6697733ded867cdaf..d2dec6060734c5b2e3a2d3526a8aea4947fd0139 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py @@ -21,9 +21,9 @@ import hypothesis.strategies as st class TestMapMatmulToMulPass(PassAutoScanTest): r""" - x_var y_var(persistable) - \ / - matmul_v2 + x_var y_var(persistable) + \ / + matmul_v2 """ def sample_predictor_configs(self, program_config): @@ -63,19 +63,23 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The pass error on TRT while shape of mul_x > 5.") + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The pass error on TRT while shape of mul_x > 5.", + ) def sample_program_config(self, draw): # 1. 
Generate shape and attr of matmul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) transpose_X = draw(st.booleans()) transpose_Y = draw(st.booleans()) if transpose_X: @@ -89,15 +93,12 @@ class TestMapMatmulToMulPass(PassAutoScanTest): else: y_shape[0] = x_shape[-1] - y_shape = x_shape[0:len(x_shape) - 2] + y_shape + y_shape = x_shape[0 : len(x_shape) - 2] + y_shape alpha = 1.0 matmul_op = OpConfig( "matmul_v2", - inputs={ - "X": ["matmul_x"], - "Y": ["matmul_y"] - }, + inputs={"X": ["matmul_x"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, trans_x=transpose_X, @@ -128,9 +129,11 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=100, - passes=["gpu_cpu_map_matmul_v2_to_matmul_pass"]) + self.run_and_statis( + quant=False, + max_examples=100, + passes=["gpu_cpu_map_matmul_v2_to_matmul_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py index d4c6db1c7f5d4113d630e91674092827d2aef3af..148610edad134e681f13db3f4c5797ca024d79bb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py @@ -21,9 +21,9 @@ import hypothesis.strategies as st class TestMapMatmulToMulPass(PassAutoScanTest): r""" - x_var y_var(persistable) - \ / - matmul_v2 + x_var y_var(persistable) + \ / + matmul_v2 """ def sample_predictor_configs(self, program_config): @@ -63,19 +63,23 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The pass error on TRT while shape of mul_x > 5.") + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The pass error on TRT while shape of mul_x > 5.", + ) def sample_program_config(self, draw): # 1. 
Generate shape and attr of matmul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = x_shape[-1] alpha = 1.0 transpose_X = False @@ -83,10 +87,7 @@ class TestMapMatmulToMulPass(PassAutoScanTest): matmul_op = OpConfig( "matmul_v2", - inputs={ - "X": ["matmul_x"], - "Y": ["matmul_y"] - }, + inputs={"X": ["matmul_x"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, trans_x=transpose_X, @@ -117,9 +118,11 @@ class TestMapMatmulToMulPass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=100, - passes=["gpu_cpu_map_matmul_v2_to_mul_pass"]) + self.run_and_statis( + quant=False, + max_examples=100, + passes=["gpu_cpu_map_matmul_v2_to_mul_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py index 6db5468292c4e62f2b62b3bfd76d894ef3077fa6..2a60d62db3d571b97c5b80ed49bd478aba0a9220 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py @@ -21,11 +21,11 @@ import hypothesis.strategies as st class TestMatmulScaleFusePass(PassAutoScanTest): r""" - x_var y_var(persistable) - \ / - matmul - | - scale + x_var y_var(persistable) + \ / + matmul + | + scale """ def sample_predictor_configs(self, program_config): @@ -50,14 +50,18 @@ class TestMatmulScaleFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. 
Generate shape and attr of matmul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) x_shape_rank = len(x_shape) y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=x_shape_rank, - max_size=x_shape_rank)) + st.lists( + st.integers(min_value=1, max_value=8), + min_size=x_shape_rank, + max_size=x_shape_rank, + ) + ) y_shape_rank = len(y_shape) y_shape[-2] = x_shape[-1] for i in range(y_shape_rank - 3, -1, -1): @@ -75,10 +79,7 @@ class TestMatmulScaleFusePass(PassAutoScanTest): matmul_op = OpConfig( "matmul", - inputs={ - "X": ["matmul_x"], - "Y": ["matmul_y"] - }, + inputs={"X": ["matmul_x"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, transpose_X=transpose_X, transpose_Y=transpose_Y, @@ -95,10 +96,7 @@ class TestMatmulScaleFusePass(PassAutoScanTest): if is_scale_tensor: scale_op = OpConfig( "scale", - inputs={ - "X": ["matmul_out"], - "ScaleTensor": ["scale_tensor"] - }, + inputs={"X": ["matmul_out"], "ScaleTensor": ["scale_tensor"]}, outputs={"Out": ["scale_out"]}, scale=scale_value, bias=0.0, @@ -122,7 +120,7 @@ class TestMatmulScaleFusePass(PassAutoScanTest): if is_scale_tensor: weights = { "matmul_y": TensorConfig(shape=y_shape), - "scale_tensor": TensorConfig(shape=scale_shape) + "scale_tensor": TensorConfig(shape=scale_shape), } inputs = { "matmul_x": TensorConfig(shape=x_shape), diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py index a9b48e7cf362750d6da8c653ca32b7a860c1667f..4b0c226e1a7adc3e88c334015746ba788e17eb61 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py @@ -21,13 +21,13 @@ import hypothesis.strategies as st class TestMatmulV2ScaleFusePass(PassAutoScanTest): r""" - x_var y_var(persistable) x_var y_var*scale(persistable) - \ / \ / - matmul_v2 matmul_v2 - | => | - scale scale_out - | - scale_out + x_var y_var(persistable) x_var y_var*scale(persistable) + \ / \ / + matmul_v2 matmul_v2 + | => | + scale scale_out + | + scale_out """ def sample_predictor_configs(self, program_config): @@ -44,14 +44,18 @@ class TestMatmulV2ScaleFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. 
Generate shape and attr of matmul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=5)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=5 + ) + ) x_shape_rank = len(x_shape) y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=x_shape_rank, - max_size=x_shape_rank)) + st.lists( + st.integers(min_value=1, max_value=8), + min_size=x_shape_rank, + max_size=x_shape_rank, + ) + ) y_shape_rank = len(y_shape) y_shape[-2] = x_shape[-1] for i in range(y_shape_rank - 3, -1, -1): @@ -68,10 +72,7 @@ class TestMatmulV2ScaleFusePass(PassAutoScanTest): matmul_v2_op = OpConfig( "matmul_v2", - inputs={ - "X": ["matmul_x"], - "Y": ["matmul_y"] - }, + inputs={"X": ["matmul_x"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, trans_x=transpose_X, trans_y=transpose_Y, @@ -86,10 +87,7 @@ class TestMatmulV2ScaleFusePass(PassAutoScanTest): if is_scale_tensor: scale_op = OpConfig( "scale", - inputs={ - "X": ["matmul_out"], - "ScaleTensor": ["scale_tensor"] - }, + inputs={"X": ["matmul_out"], "ScaleTensor": ["scale_tensor"]}, outputs={"Out": ["scale_out"]}, scale=scale_value, bias=0.0, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_merge_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_merge_layernorm_fuse_pass.py index cd25181f501e4b5f0b08770cc64537eea99ecd00..897e63e7cf5e91df7e5aabe412332f9ebbf862dd 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_merge_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_merge_layernorm_fuse_pass.py @@ -47,10 +47,13 @@ class TestMergeLayernormFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({"input_data": [1, 196, 96]}, - {"input_data": [4, 3136, 384]}, - {"input_data": [1, 3136, 96]}) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + {"input_data": [1, 196, 96]}, + {"input_data": [4, 3136, 384]}, + {"input_data": [1, 3136, 96]}, + ) yield config, ["merge_layernorm"], (1e-5, 1e-5) # trt dynamic_shape fp16 config = self.create_trt_inference_config() @@ -60,10 +63,13 @@ class TestMergeLayernormFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Half, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({"input_data": [1, 196, 96]}, - {"input_data": [4, 3136, 384]}, - {"input_data": [1, 3136, 96]}) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + {"input_data": [1, 196, 96]}, + {"input_data": [4, 3136, 384]}, + {"input_data": [1, 3136, 96]}, + ) yield config, ["merge_layernorm"], (1e-3, 1e-3) def sample_program_config(self, draw): @@ -72,39 +78,45 @@ class TestMergeLayernormFusePass(PassAutoScanTest): input_n = draw(st.sampled_from([96, 192, 384])) layernorm_40_begin_norm_axis = 2 layernorm_40_epsilon = draw( - st.floats(min_value=0.0000001, max_value=0.001)) + st.floats(min_value=0.0000001, max_value=0.001) + ) def generate_input(attrs): - return np.random.random([ - attrs[3]['batch_size'], - attrs[3]['input_H_W'] * attrs[3]['input_H_W'], - attrs[3]['input_n'] - ]).astype(np.float32) + return np.random.random( + [ + attrs[3]['batch_size'], + attrs[3]['input_H_W'] * attrs[3]['input_H_W'], + attrs[3]['input_n'], + ] + ).astype(np.float32) def generate_weight(attrs): - return np.random.random([attrs[3]['input_n'] * 4 - ]).astype(np.float32) + return 
np.random.random([attrs[3]['input_n'] * 4]).astype( + np.float32 + ) - attrs = [{ - 'shape': [-1, input_H_W, input_H_W, input_n] - }, { - 'shape': [-1, int(input_H_W * input_H_W / 4), - int(input_n * 4)] - }, { - 'begin_norm_axis': layernorm_40_begin_norm_axis, - 'epsilon': layernorm_40_epsilon - }, { - 'batch_size': batch_size, - 'input_H_W': input_H_W, - 'input_n': input_n - }] - reshape2_00_op = OpConfig(type="reshape2", - inputs={'X': ['input_data']}, - outputs={ - 'Out': ['reshape2_00_out'], - 'XShape': ['reshape2_00_outxshape'] - }, - attrs={'shape': attrs[0]['shape']}) + attrs = [ + {'shape': [-1, input_H_W, input_H_W, input_n]}, + {'shape': [-1, int(input_H_W * input_H_W / 4), int(input_n * 4)]}, + { + 'begin_norm_axis': layernorm_40_begin_norm_axis, + 'epsilon': layernorm_40_epsilon, + }, + { + 'batch_size': batch_size, + 'input_H_W': input_H_W, + 'input_n': input_n, + }, + ] + reshape2_00_op = OpConfig( + type="reshape2", + inputs={'X': ['input_data']}, + outputs={ + 'Out': ['reshape2_00_out'], + 'XShape': ['reshape2_00_outxshape'], + }, + attrs={'shape': attrs[0]['shape']}, + ) strided_slice_10_op = OpConfig( type="strided_slice", inputs={'Input': ['reshape2_00_out']}, @@ -114,8 +126,9 @@ class TestMergeLayernormFusePass(PassAutoScanTest): 'starts': [0, 0], 'infer_flags': [1, 1], 'ends': [attrs[3]['input_H_W'], attrs[3]['input_H_W']], - 'strides': [2, 2] - }) + 'strides': [2, 2], + }, + ) strided_slice_11_op = OpConfig( type="strided_slice", inputs={'Input': ['reshape2_00_out']}, @@ -125,8 +138,9 @@ class TestMergeLayernormFusePass(PassAutoScanTest): 'starts': [1, 0], 'infer_flags': [1, 1], 'ends': [attrs[3]['input_H_W'], attrs[3]['input_H_W']], - 'strides': [2, 2] - }) + 'strides': [2, 2], + }, + ) strided_slice_12_op = OpConfig( type="strided_slice", inputs={'Input': ['reshape2_00_out']}, @@ -136,8 +150,9 @@ class TestMergeLayernormFusePass(PassAutoScanTest): 'starts': [0, 1], 'infer_flags': [1, 1], 'ends': [attrs[3]['input_H_W'], attrs[3]['input_H_W']], - 'strides': [2, 2] - }) + 'strides': [2, 2], + }, + ) strided_slice_13_op = OpConfig( type="strided_slice", inputs={'Input': ['reshape2_00_out']}, @@ -147,68 +162,84 @@ class TestMergeLayernormFusePass(PassAutoScanTest): 'starts': [1, 1], 'infer_flags': [1, 1], 'ends': [attrs[3]['input_H_W'], attrs[3]['input_H_W']], - 'strides': [2, 2] - }) - concat_20_op = OpConfig(type="concat", - inputs={ - 'X': [ - 'strided_slice_10_out', - 'strided_slice_11_out', - 'strided_slice_12_out', - 'strided_slice_13_out' - ] - }, - outputs={'Out': ['concat_20_out']}, - attrs={'axis': -1}) - reshape2_30_op = OpConfig(type='reshape2', - inputs={'X': ['concat_20_out']}, - outputs={ - 'Out': ['reshape2_30_Out'], - 'XShape': ['reshape2_30_XShape'] - }, - attrs={'shape': attrs[1]['shape']}) - layernorm_40_op = OpConfig(type='layer_norm', - inputs={ - 'X': ['reshape2_30_Out'], - 'Bias': ['layer_norm_bias'], - 'Scale': ['layer_norm_scale'] - }, - outputs={ - "Y": ["layer_norm_out"], - "Mean": ["layer_norm_outMean"], - "Variance": ["layer_norm_outVariance"] - }, - attrs={ - 'begin_norm_axis': - attrs[2]['begin_norm_axis'], - 'epsilon': - attrs[2]['epsilon'] - }) + 'strides': [2, 2], + }, + ) + concat_20_op = OpConfig( + type="concat", + inputs={ + 'X': [ + 'strided_slice_10_out', + 'strided_slice_11_out', + 'strided_slice_12_out', + 'strided_slice_13_out', + ] + }, + outputs={'Out': ['concat_20_out']}, + attrs={'axis': -1}, + ) + reshape2_30_op = OpConfig( + type='reshape2', + inputs={'X': ['concat_20_out']}, + outputs={ + 'Out': ['reshape2_30_Out'], + 
'XShape': ['reshape2_30_XShape'], + }, + attrs={'shape': attrs[1]['shape']}, + ) + layernorm_40_op = OpConfig( + type='layer_norm', + inputs={ + 'X': ['reshape2_30_Out'], + 'Bias': ['layer_norm_bias'], + 'Scale': ['layer_norm_scale'], + }, + outputs={ + "Y": ["layer_norm_out"], + "Mean": ["layer_norm_outMean"], + "Variance": ["layer_norm_outVariance"], + }, + attrs={ + 'begin_norm_axis': attrs[2]['begin_norm_axis'], + 'epsilon': attrs[2]['epsilon'], + }, + ) program_config = ProgramConfig( ops=[ - reshape2_00_op, strided_slice_10_op, strided_slice_11_op, - strided_slice_12_op, strided_slice_13_op, concat_20_op, - reshape2_30_op, layernorm_40_op + reshape2_00_op, + strided_slice_10_op, + strided_slice_11_op, + strided_slice_12_op, + strided_slice_13_op, + concat_20_op, + reshape2_30_op, + layernorm_40_op, ], weights={ - 'layer_norm_bias': - TensorConfig(data_gen=partial(generate_weight, attrs)), - 'layer_norm_scale': - TensorConfig(data_gen=partial(generate_weight, attrs)) + 'layer_norm_bias': TensorConfig( + data_gen=partial(generate_weight, attrs) + ), + 'layer_norm_scale': TensorConfig( + data_gen=partial(generate_weight, attrs) + ), }, inputs={ - 'input_data': - TensorConfig(data_gen=partial(generate_input, attrs)) + 'input_data': TensorConfig( + data_gen=partial(generate_input, attrs) + ) }, - outputs=['layer_norm_out']) + outputs=['layer_norm_out'], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - passes=["merge_layernorm_fuse_pass"], - max_duration=250, - min_success_num=50) + self.run_and_statis( + quant=False, + max_examples=50, + passes=["merge_layernorm_fuse_pass"], + max_duration=250, + min_success_num=50, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_batch_norm_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_batch_norm_act_fuse_pass.py index 63bb9e7256d779eff2f6fe795747518a7611c2dc..85c3aa2cd1c1dc94eda5f5f3fd00b35a6633cb8f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_batch_norm_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_batch_norm_act_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -55,40 +54,40 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): def generate_weight(): return np.random.random(channel).astype(np.float32) - batch_norm_op = OpConfig(type="batch_norm", - inputs={ - "X": ["input_data"], - "Bias": ["Bias"], - "Mean": ["Mean"], - "Scale": ["Scale"], - "Variance": ["Variance"] - }, - outputs={ - "Y": ["norm_output"], - "MeanOut": ["Mean"], - "VarianceOut": ["Variance"], - "SavedMean": ["SavedMean"], - "SavedVariance": ["SavedVariance"] - }, - attrs={ - "data_layout": data_layout, - "epsilon": epsilon, - "fuse_with_relu": fuse_with_relu, - "is_test": is_test, - "momentum": momentum, - "trainable_statistics": - trainable_statistics, - "use_global_stats": use_global_stats, - "use_mkldnn": use_mkldnn1 - }) - - relu_op = OpConfig(type="relu", - inputs={"X": ["norm_output"]}, - outputs={"Out": ["relu_output"]}, - attrs={ - "use_cudnn": use_cudnn, - "use_mkldnn": use_mkldnn2 - }) + batch_norm_op = OpConfig( + type="batch_norm", + inputs={ + "X": ["input_data"], + "Bias": ["Bias"], + "Mean": ["Mean"], + "Scale": ["Scale"], + "Variance": ["Variance"], + }, + outputs={ + "Y": ["norm_output"], + "MeanOut": 
["Mean"], + "VarianceOut": ["Variance"], + "SavedMean": ["SavedMean"], + "SavedVariance": ["SavedVariance"], + }, + attrs={ + "data_layout": data_layout, + "epsilon": epsilon, + "fuse_with_relu": fuse_with_relu, + "is_test": is_test, + "momentum": momentum, + "trainable_statistics": trainable_statistics, + "use_global_stats": use_global_stats, + "use_mkldnn": use_mkldnn1, + }, + ) + + relu_op = OpConfig( + type="relu", + inputs={"X": ["norm_output"]}, + outputs={"Out": ["relu_output"]}, + attrs={"use_cudnn": use_cudnn, "use_mkldnn": use_mkldnn2}, + ) model_net = [batch_norm_op, relu_op] @@ -98,12 +97,13 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): "Bias": TensorConfig(data_gen=partial(generate_weight)), "Mean": TensorConfig(data_gen=partial(generate_weight)), "Scale": TensorConfig(data_gen=partial(generate_weight)), - "Variance": TensorConfig(data_gen=partial(generate_weight)) + "Variance": TensorConfig(data_gen=partial(generate_weight)), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["relu_output"]) + outputs=["relu_output"], + ) return program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_bias_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_bias_fuse_pass.py index 21d3234929081a18943e762fa07f4caad08088ab..9bf4fa55b0c53439b64f5c5de56ab5fc54d6156e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_bias_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_bias_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestConv3dBiasMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,78 +38,80 @@ class TestConv3dBiasMkldnnFusePass(PassAutoScanTest): def generate_input1(attrs): if attrs[0]['data_format'] == "NCDHW": return np.random.random( - [attrs[2]['batch_size'], 48, 64, 32, 64]).astype(np.float32) + [attrs[2]['batch_size'], 48, 64, 32, 64] + ).astype(np.float32) else: return np.random.random( - [attrs[2]['batch_size'], 64, 32, 64, 48]).astype(np.float32) + [attrs[2]['batch_size'], 64, 32, 64, 48] + ).astype(np.float32) def generate_weight1(): - return np.random.random([16, int(48 / groups), 3, 3, - 3]).astype(np.float32) + return np.random.random([16, int(48 / groups), 3, 3, 3]).astype( + np.float32 + ) def generate_weight2(): return np.random.random([16]).astype(np.float32) - attrs = [{ - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - }, { - "axis": axis - }, { - 'batch_size': batch_size - }] - - ops_config = [{ - "op_type": "conv3d", - "op_inputs": { - "Input": ["input_data1"], - "Filter": ["conv_weight"] - }, - "op_outputs": { - "Output": ["conv_output"] - }, - "op_attrs": { - "data_format": attrs[0]['data_format'], - "dilations": attrs[0]['dilations'], - "padding_algorithm": attrs[0]['padding_algorithm'], - "groups": attrs[0]['groups'], - "paddings": attrs[0]['paddings'], - "strides": attrs[0]['strides'], - "is_test": True - } - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["conv_output"], - "Y": ["elementwise_weight"] + attrs = [ + { + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, }, - "op_outputs": { - "Out": ["elementwise_output"] + {"axis": axis}, + {'batch_size': 
batch_size}, + ] + + ops_config = [ + { + "op_type": "conv3d", + "op_inputs": { + "Input": ["input_data1"], + "Filter": ["conv_weight"], + }, + "op_outputs": {"Output": ["conv_output"]}, + "op_attrs": { + "data_format": attrs[0]['data_format'], + "dilations": attrs[0]['dilations'], + "padding_algorithm": attrs[0]['padding_algorithm'], + "groups": attrs[0]['groups'], + "paddings": attrs[0]['paddings'], + "strides": attrs[0]['strides'], + "is_test": True, + }, }, - "op_attrs": { - 'axis': attrs[1]['axis'] + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["conv_output"], + "Y": ["elementwise_weight"], + }, + "op_outputs": {"Out": ["elementwise_output"]}, + "op_attrs": {'axis': attrs[1]['axis']}, }, - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv_weight": - TensorConfig(data_gen=partial(generate_weight1)), - "elementwise_weight": - TensorConfig(data_gen=partial(generate_weight2)) + "conv_weight": TensorConfig(data_gen=partial(generate_weight1)), + "elementwise_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), }, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input1, attrs)) + "input_data1": TensorConfig( + data_gen=partial(generate_input1, attrs) + ) }, - outputs=["elementwise_output"]) + outputs=["elementwise_output"], + ) return program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_op.py index 22454e55a139fa59906d93906e8de2560798a2a0..f9ea557a53ab549d639bd0a7f4fbca6d9b15c7e2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv3d_op.py @@ -23,53 +23,54 @@ import hypothesis.strategies as st class TestMkldnnConv3dOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): if kwargs["data_format"] == "NCDHW": - return np.random.random([kwargs["batch_size"], 48, 64, 32, - 64]).astype(np.float32) + return np.random.random( + [kwargs["batch_size"], 48, 64, 32, 64] + ).astype(np.float32) else: - return np.random.random([kwargs["batch_size"], 64, 32, 64, - 48]).astype(np.float32) + return np.random.random( + [kwargs["batch_size"], 64, 32, 64, 48] + ).astype(np.float32) def generate_weight(*args, **kwargs): - return np.random.random([16, - int(48 / kwargs["groups"]), 3, 3, - 3]).astype(np.float32) + return np.random.random( + [16, int(48 / kwargs["groups"]), 3, 3, 3] + ).astype(np.float32) - conv3d_op = OpConfig(type="conv3d", - inputs={ - "Input": ["input_data"], - "Filter": ["conv_weight"] - }, - outputs={"Output": ["conv_output"]}, - attrs={ - "data_format": kwargs["data_format"], - "dilations": kwargs["dilations"], - "padding_algorithm": - kwargs["padding_algorithm"], - "groups": kwargs["groups"], - "paddings": kwargs["paddings"], - "strides": kwargs["strides"], - "is_test": True - }) + conv3d_op = OpConfig( + type="conv3d", + inputs={"Input": ["input_data"], "Filter": ["conv_weight"]}, + outputs={"Output": ["conv_output"]}, + attrs={ + "data_format": kwargs["data_format"], + "dilations": kwargs["dilations"], + "padding_algorithm": kwargs["padding_algorithm"], + "groups": kwargs["groups"], + "paddings": kwargs["paddings"], + "strides": kwargs["strides"], + "is_test": True, + }, + ) program_config = ProgramConfig( ops=[conv3d_op], weights={ 
- "conv_weight": - TensorConfig(data_gen=partial(generate_weight, *args, **kwargs)) + "conv_weight": TensorConfig( + data_gen=partial(generate_weight, *args, **kwargs) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)) + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ) }, - outputs=["conv_output"]) + outputs=["conv_output"], + ) yield program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py index fb70f2260a34258ef23f9ceabdc17b73c999ab71..bc4d527be87dcc5ad793ce3e4ea136968e47bef9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py @@ -20,18 +20,19 @@ from paddle.fluid.core import PassVersionChecker class ConvActivationMkldnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") - conv_out = fluid.layers.conv2d(data, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - bias_attr=self.conv_bias_attr, - act=self.act) + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) + conv_out = fluid.layers.conv2d( + data, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + bias_attr=self.conv_bias_attr, + act=self.act, + ) self.feeds = { "data": np.random.random((1, 3, 100, 100)).astype("float32") @@ -55,7 +56,6 @@ class ConvActivationMkldnnFusePassTest(InferencePassTest): class ConvActivationMkldnnFusePassTest_1(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 5 self.conv_filter_size = 5 @@ -64,7 +64,6 @@ class ConvActivationMkldnnFusePassTest_1(ConvActivationMkldnnFusePassTest): class ConvActivationMkldnnFusePassTest_2(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 3 self.conv_filter_size = 3 @@ -73,7 +72,6 @@ class ConvActivationMkldnnFusePassTest_2(ConvActivationMkldnnFusePassTest): class ConvActivationMkldnnFusePassTest_3(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 5 self.conv_filter_size = 5 @@ -82,7 +80,6 @@ class ConvActivationMkldnnFusePassTest_3(ConvActivationMkldnnFusePassTest): class ConvActivationMkldnnFusePassTest_4(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 3 self.conv_filter_size = 3 @@ -91,7 +88,6 @@ class ConvActivationMkldnnFusePassTest_4(ConvActivationMkldnnFusePassTest): class ConvActivationMkldnnFusePassTest_5(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 5 self.conv_filter_size = 5 @@ -100,7 +96,6 @@ class ConvActivationMkldnnFusePassTest_5(ConvActivationMkldnnFusePassTest): class ConvActivationMkldnnFusePassTest_6(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 5 self.conv_filter_size = 5 @@ -109,7 +104,6 @@ class ConvActivationMkldnnFusePassTest_6(ConvActivationMkldnnFusePassTest): class ConvHardSigmoidOneDNNFusePassTest(ConvActivationMkldnnFusePassTest): - def set_params(self): self.conv_num_filters = 5 self.conv_filter_size = 5 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py index 94b9ac66a3d608f23eb455b757edac966f86c3c8..ba6179a1ff41fa7a9251699afcba4cf166058c97 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestConvAffineChannelFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -38,22 +37,27 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): out_channel = groups * out_channel_factor batch_size = draw(st.integers(min_value=1, max_value=4)) dilations = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) paddings = draw( - st.lists(st.integers(min_value=0, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=2), min_size=2, max_size=2 + ) + ) strides = draw( - st.lists(st.integers(min_value=1, max_value=2), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=2), min_size=2, max_size=2 + ) + ) has_bias = draw(st.booleans()) - x_shape = [ - batch_size, in_channel, 64, 64 - ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel] + x_shape = ( + [batch_size, in_channel, 64, 64] + if data_format == "NCHW" + else [batch_size, 64, 64, in_channel] + ) w_shape = [out_channel, filter_channel, filter_size, filter_size] scale_shape = [out_channel] bias_shape = [out_channel] @@ -70,28 +74,32 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): def generate_scale_bias(): return np.random.random(bias_shape).astype(np.float32) - conv2d_op = OpConfig("conv2d", - inputs={ - "Input": ["input_data"], - "Filter": ["conv2d_weight"], - }, - outputs={"Output": ["conv_output"]}, - data_format=data_format, - dilations=dilations, - padding_algorithm=padding_algorithm, - groups=groups, - paddings=paddings, - strides=strides, - has_bias=has_bias, - is_test=True) - ac_op = OpConfig("affine_channel", - inputs={ - "X": ["conv_output"], - "Scale": ["affine_channel_scale"], - "Bias": ["affine_channel_bias"] - }, - outputs={"Out": ["affine_channel_ouput"]}, - data_layout=data_format) + conv2d_op = OpConfig( + "conv2d", + inputs={ + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + outputs={"Output": ["conv_output"]}, + data_format=data_format, + dilations=dilations, + padding_algorithm=padding_algorithm, + groups=groups, + paddings=paddings, + strides=strides, + has_bias=has_bias, + is_test=True, + ) + ac_op = OpConfig( + "affine_channel", + inputs={ + "X": ["conv_output"], + "Scale": ["affine_channel_scale"], + "Bias": ["affine_channel_bias"], + }, + outputs={"Out": ["affine_channel_ouput"]}, + data_layout=data_format, + ) if has_bias == True: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, ac_op] @@ -102,19 +110,23 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input)), }, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight)), - "conv2d_bias": - TensorConfig(data_gen=partial(generate_bias)), - "affine_channel_scale": - TensorConfig(data_gen=partial(generate_scale_bias)), - "affine_channel_bias": - TensorConfig(data_gen=partial(generate_scale_bias)), + "conv2d_weight": TensorConfig( + 
data_gen=partial(generate_weight) + ), + "conv2d_bias": TensorConfig(data_gen=partial(generate_bias)), + "affine_channel_scale": TensorConfig( + data_gen=partial(generate_scale_bias) + ), + "affine_channel_bias": TensorConfig( + data_gen=partial(generate_scale_bias) + ), }, - outputs=["affine_channel_ouput"]) + outputs=["affine_channel_ouput"], + ) if has_bias == True: program_config.weights["conv2d_bias"] = TensorConfig( - data_gen=partial(generate_bias)) + data_gen=partial(generate_bias) + ) return program_config def sample_predictor_configs(self, program_config): @@ -131,18 +143,23 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): # mkldnn Output has diff with bias! def teller2(program_config, predictor_config): - return predictor_config.mkldnn_enabled( - ) and program_config.ops[0].attrs['has_bias'] == True + return ( + predictor_config.mkldnn_enabled() + and program_config.ops[0].attrs['has_bias'] == True + ) self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, "The output format of conv2d is wrong when data_format attribute is NHWC, \ - because currently its fused op (Conv2DFusion) only supports data format of channel first (NCHW)." + because currently its fused op (Conv2DFusion) only supports data format of channel first (NCHW).", ) self.add_ignore_check_case( - teller2, IgnoreReasons.PASS_ACCURACY_ERROR, - "Currently mkldnn Output has diff with bias!") + teller2, + IgnoreReasons.PASS_ACCURACY_ERROR, + "Currently mkldnn Output has diff with bias!", + ) def test(self): self.run_and_statis( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_bias_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_bias_fuse_pass.py index 7a1a04ca06f8c2753c791e3cfc1d8d1bfff2b882..6c667ac08db1ca6c46dcba7cd1e438355e12aa3f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_bias_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_bias_fuse_pass.py @@ -19,22 +19,24 @@ import paddle.fluid as fluid from paddle.fluid.core import PassVersionChecker -#padding SAME +# padding SAME class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - padding="SAME", - bias_attr=param_attr) + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + padding="SAME", + bias_attr=param_attr, + ) self.feeds = { "data": np.random.random((1, 3, 100, 100)).astype("float32") @@ -46,25 +48,28 @@ class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest): use_gpu = False self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass")) + PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass") + ) -#padding VALID +# padding VALID class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", 
shape=[-1, 3, 100, 100], dtype="float32" + ) param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - padding="VALID", - bias_attr=param_attr) + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + padding="VALID", + bias_attr=param_attr, + ) self.feeds = { "data": np.random.random((1, 3, 100, 100)).astype("float32") @@ -73,22 +78,24 @@ class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest): self.enable_mkldnn = True -#padding EXPLICT NUMBER +# padding EXPLICT NUMBER class ConvBiasMkldnnFusePassExplictPadTest(ConvBiasMkldnnFusePassSamePadTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - padding=[2, 4, 6, 8], - bias_attr=param_attr) + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + padding=[2, 4, 6, 8], + bias_attr=param_attr, + ) self.feeds = { "data": np.random.random((1, 3, 100, 100)).astype("float32") @@ -98,24 +105,26 @@ class ConvBiasMkldnnFusePassExplictPadTest(ConvBiasMkldnnFusePassSamePadTest): class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - padding="VALID", - groups=3, - bias_attr=param_attr, - use_cudnn=False, - act="softmax", - data_format="NCHW") + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + padding="VALID", + groups=3, + bias_attr=param_attr, + use_cudnn=False, + act="softmax", + data_format="NCHW", + ) self.feeds = { "data": np.random.random((1, 3, 100, 100)).astype("float32") @@ -125,26 +134,29 @@ class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest): class ConvBiasMkldnnFusePassDialtionsGroupsTest( - ConvBiasMkldnnFusePassSamePadTest): - + ConvBiasMkldnnFusePassSamePadTest +): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - padding="VALID", - dilation=2, - groups=3, - bias_attr=param_attr, - use_cudnn=False, - act="softmax", - data_format="NCHW") + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + padding="VALID", + dilation=2, + groups=3, + bias_attr=param_attr, + use_cudnn=False, + act="softmax", + data_format="NCHW", + ) self.feeds = { "data": 
np.random.random((1, 3, 100, 100)).astype("float32") @@ -154,20 +166,22 @@ class ConvBiasMkldnnFusePassDialtionsGroupsTest( class ConvTransposeMkldnnFusePassDialtionsGroupsTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[-1, 3, 5, 5], dtype="float32") param_attr = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - conv_out = fluid.layers.conv2d_transpose(input=data, - num_filters=3, - filter_size=3, - padding="SAME", - dilation=1, - bias_attr=param_attr, - use_cudnn=False) + learning_rate=0.001, + ) + conv_out = fluid.layers.conv2d_transpose( + input=data, + num_filters=3, + filter_size=3, + padding="SAME", + dilation=1, + bias_attr=param_attr, + use_cudnn=False, + ) self.feeds = {"data": np.random.random((1, 3, 5, 5)).astype("float32")} self.fetch_list = [conv_out] @@ -178,7 +192,9 @@ class ConvTransposeMkldnnFusePassDialtionsGroupsTest(InferencePassTest): self.check_output_with_option(use_gpu) self.assertTrue( PassVersionChecker.IsCompatible( - "conv_transpose_bias_mkldnn_fuse_pass")) + "conv_transpose_bias_mkldnn_fuse_pass" + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_concat_relu_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_concat_relu_mkldnn_fuse_pass.py index a5d2738869fe7e2c6fdc111db7e2d7a0d0f958e2..2d76d2846e9ae248e72da0265e68edc0e5356137 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_concat_relu_mkldnn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_concat_relu_mkldnn_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): data_format = draw(st.sampled_from(['NCHW', 'NHWC'])) dilations = draw(st.sampled_from([[2, 2]])) @@ -31,11 +30,24 @@ class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest): strides = draw(st.sampled_from([[1, 2]])) axis = draw(st.sampled_from([0])) activation_type = draw( - st.sampled_from([ - 'relu', 'gelu', 'swish', 'mish', 'sqrt', 'hard_swish', - 'sigmoid', 'abs', 'relu6', 'clip', 'tanh', 'hard_sigmoid', - 'leaky_relu' - ])) + st.sampled_from( + [ + 'relu', + 'gelu', + 'swish', + 'mish', + 'sqrt', + 'hard_swish', + 'sigmoid', + 'abs', + 'relu6', + 'clip', + 'tanh', + 'hard_sigmoid', + 'leaky_relu', + ] + ) + ) def generate_data(input_type): if input_type == 'NCHW': @@ -43,94 +55,103 @@ class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest): elif input_type == 'NHWC': return np.random.random([16, 64, 64, 48]).astype(np.float32) elif input_type == 'weights': - return np.random.random([16, int(48 / groups), 3, - 3]).astype(np.float32) - - conv2d_op1 = OpConfig(type='conv2d', - inputs={ - 'Input': ['conv_input_1'], - 'Filter': ['conv_weights_1'] - }, - outputs={'Output': ['conv_output_1']}, - attrs={ - 'data_format': data_format, - 'dilations': dilations, - 'padding_algorithm': padding_algorithm, - 'groups': groups, - 'paddings': paddings, - 'strides': strides - }) - - conv2d_op2 = OpConfig(type='conv2d', - inputs={ - 'Input': ['conv_input_2'], - 'Filter': ['conv_weights_2'] - }, - outputs={'Output': ['conv_output_2']}, - attrs={ - 'data_format': data_format, - 'dilations': dilations, - 'padding_algorithm': padding_algorithm, - 'groups': groups, - 'paddings': paddings, - 'strides': strides - }) + return 
np.random.random([16, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + conv2d_op1 = OpConfig( + type='conv2d', + inputs={'Input': ['conv_input_1'], 'Filter': ['conv_weights_1']}, + outputs={'Output': ['conv_output_1']}, + attrs={ + 'data_format': data_format, + 'dilations': dilations, + 'padding_algorithm': padding_algorithm, + 'groups': groups, + 'paddings': paddings, + 'strides': strides, + }, + ) + + conv2d_op2 = OpConfig( + type='conv2d', + inputs={'Input': ['conv_input_2'], 'Filter': ['conv_weights_2']}, + outputs={'Output': ['conv_output_2']}, + attrs={ + 'data_format': data_format, + 'dilations': dilations, + 'padding_algorithm': padding_algorithm, + 'groups': groups, + 'paddings': paddings, + 'strides': strides, + }, + ) - concat_op = OpConfig(type='concat', - inputs={'X': ['conv_output_1', 'conv_output_2']}, - outputs={'Out': ['concat_output']}, - attrs={'axis': axis}) + concat_op = OpConfig( + type='concat', + inputs={'X': ['conv_output_1', 'conv_output_2']}, + outputs={'Out': ['concat_output']}, + attrs={'axis': axis}, + ) if activation_type == 'relu6': - activation_op = OpConfig(activation_type, - inputs={'X': ['concat_output']}, - outputs={'Out': ['activation_output']}, - threshold=draw( - st.floats(min_value=1.0, - max_value=10.0))) + activation_op = OpConfig( + activation_type, + inputs={'X': ['concat_output']}, + outputs={'Out': ['activation_output']}, + threshold=draw(st.floats(min_value=1.0, max_value=10.0)), + ) elif activation_type == 'leaky_relu': - activation_op = OpConfig(activation_type, - inputs={'X': ['concat_output']}, - outputs={'Out': ['activation_output']}, - alpha=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={'X': ['concat_output']}, + outputs={'Out': ['activation_output']}, + alpha=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == 'swish': - activation_op = OpConfig(activation_type, - inputs={'X': ['concat_output']}, - outputs={'Out': ['activation_output']}, - beta=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={'X': ['concat_output']}, + outputs={'Out': ['activation_output']}, + beta=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == 'clip': activation_op = OpConfig( activation_type, inputs={'X': ['concat_output']}, outputs={'Out': ['activation_output']}, min=draw(st.floats(min_value=0.1, max_value=0.49)), - max=draw(st.floats(min_value=0.5, max_value=1.0))) + max=draw(st.floats(min_value=0.5, max_value=1.0)), + ) else: - activation_op = OpConfig(activation_type, - inputs={'X': ['concat_output']}, - outputs={'Out': ['activation_output']}) + activation_op = OpConfig( + activation_type, + inputs={'X': ['concat_output']}, + outputs={'Out': ['activation_output']}, + ) model_net = [conv2d_op1, conv2d_op2, concat_op, activation_op] program_config = ProgramConfig( ops=model_net, inputs={ - 'conv_input_1': - TensorConfig(data_gen=partial(generate_data, data_format)), - 'conv_input_2': - TensorConfig(data_gen=partial(generate_data, data_format)) + 'conv_input_1': TensorConfig( + data_gen=partial(generate_data, data_format) + ), + 'conv_input_2': TensorConfig( + data_gen=partial(generate_data, data_format) + ), }, weights={ - 'conv_weights_1': - TensorConfig(data_gen=partial(generate_data, 'weights')), - 'conv_weights_2': - TensorConfig(data_gen=partial(generate_data, 'weights')) + 'conv_weights_1': TensorConfig( + data_gen=partial(generate_data, 'weights') + ), + 'conv_weights_2': 
TensorConfig( + data_gen=partial(generate_data, 'weights') + ), }, - outputs=['activation_output']) + outputs=['activation_output'], + ) return program_config @@ -139,8 +160,9 @@ class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest): yield config, ['conv2d', 'conv2d', 'concat'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=['conv_activation_mkldnn_fuse_pass']) + self.run_and_statis( + quant=False, passes=['conv_activation_mkldnn_fuse_pass'] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py index 3d0ca34a5e005ac7e150e6753bb137a680ee316a..edf8599442f169cee146e4d1b234995d4b498bc5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_elementwise_add_fuse_pass.py @@ -23,7 +23,6 @@ import hypothesis.strategies as st # the two inputs of elementwise_add are tensor class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -46,58 +45,60 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 48, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 48, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 48]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 48]).astype( + np.float32 + ) def generate_weight(): - return np.random.random([48, int(48 / groups), 3, - 3]).astype(np.float32) - - relu_op = OpConfig(type="relu", - inputs={"X": ["input_data"]}, - outputs={"Out": ["relu_out"]}, - attrs={}) - - conv2d_op1 = OpConfig(type="conv2d", - inputs={ - "Input": ["relu_out"], - "Filter": ["conv_weight1"] - }, - outputs={"Output": ["conv_output1"]}, - attrs={ - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - }) - - conv2d_op2 = OpConfig(type="conv2d", - inputs={ - "Input": ["input_data"], - "Filter": ["conv_weight2"] - }, - outputs={"Output": ["conv_output2"]}, - attrs={ - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - }) - - elt_op = OpConfig(type="elementwise_add", - inputs={ - "X": ["conv_output1"], - "Y": ["conv_output2"] - }, - outputs={"Out": ["elementwise_output"]}, - attrs={'axis': axis}) + return np.random.random([48, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + relu_op = OpConfig( + type="relu", + inputs={"X": ["input_data"]}, + outputs={"Out": ["relu_out"]}, + attrs={}, + ) + + conv2d_op1 = OpConfig( + type="conv2d", + inputs={"Input": ["relu_out"], "Filter": ["conv_weight1"]}, + outputs={"Output": ["conv_output1"]}, + attrs={ + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, + ) + + conv2d_op2 = OpConfig( + type="conv2d", + inputs={"Input": ["input_data"], "Filter": ["conv_weight2"]}, + outputs={"Output": ["conv_output2"]}, + attrs={ + "data_format": data_format, + "dilations": 
dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, + ) + + elt_op = OpConfig( + type="elementwise_add", + inputs={"X": ["conv_output1"], "Y": ["conv_output2"]}, + outputs={"Out": ["elementwise_output"]}, + attrs={'axis': axis}, + ) model_net = [relu_op, conv2d_op1, conv2d_op2, elt_op] @@ -105,12 +106,13 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest): ops=model_net, weights={ "conv_weight1": TensorConfig(data_gen=partial(generate_weight)), - "conv_weight2": TensorConfig(data_gen=partial(generate_weight)) + "conv_weight2": TensorConfig(data_gen=partial(generate_weight)), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["elementwise_output"]) + outputs=["elementwise_output"], + ) return program_config @@ -119,8 +121,9 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest): yield config, ["relu", "conv2d", "conv2d"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["conv_elementwise_add_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["conv_elementwise_add_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_gelu_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_gelu_fuse_pass.py index 3abb582365baa29d6d4bc38a16b8e549f224afa4..8094f323a904762c97a134cb95d1f58cb03a4258 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_gelu_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_gelu_fuse_pass.py @@ -23,7 +23,6 @@ import hypothesis.strategies as st class TestConvGeluMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,45 +38,45 @@ class TestConvGeluMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 48, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 48, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 48]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 48]).astype( + np.float32 + ) def generate_weight(): - return np.random.random([16, int(48 / groups), 3, - 3]).astype(np.float32) - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["input_weight"] - }, - "op_outputs": { - "Output": ["conv_output"] - }, - "op_attrs": { - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - } - }, { - "op_type": "gelu", - "op_inputs": { - "X": ["conv_output"] - }, - "op_outputs": { - "Out": ["gelu_output"] + return np.random.random([16, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["input_weight"], + }, + "op_outputs": {"Output": ["conv_output"]}, + "op_attrs": { + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, }, - "op_attrs": { - "approximate": approximate, + { + "op_type": "gelu", + "op_inputs": {"X": ["conv_output"]}, + "op_outputs": {"Out": ["gelu_output"]}, + "op_attrs": { + "approximate": approximate, + }, }, - }] + ] ops = self.generate_op_config(ops_config) @@ -89,7 +88,8 @@ 
class TestConvGeluMkldnnFusePass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["gelu_output"]) + outputs=["gelu_output"], + ) return program_config @@ -98,8 +98,9 @@ class TestConvGeluMkldnnFusePass(PassAutoScanTest): yield config, ["conv2d"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["conv_activation_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["conv_activation_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_sigmoid_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_sigmoid_fuse_pass.py index 010d3e8f3aa86b7956a17a25cc057bfceae066ad..2b76ba180f5caf8d04ede6ae35e53538a3b74e82 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_sigmoid_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_sigmoid_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestConvHardSigmoidMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,46 +38,43 @@ class TestConvHardSigmoidMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 48, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 48, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 48]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 48]).astype( + np.float32 + ) def generate_weight(): - return np.random.random([16, int(48 / groups), 3, - 3]).astype(np.float32) - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["input_weight"] - }, - "op_outputs": { - "Output": ["conv_output"] - }, - "op_attrs": { - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - } - }, { - "op_type": "hard_sigmoid", - "op_inputs": { - "X": ["conv_output"] - }, - "op_outputs": { - "Out": ["sigmoid_output"] + return np.random.random([16, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["input_weight"], + }, + "op_outputs": {"Output": ["conv_output"]}, + "op_attrs": { + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, }, - "op_attrs": { - "slope": slope, - "offset": offset + { + "op_type": "hard_sigmoid", + "op_inputs": {"X": ["conv_output"]}, + "op_outputs": {"Out": ["sigmoid_output"]}, + "op_attrs": {"slope": slope, "offset": offset}, }, - }] + ] ops = self.generate_op_config(ops_config) @@ -90,7 +86,8 @@ class TestConvHardSigmoidMkldnnFusePass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["sigmoid_output"]) + outputs=["sigmoid_output"], + ) return program_config @@ -99,8 +96,9 @@ class TestConvHardSigmoidMkldnnFusePass(PassAutoScanTest): yield config, ["conv2d"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["conv_activation_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["conv_activation_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_swish_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_swish_fuse_pass.py index 799d1cbaa1c9feab147a24ae42782387ab37e352..417fb95c72a1dc04fac73e571dd316ec227353d9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_swish_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_hard_swish_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestConvHardSwishMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -40,47 +39,47 @@ class TestConvHardSwishMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 48, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 48, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 48]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 48]).astype( + np.float32 + ) def generate_weight(): - return np.random.random([16, int(48 / groups), 3, - 3]).astype(np.float32) - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["input_weight"] - }, - "op_outputs": { - "Output": ["conv_output"] - }, - "op_attrs": { - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - } - }, { - "op_type": "hard_swish", - "op_inputs": { - "X": ["conv_output"] - }, - "op_outputs": { - "Out": ["swish_output"] + return np.random.random([16, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["input_weight"], + }, + "op_outputs": {"Output": ["conv_output"]}, + "op_attrs": { + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, }, - "op_attrs": { - "threshold": threshold, - "scale": scale, - "offset": offset + { + "op_type": "hard_swish", + "op_inputs": {"X": ["conv_output"]}, + "op_outputs": {"Out": ["swish_output"]}, + "op_attrs": { + "threshold": threshold, + "scale": scale, + "offset": offset, + }, }, - }] + ] ops = self.generate_op_config(ops_config) @@ -92,7 +91,8 @@ class TestConvHardSwishMkldnnFusePass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["swish_output"]) + outputs=["swish_output"], + ) return program_config @@ -101,8 +101,9 @@ class TestConvHardSwishMkldnnFusePass(PassAutoScanTest): yield config, ["conv2d"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["conv_activation_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["conv_activation_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_mish_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_mish_fuse_pass.py index 6f5810cb802d9f677a547eee9f8a3506d17050a6..93f58741ba8c6c6e5f688a7f6a6f0fb1cd6c6444 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_mish_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_mish_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class 
TestConvMishMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [op.attrs for op in program_config.ops] # If the problem has been fixed, the judgment @@ -42,43 +41,43 @@ class TestConvMishMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 48, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 48, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 48]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 48]).astype( + np.float32 + ) def generate_weight(): - return np.random.random([16, int(48 / groups), 3, - 3]).astype(np.float32) - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["input_weight"] - }, - "op_outputs": { - "Output": ["conv_output"] - }, - "op_attrs": { - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides - } - }, { - "op_type": "mish", - "op_inputs": { - "X": ["conv_output"] + return np.random.random([16, int(48 / groups), 3, 3]).astype( + np.float32 + ) + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["input_weight"], + }, + "op_outputs": {"Output": ["conv_output"]}, + "op_attrs": { + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + }, }, - "op_outputs": { - "Out": ["mish_output"] + { + "op_type": "mish", + "op_inputs": {"X": ["conv_output"]}, + "op_outputs": {"Out": ["mish_output"]}, + "op_attrs": {}, }, - "op_attrs": {}, - }] + ] ops = self.generate_op_config(ops_config) @@ -90,7 +89,8 @@ class TestConvMishMkldnnFusePass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["mish_output"]) + outputs=["mish_output"], + ) return program_config @@ -99,8 +99,9 @@ class TestConvMishMkldnnFusePass(PassAutoScanTest): yield config, ["conv2d"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["conv_activation_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["conv_activation_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_transpose_bias_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_transpose_bias_fuse_pass.py index 2eebd529554d3b79b2edeae7a559623fc44fc9e2..01de12431489560604a0d36cfb184982f488a8a5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_transpose_bias_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_transpose_bias_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestConvTransposeMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -47,11 +46,13 @@ class TestConvTransposeMkldnnFusePass(PassAutoScanTest): def generate_input(): if data_format == "NCHW": - return np.random.random([batch_size, 16, 64, - 64]).astype(np.float32) + return np.random.random([batch_size, 16, 64, 64]).astype( + np.float32 + ) else: - return np.random.random([batch_size, 64, 64, - 16]).astype(np.float32) + return np.random.random([batch_size, 64, 64, 16]).astype( + 
np.float32 + ) def generate_weight1(): return np.random.random([16, 16, 3, 3]).astype(np.float32) @@ -59,46 +60,47 @@ class TestConvTransposeMkldnnFusePass(PassAutoScanTest): def generate_weight2(): return np.random.random([16 * groups]).astype(np.float32) - conv2d_op = OpConfig(type="conv2d_transpose", - inputs={ - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - outputs={"Output": ["conv_output"]}, - attrs={ - "data_format": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "output_size": [], - "output_padding": [], - "is_test": True - }) - - elt_op = OpConfig(type="elementwise_add", - inputs={ - "X": ["conv_output"], - "Y": ["elementwise_weight"] - }, - outputs={"Out": ["elementwise_output"]}, - attrs={'axis': axis}) + conv2d_op = OpConfig( + type="conv2d_transpose", + inputs={"Input": ["input_data"], "Filter": ["conv2d_weight"]}, + outputs={"Output": ["conv_output"]}, + attrs={ + "data_format": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + "output_size": [], + "output_padding": [], + "is_test": True, + }, + ) + + elt_op = OpConfig( + type="elementwise_add", + inputs={"X": ["conv_output"], "Y": ["elementwise_weight"]}, + outputs={"Out": ["elementwise_output"]}, + attrs={'axis': axis}, + ) model_net = [conv2d_op, elt_op] program_config = ProgramConfig( ops=model_net, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight1)), - "elementwise_weight": - TensorConfig(data_gen=partial(generate_weight2)) + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "elementwise_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["elementwise_output"]) + outputs=["elementwise_output"], + ) return program_config @@ -107,9 +109,11 @@ class TestConvTransposeMkldnnFusePass(PassAutoScanTest): yield config, ['conv2d_transpose'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_duration=300, - passes=["conv_transpose_bias_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, + max_duration=300, + passes=["conv_transpose_bias_mkldnn_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py index 95996f22a86d79b2ff72662ee4c9e6d3926e1e21..2cd9cbcb05700f48e5bf06293575dd19844f5033 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py @@ -20,20 +20,20 @@ from paddle.fluid.core import PassVersionChecker class TestMKLDNNCpuBfloat16Pass(InferencePassTest): - def setUp(self): self.init_data() with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data(name='x', - shape=[-1] + self.shape_x, - dtype=self.d_type) + x = fluid.data( + name='x', shape=[-1] + self.shape_x, dtype=self.d_type + ) out = fluid.layers.transpose(x, perm=[0, 1, 2, 3]) out = fluid.layers.reshape(out, [0, 0, 0, 0]) out = fluid.layers.fc(out, size=1) self.feeds = { - "x": - np.random.random([self.bs] + self.shape_x).astype(self.d_type) + "x": np.random.random([self.bs] + self.shape_x).astype( + self.d_type + ) } self.fetch_list = [out] diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py index 736c5fca7f8f09cb7aa067875c9528782da0bc4c..3a9f520f1c7663a312c665a6f65286cb8948f127 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py @@ -42,24 +42,29 @@ class DepthwiseConvMKLDNNPass(PassAutoScanTest): random_groups = draw(st.integers(min_value=1, max_value=3)) random_dilations = draw( - st.lists(st.integers(min_value=1, max_value=3), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=3), min_size=2, max_size=2 + ) + ) random_strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) random_paddings = draw( - st.lists(st.integers(min_value=0, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=0, max_value=4), min_size=2, max_size=2 + ) + ) random_padding_algorithm = draw( - st.sampled_from(["EXPLICIT", "SAME", "VALID"])) + st.sampled_from(["EXPLICIT", "SAME", "VALID"]) + ) random_data_layout = draw(st.sampled_from(["NCHW", "NHWC"])) random_filter = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) def generate_conv2d_Input(): shape = [random_input_dim1, random_input_dim2] @@ -78,23 +83,25 @@ class DepthwiseConvMKLDNNPass(PassAutoScanTest): return np.random.random(shape).astype(np.float32) # define op - conv2d_op = OpConfig(type="depthwise_conv2d", - inputs={ - "Input": ["conv2d_Input"], - "Filter": ["conv2d_Filter"], - }, - outputs={ - "Output": ["conv2d_Out"], - }, - attrs={ - 'groups': random_groups, - 'dilations': random_dilations, - 'strides': random_strides, - 'paddings': random_paddings, - 'padding_algorithm': random_padding_algorithm, - 'data_format': random_data_layout, - 'use_mkldnn': True, - }) + conv2d_op = OpConfig( + type="depthwise_conv2d", + inputs={ + "Input": ["conv2d_Input"], + "Filter": ["conv2d_Filter"], + }, + outputs={ + "Output": ["conv2d_Out"], + }, + attrs={ + 'groups': random_groups, + 'dilations': random_dilations, + 'strides': random_strides, + 'paddings': random_paddings, + 'padding_algorithm': random_padding_algorithm, + 'data_format': random_data_layout, + 'use_mkldnn': True, + }, + ) # define model_net model_net = [conv2d_op] @@ -108,7 +115,8 @@ class DepthwiseConvMKLDNNPass(PassAutoScanTest): weights={ "conv2d_Filter": TensorConfig(data_gen=generate_conv2d_Filter), }, - outputs=["conv2d_Out"]) + outputs=["conv2d_Out"], + ) return program_config @@ -128,13 +136,13 @@ class DepthwiseConvMKLDNNPass(PassAutoScanTest): return True def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): if program_config.ops[0].attrs['data_format'] == "NHWC": return True return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The output format of depthwise_conv2d is wrong when data_format attribute is NHWC" + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The output format of depthwise_conv2d is wrong when data_format attribute is NHWC", ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py index 
0063fea59a7a13e66bc88060546cab017e781bc9..c9647ec60b5cb0c69822be909445ed884f5e4abf 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py @@ -28,12 +28,12 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data_A = fluid.data(name="data_A", - shape=[-1, 3, 100, 100], - dtype="float32") - data_B = fluid.data(name="data_B", - shape=[-1, 3, 100, 100], - dtype="float32") + data_A = fluid.data( + name="data_A", shape=[-1, 3, 100, 100], dtype="float32" + ) + data_B = fluid.data( + name="data_B", shape=[-1, 3, 100, 100], dtype="float32" + ) elt_out = self.operand(data_A, data_B) if self.act is not None: if self.act_beta is not None: @@ -45,7 +45,7 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest): self.feeds = { "data_A": np.random.random((1, 3, 100, 100)).astype("float32"), - "data_B": np.random.random((1, 3, 100, 100)).astype("float32") + "data_B": np.random.random((1, 3, 100, 100)).astype("float32"), } self.fetch_list = [elt_out] self.enable_mkldnn = True @@ -63,24 +63,24 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest): class ElementwiseActivationMkldnnFusePassTest_Add_Relu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.relu class ElementwiseActivationMkldnnFusePassTest_Add_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.tanh class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act_alpha = 0.2 @@ -88,8 +88,8 @@ class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu( class ElementwiseActivationMkldnnFusePassTest_Add_Swish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act_alpha = 4 @@ -97,32 +97,32 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Swish( class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.hard_swish class ElementwiseActivationMkldnnFusePassTest_Add_SQRT( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.sqrt class ElementwiseActivationMkldnnFusePassTest_Add_ABS( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.abs class ElementwiseActivationMkldnnFusePassTest_Add_Clip( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.clip @@ -131,16 +131,16 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Clip( class ElementwiseActivationMkldnnFusePassTest_Add_Gelu( - 
ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.gelu class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.gelu @@ -148,8 +148,8 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh( class ElementwiseActivationMkldnnFusePassTest_Add_Relu6( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.relu6 @@ -157,32 +157,32 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu6( class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_add self.act = fluid.layers.sigmoid class ElementwiseActivationMkldnnFusePassTest_Sub_Relu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.relu class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.tanh class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act_alpha = 0.2 @@ -190,32 +190,32 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu( class ElementwiseActivationMkldnnFusePassTest_Sub_Swish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.swish class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.hard_swish class ElementwiseActivationMkldnnFusePassTest_Sub_ABS( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.abs class ElementwiseActivationMkldnnFusePassTest_Sub_Clip( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.clip @@ -224,16 +224,16 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Clip( class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.gelu class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.gelu @@ -241,8 +241,8 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh( class 
ElementwiseActivationMkldnnFusePassTest_Sub_Relu6( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.relu6 @@ -250,32 +250,32 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu6( class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_sub self.act = fluid.layers.sigmoid class ElementwiseActivationMkldnnFusePassTest_Mul_Relu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.relu class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.tanh class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act_alpha = 0.2 @@ -283,40 +283,40 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu( class ElementwiseActivationMkldnnFusePassTest_Mul_Swish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.swish class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.hard_swish class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.sqrt class ElementwiseActivationMkldnnFusePassTest_Mul_ABS( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.abs class ElementwiseActivationMkldnnFusePassTest_Mul_Clip( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.clip @@ -325,16 +325,16 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Clip( class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.gelu class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.gelu @@ -342,8 +342,8 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh( class ElementwiseActivationMkldnnFusePassTest_Mul_Relu6( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.relu6 @@ -351,8 +351,8 @@ class 
ElementwiseActivationMkldnnFusePassTest_Mul_Relu6( class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid( - ElementwiseActivationMkldnnFusePassTest): - + ElementwiseActivationMkldnnFusePassTest +): def set_params(self): self.operand = fluid.layers.elementwise_mul self.act = fluid.layers.sigmoid diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass_new.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass_new.py index feef212b1f6978c527a5bc3ca1703f4e1daea6e5..8144e02afa00f8124ecaeb8908c76a6d522a5c69 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass_new.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass_new.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestElementWiseAddReluFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -30,29 +29,24 @@ class TestElementWiseAddReluFusePass(PassAutoScanTest): batch_size = draw(st.integers(min_value=1, max_value=4)) def generate_input(): - return np.random.random([batch_size, 3, 100, - 100]).astype(np.float32) + return np.random.random([batch_size, 3, 100, 100]).astype( + np.float32 + ) - ops_config = [{ - "op_type": "elementwise_add", - "op_inputs": { - "X": ["A"], - "Y": ["B"] - }, - "op_outputs": { - "Out": ["add_output"] - }, - "op_attrs": {} - }, { - "op_type": "relu", - "op_inputs": { - "X": ["add_output"] + ops_config = [ + { + "op_type": "elementwise_add", + "op_inputs": {"X": ["A"], "Y": ["B"]}, + "op_outputs": {"Out": ["add_output"]}, + "op_attrs": {}, }, - "op_outputs": { - "Out": ["relu_output"] + { + "op_type": "relu", + "op_inputs": {"X": ["add_output"]}, + "op_outputs": {"Out": ["relu_output"]}, + "op_attrs": {}, }, - "op_attrs": {} - }] + ] ops = self.generate_op_config(ops_config) @@ -61,9 +55,10 @@ class TestElementWiseAddReluFusePass(PassAutoScanTest): weights={}, inputs={ "A": TensorConfig(data_gen=partial(generate_input)), - "B": TensorConfig(data_gen=partial(generate_input)) + "B": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["relu_output"]) + outputs=["relu_output"], + ) return program_config @@ -72,9 +67,9 @@ class TestElementWiseAddReluFusePass(PassAutoScanTest): yield config, ["elementwise_add"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["elt_act_mkldnn_fuse_pass"], - min_success_num=4) + self.run_and_statis( + quant=False, passes=["elt_act_mkldnn_fuse_pass"], min_success_num=4 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py index c438c28370e19db3f848fb36555f1cf84ba3e16f..4848a8c9770ff86285f5ff39982252bb50ee2a33 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py @@ -25,13 +25,12 @@ enable_static() class FCGeluTanhOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) gelu_out = fluid.layers.gelu(fc_out, approximate=False) @@ -48,13 +47,12 @@ class 
FCGeluTanhOneDnnFusePassTest(InferencePassTest): class FCGeluErfOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) gelu_out = fluid.layers.gelu(fc_out, approximate=True) @@ -72,13 +70,12 @@ class FCGeluErfOneDnnFusePassTest(InferencePassTest): class FCTanhOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) tanh_out = fluid.layers.tanh(fc_out) @@ -96,13 +93,12 @@ class FCTanhOneDnnFusePassTest(InferencePassTest): class FCSigmoidOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) sigmoid_out = fluid.layers.sigmoid(fc_out) @@ -120,13 +116,12 @@ class FCSigmoidOneDnnFusePassTest(InferencePassTest): class FCHardSwishOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) hardswish_out = fluid.layers.hard_swish(fc_out) @@ -144,13 +139,12 @@ class FCHardSwishOneDnnFusePassTest(InferencePassTest): class FCMishOneDnnFusePassTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 128, 768], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 128, 768], dtype="float32" + ) fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2) mish_out = fluid.layers.mish(fc_out) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_elementwise_add_fuse_pass.py index d69a7b8a8bc47ed34ae83278d42ffa6fbae763cb..85f877147fffc9eea890a6ad9a36752c69b8440c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_elementwise_add_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_elementwise_add_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestFCElementwiseAddMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): axis = draw(st.sampled_from([-1, 0, 1])) fc_as_x = draw(st.sampled_from([True, False])) @@ -38,34 +37,40 @@ class TestFCElementwiseAddMkldnnFusePass(PassAutoScanTest): def generate_fc_bias(): return np.random.random([fc_wei]).astype(np.float32) - relu_op = OpConfig(type="relu", - inputs={"X": ["input_data"]}, - outputs={"Out": ["relu_out"]}, - attrs={}) - - fc_op = OpConfig(type="fc", - inputs={ - "Input": ["relu_out"], - "W": 
["fc_weight"], - "Bias": ["fc_bias"] - }, - outputs={"Out": ["fc_output"]}, - attrs={ - "use_mkldnn": True, - "padding_weights": False, - "activation_type": "", - "in_num_col_dims": 1, - }) + relu_op = OpConfig( + type="relu", + inputs={"X": ["input_data"]}, + outputs={"Out": ["relu_out"]}, + attrs={}, + ) + + fc_op = OpConfig( + type="fc", + inputs={ + "Input": ["relu_out"], + "W": ["fc_weight"], + "Bias": ["fc_bias"], + }, + outputs={"Out": ["fc_output"]}, + attrs={ + "use_mkldnn": True, + "padding_weights": False, + "activation_type": "", + "in_num_col_dims": 1, + }, + ) if fc_as_x: inputs = {"X": ["fc_output"], "Y": ["input_data"]} else: inputs = {"X": ["input_data"], "Y": ["fc_output"]} - elt_add_op = OpConfig(type="elementwise_add", - inputs=inputs, - outputs={"Out": ["elementwise_output"]}, - attrs={'axis': axis}) + elt_add_op = OpConfig( + type="elementwise_add", + inputs=inputs, + outputs={"Out": ["elementwise_output"]}, + attrs={'axis': axis}, + ) model_net = [relu_op, fc_op, elt_add_op] @@ -78,7 +83,8 @@ class TestFCElementwiseAddMkldnnFusePass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["elementwise_output"]) + outputs=["elementwise_output"], + ) return program_config @@ -87,8 +93,9 @@ class TestFCElementwiseAddMkldnnFusePass(PassAutoScanTest): yield config, ["relu", "fc"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["fc_elementwise_add_mkldnn_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["fc_elementwise_add_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py index 6b589821ba8a4f5ad087ac474d8b047f4e73f347..6dc911390b058eb4a31478a594944589f4bfaa45 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py @@ -21,22 +21,20 @@ from paddle.fluid.core import PassVersionChecker class MkldnnInplacePassTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): paddle.enable_static() - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") - conv_out_1 = fluid.layers.conv2d(data, - num_filters=3, - filter_size=3, - bias_attr=False) + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) + conv_out_1 = fluid.layers.conv2d( + data, num_filters=3, filter_size=3, bias_attr=False + ) softmax_out = fluid.layers.softmax(conv_out_1) relu_out = fluid.layers.relu(conv_out_1) - eltwise_out = fluid.layers.elementwise_add(softmax_out, - relu_out, - axis=-1) + eltwise_out = fluid.layers.elementwise_add( + softmax_out, relu_out, axis=-1 + ) self.pass_name = 'mkldnn_inplace_pass' self.feeds = { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_int8_scale_calculation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_int8_scale_calculation_pass.py index 3d2895cc619d40b4b7ef5f9e7e288f4fa7350923..7308f93d792a9fe3b1f1b50df91160c13b972d08 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_int8_scale_calculation_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_int8_scale_calculation_pass.py @@ -20,7 +20,6 @@ import hypothesis.strategies as st class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): - def 
sample_predictor_configs(self, program_config): config = self.create_inference_config(use_gpu=False) config.pass_builder().append_pass("int8_scale_calculation_mkldnn_pass") @@ -36,12 +35,36 @@ class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): filter_shape = prog_config.weights["filter"].shape input_shape = prog_config.inputs["input_x"].shape if padding_algorithm == "VALID": - if ((input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + (input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1)) + / strides[0] + + 1 + ) <= 1 or ( + (input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) + / strides[1] + + 1 + ) <= 1: return False if padding_algorithm == "EXPLICIT": - if ((input_shape[2] + paddings[0] + paddings[1] - (dilations[0] * (filter_shape[2] - 1) + 1)) / strides[0] + 1) <= 1 or \ - ((input_shape[3] + paddings[2] + paddings[3] - (dilations[1] * (filter_shape[3] - 1) + 1)) / strides[1] + 1) <= 1: + if ( + ( + input_shape[2] + + paddings[0] + + paddings[1] + - (dilations[0] * (filter_shape[2] - 1) + 1) + ) + / strides[0] + + 1 + ) <= 1 or ( + ( + input_shape[3] + + paddings[2] + + paddings[3] + - (dilations[1] * (filter_shape[3] - 1) + 1) + ) + / strides[1] + + 1 + ) <= 1: return False if data_format == "NCHW": if input_shape[1] != filter_shape[1] * groups: @@ -57,40 +80,45 @@ class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): def sample_program_config(self, draw): x_shape = draw( - st.lists(st.integers(min_value=5, max_value=100), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=5, max_value=100), min_size=4, max_size=4 + ) + ) x_shape[1] = draw(st.integers(min_value=5, max_value=10)) data_format = draw(st.sampled_from(["NCHW", "NHWC"])) f_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=4, max_size=4 + ) + ) if data_format == "NCHW": f_shape[1] = x_shape[1] else: f_shape[1] = x_shape[3] strides = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) padding = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=4, max_size=4 + ) + ) groups = draw(st.integers(min_value=1, max_value=3)) dilations = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=2 + ) + ) bias_shape = [f_shape[0]] inputs = dict() @@ -116,17 +144,19 @@ class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): "filter": TensorConfig(shape=f_shape), } - conv2d_op = OpConfig("conv2d", - inputs=inputs, - outputs={"Output": ["conv2d_out"]}, - strides=strides, - padding_algorithm=padding_algorithm, - paddings=padding, - groups=groups, - dilations=dilations, - data_format=data_format, - use_mkldnn=use_mkldnn, - mkldnn_data_type="int8") + conv2d_op = OpConfig( + "conv2d", + inputs=inputs, + outputs={"Output": ["conv2d_out"]}, + strides=strides, + padding_algorithm=padding_algorithm, + paddings=padding, + groups=groups, + dilations=dilations, + data_format=data_format, + use_mkldnn=use_mkldnn, + mkldnn_data_type="int8", + ) ops = [conv2d_op] @@ -134,13 +164,16 @@ 
class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): ops=ops, weights=weights, inputs={"input_x": TensorConfig(shape=x_shape)}, - outputs=["conv2d_out"]) + outputs=["conv2d_out"], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=100, - passes=["int8_scale_calculation_mkldnn_pass"]) + self.run_and_statis( + quant=False, + max_examples=100, + passes=["int8_scale_calculation_mkldnn_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_log_softmax_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_log_softmax_op.py index 929863b42a7300ee0e8197b8fcafcb264182e17d..814163d4ec3dc598a7a6fff2b8af601a23975253 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_log_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_log_softmax_op.py @@ -22,28 +22,30 @@ import hypothesis.strategies as st class TestMKLDNNLogSoftmaxOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) - logsoftmax_op = OpConfig(type="log_softmax", - inputs={"X": ["input_data"]}, - outputs={"Out": ["output_data"]}, - attrs={"axis": kwargs['axis']}) + logsoftmax_op = OpConfig( + type="log_softmax", + inputs={"X": ["input_data"]}, + outputs={"Out": ["output_data"]}, + attrs={"axis": kwargs['axis']}, + ) program_config = ProgramConfig( ops=[logsoftmax_op], weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -51,10 +53,12 @@ class TestMKLDNNLogSoftmaxOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(axis=st.sampled_from([-2, -1, 0, 1]), - in_shape=st.lists(st.integers(min_value=2, max_value=5), - min_size=3, - max_size=5)) + @given( + axis=st.sampled_from([-2, -1, 0, 1]), + in_shape=st.lists( + st.integers(min_value=2, max_value=5), min_size=3, max_size=5 + ), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py index b894fc708b4243b802788981d7c7a34f43d6ad0d..a90129394e1bbdfaba1f07930585c2469869557d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_activation_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class TestMatmulActivationMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): transpose_X = draw(st.booleans()) transpose_Y = draw(st.booleans()) @@ -30,11 +29,24 @@ class TestMatmulActivationMkldnnFusePass(PassAutoScanTest): channel = draw(st.sampled_from([8])) input_dim = draw(st.sampled_from([32])) activation_type = draw( - st.sampled_from([ - 'relu', 'gelu', 'swish', 'mish', 'sqrt', 'hard_swish', - 'sigmoid', 'abs', 'relu6', 'clip', 'tanh', 'hard_sigmoid', - 'leaky_relu' - ])) + st.sampled_from( + [ + 'relu', + 'gelu', + 'swish', + 'mish', + 'sqrt', + 'hard_swish', + 'sigmoid', + 'abs', + 
'relu6', + 'clip', + 'tanh', + 'hard_sigmoid', + 'leaky_relu', + ] + ) + ) def generate_input(type): if transpose_X and transpose_Y: @@ -55,50 +67,52 @@ class TestMatmulActivationMkldnnFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op = OpConfig(type='matmul', - inputs={ - 'X': ['matmul_X'], - 'Y': ['matmul_Y'] - }, - outputs={'Out': ['matmul_output']}, - attrs={ - 'transpose_X': transpose_X, - 'transpose_Y': transpose_Y, - 'alpha': alpha - }) + matmul_op = OpConfig( + type='matmul', + inputs={'X': ['matmul_X'], 'Y': ['matmul_Y']}, + outputs={'Out': ['matmul_output']}, + attrs={ + 'transpose_X': transpose_X, + 'transpose_Y': transpose_Y, + 'alpha': alpha, + }, + ) if activation_type == "relu6": - activation_op = OpConfig(activation_type, - inputs={"X": ["matmul_output"]}, - outputs={"Out": ["activation_output"]}, - threshold=draw( - st.floats(min_value=1.0, - max_value=10.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["matmul_output"]}, + outputs={"Out": ["activation_output"]}, + threshold=draw(st.floats(min_value=1.0, max_value=10.0)), + ) elif activation_type == "leaky_relu": - activation_op = OpConfig(activation_type, - inputs={"X": ["matmul_output"]}, - outputs={"Out": ["activation_output"]}, - alpha=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["matmul_output"]}, + outputs={"Out": ["activation_output"]}, + alpha=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == "swish": - activation_op = OpConfig(activation_type, - inputs={"X": ["matmul_output"]}, - outputs={"Out": ["activation_output"]}, - beta=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["matmul_output"]}, + outputs={"Out": ["activation_output"]}, + beta=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == "clip": activation_op = OpConfig( activation_type, inputs={"X": ["matmul_output"]}, outputs={"Out": ["activation_output"]}, min=draw(st.floats(min_value=0.1, max_value=0.49)), - max=draw(st.floats(min_value=0.5, max_value=1.0))) + max=draw(st.floats(min_value=0.5, max_value=1.0)), + ) else: - activation_op = OpConfig(activation_type, - inputs={"X": ["matmul_output"]}, - outputs={"Out": ["activation_output"]}) + activation_op = OpConfig( + activation_type, + inputs={"X": ["matmul_output"]}, + outputs={"Out": ["activation_output"]}, + ) model_net = [matmul_op, activation_op] @@ -107,9 +121,10 @@ class TestMatmulActivationMkldnnFusePass(PassAutoScanTest): weights={}, inputs={ 'matmul_X': TensorConfig(data_gen=partial(generate_input, 'x')), - 'matmul_Y': TensorConfig(data_gen=partial(generate_input, 'y')) + 'matmul_Y': TensorConfig(data_gen=partial(generate_input, 'y')), }, - outputs=['activation_output']) + outputs=['activation_output'], + ) return program_config @@ -118,9 +133,11 @@ class TestMatmulActivationMkldnnFusePass(PassAutoScanTest): yield config, ['matmul'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=30, - passes=['matmul_activation_mkldnn_fuse_pass']) + self.run_and_statis( + quant=False, + max_examples=30, + passes=['matmul_activation_mkldnn_fuse_pass'], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py index 
a27ed9dd9c99a2d7000e2d634165c97904a5e81a..035cd1d38004ca5a579e9afd644308dcb5b3a681 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_activation_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestMatmulElementwiseAddActivationMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): axis = draw(st.sampled_from([-1, 0, 1])) matmul_as_x = draw(st.booleans()) @@ -30,71 +29,88 @@ class TestMatmulElementwiseAddActivationMkldnnFusePass(PassAutoScanTest): channel = draw(st.sampled_from([16, 32, 64])) input_dim = draw(st.sampled_from([16, 32, 64])) activation_type = draw( - st.sampled_from([ - 'relu', 'gelu', 'tanh', 'sigmoid', 'swish', 'mish', 'sqrt', - 'hard_swish', 'sigmoid', 'abs', 'relu6', 'clip', 'tanh', - 'hard_sigmoid', 'leaky_relu' - ])) + st.sampled_from( + [ + 'relu', + 'gelu', + 'tanh', + 'sigmoid', + 'swish', + 'mish', + 'sqrt', + 'hard_swish', + 'sigmoid', + 'abs', + 'relu6', + 'clip', + 'tanh', + 'hard_sigmoid', + 'leaky_relu', + ] + ) + ) def generate_input(): - return np.random.random([batch_size, channel, input_dim, - input_dim]).astype(np.float32) - - matmul_op = OpConfig(type='matmul', - inputs={ - 'X': ['matmul_x'], - 'Y': ['matmul_y'] - }, - outputs={'Out': ['matmul_output']}, - attrs={ - 'use_mkldnn': True, - }) + return np.random.random( + [batch_size, channel, input_dim, input_dim] + ).astype(np.float32) + + matmul_op = OpConfig( + type='matmul', + inputs={'X': ['matmul_x'], 'Y': ['matmul_y']}, + outputs={'Out': ['matmul_output']}, + attrs={ + 'use_mkldnn': True, + }, + ) if matmul_as_x: inputs = {'X': ['matmul_output'], 'Y': ['elementwise_addend']} else: inputs = {'X': ['elementwise_addend'], 'Y': ['matmul_output']} - elt_add_op = OpConfig(type='elementwise_add', - inputs=inputs, - outputs={'Out': ['elementwise_add_output']}, - attrs={ - 'axis': axis, - 'use_mkldnn': True - }) + elt_add_op = OpConfig( + type='elementwise_add', + inputs=inputs, + outputs={'Out': ['elementwise_add_output']}, + attrs={'axis': axis, 'use_mkldnn': True}, + ) if activation_type == "relu6": - activation_op = OpConfig(activation_type, - inputs={"X": ["elementwise_add_output"]}, - outputs={"Out": ["activation_output"]}, - threshold=draw( - st.floats(min_value=1.0, - max_value=10.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["elementwise_add_output"]}, + outputs={"Out": ["activation_output"]}, + threshold=draw(st.floats(min_value=1.0, max_value=10.0)), + ) elif activation_type == "leaky_relu": - activation_op = OpConfig(activation_type, - inputs={"X": ["elementwise_add_output"]}, - outputs={"Out": ["activation_output"]}, - alpha=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["elementwise_add_output"]}, + outputs={"Out": ["activation_output"]}, + alpha=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == "swish": - activation_op = OpConfig(activation_type, - inputs={"X": ["elementwise_add_output"]}, - outputs={"Out": ["activation_output"]}, - beta=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["elementwise_add_output"]}, + outputs={"Out": ["activation_output"]}, + beta=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == "clip": activation_op = OpConfig( activation_type, inputs={"X": 
["elementwise_add_output"]}, outputs={"Out": ["activation_output"]}, min=draw(st.floats(min_value=0.1, max_value=0.49)), - max=draw(st.floats(min_value=0.5, max_value=1.0))) + max=draw(st.floats(min_value=0.5, max_value=1.0)), + ) else: - activation_op = OpConfig(activation_type, - inputs={"X": ["elementwise_add_output"]}, - outputs={"Out": ["activation_output"]}) + activation_op = OpConfig( + activation_type, + inputs={"X": ["elementwise_add_output"]}, + outputs={"Out": ["activation_output"]}, + ) model_net = [matmul_op, elt_add_op, activation_op] @@ -104,10 +120,12 @@ class TestMatmulElementwiseAddActivationMkldnnFusePass(PassAutoScanTest): inputs={ 'matmul_x': TensorConfig(data_gen=partial(generate_input)), 'matmul_y': TensorConfig(data_gen=partial(generate_input)), - 'elementwise_addend': - TensorConfig(data_gen=partial(generate_input)) + 'elementwise_addend': TensorConfig( + data_gen=partial(generate_input) + ), }, - outputs=['activation_output']) + outputs=['activation_output'], + ) return program_config @@ -116,16 +134,19 @@ class TestMatmulElementwiseAddActivationMkldnnFusePass(PassAutoScanTest): use_mkldnn=True, passes=[ 'matmul_elementwise_add_mkldnn_fuse_pass', - 'matmul_activation_mkldnn_fuse_pass' - ]) + 'matmul_activation_mkldnn_fuse_pass', + ], + ) yield config, ['matmul'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=[ - 'matmul_elementwise_add_mkldnn_fuse_pass', - 'matmul_activation_mkldnn_fuse_pass' - ]) + self.run_and_statis( + quant=False, + passes=[ + 'matmul_elementwise_add_mkldnn_fuse_pass', + 'matmul_activation_mkldnn_fuse_pass', + ], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_fuse_pass.py index 38c8985dbad1ff2eb43bec9e8c755906d6d0cad1..95b79db4630168b7af904195a7625baff957faee 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_elementwise_add_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestMatmulElementwiseAddMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): axis = draw(st.sampled_from([-1, 0, 1])) matmul_as_x = draw(st.booleans()) @@ -31,31 +30,30 @@ class TestMatmulElementwiseAddMkldnnFusePass(PassAutoScanTest): input_dim = draw(st.sampled_from([16, 32, 64])) def generate_input(): - return np.random.random([batch_size, channel, input_dim, - input_dim]).astype(np.float32) - - matmul_op = OpConfig(type='matmul', - inputs={ - 'X': ['matmul_x'], - 'Y': ['matmul_y'] - }, - outputs={'Out': ['matmul_output']}, - attrs={ - 'use_mkldnn': True, - }) + return np.random.random( + [batch_size, channel, input_dim, input_dim] + ).astype(np.float32) + + matmul_op = OpConfig( + type='matmul', + inputs={'X': ['matmul_x'], 'Y': ['matmul_y']}, + outputs={'Out': ['matmul_output']}, + attrs={ + 'use_mkldnn': True, + }, + ) if matmul_as_x: inputs = {'X': ['matmul_output'], 'Y': ['elementwise_addend']} else: inputs = {'X': ['elementwise_addend'], 'Y': ['matmul_output']} - elt_add_op = OpConfig(type='elementwise_add', - inputs=inputs, - outputs={'Out': ['elementwise_add_output']}, - attrs={ - 'axis': axis, - 'use_mkldnn': True - }) + elt_add_op = OpConfig( + type='elementwise_add', + inputs=inputs, + outputs={'Out': ['elementwise_add_output']}, + attrs={'axis': axis, 'use_mkldnn': True}, + ) 
model_net = [matmul_op, elt_add_op] @@ -65,21 +63,25 @@ class TestMatmulElementwiseAddMkldnnFusePass(PassAutoScanTest): inputs={ 'matmul_x': TensorConfig(data_gen=partial(generate_input)), 'matmul_y': TensorConfig(data_gen=partial(generate_input)), - 'elementwise_addend': - TensorConfig(data_gen=partial(generate_input)) + 'elementwise_addend': TensorConfig( + data_gen=partial(generate_input) + ), }, - outputs=['elementwise_add_output']) + outputs=['elementwise_add_output'], + ) return program_config def sample_predictor_configs(self, program_config): config = self.create_inference_config( - use_mkldnn=True, passes=['matmul_elementwise_add_mkldnn_fuse_pass']) + use_mkldnn=True, passes=['matmul_elementwise_add_mkldnn_fuse_pass'] + ) yield config, ['matmul'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=['matmul_elementwise_add_mkldnn_fuse_pass']) + self.run_and_statis( + quant=False, passes=['matmul_elementwise_add_mkldnn_fuse_pass'] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py index 1136867e5530acbec52b8f9ee2dd4151a95cc81a..064847dd7a0bfada3a7348cacd83e22e21206ae6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py @@ -20,7 +20,6 @@ from inference_pass_test import InferencePassTest class TestMKLDNNMatmulFuseOp(InferencePassTest): - def init_data(self): self.bs = 8 self.d_type = np.float32 @@ -30,16 +29,17 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest): def make_network(self): with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data(name='x', - shape=[-1] + self.shape_x, - dtype=self.d_type) - y = fluid.data(name='y', - shape=[-1] + self.shape_y, - dtype=self.d_type) + x = fluid.data( + name='x', shape=[-1] + self.shape_x, dtype=self.d_type + ) + y = fluid.data( + name='y', shape=[-1] + self.shape_y, dtype=self.d_type + ) out = fluid.layers.matmul(x, y) out = fluid.layers.transpose(out, perm=[0, 2, 1, 3]) out = fluid.layers.reshape( - out, [0, 0, self.shape_y[0] * self.shape_y[2]]) + out, [0, 0, self.shape_y[0] * self.shape_y[2]] + ) out = fluid.layers.fc(out, size=1) return out @@ -51,7 +51,7 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest): def set_feeds(self, out): self.feeds = { "x": np.random.random([self.bs] + self.shape_x).astype(self.d_type), - "y": np.random.random([self.bs] + self.shape_y).astype(self.d_type) + "y": np.random.random([self.bs] + self.shape_y).astype(self.d_type), } self.fetch_list = [out] @@ -61,7 +61,6 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest): class TestMKLDNNMatmulOtherDimsFuseOp(TestMKLDNNMatmulFuseOp): - def init_data(self): self.bs = 8 self.d_type = np.float32 @@ -71,15 +70,14 @@ class TestMKLDNNMatmulOtherDimsFuseOp(TestMKLDNNMatmulFuseOp): class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp): - def make_network(self): with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data(name='x', - shape=[-1] + self.shape_x, - dtype=self.d_type) - y = fluid.data(name='y', - shape=[-1] + self.shape_y, - dtype=self.d_type) + x = fluid.data( + name='x', shape=[-1] + self.shape_x, dtype=self.d_type + ) + y = fluid.data( + name='y', shape=[-1] + self.shape_y, dtype=self.d_type + ) out = fluid.layers.matmul(x, y) out = 
fluid.layers.transpose(out, perm=[0, 1, 2, 3]) out = fluid.layers.reshape(out, [0, 0, 0, 0]) @@ -88,7 +86,6 @@ class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp): class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp): - def init_data(self): self.bs = 7 self.d_type = np.float32 @@ -98,18 +95,20 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp): def make_network(self): with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data(name='x', - shape=[-1] + self.shape_x, - dtype=self.d_type) - y = fluid.data(name='y', - shape=[-1] + self.shape_y, - dtype=self.d_type) + x = fluid.data( + name='x', shape=[-1] + self.shape_x, dtype=self.d_type + ) + y = fluid.data( + name='y', shape=[-1] + self.shape_y, dtype=self.d_type + ) out = fluid.layers.matmul(x, y) out = fluid.layers.transpose(out, perm=[0, 2, 1, 3]) - out = fluid.layers.transpose(out, perm=[0, 1, 2, - 3]) # breaks pattern + out = fluid.layers.transpose( + out, perm=[0, 1, 2, 3] + ) # breaks pattern out = fluid.layers.reshape( - out, [0, 0, self.shape_y[0] * self.shape_y[2]]) + out, [0, 0, self.shape_y[0] * self.shape_y[2]] + ) out = fluid.layers.fc(out, size=1) return out diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_transpose_reshape_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_transpose_reshape_fuse_pass.py index 56825149dd26ca424cd5abf4ded13ad6597582ea..6c9a9c315949b9708c644bb7fd16ec04940af31a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_transpose_reshape_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_transpose_reshape_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestMatmulTransposeReshapeMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -63,39 +62,39 @@ class TestMatmulTransposeReshapeMkldnnFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op = OpConfig(type="matmul", - inputs={ - "X": ["input_data1"], - "Y": ["input_data2"] - }, - outputs={"Out": ["matmul_output"]}, - attrs={ - "transpose_X": transpose_X, - "transpose_Y": transpose_Y, - "alpha": alpha, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }) - - transpose2_op = OpConfig(type="transpose2", - inputs={"X": ["matmul_output"]}, - outputs={ - "Out": ["transpose2_output"], - "XShape": ["transpose2_xshape"] - }, - attrs={'axis': axis}) - - reshape2_op = OpConfig(type="reshape2", - inputs={"X": ["transpose2_output"]}, - outputs={ - "Out": ["reshape2_output"], - "XShape": ["reshape2_xshape"] - }, - attrs={'shape': shape}) + matmul_op = OpConfig( + type="matmul", + inputs={"X": ["input_data1"], "Y": ["input_data2"]}, + outputs={"Out": ["matmul_output"]}, + attrs={ + "transpose_X": transpose_X, + "transpose_Y": transpose_Y, + "alpha": alpha, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + ) + + transpose2_op = OpConfig( + type="transpose2", + inputs={"X": ["matmul_output"]}, + outputs={ + "Out": ["transpose2_output"], + "XShape": ["transpose2_xshape"], + }, + attrs={'axis': axis}, + ) + + reshape2_op = OpConfig( + 
type="reshape2", + inputs={"X": ["transpose2_output"]}, + outputs={"Out": ["reshape2_output"], "XShape": ["reshape2_xshape"]}, + attrs={'shape': shape}, + ) model_net = [matmul_op, transpose2_op, reshape2_op] @@ -103,12 +102,15 @@ class TestMatmulTransposeReshapeMkldnnFusePass(PassAutoScanTest): ops=model_net, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, "x")), - "input_data2": - TensorConfig(data_gen=partial(generate_input, "y")) + "input_data1": TensorConfig( + data_gen=partial(generate_input, "x") + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, "y") + ), }, - outputs=["reshape2_output"]) + outputs=["reshape2_output"], + ) return program_config @@ -118,7 +120,8 @@ class TestMatmulTransposeReshapeMkldnnFusePass(PassAutoScanTest): def test(self): self.run_and_statis( - quant=False, passes=["matmul_transpose_reshape_mkldnn_fuse_pass"]) + quant=False, passes=["matmul_transpose_reshape_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py index 153b81fa797af560fa56898db6c6a1ce54719215..97b2c22b35e79e9e10e05740bef79a6211fe3142 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_activation_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): transpose_X = draw(st.booleans()) transpose_Y = draw(st.booleans()) @@ -29,11 +28,24 @@ class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest): channel = draw(st.sampled_from([16, 32, 64])) input_dim = draw(st.sampled_from([16, 32, 64])) activation_type = draw( - st.sampled_from([ - 'relu', 'gelu', 'swish', 'mish', 'sqrt', 'hard_swish', - 'sigmoid', 'abs', 'relu6', 'clip', 'tanh', 'hard_sigmoid', - 'leaky_relu' - ])) + st.sampled_from( + [ + 'relu', + 'gelu', + 'swish', + 'mish', + 'sqrt', + 'hard_swish', + 'sigmoid', + 'abs', + 'relu6', + 'clip', + 'tanh', + 'hard_sigmoid', + 'leaky_relu', + ] + ) + ) def generate_input(type): broadcast_X = st.booleans() @@ -60,49 +72,48 @@ class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op = OpConfig(type='matmul_v2', - inputs={ - 'X': ['matmul_X'], - 'Y': ['matmul_Y'] - }, - outputs={'Out': ['matmul_output']}, - attrs={ - 'trans_x': transpose_X, - 'trans_y': transpose_Y - }) + matmul_op = OpConfig( + type='matmul_v2', + inputs={'X': ['matmul_X'], 'Y': ['matmul_Y']}, + outputs={'Out': ['matmul_output']}, + attrs={'trans_x': transpose_X, 'trans_y': transpose_Y}, + ) if activation_type == 'relu6': - activation_op = OpConfig(activation_type, - inputs={'X': ['matmul_output']}, - outputs={'Out': ['activation_output']}, - threshold=draw( - st.floats(min_value=1.0, - max_value=10.0))) + activation_op = OpConfig( + activation_type, + inputs={'X': ['matmul_output']}, + outputs={'Out': ['activation_output']}, + threshold=draw(st.floats(min_value=1.0, max_value=10.0)), + ) elif activation_type == 'leaky_relu': - activation_op = OpConfig(activation_type, - inputs={'X': ['matmul_output']}, - outputs={'Out': ['activation_output']}, - alpha=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + 
inputs={'X': ['matmul_output']}, + outputs={'Out': ['activation_output']}, + alpha=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == 'swish': - activation_op = OpConfig(activation_type, - inputs={'X': ['matmul_output']}, - outputs={'Out': ['activation_output']}, - beta=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={'X': ['matmul_output']}, + outputs={'Out': ['activation_output']}, + beta=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == 'clip': activation_op = OpConfig( activation_type, inputs={'X': ['matmul_output']}, outputs={'Out': ['activation_output']}, min=draw(st.floats(min_value=0.1, max_value=0.49)), - max=draw(st.floats(min_value=0.5, max_value=1.0))) + max=draw(st.floats(min_value=0.5, max_value=1.0)), + ) else: - activation_op = OpConfig(activation_type, - inputs={'X': ['matmul_output']}, - outputs={'Out': ['activation_output']}) + activation_op = OpConfig( + activation_type, + inputs={'X': ['matmul_output']}, + outputs={'Out': ['activation_output']}, + ) model_net = [matmul_op, activation_op] @@ -111,9 +122,10 @@ class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest): weights={}, inputs={ 'matmul_X': TensorConfig(data_gen=partial(generate_input, 'X')), - 'matmul_Y': TensorConfig(data_gen=partial(generate_input, 'Y')) + 'matmul_Y': TensorConfig(data_gen=partial(generate_input, 'Y')), }, - outputs=['activation_output']) + outputs=['activation_output'], + ) return program_config @@ -122,9 +134,11 @@ class TestMatmulv2ActivationMkldnnFusePass(PassAutoScanTest): yield config, ['matmul_v2'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=30, - passes=['matmul_activation_mkldnn_fuse_pass']) + self.run_and_statis( + quant=False, + max_examples=30, + passes=['matmul_activation_mkldnn_fuse_pass'], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_elementwise_add_fuse_pass.py index 03f2867948e916f0aa32d4b3bfee267bfa2d7711..783a38783e683afd253d0a92bfd5eb60df775f60 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_elementwise_add_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_elementwise_add_fuse_pass.py @@ -21,7 +21,6 @@ import hypothesis.strategies as st class TestMatmulV2ElementwiseAddMkldnnFusePass(PassAutoScanTest): - def sample_program_config(self, draw): axis = draw(st.sampled_from([-1, 0, 1])) matmul_as_x = draw(st.booleans()) @@ -49,26 +48,24 @@ class TestMatmulV2ElementwiseAddMkldnnFusePass(PassAutoScanTest): shape_out = [batch_size, channel, input_dim_X, input_dim_Y] return np.random.random(shape_out).astype(np.float32) - matmul_op = OpConfig(type='matmul_v2', - inputs={ - 'X': ['matmul_X'], - 'Y': ['matmul_Y'] - }, - outputs={'Out': ['matmul_output']}, - attrs={'use_mkldnn': True}) + matmul_op = OpConfig( + type='matmul_v2', + inputs={'X': ['matmul_X'], 'Y': ['matmul_Y']}, + outputs={'Out': ['matmul_output']}, + attrs={'use_mkldnn': True}, + ) if matmul_as_x: inputs = {'X': ['matmul_output'], 'Y': ['elementwise_addend']} else: inputs = {'X': ['elementwise_addend'], 'Y': ['matmul_output']} - elt_add_op = OpConfig(type='elementwise_add', - inputs=inputs, - outputs={'Out': ['elementwise_add_output']}, - attrs={ - 'axis': axis, - 'use_mkldnn': True - }) + elt_add_op = OpConfig( + 
type='elementwise_add', + inputs=inputs, + outputs={'Out': ['elementwise_add_output']}, + attrs={'axis': axis, 'use_mkldnn': True}, + ) model_net = [matmul_op, elt_add_op] @@ -76,14 +73,14 @@ class TestMatmulV2ElementwiseAddMkldnnFusePass(PassAutoScanTest): ops=model_net, weights={}, inputs={ - 'matmul_X': - TensorConfig(data_gen=partial(generate_input, 'X')), - 'matmul_Y': - TensorConfig(data_gen=partial(generate_input, 'Y')), - 'elementwise_addend': - TensorConfig(data_gen=partial(generate_input, 'ElAdd')) + 'matmul_X': TensorConfig(data_gen=partial(generate_input, 'X')), + 'matmul_Y': TensorConfig(data_gen=partial(generate_input, 'Y')), + 'elementwise_addend': TensorConfig( + data_gen=partial(generate_input, 'ElAdd') + ), }, - outputs=['elementwise_add_output']) + outputs=['elementwise_add_output'], + ) return program_config @@ -92,9 +89,11 @@ class TestMatmulV2ElementwiseAddMkldnnFusePass(PassAutoScanTest): yield config, ['matmul_v2'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=30, - passes=['matmul_elementwise_add_mkldnn_fuse_pass']) + self.run_and_statis( + quant=False, + max_examples=30, + passes=['matmul_elementwise_add_mkldnn_fuse_pass'], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_transpose_reshape_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_transpose_reshape_fuse_pass.py index 42f349cf4ae0a2f909609c59563da11e64efff13..317595d20a63eb1fef36b78d0f872c09fa2019d5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_transpose_reshape_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_v2_transpose_reshape_fuse_pass.py @@ -22,18 +22,25 @@ import hypothesis.strategies as st class TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: - if program_config.inputs["input_data1"].shape[ - -4] != 1 and program_config.inputs["input_data2"].shape[-4] != 1: - if program_config.inputs["input_data1"].shape[ - -4] != program_config.inputs["input_data2"].shape[-4]: + if ( + program_config.inputs["input_data1"].shape[-4] != 1 + and program_config.inputs["input_data2"].shape[-4] != 1 + ): + if ( + program_config.inputs["input_data1"].shape[-4] + != program_config.inputs["input_data2"].shape[-4] + ): return False - if program_config.inputs["input_data1"].shape[ - -3] != 1 and program_config.inputs["input_data2"].shape[-3] != 1: - if program_config.inputs["input_data1"].shape[ - -3] != program_config.inputs["input_data2"].shape[-3]: + if ( + program_config.inputs["input_data1"].shape[-3] != 1 + and program_config.inputs["input_data2"].shape[-3] != 1 + ): + if ( + program_config.inputs["input_data1"].shape[-3] + != program_config.inputs["input_data2"].shape[-3] + ): return False attrs = [ @@ -76,38 +83,38 @@ class TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op = OpConfig(type="matmul_v2", - inputs={ - "X": ["input_data1"], - "Y": ["input_data2"] - }, - outputs={"Out": ["matmul_output"]}, - attrs={ - "trans_x": transpose_X, - "trans_y": transpose_Y, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }) - - transpose2_op = OpConfig(type="transpose2", - inputs={"X": ["matmul_output"]}, - outputs={ - "Out": ["transpose2_output"], - 
"XShape": ["transpose2_xshape"] - }, - attrs={'axis': axis}) - - reshape2_op = OpConfig(type="reshape2", - inputs={"X": ["transpose2_output"]}, - outputs={ - "Out": ["reshape2_output"], - "XShape": ["reshape2_xshape"] - }, - attrs={'shape': shape}) + matmul_op = OpConfig( + type="matmul_v2", + inputs={"X": ["input_data1"], "Y": ["input_data2"]}, + outputs={"Out": ["matmul_output"]}, + attrs={ + "trans_x": transpose_X, + "trans_y": transpose_Y, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + ) + + transpose2_op = OpConfig( + type="transpose2", + inputs={"X": ["matmul_output"]}, + outputs={ + "Out": ["transpose2_output"], + "XShape": ["transpose2_xshape"], + }, + attrs={'axis': axis}, + ) + + reshape2_op = OpConfig( + type="reshape2", + inputs={"X": ["transpose2_output"]}, + outputs={"Out": ["reshape2_output"], "XShape": ["reshape2_xshape"]}, + attrs={'shape': shape}, + ) model_net = [matmul_op, transpose2_op, reshape2_op] @@ -115,12 +122,15 @@ class TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest): ops=model_net, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, "x")), - "input_data2": - TensorConfig(data_gen=partial(generate_input, "y")) + "input_data1": TensorConfig( + data_gen=partial(generate_input, "x") + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, "y") + ), }, - outputs=["reshape2_output"]) + outputs=["reshape2_output"], + ) return program_config @@ -139,7 +149,8 @@ class TestMatmulv2TransposeReshapeMkldnnFusePass(PassAutoScanTest): def test(self): self.run_and_statis( - quant=False, passes=["matmul_transpose_reshape_mkldnn_fuse_pass"]) + quant=False, passes=["matmul_transpose_reshape_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmulv2_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmulv2_op.py index 06829c2d78b5e0253132870a28e14908bde640ae..29dee32df55662f77edb9b1df3519af6ab1b1140 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmulv2_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmulv2_op.py @@ -23,25 +23,30 @@ import hypothesis.strategies as st class TestMkldnnMatmulv2Op(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: if len(program_config.inputs["input_data2"].shape) == 4: - if program_config.inputs["input_data1"].shape[ - -4] != 1 and program_config.inputs["input_data2"].shape[ - -4] != 1: - if program_config.inputs["input_data1"].shape[ - -4] != program_config.inputs["input_data2"].shape[-4]: + if ( + program_config.inputs["input_data1"].shape[-4] != 1 + and program_config.inputs["input_data2"].shape[-4] != 1 + ): + if ( + program_config.inputs["input_data1"].shape[-4] + != program_config.inputs["input_data2"].shape[-4] + ): return False - if program_config.inputs["input_data1"].shape[ - -3] != 1 and program_config.inputs["input_data2"].shape[-3] != 1: - if program_config.inputs["input_data1"].shape[ - -3] != program_config.inputs["input_data2"].shape[-3]: + if ( + program_config.inputs["input_data1"].shape[-3] != 1 + and program_config.inputs["input_data2"].shape[-3] != 1 + ): + if ( + program_config.inputs["input_data1"].shape[-3] + != program_config.inputs["input_data2"].shape[-3] + ): return False return True def sample_program_configs(self, *args, **kwargs): - def generate_input(type, 
*args, **kwargs): transpose_X = kwargs["transpose_X"] transpose_Y = kwargs["transpose_Y"] @@ -81,35 +86,35 @@ class TestMkldnnMatmulv2Op(MkldnnAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op = OpConfig(type="matmul_v2", - inputs={ - "X": ["input_data1"], - "Y": ["input_data2"] - }, - outputs={"Out": ["matmul_output"]}, - attrs={ - "trans_x": kwargs["transpose_X"], - "trans_y": kwargs["transpose_Y"], - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }) + matmul_op = OpConfig( + type="matmul_v2", + inputs={"X": ["input_data1"], "Y": ["input_data2"]}, + outputs={"Out": ["matmul_output"]}, + attrs={ + "trans_x": kwargs["transpose_X"], + "trans_y": kwargs["transpose_Y"], + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + ) program_config = ProgramConfig( ops=[matmul_op], weights={}, inputs={ - "input_data1": - TensorConfig( - data_gen=partial(generate_input, "x", *args, **kwargs)), - "input_data2": - TensorConfig( - data_gen=partial(generate_input, "y", *args, **kwargs)) + "input_data1": TensorConfig( + data_gen=partial(generate_input, "x", *args, **kwargs) + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, "y", *args, **kwargs) + ), }, - outputs=["matmul_output"]) + outputs=["matmul_output"], + ) yield program_config @@ -117,14 +122,16 @@ class TestMkldnnMatmulv2Op(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(transpose_X=st.booleans(), - transpose_Y=st.booleans(), - y_dim_len=st.sampled_from([3, 4]), - batch_size1=st.integers(min_value=1, max_value=4), - batch_size2=st.integers(min_value=1, max_value=4), - channel1=st.sampled_from([1, 16, 32, 64]), - channel2=st.sampled_from([1, 16, 32, 64]), - input_dim=st.sampled_from([16, 32, 64])) + @given( + transpose_X=st.booleans(), + transpose_Y=st.booleans(), + y_dim_len=st.sampled_from([3, 4]), + batch_size1=st.integers(min_value=1, max_value=4), + batch_size2=st.integers(min_value=1, max_value=4), + channel1=st.sampled_from([1, 16, 32, 64]), + channel2=st.sampled_from([1, 16, 32, 64]), + input_dim=st.sampled_from([16, 32, 64]), + ) def test(self, *args, **kwargs): self.run_test(*args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_mish_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_mish_op.py index 2b2759cc6515129f894ff3f58b330240bfbcd7f3..a6062cb526b23807930944135364b8af46de25cf 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_mish_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_mish_op.py @@ -22,35 +22,39 @@ import hypothesis.strategies as st class TestMkldnnMishOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: # if mode is channel, and in_shape is 1 rank - if len(program_config.inputs['input_data'].shape - ) == 1 and program_config.ops[0].attrs['mode'] == 'channel': + if ( + len(program_config.inputs['input_data'].shape) == 1 + and program_config.ops[0].attrs['mode'] == 'channel' + ): return False return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) - mish_op = OpConfig(type="mish", - inputs={"X": ["input_data"]}, - outputs={"Out": 
["output_data"]}, - attrs={ - "mode": kwargs['mode'], - "data_format": kwargs['data_format'] - }) + mish_op = OpConfig( + type="mish", + inputs={"X": ["input_data"]}, + outputs={"Out": ["output_data"]}, + attrs={ + "mode": kwargs['mode'], + "data_format": kwargs['data_format'], + }, + ) program_config = ProgramConfig( ops=[mish_op], weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -58,11 +62,13 @@ class TestMkldnnMishOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(mode=st.sampled_from(['all', 'channel', 'element']), - data_format=st.sampled_from(['NCHW', 'NHWC']), - in_shape=st.lists(st.integers(min_value=1, max_value=32), - min_size=1, - max_size=4)) + @given( + mode=st.sampled_from(['all', 'channel', 'element']), + data_format=st.sampled_from(['NCHW', 'NHWC']), + in_shape=st.lists( + st.integers(min_value=1, max_value=32), min_size=1, max_size=4 + ), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad2d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad2d_op.py index e26cf2a8605a27d159e395c3b7b2e3d9438f9acc..86fc1df8cc641314002b7b14e7681cb1aa3eff22 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad2d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad2d_op.py @@ -22,29 +22,31 @@ import hypothesis.strategies as st class TestOneDNNPad2DOp(MkldnnAutoScanTest): - def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) - pad3d_op = OpConfig(type="pad2d", - inputs={"X": ["input_data"]}, - outputs={"Out": ["output_data"]}, - attrs={ - "mode": "constant", - "data_format": kwargs['data_format'], - "paddings": kwargs['paddings'], - }) + pad3d_op = OpConfig( + type="pad2d", + inputs={"X": ["input_data"]}, + outputs={"Out": ["output_data"]}, + attrs={ + "mode": "constant", + "data_format": kwargs['data_format'], + "paddings": kwargs['paddings'], + }, + ) program_config = ProgramConfig( ops=[pad3d_op], weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -52,11 +54,15 @@ class TestOneDNNPad2DOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(data_format=st.sampled_from(['NCHW', 'NHWC']), - in_shape=st.sampled_from([[2, 3, 4, 5], [1, 4, 1, 3], [4, 3, 2, 1], - [1, 1, 1, 1]]), - paddings=st.sampled_from([[0, 0, 0, 0], [1, 2, 0, 1], [2, 5, 11, 3], - [0, 5, 0, 1]])) + @given( + data_format=st.sampled_from(['NCHW', 'NHWC']), + in_shape=st.sampled_from( + [[2, 3, 4, 5], [1, 4, 1, 3], [4, 3, 2, 1], [1, 1, 1, 1]] + ), + paddings=st.sampled_from( + [[0, 0, 0, 0], [1, 2, 0, 1], [2, 5, 11, 3], [0, 5, 0, 1]] + ), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad3d_op.py index 
8184bb149e1ac3fb629e4183ac270a8bd4316677..cb4daa686534f044924167ae77760d9adc27c9e6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_pad3d_op.py @@ -22,37 +22,35 @@ import hypothesis.strategies as st class TestOneDNNPad3DOp(MkldnnAutoScanTest): - def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) def generate_paddings(): return np.random.randint(0, 4, size=(6)).astype(np.int32) - pad3d_op = OpConfig(type="pad3d", - inputs={ - "X": ["input_data"], - "Paddings": ["paddings_data"] - }, - outputs={"Out": ["output_data"]}, - attrs={ - "mode": "constant", - "data_format": kwargs['data_format'], - "paddings": kwargs['paddings'], - }) + pad3d_op = OpConfig( + type="pad3d", + inputs={"X": ["input_data"], "Paddings": ["paddings_data"]}, + outputs={"Out": ["output_data"]}, + attrs={ + "mode": "constant", + "data_format": kwargs['data_format'], + "paddings": kwargs['paddings'], + }, + ) program_config = ProgramConfig( ops=[pad3d_op], weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), - "paddings_data": - TensorConfig(data_gen=generate_paddings) + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), + "paddings_data": TensorConfig(data_gen=generate_paddings), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -60,12 +58,21 @@ class TestOneDNNPad3DOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(data_format=st.sampled_from(['NCDHW', 'NDHWC']), - use_paddings_tensor=st.sampled_from([True, False]), - in_shape=st.sampled_from([[2, 3, 4, 5, 6], [1, 4, 1, 3, 2], - [4, 3, 2, 1, 1], [1, 1, 1, 1, 1]]), - paddings=st.sampled_from([[0, 0, 0, 0, 0, 0], [1, 2, 0, 1, 2, 1], - [2, 5, 11, 3, 4, 3], [0, 5, 0, 1, 0, 2]])) + @given( + data_format=st.sampled_from(['NCDHW', 'NDHWC']), + use_paddings_tensor=st.sampled_from([True, False]), + in_shape=st.sampled_from( + [[2, 3, 4, 5, 6], [1, 4, 1, 3, 2], [4, 3, 2, 1, 1], [1, 1, 1, 1, 1]] + ), + paddings=st.sampled_from( + [ + [0, 0, 0, 0, 0, 0], + [1, 2, 0, 1, 2, 1], + [2, 5, 11, 3, 4, 3], + [0, 5, 0, 1, 0, 2], + ] + ), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py index 646cf1170ee97b215f7bc1592e787f91f7adc15c..5a9aabe1b9b5c0c5da1fdb97dfd2c7ec8e1922b0 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py @@ -23,16 +23,16 @@ import hypothesis.strategies as st class TestMkldnnPreluOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: # if mode is channel, and in_shape is 1 rank - if len(program_config.inputs['input_data'].shape - ) == 1 and program_config.ops[0].attrs['mode'] == 'channel': + if ( + len(program_config.inputs['input_data'].shape) == 1 + and program_config.ops[0].attrs['mode'] == 'channel' + ): return False return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) @@ -45,38 +45,42 @@ class TestMkldnnPreluOp(MkldnnAutoScanTest): return 
np.zeros((1)).astype(np.float32) if kwargs['data_format'] == 'NCHW': return np.random.random(kwargs['in_shape'][1]).astype( - np.float32) + np.float32 + ) else: return np.random.random(kwargs['in_shape'][-1]).astype( - np.float32) + np.float32 + ) else: if len(kwargs['in_shape']) <= 1: # not valid case, just return 0 return np.zeros((1)).astype(np.float32) return np.random.random(kwargs['in_shape']).astype(np.float32) - prelu_op = OpConfig(type="prelu", - inputs={ - "X": ["input_data"], - "Alpha": ["alpha_weight"] - }, - outputs={"Out": ["output_data"]}, - attrs={ - "mode": kwargs['mode'], - "data_format": kwargs['data_format'] - }) + prelu_op = OpConfig( + type="prelu", + inputs={"X": ["input_data"], "Alpha": ["alpha_weight"]}, + outputs={"Out": ["output_data"]}, + attrs={ + "mode": kwargs['mode'], + "data_format": kwargs['data_format'], + }, + ) program_config = ProgramConfig( ops=[prelu_op], weights={ - "alpha_weight": - TensorConfig(data_gen=partial(generate_alpha, *args, **kwargs)) + "alpha_weight": TensorConfig( + data_gen=partial(generate_alpha, *args, **kwargs) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -87,11 +91,13 @@ class TestMkldnnPreluOp(MkldnnAutoScanTest): def add_skip_pass_case(self): pass - @given(mode=st.sampled_from(['all', 'channel', 'element']), - data_format=st.sampled_from(['NCHW', 'NHWC']), - in_shape=st.lists(st.integers(min_value=1, max_value=32), - min_size=1, - max_size=4)) + @given( + mode=st.sampled_from(['all', 'channel', 'element']), + data_format=st.sampled_from(['NCHW', 'NHWC']), + in_shape=st.lists( + st.integers(min_value=1, max_value=32), min_size=1, max_size=4 + ), + ) def test(self, *args, **kwargs): self.add_skip_pass_case() self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_fuse_pass.py index 62615ce54c87ca572d1e1f22aac326d051934edd..7147c412618fdc8396939102265b11d5c15ab6d1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_fuse_pass.py @@ -25,7 +25,6 @@ num = 32 * 64 class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -35,8 +34,8 @@ class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): alpha = draw(st.floats(min_value=0.01, max_value=2)) axis = draw(st.sampled_from([[0, 2, 1, 3]])) shape = draw( - st.sampled_from([[0, 64, -1, 32], [0, 32, -1, 64], [-1, 32, 1, - 64]])) + st.sampled_from([[0, 64, -1, 32], [0, 32, -1, 64], [-1, 32, 1, 64]]) + ) batch_size = draw(st.integers(min_value=1, max_value=4)) channel = draw(st.integers(min_value=1, max_value=64)) input_dim = draw(st.sampled_from([32, 64])) @@ -65,8 +64,10 @@ class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): if attrs[2]['transpose_X'] and attrs[2]['transpose_Y']: shape_y = [ - matmul_shape[0], matmul_shape[1], matmul_shape[-1], - int(num / matmul_shape[-1]) + matmul_shape[0], + matmul_shape[1], + matmul_shape[-1], + int(num / matmul_shape[-1]), ] elif attrs[2]['transpose_X']: shape_y = matmul_shape @@ 
-74,70 +75,64 @@ class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): shape_y = matmul_shape else: shape_y = [ - matmul_shape[0], matmul_shape[1], matmul_shape[-1], - int(num / matmul_shape[-1]) + matmul_shape[0], + matmul_shape[1], + matmul_shape[-1], + int(num / matmul_shape[-1]), ] return np.random.random(shape_y).astype(np.float32) - attrs = [{ - "shape": shape - }, { - "axis": axis - }, { - "transpose_X": transpose_X, - "transpose_Y": transpose_Y, - "alpha": alpha - }, { - 'batch_size': batch_size, - 'channel': channel, - 'input_dim': input_dim - }] - - ops_config = [{ - "op_type": "reshape2", - "op_inputs": { - "X": ["input_data1"] - }, - "op_outputs": { - "Out": ["reshape2_output"], - "XShape": ["reshape2_xshape"] - }, - "op_attrs": { - 'shape': attrs[0]['shape'] - }, - }, { - "op_type": "transpose2", - "op_inputs": { - "X": ["reshape2_output"] + attrs = [ + {"shape": shape}, + {"axis": axis}, + { + "transpose_X": transpose_X, + "transpose_Y": transpose_Y, + "alpha": alpha, }, - "op_outputs": { - "Out": ["transpose2_output"], - "XShape": ["transpose2_xshape"] + { + 'batch_size': batch_size, + 'channel': channel, + 'input_dim': input_dim, }, - "op_attrs": { - 'axis': attrs[1]['axis'] + ] + + ops_config = [ + { + "op_type": "reshape2", + "op_inputs": {"X": ["input_data1"]}, + "op_outputs": { + "Out": ["reshape2_output"], + "XShape": ["reshape2_xshape"], + }, + "op_attrs": {'shape': attrs[0]['shape']}, }, - }, { - "op_type": "matmul", - "op_inputs": { - "X": ["transpose2_output"], - "Y": ["input_data2"] + { + "op_type": "transpose2", + "op_inputs": {"X": ["reshape2_output"]}, + "op_outputs": { + "Out": ["transpose2_output"], + "XShape": ["transpose2_xshape"], + }, + "op_attrs": {'axis': attrs[1]['axis']}, }, - "op_outputs": { - "Out": ["matmul_output"] + { + "op_type": "matmul", + "op_inputs": {"X": ["transpose2_output"], "Y": ["input_data2"]}, + "op_outputs": {"Out": ["matmul_output"]}, + "op_attrs": { + 'transpose_X': attrs[2]['transpose_X'], + 'transpose_Y': attrs[2]['transpose_Y'], + 'alpha': attrs[2]['alpha'], + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, }, - "op_attrs": { - 'transpose_X': attrs[2]['transpose_X'], - 'transpose_Y': attrs[2]['transpose_Y'], - 'alpha': attrs[2]['alpha'], - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - } - }] + ] ops = self.generate_op_config(ops_config) @@ -145,12 +140,15 @@ class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): ops=ops, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input1, attrs)), - "input_data2": - TensorConfig(data_gen=partial(generate_input2, attrs)) + "input_data1": TensorConfig( + data_gen=partial(generate_input1, attrs) + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input2, attrs) + ), }, - outputs=["matmul_output"]) + outputs=["matmul_output"], + ) return program_config @@ -160,7 +158,8 @@ class TestReshapeTransposeMatmulMkldnnFusePass(PassAutoScanTest): def test(self): self.run_and_statis( - quant=False, passes=["reshape_transpose_matmul_mkldnn_fuse_pass"]) + quant=False, passes=["reshape_transpose_matmul_mkldnn_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py index 9d6c0309e17d27ae3efd5633cade3003ec4727df..b62b3eaf51795ae7f773ede7b2347c7a4e541a74 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py @@ -21,24 +21,26 @@ from paddle.fluid.core import PassVersionChecker class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest): - def setUp(self): self.set_params() self.tranpose_perm = [0, 2, 1, 3] self.pass_name = 'reshape_transpose_matmul_mkldnn_fuse_pass' with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=self.data_shape, - dtype="float32") - weight = fluid.layers.create_parameter(shape=self.weight_shape, - dtype="float32") + data = fluid.data( + name="data", shape=self.data_shape, dtype="float32" + ) + weight = fluid.layers.create_parameter( + shape=self.weight_shape, dtype="float32" + ) reshape = fluid.layers.reshape(data, shape=self.reshape_shape) transpose = fluid.layers.transpose(reshape, self.tranpose_perm) - matmul = paddle.matmul(transpose, - weight, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y) + matmul = paddle.matmul( + transpose, + weight, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + ) self.fetch_list = [matmul] self.enable_mkldnn = True @@ -60,8 +62,8 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest): class TestReshapeTransposeMatmulV2OneDNNFusePassBroadcast( - TestReshapeTransposeMatmulV2OneDNNFusePass): - + TestReshapeTransposeMatmulV2OneDNNFusePass +): def set_params(self): self.data_shape = [2, 64, 16] self.weight_shape = [1, 2, 8, 64] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py index c12915c1eb150dfcd220bea3d369675cd29ccdc4..109a56edbcb0264c9d9abd644a81a1ea60fca28e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_scale_matmul_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -40,39 +39,55 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): def generate_input(attrs, type): if attrs[1]['transpose_X'] and attrs[1]['transpose_Y']: shape_x = [ - attrs[2]['batch_size'], attrs[2]['channel'], - attrs[2]['input_dim'], 32 + attrs[2]['batch_size'], + attrs[2]['channel'], + attrs[2]['input_dim'], + 32, ] shape_y = [ - attrs[2]['batch_size'], attrs[2]['channel'], 64, - attrs[2]['input_dim'] + attrs[2]['batch_size'], + attrs[2]['channel'], + 64, + attrs[2]['input_dim'], ] elif attrs[1]['transpose_X']: shape_x = [ - attrs[2]['batch_size'], attrs[2]['channel'], - attrs[2]['input_dim'], 32 + attrs[2]['batch_size'], + attrs[2]['channel'], + attrs[2]['input_dim'], + 32, ] shape_y = [ - attrs[2]['batch_size'], attrs[2]['channel'], - attrs[2]['input_dim'], 64 + attrs[2]['batch_size'], + attrs[2]['channel'], + attrs[2]['input_dim'], + 64, ] elif attrs[1]['transpose_Y']: shape_x = [ - attrs[2]['batch_size'], attrs[2]['channel'], 32, - attrs[2]['input_dim'] + attrs[2]['batch_size'], + attrs[2]['channel'], + 32, + attrs[2]['input_dim'], ] 
shape_y = [ - attrs[2]['batch_size'], attrs[2]['channel'], 8, - attrs[2]['input_dim'] + attrs[2]['batch_size'], + attrs[2]['channel'], + 8, + attrs[2]['input_dim'], ] else: shape_x = [ - attrs[2]['batch_size'], attrs[2]['channel'], 32, - attrs[2]['input_dim'] + attrs[2]['batch_size'], + attrs[2]['channel'], + 32, + attrs[2]['input_dim'], ] shape_y = [ - attrs[2]['batch_size'], attrs[2]['channel'], - attrs[2]['input_dim'], 16 + attrs[2]['batch_size'], + attrs[2]['channel'], + attrs[2]['input_dim'], + 16, ] if type == "x": @@ -80,54 +95,52 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - attrs = [{ - "scale": scale, - "bias": bias, - "bias_after_scale": bias_after_scale - }, { - "transpose_X": transpose_X, - "transpose_Y": transpose_Y, - "alpha": alpha - }, { - 'batch_size': batch_size, - 'channel': channel, - 'input_dim': input_dim - }] - - ops_config = [{ - "op_type": "scale", - "op_inputs": { - "X": ["input_data1"] + attrs = [ + { + "scale": scale, + "bias": bias, + "bias_after_scale": bias_after_scale, }, - "op_outputs": { - "Out": ["scale_output"] + { + "transpose_X": transpose_X, + "transpose_Y": transpose_Y, + "alpha": alpha, }, - "op_attrs": { - "scale": attrs[0]['scale'], - "bias": attrs[0]['bias'], - "bias_after_scale": attrs[0]['bias_after_scale'] + { + 'batch_size': batch_size, + 'channel': channel, + 'input_dim': input_dim, }, - }, { - "op_type": "matmul", - "op_inputs": { - "X": ["scale_output"], - "Y": ["input_data2"] + ] + + ops_config = [ + { + "op_type": "scale", + "op_inputs": {"X": ["input_data1"]}, + "op_outputs": {"Out": ["scale_output"]}, + "op_attrs": { + "scale": attrs[0]['scale'], + "bias": attrs[0]['bias'], + "bias_after_scale": attrs[0]['bias_after_scale'], + }, }, - "op_outputs": { - "Out": ["matmul_output"] + { + "op_type": "matmul", + "op_inputs": {"X": ["scale_output"], "Y": ["input_data2"]}, + "op_outputs": {"Out": ["matmul_output"]}, + "op_attrs": { + 'transpose_X': attrs[1]['transpose_X'], + 'transpose_Y': attrs[1]['transpose_Y'], + 'alpha': attrs[1]['alpha'], + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, }, - "op_attrs": { - 'transpose_X': attrs[1]['transpose_X'], - 'transpose_Y': attrs[1]['transpose_Y'], - 'alpha': attrs[1]['alpha'], - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - } - }] + ] ops = self.generate_op_config(ops_config) @@ -135,12 +148,15 @@ class TestScaleMatmulMkldnnFusePass(PassAutoScanTest): ops=ops, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, attrs, "x")), - "input_data2": - TensorConfig(data_gen=partial(generate_input, attrs, "y")) + "input_data1": TensorConfig( + data_gen=partial(generate_input, attrs, "x") + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, attrs, "y") + ), }, - outputs=["matmul_output"]) + outputs=["matmul_output"], + ) return program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shape_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shape_op.py index 92111062b1298e05e14c97e16ad5e9a211471fe9..15fae34372975d7b16d064cb7db0a59f590295f9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shape_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shape_op.py @@ -22,28 
+22,31 @@ import hypothesis.strategies as st class TestMkldnnShapeOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype( - kwargs['in_dtype']) + kwargs['in_dtype'] + ) - shape_op = OpConfig(type="shape", - inputs={"Input": ["input_data"]}, - outputs={"Out": ["output_data"]}) + shape_op = OpConfig( + type="shape", + inputs={"Input": ["input_data"]}, + outputs={"Out": ["output_data"]}, + ) program_config = ProgramConfig( ops=[shape_op], weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -51,10 +54,12 @@ class TestMkldnnShapeOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(in_shape=st.lists(st.integers(min_value=1, max_value=3), - min_size=1, - max_size=6), - in_dtype=st.sampled_from([np.float32, np.uint16, np.int8, np.uint8])) + @given( + in_shape=st.lists( + st.integers(min_value=1, max_value=3), min_size=1, max_size=6 + ), + in_dtype=st.sampled_from([np.float32, np.uint16, np.int8, np.uint8]), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_detect_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_detect_pass.py index 423bb0f0dbb455d5320f1878eba300e259b47865..b4d94c4e815199aecf719d31fb0347ea7fbdaa67 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_detect_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_detect_pass.py @@ -31,7 +31,6 @@ def product(input): class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: input_shape = program_config.inputs['input_data'].shape first_reshape2_shape = program_config.ops[0].attrs['shape'] @@ -43,18 +42,26 @@ class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): img_w = input_shape[-1] if shape_prod != product(first_reshape2_shape) or shape_prod != product( - second_reshape2_shape): + second_reshape2_shape + ): return False - if len(input_shape) != 4 or len(first_reshape2_shape) != 5 or len( - second_reshape2_shape) != 4: + if ( + len(input_shape) != 4 + or len(first_reshape2_shape) != 5 + or len(second_reshape2_shape) != 4 + ): return False if transpose2_axis != [0, 2, 1, 3, 4]: return False - if first_reshape2_shape[-1] != img_w or first_reshape2_shape[ - -2] != img_h: + if ( + first_reshape2_shape[-1] != img_w + or first_reshape2_shape[-2] != img_h + ): return False - if second_reshape2_shape[-1] != img_w or second_reshape2_shape[ - -2] != img_h: + if ( + second_reshape2_shape[-1] != img_w + or second_reshape2_shape[-2] != img_h + ): return False return True @@ -62,10 +69,12 @@ class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): def sample_program_config(self, draw): input_shape = draw(st.sampled_from([[128, 32, 32]])) first_reshape2_shape = draw( - st.sampled_from([[2, 64, 32, 32], [8, 16, 32, 32]])) + st.sampled_from([[2, 64, 32, 32], [8, 16, 32, 32]]) + ) transpose2_axis = draw(st.sampled_from([[0, 2, 1, 3, 4], [0, 2, 1, 3]])) second_reshape2_shape = 
draw( - st.sampled_from([[128, 32, 32], [128, 31, 32]])) + st.sampled_from([[128, 32, 32], [128, 31, 32]]) + ) batch_size = draw(st.integers(min_value=1, max_value=10)) input_shape.insert(0, batch_size) @@ -75,43 +84,37 @@ class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): def generate_input(): return np.random.random(input_shape).astype(np.float32) - ops_config = [{ - "op_type": "reshape2", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["first_reshape2_output"], - "XShape": ["first_reshape2_xshape"] - }, - "op_attrs": { - 'shape': first_reshape2_shape - }, - }, { - "op_type": "transpose2", - "op_inputs": { - "X": ["first_reshape2_output"] - }, - "op_outputs": { - "Out": ["transpose2_output"], - "XShape": ["transpose2_xshape"] - }, - "op_attrs": { - 'axis': transpose2_axis + ops_config = [ + { + "op_type": "reshape2", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": { + "Out": ["first_reshape2_output"], + "XShape": ["first_reshape2_xshape"], + }, + "op_attrs": {'shape': first_reshape2_shape}, }, - }, { - "op_type": "reshape2", - "op_inputs": { - "X": ["transpose2_output"], + { + "op_type": "transpose2", + "op_inputs": {"X": ["first_reshape2_output"]}, + "op_outputs": { + "Out": ["transpose2_output"], + "XShape": ["transpose2_xshape"], + }, + "op_attrs": {'axis': transpose2_axis}, }, - "op_outputs": { - "Out": ["output_data"], - "XShape": ["second_reshape2_xshape"] + { + "op_type": "reshape2", + "op_inputs": { + "X": ["transpose2_output"], + }, + "op_outputs": { + "Out": ["output_data"], + "XShape": ["second_reshape2_xshape"], + }, + "op_attrs": {'shape': second_reshape2_shape}, }, - "op_attrs": { - 'shape': second_reshape2_shape - } - }] + ] ops = self.generate_op_config(ops_config) @@ -121,7 +124,8 @@ class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["output_data"]) + outputs=["output_data"], + ) return program_config @@ -130,8 +134,9 @@ class TestShuffleChannelMKLDNNDetectPass(PassAutoScanTest): yield config, ["shuffle_channel"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["shuffle_channel_mkldnn_detect_pass"]) + self.run_and_statis( + quant=False, passes=["shuffle_channel_mkldnn_detect_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_op.py index d9050b58ee3805c8a18aa700ce13b89f406c065b..72dfe3f65611b8843c77b0e2f6df6d47f8ba8b3b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_shuffle_channel_op.py @@ -22,28 +22,30 @@ import hypothesis.strategies as st class TestMKLDNNShuffleChannelOp(MkldnnAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self, *args, **kwargs): - def generate_input(*args, **kwargs): return np.random.random(kwargs['in_shape']).astype(np.float32) - shuffle_channel_op = OpConfig(type="shuffle_channel", - inputs={"X": ["input_data"]}, - outputs={"Out": ["output_data"]}, - attrs={"group": kwargs['group']}) + shuffle_channel_op = OpConfig( + type="shuffle_channel", + inputs={"X": ["input_data"]}, + outputs={"Out": ["output_data"]}, + attrs={"group": kwargs['group']}, + ) program_config = ProgramConfig( ops=[shuffle_channel_op], weights={}, inputs={ - "input_data": - 
TensorConfig(data_gen=partial(generate_input, *args, **kwargs)), + "input_data": TensorConfig( + data_gen=partial(generate_input, *args, **kwargs) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config @@ -51,8 +53,10 @@ class TestMKLDNNShuffleChannelOp(MkldnnAutoScanTest): config = self.create_inference_config(use_mkldnn=True) yield config, (1e-5, 1e-5) - @given(group=st.sampled_from([1, 2, 8, 32, 128]), - in_shape=st.sampled_from([[5, 512, 2, 3], [2, 256, 5, 4]])) + @given( + group=st.sampled_from([1, 2, 8, 32, 128]), + in_shape=st.sampled_from([[5, 512, 2, 3], [2, 256, 5, 4]]), + ) def test(self, *args, **kwargs): self.run_test(quant=False, *args, **kwargs) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_softplus_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_softplus_activation_fuse_pass.py index 037234ccda1c531dd010dd1c9f6cbe456571909b..67445e18444fb2304d213800af3903c0ddb4fca7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_softplus_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_softplus_activation_fuse_pass.py @@ -28,17 +28,18 @@ class SoftplusActivationReluOneDNNFusePassTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 100, 100], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 100, 100], dtype="float32" + ) softplus_out = fluid.layers.softplus(data) if self.fuse_beta is not None: - activation_out = self.fuse_activation(softplus_out, - self.fuse_alpha, - self.fuse_beta) + activation_out = self.fuse_activation( + softplus_out, self.fuse_alpha, self.fuse_beta + ) elif self.fuse_alpha is not None: - activation_out = self.fuse_activation(softplus_out, - self.fuse_alpha) + activation_out = self.fuse_activation( + softplus_out, self.fuse_alpha + ) else: activation_out = self.fuse_activation(softplus_out) @@ -60,52 +61,52 @@ class SoftplusActivationReluOneDNNFusePassTest(InferencePassTest): class SoftplusActivationTanhOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.tanh class SoftplusActivationLeakyReluOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.leaky_relu self.fuse_alpha = 0.3 class SoftplusActivationSwishOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.swish self.fuse_alpha = 3 class SoftplusActivationHardSwishOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.hard_swish class SoftplusActivationSqrtOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.hard_swish class SoftplusActivationAbsOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.abs class SoftplusActivationClipOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + 
SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.clip self.fuse_alpha = 1.1 @@ -113,30 +114,30 @@ class SoftplusActivationClipOneDNNFusePassTest( class SoftplusActivationGeluErfOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.gelu class SoftplusActivationGeluTanhOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.gelu self.fuse_alpha = True # simulated "Approximate" attr class SoftplusActivationRelu6OneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.relu6 class SoftplusActivationSigmoidOneDNNFusePassTest( - SoftplusActivationReluOneDNNFusePassTest): - + SoftplusActivationReluOneDNNFusePassTest +): def set_params(self): self.fuse_activation = fluid.layers.sigmoid diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py index 54f6b60ff86d8aa87351854746827c0d4c169afd..ad0ba4c1fc9d5119c354fc7f7acefe841c19b043 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_gru_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestMulGruFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -43,83 +42,89 @@ class TestMulGruFusePass(PassAutoScanTest): def generate_weight(shape): return np.full(shape, 0.0001).astype(np.float32) - im2sequence_op = OpConfig(type="im2sequence", - inputs={"X": ["input_data"]}, - outputs={"Out": ["seq_out"]}, - attrs={ - "kernels": [6, 1], - "out_stride": [1, 1], - "paddings": [0, 0, 0, 0], - "strides": [1, 1] - }) - - mul_op = OpConfig(type="mul", - inputs={ - "X": ["seq_out"], - "Y": ["mul_weight"] - }, - outputs={"Out": ["mul_out"]}, - attrs={ - "x_num_col_dims": x_col, - "y_num_col_dims": y_col - }) + im2sequence_op = OpConfig( + type="im2sequence", + inputs={"X": ["input_data"]}, + outputs={"Out": ["seq_out"]}, + attrs={ + "kernels": [6, 1], + "out_stride": [1, 1], + "paddings": [0, 0, 0, 0], + "strides": [1, 1], + }, + ) + + mul_op = OpConfig( + type="mul", + inputs={"X": ["seq_out"], "Y": ["mul_weight"]}, + outputs={"Out": ["mul_out"]}, + attrs={"x_num_col_dims": x_col, "y_num_col_dims": y_col}, + ) if has_origin_mode: - gru_op = OpConfig(type="gru", - inputs={ - "Input": ["mul_out"], - "Weight": ["gru_weight"], - "Bias": ["gru_bias"] - }, - outputs={ - "BatchGate": ["batch_gate"], - "BatchHidden": ["batch_hidden"], - "BatchResetHiddenPrev": ["batch_reset"], - "Hidden": ["hidden"] - }, - attrs={ - 'activation': activation, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'is_test': True, - 'origin_mode': origin_mode - }) + gru_op = OpConfig( + type="gru", + inputs={ + "Input": ["mul_out"], + "Weight": ["gru_weight"], + "Bias": ["gru_bias"], + }, + outputs={ + "BatchGate": ["batch_gate"], + "BatchHidden": ["batch_hidden"], + "BatchResetHiddenPrev": ["batch_reset"], + "Hidden": ["hidden"], + }, + attrs={ + 'activation': activation, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'is_test': True, + 'origin_mode': origin_mode, + }, + ) else: - 
gru_op = OpConfig(type="gru", - inputs={ - "Input": ["mul_out"], - "Weight": ["gru_weight"], - "Bias": ["gru_bias"] - }, - outputs={ - "BatchGate": ["batch_gate"], - "BatchHidden": ["batch_hidden"], - "BatchResetHiddenPrev": ["batch_reset"], - "Hidden": ["hidden"] - }, - attrs={ - 'activation': activation, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'is_test': True - }) + gru_op = OpConfig( + type="gru", + inputs={ + "Input": ["mul_out"], + "Weight": ["gru_weight"], + "Bias": ["gru_bias"], + }, + outputs={ + "BatchGate": ["batch_gate"], + "BatchHidden": ["batch_hidden"], + "BatchResetHiddenPrev": ["batch_reset"], + "Hidden": ["hidden"], + }, + attrs={ + 'activation': activation, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'is_test': True, + }, + ) model_net = [im2sequence_op, mul_op, gru_op] program_config = ProgramConfig( ops=model_net, weights={ - "mul_weight": - TensorConfig(data_gen=partial(generate_weight, [768, 600])), - "gru_weight": - TensorConfig(data_gen=partial(generate_weight, [200, 600])), - "gru_bias": - TensorConfig(data_gen=partial(generate_weight, [1, 600])) + "mul_weight": TensorConfig( + data_gen=partial(generate_weight, [768, 600]) + ), + "gru_weight": TensorConfig( + data_gen=partial(generate_weight, [200, 600]) + ), + "gru_bias": TensorConfig( + data_gen=partial(generate_weight, [1, 600]) + ), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["hidden"]) + outputs=["hidden"], + ) return program_config @@ -128,9 +133,9 @@ class TestMulGruFusePass(PassAutoScanTest): yield config, ["im2sequence", "fusion_gru"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_duration=300, - passes=["mul_gru_fuse_pass"]) + self.run_and_statis( + quant=False, max_duration=300, passes=["mul_gru_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py index 2d65a7fad5e0e7e0615f74c310509bbd294633f1..3b6e72ecf43302fd991a2b921f854d4660cb5414 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mul_lstm_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestMulLstmFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -34,7 +33,8 @@ class TestMulLstmFusePass(PassAutoScanTest): gate_activation = draw(st.sampled_from(["sigmoid"])) cell_activation = draw(st.sampled_from(["tanh", "relu", "identity"])) candidate_activation = draw( - st.sampled_from(["tanh", "relu", "identity"])) + st.sampled_from(["tanh", "relu", "identity"]) + ) batch_size = draw(st.integers(min_value=1, max_value=40)) def generate_input(): @@ -44,47 +44,47 @@ class TestMulLstmFusePass(PassAutoScanTest): def generate_weight(shape): return np.full(shape, 0.0001).astype(np.float32) - im2sequence_op = OpConfig(type="im2sequence", - inputs={"X": ["input_data"]}, - outputs={"Out": ["seq_out"]}, - attrs={ - "kernels": [6, 1], - "out_stride": [1, 1], - "paddings": [0, 0, 0, 0], - "strides": [1, 1] - }) - - mul_op = OpConfig(type="mul", - inputs={ - "X": ["seq_out"], - "Y": ["mul_weight"] - }, - outputs={"Out": ["mul_out"]}, - attrs={ - "x_num_col_dims": x_col, - "y_num_col_dims": y_col - }) - - lstm_op = OpConfig(type="lstm", - inputs={ - "Input": ["mul_out"], - "Weight": ["lstm_weight"], - "Bias": ["lstm_bias"] - 
}, - outputs={ - "Hidden": ["lstm_hidden"], - "Cell": ["lstm_cell"], - "BatchGate": ["lstm_gate"], - "BatchCellPreAct": ["lstm_batch_cell"] - }, - attrs={ - 'use_peepholes': use_peepholes, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'cell_activation': cell_activation, - 'candidate_activation': candidate_activation, - 'is_test': True - }) + im2sequence_op = OpConfig( + type="im2sequence", + inputs={"X": ["input_data"]}, + outputs={"Out": ["seq_out"]}, + attrs={ + "kernels": [6, 1], + "out_stride": [1, 1], + "paddings": [0, 0, 0, 0], + "strides": [1, 1], + }, + ) + + mul_op = OpConfig( + type="mul", + inputs={"X": ["seq_out"], "Y": ["mul_weight"]}, + outputs={"Out": ["mul_out"]}, + attrs={"x_num_col_dims": x_col, "y_num_col_dims": y_col}, + ) + + lstm_op = OpConfig( + type="lstm", + inputs={ + "Input": ["mul_out"], + "Weight": ["lstm_weight"], + "Bias": ["lstm_bias"], + }, + outputs={ + "Hidden": ["lstm_hidden"], + "Cell": ["lstm_cell"], + "BatchGate": ["lstm_gate"], + "BatchCellPreAct": ["lstm_batch_cell"], + }, + attrs={ + 'use_peepholes': use_peepholes, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation, + 'is_test': True, + }, + ) model_net = [im2sequence_op, mul_op, lstm_op] @@ -96,17 +96,21 @@ class TestMulLstmFusePass(PassAutoScanTest): program_config = ProgramConfig( ops=model_net, weights={ - "mul_weight": - TensorConfig(data_gen=partial(generate_weight, [768, 600])), - "lstm_weight": - TensorConfig(data_gen=partial(generate_weight, [150, 600])), - "lstm_bias": - TensorConfig(data_gen=partial(generate_weight, lstm_bias_shape)) + "mul_weight": TensorConfig( + data_gen=partial(generate_weight, [768, 600]) + ), + "lstm_weight": TensorConfig( + data_gen=partial(generate_weight, [150, 600]) + ), + "lstm_bias": TensorConfig( + data_gen=partial(generate_weight, lstm_bias_shape) + ), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["lstm_hidden"]) + outputs=["lstm_hidden"], + ) return program_config @@ -115,9 +119,9 @@ class TestMulLstmFusePass(PassAutoScanTest): yield config, ["im2sequence", "fusion_lstm"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_duration=300, - passes=["mul_lstm_fuse_pass"]) + self.run_and_statis( + quant=False, max_duration=300, passes=["mul_lstm_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_multihead_matmul_fuse_pass_v3.py b/python/paddle/fluid/tests/unittests/ir/inference/test_multihead_matmul_fuse_pass_v3.py index c59da1a8913ea69f03a546673e437ef94de7c14e..25f1c551709b50be398bb716e3f5217b9317c7c6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_multihead_matmul_fuse_pass_v3.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_multihead_matmul_fuse_pass_v3.py @@ -20,171 +20,190 @@ import unittest class TestMultiheadMatmulFusePass(PassAutoScanTest): - def sample_predictor_configs(self, program_config): # gpu config = self.create_inference_config(use_gpu=True) yield config, ["multihead_matmul", "mul"], (1e-2, 1e-3) def sample_program_config(self, draw): - def generate_mul_input(): return np.random.random([1, 128, 768]).astype(np.float32) - 0.5 def generate_elewise_input(): return np.random.random([1, 12, 128, 128]).astype(np.float32) - mul_0 = OpConfig("mul", - inputs={ - "X": ["mul_x"], - "Y": ["mul_0_w"] - }, - outputs={"Out": ["mul_0_out"]}, - x_num_col_dims=2, - y_num_col_dims=1) - mul_1 
= OpConfig("mul", - inputs={ - "X": ["mul_x"], - "Y": ["mul_1_w"] - }, - outputs={"Out": ["mul_1_out"]}, - x_num_col_dims=2, - y_num_col_dims=1) - mul_2 = OpConfig("mul", - inputs={ - "X": ["mul_x"], - "Y": ["mul_2_w"] - }, - outputs={"Out": ["mul_2_out"]}, - x_num_col_dims=2, - y_num_col_dims=1) - ele_0 = OpConfig("elementwise_add", - inputs={ - "X": [mul_0.outputs["Out"][0]], - "Y": ["ele_0_w"] - }, - outputs={"Out": ["ele_0_out"]}, - axis=-1) - ele_1 = OpConfig("elementwise_add", - inputs={ - "X": [mul_1.outputs["Out"][0]], - "Y": ["ele_1_w"] - }, - outputs={"Out": ["ele_1_out"]}, - axis=-1) - ele_2 = OpConfig("elementwise_add", - inputs={ - "X": [mul_2.outputs["Out"][0]], - "Y": ["ele_2_w"] - }, - outputs={"Out": ["ele_2_out"]}, - axis=-1) - reshape_0 = OpConfig("reshape2", - inputs={"X": [ele_0.outputs["Out"][0]]}, - outputs={ - "Out": ["reshape_0_out"], - "XShape": ["reshape_0_Xout"] - }, - shape=(1, 128, 12, 64)) - reshape_1 = OpConfig("reshape2", - inputs={"X": [ele_1.outputs["Out"][0]]}, - outputs={ - "Out": ["reshape_1_out"], - "XShape": ["reshape_1_Xout"] - }, - shape=(1, 128, 12, 64)) - reshape_2 = OpConfig("reshape2", - inputs={"X": [ele_2.outputs["Out"][0]]}, - outputs={ - "Out": ["reshape_2_out"], - "XShape": ["reshape_2_Xout"] - }, - shape=(1, 128, 12, 64)) - transpose_0 = OpConfig("transpose2", - inputs={"X": [reshape_0.outputs["Out"][0]]}, - outputs={"Out": ["transpose_0_out"]}, - axis=(0, 2, 1, 3)) - transpose_1 = OpConfig("transpose2", - inputs={"X": [reshape_1.outputs["Out"][0]]}, - outputs={"Out": ["transpose_1_out"]}, - axis=(0, 2, 3, 1)) - transpose_2 = OpConfig("transpose2", - inputs={"X": [reshape_2.outputs["Out"][0]]}, - outputs={"Out": ["transpose_2_out"]}, - axis=(0, 2, 1, 3)) - matmul_0 = OpConfig("matmul", - inputs={ - "X": [transpose_0.outputs["Out"][0]], - "Y": [transpose_1.outputs["Out"][0]] - }, - outputs={"Out": ["matmul_0_out"]}, - alpha=0.125, - transpose_X=False, - transpose_Y=False, - fused_reshape_Out=[], - fused_reshape_X=[], - fused_reshape_Y=[], - fused_transpose_Out=[], - fused_transpose_X=[], - fused_transpose_Y=[]) - ele_3 = OpConfig("elementwise_add", - inputs={ - "X": [matmul_0.outputs["Out"][0]], - "Y": ["eltadd_qk_b_var"] - }, - outputs={"Out": ["ele_3_out"]}, - axis=-1) - softmax_op = OpConfig("softmax", - inputs={"X": [ele_3.outputs["Out"][0]]}, - outputs={"Out": ["softmax_out"]}, - axis=3, - is_test=True) - matmul_1 = OpConfig("matmul", - inputs={ - "X": [softmax_op.outputs["Out"][0]], - "Y": [transpose_2.outputs["Out"][0]] - }, - outputs={"Out": ["matmul_1_out"]}, - alpha=1.0, - transpose_X=False, - transpose_Y=False, - fused_reshape_Out=[], - fused_reshape_X=[], - fused_reshape_Y=[], - fused_transpose_Out=[], - fused_transpose_X=[], - fused_transpose_Y=[]) - transpose_3 = OpConfig("transpose2", - inputs={"X": [matmul_1.outputs["Out"][0]]}, - outputs={"Out": ["transpose_3_out"]}, - axis=(0, 2, 1, 3)) - reshape_3 = OpConfig("reshape2", - inputs={"X": [transpose_3.outputs["Out"][0]]}, - outputs={ - "Out": ["reshape_3_out"], - "XShape": ["reshape_3_Xout"] - }, - shape=(1, 128, 768)) - mul_3 = OpConfig("mul", - inputs={ - "X": [reshape_3.outputs["Out"][0]], - "Y": ["mul_3_w"] - }, - outputs={"Out": ["mul_3_out"]}, - x_num_col_dims=2, - y_num_col_dims=1) + mul_0 = OpConfig( + "mul", + inputs={"X": ["mul_x"], "Y": ["mul_0_w"]}, + outputs={"Out": ["mul_0_out"]}, + x_num_col_dims=2, + y_num_col_dims=1, + ) + mul_1 = OpConfig( + "mul", + inputs={"X": ["mul_x"], "Y": ["mul_1_w"]}, + outputs={"Out": ["mul_1_out"]}, + x_num_col_dims=2, + 
y_num_col_dims=1, + ) + mul_2 = OpConfig( + "mul", + inputs={"X": ["mul_x"], "Y": ["mul_2_w"]}, + outputs={"Out": ["mul_2_out"]}, + x_num_col_dims=2, + y_num_col_dims=1, + ) + ele_0 = OpConfig( + "elementwise_add", + inputs={"X": [mul_0.outputs["Out"][0]], "Y": ["ele_0_w"]}, + outputs={"Out": ["ele_0_out"]}, + axis=-1, + ) + ele_1 = OpConfig( + "elementwise_add", + inputs={"X": [mul_1.outputs["Out"][0]], "Y": ["ele_1_w"]}, + outputs={"Out": ["ele_1_out"]}, + axis=-1, + ) + ele_2 = OpConfig( + "elementwise_add", + inputs={"X": [mul_2.outputs["Out"][0]], "Y": ["ele_2_w"]}, + outputs={"Out": ["ele_2_out"]}, + axis=-1, + ) + reshape_0 = OpConfig( + "reshape2", + inputs={"X": [ele_0.outputs["Out"][0]]}, + outputs={"Out": ["reshape_0_out"], "XShape": ["reshape_0_Xout"]}, + shape=(1, 128, 12, 64), + ) + reshape_1 = OpConfig( + "reshape2", + inputs={"X": [ele_1.outputs["Out"][0]]}, + outputs={"Out": ["reshape_1_out"], "XShape": ["reshape_1_Xout"]}, + shape=(1, 128, 12, 64), + ) + reshape_2 = OpConfig( + "reshape2", + inputs={"X": [ele_2.outputs["Out"][0]]}, + outputs={"Out": ["reshape_2_out"], "XShape": ["reshape_2_Xout"]}, + shape=(1, 128, 12, 64), + ) + transpose_0 = OpConfig( + "transpose2", + inputs={"X": [reshape_0.outputs["Out"][0]]}, + outputs={"Out": ["transpose_0_out"]}, + axis=(0, 2, 1, 3), + ) + transpose_1 = OpConfig( + "transpose2", + inputs={"X": [reshape_1.outputs["Out"][0]]}, + outputs={"Out": ["transpose_1_out"]}, + axis=(0, 2, 3, 1), + ) + transpose_2 = OpConfig( + "transpose2", + inputs={"X": [reshape_2.outputs["Out"][0]]}, + outputs={"Out": ["transpose_2_out"]}, + axis=(0, 2, 1, 3), + ) + matmul_0 = OpConfig( + "matmul", + inputs={ + "X": [transpose_0.outputs["Out"][0]], + "Y": [transpose_1.outputs["Out"][0]], + }, + outputs={"Out": ["matmul_0_out"]}, + alpha=0.125, + transpose_X=False, + transpose_Y=False, + fused_reshape_Out=[], + fused_reshape_X=[], + fused_reshape_Y=[], + fused_transpose_Out=[], + fused_transpose_X=[], + fused_transpose_Y=[], + ) + ele_3 = OpConfig( + "elementwise_add", + inputs={ + "X": [matmul_0.outputs["Out"][0]], + "Y": ["eltadd_qk_b_var"], + }, + outputs={"Out": ["ele_3_out"]}, + axis=-1, + ) + softmax_op = OpConfig( + "softmax", + inputs={"X": [ele_3.outputs["Out"][0]]}, + outputs={"Out": ["softmax_out"]}, + axis=3, + is_test=True, + ) + matmul_1 = OpConfig( + "matmul", + inputs={ + "X": [softmax_op.outputs["Out"][0]], + "Y": [transpose_2.outputs["Out"][0]], + }, + outputs={"Out": ["matmul_1_out"]}, + alpha=1.0, + transpose_X=False, + transpose_Y=False, + fused_reshape_Out=[], + fused_reshape_X=[], + fused_reshape_Y=[], + fused_transpose_Out=[], + fused_transpose_X=[], + fused_transpose_Y=[], + ) + transpose_3 = OpConfig( + "transpose2", + inputs={"X": [matmul_1.outputs["Out"][0]]}, + outputs={"Out": ["transpose_3_out"]}, + axis=(0, 2, 1, 3), + ) + reshape_3 = OpConfig( + "reshape2", + inputs={"X": [transpose_3.outputs["Out"][0]]}, + outputs={"Out": ["reshape_3_out"], "XShape": ["reshape_3_Xout"]}, + shape=(1, 128, 768), + ) + mul_3 = OpConfig( + "mul", + inputs={"X": [reshape_3.outputs["Out"][0]], "Y": ["mul_3_w"]}, + outputs={"Out": ["mul_3_out"]}, + x_num_col_dims=2, + y_num_col_dims=1, + ) ops = [ - mul_0, mul_1, mul_2, ele_0, ele_1, ele_2, reshape_0, reshape_1, - reshape_2, transpose_0, transpose_1, transpose_2, matmul_0, ele_3, - softmax_op, matmul_1, transpose_3, reshape_3, mul_3 + mul_0, + mul_1, + mul_2, + ele_0, + ele_1, + ele_2, + reshape_0, + reshape_1, + reshape_2, + transpose_0, + transpose_1, + transpose_2, + matmul_0, + ele_3, + 
softmax_op, + matmul_1, + transpose_3, + reshape_3, + mul_3, ] program_config = ProgramConfig( ops=ops, inputs={ - "mul_x": - TensorConfig(data_gen=partial(generate_mul_input)), - "eltadd_qk_b_var": - TensorConfig(data_gen=partial(generate_elewise_input)) + "mul_x": TensorConfig(data_gen=partial(generate_mul_input)), + "eltadd_qk_b_var": TensorConfig( + data_gen=partial(generate_elewise_input) + ), }, weights={ "mul_0_w": TensorConfig(shape=[768, 768]), @@ -193,16 +212,19 @@ class TestMultiheadMatmulFusePass(PassAutoScanTest): "mul_3_w": TensorConfig(shape=[768, 768]), "ele_0_w": TensorConfig(shape=[768]), "ele_1_w": TensorConfig(shape=[768]), - "ele_2_w": TensorConfig(shape=[768]) + "ele_2_w": TensorConfig(shape=[768]), }, - outputs=[ops[-1].outputs["Out"][0]]) + outputs=[ops[-1].outputs["Out"][0]], + ) return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=100, - min_success_num=1, - passes=["multihead_matmul_fuse_pass_v3"]) + self.run_and_statis( + quant=False, + max_examples=100, + min_success_num=1, + passes=["multihead_matmul_fuse_pass_v3"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_onednn_fc_activation_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_onednn_fc_activation_fuse_pass.py index 4e798a0ed57d5338ab8e6c0b7a3c7f622d6ff57d..770aac84ec0d3c015c1170bd498b60834243788e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_onednn_fc_activation_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_onednn_fc_activation_fuse_pass.py @@ -21,32 +21,46 @@ import hypothesis.strategies as st class TestFCActivationOneDNNFusePass(PassAutoScanTest): - def sample_program_config(self, draw): fc_in = draw(st.sampled_from([32, 64])) fc_wei = draw(st.sampled_from([64])) activation_type = draw( - st.sampled_from([ - 'relu', 'gelu', 'swish', 'mish', 'sqrt', 'hard_swish', - 'sigmoid', 'abs', 'relu6', 'clip', 'tanh', 'hard_sigmoid', - 'leaky_relu' - ])) + st.sampled_from( + [ + 'relu', + 'gelu', + 'swish', + 'mish', + 'sqrt', + 'hard_swish', + 'sigmoid', + 'abs', + 'relu6', + 'clip', + 'tanh', + 'hard_sigmoid', + 'leaky_relu', + ] + ) + ) def generate_input(shape): return np.random.random(shape).astype(np.float32) - fc_op = OpConfig(type="fc", - inputs={ - "Input": ["fc_input"], - "W": ["fc_weight"], - "Bias": ["fc_bias"] - }, - outputs={"Out": ["fc_output"]}, - attrs={ - "use_mkldnn": True, - "padding_weights": False, - "in_num_col_dims": 1, - }) + fc_op = OpConfig( + type="fc", + inputs={ + "Input": ["fc_input"], + "W": ["fc_weight"], + "Bias": ["fc_bias"], + }, + outputs={"Out": ["fc_output"]}, + attrs={ + "use_mkldnn": True, + "padding_weights": False, + "in_num_col_dims": 1, + }, + ) if activation_type == "clip": activation_op = OpConfig( @@ -54,58 +68,69 @@ class TestFCActivationOneDNNFusePass(PassAutoScanTest): inputs={"X": ["fc_output"]}, outputs={"Out": ["activation_output"]}, min=draw(st.floats(min_value=0.1, max_value=0.49)), - max=draw(st.floats(min_value=0.5, max_value=1.0))) + max=draw(st.floats(min_value=0.5, max_value=1.0)), + ) elif activation_type == "gelu": - activation_op = OpConfig(activation_type, - inputs={"X": ["fc_output"]}, - outputs={"Out": ["activation_output"]}, - approximate=draw(st.booleans())) + activation_op = OpConfig( + activation_type, + inputs={"X": ["fc_output"]}, + outputs={"Out": ["activation_output"]}, + approximate=draw(st.booleans()), + ) elif activation_type == "leaky_relu": - activation_op = OpConfig(activation_type, - 
inputs={"X": ["fc_output"]}, - outputs={"Out": ["activation_output"]}, - alpha=draw( - st.floats(min_value=0.1, - max_value=1.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["fc_output"]}, + outputs={"Out": ["activation_output"]}, + alpha=draw(st.floats(min_value=0.1, max_value=1.0)), + ) elif activation_type == "relu6": - activation_op = OpConfig(activation_type, - inputs={"X": ["fc_output"]}, - outputs={"Out": ["activation_output"]}, - threshold=6) + activation_op = OpConfig( + activation_type, + inputs={"X": ["fc_output"]}, + outputs={"Out": ["activation_output"]}, + threshold=6, + ) elif activation_type == "swish": - activation_op = OpConfig(activation_type, - inputs={"X": ["fc_output"]}, - outputs={"Out": ["activation_output"]}, - beta=draw( - st.floats(min_value=0.1, - max_value=10.0))) + activation_op = OpConfig( + activation_type, + inputs={"X": ["fc_output"]}, + outputs={"Out": ["activation_output"]}, + beta=draw(st.floats(min_value=0.1, max_value=10.0)), + ) else: - activation_op = OpConfig(activation_type, - inputs={"X": ["fc_output"]}, - outputs={"Out": ["activation_output"]}) + activation_op = OpConfig( + activation_type, + inputs={"X": ["fc_output"]}, + outputs={"Out": ["activation_output"]}, + ) model_net = [fc_op, activation_op] program_config = ProgramConfig( ops=model_net, weights={ - "fc_weight": - TensorConfig( - data_gen=partial(generate_input, [fc_wei, fc_wei])), - "fc_bias": - TensorConfig(data_gen=partial(generate_input, [fc_wei])), + "fc_weight": TensorConfig( + data_gen=partial(generate_input, [fc_wei, fc_wei]) + ), + "fc_bias": TensorConfig( + data_gen=partial(generate_input, [fc_wei]) + ), }, inputs={ - "fc_input": - TensorConfig(data_gen=partial(generate_input, [fc_in, fc_wei])) + "fc_input": TensorConfig( + data_gen=partial(generate_input, [fc_in, fc_wei]) + ) }, - outputs=["activation_output"]) + outputs=["activation_output"], + ) return program_config def sample_predictor_configs(self, program_config): config = self.create_inference_config( - use_mkldnn=True, passes=["fc_act_mkldnn_fuse_pass"]) + use_mkldnn=True, passes=["fc_act_mkldnn_fuse_pass"] + ) yield config, ["fc"], (1e-5, 1e-5) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_repeated_fc_relu_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_repeated_fc_relu_fuse_pass.py index f0cb8797f4ef08580788df5dba4c08ab4585fa4f..1334407cfaf1002fe0aa63ab5707eff3deedeede 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_repeated_fc_relu_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_repeated_fc_relu_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestRepeatedFcReluFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,82 +38,77 @@ class TestRepeatedFcReluFusePass(PassAutoScanTest): def generate_weight(shape): return np.random.random(shape).astype(np.float32) - attrs = [{ - "x_col": x_col, - "y_col": y_col - }, { - "axis": axis - }, { - 'batch_size': batch_size, - 'dim': dim - }] - - mul_op1 = OpConfig(type="mul", - inputs={ - "X": ["input_data"], - "Y": ["mul1_weight"] - }, - outputs={"Out": ["mul1_output"]}, - attrs={ - "x_num_col_dims": x_col, - "y_num_col_dims": y_col - }) - - elt_op1 = OpConfig(type="elementwise_add", - inputs={ - "X": ["mul1_output"], - "Y": ["elementwise1_weight"] - }, - outputs={"Out": ["elementwise1_output"]}, - attrs={"axis": axis}) - - relu_op1 = OpConfig(type="relu", - inputs={"X": 
["elementwise1_output"]}, - outputs={"Out": ["relu1_output"]}, - attrs={}) - - mul_op2 = OpConfig(type="mul", - inputs={ - "X": ["relu1_output"], - "Y": ["mul2_weight"] - }, - outputs={"Out": ["mul2_output"]}, - attrs={ - "x_num_col_dims": x_col, - "y_num_col_dims": y_col - }) - - elt_op2 = OpConfig(type="elementwise_add", - inputs={ - "X": ["mul2_output"], - "Y": ["elementwise2_weight"] - }, - outputs={"Out": ["elementwise2_output"]}, - attrs={"axis": axis}) - - relu_op2 = OpConfig(type="relu", - inputs={"X": ["elementwise2_output"]}, - outputs={"Out": ["relu2_output"]}, - attrs={}) + attrs = [ + {"x_col": x_col, "y_col": y_col}, + {"axis": axis}, + {'batch_size': batch_size, 'dim': dim}, + ] + + mul_op1 = OpConfig( + type="mul", + inputs={"X": ["input_data"], "Y": ["mul1_weight"]}, + outputs={"Out": ["mul1_output"]}, + attrs={"x_num_col_dims": x_col, "y_num_col_dims": y_col}, + ) + + elt_op1 = OpConfig( + type="elementwise_add", + inputs={"X": ["mul1_output"], "Y": ["elementwise1_weight"]}, + outputs={"Out": ["elementwise1_output"]}, + attrs={"axis": axis}, + ) + + relu_op1 = OpConfig( + type="relu", + inputs={"X": ["elementwise1_output"]}, + outputs={"Out": ["relu1_output"]}, + attrs={}, + ) + + mul_op2 = OpConfig( + type="mul", + inputs={"X": ["relu1_output"], "Y": ["mul2_weight"]}, + outputs={"Out": ["mul2_output"]}, + attrs={"x_num_col_dims": x_col, "y_num_col_dims": y_col}, + ) + + elt_op2 = OpConfig( + type="elementwise_add", + inputs={"X": ["mul2_output"], "Y": ["elementwise2_weight"]}, + outputs={"Out": ["elementwise2_output"]}, + attrs={"axis": axis}, + ) + + relu_op2 = OpConfig( + type="relu", + inputs={"X": ["elementwise2_output"]}, + outputs={"Out": ["relu2_output"]}, + attrs={}, + ) model_net = [mul_op1, elt_op1, relu_op1, mul_op2, elt_op2, relu_op2] program_config = ProgramConfig( ops=model_net, weights={ - "mul1_weight": - TensorConfig(data_gen=partial(generate_weight, [dim, 32])), - "mul2_weight": - TensorConfig(data_gen=partial(generate_weight, [32, 128])), - "elementwise1_weight": - TensorConfig(data_gen=partial(generate_weight, [32])), - "elementwise2_weight": - TensorConfig(data_gen=partial(generate_weight, [128])) + "mul1_weight": TensorConfig( + data_gen=partial(generate_weight, [dim, 32]) + ), + "mul2_weight": TensorConfig( + data_gen=partial(generate_weight, [32, 128]) + ), + "elementwise1_weight": TensorConfig( + data_gen=partial(generate_weight, [32]) + ), + "elementwise2_weight": TensorConfig( + data_gen=partial(generate_weight, [128]) + ), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)), }, - outputs=["relu2_output"]) + outputs=["relu2_output"], + ) return program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py index 7de2491e693710e7111e1f483a8469e4f0567942..8efe56ed222fc458470bcdfc0234559366152423 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py @@ -44,9 +44,10 @@ class TestReshape2MatmulFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape and attr of reshape2 reshape = draw( - st.lists(st.integers(min_value=1, max_value=10), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=10), min_size=2, max_size=2 + ) + ) x_shape = reshape + [1, 1] # 2. 
Generate attr:transpose_X/transpose_Y/alpha of matmul @@ -56,9 +57,10 @@ class TestReshape2MatmulFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of matmul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = x_shape[1] # 4. Generate legal attr:axis of elementwise_add @@ -91,17 +93,11 @@ class TestReshape2MatmulFusePass(PassAutoScanTest): "X": ["reshape2_x"], }, shape=reshape, - outputs={ - "Out": ["reshape2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["reshape2_out"], "XShape": ["xshape"]}, ) matmul_op = OpConfig( "matmul", - inputs={ - "X": ["reshape2_out"], - "Y": ["matmul_y"] - }, + inputs={"X": ["reshape2_out"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -116,10 +112,7 @@ class TestReshape2MatmulFusePass(PassAutoScanTest): add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["matmul_out"], - "Y": ["bias"] - }, + inputs={"X": ["matmul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) @@ -152,10 +145,12 @@ class TestReshape2MatmulFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - max_duration=1000, - passes=["gpu_cpu_reshape2_matmul_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=50, + max_duration=1000, + passes=["gpu_cpu_reshape2_matmul_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_seq_concat_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_seq_concat_fc_fuse_pass.py index 53dc65f84b5deab9d3ac2a5dd1271e3dc888a8e2..52153788abedfcd0dc8c8be5c409caee1e5c2be3 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_seq_concat_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_seq_concat_fc_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestSeqConcatFcFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -44,83 +43,83 @@ class TestSeqConcatFcFusePass(PassAutoScanTest): def generate_weight(shape): return np.random.random(shape).astype(np.float32) - sequence_expand_op1 = OpConfig(type="sequence_expand", - inputs={ - "X": ["input_data1"], - "Y": ["input_data2"] - }, - outputs={"Out": ["seq_exp1_out"]}, - attrs={"ref_level": ref_level}) - - sequence_expand_op2 = OpConfig(type="sequence_expand", - inputs={ - "X": ["input_data1"], - "Y": ["input_data3"] - }, - outputs={"Out": ["seq_exp2_out"]}, - attrs={"ref_level": ref_level}) + sequence_expand_op1 = OpConfig( + type="sequence_expand", + inputs={"X": ["input_data1"], "Y": ["input_data2"]}, + outputs={"Out": ["seq_exp1_out"]}, + attrs={"ref_level": ref_level}, + ) + + sequence_expand_op2 = OpConfig( + type="sequence_expand", + inputs={"X": ["input_data1"], "Y": ["input_data3"]}, + outputs={"Out": ["seq_exp2_out"]}, + attrs={"ref_level": ref_level}, + ) concat_op = OpConfig( type="concat", inputs={"X": ["input_data1", "seq_exp1_out", "seq_exp2_out"]}, outputs={"Out": ["concat_output"]}, - attrs={'axis': axis1}) - - mul_op = OpConfig(type="mul", - inputs={ - "X": ["concat_output"], - "Y": ["mul_weight"] - }, - outputs={"Out": ["mul_out"]}, - attrs={ - "x_num_col_dims": x_col, - "y_num_col_dims": y_col - }) - - elt_op = OpConfig(type="elementwise_add", - inputs={ - "X": ["mul_out"], - "Y": ["elt_weight"] - }, - outputs={"Out": 
["elt_out"]}, - attrs={"axis": axis2}) - - act_op = OpConfig(type=act_type, - inputs={"X": ["elt_out"]}, - outputs={"Out": ["act_out"]}, - attrs={ - "use_cudnn": use_cudnn, - "use_mkldnn": use_mkldnn - }) + attrs={'axis': axis1}, + ) + + mul_op = OpConfig( + type="mul", + inputs={"X": ["concat_output"], "Y": ["mul_weight"]}, + outputs={"Out": ["mul_out"]}, + attrs={"x_num_col_dims": x_col, "y_num_col_dims": y_col}, + ) + + elt_op = OpConfig( + type="elementwise_add", + inputs={"X": ["mul_out"], "Y": ["elt_weight"]}, + outputs={"Out": ["elt_out"]}, + attrs={"axis": axis2}, + ) + + act_op = OpConfig( + type=act_type, + inputs={"X": ["elt_out"]}, + outputs={"Out": ["act_out"]}, + attrs={"use_cudnn": use_cudnn, "use_mkldnn": use_mkldnn}, + ) model_net = [ - sequence_expand_op1, sequence_expand_op2, concat_op, mul_op, elt_op, - act_op + sequence_expand_op1, + sequence_expand_op2, + concat_op, + mul_op, + elt_op, + act_op, ] program_config = ProgramConfig( ops=model_net, weights={ - "mul_weight": - TensorConfig(data_gen=partial(generate_weight, [384, dim])), - "elt_weight": - TensorConfig(data_gen=partial(generate_weight, [dim])) + "mul_weight": TensorConfig( + data_gen=partial(generate_weight, [384, dim]) + ), + "elt_weight": TensorConfig( + data_gen=partial(generate_weight, [dim]) + ), }, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, - [batch_size, 128]), - lod=[[0, 1]]), - "input_data2": - TensorConfig(data_gen=partial(generate_input, - [batch_size, 128]), - lod=[[0, 1]]), - "input_data3": - TensorConfig(data_gen=partial(generate_input, - [batch_size, 128]), - lod=[[0, 1]]) + "input_data1": TensorConfig( + data_gen=partial(generate_input, [batch_size, 128]), + lod=[[0, 1]], + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, [batch_size, 128]), + lod=[[0, 1]], + ), + "input_data3": TensorConfig( + data_gen=partial(generate_input, [batch_size, 128]), + lod=[[0, 1]], + ), }, - outputs=["act_out"]) + outputs=["act_out"], + ) return program_config @@ -129,15 +128,15 @@ class TestSeqConcatFcFusePass(PassAutoScanTest): yield config, ["fusion_seqexpand_concat_fc"], (1e-5, 1e-5) def add_ignore_pass_case(self): - def teller1(program_config, predictor_config): if program_config.ops[-1].type == "relu": return True return False self.add_ignore_check_case( - teller1, IgnoreReasons.PASS_ACCURACY_ERROR, - "The pass output has diff in a specific case. We need to fix it as soon as possible." + teller1, + IgnoreReasons.PASS_ACCURACY_ERROR, + "The pass output has diff in a specific case. 
We need to fix it as soon as possible.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_seqconv_eltadd_relu_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_seqconv_eltadd_relu_fuse_pass.py index ee5bb892b942e205bd98573c131412f811ebffc4..08c95df169bac7cde5fe237653878a795e7eff50 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_seqconv_eltadd_relu_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_seqconv_eltadd_relu_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestSeqconvEltaddReluFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -41,71 +40,80 @@ class TestSeqconvEltaddReluFusePass(PassAutoScanTest): def generate_weight(shape): return np.random.random(shape).astype(np.float32) - im2sequence_op = OpConfig(type="im2sequence", - inputs={"X": ["input_data"]}, - outputs={"Out": ["seq_out"]}, - attrs={ - "kernels": [6, 1], - "out_stride": [1, 1], - "paddings": [0, 0, 0, 0], - "strides": [1, 1] - }) - - sequence_conv_op = OpConfig(type="sequence_conv", - inputs={ - "X": ["seq_out"], - "Filter": ["conv_weight"] - }, - outputs={"Out": ["conv_out"]}, - attrs={ - "contextLength": contextLength, - "contextStart": contextStart, - "contextStride": contextStride, - "paddingTrainable": paddingTrainable - }) - - elementwise_add_op = OpConfig(type="elementwise_add", - inputs={ - "X": ["conv_out"], - "Y": ["elt_weight"] - }, - outputs={"Out": ["elt_output"]}, - attrs={'axis': axis}) - - relu_op = OpConfig(type="relu", - inputs={"X": ["elt_output"]}, - outputs={"Out": ["relu_output"]}, - attrs={}) + im2sequence_op = OpConfig( + type="im2sequence", + inputs={"X": ["input_data"]}, + outputs={"Out": ["seq_out"]}, + attrs={ + "kernels": [6, 1], + "out_stride": [1, 1], + "paddings": [0, 0, 0, 0], + "strides": [1, 1], + }, + ) + + sequence_conv_op = OpConfig( + type="sequence_conv", + inputs={"X": ["seq_out"], "Filter": ["conv_weight"]}, + outputs={"Out": ["conv_out"]}, + attrs={ + "contextLength": contextLength, + "contextStart": contextStart, + "contextStride": contextStride, + "paddingTrainable": paddingTrainable, + }, + ) + + elementwise_add_op = OpConfig( + type="elementwise_add", + inputs={"X": ["conv_out"], "Y": ["elt_weight"]}, + outputs={"Out": ["elt_output"]}, + attrs={'axis': axis}, + ) + + relu_op = OpConfig( + type="relu", + inputs={"X": ["elt_output"]}, + outputs={"Out": ["relu_output"]}, + attrs={}, + ) model_net = [ - im2sequence_op, sequence_conv_op, elementwise_add_op, relu_op + im2sequence_op, + sequence_conv_op, + elementwise_add_op, + relu_op, ] program_config = ProgramConfig( ops=model_net, weights={ - "conv_weight": - TensorConfig( - data_gen=partial(generate_weight, [768 * - contextLength, 16])), - "elt_weight": - TensorConfig(data_gen=partial(generate_weight, [16])) + "conv_weight": TensorConfig( + data_gen=partial(generate_weight, [768 * contextLength, 16]) + ), + "elt_weight": TensorConfig( + data_gen=partial(generate_weight, [16]) + ), }, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input)) }, - outputs=["relu_output"]) + outputs=["relu_output"], + ) return program_config def sample_predictor_configs(self, program_config): config = self.create_inference_config() - yield config, ["im2sequence", - "fusion_seqconv_eltadd_relu"], (1e-5, 1e-5) + yield config, ["im2sequence", "fusion_seqconv_eltadd_relu"], ( + 1e-5, + 1e-5, + ) def test(self): - self.run_and_statis(quant=False, - 
passes=["seqconv_eltadd_relu_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["seqconv_eltadd_relu_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_seqpool_cvm_concat_fuse_pass_py.py b/python/paddle/fluid/tests/unittests/ir/inference/test_seqpool_cvm_concat_fuse_pass_py.py index d768b1c23ff514c2a761d78b443c603aaaf412c1..08878c3cd40aa9f9cb5bbf333c11cd6317f5d4e2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_seqpool_cvm_concat_fuse_pass_py.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_seqpool_cvm_concat_fuse_pass_py.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestSeqpoolCvmConcatFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -47,84 +46,88 @@ class TestSeqpoolCvmConcatFusePass(PassAutoScanTest): def generate_input3(): return np.random.random([1, 768]).astype(np.float32) - im2sequence_op = OpConfig(type="im2sequence", - inputs={"X": ["input_data1"]}, - outputs={"Out": ["seq_out"]}, - attrs={ - "kernels": [6, 1], - "out_stride": [1, 1], - "paddings": [0, 0, 0, 0], - "strides": [1, 1] - }) - - sequence_pool_op1 = OpConfig(type="sequence_pool", - inputs={"X": ["seq_out"]}, - outputs={ - "Out": ["seq_pool1_out"], - "MaxIndex": ["index1_out"] - }, - attrs={ - "is_test": is_test, - "pooltype": pooltype, - "pad_value": pad_value1 - }) - - sequence_pool_op2 = OpConfig(type="sequence_pool", - inputs={"X": ["seq_out"]}, - outputs={ - "Out": ["seq_pool2_out"], - "MaxIndex": ["index2_out"] - }, - attrs={ - "is_test": is_test, - "pooltype": pooltype, - "pad_value": pad_value2 - }) - - sequence_pool_op3 = OpConfig(type="sequence_pool", - inputs={"X": ["seq_out"]}, - outputs={ - "Out": ["seq_pool3_out"], - "MaxIndex": ["index3_out"] - }, - attrs={ - "is_test": is_test, - "pooltype": pooltype, - "pad_value": pad_value3 - }) - - cvm_op1 = OpConfig(type="cvm", - inputs={ - "X": ["seq_pool1_out"], - "CVM": ["input_data2"] - }, - outputs={"Y": ["cvm1_out"]}, - attrs={"use_cvm": use_cvm}) - - cvm_op2 = OpConfig(type="cvm", - inputs={ - "X": ["seq_pool2_out"], - "CVM": ["input_data2"] - }, - outputs={"Y": ["cvm2_out"]}, - attrs={"use_cvm": use_cvm}) - - cvm_op3 = OpConfig(type="cvm", - inputs={ - "X": ["seq_pool3_out"], - "CVM": ["input_data2"] - }, - outputs={"Y": ["cvm3_out"]}, - attrs={"use_cvm": use_cvm}) - - concat_op = OpConfig(type="concat", - inputs={"X": ["cvm1_out", "cvm2_out", "cvm3_out"]}, - outputs={"Out": ["concat_output"]}, - attrs={'axis': axis}) + im2sequence_op = OpConfig( + type="im2sequence", + inputs={"X": ["input_data1"]}, + outputs={"Out": ["seq_out"]}, + attrs={ + "kernels": [6, 1], + "out_stride": [1, 1], + "paddings": [0, 0, 0, 0], + "strides": [1, 1], + }, + ) + + sequence_pool_op1 = OpConfig( + type="sequence_pool", + inputs={"X": ["seq_out"]}, + outputs={"Out": ["seq_pool1_out"], "MaxIndex": ["index1_out"]}, + attrs={ + "is_test": is_test, + "pooltype": pooltype, + "pad_value": pad_value1, + }, + ) + + sequence_pool_op2 = OpConfig( + type="sequence_pool", + inputs={"X": ["seq_out"]}, + outputs={"Out": ["seq_pool2_out"], "MaxIndex": ["index2_out"]}, + attrs={ + "is_test": is_test, + "pooltype": pooltype, + "pad_value": pad_value2, + }, + ) + + sequence_pool_op3 = OpConfig( + type="sequence_pool", + inputs={"X": ["seq_out"]}, + outputs={"Out": ["seq_pool3_out"], "MaxIndex": ["index3_out"]}, + attrs={ + "is_test": is_test, + "pooltype": pooltype, + "pad_value": pad_value3, + }, + ) + + cvm_op1 = 
OpConfig( + type="cvm", + inputs={"X": ["seq_pool1_out"], "CVM": ["input_data2"]}, + outputs={"Y": ["cvm1_out"]}, + attrs={"use_cvm": use_cvm}, + ) + + cvm_op2 = OpConfig( + type="cvm", + inputs={"X": ["seq_pool2_out"], "CVM": ["input_data2"]}, + outputs={"Y": ["cvm2_out"]}, + attrs={"use_cvm": use_cvm}, + ) + + cvm_op3 = OpConfig( + type="cvm", + inputs={"X": ["seq_pool3_out"], "CVM": ["input_data2"]}, + outputs={"Y": ["cvm3_out"]}, + attrs={"use_cvm": use_cvm}, + ) + + concat_op = OpConfig( + type="concat", + inputs={"X": ["cvm1_out", "cvm2_out", "cvm3_out"]}, + outputs={"Out": ["concat_output"]}, + attrs={'axis': axis}, + ) model_net = [ - im2sequence_op, sequence_pool_op1, sequence_pool_op2, - sequence_pool_op3, cvm_op1, cvm_op2, cvm_op3, concat_op + im2sequence_op, + sequence_pool_op1, + sequence_pool_op2, + sequence_pool_op3, + cvm_op1, + cvm_op2, + cvm_op3, + concat_op, ] program_config = ProgramConfig( @@ -133,9 +136,10 @@ class TestSeqpoolCvmConcatFusePass(PassAutoScanTest): inputs={ "input_data1": TensorConfig(data_gen=partial(generate_input1)), "input_data2": TensorConfig(data_gen=partial(generate_input2)), - "input_data3": TensorConfig(data_gen=partial(generate_input3)) + "input_data3": TensorConfig(data_gen=partial(generate_input3)), }, - outputs=["concat_output"]) + outputs=["concat_output"], + ) return program_config @@ -144,8 +148,9 @@ class TestSeqpoolCvmConcatFusePass(PassAutoScanTest): yield config, ["im2sequence", "fusion_seqpool_cvm_concat"], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - passes=["seqpool_cvm_concat_fuse_pass"]) + self.run_and_statis( + quant=False, passes=["seqpool_cvm_concat_fuse_pass"] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_shuffle_channel_detect_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_shuffle_channel_detect_pass.py index 669b1906d2c107a55bad1a6dc3d647872153ea5f..5aa03a57f4bb32b62478484bf8a01bfaa2b76966 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_shuffle_channel_detect_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_shuffle_channel_detect_pass.py @@ -23,7 +23,6 @@ import hypothesis.strategies as st class TestShuffleChannelDetectPass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -46,44 +45,52 @@ class TestShuffleChannelDetectPass(PassAutoScanTest): def generate_reshape2_Input(): return np.random.random(x_shape).astype(np.float32) - reshape2_op1 = OpConfig("reshape2", - inputs={ - "X": ["reshape2_input1"], - }, - outputs={ - "Out": ["reshape2_output1"], - "XShape": ["reshape2_xshape1"] - }, - shape=shape, - input_shape=x_shape) - transpose2_op = OpConfig("transpose2", - inputs={ - "X": ["reshape2_output1"], - }, - outputs={ - "Out": ["transpose2_output"], - "XShape": ["transpose2_xshape"] - }, - axis=axis_v) - reshape2_op2 = OpConfig("reshape2", - inputs={ - "X": ["transpose2_output"], - }, - outputs={ - "Out": ["reshape2_output2"], - "XShape": ["reshape2_xshape2"] - }, - shape=x_shape) + reshape2_op1 = OpConfig( + "reshape2", + inputs={ + "X": ["reshape2_input1"], + }, + outputs={ + "Out": ["reshape2_output1"], + "XShape": ["reshape2_xshape1"], + }, + shape=shape, + input_shape=x_shape, + ) + transpose2_op = OpConfig( + "transpose2", + inputs={ + "X": ["reshape2_output1"], + }, + outputs={ + "Out": ["transpose2_output"], + "XShape": ["transpose2_xshape"], + }, + axis=axis_v, + ) + 
reshape2_op2 = OpConfig( + "reshape2", + inputs={ + "X": ["transpose2_output"], + }, + outputs={ + "Out": ["reshape2_output2"], + "XShape": ["reshape2_xshape2"], + }, + shape=x_shape, + ) ops = [reshape2_op1, transpose2_op, reshape2_op2] program_config = ProgramConfig( ops=ops, inputs={ - "reshape2_input1": - TensorConfig(data_gen=partial(generate_reshape2_Input)), + "reshape2_input1": TensorConfig( + data_gen=partial(generate_reshape2_Input) + ), }, weights={}, - outputs=["reshape2_output2"]) + outputs=["reshape2_output2"], + ) return program_config def sample_predictor_configs(self, program_config): @@ -94,7 +101,8 @@ class TestShuffleChannelDetectPass(PassAutoScanTest): min_subgraph_size=1, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['shuffle_channel'], (1e-5, 1e-5) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py b/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py index 6dab206551c186a9652d9be703147c1cd5e3fb65..42c5a38a72af7a4c91ade28cbb0f3b06da72858c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py @@ -21,47 +21,49 @@ import hypothesis.strategies as st class TestSimplifyWithBasicOpsPassUpscale(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_config(self, draw): - #scale = draw(st.floats(min_value=0.01, max_value=1.0)) - #bias = draw(st.floats(min_value=0.01, max_value=2.0)) - #bias_after_scale = draw(st.booleans()) + # scale = draw(st.floats(min_value=0.01, max_value=1.0)) + # bias = draw(st.floats(min_value=0.01, max_value=2.0)) + # bias_after_scale = draw(st.booleans()) fix_seed = draw(st.booleans()) dropout_implementation = "upscale_in_train" dropout_prob = draw(st.floats(min_value=0.0, max_value=1.0)) seed = draw(st.integers(min_value=0, max_value=512)) x_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=4 + ) + ) is_test = True - dropout_op = OpConfig("dropout", - inputs={"X": ["input_data"]}, - outputs={ - "Out": ["dropout_output"], - "Mask": ["mask"] - }, - fix_seed=fix_seed, - dropout_implementation=dropout_implementation, - dropout_prob=dropout_prob, - seed=seed, - is_test=is_test) - relu_op = OpConfig("relu", - inputs={"X": ["dropout_output"]}, - outputs={"Out": ["relu_out"]}) + dropout_op = OpConfig( + "dropout", + inputs={"X": ["input_data"]}, + outputs={"Out": ["dropout_output"], "Mask": ["mask"]}, + fix_seed=fix_seed, + dropout_implementation=dropout_implementation, + dropout_prob=dropout_prob, + seed=seed, + is_test=is_test, + ) + relu_op = OpConfig( + "relu", + inputs={"X": ["dropout_output"]}, + outputs={"Out": ["relu_out"]}, + ) ops = [dropout_op, relu_op] - program_config = ProgramConfig(ops=ops, - weights={}, - inputs={ - "input_data": - TensorConfig(shape=x_shape), - }, - outputs=["relu_out"]) + program_config = ProgramConfig( + ops=ops, + weights={}, + inputs={ + "input_data": TensorConfig(shape=x_shape), + }, + outputs=["relu_out"], + ) return program_config @@ -77,18 +79,20 @@ class TestSimplifyWithBasicOpsPassUpscale(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, 
use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['relu'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=30, - passes=["simplify_with_basic_ops_pass"], - min_success_num=30) + self.run_and_statis( + quant=False, + max_examples=30, + passes=["simplify_with_basic_ops_pass"], + min_success_num=30, + ) class TestSimplifyWithBasicOpsPassDowngrade(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -98,34 +102,37 @@ class TestSimplifyWithBasicOpsPassDowngrade(PassAutoScanTest): dropout_prob = draw(st.floats(min_value=0.0, max_value=1.0)) seed = draw(st.integers(min_value=0, max_value=512)) x_shape = draw( - st.lists(st.integers(min_value=1, max_value=4), - min_size=2, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=4), min_size=2, max_size=4 + ) + ) is_test = True - dropout_op = OpConfig("dropout", - inputs={"X": ["input_data"]}, - outputs={ - "Out": ["dropout_output"], - "Mask": ["mask"] - }, - fix_seed=fix_seed, - dropout_implementation=dropout_implementation, - dropout_prob=dropout_prob, - seed=seed, - is_test=is_test) - relu_op = OpConfig("relu", - inputs={"X": ["dropout_output"]}, - outputs={"Out": ["relu_out"]}) + dropout_op = OpConfig( + "dropout", + inputs={"X": ["input_data"]}, + outputs={"Out": ["dropout_output"], "Mask": ["mask"]}, + fix_seed=fix_seed, + dropout_implementation=dropout_implementation, + dropout_prob=dropout_prob, + seed=seed, + is_test=is_test, + ) + relu_op = OpConfig( + "relu", + inputs={"X": ["dropout_output"]}, + outputs={"Out": ["relu_out"]}, + ) ops = [dropout_op, relu_op] - program_config = ProgramConfig(ops=ops, - weights={}, - inputs={ - "input_data": - TensorConfig(shape=x_shape), - }, - outputs=["relu_out"]) + program_config = ProgramConfig( + ops=ops, + weights={}, + inputs={ + "input_data": TensorConfig(shape=x_shape), + }, + outputs=["relu_out"], + ) return program_config @@ -141,14 +148,17 @@ class TestSimplifyWithBasicOpsPassDowngrade(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['scale', 'relu'], (1e-5, 1e-5) def test(self): - self.run_and_statis(quant=False, - max_examples=30, - passes=["simplify_with_basic_ops_pass"], - min_success_num=30) + self.run_and_statis( + quant=False, + max_examples=30, + passes=["simplify_with_basic_ops_pass"], + min_success_num=30, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py index 93a6e0fa32293ff3136c25d947636001ce2823f5..4e2287c9a65b8a82a72877bc4a1a6b381678bc90 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py @@ -22,7 +22,6 @@ import hypothesis.strategies as st class TestSquaredMatSubFusePass(PassAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -49,110 +48,125 @@ class TestSquaredMatSubFusePass(PassAutoScanTest): else: return np.random.random(shape_y).astype(np.float32) - matmul_op1 = OpConfig(type="matmul", - inputs={ - "X": ["input_data1"], - "Y": ["input_data2"] - }, - outputs={"Out": ["matmul1_output"]}, - attrs={ - "transpose_X": transpose_X, - "transpose_Y": transpose_Y, - "alpha": alpha1, - 
"fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }) - - square_op1 = OpConfig(type="square", - inputs={"X": ["matmul1_output"]}, - outputs={"Out": ["square1_output"]}, - attrs={}) - - square_op2 = OpConfig(type="square", - inputs={"X": ["input_data1"]}, - outputs={"Out": ["square2_output"]}, - attrs={}) - - square_op3 = OpConfig(type="square", - inputs={"X": ["input_data2"]}, - outputs={"Out": ["square3_output"]}, - attrs={}) - - matmul_op2 = OpConfig(type="matmul", - inputs={ - "X": ["square2_output"], - "Y": ["square3_output"] - }, - outputs={"Out": ["matmul2_output"]}, - attrs={ - "transpose_X": transpose_X, - "transpose_Y": transpose_Y, - "alpha": alpha2, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }) - - elt_sub_op = OpConfig(type="elementwise_sub", - inputs={ - "X": ["square1_output"], - "Y": ["matmul2_output"] - }, - outputs={"Out": ["sub_out"]}, - attrs={"axis": axis1}) + matmul_op1 = OpConfig( + type="matmul", + inputs={"X": ["input_data1"], "Y": ["input_data2"]}, + outputs={"Out": ["matmul1_output"]}, + attrs={ + "transpose_X": transpose_X, + "transpose_Y": transpose_Y, + "alpha": alpha1, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + ) + + square_op1 = OpConfig( + type="square", + inputs={"X": ["matmul1_output"]}, + outputs={"Out": ["square1_output"]}, + attrs={}, + ) + + square_op2 = OpConfig( + type="square", + inputs={"X": ["input_data1"]}, + outputs={"Out": ["square2_output"]}, + attrs={}, + ) + + square_op3 = OpConfig( + type="square", + inputs={"X": ["input_data2"]}, + outputs={"Out": ["square3_output"]}, + attrs={}, + ) + + matmul_op2 = OpConfig( + type="matmul", + inputs={"X": ["square2_output"], "Y": ["square3_output"]}, + outputs={"Out": ["matmul2_output"]}, + attrs={ + "transpose_X": transpose_X, + "transpose_Y": transpose_Y, + "alpha": alpha2, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + ) + + elt_sub_op = OpConfig( + type="elementwise_sub", + inputs={"X": ["square1_output"], "Y": ["matmul2_output"]}, + outputs={"Out": ["sub_out"]}, + attrs={"axis": axis1}, + ) if has_str_value: - fill_constant_op = OpConfig(type="fill_constant", - inputs={}, - outputs={"Out": ["constant_out"]}, - attrs={ - "dtype": 5, - "place_type": place_type, - "str_value": str_value, - "value": value, - "shape": shape - }) + fill_constant_op = OpConfig( + type="fill_constant", + inputs={}, + outputs={"Out": ["constant_out"]}, + attrs={ + "dtype": 5, + "place_type": place_type, + "str_value": str_value, + "value": value, + "shape": shape, + }, + ) else: - fill_constant_op = OpConfig(type="fill_constant", - inputs={}, - outputs={"Out": ["constant_out"]}, - attrs={ - "dtype": 5, - "place_type": place_type, - "value": value, - "shape": shape - }) - - elt_mul_op = OpConfig(type="elementwise_mul", - inputs={ - "X": ["sub_out"], - "Y": ["constant_out"] - }, - outputs={"Out": ["mul_out"]}, - attrs={"axis": axis2}) + fill_constant_op = OpConfig( + type="fill_constant", + inputs={}, + outputs={"Out": ["constant_out"]}, + attrs={ + "dtype": 5, + "place_type": place_type, + "value": value, + "shape": shape, + }, + ) + + elt_mul_op = OpConfig( + 
type="elementwise_mul", + inputs={"X": ["sub_out"], "Y": ["constant_out"]}, + outputs={"Out": ["mul_out"]}, + attrs={"axis": axis2}, + ) model_net = [ - matmul_op1, square_op1, square_op2, square_op3, matmul_op2, - elt_sub_op, fill_constant_op, elt_mul_op + matmul_op1, + square_op1, + square_op2, + square_op3, + matmul_op2, + elt_sub_op, + fill_constant_op, + elt_mul_op, ] program_config = ProgramConfig( ops=model_net, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial(generate_input, "x")), - "input_data2": - TensorConfig(data_gen=partial(generate_input, "y")) + "input_data1": TensorConfig( + data_gen=partial(generate_input, "x") + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, "y") + ), }, - outputs=["mul_out"]) + outputs=["mul_out"], + ) return program_config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py index 8deee7c1517e8a660e9195fb03862adb1b12d3d2..e7ef132f1abbde4f1574ca8091d1d4e299625258 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py @@ -44,9 +44,10 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of squeeze2 x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) # axes of squeeze2 == [2, 3] x_shape += [1, 1] axes = [2, 3] @@ -58,9 +59,10 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of matmul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = x_shape[1] # 4. 
Generate legal attr:axis of elementwise_add @@ -93,17 +95,11 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): "X": ["squeeze2_x"], }, axes=axes, - outputs={ - "Out": ["squeeze2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["squeeze2_out"], "XShape": ["xshape"]}, ) matmul_op = OpConfig( "matmul", - inputs={ - "X": ["squeeze2_out"], - "Y": ["matmul_y"] - }, + inputs={"X": ["squeeze2_out"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -118,10 +114,7 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["matmul_out"], - "Y": ["bias"] - }, + inputs={"X": ["matmul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) @@ -154,10 +147,12 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=50, - max_duration=1000, - passes=["gpu_cpu_squeeze2_matmul_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=50, + max_duration=1000, + passes=["gpu_cpu_squeeze2_matmul_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py index 41145be9bca0b16ed7995e8c750f87131a150ead..acdff3d9a86cedbbed3febcd4fe54f9ff8d01e5c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py @@ -94,14 +94,18 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest): if draw(st.booleans()): trans_axis[j], trans_axis[-1] = trans_axis[-1], trans_axis[j] # Generate axis of flatten - flatten_axis = draw(st.integers(min_value=0, - max_value=x_shape_rank - 1)) + flatten_axis = draw( + st.integers(min_value=0, max_value=x_shape_rank - 1) + ) for i in range(times): # Generate x_shape of transpose x_shape = draw( - st.lists(st.integers(min_value=1, max_value=10), - min_size=x_shape_rank, - max_size=x_shape_rank)) + st.lists( + st.integers(min_value=1, max_value=10), + min_size=x_shape_rank, + max_size=x_shape_rank, + ) + ) str_i = str(i) transpose_op = OpConfig( @@ -112,7 +116,7 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest): axis=trans_axis, outputs={ "Out": ["trans_out" + str_i], - "XShape": ["trans_shape" + str_i] + "XShape": ["trans_shape" + str_i], }, ) ops.append(transpose_op) @@ -124,7 +128,7 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest): axis=flatten_axis, outputs={ "Out": ["flatten2_out" + str_i], - "XShape": ["xshape" + str_i] + "XShape": ["xshape" + str_i], }, ) concat_input.append("flatten2_out" + str_i) @@ -152,9 +156,11 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["transpose_flatten_concat_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["transpose_flatten_concat_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py index 7ba824360ad46e9c6e647542a0143251483c8aca..0f2a8a97430cda2283d0f81f063ea6982625b53c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py @@ -24,18 +24,18 @@ from paddle.fluid.core import AnalysisConfig class TensorRTSubgraphPassActivationTest(InferencePassTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) def setUp(self): self.setUpTensorRTParam() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 32, 32], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 32, 32], dtype="float32" + ) act_out = self.append_act(data) out = fluid.layers.batch_norm(act_out, is_test=True) self.feeds = { @@ -51,298 +51,340 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest): use_gpu = True if os.path.exists(self.path + "_opt_cache"): shutil.rmtree(self.path + "_opt_cache") - if self.trt_parameters.precision == AnalysisConfig.Precision.Float32: + if ( + self.trt_parameters.precision + == AnalysisConfig.Precision.Float32 + ): self.check_output_with_option(use_gpu) else: self.check_output_with_option(use_gpu, 1e-3) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.leaky_relu(x) class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.relu6(x) class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.softmax(x) class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.sigmoid(x) class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.hard_swish(x) class TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.hard_sigmoid(x) -class TensorRTSubgraphPassHardSwishPluginTest(TensorRTSubgraphPassActivationTest - ): - +class TensorRTSubgraphPassHardSwishPluginTest( + TensorRTSubgraphPassActivationTest +): def append_act(self, x): return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0) class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.clip(x, 0, 1) class TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.tanh(x) class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) def append_act(self, x): return fluid.layers.swish(x) class TensorRTSubgraphPassSwishFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) def append_act(self, x): return 
fluid.layers.swish(x) class TensorRTSubgraphPassDynamicSwishFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.swish(x) class TensorRTSubgraphPassMishTest(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) def append_act(self, x): return fluid.layers.mish(x) class TensorRTSubgraphPassMishFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) def append_act(self, x): return fluid.layers.mish(x) class TensorRTSubgraphPassDynamicMishFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.mish(x) class TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassPreluChannelTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.prelu(x, mode='channel') class TensorRTSubgraphPassPreluElementTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.prelu(x, mode='element') class TensorRTSubgraphPassPreluDynamicTest(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 
128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassPreluFp16Test(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False + ) def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassPreluFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassPreluFp16DynamicTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassPreluFp16DynamicSerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.prelu(x, mode='all') class TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest): - def append_act(self, x): return fluid.layers.gelu(x) class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.gelu(x) class 
TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest): - def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False + ) def append_act(self, x): return fluid.layers.gelu(x) class TensorRTSubgraphPassGeluFp16SerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) def append_act(self, x): return fluid.layers.gelu(x) -class TensorRTSubgraphPassGeluFp16DynamicTest(TensorRTSubgraphPassActivationTest - ): - +class TensorRTSubgraphPassGeluFp16DynamicTest( + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.gelu(x) class TensorRTSubgraphPassGeluFp16DynamicSerializeTest( - TensorRTSubgraphPassActivationTest): - + TensorRTSubgraphPassActivationTest +): def setUpTensorRTParam(self): self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) - self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam( - {'data': [1, 6, 8, 8]}, {'data': [1, 6, 128, 128]}, - {'data': [1, 6, 64, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassActivationTest.DynamicShapeParam( + {'data': [1, 6, 8, 8]}, + {'data': [1, 6, 128, 128]}, + {'data': [1, 6, 64, 64]}, + False, + ) + ) def append_act(self, x): return fluid.layers.gelu(x) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_affine_channel_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_affine_channel_op.py index eb2360668f562278c8002592935dc5dd3dd98865..69e73d4359697046f87bafc16e2cdfc4db59b2fb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_affine_channel_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_affine_channel_op.py @@ -23,7 +23,6 @@ from paddle.fluid.core import AnalysisConfig class TRTAffineChannelTest(InferencePassTest): - def setUp(self): self.bs = 2 self.channel = 8 @@ -38,7 +37,8 @@ class TRTAffineChannelTest(InferencePassTest): # set min_graph_size to 2, # because affine channel doesn't support nhwc format self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, self.bs, 2, self.precision, self.serialize, False) + 1 << 30, self.bs, 2, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): if self.data_layout == 'NCHW': @@ -51,13 +51,16 @@ class 
TRTAffineChannelTest(InferencePassTest): scale = fluid.layers.create_parameter( shape=[self.channel], dtype='float32', - default_initializer=fluid.initializer.Constant(2.)) + default_initializer=fluid.initializer.Constant(2.0), + ) bias = fluid.layers.create_parameter( shape=[self.channel], dtype='float32', - default_initializer=fluid.initializer.Constant(.5)) + default_initializer=fluid.initializer.Constant(0.5), + ) affine_channel_out = fluid.layers.affine_channel( - data, scale=scale, bias=bias, data_layout=self.data_layout) + data, scale=scale, bias=bias, data_layout=self.data_layout + ) out = fluid.layers.batch_norm(affine_channel_out, is_test=True) shape[0] = self.bs @@ -74,7 +77,8 @@ class TRTAffineChannelTest(InferencePassTest): atol = 2e-2 self.check_output_with_option(use_gpu, atol, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self): self.build() @@ -82,30 +86,39 @@ class TRTAffineChannelTest(InferencePassTest): def run_test_all(self): precision_opt = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_opt = [False, True] if self.data_layout == 'NCHW': min_shape = [ - self.bs, self.channel, self.height // 2, self.width // 2 + self.bs, + self.channel, + self.height // 2, + self.width // 2, ] max_shape = [self.bs, self.channel, self.height * 2, self.width * 2] opt_shape = [self.bs, self.channel, self.height, self.width] if self.data_layout == 'NHWC': min_shape = [ - self.bs, self.height // 2, self.width // 2, self.channel + self.bs, + self.height // 2, + self.width // 2, + self.channel, ] max_shape = [self.bs, self.height * 2, self.width * 2, self.channel] opt_shape = [self.bs, self.height, self.width, self.channel] dynamic_shape_profile = InferencePassTest.DynamicShapeParam( - {'in': min_shape}, {'in': max_shape}, {'in': opt_shape}, False) + {'in': min_shape}, {'in': max_shape}, {'in': opt_shape}, False + ) dynamic_shape_opt = [None, dynamic_shape_profile] for precision, serialize, dynamic_shape in itertools.product( - precision_opt, serialize_opt, dynamic_shape_opt): + precision_opt, serialize_opt, dynamic_shape_opt + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape @@ -126,7 +139,9 @@ class TRTAffineChannelTest(InferencePassTest): self.dynamic_shape_params = InferencePassTest.DynamicShapeParam( {'in': [self.bs, self.channel, self.height // 2, self.width // 2]}, {'in': [self.bs, self.channel, self.height * 2, self.width * 2]}, - {'in': [self.bs, self.channel, self.height, self.width]}, False) + {'in': [self.bs, self.channel, self.height, self.width]}, + False, + ) self.run_test() def test_nchw_all(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_anchor_generator_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_anchor_generator_op.py index 7e22604a374a6b99a1a80e9fa4998896311cb631..cd05c8528bc1b318373bb645807ba00cab85bca0 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_anchor_generator_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_anchor_generator_op.py @@ -22,40 +22,47 @@ from paddle.fluid.core import AnalysisConfig class TRTAnchorGeneratorBaseTest(InferencePassTest): - def setUp(self): self.bs = 1 self.channel = 16 self.height = 32 self.width = 32 - self.anchor_sizes = [64., 128., 256., 512.] 
- self.aspect_ratios = [.5, 1., 2.] - self.variance = [.1, .1, .2, .2] - self.stride = [8., 8.] + self.anchor_sizes = [64.0, 128.0, 256.0, 512.0] + self.aspect_ratios = [0.5, 1.0, 2.0] + self.variance = [0.1, 0.1, 0.2, 0.2] + self.stride = [8.0, 8.0] self.precision = AnalysisConfig.Precision.Float32 self.serialize = False self.enable_trt = True self.feeds = { - 'data': - np.random.random([self.bs, self.channel, self.height, - self.width]).astype('float32'), + 'data': np.random.random( + [self.bs, self.channel, self.height, self.width] + ).astype('float32'), } def build(self): min_graph_size = 3 if self.dynamic_shape_params is not None else 2 self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, self.bs, min_graph_size, self.precision, self.serialize, - False) + 1 << 30, + self.bs, + min_graph_size, + self.precision, + self.serialize, + False, + ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=[-1, self.channel, self.height, self.width], - dtype='float32') + data = fluid.data( + name='data', + shape=[-1, self.channel, self.height, self.width], + dtype='float32', + ) anchor, var = fluid.layers.detection.anchor_generator( data, anchor_sizes=self.anchor_sizes, aspect_ratios=self.aspect_ratios, variance=self.variance, - stride=self.stride) + stride=self.stride, + ) if self.dynamic_shape_params is not None: anchor = fluid.layers.transpose(anchor, [2, 3, 0, 1]) out = fluid.layers.batch_norm(anchor, is_test=True) @@ -69,10 +76,17 @@ class TRTAnchorGeneratorBaseTest(InferencePassTest): def set_dynamic(self): self.dynamic_shape_params = InferencePassTest.DynamicShapeParam( { - 'data': - [self.bs, self.channel, self.height // 2, self.width // 2] - }, {'data': [self.bs, self.channel, self.height, self.width]}, - {'data': [self.bs, self.channel, self.height, self.width]}, False) + 'data': [ + self.bs, + self.channel, + self.height // 2, + self.width // 2, + ] + }, + {'data': [self.bs, self.channel, self.height, self.width]}, + {'data': [self.bs, self.channel, self.height, self.width]}, + False, + ) def test_base(self): self.run_test() @@ -113,7 +127,8 @@ class TRTAnchorGeneratorBaseTest(InferencePassTest): atol = 1e-3 self.check_output_with_option(use_gpu, atol, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_c_allreduce_infer_script.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_c_allreduce_infer_script.py index e90157c928cd2a44e31de711e919e11f0d1c88d5..d2d34e34cbc99870d10d59362e8fc0df2e022045 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_c_allreduce_infer_script.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_c_allreduce_infer_script.py @@ -39,20 +39,25 @@ def run(op_type, precision): lod_level=data.lod_level, persistable=False, is_data=False, - initializer=paddle.nn.initializer.Constant(value=1.0)) - block.append_op(type=op_type, - inputs={'X': data}, - outputs={'Out': c_data}, - attrs={ - 'ring_id': 0, - 'use_calc_stream': True, - 'use_model_parallel': True - }) + initializer=paddle.nn.initializer.Constant(value=1.0), + ) + block.append_op( + type=op_type, + inputs={'X': data}, + outputs={'Out': c_data}, + attrs={ + 'ring_id': 0, + 'use_calc_stream': True, + 'use_model_parallel': True, + }, + ) out = paddle.static.nn.fc( x=c_data, size=1, 
weight_attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(value=0.5))) + initializer=paddle.nn.initializer.Constant(value=0.5) + ), + ) mean = paddle.mean(out) exe = paddle.static.Executor(paddle.CPUPlace()) exe.run(startup_program) @@ -68,12 +73,17 @@ def run(op_type, precision): dist_config.enable_dist_model(True) with tempfile.TemporaryDirectory(prefix="allreduce_") as tmpdir: - paddle.static.save_inference_model(os.path.join(tmpdir, "model"), - [data], [mean], - exe, - program=main_program) - config = Config(os.path.join(tmpdir, "model.pdmodel"), - os.path.join(tmpdir, "model.pdiparams")) + paddle.static.save_inference_model( + os.path.join(tmpdir, "model"), + [data], + [mean], + exe, + program=main_program, + ) + config = Config( + os.path.join(tmpdir, "model.pdmodel"), + os.path.join(tmpdir, "model.pdiparams"), + ) config.enable_memory_optim() config.enable_use_gpu(1000, fleet.worker_index()) config.set_dist_config(dist_config) @@ -82,11 +92,14 @@ def run(op_type, precision): max_batch_size=1, min_subgraph_size=1, precision_mode=PrecisionType.Half - if precision == "fp16" else PrecisionType.Int8, + if precision == "fp16" + else PrecisionType.Int8, use_static=False, - use_calib_mode=False) - config.set_trt_dynamic_shape_info({"data": [3, 4]}, {"data": [3, 4]}, - {"data": [3, 4]}) + use_calib_mode=False, + ) + config.set_trt_dynamic_shape_info( + {"data": [3, 4]}, {"data": [3, 4]}, {"data": [3, 4]} + ) predictor = create_predictor(config) input_names = predictor.get_input_names() input_tensor = predictor.get_input_handle("data") diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py index 088b548f0184a0da9ebf1c57925cb02b868a405b..70e62f9eaf6c7643c7cb1b32b32639fd59875502 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py @@ -22,29 +22,31 @@ from paddle.fluid.core import AnalysisConfig class TensorRTSubgraphPassConv3dTest(InferencePassTest): - def setUp(self): self.init_params() self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 6, 32, 32], - dtype="float32") - conv_out = fluid.layers.conv3d(input=data, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - stride=self.stride, - act=None) + data = fluid.data( + name="data", shape=[-1, 3, 6, 32, 32], dtype="float32" + ) + conv_out = fluid.layers.conv3d( + input=data, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + stride=self.stride, + act=None, + ) self.feeds = { "data": np.random.random([1, 3, 6, 32, 32]).astype("float32"), } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassConv3dTest.TensorRTParam( - 1 << 30, 32, 1, self.precision, self.use_static, False) + 1 << 30, 32, 1, self.precision, self.use_static, False + ) self.fetch_list = [conv_out] def init_params(self): @@ -65,12 +67,13 @@ class TensorRTSubgraphPassConv3dTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) - + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) -class 
TensorRTSubgraphPassConv3dValidPaddingTest(TensorRTSubgraphPassConv3dTest - ): +class TensorRTSubgraphPassConv3dValidPaddingTest( + TensorRTSubgraphPassConv3dTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -79,7 +82,6 @@ class TensorRTSubgraphPassConv3dValidPaddingTest(TensorRTSubgraphPassConv3dTest class TensorRTSubgraphPassConv3dSamePaddingTest(TensorRTSubgraphPassConv3dTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -88,7 +90,6 @@ class TensorRTSubgraphPassConv3dSamePaddingTest(TensorRTSubgraphPassConv3dTest): class TensorRTSubgraphPassConv3dPaddingTest(TensorRTSubgraphPassConv3dTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -97,7 +98,6 @@ class TensorRTSubgraphPassConv3dPaddingTest(TensorRTSubgraphPassConv3dTest): class TensorRTSubgraphPassConv3dStrideTest(TensorRTSubgraphPassConv3dTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -107,39 +107,49 @@ class TensorRTSubgraphPassConv3dStrideTest(TensorRTSubgraphPassConv3dTest): class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, -1, -1, -1], - dtype="float32") - conv_out = fluid.layers.conv3d(input=data, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - stride=self.stride, - act=None) + data = fluid.data( + name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" + ) + conv_out = fluid.layers.conv3d( + input=data, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + stride=self.stride, + act=None, + ) self.feeds = { "data": np.random.random([1, 6, 32, 32, 8]).astype("float32"), } self.enable_trt = True - self.trt_parameters = DynamicShapeTensorRTSubgraphPassConv3dTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = DynamicShapeTensorRTSubgraphPassConv3dTest.DynamicShapeParam( - { - "data": [1, 6, 8, 8, 8], - "conv3d_0.tmp_0": [1, 6, 8, 8, 4], - }, { - "data": [32, 6, 32, 32, 8], - "conv3d_0.tmp_0": [32, 6, 32, 32, 8], - }, { - "data": [16, 6, 16, 16, 8], - "conv3d_0.tmp_0": [16, 6, 16, 16, 8], - }, False) + self.trt_parameters = ( + DynamicShapeTensorRTSubgraphPassConv3dTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) + self.dynamic_shape_params = ( + DynamicShapeTensorRTSubgraphPassConv3dTest.DynamicShapeParam( + { + "data": [1, 6, 8, 8, 8], + "conv3d_0.tmp_0": [1, 6, 8, 8, 4], + }, + { + "data": [32, 6, 32, 32, 8], + "conv3d_0.tmp_0": [32, 6, 32, 32, 8], + }, + { + "data": [16, 6, 16, 16, 8], + "conv3d_0.tmp_0": [16, 6, 16, 16, 8], + }, + False, + ) + ) self.fetch_list = [conv_out] def set_params(self): @@ -155,7 +165,8 @@ class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py index 0cd4bd1a8e98f9db4d061783297da1579be26778..9beabe55052307616e123ef6752e04cd940bb379 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py @@ -22,13 +22,12 @@ from paddle.fluid.core import AnalysisConfig class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 4, 4, 32, 32], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 4, 4, 32, 32], dtype="float32" + ) conv_out = fluid.layers.conv3d_transpose( input=data, num_filters=self.conv_num_filters, @@ -38,13 +37,17 @@ class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): bias_attr=False, use_cudnn=self.use_cudnn, stride=1, - act=None) + act=None, + ) self.feeds = { "data": np.random.random([1, 4, 4, 32, 32]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassConv3dTransposeTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + TensorRTSubgraphPassConv3dTransposeTest.TensorRTParam( + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) + ) self.fetch_list = [conv_out] def set_params(self): @@ -59,12 +62,13 @@ class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassConv3dTransposeSamePaddingTest( - TensorRTSubgraphPassConv3dTransposeTest): - + TensorRTSubgraphPassConv3dTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -74,8 +78,8 @@ class TensorRTSubgraphPassConv3dTransposeSamePaddingTest( class TensorRTSubgraphPassConv3dTransposeMultigroupTest( - TensorRTSubgraphPassConv3dTransposeTest): - + TensorRTSubgraphPassConv3dTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -85,13 +89,12 @@ class TensorRTSubgraphPassConv3dTransposeMultigroupTest( class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, -1, -1, -1], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" + ) conv_out = fluid.layers.conv3d_transpose( input=data, num_filters=self.conv_num_filters, @@ -101,24 +104,32 @@ class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): bias_attr=False, use_cudnn=self.use_cudnn, stride=self.stride, - act=None) + act=None, + ) self.feeds = { "data": np.random.random([1, 6, 32, 32, 8]).astype("float32"), } self.enable_trt = True - self.trt_parameters = DynamicShapeTensorRTSubgraphPassConv3dTransposeTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + DynamicShapeTensorRTSubgraphPassConv3dTransposeTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) self.dynamic_shape_params = DynamicShapeTensorRTSubgraphPassConv3dTransposeTest.DynamicShapeParam( { "data": [1, 6, 8, 8, 8], "conv3d_transpose_0.tmp_0": [1, 
6, 8, 8, 1], - }, { + }, + { "data": [32, 6, 32, 32, 8], "conv3d_transpose_0.tmp_0": [32, 6, 64, 64, 16], - }, { + }, + { "data": [16, 6, 16, 16, 8], "conv3d_transpose_0.tmp_0": [16, 6, 16, 16, 8], - }, False) + }, + False, + ) self.fetch_list = [conv_out] def set_params(self): @@ -134,7 +145,8 @@ class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py index 5e163bce4073a34d7855d7d6c203eb44eba5c1d5..90dbed96f90661271bfabd883f763b3e37c265f4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py @@ -25,27 +25,29 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0' class TensorRTSubgraphPassConvTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") - conv_out = fluid.layers.conv2d(input=data, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - act=None) + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + act=None, + ) self.feeds = { "data": np.random.random([1, 6, 64, 64]).astype("float32"), } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassConvTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [conv_out] def set_params(self): @@ -60,11 +62,11 @@ class TensorRTSubgraphPassConvTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassConvValidPaddingTest(TensorRTSubgraphPassConvTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -74,7 +76,6 @@ class TensorRTSubgraphPassConvValidPaddingTest(TensorRTSubgraphPassConvTest): class TensorRTSubgraphPassConvSamePaddingTest(InferencePassTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -84,7 +85,6 @@ class TensorRTSubgraphPassConvSamePaddingTest(InferencePassTest): class TensorRTSubgraphPassDepthwiseConvTest(TensorRTSubgraphPassConvTest): - def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -94,7 +94,6 @@ class TensorRTSubgraphPassDepthwiseConvTest(TensorRTSubgraphPassConvTest): class TensorRTSubgraphPassDepthwiseConv2Test(TensorRTSubgraphPassConvTest): - def set_params(self): self.conv_num_filters = 12 self.conv_filter_size = 6 @@ -104,13 +103,12 @@ class TensorRTSubgraphPassDepthwiseConv2Test(TensorRTSubgraphPassConvTest): class TensorRTSubgraphPassConvTransposeTest(InferencePassTest): - def setUp(self): self.set_params() 
with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) conv_out = fluid.layers.conv2d_transpose( input=data, num_filters=self.conv_num_filters, @@ -119,13 +117,17 @@ class TensorRTSubgraphPassConvTransposeTest(InferencePassTest): padding=self.conv_padding, bias_attr=False, use_cudnn=self.use_cudnn, - act=None) + act=None, + ) self.feeds = { "data": np.random.random([1, 6, 64, 64]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassConvTransposeTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + TensorRTSubgraphPassConvTransposeTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) self.fetch_list = [conv_out] def set_params(self): @@ -140,12 +142,13 @@ class TensorRTSubgraphPassConvTransposeTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassConvTransposeValidPaddingTest( - TensorRTSubgraphPassConvTransposeTest): - + TensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -155,8 +158,8 @@ class TensorRTSubgraphPassConvTransposeValidPaddingTest( class TensorRTSubgraphPassConvTransposeSamePaddingTest( - TensorRTSubgraphPassConvTransposeTest): - + TensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -166,8 +169,8 @@ class TensorRTSubgraphPassConvTransposeSamePaddingTest( class TensorRTSubgraphPassConvTransposeMultiGroupTest( - TensorRTSubgraphPassConvTransposeTest): - + TensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 @@ -177,8 +180,8 @@ class TensorRTSubgraphPassConvTransposeMultiGroupTest( class TensorRTSubgraphPassConvTranspose2Test( - TensorRTSubgraphPassConvTransposeTest): - + TensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 12 self.conv_filter_size = 4 @@ -188,8 +191,8 @@ class TensorRTSubgraphPassConvTranspose2Test( class TensorRTSubgraphPassDepthwiseConvTransposeTest( - TensorRTSubgraphPassConvTransposeTest): - + TensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 4 @@ -199,42 +202,52 @@ class TensorRTSubgraphPassDepthwiseConvTransposeTest( class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, -1, -1], - dtype="float32") - conv_out = fluid.layers.conv2d(input=data, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - stride=self.stride, - act=None) + data = fluid.data( + name="data", shape=[-1, 6, -1, -1], dtype="float32" + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + stride=self.stride, + act=None, + ) self.feeds = { "data": 
np.random.random([32, 6, 64, 64]).astype("float32"), } self.enable_trt = True - self.trt_parameters = DynamicShapeTensorRTSubgraphPassConvTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = DynamicShapeTensorRTSubgraphPassConvTest.DynamicShapeParam( - { - "conv2d_0.tmp_0": [1, 6, 8, 8], - "data": [1, 6, 8, 8], - "depthwise_conv2d_0.tmp_0": [1, 6, 8, 8] - }, { - "conv2d_0.tmp_0": [32, 6, 64, 64], - "data": [32, 6, 64, 64], - "depthwise_conv2d_0.tmp_0": [32, 6, 64, 64] - }, { - "conv2d_0.tmp_0": [16, 6, 16, 16], - "data": [16, 6, 16, 16], - "depthwise_conv2d_0.tmp_0": [16, 6, 16, 16] - }, False) + self.trt_parameters = ( + DynamicShapeTensorRTSubgraphPassConvTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) + self.dynamic_shape_params = ( + DynamicShapeTensorRTSubgraphPassConvTest.DynamicShapeParam( + { + "conv2d_0.tmp_0": [1, 6, 8, 8], + "data": [1, 6, 8, 8], + "depthwise_conv2d_0.tmp_0": [1, 6, 8, 8], + }, + { + "conv2d_0.tmp_0": [32, 6, 64, 64], + "data": [32, 6, 64, 64], + "depthwise_conv2d_0.tmp_0": [32, 6, 64, 64], + }, + { + "conv2d_0.tmp_0": [16, 6, 16, 16], + "data": [16, 6, 16, 16], + "depthwise_conv2d_0.tmp_0": [16, 6, 16, 16], + }, + False, + ) + ) self.fetch_list = [conv_out] def set_params(self): @@ -250,12 +263,13 @@ class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class DynamicShapeTensorRTSubgraphPassDepthwiseConvTransposeTest( - DynamicShapeTensorRTSubgraphPassConvTest): - + DynamicShapeTensorRTSubgraphPassConvTest +): def set_params(self): self.conv_num_filters = 6 self.conv_filter_size = 6 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py index fbaf6944be983afe039e3f74c36d82a64a928ec5..f19de2a3bb372bd31e3cc9fab516f76a21a8c2ec 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py @@ -23,25 +23,26 @@ from paddle.fluid.core import AnalysisConfig class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) - conv_out = fluid.layers.conv2d(input=data_reshape, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - act=None) + conv_out = fluid.layers.conv2d( + input=data_reshape, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + act=None, + ) if self.conv_padding == [1, 1]: cout = fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) elif self.conv_padding == 'VALID': @@ -58,21 +59,25 @@ class 
QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = QuantDequantTensorRTSubgraphPassConvTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) + self.trt_parameters = ( + QuantDequantTensorRTSubgraphPassConvTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -86,17 +91,17 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e-1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1e-1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class QuantDequantTensorRTSubgraphPassConvValidPaddingTest( - QuantDequantTensorRTSubgraphPassConvTest): - + QuantDequantTensorRTSubgraphPassConvTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 @@ -106,8 +111,8 @@ class QuantDequantTensorRTSubgraphPassConvValidPaddingTest( class QuantDequantTensorRTSubgraphPassConvSamePaddingTest( - QuantDequantTensorRTSubgraphPassConvTest): - + QuantDequantTensorRTSubgraphPassConvTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 @@ -117,8 +122,8 @@ class QuantDequantTensorRTSubgraphPassConvSamePaddingTest( class QuantDequantTensorRTSubgraphPassDWConvTest( - QuantDequantTensorRTSubgraphPassConvTest): - + QuantDequantTensorRTSubgraphPassConvTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 @@ -128,25 +133,26 @@ class QuantDequantTensorRTSubgraphPassDWConvTest( class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) - conv_out = fluid.layers.conv2d(input=data_reshape, - num_filters=self.conv_num_filters, - filter_size=self.conv_filter_size, - groups=self.conv_groups, - padding=self.conv_padding, - bias_attr=False, - use_cudnn=self.use_cudnn, - act=None) + conv_out = fluid.layers.conv2d( + input=data_reshape, + num_filters=self.conv_num_filters, + filter_size=self.conv_filter_size, + groups=self.conv_groups, + padding=self.conv_padding, + bias_attr=False, + use_cudnn=self.use_cudnn, + act=None, + ) cout = 
fluid.layers.reshape(conv_out, shape=[1, 1, 10816]) result = fluid.layers.relu(cout) loss = fluid.layers.cross_entropy(input=result, label=label_shape) @@ -156,41 +162,49 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = DynamicShapeQuantDequantTensorRTSubgraphPassConvTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) + self.trt_parameters = ( + DynamicShapeQuantDequantTensorRTSubgraphPassConvTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) self.dynamic_shape_params = DynamicShapeQuantDequantTensorRTSubgraphPassConvTest.DynamicShapeParam( { "conv2d_0.tmp_0": [1, 4, 14, 14], "data": [1, 28, 28], "depthwise_conv2d_0.tmp_0": [1, 4, 14, 14], "reshape2_0.tmp_0": [1, 4, 14, 14], - "reshape2_2.tmp_0": [1, 1, 10816] - }, { + "reshape2_2.tmp_0": [1, 1, 10816], + }, + { "conv2d_0.tmp_0": [4, 4, 14, 14], "data": [4, 28, 28], "depthwise_conv2d_0.tmp_0": [4, 4, 14, 14], "reshape2_0.tmp_0": [4, 4, 14, 14], - "reshape2_2.tmp_0": [1, 1, 43264] - }, { + "reshape2_2.tmp_0": [1, 1, 43264], + }, + { "conv2d_0.tmp_0": [1, 4, 14, 14], "data": [1, 28, 28], "depthwise_conv2d_0.tmp_0": [1, 4, 14, 14], "reshape2_0.tmp_0": [1, 4, 14, 14], - "reshape2_2.tmp_0": [1, 1, 10816] - }, False) + "reshape2_2.tmp_0": [1, 1, 10816], + }, + False, + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -204,23 +218,22 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e-1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1e-1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) data_reshape = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) @@ -232,7 +245,8 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): padding=self.conv_padding, bias_attr=False, use_cudnn=self.use_cudnn, - act=None) + act=None, + ) if self.conv_padding == [1, 1]: cout = fluid.layers.reshape(conv_out, shape=[1, 1, 14400]) elif self.conv_padding == 'VALID': @@ -249,21 +263,25 @@ class 
QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = QuantDequantTensorRTSubgraphPassConvTransposeTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) + self.trt_parameters = ( + QuantDequantTensorRTSubgraphPassConvTransposeTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -277,17 +295,17 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e-1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1e-1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class QuantDequantTensorRTSubgraphPassConvTransValidPaddingTest( - QuantDequantTensorRTSubgraphPassConvTransposeTest): - + QuantDequantTensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 @@ -297,8 +315,8 @@ class QuantDequantTensorRTSubgraphPassConvTransValidPaddingTest( class QuantDequantTensorRTSubgraphPassConvTransSamePaddingTest( - QuantDequantTensorRTSubgraphPassConvTransposeTest): - + QuantDequantTensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 @@ -308,8 +326,8 @@ class QuantDequantTensorRTSubgraphPassConvTransSamePaddingTest( class QuantDequantTensorRTSubgraphPassTransDWConvTest( - QuantDequantTensorRTSubgraphPassConvTransposeTest): - + QuantDequantTensorRTSubgraphPassConvTransposeTest +): def set_params(self): self.conv_num_filters = 64 self.conv_filter_size = 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py index 49367f78e2da4092f72c60656bcd73a6a5331655..c9bf048a044ba4a97f6747dcbd1d626f0d707b47 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_activation.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertActivationTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, batch, attrs: List[Dict[str, Any]]): if dims == 1: return np.random.random([32]).astype(np.float32) @@ -41,11 +39,19 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: for op_type in [ - "relu", 
"sigmoid", "tanh", "relu6", "elu", "selu", - "softsign", "stanh", "thresholded_relu", "softplus" + "relu", + "sigmoid", + "tanh", + "relu6", + "elu", + "selu", + "softsign", + "stanh", + "thresholded_relu", + "softplus", ]: # few samples to reduce time - #for beta in [-0.2, 0.5, 0.67, 3]: + # for beta in [-0.2, 0.5, 0.67, 3]: # for alpha in [-0.2, 0.5, 0.67, 3]: for beta in [0.67]: for alpha in [0.67]: @@ -62,33 +68,34 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): if op_type == "softplus": dics = [{"beta": beta}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dims, batch, dics + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -131,19 +138,23 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py index 34b033b4b7c8070e2788ca7650d8a35cf53ca7ab..a9ba3b11f81551a3acbfe30d7a105a94b9aa1ec4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_affine_channel.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch, dims, attrs: List[Dict[str, Any]]): if dims == 2: return np.ones([batch, 64]).astype(np.float32) @@ -50,42 +48,45 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): self.dims = dims dics = [{"data_layout": data_layout}] - ops_config = [{ - "op_type": "affine_channel", - "op_inputs": { - "X": ["input_data"], - "Scale": ["scale"], - "Bias": ["bias"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "affine_channel", 
+ "op_inputs": { + "X": ["input_data"], + "Scale": ["scale"], + "Bias": ["bias"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "scale": - TensorConfig( - data_gen=partial(generate_weight1, dims, dics)), - "bias": - TensorConfig( - data_gen=partial(generate_weight1, dims, dics)) + "scale": TensorConfig( + data_gen=partial(generate_weight1, dims, dics) + ), + "bias": TensorConfig( + data_gen=partial(generate_weight1, dims, dics) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dims, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, batch, dims, dics + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 2: self.dynamic_shape.min_input_shape = {"input_data": [1, 32]} @@ -132,19 +133,23 @@ class TrtConvertAffineChannelTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_anchor_generator.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_anchor_generator.py index b3e58c204f3222adb3ce47949cfa374a12b97f94..32cb6b64514d09227648a4e12faacc6ea6d1c72c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_anchor_generator.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_anchor_generator.py @@ -22,60 +22,66 @@ from typing import Any, Dict, List class TrtConvertAnchorGeneratorTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch, attrs: List[Dict[str, Any]]): return np.random.random([batch, 3, 64, 64]).astype(np.float32) for batch in [1, 2, 4]: for anchor_sizes in [[64.0, 128.0, 256.0, 512.0]]: for aspect_ratios in [[0.5, 1, 2], [0.4, 1.2, 3]]: - for variances in [[1.0, 1.0, 1.0, 1.0], - [0.5, 1.0, 0.5, 1.0]]: + for variances in [ + [1.0, 1.0, 1.0, 1.0], + [0.5, 1.0, 0.5, 1.0], + ]: for stride in [[16.0, 16.0], [16.0, 32.0]]: for offset in [0.5, 0.8]: - dics = [{ - "anchor_sizes": anchor_sizes, - "aspect_ratios": aspect_ratios, - "variances": variances, - "stride": stride, - "offset": offset - }] - - ops_config = [{ - "op_type": "anchor_generator", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Anchors": ["output_anchors"], - "Variances": ["output_variances"] - }, - "op_attrs": dics[0] - }] + dics = [ + { 
+ "anchor_sizes": anchor_sizes, + "aspect_ratios": aspect_ratios, + "variances": variances, + "stride": stride, + "offset": offset, + } + ] + + ops_config = [ + { + "op_type": "anchor_generator", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": { + "Anchors": ["output_anchors"], + "Variances": ["output_variances"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, batch, dics + ) + ) }, outputs=[ - "output_anchors", "output_variances" - ]) + "output_anchors", + "output_variances", + ], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -100,19 +106,23 @@ class TrtConvertAnchorGeneratorTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py index 3d8d492e89124d317c4d753e24cf8f1d8166257b..a19132571468a0214735107d9ac583c2571b09cd 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py @@ -22,7 +22,6 @@ from typing import List class TrtConvertArgMaxTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: input_shape = program_config.inputs["arg_max_input"].shape axis = program_config.ops[0].attrs["axis"] @@ -33,7 +32,6 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input(rank, batch): dims = [batch] for i in range(rank - 1): @@ -48,36 +46,37 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest): self.rank = rank flatten = False dtype = 2 - ops_config = [{ - "op_type": "arg_max", - "op_inputs": { - "X": ["arg_max_input"] - }, - "op_outputs": { - "Out": ["arg_max_out"] - }, - "op_attrs": { - "axis": axis, - "keepdims": keepdims, - "flatten": flatten, - "dtype": dtype + ops_config = [ + { + "op_type": "arg_max", + "op_inputs": {"X": ["arg_max_input"]}, + "op_outputs": {"Out": ["arg_max_out"]}, + "op_attrs": { + "axis": axis, + "keepdims": keepdims, + "flatten": flatten, + "dtype": dtype, + }, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - 
"arg_max_input": - TensorConfig(data_gen=partial( - generate_input, rank, batch)) + "arg_max_input": TensorConfig( + data_gen=partial( + generate_input, rank, batch + ) + ) }, - outputs=["arg_max_out"]) + outputs=["arg_max_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.rank == 3: self.dynamic_shape.min_input_shape = { @@ -117,19 +116,23 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py index 88d13c062a55de69be4fd211c6d3d8bdee88930a..806ead9bc1912ff8d24d1d58e3b5c8eafbd173f0 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_batch_norm.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertBatchNormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: if attrs[0]['data_layout'] == "NCHW": @@ -62,88 +60,119 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): for momentum in [0.9, 0.8]: self.num_input = num_input self.dims = dims - dics = [{ - "epsilon": epsilon, - "data_layout": data_layout, - "momentum": momentum, - "is_test": True, - "trainable_statistics": False - }, {}] - dics_intput = [{ - "X": ["batch_norm_input"], - "Bias": ["Bias"], - "Mean": ["Mean"], - "Scale": ["Scale"], - "Variance": ["Variance"], - "MomentumTensor": ["MomentumTensor"] - }, { - "X": ["batch_norm_input"], - "Bias": ["Bias"], - "Mean": ["Mean"], - "Scale": ["Scale"], - "Variance": ["Variance"] - }] - dics_intputs = [{ - "Bias": - TensorConfig(data_gen=partial( - generate_bias, dics, batch)), - "Mean": - TensorConfig(data_gen=partial( - generate_mean, dics, batch)), - "Scale": - TensorConfig(data_gen=partial( - generate_scale, dics, batch)), - "Variance": - TensorConfig(data_gen=partial( - generate_variance, dics, batch)), - "MomentumTensor": - TensorConfig(data_gen=partial( - generate_MomentumTensor, dics, batch)), - }, { - "Bias": - TensorConfig(data_gen=partial( - generate_bias, dics, batch)), - "Mean": - TensorConfig(data_gen=partial( - generate_mean, dics, batch)), - "Scale": - TensorConfig(data_gen=partial( - generate_scale, dics, batch)), - "Variance": - TensorConfig(data_gen=partial( - generate_variance, dics, batch)) - }] - ops_config = [{ - "op_type": - "batch_norm", - 
"op_inputs": - dics_intput[num_input], - "op_outputs": { - "Y": ["batch_norm_out"], - "MeanOut": ["Mean"], - "VarianceOut": ["Variance"], - "SavedMean": ["SavedMean"], - "SavedVariance": ["SavedVariance"] + dics = [ + { + "epsilon": epsilon, + "data_layout": data_layout, + "momentum": momentum, + "is_test": True, + "trainable_statistics": False, + }, + {}, + ] + dics_intput = [ + { + "X": ["batch_norm_input"], + "Bias": ["Bias"], + "Mean": ["Mean"], + "Scale": ["Scale"], + "Variance": ["Variance"], + "MomentumTensor": ["MomentumTensor"], + }, + { + "X": ["batch_norm_input"], + "Bias": ["Bias"], + "Mean": ["Mean"], + "Scale": ["Scale"], + "Variance": ["Variance"], }, - "op_attrs": - dics[0] - }] + ] + dics_intputs = [ + { + "Bias": TensorConfig( + data_gen=partial( + generate_bias, dics, batch + ) + ), + "Mean": TensorConfig( + data_gen=partial( + generate_mean, dics, batch + ) + ), + "Scale": TensorConfig( + data_gen=partial( + generate_scale, dics, batch + ) + ), + "Variance": TensorConfig( + data_gen=partial( + generate_variance, dics, batch + ) + ), + "MomentumTensor": TensorConfig( + data_gen=partial( + generate_MomentumTensor, + dics, + batch, + ) + ), + }, + { + "Bias": TensorConfig( + data_gen=partial( + generate_bias, dics, batch + ) + ), + "Mean": TensorConfig( + data_gen=partial( + generate_mean, dics, batch + ) + ), + "Scale": TensorConfig( + data_gen=partial( + generate_scale, dics, batch + ) + ), + "Variance": TensorConfig( + data_gen=partial( + generate_variance, dics, batch + ) + ), + }, + ] + ops_config = [ + { + "op_type": "batch_norm", + "op_inputs": dics_intput[num_input], + "op_outputs": { + "Y": ["batch_norm_out"], + "MeanOut": ["Mean"], + "VarianceOut": ["Variance"], + "SavedMean": ["SavedMean"], + "SavedVariance": ["SavedVariance"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights=dics_intputs[num_input], inputs={ - "batch_norm_input": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)) + "batch_norm_input": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ) }, - outputs=["batch_norm_out"]) + outputs=["batch_norm_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: if attrs[0]['data_layout'] == "NCHW": @@ -202,29 +231,35 @@ class TrtConvertBatchNormTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(program_config.weights) == 5: return True return False - self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, - "INPUT 
MomentumTensor NOT SUPPORT") + self.add_skip_case( + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "INPUT MomentumTensor NOT SUPPORT", + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bilinear_interp_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bilinear_interp_v2.py index 83559d4fe863a04b90f521b21a5df3f81d3a671a..5015e7e36be06c0fb93b2adec2d9ac749f9960fe 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bilinear_interp_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bilinear_interp_v2.py @@ -22,7 +22,6 @@ import unittest class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -33,13 +32,13 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 3, 64, 64]).astype(np.float32) def generate_input2(attrs: List[Dict[str, Any]]): - return np.random.uniform(low=0.5, high=6.0, - size=(2)).astype("float32") + return np.random.uniform(low=0.5, high=6.0, size=(2)).astype( + "float32" + ) for data_layout in ["NCHW", "NHWC"]: for scale_y in [2.0, -1.0, 0.0]: @@ -47,48 +46,55 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest): scale = [scale_y, scale_x] for out_h in [32, 64, 128, 192]: for out_w in [32, 64]: - dics = [{ - "data_layout": data_layout, - "interp_method": "bilinear", - "align_corners": False, - "align_mode": 0, - "scale": scale, - "out_h": out_h, - "out_w": out_w - }] - - ops_config = [{ - "op_type": "bilinear_interp_v2", - "op_inputs": { - "X": ["input_data"], - "Scale": ["input_scale"] - }, - "op_outputs": { - "Out": ["bilinear_interp_v2_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_layout": data_layout, + "interp_method": "bilinear", + "align_corners": False, + "align_mode": 0, + "scale": scale, + "out_h": out_h, + "out_w": out_w, + } + ] + + ops_config = [ + { + "op_type": "bilinear_interp_v2", + "op_inputs": { + "X": ["input_data"], + "Scale": ["input_scale"], + }, + "op_outputs": { + "Out": [ + "bilinear_interp_v2_output_data" + ] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "input_scale": - TensorConfig( - data_gen=partial(generate_input2, dics)) + "input_scale": TensorConfig( + data_gen=partial(generate_input2, dics) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["bilinear_interp_v2_output_data"]) + outputs=["bilinear_interp_v2_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 64, 64]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -110,19 +116,23 @@ class TrtConvertBilinearInterpV2Test(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = 
paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py index 54a2cffe3ad51d7c9c471b149f37e5d3e40596e5..6a7aedc9c9e8dbf695b7a780aea9b1392a0f75a7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py @@ -23,9 +23,7 @@ import os class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest): - def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -35,49 +33,50 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest): input1_shape = [batch, 350, 75] input2_shape = [batch, 75, 25] dics = [{}] - ops_config = [{ - "op_type": "bmm", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "bmm", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig( - data_gen=partial(generate_input, input1_shape)), - "input2_data": - TensorConfig( - data_gen=partial(generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial(generate_input, input1_shape) + ), + "input2_data": TensorConfig( + data_gen=partial(generate_input, input2_shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input1_data": [10, 350, 75], - "input2_data": [10, 75, 25] + "input2_data": [10, 75, 25], } self.dynamic_shape.max_input_shape = { "input1_data": [100, 350, 75], - "input2_data": [100, 75, 25] + "input2_data": [100, 75, 25], } self.dynamic_shape.opt_input_shape = { "input1_data": [15, 350, 75], - "input2_data": [15, 75, 25] + "input2_data": [15, 75, 25], } def clear_dynamic_shape(): @@ -98,25 +97,29 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # The output has little diff between gpu and trt in CI-Windows-Inference tol_fp32 = 1e-4 tol_half = 1e-4 - if (os.name == 'nt'): + if os.name == 'nt': tol_fp32 = 1e-2 tol_half = 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = 
paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), tol_fp32 + attrs, True + ), tol_fp32 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), tol_half + attrs, True + ), tol_half def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_c_allreduce.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_c_allreduce.py index a9f1964e49aebe31db5eca5a54d00b1a917eea65..964234220cf98c75e538f29f72857b2997ad4d3d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_c_allreduce.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_c_allreduce.py @@ -19,14 +19,13 @@ import paddle class TestDistTRT(unittest.TestCase): - def setUp(self): self.init_case() self.script = "test_trt_c_allreduce_infer_script.py" def init_case(self): self.op_type = "c_allreduce_sum" - self.target_value = 4. + self.target_value = 4.0 self.precision = "fp16" def test_run(self): @@ -45,14 +44,13 @@ class TestDistTRT(unittest.TestCase): class TestMin(TestDistTRT): - def init_case(self): self.op_type = "c_allreduce_min" - self.target_value = 2. + self.target_value = 2.0 self.precision = "int8" -#class TestMax(TestDistTRT): +# class TestMax(TestDistTRT): # # def init_case(self): # self.op_type = "c_allreduce_max" @@ -60,7 +58,7 @@ class TestMin(TestDistTRT): # self.precision = "fp16" # # -#class TestProd(TestDistTRT): +# class TestProd(TestDistTRT): # # def init_case(self): # self.op_type = "c_allreduce_prod" diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_cast.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_cast.py index 5c915db7250d2d50814db7f058dd3622cac0209c..76b46313f9590751c3ee31b5ef673e6336ec8a36 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_cast.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_cast.py @@ -22,7 +22,6 @@ from typing import List class TrtConvertCastTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -31,14 +30,13 @@ class TrtConvertCastTest(TrtLayerAutoScanTest): return False if attrs[0]['in_dtype'] in [4, 5] and attrs[0]['out_dtype'] == 4: return False - if attrs[0]['in_dtype'] not in [ - 2, 4, 5 - ] or attrs[0]['out_dtype'] not in [2, 4, 5]: + if attrs[0]['in_dtype'] not in [2, 4, 5] or attrs[0][ + 'out_dtype' + ] not in [2, 4, 5]: return False return True def sample_program_configs(self): - def generate_input(type): if type == 0: return np.ones([1, 3, 64, 64]).astype(np.bool) @@ -53,32 +51,32 @@ class TrtConvertCastTest(TrtLayerAutoScanTest): for out_dtype in [0, 2, 4, 5, 6]: dics = [{"in_dtype": in_dtype, "out_dtype": out_dtype}] - ops_config = [{ - "op_type": "cast", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["cast_output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "cast", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["cast_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input, in_dtype)) + "input_data": TensorConfig( + data_gen=partial(generate_input, in_dtype) + ) }, - 
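# Each dynamic-shape test above fills three dicts keyed by input name:
# min/max bound every dimension TensorRT may see at run time and opt is the
# shape the engine is tuned for; clear_dynamic_shape() resets all three to {}
# so the first yields exercise the static-shape path. The values below are
# copied from the bmm hunk; the consistency check is only illustrative.
dynamic_shape = {
    "min_input_shape": {"input1_data": [10, 350, 75], "input2_data": [10, 75, 25]},
    "max_input_shape": {"input1_data": [100, 350, 75], "input2_data": [100, 75, 25]},
    "opt_input_shape": {"input1_data": [15, 350, 75], "input2_data": [15, 75, 25]},
}
for name, opt in dynamic_shape["opt_input_shape"].items():
    lo = dynamic_shape["min_input_shape"][name]
    hi = dynamic_shape["max_input_shape"][name]
    assert all(lo_i <= opt_i <= hi_i for lo_i, opt_i, hi_i in zip(lo, opt, hi))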
outputs=["cast_output_data"]) + outputs=["cast_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 64, 64]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -100,19 +98,23 @@ class TrtConvertCastTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py index ad1dce6cdb2dfd8d3fc6961785bac8fcaa085c2e..18d5adb284bf25d012d34a25e1f62eace8d202cf 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py @@ -22,12 +22,10 @@ import unittest class TrtConvertClipTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, batch, attrs: List[Dict[str, Any]]): if dims == 1: return np.ones([32]).astype(np.float32) @@ -46,52 +44,52 @@ class TrtConvertClipTest(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: - for op_inputs in [{ - "X": ["input_data"] - }, { - "X": ["input_data"], - "Min": ["Min_"], - "Max": ["Max_"] - }]: + for op_inputs in [ + {"X": ["input_data"]}, + {"X": ["input_data"], "Min": ["Min_"], "Max": ["Max_"]}, + ]: self.input_num = len(op_inputs) self.dims = dims - dics = [{ - "min": np.random.uniform(1, 10), - "max": np.random.uniform(10, 20) - }, { - "op_inputs": op_inputs - }] - ops_config = [{ - "op_type": "clip", - "op_inputs": op_inputs, - "op_outputs": { - "Out": ["output_data"] + dics = [ + { + "min": np.random.uniform(1, 10), + "max": np.random.uniform(10, 20), }, - "op_attrs": dics[0] - }] + {"op_inputs": op_inputs}, + ] + ops_config = [ + { + "op_type": "clip", + "op_inputs": op_inputs, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "Min_": - TensorConfig( - data_gen=partial(generate_weight1, dics)), - "Max_": - TensorConfig( - data_gen=partial(generate_weight2, dics)) + "Min_": TensorConfig( + data_gen=partial(generate_weight1, dics) + ), + "Max_": TensorConfig( + data_gen=partial(generate_weight2, dics) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dims, batch, dics + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) 
yield program_config def sample_predictor_configs(self, program_config): - def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -135,19 +133,23 @@ class TrtConvertClipTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py index af4d479f8f21b8ac48549b26a33f7d61945cc9de..46a7b82ef4d7809c85b044bd1f4e76ada5e4f9dc 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertConcatTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -31,14 +30,13 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than or equal to the set axis. + # The input dimension should be less than or equal to the set axis. 
if len(inputs['concat_input1'].shape) <= attrs[0]['axis']: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -79,58 +77,83 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): self.num_input = num_input self.dims = dims dics = [{"axis": axis}, {}] - dics_intput = [{ - "X": - ["concat_input1", "concat_input2", "concat_input3"], - "AxisTensor": ["AxisTensor"], - }, { - "X": - ["concat_input1", "concat_input2", "concat_input3"] - }] - dics_inputs = [{ - "concat_input1": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)), - "concat_input2": - TensorConfig( - data_gen=partial(generate_input2, dics, batch)), - "concat_input3": - TensorConfig( - data_gen=partial(generate_input3, dics, batch)), - "AxisTensor": - TensorConfig( - data_gen=partial(generate_weight1, dics)) - }, { - "concat_input1": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)), - "concat_input2": - TensorConfig( - data_gen=partial(generate_input2, dics, batch)), - "concat_input3": - TensorConfig( - data_gen=partial(generate_input3, dics, batch)) - }] - ops_config = [{ - "op_type": "concat", - "op_inputs": dics_intput[num_input], - "op_outputs": { - "Out": ["concat_output"] + dics_intput = [ + { + "X": [ + "concat_input1", + "concat_input2", + "concat_input3", + ], + "AxisTensor": ["AxisTensor"], + }, + { + "X": [ + "concat_input1", + "concat_input2", + "concat_input3", + ] + }, + ] + dics_inputs = [ + { + "concat_input1": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "concat_input2": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ) + ), + "concat_input3": TensorConfig( + data_gen=partial( + generate_input3, dics, batch + ) + ), + "AxisTensor": TensorConfig( + data_gen=partial(generate_weight1, dics) + ), + }, + { + "concat_input1": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "concat_input2": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ) + ), + "concat_input3": TensorConfig( + data_gen=partial( + generate_input3, dics, batch + ) + ), }, - "op_attrs": dics[0] - }] + ] + ops_config = [ + { + "op_type": "concat", + "op_inputs": dics_intput[num_input], + "op_outputs": {"Out": ["concat_output"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs=dics_inputs[num_input], - outputs=["concat_output"]) + outputs=["concat_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.num_input == 0: if self.dims == 4: @@ -138,76 +161,76 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): "concat_input1": [1, 3, 24, 24], "concat_input2": [1, 3, 24, 24], "concat_input3": [1, 3, 24, 24], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.max_input_shape = { "concat_input1": [4, 3, 48, 48], "concat_input2": [4, 3, 48, 48], "concat_input3": [4, 3, 48, 48], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 3, 24, 24], "concat_input2": [1, 3, 24, 24], "concat_input3": [1, 3, 24, 24], - "AxisTensor": [1] + "AxisTensor": [1], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "concat_input1": [1, 3, 24], "concat_input2": [1, 3, 24], 
"concat_input3": [1, 3, 24], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.max_input_shape = { "concat_input1": [4, 12, 48], "concat_input2": [4, 12, 48], "concat_input3": [4, 12, 48], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 3, 24], "concat_input2": [1, 3, 24], "concat_input3": [1, 3, 24], - "AxisTensor": [1] + "AxisTensor": [1], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "concat_input1": [1, 24], "concat_input2": [1, 24], "concat_input3": [1, 24], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.max_input_shape = { "concat_input1": [4, 48], "concat_input2": [4, 48], "concat_input3": [4, 48], - "AxisTensor": [1] + "AxisTensor": [1], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 24], "concat_input2": [1, 24], "concat_input3": [1, 24], - "AxisTensor": [1] + "AxisTensor": [1], } elif self.dims == 1: self.dynamic_shape.min_input_shape = { "concat_input1": [24], "concat_input2": [24], "concat_input3": [24], - "AxisTensor": [0] + "AxisTensor": [0], } self.dynamic_shape.max_input_shape = { "concat_input1": [48], "concat_input2": [48], "concat_input3": [48], - "AxisTensor": [0] + "AxisTensor": [0], } self.dynamic_shape.opt_input_shape = { "concat_input1": [24], "concat_input2": [24], "concat_input3": [24], - "AxisTensor": [0] + "AxisTensor": [0], } elif self.num_input == 1: if self.dims == 4: @@ -219,60 +242,60 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): self.dynamic_shape.max_input_shape = { "concat_input1": [4, 3, 48, 48], "concat_input2": [4, 3, 48, 48], - "concat_input3": [4, 3, 48, 48] + "concat_input3": [4, 3, 48, 48], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 3, 24, 24], "concat_input2": [1, 3, 24, 24], - "concat_input3": [1, 3, 24, 24] + "concat_input3": [1, 3, 24, 24], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "concat_input1": [1, 3, 24], "concat_input2": [1, 3, 24], - "concat_input3": [1, 3, 24] + "concat_input3": [1, 3, 24], } self.dynamic_shape.max_input_shape = { "concat_input1": [4, 12, 48], "concat_input2": [4, 12, 48], - "concat_input3": [4, 12, 48] + "concat_input3": [4, 12, 48], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 3, 24], "concat_input2": [1, 3, 24], - "concat_input3": [1, 3, 24] + "concat_input3": [1, 3, 24], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "concat_input1": [1, 24], "concat_input2": [1, 24], - "concat_input3": [1, 24] + "concat_input3": [1, 24], } self.dynamic_shape.max_input_shape = { "concat_input1": [4, 48], "concat_input2": [4, 48], - "concat_input3": [4, 48] + "concat_input3": [4, 48], } self.dynamic_shape.opt_input_shape = { "concat_input1": [1, 24], "concat_input2": [1, 24], - "concat_input3": [1, 24] + "concat_input3": [1, 24], } elif self.dims == 1: self.dynamic_shape.min_input_shape = { "concat_input1": [24], "concat_input2": [24], - "concat_input3": [24] + "concat_input3": [24], } self.dynamic_shape.max_input_shape = { "concat_input1": [48], "concat_input2": [48], - "concat_input3": [48] + "concat_input3": [48], } self.dynamic_shape.opt_input_shape = { "concat_input1": [24], "concat_input2": [24], - "concat_input3": [24] + "concat_input3": [24], } def clear_dynamic_shape(): @@ -296,29 +319,33 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 
self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(program_config.inputs) == 4: return True return False - self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, - "INPUT AxisTensor NOT SUPPORT") + self.add_skip_case( + teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT AxisTensor NOT SUPPORT" + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py index 0682477357e0cc62340bb333220a4595f8c3e50f..364d8a1b6bb8ee4c2dfb12367adc76cb128aaff2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d.py @@ -23,7 +23,6 @@ from program_config import TensorConfig, ProgramConfig class TrtConvertConv2dTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -31,14 +30,17 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['conv2d_weight'].shape[1] * attrs[0]['groups'] + ): return False ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 < 7000: if attrs[0]['padding_algorithm'] == 'SAME' and ( - attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1): + attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1 + ): return False return True @@ -47,8 +49,12 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): self.trt_param.workspace_size = 1073741824 def generate_input1(batch, attrs: List[Dict[str, Any]]): - return np.ones([batch, attrs[0]['groups'] * 3, 64, 64]).astype( - np.float32) / 4 + return ( + np.ones([batch, attrs[0]['groups'] * 3, 64, 64]).astype( + np.float32 + ) + / 4 + ) def generate_weight1(attrs: List[Dict[str, Any]]): return np.random.random([9, 3, 3, 3]).astype(np.float32) - 0.5 @@ -71,73 +77,82 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): data_format_options, ] - for batch, strides, paddings, groups, padding_algorithm, dilations, data_format in itertools.product( - *configurations): - - attrs = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format, - }, {}] - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["conv_output_data"] + for ( + batch, + strides, + paddings, + groups, + padding_algorithm, + dilations, + data_format, + ) in itertools.product(*configurations): + + attrs = [ + { + "data_fromat": data_format, + "dilations": dilations, + 
"padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + "data_format": data_format, }, - "op_attrs": attrs[0] - }, { - "op_type": "relu", - "op_inputs": { - "X": ["conv_output_data"] + {}, + ] + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": {"Output": ["conv_output_data"]}, + "op_attrs": attrs[0], }, - "op_outputs": { - "Out": ["output_data"] + { + "op_type": "relu", + "op_inputs": {"X": ["conv_output_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": attrs[1], }, - "op_attrs": attrs[1] - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight1, attrs)) + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight1, attrs) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, batch, attrs)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, attrs) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): input_groups = attrs[0]['groups'] * 3 self.dynamic_shape.min_input_shape = { "input_data": [1, input_groups, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, input_groups, 64, 64], - "output_data": [4, 24, 64, 64] + "output_data": [4, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, input_groups, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } def clear_dynamic_shape(): @@ -156,25 +171,31 @@ class TrtConvertConv2dTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-2, 1e-2) + attrs, False + ), (1e-2, 1e-2) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-2, 1e-2) + attrs, True + ), (1e-2, 1e-2) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py index 420b3a5cfaa3761aef7d2e82a09213c448b42095..ab3af8e24aa2ed51a03103bdd71e44ffa0b9053b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_fusion.py @@ -23,7 +23,6 @@ from trt_layer_auto_scan_test import TrtLayerAutoScanTest class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -31,8 +30,10 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['conv2d_weight'].shape[1] * attrs[0]['groups'] + ): return False if attrs[0]['groups'] <= 1: @@ -41,7 +42,8 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 < 7000: if attrs[0]['padding_algorithm'] == 'SAME' and ( - attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1): + attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1 + ): return False return True @@ -50,8 +52,9 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): self.trt_param.workspace_size = 1073741824 def generate_input1(batch, attrs: List[Dict[str, Any]]): - return np.ones([batch, attrs[0]['groups'] * 3, 64, - 64]).astype(np.float32) + return np.ones([batch, attrs[0]['groups'] * 3, 64, 64]).astype( + np.float32 + ) def generate_weight1(attrs: List[Dict[str, Any]]): return np.random.random([24, 3, 3, 3]).astype(np.float32) @@ -77,77 +80,87 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): data_format_options, ] - for (batch, strides, paddings, groups, padding_algorithm, dilations, - data_format) in product(*configurations): - - attrs = [{ - "strides": strides, - "paddings": paddings, - "groups": groups, - "padding_algorithm": padding_algorithm, - "dilations": dilations, - "data_format": data_format, - }, { - "axis": 1 - }] - - ops_config = [{ - "op_type": "conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["conv_output_data"] + for ( + batch, + strides, + paddings, + groups, + padding_algorithm, + dilations, + data_format, + ) in product(*configurations): + + attrs = [ + { + "strides": strides, + "paddings": paddings, + "groups": groups, + "padding_algorithm": padding_algorithm, + "dilations": dilations, + "data_format": data_format, }, - "op_attrs": attrs[0] - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["conv_output_data"], - "Y": ["elementwise_weight"] + {"axis": 1}, + ] + + ops_config = [ + { + "op_type": "conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": {"Output": ["conv_output_data"]}, + "op_attrs": attrs[0], }, - "op_outputs": { - "Out": ["output_data"] + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["conv_output_data"], + "Y": ["elementwise_weight"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": attrs[1], }, - "op_attrs": attrs[1] - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight1, attrs)), - "elementwise_weight": - TensorConfig(data_gen=partial(generate_weight2, attrs)) + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight1, attrs) + ), + "elementwise_weight": TensorConfig( + data_gen=partial(generate_weight2, attrs) + ), }, inputs={ - "input_data": - TensorConfig( - 
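# The conv2d and conv2d_fusion tests above replace their nested loops with a
# single itertools.product over the option lists, and black then formats the
# long tuple-unpacking target one name per line. A shortened, self-contained
# version of that sweep (fewer options than in the hunks, same structure):
from itertools import product

strides_options = [[1, 1], [2, 2]]
paddings_options = [[0, 3], [1, 2, 3, 4]]
groups_options = [1, 3]
configurations = [strides_options, paddings_options, groups_options]

for (
    strides,
    paddings,
    groups,
) in product(*configurations):
    attrs = {"strides": strides, "paddings": paddings, "groups": groups}

assert len(list(product(*configurations))) == 8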
data_gen=partial(generate_input1, batch, attrs)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, attrs) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): input_groups = attrs[0]['groups'] * 3 self.dynamic_shape.min_input_shape = { "input_data": [1, input_groups, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [2, input_groups, 64, 64], - "output_data": [2, 24, 64, 64] + "output_data": [2, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, input_groups, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } def clear_dynamic_shape(): @@ -166,25 +179,31 @@ class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py index 3103a105de2aa244242fd1e6bfaced46e0cdbc33..bb0f3cfc3a80d04c48e81e0a66b7f9050226fb53 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -30,8 +29,10 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['conv2d_weight'].shape[1] * attrs[0]['groups'] + ): return False if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[0]: @@ -54,12 +55,13 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): def generate_weight1(num_channels, attrs: List[Dict[str, Any]]): if attrs[0]['groups'] == 1: - return np.random.random([num_channels, 
num_channels, 3, - 3]).astype(np.float32) + return np.random.random( + [num_channels, num_channels, 3, 3] + ).astype(np.float32) else: return np.random.random( - [num_channels, int(num_channels / 2), 3, - 3]).astype(np.float32) + [num_channels, int(num_channels / 2), 3, 3] + ).astype(np.float32) for num_channels in [2, 4, 6]: for batch in [1, 4]: @@ -67,99 +69,113 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): for paddings in [[0, 3], [1, 2, 3, 4]]: for groups in [2]: for padding_algorithm in [ - 'EXPLICIT', 'SAME', 'VALID' + 'EXPLICIT', + 'SAME', + 'VALID', ]: for dilations in [[2, 2], [1, 2]]: for data_format in ['NCHW']: self.num_channels = num_channels - dics = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": - padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format, - "output_size": [], - "output_padding": [] - }] - - ops_config = [{ - "op_type": "conv2d_transpose", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_fromat": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + "strides": strides, + "data_format": data_format, + "output_size": [], + "output_padding": [], + } + ] + + ops_config = [ + { + "op_type": "conv2d_transpose", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": { + "Output": ["output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config( - ops_config) + ops_config + ) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial( - generate_weight1, - num_channels, dics)) + "conv2d_weight": TensorConfig( + data_gen=partial( + generate_weight1, + num_channels, + dics, + ) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, - num_channels, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, + batch, + num_channels, + dics, + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.num_channels == 2: self.dynamic_shape.min_input_shape = { "input_data": [1, 2, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, 2, 64, 64], - "output_data": [4, 24, 64, 64] + "output_data": [4, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, 2, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } elif self.num_channels == 4: self.dynamic_shape.min_input_shape = { "input_data": [1, 4, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, 4, 64, 64], - "output_data": [4, 24, 64, 64] + "output_data": [4, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, 4, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } else: self.dynamic_shape.min_input_shape = { "input_data": [1, 6, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, 6, 64, 64], - 
"output_data": [4, 24, 64, 64] + "output_data": [4, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, 6, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } def clear_dynamic_shape(): @@ -178,10 +194,12 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, False), (1e-5, 1e-5) @@ -190,24 +208,26 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, True), (1e-5, 1e-5) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if self.trt_param.precision == paddle_infer.PrecisionType.Int8: return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When precisionType is int8 without relu op, output is different between Trt and Paddle." 
+ teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "When precisionType is int8 without relu op, output is different between Trt and Paddle.", ) def test(self): @@ -221,7 +241,6 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest): # Special case class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000: @@ -241,49 +260,52 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest): batch = 1 self.num_channels = num_channels - dics = [{ - "data_fromat": 'NCHW', - "dilations": [1, 1], - "padding_algorithm": 'EXPLICIT', - "groups": 1, - "paddings": [1, 1], - "strides": [2, 2], - "output_padding": [1, 1], - "output_size": [], - }] - - ops_config = [{ - "op_type": "conv2d_transpose", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_fromat": 'NCHW', + "dilations": [1, 1], + "padding_algorithm": 'EXPLICIT', + "groups": 1, + "paddings": [1, 1], + "strides": [2, 2], + "output_padding": [1, 1], + "output_size": [], + } + ] + + ops_config = [ + { + "op_type": "conv2d_transpose", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": {"Output": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig( - data_gen=partial(generate_weight1, num_channels, dics)) + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight1, num_channels, dics) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, batch, - num_channels, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, num_channels, dics) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 128, 20, 30], @@ -311,19 +333,23 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-4 + attrs, False + ), 1e-4 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e0, 1e-3) + attrs, False + ), (1e0, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-4 + attrs, True + ), 1e-4 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e0, 1e-3) + attrs, True + ), (1e0, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py index a19405cdec917f1be61c6fe3691d103656183471..d0edb4fba0e5513a2d33a1d96d82f4346d5dab9b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py 
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv3d_transpose.py @@ -23,7 +23,6 @@ from typing import Any, Dict, List # Special case class TrtConvertConv3dTransposeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8400: @@ -37,56 +36,60 @@ class TrtConvertConv3dTransposeTest(TrtLayerAutoScanTest): return np.ones([batch, num_channels, 4, 20, 30]).astype(np.float32) def generate_weight1(num_channels, attrs: List[Dict[str, Any]]): - return np.random.random([num_channels, 64, 3, 3, - 3]).astype(np.float32) + return np.random.random([num_channels, 64, 3, 3, 3]).astype( + np.float32 + ) num_channels = 128 batch = 1 # in_channels self.num_channels = num_channels - dics = [{ - "data_fromat": 'NCHW', - "dilations": [1, 1, 1], - "padding_algorithm": 'EXPLICIT', - "groups": 1, - "paddings": [1, 1, 1], - "strides": [2, 2, 2], - "output_padding": [1, 1, 1], - "output_size": [], - }] - - ops_config = [{ - "op_type": "conv3d_transpose", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv3d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_fromat": 'NCHW', + "dilations": [1, 1, 1], + "padding_algorithm": 'EXPLICIT', + "groups": 1, + "paddings": [1, 1, 1], + "strides": [2, 2, 2], + "output_padding": [1, 1, 1], + "output_size": [], + } + ] + + ops_config = [ + { + "op_type": "conv3d_transpose", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv3d_weight"], + }, + "op_outputs": {"Output": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv3d_weight": - TensorConfig( - data_gen=partial(generate_weight1, num_channels, dics)) + "conv3d_weight": TensorConfig( + data_gen=partial(generate_weight1, num_channels, dics) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, batch, - num_channels, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, num_channels, dics) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 128, 4, 20, 30], @@ -114,13 +117,15 @@ class TrtConvertConv3dTransposeTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_deformable_conv.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_deformable_conv.py index f50f1704875b00176c0576c4639d7c6465925f56..e8fb7142b4d57142c0f292eef4580deeb17a02e1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_deformable_conv.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_deformable_conv.py @@ -22,7 +22,6 @@ 
import unittest class TrtConvertDeformableConvTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -30,114 +29,158 @@ class TrtConvertDeformableConvTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['filter_data'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['filter_data'].shape[1] * attrs[0]['groups'] + ): return False return True def sample_program_configs(self): - - def compute_output_size(input_size: List[int], kernel_sizes: List[int], - attrs: List[Dict[str, Any]]): + def compute_output_size( + input_size: List[int], + kernel_sizes: List[int], + attrs: List[Dict[str, Any]], + ): strides = attrs[0]['strides'] paddings = attrs[0]['paddings'] dilations = attrs[0]['dilations'] output_size = [] - for i, k, s, p, d in zip(input_size, kernel_sizes, strides, - paddings, dilations): + for i, k, s, p, d in zip( + input_size, kernel_sizes, strides, paddings, dilations + ): k = d * (k - 1) + 1 output_size.append((i + 2 * p - k) // s + 1) return output_size - def generate_input1(batch: int, input_size: List[int], - kernel_sizes: List[int], attrs: List[Dict[str, - Any]]): + def generate_input1( + batch: int, + input_size: List[int], + kernel_sizes: List[int], + attrs: List[Dict[str, Any]], + ): return np.random.random([batch, 3] + input_size).astype(np.float32) - def generate_offset1(batch: int, input_size: List[int], - kernel_sizes: List[int], attrs: List[Dict[str, - Any]]): + def generate_offset1( + batch: int, + input_size: List[int], + kernel_sizes: List[int], + attrs: List[Dict[str, Any]], + ): output_size = compute_output_size(input_size, kernel_sizes, attrs) - return np.random.random([batch, 2 * np.prod(kernel_sizes)] + - output_size).astype(np.float32) - - def generate_mask1(batch: int, input_size: List[int], - kernel_sizes: List[int], attrs: List[Dict[str, - Any]]): + return np.random.random( + [batch, 2 * np.prod(kernel_sizes)] + output_size + ).astype(np.float32) + + def generate_mask1( + batch: int, + input_size: List[int], + kernel_sizes: List[int], + attrs: List[Dict[str, Any]], + ): output_size = compute_output_size(input_size, kernel_sizes, attrs) - return np.random.random([batch, np.prod(kernel_sizes)] + - output_size).astype(np.float32) - - def generate_filter1(batch: int, input_size: List[int], - kernel_sizes: List[int], attrs: List[Dict[str, - Any]]): + return np.random.random( + [batch, np.prod(kernel_sizes)] + output_size + ).astype(np.float32) + + def generate_filter1( + batch: int, + input_size: List[int], + kernel_sizes: List[int], + attrs: List[Dict[str, Any]], + ): return np.random.random([6, 3] + kernel_sizes).astype(np.float32) for batch in [ - 1, + 1, ]: for input_size in [[32, 32]]: for kernel_sizes in [[3, 3]]: for strides in [[1, 1], [2, 2]]: for paddings in [[1, 1], [0, 2]]: for groups in [ - 1, + 1, ]: for dilations in [[1, 1], [2, 2]]: - dics = [{ - "strides": strides, - "paddings": paddings, - "groups": groups, - "dilations": dilations, - "deformable_groups": 1, - "im2col_step": 1 - }] - - ops_config = [{ - "op_type": "deformable_conv", - "op_inputs": { - "Input": ["input_data"], - "Offset": ["offset_data"], - "Mask": ["mask_data"], - "Filter": ["filter_data"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "strides": strides, + "paddings": paddings, + "groups": groups, 
+ "dilations": dilations, + "deformable_groups": 1, + "im2col_step": 1, + } + ] + + ops_config = [ + { + "op_type": "deformable_conv", + "op_inputs": { + "Input": ["input_data"], + "Offset": ["offset_data"], + "Mask": ["mask_data"], + "Filter": ["filter_data"], + }, + "op_outputs": { + "Output": ["output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "filter_data": - TensorConfig(data_gen=partial( - generate_filter1, batch, input_size, - kernel_sizes, dics)) + "filter_data": TensorConfig( + data_gen=partial( + generate_filter1, + batch, + input_size, + kernel_sizes, + dics, + ) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, input_size, - kernel_sizes, dics)), - "offset_data": - TensorConfig(data_gen=partial( - generate_offset1, batch, input_size, - kernel_sizes, dics)), - "mask_data": - TensorConfig(data_gen=partial( - generate_mask1, batch, input_size, - kernel_sizes, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, + batch, + input_size, + kernel_sizes, + dics, + ) + ), + "offset_data": TensorConfig( + data_gen=partial( + generate_offset1, + batch, + input_size, + kernel_sizes, + dics, + ) + ), + "mask_data": TensorConfig( + data_gen=partial( + generate_mask1, + batch, + input_size, + kernel_sizes, + dics, + ) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def clear_dynamic_shape(): self.dynamic_shape.min_input_shape = {} self.dynamic_shape.max_input_shape = {} @@ -158,7 +201,8 @@ class TrtConvertDeformableConvTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 def test(self): self.trt_param.workspace_size = 1 << 28 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py index 90afa7fc1f05aec23061c6d69255b812c9542a8a..8fd03ae830a7d73741d078e71cb9a3baa372f684 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d.py @@ -23,7 +23,6 @@ import paddle.inference as paddle_infer class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -31,8 +30,10 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['conv2d_weight'].shape[1] * attrs[0]['groups'] + ): return False return True @@ -65,61 +66,72 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): data_format_options, ] - for (batch, strides, paddings, groups, padding_algorithm, dilations, - data_format) in itertools.product(*configurations): - attrs = [{ - "strides": strides, - "paddings": paddings, - "groups": groups, - "padding_algorithm": padding_algorithm, - "dilations": dilations, - 
"data_fromat": data_format, - }] - - ops_config = [{ - "op_type": "depthwise_conv2d", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": attrs[0] - }] + for ( + batch, + strides, + paddings, + groups, + padding_algorithm, + dilations, + data_format, + ) in itertools.product(*configurations): + attrs = [ + { + "strides": strides, + "paddings": paddings, + "groups": groups, + "padding_algorithm": padding_algorithm, + "dilations": dilations, + "data_fromat": data_format, + } + ] + + ops_config = [ + { + "op_type": "depthwise_conv2d", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": {"Output": ["output_data"]}, + "op_attrs": attrs[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial(generate_weight1, attrs)) + "conv2d_weight": TensorConfig( + data_gen=partial(generate_weight1, attrs) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, batch, attrs)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, attrs) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): groups = attrs[0]['groups'] self.dynamic_shape.min_input_shape = { "input_data": [1, groups, 32, 32], - "output_data": [1, 24, 32, 32] + "output_data": [1, 24, 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, groups, 64, 64], - "output_data": [4, 24, 64, 64] + "output_data": [4, 24, 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, groups, 64, 64], - "output_data": [1, 24, 64, 64] + "output_data": [1, 24, 64, 64], } def clear_dynamic_shape(): @@ -139,35 +151,44 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, - 1e-3) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-3, + 1e-3, + ) self.trt_param.precision = paddle_infer.PrecisionType.Int8 - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, - 1e-3) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-3, + 1e-3, + ) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num(), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, - 1e-3) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-3, + 1e-3, + ) self.trt_param.precision = paddle_infer.PrecisionType.Int8 - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, - 1e-3) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-3, + 1e-3, + ) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if program_config.ops[0].attrs[ - 'padding_algorithm'] == "SAME" or program_config.ops[ - 0].attrs['padding_algorithm'] == "VALID": + if ( + 
program_config.ops[0].attrs['padding_algorithm'] == "SAME" + or program_config.ops[0].attrs['padding_algorithm'] == "VALID" + ): return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When padding_algorithm is 'SAME' or 'VALID', Trt dose not support. In this case, trt build error is caused by scale op." + teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "When padding_algorithm is 'SAME' or 'VALID', Trt dose not support. In this case, trt build error is caused by scale op.", ) def teller2(program_config, predictor_config): @@ -176,8 +197,9 @@ class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest): return False self.add_skip_case( - teller2, SkipReasons.TRT_NOT_IMPLEMENTED, - "When precisionType is int8 without relu op, output is different between Trt and Paddle." + teller2, + SkipReasons.TRT_NOT_IMPLEMENTED, + "When precisionType is int8 without relu op, output is different between Trt and Paddle.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py index 417849ded555f704f28725665dff2fa4bad62539..345d546ae640f95ba121e216e40d08e70342eec1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_depthwise_conv2d_transpose.py @@ -22,7 +22,6 @@ import unittest class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -30,8 +29,10 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if inputs['input_data'].shape[ - 1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']: + if ( + inputs['input_data'].shape[1] + != weights['conv2d_weight'].shape[1] * attrs[0]['groups'] + ): return False if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[1]: @@ -53,12 +54,14 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): self.trt_param.workspace_size = 1073741824 def generate_input1(batch, attrs: List[Dict[str, Any]]): - return np.ones([batch, attrs[0]['groups'], 64, - 64]).astype(np.float32) + return np.ones([batch, attrs[0]['groups'], 64, 64]).astype( + np.float32 + ) def generate_weight1(attrs: List[Dict[str, Any]]): - return np.random.random([attrs[0]['groups'], 1, 3, - 3]).astype(np.float32) + return np.random.random([attrs[0]['groups'], 1, 3, 3]).astype( + np.float32 + ) for batch in [1, 2, 4]: for strides in [[1, 1], [2, 2], [1, 2]]: @@ -68,62 +71,71 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): for dilations in [[1, 1], [2, 2], [1, 2]]: for data_format in ['NCHW']: - dics = [{ - "data_fromat": data_format, - "dilations": dilations, - "padding_algorithm": padding_algorithm, - "groups": groups, - "paddings": paddings, - "strides": strides, - "data_format": data_format, - "output_size": [], - "output_padding": [] - }] - - ops_config = [{ - "op_type": "conv2d_transpose", - "op_inputs": { - "Input": ["input_data"], - "Filter": ["conv2d_weight"] - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_fromat": data_format, + "dilations": dilations, + "padding_algorithm": padding_algorithm, + "groups": groups, + "paddings": paddings, + 
"strides": strides, + "data_format": data_format, + "output_size": [], + "output_padding": [], + } + ] + + ops_config = [ + { + "op_type": "conv2d_transpose", + "op_inputs": { + "Input": ["input_data"], + "Filter": ["conv2d_weight"], + }, + "op_outputs": { + "Output": ["output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "conv2d_weight": - TensorConfig(data_gen=partial( - generate_weight1, dics)) + "conv2d_weight": TensorConfig( + data_gen=partial( + generate_weight1, dics + ) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, batch, dics + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, attrs[0]['groups'], 32, 32], - "output_data": [1, attrs[0]['groups'], 32, 32] + "output_data": [1, attrs[0]['groups'], 32, 32], } self.dynamic_shape.max_input_shape = { "input_data": [4, attrs[0]['groups'], 64, 64], - "output_data": [4, attrs[0]['groups'], 64, 64] + "output_data": [4, attrs[0]['groups'], 64, 64], } self.dynamic_shape.opt_input_shape = { "input_data": [1, attrs[0]['groups'], 64, 64], - "output_data": [1, attrs[0]['groups'], 64, 64] + "output_data": [1, attrs[0]['groups'], 64, 64], } def clear_dynamic_shape(): @@ -142,10 +154,12 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, False), (1e-5, 1e-5) @@ -154,24 +168,26 @@ class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) # self.trt_param.precision = paddle_infer.PrecisionType.Int8 # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, True), (1e-5, 1e-5) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if self.trt_param.precision == paddle_infer.PrecisionType.Int8: return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "When precisionType is int8 without relu op, output is different between Trt and Paddle." 
+ teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "When precisionType is int8 without relu op, output is different between Trt and Paddle.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py index 3f843c773cb7150d0e1c4972ab19b91c1343a962..91b8380d7d6127347e2d8c98c48f02ea0a67af67 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertDropoutTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, batch, attrs: List[Dict[str, Any]]): if dims == 1: return np.ones([64]).astype(np.float32) @@ -42,47 +40,57 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest): for batch in [1, 2, 4]: for fix_seed in [False, True]: for dropout_implementation in [ - "downgrade_in_infer", "upscale_in_train" + "downgrade_in_infer", + "upscale_in_train", ]: for dropout_prob in [np.random.random()]: for seed in [0, 64, 128, 512]: self.dims = dims - dics = [{ - "fix_seed": fix_seed, - "dropout_implementation": - dropout_implementation, - "dropout_prob": dropout_prob, - "seed": seed, - "is_test": True - }] - - ops_config = [{ - "op_type": "dropout", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Out": ["dropout_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "fix_seed": fix_seed, + "dropout_implementation": dropout_implementation, + "dropout_prob": dropout_prob, + "seed": seed, + "is_test": True, + } + ] + + ops_config = [ + { + "op_type": "dropout", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": { + "Out": ["dropout_output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, + dims, + batch, + dics, + ) + ) }, - outputs=["dropout_output_data"]) + outputs=["dropout_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -128,19 +136,23 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py index f9d7f0b9079e315c92e644d8bcb66f363a093fd9..36990554755077630a675656998d8cb5dd2efb84 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py @@ -24,12 +24,10 @@ from typing import List # This is the special test case with weight including batch dimension # I don't want to mess up the code written by others, so I wrote a class specifically class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -39,44 +37,50 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): for batch in [1, 4]: for shape in [[batch, 32, 16, 32]]: for op_type in [ - "elementwise_add", "elementwise_mul", "elementwise_sub", - "elementwise_div", "elementwise_pow", "elementwise_min", - "elementwise_max" + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: for axis in [-1]: self.dims = len(shape) dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data"], - "Y": ["weight"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": { + "X": ["input_data"], + "Y": ["weight"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "weight": - TensorConfig(data_gen=partial(generate_weight)) + "weight": TensorConfig( + data_gen=partial(generate_weight) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, shape)), + "input_data": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The input.dims[1] must be equal to the weight's length. 
if self.dims == 4: @@ -106,19 +110,23 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -130,12 +138,10 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest): # This is the special test case class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -144,44 +150,47 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): for shape in [[32]]: for op_type in [ - "elementwise_add", "elementwise_mul", "elementwise_sub", - "elementwise_div", "elementwise_pow", "elementwise_min", - "elementwise_max" + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: for axis in [-1]: self.dims = len(shape) dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data"], - "Y": ["weight"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": {"X": ["input_data"], "Y": ["weight"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "weight": - TensorConfig(data_gen=partial(generate_weight)) + "weight": TensorConfig( + data_gen=partial(generate_weight) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, shape)), + "input_data": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [32]} self.dynamic_shape.max_input_shape = {"input_data": [64]} @@ -205,19 +214,23 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) 
self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -228,12 +241,10 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest): class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -241,47 +252,57 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): return np.random.randn(32).astype(np.float32) for batch in [1, 4]: - for shape in [[32], [batch, 32], [batch, 32, 32], - [batch, 32, 16, 32]]: + for shape in [ + [32], + [batch, 32], + [batch, 32, 32], + [batch, 32, 16, 32], + ]: for op_type in [ - "elementwise_add", "elementwise_mul", "elementwise_sub", - "elementwise_div", "elementwise_pow", "elementwise_min", - "elementwise_max" + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: for axis in [-1 if len(shape) == 1 else 1]: self.dims = len(shape) dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data"], - "Y": ["weight"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": { + "X": ["input_data"], + "Y": ["weight"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "weight": - TensorConfig(data_gen=partial(generate_weight)) + "weight": TensorConfig( + data_gen=partial(generate_weight) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, shape)), + "input_data": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The input.dims[1] must be equal to the weight's length. 
if self.dims == 1: @@ -325,19 +346,23 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): pass @@ -348,108 +373,112 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest): class TrtConvertElementwiseTest_two_input_without_broadcast( - TrtLayerAutoScanTest): - + TrtLayerAutoScanTest +): def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) for shape in [[4], [4, 32], [2, 64, 32], [1, 8, 16, 32]]: for op_type in [ - "elementwise_add", "elementwise_mul", "elementwise_sub", - "elementwise_div", "elementwise_pow", "elementwise_min", - "elementwise_max" + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: for axis in [0, -1]: self.dims = len(shape) dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data1"], - "Y": ["input_data2"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": { + "X": ["input_data1"], + "Y": ["input_data2"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data1": - TensorConfig( - data_gen=partial(generate_input, shape)), - "input_data2": - TensorConfig( - data_gen=partial(generate_input, shape)) + "input_data1": TensorConfig( + data_gen=partial(generate_input, shape) + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = { "input_data1": [1], - "input_data2": [1] + "input_data2": [1], } self.dynamic_shape.max_input_shape = { "input_data1": [128], - "input_data2": [128] + "input_data2": [128], } self.dynamic_shape.opt_input_shape = { "input_data1": [32], - "input_data2": [32] + "input_data2": [32], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "input_data1": [1, 4], - "input_data2": [1, 4] + "input_data2": [1, 4], } self.dynamic_shape.max_input_shape = { "input_data1": [128, 256], - "input_data2": [128, 256] + "input_data2": [128, 256], } self.dynamic_shape.opt_input_shape = { "input_data1": [32, 64], - "input_data2": [32, 64] + "input_data2": 
[32, 64], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "input_data1": [1, 4, 4], - "input_data2": [1, 4, 4] + "input_data2": [1, 4, 4], } self.dynamic_shape.max_input_shape = { "input_data1": [128, 128, 256], - "input_data2": [128, 128, 256] + "input_data2": [128, 128, 256], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 64, 64], - "input_data2": [2, 64, 64] + "input_data2": [2, 64, 64], } elif self.dims == 4: self.dynamic_shape.min_input_shape = { "input_data1": [1, 4, 4, 4], - "input_data2": [1, 4, 4, 4] + "input_data2": [1, 4, 4, 4], } self.dynamic_shape.max_input_shape = { "input_data1": [8, 128, 64, 128], - "input_data2": [8, 128, 64, 128] + "input_data2": [8, 128, 64, 128], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 64, 32, 32], - "input_data2": [2, 64, 32, 32] + "input_data2": [2, 64, 32, 32], } def clear_dynamic_shape(): @@ -470,10 +499,12 @@ class TrtConvertElementwiseTest_two_input_without_broadcast( clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) @@ -491,7 +522,6 @@ class TrtConvertElementwiseTest_two_input_without_broadcast( class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs if len(inputs['input_data1'].shape) != len(inputs['input_data2'].shape): @@ -500,7 +530,6 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -512,8 +541,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): input2_shape5_list = [[32], [2, 1, 32], [4, 1, 1, 32]] input2_shape6_list = [[1, 32], [1, 32], [1, 1, 1, 32]] input2_shape_list = [ - input2_shape1_list, input2_shape2_list, input2_shape3_list, - input2_shape4_list, input2_shape5_list, input2_shape6_list + input2_shape1_list, + input2_shape2_list, + input2_shape3_list, + input2_shape4_list, + input2_shape5_list, + input2_shape6_list, ] axis1_list = [[-1], [1, -1], [1, -1]] axis2_list = [[-1], [0], [0]] @@ -522,8 +555,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): axis5_list = [[-1, 1], [-1, 0], [-1, 0]] axis6_list = [[-1, 0], [-1, 1], [-1, 0]] axis_list = [ - axis1_list, axis2_list, axis3_list, axis4_list, axis5_list, - axis6_list + axis1_list, + axis2_list, + axis3_list, + axis4_list, + axis5_list, + axis6_list, ] for i in range(3): @@ -531,66 +568,75 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): for j in range(6): input2_shape = input2_shape_list[j][i] for op_type in [ - "elementwise_add", - "elementwise_mul", - "elementwise_sub", - "elementwise_div", - "elementwise_pow", - "elementwise_min", - "elementwise_max", + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: for axis in axis_list[j][i]: self.shape1 = input1_shape self.shape2 = input2_shape dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": 
["input_data1"], - "Y": ["input_data2"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": { + "X": ["input_data1"], + "Y": ["input_data2"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data1": - TensorConfig(data_gen=partial( - generate_input, input1_shape)), - "input_data2": - TensorConfig(data_gen=partial( - generate_input, input2_shape)) + "input_data1": TensorConfig( + data_gen=partial( + generate_input, input1_shape + ) + ), + "input_data2": TensorConfig( + data_gen=partial( + generate_input, input2_shape + ) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): - max_shape = [[128], [128, 128], [128, 128, 128], - [128, 128, 128, 128]] + max_shape = [ + [128], + [128, 128], + [128, 128, 128], + [128, 128, 128, 128], + ] min_shape = [[1], [1, 1], [1, 1, 1], [1, 1, 1, 1]] opt_shape = [[32], [32, 32], [32, 32, 32], [32, 32, 32, 32]] self.dynamic_shape.min_input_shape = { "input_data1": min_shape[len(self.shape1) - 1], - "input_data2": min_shape[len(self.shape2) - 1] + "input_data2": min_shape[len(self.shape2) - 1], } self.dynamic_shape.max_input_shape = { "input_data1": max_shape[len(self.shape1) - 1], - "input_data2": max_shape[len(self.shape2) - 1] + "input_data2": max_shape[len(self.shape2) - 1], } self.dynamic_shape.opt_input_shape = { "input_data1": opt_shape[len(self.shape1) - 1], - "input_data2": opt_shape[len(self.shape2) - 1] + "input_data2": opt_shape[len(self.shape2) - 1], } def clear_dynamic_shape(): @@ -626,12 +672,10 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest): class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -640,52 +684,58 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): return np.random.rand(32).astype(np.float32) for batch in [1, 2, 4]: - for shape in [[32], [batch, 32], [batch, 32, 32], - [batch, 32, 16, 32]]: + for shape in [ + [32], + [batch, 32], + [batch, 32, 32], + [batch, 32, 16, 32], + ]: for op_type in [ - "elementwise_add", - "elementwise_mul", - "elementwise_sub", - "elementwise_div", - "elementwise_pow", - "elementwise_min", - "elementwise_max", + "elementwise_add", + "elementwise_mul", + "elementwise_sub", + "elementwise_div", + "elementwise_pow", + "elementwise_min", + "elementwise_max", ]: self.op_type = op_type for axis in [-1 if len(shape) == 1 else 1]: self.dims = len(shape) dics = [{"axis": axis}] - ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["weight"], - "Y": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": { + "X": ["weight"], + "Y": ["input_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "weight": - 
TensorConfig(data_gen=partial(generate_weight)) + "weight": TensorConfig( + data_gen=partial(generate_weight) + ) }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, shape)), + "input_data": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The input.dims[1] must be equal to the weight's length. if self.dims == 1: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_emb_eltwise_layernorm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_emb_eltwise_layernorm.py index 12b80e09a55154f7f4b3ab2fc637b737435ef7a1..c47992b52afe8580ae5c5c92731675df41ee863e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_emb_eltwise_layernorm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_emb_eltwise_layernorm.py @@ -22,15 +22,14 @@ import unittest class TrtConvertEmbEltwiseLayernormTest1(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch, input_size): - return np.random.randint(0, 7, size=(batch, input_size, - 1)).astype(np.int64) + return np.random.randint(0, 7, size=(batch, input_size, 1)).astype( + np.int64 + ) def generate_weight1(size11, size2): return np.random.randn(size11, size2).astype(np.float32) @@ -56,180 +55,228 @@ class TrtConvertEmbEltwiseLayernormTest1(TrtLayerAutoScanTest): for axis1 in [0, -1]: for axis2 in [0, -1]: for type in [ - "lookup_table", - "lookup_table_v2" + "lookup_table", + "lookup_table_v2", ]: - dics = [{ - "is_sparse": False, - "is_distributed": False, - "padding_idx": -1, - "is_test": True - }, { - "is_sparse": False, - "is_distributed": False, - "padding_idx": -1, - }, { - "axis": axis1 - }, { - "axis": axis2 - }, { - "begin_norm_axis": norm_axis, - "epsilon": epsilon - }] - ops_config = [{ - "op_type": - type, - "op_inputs": { - "Ids": ["input_data1"], - "W": ["embedding1_weight"] - }, - "op_outputs": { - "Out": - ["embedding1_output"] + dics = [ + { + "is_sparse": False, + "is_distributed": False, + "padding_idx": -1, + "is_test": True, }, - "op_attrs": - dics[0] if type - == "lookup_table" else dics[1] - }, { - "op_type": - type, - "op_inputs": { - "Ids": ["input_data2"], - "W": ["embedding2_weight"] + { + "is_sparse": False, + "is_distributed": False, + "padding_idx": -1, }, - "op_outputs": { - "Out": - ["embedding2_output"] + {"axis": axis1}, + {"axis": axis2}, + { + "begin_norm_axis": norm_axis, + "epsilon": epsilon, }, - "op_attrs": - dics[0] if type - == "lookup_table" else dics[1] - }, { - "op_type": - type, - "op_inputs": { - "Ids": ["input_data3"], - "W": ["embedding3_weight"] + ] + ops_config = [ + { + "op_type": type, + "op_inputs": { + "Ids": ["input_data1"], + "W": [ + "embedding1_weight" + ], + }, + "op_outputs": { + "Out": [ + "embedding1_output" + ] + }, + "op_attrs": dics[0] + if type == "lookup_table" + else dics[1], }, - "op_outputs": { - "Out": - ["embedding3_output"] + { + "op_type": type, + "op_inputs": { + "Ids": ["input_data2"], + "W": [ + "embedding2_weight" + ], + }, + "op_outputs": { + "Out": [ + "embedding2_output" + ] + }, + "op_attrs": dics[0] + if type == "lookup_table" + else dics[1], }, - "op_attrs": - 
dics[0] if type - == "lookup_table" else dics[1] - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["embedding2_output"], - "Y": ["embedding3_output"] + { + "op_type": type, + "op_inputs": { + "Ids": ["input_data3"], + "W": [ + "embedding3_weight" + ], + }, + "op_outputs": { + "Out": [ + "embedding3_output" + ] + }, + "op_attrs": dics[0] + if type == "lookup_table" + else dics[1], }, - "op_outputs": { - "Out": - ["elementwise_add1_output"] + { + "op_type": "elementwise_add", + "op_inputs": { + "X": [ + "embedding2_output" + ], + "Y": [ + "embedding3_output" + ], + }, + "op_outputs": { + "Out": [ + "elementwise_add1_output" + ] + }, + "op_attrs": dics[2], }, - "op_attrs": dics[2] - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": - ["elementwise_add1_output"], - "Y": ["embedding1_output"] + { + "op_type": "elementwise_add", + "op_inputs": { + "X": [ + "elementwise_add1_output" + ], + "Y": [ + "embedding1_output" + ], + }, + "op_outputs": { + "Out": [ + "elementwise_add2_output" + ] + }, + "op_attrs": dics[3], }, - "op_outputs": { - "Out": - ["elementwise_add2_output"] + { + "op_type": "layer_norm", + "op_inputs": { + "X": [ + "elementwise_add2_output" + ], + "Bias": [ + "layer_norm_bias" + ], + "Scale": [ + "layer_norm_scale" + ], + }, + "op_outputs": { + "Y": [ + "layer_norm_output1" + ], + "Mean": [ + "layer_norm_output2" + ], + "Variance": [ + "layer_norm_output3" + ], + }, + "op_attrs": dics[4], }, - "op_attrs": dics[3] - }, { - "op_type": "layer_norm", - "op_inputs": { - "X": - ["elementwise_add2_output"], - "Bias": ["layer_norm_bias"], - "Scale": - ["layer_norm_scale"] - }, - "op_outputs": { - "Y": ["layer_norm_output1"], - "Mean": - ["layer_norm_output2"], - "Variance": - ["layer_norm_output3"] - }, - "op_attrs": dics[4] - }] + ] ops = self.generate_op_config( - ops_config) + ops_config + ) program_config = ProgramConfig( ops=ops, weights={ - "embedding1_weight": - TensorConfig( + "embedding1_weight": TensorConfig( data_gen=partial( generate_weight1, - size11, size2)), - "embedding2_weight": - TensorConfig( + size11, + size2, + ) + ), + "embedding2_weight": TensorConfig( data_gen=partial( generate_weight2, - size12, size2)), - "embedding3_weight": - TensorConfig( + size12, + size2, + ) + ), + "embedding3_weight": TensorConfig( data_gen=partial( generate_weight3, - size13, size2)), - "layer_norm_bias": - TensorConfig( + size13, + size2, + ) + ), + "layer_norm_bias": TensorConfig( data_gen=partial( generate_weight4, - size2)), - "layer_norm_scale": - TensorConfig( + size2, + ) + ), + "layer_norm_scale": TensorConfig( data_gen=partial( generate_weight4, - size2)) + size2, + ) + ), }, inputs={ - "input_data1": - TensorConfig( + "input_data1": TensorConfig( data_gen=partial( generate_input, - batch, input_size)), - "input_data2": - TensorConfig( + batch, + input_size, + ) + ), + "input_data2": TensorConfig( data_gen=partial( generate_input, - batch, input_size)), - "input_data3": - TensorConfig( + batch, + input_size, + ) + ), + "input_data3": TensorConfig( data_gen=partial( generate_input, - batch, input_size)) + batch, + input_size, + ) + ), }, - outputs=["layer_norm_output1"]) + outputs=["layer_norm_output1"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data1": [1, 4, 1], "input_data2": [1, 4, 1], - "input_data3": [1, 4, 1] + 
"input_data3": [1, 4, 1], } self.dynamic_shape.max_input_shape = { "input_data1": [4, 512, 1], "input_data2": [4, 512, 1], - "input_data3": [4, 512, 1] + "input_data3": [4, 512, 1], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 128, 1], "input_data2": [2, 128, 1], - "input_data3": [2, 128, 1] + "input_data3": [2, 128, 1], } def clear_dynamic_shape(): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py index 4ca0da9532bc49dcdd3dee9d3a0b56b62641d624..ee73586e6e96a4a228af351b3867f789bee34770 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py @@ -22,7 +22,6 @@ from typing import List class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -35,7 +34,6 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -44,86 +42,84 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): for axis in [-1 if len(shape) == 1 else 1]: self.dims = len(shape) dics = [{"axis": axis}, {"in_dtype": 0, "out_dtype": 5}] - ops_config = [{ - "op_type": "equal", - "op_inputs": { - "X": ["input_data1"], - "Y": ["input_data2"] - }, - "op_outputs": { - "Out": ["compare_output_data"] + ops_config = [ + { + "op_type": "equal", + "op_inputs": { + "X": ["input_data1"], + "Y": ["input_data2"], + }, + "op_outputs": {"Out": ["compare_output_data"]}, + "op_attrs": dics[0], }, - "op_attrs": dics[0] - }, { - "op_type": "cast", - "op_inputs": { - "X": ["compare_output_data"] + { + "op_type": "cast", + "op_inputs": {"X": ["compare_output_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[1], }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[1] - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data1": - TensorConfig( - data_gen=partial(generate_input, shape)), - "input_data2": - TensorConfig( - data_gen=partial(generate_input, shape)) + "input_data1": TensorConfig( + data_gen=partial(generate_input, shape) + ), + "input_data2": TensorConfig( + data_gen=partial(generate_input, shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The input.dims[1] must be equal to the weight's length. 
if self.dims == 2: self.dynamic_shape.min_input_shape = { "input_data1": [1, 1], - "input_data2": [1, 1] + "input_data2": [1, 1], } self.dynamic_shape.max_input_shape = { "input_data1": [4, 1], - "input_data2": [4, 1] + "input_data2": [4, 1], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 1], - "input_data2": [2, 1] + "input_data2": [2, 1], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "input_data1": [1, 1, 4], - "input_data2": [1, 1, 4] + "input_data2": [1, 1, 4], } self.dynamic_shape.max_input_shape = { "input_data1": [4, 1, 256], - "input_data2": [1, 1, 256] + "input_data2": [1, 1, 256], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 1, 16], - "input_data2": [2, 1, 16] + "input_data2": [2, 1, 16], } elif self.dims == 4: self.dynamic_shape.min_input_shape = { "input_data1": [1, 1, 4, 4], - "input_data2": [1, 1, 4, 4] + "input_data2": [1, 1, 4, 4], } self.dynamic_shape.max_input_shape = { "input_data1": [4, 1, 128, 256], - "input_data2": [4, 1, 128, 256] + "input_data2": [4, 1, 128, 256], } self.dynamic_shape.opt_input_shape = { "input_data1": [2, 1, 32, 16], - "input_data2": [2, 1, 32, 16] + "input_data2": [2, 1, 32, 16], } def clear_dynamic_shape(): @@ -144,19 +140,23 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_expand_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_expand_v2.py index 781264531d625303a65d0c38af2cb8d5adf78ac8..6eb022810359079abb7a193be8b887eddd6d3095 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_expand_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_expand_v2.py @@ -22,7 +22,6 @@ import unittest class TrtConvertExpandV2Test(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -35,7 +34,6 @@ class TrtConvertExpandV2Test(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 4: self.input_shape = [1, 1, 4, 6] @@ -69,29 +67,31 @@ class TrtConvertExpandV2Test(TrtLayerAutoScanTest): self.dims = dims dics_intput = [{"X": ["expand_v2_input"]}] - ops_config = [{ - "op_type": "expand_v2", - "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["expand_v2_out"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "expand_v2", + "op_inputs": dics_intput[0], + "op_outputs": {"Out": ["expand_v2_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "expand_v2_input": - 
TensorConfig(data_gen=partial(generate_input1, dics)) + "expand_v2_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["expand_v2_out"]) + outputs=["expand_v2_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -146,19 +146,23 @@ class TrtConvertExpandV2Test(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass @@ -169,7 +173,6 @@ class TrtConvertExpandV2Test(TrtLayerAutoScanTest): class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -177,7 +180,6 @@ class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 1: self.input_shape = [1] @@ -192,18 +194,13 @@ class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): ] self.dims = dims dics_intput = [ - { - "X": ["expand_v2_input"], - "Shape": ["shapeT1_data"] - }, + {"X": ["expand_v2_input"], "Shape": ["shapeT1_data"]}, ] ops_config = [ { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT1_data"] - }, + "op_outputs": {"Out": ["shapeT1_data"]}, "op_attrs": { "dtype": 2, "str_value": "10", @@ -213,10 +210,8 @@ class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): { "op_type": "expand_v2", "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["expand_v2_out"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["expand_v2_out"]}, + "op_attrs": dics[0], }, ] ops = self.generate_op_config(ops_config) @@ -224,16 +219,18 @@ class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "expand_v2_input": - TensorConfig(data_gen=partial(generate_input1, dics)) + "expand_v2_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["expand_v2_out"]) + outputs=["expand_v2_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 1: self.dynamic_shape.min_input_shape = {"expand_v2_input": [1]} @@ -262,7 +259,6 @@ class TrtConvertExpandV2Test2(TrtLayerAutoScanTest): class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -270,7 +266,6 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): 
return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 4: self.input_shape = [1, 1, 4, 6] @@ -291,18 +286,18 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): { "X": ["expand_v2_input"], "expand_shapes_tensor": [ - "shapeT1_data", "shapeT2_data", "shapeT3_data", - "shapeT4_data" - ] + "shapeT1_data", + "shapeT2_data", + "shapeT3_data", + "shapeT4_data", + ], }, ] ops_config = [ { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT1_data"] - }, + "op_outputs": {"Out": ["shapeT1_data"]}, "op_attrs": { "dtype": 2, "str_value": "10", @@ -312,9 +307,7 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT2_data"] - }, + "op_outputs": {"Out": ["shapeT2_data"]}, "op_attrs": { "dtype": 2, "str_value": "12", @@ -324,9 +317,7 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT3_data"] - }, + "op_outputs": {"Out": ["shapeT3_data"]}, "op_attrs": { "dtype": 2, "str_value": "4", @@ -336,9 +327,7 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT4_data"] - }, + "op_outputs": {"Out": ["shapeT4_data"]}, "op_attrs": { "dtype": 2, "str_value": "6", @@ -348,10 +337,8 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): { "op_type": "expand_v2", "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["expand_v2_out"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["expand_v2_out"]}, + "op_attrs": dics[0], }, ] ops = self.generate_op_config(ops_config) @@ -359,16 +346,18 @@ class TrtConvertExpandV2Test3(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "expand_v2_input": - TensorConfig(data_gen=partial(generate_input1, dics)) + "expand_v2_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["expand_v2_out"]) + outputs=["expand_v2_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py index 97045ee8a544ec981a823029e31dec85951107ee..04763c985887c14e1c14c1b75c4c6b6ff93b575a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py @@ -23,10 +23,9 @@ import os class TrtConvertFcTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: # The output has diff between gpu and trt in CI windows - if (os.name == 'nt'): + if os.name == 'nt': return False return True @@ -34,12 +33,14 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): self.trt_param.workspace_size = 1073741824 def generate_input1(batch, attrs: List[Dict[str, Any]]): - return np.random.random([batch, 3, 64, (int)(attrs[0]["m"] / 2), - 2]).astype(np.float32) + return np.random.random( + [batch, 3, 64, (int)(attrs[0]["m"] / 2), 2] + ).astype(np.float32) def generate_w(batch, attrs: List[Dict[str, Any]]): - return np.random.random([attrs[0]["m"], - attrs[0]["n"]]).astype(np.float32) + return np.random.random([attrs[0]["m"], attrs[0]["n"]]).astype( 
+ np.float32 + ) def generate_bias(batch, attrs: List[Dict[str, Any]]): return np.random.random([attrs[0]["n"]]).astype(np.float32) @@ -53,7 +54,7 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): "m": m, "n": n, }, - {} + {}, ] ops_config = [ @@ -62,12 +63,10 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): "op_inputs": { "Input": ["input_data"], "W": ["w_data"], - "Bias": ["bias_data"] - }, - "op_outputs": { - "Out": ["output_data"] + "Bias": ["bias_data"], }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], }, ] @@ -76,24 +75,26 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): program_config = ProgramConfig( ops=ops, weights={ - "w_data": - TensorConfig(data_gen=partial(generate_w, batch, dics)), - "bias_data": - TensorConfig( - data_gen=partial(generate_bias, batch, dics)) + "w_data": TensorConfig( + data_gen=partial(generate_w, batch, dics) + ), + "bias_data": TensorConfig( + data_gen=partial(generate_bias, batch, dics) + ), }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, batch, dics)), + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, dics) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 3, 32, 16, 2], @@ -121,19 +122,23 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): # clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() @@ -143,10 +148,9 @@ class TrtConvertFcTest(TrtLayerAutoScanTest): class TrtConvertFcTest2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: # The output has diff between gpu and trt in CI windows - if (os.name == 'nt'): + if os.name == 'nt': return False return True @@ -157,8 +161,9 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest): return np.random.random([batch, 3, 64, 14]).astype(np.float32) def generate_w(batch, attrs: List[Dict[str, Any]]): - return np.random.random([attrs[0]["m"], - attrs[0]["n"]]).astype(np.float32) + return np.random.random([attrs[0]["m"], attrs[0]["n"]]).astype( + np.float32 + ) def generate_bias(batch, attrs: List[Dict[str, Any]]): return np.random.random([attrs[0]["n"]]).astype(np.float32) @@ -172,7 +177,7 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest): "m": m, "n": n, }, - {} + {}, ] ops_config = [ @@ -181,12 +186,10 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest): "op_inputs": { "Input": ["input_data"], "W": ["w_data"], - "Bias": ["bias_data"] + "Bias": ["bias_data"], }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": 
["output_data"]}, + "op_attrs": dics[0], }, ] @@ -195,24 +198,26 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest): program_config = ProgramConfig( ops=ops, weights={ - "w_data": - TensorConfig(data_gen=partial(generate_w, batch, dics)), - "bias_data": - TensorConfig( - data_gen=partial(generate_bias, batch, dics)) + "w_data": TensorConfig( + data_gen=partial(generate_w, batch, dics) + ), + "bias_data": TensorConfig( + data_gen=partial(generate_bias, batch, dics) + ), }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, batch, dics)), + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, dics) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): self.dynamic_shape.min_input_shape = { "input_data": [1, 3, 32, 14], @@ -277,7 +282,7 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest): "m": m, "n": n, }, - {} + {}, ] ops_config = [ @@ -286,12 +291,10 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest): "op_inputs": { "Input": ["input_data"], "W": ["w_data"], - "Bias": ["bias_data"] + "Bias": ["bias_data"], }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], }, ] @@ -300,24 +303,26 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest): program_config = ProgramConfig( ops=ops, weights={ - "w_data": - TensorConfig(data_gen=partial(generate_w, batch, dics)), - "bias_data": - TensorConfig( - data_gen=partial(generate_bias, batch, dics)) + "w_data": TensorConfig( + data_gen=partial(generate_w, batch, dics) + ), + "bias_data": TensorConfig( + data_gen=partial(generate_bias, batch, dics) + ), }, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, batch, dics)), + "input_data": TensorConfig( + data_gen=partial(generate_input1, batch, dics) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): self.dynamic_shape.min_input_shape = { "input_data": [1, 14, 1, 2], diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py index 5e7132d74c40b7ee947bee844e4cc1c18daeb53d..6e22f5db13c18ec9723588c32f833676f20a0b8f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertSplitTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_value_data(attrs: List[Dict[str, Any]]): return np.array([1]).astype(np.int32) @@ -47,21 +45,28 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): str_value = str_value else: str_value = "" - dics = [{ - "str_value": str_value, - "value": value, - "shape": shape, - "dtype": dtype - }, { - "axis": -1 - }] - dics_intput = [{ - "ValueTensor": ["value_data"] - }, { - "ShapeTensor": ["shape_data"], - }, { - "ShapeTensorList": ["shapeT1_data", "shapeT2_data"], - }, {}] + 
dics = [ + { + "str_value": str_value, + "value": value, + "shape": shape, + "dtype": dtype, + }, + {"axis": -1}, + ] + dics_intput = [ + {"ValueTensor": ["value_data"]}, + { + "ShapeTensor": ["shape_data"], + }, + { + "ShapeTensorList": [ + "shapeT1_data", + "shapeT2_data", + ], + }, + {}, + ] ops_config = [ { "op_type": "fill_constant", @@ -69,7 +74,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): "op_outputs": { "Out": ["out_data"], }, - "op_attrs": dics[0] + "op_attrs": dics[0], }, ] @@ -81,26 +86,31 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "value_data": - TensorConfig(data_gen=partial( - generate_value_data, dics)), - "shape_data": - TensorConfig(data_gen=partial( - generate_shape_data, dics)), - "shapeT1_data": - TensorConfig(data_gen=partial( - generate_shapelist_data, dics)), - "shapeT2_data": - TensorConfig(data_gen=partial( - generate_shapelist_data, dics)), + "value_data": TensorConfig( + data_gen=partial(generate_value_data, dics) + ), + "shape_data": TensorConfig( + data_gen=partial(generate_shape_data, dics) + ), + "shapeT1_data": TensorConfig( + data_gen=partial( + generate_shapelist_data, dics + ) + ), + "shapeT2_data": TensorConfig( + data_gen=partial( + generate_shapelist_data, dics + ) + ), }, - outputs=["out_data"]) + outputs=["out_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.input_shape = [1, 1] max_shape = list(self.input_shape) @@ -118,7 +128,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if (self.num_input < 3): + if self.num_input < 3: return 0, 6 return 1, 5 @@ -131,10 +141,12 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py index 314457e25e9ee02ee623b346a68c8bb8bfc0d2d6..4027b8e1f48b89e2c701b6640aaa08a0e4d20868 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py @@ -22,12 +22,10 @@ from typing import List class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch): return np.random.random([batch, 32]).astype(np.float32) @@ -39,34 +37,35 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): else: op_outputs = { "Out": ["output_data"], - "XShape": ["xshape_data"] + "XShape": ["xshape_data"], } dics = [{"axis": axis}] - ops_config = [{ - "op_type": "flatten", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": op_outputs, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "flatten", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": op_outputs, + "op_attrs": dics[0], + } + ] ops = 
self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, batch)) + "input_data": TensorConfig( + data_gen=partial(generate_input, batch) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 8]} self.dynamic_shape.max_input_shape = {"input_data": [4, 64]} @@ -100,31 +99,33 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest): # for static_shape clear_dynamic_shape() yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch): return np.random.random([batch, 32, 64]).astype(np.float32) @@ -136,34 +137,35 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): else: op_outputs = { "Out": ["output_data"], - "XShape": ["xshape_data"] + "XShape": ["xshape_data"], } dics = [{"axis": axis}] - ops_config = [{ - "op_type": "flatten", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": op_outputs, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "flatten", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": op_outputs, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, batch)) + "input_data": TensorConfig( + data_gen=partial(generate_input, batch) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]} self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64]} @@ -198,31 +200,33 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( 
- attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch): return np.random.random([batch, 8, 8, 8]).astype(np.float32) @@ -234,34 +238,35 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): else: op_outputs = { "Out": ["output_data"], - "XShape": ["xshape_data"] + "XShape": ["xshape_data"], } dics = [{"axis": axis}] - ops_config = [{ - "op_type": "flatten", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": op_outputs, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "flatten", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": op_outputs, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, batch)) + "input_data": TensorConfig( + data_gen=partial(generate_input, batch) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]} self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32, 32]} @@ -296,31 +301,33 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch): return np.random.random([batch, 8, 8, 8]).astype(np.float32) @@ -332,34 +339,35 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): else: op_outputs = { "Out": ["output_data"], - "XShape": ["xshape_data"] + "XShape": ["xshape_data"], } dics = [{"axis": axis}] - ops_config = [{ - "op_type": "flatten", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": op_outputs, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "flatten", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": op_outputs, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, batch)) + "input_data": TensorConfig( + 
data_gen=partial(generate_input, batch) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]} self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 16, 8]} @@ -394,19 +402,23 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py index e39d8bf10e2df289c89022b4adfe8b9cf4d79c0c..fb27f8e630e0088ecb2f790497f200eb9485e3b5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten_contiguous_range.py @@ -22,12 +22,10 @@ from typing import List class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch): return np.random.random([2, batch, 4, 8, 3]).astype(np.float32) @@ -37,35 +35,36 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): type = "flatten_contiguous_range" op_outputs = { "Out": ["output_data"], - "XShape": ["xshape_data"] + "XShape": ["xshape_data"], } - ops_config = [{ - "op_type": type, - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": op_outputs, - "op_attrs": { - "start_axis": start_axis, - "stop_axis": stop_axis, + ops_config = [ + { + "op_type": type, + "op_inputs": {"X": ["input_data"]}, + "op_outputs": op_outputs, + "op_attrs": { + "start_axis": start_axis, + "stop_axis": stop_axis, + }, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, batch)) + "input_data": TensorConfig( + data_gen=partial(generate_input, batch) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [2, 1, 4, 8, 3]} self.dynamic_shape.max_input_shape = {"input_data": [2, 4, 4, 8, 3]} @@ -97,19 +96,23 @@ class TrtConvertFlattenContiguousRangeTest(TrtLayerAutoScanTest): clear_dynamic_shape() 
self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fused_token_prune.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fused_token_prune.py index 287f0af08236923f82ad1ad811705c65648ea1c2..5130660257c7909f6879710771f004ef17f79a2c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fused_token_prune.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fused_token_prune.py @@ -22,7 +22,6 @@ import unittest class TrtConvertFusedTokenPruneTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -40,65 +39,70 @@ class TrtConvertFusedTokenPruneTest(TrtLayerAutoScanTest): for keep_first_token in [True, False]: for keep_order in [True, False]: - dics = [{ - "keep_first_token": keep_first_token, - "keep_order": keep_order - }] - ops_config = [{ - "op_type": "fused_token_prune", - "op_inputs": { - "Attn": ["attn"], - "X": ["x"], - "Mask": ["mask"], - "NewMask": ["new_mask"] - }, - "op_outputs": { - "SlimmedX": ["slimmed_x"], - "CLSInds": ["cls_inds"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "keep_first_token": keep_first_token, + "keep_order": keep_order, + } + ] + ops_config = [ + { + "op_type": "fused_token_prune", + "op_inputs": { + "Attn": ["attn"], + "X": ["x"], + "Mask": ["mask"], + "NewMask": ["new_mask"], + }, + "op_outputs": { + "SlimmedX": ["slimmed_x"], + "CLSInds": ["cls_inds"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "attn": - TensorConfig( - data_gen=partial(generate_attn_or_mask, dics)), - "x": - TensorConfig(data_gen=partial(generate_x, dics)), - "mask": - TensorConfig( - data_gen=partial(generate_attn_or_mask, dics)), - "new_mask": - TensorConfig(data_gen=partial(generate_new_mask, dics)) + "attn": TensorConfig( + data_gen=partial(generate_attn_or_mask, dics) + ), + "x": TensorConfig(data_gen=partial(generate_x, dics)), + "mask": TensorConfig( + data_gen=partial(generate_attn_or_mask, dics) + ), + "new_mask": TensorConfig( + data_gen=partial(generate_new_mask, dics) + ), }, - outputs=["slimmed_x", "cls_inds"]) + outputs=["slimmed_x", "cls_inds"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "attn": [4, 12, 64, 64], "x": [4, 64, 76], "mask": [4, 12, 64, 64], - "new_mask": [4, 12, 32, 32] + "new_mask": [4, 12, 32, 32], } self.dynamic_shape.max_input_shape = { "attn": [4, 12, 64, 64], "x": [4, 64, 76], "mask": [4, 12, 64, 64], - 
"new_mask": [4, 12, 32, 32] + "new_mask": [4, 12, 32, 32], } self.dynamic_shape.opt_input_shape = { "attn": [4, 12, 64, 64], "x": [4, 64, 76], "mask": [4, 12, 64, 64], - "new_mask": [4, 12, 32, 32] + "new_mask": [4, 12, 32, 32], } def clear_dynamic_shape(): @@ -116,10 +120,12 @@ class TrtConvertFusedTokenPruneTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-2, 1e-2) + attrs, True + ), (1e-2, 1e-2) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-1, 1e-2) + attrs, True + ), (1e-1, 1e-2) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py index d74cad229284b40a64c4436b27690afb0afbb8cb..ca2984fa1877712f5ac162065d0342f89d0798c7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py @@ -22,7 +22,6 @@ import unittest class TrtConvertGatherTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -34,7 +33,6 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(shape): return np.random.random(shape).astype(np.float32) @@ -51,112 +49,126 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): for index in [[1, 4], [4, 8]]: for axis in [0, 1, 2, 3]: for overwrite in [True, False]: - for input in [{ - "X": ["input_data"], - "Index": ["index_data"] - }, { + for input in [ + {"X": ["input_data"], "Index": ["index_data"]}, + { "X": ["input_data"], "Index": ["index_data"], - "Axis": ["axis_data"] - }]: + "Axis": ["axis_data"], + }, + ]: for index_type_int32 in [True, False]: self.shape = shape self.axis = axis self.input_num = len(input) self.index_type_int32 = index_type_int32 dics = [{"overwrite": overwrite, "axis": axis}] - ops_config = [{ - "op_type": "gather", - "op_inputs": input, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "gather", + "op_inputs": input, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, shape)), - "index_data": - TensorConfig(data_gen=partial( - generate_input2 - if index_type_int32 == - True else generate_input4, index)), - } if len(input) == 2 else { - "input_data": - TensorConfig(data_gen=partial( - generate_input1, shape)), - "index_data": - TensorConfig(data_gen=partial( - generate_input2, index)), - "axis_data": - TensorConfig(data_gen=partial( - generate_input3, axis)), + "input_data": TensorConfig( + data_gen=partial( + generate_input1, shape + ) + ), + "index_data": TensorConfig( + data_gen=partial( + generate_input2 + if index_type_int32 == True + else generate_input4, + index, + ) + ), + } + if len(input) == 2 + else { + "input_data": TensorConfig( + data_gen=partial( + generate_input1, shape + ) + ), + "index_data": TensorConfig( + data_gen=partial( + generate_input2, index + ) + ), + "axis_data": TensorConfig( + data_gen=partial( + generate_input3, axis + ) + ), }, - 
outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if len(self.shape) == 1: self.dynamic_shape.min_input_shape = { "input_data": [4], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.max_input_shape = { "input_data": [128], - "index_data": [4] + "index_data": [4], } self.dynamic_shape.opt_input_shape = { "input_data": [16], - "index_data": [2] + "index_data": [2], } elif len(self.shape) == 2: self.dynamic_shape.min_input_shape = { "input_data": [2, 4], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.max_input_shape = { "input_data": [256, 256], - "index_data": [4] + "index_data": [4], } self.dynamic_shape.opt_input_shape = { "input_data": [64, 32], - "index_data": [2] + "index_data": [2], } elif len(self.shape) == 3: self.dynamic_shape.min_input_shape = { "input_data": [2, 4, 4], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.max_input_shape = { "input_data": [128, 256, 256], - "index_data": [4] + "index_data": [4], } self.dynamic_shape.opt_input_shape = { "input_data": [16, 64, 32], - "index_data": [2] + "index_data": [2], } elif len(self.shape) == 4: self.dynamic_shape.min_input_shape = { "input_data": [2, 4, 4, 2], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.max_input_shape = { "input_data": [128, 256, 64, 128], - "index_data": [4] + "index_data": [4], } self.dynamic_shape.opt_input_shape = { "input_data": [16, 64, 16, 32], - "index_data": [2] + "index_data": [2], } def clear_dynamic_shape(): @@ -181,10 +193,12 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-5 + False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-3 + False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) @@ -200,14 +214,17 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0: inputs = program_config.inputs - if len(inputs['input_data'].shape) == 1 or len( - inputs['index_data'].shape) == 1: + if ( + len(inputs['input_data'].shape) == 1 + or len(inputs['index_data'].shape) == 1 + ): return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Need to repair the case: trt reshape out failed for dynamic shape mode when inputs' dims==1. under trt7.0 " + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Need to repair the case: trt reshape out failed for dynamic shape mode when inputs' dims==1. 
under trt7.0 ", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py index ea392357114e7c6bfa24621e04898702ac432690..6ac8314ec77765549a72a4f9efcfdb8a0e7cf8f1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py @@ -23,7 +23,6 @@ import os class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: # The output has diff between gpu and trt in CI windows # if ( and self.trt_param.precision == paddle_infer.PrecisionType.Half): @@ -31,54 +30,53 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(): return np.random.random([2, 32, 64, 64]).astype(np.float32) def generate_input2(): return np.ones([1]).astype(np.int32) - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) for i in range(10): program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), - "index_data": - TensorConfig(data_gen=partial(generate_input2)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), + "index_data": TensorConfig( + data_gen=partial(generate_input2) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 8, 8, 8], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.max_input_shape = { "input_data": [4, 32, 64, 64], - "index_data": [1] + "index_data": [1], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 32, 64, 64], - "index_data": [1] + "index_data": [1], } def clear_dynamic_shape(): @@ -105,15 +103,16 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest): yield self.create_inference_config(), (1, 3), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Under Windows Ci, this case will sporadically fail.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Under Windows Ci, this case will sporadically fail.", + ) def test(self): self.add_skip_trt_case() @@ -121,29 +120,24 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest): class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([2, 32, 64, 64]).astype(np.float32) def generate_input2(): return np.array([1, 2]).astype(np.int32) - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - 
"Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( @@ -153,25 +147,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input1)), "index_data": TensorConfig(data_gen=partial(generate_input2)), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 8, 8, 8], - "index_data": [2] + "index_data": [2], } self.dynamic_shape.max_input_shape = { "input_data": [4, 32, 64, 64], - "index_data": [2] + "index_data": [2], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 32, 64, 64], - "index_data": [2] + "index_data": [2], } def clear_dynamic_shape(): @@ -198,15 +193,16 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest): yield self.create_inference_config(), (1, 3), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Under Windows Ci, this case will sporadically fail.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Under Windows Ci, this case will sporadically fail.", + ) def test(self): self.add_skip_trt_case() @@ -214,29 +210,24 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest): class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([2, 32, 64, 64]).astype(np.float32) def generate_input2(): return np.ones([2, 2]).astype(np.int32) - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( @@ -246,25 +237,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input1)), "index_data": TensorConfig(data_gen=partial(generate_input2)), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 8, 8, 8], - "index_data": [2, 2] + "index_data": [2, 2], } self.dynamic_shape.max_input_shape = { "input_data": [4, 32, 64, 64], - "index_data": [2, 2] + "index_data": [2, 2], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 32, 64, 64], - "index_data": [2, 2] + "index_data": [2, 2], } def clear_dynamic_shape(): @@ -291,15 +283,16 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest): yield 
self.create_inference_config(), (1, 3), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Under Windows Ci, this case will sporadically fail.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Under Windows Ci, this case will sporadically fail.", + ) def test(self): self.add_skip_trt_case() @@ -307,29 +300,24 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest): class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([2, 32, 64, 64]).astype(np.float32) def generate_input2(): return np.ones([2, 2, 4]).astype(np.int32) - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( @@ -339,25 +327,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input1)), "index_data": TensorConfig(data_gen=partial(generate_input2)), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 8, 8, 8], - "index_data": [2, 2, 4] + "index_data": [2, 2, 4], } self.dynamic_shape.max_input_shape = { "input_data": [4, 32, 64, 64], - "index_data": [2, 2, 4] + "index_data": [2, 2, 4], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 32, 64, 64], - "index_data": [2, 2, 4] + "index_data": [2, 2, 4], } def clear_dynamic_shape(): @@ -384,15 +373,16 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest): yield self.create_inference_config(), (1, 3), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Under Windows Ci, this case will sporadically fail.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Under Windows Ci, this case will sporadically fail.", + ) def test(self): self.add_skip_trt_case() @@ -400,29 +390,24 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest): class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([2, 32]).astype(np.float32) def generate_input2(): return np.array([[0, 3], [1, 9]]).astype(np.int32) - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] 
ops = self.generate_op_config(ops_config) program_config = ProgramConfig( @@ -432,25 +417,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input1)), "index_data": TensorConfig(data_gen=partial(generate_input2)), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 4], - "index_data": [2, 2] + "index_data": [2, 2], } self.dynamic_shape.max_input_shape = { "input_data": [4, 64], - "index_data": [2, 2] + "index_data": [2, 2], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 8], - "index_data": [2, 2] + "index_data": [2, 2], } def clear_dynamic_shape(): @@ -477,15 +463,16 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest): yield self.create_inference_config(), (1, 3), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "Under Windows Ci, this case will sporadically fail.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Under Windows Ci, this case will sporadically fail.", + ) def test(self): self.add_skip_trt_case() @@ -493,30 +480,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest): class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([16, 32, 256]).astype(np.float32) def generate_input2(): - return np.array([[[2, 5], [3, 8]], [[0, 2], [0, - 3]]]).astype(np.int32) - - ops_config = [{ - "op_type": "gather_nd", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + return np.array([[[2, 5], [3, 8]], [[0, 2], [0, 3]]]).astype( + np.int32 + ) + + ops_config = [ + { + "op_type": "gather_nd", + "op_inputs": {"X": ["input_data"], "Index": ["index_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( @@ -526,25 +509,26 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest): "input_data": TensorConfig(data_gen=partial(generate_input1)), "index_data": TensorConfig(data_gen=partial(generate_input2)), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 4, 4], - "index_data": [1, 1, 1] + "index_data": [1, 1, 1], } self.dynamic_shape.max_input_shape = { "input_data": [16, 64, 512], - "index_data": [4, 2, 4] + "index_data": [4, 2, 4], } self.dynamic_shape.opt_input_shape = { "input_data": [2, 8, 64], - "index_data": [2, 2, 2] + "index_data": [2, 2, 2], } def clear_dynamic_shape(): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py index 
4f8e5c6356abb9e6a12b7f81700a321863b7aef4..d6d2f876361e51a0ff80d3dfc7eebba64755177e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py @@ -22,12 +22,10 @@ import unittest class TrtConvertGeluTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, attrs: List[Dict[str, Any]]): if dims == 1: return np.ones([32]).astype(np.float32) @@ -43,33 +41,32 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): self.dims = dims dics = [{"approximate": approximate}] - ops_config = [{ - "op_type": "gelu", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "gelu", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dims, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dims, dics) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -123,19 +120,23 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py index e04c096d9498bd43a741b941facaa0fc7841c168..36b0c1638cb77e111b0d660c1d10b446aa6af843 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py @@ -22,29 +22,27 @@ import unittest class TrtConvertGridSampler(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([1, 3, 32, 32]).astype(np.float32) def generate_input2(): return np.random.random([1, 3, 3, 2]).astype(np.float32) - ops_config = [{ - "op_type": "grid_sampler", - "op_inputs": { - "X": ["input_data"], - "Grid": ["grid_data"], - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "grid_sampler", + "op_inputs": { + "X": 
["input_data"], + "Grid": ["grid_data"], + }, + "op_outputs": {"Output": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) for i in range(10): @@ -52,30 +50,33 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), - "grid_data": - TensorConfig(data_gen=partial(generate_input2)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), + "grid_data": TensorConfig( + data_gen=partial(generate_input2) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 3, 32, 32], - "grid_data": [1, 3, 3, 2] + "grid_data": [1, 3, 3, 2], } self.dynamic_shape.max_input_shape = { "input_data": [1, 3, 64, 64], - "grid_data": [1, 3, 4, 4] + "grid_data": [1, 3, 4, 4], } self.dynamic_shape.opt_input_shape = { "input_data": [1, 3, 32, 32], - "grid_data": [1, 3, 3, 2] + "grid_data": [1, 3, 3, 2], } def clear_dynamic_shape(): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py index 0c8e6eeb87babc5517f478e782323eb52e2f3280..18f40934172f6d22e1ed1133d2b273468618683b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py @@ -22,7 +22,6 @@ import unittest class TrtConvertGroupNormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -36,7 +35,6 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input(attrs: List[Dict[str, Any]], batch): if attrs[0]['data_layout'] == 'NCHW': return np.random.random([batch, 32, 64, 64]).astype(np.float32) @@ -53,47 +51,56 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest): for group in [1, 4, 32, -1]: for epsilon in [0.0001, 0.0007, -1, 1]: for data_layout in ['NCHW']: - dics = [{ - "epsilon": epsilon, - "groups": group, - "data_layout": data_layout - }] - ops_config = [{ - "op_type": "group_norm", - "op_inputs": { - "X": ["input_data"], - "Scale": ["scale_weight"], - "Bias": ["bias_weight"] - }, - "op_outputs": { - "Y": ["y_output"], - "Mean": ["mean_output"], - "Variance": ["variance_output"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "epsilon": epsilon, + "groups": group, + "data_layout": data_layout, + } + ] + ops_config = [ + { + "op_type": "group_norm", + "op_inputs": { + "X": ["input_data"], + "Scale": ["scale_weight"], + "Bias": ["bias_weight"], + }, + "op_outputs": { + "Y": ["y_output"], + "Mean": ["mean_output"], + "Variance": ["variance_output"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "scale_weight": - TensorConfig(data_gen=partial(generate_scale)), - "bias_weight": - TensorConfig(data_gen=partial(generate_bias)) + "scale_weight": TensorConfig( + data_gen=partial(generate_scale) + ), + "bias_weight": TensorConfig( + data_gen=partial(generate_bias) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input, 
dics, batch)) + "input_data": TensorConfig( + data_gen=partial( + generate_input, dics, batch + ) + ) }, - outputs=["y_output"]) + outputs=["y_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16, 16]} self.dynamic_shape.max_input_shape = { @@ -117,19 +124,23 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py index 0a1699139c9708d465cc4c0bed396b943ebf4add..3c8cd8948f4153d01987ab36cf40ebf23633fa30 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py @@ -22,12 +22,10 @@ import unittest class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -37,33 +35,34 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest): for slope in [0.1, 0.5]: for offset in [0.2, 0.7]: dics = [{"slope": slope, "offset": offset}] - ops_config = [{ - "op_type": "hard_sigmoid", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "hard_sigmoid", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input, shape)) + "input_data": TensorConfig( + data_gen=partial(generate_input, shape) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.input_dim == 2: self.dynamic_shape.min_input_shape = {"input_data": [1, 8]} diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py index b014d6fa2b0c228c473bf40defc774c46a8aa1b2..4180c5b08307103c7e8d702abc2b61360c66afdc 100644 --- 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py @@ -22,7 +22,6 @@ import unittest class TrtConvertHardSwishTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -36,46 +35,46 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 3, 32, 32]).astype(np.float32) for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]: for scale in [5.0, 7.0, -1.0, 0.0, 100.0]: for offset in [3.0, 5.0, -1.0, 0.0, 100.0]: - dics = [{ - "threshold": threshold, - "scale": scale, - "offset": offset - }] - - ops_config = [{ - "op_type": "hard_swish", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["hard_swish_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "threshold": threshold, + "scale": scale, + "offset": offset, + } + ] + + ops_config = [ + { + "op_type": "hard_swish", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["hard_swish_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["hard_swish_output_data"]) + outputs=["hard_swish_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 16, 16]} self.dynamic_shape.max_input_shape = {"input_data": [2, 3, 32, 32]} @@ -97,19 +96,23 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py index 135858fec77c1b7e93bffdb4b653fbe9f2c9e350..b3be8dfdc6653b37020dc3d290eab723609cad14 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_instance_norm.py @@ -24,7 +24,6 @@ import os class TrtConvertInstanceNormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -36,7 +35,6 @@ class 
TrtConvertInstanceNormTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], shape_input): return np.random.random(shape_input).astype(np.float32) @@ -44,48 +42,60 @@ class TrtConvertInstanceNormTest(TrtLayerAutoScanTest): return np.random.random(shape_input[1]).astype(np.float32) for batch in [1, 2, 4]: - for shape_input in [[batch, 16], [batch, 32, 64], - [batch, 16, 32, 64]]: + for shape_input in [ + [batch, 16], + [batch, 32, 64], + [batch, 16, 32, 64], + ]: self.in_dim = len(shape_input) for epsilon in [0.0005, -1, 1]: dics = [{"epsilon": epsilon}] - ops_config = [{ - "op_type": "instance_norm", - "op_inputs": { - "X": ["input_data"], - "Scale": ["scale_data"], - "Bias": ["bias_data"] - }, - "op_outputs": { - "Y": ["y_data"], - "SavedMean": ["saved_mean_data"], - "SavedVariance": ["saved_variance_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "instance_norm", + "op_inputs": { + "X": ["input_data"], + "Scale": ["scale_data"], + "Bias": ["bias_data"], + }, + "op_outputs": { + "Y": ["y_data"], + "SavedMean": ["saved_mean_data"], + "SavedVariance": ["saved_variance_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "bias_data": - TensorConfig(data_gen=partial( - generate_input2, dics, shape_input)), - "scale_data": - TensorConfig(data_gen=partial( - generate_input2, dics, shape_input)) + "bias_data": TensorConfig( + data_gen=partial( + generate_input2, dics, shape_input + ) + ), + "scale_data": TensorConfig( + data_gen=partial( + generate_input2, dics, shape_input + ) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dics, shape_input)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dics, shape_input + ) + ) }, - outputs=["y_data"]) + outputs=["y_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.in_dim == 2: self.dynamic_shape.min_input_shape = {"input_data": [1, 4]} @@ -128,30 +138,35 @@ class TrtConvertInstanceNormTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller2(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller2, SkipReasons.TRT_NOT_SUPPORT, - "The output has diff between gpu and trt in Windows.") + teller2, + SkipReasons.TRT_NOT_SUPPORT, + "The output has diff between gpu and trt in Windows.", + ) def test(self): self.add_skip_trt_case() diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py index 390eff3e1e84a29254356b5c2a1f48e0eb8b70a3..6ccb00d1a0f51d665576a3ab0a23974380ebc3ac 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py @@ -22,41 +22,41 @@ import unittest class TrtConvertInverse(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([32, 32]).astype(np.float32) - ops_config = [{ - "op_type": "inverse", - "op_inputs": { - "Input": ["input_data"], - }, - "op_outputs": { - "Output": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "inverse", + "op_inputs": { + "Input": ["input_data"], + }, + "op_outputs": {"Output": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) for i in range(10): program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 1], diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_layer_norm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_layer_norm.py index f6e4dec9538fe1c92bcfd72dc1d08284d104816b..2d52f74be38232941e49a62f4d5fc65bef00e805 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_layer_norm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_layer_norm.py @@ -23,7 +23,6 @@ import unittest class TrtConvertLayerNormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -34,13 +33,13 @@ class TrtConvertLayerNormTest(TrtLayerAutoScanTest): if attrs[0]['epsilon'] < 0 or attrs[0]['epsilon'] > 0.001: return False if attrs[0]['begin_norm_axis'] <= 0 or attrs[0]['begin_norm_axis'] >= ( - len(inputs['input_data'].shape) - 1): + len(inputs['input_data'].shape) - 1 + ): return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], shape_input): return np.ones(shape_input).astype(np.float32) @@ -53,49 +52,52 @@ class TrtConvertLayerNormTest(TrtLayerAutoScanTest): for epsilon in [0.0005, -1, 1]: for begin_norm_axis in [1, 0, -1, 2, 3]: - dics = [{ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis - }, {}] - - ops_config = [{ - "op_type": "layer_norm", - "op_inputs": { - "X": ["input_data"], - "Scale": ["scale_data"], - "Bias": ["bias_data"] - }, - "op_outputs": { - "Y": ["y_data"], - "Mean": ["saved_mean_data"], - "Variance": ["saved_variance_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + {"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}, + {}, + ] + + ops_config = [ + { + "op_type": "layer_norm", + "op_inputs": { + "X": ["input_data"], + "Scale": ["scale_data"], + "Bias": ["bias_data"], + }, + "op_outputs": { + "Y": ["y_data"], + "Mean": 
["saved_mean_data"], + "Variance": ["saved_variance_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) shape_input = [1, 3, 64, 64] program_config = ProgramConfig( ops=ops, weights={ - "bias_data": - TensorConfig(data_gen=partial(generate_input2, dics, - shape_input)), - "scale_data": - TensorConfig(data_gen=partial(generate_input2, dics, - shape_input)) + "bias_data": TensorConfig( + data_gen=partial(generate_input2, dics, shape_input) + ), + "scale_data": TensorConfig( + data_gen=partial(generate_input2, dics, shape_input) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, dics, - shape_input)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics, shape_input) + ) }, - outputs=["y_data"]) + outputs=["y_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -108,7 +110,7 @@ class TrtConvertLayerNormTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): inputs = program_config.inputs - #if not dynamic_shape: + # if not dynamic_shape: # if attrs[0]["begin_norm_axis"] >= len(inputs["input_data"].shape) - 1: # print ("iiiiiii") # return 0, 3 @@ -122,26 +124,29 @@ class TrtConvertLayerNormTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def test(self): self.run_test() class TrtConvertLayerNormTest_2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -152,13 +157,13 @@ class TrtConvertLayerNormTest_2(TrtLayerAutoScanTest): if attrs[0]['epsilon'] < 0 or attrs[0]['epsilon'] > 0.001: return False if attrs[0]['begin_norm_axis'] <= 0 or attrs[0]['begin_norm_axis'] >= ( - len(inputs['input_data'].shape) - 1): + len(inputs['input_data'].shape) - 1 + ): return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], shape_input): return np.ones(shape_input).astype(np.float32) @@ -171,49 +176,52 @@ class TrtConvertLayerNormTest_2(TrtLayerAutoScanTest): for epsilon in [0.0005, -1, 1]: for begin_norm_axis in [1, 0, -1, 2, 3]: - dics = [{ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis - }, {}] - - ops_config = [{ - "op_type": "layer_norm", - "op_inputs": { - "X": ["input_data"], - "Scale": ["scale_data"], - "Bias": ["bias_data"] - }, - "op_outputs": { - "Y": ["y_data"], - "Mean": ["saved_mean_data"], - "Variance": ["saved_variance_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + {"epsilon": epsilon, 
"begin_norm_axis": begin_norm_axis}, + {}, + ] + + ops_config = [ + { + "op_type": "layer_norm", + "op_inputs": { + "X": ["input_data"], + "Scale": ["scale_data"], + "Bias": ["bias_data"], + }, + "op_outputs": { + "Y": ["y_data"], + "Mean": ["saved_mean_data"], + "Variance": ["saved_variance_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) shape_input = [2, 64, 3, 3] program_config = ProgramConfig( ops=ops, weights={ - "bias_data": - TensorConfig(data_gen=partial(generate_input2, dics, - shape_input)), - "scale_data": - TensorConfig(data_gen=partial(generate_input2, dics, - shape_input)) + "bias_data": TensorConfig( + data_gen=partial(generate_input2, dics, shape_input) + ), + "scale_data": TensorConfig( + data_gen=partial(generate_input2, dics, shape_input) + ), }, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, dics, - shape_input)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics, shape_input) + ) }, - outputs=["y_data"]) + outputs=["y_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 64, 3, 3]} self.dynamic_shape.max_input_shape = {"input_data": [4, 64, 3, 3]} @@ -226,7 +234,7 @@ class TrtConvertLayerNormTest_2(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): inputs = program_config.inputs - #if not dynamic_shape: + # if not dynamic_shape: # if attrs[0]["begin_norm_axis"] >= len(inputs["input_data"].shape) - 1: # print ("iiiiiii") # return 0, 3 @@ -240,19 +248,23 @@ class TrtConvertLayerNormTest_2(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py index 2742a536de97b9cbb501a8f407a5239667fd7d31..93f7115ae4fbb5910d0b9cbaa8f9cba3363bfe70 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py @@ -23,12 +23,10 @@ import unittest class TrtConvertLeakyReluTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(shape): return np.random.random(shape).astype(np.float32) @@ -37,32 +35,35 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest): self.input_dim = len(shape) for alpha in [0.02, 1.0, 100.0, -1.0, 0.0]: dics = [{"alpha": alpha}] - ops_config = [{ - "op_type": "leaky_relu", - "op_inputs": { - "X": 
["input_data"], - }, - "op_outputs": { - "Out": ["y_data"], - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "leaky_relu", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": { + "Out": ["y_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, shape)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, shape) + ) }, - outputs=["y_data"]) + outputs=["y_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.input_dim == 2: self.dynamic_shape.min_input_shape = {"input_data": [1, 8]} @@ -101,25 +102,31 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) self.trt_param.precision = paddle_infer.PrecisionType.Int8 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul.py index b4895d805bc62f669b6cdc48deaae43a2069b2db..9175c9b4ae13b71b215b7477dd5cc5932c4fdf55 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul.py @@ -22,12 +22,10 @@ import unittest class TrtConvertMatmulTest_static(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -47,48 +45,55 @@ class TrtConvertMatmulTest_static(TrtLayerAutoScanTest): input1_shape = [batch, 32, 6] input2_shape = [batch, 6, 11] for alpha in [0.3, 1.0]: - dics = [{ - "transpose_X": trans_x, - "transpose_Y": trans_y, - "alpha": alpha, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }] - ops_config = [{ - "op_type": "matmul", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "transpose_X": trans_x, + "transpose_Y": trans_y, + "alpha": alpha, + "fused_reshape_X": [], + 
"fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + } + ] + ops_config = [ + { + "op_type": "matmul", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig(data_gen=partial( - generate_input, input1_shape)), - "input2_data": - TensorConfig(data_gen=partial( - generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial( + generate_input, input1_shape + ) + ), + "input2_data": TensorConfig( + data_gen=partial( + generate_input, input2_shape + ) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): pass @@ -109,12 +114,10 @@ class TrtConvertMatmulTest_static(TrtLayerAutoScanTest): class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -133,60 +136,63 @@ class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): # input1_shape = [batch, 32, 6] # input2_shape = [batch, 6, 11] for alpha in [0.3, 1.0]: - dics = [{ - "transpose_X": trans_x, - "transpose_Y": trans_y, - "alpha": alpha, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }] - ops_config = [{ - "op_type": "matmul", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "transpose_X": trans_x, + "transpose_Y": trans_y, + "alpha": alpha, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + } + ] + ops_config = [ + { + "op_type": "matmul", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig( - data_gen=partial(generate_input, input1_shape)), - "input2_data": - TensorConfig( - data_gen=partial(generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial(generate_input, input1_shape) + ), + "input2_data": TensorConfig( + data_gen=partial(generate_input, input2_shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input1_data": [1, 4, 4], - "input2_data": [1, 4, 4] + "input2_data": [1, 4, 4], } self.dynamic_shape.max_input_shape = { "input1_data": [16, 4, 4], - "input2_data": [16, 4, 4] + "input2_data": [16, 4, 4], } self.dynamic_shape.opt_input_shape = { "input1_data": [8, 4, 4], - "input2_data": [8, 4, 4] + "input2_data": [8, 4, 4], } attrs = [ 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul_v2.py index 524262244d3ce029708bbd414143f2cf222b5b2f..82d9143a0981fe921c355f5f490e083a5332cf71 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_matmul_v2.py @@ -23,9 +23,7 @@ import os class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): - def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -34,53 +32,56 @@ class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): for trans_y in [False]: input1_shape = [batch, 64, 350, 75] input2_shape = [75, 25] - dics = [{ - "trans_x": trans_x, - "trans_y": trans_y, - }] - ops_config = [{ - "op_type": "matmul_v2", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "trans_x": trans_x, + "trans_y": trans_y, + } + ] + ops_config = [ + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig( - data_gen=partial(generate_input, input1_shape)), - "input2_data": - TensorConfig( - data_gen=partial(generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial(generate_input, input1_shape) + ), + "input2_data": TensorConfig( + data_gen=partial(generate_input, input2_shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input1_data": [10, 64, 350, 75], - "input2_data": [75, 25] + "input2_data": [75, 25], } self.dynamic_shape.max_input_shape = { "input1_data": [100, 64, 350, 75], - "input2_data": [75, 25] + "input2_data": [75, 25], } self.dynamic_shape.opt_input_shape = { "input1_data": [15, 64, 350, 75], - "input2_data": [75, 25] + "input2_data": [75, 25], } attrs = [ @@ -90,7 +91,7 @@ class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): # The output has little diff between gpu and trt in CI-Windows-Inference tol_fp32 = 1e-5 tol_half = 1e-5 - if (os.name == 'nt'): + if os.name == 'nt': tol_fp32 = 1e-3 tol_half = 1e-3 # for dynamic_shape @@ -109,9 +110,7 @@ class TrtConvertMatmulTest_dynamic(TrtLayerAutoScanTest): class TrtConvertMatmulTest_dynamic2(TrtLayerAutoScanTest): - def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -120,53 +119,56 @@ class TrtConvertMatmulTest_dynamic2(TrtLayerAutoScanTest): for trans_y in [False]: input1_shape = [60, 40] input2_shape = [batch, 40, 90] - dics = [{ - "trans_x": trans_x, - "trans_y": trans_y, - }] - ops_config = [{ - "op_type": "matmul_v2", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "trans_x": trans_x, + "trans_y": trans_y, + } + ] + ops_config = [ + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + 
}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig( - data_gen=partial(generate_input, input1_shape)), - "input2_data": - TensorConfig( - data_gen=partial(generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial(generate_input, input1_shape) + ), + "input2_data": TensorConfig( + data_gen=partial(generate_input, input2_shape) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input1_data": [60, 40], - "input2_data": [10, 40, 90] + "input2_data": [10, 40, 90], } self.dynamic_shape.max_input_shape = { "input1_data": [60, 40], - "input2_data": [20, 40, 90] + "input2_data": [20, 40, 90], } self.dynamic_shape.opt_input_shape = { "input1_data": [60, 40], - "input2_data": [15, 40, 90] + "input2_data": [15, 40, 90], } attrs = [ @@ -175,7 +177,7 @@ class TrtConvertMatmulTest_dynamic2(TrtLayerAutoScanTest): # The output has little diff between gpu and trt in CI-Windows-Inference tol_fp32 = 1e-5 tol_half = 1e-5 - if (os.name == 'nt'): + if os.name == 'nt': tol_fp32 = 1e-3 tol_half = 1e-3 # for dynamic_shape @@ -194,9 +196,7 @@ class TrtConvertMatmulTest_dynamic2(TrtLayerAutoScanTest): class TrtConvertMatmulTest_dynamic3(TrtLayerAutoScanTest): - def sample_program_configs(self): - def generate_input(shape): return np.random.random(shape).astype(np.float32) @@ -219,86 +219,95 @@ class TrtConvertMatmulTest_dynamic3(TrtLayerAutoScanTest): elif case == 2: input1_shape = [50] input2_shape = [50] - if (case == 0 or case == 1): - dics = [{ - "trans_x": False, - "trans_y": False, - }] - elif (case == 2): - dics = [{ - "trans_x": trans_x, - "trans_y": trans_y, - }] - ops_config = [{ - "op_type": "matmul_v2", - "op_inputs": { - "X": ["input1_data"], - "Y": ["input2_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + if case == 0 or case == 1: + dics = [ + { + "trans_x": False, + "trans_y": False, + } + ] + elif case == 2: + dics = [ + { + "trans_x": trans_x, + "trans_y": trans_y, + } + ] + ops_config = [ + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["input1_data"], + "Y": ["input2_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1_data": - TensorConfig(data_gen=partial( - generate_input, input1_shape)), - "input2_data": - TensorConfig(data_gen=partial( - generate_input, input2_shape)) + "input1_data": TensorConfig( + data_gen=partial( + generate_input, input1_shape + ) + ), + "input2_data": TensorConfig( + data_gen=partial( + generate_input, input2_shape + ) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): - if (self.case == 0): + if self.case == 0: self.dynamic_shape.min_input_shape = { "input1_data": [20, 50], - "input2_data": [50] + "input2_data": [50], } self.dynamic_shape.max_input_shape = { "input1_data": [30, 50], - 
"input2_data": [50] + "input2_data": [50], } self.dynamic_shape.opt_input_shape = { "input1_data": [25, 50], - "input2_data": [50] + "input2_data": [50], } - elif (self.case == 1): + elif self.case == 1: self.dynamic_shape.min_input_shape = { "input2_data": [50, 20], - "input1_data": [50] + "input1_data": [50], } self.dynamic_shape.max_input_shape = { "input2_data": [50, 30], - "input1_data": [50] + "input1_data": [50], } self.dynamic_shape.opt_input_shape = { "input2_data": [50, 25], - "input1_data": [50] + "input1_data": [50], } - elif (self.case == 2): + elif self.case == 2: self.dynamic_shape.min_input_shape = { "input2_data": [30], - "input1_data": [50] + "input1_data": [50], } self.dynamic_shape.max_input_shape = { "input2_data": [50], - "input1_data": [50] + "input1_data": [50], } self.dynamic_shape.opt_input_shape = { "input2_data": [50], - "input1_data": [50] + "input1_data": [50], } generate_dynamic_shape() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py index d6d62967cfb5df1502d6602b293d13188db8cf50..baac4943f7c269ee4f5e6b59e3b4fbca1c734d08 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_mish.py @@ -21,12 +21,10 @@ import unittest class TrtConvertMishTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch, dim1, dim2, dim3): shape = [batch] if dim1 != 0: @@ -51,35 +49,36 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): if dim1 == 0 and dim2 == 0 and dim3 != 0: continue - ops_config = [{ - "op_type": "mish", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["mish_output_data"] - }, - "op_attrs": { - "threshold": thre + ops_config = [ + { + "op_type": "mish", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["mish_output_data"]}, + "op_attrs": {"threshold": thre}, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input, batch, dim1, dim2, - dim3)) + "input_data": TensorConfig( + data_gen=partial( + generate_input, + batch, + dim1, + dim2, + dim3, + ) + ) }, - outputs=["mish_output_data"]) + outputs=["mish_output_data"], + ) yield program_config def sample_predictor_configs(self, program_config): - def generate_dynamic_shape(attrs): if self.dim1 == 0: self.dynamic_shape.min_input_shape = { @@ -139,29 +138,35 @@ class TrtConvertMishTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller1(program_config, 
predictor_config): if self.dim1 == 0 and self.dim2 == 0 and self.dim3 == 0: return True return False - self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, - "Trt does not support 1-dimensional input.") + self.add_skip_case( + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "Trt does not support 1-dimensional input.", + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms.py index e52eb4d1ce9c7fe870093a353067b45df3c565d4..2d98b339d2ad32af9bdc96ab249752334f87159d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms.py @@ -22,7 +22,6 @@ import unittest class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,17 +38,21 @@ class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): min_subgraph_size=self.trt_param.min_subgraph_size, precision_mode=self.trt_param.precision, use_static=self.trt_param.use_static, - use_calib_mode=self.trt_param.use_calib_mode) - if len(self.dynamic_shape.min_input_shape - ) != 0 and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.max_input_shape.keys( - ) and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.opt_input_shape.keys(): + use_calib_mode=self.trt_param.use_calib_mode, + ) + if ( + len(self.dynamic_shape.min_input_shape) != 0 + and self.dynamic_shape.min_input_shape.keys() + == self.dynamic_shape.max_input_shape.keys() + and self.dynamic_shape.min_input_shape.keys() + == self.dynamic_shape.opt_input_shape.keys() + ): config.set_trt_dynamic_shape_info( self.dynamic_shape.min_input_shape, self.dynamic_shape.max_input_shape, self.dynamic_shape.opt_input_shape, - self.dynamic_shape.disable_trt_plugin_fp16) + self.dynamic_shape.disable_trt_plugin_fp16, + ) return config else: config = paddle_infer.Config() @@ -59,15 +62,15 @@ class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): return config def sample_program_configs(self): - def generate_boxes(batch, num_boxes): - return np.arange(batch * num_boxes * 4, - dtype=np.float32).reshape([batch, num_boxes, 4]) + return np.arange(batch * num_boxes * 4, dtype=np.float32).reshape( + [batch, num_boxes, 4] + ) def generate_scores(batch, num_boxes, num_classes): - return np.arange(batch * num_classes * num_boxes, - dtype=np.float32).reshape( - [batch, num_classes, num_boxes]) + return np.arange( + batch * num_classes * num_boxes, dtype=np.float32 + ).reshape([batch, num_classes, num_boxes]) # return np.random.rand(batch, num_classes, num_boxes).astype(np.float32) for batch in [1, 2]: @@ -76,46 +79,55 @@ class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): for num_boxes, num_classes in [[80, 100], [40, 200], [20, 400]]: self.num_boxes, self.num_classes = num_boxes, num_classes for score_threshold in [ - 0.01, + 0.01, ]: - ops_config = [{ - "op_type": "multiclass_nms", - "op_inputs": { - "BBoxes": ["input_bboxes"], - "Scores": ["input_scores"], - }, - "op_outputs": { - "Out": ["nms_output_boxes"], - }, - "op_attrs": { - "background_label": -1, - "score_threshold": score_threshold, - "nms_top_k": num_boxes, - "keep_top_k": num_boxes, - "nms_threshold": 0.3, - "normalized": False, - "nms_eta": nms_eta + ops_config = [ + { + "op_type": "multiclass_nms", + "op_inputs": { + "BBoxes": ["input_bboxes"], + 
"Scores": ["input_scores"], + }, + "op_outputs": { + "Out": ["nms_output_boxes"], + }, + "op_attrs": { + "background_label": -1, + "score_threshold": score_threshold, + "nms_top_k": num_boxes, + "keep_top_k": num_boxes, + "nms_threshold": 0.3, + "normalized": False, + "nms_eta": nms_eta, + }, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_bboxes": - TensorConfig(data_gen=partial( - generate_boxes, batch, num_boxes)), - "input_scores": - TensorConfig( - data_gen=partial(generate_scores, batch, - num_boxes, num_classes)) + "input_bboxes": TensorConfig( + data_gen=partial( + generate_boxes, batch, num_boxes + ) + ), + "input_scores": TensorConfig( + data_gen=partial( + generate_scores, + batch, + num_boxes, + num_classes, + ) + ), }, - outputs=["nms_output_boxes"]) + outputs=["nms_output_boxes"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The last dim of input_bboxes should be static. self.dynamic_shape.min_input_shape = { @@ -147,39 +159,53 @@ class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 # self.trt_param.precision = paddle_infer.PrecisionType.Half # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, True), (1e-2, 1e-2) - def assert_tensors_near(self, atol: float, rtol: float, - tensor: Dict[str, np.array], - baseline: Dict[str, np.array]): + def assert_tensors_near( + self, + atol: float, + rtol: float, + tensor: Dict[str, np.array], + baseline: Dict[str, np.array], + ): # the order of tensorrt outputs are not consistent with paddle for key, arr in tensor.items(): if key == "nms_output_boxes": basline_arr = np.array( - sorted(baseline[key].reshape((-1, 6)), - key=lambda i: [i[0], i[1]])) + sorted( + baseline[key].reshape((-1, 6)), + key=lambda i: [i[0], i[1]], + ) + ) arr = np.array( - sorted(arr.reshape((-1, 6)), key=lambda i: [i[0], i[1]])) + sorted(arr.reshape((-1, 6)), key=lambda i: [i[0], i[1]]) + ) else: basline_arr = np.array(baseline[key].reshape((-1, 1))) arr = np.array(arr.reshape((-1, 1))) self.assertTrue( basline_arr.shape == arr.shape, - "The output shapes are not equal, the baseline shape is " + - str(basline_arr.shape) + ', but got ' + str(arr.shape)) + "The output shapes are not equal, the baseline shape is " + + str(basline_arr.shape) + + ', but got ' + + str(arr.shape), + ) diff = abs(basline_arr - arr) np.testing.assert_allclose( basline_arr, @@ -187,7 +213,9 @@ class TrtConvertMulticlassNMSTest(TrtLayerAutoScanTest): rtol=rtol, atol=atol, err_msg='Output has diff, Maximum absolute error: {}'.format( - np.amax(diff))) + np.amax(diff) + ), + ) def assert_op_size(self, trt_engine_num, paddle_op_num): # tensorrt op num is not consistent with paddle diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py index 7b743b00b3d123ba526ee7617d175162e326c433..3a04993f2cbbdefddaeddf07ac4e00f60fdd8789 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py @@ -22,7 +22,6 @@ import unittest class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -39,17 +38,21 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): min_subgraph_size=self.trt_param.min_subgraph_size, precision_mode=self.trt_param.precision, use_static=self.trt_param.use_static, - use_calib_mode=self.trt_param.use_calib_mode) - if len(self.dynamic_shape.min_input_shape - ) != 0 and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.max_input_shape.keys( - ) and self.dynamic_shape.min_input_shape.keys( - ) == self.dynamic_shape.opt_input_shape.keys(): + use_calib_mode=self.trt_param.use_calib_mode, + ) + if ( + len(self.dynamic_shape.min_input_shape) != 0 + and self.dynamic_shape.min_input_shape.keys() + == self.dynamic_shape.max_input_shape.keys() + and self.dynamic_shape.min_input_shape.keys() + == self.dynamic_shape.opt_input_shape.keys() + ): config.set_trt_dynamic_shape_info( self.dynamic_shape.min_input_shape, self.dynamic_shape.max_input_shape, self.dynamic_shape.opt_input_shape, - self.dynamic_shape.disable_trt_plugin_fp16) + self.dynamic_shape.disable_trt_plugin_fp16, + ) return config else: config = paddle_infer.Config() @@ -59,15 +62,15 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): return config def sample_program_configs(self): - def generate_boxes(batch, num_boxes): - return np.arange(batch * num_boxes * 4, - dtype=np.float32).reshape([batch, num_boxes, 4]) + return np.arange(batch * num_boxes * 4, dtype=np.float32).reshape( + [batch, num_boxes, 4] + ) def generate_scores(batch, num_boxes, num_classes): - return np.arange(batch * num_classes * num_boxes, - dtype=np.float32).reshape( - [batch, num_classes, num_boxes]) + return np.arange( + batch * num_classes * num_boxes, dtype=np.float32 + ).reshape([batch, num_classes, num_boxes]) # return np.random.rand(batch, num_classes, num_boxes).astype(np.float32) for batch in [1, 2]: @@ -76,51 +79,61 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): for num_boxes, num_classes in [[80, 100], [40, 200], [20, 400]]: self.num_boxes, self.num_classes = num_boxes, num_classes for score_threshold in [ - 0.01, + 0.01, ]: - ops_config = [{ - "op_type": "multiclass_nms3", - "op_inputs": { - "BBoxes": ["input_bboxes"], - "Scores": ["input_scores"], - }, - "op_outputs": { - "Out": ["nms_output_boxes"], - "Index": ["nms_output_index"], - "NmsRoisNum": ["nms_output_num"] - }, - "op_attrs": { - "background_label": -1, - "score_threshold": score_threshold, - "nms_top_k": num_boxes, - "keep_top_k": num_boxes, - "nms_threshold": 0.3, - "normalized": False, - "nms_eta": nms_eta + ops_config = [ + { + "op_type": "multiclass_nms3", + "op_inputs": { + "BBoxes": ["input_bboxes"], + "Scores": ["input_scores"], + }, + "op_outputs": { + "Out": ["nms_output_boxes"], + "Index": ["nms_output_index"], + "NmsRoisNum": ["nms_output_num"], + }, + "op_attrs": { + "background_label": -1, + "score_threshold": score_threshold, + "nms_top_k": num_boxes, + "keep_top_k": num_boxes, + 
"nms_threshold": 0.3, + "normalized": False, + "nms_eta": nms_eta, + }, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_bboxes": - TensorConfig(data_gen=partial( - generate_boxes, batch, num_boxes)), - "input_scores": - TensorConfig( - data_gen=partial(generate_scores, batch, - num_boxes, num_classes)) + "input_bboxes": TensorConfig( + data_gen=partial( + generate_boxes, batch, num_boxes + ) + ), + "input_scores": TensorConfig( + data_gen=partial( + generate_scores, + batch, + num_boxes, + num_classes, + ) + ), }, outputs=[ - "nms_output_boxes", "nms_output_num", - "nms_output_index" - ]) + "nms_output_boxes", + "nms_output_num", + "nms_output_index", + ], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The last dim of input_bboxes should be static. self.dynamic_shape.min_input_shape = { @@ -152,41 +165,55 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 # self.trt_param.precision = paddle_infer.PrecisionType.Half # yield self.create_inference_config(), generate_trt_nodes_num( # attrs, True), (1e-2, 1e-2) - def assert_tensors_near(self, atol: float, rtol: float, - tensor: Dict[str, np.array], - baseline: Dict[str, np.array]): + def assert_tensors_near( + self, + atol: float, + rtol: float, + tensor: Dict[str, np.array], + baseline: Dict[str, np.array], + ): # the order of tensorrt outputs are not consistent with paddle for key, arr in tensor.items(): if key == "nms_output_index": continue if key == "nms_output_boxes": basline_arr = np.array( - sorted(baseline[key].reshape((-1, 6)), - key=lambda i: [i[0], i[1]])) + sorted( + baseline[key].reshape((-1, 6)), + key=lambda i: [i[0], i[1]], + ) + ) arr = np.array( - sorted(arr.reshape((-1, 6)), key=lambda i: [i[0], i[1]])) + sorted(arr.reshape((-1, 6)), key=lambda i: [i[0], i[1]]) + ) else: basline_arr = np.array(baseline[key].reshape((-1, 1))) arr = np.array(arr.reshape((-1, 1))) self.assertTrue( basline_arr.shape == arr.shape, - "The output shapes are not equal, the baseline shape is " + - str(basline_arr.shape) + ', but got ' + str(arr.shape)) + "The output shapes are not equal, the baseline shape is " + + str(basline_arr.shape) + + ', but got ' + + str(arr.shape), + ) diff = abs(basline_arr - arr) np.testing.assert_allclose( basline_arr, @@ -194,7 +221,9 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): rtol=rtol, atol=atol, err_msg='Output has diff, Maximum absolute error: {}'.format( - np.amax(diff))) + np.amax(diff) + ), + ) def assert_op_size(self, trt_engine_num, paddle_op_num): # tensorrt op num is not consistent with paddle diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multihead_matmul.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multihead_matmul.py index a580828943bb9b3d6c1564ec270871a48286848b..db02ce1e0f4e7ab069f10e0ae1cdde7d6a490bed 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multihead_matmul.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multihead_matmul.py @@ -22,12 +22,10 @@ from typing import List class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch, dim1): return np.random.random((batch, dim1, 768)).astype(np.float32) @@ -44,103 +42,86 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): self.batch = batch for reshape_shape in [[0, 0, 12, 64]]: for dim1 in [128]: - input2_shapes = [[batch, reshape_shape[2], dim1, dim1], - [batch, 1, 1, dim1]] + input2_shapes = [ + [batch, reshape_shape[2], dim1, dim1], + [batch, 1, 1, dim1], + ] for input2_shape in input2_shapes: for axis in [0]: - dics = [{ - "x_num_col_dims": 2, - "y_num_col_dims": 1 - }, { - "axis": 2 - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1 - }, { - "axis": 2 - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1 - }, { - "axis": 2 - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "scale": 0.125, - "bias": 0.0, - "bias_after_scale": True - }, { - "alpha": 1.0, - "transpose_X": False, - "transpose_Y": True, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }, { - "axis": axis - }, { - "axis": -1, - "is_test": True - }, { - "seed": 0, - "dropout_prob": 0.10000000149011612, - "dropout_implementation": "upscale_in_train", - "fix_seed": False, - "is_test": True - }, { - "alpha": 1.0, - "transpose_X": False, - "transpose_Y": False, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }, { - "axis": [0, 2, 1, 3] - }, { - "shape": [0, 0, 768] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1 - }] + dics = [ + {"x_num_col_dims": 2, "y_num_col_dims": 1}, + {"axis": 2}, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + {"x_num_col_dims": 2, "y_num_col_dims": 1}, + {"axis": 2}, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + {"x_num_col_dims": 2, "y_num_col_dims": 1}, + {"axis": 2}, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + { + "scale": 0.125, + "bias": 0.0, + "bias_after_scale": True, + }, + { + "alpha": 1.0, + "transpose_X": False, + "transpose_Y": True, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + {"axis": axis}, + {"axis": -1, "is_test": True}, + { + "seed": 0, + "dropout_prob": 0.10000000149011612, + "dropout_implementation": "upscale_in_train", + "fix_seed": False, + "is_test": True, + }, + { + "alpha": 1.0, + "transpose_X": False, + "transpose_Y": False, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + {"axis": [0, 2, 1, 3]}, + {"shape": [0, 0, 768]}, + {"x_num_col_dims": 2, "y_num_col_dims": 1}, + ] ops_config = [ { "op_type": "mul", 
"op_inputs": { "X": ["input_data1"], - "Y": ["mul1_weight"] + "Y": ["mul1_weight"], }, - "op_outputs": { - "Out": ["mul1_output"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["mul1_output"]}, + "op_attrs": dics[0], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul1_output"], - "Y": ["elementwise_add1_weight"] + "Y": ["elementwise_add1_weight"], }, "op_outputs": { "Out": ["elementwise_add1_output"] }, - "op_attrs": dics[1] + "op_attrs": dics[1], }, { "op_type": "reshape2", @@ -149,42 +130,38 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): }, "op_outputs": { "Out": ["reshape21_output"], - "XShape": ["reshape21_output_xshape"] + "XShape": ["reshape21_output_xshape"], }, - "op_attrs": dics[2] + "op_attrs": dics[2], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape21_output"] - }, + "op_inputs": {"X": ["reshape21_output"]}, "op_outputs": { "Out": ["transpose21_output"], - "XShape": ["transpose21_output_xshape"] + "XShape": ["transpose21_output_xshape"], }, - "op_attrs": dics[3] + "op_attrs": dics[3], }, { "op_type": "mul", "op_inputs": { "X": ["input_data1"], - "Y": ["mul2_weight"] + "Y": ["mul2_weight"], }, - "op_outputs": { - "Out": ["mul2_output"] - }, - "op_attrs": dics[4] + "op_outputs": {"Out": ["mul2_output"]}, + "op_attrs": dics[4], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul2_output"], - "Y": ["elementwise_add2_weight"] + "Y": ["elementwise_add2_weight"], }, "op_outputs": { "Out": ["elementwise_add2_output"] }, - "op_attrs": dics[5] + "op_attrs": dics[5], }, { "op_type": "reshape2", @@ -193,42 +170,38 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): }, "op_outputs": { "Out": ["reshape22_output"], - "XShape": ["reshape22_output_xshape"] + "XShape": ["reshape22_output_xshape"], }, - "op_attrs": dics[6] + "op_attrs": dics[6], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape22_output"] - }, + "op_inputs": {"X": ["reshape22_output"]}, "op_outputs": { "Out": ["transpose22_output"], - "XShape": ["transpose22_output_xshape"] + "XShape": ["transpose22_output_xshape"], }, - "op_attrs": dics[7] + "op_attrs": dics[7], }, { "op_type": "mul", "op_inputs": { "X": ["input_data1"], - "Y": ["mul3_weight"] + "Y": ["mul3_weight"], }, - "op_outputs": { - "Out": ["mul3_output"] - }, - "op_attrs": dics[8] + "op_outputs": {"Out": ["mul3_output"]}, + "op_attrs": dics[8], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul3_output"], - "Y": ["elementwise_add3_weight"] + "Y": ["elementwise_add3_weight"], }, "op_outputs": { "Out": ["elementwise_add3_output"] }, - "op_attrs": dics[9] + "op_attrs": dics[9], }, { "op_type": "reshape2", @@ -237,30 +210,26 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): }, "op_outputs": { "Out": ["reshape23_output"], - "XShape": ["reshape23_output_xshape"] + "XShape": ["reshape23_output_xshape"], }, - "op_attrs": dics[10] + "op_attrs": dics[10], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape23_output"] - }, + "op_inputs": {"X": ["reshape23_output"]}, "op_outputs": { "Out": ["transpose23_output"], - "XShape": ["transpose23_output_xshape"] + "XShape": ["transpose23_output_xshape"], }, - "op_attrs": dics[11] + "op_attrs": dics[11], }, { "op_type": "scale", "op_inputs": { "X": ["transpose23_output"], }, - "op_outputs": { - "Out": ["scale_output"] - }, - "op_attrs": dics[12] + "op_outputs": {"Out": ["scale_output"]}, + "op_attrs": dics[12], }, { "op_type": "matmul", @@ -268,41 +237,35 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): "X": 
["scale_output"], "Y": ["transpose22_output"], }, - "op_outputs": { - "Out": ["matmul1_output"] - }, - "op_attrs": dics[13] + "op_outputs": {"Out": ["matmul1_output"]}, + "op_attrs": dics[13], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["matmul1_output"], - "Y": ["input_data2"] + "Y": ["input_data2"], }, "op_outputs": { "Out": ["elementwise_add4_output"] }, - "op_attrs": dics[14] + "op_attrs": dics[14], }, { "op_type": "softmax", "op_inputs": { "X": ["elementwise_add4_output"] }, - "op_outputs": { - "Out": ["softmax_output"] - }, - "op_attrs": dics[15] + "op_outputs": {"Out": ["softmax_output"]}, + "op_attrs": dics[15], }, { "op_type": "dropout", "op_inputs": { "X": ["softmax_output"], }, - "op_outputs": { - "Out": ["dropout3_output"] - }, - "op_attrs": dics[16] + "op_outputs": {"Out": ["dropout3_output"]}, + "op_attrs": dics[16], }, { "op_type": "matmul", @@ -310,32 +273,26 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): "X": ["dropout3_output"], "Y": ["transpose21_output"], }, - "op_outputs": { - "Out": ["matmul2_output"] - }, - "op_attrs": dics[17] + "op_outputs": {"Out": ["matmul2_output"]}, + "op_attrs": dics[17], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["matmul2_output"] - }, + "op_inputs": {"X": ["matmul2_output"]}, "op_outputs": { "Out": ["transpose24_output"], - "XShape": ["transpose24_output_xshape"] + "XShape": ["transpose24_output_xshape"], }, - "op_attrs": dics[18] + "op_attrs": dics[18], }, { "op_type": "reshape2", - "op_inputs": { - "X": ["transpose24_output"] - }, + "op_inputs": {"X": ["transpose24_output"]}, "op_outputs": { "Out": ["reshape24_output"], - "XShape": ["reshape24_output_xshape"] + "XShape": ["reshape24_output_xshape"], }, - "op_attrs": dics[19] + "op_attrs": dics[19], }, # In order to fuse ops with # multihead_matmul_fuse_pass_v2, the last op @@ -344,72 +301,75 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): "op_type": "mul", "op_inputs": { "X": ["reshape24_output"], - "Y": ["mul4_weight"] + "Y": ["mul4_weight"], }, - "op_outputs": { - "Out": ["mul4_output"] - }, - "op_attrs": dics[20] - } + "op_outputs": {"Out": ["mul4_output"]}, + "op_attrs": dics[20], + }, ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "mul1_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul2_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul3_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul4_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "elementwise_add1_weight": - TensorConfig( - data_gen=partial(generate_weight2)), - "elementwise_add2_weight": - TensorConfig( - data_gen=partial(generate_weight2)), - "elementwise_add3_weight": - TensorConfig( - data_gen=partial(generate_weight2)), + "mul1_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "mul2_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "mul3_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "mul4_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "elementwise_add1_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), + "elementwise_add2_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), + "elementwise_add3_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), }, inputs={ - "input_data1": - TensorConfig(data_gen=partial( - generate_input1, batch, dim1)), - "input_data2": - TensorConfig(data_gen=partial( - generate_input2, input2_shape)), + 
"input_data1": TensorConfig( + data_gen=partial( + generate_input1, batch, dim1 + ) + ), + "input_data2": TensorConfig( + data_gen=partial( + generate_input2, input2_shape + ) + ), }, - outputs=["mul4_output"]) + outputs=["mul4_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The last dim of input1 and input2 should be static. self.dynamic_shape.min_input_shape = { "input_data1": [1, 8, 768], "input_data2": [1, 1, 1, 128], - "reshape24_output": [1, 128, 768] + "reshape24_output": [1, 128, 768], } self.dynamic_shape.max_input_shape = { "input_data1": [16, 512, 768], "input_data2": [16, 256, 512, 128], - "reshape24_output": [1, 128, 768] + "reshape24_output": [1, 128, 768], } self.dynamic_shape.opt_input_shape = { "input_data1": [8, 128, 768], "input_data2": [8, 32, 64, 128], - "reshape24_output": [1, 128, 768] + "reshape24_output": [1, 128, 768], } def clear_dynamic_shape(): @@ -438,25 +398,30 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): yield self.create_inference_config(), (1, 3), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if self.trt_param.precision == paddle_infer.PrecisionType.Half: return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output has diff between gpu and trt in fp16 mode.") + teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "The output has diff between gpu and trt in fp16 mode.", + ) def teller2(program_config, predictor_config): - if self.trt_param.precision == paddle_infer.PrecisionType.Float32 and len( - self.dynamic_shape.min_input_shape) != 0 and self.batch > 2: + if ( + self.trt_param.precision == paddle_infer.PrecisionType.Float32 + and len(self.dynamic_shape.min_input_shape) != 0 + and self.batch > 2 + ): return True return False self.add_skip_case( - teller2, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output has diff between gpu and trt when dynamic fp32 mode and batch size > 2." 
+ teller2, + SkipReasons.TRT_NOT_IMPLEMENTED, + "The output has diff between gpu and trt when dynamic fp32 mode and batch size > 2.", ) def teller3(program_config, predictor_config): @@ -465,8 +430,10 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): return False self.add_skip_case( - teller3, SkipReasons.TRT_NOT_IMPLEMENTED, - "The output has diff between gpu and trt in int8 mode.") + teller3, + SkipReasons.TRT_NOT_IMPLEMENTED, + "The output has diff between gpu and trt in int8 mode.", + ) def test(self): self.add_skip_trt_case() @@ -474,9 +441,7 @@ class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest): class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): - def sample_program_configs(self): - def generate_input1(batch, dim1): return np.random.random((batch, dim1, 768)).astype(np.float32) @@ -493,112 +458,110 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): self.batch = batch for reshape_shape in [[0, 0, 12, 64]]: for dim1 in [128]: - input2_shapes = [[batch, reshape_shape[2], dim1, dim1], - [batch, 1, 1, dim1]] + input2_shapes = [ + [batch, reshape_shape[2], dim1, dim1], + [batch, 1, 1, dim1], + ] for input2_shape in input2_shapes: for axis in [0]: - dics = [{ - "x_num_col_dims": 2, - "y_num_col_dims": 1, - "enable_int8": True, - "Input_scale": 1.0, - }, { - "axis": 2, - "out_threshold": 1.0, - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1, - "enable_int8": True, - "Input_scale": 1.0, - }, { - "axis": 2, - "out_threshold": 1.0, - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1, - "enable_int8": True, - "Input_scale": 1.0, - }, { - "axis": 2, - "out_threshold": 1.0, - }, { - "shape": reshape_shape - }, { - "axis": [0, 2, 1, 3] - }, { - "scale": 0.125, - "bias": 0.0, - "bias_after_scale": True - }, { - "alpha": 1.0, - "transpose_X": False, - "transpose_Y": True, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }, { - "axis": axis - }, { - "axis": -1, - "is_test": True - }, { - "seed": 0, - "dropout_prob": 0.10000000149011612, - "dropout_implementation": "upscale_in_train", - "fix_seed": False, - "is_test": True - }, { - "alpha": 1.0, - "transpose_X": False, - "transpose_Y": False, - "fused_reshape_X": [], - "fused_reshape_Y": [], - "fused_transpose_X": [], - "fused_transpose_Y": [], - "fused_reshape_Out": [], - "fused_transpose_Out": [] - }, { - "axis": [0, 2, 1, 3] - }, { - "shape": [0, 0, 768] - }, { - "x_num_col_dims": 2, - "y_num_col_dims": 1 - }] + dics = [ + { + "x_num_col_dims": 2, + "y_num_col_dims": 1, + "enable_int8": True, + "Input_scale": 1.0, + }, + { + "axis": 2, + "out_threshold": 1.0, + }, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + { + "x_num_col_dims": 2, + "y_num_col_dims": 1, + "enable_int8": True, + "Input_scale": 1.0, + }, + { + "axis": 2, + "out_threshold": 1.0, + }, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + { + "x_num_col_dims": 2, + "y_num_col_dims": 1, + "enable_int8": True, + "Input_scale": 1.0, + }, + { + "axis": 2, + "out_threshold": 1.0, + }, + {"shape": reshape_shape}, + {"axis": [0, 2, 1, 3]}, + { + "scale": 0.125, + "bias": 0.0, + "bias_after_scale": True, + }, + { + "alpha": 1.0, + "transpose_X": False, + "transpose_Y": True, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + 
"fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + {"axis": axis}, + {"axis": -1, "is_test": True}, + { + "seed": 0, + "dropout_prob": 0.10000000149011612, + "dropout_implementation": "upscale_in_train", + "fix_seed": False, + "is_test": True, + }, + { + "alpha": 1.0, + "transpose_X": False, + "transpose_Y": False, + "fused_reshape_X": [], + "fused_reshape_Y": [], + "fused_transpose_X": [], + "fused_transpose_Y": [], + "fused_reshape_Out": [], + "fused_transpose_Out": [], + }, + {"axis": [0, 2, 1, 3]}, + {"shape": [0, 0, 768]}, + {"x_num_col_dims": 2, "y_num_col_dims": 1}, + ] ops_config = [ { "op_type": "mul", "op_inputs": { "X": ["input_data1"], - "Y": ["mul1_weight"] - }, - "op_outputs": { - "Out": ["mul1_output"] + "Y": ["mul1_weight"], }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["mul1_output"]}, + "op_attrs": dics[0], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul1_output"], - "Y": ["elementwise_add1_weight"] + "Y": ["elementwise_add1_weight"], }, "op_outputs": { "Out": ["elementwise_add1_output"] }, - "op_attrs": dics[1] + "op_attrs": dics[1], }, { "op_type": "reshape2", @@ -607,42 +570,38 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): }, "op_outputs": { "Out": ["reshape21_output"], - "XShape": ["reshape21_output_xshape"] + "XShape": ["reshape21_output_xshape"], }, - "op_attrs": dics[2] + "op_attrs": dics[2], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape21_output"] - }, + "op_inputs": {"X": ["reshape21_output"]}, "op_outputs": { "Out": ["transpose21_output"], - "XShape": ["transpose21_output_xshape"] + "XShape": ["transpose21_output_xshape"], }, - "op_attrs": dics[3] + "op_attrs": dics[3], }, { "op_type": "mul", "op_inputs": { "X": ["input_data1"], - "Y": ["mul2_weight"] - }, - "op_outputs": { - "Out": ["mul2_output"] + "Y": ["mul2_weight"], }, - "op_attrs": dics[4] + "op_outputs": {"Out": ["mul2_output"]}, + "op_attrs": dics[4], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul2_output"], - "Y": ["elementwise_add2_weight"] + "Y": ["elementwise_add2_weight"], }, "op_outputs": { "Out": ["elementwise_add2_output"] }, - "op_attrs": dics[5] + "op_attrs": dics[5], }, { "op_type": "reshape2", @@ -651,42 +610,38 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): }, "op_outputs": { "Out": ["reshape22_output"], - "XShape": ["reshape22_output_xshape"] + "XShape": ["reshape22_output_xshape"], }, - "op_attrs": dics[6] + "op_attrs": dics[6], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape22_output"] - }, + "op_inputs": {"X": ["reshape22_output"]}, "op_outputs": { "Out": ["transpose22_output"], - "XShape": ["transpose22_output_xshape"] + "XShape": ["transpose22_output_xshape"], }, - "op_attrs": dics[7] + "op_attrs": dics[7], }, { "op_type": "mul", "op_inputs": { "X": ["input_data1"], - "Y": ["mul3_weight"] - }, - "op_outputs": { - "Out": ["mul3_output"] + "Y": ["mul3_weight"], }, - "op_attrs": dics[8] + "op_outputs": {"Out": ["mul3_output"]}, + "op_attrs": dics[8], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["mul3_output"], - "Y": ["elementwise_add3_weight"] + "Y": ["elementwise_add3_weight"], }, "op_outputs": { "Out": ["elementwise_add3_output"] }, - "op_attrs": dics[9] + "op_attrs": dics[9], }, { "op_type": "reshape2", @@ -695,30 +650,26 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): }, "op_outputs": { "Out": ["reshape23_output"], - "XShape": ["reshape23_output_xshape"] + "XShape": ["reshape23_output_xshape"], }, - "op_attrs": 
dics[10] + "op_attrs": dics[10], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["reshape23_output"] - }, + "op_inputs": {"X": ["reshape23_output"]}, "op_outputs": { "Out": ["transpose23_output"], - "XShape": ["transpose23_output_xshape"] + "XShape": ["transpose23_output_xshape"], }, - "op_attrs": dics[11] + "op_attrs": dics[11], }, { "op_type": "scale", "op_inputs": { "X": ["transpose23_output"], }, - "op_outputs": { - "Out": ["scale_output"] - }, - "op_attrs": dics[12] + "op_outputs": {"Out": ["scale_output"]}, + "op_attrs": dics[12], }, { "op_type": "matmul", @@ -726,41 +677,35 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): "X": ["scale_output"], "Y": ["transpose22_output"], }, - "op_outputs": { - "Out": ["matmul1_output"] - }, - "op_attrs": dics[13] + "op_outputs": {"Out": ["matmul1_output"]}, + "op_attrs": dics[13], }, { "op_type": "elementwise_add", "op_inputs": { "X": ["matmul1_output"], - "Y": ["input_data2"] + "Y": ["input_data2"], }, "op_outputs": { "Out": ["elementwise_add4_output"] }, - "op_attrs": dics[14] + "op_attrs": dics[14], }, { "op_type": "softmax", "op_inputs": { "X": ["elementwise_add4_output"] }, - "op_outputs": { - "Out": ["softmax_output"] - }, - "op_attrs": dics[15] + "op_outputs": {"Out": ["softmax_output"]}, + "op_attrs": dics[15], }, { "op_type": "dropout", "op_inputs": { "X": ["softmax_output"], }, - "op_outputs": { - "Out": ["dropout3_output"] - }, - "op_attrs": dics[16] + "op_outputs": {"Out": ["dropout3_output"]}, + "op_attrs": dics[16], }, { "op_type": "matmul", @@ -768,32 +713,26 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): "X": ["dropout3_output"], "Y": ["transpose21_output"], }, - "op_outputs": { - "Out": ["matmul2_output"] - }, - "op_attrs": dics[17] + "op_outputs": {"Out": ["matmul2_output"]}, + "op_attrs": dics[17], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["matmul2_output"] - }, + "op_inputs": {"X": ["matmul2_output"]}, "op_outputs": { "Out": ["transpose24_output"], - "XShape": ["transpose24_output_xshape"] + "XShape": ["transpose24_output_xshape"], }, - "op_attrs": dics[18] + "op_attrs": dics[18], }, { "op_type": "reshape2", - "op_inputs": { - "X": ["transpose24_output"] - }, + "op_inputs": {"X": ["transpose24_output"]}, "op_outputs": { "Out": ["reshape24_output"], - "XShape": ["reshape24_output_xshape"] + "XShape": ["reshape24_output_xshape"], }, - "op_attrs": dics[19] + "op_attrs": dics[19], }, # In order to fuse ops with # multihead_matmul_fuse_pass_v2, the last op @@ -802,61 +741,62 @@ class TrtConvertMultiHeadMatmulTestInt8(TrtConvertMultiHeadMatmulTest): "op_type": "mul", "op_inputs": { "X": ["reshape24_output"], - "Y": ["mul4_weight"] - }, - "op_outputs": { - "Out": ["mul4_output"] + "Y": ["mul4_weight"], }, - "op_attrs": dics[20] - } + "op_outputs": {"Out": ["mul4_output"]}, + "op_attrs": dics[20], + }, ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "mul1_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul2_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul3_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "mul4_weight": - TensorConfig( - data_gen=partial(generate_weight1)), - "elementwise_add1_weight": - TensorConfig( - data_gen=partial(generate_weight2)), - "elementwise_add2_weight": - TensorConfig( - data_gen=partial(generate_weight2)), - "elementwise_add3_weight": - TensorConfig( - data_gen=partial(generate_weight2)), + "mul1_weight": TensorConfig( + 
data_gen=partial(generate_weight1) + ), + "mul2_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "mul3_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "mul4_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "elementwise_add1_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), + "elementwise_add2_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), + "elementwise_add3_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), }, inputs={ - "input_data1": - TensorConfig(data_gen=partial( - generate_input1, batch, dim1)), - "input_data2": - TensorConfig(data_gen=partial( - generate_input2, input2_shape)), + "input_data1": TensorConfig( + data_gen=partial( + generate_input1, batch, dim1 + ) + ), + "input_data2": TensorConfig( + data_gen=partial( + generate_input2, input2_shape + ) + ), }, - outputs=["mul4_output"]) + outputs=["mul4_output"], + ) yield program_config class TrtConvertVitToMultiHeadMatmulTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch, length): return np.zeros((batch, length, 768), dtype=np.float32) @@ -870,216 +810,190 @@ class TrtConvertVitToMultiHeadMatmulTest(TrtLayerAutoScanTest): self.batch = batch for length in [64, 384]: self.length = length - ops_config = [{ - "op_type": "matmul_v2", - "op_inputs": { - "X": ["input_data1"], - "Y": ["matmul1_weight"] - }, - "op_outputs": { - "Out": ["matmul1_output"] - }, - "op_attrs": { - "trans_x": False, - "trans_y": False - } - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["matmul1_output"], - "Y": ["elementwise_add1_weight"] - }, - "op_outputs": { - "Out": ["elementwise_add1_output"] + ops_config = [ + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["input_data1"], + "Y": ["matmul1_weight"], + }, + "op_outputs": {"Out": ["matmul1_output"]}, + "op_attrs": {"trans_x": False, "trans_y": False}, }, - "op_attrs": { - "Scale_out": 1.0, - "Scale_x": 1.0, - "Scale_y": 1.0, - "axis": 2 - } - }, { - "op_type": "reshape2", - "op_inputs": { - "X": ["elementwise_add1_output"], + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["matmul1_output"], + "Y": ["elementwise_add1_weight"], + }, + "op_outputs": {"Out": ["elementwise_add1_output"]}, + "op_attrs": { + "Scale_out": 1.0, + "Scale_x": 1.0, + "Scale_y": 1.0, + "axis": 2, + }, }, - "op_outputs": { - "Out": ["reshape1_output"], - "XShape": ["reshape1_output_xshape"] + { + "op_type": "reshape2", + "op_inputs": { + "X": ["elementwise_add1_output"], + }, + "op_outputs": { + "Out": ["reshape1_output"], + "XShape": ["reshape1_output_xshape"], + }, + "op_attrs": {"shape": [-1, self.length, 3, 12, 64]}, }, - "op_attrs": { - "shape": [-1, self.length, 3, 12, 64] - } - }, { - "op_type": "transpose2", - "op_inputs": { - "X": ["reshape1_output"] + { + "op_type": "transpose2", + "op_inputs": {"X": ["reshape1_output"]}, + "op_outputs": { + "Out": ["transpose1_output"], + "XShape": ["transpose1_output_xshape"], + }, + "op_attrs": { + "axis": [2, 0, 3, 1, 4], + "data_format": "AnyLayout", + }, }, - "op_outputs": { - "Out": ["transpose1_output"], - "XShape": ["transpose1_output_xshape"] + { + "op_type": "slice", + "op_inputs": { + "Input": ["transpose1_output"], + }, + "op_outputs": {"Out": ["slice1_output"]}, + "op_attrs": { + "axes": [0], + "starts": [0], + "ends": [1], + "decrease_axis": [0], + "infer_flags": [1], + }, }, - "op_attrs": { - "axis": [2, 0, 3, 1, 4], - 
"data_format": "AnyLayout" - } - }, { - "op_type": "slice", - "op_inputs": { - "Input": ["transpose1_output"], + { + "op_type": "slice", + "op_inputs": { + "Input": ["transpose1_output"], + }, + "op_outputs": {"Out": ["slice2_output"]}, + "op_attrs": { + "axes": [0], + "starts": [1], + "ends": [2], + "decrease_axis": [0], + "infer_flags": [1], + }, }, - "op_outputs": { - "Out": ["slice1_output"] + { + "op_type": "slice", + "op_inputs": { + "Input": ["transpose1_output"], + }, + "op_outputs": {"Out": ["slice3_output"]}, + "op_attrs": { + "axes": [0], + "starts": [2], + "ends": [3], + "decrease_axis": [0], + "infer_flags": [1], + }, }, - "op_attrs": { - "axes": [0], - "starts": [0], - "ends": [1], - "decrease_axis": [0], - "infer_flags": [1] - } - }, { - "op_type": "slice", - "op_inputs": { - "Input": ["transpose1_output"], + { + "op_type": "transpose2", + "op_inputs": {"X": ["slice2_output"]}, + "op_outputs": { + "Out": ["transpose2_output"], + }, + "op_attrs": { + "axis": [0, 1, 3, 2], + "data_format": "AnyLayout", + }, }, - "op_outputs": { - "Out": ["slice2_output"] + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["slice1_output"], + "Y": ["transpose2_output"], + }, + "op_outputs": {"Out": ["matmul2_output"]}, + "op_attrs": {"trans_x": False, "trans_y": False}, }, - "op_attrs": { - "axes": [0], - "starts": [1], - "ends": [2], - "decrease_axis": [0], - "infer_flags": [1] - } - }, { - "op_type": "slice", - "op_inputs": { - "Input": ["transpose1_output"], + { + "op_type": "scale", + "op_inputs": { + "X": ["matmul2_output"], + }, + "op_outputs": {"Out": ["scale_output"]}, + "op_attrs": { + "scale": 0.125, + "bias": 0.0, + "bias_after_scale": True, + }, }, - "op_outputs": { - "Out": ["slice3_output"] + { + "op_type": "softmax", + "op_inputs": {"X": ["scale_output"]}, + "op_outputs": {"Out": ["softmax_output"]}, + "op_attrs": {"axis": -1, "data_format": "AnyLayout"}, }, - "op_attrs": { - "axes": [0], - "starts": [2], - "ends": [3], - "decrease_axis": [0], - "infer_flags": [1] - } - }, { - "op_type": "transpose2", - "op_inputs": { - "X": ["slice2_output"] + { + "op_type": "matmul_v2", + "op_inputs": { + "X": ["softmax_output"], + "Y": ["slice3_output"], + }, + "op_outputs": {"Out": ["matmul3_output"]}, + "op_attrs": {"trans_x": False, "trans_y": False}, }, - "op_outputs": { - "Out": ["transpose2_output"], + { + "op_type": "transpose2", + "op_inputs": {"X": ["matmul3_output"]}, + "op_outputs": { + "Out": ["transpose3_output"], + "XShape": ["transpose3_output_xshape"], + }, + "op_attrs": { + "axis": [0, 2, 1, 3], + "data_format": "AnyLayout", + }, }, - "op_attrs": { - "axis": [0, 1, 3, 2], - "data_format": "AnyLayout" - } - }, { - "op_type": "matmul_v2", - "op_inputs": { - "X": ["slice1_output"], - "Y": ["transpose2_output"] + { + "op_type": "reshape2", + "op_inputs": {"X": ["transpose3_output"]}, + "op_outputs": { + "Out": ["reshape2_output"], + "XShape": ["reshape2_output_xshape"], + }, + "op_attrs": {"shape": [-1, self.length, 768]}, }, - "op_outputs": { - "Out": ["matmul2_output"] - }, - "op_attrs": { - "trans_x": False, - "trans_y": False - } - }, { - "op_type": "scale", - "op_inputs": { - "X": ["matmul2_output"], - }, - "op_outputs": { - "Out": ["scale_output"] - }, - "op_attrs": { - "scale": 0.125, - "bias": 0.0, - "bias_after_scale": True - } - }, { - "op_type": "softmax", - "op_inputs": { - "X": ["scale_output"] - }, - "op_outputs": { - "Out": ["softmax_output"] - }, - "op_attrs": { - "axis": -1, - "data_format": "AnyLayout" - } - }, { - "op_type": "matmul_v2", - "op_inputs": { - 
"X": ["softmax_output"], - "Y": ["slice3_output"] - }, - "op_outputs": { - "Out": ["matmul3_output"] - }, - "op_attrs": { - "trans_x": False, - "trans_y": False - } - }, { - "op_type": "transpose2", - "op_inputs": { - "X": ["matmul3_output"] - }, - "op_outputs": { - "Out": ["transpose3_output"], - "XShape": ["transpose3_output_xshape"] - }, - "op_attrs": { - "axis": [0, 2, 1, 3], - "data_format": "AnyLayout" - } - }, { - "op_type": "reshape2", - "op_inputs": { - "X": ["transpose3_output"] - }, - "op_outputs": { - "Out": ["reshape2_output"], - "XShape": ["reshape2_output_xshape"] - }, - "op_attrs": { - "shape": [-1, self.length, 768] - } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "matmul1_weight": - TensorConfig(data_gen=partial(generate_weight1)), - "elementwise_add1_weight": - TensorConfig(data_gen=partial(generate_weight2)) + "matmul1_weight": TensorConfig( + data_gen=partial(generate_weight1) + ), + "elementwise_add1_weight": TensorConfig( + data_gen=partial(generate_weight2) + ), }, inputs={ - "input_data1": - TensorConfig( - data_gen=partial(generate_input1, batch, length)) + "input_data1": TensorConfig( + data_gen=partial(generate_input1, batch, length) + ) }, - outputs=["reshape2_output"]) + outputs=["reshape2_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): # The last dim of input1 and input2 should be static. self.dynamic_shape.min_input_shape = { @@ -1111,11 +1025,15 @@ class TrtConvertVitToMultiHeadMatmulTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.workspace_size = 2013265920 self.trt_param.precision = paddle_infer.PrecisionType.Half - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-3, - 1e-3) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-3, + 1e-3, + ) self.trt_param.precision = paddle_infer.PrecisionType.Float32 - yield self.create_inference_config(), generate_trt_nodes_num(), (1e-5, - 1e-5) + yield self.create_inference_config(), generate_trt_nodes_num(), ( + 1e-5, + 1e-5, + ) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py index 71893db5e5f18b51fcc566a5fedb49c1b664a325..1df42024992cc5ca60c3f344311865d2c0b1e331 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py @@ -22,7 +22,6 @@ import unittest class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -30,8 +29,9 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - if attrs[0]['scale'] <= 0 and (attrs[0]['out_h'] <= 0 - or attrs[0]['out_w'] <= 0): + if attrs[0]['scale'] <= 0 and ( + attrs[0]['out_h'] <= 0 or attrs[0]['out_w'] <= 0 + ): return False if (attrs[0]['out_h'] <= 0) ^ (attrs[0]['out_w'] <= 0): return False @@ -39,7 +39,6 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return 
np.ones([1, 3, 64, 64]).astype(np.float32) @@ -49,42 +48,49 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): for scale in [2.0, -1.0, 0.0]: for out_h in [32, 64, 128 - 32]: for out_w in [32, -32]: - dics = [{ - "data_layout": data_layout, - "interp_method": interp_method, - "align_corners": align_corners, - "scale": scale, - "out_h": out_h, - "out_w": out_w - }] - - ops_config = [{ - "op_type": "nearest_interp", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["nearest_interp_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "data_layout": data_layout, + "interp_method": interp_method, + "align_corners": align_corners, + "scale": scale, + "out_h": out_h, + "out_w": out_w, + } + ] + + ops_config = [ + { + "op_type": "nearest_interp", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": { + "Out": [ + "nearest_interp_output_data" + ] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dics + ) + ) }, - outputs=["nearest_interp_output_data"]) + outputs=["nearest_interp_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -106,33 +112,39 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): - if program_config.ops[0].attrs[ - 'scale'] <= 0 and self.dynamic_shape.min_input_shape: + if ( + program_config.ops[0].attrs['scale'] <= 0 + and self.dynamic_shape.min_input_shape + ): return True if program_config.ops[0].attrs['align_corners'] == True: return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "NOT Implemented: we need to add support scale <= 0 in dynamic shape in the future" + teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "NOT Implemented: we need to add support scale <= 0 in dynamic shape in the future", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp_v2.py index a8156e37f9b60774b6e4ea89711085051efb39ba..775db8a3c595546fb4a3ea16c3068eed4d5e955f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp_v2.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp_v2.py @@ -21,47 +21,44 @@ import unittest class TrtConvertNearestInterpV2Test(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(): return np.ones([1, 3, 32, 32]).astype(np.float32) - ops_config = [{ - "op_type": "nearest_interp_v2", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["interp_output_data"] - }, - "op_attrs": { - "data_layout": "NCHW", - "interp_method": "nearest", - "align_corners": False, - "align_mode": 1, - "scale": [2., 2.], - "out_d": 0, - "out_h": 0, - "out_w": 0 + ops_config = [ + { + "op_type": "nearest_interp_v2", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["interp_output_data"]}, + "op_attrs": { + "data_layout": "NCHW", + "interp_method": "nearest", + "align_corners": False, + "align_mode": 1, + "scale": [2.0, 2.0], + "out_d": 0, + "out_h": 0, + "out_w": 0, + }, } - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={"input_data": TensorConfig(data_gen=generate_input)}, - outputs=["interp_output_data"]) + outputs=["interp_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -83,19 +80,23 @@ class TrtConvertNearestInterpV2Test(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py index dafb78961810cc79a436686d3cd96f59434343bd..12262a83022e385c0b850ab54bab5dc63899b20f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad.py @@ -22,7 +22,6 @@ import unittest class TrtConvertPadTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -39,7 +38,6 @@ class TrtConvertPadTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 3, 64, 64]).astype(np.float32) @@ -47,37 +45,40 @@ class TrtConvertPadTest(TrtLayerAutoScanTest): return np.random.random([24, 3, 3, 3]).astype(np.float32) for pad_value in [0.0, 1.0, 2.0, -100, 100.0]: - for paddings in [[0, 0, 0, 0, 1, 1, 1, 1], [0, 
0, 0, 0, 1, 2, 3, 4], - [0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, -1, -1, 1, 1]]: + for paddings in [ + [0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 1, 2, 3, 4], + [0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, -1, -1, 1, 1], + ]: dics = [{"pad_value": pad_value, "paddings": paddings}, {}] - ops_config = [{ - "op_type": "pad", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["pad_output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "pad", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["pad_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["pad_output_data"]) + outputs=["pad_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -102,22 +103,25 @@ class TrtConvertPadTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-2 + attrs, False + ), 1e-2 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 + attrs, True + ), 1e-2 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): for x in range(len(program_config.ops[0].attrs['paddings']) - 4): if program_config.ops[0].attrs['paddings'][x] != 0: @@ -125,8 +129,9 @@ class TrtConvertPadTest(TrtLayerAutoScanTest): return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_IMPLEMENTED, - "NOT Implemented: we need to add support pad not only inplement on h or w, such as paddings = [0, 0, 1, 1, 1, 1, 1, 1]" + teller1, + SkipReasons.TRT_NOT_IMPLEMENTED, + "NOT Implemented: we need to add support pad not only inplement on h or w, such as paddings = [0, 0, 1, 1, 1, 1, 1, 1]", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad3d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad3d.py index d44da5534e6a2ed3720ec37609fa4d47e8246f0e..02429bed44c0420abd819a894c838a5f0cac3152 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad3d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pad3d.py @@ -22,30 +22,30 @@ import unittest class TrtConvertPad3d(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.ones([1, 1, 3, 64, 64]).astype(np.float32) for value in [True, False]: - for paddings in [[0, 0, 0, 0, 1, 1], [0, 0, 1, 2, 3, 4], - [1, 1, 1, 1, 1, 1], [0, 
0, -1, -1, 1, 1]]: + for paddings in [ + [0, 0, 0, 0, 1, 1], + [0, 0, 1, 2, 3, 4], + [1, 1, 1, 1, 1, 1], + [0, 0, -1, -1, 1, 1], + ]: dics = [{"value": value, "paddings": paddings}, {}] - ops_config = [{ - "op_type": "pad3d", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "pad3d", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) for i in range(10): @@ -53,16 +53,18 @@ class TrtConvertPad3d(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 1, 3, 64, 64] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py index 49d24e863ca12d131276be39a43d5fa66e8d1d17..b4eacbb136f06d90ef6dcf288ebd25c02ae6a502 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py @@ -23,7 +23,6 @@ import itertools class TrtConvertPool2dTest(TrtLayerAutoScanTest): - def is_paddings_valid(self, program_config: ProgramConfig) -> bool: exclusive = program_config.ops[0].attrs['exclusive'] paddings = program_config.ops[0].attrs['paddings'] @@ -65,39 +64,54 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ceil_mode_options = [True, False] configurations = [ - strides_options, paddings_options, pooling_type_options, - padding_algorithm_options, ksize_options, data_format_options, - global_pooling_options, exclusive_options, adaptive_option, - ceil_mode_options + strides_options, + paddings_options, + pooling_type_options, + padding_algorithm_options, + ksize_options, + data_format_options, + global_pooling_options, + exclusive_options, + adaptive_option, + ceil_mode_options, ] - for (strides, paddings, pooling_type, padding_algorithm, ksize, - data_format, global_pooling, exclusive, adaptive, - ceil_mode) in itertools.product(*configurations): - - attrs = [{ - "strides": strides, - "paddings": paddings, - "pooling_type": pooling_type, - "padding_algorithm": padding_algorithm, - "ksize": ksize, - "data_format": data_format, - "global_pooling": global_pooling, - "exclusive": exclusive, - "adaptive": adaptive, - "ceil_mode": ceil_mode, - }] - - ops_config = [{ - "op_type": "pool2d", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": attrs[0] - }] + for ( + strides, + paddings, + pooling_type, + padding_algorithm, + ksize, + data_format, + global_pooling, + exclusive, + adaptive, + ceil_mode, + ) in itertools.product(*configurations): + + attrs = [ + { + "strides": strides, + "paddings": paddings, + "pooling_type": pooling_type, + "padding_algorithm": padding_algorithm, + "ksize": ksize, + "data_format": data_format, + "global_pooling": global_pooling, + "exclusive": exclusive, + "adaptive": adaptive, + "ceil_mode": ceil_mode, + } + ] + + ops_config = [ + { + "op_type": 
"pool2d", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": attrs[0], + } + ] ops = self.generate_op_config(ops_config) @@ -105,16 +119,18 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, attrs)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, attrs) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [1, 3, 64, 64]} @@ -136,34 +152,40 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller(program_config, predictor_config): - if program_config.ops[0].attrs['pooling_type'] == 'avg' and \ - program_config.ops[0].attrs['global_pooling'] == False and \ - program_config.ops[0].attrs['exclusive'] == True and \ - program_config.ops[0].attrs['adaptive'] == False and \ - program_config.ops[0].attrs['ceil_mode'] == True: + if ( + program_config.ops[0].attrs['pooling_type'] == 'avg' + and program_config.ops[0].attrs['global_pooling'] == False + and program_config.ops[0].attrs['exclusive'] == True + and program_config.ops[0].attrs['adaptive'] == False + and program_config.ops[0].attrs['ceil_mode'] == True + ): return True return False self.add_skip_case( - teller, SkipReasons.TRT_NOT_IMPLEMENTED, - "The results of some cases are Nan, but the results of TensorRT and GPU are the same." 
+ teller, + SkipReasons.TRT_NOT_IMPLEMENTED, + "The results of some cases are Nan, but the results of TensorRT and GPU are the same.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_bias.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_bias.py index 284fd9231d19290e6b5a21e52f045caca23720a2..9390d12ca8b115f153356b742a31d76b00dce26f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_bias.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_bias.py @@ -22,7 +22,6 @@ import unittest class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,14 +31,13 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than or equal to the set axis. + # The input dimension should be less than or equal to the set axis. if 'begin_norm_axis' in attrs[0] and attrs[0]['begin_norm_axis'] >= 0: if len(inputs['inputX_data'].shape) <= attrs[0]['begin_norm_axis']: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): return np.ones([batch, 128, 768]).astype(np.float32) @@ -56,96 +54,100 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): for epsilon in [1e-5]: for begin_norm_axis in [2]: for enable_int8 in [False, True]: - dics = [{ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis, - }, {}] - - ops_config = [{ - "op_type": "elementwise_add", - "op_inputs": { - "X": ["inputX_data"], - "Y": ["EleBias"] - }, - "op_outputs": { - "Out": ["bias_out"] + dics = [ + { + "epsilon": epsilon, + "begin_norm_axis": begin_norm_axis, }, - "op_attrs": { - "axis": -1 - } - }, { - "op_type": "elementwise_add", - "op_inputs": { - "X": ["bias_out"], - "Y": ["inputY_data"] + {}, + ] + + ops_config = [ + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["inputX_data"], + "Y": ["EleBias"], + }, + "op_outputs": {"Out": ["bias_out"]}, + "op_attrs": {"axis": -1}, }, - "op_outputs": { - "Out": ["ele_out"] + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["bias_out"], + "Y": ["inputY_data"], + }, + "op_outputs": {"Out": ["ele_out"]}, + "op_attrs": {"axis": -1}, }, - "op_attrs": { - "axis": -1 - } - }, { - "op_type": "layer_norm", - "op_inputs": { - "X": ["ele_out"], - "Bias": ["Bias"], - "Scale": ["Scale"] + { + "op_type": "layer_norm", + "op_inputs": { + "X": ["ele_out"], + "Bias": ["Bias"], + "Scale": ["Scale"], + }, + "op_outputs": { + "Y": ["layernorm_out"], + "Mean": ["Mean"], + "Variance": ["Variance"], + }, + "op_attrs": dics[0], }, - "op_outputs": { - "Y": ["layernorm_out"], - "Mean": ["Mean"], - "Variance": ["Variance"] - }, - "op_attrs": dics[0] - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "Bias": - TensorConfig( - data_gen=partial(generate_weight1, dics)), - "Scale": - TensorConfig( - data_gen=partial(generate_weight2, dics)), - "EleBias": - TensorConfig( - data_gen=partial(generate_weight2, dics)) + "Bias": TensorConfig( + data_gen=partial(generate_weight1, dics) + ), + "Scale": TensorConfig( + data_gen=partial(generate_weight2, dics) + ), + "EleBias": TensorConfig( + data_gen=partial(generate_weight2, dics) + ), }, inputs={ - "inputX_data": - 
TensorConfig(data_gen=partial( - generate_input1, dics, batch)), - "inputY_data": - TensorConfig(data_gen=partial( - generate_input2, dics, batch)) + "inputX_data": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "inputY_data": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ) + ), }, - outputs=["ele_out", "layernorm_out"]) + outputs=["ele_out", "layernorm_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.max_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.opt_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } def clear_dynamic_shape(): @@ -164,10 +166,12 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 # atol=1e-2 while rtol is 1e-8 + attrs, True + ), 1e-2 # atol=1e-2 while rtol is 1e-8 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 # atol=1e-2 while rtol is 1e-8 + attrs, True + ), 1e-2 # atol=1e-2 while rtol is 1e-8 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_no_bias.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_no_bias.py index 74582cffb715c1fdc7b6a94a459dab9e57635ad2..320bdd1925c316c140ac870080b9c8d8ea658ec9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_no_bias.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_preln_residual_no_bias.py @@ -22,7 +22,6 @@ import unittest class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,14 +31,13 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than or equal to the set axis. + # The input dimension should be less than or equal to the set axis. 
if 'begin_norm_axis' in attrs[0] and attrs[0]['begin_norm_axis'] >= 0: if len(inputs['inputX_data'].shape) <= attrs[0]['begin_norm_axis']: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): return np.ones([batch, 128, 768]).astype(np.float32) @@ -56,81 +54,88 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): for epsilon in [1e-5]: for begin_norm_axis in [2]: for enable_int8 in [False, True]: - dics = [{ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis, - }, {}] - - ops_config = [{ - "op_type": "elementwise_add", - "op_inputs": { - "X": ["inputX_data"], - "Y": ["inputY_data"] + dics = [ + { + "epsilon": epsilon, + "begin_norm_axis": begin_norm_axis, }, - "op_outputs": { - "Out": ["ele_out"] + {}, + ] + + ops_config = [ + { + "op_type": "elementwise_add", + "op_inputs": { + "X": ["inputX_data"], + "Y": ["inputY_data"], + }, + "op_outputs": {"Out": ["ele_out"]}, + "op_attrs": {"axis": -1}, }, - "op_attrs": { - "axis": -1 - } - }, { - "op_type": "layer_norm", - "op_inputs": { - "X": ["ele_out"], - "Bias": ["Bias"], - "Scale": ["Scale"] + { + "op_type": "layer_norm", + "op_inputs": { + "X": ["ele_out"], + "Bias": ["Bias"], + "Scale": ["Scale"], + }, + "op_outputs": { + "Y": ["layernorm_out"], + "Mean": ["Mean"], + "Variance": ["Variance"], + }, + "op_attrs": dics[0], }, - "op_outputs": { - "Y": ["layernorm_out"], - "Mean": ["Mean"], - "Variance": ["Variance"] - }, - "op_attrs": dics[0] - }] + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "Bias": - TensorConfig( - data_gen=partial(generate_weight1, dics)), - "Scale": - TensorConfig( - data_gen=partial(generate_weight2, dics)) + "Bias": TensorConfig( + data_gen=partial(generate_weight1, dics) + ), + "Scale": TensorConfig( + data_gen=partial(generate_weight2, dics) + ), }, inputs={ - "inputX_data": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)), - "inputY_data": - TensorConfig(data_gen=partial( - generate_input2, dics, batch)) + "inputX_data": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "inputY_data": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ) + ), }, - outputs=["ele_out", "layernorm_out"]) + outputs=["ele_out", "layernorm_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.max_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.opt_input_shape = { "inputX_data": [4, 128, 768], "inputY_data": [4, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } def clear_dynamic_shape(): @@ -149,10 +154,12 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 # atol=1e-2 while rtol is 1e-8 + attrs, True + ), 1e-2 # atol=1e-2 while rtol is 1e-8 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-2 # atol=1e-2 while rtol is 1e-8 + 
attrs, True + ), 1e-2 # atol=1e-2 while rtol is 1e-8 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py index 0bc26954f0f0747e4af9661a4872582d7a88cd48..862bf1ff610894b33ad51fb622629bb2821aefad 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_prelu.py @@ -22,12 +22,10 @@ import unittest class TrtConvertPreluTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input(batch, dim1, dim2, dim3): shape = [batch] if dim1 != 0: @@ -41,8 +39,10 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): def generate_alpha(attrs: List[Dict[str, Any]], dim1, dim2, dim3): if attrs[0]["mode"] == "all": return np.random.random(size=(1)).astype(np.float32) - elif attrs[0]["mode"] == "channel" and attrs[0][ - "data_format"] == "NCHW": + elif ( + attrs[0]["mode"] == "channel" + and attrs[0]["data_format"] == "NCHW" + ): shape = [1] if dim1 != 0: shape.append(dim1) @@ -51,8 +51,10 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): if dim3 != 0: shape.append(1) return np.random.random(size=shape).astype(np.float32) - elif attrs[0]["mode"] == "channel" and attrs[0][ - "data_format"] == "NHWC": + elif ( + attrs[0]["mode"] == "channel" + and attrs[0]["data_format"] == "NHWC" + ): shape = [1] if dim1 != 0: shape.append(1) @@ -86,48 +88,66 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): for mode in ["all", "channel", "element"]: for data_format in ['NCHW', 'NHWC']: - if mode == "channel" and dim1 == 0 and data_format == "NCHW": + if ( + mode == "channel" + and dim1 == 0 + and data_format == "NCHW" + ): continue - if mode == "channel" and dim3 == 0 and data_format == "NHWC": + if ( + mode == "channel" + and dim3 == 0 + and data_format == "NHWC" + ): continue - dics = [{ - "mode": mode, - "data_format": data_format - }] - ops_config = [{ - "op_type": "prelu", - "op_inputs": { - "X": ["input_data"], - "Alpha": ["alpha_weight"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + {"mode": mode, "data_format": data_format} + ] + ops_config = [ + { + "op_type": "prelu", + "op_inputs": { + "X": ["input_data"], + "Alpha": ["alpha_weight"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "alpha_weight": - TensorConfig(data_gen=partial( - generate_alpha, dics, dim1, dim2, - dim3)) + "alpha_weight": TensorConfig( + data_gen=partial( + generate_alpha, + dics, + dim1, + dim2, + dim3, + ) + ) }, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input, batch, dim1, dim2, - dim3)), + "input_data": TensorConfig( + data_gen=partial( + generate_input, + batch, + dim1, + dim2, + dim3, + ) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dim1 == 0: self.dynamic_shape.min_input_shape = { @@ -181,7 +201,12 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): ] def generate_trt_nodes_num(attrs, dynamic_shape): - if not dynamic_shape and self.dim1 == 0 and 
self.dim2 == 0 and self.dim3 == 0: + if ( + not dynamic_shape + and self.dim1 == 0 + and self.dim2 == 0 + and self.dim3 == 0 + ): return 0, 3 return 1, 2 @@ -189,19 +214,23 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): ver = paddle_infer.get_trt_compile_version() @@ -213,8 +242,9 @@ class TrtConvertPreluTest(TrtLayerAutoScanTest): return False self.add_skip_case( - teller, SkipReasons.TRT_NOT_IMPLEMENTED, - "Need to repair the case: the output of GPU and tensorrt has diff in trt6, the prelu static plugin has bug." + teller, + SkipReasons.TRT_NOT_IMPLEMENTED, + "Need to repair the case: the output of GPU and tensorrt has diff in trt6, the prelu static plugin has bug.", ) def test(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_mean.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_mean.py index 95c9e24dcad1f2f529564b6456fd14fb1e1dd0d2..78ad7978b06c0f7c8218add877172d856bcb212e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_mean.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_mean.py @@ -22,7 +22,6 @@ import unittest class TrtConvertReduceMeanTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -45,7 +44,6 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(dtype, attrs: List[Dict[str, Any]]): if dtype == -1 or dtype == 5: return np.random.random([1, 3, 64, 64]).astype(np.float32) @@ -53,39 +51,52 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest): return np.random.random([1, 3, 64, 64]).astype(np.int32) for keep_dim in [True, False]: - for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3], - [-4, 1], [3, 4, 5]]: + for dim in [ + [], + [1], + [0], + [0, 1], + [1, 2, 3], + [-2, 0, 3], + [-3], + [-4, 1], + [3, 4, 5], + ]: for reduce_all in [True, False]: for out_dtype in [-1, 2, 5]: - dics = [{ - "keep_dim": keep_dim, - "dim": dim, - "reduce_all": reduce_all, - "out_dtype": out_dtype, - "in_dtype": out_dtype, - }, {}] - - ops_config = [{ - "op_type": "reduce_mean", - "op_inputs": { - "X": ["input_data"] + dics = [ + { + "keep_dim": keep_dim, + "dim": dim, + "reduce_all": reduce_all, + "out_dtype": out_dtype, + "in_dtype": out_dtype, }, - "op_outputs": { - "Out": ["reduce_output_data"] - }, - "op_attrs": dics[0] - }] + {}, + ] + + ops_config = [ + { + "op_type": "reduce_mean", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["reduce_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, 
weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, out_dtype, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, out_dtype, dics + ) + ) }, - outputs=["reduce_output_data"]) + outputs=["reduce_output_data"], + ) if not self.is_program_valid(program_config): continue @@ -93,8 +104,8 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest): yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -125,19 +136,23 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (5e-4, 5e-4) + attrs, False + ), (5e-4, 5e-4) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (5e-4, 5e-4) + attrs, True + ), (5e-4, 5e-4) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py index 41d491d32551a22cc4e2ca6be65c23169c7a91ad..67f3ab26f3750f8e37a7e4f02888f2c95edbbc48 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py @@ -23,7 +23,6 @@ import unittest class TrtConvertReduceSumTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -41,7 +40,6 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(dtype, attrs: List[Dict[str, Any]]): if dtype == -1 or dtype == 5: return np.random.random([1, 3, 32, 32]).astype(np.float32) @@ -49,39 +47,52 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest): return np.random.random([1, 3, 32, 32]).astype(np.int32) for keep_dim in [True, False]: - for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3], - [-4, 1], [3, 4, 5]]: + for dim in [ + [], + [1], + [0], + [0, 1], + [1, 2, 3], + [-2, 0, 3], + [-3], + [-4, 1], + [3, 4, 5], + ]: for reduce_all in [True, False]: for out_dtype in [-1, 2, 5]: - dics = [{ - "keep_dim": keep_dim, - "dim": dim, - "reduce_all": reduce_all, - "out_dtype": out_dtype, - "in_dtype": out_dtype, - }, {}] - - ops_config = [{ - "op_type": "reduce_sum", - "op_inputs": { - "X": ["input_data"] + dics = [ + { + "keep_dim": keep_dim, + "dim": dim, + "reduce_all": reduce_all, + "out_dtype": out_dtype, + "in_dtype": out_dtype, }, - "op_outputs": { - "Out": ["reduce_output_data"] - }, - "op_attrs": dics[0] - }] + {}, + ] + + ops_config = [ + { + "op_type": "reduce_sum", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": 
["reduce_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, out_dtype, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, out_dtype, dics + ) + ) }, - outputs=["reduce_output_data"]) + outputs=["reduce_output_data"], + ) if not self.is_program_valid(program_config): continue @@ -89,7 +100,6 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest): yield program_config def sample_predictor_configs(self, program_config): - def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -120,19 +130,23 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-5, 1e-5) + attrs, False + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-5, 1e-5) + attrs, True + ), (1e-5, 1e-5) self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reshape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reshape.py index abd4be3f888ed9802671cd53e4bfd5179be95be6..90aaddfcb403e5be9e09d765d8ba6df0bb073c85 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reshape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reshape.py @@ -22,7 +22,6 @@ import unittest class TrtConvertReshapeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) @@ -31,7 +30,7 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): if len(attrs[0]['shape']) != 1: return False - #To test if the shape contains 0 + # To test if the shape contains 0 if len(attrs[0]['shape']) == 3: if attrs[0]['shape'][1] == 0: if self.dims != 3: @@ -45,7 +44,6 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 4: self.input_shape = [1, 2, 4, 6] @@ -70,9 +68,18 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): return np.array([24]).astype(np.int32) for dims in [4, 3, 2, 1]: - for shape in [[1, 6, 8], [1, 2, 4, 6], [1, 1, 0, 12], [1, 0, 6], - [1, -1, 12], [2, -1], [3, 16], [3, 4, 4], [48], - [-1, 48]]: + for shape in [ + [1, 6, 8], + [1, 2, 4, 6], + [1, 1, 0, 12], + [1, 0, 6], + [1, -1, 12], + [2, -1], + [3, 16], + [3, 4, 4], + [48], + [-1, 48], + ]: dics = [ { "shape": shape, @@ -81,29 +88,31 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): self.dims = dims dics_intput = [{"X": ["reshape_input"]}] - ops_config = [{ - "op_type": "reshape", - "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["reshape_out"] - }, - 
"op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "reshape", + "op_inputs": dics_intput[0], + "op_outputs": {"Out": ["reshape_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "reshape_input": - TensorConfig(data_gen=partial(generate_input1, dics)) + "reshape_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["reshape_out"]) + outputs=["reshape_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -141,13 +150,14 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): # in static shape mode, here is consistent with op_teller.cc - if (not dynamic_shape): - if (attrs[0]['shape'][0] == 0): + if not dynamic_shape: + if attrs[0]['shape'][0] == 0: return 1, 2 - elif (len(attrs[0]['shape']) == 1): + elif len(attrs[0]['shape']) == 1: return 0, 3 - elif (np.prod(attrs[0]['shape'][1:]) == np.prod( - self.input_shape[1:])): + elif np.prod(attrs[0]['shape'][1:]) == np.prod( + self.input_shape[1:] + ): return 1, 2 else: return 0, 3 @@ -161,19 +171,23 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass @@ -185,12 +199,10 @@ class TrtConvertReshapeTest(TrtLayerAutoScanTest): # reshape having three inputs. 
class TrtConvertReshapeTest2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 4: return np.random.random([1, 2, 4, 6]).astype(np.float32) @@ -203,9 +215,12 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): for dims in [4, 3, 2, 1]: for shape in [[-1, 48]]: - dics = [{ - "shape": shape, - }, {}] + dics = [ + { + "shape": shape, + }, + {}, + ] self.dims = dims dics_intput = [ { @@ -217,9 +232,7 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT1_data"] - }, + "op_outputs": {"Out": ["shapeT1_data"]}, "op_attrs": { "dtype": 2, "str_value": "2", @@ -229,9 +242,7 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shapeT2_data"] - }, + "op_outputs": {"Out": ["shapeT2_data"]}, "op_attrs": { "dtype": 2, "str_value": "24", @@ -241,10 +252,8 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): { "op_type": "reshape", "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["reshape_out"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["reshape_out"]}, + "op_attrs": dics[0], }, ] ops = self.generate_op_config(ops_config) @@ -252,16 +261,18 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "reshape_input": - TensorConfig(data_gen=partial(generate_input1, dics)) + "reshape_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["reshape_out"]) + outputs=["reshape_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -309,12 +320,10 @@ class TrtConvertReshapeTest2(TrtLayerAutoScanTest): # reshape having 2 inputs. 
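The repeated explosion of literals such as dics = [{...}, {}] into one element per line is driven by black's "magic trailing comma": once a trailing comma is present inside the brackets, black keeps the collection expanded even when it would fit on a single line. A small before/after sketch using the same literal shape as the reshape tests above:

shape = [-1, 144]

# Without a trailing comma, black collapses the literal when it fits:
dics_compact = [{"shape": shape}, {}]

# With the magic trailing comma it stays one element per line:
dics_expanded = [
    {
        "shape": shape,
    },
    {},
]

assert dics_compact == dics_expanded  # same value, different layout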
class TrtConvertReshapeTest3(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): if self.dims == 4: return np.random.random([1, 2, 12, 6]).astype(np.float32) @@ -327,9 +336,12 @@ class TrtConvertReshapeTest3(TrtLayerAutoScanTest): for dims in [4, 3, 2, 1]: for shape in [[-1, 144]]: - dics = [{ - "shape": shape, - }, {}] + dics = [ + { + "shape": shape, + }, + {}, + ] self.dims = dims dics_intput = [ { @@ -341,9 +353,7 @@ class TrtConvertReshapeTest3(TrtLayerAutoScanTest): { "op_type": "fill_constant", "op_inputs": {}, - "op_outputs": { - "Out": ["shape_data"] - }, + "op_outputs": {"Out": ["shape_data"]}, "op_attrs": { "dtype": 2, "str_value": "12", @@ -353,10 +363,8 @@ class TrtConvertReshapeTest3(TrtLayerAutoScanTest): { "op_type": "reshape", "op_inputs": dics_intput[0], - "op_outputs": { - "Out": ["reshape_out"] - }, - "op_attrs": dics[0] + "op_outputs": {"Out": ["reshape_out"]}, + "op_attrs": dics[0], }, ] ops = self.generate_op_config(ops_config) @@ -364,16 +372,18 @@ class TrtConvertReshapeTest3(TrtLayerAutoScanTest): ops=ops, weights={}, inputs={ - "reshape_input": - TensorConfig(data_gen=partial(generate_input1, dics)) + "reshape_input": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["reshape_out"]) + outputs=["reshape_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_rnn.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_rnn.py index 2b76e273aa0a964b6744259c4ecb584159f87da6..4f65ea6abf41d9385a520c1d3adc81c4184f8990 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_rnn.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_rnn.py @@ -23,7 +23,6 @@ import os class TrtConvertSliceTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -36,148 +35,168 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): for num_layers in [1, 2]: for is_bidirec in [True, False]: dics = [] - dics.append({ - "hidden_size": hidden_size, - "input_size": input_size, - "num_layers": num_layers, - "mode": "LSTM", - "is_bidirec": is_bidirec, - "is_test": True, - "dropout_prob": 0.0, - # for my convience - "batch": batch, - "seq_len": seq_len, - }) + dics.append( + { + "hidden_size": hidden_size, + "input_size": input_size, + "num_layers": num_layers, + "mode": "LSTM", + "is_bidirec": is_bidirec, + "is_test": True, + "dropout_prob": 0.0, + # for my convience + "batch": batch, + "seq_len": seq_len, + } + ) K = 1 - if (dics[0]["is_bidirec"]): + if dics[0]["is_bidirec"]: K = 2 def generate_input1(): - return np.random.random([ - batch, seq_len, input_size - ]).astype(np.float32) * 2 - 1 + return ( + np.random.random( + [batch, seq_len, input_size] + ).astype(np.float32) + * 2 + - 1 + ) # initial input -> hidden def generate_w0(): - return np.random.random([ - 4 * hidden_size, input_size - ]).astype(np.float32) * 2 - 1 + return ( + np.random.random( + [4 * hidden_size, input_size] + ).astype(np.float32) + * 2 + - 1 + ) # prev layer's output -> hidden def generate_w1(): - return np.random.random([ - 4 * hidden_size, K * 
hidden_size - ]).astype(np.float32) * 2 - 1 + return ( + np.random.random( + [4 * hidden_size, K * hidden_size] + ).astype(np.float32) + * 2 + - 1 + ) # def generate_w2(): - return np.random.random([ - 4 * hidden_size, hidden_size - ]).astype(np.float32) * 2 - 1 + return ( + np.random.random( + [4 * hidden_size, hidden_size] + ).astype(np.float32) + * 2 + - 1 + ) def generate_b(): - return np.random.random([ - 4 * hidden_size - ]).astype(np.float32) * 2 - 1 - - dics.append({ - "dtype": - 5, - "input_dim_idx": - 0, - "str_value": - "", - "value": - 0.0, - "shape": [K * num_layers, -1, hidden_size], - "output_dim_idx": - 1, - }) + return ( + np.random.random( + [4 * hidden_size] + ).astype(np.float32) + * 2 + - 1 + ) + + dics.append( + { + "dtype": 5, + "input_dim_idx": 0, + "str_value": "", + "value": 0.0, + "shape": [ + K * num_layers, + -1, + hidden_size, + ], + "output_dim_idx": 1, + } + ) dics.append({"axis": [1, 0, 2]}) # set weights WeightList = [ "weight" + str(i) - for i in range(4 * K * - dics[0]["num_layers"]) + for i in range( + 4 * K * dics[0]["num_layers"] + ) ] weights = {} for i in range((int)(len(WeightList) / 2)): # mean this weight : input->hidden # input has 2 case: initial input input_size, K * hidden form the prev layer. - if (i % 2 == 0): - if (i <= K): + if i % 2 == 0: + if i <= K: weights[ - WeightList[i]] = TensorConfig( - data_gen=partial( - generate_w0)) + WeightList[i] + ] = TensorConfig( + data_gen=partial(generate_w0) + ) else: weights[ - WeightList[i]] = TensorConfig( - data_gen=partial( - generate_w1)) + WeightList[i] + ] = TensorConfig( + data_gen=partial(generate_w1) + ) # mean this weight : hidden->hidden - if (i % 2 == 1): + if i % 2 == 1: weights[WeightList[i]] = TensorConfig( - data_gen=partial(generate_w2)) - for i in range((int)(len(WeightList) / 2), - len(WeightList)): + data_gen=partial(generate_w2) + ) + for i in range( + (int)(len(WeightList) / 2), len(WeightList) + ): weights[WeightList[i]] = TensorConfig( - data_gen=partial(generate_b)) + data_gen=partial(generate_b) + ) ops_config = [ { - "op_type": - "fill_constant_batch_size_like", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Out": ["prestate1"] - }, - "op_attrs": dics[1] + "op_type": "fill_constant_batch_size_like", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": {"Out": ["prestate1"]}, + "op_attrs": dics[1], }, { - "op_type": - "fill_constant_batch_size_like", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Out": ["prestate2"] - }, - "op_attrs": dics[1] + "op_type": "fill_constant_batch_size_like", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": {"Out": ["prestate2"]}, + "op_attrs": dics[1], }, { "op_type": "transpose2", - "op_inputs": { - "X": ["input_data"] - }, + "op_inputs": {"X": ["input_data"]}, "op_outputs": { "Out": ["rnn_input_data"] }, - "op_attrs": dics[2] + "op_attrs": dics[2], }, { "op_type": "rnn", "op_inputs": { "Input": ["rnn_input_data"], # prev_c, prev_h - "PreState": - ["prestate1", "prestate2"], + "PreState": [ + "prestate1", + "prestate2", + ], "WeightList": WeightList, }, "op_outputs": { "Out": ["rnn_output_data"], "State": [ "state_output_data0", - "state_output_data1" + "state_output_data1", ], "Reserve": ["reserve_data"], - "DropoutState": - ["DropoutState_data"] + "DropoutState": [ + "DropoutState_data" + ], }, - "op_attrs": dics[0] - } + "op_attrs": dics[0], + }, ] ops = self.generate_op_config(ops_config) @@ -185,16 +204,18 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): ops=ops, 
weights=weights, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1)) + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ) }, - outputs=["rnn_output_data"]) + outputs=["rnn_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): + self, program_config + ) -> (paddle_infer.Config, List[int], float): attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] @@ -234,7 +255,7 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): # The output has diff between gpu and trt in PR-CI-Windows-Inference tol_fp32 = 1e-5 tol_half = 1e-2 - if (os.name == 'nt'): + if os.name == 'nt': tol_fp32 = 1e-2 tol_half = 1e-1 @@ -242,10 +263,12 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), tol_fp32 + attrs, True + ), tol_fp32 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), tol_half + attrs, True + ), tol_half def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py index ee84df151b5c67a0b6ba54de06a78a3ec5898625..f89527359d4d19f650f3d1b96668b3df75323b05 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py @@ -22,12 +22,10 @@ from typing import Any, Dict, List class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): return np.ones([batch, 256, 32, 32]).astype(np.float32) @@ -47,92 +45,111 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): self.num_input = num_input if num_input == 1: batch = 1 - dics = [{ - "spatial_scale": spatial_scale, - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "sampling_ratio": sampling_ratio, - "aligned": aligned - }, {}] - dics_input = [{ - "X": ["roi_align_input"], - "ROIs": ["ROIs"], - "RoisNum": ["RoisNum"] - }, { - "X": ["roi_align_input"], - "ROIs": ["ROIs"] - }] - program_input = [{ - "roi_align_input": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)), - "ROIs": - TensorConfig(data_gen=partial( - generate_input2, dics, batch)), - "RoisNum": - TensorConfig(data_gen=partial( - generate_input3, dics, batch)) - }, { - "roi_align_input": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)), - "ROIs": - TensorConfig(data_gen=partial( - generate_input2, dics, batch), - lod=[[32, 3]]) - }] - ops_config = [{ - "op_type": - "roi_align", - "op_inputs": - dics_input[num_input], - "op_outputs": { - "Out": ["roi_align_out"] + dics = [ + { + "spatial_scale": spatial_scale, + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "sampling_ratio": sampling_ratio, + "aligned": aligned, + }, + {}, + ] + dics_input = [ + { + "X": ["roi_align_input"], + "ROIs": ["ROIs"], + "RoisNum": ["RoisNum"], + }, + { + "X": ["roi_align_input"], + "ROIs": ["ROIs"], + }, + ] + program_input = [ + { + "roi_align_input": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "ROIs": TensorConfig( + 
data_gen=partial( + generate_input2, dics, batch + ) + ), + "RoisNum": TensorConfig( + data_gen=partial( + generate_input3, dics, batch + ) + ), + }, + { + "roi_align_input": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "ROIs": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ), + lod=[[32, 3]], + ), }, - "op_attrs": - dics[0] - }] + ] + ops_config = [ + { + "op_type": "roi_align", + "op_inputs": dics_input[num_input], + "op_outputs": { + "Out": ["roi_align_out"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs=program_input[num_input], - outputs=["roi_align_out"]) + outputs=["roi_align_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.num_input == 0: self.dynamic_shape.min_input_shape = { "roi_align_input": [1, 256, 32, 32], "ROIs": [3, 4], - "RoisNum": [1] + "RoisNum": [1], } self.dynamic_shape.max_input_shape = { "roi_align_input": [1, 256, 64, 64], "ROIs": [3, 4], - "RoisNum": [1] + "RoisNum": [1], } self.dynamic_shape.opt_input_shape = { "roi_align_input": [1, 256, 64, 64], "ROIs": [3, 4], - "RoisNum": [1] + "RoisNum": [1], } elif self.num_input == 1: self.dynamic_shape.min_input_shape = { "roi_align_input": [1, 256, 32, 32], - "ROIs": [3, 4] + "ROIs": [3, 4], } self.dynamic_shape.max_input_shape = { "roi_align_input": [1, 256, 64, 64], - "ROIs": [3, 4] + "ROIs": [3, 4], } self.dynamic_shape.opt_input_shape = { "roi_align_input": [1, 256, 64, 64], - "ROIs": [3, 4] + "ROIs": [3, 4], } def clear_dynamic_shape(): @@ -159,29 +176,33 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(program_config.inputs) == 3: return True return False - self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, - "INPUT RoisNum NOT SUPPORT") + self.add_skip_case( + teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT RoisNum NOT SUPPORT" + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py index 6b6e1e0c4bfe59afa65117925a45563606ed40bf..47d415b1a6b76e0716e29bf8c56868f8017fcdd5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py @@ -22,7 +22,6 @@ import unittest class TrtConvertRollTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = 
program_config.inputs weights = program_config.weights @@ -32,43 +31,44 @@ class TrtConvertRollTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 56, 56, 192]).astype(np.float32) for axis in [[1, 2]]: for shifts in [[-1, -1], [-3, -3]]: - dics = [{ - "axis": axis, - "shifts": shifts, - }] - - ops_config = [{ - "op_type": "roll", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["roll_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "axis": axis, + "shifts": shifts, + } + ] + + ops_config = [ + { + "op_type": "roll", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["roll_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["roll_output_data"]) + outputs=["roll_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 56, 56, 192] @@ -103,19 +103,23 @@ class TrtConvertRollTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py index 33a7c060ecc15f34a17289ec2ac2fdf3e5b7e976..27658d9286367356d4598e637df7c4cc6af01519 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scale.py @@ -22,12 +22,10 @@ import unittest class TrtConvertScaleTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -49,51 +47,60 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): for bias_after_scale in [False, True]: self.num_input = num_input self.dims = dims - dics = [{ - "scale": scale, - "bias": bias, - "bias_after_scale": bias_after_scale - }, {}] - - dics_intput = [{ - "X": ["scale_input"], - "ScaleTensor": ["ScaleTensor"], - }, { - "X": ["scale_input"] - }] - dics_intputs = [{ - "ScaleTensor": - TensorConfig(data_gen=partial( - generate_weight1, dics)) - }, {}] - - ops_config = [{ - "op_type": - "scale", - "op_inputs": - dics_intput[num_input], - 
"op_outputs": { - "Out": ["scale_out"] + dics = [ + { + "scale": scale, + "bias": bias, + "bias_after_scale": bias_after_scale, + }, + {}, + ] + + dics_intput = [ + { + "X": ["scale_input"], + "ScaleTensor": ["ScaleTensor"], }, - "op_attrs": - dics[0] - }] + {"X": ["scale_input"]}, + ] + dics_intputs = [ + { + "ScaleTensor": TensorConfig( + data_gen=partial( + generate_weight1, dics + ) + ) + }, + {}, + ] + + ops_config = [ + { + "op_type": "scale", + "op_inputs": dics_intput[num_input], + "op_outputs": {"Out": ["scale_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights=dics_intputs[num_input], inputs={ - "scale_input": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)) + "scale_input": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ) }, - outputs=["scale_out"]) + outputs=["scale_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -134,37 +141,46 @@ class TrtConvertScaleTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if self.num_input == 0: return True return False - self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT, - "INPUT ScaleTensor and Shape NOT SUPPORT") + self.add_skip_case( + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "INPUT ScaleTensor and Shape NOT SUPPORT", + ) def teller2(program_config, predictor_config): if self.dims == 1 and len(self.dynamic_shape.min_input_shape) == 0: return True return False - self.add_skip_case(teller2, SkipReasons.TRT_NOT_SUPPORT, - "INPUT DIM EQUAL TO 1 OF STATIC SHAPE NOT SUPPORT") + self.add_skip_case( + teller2, + SkipReasons.TRT_NOT_SUPPORT, + "INPUT DIM EQUAL TO 1 OF STATIC SHAPE NOT SUPPORT", + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scatter_nd_add.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scatter_nd_add.py index d0e6cf867ce4354bf4fcbe17906c2471e42645a5..9376ade22a2be5314a7d9e8f58d8bb0948335636 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scatter_nd_add.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_scatter_nd_add.py @@ -22,12 +22,10 @@ import unittest class TrtConvertScatterNd(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([6]).astype(np.float32) @@ -37,38 
+35,42 @@ class TrtConvertScatterNd(TrtLayerAutoScanTest): def generate_input3(): return np.random.random([4]).astype(np.float32) - ops_config = [{ - "op_type": "scatter_nd_add", - "op_inputs": { - "X": ["input_data"], - "Index": ["index_data"], - "Updates": ["update_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "scatter_nd_add", + "op_inputs": { + "X": ["input_data"], + "Index": ["index_data"], + "Updates": ["update_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) for i in range(10): program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), - "index_data": - TensorConfig(data_gen=partial(generate_input2)), - "update_data": - TensorConfig(data_gen=partial(generate_input3)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), + "index_data": TensorConfig( + data_gen=partial(generate_input2) + ), + "update_data": TensorConfig( + data_gen=partial(generate_input3) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1], diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shape.py index 778a097c3ab038f2d319ab28c22e4d8484357b19..03df0287836aa5048688a0f22a9588f0f299a5e1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shape.py @@ -22,12 +22,10 @@ import unittest class TrtConvertSumTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -41,31 +39,31 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: self.dims = dims - ops_config = [{ - "op_type": "shape", - "op_inputs": { - "Input": ["input1"] - }, - "op_outputs": { - "Out": ["output"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "shape", + "op_inputs": {"Input": ["input1"]}, + "op_outputs": {"Out": ["output"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1": - TensorConfig(data_gen=partial(generate_input1, batch)) + "input1": TensorConfig( + data_gen=partial(generate_input1, batch) + ) }, - outputs=["output"]) + outputs=["output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = {"input1": [1, 3, 24, 24]} @@ -87,7 +85,7 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): } def generate_trt_nodes_num(dynamic_shape): - if (not dynamic_shape): + if not dynamic_shape: return 0, 3 return 1, 2 @@ -100,10 +98,12 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield 
self.create_inference_config(), generate_trt_nodes_num( - False), 1e-5 + False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-3 + False + ), 1e-3 # for dynamic_shape generate_dynamic_shape() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py index bf5584e0029433cf7690f3def6917069c7bf3283..04c1e3259fc12da2b0d603134bcb600d7e2daa65 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py @@ -22,44 +22,41 @@ import unittest class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): return np.ones([batch, 6, 24, 24]).astype(np.float32) for batch in [1, 2, 4]: for group in [1, 2, 3]: dics = [{"group": group}, {}] - ops_config = [{ - "op_type": "shuffle_channel", - "op_inputs": { - "X": ["shuffle_channel_input"] - }, - "op_outputs": { - "Out": ["shuffle_channel_out"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "shuffle_channel", + "op_inputs": {"X": ["shuffle_channel_input"]}, + "op_outputs": {"Out": ["shuffle_channel_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "shuffle_channel_input": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)) + "shuffle_channel_input": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ) }, - outputs=["shuffle_channel_out"]) + outputs=["shuffle_channel_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "shuffle_channel_input": [1, 6, 24, 24] @@ -78,8 +75,10 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): ver = paddle_infer.get_trt_compile_version() - if ver[0] * 1000 + ver[1] * 100 + ver[ - 2] * 10 < 8000 and dynamic_shape == True: + if ( + ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8000 + and dynamic_shape == True + ): return 0, 3 else: return 1, 2 @@ -92,19 +91,23 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_silu.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_silu.py index 3b9ff8a0527d53db0193885efe847558e6b9c66a..73df326fb01ad2ef9d6490686126783ab9fab0ce 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_silu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_silu.py @@ -22,12 +22,10 @@ import unittest class TrtConvertSwishTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, attrs: List[Dict[str, Any]]): if dims == 1: return np.ones([3]).astype(np.float32) @@ -42,33 +40,34 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): for beta in [1.0, 2.0, 3.0]: self.dims = dims - ops_config = [{ - "op_type": "silu", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "silu", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dims, {})) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dims, {}) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -113,19 +112,23 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py index 3a3b0603ec846478ff1751fef0a16f491ce372d7..18ea2abe6bc35f8cadc429e5e8ec0ad3630da5ae 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py @@ -22,7 +22,6 @@ import unittest class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,19 +31,20 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than or equal to the set axis. + # The input dimension should be less than or equal to the set axis. 
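Each sample_predictor_configs in these files yields a triple of (inference config, expected op counts, tolerance), where the tolerance is either a bare scalar or an (atol, rtol) pair; the inline comment earlier in this diff ("atol=1e-2 while rtol is 1e-8") suggests a scalar is read as atol with a tiny default rtol. A hypothetical helper showing that interpretation, not the real TrtLayerAutoScanTest harness:

def normalize_tolerance(tol, default_rtol=1e-8):
    # Accepts the two forms yielded by the tests above:
    #   1e-5          -> atol=1e-5, rtol=default_rtol
    #   (1e-3, 1e-3)  -> atol=1e-3, rtol=1e-3
    if isinstance(tol, (tuple, list)):
        atol, rtol = tol
    else:
        atol, rtol = tol, default_rtol
    return atol, rtol


print(normalize_tolerance(1e-2))          # (0.01, 1e-08)
print(normalize_tolerance((1e-3, 1e-3)))  # (0.001, 0.001)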
if attrs[0]['begin_norm_axis'] >= 0: - if len(inputs['skip_layernorm_inputX_data'].shape - ) <= attrs[0]['begin_norm_axis']: + if ( + len(inputs['skip_layernorm_inputX_data'].shape) + <= attrs[0]['begin_norm_axis'] + ): return False - #2D input is not supported. + # 2D input is not supported. if self.dims == 2: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.ones([batch, 6, 128, 768]).astype(np.float32) @@ -73,107 +73,117 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): for begin_norm_axis in [0, 1, 2, -1]: for enable_int8 in [False, True]: self.dims = dims - dics = [{ - "epsilon": epsilon, - "begin_norm_axis": begin_norm_axis, - "enable_int8": enable_int8 - }, {}] - ops_config = [{ - "op_type": "skip_layernorm", - "op_inputs": { - "X": ["skip_layernorm_inputX_data"], - "Y": ["skip_layernorm_inputY_data"], - "Bias": ["Bias"], - "Scale": ["Scale"] + dics = [ + { + "epsilon": epsilon, + "begin_norm_axis": begin_norm_axis, + "enable_int8": enable_int8, }, - "op_outputs": { - "Out": ["skip_layernorm_out"] - }, - "op_attrs": dics[0] - }] + {}, + ] + ops_config = [ + { + "op_type": "skip_layernorm", + "op_inputs": { + "X": ["skip_layernorm_inputX_data"], + "Y": ["skip_layernorm_inputY_data"], + "Bias": ["Bias"], + "Scale": ["Scale"], + }, + "op_outputs": { + "Out": ["skip_layernorm_out"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={ - "Bias": - TensorConfig(data_gen=partial( - generate_weight1, dics)), - "Scale": - TensorConfig(data_gen=partial( - generate_weight2, dics)) + "Bias": TensorConfig( + data_gen=partial(generate_weight1, dics) + ), + "Scale": TensorConfig( + data_gen=partial(generate_weight2, dics) + ), }, inputs={ - "skip_layernorm_inputX_data": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)), - "skip_layernorm_inputY_data": - TensorConfig(data_gen=partial( - generate_input2, dics, batch)) + "skip_layernorm_inputX_data": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ), + "skip_layernorm_inputY_data": TensorConfig( + data_gen=partial( + generate_input2, dics, batch + ) + ), }, - outputs=["skip_layernorm_out"]) + outputs=["skip_layernorm_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { "skip_layernorm_inputX_data": [1, 6, 128, 768], "skip_layernorm_inputY_data": [1, 6, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.max_input_shape = { "skip_layernorm_inputX_data": [4, 6, 768, 3072], "skip_layernorm_inputY_data": [4, 6, 768, 3072], "Bias": [3072], - "Scale": [3072] + "Scale": [3072], } self.dynamic_shape.opt_input_shape = { "skip_layernorm_inputX_data": [2, 6, 128, 768], "skip_layernorm_inputY_data": [2, 6, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "skip_layernorm_inputX_data": [1, 128, 768], "skip_layernorm_inputY_data": [1, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.max_input_shape = { "skip_layernorm_inputX_data": [4, 768, 3072], "skip_layernorm_inputY_data": [4, 768, 3072], "Bias": [3072], - "Scale": [3072] + "Scale": [3072], } 
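The generate_dynamic_shape helpers above repeat the same three dictionary assignments (min, max, opt) for every input rank they cover. A hypothetical convenience wrapper sketching what those repeated blocks encode, using values from the 3-D skip_layernorm case shown here:

def build_dynamic_shapes(min_shapes, max_shapes, opt_shapes):
    # Hypothetical helper, not part of the test framework: it just groups the
    # three shape dictionaries the tests assign by hand.
    return {
        "min_input_shape": dict(min_shapes),
        "max_input_shape": dict(max_shapes),
        "opt_input_shape": dict(opt_shapes),
    }


shapes_3d = build_dynamic_shapes(
    min_shapes={"skip_layernorm_inputX_data": [1, 128, 768], "Bias": [768]},
    max_shapes={"skip_layernorm_inputX_data": [4, 768, 3072], "Bias": [3072]},
    opt_shapes={"skip_layernorm_inputX_data": [2, 128, 768], "Bias": [768]},
)
print(shapes_3d["opt_input_shape"])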
self.dynamic_shape.opt_input_shape = { "skip_layernorm_inputX_data": [2, 128, 768], "skip_layernorm_inputY_data": [2, 128, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "skip_layernorm_inputX_data": [1, 768], "skip_layernorm_inputY_data": [1, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } self.dynamic_shape.max_input_shape = { "skip_layernorm_inputX_data": [4, 3072], "skip_layernorm_inputY_data": [4, 3072], "Bias": [3072], - "Scale": [3072] + "Scale": [3072], } self.dynamic_shape.opt_input_shape = { "skip_layernorm_inputX_data": [2, 768], "skip_layernorm_inputY_data": [2, 768], "Bias": [768], - "Scale": [768] + "Scale": [768], } def clear_dynamic_shape(): @@ -204,10 +214,12 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_slice.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_slice.py index 22eb7cef872b72e608a5781db1b29fb7aad3e500..81d833f39f08f6a4fb3aa6709269ce2f97ec1b2a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_slice.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_slice.py @@ -22,7 +22,6 @@ import unittest class TrtConvertSliceTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -34,13 +33,17 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): start = 0 end = 0 if attrs[0]["starts"][x] < 0: - start = attrs[0]["starts"][x] + inputs['input_data'].shape[ - attrs[0]["axes"][x]] + start = ( + attrs[0]["starts"][x] + + inputs['input_data'].shape[attrs[0]["axes"][x]] + ) else: start = attrs[0]["starts"][x] if attrs[0]["ends"][x] < 0: - end = attrs[0]["ends"][x] + inputs['input_data'].shape[ - attrs[0]["axes"][x]] + end = ( + attrs[0]["ends"][x] + + inputs['input_data'].shape[attrs[0]["axes"][x]] + ) else: end = attrs[0]["ends"][x] start = max(0, start) @@ -51,12 +54,11 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): for x in attrs[0]["decrease_axis"]: if x < 0: return False - if (out_shape[x] != 1): + if out_shape[x] != 1: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.random.random([6, 6, 64, 64]).astype(np.float32) @@ -65,41 +67,44 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): for ends in [[2, 2], [5, 5], [1, -1]]: for decrease_axis in [[], [1], [2], [-1], [-100]]: for infer_flags in [[-1]]: - dics = [{ - "axes": axes, - "starts": starts, - "ends": ends, - "decrease_axis": decrease_axis, - "infer_flags": infer_flags - }] - - ops_config = [{ - "op_type": "slice", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Out": ["slice_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "axes": axes, + "starts": starts, + "ends": ends, + "decrease_axis": decrease_axis, + "infer_flags": infer_flags, + } + ] + + ops_config = [ + { + "op_type": "slice", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": { + "Out": 
["slice_output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["slice_output_data"]) + outputs=["slice_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [8, 8, 64, 64]} @@ -125,19 +130,23 @@ class TrtConvertSliceTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): # TODO(inference): fix. diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py index 6662146331080f55c60651277b00a4cdc1266cc8..7f1ed848cb17815f33de947c2668d8b21f9d4d2b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertSoftmaxTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,14 +31,13 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than or equal to the set axis. + # The input dimension should be less than or equal to the set axis. 
if len(inputs['softmax_input'].shape) <= attrs[0]['axis']: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -53,32 +51,31 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest): for axis in [-1, 0, 1, 2, 3]: self.dims = dims dics = [{"axis": axis}, {}] - ops_config = [{ - "op_type": "softmax", - "op_inputs": { - "X": ["softmax_input"] - }, - "op_outputs": { - "Out": ["softmax_out"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "softmax", + "op_inputs": {"X": ["softmax_input"]}, + "op_outputs": {"Out": ["softmax_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "softmax_input": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)) + "softmax_input": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ) }, - outputs=["softmax_out"]) + outputs=["softmax_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -123,19 +120,23 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest): else: self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py index b60b9227395d711ed129119bc69a5abfe1b3e77f..ce9c533c625879b9a2e68133b2fff4193d4a29f4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertSplitTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -35,13 +34,13 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): if len(inputs['split_input'].shape) <= attrs[0]['axis']: return False - #Sections and num cannot both be equal to 0. + # Sections and num cannot both be equal to 0. if len(attrs[0]['sections']) == 0: if attrs[0]['num'] == 0: return False - #When sections and num are not both equal to 0, sections has higher priority. - #The sum of sections should be equal to the input size. + # When sections and num are not both equal to 0, sections has higher priority. + # The sum of sections should be equal to the input size. 
if len(attrs[0]['sections']) != 0: if attrs[0]['num'] != 0: return False @@ -53,16 +52,18 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): if sum != inputs['split_input'].shape[attrs[0]['axis']]: return False - #The size of num should be equal to the input dimension. + # The size of num should be equal to the input dimension. if attrs[0]['num'] != 0: if len(outputs) != attrs[0]['num']: return False - #Test AxisTensor and SectionsTensorList + # Test AxisTensor and SectionsTensorList if self.num_input == 0: - if self.dims == 2 and attrs[0]['sections'] == [ - 10, 14 - ] and len(outputs) == 2: + if ( + self.dims == 2 + and attrs[0]['sections'] == [10, 14] + and len(outputs) == 2 + ): return True else: return False @@ -70,7 +71,6 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.random.random([batch, 3, 3, 24]).astype(np.float32) @@ -93,72 +93,95 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): for num_input in [0, 1]: for dims in [1, 2, 3, 4]: for batch in [3, 6, 9]: - for Out in [["output_var0", "output_var1"], - ["output_var0", "output_var1", "output_var2"]]: - for sections in [[], [1, 2], [2, 1], [10, 14], - [1, 1, 1], [2, 2, 2], [3, 3, 3], - [3, 7, 14]]: + for Out in [ + ["output_var0", "output_var1"], + ["output_var0", "output_var1", "output_var2"], + ]: + for sections in [ + [], + [1, 2], + [2, 1], + [10, 14], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3], + [3, 7, 14], + ]: for num in [0, 3]: for axis in [0, 1, 2, 3]: self.batch = batch self.num_input = num_input self.dims = dims - dics = [{ - "sections": sections, - "num": num, - "axis": axis - }, {}] - - dics_intput = [{ - "X": ["split_input"], - "AxisTensor": ["AxisTensor"], - "SectionsTensorList": [ - "SectionsTensorList1", - "SectionsTensorList2" - ] - }, { - "X": ["split_input"] - }] - dics_intputs = [{ - "AxisTensor": - TensorConfig(data_gen=partial( - generate_AxisTensor, dics)), - "SectionsTensorList1": - TensorConfig(data_gen=partial( - generate_SectionsTensorList1, - dics)), - "SectionsTensorList2": - TensorConfig(data_gen=partial( - generate_SectionsTensorList2, dics)) - }, {}] - - ops_config = [{ - "op_type": - "split", - "op_inputs": - dics_intput[num_input], - "op_outputs": { - "Out": Out + dics = [ + { + "sections": sections, + "num": num, + "axis": axis, + }, + {}, + ] + + dics_intput = [ + { + "X": ["split_input"], + "AxisTensor": ["AxisTensor"], + "SectionsTensorList": [ + "SectionsTensorList1", + "SectionsTensorList2", + ], }, - "op_attrs": - dics[0] - }] + {"X": ["split_input"]}, + ] + dics_intputs = [ + { + "AxisTensor": TensorConfig( + data_gen=partial( + generate_AxisTensor, dics + ) + ), + "SectionsTensorList1": TensorConfig( + data_gen=partial( + generate_SectionsTensorList1, + dics, + ) + ), + "SectionsTensorList2": TensorConfig( + data_gen=partial( + generate_SectionsTensorList2, + dics, + ) + ), + }, + {}, + ] + + ops_config = [ + { + "op_type": "split", + "op_inputs": dics_intput[num_input], + "op_outputs": {"Out": Out}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights=dics_intputs[num_input], inputs={ - "split_input": - TensorConfig(data_gen=partial( - generate_input1, dics, batch)) + "split_input": TensorConfig( + data_gen=partial( + generate_input1, dics, batch + ) + ) }, - outputs=Out) + outputs=Out, + ) yield program_config def sample_predictor_configs( - self, program_config) -> 
(paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -216,30 +239,35 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): - def teller1(program_config, predictor_config): if len(program_config.weights) == 3: return True return False self.add_skip_case( - teller1, SkipReasons.TRT_NOT_SUPPORT, - "INPUT AxisTensor AND SectionsTensorList NOT SUPPORT.") + teller1, + SkipReasons.TRT_NOT_SUPPORT, + "INPUT AxisTensor AND SectionsTensorList NOT SUPPORT.", + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_squeeze2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_squeeze2.py index 31bf484c59950ead6508558bada2b57727e15ca8..27860452091449cafccf477b30c071dbda4fd1b6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_squeeze2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_squeeze2.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertSplitTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -40,25 +39,25 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): self.dims = dims self.axes = axes dics = [{"axes": axes}] - ops_config = [{ - "op_type": "squeeze2", - "op_inputs": { - "X": ["in_data"] - }, - "op_outputs": { - "Out": ["out_data"], - "XShape": ["XShape_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "squeeze2", + "op_inputs": {"X": ["in_data"]}, + "op_outputs": { + "Out": ["out_data"], + "XShape": ["XShape_data"], + }, + "op_attrs": dics[0], + } + ] # new_axes is the update of axes new_axes = list(axes) for i in range(len(new_axes)): - if (new_axes[i] < 0): + if new_axes[i] < 0: new_axes[i] += dims - if (max(new_axes) >= dims): + if max(new_axes) >= dims: continue - # generate input data + # generate input data self.input_shape = [1] * dims for i in range(dims): self.input_shape[i] = np.random.randint(1, 20) @@ -68,24 +67,26 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): for i in new_axes: self.input_shape[i] = 1 return np.random.random(self.input_shape).astype( - np.float32) + np.float32 + ) ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "in_data": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)) + "in_data": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ) }, - outputs=["out_data"]) + outputs=["out_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, 
List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): max_shape = list(self.input_shape) min_shape = list(self.input_shape) @@ -112,19 +113,23 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py index dc2c8bb41626b6b2528d86c55b509418d231469e..91e5e499b19b4f2f5dd101ee63a0d57d5917a068 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py @@ -22,7 +22,6 @@ import unittest class TrtConvertStackTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -31,14 +30,13 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The input dimension should be less than the set axis. + # The input dimension should be less than the set axis. 
if len(inputs['stack_input1'].shape) < attrs[0]['axis']: return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.random.random([batch, 3, 24, 24]).astype(np.float32) @@ -74,103 +72,107 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): for axis in [-2, -1, 0, 1, 2, 3]: self.dims = dims dics = [{"axis": axis}, {}] - ops_config = [{ - "op_type": "stack", - "op_inputs": { - "X": - ["stack_input1", "stack_input2", "stack_input3"] - }, - "op_outputs": { - "Y": ["stack_output"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "stack", + "op_inputs": { + "X": [ + "stack_input1", + "stack_input2", + "stack_input3", + ] + }, + "op_outputs": {"Y": ["stack_output"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "stack_input1": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)), - "stack_input2": - TensorConfig( - data_gen=partial(generate_input2, dics, batch)), - "stack_input3": - TensorConfig( - data_gen=partial(generate_input3, dics, batch)) + "stack_input1": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ), + "stack_input2": TensorConfig( + data_gen=partial(generate_input2, dics, batch) + ), + "stack_input3": TensorConfig( + data_gen=partial(generate_input3, dics, batch) + ), }, - outputs=["stack_output"]) + outputs=["stack_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { "stack_input1": [1, 3, 24, 24], "stack_input2": [1, 3, 24, 24], - "stack_input3": [1, 3, 24, 24] + "stack_input3": [1, 3, 24, 24], } self.dynamic_shape.max_input_shape = { "stack_input1": [4, 3, 48, 48], "stack_input2": [4, 3, 48, 48], - "stack_input3": [4, 3, 48, 48] + "stack_input3": [4, 3, 48, 48], } self.dynamic_shape.opt_input_shape = { "stack_input1": [1, 3, 24, 24], "stack_input2": [1, 3, 24, 24], - "stack_input3": [1, 3, 24, 24] + "stack_input3": [1, 3, 24, 24], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "stack_input1": [1, 3, 24], "stack_input2": [1, 3, 24], - "stack_input3": [1, 3, 24] + "stack_input3": [1, 3, 24], } self.dynamic_shape.max_input_shape = { "stack_input1": [4, 3, 48], "stack_input2": [4, 3, 48], - "stack_input3": [4, 3, 48] + "stack_input3": [4, 3, 48], } self.dynamic_shape.opt_input_shape = { "stack_input1": [1, 3, 24], "stack_input2": [1, 3, 24], - "stack_input3": [1, 3, 24] + "stack_input3": [1, 3, 24], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "stack_input1": [1, 24], "stack_input2": [1, 24], - "stack_input3": [1, 24] + "stack_input3": [1, 24], } self.dynamic_shape.max_input_shape = { "stack_input1": [4, 48], "stack_input2": [4, 48], - "stack_input3": [4, 48] + "stack_input3": [4, 48], } self.dynamic_shape.opt_input_shape = { "stack_input1": [1, 24], "stack_input2": [1, 24], - "stack_input3": [1, 24] + "stack_input3": [1, 24], } elif self.dims == 1: self.dynamic_shape.min_input_shape = { "stack_input1": [24], "stack_input2": [24], - "stack_input3": [24] + "stack_input3": [24], } self.dynamic_shape.max_input_shape = { "stack_input1": [48], "stack_input2": [48], - "stack_input3": [48] + "stack_input3": [48], } self.dynamic_shape.opt_input_shape = { "stack_input1": [24], "stack_input2": 
[24], - "stack_input3": [24] + "stack_input3": [24], } def clear_dynamic_shape(): @@ -191,19 +193,23 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_strided_slice.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_strided_slice.py index 4f517b447a19125ef1d05dfd917abeeca0c94b0a..b8549c918736a7f0e627d177b2dfa8e7c4214a76 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_strided_slice.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_strided_slice.py @@ -22,7 +22,6 @@ import unittest class TrtConvertStridedSliceTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,7 +31,6 @@ class TrtConvertStridedSliceTest(TrtLayerAutoScanTest): return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.random.random([1, 56, 56, 192]).astype(np.float32) @@ -42,42 +40,47 @@ class TrtConvertStridedSliceTest(TrtLayerAutoScanTest): for decrease_axis in [[]]: for infer_flags in [[1, 1]]: for strides in [[2, 2]]: - dics = [{ - "axes": axes, - "starts": starts, - "ends": ends, - "decrease_axis": decrease_axis, - "infer_flags": infer_flags, - "strides": strides - }] - - ops_config = [{ - "op_type": "strided_slice", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Out": ["slice_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "axes": axes, + "starts": starts, + "ends": ends, + "decrease_axis": decrease_axis, + "infer_flags": infer_flags, + "strides": strides, + } + ] + + ops_config = [ + { + "op_type": "strided_slice", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": { + "Out": ["slice_output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dics + ) + ) }, - outputs=["slice_output_data"]) + outputs=["slice_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 56, 56, 192] @@ -118,71 +121,81 @@ class TrtConvertStridedSliceTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + 
attrs, False + ), 1e-5 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 def test(self): self.run_test() class TrtConvertStridedSliceTest2(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]]): return np.random.random([1, 56, 56, 192]).astype(np.float32) for axes in [[1, 2], [2, 3], [1, 3]]: - for starts in [[-10, 1], [-10, 20], [-10, 15], [-10, 16], [-10, - 20]]: + for starts in [ + [-10, 1], + [-10, 20], + [-10, 15], + [-10, 16], + [-10, 20], + ]: for ends in [[-9, 10000], [-9, -1], [-9, 40]]: for decrease_axis in [[]]: for infer_flags in [[1, 1]]: for strides in [[2, 2]]: - dics = [{ - "axes": axes, - "starts": starts, - "ends": ends, - "decrease_axis": [axes[0]], - "infer_flags": infer_flags, - "strides": strides - }] - - ops_config = [{ - "op_type": "strided_slice", - "op_inputs": { - "Input": ["input_data"] - }, - "op_outputs": { - "Out": ["slice_output_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "axes": axes, + "starts": starts, + "ends": ends, + "decrease_axis": [axes[0]], + "infer_flags": infer_flags, + "strides": strides, + } + ] + + ops_config = [ + { + "op_type": "strided_slice", + "op_inputs": {"Input": ["input_data"]}, + "op_outputs": { + "Out": ["slice_output_data"] + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dics + ) + ) }, - outputs=["slice_output_data"]) + outputs=["slice_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): self.dynamic_shape.min_input_shape = { "input_data": [1, 56, 56, 192] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_sum.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_sum.py index 673d20313e1228a2d412e0d6e20c8d111b283ac4..f7534215c624762d8be382f575204182c6c4597d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_sum.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_sum.py @@ -22,12 +22,10 @@ import unittest class TrtConvertSumTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -61,99 +59,101 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: self.dims = dims - ops_config = [{ - "op_type": "sum", - "op_inputs": { - "X": ["input1", "input2", "input3"] - }, - "op_outputs": { - "Out": ["output"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "sum", + "op_inputs": {"X": ["input1", "input2", "input3"]}, + "op_outputs": {"Out": ["output"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1": - TensorConfig(data_gen=partial(generate_input1, batch)), - "input2": - 
TensorConfig(data_gen=partial(generate_input2, batch)), - "input3": - TensorConfig(data_gen=partial(generate_input3, batch)) + "input1": TensorConfig( + data_gen=partial(generate_input1, batch) + ), + "input2": TensorConfig( + data_gen=partial(generate_input2, batch) + ), + "input3": TensorConfig( + data_gen=partial(generate_input3, batch) + ), }, - outputs=["output"]) + outputs=["output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = { "input1": [1, 3, 24, 24], "input2": [1, 3, 24, 24], - "input3": [1, 3, 24, 24] + "input3": [1, 3, 24, 24], } self.dynamic_shape.max_input_shape = { "input1": [4, 3, 48, 48], "input2": [4, 3, 48, 48], - "input3": [4, 3, 48, 48] + "input3": [4, 3, 48, 48], } self.dynamic_shape.opt_input_shape = { "input1": [1, 3, 24, 24], "input2": [1, 3, 24, 24], - "input3": [1, 3, 24, 24] + "input3": [1, 3, 24, 24], } elif self.dims == 3: self.dynamic_shape.min_input_shape = { "input1": [1, 3, 24], "input2": [1, 3, 24], - "input3": [1, 3, 24] + "input3": [1, 3, 24], } self.dynamic_shape.max_input_shape = { "input1": [4, 3, 48], "input2": [4, 3, 48], - "input3": [4, 3, 48] + "input3": [4, 3, 48], } self.dynamic_shape.opt_input_shape = { "input1": [1, 3, 24], "input2": [1, 3, 24], - "input3": [1, 3, 24] + "input3": [1, 3, 24], } elif self.dims == 2: self.dynamic_shape.min_input_shape = { "input1": [1, 24], "input2": [1, 24], - "input3": [1, 24] + "input3": [1, 24], } self.dynamic_shape.max_input_shape = { "input1": [4, 48], "input2": [4, 48], - "input3": [4, 48] + "input3": [4, 48], } self.dynamic_shape.opt_input_shape = { "input1": [1, 24], "input2": [1, 24], - "input3": [1, 24] + "input3": [1, 24], } elif self.dims == 1: self.dynamic_shape.min_input_shape = { "input1": [24], "input2": [24], - "input3": [24] + "input3": [24], } self.dynamic_shape.max_input_shape = { "input1": [48], "input2": [48], - "input3": [48] + "input3": [48], } self.dynamic_shape.opt_input_shape = { "input1": [24], "input2": [24], - "input3": [24] + "input3": [24], } def clear_dynamic_shape(): @@ -162,7 +162,7 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(dynamic_shape): - if (self.dims == 1 and not dynamic_shape): + if self.dims == 1 and not dynamic_shape: return 0, 5 return 1, 4 @@ -170,10 +170,12 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-5 + False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-3 + False + ), 1e-3 # for dynamic_shape generate_dynamic_shape() @@ -188,12 +190,10 @@ class TrtConvertSumTest(TrtLayerAutoScanTest): # special case when sum having olny one input class TrtConvertSumTest1(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -207,31 +207,31 @@ class TrtConvertSumTest1(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: self.dims = dims - ops_config = [{ - "op_type": "sum", - "op_inputs": { - "X": ["input1"] - }, 
- "op_outputs": { - "Out": ["output"] - }, - "op_attrs": {} - }] + ops_config = [ + { + "op_type": "sum", + "op_inputs": {"X": ["input1"]}, + "op_outputs": {"Out": ["output"]}, + "op_attrs": {}, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input1": - TensorConfig(data_gen=partial(generate_input1, batch)), + "input1": TensorConfig( + data_gen=partial(generate_input1, batch) + ), }, - outputs=["output"]) + outputs=["output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(): if self.dims == 4: self.dynamic_shape.min_input_shape = {"input1": [1, 3, 24, 24]} @@ -268,7 +268,7 @@ class TrtConvertSumTest1(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(dynamic_shape): - if (self.dims == 1 and not dynamic_shape): + if self.dims == 1 and not dynamic_shape: return 0, 3 return 1, 2 @@ -276,10 +276,12 @@ class TrtConvertSumTest1(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-5 + False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - False), 1e-3 + False + ), 1e-3 # for dynamic_shape generate_dynamic_shape() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py index 1b5398bb483ad4a87880505c7be944d72ddbbad7..b31e165c2e0f5468b73d85473ff44da1adfd38f5 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_swish.py @@ -22,12 +22,10 @@ import unittest class TrtConvertSwishTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(dims, attrs: List[Dict[str, Any]]): if dims == 1: return np.ones([3]).astype(np.float32) @@ -44,33 +42,34 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): dics = [{"beta": beta}] - ops_config = [{ - "op_type": "swish", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "swish", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig( - data_gen=partial(generate_input1, dims, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dims, dics) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -115,19 +114,23 @@ class TrtConvertSwishTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + 
), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), (1e-3, 1e-3) + attrs, False + ), (1e-3, 1e-3) # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), (1e-3, 1e-3) + attrs, True + ), (1e-3, 1e-3) def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py index 87cb2c6630d05f8a4de5bf89514592f391691755..d65d7e3c29f9694ba73cea58bd2bdd29771de912 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py @@ -25,7 +25,6 @@ import hypothesis.strategies as st class TrtConvertTileTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -38,38 +37,37 @@ class TrtConvertTileTest(TrtLayerAutoScanTest): return True def sample_program_configs(self, *args, **kwargs): - def generate_input1(attrs: List[Dict[str, Any]]): return np.ones([1, 2, 3, 4]).astype(np.float32) dics = [{"repeat_times": kwargs['repeat_times']}] - ops_config = [{ - "op_type": "tile", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["tile_output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "tile", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["tile_output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1, dics)) + "input_data": TensorConfig( + data_gen=partial(generate_input1, dics) + ) }, - outputs=["tile_output_data"]) + outputs=["tile_output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]} self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]} @@ -98,19 +96,23 @@ class TrtConvertTileTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 @given(repeat_times=st.sampled_from([[100], [1, 2], [0, 3], [1, 2, 100]])) def test(self, *args, **kwargs): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k.py 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k.py index 7e8caf4a7e4575ea35e0954636b6ef088dcd3a2d..b8150238407a2aca4d7a552f65776b176a13d135 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertActivationTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -44,34 +43,37 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): for k in [1, 3]: self.dims = dims dics = [{"k": k}] - ops_config = [{ - "op_type": "top_k", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"], - "Indices": ["indices_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "top_k", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": { + "Out": ["output_data"], + "Indices": ["indices_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dims, batch, dics + ) + ) }, - outputs=["output_data", "indices_data"]) + outputs=["output_data", "indices_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -114,19 +116,23 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 ## for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py index c4114e0d4c5a827a280c7b94d85f985e82ad0755..477ea649effd366496b670250c0b8ce7883fc828 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertActivationTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs attrs = [ @@ -53,40 +52,48 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): for sort in [True, False]: self.dims = dims self.sort = sort - dics = [{ - "k": k, - "axis": axis, - "largest": largest, - "sorted": sort - }] - ops_config = [{ - "op_type": "top_k_v2", - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": 
["output_data"], - "Indices": ["indices_data"] - }, - "op_attrs": dics[0] - }] + dics = [ + { + "k": k, + "axis": axis, + "largest": largest, + "sorted": sort, + } + ] + ops_config = [ + { + "op_type": "top_k_v2", + "op_inputs": {"X": ["input_data"]}, + "op_outputs": { + "Out": ["output_data"], + "Indices": ["indices_data"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, + dims, + batch, + dics, + ) + ) }, - outputs=["output_data", "indices_data"]) + outputs=["output_data", "indices_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -131,19 +138,23 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py index 684a47f4232183fb4f1cd5365e9c3c15ee0cf003..9ebcd87399230753d1b01addcaac795c2bb490cd 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py @@ -22,7 +22,6 @@ import unittest class TrtConvertTransposeTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: inputs = program_config.inputs weights = program_config.weights @@ -32,14 +31,13 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - #The shape of input and axis should be equal. + # The shape of input and axis should be equal. 
if len(inputs['transpose_input'].shape) != len(attrs[0]['axis']): return False return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch): if self.dims == 4: return np.ones([batch, 3, 24, 24]).astype(np.float32) @@ -50,37 +48,43 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): for dims in [2, 3, 4]: for batch in [1, 2, 4]: - for axis in [[0, 1, 3, 2], [0, 3, 2, 1], [3, 2, 0, 1], - [0, 1, 2, 3], [0, 1, 2], [2, 0, 1], [1, 0], [0, - 1]]: + for axis in [ + [0, 1, 3, 2], + [0, 3, 2, 1], + [3, 2, 0, 1], + [0, 1, 2, 3], + [0, 1, 2], + [2, 0, 1], + [1, 0], + [0, 1], + ]: self.dims = dims dics = [{"axis": axis}, {}] - ops_config = [{ - "op_type": "transpose", - "op_inputs": { - "X": ["transpose_input"] - }, - "op_outputs": { - "Out": ["transpose_out"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "transpose", + "op_inputs": {"X": ["transpose_input"]}, + "op_outputs": {"Out": ["transpose_out"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "transpose_input": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)) + "transpose_input": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ) }, - outputs=["transpose_out"]) + outputs=["transpose_out"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 4: self.dynamic_shape.min_input_shape = { @@ -134,19 +138,23 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unary.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unary.py index 65656fd96d6c22616b824adb04b9c595093d52e2..56b17df2dd04e2bb57bf44a8f2078893a1ea2586 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unary.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unary.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertActivationTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -42,40 +41,54 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): for dims in [1, 2, 3, 4]: for batch in [1, 4]: for op_type in [ - "exp", "log", "sqrt", "abs", "sin", "cos", "tan", - "sinh", "cosh", "asin", "acos", "atan", "asinh", - "atanh", "ceil", "floor" + "exp", + "log", + "sqrt", + "abs", + "sin", + "cos", + "tan", + "sinh", + "cosh", + "asin", + "acos", + "atan", + "asinh", + "atanh", + "ceil", + "floor", ]: self.dims = dims dics = [{}] - 
ops_config = [{ - "op_type": op_type, - "op_inputs": { - "X": ["input_data"] - }, - "op_outputs": { - "Out": ["output_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": op_type, + "op_inputs": {"X": ["input_data"]}, + "op_outputs": {"Out": ["output_data"]}, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial( - generate_input1, dims, batch, dics)) + "input_data": TensorConfig( + data_gen=partial( + generate_input1, dims, batch, dics + ) + ) }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if self.dims == 1: self.dynamic_shape.min_input_shape = {"input_data": [1]} @@ -118,19 +131,23 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def test(self): self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unfold.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unfold.py index 685658ca74600b7e079b5ff1a6c43bb83903e21c..5ec187daef4e91f938648e25a0e2b84b782db665 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unfold.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unfold.py @@ -22,46 +22,46 @@ import unittest class TrtConvertUnfold(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True def sample_program_configs(self): - def generate_input1(): return np.random.random([1, 3, 24, 24]).astype(np.float32) - ops_config = [{ - "op_type": "unfold", - "op_inputs": { - "X": ["input_data"], - }, - "op_outputs": { - "Y": ["output_data"] - }, - "op_attrs": { - "dilations": [1, 1], - "kernel_sizes": [4, 4], - "paddings": [0, 0, 0, 0], - "strides": [1, 1], + ops_config = [ + { + "op_type": "unfold", + "op_inputs": { + "X": ["input_data"], + }, + "op_outputs": {"Y": ["output_data"]}, + "op_attrs": { + "dilations": [1, 1], + "kernel_sizes": [4, 4], + "paddings": [0, 0, 0, 0], + "strides": [1, 1], + }, } - }] + ] ops = self.generate_op_config(ops_config) for i in range(10): program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "input_data": - TensorConfig(data_gen=partial(generate_input1)), + "input_data": TensorConfig( + data_gen=partial(generate_input1) + ), }, - outputs=["output_data"]) + outputs=["output_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def 
generate_dynamic_shape(attrs): self.dynamic_shape.min_input_shape = { "input_data": [1, 3, 4, 4], diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unsqueeze2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unsqueeze2.py index b18e98e9690999002ebf10777125e56f6f1f8ad6..d79f4ddf7fc6da87a3e005e59513f7865f0b0f67 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unsqueeze2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_unsqueeze2.py @@ -22,7 +22,6 @@ from typing import Any, Dict, List class TrtConvertSplitTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) -> bool: return True @@ -34,17 +33,17 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): self.dims = dims self.axes = axes dics = [{"axes": axes}] - ops_config = [{ - "op_type": "unsqueeze2", - "op_inputs": { - "X": ["in_data"] - }, - "op_outputs": { - "Out": ["out_data"], - "XShape": ["XShape_data"] - }, - "op_attrs": dics[0] - }] + ops_config = [ + { + "op_type": "unsqueeze2", + "op_inputs": {"X": ["in_data"]}, + "op_outputs": { + "Out": ["out_data"], + "XShape": ["XShape_data"], + }, + "op_attrs": dics[0], + } + ] # generate input data self.input_shape = [1] * dims @@ -54,24 +53,26 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): def generate_input1(attrs: List[Dict[str, Any]], batch): self.input_shape[0] = batch return np.random.random(self.input_shape).astype( - np.float32) + np.float32 + ) ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "in_data": - TensorConfig( - data_gen=partial(generate_input1, dics, batch)) + "in_data": TensorConfig( + data_gen=partial(generate_input1, dics, batch) + ) }, - outputs=["out_data"]) + outputs=["out_data"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): max_shape = list(self.input_shape) min_shape = list(self.input_shape) @@ -98,19 +99,23 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 + attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): pass diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py index ed9bb722a8a6d9122b1bcd00f960785216dff65f..a0d089c69c934464f54beb33fefb76b6c8dfa2d8 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py @@ -23,19 +23,19 @@ import os class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): - def is_program_valid(self, program_config: ProgramConfig) 
-> bool: return True def sample_program_configs(self): - def generate_input1(attrs: List[Dict[str, Any]], batch, channel): if attrs[0]['iou_aware'] == True: - return np.ones([batch, 3 * (channel + 6), 13, - 13]).astype(np.float32) + return np.ones([batch, 3 * (channel + 6), 13, 13]).astype( + np.float32 + ) else: - return np.ones([batch, 3 * (channel + 5), 13, - 13]).astype(np.float32) + return np.ones([batch, 3 * (channel + 5), 13, 13]).astype( + np.float32 + ) def generate_input2(attrs: List[Dict[str, Any]], batch): return np.random.random([batch, 2]).astype(np.int32) @@ -49,89 +49,92 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): for scale_x_y in [1.0, 0.9]: for iou_aware in [False, True]: for iou_aware_factor in [0.5]: - dics = [{ - "class_num": - class_num, - "anchors": - anchors, - "downsample_ratio": - downsample_ratio, - "conf_thresh": - conf_thresh, - "clip_bbox": - clip_bbox, - "scale_x_y": - scale_x_y, - "iou_aware": - iou_aware, - "iou_aware_factor": - iou_aware_factor - }, {}] - ops_config = [{ - "op_type": "yolo_box", - "op_inputs": { - "X": ["yolo_box_input"], - "ImgSize": ["imgsize"] - }, - "op_outputs": { - "Boxes": ["boxes"], - "Scores": ["scores"] + dics = [ + { + "class_num": class_num, + "anchors": anchors, + "downsample_ratio": downsample_ratio, + "conf_thresh": conf_thresh, + "clip_bbox": clip_bbox, + "scale_x_y": scale_x_y, + "iou_aware": iou_aware, + "iou_aware_factor": iou_aware_factor, }, - "op_attrs": dics[0] - }] + {}, + ] + ops_config = [ + { + "op_type": "yolo_box", + "op_inputs": { + "X": ["yolo_box_input"], + "ImgSize": ["imgsize"], + }, + "op_outputs": { + "Boxes": ["boxes"], + "Scores": ["scores"], + }, + "op_attrs": dics[0], + } + ] ops = self.generate_op_config( - ops_config) + ops_config + ) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "yolo_box_input": - TensorConfig( + "yolo_box_input": TensorConfig( data_gen=partial( generate_input1, - dics, batch, - class_num)), - "imgsize": - TensorConfig( + dics, + batch, + class_num, + ) + ), + "imgsize": TensorConfig( data_gen=partial( generate_input2, - dics, batch)) + dics, + batch, + ) + ), }, - outputs=["boxes", "scores"]) + outputs=["boxes", "scores"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): - + self, program_config + ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): if attrs[0]['iou_aware'] == True: channel = 3 * (attrs[0]['class_num'] + 6) self.dynamic_shape.min_input_shape = { "yolo_box_input": [1, channel, 12, 12], - "imgsize": [1, 2] + "imgsize": [1, 2], } self.dynamic_shape.max_input_shape = { "yolo_box_input": [4, channel, 24, 24], - "imgsize": [4, 2] + "imgsize": [4, 2], } self.dynamic_shape.opt_input_shape = { "yolo_box_input": [1, channel, 24, 24], - "imgsize": [1, 2] + "imgsize": [1, 2], } else: channel = 3 * (attrs[0]['class_num'] + 5) self.dynamic_shape.min_input_shape = { "yolo_box_input": [1, channel, 12, 12], - "imgsize": [1, 2] + "imgsize": [1, 2], } self.dynamic_shape.max_input_shape = { "yolo_box_input": [4, channel, 24, 24], - "imgsize": [4, 2] + "imgsize": [4, 2], } self.dynamic_shape.opt_input_shape = { "yolo_box_input": [1, channel, 24, 24], - "imgsize": [1, 2] + "imgsize": [1, 2], } def clear_dynamic_shape(): @@ -149,30 +152,35 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-5 
+ attrs, False + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, False), 1e-3 + attrs, False + ), 1e-3 # for dynamic_shape generate_dynamic_shape(attrs) self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-5 + attrs, True + ), 1e-5 self.trt_param.precision = paddle_infer.PrecisionType.Half yield self.create_inference_config(), generate_trt_nodes_num( - attrs, True), 1e-3 + attrs, True + ), 1e-3 def add_skip_trt_case(self): - def teller2(program_config, predictor_config): if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt': return True return False self.add_skip_case( - teller2, SkipReasons.TRT_NOT_SUPPORT, - "The output has diff between gpu and trt in Windows.") + teller2, + SkipReasons.TRT_NOT_SUPPORT, + "The output has diff between gpu and trt in Windows.", + ) def test(self): self.add_skip_trt_case() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box_head.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box_head.py index 08a09338bf27be71ea5f352ae749edb05c765746..e8b4006933f5796e0ad4cff9e7ec8a8bf2df97b5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box_head.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box_head.py @@ -22,17 +22,18 @@ import unittest class TrtConvertYoloBoxHeadTest(TrtLayerAutoScanTest): - def sample_program_configs(self): - def generate_input(attrs: List[Dict[str, Any]], batch, shape): gen_shape = shape.copy() gen_shape.insert(0, batch) return np.random.uniform(0, 1, gen_shape).astype("float32") input_shape = [[255, 19, 19], [255, 38, 38], [255, 76, 76]] - anchors = [[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119], - [10, 13, 16, 30, 33, 23]] + anchors = [ + [116, 90, 156, 198, 373, 326], + [30, 61, 62, 45, 59, 119], + [10, 13, 16, 30, 33, 23], + ] class_num = 80 for batch in [1, 4]: for i in range(len(anchors)): @@ -40,31 +41,40 @@ class TrtConvertYoloBoxHeadTest(TrtLayerAutoScanTest): "anchors": anchors[i], "class_num": class_num, } - ops_config = [{ - "op_type": "yolo_box_head", - "op_inputs": { - "X": ["yolo_box_head_input"], - }, - "op_outputs": { - "Out": ["yolo_box_head_output"], - }, - "op_attrs": attrs_dict - }] + ops_config = [ + { + "op_type": "yolo_box_head", + "op_inputs": { + "X": ["yolo_box_head_input"], + }, + "op_outputs": { + "Out": ["yolo_box_head_output"], + }, + "op_attrs": attrs_dict, + } + ] ops = self.generate_op_config(ops_config) program_config = ProgramConfig( ops=ops, weights={}, inputs={ - "yolo_box_head_input": - TensorConfig(data_gen=partial( - generate_input, attrs_dict, batch, input_shape[i])) + "yolo_box_head_input": TensorConfig( + data_gen=partial( + generate_input, + attrs_dict, + batch, + input_shape[i], + ) + ) }, - outputs=["yolo_box_head_output"]) + outputs=["yolo_box_head_output"], + ) yield program_config def sample_predictor_configs( - self, program_config) -> (paddle_infer.Config, List[int], float): + self, program_config + ) -> (paddle_infer.Config, List[int], float): # for static_shape self.trt_param.precision = paddle_infer.PrecisionType.Float32 yield self.create_inference_config(), [1, 2], 1e-5 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py index 
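Similarly, a simplified sketch of the per-level input generation in the yolo_box_head test above (the attrs argument is dropped here for brevity): each level's 255 = 3 * (class_num + 5) channel shape is copied and the batch dimension is prepended.

import numpy as np


def generate_input(batch, shape):
    gen_shape = shape.copy()
    gen_shape.insert(0, batch)  # prepend the batch dimension
    return np.random.uniform(0, 1, gen_shape).astype("float32")


input_shape = [[255, 19, 19], [255, 38, 38], [255, 76, 76]]
for level_shape in input_shape:
    print(generate_input(4, level_shape).shape)
# (4, 255, 19, 19), (4, 255, 38, 38), (4, 255, 76, 76)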
21949e3d7804814566404b886f0f0799e71bf873..eb79b4ccd1d980d23c322816db4e5b9ef736bcf5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py @@ -25,19 +25,18 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0' class TRTDeformableConvTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - input = fluid.data(name='input', - shape=self.input_size, - dtype=self.dtype) - offset = fluid.data(name='offset', - shape=self.offset_size, - dtype=self.dtype) - mask = fluid.data(name='mask', - shape=self.mask_size, - dtype=self.dtype) + input = fluid.data( + name='input', shape=self.input_size, dtype=self.dtype + ) + offset = fluid.data( + name='offset', shape=self.offset_size, dtype=self.dtype + ) + mask = fluid.data( + name='mask', shape=self.mask_size, dtype=self.dtype + ) output = fluid.layers.deformable_conv( input, @@ -50,19 +49,21 @@ class TRTDeformableConvTest(InferencePassTest): dilation=self.dilations, groups=self.groups, deformable_groups=self.deformable_groups, - im2col_step=self.im2col_step) + im2col_step=self.im2col_step, + ) self.feeds = { 'input': np.random.random(self.input_size).astype(self.dtype), 'offset': np.random.random(self.offset_size).astype(self.dtype), - 'mask': np.random.random(self.mask_size).astype(self.dtype) + 'mask': np.random.random(self.mask_size).astype(self.dtype), } self.enable_trt = True dtype = AnalysisConfig.Precision.Float32 if self.dtype == 'float16': dtype = AnalysisConfig.Precision.Half self.trt_parameters = TRTDeformableConvTest.TensorRTParam( - 1 << 30, self.bs, 0, dtype, False, False) + 1 << 30, self.bs, 0, dtype, False, False + ) self.fetch_list = [output] def set_params(self): @@ -77,13 +78,21 @@ class TRTDeformableConvTest(InferencePassTest): self.input_size = [self.bs, 8, 4, 4] self.num_filters = 8 self.filter_size = 3 - offset_c = 2 * self.deformable_groups * self.filter_size * self.filter_size + offset_c = ( + 2 * self.deformable_groups * self.filter_size * self.filter_size + ) mask_c = self.deformable_groups * self.filter_size * self.filter_size self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] self.dtype = 'float32' @@ -93,7 +102,8 @@ class TRTDeformableConvTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py index ae5f9f94673abc81ea1070c78d5b4ef704a6b234..ee021b24a3bfa73b5e1bd54b7659d76600be73d4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py @@ -21,27 +21,32 @@ from paddle.fluid.core import AnalysisConfig class TRTDynamicShapeTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 16, 16], - dtype="float32") - 
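A quick illustration of the offset/mask channel arithmetic the deformable-conv test above sets up, assuming the usual modulated deformable-conv layout of two offset components and one mask value per kernel tap, per deformable group:

deformable_groups = 1
filter_size = 3
# x/y offsets for each of the filter_size * filter_size sampling locations
offset_c = 2 * deformable_groups * filter_size * filter_size
# one modulation mask value per sampling location
mask_c = deformable_groups * filter_size * filter_size
print(offset_c, mask_c)  # 18 9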
out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - groups=1, - padding=[1, 1], - bias_attr=False, - act=None) + data = fluid.data( + name="data", shape=[-1, 3, 16, 16], dtype="float32" + ) + out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + groups=1, + padding=[1, 1], + bias_attr=False, + act=None, + ) self.feeds = self.set_feeds() self.enable_trt = True self.trt_parameters = TRTDynamicShapeTest.TensorRTParam( - 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, False, False + ) self.dynamic_shape_params = TRTDynamicShapeTest.DynamicShapeParam( - {'data': [1, 3, 8, 8]}, {'data': [1, 3, 32, 32]}, - {'data': [1, 3, 16, 16]}, False) + {'data': [1, 3, 8, 8]}, + {'data': [1, 3, 32, 32]}, + {'data': [1, 3, 16, 16]}, + False, + ) self.fetch_list = [out] def set_feeds(self): @@ -56,7 +61,6 @@ class TRTDynamicShapeTest(InferencePassTest): class TRTDynamicShapeOutOfBound1Test(TRTDynamicShapeTest): - def set_feeds(self): return { "data": np.random.random([1, 3, 64, 16]).astype("float32"), @@ -84,7 +88,6 @@ class TRTDynamicShapeOutOfBound1Test(TRTDynamicShapeTest): class TRTDynamicShapeOutOfBound3Test(TRTDynamicShapeTest): - def set_feeds(self): return { "data": np.random.random([1, 3, 4, 16]).astype("float32"), diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py index b24595e3797911295e5c5043c3e8b8fb64f000d2..7fb0baf11ffb06d6cce9ef5a3191ed8fa6b4c287 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py @@ -24,15 +24,14 @@ from paddle.fluid.core import AnalysisConfig class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data(name="data1", - shape=[-1, 3, 64, 64], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[-1, 3, 64, 1], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 3, 64, 64], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[-1, 3, 64, 1], dtype="float32" + ) eltwise_out = self.append_eltwise(data1, data2) out = fluid.layers.batch_norm(eltwise_out, is_test=True) self.feeds = { @@ -40,8 +39,11 @@ class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest): "data2": np.random.random([1, 3, 64, 1]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassElementwiseBroadcastTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + self.trt_parameters = ( + TensorRTSubgraphPassElementwiseBroadcastTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) + ) self.fetch_list = [out] def append_eltwise(self, data1, data2): @@ -54,26 +56,27 @@ class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassElementwiseBroadcastTest1( - TensorRTSubgraphPassElementwiseBroadcastTest): - + TensorRTSubgraphPassElementwiseBroadcastTest +): def append_eltwise(self, data1, data2): return fluid.layers.elementwise_sub(x=data1, y=data2, axis=0) class 
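The out-of-bound feed subclasses above probe the dynamic-shape profile limits; a hypothetical helper (not Paddle API) makes the per-dimension check explicit.

def in_profile(shape, min_shape, max_shape):
    # every dimension of the feed must lie inside [min, max] of the profile
    return all(lo <= s <= hi for s, lo, hi in zip(shape, min_shape, max_shape))


min_shape, max_shape = [1, 3, 8, 8], [1, 3, 32, 32]
print(in_profile([1, 3, 16, 16], min_shape, max_shape))  # True: the default feed
print(in_profile([1, 3, 64, 16], min_shape, max_shape))  # False: OutOfBound1 feed
print(in_profile([1, 3, 4, 16], min_shape, max_shape))   # False: OutOfBound3 feed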
TensorRTSubgraphPassElementwiseBroadcastTest2( - TensorRTSubgraphPassElementwiseBroadcastTest): - + TensorRTSubgraphPassElementwiseBroadcastTest +): def append_eltwise(self, data1, data2): return fluid.layers.elementwise_mul(x=data1, y=data2, axis=0) class TensorRTSubgraphPassElementwiseBroadcastTest3( - TensorRTSubgraphPassElementwiseBroadcastTest): - + TensorRTSubgraphPassElementwiseBroadcastTest +): def append_eltwise(self, data1, data2): return fluid.layers.elementwise_div(x=data1, y=data2, axis=0) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py index 78436f644b4f2d762a160bce666e805d22b47585..606836c8d0f57365cc6b67f003b9b6d57012c12c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py @@ -21,16 +21,14 @@ from paddle.fluid.core import AnalysisConfig class FCFusePassTRTTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 128, 2, 2], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=128, - num_flatten_dims=1, - act="relu") + data = fluid.data( + name="data", shape=[32, 128, 2, 2], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=128, num_flatten_dims=1, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -53,16 +51,14 @@ class FCFusePassTRTTest(InferencePassTest): class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 128, 32, 8], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=1, - act="relu") + data = fluid.data( + name="data", shape=[32, 128, 32, 8], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=1, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -70,7 +66,8 @@ class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = FCFusePassTRTStaticDims4Cols1Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -82,16 +79,14 @@ class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[3, 24, 16, 16], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=32, - num_flatten_dims=2, - act="relu") + data = fluid.data( + name="data", shape=[3, 24, 16, 16], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=32, num_flatten_dims=2, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -99,7 +94,8 @@ class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = FCFusePassTRTStaticDims4Cols2Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -111,22 +107,27 @@ class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): class 
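For the elementwise broadcast tests above, the two feeds form a standard last-axis broadcast pair; a plain numpy check (illustrative only) of the same shapes:

import numpy as np

a = np.random.random([1, 3, 64, 64]).astype("float32")
b = np.random.random([1, 3, 64, 1]).astype("float32")
# the size-1 trailing axis of b broadcasts against the 64-wide axis of a
print((a + b).shape, (a - b).shape, (a * b).shape)  # all (1, 3, 64, 64)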
FCFusePassTRTDynamicDims2Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[32, 128], dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=1, - act="relu") + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=1, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = {"data": np.random.random((32, 128)).astype("float32")} self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims2Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = FCFusePassTRTDynamicDims2Test.DynamicShapeParam( - {'data': [1, 128]}, {'data': [64, 128]}, {'data': [32, 128]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims2Test.DynamicShapeParam( + {'data': [1, 128]}, + {'data': [64, 128]}, + {'data': [32, 128]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -138,23 +139,27 @@ class FCFusePassTRTDynamicDims2Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=1, - act="relu") + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=1, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = {"data": np.random.random((32, 128, 32)).astype("float32")} self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims3Cols1Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = FCFusePassTRTDynamicDims3Cols1Test.DynamicShapeParam( - {'data': [1, 128, 32]}, {'data': [64, 128, 32]}, - {'data': [32, 128, 32]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims3Cols1Test.DynamicShapeParam( + {'data': [1, 128, 32]}, + {'data': [64, 128, 32]}, + {'data': [32, 128, 32]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -166,23 +171,27 @@ class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=2, - act="relu") + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=2, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = {"data": np.random.random((32, 128, 32)).astype("float32")} self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims3Cols2Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = FCFusePassTRTDynamicDims3Cols2Test.DynamicShapeParam( - {'data': [1, 32, 32]}, {'data': [64, 256, 32]}, - {'data': [32, 128, 32]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims3Cols2Test.DynamicShapeParam( + {'data': [1, 32, 32]}, + {'data': [64, 256, 32]}, + {'data': [32, 128, 32]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -194,16 +203,14 @@ class 
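The fc-fuse tests above vary num_flatten_dims; the numpy sketch below shows its meaning under the assumption that the layer flattens the leading dims into matrix rows and the remaining dims into columns, then restores the leading dims plus `size` on the output.

import numpy as np

x = np.random.random([3, 24, 16, 16]).astype("float32")
num_flatten_dims, size = 2, 32

rows = int(np.prod(x.shape[:num_flatten_dims]))   # 3 * 24 = 72
cols = int(np.prod(x.shape[num_flatten_dims:]))   # 16 * 16 = 256
w = np.random.random([cols, size]).astype("float32")

out2d = x.reshape(rows, cols) @ w                 # (72, 32)
out = out2d.reshape(*x.shape[:num_flatten_dims], size)
print(out.shape)                                  # (3, 24, 32)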
FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 12, 4, 6], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=1, - act="relu") + data = fluid.data( + name="data", shape=[32, 12, 4, 6], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=1, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -211,10 +218,16 @@ class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims4Cols1Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = FCFusePassTRTDynamicDims4Cols1Test.DynamicShapeParam( - {'data': [1, 12, 4, 6]}, {'data': [64, 12, 4, 6]}, - {'data': [32, 12, 4, 6]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims4Cols1Test.DynamicShapeParam( + {'data': [1, 12, 4, 6]}, + {'data': [64, 12, 4, 6]}, + {'data': [32, 12, 4, 6]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -226,16 +239,14 @@ class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 128, 32, 32], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=2, - act="relu") + data = fluid.data( + name="data", shape=[32, 128, 32, 32], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=2, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -243,10 +254,16 @@ class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims4Cols2Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - self.dynamic_shape_params = FCFusePassTRTDynamicDims4Cols2Test.DynamicShapeParam( - {'data': [1, 64, 32, 32]}, {'data': [64, 256, 32, 32]}, - {'data': [32, 128, 32, 32]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims4Cols2Test.DynamicShapeParam( + {'data': [1, 64, 32, 32]}, + {'data': [64, 256, 32, 32]}, + {'data': [32, 128, 32, 32]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -258,16 +275,14 @@ class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 128, 32, 32], - dtype="float32") - fc_out1 = fluid.layers.fc(input=data, - size=64, - num_flatten_dims=3, - act="relu") + data = fluid.data( + name="data", shape=[32, 128, 32, 32], dtype="float32" + ) + fc_out1 = fluid.layers.fc( + input=data, size=64, num_flatten_dims=3, act="relu" + ) out = fluid.layers.softmax(input=fc_out1) self.feeds = { @@ -275,10 +290,16 @@ class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = FCFusePassTRTDynamicDims4Cols3Test.TensorRTParam( - 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False) - 
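Throughout these tests the 1 << 30 passed as the first TensorRTParam argument appears to be the TensorRT workspace size in bytes, i.e. 1 GiB; trivial arithmetic below, for orientation only.

workspace_bytes = 1 << 30
print(workspace_bytes, workspace_bytes // (1024 ** 3))  # 1073741824 1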
self.dynamic_shape_params = FCFusePassTRTDynamicDims4Cols3Test.DynamicShapeParam( - {'data': [1, 128, 32, 32]}, {'data': [64, 128, 32, 32]}, - {'data': [32, 128, 32, 32]}, False) + 1 << 30, 32, 2, AnalysisConfig.Precision.Float32, False, False + ) + self.dynamic_shape_params = ( + FCFusePassTRTDynamicDims4Cols3Test.DynamicShapeParam( + {'data': [1, 128, 32, 32]}, + {'data': [64, 128, 32, 32]}, + {'data': [32, 128, 32, 32]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py index 847c836972feaacf24c9206f1d10c9f9a1d63474..179dcd140d8a2c15569ceba9d4a68a04fc038818 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py @@ -23,19 +23,19 @@ from paddle.fluid.core import PassVersionChecker class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): - def setUp(self): - def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') - fc_out = fluid.layers.fc(input=self.data, - size=10, - num_flatten_dims=1, - bias_attr=False, - act="relu") + fc_out = fluid.layers.fc( + input=self.data, + size=10, + num_flatten_dims=1, + bias_attr=False, + act="relu", + ) result = fluid.layers.relu(fc_out) loss = fluid.layers.cross_entropy(input=result, label=self.label) avg_loss = paddle.mean(loss) @@ -44,62 +44,64 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random((1, 28, 28)).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = FCQuantDequantFusePassTRTDims3Cols1Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) - self.dynamic_shape_params = FCQuantDequantFusePassTRTDims3Cols1Test.DynamicShapeParam( - { - 'data': [1, 28, 28], - 'reshape2_1.tmp_0': [1, 1, 10] - }, { - 'data': [2, 28, 28], - 'reshape2_1.tmp_0': [2, 1, 10] - }, { - 'data': [1, 28, 28], - 'reshape2_1.tmp_0': [1, 1, 10] - }, False) + self.trt_parameters = ( + FCQuantDequantFusePassTRTDims3Cols1Test.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) + self.dynamic_shape_params = ( + FCQuantDequantFusePassTRTDims3Cols1Test.DynamicShapeParam( + {'data': [1, 28, 28], 'reshape2_1.tmp_0': [1, 1, 10]}, + {'data': [2, 28, 28], 'reshape2_1.tmp_0': [2, 1, 10]}, + {'data': [1, 28, 28], 'reshape2_1.tmp_0': [1, 1, 10]}, + False, + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' def test_check_output(self): - 
#self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e-2, - flatten=False, - rtol=1e-2) + self.check_output_with_option( + use_gpu, atol=1e-2, flatten=False, rtol=1e-2 + ) self.assertTrue( PassVersionChecker.IsCompatible( - 'quant_conv2d_dequant_fuse_pass')) + 'quant_conv2d_dequant_fuse_pass' + ) + ) class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): - def setUp(self): - def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') - fc_out = fluid.layers.fc(input=self.data, - size=28, - num_flatten_dims=2, - bias_attr=False, - act=None) + fc_out = fluid.layers.fc( + input=self.data, + size=28, + num_flatten_dims=2, + bias_attr=False, + act=None, + ) c_out = fluid.layers.reshape(fc_out, shape=[0, 784]) result = fluid.layers.relu(c_out) loss = fluid.layers.cross_entropy(input=result, label=self.label) @@ -109,64 +111,66 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random((1, 28, 28)).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = FCQuantDequantFusePassTRTDims3Cols2Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) - self.dynamic_shape_params = FCQuantDequantFusePassTRTDims3Cols2Test.DynamicShapeParam( - { - 'data': [1, 28, 28], - 'reshape2_0.tmp_0': [1, 784] - }, { - 'data': [4, 28, 28], - 'reshape2_0.tmp_0': [4, 784] - }, { - 'data': [1, 28, 28], - 'reshape2_0.tmp_0': [1, 784] - }, False) + self.trt_parameters = ( + FCQuantDequantFusePassTRTDims3Cols2Test.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) + self.dynamic_shape_params = ( + FCQuantDequantFusePassTRTDims3Cols2Test.DynamicShapeParam( + {'data': [1, 28, 28], 'reshape2_0.tmp_0': [1, 784]}, + {'data': [4, 28, 28], 'reshape2_0.tmp_0': [4, 784]}, + {'data': [1, 28, 28], 'reshape2_0.tmp_0': [1, 784]}, + False, + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' def test_check_output(self): - #self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e-1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1e-1, flatten=False, rtol=1e-1 + ) self.assertTrue( PassVersionChecker.IsCompatible( - 'quant_conv2d_dequant_fuse_pass')) + 'quant_conv2d_dequant_fuse_pass' + ) + ) class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): - def setUp(self): - def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) 
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') label_shape = fluid.layers.reshape(self.label, shape=[1, 1, 1]) reshape_out = fluid.layers.reshape(self.data, shape=[1, 14, 14, 4]) - fc_out = fluid.layers.fc(input=reshape_out, - size=14, - num_flatten_dims=3, - bias_attr=False, - act=None) + fc_out = fluid.layers.fc( + input=reshape_out, + size=14, + num_flatten_dims=3, + bias_attr=False, + act=None, + ) c_out = fluid.layers.reshape(fc_out, shape=[1, 1, 2744]) result = fluid.layers.relu(c_out) loss = fluid.layers.cross_entropy(input=result, label=label_shape) @@ -176,49 +180,60 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random((1, 28, 28)).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = FCQuantDequantFusePassTRTDims3Cols3Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) - self.dynamic_shape_params = FCQuantDequantFusePassTRTDims3Cols3Test.DynamicShapeParam( - { - 'data': [1, 28, 28], - "reshape2_1.tmp_0": [1, 14, 14, 4], - "reshape2_2.tmp_0": [1, 1, 2744] - }, { - 'data': [4, 28, 28], - "reshape2_1.tmp_0": [4, 14, 14, 4], - "reshape2_2.tmp_0": [4, 1, 2744] - }, { - 'data': [1, 28, 28], - "reshape2_1.tmp_0": [1, 14, 14, 4], - "reshape2_2.tmp_0": [1, 1, 2744] - }, False) + self.trt_parameters = ( + FCQuantDequantFusePassTRTDims3Cols3Test.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) + self.dynamic_shape_params = ( + FCQuantDequantFusePassTRTDims3Cols3Test.DynamicShapeParam( + { + 'data': [1, 28, 28], + "reshape2_1.tmp_0": [1, 14, 14, 4], + "reshape2_2.tmp_0": [1, 1, 2744], + }, + { + 'data': [4, 28, 28], + "reshape2_1.tmp_0": [4, 14, 14, 4], + "reshape2_2.tmp_0": [4, 1, 2744], + }, + { + 'data': [1, 28, 28], + "reshape2_1.tmp_0": [1, 14, 14, 4], + "reshape2_2.tmp_0": [1, 1, 2744], + }, + False, + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' def test_check_output(self): - #self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1e0, - flatten=False, - rtol=1e0) + self.check_output_with_option( + use_gpu, atol=1e0, flatten=False, rtol=1e0 + ) self.assertTrue( PassVersionChecker.IsCompatible( - 'quant_conv2d_dequant_fuse_pass')) + 'quant_conv2d_dequant_fuse_pass' + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py index 698eae07ccf8e54ff6093ebf1037749b7e3ef104..b163d429e4778e7c83b7f8346f36192a7efdab45 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py @@ 
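Illustrative shape arithmetic for the Dims3Cols3 quant test above, assuming fc keeps the first num_flatten_dims dims and appends `size`: [1, 28, 28] -> reshape -> [1, 14, 14, 4] -> fc(size=14, num_flatten_dims=3) -> [1, 14, 14, 14] -> reshape -> [1, 1, 2744].

import numpy as np

assert 28 * 28 == 14 * 14 * 4               # the first reshape preserves element count
fc_out_shape = [1, 14, 14, 14]
assert int(np.prod(fc_out_shape)) == 2744   # matches reshape2_2.tmp_0: [1, 1, 2744]
print(fc_out_shape, 14 * 14 * 14)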
-42,7 +42,8 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['mul', 'elementwise_add'], (1e-4, 1e-1) def add_ignore_pass_case(self): @@ -65,9 +66,10 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape and attr of flatten2 x_shape = draw( - st.lists(st.integers(min_value=1, max_value=10), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=10), min_size=4, max_size=4 + ) + ) # [a, b, c, d] => [a, b*c*d] flatten_axis = 1 flatten_shape = [x_shape[0], x_shape[1] * x_shape[2] * x_shape[3]] @@ -79,9 +81,10 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of matmul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = flatten_shape[1] # 4. Generate legal attr:axis of elementwise_add @@ -98,17 +101,11 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): "X": ["flatten2_x"], }, axis=flatten_axis, - outputs={ - "Out": ["flatten2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["flatten2_out"], "XShape": ["xshape"]}, ) matmul_op = OpConfig( "matmul", - inputs={ - "X": ["flatten2_out"], - "Y": ["matmul_y"] - }, + inputs={"X": ["flatten2_out"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -123,10 +120,7 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["matmul_out"], - "Y": ["bias"] - }, + inputs={"X": ["matmul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) @@ -148,9 +142,11 @@ class TestFlatten2MatmulFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=25, - passes=["trt_flatten2_matmul_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=25, + passes=["trt_flatten2_matmul_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py index 41c0c7621ac990afdb3b58ecd3bcac675feb8e88..6d230134a3527e45ad66ac5e17527606d28a48e1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py @@ -22,12 +22,11 @@ from paddle.fluid.core import AnalysisConfig class TRTFlattenTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) flatten_out = self.append_flatten(data) out = fluid.layers.batch_norm(flatten_out, is_test=True) self.feeds = { @@ -35,7 +34,8 @@ class TRTFlattenTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTFlattenTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_flatten(self, data): @@ -46,16 +46,16 @@ class TRTFlattenTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - 
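A tiny illustration of the flatten2 shape rule the fuse-pass test above generates programs for: with flatten axis 1, [a, b, c, d] collapses to [a, b*c*d], and the matmul Y input must match the collapsed inner size (values below are arbitrary examples).

x_shape = [2, 3, 4, 5]                 # any 4-D shape drawn by the test
flatten_axis = 1
flatten_shape = [x_shape[0], x_shape[1] * x_shape[2] * x_shape[3]]
y_shape = [flatten_shape[1], 7]        # Y rows must equal the flattened width
print(flatten_shape, y_shape)          # [2, 60] [60, 7]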
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTFlattenDynamicTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) flatten_out = self.append_flatten(data) out = fluid.layers.batch_norm(flatten_out, is_test=True) self.feeds = { @@ -63,18 +63,14 @@ class TRTFlattenDynamicTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTFlattenDynamicTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.dynamic_shape_params = TRTFlattenDynamicTest.DynamicShapeParam( - { - 'data': [2, 6, 64, 64], - 'flatten_0.tmp_0': [2, 6 * 64 * 64] - }, { - 'data': [2, 6, 64, 64], - 'flatten_0.tmp_0': [2, 6 * 64 * 64] - }, { - 'data': [2, 6, 64, 64], - 'flatten_0.tmp_0': [2, 6 * 64 * 64] - }, False) + {'data': [2, 6, 64, 64], 'flatten_0.tmp_0': [2, 6 * 64 * 64]}, + {'data': [2, 6, 64, 64], 'flatten_0.tmp_0': [2, 6 * 64 * 64]}, + {'data': [2, 6, 64, 64], 'flatten_0.tmp_0': [2, 6 * 64 * 64]}, + False, + ) self.fetch_list = [out] def append_flatten(self, data): @@ -85,7 +81,8 @@ class TRTFlattenDynamicTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py index 2f8a711287cd29a83ab2e1b3e6f32c2806885e9e..75dbf5477b03412d5edd61669b471eebf0bbcaa7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py @@ -22,7 +22,6 @@ from paddle.fluid.core import AnalysisConfig class TRTGatherNdTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[-1, 3, 4], dtype="float32") @@ -32,40 +31,37 @@ class TRTGatherNdTest(InferencePassTest): self.feeds = { "data": np.random.random([2, 3, 4]).astype("float32"), - "index": np.array([[[0, 1], [1, 0]], [[1, 2], - [0, 1]]]).astype("int32"), + "index": np.array([[[0, 1], [1, 0]], [[1, 2], [0, 1]]]).astype( + "int32" + ), } self.enable_trt = True self.trt_parameters = TRTGatherNdTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTGatherNdTest.DynamicShapeParam( - { - 'data': [1, 3, 4], - 'index': [1, 2, 2] - }, { - 'data': [3, 3, 4], - 'index': [3, 2, 2] - }, { - 'data': [3, 3, 4], - 'index': [3, 2, 2] - }, False) + {'data': [1, 3, 4], 'index': [1, 2, 2]}, + {'data': [3, 3, 4], 'index': [3, 2, 2]}, + {'data': [3, 3, 4], 'index': [3, 2, 2]}, + False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTGatherNdFp16Test(InferencePassTest): - def 
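The gather_nd feed above can be checked against plain numpy fancy indexing (not Paddle API): the last index dimension addresses the leading data dimensions, so a [2, 2, 2] index over [2, 3, 4] data yields a [2, 2, 4] result.

import numpy as np

data = np.arange(2 * 3 * 4, dtype="float32").reshape(2, 3, 4)
index = np.array([[[0, 1], [1, 0]], [[1, 2], [0, 1]]], dtype="int32")
# index[..., 0] picks along dim 0, index[..., 1] along dim 1; dim 2 is kept whole
out = data[index[..., 0], index[..., 1]]
print(out.shape)  # (2, 2, 4)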
setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 1280, 192], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 1280, 192], dtype="float32" + ) index = fluid.data(name="index", shape=[-1, 1028, 2], dtype="int32") gather_nd = fluid.layers.gather_nd(data, index) out = fluid.layers.batch_norm(gather_nd, is_test=True) @@ -77,26 +73,23 @@ class TRTGatherNdFp16Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTGatherNdFp16Test.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTGatherNdFp16Test.DynamicShapeParam( - { - 'data': [1, 1280, 192], - 'index': [1, 1028, 2] - }, { - 'data': [3, 1280, 192], - 'index': [3, 1028, 2] - }, { - 'data': [3, 1280, 192], - 'index': [3, 1028, 2] - }, False) + {'data': [1, 1280, 192], 'index': [1, 1028, 2]}, + {'data': [3, 1280, 192], 'index': [3, 1028, 2]}, + {'data': [3, 1280, 192], 'index': [3, 1028, 2]}, + False, + ) def test_check_output(self, atol=1e-3): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py index c765d27f0bc1f9181d323b3ec3d1ef04608e91a1..72c62ff4127a6b5a83238296d7f51f39841e2292 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py @@ -22,7 +22,6 @@ from paddle.fluid.core import AnalysisConfig class TRTGatherTest1(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): @@ -33,23 +32,19 @@ class TRTGatherTest1(InferencePassTest): self.feeds = { "data": np.random.random([self.bs, 128]).astype("float32"), - "index": self.index + "index": self.index, } self.enable_trt = True self.trt_parameters = TRTGatherTest1.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.dynamic_shape_params = TRTGatherTest1.DynamicShapeParam( - { - 'data': [1, 1], - 'index': [1, 1] - }, { - 'data': [32, 128], - 'index': [3, 1] - }, { - 'data': [32, 128], - 'index': [3, 1] - }, False) + {'data': [1, 1], 'index': [1, 1]}, + {'data': [32, 128], 'index': [3, 1]}, + {'data': [32, 128], 'index': [3, 1]}, + False, + ) self.fetch_list = [out] def set_params(self): @@ -61,11 +56,11 @@ class TRTGatherTest1(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=False) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTGatherTest2(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): @@ -76,23 +71,19 @@ class TRTGatherTest2(InferencePassTest): self.feeds = { "data": np.random.random([self.bs, 64]).astype("float32"), - "index": self.index + "index": self.index, } self.enable_trt = True self.trt_parameters = TRTGatherTest2.TensorRTParam( - 1 << 30, self.bs, 1, 
AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.dynamic_shape_params = TRTGatherTest2.DynamicShapeParam( - { - 'data': [2, 4], - 'index': [1] - }, { - 'data': [256, 256], - 'index': [4] - }, { - 'data': [64, 32], - 'index': [2] - }, False) + {'data': [2, 4], 'index': [1]}, + {'data': [256, 256], 'index': [4]}, + {'data': [64, 32], 'index': [2]}, + False, + ) self.fetch_list = [out] def set_params(self): @@ -104,7 +95,8 @@ class TRTGatherTest2(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=False) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py index 2aba345ffc6863877d61cb8eddab8a84173d4ef0..cab331aaa3c91e691e95386e654d39597fe71efd 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py @@ -24,16 +24,17 @@ import subprocess class TensorRTInspectorTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[1, 16, 16], dtype="float32") - matmul_out = fluid.layers.matmul(x=data, - y=data, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + matmul_out = fluid.layers.matmul( + x=data, + y=data, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) self.feeds = { @@ -41,7 +42,8 @@ class TensorRTInspectorTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, 1, 0, AnalysisConfig.Precision.Float32, False, False, True) + 1 << 30, 1, 0, AnalysisConfig.Precision.Float32, False, False, True + ) self.fetch_list = [out] def set_params(self): @@ -53,12 +55,16 @@ class TensorRTInspectorTest(InferencePassTest): if core.is_compiled_with_cuda(): build_engine = subprocess.run( [sys.executable, 'test_trt_inspector.py', '--build-engine'], - stderr=subprocess.PIPE) + stderr=subprocess.PIPE, + ) engine_info = build_engine.stderr.decode('ascii') trt_compile_version = paddle.inference.get_trt_compile_version() trt_runtime_version = paddle.inference.get_trt_runtime_version() valid_version = (8, 2, 0) - if trt_compile_version >= valid_version and trt_runtime_version >= valid_version: + if ( + trt_compile_version >= valid_version + and trt_runtime_version >= valid_version + ): self.assertTrue('====== engine info ======' in engine_info) self.assertTrue('====== engine info end ======' in engine_info) self.assertTrue('matmul' in engine_info) @@ -66,8 +72,9 @@ class TensorRTInspectorTest(InferencePassTest): self.assertTrue('batch_norm' in engine_info) else: self.assertTrue( - 'Inspector needs TensorRT version 8.2 and after.' in - engine_info) + 'Inspector needs TensorRT version 8.2 and after.' 
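The inspector test above gates on TensorRT 8.2 via plain tuple comparison, which Python evaluates lexicographically:

valid_version = (8, 2, 0)
for version in [(8, 4, 1), (8, 2, 0), (7, 2, 3)]:
    print(version, version >= valid_version)
# (8, 4, 1) True, (8, 2, 0) True, (7, 2, 3) False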
+ in engine_info + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py index 6939e1c1a00545a62b9519945a6c5e802d819a4a..c69394d7adfa2d6010847e06ee8bdbf2ffa57398 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py @@ -25,7 +25,6 @@ from paddle.fluid.core import AnalysisConfig class TRTInstanceNormTest(InferencePassTest): - def setUp(self): self.bs = 4 self.channel = 4 @@ -37,7 +36,8 @@ class TRTInstanceNormTest(InferencePassTest): def build(self): self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, self.bs, 2, self.precision, self.serialize, False) + 1 << 30, self.bs, 2, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): shape = [-1, self.channel, self.height, self.width] @@ -61,7 +61,8 @@ class TRTInstanceNormTest(InferencePassTest): atol = 2e-2 self.check_output_with_option(use_gpu, atol, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self, remove_cache=False): self.build() @@ -69,12 +70,14 @@ class TRTInstanceNormTest(InferencePassTest): def run_all_tests(self): precision_opt = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_opt = [False, True] - for precision, serialize in itertools.product(precision_opt, - serialize_opt): + for precision, serialize in itertools.product( + precision_opt, serialize_opt + ): self.precision = precision self.serialize = serialize self.run_test() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py index 14b0e9fa1451d0aa4ca3af7b5a60f8169977cfb7..9e24c0f3233b7241ec0898db485fce75e1408683 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py @@ -22,16 +22,17 @@ from paddle.fluid.core import AnalysisConfig class TensorRTMatMulDims2Test(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[24, 24], dtype="float32") - matmul_out = fluid.layers.matmul(x=data, - y=data, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + matmul_out = fluid.layers.matmul( + x=data, + y=data, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) self.feeds = { @@ -39,7 +40,8 @@ class TensorRTMatMulDims2Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTMatMulDims2Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -52,22 +54,24 @@ class TensorRTMatMulDims2Test(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTMatMulTest(InferencePassTest): - def 
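run_all_tests above sweeps every precision/serialize combination; the same pattern in isolation, with stand-in values, illustrative only:

import itertools

precision_opt = ["Float32", "Half"]   # stand-ins for AnalysisConfig.Precision members
serialize_opt = [False, True]
for precision, serialize in itertools.product(precision_opt, serialize_opt):
    print(precision, serialize)       # four combinations in total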
setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 24, 24], - dtype="float32") - matmul_out = fluid.layers.matmul(x=data, - y=data, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + data = fluid.data( + name="data", shape=[-1, 6, 24, 24], dtype="float32" + ) + matmul_out = fluid.layers.matmul( + x=data, + y=data, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) self.feeds = { @@ -75,7 +79,8 @@ class TensorRTMatMulTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTMatMulTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -88,11 +93,11 @@ class TensorRTMatMulTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTMatMulTransposeXTest(TensorRTMatMulTest): - def set_params(self): self.transpose_x = True self.transpose_y = False @@ -100,7 +105,6 @@ class TensorRTMatMulTransposeXTest(TensorRTMatMulTest): class TensorRTMatMulTransposeYTest(TensorRTMatMulTest): - def set_params(self): self.transpose_x = False self.transpose_y = True @@ -108,7 +112,6 @@ class TensorRTMatMulTransposeYTest(TensorRTMatMulTest): class TensorRTMatMulScaleTest(TensorRTMatMulTest): - def set_params(self): self.transpose_x = False self.transpose_y = False @@ -116,29 +119,31 @@ class TensorRTMatMulScaleTest(TensorRTMatMulTest): class TensorRTMatMulBroadcastTest(InferencePassTest): - def setUp(self): self.set_params() place = fluid.CPUPlace() with fluid.program_guard(self.main_program, self.startup_program): - data_x = fluid.data(name="data_x", - shape=[-1, 6, 24], - dtype="float32") + data_x = fluid.data( + name="data_x", shape=[-1, 6, 24], dtype="float32" + ) data_y = fluid.data(name="data_y", shape=[24, 16], dtype="float32") - matmul_out = fluid.layers.matmul(x=data_x, - y=data_y, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + matmul_out = fluid.layers.matmul( + x=data_x, + y=data_y, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) self.feeds = { "data_x": np.ones([2, 6, 24]).astype("float32"), - "data_y": np.ones([24, 16]).astype("float32") + "data_y": np.ones([24, 16]).astype("float32"), } self.enable_trt = True self.trt_parameters = TensorRTMatMulBroadcastTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -151,7 +156,8 @@ class TensorRTMatMulBroadcastTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py index b075c65c82148c545d249dc774cacb2defedb4fd..75db2ebb221f5bf160b531f2a5557a2649a12063 
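A numpy sketch of the matmul cases exercised above, under the assumption that the transpose flags swap the last two axes and alpha scales the product; it also covers the broadcast case with a 2-D Y.

import numpy as np


def matmul_ref(x, y, transpose_x=False, transpose_y=False, alpha=1.0):
    a = np.swapaxes(x, -1, -2) if transpose_x else x
    b = np.swapaxes(y, -1, -2) if transpose_y else y
    return alpha * (a @ b)


x = np.ones([2, 6, 24], dtype="float32")
y = np.ones([24, 16], dtype="float32")
print(matmul_ref(x, y).shape)                    # (2, 6, 16): 2-D y broadcasts
print(matmul_ref(x, x, transpose_y=True).shape)  # (2, 6, 6)
print(matmul_ref(x, x, transpose_x=True).shape)  # (2, 24, 24)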
100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py @@ -23,25 +23,28 @@ from paddle.fluid.core import AnalysisConfig class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') - matmul_out = fluid.layers.matmul(x=self.data, - y=self.data, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) - fc_out = fluid.layers.fc(input=matmul_out, - size=10, - num_flatten_dims=1, - bias_attr=False, - act=None) + matmul_out = fluid.layers.matmul( + x=self.data, + y=self.data, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) + fc_out = fluid.layers.fc( + input=matmul_out, + size=10, + num_flatten_dims=1, + bias_attr=False, + act=None, + ) result = fluid.layers.relu(fc_out) loss = fluid.layers.cross_entropy(input=result, label=self.label) avg_loss = paddle.mean(loss) @@ -50,21 +53,23 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True self.trt_parameters = TensorRTMatMulQuantDequantDims3Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -74,20 +79,20 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): self.alpha = 1.0 def test_check_output(self): - #self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTMatMulQuantDequantDims3TransposeXTest( - TensorRTMatMulQuantDequantDims3Test): - + TensorRTMatMulQuantDequantDims3Test +): def set_params(self): self.transpose_x = True self.transpose_y = False @@ -95,8 +100,8 @@ class TensorRTMatMulQuantDequantDims3TransposeXTest( class TensorRTMatMulQuantDequantDims3TransposeYTest( - TensorRTMatMulQuantDequantDims3Test): - + TensorRTMatMulQuantDequantDims3Test +): def set_params(self): self.transpose_x = False self.transpose_y = True @@ -104,8 +109,8 @@ class TensorRTMatMulQuantDequantDims3TransposeYTest( class TensorRTMatMulQuantDequantDims3TransposeXYTest( - TensorRTMatMulQuantDequantDims3Test): - + 
TensorRTMatMulQuantDequantDims3Test +): def set_params(self): self.transpose_x = True self.transpose_y = True @@ -113,27 +118,30 @@ class TensorRTMatMulQuantDequantDims3TransposeXYTest( class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[1, 28, 28], dtype='float32' + ) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') reshape_out = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14]) - matmul_out = fluid.layers.matmul(x=reshape_out, - y=reshape_out, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + matmul_out = fluid.layers.matmul( + x=reshape_out, + y=reshape_out, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) - fc_out = fluid.layers.fc(input=matmul_out, - size=10, - num_flatten_dims=1, - bias_attr=False, - act=None) + fc_out = fluid.layers.fc( + input=matmul_out, + size=10, + num_flatten_dims=1, + bias_attr=False, + act=None, + ) result = fluid.layers.relu(fc_out) loss = fluid.layers.cross_entropy(input=result, label=self.label) avg_loss = paddle.mean(loss) @@ -142,21 +150,23 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True self.trt_parameters = TensorRTMatMulQuantDequantDims4Test.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -166,20 +176,20 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): self.alpha = 1.0 def test_check_output(self): - #self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTMatMulQuantDequantDims4TransposeXTest( - TensorRTMatMulQuantDequantDims4Test): - + TensorRTMatMulQuantDequantDims4Test +): def set_params(self): self.transpose_x = True self.transpose_y = False @@ -187,8 +197,8 @@ class TensorRTMatMulQuantDequantDims4TransposeXTest( class TensorRTMatMulQuantDequantDims4TransposeYTest( - TensorRTMatMulQuantDequantDims4Test): - + TensorRTMatMulQuantDequantDims4Test +): def set_params(self): self.transpose_x = False self.transpose_y = True @@ -196,8 +206,8 @@ class TensorRTMatMulQuantDequantDims4TransposeYTest( class 
TensorRTMatMulQuantDequantDims4TransposeXYTest( - TensorRTMatMulQuantDequantDims4Test): - + TensorRTMatMulQuantDequantDims4Test +): def set_params(self): self.transpose_x = True self.transpose_y = True @@ -205,26 +215,29 @@ class TensorRTMatMulQuantDequantDims4TransposeXYTest( class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest): - def setUp(self): self.set_params() def network(): - self.data = fluid.data(name='data', - shape=[-1, 28, 28], - dtype='float32') + self.data = fluid.data( + name='data', shape=[-1, 28, 28], dtype='float32' + ) self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') - matmul_out = fluid.layers.matmul(x=self.data, - y=self.data, - transpose_x=self.transpose_x, - transpose_y=self.transpose_y, - alpha=self.alpha) + matmul_out = fluid.layers.matmul( + x=self.data, + y=self.data, + transpose_x=self.transpose_x, + transpose_y=self.transpose_y, + alpha=self.alpha, + ) out = fluid.layers.batch_norm(matmul_out, is_test=True) - fc_out = fluid.layers.fc(input=matmul_out, - size=10, - num_flatten_dims=1, - bias_attr=False, - act=None) + fc_out = fluid.layers.fc( + input=matmul_out, + size=10, + num_flatten_dims=1, + bias_attr=False, + act=None, + ) result = fluid.layers.relu(fc_out) loss = fluid.layers.cross_entropy(input=result, label=self.label) avg_loss = paddle.mean(loss) @@ -233,24 +246,33 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest): self.main_program.random_seed = 2 self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 - #self.test_startup_program.random_seed = 2 + # self.test_startup_program.random_seed = 2 with fluid.unique_name.guard(): with fluid.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = fluid.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) with fluid.unique_name.guard(): - with fluid.program_guard(self.test_main_program, - self.startup_program): + with fluid.program_guard( + self.test_main_program, self.startup_program + ): network() self.feeds = {"data": np.random.random([3, 28, 28]).astype("float32")} self.fetch_list = [result] self.enable_trt = True - self.trt_parameters = TensorRTMatMulQuantDequantDims3DynamicTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False) - self.dynamic_shape_params = TensorRTMatMulQuantDequantDims3DynamicTest.DynamicShapeParam( - {'data': [1, 28, 28]}, {'data': [4, 28, 28]}, {'data': [3, 28, 28]}, - False) + self.trt_parameters = ( + TensorRTMatMulQuantDequantDims3DynamicTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False + ) + ) + self.dynamic_shape_params = ( + TensorRTMatMulQuantDequantDims3DynamicTest.DynamicShapeParam( + {'data': [1, 28, 28]}, + {'data': [4, 28, 28]}, + {'data': [3, 28, 28]}, + False, + ) + ) self.activation_quantize_type = 'moving_average_abs_max' self.weight_quantize_type = 'channel_wise_abs_max' @@ -260,20 +282,20 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest): self.alpha = 1.0 def test_check_output(self): - #self.quant_dequant() + # self.quant_dequant() if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - atol=1, - flatten=False, - rtol=1e-1) + self.check_output_with_option( + use_gpu, atol=1, flatten=False, rtol=1e-1 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTMatMulQuantDequantDims4TransposeXDynamicTest( - 
TensorRTMatMulQuantDequantDims3DynamicTest): - + TensorRTMatMulQuantDequantDims3DynamicTest +): def set_params(self): self.transpose_x = True self.transpose_y = False @@ -281,8 +303,8 @@ class TensorRTMatMulQuantDequantDims4TransposeXDynamicTest( class TensorRTMatMulQuantDequantDims4TransposeYDynamicTest( - TensorRTMatMulQuantDequantDims3DynamicTest): - + TensorRTMatMulQuantDequantDims3DynamicTest +): def set_params(self): self.transpose_x = False self.transpose_y = True @@ -290,8 +312,8 @@ class TensorRTMatMulQuantDequantDims4TransposeYDynamicTest( class TensorRTMatMulQuantDequantDims4TransposeXYDynamicTest( - TensorRTMatMulQuantDequantDims3DynamicTest): - + TensorRTMatMulQuantDequantDims3DynamicTest +): def set_params(self): self.transpose_x = True self.transpose_y = True diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py index 91ca32ef8994ef540d2318546123938ca5e6087f..05df3b65082b77a40a039365403c8ba2ec674e82 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py @@ -24,19 +24,21 @@ from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig -def multiclass_nms(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=-1, - return_index=False, - return_rois_num=True, - rois_num=None, - name=None): +def multiclass_nms( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=-1, + return_index=False, + return_rois_num=True, + rois_num=None, + name=None, +): """ This operator is to do multi-class non maximum suppression (NMS) on boxes and scores. 
@@ -127,12 +129,25 @@ def multiclass_nms(bboxes, return_index=True) """ if in_dygraph_mode(): - attrs = ('background_label', background_label, 'score_threshold', - score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold', - nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta, - 'normalized', normalized) + attrs = ( + 'background_label', + background_label, + 'score_threshold', + score_threshold, + 'nms_top_k', + nms_top_k, + 'nms_threshold', + nms_threshold, + 'keep_top_k', + keep_top_k, + 'nms_eta', + nms_eta, + 'normalized', + normalized, + ) output, index, nms_rois_num = core.ops.multiclass_nms3( - bboxes, scores, rois_num, *attrs) + bboxes, scores, rois_num, *attrs + ) if not return_index: index = None return output, nms_rois_num, index @@ -150,21 +165,24 @@ def multiclass_nms(bboxes, if return_rois_num: nms_rois_num = helper.create_variable_for_type_inference( - dtype='int32') + dtype='int32' + ) outputs['NmsRoisNum'] = nms_rois_num - helper.append_op(type="multiclass_nms3", - inputs=inputs, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'keep_top_k': keep_top_k, - 'nms_eta': nms_eta, - 'normalized': normalized - }, - outputs=outputs) + helper.append_op( + type="multiclass_nms3", + inputs=inputs, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized, + }, + outputs=outputs, + ) output.stop_gradient = True index.stop_gradient = True if not return_index: @@ -176,7 +194,6 @@ def multiclass_nms(bboxes, class TensorRTMultiClassNMS3Test(InferencePassTest): - def setUp(self): self.enable_trt = True self.enable_tensorrt_varseqlen = True @@ -184,25 +201,28 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): self.serialize = False self.bs = 1 self.background_label = -1 - self.score_threshold = .5 + self.score_threshold = 0.5 self.nms_top_k = 8 - self.nms_threshold = .3 + self.nms_threshold = 0.3 self.keep_top_k = 8 self.normalized = False self.num_classes = 8 self.num_boxes = 8 self.nms_eta = 1.1 self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, self.bs, 2, self.precision, self.serialize, False) + 1 << 30, self.bs, 2, self.precision, self.serialize, False + ) def build(self): with fluid.program_guard(self.main_program, self.startup_program): - boxes = fluid.data(name='bboxes', - shape=[-1, self.num_boxes, 4], - dtype='float32') - scores = fluid.data(name='scores', - shape=[-1, self.num_classes, self.num_boxes], - dtype='float32') + boxes = fluid.data( + name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32' + ) + scores = fluid.data( + name='scores', + shape=[-1, self.num_classes, self.num_boxes], + dtype='float32', + ) multiclass_nms_out, _, _ = multiclass_nms( bboxes=boxes, scores=scores, @@ -212,17 +232,26 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): nms_threshold=self.nms_threshold, keep_top_k=self.keep_top_k, normalized=self.normalized, - nms_eta=self.nms_eta) - mutliclass_nms_out = multiclass_nms_out + 1. 
+ nms_eta=self.nms_eta, + ) + mutliclass_nms_out = multiclass_nms_out + 1.0 multiclass_nms_out = fluid.layers.reshape( - multiclass_nms_out, [self.bs, 1, self.keep_top_k, 6], - name='reshape') + multiclass_nms_out, + [self.bs, 1, self.keep_top_k, 6], + name='reshape', + ) out = fluid.layers.batch_norm(multiclass_nms_out, is_test=True) - boxes_data = np.arange(self.num_boxes * 4).reshape( - [self.bs, self.num_boxes, 4]).astype('float32') - scores_data = np.arange(1 * self.num_classes * self.num_boxes).reshape( - [self.bs, self.num_classes, self.num_boxes]).astype('float32') + boxes_data = ( + np.arange(self.num_boxes * 4) + .reshape([self.bs, self.num_boxes, 4]) + .astype('float32') + ) + scores_data = ( + np.arange(1 * self.num_classes * self.num_boxes) + .reshape([self.bs, self.num_classes, self.num_boxes]) + .astype('float32') + ) self.feeds = { 'bboxes': boxes_data, 'scores': scores_data, @@ -235,7 +264,8 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): def run_test_all(self): precision_opt = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_opt = [False, True] max_shape = { @@ -246,13 +276,15 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): dynamic_shape_opt = [ None, InferencePassTest.DynamicShapeParam( - { - 'bboxes': [1, 1, 4], - 'scores': [1, 1, 1] - }, max_shape, opt_shape, False) + {'bboxes': [1, 1, 4], 'scores': [1, 1, 1]}, + max_shape, + opt_shape, + False, + ), ] for precision, serialize, dynamic_shape in itertools.product( - precision_opt, serialize_opt, dynamic_shape_opt): + precision_opt, serialize_opt, dynamic_shape_opt + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape @@ -264,7 +296,8 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def test_base(self): self.run_test() @@ -284,10 +317,11 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): } opt_shape = max_shape self.dynamic_shape_params = InferencePassTest.DynamicShapeParam( - { - 'bboxes': [1, 1, 4], - 'scores': [1, 1, 1] - }, max_shape, opt_shape, False) + {'bboxes': [1, 1, 4], 'scores': [1, 1, 1]}, + max_shape, + opt_shape, + False, + ) self.run_test() def test_background(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms_op.py index 5e04241f149914630886f3dce07dd878d945c536..ead11ba7ae170e76853ed0e0876f1daa2d3c719e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms_op.py @@ -23,7 +23,6 @@ from paddle.fluid.core import AnalysisConfig class TensorRTMultiClassNMSTest(InferencePassTest): - def setUp(self): self.enable_trt = True self.enable_tensorrt_varseqlen = True @@ -31,24 +30,27 @@ class TensorRTMultiClassNMSTest(InferencePassTest): self.serialize = False self.bs = 1 self.background_label = -1 - self.score_threshold = .5 + self.score_threshold = 0.5 self.nms_top_k = 8 - self.nms_threshold = .3 + self.nms_threshold = 0.3 self.keep_top_k = 8 self.normalized = False self.num_classes = 8 self.num_boxes = 8 self.trt_parameters = InferencePassTest.TensorRTParam( - 1 << 30, self.bs, 2, self.precision, self.serialize, 
False) + 1 << 30, self.bs, 2, self.precision, self.serialize, False + ) def build(self): with fluid.program_guard(self.main_program, self.startup_program): - boxes = fluid.data(name='bboxes', - shape=[-1, self.num_boxes, 4], - dtype='float32') - scores = fluid.data(name='scores', - shape=[-1, self.num_classes, self.num_boxes], - dtype='float32') + boxes = fluid.data( + name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32' + ) + scores = fluid.data( + name='scores', + shape=[-1, self.num_classes, self.num_boxes], + dtype='float32', + ) multiclass_nms_out = fluid.layers.multiclass_nms( bboxes=boxes, scores=scores, @@ -57,17 +59,26 @@ class TensorRTMultiClassNMSTest(InferencePassTest): nms_top_k=self.nms_top_k, nms_threshold=self.nms_threshold, keep_top_k=self.keep_top_k, - normalized=self.normalized) - mutliclass_nms_out = multiclass_nms_out + 1. + normalized=self.normalized, + ) + mutliclass_nms_out = multiclass_nms_out + 1.0 multiclass_nms_out = fluid.layers.reshape( - multiclass_nms_out, [self.bs, 1, self.keep_top_k, 6], - name='reshape') + multiclass_nms_out, + [self.bs, 1, self.keep_top_k, 6], + name='reshape', + ) out = fluid.layers.batch_norm(multiclass_nms_out, is_test=True) - boxes_data = np.arange(self.num_boxes * 4).reshape( - [self.bs, self.num_boxes, 4]).astype('float32') - scores_data = np.arange(1 * self.num_classes * self.num_boxes).reshape( - [self.bs, self.num_classes, self.num_boxes]).astype('float32') + boxes_data = ( + np.arange(self.num_boxes * 4) + .reshape([self.bs, self.num_boxes, 4]) + .astype('float32') + ) + scores_data = ( + np.arange(1 * self.num_classes * self.num_boxes) + .reshape([self.bs, self.num_classes, self.num_boxes]) + .astype('float32') + ) self.feeds = { 'bboxes': boxes_data, 'scores': scores_data, @@ -80,7 +91,8 @@ class TensorRTMultiClassNMSTest(InferencePassTest): def run_test_all(self): precision_opt = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_opt = [False, True] max_shape = { @@ -91,13 +103,15 @@ class TensorRTMultiClassNMSTest(InferencePassTest): dynamic_shape_opt = [ None, InferencePassTest.DynamicShapeParam( - { - 'bboxes': [1, 1, 4], - 'scores': [1, 1, 1] - }, max_shape, opt_shape, False) + {'bboxes': [1, 1, 4], 'scores': [1, 1, 1]}, + max_shape, + opt_shape, + False, + ), ] for precision, serialize, dynamic_shape in itertools.product( - precision_opt, serialize_opt, dynamic_shape_opt): + precision_opt, serialize_opt, dynamic_shape_opt + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape @@ -109,7 +123,8 @@ class TensorRTMultiClassNMSTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def test_base(self): self.run_test() @@ -129,10 +144,11 @@ class TensorRTMultiClassNMSTest(InferencePassTest): } opt_shape = max_shape self.dynamic_shape_params = InferencePassTest.DynamicShapeParam( - { - 'bboxes': [1, 1, 4], - 'scores': [1, 1, 1] - }, max_shape, opt_shape, False) + {'bboxes': [1, 1, 4], 'scores': [1, 1, 1]}, + max_shape, + opt_shape, + False, + ) self.run_test() def test_background(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py index 
e5f5908fd0373e475368d26e135a30a990ee1d11..3582d9848d5838eb98ae612e0ed735011290b244 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py @@ -22,20 +22,23 @@ from paddle.fluid.core import AnalysisConfig class TRTNearestInterpTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): if self.data_layout == 'NCHW': shape = [ - -1, self.channels, self.origin_shape[0], - self.origin_shape[1] + -1, + self.channels, + self.origin_shape[0], + self.origin_shape[1], ] else: shape = [ - -1, self.origin_shape[0], self.origin_shape[1], - self.channels + -1, + self.origin_shape[0], + self.origin_shape[1], + self.channels, ] data = fluid.data(name='data', shape=shape, dtype='float32') resize_out = self.append_nearest_interp(data) @@ -43,13 +46,17 @@ class TRTNearestInterpTest(InferencePassTest): if self.data_layout == 'NCHW': shape = [ - self.bs, self.channels, self.origin_shape[0], - self.origin_shape[1] + self.bs, + self.channels, + self.origin_shape[0], + self.origin_shape[1], ] else: shape = [ - self.bs, self.origin_shape[0], self.origin_shape[1], - self.channels + self.bs, + self.origin_shape[0], + self.origin_shape[1], + self.channels, ] self.feeds = { @@ -57,7 +64,8 @@ class TRTNearestInterpTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTNearestInterpTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -71,26 +79,30 @@ class TRTNearestInterpTest(InferencePassTest): self.data_layout = 'NCHW' def append_nearest_interp(self, data): - if self.scale > 0.: - return fluid.layers.resize_nearest(data, - scale=self.scale, - align_corners=self.align_corners, - data_format=self.data_layout) - return fluid.layers.resize_nearest(data, - out_shape=self.resize_shape, - align_corners=self.align_corners, - data_format=self.data_layout) + if self.scale > 0.0: + return fluid.layers.resize_nearest( + data, + scale=self.scale, + align_corners=self.align_corners, + data_format=self.data_layout, + ) + return fluid.layers.resize_nearest( + data, + out_shape=self.resize_shape, + align_corners=self.align_corners, + data_format=self.data_layout, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTNearestInterpTest1(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -102,10 +114,9 @@ class TRTNearestInterpTest1(TRTNearestInterpTest): class TRTNearestInterpTest2(TRTNearestInterpTest): - def set_params(self): self.bs = 4 - self.scale = 2. 
+ self.scale = 2.0 self.channels = 3 self.origin_shape = (16, 16) # HW self.resize_shape = (32, 32) # HW @@ -114,7 +125,6 @@ class TRTNearestInterpTest2(TRTNearestInterpTest): class TRTNearestInterpTest3(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = 0 @@ -126,7 +136,6 @@ class TRTNearestInterpTest3(TRTNearestInterpTest): class TRTNearestInterpTest4(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -138,7 +147,6 @@ class TRTNearestInterpTest4(TRTNearestInterpTest): class TRTNearestInterpTest5(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -150,10 +158,9 @@ class TRTNearestInterpTest5(TRTNearestInterpTest): class TRTNearestInterpTest6(TRTNearestInterpTest): - def set_params(self): self.bs = 4 - self.scale = 2. + self.scale = 2.0 self.channels = 3 self.origin_shape = (16, 16) # HW self.resize_shape = (32, 32) # HW @@ -162,7 +169,6 @@ class TRTNearestInterpTest6(TRTNearestInterpTest): class TRTNearestInterpTest7(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -174,7 +180,6 @@ class TRTNearestInterpTest7(TRTNearestInterpTest): class TRTNearestInterpTest8(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -186,7 +191,6 @@ class TRTNearestInterpTest8(TRTNearestInterpTest): class TRTNearestInterpTest9(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py index 8e6a28c483db334145b1438645584f6f598feb44..fc145fc410ad3794afae814fc2749fe9bb32c6c8 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py @@ -23,20 +23,23 @@ from paddle.fluid.core import AnalysisConfig class TRTNearestInterpTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): if self.data_layout == 'NCHW': shape = [ - -1, self.channels, self.origin_shape[0], - self.origin_shape[1] + -1, + self.channels, + self.origin_shape[0], + self.origin_shape[1], ] else: shape = [ - -1, self.origin_shape[0], self.origin_shape[1], - self.channels + -1, + self.origin_shape[0], + self.origin_shape[1], + self.channels, ] data = fluid.data(name='data', shape=shape, dtype='float32') resize_out = self.append_nearest_interp(data) @@ -44,13 +47,17 @@ class TRTNearestInterpTest(InferencePassTest): if self.data_layout == 'NCHW': shape = [ - self.bs, self.channels, self.origin_shape[0], - self.origin_shape[1] + self.bs, + self.channels, + self.origin_shape[0], + self.origin_shape[1], ] else: shape = [ - self.bs, self.origin_shape[0], self.origin_shape[1], - self.channels + self.bs, + self.origin_shape[0], + self.origin_shape[1], + self.channels, ] self.feeds = { @@ -58,7 +65,8 @@ class TRTNearestInterpTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTNearestInterpTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -71,31 +79,35 @@ class TRTNearestInterpTest(InferencePassTest): self.data_layout = 'NCHW' def append_nearest_interp(self, data): - if self.scale > 0.: - return F.interpolate(data, - scale_factor=self.scale, - 
align_corners=self.align_corners, - mode='nearest', - data_format=self.data_layout) - return F.interpolate(data, - size=self.resize_shape, - align_corners=self.align_corners, - mode='nearest', - data_format=self.data_layout) + if self.scale > 0.0: + return F.interpolate( + data, + scale_factor=self.scale, + align_corners=self.align_corners, + mode='nearest', + data_format=self.data_layout, + ) + return F.interpolate( + data, + size=self.resize_shape, + align_corners=self.align_corners, + mode='nearest', + data_format=self.data_layout, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTNearestInterpTest1(TRTNearestInterpTest): - def set_params(self): self.bs = 4 - self.scale = 2. + self.scale = 2.0 self.channels = 3 self.origin_shape = (16, 16) # HW self.resize_shape = (32, 32) # HW @@ -104,7 +116,6 @@ class TRTNearestInterpTest1(TRTNearestInterpTest): class TRTNearestInterpTest2(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -116,7 +127,6 @@ class TRTNearestInterpTest2(TRTNearestInterpTest): class TRTNearestInterpTest3(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 @@ -128,10 +138,9 @@ class TRTNearestInterpTest3(TRTNearestInterpTest): class TRTNearestInterpTest4(TRTNearestInterpTest): - def set_params(self): self.bs = 4 - self.scale = 2. + self.scale = 2.0 self.channels = 3 self.origin_shape = (16, 16) # HW self.resize_shape = (32, 32) # HW @@ -140,7 +149,6 @@ class TRTNearestInterpTest4(TRTNearestInterpTest): class TRTNearestInterpTest5(TRTNearestInterpTest): - def set_params(self): self.bs = 4 self.scale = -1 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py index ae6dbefa86da0c02cc4b4e1ddb7149557e7613f2..338d7d8a481075e14c5a428bc23867a82f0f5ab1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py @@ -21,15 +21,14 @@ from paddle.fluid.core import AnalysisConfig class PadOpTRTTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[1, 3, 128, 128], - dtype="float32") - pad_out = fluid.layers.pad(x=data, - paddings=[0, 0, 0, 0, 0, 1, 1, 2], - pad_value=0.0) + data = fluid.data( + name="data", shape=[1, 3, 128, 128], dtype="float32" + ) + pad_out = fluid.layers.pad( + x=data, paddings=[0, 0, 0, 0, 0, 1, 1, 2], pad_value=0.0 + ) out = fluid.layers.batch_norm(pad_out, is_test=True) self.feeds = { @@ -37,7 +36,8 @@ class PadOpTRTTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = PadOpTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py index c4488a57f96057c2e2edd9773ecb699c628b8cee..7e7499a19c718e477f02c3e36943657d3bdff229 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py 
@@ -26,7 +26,6 @@ from paddle.fluid.core import AnalysisConfig class TensorRTPool3dTest(InferencePassTest): - def setUp(self): self.bs = 1 self.channel = 3 @@ -44,10 +43,9 @@ class TensorRTPool3dTest(InferencePassTest): self.serialize = False self.precision = AnalysisConfig.Precision.Float32 self.feeds = { - 'data': - np.random.random( - [self.bs, self.channel, self.depth, self.height, - self.width]).astype('float32'), + 'data': np.random.random( + [self.bs, self.channel, self.depth, self.height, self.width] + ).astype('float32'), } def set_extra_config(self): @@ -56,22 +54,26 @@ class TensorRTPool3dTest(InferencePassTest): def build_network(self): self.set_extra_config() self.trt_parameters = TensorRTPool3dTest.TensorRTParam( - 1 << 30, self.bs, 0, self.precision, self.serialize, False) + 1 << 30, self.bs, 0, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], - dtype='float32') - pool_out = fluid.layers.pool3d(input=data, - pool_size=self.pool_size, - pool_type=self.pool_type, - pool_stride=self.pool_stride, - pool_padding=self.pool_padding, - global_pooling=self.global_pooling, - ceil_mode=self.ceil_mode, - exclusive=self.exclusive) - #out = fluid.layers.batch_norm(pool_out, is_test=True) + dtype='float32', + ) + pool_out = fluid.layers.pool3d( + input=data, + pool_size=self.pool_size, + pool_type=self.pool_type, + pool_stride=self.pool_stride, + pool_padding=self.pool_padding, + global_pooling=self.global_pooling, + ceil_mode=self.ceil_mode, + exclusive=self.exclusive, + ) + # out = fluid.layers.batch_norm(pool_out, is_test=True) self.fetch_list = [pool_out] def check_output(self): @@ -84,11 +86,13 @@ class TensorRTPool3dTest(InferencePassTest): elif self.precision == AnalysisConfig.Precision.Half: atol, rtol = (1e-3, 1e-3) else: - raise ValueError("Unsupported precision {}".format( - self.precision)) + raise ValueError( + "Unsupported precision {}".format(self.precision) + ) self.check_output_with_option(use_gpu, atol=atol, rtol=rtol) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self): self.build_network() @@ -96,30 +100,51 @@ class TensorRTPool3dTest(InferencePassTest): def test(self): precision_options = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_options = [False, True] dynamic_shape_profile = InferencePassTest.DynamicShapeParam( { 'data': [ - self.bs, self.channel, self.depth // 2, self.height // 2, - self.width // 2 + self.bs, + self.channel, + self.depth // 2, + self.height // 2, + self.width // 2, ] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, False) + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + False, + ) dynamic_shape_options = [None, dynamic_shape_profile] for precision, serialize, dynamic_shape in itertools.product( - precision_options, serialize_options, dynamic_shape_options): + precision_options, serialize_options, dynamic_shape_options + ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, 
Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic)): + 'Precision: {}, Serialize: {}, Dynamic: {}'.format( + precision, serialize, is_dynamic + ) + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape @@ -127,7 +152,6 @@ class TensorRTPool3dTest(InferencePassTest): class TensorRTAvgPool3dTest(TensorRTPool3dTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'avg' @@ -139,7 +163,6 @@ class TensorRTAvgPool3dTest(TensorRTPool3dTest): class TensorRTGlobalPool3dTest(TensorRTPool3dTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -151,7 +174,6 @@ class TensorRTGlobalPool3dTest(TensorRTPool3dTest): class TensorRTCeilPool3dTest(TensorRTPool3dTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -163,7 +185,6 @@ class TensorRTCeilPool3dTest(TensorRTPool3dTest): class TensorRTExclusivePool3dTest(TensorRTPool3dTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -175,7 +196,6 @@ class TensorRTExclusivePool3dTest(TensorRTPool3dTest): class TensorRTSamePaddingPool3dTest(InferencePassTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -187,7 +207,6 @@ class TensorRTSamePaddingPool3dTest(InferencePassTest): class TensorRTValidPaddingPool3dTest(InferencePassTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -199,7 +218,6 @@ class TensorRTValidPaddingPool3dTest(InferencePassTest): class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): - def setUp(self): self.bs = 1 self.channel = 3 @@ -210,24 +228,26 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): self.serialize = False self.precision = AnalysisConfig.Precision.Float32 self.feeds = { - 'data': - np.random.random( - [self.bs, self.channel, self.depth, self.height, - self.width]).astype('float32'), + 'data': np.random.random( + [self.bs, self.channel, self.depth, self.height, self.width] + ).astype('float32'), } def build_network(self): self.trt_parameters = TensorRTPool3dTest.TensorRTParam( - 1 << 30, self.bs, 0, self.precision, self.serialize, False) + 1 << 30, self.bs, 0, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], - dtype='float32') + dtype='float32', + ) pool_out = paddle.nn.functional.adaptive_avg_pool3d( - x=data, output_size=[3, 3, 3]) - #out = fluid.layers.batch_norm(pool_out, is_test=True) + x=data, output_size=[3, 3, 3] + ) + # out = fluid.layers.batch_norm(pool_out, is_test=True) self.fetch_list = [pool_out] def check_output(self): @@ -237,7 +257,8 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self): self.build_network() @@ -245,30 +266,51 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): def test(self): precision_options = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_options = [False, True] dynamic_shape_profile = InferencePassTest.DynamicShapeParam( { 'data': [ - self.bs, self.channel, self.depth // 2, self.height // 2, - self.width // 2 + self.bs, + self.channel, + self.depth // 2, + self.height // 2, + 
self.width // 2, ] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, False) + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + False, + ) dynamic_shape_options = [None, dynamic_shape_profile] for precision, serialize, dynamic_shape in itertools.product( - precision_options, serialize_options, dynamic_shape_options): + precision_options, serialize_options, dynamic_shape_options + ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic)): + 'Precision: {}, Serialize: {}, Dynamic: {}'.format( + precision, serialize, is_dynamic + ) + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape @@ -276,7 +318,6 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): - def setUp(self): self.bs = 1 self.channel = 3 @@ -287,24 +328,26 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): self.serialize = False self.precision = AnalysisConfig.Precision.Float32 self.feeds = { - 'data': - np.random.random( - [self.bs, self.channel, self.depth, self.height, - self.width]).astype('float32'), + 'data': np.random.random( + [self.bs, self.channel, self.depth, self.height, self.width] + ).astype('float32'), } def build_network(self): self.trt_parameters = TensorRTPool3dTest.TensorRTParam( - 1 << 30, self.bs, 0, self.precision, self.serialize, False) + 1 << 30, self.bs, 0, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], - dtype='float32') + dtype='float32', + ) pool_out = paddle.nn.functional.adaptive_max_pool3d( - x=data, output_size=[3, 3, 3]) - #out = fluid.layers.batch_norm(pool_out, is_test=True) + x=data, output_size=[3, 3, 3] + ) + # out = fluid.layers.batch_norm(pool_out, is_test=True) self.fetch_list = [pool_out] def check_output(self): @@ -314,7 +357,8 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self): self.build_network() @@ -322,30 +366,51 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): def test(self): precision_options = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_options = [False, True] dynamic_shape_profile = InferencePassTest.DynamicShapeParam( { 'data': [ - self.bs, self.channel, self.depth // 2, self.height // 2, - self.width // 2 + self.bs, + self.channel, + self.depth // 2, + self.height // 2, + self.width // 2, ] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, { - 'data': - [self.bs, self.channel, self.depth, self.height, self.width] - }, False) + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + { + 'data': [ + self.bs, + self.channel, + self.depth, + self.height, + self.width, + ] + }, + False, + ) dynamic_shape_options = [None, 
dynamic_shape_profile] for precision, serialize, dynamic_shape in itertools.product( - precision_options, serialize_options, dynamic_shape_options): + precision_options, serialize_options, dynamic_shape_options + ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic)): + 'Precision: {}, Serialize: {}, Dynamic: {}'.format( + precision, serialize, is_dynamic + ) + ): self.precision = precision self.serialize = serialize self.dynamic_shape_params = dynamic_shape diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py index f6eaa2fb8c75e0d1d525483ebb4b457470c85065..2ceb891024ee06a4557bb087a10c12d17c51f24e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py @@ -25,7 +25,6 @@ from paddle.fluid.core import AnalysisConfig class TensorRTPoolTest(InferencePassTest): - def setUp(self): self.bs = 1 self.channel = 2 @@ -42,9 +41,9 @@ class TensorRTPoolTest(InferencePassTest): self.serialize = False self.precision = AnalysisConfig.Precision.Float32 self.feeds = { - 'data': - np.random.random([self.bs, self.channel, self.height, - self.width]).astype('float32'), + 'data': np.random.random( + [self.bs, self.channel, self.height, self.width] + ).astype('float32'), } def set_extra_config(self): @@ -53,20 +52,25 @@ class TensorRTPoolTest(InferencePassTest): def build_network(self): self.set_extra_config() self.trt_parameters = TensorRTPoolTest.TensorRTParam( - 1 << 30, self.bs, 0, self.precision, self.serialize, False) + 1 << 30, self.bs, 0, self.precision, self.serialize, False + ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=[-1, self.channel, self.height, self.width], - dtype='float32') - pool_out = fluid.layers.pool2d(input=data, - pool_size=self.pool_size, - pool_type=self.pool_type, - pool_stride=self.pool_stride, - pool_padding=self.pool_padding, - global_pooling=self.global_pooling, - ceil_mode=self.ceil_mode, - exclusive=self.exclusive) + data = fluid.data( + name='data', + shape=[-1, self.channel, self.height, self.width], + dtype='float32', + ) + pool_out = fluid.layers.pool2d( + input=data, + pool_size=self.pool_size, + pool_type=self.pool_type, + pool_stride=self.pool_stride, + pool_padding=self.pool_padding, + global_pooling=self.global_pooling, + ceil_mode=self.ceil_mode, + exclusive=self.exclusive, + ) out = fluid.layers.batch_norm(pool_out, is_test=True) self.fetch_list = [out] @@ -80,11 +84,13 @@ class TensorRTPoolTest(InferencePassTest): elif self.precision == AnalysisConfig.Precision.Half: atol, rtol = (1e-3, 1e-3) else: - raise ValueError("Unsupported precision {}".format( - self.precision)) + raise ValueError( + "Unsupported precision {}".format(self.precision) + ) self.check_output_with_option(use_gpu, atol=atol, rtol=rtol) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def run_test(self): self.build_network() @@ -92,23 +98,34 @@ class TensorRTPoolTest(InferencePassTest): def test(self): precision_options = [ - AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half + AnalysisConfig.Precision.Float32, + AnalysisConfig.Precision.Half, ] serialize_options = [False, True] dynamic_shape_profile = 
InferencePassTest.DynamicShapeParam( { - 'data': - [self.bs, self.channel, self.height // 2, self.width // 2] - }, {'data': [self.bs, self.channel, self.height, self.width]}, - {'data': [self.bs, self.channel, self.height, self.width]}, False) + 'data': [ + self.bs, + self.channel, + self.height // 2, + self.width // 2, + ] + }, + {'data': [self.bs, self.channel, self.height, self.width]}, + {'data': [self.bs, self.channel, self.height, self.width]}, + False, + ) dynamic_shape_options = [None, dynamic_shape_profile] for precision, serialize, dynamic_shape in itertools.product( - precision_options, serialize_options, dynamic_shape_options): + precision_options, serialize_options, dynamic_shape_options + ): is_dynamic = True if dynamic_shape_options is not None else False with self.subTest( - 'Precision: {}, Serialize: {}, Dynamic: {}'.format( - precision, serialize, is_dynamic)): + 'Precision: {}, Serialize: {}, Dynamic: {}'.format( + precision, serialize, is_dynamic + ) + ): self.precision = precision self.serialize = serialize self.dynamic_shape = dynamic_shape @@ -116,7 +133,6 @@ class TensorRTPoolTest(InferencePassTest): class TensorRTAvgPoolTest(TensorRTPoolTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'avg' @@ -128,7 +144,6 @@ class TensorRTAvgPoolTest(TensorRTPoolTest): class TensorRTAvgCeilPoolTest(TensorRTPoolTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'avg' @@ -140,7 +155,6 @@ class TensorRTAvgCeilPoolTest(TensorRTPoolTest): class TensorRTGlobalPoolTest(TensorRTPoolTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -152,7 +166,6 @@ class TensorRTGlobalPoolTest(TensorRTPoolTest): class TensorRTCeilPoolTest(TensorRTPoolTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -164,7 +177,6 @@ class TensorRTCeilPoolTest(TensorRTPoolTest): class TensorRTExclusivePoolTest(TensorRTPoolTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -176,7 +188,6 @@ class TensorRTExclusivePoolTest(TensorRTPoolTest): class TensorRTSamePaddingPoolTest(InferencePassTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' @@ -188,7 +199,6 @@ class TensorRTSamePaddingPoolTest(InferencePassTest): class TensorRTValidPaddingPoolTest(InferencePassTest): - def set_extra_config(self): self.pool_size = 2 self.pool_type = 'max' diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py index f699c5572faa0103d617c18d9ae5c6b0a5e649d3..05621516d0b5ac5dd70a6a374c8a2de8eb072253 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py @@ -22,15 +22,14 @@ from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, -1, -1], - dtype="float32") - reduce_mean = fluid.layers.reduce_mean(data, - dim=[2, -1], - keep_dim=True) + data = fluid.data( + name="data", shape=[-1, 3, -1, -1], dtype="float32" + ) + reduce_mean = fluid.layers.reduce_mean( + data, dim=[2, -1], keep_dim=True + ) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { @@ -38,27 +37,31 @@ class TRTReduceMeanTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = 
TRTReduceMeanTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam( - {'data': [1, 3, 16, 16]}, {'data': [3, 3, 56, 56]}, - {'data': [3, 3, 56, 56]}, False) + {'data': [1, 3, 16, 16]}, + {'data': [3, 3, 56, 56]}, + {'data': [3, 3, 56, 56]}, + False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanAllNoBatchTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, -1, -1], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, -1, -1], dtype="float32" + ) reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) @@ -67,30 +70,36 @@ class TRTReduceMeanAllNoBatchTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] - self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( - {'data': [1, 3, 16, 16]}, {'data': [3, 3, 56, 56]}, - {'data': [3, 3, 56, 56]}, False) + self.dynamic_shape_params = ( + TRTReduceMeanAllNoBatchTest.DynamicShapeParam( + {'data': [1, 3, 16, 16]}, + {'data': [3, 3, 56, 56]}, + {'data': [3, 3, 56, 56]}, + False, + ) + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanTestFP16(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, -1, -1], - dtype="float32") - reduce_mean = fluid.layers.reduce_mean(data, - dim=[2, -1], - keep_dim=True) + data = fluid.data( + name="data", shape=[-1, 3, -1, -1], dtype="float32" + ) + reduce_mean = fluid.layers.reduce_mean( + data, dim=[2, -1], keep_dim=True + ) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { @@ -98,27 +107,31 @@ class TRTReduceMeanTestFP16(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam( - {'data': [1, 3, 16, 16]}, {'data': [3, 3, 56, 56]}, - {'data': [3, 3, 56, 56]}, False) + {'data': [1, 3, 16, 16]}, + {'data': [3, 3, 56, 56]}, + {'data': [3, 3, 56, 56]}, + False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanAllTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, 
self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 56, 56], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 56, 56], dtype="float32" + ) reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) @@ -127,30 +140,34 @@ class TRTReduceMeanAllTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam( - {'data': [1, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, - {'data': [3, 3, 56, 56]}, False) + {'data': [1, 3, 56, 56]}, + {'data': [3, 3, 56, 56]}, + {'data': [3, 3, 56, 56]}, + False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanTestStatic(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[3, 3, 56, 56], - dtype="float32") - reduce_mean = fluid.layers.reduce_mean(data, - dim=[2, -1], - keep_dim=True) + data = fluid.data( + name="data", shape=[3, 3, 56, 56], dtype="float32" + ) + reduce_mean = fluid.layers.reduce_mean( + data, dim=[2, -1], keep_dim=True + ) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { @@ -158,7 +175,8 @@ class TRTReduceMeanTestStatic(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -166,16 +184,16 @@ class TRTReduceMeanTestStatic(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanStaticAllTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[4, 3, 56, 56], - dtype="float32") + data = fluid.data( + name="data", shape=[4, 3, 56, 56], dtype="float32" + ) reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) @@ -184,7 +202,8 @@ class TRTReduceMeanStaticAllTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -192,16 +211,16 @@ class TRTReduceMeanStaticAllTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanStaticFP16(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[4, 3, 56, 56], - dtype="float32") + data = 
fluid.data( + name="data", shape=[4, 3, 56, 56], dtype="float32" + ) reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) @@ -210,27 +229,27 @@ class TRTReduceMeanStaticFP16(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - flatten=True, - atol=1e-3, - rtol=1e-3) + self.check_output_with_option( + use_gpu, flatten=True, atol=1e-3, rtol=1e-3 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceMeanFP16Static(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[4, 3, 56, 56], - dtype="float32") + data = fluid.data( + name="data", shape=[4, 3, 56, 56], dtype="float32" + ) reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) @@ -239,18 +258,19 @@ class TRTReduceMeanFP16Static(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False + ) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True - self.check_output_with_option(use_gpu, - flatten=True, - atol=1e-3, - rtol=1e-3) + self.check_output_with_option( + use_gpu, flatten=True, atol=1e-3, rtol=1e-3 + ) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py index 13663676c6db135dc605e3cb49df0ddeaeced99b..09e9a652ca866ed81d0981f5ef10c1438bb9f2d4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py @@ -22,15 +22,14 @@ from paddle.fluid.core import AnalysisConfig class TRTReduceSumTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 10, 192], - dtype="float32") - reduce_sum = fluid.layers.reduce_sum(data, - dim=[2, -1], - keep_dim=True) + data = fluid.data( + name="data", shape=[-1, 3, 10, 192], dtype="float32" + ) + reduce_sum = fluid.layers.reduce_sum( + data, dim=[2, -1], keep_dim=True + ) out = fluid.layers.batch_norm(reduce_sum, is_test=True) self.feeds = { @@ -38,27 +37,31 @@ class TRTReduceSumTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceSumTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceSumTest.DynamicShapeParam( - {'data': [1, 3, 8, 8]}, {'data': [3, 3, 10, 192]}, - {'data': [3, 3, 10, 192]}, False) + {'data': [1, 3, 8, 8]}, + {'data': [3, 3, 10, 192]}, + {'data': [3, 3, 10, 192]}, + 
False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReduceSumAllTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 10, 192], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 10, 192], dtype="float32" + ) reduce_sum = fluid.layers.reduce_sum(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_sum, is_test=True) @@ -67,18 +70,23 @@ class TRTReduceSumAllTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReduceSumAllTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceSumAllTest.DynamicShapeParam( - {'data': [1, 3, 8, 8]}, {'data': [3, 3, 10, 192]}, - {'data': [3, 3, 10, 192]}, False) + {'data': [1, 3, 8, 8]}, + {'data': [3, 3, 10, 192]}, + {'data': [3, 3, 10, 192]}, + False, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py index ade320796ebef1f98f3c5dd46d88b1b397365f94..075919b7bf6e357de86a514c2ce5b4241743ca59 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py @@ -22,19 +22,20 @@ from paddle.fluid.core import AnalysisConfig class TRTReshapeTest(InferencePassTest): - def setUp(self): self.bs = 1 self.input_shape = [16, 3, 8] self.reshape = [-1, 4, 4, 24] self.data_shape = [ - self.bs, self.input_shape[0], self.input_shape[1], - self.input_shape[2] + self.bs, + self.input_shape[0], + self.input_shape[1], + self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=self.data_shape, - dtype='float32') + data = fluid.data( + name='data', shape=self.data_shape, dtype='float32' + ) reshape_out = self.append_reshape(data, self.reshape) out = fluid.layers.batch_norm(reshape_out, is_test=True) self.feeds = { @@ -42,7 +43,8 @@ class TRTReshapeTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTReshapeTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_reshape(self, data, reshape): @@ -53,23 +55,25 @@ class TRTReshapeTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTReshapeTest1(TRTReshapeTest): - def setUp(self): self.bs = 2 self.input_shape = [23, 13, 12] self.reshape = [2, 0, -1, 6] self.data_shape = [ - self.bs, self.input_shape[0], self.input_shape[1], - self.input_shape[2] + self.bs, + self.input_shape[0], + 
self.input_shape[1], + self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=self.data_shape, - dtype='float32') + data = fluid.data( + name='data', shape=self.data_shape, dtype='float32' + ) reshape_out = self.append_reshape(data, self.reshape) out = fluid.layers.batch_norm(reshape_out, is_test=True) self.feeds = { @@ -77,24 +81,26 @@ class TRTReshapeTest1(TRTReshapeTest): } self.enable_trt = True self.trt_parameters = TRTReshapeTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] class TRTReshapeTest2(TRTReshapeTest): - def setUp(self): self.bs = 2 self.input_shape = [23, 13, 12] self.reshape = [2, 0, -1, 6] self.data_shape = [ - self.bs, self.input_shape[0], self.input_shape[1], - self.input_shape[2] + self.bs, + self.input_shape[0], + self.input_shape[1], + self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=self.data_shape, - dtype='float32') + data = fluid.data( + name='data', shape=self.data_shape, dtype='float32' + ) reshape_out = fluid.layers.reshape(x=data, shape=self.reshape) out = fluid.layers.batch_norm(reshape_out, is_test=True) self.feeds = { @@ -102,24 +108,26 @@ class TRTReshapeTest2(TRTReshapeTest): } self.enable_trt = True self.trt_parameters = TRTReshapeTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] class TRTReshapeTest3(TRTReshapeTest): - def setUp(self): self.bs = 1 self.input_shape = [7, 16, 27] self.reshape = [1, 8, 14, 0] self.data_shape = [ - self.bs, self.input_shape[0], self.input_shape[1], - self.input_shape[2] + self.bs, + self.input_shape[0], + self.input_shape[1], + self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', - shape=self.data_shape, - dtype='float32') + data = fluid.data( + name='data', shape=self.data_shape, dtype='float32' + ) bn_out = fluid.layers.batch_norm(data, is_test=True) out = self.append_reshape(bn_out, self.reshape) self.feeds = { @@ -127,7 +135,8 @@ class TRTReshapeTest3(TRTReshapeTest): } self.enable_trt = True self.trt_parameters = TRTReshapeTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) ''' self.dynamic_shape_params = TRTReshapeTest.DynamicShapeParam({ 'data': [1, 3, 8, 8] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_roi_align_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_roi_align_op.py index a854e74110c638fa77ebad0a9e1263ea10d8680c..3b1d75d6954484bab268da8b8fdf6b17a9ff7db3 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_roi_align_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_roi_align_op.py @@ -22,7 +22,6 @@ from paddle.fluid.core import AnalysisConfig class TRTRoiAlignTest(InferencePassTest): - def setUp(self): self.bs = 2 self.num_rois = 4 @@ -35,21 +34,27 @@ class TRTRoiAlignTest(InferencePassTest): def build(self): self.trt_parameters = TRTRoiAlignTest.TensorRTParam( - 1 << 30, self.bs * self.num_rois, 1, self.precision, self.serialize, - False) + 1 << 30, + self.bs * self.num_rois, + 1, + self.precision, + self.serialize, + 
False, + ) with fluid.program_guard(self.main_program, self.startup_program): data_shape = [-1, self.channel, self.height, self.width] data = fluid.data(name='data', shape=data_shape, dtype='float32') - rois = fluid.data(name='rois', - shape=[-1, 4], - dtype='float32', - lod_level=1) + rois = fluid.data( + name='rois', shape=[-1, 4], dtype='float32', lod_level=1 + ) roi_align_out = fluid.layers.roi_align(data, rois) out = fluid.layers.batch_norm(roi_align_out, is_test=True) rois_lod = fluid.create_lod_tensor( np.random.random([self.bs * self.num_rois, 4]).astype('float32'), - [[self.num_rois, self.num_rois]], fluid.CPUPlace()) + [[self.num_rois, self.num_rois]], + fluid.CPUPlace(), + ) data_shape[0] = self.bs self.feeds = { @@ -66,27 +71,38 @@ class TRTRoiAlignTest(InferencePassTest): atol = 1e-3 self.check_output_with_option(use_gpu, atol, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) def set_dynamic(self): min_shape_spec = dict() max_shape_spec = dict() opt_shape_spec = dict() min_shape_spec['data'] = [ - self.bs, self.channel, self.height // 2, self.width // 2 + self.bs, + self.channel, + self.height // 2, + self.width // 2, ] min_shape_spec['rois'] = [1, 4] max_shape_spec['data'] = [ - self.bs, self.channel, self.height * 2, self.width * 2 + self.bs, + self.channel, + self.height * 2, + self.width * 2, ] max_shape_spec['rois'] = [self.bs * self.num_rois, 4] opt_shape_spec['data'] = [ - self.bs, self.channel, self.height, self.width + self.bs, + self.channel, + self.height, + self.width, ] opt_shape_spec['rois'] = [self.bs * self.num_rois, 4] self.dynamic_shape_params = InferencePassTest.DynamicShapeParam( - min_shape_spec, max_shape_spec, opt_shape_spec, False) + min_shape_spec, max_shape_spec, opt_shape_spec, False + ) def run_test(self): self.build() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py index 77f2170f3e28da1645c601afb061a45c6718cfaf..966e1e77e12429be4cdcb38fb02ffd0d719720f5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py @@ -22,7 +22,6 @@ from paddle.fluid.core import AnalysisConfig class TRTScaleTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[-1, 512], dtype="float32") @@ -34,30 +33,30 @@ class TRTScaleTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTScaleTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_scale(self, data): - return fluid.layers.scale(x=data, - scale=2.0, - bias=-1.0, - bias_after_scale=False) + return fluid.layers.scale( + x=data, scale=2.0, bias=-1.0, bias_after_scale=False + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTScaleShape2Test(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 512, 512], - dtype="float32") + data = fluid.data( + 
name="data", shape=[-1, 512, 512], dtype="float32" + ) scale_out = self.append_scale(data) out = fluid.layers.batch_norm(scale_out, is_test=True) @@ -66,21 +65,22 @@ class TRTScaleShape2Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTScaleShape2Test.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_scale(self, data): - return fluid.layers.scale(x=data, - scale=2.0, - bias=-1.0, - bias_after_scale=False) + return fluid.layers.scale( + x=data, scale=2.0, bias=-1.0, bias_after_scale=False + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py index 0752599e76b8cb43242dd827e270f5dda2576ecc..d5bbbcde1e163efccb41cb69263ccd71d2cb0f7f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py @@ -21,12 +21,11 @@ from paddle.fluid.core import AnalysisConfig class ShuffleChannelFuseTRTPassTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) reshape1 = fluid.layers.reshape(x=data, shape=[-1, 2, 3, 64, 64]) trans = fluid.layers.transpose(x=reshape1, perm=[0, 2, 1, 3, 4]) reshape2 = fluid.layers.reshape(x=trans, shape=[-1, 6, 64, 64]) @@ -37,7 +36,8 @@ class ShuffleChannelFuseTRTPassTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = ShuffleChannelFuseTRTPassTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -45,7 +45,8 @@ class ShuffleChannelFuseTRTPassTest(InferencePassTest): self.check_output() self.assertTrue( - PassVersionChecker.IsCompatible('shuffle_channel_detect_pass')) + PassVersionChecker.IsCompatible('shuffle_channel_detect_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py index e4d3f5b418e0e350364927b56c6e9dde8298c9ac..e9ea9948af7957272551960389bb01dfddfae8fa 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py @@ -20,9 +20,8 @@ import paddle.fluid.core as core from paddle.fluid.core import AnalysisConfig -#normal starts && ends +# normal starts && ends class SlicePluginTRTDynamicTest(InferencePassTest): - def setUpSliceParams(self): self.params_axes = [1, 3] self.params_starts = [0, 1] @@ -30,11 +29,15 @@ class SlicePluginTRTDynamicTest(InferencePassTest): def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTDynamicTest.TensorRTParam( - 1 << 30, 32, 1, 
AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False + ) self.enable_trt = True self.dynamic_shape_params = SlicePluginTRTDynamicTest.DynamicShapeParam( - {'data': [1, 1, 1, 1]}, {'data': [8, 8, 8, 8]}, - {'data': [8, 8, 8, 8]}, False) + {'data': [1, 1, 1, 1]}, + {'data': [8, 8, 8, 8]}, + {'data': [8, 8, 8, 8]}, + False, + ) def setUp(self): self.setUpSliceParams() @@ -44,10 +47,9 @@ class SlicePluginTRTDynamicTest(InferencePassTest): axes = self.params_axes starts = self.params_starts ends = self.params_ends - slice_out = fluid.layers.slice(data, - axes=axes, - starts=starts, - ends=ends) + slice_out = fluid.layers.slice( + data, axes=axes, starts=starts, ends=ends + ) self.feeds = { "data": np.random.random((3, 3, 3, 3)).astype("float32"), @@ -66,7 +68,6 @@ class SlicePluginTRTDynamicTest(InferencePassTest): class SlicePluginTRTDynamicBoundTest(SlicePluginTRTDynamicTest): - def setUpSliceParams(self): self.params_axes = [1, 3] self.params_starts = [0, 1] @@ -74,27 +75,40 @@ class SlicePluginTRTDynamicBoundTest(SlicePluginTRTDynamicTest): def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTDynamicBoundTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) self.enable_trt = True - self.dynamic_shape_params = SlicePluginTRTDynamicBoundTest.DynamicShapeParam( - {'data': [1, 1, 1, 1]}, {'data': [8, 8, 8, 8]}, - {'data': [8, 8, 8, 8]}, False) + self.dynamic_shape_params = ( + SlicePluginTRTDynamicBoundTest.DynamicShapeParam( + {'data': [1, 1, 1, 1]}, + {'data': [8, 8, 8, 8]}, + {'data': [8, 8, 8, 8]}, + False, + ) + ) class SlicePluginTRTDynamicNegativeBoundTest(SlicePluginTRTDynamicTest): - def setUpSliceParams(self): self.params_axes = [1, 3] self.params_starts = [-5, 1] self.params_ends = [2, 1000] def setUpTensorRTParams(self): - self.trt_parameters = SlicePluginTRTDynamicNegativeBoundTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + self.trt_parameters = ( + SlicePluginTRTDynamicNegativeBoundTest.TensorRTParam( + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) + ) self.enable_trt = True - self.dynamic_shape_params = SlicePluginTRTDynamicNegativeBoundTest.DynamicShapeParam( - {'data': [1, 1, 1, 1]}, {'data': [8, 8, 8, 8]}, - {'data': [8, 8, 8, 8]}, False) + self.dynamic_shape_params = ( + SlicePluginTRTDynamicNegativeBoundTest.DynamicShapeParam( + {'data': [1, 1, 1, 1]}, + {'data': [8, 8, 8, 8]}, + {'data': [8, 8, 8, 8]}, + False, + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py index 70334d92cc4ed80bca728963541c422e0aae8460..ab93e3f647513f6ab0d2b28d7b0bcfa20c30ddfc 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py @@ -20,9 +20,8 @@ import paddle.fluid.core as core from paddle.fluid.core import AnalysisConfig -#normal starts && ends +# normal starts && ends class SlicePluginTRTTest(InferencePassTest): - def setUpSliceParams(self): self.params_axes = [1, 3] self.params_starts = [0, 1] @@ -30,7 +29,8 @@ class SlicePluginTRTTest(InferencePassTest): def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 1, 
AnalysisConfig.Precision.Float32, False, False + ) self.enable_trt = True def setUp(self): @@ -41,10 +41,9 @@ class SlicePluginTRTTest(InferencePassTest): axes = self.params_axes starts = self.params_starts ends = self.params_ends - slice_out = fluid.layers.slice(data, - axes=axes, - starts=starts, - ends=ends) + slice_out = fluid.layers.slice( + data, axes=axes, starts=starts, ends=ends + ) out = fluid.layers.batch_norm(slice_out, is_test=True) self.feeds = { @@ -63,51 +62,48 @@ class SlicePluginTRTTest(InferencePassTest): self.check_output_with_option(use_gpu[i], atol) -#negative starts && ends +# negative starts && ends class SlicePluginTRTTestNegativeStartsAndEnds(SlicePluginTRTTest): - def setUpSliceParams(self): self.params_axes = [2, 3] self.params_starts = [-3, -2] self.params_ends = [-1, 3] -#exceeded bound starts && ends +# exceeded bound starts && ends class SlicePluginTRTTestStartsAndEndsBoundCheck(SlicePluginTRTTest): - def setUpSliceParams(self): self.params_axes = [2, 3] self.params_starts = [-5, -2] self.params_ends = [-1, 8] -#fp16 +# fp16 class SlicePluginTRTTestFp16(SlicePluginTRTTest): - def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False + ) self.enable_trt = True class StaticSlicePluginTRTTestFp16(SlicePluginTRTTest): - def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False + ) self.enable_trt = True class StaticSlicePluginTRTTestFp32(SlicePluginTRTTest): - def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, True, False + ) self.enable_trt = True class SlicePluginTRTTestInt32(SlicePluginTRTTest): - def setUp(self): self.setUpSliceParams() self.setUpTensorRTParams() @@ -116,10 +112,9 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest): axes = self.params_axes starts = self.params_starts ends = self.params_ends - slice_out = fluid.layers.slice(data, - axes=axes, - starts=starts, - ends=ends) + slice_out = fluid.layers.slice( + data, axes=axes, starts=starts, ends=ends + ) cast_out = fluid.layers.cast(slice_out, 'float32') out = fluid.layers.batch_norm(cast_out, is_test=True) @@ -130,10 +125,10 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest): class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest): - def setUpTensorRTParams(self): self.trt_parameters = SlicePluginTRTTest.TensorRTParam( - 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, True, False + ) self.enable_trt = True def setUp(self): @@ -144,10 +139,9 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest): axes = self.params_axes starts = self.params_starts ends = self.params_ends - slice_out = fluid.layers.slice(data, - axes=axes, - starts=starts, - ends=ends) + slice_out = fluid.layers.slice( + data, axes=axes, starts=starts, ends=ends + ) cast_out = fluid.layers.cast(slice_out, 'float32') out = fluid.layers.batch_norm(cast_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py index 
54c1f7226857353f7aac1f47587ec599cf7b7077..ce86d97afb0b1e27d9a326b6ad23980d54e45dc3 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py @@ -42,7 +42,8 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, ['mul', 'elementwise_add'], (1e-4, 1e-1) def add_ignore_pass_case(self): @@ -65,9 +66,10 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape of input:X of squeeze2 x_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) # axes of squeeze2 == [2, 3] x_shape += [1, 1] axes = [2, 3] @@ -79,9 +81,10 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): # 3. Generate legal shape of input:Y of matmul y_shape = draw( - st.lists(st.integers(min_value=1, max_value=8), - min_size=2, - max_size=2)) + st.lists( + st.integers(min_value=1, max_value=8), min_size=2, max_size=2 + ) + ) y_shape[0] = x_shape[1] # 4. Generate legal attr:axis of elementwise_add @@ -103,17 +106,11 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): "X": ["squeeze2_x"], }, axes=axes, - outputs={ - "Out": ["squeeze2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["squeeze2_out"], "XShape": ["xshape"]}, ) matmul_op = OpConfig( "matmul", - inputs={ - "X": ["squeeze2_out"], - "Y": ["matmul_y"] - }, + inputs={"X": ["squeeze2_out"], "Y": ["matmul_y"]}, outputs={"Out": ["matmul_out"]}, alpha=alpha, transpose_X=transpose_X, @@ -128,10 +125,7 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): add_op = OpConfig( "elementwise_add", - inputs={ - "X": ["matmul_out"], - "Y": ["bias"] - }, + inputs={"X": ["matmul_out"], "Y": ["bias"]}, outputs={"Out": ["add_out"]}, axis=axis, ) @@ -152,9 +146,11 @@ class TestSqueeze2MatmulFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=25, - passes=["trt_squeeze2_matmul_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=25, + passes=["trt_squeeze2_matmul_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py index 2472ff027e3cce9b21b5db04e26014e54152d625..7478f6a3734e71f04210f4ef1680bfe8aa0a1b54 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py @@ -24,12 +24,11 @@ from paddle.fluid.core import AnalysisConfig class TensorRTSubgraphPassFcTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) fc_out = fluid.layers.fc(input=[data], act=None, size=1000) reshape_out = fluid.layers.reshape(x=fc_out, shape=[1, 1000]) self.feeds = { @@ -37,7 +36,8 @@ class TensorRTSubgraphPassFcTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassFcTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, 
AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [reshape_out] def test_check_output(self): @@ -46,19 +46,19 @@ class TensorRTSubgraphPassFcTest(InferencePassTest): # TRT output shape of fc is (1, 1000, 1, 1). To compare the output value only, flatten the results. self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassConcatTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data(name="data1", - shape=[-1, 3, 64, 64], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[-1, 3, 64, 64], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 3, 64, 64], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[-1, 3, 64, 64], dtype="float32" + ) concat_out = fluid.layers.concat([data1, data2], axis=2) out = fluid.layers.batch_norm(concat_out, is_test=True) self.feeds = { @@ -67,7 +67,8 @@ class TensorRTSubgraphPassConcatTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassConcatTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -75,16 +76,16 @@ class TensorRTSubgraphPassConcatTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassSplitTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) split_out = fluid.layers.split(data, dim=-1, num_or_sections=2) out = fluid.layers.batch_norm(split_out[0], is_test=True) self.feeds = { @@ -92,7 +93,8 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassSplitTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -100,16 +102,16 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) split_out = fluid.layers.split(data, dim=-1, num_or_sections=2) out = fluid.layers.batch_norm(split_out[0], is_test=True) self.feeds = { @@ -117,7 +119,8 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassSplitTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) self.fetch_list = [out] def 
test_check_output(self): @@ -127,16 +130,16 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): shutil.rmtree(self.path + "_opt_cache") self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) split_out = fluid.layers.split(data, dim=-1, num_or_sections=2) out = fluid.layers.batch_norm(split_out[0], is_test=True) self.feeds = { @@ -144,10 +147,16 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassSplitTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False) - self.dynamic_shape_params = TensorRTSubgraphPassDynamicSplitFp16SerializeTest.DynamicShapeParam( - {'data': [1, 3, 8, 64]}, {'data': [1, 3, 512, 64]}, - {'data': [1, 3, 256, 64]}, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassDynamicSplitFp16SerializeTest.DynamicShapeParam( + {'data': [1, 3, 8, 64]}, + {'data': [1, 3, 512, 64]}, + {'data': [1, 3, 256, 64]}, + False, + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -157,31 +166,36 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): shutil.rmtree(self.path + "_opt_cache") self.check_output_with_option(use_gpu, 1e-3) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) param_attr = fluid.ParamAttr( name='instance_norm_w', - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0), + ) bias_attr = fluid.ParamAttr( name='instance_norm_b', - initializer=fluid.initializer.Constant(value=0.0)) - out = fluid.layers.instance_norm(input=data, - param_attr=param_attr, - bias_attr=bias_attr) + initializer=fluid.initializer.Constant(value=0.0), + ) + out = fluid.layers.instance_norm( + input=data, param_attr=param_attr, bias_attr=bias_attr + ) self.feeds = { "data": np.random.random([1, 3, 64, 64]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassInstanceNormTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + TensorRTSubgraphPassInstanceNormTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -189,16 +203,16 @@ class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, atol=1e-4, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassTransposeTest(InferencePassTest): - def setUp(self): with 
fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) transpose_out = self.append_transpose(data) out = fluid.layers.batch_norm(transpose_out, is_test=True) self.feeds = { @@ -206,7 +220,8 @@ class TensorRTSubgraphPassTransposeTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassTransposeTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_transpose(self, data): @@ -217,25 +232,27 @@ class TensorRTSubgraphPassTransposeTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassLayerNormTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") - out = fluid.layers.layer_norm(data, - begin_norm_axis=self.begin_norm_axis) + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) + out = fluid.layers.layer_norm( + data, begin_norm_axis=self.begin_norm_axis + ) self.feeds = { "data": np.random.random([1, 3, 64, 64]).astype("float32"), } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassLayerNormTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def set_params(self): @@ -246,19 +263,20 @@ class TensorRTSubgraphPassLayerNormTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 3, 64, 64], - dtype="float32") - out = fluid.layers.layer_norm(data, - begin_norm_axis=self.begin_norm_axis) + data = fluid.data( + name="data", shape=[-1, 3, 64, 64], dtype="float32" + ) + out = fluid.layers.layer_norm( + data, begin_norm_axis=self.begin_norm_axis + ) self.feeds = { "data": np.random.random([1, 3, 64, 64]).astype("float32"), } @@ -267,16 +285,25 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): def set_trt_params(self): self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassLayerNormDynamicTest.TensorRTParam( - 1 << 30, 32, 0, self.precision, self.serialize, False) - self.dynamic_shape_params = TensorRTSubgraphPassLayerNormDynamicTest.DynamicShapeParam( - { - 'data': [1, 3, 64, 64], - }, { - 'data': [8, 8, 64, 64], - }, { - 'data': [4, 4, 64, 64], - }, False) + self.trt_parameters = ( + TensorRTSubgraphPassLayerNormDynamicTest.TensorRTParam( + 1 << 30, 32, 0, self.precision, self.serialize, False + ) + ) + self.dynamic_shape_params = ( + TensorRTSubgraphPassLayerNormDynamicTest.DynamicShapeParam( + { + 'data': [1, 3, 64, 64], + }, + { + 'data': [8, 8, 64, 64], + }, + { + 'data': [4, 4, 64, 64], + }, + False, + ) + ) def set_params(self): self.begin_norm_axis = 2 @@ -290,12 
+317,13 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassLayerNormDynamicFP16Test( - TensorRTSubgraphPassLayerNormDynamicTest): - + TensorRTSubgraphPassLayerNormDynamicTest +): def set_params(self): self.begin_norm_axis = 2 self.precision = AnalysisConfig.Precision.Half @@ -308,33 +336,33 @@ class TensorRTSubgraphPassLayerNormDynamicFP16Test( use_gpu = True self.check_output_with_option(use_gpu, atol=0.01, rtol=0.01) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassLayerNormBeginNormAxis2Test( - TensorRTSubgraphPassLayerNormTest): - + TensorRTSubgraphPassLayerNormTest +): def set_params(self): self.begin_norm_axis = 2 class TensorRTSubgraphPassLayerNormBeginNormAxis3Test( - TensorRTSubgraphPassLayerNormTest): - + TensorRTSubgraphPassLayerNormTest +): def set_params(self): self.begin_norm_axis = 3 class TensorRTSubgraphPassElementwiseTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data(name="data1", - shape=[-1, 3, 64, 64], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[-1, 3, 64, 64], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 3, 64, 64], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[-1, 3, 64, 64], dtype="float32" + ) eltwise_out = self.append_eltwise(data1, data2) out = fluid.layers.batch_norm(eltwise_out, is_test=True) self.feeds = { @@ -343,7 +371,8 @@ class TensorRTSubgraphPassElementwiseTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TensorRTSubgraphPassElementwiseTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def append_eltwise(self, data1, data2): @@ -354,38 +383,40 @@ class TensorRTSubgraphPassElementwiseTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) -class TensorRTSubgraphPassElementwiseMulTest(TensorRTSubgraphPassElementwiseTest - ): - +class TensorRTSubgraphPassElementwiseMulTest( + TensorRTSubgraphPassElementwiseTest +): def append_eltwise(self, data1, data2): return fluid.layers.elementwise_mul(x=data1, y=data2) class TensorRTSubgraphPassElementwiseSerializeTest( - TensorRTSubgraphPassElementwiseTest): - + TensorRTSubgraphPassElementwiseTest +): def setUp(self): super(TensorRTSubgraphPassElementwiseSerializeTest, self).setUp() self.trt_parameters = TensorRTSubgraphPassElementwiseTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) def test_check_output(self): if os.path.exists(self.path + "_opt_cache"): shutil.rmtree(self.path + "_opt_cache") - super(TensorRTSubgraphPassElementwiseSerializeTest, - self).test_check_output() + super( + TensorRTSubgraphPassElementwiseSerializeTest, self + ).test_check_output() class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, 
self.startup_program): - data1 = fluid.data(name="data1", - shape=[-1, 3, 64, 64], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 3, 64, 64], dtype="float32" + ) data2 = fluid.data(name="data2", shape=[64, 64], dtype="float32") eltwise_out = self.append_eltwise(data1, data2) out = fluid.layers.batch_norm(eltwise_out, is_test=True) @@ -394,19 +425,17 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): "data2": np.random.random([64, 64]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassElementwiseBroadcastDynamicTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False) + self.trt_parameters = ( + TensorRTSubgraphPassElementwiseBroadcastDynamicTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False + ) + ) self.dynamic_shape_params = TensorRTSubgraphPassElementwiseBroadcastDynamicTest.DynamicShapeParam( - { - 'data1': [1, 3, 8, 64], - 'data2': [8, 64] - }, { - 'data1': [1, 3, 512, 64], - 'data2': [512, 64] - }, { - 'data1': [1, 3, 256, 64], - 'data2': [256, 64] - }, False) + {'data1': [1, 3, 8, 64], 'data2': [8, 64]}, + {'data1': [1, 3, 512, 64], 'data2': [512, 64]}, + {'data1': [1, 3, 256, 64], 'data2': [256, 64]}, + False, + ) self.fetch_list = [out] def append_eltwise(self, data1, data2): @@ -419,24 +448,27 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TensorRTSubgraphPassShuffleChannelTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) sc_out = fluid.layers.shuffle_channel(data, group=3) out = fluid.layers.batch_norm(sc_out, is_test=True) self.feeds = { "data": np.random.random([1, 6, 64, 64]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TensorRTSubgraphPassShuffleChannelTest.TensorRTParam( - 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + TensorRTSubgraphPassShuffleChannelTest.TensorRTParam( + 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) self.fetch_list = [out] def test_check_output(self): @@ -444,7 +476,8 @@ class TensorRTSubgraphPassShuffleChannelTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py index bb3772ca5aeeb395a07795c1ae70e636b9428a25..52fa99fc81ad521fe279a2f1bd2b21183b8afaf5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py @@ -23,12 +23,11 @@ from paddle.fluid.core import AnalysisConfig class TRTTileTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[4, 3, 224, 256], - dtype="float32") + data = fluid.data( + name="data", shape=[4, 3, 224, 256], 
dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 1, 1, 1]) out = fluid.layers.batch_norm(tile_out, is_test=True) @@ -37,7 +36,8 @@ class TRTTileTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTTileTest.TensorRTParam( - 1 << 30, 16, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 16, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -45,11 +45,11 @@ class TRTTileTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTTileExpandTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") @@ -61,7 +61,8 @@ class TRTTileExpandTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTTileExpandTest.TensorRTParam( - 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -69,11 +70,11 @@ class TRTTileExpandTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTTileExpandStaticTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") @@ -85,7 +86,8 @@ class TRTTileExpandStaticTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTTileExpandStaticTest.TensorRTParam( - 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, True, False) + 1 << 30, 1, 1, AnalysisConfig.Precision.Float32, True, False + ) self.fetch_list = [out] def test_check_output(self): @@ -93,11 +95,11 @@ class TRTTileExpandStaticTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTTileExpandHalfTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") @@ -109,7 +111,8 @@ class TRTTileExpandHalfTest(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTTileExpandHalfTest.TensorRTParam( - 1 << 30, 1, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, 1, 1, AnalysisConfig.Precision.Half, False, False + ) self.fetch_list = [out] def test_check_output(self): @@ -117,7 +120,8 @@ class TRTTileExpandHalfTest(InferencePassTest): use_gpu = True self.check_output_with_option(use_gpu, 1e-4, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py index 3a15f0dcf34a6ff75e8bc4ddb42957857f4bdb41..409d36600d28b8b4a1d33893aa1153e5fed3d118 100644 --- 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py @@ -21,15 +21,14 @@ from paddle.fluid.core import AnalysisConfig class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data(name="data1", - shape=[8, 32, 128], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[8, 32, 128], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[8, 32, 128], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[8, 32, 128], dtype="float32" + ) trans1 = fluid.layers.transpose(data1, perm=[0, 2, 1]) trans2 = fluid.layers.transpose(data2, perm=[0, 2, 1]) flatt1 = fluid.layers.flatten(trans1) @@ -42,11 +41,14 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): self.feeds = { "data1": np.random.random([8, 32, 128]).astype("float32"), - "data2": np.random.random([8, 32, 128]).astype("float32") + "data2": np.random.random([8, 32, 128]).astype("float32"), } self.enable_trt = True - self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam( - 1 << 20, 8, 0, AnalysisConfig.Precision.Float32, False, False) + self.trt_parameters = ( + TransposeFlattenConcatFusePassTRTTest.TensorRTParam( + 1 << 20, 8, 0, AnalysisConfig.Precision.Float32, False, False + ) + ) self.fetch_list = [out] def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py index 3ef7ca22acaa81d213852d5b41f46e51b6822b79..44345b89c1926e5917929cc334e16f754a89003a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py @@ -22,7 +22,6 @@ from paddle.inference import Config, create_predictor class TRTTunedDynamicShapeTest(unittest.TestCase): - def get_model(self): place = fluid.CUDAPlace(0) exe = fluid.Executor(place) @@ -30,21 +29,25 @@ class TRTTunedDynamicShapeTest(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = fluid.data(name="data", - shape=[-1, 6, 64, 64], - dtype="float32") - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - groups=1, - padding=0, - bias_attr=False, - act=None) + data = fluid.data( + name="data", shape=[-1, 6, 64, 64], dtype="float32" + ) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + groups=1, + padding=0, + bias_attr=False, + act=None, + ) exe.run(startup_program) serialized_program = paddle.static.serialize_program( - data, conv_out, program=main_program) + data, conv_out, program=main_program + ) serialized_params = paddle.static.serialize_persistables( - data, conv_out, executor=exe, program=main_program) + data, conv_out, executor=exe, program=main_program + ) return serialized_program, serialized_params def get_config(self, model, params, tuned=False): @@ -61,9 +64,11 @@ class TRTTunedDynamicShapeTest(unittest.TestCase): min_subgraph_size=0, precision_mode=paddle.inference.PrecisionType.Float32, use_static=True, - use_calib_mode=False) - config.enable_tuned_tensorrt_dynamic_shape('shape_range.pbtxt', - True) + use_calib_mode=False, + ) + 
config.enable_tuned_tensorrt_dynamic_shape( + 'shape_range.pbtxt', True + ) return config diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py index 5eeafe0228a780f86bfde9cf8e13fbb1292e4345..d2ad64cf72ef77e65f6fc341490ec55b61025a67 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py @@ -22,26 +22,26 @@ from paddle.fluid.core import AnalysisConfig class TRTYoloBoxTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data(name='image_size', - shape=[self.bs, 2], - dtype='int32') + image_size = fluid.data( + name='image_size', shape=[self.bs, 2], dtype='int32' + ) boxes, scores = self.append_yolobox(image, image_size) self.feeds = { - 'image': - np.random.random(image_shape).astype('float32'), - 'image_size': - np.random.randint(32, 64, size=(self.bs, 2)).astype('int32'), + 'image': np.random.random(image_shape).astype('float32'), + 'image_size': np.random.randint(32, 64, size=(self.bs, 2)).astype( + 'int32' + ), } self.enable_trt = True self.trt_parameters = TRTYoloBoxTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [scores, boxes] def set_params(self): @@ -51,35 +51,37 @@ class TRTYoloBoxTest(InferencePassTest): self.width = 64 self.class_num = 80 self.anchors = [10, 13, 16, 30, 33, 23] - self.conf_thresh = .1 + self.conf_thresh = 0.1 self.downsample_ratio = 32 def append_yolobox(self, image, image_size): - return fluid.layers.yolo_box(x=image, - img_size=image_size, - class_num=self.class_num, - anchors=self.anchors, - conf_thresh=self.conf_thresh, - downsample_ratio=self.downsample_ratio) + return fluid.layers.yolo_box( + x=image, + img_size=image_size, + class_num=self.class_num, + anchors=self.anchors, + conf_thresh=self.conf_thresh, + downsample_ratio=self.downsample_ratio, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTYoloBoxFP16Test(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data(name='image_size', - shape=[self.bs, 2], - dtype='int32') + image_size = fluid.data( + name='image_size', shape=[self.bs, 2], dtype='int32' + ) boxes, scores = self.append_yolobox(image, image_size) self.feeds = { @@ -88,7 +90,8 @@ class TRTYoloBoxFP16Test(InferencePassTest): } self.enable_trt = True self.trt_parameters = TRTYoloBoxFP16Test.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Half, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Half, False, False + ) self.fetch_list = [scores, boxes] def set_params(self): @@ -98,46 +101,49 @@ class TRTYoloBoxFP16Test(InferencePassTest): self.class_num = 1 self.anchors = [106, 148, 92, 300, 197, 
334] self.channel = 18 - self.conf_thresh = .05 + self.conf_thresh = 0.05 self.downsample_ratio = 32 def append_yolobox(self, image, image_size): - return fluid.layers.yolo_box(x=image, - img_size=image_size, - class_num=self.class_num, - anchors=self.anchors, - conf_thresh=self.conf_thresh, - downsample_ratio=self.downsample_ratio) + return fluid.layers.yolo_box( + x=image, + img_size=image_size, + class_num=self.class_num, + anchors=self.anchors, + conf_thresh=self.conf_thresh, + downsample_ratio=self.downsample_ratio, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True, rtol=1e-1) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) class TRTYoloBoxIoUAwareTest(InferencePassTest): - def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data(name='image_size', - shape=[self.bs, 2], - dtype='int32') + image_size = fluid.data( + name='image_size', shape=[self.bs, 2], dtype='int32' + ) boxes, scores = self.append_yolobox(image, image_size) self.feeds = { - 'image': - np.random.random(image_shape).astype('float32'), - 'image_size': - np.random.randint(32, 64, size=(self.bs, 2)).astype('int32'), + 'image': np.random.random(image_shape).astype('float32'), + 'image_size': np.random.randint(32, 64, size=(self.bs, 2)).astype( + 'int32' + ), } self.enable_trt = True self.trt_parameters = TRTYoloBoxTest.TensorRTParam( - 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False) + 1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False + ) self.fetch_list = [scores, boxes] def set_params(self): @@ -147,27 +153,30 @@ class TRTYoloBoxIoUAwareTest(InferencePassTest): self.width = 64 self.class_num = 80 self.anchors = [10, 13, 16, 30, 33, 23] - self.conf_thresh = .1 + self.conf_thresh = 0.1 self.downsample_ratio = 32 self.iou_aware = True self.iou_aware_factor = 0.5 def append_yolobox(self, image, image_size): - return fluid.layers.yolo_box(x=image, - img_size=image_size, - class_num=self.class_num, - anchors=self.anchors, - conf_thresh=self.conf_thresh, - downsample_ratio=self.downsample_ratio, - iou_aware=self.iou_aware, - iou_aware_factor=self.iou_aware_factor) + return fluid.layers.yolo_box( + x=image, + img_size=image_size, + class_num=self.class_num, + anchors=self.anchors, + conf_thresh=self.conf_thresh, + downsample_ratio=self.downsample_ratio, + iou_aware=self.iou_aware, + iou_aware_factor=self.iou_aware_factor, + ) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( - PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) + PassVersionChecker.IsCompatible('tensorrt_subgraph_pass') + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py index 7ea2b494f3d17713fb811465690996e72b296731..d9626ebd1de6f610ce167b1ab9be24c2fc515f06 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py @@ -40,7 +40,8 @@ class 
TestUnsqueezeEltwiseFusePass(PassAutoScanTest): min_subgraph_size=0, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, - use_calib_mode=False) + use_calib_mode=False, + ) yield config, [ 'elementwise_mul', ], (1e-5, 1e-5) @@ -48,9 +49,10 @@ class TestUnsqueezeEltwiseFusePass(PassAutoScanTest): def sample_program_config(self, draw): # 1. Generate shape and attr of mul x_shape = draw( - st.lists(st.integers(min_value=1, max_value=10), - min_size=4, - max_size=4)) + st.lists( + st.integers(min_value=1, max_value=10), min_size=4, max_size=4 + ) + ) axis = -1 # 2. Generate legal shape and attr of input:Y of unsqueeze2 @@ -62,20 +64,14 @@ class TestUnsqueezeEltwiseFusePass(PassAutoScanTest): inputs={ "X": ["unsqueeze2_x"], "AxesTensor": [], - "AxesTensorList": [] + "AxesTensorList": [], }, axes=unsqueeze2_axes, - outputs={ - "Out": ["unsqueeze2_out"], - "XShape": ["xshape"] - }, + outputs={"Out": ["unsqueeze2_out"], "XShape": ["xshape"]}, ) mul_op = OpConfig( "elementwise_mul", - inputs={ - "Y": ["unsqueeze2_out"], - "X": ["mul_x"] - }, + inputs={"Y": ["unsqueeze2_out"], "X": ["mul_x"]}, axis=axis, outputs={"Out": ["mul_out"]}, ) @@ -97,9 +93,11 @@ class TestUnsqueezeEltwiseFusePass(PassAutoScanTest): return program_config def test(self): - self.run_and_statis(quant=False, - max_examples=300, - passes=["unsqueeze2_eltwise_fuse_pass"]) + self.run_and_statis( + quant=False, + max_examples=300, + passes=["unsqueeze2_eltwise_fuse_pass"], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_yolo_box_post.py b/python/paddle/fluid/tests/unittests/ir/inference/test_yolo_box_post.py index 74f251ec62b6ecbc4fd875c22535f60705622437..fd1d03a6a7229b96bcebe5d378c8d8fcf8d23a72 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_yolo_box_post.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_yolo_box_post.py @@ -20,22 +20,24 @@ from paddle.fluid.layer_helper import LayerHelper paddle.enable_static() -def yolo_box_post(box0, - box1, - box2, - im_shape, - im_scale, - anchors0=[116, 90, 156, 198, 373, 326], - anchors1=[30, 61, 62, 45, 59, 119], - anchors2=[10, 13, 16, 30, 33, 23], - class_num=80, - conf_thresh=0.005, - downsample_ratio0=32, - downsample_ratio1=16, - downsample_ratio2=8, - clip_bbox=True, - scale_x_y=1., - nms_threshold=0.45): +def yolo_box_post( + box0, + box1, + box2, + im_shape, + im_scale, + anchors0=[116, 90, 156, 198, 373, 326], + anchors1=[30, 61, 62, 45, 59, 119], + anchors2=[10, 13, 16, 30, 33, 23], + class_num=80, + conf_thresh=0.005, + downsample_ratio0=32, + downsample_ratio1=16, + downsample_ratio2=8, + clip_bbox=True, + scale_x_y=1.0, + nms_threshold=0.45, +): helper = LayerHelper('yolo_box_post', **locals()) output = helper.create_variable_for_type_inference(dtype=box0.dtype) nms_rois_num = helper.create_variable_for_type_inference(dtype='int32') @@ -44,35 +46,37 @@ def yolo_box_post(box0, 'Boxes1': box1, 'Boxes2': box2, "ImageShape": im_shape, - "ImageScale": im_scale + "ImageScale": im_scale, } outputs = {'Out': output, 'NmsRoisNum': nms_rois_num} - helper.append_op(type="yolo_box_post", - inputs=inputs, - attrs={ - 'anchors0': anchors0, - 'anchors1': anchors1, - 'anchors2': anchors2, - 'class_num': class_num, - 'conf_thresh': conf_thresh, - 'downsample_ratio0': downsample_ratio0, - 'downsample_ratio1': downsample_ratio1, - 'downsample_ratio2': downsample_ratio2, - 'clip_bbox': clip_bbox, - 'scale_x_y': scale_x_y, - 'nms_threshold': nms_threshold - }, - outputs=outputs) + helper.append_op( + 
type="yolo_box_post", + inputs=inputs, + attrs={ + 'anchors0': anchors0, + 'anchors1': anchors1, + 'anchors2': anchors2, + 'class_num': class_num, + 'conf_thresh': conf_thresh, + 'downsample_ratio0': downsample_ratio0, + 'downsample_ratio1': downsample_ratio1, + 'downsample_ratio2': downsample_ratio2, + 'clip_bbox': clip_bbox, + 'scale_x_y': scale_x_y, + 'nms_threshold': nms_threshold, + }, + outputs=outputs, + ) output.stop_gradient = True nms_rois_num.stop_gradient = True return output, nms_rois_num -@unittest.skipIf(not paddle.is_compiled_with_cuda(), - "only support cuda kernel.") +@unittest.skipIf( + not paddle.is_compiled_with_cuda(), "only support cuda kernel." +) class TestYoloBoxPost(unittest.TestCase): - def test_yolo_box_post(self): place = paddle.CUDAPlace(0) program = paddle.static.Program() @@ -90,8 +94,8 @@ class TestYoloBoxPost(unittest.TestCase): "box0": np.random.uniform(size=[1, 255, 19, 19]).astype("float32"), "box1": np.random.uniform(size=[1, 255, 38, 38]).astype("float32"), "box2": np.random.uniform(size=[1, 255, 76, 76]).astype("float32"), - "im_shape": np.array([[608., 608.]], "float32"), - "im_scale": np.array([[1., 1.]], "float32") + "im_shape": np.array([[608.0, 608.0]], "float32"), + "im_scale": np.array([[1.0, 1.0]], "float32"), } outs = exe.run(program, feed=feed, fetch_list=[out.name, rois_num.name]) diff --git a/python/paddle/fluid/tests/unittests/ir/pass_test.py b/python/paddle/fluid/tests/unittests/ir/pass_test.py index 0336dcb2da54832259bf22b254ba751eade8fa26..4cae2e7fec187e35e4a89240f6f3260882ba2411 100644 --- a/python/paddle/fluid/tests/unittests/ir/pass_test.py +++ b/python/paddle/fluid/tests/unittests/ir/pass_test.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import Block class PassTest(unittest.TestCase): - @classmethod def setUpClass(self): self.main_program = fluid.Program() @@ -68,10 +67,12 @@ class PassTest(unittest.TestCase): self.check_output_with_place(place, startup_on_cpu, atol) def _run_program(self, executor, program): - outs = executor.run(program=program, - feed=self.feeds, - fetch_list=self.fetch_list, - return_numpy=False) + outs = executor.run( + program=program, + feed=self.feeds, + fetch_list=self.fetch_list, + return_numpy=False, + ) outs_np = [] outs_lod = [] for out in outs: @@ -128,8 +129,10 @@ class PassTest(unittest.TestCase): outs, lods = self._run_program(executor, self.main_program) self.assertTrue( len(self.fetch_list) == len(outs), - "Checking the number of fetchs failed. Expected: {}, Received: {}". - format(len(self.fetch_list), len(outs))) + "Checking the number of fetchs failed. Expected: {}, Received: {}".format( + len(self.fetch_list), len(outs) + ), + ) # Parameters may be changed in ir passes. opt_program = self._apply_ir_passes() @@ -138,13 +141,16 @@ class PassTest(unittest.TestCase): if startup_on_cpu and not isinstance(place, fluid.CPUPlace): warnings.warn( "Parameters are on CPU, and will be transferred to GPU " - "automatically by data transform.") + "automatically by data transform." + ) outs_opt, lods_opt = self._run_program(executor, opt_program) self.assertTrue( len(self.fetch_list) == len(outs_opt), - "Checking the number of fetchs failed. Expected: {}, Received: {}". - format(len(self.fetch_list), len(outs_opt))) + "Checking the number of fetchs failed. 
Expected: {}, Received: {}".format( + len(self.fetch_list), len(outs_opt) + ), + ) for i in range(len(self.fetch_list)): is_allclose = np.allclose(outs_opt[i], outs[i], atol=atol) if not is_allclose: @@ -156,9 +162,17 @@ class PassTest(unittest.TestCase): self.assertTrue( is_allclose, "Output (name: %s, shape: %s, dtype: %s) has diff at %s. The maximum diff is %e, first error element is %d, expected %e, but got %e" - % (self.fetch_list[i].name, str(self.fetch_list[i].shape), - self.fetch_list[i].dtype, str(place), max_diff, offset, - a.flatten()[offset], b.flatten()[offset])) + % ( + self.fetch_list[i].name, + str(self.fetch_list[i].shape), + self.fetch_list[i].dtype, + str(place), + max_diff, + offset, + a.flatten()[offset], + b.flatten()[offset], + ), + ) def _check_fused_ops(self, program): ''' @@ -179,9 +193,10 @@ class PassTest(unittest.TestCase): self.assertTrue( self.num_fused_ops == acctual_num_fused_ops, "Checking of the number of fused operator < {} > failed. " - "Expected: {}, Received: {}".format(self.fused_op_type, - self.num_fused_ops, - acctual_num_fused_ops)) + "Expected: {}, Received: {}".format( + self.fused_op_type, self.num_fused_ops, acctual_num_fused_ops + ), + ) def check_program(self, program=None): ''' @@ -196,13 +211,16 @@ class PassTest(unittest.TestCase): self.assertTrue( self.main_program.desc != program.desc, "The optimized program and the origin main_program hold the same " - "desc.") + "desc.", + ) self.assertTrue( self.main_program.num_blocks == program.num_blocks, "The number of blocks of the origin program and the optimized " "program are different ({} vs {}).".format( - self.main_program.num_blocks, program.num_blocks)) + self.main_program.num_blocks, program.num_blocks + ), + ) is_different = False for i in range(program.num_blocks): @@ -219,7 +237,8 @@ class PassTest(unittest.TestCase): break if len(self.main_program.block(i).vars) != len( - program.block(i).vars): + program.block(i).vars + ): # The number of vars in the block i of the origin program and # the optimized program is different. 
is_different = True @@ -235,7 +254,8 @@ class PassTest(unittest.TestCase): self.assertTrue( is_different, "The optimized program is logically the same with the origin " - "program.") + "program.", + ) def _find_op(self, specified_op, program, block_id): is_find = False diff --git a/python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py b/python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py index 20891b22fde3b9340c503ed3c568f1e883eba816..6169cb98b6b28fcb773a05f378050b9722e4d630 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py @@ -23,55 +23,75 @@ from paddle.inference import PrecisionType, PlaceType from paddle.inference import convert_to_mixed_precision -@unittest.skipIf(not paddle.is_compiled_with_cuda() - or paddle.get_cudnn_version() < 8000, - 'should compile with cuda.') +@unittest.skipIf( + not paddle.is_compiled_with_cuda() or paddle.get_cudnn_version() < 8000, + 'should compile with cuda.', +) class TestConvertToMixedPrecision(unittest.TestCase): - def test_convert_to_fp16(self): model = resnet50(True) net = to_static( - model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')]) + model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')] + ) paddle.jit.save(net, 'resnet50/inference') - convert_to_mixed_precision('resnet50/inference.pdmodel', - 'resnet50/inference.pdiparams', - 'mixed/inference.pdmodel', - 'mixed/inference.pdiparams', - PrecisionType.Half, PlaceType.GPU, True) + convert_to_mixed_precision( + 'resnet50/inference.pdmodel', + 'resnet50/inference.pdiparams', + 'mixed/inference.pdmodel', + 'mixed/inference.pdiparams', + PrecisionType.Half, + PlaceType.GPU, + True, + ) def test_convert_to_fp16_with_fp16_input(self): model = resnet50(True) net = to_static( - model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')]) + model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')] + ) paddle.jit.save(net, 'resnet50/inference') - convert_to_mixed_precision('resnet50/inference.pdmodel', - 'resnet50/inference.pdiparams', - 'mixed1/inference.pdmodel', - 'mixed1/inference.pdiparams', - PrecisionType.Half, PlaceType.GPU, False) + convert_to_mixed_precision( + 'resnet50/inference.pdmodel', + 'resnet50/inference.pdiparams', + 'mixed1/inference.pdmodel', + 'mixed1/inference.pdiparams', + PrecisionType.Half, + PlaceType.GPU, + False, + ) def test_convert_to_fp16_with_blacklist(self): model = resnet50(True) net = to_static( - model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')]) + model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')] + ) paddle.jit.save(net, 'resnet50/inference') - convert_to_mixed_precision('resnet50/inference.pdmodel', - 'resnet50/inference.pdiparams', - 'mixed2/inference.pdmodel', - 'mixed2/inference.pdiparams', - PrecisionType.Half, PlaceType.GPU, False, - set('conv2d')) + convert_to_mixed_precision( + 'resnet50/inference.pdmodel', + 'resnet50/inference.pdiparams', + 'mixed2/inference.pdmodel', + 'mixed2/inference.pdiparams', + PrecisionType.Half, + PlaceType.GPU, + False, + set('conv2d'), + ) def test_convert_to_bf16(self): model = resnet50(True) net = to_static( - model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')]) + model, input_spec=[InputSpec(shape=[None, 3, 224, 224], name='x')] + ) paddle.jit.save(net, 'resnet50/inference') - convert_to_mixed_precision('resnet50/inference.pdmodel', - 'resnet50/inference.pdiparams', - 
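# A hedged note on the blacklist argument passed to convert_to_mixed_precision
# above: set('conv2d') builds a set of six single characters, not the op name
# 'conv2d'. If the intent is to keep conv2d in FP32, a set containing the op
# name (as below) is probably what was meant; this is an observation about the
# Python expression, not a confirmed statement about the API's matching rules.
black_list = {'conv2d'}      # one op-type name
chars_only = set('conv2d')   # {'c', 'o', 'n', 'v', '2', 'd'}
assert 'conv2d' in black_list and 'conv2d' not in chars_only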
'mixed3/inference.pdmodel', - 'mixed3/inference.pdiparams', - PrecisionType.Bfloat16, PlaceType.GPU, True) + convert_to_mixed_precision( + 'resnet50/inference.pdmodel', + 'resnet50/inference.pdiparams', + 'mixed3/inference.pdmodel', + 'mixed3/inference.pdiparams', + PrecisionType.Bfloat16, + PlaceType.GPU, + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py b/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py index 58b227f4cb596b64d48f152cfe83c7eec5395f52..4dabcdbcf35d6d70d81733490c96bb84a4781300 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py +++ b/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py @@ -22,13 +22,14 @@ paddle.enable_static() np.random.seed(0) -@unittest.skipIf(not paddle.is_compiled_with_cuda() - or paddle.get_cudnn_version() < 8000 - or paddle.device.cuda.get_device_capability()[0] < 7, - "only support with cuda and cudnn version is at least 8.0 " - "and device's compute capability is at least 7.0") +@unittest.skipIf( + not paddle.is_compiled_with_cuda() + or paddle.get_cudnn_version() < 8000 + or paddle.device.cuda.get_device_capability()[0] < 7, + "only support with cuda and cudnn version is at least 8.0 " + "and device's compute capability is at least 7.0", +) class TestFuseResNetUnit(unittest.TestCase): - def test_fuse_resenet_unit(self): place = paddle.CUDAPlace(0) program = paddle.static.Program() @@ -36,14 +37,12 @@ class TestFuseResNetUnit(unittest.TestCase): with paddle.static.amp.fp16_guard(): with paddle.static.program_guard(program, startup_program): x = paddle.static.data("x", [1, 64, 64, 8]) - conv2d = paddle.nn.Conv2D(8, - 32, - 1, - bias_attr=False, - data_format='NHWC') - batch_norm = paddle.nn.BatchNorm(32, - act='relu', - data_layout='NHWC') + conv2d = paddle.nn.Conv2D( + 8, 32, 1, bias_attr=False, data_format='NHWC' + ) + batch_norm = paddle.nn.BatchNorm( + 32, act='relu', data_layout='NHWC' + ) out = batch_norm(conv2d(x)) graph = core.Graph(program.desc) core.get_pass("fuse_resnet_unit").apply(graph) @@ -52,15 +51,15 @@ class TestFuseResNetUnit(unittest.TestCase): after_params = paddle.static.amp.cast_model_to_fp16(after_program) exe = paddle.static.Executor(place) exe.run(startup_program) - paddle.static.amp.cast_parameters_to_fp16(place, - program, - to_fp16_var_names=params) paddle.static.amp.cast_parameters_to_fp16( - place, after_program, to_fp16_var_names=after_params) + place, program, to_fp16_var_names=params + ) + paddle.static.amp.cast_parameters_to_fp16( + place, after_program, to_fp16_var_names=after_params + ) feed = {"x": np.random.randn(1, 64, 64, 8).astype("float16")} before_out = exe.run(program, feed=feed, fetch_list=[out.name]) after_out = exe.run(after_program, feed=feed, fetch_list=[out.name]) - np.testing.assert_allclose(before_out[0], - after_out[0], - rtol=1e-05, - atol=0.005) + np.testing.assert_allclose( + before_out[0], after_out[0], rtol=1e-05, atol=0.005 + ) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py index 6a573e7beacce3c972250b4ae2b449fb1dc8f98a..6b0a69568a38578efacc4e8593e77582fb486bdf 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py @@ -21,89 +21,102 @@ import paddle.fluid.core as core class 
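# A minimal sketch, using only calls that appear verbatim in these tests, of
# applying a registered C++ IR pass to a static Program: build a core.Graph
# from the program desc, apply the pass by name, and wrap the result in an
# IrGraph. Turning the optimized graph back into a runnable Program (what
# PassTest._apply_ir_passes and TestFuseResNetUnit go on to do) needs extra
# plumbing that is not reproduced here.
import paddle
from paddle.fluid import core

paddle.enable_static()


def apply_ir_pass(program, pass_name):
    graph = core.Graph(program.desc)
    core.get_pass(pass_name).apply(graph)
    return paddle.fluid.framework.IrGraph(graph)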
EmbEltwiseLayerNormFusePassTest(PassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - word_id = fluid.layers.data(name="word_id", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - pos_id = fluid.layers.data(name="pos_id", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - sent_id = fluid.layers.data(name="sent_id", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - word_emb = fluid.layers.embedding(input=word_id, - size=(128, 768), - dtype='float32') - pos_emb = fluid.layers.embedding(input=pos_id, - size=(128, 768), - dtype='float32') - sent_emb = fluid.layers.embedding(input=sent_id, - size=(128, 768), - dtype='float32') + word_id = fluid.layers.data( + name="word_id", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + pos_id = fluid.layers.data( + name="pos_id", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + sent_id = fluid.layers.data( + name="sent_id", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + word_emb = fluid.layers.embedding( + input=word_id, size=(128, 768), dtype='float32' + ) + pos_emb = fluid.layers.embedding( + input=pos_id, size=(128, 768), dtype='float32' + ) + sent_emb = fluid.layers.embedding( + input=sent_id, size=(128, 768), dtype='float32' + ) add1 = fluid.layers.elementwise_add(word_emb, pos_emb) add2 = fluid.layers.elementwise_add(add1, sent_emb) hidden1 = fluid.layers.layer_norm(input=add2, begin_norm_axis=2) - id1 = fluid.layers.data(name="id1", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - id2 = fluid.layers.data(name="id2", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - id3 = fluid.layers.data(name="id3", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - id4 = fluid.layers.data(name="id4", - shape=[1, 128, 1], - dtype="int64", - append_batch_size=False) - emb1 = fluid.layers.embedding(input=id1, - size=(128, 768), - dtype='float32') - emb2 = fluid.layers.embedding(input=id2, - size=(128, 768), - dtype='float32') - emb3 = fluid.layers.embedding(input=id3, - size=(128, 768), - dtype='float32') - emb4 = fluid.layers.embedding(input=id4, - size=(128, 768), - dtype='float32') + id1 = fluid.layers.data( + name="id1", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + id2 = fluid.layers.data( + name="id2", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + id3 = fluid.layers.data( + name="id3", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + id4 = fluid.layers.data( + name="id4", + shape=[1, 128, 1], + dtype="int64", + append_batch_size=False, + ) + emb1 = fluid.layers.embedding( + input=id1, size=(128, 768), dtype='float32' + ) + emb2 = fluid.layers.embedding( + input=id2, size=(128, 768), dtype='float32' + ) + emb3 = fluid.layers.embedding( + input=id3, size=(128, 768), dtype='float32' + ) + emb4 = fluid.layers.embedding( + input=id4, size=(128, 768), dtype='float32' + ) add_1 = fluid.layers.elementwise_add(emb1, emb2) add_2 = fluid.layers.elementwise_add(add_1, emb3) add_3 = fluid.layers.elementwise_add(add_2, emb4) hidden_1 = fluid.layers.layer_norm(input=add_3, begin_norm_axis=2) self.feeds = { - "word_id": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), - "pos_id": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), - "sent_id": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), - "id1": - np.random.randint(low=0, 
high=128, - size=(1, 128, 1)).astype("int64"), - "id2": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), - "id3": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), - "id4": - np.random.randint(low=0, high=128, - size=(1, 128, 1)).astype("int64"), + "word_id": np.random.randint( + low=0, high=128, size=(1, 128, 1) + ).astype("int64"), + "pos_id": np.random.randint( + low=0, high=128, size=(1, 128, 1) + ).astype("int64"), + "sent_id": np.random.randint( + low=0, high=128, size=(1, 128, 1) + ).astype("int64"), + "id1": np.random.randint(low=0, high=128, size=(1, 128, 1)).astype( + "int64" + ), + "id2": np.random.randint(low=0, high=128, size=(1, 128, 1)).astype( + "int64" + ), + "id3": np.random.randint(low=0, high=128, size=(1, 128, 1)).astype( + "int64" + ), + "id4": np.random.randint(low=0, high=128, size=(1, 128, 1)).astype( + "int64" + ), } self.fetch_list = [hidden1, hidden_1] self.pass_names = "embedding_eltwise_layernorm_fuse_pass" @@ -115,9 +128,7 @@ class EmbEltwiseLayerNormFusePassTest(PassTest): if not core.is_compiled_with_cuda(): return self.pass_attrs = { - "embedding_eltwise_layernorm_fuse_pass": { - "use_gpu": True - } + "embedding_eltwise_layernorm_fuse_pass": {"use_gpu": True} } place = fluid.CUDAPlace(0) self.check_output_with_place(place, startup_on_cpu=True) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py index 060d63cc332a5337b016f104b0c1268f07325e66..6022f98fa399c4772e34f7cf45e61a46e871d727 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py @@ -21,17 +21,14 @@ import paddle.fluid.core as core class FCFusePassTest(PassTest): - def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", - shape=[32, 128], - dtype="float32", - lod_level=0) - tmp_0 = fluid.layers.fc(input=data, - size=128, - num_flatten_dims=1, - act="relu") + data = fluid.data( + name="data", shape=[32, 128], dtype="float32", lod_level=0 + ) + tmp_0 = fluid.layers.fc( + input=data, size=128, num_flatten_dims=1, act="relu" + ) tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1) tmp_2 = fluid.layers.softmax(input=tmp_1) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py index 10b861fad54c47ac236c1ba166b33a2e27287952..3775a4d08d1aa65024ed54ef0414bc2f06a7485b 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py @@ -22,12 +22,12 @@ import paddle.fluid.core as core class FusionGroupPassTest(PassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype)) + fluid.data(name="data2", shape=[128, 128], dtype=dtype) + ) # subgraph with only 1 op node tmp_0 = self.feed_vars[0] * self.feed_vars[1] @@ -78,7 +78,6 @@ class FusionGroupPassTest(PassTest): class FusionGroupPassComplicatedTest(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 64], dtype, 5) @@ -87,7 +86,8 @@ class 
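# An illustrative helper (hypothetical name) for the bookkeeping behind
# _check_fused_ops earlier in this diff: walk every block of a Program and
# count operators of the fused type, to compare against the expected
# self.num_fused_ops. Only Program/block attributes already used in this
# diff (num_blocks, block(i)) plus the standard op.type field are assumed.
def count_ops(program, op_type):
    return sum(
        1
        for block_id in range(program.num_blocks)
        for op in program.block(block_id).ops
        if op.type == op_type
    )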
FusionGroupPassComplicatedTest(FusionGroupPassTest): tmp_0 = one * self.feed_vars[0] # subgraph with 9 op nodes tmp_1 = tmp_0 * layers.sigmoid(self.feed_vars[1]) + layers.sigmoid( - self.feed_vars[2]) * layers.tanh(self.feed_vars[3]) + self.feed_vars[2] + ) * layers.tanh(self.feed_vars[3]) tmp_2 = layers.tanh(tmp_1) + layers.sigmoid(self.feed_vars[4]) self.append_gradients(tmp_2) @@ -97,12 +97,12 @@ class FusionGroupPassComplicatedTest(FusionGroupPassTest): class FusionGroupPassInplaceTest(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 32], dtype=dtype)) + fluid.data(name="data3", shape=[128, 32], dtype=dtype) + ) # subgraph with 3 op node tmp_0 = self.feed_vars[0] - self.feed_vars[1] @@ -115,7 +115,6 @@ class FusionGroupPassInplaceTest(FusionGroupPassTest): class FusionGroupPassTestFP64(FusionGroupPassTest): - def setUp(self): self.build_program("float64") self.feeds = self._feed_random_data(self.feed_vars) @@ -124,12 +123,12 @@ class FusionGroupPassTestFP64(FusionGroupPassTest): class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype)) + fluid.data(name="data2", shape=[128, 128], dtype=dtype) + ) # subgraph with 2 op nodes tmp_0 = self.feed_vars[0] * self.feed_vars[1] @@ -152,16 +151,17 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): class FusionGroupPassSumTest(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 128], dtype=dtype)) + fluid.data(name="data3", shape=[128, 128], dtype=dtype) + ) # subgraph with 2 op nodes tmp_0 = layers.sum( - [self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]]) + [self.feed_vars[0], self.feed_vars[1], self.feed_vars[2]] + ) tmp_1 = layers.sqrt(tmp_0) tmp_2 = layers.mul(tmp_0, self.feed_vars[3]) # subgraph with 2 op nodes @@ -174,7 +174,6 @@ class FusionGroupPassSumTest(FusionGroupPassTest): class FusionGroupPassCastTest(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2) @@ -196,17 +195,15 @@ class FusionGroupPassCastTest(FusionGroupPassTest): class FusionGroupPassFillConstantTest(FusionGroupPassTest): - def build_program(self, dtype): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2) tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1]) tmp_1 = layers.fill_constant(shape=[2, 2], dtype=dtype, value=2.0) - tmp_2 = layers.scale(tmp_1, - scale=3.0, - bias=1.0, - bias_after_scale=True) + tmp_2 = layers.scale( + tmp_1, scale=3.0, bias=1.0, bias_after_scale=True + ) tmp_3 = layers.elementwise_mul(tmp_2, tmp_0) self.append_gradients(tmp_3) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py index e4d8f1b32a7fcde0c80abd789ffd7d088fbebef0..76b58d3ce90009a641740a843c3c98abf93eb39b 100644 --- 
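# A one-line numpy check of what layers.scale(tmp_1, scale=3.0, bias=1.0,
# bias_after_scale=True) computes in FusionGroupPassFillConstantTest above:
# with bias_after_scale=True the bias is added after the multiplication,
# i.e. y = x * scale + bias (with False it would be y = (x + bias) * scale).
import numpy as np

x = np.full((2, 2), 2.0, dtype=np.float32)  # mirrors fill_constant(value=2.0)
y = x * 3.0 + 1.0                           # scale=3.0, bias=1.0, bias_after_scale=True
assert np.allclose(y, 7.0)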
a/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py @@ -23,9 +23,7 @@ import numpy as np # 1: relu(X=ewadd(X=mul(X=x, Y=w), Y=b)) => fc(Input=x, W=w, Bias=b) @ir.RegisterPass def generate_fc_fuse(): - def create_pass_pair(with_relu): - def pattern(x, w, b): mul = ir.PassDesc.OP.mul(X=x, Y=w) ewadd = ir.PassDesc.OP.elementwise_add(X=mul, Y=b) @@ -36,8 +34,9 @@ def generate_fc_fuse(): def replace(x, w, b): fc = ir.PassDesc.OP.fc(Input=x, W=w, Bias=b) - fc.Attr("in_num_col_dims").MappedPattern(op="mul", - name="x_num_col_dims") + fc.Attr("in_num_col_dims").MappedPattern( + op="mul", name="x_num_col_dims" + ) if with_relu: fc.SetAttr("activation_type", "relu") return fc @@ -57,7 +56,6 @@ def multi_add_to_sum_v1(): @ir.RegisterPass def multi_add_to_sum_v2(): - def pattern(x, y, z): ewadd1 = ir.PassDesc.OP.elementwise_add(X=x, Y=y) ewadd2 = ir.PassDesc.OP.elementwise_add(X=ewadd1, Y=z) @@ -75,13 +73,14 @@ def multi_add_to_sum_v3(): # mul(x, y1), mul(x, y2) => slice(mul(x, concat(y1, y2))) -@ir.RegisterPass(input_specs={ - 'x': InputSpec([16, 32]), - 'y1': InputSpec([32, 12]), - 'y2': InputSpec([32, 48]) -}) +@ir.RegisterPass( + input_specs={ + 'x': InputSpec([16, 32]), + 'y1': InputSpec([32, 12]), + 'y2': InputSpec([32, 48]), + } +) def generate_combine_mul_v1(): - def pattern(x, y1, y2): mul1 = paddle.matmul(x, y1) mul2 = paddle.matmul(x, y2) @@ -99,7 +98,6 @@ def generate_combine_mul_v1(): @ir.RegisterPass def generate_combine_mul_v2(): - def pattern(x, y1, y2): mul1 = ir.PassDesc.OP.matmul_v2(X=x, Y=y1) mul2 = ir.PassDesc.OP.matmul_v2(X=x, Y=y2) @@ -118,7 +116,6 @@ def generate_combine_mul_v2(): # reshape(reshape(x)) => x @ir.RegisterPass(input_specs={'x': InputSpec([10, 16, 16])}) def generate_simplify_inference_v1(): - def pattern(x): transpose = paddle.transpose(x, [0, 2, 1]) return paddle.transpose(transpose, [0, 2, 1]) @@ -128,7 +125,6 @@ def generate_simplify_inference_v1(): @ir.RegisterPass def generate_simplify_inference_v2(): - def pattern(x): op1 = ir.PassDesc.OP.transpose2 op2 = ir.PassDesc.OP.transpose2 @@ -140,7 +136,6 @@ def generate_simplify_inference_v2(): @ir.RegisterPass def generate_layer_norm_fuse_pass(): - def pattern(x, gamma, beta): gamma.Attr("shape").Size().EQ(1) gamma.Attr("shape")[0].EQ(x.Attr("shape")[-1]) @@ -175,7 +170,6 @@ def generate_layer_norm_fuse_pass(): @ir.RegisterPass def unimplemented_operand_exception(): - def pattern(x, y): return ir.PassDesc.OP.elementwise_add(X=x, Y=y) @@ -189,7 +183,6 @@ def unimplemented_operand_exception(): @ir.RegisterPass def unimplemented_operation_exception(): - def pattern(x, y): return ir.PassDesc.OP.elementwise_add(X=x, Y=y) @@ -208,7 +201,6 @@ def get_multi_pass_desc_from_str(s): class TestGeneratePass(unittest.TestCase): - def convert_ops_to_op_dicts(self, ops): op_dicts = dict() for op in ops: @@ -237,13 +229,13 @@ class TestGeneratePass(unittest.TestCase): core.get_pass("unimplemented_operation_exception").apply(graph) def test_generate_fc_fuse(self): - def _check_fc_fuse_pass(pass_desc, with_relu): pattern_op_dicts = self.convert_ops_to_op_dicts(pass_desc.pattern) replace_op_dicts = self.convert_ops_to_op_dicts(pass_desc.replace) self.assertEqual(len(pattern_op_dicts.get("mul", [])), 1) - self.assertEqual(len(pattern_op_dicts.get("elementwise_add", [])), - 1) + self.assertEqual( + len(pattern_op_dicts.get("elementwise_add", [])), 1 + ) if with_relu: self.assertEqual(len(pattern_op_dicts.get("relu", [])), 1) pattern_op_num = 3 # relu, 
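# A compact sketch of the pattern/replace pair the generate_* passes above are
# built from: pattern() describes the subgraph to match using ir.PassDesc.OP,
# replace() describes what to build instead, and the registered function
# returns the pair. Assumptions: the ir module is imported the same way as in
# this test file, and the sum op accepts a list input named X (consistent with
# how layers.sum is used elsewhere in this diff).
from paddle.fluid import ir


@ir.RegisterPass
def fold_two_adds_into_sum():
    def pattern(x, y, z):
        ewadd1 = ir.PassDesc.OP.elementwise_add(X=x, Y=y)
        return ir.PassDesc.OP.elementwise_add(X=ewadd1, Y=z)

    def replace(x, y, z):
        return ir.PassDesc.OP.sum(X=[x, y, z])

    return pattern, replace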
ewadd, mul @@ -285,12 +277,12 @@ class TestGeneratePass(unittest.TestCase): feed = { "x": np.random.random([10, 10, 10]).astype("float32"), "y": np.random.random([10, 10, 10]).astype("float32"), - "z": np.random.random([10, 10, 10]).astype("float32") + "z": np.random.random([10, 10, 10]).astype("float32"), } before_out = executor.run(program, feed=feed, fetch_list=[out.name]) - after_out = executor.run(after_program, - feed=feed, - fetch_list=[out.name]) + after_out = executor.run( + after_program, feed=feed, fetch_list=[out.name] + ) np.testing.assert_allclose(before_out, after_out, rtol=1e-05) def test_multi_add_to_sum(self): @@ -320,13 +312,14 @@ class TestGeneratePass(unittest.TestCase): feed = { "x": np.random.random([16, 32]).astype("float32"), "y": np.random.random([32, 12]).astype("float32"), - "z": np.random.random([32, 48]).astype("float32") + "z": np.random.random([32, 48]).astype("float32"), } before_out1, before_out2 = executor.run( - program, feed=feed, fetch_list=[out1.name, out2.name]) - after_out1, after_out2 = executor.run(after_program, - feed=feed, - fetch_list=[out1.name, out2.name]) + program, feed=feed, fetch_list=[out1.name, out2.name] + ) + after_out1, after_out2 = executor.run( + after_program, feed=feed, fetch_list=[out1.name, out2.name] + ) np.testing.assert_allclose(before_out1, after_out1, rtol=1e-05) np.testing.assert_allclose(before_out2, after_out2, rtol=1e-05) @@ -366,9 +359,9 @@ class TestGeneratePass(unittest.TestCase): executor.run(startup_program) feed = {"x": np.random.random([10, 16, 16]).astype("float32")} before_out = executor.run(program, feed=feed, fetch_list=[out.name]) - after_out = executor.run(after_program, - feed=feed, - fetch_list=[out.name]) + after_out = executor.run( + after_program, feed=feed, fetch_list=[out.name] + ) np.testing.assert_allclose(before_out, after_out, rtol=1e-05) def test_generate_simplify_inference(self): @@ -381,12 +374,12 @@ class TestGeneratePass(unittest.TestCase): startup_program = paddle.static.Program() with paddle.static.program_guard(program, startup_program): x = paddle.static.data("x", [3, 64, 120], "float32") - gamma = paddle.static.create_parameter(shape=[120], - dtype="float32", - is_bias=True) - beta = paddle.static.create_parameter(shape=[120], - dtype="float32", - is_bias=True) + gamma = paddle.static.create_parameter( + shape=[120], dtype="float32", is_bias=True + ) + beta = paddle.static.create_parameter( + shape=[120], dtype="float32", is_bias=True + ) x_sub_mean = x - paddle.mean(x, axis=-1, keepdim=True) std_dev = paddle.mean(x_sub_mean.pow(2), axis=-1, keepdim=True) @@ -402,7 +395,7 @@ class TestGeneratePass(unittest.TestCase): executor.run(startup_program) feed = {"x": np.random.random([3, 64, 120]).astype("float32")} before_out = executor.run(program, feed=feed, fetch_list=[out.name]) - after_out = executor.run(after_program, - feed=feed, - fetch_list=[out.name]) + after_out = executor.run( + after_program, feed=feed, fetch_list=[out.name] + ) np.testing.assert_allclose(before_out, after_out, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_graph_to_program_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_graph_to_program_pass.py index c4f9147e33a42017b2c4e8f75d71a4f84ca34a43..1b68c64805cb784267fd5bc2ef2c88a45afc9f85 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_graph_to_program_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_graph_to_program_pass.py @@ -31,7 +31,6 @@ def IRGraph_to_program(ir_graph): class 
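# A numpy rendering of the unfused layer norm that the
# generate_layer_norm_fuse_pass test above assembles from primitive ops
# (mean, subtract, square, mean, sqrt, divide, then gamma/beta), shown only
# to make the pattern being fused easier to read. The epsilon term is an
# assumption; the test's exact epsilon handling is not visible in this diff.
import numpy as np


def layer_norm_ref(x, gamma, beta, eps=1e-5):
    mean = x.mean(axis=-1, keepdims=True)
    var = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta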
GraphToProgramPassTest(unittest.TestCase): - def check_vars_equal(self, o_block, c_block): o_params = sorted(o_block.all_parameters(), key=lambda p: p.name) c_params = sorted(c_block.all_parameters(), key=lambda p: p.name) @@ -69,12 +68,12 @@ class GraphToProgramPassTest(unittest.TestCase): o_attr = o_attrs[attr_idx] c_attr = c_attrs[attr_idx] self.assertEqual(o_attr, c_attr) - self.assertEqual(o_op.desc.attr_type(o_attr), - c_op.desc.attr_type(c_attr)) + self.assertEqual( + o_op.desc.attr_type(o_attr), c_op.desc.attr_type(c_attr) + ) class SingleGraphToProgramPass(GraphToProgramPassTest): - def setUp(self): self.origin_program = self.build_program() ir_graph = program_to_IRGraph(self.origin_program) @@ -91,10 +90,12 @@ class SingleGraphToProgramPass(GraphToProgramPassTest): return program def test_check_parameter(self): - origin_parameter = sorted(self.origin_program.all_parameters(), - key=lambda p: p.name) - converted_parameter = sorted(self.converted_program.all_parameters(), - key=lambda p: p.name) + origin_parameter = sorted( + self.origin_program.all_parameters(), key=lambda p: p.name + ) + converted_parameter = sorted( + self.converted_program.all_parameters(), key=lambda p: p.name + ) self.assertEqual(len(origin_parameter), len(converted_parameter)) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_preln_residual_bias_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_preln_residual_bias_fuse_pass.py index a8d2cdc01c493f04023f6bb03ca4d1fb31c73629..2f367b83ae640ef974ec4481a7cc618bbdf308c4 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_preln_residual_bias_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_preln_residual_bias_fuse_pass.py @@ -19,20 +19,18 @@ import paddle class PrelnResidualBiasFusePassTest(PassTest): - def setUp(self): paddle.enable_static() - with paddle.static.program_guard(self.main_program, - self.startup_program): - x = paddle.static.data(name="x", - shape=[128, 768], - dtype="float32", - lod_level=0) + with paddle.static.program_guard( + self.main_program, self.startup_program + ): + x = paddle.static.data( + name="x", shape=[128, 768], dtype="float32", lod_level=0 + ) bias = paddle.static.create_parameter(shape=[768], dtype='float32') - y = paddle.static.data(name="y", - shape=[128, 768], - dtype="float32", - lod_level=0) + y = paddle.static.data( + name="y", shape=[128, 768], dtype="float32", lod_level=0 + ) x = x + bias elementwise_out = x + y out = paddle.static.nn.layer_norm(input=elementwise_out) @@ -57,19 +55,17 @@ class PrelnResidualBiasFusePassTest(PassTest): class PrelnResidualBiasFusePassNoBiasTest(PassTest): - def setUp(self): paddle.enable_static() - with paddle.static.program_guard(self.main_program, - self.startup_program): - x = paddle.static.data(name="x", - shape=[128, 768], - dtype="float32", - lod_level=0) - y = paddle.static.data(name="y", - shape=[128, 768], - dtype="float32", - lod_level=0) + with paddle.static.program_guard( + self.main_program, self.startup_program + ): + x = paddle.static.data( + name="x", shape=[128, 768], dtype="float32", lod_level=0 + ) + y = paddle.static.data( + name="y", shape=[128, 768], dtype="float32", lod_level=0 + ) elementwise_out = x + y out = paddle.static.nn.layer_norm(input=elementwise_out) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py index d53e7109d6a8b13d302841ba7447ce2a63fe43c8..82f53f95d9eaddc42e6e46f8566d7f3ea9250493 100644 --- 
a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py @@ -21,18 +21,15 @@ import paddle.fluid.core as core class SkipLayerNormFusePassTest(PassTest): - def setUp(self): paddle.enable_static() with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data(name="x", - shape=[128, 768], - dtype="float32", - lod_level=0) - y = fluid.data(name="y", - shape=[128, 768], - dtype="float32", - lod_level=0) + x = fluid.data( + name="x", shape=[128, 768], dtype="float32", lod_level=0 + ) + y = fluid.data( + name="y", shape=[128, 768], dtype="float32", lod_level=0 + ) elementwise_out = fluid.layers.elementwise_add(x=x, y=y) out = fluid.layers.layer_norm(input=elementwise_out) @@ -42,7 +39,7 @@ class SkipLayerNormFusePassTest(PassTest): self.num_fused_ops = 1 self.graph_attrs = { "embedding_eltwise_layernorm_fuse_pass_flag": True, - "multihead_matmul_fuse_pass_flag": True + "multihead_matmul_fuse_pass_flag": True, } def test_check_program(self): diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py index 56f2e5e0a2450d545eb42ba00bfbee444fca9c1c..bc9e63826cdffe1f886903485532a5a54fdb1fae 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py @@ -27,13 +27,11 @@ paddle.enable_static() class TestQuantizationSubGraph(unittest.TestCase): - def build_graph_with_sub_graph(self): - def linear_fc(num): - data = fluid.layers.data(name='image', - shape=[1, 32, 32], - dtype='float32') + data = fluid.layers.data( + name='image', shape=[1, 32, 32], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = data for _ in range(num): @@ -63,7 +61,8 @@ class TestQuantizationSubGraph(unittest.TestCase): graph = IrGraph(core_graph, for_test=True) sub_graph = graph.get_sub_graph(0) all_sub_graphs = graph.all_sub_graphs( - for_test=True) # same reason for subgraph + for_test=True + ) # same reason for subgraph # Should return graph and sub_graphs at the same time. If only return sub_graph, the graph will # be destructed and the sub_graphs will be empty. 
return graph, all_sub_graphs @@ -75,7 +74,8 @@ class TestQuantizationSubGraph(unittest.TestCase): scope=fluid.global_scope(), place=place, activation_quantize_type='abs_max', - weight_quantize_type='range_abs_max') + weight_quantize_type='range_abs_max', + ) Find_inserted_quant_op = False for sub_graph in sub_graphs: transform_pass.apply(sub_graph) @@ -87,8 +87,9 @@ class TestQuantizationSubGraph(unittest.TestCase): def test_quant_sub_graphs_cpu(self): self.test_quant_sub_graphs(use_cuda=False) - @OpTestTool.skip_if(not paddle.is_compiled_with_cuda(), - "Not GPU version paddle") + @OpTestTool.skip_if( + not paddle.is_compiled_with_cuda(), "Not GPU version paddle" + ) def test_quant_sub_graphs_gpu(self): self.test_quant_sub_graphs(use_cuda=True) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_yolo_box_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_yolo_box_pass.py index e86327f62f8cf7fc1a0fa33042b8c4d2ed4fd79e..58bda7ab8384a27703ad11ed58dd91e1f2781146 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_yolo_box_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_yolo_box_pass.py @@ -20,15 +20,17 @@ from paddle.fluid.layer_helper import LayerHelper paddle.enable_static() -def multiclass_nms(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=-1): +def multiclass_nms( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=-1, +): helper = LayerHelper('multiclass_nms3', **locals()) output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) index = helper.create_variable_for_type_inference(dtype='int32') @@ -36,18 +38,20 @@ def multiclass_nms(bboxes, inputs = {'BBoxes': bboxes, 'Scores': scores} outputs = {'Out': output, 'Index': index, 'NmsRoisNum': nms_rois_num} - helper.append_op(type="multiclass_nms3", - inputs=inputs, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'keep_top_k': keep_top_k, - 'nms_eta': nms_eta, - 'normalized': normalized - }, - outputs=outputs) + helper.append_op( + type="multiclass_nms3", + inputs=inputs, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized, + }, + outputs=outputs, + ) output.stop_gradient = True index.stop_gradient = True @@ -55,7 +59,6 @@ def multiclass_nms(bboxes, class TestYoloBoxPass(unittest.TestCase): - def test_yolo_box_pass(self): program = paddle.static.Program() with paddle.static.program_guard(program): @@ -67,18 +70,22 @@ class TestYoloBoxPass(unittest.TestCase): div = paddle.divide(im_shape, im_scale) cast = paddle.cast(div, "int32") boxes0, scores0 = paddle.vision.ops.yolo_box( - yolo_box0_x, cast, [116, 90, 156, 198, 373, 326], 80, 0.005, 32) + yolo_box0_x, cast, [116, 90, 156, 198, 373, 326], 80, 0.005, 32 + ) boxes1, scores1 = paddle.vision.ops.yolo_box( - yolo_box1_x, cast, [30, 61, 62, 45, 59, 119], 80, 0.005, 16) + yolo_box1_x, cast, [30, 61, 62, 45, 59, 119], 80, 0.005, 16 + ) boxes2, scores2 = paddle.vision.ops.yolo_box( - yolo_box2_x, cast, [10, 13, 16, 30, 33, 23], 80, 0.005, 8) + yolo_box2_x, cast, [10, 13, 16, 30, 33, 23], 80, 0.005, 8 + ) transpose0 = paddle.transpose(scores0, [0, 2, 1]) transpose1 = paddle.transpose(scores1, [0, 2, 1]) 
transpose2 = paddle.transpose(scores2, [0, 2, 1]) concat0 = paddle.concat([boxes0, boxes1, boxes2], 1) concat1 = paddle.concat([transpose0, transpose1, transpose2], 2) - out0, out1, out2 = multiclass_nms(concat0, concat1, 0.01, 1000, 100, - 0.45, True, 1., 80) + out0, out1, out2 = multiclass_nms( + concat0, concat1, 0.01, 1000, 100, 0.45, True, 1.0, 80 + ) graph = core.Graph(program.desc) core.get_pass("yolo_box_fuse_pass").apply(graph) graph = paddle.fluid.framework.IrGraph(graph) diff --git a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py index ca518747821b03116669c2c0c8b87c78669d7829..88c2024cff1518d6c2fbc3d92fc05c0f0efd177f 100644 --- a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py +++ b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py @@ -31,20 +31,22 @@ os.environ['CPU_NUM'] = '2' class BuildIrMemOptBase(unittest.TestCase): - def setup_reader(self): self.batch_size = 32 self.word_dict = paddle.dataset.imdb.word_dict() - self.train_reader = paddle.batch(paddle.dataset.imdb.train( - self.word_dict), - batch_size=self.batch_size) - - def check_network_convergence(self, - network, - use_cuda=True, - use_ir_memory_optimize=True, - enable_inplace=True, - iter=5): + self.train_reader = paddle.batch( + paddle.dataset.imdb.train(self.word_dict), + batch_size=self.batch_size, + ) + + def check_network_convergence( + self, + network, + use_cuda=True, + use_ir_memory_optimize=True, + enable_inplace=True, + iter=5, + ): if use_cuda and not core.is_compiled_with_cuda(): print('Skip use_cuda=True because Paddle is not compiled with cuda') return @@ -57,10 +59,9 @@ class BuildIrMemOptBase(unittest.TestCase): fluid.default_startup_program().random_seed = 100 fluid.default_main_program().random_seed = 100 - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") @@ -79,8 +80,9 @@ class BuildIrMemOptBase(unittest.TestCase): exe.run(fluid.default_startup_program()) train_cp = compiler.CompiledProgram(fluid.default_main_program()) - train_cp = train_cp.with_data_parallel(loss_name=cost.name, - build_strategy=build_strategy) + train_cp = train_cp.with_data_parallel( + loss_name=cost.name, build_strategy=build_strategy + ) fetch_list = [cost.name] begin = time.time() @@ -100,21 +102,23 @@ class BuildIrMemOptBase(unittest.TestCase): break end = time.time() - print("%.4f Instance per second" % ((self.batch_size * iter) / - (end - begin))) + print( + "%.4f Instance per second" + % ((self.batch_size * iter) / (end - begin)) + ) print(first_loss, last_loss) avg_last_loss_val = np.array(last_loss).mean() avg_first_loss_val = np.array(first_loss).mean() if math.isnan(float(avg_last_loss_val)) or math.isnan( - float(avg_first_loss_val)): + float(avg_first_loss_val) + ): sys.exit("got NaN loss, training failed.") return first_loss, last_loss class TestIrMemOptBase(BuildIrMemOptBase): - def setUp(self): self.network = None @@ -126,15 +130,22 @@ class TestIrMemOptBase(BuildIrMemOptBase): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(core.Scope()): - baseline_first_loss, baseline_last_loss = self.check_network_convergence( - self.network) + ( + baseline_first_loss, + baseline_last_loss, + ) = self.check_network_convergence(self.network) cur_first_loss, cur_last_loss = 
self.check_network_convergence( - self.network) - - self.assertAlmostEquals(np.mean(baseline_last_loss), - np.mean(cur_last_loss), - delta=1e-6) - self.assertAlmostEquals(np.mean(baseline_first_loss), - np.mean(cur_first_loss), - delta=1e-6) + self.network + ) + + self.assertAlmostEquals( + np.mean(baseline_last_loss), + np.mean(cur_last_loss), + delta=1e-6, + ) + self.assertAlmostEquals( + np.mean(baseline_first_loss), + np.mean(cur_first_loss), + delta=1e-6, + ) diff --git a/python/paddle/fluid/tests/unittests/jit_load_rename_var.py b/python/paddle/fluid/tests/unittests/jit_load_rename_var.py index caaca0716cdefa4397323069eb144f957ee7fc2e..2ba1e5522d1e9624c2bc90d349eeac57c53dfcf5 100644 --- a/python/paddle/fluid/tests/unittests/jit_load_rename_var.py +++ b/python/paddle/fluid/tests/unittests/jit_load_rename_var.py @@ -32,8 +32,9 @@ def rename_var_with_generator(names_old): else: temp_name = "_".join(temp_name) name_new = _generate_unique_var_name_sync_with_main_program( - temp_name) - if name_new not in names_old[:var_idx] + names_old[var_idx + 1:]: + temp_name + ) + if name_new not in names_old[:var_idx] + names_old[var_idx + 1 :]: break dict_rename_var_old_new[name_old] = name_new return dict_rename_var_old_new diff --git a/python/paddle/fluid/tests/unittests/launch_function_helper.py b/python/paddle/fluid/tests/unittests/launch_function_helper.py index 992bc58b5e490a9b547c049b3790c65204254a97..b8bb4cbc35ab11fe3f3912794988900b7f2b629f 100644 --- a/python/paddle/fluid/tests/unittests/launch_function_helper.py +++ b/python/paddle/fluid/tests/unittests/launch_function_helper.py @@ -59,7 +59,6 @@ def wait(procs, timeout=30): def _find_free_port(port_set): - def __free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind(('', 0)) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py index 4042b4da633388cbeb264495503187f648ed375d..133320c7f375e14b58d98f0219abcac3a516917f 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py @@ -23,15 +23,23 @@ _enable_legacy_dygraph() def check(): - print("check: _global_flags()['FLAGS_use_mkldnn']=", - _global_flags()["FLAGS_use_mkldnn"]) - print("check: fluid.get_flags('FLAGS_use_mkldnn')=", - fluid.get_flags(['FLAGS_use_mkldnn'])) + print( + "check: _global_flags()['FLAGS_use_mkldnn']=", + _global_flags()["FLAGS_use_mkldnn"], + ) + print( + "check: fluid.get_flags('FLAGS_use_mkldnn')=", + fluid.get_flags(['FLAGS_use_mkldnn']), + ) print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE']) - print("check: FLAGS_tracer_mkldnn_ops_on=", - _global_flags()['FLAGS_tracer_mkldnn_ops_on']) - print("check: FLAGS_tracer_mkldnn_ops_off=", - _global_flags()['FLAGS_tracer_mkldnn_ops_off']) + print( + "check: FLAGS_tracer_mkldnn_ops_on=", + _global_flags()['FLAGS_tracer_mkldnn_ops_on'], + ) + print( + "check: FLAGS_tracer_mkldnn_ops_off=", + _global_flags()['FLAGS_tracer_mkldnn_ops_off'], + ) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) b_np = np.random.uniform(-5, 5, (10, 20, 30)).astype(np.float32) helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu") diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py index 
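# A self-contained version of the free-port idiom __free_port relies on above:
# binding to port 0 lets the OS choose an unused port, which getsockname()
# then reports. The retry-against-a-set bookkeeping of _find_free_port is
# omitted; the function name free_port is illustrative.
import socket
from contextlib import closing


def free_port():
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(('', 0))
        return s.getsockname()[1]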
03786e0aed7fe88eeeabcbdac96d829ac092ac86..1bdca9244cc2bc3f95bcd73f142b32467788e17b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_use_mkldnn.py @@ -20,10 +20,14 @@ from paddle.fluid.framework import _global_flags def check(): - print("check: _global_flags()['FLAGS_use_mkldnn']=", - _global_flags()["FLAGS_use_mkldnn"]) - print("check: fluid.get_flags('FLAGS_use_mkldnn')=", - fluid.get_flags(['FLAGS_use_mkldnn'])) + print( + "check: _global_flags()['FLAGS_use_mkldnn']=", + _global_flags()["FLAGS_use_mkldnn"], + ) + print( + "check: fluid.get_flags('FLAGS_use_mkldnn')=", + fluid.get_flags(['FLAGS_use_mkldnn']), + ) print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE']) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) helper = LayerHelper(fluid.unique_name.generate(str("test")), act="relu") @@ -32,7 +36,7 @@ def check(): a = fluid.dygraph.to_variable(a_np) res1 = func(a) res2 = np.maximum(a_np, 0) - assert (np.array_equal(res1.numpy(), res2)) + assert np.array_equal(res1.numpy(), res2) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/mkldnn_op_test.py b/python/paddle/fluid/tests/unittests/mkldnn/mkldnn_op_test.py index 3b6788e6ab1264ec930b004a161d610cb9c78406..9d79ee3bf6933f9f8dd60bd6c1f4ef0bc6e69139 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/mkldnn_op_test.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/mkldnn_op_test.py @@ -18,12 +18,14 @@ import paddle.fluid as fluid def __assert_close(test_case, tensor, np_array, msg, atol=1e-4): - test_case.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), - msg) + test_case.assertTrue( + np.allclose(np.array(tensor), np_array, atol=atol), msg + ) -def check_if_mkldnn_primitives_exist_in_bwd(test_case, op_type, x, out, - out_grad, x_grad): +def check_if_mkldnn_primitives_exist_in_bwd( + test_case, op_type, x, out, out_grad, x_grad +): place = core.CPUPlace() var_dict = {'x': x, 'out': out, 'out@GRAD': out_grad, 'x@GRAD': x_grad} @@ -34,20 +36,23 @@ def check_if_mkldnn_primitives_exist_in_bwd(test_case, op_type, x, out, with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype=np.float32, - shape=ground_truth[name].shape) + block.create_var( + name=name, dtype=np.float32, shape=ground_truth[name].shape + ) - op = block.append_op(type=op_type, - inputs={ - 'X': block.var('x'), - }, - outputs={'Out': block.var('out')}, - attrs={'use_mkldnn': True}) + op = block.append_op( + type=op_type, + inputs={ + 'X': block.var('x'), + }, + outputs={'Out': block.var('out')}, + attrs={'use_mkldnn': True}, + ) # Generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - op.desc, set(), []) + op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -65,29 +70,35 @@ def check_if_mkldnn_primitives_exist_in_bwd(test_case, op_type, x, out, for i in range(2): out = exe.run( program, - feed={name: var_dict[name] - for name in ['x', 'out@GRAD']}, - fetch_list=['x@GRAD', 'out']) + feed={name: var_dict[name] for name in ['x', 'out@GRAD']}, + fetch_list=['x@GRAD', 'out'], + ) __assert_close(test_case, x_grad, out[0], 'x@GRAD') -def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(test_case, var_dict, - place, shape, - data_layout): +def check_if_mkldnn_batchnorm_primitives_exist_in_bwd( + test_case, var_dict, place, shape, 
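# A trimmed sketch of how check_if_mkldnn_primitives_exist_in_bwd above
# materializes the backward operator by hand: ask core.get_grad_op_desc for
# the grad-op descriptions matching a forward op, then copy the first one
# into the block's desc. All calls mirror the diff; the bookkeeping the test
# performs afterwards (feeding and fetching gradients) is not reproduced here.
from paddle.fluid import core


def append_grad_op(block, fwd_op, no_grad_set=None):
    grad_op_desc_list, _ = core.get_grad_op_desc(
        fwd_op.desc, no_grad_set or set(), []
    )
    new_op_desc = block.desc.append_op()
    new_op_desc.copy_from(grad_op_desc_list[0])
    return new_op_desc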
data_layout +): var_names = [ - 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean', - 'saved_variance' + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y', + 'saved_mean', + 'saved_variance', ] ground_truth = {name: var_dict[name] for name in var_names} program = fluid.Program() with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, dtype='float32', shape=ground_truth[name].shape + ) bn_op = block.append_op( type="batch_norm", inputs={ @@ -95,14 +106,14 @@ def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(test_case, var_dict, "Scale": block.var('scale'), "Bias": block.var('bias'), "Mean": block.var('mean'), - "Variance": block.var('variance') + "Variance": block.var('variance'), }, outputs={ "Y": block.var('y'), "MeanOut": block.var('mean'), # share memory "VarianceOut": block.var('variance'), # share memory "SavedMean": block.var('saved_mean'), - "SavedVariance": block.var('saved_variance') + "SavedVariance": block.var('saved_variance'), }, attrs={ "momentum": test_case.momentum, @@ -111,15 +122,17 @@ def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(test_case, var_dict, "data_layout": data_layout, "use_mkldnn": test_case.use_mkldnn, "fuse_with_relu": test_case.fuse_with_relu, - "use_global_stats": test_case.use_global_stats - }) - block.create_var(name='y@GRAD', - dtype='float32', - shape=var_dict['y'].shape) + "use_global_stats": test_case.use_global_stats, + }, + ) + block.create_var( + name='y@GRAD', dtype='float32', shape=var_dict['y'].shape + ) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - bn_op.desc, test_case.no_grad_set, []) + bn_op.desc, test_case.no_grad_set, [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -139,10 +152,17 @@ def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(test_case, var_dict, program, feed={ name: var_dict[name] - for name in - ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD'] + for name in [ + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y@GRAD', + ] }, - fetch_list=test_case.fetch_list) + fetch_list=test_case.fetch_list, + ) for id, name in enumerate(test_case.fetch_list): __assert_close(test_case, var_dict[name], out[id], name) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_bf16_mkldnn_op.py index f7d6763bddc607023b918186fc15f16aef7c9693..de9c8dcf4954220bb7ea82150385248184019d9a 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_bf16_mkldnn_op.py @@ -17,14 +17,16 @@ import unittest import numpy as np from scipy.special import erf import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTestTool, + convert_float_to_uint16, +) from paddle.fluid.tests.unittests.test_activation_op import TestActivation from paddle.fluid.tests.unittests.test_gelu_op import gelu @OpTestTool.skip_if_not_cpu_bf16() class MKLDNNBF16ActivationOp(metaclass=abc.ABCMeta): - @abc.abstractmethod def config(self): pass @@ -62,14 +64,15 @@ class MKLDNNBF16ActivationOp(metaclass=abc.ABCMeta): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - 
core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.out)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.out)], + ) class TestMKLDNNSigmoidBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "sigmoid" @@ -81,7 +84,6 @@ class TestMKLDNNSigmoidBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNSqrtBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "sqrt" @@ -96,7 +98,6 @@ class TestMKLDNNSqrtBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNGeluErfBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "gelu" @@ -104,19 +105,19 @@ class TestMKLDNNGeluErfBF16Op(MKLDNNBF16ActivationOp, TestActivation): return gelu(x, False) def op_grad(self, dout, x): - return (dout * - (0.5 + 0.5 * erf(x / np.sqrt(2)) + - (x / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.power(x, 2))))) + return dout * ( + 0.5 + + 0.5 * erf(x / np.sqrt(2)) + + (x / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.power(x, 2))) + ) class TestMKLDNNGeluErfDim2BF16Op(TestMKLDNNGeluErfBF16Op): - def init_data(self): self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32) class TestMKLDNNGeluTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "gelu" @@ -125,23 +126,30 @@ class TestMKLDNNGeluTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): def op_grad(self, dout, x): grad_part = np.tanh( - np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))) - return dout * 0.5 * (1 + grad_part) * (1 + np.sqrt(2 / np.pi) * - (x + 0.134145 * np.power(x, 3)) * - (1 - grad_part)) + np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)) + ) + return ( + dout + * 0.5 + * (1 + grad_part) + * ( + 1 + + np.sqrt(2 / np.pi) + * (x + 0.134145 * np.power(x, 3)) + * (1 - grad_part) + ) + ) def set_attrs(self): self.attrs = {"use_mkldnn": True, "approximate": True} class TestMKLDNNGeluTanhDim2BF16Op(TestMKLDNNGeluTanhBF16Op): - def init_data(self): self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32) class TestMKLDNNReluBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "relu" @@ -153,7 +161,6 @@ class TestMKLDNNReluBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNMishBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "mish" @@ -161,14 +168,17 @@ class TestMKLDNNMishBF16Op(MKLDNNBF16ActivationOp, TestActivation): return x * np.tanh(np.log(1 + np.exp(x))) def op_grad(self, dout, x): - omega = np.exp( - 3 * x) + 4 * np.exp(2 * x) + np.exp(x) * (4 * x + 6) + 4 * (x + 1) + omega = ( + np.exp(3 * x) + + 4 * np.exp(2 * x) + + np.exp(x) * (4 * x + 6) + + 4 * (x + 1) + ) delta = np.exp(2 * x) + 2 * np.exp(x) + 2 return dout * ((np.exp(x) * omega) / delta**2) class TestMKLDNNRelu6BF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "relu6" @@ -180,7 +190,6 @@ class TestMKLDNNRelu6BF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNLeakyReluBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "leaky_relu" @@ -196,7 +205,6 @@ class TestMKLDNNLeakyReluBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "swish" @@ -215,7 +223,6 @@ class TestMKLDNNSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNHardSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation): - 
def config(self): self.op_type = "hard_swish" @@ -229,7 +236,6 @@ class TestMKLDNNHardSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "tanh" @@ -237,11 +243,10 @@ class TestMKLDNNTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): return np.tanh(x) def op_grad(self, dout, x): - return dout * (1 - np.tanh(x)**2) + return dout * (1 - np.tanh(x) ** 2) class TestMKLDNNAbsBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "abs" @@ -253,7 +258,6 @@ class TestMKLDNNAbsBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNEluBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "elu" @@ -269,7 +273,6 @@ class TestMKLDNNEluBF16Op(MKLDNNBF16ActivationOp, TestActivation): class TestMKLDNNExpBF16Op(MKLDNNBF16ActivationOp, TestActivation): - def config(self): self.op_type = "exp" diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 0ca003b45f2ead713cef0d60168b8dddb50c62b3..12ab822d3e1c2c8f691476fa1ecdedbc622e616b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -17,13 +17,23 @@ import numpy as np from scipy.special import expit import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 -from paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestHardSwish, TestRelu6, TestSigmoid +from paddle.fluid.tests.unittests.test_activation_op import ( + TestActivation, + TestRelu, + TestTanh, + TestSqrt, + TestAbs, + TestLeakyRelu, + TestSwish, + TestHardSwish, + TestRelu6, + TestSigmoid, +) from paddle.fluid.tests.unittests.test_gelu_op import gelu from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd class TestMKLDNNReluDim2(TestRelu): - def setUp(self): super(TestMKLDNNReluDim2, self).setUp() @@ -34,7 +44,6 @@ class TestMKLDNNReluDim2(TestRelu): class TestMKLDNNRelu6Dim2(TestRelu6): - def setUp(self): super(TestMKLDNNRelu6Dim2, self).setUp() self.attrs.update({"use_mkldnn": True}) @@ -44,7 +53,6 @@ class TestMKLDNNRelu6Dim2(TestRelu6): class TestMKLDNNLeakyReluDim2(TestLeakyRelu): - def setUp(self): super(TestMKLDNNLeakyReluDim2, self).setUp() @@ -55,7 +63,6 @@ class TestMKLDNNLeakyReluDim2(TestLeakyRelu): class TestMKLDNNGeluDim2(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.float32 @@ -69,7 +76,6 @@ class TestMKLDNNGeluDim2(TestActivation): class TestMKLDNNGeluDim2Approx(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.float32 @@ -83,7 +89,6 @@ class TestMKLDNNGeluDim2Approx(TestActivation): class TestMKLDNNTanhDim2(TestTanh): - def setUp(self): super(TestMKLDNNTanhDim2, self).setUp() @@ -94,7 +99,6 @@ class TestMKLDNNTanhDim2(TestTanh): class TestMKLDNNSqrtDim2(TestSqrt): - def setUp(self): super(TestMKLDNNSqrtDim2, self).setUp() @@ -105,7 +109,6 @@ class TestMKLDNNSqrtDim2(TestSqrt): class TestMKLDNNAbsDim2(TestAbs): - def setUp(self): super(TestMKLDNNAbsDim2, self).setUp() self.attrs = {"use_mkldnn": True} @@ -115,7 +118,6 @@ class TestMKLDNNAbsDim2(TestAbs): class TestMKLDNNSwishDim2(TestSwish): - def setUp(self): super(TestMKLDNNSwishDim2, self).setUp() @@ -127,21 +129,18 @@ class 
TestMKLDNNSwishDim2(TestSwish): class TestMKLDNNHardSwishDim2(TestHardSwish): - def setUp(self): super(TestMKLDNNHardSwishDim2, self).setUp() self.attrs = {"use_mkldnn": True} class TestMKLDNNSigmoidDim2(TestSigmoid): - def setUp(self): super(TestMKLDNNSigmoidDim2, self).setUp() self.attrs = {"use_mkldnn": True} class TestMKLDNNReluDim4(TestRelu): - def setUp(self): super(TestMKLDNNReluDim4, self).setUp() @@ -159,7 +158,6 @@ class TestMKLDNNReluDim4(TestRelu): class TestMKLDNNLeakyReluDim4(TestLeakyRelu): - def setUp(self): super(TestMKLDNNLeakyReluDim4, self).setUp() @@ -177,7 +175,6 @@ class TestMKLDNNLeakyReluDim4(TestLeakyRelu): class TestMKLDNNGeluDim4(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.float32 @@ -191,7 +188,6 @@ class TestMKLDNNGeluDim4(TestActivation): class TestMKLDNNGeluDim4Approx(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.float32 @@ -204,10 +200,10 @@ class TestMKLDNNGeluDim4Approx(TestActivation): self.attrs = {"use_mkldnn": True, "approximate": True} -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestMKLDNNGeluBf16Dim4(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.uint16 @@ -226,10 +222,10 @@ class TestMKLDNNGeluBf16Dim4(TestActivation): pass -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestMKLDNNGeluBf16Dim4Approx(TestActivation): - def setUp(self): self.op_type = "gelu" self.dtype = np.uint16 @@ -249,7 +245,6 @@ class TestMKLDNNGeluBf16Dim4Approx(TestActivation): class TestMKLDNNTanhDim4(TestTanh): - def setUp(self): super(TestMKLDNNTanhDim4, self).setUp() @@ -261,7 +256,6 @@ class TestMKLDNNTanhDim4(TestTanh): class TestMKLDNNSqrtDim4(TestSqrt): - def setUp(self): super(TestMKLDNNSqrtDim4, self).setUp() @@ -273,7 +267,6 @@ class TestMKLDNNSqrtDim4(TestSqrt): class TestMKLDNNAbsDim4(TestAbs): - def setUp(self): super(TestMKLDNNAbsDim4, self).setUp() @@ -289,7 +282,6 @@ class TestMKLDNNAbsDim4(TestAbs): class TestMKLDNNSwishDim4(TestSwish): - def setUp(self): super(TestMKLDNNSwishDim4, self).setUp() @@ -311,12 +303,12 @@ def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0): if x_dtype == 'float16': x_dtype = 'float16' x = x.astype('float32') - return (x * np.minimum(np.maximum(x + offset, 0.), threshold) / - scale).astype(x_dtype) + return ( + x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale + ).astype(x_dtype) class TestMKLDNNHardSwishDim4(TestHardSwish): - def setUp(self): super(TestMKLDNNHardSwishDim4, self).setUp() @@ -338,7 +330,6 @@ class TestMKLDNNHardSwishDim4(TestHardSwish): class TestMKLDNNMish(TestActivation): - def setUp(self): self.op_type = "mish" self.dtype = np.float32 @@ -352,7 +343,6 @@ class TestMKLDNNMish(TestActivation): class TestMKLDNNRound(TestActivation): - def setUp(self): self.op_type = "round" @@ -365,7 +355,6 @@ class TestMKLDNNRound(TestActivation): class TestMKLDNNSigmoidDim4(TestSigmoid): - def setUp(self): super(TestMKLDNNSigmoidDim4, self).setUp() @@ -377,7 +366,6 @@ class TestMKLDNNSigmoidDim4(TestSigmoid): class TestMKLDNNEluDefaultAlpha(TestActivation): - def setUp(self): self.op_type = "elu" self.set_alpha() @@ -387,8 +375,8 @@ class TestMKLDNNEluDefaultAlpha(TestActivation): self.inputs = {'X': x} self.attrs = {'use_mkldnn': True, 
'alpha': self.alpha} self.outputs = { - 'Out': - np.maximum(0, x) + np.minimum(0, self.alpha * (np.exp(x) - 1)) + 'Out': np.maximum(0, x) + + np.minimum(0, self.alpha * (np.exp(x) - 1)) } def set_alpha(self): @@ -396,13 +384,11 @@ class TestMKLDNNEluDefaultAlpha(TestActivation): class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha): - def set_alpha(self): self.alpha = 2.5 class TestMKLDNNExpOp(TestActivation): - def setUp(self): self.op_type = "exp" x = np.random.random((5, 5, 4)).astype("float32") @@ -414,7 +400,6 @@ class TestMKLDNNExpOp(TestActivation): # Check if primitives already exist in backward class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase): - def setUp(self): super(TestMKLDNNAbsPrimitivesAlreadyExist, self).setUp() @@ -430,9 +415,9 @@ class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase): return out_grad * np.sign(x) def test_check(self): - check_if_mkldnn_primitives_exist_in_bwd(self, self.op_type, self.x, - self.out, self.out_grad, - self.x_grad) + check_if_mkldnn_primitives_exist_in_bwd( + self, self.op_type, self.x, self.out, self.out_grad, self.x_grad + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py index f8c5ca3d1be890e30510a014dc85017ec9f7fa5a..f4190f06634587491aca42a3cc50110b4ec6aca0 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py @@ -16,47 +16,69 @@ import unittest import numpy as np import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator -from paddle.fluid.tests.unittests.test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad +from paddle.fluid.tests.unittests.test_batch_norm_op import ( + TestBatchNormOpInference, + TestBatchNormOpTraining, + _reference_training, + _reference_grad, +) from mkldnn_op_test import check_if_mkldnn_batchnorm_primitives_exist_in_bwd _set_use_system_allocator(True) class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining): - def init_kernel_type(self): self.use_mkldnn = True self.data_formats = ["NCHW"] - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): if data_layout != "NCHW" and data_layout != "NHWC": raise ValueError("Unknown data order.") # run forward y, saved_mean, saved_variance = _reference_training( - x, scale, bias, epsilon, data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = saved_variance * (1. 
- momentum) + momentum * variance + x, scale, bias, epsilon, data_layout + ) + mean_out = saved_mean * (1.0 - momentum) + momentum * mean + variance_out = saved_variance * (1.0 - momentum) + momentum * variance # run backward - x_grad, scale_grad, bias_grad = _reference_grad(x, y_grad, scale, - saved_mean, - saved_variance, epsilon, - data_layout) - - return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, saved_variance, epsilon, data_layout + ) + + return ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) class TestMKLDNNBatchNormOpTraining_NHWC(TestMKLDNNBatchNormOpTraining): - def init_kernel_type(self): self.use_mkldnn = True self.data_formats = ["NHWC"] class TestMKLDNNBatchNormOpExistedPrimitives(TestMKLDNNBatchNormOpTraining): - def init_test_case(self): TestMKLDNNBatchNormOpTraining.init_test_case(self) self.fetch_list = ['y', 'x@GRAD'] @@ -74,20 +96,38 @@ class TestMKLDNNBatchNormOpExistedPrimitives(TestMKLDNNBatchNormOpTraining): mean, variance = self.set_mean_variance(scale_shape, x, data_layout) y_grad = np.random.random_sample(shape).astype(np.float32) - y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward( - x, y_grad, scale, bias, mean, variance, self.epsilon, self.momentum, - shape, data_layout) + ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) = self.ref_forward_backward( + x, + y_grad, + scale, + bias, + mean, + variance, + self.epsilon, + self.momentum, + shape, + data_layout, + ) var_dict = locals() var_dict['y@GRAD'] = y_grad var_dict['x@GRAD'] = x_grad var_dict['scale@GRAD'] = scale_grad var_dict['bias@GRAD'] = bias_grad check_if_mkldnn_batchnorm_primitives_exist_in_bwd( - self, var_dict, place, shape, data_layout) + self, var_dict, place, shape, data_layout + ) class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference): - def init_kernel_type(self): self.use_mkldnn = True @@ -98,7 +138,6 @@ class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference): class TestMKLDNNBatchNormOpInference_NHWC(TestMKLDNNBatchNormOpInference): - def test_check_output(self): place = core.CPUPlace() data_format = "NHWC" @@ -106,7 +145,6 @@ class TestMKLDNNBatchNormOpInference_NHWC(TestMKLDNNBatchNormOpInference): class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference): - def init_kernel_type(self): self.use_mkldnn = True self.fuse_with_relu = True @@ -119,5 +157,6 @@ class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_mkldnn_op.py index 2a91d6257300d2aed389ce5e2f2480baf568268f..273b232ecf8307617eea659b3ea0f1ba1eff4a7c 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_mkldnn_op.py @@ -19,12 +19,9 @@ from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -def bilinear_interp_mkldnn_np(input, - out_h, - out_w, - out_size=None, - actual_shape=None, - data_layout='NCHW'): +def bilinear_interp_mkldnn_np( + input, out_h, out_w, out_size=None, 
actual_shape=None, data_layout='NCHW' +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -54,10 +51,12 @@ def bilinear_interp_mkldnn_np(input, input_h1_w0 = input[:, :, h1, w0] input_h0_w1 = input[:, :, h0, w1] input_h1_w1 = input[:, :, h1, w1] - out[:, :, oh, - ow] = input_h0_w0 * (1 - Wh) * (1 - Ww) + input_h1_w0 * Wh * ( - 1 - Ww) + input_h0_w1 * (1 - - Wh) * Ww + input_h1_w1 * Wh * Ww + out[:, :, oh, ow] = ( + input_h0_w0 * (1 - Wh) * (1 - Ww) + + input_h1_w0 * Wh * (1 - Ww) + + input_h0_w1 * (1 - Wh) * Ww + + input_h1_w1 * Wh * Ww + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -67,7 +66,6 @@ def bilinear_interp_mkldnn_np(input, @skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.") class TestBilinearInterpMKLDNNOp(OpTest): - def init_test_case(self): pass @@ -102,9 +100,14 @@ class TestBilinearInterpMKLDNNOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_mkldnn_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.data_layout) + output_np = bilinear_interp_mkldnn_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: @@ -117,7 +120,7 @@ class TestBilinearInterpMKLDNNOp(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': output_np} @@ -126,7 +129,6 @@ class TestBilinearInterpMKLDNNOp(OpTest): class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 27 @@ -136,73 +138,67 @@ class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 1. + self.scale = 1.0 class TestBilinearNeighborInterpDataLayout(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [2, 4, 4, 5] self.out_h = 6 self.out_w = 7 - self.scale = 0. + self.scale = 0.0 self.data_layout = "NHWC" class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 128 - self.scale = 0. + self.scale = 0.0 class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([13, 13]).astype("int32") class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. 
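# Editorial aside, not part of the patch: the four-corner weighting that
# bilinear_interp_mkldnn_np applies per output pixel, reduced to a single 2-D
# sample point. bilinear_sample is a hypothetical helper for illustration only.
import numpy as np


def bilinear_sample(img, h, w):
    # img: [H, W]; (h, w) is a fractional location inside the image.
    h0, w0 = int(np.floor(h)), int(np.floor(w))
    h1 = min(h0 + 1, img.shape[0] - 1)
    w1 = min(w0 + 1, img.shape[1] - 1)
    Wh, Ww = h - h0, w - w0
    return (
        img[h0, w0] * (1 - Wh) * (1 - Ww)
        + img[h1, w0] * Wh * (1 - Ww)
        + img[h0, w1] * (1 - Wh) * Ww
        + img[h1, w1] * Wh * Ww
    )


img = np.arange(12, dtype=np.float32).reshape(3, 4)
# Sampling exactly on a grid point returns that pixel; a midpoint averages 2x2.
assert bilinear_sample(img, 1.0, 2.0) == img[1, 2]
assert bilinear_sample(img, 0.5, 0.5) == np.mean(img[0:2, 0:2])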
+ self.scale = 0.0 if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py index 55f57764b12f3b93790493d9f121357c87ec868d..e6cf8381fa28ae3acc7e42a3cc44ac24e209af1f 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_bilinear_interp_v2_mkldnn_op.py @@ -19,12 +19,9 @@ from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -def bilinear_interp_mkldnn_np(input, - out_h, - out_w, - out_size=None, - actual_shape=None, - data_layout='NCHW'): +def bilinear_interp_mkldnn_np( + input, out_h, out_w, out_size=None, actual_shape=None, data_layout='NCHW' +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -54,10 +51,12 @@ def bilinear_interp_mkldnn_np(input, input_h1_w0 = input[:, :, h1, w0] input_h0_w1 = input[:, :, h0, w1] input_h1_w1 = input[:, :, h1, w1] - out[:, :, oh, - ow] = input_h0_w0 * (1 - Wh) * (1 - Ww) + input_h1_w0 * Wh * ( - 1 - Ww) + input_h0_w1 * (1 - - Wh) * Ww + input_h1_w1 * Wh * Ww + out[:, :, oh, ow] = ( + input_h0_w0 * (1 - Wh) * (1 - Ww) + + input_h1_w0 * Wh * (1 - Ww) + + input_h0_w1 * (1 - Wh) * Ww + + input_h1_w1 * Wh * Ww + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -67,7 +66,6 @@ def bilinear_interp_mkldnn_np(input, @skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.") class TestBilinearInterpMKLDNNOp(OpTest): - def init_test_case(self): pass @@ -116,9 +114,14 @@ class TestBilinearInterpMKLDNNOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_mkldnn_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.data_layout) + output_np = bilinear_interp_mkldnn_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.data_layout, + ) if isinstance(self.scale, float): self.scale = [self.scale, self.scale] @@ -134,7 +137,7 @@ class TestBilinearInterpMKLDNNOp(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': output_np} @@ -143,7 +146,6 @@ class TestBilinearInterpMKLDNNOp(OpTest): class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 27 @@ -153,7 +155,6 @@ class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 @@ -161,7 +162,6 @@ class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -170,7 +170,6 @@ class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -180,7 +179,6 @@ class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp): - def 
init_test_case(self): self.input_shape = [1, 1, 9, 6] self.out_h = 12 @@ -189,7 +187,6 @@ class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -199,7 +196,6 @@ class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp): class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [2, 3, 32, 64] self.out_h = 32 @@ -210,5 +206,6 @@ class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp): if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py index 8f7267d212ebd771ccf772d1ce7945300d94d41a..92b98d35d02e96e9efe4f7279d46078b68f16b36 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py @@ -20,10 +20,10 @@ import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestCastBF16ToFP32MKLDNNOp(OpTest): - def init_data(self): self.out = np.random.random(size=[10, 10]).astype("float32") self.x = convert_float_to_uint16(self.out) @@ -32,12 +32,15 @@ class TestCastBF16ToFP32MKLDNNOp(OpTest): self.init_data() self.inputs = {'X': self.x} self.outputs = {'Out': self.out} - prepare_dtype = lambda x: int(core.VarDesc.VarType.BF16 if x.dtype != np - .float32 else core.VarDesc.VarType.FP32) + prepare_dtype = lambda x: int( + core.VarDesc.VarType.BF16 + if x.dtype != np.float32 + else core.VarDesc.VarType.FP32 + ) self.attrs = { 'in_dtype': prepare_dtype(self.x), 'out_dtype': prepare_dtype(self.out), - 'use_mkldnn': True + 'use_mkldnn': True, } self.op_type = 'cast' @@ -46,29 +49,28 @@ class TestCastBF16ToFP32MKLDNNOp(OpTest): def test_check_grad(self): self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", check_dygraph=False, user_defined_grads=[self.inputs['X']], - user_defined_grad_outputs=[self.outputs['Out']]) + user_defined_grad_outputs=[self.outputs['Out']], + ) class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp): - def init_data(self): self.x = np.random.random(size=[2, 6]).astype("float32") self.out = convert_float_to_uint16(self.x) class TestCastBF16ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp): - def init_data(self): self.x = np.random.random(size=[6, 13]).astype("uint16") self.out = self.x class TestCastFP32ToFP32MKLDNNOp(TestCastBF16ToFP32MKLDNNOp): - def init_data(self): self.x = np.random.random(size=[7, 15]).astype("float32") self.out = self.x diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py index b9a9119e5adfa315d571f1219a5b23b8533f6409..012c7cb018bbb9c787e4c0e015d4ef9157dc69cc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py @@ -14,14 +14,17 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from 
paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) import paddle import paddle.fluid.core as core @OpTestTool.skip_if_not_cpu_bf16() class TestClipOneDNNOp(OpTest): - def setUp(self): self.op_type = "clip" self.set_inputs() @@ -29,10 +32,16 @@ class TestClipOneDNNOp(OpTest): self.set_additional_inputs() self.adjust_op_settings() - self.min = self.attrs[ - 'min'] if 'Min' not in self.inputs else self.inputs['Min'] - self.max = self.attrs[ - 'max'] if 'Max' not in self.inputs else self.inputs['Max'] + self.min = ( + self.attrs['min'] + if 'Min' not in self.inputs + else self.inputs['Min'] + ) + self.max = ( + self.attrs['max'] + if 'Max' not in self.inputs + else self.inputs['Max'] + ) self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)} @@ -57,19 +66,16 @@ class TestClipOneDNNOp(OpTest): class TestClipMinAsInputOneDNNOp(TestClipOneDNNOp): - def set_additional_inputs(self): self.inputs['Min'] = np.array([6.8]).astype('float32') class TestClipMaxAsInputOneDNNOp(TestClipOneDNNOp): - def set_additional_inputs(self): self.inputs['Max'] = np.array([9.1]).astype('float32') class TestClipMaxAndMinAsInputsOneDNNOp(TestClipOneDNNOp): - def set_additional_inputs(self): self.inputs['Max'] = np.array([8.5]).astype('float32') self.inputs['Min'] = np.array([7.1]).astype('float32') @@ -77,10 +83,8 @@ class TestClipMaxAndMinAsInputsOneDNNOp(TestClipOneDNNOp): # BF16 TESTS def create_bf16_test_class(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestClipBF16OneDNNOp(parent): - def set_inputs(self): self.x_fp32 = np.random.random((10, 10)).astype(np.float32) * 25 self.inputs = {'X': convert_float_to_uint16(self.x_fp32)} @@ -95,8 +99,10 @@ def create_bf16_test_class(parent): for i in range(self.dx.shape[0]): for j in range(self.dx.shape[1]): - if self.x_fp32[j][i] > self.min and self.x_fp32[j][ - i] < self.max: + if ( + self.x_fp32[j][i] > self.min + and self.x_fp32[j][i] < self.max + ): self.dx[j][i] = self.dout[j][i] def test_check_output(self): @@ -105,10 +111,12 @@ def create_bf16_test_class(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) cls_name = "{0}_{1}".format(parent.__name__, "BF16") TestClipBF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py index f10b04bdb5be2322db009496e2834ca74099f448..eefb3c766abec4c7c38be281695feb4e394ebfd3 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_bf16_mkldnn_op.py @@ -20,10 +20,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestConcatBf16Op(OpTest): - def setUp(self): self.op_type = "concat" self.use_mkldnn = True @@ -35,14 +35,15 @@ class TestConcatBf16Op(OpTest): self.attrs = { 'axis': self.axis, 'use_mkldnn': True, - 'mkldnn_data_type': self.mkldnn_data_type + 'mkldnn_data_type': self.mkldnn_data_type, } self.sections = 
[self.x0.shape[self.axis]] * 2 self.sections[1] += self.x1.shape[self.axis] - self.output = np.concatenate((self.x0, self.x1, self.x2), - axis=self.axis).astype(np.uint16) + self.output = np.concatenate( + (self.x0, self.x1, self.x2), axis=self.axis + ).astype(np.uint16) self.outputs = {'Out': self.output} def calculate_grads(self): @@ -55,20 +56,25 @@ class TestConcatBf16Op(OpTest): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["x0", "x1", "x2"], + core.CPUPlace(), + ["x0", "x1", "x2"], "Out", user_defined_grads=[self.dxs[0], self.dxs[1], self.dxs[2]], - user_defined_grad_outputs=[self.dout]) + user_defined_grad_outputs=[self.dout], + ) -# --------------------test concat bf16 in with axis 0-------------------- + # --------------------test concat bf16 in with axis 0-------------------- def init_test_data(self): self.x0 = convert_float_to_uint16( - np.random.random(self.x0_shape).astype(np.float32)) + np.random.random(self.x0_shape).astype(np.float32) + ) self.x1 = convert_float_to_uint16( - np.random.random(self.x1_shape).astype(np.float32)) + np.random.random(self.x1_shape).astype(np.float32) + ) self.x2 = convert_float_to_uint16( - np.random.random(self.x2_shape).astype(np.float32)) + np.random.random(self.x2_shape).astype(np.float32) + ) def init_axis(self): self.axis = 0 @@ -83,7 +89,6 @@ class TestConcatBf16Op(OpTest): class TestAxis1Case(TestConcatBf16Op): - def init_axis(self): self.axis = 1 @@ -97,7 +102,6 @@ class TestAxis1Case(TestConcatBf16Op): class TestAxis2Case(TestConcatBf16Op): - def init_axis(self): self.axis = 2 @@ -111,7 +115,6 @@ class TestAxis2Case(TestConcatBf16Op): class TestAxis3Case(TestConcatBf16Op): - def init_axis(self): self.axis = 3 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_int8_mkldnn_op.py index da07bce944726b1df78140e295616bce1251bc1a..5d275c7fb434f576672a21782b440074780fa5bc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_int8_mkldnn_op.py @@ -18,7 +18,6 @@ from paddle.fluid.tests.unittests.op_test import OpTest class TestConcatOp(OpTest): - def setUp(self): self.op_type = "concat" self.use_mkldnn = True @@ -29,8 +28,9 @@ class TestConcatOp(OpTest): self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]} self.attrs = {'axis': self.axis, 'use_mkldnn': True} - self.output = np.concatenate((self.x0, self.x1, self.x2), - axis=self.axis).astype('int') + self.output = np.concatenate( + (self.x0, self.x1, self.x2), axis=self.axis + ).astype('int') self.outputs = {'Out': self.output} @@ -38,7 +38,7 @@ class TestConcatOp(OpTest): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output(check_dygraph=False) -#--------------------test concat s8 in with axis 0-------------------- + # --------------------test concat s8 in with axis 0-------------------- def init_test_data(self): self.x0 = (np.random.randint(0, 100, self.x0_shape) - 50).astype('int8') @@ -54,11 +54,10 @@ class TestConcatOp(OpTest): self.x2_shape = [3, 2, 1, 2] -#--------------------test concat u8 in with axis 0-------------------- +# --------------------test concat u8 in with axis 0-------------------- class TestConcatOp2(TestConcatOp): - def init_test_data(self): self.x0 = (np.random.randint(0, 100, self.x0_shape)).astype('uint8') self.x1 = (np.random.randint(0, 50, self.x1_shape)).astype('uint8') @@ -75,10 +74,9 @@ class 
TestConcatOp2(TestConcatOp): def create_test_int8_class(parent): - #--------------------test concat s8/u8 in with axis 1-------------------- + # --------------------test concat s8/u8 in with axis 1-------------------- class TestAxis1Case(parent): - def init_axis(self): self.axis = 1 @@ -87,10 +85,9 @@ def create_test_int8_class(parent): self.x1_shape = [1, 2, 5, 5] self.x2_shape = [1, 3, 5, 5] -#--------------------test concat s8/u8 in with axis 2-------------------- + # --------------------test concat s8/u8 in with axis 2-------------------- class TestAxis2Case(parent): - def init_axis(self): self.axis = 2 @@ -99,11 +96,9 @@ def create_test_int8_class(parent): self.x1_shape = [2, 3, 5, 5] self.x2_shape = [2, 3, 6, 5] - -#--------------------test concat s8/u8 in with axis 3-------------------- + # --------------------test concat s8/u8 in with axis 3-------------------- class TestAxis3Case(parent): - def init_axis(self): self.axis = 3 @@ -122,10 +117,12 @@ def create_test_int8_class(parent): globals()[cls_name_2] = TestAxis2Case globals()[cls_name_3] = TestAxis3Case + create_test_int8_class(TestConcatOp) create_test_int8_class(TestConcatOp2) if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py index 1788923e20b2e0ada564165fb1930b615248fcec..549c9620be8f7f948719193d02bd53d32c85092d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py @@ -21,7 +21,6 @@ from paddle import enable_static class TestConcatAxis0OneDNNOp(OpTest): - def setUp(self): self.op_type = "concat" self.mkldnn_data_type = "float32" @@ -33,11 +32,12 @@ class TestConcatAxis0OneDNNOp(OpTest): self.attrs = { 'axis': self.axis, 'use_mkldnn': True, - 'mkldnn_data_type': self.mkldnn_data_type + 'mkldnn_data_type': self.mkldnn_data_type, } - self.output = np.concatenate((self.x0, self.x1, self.x2), - axis=self.axis).astype(self.dtype) + self.output = np.concatenate( + (self.x0, self.x1, self.x2), axis=self.axis + ).astype(self.dtype) self.outputs = {'Out': self.output} @@ -68,7 +68,6 @@ class TestConcatAxis0OneDNNOp(OpTest): class TestConcatAxis1OneDNNOp(TestConcatAxis0OneDNNOp): - def init_axis(self): self.axis = 1 @@ -79,7 +78,6 @@ class TestConcatAxis1OneDNNOp(TestConcatAxis0OneDNNOp): class TestConcatAxis2OneDNNOp(TestConcatAxis0OneDNNOp): - def init_axis(self): self.axis = 2 @@ -90,7 +88,6 @@ class TestConcatAxis2OneDNNOp(TestConcatAxis0OneDNNOp): class TestConcatAxis3OneDNNOp(TestConcatAxis0OneDNNOp): - def init_axis(self): self.axis = 3 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py index e4f23f195d2f600b720d495f14740db09dae3533..80657bad6317d286f42650fed11f499df0fbf06b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py @@ -16,8 +16,15 @@ import unittest import numpy as np import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16, OpTestTool -from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + OpTestTool, +) +from 
paddle.fluid.tests.unittests.test_conv2d_op import ( + conv2d_forward_naive, + TestConv2DOp, +) def conv2d_residual_naive(out, residual): @@ -26,10 +33,10 @@ def conv2d_residual_naive(out, residual): return out -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestConv2DBF16Op(TestConv2DOp): - def setUp(self): self.op_type = "conv2d" self.use_cudnn = False @@ -53,7 +60,7 @@ class TestConv2DBF16Op(TestConv2DOp): self.conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } self.input = np.random.random(self.input_size).astype(np.float32) @@ -61,16 +68,18 @@ class TestConv2DBF16Op(TestConv2DOp): self.inputs_fp32 = {'Input': self.input, 'Filter': self.filter} - conv_out, _, _, _, _ = conv2d_forward_naive(self.input, self.filter, - self.groups, - self.conv2d_param) + conv_out, _, _, _, _ = conv2d_forward_naive( + self.input, self.filter, self.groups, self.conv2d_param + ) self.conv_output_float = conv_out if self.fuse_residual: self.input_residual = np.random.random( - self.input_residual_size).astype(np.float32) + self.input_residual_size + ).astype(np.float32) self.conv_output_float = conv2d_residual_naive( - self.conv_output_float, self.input_residual) + self.conv_output_float, self.input_residual + ) self.conv_output = convert_float_to_uint16(self.conv_output_float) self.outputs = {'Output': self.conv_output} elif self.force_fp32_output: @@ -87,15 +96,16 @@ class TestConv2DBF16Op(TestConv2DOp): self.filter = convert_float_to_uint16(self.filter) self.inputs = { - 'Input': - self.input, - 'Filter': - OpTest.np_dtype_to_fluid_dtype(self.filter.astype(self.weight_type)) + 'Input': self.input, + 'Filter': OpTest.np_dtype_to_fluid_dtype( + self.filter.astype(self.weight_type) + ), } if self.fuse_residual: self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( - convert_float_to_uint16(self.input_residual)) + convert_float_to_uint16(self.input_residual) + ) self.attrs = { 'strides': self.stride, @@ -106,7 +116,7 @@ class TestConv2DBF16Op(TestConv2DOp): 'use_mkldnn': self.use_mkldnn, 'mkldnn_data_type': self.mkldnn_data_type, 'force_fp32_output': self.force_fp32_output, - 'fuse_residual_connection': self.fuse_residual + 'fuse_residual_connection': self.fuse_residual, } self.init_additional_attrs() @@ -156,7 +166,6 @@ class TestConv2DBF16Op(TestConv2DOp): @OpTestTool.skip_if_not_cpu_bf16() class TestConv2DWithGradBF16Op(TestConv2DBF16Op): - def init_fuse_relu(self): self.fuse_activation = None @@ -177,10 +186,12 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op): dx, dweights = conv_backward(dout, x, w, self.conv2d_param) self.check_grad_with_place( - core.CPUPlace(), ["Input", "Filter"], + core.CPUPlace(), + ["Input", "Filter"], "Output", user_defined_grads=[dx, dweights], - user_defined_grad_outputs=[convert_float_to_uint16(dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(dout)], + ) def test_check_grad_no_filter(self): dout = self.conv_output_float @@ -190,11 +201,13 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op): dx, _ = conv_backward(dout, x, w, self.conv2d_param) self.check_grad_with_place( - core.CPUPlace(), ["Input"], + core.CPUPlace(), + ["Input"], "Output", set(['Filter']), user_defined_grads=[dx], - user_defined_grad_outputs=[convert_float_to_uint16(dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(dout)], + ) def test_check_grad_no_input(self): dout = 
self.conv_output_float @@ -204,11 +217,13 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op): _, dweights = conv_backward(dout, x, w, self.conv2d_param) self.check_grad_with_place( - core.CPUPlace(), ["Filter"], + core.CPUPlace(), + ["Filter"], "Output", set(['Input']), user_defined_grads=[dweights], - user_defined_grad_outputs=[convert_float_to_uint16(dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(dout)], + ) def conv_backward(dout, x, w, params): @@ -224,7 +239,7 @@ def conv_backward(dout, x, w, params): H_out = int(1 + (H + 2 * padding - KH) / stride[0]) W_out = int(1 + (W + 2 * padding - KW) / stride[1]) - x_padded = np.pad(x, ((0, ), (0, ), (padding, ), (padding, )), 'constant') + x_padded = np.pad(x, ((0,), (0,), (padding,), (padding,)), 'constant') for n in range(N): for oc in range(OC): @@ -233,11 +248,17 @@ def conv_backward(dout, x, w, params): for k in range(H_out): for l in range(W_out): for ic in range(IC): - dweights[oc, ic, i, j] += x_padded[ - n, ic, i + k * stride[0], - j + l * stride[1]] * dout[n, oc, k, l] - - dx_padded = np.pad(dx, ((0, ), (0, ), (padding, ), (padding, )), 'constant') + dweights[oc, ic, i, j] += ( + x_padded[ + n, + ic, + i + k * stride[0], + j + l * stride[1], + ] + * dout[n, oc, k, l] + ) + + dx_padded = np.pad(dx, ((0,), (0,), (padding,), (padding,)), 'constant') w_ = np.zeros_like(w) for i in range(KH): @@ -251,10 +272,14 @@ def conv_backward(dout, x, w, params): for kh in range(KH): for kw in range(KW): for ic in range(IC): - dx_padded[n, ic, stride[0] * i + kh, - stride[1] * j + - kw] += dout[n, oc, i, j] * w[oc, ic, - kh, kw] + dx_padded[ + n, + ic, + stride[0] * i + kh, + stride[1] * j + kw, + ] += ( + dout[n, oc, i, j] * w[oc, ic, kh, kw] + ) if padding == 0: dx = dx_padded @@ -265,21 +290,18 @@ def conv_backward(dout, x, w, params): class TestConv2DBF16WithPadding1(TestConv2DWithGradBF16Op): - def init_test_case(self): TestConv2DWithGradBF16Op.init_test_case(self) self.pad = [1, 1] class TestConv2DBF16WithStride2(TestConv2DWithGradBF16Op): - def init_test_case(self): TestConv2DWithGradBF16Op.init_test_case(self) self.stride = [2, 3] class TestConv2D(TestConv2DBF16Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -294,7 +316,6 @@ class TestConv2D(TestConv2DBF16Op): class TestWithPad(TestConv2D): - def init_test_case(self): TestConv2D.init_test_case(self) self.pad = [1, 1] @@ -302,13 +323,11 @@ class TestWithPad(TestConv2D): class TestWithGroup(TestConv2D): - def init_group(self): self.groups = 3 class TestWithStride(TestConv2DBF16Op): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -323,7 +342,6 @@ class TestWithStride(TestConv2DBF16Op): class TestWithDilations(TestConv2DBF16Op): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -339,7 +357,6 @@ class TestWithDilations(TestConv2DBF16Op): class TestWith1x1ForceFP32Output(TestConv2DBF16Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -356,7 +373,6 @@ class TestWith1x1ForceFP32Output(TestConv2DBF16Op): class TestWithInput1x1Filter1x1(TestConv2DBF16Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -372,5 +388,6 @@ class TestWithInput1x1Filter1x1(TestConv2DBF16Op): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py index 
4ec61b0bb0b8ab061763eadfb7e1af165bb5e049..c4113c12c4cbb52db6b0a9108187199962ff6b31 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -17,7 +17,10 @@ import numpy as np import os import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest -from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp +from paddle.fluid.tests.unittests.test_conv2d_op import ( + conv2d_forward_naive, + TestConv2DOp, +) def conv2d_forward_refer(input, filter, group, conv_param): @@ -25,10 +28,10 @@ def conv2d_forward_refer(input, filter, group, conv_param): return out -@unittest.skipIf(not core.supports_int8(), - "place does not support int8 computation") +@unittest.skipIf( + not core.supports_int8(), "place does not support int8 computation" +) class TestConv2DInt8Op(TestConv2DOp): - def setUp(self): self.op_type = "conv2d" self.use_cudnn = False @@ -49,85 +52,109 @@ class TestConv2DInt8Op(TestConv2DOp): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } # This implementation of convolution quantization is based on OneDNN documentation # https://oneapi-src.github.io/oneDNN/dev_guide_int8_computations.html#doxid-dev-guide-int8-computations-1dg-i8-comp-s11 - inner_scale = 1. if self.fuse_activation != "" else self.scale_out - activation_scale = self.scale_out if self.fuse_activation != "" else 1. - scale_output_shift = (inner_scale / - (self.scale_in * self.scale_weights[0])) + inner_scale = 1.0 if self.fuse_activation != "" else self.scale_out + activation_scale = self.scale_out if self.fuse_activation != "" else 1.0 + scale_output_shift = inner_scale / ( + self.scale_in * self.scale_weights[0] + ) filter = np.random.random(self.filter_size).astype(self.weighttype) # When the Intel AVX2 or Intel AVX512 Instruction Set is used # the reorder additionally scales the weights by 0.5 # to overcome the potential overflow issue. If the processor supports VNNI instructions, # modification of the weights is not necessary. - avx_scale = 0.5 if not core.supports_vnni( - ) and self.srctype == np.int8 else 1. 
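# Editorial aside, not part of the patch: a toy sketch of the scale arithmetic this
# int8 conv test builds on -- quantized operands accumulate in int32 and the result
# is rescaled by 1 / (scale_in * scale_weights), mirroring scale_output_shift above.
# The scale values below are arbitrary examples, not the test's configuration.
import numpy as np

scale_in, scale_w = 95.0, 100.0  # arbitrary example scales
x = np.random.uniform(-1.0, 1.0, [8]).astype(np.float32)
w = np.random.uniform(-1.0, 1.0, [8]).astype(np.float32)

# Quantize; |values| <= 1 keeps the quantized operands well inside [-127, 127].
x_q = np.round(x * scale_in).astype(np.int32)
w_q = np.round(w * scale_w).astype(np.int32)

acc = np.dot(x_q, w_q)  # int32 accumulation, as in the conv kernel
approx = acc / (scale_in * scale_w)  # rescale back to the float32 range

# Coarse tolerance: each operand carries up to ~0.5 / scale quantization error.
assert np.isclose(approx, np.dot(x, w), atol=0.2)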
- filter_int = np.round(filter * self.scale_weights[0] * - avx_scale).astype(np.int32) + avx_scale = ( + 0.5 if not core.supports_vnni() and self.srctype == np.int8 else 1.0 + ) + filter_int = np.round( + filter * self.scale_weights[0] * avx_scale + ).astype(np.int32) scale_output_shift = scale_output_shift / avx_scale def conv2d_forward_refer_helper(input_): - return conv2d_forward_refer(input_.astype(np.int32), filter_int, - self.groups, conv2d_param).astype( - np.float32) * scale_output_shift + return ( + conv2d_forward_refer( + input_.astype(np.int32), + filter_int, + self.groups, + conv2d_param, + ).astype(np.float32) + * scale_output_shift + ) def residual_helper(init_low, init_high, output_): input_residual_ = np.random.randint( - init_low, init_high, - self.input_residual_size).astype(self.srctype) - return (output_ + input_residual_ * - (inner_scale / self.scale_in_eltwise)), input_residual_ + init_low, init_high, self.input_residual_size + ).astype(self.srctype) + return ( + output_ + + input_residual_ * (inner_scale / self.scale_in_eltwise) + ), input_residual_ if self.srctype == np.int8: init_low, init_high = (-5, 5) - input = np.random.randint(init_low, init_high, - self.input_size).astype(self.srctype) + input = np.random.randint( + init_low, init_high, self.input_size + ).astype(self.srctype) input_shift = (np.ones(self.input_size) * 128).astype(np.uint8) output1 = conv2d_forward_refer_helper( - np.round(input + input_shift).astype(np.int32)) + np.round(input + input_shift).astype(np.int32) + ) output2 = conv2d_forward_refer_helper( - np.round(input_shift).astype(np.int32)) + np.round(input_shift).astype(np.int32) + ) output = output1 - output2 else: init_low, init_high = (0, 10) - input = np.random.randint(init_low, init_high, - self.input_size).astype(self.srctype) + input = np.random.randint( + init_low, init_high, self.input_size + ).astype(self.srctype) output = conv2d_forward_refer_helper(input) if self.fuse_residual: - output, input_residual = residual_helper(init_low, init_high, - output) + output, input_residual = residual_helper( + init_low, init_high, output + ) if self.fuse_activation == "": pass elif self.fuse_activation == "relu": output = activation_scale * np.maximum(output, 0) elif self.fuse_activation == "hard_swish": - output = activation_scale * output / 6. * np.minimum( - np.maximum(0, output + 3.), 6) + output = ( + activation_scale + * output + / 6.0 + * np.minimum(np.maximum(0, output + 3.0), 6) + ) elif self.fuse_activation == "relu6": output = activation_scale * np.maximum(0, np.minimum(6, output)) elif self.fuse_activation == "swish": - output = activation_scale * output / (1. + np.exp(-1. 
* output)) + output = activation_scale * output / (1.0 + np.exp(-1.0 * output)) elif self.fuse_activation == "leaky_relu": output = activation_scale * np.maximum(output, 0.02 * output) else: - raise NotImplementedError("test for " + self.fuse_activation + - " activation not implemented") + raise NotImplementedError( + "test for " + + self.fuse_activation + + " activation not implemented" + ) output = np.round(output).astype(self.dsttype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input.astype(self.srctype)), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } if self.fuse_residual: self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( - input_residual) + input_residual + ) self.attrs = { 'strides': self.stride, @@ -146,15 +173,15 @@ class TestConv2DInt8Op(TestConv2DOp): 'fuse_alpha': self.fuse_alpha, 'fuse_beta': self.fuse_beta, 'fuse_residual_connection': self.fuse_residual, - 'mkldnn_data_type': self.mkldnn_data_type + 'mkldnn_data_type': self.mkldnn_data_type, } self.outputs = {'Output': output} def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output_with_place(core.CPUPlace(), - atol=0, - check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), atol=0, check_dygraph=False + ) def test_check_grad(self): pass @@ -193,7 +220,6 @@ class TestConv2DInt8Op(TestConv2DOp): class TestConv2D(TestConv2DInt8Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -209,7 +235,6 @@ class TestConv2D(TestConv2DInt8Op): class TestWithHardSwish(TestConv2D): - def init_fuse_activation(self): self.fuse_activation = "hard_swish" self.fuse_alpha = 0 @@ -217,7 +242,6 @@ class TestWithHardSwish(TestConv2D): class TestWithRelu6(TestConv2D): - def init_fuse_activation(self): self.fuse_activation = "relu6" self.fuse_alpha = 6 @@ -225,7 +249,6 @@ class TestWithRelu6(TestConv2D): class TestWithSwish(TestConv2D): - def init_fuse_activation(self): self.fuse_activation = "swish" self.fuse_alpha = 1 @@ -233,7 +256,6 @@ class TestWithSwish(TestConv2D): class TestWithLeakyRelu(TestConv2D): - def init_fuse_activation(self): self.fuse_activation = "leaky_relu" self.fuse_alpha = 0.02 @@ -241,7 +263,6 @@ class TestWithLeakyRelu(TestConv2D): class TestWithPad(TestConv2D): - def init_test_case(self): TestConv2D.init_test_case(self) self.pad = [1, 1] @@ -249,13 +270,11 @@ class TestWithPad(TestConv2D): class TestWithGroup(TestConv2D): - def init_group(self): self.groups = 3 class TestWithStride(TestConv2DInt8Op): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -271,7 +290,6 @@ class TestWithStride(TestConv2DInt8Op): class TestWithDilations(TestConv2DInt8Op): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -288,7 +306,6 @@ class TestWithDilations(TestConv2DInt8Op): class TestWith1x1(TestConv2DInt8Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -304,7 +321,6 @@ class TestWith1x1(TestConv2DInt8Op): class TestWithInput1x1Filter1x1(TestConv2DInt8Op): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -335,37 +351,31 @@ def create_test_int8_class(parent): # --------------------test conv2d s8 in and u8 out-------------------- class TestS8U8Case(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.int8, "relu", False) # --------------------test conv2d s8 in and s8 out-------------------- class TestS8S8Case(parent): - def init_data_type(self): init_data_type_with_fusion(self, 
np.int8, "", False) # --------------------test conv2d u8 in and s8 out-------------------- class TestU8S8Case(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.uint8, "", False) # --------------------test conv2d u8 in and u8 out without residual fuse-------------------- class TestU8U8Case(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.uint8, "relu", False) # --------------------test conv2d s8 in and s8 out with residual fuse-------------------- class TestS8S8ResCase(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.int8, "", True) # --------------------test conv2d u8 in and s8 out with residual fuse-------------------- class TestU8S8ResCase(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.uint8, "", True) @@ -375,9 +385,11 @@ def create_test_int8_class(parent): cls_name_u8u8 = "{0}_relu_{1}_residual_0".format(parent.__name__, "1") cls_name_s8s8_re_1 = "{0}_relu_{1}_residual_{2}".format( - parent.__name__, "0", "1") + parent.__name__, "0", "1" + ) cls_name_u8s8_re_1 = "{0}_relu_{1}_residual_{2}".format( - parent.__name__, "0", "1") + parent.__name__, "0", "1" + ) TestS8U8Case.__name__ = cls_name_s8u8 TestS8S8Case.__name__ = cls_name_s8s8 TestU8S8Case.__name__ = cls_name_u8s8 @@ -395,12 +407,12 @@ def create_test_int8_class(parent): if os.name != 'nt': # --------------------test conv2d s8 in and u8 out with residual fuse-------------------- class TestS8U8ResCase(parent): - def init_data_type(self): init_data_type_with_fusion(self, np.int8, "relu", True) cls_name_s8u8_re_1 = "{0}_relu_{1}_residual_{2}".format( - parent.__name__, "1", "1") + parent.__name__, "1", "1" + ) TestS8U8ResCase.__name__ = cls_name_s8u8_re_1 globals()[cls_name_s8u8_re_1] = TestS8U8ResCase @@ -415,7 +427,6 @@ create_test_int8_class(TestWithInput1x1Filter1x1) class TestConv2DOp_AsyPadding_INT_MKLDNN(TestConv2DInt8Op): - def init_kernel_type(self): self.use_mkldnn = True @@ -425,14 +436,12 @@ class TestConv2DOp_AsyPadding_INT_MKLDNN(TestConv2DInt8Op): class TestConv2DOp_Same_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" class TestConv2DOp_Valid_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -440,5 +449,6 @@ class TestConv2DOp_Valid_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py index d3e9bdde1a6893484a2d4e2cd5d8b96e66b2b0e5..65147ea6c2bddbd680ba06777f27dc136c263c93 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py @@ -16,7 +16,10 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci -from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2DOp, TestConv2DOp_v2 +from paddle.fluid.tests.unittests.test_conv2d_op import ( + TestConv2DOp, + TestConv2DOp_v2, +) def conv2d_bias_naive(out, bias): @@ -34,7 +37,6 @@ def conv2d_residual_naive(out, residual): class TestConv2DMKLDNNOp(TestConv2DOp): - def init_group(self): self.groups = 1 @@ -65,7 +67,7 @@ class TestConv2DMKLDNNOp(TestConv2DOp): output = self.outputs['Output'] - #mkldnn only support either conv-sum-relu, 
or conv-relu. + # mkldnn only support either conv-sum-relu, or conv-relu. if self.fuse_bias and self.bias_size is not None: bias = np.random.random(self.bias_size).astype(self.dtype) output = conv2d_bias_naive(output, bias) @@ -73,22 +75,29 @@ class TestConv2DMKLDNNOp(TestConv2DOp): self.attrs['fuse_bias'] = self.fuse_bias self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias) - if self.fuse_residual_connection and self.input_residual_size is not None: + if ( + self.fuse_residual_connection + and self.input_residual_size is not None + ): input_residual = np.random.random(self.input_residual_size).astype( - self.dtype) + self.dtype + ) output = conv2d_residual_naive(output, input_residual) self.attrs[ - 'fuse_residual_connection'] = self.fuse_residual_connection + 'fuse_residual_connection' + ] = self.fuse_residual_connection self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( - input_residual) + input_residual + ) if self.fuse_activation == "relu": output = np.maximum(output, 0).astype(self.dsttype) if self.fuse_activation == "relu6": - output = np.minimum(np.maximum(output, 0), - self.fuse_alpha).astype(self.dsttype) + output = np.minimum(np.maximum(output, 0), self.fuse_alpha).astype( + self.dsttype + ) output = output.astype(self.dtype) self.attrs['fuse_bias'] = self.fuse_bias @@ -101,9 +110,9 @@ class TestConv2DMKLDNNOp(TestConv2DOp): @skip_check_grad_ci( - reason="Fusion is for inference only, check_grad is not required.") + reason="Fusion is for inference only, check_grad is not required." +) class TestWithbreluFusion(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.fuse_activation = "relu6" @@ -112,9 +121,9 @@ class TestWithbreluFusion(TestConv2DMKLDNNOp): @skip_check_grad_ci( - reason="Fusion is for inference only, check_grad is not required.") + reason="Fusion is for inference only, check_grad is not required." 
+) class TestWithFuse(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -125,7 +134,6 @@ class TestWithFuse(TestConv2DMKLDNNOp): class TestWithPadWithBias(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -133,7 +141,6 @@ class TestWithPadWithBias(TestConv2DMKLDNNOp): class TestWithStride(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -142,7 +149,6 @@ class TestWithStride(TestConv2DMKLDNNOp): class TestWithGroup(TestConv2DMKLDNNOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -156,14 +162,12 @@ class TestWithGroup(TestConv2DMKLDNNOp): class TestWith1x1(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.filter_size = [40, 3, 1, 1] class TestWithInput1x1Filter1x1(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.input_size = [2, 60, 1, 1] # NCHW @@ -176,7 +180,6 @@ class TestWithInput1x1Filter1x1(TestConv2DMKLDNNOp): class TestConv2DOp_AsyPadding_MKLDNN(TestConv2DOp_v2): - def init_kernel_type(self): self.use_mkldnn = True self.dtype = np.float32 @@ -187,21 +190,18 @@ class TestConv2DOp_AsyPadding_MKLDNN(TestConv2DOp_v2): class TestConv2DOp_Same_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" class TestConv2DOp_Valid_MKLDNN(TestConv2DOp_AsyPadding_MKLDNN): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" class TestConv2DOp_Valid_NHWC_MKLDNN(TestConv2DOp_Valid_MKLDNN): - def init_data_format(self): self.data_format = "NHWC" @@ -211,21 +211,18 @@ class TestConv2DOp_Valid_NHWC_MKLDNN(TestConv2DOp_Valid_MKLDNN): class TestConv2DOp_Same_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" class TestConv2DOp_AsyPadding_NHWC_MKLDNN(TestConv2DOp_Valid_NHWC_MKLDNN): - def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestMKLDNNDilations(TestConv2DMKLDNNOp): - def init_test_case(self): TestConv2DMKLDNNOp.init_test_case(self) self.pad = [0, 0] @@ -244,5 +241,6 @@ class TestMKLDNNDilations(TestConv2DMKLDNNOp): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py index e9f4f5a3251981e936e550dca0924e5d9c25cdf3..e605dc6bf95519f13a56b786e111a009350fe4dc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py @@ -17,7 +17,9 @@ import numpy as np import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 -from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive +from paddle.fluid.tests.unittests.test_conv2d_transpose_op import ( + conv2dtranspose_forward_naive, +) from paddle import enable_static @@ -29,10 +31,10 @@ def conv2d_bias_naive(out, bias): return out -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class 
TestConv2DTransposeBF16MKLDNNOp(OpTest): - def test_check_output(self): self.check_output_with_place(core.CPUPlace()) @@ -95,7 +97,7 @@ class TestConv2DTransposeBF16MKLDNNOp(OpTest): 'data_format': self.data_format, 'fuse_activation': self.fuse_activation, 'fuse_alpha': self.fuse_alpha, - 'fuse_beta': self.fuse_beta + 'fuse_beta': self.fuse_beta, } if self.output_size is not None: self.attrs['output_size'] = self.output_size @@ -103,15 +105,16 @@ class TestConv2DTransposeBF16MKLDNNOp(OpTest): if len(self.output_padding) > 0: self.attrs['output_padding'] = self.output_padding - output = conv2dtranspose_forward_naive(input, filter, - self.attrs).astype(np.float32) + output = conv2dtranspose_forward_naive( + input, filter, self.attrs + ).astype(np.float32) if self.input_type is not np.float32: input = convert_float_to_uint16(input) self.inputs = { 'Input': input.view(self.input_type), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } if self.fuse_bias and self.bias_size is not None: @@ -132,7 +135,6 @@ class TestConv2DTransposeBF16MKLDNNOp(OpTest): class TestMKLDNNFuseBias(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNFuseBias, self).init_test_case() self.pad = [1, 1] @@ -141,7 +143,6 @@ class TestMKLDNNFuseBias(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithPad(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNWithPad, self).init_test_case() self.pad = [1, 1] @@ -149,7 +150,6 @@ class TestMKLDNNWithPad(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithStride(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNWithStride, self).init_test_case() self.pad = [1, 1] @@ -158,7 +158,6 @@ class TestMKLDNNWithStride(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithAsymPad(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNWithAsymPad, self).init_test_case() self.pad = [0, 0, 1, 2] @@ -166,7 +165,6 @@ class TestMKLDNNWithAsymPad(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithSamePad(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNWithSamePad, self).init_test_case() self.pad = [0, 0] @@ -174,7 +172,6 @@ class TestMKLDNNWithSamePad(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithValidPad(TestConv2DTransposeBF16MKLDNNOp): - def init_test_case(self): super(TestMKLDNNWithValidPad, self).init_test_case() self.pad = [1, 1] @@ -182,7 +179,6 @@ class TestMKLDNNWithValidPad(TestConv2DTransposeBF16MKLDNNOp): class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad): - def init_test_case(self): super(TestMKLDNNWithValidPad_NHWC, self).init_test_case() self.data_format = 'NHWC' @@ -191,11 +187,12 @@ class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad): class TestConv2DTransposeMKLDNNWithDilationsExplicitPad( - TestConv2DTransposeBF16MKLDNNOp): - + TestConv2DTransposeBF16MKLDNNOp +): def init_test_case(self): - super(TestConv2DTransposeMKLDNNWithDilationsExplicitPad, - self).init_test_case() + super( + TestConv2DTransposeMKLDNNWithDilationsExplicitPad, self + ).init_test_case() self.stride = [2, 1] self.dilations = [1, 2] self.groups = 1 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py index 081f70a02e3353fc712db5f38fb2f6960584a1c5..14e42fc50ce7827ede9b5b7022ca7a1d8f76cd83 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py +++ 
b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -16,7 +16,9 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import OpTest from paddle import enable_static -from paddle.fluid.tests.unittests.test_conv2d_transpose_op import TestConv2DTransposeOp +from paddle.fluid.tests.unittests.test_conv2d_transpose_op import ( + TestConv2DTransposeOp, +) def conv2d_bias_naive(out, bias): @@ -28,7 +30,6 @@ def conv2d_bias_naive(out, bias): class TestConv2DTransposeMKLDNNOp(TestConv2DTransposeOp): - def test_check_grad(self): return @@ -87,7 +88,6 @@ class TestConv2DTransposeMKLDNNOp(TestConv2DTransposeOp): class TestMKLDNNFuseBias(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -96,7 +96,6 @@ class TestMKLDNNFuseBias(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithPad(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -104,7 +103,6 @@ class TestMKLDNNWithPad(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithStride(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -113,7 +111,6 @@ class TestMKLDNNWithStride(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithAsymPad(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [0, 0, 1, 2] @@ -121,7 +118,6 @@ class TestMKLDNNWithAsymPad(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithSamePad(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [0, 0] @@ -129,7 +125,6 @@ class TestMKLDNNWithSamePad(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithValidPad(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -137,7 +132,6 @@ class TestMKLDNNWithValidPad(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad): - def init_test_case(self): super(TestMKLDNNWithValidPad_NHWC, self).init_test_case() self.data_format = "NHWC" @@ -146,8 +140,8 @@ class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad): class TestConv2DTransposeMKLDNNWithDilationsExplicitPad( - TestConv2DTransposeMKLDNNOp): - + TestConv2DTransposeMKLDNNOp +): def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.stride = [2, 1] @@ -161,7 +155,6 @@ class TestConv2DTransposeMKLDNNWithDilationsExplicitPad( class TestMKLDNNWithGroups(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] @@ -172,7 +165,6 @@ class TestMKLDNNWithGroups(TestConv2DTransposeMKLDNNOp): class TestMKLDNNWithGroups_NHWC(TestConv2DTransposeMKLDNNOp): - def init_test_case(self): TestConv2DTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py index 6e31912a909b7c5a36c284fd326a7bebdcdc35c9..30561ec8f0f818bf04489decb40e71f8db00b131 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py @@ -14,11 +14,17 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.test_conv3d_op import TestCase1, TestConv3DOp, TestWith1x1, TestWithGroup1, TestWithGroup2, 
TestWithInput1x1Filter1x1 +from paddle.fluid.tests.unittests.test_conv3d_op import ( + TestCase1, + TestConv3DOp, + TestWith1x1, + TestWithGroup1, + TestWithGroup2, + TestWithInput1x1Filter1x1, +) class TestMKLDNN(TestConv3DOp): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -26,7 +32,6 @@ class TestMKLDNN(TestConv3DOp): class TestMKLDNNCase1(TestCase1): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -34,7 +39,6 @@ class TestMKLDNNCase1(TestCase1): class TestMKLDNNGroup1(TestWithGroup1): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -42,7 +46,6 @@ class TestMKLDNNGroup1(TestWithGroup1): class TestMKLDNNGroup2(TestWithGroup2): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -50,7 +53,6 @@ class TestMKLDNNGroup2(TestWithGroup2): class TestMKLDNNWith1x1(TestWith1x1): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -58,7 +60,6 @@ class TestMKLDNNWith1x1(TestWith1x1): class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -66,7 +67,6 @@ class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): class TestConv3DOp_AsyPadding_MKLDNN(TestConv3DOp): - def init_kernel_type(self): self.use_mkldnn = True self.data_format = "NCHW" @@ -78,7 +78,6 @@ class TestConv3DOp_AsyPadding_MKLDNN(TestConv3DOp): class TestConv3DOp_Same_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN): - def init_paddings(self): self.pad = [0, 0, 0] self.padding_algorithm = "SAME" @@ -90,7 +89,6 @@ class TestConv3DOp_Same_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN): class TestConv3DOp_Valid_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN): - def init_paddings(self): self.pad = [1, 1, 1] self.padding_algorithm = "VALID" @@ -103,5 +101,6 @@ class TestConv3DOp_Valid_MKLDNN(TestConv3DOp_AsyPadding_MKLDNN): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py index 70a1fd823941b7b8906c87d1b1ae93be3ec1914e..2e3a46095996720f17cf57a1413998ee1859aea1 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py @@ -19,7 +19,6 @@ import paddle class TestDeQuantizeOp(OpTest): - def setUp(self): self.op_type = 'dequantize' self.scale = 127.0 @@ -45,19 +44,22 @@ class TestDeQuantizeOp(OpTest): def prepare_input_int8(self): if self.data_type == 'int8': # input data values are integers from interval [-128, 128) - self.input = (np.random.randint(0, 256, self.input_size) - - 128).astype(self.data_type) + self.input = ( + np.random.randint(0, 256, self.input_size) - 128 + ).astype(self.data_type) else: # input data values are integers from interval [0, 256) self.input = (np.random.randint(0, 256, self.input_size)).astype( - self.data_type) + self.data_type + ) self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} self.attrs = {'Scale': self.scale, 'Shift': self.shift} def prepare_output_int8(self): - output = (self.input / self.scale - - (self.shift / self.scale)).astype('float') + output = (self.input / self.scale - (self.shift / self.scale)).astype( + 'float' + ) self.outputs = {'Output': output} def test_check_output(self): @@ -87,7 +89,6 @@ class TestDeQuantizeOp(OpTest): class 
TestDeQuantizeOp1(TestDeQuantizeOp): - def set_scale(self): self.scale = 1.5 @@ -96,7 +97,6 @@ class TestDeQuantizeOp1(TestDeQuantizeOp): class TestDeQuantizeOp2(TestDeQuantizeOp): - def set_scale(self): self.scale = 0.8 @@ -105,7 +105,6 @@ class TestDeQuantizeOp2(TestDeQuantizeOp): class TestDeQuantizeOpBf16(TestDeQuantizeOp): - def set_scale(self): self.scale = 1.0 @@ -116,7 +115,6 @@ class TestDeQuantizeOpBf16(TestDeQuantizeOp): # 2-dim input # P - positive input, with shift class TestDeQuantizeOpShift_2_P(TestDeQuantizeOp): - def set_data_type(self): self.data_type = 'uint8' @@ -133,7 +131,6 @@ class TestDeQuantizeOpShift_2_P(TestDeQuantizeOp): # 2-dim input # N - negative input, with shift class TestDeQuantizeOpShift_2_N(TestDeQuantizeOpShift_2_P): - def set_data_type(self): self.data_type = 'int8' @@ -149,26 +146,22 @@ class TestDeQuantizeOpShift_2_N(TestDeQuantizeOpShift_2_P): # 3-dim input class TestDeQuantizeOpShift_3_P(TestDeQuantizeOpShift_2_P): - def set_input_size(self): self.input_size = [2, 3, 4] class TestDeQuantizeOpShift_3_N(TestDeQuantizeOpShift_2_N): - def set_input_size(self): self.input_size = [2, 3, 4] # 4-dim input class TestDeQuantizeOpShift_4_P(TestDeQuantizeOpShift_2_P): - def set_input_size(self): self.input_size = [2, 3, 4, 5] class TestDeQuantizeOpShift_4_N(TestDeQuantizeOpShift_2_N): - def set_input_size(self): self.input_size = [2, 3, 4, 5] diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_bf16_mkldnn_op.py index 6511b1a1538866a8a15057d7a944a56985d415ea..8e2ae04bb7c8f31151e7c3509547db75cccbc032 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_bf16_mkldnn_op.py @@ -19,10 +19,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestElementwiseAddBf16MklDNNOp(OpTest): - def setUp(self): self.op_type = "elementwise_add" self.use_mkldnn = True @@ -38,8 +38,12 @@ class TestElementwiseAddBf16MklDNNOp(OpTest): self.outputs = {'Out': convert_float_to_uint16(self.out)} def generate_data(self): - self.x = np.random.random(100, ).astype(np.float32) - self.y = np.random.random(100, ).astype(np.float32) + self.x = np.random.random( + 100, + ).astype(np.float32) + self.y = np.random.random( + 100, + ).astype(np.float32) self.out = np.add(self.x, self.y) def test_check_output(self): @@ -47,30 +51,39 @@ class TestElementwiseAddBf16MklDNNOp(OpTest): # elementwise_add grad (no braodcasting) is just passing upper gradients to either X or Y or both def test_check_grad_normal(self): - self.check_grad_with_place(core.CPUPlace(), ["X", "Y"], - "Out", - check_dygraph=False, - user_defined_grads=[self.x, self.x], - user_defined_grad_outputs=[self.x_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["X", "Y"], + "Out", + check_dygraph=False, + user_defined_grads=[self.x, self.x], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ingore_x(self): - self.check_grad_with_place(core.CPUPlace(), ["Y"], - "Out", - check_dygraph=False, - user_defined_grads=[self.y], - user_defined_grad_outputs=[self.y_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["Y"], + "Out", + 
check_dygraph=False, + user_defined_grads=[self.y], + user_defined_grad_outputs=[self.y_bf16], + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - check_dygraph=False, - user_defined_grads=[self.x], - user_defined_grad_outputs=[self.x_bf16]) - + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + check_dygraph=False, + user_defined_grads=[self.x], + user_defined_grad_outputs=[self.x_bf16], + ) -class TestElementwiseAddBroadCastingBf16MklDNNOp(TestElementwiseAddBf16MklDNNOp - ): +class TestElementwiseAddBroadCastingBf16MklDNNOp( + TestElementwiseAddBf16MklDNNOp +): def generate_data(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(np.float32) self.y = np.random.uniform(1, 2, [100]).astype(np.float32) @@ -85,20 +98,23 @@ class TestElementwiseAddBroadCastingBf16MklDNNOp(TestElementwiseAddBf16MklDNNOp def test_check_grad_normal(self): self.check_grad_with_place( - core.CPUPlace(), ["X", "Y"], + core.CPUPlace(), + ["X", "Y"], "Out", check_dygraph=False, - user_defined_grads=[self.x, - self.compute_reduced_gradients(self.x)], - user_defined_grad_outputs=[self.x_bf16]) + user_defined_grads=[self.x, self.compute_reduced_gradients(self.x)], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ingore_x(self): self.check_grad_with_place( - core.CPUPlace(), ["Y"], + core.CPUPlace(), + ["Y"], "Out", check_dygraph=False, user_defined_grads=[self.compute_reduced_gradients(self.x)], - user_defined_grad_outputs=[self.x_bf16]) + user_defined_grad_outputs=[self.x_bf16], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py index 2b89d3c4b10da6e56e69f82f47e7db50d3191f82..4001e2ba76ba7ec6fd071848de97f61f2022acfc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py @@ -15,12 +15,13 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp +from paddle.fluid.tests.unittests.test_elementwise_add_op import ( + TestElementwiseAddOp, +) from paddle import enable_static class TestMKLDNNElementwiseAddOp(TestElementwiseAddOp): - def init_kernel_type(self): self.use_mkldnn = True @@ -29,15 +30,13 @@ class TestMKLDNNElementwiseAddOp(TestElementwiseAddOp): class TestMKLDNNElementwiseAddOp2(TestMKLDNNElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestMKLDNNElementwiseAddOp3(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) @@ -45,7 +44,6 @@ class TestMKLDNNElementwiseAddOp3(TestMKLDNNElementwiseAddOp): class TestMKLDNNElementwiseAddOp4(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype) self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype) @@ -60,7 +58,6 @@ class TestMKLDNNElementwiseAddOp4(TestMKLDNNElementwiseAddOp): class 
TestMKLDNNElementwiseAddOp5(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) @@ -68,7 +65,6 @@ class TestMKLDNNElementwiseAddOp5(TestMKLDNNElementwiseAddOp): class TestMKLDNNElementwiseAddOpBroadcastXintoY(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 50, 1]).astype(self.dtype) self.y = np.random.uniform(1, 2, [2, 50, 160]).astype(self.dtype) @@ -76,7 +72,6 @@ class TestMKLDNNElementwiseAddOpBroadcastXintoY(TestMKLDNNElementwiseAddOp): class TestMKLDNNElementwiseAddOp_broadcast_3(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -87,7 +82,6 @@ class TestMKLDNNElementwiseAddOp_broadcast_3(TestMKLDNNElementwiseAddOp): class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestMKLDNNElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype) @@ -111,9 +105,9 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestMKLDNNElementwiseAddOp): @skip_check_grad_ci( - reason="oneDNN's int8 elementwise_ops don't implemend grad kernel.") + reason="oneDNN's int8 elementwise_ops don't implemend grad kernel." +) class TestInt8(TestElementwiseAddOp): - def init_kernel_type(self): self.use_mkldnn = True self._cpu_only = True @@ -147,7 +141,6 @@ class TestInt8(TestElementwiseAddOp): class TestInt8Scales(TestInt8): - def quantize(self, tensor, dt="int8"): max_int = 127.0 if dt == "int8" else 255.0 scale = max_int / np.abs(np.amax(tensor)) @@ -155,8 +148,8 @@ class TestInt8Scales(TestInt8): return scale, quantized def init_input_output(self): - self.x_f = np.random.random((100, )).astype("float") - self.y_f = np.random.random((100, )).astype("float") + self.x_f = np.random.random((100,)).astype("float") + self.y_f = np.random.random((100,)).astype("float") self.out_f = np.add(self.x_f, self.y_f) self.scale_x, self.x = self.quantize(self.x_f) @@ -172,15 +165,15 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output(check_dygraph=(self.use_mkldnn == False), - atol=int_atol) + self.check_output( + check_dygraph=(self.use_mkldnn == False), atol=int_atol + ) class TestUint8Scales(TestInt8Scales): - def init_input_output(self): - self.x_f = np.random.random((100, )).astype("float") - self.y_f = np.random.random((100, )).astype("float") + self.x_f = np.random.random((100,)).astype("float") + self.y_f = np.random.random((100,)).astype("float") self.out_f = np.add(self.x_f, self.y_f) self.scale_x, self.x = self.quantize(self.x_f, "uint8") diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py index 9312e9459037ac412448b13018ee7371cca62b73..8adeb32dc9f6c4b87b221ac5e0cdf0c8362f4d34 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_div_mkldnn_op.py @@ -15,15 +15,20 @@ import unittest import numpy as np from paddle import enable_static -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + 
OpTest, + OpTestTool, + convert_float_to_uint16, +) from paddle.fluid.framework import _current_expected_place import paddle.fluid.core as core -@OpTestTool.skip_if(not (isinstance(_current_expected_place(), core.CPUPlace)), - "GPU is not supported") +@OpTestTool.skip_if( + not (isinstance(_current_expected_place(), core.CPUPlace)), + "GPU is not supported", +) class TestMKLDNNElementwiseDivOp(OpTest): - def setUp(self): self.op_type = "elementwise_div" self.init_dtype() @@ -32,7 +37,7 @@ class TestMKLDNNElementwiseDivOp(OpTest): self.init_axis() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -65,7 +70,6 @@ class TestMKLDNNElementwiseDivOp(OpTest): class TestMKLDNNElementwiseDivOp2(TestMKLDNNElementwiseDivOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 1, [100]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, [100]).astype(self.dtype) @@ -73,7 +77,6 @@ class TestMKLDNNElementwiseDivOp2(TestMKLDNNElementwiseDivOp): class TestMKLDNNElementwiseDivOp3(TestMKLDNNElementwiseDivOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) @@ -81,7 +84,6 @@ class TestMKLDNNElementwiseDivOp3(TestMKLDNNElementwiseDivOp): class TestMKLDNNElementwiseDivOp4(TestMKLDNNElementwiseDivOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype) self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype) @@ -96,7 +98,6 @@ class TestMKLDNNElementwiseDivOp4(TestMKLDNNElementwiseDivOp): class TestMKLDNNElementwiseDivOp5(TestMKLDNNElementwiseDivOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) @@ -112,7 +113,6 @@ class TestMKLDNNElementwiseDivOp5(TestMKLDNNElementwiseDivOp): @OpTestTool.skip_if_not_cpu_bf16() class TestBf16(TestMKLDNNElementwiseDivOp): - def setUp(self): self.op_type = "elementwise_div" self.init_dtype() @@ -139,34 +139,43 @@ class TestBf16(TestMKLDNNElementwiseDivOp): self.check_output_with_place(core.CPUPlace()) def test_check_grad_normal(self): - self.check_grad_with_place(core.CPUPlace(), ["X", "Y"], - "Out", - user_defined_grads=[ - np.divide(self.x, self.y), - np.divide((np.multiply(-self.x, self.x)), - np.multiply(self.y, self.y)) - ], - user_defined_grad_outputs=[self.x_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["X", "Y"], + "Out", + user_defined_grads=[ + np.divide(self.x, self.y), + np.divide( + (np.multiply(-self.x, self.x)), np.multiply(self.y, self.y) + ), + ], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ignore_x(self): - self.check_grad_with_place(core.CPUPlace(), ["Y"], - "Out", - user_defined_grads=[ - np.divide((np.multiply(-self.x, self.y)), - np.multiply(self.y, self.y)) - ], - user_defined_grad_outputs=[self.y_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["Y"], + "Out", + user_defined_grads=[ + np.divide( + (np.multiply(-self.x, self.y)), np.multiply(self.y, self.y) + ) + ], + user_defined_grad_outputs=[self.y_bf16], + ) def test_check_grad_ignore_y(self): self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[np.divide(self.x, self.y)], - user_defined_grad_outputs=[self.x_bf16]) 
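
The user_defined_grads passed to check_grad_with_place in the bf16 division tests above are the hand-derived quotient-rule gradients of out = x / y, evaluated with x itself as the upstream gradient (user_defined_grad_outputs=[self.x_bf16]). A standalone NumPy sketch of that arithmetic, kept outside the patch, with a small finite-difference sanity check:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0.1, 1.0, 100).astype(np.float32)
y = rng.uniform(0.1, 1.0, 100).astype(np.float32)
g = x  # the tests feed x itself back in as the upstream gradient (x_bf16)

# d(x / y) / dx = 1 / y      ->  grad_x = g / y         = x / y
# d(x / y) / dy = -x / y**2  ->  grad_y = -g * x / y**2 = -x * x / y**2
grad_x = np.divide(x, y)
grad_y = np.divide(np.multiply(-x, x), np.multiply(y, y))

# Central-difference check of both formulas on one element.
eps, i = 1e-4, 0
xi, yi, gi = float(x[i]), float(y[i]), float(g[i])
num_dx = ((xi + eps) / yi - (xi - eps) / yi) / (2 * eps)
num_dy = (xi / (yi + eps) - xi / (yi - eps)) / (2 * eps)
assert np.isclose(num_dx * gi, grad_x[i], rtol=1e-3)
assert np.isclose(num_dy * gi, grad_y[i], rtol=1e-3)
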
+ user_defined_grad_outputs=[self.x_bf16], + ) class TestBf16Broadcasting(TestBf16): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py index 26531d826b5be77d130fd1900b307d66e68a9ffd..d7e31b061552e34ae3d67e225d90f78576f33179 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py @@ -19,10 +19,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestElementwiseMulBf16MklDNNOp(OpTest): - def setUp(self): self.op_type = "elementwise_mul" self.use_mkldnn = True @@ -37,43 +37,54 @@ class TestElementwiseMulBf16MklDNNOp(OpTest): self.outputs = {'Out': convert_float_to_uint16(self.out)} def generate_data(self): - self.x = np.random.random(100, ).astype(np.float32) - self.y = np.random.random(100, ).astype(np.float32) + self.x = np.random.random( + 100, + ).astype(np.float32) + self.y = np.random.random( + 100, + ).astype(np.float32) self.out = np.multiply(self.x, self.y) def test_check_output(self): self.check_output_with_place(core.CPUPlace()) def test_check_grad_normal(self): - self.check_grad_with_place(core.CPUPlace(), ["X", "Y"], - "Out", - check_dygraph=False, - user_defined_grads=[ - np.multiply(self.x, self.y), - np.multiply(self.x, self.x) - ], - user_defined_grad_outputs=[self.x_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["X", "Y"], + "Out", + check_dygraph=False, + user_defined_grads=[ + np.multiply(self.x, self.y), + np.multiply(self.x, self.x), + ], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ingore_x(self): self.check_grad_with_place( - core.CPUPlace(), ["Y"], + core.CPUPlace(), + ["Y"], "Out", check_dygraph=False, user_defined_grads=[np.multiply(self.y, self.x)], - user_defined_grad_outputs=[self.y_bf16]) + user_defined_grad_outputs=[self.y_bf16], + ) def test_check_grad_ingore_y(self): self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", check_dygraph=False, user_defined_grads=[np.multiply(self.x, self.y)], - user_defined_grad_outputs=[self.x_bf16]) - + user_defined_grad_outputs=[self.x_bf16], + ) -class TestElementwiseMulBroadcastingBf16MklDNNOp(TestElementwiseMulBf16MklDNNOp - ): +class TestElementwiseMulBroadcastingBf16MklDNNOp( + TestElementwiseMulBf16MklDNNOp +): def generate_data(self): self.x = np.random.uniform(1, 2, [1, 2, 3, 100]).astype(np.float32) self.y = np.random.uniform(1, 2, [100]).astype(np.float32) @@ -90,7 +101,7 @@ class TestElementwiseMulBroadcastingBf16MklDNNOp(TestElementwiseMulBf16MklDNNOp # accuracy problems that need to be explained def test_check_grad_normal(self): pass - #self.check_grad_with_place( + # self.check_grad_with_place( # core.CPUPlace(), ["X", "Y"], # "Out", # check_dy_graph=False, @@ -102,7 +113,7 @@ class TestElementwiseMulBroadcastingBf16MklDNNOp(TestElementwiseMulBf16MklDNNOp def test_check_grad_ingore_x(self): pass - #self.check_grad_with_place( + # self.check_grad_with_place( # core.CPUPlace(), ["Y"], 
# "Out", # check_dy_graph=False, diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py index 7f3bd429eb3888507c4c53688fb3482ce329bf5a..3ca09093b814d76fb4dcc49faa5a43dc1da8e3e3 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py @@ -15,12 +15,13 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -from paddle.fluid.tests.unittests.test_elementwise_mul_op import ElementwiseMulOp +from paddle.fluid.tests.unittests.test_elementwise_mul_op import ( + ElementwiseMulOp, +) from paddle import enable_static class TestMKLDNNElementwiseMulOp(ElementwiseMulOp): - def init_kernel_type(self): self.use_mkldnn = True @@ -29,15 +30,13 @@ class TestMKLDNNElementwiseMulOp(ElementwiseMulOp): class TestMKLDNNElementwiseMulOp2(TestMKLDNNElementwiseMulOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.multiply(self.x, self.y) class TestMKLDNNElementwiseMulOp3(TestMKLDNNElementwiseMulOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) @@ -45,7 +44,6 @@ class TestMKLDNNElementwiseMulOp3(TestMKLDNNElementwiseMulOp): class TestMKLDNNElementwiseMulOp4(TestMKLDNNElementwiseMulOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype) self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype) @@ -60,7 +58,6 @@ class TestMKLDNNElementwiseMulOp4(TestMKLDNNElementwiseMulOp): class TestMKLDNNElementwiseMulOp5(TestMKLDNNElementwiseMulOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) @@ -81,9 +78,9 @@ class TestMKLDNNElementwiseMulOp5(TestMKLDNNElementwiseMulOp): @skip_check_grad_ci( - reason="oneDNN's int8 elementwise_ops don't implemend grad kernel.") + reason="oneDNN's int8 elementwise_ops don't implemend grad kernel." 
+) class TestInt8(ElementwiseMulOp): - def init_kernel_type(self): self.use_mkldnn = True self._cpu_only = True @@ -117,7 +114,6 @@ class TestInt8(ElementwiseMulOp): class TestInt8Scales(TestInt8): - def quantize(self, tensor, dt="int8"): max_int = 127.0 if dt == "int8" else 255.0 scale = max_int / np.abs(np.amax(tensor)) @@ -125,8 +121,8 @@ class TestInt8Scales(TestInt8): return scale, quantized def init_input_output(self): - self.x_f = np.random.random((100, )).astype("float") - self.y_f = np.random.random((100, )).astype("float") + self.x_f = np.random.random((100,)).astype("float") + self.y_f = np.random.random((100,)).astype("float") self.out_f = np.multiply(self.x_f, self.y_f) self.scale_x, self.x = self.quantize(self.x_f) @@ -142,15 +138,15 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output(check_dygraph=(self.use_mkldnn == False), - atol=int_atol) + self.check_output( + check_dygraph=(self.use_mkldnn == False), atol=int_atol + ) class TestUint8Scales(TestInt8Scales): - def init_input_output(self): - self.x_f = np.random.random((100, )).astype("float") - self.y_f = np.random.random((100, )).astype("float") + self.x_f = np.random.random((100,)).astype("float") + self.y_f = np.random.random((100,)).astype("float") self.out_f = np.multiply(self.x_f, self.y_f) self.scale_x, self.x = self.quantize(self.x_f, "uint8") diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py index b91e81550cdebee4df4d2653aa8045756a09597d..6d1f12d1414cb79f2cffb250a2513bb87ad90539 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_sub_mkldnn_op.py @@ -15,15 +15,20 @@ import unittest import numpy as np from paddle import enable_static -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) from paddle.fluid.framework import _current_expected_place import paddle.fluid.core as core -@OpTestTool.skip_if(not (isinstance(_current_expected_place(), core.CPUPlace)), - "GPU is not supported") +@OpTestTool.skip_if( + not (isinstance(_current_expected_place(), core.CPUPlace)), + "GPU is not supported", +) class TestMKLDNNElementwiseSubOp(OpTest): - def setUp(self): self.op_type = "elementwise_sub" self.init_dtype() @@ -32,7 +37,7 @@ class TestMKLDNNElementwiseSubOp(OpTest): self.init_axis() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -65,15 +70,13 @@ class TestMKLDNNElementwiseSubOp(OpTest): class TestMKLDNNElementwiseSubOp2(TestMKLDNNElementwiseSubOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.subtract(self.x, self.y) class TestMKLDNNElementwiseSubOp3(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, 
[2, 3, 4, 5]).astype(self.dtype) @@ -81,7 +84,6 @@ class TestMKLDNNElementwiseSubOp3(TestMKLDNNElementwiseSubOp): class TestMKLDNNElementwiseSubOp4(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype) self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype) @@ -89,7 +91,6 @@ class TestMKLDNNElementwiseSubOp4(TestMKLDNNElementwiseSubOp): class TestMKLDNNElementwiseSubOp40(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.uniform(0.1, 2, [180, 1]).astype(self.dtype) self.y = np.random.uniform(0.1, 1, [1, 256]).astype(self.dtype) @@ -106,7 +107,6 @@ class TestMKLDNNElementwiseSubOp40(TestMKLDNNElementwiseSubOp): class TestMKLDNNElementwiseSubOp5(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) @@ -114,7 +114,6 @@ class TestMKLDNNElementwiseSubOp5(TestMKLDNNElementwiseSubOp): class TestMKLDNNElementwiseSubOp_broadcast(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -125,7 +124,6 @@ class TestMKLDNNElementwiseSubOp_broadcast(TestMKLDNNElementwiseSubOp): class TestElementwiseSubOp_xsize_lessthan_ysize_sub(TestMKLDNNElementwiseSubOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype) @@ -146,7 +144,6 @@ class TestElementwiseSubOp_xsize_lessthan_ysize_sub(TestMKLDNNElementwiseSubOp): @OpTestTool.skip_if_not_cpu_bf16() class TestBf16(TestMKLDNNElementwiseSubOp): - def setUp(self): self.op_type = "elementwise_sub" self.init_dtype() @@ -165,34 +162,46 @@ class TestBf16(TestMKLDNNElementwiseSubOp): self.mkldnn_data_type = "bfloat16" def init_input_output(self): - self.x = np.random.random(100, ).astype(self.dtype) - self.y = np.random.random(100, ).astype(self.dtype) + self.x = np.random.random( + 100, + ).astype(self.dtype) + self.y = np.random.random( + 100, + ).astype(self.dtype) self.out = np.subtract(self.x, self.y) def test_check_output(self): self.check_output_with_place(core.CPUPlace()) def test_check_grad_normal(self): - self.check_grad_with_place(core.CPUPlace(), ["X", "Y"], - "Out", - user_defined_grads=[self.x, -self.x], - user_defined_grad_outputs=[self.x_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["X", "Y"], + "Out", + user_defined_grads=[self.x, -self.x], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ignore_x(self): - self.check_grad_with_place(core.CPUPlace(), ["Y"], - "Out", - user_defined_grads=[-self.y], - user_defined_grad_outputs=[self.y_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["Y"], + "Out", + user_defined_grads=[-self.y], + user_defined_grad_outputs=[self.y_bf16], + ) def test_check_grad_ignore_y(self): - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - user_defined_grads=[self.x], - user_defined_grad_outputs=[self.x_bf16]) + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + user_defined_grads=[self.x], + user_defined_grad_outputs=[self.x_bf16], + ) class TestBf16Broadcasting(TestBf16): - def init_input_output(self): self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype) self.y = np.random.uniform(1, 2, [100]).astype(self.dtype) @@ -206,22 +215,24 @@ class TestBf16Broadcasting(TestBf16): def test_check_grad_normal(self): 
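
TestBf16Broadcasting (here for elementwise_sub, with the same shapes used earlier for elementwise_add) checks gradients for a y that is broadcast from shape [100] up to [2, 3, 4, 100]. The helper compute_reduced_gradients is defined elsewhere in these test files and is not shown in this excerpt; conceptually, the gradient of a broadcast operand is the upstream gradient summed over the broadcast axes (with an additional sign flip for the subtrahend in elementwise_sub). A generic sketch of that reduction, with hypothetical names, not the helper's actual code:

import numpy as np

def reduce_broadcast_grad(grad_out, operand_shape):
    # Sum the upstream gradient over every broadcast axis so the result
    # matches the operand's original shape.
    grad = grad_out
    # Collapse leading axes the operand did not have at all.
    while grad.ndim > len(operand_shape):
        grad = grad.sum(axis=0)
    # Collapse axes where the operand had size 1 but grad_out did not.
    for axis, size in enumerate(operand_shape):
        if size == 1 and grad.shape[axis] != 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad

grad_out = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(np.float32)
grad_y = reduce_broadcast_grad(grad_out, (100,))
assert grad_y.shape == (100,)
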
self.check_grad_with_place( - core.CPUPlace(), ["X", "Y"], + core.CPUPlace(), + ["X", "Y"], "Out", - user_defined_grads=[self.x, - self.compute_reduced_gradients(self.x)], - user_defined_grad_outputs=[self.x_bf16]) + user_defined_grads=[self.x, self.compute_reduced_gradients(self.x)], + user_defined_grad_outputs=[self.x_bf16], + ) def test_check_grad_ignore_x(self): self.check_grad_with_place( - core.CPUPlace(), ["Y"], + core.CPUPlace(), + ["Y"], "Out", user_defined_grads=[self.compute_reduced_gradients(self.x)], - user_defined_grad_outputs=[self.x_bf16]) + user_defined_grad_outputs=[self.x_bf16], + ) class TestInt8(TestMKLDNNElementwiseSubOp): - def init_kernel_type(self): self.use_mkldnn = True self._cpu_only = True diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_expand_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_expand_v2_mkldnn_op.py index cf98324809d333024b1e64bfe891a83bbb1398f2..f156cceb8231275f277eeb85e7c5ab303f5ad6e9 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_expand_v2_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_expand_v2_mkldnn_op.py @@ -16,13 +16,18 @@ import unittest import numpy as np import paddle from paddle.fluid import core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) -@OpTestTool.skip_if(core.is_compiled_with_cuda(), - "CUDA required dygraph so oneDNN UT must be skipped") +@OpTestTool.skip_if( + core.is_compiled_with_cuda(), + "CUDA required dygraph so oneDNN UT must be skipped", +) class TestExpandV2OneDNNOp(OpTest): - def setUp(self): self.op_type = "expand_v2" self.init_data() @@ -52,7 +57,6 @@ class TestExpandV2OneDNNOp(OpTest): class TestExpandV2ExpandDimOneDNNOp(TestExpandV2OneDNNOp): - def init_data(self): self.ori_shape = [120] self.shape = [2, 120] @@ -60,7 +64,6 @@ class TestExpandV2ExpandDimOneDNNOp(TestExpandV2OneDNNOp): class TestExpandV2CopyScenarioOneDNNOp(TestExpandV2OneDNNOp): - def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) @@ -68,7 +71,6 @@ class TestExpandV2CopyScenarioOneDNNOp(TestExpandV2OneDNNOp): class TestExpandV2CopyScenarioShapeNotGivenOneDNNOp(TestExpandV2OneDNNOp): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) @@ -76,7 +78,6 @@ class TestExpandV2CopyScenarioShapeNotGivenOneDNNOp(TestExpandV2OneDNNOp): class TestExpandV2ExpandShapesTensor1OneDNNOp(TestExpandV2OneDNNOp): - def init_data(self): self.ori_shape = [100, 1] self.expand_times = [1, 2] @@ -86,8 +87,9 @@ class TestExpandV2ExpandShapesTensor1OneDNNOp(TestExpandV2OneDNNOp): def calc_expand_shapes_tensor(self): self.expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): - self.expand_shapes_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.expand_shapes_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) def set_additional_inputs(self): self.calc_expand_shapes_tensor() @@ -95,8 +97,8 @@ class TestExpandV2ExpandShapesTensor1OneDNNOp(TestExpandV2OneDNNOp): class TestExpandV2ExpandShapesTensor2OneDNNOp( - TestExpandV2ExpandShapesTensor1OneDNNOp): - + TestExpandV2ExpandShapesTensor1OneDNNOp +): def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -105,7 +107,6 @@ class TestExpandV2ExpandShapesTensor2OneDNNOp( class TestExpandV2ShapesTensorOneDNNOp(TestExpandV2OneDNNOp): - def init_data(self): self.ori_shape = 
[100] self.expand_times = [2, 1] @@ -118,10 +119,8 @@ class TestExpandV2ShapesTensorOneDNNOp(TestExpandV2OneDNNOp): # BF16 TESTS def create_expand_v2_bf16_test_class(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestExpandV2BF16OneDNNOp(parent): - def set_inputs(self): self.attrs['mkldnn_data_type'] = 'bfloat16' self.inputs = {"X": convert_float_to_uint16(self.x)} @@ -137,10 +136,12 @@ def create_expand_v2_bf16_test_class(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[convert_float_to_uint16(self.dx)], - user_defined_grad_outputs=[self.dout]) + user_defined_grad_outputs=[self.dout], + ) cls_name = "{0}_{1}".format(parent.__name__, "Expand_v2_BF16") TestExpandV2BF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_bf16_mkldnn_op.py index b5b83bf47ea72c3a3c1eea576047f9c5760e8cc2..b35b06915b5fd4e6eaace9257eda0f02c3d976d9 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_bf16_mkldnn_op.py @@ -25,16 +25,15 @@ def fully_connected_naive(input, weights, bias_data): class MatrixGenerate: - def __init__(self, mb, ic, oc, h, w): self.input = np.random.random((mb, ic * h * w)).astype(np.float32) self.weights = np.random.random((ic * h * w, oc)).astype(np.float32) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestFcBf16MklDNNOp(OpTest): - def generate_data(self): self.matrix = MatrixGenerate(1, 10, 15, 3, 3) self.bias = np.random.random(15).astype("float32") @@ -46,20 +45,21 @@ class TestFcBf16MklDNNOp(OpTest): self.force_fp32_output = False self.generate_data() - self.output = fully_connected_naive(self.matrix.input, - self.matrix.weights, self.bias) + self.output = fully_connected_naive( + self.matrix.input, self.matrix.weights, self.bias + ) if not self.force_fp32_output: self.output = convert_float_to_uint16(self.output) self.inputs = { 'Input': convert_float_to_uint16(self.matrix.input), 'W': self.matrix.weights, - 'Bias': self.bias + 'Bias': self.bias, } self.attrs = { 'use_mkldnn': self.use_mkldnn, - 'force_fp32_output': self.force_fp32_output + 'force_fp32_output': self.force_fp32_output, } self.outputs = {'Out': self.output} @@ -75,7 +75,6 @@ class TestFcBf16MklDNNOp(OpTest): class TestFCMKLDNNOp1(TestFcBf16MklDNNOp): - def generate_data(self): self.matrix = MatrixGenerate(2, 15, 48, 2, 2) self.bias = np.random.random(48).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_int8_mkldnn_op.py index 5cecbf159cc05efa280846734b49e267a15b7b25..954932517352725a076a68e2f6e80c206544bd8d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_int8_mkldnn_op.py @@ -19,7 +19,6 @@ from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool @OpTestTool.skip_if_not_cpu() class TestFCINT8OneDNNOp(OpTest): - def setUp(self): self.op_type = "fc" self._cpu_only = True @@ -32,7 +31,7 @@ class TestFCINT8OneDNNOp(OpTest): 'Scale_in': self.x_scale, 'Scale_weights': [self.y_scale], 'Scale_out': self.out_scale, - 'force_fp32_output': self.force_fp32_output + 
'force_fp32_output': self.force_fp32_output, } if self.force_fp32_output: @@ -50,7 +49,7 @@ class TestFCINT8OneDNNOp(OpTest): self.inputs = {'Input': self.x, 'W': self.y_float, 'Bias': self.bias} def quantize(self, tensor): - scale = 63. / np.abs(np.amax(tensor)) + scale = 63.0 / np.abs(np.amax(tensor)) quantized = np.round(scale * tensor).astype("int8") return scale, quantized @@ -74,7 +73,6 @@ class TestFCINT8OneDNNOp(OpTest): class TestFCINT8NoBiasOneDNNOp(TestFCINT8OneDNNOp): - def configure(self): self.use_bias = False self.force_fp32_output = False @@ -87,7 +85,6 @@ class TestFCINT8NoBiasOneDNNOp(TestFCINT8OneDNNOp): class TestFCINT8ForceFP32OutputOneDNNOp(TestFCINT8NoBiasOneDNNOp): - def configure(self): self.use_bias = False self.force_fp32_output = True @@ -95,5 +92,6 @@ class TestFCINT8ForceFP32OutputOneDNNOp(TestFCINT8NoBiasOneDNNOp): if __name__ == "__main__": import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py index 0c1d9bef032bc3d5c17e1696f7ecf5dbb9899cc9..d64e0b441ee696b7096d08fba9e305da96211348 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py @@ -23,14 +23,12 @@ def fully_connected_naive(input, weights, bias_data): class MatrixGenerate: - def __init__(self, mb, ic, oc, h, w): self.input = np.random.random((mb, ic * h * w)).astype("float32") self.weights = np.random.random((ic * h * w, oc)).astype("float32") class TestFCMKLDNNOp(OpTest): - def create_data(self): self.matrix = MatrixGenerate(1, 10, 15, 3, 3) self.bias = np.random.random(15).astype("float32") @@ -43,15 +41,15 @@ class TestFCMKLDNNOp(OpTest): self.inputs = { 'Input': self.matrix.input, 'W': self.matrix.weights, - 'Bias': self.bias + 'Bias': self.bias, } self.attrs = {'use_mkldnn': self.use_mkldnn} self.outputs = { - 'Out': - fully_connected_naive(self.matrix.input, self.matrix.weights, - self.bias) + 'Out': fully_connected_naive( + self.matrix.input, self.matrix.weights, self.bias + ) } def test_check_output(self): @@ -66,7 +64,6 @@ class TestFCMKLDNNOp(OpTest): class TestFCMKLDNNOp1(TestFCMKLDNNOp): - def create_data(self): self.matrix = MatrixGenerate(2, 15, 48, 2, 2) self.bias = np.random.random(48).astype("float32") @@ -74,5 +71,6 @@ class TestFCMKLDNNOp1(TestFCMKLDNNOp): if __name__ == "__main__": import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fill_constant_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fill_constant_mkldnn_op.py index f4dfcee10bca23e5ca4e3473e99a4f954279babb..92e274229b02560fd9a4e92e1cdd861969cb8d8d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fill_constant_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fill_constant_mkldnn_op.py @@ -20,7 +20,6 @@ import paddle @OpTestTool.skip_if_not_cpu_bf16() class TestFillConstant2DOneDNNOp(OpTest): - def setUp(self): self.op_type = "fill_constant" self.dtype = np.float32 @@ -62,59 +61,58 @@ class TestFillConstant2DOneDNNOp(OpTest): self.check_output() -class TestFillZerosLike4DShapeTensorPriorityOneDNNOp(TestFillConstant2DOneDNNOp - ): - +class TestFillZerosLike4DShapeTensorPriorityOneDNNOp( + TestFillConstant2DOneDNNOp +): def set_inputs(self): self.inputs = {'ShapeTensor': np.array([5, 6, 7, 8]).astype("int32")} class TestFillZerosLike4DShapeTensorListPriorityOneDNNOp( - 
TestFillConstant2DOneDNNOp): - + TestFillConstant2DOneDNNOp +): def set_inputs(self): shape = (4, 5, 6, 7) self.shape_tensor_list = [] for index, elem in enumerate(shape): - self.shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * elem)) + self.shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * elem) + ) self.inputs = {'ShapeTensorList': self.shape_tensor_list} class TestFillZerosLike2DStringValueInfOneDNNOp(TestFillConstant2DOneDNNOp): - def set_attrs(self): self.str_value = "inf" self.attrs = {'shape': (10, 13), 'use_mkldnn': True, 'str_value': "inf"} -class TestFillZerosLike2DStringValueMinusInfOneDNNOp(TestFillConstant2DOneDNNOp - ): - +class TestFillZerosLike2DStringValueMinusInfOneDNNOp( + TestFillConstant2DOneDNNOp +): def set_attrs(self): self.str_value = "-inf" self.attrs = { 'shape': (10, 13), 'use_mkldnn': True, - 'str_value': "-inf" + 'str_value': "-inf", } class TestFillZerosLike2DStringValueFloatOneDNNOp(TestFillConstant2DOneDNNOp): - def set_attrs(self): self.str_value = "0.123" self.attrs = { 'shape': (10, 13), 'use_mkldnn': True, - 'str_value': "0.123" + 'str_value': "0.123", } class TestFillZerosLike2DValueTensorPriorityOneDNNOp( - TestFillZerosLike2DStringValueFloatOneDNNOp): - + TestFillZerosLike2DStringValueFloatOneDNNOp +): def set_inputs(self): self.inputs = {'ValueTensor': np.atleast_1d(2.25).astype("float32")} diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py index ec4e87918d7c29f64b51b8bccdc06966880afa95..b59ce2ee71498f9595dc2c498e4325720a4f0765 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py @@ -20,7 +20,6 @@ import re class TestFlagsUseMkldnn(unittest.TestCase): - def setUp(self): self._python_interp = sys.executable self._python_interp += " check_flags_mkldnn_ops_on_off.py" @@ -30,16 +29,22 @@ class TestFlagsUseMkldnn(unittest.TestCase): self.env[str("FLAGS_use_mkldnn")] = str("1") self.relu_regex = b"^onednn_verbose,exec,cpu,eltwise,.+alg:eltwise_relu alpha:0 beta:0,10x20x20" - self.ew_add_regex = b"^onednn_verbose,exec,cpu,binary.+alg:binary_add,10x20x30:10x20x30" - self.matmul_regex = b"^onednn_verbose,exec,cpu,matmul,.*10x20x30:10x30x20:10x20x20" + self.ew_add_regex = ( + b"^onednn_verbose,exec,cpu,binary.+alg:binary_add,10x20x30:10x20x30" + ) + self.matmul_regex = ( + b"^onednn_verbose,exec,cpu,matmul,.*10x20x30:10x30x20:10x20x20" + ) def flags_use_mkl_dnn_common(self, e): cmd = self._python_interp env = dict(self.env, **e) - proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env) + proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + ) out, err = proc.communicate() returncode = proc.returncode @@ -98,7 +103,7 @@ class TestFlagsUseMkldnn(unittest.TestCase): def test_flags_use_mkl_dnn_on_off(self): env = { str("FLAGS_tracer_mkldnn_ops_on"): str("elementwise_add"), - str("FLAGS_tracer_mkldnn_ops_off"): str("matmul") + str("FLAGS_tracer_mkldnn_ops_off"): str("matmul"), } out, err = self.flags_use_mkl_dnn_common(env) assert self.not_found(self.relu_regex, out, err) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py index 
1e79d8e82c5d330a53390fcb29295849a0e86c2d..35ca3188f195000363b7a608cbbcc44785ecb901 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_flags_use_mkldnn.py @@ -20,7 +20,6 @@ import re class TestFlagsUseMkldnn(unittest.TestCase): - def setUp(self): self._python_interp = sys.executable self._python_interp += " check_flags_use_mkldnn.py" @@ -45,10 +44,12 @@ class TestFlagsUseMkldnn(unittest.TestCase): def test_flags_use_mkl_dnn(self): cmd = self._python_interp - proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=self.env) + proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=self.env, + ) out, err = proc.communicate() returncode = proc.returncode diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_flatten_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_flatten_mkldnn_op.py index 32e7cec2999e0bf313dd8a9a8394b41ebe3498ac..0786b8d7d7a9cbb9436eefb194d0e61dc50ba1bd 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_flatten_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_flatten_mkldnn_op.py @@ -17,12 +17,15 @@ import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) @OpTestTool.skip_if_not_cpu_bf16() class TestFlattenOneDNNOp(OpTest): - def setUp(self): self.set_op_type() self.init_test_case() @@ -50,7 +53,6 @@ class TestFlattenOneDNNOp(OpTest): class TestFlattenOneDNNOp1(TestFlattenOneDNNOp): - def init_test_case(self): self.in_shape = (3, 2, 2, 10) self.axis = 0 @@ -58,7 +60,6 @@ class TestFlattenOneDNNOp1(TestFlattenOneDNNOp): class TestFlattenOneDNNOpSixDims(TestFlattenOneDNNOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 @@ -66,28 +67,23 @@ class TestFlattenOneDNNOpSixDims(TestFlattenOneDNNOp): class TestFlatten2OneDNNOp(TestFlattenOneDNNOp): - def set_op_type(self): self.op_type = "flatten2" class TestFlatten2OneDNNOp1(TestFlattenOneDNNOp1): - def set_op_type(self): self.op_type = "flatten2" class TestFlatten2OneDNNOpSixDims(TestFlattenOneDNNOpSixDims): - def set_op_type(self): self.op_type = "flatten2" # BF16 TESTS def create_flatten_bf16_test_classes(parent): - class TestFlatten2BF16OneDNNOp(parent): - def set_inputs(self): self.dtype = np.uint16 self.inputs = { @@ -99,22 +95,25 @@ def create_flatten_bf16_test_classes(parent): self.dx = np.reshape(self.dout, self.ori_shape) def test_check_output(self): - self.check_output_with_place(core.CPUPlace(), - no_check_set=["XShape"]) + self.check_output_with_place( + core.CPUPlace(), no_check_set=["XShape"] + ) def test_check_grad(self): self.calculate_grads() - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - user_defined_grads=[self.dx], - user_defined_grad_outputs=[self.dout]) + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + user_defined_grads=[self.dx], + user_defined_grad_outputs=[self.dout], + ) cls_name = "{0}_{1}".format(parent.__name__, "Flatten2_BF16") TestFlatten2BF16OneDNNOp.__name__ = cls_name globals()[cls_name] = TestFlatten2BF16OneDNNOp class TestFlattenBF16OneDNNOp(parent): - def set_op_type(self): self.dtype = np.uint16 self.op_type = "flatten" @@ -138,10 +137,12 @@ def create_flatten_bf16_test_classes(parent): def 
test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) cls_name = "{0}_{1}".format(parent.__name__, "Flatten_BF16") TestFlattenBF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_bf16_mkldnn_op.py index 62759913e84527bdbc92fb758a966e115b7d1331..da8d5149360902adbfc54a799fe9ef3d8d989dd7 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_bf16_mkldnn_op.py @@ -20,10 +20,10 @@ from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestFusionGRUBF16MKLDNNOp(OpTest): - def set_confs(self): pass @@ -66,19 +66,30 @@ class TestFusionGRUBF16MKLDNNOp(OpTest): wh_bf16 = convert_float_to_uint16(wh_fp32) # bias is fp32 despite other inputs being in bf16 - bias = np.random.rand( - 1, 3 * self.D).astype('float32') if self.with_bias else np.zeros( - (1, 3 * self.D), dtype='float32') - - h0_fp32 = np.random.rand( - N, self.D).astype('float32') if self.with_h0 else np.zeros( - (N, self.D), dtype='float32') - - _, _, _, hidden = fusion_gru(x_fp32, self.lod, h0_fp32, wx_fp32, - wh_fp32, bias, self.is_reverse, - self.origin_mode, - ACTIVATION[self.act_state], - ACTIVATION[self.act_gate]) + bias = ( + np.random.rand(1, 3 * self.D).astype('float32') + if self.with_bias + else np.zeros((1, 3 * self.D), dtype='float32') + ) + + h0_fp32 = ( + np.random.rand(N, self.D).astype('float32') + if self.with_h0 + else np.zeros((N, self.D), dtype='float32') + ) + + _, _, _, hidden = fusion_gru( + x_fp32, + self.lod, + h0_fp32, + wx_fp32, + wh_fp32, + bias, + self.is_reverse, + self.origin_mode, + ACTIVATION[self.act_state], + ACTIVATION[self.act_gate], + ) hidden_bf16 = convert_float_to_uint16(hidden) @@ -86,13 +97,13 @@ class TestFusionGRUBF16MKLDNNOp(OpTest): self.inputs = { 'X': (x_bf16, self.lod), 'WeightX': wx_bf16, - 'WeightH': wh_bf16 + 'WeightH': wh_bf16, } elif self.weights_dtype == 'fp32': self.inputs = { 'X': (x_bf16, self.lod), 'WeightX': wx_fp32, - 'WeightH': wh_fp32 + 'WeightH': wh_fp32, } if self.with_bias: @@ -120,24 +131,22 @@ class TestFusionGRUBF16MKLDNNOp(OpTest): class TestFusionGRUINT8MKLDNNOp2(TestFusionGRUBF16MKLDNNOp): - def set_confs(self): self.origin_mode = False class TestFusionGRUINT8MKLDNNOp3(TestFusionGRUBF16MKLDNNOp): - def set_confs(self): self.with_bias = False class TestFusionGRUINT8MKLDNNBF16WeightsOp(TestFusionGRUBF16MKLDNNOp): - def set_confs(self): self.weights_dtype = 'bf16' if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_int8_mkldnn_op.py index 9cbbddd6a42af2d08c0c27434124b12a6fde6481..fdd9ef2b4cca9a742a2543833298e6bc670104e0 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_int8_mkldnn_op.py +++ 
b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_int8_mkldnn_op.py @@ -20,7 +20,6 @@ from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION class TestFusionGRUINT8MKLDNNOp(OpTest): - def set_confs(self): pass @@ -62,35 +61,60 @@ class TestFusionGRUINT8MKLDNNOp(OpTest): # WeightH data shape in PP: [OC, 2 * OC] + [OC, OC] # Scales shape in oneDNN: [3, OC] s8_max = 127.0 - scale_ur = s8_max / np.max(np.abs( - np.concatenate([ - wx[:, :2 * self.OC], - wh.flatten()[:2 * self.OC * self.OC].reshape( - self.OC, 2 * self.OC) - ], - axis=0)), - axis=0) - scale_o = s8_max / np.max(np.abs( - np.concatenate([ - wx[:, 2 * self.OC:], - wh.flatten()[2 * self.OC * self.OC:].reshape(self.OC, self.OC) - ], - axis=0)), - axis=0) + scale_ur = s8_max / np.max( + np.abs( + np.concatenate( + [ + wx[:, : 2 * self.OC], + wh.flatten()[: 2 * self.OC * self.OC].reshape( + self.OC, 2 * self.OC + ), + ], + axis=0, + ) + ), + axis=0, + ) + scale_o = s8_max / np.max( + np.abs( + np.concatenate( + [ + wx[:, 2 * self.OC :], + wh.flatten()[2 * self.OC * self.OC :].reshape( + self.OC, self.OC + ), + ], + axis=0, + ) + ), + axis=0, + ) scale_weights = np.concatenate([scale_ur, scale_o]).astype('float') - bias = np.random.rand( - 1, 3 * self.OC).astype('float32') if self.with_bias else np.zeros( - (1, 3 * self.OC), dtype='float32') - h0 = np.random.rand( - N, self.OC).astype('float32') if self.with_h0 else np.zeros( - (N, self.OC), dtype='float32') - - _, _, _, hidden_f32 = fusion_gru(x_f32, self.lod, h0, wx, wh, bias, - self.is_reverse, self.origin_mode, - ACTIVATION[self.act_state], - ACTIVATION[self.act_gate]) + bias = ( + np.random.rand(1, 3 * self.OC).astype('float32') + if self.with_bias + else np.zeros((1, 3 * self.OC), dtype='float32') + ) + h0 = ( + np.random.rand(N, self.OC).astype('float32') + if self.with_h0 + else np.zeros((N, self.OC), dtype='float32') + ) + + _, _, _, hidden_f32 = fusion_gru( + x_f32, + self.lod, + h0, + wx, + wh, + bias, + self.is_reverse, + self.origin_mode, + ACTIVATION[self.act_state], + ACTIVATION[self.act_gate], + ) self.inputs = {'X': (x_u8, self.lod), 'WeightX': wx, 'WeightH': wh} @@ -106,7 +130,8 @@ class TestFusionGRUINT8MKLDNNOp(OpTest): else: self.error_margin = 1 hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype( - np.uint8) + np.uint8 + ) # hidden_u8 = (hidden_f32 * scale_data + shift_data).astype(np.uint8) self.outputs = {'Hidden': (hidden_u8, self.lod)} @@ -120,7 +145,7 @@ class TestFusionGRUINT8MKLDNNOp(OpTest): 'force_fp32_output': self.force_fp32_output, 'Scale_data': scale_data, 'Shift_data': shift_data, - 'Scale_weights': scale_weights + 'Scale_weights': scale_weights, } def test_check_output(self): @@ -128,30 +153,27 @@ class TestFusionGRUINT8MKLDNNOp(OpTest): class TestFusionGRUINT8MKLDNNOp2(TestFusionGRUINT8MKLDNNOp): - def set_confs(self): self.force_fp32_output = False class TestFusionGRUINT8MKLDNNOp3(TestFusionGRUINT8MKLDNNOp): - def set_confs(self): self.origin_mode = False class TestFusionGRUINT8MKLDNNOp4(TestFusionGRUINT8MKLDNNOp): - def set_confs(self): self.with_bias = False class TestFusionGRUINT8MKLDNNOp5(TestFusionGRUINT8MKLDNNOp): - def set_confs(self): self.with_h0 = False if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_mkldnn_op.py index e47105e733cca4921ac3b61ee1f2aa4fd45e84ac..aa38c0047593f72aa440fcc5b001897f9e4851cc 100644 
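
The Scale_weights attribute assembled in the fusion_gru int8 hunk above is a per-output-channel symmetric scale: the update/reset-gate columns and the output-gate columns are scaled separately, each against the column-wise absolute maximum of the stacked WeightX and WeightH slices. The same arithmetic in isolation, as a standalone sketch with small hypothetical sizes M and OC:

import numpy as np

M, OC = 4, 5                                          # hypothetical sizes
wx = np.random.rand(M, 3 * OC).astype(np.float32)    # WeightX: [M, 3*OC]
wh = np.random.rand(OC, 3 * OC).astype(np.float32)   # WeightH: [OC, 2*OC] block + [OC, OC] block

s8_max = 127.0
# Per-channel scales for the update/reset gates: column-wise abs-max over the
# stacked WeightX and WeightH slices, then mapped onto the int8 range.
scale_ur = s8_max / np.max(
    np.abs(np.concatenate(
        [wx[:, : 2 * OC],
         wh.flatten()[: 2 * OC * OC].reshape(OC, 2 * OC)], axis=0)),
    axis=0)
# Same for the output-gate columns.
scale_o = s8_max / np.max(
    np.abs(np.concatenate(
        [wx[:, 2 * OC :],
         wh.flatten()[2 * OC * OC :].reshape(OC, OC)], axis=0)),
    axis=0)
scale_weights = np.concatenate([scale_ur, scale_o])
assert scale_weights.shape == (3 * OC,)  # one scale per gate output channel
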
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_mkldnn_op.py @@ -17,41 +17,35 @@ from paddle.fluid.tests.unittests.test_fusion_gru_op import TestFusionGRUOp class TestFusionGRUMKLDNNOp(TestFusionGRUOp): - def set_confs(self): self.use_mkldnn = True class TestFusionGRUMKLDNNOpNoInitial(TestFusionGRUOp): - def set_confs(self): self.with_h0 = False self.use_mkldnn = True class TestFusionGRUMKLDNNOpNoBias(TestFusionGRUOp): - def set_confs(self): self.with_bias = False self.use_mkldnn = True class TestFusionGRUMKLDNNOpReverse(TestFusionGRUOp): - def set_confs(self): self.is_reverse = True self.use_mkldnn = True class TestFusionGRUMKLDNNOpOriginMode(TestFusionGRUOp): - def set_confs(self): self.origin_mode = True self.use_mkldnn = True class TestFusionGRUMKLDNNOpMD1(TestFusionGRUOp): - def set_confs(self): self.M = 36 self.D = 8 @@ -59,7 +53,6 @@ class TestFusionGRUMKLDNNOpMD1(TestFusionGRUOp): class TestFusionGRUMKLDNNOpMD2(TestFusionGRUOp): - def set_confs(self): self.M = 8 self.D = 8 @@ -67,7 +60,6 @@ class TestFusionGRUMKLDNNOpMD2(TestFusionGRUOp): class TestFusionGRUMKLDNNOpMD3(TestFusionGRUOp): - def set_confs(self): self.M = 17 self.D = 15 @@ -75,7 +67,6 @@ class TestFusionGRUMKLDNNOpMD3(TestFusionGRUOp): class TestFusionGRUMKLDNNOpBS1(TestFusionGRUOp): - def set_confs(self): self.lod = [[3]] self.D = 16 @@ -84,5 +75,6 @@ class TestFusionGRUMKLDNNOpBS1(TestFusionGRUOp): if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py index a2fdf54da9ded87a6d236999198827cc1ce048f0..4d2842d26ede67895b3dc44b549a38f103a177bc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py @@ -16,22 +16,25 @@ import unittest import numpy as np import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 -from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION, fusion_lstm +from paddle.fluid.tests.unittests.test_fusion_lstm_op import ( + ACTIVATION, + fusion_lstm, +) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestFusionLSTMBF16ONEDNNOp(OpTest): - def set_confs(self): pass def test_check_output(self): for use_seq in {True, False}: self.attrs['use_seq'] = use_seq - self.check_output(check_dygraph=False, - no_check_set=["Cell"], - atol=2e-2) + self.check_output( + check_dygraph=False, no_check_set=["Cell"], atol=2e-2 + ) def setUp(self): self.op_type = 'fusion_lstm' @@ -75,8 +78,8 @@ class TestFusionLSTMBF16ONEDNNOp(OpTest): b = np.random.normal(size=(1, 7 * self.D)).astype('float32') else: b = np.random.normal(size=(1, 4 * self.D)).astype('float32') - w_b = np.copy(b[:, 0:4 * self.D]) - w_c = b[:, 4 * self.D:] if self.use_peepholes else None + w_b = np.copy(b[:, 0 : 4 * self.D]) + w_c = b[:, 4 * self.D :] if self.use_peepholes else None wx = np.random.normal(size=(self.M, 4 * self.D)).astype('float32') @@ -84,12 +87,23 @@ class TestFusionLSTMBF16ONEDNNOp(OpTest): wh_bf16 = convert_float_to_uint16(wh) bx = np.random.normal(size=(1, 4 * self.D)).astype('float32') - b[0, 0:4 * self.D] += 
bx[0, :] - - hidden, c = fusion_lstm(x, self.lod, wx, bx, h0, c0, wh, w_b, w_c, - self.is_reverse, ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand]) + b[0, 0 : 4 * self.D] += bx[0, :] + + hidden, c = fusion_lstm( + x, + self.lod, + wx, + bx, + h0, + c0, + wh, + w_b, + w_c, + self.is_reverse, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) hidden = hidden.astype('float32') hidden_bf16 = convert_float_to_uint16(hidden) @@ -99,14 +113,14 @@ class TestFusionLSTMBF16ONEDNNOp(OpTest): 'X': (x_bf16, self.lod), 'WeightX': wx_bf16, 'WeightH': wh_bf16, - 'Bias': b + 'Bias': b, } elif self.weights_dtype == 'fp32': self.inputs = { 'X': (x_bf16, self.lod), 'WeightX': wx, 'WeightH': wh, - 'Bias': b + 'Bias': b, } if self.has_initial_state: @@ -135,30 +149,27 @@ class TestFusionLSTMBF16ONEDNNOp(OpTest): class TestFusionLSTMBF16ONEDNNPeepholesOp(TestFusionLSTMBF16ONEDNNOp): - def set_confs(self): self.use_peepholes = True class TestFusionLSTMBF16ONEDNNInitializedStateOp(TestFusionLSTMBF16ONEDNNOp): - def set_confs(self): self.has_initial_state = True class TestFusionLSTMBF16ONEDNNReverseOp(TestFusionLSTMBF16ONEDNNOp): - def set_confs(self): self.is_reverse = True class TestFusionLSTMBF16ONEDNNBF16WeightsOp(TestFusionLSTMBF16ONEDNNOp): - def set_confs(self): self.weights_dtype = 'bf16' if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_int8_mkldnn_op.py index 968c01faa17b0f0cf0ba9b0149242c9ff4b8f1b9..0b252e7cbafa36f7a20347d841d083e4c2d57d81 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_int8_mkldnn_op.py @@ -15,11 +15,13 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import OpTest -from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION, fusion_lstm +from paddle.fluid.tests.unittests.test_fusion_lstm_op import ( + ACTIVATION, + fusion_lstm, +) class TestFusionLSTMINT8MKLDNNOp(OpTest): - def set_confs(self): pass @@ -59,7 +61,8 @@ class TestFusionLSTMINT8MKLDNNOp(OpTest): s8_max = 127.0 scale_weights = s8_max / np.max( - np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0) + np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0 + ) scale_weights = scale_weights.astype('float') @@ -67,11 +70,11 @@ class TestFusionLSTMINT8MKLDNNOp(OpTest): b = np.random.rand(1, 7 * self.OC).astype('float32') else: b = np.random.rand(1, 4 * self.OC).astype('float32') - w_b = np.copy(b[:, 0:4 * self.OC]) - w_c = b[:, 4 * self.OC:] if self.use_peepholes else None + w_b = np.copy(b[:, 0 : 4 * self.OC]) + w_c = b[:, 4 * self.OC :] if self.use_peepholes else None bx = np.random.normal(size=(1, 4 * self.OC)).astype('float32') - b[0, 0:4 * self.OC] += bx[0, :] + b[0, 0 : 4 * self.OC] += bx[0, :] if self.has_initial_state: h0 = np.random.rand(N, self.OC).astype('float32') @@ -80,17 +83,27 @@ class TestFusionLSTMINT8MKLDNNOp(OpTest): h0 = np.zeros((N, self.OC)).astype('float32') c0 = np.zeros((N, self.OC)).astype('float32') - hidden_f32, c = fusion_lstm(x_f32, self.lod, wx, bx, h0, c0, wh, w_b, - w_c, self.is_reverse, - ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand]) + hidden_f32, c = fusion_lstm( + x_f32, + self.lod, + wx, + bx, + h0, + c0, + wh, + w_b, + 
w_c, + self.is_reverse, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) self.inputs = { 'X': (x_u8, self.lod), 'WeightX': wx, 'WeightH': wh, - 'Bias': b + 'Bias': b, } if self.has_initial_state: @@ -101,15 +114,16 @@ class TestFusionLSTMINT8MKLDNNOp(OpTest): self.error_margin = 1e-1 self.outputs = { 'Hidden': (hidden_f32, self.lod), - 'Cell': (c, self.lod) + 'Cell': (c, self.lod), } else: self.error_margin = 2 hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype( - np.uint8) + np.uint8 + ) self.outputs = { 'Hidden': (hidden_u8, self.lod), - 'Cell': (c, self.lod) + 'Cell': (c, self.lod), } self.attrs = { @@ -123,36 +137,36 @@ class TestFusionLSTMINT8MKLDNNOp(OpTest): 'force_fp32_output': self.force_fp32_output, 'Scale_data': scale_data, 'Shift_data': shift_data, - 'Scale_weights': scale_weights + 'Scale_weights': scale_weights, } def test_check_output(self): for use_seq in {True, False}: self.attrs['use_seq'] = use_seq - self.check_output(check_dygraph=False, - no_check_set=["Cell"], - atol=self.error_margin) + self.check_output( + check_dygraph=False, + no_check_set=["Cell"], + atol=self.error_margin, + ) class TestFusionLSTMINT8MKLDNNOp2(TestFusionLSTMINT8MKLDNNOp): - def set_confs(self): self.force_fp32_output = True class TestFusionLSTMINT8MKLDNNOp4(TestFusionLSTMINT8MKLDNNOp): - def set_confs(self): self.is_reverse = True class TestFusionLSTMINT8MKLDNNOp5(TestFusionLSTMINT8MKLDNNOp): - def set_confs(self): self.has_initial_state = True if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_mkldnn_op.py index 5ce307fe35ed59e7a7597032b387505cf7dfdeeb..8df2aedb3762d06877ab3ace2955476a11d1315d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fusion_lstm_mkldnn_op.py @@ -17,7 +17,6 @@ from paddle.fluid.tests.unittests.test_fusion_lstm_op import TestFusionLSTMOp class TestFusionLSTMONEDNNOp(TestFusionLSTMOp): - def set_conf(self): self.use_mkldnn = True @@ -28,14 +27,12 @@ class TestFusionLSTMONEDNNOp(TestFusionLSTMOp): class TestFusionLSTMONEDNNOpReverse(TestFusionLSTMONEDNNOp): - def set_conf(self): self.is_reverse = True self.use_mkldnn = True class TestFusionLSTMONEDNNOpInitReverse(TestFusionLSTMONEDNNOp): - def set_conf(self): self.has_initial_state = True self.is_reverse = True @@ -43,7 +40,6 @@ class TestFusionLSTMONEDNNOpInitReverse(TestFusionLSTMONEDNNOp): class TestFusionLSTMONEDNNOpMD1(TestFusionLSTMONEDNNOp): - def set_conf(self): self.M = 36 self.D = 8 @@ -51,7 +47,6 @@ class TestFusionLSTMONEDNNOpMD1(TestFusionLSTMONEDNNOp): class TestFusionLSTMONEDNNOpMD2(TestFusionLSTMONEDNNOp): - def set_conf(self): self.M = 8 self.D = 8 @@ -59,7 +54,6 @@ class TestFusionLSTMONEDNNOpMD2(TestFusionLSTMONEDNNOp): class TestFusionLSTMONEDNNOpMD3(TestFusionLSTMONEDNNOp): - def set_conf(self): self.M = 15 self.D = 3 @@ -67,7 +61,6 @@ class TestFusionLSTMONEDNNOpMD3(TestFusionLSTMONEDNNOp): class TestFusionLSTMONEDNNOpBS1(TestFusionLSTMONEDNNOp): - def set_conf(self): self.lod = [[3]] self.D = 16 @@ -75,7 +68,6 @@ class TestFusionLSTMONEDNNOpBS1(TestFusionLSTMONEDNNOp): class TestFusionLSTMONEDNNOpPeepholesInit(TestFusionLSTMONEDNNOp): - def set_conf(self): self.use_peepholes = True self.has_initial_state = True @@ -84,5 +76,6 @@ class 
TestFusionLSTMONEDNNOpPeepholesInit(TestFusionLSTMONEDNNOp): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py index 545caa7b73f6b638e368753b68e4c5f36c59ba9b..8a7c7c23e40f1b65842b96c8b86561f2ac00f3a7 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py @@ -14,17 +14,17 @@ import unittest -from paddle.fluid.tests.unittests.test_gaussian_random_op import TestGaussianRandomOp +from paddle.fluid.tests.unittests.test_gaussian_random_op import ( + TestGaussianRandomOp, +) class TestMKLDNNGaussianRandomOpSeed10(TestGaussianRandomOp): - def init_kernel_type(self): self.use_mkldnn = True class TestMKLDNNGaussianRandomOpSeed0(TestGaussianRandomOp): - def setUp(self): TestGaussianRandomOp.setUp(self) self.use_mkldnn = True @@ -33,7 +33,7 @@ class TestMKLDNNGaussianRandomOpSeed0(TestGaussianRandomOp): "mean": 1.0, "std": 2.0, "seed": 10, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, } diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py index ab4cc17a713c667bcb427b9d46d2c95624ec770b..29dd1a8f3609d1e9420609e1d4b63e8f94dfbeac 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py @@ -22,8 +22,12 @@ import paddle.fluid as fluid from paddle import enable_static from functools import reduce -from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import TestLayerNormMKLDNNOp -from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import _reference_layer_norm_naive +from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import ( + TestLayerNormMKLDNNOp, +) +from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import ( + _reference_layer_norm_naive, +) from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16 from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator @@ -32,26 +36,22 @@ np.random.random(123) _set_use_system_allocator(True) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp): - def __assert_close(self, tensor, np_array, msg, rtol=2e-02, atol=2): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=rtol, - atol=atol, - err_msg=msg) - - def check_forward(self, - shape, - begin_norm_axis, - with_scale_bias=True, - with_is_test=False): + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=rtol, atol=atol, err_msg=msg + ) + + def check_forward( + self, shape, begin_norm_axis, with_scale_bias=True, with_is_test=False + ): # attr epsilon = 0.00001 x_shape = shape - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) scale_shape = [D] np.random.seed(123) @@ -66,8 +66,9 @@ class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp): bias = np.array([]) # reference forward & backward - y, mean, variance = _reference_layer_norm_naive(x, scale, bias, epsilon, - begin_norm_axis) + 
y, mean, variance = _reference_layer_norm_naive( + x, scale, bias, epsilon, begin_norm_axis + ) y_bf16 = convert_float_to_uint16(y) @@ -85,13 +86,17 @@ class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp): # scale and bias are fp32 and other vars are of bf16 for name in ground_truth: if name == 'x_bf16' or name == 'y_bf16': - block.create_var(name=name, - dtype='uint16', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='uint16', + shape=ground_truth[name].shape, + ) else: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = {"X": block.var('x_bf16')} if with_scale_bias: @@ -110,8 +115,9 @@ class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp): "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, "use_mkldnn": True, - "is_test": with_is_test - }) + "is_test": with_is_test, + }, + ) exe = fluid.Executor(core.CPUPlace()) @@ -120,19 +126,20 @@ class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp): input_list.append('scale') input_list.append('bias') - out = exe.run(program, - feed={name: var_dict[name] - for name in input_list}, - fetch_list=['y_bf16', 'mean', 'variance']) + out = exe.run( + program, + feed={name: var_dict[name] for name in input_list}, + fetch_list=['y_bf16', 'mean', 'variance'], + ) self.__assert_close(y_bf16, out[0], "y_bf16", 2) if not with_is_test: self.__assert_close(mean, out[1], "mean") self.__assert_close(variance, out[2], "variance", 1e-3) def test_check_forward_with_is_test(self): - self.check_forward(shape=[2, 3, 4, 5], - begin_norm_axis=3, - with_is_test=True) + self.check_forward( + shape=[2, 3, 4, 5], begin_norm_axis=3, with_is_test=True + ) # TODO (jczaja): Enable those to test when enabling training using bf16 def test_check_forward_with_scale_and_bias(self): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py index 6506ea41ccdc1b56fa5e24e3811a0888e75e5f81..2233bdfda957601c065fe03757ca59e49da7f91a 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py @@ -22,7 +22,10 @@ import paddle.fluid as fluid from paddle import enable_static from functools import reduce -from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator, OpTestTool +from paddle.fluid.tests.unittests.op_test import ( + _set_use_system_allocator, + OpTestTool, +) np.random.random(123) @@ -32,7 +35,7 @@ _set_use_system_allocator(True) def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): x_shape = x.shape N = reduce(mul, x_shape[0:begin_norm_axis], 1) - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) x.shape = [N, D] if scale.size == 0 and beta.size == 0: scale = np.ones([1, D]) @@ -43,34 +46,32 @@ def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): mean = np.mean(x, axis=1) var = np.var(x, axis=1) + epsilon - output = scale * np.divide((x - mean.reshape([N, 1])), - (np.sqrt(var)).reshape([N, 1])) + beta + output = ( + scale + * np.divide((x - mean.reshape([N, 1])), (np.sqrt(var)).reshape([N, 1])) + + beta + ) x.shape, output.shape = x_shape, x_shape return output, mean, var class TestLayerNormMKLDNNOp(unittest.TestCase): - def setUp(self): self.use_mkldnn = True def __assert_close(self, tensor, 
np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) - - def check_forward(self, - shape, - begin_norm_axis, - with_scale_bias=True, - with_is_test=False): + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) + + def check_forward( + self, shape, begin_norm_axis, with_scale_bias=True, with_is_test=False + ): # attr epsilon = 0.00001 x_shape = shape - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) scale_shape = [D] np.random.seed(123) @@ -84,8 +85,9 @@ class TestLayerNormMKLDNNOp(unittest.TestCase): bias = np.array([]) # reference forward & backward - y, mean, variance = _reference_layer_norm_naive(x, scale, bias, epsilon, - begin_norm_axis) + y, mean, variance = _reference_layer_norm_naive( + x, scale, bias, epsilon, begin_norm_axis + ) var_dict = locals() var_names = ['x', 'mean', 'variance', 'y'] @@ -99,9 +101,9 @@ class TestLayerNormMKLDNNOp(unittest.TestCase): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, dtype='float32', shape=ground_truth[name].shape + ) inputs = {"X": block.var('x')} if with_scale_bias: @@ -120,8 +122,9 @@ class TestLayerNormMKLDNNOp(unittest.TestCase): "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, "use_mkldnn": True, - "is_test": with_is_test - }) + "is_test": with_is_test, + }, + ) exe = fluid.Executor(core.CPUPlace()) @@ -130,10 +133,11 @@ class TestLayerNormMKLDNNOp(unittest.TestCase): input_list.append('scale') input_list.append('bias') - out = exe.run(program, - feed={name: var_dict[name] - for name in input_list}, - fetch_list=['y', 'mean', 'variance']) + out = exe.run( + program, + feed={name: var_dict[name] for name in input_list}, + fetch_list=['y', 'mean', 'variance'], + ) self.__assert_close(y, out[0], "y") if not with_is_test: self.__assert_close(mean, out[1], "mean") @@ -147,14 +151,14 @@ class TestLayerNormMKLDNNOp(unittest.TestCase): self.check_forward(shape=[2, 3, 4, 5], begin_norm_axis=3) def test_check_forward_without_scale_and_bias(self): - self.check_forward(shape=[2, 3, 4, 5], - begin_norm_axis=3, - with_scale_bias=False) + self.check_forward( + shape=[2, 3, 4, 5], begin_norm_axis=3, with_scale_bias=False + ) def test_check_forward_with_is_test(self): - self.check_forward(shape=[2, 3, 4, 5], - begin_norm_axis=3, - with_is_test=True) + self.check_forward( + shape=[2, 3, 4, 5], begin_norm_axis=3, with_is_test=True + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py index 89de5198101c2edabe4a20fb69e496dce2a1f1e0..4279ffc5915fc17ff49afd96700ccc530604a3bb 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py @@ -17,12 +17,15 @@ import numpy as np import paddle from paddle.fluid import core from paddle.fluid.tests.unittests.test_log_softmax import ref_log_softmax -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) @OpTestTool.skip_if_not_cpu_bf16() class TestLogSoftmaxOneDNNOp(OpTest): - def setUp(self): self.op_type = 'log_softmax' 
self.set_dtype() @@ -53,45 +56,39 @@ class TestLogSoftmaxOneDNNOp(OpTest): class TestLogSoftmax1DOneDNNOp(TestLogSoftmaxOneDNNOp): - def set_shape(self): self.shape = [100] class TestLogSoftmax3DOneDNNOp(TestLogSoftmaxOneDNNOp): - def set_shape(self): self.shape = [12, 10, 3] class TestLogSoftmax5DOneDNNOp(TestLogSoftmaxOneDNNOp): - def set_shape(self): self.shape = [2, 3, 4, 5, 6] class TestLogSoftmaxPositiveAxisOneDNNOp(TestLogSoftmaxOneDNNOp): - def set_axis(self): self.axis = 2 # BF16 TESTS class TestLogSoftmax1DBF16OneDNNOp(TestLogSoftmax1DOneDNNOp): - def set_dtype(self): self.dtype = np.uint16 -class TestLogSoftmaxPositiveAxisBF16OneDNNOp(TestLogSoftmaxPositiveAxisOneDNNOp - ): - +class TestLogSoftmaxPositiveAxisBF16OneDNNOp( + TestLogSoftmaxPositiveAxisOneDNNOp +): def set_dtype(self): self.dtype = np.uint16 class TestLogSoftmax5DBF16OneDNNOp(TestLogSoftmax5DOneDNNOp): - def set_shape(self): self.shape = [2, 3, 4, 5, 6] diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py index 8f3913a86f421e7a9895183102456bda516cb9f0..f81a0d22aa7602f608b475613a5fe342783b1e9d 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py @@ -17,7 +17,6 @@ from paddle.fluid.tests.unittests.test_lrn_op import TestLRNOp class TestLRNMKLDNNOp(TestLRNOp): - def get_attrs(self): attrs = TestLRNOp.get_attrs(self) attrs['use_mkldnn'] = True @@ -26,36 +25,31 @@ class TestLRNMKLDNNOp(TestLRNOp): def test_check_output(self): # We cannot validate MidOut as LRN REF has diffrent meaning in it # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output(atol=0.002, - no_check_set=['MidOut'], - check_dygraph=False) + self.check_output( + atol=0.002, no_check_set=['MidOut'], check_dygraph=False + ) def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad(['X'], - 'Out', - max_relative_error=0.01, - check_dygraph=False) + self.check_grad( + ['X'], 'Out', max_relative_error=0.01, check_dygraph=False + ) class TestLRNMKLDNNOpWithIsTest(TestLRNMKLDNNOp): - def get_attrs(self): attrs = TestLRNMKLDNNOp.get_attrs(self) attrs['is_test'] = True return attrs def test_check_grad_normal(self): - def check_raise_is_test(): try: - self.check_grad(['X'], - 'Out', - max_relative_error=0.01, - check_dygraph=False) + self.check_grad( + ['X'], 'Out', max_relative_error=0.01, check_dygraph=False + ) except Exception as e: - t = \ - "is_test attribute should be set to False in training phase." + t = "is_test attribute should be set to False in training phase." 
if t in str(e): raise AttributeError @@ -63,12 +57,12 @@ class TestLRNMKLDNNOpWithIsTest(TestLRNMKLDNNOp): class TestLRNMKLDNNOpNHWC(TestLRNMKLDNNOp): - def init_test_case(self): self.data_format = 'NHWC' if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_bf16_mkldnn_op.py index 8940502b8066222125ba14883a427077ade41bbd..cdb9dfd8ee8380fbb3ea8453e934729cf7d8f40f 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_bf16_mkldnn_op.py @@ -19,10 +19,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestMatmulBf16MklDNNOp(OpTest): - def generate_data(self): self.x_fp32 = np.random.random((25, 2, 2)).astype(np.float32) self.y_fp32 = np.random.random((25, 2, 2)).astype(np.float32) @@ -35,7 +35,7 @@ class TestMatmulBf16MklDNNOp(OpTest): "mkldnn_data_type": self.mkldnn_data_type, "force_fp32_output": self.force_fp32_output, 'transpose_X': False, - 'transpose_Y': False + 'transpose_Y': False, } def setUp(self): @@ -62,11 +62,13 @@ class TestMatmulBf16MklDNNOp(OpTest): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X", "Y"], + core.CPUPlace(), + ["X", "Y"], "Out", check_dygraph=False, user_defined_grads=[self.dx, self.dy], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) def matmul_grad(self, x, transpose_x, y, transpose_y): x_transpose_axes = [1, 0] if x.ndim == 2 else [0, 2, 1] @@ -81,23 +83,35 @@ class TestMatmulBf16MklDNNOp(OpTest): x_transpose_axes = [1, 0] if self.x_fp32.ndim == 2 else [0, 2, 1] y_transpose_axes = [1, 0] if self.y_fp32.ndim == 2 else [0, 2, 1] - x = np.transpose(self.x_fp32, x_transpose_axes - ) if self.attrs['transpose_X'] is True else self.x_fp32 - y = np.transpose(self.y_fp32, y_transpose_axes - ) if self.attrs['transpose_Y'] is True else self.y_fp32 + x = ( + np.transpose(self.x_fp32, x_transpose_axes) + if self.attrs['transpose_X'] is True + else self.x_fp32 + ) + y = ( + np.transpose(self.y_fp32, y_transpose_axes) + if self.attrs['transpose_Y'] is True + else self.y_fp32 + ) dout = self.alpha * np.matmul(x, y) - if self.attrs['transpose_X'] is True and self.attrs[ - 'transpose_Y'] is True: + if ( + self.attrs['transpose_X'] is True + and self.attrs['transpose_Y'] is True + ): self.dx = self.matmul_grad(self.y_fp32, True, dout, True) self.dy = self.matmul_grad(dout, True, self.x_fp32, True) - elif self.attrs['transpose_X'] is True and self.attrs[ - 'transpose_Y'] is False: + elif ( + self.attrs['transpose_X'] is True + and self.attrs['transpose_Y'] is False + ): self.dx = self.matmul_grad(self.y_fp32, False, dout, True) self.dy = self.matmul_grad(self.x_fp32, False, dout, False) - elif self.attrs['transpose_X'] is False and self.attrs[ - 'transpose_Y'] is True: + elif ( + self.attrs['transpose_X'] is False + and self.attrs['transpose_Y'] is True + ): self.dx = self.matmul_grad(dout, False, self.y_fp32, False) self.dy = self.matmul_grad(dout, True, self.x_fp32, False) else: @@ -108,7 +122,6 @@ class 
TestMatmulBf16MklDNNOp(OpTest): class TestDnnlMatMulOpAlpha(TestMatmulBf16MklDNNOp): - def generate_data(self): self.x_fp32 = np.random.random((17, 2, 3)).astype(np.float32) self.y_fp32 = np.random.random((17, 3, 2)).astype(np.float32) @@ -117,7 +130,6 @@ class TestDnnlMatMulOpAlpha(TestMatmulBf16MklDNNOp): class TestDnnlMatMulOp2D(TestMatmulBf16MklDNNOp): - def generate_data(self): self.x_fp32 = np.random.random((12, 9)).astype(np.float32) self.y_fp32 = np.random.random((9, 12)).astype(np.float32) @@ -125,7 +137,6 @@ class TestDnnlMatMulOp2D(TestMatmulBf16MklDNNOp): class TestDnnlMatMulOpTransposeX(TestMatmulBf16MklDNNOp): - def generate_data(self): self.x_fp32 = np.random.random((12, 9)).astype(np.float32) self.y_fp32 = np.random.random((12, 9)).astype(np.float32) @@ -136,12 +147,11 @@ class TestDnnlMatMulOpTransposeX(TestMatmulBf16MklDNNOp): "use_mkldnn": self.use_mkldnn, "mkldnn_data_type": self.mkldnn_data_type, 'transpose_X': True, - 'transpose_Y': False + 'transpose_Y': False, } class TestDnnlMatMulOpTransposeY(TestMatmulBf16MklDNNOp): - def generate_data(self): self.x_fp32 = np.random.random((12, 9)).astype(np.float32) self.y_fp32 = np.random.random((12, 9)).astype(np.float32) @@ -152,12 +162,11 @@ class TestDnnlMatMulOpTransposeY(TestMatmulBf16MklDNNOp): "use_mkldnn": self.use_mkldnn, "mkldnn_data_type": self.mkldnn_data_type, 'transpose_Y': True, - 'transpose_X': False + 'transpose_X': False, } class TestMatmulBf16MklDNNForceFp32Output(TestMatmulBf16MklDNNOp): - def generate_data(self): self.x_fp32 = np.random.random((12, 9)).astype(np.float32) self.y_fp32 = np.random.random((9, 12)).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_mkldnn_op.py index ffaa725614b770c3e0a4c06811957d1ab90e6abb..4d4b6a384a5ad139816b4229d5f40fb39943ec31 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_mkldnn_op.py @@ -18,7 +18,6 @@ from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci class TestDnnlMatMulOp(OpTest): - def generate_data(self): self.x = np.random.random((25, 2, 2)).astype("float32") self.y = np.random.random((25, 2, 2)).astype("float32") @@ -47,13 +46,11 @@ class TestDnnlMatMulOp(OpTest): class TestDnnlMatMulWithGradOp(TestDnnlMatMulOp): - def test_check_grad(self): self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-2) class TestDnnlMatMulOpMixedDims1(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((17, 2, 3)).astype("float32") self.y = np.random.random((3, 4)).astype("float32") @@ -61,7 +58,6 @@ class TestDnnlMatMulOpMixedDims1(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpMixedDimsYWiderTransposeY(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((8, 2, 3)).astype("float32") self.y = np.random.random((4, 3)).astype("float32") @@ -72,7 +68,6 @@ class TestDnnlMatMulOpMixedDimsYWiderTransposeY(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpMixedDimsYWiderTransposeX(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((8, 3, 2)).astype("float32") self.y = np.random.random((3, 4)).astype("float32") @@ -83,31 +78,30 @@ class TestDnnlMatMulOpMixedDimsYWiderTransposeX(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpMixedDimsXWiderTransposeXY(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((8, 3, 2)).astype("float32") self.y = 
np.random.random((4, 3)).astype("float32") - self.out = np.matmul(np.transpose(self.x, (0, 2, 1)), - np.transpose(self.y)) + self.out = np.matmul( + np.transpose(self.x, (0, 2, 1)), np.transpose(self.y) + ) def set_attributes(self): self.attrs = {'transpose_X': True, 'transpose_Y': True} class TestDnnlMatMulOpMixedDimsYWiderTransposeXY(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((3, 2)).astype("float32") self.y = np.random.random((8, 4, 3)).astype("float32") - self.out = np.matmul(np.transpose(self.x), - np.transpose(self.y, (0, 2, 1))) + self.out = np.matmul( + np.transpose(self.x), np.transpose(self.y, (0, 2, 1)) + ) def set_attributes(self): self.attrs = {'transpose_X': True, 'transpose_Y': True} class TestDnnlMatMulOpMixedDimsXWiderTransposeX(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((5, 4)).astype("float32") self.y = np.random.random((8, 5, 4)).astype("float32") @@ -118,7 +112,6 @@ class TestDnnlMatMulOpMixedDimsXWiderTransposeX(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpVectorMultiply(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((5)).astype("float32") self.y = np.random.random((5)).astype("float32") @@ -126,7 +119,6 @@ class TestDnnlMatMulOpVectorMultiply(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpVectorMultiplyTranspose(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((5)).astype("float32") x_resized = np.copy(self.x) @@ -141,7 +133,6 @@ class TestDnnlMatMulOpVectorMultiplyTranspose(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpMixedDims2(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((2, 3)).astype("float32") self.y = np.random.random((17, 3, 4)).astype("float32") @@ -149,7 +140,6 @@ class TestDnnlMatMulOpMixedDims2(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpAlpha(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((17, 2, 3)).astype("float32") self.y = np.random.random((17, 3, 2)).astype("float32") @@ -158,7 +148,6 @@ class TestDnnlMatMulOpAlpha(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOp2D(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((12, 9)).astype("float32") self.y = np.random.random((9, 12)).astype("float32") @@ -166,7 +155,6 @@ class TestDnnlMatMulOp2D(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpTransposeX(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((12, 9)).astype("float32") self.y = np.random.random((12, 9)).astype("float32") @@ -177,7 +165,6 @@ class TestDnnlMatMulOpTransposeX(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpTransposeY(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((12, 9)).astype("float32") self.y = np.random.random((12, 9)).astype("float32") @@ -188,7 +175,6 @@ class TestDnnlMatMulOpTransposeY(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpTransposeY3D(TestDnnlMatMulWithGradOp): - def generate_data(self): self.x = np.random.random((17, 3, 2)).astype("float32") self.y = np.random.random((17, 3, 2)).astype("float32") @@ -199,7 +185,6 @@ class TestDnnlMatMulOpTransposeY3D(TestDnnlMatMulWithGradOp): class TestDnnlMatMulOpInt8NoScales(TestDnnlMatMulOp): - def generate_data(self): self.x = np.random.random((12, 9)).astype("int8") self.y = np.random.random((9, 12)).astype("int8") @@ -211,7 +196,7 @@ class TestDnnlMatMulOpInt8(TestDnnlMatMulOp): # on older platforms (BDW, SKX) we needed to reduce # range from [-127, 127] to [-63, 
63] def quantize(self, tensor): - scale = 63. / np.abs(np.amax(tensor)) + scale = 63.0 / np.abs(np.amax(tensor)) quantized = np.round(scale * tensor).astype("int8") return scale, quantized @@ -238,7 +223,6 @@ class TestDnnlMatMulOpInt8(TestDnnlMatMulOp): class TestDnnlMatMulOpInt8ForceFP32(TestDnnlMatMulOpInt8): - def generate_data(self): x_float = np.random.random((12, 9)).astype("float32") self.x_scale, self.x = self.quantize(x_float) @@ -253,12 +237,11 @@ class TestDnnlMatMulOpInt8ForceFP32(TestDnnlMatMulOpInt8): self.attrs = { 'Scale_x': self.x_scale, 'Scale_y': self.y_scale, - 'force_fp32_output': True + 'force_fp32_output': True, } class TestDnnlMatMulOpInt8ForceFP32BasicScales(TestDnnlMatMulOp): - def generate_data(self): self.x = np.random.randint(0, 3, (12, 9)).astype("int8") self.y = np.random.randint(0, 3, (9, 12)).astype("int8") @@ -270,15 +253,22 @@ class TestDnnlMatMulOpInt8ForceFP32BasicScales(TestDnnlMatMulOp): @skip_check_grad_ci(reason="DNNL's MatMul doesn't implement grad kernel.") class TestReshapeTransposeMatMulOp(OpTest): - def init_data_type(self): self.data_type_ = 'float32' def generate_data(self): - self.x = np.random.random([2, 128, 768]).astype("float32").reshape( - [2, 128, 12, 64]).transpose([0, 2, 1, 3]) - self.y = np.random.random([2, 128, 768]).astype("float32").reshape( - [2, 128, 12, 64]).transpose([0, 2, 1, 3]) + self.x = ( + np.random.random([2, 128, 768]) + .astype("float32") + .reshape([2, 128, 12, 64]) + .transpose([0, 2, 1, 3]) + ) + self.y = ( + np.random.random([2, 128, 768]) + .astype("float32") + .reshape([2, 128, 12, 64]) + .transpose([0, 2, 1, 3]) + ) self.out = np.matmul(self.x, self.y.transpose([0, 1, 3, 2])) self.fused_reshape_X = [] self.fused_transpose_X = [] @@ -300,7 +290,7 @@ class TestReshapeTransposeMatMulOp(OpTest): self.inputs = {'X': self.x, 'Y': self.y} self.attrs = { 'use_mkldnn': self.use_mkldnn, - self.transpose_y_name: self.transpose_y + self.transpose_y_name: self.transpose_y, } if len(self.fused_transpose_X) > 0: self.attrs['fused_transpose_X'] = self.fused_transpose_X @@ -318,49 +308,53 @@ class TestReshapeTransposeMatMulOp(OpTest): class TestReshapeTransposeMatMulOp4DXFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): self.x = np.random.random([2, 128, 768]).astype("float32") - self.y = np.random.random([2, 128, 768]).astype("float32").reshape( - [2, 128, 12, 64]).transpose([0, 2, 1, 3]) + self.y = ( + np.random.random([2, 128, 768]) + .astype("float32") + .reshape([2, 128, 12, 64]) + .transpose([0, 2, 1, 3]) + ) self.fused_transpose_X = [0, 2, 1, 3] self.fused_reshape_X = [0, 0, 12, 64] self.fused_transpose_Y = [] self.fused_reshape_Y = [] self.out = np.matmul( self.x.reshape([2, 128, 12, 64]).transpose([0, 2, 1, 3]), - self.y.transpose([0, 1, 3, 2])) + self.y.transpose([0, 1, 3, 2]), + ) class TestReshapeTransposeMatMulOp4DXInt8(TestReshapeTransposeMatMulOp4DXFloat): - def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp4DYFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): - self.x = np.random.random([2, 128, 768]).astype("float32").reshape( - [2, 128, 12, 64]).transpose([0, 2, 1, 3]) + self.x = ( + np.random.random([2, 128, 768]) + .astype("float32") + .reshape([2, 128, 12, 64]) + .transpose([0, 2, 1, 3]) + ) self.y = np.random.random([2, 128, 768]).astype("float32") self.fused_transpose_X = [] self.fused_reshape_X = [] self.fused_transpose_Y = [0, 2, 1, 3] self.fused_reshape_Y = [0, 0, 12, 64] self.out = np.matmul( - self.x, - self.y.reshape([2, 128, 12, 
64]).transpose([0, 2, 3, 1])) + self.x, self.y.reshape([2, 128, 12, 64]).transpose([0, 2, 3, 1]) + ) class TestReshapeTransposeMatMulOp4DYInt8(TestReshapeTransposeMatMulOp4DYFloat): - def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp4DXYFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): self.x = np.random.random([2, 128, 768]).astype("float32") self.y = np.random.random([2, 128, 768]).astype("float32") @@ -370,42 +364,48 @@ class TestReshapeTransposeMatMulOp4DXYFloat(TestReshapeTransposeMatMulOp): self.fused_reshape_Y = [0, 0, 12, 64] self.out = np.matmul( self.x.reshape([2, 128, 12, 64]).transpose([0, 2, 1, 3]), - self.y.reshape([2, 128, 12, 64]).transpose([0, 2, 3, 1])) - + self.y.reshape([2, 128, 12, 64]).transpose([0, 2, 3, 1]), + ) -class TestReshapeTransposeMatMulOp4DXYInt8(TestReshapeTransposeMatMulOp4DXYFloat - ): +class TestReshapeTransposeMatMulOp4DXYInt8( + TestReshapeTransposeMatMulOp4DXYFloat +): def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp2DXFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): self.x = np.random.random([2, 5, 10]).astype("float32") - self.y = np.random.random([2, 5, 10]).astype("float32").reshape( - [10, 10]).transpose([1, 0]) + self.y = ( + np.random.random([2, 5, 10]) + .astype("float32") + .reshape([10, 10]) + .transpose([1, 0]) + ) self.fused_transpose_X = [1, 0] self.fused_reshape_X = [10, 10] self.fused_transpose_Y = [] self.fused_reshape_Y = [] self.out = np.matmul( - self.x.reshape([10, 10]).transpose([1, 0]), self.y.transpose([1, - 0])) + self.x.reshape([10, 10]).transpose([1, 0]), self.y.transpose([1, 0]) + ) class TestReshapeTransposeMatMulOp2DXInt8(TestReshapeTransposeMatMulOp2DXFloat): - def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp2DYFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): - self.x = np.random.random([2, 5, 10]).astype("float32").reshape( - [10, 10]).transpose([1, 0]) + self.x = ( + np.random.random([2, 5, 10]) + .astype("float32") + .reshape([10, 10]) + .transpose([1, 0]) + ) self.y = np.random.random([2, 5, 10]).astype("float32") self.fused_transpose_X = [] self.fused_reshape_X = [] @@ -415,37 +415,42 @@ class TestReshapeTransposeMatMulOp2DYFloat(TestReshapeTransposeMatMulOp): class TestReshapeTransposeMatMulOp2DYInt8(TestReshapeTransposeMatMulOp2DYFloat): - def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp3DXFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): self.x = np.random.random([2, 2, 5, 5]).astype("float32") - self.y = np.random.random([2, 2, 5, 5]).astype("float32").reshape( - [2, 10, 5]).transpose([0, 2, 1]) + self.y = ( + np.random.random([2, 2, 5, 5]) + .astype("float32") + .reshape([2, 10, 5]) + .transpose([0, 2, 1]) + ) self.fused_transpose_X = [0, 2, 1] self.fused_reshape_X = [2, 10, 5] self.fused_transpose_Y = [] self.fused_reshape_Y = [] self.out = np.matmul( self.x.reshape([2, 10, 5]).transpose(0, 2, 1), - self.y.transpose(0, 2, 1)) + self.y.transpose(0, 2, 1), + ) class TestReshapeTransposeMatMulOp3DXInt8(TestReshapeTransposeMatMulOp3DXFloat): - def init_data_type(self): self.data_type_ = 'int8' class TestReshapeTransposeMatMulOp3DYFloat(TestReshapeTransposeMatMulOp): - def generate_data(self): - self.x = np.random.random([2, 2, 5, 5]).astype(self.data_type_).reshape( - [2, 10, 5]).transpose([0, 2, 1]) + self.x = ( + np.random.random([2, 2, 5, 5]) + .astype(self.data_type_) + .reshape([2, 10, 5]) + .transpose([0, 
2, 1]) + ) self.y = np.random.random([2, 2, 5, 5]).astype(self.data_type_) self.fused_transpose_X = [] self.fused_reshape_X = [] @@ -455,14 +460,12 @@ class TestReshapeTransposeMatMulOp3DYFloat(TestReshapeTransposeMatMulOp): class TestReshapeTransposeMatMulOp3DYInt8(TestReshapeTransposeMatMulOp3DYFloat): - def init_data_type(self): self.data_type_ = 'int8' @skip_check_grad_ci(reason="Tests inference only optimization.") class TestMatMulOpTransposeReshapeEmptyFloat(OpTest): - def init_data_type(self): self.data_type_ = np.float32 @@ -512,39 +515,44 @@ class TestMatMulOpTransposeReshapeEmptyFloat(OpTest): class TestMatMulOpTransposeReshapeIntEmptyInt( - TestMatMulOpTransposeReshapeEmptyFloat): - + TestMatMulOpTransposeReshapeEmptyFloat +): def init_data_type(self): self.data_type_ = np.int8 class TestMatMulOpTransposeReshapeBasicFloat( - TestMatMulOpTransposeReshapeEmptyFloat): - + TestMatMulOpTransposeReshapeEmptyFloat +): def generate_data(self): self.bs = 8 - self.x = np.random.random([self.bs, 12, 128, - 128]).astype(self.data_type_) - self.y = np.random.random([self.bs, 12, 128, - 64]).astype(self.data_type_) + self.x = np.random.random([self.bs, 12, 128, 128]).astype( + self.data_type_ + ) + self.y = np.random.random([self.bs, 12, 128, 64]).astype( + self.data_type_ + ) def init_params_and_out(self): self.transpose_out = [0, 2, 1, 3] self.reshape_out = [0, 0, self.x.shape[1] * self.y.shape[-1]] - self.out = np.matmul(self.x, self.y).transpose([0, 2, 1, 3]).reshape( - [self.bs, -1, self.x.shape[1] * self.y.shape[-1]]) + self.out = ( + np.matmul(self.x, self.y) + .transpose([0, 2, 1, 3]) + .reshape([self.bs, -1, self.x.shape[1] * self.y.shape[-1]]) + ) class TestMatMulOpTransposeReshapeBasicInt( - TestMatMulOpTransposeReshapeBasicFloat): - + TestMatMulOpTransposeReshapeBasicFloat +): def init_data_type(self): self.data_type_ = np.int8 class TestMatMulOpTransposeReshapeOtherDimFloat( - TestMatMulOpTransposeReshapeBasicFloat): - + TestMatMulOpTransposeReshapeBasicFloat +): def generate_data(self): self.bs = 11 self.x = np.random.random([self.bs, 12, 14, 18]).astype(self.data_type_) @@ -552,13 +560,14 @@ class TestMatMulOpTransposeReshapeOtherDimFloat( class TestMatMulOpTransposeReshapeOtherDimInt( - TestMatMulOpTransposeReshapeOtherDimFloat): - + TestMatMulOpTransposeReshapeOtherDimFloat +): def init_data_type(self): self.data_type_ = np.int8 if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_v2_mkldnn_op.py index 5038114582116a0a8079c035b51d59f213e3ecb2..4a4e0d3ac66caf41585b2e5730044ba4692282ff 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_v2_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_matmul_v2_mkldnn_op.py @@ -15,17 +15,26 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) import paddle.fluid.core as core import paddle from paddle.fluid.tests.unittests.mkldnn.test_matmul_mkldnn_op import ( TestMatMulOpTransposeReshapeEmptyFloat, TestMatMulOpTransposeReshapeBasicFloat, - TestMatMulOpTransposeReshapeOtherDimFloat, TestReshapeTransposeMatMulOp, - TestReshapeTransposeMatMulOp4DXFloat, TestReshapeTransposeMatMulOp4DYFloat, - TestReshapeTransposeMatMulOp4DXYFloat, 
TestReshapeTransposeMatMulOp2DXFloat, - TestReshapeTransposeMatMulOp2DYFloat, TestReshapeTransposeMatMulOp3DXFloat, - TestReshapeTransposeMatMulOp3DYFloat) + TestMatMulOpTransposeReshapeOtherDimFloat, + TestReshapeTransposeMatMulOp, + TestReshapeTransposeMatMulOp4DXFloat, + TestReshapeTransposeMatMulOp4DYFloat, + TestReshapeTransposeMatMulOp4DXYFloat, + TestReshapeTransposeMatMulOp2DXFloat, + TestReshapeTransposeMatMulOp2DYFloat, + TestReshapeTransposeMatMulOp3DXFloat, + TestReshapeTransposeMatMulOp3DYFloat, +) def reference_matmul(X, Y, transpose_x=False, transpose_y=False): @@ -34,7 +43,7 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False): # transpose X and Y appropriately. if transpose_x: if X.ndim == 1: - X = X.reshape((X.size, )) + X = X.reshape((X.size,)) elif X.ndim == 2: X = X.T else: @@ -43,7 +52,7 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False): X = np.transpose(X, tuple(dim)) if transpose_y: if Y.ndim == 1: - Y = Y.reshape((Y.size, )) + Y = Y.reshape((Y.size,)) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -54,10 +63,9 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False): class TestMatMulV2VectorXVectorOneDNNOp(OpTest): - def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.trans_x = False self.trans_y = False self._cpu_only = True @@ -77,14 +85,15 @@ class TestMatMulV2VectorXVectorOneDNNOp(OpTest): # -0.1 ~ 0.1 x = -0.1 + 0.2 * x y = -0.1 + 0.2 * y - result = reference_matmul(x, y, self.trans_x, - self.trans_y).astype("float32") + result = reference_matmul(x, y, self.trans_x, self.trans_y).astype( + "float32" + ) self.set_inputs(x, y) self.attrs = { 'trans_x': self.trans_x, 'trans_y': self.trans_y, - 'use_mkldnn': True + 'use_mkldnn': True, } self.set_dtype_attr() self.outputs = {'Out': result} @@ -97,45 +106,42 @@ class TestMatMulV2VectorXVectorOneDNNOp(OpTest): class TestMatMulV2VectorXMatrixTransposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.trans_x = False self.trans_y = True class TestMatMulV2VectorXMatrixOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 1, 100, 2) self.trans_x = False self.trans_y = False class TestMatMulV2MatrixXVectorTransposeXOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (1, 1, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = True self.trans_y = False class TestMatMulV2MatrixXVectorOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (1, 2, 1, 100) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = False self.trans_y = False class TestMatMulV2MatrixXMatrixOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (1, 1, 2, 100) self.y_shape = (1, 1, 100, 1) @@ -144,8 +150,8 @@ class TestMatMulV2MatrixXMatrixOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2MatrixXMatrixTransposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (1, 1, 1, 100) self.y_shape = (2, 1, 2, 100) @@ -154,7 +160,6 @@ class TestMatMulV2MatrixXMatrixTransposeYOneDNNOp( class 
TestMatMulV2MatrixXMatrix2OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (2, 1, 12, 9) self.y_shape = (1, 3, 9, 12) @@ -163,7 +168,6 @@ class TestMatMulV2MatrixXMatrix2OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2MatrixXMatrix3OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (2, 1, 2, 100) self.y_shape = (1, 1, 100, 2) @@ -172,8 +176,8 @@ class TestMatMulV2MatrixXMatrix3OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2MatrixXMatrixTranposeXOneDNNOp2( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (2, 1, 4, 25) self.y_shape = (1, 1, 4, 25) @@ -182,8 +186,8 @@ class TestMatMulV2MatrixXMatrixTranposeXOneDNNOp2( class TestMatMulV2MatrixXMatrixTranposeX2OneDNNOp3( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (2, 2, 7, 4) self.y_shape = (2, 2, 7, 5) @@ -192,8 +196,8 @@ class TestMatMulV2MatrixXMatrixTranposeX2OneDNNOp3( class TestMatMulV2MatrixXMatrixTransposeX3OneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (3, 1, 6, 7) self.y_shape = (1, 2, 6, 9) @@ -202,7 +206,6 @@ class TestMatMulV2MatrixXMatrixTransposeX3OneDNNOp( class TestMatMulV2MatrixXMatrix4OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (3, 1, 6, 6) self.y_shape = (1, 2, 6, 9) @@ -211,26 +214,24 @@ class TestMatMulV2MatrixXMatrix4OneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2VectorXMatrix5DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.trans_x = False self.trans_y = False class TestMatMulV2Matrix3DXVectorOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.trans_x = False self.trans_y = False class TestMatMulV2MatrixXMatrixTransposeXTransposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (3, 1, 10, 8) self.y_shape = (1, 2, 9, 10) @@ -239,8 +240,8 @@ class TestMatMulV2MatrixXMatrixTransposeXTransposeYOneDNNOp( class TestMatMulV2MatrixXMatrixTransposeY2OneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (3, 1, 10, 10) self.y_shape = (1, 2, 9, 10) @@ -249,8 +250,8 @@ class TestMatMulV2MatrixXMatrixTransposeY2OneDNNOp( class TestMatMulV2MatrixXMatrix5DTranposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (1, 3, 1, 10, 10) self.y_shape = (3, 1, 2, 9, 10) @@ -259,7 +260,6 @@ class TestMatMulV2MatrixXMatrix5DTranposeYOneDNNOp( class TestMatMulV2MatrixXMatrix6Dx2DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (1, 1, 2, 1, 8, 9) self.y_shape = (9, 12) @@ -268,7 +268,6 @@ class TestMatMulV2MatrixXMatrix6Dx2DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2MatrixXMatrix2Dx5DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (20, 5) self.y_shape = (1, 2, 1, 5, 11) @@ -277,8 +276,8 @@ class TestMatMulV2MatrixXMatrix2Dx5DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): class TestMatMulV2MatrixXMatrix4Dx3DTransposeXOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): 
self.x_shape = (5, 4, 15, 10) self.y_shape = (1, 15, 20) @@ -287,8 +286,8 @@ class TestMatMulV2MatrixXMatrix4Dx3DTransposeXOneDNNOp( class TestMatMulV2MatrixXMatrix3Dx4DTransposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (2, 10, 15) self.y_shape = (4, 2, 20, 15) @@ -297,8 +296,8 @@ class TestMatMulV2MatrixXMatrix3Dx4DTransposeYOneDNNOp( class TestMatMulV2MatrixXMatrix5Dx3DTransposeXTransposeYOneDNNOp( - TestMatMulV2VectorXVectorOneDNNOp): - + TestMatMulV2VectorXVectorOneDNNOp +): def config(self): self.x_shape = (4, 3, 2, 15, 10) self.y_shape = (1, 20, 15) @@ -307,7 +306,6 @@ class TestMatMulV2MatrixXMatrix5Dx3DTransposeXTransposeYOneDNNOp( class TestMatMulV2MatrixXMatrix3Dx4DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): - def config(self): self.x_shape = (1, 1, 32, 16) self.y_shape = (16, 16, 16) @@ -317,14 +315,12 @@ class TestMatMulV2MatrixXMatrix3Dx4DOneDNNOp(TestMatMulV2VectorXVectorOneDNNOp): # BF16 TESTS def create_bf16_test_class(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestMatMulV2Bf16OneDNNOp(parent): - def set_inputs(self, x, y): self.inputs = { 'X': convert_float_to_uint16(x), - 'Y': convert_float_to_uint16(y) + 'Y': convert_float_to_uint16(y), } self.x_fp32 = x self.y_fp32 = y @@ -338,16 +334,24 @@ def create_bf16_test_class(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X", "Y"], + core.CPUPlace(), + ["X", "Y"], "Out", user_defined_grads=[self.dx, self.dy], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) def matmul_grad(self, x, transpose_x, y, transpose_y): - x = np.transpose( - x, self.shape_transpose_axes[x.ndim]) if transpose_x else x - y = np.transpose( - y, self.shape_transpose_axes[y.ndim]) if transpose_y else y + x = ( + np.transpose(x, self.shape_transpose_axes[x.ndim]) + if transpose_x + else x + ) + y = ( + np.transpose(y, self.shape_transpose_axes[y.ndim]) + if transpose_y + else y + ) return np.matmul(x, y) @@ -357,7 +361,7 @@ def create_bf16_test_class(parent): 3: [0, 2, 1], 4: [0, 1, 3, 2], 5: [0, 1, 2, 4, 3], - 6: [0, 1, 2, 3, 5, 4] + 6: [0, 1, 2, 3, 5, 4], } # expand vector so it will be a valid matrix for multiplication @@ -369,10 +373,16 @@ def create_bf16_test_class(parent): x_transpose_axes = self.shape_transpose_axes[self.x_fp32.ndim] y_transpose_axes = self.shape_transpose_axes[self.y_fp32.ndim] - x = np.transpose(self.x_fp32, x_transpose_axes - ) if self.attrs['trans_x'] is True else self.x_fp32 - y = np.transpose(self.y_fp32, y_transpose_axes - ) if self.attrs['trans_y'] is True else self.y_fp32 + x = ( + np.transpose(self.x_fp32, x_transpose_axes) + if self.attrs['trans_x'] is True + else self.x_fp32 + ) + y = ( + np.transpose(self.y_fp32, y_transpose_axes) + if self.attrs['trans_y'] is True + else self.y_fp32 + ) dout = np.matmul(x, y) @@ -389,12 +399,14 @@ def create_bf16_test_class(parent): if self.attrs['trans_x'] is True and self.attrs['trans_y'] is True: self.dx = self.matmul_grad(self.y_fp32, True, dout, True) self.dy = self.matmul_grad(dout, True, self.x_fp32, True) - elif self.attrs['trans_x'] is True and self.attrs[ - 'trans_y'] is False: + elif ( + self.attrs['trans_x'] is True and self.attrs['trans_y'] is False + ): self.dx = self.matmul_grad(self.y_fp32, False, dout, True) self.dy = self.matmul_grad(self.x_fp32, False, dout, False) - elif self.attrs['trans_x'] is False and self.attrs[ - 'trans_y'] is 
True: + elif ( + self.attrs['trans_x'] is False and self.attrs['trans_y'] is True + ): self.dx = self.matmul_grad(dout, False, self.y_fp32, False) self.dy = self.matmul_grad(dout, True, self.x_fp32, False) else: @@ -405,21 +417,25 @@ def create_bf16_test_class(parent): x_reduce_axis = [] y_reduce_axis = [] for index, (first, second) in enumerate( - zip(x_shape[0:-2], self.dx.shape[0:-2])): + zip(x_shape[0:-2], self.dx.shape[0:-2]) + ): if first != second: x_reduce_axis.append(index) for index, (first, second) in enumerate( - zip(y_shape[0:-2], self.dy.shape[0:-2])): + zip(y_shape[0:-2], self.dy.shape[0:-2]) + ): if first != second: y_reduce_axis.append(index) if x_reduce_axis: - self.dx = self.dx.sum(axis=tuple(x_reduce_axis), - keepdims=True) + self.dx = self.dx.sum( + axis=tuple(x_reduce_axis), keepdims=True + ) if y_reduce_axis: - self.dy = self.dy.sum(axis=tuple(y_reduce_axis), - keepdims=True) + self.dy = self.dy.sum( + axis=tuple(y_reduce_axis), keepdims=True + ) # after multiplying with vector one dimension is deleted from tensor if len(x_shape) == 2 and x_shape[0] == 1: @@ -456,84 +472,83 @@ create_bf16_test_class(TestMatMulV2MatrixXMatrix2Dx5DOneDNNOp) class TestMatMulV2OpTransposeReshapeEmptyFloat( - TestMatMulOpTransposeReshapeEmptyFloat): - + TestMatMulOpTransposeReshapeEmptyFloat +): def set_op_type(self): self.op_type = "matmul_v2" class TestMatMulV2OpTransposeReshapeBasicFloat( - TestMatMulOpTransposeReshapeBasicFloat): - + TestMatMulOpTransposeReshapeBasicFloat +): def set_op_type(self): self.op_type = "matmul_v2" class TestMatMulV2OpTransposeReshapeOtherDimFloat( - TestMatMulOpTransposeReshapeOtherDimFloat): - + TestMatMulOpTransposeReshapeOtherDimFloat +): def set_op_type(self): self.op_type = "matmul_v2" class TestMatMulV2OpReshapeTranspose(TestReshapeTransposeMatMulOp): - def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose4DXFloat( - TestReshapeTransposeMatMulOp4DXFloat): - + TestReshapeTransposeMatMulOp4DXFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose4DYFloat( - TestReshapeTransposeMatMulOp4DYFloat): - + TestReshapeTransposeMatMulOp4DYFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose4DXYFloat( - TestReshapeTransposeMatMulOp4DXYFloat): - + TestReshapeTransposeMatMulOp4DXYFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose2DXFloat( - TestReshapeTransposeMatMulOp2DXFloat): - + TestReshapeTransposeMatMulOp2DXFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose2DYFloat( - TestReshapeTransposeMatMulOp2DYFloat): - + TestReshapeTransposeMatMulOp2DYFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose3DXFloat( - TestReshapeTransposeMatMulOp3DXFloat): - + TestReshapeTransposeMatMulOp3DXFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = "matmul_v2" self.transpose_y_name = "trans_y" class TestMatMulV2OpReshapeTranspose3DYFloat( - TestReshapeTransposeMatMulOp3DYFloat): - + TestReshapeTransposeMatMulOp3DYFloat +): def set_op_type_and_transpose_y_name(self): self.op_type = 
"matmul_v2" self.transpose_y_name = "trans_y" diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py index 944cab82c908e34d308ea15390b888f975391030..9011b1229bdb0b17a427e54c36882b1e54205c35 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py @@ -17,17 +17,16 @@ import numpy as np import paddle import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci + ''' test case for s8 * s8 ''' @skip_check_grad_ci( - reason= - "mul_mkldnn_op does not implement grad operator, check_grad is not required." + reason="mul_mkldnn_op does not implement grad operator, check_grad is not required." ) class TestMKLDNNMulOpS8S8(OpTest): - def setUp(self): self.op_type = "mul" self.init_kernel_type() @@ -65,10 +64,9 @@ class TestMKLDNNMulOpS8S8(OpTest): quant_B = np.round(B_data * self.scale_y[0]).astype(np.int_) output = np.dot(A_data, quant_B) - scale_output_shift = (self.scale_out) / \ - (self.scale_x * self.scale_y[0]) + scale_output_shift = (self.scale_out) / (self.scale_x * self.scale_y[0]) - if (self.force_fp32): + if self.force_fp32: output = (output * scale_output_shift).astype(self.dsttype) else: output = np.round(output * scale_output_shift).astype(self.dsttype) @@ -78,9 +76,9 @@ class TestMKLDNNMulOpS8S8(OpTest): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output_with_place(core.CPUPlace(), - atol=0, - check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), atol=0, check_dygraph=False + ) ''' @@ -89,7 +87,6 @@ class TestMKLDNNMulOpS8S8(OpTest): class TestMKLDNNMulOpS8U8(TestMKLDNNMulOpS8S8): - def init_data_type(self): self.srctype = np.uint8 self.dsttype = np.float32 if self.force_fp32 else np.int8 @@ -101,7 +98,6 @@ class TestMKLDNNMulOpS8U8(TestMKLDNNMulOpS8S8): class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8): - def setUp(self): self.op_type = "mul" self.init_kernel_type() @@ -128,8 +124,9 @@ class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8): else: A_data = np.random.randint(0, 127, (3, 4, 4, 3)).astype(np.uint8) - B_data = np.random.uniform(-127, 127, - (2, 6, 1, 2, 3)).astype(np.float32) + B_data = np.random.uniform(-127, 127, (2, 6, 1, 2, 3)).astype( + np.float32 + ) A_data_reshape = A_data.reshape(3 * 4, 4 * 3) B_data_reshape = B_data.reshape(2 * 6, 1 * 2 * 3) @@ -137,10 +134,9 @@ class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8): quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int_) output = np.dot(A_data_reshape, quant_B) - scale_output_shift = (self.scale_out) / \ - (self.scale_x * self.scale_y[0]) + scale_output_shift = (self.scale_out) / (self.scale_x * self.scale_y[0]) - if (self.force_fp32): + if self.force_fp32: output = (output * scale_output_shift).astype(self.dsttype) else: output = np.round(output * scale_output_shift).astype(self.dsttype) @@ -157,7 +153,6 @@ class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8): class TestMKLDNNMulOpS8U8WithFlatten(TestMKLDNNMulOpS8S8WithFlatten): - def init_data_type(self): self.srctype = np.uint8 self.dsttype = np.float32 if self.force_fp32 else np.int8 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_mkldnn_op.py index 85a17094fdc362ed98265f0f8041b68139af046f..0752d4a0104339bca469957b2caacc571c41d1f9 
100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_mkldnn_op.py @@ -16,12 +16,15 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16, OpTestTool +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + OpTestTool, +) @OpTestTool.skip_if_not_cpu_bf16() class TestMulOneDNNOp(OpTest): - def setUp(self): self.op_type = "mul" self.attrs = {'use_mkldnn': True} @@ -37,8 +40,10 @@ class TestMulOneDNNOp(OpTest): self.inputs = {'X': self.x, 'Y': self.y} - output = np.dot(np.reshape(self.x_fp32, self.np_x_shape), - np.reshape(self.y_fp32, self.np_y_shape)) + output = np.dot( + np.reshape(self.x_fp32, self.np_x_shape), + np.reshape(self.y_fp32, self.np_y_shape), + ) self.outputs = {'Out': np.reshape(output, self.out_shape)} def init_shapes_and_attrs(self): @@ -67,7 +72,6 @@ class TestMulOneDNNOp(OpTest): class TestMulXNumColDims2OneDNNOp(TestMulOneDNNOp): - def init_shapes_and_attrs(self): self.x_shape = (6, 7, 5) self.y_shape = (5, 21) @@ -81,7 +85,6 @@ class TestMulXNumColDims2OneDNNOp(TestMulOneDNNOp): class TestMulYNumColDims2OneDNNOp(TestMulOneDNNOp): - def init_shapes_and_attrs(self): self.x_shape = (20, 6) self.y_shape = (2, 3, 21) @@ -95,7 +98,6 @@ class TestMulYNumColDims2OneDNNOp(TestMulOneDNNOp): class TestMulYAndXNumColDims2OneDNNOp(TestMulOneDNNOp): - def init_shapes_and_attrs(self): self.x_shape = (10, 5, 6) self.y_shape = (2, 3, 21) @@ -110,7 +112,6 @@ class TestMulYAndXNumColDims2OneDNNOp(TestMulOneDNNOp): class TestMulBF16OneDNNOp(TestMulOneDNNOp): - def init_inputs_dtype(self): self.x = convert_float_to_uint16(self.x) self.y = convert_float_to_uint16(self.y) @@ -131,28 +132,34 @@ class TestMulBF16OneDNNOp(TestMulOneDNNOp): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ['X', 'Y'], + core.CPUPlace(), + ['X', 'Y'], 'Out', user_defined_grads=[self.dx, self.dy], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) def test_check_grad_ingore_x(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ['Y'], + core.CPUPlace(), + ['Y'], 'Out', set('X'), user_defined_grads=[self.dy], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) def test_check_grad_ingore_y(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ['X'], + core.CPUPlace(), + ['X'], 'Out', set('Y'), user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_multi_gru_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_multi_gru_mkldnn_op.py index a5f4583c3e5f62ab99cf92aaa3a610c5ac8738cc..273138496fce1d3bac1a6e5886b1bed3289297b0 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_multi_gru_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_multi_gru_mkldnn_op.py @@ -15,33 +15,55 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import OpTest -from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru, ACTIVATION +from paddle.fluid.tests.unittests.test_fusion_gru_op import ( 
+ fusion_gru, + ACTIVATION, +) def multi_gru( - x, # T x M - lod, # 1 x N - h0, # N x D - wx, # M x 3D - wh, # D x 3D - bias, # 1 x 3D - origin_mode, - layers): + x, # T x M + lod, # 1 x N + h0, # N x D + wx, # M x 3D + wh, # D x 3D + bias, # 1 x 3D + origin_mode, + layers, +): act_state = ACTIVATION['tanh'] act_gate = ACTIVATION['sigmoid'] input = x for i in range(0, layers * 2, 2): - _, _, _, gru1_out = fusion_gru(input, lod, h0[i], wx[i], wh[i], bias[i], - False, origin_mode, act_state, act_gate) - _, _, _, gru2_out = fusion_gru(input, lod, h0[i + 1], wx[i + 1], - wh[i + 1], bias[i + 1], True, - origin_mode, act_state, act_gate) + _, _, _, gru1_out = fusion_gru( + input, + lod, + h0[i], + wx[i], + wh[i], + bias[i], + False, + origin_mode, + act_state, + act_gate, + ) + _, _, _, gru2_out = fusion_gru( + input, + lod, + h0[i + 1], + wx[i + 1], + wh[i + 1], + bias[i + 1], + True, + origin_mode, + act_state, + act_gate, + ) input = np.concatenate((gru1_out, gru2_out), axis=1) return input class TestMultiGruMkldnnOp(OpTest): - def set_confs(self): pass @@ -96,17 +118,22 @@ class TestMultiGruMkldnnOp(OpTest): wx.append(np.random.rand(IC, 3 * OC).astype('float32')) wh.append(np.random.rand(OC, 3 * OC).astype('float32')) bias.append( - np.random.rand(1, 3 * OC).astype('float32') if self. - with_bias else np.zeros((1, 3 * OC), dtype='float32')) + np.random.rand(1, 3 * OC).astype('float32') + if self.with_bias + else np.zeros((1, 3 * OC), dtype='float32') + ) h0.append(np.zeros((N, OC), dtype='float32')) - self.inputs['WeightX'] = [('wx' + str(i), wx[i]) - for i in range(self.layers * 2)] - self.inputs['WeightH'] = [('wh' + str(i), wh[i]) - for i in range(self.layers * 2)] + self.inputs['WeightX'] = [ + ('wx' + str(i), wx[i]) for i in range(self.layers * 2) + ] + self.inputs['WeightH'] = [ + ('wh' + str(i), wh[i]) for i in range(self.layers * 2) + ] if self.with_bias: - self.inputs['Bias'] = [('b' + str(i), bias[i]) - for i in range(self.layers * 2)] + self.inputs['Bias'] = [ + ('b' + str(i), bias[i]) for i in range(self.layers * 2) + ] if is_int8: s8_max = 127.0 @@ -114,38 +141,54 @@ class TestMultiGruMkldnnOp(OpTest): for layer in range(self.layers): OC = self.OCs[layer] for j in range(2): - scale_ur = s8_max / np.max(np.abs( - np.concatenate([ - wx[2 * layer + j][:, :2 * OC], - wh[2 * layer + j].flatten()[:2 * OC * OC].reshape( - OC, 2 * OC) - ], - axis=0)), - axis=0) - scale_o = s8_max / np.max(np.abs( - np.concatenate([ - wx[2 * layer + j][:, 2 * OC:], - wh[2 * layer + j].flatten()[2 * OC * OC:].reshape( - OC, OC) - ], - axis=0)), - axis=0) + scale_ur = s8_max / np.max( + np.abs( + np.concatenate( + [ + wx[2 * layer + j][:, : 2 * OC], + wh[2 * layer + j] + .flatten()[: 2 * OC * OC] + .reshape(OC, 2 * OC), + ], + axis=0, + ) + ), + axis=0, + ) + scale_o = s8_max / np.max( + np.abs( + np.concatenate( + [ + wx[2 * layer + j][:, 2 * OC :], + wh[2 * layer + j] + .flatten()[2 * OC * OC :] + .reshape(OC, OC), + ], + axis=0, + ) + ), + axis=0, + ) scale_weights.append( - np.concatenate([scale_ur, scale_o]).astype('float32')) - self.inputs['Scale_weights'] = [('w_scale' + str(i), - scale_weights[i]) - for i in range(self.layers * 2)] + np.concatenate([scale_ur, scale_o]).astype('float32') + ) + self.inputs['Scale_weights'] = [ + ('w_scale' + str(i), scale_weights[i]) + for i in range(self.layers * 2) + ] self.error_margin = 1e-1 if self.force_fp32_output else 1 - hidden_f32 = multi_gru(x_f32, self.lod, h0, wx, wh, bias, - self.origin_mode, self.layers) + hidden_f32 = multi_gru( + x_f32, self.lod, h0, 
wx, wh, bias, self.origin_mode, self.layers + ) if self.dtype == 'float32' or self.force_fp32_output: self.outputs = {'Hidden': (hidden_f32, self.lod)} else: hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype( - np.uint8) + np.uint8 + ) self.outputs = {'Hidden': (hidden_u8, self.lod)} self.attrs = { @@ -166,13 +209,11 @@ class TestMultiGruMkldnnOp(OpTest): class TestMultiGruMkldnnOpNoBias(TestMultiGruMkldnnOp): - def set_confs(self): self.with_bias = False class TestMultiGruMkldnnOpLayers2(TestMultiGruMkldnnOp): - def set_confs(self): self.layers = 2 self.ICs = [2, 6] @@ -180,7 +221,6 @@ class TestMultiGruMkldnnOpLayers2(TestMultiGruMkldnnOp): class TestMultiGruMkldnnOpLayers3(TestMultiGruMkldnnOp): - def set_confs(self): self.layers = 3 self.ICs = [2, 6, 12] @@ -188,71 +228,64 @@ class TestMultiGruMkldnnOpLayers3(TestMultiGruMkldnnOp): class TestMultiGruMkldnnOpOriginMode(TestMultiGruMkldnnOp): - def set_confs(self): self.origin_mode = True class TestMultiGruMkldnnInt8Op(TestMultiGruMkldnnOp): - def set_dtype(self): self.dtype = 'int8' class TestMultiGruMkldnnInt8OpForceFP32Output(TestMultiGruMkldnnInt8Op): - def set_force_fp32_output(self): self.force_fp32_output = True class TestMultiGruMkldnnInt8OpNoBias(TestMultiGruMkldnnOpNoBias): - def set_dtype(self): self.dtype = 'int8' class TestMultiGruMkldnnInt8OpNoBiasForceFP32Output( - TestMultiGruMkldnnInt8OpNoBias): - + TestMultiGruMkldnnInt8OpNoBias +): def set_force_fp32_output(self): self.force_fp32_output = True class TestMultiGruMkldnnInt8OpLayers2(TestMultiGruMkldnnOpLayers2): - def set_dtype(self): self.dtype = 'int8' class TestMultiGruMkldnnInt8OpLayers2ForceFP32Output( - TestMultiGruMkldnnInt8OpLayers2): - + TestMultiGruMkldnnInt8OpLayers2 +): def set_force_fp32_output(self): self.force_fp32_output = True class TestMultiGruMkldnnInt8OpLayers3(TestMultiGruMkldnnOpLayers3): - def set_dtype(self): self.dtype = 'int8' class TestMultiGruMkldnnInt8OpLayers3ForceFP32Output( - TestMultiGruMkldnnInt8OpLayers3): - + TestMultiGruMkldnnInt8OpLayers3 +): def set_force_fp32_output(self): self.force_fp32_output = True class TestMultiGruMkldnnInt8OpOriginMode(TestMultiGruMkldnnOpOriginMode): - def set_dtype(self): self.dtype = 'int8' class TestMultiGruMkldnnInt8OpOriginModeForceFP32Output( - TestMultiGruMkldnnInt8OpOriginMode): - + TestMultiGruMkldnnInt8OpOriginMode +): def set_force_fp32_output(self): self.force_fp32_output = True diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_mkldnn_op.py index f60050509e6bd3c201bb1b39723f7df6c0bcc4b0..e6a53f122bb88337e6d0244dc82bfb614be8e45d 100755 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_mkldnn_op.py @@ -18,12 +18,9 @@ from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -def nearest_neighbor_interp_mkldnn_np(X, - out_h, - out_w, - out_size=None, - actual_shape=None, - data_layout='NCHW'): +def nearest_neighbor_interp_mkldnn_np( + X, out_h, out_w, out_size=None, actual_shape=None, data_layout='NCHW' +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -37,9 +34,9 @@ def nearest_neighbor_interp_mkldnn_np(X, n, c, in_h, in_w = X.shape fh = fw = 0.0 - if (out_h > 1): + if out_h > 1: fh = out_h * 1.0 / in_h - if (out_w > 
1): + if out_w > 1: fw = out_w * 1.0 / in_w out = np.zeros((n, c, out_h, out_w)) @@ -58,7 +55,6 @@ def nearest_neighbor_interp_mkldnn_np(X, @skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.") class TestNearestInterpMKLDNNOp(OpTest): - def init_test_case(self): pass @@ -86,8 +82,9 @@ class TestNearestInterpMKLDNNOp(OpTest): input_np = np.random.random(self.input_shape).astype(self.dtype) else: init_low, init_high = (-5, 5) if self.dtype == np.int8 else (0, 10) - input_np = np.random.randint(init_low, init_high, - self.input_shape).astype(self.dtype) + input_np = np.random.randint( + init_low, init_high, self.input_shape + ).astype(self.dtype) if self.data_layout == "NCHW": in_h = self.input_shape[2] @@ -103,10 +100,14 @@ class TestNearestInterpMKLDNNOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_mkldnn_np(input_np, out_h, out_w, - self.out_size, - self.actual_shape, - self.data_layout) + output_np = nearest_neighbor_interp_mkldnn_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: @@ -119,7 +120,7 @@ class TestNearestInterpMKLDNNOp(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': output_np} @@ -128,7 +129,6 @@ class TestNearestInterpMKLDNNOp(OpTest): class TestNearestInterpOpMKLDNNNHWC(TestNearestInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 27 @@ -138,40 +138,36 @@ class TestNearestInterpOpMKLDNNNHWC(TestNearestInterpMKLDNNOp): class TestNearestNeighborInterpMKLDNNCase2(TestNearestInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 1. + self.scale = 1.0 class TestNearestNeighborInterpCase3(TestNearestInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 128 - self.scale = 0. + self.scale = 0.0 class TestNearestNeighborInterpCase4(TestNearestInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") class TestNearestNeighborInterpSame(TestNearestInterpMKLDNNOp): - def init_test_case(self): self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. 
+ self.scale = 0.0 def create_test_class(parent): @@ -180,12 +176,10 @@ def create_test_class(parent): ''' class TestInt8Case(parent): - def init_data_type(self): self.dtype = np.int8 class TestUint8Case(parent): - def init_data_type(self): self.dtype = np.uint8 @@ -204,5 +198,6 @@ create_test_class(TestNearestNeighborInterpSame) if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py index 6e5601e91b02f30c6dfbee7c5f185bc6c60bd18e..6d5480a03f598b8c9d1f412ceeb2777da9bcef2b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_nearest_interp_v2_mkldnn_op.py @@ -14,16 +14,17 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci -def nearest_neighbor_interp_mkldnn_np(X, - out_h, - out_w, - out_size=None, - actual_shape=None, - data_layout='NCHW'): +def nearest_neighbor_interp_mkldnn_np( + X, out_h, out_w, out_size=None, actual_shape=None, data_layout='NCHW' +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -37,9 +38,9 @@ def nearest_neighbor_interp_mkldnn_np(X, n, c, in_h, in_w = X.shape fh = fw = 0.0 - if (out_h > 1): + if out_h > 1: fh = out_h * 1.0 / in_h - if (out_w > 1): + if out_w > 1: fw = out_w * 1.0 / in_w out = np.zeros((n, c, out_h, out_w)) @@ -59,7 +60,6 @@ def nearest_neighbor_interp_mkldnn_np(X, @skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.") @OpTestTool.skip_if_not_cpu_bf16() class TestNearestInterpV2MKLDNNOp(OpTest): - def init_test_case(self): pass @@ -87,8 +87,9 @@ class TestNearestInterpV2MKLDNNOp(OpTest): input_np = np.random.random(self.input_shape).astype(self.dtype) else: init_low, init_high = (-5, 5) if self.dtype == np.int8 else (0, 10) - input_np = np.random.randint(init_low, init_high, - self.input_shape).astype(self.dtype) + input_np = np.random.randint( + init_low, init_high, self.input_shape + ).astype(self.dtype) if self.data_layout == "NCHW": in_h = self.input_shape[2] @@ -118,10 +119,14 @@ class TestNearestInterpV2MKLDNNOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_mkldnn_np(input_np, out_h, out_w, - self.out_size, - self.actual_shape, - self.data_layout) + output_np = nearest_neighbor_interp_mkldnn_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.data_layout, + ) if isinstance(self.scale, float): self.scale = [self.scale] @@ -140,7 +145,7 @@ class TestNearestInterpV2MKLDNNOp(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': output_np} @@ -149,7 +154,6 @@ class TestNearestInterpV2MKLDNNOp(OpTest): class TestNearestInterpOpV2MKLDNNNHWC(TestNearestInterpV2MKLDNNOp): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 27 @@ -159,7 +163,6 @@ class TestNearestInterpOpV2MKLDNNNHWC(TestNearestInterpV2MKLDNNOp): class 
TestNearestNeighborInterpV2MKLDNNCase2(TestNearestInterpV2MKLDNNOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 @@ -167,7 +170,6 @@ class TestNearestNeighborInterpV2MKLDNNCase2(TestNearestInterpV2MKLDNNOp): class TestNearestNeighborInterpV2MKLDNNCase3(TestNearestInterpV2MKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -176,7 +178,6 @@ class TestNearestNeighborInterpV2MKLDNNCase3(TestNearestInterpV2MKLDNNOp): class TestNearestNeighborInterpV2MKLDNNCase4(TestNearestInterpV2MKLDNNOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -186,7 +187,6 @@ class TestNearestNeighborInterpV2MKLDNNCase4(TestNearestInterpV2MKLDNNOp): class TestNearestNeighborInterpV2MKLDNNSame(TestNearestInterpV2MKLDNNOp): - def init_test_case(self): self.input_shape = [2, 3, 32, 64] self.out_h = 32 @@ -200,17 +200,14 @@ def create_test_class(parent): ''' class TestBf16Case(parent): - def init_data_type(self): self.dtype = np.uint16 class TestInt8Case(parent): - def init_data_type(self): self.dtype = np.int8 class TestUint8Case(parent): - def init_data_type(self): self.dtype = np.uint8 @@ -231,5 +228,6 @@ create_test_class(TestNearestNeighborInterpV2MKLDNNSame) if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py index ae4a8c16d67def973ef9f7b84363013f1fbe11fb..1b27a39f2e95665f8da70da1b95b5f3db36aaf1a 100755 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py @@ -74,13 +74,14 @@ def process_image(sample, mode, color_jitter, rotate): return img, sample[1] -def _reader_creator(file_list, - mode, - shuffle=False, - color_jitter=False, - rotate=False, - data_dir=DATA_DIR): - +def _reader_creator( + file_list, + mode, + shuffle=False, + color_jitter=False, + rotate=False, + data_dir=DATA_DIR, +): def reader(): with open(file_list) as flist: full_lines = [line.strip() for line in flist] @@ -95,10 +96,9 @@ def _reader_creator(file_list, continue yield img_path, int(label) - mapper = functools.partial(process_image, - mode=mode, - color_jitter=color_jitter, - rotate=rotate) + mapper = functools.partial( + process_image, mode=mode, color_jitter=color_jitter, rotate=rotate + ) return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE) @@ -109,11 +109,11 @@ def val(data_dir=DATA_DIR): class TestPostTrainingQuantization(unittest.TestCase): - def setUp(self): self.int8_download = 'int8/download' - self.cache_folder = os.path.expanduser('~/.cache/paddle/dataset/' + - self.int8_download) + self.cache_folder = os.path.expanduser( + '~/.cache/paddle/dataset/' + self.int8_download + ) self.data_cache_folder = '' data_urls = [] data_md5s = [] @@ -126,27 +126,32 @@ class TestPostTrainingQuantization(unittest.TestCase): 'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partab' ) data_md5s.append('1e9f15f64e015e58d6f9ec3210ed18b5') - self.data_cache_folder = self.download_data(data_urls, data_md5s, - "full_data", False) + self.data_cache_folder = self.download_data( + data_urls, data_md5s, "full_data", False + ) else: data_urls.append( 'http://paddle-inference-dist.bj.bcebos.com/int8/calibration_test_data.tar.gz' ) 
data_md5s.append('1b6c1c434172cca1bf9ba1e4d7a3157d') - self.data_cache_folder = self.download_data(data_urls, data_md5s, - "small_data", False) + self.data_cache_folder = self.download_data( + data_urls, data_md5s, "small_data", False + ) # reader/decorator.py requires the relative path to the data folder if not os.path.exists("./data/ILSVRC2012"): - cmd = 'rm -rf {0} && ln -s {1} {0}'.format("data", - self.data_cache_folder) + cmd = 'rm -rf {0} && ln -s {1} {0}'.format( + "data", self.data_cache_folder + ) os.system(cmd) self.batch_size = 1 if os.environ.get('DATASET') == 'full' else 50 - self.sample_iterations = 50 if os.environ.get( - 'DATASET') == 'full' else 2 - self.infer_iterations = 50000 if os.environ.get( - 'DATASET') == 'full' else 2 + self.sample_iterations = ( + 50 if os.environ.get('DATASET') == 'full' else 2 + ) + self.infer_iterations = ( + 50000 if os.environ.get('DATASET') == 'full' else 2 + ) self.int8_model = "post_training_quantization" print("self.int8_model: ", self.int8_model) @@ -159,7 +164,8 @@ class TestPostTrainingQuantization(unittest.TestCase): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): cmd = 'mkdir {0} && tar xf {1} -C {0}'.format( - target_folder, zip_path) + target_folder, zip_path + ) os.system(cmd) def download_data(self, data_urls, data_md5s, folder_name, is_model=True): @@ -171,13 +177,15 @@ class TestPostTrainingQuantization(unittest.TestCase): download(data_urls[i], self.int8_download, data_md5s[i]) file_names.append(data_urls[i].split('/')[-1]) - zip_path = os.path.join(self.cache_folder, - 'full_imagenet_val.tar.gz') + zip_path = os.path.join( + self.cache_folder, 'full_imagenet_val.tar.gz' + ) if not os.path.exists(zip_path): cat_command = 'cat' for file_name in file_names: - cat_command += ' ' + os.path.join(self.cache_folder, - file_name) + cat_command += ' ' + os.path.join( + self.cache_folder, file_name + ) cat_command += ' > ' + zip_path os.system(cat_command) @@ -193,11 +201,9 @@ class TestPostTrainingQuantization(unittest.TestCase): def download_model(self): pass - def run_program(self, - model_path, - batch_size, - infer_iterations, - is_quantized_model=False): + def run_program( + self, model_path, batch_size, infer_iterations, is_quantized_model=False + ): image_shape = [3, 224, 224] config = paddle.inference.Config(model_path) config.disable_gpu() @@ -223,8 +229,9 @@ class TestPostTrainingQuantization(unittest.TestCase): cnt = 0 periods = [] for batch_id, data in enumerate(val_reader()): - image = np.array([x[0].reshape(image_shape) - for x in data]).astype("float32") + image = np.array([x[0].reshape(image_shape) for x in data]).astype( + "float32" + ) label = np.array([x[1] for x in data]).astype("int64") label = label.reshape([-1, 1]) @@ -252,80 +259,106 @@ class TestPostTrainingQuantization(unittest.TestCase): acc1 = np.sum(test_info) / cnt return (throughput, latency, acc1) - def generate_quantized_model(self, - model_path, - quantizable_op_type, - algo="KL", - round_type="round", - is_full_quantize=False, - is_use_cache_file=False, - is_optimize_model=False, - onnx_format=False): + def generate_quantized_model( + self, + model_path, + quantizable_op_type, + algo="KL", + round_type="round", + is_full_quantize=False, + is_use_cache_file=False, + is_optimize_model=False, + onnx_format=False, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) val_reader = val() - ptq = PostTrainingQuantization(executor=exe, - sample_generator=val_reader, - model_dir=model_path, - algo=algo, - 
quantizable_op_type=quantizable_op_type, - round_type=round_type, - is_full_quantize=is_full_quantize, - optimize_model=is_optimize_model, - onnx_format=onnx_format, - is_use_cache_file=is_use_cache_file) + ptq = PostTrainingQuantization( + executor=exe, + sample_generator=val_reader, + model_dir=model_path, + algo=algo, + quantizable_op_type=quantizable_op_type, + round_type=round_type, + is_full_quantize=is_full_quantize, + optimize_model=is_optimize_model, + onnx_format=onnx_format, + is_use_cache_file=is_use_cache_file, + ) ptq.quantize() ptq.save_quantized_model(self.int8_model) - def run_test(self, - model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=True): + def run_test( + self, + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + onnx_format=True, + ): infer_iterations = self.infer_iterations batch_size = self.batch_size sample_iterations = self.sample_iterations model_cache_folder = self.download_data(data_urls, data_md5s, model) - print("Start INT8 post training quantization for {0} on {1} images ...". - format(model, sample_iterations * batch_size)) - self.generate_quantized_model(os.path.join(model_cache_folder, "model"), - quantizable_op_type, algo, round_type, - is_full_quantize, is_use_cache_file, - is_optimize_model, onnx_format) + print( + "Start INT8 post training quantization for {0} on {1} images ...".format( + model, sample_iterations * batch_size + ) + ) + self.generate_quantized_model( + os.path.join(model_cache_folder, "model"), + quantizable_op_type, + algo, + round_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + onnx_format, + ) - print("Start FP32 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) + print( + "Start FP32 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( - os.path.join(model_cache_folder, "model"), batch_size, - infer_iterations) + os.path.join(model_cache_folder, "model"), + batch_size, + infer_iterations, + ) - print("Start INT8 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size)) - (int8_throughput, int8_latency, - int8_acc1) = self.run_program(self.int8_model, - batch_size, - infer_iterations, - is_quantized_model=True) + print( + "Start INT8 inference for {0} on {1} images ...".format( + model, infer_iterations * batch_size + ) + ) + (int8_throughput, int8_latency, int8_acc1) = self.run_program( + self.int8_model, + batch_size, + infer_iterations, + is_quantized_model=True, + ) print("---Post training quantization of {} method---".format(algo)) print( - "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}." 
- .format(model, batch_size, fp32_throughput, fp32_latency, - fp32_acc1)) + "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format( + model, batch_size, fp32_throughput, fp32_latency, fp32_acc1 + ) + ) print( - "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n" - .format(model, batch_size, int8_throughput, int8_latency, - int8_acc1)) + "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n".format( + model, batch_size, int8_throughput, int8_latency, int8_acc1 + ) + ) sys.stdout.flush() delta_value = int8_latency - fp32_latency @@ -333,7 +366,6 @@ class TestPostTrainingQuantization(unittest.TestCase): class TestMKLDNNInt8ForResnet50AvgONNXFormat(TestPostTrainingQuantization): - def test_onnx_format_avg_resnet50(self): model = "resnet50" algo = "avg" @@ -351,17 +383,19 @@ class TestMKLDNNInt8ForResnet50AvgONNXFormat(TestPostTrainingQuantization): is_use_cache_file = False is_optimize_model = False diff_threshold = 0 - self.run_test(model, - algo, - round_type, - data_urls, - data_md5s, - quantizable_op_type, - is_full_quantize, - is_use_cache_file, - is_optimize_model, - diff_threshold, - onnx_format=True) + self.run_test( + model, + algo, + round_type, + data_urls, + data_md5s, + quantizable_op_type, + is_full_quantize, + is_use_cache_file, + is_optimize_model, + diff_threshold, + onnx_format=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py index d92025344767fb95d3b4a161584235ad8eb30fe1..81ca727b63895ac498a4fd2ece8ad96de2568357 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py @@ -15,15 +15,23 @@ import unittest import numpy as np import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive -from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) +from paddle.fluid.tests.unittests.test_pool2d_op import ( + TestPool2D_Op_Mixin, + max_pool2D_forward_naive, +) +from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import ( + pool2d_backward_navie as pool2d_backward_naive, +) from paddle import enable_static @OpTestTool.skip_if_not_cpu_bf16() class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest): - def init_kernel_type(self): self.use_mkldnn = True @@ -35,11 +43,17 @@ class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest): self.attrs['mkldnn_data_type'] = "bfloat16" self.x_fp32 = np.random.random(self.shape).astype(np.float32) - output = self.pool2D_forward_naive(self.x_fp32, self.ksize, - self.strides, self.paddings, - self.global_pool, self.ceil_mode, - self.exclusive, self.adaptive, - "float32").astype(np.float32) + output = self.pool2D_forward_naive( + self.x_fp32, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + "float32", + ).astype(np.float32) self.inputs = {'X': convert_float_to_uint16(self.x_fp32)} self.outputs = {'Out': convert_float_to_uint16(output)} @@ -48,27 +62,27 @@ class 
TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest): self.check_output_with_place(core.CPUPlace()) def test_check_grad(self): - x_grad = pool2d_backward_naive(self.x_fp32, - ksize=self.ksize, - strides=self.strides, - paddings=self.paddings, - global_pool=self.global_pool, - ceil_mode=False, - exclusive=self.exclusive, - adaptive=self.adaptive, - data_format=self.data_format, - pool_type=self.pool_type, - padding_algorithm=self.padding_algorithm) + x_grad = pool2d_backward_naive( + self.x_fp32, + ksize=self.ksize, + strides=self.strides, + paddings=self.paddings, + global_pool=self.global_pool, + ceil_mode=False, + exclusive=self.exclusive, + adaptive=self.adaptive, + data_format=self.data_format, + pool_type=self.pool_type, + padding_algorithm=self.padding_algorithm, + ) x_grad = x_grad / np.prod(self.outputs['Out'].shape) - self.check_grad_with_place(core.CPUPlace(), - set(['X']), - 'Out', - user_defined_grads=[x_grad]) + self.check_grad_with_place( + core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad] + ) @OpTestTool.skip_if_not_cpu_bf16() class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest): - def init_kernel_type(self): self.use_mkldnn = True @@ -77,11 +91,19 @@ class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest): self.dtype = np.uint16 input = np.random.random(self.shape).astype(np.float32) - output = (self.pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, - "float32")).astype(np.float32) + output = ( + self.pool2D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + "float32", + ) + ).astype(np.float32) self.inputs = {'X': convert_float_to_uint16(input)} self.outputs = {'Out': convert_float_to_uint16(output)} @@ -94,7 +116,6 @@ class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest): class TestCase1Avg(TestPoolBf16MklDNNOp): - def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -109,7 +130,6 @@ class TestCase1Avg(TestPoolBf16MklDNNOp): class TestCase2Avg(TestPoolBf16MklDNNOp): - def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -124,28 +144,24 @@ class TestCase2Avg(TestPoolBf16MklDNNOp): class TestCase0Max(TestPoolBf16MklDNNOp): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase1Max(TestCase1Avg): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase2Max(TestCase2Avg): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -164,27 +180,23 @@ class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad): class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad): - def init_exclusive(self): self.exclusive = False class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = 
max_pool2D_forward_naive diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py index 27595afac1f5489e2b60decba23ba7a194469fbf..f21891f35e7d9e7b058be2340026031b9893855e 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py @@ -17,11 +17,13 @@ import numpy as np import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, max_pool2D_forward_naive +from paddle.fluid.tests.unittests.test_pool2d_op import ( + TestPool2D_Op, + max_pool2D_forward_naive, +) class TestPool2DMKLDNNInt8_Op(TestPool2D_Op): - def init_kernel_type(self): self.use_mkldnn = True @@ -30,29 +32,38 @@ class TestPool2DMKLDNNInt8_Op(TestPool2D_Op): def setUp(self): TestPool2D_Op.setUp(self) - assert self.dtype in [np.int8, - np.uint8], 'Dtype should be int8 or uint8' + assert self.dtype in [ + np.int8, + np.uint8, + ], 'Dtype should be int8 or uint8' input = np.random.randint(0, 100, self.shape).astype(self.dtype) - output = (self.pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, - self.dtype)).astype(self.dtype) + output = ( + self.pool2D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.dtype, + ) + ).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.outputs = {'Out': output} def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output_with_place(core.CPUPlace(), - atol=1e-5, - check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), atol=1e-5, check_dygraph=False + ) def test_check_grad(self): pass class TestCase1Avg(TestPool2DMKLDNNInt8_Op): - def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -67,7 +78,6 @@ class TestCase1Avg(TestPool2DMKLDNNInt8_Op): class TestCase2Avg(TestPool2DMKLDNNInt8_Op): - def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -82,35 +92,29 @@ class TestCase2Avg(TestPool2DMKLDNNInt8_Op): class TestCase0Max(TestPool2DMKLDNNInt8_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase1Max(TestCase1Avg): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase2Max(TestCase2Avg): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive def create_test_s8_u8_class(parent): - class TestS8Case(parent): - def init_data_type(self): self.dtype = np.int8 class TestU8Case(parent): - def init_data_type(self): self.dtype = np.uint8 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py index 8a201a2e1e402f46c80b5a0f7013c9f8e2f6382d..e4e8ecb5bef28b299837613ce4b8bd72d96abb6e 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py @@ -14,13 +14,19 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5, 
avg_pool2D_forward_naive +from paddle.fluid.tests.unittests.test_pool2d_op import ( + TestPool2D_Op, + TestCase1, + TestCase2, + TestCase3, + TestCase4, + TestCase5, + avg_pool2D_forward_naive, +) def create_test_mkldnn_use_ceil_class(parent): - class TestMKLDNNPool2DUseCeilCase(parent): - def init_kernel_type(self): self.use_mkldnn = True @@ -41,9 +47,7 @@ create_test_mkldnn_use_ceil_class(TestCase2) def create_test_mkldnn_class(parent): - class TestMKLDNNCase(parent): - def init_kernel_type(self): self.use_mkldnn = True @@ -64,7 +68,6 @@ create_test_mkldnn_class(TestCase5) class TestAvgPoolAdaptive(TestPool2D_Op): - def init_adaptive(self): self.adaptive = True @@ -87,7 +90,6 @@ class TestAvgPoolAdaptive(TestPool2D_Op): class TestAvgPoolAdaptive2(TestAvgPoolAdaptive): - def init_test_case(self): self.ksize = [2, 3] self.strides = [1, 1] @@ -97,7 +99,6 @@ class TestAvgPoolAdaptive2(TestAvgPoolAdaptive): class TestAvgPoolAdaptive3(TestAvgPoolAdaptive): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -107,7 +108,6 @@ class TestAvgPoolAdaptive3(TestAvgPoolAdaptive): class TestAsymPad(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -133,81 +133,68 @@ class TestAsymPad(TestPool2D_Op): class TestAsymPadCase1(TestAsymPad): - def init_paddings(self): self.paddings = [1, 1, 0, 0] class TestAsymPadCase2(TestAsymPad): - def init_paddings(self): self.paddings = [1, 0, 1, 2] class TestAsymPadCase3(TestAsymPad): - def init_paddings(self): self.paddings = [1, 2, 1, 2] class TestAsymPadCase4(TestAsymPad): - def init_paddings(self): self.paddings = [1, 0, 1, 2] class TestAsymPadCase5(TestAsymPad): - def init_paddings(self): self.paddings = [2, 2, 1, 2] class TestAsymPadMaxCase1(TestAsymPadCase1): - def init_pool_type(self): self.pool_type = "max" class TestAsymPadMaxCase2(TestAsymPadCase2): - def init_pool_type(self): self.pool_type = "max" class TestAsymPadMaxCase3(TestAsymPadCase3): - def init_pool_type(self): self.pool_type = "max" class TestAsymPadMaxCase4(TestAsymPadCase4): - def init_pool_type(self): self.pool_type = "max" class TestAsymPadMaxCase5(TestAsymPadCase5): - def init_pool_type(self): self.pool_type = "max" class TestAsymPadSame(TestAsymPad): - def init_paddings(self): self.paddings = [0, 0] self.padding_algorithm = "SAME" class TestAsymPadValid(TestAsymPad): - def init_paddings(self): self.paddings = [0, 0, 0, 0] self.padding_algorithm = "VALID" class TestAsymPadValidNHWC(TestAsymPadValid): - def init_data_format(self): self.data_format = "NHWC" @@ -217,5 +204,6 @@ class TestAsymPadValidNHWC(TestAsymPadValid): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_prelu_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_prelu_mkldnn_op.py index 809d5efd1e437e18a9d49cbf5267a1d7dff8b8fc..1b1c79248ce2d9c94ffa195f96668c9ce4cbc758 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_prelu_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_prelu_mkldnn_op.py @@ -16,7 +16,11 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) def ref_prelu(x, weight, mode): @@ -27,12 +31,14 @@ def ref_prelu(x, weight, mode): elif mode == "channel": if len(weight.shape) > 1: for i in 
range(x.shape[1]): - result[:, i] = np.where(x[:, i] > 0, x[:, i], - x[:, i] * weight[0, i]) + result[:, i] = np.where( + x[:, i] > 0, x[:, i], x[:, i] * weight[0, i] + ) else: for i in range(x.shape[1]): - result[:, i] = np.where(x[:, i] > 0, x[:, i], - x[:, i] * weight[i]) + result[:, i] = np.where( + x[:, i] > 0, x[:, i], x[:, i] * weight[i] + ) elif mode == "element": result = np.where(x[:] > 0, x[:], x[:] * weight) @@ -40,7 +46,6 @@ def ref_prelu(x, weight, mode): class TestPReluModeChannelOneDNNOp(OpTest): - def init_attrs(self): self.mode = "element" self.alpha = np.random.random((1, 4, 5, 5)).astype("float32") @@ -69,7 +74,6 @@ class TestPReluModeChannelOneDNNOp(OpTest): class TestPReluModeAllOneDNNOp(TestPReluModeChannelOneDNNOp): - def init_attrs(self): self.mode = "all" self.alpha = np.random.random((1, 1, 1, 1)).astype("float32") @@ -81,14 +85,12 @@ class TestPReluModeAllOneDNNOp(TestPReluModeChannelOneDNNOp): class TestPReluModeElementOneDNNOp(TestPReluModeChannelOneDNNOp): - def init_attrs(self): self.mode = "element" self.alpha = np.random.random((1, 4, 5, 5)).astype("float32") class TestPReluModeChannel3DOneDNNOp(TestPReluModeChannelOneDNNOp): - def init_attrs(self): self.mode = "channel" self.x = np.random.random((1, 100, 1)).astype("float32") @@ -96,7 +98,6 @@ class TestPReluModeChannel3DOneDNNOp(TestPReluModeChannelOneDNNOp): class TestPReluModeChannelAlpha1DOneDNNOp(TestPReluModeChannelOneDNNOp): - def init_attrs(self): self.mode = "channel" self.x = np.random.random((1, 100, 1)).astype("float32") @@ -104,7 +105,6 @@ class TestPReluModeChannelAlpha1DOneDNNOp(TestPReluModeChannelOneDNNOp): class TestPReluModeAllAlpha1DOneDNNOp(TestPReluModeAllOneDNNOp): - def init_attrs(self): self.mode = "channel" self.x = np.random.random((1, 1, 100)).astype("float32") @@ -113,14 +113,14 @@ class TestPReluModeAllAlpha1DOneDNNOp(TestPReluModeAllOneDNNOp): # BF16 TESTS def create_bf16_test_class(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestPReluBF16OneDNNOp(parent): - - def set_inputs(self, ): + def set_inputs( + self, + ): self.inputs = { 'X': convert_float_to_uint16(self.x), - 'Alpha': convert_float_to_uint16(self.alpha) + 'Alpha': convert_float_to_uint16(self.alpha), } def set_dtype_attr(self): @@ -136,12 +136,18 @@ def create_bf16_test_class(parent): elif self.mode == "channel": if len(self.alpha.shape) > 1: for i in range(self.x.shape[1]): - self.dx[:, i] = np.where(self.x[:, i] > 0, dout[:, i], - dout[:, i] * self.alpha[0, i]) + self.dx[:, i] = np.where( + self.x[:, i] > 0, + dout[:, i], + dout[:, i] * self.alpha[0, i], + ) else: for i in range(self.x.shape[1]): - self.dx[:, i] = np.where(self.x[:, i] > 0, dout[:, i], - dout[:, i] * self.alpha[i]) + self.dx[:, i] = np.where( + self.x[:, i] > 0, + dout[:, i], + dout[:, i] * self.alpha[i], + ) self.dx elif self.mode == "element": self.dx = np.where(self.x[:] > 0, dout[:], dout[:] * self.alpha) @@ -155,10 +161,12 @@ def create_bf16_test_class(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X", "Alpha"], + core.CPUPlace(), + ["X", "Alpha"], "Out", user_defined_grads=[self.dx, self.dalpha], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) cls_name = "{0}_{1}".format(parent.__name__, "BF16") TestPReluBF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py 
b/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py index 52498793d58b5d72022f68f2098d4cd32c9862fb..4712cf4326934208eca419bda57c2badfc88fc24 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py @@ -19,7 +19,6 @@ import paddle class TestQuantizeOp(OpTest): - def setUp(self): self.op_type = 'quantize' self.scale = 255.0 @@ -38,25 +37,28 @@ class TestQuantizeOp(OpTest): def prepare_input(self): if self.is_negative: # input data values are from interval [-1.0, 1.0) - self.input = (2 * np.random.random_sample(self.input_size) - - 1).astype('float32') + self.input = ( + 2 * np.random.random_sample(self.input_size) - 1 + ).astype('float32') else: # input data values are from interval [0.0, 1.0) - self.input = (np.random.random_sample( - self.input_size)).astype('float32') + self.input = (np.random.random_sample(self.input_size)).astype( + 'float32' + ) self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} self.attrs = { 'Scale': self.scale, 'Shift': self.shift, 'is_negative_input': self.is_negative, - 'output_format': self.output_format + 'output_format': self.output_format, } def prepare_output(self): input_data_type = 'int8' if self.is_negative else 'uint8' - output = np.rint(self.input * self.scale + - self.shift).astype(input_data_type) + output = np.rint(self.input * self.scale + self.shift).astype( + input_data_type + ) self.outputs = {'Output': output} def test_check_output(self): @@ -89,7 +91,6 @@ class TestQuantizeOp(OpTest): class TestQuantizeOp1(TestQuantizeOp): - def set_scale(self): self.scale = 127.0 @@ -98,7 +99,6 @@ class TestQuantizeOp1(TestQuantizeOp): class TestQuantizeOp2(TestQuantizeOp): - def set_scale(self): self.scale = 255.0 @@ -109,7 +109,6 @@ class TestQuantizeOp2(TestQuantizeOp): # 2-dim input # P - positive input class TestQuantizeOpShift_NCHW_2_P(TestQuantizeOp): - def set_output_format(self): self.output_format = 'NCHW' @@ -129,7 +128,6 @@ class TestQuantizeOpShift_NCHW_2_P(TestQuantizeOp): # 2-dim input # N - negative input class TestQuantizeOpShift_NCHW_2_N(TestQuantizeOpShift_NCHW_2_P): - def set_is_negative(self): self.is_nagative = True @@ -141,63 +139,53 @@ class TestQuantizeOpShift_NCHW_2_N(TestQuantizeOpShift_NCHW_2_P): class TestQuantizeOpShift_NHWC_2_P(TestQuantizeOpShift_NCHW_2_P): - def set_output_format(self): self.output_format = 'NHWC' class TestQuantizeOpShift_NHWC_2_N(TestQuantizeOpShift_NCHW_2_N): - def set_output_format(self): self.output_format = 'NHWC' # 3-dim input class TestQuantizeOpShift_NCHW_3_P(TestQuantizeOpShift_NCHW_2_P): - def set_input_size(self): self.input_size = [2, 3, 4] class TestQuantizeOpShift_NCHW_3_N(TestQuantizeOpShift_NCHW_2_N): - def set_input_size(self): self.input_size = [2, 3, 4] class TestQuantizeOpShift_NHWC_3_P(TestQuantizeOpShift_NCHW_3_P): - def set_output_format(self): self.output_format = 'NHWC' class TestQuantizeOpShift_NHWC_3_N(TestQuantizeOpShift_NCHW_3_N): - def set_output_format(self): self.output_format = 'NHWC' # 4-dim input class TestQuantizeOpShift_NCHW_4_P(TestQuantizeOpShift_NCHW_2_P): - def set_input_size(self): self.input_size = [2, 3, 4, 5] class TestQuantizeOpShift_NCHW_4_N(TestQuantizeOpShift_NCHW_2_N): - def set_input_size(self): self.input_size = [2, 3, 4, 5] class TestQuantizeOpShift_NHWC_4_P(TestQuantizeOpShift_NCHW_4_P): - def set_output_format(self): self.output_format = 'NHWC' class TestQuantizeOpShift_NHWC_4_N(TestQuantizeOpShift_NCHW_4_N): - def 
set_output_format(self): self.output_format = 'NHWC' diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py index 12b8a468b46f5272b0194b9795fa9786444a7562..fdd24498f88e025bad45279c80a73ba2ea356512 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_bf16_mkldnn_op.py @@ -14,7 +14,12 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTestTool, OpTest, skip_check_grad_ci, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTestTool, + OpTest, + skip_check_grad_ci, + convert_float_to_uint16, +) import paddle.fluid.core as core import paddle @@ -23,7 +28,6 @@ paddle.enable_static() @OpTestTool.skip_if_not_cpu_bf16() class TestReduceSumDefaultBF16OneDNNOp(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -69,20 +73,21 @@ class TestReduceSumDefaultBF16OneDNNOp(OpTest): class TestReduceDefaultWithGradBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp): - def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", check_dygraph=False, user_defined_grads=[self.grad_X], - user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)], + ) class TestReduceSum4DReduceAllDimAttributeBF16OneDNNOp( - TestReduceDefaultWithGradBF16OneDNNOp): - + TestReduceDefaultWithGradBF16OneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -94,8 +99,8 @@ class TestReduceSum4DReduceAllDimAttributeBF16OneDNNOp( class TestReduceSum4DReduceAllWithoutReduceAllAttributeNegativeDimsBF16OneDNNOp( - TestReduceDefaultWithGradBF16OneDNNOp): - + TestReduceDefaultWithGradBF16OneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -107,8 +112,8 @@ class TestReduceSum4DReduceAllWithoutReduceAllAttributeNegativeDimsBF16OneDNNOp( class TestReduceSum5DReduceAllKeepDimsBF16OneDNNOp( - TestReduceDefaultWithGradBF16OneDNNOp): - + TestReduceDefaultWithGradBF16OneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -119,9 +124,9 @@ class TestReduceSum5DReduceAllKeepDimsBF16OneDNNOp( self.outputs = {'Out': self.x_fp32.sum(keepdims=self.attrs['keep_dim'])} -class TestReduceSum4DReduceAllBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp - ): - +class TestReduceSum4DReduceAllBF16OneDNNOp( + TestReduceDefaultWithGradBF16OneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -134,7 +139,8 @@ class TestReduceSum4DReduceAllBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMax3DBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -150,9 +156,11 @@ class TestReduceMax3DBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMax4DNegativeAndPositiveDimsBF16OneDNNOp( - TestReduceSumDefaultBF16OneDNNOp): + TestReduceSumDefaultBF16OneDNNOp +): """Remove Max with subgradient from gradient check to confirm the success of CI.""" def setUp(self): @@ -167,7 +175,8 @@ class TestReduceMax4DNegativeAndPositiveDimsBF16OneDNNOp( @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMin3DBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -182,7 +191,6 @@ class TestReduceMin3DBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp): class TestReduceMean3DBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp): - def setUp(self): self.op_type = "reduce_mean" self.use_mkldnn = True @@ -194,7 +202,6 @@ class TestReduceMean3DBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp): class TestReduceMean4DBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp): - def setUp(self): self.op_type = "reduce_mean" self.use_mkldnn = True @@ -203,9 +210,8 @@ class TestReduceMean4DBF16OneDNNOp(TestReduceDefaultWithGradBF16OneDNNOp): self.inputs = {'X': self.x_bf16} self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [0, 1]} self.outputs = { - 'Out': - self.x_fp32.sum(axis=tuple(self.attrs['dim'])) / - (self.x_fp32.shape[0] * self.x_fp32.shape[1]) + 'Out': self.x_fp32.sum(axis=tuple(self.attrs['dim'])) + / (self.x_fp32.shape[0] * self.x_fp32.shape[1]) } diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py index 93e81070cb669253b3f65474d6f09a0b9c4f2475..b6f2c30a68285652fe869601845ab8ad83e0e9f8 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py @@ -14,12 +14,15 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, skip_check_grad_ci +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + skip_check_grad_ci, +) import paddle class TestReduceSumDefaultOneDNNOp(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -32,13 +35,11 @@ class TestReduceSumDefaultOneDNNOp(OpTest): class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp): - def test_check_grad(self): self.check_grad(['X'], 'Out') class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp): - def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -50,8 +51,8 @@ class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp): class TestReduceSum4DReduceAllDimAttributeBF16OneDNNOp( - TestReduceDefaultWithGradOneDNNOp): - + TestReduceDefaultWithGradOneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -63,22 +64,21 @@ class TestReduceSum4DReduceAllDimAttributeBF16OneDNNOp( class TestReduceSum5DKeepDimsOneDNNOp(TestReduceDefaultWithGradOneDNNOp): - def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True self.inputs = {'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")} self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_mkldnn': True} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } -class 
TestReduceSum5DReduceAllKeepDimsOneDNNOp(TestReduceDefaultWithGradOneDNNOp - ): - +class TestReduceSum5DReduceAllKeepDimsOneDNNOp( + TestReduceDefaultWithGradOneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -90,7 +90,6 @@ class TestReduceSum5DReduceAllKeepDimsOneDNNOp(TestReduceDefaultWithGradOneDNNOp class TestReduceSum4DReduceAllOneDNNOp(TestReduceDefaultWithGradOneDNNOp): - def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -101,8 +100,8 @@ class TestReduceSum4DReduceAllOneDNNOp(TestReduceDefaultWithGradOneDNNOp): @OpTestTool.skip_if_not_cpu() class TestReduceSum4DNoReduceSimpleCopyOneDNNOp( - TestReduceDefaultWithGradOneDNNOp): - + TestReduceDefaultWithGradOneDNNOp +): def setUp(self): self.op_type = "reduce_sum" self.use_mkldnn = True @@ -113,7 +112,8 @@ class TestReduceSum4DNoReduceSimpleCopyOneDNNOp( @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMax3DOneDNNOp(TestReduceSumDefaultOneDNNOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -129,9 +129,11 @@ class TestReduceMax3DOneDNNOp(TestReduceSumDefaultOneDNNOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMax4DNegativeAndPositiveDimsOneDNNOp( - TestReduceSumDefaultOneDNNOp): + TestReduceSumDefaultOneDNNOp +): """Remove Max with subgradient from gradient check to confirm the success of CI.""" def setUp(self): @@ -146,7 +148,8 @@ class TestReduceMax4DNegativeAndPositiveDimsOneDNNOp( @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMin3DOneDNNOp(TestReduceSumDefaultOneDNNOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -161,7 +164,6 @@ class TestReduceMin3DOneDNNOp(TestReduceSumDefaultOneDNNOp): class TestReduceMean3DOneDNNOp(TestReduceDefaultWithGradOneDNNOp): - def setUp(self): self.op_type = "reduce_mean" self.use_mkldnn = True @@ -173,15 +175,14 @@ class TestReduceMean3DOneDNNOp(TestReduceDefaultWithGradOneDNNOp): class TestReduceMean4DReduceAllOneDNNOp(TestReduceDefaultWithGradOneDNNOp): - def setUp(self): self.op_type = "reduce_mean" self.use_mkldnn = True self.inputs = {'X': np.random.random((5, 6, 8, 10)).astype("float32")} self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn} self.outputs = { - 'Out': - self.inputs['X'].sum() / np.asarray(self.inputs['X'].shape).prod() + 'Out': self.inputs['X'].sum() + / np.asarray(self.inputs['X'].shape).prod() } diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py index f49bb89e3e53e1d79c741948f38b3a9da38ba0e4..a908b5f408d3881f3037aae411fc90ad355fed01 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py @@ -21,7 +21,6 @@ from mkldnn_op_test import format_reorder class TestReQuantizeOp(OpTest): - def set_input_size(self): self.input_size = [1, 1, 10, 10] self.format_reorder = format_reorder @@ -43,32 +42,36 @@ class TestReQuantizeOp(OpTest): def prepare_input(self): if self.input_data_type == 'int8': # input data values are integers from interval [-128, 128) - self.input = (np.random.randint(0, 256, self.input_size) - - 128).astype(self.input_data_type) + self.input = ( + np.random.randint(0, 256, self.input_size) - 128 + ).astype(self.input_data_type) else: # input data values are integers from interval [0, 256) self.input = (np.random.randint(0, 256, self.input_size)).astype( - self.input_data_type) + self.input_data_type + ) self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} self.attrs = { 'Scale_in': self.scale_in, 'Scale_out': self.scale_out, 'Shift_in': self.shift_in, - 'Shift_out': self.shift_out + 'Shift_out': self.shift_out, } def prepare_output(self): scale_ratio = self.scale_out / self.scale_in - with_shift = (self.shift_in != 0.0 or self.shift_out != 0.0) + with_shift = self.shift_in != 0.0 or self.shift_out != 0.0 if with_shift or self.input_data_type == 'uint8': dst_type = 'uint8' type_min = 0 type_max = 255 new_shift = np.clip( - np.rint(self.shift_out - scale_ratio * self.shift_in), type_min, - type_max) + np.rint(self.shift_out - scale_ratio * self.shift_in), + type_min, + type_max, + ) else: dst_type = 'int8' type_min = -128 @@ -77,15 +80,19 @@ class TestReQuantizeOp(OpTest): output_tmp = np.clip( np.rint(self.input.astype('float32') * scale_ratio + new_shift), - type_min, type_max).astype(dst_type) + type_min, + type_max, + ).astype(dst_type) self.output = self.format_reorder(output_tmp, self.input_size) self.outputs = {'Output': self.output} def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.assertTrue(self.input_data_type == 'uint8' or self.shift_in == 0.0, - 'Input data must be unsigned if it has nonzero shift.') + self.assertTrue( + self.input_data_type == 'uint8' or self.shift_in == 0.0, + 'Input data must be unsigned if it has nonzero shift.', + ) self.check_output(check_dygraph=False) def 
check_raise_error(self, msg): @@ -111,28 +118,24 @@ class TestReQuantizeOp(OpTest): class TestReQuantizeOp_S8_SameScales(TestReQuantizeOp): - def set_scales(self): self.scale_in = 127.0 self.scale_out = 127.0 class TestReQuantizeOp_S8_DifferentScales_1(TestReQuantizeOp): - def set_scales(self): self.scale_in = 127.0 self.scale_out = 100.0 class TestReQuantizeOp_S8_DifferentScales_2(TestReQuantizeOp): - def set_scales(self): self.scale_in = 100.0 self.scale_out = 127.0 class TestReQuantizeOp_S8_ZeroInputScale(TestReQuantizeOp): - def set_scales(self): self.scale_in = 0.0 self.scale_out = 127.0 @@ -142,12 +145,14 @@ class TestReQuantizeOp_S8_ZeroInputScale(TestReQuantizeOp): self.outputs = {'Output': self.output} def test_check_output(self): - self.assertRaises(AttributeError, self.check_raise_error, - 'Scale of input cannot be 0.0') + self.assertRaises( + AttributeError, + self.check_raise_error, + 'Scale of input cannot be 0.0', + ) class TestReQuantizeOp_S8_ZeroOutputScale(TestReQuantizeOp): - def set_scales(self): self.scale_in = 127.0 self.scale_out = 0.0 @@ -157,29 +162,31 @@ class TestReQuantizeOp_S8_ZeroOutputScale(TestReQuantizeOp): self.outputs = {'Output': self.output} def test_check_output(self): - self.assertRaises(AttributeError, self.check_raise_error, - 'Scale of output cannot be 0.0') + self.assertRaises( + AttributeError, + self.check_raise_error, + 'Scale of output cannot be 0.0', + ) # ---------------test requantize with u8 input, no shift-------------------- class TestReQuantizeOp_U8_SameScales(TestReQuantizeOp_S8_SameScales): - def set_input_data_type(self): self.input_data_type = 'uint8' class TestReQuantizeOp_U8_DifferentScales_1( - TestReQuantizeOp_S8_DifferentScales_1): - + TestReQuantizeOp_S8_DifferentScales_1 +): def set_input_data_type(self): self.input_data_type = 'uint8' class TestReQuantizeOp_U8_DifferentScales_2( - TestReQuantizeOp_S8_DifferentScales_2): - + TestReQuantizeOp_S8_DifferentScales_2 +): def set_input_data_type(self): self.input_data_type = 'uint8' @@ -188,7 +195,6 @@ class TestReQuantizeOp_U8_DifferentScales_2( class TestReQuantizeOp_S8_WithShift(TestReQuantizeOp): - def set_scales(self): self.scale_in = 60.0 self.scale_out = 127.0 @@ -199,12 +205,13 @@ class TestReQuantizeOp_S8_WithShift(TestReQuantizeOp): def test_check_output(self): self.assertRaises( - AttributeError, self.check_raise_error, - 'Requantize does not support nonzero shift for signed input.') + AttributeError, + self.check_raise_error, + 'Requantize does not support nonzero shift for signed input.', + ) class TestReQuantizeOp_S8_WithOutputShift(TestReQuantizeOp): - def set_scales(self): self.scale_in = 127.0 self.scale_out = 60.0 @@ -218,71 +225,70 @@ class TestReQuantizeOp_S8_WithOutputShift(TestReQuantizeOp): class TestReQuantizeOp_U8_SameScales_SameShift(TestReQuantizeOp_U8_SameScales): - def set_shifts(self): self.shift_in = 128.0 self.shift_out = 128.0 class TestReQuantizeOp_U8_SameScales_DifferentShift_1( - TestReQuantizeOp_U8_SameScales): - + TestReQuantizeOp_U8_SameScales +): def set_shifts(self): self.shift_in = 60.0 self.shift_out = 128.0 class TestReQuantizeOp_U8_SameScales_DifferentShift_2( - TestReQuantizeOp_U8_SameScales): - + TestReQuantizeOp_U8_SameScales +): def set_shifts(self): self.shift_in = 128.0 self.shift_out = 60.0 class TestReQuantizeOp_U8_DifferentScales_1_SameShift( - TestReQuantizeOp_U8_DifferentScales_1): - + TestReQuantizeOp_U8_DifferentScales_1 +): def set_shifts(self): self.shift_in = 128.0 self.shift_out = 128.0 class 
TestReQuantizeOp_U8_DifferentScales_2_SameShift( - TestReQuantizeOp_U8_DifferentScales_2): - + TestReQuantizeOp_U8_DifferentScales_2 +): def set_shifts(self): self.shift_in = 128.0 self.shift_out = 128.0 class TestReQuantizeOp_U8_DifferentScales_1_DifferentShift_1( - TestReQuantizeOp_U8_DifferentScales_1): - + TestReQuantizeOp_U8_DifferentScales_1 +): def set_shifts(self): self.shift_in = 128.0 self.shift_out = 60.0 class TestReQuantizeOp_U8_DifferentScales_2_DifferentShift_1( - TestReQuantizeOp_U8_DifferentScales_2): - + TestReQuantizeOp_U8_DifferentScales_2 +): def set_shifts(self): self.shift_in = 128.0 self.shift_out = 60.0 class TestReQuantizeOp_U8_DifferentScales_1_DifferentShift_2( - TestReQuantizeOp_U8_DifferentScales_1): - + TestReQuantizeOp_U8_DifferentScales_1 +): def set_shifts(self): self.shift_in = 60.0 self.shift_out = 128.0 class TestReQuantizeOp_U8_DifferentScales_2_DifferentShift_2( - TestReQuantizeOp_U8_DifferentScales_2): - + TestReQuantizeOp_U8_DifferentScales_2 +): def set_shifts(self): self.shift_in = 60.0 self.shift_out = 128.0 @@ -292,7 +298,6 @@ class TestReQuantizeOp_U8_DifferentScales_2_DifferentShift_2( class TestReQuantizeOp_2DimFormat(TestReQuantizeOp): - def format_reorder_2Dim(self, out, size): return out @@ -305,7 +310,6 @@ class TestReQuantizeOp_2DimFormat(TestReQuantizeOp): class TestReQuantizeOpReused(TestReQuantizeOp): - def setUp(self): # self.input_size = [1, 1, 10, 10] self.input_size = [1, 1, 2, 2] @@ -337,38 +341,40 @@ class TestReQuantizeOpReused(TestReQuantizeOp): with fluid.program_guard(program): block = program.global_block() for name in variables: - block.create_var(name=name, - dtype="int8", - shape=variables[name].shape) - block.append_op(type="requantize", - inputs={ - 'Input': block.var('input'), - }, - outputs={"Output": block.var('output')}, - attrs={ - 'Scale_in': self.scale_in, - 'Scale_out': self.scale_out, - 'Shift_in': self.shift_in, - 'Shift_out': self.shift_out - }) + block.create_var( + name=name, dtype="int8", shape=variables[name].shape + ) + block.append_op( + type="requantize", + inputs={ + 'Input': block.var('input'), + }, + outputs={"Output": block.var('output')}, + attrs={ + 'Scale_in': self.scale_in, + 'Scale_out': self.scale_out, + 'Shift_in': self.shift_in, + 'Shift_out': self.shift_out, + }, + ) place = core.CPUPlace() exe = fluid.Executor(place) for i in range(2): - out = exe.run(program, - feed={'input': variables['input']}, - fetch_list=['output']) + out = exe.run( + program, + feed={'input': variables['input']}, + fetch_list=['output'], + ) - np.testing.assert_allclose(variables['output'], - out[0], - rtol=1e-05, - atol=1e-4) + np.testing.assert_allclose( + variables['output'], out[0], rtol=1e-05, atol=1e-4 + ) # ---------------test reused requantize op, no shift------------------------ class TestReQuantizeOpReused_WithShift(TestReQuantizeOpReused): - def set_input_data_type(self): self.input_data_type = 'uint8' diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py index 7fbdf66ee7d7525862dc14dcdad0edc4fc89b0b1..efa1459b047c8df3a1455afbf21b1590af890e7c 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_bf16_op.py @@ -20,10 +20,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 
evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestReshapeBf16Op(OpTest): - def setUp(self): self.op_type = "reshape2" self.use_mkldnn = False @@ -35,11 +35,11 @@ class TestReshapeBf16Op(OpTest): self.attrs = { 'shape': self.new_shape, 'use_mkldnn': self.use_mkldnn, - 'mkldnn_data_type': self.mkldnn_data_type + 'mkldnn_data_type': self.mkldnn_data_type, } self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype(np.float32) + 'XShape': np.random.random(self.ori_shape).astype(np.float32), } def init_data(self): @@ -49,21 +49,24 @@ class TestReshapeBf16Op(OpTest): def init_input_data(self): self.input_data_fp32 = np.random.random(self.ori_shape).astype( - np.float32) + np.float32 + ) self.input_data = convert_float_to_uint16(self.input_data_fp32) def test_check_output(self): self.check_output_with_place(core.CPUPlace(), no_check_set=['XShape']) def test_check_grad(self): - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - check_dygraph=False, - user_defined_grads=[self.input_data_fp32], - user_defined_grad_outputs=[ - self.inputs["X"].reshape( - self.infered_shape) - ]) + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + check_dygraph=False, + user_defined_grads=[self.input_data_fp32], + user_defined_grad_outputs=[ + self.inputs["X"].reshape(self.infered_shape) + ], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_mkldnn_op.py index ef2748155e7830df1ae45765ca0851801d81017d..46d232cfc0809ff82e5939a709bf860799b418e6 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_reshape_mkldnn_op.py @@ -16,13 +16,18 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) -@OpTestTool.skip_if(core.is_compiled_with_cuda(), - "CUDA has to be skipped because it forces dygraph") +@OpTestTool.skip_if( + core.is_compiled_with_cuda(), + "CUDA has to be skipped because it forces dygraph", +) class TestReshape2OneDNNOp(OpTest): - def setUp(self): self.init_data() self.set_op_type() @@ -47,7 +52,7 @@ class TestReshape2OneDNNOp(OpTest): def set_outputs(self): self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -63,7 +68,6 @@ class TestReshape2OneDNNOp(OpTest): class TestReshape2OneDNNOpDimInfer1(TestReshape2OneDNNOp): - def init_data(self): self.ori_shape = (5, 25) self.new_shape = (5, -1, 5) @@ -71,7 +75,6 @@ class TestReshape2OneDNNOpDimInfer1(TestReshape2OneDNNOp): class TestReshape2OneDNNOpDimInfer2(TestReshape2OneDNNOp): - def init_data(self): self.ori_shape = (6, 20) self.new_shape = (0, -1, 20) @@ -83,12 +86,11 @@ class TestReshape2OneDNNOpDimInfer2(TestReshape2OneDNNOp): def set_outputs(self): self.outputs = { "Out": self.inputs["X"].reshape(self.actual_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } class TestReshape2OneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp): - def 
set_additional_inputs(self): self.inputs["Shape"] = np.array(self.new_shape, dtype="int32") @@ -98,7 +100,7 @@ class TestReshape2OneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp): def set_outputs(self): self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -108,8 +110,8 @@ class TestReshape2OneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp): class TestReshape2OneDNNOpDimInfer1_attr_OnlyShape( - TestReshape2OneDNNOp_attr_OnlyShape): - + TestReshape2OneDNNOp_attr_OnlyShape +): def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 10) @@ -118,12 +120,12 @@ class TestReshape2OneDNNOpDimInfer1_attr_OnlyShape( class TestReshape2OneDNNOpDimInfer1_attr_ShapeTensor(TestReshape2OneDNNOp): - def set_additional_inputs(self): shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs["ShapeTensor"] = shape_tensor @@ -135,20 +137,20 @@ class TestReshape2OneDNNOpDimInfer1_attr_ShapeTensor(TestReshape2OneDNNOp): class TestReshape2OneDNNOpDimInfer1_attr_ShapeTensorAndShape( - TestReshape2OneDNNOpDimInfer1_attr_ShapeTensor): - + TestReshape2OneDNNOpDimInfer1_attr_ShapeTensor +): def set_additional_inputs(self): shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs["Shape"] = np.array((1, 2, 3, 4), dtype="int32") self.inputs["ShapeTensor"] = shape_tensor class TestReshapeOneDNNOp(TestReshape2OneDNNOp): - def set_op_type(self): self.op_type = "reshape" @@ -160,7 +162,6 @@ class TestReshapeOneDNNOp(TestReshape2OneDNNOp): class TestReshapeOneDNNOpDimInfer1(TestReshapeOneDNNOp): - def init_data(self): self.ori_shape = (5, 25) self.new_shape = (5, -1, 5) @@ -168,7 +169,6 @@ class TestReshapeOneDNNOpDimInfer1(TestReshapeOneDNNOp): class TestReshapeOneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp_attr_OnlyShape): - def set_op_type(self): self.op_type = "reshape" @@ -180,8 +180,8 @@ class TestReshapeOneDNNOp_attr_OnlyShape(TestReshape2OneDNNOp_attr_OnlyShape): class TestReshapeOneDNNOpDimInfer1_attr_OnlyShape( - TestReshapeOneDNNOp_attr_OnlyShape): - + TestReshapeOneDNNOp_attr_OnlyShape +): def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 10) @@ -191,10 +191,8 @@ class TestReshapeOneDNNOpDimInfer1_attr_OnlyShape( # BF16 TESTS def create_reshape_bf16_test_classes(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestReshape2BF16OneDNNOp(parent): - def set_inputs(self): self.dtype = np.uint16 self.inputs = {"X": convert_float_to_uint16(self.x)} @@ -204,22 +202,25 @@ def create_reshape_bf16_test_classes(parent): self.dx = np.reshape(self.dout, self.ori_shape) def test_check_output(self): - self.check_output_with_place(core.CPUPlace(), - no_check_set=["XShape"]) + self.check_output_with_place( + core.CPUPlace(), no_check_set=["XShape"] + ) def test_check_grad(self): self.calculate_grads() - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - user_defined_grads=[self.dx], - user_defined_grad_outputs=[self.dout]) + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + user_defined_grads=[self.dx], + user_defined_grad_outputs=[self.dout], + ) 
cls_name = "{0}_{1}".format(parent.__name__, "Reshape2_BF16") TestReshape2BF16OneDNNOp.__name__ = cls_name globals()[cls_name] = TestReshape2BF16OneDNNOp class TestReshapeBF16OneDNNOp(TestReshape2BF16OneDNNOp): - def set_op_type(self): self.dtype = np.uint16 self.op_type = "reshape" @@ -233,10 +234,12 @@ def create_reshape_bf16_test_classes(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) cls_name = "{0}_{1}".format(parent.__name__, "Reshape_BF16") TestReshapeBF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_bf16_mkldnn_op.py index 18224e07d42b5c649846a03b9f78fbfb522fd4e1..e22161672fe8241c6524c7e1e3aca7413ca2c9c6 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_bf16_mkldnn_op.py @@ -19,12 +19,14 @@ import paddle import paddle.fluid.core as core -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") -@unittest.skipIf(core.is_compiled_with_cuda(), - "core is compiled with CUDA which has no BF implementation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) +@unittest.skipIf( + core.is_compiled_with_cuda(), + "core is compiled with CUDA which has no BF implementation", +) class TestScaleOpBF16(OpTest): - def setUp(self): self.op_type = "scale" self.x_fp32 = np.random.random((10, 10)).astype(np.float32) @@ -47,7 +49,7 @@ class TestScaleOpBF16(OpTest): scale = self.attrs['ScaleTensor'] self.out = (self.x_fp32 * scale) + bias - self.dx = (self.out * scale) + self.dx = self.out * scale def test_check_output(self): self.check_output(check_dygraph=False) @@ -55,15 +57,16 @@ class TestScaleOpBF16(OpTest): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["X"], + core.CPUPlace(), + ["X"], "Out", check_dygraph=False, user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.out)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.out)], + ) class TestScaleOpBF16BiasNotAfterScale(TestScaleOpBF16): - def setUp(self): self.op_type = "scale" self.x_fp32 = np.random.random((10, 10)).astype(np.float32) @@ -74,7 +77,7 @@ class TestScaleOpBF16BiasNotAfterScale(TestScaleOpBF16): 'scale': self.scale, 'use_mkldnn': True, 'bias': 0.0, - 'bias_after_scale': False + 'bias_after_scale': False, } self.use_mkldnn = True self.outputs = { @@ -83,7 +86,6 @@ class TestScaleOpBF16BiasNotAfterScale(TestScaleOpBF16): class TestScaleOpBF16ScaleTensor(TestScaleOpBF16): - def setUp(self): self.op_type = "scale" self.scale = -2.3 @@ -92,14 +94,13 @@ class TestScaleOpBF16ScaleTensor(TestScaleOpBF16): self.scale_tensor = np.array([self.scale]).astype(np.float32) self.inputs = { 'X': self.x_bf16, - 'ScaleTensor': convert_float_to_uint16(self.scale_tensor) + 'ScaleTensor': convert_float_to_uint16(self.scale_tensor), } self.attrs = {'use_mkldnn': True} self.outputs = {'Out': self.x_fp32 * self.scale} class TestScaleOpBF16ScaleTensorNotBiasAfterScale(TestScaleOpBF16): - def setUp(self): self.op_type = "scale" self.scale = 1.2 @@ -108,12 +109,12 @@ class 
TestScaleOpBF16ScaleTensorNotBiasAfterScale(TestScaleOpBF16): self.scale_tensor = np.array([self.scale]).astype(np.float32) self.inputs = { 'X': self.x_bf16, - 'ScaleTensor': convert_float_to_uint16(self.scale_tensor) + 'ScaleTensor': convert_float_to_uint16(self.scale_tensor), } self.attrs = { 'bias': -1.1, 'bias_after_scale': False, - 'use_mkldnn': True + 'use_mkldnn': True, } self.outputs = {'Out': (self.x_fp32 + self.attrs['bias']) * self.scale} diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py index 9fcbb98dfa287fd8192e6dff7dac4e6ceae1317d..b812f8d876d918ccfa289e4fb30a182843b1af8c 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py @@ -19,7 +19,6 @@ import paddle class TestScaleOp(OpTest): - def setUp(self): self.op_type = "scale" self.inputs = {'X': np.random.random((10, 10)).astype(np.float32)} @@ -37,7 +36,6 @@ class TestScaleOp(OpTest): class TestScaleOpBiasNotAfterScale(OpTest): - def setUp(self): self.op_type = "scale" self.inputs = {'X': np.random.random((10, 10)).astype(np.float32)} @@ -45,7 +43,7 @@ class TestScaleOpBiasNotAfterScale(OpTest): 'scale': 1.5, 'use_mkldnn': True, 'bias': 2.3, - 'bias_after_scale': False + 'bias_after_scale': False, } self.use_mkldnn = True self.outputs = { @@ -60,13 +58,12 @@ class TestScaleOpBiasNotAfterScale(OpTest): class TestScaleOpScaleTensor(OpTest): - def setUp(self): self.op_type = "scale" self.scale = -2.3 self.inputs = { 'X': np.random.random((10, 10)).astype(np.float32), - 'ScaleTensor': np.array([self.scale]).astype(np.float32) + 'ScaleTensor': np.array([self.scale]).astype(np.float32), } self.attrs = {} self.outputs = {'Out': self.inputs['X'] * self.scale} @@ -79,18 +76,17 @@ class TestScaleOpScaleTensor(OpTest): class TestScaleOpScaleTensorNotBiasAfterScale(OpTest): - def setUp(self): self.op_type = "scale" self.scale = -1.2 self.inputs = { 'X': np.random.random((10, 10)).astype(np.float32), - 'ScaleTensor': np.array([self.scale]).astype(np.float32) + 'ScaleTensor': np.array([self.scale]).astype(np.float32), } self.attrs = {'bias': -6.8, 'bias_after_scale': False} self.outputs = { - 'Out': - (self.inputs['X'] + self.attrs['bias']) * self.inputs['ScaleTensor'] + 'Out': (self.inputs['X'] + self.attrs['bias']) + * self.inputs['ScaleTensor'] } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_shape_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_shape_mkldnn_op.py index 573d4a9b2cf22a2ed9783274e38420a6ab8e7c78..98401c3ca1754c52a09891ac1071e8c2d94905d3 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_shape_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_shape_mkldnn_op.py @@ -21,7 +21,6 @@ from paddle.fluid import core @OpTestTool.skip_if_not_cpu_bf16() class TestShape3DFP32OneDNNOp(OpTest): - def setUp(self): self.op_type = "shape" self.config() @@ -38,21 +37,18 @@ class TestShape3DFP32OneDNNOp(OpTest): class TestShape6DBF16OneDNNOp(TestShape3DFP32OneDNNOp): - def config(self): self.shape = [10, 2, 3, 4, 5, 2] self.dtype = np.uint16 class TestShape9DINT8OneDNNOp(TestShape3DFP32OneDNNOp): - def config(self): self.shape = [1, 2, 3, 4, 5, 6, 7, 8, 9] self.dtype = np.int8 class TestShape2DUINT8OneDNNOp(TestShape3DFP32OneDNNOp): - def config(self): self.shape = [7, 11] self.dtype = np.uint8 diff --git 
a/python/paddle/fluid/tests/unittests/mkldnn/test_shuffle_channel_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_shuffle_channel_mkldnn_op.py index 0ed91e959242b252872491ada6d8ff62ce6709ee..b5320c0653a266e875e19f31333afe102a19ac47 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_shuffle_channel_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_shuffle_channel_mkldnn_op.py @@ -21,7 +21,6 @@ import paddle.fluid.core as core @OpTestTool.skip_if_not_cpu_bf16() class TestShuffleChannelOneDNNOp(OpTest): - def setUp(self): self.op_type = "shuffle_channel" self.set_dtype() @@ -30,8 +29,9 @@ class TestShuffleChannelOneDNNOp(OpTest): self.attrs = {'use_mkldnn': True, 'group': self.group} _, c, h, w = self.inputs['X'].shape - input_reshaped = np.reshape(self.inputs['X'], - (-1, self.group, c // self.group, h, w)) + input_reshaped = np.reshape( + self.inputs['X'], (-1, self.group, c // self.group, h, w) + ) input_transposed = np.transpose(input_reshaped, (0, 2, 1, 3, 4)) self.outputs = {'Out': np.reshape(input_transposed, (-1, c, h, w))} @@ -46,13 +46,11 @@ class TestShuffleChannelOneDNNOp(OpTest): class TestShuffleChannelSingleGroupOneDNNOp(TestShuffleChannelOneDNNOp): - def set_group(self): self.group = 1 class TestShuffleChannelBF16OneDNNOp(TestShuffleChannelOneDNNOp): - def set_dtype(self): self.dtype = np.uint16 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py index 0812f4da8b275afe76704907d347980911502d5c..8dca568bb1eff28b4be808ac18571a37500ad02e 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py @@ -15,14 +15,19 @@ import unittest import numpy as np import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) import paddle -@OpTestTool.skip_if(core.is_compiled_with_cuda(), - "CUDA required dygraph so oneDNN UT must be skipped") +@OpTestTool.skip_if( + core.is_compiled_with_cuda(), + "CUDA required dygraph so oneDNN UT must be skipped", +) class TestSliceOneDNNOp(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -33,7 +38,7 @@ class TestSliceOneDNNOp(OpTest): 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags, - 'use_mkldnn': True + 'use_mkldnn': True, } self.set_attrs() @@ -59,7 +64,6 @@ class TestSliceOneDNNOp(OpTest): class TestSliceOneDNNOp1(TestSliceOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-3, 0, 2] @@ -70,7 +74,6 @@ class TestSliceOneDNNOp1(TestSliceOneDNNOp): class TestSliceOneDNNOp2(TestSliceOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-3, 0, 2] @@ -81,7 +84,6 @@ class TestSliceOneDNNOp2(TestSliceOneDNNOp): class TestSliceDecrease1AxisOneDNNOp(TestSliceOneDNNOp): - def set_attrs(self): self.attrs['decrease_axis'] = self.decrease_axis @@ -96,7 +98,6 @@ class TestSliceDecrease1AxisOneDNNOp(TestSliceOneDNNOp): class TestSliceDecrease2AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [1, 0, 2] @@ -108,7 +109,6 @@ class TestSliceDecrease2AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class 
TestSliceDecrease3AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-1, 0, 2] @@ -120,7 +120,6 @@ class TestSliceDecrease3AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class TestSliceDecrease4AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 7]).astype("float32") self.starts = [0, 1, 2, 3] @@ -132,7 +131,6 @@ class TestSliceDecrease4AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class TestSlice5DOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([3, 4, 5, 6, 7]).astype("float32") self.starts = [-1] @@ -144,7 +142,6 @@ class TestSlice5DOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class TestSlice3DOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([5, 4, 5]).astype("float32") self.starts = [-1] @@ -156,8 +153,8 @@ class TestSlice3DOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class TestSliceOneDNNOp_decs_dim_starts_ListTensor( - TestSliceDecrease1AxisOneDNNOp): - + TestSliceDecrease1AxisOneDNNOp +): def set_inputs(self): starts_tensor = [] for index, ele in enumerate(self.starts): @@ -175,7 +172,6 @@ class TestSliceOneDNNOp_decs_dim_starts_ListTensor( class TestSlice4DInferDimsOneDNNOp(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([1, 1, 10, 10]).astype("float32") self.starts = [1, 2] @@ -187,7 +183,6 @@ class TestSlice4DInferDimsOneDNNOp(TestSliceDecrease1AxisOneDNNOp): class TestSlice4DInferDimsOneDNNOp2(TestSliceDecrease1AxisOneDNNOp): - def config(self): self.input = np.random.random([1, 1, 10, 10]).astype("float32") self.starts = [4, 2] @@ -200,10 +195,8 @@ class TestSlice4DInferDimsOneDNNOp2(TestSliceDecrease1AxisOneDNNOp): # BF16 TESTS def create_bf16_test_class(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestSliceBF16OneDNNOp(parent): - def set_inputs(self): self.dtype = np.uint16 self.inputs = {'Input': convert_float_to_uint16(self.input)} @@ -218,8 +211,12 @@ def create_bf16_test_class(parent): for i in range(len(self.axes)): begin[self.axes[i]] = self.starts[i] end[self.axes[i]] = self.ends[i] - self.dx[begin[0]:end[0], begin[1]:end[1], begin[2]:end[2], - begin[3]:end[3]] = self.dout + self.dx[ + begin[0] : end[0], + begin[1] : end[1], + begin[2] : end[2], + begin[3] : end[3], + ] = self.dout def test_check_output(self): self.check_output_with_place(core.CPUPlace()) @@ -227,10 +224,12 @@ def create_bf16_test_class(parent): def test_check_grad(self): self.calculate_grads() self.check_grad_with_place( - core.CPUPlace(), ["Input"], + core.CPUPlace(), + ["Input"], "Out", user_defined_grads=[self.dx], - user_defined_grad_outputs=[convert_float_to_uint16(self.dout)]) + user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], + ) cls_name = "{0}_{1}".format(parent.__name__, "BF16") TestSliceBF16OneDNNOp.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py index 23643261836ea38fcc2590f9d2d053b2494397bd..e87214b6b27e75b08a4385b497dfb475781f89de 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py @@ -16,21 +16,28 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import convert_float_to_uint16 import paddle.fluid.core as core -from 
paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6 +from paddle.fluid.tests.unittests.test_softmax_op import ( + TestSoftmaxOp, + TestSoftmaxOp2, + TestSoftmaxOp3, + TestSoftmaxOp4, + TestSoftmaxOp5, + TestSoftmaxOp6, +) from paddle import enable_static def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - np.max(x).clip(-64.) + shiftx = x - np.max(x).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestSoftmaxMKLDNNOp(TestSoftmaxOp): - def get_x_shape(self): return [10, 10] @@ -47,7 +54,8 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp): x = np.random.uniform(0.1, 1, self.shape).astype(np.float64) out = convert_float_to_uint16( - np.apply_along_axis(stable_softmax, self.axis, x)) + np.apply_along_axis(stable_softmax, self.axis, x) + ) self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} @@ -64,31 +72,26 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp): class TestSoftmaxMKLDNNOp2(TestSoftmaxOp2): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp3(TestSoftmaxOp3): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp4(TestSoftmaxOp4): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp5(TestSoftmaxOp5): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp6(TestSoftmaxOp6): - def init_kernel_type(self): self.use_mkldnn = True diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py index 1422d1aeb342d7716f4d436b25aba4c1c1b09d3f..c181c1248867f7a69eab796b4093fac7f3de2381 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py @@ -16,19 +16,25 @@ import unittest import numpy as np from paddle.fluid.tests.unittests.op_test import OpTest import paddle.fluid.core as core -from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6 +from paddle.fluid.tests.unittests.test_softmax_op import ( + TestSoftmaxOp, + TestSoftmaxOp2, + TestSoftmaxOp3, + TestSoftmaxOp4, + TestSoftmaxOp5, + TestSoftmaxOp6, +) from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - np.max(x).clip(-64.) 
+ shiftx = x - np.max(x).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) class TestSoftmaxMKLDNNOp(TestSoftmaxOp): - def get_x_shape(self): return [10, 10] @@ -52,7 +58,7 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } def test_check_output(self): @@ -68,53 +74,49 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp): if self.use_cudnn or self.dtype == np.float16: place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ["X"], - "Out", - max_relative_error=0.01, - check_dygraph=False) + self.check_grad_with_place( + place, + ["X"], + "Out", + max_relative_error=0.01, + check_dygraph=False, + ) else: - self.check_grad(["X"], - "Out", - max_relative_error=0.01, - check_dygraph=False) + self.check_grad( + ["X"], "Out", max_relative_error=0.01, check_dygraph=False + ) def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp2(TestSoftmaxOp2): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp3(TestSoftmaxOp3): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp4(TestSoftmaxOp4): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp5(TestSoftmaxOp5): - def init_kernel_type(self): self.use_mkldnn = True class TestSoftmaxMKLDNNOp6(TestSoftmaxOp6): - def init_kernel_type(self): self.use_mkldnn = True # Check if primitives already exist in backward class TestSoftmaxMKLDNNPrimitivesAlreadyExist(unittest.TestCase): - def setUp(self): super(TestSoftmaxMKLDNNPrimitivesAlreadyExist, self).setUp() @@ -130,12 +132,13 @@ class TestSoftmaxMKLDNNPrimitivesAlreadyExist(unittest.TestCase): return out * (out_grad - np.dot(out, out_grad)) def test_check(self): - check_if_mkldnn_primitives_exist_in_bwd(self, self.op_type, self.x, - self.out, self.out_grad, - self.x_grad) + check_if_mkldnn_primitives_exist_in_bwd( + self, self.op_type, self.x, self.out, self.out_grad, self.x_grad + ) if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py index db7eeb88f1d0de0c7a040f125f69564aad152395..49846a2280cdad830749b1e200818cd8505c5166 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softplus_mkldnn_op.py @@ -14,20 +14,25 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) import paddle def ref_softplus(x, beta, threshold): x_beta = beta * x - out = np.select([x_beta <= threshold, x_beta > threshold], - [np.log(1 + np.exp(x_beta)) / beta, x]) + out = np.select( + [x_beta <= threshold, x_beta > threshold], + [np.log(1 + np.exp(x_beta)) / beta, x], + ) return out @OpTestTool.skip_if_not_cpu_bf16() class TestSoftplusOneDNNOp(OpTest): - def setUp(self): self.op_type = "softplus" self.beta = 1 @@ -57,52 +62,45 @@ class TestSoftplusOneDNNOp(OpTest): class TestSoftplus4DOneDNNOp(TestSoftplusOneDNNOp): - def config(self): self.x_shape = (10, 5, 4, 2) class TestSoftplus6DOneDNNOp(TestSoftplusOneDNNOp): - def config(self): self.x_shape = (3, 2, 2, 5, 4, 2) class 
TestSoftplus6DExtendedFunctorOneDNNOp(TestSoftplusOneDNNOp): - def config(self): self.x_shape = (3, 5, 2, 5, 4, 2) self.beta = 2.5 class TestSoftplus3DExtendedFunctorOneDNNOp(TestSoftplusOneDNNOp): - def config(self): self.x_shape = (20, 4, 2) self.beta = 0.4 class TestSoftplusBF16OneDNNOp(TestSoftplusOneDNNOp): - def set_dtype(self): self.dtype = np.uint16 class TestSoftplus4DBF16OneDNNOp(TestSoftplus4DOneDNNOp): - def set_dtype(self): self.dtype = np.uint16 class TestSoftplus6DBF16OneDNNOp(TestSoftplus6DOneDNNOp): - def set_dtype(self): self.dtype = np.uint16 class TestSoftplus3DExtendedFunctorBF16OneDNNOp( - TestSoftplus3DExtendedFunctorOneDNNOp): - + TestSoftplus3DExtendedFunctorOneDNNOp +): def set_dtype(self): self.dtype = np.uint16 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_split_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_split_bf16_mkldnn_op.py index 56416629d771d0828cca3a1234a59ad2f636a641..a3345c1922d547b2bde021424af4642c2b705ddc 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_split_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_split_bf16_mkldnn_op.py @@ -19,12 +19,14 @@ from paddle.fluid import core from paddle.fluid.tests.unittests.op_test import OpTest -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") -@unittest.skipIf(core.is_compiled_with_cuda(), - "core is compiled with CUDA which has no BF implementation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) +@unittest.skipIf( + core.is_compiled_with_cuda(), + "core is compiled with CUDA which has no BF implementation", +) class TestSplitSectionsBF16OneDNNOp(OpTest): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("uint16") self.axis = 1 @@ -43,7 +45,7 @@ class TestSplitSectionsBF16OneDNNOp(OpTest): self.attrs = { 'use_mkldnn': True, 'num': self.num, - 'mkldnn_data_type': "bfloat16" + 'mkldnn_data_type': "bfloat16", } if self.axis is not None: @@ -55,8 +57,9 @@ class TestSplitSectionsBF16OneDNNOp(OpTest): if self.sections_tensor_list is not None: self.inputs['SectionsTensorList'] = self.sections_tensor_list - self.outputs = {'Out': [('out%d' % i, self.out[i]) \ - for i in range(len(self.out))]} + self.outputs = { + 'Out': [('out%d' % i, self.out[i]) for i in range(len(self.out))] + } def test_check_output(self): self.check_output_with_place(core.CPUPlace()) @@ -73,50 +76,47 @@ class TestSplitSectionsBF16OneDNNOp(OpTest): class TestSplitNumBF16OneDNNOp(TestSplitSectionsBF16OneDNNOp): - def init_data(self): self.x = np.random.random((4, 8, 5, 3)).astype("uint16") self.axis = 1 self.sections = [] self.num = 4 - indices_or_sections = 4 #indices + indices_or_sections = 4 # indices self.out = np.split(self.x, indices_or_sections, self.axis) class TestSplitNumAxisTensorBF16OneDNNOp(TestSplitSectionsBF16OneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("uint16") self.axis = None self.sections = [] self.num = 3 - indices_or_sections = 3 #indices + indices_or_sections = 3 # indices self.axis_tensor = np.array([2]).astype("int32") self.out = np.split(self.x, indices_or_sections, 2) class TestSplitSectionsTensorBF16OneDNNOp(TestSplitSectionsBF16OneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("uint16") self.axis = 1 self.sections = [2, 1, 2] self.sections_tensor_list = [] for index, ele in enumerate(self.sections): - self.sections_tensor_list.append(("x" + str(index), np.ones( - 
(1)).astype('int32') * ele)) + self.sections_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.sections = [-1, -1, -1] - indices_or_sections = [2, 3] #sections + indices_or_sections = [2, 3] # sections self.out = np.split(self.x, indices_or_sections, self.axis) class TestSplitOpUnknownSectionBF16OneDNNOp(TestSplitSectionsBF16OneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("uint16") self.axis = 2 self.sections = [2, 2, -1] - indices_or_sections = [2, 4] #sections + indices_or_sections = [2, 4] # sections self.out = np.split(self.x, indices_or_sections, self.axis) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_split_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_split_mkldnn_op.py index 2b1e1fc9f226a1fcf3642b18aeb0cd0d3f2e275c..e148e0cdcab50c7c84d10ed866e62c40ba3d74f7 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_split_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_split_mkldnn_op.py @@ -19,7 +19,6 @@ from paddle.fluid.tests.unittests.op_test import OpTest class TestSplitSectionsOneDNNOp(OpTest): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("float32") self.axis = 1 @@ -46,8 +45,9 @@ class TestSplitSectionsOneDNNOp(OpTest): if self.sections_tensor_list is not None: self.inputs['SectionsTensorList'] = self.sections_tensor_list - self.outputs = {'Out': [('out%d' % i, self.out[i]) \ - for i in range(len(self.out))]} + self.outputs = { + 'Out': [('out%d' % i, self.out[i]) for i in range(len(self.out))] + } def test_check_output(self): self.check_output() @@ -58,13 +58,12 @@ class TestSplitSectionsOneDNNOp(OpTest): # test with attr(num) class TestSplitNumOneDNNOp(TestSplitSectionsOneDNNOp): - def init_data(self): self.x = np.random.random((4, 8, 5, 3)).astype("float32") self.axis = 1 self.sections = [] self.num = 4 - indices_or_sections = 4 #indices + indices_or_sections = 4 # indices self.out = np.split(self.x, indices_or_sections, self.axis) def test_check_grad(self): @@ -72,40 +71,38 @@ class TestSplitNumOneDNNOp(TestSplitSectionsOneDNNOp): class TestSplitNumAxisTensorOneDNNOp(TestSplitSectionsOneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("float32") self.axis = None self.sections = [] self.num = 3 - indices_or_sections = 3 #indices + indices_or_sections = 3 # indices self.axis_tensor = np.array([2]).astype("int32") self.out = np.split(self.x, indices_or_sections, 2) # attr(sections) is list containing Tensor class TestSplitSectionsTensorOneDNNOp(TestSplitSectionsOneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("float32") self.axis = 1 self.sections = [2, 1, 2] self.sections_tensor_list = [] for index, ele in enumerate(self.sections): - self.sections_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.sections_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.sections = [-1, -1, -1] - indices_or_sections = [2, 3] #sections + indices_or_sections = [2, 3] # sections self.out = np.split(self.x, indices_or_sections, self.axis) class TestSplitOpUnknownSectionOneDNNOp(TestSplitSectionsOneDNNOp): - def init_data(self): self.x = np.random.random((4, 5, 6)).astype("float32") self.axis = 2 self.sections = [2, 2, -1] - indices_or_sections = [2, 4] #sections + indices_or_sections = [2, 4] # sections self.out = np.split(self.x, indices_or_sections, self.axis) diff --git 
a/python/paddle/fluid/tests/unittests/mkldnn/test_squeeze2_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_squeeze2_mkldnn_op.py index 9d8647b908670aa050f57ba342770e66a454e7e8..0b5cca9d5ad0150d880ea8bc65b4b422154fe54c 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_squeeze2_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_squeeze2_mkldnn_op.py @@ -16,13 +16,18 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + OpTestTool, + convert_float_to_uint16, +) -@OpTestTool.skip_if(core.is_compiled_with_cuda(), - "CUDA has to be skipped because it forces dygraph") +@OpTestTool.skip_if( + core.is_compiled_with_cuda(), + "CUDA has to be skipped because it forces dygraph", +) class TestSqueeze2OneDNNOp(OpTest): - def set_op_type(self): self.op_type = "squeeze2" @@ -40,7 +45,7 @@ class TestSqueeze2OneDNNOp(OpTest): def set_outputs(self): self.outputs = { "Out": self.x.reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def setUp(self): @@ -59,7 +64,6 @@ class TestSqueeze2OneDNNOp(OpTest): class TestSqueezeOneDNNOp(TestSqueeze2OneDNNOp): - def set_op_type(self): self.op_type = "squeeze" @@ -71,7 +75,6 @@ class TestSqueezeOneDNNOp(TestSqueeze2OneDNNOp): class TestSqueeze2OneDNNOp1(TestSqueeze2OneDNNOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -79,7 +82,6 @@ class TestSqueeze2OneDNNOp1(TestSqueeze2OneDNNOp): class TestSqueezeOneDNNOp1(TestSqueezeOneDNNOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -87,7 +89,6 @@ class TestSqueezeOneDNNOp1(TestSqueezeOneDNNOp): class TestSqueeze2OneDNNOp2(TestSqueeze2OneDNNOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -95,7 +96,6 @@ class TestSqueeze2OneDNNOp2(TestSqueeze2OneDNNOp): class TestSqueezeOneDNNOp2(TestSqueezeOneDNNOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -103,7 +103,6 @@ class TestSqueezeOneDNNOp2(TestSqueezeOneDNNOp): class TestSqueeze2OneDNNOp3(TestSqueeze2OneDNNOp): - def init_test_case(self): self.ori_shape = (25, 1, 1, 4, 1) self.axes = (1, -1) @@ -111,7 +110,6 @@ class TestSqueeze2OneDNNOp3(TestSqueeze2OneDNNOp): class TestSqueezeOneDNNOp3(TestSqueezeOneDNNOp): - def init_test_case(self): self.ori_shape = (25, 1, 1, 4, 1) self.axes = (1, -1) @@ -120,10 +118,8 @@ class TestSqueezeOneDNNOp3(TestSqueezeOneDNNOp): # BF16 TESTS def create_squeeze_bf16_test_classes(parent): - @OpTestTool.skip_if_not_cpu_bf16() class TestSqueeze2BF16OneDNNOp(parent): - def set_inputs(self): self.dtype = np.uint16 self.inputs = {"X": convert_float_to_uint16(self.x)} @@ -134,17 +130,19 @@ def create_squeeze_bf16_test_classes(parent): def test_check_grad(self): self.calculate_grads() - self.check_grad_with_place(core.CPUPlace(), ["X"], - "Out", - user_defined_grads=[self.dx], - user_defined_grad_outputs=[self.dout]) + self.check_grad_with_place( + core.CPUPlace(), + ["X"], + "Out", + user_defined_grads=[self.dx], + user_defined_grad_outputs=[self.dout], + ) cls_name = "{0}_{1}".format(parent.__name__, "Squeeze2_BF16") TestSqueeze2BF16OneDNNOp.__name__ = cls_name globals()[cls_name] = TestSqueeze2BF16OneDNNOp class TestSqueezeBF16OneDNNOp(TestSqueeze2BF16OneDNNOp): - def set_op_type(self): 
self.dtype = np.uint16 self.op_type = "squeeze" diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py index 5638c14b9e4880801a8b56d455fca55c3327fa52..4052d50a474a220f33b31277ee7be20bcecb5914 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_stack_mkldnn_op.py @@ -21,7 +21,6 @@ import paddle.fluid.core as core @OpTestTool.skip_if_not_cpu() class TestStack2DOneDNNOp(OpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (2, 2) @@ -45,7 +44,8 @@ class TestStack2DOneDNNOp(OpTest): for i in range(self.num_inputs): self.op_inputs.append( - np.random.random(size=self.input_dim).astype(np.float32)) + np.random.random(size=self.input_dim).astype(np.float32) + ) input_list = [] input_names = self.getInputNames() @@ -65,21 +65,18 @@ class TestStack2DOneDNNOp(OpTest): class TestStack1DOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): - self.input_dim = (100) + self.input_dim = 100 self.axis = 0 class TestStack1DAxis1OneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): - self.input_dim = (100) + self.input_dim = 100 self.axis = 1 class TestStack2DAxisLastOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): self.input_dim = (13, 24) self.num_inputs = 5 @@ -87,14 +84,12 @@ class TestStack2DAxisLastOneDNNOp(TestStack2DOneDNNOp): class TestStack3DAxisNegativeOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): self.input_dim = (10, 128, 128) self.axis = -2 class TestStack3DOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): self.input_dim = (10, 128, 128) self.num_inputs = 3 @@ -102,7 +97,6 @@ class TestStack3DOneDNNOp(TestStack2DOneDNNOp): class TestStack4DOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): self.input_dim = (2, 2, 2, 2) self.num_inputs = 3 @@ -110,7 +104,6 @@ class TestStack4DOneDNNOp(TestStack2DOneDNNOp): class TestStack5DOneDNNOp(TestStack2DOneDNNOp): - def initParameters(self): self.input_dim = (2, 3, 4, 5, 6) self.num_inputs = 6 diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_bf16_mkldnn_op.py index 6792eaf7e6944bbdf48da08cdf29ff94d26f3ba0..435651f38a79cd8ae4bce59804cccc57974dae12 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_bf16_mkldnn_op.py @@ -20,10 +20,10 @@ from paddle import enable_static import numpy as np -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestSumBF16MKLDNN(TestSumOp): - def setUp(self): self.op_type = "sum" self.use_mkldnn = True diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py index a9c345e9dfa926ecd91e9798b0d142e9e390aad2..ab26f101687874c153e6d4f61a88f364cb49ad33 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py @@ -20,7 +20,6 @@ import paddle.fluid.op as fluid_op class TestSumMKLDNN(TestSumOp): - def setUp(self): self.op_type = "sum" self.init_data_type() @@ -46,7 +45,6 @@ class TestSumMKLDNN(TestSumOp): class TestMKLDNNSumInplaceOp(unittest.TestCase): - def setUp(self): self.op_type = "sum" 
self.init_data_type() @@ -70,10 +68,9 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase): tensor = var.get_tensor() tensor.set(var_value, place) - sum_op = fluid_op.Operator("sum", - X=["x0", "x1"], - Out=out_var_name, - use_mkldnn=True) + sum_op = fluid_op.Operator( + "sum", X=["x0", "x1"], Out=out_var_name, use_mkldnn=True + ) expected_out = np.array(self.x0 + self.x1) sum_op.run(scope, place) out = scope.find_var("x0").get_tensor() @@ -83,7 +80,7 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase): out_array, rtol=1e-05, atol=1e-05, - err_msg='Inplace sum_mkldnn_op output has diff with expected output' + err_msg='Inplace sum_mkldnn_op output has diff with expected output', ) def test_check_grad(self): @@ -92,5 +89,6 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py index 2edab93d778dea567814ec23f3c2e9c79ca67690..352e35f66dd31884133d5857f32258b69fb8faed 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_bf16_mkldnn_op.py @@ -19,10 +19,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestTransposeOp(OpTest): - def setUp(self): self.op_type = "transpose2" self.use_mkldnn = True @@ -36,12 +36,12 @@ class TestTransposeOp(OpTest): self.attrs = { 'axis': list(self.axis), 'use_mkldnn': self.use_mkldnn, - 'mkldnn_data_type': self.mkldnn_data_type + 'mkldnn_data_type': self.mkldnn_data_type, } self.outputs = { 'XShape': np.random.random(self.shape).astype(np.uint16), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def test_check_output(self): @@ -52,11 +52,11 @@ class TestTransposeOp(OpTest): def init_test_data(self): self.input_data = convert_float_to_uint16( - np.random.random(self.shape).astype(np.float32)) + np.random.random(self.shape).astype(np.float32) + ) class TestBF16Case(TestTransposeOp): - def init_test_case(self): self.shape = (2, 4, 6, 8) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_int8_mkldnn_op.py index 5b3be06aa7505bcd3065231194c834fe91f79fe5..15c89a37e09223466cb3fe3c929008ef5c138796 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_int8_mkldnn_op.py @@ -20,7 +20,6 @@ from mkldnn_op_test import format_reorder class TestTransposeOp(OpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -31,7 +30,7 @@ class TestTransposeOp(OpTest): self.inputs = { 'X': format_reorder(self.input_data, self.shape).astype(np.int8) - } #transform data format to 'NHWC' for INT8 transpose specially. + } # transform data format to 'NHWC' for INT8 transpose specially. 
self.attrs = { 'axis': list(self.axis), @@ -40,7 +39,7 @@ class TestTransposeOp(OpTest): self.outputs = { 'XShape': np.random.random(self.shape).astype(np.int8), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def init_op_type(self): @@ -48,37 +47,37 @@ class TestTransposeOp(OpTest): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output_with_place(core.CPUPlace(), - 1e-5, - no_check_set=['XShape'], - check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), 1e-5, no_check_set=['XShape'], check_dygraph=False + ) def initTestCase(self): self.shape = (2, 3, 4, 5) def initInputData(self): self.input_data = (np.random.randint(0, 100, self.shape) - 50).astype( - np.int8) + np.int8 + ) class TestINT8Case(TestTransposeOp): - def initTestCase(self): self.shape = (2, 4, 6, 8) def initInputData(self): self.input_data = (np.random.randint(0, 100, self.shape) - 50).astype( - np.int8) + np.int8 + ) class TestUINT8Case(TestTransposeOp): - def initTestCase(self): self.shape = (1, 3, 5, 7) def initDataType(self): - self.input_data = (np.random.randint(0, 100, - self.shape)).astype(np.uint8) + self.input_data = (np.random.randint(0, 100, self.shape)).astype( + np.uint8 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py index 5e81358c688b1b04fa7c46e01ca11c645eabc8f7..292c98b4dae238cd6c907e962dfd781d3d0e443a 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py @@ -19,7 +19,6 @@ import numpy as np class TestTransposeMKLDNN(TestTransposeOp): - def setUp(self): self.init_op_type() self.initTestCase() @@ -30,7 +29,7 @@ class TestTransposeMKLDNN(TestTransposeOp): } self.outputs = { 'XShape': np.random.random(self.shape).astype("float32"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def init_op_type(self): @@ -52,42 +51,36 @@ class TestTransposeMKLDNN(TestTransposeOp): class TestCase0MKLDNN(TestTransposeMKLDNN): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) class TestCase1a(TestTransposeMKLDNN): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) class TestCase1b(TestTransposeMKLDNN): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (2, 1, 0) class TestCase2(TestTransposeMKLDNN): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) class TestCase3(TestTransposeMKLDNN): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) class TestCase4(TestTransposeMKLDNN): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) diff --git a/python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py index 9e6385bda4a7557f1e47db2f322c16d363181e83..ca779db12c241eb5e4bc4ff45684d6b97a7a786d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py @@ -16,14 +16,15 @@ import unittest import os import paddle.fluid.core as core import paddle.fluid as fluid -from paddle.distributed.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import 
( + wait_server_ready, +) import paddle paddle.enable_static() class TestCCommInitOp(unittest.TestCase): - def setUp(self): self.endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',') self.current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT") @@ -43,24 +44,29 @@ class TestCCommInitOp(unittest.TestCase): cncl_id_var = block.create_var( name=fluid.unique_name.generate('cncl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_cncl_id', - inputs={}, - outputs={'Out': cncl_id_var}, - attrs={ - 'rank': self.rank, - 'endpoint': self.current_endpoint, - 'other_endpoints': self.other_endpoints - }) - block.append_op(type='c_comm_init', - inputs={'X': cncl_id_var}, - outputs={}, - attrs={ - 'nranks': self.nranks, - 'rank': self.rank, - 'ring_id': 0, - 'device_id': self.mlu_id - }) + type=fluid.core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_cncl_id', + inputs={}, + outputs={'Out': cncl_id_var}, + attrs={ + 'rank': self.rank, + 'endpoint': self.current_endpoint, + 'other_endpoints': self.other_endpoints, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': cncl_id_var}, + outputs={}, + attrs={ + 'nranks': self.nranks, + 'rank': self.rank, + 'ring_id': 0, + 'device_id': self.mlu_id, + }, + ) self.exe.run(program) diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py index c9c681ae38b0efc06c43a15dd7d56185166abd8a..1d8d11ad1120008c67ef497b698608a4c6b9233e 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py @@ -30,22 +30,24 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_collective_api_base_mlu import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_api_base_mlu import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) paddle.enable_static() class TestCollectiveAllgatherAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): tensor_list = [] - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.all_gather(tensor_list, tindata) return tensor_list diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py index 727f3011ff14c256ea4190b02f511ae7662db664..8040e834d65a90b7f6e883fff481008963f084f7 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py @@ -35,7 +35,6 @@ paddle.enable_static() class TestCollectiveAllgather(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -43,26 +42,28 @@ class TestCollectiveAllgather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofallgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - 
main_prog.global_block().append_op(type="c_allgather", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_allgather", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py index 7ea3a2f00c53e3b9491b9f92beceba2e8ee11390..22ca990c55afde93e2b28ab1167785e6e0559f49 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py @@ -30,21 +30,23 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_collective_api_base_mlu import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_api_base_mlu import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) paddle.enable_static() class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.all_reduce(tindata) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py index 19da310741d6ed398bae4a3c4e805f53e5b6e973..609d9e7c41688e40a16b8216a8a69bc9c4b7815c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py @@ -36,30 +36,34 @@ paddle.enable_static() class TestCollectiveAllreduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, col_type): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outof" + col_type, dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_" + col_type, - inputs={'X': tindata}, - attrs={'ring_id': ring_id}, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_" + col_type, + inputs={'X': tindata}, + attrs={'ring_id': ring_id}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git 
a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py index c14456704dfc87d4a4d3d49b636c285c66c458d7..0c1ae572251aa6dc579090ead3433b73178f79c5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py @@ -30,21 +30,23 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_collective_api_base_mlu import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_api_base_mlu import ( + TestCollectiveAPIRunnerBase, + runtime_main, +) paddle.enable_static() class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype="float32") + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype="float32" + ) paddle.distributed.broadcast(tindata, src=1) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py index 3dd50f09a8f5e6383e83ae158e2fc0f4020f8f8b..982d5d204fccc23d0a66bbb605135270c51534e4 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py @@ -36,7 +36,6 @@ paddle.enable_static() class TestCollectiveBroadcast(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -44,26 +43,28 @@ class TestCollectiveBroadcast(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofbroadcast", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_broadcast", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'root': rootid - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_broadcast", + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'root': rootid}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py index 4f257d07fa331f56c18d72f56f88287821182ea8..33a0d9d0d51a7efb0724c7d04c80e32a2f3c0480 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py @@ -30,21 +30,23 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_collective_api_base_mlu import TestCollectiveAPIRunnerBase, runtime_main +from test_collective_api_base_mlu import ( + TestCollectiveAPIRunnerBase, + runtime_main, 
+) paddle.enable_static() class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) paddle.distributed.reduce(tindata, dst=0) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py index 733440cb0de4761d2334d9e689afba07768ba2ed..d9271376b0f3124427003d4ef90cc7a3ae87404a 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py @@ -36,7 +36,6 @@ paddle.enable_static() class TestCollectiveReduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -44,26 +43,28 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outof" + col_type, dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_" + col_type, - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'root_id': rootid - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_" + col_type, + inputs={'X': tindata}, + attrs={'ring_id': ring_id, 'root_id': rootid}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/mlu/multi_process_mlu.py b/python/paddle/fluid/tests/unittests/mlu/multi_process_mlu.py index 782475ff8cb5edb524b9587dc8b68c14ba3d9b56..96148a7bacc4d7f6536902c8703dc97edc22c4ba 100644 --- a/python/paddle/fluid/tests/unittests/mlu/multi_process_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/multi_process_mlu.py @@ -26,12 +26,18 @@ def train(prefix): worker_endpoints = worker_endpoints_env trainers_num = len(worker_endpoints.split(',')) - name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_mlus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_mlus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) @@ -48,23 +54,34 @@ def train_abort(prefix): # train abort exit(1) except SystemExit: - name = "abort>>> selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_mlus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "abort>>> selected_mlus:{} 
worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_mlus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) with open( - "multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) raise else: # sleep 30s to make sure paddle.distributed.launch will terminate this process time.sleep(30) - name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_mlus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_mlus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) diff --git a/python/paddle/fluid/tests/unittests/mlu/nproc_process_mlu.py b/python/paddle/fluid/tests/unittests/mlu/nproc_process_mlu.py index 9b2713532e41b2ffd6994278623153cd46163545..2a070bf67227fbc6dc5eaf5add37aa1b9ba6d361 100644 --- a/python/paddle/fluid/tests/unittests/mlu/nproc_process_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/nproc_process_mlu.py @@ -25,8 +25,13 @@ def train(prefix): worker_endpoints = worker_endpoints_env trainers_num = len(worker_endpoints.split(',')) - name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_mlus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_mlus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_mlus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) with open("{}.check_{}.log".format(prefix, trainer_id), "w") as f: diff --git a/python/paddle/fluid/tests/unittests/mlu/parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/mlu/parallel_dygraph_sync_batch_norm.py index b562f9ce4acaf89602e5531edc6b6315bbd28e49..56abdfc26dda9bc4a875c63c51497bee6a2bc694 100644 --- a/python/paddle/fluid/tests/unittests/mlu/parallel_dygraph_sync_batch_norm.py +++ b/python/paddle/fluid/tests/unittests/mlu/parallel_dygraph_sync_batch_norm.py @@ -32,37 +32,42 @@ from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase class TestLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(TestLayer, self).__init__() - self._conv = Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) self._sync_batch_norm = SyncBatchNorm(num_filters) - self._conv2 = Conv2D(in_channels=num_filters, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv2 = Conv2D( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=filter_size, + 
stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) - self._sync_batch_norm2 = SyncBatchNorm(num_filters, - weight_attr=False, - bias_attr=False) + self._sync_batch_norm2 = SyncBatchNorm( + num_filters, weight_attr=False, bias_attr=False + ) def forward(self, inputs): y = self._conv(inputs) @@ -74,20 +79,23 @@ class TestLayer(fluid.dygraph.Layer): class TestSyncBatchNorm(TestParallelDyGraphRunnerBase): - def get_model(self): model = TestLayer(3, 64, 7) - train_reader = paddle.batch(paddle.dataset.flowers.test(use_xmap=False), - batch_size=32, - drop_last=True) - opt = fluid.optimizer.Adam(learning_rate=1e-3, - parameter_list=model.parameters()) + train_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), + batch_size=32, + drop_last=True, + ) + opt = fluid.optimizer.Adam( + learning_rate=1e-3, parameter_list=model.parameters() + ) return model, train_reader, opt def run_one_loop(self, model, opt, data): batch_size = len(data) - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') + dy_x_data = np.array([x[0].reshape(3, 224, 224) for x in data]).astype( + 'float32' + ) img = to_variable(dy_x_data) img.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py index eb410f14f540a8855cca817e400d96b8d0d53c28..3ea79600954aad15cf93021b772222f9d6c83d1d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py @@ -31,7 +31,10 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_sync_batch_norm_base_mlu import TestSyncBatchNormRunnerBase, runtime_main +from test_sync_batch_norm_base_mlu import ( + TestSyncBatchNormRunnerBase, + runtime_main, +) from op_test import OpTest, _set_use_system_allocator from test_sync_batch_norm_op import create_or_get_tensor @@ -41,7 +44,6 @@ paddle.enable_static() class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -54,29 +56,34 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): self.dshape = [self.N, self.C, self.H, self.W] self.atol = 1e-3 - def get_model(self, - main, - startup, - place, - layout, - seed, - sync_bn=False, - only_forward=False): + def get_model( + self, + main, + startup, + place, + layout, + seed, + sync_bn=False, + only_forward=False, + ): """Build program.""" use_cudnn = False with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data(name='input', - shape=self.dshape, - dtype=self.dtype, - append_batch_size=False) + data = fluid.layers.data( + name='input', + shape=self.dshape, + dtype=self.dtype, + append_batch_size=False, + ) conv = fluid.layers.conv2d( input=data, num_filters=32, filter_size=1, param_attr=fluid.ParamAttr(name='conv2d_weight'), bias_attr=False, - use_cudnn=use_cudnn) + use_cudnn=use_cudnn, + ) if self.bn_dtype == np.float16: conv = fluid.layers.cast(conv, 'float16') bn = fluid.layers.batch_norm( @@ -86,7 +93,8 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=layout, - is_test=only_forward) + is_test=only_forward, + ) if self.bn_dtype == np.float16: bn = fluid.layers.cast(bn, 'float32') sigmoid = fluid.layers.sigmoid(bn) diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_abs_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_abs_op_mlu.py index fe56236b8e25ad820872187f557655935a9d6d8f..c9e09fccf20e3cbae93476d09c5e67aec171b6b9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_abs_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_abs_op_mlu.py @@ -29,7 +29,6 @@ np.random.seed(10) class TestAbs(OpTest): - def setUp(self): self.op_type = "abs" self.set_mlu() @@ -56,12 +55,12 @@ class TestAbs(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], ['Out'], - check_eager=False) + self.check_grad_with_place( + self.place, ['X'], ['Out'], check_eager=False + ) class TestAbsHalf(OpTest): - def setUp(self): self.op_type = "abs" self.set_mlu() @@ -88,8 +87,9 @@ class TestAbsHalf(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], ['Out'], - check_eager=False) + self.check_grad_with_place( + self.place, ['X'], ['Out'], check_eager=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py index 0cdc2045ec7b726641639576168b1a6140621aca..e37be2f9f3d11a5131ac7f628d93880be588d337 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestAccuracyOp(OpTest): - def setUp(self): self.op_type = "accuracy" self.place = paddle.device.MLUPlace(0) @@ -47,7 +46,7 @@ class TestAccuracyOp(OpTest): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } def init_dtype(self): @@ -58,7 +57,6 @@ class TestAccuracyOp(OpTest): class TestAccuracyOpFp16(TestAccuracyOp): - def init_dtype(self): self.dtype = np.float16 @@ -67,15 +65,15 @@ class TestAccuracyOpFp16(TestAccuracyOp): class TestAccuracyOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of accuracy_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.MLUPlace(0)) - label = fluid.layers.data(name='label', - shape=[-1, 1], - dtype="int32") + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.MLUPlace(0) + ) + label = fluid.layers.data( + name='label', shape=[-1, 1], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.accuracy, x1, label) self.assertRaises(TypeError, paddle.metric.accuracy, x1, label) # The input dtype of accuracy_op must be float32 or float64. 
@@ -88,40 +86,42 @@ class TestAccuracyOpError(unittest.TestCase): class TestAccuracyAPI1(unittest.TestCase): - def setUp(self): - self.predictions = paddle.static.data(shape=[2, 5], - name="predictions", - dtype="float32") - self.label = paddle.static.data(shape=[2, 1], - name="labels", - dtype="int32") - self.result = paddle.static.accuracy(input=self.predictions, - label=self.label, - k=1) + self.predictions = paddle.static.data( + shape=[2, 5], name="predictions", dtype="float32" + ) + self.label = paddle.static.data( + shape=[2, 1], name="labels", dtype="int32" + ) + self.result = paddle.static.accuracy( + input=self.predictions, label=self.label, k=1 + ) self.input_predictions = np.array( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype="float32") + dtype="float32", + ) self.input_labels = np.array([[2], [0]], dtype="int32") self.expect_value = np.array([0.5], dtype='float32') def test_api(self): exe = paddle.static.Executor() - result, = exe.run(feed={ - "predictions": self.input_predictions, - 'labels': self.input_labels - }, - fetch_list=[self.result.name]) + (result,) = exe.run( + feed={ + "predictions": self.input_predictions, + 'labels': self.input_labels, + }, + fetch_list=[self.result.name], + ) self.assertEqual((result == self.expect_value).all(), True) class TestAccuracyAPI2(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype='float32') + dtype='float32', + ) label = paddle.to_tensor([[2], [0]], dtype="int32") result = paddle.static.accuracy(input=predictions, label=label, k=1) expect_value = np.array([0.5], dtype='float32') @@ -129,12 +129,12 @@ class TestAccuracyAPI2(unittest.TestCase): class TestAccuracyAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype='float32') + dtype='float32', + ) label = paddle.to_tensor([[2], [0]], dtype="int32") result = paddle.metric.accuracy(input=predictions, label=label, k=1) expect_value = np.array([0.5], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py index 3ac6e23d21a4d1ce9d8119f4f5de9a974c63d4b5..70a3a2671b6b8f54d9fbc857063298dfeeebfbba 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_adam_op_mlu.py @@ -28,7 +28,6 @@ SEED = 2022 class TestAdam(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adam" @@ -52,20 +51,19 @@ class TestAdam(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_mlu(self): @@ -80,7 +78,6 @@ class TestAdam(OpTest): class TestAdamWithEpsilonTensor(OpTest): - def setUp(self): 
self.set_mlu() self.op_type = "adam" @@ -112,15 +109,14 @@ class TestAdamWithEpsilonTensor(OpTest): self.attrs = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_mlu(self): @@ -135,7 +131,6 @@ class TestAdamWithEpsilonTensor(OpTest): class TestAdamOpWithSkipUpdate(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adam" @@ -188,7 +183,6 @@ class TestAdamOpWithSkipUpdate(OpTest): class TestAdamOpWithGlobalBetaPow(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adam" @@ -220,8 +214,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): attributes = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.attrs = {'use_global_beta_pow': True} @@ -231,7 +224,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([]), - 'Beta2PowOut': np.array([]) + 'Beta2PowOut': np.array([]), } def set_mlu(self): @@ -246,7 +239,6 @@ class TestAdamOpWithGlobalBetaPow(OpTest): class TestNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -261,9 +253,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -287,16 +279,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py index e60664f27a0e17c0a665f6cafe887009c9861cb5..c9b3b527e72dba25ace62e9fa3f5d3d34d5bcbb4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_adamw_op_mlu.py @@ -28,7 +28,6 @@ SEED = 2022 class TestAdamW(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adamw" @@ -52,7 +51,7 @@ class TestAdamW(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = { @@ -60,18 +59,19 @@ class TestAdamW(OpTest): 'beta1': beta1, 
'beta2': beta2, "coeff": 0.9, - "with_decay": True + "with_decay": True, } - param_out, moment1_out, \ - moment2_out = adamw_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adamw_step( + self.inputs, self.attrs + ) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_mlu(self): @@ -86,7 +86,6 @@ class TestAdamW(OpTest): class TestAdamOpWithSkipUpdate(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adamw" @@ -139,7 +138,6 @@ class TestAdamOpWithSkipUpdate(OpTest): class TestAdamOpWithoutDecay(OpTest): - def setUp(self): self.set_mlu() self.op_type = "adamw" @@ -192,7 +190,6 @@ class TestAdamOpWithoutDecay(OpTest): class TestNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -207,9 +204,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -233,16 +230,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_amp_check_finite_and_scale_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_amp_check_finite_and_scale_op_mlu.py index 3e113847b7a780a5875747563fc9b0431ada0d09..7a2e22a18ad1dadff02a7e9528e1ec4ae5f7db9e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_amp_check_finite_and_scale_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_amp_check_finite_and_scale_op_mlu.py @@ -25,7 +25,6 @@ SEED = 2022 class TestCheckFiniteAndUnscaleOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "check_finite_and_unscale" @@ -54,7 +53,6 @@ class TestCheckFiniteAndUnscaleOp(OpTest): class TestCheckFiniteAndUnscaleOpWithNan(TestCheckFiniteAndUnscaleOp): - def init_test_case(self): x = np.random.random((129, 129)).astype(self.dtype) x[128][128] = np.nan @@ -73,7 +71,6 @@ class TestCheckFiniteAndUnscaleOpWithNan(TestCheckFiniteAndUnscaleOp): class TestCheckFiniteAndUnscaleOpWithInf(TestCheckFiniteAndUnscaleOp): - def init_test_case(self): x = np.random.random((129, 129)).astype(self.dtype) x[128][128] = np.inf @@ -92,7 +89,6 @@ class TestCheckFiniteAndUnscaleOpWithInf(TestCheckFiniteAndUnscaleOp): class TestCheckFiniteAndUnscaleOpMultiInput(TestCheckFiniteAndUnscaleOp): - def init_test_case(self): x0 = np.random.random((129, 129)).astype(self.dtype) x1 = np.random.random((129, 129)).astype(self.dtype) @@ -106,7 +102,6 @@ class 
TestCheckFiniteAndUnscaleOpMultiInput(TestCheckFiniteAndUnscaleOp): class TestCheckFiniteAndUnscaleOpMultiInputWithNan(TestCheckFiniteAndUnscaleOp): - def init_test_case(self): x0 = np.random.random((129, 129)).astype(self.dtype) x0[128][128] = np.nan @@ -126,7 +121,6 @@ class TestCheckFiniteAndUnscaleOpMultiInputWithNan(TestCheckFiniteAndUnscaleOp): class TestCheckFiniteAndUnscaleOpMultiInputWithInf(TestCheckFiniteAndUnscaleOp): - def init_test_case(self): x0 = np.random.random((129, 129)).astype(self.dtype) x0[128][128] = np.nan diff --git a/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py index e8a41018743228c200905d4b5f728b96af3bf20a..81d854b4875e70b55d3db083ac0cb7ea3596249e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() class BaseTestCase(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.MLUPlace(0) @@ -51,7 +50,6 @@ class BaseTestCase(OpTest): class TestArgMaxSameValue1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dtype = 'float32' @@ -67,7 +65,6 @@ class TestArgMaxSameValue1(BaseTestCase): class TestArgMaxSameValue2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dtype = 'float16' @@ -84,7 +81,6 @@ class TestArgMaxSameValue2(BaseTestCase): # test argmax, dtype: float16 class TestArgMaxFloat16Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -93,7 +89,6 @@ class TestArgMaxFloat16Case1(BaseTestCase): class TestArgMaxFloat16Case2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -102,7 +97,6 @@ class TestArgMaxFloat16Case2(BaseTestCase): class TestArgMaxFloat16Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -111,7 +105,6 @@ class TestArgMaxFloat16Case3(BaseTestCase): class TestArgMaxFloat16Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -120,7 +113,6 @@ class TestArgMaxFloat16Case4(BaseTestCase): class TestArgMaxFloat16Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -129,7 +121,6 @@ class TestArgMaxFloat16Case5(BaseTestCase): class TestArgMaxFloat16Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -138,7 +129,6 @@ class TestArgMaxFloat16Case6(BaseTestCase): class TestArgMaxFloat16Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -147,35 +137,31 @@ class TestArgMaxFloat16Case7(BaseTestCase): class TestArgMaxFloat16Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'float16' self.axis = 0 class TestArgMaxFloat16Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float16' self.axis = 0 class TestArgMaxFloat16Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float16' self.axis = 0 # test argmax, dtype: float32 class TestArgMaxFloat32Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -184,7 +170,6 @@ class TestArgMaxFloat32Case1(BaseTestCase): class TestArgMaxFloat32Case2(BaseTestCase): - def initTestCase(self): 
self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -193,7 +178,6 @@ class TestArgMaxFloat32Case2(BaseTestCase): class TestArgMaxFloat32Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -202,7 +186,6 @@ class TestArgMaxFloat32Case3(BaseTestCase): class TestArgMaxFloat32Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -211,7 +194,6 @@ class TestArgMaxFloat32Case4(BaseTestCase): class TestArgMaxFloat32Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -220,7 +202,6 @@ class TestArgMaxFloat32Case5(BaseTestCase): class TestArgMaxFloat32Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -229,7 +210,6 @@ class TestArgMaxFloat32Case6(BaseTestCase): class TestArgMaxFloat32Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -238,34 +218,30 @@ class TestArgMaxFloat32Case7(BaseTestCase): class TestArgMaxFloat32Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'float32' self.axis = 0 class TestArgMaxFloat32Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float32' self.axis = 0 class TestArgMaxFloat32Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float32' self.axis = 0 class BaseTestComplex1_1(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.MLUPlace(0) @@ -283,7 +259,7 @@ class BaseTestComplex1_1(OpTest): self.inputs = {'X': self.x} self.attrs = { 'axis': self.axis, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { 'Out': np.argmax(self.x, axis=self.axis).astype("int32") @@ -294,7 +270,6 @@ class BaseTestComplex1_1(OpTest): class BaseTestComplex1_2(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.MLUPlace(0) @@ -312,7 +287,7 @@ class BaseTestComplex1_2(OpTest): self.inputs = {'X': self.x} self.attrs = { 'axis': self.axis, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { 'Out': np.argmax(self.x, axis=self.axis).astype("int32") @@ -323,7 +298,6 @@ class BaseTestComplex1_2(OpTest): class TestArgMaxAPI(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -335,7 +309,6 @@ class TestArgMaxAPI(unittest.TestCase): self.place = [paddle.MLUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2022) @@ -343,9 +316,9 @@ class TestArgMaxAPI(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input, axis=self.axis) paddle_output = paddle.argmax(tensor_input, axis=self.axis) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() for place in self.place: @@ -353,7 +326,6 @@ class TestArgMaxAPI(unittest.TestCase): class TestArgMaxAPI_2(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -366,20 +338,20 @@ class TestArgMaxAPI_2(unittest.TestCase): self.place = [paddle.MLUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2022) numpy_input = 
(np.random.random(self.dims)).astype(self.dtype) tensor_input = paddle.to_tensor(numpy_input) - numpy_output = np.argmax(numpy_input, - axis=self.axis).reshape(1, 4, 5) - paddle_output = paddle.argmax(tensor_input, - axis=self.axis, - keepdim=self.keep_dims) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + numpy_output = np.argmax(numpy_input, axis=self.axis).reshape( + 1, 4, 5 + ) + paddle_output = paddle.argmax( + tensor_input, axis=self.axis, keepdim=self.keep_dims + ) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -388,7 +360,6 @@ class TestArgMaxAPI_2(unittest.TestCase): class TestArgMaxAPI_3(unittest.TestCase): - def initTestCase(self): self.dims = (1, 9) self.dtype = 'float32' @@ -399,7 +370,6 @@ class TestArgMaxAPI_3(unittest.TestCase): self.place = [paddle.MLUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2022) @@ -407,9 +377,9 @@ class TestArgMaxAPI_3(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input).reshape([1]) paddle_output = paddle.argmax(tensor_input) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_argsort_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_argsort_op_mlu.py index 6e474ca604e8357bb65457afd4324471a006be1a..4f96e3b5ccdd8af5e18d9d4fba0a3838a5321a1c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_argsort_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_argsort_op_mlu.py @@ -26,9 +26,7 @@ SEED = 2022 def gen_test_class(dtype, axis, descending): - class TestArgsortOp(OpTest): - def setUp(self): np.random.seed(SEED) self.set_mlu() @@ -48,9 +46,11 @@ def gen_test_class(dtype, axis, descending): def get_output(self): if descending: self.indices = np.flip( - np.argsort(self.x, kind='heapsort', axis=axis), axis) + np.argsort(self.x, kind='heapsort', axis=axis), axis + ) self.sorted_x = np.flip( - np.sort(self.x, kind='heapsort', axis=axis), axis) + np.sort(self.x, kind='heapsort', axis=axis), axis + ) else: self.indices = np.argsort(self.x, kind='heapsort', axis=axis) self.sorted_x = np.sort(self.x, kind='heapsort', axis=axis) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_assign_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_assign_op_mlu.py index 91327f748459c7e0f6450869c9d79d0d18ef650c..7ee93fa0097c000e667e08f76ca87af802e0dec5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_assign_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_assign_op_mlu.py @@ -25,7 +25,6 @@ SEED = 2022 class TestAssign(OpTest): - def setUp(self): self.set_mlu() self.op_type = "assign" diff --git a/python/paddle/fluid/tests/unittests/mlu/test_assign_value_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_assign_value_op_mlu.py index 6f120560f9801f151f19ed1f79150210a34ee2b3..535ae9f8c86b2f41dac181a63c040e4c08d139a2 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_assign_value_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_assign_value_op_mlu.py @@ -29,7 +29,6 @@ numpy.random.seed(2022) class TestAssignValueMLUOp(op_test.OpTest): - def setUp(self): self.set_mlu() 
self.op_type = "assign_value" @@ -39,7 +38,8 @@ class TestAssignValueMLUOp(op_test.OpTest): self.attrs["shape"] = self.value.shape self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_( - self.value.dtype) + self.value.dtype + ) self.outputs = {"Out": self.value} def set_mlu(self): @@ -55,24 +55,22 @@ class TestAssignValueMLUOp(op_test.OpTest): class TestAssignValueMLUOp2(TestAssignValueMLUOp): - def init_data(self): self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class TestAssignValueMLUOp3(TestAssignValueMLUOp): - def init_data(self): self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class TestAssignValueMLUOp4(TestAssignValueMLUOp): - def init_data(self): - self.value = numpy.random.choice(a=[False, True], - size=(2, 5)).astype(numpy.bool) + self.value = numpy.random.choice(a=[False, True], size=(2, 5)).astype( + numpy.bool + ) self.attrs["bool_values"] = [int(v) for v in self.value.flat] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py index d97a1e074d6098594babf54313450853026ef72a..53b78e18f886125f88e227a52637d35d35795847 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py @@ -158,12 +158,20 @@ def _reference_grad(x, y_grad, scale, mean, var, epsilon, data_format): x = np.transpose(x, (0, 2, 3, 1)) y_grad = np.transpose(y_grad, (0, 2, 3, 1)) - x_grad = scale * (y_grad - np.mean(y_grad, axis=(0, 1, 2)) - - (x - mean) * np.mean(y_grad * - (x - mean), axis=(0, 1, 2)) / - (var + epsilon)) / np.sqrt(var + epsilon) - grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), - axis=(0, 1, 2)) + x_grad = ( + scale + * ( + y_grad + - np.mean(y_grad, axis=(0, 1, 2)) + - (x - mean) + * np.mean(y_grad * (x - mean), axis=(0, 1, 2)) + / (var + epsilon) + ) + / np.sqrt(var + epsilon) + ) + grad_scale = np.sum( + y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2) + ) grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W @@ -188,7 +196,6 @@ def create_or_get_tensor(scope, var_name, var, place): def set_output_grad(scope, outputs, place, feed_dict=None): - def __set_tensor__(name, data=None): out_tensor = scope.find_var(name).get_tensor() grad_tensor = scope.var(grad_var_name(name)).get_tensor() @@ -210,17 +217,15 @@ def set_output_grad(scope, outputs, place, feed_dict=None): class TestBatchNormOpInference(unittest.TestCase): - def setUp(self): self.dtype = np.float32 self.fuse_with_relu = False self.init_kernel_type() def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, atol=atol, err_msg=msg + ) def check_with_place(self, place, data_layout, dtype, shape): epsilon = 0.00001 @@ -246,34 +251,39 @@ class TestBatchNormOpInference(unittest.TestCase): mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) - y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, - epsilon, data_layout).astype(dtype) + y_out = _reference_testing( + x_val, scale_val, bias_val, mean, variance, epsilon, data_layout + ).astype(dtype) if self.fuse_with_relu: y_out = np.maximum(y_out, 0) scope = core.Scope() # create input - 
x_tensor = create_or_get_tensor(scope, "x_val", - OpTest.np_dtype_to_fluid_dtype(x_val), - place) + x_tensor = create_or_get_tensor( + scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place + ) scale_tensor = create_or_get_tensor( - scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val), - place) + scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val), place + ) bias_tensor = create_or_get_tensor( - scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place) - mean_tensor = create_or_get_tensor(scope, "mean", - OpTest.np_dtype_to_fluid_dtype(mean), - place) + scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place + ) + mean_tensor = create_or_get_tensor( + scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place + ) variance_tensor = create_or_get_tensor( - scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place) + scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place + ) # create output y_tensor = create_or_get_tensor(scope, "y_out", None, place) - saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None, - place) - saved_variance_tensor = create_or_get_tensor(scope, "saved_variance", - None, place) + saved_mean_tensor = create_or_get_tensor( + scope, "saved_mean", None, place + ) + saved_variance_tensor = create_or_get_tensor( + scope, "saved_variance", None, place + ) mean_out_tensor = mean_tensor variance_out_tensor = variance_tensor @@ -296,17 +306,25 @@ class TestBatchNormOpInference(unittest.TestCase): data_layout=data_layout, use_mkldnn=False, fuse_with_relu=self.fuse_with_relu, - epsilon=epsilon) + epsilon=epsilon, + ) batch_norm_op.run(scope, place) # check inference result - self.__assert_close(y_tensor, - y_out, - "inference output are different at " + str(place) + - ", " + data_layout + ", " + str(np.dtype(dtype)) + - str(np.array(y_tensor)) + str(y_out), - atol=1e-3) + self.__assert_close( + y_tensor, + y_out, + "inference output are different at " + + str(place) + + ", " + + data_layout + + ", " + + str(np.dtype(dtype)) + + str(np.array(y_tensor)) + + str(y_out), + atol=1e-3, + ) def test_check_output(self): places = [core.CPUPlace()] @@ -315,8 +333,9 @@ class TestBatchNormOpInference(unittest.TestCase): for place in places: for data_format in ["NCHW", "NHWC"]: - self.check_with_place(place, data_format, self.dtype, - [2, 3, 4, 5]) + self.check_with_place( + place, data_format, self.dtype, [2, 3, 4, 5] + ) self.check_with_place(place, data_format, self.dtype, [2, 3]) def init_kernel_type(self): @@ -324,7 +343,6 @@ class TestBatchNormOpInference(unittest.TestCase): class TestFP16BatchNormOpInference(TestBatchNormOpInference): - def setUp(self): self.dtype = np.float16 self.fuse_with_relu = False @@ -337,13 +355,13 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): for place in places: for data_format in ["NCHW", "NHWC"]: - self.check_with_place(place, data_format, self.dtype, - [2, 3, 4, 5]) + self.check_with_place( + place, data_format, self.dtype, [2, 3, 4, 5] + ) self.check_with_place(place, data_format, self.dtype, [2, 3]) class TestBatchNormOpTraining(unittest.TestCase): - def setUp(self): self.fuse_with_relu = False self.data_formats = ["NCHW", "NHWC"] @@ -357,27 +375,54 @@ class TestBatchNormOpTraining(unittest.TestCase): self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 
'scale@GRAD', + 'bias@GRAD', ] def __assert_close(self, tensor, np_array, msg, atol=1e-4): np.allclose(np.array(tensor), np_array, atol=atol) - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): # run forward - y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon, - data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. / np.sqrt(var_ref + epsilon) + y, saved_mean, var_ref = _reference_training( + x, scale, bias, epsilon, data_layout + ) + mean_out = saved_mean * (1.0 - momentum) + momentum * mean + variance_out = var_ref * (1.0 - momentum) + momentum * variance + saved_variance = 1.0 / np.sqrt(var_ref + epsilon) # run backward - x_grad, scale_grad, bias_grad = _reference_grad(x, y_grad, scale, - saved_mean, var_ref, - epsilon, data_layout) - - return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout + ) + + return ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) def set_mean_variance(self, scale_shape, x, data_layout): mean, variance = _cal_mean_variance(x, self.epsilon, data_layout) @@ -386,12 +431,11 @@ class TestBatchNormOpTraining(unittest.TestCase): # computing global mean/variance for one step if self.use_global_stats: mom = self.momentum - mean = mean * (1. - mom) + mom * mean_pre - variance = variance * (1. - mom) + mom * variance_pre + mean = mean * (1.0 - mom) + mom * mean_pre + variance = variance * (1.0 - mom) + mom * variance_pre return mean, variance def test_forward_backward(self): - def test_with_place(place, data_layout, shape): # attr epsilon = self.epsilon @@ -410,9 +454,27 @@ class TestBatchNormOpTraining(unittest.TestCase): y_grad = np.random.random_sample(shape).astype(np.float32) momentum_var = np.array([momentum]).astype(np.float32) - y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward( - x, y_grad, scale, bias, mean, variance, epsilon, momentum, - shape, data_layout) + ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) = self.ref_forward_backward( + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -421,8 +483,15 @@ class TestBatchNormOpTraining(unittest.TestCase): var_dict['bias@GRAD'] = bias_grad var_names = [ - 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean', - 'saved_variance', 'momentum_var' + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y', + 'saved_mean', + 'saved_variance', + 'momentum_var', ] ground_truth = {name: var_dict[name] for name in var_names} @@ -430,15 +499,17 @@ class TestBatchNormOpTraining(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = { "X": block.var('x'), "Scale": block.var('scale'), "Bias": block.var('bias'), "Mean": block.var('mean'), - 
"Variance": block.var('variance') + "Variance": block.var('variance'), } attrs = { "epsilon": epsilon, @@ -446,7 +517,7 @@ class TestBatchNormOpTraining(unittest.TestCase): "data_layout": data_layout, "use_mkldnn": False, "fuse_with_relu": self.fuse_with_relu, - "use_global_stats": self.use_global_stats + "use_global_stats": self.use_global_stats, } if self.use_momentum_variable: inputs['MomentumTensor'] = block.var('momentum_var') @@ -458,19 +529,22 @@ class TestBatchNormOpTraining(unittest.TestCase): "MeanOut": block.var('mean'), # share memory "VarianceOut": block.var('variance'), # share memory "SavedMean": block.var('saved_mean'), - "SavedVariance": block.var('saved_variance') + "SavedVariance": block.var('saved_variance'), } block.create_var(name="reserve_space", dtype='float32') outputs["ReserveSpace"] = block.var('reserve_space') - bn_op = block.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + bn_op = block.append_op( + type="batch_norm", + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - bn_op.desc, self.no_grad_set, []) + bn_op.desc, self.no_grad_set, [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -485,22 +559,28 @@ class TestBatchNormOpTraining(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in [ - 'x', 'scale', 'bias', 'mean', 'variance', - 'y@GRAD', 'momentum_var' - ] - }, - fetch_list=self.fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in [ + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y@GRAD', + 'momentum_var', + ] + }, + fetch_list=self.fetch_list, + ) for id, name in enumerate(self.fetch_list): if name == 'variance': - self.__assert_close(var_dict[name], - out[id], - name, - atol=1e-3) + self.__assert_close( + var_dict[name], out[id], name, atol=1e-3 + ) continue self.__assert_close(var_dict[name], out[id], name) print("op test forward passed: ", str(place), data_layout) @@ -519,7 +599,6 @@ class TestBatchNormOpTraining(unittest.TestCase): class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -527,19 +606,23 @@ class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): class TestBatchNormOpTrainingCase2(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = "1" class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set(['x@GRAD']) @@ -547,24 +630,33 @@ class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining): class TestBatchNormOpTrainingMomentumVariable(TestBatchNormOpTraining): - def init_test_case(self): self.use_momentum_variable = True self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 
'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = True self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def reference_grad(self, x, y_grad, scale, mean, var, epsilon, data_format): @@ -573,8 +665,9 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): y_grad = np.transpose(y_grad, (0, 2, 3, 1)) x_grad = scale * y_grad / np.sqrt(var + epsilon) - grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), - axis=(0, 1, 2)) + grad_scale = np.sum( + y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2) + ) grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W @@ -585,8 +678,19 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): return x_grad, grad_scale, grad_offset - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): if data_layout != "NCHW" and data_layout != "NHWC": raise ValueError("Unknown data order.") @@ -604,17 +708,27 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): mean_out = mean variance_out = variance - saved_variance = 1. / np.sqrt(variance + epsilon) + saved_variance = 1.0 / np.sqrt(variance + epsilon) # run backward x_grad, scale_grad, bias_grad = self.reference_grad( - x, y_grad, scale, mean, variance, epsilon, data_layout) + x, y_grad, scale, mean, variance, epsilon, data_layout + ) - return y, mean_out, variance_out, mean, saved_variance, x_grad, scale_grad, bias_grad + return ( + y, + mean_out, + variance_out, + mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) class TestBatchNormOpFreezeStatsAndScaleBiasTraining( - TestBatchNormOpFreezeStatsTraining): - + TestBatchNormOpFreezeStatsTraining +): def init_test_case(self): self.use_global_stats = True self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -622,12 +736,12 @@ class TestBatchNormOpFreezeStatsAndScaleBiasTraining( class TestBatchNormOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of batch_norm must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.batch_norm, x1) # the input dtype of batch_norm must be float16 or float32 or float64 @@ -637,13 +751,13 @@ class TestBatchNormOpError(unittest.TestCase): class TestDygraphBatchNormAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_norm = fluid.dygraph.BatchNorm(10) # the input of BatchNorm must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, batch_norm, x1) # the input dtype of BatchNorm must be float16 or float32 or float64 @@ -653,7 +767,6 @@ class TestDygraphBatchNormAPIError(unittest.TestCase): class TestDygraphBatchNormTrainableStats(unittest.TestCase): - def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_mlu(): @@ -666,7 +779,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() @@ -688,7 +802,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) @@ -702,7 +817,6 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): - def test_reservespace(self): with program_guard(Program(), Program()): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py index 27d25c6146e0ff3645a6762945385597d968b8a4..72e7ac89caf3664f8960762c9d3ea4ffe335683f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestBatchNorm(unittest.TestCase): - def test_name(self): places = [fluid.CPUPlace()] if core.is_compiled_with_mlu(): @@ -45,7 +44,7 @@ class TestBatchNorm(unittest.TestCase): if core.is_compiled_with_mlu(): places.append(fluid.MLUPlace(0)) for p in places: - #paddle.disable_static() + # paddle.disable_static() x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') @@ -99,7 +98,8 @@ class TestBatchNorm(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() @@ -116,19 +116,22 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(1.0), - trainable=False), + trainable=False, + ), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(0.0), - trainable=False), - trainable_statistics=trainable_statistics) + trainable=False, + ), + trainable_statistics=trainable_statistics, + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() def compute_v4(x): with fluid.dygraph.guard(p): - bn = paddle.nn.BatchNorm2D(shape[1], - weight_attr=False, - bias_attr=False) + bn = paddle.nn.BatchNorm2D( + shape[1], weight_attr=False, bias_attr=False + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() @@ -153,7 +156,8 @@ class TestBatchNorm(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) 
exe.run(fluid.default_startup_program()) @@ -176,7 +180,6 @@ class TestBatchNorm(unittest.TestCase): class TestBatchNormChannelLast(unittest.TestCase): - def setUp(self): self.original_dtyep = paddle.get_default_dtype() paddle.set_default_dtype("float32") @@ -199,10 +202,9 @@ class TestBatchNormChannelLast(unittest.TestCase): channel_first_x = paddle.transpose(x, [0, 2, 1]) y2 = net2(channel_first_x) y2 = paddle.transpose(y2, [0, 2, 1]) - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) def test_2d(self): for p in self.places: @@ -216,10 +218,9 @@ class TestBatchNormChannelLast(unittest.TestCase): channel_first_x = paddle.transpose(x, [0, 3, 1, 2]) y2 = net2(channel_first_x) y2 = paddle.transpose(y2, [0, 2, 3, 1]) - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) def test_3d(self): for p in self.places: @@ -233,10 +234,9 @@ class TestBatchNormChannelLast(unittest.TestCase): channel_first_x = paddle.transpose(x, [0, 4, 1, 2, 3]) y2 = net2(channel_first_x) y2 = paddle.transpose(y2, [0, 2, 3, 4, 1]) - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) # res = np.allclose(y1.numpy(), y2.numpy()) # if res == False: # np.savetxt("./y1.txt", y1.numpy().flatten(), fmt='%.10f', delimiter='\n') @@ -245,7 +245,6 @@ class TestBatchNormChannelLast(unittest.TestCase): class TestBatchNormUseGlobalStats(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_mlu(): @@ -264,11 +263,14 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): net1 = paddle.fluid.dygraph.BatchNorm( 6, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.0)), + initializer=fluid.initializer.Constant(1.0) + ), use_global_stats=self.use_global_stats, - trainable_statistics=self.trainable_statistics) + trainable_statistics=self.trainable_statistics, + ) net2 = paddle.nn.BatchNorm2D( - 6, use_global_stats=self.use_global_stats) + 6, use_global_stats=self.use_global_stats + ) net2.weight = net1.weight net2.bias = net1.bias if self.trainable_statistics == True: diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py index 3805d27a14f049b3ff899b971b8cd1a456271efa..58ccf798136f478784c40a5f3a3b2b14075486f7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py @@ -24,88 +24,76 @@ from op_test import OpTest paddle.enable_static() -def test_static_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float32') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float32' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - 
dtype='float32') - bce_loss = paddle.nn.loss.BCELoss(weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float32' + ) + bce_loss = paddle.nn.loss.BCELoss( + weight=weight, reduction=reduction + ) else: bce_loss = paddle.nn.loss.BCELoss(reduction=reduction) res = bce_loss(input, label) exe = paddle.static.Executor(place) - static_result = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + static_result = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result[0] -def test_static_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float32') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float32' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float32') - res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float32' + ) + res = paddle.nn.functional.binary_cross_entropy( + input, label, weight=weight, reduction=reduction + ) else: - res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + res = paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) exe = paddle.static.Executor(place) - static_result = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + static_result = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result[0] -def test_dygraph_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static() if weight_np is not None: weight = paddle.to_tensor(weight_np) @@ -118,25 +106,22 @@ def test_dygraph_layer(place, return dy_result -def test_dygraph_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static() input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) if weight_np is not None: weight = paddle.to_tensor(weight_np) - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + dy_res = paddle.nn.functional.binary_cross_entropy( + input, label, weight=weight, reduction=reduction + ) else: - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + dy_res = 
paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -144,11 +129,19 @@ def test_dygraph_functional(place, def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): if weight_np is None: - expected = -1 * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. - input_np)) + expected = -1 * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) else: - expected = -1 * weight_np * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. - input_np)) + expected = ( + -1 + * weight_np + * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) + ) if reduction == 'mean': expected = np.mean(expected) @@ -161,7 +154,6 @@ def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): class TestBCELoss(unittest.TestCase): - def test_BCELoss(self): input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32) @@ -169,84 +161,82 @@ class TestBCELoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - static_result = test_static_layer(place, input_np, label_np, - reduction) - dy_result = test_dygraph_layer(place, input_np, label_np, - reduction) + static_result = test_static_layer( + place, input_np, label_np, reduction + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction + ) expected = calc_bceloss(input_np, label_np, reduction) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected, rtol=1e-6) static_functional = test_static_functional( - place, input_np, label_np, reduction) - dy_functional = test_dygraph_functional(place, input_np, - label_np, reduction) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-6) + place, input_np, label_np, reduction + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-6 + ) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCELoss_weight(self): - input_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float32) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float32) + input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float32 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float32 + ) weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32) place = fluid.MLUPlace(0) for reduction in ['sum', 'mean', 'none']: - static_result = test_static_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - dy_result = test_dygraph_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - expected = calc_bceloss(input_np, - label_np, - reduction, - weight_np=weight_np) + static_result = test_static_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + expected = calc_bceloss( + input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected, 
rtol=1e-6) - static_functional = test_static_functional(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - dy_functional = test_dygraph_functional(place, - input_np, - label_np, - reduction, - weight_np=weight_np) + static_functional = test_static_functional( + place, input_np, label_np, reduction, weight_np=weight_np + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-6) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCELoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.loss.BCELoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction" + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.binary_cross_entropy, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.binary_cross_entropy, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() def bce_loss(input, label): - return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input)) + return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input)) class TestBceLossOp(OpTest): - def setUp(self): self.init_test_case() self.op_type = "bce_loss" @@ -270,13 +260,11 @@ class TestBceLossOp(OpTest): class TestBceLossOpCase1(TestBceLossOp): - def init_test_case(self): self.shape = [2, 3, 4, 5] class TestBceLossOpCase2(TestBceLossOp): - def init_test_case(self): self.shape = [2, 3, 20] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py index 6b0b91cce9383bac185c01f65fd3a170d49ac2ed..0aafe99276fb57d5bdf140677c59f1894f825da5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py @@ -20,43 +20,51 @@ import sys sys.path.append('..') from op_test import OpTest -from test_bce_with_logits_loss import call_bce_layer, call_bce_functional, test_dygraph, calc_bce_with_logits_loss - - -def test_static(place, - logit_np, - label_np, - weight_np=None, - reduction='mean', - pos_weight_np=None, - functional=False): +from test_bce_with_logits_loss import ( + call_bce_layer, + call_bce_functional, + test_dygraph, + calc_bce_with_logits_loss, +) + + +def test_static( + place, + logit_np, + label_np, + weight_np=None, + reduction='mean', + pos_weight_np=None, + functional=False, +): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data(name='logit', - shape=logit_np.shape, - dtype='float32') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float32') + logit = paddle.fluid.data( + name='logit', shape=logit_np.shape, dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float32' + ) feed_dict = {"logit": logit_np, "label": label_np} pos_weight = None weight = None if pos_weight_np is not None: - pos_weight = paddle.fluid.data(name='pos_weight', - shape=pos_weight_np.shape, - dtype='float32') + pos_weight = 
paddle.fluid.data( + name='pos_weight', shape=pos_weight_np.shape, dtype='float32' + ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float32') + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float32' + ) feed_dict["weight"] = weight_np if functional: - res = call_bce_functional(logit, label, weight, reduction, - pos_weight) + res = call_bce_functional( + logit, label, weight, reduction, pos_weight + ) else: res = call_bce_layer(logit, label, weight, reduction, pos_weight) exe = paddle.static.Executor(place) @@ -68,7 +76,6 @@ paddle.enable_static() class TestBCEWithLogitsLoss(unittest.TestCase): - def test_BCEWithLogitsLoss(self): logit_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32) @@ -76,125 +83,150 @@ class TestBCEWithLogitsLoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - static_result = test_static(place, - logit_np, - label_np, - reduction=reduction) - dy_result = test_dygraph(place, - logit_np, - label_np, - reduction=reduction) - expected = calc_bce_with_logits_loss(logit_np, label_np, - reduction) + static_result = test_static( + place, logit_np, label_np, reduction=reduction + ) + dy_result = test_dygraph( + place, logit_np, label_np, reduction=reduction + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction + ) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected, rtol=1e-6) - static_functional = test_static(place, - logit_np, - label_np, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - reduction=reduction, - functional=True) - - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-6) + static_functional = test_static( + place, + logit_np, + label_np, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + reduction=reduction, + functional=True, + ) + + np.testing.assert_allclose( + static_functional, expected, rtol=1e-6 + ) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCEWithLogitsLoss_weight(self): - logit_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float32) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float32) + logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float32 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float32 + ) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float32) place = fluid.MLUPlace(0) for reduction in ['sum', 'mean', 'none']: - static_result = test_static(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction) - dy_result = test_dygraph(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction) - expected = calc_bce_with_logits_loss(logit_np, - label_np, - reduction, - weight_np=weight_np) + static_result = test_static( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + ) + dy_result = test_dygraph( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction, 
weight_np=weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected, rtol=1e-6) - static_functional = test_static(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction, - functional=True) + static_functional = test_static( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-6) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCEWithLogitsLoss_pos_weight(self): - logit_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float32) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float32) + logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float32 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float32 + ) pos_weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float32) place = fluid.MLUPlace(0) reduction = "mean" - static_result = test_static(place, logit_np, label_np, weight_np, - reduction, pos_weight_np) - dy_result = test_dygraph(place, logit_np, label_np, weight_np, - reduction, pos_weight_np) - expected = calc_bce_with_logits_loss(logit_np, label_np, reduction, - weight_np, pos_weight_np) + static_result = test_static( + place, logit_np, label_np, weight_np, reduction, pos_weight_np + ) + dy_result = test_dygraph( + place, logit_np, label_np, weight_np, reduction, pos_weight_np + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction, weight_np, pos_weight_np + ) np.testing.assert_allclose(static_result, expected) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected) - static_functional = test_static(place, - logit_np, - label_np, - weight_np, - reduction, - pos_weight_np, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - weight_np, - reduction, - pos_weight_np, - functional=True) + static_functional = test_static( + place, + logit_np, + label_np, + weight_np, + reduction, + pos_weight_np, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + weight_np, + reduction, + pos_weight_np, + functional=True, + ) np.testing.assert_allclose(static_functional, expected) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected) def test_BCEWithLogitsLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.BCEWithLogitsLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.BCEWithLogitsLoss, + reduction="unsupport reduction", + ) logit = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.binary_cross_entropy_with_logits, - logit=logit, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.binary_cross_entropy_with_logits, + logit=logit, + label=label, + 
reduction="unsupport reduction", + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py index b4ae7cf6da0fafb3ed100e080fac25d030dc4d6b..912578cda8c5c373f95debe16170a116b0674c5f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bilinear_interp_v2_op_mlu.py @@ -26,16 +26,18 @@ import paddle paddle.enable_static() -def bilinear_interp_np(input, - out_h, - out_w, - scale_w=0, - scale_h=0, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): +def bilinear_interp_np( + input, + out_h, + out_w, + scale_w=0, + scale_h=0, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -49,7 +51,7 @@ def bilinear_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: @@ -57,7 +59,7 @@ def bilinear_interp_np(input, else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -68,37 +70,40 @@ def bilinear_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for i in range(out_h): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (i + 0.5) - 0.5) else: h = int(ratio_h * i) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: h1lambda = ratio_h * i - h h2lambda = 1.0 - h1lambda for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + - w1lambda*input[:, :, h, w+wid]) + \ - h1lambda*(w2lambda*input[:, :, h+hid, w] + - w1lambda*input[:, :, h+hid, w+wid]) + out[:, :, i, j] = h2lambda * ( + w2lambda * input[:, :, h, w] + + w1lambda * input[:, :, h, w + wid] + ) + h1lambda * ( + w2lambda * input[:, :, h + hid, w] + + w1lambda * input[:, :, h + hid, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -107,7 +112,6 @@ def bilinear_interp_np(input, class TestBilinearInterpOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -129,7 +133,7 @@ class TestBilinearInterpOp(OpTest): scale_w = 0 if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: scale_h = scale_w = float(self.scale) if isinstance(self.scale, list) and len(self.scale) == 1: scale_w = scale_h = self.scale[0] @@ -142,10 +146,18 @@ class TestBilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, 
self.actual_shape, - self.align_corners, self.align_mode, - self.data_layout) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -158,12 +170,12 @@ class TestBilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: self.scale = [self.scale] if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] @@ -181,44 +193,41 @@ class TestBilinearInterpOp(OpTest): self.input_shape = [2, 3, 5, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 @@ -227,46 +236,42 @@ class TestBilinearInterpCase3(TestBilinearInterpOp): class TestBilinearInterpCase4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase5(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase6(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 33]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase7(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] @@ -278,38 +283,35 @@ class TestBilinearInterpCase7(TestBilinearInterpOp): class TestBilinearInterpSame(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpActualShape(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpDataLayout(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 5, 5, 3] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -317,52 +319,46 @@ class TestBilinearInterpDataLayout(TestBilinearInterpOp): class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestBilinearInterpWithMethod2(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestBilinearInterpWithMethod3(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestBilinearInterpScale1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 1. + self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -374,7 +370,6 @@ class TestBilinearInterpScale3(TestBilinearInterpOp): class TestBilinearInterpScale4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -386,7 +381,6 @@ class TestBilinearInterpScale4(TestBilinearInterpOp): class TestBilinearInterpZero(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -398,7 +392,6 @@ class TestBilinearInterpZero(TestBilinearInterpOp): class TestBilinearInterpOp_attr_tensor(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -438,8 +431,9 @@ class TestBilinearInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -451,9 +445,16 @@ class TestBilinearInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -467,33 +468,31 @@ class TestBilinearInterpOp_attr_tensor(OpTest): self.input_shape = [2, 3, 5, 5] self.out_h = 3 self.out_w = 3 - 
self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = True # out_size is a 1-D tensor class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = True # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.shape_by_1Dtensor = True @@ -501,7 +500,6 @@ class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] @@ -514,23 +512,22 @@ class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): class TestBilinearInterpOpAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12]) out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim]) out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_bilinear(x, - out_shape=[4, 4], - actual_shape=actual_size) + out4 = fluid.layers.resize_bilinear( + x, out_shape=[4, 4], actual_shape=actual_size + ) out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -545,29 +542,30 @@ class TestBilinearInterpOpAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = bilinear_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=True) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = bilinear_interp_np( + x_data, out_h=12, out_w=12, align_corners=True + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-6) class TestBilinearInterpOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_mlu(): place = paddle.device.MLUPlace(0) else: @@ -575,21 +573,19 @@ class TestBilinearInterpOpAPI_dy(unittest.TestCase): with fluid.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - 
out = interpolate(x=input_x, - size=[12, 12], - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, size=[12, 12], mode="bilinear", align_corners=False + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6) class TestBilinearInterpOpAPI_dy2(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_mlu(): place = paddle.device.MLUPlace(0) else: @@ -599,21 +595,19 @@ class TestBilinearInterpOpAPI_dy2(unittest.TestCase): size_np = np.array([12, 12]).astype("int64") input_x = paddle.to_tensor(input_data) size = paddle.to_tensor(size_np) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - size=size, - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, size=size, mode="bilinear", align_corners=False + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6) class TestBilinearInterpOpAPI_dy3(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_mlu(): place = paddle.device.MLUPlace(0) else: @@ -623,21 +617,22 @@ class TestBilinearInterpOpAPI_dy3(unittest.TestCase): size_1 = np.array([12]).astype("int64") input_x = paddle.to_tensor(input_data) size = paddle.to_tensor(size_1) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - size=[size, size], - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + size=[size, size], + mode="bilinear", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6) class TestBilinearInterpOpAPI_dy4(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_mlu(): place = paddle.device.MLUPlace(0) else: @@ -647,14 +642,15 @@ class TestBilinearInterpOpAPI_dy4(unittest.TestCase): scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) scale = paddle.to_tensor(scale_np) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - scale_factor=scale, - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + scale_factor=scale, + mode="bilinear", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py index 4b75767efe6ce4d94d6b9d59c32c9479af29f4f3..a5c5f7b8a2f2bc70c2c99efecf3433adc43eaeb9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_cast_op_mlu.py @@ -28,14 +28,13 @@ paddle.enable_static() class TestCastOpFp32ToFp16(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float16')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -47,14 +46,13 @@ class 
TestCastOpFp32ToFp16(OpTest): class TestCastOpFp16ToFp32(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float16')} self.outputs = {'Out': ipt.astype('float32')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP16), - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -66,14 +64,13 @@ class TestCastOpFp16ToFp32(OpTest): class TestCastOpFp32ToFp64(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float64')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.FP64) + 'out_dtype': int(core.VarDesc.VarType.FP64), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -85,14 +82,13 @@ class TestCastOpFp32ToFp64(OpTest): class TestCastOpInt32ToInt32(OpTest): - def setUp(self): ipt = np.random.randint(1000, size=(10, 10)) self.inputs = {'X': ipt.astype('int32')} self.outputs = {'Out': ipt.astype('int32')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.INT32), - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -103,14 +99,13 @@ class TestCastOpInt32ToInt32(OpTest): class TestCastOpInt32ToFp32(OpTest): - def setUp(self): ipt = np.random.randint(1000, size=[10, 10]) self.inputs = {'X': ipt.astype('int32')} self.outputs = {'Out': ipt.astype('float32')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.INT32), - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -121,14 +116,13 @@ class TestCastOpInt32ToFp32(OpTest): class TestCastOpInt16ToFp64(OpTest): - def setUp(self): ipt = np.random.randint(1000, size=[10, 10]) self.inputs = {'X': ipt.astype('int16')} self.outputs = {'Out': ipt.astype('int64')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.INT16), - 'out_dtype': int(core.VarDesc.VarType.INT64) + 'out_dtype': int(core.VarDesc.VarType.INT64), } self.op_type = 'cast' self.place = paddle.device.MLUPlace(0) @@ -139,12 +133,12 @@ class TestCastOpInt16ToFp64(OpTest): class TestCastOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of cast_op must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.MLUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.MLUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32') diff --git a/python/paddle/fluid/tests/unittests/mlu/test_clip_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_clip_op_mlu.py index 12c5dbc2cd217dffa681159d81e2ee62079c445d..c306ab96b61db24ad646b95ce325568df2340faf 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_clip_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_clip_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestClipOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -73,7 +72,6 @@ class TestClipOp(OpTest): class TestCase1(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (8, 16, 8) @@ -82,7 +80,6 @@ class TestCase1(TestClipOp): class TestCase2(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (8, 16) @@ -91,7 +88,6 @@ class TestCase2(TestClipOp): class TestCase3(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 16) @@ -100,7 +96,6 @@ class TestCase3(TestClipOp): class TestCase4(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 8) @@ -111,7 +106,6 @@ class TestCase4(TestClipOp): class TestCase5(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 16) @@ -120,7 +114,6 @@ class TestCase5(TestClipOp): class TestCase6(TestClipOp): - def initTestCase(self): self.dtype = np.float16 self.shape = (4, 8, 8) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_coalesce_tensor_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_coalesce_tensor_op_mlu.py index 8eacd633db8bd4f8173c8d7d60561d411fae3f9d..97bc47971f475acd1e433f7253672aa006c73c5c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_coalesce_tensor_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_coalesce_tensor_op_mlu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestAllocContinuousSpace(OpTest): - def setUp(self): self.op_type = "coalesce_tensor" self.dtype, self.fluid_dtype = self.init_dtype() @@ -36,7 +35,8 @@ class TestAllocContinuousSpace(OpTest): self.set_constant = attrs["set_constant"] self.Inputs = self.init_input() self.Outputs, self.FusedOutput = self.init_output( - self.Inputs, self.set_constant, self.constant) + self.Inputs, self.set_constant, self.constant + ) self.inputs = {'Input': self.Inputs} self.attrs = attrs self.outputs = {'Output': self.Outputs, 'FusedOutput': self.FusedOutput} @@ -59,7 +59,7 @@ class TestAllocContinuousSpace(OpTest): "copy_data": True, "set_constant": False, "constant": 0.0, - "dtype": self.fluid_dtype + "dtype": self.fluid_dtype, } def init_output(self, input_list, set_constant, constant): @@ -76,32 +76,36 @@ class TestAllocContinuousSpace(OpTest): coalesce_tensor_var = np.concatenate([input for input in inputs]) if set_constant: coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant - outputs = [(out[0], - np.ones(out[1].shape).astype(self.dtype) * constant) - for out in outputs] + outputs = [ + (out[0], np.ones(out[1].shape).astype(self.dtype) * constant) + for out in outputs + ] return outputs, coalesce_tensor_var def test_check_output(self): - self.check_output_with_place(place=paddle.device.MLUPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=paddle.device.MLUPlace(0), + 
no_check_set=["FusedOutput"], + atol=1e-5, + ) class TestAllocContinuousSpace2(TestAllocContinuousSpace): - def init_attr(self): return { "copy_data": False, "set_constant": True, "constant": 5, "dtype": self.fluid_dtype, - "user_defined_size_of_dtype": 2 + "user_defined_size_of_dtype": 2, } def test_check_output(self): - self.check_output_with_place(place=paddle.device.MLUPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=paddle.device.MLUPlace(0), + no_check_set=["FusedOutput"], + atol=1e-5, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather.py index 236eb8f1003b867bf31098e4bf5df5d4811dc8e9..d4c7779807aa54eb8e4ce5e50bfa1fcb4310401b 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather.py @@ -23,32 +23,36 @@ paddle.enable_static() class TestCAllgatherOp(TestDistBase): - def _setup_config(self): pass def test_allgather_fp32(self): - self.check_with_place("collective_allgather_op.py", "allgather", - "float32") + self.check_with_place( + "collective_allgather_op.py", "allgather", "float32" + ) def test_allgather_fp16(self): - self.check_with_place("collective_allgather_op.py", "allgather", - "float16") + self.check_with_place( + "collective_allgather_op.py", "allgather", "float16" + ) def test_allgather_int32(self): - self.check_with_place("collective_allgather_op.py", "allgather", - "int32") + self.check_with_place( + "collective_allgather_op.py", "allgather", "int32" + ) def test_allgather_int16(self): - self.check_with_place("collective_allgather_op.py", "allgather", - "int16") + self.check_with_place( + "collective_allgather_op.py", "allgather", "int16" + ) def test_allgather_int8(self): self.check_with_place("collective_allgather_op.py", "allgather", "int8") def test_allgather_uint8(self): - self.check_with_place("collective_allgather_op.py", "allgather", - "uint8") + self.check_with_place( + "collective_allgather_op.py", "allgather", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather_api_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather_api_mlu.py index 42b8f92850d3d824ebce74e1d67e1673f3ead121..dfd29e09a23953239433f1a368ecfcedc0ae157f 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather_api_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allgather_api_mlu.py @@ -22,21 +22,23 @@ paddle.enable_static() class TestCollectiveAllgatherAPI(TestDistBase): - def _setup_config(self): pass def test_allgather_cncl_fp16(self): - self.check_with_place("collective_allgather_api.py", "allgather", - "float16") + self.check_with_place( + "collective_allgather_api.py", "allgather", "float16" + ) def test_allgather_cncl_fp32(self): - self.check_with_place("collective_allgather_api.py", "allgather", - "float32") + self.check_with_place( + "collective_allgather_api.py", "allgather", "float32" + ) def test_allgather_cncl_int32(self): - self.check_with_place("collective_allgather_api.py", "allgather", - "int32") + self.check_with_place( + "collective_allgather_api.py", "allgather", "int32" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_api_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_api_mlu.py index 
74ab7c76045f7c88b3be3c8371fae1a9cb6ccd50..f7d7d140579a04b3245b43089d57d65818c58447 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_api_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_api_mlu.py @@ -22,21 +22,23 @@ paddle.enable_static() class TestCollectiveAllreduceAPI(TestDistBase): - def _setup_config(self): pass def test_allreduce_cncl_fp16(self): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "float16") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "float16" + ) def test_allreduce_cncl_fp32(self): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "float32") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "float32" + ) def test_allreduce_cncl_int32(self): - self.check_with_place("collective_allreduce_api.py", "allreduce", - "int32") + self.check_with_place( + "collective_allreduce_api.py", "allreduce", "int32" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_max.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_max.py index ed05fed8db091a4760df7f3b9f0c588004ad6c15..fe6cd747106ecdd12fe8d0742b47be070ab3ee49 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_max.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_max.py @@ -23,33 +23,38 @@ paddle.enable_static() class TestCAllreduceOp(TestDistBase): - def _setup_config(self): pass def test_allreduce_max_fp32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "float32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "float32" + ) def test_allreduce_max_fp16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "float16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "float16" + ) def test_allreduce_max_int32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "int32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "int32" + ) def test_allreduce_max_int16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "int16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "int16" + ) def test_allreduce_max_int8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "int8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "int8" + ) def test_allreduce_max_uint8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_max", - "uint8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_max", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_min.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_min.py index 2b79a7caafea583b993ab1766e8cfae908be83fb..c3b52a6112645a67efeba2f11872465360055b24 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_min.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_min.py @@ -23,33 +23,38 @@ paddle.enable_static() class TestCAllreduceOp(TestDistBase): - def _setup_config(self): pass def test_allreduce_min_fp32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "float32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "float32" + ) def 
test_allreduce_min_fp16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "float16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "float16" + ) def test_allreduce_min_int32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "int32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "int32" + ) def test_allreduce_min_int16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "int16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "int16" + ) def test_allreduce_min_int8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "int8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "int8" + ) def test_allreduce_min_uint8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_min", - "uint8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_min", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_prod.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_prod.py index 96bee38af50048a0f2fd4961cb56fac38dc6ba25..a10275553c8921d76c7e5ee3882a8013b8ef3c5c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_prod.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_prod.py @@ -23,33 +23,38 @@ paddle.enable_static() class TestCAllreduceOp(TestDistBase): - def _setup_config(self): pass def test_allreduce_prod_fp32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "float32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "float32" + ) def test_allreduce_prod_fp16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "float16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "float16" + ) def test_allreduce_prod_int32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "int32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "int32" + ) def test_allreduce_prod_int16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "int16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "int16" + ) def test_allreduce_prod_int8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "int8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "int8" + ) def test_allreduce_prod_uint8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_prod", - "uint8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_prod", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_sum.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_sum.py index 302c51beaaffd7706af1da32179360650c660d05..353e41b094ab451596cbf195c1cfdfec06622871 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_sum.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce_sum.py @@ -23,33 +23,38 @@ paddle.enable_static() class TestCAllreduceOp(TestDistBase): - def _setup_config(self): pass def test_allreduce_sum_fp32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "float32") + self.check_with_place( + "collective_allreduce_op.py", 
"allreduce_sum", "float32" + ) def test_allreduce_sum_fp16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "float16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_sum", "float16" + ) def test_allreduce_sum_int32(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "int32") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_sum", "int32" + ) def test_allreduce_sum_int16(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "int16") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_sum", "int16" + ) def test_allreduce_sum_int8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "int8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_sum", "int8" + ) def test_allreduce_sum_uint8(self): - self.check_with_place("collective_allreduce_op.py", "allreduce_sum", - "uint8") + self.check_with_place( + "collective_allreduce_op.py", "allreduce_sum", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py index b405bdda8d78ea38d21d7d3043ac2df74019848f..26e7c2972b3311acc6cdc555f44f3a637e408327 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_api_base_mlu.py @@ -40,10 +40,10 @@ def DataTypeCast(date_type): class TestCollectiveAPIRunnerBase(object): - def get_model(self, train_prog, startup_prog, rank, indata=None): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def run_trainer(self, args): train_prog = fluid.Program() @@ -65,12 +65,12 @@ class TestCollectiveAPIRunnerBase(object): fetch_list = [] for elem in result: fetch_list.append(elem.name) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=fetch_list) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=fetch_list + ) else: out = self.get_model(train_prog, startup_prog, rank, indata) - #print(out, sys.stderr) + # print(out, sys.stderr) sys.stdout.buffer.write(pickle.dumps(out)) @@ -95,19 +95,20 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -120,13 +121,13 @@ class TestDistBase(unittest.TestCase): def _run_cluster(self, model_file, envs): worker_endpoints = self._ps_endpoints.split(",") w0_ep, w1_ep = worker_endpoints - #print("w0_ep:",w0_ep," w1_ep:",w1_ep) + # print("w0_ep:",w0_ep," w1_ep:",w1_ep) env0 = { "FLAGS_selected_mlus": "0", "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -134,9 +135,9 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) if os.getenv('WITH_COVERAGE', 'OFF') == 'ON': @@ -147,16 +148,20 @@ class TestDistBase(unittest.TestCase): tr1_cmd = tr_cmd % (self._python_interp, model_file) tr0_pipe = open("/tmp/tr0_err_%d.log" % os.getpid(), "w") tr1_pipe = open("/tmp/tr1_err_%d.log" % os.getpid(), "w") - #print(tr0_cmd) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) - - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + # print(tr0_cmd) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) + + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -169,17 +174,23 @@ class TestDistBase(unittest.TestCase): sys.stderr.write('trainer 0 stderr file: %s\n' % f.read()) with open("/tmp/tr1_err_%d.log" % os.getpid(), "r") as f: sys.stderr.write('trainer 1 stderr file: %s\n' % f.read()) - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid - - def check_with_place(self, - model_file, - col_type, - data_type, - path_id="0", - static_mode="1", - check_error_log=False, - need_envs={}): + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) + + def check_with_place( + self, + model_file, + col_type, + data_type, + path_id="0", + static_mode="1", + check_error_log=False, + need_envs={}, + ): required_envs = { "FLAGS_fraction_of_gpu_memory_to_use": "0.15", 
"FLAGS_eager_delete_tensor_gb": "0.0", @@ -193,7 +204,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_WITH_GLOO": '0', "BACKEND": "cncl", "PATH_ID": path_id, - "DATA_TYPE": data_type + "DATA_TYPE": data_type, } required_envs.update(need_envs) if check_error_log: @@ -201,7 +212,8 @@ class TestDistBase(unittest.TestCase): required_envs["GLOG_logtostderr"] = "1" required_envs["GLOO_LOG_LEVEL"] = "TRACE" tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) np_data_type = DataTypeCast(data_type) np.random.seed(pid0) input1 = np.random.random((10, 1000)).astype(np_data_type) @@ -213,14 +225,12 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out[0], need_result) elif col_type == "allreduce": need_result = input1 + input2 - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "reduce": need_result = input1 + input2 np.testing.assert_allclose(tr0_out[0], need_result) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py index c2e6f63f4d5d954bcc121ea12ffa8d6bcbe92570..ab089aa26b7741f8869712d03a5ccd6002027db7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py @@ -52,10 +52,10 @@ def DataTypeCast(date_type): class TestCollectiveRunnerBase(object): - def get_model(self, train_prog, startup_prog, col_type): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def wait_server_ready(self, endpoints): while True: @@ -63,13 +63,15 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -77,44 +79,51 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break + # endpoints should be ["ip1:port1","ip2:port2"] -#endpoints should be ["ip1:port1","ip2:port2"] - - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - cncl_id_var = block.create_var(name=nameGen.generate('cncl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - - block.append_op(type='c_gen_cncl_id', - inputs={}, - outputs={'Out': cncl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - - block.append_op(type='c_comm_init', - inputs={'X': cncl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': self.global_ring_id - }) + cncl_id_var = block.create_var( + name=nameGen.generate('cncl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + + block.append_op( + type='c_gen_cncl_id', + inputs={}, + outputs={'Out': cncl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + + block.append_op( + type='c_comm_init', + inputs={'X': cncl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': self.global_ring_id, + }, + ) def run_trainer(self, args): train_prog = fluid.Program() @@ -123,8 +132,9 @@ class TestCollectiveRunnerBase(object): rank = args["trainerid"] current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) self.rank = rank result = self.get_model(train_prog, startup_prog, args["col_type"]) device_id = int(os.getenv("FLAGS_selected_mlus", "0")) @@ -134,9 +144,9 @@ class TestCollectiveRunnerBase(object): np.random.seed(os.getpid()) np_data_type = DataTypeCast(args["data_type"]) indata = np.random.random((10, 1000)).astype(np_data_type) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=[result.name]) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=[result.name] + ) sys.stdout.buffer.write(pickle.dumps(out)) @@ -159,19 +169,20 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 
self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -190,7 +201,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -198,9 +209,9 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) tr_cmd = "%s %s" @@ -209,15 +220,19 @@ class TestDistBase(unittest.TestCase): tr0_pipe = open("/tmp/tr0_err.log", "wb") tr1_pipe = open("/tmp/tr1_err.log", "wb") - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -226,15 +241,21 @@ class TestDistBase(unittest.TestCase): # close trainer file tr0_pipe.close() tr1_pipe.close() - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid - - def check_with_place(self, - model_file, - col_type, - data_type, - check_error_log=False, - need_envs={}): + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) + + def check_with_place( + self, + model_file, + col_type, + data_type, + check_error_log=False, + need_envs={}, + ): required_envs = { "FLAGS_eager_delete_tensor_gb": "0.0", "PATH": os.getenv("PATH"), @@ -250,7 +271,8 @@ class TestDistBase(unittest.TestCase): required_envs["GLOG_v"] = "3" required_envs["GLOG_logtostderr"] = "1" tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) np_data_type = DataTypeCast(data_type) np.random.seed(pid0) input1 = np.random.random((10, 1000)).astype(np_data_type) @@ -262,44 +284,36 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out[0], need_result) elif col_type == "allreduce_sum": need_result = input1 + input2 - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "allreduce_prod": need_result = input1 * input2 - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], 
need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "allreduce_max": need_result = np.maximum(input1, input2) - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "allreduce_min": need_result = np.minimum(input1, input2) - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "reduce_sum": need_result = input1 + input2 np.testing.assert_allclose(tr1_out[0], need_result) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast.py index 723a14edb932e9daf4dc5557be6a90611b6e79f8..d06a599243e5eb0774d248a2434cc30cde675de8 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast.py @@ -23,32 +23,36 @@ paddle.enable_static() class TestCBroadcastOp(TestDistBase): - def _setup_config(self): pass def test_broadcast_fp32(self): - self.check_with_place("collective_broadcast_op.py", "broadcast", - "float32") + self.check_with_place( + "collective_broadcast_op.py", "broadcast", "float32" + ) def test_broadcast_fp16(self): - self.check_with_place("collective_broadcast_op.py", "broadcast", - "float16") + self.check_with_place( + "collective_broadcast_op.py", "broadcast", "float16" + ) def test_broadcast_int32(self): - self.check_with_place("collective_broadcast_op.py", "broadcast", - "int32") + self.check_with_place( + "collective_broadcast_op.py", "broadcast", "int32" + ) def test_broadcast_int16(self): - self.check_with_place("collective_broadcast_op.py", "broadcast", - "int16") + self.check_with_place( + "collective_broadcast_op.py", "broadcast", "int16" + ) def test_broadcast_int8(self): self.check_with_place("collective_broadcast_op.py", "broadcast", "int8") def test_broadcast_uint8(self): - self.check_with_place("collective_broadcast_op.py", "broadcast", - "uint8") + self.check_with_place( + "collective_broadcast_op.py", "broadcast", "uint8" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast_api_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast_api_mlu.py index 0619c5507142e2c5ec48407d07d74316ee9d219d..d40da0f1db66a5338da2ee5fc87a139f5d6c329e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast_api_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_broadcast_api_mlu.py @@ -22,21 +22,23 @@ paddle.enable_static() class TestCollectiveBroadcastAPI(TestDistBase): - def _setup_config(self): pass def test_broadcast_cncl_fp16(self): - self.check_with_place("collective_broadcast_api.py", "broadcast", - "float16") + self.check_with_place( + "collective_broadcast_api.py", "broadcast", "float16" + ) def test_broadcast_cncl_fp32(self): - self.check_with_place("collective_broadcast_api.py", "broadcast", - "float32") + self.check_with_place( + "collective_broadcast_api.py", "broadcast", "float32" + ) def 
test_broadcast_cncl_int32(self): - self.check_with_place("collective_broadcast_api.py", "broadcast", - "int32") + self.check_with_place( + "collective_broadcast_api.py", "broadcast", "int32" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py index d76429196cc28d6b2ff80e049ef1f0083cf0d00d..69c1ebaf43cd8ff7d416146275625bae1ca2d898 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_api_mlu.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveReduceAPI(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py index fd84b1c1eab238a700913ae6df5074cde0be6bbf..ac07b352e0cb651c74214332e729245d6eac014d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_max.py @@ -23,17 +23,18 @@ paddle.enable_static() class TestCReduceOp(TestDistBase): - def _setup_config(self): pass def test_reduce_max_fp32(self): - self.check_with_place("collective_reduce_op.py", "reduce_max", - "float32") + self.check_with_place( + "collective_reduce_op.py", "reduce_max", "float32" + ) def test_reduce_max_fp16(self): - self.check_with_place("collective_reduce_op.py", "reduce_max", - "float16") + self.check_with_place( + "collective_reduce_op.py", "reduce_max", "float16" + ) def test_reduce_max_int32(self): self.check_with_place("collective_reduce_op.py", "reduce_max", "int32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py index 6976e047e91e2abc6d5e744aa2f3f23af77c2d7f..abe970bafb507303b6e79123b11fd508a0c25f10 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_min.py @@ -23,17 +23,18 @@ paddle.enable_static() class TestCReduceOp(TestDistBase): - def _setup_config(self): pass def test_reduce_min_fp32(self): - self.check_with_place("collective_reduce_op.py", "reduce_min", - "float32") + self.check_with_place( + "collective_reduce_op.py", "reduce_min", "float32" + ) def test_reduce_min_fp16(self): - self.check_with_place("collective_reduce_op.py", "reduce_min", - "float16") + self.check_with_place( + "collective_reduce_op.py", "reduce_min", "float16" + ) def test_reduce_min_int32(self): self.check_with_place("collective_reduce_op.py", "reduce_min", "int32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py index a5ed2ff8b1e4aacde20abc7ffaeb5bf609989c39..6fc9bd331ca3b591ab47bd4c163247e9fa06c635 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_prod.py @@ -23,17 +23,18 @@ paddle.enable_static() class TestCReduceOp(TestDistBase): - def _setup_config(self): pass def test_reduce_prod_fp32(self): - self.check_with_place("collective_reduce_op.py", "reduce_prod", - "float32") + self.check_with_place( + "collective_reduce_op.py", "reduce_prod", "float32" + ) def test_reduce_prod_fp16(self): - 
self.check_with_place("collective_reduce_op.py", "reduce_prod", - "float16") + self.check_with_place( + "collective_reduce_op.py", "reduce_prod", "float16" + ) def test_reduce_prod_int32(self): self.check_with_place("collective_reduce_op.py", "reduce_prod", "int32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py index a86749de5fb623a74470b3a19816a79ef9aac171..141a424a978d52e2529bb3bed5663253c29eb458 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_collective_reduce_sum.py @@ -23,17 +23,18 @@ paddle.enable_static() class TestCReduceOp(TestDistBase): - def _setup_config(self): pass def test_reduce_sum_fp32(self): - self.check_with_place("collective_reduce_op.py", "reduce_sum", - "float32") + self.check_with_place( + "collective_reduce_op.py", "reduce_sum", "float32" + ) def test_reduce_sum_fp16(self): - self.check_with_place("collective_reduce_op.py", "reduce_sum", - "float16") + self.check_with_place( + "collective_reduce_op.py", "reduce_sum", "float16" + ) def test_reduce_sum_int32(self): self.check_with_place("collective_reduce_op.py", "reduce_sum", "int32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py index f314a6759d95adf8a35e6be542d21c659e9c662d..f994fb185f64efe22d34ebb75688e6a1c2408d70 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py @@ -24,9 +24,7 @@ from paddle.fluid import Program, program_guard def create_test_class(op_type, typename, callback): - class Cls(OpTest): - def setUp(self): self.set_mlu() self.place = paddle.MLUPlace(0) @@ -77,46 +75,42 @@ def create_test_class(op_type, typename, callback): def test_broadcast_api_1(self): paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.static.data(name='x', - shape=[1, 2, 1, 3], - dtype=typename) - y = paddle.static.data(name='y', - shape=[1, 2, 3], - dtype=typename) + x = paddle.static.data( + name='x', shape=[1, 2, 1, 3], dtype=typename + ) + y = paddle.static.data( + name='y', shape=[1, 2, 3], dtype=typename + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename) input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") def test_broadcast_api_2(self): paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.static.data(name='x', - shape=[1, 2, 3], - dtype=typename) - y = paddle.static.data(name='y', - shape=[1, 2, 1, 3], - dtype=typename) + x = paddle.static.data( + name='x', shape=[1, 2, 3], dtype=typename + ) + y = paddle.static.data( + name='y', shape=[1, 2, 1, 3], dtype=typename + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) input_x = np.arange(0, 6).reshape((1, 2, 3)).astype(typename) input_y = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": 
input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") @@ -131,11 +125,9 @@ def create_test_class(op_type, typename, callback): input_x = np.arange(0, 5).reshape((5)).astype(typename) input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py index 91bed72ff2867d0548e23a0934d1b55db281173e..d0caa993c33e99eef56e836d02b10934316bff9b 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_concat_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestConcatOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "concat" @@ -43,8 +42,9 @@ class TestConcatOp(OpTest): self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } def set_mlu(self): @@ -69,7 +69,6 @@ class TestConcatOp(OpTest): class TestConcatOp2(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -78,9 +77,9 @@ class TestConcatOp2(TestConcatOp): @skip_check_grad_ci( - reason="The function 'check_grad' for large inputs is too slow.") + reason="The function 'check_grad' for large inputs is too slow." +) class TestConcatOp3(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype) self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype) @@ -92,11 +91,9 @@ class TestConcatOp3(TestConcatOp): @skip_check_grad_ci( - reason= - "This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." + reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." 
) class TestConcatOp4(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -108,7 +105,6 @@ class TestConcatOp4(TestConcatOp): class TestConcatOp5(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype) self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype) @@ -116,11 +112,9 @@ class TestConcatOp5(TestConcatOp): self.axis = -3 -#----------------Concat Fp16---------------- +# ----------------Concat Fp16---------------- def create_test_fp16(parent): - class TestConcatFp16(parent): - def init_dtype(self): self.dtype = np.float16 @@ -136,11 +130,9 @@ create_test_fp16(TestConcatOp4) create_test_fp16(TestConcatOp5) -#----------------Concat Int64---------------- +# ----------------Concat Int64---------------- def create_test_int64(parent): - class TestConcatInt64(parent): - def init_dtype(self): self.dtype = np.int64 @@ -159,11 +151,9 @@ create_test_int64(TestConcatOp4) create_test_int64(TestConcatOp5) -#----------------Concat Int32---------------- +# ----------------Concat Int32---------------- def create_test_int32(parent): - class TestConcatInt32(parent): - def init_dtype(self): self.dtype = np.int32 @@ -182,11 +172,9 @@ create_test_int32(TestConcatOp4) create_test_int32(TestConcatOp5) -#----------------Concat AxisTensor---------------- +# ----------------Concat AxisTensor---------------- def create_test_AxisTensor(parent): - class TestConcatAxisTensor(parent): - def setUp(self): self.op_type = "concat" self.init_dtype() @@ -194,20 +182,22 @@ def create_test_AxisTensor(parent): self.inputs = { 'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)], - 'AxisTensor': np.array([self.axis]).astype("int32") + 'AxisTensor': np.array([self.axis]).astype("int32"), } self.attrs = {} if self.axis < 0: self.actual_axis = self.axis + len(self.x0.shape) - self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0 + self.actual_axis = ( + self.actual_axis if self.actual_axis > 0 else 0 + ) else: self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), - axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } self.place = paddle.device.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_depthwise_conv_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_depthwise_conv_mlu.py index 8fac2941e43c1ab3721f715b5db3de7912e00cee..593bbc7096e5d90b85952ced0525185de5ed2890 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_depthwise_conv_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_depthwise_conv_mlu.py @@ -25,13 +25,19 @@ import paddle.fluid.core as core import paddle.fluid as fluid from op_test import OpTest from paddle.fluid import Program, program_guard -from test_conv2d_op_mlu import TestConv2DOp, TestConv2DOp_v2, create_test_padding_SAME_class, create_test_padding_VALID_class, create_test_channel_last_class, create_test_fp16_class +from test_conv2d_op_mlu import ( + TestConv2DOp, + TestConv2DOp_v2, + create_test_padding_SAME_class, + create_test_padding_VALID_class, + create_test_channel_last_class, + create_test_fp16_class, +) -#----------------TestDepthwiseConv ----- +# ----------------TestDepthwiseConv ----- class TestDepthwiseConv(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -44,7 +50,6 @@ class TestDepthwiseConv(TestConv2DOp): 
class TestDepthwiseConv2(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -57,7 +62,6 @@ class TestDepthwiseConv2(TestConv2DOp): class TestDepthwiseConv3(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -70,7 +74,6 @@ class TestDepthwiseConv3(TestConv2DOp): class TestDepthwiseConvandFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] @@ -84,7 +87,6 @@ class TestDepthwiseConvandFuse(TestConv2DOp): class TestDepthwiseConv2andFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] @@ -98,7 +100,6 @@ class TestDepthwiseConv2andFuse(TestConv2DOp): class TestDepthwiseConv3andFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] @@ -112,7 +113,6 @@ class TestDepthwiseConv3andFuse(TestConv2DOp): class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW @@ -128,7 +128,6 @@ class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -144,7 +143,6 @@ class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -160,7 +158,6 @@ class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] @@ -178,7 +175,6 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] @@ -196,7 +192,6 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.pad = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_mlu.py index 7f79caa54992601cf7f053dba9f161532f516afa..7153ea09ef2fc756f97bdc61083407f34364d34f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_op_mlu.py @@ -28,9 +28,7 @@ paddle.enable_static() def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NHWC" @@ -44,9 +42,7 @@ def create_test_channel_last_class(parent): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -57,9 +53,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -70,9 +64,7 @@ def create_test_padding_VALID_class(parent): def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_dtype(self): self.dtype = np.float16 @@ -82,7 +74,6 @@ def create_test_fp16_class(parent): class TestConv2DOp(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) @@ -105,22 +96,24 @@ 
class TestConv2DOp(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, - filter, - self.groups, - conv2d_param, - data_format=self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + data_format=self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -137,28 +130,37 @@ class TestConv2DOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def init_test_case(self): self.pad = [0, 0] @@ -176,7 +178,6 @@ class TestConv2DOp(OpTest): class TestWithPad(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -187,7 +188,6 @@ class TestWithPad(TestConv2DOp): class TestWithStride(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -198,7 +198,6 @@ class TestWithStride(TestConv2DOp): class TestWithGroup(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -210,7 +209,6 @@ class TestWithGroup(TestConv2DOp): class TestWith1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -227,7 +225,6 @@ class TestWith1x1(TestConv2DOp): class TestWithDepthWise5x5(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -241,7 +238,6 @@ class TestWithDepthWise5x5(TestConv2DOp): class TestWithDepthWise7x7(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -255,7 +251,6 @@ class TestWithDepthWise7x7(TestConv2DOp): class TestWithDilation(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -274,7 +269,6 @@ class TestWithDilation(TestConv2DOp): class TestWithInput1x1Filter1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -288,7 +282,6 @@ class TestWithInput1x1Filter1x1(TestConv2DOp): class TestConv2DOp_v2(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) @@ -308,20 +301,24 @@ class TestConv2DOp_v2(OpTest): conv2d_param = { 
'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups, - conv2d_param, - self.padding_algorithm, - self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -339,27 +336,36 @@ class TestConv2DOp_v2(OpTest): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.02, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.02, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def init_test_case(self): self.pad = [0, 0] @@ -390,14 +396,12 @@ class TestConv2DOp_v2(OpTest): class TestConv2DOp_AsyPadding(TestConv2DOp_v2): - def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -411,7 +415,6 @@ class TestWithPad_AsyPadding(TestConv2DOp_v2): class TestWithStride_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW @@ -425,7 +428,6 @@ class TestWithStride_AsyPadding(TestConv2DOp_v2): class TestWithGroup_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 2] @@ -437,7 +439,6 @@ class TestWithGroup_AsyPadding(TestConv2DOp_v2): class TestWith1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -454,7 +455,6 @@ class TestWith1x1_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW @@ -476,7 +476,6 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW @@ -493,7 +492,6 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW @@ -510,7 +508,6 @@ class 
TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): class TestWithDilation_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW @@ -532,7 +529,6 @@ class TestWithDilation_AsyPadding(TestConv2DOp_v2): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [100, 1, 1, 1] # NCHW diff --git a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py index 0d33bf853691ef2b061c18cd7a6fbd268e2be4a6..076c6e2ca3fcd29b695683b3fbc783342564df87 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py @@ -27,9 +27,10 @@ from paddle.fluid.tests.unittests.op_test import OpTest def conv2dtranspose_forward_naive(input_, filter_, attrs): padding_algorithm = attrs['padding_algorithm'] if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if attrs['data_format'] == 'NHWC': input_ = np.transpose(input_, [0, 3, 1, 2]) @@ -40,18 +41,22 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): out_c = f_out_c * groups sub_in_c = in_c // groups - stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ - 'dilations'] + stride, pad, dilations = ( + attrs['strides'], + attrs['paddings'], + attrs['dilations'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, - kernel_size, - kernel_stride): + for input_size, filter_size, stride_size in zip( + input_shape, kernel_size, kernel_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -85,37 +90,49 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): if 'output_padding' in attrs: out_pad_h = attrs['output_padding'][0] out_pad_w = attrs['output_padding'][1] - out = np.zeros((in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), - dtype=input_.dtype) + out = np.zeros( + (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype + ) for n in range(in_n): for i in range(in_h): for j in range(in_w): for g in range(groups): - input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i, - j] # (c) + input_masked = input_[ + n, g * sub_in_c : (g + 1) * sub_in_c, i, j + ] # (c) input_masked = np.reshape(input_masked, (sub_in_c, 1, 1)) input_masked = np.tile(input_masked, (1, f_h, f_w)) for k in range(f_out_c): tmp_out = np.sum( - input_masked * - filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :], - axis=0) + input_masked + * filter_[ + g * sub_in_c : (g + 1) * sub_in_c, k, :, : + ], + axis=0, + ) i1, i2 = i * stride[0], i * stride[0] + d_bolck_h j1, j2 = j * stride[1], j * stride[1] + d_bolck_w - out[n, g * f_out_c + k, i1:i2:dilations[0], - j1:j2:dilations[1]] += tmp_out - - out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h, - pad_w_0:out_w - pad_w_1 + out_pad_w] + out[ + n, + g * f_out_c + k, 
+ i1 : i2 : dilations[0], + j1 : j2 : dilations[1], + ] += tmp_out + + out = out[ + :, + :, + pad_h_0 : out_h - pad_h_1 + out_pad_h, + pad_w_0 : out_w - pad_w_1 + out_pad_w, + ] if attrs['data_format'] == 'NHWC': out = np.transpose(out, [0, 2, 3, 1]) return out class TestConv2DTransposeOp(OpTest): - def setUp(self): # init as conv transpose self.dtype = np.float32 @@ -145,7 +162,7 @@ class TestConv2DTransposeOp(OpTest): 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format + 'data_format': self.data_format, } if self.output_size is not None: self.attrs['output_size'] = self.output_size @@ -153,8 +170,9 @@ class TestConv2DTransposeOp(OpTest): if len(self.output_padding) > 0: self.attrs['output_padding'] = self.output_padding - output = conv2dtranspose_forward_naive(input_, filter_, - self.attrs).astype(self.dtype) + output = conv2dtranspose_forward_naive( + input_, filter_, self.attrs + ).astype(self.dtype) self.outputs = {'Output': output} @@ -167,23 +185,28 @@ class TestConv2DTransposeOp(OpTest): def test_check_grad_no_input(self): if self.need_check_grad: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input']), + ) def test_check_grad_no_filter(self): if self.need_check_grad: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + self.place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad(self): if self.need_check_grad: - self.check_grad_with_place(self.place, - set(['Input', 'Filter']), - 'Output', - max_relative_error=0.02) + self.check_grad_with_place( + self.place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.02, + ) def init_test_case(self): self.pad = [0, 0] @@ -199,7 +222,6 @@ class TestConv2DTransposeOp(OpTest): class TestWithSymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -211,7 +233,6 @@ class TestWithSymmetricPad(TestConv2DTransposeOp): class TestWithAsymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -223,7 +244,6 @@ class TestWithAsymmetricPad(TestConv2DTransposeOp): class TestWithSAMEPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [2, 1] self.dilations = [1, 2] @@ -235,7 +255,6 @@ class TestWithSAMEPad(TestConv2DTransposeOp): class TestWithVALIDPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -247,7 +266,6 @@ class TestWithVALIDPad(TestConv2DTransposeOp): class TestWithGroups(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -259,7 +277,6 @@ class TestWithGroups(TestConv2DTransposeOp): class TestWithStride(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -271,7 +288,6 @@ class TestWithStride(TestConv2DTransposeOp): class TestWithDilation(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -283,7 +299,6 @@ class TestWithDilation(TestConv2DTransposeOp): class TestWithEvenUpsample(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -296,7 +311,6 @@ class TestWithEvenUpsample(TestConv2DTransposeOp): class 
TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -309,7 +323,6 @@ class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): class Test_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -322,7 +335,6 @@ class Test_NHWC(TestConv2DTransposeOp): class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -335,7 +347,6 @@ class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -348,7 +359,6 @@ class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithGroups_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -361,7 +371,6 @@ class TestWithGroups_NHWC(TestConv2DTransposeOp): class TestWithStride_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -374,7 +383,6 @@ class TestWithStride_NHWC(TestConv2DTransposeOp): class TestWithDilation_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -387,7 +395,6 @@ class TestWithDilation_NHWC(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -401,7 +408,6 @@ class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -415,7 +421,6 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): class TestMLU_FP16(TestConv2DTransposeOp): - def init_test_case(self): self.dtype = np.float16 self.set_mlu() @@ -440,7 +445,6 @@ class TestMLU_FP16(TestConv2DTransposeOp): class TestMLU_NHWC_FP16(TestMLU_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [0, 0] @@ -454,7 +458,6 @@ class TestMLU_NHWC_FP16(TestMLU_FP16): class TestMLUWithGroups_NHWC_FP16(TestMLU_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 1] @@ -468,7 +471,6 @@ class TestMLUWithGroups_NHWC_FP16(TestMLU_FP16): class TestMLUWithEvenUpsample_NHWC_FP16(TestMLU_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [2, 2] @@ -483,7 +485,6 @@ class TestMLUWithEvenUpsample_NHWC_FP16(TestMLU_FP16): class TestConv2DTransposeAPI(unittest.TestCase): - def setUp(self): self.set_mlu() @@ -492,67 +493,78 @@ class TestConv2DTransposeAPI(unittest.TestCase): self.place = paddle.device.MLUPlace(0) def test_case1(self): - data1 = fluid.layers.data(name='data1', - shape=[3, 5, 5], - dtype='float32') - data2 = fluid.layers.data(name='data2', - shape=[5, 5, 3], - dtype='float32') - out1 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - data_format='NCHW') - out2 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - data_format='NHWC') - out3 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding=[[0, 0], [1, 1], [1, 1], - [0, 0]], - data_format='NHWC') - out4 = fluid.layers.conv2d_transpose(input=data1, - groups=3, - num_filters=6, - filter_size=3, - padding=[[0, 0], [0, 0], [2, 1], - [0, 0]], - data_format='NCHW') - out5 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - 
padding='SAME', - data_format='NCHW') - out6 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding='VALID', - data_format='NHWC') - out7 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - output_size=[7, 7], - padding=[0, 0], - data_format='NHWC') + data1 = fluid.layers.data( + name='data1', shape=[3, 5, 5], dtype='float32' + ) + data2 = fluid.layers.data( + name='data2', shape=[5, 5, 3], dtype='float32' + ) + out1 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + data_format='NCHW', + ) + out2 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + data_format='NHWC', + ) + out3 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + data_format='NHWC', + ) + out4 = fluid.layers.conv2d_transpose( + input=data1, + groups=3, + num_filters=6, + filter_size=3, + padding=[[0, 0], [0, 0], [2, 1], [0, 0]], + data_format='NCHW', + ) + out5 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + padding='SAME', + data_format='NCHW', + ) + out6 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding='VALID', + data_format='NHWC', + ) + out7 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + output_size=[7, 7], + padding=[0, 0], + data_format='NHWC', + ) data1_np = np.random.random((2, 3, 5, 5)).astype("float32") data2_np = np.random.random((2, 5, 5, 3)).astype("float32") exe = fluid.Executor(self.place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2, out3, out4, out5, out6, out7], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2, out3, out4, out5, out6, out7], + return_numpy=True, + ) self.assertIsNotNone(results[0]) self.assertIsNotNone(results[1]) self.assertIsNotNone(results[2]) @@ -563,7 +575,6 @@ class TestConv2DTransposeAPI(unittest.TestCase): class TestConv2DTransposeOpException(unittest.TestCase): - def setUp(self): self.set_mlu() @@ -575,68 +586,74 @@ class TestConv2DTransposeOpException(unittest.TestCase): data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") def attr_data_format(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - data_format="NCDHW") + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + data_format="NCDHW", + ) self.assertRaises(ValueError, attr_data_format) def attr_padding_str(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding='Vald') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding='Vald', + ) self.assertRaises(ValueError, attr_padding_str) def attr_padding_list(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding=[[1, 1], [1, 1], [0, 0], - [0, 0]]) + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [1, 1], [0, 0], [0, 0]], + ) self.assertRaises(ValueError, attr_padding_list) def attr_padding_with_data_format(): - out = 
fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding=[[1, 1], [0, 0], [0, 0], - [1, 1]], - data_format='NHWC') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + data_format='NHWC', + ) self.assertRaises(ValueError, attr_padding_with_data_format) - error_input = fluid.layers.data(name='error_data', - shape=[1], - dtype="float32") + error_input = fluid.layers.data( + name='error_data', shape=[1], dtype="float32" + ) def error_input_size(): - out = fluid.layers.conv2d_transpose(input=error_input, - groups=1, - num_filters=6, - filter_size=3) + out = fluid.layers.conv2d_transpose( + input=error_input, groups=1, num_filters=6, filter_size=3 + ) self.assertRaises(ValueError, error_input_size) def error_groups(): - out = fluid.layers.conv2d_transpose(input=data, - groups=0, - num_filters=6, - filter_size=3, - data_format='NHWC') + out = fluid.layers.conv2d_transpose( + input=data, + groups=0, + num_filters=6, + filter_size=3, + data_format='NHWC', + ) self.assertRaises(ValueError, error_groups) class TestConv2DTransposeRepr(unittest.TestCase): - def setUp(self): self.set_mlu() @@ -646,7 +663,7 @@ class TestConv2DTransposeRepr(unittest.TestCase): def test_case(self): paddle.disable_static() - x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) + x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2) print(conv) y_var = conv(x_var) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_cumsum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_cumsum_op_mlu.py index 77847a8f64d4dabbfa4d6dc54c42adda441e2409..ca1b99ec79c4efbbbfbae4607bf649d90622e87a 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_cumsum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_cumsum_op_mlu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestMLUCumSumOp(OpTest): - def setUp(self): self.op_type = "cumsum" self.set_mlu() @@ -48,18 +47,17 @@ class TestMLUCumSumOp(OpTest): class TestMLUCumSumOp2(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': -1, 'reverse': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.outputs = { - 'Out': np.flip(np.flip(self.inputs['X'], axis=2).cumsum(axis=2), - axis=2) + 'Out': np.flip( + np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2 + ) } class TestMLUCumSumOp3(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 1} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} @@ -67,7 +65,6 @@ class TestMLUCumSumOp3(TestMLUCumSumOp): class TestMLUCumSumOp4(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 0} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} @@ -75,107 +72,115 @@ class TestMLUCumSumOp4(TestMLUCumSumOp): class TestMLUCumSumOp5(TestMLUCumSumOp): - def init_testcase(self): self.inputs = {'X': np.random.random((5, 20)).astype(self.dtype)} self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} class TestMLUCumSumOp7(TestMLUCumSumOp): - def init_testcase(self): self.inputs = {'X': np.random.random((100)).astype(self.dtype)} self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} class TestNPUCumSumExclusive1(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 65)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - 
np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive2(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 888)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive3(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 888)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive4(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 3049)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive5(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 3096)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumReverseExclusive(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'axis': 2, 'reverse': True, "exclusive": True} a = np.random.random((4, 5, 6)).astype(self.dtype) self.inputs = {'X': a} a = np.flip(a, axis=2) self.outputs = { - 'Out': - np.concatenate( - (np.flip(a[:, :, :-1].cumsum(axis=2), - axis=2), np.zeros((4, 5, 1), dtype=self.dtype)), - axis=2) + 'Out': np.concatenate( + ( + np.flip(a[:, :, :-1].cumsum(axis=2), axis=2), + np.zeros((4, 5, 1), dtype=self.dtype), + ), + axis=2, + ) } class TestNPUCumSumWithFlatten1(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'flatten': True} self.inputs = {'X': np.random.random((5, 6)).astype(self.dtype)} @@ -183,7 +188,6 @@ class TestNPUCumSumWithFlatten1(TestMLUCumSumOp): class TestNPUCumSumWithFlatten2(TestMLUCumSumOp): - def init_testcase(self): self.attrs = {'flatten': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} diff --git a/python/paddle/fluid/tests/unittests/mlu/test_deformable_conv_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_deformable_conv_op_mlu.py index 352d9641827ae1111b31e545e7771d8a00949f78..5332299c44bccd147e879901c2a2094a87d6cba8 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_deformable_conv_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_deformable_conv_op_mlu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestModulatedDeformableConvOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -42,7 +41,7 @@ class 
TestModulatedDeformableConvOp(OpTest): conv_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) @@ -50,15 +49,16 @@ class TestModulatedDeformableConvOp(OpTest): mask = 10 * np.random.random(self.mask_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = dconv_im2col_gemm(input, offset, mask, filter, self.groups, - conv_param) + output = dconv_im2col_gemm( + input, offset, mask, filter, self.groups, conv_param + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), 'Mask': OpTest.np_dtype_to_fluid_dtype(mask), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -74,11 +74,13 @@ class TestModulatedDeformableConvOp(OpTest): self.check_output_with_place(self.place, check_eager=False) def test_check_grad(self): - self.check_grad_with_place(self.place, - {'Input', 'Offset', 'Mask', 'Filter'}, - 'Output', - max_relative_error=0.05, - check_eager=False) + self.check_grad_with_place( + self.place, + {'Input', 'Offset', 'Mask', 'Filter'}, + 'Output', + max_relative_error=0.05, + check_eager=False, + ) def init_test_case(self): self.pad = [1, 1] @@ -90,15 +92,26 @@ class TestModulatedDeformableConvOp(OpTest): self.filter_size = [4, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -112,7 +125,6 @@ class TestModulatedDeformableConvOp(OpTest): class TestWithStride(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [3, 3] self.stride = [2, 2] @@ -122,20 +134,30 @@ class TestWithStride(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] class TestWithDilation(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [1, 1] @@ -145,15 +167,26 @@ class TestWithDilation(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 
1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -161,7 +194,6 @@ class TestWithDilation(TestModulatedDeformableConvOp): class TestWith3x3(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -171,15 +203,26 @@ class TestWith3x3(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py index 0bbec2b7ae05e2cb632aa4cfb40e4d355843ef41..9c115c3bd3894ed8dfc42aeaa1892ca4c46df130 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2022 class TestDropoutOp(OpTest): - def setUp(self): self.set_mlu() self.init_dtype() @@ -39,7 +38,7 @@ class TestDropoutOp(OpTest): 'dropout_prob': self.dropout_prob, 'fix_seed': self.fix_seed, 'is_test': self.is_test, - 'dropout_implementation': self.dropout_implementation + 'dropout_implementation': self.dropout_implementation, } out = self.inputs['X'] * (1.0 - self.dropout_prob) @@ -74,21 +73,21 @@ class TestDropoutOp(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return self.check_grad_with_place(self.place, ['X'], 'Out') class TestDropoutOpInput1d(TestDropoutOp): - def init_inputs_shape(self): self.shape = [2000] class TestDropoutOp2(TestDropoutOp): - def init_inputs_shape(self): self.shape = [32, 64] @@ -100,13 +99,11 @@ class TestDropoutOp2(TestDropoutOp): class TestDropoutOp3(TestDropoutOp): - def init_inputs_shape(self): self.shape = [32, 64, 2] class TestDropoutOp4(TestDropoutOp): - def init_attrs(self): self.__class__.no_need_check_grad = True self.dropout_prob = 0.35 @@ -116,7 +113,6 @@ class TestDropoutOp4(TestDropoutOp): class TestDropoutOp5(TestDropoutOp): - def init_inputs_shape(self): 
self.shape = [32, 64, 3] @@ -129,7 +125,6 @@ class TestDropoutOp5(TestDropoutOp): class TestDropoutOp6(TestDropoutOp): - def init_attrs(self): self.__class__.no_need_check_grad = True self.dropout_prob = 0.0 @@ -146,16 +141,16 @@ class TestDropoutOpWithSeed(TestDropoutOp): self.dtype = np.float32 self.inputs = { "X": np.random.random((32, 64)).astype(self.dtype), - "Seed": np.asarray([125], dtype="int32") + "Seed": np.asarray([125], dtype="int32"), } self.attrs = { 'dropout_prob': 0.0, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64)).astype('uint8') + 'Mask': np.ones((32, 64)).astype('uint8'), } def set_mlu(self): @@ -192,7 +187,7 @@ class TestDropoutOpInference(OpTest): 'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} @@ -209,7 +204,6 @@ class TestDropoutOpInference(OpTest): @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOpInference2(TestDropoutOpInference): - def setUp(self): self.op_type = "dropout" self.set_mlu() @@ -218,13 +212,12 @@ class TestDropoutOpInference2(TestDropoutOpInference): self.attrs = { 'dropout_prob': 0.75, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} class TestDropoutAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace(), paddle.device.MLUPlace(0)] @@ -232,43 +225,44 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): input = fluid.data(name="input", shape=[40, 40], dtype="float32") - res1 = paddle.nn.functional.dropout(x=input, - p=0., - training=False, - mode='upscale_in_train') - res2 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='upscale_in_train') - res3 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='upscale_in_train') - res4 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=True, - mode='upscale_in_train') - res5 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='upscale_in_train') - res6 = paddle.nn.functional.dropout(x=input, - p=1., - training=True, - mode='upscale_in_train') + res1 = paddle.nn.functional.dropout( + x=input, p=0.0, training=False, mode='upscale_in_train' + ) + res2 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=True, mode='upscale_in_train' + ) + res3 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=False, mode='upscale_in_train' + ) + res4 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='upscale_in_train', + ) + res5 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + mode='upscale_in_train', + ) + res6 = paddle.nn.functional.dropout( + x=input, p=1.0, training=True, mode='upscale_in_train' + ) res7 = paddle.fluid.layers.dropout( x=input, - dropout_prob=0., - dropout_implementation='upscale_in_train') - res8 = paddle.nn.functional.dropout(x=input, - p=0., - axis=(0, 1), - training=False, - mode='upscale_in_train') + dropout_prob=0.0, + dropout_implementation='upscale_in_train', + ) + res8 = paddle.nn.functional.dropout( + x=input, + p=0.0, 
+ axis=(0, 1), + training=False, + mode='upscale_in_train', + ) in_np = np.random.random([40, 40]).astype("float32") res_np = in_np @@ -277,13 +271,17 @@ class TestDropoutAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [res1, res2, res3, res4, res5, res7, res8] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np) - fetches2 = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res6]) + fetches2 = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res6], + ) np.testing.assert_allclose(fetches2[0], res_np2) def test_static(self): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py index b76969332dd38449641c2c2f669a4790322abeb6..f5e79bb64ec80cba7e014cac804767ef5673eeed 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestElementwiseAddOp(OpTest): - def set_mlu(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -41,7 +40,7 @@ class TestElementwiseAddOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -52,25 +51,31 @@ class TestElementwiseAddOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.01 + ) def test_check_grad_ingore_x(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - max_relative_error=0.01) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + max_relative_error=0.01, + ) def test_check_grad_ingore_y(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set('Y'), - max_relative_error=0.01) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set('Y'), + max_relative_error=0.01, + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -85,7 +90,6 @@ class TestElementwiseAddOp(OpTest): class TestFP16ElementwiseAddOp(TestElementwiseAddOp): - def init_dtype(self): self.dtype = np.float16 @@ -94,9 +98,9 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -104,9 +108,9 @@ class TestElementwiseAddOp_scalar(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -114,9 +118,9 @@ class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." +) class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -124,9 +128,9 @@ class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." +) class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -134,23 +138,20 @@ class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -161,7 +162,6 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -172,7 +172,6 @@ class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -183,7 +182,6 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -194,7 +192,6 @@ class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -202,7 +199,6 @@ class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -210,7 +206,6 @@ class 
TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -221,7 +216,6 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -232,7 +226,6 @@ class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -243,7 +236,6 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -254,7 +246,6 @@ class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -262,7 +253,6 @@ class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -270,7 +260,6 @@ class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -278,7 +267,6 @@ class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) @@ -286,7 +274,6 @@ class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -294,7 +281,6 @@ class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -305,7 +291,6 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -316,9 +301,9 @@ class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -329,9 +314,9 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -342,7 +327,6 @@ class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -353,7 +337,6 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -364,7 +347,6 @@ class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -375,7 +357,6 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -386,7 +367,6 @@ class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) @@ -397,7 +377,6 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype) @@ -408,7 +387,6 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 1, 12).astype(self.dtype) self.y = np.random.rand(10, 2, 12).astype(self.dtype) @@ -419,14 +397,15 @@ class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_add must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.MLUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.MLUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.MLUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.MLUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1) # the input dtype of elementwise_add must be float16 or float32 @@ -436,7 +415,6 @@ class TestElementwiseAddOpError(unittest.TestCase): class TestAddApi(unittest.TestCase): - def _executed_api(self, x, y, name=None): return paddle.add(x, y, name) @@ -454,7 +432,7 @@ class TestAddApi(unittest.TestCase): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -464,7 +442,7 @@ class TestAddApi(unittest.TestCase): place = fluid.MLUPlace(0) exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -475,18 +453,16 @@ class TestAddApi(unittest.TestCase): y = fluid.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((np_z == z_expected).all(), True) class TestAddInplaceApi(TestAddApi): - def _executed_api(self, x, y, name=None): return x.add_(y, name) class TestAddInplaceBroadcastSuccess(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 4).astype('float32') self.y_numpy = np.random.rand(3, 4).astype('float32') @@ -503,21 +479,18 @@ class TestAddInplaceBroadcastSuccess(unittest.TestCase): class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float32') self.y_numpy = np.random.rand(3, 1).astype('float32') class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float32') self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float32') class TestAddInplaceBroadcastError(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(3, 4).astype('float32') self.y_numpy = np.random.rand(2, 3, 4).astype('float32') @@ -536,21 +509,18 @@ class TestAddInplaceBroadcastError(unittest.TestCase): class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(2, 1, 4).astype('float32') self.y_numpy = np.random.rand(2, 3, 4).astype('float32') class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float32') self.y_numpy = np.random.rand(2, 3, 4).astype('float32') class TestBoolAddFloatElementwiseAddop(unittest.TestCase): - def test_static_add(self): paddle.enable_static() a = 1.5 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_div_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_div_op_mlu.py index 2171e5043f539df6e9075d637ada4476add228fe..2ca8beed3b3244e629aa4c79541ea5c62ddc32f0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_div_op_mlu.py +++ 
b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_div_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2022 class TestElementwiseDiv(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" @@ -40,7 +39,7 @@ class TestElementwiseDiv(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.attrs = {} self.outputs = {'Out': out} @@ -56,25 +55,30 @@ class TestElementwiseDiv(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.05) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.05 + ) def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.05, - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.05, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.05, - no_grad_set=set("Y")) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.05, + no_grad_set=set("Y"), + ) class TestElementwiseDivFp16(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" @@ -87,7 +91,7 @@ class TestElementwiseDivFp16(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.attrs = {} self.outputs = {'Out': out} @@ -105,124 +109,120 @@ class TestElementwiseDivFp16(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestTestElementwiseDiv_scalar(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float32), - 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32) + 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} class TestTestElementwiseDiv_Vector(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float32"), } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} class TestTestElementwiseDiv_broadcast_0(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float32"), } self.attrs = {'axis': 0} self.outputs = { - 'Out': np.divide(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestTestElementwiseDiv_broadcast_1(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float32"), } self.attrs = {'axis': 1} self.outputs = { - 'Out': np.divide(self.inputs['X'], - self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class TestTestElementwiseDiv_broadcast_2(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float32"), } self.outputs = { - 'Out': np.divide(self.inputs['X'], - self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestTestElementwiseDiv_broadcast_3(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32"), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1) + ) } class TestTestElementwiseDiv_broadcast_4(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32") + 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32"), } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} class TestTestElementwiseDiv_broadcast_5(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32") + 
'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32"), } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} class TestTestElementwiseDiv_commonuse_1(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" @@ -234,7 +234,6 @@ class TestTestElementwiseDiv_commonuse_1(TestElementwiseDiv): class TestTestElementwiseDiv_commonuse_2(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" @@ -246,7 +245,6 @@ class TestTestElementwiseDiv_commonuse_2(TestElementwiseDiv): class TestTestElementwiseDiv_xsize_lessthan_ysize(TestElementwiseDiv): - def setUp(self): self.set_mlu() self.op_type = "elementwise_div" diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py index 4fc1533b1bced9d60c90b54a49224432162fec89..cdc354acdcba51043eaefad56602d9c17a85eb99 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_max_op_mlu.py @@ -41,7 +41,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_x)) or ( - shape_out[ax] > 1 and shape_x[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_x[ax - src_axis] == 1 + ): reduce_axes_x.append(ax) if shape_y != shape_out: @@ -52,7 +53,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_y)) or ( - shape_out[ax] > 1 and shape_y[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_y[ax - src_axis] == 1 + ): reduce_axes_y.append(ax) if len(reduce_axes_x) > 0: @@ -79,7 +81,6 @@ def ComputeGrad(x, y, out, axis): class TestElementwiseMaxOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_max" @@ -90,7 +91,7 @@ class TestElementwiseMaxOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -106,7 +107,8 @@ class TestElementwiseMaxOp(OpTest): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype( - self.dtype) + self.dtype + ) self.out = np.maximum(self.x, self.y) def init_axis(self): @@ -117,9 +119,9 @@ class TestElementwiseMaxOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.5 + ) else: self.check_grad_with_place( self.place, @@ -129,10 +131,13 @@ class TestElementwiseMaxOp(OpTest): def test_check_grad_ingore_x(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + max_relative_error=0.9, + ) else: self.check_grad_with_place( self.place, @@ -143,10 +148,13 @@ class TestElementwiseMaxOp(OpTest): def test_check_grad_ingore_y(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - max_relative_error=0.1) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + 
max_relative_error=0.1, + ) else: self.check_grad_with_place( self.place, @@ -157,7 +165,6 @@ class TestElementwiseMaxOp(OpTest): class TestElementwiseMaxOp_int32(TestElementwiseMaxOp): - def init_dtype(self): self.dtype = np.int32 @@ -173,15 +180,14 @@ class TestElementwiseMaxOp_int32(TestElementwiseMaxOp): class TestElementwiseMaxOp_FP16(TestElementwiseMaxOp): - def init_dtype(self): self.dtype = np.float16 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMaxOp_scalar(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype) self.y = np.array([0.5]).astype(self.dtype) @@ -189,22 +195,22 @@ class TestElementwiseMaxOp_scalar(TestElementwiseMaxOp): class TestElementwiseMaxOp_vector(TestElementwiseMaxOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x + sgn * np.random.uniform(0.1, 1, - (100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxOp_broadcast_0(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(100, 1, 1)) def init_axis(self): @@ -212,12 +218,12 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseMaxOp): class TestElementwiseMaxOp_broadcast_1(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(1, 100, 1)) def init_axis(self): @@ -225,56 +231,68 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseMaxOp): def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwiseMaxOp_broadcast_2(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, 
(100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(1, 1, 100)) def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwiseMaxOp_broadcast_3(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype) sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype) - self.y = self.x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (50, 2)).astype(self.dtype) + self.y = self.x[0, :, :, 0] + sgn * np.random.uniform( + 1, 2, (50, 2) + ).astype(self.dtype) self.out = np.maximum(self.x, self.y.reshape(1, 50, 2, 1)) def init_axis(self): @@ -282,27 +300,26 @@ class TestElementwiseMaxOp_broadcast_3(TestElementwiseMaxOp): class TestElementwiseMaxOp_broadcast_4(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype) - self.y = self.x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxOp_broadcast_5(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 3, 1, 1)).astype(self.dtype) - self.y = self.x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 1)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(1, 2, (2, 3, 1, 1)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -317,9 +334,9 @@ class TestElementwiseMaxNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.maximum(a, b) @@ -342,16 +359,17 @@ class TestElementwiseMaxNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, 
"label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py index 31e22904bdeeeffdcff946863aeea6f0c08de98d..f1546b5ac63e32bddd61106902091c0c481edc22 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_min_op_mlu.py @@ -28,7 +28,6 @@ SEED = 2022 class TestElementwiseMinOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_min" @@ -36,7 +35,7 @@ class TestElementwiseMinOp(OpTest): self.init_input_output() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis} @@ -52,7 +51,8 @@ class TestElementwiseMinOp(OpTest): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) self.sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) self.y = self.x + self.sgn * np.random.uniform(0.1, 1, [13, 17]).astype( - self.dtype) + self.dtype + ) self.out = np.minimum(self.x, self.y) self.axis = -1 @@ -64,9 +64,9 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.5 + ) else: self.check_grad_with_place( self.place, @@ -76,10 +76,13 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_ingore_x(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + max_relative_error=0.9, + ) else: self.check_grad_with_place( self.place, @@ -90,10 +93,13 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_ingore_y(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - max_relative_error=0.1) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + max_relative_error=0.1, + ) else: self.check_grad_with_place( self.place, @@ -104,32 +110,30 @@ class TestElementwiseMinOp(OpTest): class TestElementwiseMinOpFp16(TestElementwiseMinOp): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOp_Vector(TestElementwiseMinOp): - def init_input_output(self): - self.x = np.random.uniform(1, 2, (100, )).astype(self.dtype) - self.sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x + self.sgn * np.random.uniform(0.1, 1, (100, )).astype( - self.dtype) + self.x = np.random.uniform(1, 2, (100,)).astype(self.dtype) + self.sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x + self.sgn * np.random.uniform(0.1, 1, (100,)).astype( + self.dtype + ) self.out = np.minimum(self.x, self.y) self.axis = -1 class TestElementwiseMinOpFp16_Vector(TestElementwiseMinOp_Vector): - def init_dtype(self): self.dtype = np.float16 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip 
shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMinOp_scalar(TestElementwiseMinOp): - def init_input_output(self): self.x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype) self.y = np.array([0.5]).astype(self.dtype) @@ -138,32 +142,30 @@ class TestElementwiseMinOp_scalar(TestElementwiseMinOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMinOpFp16_scalar(TestElementwiseMinOp_scalar): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOp_broadcast(TestElementwiseMinOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) - self.sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, 0, :] + self.sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, 0, :] + self.sgn * np.random.uniform( + 1, 2, (100,) + ).astype(self.dtype) self.out = np.minimum(self.x, self.y.reshape(1, 1, 100)) self.axis = -1 class TestElementwiseMinOpFp16_broadcast(TestElementwiseMinOp_broadcast): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOpNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -178,9 +180,9 @@ class TestElementwiseMinOpNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.minimum(a, b) @@ -203,16 +205,17 @@ class TestElementwiseMinOpNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_mul_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_mul_op_mlu.py index fc5e8e89254b2e8fe506ab5d91d4724d05bf4835..cead808d42ca43da2c973f0cd8f82c3b40d9547e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_mul_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_mul_op_mlu.py @@ -30,7 +30,6 @@ paddle.enable_static() class ElementwiseMulOp(OpTest): - def init_kernel_type(self): self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) @@ -46,7 +45,7 @@ class ElementwiseMulOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis} @@ -58,14 +57,14 @@ class ElementwiseMulOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - 
self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, ['Y'], 'Out', no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set('Y')) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set('Y') + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -80,33 +79,31 @@ class ElementwiseMulOp(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMulOp_scalar(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) + 'Y': np.random.rand(1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_Vector(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.random((100, )).astype("float32"), - 'Y': np.random.random((100, )).astype("float32") + 'X': np.random.random((100,)).astype("float32"), + 'Y': np.random.random((100,)).astype("float32"), } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} self.init_kernel_type() class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -117,12 +114,11 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 100, 3).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.attrs = {'axis': 1} @@ -133,12 +129,11 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.outputs = { @@ -148,12 +143,11 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(np.float32), - 'Y': np.random.rand(10, 12).astype(np.float32) + 'Y': np.random.rand(10, 12).astype(np.float32), } self.attrs = {'axis': 1} @@ -164,66 +158,60 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 2, 11).astype(np.float32), - 'Y': np.random.rand(10, 1, 11).astype(np.float32) + 'Y': np.random.rand(10, 1, 11).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 4, 2, 3).astype(np.float32), - 'Y': np.random.rand(10, 4, 1, 3).astype(np.float32) + 'Y': np.random.rand(10, 4, 1, 3).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} 
self.init_kernel_type() class TestElementwiseMulOpFp16(ElementwiseMulOp): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(1, 1, 100).astype(np.float32) + 'Y': np.random.rand(1, 1, 100).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(30, 3, 1, 5).astype(np.float32), - 'Y': np.random.rand(30, 1, 4, 1).astype(np.float32) + 'Y': np.random.rand(30, 1, 4, 1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 10).astype(np.float32), - 'Y': np.random.rand(2, 2, 10, 10).astype(np.float32) + 'Y': np.random.rand(2, 2, 10, 10).astype(np.float32), } self.attrs = {'axis': 2} @@ -235,14 +223,15 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): class TestElementwiseMulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_mul must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1) # the input dtype of elementwise_mul must be float16 or float32 or int32 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_pow_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_pow_op_mlu.py index 300323dba05feb38269ab7a7b38e05e89a5201d3..7215ce6f9ee9b3313b4cea286eb564781f70fb89 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_pow_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_pow_op_mlu.py @@ -42,7 +42,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_x)) or ( - shape_out[ax] > 1 and shape_x[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_x[ax - src_axis] == 1 + ): reduce_axes_x.append(ax) if shape_y != shape_out: @@ -53,7 +54,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_y)) or ( - shape_out[ax] > 1 and shape_y[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_y[ax - src_axis] == 1 + ): reduce_axes_y.append(ax) if len(reduce_axes_x) > 0: @@ -79,7 +81,6 @@ def ComputeGrad(x, y, out, axis): class TestElementwisePow(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_pow" @@ -90,7 +91,7 @@ class TestElementwisePow(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -116,27 +117,32 @@ class TestElementwisePow(OpTest): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - 
self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowFp16(TestElementwisePow): - def init_input_output(self): np.random.seed(SEED) self.x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) @@ -156,7 +162,6 @@ class TestElementwisePowFp16(TestElementwisePow): class TestElementwisePowOp_broadcast_0(TestElementwisePow): - def init_axis(self): self.axis = 1 @@ -168,27 +173,32 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowOp_broadcast_1(TestElementwisePow): - def init_axis(self): self.axis = 1 @@ -200,27 +210,32 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowOp_broadcast_2(TestElementwisePow): - def init_axis(self): self.axis = 0 @@ -232,23 +247,29 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - 
self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_sub_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_sub_op_mlu.py index 4062cbeaf95f42e360638f95ec01e22023726a1f..5b2d0a435883399690954c6606ecda22c3891459 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_sub_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_sub_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2022 class TestElementwiseSubOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" @@ -37,7 +36,7 @@ class TestElementwiseSubOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -64,52 +63,56 @@ class TestElementwiseSubOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseSubOp_scalar(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) + 'Y': np.random.rand(1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_Vector(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { - 'X': np.random.random((100, )).astype("float32"), - 'Y': np.random.random((100, )).astype("float32") + 'X': np.random.random((100,)).astype("float32"), + 'Y': np.random.random((100,)).astype("float32"), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_broadcast_0(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(100, 3, 2).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.attrs = {'axis': 0} self.outputs = { @@ -118,13 +121,12 @@ class TestElementwiseSubOp_broadcast_0(TestElementwiseSubOp): class TestElementwiseSubOp_broadcast_1(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 100, 3).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.attrs = {'axis': 1} self.outputs = { @@ -133,13 +135,12 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseSubOp): class TestElementwiseSubOp_broadcast_2(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.outputs = { 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100) @@ -147,13 +148,12 @@ class TestElementwiseSubOp_broadcast_2(TestElementwiseSubOp): class TestElementwiseSubOp_broadcast_3(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(np.float32), - 'Y': np.random.rand(10, 12).astype(np.float32) + 'Y': np.random.rand(10, 12).astype(np.float32), } self.attrs = {'axis': 1} self.outputs = { @@ -162,49 +162,45 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseSubOp): class TestElementwiseSubOp_broadcast_4(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 5, 3, 12).astype(np.float32), - 'Y': np.random.rand(2, 5, 1, 12).astype(np.float32) + 'Y': np.random.rand(2, 5, 1, 12).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_1(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(1, 1, 100).astype(np.float32) + 'Y': np.random.rand(1, 1, 100).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_2(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 3, 1, 4).astype(np.float32), - 'Y': np.random.rand(10, 1, 12, 1).astype(np.float32) + 'Y': np.random.rand(10, 1, 12, 1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] - 
self.inputs['Y']} class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseSubOp): - def setUp(self): self.set_mlu() self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 12).astype(np.float32), - 'Y': np.random.rand(2, 3, 10, 12).astype(np.float32) + 'Y': np.random.rand(2, 3, 10, 12).astype(np.float32), } self.attrs = {'axis': 2} self.outputs = { diff --git a/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py index b2d77a4c85ff0551e8f4a1d1988657db3482e87d..e4c6192138655e56c793a60e24e3b731d2a746fe 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestExp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "exp" @@ -55,7 +54,6 @@ class TestExp(OpTest): class TestExpFp16(OpTest): - def setUp(self): self.set_mlu() self.op_type = "exp" @@ -82,7 +80,6 @@ class TestExpFp16(OpTest): class TestExpNeg(OpTest): - def setUp(self): self.set_mlu() self.op_type = "exp" diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py index 5143a5f83a7daa311a189b66bf137ad1b4882577..0a88f759831e4672f7afec6d4a12796f97c2311f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py @@ -25,9 +25,7 @@ paddle.enable_static() def test_class1(op_type, typename): - class TestExpandAsBasic(OpTest): - def setUp(self): self.set_mlu() self.op_type = "expand_as_v2" @@ -54,9 +52,7 @@ def test_class1(op_type, typename): def test_class2(op_type, typename): - class TestExpandAsOpRank2(OpTest): - def setUp(self): self.set_mlu() self.op_type = "expand_as_v2" @@ -83,9 +79,7 @@ def test_class2(op_type, typename): def test_class3(op_type, typename): - class TestExpandAsOpRank3(OpTest): - def setUp(self): self.set_mlu() self.op_type = "expand_as_v2" @@ -112,9 +106,7 @@ def test_class3(op_type, typename): def test_class4(op_type, typename): - class TestExpandAsOpRank4(OpTest): - def setUp(self): self.set_mlu() self.op_type = "expand_as_v2" @@ -142,34 +134,39 @@ def test_class4(op_type, typename): # Test python API class TestExpandAsV2API(unittest.TestCase): - def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) - y = fluid.layers.data(name='target_tensor', - shape=[2, 12, 14], - append_batch_size=False, - dtype="float32") + y = fluid.layers.data( + name='target_tensor', + shape=[2, 12, 14], + append_batch_size=False, + dtype="float32", + ) out_1 = paddle.expand_as(x, y=y) exe = fluid.Executor(place=fluid.MLUPlace(0)) - res_1 = exe.run(fluid.default_main_program(), - feed={ - "x": input1, - "target_tensor": input2 - }, - fetch_list=[out_1]) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": input1, "target_tensor": input2}, + fetch_list=[out_1], + ) assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1))) for _typename in { - 'float16', 'float32', 'int64', 'int32', 'int8', 'uint8', 'bool' + 'float16', + 'float32', + 'int64', + 'int32', + 'int8', + 'uint8', + 'bool', }: test_class1('expand_as_v2', _typename) test_class2('expand_as_v2', _typename) 
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py index 527a9bd2e55fce54a550bdcab458f60e218f8553..f1e8b06cd9ab6fd0d68927ab5a230793e5fd30db 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py @@ -28,7 +28,6 @@ paddle.enable_static() # Situation 1: shape is a list(without tensor) class TestExpandV2OpRank1(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -54,7 +53,6 @@ class TestExpandV2OpRank1(OpTest): class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = [120] self.shape = [2, 120] @@ -62,7 +60,6 @@ class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1): class TestExpandV2OpRank2(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = [1, 140] self.shape = [12, 140] @@ -70,7 +67,6 @@ class TestExpandV2OpRank2(TestExpandV2OpRank1): class TestExpandV2OpRank3_Corner(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) @@ -78,7 +74,6 @@ class TestExpandV2OpRank3_Corner(TestExpandV2OpRank1): class TestExpandV2OpRank4(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) @@ -86,7 +81,6 @@ class TestExpandV2OpRank4(TestExpandV2OpRank1): class TestExpandV2OpRank5(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (2, 4, 1, 15) self.shape = (2, -1, 4, -1) @@ -94,7 +88,6 @@ class TestExpandV2OpRank5(TestExpandV2OpRank1): class TestExpandV2OpRank6(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (4, 1, 30) self.shape = (2, -1, 4, 30) @@ -103,7 +96,6 @@ class TestExpandV2OpRank6(TestExpandV2OpRank1): # Situation 2: shape is a list(with tensor) class TestExpandV2OpRank1_tensor_attr(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -111,8 +103,9 @@ class TestExpandV2OpRank1_tensor_attr(OpTest): self.init_data() expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): - expand_shapes_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + expand_shapes_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype("float32"), @@ -136,7 +129,6 @@ class TestExpandV2OpRank1_tensor_attr(OpTest): class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -146,7 +138,6 @@ class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr): # Situation 3: shape is a tensor class TestExpandV2OpRank1_tensor(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -175,7 +166,6 @@ class TestExpandV2OpRank1_tensor(OpTest): # Situation 4: input x is Integer class TestExpandV2OpInteger(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -193,7 +183,6 @@ class TestExpandV2OpInteger(OpTest): # Situation 5: input x is Bool class TestExpandV2OpBoolean(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -209,7 +198,6 @@ class TestExpandV2OpBoolean(OpTest): # Situation 56: input x is Integer class TestExpandV2OpInt64_t(OpTest): - def setUp(self): self.op_type = "expand_v2" self.place = paddle.device.MLUPlace(0) @@ -226,11 
+214,11 @@ class TestExpandV2OpInt64_t(OpTest): class TestExpandV2Error(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.device.MLUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.device.MLUPlace(0) + ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -242,19 +230,19 @@ class TestExpandV2Error(unittest.TestCase): # Test python API class TestExpandV2API(unittest.TestCase): - def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data(name="expand_shape", - shape=[2], - append_batch_size=False, - dtype="int32") + expand_shape = fluid.layers.data( + name="expand_shape", + shape=[2], + append_batch_size=False, + dtype="int32", + ) out_1 = paddle.expand(x, shape=[12, 14]) out_2 = paddle.expand(x, shape=[positive_2, 14]) @@ -263,28 +251,28 @@ class TestExpandV2API(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=paddle.device.MLUPlace(0)) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_shape": - np.array([12, 14]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + "expand_shape": np.array([12, 14]).astype("int32"), + }, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) assert np.array_equal(res_3, np.tile(input, (1, 1))) class TestExpandInferShape(unittest.TestCase): - def test_shape_with_var(self): with program_guard(Program(), Program()): x = paddle.static.data(shape=[-1, 1, 3], name='x') fake_var = paddle.randn([2, 3]) target_shape = [ - -1, paddle.shape(fake_var)[0], - paddle.shape(fake_var)[1] + -1, + paddle.shape(fake_var)[0], + paddle.shape(fake_var)[1], ] out = paddle.expand(x, shape=target_shape) self.assertListEqual(list(out.shape), [-1, -1, -1]) @@ -292,7 +280,6 @@ class TestExpandInferShape(unittest.TestCase): # Test python Dygraph API class TestExpandV2DygraphAPI(unittest.TestCase): - def test_expand_times_is_tensor(self): with paddle.fluid.dygraph.guard(): paddle.seed(1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_any_like_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_any_like_op_mlu.py index 249aa48e3d8ab310494bf08c25bb521cb420cea1..021f45aee0698004505bb57248bcc7ca33a74c6f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_any_like_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_any_like_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFillAnyLikeOp(OpTest): - def setUp(self): self.init_dtype() self.set_mlu() @@ -54,25 +53,21 @@ class TestFillAnyLikeOp(OpTest): class TestFillAnyLikeOp2(TestFillAnyLikeOp): - def set_value(self): self.value = -0.0 class TestFillAnyLikeOp3(TestFillAnyLikeOp): - def set_value(self): self.value = 1.0 class TestFillAnyLikeOp4(TestFillAnyLikeOp): - def set_value(self): self.value = 1e-9 class TestFillAnyLikeOp5(TestFillAnyLikeOp): - def set_value(self): if self.dtype == "float16": 
self.value = 0.05 @@ -81,7 +76,6 @@ class TestFillAnyLikeOp5(TestFillAnyLikeOp): class TestFillAnyLikeOpInt32(TestFillAnyLikeOp): - def init_dtype(self): self.dtype = np.int32 @@ -90,7 +84,6 @@ class TestFillAnyLikeOpInt32(TestFillAnyLikeOp): class TestFillAnyLikeOpInt64(TestFillAnyLikeOp): - def init_dtype(self): self.dtype = np.int64 @@ -99,7 +92,6 @@ class TestFillAnyLikeOpInt64(TestFillAnyLikeOp): class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp): - def init_dtype(self): self.dtype = np.float32 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_batch_size_like_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_batch_size_like_op_mlu.py index ba5983fbb1e018afd2f80e4410bd65ab776b2a53..79b5d7abe7d82e28b6d026a7e5abc10512b066f5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_batch_size_like_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_batch_size_like_op_mlu.py @@ -27,20 +27,21 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ paddle.enable_static() -def fill_constant_batch_size_like(input, - shape, - value, - data_type, - input_dim_idx=0, - output_dim_idx=0, - force_cpu=False): +def fill_constant_batch_size_like( + input, + shape, + value, + data_type, + input_dim_idx=0, + output_dim_idx=0, + force_cpu=False, +): return paddle.fluid.layers.fill_constant_batch_size_like( - input, shape, data_type, value, input_dim_idx, output_dim_idx, - force_cpu) + input, shape, data_type, value, input_dim_idx, output_dim_idx, force_cpu + ) class TestFillConstantBatchSizeLike(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -61,11 +62,12 @@ class TestFillConstantBatchSizeLike(OpTest): 'dtype': self.dtype, 'force_cpu': self.force_cpu, 'input_dim_idx': self.input_dim_idx, - 'output_dim_idx': self.output_dim_idx + 'output_dim_idx': self.output_dim_idx, } self.outputs = { - 'Out': np.full(self.output_shape, self.output_value, - self.output_dtype) + 'Out': np.full( + self.output_shape, self.output_value, self.output_dtype + ) } def init_shape(self): @@ -94,7 +96,6 @@ class TestFillConstantBatchSizeLike(OpTest): class TestFillConstantBatchSizeLike2(TestFillConstantBatchSizeLike): - def init_shape(self): # test shape self.input_shape = [4, 5, 6, 7] @@ -103,7 +104,6 @@ class TestFillConstantBatchSizeLike2(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike3(TestFillConstantBatchSizeLike): - def init_value(self): # use 'str_value' rather than 'value' self.value = 3.8 @@ -112,7 +112,6 @@ class TestFillConstantBatchSizeLike3(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike4(TestFillConstantBatchSizeLike): - def init_value(self): # str_value = 'inf' self.value = 3.8 @@ -121,7 +120,6 @@ class TestFillConstantBatchSizeLike4(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike5(TestFillConstantBatchSizeLike): - def init_value(self): # str_value = '-inf' self.value = 3.8 @@ -130,7 +128,6 @@ class TestFillConstantBatchSizeLike5(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike6(TestFillConstantBatchSizeLike): - def init_dtype(self): self.dtype = core.VarDesc.VarType.FP16 self.output_dtype = np.float16 @@ -140,20 +137,17 @@ class TestFillConstantBatchSizeLike6(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike7(TestFillConstantBatchSizeLike): - def init_dtype(self): self.dtype = core.VarDesc.VarType.INT32 self.output_dtype = np.int32 class 
TestFillConstantBatchSizeLike8(TestFillConstantBatchSizeLike): - def init_force_cpu(self): self.force_cpu = True class TestFillConstantBatchSizeLike9(TestFillConstantBatchSizeLike): - def init_shape(self): self.input_shape = [4, 5] self.shape = [123, 92] @@ -187,11 +181,12 @@ class TestFillConstantBatchSizeLikeLodTensor(TestFillConstantBatchSizeLike): 'dtype': self.dtype, 'force_cpu': self.force_cpu, 'input_dim_idx': self.input_dim_idx, - 'output_dim_idx': self.output_dim_idx + 'output_dim_idx': self.output_dim_idx, } self.outputs = { - 'Out': np.full(self.output_shape, self.output_value, - self.output_dtype) + 'Out': np.full( + self.output_shape, self.output_value, self.output_dtype + ) } def init_shape(self): @@ -201,7 +196,8 @@ class TestFillConstantBatchSizeLikeLodTensor(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLikeLodTensor2( - TestFillConstantBatchSizeLikeLodTensor): + TestFillConstantBatchSizeLikeLodTensor +): # test LodTensor with 'input_dim_idx' != 0 def init_shape(self): self.input_shape = [10, 20] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py index b05f720b5f10cb2b64c44cf3ffbe7e3c9c3f8512..7a19779b049ce4d8874f1e4559b67b7149ba3cb4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py @@ -31,10 +31,8 @@ paddle.enable_static() # Situation 1: Attr(shape) is a list(without tensor) class TestFillConstantOp1(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.inputs = {} @@ -49,10 +47,8 @@ class TestFillConstantOp1(OpTest): class TestFillConstantOp2(OpTest): - def setUp(self): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' self.op_type = "fill_constant" self.inputs = {} @@ -67,10 +63,8 @@ class TestFillConstantOp2(OpTest): class TestFillConstantOp3(OpTest): - def setUp(self): - '''Test fill_constant op with specified int64 value - ''' + '''Test fill_constant op with specified int64 value''' self.op_type = "fill_constant" self.inputs = {} @@ -85,10 +79,8 @@ class TestFillConstantOp3(OpTest): class TestFillConstantOp4(OpTest): - def setUp(self): - '''Test fill_constant op with specified int value - ''' + '''Test fill_constant op with specified int value''' self.op_type = "fill_constant" self.inputs = {} @@ -103,17 +95,15 @@ class TestFillConstantOp4(OpTest): class TestFillConstantOpWithSelectedRows(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() # create Out Variable out = scope.var('Out').get_selected_rows() # create and run fill_constant_op operator - fill_constant_op = Operator("fill_constant", - shape=[123, 92], - value=3.8, - Out='Out') + fill_constant_op = Operator( + "fill_constant", shape=[123, 92], value=3.8, Out='Out' + ) fill_constant_op.run(scope, place) # get result from Out @@ -133,16 +123,15 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase): # Situation 2: Attr(shape) is a list(with tensor) class TestFillConstantOp1_ShapeTensorList(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - 
(1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape, 'value': self.value} @@ -161,16 +150,15 @@ class TestFillConstantOp1_ShapeTensorList(OpTest): class TestFillConstantOp2_ShapeTensorList(OpTest): - def setUp(self): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' self.op_type = "fill_constant" self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape} @@ -188,7 +176,6 @@ class TestFillConstantOp2_ShapeTensorList(OpTest): class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): - def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -196,7 +183,6 @@ class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): - def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -205,10 +191,8 @@ class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): # Situation 3: shape is a tensor class TestFillConstantOp1_ShapeTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() @@ -229,16 +213,14 @@ class TestFillConstantOp1_ShapeTensor(OpTest): # Situation 4: value is a tensor class TestFillConstantOp1_ValueTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), - 'ValueTensor': np.array([self.value]).astype("float32") + 'ValueTensor': np.array([self.value]).astype("float32"), } self.attrs = {'value': self.value + 1.0} self.outputs = {'Out': np.full(self.shape, self.value)} @@ -247,7 +229,7 @@ class TestFillConstantOp1_ValueTensor(OpTest): self.__class__.use_mlu = True def init_data(self): - #self.shape = [123, 92] + # self.shape = [123, 92] self.shape = [2, 2] self.value = 3.8 self.dtype = np.float32 @@ -258,16 +240,14 @@ class TestFillConstantOp1_ValueTensor(OpTest): # Situation 5: value is a tensor class TestFillConstantOp2_ValueTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), - 'ValueTensor': np.array([self.value]).astype("int32") + 'ValueTensor': np.array([self.value]).astype("int32"), } self.attrs = {'value': self.value, 'dtype': 2} self.outputs = {'Out': np.full(self.shape, self.value)} @@ -286,56 +266,55 @@ class TestFillConstantOp2_ValueTensor(OpTest): # Test python API class TestFillConstantAPI(unittest.TestCase): - def test_api(self): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") - shape_tensor_int64 = 
fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") - - out_1 = fluid.layers.fill_constant(shape=[1, 2], - dtype="float32", - value=1.1) - - out_2 = fluid.layers.fill_constant(shape=[1, positive_2_int32], - dtype="float32", - value=1.1) - - out_3 = fluid.layers.fill_constant(shape=[1, positive_2_int64], - dtype="float32", - value=1.1) - - out_4 = fluid.layers.fill_constant(shape=shape_tensor_int32, - dtype="float32", - value=1.1) - - out_5 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype="float32", - value=1.1) - - out_6 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=1.1) - - val1 = fluid.layers.fill_constant(shape=[1], - dtype=np.float32, - value=1.1) - val2 = fluid.layers.fill_constant(shape=[1], - dtype=np.float64, - value=1.1) - out_7 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=val1) - - out_8 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=val2) + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) + + out_1 = fluid.layers.fill_constant( + shape=[1, 2], dtype="float32", value=1.1 + ) + + out_2 = fluid.layers.fill_constant( + shape=[1, positive_2_int32], dtype="float32", value=1.1 + ) + + out_3 = fluid.layers.fill_constant( + shape=[1, positive_2_int64], dtype="float32", value=1.1 + ) + + out_4 = fluid.layers.fill_constant( + shape=shape_tensor_int32, dtype="float32", value=1.1 + ) + + out_5 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype="float32", value=1.1 + ) + + out_6 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=1.1 + ) + + val1 = fluid.layers.fill_constant( + shape=[1], dtype=np.float32, value=1.1 + ) + val2 = fluid.layers.fill_constant( + shape=[1], dtype=np.float64, value=1.1 + ) + out_7 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=val1 + ) + + out_8 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=val2 + ) exe = fluid.Executor(place=fluid.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8 = exe.run( @@ -344,7 +323,8 @@ class TestFillConstantAPI(unittest.TestCase): "shape_tensor_int32": np.array([1, 2]).astype("int32"), "shape_tensor_int64": np.array([1, 2]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32")) @@ -357,7 +337,6 @@ class TestFillConstantAPI(unittest.TestCase): class TestFillConstantImperative(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): data1 = np.array([1, 2]).astype('int32') @@ -366,26 +345,30 @@ class TestFillConstantImperative(unittest.TestCase): shape = fluid.dygraph.to_variable(data1) val = fluid.dygraph.to_variable(data2) value = fluid.dygraph.to_variable(data3) - res1 = fluid.layers.fill_constant(shape=[1, 2], - dtype='float32', - value=1.1) - res2 = fluid.layers.fill_constant(shape=shape, - dtype='float32', - value=1.1) - res3 = fluid.layers.fill_constant(shape=shape, - dtype='float32', - value=val) - res4 = fluid.layers.fill_constant(shape=shape, - dtype='int32', - value=value) - assert np.array_equal(res1.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert 
np.array_equal(res2.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(res3.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(res4.numpy(), - np.full([1, 2], 88, dtype="int32")) + res1 = fluid.layers.fill_constant( + shape=[1, 2], dtype='float32', value=1.1 + ) + res2 = fluid.layers.fill_constant( + shape=shape, dtype='float32', value=1.1 + ) + res3 = fluid.layers.fill_constant( + shape=shape, dtype='float32', value=val + ) + res4 = fluid.layers.fill_constant( + shape=shape, dtype='int32', value=value + ) + assert np.array_equal( + res1.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res2.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res3.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res4.numpy(), np.full([1, 2], 88, dtype="int32") + ) def test_nan(self): with fluid.dygraph.guard(): @@ -405,42 +388,49 @@ class TestFillConstantImperative(unittest.TestCase): class TestFillConstantOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - #for ci coverage + # for ci coverage x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16") - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1], - value=5, - dtype='uint4') - - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1.1], - value=5, - dtype='float32', - out=x1) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1], + value=5, + dtype='uint4', + ) + + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1.1], + value=5, + dtype='float32', + out=x1, + ) # The argument dtype of fill_constant_op must be one of bool, float16, - #float32, float64, uint8, int16, int32 or int64 + # float32, float64, uint8, int16, int32 or int64 x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32") - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1], - value=5, - dtype='float64', - out=x2) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1], + value=5, + dtype='float64', + out=x2, + ) x3 = np.random.randn(100, 100).astype('int32') - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[100, 100], - value=5, - dtype='float64', - out=x3) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[100, 100], + value=5, + dtype='float64', + out=x3, + ) # The argument shape's type of fill_constant_op must be list, tuple or Variable. def test_shape_type(): @@ -456,22 +446,22 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. 
def test_shape_tensor_dtype(): - shape = fluid.data(name="shape_tensor", - shape=[2], - dtype="float32") - fluid.layers.fill_constant(shape=shape, - dtype="float32", - value=1) + shape = fluid.data( + name="shape_tensor", shape=[2], dtype="float32" + ) + fluid.layers.fill_constant( + shape=shape, dtype="float32", value=1 + ) self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data(name="shape_tensor_list", - shape=[1], - dtype="bool") - fluid.layers.fill_constant(shape=[shape, 2], - dtype="float32", - value=1) + shape = fluid.data( + name="shape_tensor_list", shape=[1], dtype="bool" + ) + fluid.layers.fill_constant( + shape=[shape, 2], dtype="float32", value=1 + ) self.assertRaises(TypeError, test_shape_tensor_list_dtype) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py index c799fd011435846a97e206aaa691d6910fb5d7ef..ace458ff42c0576d73ec02b588665a4864df44b9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFlattenOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -35,7 +34,7 @@ class TestFlattenOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def test_check_output(self): @@ -54,7 +53,6 @@ class TestFlattenOp(OpTest): class TestFlattenOp1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.axis = 0 @@ -62,7 +60,6 @@ class TestFlattenOp1(TestFlattenOp): class TestFlattenOpWithDefaultAxis(TestFlattenOp): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -72,7 +69,6 @@ class TestFlattenOpWithDefaultAxis(TestFlattenOp): class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 @@ -80,7 +76,6 @@ class TestFlattenOpSixDims(TestFlattenOp): class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): - def execute_api(self, x, axis=1): return fluid.layers.flatten(x, axis=axis) @@ -88,15 +83,14 @@ class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): paddle.enable_static() main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[-1, 3, -1, -1], - dtype='float32') + x = paddle.static.data( + name="x", shape=[-1, 3, -1, -1], dtype='float32' + ) out = self.execute_api(x, axis=2) self.assertTrue((-1, -1) == out.shape) class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_data = np.random.random((3, 2, 4, 5)).astype("float64") @@ -109,9 +103,9 @@ class TestFlatten2OpError(unittest.TestCase): def test_type(): # dtype must be float32, float64, int8, int32, int64, uint8. 
- x2 = fluid.layers.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2 = fluid.layers.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) fluid.layers.flatten(x2, axis=1) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_flatten_contigous_range_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_flatten_contigous_range_op_mlu.py index 1a988f6104d25391fd8d11955aa00f4876b92e34..942d5f9c89764db6c8a18afcccf93ff029115e3c 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_flatten_contigous_range_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_flatten_contigous_range_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFlattenOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "flatten_contiguous_range" @@ -39,7 +38,7 @@ class TestFlattenOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def set_mlu(self): @@ -49,24 +48,23 @@ class TestFlattenOp(OpTest): self.check_output_with_place(self.place, no_check_set=["XShape"]) def test_check_grad(self): - #pass + # pass self.check_grad_with_place(self.place, ["X"], "Out") def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 self.stop_axis = -1 - self.new_shape = (120) + self.new_shape = 120 def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 1 @@ -76,12 +74,11 @@ class TestFlattenOp_1(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_2(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -91,12 +88,11 @@ class TestFlattenOp_2(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_3(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -106,12 +102,11 @@ class TestFlattenOp_3(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_4(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = -2 @@ -121,12 +116,11 @@ class TestFlattenOp_4(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_5(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 2 @@ -136,12 +130,11 @@ class TestFlattenOp_5(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.start_axis = 3 @@ -151,12 +144,11 @@ class TestFlattenOpSixDims(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_Float32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ 
-167,12 +159,11 @@ class TestFlattenOp_Float32(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_int32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -183,7 +174,7 @@ class TestFlattenOp_int32(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -191,7 +182,6 @@ class TestFlattenOp_int32(TestFlattenOp): class TestFlattenOp_uint8(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -202,7 +192,7 @@ class TestFlattenOp_uint8(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -210,7 +200,6 @@ class TestFlattenOp_uint8(TestFlattenOp): class TestFlattenOp_int8(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -221,7 +210,7 @@ class TestFlattenOp_int8(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -229,7 +218,6 @@ class TestFlattenOp_int8(TestFlattenOp): class TestFlattenOp_int64(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -240,7 +228,7 @@ class TestFlattenOp_int64(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -248,45 +236,58 @@ class TestFlattenOp_int64(TestFlattenOp): class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_ValueError1(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) out = paddle.flatten(x_var, start_axis=2, stop_axis=1) self.assertRaises(ValueError, test_ValueError1) def test_ValueError2(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=10, stop_axis=1) self.assertRaises(ValueError, test_ValueError2) def test_ValueError3(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=2, stop_axis=10) self.assertRaises(ValueError, test_ValueError3) def test_type(): # dtype must be float32, float64, int8, int32, int64, uint8. - x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. 
+ x2 = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x2 = x2.astype('float16') - x2_var = paddle.fluid.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2_var = paddle.fluid.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) paddle.flatten(x2_var) self.assertRaises(TypeError, test_type) @@ -298,7 +299,6 @@ class TestFlatten2OpError(unittest.TestCase): class TestStaticFlattenPythonAPI(unittest.TestCase): - def execute_api(self, x, start_axis=0, stop_axis=-1): return paddle.flatten(x, start_axis, stop_axis) @@ -308,9 +308,9 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[2, 3, 4, 4], - dtype='float32') + x = paddle.static.data( + name="x", shape=[2, 3, 4, 4], dtype='float32' + ) out = self.execute_api(x, start_axis=-2, stop_axis=-1) exe = paddle.static.Executor(place=paddle.MLUPlace(0)) @@ -319,17 +319,22 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI): - def execute_api(self, x, start_axis=0, stop_axis=-1): return x.flatten_(start_axis, stop_axis) class TestFlattenPython(unittest.TestCase): - def test_python_api(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_InputError(): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_flatten_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_flatten_op_mlu.py index d06c165e8d446962e1b370df460e24d567b1d9e1..0b8da1f18f062d8c8108081c50b27700955e1368 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_flatten_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_flatten_op_mlu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestFlattenOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -52,7 +51,6 @@ class TestFlattenOp(OpTest): class TestFlattenOp1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 2, 10) self.axis = 0 @@ -60,7 +58,6 @@ class TestFlattenOp1(TestFlattenOp): class TestFlattenOpWithDefaultAxis(TestFlattenOp): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -70,7 +67,6 @@ class TestFlattenOpWithDefaultAxis(TestFlattenOp): class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_floor_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_floor_op_mlu.py index bdee19091fbddd55669e28b279fbe8e352af1168..2e661b217da8a0b8180019e30cde9f0a8054d495 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_floor_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_floor_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFloor(OpTest): - def setUp(self): self.op_type = "floor" self.place = paddle.device.MLUPlace(0) @@ -49,7 +48,6 @@ class TestFloor(OpTest): class TestFloorFP16(TestFloor): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py index 
ad903da68bc8091a4e4bb2793c813f8d45359532..3fb5634df4532019786493c2037f2f9ff0f96fd2 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_nd_op_mlu.py @@ -26,11 +26,11 @@ paddle.enable_static() def gather_nd_grad(x, index): # for TestGatherNdOpWithLowIndex - dout_shape = index.shape[:-1] + x.shape[index.shape[-1]:] + dout_shape = index.shape[:-1] + x.shape[index.shape[-1] :] numel = 1 for i in dout_shape: numel = numel * i - dout = np.full(dout_shape, 1. / numel) + dout = np.full(dout_shape, 1.0 / numel) dx = np.full_like(x, 0) index = tuple(index.reshape(-1, index.shape[-1]).T) @@ -40,7 +40,6 @@ def gather_nd_grad(x, index): def test_class1(op_type, typename): - class TestGatherNdOpWithEmptyIndex(OpTest): # Index has empty element, which means copy entire tensor @@ -51,7 +50,7 @@ def test_class1(op_type, typename): xnp = np.random.random((5, 20)).astype(typename) self.inputs = { 'X': xnp, - 'Index': np.array([[], []]).astype("int32") + 'Index': np.array([[], []]).astype("int32"), } self.outputs = { 'Out': np.vstack((xnp[np.newaxis, :], xnp[np.newaxis, :])) @@ -76,9 +75,7 @@ def test_class1(op_type, typename): def test_class2(op_type, typename): - class TestGatherNdOpWithIndex1(OpTest): - def setUp(self): self.set_mlu() self.op_type = "gather_nd" @@ -106,9 +103,8 @@ def test_class2(op_type, typename): def test_class3(op_type, typename): - class TestGatherNdOpWithLowIndex(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.set_mlu() @@ -132,9 +128,9 @@ def test_class3(op_type, typename): if typename == "float16": self.__class__.no_need_check_grad = True else: - self.check_grad_with_place(self.place, ['X'], - 'Out', - user_defined_grads=[self.x_grad]) + self.check_grad_with_place( + self.place, ['X'], 'Out', user_defined_grads=[self.x_grad] + ) cls_name = "{0}_{1}_3".format(op_type, typename) TestGatherNdOpWithLowIndex.__name__ = cls_name @@ -142,9 +138,8 @@ def test_class3(op_type, typename): def test_class4(op_type, typename): - class TestGatherNdOpIndex1(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.set_mlu() @@ -176,9 +171,8 @@ def test_class4(op_type, typename): def test_class5(op_type, typename): - class TestGatherNdOpWithSameIndexAsX(OpTest): - #Index has same rank as X's rank + # Index has same rank as X's rank def setUp(self): self.set_mlu() @@ -188,7 +182,7 @@ def test_class5(op_type, typename): index = np.array([[1, 1], [2, 1]]).astype("int64") self.inputs = {'X': xnp, 'Index': index} - self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22] + self.outputs = {'Out': xnp[tuple(index.T)]} # [25, 22] def set_mlu(self): self.__class__.use_mlu = True @@ -209,9 +203,8 @@ def test_class5(op_type, typename): def test_class6(op_type, typename): - class TestGatherNdOpWithHighRankSame(OpTest): - #Both Index and X have high rank, and Rank(Index) = Rank(X) + # Both Index and X have high rank, and Rank(Index) = Rank(X) def setUp(self): self.set_mlu() @@ -219,8 +212,9 @@ def test_class6(op_type, typename): self.python_api = paddle.gather_nd shape = (5, 2, 3, 1, 10) xnp = np.random.rand(*shape).astype(typename) - index = np.vstack([np.random.randint(0, s, size=2) - for s in shape]).T + index = np.vstack( + [np.random.randint(0, s, size=2) for s in shape] + ).T self.inputs = {'X': xnp, 'Index': index.astype("int32")} self.outputs = {'Out': xnp[tuple(index.T)]} @@ -244,9 +238,8 @@ def 
test_class6(op_type, typename): def test_class7(op_type, typename): - class TestGatherNdOpWithHighRankDiff(OpTest): - #Both Index and X have high rank, and Rank(Index) < Rank(X) + # Both Index and X have high rank, and Rank(Index) < Rank(X) def setUp(self): self.set_mlu() @@ -255,7 +248,8 @@ def test_class7(op_type, typename): shape = (2, 3, 4, 1, 10) xnp = np.random.rand(*shape).astype(typename) index = np.vstack( - [np.random.randint(0, s, size=200) for s in shape]).T + [np.random.randint(0, s, size=200) for s in shape] + ).T index_re = index.reshape([20, 5, 2, 5]) self.inputs = {'X': xnp, 'Index': index_re.astype("int32")} @@ -279,9 +273,8 @@ def test_class7(op_type, typename): globals()[cls_name] = TestGatherNdOpWithHighRankDiff -#Test Python API +# Test Python API class TestGatherNdAPI2(unittest.TestCase): - def test_imperative(self): paddle.disable_static() input_1 = np.array([[1, 2], [3, 4], [5, 6]]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py index 32881fe3427225b1752083916c8fb2e9ee7c2761..2f50c10a62f268a84f5f0d358817a7ee31122fd9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py @@ -34,7 +34,6 @@ def gather_numpy(x, index, axis): class TestGatherOp(OpTest): - def setUp(self): self.op_type = "gather" self.place = paddle.MLUPlace(0) @@ -44,7 +43,7 @@ class TestGatherOp(OpTest): xnp = np.random.random(self.x_shape).astype(self.x_type) self.inputs = { 'X': xnp, - 'Index': np.array(self.index).astype(self.index_type) + 'Index': np.array(self.index).astype(self.index_type), } self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} @@ -65,31 +64,28 @@ class TestGatherOp(OpTest): class TestCase1(TestGatherOp): - def config(self): """ For one dimension input """ - self.x_shape = (100) + self.x_shape = 100 self.x_type = "float32" self.index = [1, 3, 5] self.index_type = "int32" class TestCase2(TestGatherOp): - def config(self): """ For int64_t index type """ - self.x_shape = (100) + self.x_shape = 100 self.x_type = "float32" self.index = [1, 3, 5] self.index_type = "int64" class API_TestDygraphGather(unittest.TestCase): - def test_out1(self): paddle.disable_static() input_1 = np.array([[1, 2], [3, 4], [5, 6]]).astype('int32') @@ -127,18 +123,18 @@ class API_TestDygraphGather(unittest.TestCase): class TestGathertError(unittest.TestCase): - def test_error1(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='int8', name='x') axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='index_float') + index_float = paddle.fluid.data( + shape=shape, dtype='float32', name='index_float' + ) def test_x_type(): paddle.gather(x, index) @@ -166,9 +162,9 @@ class TestGathertError(unittest.TestCase): shape = [8, 9, 6] x = fluid.data(shape=shape, dtype='int8', name='x') index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data(shape=shape, - dtype='float32', - name='index_float') + index_float = fluid.data( + shape=shape, dtype='float32', name='index_float' + ) def test_x_type(): paddle.fluid.layers.gather(x, index) diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py index 83540bfadd2bf5bc3f05a8b45f9ba700c21fd44b..4cac3a2e36b6c0bba64ed6a23d3c5b2e0f448835 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestGaussianRandomOp(OpTest): - def setUp(self): self.op_type = "gaussian_random" self.place = paddle.device.MLUPlace(0) @@ -48,7 +47,7 @@ class TestGaussianRandomOp(OpTest): def set_attrs(self): self.mean = 1.0 - self.std = 2. + self.std = 2.0 def test_check_output(self): self.check_output_with_place_customized(self.verify_output, self.place) @@ -67,7 +66,6 @@ class TestGaussianRandomOp(OpTest): class TestMeanStdAreInt(TestGaussianRandomOp): - def set_attrs(self): self.mean = 1 self.std = 2 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py index b905eba2371a82bb55f00be8b462f2a989d6c468..8a879aa22213a6284a10b8107369842d90984cf9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gelu_op_mlu.py @@ -32,7 +32,6 @@ def np_gelu(x): class TestGelu(OpTest): - def setUp(self): self.set_mlu() self.op_type = "gelu" @@ -57,13 +56,12 @@ class TestGelu(OpTest): self.check_output_with_place(self.place, atol=1e-3) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.007) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.007 + ) class TestGeluFp16(OpTest): - def setUp(self): self.set_mlu() self.op_type = "gelu" @@ -90,7 +88,6 @@ class TestGeluFp16(OpTest): class TestGeluNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -105,9 +102,9 @@ class TestGeluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) @@ -131,16 +128,17 @@ class TestGeluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_grid_sampler_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_grid_sampler_op_mlu.py index 96dbaab9ee157567a0896dea6ea5217d213407cc..df173ebf18c6e30d4a313dd24727d35ccf750f98 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_grid_sampler_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_grid_sampler_op_mlu.py @@ -28,12 +28,15 @@ def AffineGrid(theta, grid_shape): n = grid_shape[0] h = grid_shape[1] w = grid_shape[2] - 
h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, - axis=0).T[:, :, np.newaxis] - w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, - axis=0)[:, :, np.newaxis] - grid = np.concatenate([w_idx, h_idx, np.ones([h, w, 1])], - axis=2) # h * w * 3 + h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[ + :, :, np.newaxis + ] + w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[ + :, :, np.newaxis + ] + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2 + ) # h * w * 3 grid = np.repeat(grid[np.newaxis, :], n, axis=0) # n * h * w *3 ret = np.zeros([n, h * w, 2]) @@ -53,13 +56,17 @@ def getGridPointValue(data, x, y): out_H = x.shape[1] out_W = x.shape[2] - #out = np.zeros(data_shape, dtype='float32') + # out = np.zeros(data_shape, dtype='float32') out = np.zeros([N, C, out_H, out_W], dtype='float32') for i in range(N): for j in range(out_H): for k in range(out_W): - if y[i, j, k] < 0 or y[i, j, k] > in_H - 1 or x[ - i, j, k] < 0 or x[i, j, k] > in_W - 1: + if ( + y[i, j, k] < 0 + or y[i, j, k] > in_H - 1 + or x[i, j, k] < 0 + or x[i, j, k] > in_W - 1 + ): out[i, :, j, k] = 0 else: out[i, :, j, k] = data[i, :, y[i, j, k], x[i, j, k]] @@ -75,27 +82,28 @@ def unnormalizeAndClip(grid_slice, max_val, align_corners, padding_mode): if align_corners: grid_slice = 0.5 * ((grid_slice.astype('float32') + 1.0) * max_val) else: - grid_slice = 0.5 * ((grid_slice.astype('float32') + 1.0) * - (max_val + 1)) - 0.5 + grid_slice = ( + 0.5 * ((grid_slice.astype('float32') + 1.0) * (max_val + 1)) - 0.5 + ) if padding_mode == "border": grid_slice = clip(grid_slice, 0, max_val) elif padding_mode == "reflection": double_range = 2 * max_val if align_corners else (max_val + 1) * 2 - grid_abs = np.abs(grid_slice) if align_corners else np.abs(grid_slice + - 0.5) + grid_abs = ( + np.abs(grid_slice) if align_corners else np.abs(grid_slice + 0.5) + ) extra = grid_abs - np.floor(grid_abs / double_range) * double_range grid_slice = np.minimum(extra, double_range - extra) - grid_slice = grid_slice if align_corners else clip( - grid_slice - 0.5, 0, max_val) + grid_slice = ( + grid_slice if align_corners else clip(grid_slice - 0.5, 0, max_val) + ) return grid_slice -def GridSampler(data, - grid, - align_corners=True, - mode="bilinear", - padding_mode="zeros"): +def GridSampler( + data, grid, align_corners=True, mode="bilinear", padding_mode="zeros" +): dims = data.shape N = dims[0] in_C = dims[1] @@ -119,14 +127,18 @@ def GridSampler(data, y0 = np.floor(y).astype('int32') y1 = y0 + 1 - wa = np.tile(((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wb = np.tile(((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wc = np.tile(((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wd = np.tile(((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) + wa = np.tile( + ((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wb = np.tile( + ((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wc = np.tile( + ((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wd = np.tile( + ((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) va = getGridPointValue(data, x0, y0) vb = getGridPointValue(data, x0, y1) @@ -142,7 +154,6 @@ def GridSampler(data, class TestGridSamplerOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -166,12 +177,12 @@ class TestGridSamplerOp(OpTest): 
'use_cudnn': False, "align_corners": self.align_corners, "padding_mode": self.padding_mode, - "mode": self.mode + "mode": self.mode, } self.outputs = { - 'Output': - GridSampler(x, grid, self.align_corners, self.mode, - self.padding_mode) + 'Output': GridSampler( + x, grid, self.align_corners, self.mode, self.padding_mode + ) } def test_check_output(self): @@ -187,7 +198,6 @@ class TestGridSamplerOp(OpTest): class Case1(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -198,7 +208,6 @@ class Case1(TestGridSamplerOp): class LargeInputCase(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 128, 128) self.grid_shape = (2, 130, 130, 2) @@ -209,7 +218,6 @@ class LargeInputCase(TestGridSamplerOp): class Case2(LargeInputCase): - def initTestCase(self): self.x_shape = (2, 3, 128, 128) self.grid_shape = (2, 130, 130, 2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py index fca95416a861c2f10b03a5acbea333e154367eb8..695d37ec54d6a9e01ba7b54d0d59a2456b4bb30a 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py @@ -28,11 +28,10 @@ np.random.seed(SEED) def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5): - return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype) + return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype) class TestMLUHardSigmoid(OpTest): - def setUp(self): paddle.enable_static() @@ -43,7 +42,7 @@ class TestMLUHardSigmoid(OpTest): x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype) lower_threshold = -self.offset / self.slope - upper_threshold = (1. - self.offset) / self.slope + upper_threshold = (1.0 - self.offset) / self.slope # Same reason as TestAbs delta = 0.005 @@ -75,21 +74,18 @@ class TestMLUHardSigmoid(OpTest): class TestMLUHardSigmoid2(TestMLUHardSigmoid): - def set_attrs(self): self.slope = 0.2 self.offset = 0.5 class TestMLUHardSigmoid3(TestMLUHardSigmoid): - def set_attrs(self): self.slope = 0.2 self.offset = 0.4 class TestMLUHardSigmoidFp16(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -114,17 +110,21 @@ class TestMLUHardSigmoidFp16(unittest.TestCase): cpu_diff_1 = np.divide( np.sum(np.abs(self.float32_y.numpy() - self.float16_y)), - np.sum(np.abs(self.float32_y.numpy()))) + np.sum(np.abs(self.float32_y.numpy())), + ) mlu_diff_1 = np.divide( np.sum(np.abs(self.float32_y.numpy() - mlu_float16_y.numpy())), - np.sum(np.abs(self.float32_y.numpy()))) + np.sum(np.abs(self.float32_y.numpy())), + ) cpu_diff_2 = np.divide( np.sum(np.square(self.float32_y.numpy() - self.float16_y)), - np.sum(np.square(self.float32_y.numpy()))) + np.sum(np.square(self.float32_y.numpy())), + ) mlu_diff_2 = np.divide( np.sum(np.square(self.float32_y.numpy() - mlu_float16_y.numpy())), - np.sum(np.square(self.float32_y.numpy()))) + np.sum(np.square(self.float32_y.numpy())), + ) assert mlu_diff_1 <= cpu_diff_1 assert mlu_diff_2 <= cpu_diff_2 @@ -179,14 +179,14 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_hard_swish_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_hard_swish_op_mlu.py index a88103154e92d5e262004d8bd8437f9d814a6f1f..2774162bc79582e7af3b557922d42e1238317482 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_hard_swish_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_hard_swish_op_mlu.py @@ -38,15 +38,17 @@ def ref_hard_swish_grad(x, threshold, scale, offset, data_type): threshold = scalarToType(threshold, data_type) scale = scalarToType(scale, data_type) offset = scalarToType(offset, data_type) - dout = np.full_like(x, fill_value=1. / x.size) + dout = np.full_like(x, fill_value=1.0 / x.size) tmp = ((x + offset) < threshold).astype(x.dtype) - dx = dout * (((x + offset) > 0).astype(x.dtype) * - (2 * x + offset) * tmp / scale + 1.0 - tmp) + dx = dout * ( + ((x + offset) > 0).astype(x.dtype) * (2 * x + offset) * tmp / scale + + 1.0 + - tmp + ) return dx class TestHardSwishMLU(OpTest): - def setUp(self): paddle.enable_static() @@ -63,11 +65,11 @@ class TestHardSwishMLU(OpTest): x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02 out = ( - x * - (np.minimum(np.maximum(x + offset, 0.), threshold) / scale)).astype( - self.dtype) - self.x_grad = ref_hard_swish_grad(x, threshold, scale, offset, - self.dtype) + x * (np.minimum(np.maximum(x + offset, 0.0), threshold) / scale) + ).astype(self.dtype) + self.x_grad = ref_hard_swish_grad( + x, threshold, scale, offset, self.dtype + ) self.set_mlu() self.inputs = {'X': x} self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset} @@ -87,7 +89,6 @@ class TestHardSwishMLU(OpTest): class TestHardSwishMLUWithCPUFloat16(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -111,11 +112,21 @@ class TestHardSwishMLUWithCPUFloat16(unittest.TestCase): threshold = scalarToType(threshold, np.float16) scale = scalarToType(scale, np.float16) offset = scalarToType(offset, np.float16) - self.float16_y = (self.float16_x * (np.minimum( - np.maximum(self.float16_x + offset, scalarToType(0., np.float16)), - threshold) / scale)).astype(np.float16) - self.float16_grad = ref_hard_swish_grad(self.float16_x, threshold, - scale, offset, np.float16) + self.float16_y = ( + self.float16_x + * ( + np.minimum( + np.maximum( + self.float16_x + offset, scalarToType(0.0, np.float16) + ), + threshold, + ) + / scale + ) + ).astype(np.float16) + self.float16_grad = ref_hard_swish_grad( + self.float16_x, threshold, scale, offset, np.float16 + ) def test_check_output_and_grad_mlu(self): # mlu float16 @@ -127,36 +138,45 @@ class TestHardSwishMLUWithCPUFloat16(unittest.TestCase): cpu_diff_1 = np.divide( np.sum(np.abs(self.float32_y.numpy() - self.float16_y)), - np.sum(np.abs(self.float32_y.numpy()))) + np.sum(np.abs(self.float32_y.numpy())), + ) mlu_diff_1 = np.divide( np.sum(np.abs(self.float32_y.numpy() - mlu_float16_y.numpy())), - np.sum(np.abs(self.float32_y.numpy()))) + np.sum(np.abs(self.float32_y.numpy())), + ) cpu_diff_2 = np.divide( np.sum(np.square(self.float32_y.numpy() - self.float16_y)), - 
np.sum(np.square(self.float32_y.numpy()))) + np.sum(np.square(self.float32_y.numpy())), + ) mlu_diff_2 = np.divide( np.sum(np.square(self.float32_y.numpy() - mlu_float16_y.numpy())), - np.sum(np.square(self.float32_y.numpy()))) + np.sum(np.square(self.float32_y.numpy())), + ) assert mlu_diff_1 <= cpu_diff_1 assert mlu_diff_2 <= cpu_diff_2 cpu_diff_1 = np.divide( np.sum(np.abs(self.float32_grad.numpy() - self.float16_grad)), - np.sum(np.abs(self.float32_grad.numpy()))) + np.sum(np.abs(self.float32_grad.numpy())), + ) mlu_diff_1 = np.divide( - np.sum(np.abs(self.float32_grad.numpy() - - mlu_float16_grad.numpy())), - np.sum(np.abs(self.float32_grad.numpy()))) + np.sum( + np.abs(self.float32_grad.numpy() - mlu_float16_grad.numpy()) + ), + np.sum(np.abs(self.float32_grad.numpy())), + ) cpu_diff_2 = np.divide( np.sum(np.square(self.float32_grad.numpy() - self.float16_grad)), - np.sum(np.square(self.float32_grad.numpy()))) + np.sum(np.square(self.float32_grad.numpy())), + ) mlu_diff_2 = np.divide( np.sum( - np.square(self.float32_grad.numpy() - - mlu_float16_grad.numpy())), - np.sum(np.square(self.float32_grad.numpy()))) + np.square(self.float32_grad.numpy() - mlu_float16_grad.numpy()) + ), + np.sum(np.square(self.float32_grad.numpy())), + ) assert mlu_diff_1 <= cpu_diff_1 assert mlu_diff_2 <= cpu_diff_2 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_huber_loss_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_huber_loss_op_mlu.py index 40e0dab0803028308195a63a25a212fb8dfd6ead..35003418095cb715a5a2eb3c7d436d3157677c07 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_huber_loss_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_huber_loss_op_mlu.py @@ -34,7 +34,6 @@ def huber_loss_forward(val, delta): class TestHuberLossOp(OpTest): - def setUp(self): self.op_type = 'huber_loss' self.set_mlu() @@ -44,16 +43,17 @@ class TestHuberLossOp(OpTest): self.init_input() shape = self.set_shape() residual = self.inputs['Y'] - self.inputs['X'] - loss = np.vectorize(huber_loss_forward)(residual, - self.delta).astype('float32') + loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype( + 'float32' + ) self.attrs = {'delta': self.delta} self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)} def init_input(self): shape = self.set_shape() self.inputs = { - 'X': np.random.uniform(0, 1., shape).astype('float32'), - 'Y': np.random.uniform(0, 1., shape).astype('float32'), + 'X': np.random.uniform(0, 1.0, shape).astype('float32'), + 'Y': np.random.uniform(0, 1.0, shape).astype('float32'), } def set_mlu(self): @@ -70,38 +70,40 @@ class TestHuberLossOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.008, - no_grad_set=set("residual")) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.008, + no_grad_set=set("residual"), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.008, - no_grad_set=set('residual')) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.008, + no_grad_set=set('residual'), + ) def TestHuberLossOp1(TestHuberLossOp): - def set_shape(self): - return (64) + return 64 def TestHuberLossOp2(TestHuberLossOp): - def set_shape(self): return (6, 6) def TestHuberLossOp3(TestHuberLossOp): - def set_shape(self): return (6, 6, 1) class TestHuberLossOpError(unittest.TestCase): - def 
test_errors(self): with program_guard(Program(), Program()): # the input and label must be Variable @@ -116,10 +118,12 @@ class TestHuberLossOpError(unittest.TestCase): # the dtype of input and label must be float32 or float64 xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32") lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32") - self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr, - delta) - self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2, - delta) + self.assertRaises( + TypeError, fluid.layers.huber_loss, xw2, lr, delta + ) + self.assertRaises( + TypeError, fluid.layers.huber_loss, xr, lw2, delta + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_iou_similarity_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_iou_similarity_op_mlu.py index ac206d6649aca2b7f3899265ed0567b68ac437c1..a9035df040953cad6602a201616dab31bb50ac83 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_iou_similarity_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_iou_similarity_op_mlu.py @@ -28,7 +28,6 @@ np.random.seed(2022) class TestMluIouSimilarityOp(OpTest): - def setUp(self): self.op_type = "iou_similarity" self.set_mlu() @@ -58,7 +57,7 @@ class TestMluIouSimilarityOp(OpTest): self.boxes1_lod = [[1 for _ in range(self.N)]] self.inputs = { 'X': (self.boxes1, self.boxes1_lod), - 'Y': self.boxes2 + 'Y': self.boxes2, } else: self.inputs = {'X': self.boxes1, 'Y': self.boxes2} @@ -74,7 +73,9 @@ class TestMluIouSimilarityOp(OpTest): def test_check_output(self): self.check_output_with_place(self.place) - def _compute_iou(self, ): + def _compute_iou( + self, + ): for row in range(self.boxes1.shape[0]): for col in range(self.boxes2.shape[0]): xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] @@ -104,7 +105,6 @@ class TestMluIouSimilarityOp(OpTest): class TestMluIouSimilarityOpWithLoD(TestMluIouSimilarityOp): - def set_init_config(self): super(TestMluIouSimilarityOpWithLoD, self).set_init_config() self.box_normalized = True @@ -112,7 +112,6 @@ class TestMluIouSimilarityOpWithLoD(TestMluIouSimilarityOp): class TestMluIouSimilarityOpWithBoxNormalized(TestMluIouSimilarityOp): - def set_init_config(self): super(TestMluIouSimilarityOpWithBoxNormalized, self).set_init_config() self.box_normalized = True @@ -120,7 +119,6 @@ class TestMluIouSimilarityOpWithBoxNormalized(TestMluIouSimilarityOp): def TestMluIouSimilarityOpFp16(TestMluIouSimilarityOp): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_label_smooth_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_label_smooth_op_mlu.py index dfc01e42202ec93cf5a9f5a1b28c2d318c8ce129..0379c8c9cd8be28b3012ccb68ff5e5470344fd6d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_label_smooth_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_label_smooth_op_mlu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestLabelSmoothOp(OpTest): - def init_dtype(self): self.dtype = np.float32 @@ -46,7 +45,8 @@ class TestLabelSmoothOp(OpTest): self.init_dtype() self.config() smoothed_label = ( - 1 - self.epsilon) * self.label + self.epsilon / self.label_dim + 1 - self.epsilon + ) * self.label + self.epsilon / self.label_dim smoothed_label = smoothed_label.astype(self.dtype) self.inputs = {'X': self.label} self.attrs = {'epsilon': self.epsilon} @@ -57,7 +57,6 @@ class TestLabelSmoothOp(OpTest): class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): - def setUp(self): self.init_dtype() self.config() @@ -70,45 +69,43 
@@ class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): class TestLabelSmoothOp3D(TestLabelSmoothOp): - def setUp(self): super(TestLabelSmoothOp3D, self).setUp() self.inputs['X'] = self.inputs['X'].reshape( - [2, -1, self.inputs['X'].shape[-1]]) + [2, -1, self.inputs['X'].shape[-1]] + ) self.outputs['Out'] = self.outputs['Out'].reshape( - self.inputs['X'].shape) + self.inputs['X'].shape + ) class TestLabelSmoothOpWithPriorDist3D(TestLabelSmoothOpWithPriorDist): - def setUp(self): super(TestLabelSmoothOpWithPriorDist3D, self).setUp() self.inputs['X'] = self.inputs['X'].reshape( - [2, -1, self.inputs['X'].shape[-1]]) + [2, -1, self.inputs['X'].shape[-1]] + ) self.outputs['Out'] = self.outputs['Out'].reshape( - self.inputs['X'].shape) + self.inputs['X'].shape + ) class TestLabelSmoothOpFP16(TestLabelSmoothOp): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOpWithPriorDistFP16(TestLabelSmoothOpWithPriorDist): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOp3DFP16(TestLabelSmoothOp3D): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOpWithPriorDist3DFP16(TestLabelSmoothOpWithPriorDist3D): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py index 838ee571772fb9f237b39e331063c3ec1da3f16a..1a0eac49eccfb08036a9743bffce0b4e31f57f58 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py @@ -26,8 +26,13 @@ import sys sys.path.append('..') from op_test import _set_use_system_allocator from paddle.fluid import Program, program_guard -from paddle.fluid.contrib.mixed_precision.fp16_utils import _keep_layer_norm_scale_bias_to_fp32 -from test_layer_norm_op import _reference_layer_norm_naive, _reference_layer_norm_grad +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _keep_layer_norm_scale_bias_to_fp32, +) +from test_layer_norm_op import ( + _reference_layer_norm_naive, + _reference_layer_norm_grad, +) paddle.enable_static() @@ -37,51 +42,57 @@ _set_use_system_allocator(True) class TestLayerNormOp(unittest.TestCase): - def setUp(self): self.use_cudnn = True self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-5, - atol=atol, - err_msg=msg) - - def check_forward_backward(self, - shape, - begin_norm_axis, - has_scale=True, - has_bias=True, - y_grad_scale=1.0, - use_mkldnn=False): - - def test_with_place(place, - shape, - begin_norm_axis, - use_mkldnn=use_mkldnn): + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-5, atol=atol, err_msg=msg + ) + + def check_forward_backward( + self, + shape, + begin_norm_axis, + has_scale=True, + has_bias=True, + y_grad_scale=1.0, + use_mkldnn=False, + ): + def test_with_place( + place, shape, begin_norm_axis, use_mkldnn=use_mkldnn + ): # attr epsilon = 0.00001 x_shape = shape - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) scale_shape = [D] np.random.seed(123) x = np.random.random_sample(x_shape).astype(np.float32) - scale = np.random.random_sample(scale_shape).astype( - np.float32) if has_scale else None - bias = np.random.random_sample(scale_shape).astype( - np.float32) if has_bias else None + scale = ( + 
np.random.random_sample(scale_shape).astype(np.float32) + if has_scale + else None + ) + bias = ( + np.random.random_sample(scale_shape).astype(np.float32) + if has_bias + else None + ) y_grad = (np.random.random_sample(x_shape) * y_grad_scale).astype( - np.float32) + np.float32 + ) # reference forward & backward y, mean, variance = _reference_layer_norm_naive( - x, scale, bias, epsilon, begin_norm_axis) + x, scale, bias, epsilon, begin_norm_axis + ) x_grad, scale_grad, bias_grad = _reference_layer_norm_grad( - x, y_grad, scale, bias, mean, variance, begin_norm_axis) + x, y_grad, scale, bias, mean, variance, begin_norm_axis + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -96,9 +107,11 @@ class TestLayerNormOp(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = {"X": block.var('x')} fetch_list = [ 'y', @@ -118,17 +131,20 @@ class TestLayerNormOp(unittest.TestCase): outputs={ "Y": block.var('y'), "Mean": block.var('mean'), # share the same memory - "Variance": - block.var('variance'), # share the same memory + "Variance": block.var( + 'variance' + ), # share the same memory }, attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": use_mkldnn - }) + "use_mkldnn": use_mkldnn, + }, + ) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - layer_norm_op.desc, set(), []) + layer_norm_op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -142,105 +158,130 @@ class TestLayerNormOp(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in ['x', 'scale', 'bias', 'y@GRAD'] - }, - fetch_list=fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=fetch_list, + ) self.__assert_close(y, out[0], "y") self.__assert_close(mean, out[1], "mean") - self.__assert_close(1 / np.sqrt(variance), out[2], "variance", - 1e-3) + self.__assert_close( + 1 / np.sqrt(variance), out[2], "variance", 1e-3 + ) self.__assert_close(x_grad, out[3], "x_grad") if has_scale: - self.__assert_close(scale_grad.reshape(-1), - out[fetch_list.index('scale@GRAD')], - "scale_grad", 1e-3) + self.__assert_close( + scale_grad.reshape(-1), + out[fetch_list.index('scale@GRAD')], + "scale_grad", + 1e-3, + ) if has_bias: - self.__assert_close(bias_grad.reshape(-1), - out[fetch_list.index('bias@GRAD')], - "bias_grad") + self.__assert_close( + bias_grad.reshape(-1), + out[fetch_list.index('bias@GRAD')], + "bias_grad", + ) test_with_place(self.place, shape, begin_norm_axis) def test_check_forward_backward_with_scale_and_bias(self): self.check_forward_backward(shape=[1, 3, 4, 5], begin_norm_axis=1) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=True) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=True, - has_bias=False) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=False) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=True, + ) + 
self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=True, + has_bias=False, + ) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=False, + ) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) - self.check_forward_backward(shape=[92, 513, 129], - begin_norm_axis=2, - y_grad_scale=0.1) + self.check_forward_backward( + shape=[92, 513, 129], begin_norm_axis=2, y_grad_scale=0.1 + ) self.check_forward_backward(shape=[3, 34, 1134], begin_norm_axis=2) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=False, - has_bias=True, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=True, - has_bias=False, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=False, - has_bias=False, - y_grad_scale=0.1) - self.check_forward_backward(shape=[512, 1024], - begin_norm_axis=1, - has_scale=True, - has_bias=True) + self.check_forward_backward( + shape=[92, 513, 1134], begin_norm_axis=2, y_grad_scale=0.1 + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=False, + has_bias=True, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=True, + has_bias=False, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=False, + has_bias=False, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[512, 1024], begin_norm_axis=1, has_scale=True, has_bias=True + ) class TestLayerNormAPI(unittest.TestCase): - def test_case(self): - x = fluid.layers.data(name='x', - shape=[64, 32, 256], - dtype='float32', - append_batch_size=False) - x = fluid.layers.layer_norm(x, - scale=True, - shift=True, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None) - x = fluid.layers.layer_norm(x, - scale=False, - shift=False, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None) - x = fluid.layers.layer_norm(x, - scale=False, - shift=False, - begin_norm_axis=1, - epsilon=1e-05, - param_attr="scale", - bias_attr="shift") + x = fluid.layers.data( + name='x', + shape=[64, 32, 256], + dtype='float32', + append_batch_size=False, + ) + x = fluid.layers.layer_norm( + x, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + ) + x = fluid.layers.layer_norm( + x, + scale=False, + shift=False, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + ) + x = fluid.layers.layer_norm( + x, + scale=False, + shift=False, + begin_norm_axis=1, + epsilon=1e-05, + param_attr="scale", + bias_attr="shift", + ) class TestDygraphLayerNormAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): paddle.enable_static() @@ -256,7 +297,6 @@ class TestDygraphLayerNormAPIError(unittest.TestCase): class TestFP16ScaleBiasLayerNorm(unittest.TestCase): - def check_main(self, x_np, weight_np, bias_np, dtype): paddle.disable_static() @@ -285,9 +325,11 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): bias_np = np.random.random([20]).astype('float16') y_np_1, x_g_np_1, w_g_np_1, b_g_np_1 = self.check_main( - x_np, weight_np, bias_np, 'float16') + x_np, weight_np, bias_np, 'float16' + ) y_np_2, x_g_np_2, w_g_np_2, b_g_np_2 = 
self.check_main( - x_np, weight_np, bias_np, 'float32') + x_np, weight_np, bias_np, 'float32' + ) def assert_equal(x, y): np.testing.assert_allclose(x, y) @@ -299,7 +341,6 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): class TestGetSetKeepLayerNormScaleBiasFP32Flag(unittest.TestCase): - def test_main(self): self.assertTrue(_keep_layer_norm_scale_bias_to_fp32()) _keep_layer_norm_scale_bias_to_fp32(False) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py index 23fd10ffbd88703f87976239498f2b0743a836f6..05d78e7f31a1c34670f605ba7b50e9df7a28cee8 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_leaky_relu_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestLeadyRelu(OpTest): - def setUp(self): self.set_mlu() self.op_type = "leaky_relu" @@ -63,33 +62,29 @@ class TestLeadyRelu(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.006 + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') class TestLeadyReluFP16(TestLeadyRelu): - def init_dtype(self): self.dtype = np.float16 class TestLeadyRelu2(TestLeadyRelu): - def set_attrs(self): self.attrs = {'alpha': 0.5} class TestLeadyRelu3(TestLeadyRelu): - def set_attrs(self): self.attrs = {'alpha': -0.5} class TestLeakyReluNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -102,9 +97,9 @@ class TestLeakyReluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) y = paddle.nn.functional.leaky_relu(x) @@ -127,15 +122,17 @@ class TestLeakyReluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "x": x_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"x": x_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py index 1dca8d9a0bcb4e5b1415e6c7697df147a121afd7..11df0949a6f47151ee38c4efd8ff162be90b1e7d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestActivation(OpTest): - def setUp(self): self.set_mlu() self.op_type = "exp" @@ -57,7 +56,6 @@ class TestActivation(OpTest): class TestLog(TestActivation): - def setUp(self): self.set_mlu() self.op_type = "log" @@ -72,21 +70,18 @@ class TestLog(TestActivation): self.outputs = {'Out': out} def test_error(self): - in1 = fluid.layers.data(name="in1", - shape=[11, 17], - append_batch_size=False, - dtype="int32") - in2 = fluid.layers.data(name="in2", - shape=[11, 17], - 
append_batch_size=False, - dtype="int64") + in1 = fluid.layers.data( + name="in1", shape=[11, 17], append_batch_size=False, dtype="int32" + ) + in2 = fluid.layers.data( + name="in2", shape=[11, 17], append_batch_size=False, dtype="int64" + ) self.assertRaises(TypeError, fluid.layers.log, in1) self.assertRaises(TypeError, fluid.layers.log, in2) class TestLog2(TestActivation): - def setUp(self): self.set_mlu() self.op_type = "log2" @@ -107,19 +102,22 @@ class TestLog2(TestActivation): self.assertRaises(TypeError, paddle.log2, in2) def test_api(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32") - data_x = paddle.static.data(name="data_x", - shape=[11, 17], - dtype="float32") + data_x = paddle.static.data( + name="data_x", shape=[11, 17], dtype="float32" + ) out1 = paddle.log2(data_x) exe = paddle.static.Executor(place=fluid.CPUPlace()) exe.run(paddle.static.default_startup_program()) - res1 = exe.run(paddle.static.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[out1]) + res1 = exe.run( + paddle.static.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[out1], + ) expected_res = np.log2(input_x) np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6) @@ -136,7 +134,6 @@ class TestLog2(TestActivation): class TestLog10(TestActivation): - def setUp(self): self.set_mlu() self.op_type = "log10" @@ -157,19 +154,22 @@ class TestLog10(TestActivation): self.assertRaises(TypeError, paddle.log10, in2) def test_api(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32") - data_x = paddle.static.data(name="data_x", - shape=[11, 17], - dtype="float32") + data_x = paddle.static.data( + name="data_x", shape=[11, 17], dtype="float32" + ) out1 = paddle.log10(data_x) exe = paddle.static.Executor(place=paddle.CPUPlace()) exe.run(paddle.static.default_startup_program()) - res1 = exe.run(paddle.static.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[out1]) + res1 = exe.run( + paddle.static.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[out1], + ) expected_res = np.log10(input_x) np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6) @@ -184,7 +184,6 @@ class TestLog10(TestActivation): class TestLogHalf(TestLog): - def init_dtype(self): self.dtype = np.float16 @@ -193,7 +192,6 @@ class TestLogHalf(TestLog): class TestLog2Half(TestLog2): - def init_dtype(self): self.dtype = np.float16 @@ -202,7 +200,6 @@ class TestLog2Half(TestLog2): class TestLog10Half(TestLog10): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py index 91e8a86ce685609e088a1f278b368f76e7dc2416..82d22af8933034efd15c3acbb285437e9bcc73e6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py @@ -24,7 +24,7 @@ paddle.enable_static() def ref_log_softmax(x): - shiftx = (x - np.max(x)) + shiftx = x - np.max(x) out = shiftx - np.log(np.exp(shiftx).sum()) return out @@ -34,14 +34,14 @@ def ref_log_softmax_grad(x, axis): axis += len(x.shape) out = 
np.apply_along_axis(ref_log_softmax, axis, x) axis_dim = x.shape[axis] - dout = np.full_like(x, fill_value=1. / x.size) + dout = np.full_like(x, fill_value=1.0 / x.size) dx = dout - np.exp(out) * dout.copy().sum(axis=axis, keepdims=True).repeat( - axis_dim, axis=axis) + axis_dim, axis=axis + ) return dx class TestLogSoftmaxOp(OpTest): - def setUp(self): self.op_type = 'log_softmax' self.set_mlu() @@ -51,7 +51,7 @@ class TestLogSoftmaxOp(OpTest): self.axis = -1 self.set_attrs() - x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype) + x = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) @@ -70,24 +70,22 @@ class TestLogSoftmaxOp(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], ['Out'], - user_defined_grads=[self.x_grad]) + self.check_grad_with_place( + self.place, ['X'], ['Out'], user_defined_grads=[self.x_grad] + ) class TestLogSoftmaxShape(TestLogSoftmaxOp): - def set_attrs(self): self.shape = [12, 10] class TestLogSoftmaxAxis(TestLogSoftmaxOp): - def set_attrs(self): self.axis = 1 class TestLogSoftmaxOpFp16(OpTest): - def setUp(self): self.op_type = 'log_softmax' self.set_mlu() @@ -97,7 +95,7 @@ class TestLogSoftmaxOpFp16(OpTest): self.axis = -1 self.set_attrs() - x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype) + x = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) @@ -116,17 +114,20 @@ class TestLogSoftmaxOpFp16(OpTest): self.check_output_with_place(self.place, atol=1e-2) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], ['Out'], - user_defined_grads=[self.x_grad], - max_relative_error=0.015) + self.check_grad_with_place( + self.place, + ['X'], + ['Out'], + user_defined_grads=[self.x_grad], + max_relative_error=0.015, + ) class TestNNLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.set_mlu() self.x_shape = [2, 3, 4, 5] - self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32) + self.x = np.random.uniform(-1.0, 1.0, self.x_shape).astype(np.float32) def set_mlu(self): self.__class__.use_mlu = True @@ -158,7 +159,6 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.set_mlu() self.x_shape = [2, 3, 4, 5] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_logical_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_logical_op_mlu.py index c032748eb9aad568764ec3fd56c369a1b67ebbac..6923f669678158c666f653b879c03c92d4d385ba 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_logical_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_logical_op_mlu.py @@ -26,84 +26,32 @@ paddle.enable_static() SUPPORTED_DTYPES = [bool, np.int8, np.int16, np.int32, np.float32] -TEST_META_OP_DATA = [{ - 'op_str': 'logical_and', - 'binary_op': True -}, { - 'op_str': 'logical_or', - 'binary_op': True -}, { - 'op_str': 'logical_xor', - 'binary_op': True -}, { - 'op_str': 'logical_not', - 'binary_op': False -}] +TEST_META_OP_DATA = [ + {'op_str': 'logical_and', 'binary_op': True}, + {'op_str': 'logical_or', 'binary_op': True}, + {'op_str': 'logical_xor', 'binary_op': True}, + {'op_str': 'logical_not', 'binary_op': False}, +] TEST_META_SHAPE_DATA = { - 'XDimLargerThanYDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 5] - }, - 
'XDimLargerThanYDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 1] - }, - 'XDimLargerThanYDim3': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [1, 4, 1] - }, - 'XDimLargerThanYDim4': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4, 1] - }, - 'XDimLargerThanYDim5': { - 'x_shape': [2, 3, 1, 5], - 'y_shape': [3, 1, 1] - }, - 'XDimLessThanYDim1': { - 'x_shape': [4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim2': { - 'x_shape': [1, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim3': { - 'x_shape': [3, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim4': { - 'x_shape': [3, 1, 1], - 'y_shape': [2, 3, 1, 5] - }, - 'XDimLessThanYDim5': { - 'x_shape': [4, 5], - 'y_shape': [2, 3, 4, 5] - }, - 'Axis1InLargerDim': { - 'x_shape': [1, 4, 5], - 'y_shape': [2, 3, 1, 5] - }, - 'EqualDim1': { - 'x_shape': [10, 7], - 'y_shape': [10, 7] - }, - 'EqualDim2': { - 'x_shape': [1, 1, 4, 5], - 'y_shape': [2, 3, 1, 5] - } + 'XDimLargerThanYDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 5]}, + 'XDimLargerThanYDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 1]}, + 'XDimLargerThanYDim3': {'x_shape': [2, 3, 4, 5], 'y_shape': [1, 4, 1]}, + 'XDimLargerThanYDim4': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4, 1]}, + 'XDimLargerThanYDim5': {'x_shape': [2, 3, 1, 5], 'y_shape': [3, 1, 1]}, + 'XDimLessThanYDim1': {'x_shape': [4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim2': {'x_shape': [1, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim3': {'x_shape': [3, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim4': {'x_shape': [3, 1, 1], 'y_shape': [2, 3, 1, 5]}, + 'XDimLessThanYDim5': {'x_shape': [4, 5], 'y_shape': [2, 3, 4, 5]}, + 'Axis1InLargerDim': {'x_shape': [1, 4, 5], 'y_shape': [2, 3, 1, 5]}, + 'EqualDim1': {'x_shape': [10, 7], 'y_shape': [10, 7]}, + 'EqualDim2': {'x_shape': [1, 1, 4, 5], 'y_shape': [2, 3, 1, 5]}, } TEST_META_WRONG_SHAPE_DATA = { - 'ErrorDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4] - }, - 'ErrorDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 3] - } + 'ErrorDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4]}, + 'ErrorDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 3]}, } @@ -162,16 +110,20 @@ def test(unit_test, use_mlu=False, test_error=False): META_DATA = dict(TEST_META_WRONG_SHAPE_DATA) for shape_data in META_DATA.values(): for data_type in SUPPORTED_DTYPES: - meta_data['x_np'] = np_data_generator(shape_data['x_shape'], - dtype=data_type) - meta_data['y_np'] = np_data_generator(shape_data['y_shape'], - dtype=data_type) + meta_data['x_np'] = np_data_generator( + shape_data['x_shape'], dtype=data_type + ) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type + ) if meta_data['binary_op'] and test_error: # catch C++ Exception - unit_test.assertRaises(BaseException, run_static, - **meta_data) - unit_test.assertRaises(BaseException, run_dygraph, - **meta_data) + unit_test.assertRaises( + BaseException, run_static, **meta_data + ) + unit_test.assertRaises( + BaseException, run_dygraph, **meta_data + ) continue static_result = run_static(**meta_data) dygraph_result = run_dygraph(**meta_data) @@ -181,11 +133,11 @@ def test(unit_test, use_mlu=False, test_error=False): np_result = np_op(meta_data['x_np']) unit_test.assertTrue((static_result == np_result).all()) unit_test.assertTrue( - (dygraph_result.numpy() == np_result).all()) + (dygraph_result.numpy() == np_result).all() + ) def test_type_error(unit_test, use_mlu, type_str_map): - def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) error_type = ValueError @@ 
-220,24 +172,24 @@ def test_type_error(unit_test, use_mlu, type_str_map): startup_program = paddle.static.Program() main_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', - shape=[10], - dtype=type_str_map['x']) - y = paddle.static.data(name='y', - shape=[10], - dtype=type_str_map['y']) + x = paddle.static.data( + name='x', shape=[10], dtype=type_str_map['x'] + ) + y = paddle.static.data( + name='y', shape=[10], dtype=type_str_map['y'] + ) check_type(meta_data['op_str'], x, y, binary_op) def type_map_factory(): - return [{ - 'x': x_type, - 'y': y_type - } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] + return [ + {'x': x_type, 'y': y_type} + for x_type in SUPPORTED_DTYPES + for y_type in SUPPORTED_DTYPES + ] class TestMLU(unittest.TestCase): - def test(self): test(self, True) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py index 29b58a4bbec0b795691fe4bff3e777030ccadced..f8b962d44d13cd875552381295ef32ad3b3a6987 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2022 class TestLookupTableV2(OpTest): - def setUp(self): self.set_mlu() self.op_type = "lookup_table_v2" @@ -36,22 +35,22 @@ class TestLookupTableV2(OpTest): self.init_padding_idx() np.random.seed(SEED) w = np.random.random([self.vocab, self.dim]).astype(self.dtype) - x = np.random.randint(0, self.vocab, - size=(self.bsz, - self.seqlen)).astype(self.ids_dtype) + x = np.random.randint( + 0, self.vocab, size=(self.bsz, self.seqlen) + ).astype(self.ids_dtype) out = w[x] if self.padding_idx != -1: out[np.squeeze(x == self.padding_idx)] = np.zeros(self.dim) self.inputs = { 'W': OpTest.np_dtype_to_fluid_dtype(w), - 'Ids': OpTest.np_dtype_to_fluid_dtype(x) + 'Ids': OpTest.np_dtype_to_fluid_dtype(x), } self.attrs = { 'is_sparse': False, 'is_distributed': False, 'remote_prefetch': False, - 'padding_idx': self.padding_idx + 'padding_idx': self.padding_idx, } self.outputs = {'Out': out} @@ -78,9 +77,9 @@ class TestLookupTableV2(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['W'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['W'], 'Out', max_relative_error=0.01 + ) else: self.check_grad_with_place(self.place, ['W'], 'Out') @@ -98,7 +97,6 @@ class TestLookupTableV2FP16(TestLookupTableV2): class TestLookupTableV2Dim32(TestLookupTableV2): - def init_dims(self): self.bsz = 6 self.seqlen = 8 @@ -126,13 +124,11 @@ class TestLookupTableV2Dim32FP16(TestLookupTableV2): class TestLookupTableV2WithPadding(TestLookupTableV2): - def init_padding_idx(self): self.padding_idx = np.random.randint(0, self.vocab) class TestLookupTableV2WithPadding1(TestLookupTableV2): - def init_padding_idx(self): self.padding_idx = np.random.randint(0, self.vocab) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py index c60eea07874006ef3784e801e872a132065a5432..11e9dc86cd47016915c15c995746590d236da4b6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py @@ -39,7 +39,6 @@ def np_masked_select(shape, x, mask): class TestMaskedSelectOp(OpTest): - 
def setUp(self): self.init() self.__class__.use_mlu = True @@ -63,20 +62,17 @@ class TestMaskedSelectOp(OpTest): class TestMaskedSelectOp1(TestMaskedSelectOp): - def init(self): self.shape = (6, 8, 9, 18) class TestMaskedSelectOp2(TestMaskedSelectOp): - def init(self): - self.shape = (168, ) + self.shape = (168,) @skip_check_grad_ci(reason="get_numeric_gradient not support int32") class TestMaskedSelectOpInt32(TestMaskedSelectOp): - def init_dtype(self): self.dtype = np.int32 @@ -85,20 +81,18 @@ class TestMaskedSelectOpInt32(TestMaskedSelectOp): class TestMaskedSelectOpFp16(TestMaskedSelectOp): - def init_dtype(self): self.dtype = np.float16 def test_check_grad(self): x_grad = self.inputs['Mask'].astype(self.dtype) x_grad = x_grad * (1 / x_grad.size) - self.check_grad_with_place(self.place, ['X'], - 'Y', - user_defined_grads=[x_grad]) + self.check_grad_with_place( + self.place, ['X'], 'Y', user_defined_grads=[x_grad] + ) class TestMaskedSelectAPI(unittest.TestCase): - def test_imperative_mode(self): paddle.disable_static() shape = (88, 6, 8) @@ -123,27 +117,26 @@ class TestMaskedSelectAPI(unittest.TestCase): exe = paddle.static.Executor(place=paddle.device.MLUPlace(0)) - res = exe.run(paddle.static.default_main_program(), - feed={ - "x": np_x, - "mask": np_mask - }, - fetch_list=[out]) + res = exe.run( + paddle.static.default_main_program(), + feed={"x": np_x, "mask": np_mask}, + fetch_list=[out], + ) self.assertEqual(np.allclose(res, np_out), True) class TestMaskedSelectError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='float32', name='x') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='mask_float') + mask_float = paddle.fluid.data( + shape=shape, dtype='float32', name='mask_float' + ) np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_matmul_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_matmul_op_mlu.py index 76daa775756cd154230ba89eb57ecafc32a95a71..a33ef9d54127e22966c63e10e57d78bca53c6ab1 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_matmul_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_matmul_op_mlu.py @@ -31,7 +31,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False, scale=1.0): # transpose X and Y appropriately. 
if transpose_X: if X.ndim == 1: - X = X.reshape((X.size, )) + X = X.reshape((X.size,)) elif X.ndim == 2: X = X.T else: @@ -40,7 +40,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False, scale=1.0): X = np.transpose(X, tuple(dim)) if transpose_Y: if Y.ndim == 1: - Y = Y.reshape((Y.size, )) + Y = Y.reshape((Y.size,)) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -76,14 +76,15 @@ class TestMatMulOp(OpTest): X = -0.1 + 0.2 * X Y = -0.1 + 0.2 * Y - Out = reference_matmul(X, Y, self.transpose_X, self.transpose_Y, - self.alpha) + Out = reference_matmul( + X, Y, self.transpose_X, self.transpose_Y, self.alpha + ) Out = Out.astype(self.dtype) self.inputs = {'X': X, 'Y': Y} self.attrs = { 'transpose_X': self.transpose_X, 'transpose_Y': self.transpose_Y, - 'alpha': self.alpha + 'alpha': self.alpha, } self.outputs = {'Out': Out} @@ -92,8 +93,8 @@ class TestMatMulOp(OpTest): self.place = paddle.device.MLUPlace(0) def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.transpose_X = False self.transpose_Y = False @@ -116,7 +117,7 @@ class TestMatMulOp1(TestMatMulOp): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.transpose_X = False self.transpose_Y = True @@ -129,7 +130,7 @@ class TestMatMulOp2(TestMatMulOp): def config(self): self.x_shape = (1, 2, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.transpose_X = True self.transpose_Y = False @@ -248,7 +249,7 @@ class TestMatMulOp12(TestMatMulOp): """ def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.transpose_X = False self.transpose_Y = False @@ -261,13 +262,13 @@ class TestMatMulOp13(TestMatMulOp): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.transpose_X = False self.transpose_Y = False # TODO(mlu): alpha will be supported in next version -#--------------------test matmul alpha-------------------- +# --------------------test matmul alpha-------------------- # def create_test_alpha_class(parent): # class TestMatMulOpAlphaCase(parent): # def init_alpha(self): @@ -291,11 +292,9 @@ class TestMatMulOp13(TestMatMulOp): # create_test_alpha_class(TestMatMulOp13) -#--------------------test matmul fp16-------------------- +# --------------------test matmul fp16-------------------- def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): - class TestMatMulOpFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -303,9 +302,12 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): self.check_output_with_place(self.place, atol=atol) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=max_relative_error) + self.check_grad_with_place( + self.place, + ['X', 'Y'], + 'Out', + max_relative_error=max_relative_error, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestMatMulOpFp16Case.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py index 4c782980b3db9205c19a9818d530ea22808ed1ba..f1b142902b10434dbac557eb4ebdcf5f264775a7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py @@ -36,8 +36,8 @@ class TestMatMulV2Op(OpTest): self.place = 
paddle.device.MLUPlace(0) def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -76,7 +76,7 @@ class TestMatMuklOp2(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.trans_x = False self.trans_y = True @@ -88,7 +88,7 @@ class TestMatMuklOp3(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 1, 100, 2) self.trans_x = False self.trans_y = False @@ -100,7 +100,7 @@ class TestMatMuklOp4(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -113,7 +113,7 @@ class TestMatMuklOp5(TestMatMulV2Op): def config(self): self.x_shape = (1, 1, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = True self.trans_y = False @@ -125,7 +125,7 @@ class TestMatMuklOp6(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 102, 1) - self.y_shape = (102, ) + self.y_shape = (102,) self.trans_x = True self.trans_y = False @@ -137,7 +137,7 @@ class TestMatMuklOp7(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 1, 100) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -244,7 +244,7 @@ class TestMatMuklOp16(TestMatMulV2Op): """ def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -257,7 +257,7 @@ class TestMatMuklOp17(TestMatMulV2Op): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.trans_x = False self.trans_y = False @@ -298,13 +298,11 @@ class TestMatMuklOpBroadcast2(TestMatMulV2Op): self.trans_y = True -#--------------------test matmul fp16-------------------- +# --------------------test matmul fp16-------------------- def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): - class TestMatMulOpFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -312,9 +310,12 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): self.check_output_with_place(self.place, atol=atol) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=max_relative_error) + self.check_grad_with_place( + self.place, + ['X', 'Y'], + 'Out', + max_relative_error=max_relative_error, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestMatMulOpFp16Case.__name__ = cls_name @@ -342,7 +343,6 @@ create_test_fp16_class(TestMatMuklOp18) class TestMatMulV2API(unittest.TestCase): - def setUp(self): self.places = [paddle.CPUPlace()] if paddle.is_compiled_with_mlu(): @@ -359,12 +359,11 @@ class TestMatMulV2API(unittest.TestCase): y_np = np.random.random([3, 4]).astype("float32") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": x_np, - "input_y": y_np - }, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": x_np, "input_y": y_np}, + fetch_list=[result], + ) def test_static(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/mlu/test_mean_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_mean_op_mlu.py index 5dcdf1db8458d1dd3f21649d711aa3700e1a4db1..60f9d7b8e45b2963ed4616226a3929b862eaba7c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_mean_op_mlu.py +++ 
b/python/paddle/fluid/tests/unittests/mlu/test_mean_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestMean(OpTest): - def setUp(self): self.set_mlu() self.place = paddle.device.MLUPlace(0) @@ -55,7 +54,6 @@ class TestMean(OpTest): class TestMeanFP16(OpTest): - def setUp(self): self.set_mlu() self.place = paddle.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_merged_adam_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_merged_adam_op_mlu.py index 3aa61e9f982fb8639f49df6af200196184cb41aa..242e1c8e663f49d818225bf8370fd12adf375e7f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_merged_adam_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_merged_adam_op_mlu.py @@ -21,20 +21,22 @@ from paddle import _C_ops, _legacy_C_ops from paddle.fluid.framework import in_dygraph_mode -def run_adam_op(params, - grads, - lrs, - moment1s, - moment2s, - beta1_pows, - beta2_pows, - master_params, - epsilon, - beta1, - beta2, - place, - multi_precision=False, - use_merged=False): +def run_adam_op( + params, + grads, + lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + epsilon, + beta1, + beta2, + place, + multi_precision=False, + use_merged=False, +): assert len(params) == len(grads) assert len(params) == len(lrs) assert len(params) == len(moment1s) @@ -59,25 +61,71 @@ def run_adam_op(params, if not use_merged: for i in range(len(param_vars)): _, _, _, _, _, _ = _legacy_C_ops.adam( - param_vars[i], grad_vars[i], lr_vars[i], moment1_vars[i], - moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i], - master_param_vars[i], param_vars[i], moment1_vars[i], - moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i], - master_param_vars[i], 'epsilon', epsilon, 'beta1', beta1, - 'beta2', beta2, 'multi_precision', multi_precision) + param_vars[i], + grad_vars[i], + lr_vars[i], + moment1_vars[i], + moment2_vars[i], + beta1_pow_vars[i], + beta2_pow_vars[i], + master_param_vars[i], + param_vars[i], + moment1_vars[i], + moment2_vars[i], + beta1_pow_vars[i], + beta2_pow_vars[i], + master_param_vars[i], + 'epsilon', + epsilon, + 'beta1', + beta1, + 'beta2', + beta2, + 'multi_precision', + multi_precision, + ) else: if in_dygraph_mode(): _, _, _, _, _, _ = _C_ops.merged_adam_( - param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars, - beta1_pow_vars, beta2_pow_vars, master_param_vars, beta1, beta2, - epsilon, multi_precision, False) + param_vars, + grad_vars, + lr_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + beta1, + beta2, + epsilon, + multi_precision, + False, + ) else: _, _, _, _, _, _ = _legacy_C_ops.merged_adam( - param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars, - beta1_pow_vars, beta2_pow_vars, master_param_vars, param_vars, - moment1_vars, moment2_vars, beta1_pow_vars, beta2_pow_vars, - master_param_vars, 'epsilon', epsilon, 'beta1', beta1, 'beta2', - beta2, 'multi_precision', multi_precision) + param_vars, + grad_vars, + lr_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + param_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + 'epsilon', + epsilon, + 'beta1', + beta1, + 'beta2', + beta2, + 'multi_precision', + multi_precision, + ) outputs = { 'ParamOut': param_vars, @@ -85,14 +133,13 @@ def run_adam_op(params, 'Moment2Out': moment2_vars, 'Beta1PowOut': beta1_pow_vars, 'Beta2PowOut': beta2_pow_vars, - 'MasterParamOut': master_param_vars + 'MasterParamOut': master_param_vars, } return 
outputs class TestMergedAdam(unittest.TestCase): - def setUp(self): paddle.disable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -116,27 +163,46 @@ class TestMergedAdam(unittest.TestCase): beta1_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype) beta2_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype) master_params = [p.astype(mp_dtype) for p in params] - return params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params + return ( + params, + grads, + lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + ) def check_with_place(self, place, multi_precision): - params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_merged): - return run_adam_op(params=params, - grads=grads, - lrs=lrs, - moment1s=moment1s, - moment2s=moment2s, - beta1_pows=beta1_pows, - beta2_pows=beta2_pows, - master_params=master_params, - epsilon=0.9, - beta1=0.9, - beta2=0.99, - place=place, - multi_precision=multi_precision, - use_merged=use_merged) + return run_adam_op( + params=params, + grads=grads, + lrs=lrs, + moment1s=moment1s, + moment2s=moment2s, + beta1_pows=beta1_pows, + beta2_pows=beta2_pows, + master_params=master_params, + epsilon=0.9, + beta1=0.9, + beta2=0.99, + place=place, + multi_precision=multi_precision, + use_merged=use_merged, + ) outs1 = run_op(True) outs2 = run_op(False) @@ -149,10 +215,9 @@ class TestMergedAdam(unittest.TestCase): if place == 'mlu': np.testing.assert_array_equal(value1[i], value2[i]) else: - np.testing.assert_allclose(value1[i], - value2[i], - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + value1[i], value2[i], rtol=1e-05, atol=1e-07 + ) def test_main(self): for multi_precision in [False, True]: diff --git a/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py index 7a1f590cf3da625948e456b01f191083a110a5e8..e7c4c4bb4ca53dffc758ffd99c7bc84a3b49ae7c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_merged_momentum_op_mlu.py @@ -22,16 +22,18 @@ from paddle.fluid.layer_helper import LayerHelper from collections import OrderedDict -def run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False): +def run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -48,48 +50,70 @@ def run_momentum_op(params, } param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - 
shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -99,8 +123,9 @@ def run_momentum_op(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -111,10 +136,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars[i] outputs['MasterParamOut'] = master_param_vars[i] - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -126,10 +150,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -137,17 +160,19 @@ def run_momentum_op(params, return exe.run(main, feed=feed_dict, fetch_list=fetch_list) -def run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False, - use_nesterov=True): +def run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, + use_nesterov=True, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -159,48 +184,70 @@ def run_momentum_op2(params, helper = LayerHelper(op_type, **locals()) param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - 
helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -210,8 +257,9 @@ def run_momentum_op2(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -230,10 +278,9 @@ def run_momentum_op2(params, 'regularization_method': 'l2_decay', 'regularization_coeff': 2.0, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -246,22 +293,18 @@ def run_momentum_op2(params, inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars attrs = { - 'mu': - mu, - 'multi_precision': - multi_precision, - 'rescale_grad': - rescale_grad, - 'use_nesterov': - use_nesterov, - 'regularization_method': - ['l2_decay' for i in range(len(param_vars))], + 'mu': mu, + 'multi_precision': multi_precision, + 'rescale_grad': rescale_grad, + 'use_nesterov': use_nesterov, + 'regularization_method': [ + 'l2_decay' for i in range(len(param_vars)) + ], 'regularization_coeff': [2.0 for i in range(len(param_vars))], } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -270,7 +313,6 @@ def run_momentum_op2(params, class TestMergedMomentum(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -296,21 +338,28 @@ class TestMergedMomentum(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, 
place, multi_precision): - params, grads, velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_merged): # MLU Momentum Op does not support rescale_grad rescale_grad = 1.0 - return run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged) + return run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + ) outs1 = run_op(True) outs2 = run_op(False) @@ -323,7 +372,6 @@ class TestMergedMomentum(unittest.TestCase): class TestMergedMomentum2(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -349,22 +397,29 @@ class TestMergedMomentum2(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, multi_precision): - params, grads, velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_nesterov, use_merged): # MLU Momentum Op does not support rescale_grad rescale_grad = 1.0 - return run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged, - use_nesterov=use_nesterov) + return run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + use_nesterov=use_nesterov, + ) outs1 = run_op(use_nesterov=True, use_merged=True) outs2 = run_op(use_nesterov=True, use_merged=False) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py index d0c596922652428a78983b8e87ce55a8ff0b8a54..6353ca638a5ce8ac80110bcc3c9310770bf35e4c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestMeshgridOp(OpTest): - def setUp(self): self.op_type = "meshgrid" self.place = paddle.device.MLUPlace(0) @@ -50,7 +49,7 @@ class TestMeshgridOp(OpTest): ins = [] outs = [] for i in range(len(self.shape)): - ins.append(np.random.random((self.shape[i], )).astype(self.dtype)) + ins.append(np.random.random((self.shape[i],)).astype(self.dtype)) for i in range(len(self.shape)): out_reshape = [1] * len(self.shape) @@ -64,23 +63,29 @@ class TestMeshgridOp(OpTest): class TestMeshgridOp2(TestMeshgridOp): - def get_x_shape(self): return [100, 300] class TestMeshgridOp3(unittest.TestCase): - def test_api(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, + 100, + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) 
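The meshgrid tests above validate paddle.tensor.meshgrid against expected grids assembled by hand with np.reshape and np.broadcast_to. A minimal, self-contained sketch of that reference computation (variable names here are illustrative, not taken from the patch) is:

    # Illustrative sketch, not part of the patch: numpy reference for the
    # expected meshgrid outputs used in the MLU tests.
    import numpy as np

    # 1-D index vectors with the same 100/200 lengths used in the tests.
    x = np.random.randint(0, 100, [100]).astype('int32')
    y = np.random.randint(0, 100, [200]).astype('int32')

    # Expected grids: broadcast x down the rows and y across the columns.
    expect_x = np.broadcast_to(np.reshape(x, [100, 1]), [100, 200])
    expect_y = np.broadcast_to(np.reshape(y, [1, 200]), [100, 200])

    # np.meshgrid with 'ij' indexing yields the same pair of [100, 200] grids,
    # mirroring how the tests build out_1 from input_1 before comparing it
    # with the fetched meshgrid results.
    grid_x, grid_y = np.meshgrid(x, y, indexing='ij')
    assert np.array_equal(grid_x, expect_x)
    assert np.array_equal(grid_y, expect_y)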
@@ -89,28 +94,34 @@ class TestMeshgridOp3(unittest.TestCase): exe = fluid.Executor(place=fluid.MLUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid(x, y) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp4(unittest.TestCase): - def test_list_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -119,29 +130,35 @@ class TestMeshgridOp4(unittest.TestCase): exe = fluid.Executor(place=fluid.MLUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid([x, y]) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp5(unittest.TestCase): - def test_tuple_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -150,26 +167,32 @@ class TestMeshgridOp5(unittest.TestCase): exe = fluid.Executor(place=fluid.MLUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid((x, y)) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp7(unittest.TestCase): - def test_api_with_dygraph_list_input(self): - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, + 100, + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): tensor_3 = fluid.dygraph.to_variable(input_3) @@ -181,14 +204,21 @@ class TestMeshgridOp7(unittest.TestCase): class TestMeshgridOp8(unittest.TestCase): - def test_api_with_dygraph_tuple_input(self): - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): tensor_3 = fluid.dygraph.to_variable(input_3) diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py index 542675ada9f4b9241a1a9af8acd444e9aae9f1f7..8efc129b68885b9b8ff3f38d1a19b555c1702fb9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestMomentumOp1(OpTest): - def setUp(self): self.op_type = "momentum" self.dtype = np.float32 @@ -47,7 +46,7 @@ class TestMomentumOp1(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = {'mu': mu} @@ -58,7 +57,8 @@ class TestMomentumOp1(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -74,7 +74,6 @@ class TestMomentumOp1(OpTest): class TestMomentumOpFp16(TestMomentumOp1): - def init_dtype(self): self.dtype = np.float16 @@ -83,8 +82,7 @@ class TestMomentumOpFp16(TestMomentumOp1): class TestMomentumOp2(OpTest): - '''Test Momentum with default values for attributes - ''' + '''Test Momentum with default values for attributes''' def setUp(self): self.op_type = "momentum" @@ -102,7 +100,7 @@ class TestMomentumOp2(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = {'mu': mu, 'use_nesterov': use_nesterov} @@ -113,7 +111,8 @@ class TestMomentumOp2(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -122,16 +121,15 @@ class TestMomentumOp2(OpTest): class TestMomentumV2(unittest.TestCase): - def test_momentum_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. 
- adam = paddle.optimizer.Momentum(learning_rate=0.01, - momentum=0.9, - parameters=linear.parameters()) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, momentum=0.9, parameters=linear.parameters() + ) out = linear(a) out.backward() adam.step() @@ -148,13 +146,15 @@ class TestMomentumV2(unittest.TestCase): cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) - rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1, - momentum=0.9) + rms_optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, momentum=0.9 + ) rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -162,14 +162,13 @@ class TestMomentumV2(unittest.TestCase): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) def test_raise_error(self): - self.assertRaises(ValueError, - paddle.optimizer.Momentum, - learning_rate=None) + self.assertRaises( + ValueError, paddle.optimizer.Momentum, learning_rate=None + ) self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None) class TestMomentumOpWithDecay(OpTest): - def setUp(self): self.op_type = "momentum" self.place = paddle.device.MLUPlace(0) @@ -193,14 +192,14 @@ class TestMomentumOpWithDecay(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = { 'mu': mu, 'use_nesterov': use_nesterov, 'regularization_method': regularization_method, - 'regularization_coeff': regularization_coeff + 'regularization_coeff': regularization_coeff, } grad = grad + regularization_coeff * param @@ -211,7 +210,8 @@ class TestMomentumOpWithDecay(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -224,7 +224,6 @@ class TestMomentumOpWithDecay(OpTest): class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): - def init_config(self): self.dtype = np.float16 @@ -233,13 +232,11 @@ class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): class TestMomentumOpWithDecay2(TestMomentumOpWithDecay): - def init_config(self): self.use_nesterov = False class TestMomentumOpWithDecayAPI(unittest.TestCase): - def _test_momentum_dygraph_common(self, regularization): paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") @@ -252,13 +249,16 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): learning_rate=0.01, momentum=0.9, parameter_list=linear.parameters(), - regularization=regularization) + regularization=regularization, + ) momentum.minimize(loss) def test_momentum_dygraph_1(self): self._test_momentum_dygraph_common( regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ) + ) def test_momentum_static(self): paddle.enable_static() @@ -272,12 +272,14 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): avg_cost = paddle.mean(cost) momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum( - learning_rate=0.1, momentum=0.9) + learning_rate=0.1, momentum=0.9 + ) momentum_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = 
paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -286,23 +288,23 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): class TestFusedMomentumWithDecayAPI(unittest.TestCase): - def get_program(self, weight_attr, bias_attr=False): main_program = paddle.static.Program() startup_program = paddle.static.Program() - with paddle.static.program_guard(main_program=main_program, - startup_program=startup_program): + with paddle.static.program_guard( + main_program=main_program, startup_program=startup_program + ): x = paddle.static.data(name='x', shape=[10, 10]) - linear = paddle.nn.Linear(10, - 10, - weight_attr=weight_attr, - bias_attr=bias_attr) + linear = paddle.nn.Linear( + 10, 10, weight_attr=weight_attr, bias_attr=bias_attr + ) out = linear(x) loss = paddle.mean(out) optimizer = paddle.optimizer.Momentum( learning_rate=0.01, momentum=0.9, - weight_decay=paddle.regularizer.L2Decay(0.5)) + weight_decay=paddle.regularizer.L2Decay(0.5), + ) optimizer.minimize(loss) return main_program @@ -311,7 +313,8 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): weight_attr = paddle.ParamAttr( name="weight", initializer=paddle.nn.initializer.Constant(value=0.5), - regularizer=paddle.regularizer.L2Decay(0.1)) + regularizer=paddle.regularizer.L2Decay(0.1), + ) program = self.get_program(weight_attr, bias_attr=False) ops = program.global_block().ops @@ -326,11 +329,13 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): weight_attr = paddle.ParamAttr( name="weight", initializer=paddle.nn.initializer.Constant(value=0.5), - regularizer=paddle.regularizer.L1Decay(0.1)) + regularizer=paddle.regularizer.L1Decay(0.1), + ) bias_attr = paddle.ParamAttr( name="bias", - initializer=paddle.nn.initializer.Constant(value=0.), - regularizer=None) + initializer=paddle.nn.initializer.Constant(value=0.0), + regularizer=None, + ) program = self.get_program(weight_attr, bias_attr) ops = program.global_block().ops @@ -345,8 +350,9 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): self.assertEqual(ops[-1].attr('regularization_coeff'), 0) if 'bias' in ops[-2].input('Param'): self.assertEqual(ops[-2].attr('regularization_method'), 'l2_decay') - self.assertEqual(ops[-2].attr('regularization_coeff'), - np.float32(0.5)) + self.assertEqual( + ops[-2].attr('regularization_coeff'), np.float32(0.5) + ) def test_param_has_no_regularizer(self): paddle.enable_static() @@ -360,11 +366,11 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): - def __update_params(self, momentum, linear): for i in range(10): - inp = paddle.full(shape=[2, 2], fill_value=i, - dtype='float32').astype("float32") + inp = paddle.full( + shape=[2, 2], fill_value=i, dtype='float32' + ).astype("float32") inp = paddle.to_tensor(inp) out = linear(inp) loss = paddle.mean(out) @@ -379,32 +385,39 @@ class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_old = paddle.fluid.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_old.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_old, linear=linear_old) 
linear_new = paddle.nn.Linear( 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_new = paddle.fluid.contrib.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_new.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_new, linear=linear_new) self.assertEqual( (linear_old.weight.numpy() == linear_new.weight.numpy()).all(), True, - 'the param weight updated by two Momentum optimizers should equal') + 'the param weight updated by two Momentum optimizers should equal', + ) def test_vs(self, place=fluid.MLUPlace(0)): places = [fluid.MLUPlace(0)] @@ -413,7 +426,6 @@ class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): class TestMomentumV2Group(TestMomentumV2): - def test_momentum_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -421,22 +433,20 @@ class TestMomentumV2Group(TestMomentumV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.Momentum(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - 'learning_rate': - 0.1, - 'momentum': - 0.99 - }], - weight_decay=0.1, - momentum=0.9) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + 'momentum': 0.99, + }, + ], + weight_decay=0.1, + momentum=0.9, + ) out = linear_1(a) out = linear_2(out) out.backward() @@ -445,13 +455,14 @@ class TestMomentumV2Group(TestMomentumV2): class TestMultiTensorMomentumDygraph(unittest.TestCase): - - def _momentum_optimize_dygraph(self, - place, - use_param_attr=False, - use_param_group=False, - use_amp=False, - use_multi_tensor=False): + def _momentum_optimize_dygraph( + self, + place, + use_param_attr=False, + use_param_group=False, + use_amp=False, + use_multi_tensor=False, + ): paddle.disable_static() paddle.seed(10) paddle.set_device(place) @@ -459,7 +470,8 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): weight_attr = paddle.ParamAttr( learning_rate=0.5, regularizer=paddle.regularizer.L2Decay(1.0), - trainable=True) + trainable=True, + ) if use_param_attr: model = paddle.nn.Linear(5, 5, weight_attr) else: @@ -468,17 +480,21 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): optimizer = paddle.optimizer.Momentum( parameters=model.parameters(), use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + multi_precision=use_amp, + ) else: optimizer = paddle.optimizer.Momentum( - parameters=[{ - 'params': model.parameters(), - 'weight_decay': 0.001, - 'learning_rate': 0.1, - 'momentum': 0.99 - }], + parameters=[ + { + 'params': model.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + 'momentum': 0.99, + } + ], use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + multi_precision=use_amp, + ) for idx in range(5): if place == 'mlu' and use_amp == True: model = paddle.amp.decorate(models=model, level='O2') @@ -506,9 +522,11 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): def _check_with_place_amp(self, place, use_amp): output1, params1 = self._momentum_optimize_dygraph( - place=place, use_amp=use_amp, 
use_multi_tensor=True) + place=place, use_amp=use_amp, use_multi_tensor=True + ) output2, params2 = self._momentum_optimize_dygraph( - place=place, use_amp=use_amp, use_multi_tensor=False) + place=place, use_amp=use_amp, use_multi_tensor=False + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) @@ -518,12 +536,14 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): place=place, use_amp=use_amp, use_param_attr=True, - use_multi_tensor=True) + use_multi_tensor=True, + ) output2, params2 = self._momentum_optimize_dygraph( place=place, use_amp=use_amp, use_param_attr=True, - use_multi_tensor=False) + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) @@ -533,12 +553,14 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): place=place, use_amp=use_amp, use_param_group=True, - use_multi_tensor=True) + use_multi_tensor=True, + ) output2, params2 = self._momentum_optimize_dygraph( place=place, use_amp=use_amp, use_param_group=True, - use_multi_tensor=False) + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) @@ -554,11 +576,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): class TestMultiTensorMomentumStatic(unittest.TestCase): - - def _momentum_optimize_static(self, - place, - use_amp=False, - use_multi_tensor=False): + def _momentum_optimize_static( + self, place, use_amp=False, use_multi_tensor=False + ): paddle.enable_static() paddle.seed(10) np.random.seed(10) @@ -567,24 +587,26 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): exe = paddle.static.Executor(place=paddle.device.MLUPlace(0)) train_program = paddle.static.Program() startup_program = paddle.static.Program() - optimizer = paddle.optimizer.Momentum(multi_precision=use_amp, - use_multi_tensor=use_multi_tensor) + optimizer = paddle.optimizer.Momentum( + multi_precision=use_amp, use_multi_tensor=use_multi_tensor + ) if use_amp: optimizer = paddle.static.amp.decorate( optimizer, init_loss_scaling=128.0, use_dynamic_loss_scaling=True, use_pure_fp16=True, - use_fp16_guard=False) + use_fp16_guard=False, + ) with paddle.static.program_guard(train_program, startup_program): if use_amp: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float16') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float16' + ) else: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float32') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer.minimize(loss) @@ -596,9 +618,9 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): x = numpy.random.random(size=(2, 2)).astype('float32') out = [] for idx in range(5): - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + train_program, feed={"X": x}, fetch_list=[loss.name] + ) out.append(loss_data) return out @@ -607,12 +629,12 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): return places def _check_with_place_amp(self, place, use_amp): - output1 = self._momentum_optimize_static(place=place, - use_amp=use_amp, - use_multi_tensor=True) - output2 = self._momentum_optimize_static(place=place, - use_amp=use_amp, 
- use_multi_tensor=False) + output1 = self._momentum_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=True + ) + output2 = self._momentum_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=False + ) for idx in range(len(output1)): np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py index 6cf320d56b1449ceaa91eea53fc417f2509443eb..1ab286740ef2d38425ee98d5536b08df5cb25e4f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_nearest_interp_v2_op_mlu.py @@ -27,15 +27,17 @@ from paddle.nn.functional import interpolate paddle.enable_static() -def nearest_neighbor_interp_np(X, - out_h, - out_w, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp_np( + X, + out_h, + out_w, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -48,16 +50,16 @@ def nearest_neighbor_interp_np(X, n, c, in_h, in_w = X.shape ratio_h = ratio_w = 0.0 - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -85,17 +87,19 @@ def nearest_neighbor_interp_np(X, return out.astype(X.dtype) -def nearest_neighbor_interp3d_np(X, - out_d, - out_h, - out_w, - scale_d=0, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp3d_np( + X, + out_d, + out_h, + out_w, + scale_d=0, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 4, 1, 2, 3)) # NDHWC => NCDHW @@ -110,24 +114,24 @@ def nearest_neighbor_interp3d_np(X, n, c, in_d, in_h, in_w = X.shape ratio_d = ratio_h = ratio_w = 0.0 - if (out_d > 1): - if (align_corners): + if out_d > 1: + if align_corners: ratio_d = (in_d - 1.0) / (out_d - 1.0) else: if scale_d > 0: ratio_d = 1.0 / scale_d else: ratio_d = 1.0 * in_d / out_d - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -159,7 +163,6 @@ def nearest_neighbor_interp3d_np(X, class TestNearestInterpOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -216,15 +219,30 @@ class TestNearestInterpOp(OpTest): if len(self.input_shape) == 4: output_np = nearest_neighbor_interp_np( - input_np, out_h, out_w, scale_h, scale_w, self.out_size, - self.actual_shape, self.align_corners, self.data_layout) + input_np, + out_h, + out_w, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + 
self.data_layout, + ) elif len(self.input_shape) == 5: - output_np = nearest_neighbor_interp3d_np(input_np, out_d, out_h, - out_w, scale_d, scale_h, - scale_w, self.out_size, - self.actual_shape, - self.align_corners, - self.data_layout) + output_np = nearest_neighbor_interp3d_np( + input_np, + out_d, + out_h, + out_w, + scale_d, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -237,7 +255,7 @@ class TestNearestInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } else: self.attrs = { @@ -245,7 +263,7 @@ class TestNearestInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -267,7 +285,7 @@ class TestNearestInterpOp(OpTest): self.input_shape = [2, 3, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True @@ -285,119 +303,108 @@ class TestNearestInterpOp(OpTest): class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") self.align_corners = True class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestNeighborInterpDataLayout(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 4, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([3, 8]).astype("int32") self.align_corners = True self.data_layout = "NHWC" class TestNearestInterpWithoutCorners(TestNearestInterpOp): - def set_align_corners(self): self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. + self.scale = 2.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 5, 7] @@ -409,7 +416,6 @@ class TestNearestNeighborInterpScale2(TestNearestInterpOp): class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] @@ -421,7 +427,6 @@ class TestNearestNeighborInterpScale3(TestNearestInterpOp): class TestNearestInterpOp_attr_tensor(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -461,8 +466,9 @@ class TestNearestInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -474,9 +480,16 @@ class TestNearestInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -490,33 +503,31 @@ class TestNearestInterpOp_attr_tensor(OpTest): self.input_shape = [2, 5, 4, 4] self.out_h = 3 self.out_w = 3 - self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = True # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = True # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.shape_by_1Dtensor = True @@ -524,7 +535,6 @@ class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -537,7 +547,6 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): class TestNearestAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32") @@ -545,27 +554,25 @@ class TestNearestAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = fluid.layers.resize_nearest(y, - out_shape=[12, 12], - data_format='NHWC', - align_corners=False) - out2 = fluid.layers.resize_nearest(x, - out_shape=[12, dim], - align_corners=False) - out3 = fluid.layers.resize_nearest(x, - out_shape=shape_tensor, - align_corners=False) - out4 = fluid.layers.resize_nearest(x, - out_shape=[4, 4], - actual_shape=actual_size, - align_corners=False) - out5 = fluid.layers.resize_nearest(x, - scale=scale_tensor, - align_corners=False) + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = fluid.layers.resize_nearest( + y, out_shape=[12, 12], data_format='NHWC', align_corners=False + ) + out2 = fluid.layers.resize_nearest( + x, out_shape=[12, dim], align_corners=False + ) + out3 = fluid.layers.resize_nearest( + x, out_shape=shape_tensor, align_corners=False + ) + out4 = fluid.layers.resize_nearest( + x, out_shape=[4, 4], actual_shape=actual_size, align_corners=False + ) + out5 = fluid.layers.resize_nearest( + x, scale=scale_tensor, align_corners=False + ) x_data = np.random.random((2, 3, 6, 6)).astype("float32") dim_data = np.array([12]).astype("int32") @@ -576,39 +583,41 @@ class TestNearestAPI(unittest.TestCase): place = paddle.MLUPlace(0) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = nearest_neighbor_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=False) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 1))) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = nearest_neighbor_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 1)) + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res) class TestNearestInterpException(unittest.TestCase): - def test_exception(self): import paddle + input = fluid.data(name="input", 
shape=[1, 3, 6, 6], dtype="float32") def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC - out = fluid.layers.resize_nearest(input, - out_shape=[4, 8], - data_format='NDHWC') + out = fluid.layers.resize_nearest( + input, out_shape=[4, 8], data_format='NDHWC' + ) def attr_scale_type(): out = fluid.layers.resize_nearest(input, scale='scale') @@ -622,9 +631,9 @@ class TestNearestInterpException(unittest.TestCase): def mode_error(): x = paddle.randn([1, 3]) - out = paddle.nn.functional.interpolate(x, - scale_factor='scale', - mode="BILINEAR") + out = paddle.nn.functional.interpolate( + x, scale_factor='scale', mode="BILINEAR" + ) self.assertRaises(ValueError, attr_data_format) self.assertRaises(TypeError, attr_scale_type) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py index cc6db215713767ddbe1675a6a32e8311d4c7a986..3605ab254168dcb06c9d5073c61e9be9cc4189bc 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestOneHotOp(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -55,7 +54,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_attr(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -66,8 +64,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -83,7 +82,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -109,7 +107,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -120,8 +117,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -135,7 +133,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_exception(unittest.TestCase): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -153,30 +150,34 @@ class TestOneHotOp_exception(unittest.TestCase): def test_check_output(self): program = Program() with program_guard(program): - x = fluid.layers.data(name='x', - shape=[self.dimension], - dtype='float32', - lod_level=1) + x = fluid.layers.data( + name='x', shape=[self.dimension], dtype='float32', lod_level=1 + ) block = program.current_block() - one_hot_out = block.create_var(name="one_hot_out", - type=core.VarDesc.VarType.LOD_TENSOR, - dtype='float32') - block.append_op(type='one_hot', - inputs={'X': x}, - attrs={'depth': self.depth}, - outputs={'Out': one_hot_out}) + one_hot_out = block.create_var( + name="one_hot_out", + 
type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32', + ) + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}, + ) exe = fluid.Executor(self.place) def run(): - exe.run(feed={'x': self.x}, - fetch_list=[one_hot_out], - return_numpy=False) + exe.run( + feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False, + ) self.assertRaises(ValueError, run) class TestOneHotOpApi(unittest.TestCase): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -191,14 +192,17 @@ class TestOneHotOpApi(unittest.TestCase): def test_api_with_dygraph(self): depth = 10 - label = np.array([np.random.randint(0, depth - 1) - for i in range(6)]).reshape([6, 1]) + label = np.array( + [np.random.randint(0, depth - 1) for i in range(6)] + ).reshape([6, 1]) with fluid.dygraph.guard(): one_hot_label = fluid.one_hot( - input=fluid.dygraph.to_variable(label), depth=depth) + input=fluid.dygraph.to_variable(label), depth=depth + ) one_hot_label = paddle.nn.functional.one_hot( - fluid.dygraph.to_variable(label), depth) + fluid.dygraph.to_variable(label), depth + ) # with _test_eager_guard(): # one_hot_label = paddle.nn.functional.one_hot( # paddle.to_tensor(label), depth) @@ -207,20 +211,22 @@ class TestOneHotOpApi(unittest.TestCase): label = fluid.layers.data(name="label", shape=[1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) - label_data = np.array([np.random.randint(0, 10 - 1) - for i in range(6)]).reshape([6, 1]) + label_data = np.array( + [np.random.randint(0, 10 - 1) for i in range(6)] + ).reshape([6, 1]) exe = fluid.Executor(self.place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'label': label_data, - }, - fetch_list=[one_hot_label], - return_numpy=False) + ret = exe.run( + feed={ + 'label': label_data, + }, + fetch_list=[one_hot_label], + return_numpy=False, + ) class BadInputTestOnehotV2(unittest.TestCase): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -229,10 +235,12 @@ class BadInputTestOnehotV2(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="float32") + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32", + ) one_hot_label = fluid.one_hot(input=label, depth=4) self.assertRaises(TypeError, test_bad_x) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_parallel_dygraph_sync_batch_norm_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_parallel_dygraph_sync_batch_norm_mlu.py index b42567c5d0c5adf3f493434d99c039daca6ddd99..4d39cd6cdd745025afe9a19249cb1477782a20c7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_parallel_dygraph_sync_batch_norm_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_parallel_dygraph_sync_batch_norm_mlu.py @@ -31,7 +31,6 @@ print("file: {}".format(flag_name)) class TestParallelDygraphMnistMLU(TestDistBase): - def _setup_config(self): self._sync_mode = False self._cncl_mode = True @@ -49,7 +48,7 @@ class TestParallelDygraphMnistMLU(TestDistBase): "FLAGS_call_stack_level": "2", "GLOG_v": "2", "PADDLE_WITH_GLOO": '0', - "BACKEND": "cncl" + "BACKEND": "cncl", } if check_error_log: @@ -60,14 +59,16 @@ class TestParallelDygraphMnistMLU(TestDistBase): required_envs.update(need_envs) return required_envs - def _run_local(self, - model, - envs, - check_error_log=False, - 
batch_size=DEFAULT_BATCH_SIZE, - batch_merge_repeat=1, - log_name="", - devices="1"): + def _run_local( + self, + model, + envs, + check_error_log=False, + batch_size=DEFAULT_BATCH_SIZE, + batch_merge_repeat=1, + log_name="", + devices="1", + ): cmd = self._python_interp @@ -75,8 +76,10 @@ class TestParallelDygraphMnistMLU(TestDistBase): envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '') cmd += " -m coverage run --branch -p" - cmd += " %s --role trainer --update_method local --lr %f" % (model, - self._lr) + cmd += " %s --role trainer --update_method local --lr %f" % ( + model, + self._lr, + ) if batch_size != DEFAULT_BATCH_SIZE: cmd += " --batch_size %d" % batch_size @@ -90,7 +93,7 @@ class TestParallelDygraphMnistMLU(TestDistBase): env_local = { "FLAGS_selected_mlus": devices, "PADDLE_TRAINERS_NUM": "1", - "PADDLE_TRAINER_ID": "0" + "PADDLE_TRAINER_ID": "0", } else: env_local = {'CPU_NUM': '1'} @@ -111,30 +114,36 @@ class TestParallelDygraphMnistMLU(TestDistBase): if check_error_log: path = "/tmp/local_err_%d.log" % os.getpid() err_log = open(path, "w") - local_proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=err_log, - env=env_local) + local_proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=err_log, + env=env_local, + ) else: - local_proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env_local) + local_proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env_local, + ) local_out, local_err = local_proc.communicate() if check_error_log: err_log.close() sys.stderr.write( - '\n--run_local-- trainer 0 stderr file saved in: %s\n' % (path)) + '\n--run_local-- trainer 0 stderr file saved in: %s\n' % (path) + ) sys.stderr.write('local_stderr: %s\n' % local_err) sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out)) return pickle.loads(local_out) - def _run_cluster_nccl2(self, model, envs, update_method, check_error_log, - log_name): + def _run_cluster_nccl2( + self, model, envs, update_method, check_error_log, log_name + ): # NOTE: we reuse ps_endpoints as nccl2 worker endpoints worker_endpoints = self._ps_endpoints.split(",") @@ -144,20 +153,28 @@ class TestParallelDygraphMnistMLU(TestDistBase): pipes = [] for i in range(0, trainer_num): tr_cmd, tr_env = self._get_nccl2_trainer_cmd( - model, worker_endpoints[i], update_method, i, trainer_num) + model, worker_endpoints[i], update_method, i, trainer_num + ) tr_env.update(envs) - print("use_hallreduce:{} \ntr{}_cmd:{}, env: {}".format( - self._use_hallreduce, i, tr_cmd, tr_env)) + print( + "use_hallreduce:{} \ntr{}_cmd:{}, env: {}".format( + self._use_hallreduce, i, tr_cmd, tr_env + ) + ) tr_pipe = open("/tmp/tr%d_err_%d.log" % (i, os.getpid()), "w") sys.stderr.write( "\n{} going to start process {} with nccl2\n".format( - type(self).__name__, i)) - tr_proc = subprocess.Popen(tr_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr_pipe, - env=tr_env) + type(self).__name__, i + ) + ) + tr_proc = subprocess.Popen( + tr_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr_pipe, + env=tr_env, + ) procs.append(tr_proc) pipes.append(tr_pipe) @@ -170,7 +187,9 @@ class TestParallelDygraphMnistMLU(TestDistBase): sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err)) sys.stderr.write( 'trainer {} glog file saved in: /tmp/tr{}_err_{}.log \n'.format( - i, i, os.getpid())) + i, i, os.getpid() + ) + ) if check_error_log: print("outs[0]:", pickle.loads(outs[0])) @@ -184,7 
+203,8 @@ class TestParallelDygraphMnistMLU(TestDistBase): os.path.abspath("parallel_dygraph_sync_batch_norm.py"), delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py index e9add5e6a5c39431c84fde3046ca9898e195f89a..57202e62a43330917f26bdc8e4f2382f2753708c 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py @@ -23,30 +23,40 @@ import sys sys.path.append('..') from op_test import OpTest -from test_pool2d_op import pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive, adaptive_start_index, adaptive_end_index +from test_pool2d_op import ( + pool2D_forward_naive, + avg_pool2D_forward_naive, + max_pool2D_forward_naive, + adaptive_start_index, + adaptive_end_index, +) paddle.enable_static() -def pool2d_backward_navie(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_format='NCHW', - pool_type="max", - padding_algorithm="EXPLICIT"): +def pool2d_backward_navie( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_format='NCHW', + pool_type="max", + padding_algorithm="EXPLICIT", +): # update paddings def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -56,9 +66,10 @@ def pool2d_backward_navie(x, if isinstance(padding_algorithm, str): padding_algorithm = padding_algorithm.upper() if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] @@ -66,7 +77,8 @@ def pool2d_backward_navie(x, raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+ ) elif padding_algorithm == "SAME": input_data_shape = [] if data_format == "NCHW": @@ -95,10 +107,20 @@ def pool2d_backward_navie(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) // strides[0] + 1 \ - if ceil_mode else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 - W_out = (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) // strides[1] + 1 \ - if ceil_mode else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + H_out = ( + (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) + // strides[0] + + 1 + if ceil_mode + else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) + // strides[1] + + 1 + if ceil_mode + else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + ) x_grad = np.zeros_like(x) for i in range(H_out): @@ -126,20 +148,26 @@ def pool2d_backward_navie(x, in_w_end = np.min((in_w_end, W)) if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) - x_grad[:, :, in_h_start:in_h_end, - in_w_start:in_w_end] += 1 / field_size + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) + x_grad[:, :, in_h_start:in_h_end, in_w_start:in_w_end] += ( + 1 / field_size + ) elif pool_type == 'max': for n in range(N): for c in range(C): - idx = np.argmax(x[n, c, in_h_start:in_h_end, - in_w_start:in_w_end].flatten()) + idx = np.argmax( + x[ + n, c, in_h_start:in_h_end, in_w_start:in_w_end + ].flatten() + ) idx_h = idx // (in_w_end - in_w_start) idx_w = idx % (in_w_end - in_w_start) - x_grad[n, c, in_h_start + idx_h, - in_w_start + idx_w] += 1 + x_grad[ + n, c, in_h_start + idx_h, in_w_start + idx_w + ] += 1 if data_format == "NHWC": x_grad = x_grad.transpose([0, 2, 3, 1]) @@ -147,7 +175,6 @@ def pool2d_backward_navie(x, class TestPool2D_Op_Mixin(object): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -165,12 +192,19 @@ class TestPool2D_Op_Mixin(object): self.init_shape() input = np.random.random(self.shape).astype(self.dtype) - output = pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, self.data_format, - self.pool_type, - self.padding_algorithm).astype(self.dtype) + output = pool2D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.data_format, + self.pool_type, + self.padding_algorithm, + ).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -192,23 +226,27 @@ class TestPool2D_Op_Mixin(object): self.check_output_with_place(self.place) def test_check_grad(self): - x_grad = pool2d_backward_navie(self.inputs["X"], - ksize=self.ksize, - strides=self.strides, - paddings=self.paddings, - global_pool=self.global_pool, - ceil_mode=False, - exclusive=self.exclusive, - adaptive=self.adaptive, - data_format=self.data_format, - pool_type=self.pool_type, - padding_algorithm=self.padding_algorithm) + x_grad = pool2d_backward_navie( + self.inputs["X"], + ksize=self.ksize, + strides=self.strides, + paddings=self.paddings, + global_pool=self.global_pool, + ceil_mode=False, + exclusive=self.exclusive, + adaptive=self.adaptive, + data_format=self.data_format, + pool_type=self.pool_type, + padding_algorithm=self.padding_algorithm, + ) x_grad = x_grad / 
np.prod(self.outputs['Out'].shape) - self.check_grad_with_place(self.place, - set(['X']), - 'Out', - max_relative_error=0.06, - user_defined_grads=[x_grad]) + self.check_grad_with_place( + self.place, + set(['X']), + 'Out', + max_relative_error=0.06, + user_defined_grads=[x_grad], + ) def init_data_format(self): self.data_format = "NCHW" @@ -249,7 +287,6 @@ class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest): class TestCase1(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -269,7 +306,6 @@ class TestCase1(TestPool2D_Op): class TestCase2(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -289,30 +325,25 @@ class TestCase2(TestPool2D_Op): class TestCase3(TestPool2D_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase4(TestCase1): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase5(TestCase2): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_data_type(self): self.dtype = np.float16 @@ -332,13 +363,11 @@ create_test_fp16_class(TestCase3) create_test_fp16_class(TestCase4) create_test_fp16_class(TestCase5) -#--------------------test pool2d use ceil mode-------------------- +# --------------------test pool2d use ceil mode-------------------- def create_test_use_ceil_class(parent): - class TestPool2DUseCeilCase(parent): - def init_ceil_mode(self): self.ceil_mode = True @@ -352,19 +381,16 @@ create_test_use_ceil_class(TestCase2) class TestAvgInclude(TestCase2): - def init_exclusive(self): self.exclusive = False class TestAvgPoolAdaptive(TestCase1): - def init_adaptive(self): self.adaptive = True class TestAvgPoolAdaptiveAsyOutSize(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -377,11 +403,10 @@ class TestAvgPoolAdaptiveAsyOutSize(TestCase1): self.paddings = [0, 0, 0, 0] -#-------test pool2d with asymmetric padding----- +# -------test pool2d with asymmetric padding----- class TestPool2D_AsyPadding(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -392,7 +417,6 @@ class TestPool2D_AsyPadding(TestPool2D_Op): class TestCase1_AsyPadding(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -403,7 +427,6 @@ class TestCase1_AsyPadding(TestCase1): class TestCase2_AsyPadding(TestCase2): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -414,7 +437,6 @@ class TestCase2_AsyPadding(TestCase2): class TestCase3_AsyPadding(TestCase3): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -425,7 +447,6 @@ class TestCase3_AsyPadding(TestCase3): class TestCase4_AsyPadding(TestCase4): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -436,7 +457,6 @@ class TestCase4_AsyPadding(TestCase4): class TestCase5_AsyPadding((TestCase5)): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -451,7 +471,6 @@ create_test_use_ceil_class(TestCase2_AsyPadding) class TestAvgInclude_AsyPadding(TestCase2): - def init_exclusive(self): self.exclusive = False @@ -465,7 +484,6 @@ class TestAvgInclude_AsyPadding(TestCase2): class TestAvgPoolAdaptive_AsyPadding(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -478,9 +496,8 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1): self.shape = [2, 3, 7, 7] -#----------- test 
channel_last -------------- +# ----------- test channel_last -------------- class TestPool2D_channel_last(TestPool2D_Op): - def init_data_format(self): self.data_format = "NHWC" @@ -489,7 +506,6 @@ class TestPool2D_channel_last(TestPool2D_Op): class TestCase1_channel_last(TestCase1): - def init_data_format(self): self.data_format = "NHWC" @@ -498,7 +514,6 @@ class TestCase1_channel_last(TestCase1): class TestCase2_channel_last(TestCase2): - def init_data_format(self): self.data_format = "NHWC" @@ -507,7 +522,6 @@ class TestCase2_channel_last(TestCase2): class TestCase3_channel_last(TestCase3): - def init_data_format(self): self.data_format = "NHWC" @@ -516,7 +530,6 @@ class TestCase3_channel_last(TestCase3): class TestCase4_channel_last(TestCase4): - def init_data_format(self): self.data_format = "NHWC" @@ -525,7 +538,6 @@ class TestCase4_channel_last(TestCase4): class TestCase5_channel_last(TestCase5): - def init_data_format(self): self.data_format = "NHWC" @@ -538,13 +550,11 @@ create_test_use_ceil_class(TestCase2_channel_last) class TestCase5_Max(TestCase2): - def init_pool_type(self): self.pool_type = "max" class TestCase5_channel_last_Max(TestCase5_Max): - def init_data_format(self): self.data_format = "NHWC" @@ -553,19 +563,16 @@ class TestCase5_channel_last_Max(TestCase5_Max): class TestAvgInclude_channel_last(TestCase2_channel_last): - def init_exclusive(self): self.exclusive = False class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last): - def init_adaptive(self): self.adaptive = True class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -574,7 +581,6 @@ class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -583,7 +589,6 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -592,7 +597,6 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -601,7 +605,6 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -610,7 +613,6 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -623,7 +625,6 @@ create_test_use_ceil_class(TestCase2_AsyPadding_channel_last) class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -635,9 +636,7 @@ class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.paddings = [0, 0] self.padding_algorithm = "SAME" @@ -663,9 +662,7 @@ create_test_padding_SAME_class(TestCase5_channel_last) def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.paddings = [1, 1] self.padding_algorithm = "VALID" @@ -691,7 +688,6 @@ create_test_padding_VALID_class(TestCase5_channel_last) class TestCase1_strides(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 2] @@ 
-705,96 +701,121 @@ create_test_padding_SAME_class(TestCase1_strides) # ----- test API class TestPool2DAPI(unittest.TestCase): - def test_api(self): x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32") x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32") - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") - - input_NHWC_negetive = fluid.layers.data(name="input_NHWC_negetive", - shape=[2, -1, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCHW_negetive = fluid.layers.data(name="input_NCHW_negetive", - shape=[2, 3, -1, -1], - append_batch_size=False, - dtype="float32") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) + + input_NHWC_negetive = fluid.layers.data( + name="input_NHWC_negetive", + shape=[2, -1, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCHW_negetive = fluid.layers.data( + name="input_NCHW_negetive", + shape=[2, 3, -1, -1], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3] - out_1 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - data_format="NHWC") - - out_2 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [1, 1], [1, 1], - [0, 0]], - data_format="NHWC") - - out_3 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [0, 0], [1, 1], - [1, 1]], - data_format="NCHW") - - out_4 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[1, 2, 1, 0], - data_format="NCHW") + out_1 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1], + data_format="NHWC", + ) + + out_2 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + data_format="NHWC", + ) + + out_3 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]], + data_format="NCHW", + ) + + out_4 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[1, 2, 1, 0], + data_format="NCHW", + ) # test VALID - out_5 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding="VALID", - data_format="NCHW") - - out_6 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", - data_format="NHWC") + out_5 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding="VALID", + data_format="NCHW", + ) + + out_6 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALID", + data_format="NHWC", + ) # test SAME - out_7 = fluid.layers.pool2d(input=input_NCHW, - pool_size=[4, 4], - pool_type="avg", - pool_padding="SAME", - data_format="NCHW") - - out_8 = fluid.layers.pool2d(input=input_NHWC, - pool_size=[4, 4], - pool_type="max", - pool_padding="SAME", - data_format="NHWC") + out_7 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=[4, 4], + pool_type="avg", + pool_padding="SAME", + data_format="NCHW", + ) + + out_8 = 
fluid.layers.pool2d( + input=input_NHWC, + pool_size=[4, 4], + pool_type="max", + pool_padding="SAME", + data_format="NHWC", + ) # test negetive - out_9 = fluid.layers.pool2d(input=input_NHWC_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], - data_format="NHWC") + out_9 = fluid.layers.pool2d( + input=input_NHWC_negetive, + pool_size=ksize, + pool_type="avg", + pool_padding=[0, 0], + data_format="NHWC", + ) assert out_9.shape == (2, -1, 3, 3) - out_10 = fluid.layers.pool2d(input=input_NCHW_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], - data_format="NCHW") + out_10 = fluid.layers.pool2d( + input=input_NCHW_negetive, + pool_size=ksize, + pool_type="avg", + pool_padding=[0, 0], + data_format="NCHW", + ) assert out_10.shape == (2, 3, -1, -1) exe = fluid.Executor(place=fluid.MLUPlace(0)) @@ -804,46 +825,61 @@ class TestPool2DAPI(unittest.TestCase): "input_NHWC": x_NHWC, "input_NCHW": x_NCHW, "input_NHWC_negetive": x_NHWC, - "input_NCHW_negetive": x_NCHW + "input_NCHW_negetive": x_NCHW, }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) assert np.allclose( res_1, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[1, 1], - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="max", + strides=[1, 1], + paddings=[1, 1], + data_format="NHWC", + ), + ) assert np.allclose( res_2, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NHWC")) - assert np.allclose(res_3, - pool2D_forward_naive(x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NCHW"), - rtol=0.07, - atol=1e-05) - - assert np.allclose(res_4, - pool2D_forward_naive(x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 2, 1, 0], - data_format="NCHW"), - rtol=0.07, - atol=1e-05) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 1, 1, 1], + data_format="NHWC", + ), + ) + assert np.allclose( + res_3, + pool2D_forward_naive( + x=x_NCHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 1, 1, 1], + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) + + assert np.allclose( + res_4, + pool2D_forward_naive( + x=x_NCHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 2, 1, 0], + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) # VALID assert np.allclose( @@ -855,175 +891,215 @@ class TestPool2DAPI(unittest.TestCase): strides=[1, 1], paddings=[10, 20], # any ele is ok padding_algorithm="VALID", - data_format="NCHW"), + data_format="NCHW", + ), rtol=0.07, - atol=1e-05) + atol=1e-05, + ) assert np.allclose( res_6, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="VALID", - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="max", + strides=[1, 1], + paddings=[10, 20], + padding_algorithm="VALID", + data_format="NHWC", + ), + ) # SAME - assert np.allclose(res_7, - pool2D_forward_naive(x=x_NCHW, - ksize=[4, 4], - pool_type="avg", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NCHW"), - rtol=0.07, - atol=1e-05) + assert np.allclose( + res_7, + pool2D_forward_naive( + x=x_NCHW, + ksize=[4, 4], + pool_type="avg", + strides=[1, 1], + paddings=[10, 20], + 
padding_algorithm="SAME", + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) assert np.allclose( res_8, - pool2D_forward_naive(x=x_NHWC, - ksize=[4, 4], - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=[4, 4], + pool_type="max", + strides=[1, 1], + paddings=[10, 20], + padding_algorithm="SAME", + data_format="NHWC", + ), + ) class TestPool2DAPI_Error(unittest.TestCase): - def test_api(self): - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3] # data_format value error def run_2(): - out_2 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - data_format="NHWCC") + out_2 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1], + data_format="NHWCC", + ) self.assertRaises(ValueError, run_2) # padding str value error def run_3(): - out_3 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALIDSAME", - data_format="NHWC") + out_3 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALIDSAME", + data_format="NHWC", + ) self.assertRaises(ValueError, run_3) # padding str valid and ceil_mode value error def run_4(): - out_4 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", - ceil_mode=True, - data_format="NHWC") + out_4 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALID", + ceil_mode=True, + data_format="NHWC", + ) self.assertRaises(ValueError, run_4) # padding with 8 ele. value error def run_5(): - out_5 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[[1, 1], [0, 0], [0, 0], - [1, 1]], - data_format="NHWC") + out_5 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + data_format="NHWC", + ) self.assertRaises(ValueError, run_5) class TestDygraphPool2DAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of Pool2D must be Variable. 
data1 = np.random.random((3, 32, 32, 5)).astype('float32') - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - global_pooling=False) + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False, + ) self.assertRaises(TypeError, pool2d, data1) # the input dtype of mlu Pool2D must be float16 or float32 - data2 = fluid.layers.data(name='x1', - shape=[3, 32, 32, 5], - dtype="int32") + data2 = fluid.layers.data( + name='x1', shape=[3, 32, 32, 5], dtype="int32" + ) self.assertRaises(TypeError, pool2d, data2) def test_data_format_error(self): with program_guard(Program(), Program()): # the data_format must be 'NCHW' or 'NHWC' data1 = np.random.random((3, 32, 32, 5)).astype('float32') - self.assertRaises(ValueError, - fluid.dygraph.Pool2D, - pool_size=2, - pool_type='max', - pool_stride=1, - global_pooling=False, - data_format='NWHC') + self.assertRaises( + ValueError, + fluid.dygraph.Pool2D, + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False, + data_format='NWHC', + ) class TestDygraphPool2DAPI(unittest.TestCase): - def test_nhwc(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='NHWC') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='NHWC', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2) def test_lower_case(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='nhwc') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='nhwc', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2) def test_upper_case(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='MAX', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='nhwc') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='MAX', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='nhwc', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_prior_box_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_prior_box_op_mlu.py index 
2e2b96c9e77346ff5a860fc5a9f081953420f973..8f8473cceab7d65c7b3bfe128e03a68a7c49a3d7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_prior_box_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_prior_box_op_mlu.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import unittest import sys @@ -28,7 +26,6 @@ paddle.enable_static() class TestMLUPriorBox(OpTest): - def setUp(self): self.op_type = "prior_box" self.set_mlu() @@ -60,7 +57,7 @@ class TestMLUPriorBox(OpTest): 'min_max_aspect_ratios_order': self.min_max_aspect_ratios_order, 'step_w': self.step_w, 'step_h': self.step_h, - 'offset': self.offset + 'offset': self.offset, } if len(self.max_sizes) > 0: self.attrs['max_sizes'] = self.max_sizes @@ -95,8 +92,9 @@ class TestMLUPriorBox(OpTest): self.flip = True self.set_min_max_aspect_ratios_order() self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] - self.aspect_ratios = np.array(self.aspect_ratios, - dtype=np.float64).flatten() + self.aspect_ratios = np.array( + self.aspect_ratios, dtype=np.float64 + ).flatten() self.variances = [0.1, 0.1, 0.2, 0.2] self.variances = np.array(self.variances, dtype=np.float64).flatten() @@ -108,12 +106,12 @@ class TestMLUPriorBox(OpTest): def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_w, - self.image_h)).astype('float32') + (self.batch_size, self.image_channels, self.image_w, self.image_h) + ).astype('float32') self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_w, - self.layer_h)).astype('float32') + (self.batch_size, self.input_channels, self.layer_w, self.layer_h) + ).astype('float32') def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -134,73 +132,78 @@ class TestMLUPriorBox(OpTest): ar = self.real_aspect_ratios[r] c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 else: - c_w = c_h = min_size / 2. 
- out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + c_w = c_h = min_size / 2.0 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # rest of priors for r in range(len(self.real_aspect_ratios)): ar = self.real_aspect_ratios[r] - if abs(ar - 1.) < 1e-6: + if abs(ar - 1.0) < 1e-6: continue c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) # set the variance. - out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype('float32') self.out_var = out_var.astype('float32') class TestMLUPriorBoxWithoutMaxSize(TestMLUPriorBox): - def set_max_sizes(self): self.max_sizes = [] class TestMLUPriorBoxWithoutSpecifiedOutOrder(TestMLUPriorBox): - def set_min_max_aspect_ratios_order(self): self.min_max_aspect_ratios_order = False diff --git a/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py index 5412e6c4a7b606744ba0a76b5df63134f7f2b337..3ee3144905ef5edd2babb3b8bd3247dff38b00b2 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_randperm_op_mlu.py @@ -27,31 +27,38 @@ paddle.enable_static() def check_randperm_out(n, data_np): - assert isinstance(data_np, np.ndarray), \ - "The input data_np should be np.ndarray." + assert isinstance( + data_np, np.ndarray + ), "The input data_np should be np.ndarray." 
gt_sorted = np.arange(n) out_sorted = np.sort(data_np) return list(gt_sorted == out_sorted) def error_msg(data_np): - return "The sorted ground truth and sorted out should " + \ - "be equal, out = " + str(data_np) + return ( + "The sorted ground truth and sorted out should " + + "be equal, out = " + + str(data_np) + ) def convert_dtype(dtype_str): dtype_str_list = ["int32", "int64", "float32", "float64"] dtype_num_list = [ - core.VarDesc.VarType.INT32, core.VarDesc.VarType.INT64, - core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64 + core.VarDesc.VarType.INT32, + core.VarDesc.VarType.INT64, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP64, ] - assert dtype_str in dtype_str_list, dtype_str + \ - " should in " + str(dtype_str_list) + assert dtype_str in dtype_str_list, ( + dtype_str + " should in " + str(dtype_str_list) + ) return dtype_num_list[dtype_str_list.index(dtype_str)] class TestRandpermOp(OpTest): - """ Test randperm op.""" + """Test randperm op.""" def setUp(self): self.op_type = "randperm" @@ -77,36 +84,32 @@ class TestRandpermOp(OpTest): def verify_output(self, outs): out_np = np.array(outs[0]) - self.assertTrue(check_randperm_out(self.n, out_np), - msg=error_msg(out_np)) + self.assertTrue( + check_randperm_out(self.n, out_np), msg=error_msg(out_np) + ) class TestRandpermOpN(TestRandpermOp): - def init_attrs(self): self.n = 10000 class TestRandpermOpInt32(TestRandpermOp): - def init_attrs(self): self.dtype = "int32" class TestRandpermOpFloat32(TestRandpermOp): - def init_attrs(self): self.dtype = "float32" class TestRandpermOpFloat64(TestRandpermOp): - def init_attrs(self): self.dtype = "float64" class TestRandpermOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): self.assertRaises(ValueError, paddle.randperm, -3) @@ -114,7 +117,6 @@ class TestRandpermOpError(unittest.TestCase): class TestRandpermAPI(unittest.TestCase): - def test_out(self): n = 10 place = paddle.MLUPlace(0) @@ -132,20 +134,19 @@ class TestRandpermAPI(unittest.TestCase): class TestRandpermImperative(unittest.TestCase): - def test_out(self): paddle.disable_static() n = 10 for dtype in ['int32', np.int64, 'float32', 'float64']: data_p = paddle.randperm(n, dtype) data_np = data_p.numpy() - self.assertTrue(check_randperm_out(n, data_np), - msg=error_msg(data_np)) + self.assertTrue( + check_randperm_out(n, data_np), msg=error_msg(data_np) + ) paddle.enable_static() class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -158,25 +159,70 @@ class TestRandomValue(unittest.TestCase): x = paddle.randperm(30000, dtype='int32').numpy() expect = [ - 24562, 8409, 9379, 10328, 20503, 18059, 9681, 21883, 11783, 27413 + 24562, + 8409, + 9379, + 10328, + 20503, + 18059, + 9681, + 21883, + 11783, + 27413, ] np.testing.assert_allclose(x[0:10], expect) expect = [ - 29477, 27100, 9643, 16637, 8605, 16892, 27767, 2724, 1612, 13096 + 29477, + 27100, + 9643, + 16637, + 8605, + 16892, + 27767, + 2724, + 1612, + 13096, ] np.testing.assert_allclose(x[10000:10010], expect) expect = [ - 298, 4104, 16479, 22714, 28684, 7510, 14667, 9950, 15940, 28343 + 298, + 4104, + 16479, + 22714, + 28684, + 7510, + 14667, + 9950, + 15940, + 28343, ] np.testing.assert_allclose(x[20000:20010], expect) x = paddle.randperm(30000, dtype='int64').numpy() expect = [ - 6587, 1909, 5525, 23001, 6488, 14981, 14355, 3083, 29561, 8171 + 6587, + 1909, + 5525, + 23001, + 
6488, + 14981, + 14355, + 3083, + 29561, + 8171, ] np.testing.assert_allclose(x[0:10], expect) expect = [ - 23460, 12394, 22501, 5427, 20185, 9100, 5127, 1651, 25806, 4818 + 23460, + 12394, + 22501, + 5427, + 20185, + 9100, + 5127, + 1651, + 25806, + 4818, ] np.testing.assert_allclose(x[10000:10010], expect) expect = [5829, 4508, 16193, 24836, 8526, 242, 9984, 9243, 1977, 11839] @@ -184,35 +230,83 @@ class TestRandomValue(unittest.TestCase): x = paddle.randperm(30000, dtype='float32').numpy() expect = [ - 5154., 10537., 14362., 29843., 27185., 28399., 27561., 4144., - 22906., 10705. + 5154.0, + 10537.0, + 14362.0, + 29843.0, + 27185.0, + 28399.0, + 27561.0, + 4144.0, + 22906.0, + 10705.0, ] np.testing.assert_allclose(x[0:10], expect) expect = [ - 1958., 18414., 20090., 21910., 22746., 27346., 22347., 3002., 4564., - 26991. + 1958.0, + 18414.0, + 20090.0, + 21910.0, + 22746.0, + 27346.0, + 22347.0, + 3002.0, + 4564.0, + 26991.0, ] np.testing.assert_allclose(x[10000:10010], expect) expect = [ - 25580., 12606., 553., 16387., 29536., 4241., 20946., 16899., 16339., - 4662. + 25580.0, + 12606.0, + 553.0, + 16387.0, + 29536.0, + 4241.0, + 20946.0, + 16899.0, + 16339.0, + 4662.0, ] np.testing.assert_allclose(x[20000:20010], expect) x = paddle.randperm(30000, dtype='float64').numpy() expect = [ - 19051., 2449., 21940., 11121., 282., 7330., 13747., 24321., 21147., - 9163. + 19051.0, + 2449.0, + 21940.0, + 11121.0, + 282.0, + 7330.0, + 13747.0, + 24321.0, + 21147.0, + 9163.0, ] np.testing.assert_allclose(x[0:10], expect) expect = [ - 15483., 1315., 5723., 20954., 13251., 25539., 5074., 1823., 14945., - 17624. + 15483.0, + 1315.0, + 5723.0, + 20954.0, + 13251.0, + 25539.0, + 5074.0, + 1823.0, + 14945.0, + 17624.0, ] np.testing.assert_allclose(x[10000:10010], expect) expect = [ - 10516., 2552., 29970., 5941., 986., 8007., 24805., 26753., 12202., - 21404. 
+ 10516.0, + 2552.0, + 29970.0, + 5941.0, + 986.0, + 8007.0, + 24805.0, + 26753.0, + 12202.0, + 21404.0, ] np.testing.assert_allclose(x[20000:20010], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_range_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_range_op_mlu.py index d65040ecb394a507438aab16e957b2f5047f116f..6ebbb089e580f274ebd28372a89ac592b278a6b6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_range_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_range_op_mlu.py @@ -29,7 +29,6 @@ def arange_wrapper(start, end, step, dtype=None): class TestRangeOp(OpTest): - def setUp(self): self.op_type = "range" self.place = paddle.device.MLUPlace(0) @@ -38,13 +37,13 @@ class TestRangeOp(OpTest): self.inputs = { 'Start': np.array([self.case[0]]).astype(self.dtype), 'End': np.array([self.case[1]]).astype(self.dtype), - 'Step': np.array([self.case[2]]).astype(self.dtype) + 'Step': np.array([self.case[2]]).astype(self.dtype), } self.outputs = { - 'Out': - np.arange(self.case[0], self.case[1], - self.case[2]).astype(self.dtype) + 'Out': np.arange(self.case[0], self.case[1], self.case[2]).astype( + self.dtype + ) } def init_config(self): @@ -57,7 +56,6 @@ class TestRangeOp(OpTest): class TestFloatRangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.float32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -65,7 +63,6 @@ class TestFloatRangeOpCase0(TestRangeOp): class TestInt32RangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -73,7 +70,6 @@ class TestInt32RangeOpCase0(TestRangeOp): class TestInt32RangeOpCase1(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -81,7 +77,6 @@ class TestInt32RangeOpCase1(TestRangeOp): class TestInt32RangeOpCase2(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reciprocal_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reciprocal_op_mlu.py index e93afd7cc29e7f84044787fe576c985819e4290b..0b6d08bdfd37d2e1790562363b3d9376e721a769 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reciprocal_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reciprocal_op_mlu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestMLUReciprocal(OpTest): - def setUp(self): self.op_type = "reciprocal" self.set_mlu() @@ -41,9 +40,9 @@ class TestMLUReciprocal(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.01 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -54,7 +53,6 @@ class TestMLUReciprocal(OpTest): class TestMLUReciprocalFp16(TestMLUReciprocal): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reduce_max_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reduce_max_op_mlu.py index dc90a521d379d042a37d4e034abb871eacbc1b98..b225e25b06baa526b57293243bc2002355d4c58d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reduce_max_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reduce_max_op_mlu.py @@ -26,7 +26,8 @@ paddle.enable_static() @skip_check_grad_ci( reason="reduce_max is 
discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestMLUReduceMaxOp(OpTest): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -54,7 +55,8 @@ class TestMLUReduceMaxOp(OpTest): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpMultiAxises(TestMLUReduceMaxOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -72,7 +74,8 @@ class TestReduceMaxOpMultiAxises(TestMLUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceAll(TestMLUReduceMaxOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -88,7 +91,8 @@ class TestReduceAll(TestMLUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_int32(TestMLUReduceMaxOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -100,11 +104,12 @@ class TestReduceMaxOpWithOutDtype_int32(TestMLUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.int32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.int32) } def init_dtype(self): @@ -113,7 +118,8 @@ class TestReduceMaxOpWithOutDtype_int32(TestMLUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_fp16(TestMLUReduceMaxOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -125,12 +131,12 @@ class TestReduceMaxOpWithOutDtype_fp16(TestMLUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float16) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float16) } def init_dtype(self): @@ -142,7 +148,8 @@ class TestReduceMaxOpWithOutDtype_fp16(TestMLUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMaxOpWithOutDtype_fp32(TestMLUReduceMaxOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -154,12 +161,12 @@ class TestReduceMaxOpWithOutDtype_fp32(TestMLUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reduce_mean_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reduce_mean_op_mlu.py index 588dc949aeba37ce9dc140c5121082bdc0b5682a..bbb35ef40d3115f5585a34400c67b2d6b2489ddd 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reduce_mean_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reduce_mean_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestMeanOp(OpTest): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) @@ -44,7 +43,6 @@ class TestMeanOp(OpTest): class TestMeanOp5D(TestMeanOp): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -55,7 +53,6 @@ class TestMeanOp5D(TestMeanOp): class TestMeanOp6D(TestMeanOp): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -66,7 +63,6 @@ class TestMeanOp6D(TestMeanOp): class TestMeanOp8D(TestMeanOp): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -78,7 +74,6 @@ class TestMeanOp8D(TestMeanOp): class Test1DReduce(TestMeanOp): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -87,7 +82,6 @@ class Test1DReduce(TestMeanOp): class Test2DReduce0(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -97,7 +91,6 @@ class Test2DReduce0(Test1DReduce): class Test2DReduce1(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -109,7 +102,6 @@ class Test2DReduce1(Test1DReduce): class Test3DReduce0(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -121,7 +113,6 @@ class Test3DReduce0(Test1DReduce): class Test3DReduce1(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -133,7 +124,6 @@ class Test3DReduce1(Test1DReduce): class Test3DReduce2(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -145,7 +135,6 @@ class Test3DReduce2(Test1DReduce): class Test3DReduce3(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -157,21 +146,19 @@ class Test3DReduce3(Test1DReduce): class TestKeepDimReduce(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.attrs = {'dim': [1], 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } class TestKeepDim8DReduce(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" @@ -180,14 +167,13 @@ class TestKeepDim8DReduce(Test1DReduce): } self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + 
axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } class TestReduceAll(Test1DReduce): - def setUp(self): self.set_mlu() self.op_type = "reduce_mean" diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reduce_min_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reduce_min_op_mlu.py index 9d69d6d8cccef84ac795194da727258175fe6b07..d6649f214c4696d489d8d97729dc63afb32d8d94 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reduce_min_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reduce_min_op_mlu.py @@ -26,7 +26,8 @@ paddle.enable_static() @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestMLUReduceMinOp(OpTest): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -54,7 +55,8 @@ class TestMLUReduceMinOp(OpTest): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpMultiAxises(TestMLUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -72,7 +74,8 @@ class TestReduceMinOpMultiAxises(TestMLUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceAll(TestMLUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -88,7 +91,8 @@ class TestReduceAll(TestMLUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_int32(TestMLUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -100,11 +104,12 @@ class TestReduceMinOpWithOutDtype_int32(TestMLUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.int32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.int32) } def init_dtype(self): @@ -113,7 +118,8 @@ class TestReduceMinOpWithOutDtype_int32(TestMLUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMinOpWithOutDtype_fp16(TestMLUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -125,12 +131,12 @@ class TestReduceMinOpWithOutDtype_fp16(TestMLUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float16) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float16) } def init_dtype(self): @@ -142,7 +148,8 @@ class TestReduceMinOpWithOutDtype_fp16(TestMLUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_fp32(TestMLUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -154,12 +161,12 @@ class TestReduceMinOpWithOutDtype_fp32(TestMLUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reduce_prod_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reduce_prod_op_mlu.py index 85179a72f0302d180fdc17e5ec53aac8c669ca19..fe690daabea33e03d72ac2066e0ecd2acc5a899f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reduce_prod_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reduce_prod_op_mlu.py @@ -29,7 +29,6 @@ def raw_reduce_prod(x, dim=[0], keep_dim=False): class TestProdOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.place = paddle.device.MLUPlace(0) @@ -47,7 +46,6 @@ class TestProdOp(OpTest): class TestProd6DOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.place = paddle.device.MLUPlace(0) @@ -70,7 +68,6 @@ class TestProd6DOp(OpTest): class TestProd8DOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.place = paddle.device.MLUPlace(0) @@ -78,8 +75,9 @@ class TestProd8DOp(OpTest): self.python_api = raw_reduce_prod self.init_data_type() self.inputs = { - 'X': np.random.random( - (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type) + 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype( + self.data_type + ) } self.attrs = {'dim': [2, 3, 4]} self.outputs = { diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reduce_sum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reduce_sum_op_mlu.py index 823a7398f07092f89255bebb0635f09b0572b95a..219b17a039e1583797ec606357ae082050ed944d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reduce_sum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reduce_sum_op_mlu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestMLUReduceSumOp(OpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -32,16 +31,16 @@ class TestMLUReduceSumOp(OpTest): self.attrs = { 'dim': self.axis, 'keep_dim': self.keep_dim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } self.inputs = {'X': np.random.random(self.shape).astype("float32")} if 
self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].sum()} else: self.outputs = { - 'Out': - self.inputs['X'].sum(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def set_mlu(self): @@ -62,105 +61,92 @@ class TestMLUReduceSumOp(OpTest): def initTestCase(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) class TestSumOp5D(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (1, 2, 5, 6, 10) - self.axis = (0, ) + self.axis = (0,) class TestSumOp6D(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (1, 1, 2, 5, 6, 10) - self.axis = (0, ) + self.axis = (0,) class TestSumOp8D(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (1, 3, 1, 2, 1, 4, 3, 10) self.axis = (0, 3) class Test1DReduce(TestMLUReduceSumOp): - def initTestCase(self): self.shape = 120 - self.axis = (0, ) + self.axis = (0,) class Test2DReduce0(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (20, 10) - self.axis = (0, ) + self.axis = (0,) class Test2DReduce1(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (20, 10) - self.axis = (1, ) + self.axis = (1,) class Test3DReduce0(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 7) - self.axis = (1, ) + self.axis = (1,) class Test3DReduce1(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 7) - self.axis = (2, ) + self.axis = (2,) class Test3DReduce2(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 7) - self.axis = (-2, ) + self.axis = (-2,) class Test3DReduce3(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 7) self.axis = (1, 2) class TestKeepDimReduce(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 10) - self.axis = (1, ) + self.axis = (1,) self.keep_dim = True class TestKeepDim8DReduce(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (2, 5, 3, 2, 2, 3, 4, 2) self.axis = (3, 4, 5) self.keep_dim = True def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.03) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.03 + ) class TestReduceAll(TestMLUReduceSumOp): - def initTestCase(self): self.shape = (5, 6, 2, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = True diff --git a/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py index 45a1a825c435af89bc5fcf5412234a766de0ec7c..4e8ebacf0eb69271164e60fee884fdc2f4ea5414 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_relu6_op_mlu.py @@ -34,7 +34,6 @@ def ref_relu6(x, threshold=6.0): class TestRelu6(OpTest): - def setUp(self): self.set_mlu() self.op_type = "relu6" @@ -64,7 +63,6 @@ class TestRelu6(OpTest): class TestRelu6Float16(TestRelu6): - def set_mlu(self): self.__class__.use_mlu = True self.__class__.no_need_check_grad = True @@ -77,7 +75,6 @@ class TestRelu6Float16(TestRelu6): class TestReluNeg(TestRelu6): - def setUp(self): self.set_mlu() self.op_type = "relu6" @@ -104,7 +101,6 @@ class TestReluNeg(TestRelu6): class TestRelu6Net(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -119,9 +115,9 @@ class TestRelu6Net(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = 
paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.nn.functional.relu6(sum) @@ -145,16 +141,17 @@ class TestRelu6Net(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py index a6c4303a2085aba31eeb32a4b0d37ebe6b9ca777..db62e592ac2dd7bb6a1014faab12a4e44af58aca 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_relu_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestRelu(OpTest): - def setUp(self): self.set_mlu() self.op_type = "relu" @@ -52,7 +51,6 @@ class TestRelu(OpTest): class TestReluFp16(OpTest): - def setUp(self): self.set_mlu() self.op_type = "relu" @@ -79,7 +77,6 @@ class TestReluFp16(OpTest): class TestReluNeg(OpTest): - def setUp(self): self.set_mlu() self.op_type = "relu" @@ -105,7 +102,6 @@ class TestReluNeg(OpTest): class TestReluNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -120,9 +116,9 @@ class TestReluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.nn.functional.relu(sum) @@ -146,16 +142,17 @@ class TestReluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_reshape2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_reshape2_op_mlu.py index 3354acf6d0cbe92209cf893ff42bd917076f11f7..94090b9a1776dbec4b3356b9c4c60f2c9747cb4e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_reshape2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_reshape2_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2022 class TestReshape2(OpTest): - def setUp(self): self.set_mlu() self.op_type = "reshape2" @@ -37,7 +36,7 @@ class TestReshape2(OpTest): self.attrs = {"shape": self.new_shape} self.outputs = { "Out": 
self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def set_mlu(self): @@ -56,7 +55,6 @@ class TestReshape2(OpTest): class TestReshape2_case2(TestReshape2): - def init_data(self): self.ori_shape = (2, 100) self.new_shape = (-1, 10) @@ -64,7 +62,6 @@ class TestReshape2_case2(TestReshape2): class TestReshape2_case3(TestReshape2): - def init_data(self): self.ori_shape = (100, 5, 6) self.new_shape = (-1, 0, 3) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_rnn_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_rnn_op_mlu.py index e5f9d154bfd498aafaa9de7a01fa03cab7df7b41..22597342f7fb489285deeebddd0bfacb19f65768 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_rnn_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_rnn_op_mlu.py @@ -35,7 +35,6 @@ paddle.enable_static() class TestRNNOp(OpTest): - def get_weight_names(self): weight_names = [] for i in range(self.num_layers): @@ -64,41 +63,53 @@ class TestRNNOp(OpTest): self.direction_num = 2 if self.is_bidirec else 1 direction = "bidirectional" if self.is_bidirec else "forward" - input = np.random.uniform(low=-0.1, - high=0.1, - size=(self.seq_length, self.batch_size, - self.input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, + high=0.1, + size=(self.seq_length, self.batch_size, self.input_size), + ).astype(self.dtype) input[11][1:][:] = 0 input[10][2:][:] = 0 input[9][3:][:] = 0 input[8][4:][:] = 0 - rnn1 = LSTM(self.input_size, - self.hidden_size, - num_layers=self.num_layers, - time_major=True, - direction=direction, - dropout=self.dropout, - dtype=self.dtype) + rnn1 = LSTM( + self.input_size, + self.hidden_size, + num_layers=self.num_layers, + time_major=True, + direction=direction, + dropout=self.dropout, + dtype=self.dtype, + ) flat_w = get_params_for_net(rnn1) - output, (last_hidden, - last_cell) = rnn1(input, sequence_length=self.sequence_length) + output, (last_hidden, last_cell) = rnn1( + input, sequence_length=self.sequence_length + ) init_h = np.zeros( - (self.num_layers * self.direction_num, self.batch_size, - self.hidden_size)).astype(self.dtype) + ( + self.num_layers * self.direction_num, + self.batch_size, + self.hidden_size, + ) + ).astype(self.dtype) init_c = np.zeros( - (self.num_layers * self.direction_num, self.batch_size, - self.hidden_size)).astype(self.dtype) + ( + self.num_layers * self.direction_num, + self.batch_size, + self.hidden_size, + ) + ).astype(self.dtype) state_out = np.ndarray((300)).astype("uint8") self.inputs = { 'Input': input, 'WeightList': flat_w, 'PreState': [('init_h', init_h), ('init_c', init_c)], - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if self.sequence_length is None: self.inputs = { @@ -113,13 +124,13 @@ class TestRNNOp(OpTest): 'hidden_size': self.hidden_size, 'num_layers': self.num_layers, 'mode': self.mode, - 'is_test': self.is_test + 'is_test': self.is_test, } self.outputs = { 'Out': output, "State": [('last_hidden', last_hidden), ('last_cell', last_cell)], 'Reserve': np.ndarray((400)).astype("uint8"), - 'DropoutState': state_out + 'DropoutState': state_out, } def init_dtype(self): @@ -135,7 +146,8 @@ class TestRNNOp(OpTest): self.check_output_with_place( self.place, atol=1e-4, - no_check_set=['Reserve', 'DropoutState', 'State']) + no_check_set=['Reserve', 'DropoutState', 'State'], + ) def set_attrs(self): pass @@ -146,39 +158,38 @@ class TestRNNOp(OpTest): var_name_list = 
self.get_weight_names() grad_check_list = ['Input', 'init_h', 'init_c'] grad_check_list.extend(var_name_list) - self.check_grad_with_place(self.place, set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad_with_place( + self.place, + set(grad_check_list), + ['Out', 'last_hidden', 'last_cell'], + ) class TestRNNOp1(TestRNNOp): - def set_attrs(self): self.sequence_length = None class TestRNNOp2(TestRNNOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True class TestRNNOp3(TestRNNOp): - def set_attrs(self): self.is_test = True self.sequence_length = None class TestRNNOp4(TestRNNOp): - def set_attrs(self): self.is_test = True self.sequence_length = None self.is_bidirec = True -#TODO(chenxiao): cnnl doesn't support num_layers > 1 case +# TODO(chenxiao): cnnl doesn't support num_layers > 1 case # class TestRNNOp5(TestRNNOp): # def set_attrs(self): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_roi_align_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_roi_align_op_mlu.py index 61ca7b38a80597e562b0dd6591c36e37c10f1cba..0f7b347315fc6ed4b739b79152e7ab6ea8b83a5d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_roi_align_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_roi_align_op_mlu.py @@ -26,7 +26,6 @@ np.random.seed(1243) class TestROIAlignMLUOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -37,7 +36,7 @@ class TestROIAlignMLUOp(OpTest): self.inputs = { 'X': self.x, 'ROIs': self.rois[:, 1:5], - 'RoisNum': np.asarray(seq_len).astype('int32') + 'RoisNum': np.asarray(seq_len).astype('int32'), } # print("self.inputs: ",self.inputs) @@ -46,7 +45,7 @@ class TestROIAlignMLUOp(OpTest): 'pooled_height': self.pooled_height, 'pooled_width': self.pooled_width, 'sampling_ratio': self.sampling_ratio, - 'aligned': self.aligned + 'aligned': self.aligned, } self.outputs = {'Out': self.out_data} @@ -68,25 +67,45 @@ class TestROIAlignMLUOp(OpTest): self.x = np.random.random(self.x_dim).astype('float32') - def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w, - bin_size_h, bin_size_w): + def pre_calc( + self, + x_i, + roi_xmin, + roi_ymin, + roi_bin_grid_h, + roi_bin_grid_w, + bin_size_h, + bin_size_w, + ): count = roi_bin_grid_h * roi_bin_grid_w bilinear_pos = np.zeros( [self.channels, self.pooled_height, self.pooled_width, count, 4], - np.float32) - bilinear_w = np.zeros([self.pooled_height, self.pooled_width, count, 4], - np.float32) + np.float32, + ) + bilinear_w = np.zeros( + [self.pooled_height, self.pooled_width, count, 4], np.float32 + ) for ph in range(self.pooled_width): for pw in range(self.pooled_height): c = 0 for iy in range(roi_bin_grid_h): - y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \ - bin_size_h / roi_bin_grid_h + y = ( + roi_ymin + + ph * bin_size_h + + (iy + 0.5) * bin_size_h / roi_bin_grid_h + ) for ix in range(roi_bin_grid_w): - x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \ - bin_size_w / roi_bin_grid_w - if y < -1.0 or y > self.height or \ - x < -1.0 or x > self.width: + x = ( + roi_xmin + + pw * bin_size_w + + (ix + 0.5) * bin_size_w / roi_bin_grid_w + ) + if ( + y < -1.0 + or y > self.height + or x < -1.0 + or x > self.width + ): continue if y <= 0: y = 0 @@ -107,14 +126,18 @@ class TestROIAlignMLUOp(OpTest): hy = 1 - ly hx = 1 - lx for ch in range(self.channels): - bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low, - x_low] - bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low, - x_high] - bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high, - x_low] - 
bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high, - x_high] + bilinear_pos[ch, ph, pw, c, 0] = x_i[ + ch, y_low, x_low + ] + bilinear_pos[ch, ph, pw, c, 1] = x_i[ + ch, y_low, x_high + ] + bilinear_pos[ch, ph, pw, c, 2] = x_i[ + ch, y_high, x_low + ] + bilinear_pos[ch, ph, pw, c, 3] = x_i[ + ch, y_high, x_high + ] bilinear_w[ph, pw, c, 0] = hy * hx bilinear_w[ph, pw, c, 1] = hy * lx bilinear_w[ph, pw, c, 2] = ly * hx @@ -124,10 +147,15 @@ class TestROIAlignMLUOp(OpTest): def calc_roi_align(self): self.out_data = np.zeros( - (self.rois_num, self.channels, self.pooled_height, - self.pooled_width)).astype('float32') - - offset = 0.5 if self.aligned else 0. + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ).astype('float32') + + offset = 0.5 if self.aligned else 0.0 for i in range(self.rois_num): roi = self.rois[i] roi_batch_id = int(roi[0]) @@ -145,16 +173,27 @@ class TestROIAlignMLUOp(OpTest): bin_size_h = float(roi_height) / float(self.pooled_height) bin_size_w = float(roi_width) / float(self.pooled_width) - roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_height / self.pooled_height) - roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_width / self.pooled_width) + roi_bin_grid_h = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_height / self.pooled_height) + ) + roi_bin_grid_w = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_width / self.pooled_width) + ) count = max(int(roi_bin_grid_h * roi_bin_grid_w), 1) pre_size = count * self.pooled_width * self.pooled_height - bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin, - int(roi_bin_grid_h), - int(roi_bin_grid_w), - bin_size_h, bin_size_w) + bilinear_pos, bilinear_w = self.pre_calc( + x_i, + roi_xmin, + roi_ymin, + int(roi_bin_grid_h), + int(roi_bin_grid_w), + bin_size_h, + bin_size_w, + ) for ch in range(self.channels): align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1) output_val = align_per_bin.mean(axis=-1) @@ -168,14 +207,18 @@ class TestROIAlignMLUOp(OpTest): # self.rois_lod[0].append(bno) self.rois_lod[0].append(1) x1 = np.random.randint( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.randint( - 0, self.height // self.spatial_scale - self.pooled_height) + 0, self.height // self.spatial_scale - self.pooled_height + ) - x2 = np.random.randint(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.randint(y1 + self.pooled_height, - self.height // self.spatial_scale) + x2 = np.random.randint( + x1 + self.pooled_width, self.width // self.spatial_scale + ) + y2 = np.random.randint( + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) @@ -197,7 +240,6 @@ class TestROIAlignMLUOp(OpTest): class TestROIAlignOpWithMinusSample(TestROIAlignMLUOp): - def init_test_case(self): self.batch_size = 3 self.channels = 3 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py index 9b04042458d0ae882c00e360fbdd8c2eb5740f8f..c7858a482295d534c1ed472007cd76ce2a951645 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestScaleOp(OpTest): - def setUp(self): self.op_type = "scale" self.place = 
paddle.device.MLUPlace(0) @@ -49,7 +48,6 @@ class TestScaleOp(OpTest): class TestScaleOpScaleVariable(OpTest): - def setUp(self): self.op_type = "scale" self.place = paddle.device.MLUPlace(0) @@ -59,7 +57,7 @@ class TestScaleOpScaleVariable(OpTest): self.scale = -2.3 self.inputs = { 'X': np.random.random((10, 10)).astype(self.dtype), - 'ScaleTensor': np.array([self.scale]).astype('float32') + 'ScaleTensor': np.array([self.scale]).astype('float32'), } self.attrs = {} self.outputs = {'Out': self.inputs['X'] * self.dtype(self.scale)} @@ -72,7 +70,6 @@ class TestScaleOpScaleVariable(OpTest): class TestScaleOpSelectedRows(unittest.TestCase): - def init_dtype_type(self): pass @@ -91,8 +88,9 @@ class TestScaleOpSelectedRows(unittest.TestCase): in_selected_rows = scope.var(in_name).get_selected_rows() in_selected_rows.set_height(in_height) in_selected_rows.set_rows(in_rows) - in_array = np.random.random( - (len(in_rows), in_row_numel)).astype(self.dtype) + in_array = np.random.random((len(in_rows), in_row_numel)).astype( + self.dtype + ) in_tensor = in_selected_rows.get_tensor() in_tensor.set(in_array, place) @@ -131,9 +129,7 @@ class TestScaleOpSelectedRows(unittest.TestCase): class TestScaleRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.scale([10]) @@ -141,10 +137,10 @@ class TestScaleRaiseError(unittest.TestCase): # Add FP16 test -@unittest.skipIf(not core.is_compiled_with_mlu(), - "core is not compiled with MLU") +@unittest.skipIf( + not core.is_compiled_with_mlu(), "core is not compiled with MLU" +) class TestScaleFp16Op(TestScaleOp): - def init_dtype_type(self): self.dtype = np.float16 @@ -152,10 +148,10 @@ class TestScaleFp16Op(TestScaleOp): self.check_output_with_place(self.place, atol=0.002) -@unittest.skipIf(not core.is_compiled_with_mlu(), - "core is not compiled with MLU") +@unittest.skipIf( + not core.is_compiled_with_mlu(), "core is not compiled with MLU" +) class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows): - def init_dtype_type(self): self.dtype = np.float16 @@ -169,7 +165,6 @@ class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows): class TestScaleApiStatic(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, bias) @@ -187,13 +182,11 @@ class TestScaleApiStatic(unittest.TestCase): class TestScaleInplaceApiStatic(TestScaleApiStatic): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) class TestScaleApiDygraph(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, bias) @@ -207,7 +200,6 @@ class TestScaleApiDygraph(unittest.TestCase): class TestScaleInplaceApiDygraph(TestScaleApiDygraph): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py index 76042074b0ea0749a5be54c737ad2baba17f4899..cb13f305caebaa7787161f2849d9f0b988278962 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestScatterOp(OpTest): - def setUp(self): self.op_type = "scatter" self.place = paddle.device.MLUPlace(0) @@ -50,7 +49,6 @@ class TestScatterOp(OpTest): class TestScatterOp0(OpTest): - def setUp(self): self.op_type = "scatter" self.place = paddle.device.MLUPlace(0) @@ -73,7 +71,6 @@ class TestScatterOp0(OpTest): class 
TestScatterOp1(OpTest): - def setUp(self): self.op_type = "scatter" self.place = paddle.device.MLUPlace(0) @@ -99,7 +96,6 @@ class TestScatterOp1(OpTest): class TestScatterOp2(OpTest): - def setUp(self): self.op_type = "scatter" self.place = paddle.device.MLUPlace(0) @@ -121,7 +117,6 @@ class TestScatterOp2(OpTest): class TestScatterAPI(unittest.TestCase): - def setUp(self): self.places = [paddle.device.MLUPlace(0)] self.__class__.use_mlu = True @@ -139,19 +134,26 @@ class TestScatterAPI(unittest.TestCase): input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) index_data = np.array([2, 1, 0, 1]).astype(np.int64) - updates_data = np.array([[1, 1], [2, 2], [3, 3], - [4, 4]]).astype(np.float32) + updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype( + np.float32 + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input": input_data, - "index": index_data, - "updates": updates_data - }, - fetch_list=[result]) - self.assertEqual((fetches[0] == \ - np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True) + fetches = exe.run( + fluid.default_main_program(), + feed={ + "input": input_data, + "index": index_data, + "updates": updates_data, + }, + fetch_list=[result], + ) + self.assertEqual( + ( + fetches[0] == np.array([[3.0, 3.0], [6.0, 6.0], [1.0, 1.0]]) + ).all(), + True, + ) def test_static(self): for place in self.places: @@ -162,16 +164,22 @@ class TestScatterAPI(unittest.TestCase): with fluid.dygraph.guard(place): x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) index_data = np.array([2, 1, 0, 1]).astype(np.int64) - updates_data = np.array([[1, 1], [2, 2], [3, 3], - [4, 4]]).astype(np.float32) + updates_data = np.array( + [[1, 1], [2, 2], [3, 3], [4, 4]] + ).astype(np.float32) x = fluid.dygraph.to_variable(x_data) index = fluid.dygraph.to_variable(index_data) updates = fluid.dygraph.to_variable(updates_data) output1 = self.scatter(x, index, updates, overwrite=False) - self.assertEqual((output1.numpy() == \ - np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True) + self.assertEqual( + ( + output1.numpy() + == np.array([[3.0, 3.0], [6.0, 6.0], [1.0, 1.0]]) + ).all(), + True, + ) def test_large_data(self): if os.name == "nt": @@ -183,27 +191,30 @@ class TestScatterAPI(unittest.TestCase): def test_dygraph(): with fluid.dygraph.guard(): - mlu_out = paddle.scatter(paddle.to_tensor(x), - paddle.to_tensor(index), - paddle.to_tensor(updates)) + mlu_out = paddle.scatter( + paddle.to_tensor(x), + paddle.to_tensor(index), + paddle.to_tensor(updates), + ) return mlu_out.numpy() @switch_to_static_graph def test_static_graph(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x_t = paddle.static.data(name="x", dtype=x.dtype, shape=x.shape) - index_t = paddle.static.data(name="index", - dtype=index.dtype, - shape=index.shape) - updates_t = paddle.static.data(name="updates", - dtype=updates.dtype, - shape=updates.shape) + index_t = paddle.static.data( + name="index", dtype=index.dtype, shape=index.shape + ) + updates_t = paddle.static.data( + name="updates", dtype=updates.dtype, shape=updates.shape + ) out_t = paddle.scatter(x_t, index_t, updates_t) feed = { x_t.name: x, index_t.name: index, - updates_t.name: updates + updates_t.name: updates, } fetch = [out_t] @@ -215,7 +226,6 @@ class TestScatterAPI(unittest.TestCase): class TestScatterOpFp16(OpTest): - def setUp(self): self.op_type = "scatter" 
self.place = paddle.device.MLUPlace(0) @@ -238,7 +248,6 @@ class TestScatterOpFp16(OpTest): class TestScatterInplaceAPI(TestScatterAPI): - def executed_api(self): self.scatter = paddle.scatter_ diff --git a/python/paddle/fluid/tests/unittests/mlu/test_set_value_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_set_value_op_mlu.py index d621aab9507b14e5bafea6ca2f6a1e7aaacca0a1..d9db940b029437aaef2cad95ef0ddf50fbf7a181 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_set_value_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_set_value_op_mlu.py @@ -24,7 +24,6 @@ from paddle.fluid import core class TestSetValueBase(unittest.TestCase): - def set_mlu(self): self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) @@ -55,7 +54,6 @@ class TestSetValueBase(unittest.TestCase): class TestSetValueApi(TestSetValueBase): - def _run_static(self): paddle.enable_static() with paddle.static.program_guard(self.program): @@ -71,15 +69,18 @@ class TestSetValueApi(TestSetValueBase): static_out = self._run_static() self._get_answer() - error_msg = "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" - self.assertTrue((self.data == static_out).all(), - msg=error_msg.format("static", self.data, static_out)) + error_msg = ( + "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" + ) + self.assertTrue( + (self.data == static_out).all(), + msg=error_msg.format("static", self.data, static_out), + ) # 1. Test different type of item: int, Python slice, Paddle Tensor # 1.1 item is int class TestSetValueItemInt(TestSetValueApi): - def _call_setitem(self, x): x[0] = self.value @@ -90,7 +91,6 @@ class TestSetValueItemInt(TestSetValueApi): # 1.2 item is slice # 1.2.1 step is 1 class TestSetValueItemSlice(TestSetValueApi): - def _call_setitem(self, x): x[0:2] = self.value @@ -99,7 +99,6 @@ class TestSetValueItemSlice(TestSetValueApi): class TestSetValueItemSlice2(TestSetValueApi): - def _call_setitem(self, x): x[0:-1] = self.value @@ -108,7 +107,6 @@ class TestSetValueItemSlice2(TestSetValueApi): class TestSetValueItemSlice3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2] = self.value @@ -117,7 +115,6 @@ class TestSetValueItemSlice3(TestSetValueApi): class TestSetValueItemSlice4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2, :] = self.value @@ -126,7 +123,6 @@ class TestSetValueItemSlice4(TestSetValueApi): class TestSetValueItemSlice5(TestSetValueApi): - def set_shape(self): self.shape = [100, 426, 640] @@ -137,8 +133,8 @@ class TestSetValueItemSlice5(TestSetValueApi): self.data[0:-1] = self.value -#TODO: Fix this after MLU support while_loop -#class TestSetValueItemSliceInWhile(TestSetValueApi): +# TODO: Fix this after MLU support while_loop +# class TestSetValueItemSliceInWhile(TestSetValueApi): # def _call_setitem(self, x): # def cond(i, x): # return i < 1 @@ -157,7 +153,6 @@ class TestSetValueItemSlice5(TestSetValueApi): # 1.2.2 step > 1 class TestSetValueItemSliceStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 5, 5] @@ -169,7 +164,6 @@ class TestSetValueItemSliceStep(TestSetValueApi): class TestSetValueItemSliceStep2(TestSetValueApi): - def set_shape(self): self.shape = [7, 5, 5] @@ -181,7 +175,6 @@ class TestSetValueItemSliceStep2(TestSetValueApi): class TestSetValueItemSliceStep3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2, ::2] = self.value @@ -190,7 +183,6 @@ class TestSetValueItemSliceStep3(TestSetValueApi): class TestSetValueItemSliceStep4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2:2, 
:] = self.value @@ -200,7 +192,6 @@ class TestSetValueItemSliceStep4(TestSetValueApi): # 1.2.3 step < 0 class TestSetValueItemSliceNegetiveStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 2] @@ -215,7 +206,6 @@ class TestSetValueItemSliceNegetiveStep(TestSetValueApi): class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): - def set_shape(self): self.shape = [5] @@ -230,7 +220,6 @@ class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): - def set_shape(self): self.shape = [3] @@ -245,7 +234,6 @@ class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -260,7 +248,6 @@ class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): class TestSetValueItemEllipsis1(TestSetValueApi): - def _call_setitem(self, x): x[0:, ..., 1:] = self.value @@ -269,7 +256,6 @@ class TestSetValueItemEllipsis1(TestSetValueApi): class TestSetValueItemEllipsis2(TestSetValueApi): - def _call_setitem(self, x): x[0:, ...] = self.value @@ -278,7 +264,6 @@ class TestSetValueItemEllipsis2(TestSetValueApi): class TestSetValueItemEllipsis3(TestSetValueApi): - def _call_setitem(self, x): x[..., 1:] = self.value @@ -287,7 +272,6 @@ class TestSetValueItemEllipsis3(TestSetValueApi): class TestSetValueItemEllipsis4(TestSetValueApi): - def _call_setitem(self, x): x[...] = self.value @@ -297,7 +281,6 @@ class TestSetValueItemEllipsis4(TestSetValueApi): # 1.4 item is Paddle Tensor class TestSetValueItemTensor(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") x[zero] = self.value @@ -307,7 +290,6 @@ class TestSetValueItemTensor(TestSetValueApi): class TestSetValueItemTensor2(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -318,7 +300,6 @@ class TestSetValueItemTensor2(TestSetValueApi): class TestSetValueItemTensor3(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -329,7 +310,6 @@ class TestSetValueItemTensor3(TestSetValueApi): class TestSetValueItemTensor4(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -340,7 +320,6 @@ class TestSetValueItemTensor4(TestSetValueApi): class TestSetValueItemTensor5(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -351,7 +330,6 @@ class TestSetValueItemTensor5(TestSetValueApi): class TestSetValueItemTensor6(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -366,7 +344,6 @@ class TestSetValueItemTensor6(TestSetValueApi): # 1.5 item is None class TestSetValueItemNone1(TestSetValueApi): - def _call_setitem(self, x): x[None] = self.value @@ -375,7 +352,6 @@ class TestSetValueItemNone1(TestSetValueApi): class TestSetValueItemNone2(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 1] = self.value @@ -384,7 +360,6 @@ class TestSetValueItemNone2(TestSetValueApi): class TestSetValueItemNone3(TestSetValueApi): - def _call_setitem(self, x): x[:, None, None, 1] = self.value @@ -393,7 +368,6 @@ class TestSetValueItemNone3(TestSetValueApi): class TestSetValueItemNone4(TestSetValueApi): - def _call_setitem(self, x): x[0, 0, None, 1] = self.value @@ -402,7 +376,6 @@ class TestSetValueItemNone4(TestSetValueApi): class 
TestSetValueItemNone5(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 0, None, 1] = self.value @@ -411,7 +384,6 @@ class TestSetValueItemNone5(TestSetValueApi): class TestSetValueItemNone6(TestSetValueApi): - def _call_setitem(self, x): x[None, 0, 0, None, 0] = self.value @@ -420,7 +392,6 @@ class TestSetValueItemNone6(TestSetValueApi): class TestSetValueItemNone7(TestSetValueApi): - def _call_setitem(self, x): x[:, None, 1] = np.zeros(self.shape)[:, None, 0] @@ -429,7 +400,6 @@ class TestSetValueItemNone7(TestSetValueApi): class TestSetValueItemNone8(TestSetValueApi): - def _call_setitem(self, x): x[:, 1, None] = np.zeros(self.shape)[:, 0, None] @@ -438,7 +408,6 @@ class TestSetValueItemNone8(TestSetValueApi): class TestSetValueItemNone9(TestSetValueApi): - def _call_setitem(self, x): x[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None] @@ -448,7 +417,6 @@ class TestSetValueItemNone9(TestSetValueApi): # 1.5 item is list or Tensor of bol class TestSetValueItemBool1(TestSetValueApi): - def _call_setitem(self, x): x[[True, False]] = self.value @@ -457,7 +425,6 @@ class TestSetValueItemBool1(TestSetValueApi): class TestSetValueItemBool2(TestSetValueApi): - def _call_setitem(self, x): x[[False, False]] = self.value @@ -466,7 +433,6 @@ class TestSetValueItemBool2(TestSetValueApi): class TestSetValueItemBool3(TestSetValueApi): - def _call_setitem(self, x): x[[False, True]] = np.zeros(self.shape[2]) @@ -475,7 +441,6 @@ class TestSetValueItemBool3(TestSetValueApi): class TestSetValueItemBool4(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign(np.array([False, True])) x[idx] = np.zeros(self.shape[2]) @@ -485,19 +450,19 @@ class TestSetValueItemBool4(TestSetValueApi): class TestSetValueItemBool5(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign( - np.array([[False, True, False], [True, True, False]])) + np.array([[False, True, False], [True, True, False]]) + ) x[idx] = self.value def _get_answer(self): - self.data[np.array([[False, True, False], [True, True, - False]])] = self.value + self.data[ + np.array([[False, True, False], [True, True, False]]) + ] = self.value class TestSetValueItemBool6(TestSetValueApi): - def _call_setitem(self, x): x[0, ...] = 0 x[x > 0] = self.value @@ -508,9 +473,7 @@ class TestSetValueItemBool6(TestSetValueApi): def create_test_value_int32(parent): - class TestValueInt(parent): - def set_value(self): self.value = 7 @@ -531,9 +494,7 @@ create_test_value_int32(TestSetValueItemSlice5) def create_test_value_tensor_fp32(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "float32" @@ -559,7 +520,6 @@ create_test_value_tensor_fp32(TestSetValueItemSlice5) # 3. 
Test different shape of value class TestSetValueValueShape1(TestSetValueApi): - def set_value(self): self.value = np.array([3, 4, 5, 6]) # shape is (4,) @@ -571,7 +531,6 @@ class TestSetValueValueShape1(TestSetValueApi): class TestSetValueValueShape2(TestSetValueApi): - def set_value(self): self.value = np.array([[3, 4, 5, 6]]) # shape is (1,4) @@ -583,10 +542,10 @@ class TestSetValueValueShape2(TestSetValueApi): class TestSetValueValueShape3(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, 3]]) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ) # shape is (3,4) def _call_setitem(self, x): x[0] = self.value @@ -596,11 +555,12 @@ class TestSetValueValueShape3(TestSetValueApi): class TestSetValueValueShape4(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, - 3]]).astype(self.dtype) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ).astype( + self.dtype + ) # shape is (3,4) def _call_setitem(self, x): x[0] = paddle.assign(self.value) # x is Paddle.Tensor @@ -610,7 +570,6 @@ class TestSetValueValueShape4(TestSetValueApi): class TestSetValueValueShape5(TestSetValueApi): - def set_value(self): self.value = np.array([3, 3, 3]).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_shape_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_shape_op_mlu.py index 7e743dc13c4f68b908a21592428ae9371d05c4f8..f18ba5532b47b2459e2b85818ef1ed6541b8e007 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_shape_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_shape_op_mlu.py @@ -19,6 +19,7 @@ import sys sys.path.append("..") from op_test import OpTest import paddle + # from paddle.fluid import core # from paddle.fluid.op import Operator @@ -27,7 +28,6 @@ SEED = 2022 class TestShape(OpTest): - def setUp(self): self.set_mlu() self.op_type = "shape" @@ -53,43 +53,36 @@ class TestShape(OpTest): class TestShape_fp16(TestShape): - def init_dtype(self): self.dtype = np.float16 class TestShape_double(TestShape): - def init_dtype(self): self.dtype = np.float64 class TestShape_int32(TestShape): - def init_dtype(self): self.dtype = np.int32 class TestShape_int64(TestShape): - def init_dtype(self): self.dtype = np.int64 class TestShape_int8(TestShape): - def init_dtype(self): self.dtype = np.int8 class TestShape_uint8(TestShape): - def init_dtype(self): self.dtype = np.uint8 class TestShape_bool(TestShape): - def init_dtype(self): self.dtype = bool diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_cross_entropy_with_logits_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_cross_entropy_with_logits_op_mlu.py index ead15510e19f04da2bfb9f6662527a458111231e..7e594cb3db5fe044b9f6f8c53883de7fe57ffb8d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_cross_entropy_with_logits_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_cross_entropy_with_logits_op_mlu.py @@ -29,8 +29,7 @@ paddle.enable_static() class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -40,13 +39,14 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, 
(batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.randint(0, 2, (batch_size, num_classes)).astype( + self.dtype + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -71,10 +71,10 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): self.dtype = np.float32 -class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ +class TestSigmoidCrossEntropyWithLogitsOp3( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -84,13 +84,14 @@ class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.uniform(0, 1, (batch_size, num_classes)).astype( + self.dtype + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -102,10 +103,10 @@ class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 self.outputs = {'Out': -term1 - term2} -class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ +class TestSigmoidCrossEntropyWithLogitsOp5( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -115,14 +116,14 @@ class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, tuple(batch_size + [num_classes])).astype( - self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -134,10 +135,10 @@ class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 self.outputs = {'Out': -term1 - term2} -class TestSigmoidCrossEntropyWithLogitsOp6(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ +class TestSigmoidCrossEntropyWithLogitsOp6( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -147,14 +148,14 @@ class TestSigmoidCrossEntropyWithLogitsOp6(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, tuple(batch_size + [num_classes])).astype( - self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.randint( + 0, 2, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } # Fw Pass is 
implemented as elementwise sigmoid followed by diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_op_mlu.py index b45dba73c5d65a4e3d6440d4fcda6dfd6d31fa44..0d7e5b08ee148f43856771b2416b9df236ad5ad6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sigmoid_op_mlu.py @@ -24,7 +24,6 @@ SEED = 2021 class TestMLUSigmoid(OpTest): - def setUp(self): self.op_type = "sigmoid" self.set_mlu() @@ -41,9 +40,9 @@ class TestMLUSigmoid(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.01 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -54,7 +53,6 @@ class TestMLUSigmoid(OpTest): class TestMLUSigmoidFp16(TestMLUSigmoid): - def test_check_output(self): self.check_output_with_place(self.place, atol=1e-3) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py index a56ca8fe5c91f2d1c0533ad1db5ab24dabe29983..977359f8a9634df49007f74cd5f35d8b51b13fc3 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestSizeOp(OpTest): - def setUp(self): self.op_type = "size" self.shape = [] @@ -42,31 +41,26 @@ class TestSizeOp(OpTest): class TestRank1Tensor(TestSizeOp): - def config(self): self.shape = [2] class TestRank2Tensor(TestSizeOp): - def config(self): self.shape = [2, 3] class TestRank3Tensor(TestSizeOp): - def config(self): self.shape = [2, 3, 100] class TestLargeTensor(TestSizeOp): - def config(self): self.shape = [2**10] class TestSizeAPI(unittest.TestCase): - def test_size_static(self): main_program = fluid.Program() startup_program = fluid.Program() @@ -80,17 +74,19 @@ class TestSizeAPI(unittest.TestCase): out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) exe = paddle.static.Executor(place=paddle.MLUPlace(0)) - res_1, res_2 = exe.run(feed={ - "x_1": input_1, - "x_2": input_2, - }, - fetch_list=[out_1, out_2]) - assert (np.array_equal(res_1, - np.array([np.size(input_1) - ]).astype("int64"))) - assert (np.array_equal(res_2, - np.array([np.size(input_2) - ]).astype("int64"))) + res_1, res_2 = exe.run( + feed={ + "x_1": input_1, + "x_2": input_2, + }, + fetch_list=[out_1, out_2], + ) + assert np.array_equal( + res_1, np.array([np.size(input_1)]).astype("int64") + ) + assert np.array_equal( + res_2, np.array([np.size(input_2)]).astype("int64") + ) def test_size_imperative(self): paddle.disable_static(paddle.MLUPlace(0)) @@ -100,8 +96,8 @@ class TestSizeAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) - assert (np.array_equal(out_1.numpy().item(0), np.size(input_1))) - assert (np.array_equal(out_2.numpy().item(0), np.size(input_2))) + assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) + assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py index a9f21a24e68d4d2649d6b3fc21c4db3dc046674c..bc23ea270b5f424933c5cdc766295f2aa3f0faa0 100644 --- 
a/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_slice_op_mlu.py @@ -29,7 +29,6 @@ paddle.enable_static() # Situation 1: starts(list, no tensor), ends(list, no tensor) # 1.1 without attr(decrease) class TestSliceOp(OpTest): - def setUp(self): self.op_type = "slice" self.set_mlu() @@ -40,7 +39,7 @@ class TestSliceOp(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -55,9 +54,9 @@ class TestSliceOp(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -65,7 +64,6 @@ class TestSliceOp(OpTest): class TestCase1(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-3, 0, 2] @@ -76,7 +74,6 @@ class TestCase1(TestSliceOp): class TestCase2(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-3, 0, 2] @@ -88,7 +85,6 @@ class TestCase2(TestSliceOp): # 1.2 with attr(decrease) class TestSliceOp_decs_dim(OpTest): - def setUp(self): self.op_type = "slice" self.set_mlu() @@ -116,9 +112,9 @@ class TestSliceOp_decs_dim(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -126,7 +122,6 @@ class TestSliceOp_decs_dim(OpTest): class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [1, 0, 2] @@ -138,7 +133,6 @@ class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-1, 0, 2] @@ -150,7 +144,6 @@ class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 7]).astype("float32") self.starts = [0, 1, 2, 3] @@ -162,7 +155,6 @@ class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-1] @@ -174,7 +166,6 @@ class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [0, 1, 2, 3] @@ -188,7 +179,6 @@ class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): # Situation 2: starts(list, have tensor), ends(list, no tensor) # without attr(decrease) class TestSliceOp_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.set_mlu() @@ -196,8 +186,9 @@ class TestSliceOp_starts_ListTensor(OpTest): starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int64') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int64') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} 
self.outputs = {'Out': self.out} @@ -205,7 +196,7 @@ class TestSliceOp_starts_ListTensor(OpTest): 'axes': self.axes, 'starts': self.starts_infer, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -222,9 +213,9 @@ class TestSliceOp_starts_ListTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -234,7 +225,6 @@ class TestSliceOp_starts_ListTensor(OpTest): # Situation 2: starts(list, have tensor), ends(list, no tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.set_mlu() @@ -242,8 +232,9 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest): starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} @@ -271,9 +262,9 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) def set_mlu(self): self.__class__.use_mlu = True @@ -281,8 +272,8 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest): class TestSliceOp_decs_dim_5_starts_ListTensor( - TestSliceOp_decs_dim_starts_ListTensor): - + TestSliceOp_decs_dim_starts_ListTensor +): def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float32") self.starts = [-1] @@ -298,7 +289,6 @@ class TestSliceOp_decs_dim_5_starts_ListTensor( # Situation 3: starts(tensor), ends(list, no tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -306,7 +296,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest): self.config() self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype="int32") + "StartsTensor": np.array(self.starts, dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { @@ -330,15 +320,14 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) # Situation 4: starts(tensor), ends(tensor) # without attr(decrease) class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -348,14 +337,14 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int64"), - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, #'starts': self.starts, #'ends': self.ends_infer, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -370,15 +359,14 @@ class 
TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) # Situation 5: starts(tensor), ends(tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -387,7 +375,7 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { @@ -411,15 +399,14 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) # Situation 6: starts(tensor), ends(list, have tensor) # without attr(decrease) class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -428,20 +415,21 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("y" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("y" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - 'EndsTensorList': ends_tensor + 'EndsTensorList': ends_tensor, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, #'starts': self.starts, 'ends': self.ends_infer, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -458,14 +446,13 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) # Test float16 class TestFP16(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -477,7 +464,7 @@ class TestFP16(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -493,13 +480,12 @@ class TestFP16(OpTest): self.check_output_with_place(self.place, atol=1e-5) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestFP16_2(OpTest): - def setUp(self): self.op_type = "slice" self.__class__.use_mlu = True @@ -511,7 +497,7 @@ class TestFP16_2(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -527,24 +513,28 @@ class TestFP16_2(OpTest): self.check_output_with_place(self.place, atol=1e-5) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006, - numeric_grad_delta=0.5) + 
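# ---------------------------------------------------------------------------
# Editor's aside (not part of the patch): every slice configuration in this
# file maps onto plain NumPy basic slicing (step 1) along the listed axes; the
# StartsTensor/EndsTensor and *TensorList variants only change how starts and
# ends are supplied, not the result. A minimal sketch using the values from
# TestSliceApiWithTensor below (axes=[0, 1, 2], starts=[-3, 0, 2],
# ends=[3, 2, 4]); paddle.slice itself is only referenced in a comment.
import numpy as np

a = np.random.random((4, 5, 6)).astype("float32")
axes, starts, ends = [0, 1, 2], [-3, 0, 2], [3, 2, 4]
idx = [slice(None)] * a.ndim
for ax, s, e in zip(axes, starts, ends):
    idx[ax] = slice(s, e)          # step 1; negative bounds behave as in NumPy
ref = a[tuple(idx)]                # same as a[-3:3, 0:2, 2:4], shape (2, 2, 2)
# paddle.slice(paddle.to_tensor(a), axes=axes, starts=starts, ends=ends)
# is expected to match ref elementwise.
# ---------------------------------------------------------------------------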
self.check_grad_with_place( + self.place, + ['Input'], + 'Out', + max_relative_error=0.006, + numeric_grad_delta=0.5, + ) class TestSliceApiWithTensor(unittest.TestCase): - def test_starts_ends_is_tensor(self): with paddle.fluid.dygraph.guard(): a = paddle.rand(shape=[4, 5, 6], dtype='float32') axes = [0, 1, 2] starts = [-3, 0, 2] ends = [3, 2, 4] - a_1 = paddle.slice(a, - axes=axes, - starts=paddle.to_tensor(starts, dtype='int32'), - ends=paddle.to_tensor(ends, dtype='int32')) + a_1 = paddle.slice( + a, + axes=axes, + starts=paddle.to_tensor(starts, dtype='int32'), + ends=paddle.to_tensor(ends, dtype='int32'), + ) a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) np.testing.assert_allclose(a_1.numpy(), a_2.numpy()) @@ -567,24 +557,22 @@ class TestSliceApiWithTensor(unittest.TestCase): class TestImperativeVarBaseGetItem(unittest.TestCase): - def test_getitem_with_long(self): with fluid.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') var = fluid.dygraph.to_variable(data) - sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here + sliced = var[:, 10:, : var.shape[1]] # var.shape[1] is 80L here self.assertEqual(sliced.shape, [2, 70, 80]) - sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] + sliced = var[:, var.shape[0] :, var.shape[0] : var.shape[1]] self.assertEqual(sliced.shape, [2, 78, 78]) def test_getitem_with_float(self): - def test_float_in_slice_item(): with fluid.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') var = fluid.dygraph.to_variable(data) - sliced = var[:, 1.1:, :var.shape[1]] + sliced = var[:, 1.1:, : var.shape[1]] self.assertRaises(Exception, test_float_in_slice_item) @@ -598,7 +586,6 @@ class TestImperativeVarBaseGetItem(unittest.TestCase): class TestInferShape(unittest.TestCase): - def test_axis_less_than_zero(self): # Using paddle.disable_static will make other unittests fail. @@ -606,13 +593,18 @@ class TestInferShape(unittest.TestCase): x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) x = paddle.to_tensor(x_arr) - pp_slice = paddle.slice(x, [ - 100, - ], [0], [1]) + pp_slice = paddle.slice( + x, + [ + 100, + ], + [0], + [1], + ) np_slice = x_arr[:, :, 0:1] np.testing.assert_allclose(pp_slice, np_slice) - pp_slice = paddle.slice(x, (-100, ), [0], [1]) + pp_slice = paddle.slice(x, (-100,), [0], [1]) np_slice = x_arr[0:1] np.testing.assert_allclose(pp_slice, np_slice) @@ -620,9 +612,11 @@ class TestInferShape(unittest.TestCase): x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) starts = paddle.to_tensor( - np.reshape(np.array([], dtype=np.int32), (0, ))) + np.reshape(np.array([], dtype=np.int32), (0,)) + ) ends = paddle.to_tensor( - np.reshape(np.array([], dtype=np.int32), (0, ))) + np.reshape(np.array([], dtype=np.int32), (0,)) + ) with self.assertRaises(ValueError): paddle.slice(x, [-1000000], starts, ends) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py index b9362c9a27d49c25a720d6734142d31f3866cd4a..8c1ebbe01e5cffd83edf7f5b1ffbbcbc92abba61 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py @@ -32,7 +32,7 @@ def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) 
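# ---------------------------------------------------------------------------
# Editor's aside (not part of the patch): why stable_softmax subtracts the max
# and clips at -64, as the comment around this line explains. A naive softmax
# overflows exp() for large logits, while the shifted form keeps every
# exponent <= 0; the clip additionally stops exp() from underflowing to
# exactly 0, so a later log(softmax) cannot hit log(0) = -inf. Illustrative
# sketch only.
import numpy as np

def naive_softmax(x):
    e = np.exp(x)                            # overflows to inf for large x
    return e / e.sum()                       # inf / inf -> nan

def shifted_softmax(x):
    shiftx = (x - np.max(x)).clip(-64.0)     # same trick as stable_softmax
    e = np.exp(shiftx)
    return e / e.sum()

big = np.array([1000.0, 1000.0])
# naive_softmax(big) -> [nan, nan]; shifted_softmax(big) -> [0.5, 0.5]
# ---------------------------------------------------------------------------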
+ shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -47,7 +47,6 @@ def ref_softmax(x, axis=None, dtype=None): class TestSoftmaxOp(OpTest): - def get_x_shape(self): return [10, 10] @@ -79,19 +78,17 @@ class TestSoftmaxOp(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ["X"], - "Out", - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ["X"], "Out", max_relative_error=0.01 + ) class TestSoftmaxOp2(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] class TestSoftmaxOp3(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -100,7 +97,6 @@ class TestSoftmaxOp3(TestSoftmaxOp): class TestSoftmaxOp4(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -109,7 +105,6 @@ class TestSoftmaxOp4(TestSoftmaxOp): class TestSoftmaxOp5(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -118,7 +113,6 @@ class TestSoftmaxOp5(TestSoftmaxOp): class TestSoftmaxOp6(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -127,10 +121,9 @@ class TestSoftmaxOp6(TestSoftmaxOp): class TestSoftmaxAPI(unittest.TestCase): - def setUp(self): self.place = paddle.MLUPlace(0) - self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32') + self.x_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype('float32') self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np) self.executed_api() @@ -180,19 +173,18 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32 - x_int32 = paddle.fluid.data(name='x_int32', - shape=[2, 3], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 3], dtype='int32' + ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[2, 3], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[2, 3], dtype='float16' + ) self.softmax(x_fp16) class TestSoftmaxInplaceAPI(TestSoftmaxAPI): - def executed_api(self): self.softmax = F.softmax_ diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py index e7ae6dd511177347562e0cd2978edff23e414e2d..bf77d52532926c3c8e1aab4ae3b3f5516eee635e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_with_cross_entropy_op_mlu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestSoftmaxWithCrossEntropyOp(OpTest): - def set_mlu(self): self.__class__.use_mlu = True @@ -51,8 +50,10 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.initParams() logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, logits) if self.soft_label: @@ -63,8 +64,9 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.shape[self.axis] = 1 labels = np.random.randint(0, axis_dim, self.shape, dtype="int64") - loss = cross_entropy(softmax, labels, self.soft_label, self.axis, - self.ignore_index) + loss = cross_entropy( + softmax, labels, self.soft_label, self.axis, self.ignore_index + ) one_hot_label = np.eye(axis_dim)[labels.reshape(-1)] @@ -72,7 +74,7 @@ class 
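# ---------------------------------------------------------------------------
# Editor's aside (not part of the patch): the "Backprop" output built just
# below, (softmax - one_hot_label), matches the standard analytic gradient of
# softmax-with-cross-entropy with respect to the logits for hard labels. A
# small finite-difference check of that identity (illustrative sketch only):
import numpy as np

def xent(logits, label):
    p = np.exp(logits - logits.max())
    p = p / p.sum()
    return -np.log(p[label])

logits = np.array([0.2, -1.3, 0.7, 0.1])
label = 2
p = np.exp(logits - logits.max())
p = p / p.sum()
analytic = p - np.eye(logits.size)[label]    # softmax - one_hot
eps = 1e-6
numeric = np.array([
    (xent(logits + eps * np.eye(4)[i], label) - xent(logits, label)) / eps
    for i in range(4)
])
# np.allclose(analytic, numeric, atol=1e-4) is expected to hold.
# ---------------------------------------------------------------------------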
TestSoftmaxWithCrossEntropyOp(OpTest): self.outputs = { "Backprop": (softmax - one_hot_label).astype(self.dtype), "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { "numeric_stable_mode": self.numeric_stable_mode, @@ -90,14 +92,16 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): if self.dtype == np.float16: return # fp32 has low precision, cpu and mlu both need to relax the max_relative_error if using fp32 - self.check_grad_with_place(self.place, ['Logits'], - 'Loss', - numeric_grad_delta=0.001, - max_relative_error=0.5) + self.check_grad_with_place( + self.place, + ['Logits'], + 'Loss', + numeric_grad_delta=0.001, + max_relative_error=0.5, + ) class TestPowNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -112,9 +116,9 @@ class TestPowNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -138,16 +142,17 @@ class TestPowNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_spawn_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_spawn_mlu.py index e97f4deead5f139ba5df45c9ca9751a700d750fc..605c2693afc0e7a64c17bec5577d3c127454cb9f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_spawn_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_spawn_mlu.py @@ -19,12 +19,15 @@ import paddle import paddle.nn as nn import paddle.optimizer as opt import paddle.distributed as dist -from paddle.distributed.spawn import _get_subprocess_env_list, _options_valid_check, _get_default_nprocs +from paddle.distributed.spawn import ( + _get_subprocess_env_list, + _options_valid_check, + _get_default_nprocs, +) from paddle.fluid import core class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear1 = nn.Linear(10, 10) @@ -62,7 +65,6 @@ def train(print_result=False): class TestSpawn(unittest.TestCase): - def test_nprocs_greater_than_device_num_error(self): with self.assertRaises(RuntimeError): _get_subprocess_env_list(nprocs=100, options=dict()) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py index 28585629dca3e6c52d8487f438e5eae5f505292c..072b985613e0818075fa1d51583939665b0434a4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestCase1(OpTest): - def setUp(self): self.set_mlu() self.set_example() @@ -35,9 +34,9 @@ class TestCase1(OpTest): 
self.place = paddle.device.MLUPlace(0) ipt = self.x.astype(self.dtype) axis = self.axis if isinstance(self.axis, int) else int(self.axis[0]) - tmp_outs = np.split(ipt, - axis=axis, - indices_or_sections=self.num_or_sections) + tmp_outs = np.split( + ipt, axis=axis, indices_or_sections=self.num_or_sections + ) tmp_outs = [o.astype(self.dtype) for o in tmp_outs] self.outputs = {'Out': []} self.outs = [] @@ -64,7 +63,6 @@ class TestCase1(OpTest): class TestCase2(TestCase1): - def set_example(self): self.dtype = "float32" self.x = np.random.random((20, 4, 50)) @@ -73,7 +71,6 @@ class TestCase2(TestCase1): class TestCase4(TestCase1): - def set_example(self): self.dtype = "float16" self.x = np.random.random((4, 50, 20)) @@ -83,7 +80,6 @@ class TestCase4(TestCase1): # Test Sections class TestCase5(TestCase1): - def set_example(self): super().set_example() self.x = np.random.random((2, 10, 4)) @@ -96,7 +92,6 @@ class TestCase5(TestCase1): class API_TestSplit(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') @@ -105,13 +100,12 @@ class API_TestSplit(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([1, 10]).astype('float32') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) - ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) + ex_x0, ex_x1 = np.split(input1, (3,), axis=1) np.testing.assert_allclose(ex_x0, r0) np.testing.assert_allclose(ex_x1, r1) class API_TestSplit2(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') @@ -126,7 +120,6 @@ class API_TestSplit2(unittest.TestCase): class API_TestDygraphSplit(unittest.TestCase): - def test_out1(self): with fluid.dygraph.guard(paddle.MLUPlace(0)): input_1 = np.random.random([4, 6, 6]).astype("int32") @@ -158,20 +151,18 @@ class API_TestDygraphSplit(unittest.TestCase): # attr(axis) is Tensor class TestSplitOp_AxisTensor(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() self.init_data() self.inputs = { 'X': self.x, - 'AxisTensor': np.array([self.axis]).astype("int32") + 'AxisTensor': np.array([self.axis]).astype("int32"), } self.attrs = {'sections': self.sections, 'num': self.num} out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -194,7 +185,6 @@ class TestSplitOp_AxisTensor(OpTest): class TestSplitOp_SectionsTensor(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() @@ -203,20 +193,20 @@ class TestSplitOp_SectionsTensor(OpTest): sections_tensor = [] for index, ele in enumerate(self.sections): - sections_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + sections_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SectionsTensorList'] = sections_tensor self.attrs = { 'axis': self.axis, 'sections': self.sections_infer, - 'num': self.num + 'num': self.num, } out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_sqrt_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sqrt_op_mlu.py index cc02f975abbead8858a1fea6ab2a8252bc85bce0..211c8e349eb99e014c4164b6496e5c95b160c03e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sqrt_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sqrt_op_mlu.py @@ -29,7 +29,6 @@ np.random.seed(10) class TestSqrt(OpTest): - def setUp(self): self.op_type = "sqrt" self.dtype = 'float32' @@ -55,7 +54,6 @@ class TestSqrt(OpTest): class TestSqrtHalf(OpTest): - def setUp(self): self.op_type = "sqrt" self.dtype = 'float16' @@ -74,10 +72,9 @@ class TestSqrtHalf(OpTest): self.place = paddle.device.MLUPlace(0) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - check_eager=False, - max_relative_error=0.85) + self.check_grad_with_place( + self.place, ['X'], 'Out', check_eager=False, max_relative_error=0.85 + ) def test_check_output(self): self.check_output_with_place(self.place) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py index a08b2f263af6796183462932335f726891d32701..27642b92c3d5d879b05d00e24f0094144ce86ec0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_squared_l2_norm_op_mlu.py @@ -26,8 +26,7 @@ paddle.enable_static() class TestL2LossOp(OpTest): - """Test squared_l2_norm - """ + """Test squared_l2_norm""" def setUp(self): self.place = paddle.device.MLUPlace(0) @@ -44,13 +43,12 @@ class TestL2LossOp(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=self.max_relative_error) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=self.max_relative_error + ) class TestL2LossDeterministic(unittest.TestCase): - def check_place(self, place): with paddle.fluid.dygraph.guard(place): x_np = np.random.rand(5, 11, 13).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py index bddd71ce2e161015234d00be13f0372083c07384..62bd3d86b459b2e40ff9cc52b63473da9bcf2eef 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_squeeze2_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() # Correct: General. class TestSqueezeOp(OpTest): - def setUp(self): self.init_test_case() self.set_mlu() @@ -34,7 +33,7 @@ class TestSqueezeOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_mlu(self): @@ -58,7 +57,6 @@ class TestSqueezeOp(OpTest): # Correct: There is mins axis. class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -67,7 +65,6 @@ class TestSqueezeOp1(TestSqueezeOp): # Correct: No axes input. class TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -76,7 +73,6 @@ class TestSqueezeOp2(TestSqueezeOp): # Correct: Just part of axes be squeezed. 
class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py index 24f02492c63cd69f877e550911cbfe1e8e091ae2..9df87453929de0b61a4fb33a96e025887b75baa0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_squeeze_op_mlu.py @@ -30,7 +30,6 @@ paddle.enable_static() # Correct: General. class TestSqueezeOp(OpTest): - def setUp(self): self.op_type = "squeeze" self.init_test_case() @@ -61,7 +60,6 @@ class TestSqueezeOp(OpTest): class TestSqueezeBF16Op(OpTest): - def setUp(self): self.op_type = "squeeze" self.dtype = np.uint16 @@ -89,7 +87,6 @@ class TestSqueezeBF16Op(OpTest): # Correct: There is mins axis. class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 3, 1, 40) self.axes = (0, -2) @@ -98,7 +95,6 @@ class TestSqueezeOp1(TestSqueezeOp): # Correct: No axes input. class TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -107,7 +103,6 @@ class TestSqueezeOp2(TestSqueezeOp): # Correct: Just part of axes be squeezed. class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) @@ -116,7 +111,6 @@ class TestSqueezeOp3(TestSqueezeOp): # Correct: The demension of axis is not of size 1 remains unchanged. class TestSqueezeOp4(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, 2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py index 1d4550a11a5f33a3fbae8675143e023cb68c1247..eefe1d7d691fcdc2325637d38349a50dc58f6c08 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestStackOpBase(OpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -49,7 +48,8 @@ class TestStackOpBase(OpTest): self.x = [] for i in range(self.num_inputs): self.x.append( - np.random.random(size=self.input_dim).astype(self.dtype)) + np.random.random(size=self.input_dim).astype(self.dtype) + ) tmp = [] x_names = self.get_x_names() @@ -73,61 +73,51 @@ class TestStackOpBase(OpTest): class TestStackOp1(TestStackOpBase): - def initParameters(self): self.num_inputs = 16 class TestStackOp2(TestStackOpBase): - def initParameters(self): self.num_inputs = 20 class TestStackOp3(TestStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestStackOpBase): - def initParameters(self): self.axis = -4 class TestStackOp5(TestStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestStackOpBase): - def initParameters(self): self.axis = 3 class TestStackOpINT32(TestStackOpBase): - def init_dtype(self): self.dtype = np.int32 class TestStackOpINT64(TestStackOpBase): - def init_dtype(self): self.dtype = np.int64 class TestStackOpHalf(TestStackOpBase): - def init_dtype(self): self.dtype = np.float16 class API_test(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float32') @@ -139,12 +129,10 @@ class API_test(unittest.TestCase): input1 = np.random.random([1, 2]).astype('float32') input2 = 
np.random.random([1, 2]).astype('float32') input3 = np.random.random([1, 2]).astype('float32') - result, = exe.run(feed={ - "data1": input1, - "data2": input2, - "data3": input3 - }, - fetch_list=[result_stack]) + (result,) = exe.run( + feed={"data1": input1, "data2": input2, "data3": input3}, + fetch_list=[result_stack], + ) expected_result = np.stack([input1, input2, input3], axis=0) np.testing.assert_allclose(expected_result, result) @@ -155,7 +143,6 @@ class API_test(unittest.TestCase): class API_DygraphTest(unittest.TestCase): - def test_out(self): data1 = np.array([[1.0, 2.0]]).astype("float32") data2 = np.array([[3.0, 4.0]]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py index 21388f1131f72b5098ccc3052065897dd324ce4a..35069485054f58079924aee28321397dc17e8f51 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py @@ -26,16 +26,15 @@ paddle.enable_static() class TestStrideSliceOp(OpTest): - def setUp(self): self.initTestCase() self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True self.op_type = 'strided_slice' self.python_api = paddle.strided_slice - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.inputs = {'Input': self.input} self.outputs = {'Out': self.output} @@ -44,17 +43,16 @@ class TestStrideSliceOp(OpTest): 'starts': self.starts, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def test_check_output(self): self.check_output_with_place(self.place, check_eager=False) def test_check_grad(self): - self.check_grad_with_place(self.place, - set(['Input']), - 'Out', - check_eager=False) + self.check_grad_with_place( + self.place, set(['Input']), 'Out', check_eager=False + ) def initTestCase(self): self.input = np.random.rand(100).astype(np.float32) @@ -66,7 +64,6 @@ class TestStrideSliceOp(OpTest): class TestStrideSliceOp1(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(100).astype(np.float32) self.axes = [0] @@ -77,7 +74,6 @@ class TestStrideSliceOp1(TestStrideSliceOp): class TestStrideSliceOp2(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(100).astype(np.float32) self.axes = [0] @@ -88,7 +84,6 @@ class TestStrideSliceOp2(TestStrideSliceOp): class TestStrideSliceOp3(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(100).astype(np.float32) self.axes = [0] @@ -99,7 +94,6 @@ class TestStrideSliceOp3(TestStrideSliceOp): class TestStrideSliceOp4(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 4, 10).astype(np.float32) self.axes = [0, 1, 2] @@ -110,7 +104,6 @@ class TestStrideSliceOp4(TestStrideSliceOp): class TestStrideSliceOp5(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5).astype(np.float32) self.axes = [0, 1, 2] @@ -121,7 +114,6 @@ class TestStrideSliceOp5(TestStrideSliceOp): class TestStrideSliceOp6(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5).astype(np.float32) self.axes = [0, 1, 2] @@ -132,7 +124,6 @@ class TestStrideSliceOp6(TestStrideSliceOp): class TestStrideSliceOp7(TestStrideSliceOp): - def initTestCase(self): self.input = 
np.random.rand(5, 5, 5).astype(np.float32) self.axes = [0, 1, 2] @@ -143,7 +134,6 @@ class TestStrideSliceOp7(TestStrideSliceOp): class TestStrideSliceOp8(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1).astype(np.float32) self.axes = [1] @@ -154,7 +144,6 @@ class TestStrideSliceOp8(TestStrideSliceOp): class TestStrideSliceOp9(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1).astype(np.float32) self.axes = [1] @@ -165,7 +154,6 @@ class TestStrideSliceOp9(TestStrideSliceOp): class TestStrideSliceOp10(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(10, 10).astype(np.float32) self.axes = [0, 1] @@ -176,7 +164,6 @@ class TestStrideSliceOp10(TestStrideSliceOp): class TestStrideSliceOp11(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4).astype(np.float32) self.axes = [0, 1, 2, 3] @@ -187,7 +174,6 @@ class TestStrideSliceOp11(TestStrideSliceOp): class TestStrideSliceOp12(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5).astype(np.float32) self.axes = [0, 1, 2, 3, 4] @@ -198,7 +184,6 @@ class TestStrideSliceOp12(TestStrideSliceOp): class TestStrideSliceOp13(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8).astype(np.float32) self.axes = [0, 1, 2, 3, 4, 5] @@ -209,7 +194,6 @@ class TestStrideSliceOp13(TestStrideSliceOp): class TestStrideSliceOp14(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(4, 4, 4, 4).astype(np.float32) self.axes = [1, 2, 3] @@ -220,13 +204,11 @@ class TestStrideSliceOp14(TestStrideSliceOp): class TestStrideSliceOpBool(TestStrideSliceOp): - def test_check_grad(self): pass class TestStrideSliceOpBool1D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(100).astype("bool") self.axes = [0] @@ -237,7 +219,6 @@ class TestStrideSliceOpBool1D(TestStrideSliceOpBool): class TestStrideSliceOpBool2D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(10, 10).astype("bool") self.axes = [0, 1] @@ -248,7 +229,6 @@ class TestStrideSliceOpBool2D(TestStrideSliceOpBool): class TestStrideSliceOpBool3D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 4, 10).astype("bool") self.axes = [0, 1, 2] @@ -259,7 +239,6 @@ class TestStrideSliceOpBool3D(TestStrideSliceOpBool): class TestStrideSliceOpBool4D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4).astype("bool") self.axes = [0, 1, 2, 3] @@ -270,7 +249,6 @@ class TestStrideSliceOpBool4D(TestStrideSliceOpBool): class TestStrideSliceOpBool5D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5).astype("bool") self.axes = [0, 1, 2, 3, 4] @@ -281,7 +259,6 @@ class TestStrideSliceOpBool5D(TestStrideSliceOpBool): class TestStrideSliceOpBool6D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8).astype("bool") self.axes = [0, 1, 2, 3, 4, 5] @@ -292,7 +269,6 @@ class TestStrideSliceOpBool6D(TestStrideSliceOpBool): class TestStridedSliceOp_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.place = paddle.device.MLUPlace(0) @@ -301,8 +277,9 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), 
np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.outputs = {'Out': self.output} @@ -311,7 +288,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): 'starts': self.starts_infer, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -321,9 +298,9 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.starts_infer = [1, 10, 2] @@ -331,13 +308,12 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestStridedSliceOp_ends_ListTensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.place = paddle.device.MLUPlace(0) @@ -346,8 +322,9 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'EndsTensorList': ends_tensor} self.outputs = {'Out': self.output} @@ -356,7 +333,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): 'starts': self.starts, 'ends': self.ends_infer, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -366,9 +343,9 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 2] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.ends_infer = [3, 1, 4] @@ -376,13 +353,12 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestStridedSliceOp_starts_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.place = paddle.device.MLUPlace(0) @@ -390,7 +366,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest): self.config() self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype="int32") + "StartsTensor": np.array(self.starts, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -408,21 +384,20 @@ class TestStridedSliceOp_starts_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 
'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestStridedSliceOp_ends_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.place = paddle.device.MLUPlace(0) @@ -430,7 +405,7 @@ class TestStridedSliceOp_ends_Tensor(OpTest): self.config() self.inputs = { 'Input': self.input, - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -448,35 +423,35 @@ class TestStridedSliceOp_ends_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestStridedSliceOp_listTensor_Tensor(OpTest): - def setUp(self): self.config() self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.op_type = "strided_slice" self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - "EndsTensorList": ends_tensor + "EndsTensorList": ends_tensor, } self.outputs = {'Out': self.output} self.attrs = { @@ -494,21 +469,20 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) class TestStridedSliceOp_strides_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.place = paddle.device.MLUPlace(0) @@ -516,7 +490,7 @@ class TestStridedSliceOp_strides_Tensor(OpTest): self.config() self.inputs = { 'Input': self.input, - "StridesTensor": np.array(self.strides, dtype="int32") + "StridesTensor": np.array(self.strides, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -534,63 +508,65 @@ class TestStridedSliceOp_strides_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, -1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.006 + ) # Test 
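# ---------------------------------------------------------------------------
# Editor's aside (not part of the patch): strided_slice generalises slice with
# a per-axis step that may be negative (see the strides = [1, -1, 1] config
# above and out_5 = x[-3:3, 0:100:2, -1:2:-1] in the API test below). A
# minimal NumPy sketch with made-up illustrative values, just to show the
# mapping; paddle.strided_slice itself is only referenced in a comment.
import numpy as np

x = np.random.random((3, 4, 5, 6)).astype("float32")
axes, starts, ends, strides = [0, 1, 2], [0, 3, 4], [3, 0, 0], [1, -1, -2]
idx = [slice(None)] * x.ndim
for ax, s, e, st in zip(axes, starts, ends, strides):
    idx[ax] = slice(s, e, st)
ref = x[tuple(idx)]            # same as x[0:3, 3:0:-1, 4:0:-2, :]
# paddle.strided_slice(paddle.to_tensor(x), axes=axes, starts=starts,
#                      ends=ends, strides=strides) is expected to match ref.
# ---------------------------------------------------------------------------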
python API class TestStridedSliceAPI(unittest.TestCase): - def test_1(self): input = np.random.random([3, 4, 5, 6]).astype("float32") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data(name='starts', - shape=[3], - dtype='int32', - append_batch_size=False) - ends = fluid.layers.data(name='ends', - shape=[3], - dtype='int32', - append_batch_size=False) - strides = fluid.layers.data(name='strides', - shape=[3], - dtype='int32', - append_batch_size=False) - - x = fluid.layers.data(name="x", - shape=[3, 4, 5, 6], - append_batch_size=False, - dtype="float32") - out_1 = paddle.strided_slice(x, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_2 = paddle.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_3 = paddle.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, minus_1], - strides=[1, 1, 1]) - out_4 = paddle.strided_slice(x, - axes=[0, 1, 2], - starts=starts, - ends=ends, - strides=strides) + starts = fluid.layers.data( + name='starts', shape=[3], dtype='int32', append_batch_size=False + ) + ends = fluid.layers.data( + name='ends', shape=[3], dtype='int32', append_batch_size=False + ) + strides = fluid.layers.data( + name='strides', shape=[3], dtype='int32', append_batch_size=False + ) + + x = fluid.layers.data( + name="x", + shape=[3, 4, 5, 6], + append_batch_size=False, + dtype="float32", + ) + out_1 = paddle.strided_slice( + x, + axes=[0, 1, 2], + starts=[-3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_2 = paddle.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_3 = paddle.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, minus_1], + strides=[1, 1, 1], + ) + out_4 = paddle.strided_slice( + x, axes=[0, 1, 2], starts=starts, ends=ends, strides=strides + ) out_5 = x[-3:3, 0:100:2, -1:2:-1] out_6 = x[minus_3:3:1, 0:100:2, :, minus_1:2:minus_1] @@ -603,9 +579,10 @@ class TestStridedSliceAPI(unittest.TestCase): "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), 'ends': np.array([3, 2147483648, -1]).astype("int64"), - 'strides': np.array([1, 1, 1]).astype("int32") + 'strides': np.array([1, 1, 1]).astype("int32"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], + ) assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) @@ -620,11 +597,9 @@ class TestStridedSliceAPI(unittest.TestCase): starts = [-3, 0, 2] ends = [3, 2, 4] strides_1 = [1, 1, 1] - sliced_1 = paddle.strided_slice(x, - axes=axes, - starts=starts, - ends=ends, - strides=strides_1) + sliced_1 = paddle.strided_slice( + x, axes=axes, starts=starts, ends=ends, strides=strides_1 + ) assert sliced_1.shape == (3, 2, 2, 2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sum_op_mlu.py index ebf00a9db08f4e23d2c97b6d0818776bfbe9aef7..0dae84dc79b71aa2a49b175d342b9d4a6460385a 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_sum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sum_op_mlu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestSum1(OpTest): - def setUp(self): self.set_mlu() self.init_dtype() @@ -54,7 +53,6 @@ class TestSum1(OpTest): 
class TestSum2(OpTest): - def setUp(self): self.set_mlu() self.init_dtype() @@ -72,8 +70,12 @@ class TestSum2(OpTest): # For example, the results of `x0+x1+x2+x3` is different from that of # `x3+x2+x1+x0` if the dtype is fp16. # Therefore, converting the input to fp32 for calculation. - y = (x0.astype(np.float32) + x1.astype(np.float32) + - x2.astype(np.float32) + x3.astype(np.float32)).astype(self.dtype) + y = ( + x0.astype(np.float32) + + x1.astype(np.float32) + + x2.astype(np.float32) + + x3.astype(np.float32) + ).astype(self.dtype) self.outputs = {'Out': y} self.attrs = {'use_mkldnn': False} @@ -89,7 +91,6 @@ class TestSum2(OpTest): class TestSum3(OpTest): - def setUp(self): self.set_mlu() self.init_dtype() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py index 0d73e5af5c5b83192a18c12b490699bce7e2341a..dd842800198cb42487ff7e943567c533466f23e0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_base_mlu.py @@ -41,17 +41,19 @@ SEED = 10 class TestSyncBatchNormRunnerBase(object): - - def get_model(self, - main, - startup, - place, - layout, - seed, - sync_bn=False, - only_forward=False): + def get_model( + self, + main, + startup, + place, + layout, + seed, + sync_bn=False, + only_forward=False, + ): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." + ) def wait_server_ready(self, endpoints): assert not isinstance(endpoints, str) @@ -60,13 +62,15 @@ class TestSyncBatchNormRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -74,39 +78,47 @@ class TestSyncBatchNormRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - cncl_id_var = block.create_var(name=nameGen.generate('cncl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_cncl_id', - inputs={}, - outputs={'Out': cncl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - block.append_op(type='c_comm_init', - inputs={'X': cncl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': self.global_ring_id - }) + cncl_id_var = block.create_var( + name=nameGen.generate('cncl_id'), + 
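# ---------------------------------------------------------------------------
# Editor's aside (not part of the patch), referring back to the TestSum2
# comment above: float16 addition is not associative, so summing the same
# operands in a different order can round to a different result; accumulating
# in float32 and casting once at the end, as TestSum2 does for its expected
# output, avoids that order dependence. Illustrative sketch only.
import numpy as np

xs = np.random.random((4, 3, 3)).astype(np.float16)
fwd = ((xs[0] + xs[1]) + xs[2]) + xs[3]      # fp16 accumulation, one order
bwd = ((xs[3] + xs[2]) + xs[1]) + xs[0]      # same operands, reversed order
ref = xs.astype(np.float32).sum(axis=0).astype(np.float16)
# fwd and bwd may differ from each other in the last bit, while ref mirrors
# how TestSum2 builds the reference it checks against.
# ---------------------------------------------------------------------------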
persistable=True, + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_cncl_id', + inputs={}, + outputs={'Out': cncl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': cncl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': self.global_ring_id, + }, + ) def run_trainer(self, args): device_id = int(os.getenv("FLAGS_selected_mlus", "0")) @@ -139,24 +151,30 @@ class TestSyncBatchNormRunnerBase(object): sys.stdout.buffer.write( pickle.dumps( - 'training, inference, fp32, fp16, NCHW, NHWC all passed')) + 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) + ) def _compare(self, args, place, layout, only_forward): scope = core.Scope() np.random.seed(SEED) - data = np.random.random(size=self.dshape).astype(self.dtype) * 4. - 2 + data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2 sys.stderr.write("data: " + str(data) + "\n") - data = create_or_get_tensor(scope, "input", - OpTest.np_dtype_to_fluid_dtype(data), place) + data = create_or_get_tensor( + scope, "input", OpTest.np_dtype_to_fluid_dtype(data), place + ) - bn_fetches = self._cal_single_card(args, data, place, layout, - only_forward) + bn_fetches = self._cal_single_card( + args, data, place, layout, only_forward + ) fetch_names, sync_bn_fetches = self._cal_multiple_cards( - args, data, place, layout, only_forward) + args, data, place, layout, only_forward + ) - sys.stderr.write("len(sync_bn_fetches): " + str(len(sync_bn_fetches)) + - "\n") + sys.stderr.write( + "len(sync_bn_fetches): " + str(len(sync_bn_fetches)) + "\n" + ) for i in range(0, len(sync_bn_fetches)): sys.stderr.write("i: " + str(i) + "\n") sys.stderr.write("fetch_names[i]): " + fetch_names[i] + "\n") @@ -164,13 +182,14 @@ class TestSyncBatchNormRunnerBase(object): bn_val = bn_fetches[i] sync_bn_val = sync_bn_fetches[i] if sync_bn_val.shape != bn_val.shape: - sync_bn_val = sync_bn_val[:bn_val.shape[0]] + sync_bn_val = sync_bn_val[: bn_val.shape[0]] # i = 0 if fetch_names[i] == 'reduce_sum_0.tmp_0': # sys.stderr.write("skip reduce_sum_0.tmp_0 (Out of reduce_sum op)" + "\n") - sys.stderr.write("reduce_sum_0.tmp_0 (Out of reduce_sum op)" + - "\n") + sys.stderr.write( + "reduce_sum_0.tmp_0 (Out of reduce_sum op)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -198,7 +217,8 @@ class TestSyncBatchNormRunnerBase(object): if fetch_names[i] == 'batch_norm_0.tmp_2': # sys.stderr.write("skip batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n") sys.stderr.write( - "batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n") + "batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -231,8 +251,9 @@ class TestSyncBatchNormRunnerBase(object): # i = 8 if fetch_names[i] == 'batch_norm_0.tmp_1': - sys.stderr.write("skip batch_norm_0.tmp_1 (SavedVariance)" + - "\n") + sys.stderr.write( + "skip batch_norm_0.tmp_1 (SavedVariance)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -278,10 +299,16 @@ class TestSyncBatchNormRunnerBase(object): if fetch_names[i] == 'conv2d_0.tmp_0@GRAD': atol = 1e-2 - assert np.allclose( - bn_val, sync_bn_val, atol=atol), "Output (" + fetch_names[ - i] + ") has diff. 
\n" + "\nBN " + str( - bn_val) + "\n" + "Sync BN " + str(sync_bn_val) + assert np.allclose(bn_val, sync_bn_val, atol=atol), ( + "Output (" + + fetch_names[i] + + ") has diff. \n" + + "\nBN " + + str(bn_val) + + "\n" + + "Sync BN " + + str(sync_bn_val) + ) def _cal_single_card(self, args, data, place, layout, only_forward): # Single-MLU, N = 32 per MLU @@ -291,23 +318,31 @@ class TestSyncBatchNormRunnerBase(object): startup_prog.global_seed(SEED) paddle.seed(SEED) - outs = self.get_model(train_prog, startup_prog, place, layout, SEED, - False, only_forward) + outs = self.get_model( + train_prog, startup_prog, place, layout, SEED, False, only_forward + ) exe = fluid.Executor(place) exe.run(startup_prog) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others - bn_fetches = exe.run(program=train_prog, - feed={'input': data}, - fetch_list=fetch_names) + bn_fetches = exe.run( + program=train_prog, feed={'input': data}, fetch_list=fetch_names + ) return bn_fetches @@ -328,8 +363,9 @@ class TestSyncBatchNormRunnerBase(object): current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) # sys.stderr.write("after init, startup_prog: " + # startup_prog.to_string(True) + "\n") train_prog.global_seed(SEED) @@ -339,8 +375,9 @@ class TestSyncBatchNormRunnerBase(object): paddle.seed(SEED) self.rank = rank - outs = self.get_model(train_prog, startup_prog, place, layout, SEED, - True, only_forward) + outs = self.get_model( + train_prog, startup_prog, place, layout, SEED, True, only_forward + ) # sys.stderr.write("after get_model, train_prog: " + # train_prog.to_string(True) + "\n") # sys.stderr.write("after get_model, startup_prog: " + @@ -363,17 +400,24 @@ class TestSyncBatchNormRunnerBase(object): exe = fluid.Executor(place) exe.run(startup_prog) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others - sync_bn_fetches = exe.run(program=train_prog, - feed={'input': data}, - fetch_list=fetch_names) + sync_bn_fetches = exe.run( + program=train_prog, feed={'input': data}, fetch_list=fetch_names + ) return fetch_names, sync_bn_fetches @@ -396,19 +440,20 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable def _find_free_port(self): - def __free_port(): - with 
closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -437,7 +482,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) @@ -448,15 +493,19 @@ class TestDistBase(unittest.TestCase): tr1_pipe = open("/tmp/tr1_err_%d.log" % os.getpid(), "w") print("tr0_cmd: {}, env: {}\n".format(tr0_cmd, env0)) print("tr1_cmd: {}, env: {}\n".format(tr1_cmd, env1)) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) - - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) + + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -470,14 +519,16 @@ class TestDistBase(unittest.TestCase): sys.stderr.write('trainer 0 stderr file: %s\n' % f.read()) with open("/tmp/tr1_err_%d.log" % os.getpid(), "r") as f: sys.stderr.write('trainer 1 stderr file: %s\n' % f.read()) - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid - - def check_with_place(self, - model_file, - col_type, - check_error_log=False, - need_envs={}): + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) + + def check_with_place( + self, model_file, col_type, check_error_log=False, need_envs={} + ): required_envs = { "FLAGS_fraction_of_gpu_memory_to_use": "0.15", "FLAGS_eager_delete_tensor_gb": "0.0", @@ -488,7 +539,7 @@ class TestDistBase(unittest.TestCase): "FLAGS_call_stack_level": "2", "GLOG_v": "3", "PADDLE_WITH_GLOO": '0', - "BACKEND": "cncl" + "BACKEND": "cncl", } required_envs.update(need_envs) if check_error_log: @@ -496,8 +547,11 @@ class TestDistBase(unittest.TestCase): required_envs["GLOG_logtostderr"] = "1" required_envs["GLOO_LOG_LEVEL"] = "TRACE" tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) self.assertEqual( - tr0_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed') + tr0_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) self.assertEqual( - tr1_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed') + tr1_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_baseline.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_baseline.py index 44dc8cf2359840a6e8b61b6d7a322862c01714e1..597c033ba78fb60babcbda2040686730532460cd 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_baseline.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_baseline.py @@ -28,14 +28,13 @@ paddle.enable_static() class TestSyncBatchNormOp(TestDistBase): - def _setup_config(self): pass def test_identity(self, col_type="identity"): - self.check_with_place("sync_batch_norm_op_mlu.py", - col_type, - check_error_log=True) + self.check_with_place( + "sync_batch_norm_op_mlu.py", col_type, check_error_log=True + ) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py index 6488f757c50f8cd4f9c4b67ef7788e1c5eb0bf8c..fd19512528b6b8cb79f7b912bd35eb6b60df946a 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py @@ -34,15 +34,15 @@ paddle.enable_static() class TestDygraphSyncBatchNormAPIError(unittest.TestCase): - def test_errors(self): if not core.is_compiled_with_mlu(): return with program_guard(Program(), Program()): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10) - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.MLUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.MLUPlace(0) + ) self.assertRaises(TypeError, my_sync_batch_norm, x1) # the input dtype of SyncBatchNorm must be float16 or float32 @@ -51,36 +51,39 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): class TestConvertSyncBatchNorm(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_mlu(): return with program_guard(Program(), Program()): - compare_model = paddle.nn.Sequential(paddle.nn.Conv2D(3, 5, 3), - paddle.nn.BatchNorm2D(5), - paddle.nn.BatchNorm2D(5)) + compare_model = paddle.nn.Sequential( + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), + paddle.nn.BatchNorm2D(5), + ) model = paddle.nn.Sequential( - paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5), + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), paddle.nn.BatchNorm2D( 5, weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'))) + bias_attr=fluid.ParamAttr(name='bn.bias'), + ), + ) model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model) for idx, sublayer in enumerate(compare_model.sublayers()): if isinstance(sublayer, paddle.nn.BatchNorm2D): self.assertEqual( - isinstance(model[idx], paddle.nn.SyncBatchNorm), True) + isinstance(model[idx], paddle.nn.SyncBatchNorm), True + ) class TestConvertSyncBatchNormCast1(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_mlu(): return class Net(nn.Layer): - def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2D(3, 5, 3) @@ -105,7 +108,6 @@ class TestConvertSyncBatchNormCast1(unittest.TestCase): class TestConvertSyncBatchNormCase2(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_mlu(): return @@ -113,16 +115,19 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): with fluid.dygraph.guard(fluid.MLUPlace(0)): class SyBNNet(paddle.nn.Layer): - def __init__(self, in_ch=3, out_ch=3, dirate=1): super(SyBNNet, self).__init__() self.bn_s1 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( paddle.nn.BatchNorm3D( out_ch, weight_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.)))) + regularizer=paddle.regularizer.L2Decay(0.0) + ), + ) + ) self.bn_s2 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( - paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC')) + paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC') + ) def forward(self, x): x = self.bn_s1(x) @@ -130,15 +135,17 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): return out class BNNet(paddle.nn.Layer): - def __init__(self, in_ch=3, out_ch=3, dirate=1): super(BNNet, self).__init__() self.bn_s1 = paddle.nn.BatchNorm3D( out_ch, weight_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.))) + 
regularizer=paddle.regularizer.L2Decay(0.0) + ), + ) self.bn_s2 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( - paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC')) + paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC') + ) def forward(self, x): x = self.bn_s1(x) @@ -156,12 +163,16 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): bn_out.numpy(), sybn_out.numpy(), rtol=1e-05, - err_msg='Output has diff. \n' + '\nBN ' + - str(bn_out.numpy()) + '\n' + 'Sync BN ' + str(sybn_out.numpy())) + err_msg='Output has diff. \n' + + '\nBN ' + + str(bn_out.numpy()) + + '\n' + + 'Sync BN ' + + str(sybn_out.numpy()), + ) class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase): - def test_errors(self): if not core.is_compiled_with_mlu(): return diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py index a50ad86523afdacb9f9cde343b71902ddaa4fce0..c346dd0867a5bd61204cdc7c189a3cf7ae02afb3 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tanh_op_mlu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestTanh(OpTest): - def setUp(self): self.set_mlu() self.op_type = "tanh" @@ -58,7 +57,6 @@ class TestTanh(OpTest): class TestTanhFp16(OpTest): - def setUp(self): self.set_mlu() self.op_type = "tanh" @@ -85,7 +83,6 @@ class TestTanhFp16(OpTest): class TestTanhNet(unittest.TestCase): - def _test(self, run_mlu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -100,9 +97,9 @@ class TestTanhNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.tanh(c) @@ -126,16 +123,17 @@ class TestTanhNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py index c42695f2b5d31b147efc27fbb8c1578cbd75ac25..2f1a0d990bf01a262a72433d7d949c6d7e28d0ad 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py @@ -25,9 +25,8 @@ from paddle.fluid import compiler, Program, program_guard paddle.enable_static() -#Situation 1: repeat_times is a list (without tensor) +# Situation 1: repeat_times is a list (without tensor) class TestTileOpRank1(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -51,42 +50,36 @@ class TestTileOpRank1(OpTest): # with dimension expanding class TestTileOpRank2Expanding(TestTileOpRank1): - def init_data(self): self.ori_shape = [120] self.repeat_times = [2, 2] class TestTileOpRank2(TestTileOpRank1): - def 
init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] class TestTileOpRank3_Corner(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (1, 1, 1) class TestTileOpRank3_Corner2(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (2, 2) class TestTileOpRank3(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 15) self.repeat_times = (2, 1, 4) class TestTileOpRank4(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.repeat_times = (3, 2, 1, 2) @@ -94,7 +87,6 @@ class TestTileOpRank4(TestTileOpRank1): # Situation 2: repeat_times is a list (with tensor) class TestTileOpRank1_tensor_attr(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -102,8 +94,9 @@ class TestTileOpRank1_tensor_attr(OpTest): self.init_data() repeat_times_tensor = [] for index, ele in enumerate(self.repeat_times): - repeat_times_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + repeat_times_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype("float32"), @@ -126,7 +119,6 @@ class TestTileOpRank1_tensor_attr(OpTest): class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [1, 1] @@ -134,7 +126,6 @@ class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -143,7 +134,6 @@ class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): # Situation 3: repeat_times is a tensor class TestTileOpRank1_tensor(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -169,7 +159,6 @@ class TestTileOpRank1_tensor(OpTest): class TestTileOpRank2_tensor(TestTileOpRank1_tensor): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -177,7 +166,6 @@ class TestTileOpRank2_tensor(TestTileOpRank1_tensor): # Situation 4: input x is Integer class TestTileOpInteger(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -195,7 +183,6 @@ class TestTileOpInteger(OpTest): # Situation 5: input x is Bool class TestTileOpBoolean(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -211,7 +198,6 @@ class TestTileOpBoolean(OpTest): # Situation 56: input x is Integer class TestTileOpInt64_t(OpTest): - def setUp(self): self.op_type = "tile" self.place = paddle.device.MLUPlace(0) @@ -228,11 +214,11 @@ class TestTileOpInt64_t(OpTest): class TestTileError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) repeat_times = [2, 2] self.assertRaises(TypeError, paddle.tile, x1, repeat_times) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -243,7 +229,6 @@ class TestTileError(unittest.TestCase): class TestTileAPIStatic(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): repeat_times = [2, 2] @@ -255,7 +240,6 @@ class TestTileAPIStatic(unittest.TestCase): # Test python API class TestTileAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): np_x = np.random.random([12, 
14]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py index 8d7a5eb2c60ad37fe4bf4dea4ac0ec7cd8cd5190..8e4a9d483892e5bc3cc855cb31d8d1356b55e16d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_top_k_op_mlu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestTopkOp(OpTest): - def setUp(self): self.variable_k = False self.set_args() @@ -66,7 +65,6 @@ class TestTopkOp(OpTest): class TestTopkFP16Op(TestTopkOp): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py index 8516fa224fa6a37a7e7af5c1948767d021802270..4c9d426e82d9046a296980dd62630995e683eae5 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_top_k_v2_op_mlu.py @@ -39,7 +39,6 @@ def numpy_topk(x, k=1, axis=-1, largest=True): class TestTopkOp(OpTest): - def init_args(self): self.k = 3 self.axis = 1 @@ -55,10 +54,9 @@ class TestTopkOp(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): @@ -67,7 +65,6 @@ class TestTopkOp(OpTest): class TestTopkOp1(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 0 @@ -75,7 +72,6 @@ class TestTopkOp1(TestTopkOp): class TestTopkOp2(TestTopkOp): - def init_args(self): self.k = 4 self.axis = 0 @@ -83,7 +79,6 @@ class TestTopkOp2(TestTopkOp): class TestTopkOp3(OpTest): - def init_args(self): self.k = 6 self.axis = 1 @@ -96,15 +91,13 @@ class TestTopkOp3(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp4(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -119,15 +112,13 @@ class TestTopkOp4(TestTopkOp): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp5(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -142,15 +133,13 @@ class TestTopkOp5(TestTopkOp): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp6(OpTest): - def init_args(self): self.k = 100 self.axis = 1 @@ -165,15 +154,13 @@ class 
TestTopkOp6(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopKAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.dtype = np.float32 @@ -219,22 +206,23 @@ class TestTopKAPI(unittest.TestCase): np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1]) # test case for basic test case 7 for the unsorted paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) - sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()), - axis=1, - k=2) + sort_paddle = numpy_topk( + np.array(paddle_result[0].numpy()), axis=1, k=2 + ) numpy_result = numpy_topk(self.input_data, k=2, axis=1) np.testing.assert_allclose(sort_paddle[0], numpy_result[0]) def run_static(self, place): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - input_tensor = paddle.static.data(name="x", - shape=[6, 7, 8], - dtype="float32") - large_input_tensor = paddle.static.data(name="large_x", - shape=[2, 1030], - dtype="float32") + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + input_tensor = paddle.static.data( + name="x", shape=[6, 7, 8], dtype="float32" + ) + large_input_tensor = paddle.static.data( + name="large_x", shape=[2, 1030], dtype="float32" + ) k_tensor = paddle.static.data(name="k", shape=[1], dtype="int32") result1 = paddle.topk(input_tensor, k=2) result2 = paddle.topk(input_tensor, k=2, axis=-1) @@ -248,18 +236,29 @@ class TestTopKAPI(unittest.TestCase): exe = paddle.static.Executor(place) input_data = np.random.rand(10, 20).astype("float32") large_input_data = np.random.rand(2, 100).astype("float32") - paddle_result = exe.run(feed={ - "x": self.input_data, - "large_x": self.large_input_data, - "k": np.array([2]).astype("int32") - }, - fetch_list=[ - result1[0], result1[1], result2[0], - result2[1], result3[0], result3[1], - result4[0], result4[1], result5[0], - result5[1], result6[0], result6[1], - result7[0], result7[1] - ]) + paddle_result = exe.run( + feed={ + "x": self.input_data, + "large_x": self.large_input_data, + "k": np.array([2]).astype("int32"), + }, + fetch_list=[ + result1[0], + result1[1], + result2[0], + result2[1], + result3[0], + result3[1], + result4[0], + result4[1], + result5[0], + result5[1], + result6[0], + result6[1], + result7[0], + result7[1], + ], + ) numpy_result = numpy_topk(self.input_data, k=2) np.testing.assert_allclose(paddle_result[0], numpy_result[0]) np.testing.assert_allclose(paddle_result[1], numpy_result[1]) @@ -269,16 +268,14 @@ class TestTopKAPI(unittest.TestCase): numpy_result = numpy_topk(self.input_data, k=2, axis=1) np.testing.assert_allclose(paddle_result[4], numpy_result[0]) np.testing.assert_allclose(paddle_result[5], numpy_result[1]) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=1, - largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=1, largest=False + ) np.testing.assert_allclose(paddle_result[6], numpy_result[0]) np.testing.assert_allclose(paddle_result[7], numpy_result[1]) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=-1, - largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=-1, largest=False + ) 
np.testing.assert_allclose(paddle_result[8], numpy_result[0]) np.testing.assert_allclose(paddle_result[9], numpy_result[1]) numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py index 6aca81d0ba9339b5bad7b0867a5ded089c734dec..30e45198879567fd0331ce7f6a15cef9996e0d78 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestTransposeOp(OpTest): - def setUp(self): self.init_op_type() self.initKernelType() @@ -57,92 +56,79 @@ class TestTransposeOp(OpTest): class TestCase0(TestTransposeOp): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) class TestCase1(TestTransposeOp): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) class TestCase2(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) class TestCase3(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) class TestCase4(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) class TestCase5(TestTransposeOp): - def initTestCase(self): self.shape = (2, 16, 96) self.axis = (0, 2, 1) class TestCase6(TestTransposeOp): - def initTestCase(self): self.shape = (2, 10, 12, 16) self.axis = (3, 1, 2, 0) class TestCase7(TestTransposeOp): - def initTestCase(self): self.shape = (2, 10, 2, 16) self.axis = (0, 1, 3, 2) class TestCase8(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (0, 1, 3, 2, 4, 5, 6, 7) class TestCase9(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) class TestTransposeOpBool(TestTransposeOp): - def test_check_grad(self): pass class TestTransposeOpBool1D(TestTransposeOpBool): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = {'Out': self.inputs['X'].transpose(self.axis)} class TestTransposeOpBool2D(TestTransposeOpBool): - def initTestCase(self): self.shape = (3, 40) self.axis = (1, 0) @@ -151,7 +137,6 @@ class TestTransposeOpBool2D(TestTransposeOpBool): class TestTransposeOpBool3D(TestTransposeOpBool): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) @@ -160,7 +145,6 @@ class TestTransposeOpBool3D(TestTransposeOpBool): class TestTransposeOpBool4D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) @@ -169,7 +153,6 @@ class TestTransposeOpBool4D(TestTransposeOpBool): class TestTransposeOpBool5D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) @@ -178,7 +161,6 @@ class TestTransposeOpBool5D(TestTransposeOpBool): class TestTransposeOpBool6D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) @@ -187,7 +169,6 @@ class TestTransposeOpBool6D(TestTransposeOpBool): class TestTransposeOpBool7D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3) self.axis = (0, 1, 3, 2, 4, 5, 6) @@ -196,7 +177,6 @@ class TestTransposeOpBool7D(TestTransposeOpBool): class 
TestTransposeOpBool8D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) @@ -205,7 +185,6 @@ class TestTransposeOpBool8D(TestTransposeOpBool): class TestTransposeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -238,7 +217,6 @@ class TestTransposeOpError(unittest.TestCase): class TestTransposeApi(unittest.TestCase): - def test_static_out(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): @@ -248,8 +226,9 @@ class TestTransposeApi(unittest.TestCase): place = paddle.MLUPlace(0) exe = paddle.static.Executor(place) x_np = np.random.random([2, 3, 4]).astype("float32") - result1, result2 = exe.run(feed={"x": x_np}, - fetch_list=[x_trans1, x_trans2]) + result1, result2 = exe.run( + feed={"x": x_np}, fetch_list=[x_trans1, x_trans2] + ) expected_result1 = np.transpose(x_np, [1, 0, 2]) expected_result2 = np.transpose(x_np, (2, 1, 0)) @@ -275,7 +254,6 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program()): data = fluid.data(shape=[10], dtype="float32", name="data") @@ -283,7 +261,7 @@ class TestTAPI(unittest.TestCase): place = fluid.MLUPlace(0) exe = fluid.Executor(place) data_np = np.random.random([10]).astype("float32") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -293,7 +271,7 @@ class TestTAPI(unittest.TestCase): place = fluid.MLUPlace(0) exe = fluid.Executor(place) data_np = np.random.random([10, 5]).astype("float32") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -303,7 +281,7 @@ class TestTAPI(unittest.TestCase): place = fluid.MLUPlace(0) exe = fluid.Executor(place) data_np = np.random.random([1, 5]).astype("float32") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -342,7 +320,6 @@ class TestTAPI(unittest.TestCase): class TestMoveAxis(unittest.TestCase): - def test_moveaxis1(self): x_np = np.random.randn(2, 3, 4, 5, 7).astype('float32') expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0]) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py index fe74cbb05b0e3b015f92410ef0ee4c32313f4c71..2ed56de3c6d311e4556ab96b53d25a65e2902c8d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py @@ -27,14 +27,15 @@ paddle.enable_static() class TrilTriuOpDefaultTest(OpTest): - """ the base class of other op testcases - """ + """the base class of other op testcases""" def setUp(self): self.initTestCase() self.__class__.use_mlu = True self.place = paddle.device.MLUPlace(0) - self.python_api = paddle.tril if self.real_op_type == 'tril' else paddle.triu + self.python_api = ( + paddle.tril if self.real_op_type == 'tril' else paddle.triu + ) self.real_np_op = getattr(np, self.real_op_type) self.op_type = 
"tril_triu" @@ -44,9 +45,9 @@ class TrilTriuOpDefaultTest(OpTest): 'lower': True if self.real_op_type == 'tril' else False, } self.outputs = { - 'Out': - self.real_np_op(self.X, self.diagonal) - if self.diagonal else self.real_np_op(self.X) + 'Out': self.real_np_op(self.X, self.diagonal) + if self.diagonal + else self.real_np_op(self.X) } def test_check_output(self): @@ -64,27 +65,29 @@ def case_generator(op_type, Xshape, diagonal, expected): If arg`expercted` is 'success', it will register an Optest case and expect to pass. Otherwise, it will register an API case and check the expect failure. """ - cls_name = "{0}_{1}_shape_{2}_diag_{3}".format(expected, op_type, Xshape, - diagonal) + cls_name = "{0}_{1}_shape_{2}_diag_{3}".format( + expected, op_type, Xshape, diagonal + ) errmsg = { - "diagonal: TypeError": - "diagonal in {} must be a python Int".format(op_type), - "input: ValueError": - "x shape in {} must be at least 2-D".format(op_type), + "diagonal: TypeError": "diagonal in {} must be a python Int".format( + op_type + ), + "input: ValueError": "x shape in {} must be at least 2-D".format( + op_type + ), } class FailureCase(unittest.TestCase): - def test_failure(self): paddle.enable_static() data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) - with self.assertRaisesRegexp(eval(expected.split(':')[-1]), - errmsg[expected]): + with self.assertRaisesRegexp( + eval(expected.split(':')[-1]), errmsg[expected] + ): getattr(tensor, op_type)(x=data, diagonal=diagonal) class SuccessCase(TrilTriuOpDefaultTest): - def initTestCase(self): paddle.enable_static() @@ -109,15 +112,13 @@ cases = { (20, 20): [ '2020', [20], - { - 20: 20 - }, + {20: 20}, (20, 20), 20.20, ], # str, list, dict, tuple, float }, 'input: ValueError': { - (2020, ): [None], + (2020,): [None], }, } for _op_type in ['tril', 'triu']: @@ -126,12 +127,15 @@ for _op_type in ['tril', 'triu']: list( map( lambda _diagonal: case_generator( - _op_type, _Xshape, _diagonal, _expected), _diaglist)) + _op_type, _Xshape, _diagonal, _expected + ), + _diaglist, + ) + ) class TestTrilTriuOpAPI(unittest.TestCase): - """ test case by using API and has -1 dimension - """ + """test case by using API and has -1 dimension""" def test_api(self): paddle.enable_static() @@ -163,8 +167,10 @@ class TestTrilTriuOpAPI(unittest.TestCase): with fluid.dygraph.guard(): data = np.random.random([1, 9, 9, 4]).astype(dtype) x = fluid.dygraph.to_variable(data) - tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu( - x).numpy() + tril_out, triu_out = ( + tensor.tril(x).numpy(), + tensor.triu(x).numpy(), + ) np.testing.assert_allclose(tril_out, np.tril(data)) np.testing.assert_allclose(triu_out, np.triu(data)) @@ -182,9 +188,11 @@ class TestTrilTriuOpAPI(unittest.TestCase): place = fluid.MLUPlace(0) exe = fluid.Executor(place) - triu_out = exe.run(fluid.default_main_program(), - feed={"x": data}, - fetch_list=[triu_out]) + triu_out = exe.run( + fluid.default_main_program(), + feed={"x": data}, + fetch_list=[triu_out], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/mlu/test_truncated_gaussian_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_truncated_gaussian_random_op_mlu.py index 50e5537fbffd757cf50ae3f9b2082428625a586f..98bcd7109d03702fb53958a45e88fc16de233ce7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_truncated_gaussian_random_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_truncated_gaussian_random_op_mlu.py @@ -30,14 +30,13 @@ paddle.enable_static() class 
TestTrunctedGaussianRandomOp(unittest.TestCase): - def setUp(self): self.op_type = "truncated_gaussian_random" self.inputs = {} self.attrs = { "shape": [10000], - "mean": .0, - "std": 1., + "mean": 0.0, + "std": 1.0, "seed": 10, } self.outputs = ["Out"] @@ -56,9 +55,9 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): program = fluid.Program() block = program.global_block() vout = block.create_var(name="Out") - op = block.append_op(type=self.op_type, - outputs={"Out": vout}, - attrs=self.attrs) + op = block.append_op( + type=self.op_type, outputs={"Out": vout}, attrs=self.attrs + ) op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) @@ -70,7 +69,7 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): exe = Executor(place) outs = exe.run(program, fetch_list=fetch_list) tensor = outs[0] - self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) + self.assertAlmostEqual(numpy.mean(tensor), 0.0, delta=0.1) self.assertAlmostEqual(numpy.var(tensor), 0.773, delta=0.1) # TruncatedNormal.__call__ has no return value, so here call _C_ops api @@ -79,9 +78,14 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): with fluid.dygraph.guard(place): with _test_eager_guard(): out = paddle._C_ops.truncated_gaussian_random( - self.attrs["shape"], self.attrs["mean"], self.attrs["std"], - self.attrs["seed"], core.VarDesc.VarType.FP32, place) - self.assertAlmostEqual(numpy.mean(out.numpy()), .0, delta=0.1) + self.attrs["shape"], + self.attrs["mean"], + self.attrs["std"], + self.attrs["seed"], + core.VarDesc.VarType.FP32, + place, + ) + self.assertAlmostEqual(numpy.mean(out.numpy()), 0.0, delta=0.1) self.assertAlmostEqual(numpy.var(out.numpy()), 0.773, delta=0.1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py index 0dc702b5a221d09536ac56f147f46bca30919e4b..20abed4245c8909d6a90607f9670e95d38d480c2 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py @@ -25,7 +25,10 @@ import paddle from paddle.fluid.op import Operator import paddle.fluid as fluid from paddle.fluid import Program, program_guard -from test_uniform_random_op import TestUniformRandomOp, TestUniformRandomOpSelectedRows +from test_uniform_random_op import ( + TestUniformRandomOp, + TestUniformRandomOpSelectedRows, +) paddle.enable_static() @@ -39,7 +42,6 @@ def output_hist(out): class TestMLUUniformRandomOp(OpTest): - def setUp(self): self.set_mlu() self.op_type = "uniform_random" @@ -53,7 +55,7 @@ class TestMLUUniformRandomOp(OpTest): "shape": [1000, 784], "min": -5.0, "max": 10.0, - "seed": 10 + "seed": 10, } self.output_hist = output_hist @@ -73,7 +75,6 @@ class TestMLUUniformRandomOp(OpTest): class TestMLUUniformRandomOpSelectedRows(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_mlu(): @@ -88,12 +89,14 @@ class TestMLUUniformRandomOpSelectedRows(unittest.TestCase): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[1000, 784], - min=-5.0, - max=10.0, - seed=10) + op = Operator( + "uniform_random", + Out="X", + shape=[1000, 784], + min=-5.0, + max=10.0, + seed=10, + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze2_op_mlu.py 
b/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze2_op_mlu.py index 3a5201935751745d2e591dd421a75b3f35fd5e6e..281a9a1ca2d08c28d907be48bca9933e50ecdbd1 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze2_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() # Correct: General. class TestUnsqueezeOp(OpTest): - def setUp(self): self.init_test_case() self.set_mlu() @@ -36,7 +35,7 @@ class TestUnsqueezeOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_mlu(self): @@ -60,16 +59,14 @@ class TestUnsqueezeOp(OpTest): # Correct: Single input index. class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -78,7 +75,6 @@ class TestUnsqueezeOp2(TestUnsqueezeOp): # Correct: There is duplicated axis. class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -87,7 +83,6 @@ class TestUnsqueezeOp3(TestUnsqueezeOp): # Correct: Reversed axes. class TestUnsqueezeOp4(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -96,7 +91,6 @@ class TestUnsqueezeOp4(TestUnsqueezeOp): # axes is a list(with tensor) class TestUnsqueezeOp_AxesTensorList(OpTest): - def setUp(self): self.init_test_case() self.set_mlu() @@ -104,17 +98,18 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): axes_tensor_list = [] for index, ele in enumerate(self.axes): - axes_tensor_list.append(("axes" + str(index), np.ones( - (1)).astype('int32') * ele)) + axes_tensor_list.append( + ("axes" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { "X": np.random.random(self.ori_shape).astype("float32"), - "AxesTensorList": axes_tensor_list + "AxesTensorList": axes_tensor_list, } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_mlu(self): @@ -137,15 +132,13 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): class TestUnsqueezeOp1_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueezeOp2_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -153,7 +146,6 @@ class TestUnsqueezeOp2_AxesTensorList(TestUnsqueezeOp_AxesTensorList): class TestUnsqueezeOp3_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -161,7 +153,6 @@ class TestUnsqueezeOp3_AxesTensorList(TestUnsqueezeOp_AxesTensorList): class TestUnsqueezeOp4_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -170,7 +161,6 @@ class TestUnsqueezeOp4_AxesTensorList(TestUnsqueezeOp_AxesTensorList): # axes is a Tensor class TestUnsqueezeOp_AxesTensor(OpTest): - def setUp(self): self.init_test_case() self.set_mlu() @@ -178,12 +168,12 @@ class 
TestUnsqueezeOp_AxesTensor(OpTest): self.inputs = { "X": np.random.random(self.ori_shape).astype("float32"), - "AxesTensor": np.array(self.axes).astype("int32") + "AxesTensor": np.array(self.axes).astype("int32"), } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_mlu(self): @@ -206,15 +196,13 @@ class TestUnsqueezeOp_AxesTensor(OpTest): class TestUnsqueezeOp1_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueezeOp2_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -222,7 +210,6 @@ class TestUnsqueezeOp2_AxesTensor(TestUnsqueezeOp_AxesTensor): class TestUnsqueezeOp3_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -230,7 +217,6 @@ class TestUnsqueezeOp3_AxesTensor(TestUnsqueezeOp_AxesTensor): class TestUnsqueezeOp4_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze_op_mlu.py index 11317b61219e93ffbdb3895281074a9aefc00c5b..033f58d0e72f10b54dd4c54295fd6a03ed32a211 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_unsqueeze_op_mlu.py @@ -27,7 +27,6 @@ paddle.enable_static() # Correct: General. class TestUnsqueezeOp(OpTest): - def setUp(self): self.init_test_case() self.set_mlu() @@ -57,16 +56,14 @@ class TestUnsqueezeOp(OpTest): # Correct: Single input index. class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -75,7 +72,6 @@ class TestUnsqueezeOp2(TestUnsqueezeOp): # Correct: There is duplicated axis. class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -84,7 +80,6 @@ class TestUnsqueezeOp3(TestUnsqueezeOp): # Correct: Reversed axes. 
class TestUnsqueezeOp4(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_unstack_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_unstack_op_mlu.py index 2ec9726bcf2ad2f8e3e4f620b14a0adb9bd87a1b..6e46a9ff0b2d9b2217f52458302fd138d01df66d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_unstack_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_unstack_op_mlu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestUnStackOpBase(OpTest): - def initDefaultParameters(self): self.input_dim = (5, 6, 7) self.axis = 0 @@ -74,25 +73,21 @@ class TestUnStackOpBase(OpTest): class TestStackOp3(TestUnStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestUnStackOpBase): - def initParameters(self): self.axis = -3 class TestStackOp5(TestUnStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestUnStackOpBase): - def initParameters(self): self.axis = 2 diff --git a/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py index 2486bd8324ad3828a9c03d105561d31346d1958f..163d47e33c2fb7b46aefaf988b0c20376bcc1e27 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestWhereIndexOp(OpTest): - def setUp(self): self.op_type = "where_index" self.place = paddle.device.MLUPlace(0) @@ -47,7 +46,6 @@ class TestWhereIndexOp(OpTest): class TestAllFalse(unittest.TestCase): - def setUp(self): self.op_type = "where_index" self.place = paddle.device.MLUPlace(0) @@ -78,7 +76,6 @@ class TestAllFalse(unittest.TestCase): class TestRank2(TestWhereIndexOp): - def init_config(self): self.inputs = { 'Condition': np.array([[True, False], [False, True]]), @@ -88,24 +85,26 @@ class TestRank2(TestWhereIndexOp): class TestRank3(TestWhereIndexOp): - def init_config(self): self.inputs = { - 'Condition': - np.array([[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [False, True]]]), + 'Condition': np.array( + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [False, True]], + ] + ), } self.outputs = { - 'Out': - np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], - dtype='int64') + 'Out': np.array( + [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], + dtype='int64', + ) } class TestWhereOpError(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') @@ -118,9 +117,7 @@ class TestWhereOpError(unittest.TestCase): class TestWhereRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.where([10]) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py index 5f757015656df55e88d559ac4194e68a7e6a9018..2bb780e1f268bebd245fd8b4a1934859bb7fee07 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestWhereOp(OpTest): - def setUp(self): self.op_type = 'where' self.place = paddle.device.MLUPlace(0) @@ -55,7 +54,6 @@ class TestWhereOp(OpTest): class TestWhereOp2(TestWhereOp): - def init_config(self): self.x = np.random.uniform((-5), 
5, (60, 2)).astype('float32') self.y = np.random.uniform((-5), 5, (60, 2)).astype('float32') @@ -63,7 +61,6 @@ class TestWhereOp2(TestWhereOp): class TestWhereOp3(TestWhereOp): - def init_config(self): self.x = np.random.uniform((-3), 5, (20, 2, 4)).astype('float32') self.y = np.random.uniform((-3), 5, (20, 2, 4)).astype('float32') @@ -71,7 +68,6 @@ class TestWhereOp3(TestWhereOp): class TestWhereAPI(unittest.TestCase): - def setUp(self): self.place = paddle.device.MLUPlace(0) self.__class__.use_mlu = True @@ -95,45 +91,49 @@ class TestWhereAPI(unittest.TestCase): for x_stop_gradient in [False, True]: for y_stop_gradient in [False, True]: with fluid.program_guard(Program(), Program()): - cond = fluid.layers.data(name='cond', - shape=self.shape, - dtype='bool') - x = fluid.layers.data(name='x', - shape=self.shape, - dtype='float32') - y = fluid.layers.data(name='y', - shape=self.shape, - dtype='float32') + cond = fluid.layers.data( + name='cond', shape=self.shape, dtype='bool' + ) + x = fluid.layers.data( + name='x', shape=self.shape, dtype='float32' + ) + y = fluid.layers.data( + name='y', shape=self.shape, dtype='float32' + ) x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient result = paddle.where(cond, x, y) append_backward(paddle.mean(result)) for use_mlu in [False, True]: - place = (paddle.device.MLUPlace(0) - if use_mlu else fluid.CPUPlace()) + place = ( + paddle.device.MLUPlace(0) + if use_mlu + else fluid.CPUPlace() + ) exe = fluid.Executor(place) fetch_list = [result, result.grad_name] - if (x_stop_gradient is False): + if x_stop_gradient is False: fetch_list.append(x.grad_name) - if (y_stop_gradient is False): + if y_stop_gradient is False: fetch_list.append(y.grad_name) - out = exe.run(fluid.default_main_program(), - feed={ - 'cond': self.cond, - 'x': self.x, - 'y': self.y - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={'cond': self.cond, 'x': self.x, 'y': self.y}, + fetch_list=fetch_list, + ) assert np.array_equal(out[0], self.out) - if (x_stop_gradient is False): - assert np.array_equal(out[2], - self.ref_x_backward(out[1])) - if (y.stop_gradient is False): + if x_stop_gradient is False: + assert np.array_equal( + out[2], self.ref_x_backward(out[1]) + ) + if y.stop_gradient is False: assert np.array_equal( - out[3], self.ref_y_backward(out[1])) - elif (y.stop_gradient is False): - assert np.array_equal(out[2], - self.ref_y_backward(out[1])) + out[3], self.ref_y_backward(out[1]) + ) + elif y.stop_gradient is False: + assert np.array_equal( + out[2], self.ref_y_backward(out[1]) + ) def test_api_broadcast(self, use_mlu=False): main_program = Program() @@ -141,19 +141,20 @@ class TestWhereAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32') - y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, - 1.0]]).astype('float32') + y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( + 'float32' + ) result = paddle.where((x > 1), x=x, y=y) for use_mlu in [False, True]: - place = (paddle.device.MLUPlace(0) - if use_mlu else fluid.CPUPlace()) + place = ( + paddle.device.MLUPlace(0) if use_mlu else fluid.CPUPlace() + ) exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={ - 'x': x_i, - 'y': y_i - }, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'x': x_i, 'y': y_i}, + fetch_list=[result], + ) assert 
np.array_equal(out[0], np.where((x_i > 1), x_i, y_i)) def test_scalar(self): @@ -161,20 +162,23 @@ class TestWhereAPI(unittest.TestCase): main_program = Program() with fluid.program_guard(main_program): cond_shape = [2, 4] - cond = fluid.layers.data(name='cond', - shape=cond_shape, - dtype='bool') + cond = fluid.layers.data( + name='cond', shape=cond_shape, dtype='bool' + ) x_data = 1.0 y_data = 2.0 cond_data = np.array([False, False, True, True]).astype('bool') result = paddle.where(condition=cond, x=x_data, y=y_data) for use_mlu in [False, True]: - place = (paddle.device.MLUPlace(0) - if use_mlu else fluid.CPUPlace()) + place = ( + paddle.device.MLUPlace(0) if use_mlu else fluid.CPUPlace() + ) exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={'cond': cond_data}, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'cond': cond_data}, + fetch_list=[result], + ) expect = np.where(cond_data, x_data, y_data) assert np.array_equal(out[0], expect) @@ -182,27 +186,26 @@ class TestWhereAPI(unittest.TestCase): paddle.enable_static() main_program = Program() with fluid.program_guard(main_program): - cond = fluid.layers.data(name='cond', - shape=cond_shape, - dtype='bool') + cond = fluid.layers.data( + name='cond', shape=cond_shape, dtype='bool' + ) x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') cond_data_tmp = np.random.random(size=cond_shape).astype('float32') - cond_data = (cond_data_tmp < 0.3) + cond_data = cond_data_tmp < 0.3 x_data = np.random.random(size=x_shape).astype('float32') y_data = np.random.random(size=y_shape).astype('float32') result = paddle.where(condition=cond, x=x, y=y) for use_mlu in [False, True]: - place = (paddle.device.MLUPlace(0) - if use_mlu else fluid.CPUPlace()) + place = ( + paddle.device.MLUPlace(0) if use_mlu else fluid.CPUPlace() + ) exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={ - 'cond': cond_data, - 'x': x_data, - 'y': y_data - }, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'cond': cond_data, 'x': x_data, 'y': y_data}, + fetch_list=[result], + ) expect = np.where(cond_data, x_data, y_data) assert np.array_equal(out[0], expect) @@ -256,7 +259,6 @@ class TestWhereAPI(unittest.TestCase): class TestWhereDygraphAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float32') @@ -280,7 +282,7 @@ class TestWhereDygraphAPI(unittest.TestCase): def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): with fluid.dygraph.guard(): cond_tmp = paddle.rand(cond_shape) - cond = (cond_tmp < 0.3) + cond = cond_tmp < 0.3 a = paddle.rand(a_shape) b = paddle.rand(b_shape) result = paddle.where(cond, a, b) @@ -345,9 +347,9 @@ class TestWhereDygraphAPI(unittest.TestCase): self.assertEqual(len(y), 2) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(paddle.device.MLUPlace(0)) - (res, ) = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0, 0], [1, 1]]) np.testing.assert_allclose(expect_out, np.array(res)) data = np.array([True, True, False]) @@ -358,15 +360,14 @@ class TestWhereDygraphAPI(unittest.TestCase): self.assertEqual(len(y), 1) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(paddle.device.MLUPlace(0)) - (res, ) = 
exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0], [1]]) np.testing.assert_allclose(expect_out, np.array(res)) class TestWhereOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float32') @@ -390,7 +391,7 @@ class TestWhereOpError(unittest.TestCase): with fluid.dygraph.guard(): cond_shape = [2, 2, 4] cond_tmp = paddle.rand(cond_shape) - cond = (cond_tmp < 0.3) + cond = cond_tmp < 0.3 a = paddle.rand(cond_shape) self.assertRaises(ValueError, paddle.where, cond, a) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_yolo_box_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_yolo_box_op_mlu.py index 86bc314eafcd80b49a8342499c7e4664309d7a9d..fd8855aeb570624529aa4bcea6ae9b29e34e5af2 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_yolo_box_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_yolo_box_op_mlu.py @@ -29,7 +29,7 @@ paddle.enable_static() def sigmoid(x): - return (1.0 / (1.0 + np.exp(((-1.0) * x)))) + return 1.0 / (1.0 + np.exp(((-1.0) * x))) def YoloBox(x, img_size, attrs): @@ -43,9 +43,9 @@ def YoloBox(x, img_size, attrs): scale_x_y = attrs['scale_x_y'] iou_aware = attrs['iou_aware'] iou_aware_factor = attrs['iou_aware_factor'] - bias_x_y = ((-0.5) * (scale_x_y - 1.0)) - input_h = (downsample * h) - input_w = (downsample * w) + bias_x_y = (-0.5) * (scale_x_y - 1.0) + input_h = downsample * h + input_w = downsample * w if iou_aware: ioup = x[:, :an_num, :, :] ioup = np.expand_dims(ioup, axis=(-1)) @@ -54,49 +54,54 @@ def YoloBox(x, img_size, attrs): pred_box = x[:, :, :, :, :4].copy() grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1)) grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w)) - pred_box[:, :, :, :, 0] = (( - (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y) / - w) - pred_box[:, :, :, :, 1] = (( - (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y) / - h) - anchors = [(anchors[i], anchors[(i + 1)]) - for i in range(0, len(anchors), 2)] - anchors_s = np.array([((an_w / input_w), (an_h / input_h)) - for (an_w, an_h) in anchors]) + pred_box[:, :, :, :, 0] = ( + (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y + ) / w + pred_box[:, :, :, :, 1] = ( + (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y + ) / h + anchors = [ + (anchors[i], anchors[(i + 1)]) for i in range(0, len(anchors), 2) + ] + anchors_s = np.array( + [((an_w / input_w), (an_h / input_h)) for (an_w, an_h) in anchors] + ) anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1)) anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1)) - pred_box[:, :, :, :, 2] = (np.exp(pred_box[:, :, :, :, 2]) * anchor_w) - pred_box[:, :, :, :, 3] = (np.exp(pred_box[:, :, :, :, 3]) * anchor_h) + pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w + pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h if iou_aware: - pred_conf = ((sigmoid(x[:, :, :, :, 4:5])**(1 - iou_aware_factor)) * - (sigmoid(ioup)**iou_aware_factor)) + pred_conf = (sigmoid(x[:, :, :, :, 4:5]) ** (1 - iou_aware_factor)) * ( + sigmoid(ioup) ** iou_aware_factor + ) else: pred_conf = sigmoid(x[:, :, :, :, 4:5]) pred_conf[(pred_conf < conf_thresh)] = 0.0 - pred_score = (sigmoid(x[:, :, :, :, 5:]) * pred_conf) - pred_box = (pred_box * (pred_conf > 0.0).astype('float32')) + pred_score = sigmoid(x[:, :, 
:, :, 5:]) * pred_conf + pred_box = pred_box * (pred_conf > 0.0).astype('float32') pred_box = pred_box.reshape((n, (-1), 4)) - (pred_box[:, :, :2], - pred_box[:, :, 2:4]) = ((pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)), - (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0))) - pred_box[:, :, 0] = (pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis]) - pred_box[:, :, 1] = (pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis]) - pred_box[:, :, 2] = (pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis]) - pred_box[:, :, 3] = (pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis]) + (pred_box[:, :, :2], pred_box[:, :, 2:4]) = ( + (pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)), + (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0)), + ) + pred_box[:, :, 0] = pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis] + pred_box[:, :, 1] = pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis] + pred_box[:, :, 2] = pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis] + pred_box[:, :, 3] = pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis] if clip_bbox: for i in range(len(pred_box)): pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf) pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf) - pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], (-np.inf), - (img_size[(i, 1)] - 1)) - pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], (-np.inf), - (img_size[(i, 0)] - 1)) + pred_box[i, :, 2] = np.clip( + pred_box[i, :, 2], (-np.inf), (img_size[(i, 1)] - 1) + ) + pred_box[i, :, 3] = np.clip( + pred_box[i, :, 3], (-np.inf), (img_size[(i, 0)] - 1) + ) return (pred_box, pred_score.reshape((n, (-1), class_num))) class TestYoloBoxOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'yolo_box' @@ -114,7 +119,7 @@ class TestYoloBoxOp(OpTest): 'clip_bbox': self.clip_bbox, 'scale_x_y': self.scale_x_y, 'iou_aware': self.iou_aware, - 'iou_aware_factor': self.iou_aware_factor + 'iou_aware_factor': self.iou_aware_factor, } self.inputs = {'X': x, 'ImgSize': img_size} (boxes, scores) = YoloBox(x, img_size, self.attrs) @@ -131,8 +136,12 @@ class TestYoloBoxOp(OpTest): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = False @@ -140,7 +149,6 @@ class TestYoloBoxOp(OpTest): class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -149,8 +157,12 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = False - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = False @@ -158,7 +170,6 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): class TestYoloBoxOpScaleXY(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -167,8 +178,12 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.2 
self.iou_aware = False @@ -176,7 +191,6 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): class TestYoloBoxOpIoUAware(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -185,8 +199,12 @@ class TestYoloBoxOpIoUAware(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (6 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (6 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = True @@ -194,67 +212,72 @@ class TestYoloBoxOpIoUAware(TestYoloBoxOp): class TestYoloBoxDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() img_size = np.ones((2, 2)).astype('int32') img_size = paddle.to_tensor(img_size) x1 = np.random.random([2, 14, 8, 8]).astype('float32') x1 = paddle.to_tensor(x1) - (boxes, scores) = paddle.vision.ops.yolo_box(x1, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x1, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + ) + assert (boxes is not None) and (scores is not None) x2 = np.random.random([2, 16, 8, 8]).astype('float32') x2 = paddle.to_tensor(x2) - (boxes, scores) = paddle.vision.ops.yolo_box(x2, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0, - iou_aware=True, - iou_aware_factor=0.5) + (boxes, scores) = paddle.vision.ops.yolo_box( + x2, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + iou_aware=True, + iou_aware_factor=0.5, + ) paddle.enable_static() class TestYoloBoxStatic(unittest.TestCase): - def test_static(self): x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32') img_size = paddle.static.data('img_size', [2, 2], 'int32') - (boxes, scores) = paddle.vision.ops.yolo_box(x1, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x1, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + ) + assert (boxes is not None) and (scores is not None) x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32') - (boxes, scores) = paddle.vision.ops.yolo_box(x2, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0, - iou_aware=True, - iou_aware_factor=0.5) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x2, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + iou_aware=True, + iou_aware_factor=0.5, + ) + assert (boxes is not None) and (scores is not None) class TestYoloBoxOpHW(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) diff --git 
a/python/paddle/fluid/tests/unittests/multi_process.py b/python/paddle/fluid/tests/unittests/multi_process.py index fa6b7200f32f90b14d7ef4fe5bec0702c95dfb32..74283be7b3726fa9ec503f5af0936586fd87706e 100644 --- a/python/paddle/fluid/tests/unittests/multi_process.py +++ b/python/paddle/fluid/tests/unittests/multi_process.py @@ -25,12 +25,18 @@ def train(prefix): worker_endpoints = worker_endpoints_env trainers_num = len(worker_endpoints.split(',')) - name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_gpus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_gpus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) @@ -47,23 +53,34 @@ def train_abort(prefix): # train abort exit(1) except SystemExit: - name = "abort>>> selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_gpus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "abort>>> selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_gpus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) with open( - "multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) raise else: # sleep 30s to make sure paddle.distributed.launch will terminate this process time.sleep(30) - name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_gpus, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_gpus:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_gpus, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) - with open("multi_process_{}.check_{}.log".format(prefix, trainer_id), - "w") as f: + with open( + "multi_process_{}.check_{}.log".format(prefix, trainer_id), "w" + ) as f: f.write(name) diff --git a/python/paddle/fluid/tests/unittests/my_data_generator.py b/python/paddle/fluid/tests/unittests/my_data_generator.py index 5279973e01acfcdeabf80731df4771808a165f2c..b08e5c53815d38ae1164733a2ecdea7e6c846eea 100644 --- a/python/paddle/fluid/tests/unittests/my_data_generator.py +++ b/python/paddle/fluid/tests/unittests/my_data_generator.py @@ -16,14 +16,14 @@ import paddle.distributed.fleet as fleet class MyDataset(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): elements = line.strip().split()[0:] - output = [("show", [int(elements[0])]), - ("click", [int(elements[1])]), - ("slot1", [int(elements[2])])] + output = [ + ("show", [int(elements[0])]), + ("click", [int(elements[1])]), + ("slot1", [int(elements[2])]), + ] yield output return data_iter diff --git a/python/paddle/fluid/tests/unittests/nproc_process.py b/python/paddle/fluid/tests/unittests/nproc_process.py index bbd0b6a0e5ee6a619ddaf0702345dbcdff2778fc..7a01005fe2e885f6e1e3841968c11fe5a26acd5f 100644 --- a/python/paddle/fluid/tests/unittests/nproc_process.py +++ b/python/paddle/fluid/tests/unittests/nproc_process.py @@ -28,8 +28,13 @@ def 
train(prefix): worker_endpoints = worker_endpoints_env trainers_num = len(worker_endpoints.split(',')) - name = "selected_devices:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}"\ - .format(selected_devices, worker_endpoints, trainers_num, current_endpoint,trainer_id) + name = "selected_devices:{} worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}".format( + selected_devices, + worker_endpoints, + trainers_num, + current_endpoint, + trainer_id, + ) print(name) with open("{}.check_{}.log".format(prefix, trainer_id), "w") as f: diff --git a/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py b/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py index e4ed24a73f5ea6b7ee736116b5b3ee85367f4ab6..f8f8612ca89e32ceee64e1f1ab7acd69c80612d3 100644 --- a/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py @@ -35,7 +35,6 @@ paddle.enable_static() class TestCollectiveIdentity(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -43,22 +42,22 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_identity", - inputs={'X': tindata}, - outputs={'Out': toutdata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_identity", + inputs={'X': tindata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/npu/process_group_hccl.py b/python/paddle/fluid/tests/unittests/npu/process_group_hccl.py index 4b96002536ee143f47ba244ecab3eceb3ccfb1c6..474d4104b04eb21ac3d963759c53ee04d4ecbd81 100644 --- a/python/paddle/fluid/tests/unittests/npu/process_group_hccl.py +++ b/python/paddle/fluid/tests/unittests/npu/process_group_hccl.py @@ -37,7 +37,6 @@ def init_process_group(strategy=None): class TestProcessGroupFp32(unittest.TestCase): - def setUp(self): paddle.seed(2022) random.seed(2022) @@ -50,8 +49,9 @@ class TestProcessGroupFp32(unittest.TestCase): def test_create_process_group_nccl(self): with _test_eager_guard(): - paddle.set_device('npu:%d' % - paddle.distributed.ParallelEnv().dev_id) + paddle.set_device( + 'npu:%d' % paddle.distributed.ParallelEnv().dev_id + ) pg = init_process_group() @@ -147,8 +147,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() paddle.device.cuda.synchronize() out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) - out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2], - [out_shape[0]]) + out_2 = paddle.slice( + tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] + ) assert np.array_equal(tensor_x, out_1) assert np.array_equal(tensor_y, out_2) print("test allgather api ok\n") @@ -163,10 +164,12 @@ class TestProcessGroupFp32(unittest.TestCase): tensor_y = paddle.to_tensor(y) tensor_out1 = paddle.to_tensor(out1) tensor_out2 = paddle.to_tensor(out2) - raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2], - [self.shape[0]]) - raw_tensor_y_1 = 
paddle.slice(tensor_y, [0], [0], - [self.shape[0] // 2]) + raw_tensor_x_2 = paddle.slice( + tensor_x, [0], [self.shape[0] // 2], [self.shape[0]] + ) + raw_tensor_y_1 = paddle.slice( + tensor_y, [0], [0], [self.shape[0] // 2] + ) if pg.rank() == 0: task = pg.alltoall(tensor_x, tensor_out1) task.wait() @@ -176,8 +179,9 @@ class TestProcessGroupFp32(unittest.TestCase): task = pg.alltoall(tensor_y, tensor_out2) task.wait() paddle.device.cuda.synchronize() - out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2], - [self.shape[0]]) + out1_2 = paddle.slice( + tensor_out1, [0], [self.shape[0] // 2], [self.shape[0]] + ) out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2]) if pg.rank() == 0: assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy()) @@ -223,8 +227,9 @@ class TestProcessGroupFp32(unittest.TestCase): task.wait() paddle.device.cuda.synchronize() out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) - out2 = paddle.slice(tensor_x, [0], [self.shape[0]], - [self.shape[0] * 2]) + out2 = paddle.slice( + tensor_x, [0], [self.shape[0]], [self.shape[0] * 2] + ) if pg.rank() == 0: assert np.array_equal(tensor_y, out1) else: @@ -233,7 +238,6 @@ class TestProcessGroupFp32(unittest.TestCase): class TestProcessGroupFp16(TestProcessGroupFp32): - def setUp(self): paddle.seed(2022) random.seed(2022) diff --git a/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py index 84f93bc88a18c389d068c17c6dcdc11b04a2b0ea..4960fee949b61c36c9d027aeda035ff142fcbe42 100644 --- a/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py @@ -31,17 +31,24 @@ import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce -from test_sync_batch_norm_base_npu import TestSyncBatchNormRunnerBase, runtime_main -from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator +from test_sync_batch_norm_base_npu import ( + TestSyncBatchNormRunnerBase, + runtime_main, +) +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + _set_use_system_allocator, +) -from paddle.fluid.tests.unittests.test_sync_batch_norm_op import create_or_get_tensor +from paddle.fluid.tests.unittests.test_sync_batch_norm_op import ( + create_or_get_tensor, +) _set_use_system_allocator(False) paddle.enable_static() class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -53,29 +60,34 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): self.dshape = [self.N, self.C, self.H, self.W] self.atol = 1e-3 - def get_model(self, - main, - startup, - place, - layout, - seed, - sync_bn=False, - only_forward=False): + def get_model( + self, + main, + startup, + place, + layout, + seed, + sync_bn=False, + only_forward=False, + ): """Build program.""" use_cudnn = False with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data(name='input', - shape=self.dshape, - dtype=self.dtype, - append_batch_size=False) + data = fluid.layers.data( + name='input', + shape=self.dshape, + dtype=self.dtype, + append_batch_size=False, + ) conv = fluid.layers.conv2d( input=data, num_filters=32, filter_size=1, param_attr=fluid.ParamAttr(name='conv2d_weight'), bias_attr=False, - use_cudnn=use_cudnn) + use_cudnn=use_cudnn, + ) bn = fluid.layers.batch_norm( conv, param_attr=fluid.ParamAttr(name='bn_scale'), @@ -83,7 
+95,8 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=layout, - is_test=only_forward) + is_test=only_forward, + ) # if self.dtype == np.float16: # bn = fluid.layers.cast(bn, 'float32') sigmoid = fluid.layers.sigmoid(bn) diff --git a/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py index 8926937509482717af974475513dc00624612f7a..f28552f14e749e961b364c3bb240bcfa60532a6b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_abs_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestNPUAbs(OpTest): - def setUp(self): self.op_type = "abs" self.set_npu() diff --git a/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py index c46a51dfe3ae6c31a2f5fa7fed40ed6bd53cf2b6..54d029384c6c0df0ca770b88e3bd2a2608bdd480 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_accuracy_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestAccuracy(OpTest): - def setUp(self): self.op_type = "accuracy" self.set_npu() @@ -47,7 +46,7 @@ class TestAccuracy(OpTest): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } def set_npu(self): @@ -62,7 +61,6 @@ class TestAccuracy(OpTest): class TestAccuracy2(TestAccuracy): - def setUp(self): self.op_type = "accuracy" self.set_npu() @@ -82,12 +80,11 @@ class TestAccuracy2(TestAccuracy): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } class TestAccuracyType(TestAccuracy): - def setUp(self): self.op_type = "accuracy" self.set_npu() @@ -107,12 +104,11 @@ class TestAccuracyType(TestAccuracy): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } class TestAccuracyType2(TestAccuracy): - def setUp(self): self.op_type = "accuracy" self.set_npu() @@ -132,7 +128,7 @@ class TestAccuracyType2(TestAccuracy): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } diff --git a/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py index 70ab75ef5f242508684a4a2ba96b1944134c800a..49589d5d9dc80e1f5b0017dbc148f03cef272c42 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestAdam(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -53,20 +52,19 @@ class TestAdam(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = 
{'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_npu(self): @@ -80,7 +78,6 @@ class TestAdam(OpTest): class TestAdamWithEpsilonTensor(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -113,15 +110,14 @@ class TestAdamWithEpsilonTensor(OpTest): self.attrs = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_npu(self): @@ -135,7 +131,6 @@ class TestAdamWithEpsilonTensor(OpTest): class TestAdamOpWithSkipUpdate(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -188,7 +183,6 @@ class TestAdamOpWithSkipUpdate(OpTest): class TestAdamOpWithGlobalBetaPow(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -221,8 +215,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): attributes = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.attrs = {'use_global_beta_pow': True} @@ -232,7 +225,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([]), - 'Beta2PowOut': np.array([]) + 'Beta2PowOut': np.array([]), } def set_npu(self): @@ -246,7 +239,6 @@ class TestAdamOpWithGlobalBetaPow(OpTest): class TestNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -261,9 +253,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -287,16 +279,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -309,13 +302,14 @@ class TestNet(unittest.TestCase): class TestNetWithEpsilonTensor(unittest.TestCase): - - def _test(self, - place, - use_tensor=True, - use_fluid_api=True, - 
use_global_beta_pow=False, - flatten_param_grads=False): + def _test( + self, + place, + use_tensor=True, + use_fluid_api=True, + use_global_beta_pow=False, + flatten_param_grads=False, + ): paddle.enable_static() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -329,29 +323,30 @@ class TestNetWithEpsilonTensor(unittest.TestCase): weight_attr1 = paddle.ParamAttr( name="weight1", initializer=fluid.initializer.Constant(value=1.0), - trainable=True) + trainable=True, + ) weight_attr2 = paddle.ParamAttr( name="weight2", initializer=fluid.initializer.Constant(value=2.0), - trainable=True) + trainable=True, + ) clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) with paddle.static.program_guard(main_prog, startup_prog): with paddle.utils.unique_name.guard(): a = paddle.static.data(name="a", shape=[2, 2], dtype='float32') b = paddle.static.data(name="b", shape=[2, 2], dtype='float32') - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) fc_1 = fluid.layers.fc(input=z, size=2, param_attr=weight_attr1) - prediction = fluid.layers.fc(input=fc_1, - size=2, - param_attr=weight_attr2, - act='softmax') + prediction = fluid.layers.fc( + input=fc_1, size=2, param_attr=weight_attr2, act='softmax' + ) cost = fluid.layers.cross_entropy(input=prediction, label=label) loss = fluid.layers.reduce_mean(cost) @@ -364,19 +359,22 @@ class TestNetWithEpsilonTensor(unittest.TestCase): value=float(beta1_init), dtype='float32', persistable=True, - name="beta1") + name="beta1", + ) beta2 = fluid.layers.create_global_var( shape=[1], value=float(beta2_init), dtype='float32', persistable=True, - name="beta2") + name="beta2", + ) epsilon = fluid.layers.create_global_var( shape=[1], value=float(epsilon_init), dtype='float32', persistable=True, - name="epsilon") + name="epsilon", + ) if use_fluid_api: adam = fluid.optimizer.Adam( learning_rate=0.01, @@ -386,13 +384,16 @@ class TestNetWithEpsilonTensor(unittest.TestCase): use_global_beta_pow=use_global_beta_pow, flatten_param_grads=flatten_param_grads, align_size=256, - grad_clip=clip) + grad_clip=clip, + ) else: - adam = paddle.optimizer.Adam(learning_rate=0.01, - beta1=beta1, - beta2=beta2, - epsilon=epsilon, - grad_clip=clip) + adam = paddle.optimizer.Adam( + learning_rate=0.01, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + grad_clip=clip, + ) else: if use_fluid_api: adam = fluid.optimizer.Adam( @@ -403,13 +404,16 @@ class TestNetWithEpsilonTensor(unittest.TestCase): use_global_beta_pow=use_global_beta_pow, flatten_param_grads=flatten_param_grads, align_size=256, - grad_clip=clip) + grad_clip=clip, + ) else: - adam = fluid.optimizer.Adam(learning_rate=0.01, - beta1=beta1_init, - beta2=beta2_init, - epsilon=epsilon_init, - grad_clip=clip) + adam = fluid.optimizer.Adam( + learning_rate=0.01, + beta1=beta1_init, + beta2=beta2_init, + epsilon=epsilon_init, + grad_clip=clip, + ) adam.minimize(loss) @@ -420,15 +424,16 @@ class TestNetWithEpsilonTensor(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(10): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) + print( + "Epoch 
{} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) paddle.disable_static() return pred_res, loss_res @@ -440,10 +445,13 @@ class TestNetWithEpsilonTensor(unittest.TestCase): for use_fluid_api in [True, False]: for use_global_beta_pow in [True, False]: for flatten_param_grads in [True, False]: - pred, loss = self._test(place, use_tensor, - use_fluid_api, - use_global_beta_pow, - flatten_param_grads) + pred, loss = self._test( + place, + use_tensor, + use_fluid_api, + use_global_beta_pow, + flatten_param_grads, + ) preds.append(pred) losses.append(loss) for pred in preds: diff --git a/python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py index 579892dee3dce09b2d8a5ae4ea41199b1435c1ec..0211eb196d58ebee547750b208ab2f3092c03920 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_adamw_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestAdamW(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -53,7 +52,7 @@ class TestAdamW(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = { @@ -61,18 +60,19 @@ class TestAdamW(OpTest): 'beta1': beta1, 'beta2': beta2, "coeff": 0.9, - "with_decay": True + "with_decay": True, } - param_out, moment1_out, \ - moment2_out = adamw_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adamw_step( + self.inputs, self.attrs + ) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def set_npu(self): @@ -86,7 +86,6 @@ class TestAdamW(OpTest): class TestAdamOpWithSkipUpdate(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -139,7 +138,6 @@ class TestAdamOpWithSkipUpdate(OpTest): class TestAdamOpWithoutDecay(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -192,7 +190,6 @@ class TestAdamOpWithoutDecay(OpTest): class TestNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -207,9 +204,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -233,16 +230,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], 
loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py index ee43d18ae2f0160eb6175d3e581d0f2813eadf5b..ff11f1e68f0ca0d23d3d44a1ef4c2bb449acdf15 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestCheckFiniteAndUnscale(unittest.TestCase): - def get_prog(self): paddle.enable_static() main_program = paddle.static.Program() @@ -35,20 +34,21 @@ class TestCheckFiniteAndUnscale(unittest.TestCase): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') scale = paddle.static.data(name="scale", shape=[1], dtype='float32') - float_status = paddle.static.data(name="status", - shape=[8], - dtype='float32') + float_status = paddle.static.data( + name="status", shape=[8], dtype='float32' + ) main_program.global_block().append_op( - type="alloc_float_status", - outputs={"FloatStatus": float_status}) + type="alloc_float_status", outputs={"FloatStatus": float_status} + ) main_program.global_block().append_op( type="clear_float_status", inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}) + outputs={"FloatStatusOut": float_status}, + ) c = paddle.fluid.layers.elementwise_div(a, b) - out, found_inf = check_finite_and_unscale([c], - scale, - float_status=float_status) + out, found_inf = check_finite_and_unscale( + [c], scale, float_status=float_status + ) return main_program, out, found_inf, float_status @@ -58,12 +58,9 @@ class TestCheckFiniteAndUnscale(unittest.TestCase): exe = fluid.Executor(place) out_, founf_inf_, float_status_ = exe.run( main_program, - feed={ - "a": a, - "b": b, - "scale": scale - }, - fetch_list=[out, found_inf, float_status]) + feed={"a": a, "b": b, "scale": scale}, + fetch_list=[out, found_inf, float_status], + ) print(float_status_) return out_, founf_inf_ @@ -100,7 +97,6 @@ class TestCheckFiniteAndUnscale(unittest.TestCase): class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase): - def get_prog(self): paddle.enable_static() main_program = paddle.static.Program() @@ -108,31 +104,33 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') scale = paddle.static.data(name="scale", shape=[1], dtype='float32') - float_status = paddle.static.data(name="status", - shape=[8], - dtype='float32') + float_status = paddle.static.data( + name="status", shape=[8], dtype='float32' + ) main_program.global_block().append_op( - type="alloc_float_status", - outputs={"FloatStatus": float_status}) + type="alloc_float_status", outputs={"FloatStatus": float_status} + ) main_program.global_block().append_op( type="clear_float_status", inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}) + outputs={"FloatStatusOut": float_status}, + ) c = paddle.fluid.layers.elementwise_div(a, b) - out, found_inf = check_finite_and_unscale([c], - scale, - float_status=float_status) + out, found_inf = check_finite_and_unscale( + [c], scale, float_status=float_status + ) main_program.global_block().append_op( - type="alloc_float_status", - outputs={"FloatStatus": float_status}) + 
type="alloc_float_status", outputs={"FloatStatus": float_status} + ) main_program.global_block().append_op( type="clear_float_status", inputs={"FloatStatus": float_status}, - outputs={"FloatStatusOut": float_status}) + outputs={"FloatStatusOut": float_status}, + ) d = paddle.fluid.layers.elementwise_add(a, b) - out, found_inf = check_finite_and_unscale([d], - scale, - float_status=float_status) + out, found_inf = check_finite_and_unscale( + [d], scale, float_status=float_status + ) return main_program, out, found_inf, float_status @@ -142,12 +140,9 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase): exe = fluid.Executor(place) out_, founf_inf_, float_status_ = exe.run( main_program, - feed={ - "a": a, - "b": b, - "scale": scale - }, - fetch_list=[out, found_inf, float_status]) + feed={"a": a, "b": b, "scale": scale}, + fetch_list=[out, found_inf, float_status], + ) print(float_status_) return out_, founf_inf_ diff --git a/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py index 46e21378b848fb43a94b2dfa5302cae9d6bb85e5..9b089462b01b51825753809f7d158f154428c0ed 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class BaseTestCase(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -52,7 +51,6 @@ class BaseTestCase(OpTest): # test argmax, dtype: float16 class TestArgMaxFloat16Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -61,7 +59,6 @@ class TestArgMaxFloat16Case1(BaseTestCase): class TestArgMaxFloat16Case2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -70,7 +67,6 @@ class TestArgMaxFloat16Case2(BaseTestCase): class TestArgMaxFloat16Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -79,7 +75,6 @@ class TestArgMaxFloat16Case3(BaseTestCase): class TestArgMaxFloat16Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -88,7 +83,6 @@ class TestArgMaxFloat16Case4(BaseTestCase): class TestArgMaxFloat16Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -97,7 +91,6 @@ class TestArgMaxFloat16Case5(BaseTestCase): class TestArgMaxFloat16Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -106,7 +99,6 @@ class TestArgMaxFloat16Case6(BaseTestCase): class TestArgMaxFloat16Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -115,35 +107,31 @@ class TestArgMaxFloat16Case7(BaseTestCase): class TestArgMaxFloat16Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'float16' self.axis = 0 class TestArgMaxFloat16Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float16' self.axis = 0 class TestArgMaxFloat16Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float16' self.axis = 0 # test argmax, dtype: float32 class TestArgMaxFloat32Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -152,7 +140,6 @@ class TestArgMaxFloat32Case1(BaseTestCase): class 
TestArgMaxFloat32Case2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -161,7 +148,6 @@ class TestArgMaxFloat32Case2(BaseTestCase): class TestArgMaxFloat32Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -170,7 +156,6 @@ class TestArgMaxFloat32Case3(BaseTestCase): class TestArgMaxFloat32Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -179,7 +164,6 @@ class TestArgMaxFloat32Case4(BaseTestCase): class TestArgMaxFloat32Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -188,7 +172,6 @@ class TestArgMaxFloat32Case5(BaseTestCase): class TestArgMaxFloat32Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -197,7 +180,6 @@ class TestArgMaxFloat32Case6(BaseTestCase): class TestArgMaxFloat32Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -206,34 +188,30 @@ class TestArgMaxFloat32Case7(BaseTestCase): class TestArgMaxFloat32Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'float32' self.axis = 0 class TestArgMaxFloat32Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float32' self.axis = 0 class TestArgMaxFloat32Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float32' self.axis = 0 class BaseTestComplex1_1(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -251,7 +229,7 @@ class BaseTestComplex1_1(OpTest): self.inputs = {'X': self.x} self.attrs = { 'axis': self.axis, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { 'Out': np.argmax(self.x, axis=self.axis).astype("int32") @@ -262,7 +240,6 @@ class BaseTestComplex1_1(OpTest): class BaseTestComplex1_2(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -280,7 +257,7 @@ class BaseTestComplex1_2(OpTest): self.inputs = {'X': self.x} self.attrs = { 'axis': self.axis, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { 'Out': np.argmax(self.x, axis=self.axis).astype("int32") @@ -291,7 +268,6 @@ class BaseTestComplex1_2(OpTest): class TestArgMaxAPI(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -303,7 +279,6 @@ class TestArgMaxAPI(unittest.TestCase): self.place = [paddle.NPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -311,9 +286,9 @@ class TestArgMaxAPI(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input, axis=self.axis) paddle_output = paddle.argmax(tensor_input, axis=self.axis) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() for place in self.place: @@ -321,7 +296,6 @@ class TestArgMaxAPI(unittest.TestCase): class TestArgMaxAPI_2(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -334,20 +308,20 @@ class TestArgMaxAPI_2(unittest.TestCase): self.place = [paddle.NPUPlace(0)] def test_dygraph_api(self): - def run(place): 
paddle.disable_static(place) np.random.seed(2021) numpy_input = (np.random.random(self.dims)).astype(self.dtype) tensor_input = paddle.to_tensor(numpy_input) - numpy_output = np.argmax(numpy_input, - axis=self.axis).reshape(1, 4, 5) - paddle_output = paddle.argmax(tensor_input, - axis=self.axis, - keepdim=self.keep_dims) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + numpy_output = np.argmax(numpy_input, axis=self.axis).reshape( + 1, 4, 5 + ) + paddle_output = paddle.argmax( + tensor_input, axis=self.axis, keepdim=self.keep_dims + ) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -356,7 +330,6 @@ class TestArgMaxAPI_2(unittest.TestCase): class TestArgMaxAPI_3(unittest.TestCase): - def initTestCase(self): self.dims = (1, 9) self.dtype = 'float32' @@ -367,7 +340,6 @@ class TestArgMaxAPI_3(unittest.TestCase): self.place = [paddle.NPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -375,9 +347,9 @@ class TestArgMaxAPI_3(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input).reshape([1]) paddle_output = paddle.argmax(tensor_input) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py index 8aeb32e5e5d9bacdf04dc7195c81692519fb5ac8..af8c038ac76067e35ce97f7b2026b79aef927cf3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class BaseTestCase(OpTest): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -51,7 +50,6 @@ class BaseTestCase(OpTest): # test argmin, dtype: float16 class TestArgMinFloat16Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -60,7 +58,6 @@ class TestArgMinFloat16Case1(BaseTestCase): class TestArgMinFloat16Case2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -69,7 +66,6 @@ class TestArgMinFloat16Case2(BaseTestCase): class TestArgMinFloat16Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -78,7 +74,6 @@ class TestArgMinFloat16Case3(BaseTestCase): class TestArgMinFloat16Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -87,7 +82,6 @@ class TestArgMinFloat16Case4(BaseTestCase): class TestArgMinFloat16Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -96,7 +90,6 @@ class TestArgMinFloat16Case5(BaseTestCase): class TestArgMinFloat16Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -105,7 +98,6 @@ class TestArgMinFloat16Case6(BaseTestCase): class TestArgMinFloat16Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -114,35 +106,31 @@ class TestArgMinFloat16Case7(BaseTestCase): class TestArgMinFloat16Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (1, ) + self.dims = (1,) self.dtype 
= 'float16' self.axis = 0 class TestArgMinFloat16Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float16' self.axis = 0 class TestArgMinFloat16Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float16' self.axis = 0 # test argmin, dtype: float32 class TestArgMinFloat32Case1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -151,7 +139,6 @@ class TestArgMinFloat32Case1(BaseTestCase): class TestArgMinFloat32Case2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -160,7 +147,6 @@ class TestArgMinFloat32Case2(BaseTestCase): class TestArgMinFloat32Case3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -169,7 +155,6 @@ class TestArgMinFloat32Case3(BaseTestCase): class TestArgMinFloat32Case4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -178,7 +163,6 @@ class TestArgMinFloat32Case4(BaseTestCase): class TestArgMinFloat32Case5(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -187,7 +171,6 @@ class TestArgMinFloat32Case5(BaseTestCase): class TestArgMinFloat32Case6(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -196,7 +179,6 @@ class TestArgMinFloat32Case6(BaseTestCase): class TestArgMinFloat32Case7(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -205,34 +187,30 @@ class TestArgMinFloat32Case7(BaseTestCase): class TestArgMinFloat32Case8(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'float32' self.axis = 0 class TestArgMinFloat32Case9(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (2, ) + self.dims = (2,) self.dtype = 'float32' self.axis = 0 class TestArgMinFloat32Case10(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'float32' self.axis = 0 class TestArgMinAPI(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -244,7 +222,6 @@ class TestArgMinAPI(unittest.TestCase): self.place = [paddle.NPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -252,9 +229,9 @@ class TestArgMinAPI(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmin(numpy_input, axis=self.axis) paddle_output = paddle.argmin(tensor_input, axis=self.axis) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() for place in self.place: @@ -262,7 +239,6 @@ class TestArgMinAPI(unittest.TestCase): class TestArgMaxAPI_2(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -275,20 +251,20 @@ class TestArgMaxAPI_2(unittest.TestCase): self.place = [paddle.NPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) numpy_input = (np.random.random(self.dims)).astype(self.dtype) tensor_input = paddle.to_tensor(numpy_input) - numpy_output = np.argmin(numpy_input, - axis=self.axis).reshape(1, 4, 5) - paddle_output = paddle.argmin(tensor_input, - axis=self.axis, - keepdim=self.keep_dims) - 
np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + numpy_output = np.argmin(numpy_input, axis=self.axis).reshape( + 1, 4, 5 + ) + paddle_output = paddle.argmin( + tensor_input, axis=self.axis, keepdim=self.keep_dims + ) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py index 36f0e5e55f9493ef8f87f1dc1bc77e84ab0dfaaa..5aaf1de24516ea206bafec50160a6e617ada2d20 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestArgsortOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "argsort" @@ -50,9 +49,11 @@ class TestArgsortOp(OpTest): def get_output(self): if self.descending: self.indices = np.flip( - np.argsort(self.x, kind='heapsort', axis=self.axis), self.axis) + np.argsort(self.x, kind='heapsort', axis=self.axis), self.axis + ) self.sorted_x = np.flip( - np.sort(self.x, kind='heapsort', axis=self.axis), self.axis) + np.sort(self.x, kind='heapsort', axis=self.axis), self.axis + ) else: self.indices = np.argsort(self.x, kind='heapsort', axis=self.axis) self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis) @@ -78,67 +79,56 @@ class TestArgsortOp(OpTest): class TestArgsortOpAxis0NPU(TestArgsortOp): - def init_axis(self): self.axis = 0 class TestArgsortOpAxis1NPU(TestArgsortOp): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis2NPU(TestArgsortOp): - def init_axis(self): self.axis = 2 class TestArgsortOpAxisNeg1NPU(TestArgsortOp): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg2NPU(TestArgsortOp): - def init_axis(self): self.axis = -2 class TestArgsortOpDescendingAxisNPU(TestArgsortOp): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0NPU(TestArgsortOpAxis0NPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1NPU(TestArgsortOpAxis1NPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2NPU(TestArgsortOpAxis2NPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1NPU(TestArgsortOpAxisNeg1NPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2NPU(TestArgsortOpAxisNeg2NPU): - def init_direction(self): self.descending = True @@ -151,7 +141,6 @@ class TestArgsortOpDescendingAxisNeg2NPU(TestArgsortOpAxisNeg2NPU): class TestArgsortOpAxis0NPUFP32(TestArgsortOp): - def init_axis(self): self.axis = 0 @@ -165,74 +154,63 @@ class TestArgsortOpAxis0NPUFP32(TestArgsortOp): self.__class__.use_npu = True def test_check_grad(self): - self.check_grad_with_place(self.place, ["X"], - "Out", - max_relative_error=0.03) + self.check_grad_with_place( + self.place, ["X"], "Out", max_relative_error=0.03 + ) class TestArgsortOpAxis1NPUFP32(TestArgsortOpAxis0NPUFP32): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis2NPUFP32(TestArgsortOpAxis0NPUFP32): - def init_axis(self): self.axis = 2 class TestArgsortOpAxisNeg1NPUFP32(TestArgsortOpAxis0NPUFP32): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg2NPUFP32(TestArgsortOpAxis0NPUFP32): - def init_axis(self): self.axis = -2 class TestArgsortOpDescendingAxisNPUFP32(TestArgsortOpAxis0NPUFP32): - def 
init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0NPUFP32(TestArgsortOpAxis0NPUFP32): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1NPUFP32(TestArgsortOpAxis1NPUFP32): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2NPUFP32(TestArgsortOpAxis2NPUFP32): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1NPUFP32(TestArgsortOpAxisNeg1NPUFP32): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2NPUFP32(TestArgsortOpAxisNeg2NPUFP32): - def init_direction(self): self.descending = True # test cases for int64 class TestArgsortOpAxis0NPUINT64(TestArgsortOp): - def setUp(self): self.set_npu() self.op_type = "argsort" @@ -242,10 +220,9 @@ class TestArgsortOpAxis0NPUINT64(TestArgsortOp): self.init_axis() self.init_direction() - self.x = np.random.randint(low=-100, - high=100, - size=self.input_shape, - dtype=self.dtype).astype(self.dtype) + self.x = np.random.randint( + low=-100, high=100, size=self.input_shape, dtype=self.dtype + ).astype(self.dtype) self.inputs = {"X": self.x} self.attrs = {"axis": self.axis, "descending": self.descending} self.get_output() @@ -265,61 +242,51 @@ class TestArgsortOpAxis0NPUINT64(TestArgsortOp): class TestArgsortOpAxis1NPUINT64(TestArgsortOpAxis0NPUINT64): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis2NPUINT64(TestArgsortOpAxis0NPUINT64): - def init_axis(self): self.axis = 2 class TestArgsortOpAxisNeg1NPUINT64(TestArgsortOpAxis0NPUINT64): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg2NPUINT64(TestArgsortOpAxis0NPUINT64): - def init_axis(self): self.axis = -2 class TestArgsortOpDescendingAxisNPUINT64(TestArgsortOpAxis0NPUINT64): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0NPUINT64(TestArgsortOpAxis0NPUINT64): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1NPUINT64(TestArgsortOpAxis1NPUINT64): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2NPUINT64(TestArgsortOpAxis2NPUINT64): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1NPUINT64(TestArgsortOpAxisNeg1NPUINT64): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2NPUINT64(TestArgsortOpAxisNeg2NPUINT64): - def init_direction(self): self.descending = True diff --git a/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py index 82969805dc16816bff0dcd918e7a13908316eaf1..98da7c4141dee72f07d101045ebfa70b70eb40e9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_assign_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestAssign(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py index 7ffbcd1db13a73ddb7f915e84550353e341fb856..402b90bc49bbdf736e2f9ebdde8906569d8181c0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py @@ -29,7 +29,6 @@ np.random.seed(2021) class TestAssignValueNPUOp(op_test.OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -41,7 +40,8 @@ class 
TestAssignValueNPUOp(op_test.OpTest): self.attrs["shape"] = self.value.shape self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_( - self.value.dtype) + self.value.dtype + ) self.outputs = {"Out": self.value} def set_npu(self): @@ -56,35 +56,36 @@ class TestAssignValueNPUOp(op_test.OpTest): class TestAssignValueNPUOp2(TestAssignValueNPUOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class TestAssignValueNPUOp3(TestAssignValueNPUOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class TestAssignValueNPUOp4(TestAssignValueNPUOp): - def init_data(self): - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) self.attrs["bool_values"] = [int(v) for v in self.value.flat] class TestAssignApi(unittest.TestCase): - def setUp(self): self.init_dtype() self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype( - self.dtype) - self.place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + self.dtype + ) + self.place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) def init_dtype(self): self.dtype = "float32" @@ -102,25 +103,26 @@ class TestAssignApi(unittest.TestCase): class TestAssignApi2(TestAssignApi): - def init_dtype(self): self.dtype = "int32" class TestAssignApi3(TestAssignApi): - def init_dtype(self): self.dtype = "int64" class TestAssignApi4(TestAssignApi): - def setUp(self): self.init_dtype() - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) - self.place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) + self.place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) def init_dtype(self): self.dtype = "bool" diff --git a/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py index bb13d82366935610d04e9932a1fac46c1644c7a6..fe22453f4fa45f71912681676fbea290245b6804 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py @@ -26,7 +26,6 @@ SEED = 1024 class TestAtan(OpTest): - def setUp(self): self.set_npu() self.op_type = "atan" @@ -57,7 +56,7 @@ class TestAtan(OpTest): out = paddle.atan(data, name='Y') place = paddle.NPUPlace(0) exe = fluid.Executor(place) - result, = exe.run(feed={"X": np_x}, fetch_list=[out]) + (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = np.arctan(np_x) self.assertEqual(result, expected) @@ -74,13 +73,11 @@ class TestAtan(OpTest): class TestAtanShape(TestAtan): - def set_attrs(self): self.shape = [12, 23, 10] class TestAtanFloat16(TestAtan): - def set_attrs(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py index 37c6fcce1e1ac57546aa595ebf41ac9527f7d6d6..e39506eed7a9bab4d12638a7bd94f7aeb3fd171c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py @@ -24,24 +24,27 @@ from paddle.fluid.op import Operator from 
op_test import OpTest, _set_use_system_allocator from paddle.fluid import Program, program_guard -from test_batch_norm_op import _reference_testing, _cal_mean_variance, _reference_training, _reference_grad +from test_batch_norm_op import ( + _reference_testing, + _cal_mean_variance, + _reference_training, + _reference_grad, +) _set_use_system_allocator(False) paddle.enable_static() class TestBatchNormOpInference(unittest.TestCase): - def setUp(self): self.dtype = np.float32 self.init_kernel_type() self.data_formats = ["NCHW", "NHWC"] def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, atol=atol, err_msg=msg + ) def check_with_place(self, place, data_layout, dtype, shape): epsilon = epsilon = 0.00001 @@ -72,8 +75,9 @@ class TestBatchNormOpInference(unittest.TestCase): bias = np.random.random_sample(scale_shape).astype(np.float32) mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) - y = _reference_testing(x, scale, bias, mean, variance, epsilon, - data_layout).astype(dtype) + y = _reference_testing( + x, scale, bias, mean, variance, epsilon, data_layout + ).astype(dtype) var_dict = locals() var_names = ["x", "scale", "bias", "mean", "variance", "y"] ground_truth = {name: var_dict[name] for name in var_names} @@ -84,15 +88,15 @@ class TestBatchNormOpInference(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype="float32", - shape=ground_truth[name].shape) + block.create_var( + name=name, dtype="float32", shape=ground_truth[name].shape + ) inputs = { "X": block.var("x"), "Scale": block.var("scale"), "Bias": block.var("bias"), "Mean": block.var("mean"), - "Variance": block.var("variance") + "Variance": block.var("variance"), } attrs = { "epsilon": epsilon, @@ -106,14 +110,13 @@ class TestBatchNormOpInference(unittest.TestCase): "MeanOut": block.var("mean"), # share memory "VarianceOut": block.var("variance"), # share memory "SavedMean": block.var("saved_mean"), - "SavedVariance": block.var("saved_variance") + "SavedVariance": block.var("saved_variance"), } block.create_var(name="reserve_space", dtype='float32') outputs["ReserveSpace"] = block.var('reserve_space') - bn_op = block.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + bn_op = block.append_op( + type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) program._sync_with_cpp() @@ -124,7 +127,8 @@ class TestBatchNormOpInference(unittest.TestCase): name: ground_truth[name] for name in ["x", "scale", "bias", "mean", "variance"] }, - fetch_list=["y"]) + fetch_list=["y"], + ) self.__assert_close(var_dict["y"], out[0], "y", atol=1e-3) def test_check_output(self): @@ -138,7 +142,6 @@ class TestBatchNormOpInference(unittest.TestCase): class TestFP16BatchNormOpInference(TestBatchNormOpInference): - def setUp(self): self.dtype = np.float16 self.init_kernel_type() @@ -146,7 +149,6 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): class TestBatchNormOpTraining(unittest.TestCase): - def set_npu(self): self.__class__.use_npu = True @@ -169,27 +171,54 @@ class TestBatchNormOpTraining(unittest.TestCase): self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - "y", 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + "y", + 'mean', + 
'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def __assert_close(self, tensor, np_array, msg, atol=1e-4): np.allclose(np.array(tensor), np_array, atol=atol) - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): # run forward - y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon, - data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. / np.sqrt(var_ref + epsilon) + y, saved_mean, var_ref = _reference_training( + x, scale, bias, epsilon, data_layout + ) + mean_out = saved_mean * (1.0 - momentum) + momentum * mean + variance_out = var_ref * (1.0 - momentum) + momentum * variance + saved_variance = 1.0 / np.sqrt(var_ref + epsilon) # run backward - x_grad, scale_grad, bias_grad = _reference_grad(x, y_grad, scale, - saved_mean, var_ref, - epsilon, data_layout) - - return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout + ) + + return ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) def set_mean_variance(self, scale_shape, x, data_layout): mean, variance = _cal_mean_variance(x, self.epsilon, data_layout) @@ -198,12 +227,11 @@ class TestBatchNormOpTraining(unittest.TestCase): # computing global mean/variance for one step if self.use_global_stats: mom = self.momentum - mean = mean * (1. - mom) + mom * mean_pre - variance = variance * (1. 
- mom) + mom * variance_pre + mean = mean * (1.0 - mom) + mom * mean_pre + variance = variance * (1.0 - mom) + mom * variance_pre return mean, variance def test_forward_backward(self): - def test_with_place(place, data_layout, shape): # attr epsilon = self.epsilon @@ -236,9 +264,27 @@ class TestBatchNormOpTraining(unittest.TestCase): y_grad = np.random.random_sample(shape).astype(self.dtype) momentum_var = np.array([momentum]).astype(np.float32) - y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward( - x, y_grad, scale, bias, mean, variance, epsilon, momentum, - shape, data_layout) + ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) = self.ref_forward_backward( + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -247,8 +293,15 @@ class TestBatchNormOpTraining(unittest.TestCase): var_dict['bias@GRAD'] = bias_grad var_names = [ - 'x', 'scale', 'bias', 'mean', 'variance', "y", 'saved_mean', - 'saved_variance', 'momentum_var' + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + "y", + 'saved_mean', + 'saved_variance', + 'momentum_var', ] ground_truth = {name: var_dict[name] for name in var_names} @@ -256,15 +309,17 @@ class TestBatchNormOpTraining(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = { "X": block.var('x'), "Scale": block.var('scale'), "Bias": block.var('bias'), "Mean": block.var('mean'), - "Variance": block.var('variance') + "Variance": block.var('variance'), } attrs = { "epsilon": epsilon, @@ -272,7 +327,7 @@ class TestBatchNormOpTraining(unittest.TestCase): "data_layout": data_layout, "use_mkldnn": self.use_mkldnn, "fuse_with_relu": self.fuse_with_relu, - "use_global_stats": self.use_global_stats + "use_global_stats": self.use_global_stats, } if self.use_momentum_variable: inputs['MomentumTensor'] = block.var('momentum_var') @@ -284,19 +339,22 @@ class TestBatchNormOpTraining(unittest.TestCase): "MeanOut": block.var('mean'), # share memory "VarianceOut": block.var('variance'), # share memory "SavedMean": block.var('saved_mean'), - "SavedVariance": block.var('saved_variance') + "SavedVariance": block.var('saved_variance'), } block.create_var(name="reserve_space", dtype='float32') outputs["ReserveSpace"] = block.var('reserve_space') - bn_op = block.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + bn_op = block.append_op( + type="batch_norm", + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) block.create_var(name='y@GRAD', dtype=self.dtype, shape=y.shape) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - bn_op.desc, self.no_grad_set, []) + bn_op.desc, self.no_grad_set, [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -311,22 +369,28 @@ class TestBatchNormOpTraining(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in [ - 'x', 'scale', 'bias', 'mean', 'variance', - 'y@GRAD', 'momentum_var' - ] - }, - fetch_list=self.fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for 
name in [ + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y@GRAD', + 'momentum_var', + ] + }, + fetch_list=self.fetch_list, + ) for id, name in enumerate(self.fetch_list): if name == 'variance': - self.__assert_close(var_dict[name], - out[id], - name, - atol=1e-3) + self.__assert_close( + var_dict[name], out[id], name, atol=1e-3 + ) continue self.__assert_close(var_dict[name], out[id], name) print("op test forward passed: ", str(place), data_layout) @@ -340,13 +404,11 @@ class TestBatchNormOpTraining(unittest.TestCase): class TestFP16BatchNormOpTraining(TestBatchNormOpTraining): - def init_dtype(self): self.dtype = np.float16 class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -354,24 +416,33 @@ class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): class TestBatchNormOpTrainingMomentumVariable(TestBatchNormOpTraining): - def init_test_case(self): self.use_momentum_variable = True self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = True self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def reference_grad(self, x, y_grad, scale, mean, var, epsilon, data_format): @@ -379,20 +450,23 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): if len(x_shape) == 3: if data_format == "NCHW": # NCL -> NCL1 x = np.reshape(x, (x_shape[0], x_shape[1], x_shape[2], 1)) - y_grad = np.reshape(y_grad, - (x_shape[0], x_shape[1], x_shape[2], 1)) + y_grad = np.reshape( + y_grad, (x_shape[0], x_shape[1], x_shape[2], 1) + ) else: # NLC -> NL1C x = np.reshape(x, (x_shape[0], x_shape[1], 1, x_shape[2])) - y_grad = np.reshape(y_grad, - (x_shape[0], x_shape[1], 1, x_shape[2])) + y_grad = np.reshape( + y_grad, (x_shape[0], x_shape[1], 1, x_shape[2]) + ) if data_format == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) y_grad = np.transpose(y_grad, (0, 2, 3, 1)) x_grad = scale * y_grad / np.sqrt(var + epsilon) - grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), - axis=(0, 1, 2)) + grad_scale = np.sum( + y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2) + ) grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W @@ -406,8 +480,19 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): return x_grad, grad_scale, grad_offset - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): if data_layout != "NCHW" and data_layout != "NHWC": raise ValueError("Unknown data order.") @@ -415,12 +500,14 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): if len(x_shape) == 3: if data_layout == "NCHW": # NCL -> NCL1 x = np.reshape(x, (x_shape[0], x_shape[1], x_shape[2], 1)) - y_grad = np.reshape(y_grad, - (x_shape[0], x_shape[1], x_shape[2], 1)) + y_grad = np.reshape( + y_grad, (x_shape[0], x_shape[1], x_shape[2], 1) + ) else: # NLC -> NL1C x = 
np.reshape(x, (x_shape[0], x_shape[1], 1, x_shape[2])) - y_grad = np.reshape(y_grad, - (x_shape[0], x_shape[1], 1, x_shape[2])) + y_grad = np.reshape( + y_grad, (x_shape[0], x_shape[1], 1, x_shape[2]) + ) if data_layout == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) @@ -436,21 +523,31 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): mean_out = mean variance_out = variance - saved_variance = 1. / np.sqrt(variance + epsilon) + saved_variance = 1.0 / np.sqrt(variance + epsilon) # run backward x_grad, scale_grad, bias_grad = self.reference_grad( - x, y_grad, scale, mean, variance, epsilon, data_layout) + x, y_grad, scale, mean, variance, epsilon, data_layout + ) if len(x_shape) == 3: y = np.reshape(y, x_shape) x_grad = np.reshape(x_grad, x_shape) - return y, mean_out, variance_out, mean, saved_variance, x_grad, scale_grad, bias_grad + return ( + y, + mean_out, + variance_out, + mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) class TestBatchNormOpFreezeStatsAndScaleBiasTraining( - TestBatchNormOpFreezeStatsTraining): - + TestBatchNormOpFreezeStatsTraining +): def init_test_case(self): self.use_global_stats = True self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -458,7 +555,6 @@ class TestBatchNormOpFreezeStatsAndScaleBiasTraining( class TestDygraphBatchNormTrainableStats(unittest.TestCase): - def test_dygraph(self): places = [fluid.NPUPlace(0)] for p in places: @@ -469,7 +565,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() @@ -489,7 +586,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) diff --git a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py index 10ec6f3fae1ed7dbb121260ac9e6c71e7ef9c687..d9e968964afb65c01042cea5c85f8d2bf78215b9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py @@ -24,88 +24,76 @@ from op_test import OpTest paddle.enable_static() -def test_static_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float32') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float32' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float32') - bce_loss = paddle.nn.loss.BCELoss(weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float32' + ) + bce_loss = paddle.nn.loss.BCELoss( + weight=weight, reduction=reduction + ) else: bce_loss = 
paddle.nn.loss.BCELoss(reduction=reduction) res = bce_loss(input, label) exe = paddle.static.Executor(place) - static_result = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + static_result = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result[0] -def test_static_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float32') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float32' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float32') - res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float32' + ) + res = paddle.nn.functional.binary_cross_entropy( + input, label, weight=weight, reduction=reduction + ) else: - res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + res = paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) exe = paddle.static.Executor(place) - static_result = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + static_result = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result[0] -def test_dygraph_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static(place) if weight_np is not None: weight = paddle.to_tensor(weight_np) @@ -118,25 +106,22 @@ def test_dygraph_layer(place, return dy_result -def test_dygraph_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static(place) input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) if weight_np is not None: weight = paddle.to_tensor(weight_np) - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + dy_res = paddle.nn.functional.binary_cross_entropy( + input, label, weight=weight, reduction=reduction + ) else: - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + dy_res = paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -144,11 +129,19 @@ def test_dygraph_functional(place, def calc_bceloss(input_np, label_np, reduction='mean', 
weight_np=None): if weight_np is None: - expected = -1 * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. - input_np)) + expected = -1 * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) else: - expected = -1 * weight_np * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. - input_np)) + expected = ( + -1 + * weight_np + * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) + ) if reduction == 'mean': expected = np.mean(expected) @@ -161,7 +154,6 @@ def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): class TestBCELoss(unittest.TestCase): - def test_BCELoss(self): input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32) @@ -171,87 +163,88 @@ class TestBCELoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - static_result = test_static_layer(place, input_np, label_np, - reduction) - dy_result = test_dygraph_layer(place, input_np, label_np, - reduction) + static_result = test_static_layer( + place, input_np, label_np, reduction + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction + ) expected = calc_bceloss(input_np, label_np, reduction) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected, rtol=1e-6) static_functional = test_static_functional( - place, input_np, label_np, reduction) - dy_functional = test_dygraph_functional(place, input_np, - label_np, reduction) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-6) + place, input_np, label_np, reduction + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-6 + ) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCELoss_weight(self): - input_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float32) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float32) + input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float32 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float32 + ) weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32) - place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) for reduction in ['sum', 'mean', 'none']: - static_result = test_static_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - dy_result = test_dygraph_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - expected = calc_bceloss(input_np, - label_np, - reduction, - weight_np=weight_np) + static_result = test_static_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + expected = calc_bceloss( + input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-6) np.testing.assert_allclose(static_result, dy_result, rtol=1e-6) np.testing.assert_allclose(dy_result, expected, rtol=1e-6) - static_functional = test_static_functional(place, - input_np, - label_np, - 
reduction, - weight_np=weight_np) - dy_functional = test_dygraph_functional(place, - input_np, - label_np, - reduction, - weight_np=weight_np) + static_functional = test_static_functional( + place, input_np, label_np, reduction, weight_np=weight_np + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-6) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-6) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-6 + ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-6) def test_BCELoss_error(self): paddle.disable_static(paddle.NPUPlace(0)) - self.assertRaises(ValueError, - paddle.nn.loss.BCELoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction" + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.binary_cross_entropy, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.binary_cross_entropy, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() def bce_loss(input, label): - return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input)) + return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input)) class TestBceLossOp(OpTest): - def setUp(self): self.set_npu() self.init_test_case() @@ -278,13 +271,11 @@ class TestBceLossOp(OpTest): class TestBceLossOpCase1(OpTest): - def init_test_cast(self): self.shape = [2, 3, 4, 5] class TestBceLossOpCase2(OpTest): - def init_test_cast(self): self.shape = [2, 3, 20] diff --git a/python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py index f00fc60fb42ecf698885e1693aa8f6ae45316b37..7f5588f34aca37234654569dc8032464d9afb7bb 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_beam_search_decode_op_npu.py @@ -42,28 +42,41 @@ class TestBeamSearchDecodeNPUOp(unittest.TestCase): # beam_size = 2, end_id = 1 # start with start_id [ - self.append_lod_tensor(array, [[0, 1, 2], [0, 1, 2]], - np.array([0, 0], dtype=dtype)) + self.append_lod_tensor( + array, [[0, 1, 2], [0, 1, 2]], np.array([0, 0], dtype=dtype) + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 1, 2], [0, 2, 4]], - np.array([2, 3, 4, 5], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 1, 2], [0, 2, 4]], + np.array([2, 3, 4, 5], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 2, 2, 4, 4]], - np.array([3, 1, 5, 4], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 2, 4], [0, 2, 2, 4, 4]], + np.array([3, 1, 5, 4], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 1, 2, 3, 4]], - np.array([1, 1, 3, 5], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 2, 4], [0, 1, 2, 3, 4]], + np.array([1, 1, 3, 5], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 0, 0, 2, 2]], - np.array([5, 1], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 
2, 4], [0, 0, 0, 2, 2]], + np.array([5, 1], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] @@ -89,7 +102,8 @@ class TestBeamSearchDecodeNPUOp(unittest.TestCase): self.assertEqual(sentence_scores.lod(), expected_lod) expected_data = np.array( - [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64") + [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64" + ) np.testing.assert_array_equal(np.array(sentence_ids), expected_data) np.testing.assert_array_equal(np.array(sentence_scores), expected_data) diff --git a/python/paddle/fluid/tests/unittests/npu/test_beam_search_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_beam_search_op_npu.py index d185260a86a261c13f84165840b55b79aee18857..df340b97cdbc72105cf13bfa3488d41dc3b74290 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_beam_search_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_beam_search_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestBeamSearchNPUOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -35,19 +34,19 @@ class TestBeamSearchNPUOp(OpTest): 'pre_ids': (self.pre_ids, self.lod), 'pre_scores': (self.pre_score, self.lod), 'ids': (self.ids, self.lod), - 'scores': (self.score, self.lod) + 'scores': (self.score, self.lod), } # The `target_lod` attribute is still based on offset self.attrs = { 'level': 0, 'beam_size': self.beam_size, 'end_id': 0, - 'is_accumulated': self.is_accumulated + 'is_accumulated': self.is_accumulated, } self.outputs = { 'selected_ids': (self.selected_ids, self.out_lod), 'selected_scores': (self.selected_scores, self.out_lod), - 'parent_idx': self.parent_idx + 'parent_idx': self.parent_idx, } def set_npu(self): @@ -57,18 +56,21 @@ class TestBeamSearchNPUOp(OpTest): self.beam_size = 2 self.is_accumulated = True self.pre_ids = np.array([[1], [2], [3], [4]], dtype='int64') - self.ids = np.array([[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], - dtype='int64') + self.ids = np.array( + [[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], dtype='int64' + ) self.lod = [[2, 2], [1, 1, 1, 1]] self.out_lod = [[2, 2], [1, 1, 1, 1]] self.offset_lod = [[0, 2, 4], [0, 1, 2, 3, 4]] - self.score = np.array([ - [0.5, 0.3, 0.2], - [0.6, 0.3, 0.1], - [0.9, 0.5, 0.1], - [0.7, 0.5, 0.1], - ], - dtype='float32') + self.score = np.array( + [ + [0.5, 0.3, 0.2], + [0.6, 0.3, 0.1], + [0.9, 0.5, 0.1], + [0.7, 0.5, 0.1], + ], + dtype='float32', + ) self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32') self.selected_ids = np.array([4, 2, 3, 8])[:, np.newaxis] self.selected_scores = np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis] @@ -79,7 +81,6 @@ class TestBeamSearchNPUOp(OpTest): class TestBeamSearchNPUOp2(TestBeamSearchNPUOp): - def init_data(self): self.beam_size = 2 self.is_accumulated = True @@ -88,13 +89,15 @@ class TestBeamSearchNPUOp2(TestBeamSearchNPUOp): self.lod = [[2, 2], [1, 1, 1, 1]] self.out_lod = [[2, 2], [2, 0, 1, 1]] self.offset_lod = [[0, 2, 4], [0, 2, 2, 3, 4]] - self.score = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + self.score = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32') self.selected_ids = np.array([4, 2, 3, 1])[:, np.newaxis] self.selected_scores = np.array([0.6, 0.9, 0.9, 0.7])[:, np.newaxis] @@ -102,7 +105,6 @@ class TestBeamSearchNPUOp2(TestBeamSearchNPUOp): class TestBeamSearchNPUOp3(TestBeamSearchNPUOp): - def 
init_data(self): # end_id = 0 self.beam_size = 2 @@ -112,13 +114,15 @@ class TestBeamSearchNPUOp3(TestBeamSearchNPUOp): self.lod = [[2, 2], [1, 1, 1, 1]] self.out_lod = [[2, 2], [1, 1, 0, 2]] self.offset_lod = [[0, 2, 4], [0, 1, 2, 2, 4]] - self.score = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.6, 0.7], - ], - dtype='float32') + self.score = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.6, 0.7], + ], + dtype='float32', + ) self.pre_score = np.array([[0.1], [1.2], [0.5], [0.4]], dtype='float32') self.selected_ids = np.array([2, 0, 8, 1])[:, np.newaxis] self.selected_scores = np.array([0.9, 1.2, 0.6, 0.7])[:, np.newaxis] @@ -126,7 +130,6 @@ class TestBeamSearchNPUOp3(TestBeamSearchNPUOp): class TestBeamSearchNPUOp4(TestBeamSearchNPUOp): - def init_data(self): # is_accumulated = False self.beam_size = 2 @@ -136,22 +139,24 @@ class TestBeamSearchNPUOp4(TestBeamSearchNPUOp): self.lod = [[2, 2], [1, 1, 1, 1]] self.out_lod = [[2, 2], [0, 2, 1, 1]] self.offset_lod = [[0, 2, 4], [0, 0, 2, 3, 4]] - self.score = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + self.score = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) self.pre_score = np.array([[0.1], [2.2], [0.3], [0.4]], dtype='float32') self.selected_ids = np.array([7, 3, 3, 1])[:, np.newaxis] - self.selected_scores = np.array([1.50685, 0.996027, 0.194639, - 0.043325])[:, np.newaxis] + self.selected_scores = np.array( + [1.50685, 0.996027, 0.194639, 0.043325] + )[:, np.newaxis] self.parent_idx = np.array([1, 1, 2, 3]) class TestBeamSearchNPUOp5(TestBeamSearchNPUOp): - def init_data(self): # beam_size = 1 self.beam_size = 1 @@ -161,13 +166,15 @@ class TestBeamSearchNPUOp5(TestBeamSearchNPUOp): self.lod = [[1, 1, 1, 1], [1, 1, 1, 1]] self.out_lod = [[1, 1, 1, 1], [1, 1, 1, 1]] self.offset_lod = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]] - self.score = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + self.score = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32') self.selected_ids = np.array([2, 7, 3, 1])[:, np.newaxis] self.selected_scores = np.array([0.9, 0.5, 0.9, 0.7])[:, np.newaxis] diff --git a/python/paddle/fluid/tests/unittests/npu/test_bilinear_interp_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_bilinear_interp_v2_op_npu.py index 5bcd736f5405b05936a32431dc52e358130ee6cf..5429125b4ef357bb7e351d1a41443287f3efd99f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_bilinear_interp_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_bilinear_interp_v2_op_npu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestBilinearInterpOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -53,7 +52,7 @@ class TestBilinearInterpOp(OpTest): scale_w = 0 if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: scale_h = scale_w = float(self.scale) if isinstance(self.scale, list) and len(self.scale) == 1: scale_w = scale_h = self.scale[0] @@ -66,10 +65,18 @@ class TestBilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, scale_w, scale_h, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode, - self.data_layout) + output_np = bilinear_interp_np( + 
input_np, + out_h, + out_w, + scale_w, + scale_h, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: @@ -83,11 +90,11 @@ class TestBilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: self.scale = [self.scale] if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] @@ -106,13 +113,19 @@ class TestBilinearInterpOp(OpTest): output_names = ['Out'] no_grad_set = set() cpu_place = fluid.CPUPlace() - cpu_grads = self._get_gradient(inputs_to_check, cpu_place, output_names, - no_grad_set) - npu_grads = self._get_gradient(inputs_to_check, self.place, - output_names, no_grad_set) - self._assert_is_close(cpu_grads, npu_grads, inputs_to_check, - self.max_relative_error, - "Gradient Check between places") + cpu_grads = self._get_gradient( + inputs_to_check, cpu_place, output_names, no_grad_set + ) + npu_grads = self._get_gradient( + inputs_to_check, self.place, output_names, no_grad_set + ) + self._assert_is_close( + cpu_grads, + npu_grads, + inputs_to_check, + self.max_relative_error, + "Gradient Check between places", + ) def init_test_case(self): self.interp_method = 'bilinear' @@ -127,7 +140,6 @@ class TestBilinearInterpOp(OpTest): class TestBilinearInterpCaseFP16(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCaseFP16, self).init_test_case() self.dtype = 'float16' @@ -135,70 +147,63 @@ class TestBilinearInterpCaseFP16(TestBilinearInterpOp): class TestBilinearInterpCase1(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase1, self).init_test_case() self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 class TestBilinearInterpCase2(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase2, self).init_test_case() self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 class TestBilinearInterpCase3(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase3, self).init_test_case() self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 class TestBilinearInterpCase4(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase4, self).init_test_case() self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") class TestBilinearInterpCase5(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase5, self).init_test_case() self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") class TestBilinearInterpCase6(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase6, self).init_test_case() self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([65, 33]).astype("int32") class TestBilinearInterpCase7(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpCase7, self).init_test_case() self.input_shape = [1, 1, 32, 64] @@ -208,81 +213,72 @@ class TestBilinearInterpCase7(TestBilinearInterpOp): class TestBilinearInterpSame(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpSame, self).init_test_case() self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 class TestBilinearInterpActualShape(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpActualShape, self).init_test_case() self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") class TestBilinearInterpDataLayout(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpDataLayout, self).init_test_case() self.input_shape = [2, 5, 5, 3] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.data_layout = "NHWC" class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestBilinearInterpWithMethod2(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestBilinearInterpWithMethod3(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestBilinearInterpScale1(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpScale1, self).init_test_case() self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 class TestBilinearInterpScale2(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpScale2, self).init_test_case() self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 1. 
+ self.scale = 1.0 class TestBilinearInterpZero(TestBilinearInterpOp): - def init_test_case(self): super(TestBilinearInterpZero, self).init_test_case() self.input_shape = [2, 3, 5, 7] diff --git a/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py index 6655c8f09f601172764ab88150edf2d340fce2bc..a8409474e066b69b749c29bb3f8ffda7c6017a7a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py @@ -40,9 +40,11 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): pb_y = pb_y.reshape(shape) if pb_v.ndim == 2: - var_shape = (1, pb_v.shape[0], - pb_v.shape[1]) if axis == 0 else (pb_v.shape[0], 1, - pb_v.shape[1]) + var_shape = ( + (1, pb_v.shape[0], pb_v.shape[1]) + if axis == 0 + else (pb_v.shape[0], 1, pb_v.shape[1]) + ) pb_v = pb_v.reshape(var_shape) if pb_v.ndim == 1: tb_x = pb_v[0] * t_box[:, :, 0] * pb_w + pb_x @@ -99,20 +101,24 @@ def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0): cur_offset = 0 for i in range(len(lod)): - if (code_type == "encode_center_size"): - box_encoder(t_box[cur_offset:(cur_offset + lod[i]), :], p_box, pb_v, - output_box[cur_offset:(cur_offset + lod[i]), :, :], - norm) - elif (code_type == "decode_center_size"): + if code_type == "encode_center_size": + box_encoder( + t_box[cur_offset : (cur_offset + lod[i]), :], + p_box, + pb_v, + output_box[cur_offset : (cur_offset + lod[i]), :, :], + norm, + ) + elif code_type == "decode_center_size": box_decoder(t_box, p_box, pb_v, output_box, norm, axis) cur_offset += lod[i] return output_box -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestBoxCoderOp(OpTest): - def setUp(self): self.op_type = "box_coder" self.set_npu() @@ -143,10 +149,10 @@ class TestBoxCoderOp(OpTest): def set_inputs(self): self.inputs = {} - assert (self.code_type in ['decode_center_size', 'encode_center_size']) - assert (self.axis in [0, 1]) + assert self.code_type in ['decode_center_size', 'encode_center_size'] + assert self.axis in [0, 1] if self.code_type == 'decode_center_size': - assert (not self.use_variance or not self.without_prior_box_var) + assert not self.use_variance or not self.without_prior_box_var self.prior_box = np.random.random((self.M, 4)).astype(self.dtype) @@ -156,50 +162,60 @@ class TestBoxCoderOp(OpTest): if self.without_prior_box_var: self.prior_box_var = np.ones((self.M, 4)).astype(self.dtype) else: - self.prior_box_var = np.random.random( - (self.M, 4)).astype(self.dtype) + self.prior_box_var = np.random.random((self.M, 4)).astype( + self.dtype + ) if self.axis == 0: - self.target_box = np.random.random( - (self.N, self.M, 4)).astype(self.dtype) + self.target_box = np.random.random((self.N, self.M, 4)).astype( + self.dtype + ) else: - self.target_box = np.random.random( - (self.M, self.N, 4)).astype(self.dtype) + self.target_box = np.random.random((self.M, self.N, 4)).astype( + self.dtype + ) self.inputs['PriorBox'] = self.prior_box self.inputs['TargetBox'] = self.target_box - if (not self.use_variance and not self.without_prior_box_var): + if not self.use_variance and not self.without_prior_box_var: self.inputs['PriorBoxVar'] = self.prior_box_var else: - #encode_center_size + # encode_center_size self.prior_box = np.random.random((self.M, 4)).astype(self.dtype) if self.use_variance: 
self.prior_box_var = np.random.random(4).astype(self.dtype) else: - self.prior_box_var = np.random.random( - (self.M, 4)).astype(self.dtype) + self.prior_box_var = np.random.random((self.M, 4)).astype( + self.dtype + ) self.target_box = np.random.random((self.N, 4)).astype(self.dtype) self.inputs['PriorBox'] = self.prior_box - #self.inputs['PriorBoxVar'] = self.prior_box_var + # self.inputs['PriorBoxVar'] = self.prior_box_var self.inputs['TargetBox'] = (self.target_box, self.lod) - if (not self.use_variance): + if not self.use_variance: self.inputs['PriorBoxVar'] = self.prior_box_var def set_attrs(self): self.attrs = { 'code_type': self.code_type, - 'box_normalized': self.box_normalized + 'box_normalized': self.box_normalized, } if self.use_variance: self.attrs['variance'] = self.prior_box_var.astype( - np.float64).flatten() + np.float64 + ).flatten() if self.axis != 0: self.attrs['axis'] = self.axis def set_outputs(self): - output_box = batch_box_coder(self.prior_box, self.prior_box_var, - self.target_box, self.lod[0], - self.code_type, self.box_normalized, - self.axis) + output_box = batch_box_coder( + self.prior_box, + self.prior_box_var, + self.target_box, + self.lod[0], + self.code_type, + self.box_normalized, + self.axis, + ) self.outputs = {'OutputBox': output_box.astype(self.dtype)} def test_check_output(self): @@ -207,7 +223,6 @@ class TestBoxCoderOp(OpTest): class TestBoxCoderOpWithoutBoxVar(TestBoxCoderOp): - def set_init_config(self): super(TestBoxCoderOpWithoutBoxVar, self).set_init_config() self.without_prior_box_var = True @@ -215,7 +230,6 @@ class TestBoxCoderOpWithoutBoxVar(TestBoxCoderOp): class TestBoxCoderOpWithLoD(TestBoxCoderOp): - def set_init_config(self): super(TestBoxCoderOpWithLoD, self).set_init_config() self.M = 20 @@ -226,28 +240,24 @@ class TestBoxCoderOpWithLoD(TestBoxCoderOp): class TestBoxCoderOpWithLoDWithVariance(TestBoxCoderOpWithLoD): - def set_init_config(self): super(TestBoxCoderOpWithLoDWithVariance, self).set_init_config() self.use_variance = True class TestBoxCoderOpWithAxis(TestBoxCoderOp): - def set_init_config(self): super(TestBoxCoderOpWithAxis, self).set_init_config() self.axis = 1 class TestBoxCoderOpWithVariance(TestBoxCoderOp): - def set_init_config(self): super(TestBoxCoderOpWithVariance, self).set_init_config() self.use_variance = True class TestBoxCoderOpFP16(TestBoxCoderOp): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_c_embedding_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_c_embedding_op_npu.py index fb9447cc2d4e3dc10fe21dd00b0c7f844dc09195..eaceefe7e6d9375a70cb57dd48de5d2ef0730393 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_c_embedding_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_c_embedding_op_npu.py @@ -21,7 +21,11 @@ from op_test import OpTest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid.tests.unittests.c_embedding_op_base import TestCEmbeddingCPU, TestCEmbeddingOpBase, TestCEmbeddingOpFP32 +from paddle.fluid.tests.unittests.c_embedding_op_base import ( + TestCEmbeddingCPU, + TestCEmbeddingOpBase, + TestCEmbeddingOpFP32, +) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_c_identity_npu.py b/python/paddle/fluid/tests/unittests/npu/test_c_identity_npu.py index 5701739c883bed12545882d3603f45d9accc8f6e..381fd5baa4632c149a604c478ab293f59a2732be 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_c_identity_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_c_identity_npu.py @@ -23,15 +23,14 @@ paddle.enable_static() class TestIdentityOp(TestDistBase): - def _setup_config(self): pass def test_identity(self, col_type="identity"): dist_env = os.environ - self.check_with_place("collective_identity_op_npu.py", - col_type, - need_envs=dist_env) + self.check_with_place( + "collective_identity_op_npu.py", col_type, need_envs=dist_env + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py index 4b3e877fef12c159e7947eb8f5a37767d782d82e..dfc651074632ae0a3e602c023a62438631f8ef98 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_cast_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 @skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.") class TestCast1(OpTest): - def setUp(self): self.set_npu() self.op_type = "cast" @@ -40,7 +39,7 @@ class TestCast1(OpTest): self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } def set_npu(self): @@ -52,7 +51,6 @@ class TestCast1(OpTest): @skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.") class TestCast2(OpTest): - def setUp(self): self.set_npu() self.op_type = "cast" @@ -64,7 +62,7 @@ class TestCast2(OpTest): self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP16), - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } def set_npu(self): @@ -76,7 +74,6 @@ class TestCast2(OpTest): @skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.") class TestCast3(OpTest): - def setUp(self): self.set_npu() self.op_type = "cast" @@ -88,7 +85,7 @@ class TestCast3(OpTest): self.attrs = { 'in_dtype': int(core.VarDesc.VarType.INT32), - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } def set_npu(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_clip_by_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_clip_by_norm_op_npu.py index a1c397796cc0f269ed832b7c096258ea9d9ee33c..bff2e674e54255d6d94493b5ab42e86124df5f9d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_clip_by_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_clip_by_norm_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestClipByNormOp(OpTest): - def setUp(self): self.set_npu() self.max_relative_error = 0.006 @@ -55,7 +54,7 @@ class TestClipByNormOp(OpTest): self.check_output_with_place(self.place) def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1.0 def init_dtype(self): @@ -63,28 +62,24 @@ class TestClipByNormOp(OpTest): class TestCase1(TestClipByNormOp): - def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1e20 class TestCase2(TestClipByNormOp): - def initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestCase3(TestClipByNormOp): - def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 class TestClipByNormOpFp16(TestClipByNormOp): - def init_dtype(self): self.dtype = np.float16 @@ -93,21 +88,18 @@ class TestClipByNormOpFp16(TestClipByNormOp): class TestClipByNormOpFp16Case1(TestClipByNormOpFp16): - def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1e20 class TestClipByNormOpFp16Case2(TestClipByNormOpFp16): - def 
initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestClipByNormOpFp16Case3(TestClipByNormOpFp16): - def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 diff --git a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py index 3c6b6cb8949ded5f3cdc331decac5ac802bdb382..8b13546d9a2852009dfba4744b5bdfaaac07d3d0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py @@ -24,7 +24,6 @@ from op_test import OpTest class TestClipOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -75,7 +74,6 @@ class TestClipOp(OpTest): class TestCase1(TestClipOp): - def initTestCase(self): self.shape = (8, 16, 8) self.max = 0.7 @@ -83,7 +81,6 @@ class TestCase1(TestClipOp): class TestCase2(TestClipOp): - def initTestCase(self): self.shape = (8, 16) self.max = 1.0 @@ -91,7 +88,6 @@ class TestCase2(TestClipOp): class TestCase3(TestClipOp): - def initTestCase(self): self.shape = (4, 8, 16) self.max = 0.7 @@ -99,7 +95,6 @@ class TestCase3(TestClipOp): class TestCase4(TestClipOp): - def initTestCase(self): self.shape = (4, 8, 8) self.max = 0.7 @@ -109,7 +104,6 @@ class TestCase4(TestClipOp): class TestCase5(TestClipOp): - def initTestCase(self): self.shape = (4, 8, 16) self.max = 0.5 @@ -117,7 +111,6 @@ class TestCase5(TestClipOp): class TestClipOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -137,7 +130,6 @@ class TestClipOpError(unittest.TestCase): class TestClipAPI(unittest.TestCase): - def _executed_api(self, x, min=None, max=None): return paddle.clip(x, min, max) @@ -149,8 +141,11 @@ class TestClipAPI(unittest.TestCase): min = fluid.data(name='min', shape=[1], dtype='float32') max = fluid.data(name='max', shape=[1], dtype='float32') - place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) out_1 = self._executed_api(images, min=min, max=max) @@ -159,7 +154,7 @@ class TestClipAPI(unittest.TestCase): out_4 = self._executed_api(images, max=0.7) out_5 = self._executed_api(images, min=min) out_6 = self._executed_api(images, max=max) - out_7 = self._executed_api(images, max=-1.) 
+ out_7 = self._executed_api(images, max=-1.0) out_8 = self._executed_api(images) res1, res2, res3, res4, res5, res6, res7, res8 = exe.run( @@ -167,9 +162,10 @@ class TestClipAPI(unittest.TestCase): feed={ "image": data, "min": np.array([0.2]).astype('float32'), - "max": np.array([0.8]).astype('float32') + "max": np.array([0.8]).astype('float32'), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) np.testing.assert_allclose(res1, data.clip(0.2, 0.8)) np.testing.assert_allclose(res2, data.clip(0.2, 0.9)) @@ -183,8 +179,11 @@ class TestClipAPI(unittest.TestCase): def test_clip_dygraph(self): paddle.disable_static() - place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) paddle.disable_static(place) data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') @@ -212,7 +211,6 @@ class TestClipAPI(unittest.TestCase): class TestInplaceClipAPI(TestClipAPI): - def _executed_api(self, x, min=None, max=None): return x.clip_(min, max) diff --git a/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py index ec257b84d437591c161a2e2aed6331edf4ebbde2..8f6ef521070487ca117310eec116223c23f8283f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_coalesce_tensor_op_npu.py @@ -28,7 +28,6 @@ alignment = 512 class TestAllocContinuousSpace(OpTest): - def setUp(self): self.__class__.use_npu = True self.op_type = "coalesce_tensor" @@ -39,7 +38,8 @@ class TestAllocContinuousSpace(OpTest): self.set_constant = attrs["set_constant"] self.Inputs = self.init_input() self.Outputs, self.FusedOutput = self.init_output( - self.Inputs, self.set_constant, self.constant) + self.Inputs, self.set_constant, self.constant + ) self.inputs = {'Input': self.Inputs} self.attrs = attrs self.outputs = {'Output': self.Outputs, 'FusedOutput': self.FusedOutput} @@ -59,7 +59,7 @@ class TestAllocContinuousSpace(OpTest): "set_constant": False, "constant": 0.0, "use_align": True, - "dtype": self.fluid_dtype + "dtype": self.fluid_dtype, } def init_output(self, input_list, set_constant, constant): @@ -85,7 +85,6 @@ class TestAllocContinuousSpace(OpTest): class TestAllocContinuousSpace2(TestAllocContinuousSpace): - def init_attr(self): return { "copy_data": True, @@ -93,7 +92,7 @@ class TestAllocContinuousSpace2(TestAllocContinuousSpace): "constant": 0.5, "use_align": True, "dtype": self.fluid_dtype, - "user_defined_size_of_dtype": 2 + "user_defined_size_of_dtype": 2, } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py index fbbb64aaf078b787fc0874e88300292aae1307b5..6b7e056a7f86a6832be2899dd4e29abbf64f2fd2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_collective_base_npu.py @@ -29,10 +29,10 @@ from paddle.fluid import core class TestCollectiveRunnerBase(object): - def get_model(self, train_prog, startup_prog): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def wait_server_ready(self, endpoints): assert not isinstance(endpoints, str) @@ -41,13 +41,15 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -55,43 +57,50 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break + # endpoints should be ["ip1:port1","ip2:port2"] -#endpoints should be ["ip1:port1","ip2:port2"] - - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - hccl_id_var = block.create_var(name=nameGen.generate('hccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_hccl_id', - inputs={}, - outputs={'Out': hccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - block.append_op(type='c_comm_init_hccl', - inputs={'X': hccl_id_var}, - outputs={}, - attrs={ - 'rank': rank, - 'ring_id': self.global_ring_id, - 'device_id': int(os.getenv("FLAGS_selected_npus")), - 'rank_ids': nranks - }) + hccl_id_var = block.create_var( + name=nameGen.generate('hccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_hccl_id', + inputs={}, + outputs={'Out': hccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + block.append_op( + type='c_comm_init_hccl', + inputs={'X': hccl_id_var}, + outputs={}, + attrs={ + 'rank': rank, + 'ring_id': self.global_ring_id, + 'device_id': int(os.getenv("FLAGS_selected_npus")), + 'rank_ids': nranks, + }, + ) def run_trainer(self, args): train_prog = fluid.Program() @@ -100,8 +109,9 @@ class TestCollectiveRunnerBase(object): rank = args["trainerid"] current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) self.rank = rank result = self.get_model(train_prog, startup_prog) device_id = int(os.getenv("FLAGS_selected_npus", "0")) @@ -110,9 +120,9 @@ class TestCollectiveRunnerBase(object): exe.run(startup_prog) np.random.seed(os.getpid()) indata = np.random.random((10, 1000)) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=[result.name]) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=[result.name] + ) sys.stdout.buffer.write(pickle.dumps(out)) @@ -134,19 +144,20 @@ from contextlib import closing class 
TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -159,7 +170,7 @@ class TestDistBase(unittest.TestCase): def _run_cluster(self, model_file, envs): worker_endpoints = self._ps_endpoints.split(",") w0_ep, w1_ep = worker_endpoints - #print("w0_ep:",w0_ep," w1_ep:",w1_ep) + # print("w0_ep:",w0_ep," w1_ep:",w1_ep) env0 = { "FLAGS_selected_npus": "0", "PADDLE_TRAINER_ID": "0", @@ -175,7 +186,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) tr_cmd = "%s %s" @@ -183,16 +194,20 @@ class TestDistBase(unittest.TestCase): tr1_cmd = tr_cmd % (self._python_interp, model_file) tr0_pipe = open("/tmp/tr0_err.log", "wb") tr1_pipe = open("/tmp/tr1_err.log", "wb") - #print(tr0_cmd) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) - - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + # print(tr0_cmd) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) + + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -201,8 +216,12 @@ class TestDistBase(unittest.TestCase): # close trainer file tr0_pipe.close() tr1_pipe.close() - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) def check_with_place(self, model_file, col_type, need_envs={}): diff --git a/python/paddle/fluid/tests/unittests/npu/test_collective_process_group_hccl.py b/python/paddle/fluid/tests/unittests/npu/test_collective_process_group_hccl.py index f26d33840ae881fee03d3dd07d20314821d3af94..181ba75c11b7ea83407a9c375d360c9d924826c5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_collective_process_group_hccl.py +++ b/python/paddle/fluid/tests/unittests/npu/test_collective_process_group_hccl.py @@ -20,7 +20,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestProcessGroup(TestMultipleGpus): - def test_process_group_nccl(self): self.run_mnist_2gpu('process_group_hccl.py') diff --git a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py index b31b805fb771f1b453c211a01a68b8ed492fa56b..4fd0be14c12b54b22178543eb5dddf8df481dfb5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py @@ -24,9 +24,7 @@ from paddle.fluid import Program, program_guard def create_test_class(op_type, typename, callback): - class Cls(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -77,46 +75,42 @@ def create_test_class(op_type, typename, callback): def 
test_broadcast_api_1(self): paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.static.data(name='x', - shape=[1, 2, 1, 3], - dtype=typename) - y = paddle.static.data(name='y', - shape=[1, 2, 3], - dtype=typename) + x = paddle.static.data( + name='x', shape=[1, 2, 1, 3], dtype=typename + ) + y = paddle.static.data( + name='y', shape=[1, 2, 3], dtype=typename + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename) input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") def test_broadcast_api_2(self): paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.static.data(name='x', - shape=[1, 2, 3], - dtype=typename) - y = paddle.static.data(name='y', - shape=[1, 2, 1, 3], - dtype=typename) + x = paddle.static.data( + name='x', shape=[1, 2, 3], dtype=typename + ) + y = paddle.static.data( + name='y', shape=[1, 2, 1, 3], dtype=typename + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) input_x = np.arange(0, 6).reshape((1, 2, 3)).astype(typename) input_y = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") @@ -131,11 +125,9 @@ def create_test_class(op_type, typename, callback): input_x = np.arange(0, 5).reshape((5)).astype(typename) input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(typename) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) @unittest.skipIf(typename == 'float16', "float16 is not supported now") diff --git a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py index a9c43a868f564877ac2d3498f6b49f9941bbf329..301ce9c47736f1a77068e96c4e0fc0f145a94a1e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_concat_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestConcatOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "concat" @@ -43,8 +42,9 @@ class TestConcatOp(OpTest): self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } def set_npu(self): @@ -69,7 +69,6 @@ class TestConcatOp(OpTest): class TestConcatOp2(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -78,9 +77,9 @@ class TestConcatOp2(TestConcatOp): @skip_check_grad_ci( - reason="The function 'check_grad' for large inputs is too slow.") + reason="The function 
'check_grad' for large inputs is too slow." +) class TestConcatOp3(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype) self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype) @@ -92,11 +91,9 @@ class TestConcatOp3(TestConcatOp): @skip_check_grad_ci( - reason= - "This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." + reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." ) class TestConcatOp4(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -108,7 +105,6 @@ class TestConcatOp4(TestConcatOp): class TestConcatOp5(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype) self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype) @@ -116,11 +112,9 @@ class TestConcatOp5(TestConcatOp): self.axis = -3 -#----------------Concat Fp16---------------- +# ----------------Concat Fp16---------------- def create_test_fp16(parent): - class TestConcatFp16(parent): - def init_dtype(self): self.dtype = np.float16 @@ -136,11 +130,9 @@ create_test_fp16(TestConcatOp4) create_test_fp16(TestConcatOp5) -#----------------Concat Int64---------------- +# ----------------Concat Int64---------------- def create_test_int64(parent): - class TestConcatInt64(parent): - def init_dtype(self): self.dtype = np.int64 @@ -179,9 +171,9 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): with fluid.program_guard(self.program): input = fluid.layers.assign(self.x) tensor_array = fluid.layers.create_array(dtype='float32') - zero = fluid.layers.fill_constant(shape=[1], - value=0, - dtype="int64") + zero = fluid.layers.fill_constant( + shape=[1], value=0, dtype="int64" + ) for i in range(self.iter_num): fluid.layers.array_write(input, zero + i, tensor_array) @@ -217,7 +209,8 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_allclose( - res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis)) + res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_depthwise_conv_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_depthwise_conv_npu.py index 0161f6bcdce70262d6b6da3501e6b94f3c90eb13..537912edb46068a9643c2760dfef4a9f41e80242 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_depthwise_conv_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_depthwise_conv_npu.py @@ -31,9 +31,7 @@ SEED = 2021 def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NHWC" @@ -47,9 +45,7 @@ def create_test_channel_last_class(parent): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -60,9 +56,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -73,9 +67,7 @@ def create_test_padding_VALID_class(parent): def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_data_type(self): self.dtype = 
np.float16 @@ -85,7 +77,6 @@ def create_test_fp16_class(parent): class TestDepthwiseConvNPU(OpTest): - def setUp(self): self.set_npu() self.op_type = "depthwise_conv2d" @@ -97,21 +88,26 @@ class TestDepthwiseConvNPU(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups, - conv2d_param, "EXPLICIT", - self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + "EXPLICIT", + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { @@ -143,41 +139,59 @@ class TestDepthwiseConvNPU(OpTest): def test_check_grad(self): if self.dilations[0] == 1 and self.dilations[1] == 1: if self.dtype == np.float16: - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.9, + ) else: - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter']), - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + no_grad_set=set(['Filter']), + max_relative_error=0.9, + ) else: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter']), - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + no_grad_set=set(['Filter']), + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dilations[0] == 1 and self.dilations[1] == 1: if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + max_relative_error=0.9, + ) else: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def init_data_format(self): self.data_format = "NCHW" @@ -190,7 +204,6 @@ class TestDepthwiseConvNPU(OpTest): class TestDepthwiseConvNPU2(TestDepthwiseConvNPU): - def init_test_case(self): self.pad = [1, 1] self.dilations = [1, 1] @@ -203,7 +216,6 @@ class TestDepthwiseConvNPU2(TestDepthwiseConvNPU): class TestDepthwiseConvNPU3(TestDepthwiseConvNPU): - def init_test_case(self): self.pad = [1, 1] self.dilations = [1, 1] @@ -216,7 +228,6 @@ class TestDepthwiseConvNPU3(TestDepthwiseConvNPU): class TestDepthwiseConvNPU4(TestDepthwiseConvNPU): - def init_test_case(self): self.pad = [1, 1] 
self.dilations = [1, 1] @@ -229,7 +240,6 @@ class TestDepthwiseConvNPU4(TestDepthwiseConvNPU): class TestDepthwiseConvNPU_Padding(OpTest): - def setUp(self): self.op_type = "depthwise_conv2d" self.dtype = np.float32 @@ -243,21 +253,25 @@ class TestDepthwiseConvNPU_Padding(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups, - conv2d_param, - self.padding_algorithm, - self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { @@ -266,7 +280,7 @@ class TestDepthwiseConvNPU_Padding(OpTest): 'padding_algorithm': self.padding_algorithm, 'groups': self.groups, 'dilations': self.dilations, - 'data_format': self.data_format + 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -289,40 +303,58 @@ class TestDepthwiseConvNPU_Padding(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=1.2) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=1.2, + ) else: - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.7, - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.7, + no_grad_set=set(['Filter']), + ) else: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.8, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.8, + no_grad_set=set(['Input']), + ) else: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def init_data_format(self): self.data_format = "NCHW" @@ -339,7 +371,6 @@ class TestDepthwiseConvNPU_Padding(OpTest): class TestDepthwiseConvNPU2_Padding(TestDepthwiseConvNPU_Padding): - def init_test_case(self): self.pad = [1, 1, 0, 1] self.dilations = [1, 1] @@ -356,7 +387,6 @@ class TestDepthwiseConvNPU2_Padding(TestDepthwiseConvNPU_Padding): 
class TestDepthwiseConvNPU3_Padding(TestDepthwiseConvNPU_Padding): - def init_test_case(self): self.pad = [1, 1, 0, 1] self.dilations = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_npu.py index 974607bc2522663e6662e78ed8d9b408b009ac98..d30a68ac9164606cb223ecfc161d4a081831e320 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv2d_op_npu.py @@ -28,9 +28,7 @@ paddle.enable_static() def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NHWC" @@ -44,9 +42,7 @@ def create_test_channel_last_class(parent): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -57,9 +53,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -70,9 +64,7 @@ def create_test_padding_VALID_class(parent): def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_dtype(self): self.dtype = np.float16 @@ -82,7 +74,6 @@ def create_test_fp16_class(parent): class TestConv2DOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -104,22 +95,24 @@ class TestConv2DOp(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, - filter, - self.groups, - conv2d_param, - data_format=self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + data_format=self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -134,24 +127,33 @@ class TestConv2DOp(OpTest): self.check_output_with_place(fluid.NPUPlace(0), atol=1e-2) def test_check_grad(self): - self.check_grad_with_place(fluid.NPUPlace(0), {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + fluid.NPUPlace(0), + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): - self.check_grad_with_place(fluid.NPUPlace(0), ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + fluid.NPUPlace(0), + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): - self.check_grad_with_place(fluid.NPUPlace(0), ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + fluid.NPUPlace(0), + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def init_test_case(self): self.pad = [0, 0] @@ -169,7 +171,6 @@ class TestConv2DOp(OpTest): class TestWithPad(TestConv2DOp): - def 
init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -180,7 +181,6 @@ class TestWithPad(TestConv2DOp): class TestWithStride(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -191,7 +191,6 @@ class TestWithStride(TestConv2DOp): class TestWithGroup(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -203,7 +202,6 @@ class TestWithGroup(TestConv2DOp): class TestWith1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -220,7 +218,6 @@ class TestWith1x1(TestConv2DOp): class TestWithDepthWise5x5(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -234,7 +231,6 @@ class TestWithDepthWise5x5(TestConv2DOp): class TestWithDepthWise7x7(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -248,7 +244,6 @@ class TestWithDepthWise7x7(TestConv2DOp): class TestWithDilation(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -265,7 +260,6 @@ class TestWithDilation(TestConv2DOp): class TestWithInput1x1Filter1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -279,7 +273,6 @@ class TestWithInput1x1Filter1x1(TestConv2DOp): class TestConv2DOp_v2(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -301,20 +294,24 @@ class TestConv2DOp_v2(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input, filter, self.groups, - conv2d_param, - self.padding_algorithm, - self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -331,39 +328,57 @@ class TestConv2DOp_v2(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(paddle.NPUPlace(0), {'Input', 'Filter'}, - 'Output', - max_relative_error=1.1) + self.check_grad_with_place( + paddle.NPUPlace(0), + {'Input', 'Filter'}, + 'Output', + max_relative_error=1.1, + ) else: - self.check_grad_with_place(paddle.NPUPlace(0), {'Input', 'Filter'}, - 'Output', - max_relative_error=0.02, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + paddle.NPUPlace(0), + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.02, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: - self.check_grad_with_place(paddle.NPUPlace(0), ['Input'], - 'Output', - max_relative_error=0.99, - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + paddle.NPUPlace(0), + ['Input'], + 'Output', + max_relative_error=0.99, + no_grad_set=set(['Filter']), + ) else: - self.check_grad_with_place(paddle.NPUPlace(0), ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + paddle.NPUPlace(0), + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == 
np.float16: - self.check_grad_with_place(paddle.NPUPlace(0), ['Filter'], - 'Output', - max_relative_error=0.99, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + paddle.NPUPlace(0), + ['Filter'], + 'Output', + max_relative_error=0.99, + no_grad_set=set(['Input']), + ) else: - self.check_grad_with_place(paddle.NPUPlace(0), ['Filter'], - 'Output', - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + paddle.NPUPlace(0), + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def init_test_case(self): self.pad = [0, 0] @@ -394,14 +409,12 @@ class TestConv2DOp_v2(OpTest): class TestConv2DOp_AsyPadding(TestConv2DOp_v2): - def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -415,7 +428,6 @@ class TestWithPad_AsyPadding(TestConv2DOp_v2): class TestWithStride_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW @@ -429,7 +441,6 @@ class TestWithStride_AsyPadding(TestConv2DOp_v2): class TestWithGroup_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 2] @@ -441,7 +452,6 @@ class TestWithGroup_AsyPadding(TestConv2DOp_v2): class TestWith1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -458,7 +468,6 @@ class TestWith1x1_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW @@ -478,7 +487,6 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW @@ -495,7 +503,6 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW @@ -512,7 +519,6 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): class TestWithDilation_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW @@ -532,7 +538,6 @@ class TestWithDilation_AsyPadding(TestConv2DOp_v2): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [100, 1, 1, 1] # NCHW diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py index ff66e35b0c68c345d921785e8bd2a5496644b089..d27c98b270bfb5bf59f425bd0bfeb5d853acacb0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestConv2DTransposeOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -62,15 +61,16 @@ class TestConv2DTransposeOp(OpTest): 'use_cudnn': False, 'is_test': False, 'use_mkldnn': False, - 'data_format': self.data_format + 'data_format': self.data_format, } if self.output_size is not None: self.attrs['output_size'] = self.output_size if len(self.output_padding) > 0: self.attrs['output_padding'] = self.output_padding - output 
= conv2dtranspose_forward_naive(input_, filter_, - self.attrs).astype(self.dtype) + output = conv2dtranspose_forward_naive( + input_, filter_, self.attrs + ).astype(self.dtype) self.outputs = {'Output': output} @@ -79,26 +79,34 @@ class TestConv2DTransposeOp(OpTest): def test_check_grad_no_input(self): if self.need_check_grad: - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.need_check_grad: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter']), - max_relative_error=0.006, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + no_grad_set=set(['Filter']), + max_relative_error=0.006, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad(self): if self.need_check_grad: - self.check_grad_with_place(self.place, - set(['Input', 'Filter']), - 'Output', - max_relative_error=0.02, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.02, + numeric_place=paddle.CPUPlace(), + ) def init_test_case(self): self.pad = [0, 0] @@ -117,7 +125,6 @@ class TestConv2DTransposeOp(OpTest): class TestWithSymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -129,14 +136,12 @@ class TestWithSymmetricPad(TestConv2DTransposeOp): class TestWithSymmetricPad_FP16(TestWithSymmetricPad): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithAsymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -148,14 +153,12 @@ class TestWithAsymmetricPad(TestConv2DTransposeOp): class TestWithAsymmetricPad_FP16(TestWithAsymmetricPad): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithSAMEPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [2, 1] self.dilations = [1, 2] @@ -167,14 +170,12 @@ class TestWithSAMEPad(TestConv2DTransposeOp): class TestWithSAMEPad_FP16(TestWithSAMEPad): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithVALIDPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -186,14 +187,12 @@ class TestWithVALIDPad(TestConv2DTransposeOp): class TestWithVALIDPad_FP16(TestWithVALIDPad): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithGroups(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -205,14 +204,12 @@ class TestWithGroups(TestConv2DTransposeOp): class TestWithGroups_FP16(TestWithGroups): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithStride(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -224,14 +221,12 @@ class TestWithStride(TestConv2DTransposeOp): class TestWithStride_FP16(TestWithStride): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithDilation(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -243,14 +238,12 @@ class TestWithDilation(TestConv2DTransposeOp): class TestWithDilation_FP16(TestWithDilation): - def 
init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithEvenUpsample(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -263,14 +256,12 @@ class TestWithEvenUpsample(TestConv2DTransposeOp): class TestWithEvenUpsample_FP16(TestWithEvenUpsample): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -283,14 +274,12 @@ class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): class TestWithEvenUpsampleOutputPadding_FP16(TestWithEvenUpsampleOutputPadding): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class Test_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -303,14 +292,12 @@ class Test_NHWC(TestConv2DTransposeOp): class Test_NHWC_FP16(Test_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -323,14 +310,12 @@ class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithSymmetricPad_NHWC_FP16(TestWithSymmetricPad_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -343,14 +328,12 @@ class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithAsymmetricPad_NHWC_FP16(TestWithAsymmetricPad_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithGroups_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -363,14 +346,12 @@ class TestWithGroups_NHWC(TestConv2DTransposeOp): class TestWithGroups_NHWC_FP16(TestWithGroups_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithStride_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -383,14 +364,12 @@ class TestWithStride_NHWC(TestConv2DTransposeOp): class TestWithStride_NHWC_FP16(TestWithStride_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithDilation_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -403,14 +382,12 @@ class TestWithDilation_NHWC(TestConv2DTransposeOp): class TestWithDilation_NHWC_FP16(TestWithDilation_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -424,14 +401,12 @@ class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC_FP16(TestWithEvenUpsample_NHWC): - def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -445,64 +420,75 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC_output_padding_FP16( - TestWithEvenUpsample_NHWC_output_padding): - + TestWithEvenUpsample_NHWC_output_padding +): def init_dtype(self): self.dtype = np.float16 self.need_check_grad = False class TestConv2DTransposeAPI(unittest.TestCase): 
- def test_case1(self): - data1 = fluid.layers.data(name='data1', - shape=[3, 5, 5], - dtype='float32') - data2 = fluid.layers.data(name='data2', - shape=[5, 5, 3], - dtype='float32') - out1 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - data_format='NCHW') - out2 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - data_format='NHWC') - out3 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding=[[0, 0], [1, 1], [1, 1], - [0, 0]], - data_format='NHWC') - out4 = fluid.layers.conv2d_transpose(input=data1, - groups=3, - num_filters=6, - filter_size=3, - padding=[[0, 0], [0, 0], [2, 1], - [0, 0]], - data_format='NCHW') - out5 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - padding='SAME', - data_format='NCHW') - out6 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding='VALID', - data_format='NHWC') - out7 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - output_size=[7, 7], - padding=[0, 0], - data_format='NHWC') + data1 = fluid.layers.data( + name='data1', shape=[3, 5, 5], dtype='float32' + ) + data2 = fluid.layers.data( + name='data2', shape=[5, 5, 3], dtype='float32' + ) + out1 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + data_format='NCHW', + ) + out2 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + data_format='NHWC', + ) + out3 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + data_format='NHWC', + ) + out4 = fluid.layers.conv2d_transpose( + input=data1, + groups=3, + num_filters=6, + filter_size=3, + padding=[[0, 0], [0, 0], [2, 1], [0, 0]], + data_format='NCHW', + ) + out5 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + padding='SAME', + data_format='NCHW', + ) + out6 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding='VALID', + data_format='NHWC', + ) + out7 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + output_size=[7, 7], + padding=[0, 0], + data_format='NHWC', + ) data1_np = np.random.random((2, 3, 5, 5)).astype("float32") data2_np = np.random.random((2, 5, 5, 3)).astype("float32") @@ -510,13 +496,12 @@ class TestConv2DTransposeAPI(unittest.TestCase): place = core.NPUPlace(0) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2, out3, out4, out5, out6, out7], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2, out3, out4, out5, out6, out7], + return_numpy=True, + ) self.assertIsNotNone(results[0]) self.assertIsNotNone(results[1]) self.assertIsNotNone(results[2]) @@ -527,10 +512,9 @@ class TestConv2DTransposeAPI(unittest.TestCase): class TestConv2DTransposeRepr(unittest.TestCase): - def test_case(self): paddle.disable_static(paddle.NPUPlace(0)) - x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) 
+ x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2) print(conv) y_var = conv(x_var) diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py index fcbfde79e3f4346f2baca7f206b1607bc381751a..81af05b156c86aba47d0a497707e2d36edcde32d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py @@ -29,9 +29,7 @@ paddle.enable_static() def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0, 0] self.padding_algorithm = "SAME" @@ -42,9 +40,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1, 1] self.padding_algorithm = "VALID" @@ -55,9 +51,7 @@ def create_test_padding_VALID_class(parent): def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NDHWC" @@ -71,9 +65,7 @@ def create_test_channel_last_class(parent): def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_dtype(self): self.dtype = np.float16 @@ -83,7 +75,6 @@ def create_test_fp16_class(parent): class TestConv3DOp(OpTest): - def setUp(self): self.op_type = "conv3d" self.set_npu() @@ -96,7 +87,7 @@ class TestConv3DOp(OpTest): conv3d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) @@ -110,14 +101,14 @@ class TestConv3DOp(OpTest): self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, 'paddings': self.pad, 'groups': self.groups, 'dilations': self.dilations, - 'data_format': self.data_format + 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -128,30 +119,39 @@ class TestConv3DOp(OpTest): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def set_npu(self): self.__class__.use_npu = True @@ -179,7 +179,6 @@ class TestConv3DOp(OpTest): class TestCase1(TestConv3DOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -193,7 +192,6 @@ class 
TestCase1(TestConv3DOp): class TestConv3DOp_2(OpTest): - def setUp(self): self.op_type = "conv3d" self.set_npu() @@ -209,18 +207,23 @@ class TestConv3DOp_2(OpTest): conv3d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = conv3d_forward_naive(input, filter, self.groups, conv3d_param, - self.padding_algorithm, - self.data_format).astype(self.dtype) + output = conv3d_forward_naive( + input, + filter, + self.groups, + conv3d_param, + self.padding_algorithm, + self.data_format, + ).astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -228,7 +231,7 @@ class TestConv3DOp_2(OpTest): 'padding_algorithm': self.padding_algorithm, 'groups': self.groups, 'dilations': self.dilations, - 'data_format': self.data_format + 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -239,30 +242,39 @@ class TestConv3DOp_2(OpTest): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + numeric_place=paddle.CPUPlace(), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - numeric_place=paddle.CPUPlace()) + self.check_grad_with_place( + self.place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + numeric_place=paddle.CPUPlace(), + ) def set_npu(self): self.__class__.use_npu = True @@ -296,7 +308,6 @@ class TestConv3DOp_2(OpTest): class TestConv3DOp_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 2] self.input_size = [2, 3, 4, 4, 4] # NCDHW @@ -310,7 +321,6 @@ class TestConv3DOp_AsyPadding(TestConv3DOp_2): class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 2] self.input_size = [2, 3, 4, 5, 5] # NCDHW @@ -324,7 +334,6 @@ class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2): class TestCase1_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 1] self.input_size = [2, 3, 4, 4, 4] # NCDHW @@ -339,196 +348,228 @@ class TestCase1_AsyPadding(TestConv3DOp_2): # --------- test python API --------------- class TestConv3DAPI(unittest.TestCase): - def test_api(self): - input_NDHWC = fluid.layers.data(name="input_NDHWC", - shape=[2, 5, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCDHW = fluid.layers.data(name="input_NCDHW", - shape=[2, 3, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - fluid.layers.conv3d(input=input_NDHWC, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=0, - dilation=[1, 1, 1], - 
groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[1, 2, 1, 0, 1, 0], - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]], - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NDHWC, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], - dilation=[1, 1, 1], - groups=1, - data_format="NDHWC") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding="SAME", - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding="VALID", - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") + input_NDHWC = fluid.layers.data( + name="input_NDHWC", + shape=[2, 5, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCDHW = fluid.layers.data( + name="input_NCDHW", + shape=[2, 3, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + fluid.layers.conv3d( + input=input_NDHWC, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=0, + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[1, 2, 1, 0, 1, 0], + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]], + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NDHWC, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], + dilation=[1, 1, 1], + groups=1, + data_format="NDHWC", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding="SAME", + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding="VALID", + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) class TestConv3DAPI_Error(unittest.TestCase): - def test_api(self): - input = fluid.layers.data(name="input", - shape=[2, 5, 5, 5, 4], - append_batch_size=False, - dtype="float32") + input = fluid.layers.data( + name="input", + shape=[2, 5, 5, 5, 4], + append_batch_size=False, + dtype="float32", + ) # ValueError: cudnn def run_1(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=1, - use_cudnn=[0], - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + use_cudnn=[0], + data_format="NCDHW", + ) self.assertRaises(ValueError, run_1) # ValueError: data_format def run_2(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=0, - dilation=[1, 1, 1], - groups=1, - use_cudnn=False, - data_format="NCHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=0, + dilation=[1, 1, 1], + groups=1, + 
use_cudnn=False, + data_format="NCHWC", + ) self.assertRaises(ValueError, run_2) # ValueError: padding def run_3(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding="SAMEE", - dilation=1, - groups=1, - use_cudnn=False, - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding="SAMEE", + dilation=1, + groups=1, + use_cudnn=False, + data_format="NCDHW", + ) self.assertRaises(ValueError, run_3) def run_4(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, - 1]], - dilation=1, - groups=1, - use_cudnn=False, - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]], + dilation=1, + groups=1, + use_cudnn=False, + data_format="NCDHW", + ) self.assertRaises(ValueError, run_4) def run_5(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=0, - stride=0, - padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, - 1]], - dilation=1, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=0, + stride=0, + padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]], + dilation=1, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data(name="x", - shape=[2, 5, 5, 5, -1], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name="x", + shape=[2, 5, 5, 5, -1], + append_batch_size=False, + dtype="float32", + ) def run_6(): - fluid.layers.conv3d(input=x, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=x, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_6) # ValueError: groups def run_7(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=3, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=3, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_7) # ValueError: filter num def run_8(): - fluid.layers.conv3d(input=input, - num_filters=0, - filter_size=0, - stride=0, - padding=0, - dilation=0, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=0, + filter_size=0, + stride=0, + padding=0, + dilation=0, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_8) diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv3d_transpose_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv3d_transpose_op_npu.py index da6966cf50a1e4b9c674a25db36c51a52f7492df..29113544c3b12f5349f51929b637eedbc512b5c7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv3d_transpose_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv3d_transpose_op_npu.py @@ -29,9 +29,10 @@ paddle.enable_static() def conv3dtranspose_forward_naive(input_, filter_, attrs): padding_algorithm = attrs['padding_algorithm'] if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. 
" - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if attrs['data_format'] == 'NHWC': input_ = np.transpose(input_, [0, 4, 1, 2, 3]) @@ -42,17 +43,21 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): out_c = f_out_c * groups sub_in_c = in_c // groups - stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ - 'dilations'] + stride, pad, dilations = ( + attrs['strides'], + attrs['paddings'], + attrs['dilations'], + ) def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, - kernel_size, - kernel_stride): + for input_size, filter_size, stride_size in zip( + input_shape, kernel_size, kernel_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -88,34 +93,50 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): for i in range(in_h): for j in range(in_w): for g in range(groups): - input_masked = input_[n, - g * sub_in_c:(g + 1) * sub_in_c, - d, i, j] # (c) - input_masked = np.reshape(input_masked, - (sub_in_c, 1, 1, 1)) + input_masked = input_[ + n, g * sub_in_c : (g + 1) * sub_in_c, d, i, j + ] # (c) + input_masked = np.reshape( + input_masked, (sub_in_c, 1, 1, 1) + ) input_masked = np.tile(input_masked, (1, f_d, f_h, f_w)) for k in range(f_out_c): - tmp_out = np.sum(input_masked * - filter_[g * sub_in_c:(g + 1) * - sub_in_c, k, :, :, :], - axis=0) + tmp_out = np.sum( + input_masked + * filter_[ + g * sub_in_c : (g + 1) * sub_in_c, + k, + :, + :, + :, + ], + axis=0, + ) d1, d2 = d * stride[0], d * stride[0] + d_bolck_d i1, i2 = i * stride[1], i * stride[1] + d_bolck_h j1, j2 = j * stride[2], j * stride[2] + d_bolck_w - out[n, g * f_out_c + k, d1:d2:dilations[0], - i1:i2:dilations[1], - j1:j2:dilations[2]] += tmp_out - - out = out[:, :, pad_d_0:out_d - pad_d_1, pad_h_0:out_h - pad_h_1, - pad_w_0:out_w - pad_w_1] + out[ + n, + g * f_out_c + k, + d1 : d2 : dilations[0], + i1 : i2 : dilations[1], + j1 : j2 : dilations[2], + ] += tmp_out + + out = out[ + :, + :, + pad_d_0 : out_d - pad_d_1, + pad_h_0 : out_h - pad_h_1, + pad_w_0 : out_w - pad_w_1, + ] if attrs['data_format'] == 'NHWC': out = np.transpose(out, [0, 2, 3, 4, 1]) return out class TestConv3DTransposeOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -141,11 +162,12 @@ class TestConv3DTransposeOp(OpTest): 'padding_algorithm': self.padding_algorithm, 'dilations': self.dilations, 'groups': self.groups, - 'data_format': self.data_format + 'data_format': self.data_format, } - output = conv3dtranspose_forward_naive(input_, filter_, - self.attrs).astype("float32") + output = conv3dtranspose_forward_naive( + input_, filter_, self.attrs + ).astype("float32") self.outputs = {'Output': output} @@ -166,7 +188,6 @@ class TestConv3DTransposeOp(OpTest): class TestWithSymmetricPad(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_input = True self.pad = [1, 1, 1] @@ -179,7 +200,6 @@ class TestWithSymmetricPad(TestConv3DTransposeOp): class TestWithAsymmetricPad(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 0, 1, 2] self.stride = [1, 1, 1] @@ -191,7 +211,6 @@ 
class TestWithAsymmetricPad(TestConv3DTransposeOp): class TestWithSAMEPad(TestConv3DTransposeOp): - def init_test_case(self): self.stride = [1, 1, 2] self.dilations = [1, 2, 1] @@ -203,7 +222,6 @@ class TestWithSAMEPad(TestConv3DTransposeOp): class TestWithVALIDPad(TestConv3DTransposeOp): - def init_test_case(self): self.stride = [2, 1, 1] self.dilations = [1, 1, 1] @@ -215,7 +233,6 @@ class TestWithVALIDPad(TestConv3DTransposeOp): class TestWithStride(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_filter = True self.pad = [1, 1, 1] @@ -228,7 +245,6 @@ class TestWithStride(TestConv3DTransposeOp): class TestWithDilation(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -240,7 +256,6 @@ class TestWithDilation(TestConv3DTransposeOp): class Test_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] diff --git a/python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py index f3e73b0c65e00be23738350858f3461142ecc1c6..1ab4edef710803db2fdb8b1109b67f34e3f9b07a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_cos_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestCos(OpTest): - def setUp(self): self.set_npu() self.op_type = "cos" @@ -55,7 +54,6 @@ class TestCos(OpTest): class TestCosFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "cos" @@ -82,7 +80,6 @@ class TestCosFp16(OpTest): class TestCosNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -97,9 +94,9 @@ class TestCosNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.cos(c) @@ -123,16 +120,17 @@ class TestCosNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_crop_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_crop_op_npu.py index 446ba19c2a49869ef3bbaf2b9e89eb30f3c5e67b..1ab9f21fdce8b6212e1518b31eb953b37abe6f6e 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_crop_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_crop_op_npu.py @@ -28,7 +28,6 @@ np.random.seed(10) class TestCropOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -41,7 +40,7 @@ class TestCropOp(OpTest): if self.crop_by_input: self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.crop_shape).astype(self.dtype) + 'Y': np.random.random(self.crop_shape).astype(self.dtype), } else: self.attrs['shape'] 
= self.crop_shape @@ -74,7 +73,6 @@ class TestCropOp(OpTest): class TestCase1(TestCropOp): - def initTestCase(self): self.x_shape = (16, 8, 32) self.crop_shape = [2, 2, 3] @@ -82,7 +80,6 @@ class TestCase1(TestCropOp): class TestCase2(TestCropOp): - def initTestCase(self): self.x_shape = (15, 8) self.crop_shape = [15, 8] @@ -90,7 +87,6 @@ class TestCase2(TestCropOp): class TestCase3(TestCropOp): - def initTestCase(self): self.x_shape = (4, 10) self.crop_shape = [2, 3] @@ -99,7 +95,6 @@ class TestCase3(TestCropOp): class TestCase4(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -107,7 +102,6 @@ class TestCase4(TestCropOp): class TestCase5(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -116,7 +110,6 @@ class TestCase5(TestCropOp): class TestCase6(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -127,7 +120,6 @@ class TestCase6(TestCropOp): class TestCase7(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -137,7 +129,6 @@ class TestCase7(TestCropOp): class TestCase8(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -146,7 +137,6 @@ class TestCase8(TestCropOp): class TestCase9(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -155,7 +145,6 @@ class TestCase9(TestCropOp): class TestCase10(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] diff --git a/python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py index 64cfef9ced6516f77be775a131510b8a6f2e3675..4f9badc230aed90df120782b6af50691109b53c3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_cumsum_op_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestCumsumOp(unittest.TestCase): - def run_cases(self): data_np = np.arange(12).reshape(3, 4) data = paddle.to_tensor(data_np) @@ -65,11 +64,17 @@ class TestCumsumOp(unittest.TestCase): place = fluid.NPUPlace(0) if use_npu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - out = exe.run(feed={'X': data_np}, - fetch_list=[ - y.name, y2.name, y3.name, y4.name, y5.name, - y6.name - ]) + out = exe.run( + feed={'X': data_np}, + fetch_list=[ + y.name, + y2.name, + y3.name, + y4.name, + y5.name, + y6.name, + ], + ) z = np.cumsum(data_np) np.testing.assert_allclose(z, out[0]) @@ -95,7 +100,6 @@ class TestCumsumOp(unittest.TestCase): class TestNPUCumSumOp1(OpTest): - def setUp(self): self.op_type = "cumsum" self.set_npu() @@ -119,18 +123,17 @@ class TestNPUCumSumOp1(OpTest): class TestNPUCumSumOp2(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': -1, 'reverse': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.outputs = { - 'Out': np.flip(np.flip(self.inputs['X'], axis=2).cumsum(axis=2), - axis=2) + 'Out': np.flip( + np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2 + ) } class TestNPUCumSumOp3(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 1} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} @@ -138,7 +141,6 @@ class TestNPUCumSumOp3(TestNPUCumSumOp1): class TestNPUCumSumOp4(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 0} self.inputs = {'X': np.random.random((5, 6, 
10)).astype(self.dtype)} @@ -146,107 +148,115 @@ class TestNPUCumSumOp4(TestNPUCumSumOp1): class TestNPUCumSumOp5(TestNPUCumSumOp1): - def init_testcase(self): self.inputs = {'X': np.random.random((5, 20)).astype(self.dtype)} self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} class TestNPUCumSumOp7(TestNPUCumSumOp1): - def init_testcase(self): self.inputs = {'X': np.random.random((100)).astype(self.dtype)} self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} class TestNPUCumSumExclusive1(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 65)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive2(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 888)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive3(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 888)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive4(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 3049)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumExclusive5(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 3096)).astype(self.dtype) self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=self.dtype), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=self.dtype), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } class TestNPUCumSumReverseExclusive(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': 2, 'reverse': True, "exclusive": True} a = np.random.random((4, 5, 6)).astype(self.dtype) self.inputs = {'X': a} a = np.flip(a, axis=2) self.outputs = { - 'Out': - np.concatenate( - (np.flip(a[:, :, :-1].cumsum(axis=2), - axis=2), np.zeros((4, 5, 1), dtype=self.dtype)), - axis=2) + 'Out': np.concatenate( + ( + np.flip(a[:, :, :-1].cumsum(axis=2), axis=2), + np.zeros((4, 5, 1), dtype=self.dtype), + ), + axis=2, + ) } class TestNPUCumSumWithFlatten1(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'flatten': True} self.inputs = {'X': np.random.random((5, 6)).astype(self.dtype)} @@ -254,31 +264,28 @@ class TestNPUCumSumWithFlatten1(TestNPUCumSumOp1): class TestNPUCumSumWithFlatten2(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = 
{'flatten': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.outputs = {'Out': self.inputs['X'].cumsum()} -#----------------Cumsum Int64---------------- +# ----------------Cumsum Int64---------------- class TestNPUCumSumOpInt64(TestNPUCumSumOp1): - def init_testcase(self): self.attrs = {'axis': -1, 'reverse': True} self.inputs = { 'X': np.random.randint(1, 10000, size=(5, 6, 10)).astype(self.dtype) } self.outputs = { - 'Out': np.flip(np.flip(self.inputs['X'], axis=2).cumsum(axis=2), - axis=2) + 'Out': np.flip( + np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2 + ) } def create_test_int64(parent): - class TestCumSumInt64(parent): - def init_dtype(self): self.dtype = np.int64 diff --git a/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py index c3bb0880299d62a9b7ac564b6859be491d614eb7..8c7ea643edc7479b73a8c15024408b4c04632e4a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py @@ -27,12 +27,11 @@ np.random.seed(2021) class TestNpuDensityPriorBoxOp(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() self.init_test_output() - #self.init_test_output2() + # self.init_test_output2() self.inputs = {'Input': self.input, 'Image': self.image} self.attrs = { @@ -44,7 +43,7 @@ class TestNpuDensityPriorBoxOp(OpTest): 'densities': self.densities, 'fixed_sizes': self.fixed_sizes, 'fixed_ratios': self.fixed_ratios, - 'flatten_to_2d': self.flatten_to_2d + 'flatten_to_2d': self.flatten_to_2d, } self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} @@ -89,19 +88,20 @@ class TestNpuDensityPriorBoxOp(OpTest): if len(self.fixed_sizes) > 0 and len(self.densities) > 0: for density in self.densities: if len(self.fixed_ratios) > 0: - self.num_priors += len(self.fixed_ratios) * (pow( - density, 2)) + self.num_priors += len(self.fixed_ratios) * ( + pow(density, 2) + ) self.offset = 0.5 self.atol = 1e-5 def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_h, - self.image_w)).astype(self.dtype) + (self.batch_size, self.image_channels, self.image_h, self.image_w) + ).astype(self.dtype) self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_h, - self.layer_w)).astype(self.dtype) + (self.batch_size, self.input_channels, self.layer_h, self.layer_w) + ).astype(self.dtype) def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -115,32 +115,56 @@ class TestNpuDensityPriorBoxOp(OpTest): c_x = (w + self.offset) * self.step_w c_y = (h + self.offset) * self.step_h # Generate density prior boxes with fixed size - for density, fixed_size in zip(self.densities, - self.fixed_sizes): - if (len(self.fixed_ratios) > 0): + for density, fixed_size in zip( + self.densities, self.fixed_sizes + ): + if len(self.fixed_ratios) > 0: for ar in self.fixed_ratios: shift = int(step_average / density) box_width_ratio = fixed_size * math.sqrt(ar) box_height_ratio = fixed_size / math.sqrt(ar) for di in range(density): for dj in range(density): - c_x_temp = c_x - step_average / 2.0 + shift / 2.0 + dj * shift - c_y_temp = c_y - step_average / 2.0 + shift / 2.0 + di * shift + c_x_temp = ( + c_x + - step_average / 2.0 + + shift / 2.0 + + dj * shift + ) + c_y_temp = ( + c_y + - step_average / 2.0 + + shift / 2.0 + + di * shift + ) out_boxes[h, w, idx, :] = 
[ - max((c_x_temp - box_width_ratio / 2.0) / - self.image_w, 0), - max((c_y_temp - box_height_ratio / 2.0) - / self.image_h, 0), - min((c_x_temp + box_width_ratio / 2.0) / - self.image_w, 1), - min((c_y_temp + box_height_ratio / 2.0) - / self.image_h, 1) + max( + (c_x_temp - box_width_ratio / 2.0) + / self.image_w, + 0, + ), + max( + (c_y_temp - box_height_ratio / 2.0) + / self.image_h, + 0, + ), + min( + (c_x_temp + box_width_ratio / 2.0) + / self.image_w, + 1, + ), + min( + (c_y_temp + box_height_ratio / 2.0) + / self.image_h, + 1, + ), ] idx += 1 if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) - out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype(self.dtype) self.out_var = out_var.astype(self.dtype) if self.flatten_to_2d: @@ -149,7 +173,6 @@ class TestNpuDensityPriorBoxOp(OpTest): class TestNpuDensityPriorBoxFlatten(TestNpuDensityPriorBoxOp): - def set_density(self): self.densities = [3, 4] self.fixed_sizes = [1.0, 2.0] @@ -162,7 +185,6 @@ class TestNpuDensityPriorBoxFlatten(TestNpuDensityPriorBoxOp): class TestNpuDensityPriorBoxOp1(TestNpuDensityPriorBoxOp): - def set_density(self): super(TestNpuDensityPriorBoxOp1, self).set_density() self.layer_w = 1 @@ -170,7 +192,6 @@ class TestNpuDensityPriorBoxOp1(TestNpuDensityPriorBoxOp): class TestNpuDensityPriorBoxOp2(TestNpuDensityPriorBoxOp): - def set_density(self): super(TestNpuDensityPriorBoxOp2, self).set_density() self.layer_w = 15 @@ -180,14 +201,12 @@ class TestNpuDensityPriorBoxOp2(TestNpuDensityPriorBoxOp): class TestNpuDensityPriorBoxOp3(TestNpuDensityPriorBoxOp): - def set_density(self): super(TestNpuDensityPriorBoxOp3, self).set_density() self.fixed_ratios = [1.0, 4.0] class TestNpuDensityPriorBoxOpFP16(TestNpuDensityPriorBoxOp): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py index 3c55d7537b066c64caa03bcc6f9223cbf6b3f542..b9233f3fd685ca654a2acdd18e0b68ea110a7e92 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py @@ -28,7 +28,6 @@ EPOCH = 100 class TestDropoutOp(OpTest): - def setUp(self): self.op_type = "dropout" self.set_npu() @@ -38,11 +37,11 @@ class TestDropoutOp(OpTest): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64)).astype('uint8') + 'Mask': np.ones((32, 64)).astype('uint8'), } def init_dtype(self): @@ -70,11 +69,11 @@ class TestDropoutOpInput1d(TestDropoutOp): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((3, 62)).astype('uint8') + 'Mask': np.ones((3, 62)).astype('uint8'), } @@ -89,11 +88,11 @@ class TestDropoutOpInput1d_1(TestDropoutOp): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((2000)).astype('uint8') + 'Mask': np.ones((2000)).astype('uint8'), } @@ -108,11 +107,11 @@ class TestDropoutOp2(TestDropoutOp): 
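For reference, the dropout expectations reformatted in this and the next few hunks all encode the 'upscale_in_train' convention: with dropout_prob=0.0 the output equals the input and the mask is all ones, with dropout_prob=1.0 both output and mask are zero, and in inference mode (is_test=True) the input passes through unscaled. A minimal NumPy sketch of that reference behaviour (the helper name and rng handling are illustrative, not taken from the patch):

import numpy as np

def dropout_upscale_in_train_ref(x, p, training=True, rng=None):
    # Matches the Out/Mask pairs asserted by the TestDropoutOp* cases:
    # inference or p == 0.0 -> identity with an all-ones mask,
    # p == 1.0              -> zeros with an all-zeros mask,
    # otherwise             -> kept values are rescaled by 1 / (1 - p).
    if not training or p == 0.0:
        return x.copy(), np.ones_like(x, dtype="uint8")
    if p == 1.0:
        return np.zeros_like(x), np.zeros_like(x, dtype="uint8")
    rng = np.random.default_rng() if rng is None else rng
    mask = (rng.random(x.shape) >= p).astype("uint8")
    return x * mask / (1.0 - p), mask

x = np.random.random((32, 64)).astype("float32")
out, mask = dropout_upscale_in_train_ref(x, 0.0)
assert (out == x).all() and mask.all()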
'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': np.zeros((32, 64)).astype('float32'), - 'Mask': np.zeros((32, 64)).astype('uint8') + 'Mask': np.zeros((32, 64)).astype('uint8'), } @@ -127,11 +126,11 @@ class TestDropoutOp3(TestDropoutOp): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64, 2)).astype('uint8') + 'Mask': np.ones((32, 64, 2)).astype('uint8'), } @@ -147,7 +146,7 @@ class TestDropoutOpInference(OpTest): 'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} @@ -164,7 +163,6 @@ class TestDropoutOpInference(OpTest): @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOpInference2(TestDropoutOpInference): - def setUp(self): self.op_type = "dropout" self.set_npu() @@ -173,7 +171,7 @@ class TestDropoutOpInference2(TestDropoutOpInference): self.attrs = { 'dropout_prob': 0.75, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} @@ -186,16 +184,16 @@ class TestDropoutOpWithSeed(TestDropoutOp): self.init_dtype() self.inputs = { "X": np.random.random((32, 64)).astype(self.dtype), - "Seed": np.asarray([125], dtype="int32") + "Seed": np.asarray([125], dtype="int32"), } self.attrs = { 'dropout_prob': 0.0, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64)).astype('uint8') + 'Mask': np.ones((32, 64)).astype('uint8'), } @@ -211,7 +209,6 @@ class TestDropoutOpFp16(TestDropoutOp): class TestDropoutAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace(), paddle.NPUPlace(0)] @@ -219,43 +216,44 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): input = fluid.data(name="input", shape=[40, 40], dtype="float32") - res1 = paddle.nn.functional.dropout(x=input, - p=0., - training=False, - mode='upscale_in_train') - res2 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='upscale_in_train') - res3 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='upscale_in_train') - res4 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=True, - mode='upscale_in_train') - res5 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='upscale_in_train') - res6 = paddle.nn.functional.dropout(x=input, - p=1., - training=True, - mode='upscale_in_train') + res1 = paddle.nn.functional.dropout( + x=input, p=0.0, training=False, mode='upscale_in_train' + ) + res2 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=True, mode='upscale_in_train' + ) + res3 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=False, mode='upscale_in_train' + ) + res4 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='upscale_in_train', + ) + res5 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + 
mode='upscale_in_train', + ) + res6 = paddle.nn.functional.dropout( + x=input, p=1.0, training=True, mode='upscale_in_train' + ) res7 = paddle.fluid.layers.dropout( x=input, - dropout_prob=0., - dropout_implementation='upscale_in_train') - res8 = paddle.nn.functional.dropout(x=input, - p=0., - axis=(0, 1), - training=False, - mode='upscale_in_train') + dropout_prob=0.0, + dropout_implementation='upscale_in_train', + ) + res8 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=(0, 1), + training=False, + mode='upscale_in_train', + ) in_np = np.random.random([40, 40]).astype("float32") res_np = in_np @@ -264,13 +262,17 @@ class TestDropoutAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [res1, res2, res3, res4, res5, res7, res8] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np) - fetches2 = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res6]) + fetches2 = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res6], + ) np.testing.assert_allclose(fetches2[0], res_np2) def test_static(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py index b4da50a47acc9cab70b13b7c497a914c41c7bafe..735b1d0ac06138a3f84f007b0fdf7e7bd2669a8f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestElementwiseAddOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_add" @@ -40,7 +39,7 @@ class TestElementwiseAddOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -128,21 +127,19 @@ class TestElementwiseAddOp(OpTest): class TestFP16ElementwiseAddOp(TestElementwiseAddOp): - def init_dtype(self): self.dtype = np.float16 class TestINT64ElementwiseAddOp(TestElementwiseAddOp): - def init_dtype(self): self.dtype = np.int64 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -150,9 +147,9 @@ class TestElementwiseAddOp_scalar(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -160,9 +157,9 @@ class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." 
+) class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -170,9 +167,9 @@ class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." +) class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -180,7 +177,6 @@ class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): class TestAddAPI(unittest.TestCase): - def test_name(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") @@ -205,66 +201,65 @@ class TestAddAPI(unittest.TestCase): place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - x_value, y_value, z_value = exe.run(feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[x, y, z]) + x_value, y_value, z_value = exe.run( + feed={"x": x_np, "y": y_np}, fetch_list=[x, y, z] + ) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual( (x_value == x_np).all(), True, - msg="x_value = {}, but expected {}".format(x_value, x_np)) + msg="x_value = {}, but expected {}".format(x_value, x_np), + ) self.assertEqual( (y_value == y_np).all(), True, - msg="y_value = {}, but expected {}".format(y_value, y_np)) + msg="y_value = {}, but expected {}".format(y_value, y_np), + ) self.assertEqual( (z_value == z_expected).all(), True, - msg="z_value = {}, but expected {}".format(z_value, z_expected)) + msg="z_value = {}, but expected {}".format(z_value, z_expected), + ) class TestAddError(unittest.TestCase): - def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): # the input of elementwise_add must be Variable. 
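The TestAddAPI hunk just above pushes paddle.add through the static-graph executor and checks the result against [3., 8., 6.]. A condensed, self-contained version of that flow, assuming an NPU build as the test does (swap in paddle.CPUPlace() otherwise); only z is fetched here:

import numpy as np
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[3], dtype="float32")
    y = paddle.static.data(name="y", shape=[3], dtype="float32")
    z = paddle.add(x, y)

exe = paddle.static.Executor(paddle.NPUPlace(0))
# Same feed values as the test above.
(z_value,) = exe.run(
    main_prog,
    feed={
        "x": np.array([2, 3, 4], dtype="float32"),
        "y": np.array([1, 5, 2], dtype="float32"),
    },
    fetch_list=[z],
)
assert (z_value == np.array([3.0, 8.0, 6.0])).all()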
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) self.assertRaises(TypeError, paddle.add, x1, y1) # the input dtype must be float16 or float32 or float64 or int32 or int64 - x2 = paddle.static.data(name='x2', - shape=[3, 4, 5, 6], - dtype="uint8") - y2 = paddle.static.data(name='y2', - shape=[3, 4, 5, 6], - dtype="uint8") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8" + ) + y2 = paddle.static.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8" + ) self.assertRaises(TypeError, paddle.add, x2, y2) class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -275,7 +270,6 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -286,7 +280,6 @@ class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -297,7 +290,6 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -308,7 +300,6 @@ class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -316,7 +307,6 @@ class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -324,7 +314,6 @@ class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -335,7 +324,6 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): class 
TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -346,7 +334,6 @@ class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -357,7 +344,6 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -368,7 +354,6 @@ class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -376,7 +361,6 @@ class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -384,7 +368,6 @@ class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -392,7 +375,6 @@ class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) @@ -400,7 +382,6 @@ class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -408,7 +389,6 @@ class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -419,7 +399,6 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -430,9 +409,9 @@ class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -443,9 +422,9 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -456,7 +435,6 @@ class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -467,7 +445,6 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -478,7 +455,6 @@ class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -489,7 +465,6 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -500,7 +475,6 @@ class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) @@ -511,7 +485,6 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype) @@ -522,7 +495,6 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 1, 12).astype(self.dtype) self.y = np.random.rand(10, 2, 12).astype(self.dtype) @@ -533,14 +505,15 @@ class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_add must be Variable. 
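The *_broadcast_*, rowwise and channelwise cases above all build their expected outputs by reshaping Y so that plain NumPy broadcasting reproduces elementwise_add's axis semantics. A small sketch of that convention (the helper name is illustrative, not from the patch):

import numpy as np

def broadcast_ref(x, y, axis=-1):
    # `axis` says where Y's shape lines up inside X's shape; singleton dims are
    # padded on both sides so ordinary NumPy broadcasting applies, exactly as
    # the tests do with y.reshape(100, 1, 1), y.reshape(1, 10, 12), etc.
    if axis == -1:
        axis = x.ndim - y.ndim
    shape = [1] * axis + list(y.shape) + [1] * (x.ndim - axis - y.ndim)
    return x + y.reshape(shape)

x = np.random.rand(2, 100, 3).astype("float32")
y = np.random.rand(100).astype("float32")
# Mirrors TestElementwiseAddOp_broadcast_1: axis=1 maps Y to shape (1, 100, 1).
np.testing.assert_allclose(broadcast_ref(x, y, axis=1), x + y.reshape(1, 100, 1))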
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1) # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 @@ -551,7 +524,6 @@ class TestElementwiseAddOpError(unittest.TestCase): class TestAddApi(unittest.TestCase): - def _executed_api(self, x, y, name=None): return paddle.add(x, y, name) @@ -569,7 +541,7 @@ class TestAddApi(unittest.TestCase): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -579,7 +551,7 @@ class TestAddApi(unittest.TestCase): place = fluid.NPUPlace(0) exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -590,18 +562,16 @@ class TestAddApi(unittest.TestCase): y = fluid.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((np_z == z_expected).all(), True) class TestAddInplaceApi(TestAddApi): - def _executed_api(self, x, y, name=None): return x.add_(y, name) class TestAddInplaceBroadcastSuccess(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 4).astype('float') self.y_numpy = np.random.rand(3, 4).astype('float') @@ -618,21 +588,18 @@ class TestAddInplaceBroadcastSuccess(unittest.TestCase): class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float') self.y_numpy = np.random.rand(3, 1).astype('float') class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float') self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float') class TestAddInplaceBroadcastError(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(3, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') @@ -651,14 +618,12 @@ class TestAddInplaceBroadcastError(unittest.TestCase): class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py index ab9c74fcf7006bf4dfb1ec208062ba001224714e..acdb8c75db56171cbe3046ffa54778924e88d646 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestElementwiseDiv(OpTest): - def setUp(self): self.set_npu() 
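The TestAddInplaceBroadcast* cases a little earlier encode the in-place broadcasting rule: x.add_(y) is fine when y broadcasts into x's existing shape, and must fail when broadcasting would enlarge x. A hedged dygraph sketch of that rule (device defaults to whatever the build provides; the exact exception type is not visible in these hunks):

import numpy as np
import paddle

paddle.disable_static()

x = paddle.to_tensor(np.random.rand(2, 3, 4).astype("float32"))
y = paddle.to_tensor(np.random.rand(3, 4).astype("float32"))
x.add_(y)  # allowed: (3, 4) broadcasts into (2, 3, 4) without resizing x

x_small = paddle.to_tensor(np.random.rand(3, 4).astype("float32"))
y_big = paddle.to_tensor(np.random.rand(2, 3, 4).astype("float32"))
try:
    x_small.add_(y_big)  # the error tests expect this to raise
except Exception:
    pass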
self.op_type = "elementwise_div" @@ -41,7 +40,7 @@ class TestElementwiseDiv(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.attrs = {} self.outputs = {'Out': out} @@ -73,13 +72,12 @@ class TestElementwiseDiv(OpTest): ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y")) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set("Y") + ) class TestElementwiseDivFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_div" @@ -93,7 +91,7 @@ class TestElementwiseDivFp16(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.attrs = {} self.outputs = {'Out': out} @@ -110,7 +108,6 @@ class TestElementwiseDivFp16(OpTest): class TestElementwiseDivNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -129,9 +126,9 @@ class TestElementwiseDivNet(unittest.TestCase): b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') c = paddle.static.data(name="c", shape=[32, 32], dtype='float32') d = paddle.static.data(name="d", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) e = paddle.multiply(a, b) f = paddle.multiply(c, d) @@ -157,18 +154,23 @@ class TestElementwiseDivNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "c": c_np, - "d": d_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={ + "a": a_np, + "b": b_np, + "c": c_np, + "d": d_np, + "label": label_np, + }, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -181,7 +183,6 @@ class TestElementwiseDivNet(unittest.TestCase): class TestFloatStatus(unittest.TestCase): - def test_overflow(self): paddle.disable_static() paddle.set_device('npu') @@ -191,9 +192,9 @@ class TestFloatStatus(unittest.TestCase): self.assertEqual(flag.numpy().sum(), 0.0) x = paddle.to_tensor([12.564], stop_gradient=False) - y = paddle.to_tensor([2.], stop_gradient=False) + y = paddle.to_tensor([2.0], stop_gradient=False) z = x / y - out = 32768. 
* z + out = 32768.0 * z ops.get_float_status(flag, flag) self.assertEqual(flag.numpy().sum(), 0.0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py index 7301f582fca67bc3e28747c53a4d4b3f263bc5e6..6762c51ca3898428a6b776fa47e87f71a750e15b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_floordiv_op_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestElementwiseFloorDiv(OpTest): - def setUp(self): self.op_type = "elementwise_floordiv" self.set_npu() @@ -33,7 +32,7 @@ class TestElementwiseFloorDiv(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {} self.outputs = {'Out': self.out} @@ -55,7 +54,6 @@ class TestElementwiseFloorDiv(OpTest): class TestElementwiseFloorDiv2(TestElementwiseFloorDiv): - def init_dtype(self): self.dtype = "int32" diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py index 92a91b384fcb79342e15bcd3096555ce1880b9aa..fe3d58479294417dc7f63c9bc46629ea86aec8ad 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_max_op_npu.py @@ -41,7 +41,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_x)) or ( - shape_out[ax] > 1 and shape_x[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_x[ax - src_axis] == 1 + ): reduce_axes_x.append(ax) if shape_y != shape_out: @@ -52,7 +53,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_y)) or ( - shape_out[ax] > 1 and shape_y[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_y[ax - src_axis] == 1 + ): reduce_axes_y.append(ax) if len(reduce_axes_x) > 0: @@ -79,7 +81,6 @@ def ComputeGrad(x, y, out, axis): class TestElementwiseMaxOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_max" @@ -91,7 +92,7 @@ class TestElementwiseMaxOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -106,7 +107,8 @@ class TestElementwiseMaxOp(OpTest): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype( - self.dtype) + self.dtype + ) self.out = np.maximum(self.x, self.y) def init_axis(self): @@ -119,18 +121,17 @@ class TestElementwiseMaxOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, ['Y'], 'Out', no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y")) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set("Y") + ) class TestElementwiseMaxOp_int32(TestElementwiseMaxOp): - def init_dtype(self): self.dtype = np.int32 @@ -146,7 +147,6 @@ class 
TestElementwiseMaxOp_int32(TestElementwiseMaxOp): class TestElementwiseMaxOp_scalar(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype) self.y = np.array([0.5]).astype(self.dtype) @@ -154,22 +154,22 @@ class TestElementwiseMaxOp_scalar(TestElementwiseMaxOp): class TestElementwiseMaxOp_vector(TestElementwiseMaxOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x + sgn * np.random.uniform(0.1, 1, - (100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxOp_broadcast_0(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(100, 1, 1)) def init_axis(self): @@ -177,12 +177,12 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseMaxOp): class TestElementwiseMaxOp_broadcast_1(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(1, 100, 1)) def init_axis(self): @@ -190,56 +190,68 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseMaxOp): def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwiseMaxOp_broadcast_2(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y.reshape(1, 1, 100)) def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + 
) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwiseMaxOp_broadcast_3(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype) sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype) - self.y = self.x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (50, 2)).astype(self.dtype) + self.y = self.x[0, :, :, 0] + sgn * np.random.uniform( + 1, 2, (50, 2) + ).astype(self.dtype) self.out = np.maximum(self.x, self.y.reshape(1, 50, 2, 1)) def init_axis(self): @@ -247,27 +259,26 @@ class TestElementwiseMaxOp_broadcast_3(TestElementwiseMaxOp): class TestElementwiseMaxOp_broadcast_4(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype) - self.y = self.x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxOp_broadcast_5(TestElementwiseMaxOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 3, 1, 1)).astype(self.dtype) - self.y = self.x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 1)).astype(self.dtype) + self.y = self.x + sgn * np.random.uniform(1, 2, (2, 3, 1, 1)).astype( + self.dtype + ) self.out = np.maximum(self.x, self.y) class TestElementwiseMaxNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -282,9 +293,9 @@ class TestElementwiseMaxNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.maximum(a, b) @@ -307,16 +318,17 @@ class TestElementwiseMaxNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py index 
5956faba0e1e9c78551ff03d54fc3ae9f6b1fcb3..8cd51765bd8292b6eb6613a8873d9134b3ff516e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_min_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestElementwiseMinOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_min" @@ -37,7 +36,7 @@ class TestElementwiseMinOp(OpTest): self.init_input_output() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis} @@ -52,7 +51,8 @@ class TestElementwiseMinOp(OpTest): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) self.sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) self.y = self.x + self.sgn * np.random.uniform(0.1, 1, [13, 17]).astype( - self.dtype) + self.dtype + ) self.out = np.minimum(self.x, self.y) self.axis = -1 @@ -64,9 +64,9 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.5 + ) else: self.check_grad_with_place( self.place, @@ -76,10 +76,13 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_ingore_x(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - max_relative_error=0.9) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + max_relative_error=0.9, + ) else: self.check_grad_with_place( self.place, @@ -90,10 +93,13 @@ class TestElementwiseMinOp(OpTest): def test_check_grad_ingore_y(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - max_relative_error=0.1) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + max_relative_error=0.1, + ) else: self.check_grad_with_place( self.place, @@ -104,32 +110,30 @@ class TestElementwiseMinOp(OpTest): class TestElementwiseMinOpFp16(TestElementwiseMinOp): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOp_Vector(TestElementwiseMinOp): - def init_input_output(self): - self.x = np.random.uniform(1, 2, (100, )).astype(self.dtype) - self.sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x + self.sgn * np.random.uniform(0.1, 1, (100, )).astype( - self.dtype) + self.x = np.random.uniform(1, 2, (100,)).astype(self.dtype) + self.sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x + self.sgn * np.random.uniform(0.1, 1, (100,)).astype( + self.dtype + ) self.out = np.minimum(self.x, self.y) self.axis = -1 class TestElementwiseMinOpFp16_Vector(TestElementwiseMinOp_Vector): - def init_dtype(self): self.dtype = np.float16 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseMinOp_scalar(TestElementwiseMinOp): - def init_input_output(self): self.x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype) self.y = np.array([0.5]).astype(self.dtype) @@ -138,32 +142,30 @@ class TestElementwiseMinOp_scalar(TestElementwiseMinOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMinOpFp16_scalar(TestElementwiseMinOp_scalar): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOp_broadcast(TestElementwiseMinOp): - def init_input_output(self): self.x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) - self.sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - self.y = self.x[0, 0, :] + self.sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + self.y = self.x[0, 0, :] + self.sgn * np.random.uniform( + 1, 2, (100,) + ).astype(self.dtype) self.out = np.minimum(self.x, self.y.reshape(1, 1, 100)) self.axis = -1 class TestElementwiseMinOpFp16_broadcast(TestElementwiseMinOp_broadcast): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMinOpNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -178,9 +180,9 @@ class TestElementwiseMinOpNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.minimum(a, b) @@ -203,16 +205,17 @@ class TestElementwiseMinOpNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py index fb5c773c825dc0c690da841df9f54aaccf05725a..efb81fbad6ec0cb86e136f11e81d834635433dab 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestElementwiseModOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -42,7 +41,7 @@ class TestElementwiseModOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -69,13 +68,11 @@ class TestElementwiseModOp(OpTest): class TestElementwiseModOpInt64(TestElementwiseModOp): - def init_dtype(self): self.dtype = np.int64 class TestElementwiseModOp_scalar(TestElementwiseModOp): - def 
init_input_output(self): scale_x = random.randint(0, 100000000) scale_y = random.randint(1, 100000000) @@ -85,7 +82,6 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp): class TestElementwiseModOpFloat(TestElementwiseModOp): - def init_dtype(self): self.dtype = np.float32 @@ -99,7 +95,6 @@ class TestElementwiseModOpFloat(TestElementwiseModOp): class TestElementwiseModOpDouble(TestElementwiseModOpFloat): - def init_dtype(self): self.dtype = np.float64 @@ -108,7 +103,6 @@ class TestElementwiseModOpDouble(TestElementwiseModOpFloat): class TestElementwiseModOpFP16(TestElementwiseModOpFloat): - def init_dtype(self): self.dtype = np.float16 @@ -117,7 +111,6 @@ class TestElementwiseModOpFP16(TestElementwiseModOpFloat): class TestElementwiseModOp_broadcast_0(TestElementwiseModOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -128,7 +121,6 @@ class TestElementwiseModOp_broadcast_0(TestElementwiseModOp): class TestElementwiseModOp_broadcast_1(TestElementwiseModOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -139,7 +131,6 @@ class TestElementwiseModOp_broadcast_1(TestElementwiseModOp): class TestElementwiseModOp_broadcast_2(TestElementwiseModOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -150,7 +141,6 @@ class TestElementwiseModOp_broadcast_2(TestElementwiseModOp): class TestRemainderOp(unittest.TestCase): - def test_name(self): paddle.set_device('npu:0') with fluid.program_guard(fluid.Program()): @@ -172,7 +162,7 @@ class TestRemainderOp(unittest.TestCase): self.assertEqual((np_z == z_expected).all(), True) np_x = np.array([-3.3, 11.5, -2, 3.5]) - np_y = np.array([-1.2, 2., 3.3, -2.3]) + np_y = np.array([-1.2, 2.0, 3.3, -2.3]) x = paddle.to_tensor(np_x) y = paddle.to_tensor(np_y) z = x % y diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py index a3fe2dcf453a25de9a305f018072f907b43d4122..fa0b80d633898adbc1c96aad3a496a5aa3eacdf1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mul_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class ElementwiseMulOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -41,7 +40,7 @@ class ElementwiseMulOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis} @@ -53,14 +52,14 @@ class ElementwiseMulOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, ['Y'], 'Out', no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set('Y')) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set('Y') + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -75,33 +74,31 @@ class ElementwiseMulOp(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to 
test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMulOp_scalar(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) + 'Y': np.random.rand(1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_Vector(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.random((100, )).astype("float32"), - 'Y': np.random.random((100, )).astype("float32") + 'X': np.random.random((100,)).astype("float32"), + 'Y': np.random.random((100,)).astype("float32"), } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -112,13 +109,12 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 100, 3).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.attrs = {'axis': 1} @@ -128,13 +124,12 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) + 'Y': np.random.rand(100).astype(np.float32), } self.outputs = { @@ -143,13 +138,12 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(np.float32), - 'Y': np.random.rand(10, 12).astype(np.float32) + 'Y': np.random.rand(10, 12).astype(np.float32), } self.attrs = {'axis': 1} @@ -159,69 +153,64 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 2, 11).astype(np.float32), - 'Y': np.random.rand(10, 1, 11).astype(np.float32) + 'Y': np.random.rand(10, 1, 11).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 4, 2, 3).astype(np.float32), - 'Y': np.random.rand(10, 4, 1, 3).astype(np.float32) + 'Y': np.random.rand(10, 4, 1, 3).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "paddle is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "paddle is not compiled with NPU" +) class TestElementwiseMulOpFp16(ElementwiseMulOp): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(1, 1, 100).astype(np.float32) + 'Y': 
np.random.rand(1, 1, 100).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(30, 3, 1, 5).astype(np.float32), - 'Y': np.random.rand(30, 1, 4, 1).astype(np.float32) + 'Y': np.random.rand(30, 1, 4, 1).astype(np.float32), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): - def setUp(self): self.set_npu() self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 10).astype(np.float32), - 'Y': np.random.rand(2, 2, 10, 10).astype(np.float32) + 'Y': np.random.rand(2, 2, 10, 10).astype(np.float32), } self.attrs = {'axis': 2} diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py index 715289942384a9a73230b737d97f6b4775ad6997..b872c5bf83edf0e29cf8e2b16e5b1be7f3ff8f72 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_pow_op_npu.py @@ -42,7 +42,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_x)) or ( - shape_out[ax] > 1 and shape_x[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_x[ax - src_axis] == 1 + ): reduce_axes_x.append(ax) if shape_y != shape_out: @@ -53,7 +54,8 @@ def ComputeGrad(x, y, out, axis): for ax in range(len(shape_out)): if (ax < src_axis or ax >= src_axis + len(shape_y)) or ( - shape_out[ax] > 1 and shape_y[ax - src_axis] == 1): + shape_out[ax] > 1 and shape_y[ax - src_axis] == 1 + ): reduce_axes_y.append(ax) if len(reduce_axes_x) > 0: @@ -79,7 +81,6 @@ def ComputeGrad(x, y, out, axis): class TestElementwisePow(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_pow" @@ -91,7 +92,7 @@ class TestElementwisePow(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis} self.outputs = {'Out': self.out} @@ -116,27 +117,32 @@ class TestElementwisePow(OpTest): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowFp16(TestElementwisePow): - def init_input_output(self): np.random.seed(SEED) self.x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) @@ -155,7 +161,6 @@ class TestElementwisePowFp16(TestElementwisePow): class TestElementwisePowDouble(TestElementwisePow): - def init_input_output(self): 
np.random.seed(SEED) self.x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) @@ -174,7 +179,6 @@ class TestElementwisePowDouble(TestElementwisePow): class TestElementwisePowOp_broadcast_0(TestElementwisePow): - def init_axis(self): self.axis = 1 @@ -186,27 +190,32 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowOp_broadcast_1(TestElementwisePow): - def init_axis(self): self.axis = 1 @@ -218,27 +227,32 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowOp_broadcast_2(TestElementwisePow): - def init_axis(self): self.axis = 0 @@ -250,27 +264,32 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePow): def test_check_grad_normal(self): dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy]) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy] + ) def test_check_grad_ingore_x(self): _, dy = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[dy]) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[dy], + ) def test_check_grad_ingore_y(self): dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis) - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set("Y"), - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + no_grad_set=set("Y"), + user_defined_grads=[dx], + ) class TestElementwisePowNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = 
paddle.static.Program() startup_prog = paddle.static.Program() @@ -285,9 +304,9 @@ class TestElementwisePowNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.pow(a, b) @@ -310,16 +329,17 @@ class TestElementwisePowNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py index ac3658f0c34ae4772c1fdbaa6ed3e75d7b00c31f..8542ed6bdc39697e452c0e0f1285194ab710fe50 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_sub_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestElementwiseSubOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "elementwise_sub" @@ -39,7 +38,7 @@ class TestElementwiseSubOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -91,19 +90,16 @@ class TestElementwiseSubOp(OpTest): class TestElementwiseSubOpInt32(TestElementwiseSubOp): - def init_dtype(self): self.dtype = np.int32 class TestElementwiseSubOpInt64(TestElementwiseSubOp): - def init_dtype(self): self.dtype = np.int64 class TestSubtractAPI(unittest.TestCase): - def test_name(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") @@ -128,50 +124,51 @@ class TestSubtractAPI(unittest.TestCase): place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - x_value, y_value, z_value = exe.run(feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[x, y, z]) + x_value, y_value, z_value = exe.run( + feed={"x": x_np, "y": y_np}, fetch_list=[x, y, z] + ) - z_expected = np.array([1., -2., 2.]) + z_expected = np.array([1.0, -2.0, 2.0]) self.assertEqual( (x_value == x_np).all(), True, - msg="x_value = {}, but expected {}".format(x_value, x_np)) + msg="x_value = {}, but expected {}".format(x_value, x_np), + ) self.assertEqual( (y_value == y_np).all(), True, - msg="y_value = {}, but expected {}".format(y_value, y_np)) + msg="y_value = {}, but expected {}".format(y_value, y_np), + ) self.assertEqual( (z_value == z_expected).all(), True, - msg="z_value = {}, but expected {}".format(z_value, z_expected)) + msg="z_value = {}, but expected {}".format(z_value, z_expected), + ) class TestSubtractError(unittest.TestCase): - def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): # the input of elementwise_add must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) self.assertRaises(TypeError, paddle.subtract, x1, y1) # the input dtype must be float16 or float32 or float64 or int32 or int64 - x2 = paddle.static.data(name='x2', - shape=[3, 4, 5, 6], - dtype="uint8") - y2 = paddle.static.data(name='y2', - shape=[3, 4, 5, 6], - dtype="uint8") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8" + ) + y2 = paddle.static.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8" + ) self.assertRaises(TypeError, paddle.subtract, x2, y2) class TestSubtractNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -186,9 +183,9 @@ class TestSubtractNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) c = paddle.assign(b) @@ -212,16 +209,17 @@ class TestSubtractNet(unittest.TestCase): for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_exp_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_exp_op_npu.py index 44ba219d40fc79ff345576f6018de9a45eaba120..ca43877d45315262dfd48588086518ad4ae0a604 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_exp_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_exp_op_npu.py @@ -30,7 +30,6 @@ SEED = 2049 class TestExpNPUOP(OpTest): - def setUp(self): self.set_npu() @@ -63,7 +62,6 @@ class TestExpNPUOP(OpTest): class TestExpNPUOPFloat64(TestExpNPUOP): - def init_dtype(self): self.dtype = np.float64 diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py index 76ab1d2959a246426f4b1cdfb5c5e3286fbff750..ca2b0195b4280e2c739f51339e3d87115e464673 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py @@ -26,7 +26,6 @@ np.random.seed(10) class TestExpandAsOpRank1(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -50,7 +49,6 @@ class TestExpandAsOpRank1(OpTest): class TestExpandAsOpRank2(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -74,7 +72,6 @@ class TestExpandAsOpRank2(OpTest): class TestExpandAsOpRank3(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -98,7 +95,6 @@ class TestExpandAsOpRank3(OpTest): class 
TestExpandAsOpRank4(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -123,29 +119,28 @@ class TestExpandAsOpRank4(OpTest): # Test python API class TestExpandAsV2API(unittest.TestCase): - def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) - y = fluid.layers.data(name='target_tensor', - shape=[2, 12, 14], - append_batch_size=False, - dtype="float32") + y = fluid.layers.data( + name='target_tensor', + shape=[2, 12, 14], + append_batch_size=False, + dtype="float32", + ) out_1 = paddle.expand_as(x, y=y) exe = fluid.Executor(place=fluid.NPUPlace(0)) - res_1 = exe.run(fluid.default_main_program(), - feed={ - "x": input1, - "target_tensor": input2 - }, - fetch_list=[out_1]) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": input1, "target_tensor": input2}, + fetch_list=[out_1], + ) assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1))) diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py index 3096369b1966a9287ecf58600707455e586597c2..0ac8cfc5d3c13de6a720070dab34e869251e4d43 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestExpand(OpTest): - def setUp(self): self.set_npu() self.op_type = "expand" @@ -55,7 +54,6 @@ class TestExpand(OpTest): class TestExpandV2(TestExpand): - def setUp(self): self.set_npu() self.op_type = "expand" @@ -69,7 +67,7 @@ class TestExpandV2(TestExpand): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'ExpandTimes': OpTest.np_dtype_to_fluid_dtype(expand_times) + 'ExpandTimes': OpTest.np_dtype_to_fluid_dtype(expand_times), } self.attrs = {} self.outputs = {'Out': out} @@ -83,7 +81,6 @@ class TestExpandFp16(TestExpand): class TestExpandNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -96,9 +93,9 @@ class TestExpandNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 1], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) res = paddle.fluid.layers.expand(a, [1, 32]) loss = res.sum() @@ -115,12 +112,11 @@ class TestExpandNet(unittest.TestCase): for epoch in range(100): - loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "label": label_np - }, - fetch_list=[loss]) + loss_res = exe.run( + main_prog, + feed={"a": a_np, "label": label_np}, + fetch_list=[loss], + ) if epoch % 10 == 0: print("Epoch {} | Loss: {}".format(epoch, loss)) @@ -139,7 +135,6 @@ class TestExpandNet(unittest.TestCase): class TestExpand_expand_times_all_one(TestExpand): - def setUp(self): self.set_npu() self.op_type = "expand" diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py index d922cef46c83c0345d9474a187ef9d2b303197cd..0ea52f04d9d7693465de77209f20049da59a98a6 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py @@ -29,7 +29,6 @@ np.random.seed(10) # CANN Op Support X: float16, float32, int32, int8 ,uint8 # Situation 1: shape is a list(without tensor) class TestExpandV2NPUOpRank1(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -58,7 +57,6 @@ class TestExpandV2NPUOpRank1(OpTest): class TestExpandV2OpRank2_DimExpanding(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = [120] self.shape = [2, 120] @@ -66,7 +64,6 @@ class TestExpandV2OpRank2_DimExpanding(TestExpandV2NPUOpRank1): class TestExpandV2OpRank2(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = [1, 140] self.shape = [12, 140] @@ -74,7 +71,6 @@ class TestExpandV2OpRank2(TestExpandV2NPUOpRank1): class TestExpandV2OpRank3_Corner(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) @@ -82,7 +78,6 @@ class TestExpandV2OpRank3_Corner(TestExpandV2NPUOpRank1): class TestExpandV2OpRank4(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) @@ -90,7 +85,6 @@ class TestExpandV2OpRank4(TestExpandV2NPUOpRank1): class TestExpandV2OpRank5(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = (2, 4, 1, 15) self.shape = (2, -1, 4, -1) @@ -98,7 +92,6 @@ class TestExpandV2OpRank5(TestExpandV2NPUOpRank1): class TestExpandV2OpRank6(TestExpandV2NPUOpRank1): - def init_data(self): self.ori_shape = (4, 1, 30) self.shape = (2, -1, 4, 30) @@ -107,7 +100,6 @@ class TestExpandV2OpRank6(TestExpandV2NPUOpRank1): # Situation 2: shape is a list(with tensor) class TestExpandV2OpNPURank1_tensor_attr(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -116,8 +108,9 @@ class TestExpandV2OpNPURank1_tensor_attr(OpTest): self.dtype = np.float32 expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): - expand_shapes_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + expand_shapes_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), @@ -143,9 +136,9 @@ class TestExpandV2OpNPURank1_tensor_attr(OpTest): self.check_grad_with_place(self.place, ['X'], 'Out') -class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpNPURank1_tensor_attr - ): - +class TestExpandV2OpRank2_Corner_tensor_attr( + TestExpandV2OpNPURank1_tensor_attr +): def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -155,7 +148,6 @@ class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpNPURank1_tensor_attr # Situation 3: shape is a tensor class TestExpandV2NPUOpRank1_tensor(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -189,7 +181,6 @@ class TestExpandV2NPUOpRank1_tensor(OpTest): # Situation 4: input x is float16 # skip grad check for float16 class TestExpandV2OpFloat(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -212,7 +203,6 @@ class TestExpandV2OpFloat(OpTest): # Situation 5: input x is int32 # skip grad check for int32 class TestExpandV2OpInteger(OpTest): - def init_dtype(self): self.dtype = 'int32' @@ -236,13 +226,11 @@ class TestExpandV2OpInteger(OpTest): class TesstExpandV2OpInt64(TestExpandV2OpInteger): - def init_dtype(self): self.dtype = 'int64' class TesstExpandV2OpBool(TestExpandV2OpInteger): - def init_dtype(self): self.dtype = 'bool' @@ -257,11 +245,11 @@ class TesstExpandV2OpBool(TestExpandV2OpInteger): class 
TestExpandV2Error(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.NPUPlace(0) + ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) x2 = fluid.layers.data(name='x2', shape=[2], dtype="uint8") @@ -273,20 +261,23 @@ class TestExpandV2Error(unittest.TestCase): # Test python API class TestExpandV2API(unittest.TestCase): - def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', + shape=[12, 14], + append_batch_size=False, + dtype="float32", + ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data(name="expand_shape", - shape=[2], - append_batch_size=False, - dtype="int32") + expand_shape = fluid.layers.data( + name="expand_shape", + shape=[2], + append_batch_size=False, + dtype="int32", + ) out_1 = paddle.expand(x, shape=[12, 14]) out_2 = paddle.expand(x, shape=[positive_2, 14]) @@ -295,14 +286,14 @@ class TestExpandV2API(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=paddle.NPUPlace(0)) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_shape": - np.array([12, 14]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + "expand_shape": np.array([12, 14]).astype("int32"), + }, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) diff --git a/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py index fe0ba17d20101b39797a61ca88ca98ea2d507242..e80be96f2cdcbfab5b31f95a2e1bd9f4daf107c0 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py @@ -28,7 +28,6 @@ np.random.seed(10) class TestEyeOp(OpTest): - def setUp(self): ''' Test eye op with specified shape @@ -47,14 +46,14 @@ class TestEyeOp(OpTest): if self.num_columns == 0: self.attrs = { 'num_rows': self.num_rows, - 'dtype': framework.convert_np_dtype_to_dtype_(self.dtype) + 'dtype': framework.convert_np_dtype_to_dtype_(self.dtype), } self.outputs = {'Out': np.eye(self.num_rows, dtype=self.dtype)} else: self.attrs = { 'num_rows': self.num_rows, 'num_columns': self.num_columns, - 'dtype': framework.convert_np_dtype_to_dtype_(self.dtype) + 'dtype': framework.convert_np_dtype_to_dtype_(self.dtype), } self.outputs = { 'Out': np.eye(self.num_rows, self.num_columns, dtype=self.dtype) @@ -73,41 +72,35 @@ class TestEyeOp(OpTest): class TestEyeOp1(TestEyeOp): - def initTestCase(self): self.num_rows = 50 class TestEyeOp2(TestEyeOp): - def initTestCase(self): self.num_rows = 50 self.dtype = np.int32 class TestEyeOp3(TestEyeOp): - def initTestCase(self): self.num_rows = 50 self.dtype = np.float16 class TestEyeOp4(TestEyeOp): - def initTestCase(self): self.num_rows = 1 self.num_columns = 99 class TestEyeOp5(TestEyeOp): - def initTestCase(self): self.num_rows = 100 self.num_columns = 100 class TestEyeOp6(TestEyeOp): - def initTestCase(self): self.num_rows = 100 self.num_columns = 100 @@ 
-115,13 +108,12 @@ class TestEyeOp6(TestEyeOp): class API_TestTensorEye(unittest.TestCase): - def test_out(self): with paddle.static.program_guard(paddle.static.Program()): data = paddle.eye(10) place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="float32") self.assertEqual((result == expected_result).all(), True) @@ -129,7 +121,7 @@ class API_TestTensorEye(unittest.TestCase): data = paddle.eye(10, num_columns=7, dtype="float16") place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, 7, dtype="float16") self.assertEqual((result == expected_result).all(), True) @@ -137,7 +129,7 @@ class API_TestTensorEye(unittest.TestCase): data = paddle.eye(10, dtype="int32") place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="int32") self.assertEqual((result == expected_result).all(), True) @@ -159,8 +151,9 @@ class API_TestTensorEye(unittest.TestCase): result = tmp_result expected_result = np.stack(result, axis=0) paddle.enable_static() - self.assertEqual(out.numpy().shape == np.array(expected_result).shape, - True) + self.assertEqual( + out.numpy().shape == np.array(expected_result).shape, True + ) self.assertEqual((out.numpy() == expected_result).all(), True) paddle.disable_static(paddle.NPUPlace(0)) @@ -175,8 +168,9 @@ class API_TestTensorEye(unittest.TestCase): result = tmp_result expected_result = np.stack(result, axis=0) paddle.enable_static() - self.assertEqual(out.numpy().shape == np.array(expected_result).shape, - True) + self.assertEqual( + out.numpy().shape == np.array(expected_result).shape, True + ) self.assertEqual((out.numpy() == expected_result).all(), True) def test_errors(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_any_like_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_any_like_op_npu.py index 23f07d299e2890fcb14617f03189a7c02af5c113..09e38fe0665d947f7c0399f26997de4566fe79bb 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_fill_any_like_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_fill_any_like_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestFillAnyLikeNPUOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -52,47 +51,40 @@ class TestFillAnyLikeNPUOp(OpTest): class TestFillAnyLikeNPUOpInt32(TestFillAnyLikeNPUOp): - def init(self): self.dtype = np.int32 self.value = -1 class TestFillAnyLikeNPUOpInt64(TestFillAnyLikeNPUOp): - def init(self): self.dtype = np.int64 self.value = -1 class TestFillAnyLikeNPUOpFloat32(TestFillAnyLikeNPUOp): - def init(self): self.dtype = np.float32 self.value = 0.09 class TestFillAnyLikeNPUOpFloat16(TestFillAnyLikeNPUOp): - def init(self): self.dtype = np.float16 self.value = 0.05 class TestFillAnyLikeNPUOpValue1(TestFillAnyLikeNPUOp): - def init(self): self.value = 1.0 class TestFillAnyLikeNPUOpValue2(TestFillAnyLikeNPUOp): - def init(self): self.value = 1e-9 class TestFillAnyLikeNPUOpShape(TestFillAnyLikeNPUOp): - def init(self): self.shape = [12, 10] diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_batch_size_like_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_batch_size_like_op_npu.py index 
f157845d24b1004347f82e713eb1145789c52d7e..0ceaa0faf343e5004b049fabf962d4f707d2f6bd 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_batch_size_like_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_batch_size_like_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestFillConstantBatchSizeLike(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -48,11 +47,12 @@ class TestFillConstantBatchSizeLike(OpTest): 'dtype': self.dtype, 'force_cpu': self.force_cpu, 'input_dim_idx': self.input_dim_idx, - 'output_dim_idx': self.output_dim_idx + 'output_dim_idx': self.output_dim_idx, } self.outputs = { - 'Out': np.full(self.output_shape, self.output_value, - self.output_dtype) + 'Out': np.full( + self.output_shape, self.output_value, self.output_dtype + ) } def set_npu(self): @@ -84,7 +84,6 @@ class TestFillConstantBatchSizeLike(OpTest): class TestFillConstantBatchSizeLike2(TestFillConstantBatchSizeLike): - def init_shape(self): # test shape self.input_shape = [4, 5, 6, 7] @@ -93,7 +92,6 @@ class TestFillConstantBatchSizeLike2(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike3(TestFillConstantBatchSizeLike): - def init_value(self): # use 'str_value' rather than 'value' self.value = 3.8 @@ -102,7 +100,6 @@ class TestFillConstantBatchSizeLike3(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike4(TestFillConstantBatchSizeLike): - def init_value(self): # str_value = 'inf' self.value = 3.8 @@ -111,7 +108,6 @@ class TestFillConstantBatchSizeLike4(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike5(TestFillConstantBatchSizeLike): - def init_value(self): # str_value = '-inf' self.value = 3.8 @@ -120,7 +116,6 @@ class TestFillConstantBatchSizeLike5(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike6(TestFillConstantBatchSizeLike): - def init_dtype(self): self.dtype = core.VarDesc.VarType.FP16 self.output_dtype = np.float16 @@ -130,20 +125,17 @@ class TestFillConstantBatchSizeLike6(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLike7(TestFillConstantBatchSizeLike): - def init_dtype(self): self.dtype = core.VarDesc.VarType.INT32 self.output_dtype = np.int32 class TestFillConstantBatchSizeLike8(TestFillConstantBatchSizeLike): - def init_force_cpu(self): self.force_cpu = True class TestFillConstantBatchSizeLike9(TestFillConstantBatchSizeLike): - def init_shape(self): self.input_shape = [4, 5] self.shape = [123, 92] @@ -177,11 +169,12 @@ class TestFillConstantBatchSizeLikeLodTensor(TestFillConstantBatchSizeLike): 'dtype': self.dtype, 'force_cpu': self.force_cpu, 'input_dim_idx': self.input_dim_idx, - 'output_dim_idx': self.output_dim_idx + 'output_dim_idx': self.output_dim_idx, } self.outputs = { - 'Out': np.full(self.output_shape, self.output_value, - self.output_dtype) + 'Out': np.full( + self.output_shape, self.output_value, self.output_dtype + ) } def init_shape(self): @@ -191,7 +184,8 @@ class TestFillConstantBatchSizeLikeLodTensor(TestFillConstantBatchSizeLike): class TestFillConstantBatchSizeLikeLodTensor2( - TestFillConstantBatchSizeLikeLodTensor): + TestFillConstantBatchSizeLikeLodTensor +): # test LodTensor with 'input_dim_idx' != 0 def init_shape(self): self.input_shape = [10, 20] diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py index 3d0f770b4eb64c13c26cfcedb318e63ed0ae7ebc..d3087514c637483bafeb80f779affd2d1f2381ad 100644 --- 
a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestFillConstant(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -49,7 +48,6 @@ class TestFillConstant(OpTest): class TestFillConstantInt(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -59,7 +57,7 @@ class TestFillConstantInt(OpTest): self.attrs = { 'shape': [123, 92], 'value': 1, - 'dtype': core.VarDesc.VarType.INT32 + 'dtype': core.VarDesc.VarType.INT32, } self.outputs = {'Out': np.full((123, 92), 1).astype(self.dtype)} @@ -74,7 +72,6 @@ class TestFillConstantInt(OpTest): class TestFillConstantInt64(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -84,7 +81,7 @@ class TestFillConstantInt64(OpTest): self.attrs = { 'shape': [123, 92], 'value': 1, - 'dtype': core.VarDesc.VarType.INT64 + 'dtype': core.VarDesc.VarType.INT64, } self.outputs = {'Out': np.full((123, 92), 1).astype(self.dtype)} @@ -99,7 +96,6 @@ class TestFillConstantInt64(OpTest): class TestFillConstantFP16(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -109,7 +105,7 @@ class TestFillConstantFP16(OpTest): self.attrs = { 'shape': [123, 92], 'value': 1.0, - 'dtype': core.VarDesc.VarType.FP16 + 'dtype': core.VarDesc.VarType.FP16, } self.outputs = {'Out': np.full((123, 92), 1.0).astype(self.dtype)} @@ -124,7 +120,6 @@ class TestFillConstantFP16(OpTest): class TestFillConstantBool(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -134,7 +129,7 @@ class TestFillConstantBool(OpTest): self.attrs = { 'shape': [123, 92], 'value': True, - 'dtype': core.VarDesc.VarType.BOOL + 'dtype': core.VarDesc.VarType.BOOL, } self.outputs = {'Out': np.full((123, 92), True).astype(self.dtype)} @@ -149,7 +144,6 @@ class TestFillConstantBool(OpTest): class TestFillConstantWithPlaceType(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py index 3095c18179256bab18d8cf66f9de8ba221d4528e..59b0c43c157686cbbcaf2931bd358fd9e05d1769 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestFillZerosLikeOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -44,31 +43,26 @@ class TestFillZerosLikeOp(OpTest): class TestFillZerosLikeOpBool(TestFillZerosLikeOp): - def init_dtype(self): self.dtype = np.bool_ class TestFillZerosLikeOpFp16(TestFillZerosLikeOp): - def init_dtype(self): self.dtype = np.float16 class TestFillZerosLikeOpFp64(TestFillZerosLikeOp): - def init_dtype(self): self.dtype = np.float64 class TestFillZerosLikeOpInt32(TestFillZerosLikeOp): - def init_dtype(self): self.dtype = np.int32 class TestFillZerosLikeOpInt64(TestFillZerosLikeOp): - def init_dtype(self): self.dtype = np.int64 diff --git a/python/paddle/fluid/tests/unittests/npu/test_flags_check_nan_inf_npu.py b/python/paddle/fluid/tests/unittests/npu/test_flags_check_nan_inf_npu.py index 69c586fb2d8841a80c7266dfd543bc53dd19b0ad..88949de237cd2b32011d9a3dccc4bf0cdd5c45b8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_flags_check_nan_inf_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_flags_check_nan_inf_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestCheckFiniteAndUnscale(unittest.TestCase): - def setUp(self): fluid.set_flags({'FLAGS_check_nan_inf': True}) diff --git a/python/paddle/fluid/tests/unittests/npu/test_flatten2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_flatten2_op_npu.py index f384620f2a20dcb75e163eef54ed95a99ed6bd6c..caf370ed098267af138832fb51c5b933fca1bafe 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_flatten2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_flatten2_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFlatten2Op(OpTest): - def setUp(self): self.set_npu() self.op_type = "flatten2" @@ -35,7 +34,7 @@ class TestFlatten2Op(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def set_npu(self): @@ -57,7 +56,6 @@ class TestFlatten2Op(OpTest): class TestFlatten2OpWithCornerAxis(TestFlatten2Op): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.axis = 0 @@ -65,7 +63,6 @@ class TestFlatten2OpWithCornerAxis(TestFlatten2Op): class TestFlatten2OpWithDefaultAxis(TestFlatten2Op): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -75,7 +72,6 @@ class TestFlatten2OpWithDefaultAxis(TestFlatten2Op): class TestFlatten2OpSixDims(TestFlatten2Op): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 diff --git a/python/paddle/fluid/tests/unittests/npu/test_flatten_contiguous_range_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_flatten_contiguous_range_op_npu.py index 7c1b6f1776a63513ba5f02db39d81a880b85abb0..1ee72cbac8c090076623b764e40108424f149e2a 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_flatten_contiguous_range_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_flatten_contiguous_range_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestFlattenOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "flatten_contiguous_range" @@ -39,7 +38,7 @@ class TestFlattenOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def set_npu(self): @@ -55,17 +54,16 @@ class TestFlattenOp(OpTest): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 self.stop_axis = -1 - self.new_shape = (120) + self.new_shape = 120 def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 1 @@ -75,12 +73,11 @@ class TestFlattenOp_1(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_2(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -90,12 +87,11 @@ class TestFlattenOp_2(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_3(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -105,12 +101,11 @@ class TestFlattenOp_3(TestFlattenOp): def 
init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_4(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = -2 @@ -120,12 +115,11 @@ class TestFlattenOp_4(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_5(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 2 @@ -135,12 +129,11 @@ class TestFlattenOp_5(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.start_axis = 3 @@ -150,12 +143,11 @@ class TestFlattenOpSixDims(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_Float32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -166,12 +158,11 @@ class TestFlattenOp_Float32(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_int32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -182,7 +173,7 @@ class TestFlattenOp_int32(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -190,7 +181,6 @@ class TestFlattenOp_int32(TestFlattenOp): class TestFlattenOp_uint8(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -201,7 +191,7 @@ class TestFlattenOp_uint8(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -209,7 +199,6 @@ class TestFlattenOp_uint8(TestFlattenOp): class TestFlattenOp_int8(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -220,7 +209,7 @@ class TestFlattenOp_int8(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -228,7 +217,6 @@ class TestFlattenOp_int8(TestFlattenOp): class TestFlattenOp_int64(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -239,7 +227,7 @@ class TestFlattenOp_int64(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -247,45 +235,58 @@ class TestFlattenOp_int64(TestFlattenOp): class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. 
+ x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_ValueError1(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) out = paddle.flatten(x_var, start_axis=2, stop_axis=1) self.assertRaises(ValueError, test_ValueError1) def test_ValueError2(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=10, stop_axis=1) self.assertRaises(ValueError, test_ValueError2) def test_ValueError3(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=2, stop_axis=10) self.assertRaises(ValueError, test_ValueError3) def test_type(): # dtype must be float32, float64, int8, int32, int64, uint8. - x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x2 = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x2 = x2.astype('float16') - x2_var = paddle.fluid.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2_var = paddle.fluid.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) paddle.flatten(x2_var) self.assertRaises(TypeError, test_type) @@ -297,7 +298,6 @@ class TestFlatten2OpError(unittest.TestCase): class TestStaticFlattenPythonAPI(unittest.TestCase): - def execute_api(self, x, start_axis=0, stop_axis=-1): return paddle.flatten(x, start_axis, stop_axis) @@ -307,9 +307,9 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[2, 3, 4, 4], - dtype='float32') + x = paddle.static.data( + name="x", shape=[2, 3, 4, 4], dtype='float32' + ) out = self.execute_api(x, start_axis=-2, stop_axis=-1) exe = paddle.static.Executor(place=paddle.NPUPlace(0)) @@ -318,17 +318,22 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI): - def execute_api(self, x, start_axis=0, stop_axis=-1): return x.flatten_(start_axis, stop_axis) class TestFlattenPython(unittest.TestCase): - def test_python_api(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. 
+ x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_InputError(): diff --git a/python/paddle/fluid/tests/unittests/npu/test_float_status_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_float_status_op_npu.py index 3db98ea8f3849e09af552917d9d7e39f1a36a638..fdc9b91dd4dfa895d705e69964209289ce089658 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_float_status_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_float_status_op_npu.py @@ -23,7 +23,6 @@ import paddle._legacy_C_ops as ops class TestGetFloatStatusOp(unittest.TestCase): - def setUp(self): device = paddle.set_device('npu') @@ -64,7 +63,6 @@ class TestGetFloatStatusOp(unittest.TestCase): class TestClearFloatStatusOp(unittest.TestCase): - def setUp(self): device = paddle.set_device('npu') diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py index fa6da7817de02857345bb41ff220cd83e33632ec..4d66c28d6fa4028fe986140e11c8165812d974d7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_nd_op_npu.py @@ -23,11 +23,11 @@ import paddle def gather_nd_grad(x, index): - dout_shape = index.shape[:-1] + x.shape[index.shape[-1]:] + dout_shape = index.shape[:-1] + x.shape[index.shape[-1] :] numel = 1 for i in dout_shape: numel = numel * i - dout = np.full(dout_shape, 1. / numel) + dout = np.full(dout_shape, 1.0 / numel) dx = np.full_like(x, 0) index = tuple(index.reshape(-1, index.shape[-1]).T) @@ -37,9 +37,8 @@ def gather_nd_grad(x, index): def test_class1(op_type, typename): - class TestGatherNdOpWithEmptyIndex(OpTest): - #Index has empty element, which means copy entire tensor + # Index has empty element, which means copy entire tensor def setUp(self): self.set_npu() @@ -48,7 +47,7 @@ def test_class1(op_type, typename): xnp = np.random.random((5, 20)).astype(typename) self.inputs = { 'X': xnp, - 'Index': np.array([[], []]).astype("int32") + 'Index': np.array([[], []]).astype("int32"), } self.outputs = { 'Out': np.vstack((xnp[np.newaxis, :], xnp[np.newaxis, :])) @@ -72,9 +71,7 @@ def test_class1(op_type, typename): def test_class2(op_type, typename): - class TestGatherNdOpWithIndex1(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -101,9 +98,8 @@ def test_class2(op_type, typename): def test_class3(op_type, typename): - class TestGatherNdOpWithLowIndex(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.set_npu() @@ -126,9 +122,9 @@ def test_class3(op_type, typename): if typename == "float16" or typename == "int64": self.__class__.no_need_check_grad = True else: - self.check_grad_with_place(self.place, ['X'], - 'Out', - user_defined_grads=[self.x_grad]) + self.check_grad_with_place( + self.place, ['X'], 'Out', user_defined_grads=[self.x_grad] + ) cls_name = "{0}_{1}_3".format(op_type, typename) TestGatherNdOpWithLowIndex.__name__ = cls_name @@ -136,9 +132,8 @@ def test_class3(op_type, typename): def test_class4(op_type, typename): - class TestGatherNdOpIndex1(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.set_npu() @@ -169,9 +164,8 @@ def test_class4(op_type, typename): def test_class5(op_type, typename): - class TestGatherNdOpWithSameIndexAsX(OpTest): - #Index has same rank as X's rank + # 
Index has same rank as X's rank def setUp(self): self.set_npu() @@ -181,7 +175,7 @@ def test_class5(op_type, typename): index = np.array([[1, 1], [2, 1]]).astype("int64") self.inputs = {'X': xnp, 'Index': index} - self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22] + self.outputs = {'Out': xnp[tuple(index.T)]} # [25, 22] def set_npu(self): self.__class__.use_npu = True @@ -201,9 +195,8 @@ def test_class5(op_type, typename): def test_class6(op_type, typename): - class TestGatherNdOpWithHighRankSame(OpTest): - #Both Index and X have high rank, and Rank(Index) = Rank(X) + # Both Index and X have high rank, and Rank(Index) = Rank(X) def setUp(self): self.set_npu() @@ -211,8 +204,9 @@ def test_class6(op_type, typename): self.op_type = "gather_nd" shape = (5, 2, 3, 1, 10) xnp = np.random.rand(*shape).astype(typename) - index = np.vstack([np.random.randint(0, s, size=2) - for s in shape]).T + index = np.vstack( + [np.random.randint(0, s, size=2) for s in shape] + ).T self.inputs = {'X': xnp, 'Index': index.astype("int32")} self.outputs = {'Out': xnp[tuple(index.T)]} @@ -235,9 +229,8 @@ def test_class6(op_type, typename): def test_class7(op_type, typename): - class TestGatherNdOpWithHighRankDiff(OpTest): - #Both Index and X have high rank, Rank(Index) < Rank(X) + # Both Index and X have high rank, Rank(Index) < Rank(X) def setUp(self): self.set_npu() @@ -246,7 +239,8 @@ def test_class7(op_type, typename): shape = (2, 3, 4, 1, 10) xnp = np.random.rand(*shape).astype(typename) index = np.vstack( - [np.random.randint(0, s, size=200) for s in shape]).T + [np.random.randint(0, s, size=200) for s in shape] + ).T index_re = index.reshape([20, 5, 2, 5]) self.inputs = {'X': xnp, 'Index': index_re.astype("int32")} @@ -270,7 +264,6 @@ def test_class7(op_type, typename): class TestGatherNdAPI(unittest.TestCase): - def test_imperative(self): paddle.disable_static() input_1 = np.array([[1, 2], [3, 4], [5, 6]]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py index bedc6f1efc7f9cf07c0f0d5caf05a609a0da4559..5e783b952af9c2b6c59f975db50a8a67147587d3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py @@ -34,7 +34,6 @@ def gather_numpy(x, index, axis): class TestGatherOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -43,7 +42,7 @@ class TestGatherOp(OpTest): xnp = np.random.random(self.x_shape).astype(self.x_type) self.inputs = { 'X': xnp, - 'Index': np.array(self.index).astype(self.index_type) + 'Index': np.array(self.index).astype(self.index_type), } self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} @@ -72,19 +71,17 @@ class TestGatherOp(OpTest): class TestCase1(TestGatherOp): - def config(self): """ For one dimension input """ - self.x_shape = (100) + self.x_shape = 100 self.x_type = "float32" self.index = [1, 3, 5] self.index_type = "int32" class API_TestGather(unittest.TestCase): - def test_out1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float32') @@ -94,17 +91,16 @@ class API_TestGather(unittest.TestCase): exe = fluid.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([1, 2]) - result, = exe.run(feed={ - "data1": input, - "index": index_1 - }, - fetch_list=[out]) + (result,) = exe.run( + feed={"data1": input, "index": index_1}, fetch_list=[out] + ) expected_output = np.array([[3, 
4], [5, 6]]) np.testing.assert_allclose(result, expected_output, rtol=1e-5) def test_out2(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32') index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') out = paddle.gather(x, index) @@ -112,17 +108,14 @@ class API_TestGather(unittest.TestCase): exe = paddle.static.Executor(place) x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float32') index_np = np.array([1, 1]).astype('int32') - result, = exe.run(feed={ - "x": x_np, - "index": index_np - }, - fetch_list=[out]) + (result,) = exe.run( + feed={"x": x_np, "index": index_np}, fetch_list=[out] + ) expected_output = gather_numpy(x_np, index_np, axis=0) np.testing.assert_allclose(result, expected_output, rtol=1e-5) class TestGatherGrad(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -135,9 +128,9 @@ class TestGatherGrad(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[8192, 768], dtype='float32') - index = paddle.static.data(name="index", - shape=[1232, 1], - dtype='int32') + index = paddle.static.data( + name="index", shape=[1232, 1], dtype='int32' + ) a.stop_gradient = False b = paddle.gather(a, index) @@ -156,15 +149,17 @@ class TestGatherGrad(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "index": index_np - }, - fetch_list=[b, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "index": index_np}, + fetch_list=[b, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res[0])) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res[0] + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py index 5f6b756f175542d53b3c7c6402745b8de749f226..326945bb478269631ecd54f5a02f823269477d33 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gaussian_random_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestNPUGaussianRandomOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "gaussian_random" @@ -39,7 +38,7 @@ class TestNPUGaussianRandomOp(OpTest): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -47,7 +46,7 @@ class TestNPUGaussianRandomOp(OpTest): def set_attrs(self): self.mean = 1.0 - self.std = 2. 
+ self.std = 2.0 def set_npu(self): self.__class__.use_npu = True @@ -68,12 +67,13 @@ class TestNPUGaussianRandomOp(OpTest): hist2, _ = np.histogram(data, range=(-3, 5)) hist2 = hist2.astype("float32") hist2 /= float(outs[0].size) - np.testing.assert_allclose(hist, - hist2, - rtol=0, - atol=0.01, - err_msg="hist: " + str(hist) + " hist2: " + - str(hist2)) + np.testing.assert_allclose( + hist, + hist2, + rtol=0, + atol=0.01, + err_msg="hist: " + str(hist) + " hist2: " + str(hist2), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py index 2a03c36622d2cc1efcf785c33df51d319225ab09..0c2d163becadd8c40c87f2c9ff2f9df27fa88b0c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gelu_op_npu.py @@ -32,7 +32,6 @@ def np_gelu(x): class TestGelu(OpTest): - def setUp(self): self.set_npu() self.op_type = "gelu" @@ -57,13 +56,12 @@ class TestGelu(OpTest): self.check_output_with_place(self.place, atol=1e-3) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.007) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.007 + ) class TestGeluFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "gelu" @@ -90,7 +88,6 @@ class TestGeluFp16(OpTest): class TestGeluNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -105,9 +102,9 @@ class TestGeluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) @@ -131,16 +128,17 @@ class TestGeluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py index e995e7f9475e9e98db93b2191eec8639af80bb50..06d0f5dd1d029c35da497e5cbceb5fae2f26f685 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py @@ -46,7 +46,6 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): class TestGroupNormOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -58,9 +57,9 @@ class TestGroupNormOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[2, 100, 3, 5], - dtype='int32') + x2 = fluid.layers.data( + name='x2', shape=[2, 100, 3, 5], dtype='int32' + ) groups = 2 fluid.layers.group_norm(x2, 
groups) @@ -68,7 +67,6 @@ class TestGroupNormOpError(unittest.TestCase): class TestGroupNormOp(OpTest): - def setUp(self): self.set_npu() self.op_type = 'group_norm' @@ -89,15 +87,19 @@ class TestGroupNormOp(OpTest): input = np.transpose(input, (0, 2, 3, 1)) scale = np.random.random([self.shape[1]]).astype(self.dtype) bias = np.random.random([self.shape[1]]).astype(self.dtype) - output, mean, var = group_norm_naive(input, scale, bias, - self.attrs['epsilon'], - self.attrs['groups'], - self.data_format) + output, mean, var = group_norm_naive( + input, + scale, + bias, + self.attrs['epsilon'], + self.attrs['groups'], + self.data_format, + ) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(input), 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias) + 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), } self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} self.attrs['data_layout'] = self.data_format @@ -120,67 +122,65 @@ class TestGroupNormOp(OpTest): output_names = 'Y' no_grad_set = set() cpu_place = fluid.CPUPlace() - cpu_grads = self._get_gradient(inputs_to_check, cpu_place, output_names, - no_grad_set) - npu_grads = self._get_gradient(inputs_to_check, self.place, - output_names, no_grad_set) - - self._assert_is_close(cpu_grads, npu_grads, inputs_to_check, - self.max_relative_error, - "Gradient Check between places") + cpu_grads = self._get_gradient( + inputs_to_check, cpu_place, output_names, no_grad_set + ) + npu_grads = self._get_gradient( + inputs_to_check, self.place, output_names, no_grad_set + ) + + self._assert_is_close( + cpu_grads, + npu_grads, + inputs_to_check, + self.max_relative_error, + "Gradient Check between places", + ) def init_test_case(self): pass class TestGroupNormOp1(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 class TestGroupNormOp2(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 class TestGroupNormOpBigEps1(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps2(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps3(TestGroupNormOp): - def init_test_case(self): self.attrs['epsilon'] = 0.5 class TestGroupNormOp1_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.data_format = "NHWC" class TestGroupNormOp2_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.data_format = "NHWC" class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 @@ -188,7 +188,6 @@ class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp): class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 @@ -196,20 +195,17 @@ class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp): class TestGroupNormOpBigEps3_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['epsilon'] = 0.5 self.data_format = "NHWC" class TestGroupNormOpFP16(TestGroupNormOp): - def init_dtype(self): self.dtype = np.float16 class TestGroupNormOpFP16_With_NHWC(TestGroupNormOp): - def init_dtype(self): self.dtype = np.float16 @@ -223,9 +219,9 @@ class TestGroupNormException(unittest.TestCase): data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64") def attr_data_format(): - out = fluid.layers.group_norm(input=data, - groups=2, - 
data_layout="NDHW") + out = fluid.layers.group_norm( + input=data, groups=2, data_layout="NDHW" + ) self.assertRaises(ValueError, attr_data_format) diff --git a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py index c7c69eef5e59a935f3402c9013c5afa19c00f4af..44155e4388062d74e4dd21345b4bb1c70bf158c0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py @@ -26,11 +26,10 @@ SEED = 2021 def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5): - return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype) + return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype) class TestNPUHardSigmoid(OpTest): - def setUp(self): paddle.enable_static() @@ -41,7 +40,7 @@ class TestNPUHardSigmoid(OpTest): x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype) lower_threshold = -self.offset / self.slope - upper_threshold = (1. - self.offset) / self.slope + upper_threshold = (1.0 - self.offset) / self.slope # Same reason as TestAbs delta = 0.005 @@ -73,21 +72,18 @@ class TestNPUHardSigmoid(OpTest): class TestNPUHardSigmoid2(TestNPUHardSigmoid): - def set_attrs(self): self.slope = 0.2 self.offset = 0.5 class TestNPUHardSigmoid3(TestNPUHardSigmoid): - def set_attrs(self): self.slope = 0.2 self.offset = 0.4 class TestNPUHardSigmoidFp16(TestNPUHardSigmoid): - def test_check_output(self): self.check_output_with_place(self.place, atol=1e-3) @@ -144,14 +140,14 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py index 3b0b5a712c20c1ae066fa5cf8c962c6bc6cacc81..86c4bf5402d1be8d65d19dc77e9fb8bb1638db8f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_hard_swish_op_npu.py @@ -24,15 +24,17 @@ import paddle.nn.functional as F def ref_hard_swish_grad(x, threshold=6.0, scale=6.0, offset=3.0): - dout = np.full_like(x, fill_value=1. 
/ x.size) + dout = np.full_like(x, fill_value=1.0 / x.size) tmp = ((x + offset) < threshold).astype(x.dtype) - dx = dout * (((x + offset) > 0).astype(x.dtype) * - (2 * x + offset) * tmp / scale + 1.0 - tmp) + dx = dout * ( + ((x + offset) > 0).astype(x.dtype) * (2 * x + offset) * tmp / scale + + 1.0 + - tmp + ) return dx class TestHardSwishNPU(OpTest): - def setUp(self): paddle.enable_static() @@ -45,13 +47,12 @@ class TestHardSwishNPU(OpTest): threshold = 6.0 scale = 6.0 offset = 3.0 - #the same with TestAbs + # the same with TestAbs x[np.abs(x + offset) < 0.005] = 0.02 x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02 out = ( - x * - (np.minimum(np.maximum(x + offset, 0.), threshold) / scale)).astype( - self.dtype) + x * (np.minimum(np.maximum(x + offset, 0.0), threshold) / scale) + ).astype(self.dtype) self.x_grad = ref_hard_swish_grad(x, threshold, scale, offset) self.inputs = {'X': x} @@ -72,13 +73,12 @@ class TestHardSwishNPU(OpTest): # can't satisfy the default precision requirement # when compared with numeric_grads, but the results on # NPU and CPU are same (verified in TestHardSwishNPUWithCPU) - self.check_grad_with_place(self.place, ['X'], - 'Out', - user_defined_grads=[self.x_grad]) + self.check_grad_with_place( + self.place, ['X'], 'Out', user_defined_grads=[self.x_grad] + ) class TestHardSwishNPUFp16(TestHardSwishNPU): - def test_check_output(self): self.check_output_with_place(self.place) @@ -88,7 +88,6 @@ class TestHardSwishNPUFp16(TestHardSwishNPU): # test the result of hard_swish and hard_swish_grad on CPU and NPU class TestHardSwishNPUWithCPU(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -116,17 +115,33 @@ class TestHardSwishNPUWithCPU(unittest.TestCase): np.testing.assert_allclose( self.out_y.numpy(), y.numpy(), - err_msg="Output of NPU HardSwish forward has diff at " + - str(self.place) + "\nExpect " + str(self.out_y) + "\n" + "But Got" + - str(y) + " in class " + self.__class__.__name__ + ".", - rtol=1e-5) + err_msg="Output of NPU HardSwish forward has diff at " + + str(self.place) + + "\nExpect " + + str(self.out_y) + + "\n" + + "But Got" + + str(y) + + " in class " + + self.__class__.__name__ + + ".", + rtol=1e-5, + ) np.testing.assert_allclose( self.out_g.numpy(), data.grad.numpy(), - err_msg="Output of NPU HardSwish backward has diff at " + - str(self.place) + "\nExpect " + str(self.out_g) + "\n" + "But Got" + - str(data.grad) + " in class " + self.__class__.__name__ + ".", - rtol=1e-5) + err_msg="Output of NPU HardSwish backward has diff at " + + str(self.place) + + "\nExpect " + + str(self.out_g) + + "\n" + + "But Got" + + str(data.grad) + + " in class " + + self.__class__.__name__ + + ".", + rtol=1e-5, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_huber_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_huber_loss_op_npu.py index d9a83d74e1f125343f60135a260839bf5b5550b2..0e81f00de62c8826a30a54ce59d33aba26862fc6 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_huber_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_huber_loss_op_npu.py @@ -33,10 +33,10 @@ def huber_loss_forward(val, delta): return delta * (abs_val - 0.5 * delta) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestHuberLossOp(OpTest): - def setUp(self): self.set_npu() self.op_type = 'huber_loss' @@ -50,11 +50,11 @@ class TestHuberLossOp(OpTest): 
def set_inputs(self): shape = self.set_shape() - x = np.random.uniform(0, 1., shape).astype(self.dtype) - y = np.random.uniform(0, 1., shape).astype(self.dtype) + x = np.random.uniform(0, 1.0, shape).astype(self.dtype) + y = np.random.uniform(0, 1.0, shape).astype(self.dtype) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } def set_attrs(self): @@ -64,8 +64,9 @@ class TestHuberLossOp(OpTest): delta = self.attrs['delta'] shape = self.set_shape() residual = self.inputs['Y'] - self.inputs['X'] - loss = np.vectorize(huber_loss_forward)(residual, - delta).astype(self.dtype) + loss = np.vectorize(huber_loss_forward)(residual, delta).astype( + self.dtype + ) self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)} def set_shape(self): @@ -84,46 +85,48 @@ class TestHuberLossOp(OpTest): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.008, - no_grad_set=set("residual")) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.008, + no_grad_set=set("residual"), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.008, - no_grad_set=set('residual')) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.008, + no_grad_set=set('residual'), + ) def TestHuberLossOp1(TestHuberLossOp): - def set_shape(self): - return (64) + return 64 def TestHuberLossOp2(TestHuberLossOp): - def set_shape(self): return (6, 6) def TestHuberLossOp3(TestHuberLossOp): - def set_shape(self): return (6, 6, 1) def TestHuberLossOpFP16(TestHuberLossOp): - def init_dtype(self): self.dtype = np.float16 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestHuberLossOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input and label must be Variable @@ -138,10 +141,12 @@ class TestHuberLossOpError(unittest.TestCase): # the dtype of input and label must be float32 or float64 xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32") lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32") - self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr, - delta) - self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2, - delta) + self.assertRaises( + TypeError, fluid.layers.huber_loss, xw2, lr, delta + ) + self.assertRaises( + TypeError, fluid.layers.huber_loss, xr, lw2, delta + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py index c84abc5d479e5f76dcc85faa6bf4b84155de7dd7..39f11737bce6cf9638436459531b17e5b4f70b6a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py @@ -29,7 +29,6 @@ NPUPlace = 0 class TestIncrement(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(NPUPlace) @@ -37,8 +36,9 @@ class TestIncrement(OpTest): self.init_dtype() self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype(np.array([1]).astype(self.dtype)), + 'X': OpTest.np_dtype_to_fluid_dtype( + np.array([1]).astype(self.dtype) + ), } self.attrs = {"Step": 1} @@ -56,7 +56,6 @@ class 
TestIncrement(OpTest): class TestIncrementFP16(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(NPUPlace) @@ -64,8 +63,9 @@ class TestIncrementFP16(OpTest): self.init_dtype() self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype(np.array([1]).astype(self.dtype)), + 'X': OpTest.np_dtype_to_fluid_dtype( + np.array([1]).astype(self.dtype) + ), } self.pre_input_id = id(self.inputs['X']) @@ -83,7 +83,6 @@ class TestIncrementFP16(OpTest): class TestIncrementINT64(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(NPUPlace) @@ -91,8 +90,9 @@ class TestIncrementINT64(OpTest): self.init_dtype() self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype(np.array([1]).astype(self.dtype)), + 'X': OpTest.np_dtype_to_fluid_dtype( + np.array([1]).astype(self.dtype) + ), } self.pre_input_id = id(self.inputs['X']) @@ -110,7 +110,6 @@ class TestIncrementINT64(OpTest): class TestIncrementInplace(unittest.TestCase): - def test_npu(self): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -129,9 +128,13 @@ class TestIncrementInplace(unittest.TestCase): exe = paddle.static.Executor(place) exe.run(startup_prog) - b_value = exe.run(main_prog, feed={ - "a": a_np, - }, fetch_list=[b]) + b_value = exe.run( + main_prog, + feed={ + "a": a_np, + }, + fetch_list=[b], + ) print('input a id is : {}'.format(id(a))) print('input b id is : {}'.format(id(b))) diff --git a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py index e64d556ebdc6d5ed45c54a89a4dcd9308df41f3c..5883ef7b567017815ecfe063203a0c479f2014ce 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestIndexSampleOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -35,10 +34,9 @@ class TestIndexSampleOp(OpTest): self.op_type = "index_sample" self.config() xnp = np.random.random(self.x_shape).astype(self.dtype) - indexnp = np.random.randint(low=0, - high=self.x_shape[1], - size=self.index_shape).astype( - self.index_type) + indexnp = np.random.randint( + low=0, high=self.x_shape[1], size=self.index_shape + ).astype(self.index_type) self.inputs = {'X': xnp, 'Index': indexnp} index_array = [] for i in range(self.index_shape[0]): @@ -65,7 +63,6 @@ class TestIndexSampleOp(OpTest): class TestCase1(TestIndexSampleOp): - def config(self): """ For one dimension input @@ -77,7 +74,6 @@ class TestCase1(TestIndexSampleOp): class TestCase2(TestIndexSampleOp): - def config(self): """ For int64_t index type @@ -89,7 +85,6 @@ class TestCase2(TestIndexSampleOp): class TestCase3(TestIndexSampleOp): - def config(self): """ For int index type @@ -101,7 +96,6 @@ class TestCase3(TestIndexSampleOp): class TestCase4(TestIndexSampleOp): - def config(self): """ For int64 index type @@ -113,7 +107,6 @@ class TestCase4(TestIndexSampleOp): class TestCase5(TestIndexSampleOp): - def config(self): """ For float16 x type @@ -129,7 +122,6 @@ class TestCase5(TestIndexSampleOp): class TestCase6(TestCase5): - def config(self): """ For int32 x type @@ -142,7 +134,6 @@ class TestCase6(TestCase5): class TestCase7(TestCase5): - def config(self): """ For int64 x type @@ -155,7 +146,6 @@ class TestCase7(TestCase5): class TestIndexSampleShape(unittest.TestCase): - def test_shape(self): paddle.enable_static() # create x value @@ -166,8 +156,9 @@ class TestIndexSampleShape(unittest.TestCase): # 
create index value index_shape = (2, 3) index_type = "int32" - index_np = np.random.randint(low=0, high=x_shape[1], - size=index_shape).astype(index_type) + index_np = np.random.randint( + low=0, high=x_shape[1], size=index_shape + ).astype(index_type) x = fluid.data(name='x', shape=[-1, 5], dtype='float32') index = fluid.data(name='index', shape=[-1, 3], dtype='int32') @@ -182,18 +173,24 @@ class TestIndexSampleShape(unittest.TestCase): class TestIndexSampleDynamic(unittest.TestCase): - def test_result(self): with fluid.dygraph.guard(paddle.NPUPlace(0)): - x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]], - dtype='float32') - index = paddle.to_tensor([[0, 1, 2], [1, 2, 3], [0, 0, 0]], - dtype='int32') + x = paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ], + dtype='float32', + ) + index = paddle.to_tensor( + [[0, 1, 2], [1, 2, 3], [0, 0, 0]], dtype='int32' + ) out_z1 = paddle.index_sample(x, index) - except_output = np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0], - [9.0, 9.0, 9.0]]) + except_output = np.array( + [[1.0, 2.0, 3.0], [6.0, 7.0, 8.0], [9.0, 9.0, 9.0]] + ) assert out_z1.numpy().all() == except_output.all() diff --git a/python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py index 7050afa44110158419c23c6458c15ea93c00b6a5..39b0a01189313fd0a9f8f8f4f90f5b878b1296b5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_index_select_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestNPUIndexSelect(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -34,15 +33,17 @@ class TestNPUIndexSelect(OpTest): self.config() x_np = np.random.random(self.x_shape).astype(self.x_type) - index_np = np.random.randint(low=0, - high=self.x_shape[self.dim], - size=self.index_size, - dtype=self.index_type) + index_np = np.random.randint( + low=0, + high=self.x_shape[self.dim], + size=self.index_size, + dtype=self.index_type, + ) # compute real output as baseline. 
- outer_loop = np.prod(self.x_shape[:self.dim]) + outer_loop = np.prod(self.x_shape[: self.dim]) outer_loop = outer_loop.astype(self.index_type) - x_reshape = [outer_loop] + list(self.x_shape[self.dim:]) + x_reshape = [outer_loop] + list(self.x_shape[self.dim :]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) out_list = [] @@ -76,7 +77,6 @@ class TestNPUIndexSelect(OpTest): class TestNPUIndexSelectCase2(TestNPUIndexSelect): - def config(self): self.dim = -2 self.x_type = np.float32 @@ -86,7 +86,6 @@ class TestNPUIndexSelectCase2(TestNPUIndexSelect): class TestNPUIndexSelectCase3(TestNPUIndexSelect): - def config(self): self.dim = 0 self.x_type = np.float32 @@ -96,7 +95,6 @@ class TestNPUIndexSelectCase3(TestNPUIndexSelect): class TestNPUIndexSelectCase4(TestNPUIndexSelect): - def config(self): self.dim = -1 self.x_type = np.float32 @@ -106,10 +104,14 @@ class TestNPUIndexSelectCase4(TestNPUIndexSelect): class TestNPUIndexSelectAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]).astype('float32') + self.data_x = np.array( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ] + ).astype('float32') self.data_index = np.array([0, 1, 1]).astype('int32') def test_index_select_api(self): @@ -123,14 +125,14 @@ class TestNPUIndexSelectAPI(unittest.TestCase): index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index, axis=1) exe = paddle.static.Executor(paddle.NPUPlace(0)) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 10.0]]).astype('float32') + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ).astype('float32') np.testing.assert_allclose(expect_out, np.array(res)) # case 2: @@ -139,14 +141,14 @@ class TestNPUIndexSelectAPI(unittest.TestCase): index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index) exe = paddle.static.Executor(paddle.NPUPlace(0)) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]).astype('float32') + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ).astype('float32') np.testing.assert_allclose(expect_out, np.array(res)) def test_dygraph_index_select_api(self): @@ -159,8 +161,9 @@ class TestNPUIndexSelectAPI(unittest.TestCase): index = paddle.to_tensor(self.data_index) z = paddle.index_select(x, index) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]).astype('float32') + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ).astype('float32') np.testing.assert_allclose(expect_out, np_z) # case 2: @@ -168,8 +171,9 @@ class TestNPUIndexSelectAPI(unittest.TestCase): index = paddle.to_tensor(self.data_index) z = paddle.index_select(x, index, axis=1) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 
10.0]]).astype('float32') + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ).astype('float32') np.testing.assert_allclose(expect_out, np_z) diff --git a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py index 761c38a8576bbef6eaa0b53d0eedf68c36ec837b..1abc4af24cd4f82ef8db6af4892f5a4589b5f83f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestInstanceNorm(unittest.TestCase): - def test_dygraph(self): places = [fluid.NPUPlace(0)] for p in places: diff --git a/python/paddle/fluid/tests/unittests/npu/test_iou_similarity_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_iou_similarity_op_npu.py index 2485c8308c1c3930130cc13462ecb70a6d66d39b..9b7b005dd3a7c45b509f92a873cd434d7b440803 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_iou_similarity_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_iou_similarity_op_npu.py @@ -28,7 +28,6 @@ np.random.seed(2021) class TestNpuIouSimilarityOp(OpTest): - def setUp(self): self.op_type = "iou_similarity" self.set_npu() @@ -58,7 +57,7 @@ class TestNpuIouSimilarityOp(OpTest): self.boxes1_lod = [[1 for _ in range(self.N)]] self.inputs = { 'X': (self.boxes1, self.boxes1_lod), - 'Y': self.boxes2 + 'Y': self.boxes2, } else: self.inputs = {'X': self.boxes1, 'Y': self.boxes2} @@ -74,7 +73,9 @@ class TestNpuIouSimilarityOp(OpTest): def test_check_output(self): self.check_output_with_place(self.place) - def _compute_iou(self, ): + def _compute_iou( + self, + ): for row in range(self.boxes1.shape[0]): for col in range(self.boxes2.shape[0]): xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] @@ -104,7 +105,6 @@ class TestNpuIouSimilarityOp(OpTest): class TestNpuIouSimilarityOpWithLoD(TestNpuIouSimilarityOp): - def set_init_config(self): super(TestNpuIouSimilarityOpWithLoD, self).set_init_config() self.box_normalized = True @@ -112,7 +112,6 @@ class TestNpuIouSimilarityOpWithLoD(TestNpuIouSimilarityOp): class TestNpuIouSimilarityOpWithBoxNormalized(TestNpuIouSimilarityOp): - def set_init_config(self): super(TestNpuIouSimilarityOpWithBoxNormalized, self).set_init_config() self.box_normalized = True @@ -120,7 +119,6 @@ class TestNpuIouSimilarityOpWithBoxNormalized(TestNpuIouSimilarityOp): def TestNpuIouSimilarityOpFp16(TestNpuIouSimilarityOp): - def init_dtype(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py index f939716f37fbb65a1aff922889639f9bf2d96b03..9834ea0d5f2df216cafe1dfebf87cf3982198479 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py @@ -23,10 +23,10 @@ import paddle paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestEmpty(OpTest): - def setUp(self): self.set_npu() self.init_dtype() @@ -48,23 +48,24 @@ class TestEmpty(OpTest): self.check_output_with_place(self.place) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestNotEmpty(TestEmpty): - def 
set_data(self): self.inputs = {'X': np.array([])} self.outputs = {'Out': np.array([True])} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestIsEmptyOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_data = np.random.random((3, 2)).astype("float32") def test_Variable(): @@ -75,27 +76,27 @@ class TestIsEmptyOpError(unittest.TestCase): def test_type(): # dtype must be float32, float16 in NPU - x3 = paddle.static.data(name="x3", - shape=[4, 32, 32], - dtype="bool") + x3 = paddle.static.data( + name="x3", shape=[4, 32, 32], dtype="bool" + ) res = paddle.is_empty(x=x3) self.assertRaises(TypeError, test_type) def test_name_type(): # name type must be string. - x4 = paddle.static.data(name="x4", - shape=[3, 2], - dtype="float32") + x4 = paddle.static.data( + name="x4", shape=[3, 2], dtype="float32" + ) res = paddle.is_empty(x=x4, name=1) self.assertRaises(TypeError, test_name_type) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestIsEmptyOpDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static(paddle.NPUPlace(0)) input = paddle.rand(shape=[4, 32, 32], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py index bf5042c01d2273731ee3e0841b08c8139ba72b0b..3a55d9973af9733122c1827d6696a55e905bfacf 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestKLDivLossOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -55,10 +54,13 @@ class TestKLDivLossOp(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Loss', - no_grad_set=set(["Target"]), - max_relative_error=0.15) + self.check_grad_with_place( + self.place, + ['X'], + 'Loss', + no_grad_set=set(["Target"]), + max_relative_error=0.15, + ) def initTestCase(self): self.x_shape = (4, 5, 5) @@ -66,28 +68,24 @@ class TestKLDivLossOp(OpTest): class TestKLDivLossOp2(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (3, 2, 7, 7) self.reduction = 'none' class TestKLDivLossOp3(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 7, 9) self.reduction = 'mean' class TestKLDivLossOp4(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (5, 20) self.reduction = 'sum' class TestKLDivLossOp_fp16(TestKLDivLossOp): - def init_dtype(self): self.dtype = 'float16' @@ -95,17 +93,22 @@ class TestKLDivLossOp_fp16(TestKLDivLossOp): self.check_output_with_place(self.place, atol=3e-1) def test_check_grad(self): - input_grad = -self.inputs['Target'] * ( - self.inputs['Target'] > 0) / self.inputs['Target'].shape[0] - self.check_grad_with_place(self.place, ['X'], - 'Loss', - no_grad_set=set(["Target"]), - max_relative_error=0.2, - user_defined_grads=[input_grad]) + input_grad = ( + -self.inputs['Target'] + * (self.inputs['Target'] > 0) + / self.inputs['Target'].shape[0] + ) + 
self.check_grad_with_place( + self.place, + ['X'], + 'Loss', + no_grad_set=set(["Target"]), + max_relative_error=0.2, + user_defined_grads=[input_grad], + ) class TestKLDivLossDygraph(unittest.TestCase): - def run_kl_loss(self, reduction, shape=(5, 20)): x = np.random.uniform(-10, 10, shape).astype('float32') target = np.random.uniform(-10, 10, shape).astype('float32') @@ -113,8 +116,9 @@ class TestKLDivLossDygraph(unittest.TestCase): with paddle.fluid.dygraph.guard(paddle.NPUPlace(0)): kldiv_criterion = paddle.nn.KLDivLoss(reduction) - pred_loss = kldiv_criterion(paddle.to_tensor(x), - paddle.to_tensor(target)) + pred_loss = kldiv_criterion( + paddle.to_tensor(x), paddle.to_tensor(target) + ) np.testing.assert_allclose(pred_loss.numpy(), gt_loss, rtol=1e-6) def test_kl_loss_batchmean(self): @@ -140,7 +144,6 @@ class TestKLDivLossDygraph(unittest.TestCase): class TestKLDivLossTypePromotion(unittest.TestCase): - def test_kl_div_promotion(self): with paddle.fluid.dygraph.guard(paddle.NPUPlace(0)): x1 = paddle.rand([5, 20], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/npu/test_label_smooth_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_label_smooth_op_npu.py index 26bcf0f6f860a62f13e10db0023a2df3b68b9326..8bc5c9f5439a6b79fc9895a4c3dae82bd08fbaac 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_label_smooth_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_label_smooth_op_npu.py @@ -25,10 +25,10 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestLabelSmoothOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "label_smooth" @@ -62,8 +62,9 @@ class TestLabelSmoothOp(OpTest): self.attrs = {"epsilon": epsilon} def set_outputs(self): - dist = None if 'PriorDist' not in self.inputs else self.inputs[ - 'PriorDist'] + dist = ( + None if 'PriorDist' not in self.inputs else self.inputs['PriorDist'] + ) out = self.calc_out(self.inputs['X'], self.attrs['epsilon'], dist) self.outputs = {'Out': out} @@ -78,15 +79,14 @@ class TestLabelSmoothOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.5 + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): - def set_inputs(self): super(TestLabelSmoothOpWithPriorDist, self).set_inputs() label_dim = self.inputs['X'].shape[-1] @@ -95,39 +95,33 @@ class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): class TestLabelSmoothOp3D(TestLabelSmoothOp): - def set_inputs(self): super(TestLabelSmoothOp3D, self).set_inputs() self.inputs['X'].reshape([2, -1, self.inputs['X'].shape[-1]]) class TestLabelSmoothOpWithPriorDist3D(TestLabelSmoothOpWithPriorDist): - def set_inputs(self): super(TestLabelSmoothOpWithPriorDist3D, self).set_inputs() self.inputs['X'].reshape([2, -1, self.inputs['X'].shape[-1]]) class TestLabelSmoothOpFP16(TestLabelSmoothOp): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOpWithPriorDistFP16(TestLabelSmoothOpWithPriorDist): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOp3DFP16(TestLabelSmoothOp3D): - def init_dtype(self): self.dtype = np.float16 class TestLabelSmoothOpWithPriorDist3DFP16(TestLabelSmoothOpWithPriorDist3D): - def init_dtype(self): 
self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py index f87cf8c8053036a4765b78cd43a46305a10f7c2e..6ca621b647d57ecfe669c4955bfb37ecc28060a3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_layer_norm_op_npu.py @@ -23,7 +23,10 @@ from operator import mul import paddle import paddle.fluid as fluid import paddle.fluid.core as core -from test_layer_norm_op import _reference_layer_norm_naive, _reference_layer_norm_grad +from test_layer_norm_op import ( + _reference_layer_norm_naive, + _reference_layer_norm_grad, +) paddle.enable_static() @@ -36,7 +39,6 @@ _set_use_system_allocator(False) class TestLayerNormOp(unittest.TestCase): - def setUp(self): self.use_cudnn = True self.set_npu() @@ -51,44 +53,54 @@ class TestLayerNormOp(unittest.TestCase): self.atol = 1e-4 def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor).astype( - np_array.dtype).reshape(np_array.shape), - np_array, - atol=atol, - err_msg=msg) - - def check_forward_backward(self, - shape, - begin_norm_axis, - has_scale=True, - has_bias=True, - y_grad_scale=1.0, - use_mkldnn=False): - - def test_with_place(place, - shape, - begin_norm_axis, - use_mkldnn=use_mkldnn): + np.testing.assert_allclose( + np.array(tensor).astype(np_array.dtype).reshape(np_array.shape), + np_array, + atol=atol, + err_msg=msg, + ) + + def check_forward_backward( + self, + shape, + begin_norm_axis, + has_scale=True, + has_bias=True, + y_grad_scale=1.0, + use_mkldnn=False, + ): + def test_with_place( + place, shape, begin_norm_axis, use_mkldnn=use_mkldnn + ): # attr epsilon = 0.00001 x_shape = shape - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) scale_shape = [D] np.random.seed(123) x = np.random.random_sample(x_shape).astype(self.dtype) - scale = np.random.random_sample(scale_shape).astype( - np.float32) if has_scale else None - bias = np.random.random_sample(scale_shape).astype( - np.float32) if has_bias else None + scale = ( + np.random.random_sample(scale_shape).astype(np.float32) + if has_scale + else None + ) + bias = ( + np.random.random_sample(scale_shape).astype(np.float32) + if has_bias + else None + ) y_grad = (np.random.random_sample(x_shape) * y_grad_scale).astype( - self.dtype) + self.dtype + ) # reference forward & backward y, mean, variance = _reference_layer_norm_naive( - x, scale, bias, epsilon, begin_norm_axis) + x, scale, bias, epsilon, begin_norm_axis + ) x_grad, scale_grad, bias_grad = _reference_layer_norm_grad( - x, y_grad, scale, bias, mean, variance, begin_norm_axis) + x, y_grad, scale, bias, mean, variance, begin_norm_axis + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -103,9 +115,11 @@ class TestLayerNormOp(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype=self.dtype, - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype=self.dtype, + shape=ground_truth[name].shape, + ) inputs = {"X": block.var('x')} fetch_list = [ 'y', @@ -125,17 +139,20 @@ class TestLayerNormOp(unittest.TestCase): outputs={ "Y": block.var('y'), "Mean": block.var('mean'), # share the same memory - "Variance": - block.var('variance'), # share the same memory + "Variance": block.var( + 'variance' + ), # share the same 
memory }, attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": use_mkldnn - }) + "use_mkldnn": use_mkldnn, + }, + ) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - layer_norm_op.desc, set(), []) + layer_norm_op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -149,46 +166,59 @@ class TestLayerNormOp(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in ['x', 'scale', 'bias', 'y@GRAD'] - }, - fetch_list=fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=fetch_list, + ) self.__assert_close(y, out[0], "y", self.atol) self.__assert_close(mean, out[1], "mean") self.__assert_close(variance, out[2], "variance", 1e-3) self.__assert_close(x_grad, out[3], "x_grad", 1e-2) if has_scale: - self.__assert_close(scale_grad, - out[fetch_list.index('scale@GRAD')], - "scale_grad", 1e-2) + self.__assert_close( + scale_grad, + out[fetch_list.index('scale@GRAD')], + "scale_grad", + 1e-2, + ) if has_bias: - self.__assert_close(bias_grad, - out[fetch_list.index('bias@GRAD')], - "bias_grad", self.atol) + self.__assert_close( + bias_grad, + out[fetch_list.index('bias@GRAD')], + "bias_grad", + self.atol, + ) test_with_place(self.place, shape, begin_norm_axis) def test_check_forward_backward_with_scale_and_bias(self): self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=True) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=True, - has_bias=False) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=False) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=True, + ) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=True, + has_bias=False, + ) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=False, + ) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) class TestLayerNormOpFP16(TestLayerNormOp): - def init_dtype(self): self.dtype = np.float16 self.atol = 1e-2 diff --git a/python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py index 07067ee2519aeac77c423d81431527c1b98e82d5..e91a65faeec163f3220e5d95036a40472651e6bc 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_leaky_relu_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestLeadyRelu(OpTest): - def setUp(self): self.set_npu() self.op_type = "leaky_relu" @@ -63,33 +62,29 @@ class TestLeadyRelu(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.006 + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') class TestLeadyReluFP16(TestLeadyRelu): - def init_dtype(self): self.dtype = np.float16 class TestLeadyRelu2(TestLeadyRelu): - def set_attrs(self): self.attrs = {'alpha': 0.5} class TestLeadyRelu3(TestLeadyRelu): - def set_attrs(self): self.attrs = {'alpha': -0.5} 
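For reference, the leaky_relu cases above (TestLeadyRelu, TestLeadyRelu2, TestLeadyRelu3) only vary self.attrs['alpha'] and compare the NPU op output against an elementwise baseline. A minimal NumPy sketch of that baseline follows; the helper name np_leaky_relu and the sample input shape are illustrative assumptions for this note and are not part of the patch itself.

import numpy as np

def np_leaky_relu(x, alpha):
    # leaky_relu keeps non-negative inputs unchanged and scales
    # negative inputs by alpha, which is the attribute the
    # TestLeadyRelu subclasses override.
    return np.where(x >= 0, x, alpha * x)

x = np.random.uniform(-1, 1, [32, 32]).astype('float32')
for alpha in (0.5, -0.5):  # the values set by TestLeadyRelu2 / TestLeadyRelu3
    expected = np_leaky_relu(x, alpha)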
class TestLeakyReluNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -102,9 +97,9 @@ class TestLeakyReluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) y = paddle.nn.functional.leaky_relu(x) @@ -127,15 +122,17 @@ class TestLeakyReluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "x": x_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"x": x_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py index febc99093f18b281a2fbbeee8392bf0fe23943e9..87cd872e8cc9135bd8660e9c289d01f177d8921c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py @@ -28,10 +28,10 @@ def sigmoid_array(x): return 1 / (1 + np.exp(-x)) -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestLogLossOp(OpTest): - def setUp(self): self.set_npu() self.op_type = 'log_loss' @@ -58,8 +58,9 @@ class TestLogLossOp(OpTest): epsilon = self.attrs['epsilon'] labels = self.inputs['Labels'] predicted = self.inputs['Predicted'] - loss = -labels * np.log(predicted + epsilon) - ( - 1 - labels) * np.log(1 - predicted + epsilon) + loss = -labels * np.log(predicted + epsilon) - (1 - labels) * np.log( + 1 - predicted + epsilon + ) self.outputs = {'Loss': loss} def set_npu(self): @@ -75,10 +76,10 @@ class TestLogLossOp(OpTest): self.check_grad_with_place(self.place, ['Predicted'], 'Loss') -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestLogLossOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py index ea7dd3abde92891ea450afb3fac2992c75c31a7c..8745a66b45a4b4a3330611e308a689d95aea7136 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_log_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestLog(OpTest): - def setUp(self): self.set_npu() self.op_type = "log" @@ -55,7 +54,6 @@ class TestLog(OpTest): class TestLogFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "log" @@ -82,7 +80,6 @@ class TestLogFp16(OpTest): class TestLogNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -97,9 +94,9 @@ class TestLogNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = 
paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.log(c) @@ -123,16 +120,17 @@ class TestLogNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py index d5dabdeb2018b6f80d3f5addd1f4ef5711af8777..fc0c428983fa9e89206750af7de533d1d3e34796 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestLogSoftmaxNPUOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -39,7 +38,7 @@ class TestLogSoftmaxNPUOp(OpTest): self.axis = -1 self.set_attrs() self.set_dtype() - x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype) + x = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) self.inputs = {'X': x} @@ -64,18 +63,21 @@ class TestLogSoftmaxNPUOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], ['Out'], - user_defined_grads=[self.x_grad], - max_relative_error=0.02) + self.check_grad_with_place( + self.place, + ['X'], + ['Out'], + user_defined_grads=[self.x_grad], + max_relative_error=0.02, + ) else: - self.check_grad_with_place(self.place, ['X'], ['Out'], - user_defined_grads=[self.x_grad]) + self.check_grad_with_place( + self.place, ['X'], ['Out'], user_defined_grads=[self.x_grad] + ) def test_class(op_type, typename): - class TestLogSoftmaxShape(TestLogSoftmaxNPUOp): - def set_attrs(self): self.shape = [12, 10] @@ -88,9 +90,7 @@ def test_class(op_type, typename): def test_class2(op_type, typename): - class TestLogSoftmaxAxis(TestLogSoftmaxNPUOp): - def set_attrs(self): self.axis = 0 @@ -109,13 +109,14 @@ for _typename in {np.float32, np.float16}: class TestNNLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] - self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32) - self.place = paddle.NPUPlace(0) \ - if paddle.fluid.core.is_compiled_with_npu() \ + self.x = np.random.uniform(-1.0, 1.0, self.x_shape).astype(np.float32) + self.place = ( + paddle.NPUPlace(0) + if paddle.fluid.core.is_compiled_with_npu() else paddle.CPUPlace() + ) def check_api(self, axis=-1): ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x) @@ -142,13 +143,14 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.place = 
paddle.NPUPlace(0) \ - if paddle.fluid.core.is_compiled_with_npu() \ + self.place = ( + paddle.NPUPlace(0) + if paddle.fluid.core.is_compiled_with_npu() else paddle.CPUPlace() + ) def check_api(self, axis=-1, dtype=None): x = self.x.copy() diff --git a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py index 14422e8565a9e2b316ac9c3632f577804b1f1be7..a783e04c676340782d1a0cc96232e2a76b00a5df 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_logical_op_npu.py @@ -24,81 +24,31 @@ from paddle.static import Program, program_guard SUPPORTED_DTYPES = [bool] -TEST_META_OP_DATA = [{ - 'op_str': 'logical_and', - 'binary_op': True -}, { - 'op_str': 'logical_or', - 'binary_op': True -}, { - 'op_str': 'logical_not', - 'binary_op': False -}] +TEST_META_OP_DATA = [ + {'op_str': 'logical_and', 'binary_op': True}, + {'op_str': 'logical_or', 'binary_op': True}, + {'op_str': 'logical_not', 'binary_op': False}, +] TEST_META_SHAPE_DATA = { - 'XDimLargerThanYDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 5] - }, - 'XDimLargerThanYDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 1] - }, - 'XDimLargerThanYDim3': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [1, 4, 1] - }, - 'XDimLargerThanYDim4': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4, 1] - }, - 'XDimLargerThanYDim5': { - 'x_shape': [2, 3, 1, 5], - 'y_shape': [3, 1, 1] - }, - 'XDimLessThanYDim1': { - 'x_shape': [4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim2': { - 'x_shape': [1, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim3': { - 'x_shape': [3, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim4': { - 'x_shape': [3, 1, 1], - 'y_shape': [2, 3, 1, 5] - }, - 'XDimLessThanYDim5': { - 'x_shape': [4, 5], - 'y_shape': [2, 3, 4, 5] - }, - 'Axis1InLargerDim': { - 'x_shape': [1, 4, 5], - 'y_shape': [2, 3, 1, 5] - }, - 'EqualDim1': { - 'x_shape': [10, 7], - 'y_shape': [10, 7] - }, - 'EqualDim2': { - 'x_shape': [1, 1, 4, 5], - 'y_shape': [2, 3, 1, 5] - } + 'XDimLargerThanYDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 5]}, + 'XDimLargerThanYDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 1]}, + 'XDimLargerThanYDim3': {'x_shape': [2, 3, 4, 5], 'y_shape': [1, 4, 1]}, + 'XDimLargerThanYDim4': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4, 1]}, + 'XDimLargerThanYDim5': {'x_shape': [2, 3, 1, 5], 'y_shape': [3, 1, 1]}, + 'XDimLessThanYDim1': {'x_shape': [4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim2': {'x_shape': [1, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim3': {'x_shape': [3, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim4': {'x_shape': [3, 1, 1], 'y_shape': [2, 3, 1, 5]}, + 'XDimLessThanYDim5': {'x_shape': [4, 5], 'y_shape': [2, 3, 4, 5]}, + 'Axis1InLargerDim': {'x_shape': [1, 4, 5], 'y_shape': [2, 3, 1, 5]}, + 'EqualDim1': {'x_shape': [10, 7], 'y_shape': [10, 7]}, + 'EqualDim2': {'x_shape': [1, 1, 4, 5], 'y_shape': [2, 3, 1, 5]}, } TEST_META_WRONG_SHAPE_DATA = { - 'ErrorDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4] - }, - 'ErrorDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 3] - } + 'ErrorDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4]}, + 'ErrorDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 3]}, } @@ -157,16 +107,20 @@ def test(unit_test, use_npu=False, test_error=False): META_DATA = dict(TEST_META_WRONG_SHAPE_DATA) for shape_data in META_DATA.values(): for data_type in SUPPORTED_DTYPES: - meta_data['x_np'] = 
np_data_generator(shape_data['x_shape'], - dtype=data_type) - meta_data['y_np'] = np_data_generator(shape_data['y_shape'], - dtype=data_type) + meta_data['x_np'] = np_data_generator( + shape_data['x_shape'], dtype=data_type + ) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type + ) if meta_data['binary_op'] and test_error: # catch C++ Exception - unit_test.assertRaises(BaseException, run_static, - **meta_data) - unit_test.assertRaises(BaseException, run_dygraph, - **meta_data) + unit_test.assertRaises( + BaseException, run_static, **meta_data + ) + unit_test.assertRaises( + BaseException, run_dygraph, **meta_data + ) continue static_result = run_static(**meta_data) dygraph_result = run_dygraph(**meta_data) @@ -176,11 +130,11 @@ def test(unit_test, use_npu=False, test_error=False): np_result = np_op(meta_data['x_np']) unit_test.assertTrue((static_result == np_result).all()) unit_test.assertTrue( - (dygraph_result.numpy() == np_result).all()) + (dygraph_result.numpy() == np_result).all() + ) def test_type_error(unit_test, use_npu, type_str_map): - def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) error_type = ValueError @@ -215,24 +169,24 @@ def test_type_error(unit_test, use_npu, type_str_map): startup_program = paddle.static.Program() main_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', - shape=[10], - dtype=type_str_map['x']) - y = paddle.static.data(name='y', - shape=[10], - dtype=type_str_map['y']) + x = paddle.static.data( + name='x', shape=[10], dtype=type_str_map['x'] + ) + y = paddle.static.data( + name='y', shape=[10], dtype=type_str_map['y'] + ) check_type(meta_data['op_str'], x, y, binary_op) def type_map_factory(): - return [{ - 'x': x_type, - 'y': y_type - } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] + return [ + {'x': x_type, 'y': y_type} + for x_type in SUPPORTED_DTYPES + for y_type in SUPPORTED_DTYPES + ] class TestCPU(unittest.TestCase): - def test(self): test(self) @@ -246,7 +200,6 @@ class TestCPU(unittest.TestCase): class TestNPU(unittest.TestCase): - def test(self): test(self, True) diff --git a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py index 9efec50ddc59cf2cfe1a6daa39352597dc456329..0670b69851a26a5b76d41114270b10edfc51078b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestLookupTableV2(OpTest): - def setUp(self): self.set_npu() self.op_type = "lookup_table_v2" @@ -37,22 +36,22 @@ class TestLookupTableV2(OpTest): self.init_padding_idx() np.random.seed(SEED) w = np.random.random([self.vocab, self.dim]).astype(self.dtype) - x = np.random.randint(0, self.vocab, - size=(self.bsz, - self.seqlen)).astype(self.ids_dtype) + x = np.random.randint( + 0, self.vocab, size=(self.bsz, self.seqlen) + ).astype(self.ids_dtype) out = w[x] if self.padding_idx != -1: out[np.squeeze(x == self.padding_idx)] = np.zeros(self.dim) self.inputs = { 'W': OpTest.np_dtype_to_fluid_dtype(w), - 'Ids': OpTest.np_dtype_to_fluid_dtype(x) + 'Ids': OpTest.np_dtype_to_fluid_dtype(x), } self.attrs = { 'is_sparse': False, 'is_distributed': False, 'remote_prefetch': False, - 'padding_idx': self.padding_idx + 'padding_idx': self.padding_idx, } self.outputs = {'Out': out} @@ -78,9 +77,9 @@ class 
TestLookupTableV2(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['W'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['W'], 'Out', max_relative_error=0.01 + ) else: self.check_grad_with_place(self.place, ['W'], 'Out') @@ -98,7 +97,6 @@ class TestLookupTableV2FP16(TestLookupTableV2): class TestLookupTableV2Dim32(TestLookupTableV2): - def init_dims(self): self.bsz = 6 self.seqlen = 8 @@ -126,13 +124,11 @@ class TestLookupTableV2Dim32FP16(TestLookupTableV2): class TestLookupTableV2WithPadding(TestLookupTableV2): - def init_padding_idx(self): self.padding_idx = np.random.randint(0, self.vocab) class TestLookupTableV2WithPadding1(TestLookupTableV2): - def init_padding_idx(self): self.padding_idx = np.random.randint(0, self.vocab) diff --git a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py index 50c9ca56f6663342ef2fecf18d833554bb2504f5..8943b5ba95f428aa99d2a8cb205f824b78688862 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py @@ -33,7 +33,6 @@ def np_masked_select(x, mask): class TestMaskedSelectOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -63,33 +62,29 @@ class TestMaskedSelectOp(OpTest): class TestMaskedSelectOp1(TestMaskedSelectOp): - def init(self): self.shape = (6, 8, 9, 18) class TestMaskedSelectOp2(TestMaskedSelectOp): - def init(self): - self.shape = (168, ) + self.shape = (168,) class TestMaskedSelectOpFp16(TestMaskedSelectOp): - def init_dtype(self): self.dtype = np.float16 def test_check_grad(self): x_grad = self.inputs['Mask'].astype(self.dtype) x_grad = x_grad * (1 / x_grad.sum()) - self.check_grad_with_place(self.place, ['X'], - 'Y', - user_defined_grads=[x_grad]) + self.check_grad_with_place( + self.place, ['X'], 'Y', user_defined_grads=[x_grad] + ) @skip_check_grad_ci(reason="get_numeric_gradient not support int32") class TestMaskedSelectOpInt32(TestMaskedSelectOp): - def init_dtype(self): self.dtype = np.int32 @@ -99,7 +94,6 @@ class TestMaskedSelectOpInt32(TestMaskedSelectOp): @skip_check_grad_ci(reason="get_numeric_gradient not support int64") class TestMaskedSelectOpInt64(TestMaskedSelectOp): - def init_dtype(self): self.dtype = np.int64 @@ -108,7 +102,6 @@ class TestMaskedSelectOpInt64(TestMaskedSelectOp): class TestMaskedSelectAPI(unittest.TestCase): - def test_imperative_mode(self): paddle.disable_static(paddle.NPUPlace(0)) shape = (88, 6, 8) @@ -133,27 +126,26 @@ class TestMaskedSelectAPI(unittest.TestCase): exe = paddle.static.Executor(place=paddle.NPUPlace(0)) - res = exe.run(paddle.static.default_main_program(), - feed={ - "x": np_x, - "mask": np_mask - }, - fetch_list=[out]) + res = exe.run( + paddle.static.default_main_program(), + feed={"x": np_x, "mask": np_mask}, + fetch_list=[out], + ) self.assertEqual(np.allclose(res, np_out), True) class TestMaskedSelectError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='float32', name='x') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='mask_float') + mask_float = paddle.fluid.data( + 
shape=shape, dtype='float32', name='mask_float' + ) np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) diff --git a/python/paddle/fluid/tests/unittests/npu/test_matmul_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_matmul_op_npu.py index d00c8ab8674001ffecbf653fa384031723f728e4..9a2563ad306fa5b9f65f92b869cdd019ce7abc50 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_matmul_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_matmul_op_npu.py @@ -31,7 +31,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False, scale=1.0): # transpose X and Y appropriately. if transpose_X: if X.ndim == 1: - X = X.reshape((X.size, )) + X = X.reshape((X.size,)) elif X.ndim == 2: X = X.T else: @@ -40,7 +40,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False, scale=1.0): X = np.transpose(X, tuple(dim)) if transpose_Y: if Y.ndim == 1: - Y = Y.reshape((Y.size, )) + Y = Y.reshape((Y.size,)) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -76,14 +76,15 @@ class TestMatMulOp(OpTest): X = -0.1 + 0.2 * X Y = -0.1 + 0.2 * Y - Out = reference_matmul(X, Y, self.transpose_X, self.transpose_Y, - self.alpha) + Out = reference_matmul( + X, Y, self.transpose_X, self.transpose_Y, self.alpha + ) Out = Out.astype(self.dtype) self.inputs = {'X': X, 'Y': Y} self.attrs = { 'transpose_X': self.transpose_X, 'transpose_Y': self.transpose_Y, - 'alpha': self.alpha + 'alpha': self.alpha, } self.outputs = {'Out': Out} @@ -92,8 +93,8 @@ class TestMatMulOp(OpTest): self.place = paddle.NPUPlace(0) def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.transpose_X = False self.transpose_Y = False @@ -116,7 +117,7 @@ class TestMatMulOp1(TestMatMulOp): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.transpose_X = False self.transpose_Y = True @@ -129,7 +130,7 @@ class TestMatMulOp2(TestMatMulOp): def config(self): self.x_shape = (1, 2, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.transpose_X = True self.transpose_Y = False @@ -248,7 +249,7 @@ class TestMatMulOp12(TestMatMulOp): """ def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.transpose_X = False self.transpose_Y = False @@ -261,16 +262,14 @@ class TestMatMulOp13(TestMatMulOp): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.transpose_X = False self.transpose_Y = False -#--------------------test matmul alpha-------------------- +# --------------------test matmul alpha-------------------- def create_test_alpha_class(parent): - class TestMatMulOpAlphaCase(parent): - def init_alpha(self): self.alpha = 0.125 @@ -293,11 +292,9 @@ create_test_alpha_class(TestMatMulOp12) create_test_alpha_class(TestMatMulOp13) -#--------------------test matmul fp16-------------------- +# --------------------test matmul fp16-------------------- def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): - class TestMatMulOpFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -305,9 +302,12 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): self.check_output_with_place(self.place, atol=atol) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=max_relative_error) + 
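A recurring, substantive-looking change in these matmul tests is purely about Python literals: (100, ) becomes (100,) and the bare (100) becomes 100. Only the trailing comma creates a one-element tuple; parentheses on their own are just grouping, so (100) was already the integer 100 and the new spelling states that directly. A quick standalone check:

    # Trailing comma, not parentheses, is what makes a tuple.
    assert (100,) == (100, )        # spacing is cosmetic
    assert (100) == 100             # no comma, no tuple
    assert isinstance((100,), tuple)
    assert isinstance((100), int)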
self.check_grad_with_place( + self.place, + ['X', 'Y'], + 'Out', + max_relative_error=max_relative_error, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestMatMulOpFp16Case.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py index 1d3cdaf591523ec72e08a048bd754736a52d4e53..91883824cf5b68e88ceef03e193cb65bfaf7ed35 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py @@ -36,8 +36,8 @@ class TestMatMulV2Op(OpTest): self.place = paddle.NPUPlace(0) def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -76,7 +76,7 @@ class TestMatMulOp2(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.trans_x = False self.trans_y = True @@ -88,7 +88,7 @@ class TestMatMulOp3(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 1, 100, 2) self.trans_x = False self.trans_y = False @@ -100,7 +100,7 @@ class TestMatMulOp4(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -113,7 +113,7 @@ class TestMatMulOp5(TestMatMulV2Op): def config(self): self.x_shape = (1, 1, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = True self.trans_y = False @@ -125,7 +125,7 @@ class TestMatMulOp6(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 102, 1) - self.y_shape = (102, ) + self.y_shape = (102,) self.trans_x = True self.trans_y = False @@ -137,7 +137,7 @@ class TestMatMulOp7(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 1, 100) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -244,7 +244,7 @@ class TestMatMulOp16(TestMatMulV2Op): """ def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -257,7 +257,7 @@ class TestMatMulOp17(TestMatMulV2Op): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.trans_x = False self.trans_y = False @@ -286,13 +286,11 @@ class TestMatMulOpBroadcast2(TestMatMulV2Op): self.trans_y = True -#--------------------test matmul fp16-------------------- +# --------------------test matmul fp16-------------------- def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): - class TestMatMulOpFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -300,9 +298,12 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=2.5): self.check_output_with_place(self.place, atol=atol) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=max_relative_error) + self.check_grad_with_place( + self.place, + ['X', 'Y'], + 'Out', + max_relative_error=max_relative_error, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestMatMulOpFp16Case.__name__ = cls_name @@ -329,7 +330,6 @@ create_test_fp16_class(TestMatMulOp17) class TestMatMulV2API(unittest.TestCase): - def setUp(self): self.places = [paddle.CPUPlace()] if paddle.is_compiled_with_npu(): @@ -346,12 +346,11 @@ class TestMatMulV2API(unittest.TestCase): y_np = np.random.random([3, 4]).astype("float32") exe = 
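create_test_fp16_class above follows the test-factory idiom used throughout these files: it subclasses the given base case, swaps the dtype to float16, renames the class, and, in the part of the file this hunk does not show (treat this as an assumption), binds the result to a module-level name so the later create_test_fp16_class(TestMatMulOp...) calls produce cases that unittest can discover. A stripped-down, Paddle-free sketch of that idiom:

    import unittest
    import numpy as np

    class _BaseCase(unittest.TestCase):          # stand-in for a TestMatMulOp-style base case
        dtype = np.float32

        def test_dtype(self):
            self.assertIn(self.dtype, (np.float32, np.float16))

    def create_fp16_variant(parent):             # mirrors the shape of create_test_fp16_class
        class Fp16Case(parent):
            dtype = np.float16

        Fp16Case.__name__ = parent.__name__ + "Fp16"
        # Assumed registration step; the real helper is expected to do the equivalent.
        globals()[Fp16Case.__name__] = Fp16Case

    create_fp16_variant(_BaseCase)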
fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": x_np, - "input_y": y_np - }, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": x_np, "input_y": y_np}, + fetch_list=[result], + ) def test_static(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py index 37e2afbb892d6a7937b2101e34fedc31e4fccaa0..4041ee9a2c182fc71207236ca7c89df0a605eb81 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_mean_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestMean(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -55,7 +54,6 @@ class TestMean(OpTest): class TestMeanFP16(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py index 5367ea8a92be5f32a266a67b3e5d2da300d160a3..6ed36b710ab389dc9161899c52a910285ea26f65 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_memcpy_op_npu.py @@ -28,66 +28,77 @@ SEED = 2021 class TestMemcpy_FillConstant(unittest.TestCase): - def get_prog(self): paddle.enable_static() main_program = Program() with program_guard(main_program): cpu_var_name = "tensor@Cpu" npu_var_name = "tensor@Npu" - cpu_var = main_program.global_block().create_var(name=cpu_var_name, - shape=[10, 10], - dtype='float32', - persistable=False, - stop_gradient=True) - npu_var = main_program.global_block().create_var(name=npu_var_name, - shape=[10, 10], - dtype='float32', - persistable=False, - stop_gradient=True) - main_program.global_block().append_op(type="fill_constant", - outputs={"Out": npu_var_name}, - attrs={ - "shape": [10, 10], - "dtype": npu_var.dtype, - "value": 1.0, - "place_type": 4 - }) - main_program.global_block().append_op(type="fill_constant", - outputs={"Out": cpu_var_name}, - attrs={ - "shape": [10, 10], - "dtype": cpu_var.dtype, - "value": 0.0, - "place_type": 0 - }) + cpu_var = main_program.global_block().create_var( + name=cpu_var_name, + shape=[10, 10], + dtype='float32', + persistable=False, + stop_gradient=True, + ) + npu_var = main_program.global_block().create_var( + name=npu_var_name, + shape=[10, 10], + dtype='float32', + persistable=False, + stop_gradient=True, + ) + main_program.global_block().append_op( + type="fill_constant", + outputs={"Out": npu_var_name}, + attrs={ + "shape": [10, 10], + "dtype": npu_var.dtype, + "value": 1.0, + "place_type": 4, + }, + ) + main_program.global_block().append_op( + type="fill_constant", + outputs={"Out": cpu_var_name}, + attrs={ + "shape": [10, 10], + "dtype": cpu_var.dtype, + "value": 0.0, + "place_type": 0, + }, + ) return main_program, npu_var, cpu_var def test_npu_cpoy_to_cpu(self): main_program, npu_var, cpu_var = self.get_prog() - main_program.global_block().append_op(type='memcpy', - inputs={'X': npu_var}, - outputs={'Out': cpu_var}, - attrs={'dst_place_type': 0}) + main_program.global_block().append_op( + type='memcpy', + inputs={'X': npu_var}, + outputs={'Out': cpu_var}, + attrs={'dst_place_type': 0}, + ) place = fluid.NPUPlace(0) exe = fluid.Executor(place) - npu_, cpu_ = exe.run(main_program, - feed={}, - fetch_list=[npu_var.name, cpu_var.name]) + npu_, cpu_ = exe.run( + main_program, feed={}, 
fetch_list=[npu_var.name, cpu_var.name] + ) np.testing.assert_allclose(npu_, cpu_) np.testing.assert_allclose(cpu_, np.ones((10, 10))) def test_cpu_cpoy_npu(self): main_program, npu_var, cpu_var = self.get_prog() - main_program.global_block().append_op(type='memcpy', - inputs={'X': cpu_var}, - outputs={'Out': npu_var}, - attrs={'dst_place_type': 4}) + main_program.global_block().append_op( + type='memcpy', + inputs={'X': cpu_var}, + outputs={'Out': npu_var}, + attrs={'dst_place_type': 4}, + ) place = fluid.NPUPlace(0) exe = fluid.Executor(place) - npu_, cpu_ = exe.run(main_program, - feed={}, - fetch_list=[npu_var.name, cpu_var.name]) + npu_, cpu_ = exe.run( + main_program, feed={}, fetch_list=[npu_var.name, cpu_var.name] + ) np.testing.assert_allclose(npu_, cpu_) np.testing.assert_allclose(npu_, np.zeros((10, 10))) diff --git a/python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py index 086911a56dbae58ea1159bf9473b6156767101d4..d530438bd581ab176ac33333890f5c258d291c88 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_merged_momentum_op_npu.py @@ -22,16 +22,18 @@ from paddle.fluid.layer_helper import LayerHelper from collections import OrderedDict -def run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False): +def run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -48,48 +50,70 @@ def run_momentum_op(params, } param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - 
(mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -99,8 +123,9 @@ def run_momentum_op(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -111,10 +136,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars[i] outputs['MasterParamOut'] = master_param_vars[i] - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -126,10 +150,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -137,17 +160,19 @@ def run_momentum_op(params, return exe.run(main, feed=feed_dict, fetch_list=fetch_list) -def run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False, - use_nesterov=True): +def run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, + use_nesterov=True, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -159,48 +184,70 @@ def run_momentum_op2(params, helper = LayerHelper(op_type, **locals()) param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - 
helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -210,8 +257,9 @@ def run_momentum_op2(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -230,10 +278,9 @@ def run_momentum_op2(params, 'regularization_method': 'l2_decay', 'regularization_coeff': 2.0, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -246,22 +293,18 @@ def run_momentum_op2(params, inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars attrs = { - 'mu': - mu, - 'multi_precision': - multi_precision, - 'rescale_grad': - rescale_grad, - 'use_nesterov': - use_nesterov, - 'regularization_method': - ['l2_decay' for i in range(len(param_vars))], + 'mu': mu, + 'multi_precision': multi_precision, + 'rescale_grad': rescale_grad, + 'use_nesterov': use_nesterov, + 'regularization_method': [ + 'l2_decay' for i in range(len(param_vars)) + ], 'regularization_coeff': [2.0 for i in range(len(param_vars))], } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -270,7 +313,6 @@ def run_momentum_op2(params, class TestMergedMomentum(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -296,21 +338,28 @@ class TestMergedMomentum(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, multi_precision): - params, grads, velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_merged): # NPU Momentum Op does not support rescale_grad rescale_grad = 1.0 - return run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged) + return run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + ) outs1 = run_op(True) outs2 = run_op(False) @@ -323,7 +372,6 @@ class TestMergedMomentum(unittest.TestCase): class TestMergedMomentum2(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -349,22 +397,29 @@ class TestMergedMomentum2(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, multi_precision): - params, grads, 
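Most of the churn in run_momentum_op and run_momentum_op2 is mechanical and comes from two black rules: a call that does not fit the configured line length is exploded to one argument per line with a trailing comma added, and an argument list that already ends in a comma (the "magic trailing comma") stays exploded even when it would fit. A self-contained before/after illustration with a hypothetical stand-in function, not Paddle's helper:

    def create_variable(persistable, shape, dtype):   # hypothetical stand-in
        return (persistable, shape, dtype)

    # Previous yapf-style layout: arguments packed and aligned under the opening paren.
    v1 = create_variable(persistable=True,
                         shape=[10, 10],
                         dtype='float32')

    # black layout: one argument per line, trailing comma, closing paren dedented.
    v2 = create_variable(
        persistable=True,
        shape=[10, 10],
        dtype='float32',
    )
    assert v1 == v2   # only the formatting differs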
velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_nesterov, use_merged): # NPU Momentum Op does not support rescale_grad rescale_grad = 1.0 - return run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged, - use_nesterov=use_nesterov) + return run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + use_nesterov=use_nesterov, + ) outs1 = run_op(use_nesterov=True, use_merged=True) outs2 = run_op(use_nesterov=True, use_merged=False) diff --git a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py index 8ff2fb65aa73e67a55b41f0c3b589df6738ca847..8af8f899244586c216648df481002fd39243ca70 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestMeshgridOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "meshgrid" @@ -55,7 +54,7 @@ class TestMeshgridOp(OpTest): ins = [] outs = [] for i in range(len(self.shape)): - ins.append(np.random.random((self.shape[i], )).astype(self.dtype)) + ins.append(np.random.random((self.shape[i],)).astype(self.dtype)) for i in range(len(self.shape)): out_reshape = [1] * len(self.shape) @@ -69,43 +68,47 @@ class TestMeshgridOp(OpTest): @skip_check_grad_ci( - reason="The backward test is not supported for float16 type on NPU.") + reason="The backward test is not supported for float16 type on NPU." 
+) class TestMeshgridOpFP16(TestMeshgridOp): - def get_dtype(self): return "float16" class TestMeshgridOpINT32(TestMeshgridOp): - def get_dtype(self): return "int32" class TestMeshgridOpINT64(TestMeshgridOp): - def get_dtype(self): return "int64" class TestMeshgridOp2(TestMeshgridOp): - def get_x_shape(self): return [100, 300] class TestMeshgridOp3(unittest.TestCase): - def test_api(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -114,29 +117,35 @@ class TestMeshgridOp3(unittest.TestCase): exe = fluid.Executor(place=fluid.NPUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid(x, y) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) np.testing.assert_allclose(res_1, out_1) np.testing.assert_allclose(res_2, out_2) class TestMeshgridOp4(unittest.TestCase): - def test_list_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -145,29 +154,35 @@ class TestMeshgridOp4(unittest.TestCase): exe = fluid.Executor(place=fluid.NPUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid([x, y]) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) np.testing.assert_allclose(res_1, out_1) np.testing.assert_allclose(res_2, out_2) class TestMeshgridOp5(unittest.TestCase): - def test_tuple_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -176,27 +191,33 @@ class TestMeshgridOp5(unittest.TestCase): exe = fluid.Executor(place=fluid.NPUPlace(0)) grid_x, grid_y = paddle.tensor.meshgrid((x, y)) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) np.testing.assert_allclose(res_1, out_1) np.testing.assert_allclose(res_2, out_2) class TestMeshgridOp6(unittest.TestCase): - def test_api_with_dygraph(self): 
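The meshgrid cases above assemble their expected outputs by hand: reshape the first 1-D input to a column, broadcast it to the full [100, 200] grid, and compare against paddle.tensor.meshgrid. That hand-built construction is what numpy's own meshgrid computes with 'ij' indexing, which the short numpy-only check below makes explicit at small sizes:

    import numpy as np

    a = np.arange(3)
    b = np.arange(4)

    # hand-built grids, as in the tests: column/row reshape plus broadcast
    gx = np.broadcast_to(a.reshape(3, 1), (3, 4))
    gy = np.broadcast_to(b.reshape(1, 4), (3, 4))

    mx, my = np.meshgrid(a, b, indexing='ij')
    assert (gx == mx).all() and (gy == my).all()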
paddle.disable_static(paddle.NPUPlace(0)) - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_3 = np.reshape(input_3, [100, 1]) out_3 = np.broadcast_to(out_3, [100, 200]) @@ -213,15 +234,22 @@ class TestMeshgridOp6(unittest.TestCase): class TestMeshgridOp7(unittest.TestCase): - def test_api_with_dygraph_list_input(self): paddle.disable_static(paddle.NPUPlace(0)) - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_3 = np.reshape(input_3, [100, 1]) out_3 = np.broadcast_to(out_3, [100, 200]) @@ -238,15 +266,22 @@ class TestMeshgridOp7(unittest.TestCase): class TestMeshgridOp8(unittest.TestCase): - def test_api_with_dygraph_tuple_input(self): paddle.disable_static(paddle.NPUPlace(0)) - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_3 = np.reshape(input_3, [100, 1]) out_3 = np.broadcast_to(out_3, [100, 200]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py b/python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py index 9927316fddc08421134b971544707ecec0dfd254..3685ab9e1e1db44bacac59ecdba4bba7b3f265a8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() class SimpleNet(nn.Layer): - def __init__(self, input_size, output_size): super(SimpleNet, self).__init__() self.linear1 = nn.Linear(input_size, output_size) @@ -42,16 +41,15 @@ class SimpleNet(nn.Layer): x = self.linear1(x) # currently, paddle's relu may hide nan/inf, relu(nan) = 0, relu(inf)= inf # so, do not use it here. 
- #x = self.relu1(x) + # x = self.relu1(x) x = self.linear2(x) - #x = self.relu2(x) + # x = self.relu2(x) x = self.linear3(x) return x class AMPTestNpu(unittest.TestCase): - def setUp(self): self.place = paddle.NPUPlace(0) @@ -67,10 +65,11 @@ class AMPTestNpu(unittest.TestCase): loss = mse(out, label) opt = paddle.fluid.optimizer.Adam( - learning_rate=0.0001, parameter_list=model.parameters()) # 定义优化器 - opt = paddle.static.amp.decorate(opt, - init_loss_scaling=128.0, - use_dynamic_loss_scaling=True) + learning_rate=0.0001, parameter_list=model.parameters() + ) # 定义优化器 + opt = paddle.static.amp.decorate( + opt, init_loss_scaling=128.0, use_dynamic_loss_scaling=True + ) opt.minimize(loss) return model, loss, opt @@ -85,11 +84,17 @@ class AMPTestNpu(unittest.TestCase): model, loss, opt = self.net() weight = model.linear1.weight moment1 = opt._optimizer._get_accumulator( - opt._optimizer._moment1_acc_str, weight) + opt._optimizer._moment1_acc_str, weight + ) beta_pow1 = opt._optimizer._get_accumulator( - opt._optimizer._beta1_pow_acc_str, weight) + opt._optimizer._beta1_pow_acc_str, weight + ) fetch_list = [ - loss, weight, moment1, beta_pow1, 'find_infinite_scale.tmp_0' + loss, + weight, + moment1, + beta_pow1, + 'find_infinite_scale.tmp_0', ] exe = paddle.static.Executor(self.place) @@ -104,20 +109,24 @@ class AMPTestNpu(unittest.TestCase): ] weight_, moment1_, beta_pow1_ = exe.run( - startup_prog, fetch_list=[weight, moment1, beta_pow1]) - pre_weight_, pre_moment1_, pre_beta_pow1_ = weight_, moment1_, beta_pow1_ + startup_prog, fetch_list=[weight, moment1, beta_pow1] + ) + pre_weight_, pre_moment1_, pre_beta_pow1_ = ( + weight_, + moment1_, + beta_pow1_, + ) for i in range(nums_batch): if i % 2: train_data[i][10] = np.inf loss_, weight_, moment1_, beta_pow1_, found_inf = exe.run( main_prog, - feed={ - "X": train_data[i], - "Y": labels[i] - }, - fetch_list=fetch_list) - print(loss_, weight_[0][0], moment1_[0][0], beta_pow1_, - found_inf) + feed={"X": train_data[i], "Y": labels[i]}, + fetch_list=fetch_list, + ) + print( + loss_, weight_[0][0], moment1_[0][0], beta_pow1_, found_inf + ) if i % 2: self.assertTrue(found_inf) np.testing.assert_array_equal(weight_, pre_weight_) @@ -128,7 +137,11 @@ class AMPTestNpu(unittest.TestCase): self.assertFalse(np.array_equal(weight_, pre_weight_)) self.assertFalse(np.array_equal(moment1_, pre_moment1_)) self.assertFalse(np.array_equal(beta_pow1_, pre_beta_pow1_)) - pre_weight_, pre_moment1_, pre_beta_pow1_ = weight_, moment1_, beta_pow1_ + pre_weight_, pre_moment1_, pre_beta_pow1_ = ( + weight_, + moment1_, + beta_pow1_, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py index 852f4fd3dc2c4815705f79b8258ad059020b2c33..9719c5582bc8c6ff7cb4f6b43d497e0047b98519 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestMomentumOp1(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -48,7 +47,7 @@ class TestMomentumOp1(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = {'mu': mu, 'use_nesterov': self.use_nesterov} @@ -59,7 +58,8 @@ class TestMomentumOp1(OpTest): mu=mu, velocity=velocity, use_nesterov=self.use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = 
{'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -75,7 +75,6 @@ class TestMomentumOp1(OpTest): class TestMomentumOpFp16(TestMomentumOp1): - def init_dtype(self): self.dtype = np.float16 @@ -84,23 +83,21 @@ class TestMomentumOpFp16(TestMomentumOp1): class TestMomentumOp2(TestMomentumOp1): - def init_case(self): self.shape = (123, 321) self.use_nesterov = True class TestMomentumV2(unittest.TestCase): - def test_momentum_dygraph(self): paddle.disable_static(place=fluid.NPUPlace(0)) value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.Momentum(learning_rate=0.01, - momentum=0.9, - parameters=linear.parameters()) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, momentum=0.9, parameters=linear.parameters() + ) out = linear(a) out.backward() adam.step() @@ -117,13 +114,15 @@ class TestMomentumV2(unittest.TestCase): cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) - rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1, - momentum=0.9) + rms_optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, momentum=0.9 + ) rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -131,14 +130,13 @@ class TestMomentumV2(unittest.TestCase): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) def test_raise_error(self): - self.assertRaises(ValueError, - paddle.optimizer.Momentum, - learning_rate=None) + self.assertRaises( + ValueError, paddle.optimizer.Momentum, learning_rate=None + ) self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None) class TestMomentumOpWithDecay(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -164,14 +162,14 @@ class TestMomentumOpWithDecay(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = { 'mu': mu, 'use_nesterov': use_nesterov, 'regularization_method': regularization_method, - 'regularization_coeff': regularization_coeff + 'regularization_coeff': regularization_coeff, } grad = grad + regularization_coeff * param @@ -182,7 +180,8 @@ class TestMomentumOpWithDecay(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -195,7 +194,6 @@ class TestMomentumOpWithDecay(OpTest): class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): - def init_config(self): self.dtype = np.float16 @@ -205,13 +203,11 @@ class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): class TestMomentumOpWithDecay2(TestMomentumOpWithDecay): - def init_config(self): self.use_nesterov = False class TestMomentumOpWithDecayAPI(unittest.TestCase): - def _test_momentum_dygraph_common(self, regularization): paddle.disable_static(fluid.NPUPlace(0)) inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") @@ -224,13 +220,16 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): learning_rate=0.01, momentum=0.9, parameter_list=linear.parameters(), - regularization=regularization) + regularization=regularization, + ) momentum.minimize(loss) def 
test_momentum_dygraph_1(self): self._test_momentum_dygraph_common( regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ) + ) def test_momentum_static(self): paddle.enable_static() @@ -244,12 +243,14 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): avg_cost = paddle.mean(cost) momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum( - learning_rate=0.1, momentum=0.9) + learning_rate=0.1, momentum=0.9 + ) momentum_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -258,11 +259,11 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): - def __update_params(self, momentum, linear): for i in range(10): - inp = paddle.full(shape=[2, 2], fill_value=i, - dtype='float32').astype("float32") + inp = paddle.full( + shape=[2, 2], fill_value=i, dtype='float32' + ).astype("float32") inp = paddle.to_tensor(inp) out = linear(inp) loss = paddle.mean(out) @@ -276,39 +277,45 @@ class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_old = paddle.fluid.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_old.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_old, linear=linear_old) linear_new = paddle.nn.Linear( 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_new = paddle.fluid.contrib.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_new.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_new, linear=linear_new) self.assertEqual( (linear_old.weight.numpy() == linear_new.weight.numpy()).all(), True, - 'the param weight updated by two Momentum optimizers should equal') + 'the param weight updated by two Momentum optimizers should equal', + ) def test_vs(self, place=fluid.NPUPlace(0)): self.__test_vs(place=place) class TestMomentumV2Group(TestMomentumV2): - def test_momentum_dygraph(self): paddle.disable_static(place=fluid.NPUPlace(0)) value = np.arange(26).reshape(2, 13).astype("float32") @@ -316,22 +323,20 @@ class TestMomentumV2Group(TestMomentumV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
- adam = paddle.optimizer.Momentum(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - 'learning_rate': - 0.1, - 'momentum': - 0.99 - }], - weight_decay=0.1, - momentum=0.9) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + 'momentum': 0.99, + }, + ], + weight_decay=0.1, + momentum=0.9, + ) out = linear_1(a) out = linear_2(out) out.backward() diff --git a/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py index 0b3a0c322cde0e21dbb688b37a12f8909d937536..8f8abea53964a9c8b32bf5d0765643ec5def24dc 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_mul_op_npu.py @@ -40,7 +40,7 @@ class TestMul(OpTest): np.random.seed(SEED) self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.y_shape).astype(self.dtype) + 'Y': np.random.random(self.y_shape).astype(self.dtype), } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} @@ -81,9 +81,9 @@ class TestMul(OpTest): @skip_check_grad_ci( - reason="Don't support grad checking for NPU OP with FP16 data type.") + reason="Don't support grad checking for NPU OP with FP16 data type." +) class TestMulFP16(TestMul): - def init_dtype(self): self.dtype = np.float16 @@ -112,7 +112,7 @@ class TestMul2(TestMul): np.random.seed(SEED) self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.y_shape).astype(self.dtype) + 'Y': np.random.random(self.y_shape).astype(self.dtype), } self.outputs = { 'Out': np.dot(self.inputs['X'].reshape(20, 10), self.inputs['Y']) @@ -120,9 +120,9 @@ class TestMul2(TestMul): @skip_check_grad_ci( - reason="Don't support grad checking for NPU OP with FP16 data type.") + reason="Don't support grad checking for NPU OP with FP16 data type." +) class TestMul2FP16(TestMul2): - def init_dtype(self): self.dtype = np.float16 @@ -152,16 +152,16 @@ class TestMul3(TestMul): np.random.seed(SEED) self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.y_shape).astype(self.dtype) + 'Y': np.random.random(self.y_shape).astype(self.dtype), } self.attrs = {"x_num_col_dims": 2} self.outputs = {'Out': np.matmul(self.inputs['X'], self.inputs['Y'])} @skip_check_grad_ci( - reason="Don't support grad checking for NPU OP with FP16 data type.") + reason="Don't support grad checking for NPU OP with FP16 data type." +) class TestMul3FP16(TestMul3): - def init_dtype(self): self.dtype = np.float16 @@ -190,7 +190,7 @@ class TestMul4(TestMul): np.random.seed(SEED) self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.y_shape).astype(self.dtype) + 'Y': np.random.random(self.y_shape).astype(self.dtype), } self.outputs = { 'Out': np.dot(self.inputs['X'].reshape(20, 12), self.inputs['Y']) @@ -198,9 +198,9 @@ class TestMul4(TestMul): @skip_check_grad_ci( - reason="Don't support grad checking for NPU OP with FP16 data type.") + reason="Don't support grad checking for NPU OP with FP16 data type." 
+) class TestMul4FP16(TestMul4): - def init_dtype(self): self.dtype = np.float16 @@ -215,7 +215,6 @@ class TestMul4FP16(TestMul4): class TestMulNet(unittest.TestCase): - def init_dtype(self): self.dtype = np.float32 @@ -237,9 +236,9 @@ class TestMulNet(unittest.TestCase): b = paddle.static.data(name="b", shape=[2, 3], dtype=self.dtype) c = paddle.static.data(name="c", shape=[3, 2], dtype=self.dtype) d = paddle.static.data(name="d", shape=[3, 2], dtype=self.dtype) - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) sum_1 = paddle.add(a, b) sum_2 = paddle.add(c, d) @@ -263,18 +262,23 @@ class TestMulNet(unittest.TestCase): print("TestMulNet Start run on {} . ".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "c": c_np, - "d": d_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={ + "a": a_np, + "b": b_np, + "c": c_np, + "d": d_np, + "label": label_np, + }, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -288,7 +292,6 @@ class TestMulNet(unittest.TestCase): class TestMulNet3_2(unittest.TestCase): - def init_dtype(self): self.dtype = np.float32 @@ -310,9 +313,9 @@ class TestMulNet3_2(unittest.TestCase): b = paddle.static.data(name="b", shape=[2, 3, 4], dtype=self.dtype) c = paddle.static.data(name="c", shape=[12, 5], dtype=self.dtype) d = paddle.static.data(name="d", shape=[12, 5], dtype=self.dtype) - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) sum_1 = paddle.add(a, b) sum_2 = paddle.add(c, d) @@ -336,18 +339,23 @@ class TestMulNet3_2(unittest.TestCase): print("testMulNet3_2 tart run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "c": c_np, - "d": d_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={ + "a": a_np, + "b": b_np, + "c": c_np, + "d": d_np, + "label": label_np, + }, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -356,13 +364,13 @@ class TestMulNet3_2(unittest.TestCase): cpu_pred, cpu_loss = self._test(False) npu_pred, npu_loss = self._test(True) - np.testing.assert_allclose(npu_pred, cpu_pred, - atol=1e-5) # atol needed on cann 20.3 + np.testing.assert_allclose( + npu_pred, cpu_pred, atol=1e-5 + ) # atol needed on cann 20.3 np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-5) class TestMulNet3_2_xc2(unittest.TestCase): - def init_dtype(self): self.dtype = np.float32 @@ -384,9 +392,9 @@ class TestMulNet3_2_xc2(unittest.TestCase): b = paddle.static.data(name="b", shape=[2, 3, 4], dtype=self.dtype) c = paddle.static.data(name="c", shape=[4, 5], dtype=self.dtype) d = paddle.static.data(name="d", shape=[4, 5], dtype=self.dtype) - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 
1], dtype='int64' + ) sum_1 = paddle.add(a, b) sum_2 = paddle.add(c, d) @@ -411,18 +419,23 @@ class TestMulNet3_2_xc2(unittest.TestCase): print("TestMulNet3_2_xc2. Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "c": c_np, - "d": d_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={ + "a": a_np, + "b": b_np, + "c": c_np, + "d": d_np, + "label": label_np, + }, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -436,7 +449,6 @@ class TestMulNet3_2_xc2(unittest.TestCase): class TestMulNet4_2(unittest.TestCase): - def init_dtype(self): self.dtype = np.float32 @@ -458,17 +470,18 @@ class TestMulNet4_2(unittest.TestCase): b = paddle.static.data(name="b", shape=[12, 5], dtype=self.dtype) c = paddle.static.data(name="c", shape=[12, 5], dtype=self.dtype) d = paddle.static.data(name="d", shape=[12, 5], dtype=self.dtype) - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) sum_1 = paddle.add(a, b) # [12, 5] sum_2 = paddle.add(c, d) # [12, 5] fc_1 = fluid.layers.fc(input=sum_1, size=2) # [12, 2] fc_1_re_shape = paddle.reshape(fc_1, shape=[2, 3, 2, 2]) fc_2 = fluid.layers.fc(input=sum_2, size=2) # [12, 2] - result = paddle.fluid.layers.mul(fc_1_re_shape, - fc_2) # [2, 3, 2, 2] * [12, 2] + result = paddle.fluid.layers.mul( + fc_1_re_shape, fc_2 + ) # [2, 3, 2, 2] * [12, 2] prediction = fluid.layers.fc(input=result, size=2, act='softmax') @@ -487,18 +500,23 @@ class TestMulNet4_2(unittest.TestCase): print("testMulNet4_2 tart run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "c": c_np, - "d": d_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={ + "a": a_np, + "b": b_np, + "c": c_np, + "d": d_np, + "label": label_np, + }, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -507,8 +525,9 @@ class TestMulNet4_2(unittest.TestCase): cpu_pred, cpu_loss = self._test(False) npu_pred, npu_loss = self._test(True) - np.testing.assert_allclose(npu_pred, cpu_pred, - atol=1e-5) # atol needed on cann 20.3 + np.testing.assert_allclose( + npu_pred, cpu_pred, atol=1e-5 + ) # atol needed on cann 20.3 np.testing.assert_allclose(npu_loss, cpu_loss, atol=1e-5) diff --git a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py index d1599766756fbad0f9d7c0019d84e7c120520006..ccff4ffd0cfb5d5e7a534d07d334e184ecdf7bc8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py @@ -47,7 +47,6 @@ def sample_output_two_dimension(out, shape): class TestMultinomialOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "multinomial" @@ -65,8 +64,9 @@ class TestMultinomialOp(OpTest): self.attrs = {"num_samples": 100000, 
"replacement": True} def test_check_output(self): - self.check_output_customized(self.verify_output, - custom_place=self.place) + self.check_output_customized( + self.verify_output, custom_place=self.place + ) def sample_output(self, out): return sample_output_one_dimension(out, 4) @@ -79,7 +79,6 @@ class TestMultinomialOp(OpTest): class TestMultinomialOp2(TestMultinomialOp): - def init_data(self): # input probability is a matrix self.input_np = np.random.rand(3, 4) @@ -91,7 +90,6 @@ class TestMultinomialOp2(TestMultinomialOp): class TestMultinomialOp3(TestMultinomialOp): - def init_data(self): # replacement is False. number of samples must be less than number of categories. self.input_np = np.random.rand(1000) @@ -102,12 +100,13 @@ class TestMultinomialOp3(TestMultinomialOp): out = np.array(outs[0]) unique_out = np.unique(out) self.assertEqual( - len(unique_out), 100, - "replacement is False. categories can't be sampled repeatedly") + len(unique_out), + 100, + "replacement is False. categories can't be sampled repeatedly", + ) class TestMultinomialApi(unittest.TestCase): - def test_dygraph(self): # input probability is a vector, and replacement is True paddle.set_device('npu:0') @@ -144,8 +143,10 @@ class TestMultinomialApi(unittest.TestCase): unique_out = np.unique(out.numpy()) self.assertEqual( - len(unique_out), 100, - "replacement is False. categories can't be sampled repeatedly") + len(unique_out), + 100, + "replacement is False. categories can't be sampled repeatedly", + ) paddle.enable_static() def test_dygraph4(self): @@ -178,19 +179,17 @@ class TestMultinomialApi(unittest.TestCase): class TestMultinomialAlias(unittest.TestCase): - def test_alias(self): paddle.set_device('npu:0') x = paddle.rand([4]) out1 = paddle.multinomial(x, num_samples=10, replacement=True) out2 = paddle.tensor.multinomial(x, num_samples=10, replacement=True) - out3 = paddle.tensor.random.multinomial(x, - num_samples=10, - replacement=True) + out3 = paddle.tensor.random.multinomial( + x, num_samples=10, replacement=True + ) class TestMultinomialError(unittest.TestCase): - def setUp(self): paddle.set_device('npu:0') paddle.disable_static() @@ -199,7 +198,6 @@ class TestMultinomialError(unittest.TestCase): paddle.enable_static() def test_num_sample(self): - def test_num_sample_less_than_0(): x = paddle.rand([4]) out = paddle.multinomial(x, num_samples=-2) @@ -207,7 +205,6 @@ class TestMultinomialError(unittest.TestCase): self.assertRaises(ValueError, test_num_sample_less_than_0) def test_input_probs_dim(self): - def test_dim_larger_than_2(): x = paddle.rand([2, 3, 3]) out = paddle.multinomial(x) diff --git a/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py index 9c9d50e1ddac2a05cfd3e79c9098cbf076c50a9b..82a2ff48251df0b8428d0b0517d80c2a39b71cc5 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestNearestInterpOp(OpTest): - def setUp(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -52,10 +51,15 @@ class TestNearestInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners, - self.data_layout) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + 
self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -67,7 +71,7 @@ class TestNearestInterpOp(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } self.outputs = {'Out': output_np} @@ -83,124 +87,113 @@ class TestNearestInterpOp(OpTest): self.input_shape = [2, 3, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = False class TestNearestNeighborInterpCase1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = False def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - in_place=True, - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['X'], 'Out', in_place=True, max_relative_error=0.006 + ) class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = False class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = False class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") self.align_corners = False class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False class TestNearestNeighborInterpDataLayout(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 4, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([3, 8]).astype("int32") self.align_corners = False self.data_layout = "NHWC" class TestNearestInterpOpUint8(OpTest): - def setUp(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -209,8 +202,9 @@ class TestNearestInterpOpUint8(OpTest): self.actual_shape = None self.init_test_case() self.op_type = "nearest_interp" - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale > 0: out_h = int(self.input_shape[2] * self.scale) @@ -219,9 +213,14 @@ class TestNearestInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -230,7 +229,7 @@ class TestNearestInterpOpUint8(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'interp_method': self.interp_method, - 'align_corners': self.align_corners + 'align_corners': self.align_corners, } self.outputs = {'Out': output_np} @@ -242,47 +241,43 @@ class TestNearestInterpOpUint8(OpTest): self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 80 self.out_w = 40 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 5 self.out_w = 13 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([6, 15]).astype("int32") self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. + self.scale = 2.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 5, 7] @@ -294,19 +289,17 @@ class TestNearestNeighborInterpScale2(TestNearestInterpOp): class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 1. 
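Beyond layout, these interp tests also rewrite trailing-dot float literals: 0. becomes 0.0 and 1. becomes 1.0. The value is unchanged; the explicit zero simply avoids the easy-to-miss bare dot and matches how Python itself prints the number:

    # Identical floats; only the spelling differs.
    assert 0. == 0.0 and 1. == 1.0
    assert repr(0.) == '0.0' and repr(1.) == '1.0'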
+ self.scale = 1.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False class TestNearestInterpOp_attr_tensor(OpTest): - def setUp(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -340,15 +333,21 @@ class TestNearestInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h self.attrs['out_w'] = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -362,33 +361,31 @@ class TestNearestInterpOp_attr_tensor(OpTest): self.input_shape = [2, 5, 4, 4] self.out_h = 3 self.out_w = 3 - self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = False # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = False # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False self.shape_by_1Dtensor = True @@ -396,7 +393,6 @@ class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -409,7 +405,6 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): class TestNearestAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32") @@ -417,27 +412,25 @@ class TestNearestAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = fluid.layers.resize_nearest(y, - out_shape=[12, 12], - data_format='NHWC', - align_corners=False) - out2 = fluid.layers.resize_nearest(x, - out_shape=[12, dim], - align_corners=False) - out3 = fluid.layers.resize_nearest(x, - out_shape=shape_tensor, - align_corners=False) - out4 = fluid.layers.resize_nearest(x, - out_shape=[4, 4], - actual_shape=actual_size, - align_corners=False) - out5 = fluid.layers.resize_nearest(x, - scale=scale_tensor, - align_corners=False) + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = fluid.layers.resize_nearest( + y, out_shape=[12, 12], data_format='NHWC', align_corners=False + ) + out2 = fluid.layers.resize_nearest( + x, out_shape=[12, dim], 
align_corners=False + ) + out3 = fluid.layers.resize_nearest( + x, out_shape=shape_tensor, align_corners=False + ) + out4 = fluid.layers.resize_nearest( + x, out_shape=[4, 4], actual_shape=actual_size, align_corners=False + ) + out5 = fluid.layers.resize_nearest( + x, scale=scale_tensor, align_corners=False + ) x_data = np.random.random((2, 3, 6, 6)).astype("float32") dim_data = np.array([12]).astype("int32") @@ -448,38 +441,39 @@ class TestNearestAPI(unittest.TestCase): place = paddle.NPUPlace(0) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = nearest_neighbor_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=False) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 1))) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = nearest_neighbor_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 1)) + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res) class TestNearestInterpException(unittest.TestCase): - def test_exception(self): input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32") def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC - out = fluid.layers.resize_nearest(input, - out_shape=[4, 8], - data_format='NDHWC') + out = fluid.layers.resize_nearest( + input, out_shape=[4, 8], data_format='NDHWC' + ) def attr_scale_type(): out = fluid.layers.resize_nearest(input, scale='scale') diff --git a/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py index 6c4b6f97b03ebf606d2071a6902ea66989424174..4e0b6089835f021fc100faa976f7b5e0e048c3c5 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_nearest_interp_v2_op_npu.py @@ -30,7 +30,6 @@ paddle.enable_static() class TestNearestInterpOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -68,11 +67,17 @@ class TestNearestInterpOp(OpTest): output_h = self.out_h output_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, output_h, output_w, - scale_h, scale_w, self.out_size, - self.actual_shape, - self.align_corners, - self.data_layout) + output_np = nearest_neighbor_interp_np( + input_np, + output_h, + output_w, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -83,7 +88,7 @@ class TestNearestInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -99,15 
+104,17 @@ class TestNearestInterpOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - in_place=True, - max_relative_error=0.02) + self.check_grad_with_place( + self.place, ['X'], 'Out', in_place=True, max_relative_error=0.02 + ) else: - self.check_grad_with_place(self.place, ['X'], - 'Out', - in_place=True, - max_relative_error=0.006) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + in_place=True, + max_relative_error=0.006, + ) def init_dtype(self): self.dtype = np.float32 @@ -117,123 +124,112 @@ class TestNearestInterpOp(OpTest): self.input_shape = [2, 3, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = False class TestNearestNeighborInterpFP16(TestNearestInterpOp): - def init_dtype(self): self.dtype = np.float16 class TestNearestNeighborInterpCase1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = False class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = False class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") self.align_corners = False class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. 
+ self.scale = 2.0 self.out_size = None self.align_corners = False class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 5, 7] @@ -245,7 +241,6 @@ class TestNearestNeighborInterpScale2(TestNearestInterpOp): class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] @@ -257,7 +252,6 @@ class TestNearestNeighborInterpScale3(TestNearestInterpOp): class TestNearestInterpOp_attr_tensor(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -300,8 +294,9 @@ class TestNearestInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -313,9 +308,16 @@ class TestNearestInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -329,33 +331,31 @@ class TestNearestInterpOp_attr_tensor(OpTest): self.input_shape = [2, 5, 4, 4] self.out_h = 3 self.out_w = 3 - self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = False # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = False # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = False self.shape_by_1Dtensor = True @@ -363,7 +363,6 @@ class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -376,9 +375,9 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): class TestNearestInterpOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_npu(): place = core.NPUPlace(0) else: @@ -388,14 +387,15 @@ class TestNearestInterpOpAPI_dy(unittest.TestCase): scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) scale = paddle.to_tensor(scale_np) - expect_res = nearest_neighbor_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - scale_factor=scale, - mode="nearest", - align_corners=False) + expect_res = nearest_neighbor_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + scale_factor=scale, + mode="nearest", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res) diff --git a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py index e0a62d5641acb533abe47480ad86aae2167bb82d..c59daf979fb89d46f97e94e1d8a8ce08562f24c3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py @@ -24,7 +24,6 @@ from paddle.fluid.tests.unittests.test_norm_op import l2_norm class TestNPUNormOp(OpTest): - def setUp(self): paddle.enable_static() self.set_npu() @@ -54,13 +53,12 @@ class TestNPUNormOp(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.006 + ) class TestNPUNormOp2(TestNPUNormOp): - def init_test_case(self): self.shape = [5, 3, 9, 7] self.axis = 0 @@ -68,17 +66,17 @@ class TestNPUNormOp2(TestNPUNormOp): class TestNPUNormOp3(TestNPUNormOp): - def init_test_case(self): self.shape = [5, 3, 2, 7] self.axis = -1 self.epsilon = 1e-8 -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class TestNPUNormOp4(TestNPUNormOp): - def init_test_case(self): self.shape = [128, 1024, 14, 14] self.axis = 2 @@ -88,10 +86,11 @@ class TestNPUNormOp4(TestNPUNormOp): pass -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class TestNPUNormOp5(TestNPUNormOp): - def init_test_case(self): self.shape = [2048, 2048] self.axis = 1 @@ -102,7 +101,6 @@ class TestNPUNormOp5(TestNPUNormOp): class API_NormTest(unittest.TestCase): - def test_errors(self): paddle.enable_static() with fluid.program_guard(fluid.Program()): @@ -115,7 +113,6 @@ class API_NormTest(unittest.TestCase): class TestNPUNormOpFP16(TestNPUNormOp): - def set_npu(self): self.__class__.use_npu = 
True self.__class__.no_need_check_grad = True diff --git a/python/paddle/fluid/tests/unittests/npu/test_npu_place.py b/python/paddle/fluid/tests/unittests/npu/test_npu_place.py index 0075fa49e0440f0fe47a5550a9bbb0bef7f3e72b..8f4a9ee03e39d13f8659b72f00afdad5ec3eb86e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_npu_place.py +++ b/python/paddle/fluid/tests/unittests/npu/test_npu_place.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestNpuPlace(unittest.TestCase): - def test(self): p = core.Place() p.set_place(paddle.NPUPlace(0)) @@ -31,7 +30,6 @@ class TestNpuPlace(unittest.TestCase): class TestNpuPlaceError(unittest.TestCase): - def test_static(self): # NPU is not supported in ParallelExecutor prog = paddle.static.Program() @@ -48,8 +46,9 @@ class TestNpuPlaceError(unittest.TestCase): place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) - with self.assertRaisesRegex(RuntimeError, - "NPU is not supported in ParallelExecutor"): + with self.assertRaisesRegex( + RuntimeError, "NPU is not supported in ParallelExecutor" + ): exe.run(compiled_prog, feed={"x": x_np, "y": y_np}) diff --git a/python/paddle/fluid/tests/unittests/npu/test_one_hot_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_one_hot_op_npu.py index 5379e9505dfd90d995de0e77c483a9aa8370cba6..008645deca3594096c89b0cb098b215359028df5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_one_hot_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_one_hot_op_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestOneHotOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -42,8 +41,9 @@ class TestOneHotOp(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -57,7 +57,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_attr(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -70,8 +69,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -85,7 +85,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -99,8 +98,9 @@ class TestOneHotOp_default_dtype(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -114,7 +114,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -127,8 +126,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 
depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -142,7 +142,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_out_of_range(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -154,8 +153,9 @@ class TestOneHotOp_out_of_range(OpTest): x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) self.inputs = {'X': (x, x_lod)} self.attrs = {'depth': depth, 'allow_out_of_range': True} @@ -166,7 +166,6 @@ class TestOneHotOp_out_of_range(OpTest): class TestOneHotOp_dtype_int64(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -179,8 +178,9 @@ class TestOneHotOp_dtype_int64(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int64').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 diff --git a/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py index b3fd2e6922135ba929a88968f3c1239329b61993..4ccd33134bed7e5b2036baf6ce7f37dbb8ce2726 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestOneHotOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -56,7 +55,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_non_lod(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -80,7 +78,6 @@ class TestOneHotOp_non_lod(OpTest): class TestOneHotOp_attr(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -93,8 +90,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -108,7 +106,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -136,7 +133,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -149,8 +145,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -164,7 +161,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_out_of_range(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -187,7 +183,6 @@ class TestOneHotOp_out_of_range(OpTest): class TestOneHotOp_dtype_int64(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -210,7 +205,6 @@ class TestOneHotOp_dtype_int64(OpTest): class 
TestOneHotOpApi(unittest.TestCase): - def test_api(self): depth = 10 self._run(depth) @@ -221,27 +215,32 @@ class TestOneHotOpApi(unittest.TestCase): def test_api_with_dygraph(self): depth = 10 - label = np.array([np.random.randint(0, depth - 1) - for i in range(6)]).reshape([6, 1]) + label = np.array( + [np.random.randint(0, depth - 1) for i in range(6)] + ).reshape([6, 1]) with fluid.dygraph.guard(paddle.NPUPlace(0)): one_hot_label = fluid.one_hot( - input=fluid.dygraph.to_variable(label), depth=depth) + input=fluid.dygraph.to_variable(label), depth=depth + ) def _run(self, depth): label = fluid.layers.data(name="label", shape=[1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) place = fluid.NPUPlace(0) - label_data = np.array([np.random.randint(0, 10 - 1) - for i in range(6)]).reshape([6, 1]) + label_data = np.array( + [np.random.randint(0, 10 - 1) for i in range(6)] + ).reshape([6, 1]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'label': label_data, - }, - fetch_list=[one_hot_label], - return_numpy=False) + ret = exe.run( + feed={ + 'label': label_data, + }, + fetch_list=[one_hot_label], + return_numpy=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_p_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_p_norm_op_npu.py index 5560b8bbd143a36db403fad3323c342315392ab9..8cb1d0b11a8d9e3da2aa0427849acc39a598e094 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_p_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_p_norm_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestPnormOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -41,7 +40,7 @@ class TestPnormOp(OpTest): 'epsilon': self.epsilon, 'axis': self.axis, 'keepdim': self.keepdim, - 'porder': float(self.porder) + 'porder': float(self.porder), } self.outputs = {'Out': norm} self.gradient = self.calc_gradient() @@ -53,9 +52,9 @@ class TestPnormOp(OpTest): self.check_output_with_place(paddle.NPUPlace(0)) def test_check_grad(self): - self.check_grad_with_place(paddle.NPUPlace(0), ['X'], - 'Out', - user_defined_grads=self.gradient) + self.check_grad_with_place( + paddle.NPUPlace(0), ['X'], 'Out', user_defined_grads=self.gradient + ) def init_test_case(self): self.shape = [2, 3, 4, 5] @@ -73,7 +72,7 @@ class TestPnormOp(OpTest): 'epsilon': self.epsilon, 'axis': self.axis, 'keepdim': self.keepdim, - 'porder': float(self.porder) + 'porder': float(self.porder), } x = self.inputs["X"] porder = self.attrs["porder"] @@ -87,8 +86,11 @@ class TestPnormOp(OpTest): grad[x_abs != norm] = 0.0 else: norm = p_norm(x, axis=axis, porder=porder, keepdims=True) - grad = np.power(norm, 1 - porder) * np.power( - np.abs(x), porder - 1) * np.sign(x) + grad = ( + np.power(norm, 1 - porder) + * np.power(np.abs(x), porder - 1) + * np.sign(x) + ) numel = 1 for s in x.shape: @@ -98,7 +100,6 @@ class TestPnormOp(OpTest): class TestPnormOp2(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -109,7 +110,6 @@ class TestPnormOp2(TestPnormOp): class TestPnormOp3(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -120,7 +120,6 @@ class TestPnormOp3(TestPnormOp): class TestPnormOp4(TestPnormOp3): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -131,7 +130,6 @@ class TestPnormOp4(TestPnormOp3): class TestPnormOp5(TestPnormOp3): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -142,7 +140,6 @@ class 
TestPnormOp5(TestPnormOp3): class TestPnormOp6(TestPnormOp3): - def init_test_case(self): self.shape = [2, 3, 4, 5] self.axis = 1 @@ -153,31 +150,26 @@ class TestPnormOp6(TestPnormOp3): class TestPnormOpfp16(TestPnormOp): - def init_dtype(self): self.dtype = "float16" class TestPnormOp2fp16(TestPnormOp2): - def init_dtype(self): self.dtype = "float16" class TestPnormOp3fp16(TestPnormOp3): - def init_dtype(self): self.dtype = "float16" class TestPnormOp4fp16(TestPnormOp4): - def init_dtype(self): self.dtype = "float16" class TestPnormOp5fp16(TestPnormOp5): - def init_dtype(self): self.dtype = "float16" diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py index f388e66881c87b9f9a4c9596159000f2ba35077e..5d0d25e1f2081b7c9b96a091265bfb1378c849ec 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py @@ -27,7 +27,6 @@ import paddle.fluid as fluid class TestPad3dNPUOp(op_test.OpTest): - def setUp(self): paddle.enable_static() self.__class__.use_npu = True @@ -39,16 +38,18 @@ class TestPad3dNPUOp(op_test.OpTest): self.variable_paddings = False self.initTestCase() - self.value = 0 #Asend npu only support constant_values = 0 right now. + self.value = 0 # Asend npu only support constant_values = 0 right now. self.inputs = {'X': np.random.random(self.shape).astype(self.x_type)} self.attrs = {} if self.variable_paddings: self.attrs['paddings'] = [] - self.inputs['Paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.inputs['Paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) else: - self.attrs['paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.attrs['paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) self.attrs['value'] = self.value self.attrs['mode'] = self.mode self.attrs['data_format'] = self.data_format @@ -69,10 +70,12 @@ class TestPad3dNPUOp(op_test.OpTest): (0, 0), ] - out = np.pad(self.inputs['X'], - paddings, - mode=self.mode, - constant_values=self.value) + out = np.pad( + self.inputs['X'], + paddings, + mode=self.mode, + constant_values=self.value, + ) self.outputs = {'Out': out} @@ -89,7 +92,6 @@ class TestPad3dNPUOp(op_test.OpTest): class TestCase1(TestPad3dNPUOp): - def initTestCase(self): self.shape = (3, 4, 5, 6, 7) self.paddings = [0, 1, 2, 3, 4, 5] @@ -102,7 +104,6 @@ class TestCase1(TestPad3dNPUOp): class TestCase2(TestPad3dNPUOp): - def initTestCase(self): self.shape = (4, 5, 6, 7, 8) self.paddings = [1, 1, 1, 1, 1, 1] @@ -111,13 +112,9 @@ class TestCase2(TestPad3dNPUOp): class TestPadAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0, - data_format="NCDHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0, data_format="NCDHW" + ): if mode == "constant" and len(pad) == len(input_data.shape) * 2: pad = np.reshape(pad, (-1, 2)).tolist() elif data_format == "NCDHW": @@ -168,8 +165,11 @@ class TestPadAPI(unittest.TestCase): def test_static(self): paddle.enable_static() - self.place = fluid.NPUPlace( - 0) if fluid.core.is_compiled_with_npu() else fluid.CPUPlace() + self.place = ( + fluid.NPUPlace(0) + if fluid.core.is_compiled_with_npu() + else fluid.CPUPlace() + ) with program_guard(Program(), Program()): input_shape = (1, 2, 3, 4, 5) pad = [1, 2, 1, 1, 3, 4] @@ -177,31 +177,25 @@ class TestPadAPI(unittest.TestCase): value = 0 input_data = np.random.rand(*input_shape).astype(np.float32) 
x = paddle.fluid.data(name="x", shape=input_shape) - result1 = F.pad(x=x, - pad=pad, - value=value, - mode=mode, - data_format="NCDHW") - result2 = F.pad(x=x, - pad=pad, - value=value, - mode=mode, - data_format="NDHWC") + result1 = F.pad( + x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" + ) + result2 = F.pad( + x=x, pad=pad, value=value, mode=mode, data_format="NDHWC" + ) exe = Executor(self.place) - fetches = exe.run(default_main_program(), - feed={"x": input_data}, - fetch_list=[result1, result2]) - - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NDHWC") + fetches = exe.run( + default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2], + ) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NDHWC" + ) np.testing.assert_allclose(fetches[0], np_out1) np.testing.assert_allclose(fetches[1], np_out2) @@ -216,27 +210,19 @@ class TestPadAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) tensor_data = paddle.to_tensor(input_data) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NDHWC") - - y1 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NCDHW") - y2 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NDHWC") + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NDHWC" + ) + + y1 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NCDHW" + ) + y2 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NDHWC" + ) np.testing.assert_allclose(y1.numpy(), np_out1) np.testing.assert_allclose(y2.numpy(), np_out2) @@ -252,27 +238,19 @@ class TestPadAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) tensor_data = paddle.to_tensor(input_data) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NHWC") - - y1 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NCHW") - y2 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NHWC") + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NHWC" + ) + + y1 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NCHW" + ) + y2 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NHWC" + ) np.testing.assert_allclose(y1.numpy(), np_out1) np.testing.assert_allclose(y2.numpy(), np_out2) @@ -288,40 +266,28 @@ class TestPadAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) tensor_data = paddle.to_tensor(input_data) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCL") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NLC") - - y1 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NCL") - y2 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NLC") + np_out1 = 
self._get_numpy_out( + input_data, pad, mode, value, data_format="NCL" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NLC" + ) + + y1 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NCL" + ) + y2 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NLC" + ) np.testing.assert_allclose(y1.numpy(), np_out1) np.testing.assert_allclose(y2.numpy(), np_out2) class TestPad1dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - data_format="NCL"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCL" + ): if data_format == "NCL": pad = [ (0, 0), @@ -348,36 +314,33 @@ class TestPad1dAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) pad_constant = nn.Pad1D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad1D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad1D( + padding=pad_int, mode="constant", value=value + ) data = paddle.to_tensor(input_data) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCL") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCL" + ) np.testing.assert_allclose(output.numpy(), np_out) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 2, - "constant", - value=value, - data_format="NCL") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 2, + "constant", + value=value, + data_format="NCL", + ) np.testing.assert_allclose(output.numpy(), np_out) class TestPad2dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - data_format="NCHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCHW" + ): if data_format == "NCHW": pad = [ (0, 0), @@ -406,36 +369,33 @@ class TestPad2dAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) pad_constant = nn.Pad2D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad2D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad2D( + padding=pad_int, mode="constant", value=value + ) data = paddle.to_tensor(input_data) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCHW" + ) np.testing.assert_allclose(output.numpy(), np_out) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 4, - "constant", - value=value, - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 4, + "constant", + value=value, + data_format="NCHW", + ) np.testing.assert_allclose(output.numpy(), np_out) class TestPad3dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - data_format="NCDHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCDHW" + ): if data_format == "NCDHW": pad = [ (0, 0), @@ -466,32 +426,31 @@ class TestPad3dAPI(unittest.TestCase): input_data = np.random.rand(*input_shape).astype(np.float32) pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad3D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad3D( + padding=pad_int, mode="constant", value=value + ) data = 
paddle.to_tensor(input_data) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 6, - "constant", - value=value, - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 6, + "constant", + value=value, + data_format="NCDHW", + ) np.testing.assert_allclose(output.numpy(), np_out) class TestPad3dOpNpuError(unittest.TestCase): - def test_errors(self): - def test_value(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) @@ -538,14 +497,16 @@ class TestPad3dOpNpuError(unittest.TestCase): class TestPadDataformatError(unittest.TestCase): - def test_errors(self): - def test_ncl(): input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCL") data = paddle.to_tensor(data) result = my_pad(data) @@ -553,8 +514,12 @@ class TestPadDataformatError(unittest.TestCase): def test_nchw(): input_shape = (1, 2, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCHW") data = paddle.to_tensor(data) result = my_pad(data) @@ -562,11 +527,15 @@ class TestPadDataformatError(unittest.TestCase): def test_ncdhw(): input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 - my_pad = nn.Pad1D(padding=pad, - mode="replicate", - data_format="NCDHW") + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) + my_pad = nn.Pad1D( + padding=pad, mode="replicate", data_format="NCDHW" + ) data = paddle.to_tensor(data) result = my_pad(data) diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py index 7b16b19f2147739583ef4bd42babd3a8d3a3b294..97d2a83260146b8dcad65dac668ed0ebe9fd686a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestPadOp(OpTest): - def setUp(self): self.op_type = "pad" self.set_npu() @@ -41,11 +40,12 @@ class TestPadOp(OpTest): self.attrs['paddings'] = np.array(self.paddings).flatten() self.attrs['pad_value'] = self.pad_value self.outputs = { - 'Out': - np.pad(self.inputs['X'], - self.paddings, - mode='constant', - constant_values=self.pad_value) + 'Out': np.pad( + self.inputs['X'], + self.paddings, + mode='constant', + constant_values=self.pad_value, + ) } def test_check_output(self): @@ -53,9 +53,9 @@ class TestPadOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.6) + self.check_grad_with_place( + self.place, ['X'], 'Out', 
max_relative_error=0.6 + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') @@ -73,7 +73,6 @@ class TestPadOp(OpTest): class TestCase1(TestPadOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] @@ -81,7 +80,6 @@ class TestCase1(TestPadOp): class TestCase2(TestPadOp): - def initTestCase(self): self.shape = (5, 5, 5) self.paddings = [(0, 0), (0, 0), (1, 2)] @@ -89,20 +87,17 @@ class TestCase2(TestPadOp): class TestCase3(TestPadOp): - def initTestCase(self): - self.shape = (100) + self.shape = 100 self.paddings = [(0, 1)] self.pad_value = 0.0 -#----------------Pad Fp16---------------- +# ----------------Pad Fp16---------------- def create_test_fp16(parent): - class TestPadFp16(parent): - def init_dtype(self): self.dtype = np.float16 @@ -118,7 +113,6 @@ create_test_fp16(TestCase3) class TestPadOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((2, 2)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/npu/test_parallel_dygraph_mnist_npu.py b/python/paddle/fluid/tests/unittests/npu/test_parallel_dygraph_mnist_npu.py index a2be1ccfa67beb643d91c212dcd847f2a5a83697..0eb98ff7a68d7a7e65b1ee7f5a8024ce4372ad24 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_parallel_dygraph_mnist_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_parallel_dygraph_mnist_npu.py @@ -46,23 +46,20 @@ rank_table_file = b"""{ }""" need_envs = { - "ASCEND_AICPU_PATH": - os.getenv("ASCEND_AICPU_PATH", "/usr/local/Ascend/nnae/latest"), - "ASCEND_OPP_PATH": - os.getenv("ASCEND_OPP_PATH", "/usr/local/Ascend/nnae/latest/opp"), - "HCCL_CONNECT_TIMEOUT": - "7200", - "HCCL_WHITELIST_DISABLE": - "1", - "HCCL_SECURITY_MODE": - "1", - "RANK_TABLE_FILE": - "rank_table_file.json", + "ASCEND_AICPU_PATH": os.getenv( + "ASCEND_AICPU_PATH", "/usr/local/Ascend/nnae/latest" + ), + "ASCEND_OPP_PATH": os.getenv( + "ASCEND_OPP_PATH", "/usr/local/Ascend/nnae/latest/opp" + ), + "HCCL_CONNECT_TIMEOUT": "7200", + "HCCL_WHITELIST_DISABLE": "1", + "HCCL_SECURITY_MODE": "1", + "RANK_TABLE_FILE": "rank_table_file.json", } class TestParallelDygraphMnistNPU(TestDistBase): - def _setup_config(self): self._sync_mode = False self._hccl_mode = True @@ -78,11 +75,11 @@ class TestParallelDygraphMnistNPU(TestDistBase): delta=1e-3, check_error_log=True, need_envs=need_envs, - log_name=flag_name) + log_name=flag_name, + ) class TestFleetDygraphMnistNPU(TestParallelDygraphMnistNPU): - def _setup_config(self): self._sync_mode = False self._hccl_mode = True diff --git a/python/paddle/fluid/tests/unittests/npu/test_pool2d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pool2d_op_npu.py index cb554d3c1d4890421dda32d1b5c75133f7beda48..3abe452b4e56b4d5c83b81366dfff8db60885613 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pool2d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pool2d_op_npu.py @@ -22,16 +22,20 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core from op_test import OpTest -from test_pool2d_op import pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive, adaptive_start_index, adaptive_end_index +from test_pool2d_op import ( + pool2D_forward_naive, + avg_pool2D_forward_naive, + max_pool2D_forward_naive, + adaptive_start_index, + adaptive_end_index, +) from paddle.nn.functional import avg_pool2d, max_pool2d paddle.enable_static() def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def 
init_paddings(self): self.paddings = [0, 0] self.padding_algorithm = "SAME" @@ -42,9 +46,7 @@ def create_test_padding_SAME_class(parent): def create_test_use_ceil_class(parent): - class TestPool2DUseCeilCase(parent): - def init_ceil_mode(self): self.ceil_mode = True @@ -54,9 +56,7 @@ def create_test_use_ceil_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.paddings = [1, 1] self.padding_algorithm = "VALID" @@ -67,9 +67,7 @@ def create_test_padding_VALID_class(parent): def create_test_fp16_class(parent): - class TestFp16Case(parent): - def init_kernel_type(self): self.use_cudnn = False self.dtype = np.float16 @@ -79,25 +77,29 @@ def create_test_fp16_class(parent): globals()[cls_name] = TestFp16Case -def pool2d_backward_navie(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_format='NCHW', - pool_type="max", - padding_algorithm="EXPLICIT"): +def pool2d_backward_navie( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_format='NCHW', + pool_type="max", + padding_algorithm="EXPLICIT", +): # update paddings def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -107,9 +109,10 @@ def pool2d_backward_navie(x, if isinstance(padding_algorithm, str): padding_algorithm = padding_algorithm.upper() if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] @@ -117,7 +120,8 @@ def pool2d_backward_navie(x, raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+ ) elif padding_algorithm == "SAME": input_data_shape = [] if data_format == "NCHW": @@ -146,10 +150,20 @@ def pool2d_backward_navie(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) // strides[0] + 1 \ - if ceil_mode else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 - W_out = (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) // strides[1] + 1 \ - if ceil_mode else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + H_out = ( + (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) + // strides[0] + + 1 + if ceil_mode + else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) + // strides[1] + + 1 + if ceil_mode + else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + ) x_grad = np.zeros_like(x) for i in range(H_out): @@ -177,20 +191,26 @@ def pool2d_backward_navie(x, in_w_end = np.min((in_w_end, W)) if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) - x_grad[:, :, in_h_start:in_h_end, - in_w_start:in_w_end] += 1 / field_size + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) + x_grad[:, :, in_h_start:in_h_end, in_w_start:in_w_end] += ( + 1 / field_size + ) elif pool_type == 'max': for n in range(N): for c in range(C): - idx = np.argmax(x[n, c, in_h_start:in_h_end, - in_w_start:in_w_end].flatten()) + idx = np.argmax( + x[ + n, c, in_h_start:in_h_end, in_w_start:in_w_end + ].flatten() + ) idx_h = idx // (in_w_end - in_w_start) idx_w = idx % (in_w_end - in_w_start) - x_grad[n, c, in_h_start + idx_h, - in_w_start + idx_w] += 1 + x_grad[ + n, c, in_h_start + idx_h, in_w_start + idx_w + ] += 1 if data_format == "NHWC": x_grad = x_grad.transpose([0, 2, 3, 1]) @@ -198,7 +218,6 @@ def pool2d_backward_navie(x, class TestPool2D_Op(OpTest): - def setUp(self): self.set_npu() self.op_type = "pool2d" @@ -218,14 +237,24 @@ class TestPool2D_Op(OpTest): input = np.random.random(self.shape).astype(self.dtype) if self.pool_type == "max": - input = np.array([x for x in range(np.prod(self.shape)) - ]).reshape(self.shape).astype(self.dtype) - output = pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, self.data_format, - self.pool_type, - self.padding_algorithm).astype(self.dtype) + input = ( + np.array([x for x in range(np.prod(self.shape))]) + .reshape(self.shape) + .astype(self.dtype) + ) + output = pool2D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.data_format, + self.pool_type, + self.padding_algorithm, + ).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -288,27 +317,30 @@ class TestPool2D_Op(OpTest): self.check_output_with_place(fluid.NPUPlace(0), atol=1e-3) def test_check_grad(self): - x_grad = pool2d_backward_navie(self.inputs["X"], - ksize=self.ksize, - strides=self.strides, - paddings=self.paddings, - global_pool=self.global_pool, - ceil_mode=False, - exclusive=self.exclusive, - adaptive=self.adaptive, - data_format=self.data_format, - pool_type=self.pool_type, - padding_algorithm=self.padding_algorithm) + x_grad = pool2d_backward_navie( + self.inputs["X"], + ksize=self.ksize, + strides=self.strides, + paddings=self.paddings, + global_pool=self.global_pool, + 
ceil_mode=False, + exclusive=self.exclusive, + adaptive=self.adaptive, + data_format=self.data_format, + pool_type=self.pool_type, + padding_algorithm=self.padding_algorithm, + ) x_grad = x_grad / np.prod(self.outputs['Out'].shape) - self.check_grad_with_place(fluid.NPUPlace(0), - set(['X']), - 'Out', - max_relative_error=0.06, - user_defined_grads=[x_grad]) + self.check_grad_with_place( + fluid.NPUPlace(0), + set(['X']), + 'Out', + max_relative_error=0.06, + user_defined_grads=[x_grad], + ) class TestCase1(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -328,7 +360,6 @@ class TestCase1(TestPool2D_Op): class TestCase2(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -348,34 +379,29 @@ class TestCase2(TestPool2D_Op): class TestCase3(TestPool2D_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase4(TestCase1): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase5(TestCase2): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestAvgInclude(TestCase2): - def init_exclusive(self): self.exclusive = False class TestAvgPoolAdaptive(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -389,7 +415,6 @@ class TestAvgPoolAdaptive(TestCase1): class TestAvgPoolAdaptiveAsyOutSize(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -404,9 +429,8 @@ class TestAvgPoolAdaptiveAsyOutSize(TestCase1): self.paddings = [0, 0, 0, 0] -#-------test pool2d with asymmetric padding----- +# -------test pool2d with asymmetric padding----- class TestPool2D_AsyPadding(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -417,7 +441,6 @@ class TestPool2D_AsyPadding(TestPool2D_Op): class TestCase1_AsyPadding(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -428,7 +451,6 @@ class TestCase1_AsyPadding(TestCase1): class TestCase2_AsyPadding(TestCase2): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -439,7 +461,6 @@ class TestCase2_AsyPadding(TestCase2): class TestCase3_AsyPadding(TestCase3): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -450,7 +471,6 @@ class TestCase3_AsyPadding(TestCase3): class TestCase4_AsyPadding(TestCase4): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -461,7 +481,6 @@ class TestCase4_AsyPadding(TestCase4): class TestCase5_AsyPadding((TestCase5)): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -472,7 +491,6 @@ class TestCase5_AsyPadding((TestCase5)): class TestAvgInclude_AsyPadding(TestCase2): - def init_exclusive(self): self.exclusive = False @@ -486,7 +504,6 @@ class TestAvgInclude_AsyPadding(TestCase2): class TestAvgPoolAdaptive_AsyPadding(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -499,9 +516,8 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1): self.shape = [2, 3, 8, 8] -#----------- test channel_last -------------- +# ----------- test channel_last -------------- class TestPool2D_channel_last(TestPool2D_Op): - def init_data_format(self): self.data_format = "NHWC" @@ -510,7 +526,6 @@ class TestPool2D_channel_last(TestPool2D_Op): class TestCase1_channel_last(TestCase1): - def init_data_format(self): self.data_format = "NHWC" @@ -519,7 +534,6 @@ class TestCase1_channel_last(TestCase1): class 
TestCase2_channel_last(TestCase2): - def init_data_format(self): self.data_format = "NHWC" @@ -528,7 +542,6 @@ class TestCase2_channel_last(TestCase2): class TestCase3_channel_last(TestCase3): - def init_data_format(self): self.data_format = "NHWC" @@ -537,7 +550,6 @@ class TestCase3_channel_last(TestCase3): class TestCase4_channel_last(TestCase4): - def init_data_format(self): self.data_format = "NHWC" @@ -546,7 +558,6 @@ class TestCase4_channel_last(TestCase4): class TestCase5_channel_last(TestCase5): - def init_data_format(self): self.data_format = "NHWC" @@ -555,13 +566,11 @@ class TestCase5_channel_last(TestCase5): class TestCase5_Max(TestCase2): - def init_pool_type(self): self.pool_type = "max" class TestCase5_channel_last_Max(TestCase5_Max): - def init_data_format(self): self.data_format = "NHWC" @@ -570,13 +579,11 @@ class TestCase5_channel_last_Max(TestCase5_Max): class TestAvgInclude_channel_last(TestCase2_channel_last): - def init_exclusive(self): self.exclusive = False class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last): - def init_adaptive(self): self.adaptive = True @@ -589,7 +596,6 @@ class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last): class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -598,7 +604,6 @@ class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -607,7 +612,6 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -616,7 +620,6 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -625,7 +628,6 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -634,7 +636,6 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -643,7 +644,6 @@ class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding): class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -651,9 +651,9 @@ class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): self.shape = [2, 7, 7, 3] -class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding - ): - +class TestAvgPoolAdaptive_AsyPadding_channel_last( + TestAvgPoolAdaptive_AsyPadding +): def init_data_format(self): self.data_format = "NHWC" @@ -662,7 +662,6 @@ class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding class TestCase1_strides(TestCase1): - def init_test_case(self): self.ksize = [3, 3] # fixme: CANN AvgPoolGradV3 dose not support asymmetric strides diff --git a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py index 67bb2f9c40fe06ccf516e759aeb6e5a376217893..09f2d0fc055c13332e7d52cb696c00924ae8da57 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pow_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestPow(OpTest): - 
def setUp(self): self.set_npu() self.op_type = "pow" @@ -55,7 +54,6 @@ class TestPow(OpTest): class TestPowFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "pow" @@ -82,7 +80,6 @@ class TestPowFp16(OpTest): class TestPowNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -97,9 +94,9 @@ class TestPowNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -123,16 +120,17 @@ class TestPowNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py index 4af37416af5c1c85e2505c0ec41993d87ddd5813..08e9226a37cb12a16a9cc704261b2feb2c650feb 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py @@ -18,13 +18,15 @@ import paddle import sys import math -from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + _set_use_system_allocator, +) paddle.enable_static() class TestNPUPriorBox(OpTest): - def setUp(self): self.op_type = "prior_box" self.set_npu() @@ -56,7 +58,7 @@ class TestNPUPriorBox(OpTest): 'min_max_aspect_ratios_order': self.min_max_aspect_ratios_order, 'step_w': self.step_w, 'step_h': self.step_h, - 'offset': self.offset + 'offset': self.offset, } if len(self.max_sizes) > 0: self.attrs['max_sizes'] = self.max_sizes @@ -92,8 +94,9 @@ class TestNPUPriorBox(OpTest): self.flip = True self.set_min_max_aspect_ratios_order() self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] - self.aspect_ratios = np.array(self.aspect_ratios, - dtype=np.float64).flatten() + self.aspect_ratios = np.array( + self.aspect_ratios, dtype=np.float64 + ).flatten() self.variances = [0.1, 0.1, 0.2, 0.2] self.variances = np.array(self.variances, dtype=np.float64).flatten() @@ -105,12 +108,12 @@ class TestNPUPriorBox(OpTest): def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_w, - self.image_h)).astype('float32') + (self.batch_size, self.image_channels, self.image_w, self.image_h) + ).astype('float32') self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_w, - self.layer_h)).astype('float32') + (self.batch_size, self.input_channels, self.layer_w, self.layer_h) + ).astype('float32') def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -131,73 +134,78 @@ class TestNPUPriorBox(OpTest): ar = 
self.real_aspect_ratios[r] c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 else: - c_w = c_h = min_size / 2. - out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + c_w = c_h = min_size / 2.0 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # rest of priors for r in range(len(self.real_aspect_ratios)): ar = self.real_aspect_ratios[r] - if abs(ar - 1.) < 1e-6: + if abs(ar - 1.0) < 1e-6: continue c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) # set the variance. 
- out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype('float32') self.out_var = out_var.astype('float32') class TestNPUPriorBoxWithoutMaxSize(TestNPUPriorBox): - def set_max_sizes(self): self.max_sizes = [] class TestNPUPriorBoxWithoutSpecifiedOutOrder(TestNPUPriorBox): - def set_min_max_aspect_ratios_order(self): self.min_max_aspect_ratios_order = False self.atol = 1e-1 diff --git a/python/paddle/fluid/tests/unittests/npu/test_randperm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_randperm_op_npu.py index ca2d41e763333485da15d42d911aeafcbe483c9f..71d955f1960879571668a60f4c1aba44fd2ecc29 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_randperm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_randperm_op_npu.py @@ -27,7 +27,7 @@ paddle.enable_static() class TestRandpermOp(OpTest): - """ Test randperm op.""" + """Test randperm op.""" def setUp(self): self.set_npu() @@ -57,36 +57,32 @@ class TestRandpermOp(OpTest): def verify_output(self, outs): out_np = np.array(outs[0]) - self.assertTrue(check_randperm_out(self.n, out_np), - msg=error_msg(out_np)) + self.assertTrue( + check_randperm_out(self.n, out_np), msg=error_msg(out_np) + ) class TestRandpermOpN(TestRandpermOp): - def init_attrs(self): self.n = 10000 class TestRandpermOpInt32(TestRandpermOp): - def init_attrs(self): self.dtype = "int32" class TestRandpermOpFloat32(TestRandpermOp): - def init_attrs(self): self.dtype = "float32" class TestRandpermOpFloat64(TestRandpermOp): - def init_attrs(self): self.dtype = "float64" class TestRandpermOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): self.assertRaises(ValueError, paddle.randperm, -3) @@ -94,7 +90,6 @@ class TestRandpermOpError(unittest.TestCase): class TestRandpermAPI(unittest.TestCase): - def test_out(self): n = 10 place = paddle.NPUPlace(0) @@ -112,15 +107,15 @@ class TestRandpermAPI(unittest.TestCase): class TestRandpermImperative(unittest.TestCase): - def test_out(self): paddle.disable_static(paddle.NPUPlace(0)) n = 10 for dtype in ['int32', np.int64, 'float32', 'float64']: data_p = paddle.randperm(n, dtype) data_np = data_p.numpy() - self.assertTrue(check_randperm_out(n, data_np), - msg=error_msg(data_np)) + self.assertTrue( + check_randperm_out(n, data_np), msg=error_msg(data_np) + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_range_npu.py b/python/paddle/fluid/tests/unittests/npu/test_range_npu.py index 8a89b57e38a3cbb354f5e7601405715a8a738355..b0f4ad242f77706902b5da2707cf94f6981cfff7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_range_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_range_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestRangeOp(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -36,13 +35,13 @@ class TestRangeOp(OpTest): self.inputs = { 'Start': np.array([self.case[0]]).astype(self.dtype), 'End': np.array([self.case[1]]).astype(self.dtype), - 'Step': np.array([self.case[2]]).astype(self.dtype) + 'Step': np.array([self.case[2]]).astype(self.dtype), } self.outputs = { - 'Out': - np.arange(self.case[0], self.case[1], - self.case[2]).astype(self.dtype) + 'Out': np.arange(self.case[0], self.case[1], self.case[2]).astype( + self.dtype + ) } def init_config(self): @@ -54,49 +53,42 @@ class TestRangeOp(OpTest): class 
TestFloatRangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.float32 self.case = (0, 5, 1) class TestInt32RangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.case = (0, 5, 2) class TestInt32RangeOpCase1(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.case = (10, 1, -2) class TestInt32RangeOpCase2(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.case = (-1, -10, -2) class TestInt64RangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.int64 self.case = (0, 5, 2) class TestInt64RangeOpCase1(TestRangeOp): - def init_config(self): self.dtype = np.int64 self.case = (10, 1, -2) class TestInt64RangeOpCase2(TestRangeOp): - def init_config(self): self.dtype = np.int64 self.case = (-1, -10, -2) diff --git a/python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py index b67cc43efcfa1c620add3d51ed7ab7727984b86a..2344b22cdea3fa6c48040770e8d1ceb37aa14f77 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestNPUReciprocal(OpTest): - def setUp(self): self.op_type = "reciprocal" self.set_npu() @@ -41,9 +40,9 @@ class TestNPUReciprocal(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.01 + ) def set_npu(self): self.__class__.use_npu = True @@ -54,7 +53,6 @@ class TestNPUReciprocal(OpTest): class TestNPUReciprocalFp64(TestNPUReciprocal): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -64,9 +62,9 @@ class TestNPUReciprocalFp64(TestNPUReciprocal): @skip_check_grad_ci( - reason="The backward test is not supported for float16 type on NPU.") + reason="The backward test is not supported for float16 type on NPU." 
+) class TestNPUReciprocalFp16(TestNPUReciprocal): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py index 9ab2e58228c19667c9e48cb945b3ef6a9a27a3bc..ece002e47992a57de8255d183e5b752528a0e4ba 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_any_op_npu.py @@ -28,14 +28,14 @@ paddle.enable_static() class TestAny8DOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "reduce_any" self.place = paddle.NPUPlace(0) self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'dim': (3, 5, 4)} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} @@ -48,7 +48,6 @@ class TestAny8DOp(OpTest): class TestAnyOpWithDim(OpTest): - def setUp(self): self.set_npu() self.op_type = "reduce_any" @@ -65,14 +64,14 @@ class TestAnyOpWithDim(OpTest): class TestAny8DOpWithDim(OpTest): - def setUp(self): self.set_npu() self.op_type = "reduce_any" self.place = paddle.NPUPlace(0) self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'dim': (3, 6)} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} @@ -85,7 +84,6 @@ class TestAny8DOpWithDim(OpTest): class TestAnyOpWithKeepDim(OpTest): - def setUp(self): self.set_npu() self.op_type = "reduce_any" @@ -93,8 +91,9 @@ class TestAnyOpWithKeepDim(OpTest): self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} self.attrs = {'dim': (1), 'keep_dim': True} self.outputs = { - 'Out': - np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1) + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) } def set_npu(self): @@ -105,19 +104,20 @@ class TestAnyOpWithKeepDim(OpTest): class TestAny8DOpWithKeepDim(OpTest): - def setUp(self): self.set_npu() self.op_type = "reduce_any" self.place = paddle.NPUPlace(0) self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'dim': (1), 'keep_dim': True} self.outputs = { - 'Out': - np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1) + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) } def set_npu(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py index 22ababf99b066a607d0ca6a0fa702bf15fa4b7f3..8e0cdd711b4f23c59aea6e77e35f74bdbc6d7cf7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py @@ -26,7 +26,8 @@ paddle.enable_static() @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestNPUReduceMaxOp(OpTest): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -54,7 +55,8 @@ class TestNPUReduceMaxOp(OpTest): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpMultiAxises(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -72,7 +74,8 @@ class TestReduceMaxOpMultiAxises(TestNPUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceAll(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -88,7 +91,8 @@ class TestReduceAll(TestNPUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_bool(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -100,17 +104,19 @@ class TestReduceMaxOpWithOutDtype_bool(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.BOOL) + 'out_dtype': int(core.VarDesc.VarType.BOOL), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.bool_) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.bool_) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_int16(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -122,18 +128,20 @@ class TestReduceMaxOpWithOutDtype_int16(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT16) + 'out_dtype': int(core.VarDesc.VarType.INT16), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.int16) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.int16) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMaxOpWithOutDtype_int32(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -145,17 +153,19 @@ class TestReduceMaxOpWithOutDtype_int32(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.int32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.int32) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_int64(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -167,17 +177,19 @@ class TestReduceMaxOpWithOutDtype_int64(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT64) + 'out_dtype': int(core.VarDesc.VarType.INT64), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.int64) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.int64) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_fp16(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -189,12 +201,12 @@ class TestReduceMaxOpWithOutDtype_fp16(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float16) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float16) } def test_check_output(self): @@ -203,7 +215,8 @@ class TestReduceMaxOpWithOutDtype_fp16(TestNPUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_fp32(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -215,18 +228,19 @@ class TestReduceMaxOpWithOutDtype_fp32(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMaxOpWithOutDtype_fp64(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -238,18 +252,19 @@ class TestReduceMaxOpWithOutDtype_fp64(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP64) + 'out_dtype': int(core.VarDesc.VarType.FP64), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float64) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float64) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpWithOutDtype_fp32_2(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -261,12 +276,12 @@ class TestReduceMaxOpWithOutDtype_fp32_2(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): @@ -275,7 +290,8 @@ class TestReduceMaxOpWithOutDtype_fp32_2(TestNPUReduceMaxOp): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpInt64(TestNPUReduceMaxOp): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -287,12 +303,12 @@ class TestReduceMaxOpInt64(TestNPUReduceMaxOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT64) + 'out_dtype': int(core.VarDesc.VarType.INT64), } self.outputs = { - 'Out': - self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .max(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_mean_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_mean_op_npu.py index 232d53d49395adf99d91e6306e07419c03e3a566..fe56cb152c6e2019db2db8e1f0f758e80e1fb9bd 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_mean_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_mean_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestMeanOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -43,7 +42,6 @@ class TestMeanOp(OpTest): class TestMeanOp5D(TestMeanOp): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -54,7 +52,6 @@ class TestMeanOp5D(TestMeanOp): class TestMeanOp6D(TestMeanOp): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -65,7 +62,6 @@ class TestMeanOp6D(TestMeanOp): class TestMeanOp8D(TestMeanOp): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -77,7 +73,6 @@ class TestMeanOp8D(TestMeanOp): class Test1DReduce(TestMeanOp): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -86,7 +81,6 @@ class Test1DReduce(TestMeanOp): class Test2DReduce0(Test1DReduce): - def 
setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -96,7 +90,6 @@ class Test2DReduce0(Test1DReduce): class Test2DReduce1(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -108,7 +101,6 @@ class Test2DReduce1(Test1DReduce): class Test3DReduce0(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -120,7 +112,6 @@ class Test3DReduce0(Test1DReduce): class Test3DReduce1(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -132,7 +123,6 @@ class Test3DReduce1(Test1DReduce): class Test3DReduce2(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -144,7 +134,6 @@ class Test3DReduce2(Test1DReduce): class Test3DReduce3(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -156,21 +145,19 @@ class Test3DReduce3(Test1DReduce): class TestKeepDimReduce(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.attrs = {'dim': [1], 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } class TestKeepDim8DReduce(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" @@ -179,14 +166,13 @@ class TestKeepDim8DReduce(Test1DReduce): } self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } class TestReduceAll(Test1DReduce): - def setUp(self): self.set_npu() self.op_type = "reduce_mean" diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py index da9d8a1893b940520d50004b25b577504a95525a..2ced99b6020f3e8bcd67241e68a8409d62a15e72 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py @@ -26,7 +26,8 @@ paddle.enable_static() @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestNPUReduceMinOp(OpTest): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -54,7 +55,8 @@ class TestNPUReduceMinOp(OpTest): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpMultiAxises(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -72,7 +74,8 @@ class TestReduceMinOpMultiAxises(TestNPUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceAll(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -88,7 +91,8 @@ class TestReduceAll(TestNPUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_bool(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -100,17 +104,19 @@ class TestReduceMinOpWithOutDtype_bool(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.BOOL) + 'out_dtype': int(core.VarDesc.VarType.BOOL), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.bool_) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.bool_) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_int16(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -122,18 +128,20 @@ class TestReduceMinOpWithOutDtype_int16(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT16) + 'out_dtype': int(core.VarDesc.VarType.INT16), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.int16) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.int16) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_int32(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -145,17 +153,19 @@ class TestReduceMinOpWithOutDtype_int32(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT32) + 'out_dtype': int(core.VarDesc.VarType.INT32), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.int32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.int32) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMinOpWithOutDtype_int64(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -167,17 +177,19 @@ class TestReduceMinOpWithOutDtype_int64(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT64) + 'out_dtype': int(core.VarDesc.VarType.INT64), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.int64) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.int64) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_fp16(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -189,12 +201,12 @@ class TestReduceMinOpWithOutDtype_fp16(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float16) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float16) } def test_check_output(self): @@ -203,7 +215,8 @@ class TestReduceMinOpWithOutDtype_fp16(TestNPUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_fp32(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -215,18 +228,19 @@ class TestReduceMinOpWithOutDtype_fp32(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpWithOutDtype_fp64(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -238,18 +252,19 @@ class TestReduceMinOpWithOutDtype_fp64(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP64) + 'out_dtype': int(core.VarDesc.VarType.FP64), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float64) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float64) } @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMinOpWithOutDtype_fp32_2(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -261,12 +276,12 @@ class TestReduceMinOpWithOutDtype_fp32_2(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): @@ -275,7 +290,8 @@ class TestReduceMinOpWithOutDtype_fp32_2(TestNPUReduceMinOp): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMinOpInt64(TestNPUReduceMinOp): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -287,12 +303,12 @@ class TestReduceMinOpInt64(TestNPUReduceMinOp): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = { 'dim': [-2, -1], - 'out_dtype': int(core.VarDesc.VarType.INT64) + 'out_dtype': int(core.VarDesc.VarType.INT64), } self.outputs = { - 'Out': - self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .min(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py index e8f4430d655f401a61b3b6006453d32d86ce30cc..956a2258224c39c449005309a2df22e9b8664b09 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestNPUReduceProd(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -49,7 +48,6 @@ class TestNPUReduceProd(OpTest): class TestNPUReduceProd2(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -61,7 +59,6 @@ class TestNPUReduceProd2(TestNPUReduceProd): class TestNPUReduceProd3(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -73,7 +70,6 @@ class TestNPUReduceProd3(TestNPUReduceProd): class TestNPUReduceProd6D(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -89,7 +85,6 @@ class TestNPUReduceProd6D(TestNPUReduceProd): class TestNPUReduceProd8D(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -105,7 +100,6 @@ class TestNPUReduceProd8D(TestNPUReduceProd): class TestReduceAll(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -117,7 +111,6 @@ class TestReduceAll(TestNPUReduceProd): class TestNPUReduceProdWithOutDtype_bool(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -126,14 +119,13 @@ class TestNPUReduceProdWithOutDtype_bool(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.BOOL)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.bool_) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.bool_) } class 
TestNPUReduceProdWithOutDtype_int16(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -142,14 +134,13 @@ class TestNPUReduceProdWithOutDtype_int16(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT16)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.int16) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.int16) } class TestNPUReduceProdWithOutDtype_int32(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -158,14 +149,13 @@ class TestNPUReduceProdWithOutDtype_int32(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT32)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.int32) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.int32) } class TestNPUReduceProdWithOutDtype_int64(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -174,14 +164,13 @@ class TestNPUReduceProdWithOutDtype_int64(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.INT64)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.int64) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.int64) } class TestNPUReduceProdWithOutDtype_fp16(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -190,9 +179,9 @@ class TestNPUReduceProdWithOutDtype_fp16(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP16)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.float16) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.float16) } def test_check_output(self): @@ -200,7 +189,6 @@ class TestNPUReduceProdWithOutDtype_fp16(TestNPUReduceProd): class TestNPUReduceProdWithOutDtype_fp32(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -209,14 +197,13 @@ class TestNPUReduceProdWithOutDtype_fp32(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP32)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } class TestNPUReduceProdWithOutDtype_fp64(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -225,15 +212,14 @@ class TestNPUReduceProdWithOutDtype_fp64(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP64)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.float64) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.float64) } @skip_check_grad_ci(reason="right now not implement grad op") class TestNPUReduceProdWithOutDtype_fp32_2(TestNPUReduceProd): - def setUp(self): self.op_type = "reduce_prod" self.set_npu() @@ -242,9 +228,9 @@ class 
TestNPUReduceProdWithOutDtype_fp32_2(TestNPUReduceProd): self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.FP32)} self.outputs = { - 'Out': - self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype( - np.float32) + 'Out': self.inputs['X'] + .prod(axis=tuple(self.attrs['dim'])) + .astype(np.float32) } def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py index 498cc2d97ace0dbe770c40138118e3c75b3b2014..d5cd5febfb9fe21dbf0cfefdb1000cdab7b88d60 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_sum_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestReduceSum(OpTest): - def setUp(self): np.random.seed(SEED) self.set_npu() @@ -39,16 +38,16 @@ class TestReduceSum(OpTest): self.attrs = { 'dim': self.axis, 'keep_dim': self.keep_dim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)} if self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].sum()} else: self.outputs = { - 'Out': - self.inputs['X'].sum(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def set_npu(self): @@ -65,7 +64,7 @@ class TestReduceSum(OpTest): def initTestCase(self): self.shape = (5, 6) - self.axis = (0) + self.axis = 0 def test_check_output(self): self.check_output_with_place(self.place) @@ -79,13 +78,11 @@ class TestReduceSum(OpTest): class TestReduceSum2(OpTest): - def init_dtype(self): self.dtype = np.int32 class TestReduceSumNet(unittest.TestCase): - def set_reduce_sum_function(self, x): # keep_dim = False return paddle.fluid.layers.reduce_sum(x, dim=-1) @@ -104,9 +101,9 @@ class TestReduceSumNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[2, 3, 4], dtype='float32') b = paddle.static.data(name="b", shape=[2, 3, 4], dtype='float32') - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) a_1 = fluid.layers.fc(input=a, size=4, num_flatten_dims=2, act=None) b_1 = fluid.layers.fc(input=b, size=4, num_flatten_dims=2, act=None) @@ -131,16 +128,17 @@ class TestReduceSumNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -153,14 +151,12 @@ class TestReduceSumNet(unittest.TestCase): class TestReduceSumNet2(TestReduceSumNet): - def set_reduce_sum_function(self, x): # keep_dim = True return paddle.fluid.layers.reduce_sum(x, dim=-1, keep_dim=True) class TestReduceSumNet3(TestReduceSumNet): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -191,12 +187,9 @@ class TestReduceSumNet3(TestReduceSumNet): print("Start 
run on {}".format(place)) for epoch in range(100): - loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np - }, - fetch_list=[loss]) + loss_res = exe.run( + main_prog, feed={"a": a_np, "b": b_np}, fetch_list=[loss] + ) if epoch % 10 == 0: print("Epoch {} | Loss: {}".format(epoch, loss_res)) diff --git a/python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py index 0fe796a0b0da0e773cf703a8a4085b86b9dd9d46..3988b4a6a939589a6eaf9c4026c9b36f6042a1f0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_relu6_op_npu.py @@ -34,7 +34,6 @@ def ref_relu6(x, threshold=6.0): class TestRelu6(OpTest): - def setUp(self): self.set_npu() self.op_type = "relu6" @@ -64,7 +63,6 @@ class TestRelu6(OpTest): class TestRelu6Float16(TestRelu6): - def set_npu(self): self.__class__.use_npu = True self.__class__.no_need_check_grad = True @@ -77,7 +75,6 @@ class TestRelu6Float16(TestRelu6): class TestReluNeg(TestRelu6): - def setUp(self): self.set_npu() self.op_type = "relu6" @@ -104,7 +101,6 @@ class TestReluNeg(TestRelu6): class TestRelu6Net(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -119,9 +115,9 @@ class TestRelu6Net(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.nn.functional.relu6(sum) @@ -145,16 +141,17 @@ class TestRelu6Net(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py index 98c46329350267d67c70eba34a0c15b05b954cf5..a52e4d39cfd77b4d82a9cee766bd968209f87bbd 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_relu_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestRelu(OpTest): - def setUp(self): self.set_npu() self.op_type = "relu" @@ -53,21 +52,19 @@ class TestRelu(OpTest): def test_check_grad(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.006 + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') class TestReluFp16(TestRelu): - def init_dtype(self): self.dtype = np.float16 class TestReluNeg(OpTest): - def setUp(self): self.set_npu() self.op_type = "relu" @@ -97,7 +94,6 @@ class TestReluNeg(OpTest): class TestReluNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = 
paddle.static.Program() @@ -112,9 +108,9 @@ class TestReluNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.nn.functional.relu(sum) @@ -138,16 +134,17 @@ class TestReluNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py index 10ce920fb903f9a30f440dbfda1a8d50bf22d5e1..74175ca6bbc513d75d2b144d0f163e86a9ea8d5e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_reshape2_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestReshape2(OpTest): - def setUp(self): self.set_npu() self.op_type = "reshape2" @@ -37,7 +36,7 @@ class TestReshape2(OpTest): self.attrs = {"shape": self.new_shape} self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def set_npu(self): @@ -56,7 +55,6 @@ class TestReshape2(OpTest): class TestReshape2_case2(TestReshape2): - def init_data(self): self.ori_shape = (2, 100) self.new_shape = (-1, 10) @@ -64,7 +62,6 @@ class TestReshape2_case2(TestReshape2): class TestReshape2_case3(TestReshape2): - def init_data(self): self.ori_shape = (100, 5, 6) self.new_shape = (-1, 0, 3) diff --git a/python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py index ed872aa0dea7f078d2a410ee66c55b0b8994d463..9274d8daa1d4237a52251f5fb65cb22e0ab4bc61 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_rmsprop_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -43,9 +42,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -69,16 +68,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + 
feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -91,7 +91,6 @@ class TestNet(unittest.TestCase): class TestCenteredNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -106,9 +105,9 @@ class TestCenteredNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -132,16 +131,17 @@ class TestCenteredNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_roi_align_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_roi_align_op_npu.py index 8e1529e43f5d37d38aa8aa5bd7a2cb8815bcbbf0..fde9824d95d3b550c645261024d397d3d6199881 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_roi_align_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_roi_align_op_npu.py @@ -26,7 +26,6 @@ np.random.seed(1243) class TestROIAlignNPUOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -37,7 +36,7 @@ class TestROIAlignNPUOp(OpTest): self.inputs = { 'X': self.x, 'ROIs': self.rois[:, 1:5], - 'RoisNum': np.asarray(seq_len).astype('int32') + 'RoisNum': np.asarray(seq_len).astype('int32'), } self.attrs = { @@ -45,7 +44,7 @@ class TestROIAlignNPUOp(OpTest): 'pooled_height': self.pooled_height, 'pooled_width': self.pooled_width, 'sampling_ratio': self.sampling_ratio, - 'aligned': self.aligned + 'aligned': self.aligned, } self.outputs = {'Out': self.out_data} @@ -67,25 +66,45 @@ class TestROIAlignNPUOp(OpTest): self.x = np.random.random(self.x_dim).astype('float32') - def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w, - bin_size_h, bin_size_w): + def pre_calc( + self, + x_i, + roi_xmin, + roi_ymin, + roi_bin_grid_h, + roi_bin_grid_w, + bin_size_h, + bin_size_w, + ): count = roi_bin_grid_h * roi_bin_grid_w bilinear_pos = np.zeros( [self.channels, self.pooled_height, self.pooled_width, count, 4], - np.float32) - bilinear_w = np.zeros([self.pooled_height, self.pooled_width, count, 4], - np.float32) + np.float32, + ) + bilinear_w = np.zeros( + [self.pooled_height, self.pooled_width, count, 4], np.float32 + ) for ph in range(self.pooled_width): for pw in range(self.pooled_height): c = 0 for iy in range(roi_bin_grid_h): - y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \ - bin_size_h / roi_bin_grid_h + y = ( + roi_ymin + + ph * bin_size_h + + (iy + 0.5) * bin_size_h / 
roi_bin_grid_h + ) for ix in range(roi_bin_grid_w): - x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \ - bin_size_w / roi_bin_grid_w - if y < -1.0 or y > self.height or \ - x < -1.0 or x > self.width: + x = ( + roi_xmin + + pw * bin_size_w + + (ix + 0.5) * bin_size_w / roi_bin_grid_w + ) + if ( + y < -1.0 + or y > self.height + or x < -1.0 + or x > self.width + ): continue if y <= 0: y = 0 @@ -106,14 +125,18 @@ class TestROIAlignNPUOp(OpTest): hy = 1 - ly hx = 1 - lx for ch in range(self.channels): - bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low, - x_low] - bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low, - x_high] - bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high, - x_low] - bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high, - x_high] + bilinear_pos[ch, ph, pw, c, 0] = x_i[ + ch, y_low, x_low + ] + bilinear_pos[ch, ph, pw, c, 1] = x_i[ + ch, y_low, x_high + ] + bilinear_pos[ch, ph, pw, c, 2] = x_i[ + ch, y_high, x_low + ] + bilinear_pos[ch, ph, pw, c, 3] = x_i[ + ch, y_high, x_high + ] bilinear_w[ph, pw, c, 0] = hy * hx bilinear_w[ph, pw, c, 1] = hy * lx bilinear_w[ph, pw, c, 2] = ly * hx @@ -123,10 +146,15 @@ class TestROIAlignNPUOp(OpTest): def calc_roi_align(self): self.out_data = np.zeros( - (self.rois_num, self.channels, self.pooled_height, - self.pooled_width)).astype('float32') - - offset = 0.5 if self.aligned else 0. + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ).astype('float32') + + offset = 0.5 if self.aligned else 0.0 for i in range(self.rois_num): roi = self.rois[i] roi_batch_id = int(roi[0]) @@ -144,16 +172,27 @@ class TestROIAlignNPUOp(OpTest): bin_size_h = float(roi_height) / float(self.pooled_height) bin_size_w = float(roi_width) / float(self.pooled_width) - roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_height / self.pooled_height) - roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_width / self.pooled_width) + roi_bin_grid_h = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_height / self.pooled_height) + ) + roi_bin_grid_w = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_width / self.pooled_width) + ) count = max(int(roi_bin_grid_h * roi_bin_grid_w), 1) pre_size = count * self.pooled_width * self.pooled_height - bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin, - int(roi_bin_grid_h), - int(roi_bin_grid_w), - bin_size_h, bin_size_w) + bilinear_pos, bilinear_w = self.pre_calc( + x_i, + roi_xmin, + roi_ymin, + int(roi_bin_grid_h), + int(roi_bin_grid_w), + bin_size_h, + bin_size_w, + ) for ch in range(self.channels): align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1) output_val = align_per_bin.mean(axis=-1) @@ -166,14 +205,18 @@ class TestROIAlignNPUOp(OpTest): # for i in range(bno + 1): self.rois_lod[0].append(bno) x1 = np.random.randint( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.randint( - 0, self.height // self.spatial_scale - self.pooled_height) + 0, self.height // self.spatial_scale - self.pooled_height + ) - x2 = np.random.randint(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.randint(y1 + self.pooled_height, - self.height // self.spatial_scale) + x2 = np.random.randint( + x1 + self.pooled_width, self.width // self.spatial_scale + ) + y2 = np.random.randint( + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] 
rois.append(roi) @@ -195,7 +238,6 @@ class TestROIAlignNPUOp(OpTest): class TestROIAlignOpWithMinusSample(TestROIAlignNPUOp): - def init_test_case(self): self.batch_size = 3 self.channels = 3 diff --git a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py index 52b95c7d9dcffb434dae2a1d505b193d25d0e511..899a3fdf03cd7e8c98830c684bde4e16cab46c65 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py @@ -50,10 +50,10 @@ def program_scope_guard(): # when create Operator, so here compare gradients with static graph # NOTE: Here rewrite a simple unittest framework for RunProgramOp class RunProgramNPUOpTest(unittest.TestCase): - def build_model(self): raise NotImplementedError( - "RunProgramOp test should implement build_model") + "RunProgramOp test should implement build_model" + ) def check_output(self): places = [fluid.NPUPlace(0)] @@ -84,9 +84,9 @@ class RunProgramNPUOpTest(unittest.TestCase): else: fetch_list = self.get_param_grad_names() - outs = exe.run(main_program, - feed=self.inputs['X'], - fetch_list=fetch_list) + outs = exe.run( + main_program, feed=self.inputs['X'], fetch_list=fetch_list + ) return outs def get_program_desc(self): @@ -95,9 +95,16 @@ class RunProgramNPUOpTest(unittest.TestCase): return fluid.default_main_program().desc, fwd_op_num def prepare_attrs(self): - return ('global_block', self.program_desc.block(0), 'start_op_index', 0, - 'end_op_index', self.fwd_op_num, 'program_id', - _hash_with_id(self.program_desc, self)) + return ( + 'global_block', + self.program_desc.block(0), + 'start_op_index', + 0, + 'end_op_index', + self.fwd_op_num, + 'program_id', + _hash_with_id(self.program_desc, self), + ) def get_param_grad_names(self): grad_names = [] @@ -111,10 +118,9 @@ class RunProgramNPUOpTest(unittest.TestCase): # Step 2. compare output for expect_v, actual_v in zip(self.expect_outs, actual_outs): - np.testing.assert_allclose(expect_v, - actual_v.numpy(), - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + expect_v, actual_v.numpy(), rtol=1e-05, atol=1e-05 + ) def check_grad_with_place(self, place): # Step 1. calc grads @@ -123,24 +129,20 @@ class RunProgramNPUOpTest(unittest.TestCase): # Step 2. 
compare grads for expect_v, actual_v in zip(self.expect_grads, actual_grads): np.testing.assert_array_almost_equal(expect_v, actual_v) - np.testing.assert_allclose(expect_v, - actual_v, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + expect_v, actual_v, rtol=1e-05, atol=1e-05 + ) def prepare_dygraph_input(self, place, return_param_list=False): - def create_var_base(is_input, name, np_value, stop_gradient): if _in_eager_mode_: - var = core.eager.Tensor(value=np_value, - name=name, - place=place, - zero_copy=True) + var = core.eager.Tensor( + value=np_value, name=name, place=place, zero_copy=True + ) else: - var = core.VarBase(value=np_value, - name=name, - place=place, - zero_copy=True) + var = core.VarBase( + value=np_value, name=name, place=place, zero_copy=True + ) var.stop_gradient = stop_gradient return var @@ -163,7 +165,6 @@ class RunProgramNPUOpTest(unittest.TestCase): return inputs def prepare_dygraph_output(self): - def create_var_base(is_input, name): var = framework._varbase_creator(dtype=None, shape=None, name=name) var.stop_gradient = False @@ -181,7 +182,8 @@ class RunProgramNPUOpTest(unittest.TestCase): outputs['OutScope'] = framework._varbase_creator( type=core.VarDesc.VarType.STEP_SCOPES, name="program_out_scope", - persistable=True) + persistable=True, + ) inner_scope = core.Scope() outputs['OutScope'].value().set_scope(inner_scope) @@ -196,9 +198,15 @@ class RunProgramNPUOpTest(unittest.TestCase): inputs = self.prepare_dygraph_input(place) outputs = self.prepare_dygraph_output() - _legacy_C_ops.run_program(inputs['X'], inputs['Params'], - outputs['Out'], outputs['OutScope'], - outputs['DOut'], None, *self.attrs) + _legacy_C_ops.run_program( + inputs['X'], + inputs['Params'], + outputs['Out'], + outputs['OutScope'], + outputs['DOut'], + None, + *self.attrs + ) return outputs['Out'] def calc_dygraph_grad(self, place): @@ -210,9 +218,15 @@ class RunProgramNPUOpTest(unittest.TestCase): inputs, input_param_list = self.prepare_dygraph_input(place, True) outputs = self.prepare_dygraph_output() - _legacy_C_ops.run_program(inputs['X'], inputs['Params'], - outputs['Out'], outputs['OutScope'], - outputs['DOut'], None, *self.attrs) + _legacy_C_ops.run_program( + inputs['X'], + inputs['Params'], + outputs['Out'], + outputs['OutScope'], + outputs['DOut'], + None, + *self.attrs + ) for param in input_param_list: var_type = self._get_grad_vartype(param.name) @@ -243,27 +257,29 @@ class RunProgramNPUOpTest(unittest.TestCase): class TestRunProgramOpWithFC(RunProgramNPUOpTest): - def setUp(self): self.op_type = "run_program" self.dtype = np.float32 self.input_names = { 'X': ['img'], - 'Params': ['weight_param', 'bias_param'] + 'Params': ['weight_param', 'bias_param'], } self.output_names = {'Out': ['fc_0.tmp_2']} self.inputs = { 'X': { - self.input_names['X'][0]: - np.random.random((32, 1, 28, 28)).astype(self.dtype) + self.input_names['X'][0]: np.random.random( + (32, 1, 28, 28) + ).astype(self.dtype) }, 'Params': { - self.input_names['Params'][0]: - np.random.random((784, 10)).astype(self.dtype), - self.input_names['Params'][1]: - np.random.random((32, 10)).astype(self.dtype) - } + self.input_names['Params'][0]: np.random.random( + (784, 10) + ).astype(self.dtype), + self.input_names['Params'][1]: np.random.random( + (32, 10) + ).astype(self.dtype), + }, } def test_check_output(self): @@ -274,26 +290,34 @@ class TestRunProgramOpWithFC(RunProgramNPUOpTest): def build_model(self): # 1. 
simple model - img = fluid.data(name=self.input_names['X'][0], - shape=[None, 1, 28, 28], - dtype='float32') + img = fluid.data( + name=self.input_names['X'][0], + shape=[None, 1, 28, 28], + dtype='float32', + ) weight_attr = fluid.ParamAttr( name=self.input_names['Params'][0], learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer( - self.inputs['Params'][self.input_names['Params'][0]]), - trainable=True) + self.inputs['Params'][self.input_names['Params'][0]] + ), + trainable=True, + ) bias_attr = fluid.ParamAttr( name=self.input_names['Params'][1], learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer( - self.inputs['Params'][self.input_names['Params'][1]]), - trainable=True) - pred = fluid.layers.fc(input=img, - size=10, - param_attr=weight_attr, - bias_attr=bias_attr, - act='relu') + self.inputs['Params'][self.input_names['Params'][1]] + ), + trainable=True, + ) + pred = fluid.layers.fc( + input=img, + size=10, + param_attr=weight_attr, + bias_attr=bias_attr, + act='relu', + ) # 2. get forward op num fwd_op_num = fluid.default_main_program().global_block().desc.op_size() # 3. append backward diff --git a/python/paddle/fluid/tests/unittests/npu/test_sampling_id_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sampling_id_op_npu.py index 399ac0d25225a41c3a0b998ea5847f5dd08011d0..d354e39bcf08b83efb7ecc1d4f23c34ac8274f76 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sampling_id_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sampling_id_op_npu.py @@ -28,7 +28,6 @@ _set_use_system_allocator(False) class TestSamplingIdShape(unittest.TestCase): - def test_shape(self): paddle.enable_static() x = fluid.layers.data(name='x', shape=[3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py b/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py index b6a6a0464a02c37789ad167d3e30eb8067346bf7..09f25273696efd41d395cc76b6c6c6274ef5b754 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_save_load_npu.py @@ -36,62 +36,78 @@ paddle.enable_static() class TestNPUSaveLoadBase(TestSaveLoadBase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPUSaveLoadPartial(TestSaveLoadPartial): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPUSaveLoadSetStateDict(TestSaveLoadSetStateDict): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPUProgramStatePartial(TestProgramStatePartial): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPULoadFromOldInterface(TestLoadFromOldInterface): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPULoadFromOldInterfaceSingleFile(TestLoadFromOldInterfaceSingleFile): - 
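# --- Editorial sketch, not part of the patch above ---------------------------
# The pattern black settles on for the set_place() helpers in this file: one
# parenthesized conditional expression instead of a call broken across lines.
# Assumes a Paddle installation; the test classes themselves inherit everything
# else from the CPU save/load tests.
import paddle
import paddle.fluid as fluid
from paddle.fluid import core


def set_place():
    # Fall back to CPU when the wheel was not built with NPU support.
    return (
        fluid.CPUPlace()
        if not core.is_compiled_with_npu()
        else paddle.NPUPlace(0)
    )
# -----------------------------------------------------------------------------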
def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPUProgramStateOldSave(TestProgramStateOldSave): - def setUp(self): self.test_dygraph = False def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) class TestNPUProgramStateOldSaveSingleModel(TestProgramStateOldSaveSingleModel): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_npu() else paddle.NPUPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_npu() + else paddle.NPUPlace(0) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py index c7166159f9a8fd7148b4eb213b514ea7eeb3109e..94477432e57a88330a0b3ff5a33279a5d01aa8b4 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_scale_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestScale(OpTest): - def setUp(self): self.set_npu() self.op_type = "scale" @@ -34,14 +33,15 @@ class TestScale(OpTest): self.init_dtype() self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype( - np.random.random((10, 10)).astype(self.dtype)) + 'X': OpTest.np_dtype_to_fluid_dtype( + np.random.random((10, 10)).astype(self.dtype) + ) } self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': True} self.outputs = { 'Out': (self.inputs['X'] * self.dtype(self.attrs['scale'])).astype( - self.dtype) + self.dtype + ) } def set_npu(self): @@ -55,25 +55,21 @@ class TestScale(OpTest): class TestFP16Scale(TestScale): - def init_dtype(self): self.dtype = np.float16 class TestScaleInt(TestScale): - def init_dtype(self): self.dtype = np.int32 class TestScaleInt64(TestScale): - def init_dtype(self): self.dtype = np.int64 class TestBiasAfterScale(OpTest): - def setUp(self): self.set_npu() self.op_type = "scale" @@ -81,9 +77,9 @@ class TestBiasAfterScale(OpTest): self.init_dtype() self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype( - np.random.random((10, 10)).astype(self.dtype)) + 'X': OpTest.np_dtype_to_fluid_dtype( + np.random.random((10, 10)).astype(self.dtype) + ) } self.attrs = {'scale': -2.3, 'bias': 0, 'bias_after_scale': False} self.outputs = { diff --git a/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py index 8bb28b10997c83e4aae30f44701b8fe29c44b046..bfa591e3f10d97cbe007b3351583e155d6a6ff4b 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_scatter_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestCast1_FP32(OpTest): - def setUp(self): self.set_npu() self.op_type = "scatter" @@ -51,7 +50,6 @@ class TestCast1_FP32(OpTest): class TestCast_INT32(OpTest): - def setUp(self): self.set_npu() self.op_type = "scatter" @@ -75,7 +73,6 @@ class TestCast_INT32(OpTest): class TestCast2_FP32(OpTest): - def setUp(self): self.set_npu() self.op_type = "scatter" @@ -99,7 +96,6 @@ class TestCast2_FP32(OpTest): class TestCast3_FP32(OpTest): - def setUp(self): self.set_npu() self.op_type = "scatter" @@ -124,7 +120,6 @@ class TestCast3_FP32(OpTest): class TestCast_INT64(OpTest): - def setUp(self): self.set_npu() self.op_type = "scatter" diff 
--git a/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py index 72402bd2b8667798881bab961261612b82e04d9c..dd688a29c442a942a9444624fa2c85574549787e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_seed_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestSeedOpFixSeed(OpTest): - def setUp(self): self.set_npu() self.op_type = "seed" @@ -42,7 +41,6 @@ class TestSeedOpFixSeed(OpTest): class TestSeedOpDiffSeed(OpTest): - def setUp(self): self.set_npu() self.op_type = "seed" diff --git a/python/paddle/fluid/tests/unittests/npu/test_sequence_mask_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sequence_mask_npu.py index df0cb03d58bfdffde03c6f1d116e2c4be4eb8c41..66642cd4bc66fe06419c356dc2e0d7c04aa75fae 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sequence_mask_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sequence_mask_npu.py @@ -21,13 +21,16 @@ from op_test import OpTest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid.framework import convert_np_dtype_to_dtype_, Program, program_guard +from paddle.fluid.framework import ( + convert_np_dtype_to_dtype_, + Program, + program_guard, +) paddle.enable_static() class SequenceMaskTestBase(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -51,19 +54,19 @@ class SequenceMaskTestBase(OpTest): self.outputs = {'Y': self.calc_ground_truth_mask()} self.attrs = { 'maxlen': self.maxlen, - 'out_dtype': convert_np_dtype_to_dtype_(self.mask_dtype) + 'out_dtype': convert_np_dtype_to_dtype_(self.mask_dtype), } def calc_ground_truth_mask(self): maxlen = np.max(self.x) if self.maxlen < 0 else self.maxlen - shape = self.x.shape + (maxlen, ) - index_broadcast = np.broadcast_to(np.reshape( - range(maxlen), newshape=[1] * self.x.ndim + [-1]), - shape=shape) - x_broadcast = np.broadcast_to(np.reshape(self.x, - newshape=self.x.shape + - (-1, )), - shape=shape) + shape = self.x.shape + (maxlen,) + index_broadcast = np.broadcast_to( + np.reshape(range(maxlen), newshape=[1] * self.x.ndim + [-1]), + shape=shape, + ) + x_broadcast = np.broadcast_to( + np.reshape(self.x, newshape=self.x.shape + (-1,)), shape=shape + ) return (index_broadcast < x_broadcast).astype(self.mask_dtype) def test_check_output(self): @@ -71,43 +74,36 @@ class SequenceMaskTestBase(OpTest): class SequenceMaskTest1(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'bool' class SequenceMaskTest2(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'uint8' class SequenceMaskTest3(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'int32' class SequenceMaskTest4(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'float32' class SequenceMaskTest5(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'float64' class SequenceMaskTest6(SequenceMaskTestBase): - def initParameters(self): self.maxlen = -1 class SequenceMaskTestBase_tensor_attr(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -134,14 +130,14 @@ class SequenceMaskTestBase_tensor_attr(OpTest): def calc_ground_truth_mask(self): maxlen = np.max(self.x) if self.maxlen < 0 else self.maxlen - shape = self.x.shape + (maxlen, ) - index_broadcast = np.broadcast_to(np.reshape( - range(maxlen), newshape=[1] * self.x.ndim + [-1]), - shape=shape) - x_broadcast = np.broadcast_to(np.reshape(self.x, - newshape=self.x.shape + - (-1, 
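# --- Editorial sketch, not part of the patch above ---------------------------
# The broadcasting trick behind calc_ground_truth_mask(): build an index grid
# of shape x.shape + (maxlen,) and compare it against the lengths stored in x.
# Standalone illustration with a concrete input; dtype handling is simplified.
import numpy as np

x = np.array([2, 4, 1])  # sequence lengths
maxlen = int(np.max(x))
shape = x.shape + (maxlen,)
index_broadcast = np.broadcast_to(
    np.reshape(np.arange(maxlen), [1] * x.ndim + [-1]), shape
)
x_broadcast = np.broadcast_to(np.reshape(x, x.shape + (-1,)), shape)
mask = (index_broadcast < x_broadcast).astype("int32")
# mask -> [[1, 1, 0, 0], [1, 1, 1, 1], [1, 0, 0, 0]]
# -----------------------------------------------------------------------------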
)), - shape=shape) + shape = self.x.shape + (maxlen,) + index_broadcast = np.broadcast_to( + np.reshape(range(maxlen), newshape=[1] * self.x.ndim + [-1]), + shape=shape, + ) + x_broadcast = np.broadcast_to( + np.reshape(self.x, newshape=self.x.shape + (-1,)), shape=shape + ) return (index_broadcast < x_broadcast).astype(self.mask_dtype) def test_check_output(self): @@ -149,37 +145,31 @@ class SequenceMaskTestBase_tensor_attr(OpTest): class SequenceMaskTest1_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'bool' class SequenceMaskTest2_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'uint8' class SequenceMaskTest3_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'int32' class SequenceMaskTest4_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'float32' class SequenceMaskTest5_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'float64' class TestSequenceMaskOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.uniform(1, 5, [4]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/npu/test_set_value_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_set_value_op_npu.py index e1b866f31f506690f675ee3658c6016be60c6c90..96af4148833196e8146451123888495f596e0b1c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_set_value_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_set_value_op_npu.py @@ -24,7 +24,6 @@ from paddle.fluid import core class TestSetValueBase(unittest.TestCase): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -55,7 +54,6 @@ class TestSetValueBase(unittest.TestCase): class TestSetValueApi(TestSetValueBase): - def _run_static(self): paddle.enable_static() with paddle.static.program_guard(self.program): @@ -80,17 +78,22 @@ class TestSetValueApi(TestSetValueBase): dynamic_out = self._run_dynamic() self._get_answer() - error_msg = "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" - self.assertTrue((self.data == static_out).all(), - msg=error_msg.format("static", self.data, static_out)) - self.assertTrue((self.data == dynamic_out).all(), - msg=error_msg.format("dynamic", self.data, dynamic_out)) + error_msg = ( + "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" + ) + self.assertTrue( + (self.data == static_out).all(), + msg=error_msg.format("static", self.data, static_out), + ) + self.assertTrue( + (self.data == dynamic_out).all(), + msg=error_msg.format("dynamic", self.data, dynamic_out), + ) # 1. 
Test different type of item: int, Python slice, Paddle Tensor # 1.1 item is int class TestSetValueItemInt(TestSetValueApi): - def _call_setitem(self, x): x[0] = self.value @@ -101,7 +104,6 @@ class TestSetValueItemInt(TestSetValueApi): # 1.2 item is slice # 1.2.1 step is 1 class TestSetValueItemSlice(TestSetValueApi): - def _call_setitem(self, x): x[0:2] = self.value @@ -110,7 +112,6 @@ class TestSetValueItemSlice(TestSetValueApi): class TestSetValueItemSlice2(TestSetValueApi): - def _call_setitem(self, x): x[0:-1] = self.value @@ -119,7 +120,6 @@ class TestSetValueItemSlice2(TestSetValueApi): class TestSetValueItemSlice3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2] = self.value @@ -128,7 +128,6 @@ class TestSetValueItemSlice3(TestSetValueApi): class TestSetValueItemSlice4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2, :] = self.value @@ -156,7 +155,6 @@ class TestSetValueItemSlice4(TestSetValueApi): # 1.2.2 step > 1 class TestSetValueItemSliceStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 5, 5] @@ -168,7 +166,6 @@ class TestSetValueItemSliceStep(TestSetValueApi): class TestSetValueItemSliceStep2(TestSetValueApi): - def set_shape(self): self.shape = [7, 5, 5] @@ -180,7 +177,6 @@ class TestSetValueItemSliceStep2(TestSetValueApi): class TestSetValueItemSliceStep3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2, ::2] = self.value @@ -189,7 +185,6 @@ class TestSetValueItemSliceStep3(TestSetValueApi): class TestSetValueItemSliceStep4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2:2, :] = self.value @@ -199,7 +194,6 @@ class TestSetValueItemSliceStep4(TestSetValueApi): # 1.2.3 step < 0 class TestSetValueItemSliceNegetiveStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 2] @@ -214,7 +208,6 @@ class TestSetValueItemSliceNegetiveStep(TestSetValueApi): class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): - def set_shape(self): self.shape = [5] @@ -229,7 +222,6 @@ class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): - def set_shape(self): self.shape = [3] @@ -244,7 +236,6 @@ class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -259,7 +250,6 @@ class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): class TestSetValueItemEllipsis1(TestSetValueApi): - def _call_setitem(self, x): x[0:, ..., 1:] = self.value @@ -268,7 +258,6 @@ class TestSetValueItemEllipsis1(TestSetValueApi): class TestSetValueItemEllipsis2(TestSetValueApi): - def _call_setitem(self, x): x[0:, ...] = self.value @@ -277,7 +266,6 @@ class TestSetValueItemEllipsis2(TestSetValueApi): class TestSetValueItemEllipsis3(TestSetValueApi): - def _call_setitem(self, x): x[..., 1:] = self.value @@ -286,7 +274,6 @@ class TestSetValueItemEllipsis3(TestSetValueApi): class TestSetValueItemEllipsis4(TestSetValueApi): - def _call_setitem(self, x): x[...] 
= self.value @@ -296,7 +283,6 @@ class TestSetValueItemEllipsis4(TestSetValueApi): # 1.4 item is Paddle Tensor class TestSetValueItemTensor(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") x[zero] = self.value @@ -306,7 +292,6 @@ class TestSetValueItemTensor(TestSetValueApi): class TestSetValueItemTensor2(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -317,7 +302,6 @@ class TestSetValueItemTensor2(TestSetValueApi): class TestSetValueItemTensor3(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -328,7 +312,6 @@ class TestSetValueItemTensor3(TestSetValueApi): class TestSetValueItemTensor4(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -339,7 +322,6 @@ class TestSetValueItemTensor4(TestSetValueApi): class TestSetValueItemTensor5(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -350,7 +332,6 @@ class TestSetValueItemTensor5(TestSetValueApi): class TestSetValueItemTensor6(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -365,7 +346,6 @@ class TestSetValueItemTensor6(TestSetValueApi): # 1.5 item is None class TestSetValueItemNone1(TestSetValueApi): - def _call_setitem(self, x): x[None] = self.value @@ -374,7 +354,6 @@ class TestSetValueItemNone1(TestSetValueApi): class TestSetValueItemNone2(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 1] = self.value @@ -383,7 +362,6 @@ class TestSetValueItemNone2(TestSetValueApi): class TestSetValueItemNone3(TestSetValueApi): - def _call_setitem(self, x): x[:, None, None, 1] = self.value @@ -392,7 +370,6 @@ class TestSetValueItemNone3(TestSetValueApi): class TestSetValueItemNone4(TestSetValueApi): - def _call_setitem(self, x): x[0, 0, None, 1] = self.value @@ -401,7 +378,6 @@ class TestSetValueItemNone4(TestSetValueApi): class TestSetValueItemNone5(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 0, None, 1] = self.value @@ -410,7 +386,6 @@ class TestSetValueItemNone5(TestSetValueApi): class TestSetValueItemNone6(TestSetValueApi): - def _call_setitem(self, x): x[None, 0, 0, None, 0] = self.value @@ -419,7 +394,6 @@ class TestSetValueItemNone6(TestSetValueApi): class TestSetValueItemNone7(TestSetValueApi): - def _call_setitem(self, x): x[:, None, 1] = np.zeros(self.shape)[:, None, 0] @@ -428,7 +402,6 @@ class TestSetValueItemNone7(TestSetValueApi): class TestSetValueItemNone8(TestSetValueApi): - def _call_setitem(self, x): x[:, 1, None] = np.zeros(self.shape)[:, 0, None] @@ -437,7 +410,6 @@ class TestSetValueItemNone8(TestSetValueApi): class TestSetValueItemNone9(TestSetValueApi): - def _call_setitem(self, x): x[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None] @@ -447,7 +419,6 @@ class TestSetValueItemNone9(TestSetValueApi): # 1.5 item is list or Tensor of bol class TestSetValueItemBool1(TestSetValueApi): - def _call_setitem(self, x): x[[True, False]] = self.value @@ -456,7 +427,6 @@ class TestSetValueItemBool1(TestSetValueApi): class TestSetValueItemBool2(TestSetValueApi): - def _call_setitem(self, x): x[[False, False]] = self.value @@ -465,7 +435,6 @@ class TestSetValueItemBool2(TestSetValueApi): class TestSetValueItemBool3(TestSetValueApi): - def _call_setitem(self, x): x[[False, True]] = np.zeros(self.shape[2]) @@ -474,7 +443,6 
@@ class TestSetValueItemBool3(TestSetValueApi): class TestSetValueItemBool4(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign(np.array([False, True])) x[idx] = np.zeros(self.shape[2]) @@ -484,19 +452,19 @@ class TestSetValueItemBool4(TestSetValueApi): class TestSetValueItemBool5(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign( - np.array([[False, True, False], [True, True, False]])) + np.array([[False, True, False], [True, True, False]]) + ) x[idx] = self.value def _get_answer(self): - self.data[np.array([[False, True, False], [True, True, - False]])] = self.value + self.data[ + np.array([[False, True, False], [True, True, False]]) + ] = self.value class TestSetValueItemBool6(TestSetValueApi): - def _call_setitem(self, x): x[0, ...] = 0 x[x > 0] = self.value @@ -507,9 +475,7 @@ class TestSetValueItemBool6(TestSetValueApi): def create_test_value_int32(parent): - class TestValueInt(parent): - def set_value(self): self.value = 7 @@ -529,9 +495,7 @@ create_test_value_int32(TestSetValueItemSlice4) def create_test_value_int64(parent): - class TestValueInt(parent): - def set_value(self): self.value = 7 @@ -551,9 +515,7 @@ create_test_value_int64(TestSetValueItemSlice4) def create_test_value_tensor_fp32(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "float32" @@ -578,7 +540,6 @@ create_test_value_tensor_fp32(TestSetValueItemSlice4) # 3. Test different shape of value class TestSetValueValueShape1(TestSetValueApi): - def set_value(self): self.value = np.array([3, 4, 5, 6]) # shape is (4,) @@ -590,7 +551,6 @@ class TestSetValueValueShape1(TestSetValueApi): class TestSetValueValueShape2(TestSetValueApi): - def set_value(self): self.value = np.array([[3, 4, 5, 6]]) # shape is (1,4) @@ -602,10 +562,10 @@ class TestSetValueValueShape2(TestSetValueApi): class TestSetValueValueShape3(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, 3]]) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ) # shape is (3,4) def _call_setitem(self, x): x[0] = self.value @@ -615,11 +575,12 @@ class TestSetValueValueShape3(TestSetValueApi): class TestSetValueValueShape4(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, - 3]]).astype(self.dtype) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ).astype( + self.dtype + ) # shape is (3,4) def _call_setitem(self, x): x[0] = paddle.assign(self.value) # x is Paddle.Tensor @@ -629,7 +590,6 @@ class TestSetValueValueShape4(TestSetValueApi): class TestSetValueValueShape5(TestSetValueApi): - def set_value(self): self.value = np.array([3, 3, 3]).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py index 03c48b415dd4ca3fc1540255f747b601813cd4ca..9747953862508a5fa32c24d7d98ebd34f6ecb7db 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sgd_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestSGD(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -54,7 +53,6 @@ class TestSGD(OpTest): class TestNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -69,9 +67,9 @@ class TestNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = 
paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -95,16 +93,17 @@ class TestNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py index 136bedea8226cbbd1287be664f7ae612576fca33..fdee5eced0cbcd612156e68ca2f04baf31357af8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_shape_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestShape(OpTest): - def setUp(self): self.set_npu() self.op_type = "shape" @@ -52,25 +51,21 @@ class TestShape(OpTest): class TestShape_fp16(TestShape): - def init_dtype(self): self.dtype = np.float16 class TestShape_double(TestShape): - def init_dtype(self): self.dtype = np.float64 class TestShape_int32(TestShape): - def init_dtype(self): self.dtype = np.int32 class TestShape_int64(TestShape): - def init_dtype(self): self.dtype = np.int64 diff --git a/python/paddle/fluid/tests/unittests/npu/test_shard_index_op.py b/python/paddle/fluid/tests/unittests/npu/test_shard_index_op.py index 3ed6051b46fd2f73d86b40358207634ccd9a7126..2fc3d9beeb81e48455ac6f5df0e78b2753eb1037 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_shard_index_op.py +++ b/python/paddle/fluid/tests/unittests/npu/test_shard_index_op.py @@ -52,13 +52,12 @@ def common_setup(self, index_num, nshards, shard_id, ignore_value): 'index_num': index_num, 'nshards': nshards, 'shard_id': shard_id, - 'ignore_value': ignore_value + 'ignore_value': ignore_value, } self.outputs = {'Out': (out, x_lod)} class TestShardIndexShardId0Op(OpTest): - def setUp(self): common_setup(self, 20, 2, 0, -1) @@ -67,19 +66,16 @@ class TestShardIndexShardId0Op(OpTest): class TestShardIndexShardId1Op(TestShardIndexShardId0Op): - def setUp(self): common_setup(self, 20, 2, 1, -1) class TestShardIndexIgnoreValueOp(TestShardIndexShardId0Op): - def setUp(self): common_setup(self, 20, 2, 0, -2) class TestShardIndexNotEvenlyDividedOp(TestShardIndexShardId0Op): - def setUp(self): common_setup(self, 15, 2, 1, -1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sigmoid_cross_entropy_with_logits_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sigmoid_cross_entropy_with_logits_op_npu.py index 1454edd7a7538bc9d8d7bdfd53dc7685634c0f9c..e06c132df251f1c46b119a55888d68290656deae 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sigmoid_cross_entropy_with_logits_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sigmoid_cross_entropy_with_logits_op_npu.py @@ -25,11 +25,11 @@ import paddle paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") 
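# --- Editorial sketch, not part of the patch above ---------------------------
# A standalone, numerically stable reference for the op exercised below,
# sigmoid_cross_entropy_with_logits.  This is the usual formulation; the test
# file's own term1/term2 expressions sit outside the hunks shown here and may
# be written differently.
import numpy as np


def sigmoid_cross_entropy_with_logits(x, label):
    # loss = max(x, 0) - x * label + log(1 + exp(-|x|)), elementwise.
    return np.maximum(x, 0) - x * label + np.log1p(np.exp(-np.abs(x)))


# Binary labels with the same (batch_size, num_classes) layout as the tests.
logits = np.random.uniform(-3.0, 3.0, (64, 20)).astype("float32")
labels = np.random.randint(0, 2, (64, 20)).astype("float32")
loss = sigmoid_cross_entropy_with_logits(logits, labels)
# -----------------------------------------------------------------------------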
+@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -39,13 +39,14 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.randint(0, 2, (batch_size, num_classes)).astype( + self.dtype + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -70,12 +71,13 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): self.dtype = np.float32 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") -class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) +class TestSigmoidCrossEntropyWithLogitsOp3( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -85,13 +87,14 @@ class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.uniform(0, 1, (batch_size, num_classes)).astype( + self.dtype + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -103,12 +106,13 @@ class TestSigmoidCrossEntropyWithLogitsOp3(TestSigmoidCrossEntropyWithLogitsOp1 self.outputs = {'Out': -term1 - term2} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") -class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) +class TestSigmoidCrossEntropyWithLogitsOp5( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -118,14 +122,14 @@ class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, tuple(batch_size + [num_classes])).astype( - self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -137,12 +141,13 @@ class TestSigmoidCrossEntropyWithLogitsOp5(TestSigmoidCrossEntropyWithLogitsOp1 self.outputs = {'Out': -term1 - term2} -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not 
compiled with NPU") -class TestSigmoidCrossEntropyWithLogitsOp6(TestSigmoidCrossEntropyWithLogitsOp1 - ): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) +class TestSigmoidCrossEntropyWithLogitsOp6( + TestSigmoidCrossEntropyWithLogitsOp1 +): + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -152,14 +157,14 @@ class TestSigmoidCrossEntropyWithLogitsOp6(TestSigmoidCrossEntropyWithLogitsOp1 batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, tuple(batch_size + [num_classes])).astype( - self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.randint( + 0, 2, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } # Fw Pass is implemented as elementwise sigmoid followed by diff --git a/python/paddle/fluid/tests/unittests/npu/test_sigmoid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sigmoid_op_npu.py index 47d881ee013ed95bb2f30ffdbf7d6b8e39476cd1..caac4ed6ba426c85e9bcff920ed7fe8c5852e90b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sigmoid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sigmoid_op_npu.py @@ -23,10 +23,10 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestNPUSigmoid(OpTest): - def setUp(self): self.op_type = "sigmoid" self.set_npu() @@ -43,9 +43,9 @@ class TestNPUSigmoid(OpTest): self.check_output_with_place(self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.01) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=0.01 + ) def set_npu(self): self.__class__.use_npu = True @@ -55,10 +55,10 @@ class TestNPUSigmoid(OpTest): self.dtype = np.float32 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestNPUSigmoidFp16(TestNPUSigmoid): - def test_check_output(self): self.check_output_with_place(self.place, atol=1e-3) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py index 26e6ae9255d42312934a1d070be15c3a3c97fa32..971184956e6d8bb81d48cd5544af02ec54971cb0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py @@ -17,7 +17,11 @@ import unittest import numpy as np from scipy.special import expit, erf -from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + skip_check_grad_ci, +) import paddle import paddle.nn as nn import paddle.nn.functional as F @@ -29,9 +33,7 @@ paddle.enable_static() def test_class(op_type, typename): - class TestSin(OpTest): - def setUp(self): self.op_type = "sin" self.__class__.use_npu = True @@ -57,7 +59,7 @@ def test_class(op_type, typename): out = eval("paddle.%s(data, name='Y')" % self.op_type) place = 
fluid.NPUPlace(0) exe = fluid.Executor(place) - result, = exe.run(feed={"X": np_x}, fetch_list=[out]) + (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = eval("np.%s(np_x)" % self.op_type) self.assertEqual(result, expected) diff --git a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py index e5fd042674204028df9e47f3b4fd040a1c9f5b6d..1e768a5dd185a4d0acb8970f26a47f58a3f7075e 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestSizeOp(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -48,49 +47,42 @@ class TestSizeOp(OpTest): class TestSizeOp1(TestSizeOp): - def config(self): self.shape = [2] self.dtype = np.float64 class TestSizeOp2(TestSizeOp): - def config(self): self.shape = [2, 3] self.dtype = np.float32 class TestSizeOp3(TestSizeOp): - def config(self): self.shape = [2, 3, 100] self.dtype = np.float16 class TestSizeOp4(TestSizeOp): - def config(self): self.shape = [2**10] self.dtype = np.bool_ class TestSizeOp5(TestSizeOp): - def config(self): self.shape = [7, 8, 9, 10] self.dtype = np.int64 class TestSizeOp6(TestSizeOp): - def config(self): self.shape = [] self.dtype = np.int64 class TestSizeAPI(unittest.TestCase): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -111,17 +103,19 @@ class TestSizeAPI(unittest.TestCase): out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) exe = paddle.static.Executor(place=self.place) - res_1, res_2 = exe.run(feed={ - "x_1": input_1, - "x_2": input_2, - }, - fetch_list=[out_1, out_2]) - assert (np.array_equal(res_1, - np.array([np.size(input_1) - ]).astype("int64"))) - assert (np.array_equal(res_2, - np.array([np.size(input_2) - ]).astype("int64"))) + res_1, res_2 = exe.run( + feed={ + "x_1": input_1, + "x_2": input_2, + }, + fetch_list=[out_1, out_2], + ) + assert np.array_equal( + res_1, np.array([np.size(input_1)]).astype("int64") + ) + assert np.array_equal( + res_2, np.array([np.size(input_2)]).astype("int64") + ) def test_size_imperative(self): paddle.disable_static(self.place) @@ -131,8 +125,8 @@ class TestSizeAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) - assert (np.array_equal(out_1.numpy().item(0), np.size(input_1))) - assert (np.array_equal(out_2.numpy().item(0), np.size(input_2))) + assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) + assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py index ba2b45ab9f4acbd727fab733e95aa62751e0fbfc..5bc1700cc16f46405d1a38417b915ce88351a06b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py @@ -27,7 +27,6 @@ EPOCH = 100 class TestSliceOp(OpTest): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -39,7 +38,7 @@ class TestSliceOp(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -65,15 +64,14 @@ class TestSliceOp(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, 
['Input'], - 'Out', - max_relative_error=0.02) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.02 + ) else: self.check_grad_with_place(self.place, ['Input'], 'Out') class TestSliceOp2(TestSliceOp): - def config(self): self.input = np.random.random([10, 5, 6]).astype(self.dtype) self.starts = [0] @@ -84,7 +82,6 @@ class TestSliceOp2(TestSliceOp): class TestSliceOpFp16(TestSliceOp): - def init_dtype(self): self.dtype = np.float16 @@ -95,7 +92,6 @@ class TestSliceOpFp16(TestSliceOp): class TestSliceOpTensor(TestSliceOp): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -104,14 +100,14 @@ class TestSliceOpTensor(TestSliceOp): self.inputs = { 'Input': self.input, 'StartsTensor': self.starts, - 'EndsTensor': self.ends + 'EndsTensor': self.ends, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, 'starts': [-1, -1, -1], 'ends': [-1, -1, -1], - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -124,7 +120,6 @@ class TestSliceOpTensor(TestSliceOp): class TestSliceOpTensor2(TestSliceOpTensor): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -133,14 +128,14 @@ class TestSliceOpTensor2(TestSliceOpTensor): self.inputs = { 'Input': self.input, 'StartsTensor': self.starts, - 'EndsTensor': self.ends + 'EndsTensor': self.ends, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, 'starts': [-1], 'ends': [-1], - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -153,7 +148,6 @@ class TestSliceOpTensor2(TestSliceOpTensor): class TestSliceOpFp16Tensor(TestSliceOpTensor): - def init_dtype(self): self.dtype = np.float16 @@ -164,7 +158,6 @@ class TestSliceOpFp16Tensor(TestSliceOpTensor): class TestSliceOpTensorList(TestSliceOp): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -173,25 +166,27 @@ class TestSliceOpTensorList(TestSliceOp): self.starts_tensor_list = [] for index, ele in enumerate(self.starts): - self.starts_tensor_list.append(("start" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.starts_tensor_list.append( + ("start" + str(index), np.ones((1)).astype('int32') * ele) + ) self.ends_tensor_list = [] for index, ele in enumerate(self.ends): - self.ends_tensor_list.append(("end" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.ends_tensor_list.append( + ("end" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'Input': self.input, 'StartsTensorList': self.starts_tensor_list, - 'EndsTensorList': self.ends_tensor_list + 'EndsTensorList': self.ends_tensor_list, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, 'starts': [-1, -1, -1], 'ends': [-1, -1, -1], - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -204,7 +199,6 @@ class TestSliceOpTensorList(TestSliceOp): class TestSliceOpTensorList2(TestSliceOpTensorList): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -213,25 +207,27 @@ class TestSliceOpTensorList2(TestSliceOpTensorList): self.starts_tensor_list = [] for index, ele in enumerate(self.starts): - self.starts_tensor_list.append(("start" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.starts_tensor_list.append( + ("start" + str(index), np.ones((1)).astype('int32') * ele) + ) self.ends_tensor_list = [] for index, ele in enumerate(self.ends): - self.ends_tensor_list.append(("end" + str(index), np.ones( - (1)).astype('int32') * ele)) + self.ends_tensor_list.append( + ("end" + str(index), 
np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'Input': self.input, 'StartsTensorList': self.starts_tensor_list, - 'EndsTensorList': self.ends_tensor_list + 'EndsTensorList': self.ends_tensor_list, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, 'starts': [-1], 'ends': [-1], - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -244,7 +240,6 @@ class TestSliceOpTensorList2(TestSliceOpTensorList): class TestSliceOpFp16TensorList(TestSliceOpTensorList): - def init_dtype(self): self.dtype = np.float16 @@ -255,7 +250,6 @@ class TestSliceOpFp16TensorList(TestSliceOpTensorList): class TestSliceNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -272,9 +266,9 @@ class TestSliceNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=data_shape, dtype='float32') b = paddle.static.data(name="b", shape=data_shape, dtype='float32') - label = paddle.static.data(name="label", - shape=[batch_size, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[batch_size, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.slice(sum, axes=[0, 1], starts=[0, 0], ends=[33, 2]) @@ -282,7 +276,8 @@ class TestSliceNet(unittest.TestCase): prediction = paddle.static.nn.fc(z, size=2, activation='softmax') cost = paddle.fluid.layers.softmax_with_cross_entropy( - logits=prediction, label=label) + logits=prediction, label=label + ) loss = paddle.mean(cost) sgd = paddle.optimizer.SGD(learning_rate=0.01) sgd.minimize(loss) @@ -297,16 +292,17 @@ class TestSliceNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(EPOCH): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res @@ -319,7 +315,6 @@ class TestSliceNet(unittest.TestCase): class TestSliceOpDecsDim(OpTest): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -365,21 +360,19 @@ class TestSliceOpDecsDim(OpTest): def test_check_grad_normal(self): if self.dtype == np.float16: - self.check_grad_with_place(self.place, ['Input'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + self.place, ['Input'], 'Out', max_relative_error=0.5 + ) else: self.check_grad_with_place(self.place, ['Input'], 'Out') class TestSliceOpDecsDimFp16(TestSliceOpDecsDim): - def init_dtype(self): self.dtype = np.float16 class TestSliceOpDecsDim2(TestSliceOpDecsDim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [1, 0, 2] @@ -391,7 +384,6 @@ class TestSliceOpDecsDim2(TestSliceOpDecsDim): class TestSliceOpDecsDim3(TestSliceOpDecsDim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-1, 0, 2] @@ -403,7 +395,6 @@ class TestSliceOpDecsDim3(TestSliceOpDecsDim): class TestSliceOpDecsDim4(TestSliceOpDecsDim): - def config(self): self.input = np.random.random([3, 4, 5, 7]).astype(self.dtype) self.starts = [0, 1, 2, 3] @@ -415,7 +406,6 @@ class TestSliceOpDecsDim4(TestSliceOpDecsDim): 
class TestSliceOpDecsDim5(TestSliceOpDecsDim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-1] @@ -427,7 +417,6 @@ class TestSliceOpDecsDim5(TestSliceOpDecsDim): class TestSliceOpDecsDim6(TestSliceOpDecsDim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [0, 1, 2, 3] @@ -439,11 +428,10 @@ class TestSliceOpDecsDim6(TestSliceOpDecsDim): class TestSliceOpDecsDimStartsTensor(TestSliceOpDecsDim): - def set_inputs(self): self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype='int32') + "StartsTensor": np.array(self.starts, dtype='int32'), } def set_attrs(self): @@ -466,18 +454,16 @@ class TestSliceOpDecsDimStartsTensor(TestSliceOpDecsDim): class TestSliceOpDecsDimStartsTensorFP16(TestSliceOpDecsDimStartsTensor): - def init_dtype(self): self.dtype = np.float16 class TestSliceOpDecsDimStartsTensorStartsAndEndsTensor(TestSliceOpDecsDim): - def set_inputs(self): self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype='int64'), - "EndsTensor": np.array(self.ends, dtype='int32') + "EndsTensor": np.array(self.ends, dtype='int32'), } def set_attrs(self): @@ -500,19 +486,19 @@ class TestSliceOpDecsDimStartsTensorStartsAndEndsTensor(TestSliceOpDecsDim): class TestSliceOpDecsDimStartsTensorStartsAndEndsTensorFP16( - TestSliceOpDecsDimStartsTensorStartsAndEndsTensor): - + TestSliceOpDecsDimStartsTensorStartsAndEndsTensor +): def init_dtype(self): self.dtype = np.float16 class TestSliceOpDecsDimStartsListTensor(TestSliceOpDecsDim): - def set_inputs(self): starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} @@ -538,7 +524,6 @@ class TestSliceOpDecsDimStartsListTensor(TestSliceOpDecsDim): class TestSliceOpDecsDimStartsListTensor2(TestSliceOpDecsDimStartsListTensor): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-1] @@ -551,15 +536,14 @@ class TestSliceOpDecsDimStartsListTensor2(TestSliceOpDecsDimStartsListTensor): self.starts_infer = [-1] -class TestSliceOpDecsDimStartsListTensorFP16(TestSliceOpDecsDimStartsListTensor - ): - +class TestSliceOpDecsDimStartsListTensorFP16( + TestSliceOpDecsDimStartsListTensor +): def init_dtype(self): self.dtype = np.float16 class TestSliceOpInt64(OpTest): - def set_npu(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -575,12 +559,13 @@ class TestSliceOpInt64(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): - self.input = np.random.randint(100, - size=(3, 4, 5, 6)).astype(self.dtype) + self.input = np.random.randint(100, size=(3, 4, 5, 6)).astype( + self.dtype + ) self.starts = [1, 0, 2] self.ends = [3, 3, 4] self.axes = [0, 1, 2] @@ -595,7 +580,6 @@ class TestSliceOpInt64(OpTest): class TestSliceOpTensorInt64(TestSliceOpInt64): - def setUp(self): self.op_type = "slice" self.set_npu() @@ -604,19 +588,20 @@ class TestSliceOpTensorInt64(TestSliceOpInt64): self.inputs = { 'Input': self.input, 'StartsTensor': self.starts, - 'EndsTensor': self.ends + 'EndsTensor': self.ends, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, 'starts': [-1, -1, -1], 'ends': [-1, -1, -1], - 'infer_flags': 
self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): - self.input = np.random.randint(100, - size=(3, 4, 5, 6)).astype(self.dtype) + self.input = np.random.randint(100, size=(3, 4, 5, 6)).astype( + self.dtype + ) self.starts = np.array([1, 0, 2]).astype('int32') self.ends = np.array([3, 3, 4]).astype('int32') self.axes = [0, 1, 2] diff --git a/python/paddle/fluid/tests/unittests/npu/test_smooth_l1_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_smooth_l1_loss_op_npu.py index f133e7865cc3f677ae4b3c373d096b0d93bc29a8..54e625a9f4a91905b427bc486ca80e108a072db1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_smooth_l1_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_smooth_l1_loss_op_npu.py @@ -33,7 +33,6 @@ def smooth_l1_loss_forward(val, sigma2): class TestSmoothL1LossOp1(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -41,7 +40,7 @@ class TestSmoothL1LossOp1(OpTest): dims = (5, 20) self.inputs = { 'X': np.random.random(dims).astype("float32"), - 'Y': np.random.random(dims).astype("float32") + 'Y': np.random.random(dims).astype("float32"), } sigma = 3.0 self.attrs = {'sigma': sigma} @@ -51,7 +50,7 @@ class TestSmoothL1LossOp1(OpTest): loss = loss.reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32') + 'Out': loss.astype('float32'), } def set_npu(self): @@ -61,25 +60,30 @@ class TestSmoothL1LossOp1(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.02) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.02 + ) def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set=set("X")) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.03, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.03, - no_grad_set=set('Y')) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.03, + no_grad_set=set('Y'), + ) class TestSmoothL1LossOp2(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -89,7 +93,7 @@ class TestSmoothL1LossOp2(OpTest): 'X': np.random.random(dims).astype("float32"), 'Y': np.random.random(dims).astype("float32"), 'InsideWeight': np.random.random(dims).astype("float32"), - 'OutsideWeight': np.random.random(dims).astype("float32") + 'OutsideWeight': np.random.random(dims).astype("float32"), } sigma = 3.0 self.attrs = {'sigma': sigma} @@ -101,7 +105,7 @@ class TestSmoothL1LossOp2(OpTest): loss = loss.sum(1).reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32') + 'Out': loss.astype('float32'), } def set_npu(self): @@ -111,34 +115,39 @@ class TestSmoothL1LossOp2(OpTest): self.check_output_with_place(self.place) def test_check_grad_normal(self): - self.check_grad_with_place(self.place, ['X', 'Y'], - 'Out', - max_relative_error=0.03) + self.check_grad_with_place( + self.place, ['X', 'Y'], 'Out', max_relative_error=0.03 + ) def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set=set( - ['X', 'InsideWeight', 'OutsideWeight'])) + self.check_grad_with_place( + self.place, + ['Y'], + 'Out', + max_relative_error=0.03, + no_grad_set=set(['X', 
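# --- Editorial sketch, not part of the patch above ---------------------------
# The usual sigma-parameterised smooth-L1 definition that the
# smooth_l1_loss_forward() helper in this file implements; the helper's exact
# body lies outside the hunks shown here, so treat this as a reference only.
import numpy as np


def smooth_l1(diff, sigma=3.0):
    sigma2 = sigma * sigma
    d = np.abs(diff)
    return np.where(d < 1.0 / sigma2, 0.5 * d * d * sigma2, d - 0.5 / sigma2)


# Elementwise loss for a batch of differences (X - Y), as in TestSmoothL1LossOp1.
print(smooth_l1(np.array([-0.5, 0.05, 2.0]), sigma=3.0))
# -----------------------------------------------------------------------------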
'InsideWeight', 'OutsideWeight']), + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.03, - no_grad_set=set( - ['Y', 'InsideWeight', 'OutsideWeight'])) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.03, + no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']), + ) class TestSmoothL1LossOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): # The input type of accuracy_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.NPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.NPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.NPUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.smooth_l1, x1, y1) # The input dtype of accuracy_op must be float32 or float64. x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") diff --git a/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py index 1c145c497214670221ad2b15c4c8f019652fad37..41ccda3dba7622a0ec8494c921a5cbc14aaf9721 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_softmax_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestSoftmax(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -53,7 +52,6 @@ class TestSoftmax(OpTest): class TestSoftmaxNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -68,9 +66,9 @@ class TestSoftmaxNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[4, 32], dtype='float32') b = paddle.static.data(name="b", shape=[4, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[4, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[4, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.sqrt(c) @@ -99,16 +97,17 @@ class TestSoftmaxNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py index 79654bf170eddecf64be838b49bc1ff7b3e5a9f0..f19e892f9a37e91ea244eb849260d7cbb8329d85 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_softmax_with_cross_entropy_op_npu.py @@ -28,7 +28,6 @@ SEED = 2021 class TestSoftmaxWithCrossEntropyOp(OpTest): - def set_npu(self): self.__class__.use_npu = True @@ -51,8 +50,10 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.initParams() logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, 
self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, logits) if self.soft_label: @@ -63,8 +64,9 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.shape[self.axis] = 1 labels = np.random.randint(0, axis_dim, self.shape, dtype="int64") - loss = cross_entropy(softmax, labels, self.soft_label, self.axis, - self.ignore_index) + loss = cross_entropy( + softmax, labels, self.soft_label, self.axis, self.ignore_index + ) one_hot_label = np.eye(axis_dim)[labels.reshape(-1)] @@ -72,7 +74,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.outputs = { "Backprop": (softmax - one_hot_label).astype(self.dtype), "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { "numeric_stable_mode": self.numeric_stable_mode, @@ -88,14 +90,16 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): def test_check_grad(self): # fp32 has low precision, cpu and npu both need to relax the max_relative_error if using fp32 - self.check_grad_with_place(self.place, ['Logits'], - 'Loss', - numeric_grad_delta=0.001, - max_relative_error=0.5) + self.check_grad_with_place( + self.place, + ['Logits'], + 'Loss', + numeric_grad_delta=0.001, + max_relative_error=0.5, + ) class TestPowNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -110,9 +114,9 @@ class TestPowNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) @@ -136,16 +140,17 @@ class TestPowNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py index 7550735c4969620b865f6aeefdeda73961c718a4..d6a6cf9d573232aed6f3e5a8e657bb666fd4c875 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py @@ -26,10 +26,10 @@ paddle.enable_static() SEED = 2021 -@unittest.skipIf(not paddle.is_compiled_with_npu(), - "core is not compiled with NPU") +@unittest.skipIf( + not paddle.is_compiled_with_npu(), "core is not compiled with NPU" +) class TestCase1(OpTest): - def setUp(self): self.set_npu() self.set_example() @@ -37,9 +37,9 @@ class TestCase1(OpTest): self.place = paddle.NPUPlace(0) ipt = self.x.astype(self.dtype) axis = self.axis if isinstance(self.axis, int) else int(self.axis[0]) - tmp_outs = np.split(ipt, - axis=axis, - indices_or_sections=self.num_or_sections) + tmp_outs = np.split( + ipt, axis=axis, 
indices_or_sections=self.num_or_sections + ) tmp_outs = [o.astype(self.dtype) for o in tmp_outs] self.outputs = {'Out': []} self.outs = [] @@ -69,7 +69,6 @@ class TestCase1(OpTest): class TestCase2(TestCase1): - def set_example(self): self.dtype = "float32" self.x = np.random.random((20, 4, 50)) @@ -78,7 +77,6 @@ class TestCase2(TestCase1): class TestCase4(TestCase1): - def set_example(self): self.dtype = "float16" self.x = np.random.random((4, 50, 20)) @@ -88,7 +86,6 @@ class TestCase4(TestCase1): # Test Sections class TestCase5(TestCase1): - def set_example(self): super().set_example() self.x = np.random.random((2, 10, 4)) @@ -101,7 +98,6 @@ class TestCase5(TestCase1): class API_TestSplit(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') @@ -110,13 +106,12 @@ class API_TestSplit(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([1, 10]).astype('float32') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) - ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) + ex_x0, ex_x1 = np.split(input1, (3,), axis=1) np.testing.assert_allclose(ex_x0, r0) np.testing.assert_allclose(ex_x1, r1) class API_TestSplit2(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') @@ -131,7 +126,6 @@ class API_TestSplit2(unittest.TestCase): class API_TestDygraphSplit(unittest.TestCase): - def test_out1(self): with fluid.dygraph.guard(paddle.NPUPlace(0)): input_1 = np.random.random([4, 6, 6]).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py index 5aca464aeba9a7a2bb327801a0e4586724fb85c5..d28f67a51e3e5595999eeb977c6bf6ee53d84dd9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sqrt_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestSqrt(OpTest): - def setUp(self): self.set_npu() self.op_type = "sqrt" @@ -58,7 +57,6 @@ class TestSqrt(OpTest): class TestSqrtFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "sqrt" @@ -85,7 +83,6 @@ class TestSqrtFp16(OpTest): class TestSqrtNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -100,9 +97,9 @@ class TestSqrtNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.sqrt(c) @@ -126,16 +123,17 @@ class TestSqrtNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git 
a/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py index 86849cdb9c2824af94b0074915eaf7702adc2414..f6dbefee32a6f69cdab69806aecad284df5537b6 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_square_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestSquare(OpTest): - def setUp(self): self.set_npu() self.op_type = "square" @@ -55,7 +54,6 @@ class TestSquare(OpTest): class TestSquareFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "square" @@ -82,7 +80,6 @@ class TestSquareFp16(OpTest): class TestSquareNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -97,9 +94,9 @@ class TestSquareNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.square(c) @@ -123,16 +120,17 @@ class TestSquareNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_squared_l2_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_squared_l2_norm_op_npu.py index 6c4f66958acde1c924868c1bd63e06c7dfcb4313..ac1e57f29599337febe54397d8d35006846df5aa 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_squared_l2_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_squared_l2_norm_op_npu.py @@ -25,8 +25,7 @@ paddle.enable_static() class TestL2LossOp(OpTest): - """Test npu squared_l2_norm - """ + """Test npu squared_l2_norm""" def setUp(self): self.set_npu() @@ -46,9 +45,9 @@ class TestL2LossOp(OpTest): self.check_output_with_place(place=self.place) def test_check_grad(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=self.max_relative_error) + self.check_grad_with_place( + self.place, ['X'], 'Out', max_relative_error=self.max_relative_error + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py index 372c13fb90083570e1d44fbba0ee7328c0175b07..35940f62b833386903df24e05cd1b0b70b2e9f2d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_squeeze_op_npu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestSqueezeOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "squeeze" @@ -62,7 +61,6 @@ class TestSqueezeOp(OpTest): class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 3, 1, 40) self.axes = (0, -2) @@ -73,7 +71,6 @@ class TestSqueezeOp1(TestSqueezeOp): class 
TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -84,7 +81,6 @@ class TestSqueezeOp2(TestSqueezeOp): class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) @@ -95,7 +91,6 @@ class TestSqueezeOp3(TestSqueezeOp): class TestSqueezeOp4(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, 2) @@ -103,13 +98,13 @@ class TestSqueezeOp4(TestSqueezeOp): class TestSqueezeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of softmax_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.NPUPlace(0) + ) self.assertRaises(TypeError, paddle.squeeze, x1) # The input axes of squeeze must be list. x2 = paddle.static.data(name='x2', shape=[4], dtype="int32") @@ -120,7 +115,6 @@ class TestSqueezeOpError(unittest.TestCase): class API_TestSqueeze(unittest.TestCase): - def setUp(self): self.executed_api() @@ -129,29 +123,29 @@ class API_TestSqueeze(unittest.TestCase): def test_out(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - data1 = paddle.static.data('data1', - shape=[-1, 1, 10], - dtype='float64') + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + data1 = paddle.static.data( + 'data1', shape=[-1, 1, 10], dtype='float64' + ) result_squeeze = self.squeeze(data1, axis=[1]) place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) input1 = np.random.random([5, 1, 10]).astype('float64') - result, = exe.run(feed={"data1": input1}, - fetch_list=[result_squeeze]) + (result,) = exe.run( + feed={"data1": input1}, fetch_list=[result_squeeze] + ) expected_result = np.squeeze(input1, axis=1) np.testing.assert_allclose(expected_result, result) class API_TestStaticSqueeze_(API_TestSqueeze): - def executed_api(self): self.squeeze = paddle.squeeze_ class API_TestDygraphSqueeze(unittest.TestCase): - def setUp(self): self.executed_api() @@ -210,14 +204,12 @@ class API_TestDygraphSqueeze(unittest.TestCase): class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze): - def executed_api(self): self.squeeze = paddle.squeeze_ # Correct: General. class TestSqueeze2Op(OpTest): - def setUp(self): self.set_npu() self.op_type = "squeeze2" @@ -226,15 +218,16 @@ class TestSqueeze2Op(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_npu(self): self.__class__.use_npu = True def test_check_output(self): - self.check_output_with_place(paddle.NPUPlace(0), - no_check_set=['XShape']) + self.check_output_with_place( + paddle.NPUPlace(0), no_check_set=['XShape'] + ) def test_check_grad(self): self.check_grad_with_place(paddle.NPUPlace(0), ["X"], "Out") @@ -250,7 +243,6 @@ class TestSqueeze2Op(OpTest): # Correct: There is mins axis. class TestSqueeze2Op1(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -259,7 +251,6 @@ class TestSqueeze2Op1(TestSqueeze2Op): # Correct: No axes input. 
class TestSqueeze2Op2(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -268,7 +259,6 @@ class TestSqueeze2Op2(TestSqueeze2Op): # Correct: Just part of axes be squeezed. class TestSqueeze2Op3(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py index 5f768c78af76491081416b929421ea64ef99f47e..8a9e8879b8226f00637a7774d92c3cefd190fb9e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestStackOpBase(OpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -50,7 +49,8 @@ class TestStackOpBase(OpTest): self.x = [] for i in range(self.num_inputs): self.x.append( - np.random.random(size=self.input_dim).astype(self.dtype)) + np.random.random(size=self.input_dim).astype(self.dtype) + ) tmp = [] x_names = self.get_x_names() @@ -78,49 +78,41 @@ class TestStackOpBase(OpTest): class TestStackOp1(TestStackOpBase): - def initParameters(self): self.num_inputs = 16 class TestStackOp2(TestStackOpBase): - def initParameters(self): self.num_inputs = 20 class TestStackOp3(TestStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestStackOpBase): - def initParameters(self): self.axis = -4 class TestStackOp5(TestStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestStackOpBase): - def initParameters(self): self.axis = 3 class TestStackOpINT32(TestStackOpBase): - def init_dtype(self): self.dtype = np.int32 class TestStackOpINT64(TestStackOpBase): - def init_dtype(self): self.dtype = np.int64 @@ -135,8 +127,11 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): self.iter_num = 3 self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") - self.place = paddle.NPUPlace(0) \ - if paddle.is_compiled_with_npu() else paddle.CPUPlace() + self.place = ( + paddle.NPUPlace(0) + if paddle.is_compiled_with_npu() + else paddle.CPUPlace() + ) self.set_program() def set_program(self): @@ -156,7 +151,8 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_allclose( - res[0], np.stack([self.x] * self.iter_num, axis=self.axis)) + res[0], np.stack([self.x] * self.iter_num, axis=self.axis) + ) class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): @@ -169,8 +165,11 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): self.iter_num = 3 self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") - self.place = paddle.NPUPlace(0) \ - if paddle.is_compiled_with_npu() else paddle.CPUPlace() + self.place = ( + paddle.NPUPlace(0) + if paddle.is_compiled_with_npu() + else paddle.CPUPlace() + ) self.set_program() def set_program(self): @@ -190,11 +189,11 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_allclose( - res[0], np.stack([self.x] * self.iter_num, axis=self.axis)) + res[0], np.stack([self.x] * self.iter_num, axis=self.axis) + ) class API_test(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', 
shape=[1, 2], dtype='float32') @@ -206,12 +205,10 @@ class API_test(unittest.TestCase): input1 = np.random.random([1, 2]).astype('float32') input2 = np.random.random([1, 2]).astype('float32') input3 = np.random.random([1, 2]).astype('float32') - result, = exe.run(feed={ - "data1": input1, - "data2": input2, - "data3": input3 - }, - fetch_list=[result_stack]) + (result,) = exe.run( + feed={"data1": input1, "data2": input2, "data3": input3}, + fetch_list=[result_stack], + ) expected_result = np.stack([input1, input2, input3], axis=0) np.testing.assert_allclose(expected_result, result) @@ -222,7 +219,6 @@ class API_test(unittest.TestCase): class API_DygraphTest(unittest.TestCase): - def test_out(self): data1 = np.array([[1.0, 2.0]]) data2 = np.array([[3.0, 4.0]]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py index bf32653455c22c111478148c8555837b95510b22..6dcd8fcdae2ba37f32e86085ecdbc2413674f900 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py @@ -40,33 +40,52 @@ def strided_slice_native_forward(input, axes, starts, ends, strides): stride[axes[i]] = strides[i] result = { - 1: lambda input, start, end, stride: input[start[0]:end[0]:stride[0]], - 2: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1]], - 3: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2]], - 4: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3]], - 5: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3], start[4]:end[4]:stride[4]], - 6: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3], \ - start[4]:end[4]:stride[4], start[5]:end[5]:stride[5]] + 1: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0] + ], + 2: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], start[1] : end[1] : stride[1] + ], + 3: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + ], + 4: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + ], + 5: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + start[4] : end[4] : stride[4], + ], + 6: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + start[4] : end[4] : stride[4], + start[5] : end[5] : stride[5], + ], }[dim](input, start, end, stride) return result class TestStridedSliceOp(OpTest): - def setUp(self): self.initTestCase() self.set_npu() self.place = paddle.NPUPlace(0) self.op_type = 'strided_slice' - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, 
self.ends, self.strides + ) self.inputs = {'Input': self.input} self.outputs = {'Out': self.output} @@ -75,7 +94,7 @@ class TestStridedSliceOp(OpTest): 'starts': self.starts, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def set_npu(self): @@ -97,7 +116,6 @@ class TestStridedSliceOp(OpTest): class TestStridedSliceOp1(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -108,7 +126,6 @@ class TestStridedSliceOp1(TestStridedSliceOp): class TestStridedSliceOp2(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -119,7 +136,6 @@ class TestStridedSliceOp2(TestStridedSliceOp): class TestStridedSliceOp3(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -130,7 +146,6 @@ class TestStridedSliceOp3(TestStridedSliceOp): class TestStridedSliceOp4(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 4, 10) self.axes = [0, 1, 2] @@ -141,7 +156,6 @@ class TestStridedSliceOp4(TestStridedSliceOp): class TestStridedSliceOp5(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -152,7 +166,6 @@ class TestStridedSliceOp5(TestStridedSliceOp): class TestStridedSliceOp6(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -163,7 +176,6 @@ class TestStridedSliceOp6(TestStridedSliceOp): class TestStridedSliceOp7(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -174,7 +186,6 @@ class TestStridedSliceOp7(TestStridedSliceOp): class TestStridedSliceOp8(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1) self.axes = [1] @@ -185,7 +196,6 @@ class TestStridedSliceOp8(TestStridedSliceOp): class TestStridedSliceOp9(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1) self.axes = [1] @@ -196,7 +206,6 @@ class TestStridedSliceOp9(TestStridedSliceOp): class TestStridedSliceOp10(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(10, 10) self.axes = [0, 1] @@ -207,7 +216,6 @@ class TestStridedSliceOp10(TestStridedSliceOp): class TestStridedSliceOp11(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4) self.axes = [0, 1, 2, 3] @@ -218,7 +226,6 @@ class TestStridedSliceOp11(TestStridedSliceOp): class TestStridedSliceOp12(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5) self.axes = [0, 1, 2, 3, 4] @@ -229,7 +236,6 @@ class TestStridedSliceOp12(TestStridedSliceOp): class TestStridedSliceOp13(TestStridedSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8) self.axes = [0, 1, 2, 3, 4, 5] @@ -240,13 +246,11 @@ class TestStridedSliceOp13(TestStridedSliceOp): class TestStridedSliceOpBool(TestStridedSliceOp): - def test_check_grad(self): pass class TestStridedSliceOpBool1D(TestStridedSliceOpBool): - def initTestCase(self): self.input = np.random.rand(100).astype("bool") self.axes = [0] @@ -257,7 +261,6 @@ class TestStridedSliceOpBool1D(TestStridedSliceOpBool): class TestStridedSliceOpBool2D(TestStridedSliceOpBool): - def initTestCase(self): self.input = np.random.rand(10, 10).astype("bool") self.axes = [0, 1] @@ -268,7 +271,6 @@ class TestStridedSliceOpBool2D(TestStridedSliceOpBool): class TestStridedSliceOpBool3D(TestStridedSliceOpBool): - def 
initTestCase(self): self.input = np.random.rand(3, 4, 10).astype("bool") self.axes = [0, 1, 2] @@ -279,7 +281,6 @@ class TestStridedSliceOpBool3D(TestStridedSliceOpBool): class TestStridedSliceOpBool4D(TestStridedSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4).astype("bool") self.axes = [0, 1, 2, 3] @@ -290,7 +291,6 @@ class TestStridedSliceOpBool4D(TestStridedSliceOpBool): class TestStridedSliceOpBool5D(TestStridedSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5).astype("bool") self.axes = [0, 1, 2, 3, 4] @@ -301,7 +301,6 @@ class TestStridedSliceOpBool5D(TestStridedSliceOpBool): class TestStridedSliceOpBool6D(TestStridedSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8).astype("bool") self.axes = [0, 1, 2, 3, 4, 5] @@ -312,7 +311,6 @@ class TestStridedSliceOpBool6D(TestStridedSliceOpBool): class TestStridedSliceOp_starts_ListTensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -321,8 +319,9 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.outputs = {'Out': self.output} @@ -331,7 +330,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): 'starts': self.starts_infer, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def set_npu(self): @@ -344,9 +343,9 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.starts_infer = [1, 10, 2] @@ -358,7 +357,6 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): class TestStridedSliceOp_ends_ListTensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -367,8 +365,9 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'EndsTensorList': ends_tensor} self.outputs = {'Out': self.output} @@ -377,7 +376,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): 'starts': self.starts, 'ends': self.ends_infer, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def set_npu(self): @@ -390,9 +389,9 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 2] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.ends_infer = [3, 1, 4] @@ -404,7 +403,6 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): class TestStridedSliceOp_starts_Tensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -413,7 +411,7 @@ class 
TestStridedSliceOp_starts_Tensor(OpTest): self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype="int32") + "StartsTensor": np.array(self.starts, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -434,9 +432,9 @@ class TestStridedSliceOp_starts_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) @@ -446,7 +444,6 @@ class TestStridedSliceOp_starts_Tensor(OpTest): class TestStridedSliceOp_ends_Tensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -455,7 +452,7 @@ class TestStridedSliceOp_ends_Tensor(OpTest): self.inputs = { 'Input': self.input, - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -476,9 +473,9 @@ class TestStridedSliceOp_ends_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) @@ -488,7 +485,6 @@ class TestStridedSliceOp_ends_Tensor(OpTest): class TestStridedSliceOp_listTensor_Tensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -497,13 +493,14 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - "EndsTensorList": ends_tensor + "EndsTensorList": ends_tensor, } self.outputs = {'Out': self.output} self.attrs = { @@ -524,9 +521,9 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) @@ -536,7 +533,6 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): class TestStridedSliceOp_strides_Tensor(OpTest): - def setUp(self): self.place = paddle.NPUPlace(0) self.op_type = "strided_slice" @@ -545,7 +541,7 @@ class TestStridedSliceOp_strides_Tensor(OpTest): self.inputs = { 'Input': self.input, - "StridesTensor": np.array(self.strides, dtype="int32") + "StridesTensor": np.array(self.strides, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -566,9 +562,9 @@ class TestStridedSliceOp_strides_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, -1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, 
self.strides + ) def test_check_output(self): self.check_output_with_place(self.place) @@ -576,51 +572,54 @@ class TestStridedSliceOp_strides_Tensor(OpTest): def test_check_grad_normal(self): self.check_grad_with_place(self.place, ['Input'], 'Out') - # Test python API -class TestStridedSliceAPI(unittest.TestCase): + +class TestStridedSliceAPI(unittest.TestCase): def test_1(self): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data(name='starts', - shape=[3], - dtype='int32', - append_batch_size=False) - ends = fluid.layers.data(name='ends', - shape=[3], - dtype='int32', - append_batch_size=False) - strides = fluid.layers.data(name='strides', - shape=[3], - dtype='int32', - append_batch_size=False) - - x = fluid.layers.data(name="x", - shape=[3, 4, 5, 6], - append_batch_size=False, - dtype="float64") - out_1 = fluid.layers.strided_slice(x, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_2 = fluid.layers.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_3 = fluid.layers.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, minus_1], - strides=[1, 1, 1]) - out_4 = fluid.layers.strided_slice(x, - axes=[0, 1, 2], - starts=starts, - ends=ends, - strides=strides) + starts = fluid.layers.data( + name='starts', shape=[3], dtype='int32', append_batch_size=False + ) + ends = fluid.layers.data( + name='ends', shape=[3], dtype='int32', append_batch_size=False + ) + strides = fluid.layers.data( + name='strides', shape=[3], dtype='int32', append_batch_size=False + ) + + x = fluid.layers.data( + name="x", + shape=[3, 4, 5, 6], + append_batch_size=False, + dtype="float64", + ) + out_1 = fluid.layers.strided_slice( + x, + axes=[0, 1, 2], + starts=[-3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_2 = fluid.layers.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_3 = fluid.layers.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, minus_1], + strides=[1, 1, 1], + ) + out_4 = fluid.layers.strided_slice( + x, axes=[0, 1, 2], starts=starts, ends=ends, strides=strides + ) out_5 = x[-3:3, 0:100:2, -1:2:-1] out_6 = x[minus_3:3:1, 0:100:2, :, minus_1:2:minus_1] @@ -633,9 +632,10 @@ class TestStridedSliceAPI(unittest.TestCase): "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), 'ends': np.array([3, 2147483648, -1]).astype("int64"), - 'strides': np.array([1, 1, 1]).astype("int32") + 'strides': np.array([1, 1, 1]).astype("int32"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], + ) assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) @@ -650,11 +650,9 @@ class TestStridedSliceAPI(unittest.TestCase): starts = [-3, 0, 2] ends = [3, 2, 4] strides_1 = [1, 1, 1] - sliced_1 = paddle.strided_slice(x, - axes=axes, - starts=starts, - ends=ends, - strides=strides_1) + sliced_1 = paddle.strided_slice( + x, axes=axes, starts=starts, ends=ends, strides=strides_1 + ) assert sliced_1.shape == (3, 2, 2, 2) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py index 
bda9ebe9d1114ecdf8c175c4e465f66040760d92..8471fa09e2082e15d8bc9a6d79f73dce4df5eb66 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sum_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestSum1(OpTest): - def setUp(self): self.set_npu() self.init_dtype() @@ -54,7 +53,6 @@ class TestSum1(OpTest): class TestSum2(OpTest): - def setUp(self): self.set_npu() self.init_dtype() @@ -72,8 +70,12 @@ class TestSum2(OpTest): # For example, the results of `x0+x1+x2+x3` is different from that of # `x3+x2+x1+x0` if the dtype is fp16. # Therefore, converting the input to fp32 for calculation. - y = (x0.astype(np.float32) + x1.astype(np.float32) + - x2.astype(np.float32) + x3.astype(np.float32)).astype(self.dtype) + y = ( + x0.astype(np.float32) + + x1.astype(np.float32) + + x2.astype(np.float32) + + x3.astype(np.float32) + ).astype(self.dtype) self.outputs = {'Out': y} self.attrs = {'use_mkldnn': False} @@ -89,7 +91,6 @@ class TestSum2(OpTest): class TestSum3(OpTest): - def setUp(self): self.set_npu() self.init_dtype() diff --git a/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py index 9c45c84c43bd7a2d72e26415da60d3b9e7913a95..d967e1fc06d0857dabbe8d46175cf8564942dbef 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_swish_op_npu.py @@ -27,7 +27,6 @@ SEED = 1024 class TestSwishOp(OpTest): - def setUp(self): self.op_type = "swish" self.set_npu() @@ -49,10 +48,13 @@ class TestSwishOp(OpTest): dx = beta * out + expit(x) * (1 - beta * out) dx = dx / x.size - self.check_grad_with_place(self.place, ['X'], - 'Out', - max_relative_error=0.01, - user_defined_grads=[dx]) + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + max_relative_error=0.01, + user_defined_grads=[dx], + ) def set_npu(self): self.__class__.use_npu = True @@ -63,7 +65,6 @@ class TestSwishOp(OpTest): class TestSwishOpFp16(TestSwishOp): - def test_check_output(self): self.check_output_with_place(self.place, atol=1e-3) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py index 30aa594114307886de60a17e08afc9a2a44561b1..1629c8e8fb20a8d70190482f0598c12719115dd1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_base_npu.py @@ -30,9 +30,14 @@ import paddle.fluid.unique_name as nameGen from paddle.fluid import core import paddle -from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + _set_use_system_allocator, +) -from paddle.fluid.tests.unittests.test_sync_batch_norm_op import create_or_get_tensor +from paddle.fluid.tests.unittests.test_sync_batch_norm_op import ( + create_or_get_tensor, +) _set_use_system_allocator(False) paddle.enable_static() @@ -41,17 +46,19 @@ SEED = 10 class TestSyncBatchNormRunnerBase(object): - - def get_model(self, - main, - startup, - place, - layout, - seed, - sync_bn=False, - only_forward=False): + def get_model( + self, + main, + startup, + place, + layout, + seed, + sync_bn=False, + only_forward=False, + ): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def wait_server_ready(self, endpoints): assert not isinstance(endpoints, str) @@ -60,13 +67,15 @@ class TestSyncBatchNormRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -74,43 +83,50 @@ class TestSyncBatchNormRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break + # endpoints should be ["ip1:port1","ip2:port2"] -#endpoints should be ["ip1:port1","ip2:port2"] - - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - hccl_id_var = block.create_var(name=nameGen.generate('hccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_hccl_id', - inputs={}, - outputs={'Out': hccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - block.append_op(type='c_comm_init_hccl', - inputs={'X': hccl_id_var}, - outputs={}, - attrs={ - 'rank': rank, - 'ring_id': self.global_ring_id, - 'device_id': int(os.getenv("FLAGS_selected_npus")), - 'rank_ids': nranks - }) + hccl_id_var = block.create_var( + name=nameGen.generate('hccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_hccl_id', + inputs={}, + outputs={'Out': hccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + block.append_op( + type='c_comm_init_hccl', + inputs={'X': hccl_id_var}, + outputs={}, + attrs={ + 'rank': rank, + 'ring_id': self.global_ring_id, + 'device_id': int(os.getenv("FLAGS_selected_npus")), + 'rank_ids': nranks, + }, + ) def run_trainer(self, args): device_id = int(os.getenv("FLAGS_selected_npus", "0")) @@ -143,24 +159,30 @@ class TestSyncBatchNormRunnerBase(object): sys.stdout.buffer.write( pickle.dumps( - 'training, inference, fp32, fp16, NCHW, NHWC all passed')) + 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) + ) def _compare(self, args, place, layout, only_forward): scope = core.Scope() np.random.seed(SEED) - data = np.random.random(size=self.dshape).astype(self.dtype) * 4. 
- 2 + data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2 sys.stderr.write("data: " + str(data) + "\n") - data = create_or_get_tensor(scope, "input", - OpTest.np_dtype_to_fluid_dtype(data), place) + data = create_or_get_tensor( + scope, "input", OpTest.np_dtype_to_fluid_dtype(data), place + ) - bn_fetches = self._cal_single_card(args, data, place, layout, - only_forward) + bn_fetches = self._cal_single_card( + args, data, place, layout, only_forward + ) fetch_names, sync_bn_fetches = self._cal_multiple_cards( - args, data, place, layout, only_forward) + args, data, place, layout, only_forward + ) - sys.stderr.write("len(sync_bn_fetches): " + str(len(sync_bn_fetches)) + - "\n") + sys.stderr.write( + "len(sync_bn_fetches): " + str(len(sync_bn_fetches)) + "\n" + ) for i in range(0, len(sync_bn_fetches)): sys.stderr.write("i: " + str(i) + "\n") sys.stderr.write("fetch_names[i]): " + fetch_names[i] + "\n") @@ -168,13 +190,14 @@ class TestSyncBatchNormRunnerBase(object): bn_val = bn_fetches[i] sync_bn_val = sync_bn_fetches[i] if sync_bn_val.shape != bn_val.shape: - sync_bn_val = sync_bn_val[:bn_val.shape[0]] + sync_bn_val = sync_bn_val[: bn_val.shape[0]] # i = 0 if fetch_names[i] == 'reduce_sum_0.tmp_0': # sys.stderr.write("skip reduce_sum_0.tmp_0 (Out of reduce_sum op)" + "\n") - sys.stderr.write("reduce_sum_0.tmp_0 (Out of reduce_sum op)" + - "\n") + sys.stderr.write( + "reduce_sum_0.tmp_0 (Out of reduce_sum op)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -202,7 +225,8 @@ class TestSyncBatchNormRunnerBase(object): if fetch_names[i] == 'batch_norm_0.tmp_2': # sys.stderr.write("skip batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n") sys.stderr.write( - "batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n") + "batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -235,8 +259,9 @@ class TestSyncBatchNormRunnerBase(object): # i = 8 if fetch_names[i] == 'batch_norm_0.tmp_1': - sys.stderr.write("skip batch_norm_0.tmp_1 (SavedVariance)" + - "\n") + sys.stderr.write( + "skip batch_norm_0.tmp_1 (SavedVariance)" + "\n" + ) sys.stderr.write("bn_val: " + str(bn_val) + "\n") sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n") @@ -282,10 +307,16 @@ class TestSyncBatchNormRunnerBase(object): if fetch_names[i] == 'conv2d_0.tmp_0@GRAD': atol = 1e-2 - assert np.allclose( - bn_val, sync_bn_val, atol=atol), "Output (" + fetch_names[ - i] + ") has diff. \n" + "\nBN " + str( - bn_val) + "\n" + "Sync BN " + str(sync_bn_val) + assert np.allclose(bn_val, sync_bn_val, atol=atol), ( + "Output (" + + fetch_names[i] + + ") has diff. 
\n" + + "\nBN " + + str(bn_val) + + "\n" + + "Sync BN " + + str(sync_bn_val) + ) def _cal_single_card(self, args, data, place, layout, only_forward): # Single-NPU, N = 32 per NPU @@ -295,23 +326,31 @@ class TestSyncBatchNormRunnerBase(object): startup_prog.global_seed(SEED) paddle.seed(SEED) - outs = self.get_model(train_prog, startup_prog, place, layout, SEED, - False, only_forward) + outs = self.get_model( + train_prog, startup_prog, place, layout, SEED, False, only_forward + ) exe = fluid.Executor(place) exe.run(startup_prog) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others - bn_fetches = exe.run(program=train_prog, - feed={'input': data}, - fetch_list=fetch_names) + bn_fetches = exe.run( + program=train_prog, feed={'input': data}, fetch_list=fetch_names + ) return bn_fetches @@ -333,10 +372,12 @@ class TestSyncBatchNormRunnerBase(object): current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) - sys.stderr.write("after init, startup_prog: " + - startup_prog.to_string(True) + "\n") + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) + sys.stderr.write( + "after init, startup_prog: " + startup_prog.to_string(True) + "\n" + ) train_prog.global_seed(SEED) train_prog._sync_with_cpp() startup_prog.global_seed(SEED) @@ -344,12 +385,17 @@ class TestSyncBatchNormRunnerBase(object): paddle.seed(SEED) self.rank = rank - outs = self.get_model(train_prog, startup_prog, place, layout, SEED, - True, only_forward) - sys.stderr.write("after get_model, train_prog: " + - train_prog.to_string(True) + "\n") - sys.stderr.write("after get_model, startup_prog: " + - startup_prog.to_string(True) + "\n") + outs = self.get_model( + train_prog, startup_prog, place, layout, SEED, True, only_forward + ) + sys.stderr.write( + "after get_model, train_prog: " + train_prog.to_string(True) + "\n" + ) + sys.stderr.write( + "after get_model, startup_prog: " + + startup_prog.to_string(True) + + "\n" + ) ops = train_prog.blocks[0].ops for i, op in enumerate(ops): @@ -362,23 +408,33 @@ class TestSyncBatchNormRunnerBase(object): sys.stderr.write("op type: " + op.type + "\n") op.desc.set_type('sync_batch_norm_grad') - sys.stderr.write("after update sync_batch_norm, train_prog: " + - train_prog.to_string(True) + "\n") + sys.stderr.write( + "after update sync_batch_norm, train_prog: " + + train_prog.to_string(True) + + "\n" + ) exe = fluid.Executor(place) exe.run(startup_prog) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others - sync_bn_fetches = exe.run(program=train_prog, - feed={'input': data}, - 
fetch_list=fetch_names) + sync_bn_fetches = exe.run( + program=train_prog, feed={'input': data}, fetch_list=fetch_names + ) return fetch_names, sync_bn_fetches @@ -401,19 +457,20 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -442,7 +499,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) @@ -453,15 +510,19 @@ class TestDistBase(unittest.TestCase): tr1_pipe = open("/tmp/tr1_err.log", "wb") # print(tr0_cmd) # print(tr1_cmd) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) - - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) + + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -471,12 +532,18 @@ class TestDistBase(unittest.TestCase): # close trainer file tr0_pipe.close() tr1_pipe.close() - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) def check_with_place(self, model_file, col_type, need_envs={}): tr0_out, tr1_out, pid0, pid1 = self._run_cluster(model_file, need_envs) self.assertEqual( - tr0_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed') + tr0_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) self.assertEqual( - tr1_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed') + tr1_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed' + ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_baseline.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_baseline.py index d66cb04e009c7f38e83ef574a79fb72121d0bb1c..2efe1f91cb87ef94619c30ced832e492c59231e1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_baseline.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_baseline.py @@ -20,7 +20,10 @@ import sys sys.path.append("..") -from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + _set_use_system_allocator, +) from test_sync_batch_norm_base_npu import TestDistBase @@ -29,15 +32,14 @@ paddle.enable_static() class TestSyncBatchNormOp(TestDistBase): - def _setup_config(self): pass def test_identity(self, col_type="identity"): dist_env = os.environ - self.check_with_place("sync_batch_norm_op_npu.py", - col_type, - need_envs=dist_env) + self.check_with_place( + "sync_batch_norm_op_npu.py", col_type, need_envs=dist_env + ) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py index 44644ef245ee41a068b85b012894a5aa5efb1e25..15c491704295864883eca44ec2dcc50a5f75d30e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py @@ -26,19 +26,22 @@ import paddle.fluid as fluid import paddle.nn as nn from paddle.fluid import Program, program_guard -from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + _set_use_system_allocator, +) # _set_use_system_allocator(False) paddle.enable_static() class TestDygraphSyncBatchNormAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10) - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.NPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.NPUPlace(0) + ) self.assertRaises(TypeError, my_sync_batch_norm, x1) # the input dtype of SyncBatchNorm must be float16 or float32 @@ -48,31 +51,33 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): class TestConvertSyncBatchNorm(unittest.TestCase): - def test_convert(self): with program_guard(Program(), Program()): - compare_model = paddle.nn.Sequential(paddle.nn.Conv2D(3, 5, 3), - paddle.nn.BatchNorm2D(5), - paddle.nn.BatchNorm2D(5)) + compare_model = paddle.nn.Sequential( + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), + paddle.nn.BatchNorm2D(5), + ) model = paddle.nn.Sequential( - paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5), + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), paddle.nn.BatchNorm2D( 5, weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'))) + bias_attr=fluid.ParamAttr(name='bn.bias'), + ), + ) model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model) for idx, sublayer in enumerate(compare_model.sublayers()): if isinstance(sublayer, paddle.nn.BatchNorm2D): self.assertEqual( - isinstance(model[idx], paddle.nn.SyncBatchNorm), True) + isinstance(model[idx], paddle.nn.SyncBatchNorm), True + ) class TestConvertSyncBatchNormCast1(unittest.TestCase): - def test_convert(self): - class Net(nn.Layer): - def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2D(3, 5, 3) @@ -97,7 +102,6 @@ class TestConvertSyncBatchNormCast1(unittest.TestCase): class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase): - def test_errors(self): with fluid.dygraph.guard(fluid.NPUPlace(0)): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10, data_format='CN') diff --git a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py index d0cc8e3abde4613b540d419a1a43fa5974f3672d..d3e34c39366126b4b11766659140d244b7383bd8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() @unittest.skip(reason="Skip unsupported ut, need paddle surpport cann 5.0.4+") class TestTakeAlongAxisOp(OpTest): - def setUp(self): self.set_npu() self.init_data() @@ -59,14 +58,14 @@ class TestTakeAlongAxisOp(OpTest): self.x_type = "float64" self.x_shape = (5, 5, 5) self.index_type = "int32" - self.index = 
np.array([[[1]], [[1]], [[2]], [[4]], - [[3]]]).astype(self.index_type) + self.index = np.array([[[1]], [[1]], [[2]], [[4]], [[3]]]).astype( + self.index_type + ) self.axis = 2 self.axis_type = "int64" class TestCase1(TestTakeAlongAxisOp): - def init_data(self): self.x_type = "float64" self.x_shape = (5, 5, 5) @@ -78,7 +77,6 @@ class TestCase1(TestTakeAlongAxisOp): @unittest.skip(reason="Skip unsupported ut, need paddle surpport cann 5.0.4+") class TestTakeAlongAxisAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) self.shape = [3, 3] @@ -95,13 +93,12 @@ class TestTakeAlongAxisAPI(unittest.TestCase): index = paddle.fluid.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place) - res = exe.run(feed={ - 'X': self.x_np, - 'Index': self.index_np - }, - fetch_list=[out]) + res = exe.run( + feed={'X': self.x_np, 'Index': self.index_np}, fetch_list=[out] + ) out_ref = np.array( - np.take_along_axis(self.x_np, self.index_np, self.axis)) + np.take_along_axis(self.x_np, self.index_np, self.axis) + ) for out in res: np.testing.assert_allclose(out, out_ref, rtol=0.001) @@ -111,20 +108,21 @@ class TestTakeAlongAxisAPI(unittest.TestCase): self.index = paddle.to_tensor(self.index_np) out = paddle.take_along_axis(x_tensor, self.index, self.axis) out_ref = np.array( - np.take_along_axis(self.x_np, self.index_np, self.axis)) + np.take_along_axis(self.x_np, self.index_np, self.axis) + ) np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) paddle.enable_static() @unittest.skip(reason="Skip unsupported ut, need paddle surpport cann 5.0.4+") class TestTakeAlongAxisAPICase1(TestTakeAlongAxisAPI): - def setUp(self): np.random.seed(0) self.shape = [2, 2] self.index_shape = [4, 2] - self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, - 0]]).astype('int64') + self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, 0]]).astype( + 'int64' + ) self.x_np = np.random.random(self.shape).astype(np.float32) self.place = paddle.NPUPlace(0) self.axis = 0 diff --git a/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py index 0b1563282c6d5c1f7deb3b9abb32f5fb98684d59..8cbb0d217eb3706b8bd19904b027277718cc5ba4 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tanh_op_npu.py @@ -26,7 +26,6 @@ SEED = 2021 class TestTanh(OpTest): - def setUp(self): self.set_npu() self.op_type = "tanh" @@ -58,7 +57,6 @@ class TestTanh(OpTest): class TestTanhFp16(OpTest): - def setUp(self): self.set_npu() self.op_type = "tanh" @@ -85,7 +83,6 @@ class TestTanhFp16(OpTest): class TestTanhNet(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -100,9 +97,9 @@ class TestTanhNet(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[32, 32], dtype='float32') b = paddle.static.data(name="b", shape=[32, 32], dtype='float32') - label = paddle.static.data(name="label", - shape=[32, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[32, 1], dtype='int64' + ) c = paddle.multiply(a, b) d = paddle.tanh(c) @@ -126,16 +123,17 @@ class TestTanhNet(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(100): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) + 
pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) if epoch % 10 == 0: - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) return pred_res, loss_res diff --git a/python/paddle/fluid/tests/unittests/npu/test_tile_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tile_op_npu.py index 723beb31c9afb87677a2fb2a3d9855c1d5e10d4d..e8003f82aa950c47307c68a8644b6b4144cae71b 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_tile_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tile_op_npu.py @@ -27,9 +27,8 @@ paddle.enable_static() np.random.seed(10) -#Situation 1: repeat_times is a list (without tensor) +# Situation 1: repeat_times is a list (without tensor) class TestTileOpRank1(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -55,44 +54,38 @@ class TestTileOpRank1(OpTest): pass -#with dimension expanding +# with dimension expanding class TestTileOpRank2Expanding(TestTileOpRank1): - def init_data(self): self.ori_shape = [120] self.repeat_times = [2, 2] class TestTileOpRank2(TestTileOpRank1): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] class TestTileOpRank3_Corner(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (1, 1, 1) class TestTileOpRank3_Corner2(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (2, 2) class TestTileOpRank3(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 15) self.repeat_times = (2, 1, 4) class TestTileOpRank4(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.repeat_times = (3, 2, 1, 2) @@ -100,7 +93,6 @@ class TestTileOpRank4(TestTileOpRank1): # Situation 2: repeat_times is a list (with tensor) class TestTileOpRank1_tensor_attr(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -108,8 +100,9 @@ class TestTileOpRank1_tensor_attr(OpTest): self.init_data() repeat_times_tensor = [] for index, ele in enumerate(self.repeat_times): - repeat_times_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + repeat_times_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype("float32"), @@ -135,7 +128,6 @@ class TestTileOpRank1_tensor_attr(OpTest): class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [1, 1] @@ -143,7 +135,6 @@ class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -152,7 +143,6 @@ class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): # Situation 3: repeat_times is a tensor class TestTileOpRank1_tensor(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -182,7 +172,6 @@ class TestTileOpRank1_tensor(OpTest): class TestTileOpRank2_tensor(TestTileOpRank1_tensor): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -190,7 +179,6 @@ class TestTileOpRank2_tensor(TestTileOpRank1_tensor): # Situation 4: input x is Integer class TestTileOpInteger(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -211,7 +199,6 @@ 
class TestTileOpInteger(OpTest): # Situation 5: input x is Integer class TestTileOpInt64_t(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -232,7 +219,6 @@ class TestTileOpInt64_t(OpTest): # Situation 6: input x is Bool class TestTileOpBool(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -251,7 +237,6 @@ class TestTileOpBool(OpTest): # Test python API class TestTileAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(paddle.NPUPlace(0)): np_x = np.random.random([12, 14]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py index 6e73f282c8c32e17c7460c3daa387fb4cb274a0f..f4c7bab1105c4bfba791a290a375e66b8064132b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py @@ -28,20 +28,24 @@ SEED = 2021 class TestTopk(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) self.op_type = "top_k" self.init_dtype() - x = np.array([[0.78104149, 0.88745828, 0.32362268], - [0.82196718, 0.48763277, 0.42826136], - [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype) + x = np.array( + [ + [0.78104149, 0.88745828, 0.32362268], + [0.82196718, 0.48763277, 0.42826136], + [0.96527182, 0.34851612, 0.12959783], + ] + ).astype(self.dtype) self.inputs = {'X': x} - np_out = np.array([[0.88745828], [0.82196718], - [0.96527182]]).astype(self.dtype) + np_out = np.array([[0.88745828], [0.82196718], [0.96527182]]).astype( + self.dtype + ) np_indices = np.array([[1], [0], [0]]) self.attrs = {'k': 1, "axis": -1} @@ -59,20 +63,28 @@ class TestTopk(OpTest): class TestTopkV2(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) self.op_type = "top_k" self.init_dtype() - x = np.array([[0.78104149, 0.88745828, 0.32362268], - [0.82196718, 0.48763277, 0.42826136], - [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype) + x = np.array( + [ + [0.78104149, 0.88745828, 0.32362268], + [0.82196718, 0.48763277, 0.42826136], + [0.96527182, 0.34851612, 0.12959783], + ] + ).astype(self.dtype) self.inputs = {'X': x} - np_out = np.array([[0.88745828, 0.78104149], [0.82196718, 0.48763277], - [0.96527182, 0.34851612]]).astype(self.dtype) + np_out = np.array( + [ + [0.88745828, 0.78104149], + [0.82196718, 0.48763277], + [0.96527182, 0.34851612], + ] + ).astype(self.dtype) np_indices = np.array([[1, 0], [0, 1], [0, 1]]) self.attrs = {'k': 2, "axis": -1} @@ -90,7 +102,6 @@ class TestTopkV2(OpTest): class TestTopkV3(OpTest): - def setUp(self): self.set_npu() self.place = paddle.NPUPlace(0) @@ -99,10 +110,9 @@ class TestTopkV3(OpTest): self.init_dtype() self.set_input_data() self.set_attrs() - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=True) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=True + ) self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis} @@ -123,8 +133,9 @@ class TestTopkV3(OpTest): self.axis = 1 def set_input_data(self): - self.input_data = np.random.choice(10000, size=(10, 20), - replace=False).astype(self.dtype) + self.input_data = np.random.choice( + 10000, size=(10, 20), replace=False + ).astype(self.dtype) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py index 
c72ce4831f7c861c10c4d91c43382ebb187cec5d..6b46a41fe1d614d6b7ffaab90aa10bfdd6b17ffa 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_top_k_v2_op_npu.py @@ -39,7 +39,6 @@ def numpy_topk(x, k=1, axis=-1, largest=True): class TestTopkV2NPUOp(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "top_k_v2" @@ -48,10 +47,9 @@ class TestTopkV2NPUOp(OpTest): self.set_dtype() self.set_input_data() self.set_attrs() - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} @@ -66,8 +64,9 @@ class TestTopkV2NPUOp(OpTest): self.largest = True def set_input_data(self): - self.input_data = np.random.choice(10000, size=(10, 20), - replace=False).astype(self.dtype) + self.input_data = np.random.choice( + 10000, size=(10, 20), replace=False + ).astype(self.dtype) def test_check_output(self): self.__class__.no_need_check_grad = True @@ -82,7 +81,6 @@ class TestTopkV2NPUOp(OpTest): class TestTopkV2OpFloat16(TestTopkV2NPUOp): - def set_attrs(self): self.k = 3 self.axis = 1 @@ -96,7 +94,6 @@ class TestTopkV2OpFloat16(TestTopkV2NPUOp): class TestTopkV2OP1Int32(TestTopkV2NPUOp): - def set_attrs(self): self.k = 3 self.axis = 0 @@ -104,7 +101,6 @@ class TestTopkV2OP1Int32(TestTopkV2NPUOp): class TestTopkV2OP2Int32(TestTopkV2NPUOp): - def set_attrs(self): self.k = 4 self.axis = 0 @@ -112,7 +108,6 @@ class TestTopkV2OP2Int32(TestTopkV2NPUOp): class TestTopkV2OP3Int32(TestTopkV2NPUOp): - def set_attrs(self): self.k = 6 self.axis = 1 @@ -120,7 +115,6 @@ class TestTopkV2OP3Int32(TestTopkV2NPUOp): class TestTopkV2OP4Int32(TestTopkV2NPUOp): - def set_attrs(self): self.k = 3 self.axis = 1 @@ -128,31 +122,26 @@ class TestTopkV2OP4Int32(TestTopkV2NPUOp): class TestTopkV2Op1Int64(TestTopkV2OP1Int32): - def set_dtype(self): self.dtype = np.int64 class TestTopkV2Op2Int64(TestTopkV2OP2Int32): - def set_dtype(self): self.dtype = np.int64 class TestTopkV2Op3Int64(TestTopkV2OP3Int32): - def set_dtype(self): self.dtype = np.int64 class TestTopkV2Op4Int64(TestTopkV2OP4Int32): - def set_dtype(self): self.dtype = np.int64 class TestTopkV2Op1Float32(TestTopkV2OP1Int32): - def set_dtype(self): self.dtype = np.float32 @@ -161,7 +150,6 @@ class TestTopkV2Op1Float32(TestTopkV2OP1Int32): class TestTopkV2Op2Float32(TestTopkV2OP2Int32): - def set_dtype(self): self.dtype = np.float32 @@ -170,7 +158,6 @@ class TestTopkV2Op2Float32(TestTopkV2OP2Int32): class TestTopkV2Op3Float32(TestTopkV2OP3Int32): - def set_dtype(self): self.dtype = np.float32 @@ -179,7 +166,6 @@ class TestTopkV2Op3Float32(TestTopkV2OP3Int32): class TestTopkV2Op4Float32(TestTopkV2OP4Int32): - def set_dtype(self): self.dtype = np.float32 @@ -188,7 +174,6 @@ class TestTopkV2Op4Float32(TestTopkV2OP4Int32): class TestTopkV2Op1Float64(TestTopkV2OP1Int32): - def set_dtype(self): self.dtype = np.float64 @@ -197,7 +182,6 @@ class TestTopkV2Op1Float64(TestTopkV2OP1Int32): class TestTopkV2Op2Float64(TestTopkV2OP2Int32): - def set_dtype(self): self.dtype = np.float64 @@ -206,7 +190,6 @@ class TestTopkV2Op2Float64(TestTopkV2OP2Int32): class TestTopkV2Op3Float64(TestTopkV2OP3Int32): - def set_dtype(self): self.dtype = np.float64 @@ -215,7 +198,6 @@ class TestTopkV2Op3Float64(TestTopkV2OP3Int32): class TestTopkV2Op4Float64(TestTopkV2OP4Int32): - def 
set_dtype(self): self.dtype = np.float64 @@ -224,7 +206,6 @@ class TestTopkV2Op4Float64(TestTopkV2OP4Int32): class TestTopKAPI(unittest.TestCase): - def setUp(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -275,22 +256,23 @@ class TestTopKAPI(unittest.TestCase): np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1]) # test case for basic test case 7 for the unsorted paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) - sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()), - axis=1, - k=2) + sort_paddle = numpy_topk( + np.array(paddle_result[0].numpy()), axis=1, k=2 + ) numpy_result = numpy_topk(self.input_data, k=2, axis=1) np.testing.assert_allclose(sort_paddle[0], numpy_result[0]) def run_static(self, place): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - input_tensor = paddle.static.data(name="x", - shape=[6, 7, 8], - dtype="float64") - large_input_tensor = paddle.static.data(name="large_x", - shape=[2, 1030], - dtype="float64") + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + input_tensor = paddle.static.data( + name="x", shape=[6, 7, 8], dtype="float64" + ) + large_input_tensor = paddle.static.data( + name="large_x", shape=[2, 1030], dtype="float64" + ) k_tensor = paddle.static.data(name="k", shape=[1], dtype="int32") result1 = paddle.topk(input_tensor, k=2) result2 = paddle.topk(input_tensor, k=2, axis=-1) @@ -304,18 +286,29 @@ class TestTopKAPI(unittest.TestCase): exe = paddle.static.Executor(place) input_data = np.random.rand(10, 20).astype("float64") large_input_data = np.random.rand(2, 100).astype("float64") - paddle_result = exe.run(feed={ - "x": self.input_data, - "large_x": self.large_input_data, - "k": np.array([2]).astype("int32") - }, - fetch_list=[ - result1[0], result1[1], result2[0], - result2[1], result3[0], result3[1], - result4[0], result4[1], result5[0], - result5[1], result6[0], result6[1], - result7[0], result7[1] - ]) + paddle_result = exe.run( + feed={ + "x": self.input_data, + "large_x": self.large_input_data, + "k": np.array([2]).astype("int32"), + }, + fetch_list=[ + result1[0], + result1[1], + result2[0], + result2[1], + result3[0], + result3[1], + result4[0], + result4[1], + result5[0], + result5[1], + result6[0], + result6[1], + result7[0], + result7[1], + ], + ) numpy_result = numpy_topk(self.input_data, k=2) np.testing.assert_allclose(paddle_result[0], numpy_result[0]) np.testing.assert_allclose(paddle_result[1], numpy_result[1]) @@ -328,17 +321,15 @@ class TestTopKAPI(unittest.TestCase): np.testing.assert_allclose(paddle_result[4], numpy_result[0]) np.testing.assert_allclose(paddle_result[5], numpy_result[1]) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=1, - largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=1, largest=False + ) np.testing.assert_allclose(paddle_result[6], numpy_result[0]) np.testing.assert_allclose(paddle_result[7], numpy_result[1]) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=-1, - largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=-1, largest=False + ) np.testing.assert_allclose(paddle_result[8], numpy_result[0]) np.testing.assert_allclose(paddle_result[9], numpy_result[1]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py index 
e6a457ca3e6422fe1a44d10ef6751441b4762c13..9503e28208eed42209dd9ee9950b09574aab2dad 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_transpose_op_npu.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestTransposeOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "transpose2" @@ -55,83 +54,72 @@ class TestTransposeOp(OpTest): class TestCase_ZeroDim(TestTransposeOp): - def init_shape_axis(self): self.shape = () self.axis = () -class TestCase0(TestTransposeOp): +class TestCase0(TestTransposeOp): def init_shape_axis(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) class TestCase1(TestTransposeOp): - def init_shape_axis(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) class TestCase2(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) class TestCase3(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) class TestCase4(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) class TestCase5(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 16, 96) self.axis = (0, 2, 1) class TestCase6(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 10, 12, 16) self.axis = (3, 1, 2, 0) class TestCase7(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 10, 2, 16) self.axis = (0, 1, 3, 2) class TestCase8(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (0, 1, 3, 2, 4, 5, 6, 7) class TestCase9(TestTransposeOp): - def init_shape_axis(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) class TestTransposeOpFP16(TestTransposeOp): - def init_dtype(self): self.dtype = np.float16 @@ -140,7 +128,6 @@ class TestTransposeOpFP16(TestTransposeOp): class TestTransposeOpInt64(TestTransposeOp): - def init_dtype(self): self.dtype = np.int64 diff --git a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py index 2aecede417a5f175545d6cdddf16ac50c44db943..aca61989b7b638cd68ea70bef1c737d497a53f4a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py @@ -23,8 +23,7 @@ paddle.enable_static() class TestNPUTrilTriu(OpTest): - """ the base class of other op testcases - """ + """the base class of other op testcases""" def setUp(self): self.op_type = "tril_triu" @@ -40,9 +39,9 @@ class TestNPUTrilTriu(OpTest): 'lower': True if self.real_op_type == 'tril' else False, } self.outputs = { - 'Out': - self.real_np_op(self.X, self.diagonal) - if self.diagonal else self.real_np_op(self.X) + 'Out': self.real_np_op(self.X, self.diagonal) + if self.diagonal + else self.real_np_op(self.X) } def test_check_output(self): @@ -67,27 +66,29 @@ def case_generator(op_type, Xshape, diagonal, expected): If arg `expercted` is 'success', it will register an Optest case and expect to pass. Otherwise, it will register an API case and check the expect failure. 
""" - cls_name = "{0}_{1}_shape_{2}_diag_{3}".format(expected, op_type, Xshape, - diagonal) + cls_name = "{0}_{1}_shape_{2}_diag_{3}".format( + expected, op_type, Xshape, diagonal + ) errmsg = { - "diagonal: TypeError": - "diagonal in {} must be a python Int".format(op_type), - "input: ValueError": - "x shape in {} must be at least 2-D".format(op_type), + "diagonal: TypeError": "diagonal in {} must be a python Int".format( + op_type + ), + "input: ValueError": "x shape in {} must be at least 2-D".format( + op_type + ), } class FailureCase(unittest.TestCase): - def test_failure(self): paddle.enable_static() data = fluid.data(shape=Xshape, dtype='float32', name=cls_name) - with self.assertRaisesRegexp(eval(expected.split(':')[-1]), - errmsg[expected]): + with self.assertRaisesRegexp( + eval(expected.split(':')[-1]), errmsg[expected] + ): getattr(tensor, op_type)(x=data, diagonal=diagonal) class SuccessCase(TestNPUTrilTriu): - def initTestCase(self): paddle.enable_static() @@ -112,15 +113,13 @@ cases = { (20, 20): [ '2020', [20], - { - 20: 20 - }, + {20: 20}, (20, 20), 20.20, ], # str, list, dict, tuple, float }, 'input: ValueError': { - (2020, ): [None], + (2020,): [None], }, } for _op_type in ['tril', 'triu']: @@ -129,12 +128,15 @@ for _op_type in ['tril', 'triu']: list( map( lambda _diagonal: case_generator( - _op_type, _Xshape, _diagonal, _expected), _diaglist)) + _op_type, _Xshape, _diagonal, _expected + ), + _diaglist, + ) + ) class TestTrilTriuOpAPI(unittest.TestCase): - """ test case by using API and has -1 dimension - """ + """test case by using API and has -1 dimension""" def test_api(self): paddle.enable_static() @@ -166,8 +168,10 @@ class TestTrilTriuOpAPI(unittest.TestCase): with fluid.dygraph.guard(): data = np.random.random([1, 9, 9, 4]).astype(dtype) x = fluid.dygraph.to_variable(data) - tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu( - x).numpy() + tril_out, triu_out = ( + tensor.tril(x).numpy(), + tensor.triu(x).numpy(), + ) np.testing.assert_allclose(tril_out, np.tril(data)) np.testing.assert_allclose(triu_out, np.triu(data)) @@ -185,14 +189,15 @@ class TestTrilTriuOpAPI(unittest.TestCase): place = fluid.NPUPlace(0) exe = fluid.Executor(place) - triu_out = exe.run(fluid.default_main_program(), - feed={"x": data}, - fetch_list=[triu_out]) + triu_out = exe.run( + fluid.default_main_program(), + feed={"x": data}, + fetch_list=[triu_out], + ) # @skip_check_grad_ci(reason="[NPU does not support grad right now.") class TestNPUTrilTriu_bool(TestNPUTrilTriu): - def test_check_output(self): self.check_output_with_place(self.place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py index 98bc517b90878c188f7366afef11977dda3570e5..dfa70ec0b65ad50d69f4b4287561d2c7c148ab61 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py @@ -29,7 +29,6 @@ SEED = 2021 class TestTruncatedNormal(unittest.TestCase): - def _test(self, run_npu=True): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -44,12 +43,13 @@ class TestTruncatedNormal(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, - std=2.0)) - linear = paddle.nn.Linear(2, - 2, - weight_attr=weight_attr, - 
bias_attr=False) + initializer=paddle.nn.initializer.TruncatedNormal( + mean=0.0, std=2.0 + ), + ) + linear = paddle.nn.Linear( + 2, 2, weight_attr=weight_attr, bias_attr=False + ) if run_npu: place = paddle.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py index 0c3e37a54a85bd053e002d12f301c72402c7cf47..2fdf53b23a7ba51e5f8a648857b776f180d616d8 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py @@ -25,7 +25,10 @@ import paddle from paddle.fluid.op import Operator import paddle.fluid as fluid from paddle.fluid import Program, program_guard -from test_uniform_random_op import TestUniformRandomOp, TestUniformRandomOpSelectedRows +from test_uniform_random_op import ( + TestUniformRandomOp, + TestUniformRandomOpSelectedRows, +) paddle.enable_static() @@ -39,7 +42,6 @@ def output_hist(out): class TestNPUUniformRandomOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "uniform_random" @@ -53,7 +55,7 @@ class TestNPUUniformRandomOp(OpTest): "shape": [1000, 784], "min": -5.0, "max": 10.0, - "seed": 10 + "seed": 10, } self.output_hist = output_hist @@ -73,7 +75,6 @@ class TestNPUUniformRandomOp(OpTest): class TestNPUUniformRandomOpSelectedRows(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_npu(): @@ -88,12 +89,14 @@ class TestNPUUniformRandomOpSelectedRows(unittest.TestCase): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[1000, 784], - min=-5.0, - max=10.0, - seed=10) + op = Operator( + "uniform_random", + Out="X", + shape=[1000, 784], + min=-5.0, + max=10.0, + seed=10, + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) diff --git a/python/paddle/fluid/tests/unittests/npu/test_unsqueeze_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_unsqueeze_op_npu.py index 4861c267922773486e562e3397c17a15d2442af2..0905e928e041e36726047fd0a2b707cb8539a621 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_unsqueeze_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_unsqueeze_op_npu.py @@ -28,7 +28,6 @@ paddle.enable_static() # unsqueeze class TestUnsqueezeOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "unsqueeze" @@ -60,7 +59,6 @@ class TestUnsqueezeOp(OpTest): class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (3, 40) self.axes = (0, -2) @@ -69,7 +67,6 @@ class TestUnsqueezeOp1(TestUnsqueezeOp): # No axes input. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = () @@ -78,7 +75,6 @@ class TestUnsqueezeOp2(TestUnsqueezeOp): # Just part of axes be squeezed. 
class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (6, 5, 1, 4) self.axes = (1, -1) @@ -87,7 +83,6 @@ class TestUnsqueezeOp3(TestUnsqueezeOp): # unsqueeze 2 class TestUnsqueeze2Op(OpTest): - def setUp(self): self.set_npu() self.op_type = "unsqueeze2" @@ -98,7 +93,7 @@ class TestUnsqueeze2Op(OpTest): self.init_attrs() self.outputs = { "Out": self.x.reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float32") + "XShape": np.random.random(self.ori_shape).astype("float32"), } def set_npu(self): @@ -121,7 +116,6 @@ class TestUnsqueeze2Op(OpTest): # Correct: There is mins axis. class TestUnsqueeze2Op1(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -2) @@ -130,7 +124,6 @@ class TestUnsqueeze2Op1(TestUnsqueeze2Op): # Correct: No axes input. class TestUnsqueeze2Op2(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = () @@ -139,7 +132,6 @@ class TestUnsqueeze2Op2(TestUnsqueeze2Op): # Correct: Just part of axes be squeezed. class TestUnsqueeze2Op3(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (6, 5, 1, 4) self.axes = (1, -1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py index 91f2fb047fe728d5bf43939b3bc5e1688f707a61..5c20b083bfcb40518f99d16f320c7abf2f6a0c56 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_unstack_op_npu.py @@ -24,7 +24,6 @@ paddle.enable_static() class TestUnStackOpBase(OpTest): - def initDefaultParameters(self): self.input_dim = (5, 6, 7) self.axis = 0 @@ -74,25 +73,21 @@ class TestUnStackOpBase(OpTest): class TestStackOp3(TestUnStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestUnStackOpBase): - def initParameters(self): self.axis = -3 class TestStackOp5(TestUnStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestUnStackOpBase): - def initParameters(self): self.axis = 2 diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py index 48df4ad454aad4847f1d7ce4f347d3747f7148ed..48edac9ab331fc6981925708a4cda1d87c53be98 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py @@ -29,7 +29,6 @@ SEED = 2021 class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad): - def setUp(self): self.set_npu() self.op_type = "update_loss_scaling" @@ -48,14 +47,14 @@ class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', np.zeros_like(x))], 'LossScaling': np.array([1639.0]).astype(self.dtype), 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def init(self): diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py index 678e50247afc817aac4e6c63301c8b6c078c66be..8d364af89e3cb5a5b1d02cd59e6a370dc9859e93 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py @@ -27,7 +27,6 @@ SEED = 2021 class TestUpdateLossScalingOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "update_loss_scaling" @@ -42,14 +41,14 @@ class TestUpdateLossScalingOp(OpTest): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', x)], 'LossScaling': self.prev_loss_scaling * self.incr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def set_npu(self): @@ -75,7 +74,6 @@ class TestUpdateLossScalingOp(OpTest): class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): - def setUp(self): self.set_npu() self.op_type = "update_loss_scaling" @@ -93,33 +91,32 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', np.zeros_like(x))], 'LossScaling': self.prev_loss_scaling * self.decr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } class TestUpdateLossScalingLayer(unittest.TestCase): - def loss_scaling_check(self, use_npu=True, scope=fluid.Scope()): a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -133,33 +130,41 @@ class TestUpdateLossScalingLayer(unittest.TestCase): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = paddle.NPUPlace(0) if use_npu else fluid.CPUPlace() exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + 
num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], a_v) assert np.array_equal(result_v[1], b_v) assert np.array_equal(result_v[0], result_v[2]) @@ -174,15 +179,15 @@ class TestUpdateLossScalingLayer(unittest.TestCase): b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -199,33 +204,41 @@ class TestUpdateLossScalingLayer(unittest.TestCase): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = paddle.NPUPlace(0) if use_npu else fluid.CPUPlace() exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], np.zeros_like(a_v)) assert np.array_equal(result_v[1], np.zeros_like(b_v)) assert np.array_equal(result_v[2], np.zeros_like(a_v)) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py index ff159c1473f8603ffb5644e04f6344d0d78fdd0b..370a1934bff96054fd38ac905971733f493f9e45 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestWhereIndexOp(OpTest): - def setUp(self): self.set_npu() self.op_type = "where_index" @@ -49,7 +48,6 @@ class TestWhereIndexOp(OpTest): class TestNotBool(TestWhereIndexOp): - def init_config(self): self.inputs = { 'Condition': np.array([1, 0, 8]), @@ -59,7 +57,6 @@ class TestNotBool(TestWhereIndexOp): class TestAllFalse(TestWhereIndexOp): - def init_config(self): self.inputs = { 'Condition': np.array([False, False, False]), @@ -69,7 +66,6 @@ class TestAllFalse(TestWhereIndexOp): class TestRank2(TestWhereIndexOp): - def init_config(self): self.inputs = { 'Condition': np.array([[True, False], [False, True]]), @@ -79,24 +75,26 @@ 
class TestRank2(TestWhereIndexOp): class TestRank3(TestWhereIndexOp): - def init_config(self): self.inputs = { - 'Condition': - np.array([[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [False, True]]]), + 'Condition': np.array( + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [False, True]], + ] + ), } self.outputs = { - 'Out': - np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], - dtype='int64') + 'Out': np.array( + [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], + dtype='int64', + ) } class TestWhereOpError(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') @@ -109,9 +107,7 @@ class TestWhereOpError(unittest.TestCase): class TestWhereRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.where([10]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py index ab26d506c2fa8e18eb25606c6d63678069c2cfdc..9e1126d0aa0436204f28e904f3e8f4cd439dfc77 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestNPUWhereOp(OpTest): - def setUp(self): self.op_type = "where" self.set_npu() @@ -52,7 +51,6 @@ class TestNPUWhereOp(OpTest): class TestNPUWhereOp2(TestNPUWhereOp): - def init_config(self): self.x = np.random.uniform(-5, 5, (60, 2)).astype("float64") self.y = np.random.uniform(-5, 5, (60, 2)).astype("float64") @@ -60,7 +58,6 @@ class TestNPUWhereOp2(TestNPUWhereOp): class TestNPUWhereOp3(TestNPUWhereOp): - def init_config(self): self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") @@ -68,7 +65,6 @@ class TestNPUWhereOp3(TestNPUWhereOp): class TestNPUWhereAPI(unittest.TestCase): - def setUp(self): self.__class__.use_npu = True self.place = paddle.NPUPlace(0) @@ -93,9 +89,9 @@ class TestNPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - cond = fluid.data(name='cond', - shape=self.shape, - dtype='bool') + cond = fluid.data( + name='cond', shape=self.shape, dtype='bool' + ) x = fluid.data(name='x', shape=self.shape, dtype='float32') y = fluid.data(name='y', shape=self.shape, dtype='float32') @@ -113,24 +109,25 @@ class TestNPUWhereAPI(unittest.TestCase): fetch_list.append(x.grad_name) if y_stop_gradient is False: fetch_list.append(y.grad_name) - out = exe.run(train_prog, - feed={ - 'cond': self.cond, - 'x': self.x, - 'y': self.y - }, - fetch_list=fetch_list) + out = exe.run( + train_prog, + feed={'cond': self.cond, 'x': self.x, 'y': self.y}, + fetch_list=fetch_list, + ) assert np.array_equal(out[0], self.out) if x_stop_gradient is False: - assert np.array_equal(out[2], - self.ref_x_backward(out[1])) + assert np.array_equal( + out[2], self.ref_x_backward(out[1]) + ) if y.stop_gradient is False: - assert np.array_equal(out[3], - self.ref_y_backward(out[1])) + assert np.array_equal( + out[3], self.ref_y_backward(out[1]) + ) elif y.stop_gradient is False: - assert np.array_equal(out[2], - self.ref_y_backward(out[1])) + assert np.array_equal( + out[2], self.ref_y_backward(out[1]) + ) def test_api_broadcast(self, use_cuda=False): train_prog = fluid.Program() @@ -139,24 +136,21 @@ class 
TestNPUWhereAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") - y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, - 1.0]]).astype("float32") + y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( + "float32" + ) result = paddle.where(x > 1, x=x, y=y) exe = fluid.Executor(self.place) exe.run(startup) - out = exe.run(train_prog, - feed={ - 'x': x_i, - 'y': y_i - }, - fetch_list=[result]) + out = exe.run( + train_prog, feed={'x': x_i, 'y': y_i}, fetch_list=[result] + ) assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i)) class TestWhereDygraphAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(paddle.NPUPlace(0)): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py index 54e11413fe70030c2c0686e11585b4ca22bf14f2..45aba89a84ee8c1dfa1880c5067f052967cc1e2b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py @@ -26,20 +26,16 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): - def simple_net(self): - d0 = layers.data("d0", - shape=[10], - append_batch_size=False, - dtype='float32') - d1 = layers.data("d1", - shape=[10], - append_batch_size=False, - dtype='float32') - d2 = layers.data("d2", - shape=[10], - append_batch_size=False, - dtype='float32') + d0 = layers.data( + "d0", shape=[10], append_batch_size=False, dtype='float32' + ) + d1 = layers.data( + "d1", shape=[10], append_batch_size=False, dtype='float32' + ) + d2 = layers.data( + "d2", shape=[10], append_batch_size=False, dtype='float32' + ) # fill_constant npu op doesn't support int64 i = layers.zeros(shape=[1], dtype='int32') i = layers.cast(i, 'int64') @@ -107,12 +103,10 @@ class TestWhileOp(unittest.TestCase): for i in range(3): d.append(numpy.random.random(size=[10]).astype('float32')) - outs = exe.run(feed={ - 'd0': d[0], - 'd1': d[1], - 'd2': d[2] - }, - fetch_list=[sum_result]) + outs = exe.run( + feed={'d0': d[0], 'd1': d[1], 'd2': d[2]}, + fetch_list=[sum_result], + ) self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) def test_simple_net_forward(self): diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 01a4d788901151023ae5030272a6f94d0b854253..6147c88dc56801a2d8f7f8afcc85d12533efd6a7 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -27,12 +27,21 @@ import paddle import paddle.fluid as fluid from paddle.fluid.framework import _dygraph_tracer import paddle.fluid.core as core -from paddle.fluid.framework import _in_legacy_dygraph, _enable_legacy_dygraph, _in_eager_without_dygraph_check, _disable_legacy_dygraph +from paddle.fluid.framework import ( + _in_legacy_dygraph, + _enable_legacy_dygraph, + _in_eager_without_dygraph_check, + _disable_legacy_dygraph, +) from paddle.fluid.framework import _test_eager_guard from paddle.fluid.backward import append_backward from paddle.fluid.op import Operator from paddle.fluid.executor import Executor -from paddle.fluid.framework import OpProtoHolder, Program, _current_expected_place +from paddle.fluid.framework import ( + OpProtoHolder, + Program, + _current_expected_place, +) from paddle.fluid import 
unique_name from paddle.fluid.dygraph.dygraph_to_static.utils import parse_arg_and_kwargs @@ -54,8 +63,12 @@ from white_list import ( # For switch new eager mode globally g_is_in_eager = _in_eager_without_dygraph_check() -g_enable_legacy_dygraph = _enable_legacy_dygraph if g_is_in_eager else lambda: None -g_disable_legacy_dygraph = _disable_legacy_dygraph if g_is_in_eager else lambda: None +g_enable_legacy_dygraph = ( + _enable_legacy_dygraph if g_is_in_eager else lambda: None +) +g_disable_legacy_dygraph = ( + _disable_legacy_dygraph if g_is_in_eager else lambda: None +) def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs): @@ -85,12 +98,15 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs): shape, dtype = spec else: raise ValueError( - "Value of in_specs[{}] should contains two elements: [shape, dtype]" - .format(index)) + "Value of in_specs[{}] should contains two elements: [shape, dtype]".format( + index + ) + ) input_t.append( - paddle.static.data(name='data_%s' % index, - shape=shape, - dtype=dtype)) + paddle.static.data( + name='data_%s' % index, shape=shape, dtype=dtype + ) + ) out = api_fn(*input_t, **configs) out_dtype = fluid.data_feeder.convert_dtype(out.dtype) @@ -98,7 +114,9 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs): if out_dtype != expect_dtype: raise ValueError( "Expected out.dtype is {}, but got {} from {}.".format( - expect_dtype, out_dtype, api_fn.__name__)) + expect_dtype, out_dtype, api_fn.__name__ + ) + ) def _set_use_system_allocator(value=None): @@ -110,22 +128,25 @@ def _set_use_system_allocator(value=None): def randomize_probability(batch_size, class_num, dtype='float32'): - prob = np.random.uniform(0.1, 1.0, - size=(batch_size, class_num)).astype(dtype) + prob = np.random.uniform(0.1, 1.0, size=(batch_size, class_num)).astype( + dtype + ) prob_sum = prob.sum(axis=1) for i in range(len(prob)): prob[i] /= prob_sum[i] return prob -def get_numeric_gradient(place, - scope, - op, - inputs, - input_to_check, - output_names, - delta=0.005, - in_place=False): +def get_numeric_gradient( + place, + scope, + op, + inputs, + input_to_check, + output_names, + delta=0.005, + in_place=False, +): # FIXME: change this method by compile time concepts set_input(scope, op, inputs, place) @@ -150,9 +171,12 @@ def get_numeric_gradient(place, elif tensor_to_check_dtype == core.VarDesc.VarType.COMPLEX128: tensor_to_check_dtype = np.complex128 else: - raise ValueError("Not supported data type " + - str(tensor_to_check_dtype) + ", tensor name : " + - str(input_to_check)) + raise ValueError( + "Not supported data type " + + str(tensor_to_check_dtype) + + ", tensor name : " + + str(input_to_check) + ) def get_output(): sum = [] @@ -167,7 +191,7 @@ def get_numeric_gradient(place, sum.append(output_numpy.astype(tensor_to_check_dtype).mean()) return tensor_to_check_dtype(np.array(sum).sum() / len(output_names)) - gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype) + gradient_flat = np.zeros(shape=(tensor_size,), dtype=tensor_to_check_dtype) def __get_elem__(tensor, i): if tensor_to_check_dtype == np.float16: @@ -179,15 +203,16 @@ def get_numeric_gradient(place, numpy_tensor = numpy_tensor.flatten() return struct.unpack( ' 1 and is_np_data( - sub_val_value[1]): # case 3 + sub_val_value[1] + ): # case 3 dtype_set.add(sub_val_value[1].dtype) - elif len(sub_val_value) > 1 and isinstance(sub_val_value[1], (list, tuple)) \ - and is_np_data(sub_val_value[1][0]): # case 4 + elif 
( + len(sub_val_value) > 1 + and isinstance(sub_val_value[1], (list, tuple)) + and is_np_data(sub_val_value[1][0]) + ): # case 4 dtype_set.add(sub_val_value[1][0].dtype) # infer dtype from inputs, and dtype means the precision of the test @@ -466,7 +525,7 @@ class OpTest(unittest.TestCase): np.dtype(np.int16), np.dtype(np.int8), np.dtype(np.uint8), - np.dtype(np.bool_) + np.dtype(np.bool_), ] # check the dtype in dtype_list in order, select the first dtype that in dtype_set for dtype in dtype_list: @@ -501,7 +560,8 @@ class OpTest(unittest.TestCase): if isinstance(self.inputs[var_name], tuple): tensor.set(self.inputs[var_name][0], place) tensor.set_recursive_sequence_lengths( - self.inputs[var_name][1]) + self.inputs[var_name][1] + ) else: tensor.set(self.inputs[var_name], place) feed_map[var_name] = tensor @@ -509,7 +569,9 @@ class OpTest(unittest.TestCase): return feed_map def _append_ops(self, block): - self.__class__.op_type = self.op_type # for ci check, please not delete it for now + self.__class__.op_type = ( + self.op_type + ) # for ci check, please not delete it for now if self.is_mkldnn_op(): self.__class__.use_mkldnn = True @@ -524,23 +586,28 @@ class OpTest(unittest.TestCase): self.output_dtype = np.uint16 else: self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs) - inputs = append_input_output(block, op_proto, self.inputs, True, - self.dtype) - outputs = append_input_output(block, op_proto, self.outputs, False, - self.dtype) + inputs = append_input_output( + block, op_proto, self.inputs, True, self.dtype + ) + outputs = append_input_output( + block, op_proto, self.outputs, False, self.dtype + ) if hasattr(self, "cache_name_list"): for name in self.cache_name_list: - inputs[name] = block.create_var(name=name, - persistable=True, - type=core.VarDesc.VarType.RAW, - stop_gradient=True) + inputs[name] = block.create_var( + name=name, + persistable=True, + type=core.VarDesc.VarType.RAW, + stop_gradient=True, + ) op = block.append_op( type=self.op_type, inputs=inputs, outputs=outputs, - attrs=copy(self.attrs) if hasattr(self, "attrs") else dict()) + attrs=copy(self.attrs) if hasattr(self, "attrs") else dict(), + ) # infer variable type and infer shape in compile-time op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) @@ -605,9 +672,12 @@ class OpTest(unittest.TestCase): def lod_has_continuous_zero(self, lod): for i in range(len(lod) - 3): - if lod[i] != 0 and lod[i + - 1] == 0 and lod[i + - 2] == 0 and lod[i + 3] != 0: + if ( + lod[i] != 0 + and lod[i + 1] == 0 + and lod[i + 2] == 0 + and lod[i + 3] != 0 + ): return True return False @@ -625,8 +695,12 @@ class OpTest(unittest.TestCase): if shape is None: shape = [12, 10] assert len(lod[0]) >= 8 - assert lod[0][0] == 0 and lod[0][1] == 0 and lod[0][-1] == 0 and lod[0][ - -2] == 0 + assert ( + lod[0][0] == 0 + and lod[0][1] == 0 + and lod[0][-1] == 0 + and lod[0][-2] == 0 + ) assert self.lod_has_single_zero(lod[0]) is True assert self.lod_has_continuous_zero(lod[0]) is True assert sum(lod[0]) == shape[0] @@ -634,9 +708,9 @@ class OpTest(unittest.TestCase): x = np.random.uniform(0.1, 1, shape).astype('float32') return (x, lod) - def append_input_output_for_dygraph(self, op_proto, np_list, is_input, - if_return_inputs_grad_dict, block): - + def append_input_output_for_dygraph( + self, op_proto, np_list, is_input, if_return_inputs_grad_dict, block + ): def create_var(np_value, name, is_input, if_return_inputs_grad_dict): np_value_temp = np_value has_lod = False @@ -656,13 +730,16 @@ class 
OpTest(unittest.TestCase): if has_lod: v.value().get_tensor().set_recursive_sequence_lengths( - lod_temp) + lod_temp + ) else: - v = block.create_var(name=name, - dtype=np_value_temp.dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=False) + v = block.create_var( + name=name, + dtype=np_value_temp.dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=False, + ) return v # prepare variable for input or output @@ -676,21 +753,23 @@ class OpTest(unittest.TestCase): continue if name not in np_list: assert var_proto.intermediate, "{} not found".format(name) - v = block.create_var(dtype='float32', - type=core.VarDesc.VarType.LOD_TENSOR) + v = block.create_var( + dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR + ) var_dict[name].append(v) if if_return_inputs_grad_dict: inputs_grad_dict[name] = v continue if var_proto.duplicable: assert isinstance( - np_list[name], - list), "Duplicable {} should be set as list".format(name) + np_list[name], list + ), "Duplicable {} should be set as list".format(name) var_list = [] slot_name = name for (name, np_value) in np_list[name]: - v = create_var(np_value, name, is_input, - if_return_inputs_grad_dict) + v = create_var( + np_value, name, is_input, if_return_inputs_grad_dict + ) var_list.append(v) if if_return_inputs_grad_dict: inputs_grad_dict[name] = v @@ -704,8 +783,12 @@ class OpTest(unittest.TestCase): else: nplist_value_temp = np_list[name] name_temp = unique_name.generate("%s_out" % (name)) - v = create_var(nplist_value_temp, name_temp, is_input, - if_return_inputs_grad_dict) + v = create_var( + nplist_value_temp, + name_temp, + is_input, + if_return_inputs_grad_dict, + ) var_dict[name].append(v) if if_return_inputs_grad_dict: inputs_grad_dict[name] = v @@ -716,10 +799,10 @@ class OpTest(unittest.TestCase): return var_dict def _check_api_outs_by_dygraph_outs(self, api_outs, dygraph_outs, place): - """ for quick verify, here we take a simplest strategy: - 1. we only check variable in api_outs. - 2. we simply check the numpy (tensor) . - 3. we set atol and rtol as 1e-5, because they are unrelated to dtype. + """for quick verify, here we take a simplest strategy: + 1. we only check variable in api_outs. + 2. we simply check the numpy (tensor) . + 3. we set atol and rtol as 1e-5, because they are unrelated to dtype. """ for name in api_outs: np_api = np.array(api_outs[name]) @@ -729,19 +812,28 @@ class OpTest(unittest.TestCase): np_dyg, rtol=1e-05, equal_nan=False, - err_msg='Output (' + name + ') has diff at ' + str(place) + - '\nExpect ' + str(np_dyg) + '\n' + 'But Got' + str(np_api) + - ' in class ' + self.__class__.__name__) + err_msg='Output (' + + name + + ') has diff at ' + + str(place) + + '\nExpect ' + + str(np_dyg) + + '\n' + + 'But Got' + + str(np_api) + + ' in class ' + + self.__class__.__name__, + ) def _calc_python_api_output(self, place, egr_inps=None, egr_oups=None): - """ set egr_inps and egr_oups = None if you want to create it by yourself. - """ + """set egr_inps and egr_oups = None if you want to create it by yourself.""" - def prepare_python_api_arguments(api, op_proto_ins, op_proto_attrs, - kernel_sig): - """ map from `op proto inputs and attrs` to `api input list and api attrs dict` + def prepare_python_api_arguments( + api, op_proto_ins, op_proto_attrs, kernel_sig + ): + """map from `op proto inputs and attrs` to `api input list and api attrs dict` - NOTE: the op_proto_attrs and op_proto_ins is a default dict. 
default value is [] + NOTE: the op_proto_attrs and op_proto_ins is a default dict. default value is [] """ class Empty: @@ -751,19 +843,19 @@ class OpTest(unittest.TestCase): return isinstance(a, Empty) def get_default(idx, defaults): - assert not isinstance( - defaults[idx], Empty - ), "%d-th params of python api don't have default value." % idx + assert not isinstance(defaults[idx], Empty), ( + "%d-th params of python api don't have default value." % idx + ) return defaults[idx] def to_defaults_list(params, defaults): return [defaults[p] for p in params if p in defaults] def parse_attri_value(name, op_inputs, op_attrs): - """ parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty - 1. if the name in op_attrs, use the op_attrs[name] - 2. if the name in op_inputs, convert the op_inputs to [type of default value] - 3. if the name not in op_attrs ans op_inputs, return Empty. (this will use the default value from python api) + """parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty + 1. if the name in op_attrs, use the op_attrs[name] + 2. if the name in op_inputs, convert the op_inputs to [type of default value] + 3. if the name not in op_attrs ans op_inputs, return Empty. (this will use the default value from python api) """ if name in op_proto_attrs: return op_proto_attrs[name] @@ -771,8 +863,9 @@ class OpTest(unittest.TestCase): if len(op_inputs[name]) == 1: # why don't use numpy().item() : if the Tensor is float64, we will change it to python.float32, where we loss accuracy: [allclose_op] # why we reconstruct a tensor: because we want the tensor in cpu. - return paddle.to_tensor(op_inputs[name][0].numpy(), - place='cpu') + return paddle.to_tensor( + op_inputs[name][0].numpy(), place='cpu' + ) else: # if this is a list (test_unsqueeze2_op): we just pass it into the python api. return op_inputs[name] @@ -799,7 +892,8 @@ class OpTest(unittest.TestCase): Empty() for i in range(len(api_params) - len(api_defaults)) ] + api_defaults assert len(api_defaults) == len( - api_params), "Error happens. contack xiongkun03 to solve." + api_params + ), "Error happens. contack xiongkun03 to solve." inputs_sig, attrs_sig, outputs_sig = kernel_sig inputs_and_attrs = inputs_sig + attrs_sig input_arguments = [ @@ -815,7 +909,7 @@ class OpTest(unittest.TestCase): if arg_name in api_ignore_param_list: results.append(get_default(idx, api_defaults)) else: - if (idx_of_op_proto_arguments < len(input_arguments)): + if idx_of_op_proto_arguments < len(input_arguments): tmp = input_arguments[idx_of_op_proto_arguments] idx_of_op_proto_arguments += 1 else: @@ -838,9 +932,9 @@ class OpTest(unittest.TestCase): return {a: [b] for a, b in zip(output_sig, ret_tuple)} else: # [assumption]: return multi-Tensor in a single output. such as paddle.split() - assert len( - output_sig - ) == 1, "Don't support multi-output with multi-tensor output. (May be you can use set `python_out_sig`, see `test_squeeze2_op` as a example.)" + assert ( + len(output_sig) == 1 + ), "Don't support multi-output with multi-tensor output. (May be you can use set `python_out_sig`, see `test_squeeze2_op` as a example.)" return {output_sig[0]: ret_tuple} def assumption_assert_and_transform(args, inp_num): @@ -853,29 +947,35 @@ class OpTest(unittest.TestCase): only support "X" is list of Tensor, currently don't support other structure like dict. 
""" - inp_args = [[inp] if inp is None else inp - for inp in args[:inp_num]] # convert None -> [None] + inp_args = [ + [inp] if inp is None else inp for inp in args[:inp_num] + ] # convert None -> [None] for inp in inp_args: assert isinstance( inp, list ), "currently only support `X` is [Tensor], don't support other structure." - args = [inp[0] if len(inp) == 1 else inp - for inp in inp_args] + args[inp_num:] + args = [ + inp[0] if len(inp) == 1 else inp for inp in inp_args + ] + args[inp_num:] return args - def _get_kernel_signature(eager_tensor_inputs, eager_tensor_outputs, - attrs_outputs): + def _get_kernel_signature( + eager_tensor_inputs, eager_tensor_outputs, attrs_outputs + ): try: kernel_sig = _dygraph_tracer()._get_kernel_signature( - self.op_type, eager_tensor_inputs, eager_tensor_outputs, - attrs_outputs) + self.op_type, + eager_tensor_inputs, + eager_tensor_outputs, + attrs_outputs, + ) except RuntimeError as re: - """ we think the kernel_sig is missing. - """ + """we think the kernel_sig is missing.""" kernel_sig = None print( "[Warning: op_test.py] Kernel Signature is not found for %s, fall back to intermediate state." - % self.op_type) + % self.op_type + ) return kernel_sig def cal_python_api(python_api, args, kernel_sig): @@ -888,11 +988,21 @@ class OpTest(unittest.TestCase): block = fluid.default_main_program().global_block() op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) # prepare input variable - eager_tensor_inputs = egr_inps if egr_inps else self.append_input_output_for_dygraph( - op_proto, self.inputs, True, False, block) + eager_tensor_inputs = ( + egr_inps + if egr_inps + else self.append_input_output_for_dygraph( + op_proto, self.inputs, True, False, block + ) + ) # prepare output variable - eager_tensor_outputs = egr_oups if egr_oups else self.append_input_output_for_dygraph( - op_proto, self.outputs, False, False, block) + eager_tensor_outputs = ( + egr_oups + if egr_oups + else self.append_input_output_for_dygraph( + op_proto, self.outputs, False, False, block + ) + ) # prepare attributes attrs_outputs = {} @@ -901,23 +1011,26 @@ class OpTest(unittest.TestCase): if self.attrs[attrs_name] is not None: attrs_outputs[attrs_name] = self.attrs[attrs_name] - kernel_sig = _get_kernel_signature(eager_tensor_inputs, - eager_tensor_outputs, - attrs_outputs) + kernel_sig = _get_kernel_signature( + eager_tensor_inputs, eager_tensor_outputs, attrs_outputs + ) if not kernel_sig: return None - assert hasattr( - self, "python_api" - ), "Detect there is KernelSignature for `%s` op, please set the `self.python_api` if you set check_eager = True" % self.op_type - args = prepare_python_api_arguments(self.python_api, - eager_tensor_inputs, - attrs_outputs, kernel_sig) + assert hasattr(self, "python_api"), ( + "Detect there is KernelSignature for `%s` op, please set the `self.python_api` if you set check_eager = True" + % self.op_type + ) + args = prepare_python_api_arguments( + self.python_api, eager_tensor_inputs, attrs_outputs, kernel_sig + ) """ we directly return the cal_python_api value because the value is already tensor. 
""" return cal_python_api(self.python_api, args, kernel_sig) def _calc_dygraph_output(self, place, parallel=False, no_check_set=None): - self.__class__.op_type = self.op_type # for ci check, please not delete it for now + self.__class__.op_type = ( + self.op_type + ) # for ci check, please not delete it for now with fluid.dygraph.base.guard(place=place): block = fluid.default_main_program().global_block() @@ -925,10 +1038,12 @@ class OpTest(unittest.TestCase): # prepare input variable inputs = self.append_input_output_for_dygraph( - op_proto, self.inputs, True, False, block) + op_proto, self.inputs, True, False, block + ) # prepare output variable outputs = self.append_input_output_for_dygraph( - op_proto, self.outputs, False, False, block) + op_proto, self.outputs, False, False, block + ) # prepare attributes attrs_outputs = {} @@ -941,16 +1056,19 @@ class OpTest(unittest.TestCase): type=self.op_type, inputs=inputs, outputs=outputs, - attrs=attrs_outputs if hasattr(self, "attrs") else None) + attrs=attrs_outputs if hasattr(self, "attrs") else None, + ) return outputs - def _calc_output(self, - place, - parallel=False, - no_check_set=None, - loss=None, - enable_inplace=None, - for_inplace_test=None): + def _calc_output( + self, + place, + parallel=False, + no_check_set=None, + loss=None, + enable_inplace=None, + for_inplace_test=None, + ): program = Program() block = program.global_block() op = self._append_ops(block) @@ -974,7 +1092,8 @@ class OpTest(unittest.TestCase): if isinstance(place, fluid.CUDAPlace): use_cuda = True compiled_prog = fluid.CompiledProgram(program).with_data_parallel( - loss_name=loss.name if loss else None, places=place) + loss_name=loss.name if loss else None, places=place + ) program = compiled_prog fetch_list = getattr(self, "fetch_list", []) # if the fetch_list is customized by user, we use it directly. @@ -998,14 +1117,14 @@ class OpTest(unittest.TestCase): build_strategy.enable_inplace = enable_inplace compiled_prog = fluid.CompiledProgram(program).with_data_parallel( - build_strategy=build_strategy, places=place) + build_strategy=build_strategy, places=place + ) program = compiled_prog executor = Executor(place) - outs = executor.run(program, - feed=feed_map, - fetch_list=fetch_list, - return_numpy=False) + outs = executor.run( + program, feed=feed_map, fetch_list=fetch_list, return_numpy=False + ) self.op = op self.program = original_program if for_inplace_test: @@ -1013,12 +1132,9 @@ class OpTest(unittest.TestCase): else: return outs, fetch_list - def _compare_expect_and_actual_outputs(self, - place, - fetch_list, - expect_outs, - actual_outs, - inplace_atol=None): + def _compare_expect_and_actual_outputs( + self, place, fetch_list, expect_outs, actual_outs, inplace_atol=None + ): """Compare expect outs and actual outs of an tested op. 
Args: @@ -1045,21 +1161,41 @@ class OpTest(unittest.TestCase): actual_out, rtol=1e-05, atol=inplace_atol, - err_msg='Output (' + name + ') has diff at ' + str(place) + - ' when using and not using inplace' + '\nExpect ' + - str(expect_out) + '\n' + 'But Got' + str(actual_out) + - ' in class ' + self.__class__.__name__) + err_msg='Output (' + + name + + ') has diff at ' + + str(place) + + ' when using and not using inplace' + + '\nExpect ' + + str(expect_out) + + '\n' + + 'But Got' + + str(actual_out) + + ' in class ' + + self.__class__.__name__, + ) else: np.testing.assert_array_equal( expect_out, actual_out, - err_msg='Output (' + name + ') has diff at ' + str(place) + - ' when using and not using inplace' + '\nExpect ' + - str(expect_out) + '\n' + 'But Got' + str(actual_out) + - ' in class ' + self.__class__.__name__ + '\n') - - def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc, - op_grad_to_var): + err_msg='Output (' + + name + + ') has diff at ' + + str(place) + + ' when using and not using inplace' + + '\nExpect ' + + str(expect_out) + + '\n' + + 'But Got' + + str(actual_out) + + ' in class ' + + self.__class__.__name__ + + '\n', + ) + + def _construct_grad_program_from_forward( + self, fwd_program, grad_op_desc, op_grad_to_var + ): """Generate grad_program which contains the grad_op. Args: @@ -1077,19 +1213,23 @@ class OpTest(unittest.TestCase): grad_program._sync_with_cpp() # Create grad vars based on fwd vars (shape and dtype) - for arg in grad_op_desc.input_arg_names( - ) + grad_op_desc.output_arg_names(): + for arg in ( + grad_op_desc.input_arg_names() + grad_op_desc.output_arg_names() + ): fwd_var_name = op_grad_to_var.get(arg, None) if fwd_var_name is None: fwd_var_name = arg fwd_var = fwd_program.global_block().vars.get(fwd_var_name) assert fwd_var is not None, "{} cannot be found".format( - fwd_var_name) - grad_var = grad_block.create_var(name=arg, - dtype=fwd_var.dtype, - shape=fwd_var.shape, - type=fwd_var.type, - persistable=False) + fwd_var_name + ) + grad_var = grad_block.create_var( + name=arg, + dtype=fwd_var.dtype, + shape=fwd_var.shape, + type=fwd_var.type, + persistable=False, + ) # Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op, # and the shapes of those variables contain 0 (eg. Xshape.shape = [0, 2, 5]). @@ -1100,8 +1240,9 @@ class OpTest(unittest.TestCase): grad_program._sync_with_cpp() return grad_program - def _construct_grad_feed_map_from_forward(self, place, fwd_res, - grad_op_desc, op_grad_to_var): + def _construct_grad_feed_map_from_forward( + self, place, fwd_res, grad_op_desc, op_grad_to_var + ): """Generate grad_feed_map for grad_program. since we don`t really check gradient accuracy, but check the consistency when using and not using inplace, @@ -1117,7 +1258,13 @@ class OpTest(unittest.TestCase): Returns: grad_feed_map (dict): The feed_map of grad_op. 
""" - fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res + ( + fwd_outs, + fwd_fetch_list, + fwd_feed_map, + fwd_program, + fwd_op_desc, + ) = fwd_res p = core.Place() p.set_place(place) grad_feed_map = {} @@ -1167,14 +1314,16 @@ class OpTest(unittest.TestCase): else: # get grad_op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - op_desc, set(), []) + op_desc, set(), [] + ) if not grad_op_desc_list: has_infer_inplace_in_grad_descendants = False else: for i, grad_op_desc in enumerate(grad_op_desc_list): - if grad_op_desc.type( - ) not in visited_ops and _dfs_grad_op( - grad_op_desc, fwd_op_desc=op_desc): + if ( + grad_op_desc.type() not in visited_ops + and _dfs_grad_op(grad_op_desc, fwd_op_desc=op_desc) + ): has_infer_inplace_in_grad_descendants = True if has_infer_inplace or has_infer_inplace_in_grad_descendants: need_run_ops.append((op_desc, fwd_op_desc)) @@ -1185,10 +1334,9 @@ class OpTest(unittest.TestCase): _dfs_grad_op(op_desc, fwd_op_desc=fwd_op_desc) return need_run_ops - def _check_forward_inplace(self, - place, - no_check_set=None, - inplace_atol=None): + def _check_forward_inplace( + self, place, no_check_set=None, inplace_atol=None + ): """Check the inplace correctness of given op (self.op_type). Run the op twice with same inputs, one enable inplace and another disable, compare their outputs. @@ -1202,27 +1350,31 @@ class OpTest(unittest.TestCase): We return this to construct grad_program and grad_feed_map for grad inplace check. """ # _calc_output() returns in the form tuple(outs, fetch_list, feed_map, program, op_desc) when for_inplace_test=True. - expect_res = self._calc_output(place, - no_check_set=no_check_set, - enable_inplace=False, - for_inplace_test=True) - actual_res = self._calc_output(place, - no_check_set=no_check_set, - enable_inplace=True, - for_inplace_test=True) + expect_res = self._calc_output( + place, + no_check_set=no_check_set, + enable_inplace=False, + for_inplace_test=True, + ) + actual_res = self._calc_output( + place, + no_check_set=no_check_set, + enable_inplace=True, + for_inplace_test=True, + ) # compare expect_outs and actual_outs - self._compare_expect_and_actual_outputs(place, - expect_res[1], - expect_res[0], - actual_res[0], - inplace_atol=inplace_atol) + self._compare_expect_and_actual_outputs( + place, + expect_res[1], + expect_res[0], + actual_res[0], + inplace_atol=inplace_atol, + ) return expect_res - def _calc_grad_output(self, - place, - fwd_res, - grad_op_desc, - enable_inplace=None): + def _calc_grad_output( + self, place, fwd_res, grad_op_desc, enable_inplace=None + ): """Calculate grad_output for given grad_op_desc. since we don`t really check gradient accuracy, but check the consistency when using and not using inplace, @@ -1238,13 +1390,22 @@ class OpTest(unittest.TestCase): Returns: res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given grad_op_desc. 
""" - fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res + ( + fwd_outs, + fwd_fetch_list, + fwd_feed_map, + fwd_program, + fwd_op_desc, + ) = fwd_res grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - fwd_op_desc, set(), []) + fwd_op_desc, set(), [] + ) grad_program = self._construct_grad_program_from_forward( - fwd_program, grad_op_desc, op_grad_to_var) + fwd_program, grad_op_desc, op_grad_to_var + ) grad_feed_map = self._construct_grad_feed_map_from_forward( - place, fwd_res, grad_op_desc, op_grad_to_var) + place, fwd_res, grad_op_desc, op_grad_to_var + ) grad_fetch_list = grad_op_desc.output_arg_names() exe = Executor(place) program = grad_program @@ -1252,22 +1413,23 @@ class OpTest(unittest.TestCase): build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = enable_inplace compiled_program = fluid.CompiledProgram( - grad_program).with_data_parallel(loss_name="", - build_strategy=build_strategy, - places=place) + grad_program + ).with_data_parallel( + loss_name="", build_strategy=build_strategy, places=place + ) program = compiled_program - outs = exe.run(program, - feed=grad_feed_map, - fetch_list=grad_fetch_list, - return_numpy=False) + outs = exe.run( + program, + feed=grad_feed_map, + fetch_list=grad_fetch_list, + return_numpy=False, + ) return outs, grad_fetch_list, grad_feed_map, grad_program, grad_op_desc - def _check_grad_inplace(self, - place, - fwd_res, - grad_op_desc, - inplace_atol=None): + def _check_grad_inplace( + self, place, fwd_res, grad_op_desc, inplace_atol=None + ): """Check the inplace correctness of given grad_op_desc. Run the grad op twice with same inputs, one enable inplace and another disable, compare their outputs. @@ -1285,26 +1447,25 @@ class OpTest(unittest.TestCase): expect_res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given op. We return this to construct grad_program and grad_feed_map for grad inplace check. """ - expect_res = self._calc_grad_output(place, - fwd_res, - grad_op_desc, - enable_inplace=False) - actual_res = self._calc_grad_output(place, - fwd_res, - grad_op_desc, - enable_inplace=True) - - self._compare_expect_and_actual_outputs(place, - expect_res[1], - expect_res[0], - actual_res[0], - inplace_atol=inplace_atol) + expect_res = self._calc_grad_output( + place, fwd_res, grad_op_desc, enable_inplace=False + ) + actual_res = self._calc_grad_output( + place, fwd_res, grad_op_desc, enable_inplace=True + ) + + self._compare_expect_and_actual_outputs( + place, + expect_res[1], + expect_res[0], + actual_res[0], + inplace_atol=inplace_atol, + ) return expect_res - def check_inplace_output_with_place(self, - place, - no_check_set=None, - inplace_atol=None): + def check_inplace_output_with_place( + self, place, no_check_set=None, inplace_atol=None + ): """Chech the inplace correctness of given op, its grad op, its grad_grad op, etc. (1) Get all ops need to run. 
(see conditions in _get_need_run_ops()) @@ -1324,9 +1485,9 @@ class OpTest(unittest.TestCase): has_infer_inplace = fluid.core.has_infer_inplace(self.op_type) has_grad_op_maker = fluid.core.has_grad_op_maker(self.op_type) - fwd_res = self._calc_output(place, - no_check_set=no_check_set, - for_inplace_test=True) + fwd_res = self._calc_output( + place, no_check_set=no_check_set, for_inplace_test=True + ) op_desc = fwd_res[4] need_run_ops = self._get_need_run_ops(op_desc) @@ -1341,17 +1502,19 @@ class OpTest(unittest.TestCase): res[op_desc] = self._check_forward_inplace( place, no_check_set=no_check_set, - inplace_atol=inplace_atol) + inplace_atol=inplace_atol, + ) else: - res[op_desc] = self._calc_output(place, - no_check_set=no_check_set, - for_inplace_test=True) + res[op_desc] = self._calc_output( + place, no_check_set=no_check_set, for_inplace_test=True + ) else: # TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn # skip op that use_mkldnn currently flags_use_mkldnn = fluid.core.globals()["FLAGS_use_mkldnn"] attrs_use_mkldnn = hasattr(self, 'attrs') and bool( - self.attrs.get('use_mkldnn', False)) + self.attrs.get('use_mkldnn', False) + ) if flags_use_mkldnn or attrs_use_mkldnn: warnings.warn( "check inplace_grad for ops using mkldnn is not supported" @@ -1360,19 +1523,23 @@ class OpTest(unittest.TestCase): if has_infer_inplace: fwd_res = res[father_op_desc] res[op_desc] = self._check_grad_inplace( - place, fwd_res, op_desc, inplace_atol=inplace_atol) + place, fwd_res, op_desc, inplace_atol=inplace_atol + ) else: res[op_desc] = self._calc_grad_output( - place, fwd_res, op_desc) + place, fwd_res, op_desc + ) - def check_output_with_place(self, - place, - atol=0, - no_check_set=None, - equal_nan=False, - check_dygraph=True, - inplace_atol=None, - check_eager=False): + def check_output_with_place( + self, + place, + atol=0, + no_check_set=None, + equal_nan=False, + check_dygraph=True, + inplace_atol=None, + check_eager=False, + ): # disable legacy dygraph check when check_eager is True if check_eager == True: @@ -1387,26 +1554,29 @@ class OpTest(unittest.TestCase): if var.name == target_name: return dygraph_outs[name][i] self.assertTrue( - False, "Found failed {} {}".format(dygraph_outs.keys(), - target_name)) + False, + "Found failed {} {}".format(dygraph_outs.keys(), target_name), + ) def find_actual(target_name, fetch_list): found = [ - i for i, var_name in enumerate(fetch_list) + i + for i, var_name in enumerate(fetch_list) if var_name == target_name ] self.assertTrue( - len(found) == 1, "Found {} {}".format(len(found), target_name)) + len(found) == 1, "Found {} {}".format(len(found), target_name) + ) return found[0] class Checker(object): - """ base class for check with self.outputs. - currently don't support check between checkers. + """base class for check with self.outputs. + currently don't support check between checkers. 
""" def __init__(self, op_test, expect_dict): - """ expect_dict is the self.outputs - support : {str: [numpy]} and {str: [(str, numpy), (str, numpy)]} + """expect_dict is the self.outputs + support : {str: [numpy]} and {str: [(str, numpy), (str, numpy)]} """ self.expects = expect_dict self.checker_name = "checker" @@ -1433,8 +1603,7 @@ class OpTest(unittest.TestCase): return False def find_actual_value(self, name): - """ return: (actual_tensor(var_base), actual_numpy) - """ + """return: (actual_tensor(var_base), actual_numpy)""" raise NotImplementedError("base class, not implement!") def _compare_numpy(self, name, actual_np, expect_np): @@ -1444,20 +1613,26 @@ class OpTest(unittest.TestCase): expect_np, atol=atol, rtol=self.rtol if hasattr(self, 'rtol') else 1e-5, - equal_nan=equal_nan), "Output (" + name + - ") has diff at " + str(place) + " in " + self.checker_name) + equal_nan=equal_nan, + ), + "Output (" + + name + + ") has diff at " + + str(place) + + " in " + + self.checker_name, + ) def _compare_list(self, name, actual, expect): - """ if expect is a tuple, we need to compare list. - """ + """if expect is a tuple, we need to compare list.""" raise NotImplementedError("base class, not implement!") def compare_single_output_with_expect(self, name, expect): actual, actual_np = self.find_actual_value(name) - expect_np = expect[0] \ - if isinstance(expect, tuple) else expect + expect_np = expect[0] if isinstance(expect, tuple) else expect actual_np, expect_np = self.convert_uint16_to_float_ifneed( - actual_np, expect_np) + actual_np, expect_np + ) # NOTE(zhiqiu): np.allclose([], [1.]) returns True # see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng if expect_np.size == 0: @@ -1468,17 +1643,20 @@ class OpTest(unittest.TestCase): def compare_outputs_with_expects(self): for out_name, out_dup in Operator.get_op_outputs(self.op_type): - if self._is_skip_name(out_name): continue + if self._is_skip_name(out_name): + continue if out_dup: # if self.output = {'name': [(subname, Tensor), (subname, Tensor)]} sub_out = self.expects[out_name] if not isinstance(sub_out, list): - raise AssertionError("sub_out type %s is not list", - type(sub_out)) + raise AssertionError( + "sub_out type %s is not list", type(sub_out) + ) for item in sub_out: sub_out_name, expect = item[0], item[1] self.compare_single_output_with_expect( - sub_out_name, expect) + sub_out_name, expect + ) else: expect = self.expects[out_name] self.compare_single_output_with_expect(out_name, expect) @@ -1494,13 +1672,13 @@ class OpTest(unittest.TestCase): self.compare_outputs_with_expects() class StaticChecker(Checker): - def init(self): self.checker_name = "static checker" def calculate_output(self): outs, fetch_list = self.op_test._calc_output( - place, no_check_set=no_check_set) + place, no_check_set=no_check_set + ) self.outputs = outs self.fetch_list = fetch_list @@ -1516,13 +1694,17 @@ class OpTest(unittest.TestCase): return True | False """ if actual_np.dtype == np.uint16 and expect_np.dtype in [ - np.float32, np.float64 + np.float32, + np.float64, ]: actual_np = convert_uint16_to_float(actual_np) - self.rtol = 1.e-2 + self.rtol = 1.0e-2 else: - self.rtol = 1.e-5 - if expect_np.dtype == np.uint16 and actual_np.dtype == np.uint16: + self.rtol = 1.0e-5 + if ( + expect_np.dtype == np.uint16 + and actual_np.dtype == np.uint16 + ): nonlocal atol expect_np = convert_uint16_to_float(expect_np) actual_np = convert_uint16_to_float(actual_np) @@ -1530,36 
+1712,40 @@ class OpTest(unittest.TestCase): return actual_np, expect_np def _compare_list(self, name, actual, expect): - """ if expect is a tuple, we need to compare list. - """ + """if expect is a tuple, we need to compare list.""" self.op_test.assertListEqual( - actual.recursive_sequence_lengths(), expect[1], - "Output (" + name + ") has different lod at " + str(place)) + actual.recursive_sequence_lengths(), + expect[1], + "Output (" + name + ") has different lod at " + str(place), + ) class DygraphChecker(Checker): - def init(self): self.checker_name = "dygraph checker" def calculate_output(self): self.outputs = self.op_test._calc_dygraph_output( - place, no_check_set=no_check_set) + place, no_check_set=no_check_set + ) def find_actual_value(self, name): with fluid.dygraph.base.guard(place=place): imperative_actual = find_imperative_actual( - name, self.outputs, place) + name, self.outputs, place + ) imperative_actual_t = np.array( - imperative_actual.value().get_tensor()) + imperative_actual.value().get_tensor() + ) return imperative_actual, imperative_actual_t def convert_uint16_to_float_ifneed(self, actual_np, expect_np): if actual_np.dtype == np.uint16 and expect_np.dtype in [ - np.float32, np.float64 + np.float32, + np.float64, ]: - self.rtol = 1.e-2 + self.rtol = 1.0e-2 else: - self.rtol = 1.e-5 + self.rtol = 1.0e-5 if self.op_test.is_bfloat16_op(): if actual_np.dtype == np.uint16: actual_np = convert_uint16_to_float(actual_np) @@ -1568,20 +1754,27 @@ class OpTest(unittest.TestCase): return actual_np, expect_np def _compare_list(self, name, actual, expect): - """ if expect is a tuple, we need to compare list. - """ + """if expect is a tuple, we need to compare list.""" with fluid.dygraph.base.guard(place=place): self.op_test.assertListEqual( - actual.value().get_tensor().recursive_sequence_lengths( - ), expect[1], - "Output (" + name + ") has different lod at " + - str(place) + " in dygraph mode") + actual.value() + .get_tensor() + .recursive_sequence_lengths(), + expect[1], + "Output (" + + name + + ") has different lod at " + + str(place) + + " in dygraph mode", + ) def _compare_numpy(self, name, actual_np, expect_np): - if functools.reduce(lambda x, y: x * y, actual_np.shape, - 1) == 0 and functools.reduce( - lambda x, y: x * y, expect_np.shape, - 1) == 0: + if ( + functools.reduce(lambda x, y: x * y, actual_np.shape, 1) + == 0 + and functools.reduce(lambda x, y: x * y, expect_np.shape, 1) + == 0 + ): pass else: self.op_test.assertTrue( @@ -1590,12 +1783,17 @@ class OpTest(unittest.TestCase): expect_np, atol=atol, rtol=self.rtol if hasattr(self, 'rtol') else 1e-5, - equal_nan=equal_nan), - "Output (" + name + ") has diff at " + str(place) + - " in " + self.checker_name) + equal_nan=equal_nan, + ), + "Output (" + + name + + ") has diff at " + + str(place) + + " in " + + self.checker_name, + ) class EagerChecker(DygraphChecker): - def init(self): self.checker_name = "eager checker" @@ -1604,12 +1802,14 @@ class OpTest(unittest.TestCase): with _test_eager_guard(): self.is_python_api_test = True eager_dygraph_outs = self.op_test._calc_python_api_output( - place) + place + ) if eager_dygraph_outs is None: self.is_python_api_test = False # missing KernelSignature, fall back to eager middle output. 
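# i.e. when _calc_python_api_output cannot resolve a kernel signature for
# self.op_type it returns None, and the eager checker reuses the legacy
# dygraph path so the outputs can still be compared against self.outputs.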
eager_dygraph_outs = self.op_test._calc_dygraph_output( - place, no_check_set=no_check_set) + place, no_check_set=no_check_set + ) self.outputs = eager_dygraph_outs def _compare_numpy(self, name, actual_np, expect_np): @@ -1619,30 +1819,35 @@ class OpTest(unittest.TestCase): def convert_uint16_to_float_ifneed(self, actual_np, expect_np): with _test_eager_guard(): return super().convert_uint16_to_float_ifneed( - actual_np, expect_np) + actual_np, expect_np + ) def find_actual_value(self, name): with _test_eager_guard(): return super().find_actual_value(name) def _compare_list(self, name, actual, expect): - """ if expect is a tuple, we need to compare list. - """ + """if expect is a tuple, we need to compare list.""" with _test_eager_guard(): super()._compare_list(name, actual, expect) def _is_skip_name(self, name): # if in final state and kernel signature don't have name, then skip it. - if self.is_python_api_test and hasattr( - self.op_test, "python_out_sig" - ) and name not in self.op_test.python_out_sig: + if ( + self.is_python_api_test + and hasattr(self.op_test, "python_out_sig") + and name not in self.op_test.python_out_sig + ): return True return super()._is_skip_name(name) # set some flags by the combination of arguments. self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs) - if self.dtype == np.float64 and \ - self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST: + if ( + self.dtype == np.float64 + and self.op_type + not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST + ): atol = 0 if self.is_bfloat16_op(): @@ -1650,7 +1855,8 @@ class OpTest(unittest.TestCase): check_dygraph = False check_eager = False if hasattr(self, 'force_fp32_output') and getattr( - self, 'force_fp32_output'): + self, 'force_fp32_output' + ): atol = 1e-2 else: atol = 2 @@ -1658,9 +1864,13 @@ class OpTest(unittest.TestCase): atol = 1e-1 if no_check_set is not None: - if self.op_type not in no_check_set_white_list.no_check_set_white_list: + if ( + self.op_type + not in no_check_set_white_list.no_check_set_white_list + ): raise AssertionError( - "no_check_set of op %s must be set to None." % self.op_type) + "no_check_set of op %s must be set to None." % self.op_type + ) static_checker = StaticChecker(self, self.outputs) static_checker.check() outs, fetch_list = static_checker.outputs, static_checker.fetch_list @@ -1691,13 +1901,15 @@ class OpTest(unittest.TestCase): # Check inplace for given op, its grad op, its grad_grad op, etc. # No effect on original OpTest # Currently not support ParallelExecutor on XPUPlace. 
- if not paddle.is_compiled_with_xpu( - ) and not paddle.is_compiled_with_npu( - ) and not paddle.is_compiled_with_mlu() and not isinstance( - place, core.CustomPlace): - self.check_inplace_output_with_place(place, - no_check_set=no_check_set, - inplace_atol=inplace_atol) + if ( + not paddle.is_compiled_with_xpu() + and not paddle.is_compiled_with_npu() + and not paddle.is_compiled_with_mlu() + and not isinstance(place, core.CustomPlace) + ): + self.check_inplace_output_with_place( + place, no_check_set=no_check_set, inplace_atol=inplace_atol + ) if check_eager: assert check_dygraph == False @@ -1708,10 +1920,10 @@ class OpTest(unittest.TestCase): return outs, fetch_list def check_compile_vs_runtime(self, fetch_list, fetch_outs): - def find_fetch_index(target_name, fetch_list): found = [ - i for i, var_name in enumerate(fetch_list) + i + for i, var_name in enumerate(fetch_list) if var_name == target_name ] if len(found) == 0: @@ -1719,7 +1931,8 @@ class OpTest(unittest.TestCase): else: self.assertTrue( len(found) == 1, - "Found {} {}".format(len(found), target_name)) + "Found {} {}".format(len(found), target_name), + ) return found[0] for name in self.op.desc.output_names(): @@ -1745,16 +1958,22 @@ class OpTest(unittest.TestCase): else: lod_level_compile = 0 self.assertEqual( - lod_level_compile, lod_level_runtime, - "The lod_level of Output (" + name + - ") is different between compile-time and runtime (" + - str(lod_level_compile) + " vs " + str(lod_level_runtime) + - ")") + lod_level_compile, + lod_level_runtime, + "The lod_level of Output (" + + name + + ") is different between compile-time and runtime (" + + str(lod_level_compile) + + " vs " + + str(lod_level_runtime) + + ")", + ) def _get_places(self): if self.dtype == np.float16: if core.is_compiled_with_cuda() and core.op_support_gpu( - self.op_type): + self.op_type + ): place = core.CUDAPlace(0) if core.is_float16_supported(place): return [place] @@ -1764,18 +1983,23 @@ class OpTest(unittest.TestCase): return [] places = [fluid.CPUPlace()] cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False - if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\ - and not cpu_only: + if ( + core.is_compiled_with_cuda() + and core.op_support_gpu(self.op_type) + and not cpu_only + ): places.append(core.CUDAPlace(0)) return places - def check_output(self, - atol=1e-5, - no_check_set=None, - equal_nan=False, - check_dygraph=True, - inplace_atol=None, - check_eager=False): + def check_output( + self, + atol=1e-5, + no_check_set=None, + equal_nan=False, + check_dygraph=True, + inplace_atol=None, + check_eager=False, + ): # disable legacy dygraph check when check_eager is True if check_eager == True: @@ -1790,13 +2014,15 @@ class OpTest(unittest.TestCase): places = self._get_places() for place in places: - res = self.check_output_with_place(place, - atol, - no_check_set, - equal_nan, - check_dygraph, - inplace_atol, - check_eager=check_eager) + res = self.check_output_with_place( + place, + atol, + no_check_set, + equal_nan, + check_dygraph, + inplace_atol, + check_eager=check_eager, + ) if check_eager: assert check_dygraph == False outs, eager_dygraph_outs, fetch_list = res @@ -1804,7 +2030,10 @@ class OpTest(unittest.TestCase): outs, dygraph_outs, fetch_list = res else: outs, fetch_list = res - if self.op_type not in compile_vs_runtime_white_list.COMPILE_RUN_OP_WHITE_LIST: + if ( + self.op_type + not in compile_vs_runtime_white_list.COMPILE_RUN_OP_WHITE_LIST + ): self.check_compile_vs_runtime(fetch_list, outs) def 
check_output_customized(self, checker, custom_place=None): @@ -1823,8 +2052,14 @@ class OpTest(unittest.TestCase): outs.sort(key=len) checker(outs) - def _assert_is_close(self, numeric_grads, analytic_grads, names, - max_relative_error, msg_prefix): + def _assert_is_close( + self, + numeric_grads, + analytic_grads, + names, + max_relative_error, + msg_prefix, + ): for a, b, name in zip(numeric_grads, analytic_grads, names): # It asserts np.abs(a - b) / np.abs(a) < max_relative_error, in which # max_relative_error is 1e-7. According to the value of np.abs(a), we @@ -1834,8 +2069,11 @@ class OpTest(unittest.TestCase): # which is the same as np.abs(a - b) / np.abs(a) < max_relative_error*1e4. abs_a = np.abs(a) if abs_a.ndim > 0: - if self.dtype == np.float64 and \ - self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST: + if ( + self.dtype == np.float64 + and self.op_type + not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST + ): abs_a[abs_a < 1e-10] = 1e-3 abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4 abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2 @@ -1844,8 +2082,11 @@ class OpTest(unittest.TestCase): else: abs_a[abs_a < 1e-3] = 1 elif abs_a.ndim == 0: - if self.dtype == np.float64 and \ - self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST: + if ( + self.dtype == np.float64 + and self.op_type + not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST + ): if abs_a < 1e-10: abs_a = 1e-3 elif abs_a > 1e-10 and abs_a <= 1e-8: @@ -1862,10 +2103,21 @@ class OpTest(unittest.TestCase): def err_msg(): offset = np.argmax(diff_mat > max_relative_error) - return ("Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff %e over limit %e, " - "the first error element is %d, expected %e, but got %e.") \ - % (self.op_type, msg_prefix, name, str(a.shape), self.dtype, max_diff, max_relative_error, - offset, a.flatten()[offset], b.flatten()[offset]) + return ( + "Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff %e over limit %e, " + "the first error element is %d, expected %e, but got %e." 
+ ) % ( + self.op_type, + msg_prefix, + name, + str(a.shape), + self.dtype, + max_diff, + max_relative_error, + offset, + a.flatten()[offset], + b.flatten()[offset], + ) self.assertLessEqual(max_diff, max_relative_error, err_msg()) @@ -1876,17 +2128,19 @@ class OpTest(unittest.TestCase): if self.dtype == np.float64: self.__class__.exist_fp64_check_grad = True - def check_grad(self, - inputs_to_check, - output_names, - no_grad_set=None, - numeric_grad_delta=0.005, - in_place=False, - max_relative_error=0.005, - user_defined_grads=None, - user_defined_grad_outputs=None, - check_dygraph=True, - check_eager=False): + def check_grad( + self, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grads=None, + user_defined_grad_outputs=None, + check_dygraph=True, + check_eager=False, + ): # disable legacy dygraph check when check_eager is True if check_eager == True: @@ -1895,31 +2149,35 @@ class OpTest(unittest.TestCase): self._check_grad_helper() places = self._get_places() for place in places: - self.check_grad_with_place(place, - inputs_to_check, - output_names, - no_grad_set, - numeric_grad_delta, - in_place, - max_relative_error, - user_defined_grads, - user_defined_grad_outputs, - check_dygraph, - check_eager=check_eager) - - def check_grad_with_place(self, - place, - inputs_to_check, - output_names, - no_grad_set=None, - numeric_grad_delta=0.005, - in_place=False, - max_relative_error=0.005, - user_defined_grads=None, - user_defined_grad_outputs=None, - check_dygraph=True, - numeric_place=None, - check_eager=False): + self.check_grad_with_place( + place, + inputs_to_check, + output_names, + no_grad_set, + numeric_grad_delta, + in_place, + max_relative_error, + user_defined_grads, + user_defined_grad_outputs, + check_dygraph, + check_eager=check_eager, + ) + + def check_grad_with_place( + self, + place, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grads=None, + user_defined_grad_outputs=None, + check_dygraph=True, + numeric_place=None, + check_eager=False, + ): # disable legacy dygraph check when check_eager is True if check_eager == True: @@ -1935,8 +2193,11 @@ class OpTest(unittest.TestCase): check_dygraph = False check_eager = False - if self.dtype == np.float64 and \ - self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST: + if ( + self.dtype == np.float64 + and self.op_type + not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST + ): numeric_grad_delta = 1e-5 max_relative_error = 1e-7 @@ -1950,12 +2211,14 @@ class OpTest(unittest.TestCase): op_attrs["use_mkldnn"] = False use_onednn = True - self.op = create_op(self.scope, - self.op_type, - op_inputs, - op_outputs, - op_attrs, - cache_list=cache_list) + self.op = create_op( + self.scope, + self.op_type, + op_inputs, + op_outputs, + op_attrs, + cache_list=cache_list, + ) if use_onednn: op_attrs["use_mkldnn"] = True @@ -1963,18 +2226,25 @@ class OpTest(unittest.TestCase): if no_grad_set is None: no_grad_set = set() else: - if (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST - ) and (self.op_type - not in no_grad_set_white_list.NOT_CHECK_OP_LIST) and ( - not self.is_bfloat16_op()): - raise AssertionError("no_grad_set must be None, op_type is " + - self.op_type + " Op.") + if ( + (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST) + and ( + self.op_type not in 
no_grad_set_white_list.NOT_CHECK_OP_LIST + ) + and (not self.is_bfloat16_op()) + ): + raise AssertionError( + "no_grad_set must be None, op_type is " + + self.op_type + + " Op." + ) for input_to_check in inputs_to_check: set_input(self.scope, self.op, self.inputs, place) tensor_to_check = self.scope.find_var(input_to_check).get_tensor() - tensor_size = functools.reduce(lambda a, b: a * b, - tensor_to_check.shape(), 1) + tensor_size = functools.reduce( + lambda a, b: a * b, tensor_to_check.shape(), 1 + ) tensor_ndim = len(tensor_to_check.shape()) # for 0D Tensor, it's additional case for OP, so not raise error if tensor_ndim > 0 and tensor_size < 100: @@ -1987,26 +2257,34 @@ class OpTest(unittest.TestCase): numeric_place = place numeric_grads = user_defined_grads or [ - get_numeric_gradient(numeric_place, - self.scope, - self.op, - self.inputs, - input_to_check, - output_names, - delta=numeric_grad_delta, - in_place=in_place) + get_numeric_gradient( + numeric_place, + self.scope, + self.op, + self.inputs, + input_to_check, + output_names, + delta=numeric_grad_delta, + in_place=in_place, + ) for input_to_check in inputs_to_check ] - analytic_grads = self._get_gradient(inputs_to_check, place, - output_names, no_grad_set, - user_defined_grad_outputs) + analytic_grads = self._get_gradient( + inputs_to_check, + place, + output_names, + no_grad_set, + user_defined_grad_outputs, + ) # comparison of bf16 results will happen as fp32 # loop over list of grads and convert bf16 to fp32 fp32_analytic_grads = [] for grad in analytic_grads: if grad.dtype == np.uint16: grad = convert_uint16_to_float(grad) - max_relative_error = 0.04 if max_relative_error < 0.04 else max_relative_error + max_relative_error = ( + 0.04 if max_relative_error < 0.04 else max_relative_error + ) fp32_analytic_grads.append(grad) analytic_grads = fp32_analytic_grads @@ -2014,32 +2292,50 @@ class OpTest(unittest.TestCase): for grad in numeric_grads: if grad.dtype == np.uint16: grad = convert_uint16_to_float(grad) - max_relative_error = 0.04 if max_relative_error < 0.04 else max_relative_error + max_relative_error = ( + 0.04 if max_relative_error < 0.04 else max_relative_error + ) fp32_numeric_grads.append(grad) numeric_grads = fp32_numeric_grads - self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check, - max_relative_error, - "Gradient Check On %s" % str(place)) + self._assert_is_close( + numeric_grads, + analytic_grads, + inputs_to_check, + max_relative_error, + "Gradient Check On %s" % str(place), + ) if check_dygraph: # ensure switch into legacy dygraph g_enable_legacy_dygraph() - dygraph_grad = self._get_dygraph_grad(inputs_to_check, place, - output_names, - user_defined_grad_outputs, - no_grad_set, False) + dygraph_grad = self._get_dygraph_grad( + inputs_to_check, + place, + output_names, + user_defined_grad_outputs, + no_grad_set, + False, + ) fp32_grads = [] for grad in dygraph_grad: if grad.dtype == np.uint16: grad = convert_uint16_to_float(grad) - max_relative_error = 0.03 if max_relative_error < 0.03 else max_relative_error + max_relative_error = ( + 0.03 + if max_relative_error < 0.03 + else max_relative_error + ) fp32_grads.append(grad) dygraph_grad = fp32_grads - self._assert_is_close(numeric_grads, dygraph_grad, inputs_to_check, - max_relative_error, - "Gradient Check On %s" % str(place)) + self._assert_is_close( + numeric_grads, + dygraph_grad, + inputs_to_check, + max_relative_error, + "Gradient Check On %s" % str(place), + ) # ensure switch back eager dygraph g_disable_legacy_dygraph() @@ -2047,18 
+2343,31 @@ class OpTest(unittest.TestCase): with fluid.dygraph.base.guard(place): with _test_eager_guard(): eager_dygraph_grad = self._get_dygraph_grad( - inputs_to_check, place, output_names, - user_defined_grad_outputs, no_grad_set, check_eager) + inputs_to_check, + place, + output_names, + user_defined_grad_outputs, + no_grad_set, + check_eager, + ) fp32_grads = [] for grad in eager_dygraph_grad: if grad.dtype == np.uint16: grad = convert_uint16_to_float(grad) - max_relative_error = 0.03 if max_relative_error < 0.03 else max_relative_error + max_relative_error = ( + 0.03 + if max_relative_error < 0.03 + else max_relative_error + ) fp32_grads.append(grad) eager_dygraph_grad = fp32_grads - self._assert_is_close(numeric_grads, eager_dygraph_grad, - inputs_to_check, max_relative_error, - "Gradient Check On %s" % str(place)) + self._assert_is_close( + numeric_grads, + eager_dygraph_grad, + inputs_to_check, + max_relative_error, + "Gradient Check On %s" % str(place), + ) def _find_var_in_dygraph(self, output_vars, name): if name in output_vars: @@ -2069,13 +2378,15 @@ class OpTest(unittest.TestCase): if output_vars_selected.name == name: return output_vars_selected - def _get_dygraph_grad(self, - inputs_to_check, - place, - output_names, - user_defined_grad_outputs=None, - no_grad_set=None, - check_eager=False): + def _get_dygraph_grad( + self, + inputs_to_check, + place, + output_names, + user_defined_grad_outputs=None, + no_grad_set=None, + check_eager=False, + ): with fluid.dygraph.base.guard(place=place): block = fluid.default_main_program().global_block() @@ -2083,11 +2394,13 @@ class OpTest(unittest.TestCase): # prepare input variable inputs, inputs_grad_dict = self.append_input_output_for_dygraph( - op_proto, self.inputs, True, True, block) + op_proto, self.inputs, True, True, block + ) # prepare output variable outputs = self.append_input_output_for_dygraph( - op_proto, self.outputs, False, False, block) + op_proto, self.outputs, False, False, block + ) # prepare attributes attrs_outputs = {} @@ -2098,37 +2411,42 @@ class OpTest(unittest.TestCase): if check_eager: eager_outputs = self._calc_python_api_output( - place, inputs, outputs) + place, inputs, outputs + ) # if outputs is None, kernel sig is empty or other error is happens. 
if not check_eager or eager_outputs is None: block.append_op( type=self.op_type, inputs=inputs, outputs=outputs, - attrs=attrs_outputs if hasattr(self, "attrs") else None) + attrs=attrs_outputs if hasattr(self, "attrs") else None, + ) else: outputs = eager_outputs if self.dtype == np.uint16: - cast_inputs = self._find_var_in_dygraph(outputs, - output_names[0]) - cast_outputs = block.create_var(dtype="float32", - shape=cast_inputs[0].shape) - cast_op = block.append_op(inputs={"X": cast_inputs}, - outputs={"Out": cast_outputs}, - type="cast", - attrs={ - "in_dtype": - core.VarDesc.VarType.BF16, - "out_dtype": - core.VarDesc.VarType.FP32 - }) + cast_inputs = self._find_var_in_dygraph( + outputs, output_names[0] + ) + cast_outputs = block.create_var( + dtype="float32", shape=cast_inputs[0].shape + ) + cast_op = block.append_op( + inputs={"X": cast_inputs}, + outputs={"Out": cast_outputs}, + type="cast", + attrs={ + "in_dtype": core.VarDesc.VarType.BF16, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) outputs = {output_names[0]: cast_outputs} outputs_valid = {} for output_name in output_names: outputs_valid[output_name] = self._find_var_in_dygraph( - outputs, output_name) + outputs, output_name + ) if user_defined_grad_outputs is None: if len(outputs_valid) == 1: @@ -2137,13 +2455,15 @@ class OpTest(unittest.TestCase): type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=False, - shape=[1]) + shape=[1], + ) for outputs_valid_key in outputs_valid: block.append_op( type="mean", inputs={"X": outputs_valid[outputs_valid_key]}, outputs={"Out": [loss]}, - attrs=None) + attrs=None, + ) else: avg_sum = [] for cur_loss in outputs_valid: @@ -2151,32 +2471,41 @@ class OpTest(unittest.TestCase): dtype=self.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - block.append_op(type="mean", - inputs={"X": outputs_valid[cur_loss]}, - outputs={"Out": [cur_avg_loss]}, - attrs=None) + stop_gradient=False, + ) + block.append_op( + type="mean", + inputs={"X": outputs_valid[cur_loss]}, + outputs={"Out": [cur_avg_loss]}, + attrs=None, + ) avg_sum.append(cur_avg_loss) loss_sum = block.create_var( dtype=self.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=False, - shape=[1]) - block.append_op(type='sum', - inputs={"X": avg_sum}, - outputs={"Out": loss_sum}, - attrs=None) + shape=[1], + ) + block.append_op( + type='sum', + inputs={"X": avg_sum}, + outputs={"Out": loss_sum}, + attrs=None, + ) loss = block.create_var( dtype=self.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=False, - shape=[1]) - block.append_op(type='scale', - inputs={"X": loss_sum}, - outputs={"Out": loss}, - attrs={'scale': 1.0 / float(len(avg_sum))}) + shape=[1], + ) + block.append_op( + type='scale', + inputs={"X": loss_sum}, + outputs={"Out": loss}, + attrs={'scale': 1.0 / float(len(avg_sum))}, + ) loss.backward() fetch_list_grad = [] @@ -2193,11 +2522,12 @@ class OpTest(unittest.TestCase): grad_outputs.append(paddle.to_tensor(grad_out_value)) # delete the inputs which no need to calculate grad for no_grad_val in no_grad_set: - del (inputs[no_grad_val]) + del inputs[no_grad_val] if not _in_legacy_dygraph(): - core.eager.run_backward(fluid.layers.utils.flatten(outputs), - grad_outputs, False) + core.eager.run_backward( + fluid.layers.utils.flatten(outputs), grad_outputs, False + ) grad_inputs = [] for inputs_list in inputs.values(): for inp in inputs_list: @@ -2207,7 +2537,8 @@ class OpTest(unittest.TestCase): grad_inputs = 
paddle.grad( outputs=fluid.layers.utils.flatten(outputs), inputs=fluid.layers.utils.flatten(inputs), - grad_outputs=grad_outputs) + grad_outputs=grad_outputs, + ) return [grad.numpy() for grad in grad_inputs] @staticmethod @@ -2230,13 +2561,15 @@ class OpTest(unittest.TestCase): def np_value_to_fluid_value(input): return input - def _get_gradient(self, - input_to_check, - place, - output_names, - no_grad_set, - user_defined_grad_outputs=None, - parallel=False): + def _get_gradient( + self, + input_to_check, + place, + output_names, + no_grad_set, + user_defined_grad_outputs=None, + parallel=False, + ): prog = Program() scope = core.Scope() block = prog.global_block() @@ -2249,36 +2582,43 @@ class OpTest(unittest.TestCase): if user_defined_grad_outputs is None: if self.dtype == np.uint16: cast_inputs = list(map(block.var, output_names)) - cast_outputs = block.create_var(dtype="float32", - shape=cast_inputs[0].shape) - cast_op = block.append_op(inputs={"X": cast_inputs}, - outputs={"Out": cast_outputs}, - type="cast", - attrs={ - "in_dtype": - core.VarDesc.VarType.BF16, - "out_dtype": - core.VarDesc.VarType.FP32 - }) + cast_outputs = block.create_var( + dtype="float32", shape=cast_inputs[0].shape + ) + cast_op = block.append_op( + inputs={"X": cast_inputs}, + outputs={"Out": cast_outputs}, + type="cast", + attrs={ + "in_dtype": core.VarDesc.VarType.BF16, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) cast_op.desc.infer_var_type(block.desc) cast_op.desc.infer_shape(block.desc) output_names = [cast_outputs.name] loss = append_loss_ops(block, output_names) - param_grad_list = append_backward(loss=loss, - parameter_list=input_to_check, - no_grad_set=no_grad_set) + param_grad_list = append_backward( + loss=loss, + parameter_list=input_to_check, + no_grad_set=no_grad_set, + ) fetch_list = [g for p, g in param_grad_list] else: - assert parallel is False, "unsupported parallel mode when giving custom grad outputs." + assert ( + parallel is False + ), "unsupported parallel mode when giving custom grad outputs." 
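# A minimal NumPy sketch of the loss reduction that _get_dygraph_grad builds
# above when no user-defined grad outputs are given: each checked output is
# reduced with a "mean" op, the means are summed, and the sum is scaled by
# 1/len, so the backward pass runs from the average of the per-output means.
# The arrays `out_a` and `out_b` below are hypothetical stand-ins for two
# checked outputs, not names taken from the patch.
import numpy as np

out_a = np.random.rand(2, 3)   # hypothetical first output of the tested op
out_b = np.random.rand(4)      # hypothetical second output of the tested op
avg_sum = [out_a.mean(), out_b.mean()]              # one "mean" per output
loss = sum(avg_sum) * (1.0 / float(len(avg_sum)))   # "sum" then "scale" by 1/len
# `loss` is the scalar whose backward pass produces the dygraph gradients.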
# user_defined_grad_outputs here are numpy arrays if not isinstance(user_defined_grad_outputs, list): user_defined_grad_outputs = [user_defined_grad_outputs] grad_outputs = [] for grad_out_value in user_defined_grad_outputs: # `presistable` is used to avoid executor create new var in local scope - var = block.create_var(shape=grad_out_value.shape, - dtype=grad_out_value.dtype, - persistable=True) + var = block.create_var( + shape=grad_out_value.shape, + dtype=grad_out_value.dtype, + persistable=True, + ) true_var = scope.var(var.name) tensor = true_var.get_tensor() tensor.set(grad_out_value, place) @@ -2287,8 +2627,9 @@ class OpTest(unittest.TestCase): outputs[name] for name in outputs if name in output_names ] inputs = [inputs[name] for name in input_to_check if name in inputs] - grad_inputs = paddle.static.gradients(targets, inputs, grad_outputs, - no_grad_set) + grad_inputs = paddle.static.gradients( + targets, inputs, grad_outputs, no_grad_set + ) fetch_list = grad_inputs if parallel: @@ -2296,21 +2637,21 @@ class OpTest(unittest.TestCase): if isinstance(place, fluid.CUDAPlace): use_cuda = True compiled_prog = fluid.CompiledProgram(prog).with_data_parallel( - loss_name=loss.name, places=place) + loss_name=loss.name, places=place + ) prog = compiled_prog executor = fluid.Executor(place) return list( map( np.array, - executor.run(prog, - feed_dict, - fetch_list, - scope=scope, - return_numpy=False))) + executor.run( + prog, feed_dict, fetch_list, scope=scope, return_numpy=False + ), + ) + ) class OpTestTool: - @classmethod def skip_if(cls, condition: object, reason: str): return unittest.skipIf(condition, reason) @@ -2318,12 +2659,16 @@ class OpTestTool: @classmethod def skip_if_not_cpu_bf16(cls): return OpTestTool.skip_if( - not (isinstance(_current_expected_place(), core.CPUPlace) - and core.supports_bfloat16()), - "Place does not support BF16 evaluation") + not ( + isinstance(_current_expected_place(), core.CPUPlace) + and core.supports_bfloat16() + ), + "Place does not support BF16 evaluation", + ) @classmethod def skip_if_not_cpu(cls): return OpTestTool.skip_if( not isinstance(_current_expected_place(), core.CPUPlace), - "OneDNN supports only CPU for now") + "OneDNN supports only CPU for now", + ) diff --git a/python/paddle/fluid/tests/unittests/op_test_xpu.py b/python/paddle/fluid/tests/unittests/op_test_xpu.py index 82c8eb73969f153c8e4b7af63d3e7561d25144ea..220bd09f2cafa06abe532b2ee49630d106084d82 100644 --- a/python/paddle/fluid/tests/unittests/op_test_xpu.py +++ b/python/paddle/fluid/tests/unittests/op_test_xpu.py @@ -22,11 +22,14 @@ from paddle.fluid.framework import Program, convert_np_dtype_to_dtype_ from testsuite import append_loss_ops, create_op, set_input from white_list import op_threshold_white_list, no_grad_set_white_list from op_test import OpTest -from xpu.get_test_cover_info import is_empty_grad_op_type, get_xpu_op_support_types, type_dict_str_to_numpy +from xpu.get_test_cover_info import ( + is_empty_grad_op_type, + get_xpu_op_support_types, + type_dict_str_to_numpy, +) class XPUOpTest(OpTest): - @classmethod def setUpClass(cls): '''Fix random seeds to remove randomness from tests''' @@ -60,25 +63,36 @@ class XPUOpTest(OpTest): places = [paddle.XPUPlace(0)] return places - def check_output(self, - atol=0.001, - no_check_set=None, - equal_nan=False, - check_dygraph=True, - inplace_atol=None, - check_eager=False): + def check_output( + self, + atol=0.001, + no_check_set=None, + equal_nan=False, + check_dygraph=True, + inplace_atol=None, + check_eager=False, + ): 
place = paddle.XPUPlace(0) - self.check_output_with_place(place, atol, no_check_set, equal_nan, - check_dygraph, inplace_atol, check_eager) - - def check_output_with_place(self, - place, - atol=0.001, - no_check_set=None, - equal_nan=False, - check_dygraph=True, - inplace_atol=None, - check_eager=False): + self.check_output_with_place( + place, + atol, + no_check_set, + equal_nan, + check_dygraph, + inplace_atol, + check_eager, + ) + + def check_output_with_place( + self, + place, + atol=0.001, + no_check_set=None, + equal_nan=False, + check_dygraph=True, + inplace_atol=None, + check_eager=False, + ): self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs) if self.dtype == np.float64: return @@ -89,46 +103,60 @@ class XPUOpTest(OpTest): if self.dtype == np.float16: atol = 0.1 - return super().check_output_with_place(place, atol, no_check_set, - equal_nan, check_dygraph, - inplace_atol) - - def check_grad(self, - inputs_to_check, - output_names, - no_grad_set=None, - numeric_grad_delta=0.005, - in_place=False, - max_relative_error=0.005, - user_defined_grads=None, - user_defined_grad_outputs=None, - check_dygraph=True, - numeric_place=None, - check_eager=False): + return super().check_output_with_place( + place, atol, no_check_set, equal_nan, check_dygraph, inplace_atol + ) + + def check_grad( + self, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grads=None, + user_defined_grad_outputs=None, + check_dygraph=True, + numeric_place=None, + check_eager=False, + ): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, inputs_to_check, output_names, - no_grad_set, numeric_grad_delta, in_place, - max_relative_error, user_defined_grads, - user_defined_grad_outputs, check_dygraph, - numeric_place, check_eager) - - def check_grad_with_place(self, - place, - inputs_to_check, - output_names, - no_grad_set=None, - numeric_grad_delta=0.005, - in_place=False, - max_relative_error=0.005, - user_defined_grads=None, - user_defined_grad_outputs=None, - check_dygraph=True, - numeric_place=None, - check_eager=False): + self.check_grad_with_place( + place, + inputs_to_check, + output_names, + no_grad_set, + numeric_grad_delta, + in_place, + max_relative_error, + user_defined_grads, + user_defined_grad_outputs, + check_dygraph, + numeric_place, + check_eager, + ) + + def check_grad_with_place( + self, + place, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grads=None, + user_defined_grad_outputs=None, + check_dygraph=True, + numeric_place=None, + check_eager=False, + ): if hasattr(self, 'op_type_need_check_grad'): xpu_version = core.get_xpu_device_version(0) - if is_empty_grad_op_type(xpu_version, self.op_type, - self.in_type_str): + if is_empty_grad_op_type( + xpu_version, self.op_type, self.in_type_str + ): self._check_grad_helper() return @@ -137,7 +165,7 @@ class XPUOpTest(OpTest): for ctype in cast_grad_op_types: cast_grad_op_types_np.append(type_dict_str_to_numpy[ctype]) - if (self.dtype not in cast_grad_op_types_np): + if self.dtype not in cast_grad_op_types_np: return if self.dtype == np.float64: @@ -150,51 +178,73 @@ class XPUOpTest(OpTest): if self.dtype == np.float16: max_relative_error = 1.0 return super().check_grad_with_place( - place, inputs_to_check, output_names, no_grad_set, - numeric_grad_delta, in_place, max_relative_error, - user_defined_grads, user_defined_grad_outputs, 
check_dygraph) + place, + inputs_to_check, + output_names, + no_grad_set, + numeric_grad_delta, + in_place, + max_relative_error, + user_defined_grads, + user_defined_grad_outputs, + check_dygraph, + ) a1 = self.get_grad_with_place( place, inputs_to_check, output_names, no_grad_set=no_grad_set, - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) a2 = self.get_grad_with_place( place, inputs_to_check, output_names, no_grad_set=no_grad_set, - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) a3 = self.get_grad_with_place( paddle.CPUPlace(), inputs_to_check, output_names, no_grad_set=no_grad_set, - user_defined_grad_outputs=user_defined_grad_outputs) - self._assert_is_close(a1, a2, inputs_to_check, 0.00000001, - "Gradient Check On two xpu") - self._assert_is_close(a1, a3, inputs_to_check, max_relative_error, - "Gradient Check On cpu & xpu") - - def get_grad_with_place(self, - place, - inputs_to_check, - output_names, - no_grad_set=None, - numeric_grad_delta=0.005, - in_place=False, - max_relative_error=0.005, - user_defined_grad_outputs=None, - check_dygraph=True): + user_defined_grad_outputs=user_defined_grad_outputs, + ) + self._assert_is_close( + a1, a2, inputs_to_check, 0.00000001, "Gradient Check On two xpu" + ) + self._assert_is_close( + a1, + a3, + inputs_to_check, + max_relative_error, + "Gradient Check On cpu & xpu", + ) + + def get_grad_with_place( + self, + place, + inputs_to_check, + output_names, + no_grad_set=None, + numeric_grad_delta=0.005, + in_place=False, + max_relative_error=0.005, + user_defined_grad_outputs=None, + check_dygraph=True, + ): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else dict() self._check_grad_helper() - if self.dtype == np.float64 and \ - self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST: + if ( + self.dtype == np.float64 + and self.op_type + not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST + ): numeric_grad_delta = 1e-5 max_relative_error = 1e-7 @@ -213,12 +263,14 @@ class XPUOpTest(OpTest): for mtype in mean_grad_op_types: mean_grad_op_types_np.append(type_dict_str_to_numpy[mtype]) - self.op = create_op(self.scope, - self.op_type, - op_inputs, - op_outputs, - op_attrs, - cache_list=cache_list) + self.op = create_op( + self.scope, + self.op_type, + op_inputs, + op_outputs, + op_attrs, + cache_list=cache_list, + ) if use_onednn: op_attrs["use_mkldnn"] = True @@ -226,12 +278,18 @@ class XPUOpTest(OpTest): if no_grad_set is None: no_grad_set = set() else: - if (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST - ) and (self.op_type - not in no_grad_set_white_list.NOT_CHECK_OP_LIST) and ( - not self.is_bfloat16_op()): - raise AssertionError("no_grad_set must be None, op_type is " + - self.op_type + " Op.") + if ( + (self.op_type not in no_grad_set_white_list.NEED_TO_FIX_OP_LIST) + and ( + self.op_type not in no_grad_set_white_list.NOT_CHECK_OP_LIST + ) + and (not self.is_bfloat16_op()) + ): + raise AssertionError( + "no_grad_set must be None, op_type is " + + self.op_type + + " Op." 
+ ) for input_to_check in inputs_to_check: set_input(self.scope, self.op, self.inputs, place) @@ -239,7 +297,7 @@ class XPUOpTest(OpTest): if not type(output_names) is list: output_names = [output_names] - if (self.dtype not in mean_grad_op_types_np): + if self.dtype not in mean_grad_op_types_np: prog = Program() block = prog.global_block() @@ -250,18 +308,18 @@ class XPUOpTest(OpTest): outputs = self._get_outputs(block) feed_dict = self.feed_var(inputs, place) cast_inputs = list(map(block.var, output_names)) - cast_outputs = block.create_var(dtype="float32", - shape=cast_inputs[0].shape) - cast_op = block.append_op(type="cast", - inputs={"X": cast_inputs}, - outputs={"Out": cast_outputs}, - attrs={ - "in_dtype": - convert_np_dtype_to_dtype_( - self.dtype), - "out_dtype": - core.VarDesc.VarType.FP32 - }) + cast_outputs = block.create_var( + dtype="float32", shape=cast_inputs[0].shape + ) + cast_op = block.append_op( + type="cast", + inputs={"X": cast_inputs}, + outputs={"Out": cast_outputs}, + attrs={ + "in_dtype": convert_np_dtype_to_dtype_(self.dtype), + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) cast_op.desc.infer_var_type(block.desc) cast_op.desc.infer_shape(block.desc) @@ -270,41 +328,48 @@ class XPUOpTest(OpTest): loss = append_loss_ops(block, output_names) loss_names = [loss.name] recast_inputs = list(map(block.var, loss_names)) - recast_loss = block.create_var(dtype=self.dtype, - shape=recast_inputs[0].shape) - - recast_op = block.append_op(type="cast", - inputs={"X": recast_inputs}, - outputs={"Out": recast_loss}, - attrs={ - "in_dtype": - core.VarDesc.VarType.FP32, - "out_dtype": - convert_np_dtype_to_dtype_( - self.dtype) - }) + recast_loss = block.create_var( + dtype=self.dtype, shape=recast_inputs[0].shape + ) + + recast_op = block.append_op( + type="cast", + inputs={"X": recast_inputs}, + outputs={"Out": recast_loss}, + attrs={ + "in_dtype": core.VarDesc.VarType.FP32, + "out_dtype": convert_np_dtype_to_dtype_(self.dtype), + }, + ) recast_op.desc.infer_var_type(block.desc) recast_op.desc.infer_shape(block.desc) - param_grad_list = append_backward(loss=recast_loss, - parameter_list=[input_to_check], - no_grad_set=no_grad_set) + param_grad_list = append_backward( + loss=recast_loss, + parameter_list=[input_to_check], + no_grad_set=no_grad_set, + ) fetch_list = [g for p, g in param_grad_list] executor = fluid.Executor(place) return list( map( np.array, - executor.run(prog, - feed_dict, - fetch_list, - scope=scope, - return_numpy=False))) + executor.run( + prog, + feed_dict, + fetch_list, + scope=scope, + return_numpy=False, + ), + ) + ) analytic_grads = self._get_gradient( inputs_to_check, place, output_names, no_grad_set, - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) return analytic_grads diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_dataparallel_with_pylayer.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_dataparallel_with_pylayer.py index b85caf2c966ec4fa79c0586ea6de8458144e8348..d49202def5b27ac1cfd99756c936075d2482db2b 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_dataparallel_with_pylayer.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_dataparallel_with_pylayer.py @@ -18,7 +18,9 @@ import paddle import numpy as np import paddle.distributed as dist from paddle.autograd import PyLayer -from paddle.distributed.fleet.utils.hybrid_parallel_util import fused_allreduce_gradients +from paddle.distributed.fleet.utils.hybrid_parallel_util import ( + 
fused_allreduce_gradients, +) batch = 5 in_dim = 20 @@ -26,7 +28,6 @@ out_dim = 10 class cus_tanh(PyLayer): - @staticmethod def forward(ctx, x): y = paddle.tanh(x) @@ -35,13 +36,12 @@ class cus_tanh(PyLayer): @staticmethod def backward(ctx, dy): - y, = ctx.saved_tensor() + (y,) = ctx.saved_tensor() grad = dy * (1 - paddle.square(y)) return grad class SimpleNet(paddle.nn.Layer): - def __init__(self, train_id, model_id): super(SimpleNet, self).__init__() self.w = self.create_parameter(shape=[in_dim, batch], dtype="float32") @@ -62,7 +62,6 @@ class SimpleNet(paddle.nn.Layer): class TestDistTraning(unittest.TestCase): - def test_multiple_gpus(self): self.trainer_id = dist.get_rank() dist.init_parallel_env() diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check.py index bb66e117f339e35cedd873c533345cfca5097388..d33bde8e4060e14af1ad4152701ab690870d571d 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check.py @@ -29,17 +29,19 @@ out_dim = 20 class SimpleNet(fluid.Layer): - def __init__(self, train_id): super(SimpleNet, self).__init__() - self.w1 = self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") - self.w2 = self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") + self.w1 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) + self.w2 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) self.share_net = Linear(out_dim, 10) - self.unused_param = self.create_parameter(shape=[out_dim, in_dim], - dtype="float64") + self.unused_param = self.create_parameter( + shape=[out_dim, in_dim], dtype="float64" + ) # just for test sync_params_buffers self.register_buffer("queue", paddle.randn([10, 5])) @@ -49,9 +51,10 @@ class SimpleNet(fluid.Layer): self.trainer_id = train_id def forward(self, x): - is_use = (paddle.equal_all( - x, paddle.ones(shape=(batch, in_dim))).numpy()[0] - and self.trainer_id == 1) + is_use = ( + paddle.equal_all(x, paddle.ones(shape=(batch, in_dim))).numpy()[0] + and self.trainer_id == 1 + ) if is_use: tmp = paddle.matmul(x, self.w1) @@ -62,7 +65,6 @@ class SimpleNet(fluid.Layer): class TestDistTraning(unittest.TestCase): - def test_multiple_gpus(self): dist.init_parallel_env() self.trainer_id = dist.get_rank() @@ -100,10 +102,12 @@ class TestDistTraning(unittest.TestCase): self.check_gradient(model_b.parameters()) # test acc gradient - w1_grad_sum = self.check_acc(model_a._layers.w1.grad, w1_grad_sum, - model_b._layers.w1.grad) - w2_grad_sum = self.check_acc(model_a._layers.w2.grad, w2_grad_sum, - model_b._layers.w2.grad) + w1_grad_sum = self.check_acc( + model_a._layers.w1.grad, w1_grad_sum, model_b._layers.w1.grad + ) + w2_grad_sum = self.check_acc( + model_a._layers.w2.grad, w2_grad_sum, model_b._layers.w2.grad + ) model_a.clear_gradients() diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check_in_eager_mode.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check_in_eager_mode.py index 506e025225d42817b043a60065bd8d5717667bfa..595aa09db262000306698ca08cd93336b1d7798d 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check_in_eager_mode.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_gradient_check_in_eager_mode.py @@ -30,17 +30,19 @@ out_dim = 20 class SimpleNet(fluid.Layer): - def __init__(self, train_id): super(SimpleNet, 
self).__init__() - self.w1 = self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") - self.w2 = self.create_parameter(shape=[in_dim, out_dim], - dtype="float32") + self.w1 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) + self.w2 = self.create_parameter( + shape=[in_dim, out_dim], dtype="float32" + ) self.share_net = Linear(out_dim, 10) - self.unused_param = self.create_parameter(shape=[out_dim, in_dim], - dtype="float64") + self.unused_param = self.create_parameter( + shape=[out_dim, in_dim], dtype="float64" + ) # just for test sync_params_buffers # self.register_buffer("queue", paddle.randn([10, 5])) @@ -50,9 +52,10 @@ class SimpleNet(fluid.Layer): self.trainer_id = train_id def forward(self, x): - is_use = (paddle.equal_all( - x, paddle.ones(shape=(batch, in_dim))).numpy()[0] - and self.trainer_id == 1) + is_use = ( + paddle.equal_all(x, paddle.ones(shape=(batch, in_dim))).numpy()[0] + and self.trainer_id == 1 + ) if is_use: tmp = paddle.matmul(x, self.w1) @@ -63,7 +66,6 @@ class SimpleNet(fluid.Layer): class TestDistTraning(unittest.TestCase): - def test_multiple_gpus(self): self.trainer_id = dist.get_rank() with _test_eager_guard(): @@ -75,12 +77,12 @@ class TestDistTraning(unittest.TestCase): state_dict = model_a.state_dict() model_b.set_state_dict(state_dict) - model_a = paddle.DataParallel(model_a, - find_unused_parameters=True, - group=self.pg) - model_b = paddle.DataParallel(model_b, - find_unused_parameters=True, - group=self.pg) + model_a = paddle.DataParallel( + model_a, find_unused_parameters=True, group=self.pg + ) + model_b = paddle.DataParallel( + model_b, find_unused_parameters=True, group=self.pg + ) ones_input = paddle.ones(shape=(batch, in_dim)) ones_input.stop_gradient = True @@ -106,12 +108,16 @@ class TestDistTraning(unittest.TestCase): self.check_gradient(model_b.parameters()) # test acc gradient - w1_grad_sum = self.check_acc(model_a._layers.w1.grad, - w1_grad_sum, - model_b._layers.w1.grad) - w2_grad_sum = self.check_acc(model_a._layers.w2.grad, - w2_grad_sum, - model_b._layers.w2.grad) + w1_grad_sum = self.check_acc( + model_a._layers.w1.grad, + w1_grad_sum, + model_b._layers.w1.grad, + ) + w2_grad_sum = self.check_acc( + model_a._layers.w2.grad, + w2_grad_sum, + model_b._layers.w2.grad, + ) model_a.clear_gradients() diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py index 8b07142ac2efb518f06e206e59ea0ef53731de92..694e1cf0e62a1b50ee6962324896990c722a3121 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_mnist.py @@ -23,43 +23,48 @@ from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase class SimpleImgConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=False, - param_attr=None, - bias_attr=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None, + ): super(SimpleImgConvPool, self).__init__() - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - 
filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=None, - bias_attr=None, - use_cudnn=use_cudnn) - - self._pool2d = Pool2D(pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + use_cudnn=use_cudnn, + ) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -68,33 +73,30 @@ class SimpleImgConvPool(fluid.dygraph.Layer): class MNIST(fluid.dygraph.Layer): - def __init__(self): super(MNIST, self).__init__() - self._simple_img_conv_pool_1 = SimpleImgConvPool(1, - 20, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu" + ) - self._simple_img_conv_pool_2 = SimpleImgConvPool(20, - 50, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu" + ) self.pool_2_shape = 50 * 4 * 4 SIZE = 10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 - self._fc = Linear(self.pool_2_shape, - 10, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale)), - act="softmax") + scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5 + self._fc = Linear( + self.pool_2_shape, + 10, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), + act="softmax", + ) def forward(self, inputs, label): x = self._simple_img_conv_pool_1(inputs) @@ -107,22 +109,26 @@ class MNIST(fluid.dygraph.Layer): class TestMnist(TestParallelDyGraphRunnerBase): - def get_model(self): model = MNIST() - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=2, - drop_last=True) - opt = paddle.optimizer.Adam(learning_rate=1e-3, - parameters=model.parameters()) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=2, drop_last=True + ) + opt = paddle.optimizer.Adam( + learning_rate=1e-3, parameters=model.parameters() + ) return model, train_reader, opt def run_one_loop(self, model, opt, data): batch_size = len(data) - dy_x_data = np.array([x[0].reshape(1, 28, 28) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + dy_x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype( + 'float32' + ) + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) label.stop_gradient = True diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_none_var.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_none_var.py index 5713c93920f75e3c92ea1b54ab26c409aa50417b..a46b3344333adbfd145c1f044325a4308c55a10c 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_none_var.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_none_var.py @@ -27,15 +27,18 @@ batch_num = 1000 class SimpleNet(fluid.Layer): - def __init__(self): super(SimpleNet, self).__init__() - self.net_a = 
paddle.nn.Sequential(paddle.nn.Linear(10, 20), - paddle.nn.Linear(20, 20), - paddle.nn.Linear(20, 5)) - self.net_b = paddle.nn.Sequential(paddle.nn.Linear(10, 20), - paddle.nn.Linear(20, 20), - paddle.nn.Linear(20, 5)) + self.net_a = paddle.nn.Sequential( + paddle.nn.Linear(10, 20), + paddle.nn.Linear(20, 20), + paddle.nn.Linear(20, 5), + ) + self.net_b = paddle.nn.Sequential( + paddle.nn.Linear(10, 20), + paddle.nn.Linear(20, 20), + paddle.nn.Linear(20, 5), + ) self.step = 0 def forward(self, x): @@ -43,24 +46,23 @@ class SimpleNet(fluid.Layer): def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ class TestSimpleNet(TestParallelDyGraphRunnerBase): - def get_model(self): model = SimpleNet() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, optimizer, batch): diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_shared_unused_var.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_shared_unused_var.py index 851bf3b2fdc89eb2b32e96ad0f32dd032d8fd8ed..60b6601b8423b0f636950551eb60aba1a3aa45f5 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_shared_unused_var.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_shared_unused_var.py @@ -24,7 +24,6 @@ paddle.seed(1024) class SimpleNet(fluid.Layer): - def __init__(self): # bias is unused parameters, and it share with net_a super(SimpleNet, self).__init__() @@ -41,24 +40,23 @@ batch_num = 1000 def fake_sample_reader(): - def __reader__(): for i in range(batch_num): - x_data = np.random.random_sample((10, )).astype('float32') + x_data = np.random.random_sample((10,)).astype('float32') yield x_data return __reader__ class TestSimpleNet(TestParallelDyGraphRunnerBase): - def get_model(self): model = SimpleNet() - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer def run_one_loop(self, model, optimizer, batch): diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py index c8e9bacad6208ae153795db38c62c7a5367ebe4c..f2e5ff7751c62c4b6f116d4d308f2e4ca19c1248 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py @@ -23,14 +23,15 @@ from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase class SimpleNet(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dtype="float32"): + def __init__( + self, + hidden_size, + vocab_size, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dtype="float32", + ): super(SimpleNet, 
self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -42,28 +43,35 @@ class SimpleNet(fluid.Layer): is_sparse=is_sparse, param_attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ) + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype=dtype, default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label): x_emb = self.embedding(input) fc = fluid.layers.matmul(x_emb, self.softmax_weight) fc = fluid.layers.elementwise_add(fc, self.softmax_bias) projection = fluid.layers.reshape(fc, shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -81,7 +89,6 @@ init_scale = 0.1 def fake_sample_reader(): - def __reader__(): for i in range(batch_num): x_data = np.arange(num_steps).astype('int64') @@ -92,20 +99,22 @@ def fake_sample_reader(): class TestSparseEmbedding(TestParallelDyGraphRunnerBase): - def get_model(self): - model = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=True) - - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + model = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=True, + ) + + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_fp64.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_fp64.py index 599c0754e7f988df6f63180709bb76ed116d2bfd..d2a07868e886074e4bc77aa496f332d899128211 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_fp64.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_fp64.py @@ -22,14 +22,15 @@ paddle.set_default_dtype("float64") class SimpleNet(Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dtype="float64"): + def __init__( + self, + hidden_size, + vocab_size, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dtype="float64", + ): super(SimpleNet, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -40,26 +41,35 @@ class SimpleNet(Layer): self.hidden_size, sparse=True, weight_attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Uniform(low=-init_scale, - high=init_scale))) + initializer=paddle.nn.initializer.Uniform( + low=-init_scale, 
high=init_scale + ) + ), + ) self.softmax_weight = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.tmp = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label): x_emb = self.embedding(input) @@ -67,7 +77,8 @@ class SimpleNet(Layer): fc = paddle.add(fc, self.softmax_bias) projection = paddle.reshape(fc, shape=[-1, self.vocab_size]) loss = paddle.nn.functional.softmax_with_cross_entropy( - logits=projection, label=label, soft_label=False) + logits=projection, label=label, soft_label=False + ) loss = paddle.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.mean(loss, axis=[0]) loss = paddle.sum(loss) @@ -85,7 +96,6 @@ init_scale = 0.1 def fake_sample_reader(): - def __reader__(): for i in range(batch_num): x_data = np.arange(num_steps).astype('int64') @@ -96,20 +106,22 @@ def fake_sample_reader(): class TestSparseEmbeddingFP64(TestParallelDyGraphRunnerBase): - def get_model(self): - model = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=True) - - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + model = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=True, + ) + + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_over_height.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_over_height.py index 7bd30deea6407078f91f9250628e4c2a6284e413..9d70390b1356ec7c6e9ec68571ac5c39376d11a7 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_over_height.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding_over_height.py @@ -14,7 +14,11 @@ import paddle import paddle.fluid as fluid -from parallel_dygraph_sparse_embedding import SimpleNet, fake_sample_reader, TestSparseEmbedding +from parallel_dygraph_sparse_embedding import ( + SimpleNet, + fake_sample_reader, + TestSparseEmbedding, +) from test_dist_base import runtime_main @@ -29,20 +33,22 @@ init_scale = 0.1 class TestSparseEmbeddingOverHeight(TestSparseEmbedding): - def get_model(self): - model = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=True) - - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - - optimizer = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=model.parameters()) + model = SimpleNet( + 
hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=True, + ) + + train_reader = paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + + optimizer = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=model.parameters() + ) return model, train_reader, optimizer diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_unused_variables.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_unused_variables.py index bcd8d765a27f33de7fc9b00dfc3c85a2d5709eb8..1c8c30b856d82c225d0646e414d2ab506653eae5 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_unused_variables.py +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_unused_variables.py @@ -20,14 +20,15 @@ from paddle.nn import Layer, Embedding class SimpleNet(Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dtype="float32"): + def __init__( + self, + hidden_size, + vocab_size, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dtype="float32", + ): super(SimpleNet, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -38,27 +39,36 @@ class SimpleNet(Layer): self.hidden_size, sparse=is_sparse, weight_attr=paddle.ParamAttr( - initializer=paddle.nn.initializer.Uniform(low=-init_scale, - high=init_scale))) + initializer=paddle.nn.initializer.Uniform( + low=-init_scale, high=init_scale + ) + ), + ) self.softmax_weight = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) # add tmp var self.tmp = self.create_parameter( attr=paddle.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label): x_emb = self.embedding(input) @@ -69,7 +79,8 @@ class SimpleNet(Layer): fc = paddle.add(fc, self.softmax_bias) projection = paddle.reshape(fc, shape=[-1, self.vocab_size]) loss = paddle.nn.functional.softmax_with_cross_entropy( - logits=projection, label=label, soft_label=False) + logits=projection, label=label, soft_label=False + ) loss = paddle.reshape(loss, shape=[-1, self.num_steps]) loss = paddle.mean(loss, axis=[0]) loss = paddle.sum(loss) @@ -87,7 +98,6 @@ init_scale = 0.1 def fake_sample_reader(): - def __reader__(): for i in range(batch_num): x_data = np.arange(num_steps).astype('int64') @@ -98,20 +108,22 @@ def fake_sample_reader(): class TestSparseEmbeddingUnusedVars(TestParallelDyGraphRunnerBase): - def get_model(self): - model = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=False) - - train_reader = paddle.batch(fake_sample_reader(), - batch_size=batch_size, - drop_last=True) - - optimizer = paddle.optimizer.SGD(learning_rate=0.001, - parameters=model.parameters()) + model = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=False, + ) + + train_reader = 
paddle.batch( + fake_sample_reader(), batch_size=batch_size, drop_last=True + ) + + optimizer = paddle.optimizer.SGD( + learning_rate=0.001, parameters=model.parameters() + ) return model, train_reader, optimizer diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index 2d467ef50ef3e0f131de57903efd47d028802b94..88caad052ad2ceed368a5f8395d8889b7ab151aa 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -30,35 +30,37 @@ DeviceType = core.DeviceType class TestParallelExecutorBase(unittest.TestCase): - @classmethod - def check_network_convergence(cls, - method, - use_device=DeviceType.CUDA, - iter=5, - batch_size=None, - feed_dict=None, - feed_data_reader=None, - get_data_from_feeder=None, - use_parallel_executor=True, - use_reduce=False, - use_ir_memory_optimize=False, - enable_inplace=True, - fuse_elewise_add_act_ops=False, - fuse_all_optimizer_ops=False, - fuse_all_reduce_ops=False, - fuse_relu_depthwise_conv=False, - optimizer=fluid.optimizer.Adam, - use_fast_executor=False, - enable_sequential_execution=False): - + def check_network_convergence( + cls, + method, + use_device=DeviceType.CUDA, + iter=5, + batch_size=None, + feed_dict=None, + feed_data_reader=None, + get_data_from_feeder=None, + use_parallel_executor=True, + use_reduce=False, + use_ir_memory_optimize=False, + enable_inplace=True, + fuse_elewise_add_act_ops=False, + fuse_all_optimizer_ops=False, + fuse_all_reduce_ops=False, + fuse_relu_depthwise_conv=False, + optimizer=fluid.optimizer.Adam, + use_fast_executor=False, + enable_sequential_execution=False, + ): def run_executor(exe, binary, feed, fetch_list): if feed_data_reader is None: res = exe.run(binary, feed=feed, fetch_list=fetch_list) else: - res = exe.run(binary, - feed=feed_data_reader.get_next(exe, binary), - fetch_list=fetch_list) + res = exe.run( + binary, + feed=feed_data_reader.get_next(exe, binary), + fetch_list=fetch_list, + ) return res if feed_data_reader is not None: @@ -72,63 +74,79 @@ class TestParallelExecutorBase(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(main, startup): - feed_dict, loss = cls.build_model(feed_dict, get_data_from_feeder, - main, method, optimizer) + feed_dict, loss = cls.build_model( + feed_dict, get_data_from_feeder, main, method, optimizer + ) - place = fluid.CUDAPlace( - 0) if use_device == DeviceType.CUDA else fluid.XPUPlace( - 0) if use_device == DeviceType.XPU else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if use_device == DeviceType.CUDA + else fluid.XPUPlace(0) + if use_device == DeviceType.XPU + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(startup) build_strategy, exec_strategy = cls.set_strategy( - enable_inplace, enable_sequential_execution, fuse_all_optimizer_ops, - fuse_all_reduce_ops, fuse_elewise_add_act_ops, - fuse_relu_depthwise_conv, use_fast_executor, use_ir_memory_optimize, - use_reduce, use_device) + enable_inplace, + enable_sequential_execution, + fuse_all_optimizer_ops, + fuse_all_reduce_ops, + fuse_elewise_add_act_ops, + fuse_relu_depthwise_conv, + use_fast_executor, + use_ir_memory_optimize, + use_reduce, + use_device, + ) if use_parallel_executor: binary = compiler.CompiledProgram(main).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - exec_strategy=exec_strategy) + exec_strategy=exec_strategy, + ) else: binary = main if batch_size is 
not None: - batch_size *= fluid.core.get_cuda_device_count( - ) if use_device == DeviceType.CUDA else fluid.core.get_xpu_device_count( - ) if use_device == DeviceType.XPU else int( - os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + batch_size *= ( + fluid.core.get_cuda_device_count() + if use_device == DeviceType.CUDA + else fluid.core.get_xpu_device_count() + if use_device == DeviceType.XPU + else int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + ) area_below_loss = 0 begin = time.time() - first_loss, = run_executor(exe=exe, - binary=binary, - feed=feed_dict, - fetch_list=[loss.name]) + (first_loss,) = run_executor( + exe=exe, binary=binary, feed=feed_dict, fetch_list=[loss.name] + ) area_below_loss += 0.5 * first_loss.mean() for _ in range(iter): - mid_loss = run_executor(exe=exe, - binary=binary, - feed=feed_dict, - fetch_list=[loss.name]) + mid_loss = run_executor( + exe=exe, binary=binary, feed=feed_dict, fetch_list=[loss.name] + ) area_below_loss += mid_loss[0].mean() - last_loss, = run_executor(exe=exe, - binary=binary, - feed=feed_dict, - fetch_list=[loss.name]) + (last_loss,) = run_executor( + exe=exe, binary=binary, feed=feed_dict, fetch_list=[loss.name] + ) area_below_loss += 0.5 * last_loss.mean() end = time.time() if batch_size is not None: - print("%.4f Instance per second" % ((batch_size * iter + 2) / - (end - begin))) + print( + "%.4f Instance per second" + % ((batch_size * iter + 2) / (end - begin)) + ) avg_last_loss_val = np.array(last_loss).mean() avg_first_loss_val = np.array(first_loss).mean() if math.isnan(float(avg_last_loss_val)) or math.isnan( - float(avg_first_loss_val)): + float(avg_first_loss_val) + ): sys.exit("got NaN loss, training failed.") print(first_loss, last_loss, area_below_loss) @@ -136,59 +154,85 @@ class TestParallelExecutorBase(unittest.TestCase): return first_loss, last_loss, area_below_loss @classmethod - def check_pass_conflict(cls, - method, - use_device=DeviceType.CUDA, - feed_dict=None, - get_data_from_feeder=None, - use_reduce=False, - use_ir_memory_optimize=True, - enable_inplace=True, - fuse_elewise_add_act_ops=False, - fuse_all_optimizer_ops=False, - fuse_all_reduce_ops=False, - fuse_relu_depthwise_conv=False, - optimizer=fluid.optimizer.Adam, - use_fast_executor=True, - enable_sequential_execution=False): + def check_pass_conflict( + cls, + method, + use_device=DeviceType.CUDA, + feed_dict=None, + get_data_from_feeder=None, + use_reduce=False, + use_ir_memory_optimize=True, + enable_inplace=True, + fuse_elewise_add_act_ops=False, + fuse_all_optimizer_ops=False, + fuse_all_reduce_ops=False, + fuse_relu_depthwise_conv=False, + optimizer=fluid.optimizer.Adam, + use_fast_executor=True, + enable_sequential_execution=False, + ): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): - feed_dict, loss = cls.build_model(feed_dict, get_data_from_feeder, - main, method, optimizer) + feed_dict, loss = cls.build_model( + feed_dict, get_data_from_feeder, main, method, optimizer + ) - place = fluid.CUDAPlace( - 0) if use_device == DeviceType.CUDA else fluid.XPUPlace( - 0) if use_device == DeviceType.XPU else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if use_device == DeviceType.CUDA + else fluid.XPUPlace(0) + if use_device == DeviceType.XPU + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(startup) build_strategy, exec_strategy = cls.set_strategy( - enable_inplace, enable_sequential_execution, fuse_all_optimizer_ops, - fuse_all_reduce_ops, fuse_elewise_add_act_ops, - 
fuse_relu_depthwise_conv, use_fast_executor, use_ir_memory_optimize, - use_reduce, use_device) + enable_inplace, + enable_sequential_execution, + fuse_all_optimizer_ops, + fuse_all_reduce_ops, + fuse_elewise_add_act_ops, + fuse_relu_depthwise_conv, + use_fast_executor, + use_ir_memory_optimize, + use_reduce, + use_device, + ) binary = compiler.CompiledProgram(main).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - exec_strategy=exec_strategy) + exec_strategy=exec_strategy, + ) exe.run(binary, feed=feed_dict, fetch_list=[loss.name]) @classmethod - def set_strategy(cls, enable_inplace, enable_sequential_execution, - fuse_all_optimizer_ops, fuse_all_reduce_ops, - fuse_elewise_add_act_ops, fuse_relu_depthwise_conv, - use_fast_executor, use_ir_memory_optimize, use_reduce, - use_device): + def set_strategy( + cls, + enable_inplace, + enable_sequential_execution, + fuse_all_optimizer_ops, + fuse_all_reduce_ops, + fuse_elewise_add_act_ops, + fuse_relu_depthwise_conv, + use_fast_executor, + use_ir_memory_optimize, + use_reduce, + use_device, + ): exec_strategy = fluid.ExecutionStrategy() if use_fast_executor: exec_strategy.use_experimental_executor = True build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \ - if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + if use_reduce + else fluid.BuildStrategy.ReduceStrategy.AllReduce + ) build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops build_strategy.fuse_relu_depthwise_conv = fuse_relu_depthwise_conv build_strategy.fuse_all_optimizer_ops = fuse_all_optimizer_ops @@ -210,8 +254,9 @@ class TestParallelExecutorBase(unittest.TestCase): return build_strategy, exec_strategy @classmethod - def build_model(cls, feed_dict, get_data_from_feeder, main, method, - optimizer): + def build_model( + cls, feed_dict, get_data_from_feeder, main, method, optimizer + ): loss = method(use_feed=feed_dict is not None) # NOTE(zjl): memory_optimize/inplace pass would not require # that loss.persistable = True. 
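# --- Illustrative sketch (not part of the patch) -----------------------------
# The hunks above and below apply the same mechanical rewrites across these
# test files. A minimal, self-contained Python sketch of those conventions
# follows; every name in it is invented for illustration only and does not
# correspond to any Paddle API.

def scaled_init(pool_shape, size, dtypes=("float32",)):
    # ** stays tight between simple operands but gains spaces when an operand
    # is itself a parenthesised expression, as in the MNIST hunk above:
    # (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5
    scale = (2.0 / (pool_shape**2 * size)) ** 0.5
    # Redundant parentheses around conditions are dropped, e.g.
    # `if (self.dtype not in ...):` becomes `if self.dtype not in ...:`.
    if "float64" not in dtypes:
        scale *= 0.5
    return scale


# Call sites that no longer fit on one line get one argument per line plus a
# trailing ("magic") comma, and one-element tuple unpacking is parenthesised,
# mirroring `(first_loss,) = run_executor(...)` in parallel_executor_test_base.
(result,) = (
    scaled_init(
        pool_shape=50 * 4 * 4,
        size=10,
        dtypes=("float32", "float16"),
    ),
)
print(result)
# -----------------------------------------------------------------------------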
diff --git a/python/paddle/fluid/tests/unittests/ps/dataset_generator_A.py b/python/paddle/fluid/tests/unittests/ps/dataset_generator_A.py index 1ab4b3580d6ad034612e0e071dfaa2185704faae..a53da27593907a17850bcb87fae6af9c3dcbb553 100755 --- a/python/paddle/fluid/tests/unittests/ps/dataset_generator_A.py +++ b/python/paddle/fluid/tests/unittests/ps/dataset_generator_A.py @@ -23,7 +23,6 @@ categorical_range_ = range(14, 40) class CriteoDataset(dg.MultiSlotDataGenerator): - def generate_sample(self, line): """ Read the data line by line and process it as a dictionary @@ -38,7 +37,8 @@ class CriteoDataset(dg.MultiSlotDataGenerator): sparse_feature = [] for idx in categorical_range_: sparse_feature.append( - [hash(str(idx) + features[idx]) % hash_dim_]) + [hash(str(idx) + features[idx]) % hash_dim_] + ) for idx in categorical_range_: feature_name.append("C" + str(idx - 13)) yield list(zip(feature_name, sparse_feature)) diff --git a/python/paddle/fluid/tests/unittests/ps/dataset_generator_B.py b/python/paddle/fluid/tests/unittests/ps/dataset_generator_B.py index 76b2468592dff83d88eafa03a7c70f180ba4fe3a..77178ee4c333dd5d8ff043778aeaacb44befe7f5 100755 --- a/python/paddle/fluid/tests/unittests/ps/dataset_generator_B.py +++ b/python/paddle/fluid/tests/unittests/ps/dataset_generator_B.py @@ -23,7 +23,6 @@ categorical_range_ = range(14, 40) class CriteoDataset(dg.MultiSlotDataGenerator): - def generate_sample(self, line): """ Read the data line by line and process it as a dictionary @@ -40,8 +39,9 @@ class CriteoDataset(dg.MultiSlotDataGenerator): dense_feature.append(0.0) else: dense_feature.append( - (float(features[idx]) - cont_min_[idx - 1]) / - cont_diff_[idx - 1]) + (float(features[idx]) - cont_min_[idx - 1]) + / cont_diff_[idx - 1] + ) label = [int(features[0])] feature_name = ["dense_feature"] feature_name.append("label") diff --git a/python/paddle/fluid/tests/unittests/ps/fl_ps_trainer.py b/python/paddle/fluid/tests/unittests/ps/fl_ps_trainer.py index aebf38a458538d2cbb503a8494a6eefcda023e2c..66e6b1cb62e0b1a164952d0971c561ed11725afe 100755 --- a/python/paddle/fluid/tests/unittests/ps/fl_ps_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/fl_ps_trainer.py @@ -44,16 +44,22 @@ def get_dataset(inputs, config, pipe_cmd, role="worker"): def fl_ps_train(): # 0. get role import paddle.distributed.fleet.base.role_maker as role_maker + role_maker = role_maker.PaddleCloudRoleMaker() role_maker._generate_role() fleet.util._set_role_maker(role_maker) # 1. load yaml-config to dict-config - from ps_dnn_trainer import YamlHelper, StaticModel, get_user_defined_strategy + from ps_dnn_trainer import ( + YamlHelper, + StaticModel, + get_user_defined_strategy, + ) + yaml_helper = YamlHelper() config_yaml_path = '../ps/fl_async_ps_config.yaml' config = yaml_helper.load_yaml(config_yaml_path) - #yaml_helper.print_yaml(config) + # yaml_helper.print_yaml(config) # 2. 
get static model paddle.enable_static() @@ -67,18 +73,25 @@ def fl_ps_train(): a_sync_configs = user_defined_strategy.a_sync_configs a_sync_configs["launch_barrier"] = True user_defined_strategy.a_sync_configs = a_sync_configs - print("launch_barrier: ", - user_defined_strategy.a_sync_configs["launch_barrier"]) + print( + "launch_barrier: ", + user_defined_strategy.a_sync_configs["launch_barrier"], + ) learning_rate = config.get("hyper_parameters.optimizer.learning_rate") inner_optimizer = paddle.optimizer.Adam(learning_rate, lazy_mode=True) - from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ParameterServerOptimizer + from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ( + ParameterServerOptimizer, + ) + ps_optimizer = ParameterServerOptimizer(inner_optimizer) - ps_optimizer._set_basic_info(loss, role_maker, inner_optimizer, - user_defined_strategy) + ps_optimizer._set_basic_info( + loss, role_maker, inner_optimizer, user_defined_strategy + ) ps_optimizer.minimize_impl(loss) # 4. runtime from paddle.distributed.ps.the_one_ps import TheOnePSRuntime + _runtime_handle = TheOnePSRuntime() # ps 目录下重构版的 TheOnePSRuntime _runtime_handle._set_basic_info(ps_optimizer.pass_ctx._attrs) epoch_num = int(config.get('runner.epoch_num')) @@ -94,21 +107,29 @@ def fl_ps_train(): _runtime_handle._init_worker() print('trainer get dataset') inputs = feeds_list[1:-1] - dataset, file_list = get_dataset(inputs, config, - "python dataset_generator_A.py") - print("fluid.default_main_program: {}".format( - fluid.default_main_program()._heter_pipeline_opt)) + dataset, file_list = get_dataset( + inputs, config, "python dataset_generator_A.py" + ) + print( + "fluid.default_main_program: {}".format( + fluid.default_main_program()._heter_pipeline_opt + ) + ) for epoch in range(epoch_num): # A 方和 B 方如果要以文件粒度 shuffle 时,则需要固定同一个种子 dataset.set_filelist(file_list) start_time = time.time() - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - print_period=2, - debug=False) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + print_period=2, + debug=False, + ) end_time = time.time() - print("trainer epoch %d finished, use time=%d\n" % - ((epoch), end_time - start_time)) + print( + "trainer epoch %d finished, use time=%d\n" + % ((epoch), end_time - start_time) + ) exe.close() _runtime_handle._stop_worker() print("Fl partyA Trainer Success!") @@ -116,19 +137,26 @@ def fl_ps_train(): exe = fluid.Executor() exe.run(fluid.default_startup_program()) _runtime_handle._init_worker() - inputs = [feeds_list[0], - feeds_list[-1]] # 顺序务必要和 dataset_generator_B.py 中保持一致 - dataset, file_list = get_dataset(inputs, config, - "python dataset_generator_B.py", - "heter_worker") - print("fluid.default_main_program: {}".format( - fluid.default_main_program()._heter_pipeline_opt)) + inputs = [ + feeds_list[0], + feeds_list[-1], + ] # 顺序务必要和 dataset_generator_B.py 中保持一致 + dataset, file_list = get_dataset( + inputs, config, "python dataset_generator_B.py", "heter_worker" + ) + print( + "fluid.default_main_program: {}".format( + fluid.default_main_program()._heter_pipeline_opt + ) + ) for epoch in range(epoch_num): dataset.set_filelist(file_list) - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - print_period=2, - debug=False) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + print_period=2, + debug=False, + ) exe.close() _runtime_handle._stop_worker() print("Fl partB Trainer Success!") diff 
--git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py index 8d63c9aa0acca5191e68eb2bd6d718ac9a259c71..08055ae170393a46694fa567599af6229dc949e3 100755 --- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py @@ -13,7 +13,12 @@ # limitations under the License. import paddle.distributed.fleet.base.role_maker as role_maker -from paddle.distributed.ps.utils.ps_program_builder import debug_program, logger, new_pass, ps_log_root_dir +from paddle.distributed.ps.utils.ps_program_builder import ( + debug_program, + logger, + new_pass, + ps_log_root_dir, +) import paddle.distributed.fleet as fleet import argparse import sys @@ -42,7 +47,6 @@ def is_distributed_env(): class YamlHelper(object): - def load_yaml(self, yaml_file, other_part=None): part_list = ["runner", "hyper_parameters"] if other_part: @@ -114,7 +118,8 @@ class YamlHelper(object): max_k = max(max_k, len(k)) h_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format( - max_k, " " * spacing, max_v) + max_k, " " * spacing, max_v + ) l_format = " " + "|{{:>{}s}}{{}}{{:^{}s}}|\n".format(max_k, max_v) length = max_k + max_v + spacing @@ -150,7 +155,7 @@ def get_user_defined_strategy(config): logger.warn( "Not Find Distributed env, Change To local train mode. If you want train with fleet, please use [fleetrun] command." ) - #return None + # return None sync_mode = config.get("runner.sync_mode") assert sync_mode in ["async", "sync", "geo", "heter", "gpubox"] if sync_mode == "sync": @@ -159,8 +164,9 @@ def get_user_defined_strategy(config): elif sync_mode == "async": strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True - strategy.is_fl_ps_mode = True if config.get( - "runner.is_fl_ps_mode") == 1 else False + strategy.is_fl_ps_mode = ( + True if config.get("runner.is_fl_ps_mode") == 1 else False + ) if strategy.is_fl_ps_mode == True: strategy.pipeline = False micro_num = 1 @@ -191,7 +197,7 @@ def get_user_defined_strategy(config): "dump_param": config.get("runner.dump_param", []), "stat_var_names": config.get("stat_var_names", []), "local_sparse": config.get("runner.local_sparse", []), - "remote_sparse": config.get("runner.remote_sparse", []) + "remote_sparse": config.get("runner.remote_sparse", []), } print("strategy:", strategy.trainer_desc_configs) @@ -200,7 +206,7 @@ def get_user_defined_strategy(config): "uri": config.get("runner.fs_client.uri", ""), "user": config.get("runner.fs_client.user", ""), "passwd": config.get("runner.fs_client.passwd", ""), - "hadoop_bin": config.get("runner.fs_client.hadoop_bin", "hadoop") + "hadoop_bin": config.get("runner.fs_client.hadoop_bin", "hadoop"), } print("strategy:", strategy.fs_client_param) @@ -225,7 +231,9 @@ def get_user_defined_strategy(config): def get_distributed_strategy(user_defined_strategy): # pslib - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, + ) k_steps = user_defined_strategy.a_sync_configs["k_steps"] strategy = None @@ -254,45 +262,38 @@ def get_model(config): def parse_args(): parser = argparse.ArgumentParser("PsTest train script") - parser.add_argument('-m', - '--config_yaml', - type=str, - required=True, - help='config file path') - parser.add_argument('-bf16', - '--pure_bf16', - type=ast.literal_eval, - default=False, - help="whether 
use bf16") - - parser.add_argument('--run_minimize', - type=int, - default=0, - help="test single pass") - parser.add_argument('--run_single_pass', - type=int, - default=0, - help="test single pass") - parser.add_argument('--run_the_one_ps', - type=int, - default=0, - help="test the_one_ps") - parser.add_argument('--debug_new_minimize', - type=int, - default=0, - help="test single pass") - parser.add_argument('--debug_new_pass', - type=int, - default=0, - help="test single pass") - parser.add_argument('--applied_pass_name', - type=str, - default="", - help="test single pass") - parser.add_argument('--debug_the_one_ps', - type=int, - default=0, - help="test the_one_ps") + parser.add_argument( + '-m', '--config_yaml', type=str, required=True, help='config file path' + ) + parser.add_argument( + '-bf16', + '--pure_bf16', + type=ast.literal_eval, + default=False, + help="whether use bf16", + ) + + parser.add_argument( + '--run_minimize', type=int, default=0, help="test single pass" + ) + parser.add_argument( + '--run_single_pass', type=int, default=0, help="test single pass" + ) + parser.add_argument( + '--run_the_one_ps', type=int, default=0, help="test the_one_ps" + ) + parser.add_argument( + '--debug_new_minimize', type=int, default=0, help="test single pass" + ) + parser.add_argument( + '--debug_new_pass', type=int, default=0, help="test single pass" + ) + parser.add_argument( + '--applied_pass_name', type=str, default="", help="test single pass" + ) + parser.add_argument( + '--debug_the_one_ps', type=int, default=0, help="test the_one_ps" + ) args = parser.parse_args() args.abs_dir = os.path.dirname(os.path.abspath(args.config_yaml)) @@ -317,7 +318,6 @@ def bf16_to_fp32(val): class DnnTrainer(object): - def __init__(self, config): self.metrics = {} self.config = config @@ -351,36 +351,59 @@ class DnnTrainer(object): loss = self.model._cost user_defined_strategy = get_user_defined_strategy(self.config) learning_rate = self.config.get( - "hyper_parameters.optimizer.learning_rate") + "hyper_parameters.optimizer.learning_rate" + ) sync_mode = self.config.get("runner.sync_mode") inner_optimizer = paddle.optimizer.Adam(learning_rate, lazy_mode=True) self.role_maker._generate_role() # 必要 if self.config['debug_new_minimize'] == 1: print("entering run_minimize -- new") - from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ParameterServerOptimizer + from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ( + ParameterServerOptimizer, + ) + ps_optimizer = ParameterServerOptimizer(inner_optimizer) - ps_optimizer._set_basic_info(loss, self.role_maker, inner_optimizer, - user_defined_strategy) + ps_optimizer._set_basic_info( + loss, self.role_maker, inner_optimizer, user_defined_strategy + ) ps_optimizer.minimize_impl(loss) else: print("entering run_minimize -- old") fleet_obj = fleet.distributed_optimizer( - inner_optimizer, user_defined_strategy) ## Fleet 对象 + inner_optimizer, user_defined_strategy + ) ## Fleet 对象 fleet_obj.minimize(loss) if fleet.is_server(): - _main_file = ps_log_root_dir + sync_mode + '_run_minimize' + '_debug:_' + str( - self.config['debug_new_minimize']) + '_server_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + '_run_minimize' + + '_debug:_' + + str(self.config['debug_new_minimize']) + + '_server_main.prototxt' + ) debug_program(_main_file, loss.block.program) elif fleet.is_worker(): - _main_file = ps_log_root_dir + sync_mode + '_run_minimize' + '_debug:_' + str( - self.config['debug_new_minimize']) + '_worker_main.prototxt' + 
_main_file = ( + ps_log_root_dir + + sync_mode + + '_run_minimize' + + '_debug:_' + + str(self.config['debug_new_minimize']) + + '_worker_main.prototxt' + ) debug_program(_main_file, loss.block.program) elif self.role_maker._is_heter_worker(): - _main_file = ps_log_root_dir + sync_mode + '_run_minimize' + '_debug:_' + str( - self.config['debug_new_minimize'] - ) + '_heter_worker_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + '_run_minimize' + + '_debug:_' + + str(self.config['debug_new_minimize']) + + '_heter_worker_main.prototxt' + ) debug_program(_main_file, loss.block.program) def run_single_pass(self): @@ -396,42 +419,70 @@ class DnnTrainer(object): startup_program = paddle.static.default_startup_program() inner_optimizer.minimize(loss, startup_program) if self.config['debug_new_pass'] == 1: - print("entering run {} - new".format( - str(config["applied_pass_name"]))) - from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ParameterServerOptimizer + print( + "entering run {} - new".format(str(config["applied_pass_name"])) + ) + from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ( + ParameterServerOptimizer, + ) + ps_optimizer = ParameterServerOptimizer(inner_optimizer) - ps_optimizer._set_basic_info(loss, self.role_maker, inner_optimizer, - user_defined_strategy) + ps_optimizer._set_basic_info( + loss, self.role_maker, inner_optimizer, user_defined_strategy + ) ps_optimizer._set_origin_programs([loss]) ps_optimizer._init_ps_pass_context(loss, startup_program) _main = ps_optimizer.pass_ctx._attrs['cloned_main'] - append_send_ops_pass = new_pass(config["applied_pass_name"], - ps_optimizer.pass_ctx._attrs) + append_send_ops_pass = new_pass( + config["applied_pass_name"], ps_optimizer.pass_ctx._attrs + ) append_send_ops_pass.apply([_main], [None], ps_optimizer.pass_ctx) else: - print("entering run {} - old".format( - str(config["applied_pass_name"]))) - from paddle.fluid.incubate.fleet.parameter_server.ir import public as public + print( + "entering run {} - old".format(str(config["applied_pass_name"])) + ) + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + public as public, + ) + dist_strategy = get_distributed_strategy(user_defined_strategy) compiled_config = public.CompileTimeStrategy( - loss.block.program, startup_program, dist_strategy, - self.role_maker) + loss.block.program, + startup_program, + dist_strategy, + self.role_maker, + ) _main = compiled_config.origin_main_program.clone() _startup = compiled_config.origin_startup_program.clone() - from paddle.fluid.incubate.fleet.parameter_server.ir import trainer_pass as worker + from paddle.fluid.incubate.fleet.parameter_server.ir import ( + trainer_pass as worker, + ) + _main = worker.append_send_ops_pass(_main, compiled_config) if fleet.is_server(): - _main_file = ps_log_root_dir + sync_mode + "_" + str( - config["applied_pass_name"]) + '_debug:_' + str( - self.config['debug_new_pass']) + '_server_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + "_" + + str(config["applied_pass_name"]) + + '_debug:_' + + str(self.config['debug_new_pass']) + + '_server_main.prototxt' + ) debug_program(_main_file, _main) elif fleet.is_worker(): - _main_file = ps_log_root_dir + sync_mode + "_" + str( - config["applied_pass_name"]) + '_debug:_' + str( - self.config['debug_new_pass']) + '_worker_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + "_" + + str(config["applied_pass_name"]) + + '_debug:_' + + str(self.config['debug_new_pass']) + + 
'_worker_main.prototxt' + ) debug_program(_main_file, _main) def run_the_one_ps(self): @@ -442,7 +493,8 @@ class DnnTrainer(object): loss = self.model._cost user_defined_strategy = get_user_defined_strategy(self.config) learning_rate = self.config.get( - "hyper_parameters.optimizer.learning_rate") + "hyper_parameters.optimizer.learning_rate" + ) sync_mode = self.config.get("runner.sync_mode") inner_optimizer = paddle.optimizer.Adam(learning_rate, lazy_mode=True) @@ -450,28 +502,37 @@ class DnnTrainer(object): if self.config['debug_the_one_ps'] == 1: print("entering run_the_one_ps -- new") - from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ParameterServerOptimizer + from paddle.distributed.fleet.meta_optimizers.ps_optimizer import ( + ParameterServerOptimizer, + ) + ps_optimizer = ParameterServerOptimizer(inner_optimizer) - ps_optimizer._set_basic_info(loss, self.role_maker, inner_optimizer, - user_defined_strategy) + ps_optimizer._set_basic_info( + loss, self.role_maker, inner_optimizer, user_defined_strategy + ) ps_optimizer.minimize_impl(loss) from paddle.distributed.ps.the_one_ps import TheOnePSRuntime + _runtime_handle = TheOnePSRuntime() # ps 目录下重构版的 TheOnePSRuntime _runtime_handle._set_basic_info(ps_optimizer.pass_ctx._attrs) if fleet.is_worker(): - worker_desc = _runtime_handle.ps_desc_builder.build_worker_desc( + worker_desc = ( + _runtime_handle.ps_desc_builder.build_worker_desc() ) with open( - ps_log_root_dir + sync_mode + '_' + - 'new_worker_ps_desc', 'w') as f: + ps_log_root_dir + sync_mode + '_' + 'new_worker_ps_desc', + 'w', + ) as f: f.write(worker_desc) if fleet.is_server(): - server_desc = _runtime_handle.ps_desc_builder.build_server_desc( + server_desc = ( + _runtime_handle.ps_desc_builder.build_server_desc() ) with open( - ps_log_root_dir + sync_mode + '_' + - 'new_server_ps_desc', 'w') as f: + ps_log_root_dir + sync_mode + '_' + 'new_server_ps_desc', + 'w', + ) as f: f.write(server_desc) else: @@ -492,16 +553,34 @@ class DnnTrainer(object): f.write(str(server_desc) + str(fleet_obj._runtime_handle._get_fs_client_desc().to_string())) ''' if fleet.is_server(): - _main_file = ps_log_root_dir + sync_mode + '_run_the_one_ps' + '_debug:_' + str( - self.config['debug_the_one_ps']) + '_server_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + '_run_the_one_ps' + + '_debug:_' + + str(self.config['debug_the_one_ps']) + + '_server_main.prototxt' + ) debug_program(_main_file, loss.block.program) elif fleet.is_worker(): - _main_file = ps_log_root_dir + sync_mode + '_run_the_one_ps' + '_debug:_' + str( - self.config['debug_the_one_ps']) + '_worker_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + '_run_the_one_ps' + + '_debug:_' + + str(self.config['debug_the_one_ps']) + + '_worker_main.prototxt' + ) debug_program(_main_file, loss.block.program) elif self.role_maker._is_heter_worker(): - _main_file = ps_log_root_dir + sync_mode + '_run_the_one_ps' + '_debug:_' + str( - self.config['debug_the_one_ps']) + '_heter_worker_main.prototxt' + _main_file = ( + ps_log_root_dir + + sync_mode + + '_run_the_one_ps' + + '_debug:_' + + str(self.config['debug_the_one_ps']) + + '_heter_worker_main.prototxt' + ) debug_program(_main_file, loss.block.program) diff --git a/python/paddle/fluid/tests/unittests/ps/test_fl_ps.py b/python/paddle/fluid/tests/unittests/ps/test_fl_ps.py index 2726d7c10cdbe19984a84b73a1bd0d558cf7d5b6..dc315b7df29ad1feafcd1700c92f76d5f87f0251 100755 --- a/python/paddle/fluid/tests/unittests/ps/test_fl_ps.py +++ 
b/python/paddle/fluid/tests/unittests/ps/test_fl_ps.py @@ -16,12 +16,13 @@ import unittest import shlex # noqa: F401 -from paddle.fluid.tests.unittests.distributed_passes.dist_pass_test_base import remove_path_if_exists # noqa: F401 +from paddle.fluid.tests.unittests.distributed_passes.dist_pass_test_base import ( + remove_path_if_exists, +) # noqa: F401 import os class FlPsTest(unittest.TestCase): - def test_launch_fl_ps(self): ''' cmd = [ diff --git a/python/paddle/fluid/tests/unittests/ps/test_the_one_ps.py b/python/paddle/fluid/tests/unittests/ps/test_the_one_ps.py index b561d82fc5f30c647e627abbdb870ff09a435907..c9d0c9fc943dcb8c7e2ae3accaff1257e4377957 100755 --- a/python/paddle/fluid/tests/unittests/ps/test_the_one_ps.py +++ b/python/paddle/fluid/tests/unittests/ps/test_the_one_ps.py @@ -14,14 +14,16 @@ import unittest -from paddle.fluid.tests.unittests.distributed_passes.ps_pass_test_base import PsPassTestBase, remove_path_if_exists +from paddle.fluid.tests.unittests.distributed_passes.ps_pass_test_base import ( + PsPassTestBase, + remove_path_if_exists, +) from paddle.distributed.ps.utils.public import logger, ps_log_root_dir import paddle.distributed.fleet.proto.the_one_ps_pb2 as ps_pb2 # noqa: F401 from google.protobuf import text_format # noqa: F401 class TestTheOnePs(PsPassTestBase): - def setUp(self): pass @@ -56,14 +58,16 @@ class TestTheOnePs(PsPassTestBase): self.config['run_the_one_ps'] = '1' self.config['debug_the_one_ps'] = '0' - self.config[ - 'log_dir'] = ps_log_root_dir + "async_cpu_log_old_the_one_ps" + self.config['log_dir'] = ( + ps_log_root_dir + "async_cpu_log_old_the_one_ps" + ) remove_path_if_exists(self.config['log_dir']) self.ps_launch() self.config['debug_the_one_ps'] = '1' - self.config[ - 'log_dir'] = ps_log_root_dir + "async_cpu_log_new_the_one_ps" + self.config['log_dir'] = ( + ps_log_root_dir + "async_cpu_log_new_the_one_ps" + ) remove_path_if_exists(self.config['log_dir']) self.ps_launch() diff --git a/python/paddle/fluid/tests/unittests/ps_dnn_model.py b/python/paddle/fluid/tests/unittests/ps_dnn_model.py index 89472c73bd31dde5c27c111ba7f241a5a93d6510..c2775fbca312577e840e08fc653b4f691c6854f7 100755 --- a/python/paddle/fluid/tests/unittests/ps_dnn_model.py +++ b/python/paddle/fluid/tests/unittests/ps_dnn_model.py @@ -18,14 +18,15 @@ import math class DNNLayer(nn.Layer): - - def __init__(self, - sparse_feature_number, - sparse_feature_dim, - dense_feature_dim, - num_field, - layer_sizes, - sync_mode=None): + def __init__( + self, + sparse_feature_number, + sparse_feature_dim, + dense_feature_dim, + num_field, + layer_sizes, + sync_mode=None, + ): super(DNNLayer, self).__init__() self.sync_mode = sync_mode self.sparse_feature_number = sparse_feature_number @@ -40,10 +41,15 @@ class DNNLayer(nn.Layer): sparse=True, weight_attr=paddle.ParamAttr( name="SparseFeatFactors", - initializer=paddle.nn.initializer.Uniform())) - - sizes = [sparse_feature_dim * num_field + dense_feature_dim - ] + self.layer_sizes + [2] + initializer=paddle.nn.initializer.Uniform(), + ), + ) + + sizes = ( + [sparse_feature_dim * num_field + dense_feature_dim] + + self.layer_sizes + + [2] + ) acts = ["relu" for _ in range(len(self.layer_sizes))] + [None] self._mlp_layers = [] for i in range(len(layer_sizes) + 1): @@ -52,7 +58,10 @@ class DNNLayer(nn.Layer): out_features=sizes[i + 1], weight_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.Normal( - std=1.0 / math.sqrt(sizes[i])))) + std=1.0 / math.sqrt(sizes[i]) + ) + ), + ) self.add_sublayer('linear_%d' % i, linear) 
self._mlp_layers.append(linear) if acts[i] == 'relu': @@ -68,7 +77,8 @@ class DNNLayer(nn.Layer): emb = paddle.fluid.contrib.sparse_embedding( input=s_input, size=[self.sparse_feature_number, self.sparse_feature_dim], - param_attr=paddle.ParamAttr(name="embedding")) + param_attr=paddle.ParamAttr(name="embedding"), + ) else: emb = self.embedding(s_input) emb = paddle.reshape(emb, shape=[-1, self.sparse_feature_dim]) @@ -89,13 +99,14 @@ class DNNLayer(nn.Layer): class FlDNNLayer(nn.Layer): - - def __init__(self, - sparse_feature_number, - sparse_feature_dim, - dense_feature_dim, - sparse_number, - sync_mode=None): + def __init__( + self, + sparse_feature_number, + sparse_feature_dim, + dense_feature_dim, + sparse_number, + sync_mode=None, + ): super(FlDNNLayer, self).__init__() self.PART_A_DEVICE_FlAG = 'gpu:0' @@ -109,8 +120,11 @@ class FlDNNLayer(nn.Layer): self.slot_num = sparse_number self.dense_feature_dim = dense_feature_dim - layer_sizes_a = [self.slot_num * self.sparse_feature_dim, 5, - 7] # for test + layer_sizes_a = [ + self.slot_num * self.sparse_feature_dim, + 5, + 7, + ] # for test layer_sizes_b = [self.dense_feature_dim, 6, 7] layer_sizes_top = [7, 2] @@ -120,7 +134,9 @@ class FlDNNLayer(nn.Layer): sparse=True, weight_attr=paddle.ParamAttr( name="SparseFeatFactors", - initializer=paddle.nn.initializer.Uniform())) + initializer=paddle.nn.initializer.Uniform(), + ), + ) # part_a fc acts = ["relu" for _ in range(len(layer_sizes_a))] @@ -131,7 +147,10 @@ class FlDNNLayer(nn.Layer): out_features=layer_sizes_a[i + 1], weight_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.Normal( - std=1.0 / math.sqrt(layer_sizes_a[i])))) + std=1.0 / math.sqrt(layer_sizes_a[i]) + ) + ), + ) self.add_sublayer('linear_%d' % i, linear) self._mlp_layers_a.append(linear) act = paddle.nn.ReLU() @@ -147,7 +166,10 @@ class FlDNNLayer(nn.Layer): out_features=layer_sizes_b[i + 1], weight_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.Normal( - std=1.0 / math.sqrt(layer_sizes_b[i])))) + std=1.0 / math.sqrt(layer_sizes_b[i]) + ) + ), + ) self.add_sublayer('linear_%d' % i, linear) self._mlp_layers_b.append(linear) act = paddle.nn.ReLU() @@ -163,7 +185,10 @@ class FlDNNLayer(nn.Layer): out_features=layer_sizes_top[i + 1], weight_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.Normal( - std=1.0 / math.sqrt(layer_sizes_top[i])))) + std=1.0 / math.sqrt(layer_sizes_top[i]) + ) + ), + ) self.add_sublayer('linear_%d' % i, linear) self._mlp_layers_top.append(linear) act = paddle.nn.ReLU() @@ -184,7 +209,8 @@ class FlDNNLayer(nn.Layer): y = self._mlp_layers_a[2](y) with paddle.fluid.device_guard( - self.PART_A_JOINT_OP_DEVICE_FlAG): # joint point + self.PART_A_JOINT_OP_DEVICE_FlAG + ): # joint point bottom_a = self._mlp_layers_a[3](y) return bottom_a @@ -201,9 +227,11 @@ class FlDNNLayer(nn.Layer): def interactive_layer(self, bottom_a, bottom_b): with paddle.fluid.device_guard( - self.PART_B_JOINT_OP_DEVICE_FlAG): # joint point + self.PART_B_JOINT_OP_DEVICE_FlAG + ): # joint point interactive = paddle.fluid.layers.elementwise_add( - bottom_a, bottom_b) + bottom_a, bottom_b + ) return interactive def top_layer(self, interactive, label_input): @@ -211,16 +239,25 @@ class FlDNNLayer(nn.Layer): y = self._mlp_layers_top[0](interactive) y_top = self._mlp_layers_top[1](y) predict_2d = paddle.nn.functional.softmax(y_top) - auc, batch_auc, [ - self.batch_stat_pos, self.batch_stat_neg, self.stat_pos, - self.stat_neg - ] = paddle.static.auc(input=predict_2d, - label=label_input, - num_thresholds=2**12, - 
slide_steps=20) - - cost = paddle.nn.functional.cross_entropy(input=y_top, - label=label_input) + ( + auc, + batch_auc, + [ + self.batch_stat_pos, + self.batch_stat_neg, + self.stat_pos, + self.stat_neg, + ], + ) = paddle.static.auc( + input=predict_2d, + label=label_input, + num_thresholds=2**12, + slide_steps=20, + ) + + cost = paddle.nn.functional.cross_entropy( + input=y_top, label=label_input + ) avg_cost = paddle.mean(x=cost) return auc, avg_cost @@ -237,8 +274,7 @@ class FlDNNLayer(nn.Layer): return auc, avg_cost -class StaticModel(): - +class StaticModel: def __init__(self, config): self.cost = None self.infer_target_var = None @@ -254,21 +290,28 @@ class StaticModel(): self.distributed_embedding = True self.sparse_feature_number = self.config.get( - "hyper_parameters.sparse_feature_number") + "hyper_parameters.sparse_feature_number" + ) self.sparse_feature_dim = self.config.get( - "hyper_parameters.sparse_feature_dim") + "hyper_parameters.sparse_feature_dim" + ) self.sparse_inputs_slots = self.config.get( - "hyper_parameters.sparse_inputs_slots") + "hyper_parameters.sparse_inputs_slots" + ) self.dense_input_dim = self.config.get( - "hyper_parameters.dense_input_dim") + "hyper_parameters.dense_input_dim" + ) self.learning_rate = self.config.get( - "hyper_parameters.optimizer.learning_rate") + "hyper_parameters.optimizer.learning_rate" + ) self.fc_sizes = self.config.get("hyper_parameters.fc_sizes") def create_feeds(self, is_infer=False): - dense_input = paddle.static.data(name="dense_input", - shape=[None, self.dense_input_dim], - dtype="float32") + dense_input = paddle.static.data( + name="dense_input", + shape=[None, self.dense_input_dim], + dtype="float32", + ) sparse_input_ids = [ paddle.static.data(name=str(i), shape=[None, 1], dtype="int64") @@ -282,33 +325,44 @@ class StaticModel(): def net(self, input, is_infer=False): self.label_input = input[0] - self.sparse_inputs = input[1:self.sparse_inputs_slots] + self.sparse_inputs = input[1 : self.sparse_inputs_slots] self.dense_input = input[-1] sparse_number = self.sparse_inputs_slots - 1 - dnn_model = DNNLayer(self.sparse_feature_number, - self.sparse_feature_dim, - self.dense_input_dim, - sparse_number, - self.fc_sizes, - sync_mode=self.sync_mode) + dnn_model = DNNLayer( + self.sparse_feature_number, + self.sparse_feature_dim, + self.dense_input_dim, + sparse_number, + self.fc_sizes, + sync_mode=self.sync_mode, + ) raw_predict_2d = dnn_model.forward(self.sparse_inputs, self.dense_input) predict_2d = paddle.nn.functional.softmax(raw_predict_2d) self.predict = predict_2d - auc, batch_auc, [ - self.batch_stat_pos, self.batch_stat_neg, self.stat_pos, - self.stat_neg - ] = paddle.static.auc(input=self.predict, - label=self.label_input, - num_thresholds=2**12, - slide_steps=20) + ( + auc, + batch_auc, + [ + self.batch_stat_pos, + self.batch_stat_neg, + self.stat_pos, + self.stat_neg, + ], + ) = paddle.static.auc( + input=self.predict, + label=self.label_input, + num_thresholds=2**12, + slide_steps=20, + ) self.inference_target_var = auc if is_infer: fetch_dict = {'auc': auc} return fetch_dict - cost = paddle.nn.functional.cross_entropy(input=raw_predict_2d, - label=self.label_input) + cost = paddle.nn.functional.cross_entropy( + input=raw_predict_2d, label=self.label_input + ) avg_cost = paddle.mean(x=cost) self._cost = avg_cost @@ -317,18 +371,21 @@ class StaticModel(): def fl_net(self, input, is_infer=False): self.label_input = input[0] - self.sparse_inputs = input[1:self.sparse_inputs_slots] + self.sparse_inputs = input[1 : 
self.sparse_inputs_slots] self.dense_input = input[-1] self.sparse_number = self.sparse_inputs_slots - 1 - fl_dnn_model = FlDNNLayer(self.sparse_feature_number, - self.sparse_feature_dim, - self.dense_input_dim, - self.sparse_number, - sync_mode=self.sync_mode) - - auc, avg_cost = fl_dnn_model.forward(self.sparse_inputs, - self.dense_input, self.label_input) + fl_dnn_model = FlDNNLayer( + self.sparse_feature_number, + self.sparse_feature_dim, + self.dense_input_dim, + self.sparse_number, + sync_mode=self.sync_mode, + ) + + auc, avg_cost = fl_dnn_model.forward( + self.sparse_inputs, self.dense_input, self.label_input + ) fetch_dict = {'cost': avg_cost, 'auc': auc} self._cost = avg_cost return fetch_dict diff --git a/python/paddle/fluid/tests/unittests/py_precise_roi_pool.py b/python/paddle/fluid/tests/unittests/py_precise_roi_pool.py index 29721c86200aa47c59d2b1d5dda01e8a3d09e994..e18c302fd7b41a551028510db983236140ff942c 100644 --- a/python/paddle/fluid/tests/unittests/py_precise_roi_pool.py +++ b/python/paddle/fluid/tests/unittests/py_precise_roi_pool.py @@ -17,7 +17,6 @@ import numpy as np class PyPrRoIPool(object): - def __init__(self): pass @@ -28,50 +27,65 @@ class PyPrRoIPool(object): else: return data[h][w] - def _PrRoIPoolingMatCalculation(self, this_data, s_h, s_w, e_h, e_w, y0, x0, - y1, x1, h0, w0): + def _PrRoIPoolingMatCalculation( + self, this_data, s_h, s_w, e_h, e_w, y0, x0, y1, x1, h0, w0 + ): sum_out = 0.0 alpha = x0 - float(s_w) beta = y0 - float(s_h) lim_alpha = x1 - float(s_w) lim_beta = y1 - float(s_h) - tmp = (lim_alpha - 0.5 * lim_alpha * lim_alpha - alpha + - 0.5 * alpha * alpha) * (lim_beta - 0.5 * lim_beta * lim_beta - - beta + 0.5 * beta * beta) + tmp = ( + lim_alpha + - 0.5 * lim_alpha * lim_alpha + - alpha + + 0.5 * alpha * alpha + ) * (lim_beta - 0.5 * lim_beta * lim_beta - beta + 0.5 * beta * beta) sum_out += self._PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp alpha = float(e_w) - x1 lim_alpha = float(e_w) - x0 - tmp = (lim_alpha - 0.5 * lim_alpha * lim_alpha - alpha + - 0.5 * alpha * alpha) * (lim_beta - 0.5 * lim_beta * lim_beta - - beta + 0.5 * beta * beta) + tmp = ( + lim_alpha + - 0.5 * lim_alpha * lim_alpha + - alpha + + 0.5 * alpha * alpha + ) * (lim_beta - 0.5 * lim_beta * lim_beta - beta + 0.5 * beta * beta) sum_out += self._PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp alpha = x0 - float(s_w) beta = float(e_h) - y1 lim_alpha = x1 - float(s_w) lim_beta = float(e_h) - y0 - tmp = (lim_alpha - 0.5 * lim_alpha * lim_alpha - alpha + - 0.5 * alpha * alpha) * (lim_beta - 0.5 * lim_beta * lim_beta - - beta + 0.5 * beta * beta) + tmp = ( + lim_alpha + - 0.5 * lim_alpha * lim_alpha + - alpha + + 0.5 * alpha * alpha + ) * (lim_beta - 0.5 * lim_beta * lim_beta - beta + 0.5 * beta * beta) sum_out += self._PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp alpha = float(e_w) - x1 lim_alpha = float(e_w) - x0 - tmp = (lim_alpha - 0.5 * lim_alpha * lim_alpha - alpha + - 0.5 * alpha * alpha) * (lim_beta - 0.5 * lim_beta * lim_beta - - beta + 0.5 * beta * beta) + tmp = ( + lim_alpha + - 0.5 * lim_alpha * lim_alpha + - alpha + + 0.5 * alpha * alpha + ) * (lim_beta - 0.5 * lim_beta * lim_beta - beta + 0.5 * beta * beta) sum_out += self._PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp return sum_out - def compute(self, - x, - rois, - output_channels, - spatial_scale=0.1, - pooled_height=1, - pooled_width=1): + def compute( + self, + x, + rois, + output_channels, + spatial_scale=0.1, + pooled_height=1, + pooled_width=1, + ): ''' calculate the 
precise roi pooling values Note: This function is implements as pure python without any paddle concept involved @@ -134,14 +148,18 @@ class PyPrRoIPool(object): for w_iter in range(int(s_w), int(e_w)): for h_iter in range(int(s_h), int(e_h)): sum_out += self._PrRoIPoolingMatCalculation( - x_i[c_in], h_iter, w_iter, h_iter + 1, + x_i[c_in], + h_iter, + w_iter, + h_iter + 1, w_iter + 1, max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)), - min(win_end_h, - float(h_iter) + 1.0), - min(win_end_w, - float(w_iter + 1.0)), height, width) + min(win_end_h, float(h_iter) + 1.0), + min(win_end_w, float(w_iter + 1.0)), + height, + width, + ) out_data[i, c, ph, pw] = sum_out / win_size diff --git a/python/paddle/fluid/tests/unittests/rnn/convert.py b/python/paddle/fluid/tests/unittests/rnn/convert.py index b26db11e1d34ff107286c18921ea872bb71c9505..c41f80c7eabbf09b6486a038bf94363c8e990877 100644 --- a/python/paddle/fluid/tests/unittests/rnn/convert.py +++ b/python/paddle/fluid/tests/unittests/rnn/convert.py @@ -41,22 +41,28 @@ def convert_params_for_net(np_net, paddle_net): def convert_params_for_net_static(np_net, paddle_net, place): for np_layer, paddle_layer in zip(np_net, paddle_net): if hasattr(np_layer, "cell"): - convert_params_for_cell_static(np_layer.cell, paddle_layer.cell, - place) + convert_params_for_cell_static( + np_layer.cell, paddle_layer.cell, place + ) else: - convert_params_for_cell_static(np_layer.cell_fw, - paddle_layer.cell_fw, place) - convert_params_for_cell_static(np_layer.cell_bw, - paddle_layer.cell_bw, place) + convert_params_for_cell_static( + np_layer.cell_fw, paddle_layer.cell_fw, place + ) + convert_params_for_cell_static( + np_layer.cell_bw, paddle_layer.cell_bw, place + ) def get_params_for_cell(np_cell, num_layers, idx): state = np_cell.parameters - weight_list = [('{}.weight_{}'.format(num_layers, idx), state['weight_ih']), - ('{}.weight_{}'.format(num_layers, - idx + 1), state['weight_hh'])] - bias_list = [('{}.bias_{}'.format(num_layers, idx), state['bias_ih']), - ('{}.bias_{}'.format(num_layers, idx + 1), state['bias_hh'])] + weight_list = [ + ('{}.weight_{}'.format(num_layers, idx), state['weight_ih']), + ('{}.weight_{}'.format(num_layers, idx + 1), state['weight_hh']), + ] + bias_list = [ + ('{}.bias_{}'.format(num_layers, idx), state['bias_ih']), + ('{}.bias_{}'.format(num_layers, idx + 1), state['bias_hh']), + ] return weight_list, bias_list diff --git a/python/paddle/fluid/tests/unittests/rnn/rnn_numpy.py b/python/paddle/fluid/tests/unittests/rnn/rnn_numpy.py index fbdc3ec8a4854fe4edef859c407510059cbd73b6..20859f896d13fc9b9468349d9df149ee182c9f04 100644 --- a/python/paddle/fluid/tests/unittests/rnn/rnn_numpy.py +++ b/python/paddle/fluid/tests/unittests/rnn/rnn_numpy.py @@ -17,13 +17,11 @@ import math class LayerMixin(object): - def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) class LayerListMixin(LayerMixin): - def __init__(self, layers=None): self._layers = list(layers) if layers else [] @@ -35,34 +33,39 @@ class LayerListMixin(LayerMixin): class SimpleRNNCell(LayerMixin): - - def __init__(self, - input_size, - hidden_size, - bias=True, - nonlinearity="RNN_TANH", - dtype="float64"): + def __init__( + self, + input_size, + hidden_size, + bias=True, + nonlinearity="RNN_TANH", + dtype="float64", + ): self.input_size = input_size self.hidden_size = hidden_size self.bias = bias if nonlinearity == 'RNN_TANH': self.nonlinearity = np.tanh else: - self.nonlinearity = lambda x: np.maximum(x, 0.) 
+ self.nonlinearity = lambda x: np.maximum(x, 0.0) self.parameters = dict() std = 1.0 / math.sqrt(hidden_size) self.weight_ih = np.random.uniform( - -std, std, (hidden_size, input_size)).astype(dtype) + -std, std, (hidden_size, input_size) + ).astype(dtype) self.weight_hh = np.random.uniform( - -std, std, (hidden_size, hidden_size)).astype(dtype) + -std, std, (hidden_size, hidden_size) + ).astype(dtype) self.parameters['weight_ih'] = self.weight_ih self.parameters['weight_hh'] = self.weight_hh if bias: - self.bias_ih = np.random.uniform(-std, std, - (hidden_size, )).astype(dtype) - self.bias_hh = np.random.uniform(-std, std, - (hidden_size, )).astype(dtype) + self.bias_ih = np.random.uniform(-std, std, (hidden_size,)).astype( + dtype + ) + self.bias_hh = np.random.uniform(-std, std, (hidden_size,)).astype( + dtype + ) self.parameters['bias_ih'] = self.bias_ih self.parameters['bias_hh'] = self.bias_hh else: @@ -88,7 +91,6 @@ class SimpleRNNCell(LayerMixin): class GRUCell(LayerMixin): - def __init__(self, input_size, hidden_size, bias=True, dtype="float64"): self.input_size = input_size self.hidden_size = hidden_size @@ -96,16 +98,20 @@ class GRUCell(LayerMixin): self.parameters = dict() std = 1.0 / math.sqrt(hidden_size) self.weight_ih = np.random.uniform( - -std, std, (3 * hidden_size, input_size)).astype(dtype) + -std, std, (3 * hidden_size, input_size) + ).astype(dtype) self.weight_hh = np.random.uniform( - -std, std, (3 * hidden_size, hidden_size)).astype(dtype) + -std, std, (3 * hidden_size, hidden_size) + ).astype(dtype) self.parameters['weight_ih'] = self.weight_ih self.parameters['weight_hh'] = self.weight_hh if bias: - self.bias_ih = np.random.uniform(-std, std, - (3 * hidden_size)).astype(dtype) - self.bias_hh = np.random.uniform(-std, std, - (3 * hidden_size)).astype(dtype) + self.bias_ih = np.random.uniform( + -std, std, (3 * hidden_size) + ).astype(dtype) + self.bias_hh = np.random.uniform( + -std, std, (3 * hidden_size) + ).astype(dtype) self.parameters['bias_ih'] = self.bias_ih self.parameters['bias_hh'] = self.bias_hh else: @@ -137,7 +143,6 @@ class GRUCell(LayerMixin): class LSTMCell(LayerMixin): - def __init__(self, input_size, hidden_size, bias=True, dtype="float64"): self.input_size = input_size self.hidden_size = hidden_size @@ -145,16 +150,20 @@ class LSTMCell(LayerMixin): self.parameters = dict() std = 1.0 / math.sqrt(hidden_size) self.weight_ih = np.random.uniform( - -std, std, (4 * hidden_size, input_size)).astype(dtype) + -std, std, (4 * hidden_size, input_size) + ).astype(dtype) self.weight_hh = np.random.uniform( - -std, std, (4 * hidden_size, hidden_size)).astype(dtype) + -std, std, (4 * hidden_size, hidden_size) + ).astype(dtype) self.parameters['weight_ih'] = self.weight_ih self.parameters['weight_hh'] = self.weight_hh if bias: - self.bias_ih = np.random.uniform(-std, std, - (4 * hidden_size)).astype(dtype) - self.bias_hh = np.random.uniform(-std, std, - (4 * hidden_size)).astype(dtype) + self.bias_ih = np.random.uniform( + -std, std, (4 * hidden_size) + ).astype(dtype) + self.bias_hh = np.random.uniform( + -std, std, (4 * hidden_size) + ).astype(dtype) self.parameters['bias_ih'] = self.bias_ih self.parameters['bias_hh'] = self.bias_hh else: @@ -204,12 +213,14 @@ def update_state(mask, new, old): return tuple(map(lambda x, y: np.where(mask, x, y), new, old)) -def rnn(cell, - inputs, - initial_states, - sequence_length=None, - time_major=False, - is_reverse=False): +def rnn( + cell, + inputs, + initial_states, + sequence_length=None, + time_major=False, + 
is_reverse=False, +): if not time_major: inputs = np.transpose(inputs, [1, 0, 2]) if is_reverse: @@ -234,7 +245,7 @@ def rnn(cell, if mask is not None: m_t = mask[t] y, new_state = cell(x_t, state) - y = np.where(m_t, y, 0.) + y = np.where(m_t, y, 0.0) outputs.append(y) state = update_state(m_t, new_state, state) else: @@ -252,25 +263,27 @@ def rnn(cell, return outputs, final_state -def birnn(cell_fw, - cell_bw, - inputs, - initial_states, - sequence_length=None, - time_major=False): +def birnn( + cell_fw, + cell_bw, + inputs, + initial_states, + sequence_length=None, + time_major=False, +): states_fw, states_bw = initial_states - outputs_fw, states_fw = rnn(cell_fw, - inputs, - states_fw, - sequence_length, - time_major=time_major) - - outputs_bw, states_bw = rnn(cell_bw, - inputs, - states_bw, - sequence_length, - time_major=time_major, - is_reverse=True) + outputs_fw, states_fw = rnn( + cell_fw, inputs, states_fw, sequence_length, time_major=time_major + ) + + outputs_bw, states_bw = rnn( + cell_bw, + inputs, + states_bw, + sequence_length, + time_major=time_major, + is_reverse=True, + ) outputs = np.concatenate((outputs_fw, outputs_bw), -1) final_states = (states_fw, states_bw) @@ -332,7 +345,6 @@ def concat_states(states, bidirectional=False, state_components=1): class RNN(LayerMixin): - def __init__(self, cell, is_reverse=False, time_major=False): super(RNN, self).__init__() self.cell = cell @@ -343,124 +355,135 @@ class RNN(LayerMixin): self.time_major = time_major def forward(self, inputs, initial_states=None, sequence_length=None): - final_outputs, final_states = rnn(self.cell, - inputs, - initial_states=initial_states, - sequence_length=sequence_length, - time_major=self.time_major, - is_reverse=self.is_reverse) + final_outputs, final_states = rnn( + self.cell, + inputs, + initial_states=initial_states, + sequence_length=sequence_length, + time_major=self.time_major, + is_reverse=self.is_reverse, + ) return final_outputs, final_states class BiRNN(LayerMixin): - def __init__(self, cell_fw, cell_bw, time_major=False): super(BiRNN, self).__init__() self.cell_fw = cell_fw self.cell_bw = cell_bw self.time_major = time_major - def forward(self, - inputs, - initial_states=None, - sequence_length=None, - **kwargs): + def forward( + self, inputs, initial_states=None, sequence_length=None, **kwargs + ): if isinstance(initial_states, (list, tuple)): - assert len(initial_states) == 2, \ - "length of initial_states should be 2 when it is a list/tuple" + assert ( + len(initial_states) == 2 + ), "length of initial_states should be 2 when it is a list/tuple" else: initial_states = [initial_states, initial_states] - outputs, final_states = birnn(self.cell_fw, self.cell_bw, inputs, - initial_states, sequence_length, - self.time_major) + outputs, final_states = birnn( + self.cell_fw, + self.cell_bw, + inputs, + initial_states, + sequence_length, + self.time_major, + ) return outputs, final_states class RNNMixin(LayerListMixin): - def forward(self, inputs, initial_states=None, sequence_length=None): batch_index = 1 if self.time_major else 0 batch_size = inputs.shape[batch_index] dtype = inputs.dtype if initial_states is None: - state_shape = (self.num_layers * self.num_directions, batch_size, - self.hidden_size) + state_shape = ( + self.num_layers * self.num_directions, + batch_size, + self.hidden_size, + ) if self.state_components == 1: initial_states = np.zeros(state_shape, dtype) else: - initial_states = tuple([ - np.zeros(state_shape, dtype) - for _ in range(self.state_components) - ]) - - 
states = split_states(initial_states, self.num_directions == 2, - self.state_components) + initial_states = tuple( + [ + np.zeros(state_shape, dtype) + for _ in range(self.state_components) + ] + ) + + states = split_states( + initial_states, self.num_directions == 2, self.state_components + ) final_states = [] input_temp = inputs for i, rnn_layer in enumerate(self): if i > 0: input_temp = dropout(inputs, self.dropout) - outputs, final_state = rnn_layer(input_temp, states[i], - sequence_length) + outputs, final_state = rnn_layer( + input_temp, states[i], sequence_length + ) final_states.append(final_state) inputs = outputs - final_states = concat_states(final_states, self.num_directions == 2, - self.state_components) + final_states = concat_states( + final_states, self.num_directions == 2, self.state_components + ) return outputs, final_states class SimpleRNN(RNNMixin): - - def __init__(self, - input_size, - hidden_size, - num_layers=1, - nonlinearity="RNN_TANH", - direction="forward", - dropout=0., - time_major=False, - dtype="float64"): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + nonlinearity="RNN_TANH", + direction="forward", + dropout=0.0, + time_major=False, + dtype="float64", + ): super(SimpleRNN, self).__init__() bidirectional_list = ["bidirectional", "bidirect"] if direction in ["forward"]: is_reverse = False - cell = SimpleRNNCell(input_size, - hidden_size, - nonlinearity=nonlinearity, - dtype=dtype) + cell = SimpleRNNCell( + input_size, hidden_size, nonlinearity=nonlinearity, dtype=dtype + ) self.append(RNN(cell, is_reverse, time_major)) for i in range(1, num_layers): - cell = SimpleRNNCell(hidden_size, - hidden_size, - nonlinearity=nonlinearity, - dtype=dtype) + cell = SimpleRNNCell( + hidden_size, + hidden_size, + nonlinearity=nonlinearity, + dtype=dtype, + ) self.append(RNN(cell, is_reverse, time_major)) elif direction in bidirectional_list: - cell_fw = SimpleRNNCell(input_size, - hidden_size, - nonlinearity=nonlinearity, - dtype=dtype) - cell_bw = SimpleRNNCell(input_size, - hidden_size, - nonlinearity=nonlinearity, - dtype=dtype) + cell_fw = SimpleRNNCell( + input_size, hidden_size, nonlinearity=nonlinearity, dtype=dtype + ) + cell_bw = SimpleRNNCell( + input_size, hidden_size, nonlinearity=nonlinearity, dtype=dtype + ) self.append(BiRNN(cell_fw, cell_bw, time_major)) for i in range(1, num_layers): - cell_fw = SimpleRNNCell(2 * hidden_size, - hidden_size, - nonlinearity, - dtype=dtype) - cell_bw = SimpleRNNCell(2 * hidden_size, - hidden_size, - nonlinearity, - dtype=dtype) + cell_fw = SimpleRNNCell( + 2 * hidden_size, hidden_size, nonlinearity, dtype=dtype + ) + cell_bw = SimpleRNNCell( + 2 * hidden_size, hidden_size, nonlinearity, dtype=dtype + ) self.append(BiRNN(cell_fw, cell_bw, time_major)) else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction)) + "received direction = {}".format(direction) + ) self.input_size = input_size self.hidden_size = hidden_size @@ -472,15 +495,16 @@ class SimpleRNN(RNNMixin): class LSTM(RNNMixin): - - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - dropout=0., - time_major=False, - dtype="float64"): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + dropout=0.0, + time_major=False, + dtype="float64", + ): super(LSTM, self).__init__() bidirectional_list = ["bidirectional", "bidirect"] @@ -502,7 +526,8 @@ class LSTM(RNNMixin): else: raise ValueError( "direction 
should be forward, backward or bidirectional, " - "received direction = {}".format(direction)) + "received direction = {}".format(direction) + ) self.input_size = input_size self.hidden_size = hidden_size @@ -514,15 +539,16 @@ class LSTM(RNNMixin): class GRU(RNNMixin): - - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - dropout=0., - time_major=False, - dtype="float64"): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + dropout=0.0, + time_major=False, + dtype="float64", + ): super(GRU, self).__init__() bidirectional_list = ["bidirectional", "bidirect"] @@ -544,7 +570,8 @@ class GRU(RNNMixin): else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction)) + "received direction = {}".format(direction) + ) self.input_size = input_size self.hidden_size = hidden_size diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells.py index 33dca32b76cd5501052c9416134a7824ea8c61dc..ee2e7cfaa0f9b0a1f165e2ad4df699b41264f202 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells.py @@ -24,20 +24,19 @@ from convert import convert_params_for_cell class TestSimpleRNNCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestSimpleRNNCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): paddle.disable_static(self.place) rnn1 = SimpleRNNCell(16, 32, bias=self.bias) - rnn2 = paddle.nn.SimpleRNNCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.SimpleRNNCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) convert_params_for_cell(rnn1, rnn2) self.rnn1 = rnn1 @@ -65,7 +64,6 @@ class TestSimpleRNNCell(unittest.TestCase): np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5) def test_errors(self): - def test_zero_hidden_size(): cell = paddle.nn.SimpleRNNCell(-1, 0) @@ -78,20 +76,19 @@ class TestSimpleRNNCell(unittest.TestCase): class TestGRUCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestGRUCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): paddle.disable_static(self.place) rnn1 = GRUCell(16, 32, bias=self.bias) - rnn2 = paddle.nn.GRUCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.GRUCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) convert_params_for_cell(rnn1, rnn2) self.rnn1 = rnn1 @@ -119,7 +116,6 @@ class TestGRUCell(unittest.TestCase): np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5) def test_errors(self): - def test_zero_hidden_size(): cell = paddle.nn.GRUCell(-1, 0) @@ -132,19 +128,18 @@ class TestGRUCell(unittest.TestCase): class TestLSTMCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestLSTMCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): 
rnn1 = LSTMCell(16, 32, bias=self.bias) - rnn2 = paddle.nn.LSTMCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.LSTMCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) convert_params_for_cell(rnn1, rnn2) self.rnn1 = rnn1 @@ -159,9 +154,10 @@ class TestLSTMCell(unittest.TestCase): prev_c = np.random.randn(4, 32) y1, (h1, c1) = rnn1(x, (prev_h, prev_c)) - y2, (h2, - c2) = rnn2(paddle.to_tensor(x), - (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c))) + y2, (h2, c2) = rnn2( + paddle.to_tensor(x), + (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)), + ) np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5) np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5) @@ -177,7 +173,6 @@ class TestLSTMCell(unittest.TestCase): np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5) def test_errors(self): - def test_zero_hidden_size(): cell = paddle.nn.LSTMCell(-1, 0) @@ -191,8 +186,9 @@ class TestLSTMCell(unittest.TestCase): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \ - else ["cpu"] + devices = ( + ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ) for bias in [True, False]: for device in devices: for test_class in [TestSimpleRNNCell, TestGRUCell, TestLSTMCell]: diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py index b4a5887c593bd7a9995f589acd3bd2b7aaa0fab9..a9e2a013a25d52b8c1b1a689bc8130b75b8c2f2f 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py @@ -25,12 +25,12 @@ from rnn_numpy import SimpleRNNCell, LSTMCell, GRUCell class TestSimpleRNNCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestSimpleRNNCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): rnn1 = SimpleRNNCell(16, 32, bias=self.bias) @@ -39,10 +39,9 @@ class TestSimpleRNNCell(unittest.TestCase): sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.SimpleRNNCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.SimpleRNNCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) place = self.place exe = paddle.static.Executor(place) @@ -75,11 +74,15 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [-1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + [-1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data, init_h) feed_dict = {x_data.name: x, init_h.name: prev_h} @@ -103,17 +106,18 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data) feed_dict = 
{x_data.name: x} with paddle.static.scope_guard(scope): - y2, h2 = exe.run(mp, - feed=feed_dict, - fetch_list=[y, h], - use_prune=True) + y2, h2 = exe.run( + mp, feed=feed_dict, fetch_list=[y, h], use_prune=True + ) np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5) @@ -123,12 +127,12 @@ class TestSimpleRNNCell(unittest.TestCase): class TestGRUCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestGRUCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): rnn1 = GRUCell(16, 32, bias=self.bias) @@ -137,10 +141,9 @@ class TestGRUCell(unittest.TestCase): sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.GRUCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.GRUCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) place = self.place exe = paddle.static.Executor(place) @@ -174,11 +177,15 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [-1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + [-1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data, init_h) feed_dict = {x_data.name: x, init_h.name: prev_h} @@ -202,17 +209,18 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data) feed_dict = {x_data.name: x} with paddle.static.scope_guard(scope): - y2, h2 = exe.run(mp, - feed=feed_dict, - fetch_list=[y, h], - use_prune=True) + y2, h2 = exe.run( + mp, feed=feed_dict, fetch_list=[y, h], use_prune=True + ) np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5) @@ -222,12 +230,12 @@ class TestGRUCell(unittest.TestCase): class TestLSTMCell(unittest.TestCase): - def __init__(self, bias=True, place="cpu"): super(TestLSTMCell, self).__init__(methodName="runTest") self.bias = bias - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): rnn1 = LSTMCell(16, 32, bias=self.bias) @@ -236,10 +244,9 @@ class TestLSTMCell(unittest.TestCase): sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.LSTMCell(16, - 32, - bias_ih_attr=self.bias, - bias_hh_attr=self.bias) + rnn2 = paddle.nn.LSTMCell( + 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias + ) place = self.place exe = paddle.static.Executor(place) @@ -274,14 +281,20 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [-1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + 
[-1, 32], + dtype=paddle.framework.get_default_dtype(), + ) init_c = paddle.fluid.data( - "init_c", [-1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_c", + [-1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, (h, c) = rnn2(x_data, (init_h, init_c)) feed_dict = {x_data.name: x, init_h.name: prev_h, init_c.name: prev_c} @@ -306,17 +319,18 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, (h, c) = rnn2(x_data) feed_dict = {x_data.name: x} with paddle.static.scope_guard(scope): - y2, h2, c2 = exe.run(mp, - feed=feed_dict, - fetch_list=[y, h, c], - use_prune=True) + y2, h2, c2 = exe.run( + mp, feed=feed_dict, fetch_list=[y, h, c], use_prune=True + ) np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5) np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5) @@ -328,8 +342,9 @@ class TestLSTMCell(unittest.TestCase): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \ - else ["cpu"] + devices = ( + ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ) for bias in [True, False]: for device in devices: for test_class in [TestSimpleRNNCell, TestGRUCell, TestLSTMCell]: diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cudnn_params_packing.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cudnn_params_packing.py index f4dbc3bbbc7c8f4cafa9c416c8710a14ed0b0e1d..5949addbea16cdfdd66f13beefb6993bf2eef7c9 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cudnn_params_packing.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cudnn_params_packing.py @@ -18,15 +18,13 @@ from unittest import TestCase def create_model(): hidden_size = 32 - bilstm = paddle.nn.LSTM(hidden_size, - hidden_size, - num_layers=1, - direction='bidirectional') + bilstm = paddle.nn.LSTM( + hidden_size, hidden_size, num_layers=1, direction='bidirectional' + ) return bilstm class TestRNNProgramClone(TestCase): - def setUp(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets.py index bf33d5532014fb83ea94180ce3da04d7314d58b6..0e3407631dcc63a7e71dda93d255ac24f622ce99 100755 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets.py @@ -29,7 +29,6 @@ bidirectional_list = ["bidirectional", "bidirect"] class TestSimpleRNN(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestSimpleRNN, self).__init__("runTest") self.time_major = time_major @@ -42,16 +41,12 @@ class TestSimpleRNN(unittest.TestCase): # `__init__` to avoid using an error device set by another test case. 
place = paddle.set_device(self.place) paddle.disable_static(place) - rnn1 = SimpleRNN(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) - rnn2 = paddle.nn.SimpleRNN(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = SimpleRNN( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) + rnn2 = paddle.nn.SimpleRNN( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) convert_params_for_net(rnn1, rnn2) self.rnn1 = rnn1 @@ -117,7 +112,6 @@ class TestSimpleRNN(unittest.TestCase): class TestGRU(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestGRU, self).__init__("runTest") self.time_major = time_major @@ -130,16 +124,12 @@ class TestGRU(unittest.TestCase): # `__init__` to avoid using an error device set by another test case. place = paddle.set_device(self.place) paddle.disable_static(place) - rnn1 = GRU(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) - rnn2 = paddle.nn.GRU(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = GRU( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) + rnn2 = paddle.nn.GRU( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) convert_params_for_net(rnn1, rnn2) self.rnn1 = rnn1 @@ -205,7 +195,6 @@ class TestGRU(unittest.TestCase): class TestLSTM(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestLSTM, self).__init__("runTest") self.time_major = time_major @@ -218,16 +207,12 @@ class TestLSTM(unittest.TestCase): # `__init__` to avoid using an error device set by another test case. place = paddle.set_device(self.place) paddle.disable_static(place) - rnn1 = LSTM(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) - rnn2 = paddle.nn.LSTM(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = LSTM( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) + rnn2 = paddle.nn.LSTM( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) convert_params_for_net(rnn1, rnn2) self.rnn1 = rnn1 @@ -244,9 +229,10 @@ class TestLSTM(unittest.TestCase): prev_c = np.random.randn(2 * self.num_directions, 4, 32) y1, (h1, c1) = rnn1(x, (prev_h, prev_c)) - y2, (h2, - c2) = rnn2(paddle.to_tensor(x), - (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c))) + y2, (h2, c2) = rnn2( + paddle.to_tensor(x), + (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)), + ) np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5) np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5) np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5) @@ -305,14 +291,11 @@ def predict_test_util(place, mode, stop_gradient=True): np.random.seed(123) class Net(paddle.nn.Layer): - def __init__(self): super(Net, self).__init__() - self.rnn = getattr(paddle.nn, mode)(16, - 32, - 2, - direction="bidirectional", - dropout=0.1) + self.rnn = getattr(paddle.nn, mode)( + 16, 32, 2, direction="bidirectional", dropout=0.1 + ) def forward(self, input): return self.rnn(input) @@ -327,8 +310,9 @@ def predict_test_util(place, mode, stop_gradient=True): y = y * mask loss = paddle.mean(y) loss.backward() - optimizer = paddle.optimizer.Adam(learning_rate=0.1, - parameters=rnn.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=0.1, parameters=rnn.parameters() + ) optimizer.step() rnn.eval() y, _ = rnn(x) @@ -337,7 +321,8 @@ def predict_test_util(place, 
mode, stop_gradient=True): rnn.train() rnn = paddle.jit.to_static( - rnn, [paddle.static.InputSpec(shape=[None, None, 16], dtype=x.dtype)]) + rnn, [paddle.static.InputSpec(shape=[None, None, 16], dtype=x.dtype)] + ) temp_dir = tempfile.TemporaryDirectory() save_dirname = os.path.join(temp_dir.name, "./inference/%s_infer" % mode) @@ -348,13 +333,19 @@ def predict_test_util(place, mode, stop_gradient=True): new_scope = paddle.static.Scope() with paddle.static.scope_guard(new_scope): exe = paddle.static.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = paddle.static.load_inference_model(save_dirname, exe) - results = exe.run(inference_program, - feed={feed_target_names[0]: x.numpy()}, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(save_dirname, exe) + results = exe.run( + inference_program, + feed={feed_target_names[0]: x.numpy()}, + fetch_list=fetch_targets, + ) np.testing.assert_equal( - y.numpy(), results[0]) # eval results equal predict results + y.numpy(), results[0] + ) # eval results equal predict results paddle.disable_static() temp_dir.cleanup() @@ -362,8 +353,9 @@ def predict_test_util(place, mode, stop_gradient=True): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \ - else ["cpu"] + devices = ( + ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ) for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: for device in devices: diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py index 436bf0b6ea01d9c9e82dfd33d697a87c4bb69b45..e186f28c35cf9c5a17d0185d45a179d7635196e7 100755 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py @@ -29,7 +29,6 @@ bidirectional_list = ["bidirectional", "bidirect"] class TestSimpleRNN(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestSimpleRNN, self).__init__("runTest") self.time_major = time_major @@ -41,21 +40,21 @@ class TestSimpleRNN(unittest.TestCase): # Since `set_device` is global, set `set_device` in `setUp` rather than # `__init__` to avoid using an error device set by another test case. 
place = paddle.set_device(self.place) - rnn1 = SimpleRNN(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = SimpleRNN( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) mp = paddle.static.Program() sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.SimpleRNN(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn2 = paddle.nn.SimpleRNN( + 16, + 32, + 2, + time_major=self.time_major, + direction=self.direction, + ) exe = paddle.static.Executor(place) scope = paddle.fluid.Scope() @@ -90,11 +89,15 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [2 * self.num_directions, -1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + [2 * self.num_directions, -1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data, init_h) feed_dict = {x_data.name: x, init_h.name: prev_h} @@ -121,8 +124,10 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data) feed_dict = {x_data.name: x} @@ -151,8 +156,10 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: @@ -176,7 +183,6 @@ class TestSimpleRNN(unittest.TestCase): class TestGRU(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestGRU, self).__init__("runTest") self.time_major = time_major @@ -188,21 +194,21 @@ class TestGRU(unittest.TestCase): # Since `set_device` is global, set `set_device` in `setUp` rather than # `__init__` to avoid using an error device set by another test case. 
place = paddle.set_device(self.place) - rnn1 = GRU(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = GRU( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) mp = paddle.static.Program() sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.GRU(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn2 = paddle.nn.GRU( + 16, + 32, + 2, + time_major=self.time_major, + direction=self.direction, + ) exe = paddle.static.Executor(place) scope = paddle.fluid.Scope() @@ -238,11 +244,15 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [2 * self.num_directions, -1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + [2 * self.num_directions, -1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data, init_h) feed_dict = {x_data.name: x, init_h.name: prev_h} @@ -269,8 +279,10 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, h = rnn2(x_data) feed_dict = {x_data.name: x} @@ -299,8 +311,10 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: @@ -323,7 +337,6 @@ class TestGRU(unittest.TestCase): class TestLSTM(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestLSTM, self).__init__("runTest") self.time_major = time_major @@ -335,21 +348,21 @@ class TestLSTM(unittest.TestCase): # Since `set_device` is global, set `set_device` in `setUp` rather than # `__init__` to avoid using an error device set by another test case. 
place = paddle.set_device(self.place) - rnn1 = LSTM(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn1 = LSTM( + 16, 32, 2, time_major=self.time_major, direction=self.direction + ) mp = paddle.static.Program() sp = paddle.static.Program() with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - rnn2 = paddle.nn.LSTM(16, - 32, - 2, - time_major=self.time_major, - direction=self.direction) + rnn2 = paddle.nn.LSTM( + 16, + 32, + 2, + time_major=self.time_major, + direction=self.direction, + ) exe = paddle.static.Executor(place) scope = paddle.fluid.Scope() @@ -385,14 +398,20 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) init_h = paddle.fluid.data( - "init_h", [2 * self.num_directions, -1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_h", + [2 * self.num_directions, -1, 32], + dtype=paddle.framework.get_default_dtype(), + ) init_c = paddle.fluid.data( - "init_c", [2 * self.num_directions, -1, 32], - dtype=paddle.framework.get_default_dtype()) + "init_c", + [2 * self.num_directions, -1, 32], + dtype=paddle.framework.get_default_dtype(), + ) y, (h, c) = rnn2(x_data, (init_h, init_c)) feed_dict = {x_data.name: x, init_h.name: prev_h, init_c.name: prev_c} @@ -420,8 +439,10 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) y, (h, c) = rnn2(x_data) feed_dict = {x_data.name: x} @@ -451,8 +472,10 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.fluid.data( - "input", [-1, -1, 16], - dtype=paddle.framework.get_default_dtype()) + "input", + [-1, -1, 16], + dtype=paddle.framework.get_default_dtype(), + ) seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: @@ -478,8 +501,9 @@ class TestLSTM(unittest.TestCase): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \ - else ["cpu"] + devices = ( + ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ) for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: for device in devices: diff --git a/python/paddle/fluid/tests/unittests/rnn/test_wrappers.py b/python/paddle/fluid/tests/unittests/rnn/test_wrappers.py index 2442e6b7a3b8c43b3ebcab022c8bc64013ecb5c9..27e1293cb68954d01fcaba9cbc89a593b0f8abf7 100755 --- a/python/paddle/fluid/tests/unittests/rnn/test_wrappers.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_wrappers.py @@ -25,25 +25,29 @@ from rnn_numpy import GRUCell, RNN, BiRNN class TestRNNWrapper(unittest.TestCase): - def __init__(self, time_major=True, direction="forward", place="cpu"): super(TestRNNWrapper, self).__init__("runTest") self.time_major = time_major self.direction = direction - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): 
paddle.disable_static(self.place) cell1 = GRUCell(16, 32) cell2 = paddle.nn.GRUCell(16, 32) convert_params_for_cell(cell1, cell2) - rnn1 = RNN(cell1, - is_reverse=self.direction == "backward", - time_major=self.time_major) - rnn2 = paddle.nn.RNN(cell2, - is_reverse=self.direction == "backward", - time_major=self.time_major) + rnn1 = RNN( + cell1, + is_reverse=self.direction == "backward", + time_major=self.time_major, + ) + rnn2 = paddle.nn.RNN( + cell2, + is_reverse=self.direction == "backward", + time_major=self.time_major, + ) self.rnn1 = rnn1 self.rnn2 = rnn2 @@ -104,12 +108,12 @@ class TestRNNWrapper(unittest.TestCase): class TestBiRNNWrapper(unittest.TestCase): - def __init__(self, time_major=True, place="cpu"): super(TestBiRNNWrapper, self).__init__("runTest") self.time_major = time_major - self.place = paddle.CPUPlace() if place == "cpu" \ - else paddle.CUDAPlace(0) + self.place = ( + paddle.CPUPlace() if place == "cpu" else paddle.CUDAPlace(0) + ) def setUp(self): paddle.disable_static(self.place) @@ -138,7 +142,8 @@ class TestBiRNNWrapper(unittest.TestCase): y1, (fw_h1, bw_h1) = rnn1(x, (fw_prev_h, bw_prev_h)) y2, (fw_h2, bw_h2) = rnn2( paddle.to_tensor(x), - (paddle.to_tensor(fw_prev_h), paddle.to_tensor(bw_prev_h))) + (paddle.to_tensor(fw_prev_h), paddle.to_tensor(bw_prev_h)), + ) np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5) np.testing.assert_allclose(fw_h1, fw_h2.numpy(), atol=1e-8, rtol=1e-5) np.testing.assert_allclose(bw_h1, bw_h2.numpy(), atol=1e-8, rtol=1e-5) @@ -188,8 +193,9 @@ class TestBiRNNWrapper(unittest.TestCase): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \ - else ["cpu"] + devices = ( + ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ) for direction in ["forward", "backward"]: for device in devices: for time_major in [False]: diff --git a/python/paddle/fluid/tests/unittests/rpc/rpc_launch_sync_add.py b/python/paddle/fluid/tests/unittests/rpc/rpc_launch_sync_add.py index 3294f159b77708dc282a47ed212e43adf71d746f..794efb22fc7ae51aa58b7fae26fa7fdfa2703a49 100644 --- a/python/paddle/fluid/tests/unittests/rpc/rpc_launch_sync_add.py +++ b/python/paddle/fluid/tests/unittests/rpc/rpc_launch_sync_add.py @@ -59,11 +59,11 @@ def main(): shape=(10 * world_size, 100), ) for i in range(world_size): - a = mmap_data1[i * 10:(i + 1) * 10, :] - b = mmap_data2[i * 10:(i + 1) * 10, :] + a = mmap_data1[i * 10 : (i + 1) * 10, :] + b = mmap_data2[i * 10 : (i + 1) * 10, :] args = (a, b) out = rpc_add(worker_name(i), args) - mmap_out[i * 10:(i + 1) * 10, :] = out[:] + mmap_out[i * 10 : (i + 1) * 10, :] = out[:] dist.rpc.shutdown() diff --git a/python/paddle/fluid/tests/unittests/rpc/test_rpc.py b/python/paddle/fluid/tests/unittests/rpc/test_rpc.py index c6c2eb43be204acd77f56cb008fa9e7829578127..67e8b65423c94b1b933d2057e95bb3525c14d450 100644 --- a/python/paddle/fluid/tests/unittests/rpc/test_rpc.py +++ b/python/paddle/fluid/tests/unittests/rpc/test_rpc.py @@ -35,7 +35,6 @@ def paddle_add(a, b): class TestMultiProcessRpc(RpcTestBase): - def test_one_server_sync_paddle_add(self): a = np.random.random((10, 100)) b = np.random.random((10, 100)) @@ -78,7 +77,6 @@ class TestMultiProcessRpc(RpcTestBase): class TestSingleProcessRpc(RpcTestBase): - def setUp(self): self._port_set = set() master_endpoint = "127.0.0.1:{}".format(self._find_free_port()) @@ -123,7 +121,6 @@ class TestSingleProcessRpc(RpcTestBase): class RpcLaunchTest(RpcLaunchTestBase): - def 
test_sync_rpc_paddle_add1(self): nnodes = 2 nproc_per_node = 1 diff --git a/python/paddle/fluid/tests/unittests/rpc/test_rpc_base.py b/python/paddle/fluid/tests/unittests/rpc/test_rpc_base.py index 95a59267d530ccb636849e7d2de2d1690af74238..daf50b8884505630e6c12e9d2bd0ffff06ce3b54 100644 --- a/python/paddle/fluid/tests/unittests/rpc/test_rpc_base.py +++ b/python/paddle/fluid/tests/unittests/rpc/test_rpc_base.py @@ -64,10 +64,9 @@ def run_rpc_sync_master_working( ) if dist.get_rank() == 0: for i in range(1, dist.get_rank()): - res = dist.rpc.rpc_sync(worker_name(i), - fn, - args=args, - kwargs=kwargs) + res = dist.rpc.rpc_sync( + worker_name(i), fn, args=args, kwargs=kwargs + ) queue.put(res) dist.rpc.shutdown() @@ -109,16 +108,14 @@ def run_rpc_async_master_working( ) if dist.get_rank() == 0: for i in range(1, dist.get_rank()): - res = dist.rpc.rpc_async(worker_name(i), - fn, - args=args, - kwargs=kwargs) + res = dist.rpc.rpc_async( + worker_name(i), fn, args=args, kwargs=kwargs + ) queue.put(res.wait()) dist.rpc.shutdown() class RpcTestBase(unittest.TestCase): - def setUp(self): self._port_set = set() print("RPC setUp...") @@ -129,10 +126,10 @@ class RpcTestBase(unittest.TestCase): print("RPC tearDown...") def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(("", 0)) return s.getsockname()[1] @@ -162,7 +159,8 @@ class RpcTestBase(unittest.TestCase): fn_args, fn_kwargs, ), - )) + ) + ) else: self.processes.append( Process( @@ -176,13 +174,13 @@ class RpcTestBase(unittest.TestCase): fn_args, fn_kwargs, ), - )) + ) + ) [p.start() for p in self.processes] return queues class RpcLaunchTestBase(unittest.TestCase): - def setUp(self): self._port_set = set() print("Launch RPC setUp...") @@ -192,10 +190,10 @@ class RpcLaunchTestBase(unittest.TestCase): print("Launch RPC tearDown...") def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(("", 0)) return s.getsockname()[1] @@ -221,8 +219,8 @@ class RpcLaunchTestBase(unittest.TestCase): for i in range(nnodes * nproc_per_node): a = np.random.random((10, 100)).astype(np.float32) b = np.random.random((10, 100)).astype(np.float32) - mmap_data1[i * 10:(i + 1) * 10, :] = a - mmap_data2[i * 10:(i + 1) * 10, :] = b + mmap_data1[i * 10 : (i + 1) * 10, :] = a + mmap_data2[i * 10 : (i + 1) * 10, :] = b return mmap_data1, mmap_data2 def remove_data(self): @@ -234,8 +232,15 @@ class RpcLaunchTestBase(unittest.TestCase): log_dir = "log" tr_cmd = "python -m paddle.distributed.launch --master {} --rank {} --nnodes {} --nproc_per_node {} --run_mode rpc {} --log_dir {}" cmds = [ - tr_cmd.format(master_endpoint, rank, nnodes, nproc_per_node, - model_file, log_dir) for rank in range(nnodes) + tr_cmd.format( + master_endpoint, + rank, + nnodes, + nproc_per_node, + model_file, + log_dir, + ) + for rank in range(nnodes) ] processes = [subprocess.Popen(cmd.strip().split()) for cmd in cmds] [proc.communicate() for proc in processes] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py index 4b60a0cbcfc514b03d7b3cbbfc496618e3763b27..80c975221ad49c19a5379077dd4c37d2be65f1c7 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py +++ 
b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py @@ -24,7 +24,6 @@ from paddle import fluid class TestSequenceConcat(OpTest): - def setLoD(self): self.lod1 = [7, 3] self.lod2 = [12, 8] @@ -35,8 +34,14 @@ class TestSequenceConcat(OpTest): x2 = np.random.random(size=(20, 80)).astype('float64') self.setLoD() - out = np.concatenate((x1[0:self.lod1[0]], x2[0:self.lod2[0]], - x1[self.lod1[0]:], x2[self.lod2[0]:])) + out = np.concatenate( + ( + x1[0 : self.lod1[0]], + x2[0 : self.lod2[0]], + x1[self.lod1[0] :], + x2[self.lod2[0] :], + ) + ) self.op_type = "sequence_concat" self.inputs = { @@ -52,7 +57,6 @@ class TestSequenceConcat(OpTest): class TestSequenceConcatCase2(TestSequenceConcat): - def setLoD(self): self.lod1 = [10, 0] self.lod2 = [12, 8] @@ -60,7 +64,6 @@ class TestSequenceConcatCase2(TestSequenceConcat): class TestSequenceConcatCase3(TestSequenceConcat): - def setLoD(self): self.lod1 = [10, 0] self.lod2 = [20, 0] @@ -68,7 +71,6 @@ class TestSequenceConcatCase3(TestSequenceConcat): class TestSequenceConcatCase4(TestSequenceConcat): - def setLoD(self): self.lod1 = [0, 10] self.lod2 = [0, 20] @@ -76,7 +78,6 @@ class TestSequenceConcatCase4(TestSequenceConcat): class TestSequenceConcatCase5(TestSequenceConcat): - def setLoD(self): self.lod1 = [0, 10] self.lod2 = [20, 0] @@ -84,9 +85,7 @@ class TestSequenceConcatCase5(TestSequenceConcat): class TestSequenceConcatOpError(unittest.TestCase): - def test_errors(self): - def test_input_list(): # the input type must be list x_data = fluid.layers.data(name='x', shape=[4], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py index 047ec6e294358fff6d2f4c0dcacef2e3b00915e0..78fc6089cfe73fdfe22df36b005bd76219dde2c0 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py @@ -21,13 +21,15 @@ sys.path.append("../") from op_test import OpTest -def seqconv(x, - lod, - filter, - context_length, - context_start, - padding_trainable=False, - padding_data=None): +def seqconv( + x, + lod, + filter, + context_length, + context_start, + padding_trainable=False, + padding_data=None, +): [T, M] = x.shape col = np.zeros((T, context_length * M)).astype('float32') offset = [0] @@ -42,58 +44,79 @@ def seqconv(x, out_end = offset[i + 1] if in_begin < offset[i]: pad_size = np.min( - [offset[i] - in_begin, offset[i + 1] - offset[i]]) + [offset[i] - in_begin, offset[i + 1] - offset[i]] + ) if padding_trainable: - sub_w = padding_data[j:j + pad_size, :] - col[offset[i]:offset[i] + pad_size, - j * M:(j + 1) * M] = sub_w + sub_w = padding_data[j : j + pad_size, :] + col[ + offset[i] : offset[i] + pad_size, j * M : (j + 1) * M + ] = sub_w out_begin = offset[i] + pad_size in_begin = offset[i] if in_end > offset[i + 1]: pad_size = np.min( - [in_end - offset[i + 1], offset[i + 1] - offset[i]]) + [in_end - offset[i + 1], offset[i + 1] - offset[i]] + ) if padding_trainable: - sub_w = padding_data[begin_pad + context_start + j - - pad_size:begin_pad + context_start + - j, :] - col[offset[i + 1] - pad_size:offset[i + 1], - j * M:(j + 1) * M] = sub_w + sub_w = padding_data[ + begin_pad + + context_start + + j + - pad_size : begin_pad + + context_start + + j, + :, + ] + col[ + offset[i + 1] - pad_size : offset[i + 1], + j * M : (j + 1) * M, + ] = sub_w in_end = offset[i + 1] out_end = offset[i + 1] - pad_size if in_end <= in_begin: continue in_sub = 
x[in_begin:in_end, :] - col[out_begin:out_end, j * M:(j + 1) * M] += in_sub + col[out_begin:out_end, j * M : (j + 1) * M] += in_sub return np.dot(col, filter) class TestSeqProject(OpTest): - def setUp(self): self.init_test_case() self.op_type = 'sequence_conv' - if self.context_length == 1 \ - and self.context_start == 0 \ - and self.padding_trainable: - print("If context_start is 0 " \ - "and context_length is 1," \ - " padding_trainable should be false.") + if ( + self.context_length == 1 + and self.context_start == 0 + and self.padding_trainable + ): + print( + "If context_start is 0 " + "and context_length is 1," + " padding_trainable should be false." + ) return # one level, batch size x = np.random.uniform( - 0.1, 1, [self.input_size[0], self.input_size[1]]).astype('float32') - w = np.random.uniform(0.1, 1, [ - self.context_length * self.input_size[1], self.output_represention - ]).astype('float32') + 0.1, 1, [self.input_size[0], self.input_size[1]] + ).astype('float32') + w = np.random.uniform( + 0.1, + 1, + [ + self.context_length * self.input_size[1], + self.output_represention, + ], + ).astype('float32') begin_pad = np.max([0, -self.context_start]) end_pad = np.max([0, self.context_start + self.context_length - 1]) total_pad = begin_pad + end_pad padding_data = np.random.uniform( - 0.1, 1, [total_pad, self.input_size[1]]).astype('float32') + 0.1, 1, [total_pad, self.input_size[1]] + ).astype('float32') self.pad_data = padding_data self.inputs = { 'X': (x, self.lod), @@ -113,10 +136,17 @@ class TestSeqProject(OpTest): 'contextStart': self.context_start, 'contextLength': self.context_length, 'paddingTrainable': self.padding_trainable, - 'contextStride': self.context_stride + 'contextStride': self.context_stride, } - out = seqconv(x, self.lod, w, self.context_length, self.context_start, - self.padding_trainable, self.pad_data) + out = seqconv( + x, + self.lod, + w, + self.context_length, + self.context_start, + self.padding_trainable, + self.pad_data, + ) self.outputs = {'Out': out} def test_check_output(self): @@ -124,48 +154,58 @@ class TestSeqProject(OpTest): def test_check_grad(self): if self.padding_trainable: - self.check_grad(set(self.inputs_val), - 'Out', - max_relative_error=0.05) + self.check_grad( + set(self.inputs_val), 'Out', max_relative_error=0.05 + ) def test_check_grad_input(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.05, - no_grad_set=set(self.inputs_val_no_x)) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.05, + no_grad_set=set(self.inputs_val_no_x), + ) def test_check_grad_padding_data(self): if self.padding_trainable: - self.check_grad(['PaddingData'], - 'Out', - no_grad_set=set(['X', 'Filter'])) + self.check_grad( + ['PaddingData'], 'Out', no_grad_set=set(['X', 'Filter']) + ) def test_check_grad_Filter(self): - self.check_grad(['Filter'], - 'Out', - max_relative_error=0.05, - no_grad_set=set(self.inputs_val_no_f)) + self.check_grad( + ['Filter'], + 'Out', + max_relative_error=0.05, + no_grad_set=set(self.inputs_val_no_f), + ) def test_check_grad_input_filter(self): if self.padding_trainable: - self.check_grad(['X', 'Filter'], - 'Out', - max_relative_error=0.05, - no_grad_set=set(['PaddingData'])) + self.check_grad( + ['X', 'Filter'], + 'Out', + max_relative_error=0.05, + no_grad_set=set(['PaddingData']), + ) def test_check_grad_padding_input(self): if self.padding_trainable: - self.check_grad(self.inputs_val_no_f, - 'Out', - max_relative_error=0.05, - no_grad_set=set(['Filter'])) + self.check_grad( + self.inputs_val_no_f, + 
'Out', + max_relative_error=0.05, + no_grad_set=set(['Filter']), + ) def test_check_grad_padding_filter(self): if self.padding_trainable: - self.check_grad(self.inputs_val_no_x, - 'Out', - max_relative_error=0.05, - no_grad_set=set(['X'])) + self.check_grad( + self.inputs_val_no_x, + 'Out', + max_relative_error=0.05, + no_grad_set=set(['X']), + ) def init_test_case(self): self.input_row = 11 @@ -184,7 +224,6 @@ class TestSeqProject(OpTest): class TestSeqProjectCase1(TestSeqProject): - def init_test_case(self): self.input_row = 11 self.context_start = -1 @@ -202,7 +241,6 @@ class TestSeqProjectCase1(TestSeqProject): class TestSeqProjectCase2Len0(TestSeqProject): - def init_test_case(self): self.input_row = 11 self.context_start = -1 @@ -220,7 +258,6 @@ class TestSeqProjectCase2Len0(TestSeqProject): class TestSeqProjectCase3(TestSeqProject): - def init_test_case(self): self.input_row = 25 self.context_start = 2 @@ -231,8 +268,9 @@ class TestSeqProjectCase3(TestSeqProject): self.input_size = [self.input_row, 25] idx = list(range(self.input_size[0])) del idx[0] - offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() + - [self.input_size[0]]] + offset_lod = [ + [0] + np.sort(random.sample(idx, 8)).tolist() + [self.input_size[0]] + ] self.lod = [[]] # convert from offset-based lod to length-based lod for i in range(len(offset_lod[0]) - 1): @@ -241,19 +279,18 @@ class TestSeqProjectCase3(TestSeqProject): class TestSeqConvApi(unittest.TestCase): - def test_api(self): import paddle.fluid as fluid x = fluid.layers.data('x', shape=[32], lod_level=1) - y = fluid.layers.sequence_conv(input=x, - num_filters=2, - filter_size=3, - padding_start=None) + y = fluid.layers.sequence_conv( + input=x, num_filters=2, filter_size=3, padding_start=None + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place) + np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_enumerate_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_enumerate_op.py index 68c6d7d75359d2c470992aaafa18609d00a182c2..4f75fc08bef693873e1cdd08c778152d7c5f246b 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_enumerate_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_enumerate_op.py @@ -30,15 +30,15 @@ def sequence_enumerate(input_seq, in_lod, win_size, pad_value): single_seq = [] for word_idx in range(win_size): word_pos = idx + word_idx - dat = input_seq[word_pos] if word_pos < lod0[i+1] \ - else pad_value + dat = ( + input_seq[word_pos] if word_pos < lod0[i + 1] else pad_value + ) single_seq.append(dat) out_seq.append(single_seq) return out_seq class TestSequenceEnumerateOp(OpTest): - def setUp(self): self.op_type = "sequence_enumerate" self.init_test_case() @@ -54,68 +54,69 @@ class TestSequenceEnumerateOp(OpTest): self.lod = [[9, 4, 11, 6]] self.win_size = 2 self.pad_value = 0 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int32") class TesSequenceEnumerateOpInt64(TestSequenceEnumerateOp): - def init_test_case(self): self.in_seq = np.random.randint(0, 10, (30, 1)).astype("int64") self.lod = [[9, 4, 11, 6]] 
self.win_size = 2 self.pad_value = 0 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int64") class TestSequenceEnumerateOpLargeWinSize(TestSequenceEnumerateOp): - def init_test_case(self): self.in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") self.lod = [[9, 4, 11, 6]] self.win_size = 5 self.pad_value = 0 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int32") class TestSequenceEnumerateOpMaxWinSize(TestSequenceEnumerateOp): - def init_test_case(self): self.in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") self.lod = [[9, 4, 11, 6]] self.win_size = 30 self.pad_value = 0 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int32") class TestSequenceEnumerateOpLargePadValue(TestSequenceEnumerateOp): - def init_test_case(self): self.in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") self.lod = [[9, 4, 11, 6]] self.win_size = 5 self.pad_value = 5 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int32") class TestSequenceEnumerateOpLargePadValueSeqLen0(TestSequenceEnumerateOp): - def init_test_case(self): self.in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") self.lod = [[0, 14, 0, 16, 0]] self.win_size = 5 self.pad_value = 5 - out_seq = sequence_enumerate(self.in_seq, self.lod, self.win_size, - self.pad_value) + out_seq = sequence_enumerate( + self.in_seq, self.lod, self.win_size, self.pad_value + ) self.out_seq = np.array(out_seq).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_erase_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_erase_op.py index 99d72e11ff17975354e1576742275e03ae0dd91a..4c4ea068117aab4116c6dedd635902971f90ff49 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_erase_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_erase_op.py @@ -26,7 +26,7 @@ def sequence_erase(in_seq, lod0, tokens): offset = 0 for i in range(0, len(lod0)): num_out = 0 - for dat in in_seq[offset:(offset + lod0[i])]: + for dat in in_seq[offset : (offset + lod0[i])]: if dat not in tokens: out_seq.append(dat) num_out += 1 @@ -36,7 +36,6 @@ def sequence_erase(in_seq, lod0, tokens): class TestSequenceEraseOpInt32(OpTest): - def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") @@ -52,7 +51,6 @@ class TestSequenceEraseOpInt32(OpTest): class TestSequenceEraseOpInt32LoD2(OpTest): - def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") @@ -68,7 +66,6 @@ class TestSequenceEraseOpInt32LoD2(OpTest): class TestSequenceEraseOpInt64(OpTest): - def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int64") @@ -84,7 +81,6 @@ class TestSequenceEraseOpInt64(OpTest): class TestSequenceEraseOpInt64SeqLen0(OpTest): - def setUp(self): self.op_type = "sequence_erase" in_seq = 
np.random.randint(0, 10, (30, 1)).astype("int64") @@ -100,7 +96,6 @@ class TestSequenceEraseOpInt64SeqLen0(OpTest): class TestSequenceEraseOpEmpty(OpTest): - def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand.py index 3d6f66f2a55463d66f0b9fce0077efcb8d230ead..4144812fb68e3955b634c87e99482b9da6ba3169 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestSequenceExpand(OpTest): - def set_data(self): x_data = np.random.uniform(0.1, 1, [3, 40]).astype('float64') y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float64') @@ -38,7 +37,7 @@ class TestSequenceExpand(OpTest): else: ref_level = len(y_lod) - 1 - out = np.zeros(shape=((0, ) + x_data.shape[1:]), dtype=x_data.dtype) + out = np.zeros(shape=((0,) + x_data.shape[1:]), dtype=x_data.dtype) if x_lod is None: # x_idx = [i for i in xrange(x_data.shape[0] + 1)] @@ -53,7 +52,7 @@ class TestSequenceExpand(OpTest): x_len = x_idx[i] if repeat_num > 0: - x_sub = x_data[offset:(offset + x_len), :] + x_sub = x_data[offset : (offset + x_len), :] stacked_x_sub = x_sub for r in range(repeat_num - 1): stacked_x_sub = np.vstack((stacked_x_sub, x_sub)) @@ -81,7 +80,6 @@ class TestSequenceExpand(OpTest): class TestSequenceExpandCase1(TestSequenceExpand): - def set_data(self): x_data = np.random.uniform(0.1, 1, [5, 20]).astype('float64') y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float64') @@ -91,7 +89,6 @@ class TestSequenceExpandCase1(TestSequenceExpand): class TestSequenceExpandCase2(TestSequenceExpand): - def set_data(self): x_data = np.random.uniform(0.1, 1, [1, 2, 50]).astype('float64') x_lod = [[1]] @@ -102,7 +99,6 @@ class TestSequenceExpandCase2(TestSequenceExpand): class TestSequenceExpandCase3(TestSequenceExpand): - def set_data(self): x_data = np.random.uniform(0.1, 1, [4, 25]).astype('float64') x_lod = [[1, 1, 1, 1]] @@ -112,7 +108,6 @@ class TestSequenceExpandCase3(TestSequenceExpand): class TestSequenceExpandCase4(TestSequenceExpand): - def set_data(self): data = np.random.uniform(0.1, 1, [5 * 20, 1]) x_data = np.array(data).reshape([5, 20]).astype('float64') @@ -123,7 +118,6 @@ class TestSequenceExpandCase4(TestSequenceExpand): class TestSequenceExpandCase5(TestSequenceExpand): - def set_data(self): x_data = np.random.uniform(0.1, 1, [6, 20]).astype('float64') y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float64') @@ -133,7 +127,6 @@ class TestSequenceExpandCase5(TestSequenceExpand): class TestSequenceExpandCase6(TestSequenceExpand): - def set_data(self): x_data = np.random.uniform(0.1, 1, [4, 25]).astype('float64') x_lod = [[1, 1, 0, 1, 1]] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py index 00b6ca518e8fde3be3e787a1d11b2ad13ab96e80..b1d6bc915ec82588e2d201bf74206e15d38dc5bd 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_expand_as.py @@ -23,7 +23,6 @@ from paddle.fluid import Program, program_guard class TestSequenceExpandAs(OpTest): - def setUp(self): self.op_type = 'sequence_expand_as' self.set_data() @@ -60,7 +59,6 @@ class TestSequenceExpandAs(OpTest): 
class TestSequenceExpandAsCase1(TestSequenceExpandAs): - def set_data(self): x_data = np.random.uniform(0.1, 1, [5, 20]).astype('float64') x_lod = [[2, 3]] @@ -70,7 +68,6 @@ class TestSequenceExpandAsCase1(TestSequenceExpandAs): class TestSequenceExpandAsCase2(TestSequenceExpandAs): - def set_data(self): x_data = np.random.uniform(0.1, 1, [5, 20]).astype('float64') x_lod = [[2, 3]] @@ -80,7 +77,6 @@ class TestSequenceExpandAsCase2(TestSequenceExpandAs): class TestSequenceExpandAsCase3(TestSequenceExpandAs): - def set_data(self): x_data = np.random.uniform(0.1, 1, [1, 2, 50]).astype('float64') x_lod = [[1]] @@ -90,7 +86,6 @@ class TestSequenceExpandAsCase3(TestSequenceExpandAs): class TestSequenceExpandAsOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input x must be Variable diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py index 25589f2ad9b3e30a91aa9634cccf49032b9a2948..a7771634980c220399a04c8d77b3f0b37af3f42f 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py @@ -21,7 +21,6 @@ sys.path.append("../") class TestSequenceFirstStepOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -34,11 +33,13 @@ class TestSequenceFirstStepOpError(unittest.TestCase): def test_input_dtype(): # the dtype of input must be int64 - type_data = fluid.layers.data(name='type_data', - shape=[7, 1], - append_batch_size=False, - dtype='int64', - lod_level=1) + type_data = fluid.layers.data( + name='type_data', + shape=[7, 1], + append_batch_size=False, + dtype='int64', + lod_level=1, + ) fluid.layers.sequence_last_step(type_data) self.assertRaises(TypeError, test_input_dtype) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py index 0ddc348044d63da24f47bbbefa626c99a337fe71..ca5ff76a8e47e007234643690026d2aad259726e 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py @@ -21,7 +21,6 @@ sys.path.append("../") class TestSequenceLastStepOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -34,11 +33,13 @@ class TestSequenceLastStepOpError(unittest.TestCase): def test_input_dtype(): # the dtype of input must be int64 - type_data = fluid.layers.data(name='type_data', - shape=[7, 1], - append_batch_size=False, - dtype='int64', - lod_level=1) + type_data = fluid.layers.data( + name='type_data', + shape=[7, 1], + append_batch_size=False, + dtype='int64', + lod_level=1, + ) fluid.layers.sequence_last_step(type_data) self.assertRaises(TypeError, test_input_dtype) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_mask.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_mask.py index f240bc9900480b3e60791d263c4ee7e14ff1cc59..24210c318c2840e4536202b19c6068305aa987b8 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_mask.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_mask.py @@ -13,7 +13,11 @@ # limitations under the License. 
import paddle.fluid as fluid -from paddle.fluid.framework import convert_np_dtype_to_dtype_, Program, program_guard +from paddle.fluid.framework import ( + convert_np_dtype_to_dtype_, + Program, + program_guard, +) import numpy as np import unittest import sys @@ -23,7 +27,6 @@ from op_test import OpTest class SequenceMaskTestBase(OpTest): - def initDefaultParameters(self): self.op_type = 'sequence_mask' self.maxlen = 10 @@ -43,19 +46,19 @@ class SequenceMaskTestBase(OpTest): self.outputs = {'Y': self.calc_ground_truth_mask()} self.attrs = { 'maxlen': self.maxlen, - 'out_dtype': convert_np_dtype_to_dtype_(self.mask_dtype) + 'out_dtype': convert_np_dtype_to_dtype_(self.mask_dtype), } def calc_ground_truth_mask(self): maxlen = np.max(self.x) if self.maxlen < 0 else self.maxlen - shape = self.x.shape + (maxlen, ) - index_broadcast = np.broadcast_to(np.reshape( - range(maxlen), newshape=[1] * self.x.ndim + [-1]), - shape=shape) - x_broadcast = np.broadcast_to(np.reshape(self.x, - newshape=self.x.shape + - (-1, )), - shape=shape) + shape = self.x.shape + (maxlen,) + index_broadcast = np.broadcast_to( + np.reshape(range(maxlen), newshape=[1] * self.x.ndim + [-1]), + shape=shape, + ) + x_broadcast = np.broadcast_to( + np.reshape(self.x, newshape=self.x.shape + (-1,)), shape=shape + ) return (index_broadcast < x_broadcast).astype(self.mask_dtype) def test_check_output(self): @@ -63,43 +66,36 @@ class SequenceMaskTestBase(OpTest): class SequenceMaskTest1(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'bool' class SequenceMaskTest2(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'uint8' class SequenceMaskTest3(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'int32' class SequenceMaskTest4(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'float32' class SequenceMaskTest5(SequenceMaskTestBase): - def initParameters(self): self.mask_dtype = 'float64' class SequenceMaskTest6(SequenceMaskTestBase): - def initParameters(self): self.maxlen = -1 class SequenceMaskTestBase_tensor_attr(OpTest): - def initDefaultParameters(self): self.op_type = 'sequence_mask' self.maxlen = 10 @@ -122,14 +118,14 @@ class SequenceMaskTestBase_tensor_attr(OpTest): def calc_ground_truth_mask(self): maxlen = np.max(self.x) if self.maxlen < 0 else self.maxlen - shape = self.x.shape + (maxlen, ) - index_broadcast = np.broadcast_to(np.reshape( - range(maxlen), newshape=[1] * self.x.ndim + [-1]), - shape=shape) - x_broadcast = np.broadcast_to(np.reshape(self.x, - newshape=self.x.shape + - (-1, )), - shape=shape) + shape = self.x.shape + (maxlen,) + index_broadcast = np.broadcast_to( + np.reshape(range(maxlen), newshape=[1] * self.x.ndim + [-1]), + shape=shape, + ) + x_broadcast = np.broadcast_to( + np.reshape(self.x, newshape=self.x.shape + (-1,)), shape=shape + ) return (index_broadcast < x_broadcast).astype(self.mask_dtype) def test_check_output(self): @@ -137,37 +133,31 @@ class SequenceMaskTestBase_tensor_attr(OpTest): class SequenceMaskTest1_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'bool' class SequenceMaskTest2_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'uint8' class SequenceMaskTest3_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'int32' class SequenceMaskTest4_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'float32' class 
SequenceMaskTest5_tensor_attr(SequenceMaskTestBase_tensor_attr): - def initParameters(self): self.mask_dtype = 'float64' class TestSequenceMaskOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.uniform(1, 5, [4]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py index 934e0ebe8fd782a0cc012c43cacfa000c6577ed3..d04091d9332b9f09157961e7829b1c2e10ae5db6 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py @@ -24,7 +24,6 @@ import paddle.fluid.core as core class TestSequencePadOp(OpTest): - def set_attr(self): self.x_shape = [12, 10] self.x_len_lod = [[2, 3, 4, 3]] @@ -37,7 +36,7 @@ class TestSequencePadOp(OpTest): pad_value_data = np.array(self.pad_value).astype(self.dtype) self.inputs = { 'X': (x_data, self.x_len_lod), - 'PadValue': pad_value_data + 'PadValue': pad_value_data, } self.attrs = {'padded_length': self.padded_length} @@ -54,9 +53,10 @@ class TestSequencePadOp(OpTest): # do padding x_data = self.inputs['X'][0] pad_value_data = self.inputs['PadValue'] - if pad_value_data.shape == (1, ): - pad_value_data = np.broadcast_to(pad_value_data, - shape=x_data.shape[1:]) + if pad_value_data.shape == (1,): + pad_value_data = np.broadcast_to( + pad_value_data, shape=x_data.shape[1:] + ) padded_sequences = [] start_idx = 0 for l in x_len_lod_0: @@ -86,7 +86,6 @@ class TestSequencePadOp(OpTest): class TestSequencePadOp2(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 10] self.x_len_lod = [[2, 3, 4, 3]] @@ -96,7 +95,6 @@ class TestSequencePadOp2(TestSequencePadOp): class TestSequencePadOp3(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 10] self.x_len_lod = [[2, 3, 4, 3]] @@ -106,7 +104,6 @@ class TestSequencePadOp3(TestSequencePadOp): class TestSequencePadOp4(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 10] self.x_len_lod = [[2, 3, 4, 3]] @@ -116,7 +113,6 @@ class TestSequencePadOp4(TestSequencePadOp): class TestSequencePadOp5(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 2, 5] self.x_len_lod = [[2, 3, 4, 3]] @@ -126,7 +122,6 @@ class TestSequencePadOp5(TestSequencePadOp): class TestSequencePadOp6(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 2, 5] self.x_len_lod = [[2, 3, 4, 3]] @@ -136,7 +131,6 @@ class TestSequencePadOp6(TestSequencePadOp): class TestSequencePadOp7(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 2, 5] self.x_len_lod = [[2, 3, 4, 3]] @@ -146,7 +140,6 @@ class TestSequencePadOp7(TestSequencePadOp): class TestSequencePadOp8(TestSequencePadOp): - def set_attr(self): self.x_shape = [12, 2, 5] self.x_len_lod = [[0, 8, 0, 4, 0]] @@ -156,35 +149,33 @@ class TestSequencePadOp8(TestSequencePadOp): class TestSequencePadOpError(unittest.TestCase): - def test_error(self): - def test_x_variable(): # the input x type must be Variable x = np.random.random((2, 4)).astype("float32") pad_value = fluid.layers.assign( - input=np.array([0.0], dtype=np.float32)) + input=np.array([0.0], dtype=np.float32) + ) fluid.layers.sequence_pad(x=x, pad_value=pad_value) self.assertRaises(TypeError, test_x_variable) def test_pad_value_variable(): - x1 = fluid.layers.data(name='x1', - shape=[10, 5], - dtype='float32', - lod_level=1) + x1 = fluid.layers.data( + name='x1', shape=[10, 5], dtype='float32', lod_level=1 + ) pad_value1 = 
np.array([0.0], dtype=np.float32) fluid.layers.sequence_pad(x=x1, pad_value=pad_value1) self.assertRaises(TypeError, test_pad_value_variable) def test_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[10, 5], - dtype='int16', - lod_level=1) + x2 = fluid.layers.data( + name='x2', shape=[10, 5], dtype='int16', lod_level=1 + ) pad_value2 = fluid.layers.assign( - input=np.array([0.0], dtype=np.int32)) + input=np.array([0.0], dtype=np.int32) + ) fluid.layers.sequence_pad(x=x2, pad_value=pad_value2) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py index ee4f4fa086034612abbb22dabf2f2e3ddbf85e1b..b4af6f239df1c2fb87049610f24166a398f5d67c 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pool.py @@ -27,7 +27,7 @@ def compute_seqpool_sum(x, offset, out, pad_value=0.0): if offset[level][i] == offset[level][i + 1]: out[i] = pad_value else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] out[i] = sub_x.sum(axis=0) @@ -37,7 +37,7 @@ def compute_seqpool_avg(x, offset, out, pad_value=0.0): if offset[level][i] == offset[level][i + 1]: out[i] = pad_value else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] out[i] = sub_x.mean(axis=0) @@ -47,13 +47,12 @@ def compute_seqpool_sqrt(x, offset, out, pad_value=0.0): if offset[level][i] == offset[level][i + 1]: out[i] = pad_value else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] seq_len = offset[level][i + 1] - offset[level][i] out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len) class TestSeqAvgPool(OpTest): - def set_lod(self): return [[11]] @@ -88,69 +87,63 @@ class TestSeqAvgPool(OpTest): def test_check_grad(self): # Remove MaxIndex after check_grad is refined. 
out = self.outputs['Out'] - if isinstance(out, tuple): out = out[0] - self.outputs['MaxIndex'] = \ - np.zeros(out.shape).astype('int32') + if isinstance(out, tuple): + out = out[0] + self.outputs['MaxIndex'] = np.zeros(out.shape).astype('int32') self.check_grad(["X"], "Out", check_dygraph=False) class TestSeqAvgPoolBatch1(TestSeqAvgPool): - def set_lod(self): return [[11]] def set_lod_data(self): lod = self.set_lod() - x, _ = self.get_sequence_batch_size_1_input(lod=lod, - shape=[lod[0][0], 23]) + x, _ = self.get_sequence_batch_size_1_input( + lod=lod, shape=[lod[0][0], 23] + ) return x class TestSeqAvgPoolInstance0(TestSeqAvgPool): - def set_lod(self): return [[0, 0, 4, 0, 3, 0, 0, 5, 0, 0]] def set_lod_data(self): lod = self.set_lod() - x, _ = self.get_sequence_instance_size_0_input(lod=lod, - shape=[sum(lod[0]), 10]) + x, _ = self.get_sequence_instance_size_0_input( + lod=lod, shape=[sum(lod[0]), 10] + ) return x class TestSeqAvgPoolLen0(TestSeqAvgPool): - def set_lod(self): return [[0, 4, 0, 7, 0]] class TestSeqAvgPoolLen0LoDLevel2(TestSeqAvgPool): - def set_lod(self): return [[2, 0, 1, 2], [0, 4, 0, 7, 0]] class TestSeqSumPool(TestSeqAvgPool): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.1, 'pooltype': "SUM"} compute_seqpool_sum(x, offset, out, self.attrs["pad_value"]) class TestSeqSumPoolLen0(TestSeqSumPool): - def set_lod(self): return [[0, 4, 0, 7, 0]] class TestSeqSumPoolLen0LoDLevel2(TestSeqSumPool): - def set_lod(self): return [[2, 0, 1, 2], [0, 4, 0, 7, 0]] class TestSeqMaxPool(TestSeqAvgPool): - def set_lod(self): return [[13]] @@ -178,43 +171,37 @@ class TestSeqMaxPool(TestSeqAvgPool): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] out[i] = np.amax(sub_x, axis=0) class TestSeqMaxPoolLen0(TestSeqMaxPool): - def set_lod(self): return [[0, 1, 1, 5, 6, 0]] class TestSeqMaxPoolLen0LoDLevel2(TestSeqMaxPool): - def set_lod(self): return [[2, 0, 3, 1], [0, 1, 1, 5, 6, 0]] class TestSeqSqrtPool(TestSeqAvgPool): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.0, 'pooltype': "SQRT"} compute_seqpool_sqrt(x, offset, out, self.attrs["pad_value"]) class TestSeqSqrtPoolLen0(TestSeqSqrtPool): - def set_lod(self): return [[0, 7, 0, 2, 2, 0]] class TestSeqSqrtPoolLen0LoDLevel2(TestSeqSqrtPool): - def set_lod(self): return [[1, 2, 0, 3], [0, 7, 0, 2, 2, 0]] class TestSeqLastPool(TestSeqAvgPool): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.0, 'pooltype': "LAST"} level = len(offset) - 1 @@ -222,24 +209,21 @@ class TestSeqLastPool(TestSeqAvgPool): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] out[i] = sub_x[-1, :] class TestSeqLastPoolLen0(TestSeqLastPool): - def set_lod(self): return [[0, 3, 4, 0, 4, 0]] class TestSeqLastPoolLen0LoDLevel2(TestSeqLastPool): - def set_lod(self): return [[1, 0, 2, 3], [0, 3, 4, 0, 4, 0]] class TestSeqFirstPool(TestSeqAvgPool): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.3, 'pooltype': "FIRST"} level = len(offset) - 1 @@ -247,24 +231,21 @@ class TestSeqFirstPool(TestSeqAvgPool): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] else: - sub_x = x[offset[level][i]:offset[level][i + 1], :] + sub_x = x[offset[level][i] : offset[level][i + 1], :] out[i] = sub_x[0, :] class 
TestSeqFirstPoolLen0(TestSeqFirstPool): - def set_lod(self): return [[0, 2, 0, 3, 6, 0]] class TestSeqFirstPoolLen0LoDLevel2(TestSeqFirstPool): - def set_lod(self): return [[1, 0, 2, 3], [0, 2, 0, 3, 6, 0]] class TestSeqAvgPool2D(TestSeqAvgPool): - def set_lod(self): return [[4, 1, 3, 5]] @@ -287,25 +268,23 @@ class TestSeqAvgPool2D(TestSeqAvgPool): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 17)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 17)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 17) + ) out[i] = np.reshape(sub_x.mean(axis=0), (3, 17)) class TestSeqAvgPool2DLen0(TestSeqAvgPool2D): - def set_lod(self): return [[0, 5, 0, 8, 0]] class TestSeqAvgPool2DLen0LoDLevel2(TestSeqAvgPool2D): - def set_lod(self): return [[1, 0, 4], [0, 5, 0, 8, 0]] class TestSeqSumPool2D(TestSeqAvgPool2D): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.2, 'pooltype': "SUM"} level = len(offset) - 1 @@ -313,25 +292,23 @@ class TestSeqSumPool2D(TestSeqAvgPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 17)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 17)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 17) + ) out[i] = np.reshape(sub_x.sum(axis=0), (3, 17)) class TestSeqSumPool2DLen0(TestSeqSumPool2D): - def set_lod(self): return [[0, 8, 0, 5, 0]] class TestSeqSumPool2DLen0LoDLevel2(TestSeqSumPool2D): - def set_lod(self): return [[1, 0, 4], [0, 8, 0, 5, 0]] class TestSeqSqrtPool2D(TestSeqAvgPool2D): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.0, 'pooltype': "SQRT"} level = len(offset) - 1 @@ -339,39 +316,36 @@ class TestSeqSqrtPool2D(TestSeqAvgPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 17)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 17)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 17) + ) seq_len = offset[level][i + 1] - offset[level][i] out[i] = np.reshape( - sub_x.sum(axis=0) / np.sqrt(seq_len), (3, 17)) + sub_x.sum(axis=0) / np.sqrt(seq_len), (3, 17) + ) def test_check_grad(self): # Remove MaxIndex after check_grad is refined. 
out = self.outputs['Out'] if isinstance(out, tuple): out = out[0] - self.outputs['MaxIndex'] = \ - np.zeros(out.shape).astype('int32') - self.check_grad(["X"], - "Out", - max_relative_error=0.06, - check_dygraph=False) + self.outputs['MaxIndex'] = np.zeros(out.shape).astype('int32') + self.check_grad( + ["X"], "Out", max_relative_error=0.06, check_dygraph=False + ) class TestSeqSqrtPool2DLen0(TestSeqSqrtPool2D): - def set_lod(self): return [[0, 8, 0, 5, 0]] class TestSeqSqrtPool2DLen0LoDLevel2(TestSeqSqrtPool2D): - def set_lod(self): return [[1, 0, 2, 2], [0, 8, 0, 5, 0]] class TestSeqMaxPool2D(TestSeqAvgPool2D): - def set_lod(self): return [[4, 1, 3, 5]] @@ -399,27 +373,27 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 11)) continue - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 11)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 11) + ) out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) class TestSeqMaxPool2DLen0(TestSeqMaxPool2D): - def set_lod(self): return [[0, 3, 0, 10, 0]] class TestSeqMaxPool2DLen0LoDLevel2(TestSeqMaxPool2D): - def set_lod(self): return [[1, 0, 2, 2], [0, 3, 0, 10, 0]] -@skip_check_grad_ci(reason="Grad computation does not apply to Sequence MAX " - "Pool executed when is_test is true.") +@skip_check_grad_ci( + reason="Grad computation does not apply to Sequence MAX " + "Pool executed when is_test is true." +) class TestSeqMaxPool2DInference(TestSeqMaxPool2D): - def compute(self, x, offset, out): self.attrs = {"pad_value": 1.0, 'pooltype': "MAX", 'is_test': True} level = len(offset) - 1 @@ -427,30 +401,28 @@ class TestSeqMaxPool2DInference(TestSeqMaxPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 11)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 11)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 11) + ) out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) def test_check_grad(self): """Grad computation does not apply to Sequence MAX - Pool executed when is_test is true """ + Pool executed when is_test is true""" return class TestSeqMaxPool2DInferenceLen0(TestSeqMaxPool2DInference): - def set_lod(self): return [[0, 3, 0, 10, 0]] class TestSeqMaxPool2DInferenceLen0LoDLevel2(TestSeqMaxPool2DInference): - def set_lod(self): return [[1, 0, 2, 2], [0, 3, 0, 10, 0]] class TestSeqLastPool2D(TestSeqAvgPool2D): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.0, 'pooltype': "LAST"} level = len(offset) - 1 @@ -458,25 +430,23 @@ class TestSeqLastPool2D(TestSeqAvgPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = self.attrs["pad_value"] * np.ones((3, 17)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 17)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 17) + ) out[i] = np.reshape(sub_x[-1, :], (3, 17)) class TestSeqLastPool2DLen0(TestSeqLastPool2D): - def set_lod(self): return [[0, 3, 0, 1, 9, 0]] class TestSeqLastPool2DLen0LoDLevel2(TestSeqLastPool2D): - def set_lod(self): return [[1, 0, 2, 3], [0, 3, 0, 1, 9, 0]] class TestSeqFirstPool2D(TestSeqAvgPool2D): - def compute(self, x, offset, out): self.attrs = {"pad_value": 0.0, 'pooltype': "FIRST"} level = len(offset) - 1 @@ -484,19 +454,18 @@ class TestSeqFirstPool2D(TestSeqAvgPool2D): if offset[level][i] == offset[level][i + 1]: out[i] = 
self.attrs["pad_value"] * np.ones((3, 17)) else: - sub_x = np.reshape(x[offset[level][i]:offset[level][i + 1], :], - (-1, 3 * 17)) + sub_x = np.reshape( + x[offset[level][i] : offset[level][i + 1], :], (-1, 3 * 17) + ) out[i] = np.reshape(sub_x[0, :], (3, 17)) class TestSeqFirstPool2DLen0(TestSeqFirstPool2D): - def set_lod(self): return [[0, 3, 0, 3, 7, 0]] class TestSeqFirstPool2DLen0LoDLevel2(TestSeqFirstPool2D): - def set_lod(self): return [[1, 0, 2, 3], [0, 3, 0, 3, 7, 0]] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py index 5577ad052c04de5922ad626c60e2397a6675a97e..ea1b5bbf916b02b658d2229c31300be89d781817 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py @@ -23,7 +23,6 @@ import paddle.fluid as fluid class TestSequenceReshape(OpTest): - def init_data(self): self.dimension = 12 self.x_lod = [[4, 1, 3, 3]] @@ -57,7 +56,6 @@ class TestSequenceReshape(OpTest): class TestSequenceReshape_reduce(TestSequenceReshape): - def init_data(self): self.dimension = 24 self.x_lod = [[4, 2, 2, 4]] @@ -65,7 +63,6 @@ class TestSequenceReshape_reduce(TestSequenceReshape): class TestSequenceReshape_same(TestSequenceReshape): - def init_data(self): self.dimension = 12 self.x_lod = [[4, 2, 2, 4]] @@ -73,7 +70,6 @@ class TestSequenceReshape_same(TestSequenceReshape): class TestSequenceReshape_reduce_seq_len0(TestSequenceReshape): - def init_data(self): self.dimension = 24 self.x_lod = [[0, 6, 0, 2, 4]] @@ -81,7 +77,6 @@ class TestSequenceReshape_reduce_seq_len0(TestSequenceReshape): class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape): - def init_data(self): self.dimension = 24 self.x_lod = [[0, 2, 8, 2, 0]] @@ -89,9 +84,7 @@ class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape): class TestSequenceReshapeOpError(unittest.TestCase): - def test_error(self): - def test_variable(): x = np.random.random((2, 4)).astype("float32") fluid.layers.sequence_reshape(x=x, new_dim=4) @@ -99,11 +92,13 @@ class TestSequenceReshapeOpError(unittest.TestCase): self.assertRaises(TypeError, test_variable) def test_dtype(): - x1 = fluid.layers.data(name='x1', - shape=[2, 6], - append_batch_size=False, - dtype='float16', - lod_level=1) + x1 = fluid.layers.data( + name='x1', + shape=[2, 6], + append_batch_size=False, + dtype='float16', + lod_level=1, + ) fluid.layers.sequence_reshape(x=x1, new_dim=4) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py index e4612b064eb32b7d06ba59a4532ffcaa21da4f05..80b03dd01b4568306ae0076189968c725d017a35 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py @@ -22,7 +22,6 @@ from op_test import OpTest class TestSequenceReverseBase(OpTest): - def initParameters(self): pass @@ -36,14 +35,20 @@ class TestSequenceReverseBase(OpTest): self.y = self.get_output() self.inputs = { - 'X': (self.x, [ - self.lod, - ]), + 'X': ( + self.x, + [ + self.lod, + ], + ), } self.outputs = { - 'Y': (self.y, [ - self.lod, - ]), + 'Y': ( + self.y, + [ + self.lod, + ], + ), } def get_output(self): @@ -65,37 +70,31 @@ class TestSequenceReverseBase(OpTest): class TestSequenceReserve1(TestSequenceReverseBase): - def initParameters(self): 
self.size = (12, 10) self.lod = [4, 5, 3] class TestSequenceReverse2(TestSequenceReverseBase): - def initParameters(self): self.size = (12, 10) self.lod = [12] class TestSequenceReverse3(TestSequenceReverseBase): - def initParameters(self): self.size = (12, 10) self.lod = [3, 0, 6, 3] class TestSequenceReverse4(TestSequenceReverseBase): - def initParameters(self): self.size = (12, 10) self.lod = [0, 2, 10, 0] class TestSequenceReverseOpError(unittest.TestCase): - def test_error(self): - def test_variable(): # the input type must be Variable x_data = np.random.random((2, 4)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_scatter_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_scatter_op.py index 2b8e8261829090a743f55b7fed1b1be1332d54bc..ad4025a349d20e6d52a004a3254b9be0569118a0 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_scatter_op.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestSequenceScatterOp(OpTest): - def init_lod(self): return [[30, 50, 40]] @@ -45,7 +44,7 @@ class TestSequenceScatterOp(OpTest): self.inputs = { 'X': X_data, 'Ids': (Ids_data, Ids_lod), - 'Updates': (Updates_data, Updates_lod) + 'Updates': (Updates_data, Updates_lod), } self.outputs = {'Out': Out_data} @@ -57,37 +56,31 @@ class TestSequenceScatterOp(OpTest): class TestSequenceScatterOpSeqLen0(TestSequenceScatterOp): - def init_lod(self): return [[60, 60, 00]] class TestSequenceScatterOpSeqLen0Case1(TestSequenceScatterOp): - def init_lod(self): return [[0, 60, 60]] class TestSequenceScatterOpSeqLen0Case2(TestSequenceScatterOp): - def init_lod(self): return [[60, 0, 60]] class TestSequenceScatterOpSeqLen0Case3(TestSequenceScatterOp): - def init_lod(self): return [[120, 0, 0]] class TestSequenceScatterOpSeqLen0Case4(TestSequenceScatterOp): - def init_lod(self): return [[0, 120, 0]] class TestSequenceScatterOpSeqLen0Case5(TestSequenceScatterOp): - def init_lod(self): return [[0, 0, 120]] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_slice_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_slice_op.py index 052e0fad0625917404139f13c32414f7e23f4746..c3b1e291de866097e7f05758972d69d67eb9e391 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_slice_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_slice_op.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestSequenceSliceOp(OpTest): - def set_data(self): self.init_test_case() # only supprot one level LoD @@ -31,12 +30,17 @@ class TestSequenceSliceOp(OpTest): length = np.array(self.length).astype("int64") self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length} - outs = [] #np.zeros((100, 3, 2)).astype('float32') + outs = [] # np.zeros((100, 3, 2)).astype('float32') out_lod = [[]] lod_offset = 0 for i in range(len(offset)): - sub_x = x[lod_offset + offset[i, 0]:lod_offset + offset[i, 0] + - length[i, 0], :] + sub_x = x[ + lod_offset + + offset[i, 0] : lod_offset + + offset[i, 0] + + length[i, 0], + :, + ] outs.append(sub_x) out_lod[0].append(len(sub_x)) lod_offset += lod[0][i] @@ -61,7 +65,6 @@ class TestSequenceSliceOp(OpTest): class TestSequenceSliceOpSeqlen0Case0(TestSequenceSliceOp): - def init_test_case(self): self.x_dim = (100, 3, 2) self.x_lod = [[20, 30, 0, 30, 20]] @@ -70,7 +73,6 @@ class TestSequenceSliceOpSeqlen0Case0(TestSequenceSliceOp): class 
TestSequenceSliceOpSeqlen0Case1(TestSequenceSliceOp): - def init_test_case(self): self.x_dim = (100, 3, 2) self.x_lod = [[0, 70, 0, 30, 0]] @@ -79,7 +81,6 @@ class TestSequenceSliceOpSeqlen0Case1(TestSequenceSliceOp): class TestSequenceSliceOpSeqlen0Case2(TestSequenceSliceOp): - def init_test_case(self): self.x_dim = (100, 3, 2) self.x_lod = [[0, 100, 0, 0, 0]] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_softmax_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_softmax_op.py index 1918dc37a0be214f0ef28e8b4b7936f158873589..a08cde6af4dd19f1a10d9b8307e5067a1f9c6b17 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_softmax_op.py @@ -23,7 +23,6 @@ from test_softmax_op import stable_softmax class TestSequenceSoftmaxOp(OpTest): - def setUp(self): self.op_type = "sequence_softmax" self.use_cudnn = False @@ -34,13 +33,14 @@ class TestSequenceSoftmaxOp(OpTest): out = np.zeros((110, 1)).astype(self.dtype) offset = 0 for i in range(len(self.lod[0])): - if (self.lod[0][i] == 0): + if self.lod[0][i] == 0: continue - sub_x = x[offset:offset + self.lod[0][i], :] + sub_x = x[offset : offset + self.lod[0][i], :] sub_x = sub_x.reshape(1, self.lod[0][i]) sub_out = stable_softmax(sub_x) - out[offset:offset + self.lod[0][i], :] = sub_out.reshape( - self.lod[0][i], 1) + out[offset : offset + self.lod[0][i], :] = sub_out.reshape( + self.lod[0][i], 1 + ) offset += self.lod[0][i] self.inputs = {"X": (x, self.lod)} @@ -71,28 +71,25 @@ class TestSequenceSoftmaxOp(OpTest): # ----------------cudnn Sequencesoftmax---------------- -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSequenceSoftmaxCUDNNOp(TestSequenceSoftmaxOp): - def init_op_type(self): self.use_cudnn = True class TestSequenceSoftmaxOpSeqLen0Case0(TestSequenceSoftmaxOp): - def init_lod(self): self.lod = [[40, 0, 40, 30]] class TestSequenceSoftmaxOpSeqLen0Case1(TestSequenceSoftmaxOp): - def init_lod(self): self.lod = [[0, 40, 70, 0]] class TestSequenceSoftmaxOpSeqLen0Case2(TestSequenceSoftmaxOp): - def init_lod(self): self.lod = [[0, 0, 0, 110]] diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_topk_avg_pooling.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_topk_avg_pooling.py index 415e9688cfbd1431938d0541c288c8ca43fa39d4..651ce9213da3097f00c52497eb899d7a4296e1e8 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_topk_avg_pooling.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_topk_avg_pooling.py @@ -22,7 +22,6 @@ from op_test import OpTest class TestSequenceTopkAvgPoolingOp(OpTest): - def setUp(self): self.init_op_type() self.set_data() @@ -51,7 +50,7 @@ class TestSequenceTopkAvgPoolingOp(OpTest): self.inputs = { 'X': (x_data, x_lod), 'ROW': (row_data, [row]), - 'COLUMN': (col_data, [col]) + 'COLUMN': (col_data, [col]), } def calc_gradient(self, pos_data, topks, channel_num, row, col): @@ -60,7 +59,7 @@ class TestSequenceTopkAvgPoolingOp(OpTest): in_numel = sum([row[i] * col[i] for i in range(len(row))]) * channel_num out_numel = sum(row) * len(topks) * channel_num gradient = np.zeros(shape=(in_numel), dtype="float32") - dout_val = 1. 
/ out_numel + dout_val = 1.0 / out_numel pos_offset, in_offset = 0, 0 for bs_idx in range(len(row)): # batch row_size = row[bs_idx] @@ -72,9 +71,9 @@ class TestSequenceTopkAvgPoolingOp(OpTest): for k_idx in range(len(topks)): for k in range(topks[k_idx]): if pos_data[pos_idx + k] != -1: - gradient[in_idx + - pos_data[pos_idx + - k]] += dout_val / topks[k_idx] + gradient[in_idx + pos_data[pos_idx + k]] += ( + dout_val / topks[k_idx] + ) in_offset += row_size * col_size pos_offset += row_size * max_k return gradient @@ -87,7 +86,7 @@ class TestSequenceTopkAvgPoolingOp(OpTest): col_data, col_lod = self.inputs['COLUMN'] channel_num = self.attrs['channel_num'] out = np.zeros((0, len(topks) * channel_num), dtype=x_data.dtype) - pos = np.zeros((0, ), dtype='int32') + pos = np.zeros((0,), dtype='int32') out_lod = deepcopy(row_lod) offset = 0 @@ -95,45 +94,51 @@ class TestSequenceTopkAvgPoolingOp(OpTest): x_len = x_lod[0][idx] self.assertTrue( x_len == channel_num * row_lod[0][idx] * col_lod[0][idx], - "x_len: %s can't mod channel_num: %s" % (x_len, channel_num)) - out_tmp = np.zeros((0, ), dtype=x_data.dtype) - pos_tmp = np.zeros((0, ), dtype='int32') + "x_len: %s can't mod channel_num: %s" % (x_len, channel_num), + ) + out_tmp = np.zeros((0,), dtype=x_data.dtype) + pos_tmp = np.zeros((0,), dtype='int32') for ch in range(channel_num): for r_id in range(row_lod[0][idx]): - x_sub = x_data[offset:(offset + col_lod[0][idx])] + x_sub = x_data[offset : (offset + col_lod[0][idx])] topk_val, topk_pos = self.get_topk(x_sub, max_k) sum_data = self.topk_sum(topk_val, topk_pos, max_k) new_feature = np.array( - [sum_data[topk] / topk for topk in topks]) + [sum_data[topk] / topk for topk in topks] + ) out_tmp = np.hstack((out_tmp, new_feature)) pos_tmp = np.hstack((pos_tmp, topk_pos)) offset += col_lod[0][idx] - out_tmp = out_tmp.reshape([channel_num, -1, - len(topks)]).transpose(1, 0, 2) - pos_tmp = pos_tmp.reshape([channel_num, -1, - max_k]).transpose(1, 0, 2) + out_tmp = out_tmp.reshape([channel_num, -1, len(topks)]).transpose( + 1, 0, 2 + ) + pos_tmp = pos_tmp.reshape([channel_num, -1, max_k]).transpose( + 1, 0, 2 + ) out = np.vstack( - (out, out_tmp.reshape([-1, len(topks) * channel_num]))) + (out, out_tmp.reshape([-1, len(topks) * channel_num])) + ) pos = np.hstack((pos, pos_tmp.flatten())) self.outputs = {'Out': (out.astype('float32'), out_lod), 'pos': pos} - self.gradient = self.calc_gradient(pos, topks, channel_num, row_lod[0], - col_lod[0]) + self.gradient = self.calc_gradient( + pos, topks, channel_num, row_lod[0], col_lod[0] + ) def get_topk(self, x, topk): real_topk = topk if topk < len(x) else len(x) topk_pos = np.array(x).argsort()[-topk:][::-1] topk_val = np.array(x)[topk_pos] if real_topk < topk: - topk_pos = np.hstack((topk_pos, np.full((topk - real_topk, ), -1))) - topk_val = np.hstack((topk_val, np.full((topk - real_topk, ), 0.0))) + topk_pos = np.hstack((topk_pos, np.full((topk - real_topk,), -1))) + topk_val = np.hstack((topk_val, np.full((topk - real_topk,), 0.0))) return topk_val, topk_pos def topk_sum(self, x, pos, max_k): - sum_data = [0.] 
* (max_k + 1) + sum_data = [0.0] * (max_k + 1) for i in range(1, max_k + 1): if pos[i - 1] == -1: sum_data[i] = sum_data[i - 1] @@ -149,7 +154,6 @@ class TestSequenceTopkAvgPoolingOp(OpTest): class TestSequenceTopkAvgPoolingOpCase1(TestSequenceTopkAvgPoolingOp): - def set_data(self): topks = [2, 3] channel_num = 5 @@ -160,32 +164,32 @@ class TestSequenceTopkAvgPoolingOpCase1(TestSequenceTopkAvgPoolingOp): def test_api(self): import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[1], lod_level=1) row = fluid.layers.data(name='row', shape=[10], lod_level=1) col = fluid.layers.data(name='col', shape=[10], lod_level=1) - topk_avg = fluid.contrib.sequence_topk_avg_pooling(input=x, - row=row, - col=col, - topks=[1, 3, 5], - channel_num=5) + topk_avg = fluid.contrib.sequence_topk_avg_pooling( + input=x, row=row, col=col, topks=[1, 3, 5], channel_num=5 + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(45, 1).astype('float32'), [[30, 15]], place) + np.random.rand(45, 1).astype('float32'), [[30, 15]], place + ) row_tensor = fluid.create_lod_tensor( - np.random.rand(5, 10).astype('float32'), [[2, 3]], place) + np.random.rand(5, 10).astype('float32'), [[2, 3]], place + ) col_tensor = fluid.create_lod_tensor( - np.random.rand(4, 10).astype('float32'), [[3, 1]], place) + np.random.rand(4, 10).astype('float32'), [[3, 1]], place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x_tensor, - 'row': row_tensor, - 'col': col_tensor - }, - fetch_list=[topk_avg], - return_numpy=False) + ret = exe.run( + feed={'x': x_tensor, 'row': row_tensor, 'col': col_tensor}, + fetch_list=[topk_avg], + return_numpy=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py index 7345274caeb7777c12700d7dffd271b806cbbbef..07dfd70ca2bca077e1bce226596293c1940eb01a 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py @@ -23,7 +23,6 @@ import paddle.fluid as fluid class TestSequenceUnpadOp(OpTest): - def init(self): self.length = [2, 3, 4] self.x_shape = (3, 40) @@ -34,13 +33,13 @@ class TestSequenceUnpadOp(OpTest): x = np.random.random(self.x_shape).astype(self.dtype) out_lod = [self.length] - out = x[0, 0:self.length[0]] + out = x[0, 0 : self.length[0]] for i in range(1, x.shape[0]): - out = np.append(out, x[i, 0:self.length[i]], axis=0) + out = np.append(out, x[i, 0 : self.length[i]], axis=0) - out_shape = (sum(self.length), ) + out_shape = (sum(self.length),) if len(self.x_shape) == 2: - out_shape = out_shape + (1, ) + out_shape = out_shape + (1,) else: out_shape = out_shape + self.x_shape[2:] @@ -60,7 +59,6 @@ class TestSequenceUnpadOp(OpTest): class TestSequenceUnpadOp2(TestSequenceUnpadOp): - def init(self): self.length = [2, 3, 4] self.x_shape = (3, 5, 4, 3) @@ -68,7 +66,6 @@ class TestSequenceUnpadOp2(TestSequenceUnpadOp): class TestSequenceUnpadOp3(TestSequenceUnpadOp): - def init(self): self.length = [5, 2, 3, 4] self.x_shape = (4, 5, 3, 3, 6) @@ -76,7 +73,6 @@ class TestSequenceUnpadOp3(TestSequenceUnpadOp): class TestSequenceUnpadOp4(TestSequenceUnpadOp): - def init(self): self.length = [5, 0, 0, 4] self.x_shape = (4, 5, 3, 3, 6) @@ -84,7 +80,6 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp): class TestSequenceUnpadOp5(TestSequenceUnpadOp): - def init(self): self.length = [0, 
4, 3, 0] self.x_shape = (4, 5, 3, 3, 6) @@ -92,9 +87,7 @@ class TestSequenceUnpadOp5(TestSequenceUnpadOp): class TestSequenceUnpadOpError(unittest.TestCase): - def test_error(self): - def test_x_variable(): x = np.random.random((10, 5)).astype("float64") len = fluid.data(name='length2', shape=[10], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/seresnext_net.py b/python/paddle/fluid/tests/unittests/seresnext_net.py index d1676380afefb6c6bb71c002f4c3db42b6505de9..b2bc25e35ae3e304cb52db2ded46df249e603570 100644 --- a/python/paddle/fluid/tests/unittests/seresnext_net.py +++ b/python/paddle/fluid/tests/unittests/seresnext_net.py @@ -48,37 +48,40 @@ def squeeze_excitation(input, num_channels, reduction_ratio): # input=input, pool_size=0, pool_type='avg', global_pooling=True) conv = input shape = conv.shape - reshape = fluid.layers.reshape(x=conv, - shape=[-1, shape[1], shape[2] * shape[3]]) + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]] + ) pool = fluid.layers.reduce_mean(input=reshape, dim=2) - squeeze = fluid.layers.fc(input=pool, - size=num_channels // reduction_ratio, - act='relu') - excitation = fluid.layers.fc(input=squeeze, - size=num_channels, - act='sigmoid') + squeeze = fluid.layers.fc( + input=pool, size=num_channels // reduction_ratio, act='relu' + ) + excitation = fluid.layers.fc( + input=squeeze, size=num_channels, act='sigmoid' + ) scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) return scale -def conv_bn_layer(input, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): - conv = fluid.layers.conv2d(input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - use_cudnn=(not remove_cudnn_conv), - bias_attr=False) - return conv if remove_bn else fluid.layers.batch_norm( - input=conv, act=act, momentum=0.1) +def conv_bn_layer( + input, num_filters, filter_size, stride=1, groups=1, act=None +): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + use_cudnn=(not remove_cudnn_conv), + bias_attr=False, + ) + return ( + conv + if remove_bn + else fluid.layers.batch_norm(input=conv, act=act, momentum=0.1) + ) def shortcut(input, ch_out, stride): @@ -96,23 +99,25 @@ def shortcut(input, ch_out, stride): def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): # The number of first 1x1 convolutional channels for each bottleneck build block # was halved to reduce the compution cost. 
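A note on the SE-ResNeXt helpers above: squeeze_excitation pools the feature map over its spatial dimensions, squeezes the channel vector through a small FC + ReLU, expands it back through an FC + sigmoid, and rescales the input channel-wise; bottleneck_block then wraps that gating around the usual 1x1 / 3x3 grouped / 1x1 convolutions. A minimal NumPy sketch of the gating step, with hypothetical weights w1/w2 chosen only to make the shapes concrete (an illustration, not the layer's implementation):

import numpy as np

def se_gate(feature, w1, w2):
    """Squeeze-and-excitation gating on an NCHW feature map (illustrative only)."""
    n, c, h, w = feature.shape
    squeeze = feature.reshape(n, c, h * w).mean(axis=2)   # global average pool -> (N, C)
    hidden = np.maximum(squeeze @ w1, 0.0)                # FC + ReLU, C -> C // reduction_ratio
    excite = 1.0 / (1.0 + np.exp(-(hidden @ w2)))         # FC + sigmoid, back to C
    return feature * excite[:, :, None, None]             # channel-wise rescale of the input

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 16, 4, 4)).astype('float32')
w1 = rng.standard_normal((16, 4)).astype('float32')       # reduction_ratio = 4 in this sketch
w2 = rng.standard_normal((4, 16)).astype('float32')
print(se_gate(x, w1, w2).shape)                            # (2, 16, 4, 4)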
- conv0 = conv_bn_layer(input=input, - num_filters=num_filters, - filter_size=1, - act='relu') - conv1 = conv_bn_layer(input=conv0, - num_filters=num_filters * 2, - filter_size=3, - stride=stride, - groups=cardinality, - act='relu') - conv2 = conv_bn_layer(input=conv1, - num_filters=num_filters * 2, - filter_size=1, - act=None) - scale = squeeze_excitation(input=conv2, - num_channels=num_filters * 2, - reduction_ratio=reduction_ratio) + conv0 = conv_bn_layer( + input=input, num_filters=num_filters, filter_size=1, act='relu' + ) + conv1 = conv_bn_layer( + input=conv0, + num_filters=num_filters * 2, + filter_size=3, + stride=stride, + groups=cardinality, + act='relu', + ) + conv2 = conv_bn_layer( + input=conv1, num_filters=num_filters * 2, filter_size=1, act=None + ) + scale = squeeze_excitation( + input=conv2, + num_channels=num_filters * 2, + reduction_ratio=reduction_ratio, + ) short = shortcut(input, num_filters * 2, stride) @@ -127,26 +132,18 @@ def SE_ResNeXt50Small(use_feed): img = fluid.layers.data(name='image', shape=img_shape, dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') - conv = conv_bn_layer(input=img, - num_filters=16, - filter_size=3, - stride=2, - act='relu') - conv = conv_bn_layer(input=conv, - num_filters=16, - filter_size=3, - stride=1, - act='relu') - conv = conv_bn_layer(input=conv, - num_filters=16, - filter_size=3, - stride=1, - act='relu') - conv = fluid.layers.pool2d(input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + conv = conv_bn_layer( + input=img, num_filters=16, filter_size=3, stride=2, act='relu' + ) + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu' + ) + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu' + ) + conv = fluid.layers.pool2d( + input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) cardinality = 32 reduction_ratio = 16 @@ -155,18 +152,24 @@ def SE_ResNeXt50Small(use_feed): for block in range(len(depth)): for i in range(depth[block]): - conv = bottleneck_block(input=conv, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - cardinality=cardinality, - reduction_ratio=reduction_ratio) + conv = bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio, + ) shape = conv.shape - reshape = fluid.layers.reshape(x=conv, - shape=[-1, shape[1], shape[2] * shape[3]]) + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]] + ) pool = fluid.layers.reduce_mean(input=reshape, dim=2) - dropout = pool if remove_dropout else fluid.layers.dropout( - x=pool, dropout_prob=0.2, seed=1) + dropout = ( + pool + if remove_dropout + else fluid.layers.dropout(x=pool, dropout_prob=0.2, seed=1) + ) # Classifier layer: prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -176,11 +179,12 @@ def SE_ResNeXt50Small(use_feed): def optimizer(learning_rate=0.01): optimizer = fluid.optimizer.Momentum( - learning_rate=cosine_decay(learning_rate=learning_rate, - step_each_epoch=2, - epochs=1), + learning_rate=cosine_decay( + learning_rate=learning_rate, step_each_epoch=2, epochs=1 + ), momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer @@ -203,10 +207,13 @@ def iter(use_device): gpu_img, 
gpu_label = init_data( batch_size=batch_size(use_device=DeviceType.CUDA), img_shape=img_shape, - label_range=999) -cpu_img, cpu_label = init_data(batch_size=batch_size(use_device=DeviceType.CPU), - img_shape=img_shape, - label_range=999) + label_range=999, +) +cpu_img, cpu_label = init_data( + batch_size=batch_size(use_device=DeviceType.CPU), + img_shape=img_shape, + label_range=999, +) feed_dict_gpu = {"image": gpu_img, "label": gpu_label} feed_dict_cpu = {"image": cpu_img, "label": cpu_label} diff --git a/python/paddle/fluid/tests/unittests/seresnext_test_base.py b/python/paddle/fluid/tests/unittests/seresnext_test_base.py index 739de92db60c4e0afff0f1b82289185e840c72a6..2199d1ed20a59d094fc072b4de5fdeaa178e294a 100644 --- a/python/paddle/fluid/tests/unittests/seresnext_test_base.py +++ b/python/paddle/fluid/tests/unittests/seresnext_test_base.py @@ -20,30 +20,33 @@ import numpy as np class TestResnetBase(TestParallelExecutorBase): - - def _compare_result_with_origin_model(self, - check_func, - use_device, - delta2=1e-5, - compare_separately=True): + def _compare_result_with_origin_model( + self, check_func, use_device, delta2=1e-5, compare_separately=True + ): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return - func_1_first_loss, func_1_last_loss, func_1_loss_area = self.check_network_convergence( + ( + func_1_first_loss, + func_1_last_loss, + func_1_loss_area, + ) = self.check_network_convergence( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), iter=seresnext_net.iter(use_device), batch_size=seresnext_net.batch_size(use_device), use_device=use_device, use_reduce=False, - optimizer=seresnext_net.optimizer) + optimizer=seresnext_net.optimizer, + ) func_2_first_loss, func_2_last_loss, func_2_loss_area = check_func( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), iter=seresnext_net.iter(use_device), batch_size=seresnext_net.batch_size(use_device), - use_device=use_device) + use_device=use_device, + ) if compare_separately: for loss in zip(func_1_first_loss, func_2_first_loss): @@ -51,12 +54,12 @@ class TestResnetBase(TestParallelExecutorBase): for loss in zip(func_1_last_loss, func_2_last_loss): self.assertAlmostEquals(loss[0], loss[1], delta=delta2) else: - np.testing.assert_allclose(func_1_loss_area, - func_2_loss_area, - rtol=delta2) - self.assertAlmostEquals(np.mean(func_1_first_loss), - func_2_first_loss[0], - delta=1e-5) - self.assertAlmostEquals(np.mean(func_1_last_loss), - func_2_last_loss[0], - delta=delta2) + np.testing.assert_allclose( + func_1_loss_area, func_2_loss_area, rtol=delta2 + ) + self.assertAlmostEquals( + np.mean(func_1_first_loss), func_2_first_loss[0], delta=1e-5 + ) + self.assertAlmostEquals( + np.mean(func_1_last_loss), func_2_last_loss[0], delta=delta2 + ) diff --git a/python/paddle/fluid/tests/unittests/simnet_dataset_reader.py b/python/paddle/fluid/tests/unittests/simnet_dataset_reader.py index 5eae0ff22e14df8787d45b922ad32f0341849f83..5b4a49e693ad7d6fa51745ae94554fd9a476d1d1 100644 --- a/python/paddle/fluid/tests/unittests/simnet_dataset_reader.py +++ b/python/paddle/fluid/tests/unittests/simnet_dataset_reader.py @@ -22,6 +22,5 @@ logger.setLevel(logging.INFO) class DatasetSimnetReader(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): pass diff --git a/python/paddle/fluid/tests/unittests/simple_nets.py b/python/paddle/fluid/tests/unittests/simple_nets.py index 9326d51591576578fa0f69563a1c8e3123b92a4e..f85a0f9e135e2b4f0a9a93ef8b25772b7202a5a2 100644 --- 
a/python/paddle/fluid/tests/unittests/simple_nets.py +++ b/python/paddle/fluid/tests/unittests/simple_nets.py @@ -24,8 +24,10 @@ def simple_fc_net_with_inputs(img, label, class_num=10): hidden, size=100, act='relu', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) prediction = fluid.layers.fc(hidden, size=class_num, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) loss = paddle.mean(loss) @@ -45,8 +47,10 @@ def batchnorm_fc_with_inputs(img, label, class_num=10): hidden, size=200, act='relu', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) hidden = fluid.layers.batch_norm(input=hidden) @@ -62,26 +66,27 @@ def fc_with_batchnorm(use_feed=None): return batchnorm_fc_with_inputs(img, label, class_num=10) -def bow_net(use_feed, - dict_dim, - is_sparse=False, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2): +def bow_net( + use_feed, + dict_dim, + is_sparse=False, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2, +): """ BOW net This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - emb = fluid.layers.embedding(input=data, - is_sparse=is_sparse, - size=[dict_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -98,7 +103,9 @@ def init_data(batch_size=32, img_shape=[784], label_range=9): assert isinstance(img_shape, list) input_shape = [batch_size] + img_shape img = np.random.random(size=input_shape).astype(np.float32) - label = np.array( - [np.random.randint(0, label_range) for _ in range(batch_size)]).reshape( - (-1, 1)).astype("int64") + label = ( + np.array([np.random.randint(0, label_range) for _ in range(batch_size)]) + .reshape((-1, 1)) + .astype("int64") + ) return img, label diff --git a/python/paddle/fluid/tests/unittests/spawn_runner_base.py b/python/paddle/fluid/tests/unittests/spawn_runner_base.py index d832781ffbc4c61300c90501413a6ddf2e94e05f..2b35fe70cf9359d854cac9db98da2e40bed17eca 100644 --- a/python/paddle/fluid/tests/unittests/spawn_runner_base.py +++ b/python/paddle/fluid/tests/unittests/spawn_runner_base.py @@ -30,7 +30,6 @@ class SpawnAssistTestArgs(object): class TestDistSpawnRunner(unittest.TestCase): - def setUp(self): # NOTE(chenweihang): keep consistent with # TestDistBase.check_with_place @@ -42,10 +41,12 @@ class TestDistSpawnRunner(unittest.TestCase): def _run_parallel(self, model, args): args.update_method = "nccl2" - context = paddle.distributed.spawn(func=model.run_trainer_with_spawn, - args=(args, ), - nprocs=self.nprocs, - join=True) + context = paddle.distributed.spawn( + func=model.run_trainer_with_spawn, + args=(args,), + nprocs=self.nprocs, + join=True, + ) result_list = [] for res_queue in context.return_queues: result_list.append(res_queue.get()) @@ -53,10 +54,12 @@ class TestDistSpawnRunner(unittest.TestCase): def check_dist_result_with_spawn(self, 
test_class, delta=1e-3): with _test_eager_guard(): - self.check_dist_result_with_spawn_func(test_class=test_class, - delta=delta) - self.check_dist_result_with_spawn_func(test_class=test_class, - delta=delta) + self.check_dist_result_with_spawn_func( + test_class=test_class, delta=delta + ) + self.check_dist_result_with_spawn_func( + test_class=test_class, delta=delta + ) def check_dist_result_with_spawn_func(self, test_class, delta=1e-3): # 0. prepare model and args @@ -83,7 +86,8 @@ class TestDistSpawnRunner(unittest.TestCase): loss, dist_loss, delta=delta, - msg= - "The results of single-card execution and multi-card execution are inconsistent." - "signal-card loss is:\n{}\nmulti-card average loss is:\n{}\n". - format(loss, dist_loss)) + msg="The results of single-card execution and multi-card execution are inconsistent." + "signal-card loss is:\n{}\nmulti-card average loss is:\n{}\n".format( + loss, dist_loss + ), + ) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py index c6c318536164c679ba0fecfb9c1856b7a9ebea9f..2d825af4b3362ba211b2a396a8cc1797b9943f21 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py @@ -25,9 +25,11 @@ paddle.enable_static() def get_param_attr(weight, bias): weight_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(weight)) + initializer=fluid.initializer.NumpyArrayInitializer(weight) + ) bias_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(bias)) + initializer=fluid.initializer.NumpyArrayInitializer(bias) + ) return weight_attr, bias_attr @@ -40,14 +42,16 @@ hidden = n_head * d_key def create_model(data, rank): np.random.seed(2021) - pre_ln_w = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) - pre_ln_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) - qkv_w = np.random.uniform(-1, 1, - size=(3, n_head, d_key, hidden)).astype(DTYPE) + pre_ln_w = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) + pre_ln_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) + qkv_w = np.random.uniform(-1, 1, size=(3, n_head, d_key, hidden)).astype( + DTYPE + ) qkv_b = np.random.uniform(-1, 1, size=(3, n_head, d_key)).astype(DTYPE) - linear_w = np.random.uniform(-1, 1, - size=(n_head * d_key, hidden)).astype(DTYPE) - linear_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) + linear_w = np.random.uniform(-1, 1, size=(n_head * d_key, hidden)).astype( + DTYPE + ) + linear_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) data.stop_gradient = False if rank is not None: @@ -55,46 +59,50 @@ def create_model(data, rank): end = start + n_head // MODEL_PARALLEL_SIZE col_qkv_w = qkv_w[:, start:end, :, :] col_qkv_b = qkv_b[:, start:end, :] - row_linear_w = linear_w[(start * d_key):(end * d_key), :] + row_linear_w = linear_w[(start * d_key) : (end * d_key), :] pre_ln_w_attr, pre_ln_b_attr = get_param_attr(pre_ln_w, pre_ln_b) qkv_w_attr, qkv_b_attr = get_param_attr(col_qkv_w, col_qkv_b) linear_w_attr, linear_b_attr = get_param_attr(row_linear_w, linear_b) - attn = FusedMultiHeadAttention(hidden, - n_head, - dropout_rate=0.0, - attn_dropout_rate=0.0, - normalize_before=False, - qkv_weight_attr=qkv_w_attr, - qkv_bias_attr=qkv_b_attr, - linear_weight_attr=linear_w_attr, - linear_bias_attr=linear_b_attr, - pre_ln_scale_attr=pre_ln_w_attr, - 
pre_ln_bias_attr=pre_ln_b_attr, - ln_scale_attr=pre_ln_w_attr, - ln_bias_attr=pre_ln_b_attr, - nranks=MODEL_PARALLEL_SIZE, - ring_id=0) + attn = FusedMultiHeadAttention( + hidden, + n_head, + dropout_rate=0.0, + attn_dropout_rate=0.0, + normalize_before=False, + qkv_weight_attr=qkv_w_attr, + qkv_bias_attr=qkv_b_attr, + linear_weight_attr=linear_w_attr, + linear_bias_attr=linear_b_attr, + pre_ln_scale_attr=pre_ln_w_attr, + pre_ln_bias_attr=pre_ln_b_attr, + ln_scale_attr=pre_ln_w_attr, + ln_bias_attr=pre_ln_b_attr, + nranks=MODEL_PARALLEL_SIZE, + ring_id=0, + ) result = attn(data) else: pre_ln_w_attr, pre_ln_b_attr = get_param_attr(pre_ln_w, pre_ln_b) qkv_w_attr, qkv_b_attr = get_param_attr(qkv_w, qkv_b) linear_w_attr, linear_b_attr = get_param_attr(linear_w, linear_b) - attn = FusedMultiHeadAttention(hidden, - n_head, - dropout_rate=0.0, - attn_dropout_rate=0.0, - normalize_before=False, - qkv_weight_attr=qkv_w_attr, - qkv_bias_attr=qkv_b_attr, - linear_weight_attr=linear_w_attr, - linear_bias_attr=linear_b_attr, - pre_ln_scale_attr=pre_ln_w_attr, - pre_ln_bias_attr=pre_ln_b_attr, - ln_scale_attr=pre_ln_w_attr, - ln_bias_attr=pre_ln_b_attr) + attn = FusedMultiHeadAttention( + hidden, + n_head, + dropout_rate=0.0, + attn_dropout_rate=0.0, + normalize_before=False, + qkv_weight_attr=qkv_w_attr, + qkv_bias_attr=qkv_b_attr, + linear_weight_attr=linear_w_attr, + linear_bias_attr=linear_b_attr, + pre_ln_scale_attr=pre_ln_w_attr, + pre_ln_bias_attr=pre_ln_b_attr, + ln_scale_attr=pre_ln_w_attr, + ln_bias_attr=pre_ln_b_attr, + ) result = attn(data) predict = paddle.sum(result) @@ -102,20 +110,20 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data(name='data_in', - shape=[batch_size, seq_len, hidden], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', shape=[batch_size, seq_len, hidden], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -128,8 +136,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py index 409c84eaab583b983abec4426aca47cb200291fe..f15dd57123f8cbadd40345eb4a7e9f1e994929e2 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py @@ -30,20 +30,22 @@ OUT_SIZE = 2 * MODEL_PARALLEL_SIZE def get_param_attr(weight, bias): weight_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(weight)) + initializer=fluid.initializer.NumpyArrayInitializer(weight) + ) bias_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(bias)) + initializer=fluid.initializer.NumpyArrayInitializer(bias) + ) return weight_attr, bias_attr def create_model(data, rank): np.random.seed(2021) - ln_w = np.random.uniform(-1, 1, size=(IN_SIZE, )).astype(DTYPE) - ln_b = np.random.uniform(-1, 1, 
size=(IN_SIZE, )).astype(DTYPE) + ln_w = np.random.uniform(-1, 1, size=(IN_SIZE,)).astype(DTYPE) + ln_b = np.random.uniform(-1, 1, size=(IN_SIZE,)).astype(DTYPE) w0 = np.random.uniform(-1, 1, size=(IN_SIZE, OUT_SIZE)).astype(DTYPE) - b0 = np.random.uniform(-1, 1, size=(OUT_SIZE, )).astype(DTYPE) + b0 = np.random.uniform(-1, 1, size=(OUT_SIZE,)).astype(DTYPE) w1 = np.random.uniform(-1, 1, size=(OUT_SIZE, IN_SIZE)).astype(DTYPE) - b1 = np.random.uniform(-1, 1, size=(IN_SIZE, )).astype(DTYPE) + b1 = np.random.uniform(-1, 1, size=(IN_SIZE,)).astype(DTYPE) data.stop_gradient = False if rank is not None: start = 0 if rank == 0 else OUT_SIZE // MODEL_PARALLEL_SIZE @@ -56,38 +58,42 @@ def create_model(data, rank): w0_attr, b0_attr = get_param_attr(col_w0, col_b0) w1_attr, b1_attr = get_param_attr(row_w1, b1) - ffn = FusedFeedForward(IN_SIZE, - OUT_SIZE, - dropout_rate=0.0, - activation='gelu', - normalize_before=True, - linear1_weight_attr=w0_attr, - linear1_bias_attr=b0_attr, - linear2_weight_attr=w1_attr, - linear2_bias_attr=b1_attr, - ln1_scale_attr=ln_w_attr, - ln1_bias_attr=ln_b_attr, - nranks=MODEL_PARALLEL_SIZE, - ring_id=0) - #ffn.eval() + ffn = FusedFeedForward( + IN_SIZE, + OUT_SIZE, + dropout_rate=0.0, + activation='gelu', + normalize_before=True, + linear1_weight_attr=w0_attr, + linear1_bias_attr=b0_attr, + linear2_weight_attr=w1_attr, + linear2_bias_attr=b1_attr, + ln1_scale_attr=ln_w_attr, + ln1_bias_attr=ln_b_attr, + nranks=MODEL_PARALLEL_SIZE, + ring_id=0, + ) + # ffn.eval() result = ffn(data) else: ln_w_attr, ln_b_attr = get_param_attr(ln_w, ln_b) w0_attr, b0_attr = get_param_attr(w0, b0) w1_attr, b1_attr = get_param_attr(w1, b1) - ffn = FusedFeedForward(IN_SIZE, - OUT_SIZE, - dropout_rate=0.0, - activation='gelu', - normalize_before=True, - linear1_weight_attr=w0_attr, - linear1_bias_attr=b0_attr, - linear2_weight_attr=w1_attr, - linear2_bias_attr=b1_attr, - ln1_scale_attr=ln_w_attr, - ln1_bias_attr=ln_b_attr) - #ffn.eval() + ffn = FusedFeedForward( + IN_SIZE, + OUT_SIZE, + dropout_rate=0.0, + activation='gelu', + normalize_before=True, + linear1_weight_attr=w0_attr, + linear1_bias_attr=b0_attr, + linear2_weight_attr=w1_attr, + linear2_bias_attr=b1_attr, + ln1_scale_attr=ln_w_attr, + ln1_bias_attr=ln_b_attr, + ) + # ffn.eval() result = ffn(data) predict = paddle.sum(result) @@ -95,20 +101,20 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data(name='data_in', - shape=[batch_size, seq_len, IN_SIZE], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', shape=[batch_size, seq_len, IN_SIZE], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -121,8 +127,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py index 0ab441d29631620e24ad1243fbce8ca32e29f5b3..f9125dae34567597a90f6606bd002bbf26b8035f 100644 --- 
a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py @@ -25,9 +25,11 @@ paddle.enable_static() def get_param_attr(weight, bias): weight_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(weight)) + initializer=fluid.initializer.NumpyArrayInitializer(weight) + ) bias_attr = paddle.ParamAttr( - initializer=fluid.initializer.NumpyArrayInitializer(bias)) + initializer=fluid.initializer.NumpyArrayInitializer(bias) + ) return weight_attr, bias_attr @@ -41,28 +43,30 @@ dim_ffn = 4 * hidden def create_model(data, rank): np.random.seed(2021) - ln_w = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) - ln_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) - qkv_w = np.random.uniform(-1, 1, size=(3, num_head, dim_head, - hidden)).astype(DTYPE) + ln_w = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) + ln_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) + qkv_w = np.random.uniform( + -1, 1, size=(3, num_head, dim_head, hidden) + ).astype(DTYPE) qkv_b = np.random.uniform(-1, 1, size=(3, num_head, dim_head)).astype(DTYPE) - linear_w = np.random.uniform(-1, 1, size=(num_head * dim_head, - hidden)).astype(DTYPE) - linear_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) + linear_w = np.random.uniform( + -1, 1, size=(num_head * dim_head, hidden) + ).astype(DTYPE) + linear_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) - ffn_ln_w = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) - ffn_ln_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) + ffn_ln_w = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) + ffn_ln_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) ffn1_w = np.random.uniform(-1, 1, size=(hidden, dim_ffn)).astype(DTYPE) - ffn1_b = np.random.uniform(-1, 1, size=(dim_ffn, )).astype(DTYPE) + ffn1_b = np.random.uniform(-1, 1, size=(dim_ffn,)).astype(DTYPE) ffn2_w = np.random.uniform(-1, 1, size=(dim_ffn, hidden)).astype(DTYPE) - ffn2_b = np.random.uniform(-1, 1, size=(hidden, )).astype(DTYPE) + ffn2_b = np.random.uniform(-1, 1, size=(hidden,)).astype(DTYPE) if rank is not None: start = 0 if rank == 0 else (num_head // MODEL_PARALLEL_SIZE) end = start + (num_head // MODEL_PARALLEL_SIZE) col_qkv_w = qkv_w[:, start:end, :, :] col_qkv_b = qkv_b[:, start:end, :] - row_linear_w = linear_w[(start * dim_head):(end * dim_head), :] + row_linear_w = linear_w[(start * dim_head) : (end * dim_head), :] ln_w_attr, ln_b_attr = get_param_attr(ln_w, ln_b) qkv_w_attr, qkv_b_attr = get_param_attr(col_qkv_w, col_qkv_b) @@ -98,7 +102,8 @@ def create_model(data, rank): ffn2_weight_attrs=[ffn2_w_attr], ffn2_bias_attrs=[ffn2_b_attr], nranks=MODEL_PARALLEL_SIZE, - ring_id=0) + ring_id=0, + ) result = multi_transformer(data) else: ln_w_attr, ln_b_attr = get_param_attr(ln_w, ln_b) @@ -127,7 +132,8 @@ def create_model(data, rank): ffn1_weight_attrs=[ffn1_w_attr], ffn1_bias_attrs=[ffn1_b_attr], ffn2_weight_attrs=[ffn2_w_attr], - ffn2_bias_attrs=[ffn2_b_attr]) + ffn2_bias_attrs=[ffn2_b_attr], + ) result = multi_transformer(data) # fused_multi_transformer have no backward @@ -137,20 +143,20 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): - def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data(name='data_in', - shape=[batch_size, seq_len, hidden], - dtype=DTYPE) + data_in = fluid.data( + name='data_in', 
shape=[batch_size, seq_len, hidden], dtype=DTYPE + ) if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, - iterable=False) + iterable=False, + ) if dist_strategy: fleet.init(is_collective=True) @@ -163,8 +169,9 @@ class TestModelParallel(TestDistRunnerBase): opt = fluid.optimizer.SGD(0.1) if dist_strategy: - dist_opt = fleet.distributed_optimizer(optimizer=opt, - strategy=strategy) + dist_opt = fleet.distributed_optimizer( + optimizer=opt, strategy=strategy + ) dist_opt.minimize(avg_cost) else: opt.minimize(avg_cost) diff --git a/python/paddle/fluid/tests/unittests/test_Tensor_type.py b/python/paddle/fluid/tests/unittests/test_Tensor_type.py index 2211aed945e31a8ad84f50bf3e833d3367dac273..f18818b8ef5d9e02edf781613cb3c9caadd51d42 100644 --- a/python/paddle/fluid/tests/unittests/test_Tensor_type.py +++ b/python/paddle/fluid/tests/unittests/test_Tensor_type.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TensorTypeTest(unittest.TestCase): - def func_type_totensor(self): paddle.disable_static() inx = np.array([1, 2]) diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py index 834ab8f77d864991c6032f67e25afe3c95da9849..0de81034d3373d10515f9aa40004f354b3934f5b 100755 --- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py +++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py @@ -21,7 +21,6 @@ from paddle.fluid import Program, program_guard class TestAccuracyOp(OpTest): - def setUp(self): self.op_type = "accuracy" self.dtype = np.float32 @@ -40,7 +39,7 @@ class TestAccuracyOp(OpTest): self.outputs = { 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } def init_dtype(self): @@ -51,7 +50,6 @@ class TestAccuracyOp(OpTest): class TestAccuracyOpFp16(TestAccuracyOp): - def init_dtype(self): self.dtype = np.float16 @@ -60,15 +58,15 @@ class TestAccuracyOpFp16(TestAccuracyOp): class TestAccuracyOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of accuracy_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - label = fluid.layers.data(name='label', - shape=[-1, 1], - dtype="int32") + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + label = fluid.layers.data( + name='label', shape=[-1, 1], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.accuracy, x1, label) self.assertRaises(TypeError, paddle.metric.accuracy, x1, label) # The input dtype of accuracy_op must be float32 or float64. 
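For reference, the accuracy test above treats a sample as correct when its label appears among the top-k entries of its prediction row and reports num_correct / n. A small NumPy sketch of that counting rule (topk_accuracy is a hypothetical helper, not the operator's API); it reproduces the 0.5 expected by the API tests below:

import numpy as np

def topk_accuracy(scores, labels, k=1):
    """Fraction of rows whose label is among the k highest-scoring classes."""
    topk_idx = np.argsort(-scores, axis=1)[:, :k]          # indices of the k largest scores per row
    hits = (topk_idx == labels.reshape(-1, 1)).any(axis=1)
    return hits.mean()

scores = np.array([[0.2, 0.1, 0.4, 0.1, 0.1],
                   [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32')
labels = np.array([[2], [0]], dtype='int64')
print(topk_accuracy(scores, labels, k=1))                  # 0.5: row 0 is correct, row 1 is not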
@@ -81,40 +79,42 @@ class TestAccuracyOpError(unittest.TestCase): class TestAccuracyAPI1(unittest.TestCase): - def setUp(self): - self.predictions = paddle.static.data(shape=[2, 5], - name="predictions", - dtype="float32") - self.label = paddle.static.data(shape=[2, 1], - name="labels", - dtype="int64") - self.result = paddle.static.accuracy(input=self.predictions, - label=self.label, - k=1) + self.predictions = paddle.static.data( + shape=[2, 5], name="predictions", dtype="float32" + ) + self.label = paddle.static.data( + shape=[2, 1], name="labels", dtype="int64" + ) + self.result = paddle.static.accuracy( + input=self.predictions, label=self.label, k=1 + ) self.input_predictions = np.array( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype="float32") + dtype="float32", + ) self.input_labels = np.array([[2], [0]], dtype="int64") self.expect_value = np.array([0.5], dtype='float32') def test_api(self): exe = paddle.static.Executor() - result, = exe.run(feed={ - "predictions": self.input_predictions, - 'labels': self.input_labels - }, - fetch_list=[self.result.name]) + (result,) = exe.run( + feed={ + "predictions": self.input_predictions, + 'labels': self.input_labels, + }, + fetch_list=[self.result.name], + ) self.assertEqual((result == self.expect_value).all(), True) class TestAccuracyAPI2(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype='float32') + dtype='float32', + ) label = paddle.to_tensor([[2], [0]], dtype="int64") result = paddle.static.accuracy(input=predictions, label=label, k=1) expect_value = np.array([0.5], dtype='float32') @@ -122,12 +122,12 @@ class TestAccuracyAPI2(unittest.TestCase): class TestAccuracyAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], - dtype='float32') + dtype='float32', + ) label = paddle.to_tensor([[2], [0]], dtype="int64") result = paddle.metric.accuracy(input=predictions, label=label, k=1) expect_value = np.array([0.5], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py index cee8310cd1805882da8e998f85d5076de3be2729..bc5a14070e5cef130b1bb5d7cb492406fb5bb9b3 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py @@ -26,7 +26,6 @@ from decorator_helper import prog_scope class TestSigmoidTripleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 3, 7, 9] @@ -37,11 +36,9 @@ class TestSigmoidTripleGradCheck(unittest.TestCase): y = layers.sigmoid(x) x_arr = np.random.random(shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - gradient_checker.triple_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -53,7 +50,6 @@ class TestSigmoidTripleGradCheck(unittest.TestCase): class TestSigmoidDoubleGradCheck(unittest.TestCase): - def sigmoid_wrapper(self, x): return fluid.layers.sigmoid(x[0]) @@ -67,17 +63,13 @@ class TestSigmoidDoubleGradCheck(unittest.TestCase): y = layers.sigmoid(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, 
- eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.sigmoid_wrapper, - [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.sigmoid_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -90,7 +82,6 @@ class TestSigmoidDoubleGradCheck(unittest.TestCase): class TestTanhTripleGradCheck(unittest.TestCase): - def tanh_wrapper(self, x): return paddle.tanh(x[0]) @@ -104,16 +95,13 @@ class TestTanhTripleGradCheck(unittest.TestCase): y = layers.tanh(x) x_arr = np.random.random(shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - gradient_checker.triple_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.tanh_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.tanh_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -126,7 +114,6 @@ class TestTanhTripleGradCheck(unittest.TestCase): class TestTanhDoubleGradCheck(unittest.TestCase): - def tanh_wrapper(self, x): return paddle.tanh(x[0]) @@ -140,16 +127,13 @@ class TestTanhDoubleGradCheck(unittest.TestCase): y = paddle.tanh(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.tanh_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.tanh_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -162,7 +146,6 @@ class TestTanhDoubleGradCheck(unittest.TestCase): class TestAbsDoubleGradCheck(unittest.TestCase): - def abs_wrapper(self, x): return paddle.abs(x[0]) @@ -176,16 +159,13 @@ class TestAbsDoubleGradCheck(unittest.TestCase): y = paddle.abs(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.abs_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.abs_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -198,7 +178,6 @@ class TestAbsDoubleGradCheck(unittest.TestCase): class TestReluDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 3, 7, 9] @@ -211,11 +190,9 @@ class TestReluDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.02 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - 
eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -227,7 +204,6 @@ class TestReluDoubleGradCheck(unittest.TestCase): class TestLeakyReluDoubleGradCheck(unittest.TestCase): - def leaky_relu_wrapper(self, x): return paddle.nn.functional.leaky_relu(x[0], negative_slope=0.2) @@ -245,16 +221,12 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.02 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.leaky_relu_wrapper, - [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.leaky_relu_wrapper, [x], y, x_init=x_arr, place=place + ) def test_grad(self): paddle.enable_static() @@ -266,7 +238,6 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase): class TestELUDoubleGradCheck(unittest.TestCase): - def elu_wrapper(self, x): return paddle.nn.functional.elu(x[0], alpha=0.2) @@ -284,16 +255,13 @@ class TestELUDoubleGradCheck(unittest.TestCase): y = layers.elu(x, alpha=alpha) np.random.RandomState(SEED) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.elu_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.elu_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -306,7 +274,6 @@ class TestELUDoubleGradCheck(unittest.TestCase): class TestCELUDoubleGradCheck(unittest.TestCase): - def celu_wrapper(self, x): return paddle.nn.functional.celu(x[0], alpha=0.2) @@ -324,16 +291,13 @@ class TestCELUDoubleGradCheck(unittest.TestCase): y = F.celu(x, alpha=alpha) np.random.RandomState(SEED) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.celu_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.celu_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -346,7 +310,6 @@ class TestCELUDoubleGradCheck(unittest.TestCase): class TestSqrtDoubleGradCheck(unittest.TestCase): - def sqrt_wrapper(self, x): return paddle.sqrt(x[0]) @@ -362,15 +325,12 @@ class TestSqrtDoubleGradCheck(unittest.TestCase): y = layers.sqrt(x) x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.sqrt_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.sqrt_wrapper, [x], y, x_init=x_arr, place=place + ) def test_grad(self): 
paddle.enable_static() @@ -382,7 +342,6 @@ class TestSqrtDoubleGradCheck(unittest.TestCase): class TestRsqrtDoubleGradCheck(unittest.TestCase): - def rsqrt_wrapper(self, x): return paddle.rsqrt(x[0]) @@ -398,15 +357,12 @@ class TestRsqrtDoubleGradCheck(unittest.TestCase): y = layers.rsqrt(x) x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.rsqrt_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.rsqrt_wrapper, [x], y, x_init=x_arr, place=place + ) def test_grad(self): paddle.enable_static() @@ -418,7 +374,6 @@ class TestRsqrtDoubleGradCheck(unittest.TestCase): class TestSquareDoubleGradCheck(unittest.TestCase): - def square_wrapper(self, x): return paddle.square(x[0]) @@ -434,16 +389,13 @@ class TestSquareDoubleGradCheck(unittest.TestCase): y = layers.square(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.square_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.square_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -456,7 +408,6 @@ class TestSquareDoubleGradCheck(unittest.TestCase): class TestAbsDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. 
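The double-grad checks in this file compare analytic second derivatives against finite differences. As a rough illustration of the numeric side of such a check (this is not the gradient_checker implementation), a central difference for square(x) should recover the analytic value 2 everywhere:

import numpy as np

def second_derivative_fd(f, x, eps=1e-4):
    """Central-difference estimate of f''(x), the kind of estimate a double-grad check is compared to."""
    return (f(x + eps) - 2.0 * f(x) + f(x - eps)) / eps**2

f = lambda x: x ** 2
x = np.linspace(-1.0, 1.0, 5)
numeric = second_derivative_fd(f, x)
analytic = np.full_like(x, 2.0)          # d^2/dx^2 (x^2) = 2
np.testing.assert_allclose(numeric, analytic, rtol=1e-3)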
@@ -473,11 +424,9 @@ class TestAbsDoubleGradCheck(unittest.TestCase): # we should avoid this x_arr[np.abs(x_arr) < 0.005] = 0.02 - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -489,7 +438,6 @@ class TestAbsDoubleGradCheck(unittest.TestCase): class TestLogDoubleGradCheck(unittest.TestCase): - def log_wrapper(self, x): return paddle.log(x[0]) @@ -505,16 +453,13 @@ class TestLogDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(0.1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.log_wrapper, [x], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.log_wrapper, [x], y, x_init=x_arr, place=place + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 49c19f1cbff9a1a0990d0c329ecc5a1b673dd7e0..a933d591a8798469ce5657b8a88b60da59c31097 100755 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -29,26 +29,24 @@ paddle.enable_static() class TestSqrtOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of sqrt op must be Variable or numpy.ndarray. in1 = 1 self.assertRaises(TypeError, fluid.layers.sqrt, in1) # The input dtype of sqrt op must be float16, float32, float64. 
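Several of these checks also nudge inputs away from zero, e.g. x_arr[np.abs(x_arr) < 0.005] = 0.02, because a finite difference that straddles the kink of a non-smooth function such as abs or relu gives a meaningless gradient estimate. A quick NumPy illustration of why:

import numpy as np

def numeric_grad(f, x, eps=5e-3):
    """Central-difference first derivative, the same flavour of estimate a grad check relies on."""
    return (f(x + eps) - f(x - eps)) / (2.0 * eps)

# Away from the kink the estimate matches sign(x); straddling the kink it does not.
print(numeric_grad(np.abs, np.array(0.5)))      # ~1.0, correct for x > 0
print(numeric_grad(np.abs, np.array(0.001)))    # ~0.2, nowhere near +1 or -1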
- in2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") + in2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.sqrt, in2) - in3 = fluid.layers.data(name='input3', - shape=[12, 10], - dtype="float16") + in3 = fluid.layers.data( + name='input3', shape=[12, 10], dtype="float16" + ) fluid.layers.sqrt(x=in3) class TestActivation(OpTest): - def setUp(self): self.op_type = "exp" self.init_dtype() @@ -89,13 +87,11 @@ class TestActivation(OpTest): class TestActivation_ZeroDim(TestActivation): - def init_shape(self): self.shape = [] class TestExpm1(TestActivation): - def setUp(self): self.op_type = "expm1" self.python_api = paddle.expm1 @@ -117,13 +113,11 @@ class TestExpm1(TestActivation): class TestExpm1_ZeroDim(TestExpm1): - def init_shape(self): self.shape = [] class TestExpm1API(unittest.TestCase): - def init_dtype(self): self.dtype = 'float64' self.shape = [11, 17] @@ -153,7 +147,6 @@ class TestExpm1API(unittest.TestCase): run(place) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) X = paddle.to_tensor(self.x) @@ -173,7 +166,6 @@ class TestExpm1API(unittest.TestCase): class TestParameter(object): - def test_out_name(self): with fluid.program_guard(fluid.Program()): np_x = np.array([0.1]) @@ -181,7 +173,7 @@ class TestParameter(object): out = eval("paddle.%s(data, name='Y')" % self.op_type) place = fluid.CPUPlace() exe = fluid.Executor(place) - result, = exe.run(feed={"X": np_x}, fetch_list=[out]) + (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = eval("np.%s(np_x)" % self.op_type) np.testing.assert_allclose(result, expected, rtol=1e-05) @@ -195,7 +187,6 @@ class TestParameter(object): class TestSigmoid(TestActivation): - def setUp(self): self.op_type = "sigmoid" self.init_dtype() @@ -218,15 +209,14 @@ class TestSigmoid(TestActivation): class TestSigmoid_ZeroDim(TestSigmoid): - def init_shape(self): self.shape = [] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSigmoidBF16(OpTest): - def setUp(self): self.op_type = "sigmoid" self.init_dtype() @@ -265,7 +255,6 @@ class TestSigmoidBF16_ZeroDim(TestSigmoidBF16): class TestSilu(TestActivation): - def setUp(self): self.op_type = "silu" self.init_dtype() @@ -288,7 +277,6 @@ class TestSilu(TestActivation): class TestSilu_ZeroDim(TestSilu): - def init_shape(self): self.shape = [] @@ -297,8 +285,11 @@ class TestSiluAPI(unittest.TestCase): # test paddle.nn.Silu, paddle.nn.functional.silu def setUp(self): self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -329,19 +320,18 @@ class TestSiluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.silu, 1) # The input dtype must be float16, float32, float64. 
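TestExpm1 above compares against np.expm1 rather than np.exp(x) - 1; for small inputs the naive form loses several significant digits to cancellation, which is exactly the case expm1 exists for. A short NumPy comparison (illustrative only):

import numpy as np

x = np.array([1e-10, 1e-8, 1e-6], dtype='float64')
naive = np.exp(x) - 1.0            # noticeably off for the smallest inputs
stable = np.expm1(x)               # keeps full precision
print(naive)
print(stable)
print(np.abs(naive - stable) / stable)   # relative error introduced by the naive form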
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[11, 17], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[11, 17], dtype='int32' + ) self.assertRaises(TypeError, F.silu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[11, 17], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[11, 17], dtype='float16' + ) F.silu(x_fp16) class TestLogSigmoid(TestActivation): - def setUp(self): self.op_type = "logsigmoid" self.init_dtype() @@ -361,7 +351,6 @@ class TestLogSigmoid(TestActivation): class TestLogSigmoid_ZeroDim(TestLogSigmoid): - def init_shape(self): self.shape = [] @@ -371,8 +360,11 @@ class TestLogSigmoidAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -414,19 +406,18 @@ class TestLogSigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.log_sigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[11, 17], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[11, 17], dtype='int32' + ) self.assertRaises(TypeError, F.log_sigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[11, 17], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[11, 17], dtype='float16' + ) F.log_sigmoid(x_fp16) class TestTanh(TestActivation, TestParameter): - def setUp(self): self.op_type = "tanh" self.init_dtype() @@ -445,14 +436,13 @@ class TestTanh(TestActivation, TestParameter): self.check_grad(['X'], 'Out') def init_dtype(self): - #TODO If dtype is float64, the output (Out) has diff at CPUPlace + # TODO If dtype is float64, the output (Out) has diff at CPUPlace # when using and not using inplace. Therefore, set dtype as float32 # for now. self.dtype = np.float32 class TestTanh_ZeroDim(TestTanh): - def init_shape(self): self.shape = [] @@ -463,8 +453,11 @@ class TestTanhAPI(unittest.TestCase): self.dtype = 'float32' np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) self.executed_api() def executed_api(self): @@ -511,14 +504,14 @@ class TestTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.tanh, 1) # The input dtype must be float16, float32. 
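The references used by the silu and log_sigmoid tests above are simple compositions of the sigmoid: silu(x) = x * sigmoid(x) and log_sigmoid(x) = log(sigmoid(x)). A small NumPy sketch; the np.logaddexp form is a stability choice of this sketch, not necessarily how the op computes it internally:

import numpy as np

def ref_silu(x):
    return x / (1.0 + np.exp(-x))       # equivalent to x * sigmoid(x)

def ref_log_sigmoid(x):
    return -np.logaddexp(0.0, -x)       # stable form of log(1 / (1 + exp(-x)))

x = np.array([-50.0, -1.0, 0.0, 1.0, 50.0])
print(ref_silu(x))
print(ref_log_sigmoid(x))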
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, self.tanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) self.tanh(x_fp16) @@ -529,7 +522,6 @@ class TestTanhInplaceAPI(TestTanhAPI): class TestAtan(TestActivation, TestParameter): - def setUp(self): self.op_type = "atan" self.init_dtype() @@ -554,7 +546,7 @@ class TestAtan(TestActivation, TestParameter): out = paddle.atan(data, name='Y') place = fluid.CPUPlace() exe = fluid.Executor(place) - result, = exe.run(feed={"X": np_x}, fetch_list=[out]) + (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = np.arctan(np_x) self.assertEqual(result, expected) @@ -568,13 +560,11 @@ class TestAtan(TestActivation, TestParameter): class TestAtan_ZeroDim(TestTanh): - def init_shape(self): self.shape = [] class TestSinh(TestActivation): - def setUp(self): self.op_type = "sinh" self.init_dtype() @@ -594,13 +584,11 @@ class TestSinh(TestActivation): class TestSinh_ZeroDim(TestSinh): - def init_shape(self): self.shape = [] class TestSinhAPI(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(): np_x = np.array([0.1]) @@ -612,19 +600,24 @@ class TestSinhAPI(unittest.TestCase): def test_api(self): test_data_shape = [11, 17] with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = np.random.uniform(0.1, 1, - test_data_shape).astype("float32") - data_x = fluid.layers.data(name="data_x", - shape=test_data_shape, - append_batch_size=False, - dtype="float32") + input_x = np.random.uniform(0.1, 1, test_data_shape).astype( + "float32" + ) + data_x = fluid.layers.data( + name="data_x", + shape=test_data_shape, + append_batch_size=False, + dtype="float32", + ) pd_sinh_out = fluid.layers.sinh(data_x) exe = fluid.Executor(place=fluid.CPUPlace()) exe.run(fluid.default_startup_program()) - np_sinh_res, = exe.run(fluid.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[pd_sinh_out]) + (np_sinh_res,) = exe.run( + fluid.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[pd_sinh_out], + ) expected_res = np.sinh(input_x) np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05) @@ -632,8 +625,9 @@ class TestSinhAPI(unittest.TestCase): def test_backward(self): test_data_shape = [11, 17] with fluid.dygraph.guard(): - input_x = np.random.uniform(0.1, 1, - test_data_shape).astype("float32") + input_x = np.random.uniform(0.1, 1, test_data_shape).astype( + "float32" + ) var = fluid.dygraph.to_variable(input_x) var.stop_gradient = False loss = fluid.layers.sinh(var) @@ -643,7 +637,6 @@ class TestSinhAPI(unittest.TestCase): class TestSinhOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program()): # The input type must be Variable. 
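The sinh and cosh tests rely on NumPy's hyperbolic functions as references. Two quick sanity checks on those references, the identity cosh^2 - sinh^2 = 1 and the derivative relation sinh' = cosh (pure NumPy, illustrative only):

import numpy as np

x = np.linspace(-2.0, 2.0, 9)
np.testing.assert_allclose(np.cosh(x) ** 2 - np.sinh(x) ** 2, np.ones_like(x), rtol=1e-12)

# d/dx sinh(x) = cosh(x), checked with a central difference
eps = 1e-6
numeric = (np.sinh(x + eps) - np.sinh(x - eps)) / (2 * eps)
np.testing.assert_allclose(numeric, np.cosh(x), rtol=1e-6)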
@@ -657,7 +650,6 @@ class TestSinhOpError(unittest.TestCase): class TestCosh(TestActivation): - def setUp(self): self.op_type = "cosh" self.init_dtype() @@ -677,13 +669,11 @@ class TestCosh(TestActivation): class TestCosh_ZeroDim(TestCosh): - def init_shape(self): self.shape = [] class TestCoshAPI(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(): np_x = np.array([0.1]) @@ -695,19 +685,24 @@ class TestCoshAPI(unittest.TestCase): def test_api(self): test_data_shape = [11, 17] with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = np.random.uniform(0.1, 1, - test_data_shape).astype("float32") - data_x = fluid.layers.data(name="data_x", - shape=test_data_shape, - append_batch_size=False, - dtype="float32") + input_x = np.random.uniform(0.1, 1, test_data_shape).astype( + "float32" + ) + data_x = fluid.layers.data( + name="data_x", + shape=test_data_shape, + append_batch_size=False, + dtype="float32", + ) pd_cosh_out = paddle.cosh(data_x) exe = fluid.Executor(place=fluid.CPUPlace()) exe.run(fluid.default_startup_program()) - np_cosh_res, = exe.run(fluid.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[pd_cosh_out]) + (np_cosh_res,) = exe.run( + fluid.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[pd_cosh_out], + ) expected_res = np.cosh(input_x) np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05) @@ -715,8 +710,9 @@ class TestCoshAPI(unittest.TestCase): def test_backward(self): test_data_shape = [11, 17] with fluid.dygraph.guard(): - input_x = np.random.uniform(0.1, 1, - test_data_shape).astype("float32") + input_x = np.random.uniform(0.1, 1, test_data_shape).astype( + "float32" + ) var = fluid.dygraph.to_variable(input_x) var.stop_gradient = False loss = fluid.layers.cosh(var) @@ -726,7 +722,6 @@ class TestCoshAPI(unittest.TestCase): class TestCoshOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program()): # The input type must be Variable. @@ -745,7 +740,6 @@ def ref_tanhshrink(x): class TestTanhshrink(TestActivation): - def setUp(self): self.op_type = "tanh_shrink" self.init_dtype() @@ -765,7 +759,6 @@ class TestTanhshrink(TestActivation): class TestTanhshrink_ZeroDim(TestTanhshrink): - def init_shape(self): self.shape = [] @@ -775,8 +768,11 @@ class TestTanhshrinkAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -818,14 +814,14 @@ class TestTanhshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.tanhshrink, 1) # The input dtype must be float16, float32, float64. 
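ref_tanhshrink above is simply x - tanh(x); for small x it behaves like x**3 / 3, which gives a convenient spot check on the reference (illustrative only):

import numpy as np

def ref_tanhshrink(x):
    return x - np.tanh(x)

x = np.array([0.01, 0.1, 1.0, 3.0])
print(ref_tanhshrink(x))
# For small x, x - tanh(x) is approximately x**3 / 3 (leading Taylor term)
np.testing.assert_allclose(ref_tanhshrink(x[:2]), x[:2] ** 3 / 3, rtol=1e-2)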
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.tanhshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.tanhshrink(x_fp16) @@ -836,7 +832,6 @@ def ref_hardshrink(x, threshold): class TestHardShrink(TestActivation): - def setUp(self): self.op_type = "hard_shrink" self.init_dtype() @@ -865,7 +860,6 @@ class TestHardShrink(TestActivation): class TestHardShrink_threshold_negative(TestHardShrink): - def set_attrs(self): self.threshold = -0.1 @@ -883,8 +877,11 @@ class TestHardShrinkAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -933,14 +930,14 @@ class TestHardShrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardshrink(x_fp16) @@ -957,8 +954,11 @@ class TestHardtanhAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -997,26 +997,26 @@ class TestHardtanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardtanh, 1) # The input dtype must be float16, float32, float64. 
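ref_hardshrink keeps a value only when it falls outside [-threshold, threshold], and hardtanh clips into a fixed range. Both fit in one NumPy line each; the helpers below are illustrative references, not the ops' implementations:

import numpy as np

def ref_hardshrink(x, threshold=0.5):
    return np.where(np.abs(x) > threshold, x, 0.0)

def ref_hardtanh(x, t_min=-1.0, t_max=1.0):
    return np.clip(x, t_min, t_max)

x = np.array([-2.0, -0.3, 0.0, 0.4, 1.5])
print(ref_hardshrink(x))   # keeps -2.0 and 1.5, zeroes the rest
print(ref_hardtanh(x))     # clips -2.0 to -1.0 and 1.5 to 1.0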
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardtanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardtanh(x_fp16) def ref_softshrink(x, threshold=0.5): out = np.copy(x) out = (out < -threshold) * (out + threshold) + (out > threshold) * ( - out - threshold) + out - threshold + ) return out class TestSoftshrink(TestActivation): - def setUp(self): self.op_type = "softshrink" self.check_eager = True @@ -1040,7 +1040,6 @@ class TestSoftshrink(TestActivation): class TestSoftshrink_ZeroDim(TestSoftshrink): - def init_shape(self): self.shape = [] @@ -1051,8 +1050,11 @@ class TestSoftshrinkAPI(unittest.TestCase): self.threshold = 0.8 np.random.seed(1024) self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -1094,24 +1096,23 @@ class TestSoftshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.softshrink, x_int32) # The threshold must be no less than zero - x_fp32 = paddle.fluid.data(name='x_fp32', - shape=[12, 10], - dtype='float32') + x_fp32 = paddle.fluid.data( + name='x_fp32', shape=[12, 10], dtype='float32' + ) self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.softshrink(x_fp16) class TestSqrt(TestActivation, TestParameter): - def setUp(self): self.op_type = "sqrt" self.python_api = paddle.sqrt @@ -1135,15 +1136,14 @@ class TestSqrt(TestActivation, TestParameter): class TestSqrt_ZeroDim(TestSqrt): - def init_shape(self): self.shape = [] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSqrtBF16(OpTest): - def setUp(self): self.op_type = "sqrt" self.python_api = paddle.sqrt @@ -1175,7 +1175,6 @@ class TestSqrtBF16(OpTest): class TestRsqrt(TestActivation): - def setUp(self): self.op_type = "rsqrt" self.python_api = paddle.rsqrt @@ -1195,10 +1194,9 @@ class TestRsqrt(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], - 'Out', - max_relative_error=0.0005, - check_eager=True) + self.check_grad( + ['X'], 'Out', max_relative_error=0.0005, check_eager=True + ) ''' @@ -1210,7 +1208,6 @@ class TestRsqrt_ZeroDim(TestRsqrt): class TestAbs(TestActivation): - def setUp(self): self.op_type = "abs" self.init_dtype() @@ -1238,13 +1235,11 @@ class TestAbs(TestActivation): class TestAbs_ZeroDim(TestAbs): - def init_shape(self): self.shape = [] class TestCeil(TestActivation): - def setUp(self): self.op_type = "ceil" self.check_eager = True @@ -1268,13 +1263,11 @@ class 
TestCeil(TestActivation): class TestCeil_ZeroDim(TestCeil): - def init_shape(self): self.shape = [] class TestFloor(TestActivation): - def setUp(self): self.op_type = "floor" self.check_eager = True @@ -1300,13 +1293,11 @@ class TestFloor(TestActivation): class TestFloor_ZeroDim(TestFloor): - def init_shape(self): self.shape = [] class TestCos(TestActivation): - def setUp(self): self.op_type = "cos" self.init_dtype() @@ -1329,13 +1320,11 @@ class TestCos(TestActivation): class TestCos_ZeroDim(TestCos): - def init_shape(self): self.shape = [] class TestTan(TestActivation): - def setUp(self): np.random.seed(1024) self.op_type = "tan" @@ -1344,8 +1333,11 @@ class TestTan(TestActivation): self.dtype = 'float32' self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) out = np.tan(self.x_np) @@ -1362,19 +1354,20 @@ class TestTan(TestActivation): class TestTan_ZeroDim(TestTan): - def init_shape(self): self.shape = [] class TestTanAPI(unittest.TestCase): - def setUp(self): np.random.seed(1024) self.dtype = 'float32' self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_dygraph_api(self): paddle.disable_static(self.place) @@ -1397,8 +1390,9 @@ class TestTanAPI(unittest.TestCase): def test_backward(self): test_data_shape = [11, 17] with fluid.dygraph.guard(): - input_x = np.random.uniform(0.1, 1, - test_data_shape).astype("float32") + input_x = np.random.uniform(0.1, 1, test_data_shape).astype( + "float32" + ) var = paddle.to_tensor(input_x) var.stop_gradient = False loss = paddle.tan(var) @@ -1408,7 +1402,6 @@ class TestTanAPI(unittest.TestCase): class TestAcos(TestActivation): - def setUp(self): self.op_type = "acos" self.init_dtype() @@ -1431,13 +1424,11 @@ class TestAcos(TestActivation): class TestAcos_ZeroDim(TestAcos): - def init_shape(self): self.shape = [] class TestSin(TestActivation, TestParameter): - def setUp(self): self.op_type = "sin" self.init_dtype() @@ -1460,13 +1451,11 @@ class TestSin(TestActivation, TestParameter): class TestSin_ZeroDim(TestSin): - def init_shape(self): self.shape = [] class TestAsin(TestActivation): - def setUp(self): self.op_type = "asin" self.init_dtype() @@ -1489,13 +1478,11 @@ class TestAsin(TestActivation): class TestAsin_ZeroDim(TestAsin): - def init_shape(self): self.shape = [] class TestAcosh(TestActivation): - def setUp(self): self.op_type = "acosh" self.init_dtype() @@ -1518,13 +1505,11 @@ class TestAcosh(TestActivation): class TestAcosh_ZeroDim(TestAcosh): - def init_shape(self): self.shape = [] class TestAsinh(TestActivation): - def setUp(self): self.op_type = "asinh" self.init_dtype() @@ -1547,13 +1532,11 @@ class TestAsinh(TestActivation): class TestAsinh_ZeroDim(TestAsinh): - def init_shape(self): self.shape = [] class TestAtanh(TestActivation): - def setUp(self): self.op_type = "atanh" self.init_dtype() @@ -1576,13 +1559,11 @@ class TestAtanh(TestActivation): class TestAtanh_ZeroDim(TestAtanh): - def init_shape(self): self.shape = [] class TestRound(TestActivation): - def setUp(self): self.op_type = "round" self.check_eager = True @@ -1605,13 +1586,11 @@ class TestRound(TestActivation): class TestRound_ZeroDim(TestRound): - def init_shape(self): self.shape = [] class 
TestRelu(TestActivation): - def setUp(self): self.op_type = "relu" self.init_dtype() @@ -1640,7 +1619,6 @@ class TestRelu(TestActivation): class TestRelu_ZeroDim(TestRelu): - def init_shape(self): self.shape = [] @@ -1650,8 +1628,11 @@ class TestReluAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) self.executed_api() def executed_api(self): @@ -1687,14 +1668,14 @@ class TestReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[10, 12], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[10, 12], dtype='int32' + ) self.assertRaises(TypeError, self.relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[10, 12], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[10, 12], dtype='float16' + ) self.relu(x_fp16) @@ -1711,7 +1692,6 @@ def ref_leaky_relu(x, alpha=0.01): class TestLeakyRelu(TestActivation): - def get_alpha(self): return 0.02 @@ -1738,25 +1718,21 @@ class TestLeakyRelu(TestActivation): class TestLeakyReluAlpha1(TestLeakyRelu): - def get_alpha(self): return 2 class TestLeakyReluAlpha2(TestLeakyRelu): - def get_alpha(self): return -0.01 class TestLeakyReluAlpha3(TestLeakyRelu): - def get_alpha(self): return -2.0 class TestLeakyRelu_ZeroDim(TestLeakyRelu): - def init_shape(self): self.shape = [] @@ -1767,8 +1743,11 @@ class TestLeakyReluAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -1817,28 +1796,33 @@ class TestLeakyReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.leaky_relu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.leaky_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.leaky_relu(x_fp16) def gelu(x, approximate): if approximate: - y_ref = 0.5 * x * ( - 1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + y_ref = ( + 0.5 + * x + * ( + 1.0 + + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))) + ) + ) else: y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2))) return y_ref.astype(x.dtype) class TestGeluApproximate(TestActivation): - def setUp(self): self.op_type = "gelu" self.init_dtype() @@ -1859,7 +1843,6 @@ class TestGeluApproximate(TestActivation): class TestGelu(TestActivation): - def setUp(self): self.op_type = "gelu" self.init_dtype() @@ -1880,7 +1863,6 @@ class TestGelu(TestActivation): class TestGelu_ZeroDim(TestGelu): - def init_shape(self): self.shape = [] @@ -1890,8 +1872,11 @@ class TestGELUAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -1930,19 +1915,18 @@ class TestGELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.gelu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[11, 17], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[11, 17], dtype='int32' + ) self.assertRaises(TypeError, F.gelu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[11, 17], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[11, 17], dtype='float16' + ) F.gelu(x_fp16) class TestBRelu(TestActivation): - def setUp(self): self.op_type = "brelu" self.init_dtype() @@ -1972,15 +1956,18 @@ class TestBreluAPI(unittest.TestCase): # test paddle.fluid.layers.brelu def setUp(self): np.random.seed(1024) - self.t_min = 0. - self.t_max = 24. 
+ self.t_min = 0.0 + self.t_max = 24.0 self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32') self.out_ref = np.copy(self.x_np) self.out_ref[self.out_ref < self.t_min] = self.t_min self.out_ref[self.out_ref > self.t_max] = self.t_max self.out_ref = self.out_ref.astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_fluid_api(self): with paddle.static.program_guard(paddle.static.Program()): @@ -2004,9 +1991,9 @@ class TestBreluAPI(unittest.TestCase): x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, fluid.layers.brelu, x_int32) # support the input dtype is float16 - x_fp16 = fluid.layers.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = fluid.layers.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) fluid.layers.brelu(x_fp16) @@ -2018,7 +2005,6 @@ def ref_relu6(x, threshold=6.0): class TestRelu6(TestActivation): - def setUp(self): self.op_type = "relu6" self.init_dtype() @@ -2044,7 +2030,6 @@ class TestRelu6(TestActivation): class TestRelu6_ZeroDim(TestRelu6): - def init_shape(self): self.shape = [] @@ -2055,8 +2040,11 @@ class TestRelu6API(unittest.TestCase): np.random.seed(1024) self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64) self.x_np[np.abs(self.x_np) < 0.005] = 0.02 - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -2098,14 +2086,14 @@ class TestRelu6API(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.relu6, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.relu6, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.relu6(x_fp16) @@ -2114,12 +2102,12 @@ def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0): if x_dtype == 'float16': x_dtype = 'float16' x = x.astype('float32') - return (x * np.minimum(np.maximum(x + offset, 0.), threshold) / - scale).astype(x_dtype) + return ( + x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale + ).astype(x_dtype) class TestHardSwish(TestActivation): - def setUp(self): self.op_type = 'hard_swish' self.init_dtype() @@ -2131,7 +2119,7 @@ class TestHardSwish(TestActivation): threshold = 6.0 scale = 6.0 offset = 3.0 - #the same with TestAbs + # the same with TestAbs x[np.abs(x + offset) < 0.005] = 0.02 x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02 out = ref_hardswish(x, threshold, scale, offset) @@ -2151,7 +2139,6 @@ class TestHardSwish(TestActivation): class TestHardSwish_ZeroDim(TestHardSwish): - def init_shape(self): self.shape = [] @@ -2160,8 +2147,11 @@ class TestHardswishAPI(unittest.TestCase): # test paddle.nn.Hardswish, paddle.nn.functional.hardswish def setUp(self): self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): @@ -2177,11 +2167,11 @@ class TestHardswishAPI(unittest.TestCase): def test_dygraph_api(self): paddle.disable_static(self.place) - x = paddle.to_tensor([11648., 11448.]) + x = paddle.to_tensor([11648.0, 11448.0]) out1 = F.hardswish(x) m = paddle.nn.Hardswish() out2 = m(x) - out_ref = [11648., 11448.] + out_ref = [11648.0, 11448.0] for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) paddle.enable_static() @@ -2206,14 +2196,14 @@ class TestHardswishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardswish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardswish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardswish(x_fp16) def test_api_eager_dygraph(self): @@ -2223,7 +2213,6 @@ class TestHardswishAPI(unittest.TestCase): class TestSoftRelu(TestActivation): - def setUp(self): self.op_type = "soft_relu" self.init_dtype() @@ -2250,7 +2239,6 @@ class TestSoftRelu(TestActivation): class TestSoftReluOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program()): # The input type must be Variable. @@ -2269,7 +2257,6 @@ def elu(x, alpha): class TestELU(TestActivation): - def setUp(self): self.op_type = "elu" self.init_dtype() @@ -2294,17 +2281,15 @@ class TestELU(TestActivation): self.check_grad(['X'], 'Out') def get_alpha(self): - return 1. 
+ return 1.0 class TestELUAlpha(TestELU): - def get_alpha(self): return -0.2 class TestELU_ZeroDim(TestELU): - def init_shape(self): self.shape = [] @@ -2314,8 +2299,11 @@ class TestELUAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) self.executed_api() def executed_api(self): @@ -2360,14 +2348,14 @@ class TestELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.elu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[10, 12], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[10, 12], dtype='int32' + ) self.assertRaises(TypeError, self.elu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[10, 12], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[10, 12], dtype='float16' + ) self.elu(x_fp16) @@ -2389,7 +2377,6 @@ def celu(x, alpha): class TestCELU(TestActivation): - def setUp(self): self.op_type = "celu" self.init_dtype() @@ -2414,7 +2401,6 @@ class TestCELU(TestActivation): class TestCELU_ZeroDim(TestCELU): - def init_shape(self): self.shape = [] @@ -2424,8 +2410,11 @@ class TestCELUAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32') - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) self.executed_api() def executed_api(self): @@ -2470,19 +2459,19 @@ class TestCELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.celu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[10, 12], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[10, 12], dtype='int32' + ) self.assertRaises(TypeError, self.celu, x_int32) # The alpha must be not equal 0 - x_fp32 = paddle.fluid.data(name='x_fp32', - shape=[10, 12], - dtype='float32') + x_fp32 = paddle.fluid.data( + name='x_fp32', shape=[10, 12], dtype='float32' + ) self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[10, 12], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[10, 12], dtype='float16' + ) self.celu(x_fp16) def test_api_eager_dygraph(self): @@ -2492,7 +2481,6 @@ class TestCELUAPI(unittest.TestCase): class TestReciprocal(TestActivation): - def setUp(self): self.op_type = "reciprocal" self.python_api = paddle.reciprocal @@ -2516,13 +2504,11 @@ class TestReciprocal(TestActivation): class TestReciprocal_ZeroDim(TestReciprocal): - def init_shape(self): self.shape = [] class TestLog(TestActivation): - def setUp(self): self.op_type = "log" self.check_eager = True @@ -2543,27 +2529,23 @@ class TestLog(TestActivation): self.check_grad(['X'], 'Out', check_eager=True) def test_error(self): - in1 = fluid.layers.data(name="in1", - shape=[11, 17], - append_batch_size=False, - dtype="int32") - in2 = fluid.layers.data(name="in2", - shape=[11, 17], - append_batch_size=False, - dtype="int64") + in1 = fluid.layers.data( + name="in1", shape=[11, 17], append_batch_size=False, dtype="int32" + ) + in2 = fluid.layers.data( + name="in2", shape=[11, 17], append_batch_size=False, dtype="int64" + ) self.assertRaises(TypeError, fluid.layers.log, in1) self.assertRaises(TypeError, fluid.layers.log, in2) class TestLog_ZeroDim(TestLog): - def init_shape(self): self.shape = [] class TestLog2(TestActivation): - def setUp(self): self.op_type = "log2" self.check_eager = True @@ -2590,19 +2572,22 @@ class TestLog2(TestActivation): self.assertRaises(TypeError, paddle.log2, in2) def test_api(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") - data_x = paddle.static.data(name="data_x", - shape=[11, 17], - dtype="float64") + data_x = paddle.static.data( + name="data_x", shape=[11, 17], dtype="float64" + ) out1 = paddle.log2(data_x) exe = paddle.static.Executor(place=fluid.CPUPlace()) exe.run(paddle.static.default_startup_program()) - res1, = exe.run(paddle.static.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[out1]) + (res1,) = exe.run( + paddle.static.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[out1], + ) expected_res = np.log2(input_x) np.testing.assert_allclose(res1, expected_res, rtol=1e-05) @@ -2617,13 +2602,11 @@ class TestLog2(TestActivation): class TestLog2_ZeroDim(TestLog2): - def init_shape(self): self.shape = [] class TestLog10(TestActivation): - def setUp(self): self.op_type = "log10" self.check_eager = True @@ -2644,13 +2627,11 @@ class TestLog10(TestActivation): class TestLog10_ZeroDim(TestLog10): - def init_shape(self): self.shape = [] class TestLog10API(unittest.TestCase): - def test_error(self): in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32") in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64") @@ -2659,19 +2640,22 @@ class TestLog10API(unittest.TestCase): 
self.assertRaises(TypeError, paddle.log10, in2) def test_api(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") - data_x = paddle.static.data(name="data_x", - shape=[11, 17], - dtype="float64") + data_x = paddle.static.data( + name="data_x", shape=[11, 17], dtype="float64" + ) out1 = paddle.log10(data_x) exe = paddle.static.Executor(place=paddle.CPUPlace()) exe.run(paddle.static.default_startup_program()) - res1, = exe.run(paddle.static.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[out1]) + (res1,) = exe.run( + paddle.static.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[out1], + ) expected_res = np.log10(input_x) np.testing.assert_allclose(res1, expected_res, rtol=1e-05) @@ -2686,7 +2670,6 @@ class TestLog10API(unittest.TestCase): class TestLog1p(TestActivation): - def setUp(self): self.op_type = "log1p" self.check_eager = True @@ -2708,27 +2691,29 @@ class TestLog1p(TestActivation): class TestLog1p_ZeroDim(TestLog1p): - def init_shape(self): self.shape = [] class TestLog1pAPI(unittest.TestCase): - def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") - data_x = fluid.layers.data(name="data_x", - shape=[11, 17], - append_batch_size=False, - dtype="float64") + data_x = fluid.layers.data( + name="data_x", + shape=[11, 17], + append_batch_size=False, + dtype="float64", + ) out1 = paddle.log1p(data_x) exe = fluid.Executor(place=fluid.CPUPlace()) exe.run(fluid.default_startup_program()) - res1, = exe.run(fluid.default_main_program(), - feed={"data_x": input_x}, - fetch_list=[out1]) + (res1,) = exe.run( + fluid.default_main_program(), + feed={"data_x": input_x}, + fetch_list=[out1], + ) expected_res = np.log1p(input_x) np.testing.assert_allclose(res1, expected_res, rtol=1e-05) @@ -2743,7 +2728,6 @@ class TestLog1pAPI(unittest.TestCase): class TestSquare(TestActivation): - def setUp(self): self.op_type = "square" self.python_api = paddle.square @@ -2760,25 +2744,23 @@ class TestSquare(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], - 'Out', - max_relative_error=0.007, - check_eager=True) + self.check_grad( + ['X'], 'Out', max_relative_error=0.007, check_eager=True + ) def test_check_output(self): self.check_output(check_eager=True) class TestSquare_ZeroDim(TestSquare): - def init_shape(self): self.shape = [] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSquareBF16(OpTest): - def setUp(self): self.op_type = "square" self.python_api = paddle.square @@ -2802,14 +2784,12 @@ class TestSquareBF16(OpTest): def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - numeric_grad_delta=0.5, - check_eager=True) + self.check_grad_with_place( + place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True + ) class TestPow(TestActivation): - def setUp(self): self.op_type = "pow" self.python_api = paddle.pow @@ -2835,13 +2815,11 @@ class TestPow(TestActivation): class TestPow_ZeroDim(TestPow): - def init_shape(self): self.shape = [] class TestPow_factor_tensor(TestActivation): - def setUp(self): self.op_type = "pow" self.check_eager = False @@ 
-2854,7 +2832,7 @@ class TestPow_factor_tensor(TestActivation): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'FactorTensor': np.array([3.0]).astype("float32") + 'FactorTensor': np.array([3.0]).astype("float32"), } self.attrs = {} @@ -2870,14 +2848,12 @@ class TestPow_factor_tensor(TestActivation): def test_api(self): input = np.random.uniform(1, 2, [11, 17]).astype("float32") - x = fluid.layers.data(name="x", - shape=[11, 17], - append_batch_size=False, - dtype="float32") - res = fluid.layers.data(name="res", - shape=[11, 17], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name="x", shape=[11, 17], append_batch_size=False, dtype="float32" + ) + res = fluid.layers.data( + name="res", shape=[11, 17], append_batch_size=False, dtype="float32" + ) factor_1 = 2.0 factor_2 = fluid.layers.fill_constant([1], "float32", 3.0) @@ -2891,29 +2867,26 @@ class TestPow_factor_tensor(TestActivation): res_1, res_2, res, res_6 = exe.run( fluid.default_main_program(), feed={"x": input}, - fetch_list=[out_1, out_2, res, out_6]) + fetch_list=[out_1, out_2, res, out_6], + ) assert np.allclose(res_1, np.power(input, 2)) assert np.allclose(res_2, np.power(input, 3)) assert np.allclose(res_6, np.power(input, 3)) def test_error(self): - in1 = fluid.layers.data(name="in1", - shape=[11, 17], - append_batch_size=False, - dtype="int32") - in2 = fluid.layers.data(name="in2", - shape=[11, 17], - append_batch_size=False, - dtype="int64") - in3 = fluid.layers.data(name="in3", - shape=[11, 17], - append_batch_size=False, - dtype="float32") - in4 = fluid.layers.data(name="in4", - shape=[11, 17], - append_batch_size=False, - dtype="float64") + in1 = fluid.layers.data( + name="in1", shape=[11, 17], append_batch_size=False, dtype="int32" + ) + in2 = fluid.layers.data( + name="in2", shape=[11, 17], append_batch_size=False, dtype="int64" + ) + in3 = fluid.layers.data( + name="in3", shape=[11, 17], append_batch_size=False, dtype="float32" + ) + in4 = fluid.layers.data( + name="in4", shape=[11, 17], append_batch_size=False, dtype="float64" + ) factor_1 = fluid.layers.fill_constant([1], "float64", 3.0) @@ -2929,7 +2902,6 @@ def ref_stanh(x, scale_a=0.67, scale_b=1.7159): class TestSTanh(TestActivation): - def get_scale_a(self): return 0.67 @@ -2960,19 +2932,16 @@ class TestSTanh(TestActivation): class TestSTanhScaleA(TestSTanh): - def get_scale_a(self): return 2.0 class TestSTanhScaleB(TestSTanh): - def get_scale_b(self): return 0.5 class TestSTanh_ZeroDim(TestSTanh): - def init_shape(self): self.shape = [] @@ -2990,8 +2959,11 @@ class TestSTanhAPI(unittest.TestCase): self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32') self.scale_a = self.get_scale_a() self.scale_b = self.get_scale_b() - self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3029,38 +3001,37 @@ class TestSTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.stanh, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, paddle.stanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) paddle.stanh(x_fp16) class TestSTanhAPIScaleA(TestSTanhAPI): - def get_scale_a(self): return 2.0 class TestSTanhAPIScaleB(TestSTanhAPI): - def get_scale_b(self): return 0.5 def ref_softplus(x, beta=1, threshold=20): x_beta = beta * x - out = np.select([x_beta <= threshold, x_beta > threshold], - [np.log(1 + np.exp(x_beta)) / beta, x]) + out = np.select( + [x_beta <= threshold, x_beta > threshold], + [np.log(1 + np.exp(x_beta)) / beta, x], + ) return out class TestSoftplus(TestActivation): - def setUp(self): self.op_type = "softplus" self.python_api = paddle.nn.functional.softplus @@ -3091,15 +3062,14 @@ class TestSoftplus(TestActivation): class TestSoftplus_ZeroDim(TestSoftplus): - def init_shape(self): self.shape = [] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftplusBF16(OpTest): - def setUp(self): self.op_type = "softplus" self.init_dtype() @@ -3133,8 +3103,11 @@ class TestSoftplusAPI(unittest.TestCase): self.threshold = 15 np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3176,14 +3149,14 @@ class TestSoftplusAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softplus, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.softplus, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.softplus(x_fp16) @@ -3193,7 +3166,6 @@ def ref_softsign(x): class TestSoftsign(TestActivation): - def setUp(self): self.op_type = "softsign" self.init_dtype() @@ -3217,7 +3189,6 @@ class TestSoftsign(TestActivation): class TestSoftsign_ZeroDim(TestSoftsign): - def init_shape(self): self.shape = [] @@ -3227,8 +3198,11 @@ class TestSoftsignAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3270,14 +3244,14 @@ class TestSoftsignAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softsign, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.softsign, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.softsign(x_fp16) @@ -3287,7 +3261,6 @@ def ref_thresholded_relu(x, threshold=1.0): class TestThresholdedRelu(TestActivation): - def setUp(self): self.op_type = "thresholded_relu" self.init_dtype() @@ -3313,7 +3286,6 @@ class TestThresholdedRelu(TestActivation): class TestThresholdedRelu_ZeroDim(TestThresholdedRelu): - def init_shape(self): self.shape = [] @@ -3325,8 +3297,11 @@ class TestThresholdedReluAPI(unittest.TestCase): np.random.seed(1024) self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64) self.x_np[np.abs(self.x_np) < 0.005] = 0.02 - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3368,23 +3343,22 @@ class TestThresholdedReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.thresholded_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.thresholded_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.thresholded_relu(x_fp16) def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5): - return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype) + return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype) class TestHardSigmoid(TestActivation): - def setUp(self): self.op_type = "hard_sigmoid" self.dtype = 'float64' @@ -3395,7 +3369,7 @@ class TestHardSigmoid(TestActivation): x = np.random.uniform(-5, 5, self.shape).astype(self.dtype) lower_threshold = -self.offset / self.slope - upper_threshold = (1. - self.offset) / self.slope + upper_threshold = (1.0 - self.offset) / self.slope # Same reason as TestAbs delta = 0.005 @@ -3416,20 +3390,17 @@ class TestHardSigmoid(TestActivation): class TestHardSigmoidFP32(TestHardSigmoid): - def set_attrs(self): self.dtype = 'float32' class TestHardSigmoidSlopeOffset(TestHardSigmoid): - def set_attrs(self): self.slope = 0.2 self.offset = 0.4 class TestHardSigmoid_ZeroDim(TestHardSigmoid): - def init_shape(self): self.shape = [] @@ -3438,8 +3409,11 @@ class TestHardsigmoidAPI(unittest.TestCase): # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid def setUp(self): self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): @@ -3484,14 +3458,14 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.hardsigmoid(x_fp16) @@ -3501,7 +3475,6 @@ def ref_swish(x): class TestSwish(TestActivation): - def setUp(self): self.op_type = "swish" self.python_api = paddle.nn.functional.swish @@ -3530,7 +3503,6 @@ class TestSwish(TestActivation): class TestSwish_ZeroDim(TestSwish): - def init_shape(self): self.shape = [] @@ -3540,8 +3512,11 @@ class TestSwishAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3588,25 +3563,25 @@ class TestSwishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.swish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.swish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.swish(x_fp16) -def ref_mish(x, threshold=20.): - softplus = np.select([x <= threshold, x > threshold], - [np.log(1 + np.exp(x)), x]) +def ref_mish(x, threshold=20.0): + softplus = np.select( + [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x] + ) return x * np.tanh(softplus) class TestMish(TestActivation): - def setUp(self): self.op_type = "mish" self.python_api = paddle.fluid.layers.nn.mish @@ -3632,7 +3607,6 @@ class TestMish(TestActivation): class TestMish_ZeroDim(TestMish): - def init_shape(self): self.shape = [] @@ -3642,8 +3616,11 @@ class TestMishAPI(unittest.TestCase): def setUp(self): np.random.seed(1024) self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64) - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -3685,32 +3662,30 @@ class TestMishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.mish, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.mish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.mish(x_fp16) -#------------------ Test Error Activation---------------------- +# ------------------ Test Error Activation---------------------- def create_test_error_class(op_type): - class TestOpErrors(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): op = getattr(fluid.layers, op_type) # The input dtype of op_type must be float32, float64. - in1 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") - in2 = fluid.layers.data(name='input3', - shape=[12, 10], - dtype="int64") + in1 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) + in2 = fluid.layers.data( + name='input3', shape=[12, 10], dtype="int64" + ) self.assertRaises(TypeError, op, in1) self.assertRaises(TypeError, op, in2) @@ -3737,13 +3712,12 @@ create_test_error_class('asinh') create_test_error_class('atanh') -#------------------ Test Cudnn Activation---------------------- +# ------------------ Test Cudnn Activation---------------------- def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestActCudnn(parent): - def init_kernel_type(self): self.attrs = {"use_cudnn": True} @@ -3758,16 +3732,14 @@ create_test_act_cudnn_class(TestSigmoid) create_test_act_cudnn_class(TestTanh) -#------------------ Test Fp16 ---------------------- -def create_test_act_fp16_class(parent, - atol=1e-3, - grad_check=True, - grad_atol=0.80): - - @unittest.skipIf(not paddle.is_compiled_with_cuda(), - "core is not compiled with CUDA") +# ------------------ Test Fp16 ---------------------- +def create_test_act_fp16_class( + parent, atol=1e-3, grad_check=True, grad_atol=0.80 +): + @unittest.skipIf( + not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestActFp16(parent): - def init_dtype(self): self.dtype = np.float16 @@ -3781,9 +3753,9 @@ def create_test_act_fp16_class(parent, place = core.CUDAPlace(0) support_fp16 = core.is_float16_supported(place) if support_fp16 and grad_check: - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=grad_atol) + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=grad_atol + ) cls_name = "{0}_{1}".format(parent.__name__, "fp16") TestActFp16.__name__ = cls_name @@ -3843,15 +3815,13 @@ create_test_act_fp16_class(TestHardSwish) create_test_act_fp16_class(TestMish, grad_atol=0.9) -def create_test_act_bf16_class(parent, - atol=1e-2, - grad_check=True, - grad_atol=0.80): - - @unittest.skipIf(not paddle.is_compiled_with_cuda(), - "core is not compiled with CUDA") +def create_test_act_bf16_class( + parent, atol=1e-2, grad_check=True, grad_atol=0.80 +): + @unittest.skipIf( + not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestActBF16(parent): - def init_dtype(self): self.dtype = np.uint16 @@ -3861,9 +3831,9 @@ def create_test_act_bf16_class(parent, def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - 
max_relative_error=grad_atol) + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=grad_atol + ) cls_name = "{0}_{1}".format(parent.__name__, "bf16") TestActBF16.__name__ = cls_name diff --git a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py index ceab2d48ecf742e44d4450dccf5dc76122d25a22..472dc334d66524c5dcc0eb2142543f36fbc722f5 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py @@ -20,7 +20,6 @@ import paddle class TestSparseSquareOp(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() @@ -58,7 +57,6 @@ class TestSparseSquareOp(unittest.TestCase): class TestSparseSqrtOp(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() diff --git a/python/paddle/fluid/tests/unittests/test_adadelta_op.py b/python/paddle/fluid/tests/unittests/test_adadelta_op.py index 3e246517ccba06beb7239b2573445406ea47f3d2..12122c8e05f37467acc74b0b7f2f38f9d3374843 100644 --- a/python/paddle/fluid/tests/unittests/test_adadelta_op.py +++ b/python/paddle/fluid/tests/unittests/test_adadelta_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid class TestAdadeltaOp1(OpTest): - def setUp(self): self.op_type = "adadelta" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -37,27 +36,33 @@ class TestAdadeltaOp1(OpTest): 'Param': param, 'Grad': grad, 'AvgSquaredGrad': avg_squared_grad, - 'AvgSquaredUpdate': avg_squared_update + 'AvgSquaredUpdate': avg_squared_update, } self.attrs = {'rho': rho, 'epsilon': epsilon} - avg_squared_grad_out = rho * avg_squared_grad + \ - (1 - rho) * np.square(grad) + avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * np.square( + grad + ) update = -np.multiply( np.sqrt( - np.divide(avg_squared_update + epsilon, - avg_squared_grad_out + epsilon)), grad) + np.divide( + avg_squared_update + epsilon, avg_squared_grad_out + epsilon + ) + ), + grad, + ) - avg_squared_update_out = rho * avg_squared_update + \ - (1 - rho) * np.square(update) + avg_squared_update_out = rho * avg_squared_update + ( + 1 - rho + ) * np.square(update) param_out = param + update self.outputs = { 'ParamOut': param_out, 'AvgSquaredGradOut': avg_squared_grad_out, - 'AvgSquaredUpdateOut': avg_squared_update_out + 'AvgSquaredUpdateOut': avg_squared_update_out, } def test_check_output(self): @@ -65,8 +70,7 @@ class TestAdadeltaOp1(OpTest): class TestAdadeltaOp2(OpTest): - '''Test Adadelta op with default attribute values - ''' + '''Test Adadelta op with default attribute values''' def setUp(self): self.op_type = "adadelta" @@ -84,25 +88,31 @@ class TestAdadeltaOp2(OpTest): 'Param': param, 'Grad': grad, 'AvgSquaredGrad': avg_squared_grad, - 'AvgSquaredUpdate': avg_squared_update + 'AvgSquaredUpdate': avg_squared_update, } - avg_squared_grad_out = rho * avg_squared_grad + \ - (1 - rho) * np.square(grad) + avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * np.square( + grad + ) update = -np.multiply( np.sqrt( - np.divide(avg_squared_update + epsilon, - avg_squared_grad_out + epsilon)), grad) + np.divide( + avg_squared_update + epsilon, avg_squared_grad_out + epsilon + ) + ), + grad, + ) - avg_squared_update_out = rho * avg_squared_update + \ - (1 - rho) * np.square(update) + avg_squared_update_out = rho * avg_squared_update + ( + 1 - rho + ) * np.square(update) param_out = param + update self.outputs = { 'ParamOut': param_out, 'AvgSquaredGradOut': 
avg_squared_grad_out, - 'AvgSquaredUpdateOut': avg_squared_update_out + 'AvgSquaredUpdateOut': avg_squared_update_out, } def test_check_output(self): @@ -110,16 +120,17 @@ class TestAdadeltaOp2(OpTest): class TestAdadeltaV2(unittest.TestCase): - def test_adadelta_dygraph(self): paddle.disable_static(paddle.CPUPlace()) value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.Adadelta(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=0.01) + adam = paddle.optimizer.Adadelta( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=0.01, + ) out = linear(a) out.backward() adam.step() @@ -140,8 +151,9 @@ class TestAdadeltaV2(unittest.TestCase): rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -150,18 +162,18 @@ class TestAdadeltaV2(unittest.TestCase): def test_raise_error(self): self.assertRaises(ValueError, paddle.optimizer.Adadelta, None) - self.assertRaises(ValueError, - paddle.optimizer.Adadelta, - learning_rate=0.1, - rho=None) - self.assertRaises(ValueError, - paddle.optimizer.Adadelta, - learning_rate=0.1, - epsilon=None) + self.assertRaises( + ValueError, paddle.optimizer.Adadelta, learning_rate=0.1, rho=None + ) + self.assertRaises( + ValueError, + paddle.optimizer.Adadelta, + learning_rate=0.1, + epsilon=None, + ) class TestAdadeltaV2Group(TestAdadeltaV2): - def test_adadelta_dygraph(self): paddle.disable_static(paddle.CPUPlace()) value = np.arange(26).reshape(2, 13).astype("float32") @@ -169,17 +181,17 @@ class TestAdadeltaV2Group(TestAdadeltaV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 5) # This can be any optimizer supported by dygraph. 
- adam = paddle.optimizer.Adadelta(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - }], - weight_decay=0.1) + adam = paddle.optimizer.Adadelta( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + }, + ], + weight_decay=0.1, + ) out = linear_1(a) out = linear_2(out) out.backward() diff --git a/python/paddle/fluid/tests/unittests/test_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_adagrad_op.py index fcac8599d0c22c2e3871b2279fb9a171122ae6e9..036322ee62d615dd69bc22bcb54b1d8d5478112c 100644 --- a/python/paddle/fluid/tests/unittests/test_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_adagrad_op.py @@ -22,8 +22,7 @@ import paddle class TestAdagradOp1(OpTest): - ''' Test Adagrad operator with explicit attributes - ''' + '''Test Adagrad operator with explicit attributes''' def setUp(self): self.op_type = "adagrad" @@ -37,7 +36,7 @@ class TestAdagradOp1(OpTest): 'Param': param, 'Grad': grad, 'Moment': moment, - 'LearningRate': np.array([lr]).astype("float32") + 'LearningRate': np.array([lr]).astype("float32"), } self.attrs = {'epsilon': epsilon} @@ -52,8 +51,7 @@ class TestAdagradOp1(OpTest): class TestAdagradOp2(OpTest): - ''' Test Adagrad operator with default attributes - ''' + '''Test Adagrad operator with default attributes''' def setUp(self): self.op_type = "adagrad" @@ -68,7 +66,7 @@ class TestAdagradOp2(OpTest): 'Param': param, 'Grad': grad, 'Moment': moment, - 'LearningRate': np.array([lr]).astype("float32") + 'LearningRate': np.array([lr]).astype("float32"), } self.attrs = {'epsilon': epsilon} @@ -83,7 +81,6 @@ class TestAdagradOp2(OpTest): class TestSparseAdagradOp(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() @@ -118,14 +115,16 @@ class TestSparseAdagradOp(unittest.TestCase): moment.set(moment_np_array, place) # create and run sgd operator - adagrad_op = Operator("adagrad", - Param='Param', - Grad='Grad', - ParamOut='Param', - Moment='Moment', - MomentOut='Moment', - LearningRate='LearningRate', - epsilon=2.0) + adagrad_op = Operator( + "adagrad", + Param='Param', + Grad='Grad', + ParamOut='Param', + Moment='Moment', + MomentOut='Moment', + LearningRate='LearningRate', + epsilon=2.0, + ) adagrad_op.run(scope, place) @@ -149,31 +148,35 @@ class TestSparseAdagradOp(unittest.TestCase): def get_out(param, lr, grad, m, epsilon): return param - lr * grad / (math.sqrt(m) + epsilon) - self.assertAlmostEqual(get_out(5.0, 2.0, 2.0, 6.0, 2.0), - result_array[rows[0], 0], - places=5) - self.assertAlmostEqual(get_out(5.0, 2.0, 1.0, 3.0, 2.0), - result_array[rows[0], 2], - places=5) - self.assertAlmostEqual(get_out(5.0, 2.0, 0.0, 2.0, 2.0), - result_array[1, 0], - places=5) + self.assertAlmostEqual( + get_out(5.0, 2.0, 2.0, 6.0, 2.0), result_array[rows[0], 0], places=5 + ) + self.assertAlmostEqual( + get_out(5.0, 2.0, 1.0, 3.0, 2.0), result_array[rows[0], 2], places=5 + ) + self.assertAlmostEqual( + get_out(5.0, 2.0, 0.0, 2.0, 2.0), result_array[1, 0], places=5 + ) # grad_merge = 1.0 + 1.0 # m = 6.0 - self.assertAlmostEqual(get_out(5.0, 2.0, 2.0, 6.0, 2.0), - result_array[rows[1], 10], - places=5) - - self.assertAlmostEqual(get_out(5.0, 2.0, 0.0, 2.0, 2.0), - result_array[5, 8], - places=5) - self.assertAlmostEqual(get_out(5.0, 2.0, 1.0, 3.0, 2.0), - result_array[rows[2], 1], - places=5) - self.assertAlmostEqual(get_out(5.0, 2.0, 4.0, 18.0, 2.0), - 
result_array[rows[2], 8], - places=5) + self.assertAlmostEqual( + get_out(5.0, 2.0, 2.0, 6.0, 2.0), + result_array[rows[1], 10], + places=5, + ) + + self.assertAlmostEqual( + get_out(5.0, 2.0, 0.0, 2.0, 2.0), result_array[5, 8], places=5 + ) + self.assertAlmostEqual( + get_out(5.0, 2.0, 1.0, 3.0, 2.0), result_array[rows[2], 1], places=5 + ) + self.assertAlmostEqual( + get_out(5.0, 2.0, 4.0, 18.0, 2.0), + result_array[rows[2], 8], + places=5, + ) def test_sparse_adagrad(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_adagrad_op_v2.py b/python/paddle/fluid/tests/unittests/test_adagrad_op_v2.py index e483dc1fa894960da444cc4a7fd6dbebba93c732..c3eee954b610bfa26337745d5790941f37c98ebc 100644 --- a/python/paddle/fluid/tests/unittests/test_adagrad_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_adagrad_op_v2.py @@ -17,22 +17,21 @@ import paddle class TestAdagradOpV2(unittest.TestCase): - def test_v20_coverage(self): paddle.disable_static() inp = paddle.rand(shape=[10, 10]) linear = paddle.nn.Linear(10, 10) out = linear(inp) loss = paddle.mean(out) - adagrad = paddle.optimizer.Adagrad(learning_rate=0.1, - parameters=linear.parameters()) + adagrad = paddle.optimizer.Adagrad( + learning_rate=0.1, parameters=linear.parameters() + ) out.backward() adagrad.step() adagrad.clear_grad() class TestAdagradOpV2Group(TestAdagradOpV2): - def test_v20_coverage(self): paddle.disable_static() inp = paddle.rand(shape=[10, 10]) @@ -41,17 +40,17 @@ class TestAdagradOpV2Group(TestAdagradOpV2): out = linear_1(inp) out = linear_2(out) loss = paddle.mean(out) - adagrad = paddle.optimizer.Adagrad(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - }], - weight_decay=0.1) + adagrad = paddle.optimizer.Adagrad( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + }, + ], + weight_decay=0.1, + ) out.backward() adagrad.step() adagrad.clear_grad() diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 76309ab15eb9709efcb532da9f06b20e38155be9..bfe1429342f29e3bc624360f49eb65da8c3d2d8e 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -23,10 +23,8 @@ from paddle.fluid.framework import _test_eager_guard class TestAdamOp1(OpTest): - def setUp(self): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' self.op_type = "adam" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -48,20 +46,19 @@ class TestAdamOp1(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * 
beta2, } def test_check_output(self): @@ -69,13 +66,11 @@ class TestAdamOp1(OpTest): class TestAdamOp2(OpTest): - def set_shape(self): self.shape = (102, 105) def setUp(self): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' self.op_type = "adam" self.set_shape() param = np.random.uniform(-1, 1, self.shape).astype("float32") @@ -98,20 +93,19 @@ class TestAdamOp2(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def test_check_output(self): @@ -119,16 +113,13 @@ class TestAdamOp2(OpTest): class TestAdamOnlyTailOp(TestAdamOp2): - def set_shape(self): - self.shape = (3) + self.shape = 3 class TestAdamOpMultipleSteps(OpTest): - def setUp(self): - '''Test Adam Operator with supplied attributes - ''' + '''Test Adam Operator with supplied attributes''' self.op_type = "adam" self.num_steps = 10 @@ -152,19 +143,20 @@ class TestAdamOpMultipleSteps(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([self.beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([self.beta2_pow]).astype("float32") + 'Beta2Pow': np.array([self.beta2_pow]).astype("float32"), } self.attrs = { 'epsilon': epsilon, 'beta1': self.beta1, - 'beta2': self.beta2 + 'beta2': self.beta2, } def test_check_output(self): for _ in range(self.num_steps): - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step( + self.inputs, self.attrs + ) beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1 beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2 @@ -173,7 +165,7 @@ class TestAdamOpMultipleSteps(OpTest): 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } # Verify output for this step @@ -189,8 +181,9 @@ class TestAdamOpMultipleSteps(OpTest): self.inputs['Beta2Pow'] = beta2_pow_out # Randomize gradient for next step - self.inputs['Grad'] = np.random.uniform( - -1, 1, (102, 105)).astype("float32") + self.inputs['Grad'] = np.random.uniform(-1, 1, (102, 105)).astype( + "float32" + ) def test_api_eager_dygraph(self): with _test_eager_guard(): @@ -270,8 +263,9 @@ def adamw_step(inputs, attributes): return param_out, moment1_out, moment2_out -def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, - lazy_mode): +def adam_step_sparse( + inputs, attributes, height, rows, row_numel, np_grad, lazy_mode +): ''' Simulate one step of the adam optimizer :param inputs: dict of inputs @@ -296,13 +290,16 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, param_out = np.zeros(shape=[height, row_numel]) def update_row(row_id, update_value): - moment1_out[row_id] = beta1 * moment1[row_id] + (1 - - beta1) * update_value - moment2_out[row_id] = beta2 * 
moment2[row_id] + ( - 1 - beta2) * np.square(update_value) + moment1_out[row_id] = ( + beta1 * moment1[row_id] + (1 - beta1) * update_value + ) + moment2_out[row_id] = beta2 * moment2[row_id] + (1 - beta2) * np.square( + update_value + ) lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow) param_out[row_id] = param[row_id] - lr_t * ( - moment1_out[row_id] / (np.sqrt(moment2_out[row_id]) + epsilon)) + moment1_out[row_id] / (np.sqrt(moment2_out[row_id]) + epsilon) + ) if lazy_mode: for idx, row_id in enumerate(rows): @@ -318,7 +315,6 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, class TestSparseAdamOp(unittest.TestCase): - def setup(self, scope, place, lazy_mode): beta1 = 0.78 beta2 = 0.836 @@ -337,14 +333,14 @@ class TestSparseAdamOp(unittest.TestCase): "Moment2": np.full((height, row_numel), 5.0).astype("float32"), 'Beta1Pow': beta1_pow, 'Beta2Pow': beta2_pow, - "LearningRate": np.full((1), 2.0).astype("float32") + "LearningRate": np.full((1), 2.0).astype("float32"), } self.init_output = np.full((height, row_numel), 0.0).astype("float32") self.attrs = { 'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2, - 'min_row_size_to_use_multithread': 2 + 'min_row_size_to_use_multithread': 2, } grad_selected_rows = scope.var('Grad').get_selected_rows() @@ -359,15 +355,21 @@ class TestSparseAdamOp(unittest.TestCase): self.sparse_inputs = ["Grad"] - param_out, mom1, mom2 = adam_step_sparse(self.dense_inputs, self.attrs, - height, rows, row_numel, - np_array, lazy_mode) + param_out, mom1, mom2 = adam_step_sparse( + self.dense_inputs, + self.attrs, + height, + rows, + row_numel, + np_array, + lazy_mode, + ) self.outputs = { "ParamOut": param_out, "Moment1Out": mom1, "Moment2Out": mom2, 'Beta1PowOut': beta1_pow * beta1, - 'Beta2PowOut': beta2_pow * beta2 + 'Beta2PowOut': beta2_pow * beta2, } def check_with_place(self, place, lazy_mode): @@ -412,10 +414,8 @@ class TestSparseAdamOp(unittest.TestCase): class TestAdamOpBetaVariable(OpTest): - def setUp(self): - '''Test Adam Op with beta as Variable - ''' + '''Test Adam Op with beta as Variable''' self.op_type = "adam" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -444,15 +444,14 @@ class TestAdamOpBetaVariable(OpTest): attributes = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def test_check_output(self): @@ -460,10 +459,8 @@ class TestAdamOpBetaVariable(OpTest): class TestAdamOpBetaEpsilonVariable(OpTest): - def setUp(self): - '''Test Adam Op with beta/epsilon as Variable - ''' + '''Test Adam Op with beta/epsilon as Variable''' self.op_type = "adam" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -493,15 +490,14 @@ class TestAdamOpBetaEpsilonVariable(OpTest): attributes = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': 
np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def test_check_output(self): @@ -509,10 +505,8 @@ class TestAdamOpBetaEpsilonVariable(OpTest): class TestAdamOpWithGlobalBetaPow(OpTest): - def setUp(self): - '''Test Adam Op with global_beta_pow - ''' + '''Test Adam Op with global_beta_pow''' self.op_type = "adam" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -542,8 +536,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): attributes = {'epsilon': epsilon} - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, attributes) + param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes) self.attrs = {'use_global_beta_pow': True} @@ -553,7 +546,7 @@ class TestAdamOpWithGlobalBetaPow(OpTest): 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([]), - 'Beta2PowOut': np.array([]) + 'Beta2PowOut': np.array([]), } def test_check_output(self): @@ -561,10 +554,8 @@ class TestAdamOpWithGlobalBetaPow(OpTest): class TestAdamOpWithSkipUpdate(OpTest): - def setUp(self): - '''Test Adam Op with global_beta_pow - ''' + '''Test Adam Op with global_beta_pow''' self.op_type = "adam" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -611,7 +602,6 @@ class TestAdamOpWithSkipUpdate(OpTest): class TestAdamOpV2(unittest.TestCase): - def test_adam_op(self): place = fluid.CPUPlace() shape = [2, 3, 8, 8] @@ -624,20 +614,20 @@ class TestAdamOpV2(unittest.TestCase): conv = fluid.layers.conv2d(data, 8, 3) loss = fluid.layers.reduce_mean(conv) - beta1 = fluid.layers.create_global_var(shape=[1], - value=0.85, - dtype='float32', - persistable=True) - beta2 = fluid.layers.create_global_var(shape=[1], - value=0.95, - dtype='float32', - persistable=True) + beta1 = fluid.layers.create_global_var( + shape=[1], value=0.85, dtype='float32', persistable=True + ) + beta2 = fluid.layers.create_global_var( + shape=[1], value=0.95, dtype='float32', persistable=True + ) betas = [beta1, beta2] - opt = paddle.optimizer.Adam(learning_rate=1e-5, - beta1=beta1, - beta2=beta2, - weight_decay=0.01, - epsilon=1e-8) + opt = paddle.optimizer.Adam( + learning_rate=1e-5, + beta1=beta1, + beta2=beta2, + weight_decay=0.01, + epsilon=1e-8, + ) opt.minimize(loss) exe.run(startup) @@ -651,8 +641,9 @@ class TestAdamOpV2(unittest.TestCase): a = fluid.dygraph.to_variable(value) linear = fluid.Linear(13, 5, dtype="float32") - adam = paddle.optimizer.Adam(learning_rate=0.01, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=0.01, parameters=linear.parameters() + ) out = linear(a) out.backward() adam.step() @@ -668,26 +659,29 @@ class TestAdamOpV2(unittest.TestCase): state_dict = adam.state_dict() adam.set_state_dict(state_dict) - #learning_rate is LRScheduler + # learning_rate is LRScheduler learning_rate = paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=0.1, T_max=10) + learning_rate=0.1, T_max=10 + ) adam = paddle.optimizer.Adam( learning_rate=learning_rate, weight_decay=fluid.regularizer.L2Decay(0.001), - parameters=emb.parameters()) + parameters=emb.parameters(), + ) lr = adam.get_lr() state_dict = adam.state_dict() adam.set_state_dict(state_dict) - #leanrning_rate is Tensor + # leanrning_rate is Tensor with self.assertRaises(TypeError): learning_rate = np.array([0.01]).astype("float32") 
learning_rate = paddle.to_tensor(learning_rate) - adam = paddle.optimizer.Adam(learning_rate=learning_rate, - parameters=emb.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=learning_rate, parameters=emb.parameters() + ) params = adam.get_opti_var_name_list() - assert (params is not None) + assert params is not None paddle.enable_static() def test_adam_with_grad_clip(self): @@ -696,9 +690,9 @@ class TestAdamOpV2(unittest.TestCase): a = fluid.dygraph.to_variable(value) linear = fluid.Linear(13, 5, dtype="float32") clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0) - adam = paddle.optimizer.Adam(0.1, - parameters=linear.parameters(), - grad_clip=clip) + adam = paddle.optimizer.Adam( + 0.1, parameters=linear.parameters(), grad_clip=clip + ) out = linear(a) out.backward() adam.step() @@ -713,11 +707,11 @@ class TestAdamOpV2(unittest.TestCase): lr = 0.01 adam.set_lr(lr) cur_lr = adam.get_lr() - assert (lr == cur_lr) + assert lr == cur_lr with self.assertRaises(TypeError): - lr_var = paddle.fluid.layers.create_global_var(shape=[1], - value=lr, - dtype='float32') + lr_var = paddle.fluid.layers.create_global_var( + shape=[1], value=lr, dtype='float32' + ) adam.set_lr(lr_var) paddle.enable_static() @@ -725,17 +719,17 @@ class TestAdamOpV2(unittest.TestCase): paddle.disable_static() linear = paddle.nn.Linear(10, 10) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adam(0.1, - beta1=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + 0.1, beta1=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adam(0.1, - beta2=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + 0.1, beta2=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adam(0.1, - epsilon=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + 0.1, epsilon=-1, parameters=linear.parameters() + ) paddle.enable_static() def test_adam_op_with_sparse_input_and_weight_decay(self): @@ -744,9 +738,9 @@ class TestAdamOpV2(unittest.TestCase): x_data = np.arange(0, 10).reshape((10, 1)).astype(np.int64) x = paddle.to_tensor(x_data, stop_gradient=False) emb = paddle.nn.Embedding(10, 10, sparse=True) - adam = paddle.optimizer.Adam(0.001, - parameters=emb.parameters(), - weight_decay=0.01) + adam = paddle.optimizer.Adam( + 0.001, parameters=emb.parameters(), weight_decay=0.01 + ) with self.assertRaises(RuntimeError): out = emb(x) @@ -764,13 +758,14 @@ class TestAdamOpV2(unittest.TestCase): class TestAdamOptimizer(unittest.TestCase): - - def _test(self, - place, - use_tensor=True, - use_fluid_api=True, - use_global_beta_pow=False, - flatten_param_grads=False): + def _test( + self, + place, + use_tensor=True, + use_fluid_api=True, + use_global_beta_pow=False, + flatten_param_grads=False, + ): paddle.enable_static() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -784,29 +779,30 @@ class TestAdamOptimizer(unittest.TestCase): weight_attr1 = paddle.ParamAttr( name="weight1", initializer=fluid.initializer.Constant(value=1.0), - trainable=True) + trainable=True, + ) weight_attr2 = paddle.ParamAttr( name="weight2", initializer=fluid.initializer.Constant(value=2.0), - trainable=True) + trainable=True, + ) clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) with paddle.static.program_guard(main_prog, startup_prog): with paddle.utils.unique_name.guard(): a = paddle.static.data(name="a", shape=[2, 2], dtype='float32') b = paddle.static.data(name="b", 
shape=[2, 2], dtype='float32') - label = paddle.static.data(name="label", - shape=[2, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[2, 1], dtype='int64' + ) sum = paddle.add(a, b) z = paddle.pow(sum, 2.0) fc_1 = fluid.layers.fc(input=z, size=2, param_attr=weight_attr1) - prediction = fluid.layers.fc(input=fc_1, - size=2, - param_attr=weight_attr2, - act='softmax') + prediction = fluid.layers.fc( + input=fc_1, size=2, param_attr=weight_attr2, act='softmax' + ) cost = fluid.layers.cross_entropy(input=prediction, label=label) loss = fluid.layers.reduce_mean(cost) @@ -819,19 +815,22 @@ class TestAdamOptimizer(unittest.TestCase): value=float(beta1_init), dtype='float32', persistable=True, - name="beta1") + name="beta1", + ) beta2 = fluid.layers.create_global_var( shape=[1], value=float(beta2_init), dtype='float32', persistable=True, - name="beta2") + name="beta2", + ) epsilon = fluid.layers.create_global_var( shape=[1], value=float(epsilon_init), dtype='float32', persistable=True, - name="epsilon") + name="epsilon", + ) if use_fluid_api: adam = fluid.optimizer.Adam( learning_rate=0.01, @@ -841,13 +840,16 @@ class TestAdamOptimizer(unittest.TestCase): use_global_beta_pow=use_global_beta_pow, flatten_param_grads=flatten_param_grads, align_size=256, - grad_clip=clip) + grad_clip=clip, + ) else: - adam = paddle.optimizer.Adam(learning_rate=0.01, - beta1=beta1, - beta2=beta2, - epsilon=epsilon, - grad_clip=clip) + adam = paddle.optimizer.Adam( + learning_rate=0.01, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + grad_clip=clip, + ) else: if use_fluid_api: adam = fluid.optimizer.Adam( @@ -858,13 +860,16 @@ class TestAdamOptimizer(unittest.TestCase): use_global_beta_pow=use_global_beta_pow, flatten_param_grads=flatten_param_grads, align_size=256, - grad_clip=clip) + grad_clip=clip, + ) else: - adam = fluid.optimizer.Adam(learning_rate=0.01, - beta1=beta1_init, - beta2=beta2_init, - epsilon=epsilon_init, - grad_clip=clip) + adam = fluid.optimizer.Adam( + learning_rate=0.01, + beta1=beta1_init, + beta2=beta2_init, + epsilon=epsilon_init, + grad_clip=clip, + ) adam.minimize(loss) @@ -875,15 +880,16 @@ class TestAdamOptimizer(unittest.TestCase): print("Start run on {}".format(place)) for epoch in range(10): - pred_res, loss_res = exe.run(main_prog, - feed={ - "a": a_np, - "b": b_np, - "label": label_np - }, - fetch_list=[prediction, loss]) - print("Epoch {} | Prediction[0]: {}, Loss: {}".format( - epoch, pred_res[0], loss_res)) + pred_res, loss_res = exe.run( + main_prog, + feed={"a": a_np, "b": b_np, "label": label_np}, + fetch_list=[prediction, loss], + ) + print( + "Epoch {} | Prediction[0]: {}, Loss: {}".format( + epoch, pred_res[0], loss_res + ) + ) paddle.disable_static() return pred_res, loss_res @@ -895,10 +901,13 @@ class TestAdamOptimizer(unittest.TestCase): for use_fluid_api in [True, False]: for use_global_beta_pow in [True, False]: for flatten_param_grads in [True, False]: - pred, loss = self._test(place, use_tensor, - use_fluid_api, - use_global_beta_pow, - flatten_param_grads) + pred, loss = self._test( + place, + use_tensor, + use_fluid_api, + use_global_beta_pow, + flatten_param_grads, + ) preds.append(pred) losses.append(loss) for pred in preds: @@ -920,21 +929,22 @@ class TestAdamOptimizer(unittest.TestCase): name="weight1", initializer=fluid.initializer.Constant(value=1.0), regularizer=fluid.regularizer.L1DecayRegularizer( - regularization_coeff=0.1), - trainable=True) + regularization_coeff=0.1 + ), + trainable=True, + ) with fluid.program_guard(main): x 
= fluid.data(name='x', shape=[None, 13], dtype='float32') y = fluid.data(name='y', shape=[None, 1], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1, - act=None, - param_attr=weight_attr) + y_predict = fluid.layers.fc( + input=x, size=1, act=None, param_attr=weight_attr + ) cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) - adam = fluid.optimizer.AdamOptimizer(0.01, - flatten_param_grads=True, - align_size=256) + adam = fluid.optimizer.AdamOptimizer( + 0.01, flatten_param_grads=True, align_size=256 + ) adam.minimize(avg_cost) paddle.disable_static() @@ -957,13 +967,16 @@ class TestAdamOptimizer(unittest.TestCase): adam = fluid.optimizer.Adam(use_global_beta_pow=True) adam.minimize(loss) self.assertRaises(Exception, adam._get_global_accumulator, 'tmp') - adam._add_global_accumulator('tmp', - type=core.VarDesc.VarType.LOD_TENSOR) + adam._add_global_accumulator( + 'tmp', type=core.VarDesc.VarType.LOD_TENSOR + ) adam._get_global_accumulator('tmp') - self.assertRaises(Exception, - adam._add_global_accumulator, - adam._beta1_pow_acc_str, - type=core.VarDesc.VarType.LOD_TENSOR) + self.assertRaises( + Exception, + adam._add_global_accumulator, + adam._beta1_pow_acc_str, + type=core.VarDesc.VarType.LOD_TENSOR, + ) paddle.disable_static() def test_adam_save_load(self): @@ -974,12 +987,14 @@ class TestAdamOptimizer(unittest.TestCase): state_dict = linear.state_dict() fluid.save_dygraph(state_dict, "paddle_dy") - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100, - verbose=True) - adam = paddle.fluid.optimizer.Adam(learning_rate=scheduler, - parameter_list=linear.parameters(), - use_global_beta_pow=True) + scheduler = paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100, verbose=True + ) + adam = paddle.fluid.optimizer.Adam( + learning_rate=scheduler, + parameter_list=linear.parameters(), + use_global_beta_pow=True, + ) adam.minimize(b) state_dict = adam.state_dict() fluid.save_dygraph(state_dict, "paddle_dy") @@ -1000,13 +1015,14 @@ class TestAdamOptimizer(unittest.TestCase): state_dict = linear.state_dict() fluid.save_dygraph(state_dict, "paddle_dy") - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100, - verbose=True) + scheduler = paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100, verbose=True + ) adam = paddle.fluid.optimizer.Adam( learning_rate=scheduler, parameter_list=linear.parameters(), - use_global_beta_pow=True) + use_global_beta_pow=True, + ) adam.minimize(b) return adam @@ -1021,14 +1037,14 @@ class TestAdamOptimizer(unittest.TestCase): self.assertRaises(AssertionError, adam2.set_state_dict, opt_state_dict) adam3 = get_opt('float32', [10, 10]) # shape not match - opt_state_dict['beta1_pow_acc_0'] = np.array([0.9, 0.9], - dtype='float32') + opt_state_dict['beta1_pow_acc_0'] = np.array( + [0.9, 0.9], dtype='float32' + ) self.assertRaises(AssertionError, adam3.set_state_dict, opt_state_dict) paddle.enable_static() class TestAdamOpV2Group(TestAdamOpV2): - def test_adam_op(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -1036,16 +1052,19 @@ class TestAdamOpV2Group(TestAdamOpV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
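# For reference, a minimal illustrative NumPy sketch of the dense Adam update
# that the reference helpers in this file encode -- the same equations appear
# in update_row inside adam_step_sparse above. Function and variable names
# below are placeholders, not identifiers from the patch.
import numpy as np

def adam_reference_step(param, grad, m1, m2, lr, beta1, beta2, epsilon,
                        beta1_pow, beta2_pow):
    # Exponential moving averages of the gradient and its square.
    m1_out = beta1 * m1 + (1 - beta1) * grad
    m2_out = beta2 * m2 + (1 - beta2) * np.square(grad)
    # Bias-corrected step size, then the parameter update.
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * (m1_out / (np.sqrt(m2_out) + epsilon))
    return param_out, m1_out, m2_out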
- adam = paddle.optimizer.Adam(learning_rate=0.01, - parameters=[{ - 'params': linear_1.parameters() - }, { - 'params': linear_2.parameters(), - 'weight_decay': 0.001, - 'beta1': 0.1, - 'beta2': 0.99 - }], - weight_decay=0.1) + adam = paddle.optimizer.Adam( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'beta1': 0.1, + 'beta2': 0.99, + }, + ], + weight_decay=0.1, + ) out = linear_1(a) out = linear_2(out) out.backward() @@ -1054,13 +1073,14 @@ class TestAdamOpV2Group(TestAdamOpV2): class TestMultiTensorAdam(unittest.TestCase): - - def _adam_optimize_dygraph(self, - place, - use_param_attr=False, - use_param_group=False, - use_amp=False, - use_multi_tensor=False): + def _adam_optimize_dygraph( + self, + place, + use_param_attr=False, + use_param_group=False, + use_amp=False, + use_multi_tensor=False, + ): paddle.disable_static() paddle.seed(10) paddle.set_device(place) @@ -1070,29 +1090,32 @@ class TestMultiTensorAdam(unittest.TestCase): weight_attr = paddle.ParamAttr( learning_rate=0.5, regularizer=paddle.regularizer.L2Decay(1.0), - trainable=True) + trainable=True, + ) if use_param_attr: model = paddle.nn.Linear(5, 5, weight_attr) else: model = paddle.nn.Linear(5, 5) if not use_param_group: - optimizer = paddle.optimizer.Adam(parameters=model.parameters(), - use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + optimizer = paddle.optimizer.Adam( + parameters=model.parameters(), + use_multi_tensor=use_multi_tensor, + multi_precision=use_amp, + ) else: - optimizer = paddle.optimizer.Adam(parameters=[{ - 'params': - model.parameters(), - 'weight_decay': - 0.001, - 'beta1': - 0.1, - 'beta2': - 0.99 - }], - use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + optimizer = paddle.optimizer.Adam( + parameters=[ + { + 'params': model.parameters(), + 'weight_decay': 0.001, + 'beta1': 0.1, + 'beta2': 0.99, + } + ], + use_multi_tensor=use_multi_tensor, + multi_precision=use_amp, + ) for idx in range(2): if place == 'gpu' and use_amp == True: @@ -1116,10 +1139,9 @@ class TestMultiTensorAdam(unittest.TestCase): return output, model.parameters() - def _adam_optimize_static(self, - place, - use_amp=False, - use_multi_tensor=False): + def _adam_optimize_static( + self, place, use_amp=False, use_multi_tensor=False + ): paddle.enable_static() paddle.seed(10) np.random.seed(10) @@ -1128,24 +1150,26 @@ class TestMultiTensorAdam(unittest.TestCase): exe = paddle.static.Executor(place=place) train_program = paddle.static.Program() startup_program = paddle.static.Program() - optimizer = paddle.optimizer.Adam(multi_precision=use_amp, - use_multi_tensor=use_multi_tensor) + optimizer = paddle.optimizer.Adam( + multi_precision=use_amp, use_multi_tensor=use_multi_tensor + ) if use_amp: optimizer = paddle.static.amp.decorate( optimizer, init_loss_scaling=128.0, use_dynamic_loss_scaling=True, use_pure_fp16=True, - use_fp16_guard=False) + use_fp16_guard=False, + ) with paddle.static.program_guard(train_program, startup_program): if use_amp: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float16') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float16' + ) else: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float32') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer.minimize(loss) @@ -1157,9 +1181,9 @@ class TestMultiTensorAdam(unittest.TestCase): x = 
np.random.random(size=(2, 2)).astype('float32') out = [] for idx in range(5): - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + train_program, feed={"X": x}, fetch_list=[loss.name] + ) out.append(loss_data) return out @@ -1172,49 +1196,59 @@ class TestMultiTensorAdam(unittest.TestCase): def _check_with_place_amp(self, place, use_amp): # test dygraph mode output_dygraph1, params_dygraph1 = self._adam_optimize_dygraph( - place=place, use_amp=use_amp, use_multi_tensor=True) + place=place, use_amp=use_amp, use_multi_tensor=True + ) output_dygraph2, params_dygraph2 = self._adam_optimize_dygraph( - place=place, use_amp=use_amp, use_multi_tensor=False) + place=place, use_amp=use_amp, use_multi_tensor=False + ) np.testing.assert_allclose(output_dygraph1, output_dygraph2, rtol=1e-05) for idx in range(len(params_dygraph1)): - np.testing.assert_allclose(params_dygraph1[idx], - params_dygraph2[idx], - rtol=1e-05) + np.testing.assert_allclose( + params_dygraph1[idx], params_dygraph2[idx], rtol=1e-05 + ) # test static mode - output_static1 = self._adam_optimize_static(place=place, - use_amp=use_amp, - use_multi_tensor=True) - output_static2 = self._adam_optimize_static(place=place, - use_amp=use_amp, - use_multi_tensor=False) + output_static1 = self._adam_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=True + ) + output_static2 = self._adam_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=False + ) for idx in range(len(output_static1)): - np.testing.assert_allclose(output_static1[idx], - output_static2[idx], - rtol=1e-05) + np.testing.assert_allclose( + output_static1[idx], output_static2[idx], rtol=1e-05 + ) def _check_with_param_arrt(self, place, use_amp): - output1, params1 = self._adam_optimize_dygraph(place=place, - use_amp=use_amp, - use_param_attr=True, - use_multi_tensor=True) - output2, params2 = self._adam_optimize_dygraph(place=place, - use_amp=use_amp, - use_param_attr=True, - use_multi_tensor=False) + output1, params1 = self._adam_optimize_dygraph( + place=place, + use_amp=use_amp, + use_param_attr=True, + use_multi_tensor=True, + ) + output2, params2 = self._adam_optimize_dygraph( + place=place, + use_amp=use_amp, + use_param_attr=True, + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) def _check_with_param_group(self, place, use_amp): - output1, params1 = self._adam_optimize_dygraph(place=place, - use_amp=use_amp, - use_param_group=True, - use_multi_tensor=True) - output2, params2 = self._adam_optimize_dygraph(place=place, - use_amp=use_amp, - use_param_group=True, - use_multi_tensor=False) + output1, params1 = self._adam_optimize_dygraph( + place=place, + use_amp=use_amp, + use_param_group=True, + use_multi_tensor=True, + ) + output2, params2 = self._adam_optimize_dygraph( + place=place, + use_amp=use_amp, + use_param_group=True, + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): diff --git a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py index 1f08eb085a3c58b4bf36028572447830b1141ec7..904b02e778c25fe79a38f23e084826c91185d290 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py +++ b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py @@ -39,8 +39,9 @@ def 
main_test_func(place, dtype): adam_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = fluid.io.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = fluid.io.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -49,7 +50,6 @@ def main_test_func(place, dtype): class AdamFp32Test(unittest.TestCase): - def setUp(self): self.dtype = 'float32' @@ -59,7 +59,6 @@ class AdamFp32Test(unittest.TestCase): class AdamFp64Test(AdamFp32Test): - def setUp(self): self.dtype = 'float64' diff --git a/python/paddle/fluid/tests/unittests/test_adamax_api.py b/python/paddle/fluid/tests/unittests/test_adamax_api.py index 87aea099ac5fcb06cd455b12dcfc16c7259693a0..78e9553a806d65433d952e41ea1bf93105773283 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_api.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_api.py @@ -20,15 +20,16 @@ from paddle.fluid.framework import _test_eager_guard class TestAdamaxAPI(unittest.TestCase): - def func_adamax_api_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.Adamax(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=0.01) + adam = paddle.optimizer.Adamax( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=0.01, + ) out = linear(a) out.backward() adam.step() @@ -53,11 +54,13 @@ class TestAdamaxAPI(unittest.TestCase): loss = paddle.mean(conv) beta1 = 0.85 beta2 = 0.95 - opt = paddle.optimizer.Adamax(learning_rate=1e-5, - beta1=beta1, - beta2=beta2, - weight_decay=0.01, - epsilon=1e-8) + opt = paddle.optimizer.Adamax( + learning_rate=1e-5, + beta1=beta1, + beta2=beta2, + weight_decay=0.01, + epsilon=1e-8, + ) opt.minimize(loss) exe.run(startup) @@ -72,7 +75,6 @@ class TestAdamaxAPI(unittest.TestCase): class TestAdamaxAPIGroup(TestAdamaxAPI): - def func_adamax_api_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -80,21 +82,19 @@ class TestAdamaxAPIGroup(TestAdamaxAPI): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
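# For reference, a minimal illustrative NumPy sketch of the Adamax update
# these API tests exercise; the same equations appear in adamax_step in
# test_adamax_op.py later in this patch. Names below are placeholders, not
# identifiers from the patch.
import numpy as np

def adamax_reference_step(param, grad, moment, inf_norm, lr, beta1, beta2,
                          epsilon, beta1_pow):
    # Moving average of the gradient and exponentially weighted infinity norm.
    moment_out = beta1 * moment + (1 - beta1) * grad
    inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
    # Bias correction only involves beta1 for Adamax.
    lr_t = lr / (1 - beta1_pow)
    param_out = param - lr_t * np.divide(moment_out, inf_norm_out)
    return param_out, moment_out, inf_norm_out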
- adam = paddle.optimizer.Adamax(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - 'beta1': - 0.1, - 'beta2': - 0.99 - }], - weight_decay=0.1) + adam = paddle.optimizer.Adamax( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'beta1': 0.1, + 'beta2': 0.99, + }, + ], + weight_decay=0.1, + ) out = linear_1(a) out = linear_2(out) out.backward() diff --git a/python/paddle/fluid/tests/unittests/test_adamax_op.py b/python/paddle/fluid/tests/unittests/test_adamax_op.py index 6ea12544d87068a8b1a9efe6a456f985f488d118..b64327144346952bdf760dad776f9580c795b8bd 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_op.py @@ -18,10 +18,8 @@ from op_test import OpTest class TestAdamaxOp1(OpTest): - def setUp(self): - '''Test Adamax Operator with supplied attributes - ''' + '''Test Adamax Operator with supplied attributes''' self.op_type = "adamax" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -41,18 +39,19 @@ class TestAdamaxOp1(OpTest): 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), - 'Beta1Pow': np.array([beta1_pow]).astype("float32") + 'Beta1Pow': np.array([beta1_pow]).astype("float32"), } self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} param_out, moment_out, inf_norm_out = adamax_step( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, - 'InfNormOut': inf_norm_out + 'InfNormOut': inf_norm_out, } def test_check_output(self): @@ -60,8 +59,7 @@ class TestAdamaxOp1(OpTest): class TestAdamaxOp2(OpTest): - '''Test Adamax Operator with default attributes - ''' + '''Test Adamax Operator with default attributes''' def setUp(self): self.op_type = "adamax" @@ -83,7 +81,7 @@ class TestAdamaxOp2(OpTest): 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), - 'Beta1Pow': np.array([beta1_pow]).astype("float32") + 'Beta1Pow': np.array([beta1_pow]).astype("float32"), } attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} @@ -92,7 +90,7 @@ class TestAdamaxOp2(OpTest): self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, - 'InfNormOut': inf_norm_out + 'InfNormOut': inf_norm_out, } def test_check_output(self): @@ -100,10 +98,8 @@ class TestAdamaxOp2(OpTest): class TestAdamaxOpMultipleSteps(OpTest): - def setUp(self): - '''Test Adamax Operator with supplied attributes - ''' + '''Test Adamax Operator with supplied attributes''' self.op_type = "adamax" self.num_steps = 10 @@ -125,7 +121,7 @@ class TestAdamaxOpMultipleSteps(OpTest): 'Moment': moment, 'InfNorm': inf_norm, 'LearningRate': np.array([learning_rate]).astype("float32"), - 'Beta1Pow': np.array([beta1_pow]).astype("float32") + 'Beta1Pow': np.array([beta1_pow]).astype("float32"), } self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} @@ -133,12 +129,13 @@ class TestAdamaxOpMultipleSteps(OpTest): def test_check_output(self): for _ in range(self.num_steps): param_out, moment_out, inf_norm_out = adamax_step( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, - 'InfNormOut': inf_norm_out + 'InfNormOut': inf_norm_out, } # Verify output for this step @@ 
-153,8 +150,9 @@ class TestAdamaxOpMultipleSteps(OpTest): self.inputs['Beta1Pow'] *= self.attrs['beta1'] # Randomize gradient for next step - self.inputs['Grad'] = np.random.uniform( - -1, 1, (102, 105)).astype("float32") + self.inputs['Grad'] = np.random.uniform(-1, 1, (102, 105)).astype( + "float32" + ) def adamax_step(inputs, attributes): @@ -178,30 +176,30 @@ def adamax_step(inputs, attributes): moment_out = beta1 * moment + (1 - beta1) * grad inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad)) - lr_t = (lr / (1 - beta1_pow)) + lr_t = lr / (1 - beta1_pow) param_out = param - lr_t * np.divide(moment_out, inf_norm_out) return param_out, moment_out, inf_norm_out class TestAdamaxOpV2(unittest.TestCase): - def test_adamax_op_invalid_input(self): import paddle + paddle.disable_static() linear = paddle.nn.Linear(10, 10) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adamax(0.1, - beta1=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adamax( + 0.1, beta1=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adamax(0.1, - beta2=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adamax( + 0.1, beta2=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.Adamax(0.1, - epsilon=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.Adamax( + 0.1, epsilon=-1, parameters=linear.parameters() + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index e39638d86555e1e160a89b78571a0949967f992a..6e4a7b43f20bf7ce252a5d01637fd5fec6536ffe 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -60,10 +60,8 @@ def adamw_step(inputs, attributes): class TestAdamW(OpTest): - def setUp(self): - '''Test AdamW Op with supplied attributes - ''' + '''Test AdamW Op with supplied attributes''' self.op_type = "adamw" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -85,7 +83,7 @@ class TestAdamW(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = { @@ -93,31 +91,31 @@ class TestAdamW(OpTest): 'beta1': beta1, 'beta2': beta2, "coeff": 0.5, - "with_decay": True + "with_decay": True, } - param_out, moment1_out, \ - moment2_out = adamw_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adamw_step( + self.inputs, self.attrs + ) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def test_check_output(self): self.check_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAdamW2(OpTest): - def setUp(self): - '''Test AdamW Op with supplied attributes - ''' + '''Test AdamW Op with supplied attributes''' self.op_type = "adamw" param = np.random.uniform(-1, 1, (2, 2)).astype("float32") grad = np.random.uniform(-1, 1, (2, 
2)).astype("float32") @@ -139,7 +137,7 @@ class TestAdamW2(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } self.attrs = { @@ -148,18 +146,19 @@ class TestAdamW2(OpTest): 'beta2': beta2, "lr_ratio": 0.1, "coeff": 0.5, - "with_decay": True + "with_decay": True, } param_out, moment1_out, moment2_out = adamw_step( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2, } def test_check_output(self): @@ -167,16 +166,17 @@ class TestAdamW2(OpTest): class TestAdamWOp(unittest.TestCase): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=linear.parameters(), - apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + adam = paddle.optimizer.AdamW( + learning_rate=0.01, + parameters=linear.parameters(), + apply_decay_param_fun=lambda name: True, + weight_decay=0.01, + ) for _ in range(2): out = linear(a) @@ -189,11 +189,13 @@ class TestAdamWOp(unittest.TestCase): value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.AdamW(learning_rate=0.0, - parameters=linear.parameters(), - apply_decay_param_fun=lambda name: True, - weight_decay=0.01) - assert (adam.__str__() is not None) + adam = paddle.optimizer.AdamW( + learning_rate=0.0, + parameters=linear.parameters(), + apply_decay_param_fun=lambda name: True, + weight_decay=0.01, + ) + assert adam.__str__() is not None def test_adamw_op(self): paddle.enable_static() @@ -208,20 +210,20 @@ class TestAdamWOp(unittest.TestCase): conv = fluid.layers.conv2d(data, 8, 3) loss = paddle.mean(conv) - beta1 = fluid.layers.create_global_var(shape=[1], - value=0.85, - dtype='float32', - persistable=True) - beta2 = fluid.layers.create_global_var(shape=[1], - value=0.95, - dtype='float32', - persistable=True) + beta1 = fluid.layers.create_global_var( + shape=[1], value=0.85, dtype='float32', persistable=True + ) + beta2 = fluid.layers.create_global_var( + shape=[1], value=0.95, dtype='float32', persistable=True + ) betas = [beta1, beta2] - opt = paddle.optimizer.AdamW(learning_rate=1e-5, - beta1=beta1, - beta2=beta2, - weight_decay=0.01, - epsilon=1e-8) + opt = paddle.optimizer.AdamW( + learning_rate=1e-5, + beta1=beta1, + beta2=beta2, + weight_decay=0.01, + epsilon=1e-8, + ) opt.minimize(loss) exe.run(startup) @@ -234,17 +236,17 @@ class TestAdamWOp(unittest.TestCase): paddle.disable_static() linear = paddle.nn.Linear(10, 10) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - beta1=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, beta1=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - beta2=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, beta2=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - 
epsilon=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, epsilon=-1, parameters=linear.parameters() + ) def test_api_eager_dygraph(self): with _test_eager_guard(): @@ -253,25 +255,21 @@ class TestAdamWOp(unittest.TestCase): class TestAdamWOpGroup(TestAdamWOp): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001 - }], - apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + adam = paddle.optimizer.AdamW( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + {'params': linear_2.parameters(), 'weight_decay': 0.001}, + ], + apply_decay_param_fun=lambda name: True, + weight_decay=0.01, + ) for _ in range(2): out = linear_1(a) @@ -282,7 +280,6 @@ class TestAdamWOpGroup(TestAdamWOp): class TestAdamWOpMultiPrecison(unittest.TestCase): - def _test_adamw_op_dygraph_place_amp(self, place, use_amp=False): paddle.disable_static() paddle.seed(10) @@ -292,17 +289,17 @@ class TestAdamWOpMultiPrecison(unittest.TestCase): model = paddle.nn.Linear(5, 5) - optimizer = paddle.optimizer.AdamW(parameters=[{ - 'params': - model.parameters(), - 'weight_decay': - 0.001, - 'beta1': - 0.1, - 'beta2': - 0.99 - }], - multi_precision=use_amp) + optimizer = paddle.optimizer.AdamW( + parameters=[ + { + 'params': model.parameters(), + 'weight_decay': 0.001, + 'beta1': 0.1, + 'beta2': 0.99, + } + ], + multi_precision=use_amp, + ) for idx in range(2): if place == 'gpu' and use_amp == True: @@ -338,51 +335,59 @@ class TestAdamWOpMultiPrecison(unittest.TestCase): class TestAdamWOpError(unittest.TestCase): - def test_api_errors(self): - def test_weight_decay_dtype(): linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=1) + adam = paddle.optimizer.AdamW( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=1, + ) def test_parameters_dtype1(): - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=paddle.randn((5, 5)), - weight_decay=0.1) + adam = paddle.optimizer.AdamW( + learning_rate=0.01, + parameters=paddle.randn((5, 5)), + weight_decay=0.1, + ) def test_parameters_dtype2(): linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.AdamW( learning_rate=0.01, parameters={'params': linear.parameters()}, - weight_decay=0.1) + weight_decay=0.1, + ) def test_parameters_dtype3(): - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=None, - weight_decay=0.1) + adam = paddle.optimizer.AdamW( + learning_rate=0.01, parameters=None, weight_decay=0.1 + ) def test_parameters_dtype4(): linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.AdamW( learning_rate=0.01, parameters={'params': set(linear.parameters())}, - weight_decay=0.1) + weight_decay=0.1, + ) def test_learning_rate_dtype(): linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.AdamW(learning_rate=1, - parameters=linear.parameters(), - weight_decay=0.1) + adam = paddle.optimizer.AdamW( + learning_rate=1, + parameters=linear.parameters(), + weight_decay=0.1, + ) def test_grad_clip_dtype(): linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.AdamW(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=0.1, - grad_clip=0.1) + adam = 
paddle.optimizer.AdamW( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=0.1, + grad_clip=0.1, + ) self.assertRaises(TypeError, test_weight_decay_dtype) self.assertRaises(TypeError, test_parameters_dtype1) @@ -394,7 +399,6 @@ class TestAdamWOpError(unittest.TestCase): class TestAdamWOpGroupWithLR(TestAdamWOp): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -403,16 +407,21 @@ class TestAdamWOpGroupWithLR(TestAdamWOp): linear_2 = paddle.nn.Linear(5, 3) adam = paddle.optimizer.AdamW( learning_rate=paddle.optimizer.lr.PiecewiseDecay( - boundaries=[3, 6], values=[0.1, 0.2, 0.3]), - parameters=[{ - 'params': linear_1.parameters(), - 'learning_rate': 0.1, - }, { - 'params': linear_2.parameters(), - 'weight_decay': 0.001, - }], + boundaries=[3, 6], values=[0.1, 0.2, 0.3] + ), + parameters=[ + { + 'params': linear_1.parameters(), + 'learning_rate': 0.1, + }, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + }, + ], apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + weight_decay=0.01, + ) for _ in range(2): out = linear_1(a) @@ -430,13 +439,13 @@ def simple_lr_setting(param, decay_rate, n_layers): else: depth = 0 - return decay_rate**(n_layers + 2 - depth) + return decay_rate ** (n_layers + 2 - depth) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAdamWOpLayerwiseLR(TestAdamWOp): - def setUp(self): random.seed(2022) np.random.seed(2022) @@ -445,9 +454,11 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): def test_adamw_op_dygraph(self): paddle.disable_static() linear1 = paddle.nn.Linear( - 13, 8, bias_attr=paddle.nn.initializer.Constant(value=1.0)) + 13, 8, bias_attr=paddle.nn.initializer.Constant(value=1.0) + ) linear2 = paddle.nn.Linear( - 8, 5, bias_attr=paddle.nn.initializer.Constant(value=1.0)) + 8, 5, bias_attr=paddle.nn.initializer.Constant(value=1.0) + ) # fix the linear name, simple_lr_setting function will use the name linear1.weight.name = "linear_1.w_0" @@ -475,15 +486,18 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): beta1 = 0.9 beta2 = 0.999 - opt = paddle.optimizer.AdamW(learning_rate=learning_rate, - parameters=[{ - 'params': linear1.parameters() - }, { - 'params': linear2.parameters(), - }], - apply_decay_param_fun=lambda name: True, - weight_decay=weight_decay, - lr_ratio=simple_lr_fun) + opt = paddle.optimizer.AdamW( + learning_rate=learning_rate, + parameters=[ + {'params': linear1.parameters()}, + { + 'params': linear2.parameters(), + }, + ], + apply_decay_param_fun=lambda name: True, + weight_decay=weight_decay, + lr_ratio=simple_lr_fun, + ) def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): np_inputs = { @@ -493,7 +507,7 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1**t]).astype("float32"), - 'Beta2Pow': np.array([beta2**t]).astype("float32") + 'Beta2Pow': np.array([beta2**t]).astype("float32"), } np_attrs = { @@ -502,32 +516,54 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): 'beta2': beta2, "lr_ratio": lr_ratio, "coeff": weight_decay, - "with_decay": True + "with_decay": True, } param_out, moment1_out, moment2_out = adamw_step( - np_inputs, np_attrs) + np_inputs, np_attrs + ) return param_out, moment1_out, moment2_out for i in range(5): a = paddle.to_tensor( - np.random.uniform(-1, 1, (2, 
13)).astype("float32")) + np.random.uniform(-1, 1, (2, 13)).astype("float32") + ) a1 = linear1(a) out = linear2(a1) out = paddle.mean(out) out.backward() fc1_w, fc1_w_mon1, fc1_w_mon2 = get_numpy_output( - fc1_w, np.array(linear1.weight.grad), fc1_w_mon1, fc1_w_mon2, - simple_lr_fun(linear1.weight), i + 1) + fc1_w, + np.array(linear1.weight.grad), + fc1_w_mon1, + fc1_w_mon2, + simple_lr_fun(linear1.weight), + i + 1, + ) fc1_b, fc1_b_mon1, fc1_b_mon2 = get_numpy_output( - fc1_b, np.array(linear1.bias.grad), fc1_b_mon1, fc1_b_mon2, - simple_lr_fun(linear1.bias), i + 1) + fc1_b, + np.array(linear1.bias.grad), + fc1_b_mon1, + fc1_b_mon2, + simple_lr_fun(linear1.bias), + i + 1, + ) fc2_w, fc2_w_mon1, fc2_w_mon2 = get_numpy_output( - fc2_w, np.array(linear2.weight.grad), fc2_w_mon1, fc2_w_mon2, - simple_lr_fun(linear2.weight), i + 1) + fc2_w, + np.array(linear2.weight.grad), + fc2_w_mon1, + fc2_w_mon2, + simple_lr_fun(linear2.weight), + i + 1, + ) fc2_b, fc2_b_mon1, fc2_b_mon2 = get_numpy_output( - fc2_b, np.array(linear2.bias.grad), fc2_b_mon1, fc2_b_mon2, - simple_lr_fun(linear2.bias), i + 1) + fc2_b, + np.array(linear2.bias.grad), + fc2_b_mon1, + fc2_b_mon2, + simple_lr_fun(linear2.bias), + i + 1, + ) opt.step() opt.clear_gradients() @@ -557,19 +593,19 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): weight_attr1 = paddle.framework.ParamAttr(name="linear_0.w_0") bias_attr1 = paddle.framework.ParamAttr( name="linear_0.b_0", - initializer=paddle.nn.initializer.Constant(value=1.0)) + initializer=paddle.nn.initializer.Constant(value=1.0), + ) weight_attr2 = paddle.framework.ParamAttr(name="linear_1.w_0") bias_attr2 = paddle.framework.ParamAttr( name="linear_1.b_0", - initializer=paddle.nn.initializer.Constant(value=1.0)) - linear1 = paddle.nn.Linear(10, - 32, - weight_attr=weight_attr1, - bias_attr=bias_attr1) - linear2 = paddle.nn.Linear(32, - 1, - weight_attr=weight_attr2, - bias_attr=bias_attr2) + initializer=paddle.nn.initializer.Constant(value=1.0), + ) + linear1 = paddle.nn.Linear( + 10, 32, weight_attr=weight_attr1, bias_attr=bias_attr1 + ) + linear2 = paddle.nn.Linear( + 32, 1, weight_attr=weight_attr2, bias_attr=bias_attr2 + ) out = linear1(x) out = linear2(out) @@ -586,16 +622,18 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): cost = fluid.layers.square_error_cost(input=out, label=y) avg_cost = paddle.mean(cost) - simple_lr_fun = partial(simple_lr_setting, - decay_rate=0.8, - n_layers=2) - - opt = paddle.optimizer.AdamW(learning_rate=learning_rate, - beta1=beta1, - beta2=beta2, - weight_decay=weight_decay, - epsilon=epsilon, - lr_ratio=simple_lr_fun) + simple_lr_fun = partial( + simple_lr_setting, decay_rate=0.8, n_layers=2 + ) + + opt = paddle.optimizer.AdamW( + learning_rate=learning_rate, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + epsilon=epsilon, + lr_ratio=simple_lr_fun, + ) opt.minimize(avg_cost) def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): @@ -606,7 +644,7 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1**t]).astype("float32"), - 'Beta2Pow': np.array([beta2**t]).astype("float32") + 'Beta2Pow': np.array([beta2**t]).astype("float32"), } np_attrs = { @@ -615,19 +653,28 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): 'beta2': beta2, "lr_ratio": lr_ratio, "coeff": weight_decay, - "with_decay": True + "with_decay": True, } param_out, moment1_out, moment2_out = adamw_step( - np_inputs, np_attrs) + np_inputs, np_attrs + ) return param_out, moment1_out, 
moment2_out fetch_list1 = [ - "linear_0.w_0", "linear_0.b_0", "linear_1.w_0", "linear_1.b_0" + "linear_0.w_0", + "linear_0.b_0", + "linear_1.w_0", + "linear_1.b_0", ] fetch_list2 = [ - "linear_0.w_0", "linear_0.w_0@GRAD", "linear_0.b_0", - "linear_0.b_0@GRAD", "linear_1.w_0", "linear_1.w_0@GRAD", - "linear_1.b_0", "linear_1.b_0@GRAD" + "linear_0.w_0", + "linear_0.w_0@GRAD", + "linear_0.b_0", + "linear_0.b_0@GRAD", + "linear_1.w_0", + "linear_1.w_0@GRAD", + "linear_1.b_0", + "linear_1.b_0@GRAD", ] exe = fluid.Executor(place) @@ -638,18 +685,16 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): inputs = np.random.random(size=[8, 10]).astype('float32') outputs = np.random.random(size=[8, 1]).astype('float32') - param = exe.run(test_prog, - feed={ - "x": inputs, - "y": outputs - }, - fetch_list=fetch_list1) - params_and_gras = exe.run(train_prog, - feed={ - "x": inputs, - "y": outputs - }, - fetch_list=fetch_list2) + param = exe.run( + test_prog, + feed={"x": inputs, "y": outputs}, + fetch_list=fetch_list1, + ) + params_and_gras = exe.run( + train_prog, + feed={"x": inputs, "y": outputs}, + fetch_list=fetch_list2, + ) fc1_w = param[0] fc1_w_grad = params_and_gras[1] @@ -661,17 +706,37 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp): fc2_b_grad = params_and_gras[7] fc1_w, fc1_w_mon1, fc1_w_mon2 = get_numpy_output( - fc1_w, fc1_w_grad, fc1_w_mon1, fc1_w_mon2, - simple_lr_fun(linear1.weight), i + 1) + fc1_w, + fc1_w_grad, + fc1_w_mon1, + fc1_w_mon2, + simple_lr_fun(linear1.weight), + i + 1, + ) fc1_b, fc1_b_mon1, fc1_b_mon2 = get_numpy_output( - fc1_b, fc1_b_grad, fc1_b_mon1, fc1_b_mon2, - simple_lr_fun(linear1.bias), i + 1) + fc1_b, + fc1_b_grad, + fc1_b_mon1, + fc1_b_mon2, + simple_lr_fun(linear1.bias), + i + 1, + ) fc2_w, fc2_w_mon1, fc2_w_mon2 = get_numpy_output( - fc2_w, fc2_w_grad, fc2_w_mon1, fc2_w_mon2, - simple_lr_fun(linear2.weight), i + 1) + fc2_w, + fc2_w_grad, + fc2_w_mon1, + fc2_w_mon2, + simple_lr_fun(linear2.weight), + i + 1, + ) fc2_b, fc2_b_mon1, fc2_b_mon2 = get_numpy_output( - fc2_b, fc2_b_grad, fc2_b_mon1, fc2_b_mon2, - simple_lr_fun(linear2.bias), i + 1) + fc2_b, + fc2_b_grad, + fc2_b_mon1, + fc2_b_mon2, + simple_lr_fun(linear2.bias), + i + 1, + ) np.testing.assert_allclose(params_and_gras[0], fc1_w, rtol=1e-6) np.testing.assert_allclose(params_and_gras[2], fc1_b, rtol=1e-6) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py index 36c102128cba08f6cf63fea6c22e1ccc02ea1292..532c2e8a45b0a936b34a3570501192a18ca62672 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py @@ -30,24 +30,28 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def avg_pool1D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=False, - adaptive=False, - data_type=np.float64): +def avg_pool1D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=False, + adaptive=False, + data_type=np.float64, +): N, C, L = x.shape if global_pool == 1: ksize = [L] if adaptive: L_out = ksize[0] else: - L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + L_out = ( + (L - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (L - ksize[0] + 2 * paddings[0]) // 
strides[0] + 1 + ) out = np.zeros((N, C, L_out)) for i in range(L_out): @@ -59,19 +63,21 @@ def avg_pool1D_forward_naive(x, r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L)) x_masked = x[:, :, r_start:r_end] - field_size = (r_end - r_start) \ - if (exclusive or adaptive) else (ksize[0]) + field_size = ( + (r_end - r_start) if (exclusive or adaptive) else (ksize[0]) + ) if data_type == np.int8 or data_type == np.uint8: - out[:, :, i] = (np.rint(np.sum(x_masked, axis=(2, 3)) / - field_size)).astype(data_type) + out[:, :, i] = ( + np.rint(np.sum(x_masked, axis=(2, 3)) / field_size) + ).astype(data_type) else: - out[:, :, - i] = (np.sum(x_masked, axis=(2)) / field_size).astype(data_type) + out[:, :, i] = (np.sum(x_masked, axis=(2)) / field_size).astype( + data_type + ) return out class TestPool1D_API(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -83,22 +89,21 @@ class TestPool1D_API(unittest.TestCase): input_np = np.random.random([2, 3, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) result = F.adaptive_avg_pool1d(input, output_size=16) - result_np = avg_pool1D_forward_naive(input_np, - ksize=[16], - strides=[0], - paddings=[0], - adaptive=True) + result_np = avg_pool1D_forward_naive( + input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1D( - output_size=16) + output_size=16 + ) result = ada_max_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - result = paddle.nn.functional.common.interpolate(input, - mode="area", - size=16) + result = paddle.nn.functional.common.interpolate( + input, mode="area", size=16 + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_adaptive_avg_static_results(self, place): @@ -107,16 +112,16 @@ class TestPool1D_API(unittest.TestCase): result = F.adaptive_avg_pool1d(input, output_size=16) input_np = np.random.random([2, 3, 32]).astype("float32") - result_np = avg_pool1D_forward_naive(input_np, - ksize=[16], - strides=[2], - paddings=[0], - adaptive=True) + result_np = avg_pool1D_forward_naive( + input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def test_adaptive_avg_pool1d(self): diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py index 9ca75c6b8b1eb3440e4d3b478aaada9ff93ec9ec..9c12d5977d42b08be998f35582c3984807a5f653 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py @@ -32,16 +32,18 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def adaptive_pool2d_forward(x, - output_size, - data_format='NCHW', - pool_type="avg"): +def adaptive_pool2d_forward( + x, output_size, data_format='NCHW', pool_type="avg" +): N = x.shape[0] - C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \ + C, H, W = ( + [x.shape[1], x.shape[2], x.shape[3]] + if data_format == 'NCHW' else [x.shape[3], x.shape[1], x.shape[2]] + ) - if 
(isinstance(output_size, int) or output_size == None): + if isinstance(output_size, int) or output_size == None: H_out = output_size W_out = output_size output_size = [H_out, W_out] @@ -55,8 +57,11 @@ def adaptive_pool2d_forward(x, output_size[1] = W W_out = W - out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \ + out = ( + np.zeros((N, C, H_out, W_out)) + if data_format == 'NCHW' else np.zeros((N, H_out, W_out, C)) + ) for i in range(H_out): in_h_start = adaptive_start_index(i, H, output_size[0]) @@ -69,16 +74,18 @@ def adaptive_pool2d_forward(x, if data_format == 'NCHW': x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end] if pool_type == 'avg': - field_size = ((in_h_end - in_h_start) * - (in_w_end - in_w_start)) + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size elif pool_type == 'max': out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) elif data_format == 'NHWC': x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :] if pool_type == 'avg': - field_size = ((in_h_end - in_h_start) * - (in_w_end - in_w_start)) + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size elif pool_type == 'max': out[:, i, j, :] = np.max(x_masked, axis=(1, 2)) @@ -86,57 +93,60 @@ def adaptive_pool2d_forward(x, class TestAdaptiveAvgPool2DAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="avg") + self.res_1_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="avg" + ) - self.res_2_np = adaptive_pool2d_forward(x=self.x_np, - output_size=5, - pool_type="avg") + self.res_2_np = adaptive_pool2d_forward( + x=self.x_np, output_size=5, pool_type="avg" + ) - self.res_3_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[2, 5], - pool_type="avg") + self.res_3_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[2, 5], pool_type="avg" + ) - self.res_4_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="avg", - data_format="NHWC") + self.res_4_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="avg", data_format="NHWC" + ) - self.res_5_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[None, 3], - pool_type="avg") + self.res_5_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[None, 3], pool_type="avg" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") - out_1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[3, 3]) + out_1 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[3, 3] + ) out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5) - out_3 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[2, 5]) + out_3 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[2, 5] + ) - out_4 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[3, 3], - data_format="NHWC") + out_4 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[3, 3], data_format="NHWC" + ) out_5 = paddle.nn.functional.adaptive_avg_pool2d( - x=x, output_size=[None, 3]) + x=x, 
output_size=[None, 3] + ) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, res_4, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_4, out_5]) + [res_1, res_2, res_3, res_4, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_4, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -149,30 +159,34 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase): assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) - out_1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[3, 3]) + out_1 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[3, 3] + ) out_2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=5) - out_3 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[2, 5]) + out_3 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[2, 5] + ) - out_4 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=[3, 3], - data_format="NHWC") + out_4 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=[3, 3], data_format="NHWC" + ) out_5 = paddle.nn.functional.adaptive_avg_pool2d( - x=x, output_size=[None, 3]) + x=x, output_size=[None, 3] + ) - out_6 = paddle.nn.functional.interpolate(x=x, - mode="area", - size=[2, 5]) + out_6 = paddle.nn.functional.interpolate( + x=x, mode="area", size=[2, 5] + ) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -188,33 +202,32 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase): class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="avg") + self.res_1_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="avg" + ) - self.res_2_np = adaptive_pool2d_forward(x=self.x_np, - output_size=5, - pool_type="avg") + self.res_2_np = adaptive_pool2d_forward( + x=self.x_np, output_size=5, pool_type="avg" + ) - self.res_3_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[2, 5], - pool_type="avg") + self.res_3_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[2, 5], pool_type="avg" + ) - self.res_4_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="avg", - data_format="NHWC") + self.res_4_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="avg", data_format="NHWC" + ) - self.res_5_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[None, 3], - pool_type="avg") + self.res_5_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[None, 3], pool_type="avg" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") @@ -228,19 +241,22 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5]) out_3 = adaptive_avg_pool(x=x) - adaptive_avg_pool = 
paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3], - data_format="NHWC") + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D( + output_size=[3, 3], data_format="NHWC" + ) out_4 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D( - output_size=[None, 3]) + output_size=[None, 3] + ) out_5 = adaptive_avg_pool(x=x) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, res_4, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_4, out_5]) + [res_1, res_2, res_3, res_4, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_4, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -253,8 +269,9 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) @@ -268,12 +285,14 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[2, 5]) out_3 = adaptive_avg_pool(x=x) - adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3], - data_format="NHWC") + adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D( + output_size=[3, 3], data_format="NHWC" + ) out_4 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D( - output_size=[None, 3]) + output_size=[None, 3] + ) out_5 = adaptive_avg_pool(x=x) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -288,7 +307,6 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): class TestOutputSizeTensor(UnittestBase): - def init_info(self): self.shapes = [[1, 3, 6, 6]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -313,8 +331,9 @@ class TestOutputSizeTensor(UnittestBase): exe.run(starup_prog) res = exe.run(fetch_list=[out1, out2]) np.testing.assert_allclose(res[0], res[1]) - paddle.static.save_inference_model(self.save_path, [x], - [out1, out2], exe) + paddle.static.save_inference_model( + self.save_path, [x], [out1, out2], exe + ) # Test for Inference Predictor infer_outs = self.infer_prog() np.testing.assert_array_equal(infer_outs[0].shape, (1, 3, 3, 3)) @@ -330,13 +349,13 @@ class TestOutputSizeTensor(UnittestBase): # list[Tensor] output_size = [paddle.assign([3]), paddle.assign([3])] out1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=[3, 3]) - out2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=output_size) + out2 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=output_size + ) return out1, out2 class TestOutputSizeListTensor(TestOutputSizeTensor): - def path_prefix(self): return 'pool2d_tensors' @@ -344,13 +363,13 @@ class TestOutputSizeListTensor(TestOutputSizeTensor): # list[int, Tensor] output_size = [paddle.assign([3]), 3] out1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=[3, 3]) - out2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=output_size) + out2 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=output_size + ) return out1, out2 class TestOutputSizeListTensor2(TestOutputSizeTensor): - def path_prefix(self): return 'pool2d_tensor2' @@ -358,8 +377,9 @@ class TestOutputSizeListTensor2(TestOutputSizeTensor): # A Tensor output_size = 
paddle.assign([3, 3]) out1 = paddle.nn.functional.adaptive_avg_pool2d(x=x, output_size=[3, 3]) - out2 = paddle.nn.functional.adaptive_avg_pool2d(x=x, - output_size=output_size) + out2 = paddle.nn.functional.adaptive_avg_pool2d( + x=x, output_size=output_size + ) return out1, out2 diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py index 5f3e2d38eb144c03720774176c942a6018063ceb..e7a8685a8f16419a6ebfc2f53be5005eab58fc4e 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py @@ -28,17 +28,18 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def adaptive_pool3d_forward(x, - output_size, - adaptive=True, - data_format='NCDHW', - pool_type='avg'): +def adaptive_pool3d_forward( + x, output_size, adaptive=True, data_format='NCDHW', pool_type='avg' +): N = x.shape[0] - C, D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \ - if data_format == 'NCDHW' else [x.shape[4], x.shape[1], x.shape[2],x.shape[3]] + C, D, H, W = ( + [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] + if data_format == 'NCDHW' + else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] + ) - if (isinstance(output_size, int) or output_size == None): + if isinstance(output_size, int) or output_size == None: H_out = output_size W_out = output_size D_out = output_size @@ -56,8 +57,11 @@ def adaptive_pool3d_forward(x, output_size[2] = W W_out = W - out = np.zeros((N, C, D_out, H_out, W_out)) if data_format=='NCDHW' \ + out = ( + np.zeros((N, C, D_out, H_out, W_out)) + if data_format == 'NCDHW' else np.zeros((N, D_out, H_out, W_out, C)) + ) for k in range(D_out): d_start = adaptive_start_index(k, D, output_size[0]) d_end = adaptive_end_index(k, D, output_size[0]) @@ -71,82 +75,99 @@ def adaptive_pool3d_forward(x, w_end = adaptive_end_index(j, W, output_size[2]) if data_format == 'NCDHW': - x_masked = x[:, :, d_start:d_end, h_start:h_end, - w_start:w_end] + x_masked = x[ + :, :, d_start:d_end, h_start:h_end, w_start:w_end + ] if pool_type == 'avg': - field_size = (d_end - d_start) * (h_end - h_start) * ( - w_end - w_start) - out[:, :, k, i, - j] = np.sum(x_masked, axis=(2, 3, 4)) / field_size + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + out[:, :, k, i, j] = ( + np.sum(x_masked, axis=(2, 3, 4)) / field_size + ) elif pool_type == 'max': out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) elif data_format == 'NDHWC': - x_masked = x[:, d_start:d_end, h_start:h_end, - w_start:w_end, :] + x_masked = x[ + :, d_start:d_end, h_start:h_end, w_start:w_end, : + ] if pool_type == 'avg': - field_size = (d_end - d_start) * (h_end - h_start) * ( - w_end - w_start) - out[:, k, i, j, :] = np.sum(x_masked, - axis=(1, 2, 3)) / field_size + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + out[:, k, i, j, :] = ( + np.sum(x_masked, axis=(1, 2, 3)) / field_size + ) elif pool_type == 'max': out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3)) return out class TestAdaptiveAvgPool3DAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="avg") + self.res_1_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[3, 3, 3], pool_type="avg" + ) - self.res_2_np = 
adaptive_pool3d_forward(x=self.x_np, - output_size=5, - pool_type="avg") + self.res_2_np = adaptive_pool3d_forward( + x=self.x_np, output_size=5, pool_type="avg" + ) - self.res_3_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[2, 3, 5], - pool_type="avg") + self.res_3_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[2, 3, 5], pool_type="avg" + ) - self.res_4_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="avg", - data_format="NDHWC") + self.res_4_np = adaptive_pool3d_forward( + x=self.x_np, + output_size=[3, 3, 3], + pool_type="avg", + data_format="NDHWC", + ) - self.res_5_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[None, 3, None], - pool_type="avg") + self.res_5_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[None, 3, None], pool_type="avg" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", - shape=[2, 3, 5, 7, 7], - dtype="float32") + x = paddle.fluid.data( + name="x", shape=[2, 3, 5, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[3, 3, 3]) + x=x, output_size=[3, 3, 3] + ) out_2 = paddle.nn.functional.adaptive_avg_pool3d(x=x, output_size=5) out_3 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[2, 3, 5]) + x=x, output_size=[2, 3, 5] + ) out_4 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[3, 3, 3], data_format="NDHWC") + x=x, output_size=[3, 3, 3], data_format="NDHWC" + ) out_5 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[None, 3, None]) + x=x, output_size=[None, 3, None] + ) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, res_4, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_4, out_5]) + [res_1, res_2, res_3, res_4, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_4, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -159,29 +180,34 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase): assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) out_1 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[3, 3, 3]) + x=x, output_size=[3, 3, 3] + ) out_2 = paddle.nn.functional.adaptive_avg_pool3d(x=x, output_size=5) out_3 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[2, 3, 5]) + x=x, output_size=[2, 3, 5] + ) out_4 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[3, 3, 3], data_format="NDHWC") + x=x, output_size=[3, 3, 3], data_format="NDHWC" + ) out_5 = paddle.nn.functional.adaptive_avg_pool3d( - x=x, output_size=[None, 3, None]) + x=x, output_size=[None, 3, None] + ) - out_6 = paddle.nn.functional.interpolate(x=x, - mode="area", - size=[2, 3, 5]) + out_6 = paddle.nn.functional.interpolate( + x=x, mode="area", size=[2, 3, 5] + ) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -197,63 +223,70 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase): class 
TestAdaptiveAvgPool3DClassAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="avg") + self.res_1_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[3, 3, 3], pool_type="avg" + ) - self.res_2_np = adaptive_pool3d_forward(x=self.x_np, - output_size=5, - pool_type="avg") + self.res_2_np = adaptive_pool3d_forward( + x=self.x_np, output_size=5, pool_type="avg" + ) - self.res_3_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[2, 3, 5], - pool_type="avg") + self.res_3_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[2, 3, 5], pool_type="avg" + ) - self.res_4_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="avg", - data_format="NDHWC") + self.res_4_np = adaptive_pool3d_forward( + x=self.x_np, + output_size=[3, 3, 3], + pool_type="avg", + data_format="NDHWC", + ) - self.res_5_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[None, 3, None], - pool_type="avg") + self.res_5_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[None, 3, None], pool_type="avg" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", - shape=[2, 3, 5, 7, 7], - dtype="float32") + x = paddle.fluid.data( + name="x", shape=[2, 3, 5, 7, 7], dtype="float32" + ) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[3, 3, 3]) + output_size=[3, 3, 3] + ) out_1 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5) out_2 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[2, 3, 5]) + output_size=[2, 3, 5] + ) out_3 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[3, 3, 3], data_format="NDHWC") + output_size=[3, 3, 3], data_format="NDHWC" + ) out_4 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[None, 3, None]) + output_size=[None, 3, None] + ) out_5 = adaptive_avg_pool(x=x) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, res_4, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_4, out_5]) + [res_1, res_2, res_3, res_4, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_4, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -266,29 +299,34 @@ class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase): assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[3, 3, 3]) + output_size=[3, 3, 3] + ) out_1 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=5) out_2 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[2, 3, 5]) + output_size=[2, 3, 5] + ) out_3 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( 
- output_size=[3, 3, 3], data_format="NDHWC") + output_size=[3, 3, 3], data_format="NDHWC" + ) out_4 = adaptive_avg_pool(x=x) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3D( - output_size=[None, 3, None]) + output_size=[None, 3, None] + ) out_5 = adaptive_avg_pool(x=x) assert np.allclose(out_1.numpy(), self.res_1_np) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py index d64ff0a3c76d392aa8c11b26b97a3210608cfd3e..a83ef2bfd5bae623922780f97d73d535fad10daa 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py @@ -29,24 +29,28 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def max_pool1D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=False, - adaptive=False, - data_type=np.float64): +def max_pool1D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=False, + adaptive=False, + data_type=np.float64, +): N, C, L = x.shape if global_pool == 1: ksize = [L] if adaptive: L_out = ksize[0] else: - L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + L_out = ( + (L - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) out = np.zeros((N, C, L_out)) for i in range(L_out): @@ -63,7 +67,6 @@ def max_pool1D_forward_naive(x, class TestPool1D_API(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -76,15 +79,14 @@ class TestPool1D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = F.adaptive_max_pool1d(input, output_size=16) - result_np = max_pool1D_forward_naive(input_np, - ksize=[16], - strides=[0], - paddings=[0], - adaptive=True) + result_np = max_pool1D_forward_naive( + input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1D( - output_size=16) + output_size=16 + ) result = ada_max_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -94,16 +96,16 @@ class TestPool1D_API(unittest.TestCase): result = F.adaptive_max_pool1d(input, output_size=16) input_np = np.random.random([2, 3, 32]).astype("float32") - result_np = max_pool1D_forward_naive(input_np, - ksize=[16], - strides=[2], - paddings=[0], - adaptive=True) + result_np = max_pool1D_forward_naive( + input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def test_adaptive_max_pool1d(self): @@ -113,14 +115,15 @@ class TestPool1D_API(unittest.TestCase): class TestOutDtype(unittest.TestCase): - def test_max_pool(self): api_fn = F.adaptive_max_pool1d shape = [1, 3, 32] - check_out_dtype(api_fn, - in_specs=[(shape, )], - expect_dtypes=['float32', 'float64'], - output_size=16) + check_out_dtype( + api_fn, + in_specs=[(shape,)], + expect_dtypes=['float32', 'float64'], + output_size=16, + ) if 
__name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py index 5ee4918a753c0553aca5d9797f28ffcc92ca1e84..9884cb6c90da89c939abfc6f494da68f5c8fc115 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py @@ -30,16 +30,18 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def adaptive_pool2d_forward(x, - output_size, - data_format='NCHW', - pool_type="max"): +def adaptive_pool2d_forward( + x, output_size, data_format='NCHW', pool_type="max" +): N = x.shape[0] - C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \ + C, H, W = ( + [x.shape[1], x.shape[2], x.shape[3]] + if data_format == 'NCHW' else [x.shape[3], x.shape[1], x.shape[2]] + ) - if (isinstance(output_size, int) or output_size == None): + if isinstance(output_size, int) or output_size == None: H_out = output_size W_out = output_size output_size = [H_out, W_out] @@ -53,8 +55,11 @@ def adaptive_pool2d_forward(x, output_size[1] = W W_out = W - out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \ + out = ( + np.zeros((N, C, H_out, W_out)) + if data_format == 'NCHW' else np.zeros((N, H_out, W_out, C)) + ) for i in range(H_out): in_h_start = adaptive_start_index(i, H, output_size[0]) @@ -67,16 +72,18 @@ def adaptive_pool2d_forward(x, if data_format == 'NCHW': x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end] if pool_type == 'avg': - field_size = ((in_h_end - in_h_start) * - (in_w_end - in_w_start)) + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size elif pool_type == 'max': out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) elif data_format == 'NHWC': x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :] if pool_type == 'avg': - field_size = ((in_h_end - in_h_start) * - (in_w_end - in_w_start)) + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size elif pool_type == 'max': out[:, i, j, :] = np.max(x_masked, axis=(1, 2)) @@ -84,20 +91,19 @@ def adaptive_pool2d_forward(x, class TestAdaptiveMaxPool2DAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="max") + self.res_1_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="max" + ) - self.res_2_np = adaptive_pool2d_forward(x=self.x_np, - output_size=5, - pool_type="max") + self.res_2_np = adaptive_pool2d_forward( + x=self.x_np, output_size=5, pool_type="max" + ) - self.res_3_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[2, 5], - pool_type="max") + self.res_3_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[2, 5], pool_type="max" + ) """ self.res_4_np = adaptive_pool2d_forward( x=self.x_np, @@ -105,36 +111,41 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase): pool_type="max", data_format="NHWC") """ - self.res_5_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[None, 3], - pool_type="max") + self.res_5_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[None, 3], pool_type="max" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if 
core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") - out_1 = paddle.nn.functional.adaptive_max_pool2d(x=x, - output_size=[3, 3]) + out_1 = paddle.nn.functional.adaptive_max_pool2d( + x=x, output_size=[3, 3] + ) out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5) - out_3 = paddle.nn.functional.adaptive_max_pool2d(x=x, - output_size=[2, 5]) + out_3 = paddle.nn.functional.adaptive_max_pool2d( + x=x, output_size=[2, 5] + ) - #out_4 = paddle.nn.functional.adaptive_max_pool2d( + # out_4 = paddle.nn.functional.adaptive_max_pool2d( # x=x, output_size=[3, 3], data_format="NHWC") out_5 = paddle.nn.functional.adaptive_max_pool2d( - x=x, output_size=[None, 3]) + x=x, output_size=[None, 3] + ) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_5]) + [res_1, res_2, res_3, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -142,31 +153,34 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase): assert np.allclose(res_3, self.res_3_np) - #assert np.allclose(res_4, self.res_4_np) + # assert np.allclose(res_4, self.res_4_np) assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) - out_1 = paddle.nn.functional.adaptive_max_pool2d(x=x, - return_mask=False, - output_size=[3, 3]) + out_1 = paddle.nn.functional.adaptive_max_pool2d( + x=x, return_mask=False, output_size=[3, 3] + ) out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5) - out_3 = paddle.nn.functional.adaptive_max_pool2d(x=x, - output_size=[2, 5]) + out_3 = paddle.nn.functional.adaptive_max_pool2d( + x=x, output_size=[2, 5] + ) - #out_4 = paddle.nn.functional.adaptive_max_pool2d( + # out_4 = paddle.nn.functional.adaptive_max_pool2d( # x=x, output_size=[3, 3], data_format="NHWC") out_5 = paddle.nn.functional.adaptive_max_pool2d( - x=x, output_size=[None, 3]) + x=x, output_size=[None, 3] + ) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -174,40 +188,40 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase): assert np.allclose(out_3.numpy(), self.res_3_np) - #assert np.allclose(out_4.numpy(), self.res_4_np) + # assert np.allclose(out_4.numpy(), self.res_4_np) assert np.allclose(out_5.numpy(), self.res_5_np) class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[3, 3], - pool_type="max") + self.res_1_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[3, 3], pool_type="max" + ) - self.res_2_np = adaptive_pool2d_forward(x=self.x_np, - output_size=5, - pool_type="max") + self.res_2_np = adaptive_pool2d_forward( + x=self.x_np, output_size=5, pool_type="max" + ) - self.res_3_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[2, 5], - pool_type="max") + self.res_3_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[2, 5], pool_type="max" + ) - #self.res_4_np = 
adaptive_pool2d_forward( + # self.res_4_np = adaptive_pool2d_forward( # x=self.x_np, # output_size=[3, 3], # pool_type="max", # data_format="NHWC") - self.res_5_np = adaptive_pool2d_forward(x=self.x_np, - output_size=[None, 3], - pool_type="max") + self.res_5_np = adaptive_pool2d_forward( + x=self.x_np, output_size=[None, 3], pool_type="max" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") @@ -226,14 +240,16 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): # out_4 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D( - output_size=[None, 3]) + output_size=[None, 3] + ) out_5 = adaptive_max_pool(x=x) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_5]) + [res_1, res_2, res_3, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -241,13 +257,14 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): assert np.allclose(res_3, self.res_3_np) - #assert np.allclose(res_4, self.res_4_np) + # assert np.allclose(res_4, self.res_4_np) assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) @@ -261,12 +278,13 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5]) out_3 = adaptive_max_pool(x=x) - #adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D( + # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D( # output_size=[3, 3], data_format="NHWC") - #out_4 = adaptive_max_pool(x=x) + # out_4 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D( - output_size=[None, 3]) + output_size=[None, 3] + ) out_5 = adaptive_max_pool(x=x) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -275,20 +293,21 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): assert np.allclose(out_3.numpy(), self.res_3_np) - #assert np.allclose(out_4.numpy(), self.res_4_np) + # assert np.allclose(out_4.numpy(), self.res_4_np) assert np.allclose(out_5.numpy(), self.res_5_np) class TestOutDtype(unittest.TestCase): - def test_max_pool(self): api_fn = F.adaptive_max_pool2d shape = [1, 3, 32, 32] - check_out_dtype(api_fn, - in_specs=[(shape, )], - expect_dtypes=['float32', 'float64'], - output_size=16) + check_out_dtype( + api_fn, + in_specs=[(shape,)], + expect_dtypes=['float32', 'float64'], + output_size=16, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index 214b6d90b61bbf7d488a95e766b7166e92d36569..e90c4061980383d4c38bca0039819fb4b495ed75 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -30,17 +30,18 @@ def adaptive_end_index(index, 
input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def adaptive_pool3d_forward(x, - output_size, - adaptive=True, - data_format='NCDHW', - pool_type='max'): +def adaptive_pool3d_forward( + x, output_size, adaptive=True, data_format='NCDHW', pool_type='max' +): N = x.shape[0] - C, D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \ - if data_format == 'NCDHW' else [x.shape[4], x.shape[1], x.shape[2],x.shape[3]] + C, D, H, W = ( + [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] + if data_format == 'NCDHW' + else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] + ) - if (isinstance(output_size, int) or output_size == None): + if isinstance(output_size, int) or output_size == None: H_out = output_size W_out = output_size D_out = output_size @@ -58,8 +59,11 @@ def adaptive_pool3d_forward(x, output_size[2] = W W_out = W - out = np.zeros((N, C, D_out, H_out, W_out)) if data_format=='NCDHW' \ + out = ( + np.zeros((N, C, D_out, H_out, W_out)) + if data_format == 'NCDHW' else np.zeros((N, D_out, H_out, W_out, C)) + ) for k in range(D_out): d_start = adaptive_start_index(k, D, output_size[0]) d_end = adaptive_end_index(k, D, output_size[0]) @@ -73,82 +77,98 @@ def adaptive_pool3d_forward(x, w_end = adaptive_end_index(j, W, output_size[2]) if data_format == 'NCDHW': - x_masked = x[:, :, d_start:d_end, h_start:h_end, - w_start:w_end] + x_masked = x[ + :, :, d_start:d_end, h_start:h_end, w_start:w_end + ] if pool_type == 'avg': - field_size = (d_end - d_start) * (h_end - h_start) * ( - w_end - w_start) - out[:, :, k, i, - j] = np.sum(x_masked, axis=(2, 3, 4)) / field_size + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + out[:, :, k, i, j] = ( + np.sum(x_masked, axis=(2, 3, 4)) / field_size + ) elif pool_type == 'max': out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) elif data_format == 'NDHWC': - x_masked = x[:, d_start:d_end, h_start:h_end, - w_start:w_end, :] + x_masked = x[ + :, d_start:d_end, h_start:h_end, w_start:w_end, : + ] if pool_type == 'avg': - field_size = (d_end - d_start) * (h_end - h_start) * ( - w_end - w_start) - out[:, k, i, j, :] = np.sum(x_masked, - axis=(1, 2, 3)) / field_size + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + out[:, k, i, j, :] = ( + np.sum(x_masked, axis=(1, 2, 3)) / field_size + ) elif pool_type == 'max': out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3)) return out class TestAdaptiveMaxPool3DAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="max") + self.res_1_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[3, 3, 3], pool_type="max" + ) - self.res_2_np = adaptive_pool3d_forward(x=self.x_np, - output_size=5, - pool_type="max") + self.res_2_np = adaptive_pool3d_forward( + x=self.x_np, output_size=5, pool_type="max" + ) - self.res_3_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[2, 3, 5], - pool_type="max") + self.res_3_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[2, 3, 5], pool_type="max" + ) - self.res_4_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="max", - data_format="NDHWC") + self.res_4_np = adaptive_pool3d_forward( + x=self.x_np, + output_size=[3, 3, 3], + pool_type="max", + data_format="NDHWC", + ) - self.res_5_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[None, 3, None], - pool_type="max") + self.res_5_np = 
adaptive_pool3d_forward( + x=self.x_np, output_size=[None, 3, None], pool_type="max" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", - shape=[2, 3, 5, 7, 7], - dtype="float32") + x = paddle.fluid.data( + name="x", shape=[2, 3, 5, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[3, 3, 3]) + x=x, output_size=[3, 3, 3] + ) out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5) out_3 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[2, 3, 5]) + x=x, output_size=[2, 3, 5] + ) - #out_4 = paddle.nn.functional.adaptive_max_pool3d( + # out_4 = paddle.nn.functional.adaptive_max_pool3d( # x=x, output_size=[3, 3, 3], data_format="NDHWC") out_5 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[None, 3, None]) + x=x, output_size=[None, 3, None] + ) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_5]) + [res_1, res_2, res_3, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -156,30 +176,34 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): assert np.allclose(res_3, self.res_3_np) - #assert np.allclose(res_4, self.res_4_np) + # assert np.allclose(res_4, self.res_4_np) assert np.allclose(res_5, self.res_5_np) def func_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) out_1 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[3, 3, 3]) + x=x, output_size=[3, 3, 3] + ) out_2 = paddle.nn.functional.adaptive_max_pool3d(x=x, output_size=5) out_3 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[2, 3, 5]) + x=x, output_size=[2, 3, 5] + ) - #out_4 = paddle.nn.functional.adaptive_max_pool3d( + # out_4 = paddle.nn.functional.adaptive_max_pool3d( # x=x, output_size=[3, 3, 3], data_format="NDHWC") out_5 = paddle.nn.functional.adaptive_max_pool3d( - x=x, output_size=[None, 3, None]) + x=x, output_size=[None, 3, None] + ) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -187,7 +211,7 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): assert np.allclose(out_3.numpy(), self.res_3_np) - #assert np.allclose(out_4.numpy(), self.res_4_np) + # assert np.allclose(out_4.numpy(), self.res_4_np) assert np.allclose(out_5.numpy(), self.res_5_np) @@ -198,20 +222,19 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): - def setUp(self): self.x_np = np.random.random([2, 3, 5, 7, 7]).astype("float32") - self.res_1_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[3, 3, 3], - pool_type="max") + self.res_1_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[3, 3, 3], pool_type="max" + ) - self.res_2_np = adaptive_pool3d_forward(x=self.x_np, - output_size=5, - pool_type="max") + self.res_2_np = adaptive_pool3d_forward( + x=self.x_np, output_size=5, pool_type="max" + ) - 
self.res_3_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[2, 3, 5], - pool_type="max") + self.res_3_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[2, 3, 5], pool_type="max" + ) # self.res_4_np = adaptive_pool3d_forward( # x=self.x_np, @@ -219,28 +242,31 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): # pool_type="max", # data_format="NDHWC") - self.res_5_np = adaptive_pool3d_forward(x=self.x_np, - output_size=[None, 3, None], - pool_type="max") + self.res_5_np = adaptive_pool3d_forward( + x=self.x_np, output_size=[None, 3, None], pool_type="max" + ) def test_static_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", - shape=[2, 3, 5, 7, 7], - dtype="float32") + x = paddle.fluid.data( + name="x", shape=[2, 3, 5, 7, 7], dtype="float32" + ) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[3, 3, 3]) + output_size=[3, 3, 3] + ) out_1 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5) out_2 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[2, 3, 5]) + output_size=[2, 3, 5] + ) out_3 = adaptive_max_pool(x=x) # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( @@ -248,14 +274,16 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): # out_4 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[None, 3, None]) + output_size=[None, 3, None] + ) out_5 = adaptive_max_pool(x=x) exe = paddle.static.Executor(place=place) - [res_1, res_2, res_3, - res_5] = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=[out_1, out_2, out_3, out_5]) + [res_1, res_2, res_3, res_5] = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=[out_1, out_2, out_3, out_5], + ) assert np.allclose(res_1, self.res_1_np) @@ -268,21 +296,24 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): assert np.allclose(res_5, self.res_5_np) def test_dynamic_graph(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) x = paddle.to_tensor(self.x_np) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[3, 3, 3]) + output_size=[3, 3, 3] + ) out_1 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D(output_size=5) out_2 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[2, 3, 5]) + output_size=[2, 3, 5] + ) out_3 = adaptive_max_pool(x=x) # adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( @@ -290,7 +321,8 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): # out_4 = adaptive_max_pool(x=x) adaptive_max_pool = paddle.nn.AdaptiveMaxPool3D( - output_size=[None, 3, None]) + output_size=[None, 3, None] + ) out_5 = adaptive_max_pool(x=x) assert np.allclose(out_1.numpy(), self.res_1_np) @@ -305,14 +337,15 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): class TestOutDtype(unittest.TestCase): - def test_max_pool(self): api_fn = F.adaptive_max_pool3d shape = [1, 3, 32, 32, 32] - check_out_dtype(api_fn, - in_specs=[(shape, )], - expect_dtypes=['float32', 'float64'], - output_size=16) + 
check_out_dtype( + api_fn, + in_specs=[(shape,)], + expect_dtypes=['float32', 'float64'], + output_size=16, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py index aa510efc59db8e5a3e860ce509d067f7f5974c8d..791c2351d7340c7296d4b018aa89b1d3e3155eaf 100644 --- a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py @@ -31,13 +31,15 @@ def add_position_encoding(input, alpha=1.0, beta=1.0): for i in range(batch_size): for j in range(max_length): for k in range(half_shape): - val = j / pow( - 10000.0, k * 1.0 / - (half_shape - 1)) if half_shape > 1 else j / 10000.0 - out[i, j, k] = \ - input[i, j, k] * alpha + math.sin(val) * beta - out[i, j, half_shape + k] = \ + val = ( + j / pow(10000.0, k * 1.0 / (half_shape - 1)) + if half_shape > 1 + else j / 10000.0 + ) + out[i, j, k] = input[i, j, k] * alpha + math.sin(val) * beta + out[i, j, half_shape + k] = ( input[i, j, half_shape + k] * alpha + math.cos(val) * beta + ) return out @@ -133,45 +135,49 @@ class TestAddPositionEncodingLoDTensorOp(OpTest): max_length = self.lod[0][i] for j in range(max_length): for k in range(half_shape): - val = j / pow( - 10000.0, k * 1.0 / - (half_shape - 1)) if half_shape > 1 else j / 10000.0 + val = ( + j / pow(10000.0, k * 1.0 / (half_shape - 1)) + if half_shape > 1 + else j / 10000.0 + ) pos = start + j - self.out[pos, k] = \ + self.out[pos, k] = ( self.x[pos, k] * self.alpha + math.sin(val) * self.beta - self.out[pos, half_shape + k] = \ - self.x[pos, half_shape + k] * self.alpha + math.cos(val) * self.beta + ) + self.out[pos, half_shape + k] = ( + self.x[pos, half_shape + k] * self.alpha + + math.cos(val) * self.beta + ) start += max_length class TestAddPositionEncodingOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((4, 16, 8)).astype("float32") def test_Variable(): # the input type must be Variable - fluid.layers.add_position_encoding(input=input_data, - alpha=1.0, - beta=1.0) + fluid.layers.add_position_encoding( + input=input_data, alpha=1.0, beta=1.0 + ) self.assertRaises(TypeError, test_Variable) class TestAddPositionEncodingOpDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() tensor = np.random.randn(16, 32, 64) position_tensor = paddle.fluid.layers.add_position_encoding( - input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0).numpy() + input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0 + ).numpy() paddle.enable_static() position_tensor_np = add_position_encoding(tensor, 1.0, 1.0) - np.testing.assert_allclose(position_tensor, - position_tensor_np, - rtol=1e-05) + np.testing.assert_allclose( + position_tensor, position_tensor_np, rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py index c7479e059b48f65ee9d748e647462da8c3b31ac5..1604445a873574e0f45dab9508437b6ebf6f2603 100644 --- a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py +++ b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py @@ -21,15 +21,16 @@ import time def inplace_add(x, bias): helper = LayerHelper('scale', **locals()) - helper.append_op(type='scale', - inputs={'X': [x]}, - outputs={'Out': [x]}, - attrs={'bias': bias}) + helper.append_op( + type='scale', + 
inputs={'X': [x]}, + outputs={'Out': [x]}, + attrs={'bias': bias}, + ) return x class TestAddReaderDependency(unittest.TestCase): - def setUp(self): self.batch_num = 3 self.sleep_time = 2 @@ -49,17 +50,19 @@ class TestAddReaderDependency(unittest.TestCase): feed_list=[tmp_in], capacity=16, iterable=False, - use_double_buffer=self.use_double_buffer) + use_double_buffer=self.use_double_buffer, + ) def data_source(): for _ in range(self.batch_num): time.sleep(self.sleep_time) # sleep some times - yield np.random.uniform(low=-1, high=1, - size=[1]).astype('float32'), + yield np.random.uniform( + low=-1, high=1, size=[1] + ).astype('float32'), - persistable_in = fluid.data(name='persistable_in', - dtype='float32', - shape=[1]) + persistable_in = fluid.data( + name='persistable_in', dtype='float32', shape=[1] + ) persistable_in.persistable = True persistable_in = inplace_add(persistable_in, bias=1) @@ -74,31 +77,34 @@ class TestAddReaderDependency(unittest.TestCase): while True: if batch_id == 0: feed = { - persistable_in.name: - np.array([-1]).astype('float32') + persistable_in.name: np.array([-1]).astype( + 'float32' + ) } else: feed = None - ret, = exe.run(prog, - feed=feed, - fetch_list=[persistable_in]) - self.assertEqual(ret.shape, (1, )) + (ret,) = exe.run( + prog, feed=feed, fetch_list=[persistable_in] + ) + self.assertEqual(ret.shape, (1,)) self.assertEqual(ret[0], batch_id) batch_id += 1 except fluid.core.EOFException: loader.reset() self.assertEqual(batch_id, self.batch_num) - t = fluid.global_scope().find_var( - persistable_in.name).get_tensor() + t = ( + fluid.global_scope() + .find_var(persistable_in.name) + .get_tensor() + ) t_val = np.array(t) - self.assertEqual(t_val.shape, (1, )) + self.assertEqual(t_val.shape, (1,)) self.assertEqual(t_val[0] + 1, batch_id) class TestAddReaderDependencyWithoutDoubleBuffer(TestAddReaderDependency): - def setUp(self): self.batch_num = 3 self.sleep_time = 2 diff --git a/python/paddle/fluid/tests/unittests/test_addmm_op.py b/python/paddle/fluid/tests/unittests/test_addmm_op.py index de87233cdb59b919b4581e48e8ec138241bf9a00..f101f55dc739ea346c09f09fb416eb080a070bc4 100644 --- a/python/paddle/fluid/tests/unittests/test_addmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_addmm_op.py @@ -33,8 +33,8 @@ class TestAddMMOp(OpTest): 'Y': np.random.random((10, 20)).astype(self.dtype), } self.outputs = { - 'Out': - self.inputs['Input'] + np.dot(self.inputs['X'], self.inputs['Y']) + 'Out': self.inputs['Input'] + + np.dot(self.inputs['X'], self.inputs['Y']) } def init_dtype_type(self): @@ -62,91 +62,122 @@ class TestAddMMOpError(unittest.TestCase): with program_guard(Program(), Program()): # The input type of addmm_op must be Variable. - input = fluid.create_lod_tensor(np.array([[-1, -1], [-1, -1]]), - [[2]], fluid.CPUPlace()) - x1 = fluid.create_lod_tensor(np.array([[-1, -1], [-1, -1]]), [[2]], - fluid.CPUPlace()) - x2 = fluid.create_lod_tensor(np.array([[-1, -1], [-1, -1]]), [[2]], - fluid.CPUPlace()) + input = fluid.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + ) + x1 = fluid.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + ) + x2 = fluid.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.addmm, input, x1, x2) # The input dtype of mul_op must be float32 or float64. 
- input = fluid.layers.data(name='input', - shape=[4, 4], - dtype="int32", - append_batch_size=False) - x3 = fluid.layers.data(name='x3', - shape=[4, 4], - dtype="int32", - append_batch_size=False) - x4 = fluid.layers.data(name='x4', - shape=[4, 4], - dtype="int32", - append_batch_size=False) + input = fluid.layers.data( + name='input', + shape=[4, 4], + dtype="int32", + append_batch_size=False, + ) + x3 = fluid.layers.data( + name='x3', shape=[4, 4], dtype="int32", append_batch_size=False + ) + x4 = fluid.layers.data( + name='x4', shape=[4, 4], dtype="int32", append_batch_size=False + ) self.assertRaises(TypeError, paddle.addmm, input, x3, x4) # x and y dimension mismatch - x5 = fluid.layers.data(name='x5', - shape=[4, 5], - dtype="float32", - append_batch_size=False) - x6 = fluid.layers.data(name='x6', - shape=[4, 4], - dtype="float32", - append_batch_size=False) + x5 = fluid.layers.data( + name='x5', + shape=[4, 5], + dtype="float32", + append_batch_size=False, + ) + x6 = fluid.layers.data( + name='x6', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) self.assertRaises(ValueError, paddle.addmm, input, x5, x6) # input and x are not broadcastable - x7 = fluid.layers.data(name='x7', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - x8 = fluid.layers.data(name='x8', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - input1 = fluid.layers.data(name='input1', - shape=[2, 4], - dtype="float32", - append_batch_size=False) + x7 = fluid.layers.data( + name='x7', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + x8 = fluid.layers.data( + name='x8', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + input1 = fluid.layers.data( + name='input1', + shape=[2, 4], + dtype="float32", + append_batch_size=False, + ) self.assertRaises(ValueError, paddle.addmm, input1, x7, x8) # input and x are not broadcastable - x9 = fluid.layers.data(name='x9', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - x10 = fluid.layers.data(name='x10', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - input2 = fluid.layers.data(name='input2', - shape=[1, 2], - dtype="float32", - append_batch_size=False) + x9 = fluid.layers.data( + name='x9', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + x10 = fluid.layers.data( + name='x10', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + input2 = fluid.layers.data( + name='input2', + shape=[1, 2], + dtype="float32", + append_batch_size=False, + ) self.assertRaises(ValueError, paddle.addmm, input2, x9, x10) - x11 = fluid.layers.data(name='x11', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - x12 = fluid.layers.data(name='x12', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - input3 = fluid.layers.data(name='input3', - shape=[4, 2], - dtype="float32", - append_batch_size=False) + x11 = fluid.layers.data( + name='x11', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + x12 = fluid.layers.data( + name='x12', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + input3 = fluid.layers.data( + name='input3', + shape=[4, 2], + dtype="float32", + append_batch_size=False, + ) self.assertRaises(ValueError, paddle.addmm, input3, x11, x12) - x13 = fluid.layers.data(name='x13', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - x14 = fluid.layers.data(name='x14', - shape=[4, 4], - dtype="float32", - append_batch_size=False) - input4 = fluid.layers.data(name='input4', - shape=[3, 1], - 
dtype="float32", - append_batch_size=False) + x13 = fluid.layers.data( + name='x13', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + x14 = fluid.layers.data( + name='x14', + shape=[4, 4], + dtype="float32", + append_batch_size=False, + ) + input4 = fluid.layers.data( + name='input4', + shape=[3, 1], + dtype="float32", + append_batch_size=False, + ) self.assertRaises(ValueError, paddle.addmm, input4, x13, x14) @@ -166,8 +197,10 @@ class TestAddMMOp2(TestAddMMOp): 'Alpha': 0.1, 'Beta': 1.0, } - self.outputs = {'Out': self.attrs['Beta'] * self.inputs['Input'] + \ - self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y'])} + self.outputs = { + 'Out': self.attrs['Beta'] * self.inputs['Input'] + + self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y']) + } class TestAddMMOp3(OpTest): @@ -185,8 +218,10 @@ class TestAddMMOp3(OpTest): 'Alpha': 0.5, 'Beta': 2.0, } - self.outputs = {'Out': self.attrs['Beta'] * self.inputs['Input'] + \ - self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y'])} + self.outputs = { + 'Out': self.attrs['Beta'] * self.inputs['Input'] + + self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y']) + } def init_dtype_type(self): pass @@ -222,8 +257,10 @@ class TestAddMMOp4(OpTest): 'Alpha': 0.5, 'Beta': 2.0, } - self.outputs = {'Out': self.attrs['Beta'] * self.inputs['Input'] + \ - self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y'])} + self.outputs = { + 'Out': self.attrs['Beta'] * self.inputs['Input'] + + self.attrs['Alpha'] * np.dot(self.inputs['X'], self.inputs['Y']) + } def init_dtype_type(self): pass @@ -245,7 +282,6 @@ class TestAddMMOp4(OpTest): class TestAddMMOp5(unittest.TestCase): - def test_api_with_dygraph(self): np_input = np.random.random((20, 30)).astype(np.float32) np_x = np.random.random((20, 6)).astype(np.float32) @@ -260,7 +296,6 @@ class TestAddMMOp5(unittest.TestCase): class TestAddMMAPI(unittest.TestCase): - def test_api_error(self): data_x = np.ones((2, 2)).astype(np.float32) data_y = np.ones((2, 2)).astype(np.float32) @@ -273,11 +308,9 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x_wrong) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input) - out = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=0.5, - alpha=5.0) + out = paddle.tensor.addmm( + input=input, x=x, y=y, beta=0.5, alpha=5.0 + ) self.assertRaises(ValueError, test_error1) @@ -286,11 +319,9 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x_wrong) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input) - out = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=0.5, - alpha=5.0) + out = paddle.tensor.addmm( + input=input, x=x, y=y, beta=0.5, alpha=5.0 + ) self.assertRaises(ValueError, test_error2) @@ -299,11 +330,9 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input_wrong) - out = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=0.5, - alpha=5.0) + out = paddle.tensor.addmm( + input=input, x=x, y=y, beta=0.5, alpha=5.0 + ) self.assertRaises(ValueError, test_error3) @@ -312,11 +341,9 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input_wrong) - out = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=0.5, - alpha=5.0) + out = paddle.tensor.addmm( + input=input, x=x, y=y, beta=0.5, alpha=5.0 + ) self.assertRaises(ValueError, test_error4) @@ -334,17 +361,16 @@ class 
TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input) - paddle_output = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=data_beta, - alpha=data_alpha) + paddle_output = paddle.tensor.addmm( + input=input, x=x, y=y, beta=data_beta, alpha=data_alpha + ) numpy_output = data_beta * data_input + data_alpha * np.dot( - data_x, data_y) + data_x, data_y + ) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() @@ -360,17 +386,16 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input) - paddle_output = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=data_beta, - alpha=data_alpha) + paddle_output = paddle.tensor.addmm( + input=input, x=x, y=y, beta=data_beta, alpha=data_alpha + ) numpy_output = data_beta * data_input + data_alpha * np.dot( - data_x, data_y) + data_x, data_y + ) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() @@ -386,17 +411,16 @@ class TestAddMMAPI(unittest.TestCase): x = paddle.to_tensor(data_x) y = paddle.to_tensor(data_y) input = paddle.to_tensor(data_input) - paddle_output = paddle.tensor.addmm(input=input, - x=x, - y=y, - beta=data_beta, - alpha=data_alpha) + paddle_output = paddle.tensor.addmm( + input=input, x=x, y=y, beta=data_beta, alpha=data_alpha + ) numpy_output = data_beta * data_input + data_alpha * np.dot( - data_x, data_y) + data_x, data_y + ) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_affine_channel_op.py b/python/paddle/fluid/tests/unittests/test_affine_channel_op.py index 4bd910022560425dfb11baa791ab47d165706d82..f09e5be5775065feb352d2d060b007201c1b125a 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_channel_op.py +++ b/python/paddle/fluid/tests/unittests/test_affine_channel_op.py @@ -33,7 +33,6 @@ def affine_channel(x, scale, bias, layout): class TestAffineChannelOp(OpTest): - def setUp(self): self.op_type = "affine_channel" self.init_test_case() @@ -67,7 +66,6 @@ class TestAffineChannelOp(OpTest): class TestAffineChannelOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program()): @@ -78,32 +76,31 @@ class TestAffineChannelOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[None, 1, 2, 2], - dtype='int32') + x2 = fluid.layers.data( + name='x2', shape=[None, 1, 2, 2], dtype='int32' + ) fluid.layers.affine_channel(x2) self.assertRaises(TypeError, test_x_dtype) def test_scale_type(): - x3 = fluid.layers.data(name='x3', - shape=[None, 1, 2, 2], - dtype='float32') + x3 = fluid.layers.data( + name='x3', shape=[None, 1, 2, 2], dtype='float32' + ) fluid.layers.affine_channel(x3, scale=1) self.assertRaises(TypeError, test_scale_type) def test_bias_type(): - x4 = fluid.layers.data(name='x4', - shape=[None, 1, 2, 2], - dtype='float32') + x4 = fluid.layers.data( + name='x4', shape=[None, 1, 2, 2], dtype='float32' + ) fluid.layers.affine_channel(x4, bias=1) self.assertRaises(TypeError, test_bias_type) class 
TestAffineChannelNHWC(TestAffineChannelOp): - def init_test_case(self): self.shape = [2, 3, 3, 100] self.C = 100 @@ -117,7 +114,6 @@ class TestAffineChannelNHWC(TestAffineChannelOp): class TestAffineChannel2D(TestAffineChannelOp): - def init_test_case(self): self.shape = [2, 100] self.C = 100 @@ -131,7 +127,7 @@ class TestAffineChannel2D(TestAffineChannelOp): # TODO(qingqing): disable unit testing for large shape -#class TestAffineChannelNCHWLargeShape(TestAffineChannelOp): +# class TestAffineChannelNCHWLargeShape(TestAffineChannelOp): # def init_test_case(self): # self.shape = [4, 128, 112, 112] # self.C = 128 @@ -147,7 +143,7 @@ class TestAffineChannel2D(TestAffineChannelOp): # def test_check_grad_stopgrad_dscale_dbias(self): # pass -#class TestAffineChannelNHWCLargeShape(TestAffineChannelNCHWLargeShape): +# class TestAffineChannelNHWCLargeShape(TestAffineChannelNCHWLargeShape): # def init_test_case(self): # self.shape = [64, 32, 32, 128] # self.C = 128 diff --git a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py index 9091a1c9b6ddd191aec737adb65da54645e9330f..cd1cecfb618ca43ed0b0837046231c078555a005 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py +++ b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py @@ -21,15 +21,16 @@ import unittest class AffineGridTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - theta_shape=(20, 2, 3), - output_shape=[20, 2, 5, 7], - align_corners=True, - dtype="float32", - invalid_theta=False, - variable_output_shape=False): + def __init__( + self, + methodName='runTest', + theta_shape=(20, 2, 3), + output_shape=[20, 2, 5, 7], + align_corners=True, + dtype="float32", + invalid_theta=False, + variable_output_shape=False, + ): super(AffineGridTestCase, self).__init__(methodName) self.theta_shape = theta_shape @@ -48,14 +49,14 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data("input", - self.theta_shape, - dtype=self.dtype) + theta_var = fluid.data( + "input", self.theta_shape, dtype=self.dtype + ) y_var = fluid.layers.affine_grid(theta_var, self.output_shape) feed_dict = {"input": self.theta} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -64,28 +65,33 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data("input", - self.theta_shape, - dtype=self.dtype) - y_var = F.affine_grid(theta_var, - self.output_shape, - align_corners=self.align_corners) + theta_var = fluid.data( + "input", self.theta_shape, dtype=self.dtype + ) + y_var = F.affine_grid( + theta_var, + self.output_shape, + align_corners=self.align_corners, + ) feed_dict = {"input": self.theta} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_dygraph_layer(self): paddle.disable_static() - theta_var = dg.to_variable( - self.theta) if not self.invalid_theta else "invalid" - output_shape = dg.to_variable( - self.output_shape - ) if self.variable_output_shape else self.output_shape - y_var = F.affine_grid(theta_var, - output_shape, - 
align_corners=self.align_corners) + theta_var = ( + dg.to_variable(self.theta) if not self.invalid_theta else "invalid" + ) + output_shape = ( + dg.to_variable(self.output_shape) + if self.variable_output_shape + else self.output_shape + ) + y_var = F.affine_grid( + theta_var, output_shape, align_corners=self.align_corners + ) y_np = y_var.numpy() return y_np @@ -108,7 +114,6 @@ class AffineGridTestCase(unittest.TestCase): class AffineGridErrorTestCase(AffineGridTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -122,22 +127,26 @@ def add_cases(suite): suite.addTest(AffineGridTestCase(methodName='runTest', align_corners=False)) suite.addTest( - AffineGridTestCase(methodName='runTest', variable_output_shape=True)) + AffineGridTestCase(methodName='runTest', variable_output_shape=True) + ) suite.addTest( - AffineGridTestCase(methodName='runTest', - theta_shape=(20, 2, 3), - output_shape=[20, 1, 7, 7], - align_corners=True)) + AffineGridTestCase( + methodName='runTest', + theta_shape=(20, 2, 3), + output_shape=[20, 1, 7, 7], + align_corners=True, + ) + ) def add_error_cases(suite): suite.addTest( - AffineGridErrorTestCase(methodName='runTest', output_shape="not_valid")) + AffineGridErrorTestCase(methodName='runTest', output_shape="not_valid") + ) suite.addTest( - AffineGridErrorTestCase( - methodName='runTest', - invalid_theta=True)) # to test theta not variable error checking + AffineGridErrorTestCase(methodName='runTest', invalid_theta=True) + ) # to test theta not variable error checking def load_tests(loader, standard_tests, pattern): diff --git a/python/paddle/fluid/tests/unittests/test_affine_grid_op.py b/python/paddle/fluid/tests/unittests/test_affine_grid_op.py index f8c0dedc18423cfac5a52f9816e53fe7001383f5..096b4fe12777dd4cbe8dd2fad4413acb816b57af 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_grid_op.py +++ b/python/paddle/fluid/tests/unittests/test_affine_grid_op.py @@ -26,12 +26,21 @@ def AffineGrid4D(theta, size, align_corners): if not align_corners: h_factor = (h - 1) / float(h) w_factor = (w - 1) / float(w) - h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, - axis=0).T[:, :, np.newaxis] * h_factor - w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, - axis=0)[:, :, np.newaxis] * w_factor - grid = np.concatenate([w_idx, h_idx, np.ones([h, w, 1])], - axis=2) # h * w * 3 + h_idx = ( + np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[ + :, :, np.newaxis + ] + * h_factor + ) + w_idx = ( + np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[ + :, :, np.newaxis + ] + * w_factor + ) + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2 + ) # h * w * 3 grid = np.repeat(grid[np.newaxis, :], size[0], axis=0) # n * h * w *3 ret = np.zeros([n, h * w, 2]) @@ -51,20 +60,39 @@ def AffineGrid5D(theta, size, align_corners): d_factor = (d - 1) / float(d) h_factor = (h - 1) / float(h) w_factor = (w - 1) / float(w) - d_idx = np.repeat(np.repeat( - np.linspace(-1, 1, d)[:, np.newaxis, np.newaxis], h, axis=1), - w, - axis=2)[:, :, :, np.newaxis] * d_factor - h_idx = np.repeat(np.repeat( - np.linspace(-1, 1, h)[np.newaxis, :, np.newaxis], w, axis=2), - d, - axis=0)[:, :, :, np.newaxis] * h_factor - w_idx = np.repeat(np.repeat( - np.linspace(-1, 1, w)[np.newaxis, np.newaxis, :], h, axis=1), - d, - axis=0)[:, :, :, np.newaxis] * w_factor + d_idx = ( + np.repeat( + np.repeat( + np.linspace(-1, 1, d)[:, np.newaxis, np.newaxis], h, axis=1 + ), + w, + axis=2, + )[:, :, :, np.newaxis] + * d_factor + ) + h_idx = ( 
+ np.repeat( + np.repeat( + np.linspace(-1, 1, h)[np.newaxis, :, np.newaxis], w, axis=2 + ), + d, + axis=0, + )[:, :, :, np.newaxis] + * h_factor + ) + w_idx = ( + np.repeat( + np.repeat( + np.linspace(-1, 1, w)[np.newaxis, np.newaxis, :], h, axis=1 + ), + d, + axis=0, + )[:, :, :, np.newaxis] + * w_factor + ) grid = np.concatenate( - [w_idx, h_idx, d_idx, np.ones([d, h, w, 1])], axis=3) # d * h * w * 4 + [w_idx, h_idx, d_idx, np.ones([d, h, w, 1])], axis=3 + ) # d * h * w * 4 grid = np.repeat(grid[np.newaxis, :], size[0], axis=0) # n * d * h * w * 4 ret = np.zeros([n, d * h * w, 3]) @@ -75,7 +103,6 @@ def AffineGrid5D(theta, size, align_corners): class TestAffineGridOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = "affine_grid" @@ -84,31 +111,32 @@ class TestAffineGridOp(OpTest): self.inputs = {'Theta': theta} self.attrs = { "use_cudnn": self.use_cudnn, - "align_corners": self.align_corners + "align_corners": self.align_corners, } if self.dynamic_shape: self.inputs['OutputShape'] = self.output_shape else: self.attrs['output_shape'] = self.output_shape - if (self.theta_shape[1] == 2 and self.theta_shape[2] == 3): + if self.theta_shape[1] == 2 and self.theta_shape[2] == 3: self.outputs = { - 'Output': AffineGrid4D(theta, self.output_shape, - self.align_corners) + 'Output': AffineGrid4D( + theta, self.output_shape, self.align_corners + ) } else: self.outputs = { - 'Output': AffineGrid5D(theta, self.output_shape, - self.align_corners) + 'Output': AffineGrid5D( + theta, self.output_shape, self.align_corners + ) } def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['Theta'], - 'Output', - no_grad_set=['OutputShape'], - check_eager=True) + self.check_grad( + ['Theta'], 'Output', no_grad_set=['OutputShape'], check_eager=True + ) def initTestCase(self): self.theta_shape = (17, 2, 3) @@ -119,19 +147,19 @@ class TestAffineGridOp(OpTest): class TestAffineGridOpCase1(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 2, 3) self.output_shape = np.array([20, 2, 5, 7]).astype("int32") self.dynamic_shape = True self.use_cudnn = True if paddle.fluid.core.is_compiled_with_rocm(): - self.use_cudnn = False # ROCM platform do not have MIOPEN kernel for affine_grid + self.use_cudnn = ( + False # ROCM platform do not have MIOPEN kernel for affine_grid + ) self.align_corners = True class TestAffineGridOpCase2(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 2, 3) self.output_shape = np.array([20, 2, 5, 7]).astype("int32") @@ -141,7 +169,6 @@ class TestAffineGridOpCase2(TestAffineGridOp): class TestAffineGridOpCase3(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 2, 3) self.output_shape = np.array([20, 2, 5, 7]).astype("int32") @@ -151,7 +178,6 @@ class TestAffineGridOpCase3(TestAffineGridOp): class TestAffineGridOpCase4(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (25, 2, 3) self.output_shape = np.array([25, 2, 5, 6]).astype("int32") @@ -161,7 +187,6 @@ class TestAffineGridOpCase4(TestAffineGridOp): class TestAffineGridOp5DCase1(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 3, 4) self.output_shape = np.array([20, 1, 2, 5, 7]).astype("int32") @@ -171,7 +196,6 @@ class TestAffineGridOp5DCase1(TestAffineGridOp): class TestAffineGridOp5DCase2(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 3, 4) self.output_shape = np.array([20, 1, 2, 5, 7]).astype("int32") @@ -181,7 +205,6 @@ class 
TestAffineGridOp5DCase2(TestAffineGridOp): class TestAffineGridOp5DCase3(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (20, 3, 4) self.output_shape = np.array([20, 1, 2, 5, 7]).astype("int32") @@ -191,7 +214,6 @@ class TestAffineGridOp5DCase3(TestAffineGridOp): class TestAffineGridOp5DCase4(TestAffineGridOp): - def initTestCase(self): self.theta_shape = (25, 3, 4) self.output_shape = np.array([25, 1, 2, 5, 6]).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py index 66afbcfe20947127e27434ffa93ae1dc44de18b0..c406241c65be1d23c1a2c9adca8e05f71e73b33c 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py @@ -20,68 +20,53 @@ from paddle.fluid.framework import _test_eager_guard class TestAllcloseLayer(unittest.TestCase): - def allclose_check(self, use_cuda, dtype='float32'): a = fluid.data(name="a", shape=[2], dtype=dtype) b = fluid.data(name="b", shape=[2], dtype=dtype) - result = paddle.allclose(a, - b, - rtol=1e-05, - atol=1e-08, - equal_nan=False, - name="ignore_nan") - result_nan = paddle.allclose(a, - b, - rtol=1e-05, - atol=1e-08, - equal_nan=True, - name="equal_nan") - result_corner = paddle.allclose(a, - b, - rtol=0.01, - atol=0.0, - name="corner_case") + result = paddle.allclose( + a, b, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan" + ) + result_nan = paddle.allclose( + a, b, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan" + ) + result_corner = paddle.allclose( + a, b, rtol=0.01, atol=0.0, name="corner_case" + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - x = np.array([10000., 1e-07]).astype(dtype) + x = np.array([10000.0, 1e-07]).astype(dtype) y = np.array([10000.1, 1e-08]).astype(dtype) - result_v, result_nan_v = exe.run(feed={ - 'a': x, - 'b': y - }, - fetch_list=[result, result_nan]) + result_v, result_nan_v = exe.run( + feed={'a': x, 'b': y}, fetch_list=[result, result_nan] + ) self.assertEqual(result_v[0], False) self.assertEqual(result_nan_v[0], False) - x = np.array([10000., 1e-08]).astype(dtype) + x = np.array([10000.0, 1e-08]).astype(dtype) y = np.array([10000.1, 1e-09]).astype(dtype) - result_v, result_nan_v = exe.run(feed={ - 'a': x, - 'b': y - }, - fetch_list=[result, result_nan]) + result_v, result_nan_v = exe.run( + feed={'a': x, 'b': y}, fetch_list=[result, result_nan] + ) self.assertEqual(result_v[0], True) self.assertEqual(result_nan_v[0], True) x = np.array([1.0, float('nan')]).astype(dtype) y = np.array([1.0, float('nan')]).astype(dtype) - result_v, result_nan_v = exe.run(feed={ - 'a': x, - 'b': y - }, - fetch_list=[result, result_nan]) + result_v, result_nan_v = exe.run( + feed={'a': x, 'b': y}, fetch_list=[result, result_nan] + ) self.assertEqual(result_v[0], False) self.assertEqual(result_nan_v[0], True) # for corner case x = np.array([10.1, 10.1]).astype(dtype) y = np.array([10, 10]).astype(dtype) - result_c, = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner]) - corner_res = (dtype == 'float64') + (result_c,) = exe.run(feed={'a': x, 'b': y}, fetch_list=[result_corner]) + corner_res = dtype == 'float64' self.assertEqual(result_c[0], corner_res) def test_allclose_cpu_fp32(self): @@ -115,9 +100,9 @@ class TestAllcloseLayer(unittest.TestCase): self.allclose_check(use_cuda=True, dtype='float64') def func_dygraph_mode(self): - x_1 = np.array([10000., 
1e-07]).astype("float32") + x_1 = np.array([10000.0, 1e-07]).astype("float32") y_1 = np.array([10000.1, 1e-08]).astype("float32") - x_2 = np.array([10000., 1e-08]).astype("float32") + x_2 = np.array([10000.0, 1e-08]).astype("float32") y_2 = np.array([10000.1, 1e-09]).astype("float32") x_3 = np.array([1.0, float('nan')]).astype("float32") y_3 = np.array([1.0, float('nan')]).astype("float32") @@ -129,68 +114,76 @@ class TestAllcloseLayer(unittest.TestCase): with fluid.dygraph.guard(): x_v_1 = paddle.to_tensor(x_1) y_v_1 = paddle.to_tensor(y_1) - ret_1 = paddle.allclose(x_v_1, - y_v_1, - rtol=1e-05, - atol=1e-08, - equal_nan=False, - name='test_1') + ret_1 = paddle.allclose( + x_v_1, + y_v_1, + rtol=1e-05, + atol=1e-08, + equal_nan=False, + name='test_1', + ) self.assertEqual(ret_1.numpy()[0], False) - ret_1 = paddle.allclose(x_v_1, - y_v_1, - rtol=1e-05, - atol=1e-08, - equal_nan=True, - name='test_2') + ret_1 = paddle.allclose( + x_v_1, + y_v_1, + rtol=1e-05, + atol=1e-08, + equal_nan=True, + name='test_2', + ) self.assertEqual(ret_1.numpy()[0], False) x_v_2 = paddle.to_tensor(x_2) y_v_2 = paddle.to_tensor(y_2) - ret_2 = paddle.allclose(x_v_2, - y_v_2, - rtol=1e-05, - atol=1e-08, - equal_nan=False, - name='test_3') + ret_2 = paddle.allclose( + x_v_2, + y_v_2, + rtol=1e-05, + atol=1e-08, + equal_nan=False, + name='test_3', + ) self.assertEqual(ret_2.numpy()[0], True) - ret_2 = paddle.allclose(x_v_2, - y_v_2, - rtol=1e-05, - atol=1e-08, - equal_nan=True, - name='test_4') + ret_2 = paddle.allclose( + x_v_2, + y_v_2, + rtol=1e-05, + atol=1e-08, + equal_nan=True, + name='test_4', + ) self.assertEqual(ret_2.numpy()[0], True) x_v_3 = paddle.to_tensor(x_3) y_v_3 = paddle.to_tensor(y_3) - ret_3 = paddle.allclose(x_v_3, - y_v_3, - rtol=1e-05, - atol=1e-08, - equal_nan=False, - name='test_5') + ret_3 = paddle.allclose( + x_v_3, + y_v_3, + rtol=1e-05, + atol=1e-08, + equal_nan=False, + name='test_5', + ) self.assertEqual(ret_3.numpy()[0], False) - ret_3 = paddle.allclose(x_v_3, - y_v_3, - rtol=1e-05, - atol=1e-08, - equal_nan=True, - name='test_6') + ret_3 = paddle.allclose( + x_v_3, + y_v_3, + rtol=1e-05, + atol=1e-08, + equal_nan=True, + name='test_6', + ) self.assertEqual(ret_3.numpy()[0], True) # for corner case x_v_4 = paddle.to_tensor(x_4) y_v_4 = paddle.to_tensor(y_4) - ret_4 = paddle.allclose(x_v_4, - y_v_4, - rtol=0.01, - atol=0.0, - name='test_7') + ret_4 = paddle.allclose( + x_v_4, y_v_4, rtol=0.01, atol=0.0, name='test_7' + ) self.assertEqual(ret_4.numpy()[0], False) x_v_5 = paddle.to_tensor(x_5) y_v_5 = paddle.to_tensor(y_5) - ret_5 = paddle.allclose(x_v_5, - y_v_5, - rtol=0.015, - atol=0.0, - name='test_8') + ret_5 = paddle.allclose( + x_v_5, y_v_5, rtol=0.015, atol=0.0, name='test_8' + ) self.assertEqual(ret_5.numpy()[0], True) def test_dygraph_mode(self): diff --git a/python/paddle/fluid/tests/unittests/test_allclose_op.py b/python/paddle/fluid/tests/unittests/test_allclose_op.py index 26351abe802dce1df013c1992bb30f7cc77fbb32..0eb47aa4441b90be0d396212c7312035a4959c31 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_op.py @@ -19,9 +19,8 @@ import paddle class TestAllcloseOp(OpTest): - def set_args(self): - self.input = np.array([10000., 1e-07]).astype("float32") + self.input = np.array([10000.0, 1e-07]).astype("float32") self.other = np.array([10000.1, 1e-08]).astype("float32") self.rtol = np.array([1e-05]).astype("float64") self.atol = np.array([1e-08]).astype("float64") @@ -35,18 +34,21 @@ class 
TestAllcloseOp(OpTest): 'Input': self.input, 'Other': self.other, "Rtol": self.rtol, - "Atol": self.atol + "Atol": self.atol, } self.attrs = {'equal_nan': self.equal_nan} self.outputs = { - 'Out': - np.array([ - np.allclose(self.inputs['Input'], - self.inputs['Other'], - rtol=self.rtol, - atol=self.atol, - equal_nan=self.equal_nan) - ]) + 'Out': np.array( + [ + np.allclose( + self.inputs['Input'], + self.inputs['Other'], + rtol=self.rtol, + atol=self.atol, + equal_nan=self.equal_nan, + ) + ] + ) } def test_check_output(self): @@ -54,9 +56,7 @@ class TestAllcloseOp(OpTest): class TestAllcloseOpException(TestAllcloseOp): - def test_check_output(self): - def test_rtol_num(): self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64") self.inputs['Atol'] = np.array([1e-08]).astype("float64") @@ -87,9 +87,8 @@ class TestAllcloseOpException(TestAllcloseOp): class TestAllcloseOpSmallNum(TestAllcloseOp): - def set_args(self): - self.input = np.array([10000., 1e-08]).astype("float32") + self.input = np.array([10000.0, 1e-08]).astype("float32") self.other = np.array([10000.1, 1e-09]).astype("float32") self.rtol = np.array([1e-05]).astype("float64") self.atol = np.array([1e-08]).astype("float64") @@ -97,7 +96,6 @@ class TestAllcloseOpSmallNum(TestAllcloseOp): class TestAllcloseOpNanFalse(TestAllcloseOp): - def set_args(self): self.input = np.array([1.0, float('nan')]).astype("float32") self.other = np.array([1.0, float('nan')]).astype("float32") @@ -107,7 +105,6 @@ class TestAllcloseOpNanFalse(TestAllcloseOp): class TestAllcloseOpNanTrue(TestAllcloseOp): - def set_args(self): self.input = np.array([1.0, float('nan')]).astype("float32") self.other = np.array([1.0, float('nan')]).astype("float32") @@ -117,7 +114,6 @@ class TestAllcloseOpNanTrue(TestAllcloseOp): class TestAllcloseDygraph(unittest.TestCase): - def test_api_case(self): paddle.disable_static() x_data = np.random.rand(10, 10) @@ -131,12 +127,11 @@ class TestAllcloseDygraph(unittest.TestCase): class TestAllcloseError(unittest.TestCase): - def test_input_dtype(self): - def test_x_dtype(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') result = paddle.allclose(x, y) @@ -144,8 +139,9 @@ class TestAllcloseError(unittest.TestCase): self.assertRaises(TypeError, test_x_dtype) def test_y_dtype(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32') result = paddle.allclose(x, y) @@ -173,7 +169,6 @@ class TestAllcloseError(unittest.TestCase): class TestAllcloseOpFloat32(TestAllcloseOp): - def set_args(self): self.input = np.array([10.1]).astype("float32") self.other = np.array([10]).astype("float32") @@ -183,7 +178,6 @@ class TestAllcloseOpFloat32(TestAllcloseOp): class TestAllcloseOpFloat64(TestAllcloseOp): - def set_args(self): self.input = np.array([10.1]).astype("float64") self.other = np.array([10]).astype("float64") @@ -193,7 +187,6 @@ class TestAllcloseOpFloat64(TestAllcloseOp): class TestAllcloseOpLargeDimInput(TestAllcloseOp): - def set_args(self): self.input = np.array(np.zeros([2048, 1024])).astype("float64") self.other = 
np.array(np.zeros([2048, 1024])).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_allgather.py b/python/paddle/fluid/tests/unittests/test_allgather.py index 90da7128dcf436ea911bb160091e85c26c1ad54d..89441f394cff744d6e56e529ae942c320e0c91a3 100644 --- a/python/paddle/fluid/tests/unittests/test_allgather.py +++ b/python/paddle/fluid/tests/unittests/test_allgather.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestAllGatherOp(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py b/python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py index b44c294f3acb6f85e85c3cb0c92dfdcadf6a0431..7101d3439f64298897ec5e7bb4b68a04240edd30 100644 --- a/python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py @@ -24,7 +24,6 @@ def check_finite_and_unscale_wrapper(x, scale): class TestCheckFiniteAndUnscaleOp(OpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.python_api = check_finite_and_unscale_wrapper @@ -47,7 +46,6 @@ class TestCheckFiniteAndUnscaleOp(OpTest): class TestCheckFiniteAndUnscaleOpWithNan(OpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() @@ -73,7 +71,6 @@ class TestCheckFiniteAndUnscaleOpWithNan(OpTest): class TestCheckFiniteAndUnscaleOpWithInf(OpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() diff --git a/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py index b5ce42ee7582c4c3988ef12085bb1596add2c89d..3766dc95ab93e17989716d1eb812095edd69b6a3 100644 --- a/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py +++ b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py @@ -17,8 +17,9 @@ import numpy as np from op_test import OpTest -def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, - variances, stride, offset): +def anchor_generator_in_python( + input_feat, anchor_sizes, aspect_ratios, variances, stride, offset +): num_anchors = len(aspect_ratios) * len(anchor_sizes) layer_h = input_feat.shape[2] layer_w = input_feat.shape[3] @@ -42,11 +43,12 @@ def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, scale_h = anchor_size / stride[1] w = scale_w * base_w h = scale_h * base_h - out_anchors[h_idx, w_idx, - idx, :] = [(x_ctr - 0.5 * (w - 1)), - (y_ctr - 0.5 * (h - 1)), - (x_ctr + 0.5 * (w - 1)), - (y_ctr + 0.5 * (h - 1))] + out_anchors[h_idx, w_idx, idx, :] = [ + (x_ctr - 0.5 * (w - 1)), + (y_ctr - 0.5 * (h - 1)), + (x_ctr + 0.5 * (w - 1)), + (y_ctr + 0.5 * (h - 1)), + ] idx += 1 # set the variance. @@ -57,7 +59,6 @@ def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, class TestAnchorGeneratorOp(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() @@ -87,9 +88,9 @@ class TestAnchorGeneratorOp(OpTest): self.layer_h = 2 self.layer_w = 2 - self.anchor_sizes = [64., 128., 256., 512.] - self.aspect_ratios = [0.5, 1., 2.] - self.stride = [16., 16.] 
+ self.anchor_sizes = [64.0, 128.0, 256.0, 512.0] + self.aspect_ratios = [0.5, 1.0, 2.0] + self.stride = [16.0, 16.0] self.offset = 0.5 @@ -97,13 +98,18 @@ class TestAnchorGeneratorOp(OpTest): def init_test_input(self): self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_h, - self.layer_w)).astype('float32') + (self.batch_size, self.input_channels, self.layer_h, self.layer_w) + ).astype('float32') def init_test_output(self): self.out_anchors, self.out_var = anchor_generator_in_python( - self.input, self.anchor_sizes, self.aspect_ratios, self.variances, - self.stride, self.offset) + self.input, + self.anchor_sizes, + self.aspect_ratios, + self.variances, + self.stride, + self.offset, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_angle_op.py b/python/paddle/fluid/tests/unittests/test_angle_op.py index 9d4282150105c4cfff11185dbbeb745b18f62eed..767a274ad322482a4afba28a30cb8aaa8e2fc50b 100644 --- a/python/paddle/fluid/tests/unittests/test_angle_op.py +++ b/python/paddle/fluid/tests/unittests/test_angle_op.py @@ -29,7 +29,7 @@ def angle_grad(x, dout): def angle_grad_element(xi, douti): if xi == 0: return 0 - rsquare = np.abs(xi)**2 + rsquare = np.abs(xi) ** 2 return -douti * xi.imag / rsquare + 1j * douti * xi.real / rsquare return np.vectorize(angle_grad_element)(x, dout) @@ -38,7 +38,6 @@ def angle_grad(x, dout): class TestAngleOpFloat(OpTest): - def setUp(self): self.op_type = "angle" self.python_api = paddle.angle @@ -52,17 +51,17 @@ class TestAngleOpFloat(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[ - angle_grad(self.x, - np.ones_like(self.x) / self.x.size) - ], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[ + angle_grad(self.x, np.ones_like(self.x) / self.x.size) + ], + check_eager=True, + ) class TestAngleOpComplex(OpTest): - def setUp(self): self.op_type = "angle" self.python_api = paddle.angle @@ -78,17 +77,17 @@ class TestAngleOpComplex(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[ - angle_grad(self.x, - np.ones_like(self.x) / self.x.size) - ], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[ + angle_grad(self.x, np.ones_like(self.x) / self.x.size) + ], + check_eager=True, + ) class TestAngleAPI(unittest.TestCase): - def setUp(self): self.x = np.random.randn(2, 3) + 1j * np.random.randn(2, 3) self.out = np.angle(self.x) diff --git a/python/paddle/fluid/tests/unittests/test_apply_pass_to_program.py b/python/paddle/fluid/tests/unittests/test_apply_pass_to_program.py index 54a83d2a5ec7591b4b1a4bb3e94d8c958f658b18..fff492e9176738fcbe4c643b5ca90d03ac65f7bb 100644 --- a/python/paddle/fluid/tests/unittests/test_apply_pass_to_program.py +++ b/python/paddle/fluid/tests/unittests/test_apply_pass_to_program.py @@ -26,9 +26,9 @@ def get_resnet50_model(): main = paddle.static.Program() startup = paddle.static.Program() with paddle.static.program_guard(main, startup): - image = paddle.static.data(name="image", - shape=[None, 3, 224, 224], - dtype="float32") + image = paddle.static.data( + name="image", shape=[None, 3, 224, 224], dtype="float32" + ) label = paddle.static.data(name="label", shape=[None, 1], dtype="int64") model = resnet50() loss_fn = CrossEntropyLoss() @@ -48,7 +48,6 @@ def global_block_contains_op(program, op_type): class TestApplyPassToProgram(unittest.TestCase): - def setUp(self): 
paddle.enable_static() @@ -67,21 +66,23 @@ class TestApplyPassToProgram(unittest.TestCase): "size_t_attr": "size_t", "float32_attr": "float32", } - ret_attrs = _apply_pass(main, startup, "fuse_elewise_add_act_pass", - attrs, attr_types) + ret_attrs = _apply_pass( + main, startup, "fuse_elewise_add_act_pass", attrs, attr_types + ) self.assertEqual(attrs, ret_attrs) self.assertTrue(global_block_contains_op(main, fused_op)) class TestIRPassBase(unittest.TestCase): - def setUp(self): paddle.enable_static() if paddle.is_compiled_with_cuda(): - fluid.set_flags({ - 'FLAGS_cudnn_deterministic': 1, - 'FLAGS_max_inplace_grad_add': 6, - }) + fluid.set_flags( + { + 'FLAGS_cudnn_deterministic': 1, + 'FLAGS_max_inplace_grad_add': 6, + } + ) self.place = paddle.CUDAPlace(0) else: self.place = paddle.CPUPlace() @@ -104,7 +105,8 @@ class TestIRPassBase(unittest.TestCase): self.assertFalse(global_block_contains_op(main, "share_buffer")) self.assertFalse(global_block_contains_op(main, "coalesce_tensor")) self.assertFalse( - global_block_contains_op(main, "fused_elemwise_add_activation")) + global_block_contains_op(main, "fused_elemwise_add_activation") + ) adam_cnt = 0 for op in main.global_block().ops: @@ -119,7 +121,8 @@ class TestIRPassBase(unittest.TestCase): self.assertTrue(global_block_contains_op(main, "coalesce_tensor")) self.assertTrue(global_block_contains_op(main, "depend")) self.assertTrue( - global_block_contains_op(main, "fused_elemwise_add_activation")) + global_block_contains_op(main, "fused_elemwise_add_activation") + ) share_dims_cnt = 0 non_share_dims_cnt = 0 @@ -167,8 +170,9 @@ class TestIRPassBase(unittest.TestCase): setattr(build_strategy, k, v) self.check_before_applied(main2, startup2) - apply_build_strategy(main2, startup2, build_strategy, - {"use_cuda": self.use_cuda}) + apply_build_strategy( + main2, startup2, build_strategy, {"use_cuda": self.use_cuda} + ) self.check_after_applied(main2, startup2) image_shape = [batch_size] + list(image.shape)[1:] @@ -186,22 +190,22 @@ class TestIRPassBase(unittest.TestCase): for idx in range(batch_num): feed = { - image.name: - np.random.rand(*image_shape).astype('float32'), - label.name: - np.random.randint(low=0, - high=self.num_classes, - size=label_shape, - dtype='int64'), + image.name: np.random.rand(*image_shape).astype('float32'), + label.name: np.random.randint( + low=0, + high=self.num_classes, + size=label_shape, + dtype='int64', + ), } with paddle.static.scope_guard(scope1): - loss_value1 = self.executor.run(main1, - feed=feed, - fetch_list=[loss1])[0] + loss_value1 = self.executor.run( + main1, feed=feed, fetch_list=[loss1] + )[0] with paddle.static.scope_guard(scope2): - loss_value2 = self.executor.run(main2, - feed=feed, - fetch_list=[loss2])[0] + loss_value2 = self.executor.run( + main2, feed=feed, fetch_list=[loss2] + )[0] self.assertEqual(loss_value1, loss_value2, "batch {}".format(idx)) diff --git a/python/paddle/fluid/tests/unittests/test_arange.py b/python/paddle/fluid/tests/unittests/test_arange.py index 277b6d67d94d9d13bb794ed4b5025ef636e37870..a7c5c4231d44de9d6ffab654369cae24c5eb105e 100644 --- a/python/paddle/fluid/tests/unittests/test_arange.py +++ b/python/paddle/fluid/tests/unittests/test_arange.py @@ -21,20 +21,19 @@ from op_test import OpTest class TestArangeOp(OpTest): - def setUp(self): self.op_type = "range" self.init_config() self.inputs = { 'Start': np.array([self.case[0]]).astype(self.dtype), 'End': np.array([self.case[1]]).astype(self.dtype), - 'Step': np.array([self.case[2]]).astype(self.dtype) + 'Step': 
np.array([self.case[2]]).astype(self.dtype), } self.outputs = { - 'Out': - np.arange(self.case[0], self.case[1], - self.case[2]).astype(self.dtype) + 'Out': np.arange(self.case[0], self.case[1], self.case[2]).astype( + self.dtype + ) } def init_config(self): @@ -46,48 +45,45 @@ class TestArangeOp(OpTest): class TestFloatArangeOp(TestArangeOp): - def init_config(self): self.dtype = np.float32 self.case = (0, 5, 1) class TestInt32ArangeOp(TestArangeOp): - def init_config(self): self.dtype = np.int32 self.case = (0, 5, 2) class TestFloat64ArangeOp(TestArangeOp): - def init_config(self): self.dtype = np.float64 self.case = (10, 1, -2) class TestInt64ArangeOp(TestArangeOp): - def init_config(self): self.dtype = np.int64 self.case = (-1, -10, -2) class TestArangeOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): self.assertRaises(TypeError, paddle.arange, 10, dtype='int8') class TestArangeAPI(unittest.TestCase): - def test_out(self): with program_guard(Program(), Program()): x1 = paddle.arange(0, 5, 1, 'float32') - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) out = exe.run(fetch_list=[x1]) @@ -96,10 +92,12 @@ class TestArangeAPI(unittest.TestCase): class TestArangeImperative(unittest.TestCase): - def test_out(self): - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) paddle.disable_static(place) x1 = paddle.arange(0, 5, 1) x2 = paddle.tensor.arange(5) diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py index 05e38f4706e04853790af323315f2063a96b7e9d..6c9d09bbab9df103e4df8dc6dfb6594b41442fb4 100644 --- a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py @@ -23,7 +23,6 @@ from test_attribute_var import UnittestBase class BaseTestCase(OpTest): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4, 5) @@ -45,7 +44,6 @@ class BaseTestCase(OpTest): class TestCase0(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -54,7 +52,6 @@ class TestCase0(BaseTestCase): class TestCase1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -63,7 +60,6 @@ class TestCase1(BaseTestCase): class TestCase2(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -71,10 +67,10 @@ class TestCase2(BaseTestCase): self.axis = 0 -@unittest.skipIf(not paddle.is_compiled_with_cuda(), - "FP16 test runs only on GPU") +@unittest.skipIf( + not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU" +) class TestCase0FP16(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4, 5) @@ -82,10 +78,10 @@ class TestCase0FP16(BaseTestCase): self.axis = 0 -@unittest.skipIf(not paddle.is_compiled_with_cuda(), - "FP16 test runs only on GPU") +@unittest.skipIf( + not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU" +) class TestCase1FP16(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (3, 4) @@ -94,7 +90,6 @@ class TestCase1FP16(BaseTestCase): class TestCase2_1(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (3, 4) @@ -103,33 
+98,29 @@ class TestCase2_1(BaseTestCase): class TestCase3(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.dtype = 'int64' self.axis = 0 class TestCase4(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_min' - self.dims = (1, ) + self.dims = (1,) self.dtype = 'int32' self.axis = 0 class TestCase3_(BaseTestCase): - def initTestCase(self): self.op_type = 'arg_max' - self.dims = (3, ) + self.dims = (3,) self.axis = 0 class BaseTestComplex1_1(OpTest): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (4, 5, 6) @@ -153,7 +144,6 @@ class BaseTestComplex1_1(OpTest): class BaseTestComplex1_2(OpTest): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (4, 5, 6) @@ -177,7 +167,6 @@ class BaseTestComplex1_2(OpTest): class BaseTestComplex2_1(OpTest): - def initTestCase(self): self.op_type = 'arg_max' self.dims = (4, 5, 6) @@ -193,20 +182,19 @@ class BaseTestComplex2_1(OpTest): self.attrs = {'keep_dims': True} if self.op_type == "arg_min": self.outputs = { - 'Out': - np.argmin(self.x, - axis=self.axis).asdtype("int32").reshape(4, 5, 1) + 'Out': np.argmin(self.x, axis=self.axis) + .asdtype("int32") + .reshape(4, 5, 1) } else: self.outputs = { - 'Out': - np.argmax(self.x, - axis=self.axis).asdtype("int32").reshape(4, 5, 1) + 'Out': np.argmax(self.x, axis=self.axis) + .asdtype("int32") + .reshape(4, 5, 1) } class BaseTestComplex2_2(OpTest): - def initTestCase(self): self.op_type = 'arg_min' self.dims = (4, 5, 6) @@ -222,20 +210,19 @@ class BaseTestComplex2_2(OpTest): self.attrs = {'keep_dims': True} if self.op_type == "arg_min": self.outputs = { - 'Out': - np.argmin(self.x, - axis=self.axis).asdtype("int32").reshape(4, 5, 1) + 'Out': np.argmin(self.x, axis=self.axis) + .asdtype("int32") + .reshape(4, 5, 1) } else: self.outputs = { - 'Out': - np.argmax(self.x, - axis=self.axis).asdtype("int32").reshape(4, 5, 1) + 'Out': np.argmax(self.x, axis=self.axis) + .asdtype("int32") + .reshape(4, 5, 1) } class TestArgMaxTensorAxis(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.x = [np.random.randn(*shape) for shape in self.shapes] @@ -259,8 +246,9 @@ class TestArgMaxTensorAxis(UnittestBase): exe = paddle.static.Executor() exe.run(starup_prog) res = exe.run(fetch_list=[feat, out]) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) gt = np.argmax(res[0], 0) np.testing.assert_allclose(res[1], gt) @@ -282,7 +270,6 @@ class TestArgMaxTensorAxis(UnittestBase): class TestArgMinTensorAxis(TestArgMaxTensorAxis): - def test_static(self): main_prog = Program() starup_prog = Program() @@ -301,8 +288,9 @@ class TestArgMinTensorAxis(TestArgMaxTensorAxis): exe = paddle.static.Executor() exe.run(starup_prog) res = exe.run(fetch_list=[feat, out]) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) gt = np.argmin(res[0], 1) np.testing.assert_allclose(np.squeeze(res[1]), gt) diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_v2_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_v2_op.py index b59a4d169982b6dd4e8533f5328ffc4e17fbfbe4..a6f3fee21095d5871287f49cbb774282d0d6e794 100644 --- a/python/paddle/fluid/tests/unittests/test_arg_min_max_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_v2_op.py @@ -22,9 +22,7 @@ from paddle.fluid import Program, program_guard 
def create_kernel_case(op_type, numpy_op_type): - class ArgMinMaxKernelBaseCase(OpTest): - def initTestCase(self): self.op_type = op_type self.numpy_op_type = numpy_op_type @@ -35,7 +33,7 @@ def create_kernel_case(op_type, numpy_op_type): self.initTestCase() self.dims = (4, 5, 6) self.dtype = "float64" - self.x = (1000 * np.random.random(self.dims).astype(self.dtype)) + self.x = 1000 * np.random.random(self.dims).astype(self.dtype) self.inputs = {'X': self.x} self.attrs = {"axis": self.axis} self.numpy_op = eval("np.%s" % (numpy_op_type)) @@ -46,40 +44,35 @@ def create_kernel_case(op_type, numpy_op_type): self.check_output() class ArgMinMaxKernelCase0(ArgMinMaxKernelBaseCase): - def initTestCase(self): self.op_type = op_type self.numpy_op_type = numpy_op_type self.axis = 1 class ArgMinMaxKernelCase1(ArgMinMaxKernelBaseCase): - def initTestCase(self): self.op_type = op_type self.numpy_op_type = numpy_op_type self.axis = 2 class ArgMinMaxKernelCase2(ArgMinMaxKernelBaseCase): - def initTestCase(self): self.op_type = op_type self.numpy_op_type = numpy_op_type self.axis = -1 class ArgMinMaxKernelCase3(ArgMinMaxKernelBaseCase): - def initTestCase(self): self.op_type = op_type self.numpy_op_type = numpy_op_type self.axis = -2 class ArgMinMaxKernelCase4(ArgMinMaxKernelBaseCase): - def setUp(self): self.initTestCase() self.dims = (4, 5, 6) self.dtype = "float64" - self.x = (1000 * np.random.random(self.dims).astype(self.dtype)) + self.x = 1000 * np.random.random(self.dims).astype(self.dtype) self.inputs = {'X': self.x} self.attrs = {"axis": self.axis, "keepdims": True} self.numpy_op = eval("np.%s" % (numpy_op_type)) @@ -88,12 +81,11 @@ def create_kernel_case(op_type, numpy_op_type): } class ArgMinMaxKernelCase5(ArgMinMaxKernelBaseCase): - def setUp(self): self.initTestCase() - self.dims = (4) + self.dims = 4 self.dtype = "float64" - self.x = (1000 * np.random.random(self.dims).astype(self.dtype)) + self.x = 1000 * np.random.random(self.dims).astype(self.dtype) self.inputs = {'X': self.x} self.attrs = {"axis": self.axis, "flatten": True} self.numpy_op = eval("np.%s" % (numpy_op_type)) @@ -102,12 +94,11 @@ def create_kernel_case(op_type, numpy_op_type): } class ArgMinMaxKernelCase6(ArgMinMaxKernelBaseCase): - def setUp(self): self.initTestCase() - self.dims = (4) + self.dims = 4 self.dtype = "float64" - self.x = (1000 * np.random.random(self.dims).astype(self.dtype)) + self.x = 1000 * np.random.random(self.dims).astype(self.dtype) self.inputs = {'X': self.x} self.attrs = {"axis": self.axis, "flatten": True, "keepdims": True} self.numpy_op = eval("np.%s" % (numpy_op_type)) @@ -153,9 +144,7 @@ for op_type, numpy_op_type in zip(['arg_max', 'arg_min'], ['argmax', 'argmin']): def create_test_case(op_type): - class ArgMaxMinTestCase(unittest.TestCase): - def setUp(self): np.random.seed(123) self.input_data = np.random.rand(10, 10).astype("float32") @@ -169,61 +158,67 @@ def create_test_case(op_type): def run_static(self, place): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - data_var = paddle.static.data(name="data", - shape=[10, 10], - dtype="float32") + data_var = paddle.static.data( + name="data", shape=[10, 10], dtype="float32" + ) op = eval("paddle.%s" % (op_type)) result = op(data_var) exe = paddle.static.Executor(place) - result_data = exe.run(feed={"data": self.input_data}, - fetch_list=[result]) + result_data = exe.run( + feed={"data": self.input_data}, fetch_list=[result] + ) expected_data = self.numpy_op(self.input_data) - self.assertTrue((result_data == 
np.array(expected_data)).all(), - True) + self.assertTrue( + (result_data == np.array(expected_data)).all(), True + ) with paddle.static.program_guard(paddle.static.Program()): - data_var = paddle.static.data(name="data", - shape=[10, 10], - dtype="float32") + data_var = paddle.static.data( + name="data", shape=[10, 10], dtype="float32" + ) op = eval("paddle.%s" % (op_type)) result = op(data_var, axis=1) exe = paddle.static.Executor(place) - result_data = exe.run(feed={"data": self.input_data}, - fetch_list=[result]) + result_data = exe.run( + feed={"data": self.input_data}, fetch_list=[result] + ) expected_data = self.numpy_op(self.input_data, axis=1) self.assertTrue((result_data == expected_data).all(), True) with paddle.static.program_guard(paddle.static.Program()): - data_var = paddle.static.data(name="data", - shape=[10, 10], - dtype="float32") + data_var = paddle.static.data( + name="data", shape=[10, 10], dtype="float32" + ) op = eval("paddle.%s" % (op_type)) result = op(data_var, axis=-1) exe = paddle.static.Executor(place) - result_data = exe.run(feed={"data": self.input_data}, - fetch_list=[result]) + result_data = exe.run( + feed={"data": self.input_data}, fetch_list=[result] + ) expected_data = self.numpy_op(self.input_data, axis=-1) self.assertTrue((result_data == expected_data).all(), True) with paddle.static.program_guard(paddle.static.Program()): - data_var = paddle.static.data(name="data", - shape=[10, 10], - dtype="float32") + data_var = paddle.static.data( + name="data", shape=[10, 10], dtype="float32" + ) op = eval("paddle.%s" % (op_type)) result = op(data_var, axis=-1, keepdim=True) exe = paddle.static.Executor(place) - result_data = exe.run(feed={"data": self.input_data}, - fetch_list=[result]) + result_data = exe.run( + feed={"data": self.input_data}, fetch_list=[result] + ) expected_data = self.numpy_op(self.input_data, axis=-1).reshape( - (10, 1)) + (10, 1) + ) self.assertTrue((result_data == expected_data).all(), True) with paddle.static.program_guard(paddle.static.Program()): op = eval("paddle.%s" % (op_type)) - data_var = paddle.static.data(name="data", - shape=[10, 10], - dtype="float32") + data_var = paddle.static.data( + name="data", shape=[10, 10], dtype="float32" + ) result = op(data_var, axis=-1, name="test_arg_api") self.assertTrue("test_arg_api" in result.name) @@ -232,28 +227,28 @@ def create_test_case(op_type): op = eval("paddle.%s" % (op_type)) data_tensor = paddle.to_tensor(self.input_data) - #case 1 + # case 1 result_data = op(data_tensor) excepted_data = self.numpy_op(self.input_data) self.assertTrue((result_data.numpy() == excepted_data).all(), True) - #case 2 + # case 2 result_data = op(data_tensor, axis=1) excepted_data = self.numpy_op(self.input_data, axis=1) self.assertTrue((result_data.numpy() == excepted_data).all(), True) - #case 3 + # case 3 result_data = op(data_tensor, axis=-1) excepted_data = self.numpy_op(self.input_data, axis=-1) self.assertTrue((result_data.numpy() == excepted_data).all(), True) - #case 4 + # case 4 result_data = op(data_tensor, axis=-1, keepdim=True) excepted_data = self.numpy_op(self.input_data, axis=-1) excepted_data = excepted_data.reshape((10, 1)) self.assertTrue((result_data.numpy() == excepted_data).all(), True) - #case 5 + # case 5 result_data = op(data_tensor, axis=-1, keepdim=True, dtype="int32") self.assertTrue(result_data.numpy().dtype == np.int32) @@ -288,7 +283,6 @@ for op_type in ['argmin', 'argmax']: class TestArgMinMaxOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with 
program_guard(Program(), Program()): @@ -306,49 +300,49 @@ class TestArgMinMaxOpError(unittest.TestCase): self.assertRaises(TypeError, test_argmin_x_type) def test_argmax_attr_type(): - data = paddle.static.data(name="test_argmax", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmax", shape=[10], dtype="float32" + ) output = paddle.argmax(x=data, dtype="float32") self.assertRaises(TypeError, test_argmax_attr_type) def test_argmin_attr_type(): - data = paddle.static.data(name="test_argmax", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmax", shape=[10], dtype="float32" + ) output = paddle.argmin(x=data, dtype="float32") self.assertRaises(TypeError, test_argmin_attr_type) def test_argmax_axis_type(): - data = paddle.static.data(name="test_argmax", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmax", shape=[10], dtype="float32" + ) output = paddle.argmax(x=data, axis=1.2) self.assertRaises(TypeError, test_argmax_axis_type) def test_argmin_axis_type(): - data = paddle.static.data(name="test_argmin", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmin", shape=[10], dtype="float32" + ) output = paddle.argmin(x=data, axis=1.2) self.assertRaises(TypeError, test_argmin_axis_type) def test_argmax_dtype_type(): - data = paddle.static.data(name="test_argmax", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmax", shape=[10], dtype="float32" + ) output = paddle.argmax(x=data, dtype=None) self.assertRaises(ValueError, test_argmax_dtype_type) def test_argmin_dtype_type(): - data = paddle.static.data(name="test_argmin", - shape=[10], - dtype="float32") + data = paddle.static.data( + name="test_argmin", shape=[10], dtype="float32" + ) output = paddle.argmin(x=data, dtype=None) self.assertRaises(ValueError, test_argmin_dtype_type) diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index d9106e26627be5ef3e24d5ced4b54d0cde035f43..d8df9fefbbd43831a4c1f7837b7826ae16637110 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -26,7 +26,6 @@ np.random.seed(123) class PyArgsort(object): - def __init__(self, input_shape, axis, descending, dtype): self.x = np.random.random(input_shape).astype(dtype) self.label = np.random.random(input_shape).astype(dtype) @@ -39,17 +38,21 @@ class PyArgsort(object): def forward(self): if self.descending: self.indices = np.flip( - np.argsort(self.x, kind='quicksort', axis=self.axis), self.axis) + np.argsort(self.x, kind='quicksort', axis=self.axis), self.axis + ) self.sorted_x = np.flip( - np.sort(self.x, kind='quicksort', axis=self.axis), self.axis) + np.sort(self.x, kind='quicksort', axis=self.axis), self.axis + ) else: self.indices = np.argsort(self.x, kind='quicksort', axis=self.axis) self.sorted_x = np.sort(self.x, kind='quicksort', axis=self.axis) self.loss = self.sorted_x * self.label self.loss = np.sum(self.loss) - out = (np.array(self.indices, dtype=self.indices.dtype), - np.array(self.sorted_x, dtype=self.sorted_x.dtype), - np.array([self.loss], dtype=self.loss.dtype)) + out = ( + np.array(self.indices, dtype=self.indices.dtype), + np.array(self.sorted_x, dtype=self.sorted_x.dtype), + np.array([self.loss], dtype=self.loss.dtype), + ) return out @@ -60,7 +63,6 @@ def create_tensor(np_data, place): class TestArgsortOpCPU(unittest.TestCase): - def setup_program(self): 
self.main_program = Program() self.startup_program = Program() @@ -76,19 +78,21 @@ class TestArgsortOpCPU(unittest.TestCase): self.feed_data_field = {"x", "label"} self.grad_data_field = {"x"} - self.py_argsort = PyArgsort(self.input_shape, self.axis, - self.descending, self.dtype) + self.py_argsort = PyArgsort( + self.input_shape, self.axis, self.descending, self.dtype + ) with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.layers.data(name="x", - shape=self.input_shape, - dtype=self.dtype) + x = fluid.layers.data( + name="x", shape=self.input_shape, dtype=self.dtype + ) x.stop_gradient = False - label = fluid.layers.data(name="label", - shape=self.input_shape, - dtype=self.dtype) + label = fluid.layers.data( + name="label", shape=self.input_shape, dtype=self.dtype + ) self.sorted_x, self.index = fluid.layers.argsort( - input=x, axis=self.axis, descending=self.descending) + input=x, axis=self.axis, descending=self.descending + ) self.sorted_x.stop_gradient = False loss = fluid.layers.elementwise_mul(self.sorted_x, label) self.loss = fluid.layers.reduce_sum(loss) @@ -99,9 +103,11 @@ class TestArgsortOpCPU(unittest.TestCase): for x in self.feed_data_field } exe = Executor(self.place) - out = exe.run(self.main_program, - feed=self.feed_map, - fetch_list=[self.index, self.sorted_x, self.loss]) + out = exe.run( + self.main_program, + feed=self.feed_map, + fetch_list=[self.index, self.sorted_x, self.loss], + ) return out def backward(self): @@ -114,10 +120,12 @@ class TestArgsortOpCPU(unittest.TestCase): for x in self.grad_data_field ] exe = Executor(self.place) - out = exe.run(self.main_program, - feed=self.feed_map, - fetch_list=fetch_list, - return_numpy=False) + out = exe.run( + self.main_program, + feed=self.feed_map, + fetch_list=fetch_list, + return_numpy=False, + ) return out def test_backward(self, numeric_grad_delta=1e-5, max_relative_error=1e-7): @@ -129,23 +137,22 @@ class TestArgsortOpCPU(unittest.TestCase): ana_grad = [np.array(x) for x in self.backward()] num_grad = self.get_numerical_gradient(delta=numeric_grad_delta) - self.assert_is_close(num_grad, - ana_grad, - 'x', - max_relative_error=max_relative_error, - msg_prefix="Gradient Check On %s" % - str(self.place)) + self.assert_is_close( + num_grad, + ana_grad, + 'x', + max_relative_error=max_relative_error, + msg_prefix="Gradient Check On %s" % str(self.place), + ) def check_forward(self): pd_outputs = self.forward() py_outputs = self.py_argsort.forward() for pd_output, py_output in zip(pd_outputs, py_outputs): self.assertEqual(pd_output.shape, py_output.shape) - np.testing.assert_allclose(pd_output, - py_output, - rtol=1e-05, - atol=0, - equal_nan=False) + np.testing.assert_allclose( + pd_output, py_output, rtol=1e-05, atol=0, equal_nan=False + ) def get_numerical_gradient(self, delta=1e-7): if self.dtype == 'float16': @@ -167,8 +174,14 @@ class TestArgsortOpCPU(unittest.TestCase): return grad_list - def assert_is_close(self, numeric_grads, analytic_grads, names, - max_relative_error, msg_prefix): + def assert_is_close( + self, + numeric_grads, + analytic_grads, + names, + max_relative_error, + msg_prefix, + ): for a, b, name in zip(numeric_grads, analytic_grads, names): abs_a = np.abs(a) abs_a[abs_a < 1e-3] = 1 @@ -178,10 +191,19 @@ class TestArgsortOpCPU(unittest.TestCase): def err_msg(): offset = np.argmax(diff_mat > max_relative_error) - return ("%s error, %s variable %s max gradient diff %f over limit %f, " - "the first error element is %d, expected %f, but got %f.") \ - % ('argsort', 
msg_prefix, name, max_diff, max_relative_error, - offset, a.flatten()[offset], b.flatten()[offset]) + return ( + "%s error, %s variable %s max gradient diff %f over limit %f, " + "the first error element is %d, expected %f, but got %f." + ) % ( + 'argsort', + msg_prefix, + name, + max_diff, + max_relative_error, + offset, + a.flatten()[offset], + b.flatten()[offset], + ) self.assertLessEqual(max_diff, max_relative_error, err_msg()) @@ -202,7 +224,6 @@ class TestArgsortOpCPU(unittest.TestCase): class TestArgsortOpGPU(TestArgsortOpCPU): - def init_place(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -211,144 +232,120 @@ class TestArgsortOpGPU(TestArgsortOpCPU): class TestArgsortOpAxis0CPU(TestArgsortOpCPU): - def init_axis(self): self.axis = 0 class TestArgsortOpAxis0GPU(TestArgsortOpGPU): - def init_axis(self): self.axis = 0 class TestArgsortOpAxis1CPU(TestArgsortOpCPU): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis1GPU(TestArgsortOpGPU): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis2CPU(TestArgsortOpCPU): - def init_axis(self): self.axis = 2 class TestArgsortOpAxis2GPU(TestArgsortOpGPU): - def init_axis(self): self.axis = 2 class TestArgsortOpAxisNeg1CPU(TestArgsortOpCPU): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg1GPU(TestArgsortOpGPU): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg2CPU(TestArgsortOpCPU): - def init_axis(self): self.axis = -2 class TestArgsortOpAxisNeg2GPU(TestArgsortOpGPU): - def init_axis(self): self.axis = -2 class TestArgsortOpDescendingAxisCPU(TestArgsortOpCPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisGPU(TestArgsortOpGPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0CPU(TestArgsortOpAxis0CPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0GPU(TestArgsortOpAxis0GPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1CPU(TestArgsortOpAxis1CPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1GPU(TestArgsortOpAxis1GPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2CPU(TestArgsortOpAxis2CPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2GPU(TestArgsortOpAxis2GPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1CPU(TestArgsortOpAxisNeg1CPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1GPU(TestArgsortOpAxisNeg1GPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2CPU(TestArgsortOpAxisNeg2CPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2GPU(TestArgsortOpAxisNeg2GPU): - def init_direction(self): self.descending = True class TestArgsortErrorOnCPU(unittest.TestCase): - def setUp(self): self.place = core.CPUPlace() def test_error(self): - def test_fluid_var_type(): with fluid.program_guard(fluid.Program()): x = [1] @@ -363,7 +360,6 @@ class TestArgsortErrorOnCPU(unittest.TestCase): class TestArgsortErrorOnGPU(TestArgsortErrorOnCPU): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -372,7 +368,6 @@ class TestArgsortErrorOnGPU(TestArgsortErrorOnCPU): class TestArgsort(unittest.TestCase): - def init(self): self.input_shape = [ 10000, @@ -389,16 +384,17 @@ class TestArgsort(unittest.TestCase): def 
test_api(self): with fluid.program_guard(fluid.Program()): - input = fluid.data(name="input", - shape=self.input_shape, - dtype="float64") + input = fluid.data( + name="input", shape=self.input_shape, dtype="float64" + ) output = paddle.argsort(input, axis=self.axis) output2 = paddle.argsort(input, axis=self.axis, descending=True) exe = fluid.Executor(self.place) - result, result2 = exe.run(feed={'input': self.data}, - fetch_list=[output, output2]) + result, result2 = exe.run( + feed={'input': self.data}, fetch_list=[output, output2] + ) np_result = np.argsort(self.data, axis=self.axis) self.assertEqual((result == np_result).all(), True) @@ -408,28 +404,24 @@ class TestArgsort(unittest.TestCase): class TestArgsort2(TestArgsort): - def init(self): self.input_shape = [10000, 1] self.axis = 0 class TestArgsort3(TestArgsort): - def init(self): self.input_shape = [1, 10000] self.axis = 1 class TestArgsort4(TestArgsort): - def init(self): self.input_shape = [2, 3, 4] self.axis = 1 class TestArgsortImperative(unittest.TestCase): - def init(self): self.input_shape = [ 10000, @@ -459,28 +451,24 @@ class TestArgsortImperative(unittest.TestCase): class TestArgsortImperative2(TestArgsortImperative): - def init(self): self.input_shape = [10000, 1] self.axis = 0 class TestArgsortImperative3(TestArgsortImperative): - def init(self): self.input_shape = [1, 10000] self.axis = 1 class TestArgsortImperative4(TestArgsortImperative): - def init(self): self.input_shape = [2, 3, 4] self.axis = 1 class TestArgsortWithInputNaN(unittest.TestCase): - def init(self): self.axis = 0 diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index 4c1c7563d8b3d5dcc9c2ae0e29154016247e8897..f5f54017d9983a9c0d420bceaeed3a002e9e6cbf 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -58,12 +58,11 @@ def _test_read_write(x): class TestArrayReadWrite(unittest.TestCase): - def test_read_write(self): x = [ layers.data(name='x0', shape=[100]), layers.data(name='x1', shape=[100]), - layers.data(name='x2', shape=[100]) + layers.data(name='x2', shape=[100]), ] for each_x in x: each_x.stop_gradient = False @@ -73,13 +72,11 @@ class TestArrayReadWrite(unittest.TestCase): place = core.CPUPlace() exe = Executor(place) - outs = exe.run(feed={ - 'x0': tensor, - 'x1': tensor, - 'x2': tensor - }, - fetch_list=[a_sum, x_sum], - scope=core.Scope()) + outs = exe.run( + feed={'x0': tensor, 'x1': tensor, 'x2': tensor}, + fetch_list=[a_sum, x_sum], + scope=core.Scope(), + ) self.assertEqual(outs[0], outs[1]) total_sum = layers.sums(input=[a_sum, x_sum]) @@ -88,15 +85,17 @@ class TestArrayReadWrite(unittest.TestCase): append_backward(total_sum_scaled) g_vars = list( - map(default_main_program().global_block().var, - [each_x.name + "@GRAD" for each_x in x])) + map( + default_main_program().global_block().var, + [each_x.name + "@GRAD" for each_x in x], + ) + ) g_out = [ - item.sum() for item in exe.run(feed={ - 'x0': tensor, - 'x1': tensor, - 'x2': tensor - }, - fetch_list=g_vars) + item.sum() + for item in exe.run( + feed={'x0': tensor, 'x1': tensor, 'x2': tensor}, + fetch_list=g_vars, + ) ] g_out_sum = np.array(g_out).sum() @@ -116,9 +115,11 @@ class TestArrayReadWrite(unittest.TestCase): self.assertEqual(a_sum_dygraph, x_sum_dygraph) total_sum_dygraph = layers.sums( - input=[a_sum_dygraph, x_sum_dygraph]) - total_sum_scaled_dygraph = 
layers.scale(x=total_sum_dygraph, - scale=1 / 6.0) + input=[a_sum_dygraph, x_sum_dygraph] + ) + total_sum_scaled_dygraph = layers.scale( + x=total_sum_dygraph, scale=1 / 6.0 + ) total_sum_scaled_dygraph.backward() g_out_dygraph = [ item._grad_ivar().numpy().sum() for item in x_dygraph @@ -129,40 +130,33 @@ class TestArrayReadWrite(unittest.TestCase): class TestArrayReadWriteOpError(unittest.TestCase): - def _test_errors(self, use_fluid_api=True): if use_fluid_api: with program_guard(Program(), Program()): x1 = np.random.randn(2, 4).astype('int32') - x2 = fluid.layers.fill_constant(shape=[1], - dtype='int32', - value=1) + x2 = fluid.layers.fill_constant( + shape=[1], dtype='int32', value=1 + ) x3 = np.random.randn(2, 4).astype('int32') - self.assertRaises(TypeError, - fluid.layers.array_read, - array=x1, - i=x2) - self.assertRaises(TypeError, - fluid.layers.array_write, - array=x1, - i=x2, - out=x3) + self.assertRaises( + TypeError, fluid.layers.array_read, array=x1, i=x2 + ) + self.assertRaises( + TypeError, fluid.layers.array_write, array=x1, i=x2, out=x3 + ) else: with program_guard(Program(), Program()): x1 = np.random.randn(2, 4).astype('int32') x2 = paddle.ones(shape=[1], dtype='int32') x3 = np.random.randn(2, 4).astype('int32') - self.assertRaises(TypeError, - paddle.tensor.array_read, - array=x1, - i=x2) - self.assertRaises(TypeError, - paddle.tensor.array_write, - array=x1, - i=x2, - out=x3) + self.assertRaises( + TypeError, paddle.tensor.array_read, array=x1, i=x2 + ) + self.assertRaises( + TypeError, paddle.tensor.array_write, array=x1, i=x2, out=x3 + ) def test_fluid_api(self): self._test_errors(use_fluid_api=True) @@ -172,7 +166,6 @@ class TestArrayReadWriteOpError(unittest.TestCase): class TestArrayReadWriteApi(unittest.TestCase): - def test_api(self): paddle.disable_static() arr = paddle.tensor.create_array(dtype="float32") diff --git a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py index 8d8a896a424420de328133b365057b98995dea8e..16b38865593a6002b667d7382e59689719e6b801 100644 --- a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py +++ b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py @@ -18,7 +18,7 @@ import unittest class TestAscendTriggerOP(unittest.TestCase): - """ TestCases for ascend_trigger op""" + """TestCases for ascend_trigger op""" def test_ascend_trigger_op(self): paddle.enable_static() @@ -27,10 +27,12 @@ class TestAscendTriggerOP(unittest.TestCase): with fluid.program_guard(program): x = fluid.data(name='x', shape=[1], dtype='int64', lod_level=0) y = fluid.data(name='y', shape=[1], dtype='int64', lod_level=0) - block.append_op(type="ascend_trigger", - inputs={"FeedList": [x]}, - outputs={"FetchList": [y]}, - attrs={'graph_idx': 0}) + block.append_op( + type="ascend_trigger", + inputs={"FeedList": [x]}, + outputs={"FetchList": [y]}, + attrs={'graph_idx': 0}, + ) exe = paddle.static.Executor(paddle.CPUPlace()) try: diff --git a/python/paddle/fluid/tests/unittests/test_assert_op.py b/python/paddle/fluid/tests/unittests/test_assert_op.py index 481a58b441e555e89c57c036dd44e6a9ce0c3a68..9a91ebca5d89cf8312745a3df6dec3d4eaab0ae8 100644 --- a/python/paddle/fluid/tests/unittests/test_assert_op.py +++ b/python/paddle/fluid/tests/unittests/test_assert_op.py @@ -19,7 +19,6 @@ import unittest class TestAssertOp(unittest.TestCase): - def run_network(self, net_func): main_program = fluid.Program() startup_program = fluid.Program() @@ -29,39 +28,35 @@ class 
TestAssertOp(unittest.TestCase): exe.run(main_program) def test_assert_true(self): - def net_func(): - condition = layers.fill_constant(shape=[1], - dtype='bool', - value=True) + condition = layers.fill_constant( + shape=[1], dtype='bool', value=True + ) layers.Assert(condition, []) self.run_network(net_func) def test_assert_false(self): - def net_func(): - condition = layers.fill_constant(shape=[1], - dtype='bool', - value=False) + condition = layers.fill_constant( + shape=[1], dtype='bool', value=False + ) layers.Assert(condition) with self.assertRaises(ValueError): self.run_network(net_func) def test_assert_cond_numel_error(self): - def net_func(): - condition = layers.fill_constant(shape=[1, 2], - dtype='bool', - value=True) + condition = layers.fill_constant( + shape=[1, 2], dtype='bool', value=True + ) layers.Assert(condition, []) with self.assertRaises(ValueError): self.run_network(net_func) def test_assert_print_data(self): - def net_func(): zero = layers.fill_constant(shape=[1], dtype='int64', value=0) one = layers.fill_constant(shape=[1], dtype='int64', value=1) @@ -73,18 +68,16 @@ class TestAssertOp(unittest.TestCase): self.run_network(net_func) def test_assert_summary(self): - def net_func(): x = layers.fill_constant(shape=[10], dtype='float32', value=2.0) condition = layers.reduce_max(x) < 1.0 - layers.Assert(condition, (x, ), 5) + layers.Assert(condition, (x,), 5) print("test_assert_summary") with self.assertRaises(ValueError): self.run_network(net_func) def test_assert_summary_greater_than_size(self): - def net_func(): x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) condition = layers.reduce_max(x) < 1.0 diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index f1ae5362e085de5322f4acbde23272806f1f831c..a59d9fb6688ff113ed58b7d2a5f9b9a1116490a0 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -27,7 +27,6 @@ import paddle.fluid.layers as layers class TestAssignOp(op_test.OpTest): - def setUp(self): self.python_api = paddle.assign self.op_type = "assign" @@ -53,7 +52,6 @@ class TestAssignOp(op_test.OpTest): class TestAssignFP16Op(op_test.OpTest): - def setUp(self): self.python_api = paddle.assign self.op_type = "assign" @@ -79,7 +77,6 @@ class TestAssignFP16Op(op_test.OpTest): class TestAssignOpWithLoDTensorArray(unittest.TestCase): - def test_assign_LoDTensorArray(self): paddle.enable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -88,9 +85,9 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): with program_guard(main_program): x = fluid.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False - y = fluid.layers.fill_constant(shape=[100, 10], - dtype='float32', - value=1) + y = fluid.layers.fill_constant( + shape=[100, 10], dtype='float32', value=1 + ) z = fluid.layers.elementwise_add(x=x, y=y) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) init_array = fluid.layers.array_write(x=z, i=i) @@ -100,28 +97,33 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): append_backward(mean) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feed_x = np.random.random(size=(100, 10)).astype('float32') ones = np.ones((100, 
10)).astype('float32') feed_add = feed_x + ones - res = exe.run(main_program, - feed={'x': feed_x}, - fetch_list=[sums.name, x.grad_name]) + res = exe.run( + main_program, + feed={'x': feed_x}, + fetch_list=[sums.name, x.grad_name], + ) np.testing.assert_allclose(res[0], feed_add, rtol=1e-05) np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05) paddle.disable_static() class TestAssignOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The type of input must be Variable or numpy.ndarray. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.assign, x1) # When the type of input is numpy.ndarray, the dtype of input must be float32, int32. x2 = np.array([[2.5, 2.5]], dtype='uint8') @@ -130,7 +132,6 @@ class TestAssignOpError(unittest.TestCase): class TestAssignOApi(unittest.TestCase): - def test_assign_LoDTensorArray(self): paddle.enable_static() main_program = Program() @@ -138,9 +139,9 @@ class TestAssignOApi(unittest.TestCase): with program_guard(main_program): x = fluid.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False - y = fluid.layers.fill_constant(shape=[100, 10], - dtype='float32', - value=1) + y = fluid.layers.fill_constant( + shape=[100, 10], dtype='float32', value=1 + ) z = fluid.layers.elementwise_add(x=x, y=y) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) init_array = fluid.layers.array_write(x=z, i=i) @@ -149,15 +150,20 @@ class TestAssignOApi(unittest.TestCase): mean = paddle.mean(sums) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feed_x = np.random.random(size=(100, 10)).astype('float32') ones = np.ones((100, 10)).astype('float32') feed_add = feed_x + ones - res = exe.run(main_program, - feed={'x': feed_x}, - fetch_list=[sums.name, x.grad_name]) + res = exe.run( + main_program, + feed={'x': feed_x}, + fetch_list=[sums.name, x.grad_name], + ) np.testing.assert_allclose(res[0], feed_add, rtol=1e-05) np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05) paddle.disable_static() @@ -225,23 +231,25 @@ class TestAssignOApi(unittest.TestCase): x = paddle.static.data("X", shape=[2, 3]) clone_x = paddle.clone(x) exe = paddle.static.Executor() - y_np = exe.run(paddle.static.default_main_program(), - feed={'X': x_np}, - fetch_list=[clone_x])[0] + y_np = exe.run( + paddle.static.default_main_program(), + feed={'X': x_np}, + fetch_list=[clone_x], + )[0] np.testing.assert_array_equal(y_np, x_np) paddle.disable_static() class TestAssignOpErrorApi(unittest.TestCase): - def test_errors(self): paddle.enable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) with program_guard(Program(), Program()): # The type of input must be Variable or numpy.ndarray. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.assign, x1) # When the type of input is numpy.ndarray, the dtype of input must be float32, int32. 
x2 = np.array([[2.5, 2.5]], dtype='uint8') @@ -259,7 +267,6 @@ class TestAssignOpErrorApi(unittest.TestCase): class TestAssignDoubleGradCheck(unittest.TestCase): - def assign_wrapper(self, x): return paddle.fluid.layers.assign(x[0]) @@ -274,17 +281,13 @@ class TestAssignDoubleGradCheck(unittest.TestCase): out = paddle.fluid.layers.assign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.assign_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -296,7 +299,6 @@ class TestAssignDoubleGradCheck(unittest.TestCase): class TestAssignTripleGradCheck(unittest.TestCase): - def assign_wrapper(self, x): return paddle.fluid.layers.assign(x[0]) @@ -311,17 +313,13 @@ class TestAssignTripleGradCheck(unittest.TestCase): out = paddle.fluid.layers.assign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.assign_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py index 2440d349e0a3a8539f2c38cbf0c9fdf2bb859266..d04fecd7b0500b323349f2b17a517b54604215e8 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py @@ -24,7 +24,7 @@ from paddle.fluid.framework import _test_eager_guard def assign_pos(x, _cum_count): cum_count = np.copy(_cum_count) x = x.reshape(-1) - res = np.zeros((cum_count[-1], ), dtype=np.int64) + res = np.zeros((cum_count[-1],), dtype=np.int64) for i, idx in enumerate(x): p = cum_count[idx] cum_count[idx] -= 1 @@ -34,7 +34,7 @@ def assign_pos(x, _cum_count): def count(x, upper_num): - res = np.zeros((upper_num, )).astype(int) + res = np.zeros((upper_num,)).astype(int) for i in x.reshape(-1): if i >= 0 and i < len(res): res[i] += 1 @@ -62,17 +62,16 @@ def assert_allclose(res, out, cum_count): def get_redefined_allclose(cum_count): - def redefined_allclose(x, y, *args, **kwargs): return assert_allclose(x, y, cum_count) return redefined_allclose -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAssignPosOpInt64(op_test.OpTest): - def setUp(self): x = np.random.randint(0, 16, size=(100, 2)).astype("int64") y = count(x, 16) @@ -81,7 +80,7 @@ class TestAssignPosOpInt64(op_test.OpTest): self.inputs = { 'X': x, "cum_count": cum_count, - "eff_num_len": np.array([cum_count[-1]]) + "eff_num_len": np.array([cum_count[-1]]), } self.outputs = {'Out': assign_pos(x, cum_count)} 
self.cum_count = cum_count @@ -91,10 +90,10 @@ class TestAssignPosOpInt64(op_test.OpTest): self.check_output_with_place(paddle.CUDAPlace(0)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAssignPosAPI(unittest.TestCase): - def setUp(self): self.x = np.random.randint(0, 16, size=(100, 2)).astype("int64") y = count(self.x, 16) @@ -106,16 +105,15 @@ class TestAssignPosAPI(unittest.TestCase): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): x = paddle.fluid.data('x', self.x.shape, dtype="int64") - cum_count = paddle.fluid.data('cum_count', - self.cum_count.shape, - dtype="int64") + cum_count = paddle.fluid.data( + 'cum_count', self.cum_count.shape, dtype="int64" + ) out = utils._assign_pos(x, cum_count) exe = paddle.static.Executor(self.place) - res = exe.run(feed={ - 'x': self.x, - "cum_count": self.cum_count - }, - fetch_list=[out]) + res = exe.run( + feed={'x': self.x, "cum_count": self.cum_count}, + fetch_list=[out], + ) assert_allclose(res[0], self.out, self.cum_count) def func_api_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py index df3becbbd9c40303c9c3b44f172983e5d95c7d99..cac321c2a27d3230b699500a8517bae65fca6003 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestAssignValueOp(op_test.OpTest): - def setUp(self): self.op_type = "assign_value" self.inputs = {} @@ -33,7 +32,8 @@ class TestAssignValueOp(op_test.OpTest): self.init_data() self.attrs["shape"] = self.value.shape self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_( - self.value.dtype) + self.value.dtype + ) self.outputs = {"Out": self.value} def init_data(self): @@ -45,35 +45,36 @@ class TestAssignValueOp(op_test.OpTest): class TestAssignValueOp2(TestAssignValueOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp3(TestAssignValueOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp4(TestAssignValueOp): - def init_data(self): - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) self.attrs["bool_values"] = [int(v) for v in self.value.flat] class TestAssignApi(unittest.TestCase): - def setUp(self): self.init_dtype() self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype( - self.dtype) - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.dtype + ) + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def init_dtype(self): self.dtype = "float32" @@ -91,25 +92,26 @@ class TestAssignApi(unittest.TestCase): class TestAssignApi2(TestAssignApi): - def init_dtype(self): self.dtype = "int32" class TestAssignApi3(TestAssignApi): - def init_dtype(self): self.dtype = "int64" class TestAssignApi4(TestAssignApi): - def setUp(self): self.init_dtype() - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) - self.place = fluid.CUDAPlace( - 0) if 
fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def init_dtype(self): self.dtype = "bool" diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index f46d5303f52a2938d1b3037a238d7921496ef1ca..c56235cd0d12925bba33e7e25b00fb514987941e 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -34,21 +34,26 @@ def convolutional_neural_network(use_py_reader): capacity=64, feed_list=[img, label], iterable=False, - use_double_buffer=False) - - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + use_double_buffer=False, + ) + + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -65,20 +70,30 @@ def test(): place = fluid.CPUPlace() exe = fluid.Executor(place) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=BATCH_SIZE) - - array, img, label, prediction, avg_loss, acc, py_reader = convolutional_neural_network( - use_py_reader=False) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE + ) + + ( + array, + img, + label, + prediction, + avg_loss, + acc, + py_reader, + ) = convolutional_neural_network(use_py_reader=False) feeder = fluid.DataFeeder(feed_list=[img, label], place=place) def train_test(train_test_program, train_test_feed, train_test_reader): acc_set = [] avg_loss_set = [] for test_data in train_test_reader(): - acc_np, avg_loss_np = exe.run(program=train_test_program, - feed=train_test_feed.feed(test_data), - fetch_list=[acc, avg_loss]) + acc_np, avg_loss_np = exe.run( + program=train_test_program, + feed=train_test_feed.feed(test_data), + fetch_list=[acc, avg_loss], + ) acc_set.append(float(acc_np)) avg_loss_set.append(float(avg_loss_np)) # get test acc and loss @@ -90,7 +105,8 @@ def test(): avg_loss_val, acc_val = train_test( train_test_program=fluid.default_main_program(), train_test_reader=test_reader, - train_test_feed=feeder) + train_test_feed=feeder, + ) print("Test: avg_cost: %s, acc: %s" % (avg_loss_val, acc_val)) assert acc_val > 0.96 @@ -101,17 +117,25 @@ def train(use_cuda, thread_num, cpu_num): print("paddle is not compiled with cuda, exit!") return - array, img, label, prediction, avg_loss, acc, py_reader = convolutional_neural_network( - use_py_reader=True) + ( + array, + img, + label, + prediction, + avg_loss, + acc, + py_reader, + ) = convolutional_neural_network(use_py_reader=True) print("build convolutional neural network done.") optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimizer.minimize(avg_loss) print("Adam optimizer minimize done.") 
- train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE, + ) print("declared train reader done.") place = fluid.CPUPlace() @@ -133,11 +157,13 @@ def train(use_cuda, thread_num, cpu_num): exec_strategy.num_iteration_per_run = 10 main_program = fluid.default_main_program() - pe = fluid.ParallelExecutor(use_cuda=False, - loss_name=avg_loss.name, - main_program=main_program, - build_strategy=build_strategy, - exec_strategy=exec_strategy) + pe = fluid.ParallelExecutor( + use_cuda=False, + loss_name=avg_loss.name, + main_program=main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + ) print("declare parallel executor done.") py_reader.set_sample_list_generator(train_reader) @@ -148,15 +174,18 @@ def train(use_cuda, thread_num, cpu_num): try: while True: array_v, acc_v, prediction_v, loss_val = pe.run( - fetch_list=[array, acc, prediction, avg_loss.name]) + fetch_list=[array, acc, prediction, avg_loss.name] + ) assert numpy.allclose(array_v[0], prediction_v) == True assert numpy.allclose(array_v[1], acc_v) == True loss_val = numpy.mean(loss_val) if step % 10 == 0: - print("Pass %d, Batch %d, Cost %f, queue size %d" % - (pass_id, step, loss_val, py_reader.queue.size())) + print( + "Pass %d, Batch %d, Cost %f, queue size %d" + % (pass_id, step, loss_val, py_reader.queue.size()) + ) step += 1 except fluid.core.EOFException: print("train end pass = " + str(pass_id)) @@ -166,24 +195,33 @@ def train(use_cuda, thread_num, cpu_num): class TestAsyncSSAGraphExecutor(unittest.TestCase): - def test_check_async_ssa_exe_train(self): step_list = [] for cpu_num in [1, 2, 4]: print("run cpu_num -> " + str(cpu_num)) with fluid.scope_guard(fluid.core.Scope()): - with fluid.program_guard(main_program=fluid.Program(), - startup_program=fluid.Program()): + with fluid.program_guard( + main_program=fluid.Program(), + startup_program=fluid.Program(), + ): start_time = time.time() - step = train(use_cuda=False, - thread_num=cpu_num, - cpu_num=cpu_num) + step = train( + use_cuda=False, thread_num=cpu_num, cpu_num=cpu_num + ) end_time = time.time() step_list.append(step) - print("cpu_num -> " + str(cpu_num) + " step -> " + str(step) + - " time -> " + str(end_time - start_time)) - with fluid.program_guard(main_program=fluid.Program(), - startup_program=fluid.Program()): + print( + "cpu_num -> " + + str(cpu_num) + + " step -> " + + str(step) + + " time -> " + + str(end_time - start_time) + ) + with fluid.program_guard( + main_program=fluid.Program(), + startup_program=fluid.Program(), + ): test() assert abs(int(step_list[0] / 2) - int(step_list[1])) < 5 assert abs(int(step_list[1] / 2) - int(step_list[2])) < 5 diff --git a/python/paddle/fluid/tests/unittests/test_atan2_op.py b/python/paddle/fluid/tests/unittests/test_atan2_op.py index c0200d3c4a166b3f46c51436958cf19e3b5f62a4..b74aeb9057004abe698fe9c97ea053fad22dca21 100644 --- a/python/paddle/fluid/tests/unittests/test_atan2_op.py +++ b/python/paddle/fluid/tests/unittests/test_atan2_op.py @@ -30,7 +30,6 @@ def atan2_grad(x1, x2, dout): class TestAtan2(OpTest): - def setUp(self): self.op_type = "atan2" self.python_api = paddle.atan2 @@ -54,40 +53,39 @@ class TestAtan2(OpTest): class TestAtan2_float(TestAtan2): - def init_dtype(self): self.dtype = np.float32 def test_check_grad(self): if self.dtype not in [np.int32, np.int64]: - self.check_grad(['X1', 'X2'], - 
'Out', - user_defined_grads=atan2_grad( - self.inputs['X1'], self.inputs['X2'], - 1 / self.inputs['X1'].size), - check_eager=True) + self.check_grad( + ['X1', 'X2'], + 'Out', + user_defined_grads=atan2_grad( + self.inputs['X1'], + self.inputs['X2'], + 1 / self.inputs['X1'].size, + ), + check_eager=True, + ) class TestAtan2_float16(TestAtan2_float): - def init_dtype(self): self.dtype = np.float16 class TestAtan2_int32(TestAtan2_float): - def init_dtype(self): self.dtype = np.int32 class TestAtan2_int64(TestAtan2_float): - def init_dtype(self): self.dtype = np.int64 class TestAtan2API(unittest.TestCase): - def init_dtype(self): self.dtype = 'float64' self.shape = [11, 17] @@ -118,7 +116,6 @@ class TestAtan2API(unittest.TestCase): run(place) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) X1 = paddle.to_tensor(self.x1) diff --git a/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py b/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py index b67ab48494a0f87af30c744637decae67187940f..053912005cc1b21332c5a37f27bcaa1fb9796045 100644 --- a/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py @@ -20,17 +20,18 @@ from test_softmax_op import stable_softmax def attention_lstm( - x, # T x M - lod, # 1 x N - h0, # N x D - c0, # N x D - fcws, # (M+D) x 1, 1x1 - fcbs, # 1 x 1, 1x1 - w, # (M+D) x 4D - b, # 1 x 4D - act_gate, - act_cell, - act_cand): + x, # T x M + lod, # 1 x N + h0, # N x D + c0, # N x D + fcws, # (M+D) x 1, 1x1 + fcbs, # 1 x 1, 1x1 + w, # (M+D) x 4D + b, # 1 x 4D + act_gate, + act_cell, + act_cand, +): T = sum(lod[0]) N = len(lod[0]) @@ -44,8 +45,9 @@ def attention_lstm( start_offset = 0 for bid in range(N): seq_len = lod[0][bid] - xi = np.copy(x[start_offset:start_offset + seq_len, :]).reshape( - seq_len, M) + xi = np.copy(x[start_offset : start_offset + seq_len, :]).reshape( + seq_len, M + ) prev_cell = np.copy(c0[bid]).reshape([1, D]) prev_hidden = np.copy(h0[bid]).reshape([1, D]) for step in range(seq_len): @@ -86,7 +88,6 @@ def attention_lstm( class TestAttentionLSTMOp(OpTest): - def set_conf(self): pass @@ -117,14 +118,24 @@ class TestAttentionLSTMOp(OpTest): fcb2 = np.random.normal(size=(1, 1)).astype('float32') # lstm weight and bias - w = np.random.normal(size=(self.M + self.D, - self.D * 4)).astype('float32') + w = np.random.normal(size=(self.M + self.D, self.D * 4)).astype( + 'float32' + ) b = np.random.normal(size=(1, self.D * 4)).astype('float32') - h, c = attention_lstm(x, self.lod, h0, c0, [fcw1, fcw2], [fcb1, fcb2], - w, b, ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand]) + h, c = attention_lstm( + x, + self.lod, + h0, + c0, + [fcw1, fcw2], + [fcb1, fcb2], + w, + b, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) self.inputs = { 'X': (x, self.lod), @@ -134,7 +145,7 @@ class TestAttentionLSTMOp(OpTest): 'AttentionScalar': fcw2, 'AttentionScalarBias': fcb2, 'LSTMWeight': w, - 'LSTMBias': b + 'LSTMBias': b, } if self.has_initial_hidden: @@ -147,7 +158,7 @@ class TestAttentionLSTMOp(OpTest): self.attrs = { 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, - 'candidate_activation': self.act_cand + 'candidate_activation': self.act_cand, } def test_check_output(self): @@ -155,13 +166,11 @@ class TestAttentionLSTMOp(OpTest): class TestAttentionOpNonInit(TestAttentionLSTMOp): - def set_conf(self): self.has_initial_hidden = False class 
TestAttentionOpAct(TestAttentionLSTMOp): - def set_conf(self): self.M = 3 self.D = 2 @@ -171,28 +180,24 @@ class TestAttentionOpAct(TestAttentionLSTMOp): class TestAttentionOpMD1(TestAttentionLSTMOp): - def set_conf(self): self.M = 36 self.D = 8 class TestAttentionOpMD2(TestAttentionLSTMOp): - def set_conf(self): self.M = 8 self.D = 8 class TestAttentionOpMD3(TestAttentionLSTMOp): - def set_conf(self): self.M = 15 self.D = 30 class TestAttentionOpBS1(TestAttentionLSTMOp): - def set_conf(self): self.lod = [[5]] self.M = 16 @@ -200,13 +205,11 @@ class TestAttentionOpBS1(TestAttentionLSTMOp): class TestAttentionOpBS2(TestAttentionLSTMOp): - def set_conf(self): self.lod = [[3, 6]] class TestAttentionOpBS5(TestAttentionLSTMOp): - def set_conf(self): self.lod = [[3, 2, 4, 7, 5]] diff --git a/python/paddle/fluid/tests/unittests/test_attribute_var.py b/python/paddle/fluid/tests/unittests/test_attribute_var.py index 6e8e3c6675087c7df463b304bd3834a461689e05..60d202eb1b589cc42a59b867dce0c5289f9c420f 100644 --- a/python/paddle/fluid/tests/unittests/test_attribute_var.py +++ b/python/paddle/fluid/tests/unittests/test_attribute_var.py @@ -25,7 +25,6 @@ paddle.enable_static() class UnittestBase(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.init_info() @@ -41,8 +40,9 @@ class UnittestBase(unittest.TestCase): return type(self).__name__ def infer_prog(self): - config = paddle_infer.Config(self.save_path + '.pdmodel', - self.save_path + '.pdiparams') + config = paddle_infer.Config( + self.save_path + '.pdmodel', self.save_path + '.pdiparams' + ) predictor = paddle_infer.create_predictor(config) input_names = predictor.get_input_names() for i, shape in enumerate(self.shapes): @@ -65,7 +65,6 @@ class UnittestBase(unittest.TestCase): class TestDropout(UnittestBase): - def init_info(self): self.shapes = [[10, 10]] self.save_path = os.path.join(self.temp_dir.name, 'dropout') @@ -98,11 +97,11 @@ class TestDropout(UnittestBase): self.assertEqual( main_prog.block(0).ops[4].all_attrs()['dropout_prob'].name, - p.name) + p.name, + ) class TestTileTensorList(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, 'tile_tensors') @@ -136,7 +135,6 @@ class TestTileTensorList(UnittestBase): class TestTileTensor(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, 'tile_tensor') @@ -169,13 +167,12 @@ class TestTileTensor(UnittestBase): class TestRegiterSupportTensorInOpMaker(unittest.TestCase): - def setUp(self): self.all_protos = OpProtoHolder.instance() self.support_tensor_attrs = { 'dropout': ['dropout_prob'], 'tile': ['repeat_times'], - 'concat': ['axis'] + 'concat': ['axis'], } # Just add a op example to test not support tensor self.not_support_tensor_attrs = {'svd': ['full_matrices']} @@ -189,8 +186,9 @@ class TestRegiterSupportTensorInOpMaker(unittest.TestCase): # All Attribute not tagged with .SupportTensor() in OpMaker will return False for op_type, attr_names in self.not_support_tensor_attrs.items(): for attr_name in attr_names: - self.assertFalse(self.is_support_tensor_attr( - op_type, attr_name)) + self.assertFalse( + self.is_support_tensor_attr(op_type, attr_name) + ) def is_support_tensor_attr(self, op_type, attr_name): proto = self.all_protos.get_op_proto(op_type) diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py index 
56c0b1c69778aafddb35f03427d2e967dc238fea..0ea67b19b13fcf0e16d87febf21fd0357dc1b3d3 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_op.py @@ -21,7 +21,6 @@ import paddle class TestAucOp(OpTest): - def setUp(self): self.op_type = "auc" pred = np.random.random((128, 2)).astype("float32") @@ -30,25 +29,27 @@ class TestAucOp(OpTest): slide_steps = 1 stat_pos = np.zeros( - (1 + slide_steps) * (num_thresholds + 1) + 1, ).astype("int64") + (1 + slide_steps) * (num_thresholds + 1) + 1, + ).astype("int64") stat_neg = np.zeros( - (1 + slide_steps) * (num_thresholds + 1) + 1, ).astype("int64") + (1 + slide_steps) * (num_thresholds + 1) + 1, + ).astype("int64") self.inputs = { 'Predict': pred, 'Label': labels, "StatPos": stat_pos, - "StatNeg": stat_neg + "StatNeg": stat_neg, } self.attrs = { 'curve': 'ROC', 'num_thresholds': num_thresholds, - "slide_steps": slide_steps + "slide_steps": slide_steps, } - python_auc = metrics.Auc(name="auc", - curve='ROC', - num_thresholds=num_thresholds) + python_auc = metrics.Auc( + name="auc", curve='ROC', num_thresholds=num_thresholds + ) python_auc.update(pred, labels) pos = python_auc._stat_pos * 2 @@ -58,7 +59,7 @@ class TestAucOp(OpTest): self.outputs = { 'AUC': np.array(python_auc.eval()), 'StatPosOut': np.array(pos), - 'StatNegOut': np.array(neg) + 'StatNegOut': np.array(neg), } def test_check_output(self): @@ -66,7 +67,6 @@ class TestAucOp(OpTest): class TestGlobalAucOp(OpTest): - def setUp(self): self.op_type = "auc" pred = np.random.random((128, 2)).astype("float32") @@ -81,17 +81,17 @@ class TestGlobalAucOp(OpTest): 'Predict': pred, 'Label': labels, "StatPos": stat_pos, - "StatNeg": stat_neg + "StatNeg": stat_neg, } self.attrs = { 'curve': 'ROC', 'num_thresholds': num_thresholds, - "slide_steps": slide_steps + "slide_steps": slide_steps, } - python_auc = metrics.Auc(name="auc", - curve='ROC', - num_thresholds=num_thresholds) + python_auc = metrics.Auc( + name="auc", curve='ROC', num_thresholds=num_thresholds + ) python_auc.update(pred, labels) pos = python_auc._stat_pos @@ -99,7 +99,7 @@ class TestGlobalAucOp(OpTest): self.outputs = { 'AUC': np.array(python_auc.eval()), 'StatPosOut': np.array(pos), - 'StatNegOut': np.array(neg) + 'StatNegOut': np.array(neg), } def test_check_output(self): @@ -107,17 +107,16 @@ class TestGlobalAucOp(OpTest): class TestAucAPI(unittest.TestCase): - def test_static(self): paddle.enable_static() data = paddle.static.data(name="input", shape=[-1, 1], dtype="float32") label = paddle.static.data(name="label", shape=[4], dtype="int64") - ins_tag_weight = paddle.static.data(name="ins_tag_weight", - shape=[4], - dtype="float32") - result = paddle.static.auc(input=data, - label=label, - ins_tag_weight=ins_tag_weight) + ins_tag_weight = paddle.static.data( + name="ins_tag_weight", shape=[4], dtype="float32" + ) + result = paddle.static.auc( + input=data, label=label, ins_tag_weight=ins_tag_weight + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -128,37 +127,34 @@ class TestAucAPI(unittest.TestCase): y = np.array([0, 0, 1, 0]).astype('int64') z = np.array([1, 1, 1, 1]).astype('float32') - output, = exe.run(feed={ - "input": x, - "label": y, - "ins_tag_weight": z - }, - fetch_list=[result[0]]) + (output,) = exe.run( + feed={"input": x, "label": y, "ins_tag_weight": z}, + fetch_list=[result[0]], + ) auc_np = np.array([0.66666667]).astype("float32") np.testing.assert_allclose(output, auc_np, rtol=1e-05) class TestAucOpError(unittest.TestCase): - def 
test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): def test_type1(): data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int") label1 = fluid.data(name="label1", shape=[-1], dtype="int") - ins_tag_w1 = paddle.static.data(name="label1", - shape=[-1], - dtype="int") - result1 = paddle.static.auc(input=data1, - label=label1, - ins_tag_weight=ins_tag_w1) + ins_tag_w1 = paddle.static.data( + name="label1", shape=[-1], dtype="int" + ) + result1 = paddle.static.auc( + input=data1, label=label1, ins_tag_weight=ins_tag_w1 + ) self.assertRaises(TypeError, test_type1) def test_type2(): - data2 = fluid.data(name="input2", - shape=[-1, 2], - dtype="float32") + data2 = fluid.data( + name="input2", shape=[-1, 2], dtype="float32" + ) label2 = fluid.data(name="label2", shape=[-1], dtype="float32") result2 = fluid.layers.auc(input=data2, label=label2) diff --git a/python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py b/python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py index 28e2808be350d6d9e237846c531f726fcc11f54a..13482d0af3abaf3c7088d1c999f30cd2513f6d10 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_single_pred_op.py @@ -19,7 +19,6 @@ from paddle.fluid import metrics class TestAucSinglePredOp(OpTest): - def setUp(self): self.op_type = "auc" pred = np.random.random((128, 2)).astype("float32") @@ -29,25 +28,27 @@ class TestAucSinglePredOp(OpTest): slide_steps = 1 stat_pos = np.zeros( - (1 + slide_steps) * (num_thresholds + 1) + 1, ).astype("int64") + (1 + slide_steps) * (num_thresholds + 1) + 1, + ).astype("int64") stat_neg = np.zeros( - (1 + slide_steps) * (num_thresholds + 1) + 1, ).astype("int64") + (1 + slide_steps) * (num_thresholds + 1) + 1, + ).astype("int64") self.inputs = { 'Predict': pred0, 'Label': labels, "StatPos": stat_pos, - "StatNeg": stat_neg + "StatNeg": stat_neg, } self.attrs = { 'curve': 'ROC', 'num_thresholds': num_thresholds, - "slide_steps": slide_steps + "slide_steps": slide_steps, } - python_auc = metrics.Auc(name="auc", - curve='ROC', - num_thresholds=num_thresholds) + python_auc = metrics.Auc( + name="auc", curve='ROC', num_thresholds=num_thresholds + ) for i in range(128): pred[i][1] = pred[i][0] python_auc.update(pred, labels) @@ -59,7 +60,7 @@ class TestAucSinglePredOp(OpTest): self.outputs = { 'AUC': np.array(python_auc.eval()), 'StatPosOut': np.array(pos), - 'StatNegOut': np.array(neg) + 'StatNegOut': np.array(neg), } def test_check_output(self): @@ -67,7 +68,6 @@ class TestAucSinglePredOp(OpTest): class TestAucGlobalSinglePredOp(OpTest): - def setUp(self): self.op_type = "auc" pred = np.random.random((128, 2)).astype("float32") @@ -83,17 +83,17 @@ class TestAucGlobalSinglePredOp(OpTest): 'Predict': pred0, 'Label': labels, "StatPos": stat_pos, - "StatNeg": stat_neg + "StatNeg": stat_neg, } self.attrs = { 'curve': 'ROC', 'num_thresholds': num_thresholds, - "slide_steps": slide_steps + "slide_steps": slide_steps, } - python_auc = metrics.Auc(name="auc", - curve='ROC', - num_thresholds=num_thresholds) + python_auc = metrics.Auc( + name="auc", curve='ROC', num_thresholds=num_thresholds + ) for i in range(128): pred[i][1] = pred[i][0] python_auc.update(pred, labels) @@ -103,7 +103,7 @@ class TestAucGlobalSinglePredOp(OpTest): self.outputs = { 'AUC': np.array(python_auc.eval()), 'StatPosOut': np.array(pos), - 'StatNegOut': np.array(neg) + 'StatNegOut': np.array(neg), } def test_check_output(self): diff --git 
a/python/paddle/fluid/tests/unittests/test_auto_growth_gpu_memory_limit.py b/python/paddle/fluid/tests/unittests/test_auto_growth_gpu_memory_limit.py index 948aa58990d0c27b79ef29e49c0ee7c6b2993fa3..7f4f6c78c194556793c1608e4452ce05491b2331 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_growth_gpu_memory_limit.py +++ b/python/paddle/fluid/tests/unittests/test_auto_growth_gpu_memory_limit.py @@ -23,7 +23,6 @@ if fluid.is_compiled_with_cuda(): class TestBase(unittest.TestCase): - def setUp(self): if fluid.is_compiled_with_cuda(): self._limit = fluid.core.globals()['FLAGS_gpu_memory_limit_mb'] @@ -36,8 +35,10 @@ class TestBase(unittest.TestCase): place = fluid.CUDAPlace(0) t = fluid.LoDTensor() - t.set(np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), - place) + t.set( + np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), + place, + ) del t t = fluid.LoDTensor() diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_autoconvert.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_autoconvert.py index 0777f5217b573fb7430c10af7ef0a3d0cb52570e..82969299efd7de1e0745d84df474692b068281bf 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_autoconvert.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_autoconvert.py @@ -18,7 +18,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestAutoParallelAutoConvert(TestMultipleGpus): - def test_auto_parallel_autoconvert(self): self.run_mnist_2gpu('auto_parallel_autoconvert.py') diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py index df11100755ef363c9126f7887bd7aa27fca33107..a632d29f6915be718e19db586ee84c6461a4b340 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py @@ -199,7 +199,6 @@ cluster_json = """ class TestAutoParallelCluster(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -207,8 +206,9 @@ class TestAutoParallelCluster(unittest.TestCase): self.temp_dir.cleanup() def test_cluster(self): - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -295,8 +295,9 @@ class TestAutoParallelCluster(unittest.TestCase): self.assertEqual(device2_machine0.global_id, 2) self.assertEqual(device2_machine0.local_id, 0) self.assertEqual(device2_machine0.type, DeviceType.CPU) - self.assertEqual(device2_machine0.model, - "Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GH") + self.assertEqual( + device2_machine0.model, "Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GH" + ) self.assertAlmostEqual(device2_machine0.sp_gflops, 150) self.assertAlmostEqual(device2_machine0.dp_gflops, 75) self.assertAlmostEqual(device2_machine0.memory, 1510) @@ -401,8 +402,9 @@ class TestAutoParallelCluster(unittest.TestCase): self.assertEqual(device6_machine1.global_id, 6) self.assertEqual(device6_machine1.local_id, 0) self.assertEqual(device6_machine1.type, DeviceType.CPU) - self.assertEqual(device6_machine1.model, - "Intel(R) Xeon(R) Gold 6271C CPU @ 2.60G") + self.assertEqual( + device6_machine1.model, "Intel(R) Xeon(R) Gold 6271C CPU @ 2.60G" + ) self.assertAlmostEqual(device6_machine1.sp_gflops, 150) 
self.assertAlmostEqual(device6_machine1.dp_gflops, 75) self.assertAlmostEqual(device6_machine1.memory, 503) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py index c757e3a45afba851a9a7f106fef2ba5519a7da62..5e1659b49c1b53261fc34a04f254b61183891742 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py @@ -33,38 +33,42 @@ _global_process_mesh2 = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") def forward(self, input): if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear0.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear0.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) out = self.norm(input) out = self.linear0(out) @@ -76,79 +80,93 @@ class MLPLayer(nn.Layer): def mlp_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input, - process_mesh=_global_process_mesh, - shard_spec=["dp", None, None]) - - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + auto.shard_tensor( + input, + process_mesh=_global_process_mesh, + shard_spec=["dp", None, None], + ) + + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = mlp(input) return train_program, start_program class TestMLPAutoCompletion(unittest.TestCase): - def test_mlp_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) + _global_process_mesh = 
auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_mlp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_mlp_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = mlp_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) # def test_mlp_misc(self): @@ -203,14 +221,15 @@ class TestMLPAutoCompletion(unittest.TestCase): class AttentionLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - sequence_len=512, - intermediate_size=4 * 1024, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + sequence_len=512, + intermediate_size=4 * 1024, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(AttentionLayer, self).__init__() self.hidden_size = hidden_size self.sequence_len = sequence_len @@ -219,38 +238,38 @@ class AttentionLayer(nn.Layer): self.vdim = self.embed_dim self.num_heads = num_heads self.head_dim = self.embed_dim // self.num_heads - assert self.head_dim * self.num_heads == self.embed_dim, \ - "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" self.dropout_ratio = dropout_ratio self.initializer_range = initializer_range self.training = True self.attn_mask = None weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.q_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - 
bias_attr=bias_attr) + self.q_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) def forward(self, input): if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input, - process_mesh=_global_process_mesh, - shard_spec=["dp", None, None]) + auto.shard_tensor( + input, + process_mesh=_global_process_mesh, + shard_spec=["dp", None, None], + ) q = self.q_proj(input) q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) @@ -260,15 +279,21 @@ class AttentionLayer(nn.Layer): v = self.v_proj(input) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -276,10 +301,9 @@ class AttentionLayer(nn.Layer): v = tensor.transpose(x=v, perm=[0, 2, 1, 3]) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.attn_mask is not None: product = product + self.attn_mask @@ -287,10 +311,12 @@ class AttentionLayer(nn.Layer): weights = F.softmax(product) if self.dropout_ratio: - weights = F.dropout(weights, - self.dropout_ratio, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout_ratio, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -301,98 +327,113 @@ class AttentionLayer(nn.Layer): # project to output out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) return out def attn_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="query", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - attn = AttentionLayer(hidden_size=hidden_size, - sequence_len=sequence_len, - intermediate_size=4 * hidden_size, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02) + input = static.data( + name="query", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + attn = AttentionLayer( + hidden_size=hidden_size, + sequence_len=sequence_len, + intermediate_size=4 * 
hidden_size, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = attn(input) return train_program, start_program class TestAttentionAutoCompletion(unittest.TestCase): - def test_attn_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = attn_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_attn_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = attn_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_attn_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = attn_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) class DecoderLayer(nn.Layer): - - def __init__(self, - vocab_size=32768, - hidden_size=1024, - sequence_len=512, - max_position_embeddings=512, - intermediate_size=4 * 1024, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + vocab_size=32768, + hidden_size=1024, + sequence_len=512, + max_position_embeddings=512, + intermediate_size=4 * 1024, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(DecoderLayer, self).__init__() self.vocab_size = vocab_size self.hidden_size = hidden_size @@ -408,57 +449,64 @@ class DecoderLayer(nn.Layer): self.attn_mask = None self.head_dim = self.embed_dim // self.num_heads - assert self.head_dim * self.num_heads == self.embed_dim, \ - "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" self.word_embeddings = nn.Embedding( self.vocab_size, self.hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=self.initializer_range))) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + 
mean=0.0, std=self.initializer_range + ), + ), + ) self.position_embeddings = nn.Embedding( self.max_position_embeddings, self.hidden_size, - weight_attr=paddle.ParamAttr(name="pos_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=self.initializer_range))) + weight_attr=paddle.ParamAttr( + name="pos_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ), + ), + ) - weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ) bias_attr = None - self.q_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) intermediate_size = 4 * self.hidden_size d_model = self.hidden_size dim_feedforward = intermediate_size - weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout1 = nn.Dropout(self.dropout_ratio) @@ -467,17 +515,21 @@ class DecoderLayer(nn.Layer): def forward(self, input_ids, position_ids): if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input_ids, - process_mesh=_global_process_mesh, - shard_spec=["dp", None]) + auto.shard_tensor( + input_ids, + process_mesh=_global_process_mesh, + shard_spec=["dp", None], + ) input_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.word_embeddings.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.word_embeddings.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) embeddings = input_embeddings + position_embeddings embeddings = self.dropout1(embeddings) @@ -494,15 +546,21 @@ class DecoderLayer(nn.Layer): v = self.v_proj(target) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - 
shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -510,10 +568,9 @@ class DecoderLayer(nn.Layer): v = tensor.transpose(x=v, perm=[0, 2, 1, 3]) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.attn_mask is not None: product = product + self.attn_mask @@ -521,10 +578,12 @@ class DecoderLayer(nn.Layer): weights = F.softmax(product) if self.dropout_ratio: - weights = F.dropout(weights, - self.dropout_ratio, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout_ratio, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -536,9 +595,11 @@ class DecoderLayer(nn.Layer): out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) # Add residual residual = embeddings + self.dropout2(out) @@ -552,12 +613,16 @@ class DecoderLayer(nn.Layer): out3 = self.linear1(out2) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear0.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear0.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) # Add residual final = residual + self.dropout3(out3) @@ -565,81 +630,91 @@ class DecoderLayer(nn.Layer): def decoder_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input_ids = static.data(name="input_ids", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') - decoder = DecoderLayer(vocab_size=32768, - hidden_size=hidden_size, - sequence_len=sequence_len, - max_position_embeddings=512, - intermediate_size=4 * hidden_size, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02) + input_ids = static.data( + name="input_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + decoder = DecoderLayer( + vocab_size=32768, + hidden_size=hidden_size, + sequence_len=sequence_len, + max_position_embeddings=512, + intermediate_size=4 * hidden_size, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = decoder(input_ids, position_ids) return train_program, start_program class 
TestDecoderLayerAutoCompletion(unittest.TestCase): - def test_decoder_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = decoder_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_decoder_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = decoder_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_decoder_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = decoder_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py index 239033e7c8b949c554fbe30ebf76627d2e70b8e2..0511ee97e55ebda4815b713e1b17f42b9bf3a444 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py @@ -42,17 +42,19 @@ class MultiHeadAttention(nn.Layer): Cache = collections.namedtuple("Cache", ["k", "v"]) StaticCache = collections.namedtuple("StaticCache", ["k", "v"]) - def __init__(self, - embed_dim, - num_heads, - dropout=0., - kdim=None, - vdim=None, - need_weights=False, - weight_attr=None, - bias_attr=None, - topo=None, - fuse=False): + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + kdim=None, + vdim=None, + need_weights=False, + weight_attr=None, + bias_attr=None, + topo=None, + fuse=False, + ): super(MultiHeadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim @@ -63,38 +65,36 @@ class MultiHeadAttention(nn.Layer): self.fuse = fuse self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * 
num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" if topo is None or topo.mp_info.size == 1: if self.fuse: assert self.kdim == embed_dim assert self.vdim == embed_dim - self.qkv_proj = nn.Linear(embed_dim, - 3 * embed_dim, - weight_attr, - bias_attr=bias_attr) + self.qkv_proj = nn.Linear( + embed_dim, 3 * embed_dim, weight_attr, bias_attr=bias_attr + ) else: - self.q_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) def _fuse_prepare_qkv(self, query): mix_layer = self.qkv_proj(query) - mix_layer = paddle.reshape_(mix_layer, - [0, 0, self.num_heads, 3 * self.head_dim]) + mix_layer = paddle.reshape_( + mix_layer, [0, 0, self.num_heads, 3 * self.head_dim] + ) mix_layer = paddle.transpose(mix_layer, [0, 2, 1, 3]) q, k, v = paddle.split(mix_layer, num_or_sections=3, axis=-1) return q, k, v @@ -108,9 +108,11 @@ class MultiHeadAttention(nn.Layer): q = self.q_proj(query) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) q = tensor.transpose(x=q, perm=[0, 2, 1, 3]) @@ -144,12 +146,16 @@ class MultiHeadAttention(nn.Layer): v = self.v_proj(value) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -171,24 +177,22 @@ class MultiHeadAttention(nn.Layer): input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) v = layers.fill_constant_batch_size_like( input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) return self.Cache(k, v) else: # incremental_state with initial value, mainly for usage like UniLM return self.Cache(key, value) - def forward(self, - query, - key, - value, - attn_mask=None, - use_cache=False, - cache=None): + def forward( + self, query, key, value, attn_mask=None, use_cache=False, cache=None + ): r""" Applies multi-head attention to map queries and a set of key-value pairs to outputs. 
@@ -202,23 +206,25 @@ class MultiHeadAttention(nn.Layer): else: q, k, v = self._prepare_qkv(query, key, value, use_cache, cache) else: - q, k, v, cache = self._prepare_qkv(query, key, value, use_cache, - cache) + q, k, v, cache = self._prepare_qkv( + query, key, value, use_cache, cache + ) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if attn_mask is not None: product = product + attn_mask weights = F.softmax(product) if self.dropout: - weights = F.dropout(weights, - self.dropout, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -230,9 +236,11 @@ class MultiHeadAttention(nn.Layer): out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) outs = [out] if self.need_weights: @@ -247,12 +255,9 @@ class TransformerDecoder(nn.Layer): TransformerDecoder is a stack of N decoder layers. """ - def __init__(self, - decoder_layers, - num_layers, - norm=None, - hidden_size=None, - topo=None): + def __init__( + self, decoder_layers, num_layers, norm=None, hidden_size=None, topo=None + ): super(TransformerDecoder, self).__init__() self.topo = topo @@ -265,13 +270,15 @@ class TransformerDecoder(nn.Layer): raise ValueError("Only support LayerNorm") self.checkpoints = [] - def forward(self, - tgt, - memory, - tgt_mask=None, - memory_mask=None, - use_cache=False, - cache=None): + def forward( + self, + tgt, + memory, + tgt_mask=None, + memory_mask=None, + use_cache=False, + cache=None, + ): r""" Applies a stack of N Transformer decoder layers on inputs. If `norm` is provided, also applies layer normalization on the output of last decoder @@ -284,25 +291,31 @@ class TransformerDecoder(nn.Layer): for i, mod in enumerate(self.layers): if cache is None: if use_cache: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) new_caches.append(new_cache) else: - output = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) else: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache[i]) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache[i], + ) new_caches.append(new_cache) self.checkpoints.append(output.name) @@ -317,7 +330,7 @@ class TransformerDecoder(nn.Layer): produced by `TransformerDecoderLayer.gen_cache`. See `TransformerDecoderLayer.gen_cache` for more details. If `do_zip` is True, apply `zip` on these tuples to get a list with two elements. - """ + """ cache = [layer.gen_cache(memory) for layer in self.layers] if do_zip: cache = list(zip(*cache)) @@ -330,18 +343,20 @@ class TransformerDecoderLayer(nn.Layer): It contains multiheadattention and some linear layers. 
""" - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout=0.1, - activation="gelu", - attn_dropout=None, - act_dropout=None, - normalize_before=True, - weight_attr=None, - bias_attr=None, - topo=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout=0.1, + activation="gelu", + attn_dropout=None, + act_dropout=None, + normalize_before=True, + weight_attr=None, + bias_attr=None, + topo=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 @@ -354,21 +369,27 @@ class TransformerDecoderLayer(nn.Layer): weight_attrs = _convert_param_attr_to_list(weight_attr, 3) bias_attrs = _convert_param_attr_to_list(bias_attr, 3) - self.self_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[0], - bias_attr=bias_attrs[0], - topo=topo) + self.self_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[0], + bias_attr=bias_attrs[0], + topo=topo, + ) if topo is None or topo.mp_info.size == 1: - self.linear1 = nn.Linear(d_model, - dim_feedforward, - weight_attrs[2], - bias_attr=bias_attrs[2]) - self.linear2 = nn.Linear(dim_feedforward, - d_model, - weight_attrs[2], - bias_attr=bias_attrs[2]) + self.linear1 = nn.Linear( + d_model, + dim_feedforward, + weight_attrs[2], + bias_attr=bias_attrs[2], + ) + self.linear2 = nn.Linear( + dim_feedforward, + d_model, + weight_attrs[2], + bias_attr=bias_attrs[2], + ) self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5) @@ -385,8 +406,9 @@ class TransformerDecoderLayer(nn.Layer): if use_cache is False: tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, use_cache, cache) else: - tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask, - use_cache, cache) + tgt, incremental_cache = self.self_attn( + tgt, tgt, tgt, tgt_mask, use_cache, cache + ) tgt = residual + self.dropout1(tgt) if not self.normalize_before: tgt = self.norm1(tgt) @@ -396,12 +418,16 @@ class TransformerDecoderLayer(nn.Layer): tgt = self.norm2(tgt) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear2.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear2.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) # tgt = self.dropout2( # self.linear2(F.gelu( @@ -417,8 +443,9 @@ class TransformerDecoderLayer(nn.Layer): return tgt if use_cache is False else (tgt, incremental_cache) def gen_cache(self, memory): - incremental_cache = self.self_attn.gen_cache(memory, - type=self.self_attn.Cache) + incremental_cache = self.self_attn.gen_cache( + memory, type=self.self_attn.Cache + ) return incremental_cache @@ -427,29 +454,38 @@ class GPTEmbeddings(nn.Layer): Include embeddings from word, position and token_type embeddings """ - def __init__(self, - vocab_size, - hidden_size=768, - hidden_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - topo=None): + def __init__( + self, + vocab_size, + hidden_size=768, + hidden_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + topo=None, + ): super(GPTEmbeddings, self).__init__() if topo is None or topo.mp_info.size == 1: self.word_embeddings = nn.Embedding( 
vocab_size, hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.position_embeddings = nn.Embedding( max_position_embeddings, hidden_size, - weight_attr=paddle.ParamAttr(name="pos_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="pos_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.dropout = nn.Dropout(hidden_dropout_prob) @@ -462,9 +498,11 @@ class GPTEmbeddings(nn.Layer): input_embedings = self.word_embeddings(input_ids) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.word_embeddings.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.word_embeddings.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) position_embeddings = self.position_embeddings(position_ids) embeddings = input_embedings + position_embeddings @@ -477,20 +515,22 @@ class GPTModel(nn.Layer): The base model of gpt. """ - def __init__(self, - vocab_size, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - pad_token_id=0, - topo=None): + def __init__( + self, + vocab_size, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + pad_token_id=0, + topo=None, + ): super(GPTModel, self).__init__() self.pad_token_id = pad_token_id @@ -503,71 +543,94 @@ class GPTModel(nn.Layer): if self.pipline_mode: self.layer_per_stage = num_hidden_layers // self.topo.pp_info.size - self.embeddings = GPTEmbeddings(vocab_size, hidden_size, - hidden_dropout_prob, - max_position_embeddings, - type_vocab_size, self.initializer_range, - topo) + self.embeddings = GPTEmbeddings( + vocab_size, + hidden_size, + hidden_dropout_prob, + max_position_embeddings, + type_vocab_size, + self.initializer_range, + topo, + ) decoder_layers = nn.LayerList() for i in range(num_hidden_layers): DecoderLayer = TransformerDecoderLayer decoder_layers.append( - DecoderLayer(d_model=hidden_size, - nhead=num_attention_heads, - dim_feedforward=intermediate_size, - dropout=hidden_dropout_prob, - activation=hidden_act, - attn_dropout=attention_probs_dropout_prob, - act_dropout=hidden_dropout_prob, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)), - bias_attr=None, - topo=topo)) + DecoderLayer( + d_model=hidden_size, + nhead=num_attention_heads, + dim_feedforward=intermediate_size, + dropout=hidden_dropout_prob, + activation=hidden_act, + attn_dropout=attention_probs_dropout_prob, + act_dropout=hidden_dropout_prob, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ), + bias_attr=None, + topo=topo, + ) + ) Decoder = TransformerDecoder - self.decoder = Decoder(decoder_layers, - num_hidden_layers, - norm="LayerNorm", - hidden_size=hidden_size, - topo=topo) + self.decoder = Decoder( + 
decoder_layers, + num_hidden_layers, + norm="LayerNorm", + hidden_size=hidden_size, + topo=topo, + ) self.checkpoints = [] - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - use_cache=False, - cache=None): + def forward( + self, + input_ids, + position_ids=None, + attention_mask=None, + use_cache=False, + cache=None, + ): self.checkpoints = [] if attention_mask is None: length = paddle.shape(input_ids)[1] # Use bool mask attention_mask = paddle.tensor.tril( - paddle.ones((length, length), - dtype=self.embeddings.word_embeddings.weight.dtype)) + paddle.ones( + (length, length), + dtype=self.embeddings.word_embeddings.weight.dtype, + ) + ) if position_ids is None: past_length = 0 if cache is not None: past_length = paddle.shape(cache[0].k)[-2] - position_ids = paddle.arange(past_length, - paddle.shape(input_ids)[-1] + - past_length, - dtype='int64') + position_ids = paddle.arange( + past_length, + paddle.shape(input_ids)[-1] + past_length, + dtype='int64', + ) position_ids = position_ids.unsqueeze(0) # .expand_as(input_ids) position_ids = paddle.fluid.layers.expand_as( - position_ids, input_ids) - embedding_output = self.embeddings(input_ids=input_ids, - position_ids=position_ids) + position_ids, input_ids + ) + embedding_output = self.embeddings( + input_ids=input_ids, position_ids=position_ids + ) # TODO, use registered buffer - causal_mask = paddle.tensor.triu(paddle.ones( - (paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1])) * -1e9, - diagonal=1) + causal_mask = paddle.tensor.triu( + paddle.ones( + (paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1]) + ) + * -1e9, + diagonal=1, + ) if attention_mask is not None: attention_mask = attention_mask + causal_mask @@ -577,11 +640,13 @@ class GPTModel(nn.Layer): # The tensor returned by triu not in static graph. 
attention_mask.stop_gradient = True - encoder_outputs = self.decoder(embedding_output, - memory=None, - tgt_mask=attention_mask, - use_cache=use_cache, - cache=cache) + encoder_outputs = self.decoder( + embedding_output, + memory=None, + tgt_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) self.checkpoints.extend(self.decoder.checkpoints) return encoder_outputs @@ -603,11 +668,12 @@ class GPTForPretraining(nn.Layer): def parallel_matmul(self, lm_output, logit_weights, parallel_output, topo): if topo is not None and topo.mp_info.size > 1: input_parallel = paddle.distributed.collective._c_identity( - lm_output, group=None) + lm_output, group=None + ) - logits = paddle.matmul(input_parallel, - logit_weights, - transpose_y=True) + logits = paddle.matmul( + input_parallel, logit_weights, transpose_y=True + ) if parallel_output: return logits @@ -617,24 +683,29 @@ class GPTForPretraining(nn.Layer): logits = paddle.matmul(lm_output, logit_weights, transpose_y=True) return logits - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - masked_positions=None, - use_cache=False, - cache=None): - outputs = self.gpt(input_ids, - position_ids=position_ids, - attention_mask=attention_mask, - use_cache=use_cache, - cache=cache) + def forward( + self, + input_ids, + position_ids=None, + attention_mask=None, + masked_positions=None, + use_cache=False, + cache=None, + ): + outputs = self.gpt( + input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) if use_cache: encoder_outputs, cached_kvs = outputs[:2] else: encoder_outputs = outputs - logits = self.parallel_matmul(encoder_outputs, self.weight, True, - self.gpt.topo) + logits = self.parallel_matmul( + encoder_outputs, self.weight, True, self.gpt.topo + ) if use_cache: return logits, cached_kvs @@ -653,11 +724,14 @@ class GPTPretrainingCriterion(nn.Layer): if topo is None or topo.mp_info.size == 1: self.loss_func = paddle.nn.CrossEntropyLoss(reduction="none") else: - self.loss_func = paddle.distributed.collective._c_softmax_with_cross_entropy + self.loss_func = ( + paddle.distributed.collective._c_softmax_with_cross_entropy + ) def forward(self, prediction_scores, masked_lm_labels, loss_mask): - masked_lm_loss = self.loss_func(prediction_scores, - masked_lm_labels.unsqueeze(2)) + masked_lm_loss = self.loss_func( + prediction_scores, masked_lm_labels.unsqueeze(2) + ) loss_mask = loss_mask.reshape([-1]) masked_lm_loss = paddle.sum(masked_lm_loss.reshape([-1]) * loss_mask) @@ -666,45 +740,51 @@ class GPTPretrainingCriterion(nn.Layer): def gpt_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 16 sequence_len = 512 - input_ids = static.data(name="input_ids", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + input_ids = static.data( + name="input_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float64') - labels = static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = static.data(name="loss_mask", - 
shape=[batch_size, sequence_len], - dtype='float64') + dtype='float64', + ) + labels = static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float64' + ) if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input_ids, - process_mesh=_global_process_mesh, - shard_spec=["dp", None]) - - gpt = GPTModel(vocab_size=32768, - hidden_size=1024, - num_hidden_layers=2, - num_attention_heads=16, - intermediate_size=4096, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=1024, - type_vocab_size=16, - initializer_range=0.02, - pad_token_id=0, - topo=None) + auto.shard_tensor( + input_ids, + process_mesh=_global_process_mesh, + shard_spec=["dp", None], + ) + + gpt = GPTModel( + vocab_size=32768, + hidden_size=1024, + num_hidden_layers=2, + num_attention_heads=16, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=1024, + type_vocab_size=16, + initializer_range=0.02, + pad_token_id=0, + topo=None, + ) model = GPTForPretraining(gpt) @@ -718,57 +798,64 @@ def gpt_pretrain_forward(train_program, start_program): class TestGPTAutoCompletion(unittest.TestCase): - def test_gpt_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = gpt_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_gpt_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = gpt_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) def test_gpt_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) train_program = static.Program() start_program = static.Program() dist_context = DistributedContext() train_program, start_program = gpt_pretrain_forward( - train_program, start_program) + train_program, start_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) self.assertTrue(dist_context.validate_dist_attr_for_program()) diff --git 
a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py index 821fee4df16a7dc4ab3c0772accbba931c519511..bef9619ead3ea6beb133de95d8562546a717d4a5 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py @@ -42,27 +42,27 @@ device = "gpu" if core.is_compiled_with_cuda() else "cpu" class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=256, - intermediate_size=4 * 256, - initializer_range=0.02, - is_distributed=True): + def __init__( + self, + hidden_size=256, + intermediate_size=4 * 256, + initializer_range=0.02, + is_distributed=True, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.is_distributed = is_distributed @@ -84,13 +84,14 @@ def get_single_node_data(): train_program = paddle.static.Program() startup_program = paddle.static.Program() - loss, train_program, startup_program = mlp_forward(train_program, - startup_program, - is_distributed=False) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program, is_distributed=False + ) cost_model = core.CostModel() - cost_data = cost_model.profile_measure(train_program, startup_program, - device, ["time"]) + cost_data = cost_model.profile_measure( + train_program, startup_program, device, ["time"] + ) op_name2cost = [{}, {}] for idx, op in enumerate(train_program.blocks[0].ops): @@ -102,34 +103,37 @@ def get_single_node_data(): def mlp_forward(train_program, start_program, is_distributed=True): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 256 sequence_len = 128 if is_distributed: - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) else: - input = paddle.ones(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = paddle.ones(name="label", - shape=[batch_size, 1], - dtype='float32') + input = paddle.ones( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = paddle.ones( + name="label", shape=[batch_size, 1], dtype='float32' + ) if is_distributed: auto.shard_tensor(input, PP_MESH_0, ["x", None]) auto.shard_tensor(label, PP_MESH_1, ["x", None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02, - is_distributed=is_distributed) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + 
initializer_range=0.02, + is_distributed=is_distributed, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -139,8 +143,9 @@ def mlp_forward(train_program, start_program, is_distributed=True): def get_dist_prog(train_program, startup_program, dist_context, rank_id): - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -150,25 +155,38 @@ def get_dist_prog(train_program, startup_program, dist_context, rank_id): # serial forward & backward completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) # logical partition partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads) + auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + ) - return auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + return ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) def check_runtime_estimation(cost): @@ -199,18 +217,19 @@ def check_empty_program_memory(cost): class TestCostModel(unittest.TestCase): - def test_empty_program_cost_model(self): empty_program = paddle.static.Program() startup_program = paddle.static.Program() standalone_cost_data = [{}] empty_pp_cfg = None cluster = None - cost = estimate_cost([empty_program], - cluster=cluster, - pipeline_config=empty_pp_cfg, - standalone_cost_data=standalone_cost_data, - batch_size=1) + cost = estimate_cost( + [empty_program], + cluster=cluster, + pipeline_config=empty_pp_cfg, + standalone_cost_data=standalone_cost_data, + batch_size=1, + ) self.assertTrue(check_empty_program_runtime(cost)) self.assertTrue(check_empty_program_memory(cost)) @@ -222,18 +241,30 @@ class TestCostModel(unittest.TestCase): train_program = paddle.static.Program() startup_program = paddle.static.Program() dist_context = DistributedContext() - distributed_program, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, rank_id) - resharder = Resharder(distributed_program, dist_startup_prog, - rank_id, dist_context, dist_params_grads) + ( + distributed_program, + dist_startup_prog, + dist_params_grads, + ) = get_dist_prog( + train_program, startup_program, dist_context, rank_id + ) + resharder = Resharder( + distributed_program, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() 
dist_program.append(distributed_program) cluster = None - cost = estimate_cost(dist_program, - cluster=cluster, - pipeline_config=pp_cfg, - standalone_cost_data=standalone_cost_data, - batch_size=4) + cost = estimate_cost( + dist_program, + cluster=cluster, + pipeline_config=pp_cfg, + standalone_cost_data=standalone_cost_data, + batch_size=4, + ) self.assertTrue(check_runtime_estimation(cost)) self.assertTrue(check_memory_estimation(cost)) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_data_unshard.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_data_unshard.py index 34b1f5dd6d4a1897d13a00e625deedd85f16a433..f641961b5afbcc43e70fd5273eb9e22bd1bb0573 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_data_unshard.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_data_unshard.py @@ -18,7 +18,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestAutoParallelDataUnshard(TestMultipleGpus): - def test_auto_parallel_data_unshard(self): self.run_mnist_2gpu('auto_parallel_data_unshard.py') diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py index a3b842bed8f4562d0c4f2f83ddc4a84e80073799..e1f504602398e654d032e67bbe87866e2e1e7568 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py @@ -23,18 +23,23 @@ from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer from paddle.distributed.auto_parallel.partitioner import Partitioner from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor -from paddle.distributed.auto_parallel.dist_attribute import TensorDistributedAttribute +from paddle.distributed.auto_parallel.dist_attribute import ( + TensorDistributedAttribute, +) import test_auto_parallel_reshard from test_auto_parallel_reshard import mlp_forward -def get_dist_prog(train_program, - startup_program, - dist_context, - rank_id, - complete_train_program=None): - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) +def get_dist_prog( + train_program, + startup_program, + dist_context, + rank_id, + complete_train_program=None, +): + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -43,49 +48,68 @@ def get_dist_prog(train_program, # serial forward & backward completion completer = Completer(dist_context) - complete_train_program = completer.complete_forward_annotation( - train_program - ) if complete_train_program is None else complete_train_program + complete_train_program = ( + completer.complete_forward_annotation(train_program) + if complete_train_program is None + else complete_train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) # logical partition partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, 
auto_parallel_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads) + auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + ) - return auto_parallel_main_prog, auto_parallel_startup_prog, complete_train_program + return ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + complete_train_program, + ) class TestDistributedTensor(unittest.TestCase): - def test_new_local_tensor(self): test_auto_parallel_reshard._global_process_mesh = auto.ProcessMesh( - mesh=[0, 1], dim_names=["x"]) + mesh=[0, 1], dim_names=["x"] + ) test_auto_parallel_reshard._global_parallel_strategy = "dp" train_program = paddle.static.Program() startup_program = paddle.static.Program() dist_context = DistributedContext() rank_id = 0 - dist_main_prog, dist_startup_prog, complete_train_program = get_dist_prog( - train_program, startup_program, dist_context, rank_id) + ( + dist_main_prog, + dist_startup_prog, + complete_train_program, + ) = get_dist_prog(train_program, startup_program, dist_context, rank_id) dist_context.dist_main_programs[rank_id] = dist_main_prog dist_context.dist_startup_programs[rank_id] = dist_startup_prog name = "layer_norm_1.tmp_2" dist_tensor = dist_context.get_dist_tensor_for_program( - complete_train_program.global_block().vars[name]) + complete_train_program.global_block().vars[name] + ) dist_tensor._dist_context = dist_context intermediate_var_0 = dist_tensor.new_local_tensor( - name="intermediate_var_0") + name="intermediate_var_0" + ) self.assertEqual(intermediate_var_0.shape, (2, 1024)) self.assertEqual(intermediate_var_0.name, "intermediate_var_0") @@ -93,25 +117,34 @@ class TestDistributedTensor(unittest.TestCase): train_program = paddle.static.Program() startup_program = paddle.static.Program() dist_context = DistributedContext() - dist_main_prog, dist_startup_prog, complete_train_program = get_dist_prog( - train_program, startup_program, dist_context, rank_id, None) + ( + dist_main_prog, + dist_startup_prog, + complete_train_program, + ) = get_dist_prog( + train_program, startup_program, dist_context, rank_id, None + ) dist_context.dist_main_programs[rank_id] = dist_main_prog dist_context.dist_startup_programs[rank_id] = dist_startup_prog name = "layer_norm_1.tmp_2" dist_tensor = dist_context.get_dist_tensor_for_program( - complete_train_program.global_block().vars[name]) + complete_train_program.global_block().vars[name] + ) dist_tensor._dist_context = dist_context intermediate_var_1 = dist_tensor.new_local_tensor( - rank=rank_id, name="intermediate_var_1") + rank=rank_id, name="intermediate_var_1" + ) self.assertEqual(intermediate_var_0.shape, (2, 1024)) self.assertEqual(intermediate_var_1.name, "intermediate_var_1") name = "linear_0.w_0" dist_tensor = dist_context.get_dist_tensor_for_program( - complete_train_program.global_block().vars[name]) + complete_train_program.global_block().vars[name] + ) dist_tensor._dist_context = dist_context intermediate_var_1 = dist_tensor.new_local_tensor( - rank=rank_id, name="linear_0.w_0_intermediate") + rank=rank_id, name="linear_0.w_0_intermediate" + ) self.assertEqual(intermediate_var_1.shape, (1024, 4096)) self.assertEqual(intermediate_var_1.name, 
"linear_0.w_0_intermediate") @@ -121,7 +154,10 @@ class TestDistributedTensor(unittest.TestCase): id(copied_dist_context), id( copied_dist_context.get_dist_tensor_for_program( - dist_tensor.serial_tensor).dist_context)) + dist_tensor.serial_tensor + ).dist_context + ), + ) def test_static_method(self): dims_mapping = [1, 0] @@ -133,58 +169,63 @@ class TestDistributedTensor(unittest.TestCase): # rank 1 [(2, 4), (0, 3)] # rank 4 [(2, 4), (3, 6)] rank = 0 - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes) + local_sizes = DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes + ) self.assertEqual(local_sizes, [2, 3]) local_offsets = DistributedTensor.get_local_offsets( - global_sizes, dims_mapping, topology, processes, rank) + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_offsets, [0, 0]) - local_shard = DistributedTensor.get_local_shard(global_sizes, - dims_mapping, topology, - processes, rank) + local_shard = DistributedTensor.get_local_shard( + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_shard, [(0, 2), (0, 3)]) rank = 1 - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes) + local_sizes = DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes + ) self.assertEqual(local_sizes, [2, 3]) local_offsets = DistributedTensor.get_local_offsets( - global_sizes, dims_mapping, topology, processes, rank) + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_offsets, [2, 0]) - local_shard = DistributedTensor.get_local_shard(global_sizes, - dims_mapping, topology, - processes, rank) + local_shard = DistributedTensor.get_local_shard( + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_shard, [(2, 4), (0, 3)]) rank = 4 - local_sizes = DistributedTensor.get_local_sizes(global_sizes, - dims_mapping, topology, - processes) + local_sizes = DistributedTensor.get_local_sizes( + global_sizes, dims_mapping, topology, processes + ) self.assertEqual(local_sizes, [2, 3]) local_offsets = DistributedTensor.get_local_offsets( - global_sizes, dims_mapping, topology, processes, rank) + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_offsets, [2, 3]) - local_shard = DistributedTensor.get_local_shard(global_sizes, - dims_mapping, topology, - processes, rank) + local_shard = DistributedTensor.get_local_shard( + global_sizes, dims_mapping, topology, processes, rank + ) self.assertEqual(local_shard, [(2, 4), (3, 6)]) # global sizes local_sizes = [2, 3] global_sizes = DistributedTensor.get_global_sizes( - local_sizes, dims_mapping, topology, processes) + local_sizes, dims_mapping, topology, processes + ) self.assertEqual(global_sizes, [6, 6]) def test_instance_method(self): tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.dims_mapping = [1, 0] tensor_dist_attr.process_mesh = auto.ProcessMesh( - mesh=[[0, 1, 2], [3, 4, 5]]) - serial_tensor = paddle.static.data(name="data", - shape=[6, 6], - dtype='float32') + mesh=[[0, 1, 2], [3, 4, 5]] + ) + serial_tensor = paddle.static.data( + name="data", shape=[6, 6], dtype='float32' + ) dist_tensor = DistributedTensor(serial_tensor, tensor_dist_attr) # rank 0 [(0, 2), (0, 3)] diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py index 
d8abad85ccaa97cca47ee8724237bfa886dc21b7..2e43bf6f928cb9fa15b27b35f40e04493cc865ad 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py @@ -17,7 +17,6 @@ from paddle.distributed.auto_parallel.graph import Graph class TestAutoParallelGraph(unittest.TestCase): - def test_graph(self): graph = Graph(name="foo") self.assertEqual(graph.attrs["name"], "foo") diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py index 5d473fe55075e7abe8f85c7ec477a6c06d4d5685..e18a585d33f3836844f93b02d8c8f0a6b2d8dacf 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py @@ -361,11 +361,9 @@ cluster_json = """ class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=64, - intermediate_size=4 * 64, - initializer_range=0.02): + def __init__( + self, hidden_size=64, intermediate_size=4 * 64, initializer_range=0.02 + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size @@ -379,37 +377,37 @@ class MLPLayer(nn.Layer): weight_attr2 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr2)) weight_attr3 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr3)) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr0, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr1, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) - self.linear2 = nn.Linear(d_model, - dim_feedforward, - weight_attr2, - bias_attr=bias_attr) - self.linear3 = nn.Linear(dim_feedforward, - d_model, - weight_attr3, - bias_attr=bias_attr) + self.linear2 = nn.Linear( + d_model, dim_feedforward, weight_attr2, bias_attr=bias_attr + ) + self.linear3 = nn.Linear( + dim_feedforward, d_model, weight_attr3, bias_attr=bias_attr + ) def forward(self, input): if _global_parallel_strategy == "dp_mp_pp": - auto.shard_tensor(self.linear0.weight, _global_process_mesh[0], - [None, "y"]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh[0], [None, "y"] + ) - auto.shard_tensor(self.linear1.weight, _global_process_mesh[0], - ["y", None]) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh[0], ["y", None] + ) - auto.shard_tensor(self.linear2.weight, _global_process_mesh[1], - [None, "y"]) + auto.shard_tensor( + self.linear2.weight, _global_process_mesh[1], [None, "y"] + ) - auto.shard_tensor(self.linear3.weight, _global_process_mesh[1], - ["y", None]) + auto.shard_tensor( + self.linear3.weight, _global_process_mesh[1], ["y", None] + ) out = self.norm(input) out = self.linear0(out) @@ -425,22 +423,25 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program,start_program), \ - utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 64 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = 
static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) if _global_parallel_strategy == "dp_mp_pp": auto.shard_tensor(input, _global_process_mesh[0], ["x", None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) loss = paddle.mean(error_cost) @@ -448,8 +449,9 @@ def mlp_forward(train_program, start_program): def get_dist_prog(train_program, startup_program, dist_context, rank_id): - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -459,24 +461,38 @@ def get_dist_prog(train_program, startup_program, dist_context, rank_id): # auto completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) partitioner = Partitioner(dist_context, rank_id) - dist_train_program, dist_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + dist_train_program, + dist_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - dist_train_program, dist_startup_prog, dist_params_grads) - - resharder = Resharder(dist_train_program, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + dist_train_program, dist_startup_prog, dist_params_grads + ) + + resharder = Resharder( + dist_train_program, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() return dist_train_program, dist_startup_prog @@ -496,7 +512,6 @@ def get_device_local_ids(machine): class TestAutoParallelMapper(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -504,8 +519,9 @@ class TestAutoParallelMapper(unittest.TestCase): self.temp_dir.cleanup() def test_mapper_dp_mp_pp(self): - cluster_json_path = os.path.join(self.temp_dir.name, - "auto_parallel_cluster.json") + cluster_json_path = os.path.join( + self.temp_dir.name, "auto_parallel_cluster.json" + ) cluster_json_object = json.loads(cluster_json) with open(cluster_json_path, "w") as cluster_json_file: json.dump(cluster_json_object, cluster_json_file) @@ -519,7 +535,7 @@ class TestAutoParallelMapper(unittest.TestCase): global _global_process_mesh _global_process_mesh = [ auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"]), - auto.ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]) + auto.ProcessMesh([[4, 5], [6, 7]], dim_names=["x", "y"]), ] processes = [0, 1, 2, 3, 4, 5, 6, 7] @@ -529,7 +545,8 @@ class TestAutoParallelMapper(unittest.TestCase): startup_program = static.Program() dist_context = DistributedContext() 
dist_train_program, dist_startup_prog = get_dist_prog( - train_program, startup_program, dist_context, rank_id) + train_program, startup_program, dist_context, rank_id + ) # if rank_id == 0: # print_program_with_dist_attr(dist_train_program, dist_context) dist_programs[rank_id] = [dist_train_program, None] @@ -547,8 +564,9 @@ class TestAutoParallelMapper(unittest.TestCase): self.assertTrue(is_in_machine(device_ids[0], machine)) machine_mapped_ranks.add(rank) machine_mapped_device_local_ids.add(device_ids[0]) - self.assertEqual(len(machine_mapped_ranks), - len(machine_mapped_device_local_ids)) + self.assertEqual( + len(machine_mapped_ranks), len(machine_mapped_device_local_ids) + ) all_mapped_ranks.update(machine_mapped_ranks) self.assertEqual(set(processes), all_mapped_ranks) @@ -575,35 +593,30 @@ class TestAutoParallelMapper(unittest.TestCase): dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) broadcast_op = train_program.global_block().append_op( type="c_broadcast", inputs={'X': input}, - attrs={ - 'ring_id': ring_id, - 'root': root_id - }, - outputs={'Out': output}) + attrs={'ring_id': ring_id, 'root': root_id}, + outputs={'Out': output}, + ) self.assertEqual(get_comm_volume(broadcast_op, 0, 1), 400) self.assertEqual(get_comm_volume(broadcast_op, 1, 0), None) allgather_op = train_program.global_block().append_op( type="c_allgather", inputs={'X': input}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }, - outputs={'Out': output}) + attrs={'ring_id': ring_id, 'nranks': nranks}, + outputs={'Out': output}, + ) self.assertEqual(get_comm_volume(allgather_op, 0, 1), 400) self.assertEqual(get_comm_volume(allgather_op, 0, 0), None) reduce_op = train_program.global_block().append_op( type="c_reduce_sum", inputs={'X': input}, - attrs={ - 'ring_id': ring_id, - 'root_id': root_id - }, - outputs={'Out': output}) + attrs={'ring_id': ring_id, 'root_id': root_id}, + outputs={'Out': output}, + ) self.assertEqual(get_comm_volume(reduce_op, 0, 1), None) self.assertEqual(get_comm_volume(reduce_op, 1, 0), 400) cast_op = train_program.global_block().append_op( @@ -612,8 +625,9 @@ class TestAutoParallelMapper(unittest.TestCase): outputs={"Out": output}, attrs={ "in_dtype": fluid.core.VarDesc.VarType.FP32, - "out_dtype": fluid.core.VarDesc.VarType.FP32 - }) + "out_dtype": fluid.core.VarDesc.VarType.FP32, + }, + ) self.assertRaises(ValueError, get_comm_volume, cast_op, 0, 1) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py index b5af410a2d0d28c98d3f7e1a52b0b5fb14a23e83..366e42cc68672f1b9bce1e8270a0d1d07cef3bcc 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py @@ -44,16 +44,26 @@ def get_programs(annotated_func): train_program, start_program = annotated_func(train_program, start_program) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) rank_id = 3 dist_strategy = fleet.DistributedStrategy() partitioner = Partitioner(dist_context, rank_id) - test_auto_parallel_dist_main_prog, test_auto_parallel_dist_startup_prog, _ = partitioner.partition( - complete_train_program, start_program, []) + ( + test_auto_parallel_dist_main_prog, + test_auto_parallel_dist_startup_prog, + _, + 
) = partitioner.partition(complete_train_program, start_program, []) - return complete_train_program, start_program, test_auto_parallel_dist_main_prog, test_auto_parallel_dist_startup_prog, dist_context + return ( + complete_train_program, + start_program, + test_auto_parallel_dist_main_prog, + test_auto_parallel_dist_startup_prog, + dist_context, + ) def is_all_parameters_shape_equal(prog1, prog2): @@ -84,41 +94,62 @@ def check_tensor_split(prog1, varnames1, prog2, varnames2, axis, nsplit): return True -def initialization_check(mode, dist_context, dist_startup_prog, - serial_startup_prog, var_need_broadcast, process_mesh, - mp_parallel_axis, dp_parallel_axis): +def initialization_check( + mode, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + process_mesh, + mp_parallel_axis, + dp_parallel_axis, +): if 'mp' in mode: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, mp_parallel_axis, - 3) + group_ranks = _get_comm_group( + process_mesh.processes, process_mesh.topology, mp_parallel_axis, 3 + ) mp_ring_id = new_process_group(group_ranks).id broadcast_ops = [ - op for op in dist_startup_prog.global_block().ops if - (op.type == "c_broadcast" and op.desc.attr("ring_id") == mp_ring_id) + op + for op in dist_startup_prog.global_block().ops + if ( + op.type == "c_broadcast" + and op.desc.attr("ring_id") == mp_ring_id + ) ] broadcast_varnames = sorted( - [op.desc.output_arg_names()[0] for op in broadcast_ops]) + [op.desc.output_arg_names()[0] for op in broadcast_ops] + ) if broadcast_varnames != var_need_broadcast: return False if 'dp' in mode: - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, dp_parallel_axis, - 3) + group_ranks = _get_comm_group( + process_mesh.processes, process_mesh.topology, dp_parallel_axis, 3 + ) dp_ring_id = new_process_group(group_ranks).id nparam = len(serial_startup_prog.all_parameters()) - nbroadcast_dp = len([ - op for op in dist_startup_prog.global_block().ops if - (op.type == "c_broadcast" and op.desc.attr("ring_id") == dp_ring_id) - ]) + nbroadcast_dp = len( + [ + op + for op in dist_startup_prog.global_block().ops + if ( + op.type == "c_broadcast" + and op.desc.attr("ring_id") == dp_ring_id + ) + ] + ) if nparam != nbroadcast_dp: return False if "dp" in mode and 'mp' in mode: - nbroadcast = len([ - op for op in dist_startup_prog.global_block().ops - if op.type == "c_broadcast" - ]) + nbroadcast = len( + [ + op + for op in dist_startup_prog.global_block().ops + if op.type == "c_broadcast" + ] + ) if len(var_need_broadcast) + nbroadcast_dp != nbroadcast: return False @@ -141,14 +172,17 @@ def get_output_var_dist_attr(op, main_program, dist_context): def check_equal_var_dist_attr(serial_dist_attr, dist_attr): equal = True - if serial_dist_attr.process_mesh != dist_attr.process_mesh or \ - serial_dist_attr.dims_mapping != dist_attr.dims_mapping: + if ( + serial_dist_attr.process_mesh != dist_attr.process_mesh + or serial_dist_attr.dims_mapping != dist_attr.dims_mapping + ): equal = False return equal -def check_equal_dist_op_attr(dist_context, dist_main_prog, serial_op, dist_ops, - dist_op_idx): +def check_equal_dist_op_attr( + dist_context, dist_main_prog, serial_op, dist_ops, dist_op_idx +): equal = True # get serial op's process_mesh and impl_idx serial_op_dist_attr = dist_context.get_op_dist_attr_for_program(serial_op) @@ -161,34 +195,40 @@ def check_equal_dist_op_attr(dist_context, dist_main_prog, serial_op, dist_ops, for in_varname in 
dist_ops[i].desc.input_arg_names(): in_var = dist_main_prog.global_block().var(in_varname) tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program( - in_var) + in_var + ) tensor_dims_mapping = tensor_dist_attr.dims_mapping in_var_dims_mapping = op_dist_attr.get_input_dims_mapping( - in_varname) + in_varname + ) if tensor_dims_mapping != in_var_dims_mapping: equal = False for out_varname in dist_ops[i].desc.output_arg_names(): out_var = dist_main_prog.global_block().var(out_varname) tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program( - out_var) + out_var + ) tensor_dims_mapping = tensor_dist_attr.dims_mapping out_var_dims_mapping = op_dist_attr.get_output_dims_mapping( - out_varname) + out_varname + ) if tensor_dims_mapping != out_var_dims_mapping: equal = False dist_op_process_mesh = op_dist_attr.process_mesh dist_op_impl_idx = op_dist_attr.impl_idx - if serial_op.desc.id() == dist_ops[i].desc.id() or \ - serial_process_mesh != dist_op_process_mesh or \ - serial_impl_idx != dist_op_impl_idx: + if ( + serial_op.desc.id() == dist_ops[i].desc.id() + or serial_process_mesh != dist_op_process_mesh + or serial_impl_idx != dist_op_impl_idx + ): equal = False return equal -def distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx): +def distributed_attr_check_for_dist_op( + serial_main_prog, dist_main_prog, dist_context, serial_op_idx, dist_op_idx +): equal = True serial_ops = serial_main_prog.global_block().ops @@ -200,27 +240,34 @@ def distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, if dist_op_0.type == "c_identity": # serial op input's dist_attr serial_in_dist_attr = get_input_var_dist_attr( - serial_op, serial_main_prog, dist_context) + serial_op, serial_main_prog, dist_context + ) # c_identity output's(new var) dist_attr identity_out_dist_attr = get_output_var_dist_attr( - dist_op_0, dist_main_prog, dist_context) + dist_op_0, dist_main_prog, dist_context + ) # check var dist_attr - equal = check_equal_var_dist_attr(serial_in_dist_attr, - identity_out_dist_attr) + equal = check_equal_var_dist_attr( + serial_in_dist_attr, identity_out_dist_attr + ) else: # serial op output's dist_attr serial_out_dist_attr = get_output_var_dist_attr( - serial_op, serial_main_prog, dist_context) + serial_op, serial_main_prog, dist_context + ) # dist op output's(new var) dist_attr - out_dist_attr = get_output_var_dist_attr(dist_op_0, dist_main_prog, - dist_context) + out_dist_attr = get_output_var_dist_attr( + dist_op_0, dist_main_prog, dist_context + ) # check var dist_attr - equal = check_equal_var_dist_attr(serial_out_dist_attr, - out_dist_attr) + equal = check_equal_var_dist_attr( + serial_out_dist_attr, out_dist_attr + ) # check op's dist_attr - equal = check_equal_dist_op_attr(dist_context, dist_main_prog, - serial_op, dist_ops, dist_op_idx[i]) + equal = check_equal_dist_op_attr( + dist_context, dist_main_prog, serial_op, dist_ops, dist_op_idx[i] + ) return equal @@ -242,45 +289,53 @@ def distributed_attr_check_for_program(dist_main_prog, dist_context): class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, 
std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout = nn.Dropout(dropout_ratio, mode="upscale_in_train") def forward(self, input): if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear0.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear0.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) else: - auto.shard_tensor(self.linear0.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, None]) - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, None]) + auto.shard_tensor( + self.linear0.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, None], + ) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, None], + ) out = self.norm(input) out = self.linear0(out) @@ -292,46 +347,61 @@ class MLPLayer(nn.Layer): def mlp_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') + input = static.data( + name="input", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input, - process_mesh=_global_process_mesh, - shard_spec=["dp", None, None]) - - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - dropout_ratio=0.1, - initializer_range=0.02) + auto.shard_tensor( + input, + process_mesh=_global_process_mesh, + shard_spec=["dp", None, None], + ) + + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = mlp(input) return train_program, start_program class TestMLPAutoPartitioner(unittest.TestCase): - def test_mlp_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) - - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - mlp_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) + + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(mlp_pretrain_forward) # parameter should not be partitioned self.assertTrue( - is_all_parameters_shape_equal(serial_main_prog, dist_main_prog)) + is_all_parameters_shape_equal(serial_main_prog, dist_main_prog) + ) self.assertTrue( - 
is_all_parameters_shape_equal(serial_startup_prog, - dist_startup_prog)) + is_all_parameters_shape_equal( + serial_startup_prog, dist_startup_prog + ) + ) # op in main prog should be the same serial_ops = serial_main_prog.global_block().ops @@ -343,152 +413,217 @@ class TestMLPAutoPartitioner(unittest.TestCase): # parameter initialization var_need_broadcast = [] self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=None, - dp_parallel_axis=0)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=None, + dp_parallel_axis=0, + ) + ) def test_mlp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - mlp_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(mlp_pretrain_forward) # param should be partition nrank = 4 # col parallel weights = ['linear_0.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = ['linear_0.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['linear_1.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = ['linear_1.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'layer_norm', 'c_identity', 'matmul_v2', 'elementwise_add', 'gelu', - 'matmul_v2', 'c_allreduce_sum', 'elementwise_add', 'dropout' + 'layer_norm', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'gelu', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', + 'dropout', ] self.assertTrue(dist_ops == ref_ops) # parameter initialization var_need_broadcast = sorted( - ['layer_norm_0.b_0', 'layer_norm_0.w_0', 'linear_1.b_0']) + ['layer_norm_0.b_0', 'layer_norm_0.w_0', 'linear_1.b_0'] + ) self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=0, - dp_parallel_axis=None)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=0, + dp_parallel_axis=None, + ) + ) # check var and op all have dist_attr in dist_main_program self.assertTrue( - distributed_attr_check_for_program(dist_main_prog, dist_context)) + 
distributed_attr_check_for_program(dist_main_prog, dist_context) + ) # check distribured attr for dist op serial_op_idx = [1, 4] dist_op_idx = [[1, 2], [5, 6]] self.assertTrue( - distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx)) + distributed_attr_check_for_dist_op( + serial_main_prog, + dist_main_prog, + dist_context, + serial_op_idx, + dist_op_idx, + ) + ) def test_mlp_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - mlp_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(mlp_pretrain_forward) # param should be partition nrank = 4 # col parallel weights = ['linear_0.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = ['linear_0.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['linear_1.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = ['linear_1.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'layer_norm', 'c_identity', 'matmul_v2', 'elementwise_add', 'gelu', - 'matmul_v2', 'c_allreduce_sum', 'elementwise_add', 'dropout' + 'layer_norm', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'gelu', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', + 'dropout', ] self.assertTrue(dist_ops == ref_ops) # parameter initialization var_need_broadcast = sorted( - ['layer_norm_0.b_0', 'layer_norm_0.w_0', 'linear_1.b_0']) + ['layer_norm_0.b_0', 'layer_norm_0.w_0', 'linear_1.b_0'] + ) self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=1, - dp_parallel_axis=0)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=1, + dp_parallel_axis=0, + ) + ) # check var and op all have dist_attr in dist_main_program self.assertTrue( - distributed_attr_check_for_program(dist_main_prog, dist_context)) + distributed_attr_check_for_program(dist_main_prog, dist_context) + ) # check distribured attr for dist op serial_op_idx = [1, 4] dist_op_idx = [[1, 2], [5, 6]] self.assertTrue( - distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx)) + distributed_attr_check_for_dist_op( + serial_main_prog, + dist_main_prog, + 
dist_context, + serial_op_idx, + dist_op_idx, + ) + ) class AttentionLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - sequence_len=512, - intermediate_size=4 * 1024, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + sequence_len=512, + intermediate_size=4 * 1024, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(AttentionLayer, self).__init__() self.hidden_size = hidden_size self.sequence_len = sequence_len @@ -497,38 +632,38 @@ class AttentionLayer(nn.Layer): self.vdim = self.embed_dim self.num_heads = num_heads self.head_dim = self.embed_dim // self.num_heads - assert self.head_dim * self.num_heads == self.embed_dim, \ - "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" self.dropout_ratio = dropout_ratio self.initializer_range = initializer_range self.training = True self.attn_mask = None weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.q_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) def forward(self, input): if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input, - process_mesh=_global_process_mesh, - shard_spec=["dp", None, None]) + auto.shard_tensor( + input, + process_mesh=_global_process_mesh, + shard_spec=["dp", None, None], + ) q = self.q_proj(input) q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) @@ -538,15 +673,21 @@ class AttentionLayer(nn.Layer): v = self.v_proj(input) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -554,10 +695,9 @@ class AttentionLayer(nn.Layer): v = tensor.transpose(x=v, perm=[0, 2, 1, 3]) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.attn_mask is not None: 
product = product + self.attn_mask @@ -565,10 +705,12 @@ class AttentionLayer(nn.Layer): weights = F.softmax(product) if self.dropout_ratio: - weights = F.dropout(weights, - self.dropout_ratio, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout_ratio, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -580,50 +722,65 @@ class AttentionLayer(nn.Layer): out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) return out def attn_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="query", - shape=[batch_size, sequence_len, hidden_size], - dtype='float32') - attn = AttentionLayer(hidden_size=hidden_size, - sequence_len=sequence_len, - intermediate_size=4 * hidden_size, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02) + input = static.data( + name="query", + shape=[batch_size, sequence_len, hidden_size], + dtype='float32', + ) + attn = AttentionLayer( + hidden_size=hidden_size, + sequence_len=sequence_len, + intermediate_size=4 * hidden_size, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = attn(input) return train_program, start_program class TestAttentionAutoPartitioner(unittest.TestCase): - def test_attn_dp(self): global _global_parallel_strategy _global_parallel_strategy = "dp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["dp"]) - - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - attn_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["dp"] + ) + + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(attn_pretrain_forward) # parameter should not be partitioned self.assertTrue( - is_all_parameters_shape_equal(serial_main_prog, dist_main_prog)) + is_all_parameters_shape_equal(serial_main_prog, dist_main_prog) + ) self.assertTrue( - is_all_parameters_shape_equal(serial_startup_prog, - dist_startup_prog)) + is_all_parameters_shape_equal( + serial_startup_prog, dist_startup_prog + ) + ) # op in main prog should be the same serial_ops = serial_main_prog.global_block().ops @@ -635,162 +792,247 @@ class TestAttentionAutoPartitioner(unittest.TestCase): # parameter initialization var_need_broadcast = [] self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=None, - dp_parallel_axis=0)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=None, + dp_parallel_axis=0, + ) + ) def test_attn_mp(self): global _global_parallel_strategy _global_parallel_strategy = "mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[0, 1, 2, 3], - dim_names=["mp"]) - - serial_main_prog, serial_startup_prog, 
dist_main_prog, dist_startup_prog, dist_context = get_programs( - attn_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[0, 1, 2, 3], dim_names=["mp"] + ) + + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(attn_pretrain_forward) # param should be partition nrank = 4 # col parallel weights = ['linear_0.w_0', 'linear_1.w_0', 'linear_2.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = ['linear_0.b_0', 'linear_1.b_0', 'linear_2.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['linear_3.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = ['linear_3.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'c_identity', 'matmul_v2', 'elementwise_add', 'reshape2', - 'transpose2', 'c_identity', 'matmul_v2', 'elementwise_add', - 'c_identity', 'matmul_v2', 'elementwise_add', 'reshape2', - 'transpose2', 'reshape2', 'transpose2', 'matmul', 'softmax', - 'dropout', 'matmul_v2', 'transpose2', 'reshape2', 'matmul_v2', - 'c_allreduce_sum', 'elementwise_add' + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'reshape2', + 'transpose2', + 'matmul', + 'softmax', + 'dropout', + 'matmul_v2', + 'transpose2', + 'reshape2', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', ] self.assertTrue(dist_ops == ref_ops) # parameter initialization var_need_broadcast = ['linear_3.b_0'] self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=0, - dp_parallel_axis=None)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=0, + dp_parallel_axis=None, + ) + ) # check var and op all have dist_attr in dist_main_program self.assertTrue( - distributed_attr_check_for_program(dist_main_prog, dist_context)) + distributed_attr_check_for_program(dist_main_prog, dist_context) + ) # check distribured attr for dist op serial_op_idx = [0, 4, 6, 18] dist_op_idx = [[0, 1], [5, 6], [8, 9], [21, 22]] self.assertTrue( - distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx)) + distributed_attr_check_for_dist_op( + serial_main_prog, + dist_main_prog, + dist_context, + serial_op_idx, + dist_op_idx, + ) + ) def test_attn_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", 
"mp"]) - - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - attn_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) + + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(attn_pretrain_forward) # param should be partition nrank = 4 # col parallel weights = ['linear_0.w_0', 'linear_1.w_0', 'linear_2.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = ['linear_0.b_0', 'linear_1.b_0', 'linear_2.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['linear_3.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = ['linear_3.b_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'c_identity', 'matmul_v2', 'elementwise_add', 'reshape2', - 'transpose2', 'c_identity', 'matmul_v2', 'elementwise_add', - 'c_identity', 'matmul_v2', 'elementwise_add', 'reshape2', - 'transpose2', 'reshape2', 'transpose2', 'matmul', 'softmax', - 'dropout', 'matmul_v2', 'transpose2', 'reshape2', 'matmul_v2', - 'c_allreduce_sum', 'elementwise_add' + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'reshape2', + 'transpose2', + 'matmul', + 'softmax', + 'dropout', + 'matmul_v2', + 'transpose2', + 'reshape2', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', ] self.assertTrue(dist_ops == ref_ops) # parameter initialization var_need_broadcast = ['linear_3.b_0'] self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=1, - dp_parallel_axis=0)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=1, + dp_parallel_axis=0, + ) + ) # check var and op all have dist_attr in dist_main_program self.assertTrue( - distributed_attr_check_for_program(dist_main_prog, dist_context)) + distributed_attr_check_for_program(dist_main_prog, dist_context) + ) # check distribured attr for dist op serial_op_idx = [0, 4, 6, 18] dist_op_idx = [[0, 1], [5, 6], [8, 9], [21, 22]] self.assertTrue( - distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx)) + distributed_attr_check_for_dist_op( + serial_main_prog, + dist_main_prog, + dist_context, + serial_op_idx, + dist_op_idx, + ) + ) class DecoderLayer(nn.Layer): - - def __init__(self, - vocab_size=32768, - hidden_size=1024, - sequence_len=512, - max_position_embeddings=512, - 
intermediate_size=4 * 1024, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02): + def __init__( + self, + vocab_size=32768, + hidden_size=1024, + sequence_len=512, + max_position_embeddings=512, + intermediate_size=4 * 1024, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ): super(DecoderLayer, self).__init__() self.vocab_size = vocab_size self.hidden_size = hidden_size @@ -806,57 +1048,64 @@ class DecoderLayer(nn.Layer): self.attn_mask = None self.head_dim = self.embed_dim // self.num_heads - assert self.head_dim * self.num_heads == self.embed_dim, \ - "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" self.word_embeddings = nn.Embedding( self.vocab_size, self.hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=self.initializer_range))) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ), + ), + ) self.position_embeddings = nn.Embedding( self.max_position_embeddings, self.hidden_size, - weight_attr=paddle.ParamAttr(name="pos_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=self.initializer_range))) + weight_attr=paddle.ParamAttr( + name="pos_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ), + ), + ) - weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ) bias_attr = None - self.q_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(self.embed_dim, - self.embed_dim, - weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + self.embed_dim, self.embed_dim, weight_attr, bias_attr=bias_attr + ) intermediate_size = 4 * self.hidden_size d_model = self.hidden_size dim_feedforward = intermediate_size - weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) self.dropout1 = nn.Dropout(self.dropout_ratio) self.dropout2 = nn.Dropout(self.dropout_ratio, mode="upscale_in_train") @@ -864,17 +1113,21 @@ class DecoderLayer(nn.Layer): def forward(self, input_ids, position_ids): if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input_ids, - 
process_mesh=_global_process_mesh, - shard_spec=["dp", None]) + auto.shard_tensor( + input_ids, + process_mesh=_global_process_mesh, + shard_spec=["dp", None], + ) input_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.word_embeddings.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.word_embeddings.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) embeddings = input_embeddings + position_embeddings embeddings = self.dropout1(embeddings) @@ -891,15 +1144,21 @@ class DecoderLayer(nn.Layer): v = self.v_proj(target) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -907,10 +1166,9 @@ class DecoderLayer(nn.Layer): v = tensor.transpose(x=v, perm=[0, 2, 1, 3]) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.attn_mask is not None: product = product + self.attn_mask @@ -918,10 +1176,12 @@ class DecoderLayer(nn.Layer): weights = F.softmax(product) if self.dropout_ratio: - weights = F.dropout(weights, - self.dropout_ratio, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout_ratio, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -933,13 +1193,17 @@ class DecoderLayer(nn.Layer): out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) else: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, None], + ) # Add residual residual = embeddings + self.dropout2(out) @@ -953,12 +1217,16 @@ class DecoderLayer(nn.Layer): out3 = self.linear1(out2) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear0.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear0.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) # Add residual final = residual + self.dropout3(out3) @@ -966,184 +1234,343 @@ class 
DecoderLayer(nn.Layer): def decoder_pretrain_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input_ids = static.data(name="input_ids", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') - decoder = DecoderLayer(vocab_size=32768, - hidden_size=hidden_size, - sequence_len=sequence_len, - max_position_embeddings=512, - intermediate_size=4 * hidden_size, - num_heads=16, - dropout_ratio=0.1, - initializer_range=0.02) + input_ids = static.data( + name="input_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + decoder = DecoderLayer( + vocab_size=32768, + hidden_size=hidden_size, + sequence_len=sequence_len, + max_position_embeddings=512, + intermediate_size=4 * hidden_size, + num_heads=16, + dropout_ratio=0.1, + initializer_range=0.02, + ) out = decoder(input_ids, position_ids) return train_program, start_program class TestDecoderLayerPartitioner(unittest.TestCase): - def test_decoder_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - decoder_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(decoder_pretrain_forward) # param should be partition nrank = 4 # col parallel weights = [ - 'linear_0.w_0', 'linear_1.w_0', 'linear_2.w_0', 'linear_4.w_0' + 'linear_0.w_0', + 'linear_1.w_0', + 'linear_2.w_0', + 'linear_4.w_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = [ - 'linear_0.b_0', 'linear_1.b_0', 'linear_2.b_0', 'linear_4.b_0' + 'linear_0.b_0', + 'linear_1.b_0', + 'linear_2.b_0', + 'linear_4.b_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['word_embeddings', 'linear_3.w_0', 'linear_5.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = [ - 'linear_3.b_0', 'pos_embeddings', 'layer_norm_0.b_0', - 'layer_norm_0.w_0', 'linear_5.b_0' + 'linear_3.b_0', + 'pos_embeddings', + 'layer_norm_0.b_0', + 'layer_norm_0.w_0', + 'linear_5.b_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'c_embedding', 'c_allreduce_sum', 'lookup_table_v2', - 'elementwise_add', 
'dropout', 'layer_norm', 'c_identity', - 'matmul_v2', 'elementwise_add', 'reshape2', 'transpose2', - 'c_identity', 'matmul_v2', 'elementwise_add', 'c_identity', - 'matmul_v2', 'elementwise_add', 'reshape2', 'transpose2', - 'reshape2', 'transpose2', 'matmul', 'softmax', 'dropout', - 'matmul_v2', 'transpose2', 'reshape2', 'matmul_v2', - 'c_allreduce_sum', 'elementwise_add', 'dropout', 'elementwise_add', - 'layer_norm', 'c_identity', 'matmul_v2', 'elementwise_add', 'gelu', - 'matmul_v2', 'c_allreduce_sum', 'elementwise_add', 'dropout', - 'elementwise_add' + 'c_embedding', + 'c_allreduce_sum', + 'lookup_table_v2', + 'elementwise_add', + 'dropout', + 'layer_norm', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'reshape2', + 'transpose2', + 'matmul', + 'softmax', + 'dropout', + 'matmul_v2', + 'transpose2', + 'reshape2', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', + 'dropout', + 'elementwise_add', + 'layer_norm', + 'c_identity', + 'matmul_v2', + 'elementwise_add', + 'gelu', + 'matmul_v2', + 'c_allreduce_sum', + 'elementwise_add', + 'dropout', + 'elementwise_add', ] self.assertTrue(dist_ops == ref_ops) # parameter initialization - var_need_broadcast = sorted([ - 'linear_3.b_0', 'pos_embeddings', 'layer_norm_0.b_0', - 'layer_norm_0.w_0', 'linear_5.b_0' - ]) + var_need_broadcast = sorted( + [ + 'linear_3.b_0', + 'pos_embeddings', + 'layer_norm_0.b_0', + 'layer_norm_0.w_0', + 'linear_5.b_0', + ] + ) self.assertTrue( - initialization_check(_global_parallel_strategy, - dist_context, - dist_startup_prog, - serial_startup_prog, - var_need_broadcast, - _global_process_mesh, - mp_parallel_axis=1, - dp_parallel_axis=0)) + initialization_check( + _global_parallel_strategy, + dist_context, + dist_startup_prog, + serial_startup_prog, + var_need_broadcast, + _global_process_mesh, + mp_parallel_axis=1, + dp_parallel_axis=0, + ) + ) # check var and op all have dist_attr in dist_main_program self.assertTrue( - distributed_attr_check_for_program(dist_main_prog, dist_context)) + distributed_attr_check_for_program(dist_main_prog, dist_context) + ) # check distribured attr serial_op_idx = [0, 5, 9, 11, 23, 28, 31] - dist_op_idx = [[0, 1], [6, 7], [11, 12], [14, 15], [27, 28], [33, 34], - [37, 38]] + dist_op_idx = [ + [0, 1], + [6, 7], + [11, 12], + [14, 15], + [27, 28], + [33, 34], + [37, 38], + ] self.assertTrue( - distributed_attr_check_for_dist_op(serial_main_prog, dist_main_prog, - dist_context, serial_op_idx, - dist_op_idx)) + distributed_attr_check_for_dist_op( + serial_main_prog, + dist_main_prog, + dist_context, + serial_op_idx, + dist_op_idx, + ) + ) def test_decoder_noparallel(self): global _global_parallel_strategy _global_parallel_strategy = "None" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["x", "y"]) - serial_main_prog, serial_startup_prog, dist_main_prog, dist_startup_prog, dist_context = get_programs( - decoder_pretrain_forward) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["x", "y"] + ) + ( + serial_main_prog, + serial_startup_prog, + dist_main_prog, + dist_startup_prog, + dist_context, + ) = get_programs(decoder_pretrain_forward) # param should be partition nrank = 1 # col parallel weights = [ - 'linear_0.w_0', 'linear_1.w_0', 'linear_2.w_0', 'linear_4.w_0' + 'linear_0.w_0', + 'linear_1.w_0', + 
'linear_2.w_0', + 'linear_4.w_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 1, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 1, nrank + ) + ) weights = [ - 'linear_0.b_0', 'linear_1.b_0', 'linear_2.b_0', 'linear_4.b_0' + 'linear_0.b_0', + 'linear_1.b_0', + 'linear_2.b_0', + 'linear_4.b_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) # row parallel weights = ['word_embeddings', 'linear_3.w_0', 'linear_5.w_0'] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, nrank)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, nrank + ) + ) weights = [ - 'linear_3.b_0', 'pos_embeddings', 'layer_norm_0.b_0', - 'layer_norm_0.w_0', 'linear_5.b_0' + 'linear_3.b_0', + 'pos_embeddings', + 'layer_norm_0.b_0', + 'layer_norm_0.w_0', + 'linear_5.b_0', ] self.assertTrue( - check_tensor_split(dist_main_prog, weights, serial_main_prog, - weights, 0, 1)) + check_tensor_split( + dist_main_prog, weights, serial_main_prog, weights, 0, 1 + ) + ) # row and col allreduce dist_ops = dist_main_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'lookup_table_v2', 'lookup_table_v2', 'elementwise_add', 'dropout', - 'layer_norm', 'matmul_v2', 'elementwise_add', 'reshape2', - 'transpose2', 'matmul_v2', 'elementwise_add', 'matmul_v2', - 'elementwise_add', 'reshape2', 'transpose2', 'reshape2', - 'transpose2', 'matmul', 'softmax', 'dropout', 'matmul_v2', - 'transpose2', 'reshape2', 'matmul_v2', 'elementwise_add', 'dropout', - 'elementwise_add', 'layer_norm', 'matmul_v2', 'elementwise_add', - 'gelu', 'matmul_v2', 'elementwise_add', 'dropout', 'elementwise_add' + 'lookup_table_v2', + 'lookup_table_v2', + 'elementwise_add', + 'dropout', + 'layer_norm', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'matmul_v2', + 'elementwise_add', + 'matmul_v2', + 'elementwise_add', + 'reshape2', + 'transpose2', + 'reshape2', + 'transpose2', + 'matmul', + 'softmax', + 'dropout', + 'matmul_v2', + 'transpose2', + 'reshape2', + 'matmul_v2', + 'elementwise_add', + 'dropout', + 'elementwise_add', + 'layer_norm', + 'matmul_v2', + 'elementwise_add', + 'gelu', + 'matmul_v2', + 'elementwise_add', + 'dropout', + 'elementwise_add', ] self.assertTrue(dist_ops == ref_ops) dist_ops = dist_startup_prog.global_block().ops dist_ops = [op.type for op in dist_ops] ref_ops = [ - 'gaussian_random', 'gaussian_random', 'gaussian_random', - 'fill_constant', 'gaussian_random', 'fill_constant', - 'gaussian_random', 'fill_constant', 'gaussian_random', - 'fill_constant', 'gaussian_random', 'fill_constant', - 'gaussian_random', 'fill_constant', 'fill_constant', - 'fill_constant', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast', - 'c_broadcast' + 'gaussian_random', + 'gaussian_random', + 'gaussian_random', + 'fill_constant', + 'gaussian_random', + 'fill_constant', + 'gaussian_random', + 
'fill_constant', + 'gaussian_random', + 'fill_constant', + 'gaussian_random', + 'fill_constant', + 'gaussian_random', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', + 'c_broadcast', ] self.assertTrue(dist_ops == ref_ops) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py index 990b42947e6dd30ffcd008ffc7ffb7f1e9c5e9c9..852c6ab74b1283ef50f2b588ec913bf65de16796 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py @@ -88,17 +88,19 @@ class MultiHeadAttention(nn.Layer): Cache = collections.namedtuple("Cache", ["k", "v"]) StaticCache = collections.namedtuple("StaticCache", ["k", "v"]) - def __init__(self, - embed_dim, - num_heads, - dropout=0., - kdim=None, - vdim=None, - need_weights=False, - weight_attr=None, - bias_attr=None, - topo=None, - fuse=False): + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + kdim=None, + vdim=None, + need_weights=False, + weight_attr=None, + bias_attr=None, + topo=None, + fuse=False, + ): super(MultiHeadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim @@ -109,38 +111,36 @@ class MultiHeadAttention(nn.Layer): self.fuse = fuse self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" if topo is None or topo.mp_info.size == 1: if self.fuse: assert self.kdim == embed_dim assert self.vdim == embed_dim - self.qkv_proj = nn.Linear(embed_dim, - 3 * embed_dim, - weight_attr, - bias_attr=bias_attr) + self.qkv_proj = nn.Linear( + embed_dim, 3 * embed_dim, weight_attr, bias_attr=bias_attr + ) else: - self.q_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = nn.Linear(self.kdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = nn.Linear(self.vdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = nn.Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) + self.q_proj = nn.Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = nn.Linear( + self.kdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = nn.Linear( + self.vdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = nn.Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) def _fuse_prepare_qkv(self, query): mix_layer = self.qkv_proj(query) - mix_layer = paddle.reshape_(mix_layer, - [0, 0, self.num_heads, 3 * self.head_dim]) + mix_layer = paddle.reshape_( + mix_layer, [0, 0, self.num_heads, 3 * self.head_dim] + ) mix_layer = paddle.transpose(mix_layer, [0, 2, 1, 3]) q, k, v = paddle.split(mix_layer, num_or_sections=3, axis=-1) return q, k, v @@ -154,9 
+154,11 @@ class MultiHeadAttention(nn.Layer): q = self.q_proj(query) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.q_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.q_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) q = tensor.transpose(x=q, perm=[0, 2, 1, 3]) @@ -190,12 +192,16 @@ class MultiHeadAttention(nn.Layer): v = self.v_proj(value) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.k_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.v_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) + auto.shard_tensor( + self.k_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.v_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) k = tensor.transpose(x=k, perm=[0, 2, 1, 3]) @@ -217,24 +223,22 @@ class MultiHeadAttention(nn.Layer): input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) v = layers.fill_constant_batch_size_like( input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) return self.Cache(k, v) else: # incremental_state with initial value, mainly for usage like UniLM return self.Cache(key, value) - def forward(self, - query, - key, - value, - attn_mask=None, - use_cache=False, - cache=None): + def forward( + self, query, key, value, attn_mask=None, use_cache=False, cache=None + ): r""" Applies multi-head attention to map queries and a set of key-value pairs to outputs. @@ -248,23 +252,25 @@ class MultiHeadAttention(nn.Layer): else: q, k, v = self._prepare_qkv(query, key, value, use_cache, cache) else: - q, k, v, cache = self._prepare_qkv(query, key, value, use_cache, - cache) + q, k, v, cache = self._prepare_qkv( + query, key, value, use_cache, cache + ) # scale dot product attention - product = layers.matmul(x=q, - y=k, - transpose_y=True, - alpha=self.head_dim**-0.5) + product = layers.matmul( + x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5 + ) if attn_mask is not None: product = product + attn_mask weights = F.softmax(product) if self.dropout: - weights = F.dropout(weights, - self.dropout, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -276,9 +282,11 @@ class MultiHeadAttention(nn.Layer): out = self.out_proj(out) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.out_proj.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.out_proj.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) outs = [out] if self.need_weights: @@ -293,12 +301,9 @@ class TransformerDecoder(nn.Layer): TransformerDecoder is a stack of N decoder layers. 
""" - def __init__(self, - decoder_layers, - num_layers, - norm=None, - hidden_size=None, - topo=None): + def __init__( + self, decoder_layers, num_layers, norm=None, hidden_size=None, topo=None + ): super(TransformerDecoder, self).__init__() self.topo = topo @@ -311,13 +316,15 @@ class TransformerDecoder(nn.Layer): raise ValueError("Only support LayerNorm") self.checkpoints = [] - def forward(self, - tgt, - memory, - tgt_mask=None, - memory_mask=None, - use_cache=False, - cache=None): + def forward( + self, + tgt, + memory, + tgt_mask=None, + memory_mask=None, + use_cache=False, + cache=None, + ): r""" Applies a stack of N Transformer decoder layers on inputs. If `norm` is provided, also applies layer normalization on the output of last decoder @@ -330,25 +337,31 @@ class TransformerDecoder(nn.Layer): for i, mod in enumerate(self.layers): if cache is None: if use_cache: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) new_caches.append(new_cache) else: - output = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache) + output = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache, + ) else: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - use_cache=use_cache, - cache=cache[i]) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + use_cache=use_cache, + cache=cache[i], + ) new_caches.append(new_cache) self.checkpoints.append(output.name) @@ -363,7 +376,7 @@ class TransformerDecoder(nn.Layer): produced by `TransformerDecoderLayer.gen_cache`. See `TransformerDecoderLayer.gen_cache` for more details. If `do_zip` is True, apply `zip` on these tuples to get a list with two elements. - """ + """ cache = [layer.gen_cache(memory) for layer in self.layers] if do_zip: cache = list(zip(*cache)) @@ -376,18 +389,20 @@ class TransformerDecoderLayer(nn.Layer): It contains multiheadattention and some linear layers. 
""" - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout=0.1, - activation="gelu", - attn_dropout=None, - act_dropout=None, - normalize_before=True, - weight_attr=None, - bias_attr=None, - topo=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout=0.1, + activation="gelu", + attn_dropout=None, + act_dropout=None, + normalize_before=True, + weight_attr=None, + bias_attr=None, + topo=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 @@ -400,21 +415,27 @@ class TransformerDecoderLayer(nn.Layer): weight_attrs = _convert_param_attr_to_list(weight_attr, 3) bias_attrs = _convert_param_attr_to_list(bias_attr, 3) - self.self_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[0], - bias_attr=bias_attrs[0], - topo=topo) + self.self_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[0], + bias_attr=bias_attrs[0], + topo=topo, + ) if topo is None or topo.mp_info.size == 1: - self.linear1 = nn.Linear(d_model, - dim_feedforward, - weight_attrs[2], - bias_attr=bias_attrs[2]) - self.linear2 = nn.Linear(dim_feedforward, - d_model, - weight_attrs[2], - bias_attr=bias_attrs[2]) + self.linear1 = nn.Linear( + d_model, + dim_feedforward, + weight_attrs[2], + bias_attr=bias_attrs[2], + ) + self.linear2 = nn.Linear( + dim_feedforward, + d_model, + weight_attrs[2], + bias_attr=bias_attrs[2], + ) self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5) self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5) @@ -431,8 +452,9 @@ class TransformerDecoderLayer(nn.Layer): if use_cache is False: tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, use_cache, cache) else: - tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask, - use_cache, cache) + tgt, incremental_cache = self.self_attn( + tgt, tgt, tgt, tgt_mask, use_cache, cache + ) tgt = residual + self.dropout1(tgt) if not self.normalize_before: tgt = self.norm1(tgt) @@ -442,12 +464,16 @@ class TransformerDecoderLayer(nn.Layer): tgt = self.norm2(tgt) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.linear1.weight, - process_mesh=_global_process_mesh, - shard_spec=[None, "mp"]) - auto.shard_tensor(self.linear2.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.linear1.weight, + process_mesh=_global_process_mesh, + shard_spec=[None, "mp"], + ) + auto.shard_tensor( + self.linear2.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) # tgt = self.dropout2( # self.linear2(F.gelu( @@ -463,8 +489,9 @@ class TransformerDecoderLayer(nn.Layer): return tgt if use_cache is False else (tgt, incremental_cache) def gen_cache(self, memory): - incremental_cache = self.self_attn.gen_cache(memory, - type=self.self_attn.Cache) + incremental_cache = self.self_attn.gen_cache( + memory, type=self.self_attn.Cache + ) return incremental_cache @@ -473,29 +500,38 @@ class GPTEmbeddings(nn.Layer): Include embeddings from word, position and token_type embeddings """ - def __init__(self, - vocab_size, - hidden_size=768, - hidden_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - topo=None): + def __init__( + self, + vocab_size, + hidden_size=768, + hidden_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + topo=None, + ): super(GPTEmbeddings, self).__init__() if topo is None or topo.mp_info.size == 1: self.word_embeddings = nn.Embedding( 
vocab_size, hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, - std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.position_embeddings = nn.Embedding( max_position_embeddings, hidden_size, - weight_attr=paddle.ParamAttr(name="pos_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range))) + weight_attr=paddle.ParamAttr( + name="pos_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) self.dropout = nn.Dropout(hidden_dropout_prob) @@ -508,9 +544,11 @@ class GPTEmbeddings(nn.Layer): input_embedings = self.word_embeddings(input_ids) if _global_parallel_strategy in ["mp", "dp_mp"]: - auto.shard_tensor(self.word_embeddings.weight, - process_mesh=_global_process_mesh, - shard_spec=["mp", None]) + auto.shard_tensor( + self.word_embeddings.weight, + process_mesh=_global_process_mesh, + shard_spec=["mp", None], + ) position_embeddings = self.position_embeddings(position_ids) embeddings = input_embedings + position_embeddings @@ -523,20 +561,22 @@ class GPTModel(nn.Layer): The base model of gpt. """ - def __init__(self, - vocab_size, - hidden_size=768, - num_hidden_layers=4, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - pad_token_id=0, - topo=None): + def __init__( + self, + vocab_size, + hidden_size=768, + num_hidden_layers=4, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + initializer_range=0.02, + pad_token_id=0, + topo=None, + ): super(GPTModel, self).__init__() self.pad_token_id = pad_token_id @@ -549,71 +589,94 @@ class GPTModel(nn.Layer): if self.pipline_mode: self.layer_per_stage = num_hidden_layers // self.topo.pp_info.size - self.embeddings = GPTEmbeddings(vocab_size, hidden_size, - hidden_dropout_prob, - max_position_embeddings, - type_vocab_size, self.initializer_range, - topo) + self.embeddings = GPTEmbeddings( + vocab_size, + hidden_size, + hidden_dropout_prob, + max_position_embeddings, + type_vocab_size, + self.initializer_range, + topo, + ) decoder_layers = nn.LayerList() for i in range(num_hidden_layers): DecoderLayer = TransformerDecoderLayer decoder_layers.append( - DecoderLayer(d_model=hidden_size, - nhead=num_attention_heads, - dim_feedforward=intermediate_size, - dropout=hidden_dropout_prob, - activation=hidden_act, - attn_dropout=attention_probs_dropout_prob, - act_dropout=hidden_dropout_prob, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Normal( - mean=0.0, std=self.initializer_range)), - bias_attr=None, - topo=topo)) + DecoderLayer( + d_model=hidden_size, + nhead=num_attention_heads, + dim_feedforward=intermediate_size, + dropout=hidden_dropout_prob, + activation=hidden_act, + attn_dropout=attention_probs_dropout_prob, + act_dropout=hidden_dropout_prob, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Normal( + mean=0.0, std=self.initializer_range + ) + ), + bias_attr=None, + topo=topo, + ) + ) Decoder = TransformerDecoder - self.decoder = Decoder(decoder_layers, - num_hidden_layers, - norm="LayerNorm", - hidden_size=hidden_size, - topo=topo) + self.decoder = Decoder( + 
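The shard_spec values used for these weights follow the usual tensor-parallel reading: [None, "mp"] partitions a 2-D weight along its second (column) axis across the model-parallel ranks, while ["mp", None] partitions it along its first (row) axis. A NumPy sketch of what each rank would hold, with hypothetical sizes and an assumed mp degree of 4:

import numpy as np

mp_degree = 4
w = np.zeros((768, 3072), dtype=np.float32)  # a hypothetical linear weight

col_shards = np.split(w, mp_degree, axis=1)  # shard_spec=[None, "mp"]: each rank holds (768, 768)
row_shards = np.split(w, mp_degree, axis=0)  # shard_spec=["mp", None]: each rank holds (192, 3072)
assert col_shards[0].shape == (768, 768)
assert row_shards[0].shape == (192, 3072)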
decoder_layers, + num_hidden_layers, + norm="LayerNorm", + hidden_size=hidden_size, + topo=topo, + ) self.checkpoints = [] - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - use_cache=False, - cache=None): + def forward( + self, + input_ids, + position_ids=None, + attention_mask=None, + use_cache=False, + cache=None, + ): self.checkpoints = [] if attention_mask is None: length = paddle.shape(input_ids)[1] # Use bool mask attention_mask = paddle.tensor.tril( - paddle.ones((length, length), - dtype=self.embeddings.word_embeddings.weight.dtype)) + paddle.ones( + (length, length), + dtype=self.embeddings.word_embeddings.weight.dtype, + ) + ) if position_ids is None: past_length = 0 if cache is not None: past_length = paddle.shape(cache[0].k)[-2] - position_ids = paddle.arange(past_length, - paddle.shape(input_ids)[-1] + - past_length, - dtype='int64') + position_ids = paddle.arange( + past_length, + paddle.shape(input_ids)[-1] + past_length, + dtype='int64', + ) position_ids = position_ids.unsqueeze(0) # .expand_as(input_ids) position_ids = paddle.fluid.layers.expand_as( - position_ids, input_ids) - embedding_output = self.embeddings(input_ids=input_ids, - position_ids=position_ids) + position_ids, input_ids + ) + embedding_output = self.embeddings( + input_ids=input_ids, position_ids=position_ids + ) # TODO, use registered buffer - causal_mask = paddle.tensor.triu(paddle.ones( - (paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1])) * -1e9, - diagonal=1) + causal_mask = paddle.tensor.triu( + paddle.ones( + (paddle.shape(input_ids)[-1], paddle.shape(input_ids)[-1]) + ) + * -1e9, + diagonal=1, + ) if attention_mask is not None: attention_mask = attention_mask + causal_mask @@ -623,11 +686,13 @@ class GPTModel(nn.Layer): # The tensor returned by triu not in static graph. 
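GPTModel.forward above builds its causal mask as an upper-triangular matrix of -1e9 (diagonal excluded) that is added to the attention scores, so the softmax effectively ignores future positions. A small NumPy equivalent, independent of the static-graph ops used here:

import numpy as np

seq_len = 5
causal_mask = np.triu(np.ones((seq_len, seq_len)) * -1e9, k=1)
# causal_mask[i, j] == -1e9 for j > i (future tokens) and 0 elsewhere;
# added to the attention scores before softmax, it suppresses attention to future positions.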
attention_mask.stop_gradient = True - encoder_outputs = self.decoder(embedding_output, - memory=None, - tgt_mask=attention_mask, - use_cache=use_cache, - cache=cache) + encoder_outputs = self.decoder( + embedding_output, + memory=None, + tgt_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) self.checkpoints.extend(self.decoder.checkpoints) return encoder_outputs @@ -649,11 +714,12 @@ class GPTForPretraining(nn.Layer): def parallel_matmul(self, lm_output, logit_weights, parallel_output, topo): if topo is not None and topo.mp_info.size > 1: input_parallel = paddle.distributed.collective._c_identity( - lm_output, group=None) + lm_output, group=None + ) - logits = paddle.matmul(input_parallel, - logit_weights, - transpose_y=True) + logits = paddle.matmul( + input_parallel, logit_weights, transpose_y=True + ) if parallel_output: return logits @@ -663,24 +729,29 @@ class GPTForPretraining(nn.Layer): logits = paddle.matmul(lm_output, logit_weights, transpose_y=True) return logits - def forward(self, - input_ids, - position_ids=None, - attention_mask=None, - masked_positions=None, - use_cache=False, - cache=None): - outputs = self.gpt(input_ids, - position_ids=position_ids, - attention_mask=attention_mask, - use_cache=use_cache, - cache=cache) + def forward( + self, + input_ids, + position_ids=None, + attention_mask=None, + masked_positions=None, + use_cache=False, + cache=None, + ): + outputs = self.gpt( + input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + use_cache=use_cache, + cache=cache, + ) if use_cache: encoder_outputs, cached_kvs = outputs[:2] else: encoder_outputs = outputs - logits = self.parallel_matmul(encoder_outputs, self.weight, True, - self.gpt.topo) + logits = self.parallel_matmul( + encoder_outputs, self.weight, True, self.gpt.topo + ) if use_cache: return logits, cached_kvs @@ -699,11 +770,14 @@ class GPTPretrainingCriterion(nn.Layer): if topo is None or topo.mp_info.size == 1: self.loss_func = paddle.nn.CrossEntropyLoss(reduction="none") else: - self.loss_func = paddle.distributed.collective._c_softmax_with_cross_entropy + self.loss_func = ( + paddle.distributed.collective._c_softmax_with_cross_entropy + ) def forward(self, prediction_scores, masked_lm_labels, loss_mask): - masked_lm_loss = self.loss_func(prediction_scores, - masked_lm_labels.unsqueeze(2)) + masked_lm_loss = self.loss_func( + prediction_scores, masked_lm_labels.unsqueeze(2) + ) loss_mask = loss_mask.reshape([-1]) masked_lm_loss = paddle.sum(masked_lm_loss.reshape([-1]) * loss_mask) @@ -712,45 +786,51 @@ class GPTPretrainingCriterion(nn.Layer): def gpt_pretrain_forward(train_program, startup_program): - with static.program_guard(train_program, - startup_program), utils.unique_name.guard(): + with static.program_guard( + train_program, startup_program + ), utils.unique_name.guard(): batch_size = 16 sequence_len = 512 - input_ids = static.data(name="input_ids", - shape=[batch_size, sequence_len], - dtype='int64') - position_ids = static.data(name="position_ids", - shape=[batch_size, sequence_len], - dtype='int64') + input_ids = static.data( + name="input_ids", shape=[batch_size, sequence_len], dtype='int64' + ) + position_ids = static.data( + name="position_ids", shape=[batch_size, sequence_len], dtype='int64' + ) attention_mask = static.data( name="attention_mask", shape=[batch_size, 1, sequence_len, sequence_len], - dtype='float64') - labels = static.data(name="labels", - shape=[batch_size, sequence_len], - dtype='int64') - loss_mask = static.data(name="loss_mask", - 
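GPTForPretraining.parallel_matmul ties the output projection to the embedding table: logits are hidden_states multiplied with the transposed embedding weight, with an identity collective inserted first in the model-parallel branch. In the non-parallel branch this reduces to a plain matmul; a NumPy sketch with hypothetical sizes:

import numpy as np

batch, seq_len, hidden, vocab = 2, 8, 16, 100  # hypothetical sizes
hidden_states = np.random.rand(batch, seq_len, hidden).astype(np.float32)
embedding_weight = np.random.rand(vocab, hidden).astype(np.float32)  # [vocab_size, hidden_size]

logits = hidden_states @ embedding_weight.T  # transpose_y=True: [batch, seq_len, vocab_size]
assert logits.shape == (batch, seq_len, vocab)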
shape=[batch_size, sequence_len], - dtype='float64') + dtype='float64', + ) + labels = static.data( + name="labels", shape=[batch_size, sequence_len], dtype='int64' + ) + loss_mask = static.data( + name="loss_mask", shape=[batch_size, sequence_len], dtype='float64' + ) if _global_parallel_strategy in ["dp", "dp_mp"]: - auto.shard_tensor(input_ids, - process_mesh=_global_process_mesh, - shard_spec=["dp", None]) - - gpt = GPTModel(vocab_size=32768, - hidden_size=768, - num_hidden_layers=2, - num_attention_heads=12, - intermediate_size=4096, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=1024, - type_vocab_size=16, - initializer_range=0.02, - pad_token_id=0, - topo=None) + auto.shard_tensor( + input_ids, + process_mesh=_global_process_mesh, + shard_spec=["dp", None], + ) + + gpt = GPTModel( + vocab_size=32768, + hidden_size=768, + num_hidden_layers=2, + num_attention_heads=12, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=1024, + type_vocab_size=16, + initializer_range=0.02, + pad_token_id=0, + topo=None, + ) model = GPTForPretraining(gpt) @@ -764,29 +844,26 @@ def gpt_pretrain_forward(train_program, startup_program): class FakeStrategy(object): - def __init__(self): self.amp = False self.recompute = False class FakeFleet(object): - def __init__(self): self.user_defined_optimizer = None self._user_defined_strategy = FakeStrategy() class TestGPTPartitioner(unittest.TestCase): - def test_gpt_dp_mp(self): global _global_parallel_strategy _global_parallel_strategy = "dp_mp" global _global_process_mesh - _global_process_mesh = auto.ProcessMesh(mesh=[[0, 1, 2, 3], - [4, 5, 6, 7]], - dim_names=["dp", "mp"]) + _global_process_mesh = auto.ProcessMesh( + mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["dp", "mp"] + ) train_program = static.Program() startup_program = static.Program() @@ -795,24 +872,33 @@ class TestGPTPartitioner(unittest.TestCase): dist_context.process_mesh = _global_process_mesh train_program, startup_program, loss = gpt_pretrain_forward( - train_program, startup_program) + train_program, startup_program + ) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) # serial backward pass - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) rank_id = 3 partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, auto_parallel_startup_prog, params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) nrank = 4 # col parallel @@ -822,59 +908,96 @@ class TestGPTPartitioner(unittest.TestCase): 'linear_10.w_0', ] self.assertTrue( - check_tensor_split(auto_parallel_main_prog, weights, - complete_train_program, weights, 1, nrank)) + check_tensor_split( + auto_parallel_main_prog, + weights, + complete_train_program, + weights, + 1, + nrank, + ) + ) # row parallel weights = ['word_embeddings', 'linear_9.w_0', 
'linear_11.w_0'] self.assertTrue( - check_tensor_split(auto_parallel_main_prog, weights, - complete_train_program, weights, 0, nrank)) + check_tensor_split( + auto_parallel_main_prog, + weights, + complete_train_program, + weights, + 0, + nrank, + ) + ) weights = ['pos_embeddings', 'layer_norm_0.b_0', 'layer_norm_4.w_0'] self.assertTrue( - check_tensor_split(auto_parallel_main_prog, weights, - complete_train_program, weights, 0, 1)) + check_tensor_split( + auto_parallel_main_prog, + weights, + complete_train_program, + weights, + 0, + 1, + ) + ) all_params = sorted( - [param.name for param in startup_program.all_parameters()]) + [param.name for param in startup_program.all_parameters()] + ) allreduce_grads = [ - 'layer_norm_5.tmp_2', 'layer_norm_5.tmp_2', 'layer_norm_5.tmp_2', - 'layer_norm_6.tmp_2', 'layer_norm_7.tmp_2', 'layer_norm_7.tmp_2', - 'layer_norm_7.tmp_2', 'layer_norm_8.tmp_2' + 'layer_norm_5.tmp_2', + 'layer_norm_5.tmp_2', + 'layer_norm_5.tmp_2', + 'layer_norm_6.tmp_2', + 'layer_norm_7.tmp_2', + 'layer_norm_7.tmp_2', + 'layer_norm_7.tmp_2', + 'layer_norm_8.tmp_2', ] process_mesh = _global_process_mesh mp_parallel_axis = 1 dp_parallel_axis = 0 - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, mp_parallel_axis, - 3) + group_ranks = _get_comm_group( + process_mesh.processes, process_mesh.topology, mp_parallel_axis, 3 + ) mp_ring_id = new_process_group(group_ranks).id - group_ranks = _get_comm_group(process_mesh.processes, - process_mesh.topology, dp_parallel_axis, - 3) + group_ranks = _get_comm_group( + process_mesh.processes, process_mesh.topology, dp_parallel_axis, 3 + ) dp_ring_id = new_process_group(group_ranks).id - tensor_parallel_allreduce_vars = sorted([ - op.desc.output_arg_names()[0].split("@")[0] - for op in auto_parallel_main_prog.global_block().ops - if (op.type == "c_allreduce_sum" and op.attr('op_role') == 1 - and op.desc.attr("ring_id") == mp_ring_id) - ]) - data_parallel_allreduce_vars = sorted([ - op.desc.output_arg_names()[0].split("@")[0] - for op in auto_parallel_main_prog.global_block().ops - if (op.type == "c_allreduce_sum" - and op.desc.attr("ring_id") == dp_ring_id) - ]) + tensor_parallel_allreduce_vars = sorted( + [ + op.desc.output_arg_names()[0].split("@")[0] + for op in auto_parallel_main_prog.global_block().ops + if ( + op.type == "c_allreduce_sum" + and op.attr('op_role') == 1 + and op.desc.attr("ring_id") == mp_ring_id + ) + ] + ) + data_parallel_allreduce_vars = sorted( + [ + op.desc.output_arg_names()[0].split("@")[0] + for op in auto_parallel_main_prog.global_block().ops + if ( + op.type == "c_allreduce_sum" + and op.desc.attr("ring_id") == dp_ring_id + ) + ] + ) self.assertTrue(all_params == data_parallel_allreduce_vars) self.assertTrue(allreduce_grads == tensor_parallel_allreduce_vars) self.assertTrue( - is_valid_completed_program(dist_context, auto_parallel_main_prog)) + is_valid_completed_program(dist_context, auto_parallel_main_prog) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py index c604bf299afacb96832356652b88b6b09dae1768..e47cc35d6afa7fc6f7a425de9a7460e78f3f086b 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py @@ -26,7 +26,10 @@ from paddle.distributed import fleet from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer from 
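test_gpt_dp_mp derives its communication groups from the 2 x 4 process mesh [[0, 1, 2, 3], [4, 5, 6, 7]] with dim_names ["dp", "mp"]: for a given rank, the group along one axis consists of the ranks that share its coordinate on the other axis. The helper below is my own illustration of that intent, not Paddle's _get_comm_group implementation:

import numpy as np

def comm_group(processes, topology, parallel_axis, rank):
    # Assumed semantics: all ranks whose mesh coordinates agree with `rank`
    # on every axis except `parallel_axis`.
    mesh = np.array(processes).reshape(topology)
    coord = np.argwhere(mesh == rank)[0]
    index = tuple(slice(None) if axis == parallel_axis else int(c)
                  for axis, c in enumerate(coord))
    return mesh[index].tolist()

processes, topology = list(range(8)), [2, 4]  # the dp x mp mesh used in the test
print(comm_group(processes, topology, 1, 3))  # mp group of rank 3 -> [0, 1, 2, 3]
print(comm_group(processes, topology, 0, 3))  # dp group of rank 3 -> [3, 7]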
paddle.distributed.auto_parallel.partitioner import Partitioner from paddle.distributed.auto_parallel.reshard import Resharder -from paddle.distributed.auto_parallel.process_group import _g_process_group_map, ProcessGroup +from paddle.distributed.auto_parallel.process_group import ( + _g_process_group_map, + ProcessGroup, +) paddle.enable_static() _global_parallel_strategy = None @@ -36,26 +39,26 @@ PP_MESH_1 = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -63,10 +66,12 @@ class MLPLayer(nn.Layer): auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None]) auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None]) else: - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, None]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, None] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, None] + ) out = self.norm(input) out = self.linear0(out) @@ -77,17 +82,18 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) if _global_parallel_strategy == "pp": auto.shard_tensor(input, PP_MESH_0, [None, None]) @@ -97,9 +103,11 @@ def mlp_forward(train_program, start_program): else: auto.shard_tensor(input, _global_process_mesh, [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -108,13 +116,16 @@ def mlp_forward(train_program, start_program): return loss, train_program, start_program -def get_dist_prog(train_program, - startup_program, - dist_context, - rank_id, - change_process_mesh=False): - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) +def get_dist_prog( + train_program, + startup_program, + dist_context, + rank_id, + change_process_mesh=False, +): + 
loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -124,30 +135,43 @@ def get_dist_prog(train_program, # serial forward & backward completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) if change_process_mesh: global PP_MESH_1 dist_context.get_tensor_dist_attr_for_program( - train_program.global_block( - ).vars["gelu_0.tmp_0"]).process_mesh = PP_MESH_1 - - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + train_program.global_block().vars["gelu_0.tmp_0"] + ).process_mesh = PP_MESH_1 + + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) # logical partition partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads) + auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + ) - return auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + return ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) def check_backward_dist_attr(dist_context, dist_main_prog, op_need_check): @@ -159,16 +183,28 @@ def check_backward_dist_attr(dist_context, dist_main_prog, op_need_check): has_dist_attr = False for var_name in op_need_check.input_arg_names: - if not op_dist_attr.get_input_dims_mapping(var_name) or \ - not dist_context.get_tensor_dist_attr_for_program(vars[var_name]).dims_mapping or \ - not dist_context.get_tensor_dist_attr_for_program(vars[var_name]).process_mesh: + if ( + not op_dist_attr.get_input_dims_mapping(var_name) + or not dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ).dims_mapping + or not dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ).process_mesh + ): has_dist_attr = False break if has_dist_attr: for var_name in op_need_check.output_arg_names: - if not dist_context.get_tensor_dist_attr_for_program(vars[var_name]).dims_mapping or \ - not dist_context.get_tensor_dist_attr_for_program(vars[var_name]).process_mesh: + if ( + not dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ).dims_mapping + or not dist_context.get_tensor_dist_attr_for_program( + vars[var_name] + ).process_mesh + ): has_dist_attr = False break @@ -184,14 +220,22 @@ def check_send_recv_result(dist_main_prog, rank_id): for idx, op in enumerate(ops): if op.type == "send_v2" and "gelu_0.tmp_0" in op.input_arg_names: send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0@GRAD" in op.output_arg_names[ - 0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0@GRAD" in op.output_arg_names[0] + ): recv_result = True else: for idx, op in enumerate(ops): - if op.type == "send_v2" and "gelu_0.tmp_0@GRAD" in 
op.input_arg_names: + if ( + op.type == "send_v2" + and "gelu_0.tmp_0@GRAD" in op.input_arg_names + ): send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0" in op.output_arg_names[0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0" in op.output_arg_names[0] + ): recv_result = True return send_result and recv_result @@ -200,8 +244,10 @@ def check_send_recv_result(dist_main_prog, rank_id): def check_initialization(dist_startup_prog, rank_id): if rank_id == 0: need_check_params = [ - "layer_norm_0.b_0", "layer_norm_0.w_0", "linear_0.w_0", - "linear_0.b_0" + "layer_norm_0.b_0", + "layer_norm_0.w_0", + "linear_0.w_0", + "linear_0.b_0", ] else: need_check_params = ['linear_1.w_0', 'linear_1.b_0'] @@ -216,7 +262,10 @@ def check_initialization(dist_startup_prog, rank_id): def check_initialization_for_dp(dist_startup_prog): need_check_params = [ - "layer_norm_0.b_0", "layer_norm_0.w_0", "linear_0.w_0", "linear_0.b_0" + "layer_norm_0.b_0", + "layer_norm_0.w_0", + "linear_0.w_0", + "linear_0.b_0", ] + ['linear_1.w_0', 'linear_1.b_0'] params = [] for var_name, var in dist_startup_prog.global_block().vars.items(): @@ -227,12 +276,14 @@ def check_initialization_for_dp(dist_startup_prog): if op.type == "c_broadcast": broadcast_varnames.append(op.output_arg_names[0]) - return sorted(params) == sorted(need_check_params) == sorted( - broadcast_varnames) + return ( + sorted(params) + == sorted(need_check_params) + == sorted(broadcast_varnames) + ) class TestMLPReshard(unittest.TestCase): - def test_complete_backward_annotation(self): global _global_process_mesh _global_process_mesh = auto.ProcessMesh(mesh=[0, 1]) @@ -242,7 +293,8 @@ class TestMLPReshard(unittest.TestCase): dist_context = DistributedContext() rank_id = 0 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, 0) + train_program, startup_program, dist_context, 0 + ) op_need_check = None for op in dist_main_prog.global_block().ops: @@ -252,8 +304,10 @@ class TestMLPReshard(unittest.TestCase): # grad op should have dist attr self.assertTrue( - check_backward_dist_attr(dist_context, dist_main_prog, - op_need_check)) + check_backward_dist_attr( + dist_context, dist_main_prog, op_need_check + ) + ) # clear _g_process_group_map _g_process_group_map.clear() @@ -274,9 +328,15 @@ class TestMLPReshard(unittest.TestCase): dist_context = DistributedContext() rank_id = 1 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, rank_id) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + train_program, startup_program, dist_context, rank_id + ) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() # check send and recv result @@ -303,9 +363,15 @@ class TestMLPReshard(unittest.TestCase): dist_context = DistributedContext() rank_id = 1 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, rank_id, True) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + train_program, startup_program, dist_context, rank_id, True + ) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() # check send and recv result self.assertTrue(check_send_recv_result(dist_main_prog, rank_id)) @@ -326,9 +392,15 @@ class 
TestMLPReshard(unittest.TestCase): dist_context = DistributedContext() rank_id = 0 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, rank_id) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + train_program, startup_program, dist_context, rank_id + ) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() # send and recv should not exist in dp scene. diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py index 4e4991579e9c7224739ab303aa6cd48075231a82..502d5dae1728d26fe9a1e35ec58b795a574c26f6 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py @@ -31,33 +31,34 @@ from paddle.distributed.auto_parallel.cluster import Cluster paddle.enable_static() _global_parallel_strategy = "dp_mp_pp" -_global_process_mesh = auto.ProcessMesh([[[0, 1], [4, 5]], [[2, 3], [6, 7]]], - dim_names=["x", "y", "z"]) +_global_process_mesh = auto.ProcessMesh( + [[[0, 1], [4, 5]], [[2, 3], [6, 7]]], dim_names=["x", "y", "z"] +) PP_MESH_0 = auto.ProcessMesh([[0, 1], [4, 5]], dim_names=["x", "y"]) PP_MESH_1 = auto.ProcessMesh([[2, 3], [6, 7]], dim_names=["x", "y"]) class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -68,8 +69,9 @@ class MLPLayer(nn.Layer): out = self.linear0(out) out = F.gelu(out, approximate=True) out = self.linear1(out) - param = paddle.fluid.layers.create_parameter([1024, 4096], - paddle.float32) + param = paddle.fluid.layers.create_parameter( + [1024, 4096], paddle.float32 + ) auto.shard_tensor(param, PP_MESH_1, [None, "y"]) out = paddle.fluid.layers.mul(out, param) @@ -77,24 +79,27 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) auto.shard_tensor(input, PP_MESH_0, ["x", None]) auto.shard_tensor(label, 
PP_MESH_1, ["x", None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -106,8 +111,9 @@ def mlp_forward(train_program, start_program): def get_dist_prog(train_program, startup_program, dist_context, rank_id): global _global_process_mesh dist_context.process_mesh = _global_process_mesh - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -117,24 +123,37 @@ def get_dist_prog(train_program, startup_program, dist_context, rank_id): # serial forward & backward completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) # logical partition partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads) + auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + ) - return auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + return ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) def check_send_recv_result(dist_main_prog, rank_id): @@ -145,14 +164,22 @@ def check_send_recv_result(dist_main_prog, rank_id): for idx, op in enumerate(ops): if op.type == "send_v2" and "gelu_0.tmp_0" in op.input_arg_names: send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0@GRAD" in op.output_arg_names[ - 0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0@GRAD" in op.output_arg_names[0] + ): recv_result = True else: for idx, op in enumerate(ops): - if op.type == "send_v2" and "gelu_0.tmp_0@GRAD" in op.input_arg_names: + if ( + op.type == "send_v2" + and "gelu_0.tmp_0@GRAD" in op.input_arg_names + ): send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0" in op.output_arg_names[0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0" in op.output_arg_names[0] + ): recv_result = True return send_result and recv_result @@ -168,14 +195,14 @@ def check_initialization_for_dpmppp(dist_startup_prog): class TestMLPReshard(unittest.TestCase): - def test_mlp_dpmppp(self): train_program = paddle.static.Program() startup_program = paddle.static.Program() dist_context = DistributedContext() rank_id = 2 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, 
rank_id) + train_program, startup_program, dist_context, rank_id + ) # test estimator cluster = Cluster() @@ -183,16 +210,23 @@ class TestMLPReshard(unittest.TestCase): cost_estimator = CostEstimator(train_program, cluster) global_cost = cost_estimator.estimate(dist_context) max_memory = cost_estimator._estimate_max_memory_by_dist_op( - dist_context) + dist_context + ) # test cache global_cost = cost_estimator.estimate(dist_context) max_memory = cost_estimator._estimate_max_memory_by_dist_op( - dist_context) + dist_context + ) assert global_cost.time > 0 assert max_memory > 0 - resharder = Resharder(dist_main_prog, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() # print_program_with_dist_attr(dist_main_prog, dist_context) # check send and recv result diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py index 21efd284b5d69b1b3e378b248eb55e6cce1d568f..62ebc76c6fa7b3ac8b00b2c01a21cab0105f2231 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py @@ -37,37 +37,40 @@ PP_MESH_1 = auto.ProcessMesh([2, 3], dim_names=["x"]) class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None self.word_embeddings = nn.Embedding( hidden_size, hidden_size, - weight_attr=paddle.ParamAttr(name="word_embeddings", - initializer=nn.initializer.Normal( - mean=0.0, std=initializer_range))) - - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) - self.linear2 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + weight_attr=paddle.ParamAttr( + name="word_embeddings", + initializer=nn.initializer.Normal( + mean=0.0, std=initializer_range + ), + ), + ) + + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) + self.linear2 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) def forward(self, input): auto.shard_tensor(self.word_embeddings.weight, PP_MESH_0, ["x", None]) @@ -76,8 +79,9 @@ class MLPLayer(nn.Layer): auto.shard_tensor(self.linear2.weight, PP_MESH_1, ["x", None]) w_out = self.word_embeddings(input) out = self.linear0(w_out) - param = paddle.fluid.layers.create_parameter([4096, 4096], - paddle.float32) + param = paddle.fluid.layers.create_parameter( + [4096, 4096], paddle.float32 + ) auto.shard_tensor(param, PP_MESH_0, ["x", None]) out = paddle.fluid.layers.mul(out, param) gelu_out = F.gelu(out, approximate=True) @@ -89,22 +93,25 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with 
static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 input = static.data(name="input", shape=[batch_size], dtype='int32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) auto.shard_tensor(input, PP_MESH_0, [None]) auto.shard_tensor(label, PP_MESH_1, [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -116,8 +123,9 @@ def mlp_forward(train_program, start_program): def get_dist_prog(train_program, startup_program, dist_context, rank_id): global _global_process_mesh dist_context.process_mesh = _global_process_mesh - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) fleet._user_defined_strategy = fleet.DistributedStrategy() fleet.user_defined_optimizer = paddle.fluid.optimizer.AdamOptimizer() @@ -127,23 +135,36 @@ def get_dist_prog(train_program, startup_program, dist_context, rank_id): # serial forward & backward completion completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - params_grads = parallelizer._generate_backward(complete_train_program, - startup_program, - loss, - parameter_list=None, - no_grad_set=None, - callbacks=None) + params_grads = parallelizer._generate_backward( + complete_train_program, + startup_program, + loss, + parameter_list=None, + no_grad_set=None, + callbacks=None, + ) # logical partition partitioner = Partitioner(dist_context, rank_id) - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads = partitioner.partition( - complete_train_program, startup_program, params_grads) + ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) = partitioner.partition( + complete_train_program, startup_program, params_grads + ) partitioned_optimize_ops = parallelizer._apply_optimize( - auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads) - return auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + auto_parallel_main_prog, auto_parallel_startup_prog, dist_params_grads + ) + return ( + auto_parallel_main_prog, + auto_parallel_startup_prog, + dist_params_grads, + ) def check_send_recv_result(dist_main_prog, rank_id): @@ -154,15 +175,22 @@ def check_send_recv_result(dist_main_prog, rank_id): for idx, op in enumerate(ops): if op.type == "send_v2" and "gelu_0.tmp_0" in op.input_arg_names: send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0@GRAD" in op.output_arg_names[ - 0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0@GRAD" in op.output_arg_names[0] + ): recv_result = True else: for idx, op in enumerate(ops): - if op.type == "send_v2" and "gelu_0.tmp_0@GRAD" in op.input_arg_names[ - 0]: + if ( + op.type == "send_v2" + and "gelu_0.tmp_0@GRAD" in op.input_arg_names[0] + ): send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0" in op.output_arg_names[0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0" in op.output_arg_names[0] + ): 
recv_result = True return send_result and recv_result @@ -196,23 +224,29 @@ def check_allgather(dist_main_program): class TestMLPReshard(unittest.TestCase): - def test_mlp_mppp(self): train_program = paddle.static.Program() startup_program = paddle.static.Program() dist_context = DistributedContext() rank_id = 2 dist_main_prog, dist_startup_prog, dist_params_grads = get_dist_prog( - train_program, startup_program, dist_context, rank_id) - resharder = Resharder(dist_main_prog, dist_startup_prog, rank_id, - dist_context, dist_params_grads) + train_program, startup_program, dist_context, rank_id + ) + resharder = Resharder( + dist_main_prog, + dist_startup_prog, + rank_id, + dist_context, + dist_params_grads, + ) resharder.reshard() # check send and recv result self.assertTrue(check_send_recv_result(dist_main_prog, rank_id)) # parameter which not been sliced should be the same in the mp scene self.assertTrue( - check_initialization_for_mppp(dist_startup_prog, rank_id)) + check_initialization_for_mppp(dist_startup_prog, rank_id) + ) def test_allgather(self): train_program = paddle.static.Program() @@ -224,8 +258,9 @@ class TestMLPReshard(unittest.TestCase): w = paddle.static.data(name="w", shape=[4, 4], dtype='float32') w = auto.shard_tensor(w, process_mesh, [None, None]) - y = paddle.distributed.shard_op(paddle.matmul, process_mesh, - [[None, None], [None, None]])(x, w) + y = paddle.distributed.shard_op( + paddle.matmul, process_mesh, [[None, None], [None, None]] + )(x, w) rank_id = 0 dist_context = DistributedContext() @@ -233,10 +268,14 @@ class TestMLPReshard(unittest.TestCase): partitioner = Partitioner(dist_context, rank_id) completer = Completer(dist_context) complete_train_program = completer.complete_forward_annotation( - train_program) + train_program + ) dist_context.block_state.parse_forward_blocks(complete_train_program) - partitioned_main_prog, partitioned_startup_prog, partitioned_params_grads = partitioner.partition( - complete_train_program, startup_program, []) + ( + partitioned_main_prog, + partitioned_startup_prog, + partitioned_params_grads, + ) = partitioner.partition(complete_train_program, startup_program, []) # test estimator cluster = Cluster() @@ -244,16 +283,23 @@ class TestMLPReshard(unittest.TestCase): cost_estimator = CostEstimator(train_program, cluster) global_cost = cost_estimator.estimate(dist_context) max_memory = cost_estimator._estimate_max_memory_by_dist_op( - dist_context) + dist_context + ) # test cache global_cost = cost_estimator.estimate(dist_context) max_memory = cost_estimator._estimate_max_memory_by_dist_op( - dist_context) + dist_context + ) assert global_cost.time > 0 assert max_memory > 0 - resharder = Resharder(partitioned_main_prog, partitioned_startup_prog, - rank_id, dist_context, partitioned_params_grads) + resharder = Resharder( + partitioned_main_prog, + partitioned_startup_prog, + rank_id, + dist_context, + partitioned_params_grads, + ) resharder.reshard() # the x should not be slice self.assertTrue(check_allgather(partitioned_main_prog)) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py index ff91bfe19f5ba9cb1be022b20007024c643d166f..697998b9de00fc032ee3fdcc214d65d350bb8d5e 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py @@ -15,6 +15,7 @@ import unittest import os + if os.getenv("CUDA_VISIBLE_DEVICES", None) is 
None: os.environ["CUDA_VISIBLE_DEVICES"] = '0' @@ -24,7 +25,9 @@ import paddle.static as static import paddle.nn.functional as F import paddle.utils as utils from paddle.distributed.fleet import auto -from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context +from paddle.distributed.auto_parallel.dist_context import ( + get_default_distributed_context, +) from paddle.distributed import fleet paddle.enable_static() @@ -33,26 +36,26 @@ _global_process_mesh = None class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -60,10 +63,12 @@ class MLPLayer(nn.Layer): auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None]) auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None]) else: - auto.shard_tensor(self.linear0.weight, _global_process_mesh, - [None, None]) - auto.shard_tensor(self.linear1.weight, _global_process_mesh, - [None, None]) + auto.shard_tensor( + self.linear0.weight, _global_process_mesh, [None, None] + ) + auto.shard_tensor( + self.linear1.weight, _global_process_mesh, [None, None] + ) out = self.norm(input) out = self.linear0(out) @@ -74,17 +79,18 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) if _global_parallel_strategy == "pp": auto.shard_tensor(input, PP_MESH_0, [None, None]) @@ -94,9 +100,11 @@ def mlp_forward(train_program, start_program): else: auto.shard_tensor(input, _global_process_mesh, [None, None]) - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -105,8 +113,9 @@ def mlp_forward(train_program, start_program): return loss, train_program, start_program -def get_dist_prog_with_parallelizer(train_program, startup_program, - dist_context): +def get_dist_prog_with_parallelizer( + train_program, startup_program, dist_context +): global _global_process_mesh dist_strategy = 
fleet.DistributedStrategy() @@ -118,18 +127,25 @@ def get_dist_prog_with_parallelizer(train_program, startup_program, dist_strategy.semi_auto = True fleet.init(is_collective=True, strategy=dist_strategy) - loss, train_program, startup_program = mlp_forward(train_program, - startup_program) - - optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001, - beta1=0.9, - beta2=0.999, - epsilon=1e-08, - grad_clip=None) + loss, train_program, startup_program = mlp_forward( + train_program, startup_program + ) + + optimizer = paddle.fluid.optimizer.AdamOptimizer( + learning_rate=0.00001, + beta1=0.9, + beta2=0.999, + epsilon=1e-08, + grad_clip=None, + ) optimizer = fleet.distributed_optimizer(optimizer) - _, _, distributed_startup_program, distributed_main_program = optimizer.minimize( - loss, startup_program) + ( + _, + _, + distributed_startup_program, + distributed_main_program, + ) = optimizer.minimize(loss, startup_program) return distributed_main_program, distributed_startup_program @@ -142,21 +158,28 @@ def check_send_recv_result(dist_main_prog, rank_id): for idx, op in enumerate(ops): if op.type == "send_v2" and "gelu_0.tmp_0" in op.input_arg_names: send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0@GRAD" in op.output_arg_names[ - 0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0@GRAD" in op.output_arg_names[0] + ): recv_result = True else: for idx, op in enumerate(ops): - if op.type == "send_v2" and "gelu_0.tmp_0@GRAD" in op.input_arg_names: + if ( + op.type == "send_v2" + and "gelu_0.tmp_0@GRAD" in op.input_arg_names + ): send_result = True - if op.type == "recv_v2" and "gelu_0.tmp_0" in op.output_arg_names[0]: + if ( + op.type == "recv_v2" + and "gelu_0.tmp_0" in op.output_arg_names[0] + ): recv_result = True return send_result and recv_result class TestMLPReshard(unittest.TestCase): - def test_mlp_serial(self): global _global_parallel_strategy _global_parallel_strategy = None @@ -168,7 +191,8 @@ class TestMLPReshard(unittest.TestCase): dist_context = get_default_distributed_context() rank_id = 0 dist_main_prog, dist_startup_prog = get_dist_prog_with_parallelizer( - train_program, startup_program, dist_context) + train_program, startup_program, dist_context + ) # send and recv should not exist in serial scene. 
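The check_send_recv_result helpers used throughout these reshard tests all follow the same pattern: scan the partitioned program's ops for a send_v2 of the boundary activation gelu_0.tmp_0 and a recv_v2 of its gradient, or the reverse on the other rank; in the serial test above the scan is expected to find neither. A stripped-down version of that scan over a made-up op list:

ops = [
    ("send_v2", ["gelu_0.tmp_0"], []),          # (type, input names, output names), made-up entries
    ("recv_v2", [], ["gelu_0.tmp_0@GRAD"]),
]
send_found = any(t == "send_v2" and "gelu_0.tmp_0" in ins for t, ins, _ in ops)
recv_found = any(t == "recv_v2" and outs and "gelu_0.tmp_0@GRAD" in outs[0] for t, _, outs in ops)
assert send_found and recv_found  # a serial program would yield False for both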
self.assertFalse(check_send_recv_result(dist_main_prog, rank_id)) diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_save_load.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_save_load.py index b2a478b365b1b74cabd948a255c782e29dd92e0f..7b9cf4c0004a49e84f860290b7ae5d2058a781a4 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_save_load.py @@ -18,7 +18,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestAutoParallelSaveLoad(TestMultipleGpus): - def test_auto_parallel_save_load(self): self.run_mnist_2gpu('auto_parallel_save_load.py') diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py index cb5e4e00e9ac2fcf81a9adce229eeac370a141a4..98880667e3410eb3401db55c7e49ad5ea0da524e 100755 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py @@ -22,35 +22,43 @@ import paddle.utils as utils from paddle.distributed.fleet import auto from paddle.distributed.auto_parallel.planner import PlanSpace from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.dist_attribute import TensorDistributedAttribute -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute -from paddle.distributed.auto_parallel.utils import update_op_dims_mapping_by_default_dist_impl -from paddle.distributed.auto_parallel.utils import update_op_dims_mapping_by_elementwise_like_dist_impl +from paddle.distributed.auto_parallel.dist_attribute import ( + TensorDistributedAttribute, +) +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) +from paddle.distributed.auto_parallel.utils import ( + update_op_dims_mapping_by_default_dist_impl, +) +from paddle.distributed.auto_parallel.utils import ( + update_op_dims_mapping_by_elementwise_like_dist_impl, +) paddle.enable_static() class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -64,21 +72,24 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sequence_len = 512 - input = static.data(name="input", - shape=[batch_size, hidden_size], - dtype='float32') - label = static.data(name="label", - shape=[batch_size, 
1], - dtype='float32') + input = static.data( + name="input", shape=[batch_size, hidden_size], dtype='float32' + ) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) loss_func = paddle.nn.CrossEntropyLoss(reduction="none") - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = loss_func(predict, label) @@ -98,18 +109,22 @@ def set_default_dist_attr(program, dist_context, process_mesh): tensor_dist_attr.process_mesh = process_mesh tensor_dist_attr.dims_mapping = [-1 for i in vars[var_name].shape] dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) - op_dist_attr.set_input_dims_mapping(var_name, - tensor_dist_attr.dims_mapping) + vars[var_name], tensor_dist_attr + ) + op_dist_attr.set_input_dims_mapping( + var_name, tensor_dist_attr.dims_mapping + ) for var_name in op.output_arg_names: tensor_dist_attr = TensorDistributedAttribute() tensor_dist_attr.process_mesh = process_mesh tensor_dist_attr.dims_mapping = [-1 for i in vars[var_name].shape] dist_context.set_tensor_dist_attr_for_program( - vars[var_name], tensor_dist_attr) - op_dist_attr.set_output_dims_mapping(var_name, - tensor_dist_attr.dims_mapping) + vars[var_name], tensor_dist_attr + ) + op_dist_attr.set_output_dims_mapping( + var_name, tensor_dist_attr.dims_mapping + ) dist_context.set_op_dist_attr_for_program(op, op_dist_attr) dist_context.add_process_mesh(process_mesh) @@ -123,73 +138,111 @@ def check_process_meshes(processes): def check_pipeline_enumerater(program, process_mesh_topology): - valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program( - program, process_mesh_topology, True) - if valid_dist_attr_dict and len( - pipeline_process_meshes) > 1 and not global_process_mesh: + ( + valid_dist_attr_dict, + pipeline_process_meshes, + global_process_mesh, + ) = PlanSpace.enum_valid_dist_attr_for_program( + program, process_mesh_topology, True + ) + if ( + valid_dist_attr_dict + and len(pipeline_process_meshes) > 1 + and not global_process_mesh + ): return True return False def check_nonpipeline_enumerater(program, process_mesh_topology): - valid_dist_attr_dict, pipeline_process_meshes, global_process_mesh = PlanSpace.enum_valid_dist_attr_for_program( - program, process_mesh_topology, False) - if valid_dist_attr_dict and not pipeline_process_meshes and global_process_mesh: + ( + valid_dist_attr_dict, + pipeline_process_meshes, + global_process_mesh, + ) = PlanSpace.enum_valid_dist_attr_for_program( + program, process_mesh_topology, False + ) + if ( + valid_dist_attr_dict + and not pipeline_process_meshes + and global_process_mesh + ): return True return False class TestMLPSearcher(unittest.TestCase): - def test_update(self): train_program = paddle.static.Program() startup_program = paddle.static.Program() - _, train_program, startup_program = mlp_forward(train_program, - startup_program) + _, train_program, startup_program = mlp_forward( + train_program, startup_program + ) global_process_mesh = auto.ProcessMesh(mesh=[0, 1]) dist_context = DistributedContext() set_default_dist_attr(train_program, dist_context, global_process_mesh) ops = train_program.global_block().ops vars = train_program.global_block().vars - from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container - from 
paddle.distributed.auto_parallel.operators.common import is_elementwise_op + from paddle.distributed.auto_parallel.operators.common import ( + get_distributed_operator_impl_container, + ) + from paddle.distributed.auto_parallel.operators.common import ( + is_elementwise_op, + ) from paddle.distributed.auto_parallel.dist_op import DistributedOperator for op in ops: dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + op.type + ) if dist_op_impl_container is None: op_dist_attr = dist_context.get_op_dist_attr_for_program(op) dist_op = DistributedOperator(op, op_dist_attr) if is_elementwise_op(op.type): - changed = update_op_dims_mapping_by_elementwise_like_dist_impl( - dist_op) + changed = ( + update_op_dims_mapping_by_elementwise_like_dist_impl( + dist_op + ) + ) self.assertFalse(changed) dist_op.dist_attr.set_output_dims_mapping( - op.output_arg_names[0], [0] + [ - -1 for i in range( - 1, len(vars[op.output_arg_names[0]].shape)) - ]) + op.output_arg_names[0], + [0] + + [ + -1 + for i in range( + 1, len(vars[op.output_arg_names[0]].shape) + ) + ], + ) try: changed = update_op_dims_mapping_by_elementwise_like_dist_impl( - dist_op) + dist_op + ) except: continue self.assertTrue(changed) else: changed = update_op_dims_mapping_by_default_dist_impl( - dist_op) + dist_op + ) self.assertFalse(changed) dist_op.dist_attr.set_output_dims_mapping( - op.output_arg_names[0], [0] + [ - -1 for i in range( - 1, len(vars[op.output_arg_names[0]].shape)) - ]) + op.output_arg_names[0], + [0] + + [ + -1 + for i in range( + 1, len(vars[op.output_arg_names[0]].shape) + ) + ], + ) try: changed = update_op_dims_mapping_by_default_dist_impl( - dist_op) + dist_op + ) except: continue self.assertTrue(changed) @@ -200,13 +253,16 @@ class TestMLPSearcher(unittest.TestCase): train_program = paddle.static.Program() startup_program = paddle.static.Program() - _, train_program, startup_program = mlp_forward(train_program, - startup_program) + _, train_program, startup_program = mlp_forward( + train_program, startup_program + ) process_mesh_topology = [4] self.assertTrue( - check_pipeline_enumerater(train_program, process_mesh_topology)) + check_pipeline_enumerater(train_program, process_mesh_topology) + ) self.assertTrue( - check_nonpipeline_enumerater(train_program, process_mesh_topology)) + check_nonpipeline_enumerater(train_program, process_mesh_topology) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py index fd84c9e6f0c9906c081897a2196ee453cc2c1905..0962b2e8d5bf26fd5e61f7975663a9219da45f0a 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py @@ -20,8 +20,12 @@ import paddle.nn.functional as F import paddle.utils as utils import paddle.fluid.core as core from paddle.fluid import layers -from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute +from paddle.distributed.auto_parallel.operators.common import ( + get_distributed_operator_impl_container, +) +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) from paddle.distributed.auto_parallel.dist_op import DistributedOperator paddle.enable_static() @@ -29,26 +33,26 @@ device = "gpu" if core.is_compiled_with_cuda() else "cpu" 
class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -61,8 +65,9 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sqrt_hidden_size = 32 @@ -75,16 +80,20 @@ def mlp_forward(train_program, start_program): input = embedding(input) input = paddle.reshape(input, [hidden_size, batch_size]) input = paddle.transpose(input, perm=[1, 0]) - matmulinput = static.data(name="matmulinput", - shape=[hidden_size, hidden_size], - dtype='float32') + matmulinput = static.data( + name="matmulinput", + shape=[hidden_size, hidden_size], + dtype='float32', + ) input = layers.matmul(x=input, y=matmulinput) - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -95,37 +104,38 @@ def mlp_forward(train_program, start_program): class TestCompatible(unittest.TestCase): - def test_matmulv2_matmul_2_compatible(self): valid_op_dist_attr_list = [] program = paddle.static.Program() startup_program = paddle.static.Program() loss, program, start_program = mlp_forward(program, startup_program) - with static.program_guard(program, - start_program), utils.unique_name.guard(): - matmulx3 = static.data(name="matmulx3", - shape=[6, 2, 6], - dtype='float32') - matmuly3 = static.data(name="matmuly3", - shape=[6, 6], - dtype='float32') + with static.program_guard( + program, start_program + ), utils.unique_name.guard(): + matmulx3 = static.data( + name="matmulx3", shape=[6, 2, 6], dtype='float32' + ) + matmuly3 = static.data( + name="matmuly3", shape=[6, 6], dtype='float32' + ) output1 = paddle.matmul(x=matmulx3, y=matmuly3) output_1 = layers.matmul(x=matmulx3, y=matmuly3) - matmulx4 = static.data(name="matmulx4", - shape=[6, 6, 2, 6], - dtype='float32') - matmuly4 = static.data(name="matmuly4", - shape=[6, 6, 6, 6], - dtype='float32') + matmulx4 = static.data( + name="matmulx4", shape=[6, 6, 2, 6], dtype='float32' + ) + matmuly4 = static.data( + name="matmuly4", shape=[6, 6, 6, 6], dtype='float32' + ) output2 = paddle.matmul(x=matmulx4, y=matmuly4) output_2 = layers.matmul(x=matmulx4, y=matmuly4) 
ops = program.global_block().ops vars = program.global_block().vars for idx, op in enumerate(ops): if op.type == 'matmul_v2' or op.type == 'matmul': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() X = op.input_arg_names[0] @@ -135,98 +145,157 @@ class TestCompatible(unittest.TestCase): op_dist_attr.set_input_dims_mapping(X, [-1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, -1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1]) - self.assertTrue(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, 1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 3 and len(vars[Y].shape) == 2: op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, -1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1]) - self.assertTrue(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [1, -1, -1]) op_dist_attr.set_input_dims_mapping(X, [-1, -1, 1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + 
impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 4 and len(vars[Y].shape) == 4: op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, -1, -1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1, -1]) - self.assertTrue(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [0, -1, -1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [0, -1, -1, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, 0, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, -1, 0, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, -1, 1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, -1, 0, -1]) - self.assertFalse(impls[2].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[2].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) def test_matmulv2_matmul_1_compatible(self): valid_op_dist_attr_list = [] program = paddle.static.Program() startup_program = paddle.static.Program() loss, program, start_program = mlp_forward(program, startup_program) - with static.program_guard(program, - start_program), utils.unique_name.guard(): - matmulx3 = static.data(name="matmulx3", - shape=[6, 2, 6], - dtype='float32') - matmuly3 = static.data(name="matmuly3", - shape=[6, 6], - dtype='float32') + with static.program_guard( + program, start_program + ), utils.unique_name.guard(): + matmulx3 = static.data( + name="matmulx3", shape=[6, 2, 6], dtype='float32' + ) + matmuly3 = static.data( + name="matmuly3", shape=[6, 6], dtype='float32' + ) output1 = paddle.matmul(x=matmulx3, y=matmuly3) output_1 = layers.matmul(x=matmulx3, y=matmuly3) - matmulx4 = static.data(name="matmulx4", - shape=[6, 6, 6, 6], - dtype='float32') - matmuly4 = static.data(name="matmuly4", - shape=[6, 6, 6, 6], - dtype='float32') + matmulx4 = static.data( + name="matmulx4", shape=[6, 6, 6, 6], dtype='float32' + ) + matmuly4 = static.data( + name="matmuly4", shape=[6, 6, 6, 6], dtype='float32' + ) output2 = paddle.matmul(x=matmulx4, y=matmuly4) output_2 = layers.matmul(x=matmulx4, y=matmuly4) ops = program.global_block().ops vars = program.global_block().vars for idx, op in enumerate(ops): if op.type == 'matmul_v2' or op.type == 'matmul': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = 
dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() X = op.input_arg_names[0] @@ -238,86 +307,133 @@ class TestCompatible(unittest.TestCase): op_dist_attr.set_output_dims_mapping(out, [-1, -1]) dist_op = DistributedOperator(op, op_dist_attr) op_dist_attr.set_output_dims_mapping(out, [1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 3 and len(vars[Y].shape) == 2: op_dist_attr.set_input_dims_mapping(X, [-1, -1, 1]) op_dist_attr.set_input_dims_mapping(Y, [1, -1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1]) - self.assertTrue(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [1, -1, 1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(out, [-1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 0, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 4 and len(vars[Y].shape) == 4: op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1, 1]) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, 1, -1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1, -1]) - self.assertTrue(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [0, -1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [0, -1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, 0, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 
-1, 0, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, -1, 1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, -1, 0, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) def test_matmulv2_matmul_0_compatible(self): valid_op_dist_attr_list = [] program = paddle.static.Program() startup_program = paddle.static.Program() loss, program, start_program = mlp_forward(program, startup_program) - with static.program_guard(program, - start_program), utils.unique_name.guard(): - matmulx3 = static.data(name="matmulx3", - shape=[6, 2, 6], - dtype='float32') - matmuly3 = static.data(name="matmuly3", - shape=[6, 6], - dtype='float32') + with static.program_guard( + program, start_program + ), utils.unique_name.guard(): + matmulx3 = static.data( + name="matmulx3", shape=[6, 2, 6], dtype='float32' + ) + matmuly3 = static.data( + name="matmuly3", shape=[6, 6], dtype='float32' + ) output1 = paddle.matmul(x=matmulx3, y=matmuly3) output_1 = layers.matmul(x=matmulx3, y=matmuly3) - matmulx4 = static.data(name="matmulx4", - shape=[6, 6, 2, 6], - dtype='float32') - matmuly4 = static.data(name="matmuly4", - shape=[6, 6, 6, 6], - dtype='float32') + matmulx4 = static.data( + name="matmulx4", shape=[6, 6, 2, 6], dtype='float32' + ) + matmuly4 = static.data( + name="matmuly4", shape=[6, 6, 6, 6], dtype='float32' + ) output2 = paddle.matmul(x=matmulx4, y=matmuly4) output_2 = layers.matmul(x=matmulx4, y=matmuly4) ops = program.global_block().ops vars = program.global_block().vars for idx, op in enumerate(ops): if op.type == 'matmul_v2' or op.type == 'matmul': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() X = op.input_arg_names[0] @@ -327,81 +443,150 @@ class TestCompatible(unittest.TestCase): op_dist_attr.set_input_dims_mapping(X, [-1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, 1]) op_dist_attr.set_output_dims_mapping(out, [-1, 1]) - self.assertTrue(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [0, 0]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [0, -1]) op_dist_attr.set_output_dims_mapping(out, [1, 
1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 3 and len(vars[Y].shape) == 2: op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, 1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, 1]) - self.assertTrue(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 0, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [1, -1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if len(vars[X].shape) == 4 and len(vars[Y].shape) == 4: op_dist_attr.set_input_dims_mapping(X, [-1, -1, -1, -1]) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, -1, 1]) op_dist_attr.set_output_dims_mapping(out, [-1, -1, -1, 1]) - self.assertTrue(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertTrue( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [0, -1, -1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, 1, -1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(X, [-1, -1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - 
DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [0, -1, -1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, 1, 1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, -1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_output_dims_mapping(out, [-1, -1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(Y, [-1, -1, 1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py index 8c80a7ae8087a4bc1588e062648ac63d526c558f..169926038965d4b674ab4f02f66d7ec64a81d203 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py +++ b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py @@ -20,8 +20,12 @@ import paddle.nn.functional as F import paddle.utils as utils import paddle.fluid.core as core from paddle.fluid import layers -from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute +from paddle.distributed.auto_parallel.operators.common import ( + get_distributed_operator_impl_container, +) +from paddle.distributed.auto_parallel.dist_attribute import ( + OperatorDistributedAttribute, +) from paddle.distributed.auto_parallel.dist_op import DistributedOperator paddle.enable_static() @@ -29,26 +33,26 @@ device = "gpu" if core.is_compiled_with_cuda() else "cpu" class MLPLayer(nn.Layer): - - def __init__(self, - hidden_size=1024, - intermediate_size=4 * 1024, - initializer_range=0.02): + def __init__( + self, + hidden_size=1024, + intermediate_size=4 * 1024, + initializer_range=0.02, + ): super(MLPLayer, self).__init__() d_model = hidden_size dim_feedforward = intermediate_size weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=0.0, std=initializer_range)) + initializer=nn.initializer.Normal(mean=0.0, std=initializer_range) + ) bias_attr = None - self.linear0 = nn.Linear(d_model, - dim_feedforward, - weight_attr, - bias_attr=bias_attr) - self.linear1 = nn.Linear(dim_feedforward, - d_model, - weight_attr, - bias_attr=bias_attr) + self.linear0 = nn.Linear( + d_model, dim_feedforward, weight_attr, bias_attr=bias_attr + ) + self.linear1 = nn.Linear( + dim_feedforward, d_model, weight_attr, bias_attr=bias_attr + ) self.norm = nn.LayerNorm(d_model, epsilon=1e-5) def forward(self, input): @@ -61,8 +65,9 @@ class MLPLayer(nn.Layer): def mlp_forward(train_program, start_program): - with 
static.program_guard(train_program, - start_program), utils.unique_name.guard(): + with static.program_guard( + train_program, start_program + ), utils.unique_name.guard(): batch_size = 4 hidden_size = 1024 sqrt_hidden_size = 32 @@ -75,16 +80,20 @@ def mlp_forward(train_program, start_program): input = embedding(input) input = paddle.reshape(input, [hidden_size, batch_size]) input = paddle.transpose(input, perm=[1, 0]) - matmulinput = static.data(name="matmulinput", - shape=[hidden_size, hidden_size], - dtype='float32') + matmulinput = static.data( + name="matmulinput", + shape=[hidden_size, hidden_size], + dtype='float32', + ) input = layers.matmul(x=input, y=matmulinput) - label = static.data(name="label", - shape=[batch_size, 1], - dtype='float32') - mlp = MLPLayer(hidden_size=hidden_size, - intermediate_size=4 * hidden_size, - initializer_range=0.02) + label = static.data( + name="label", shape=[batch_size, 1], dtype='float32' + ) + mlp = MLPLayer( + hidden_size=hidden_size, + intermediate_size=4 * hidden_size, + initializer_range=0.02, + ) predict = mlp(input) error_cost = paddle.nn.functional.square_error_cost(predict, label) @@ -95,7 +104,6 @@ def mlp_forward(train_program, start_program): class TestCompatible(unittest.TestCase): - def test_reshape_remove_compatible(self): valid_op_dist_attr_list = [] program = paddle.static.Program() @@ -104,55 +112,95 @@ class TestCompatible(unittest.TestCase): ops = program.global_block().ops for idx, op in enumerate(ops): if op.type == 'reshape2': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1, -1, -1]) - self.assertTrue(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1, -1, 1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [0, -1, -1, 1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, 1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1, 1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [0, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [0, -1, -1, -1]) - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, 0, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1]) - - self.assertFalse(impls[1].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + 
op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1, -1, -1] + ) + self.assertTrue( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1, -1, 1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [0, -1, -1, 1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, 1, -1, -1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1, 1, -1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [1, -1, -1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [0, -1, -1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [0, -1, -1, -1] + ) + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, 0, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1] + ) + + self.assertFalse( + impls[1].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) def test_reshape_add_compatible(self): valid_op_dist_attr_list = [] @@ -162,55 +210,91 @@ class TestCompatible(unittest.TestCase): ops = program.global_block().ops for idx, op in enumerate(ops): if op.type == 'reshape2': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], [-1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1]) - self.assertTrue(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, 0]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1] + ) + self.assertTrue( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, 0] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], [-1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [0, -1]) - - 
op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [1, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1, 1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [0, -1] + ) + + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, 1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [1, -1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [1, 1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1, 1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], [-1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [0, -1]) - self.assertFalse(impls[0].is_auto_compatible( - DistributedOperator(op, op_dist_attr))) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [0, -1] + ) + self.assertFalse( + impls[0].is_auto_compatible( + DistributedOperator(op, op_dist_attr) + ) + ) def test_transpose_compatible(self): valid_op_dist_attr_list = [] @@ -220,47 +304,58 @@ class TestCompatible(unittest.TestCase): ops = program.global_block().ops for idx, op in enumerate(ops): if op.type == 'transpose2': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, -1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, -1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertTrue(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, 0, 0]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, 0, 0] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - 
op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [0, 0, 0]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [0, 0, 0] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [-1, 0, 0]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [-1, 0, 0] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [0, -1, -1]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [0, -1, -1] + ) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1] + ) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[1], - [0, 1, 1]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[1], [0, 1, 1] + ) self.assertFalse(impls[0].is_auto_compatible(dist_op)) def test_softmax_compatible(self): @@ -271,23 +366,28 @@ class TestCompatible(unittest.TestCase): ops = program.global_block().ops for idx, op in enumerate(ops): if op.type == 'softmax': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertTrue(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [1]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, 1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, 1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) op.all_attrs()['axis'] = 2 @@ -301,52 +401,66 @@ class TestCompatible(unittest.TestCase): ops = program.global_block().ops for idx, op in enumerate(ops): if op.type == 'c_embedding' or op.type == 'lookup_table_v2': - dist_op_impl_container = get_distributed_operator_impl_container( - op.type) + dist_op_impl_container = ( + get_distributed_operator_impl_container(op.type) + ) impls = dist_op_impl_container.impls op_dist_attr = OperatorDistributedAttribute() - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1]) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[1], - [1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1] + ) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[1], [1, -1] + ) + 
op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertTrue(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, 0, 0]) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, 0, 0] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, 1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, 1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[1], - [-1, 1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[1], [-1, 1] + ) dist_op = DistributedOperator(op, op_dist_attr) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[1], - [1, 1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[1], [1, 1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, 1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, 1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[1], - [1, 1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [-1, -1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[1], [1, 1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [-1, -1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) - op_dist_attr.set_input_dims_mapping(op.input_arg_names[0], - [-1, -1]) - op_dist_attr.set_output_dims_mapping(op.output_arg_names[0], - [1, 1, -1]) + op_dist_attr.set_input_dims_mapping( + op.input_arg_names[0], [-1, -1] + ) + op_dist_attr.set_output_dims_mapping( + op.output_arg_names[0], [1, 1, -1] + ) dist_op = DistributedOperator(op, op_dist_attr) self.assertFalse(impls[0].is_auto_compatible(dist_op)) diff --git a/python/paddle/fluid/tests/unittests/test_avoid_twice_initialization.py b/python/paddle/fluid/tests/unittests/test_avoid_twice_initialization.py index 87a074c2b50646a25040ee7ab197b838d65115a1..43a2d08731d1151c75f4312995547f5cddce0516 100644 --- a/python/paddle/fluid/tests/unittests/test_avoid_twice_initialization.py +++ b/python/paddle/fluid/tests/unittests/test_avoid_twice_initialization.py @@ -17,7 +17,6 @@ import paddle.fluid as fluid class TestAvoidTwiceInitialization(unittest.TestCase): - def test_avoid_twice_initialization(self): cur_program = fluid.Program() cur_block = cur_program.current_block() @@ -25,24 +24,26 @@ class TestAvoidTwiceInitialization(unittest.TestCase): initializer=fluid.initializer.Constant(value=0.01), shape=[2, 2], dtype='float32', - name='var_a') - cur_block.append_op(type="c_broadcast", - inputs={"X": [var]}, - outputs={"Out": [var]}, - attrs={ - 'root': 0, - 'ring_id': 0, - 'use_calc_stream': False - }) - cur_block.append_op(type="c_sync_comm_stream", - inputs={'X': [var]}, - outputs={'Out': [var]}, - attrs={'ring_id': 0}) + name='var_a', + ) + cur_block.append_op( 
+ type="c_broadcast", + inputs={"X": [var]}, + outputs={"Out": [var]}, + attrs={'root': 0, 'ring_id': 0, 'use_calc_stream': False}, + ) + cur_block.append_op( + type="c_sync_comm_stream", + inputs={'X': [var]}, + outputs={'Out': [var]}, + attrs={'ring_id': 0}, + ) var2 = cur_block.create_parameter( initializer=fluid.initializer.Constant(value=0.01), shape=[2, 2], dtype='float32', - name='var_a') + name='var_a', + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_backward.py b/python/paddle/fluid/tests/unittests/test_backward.py index 28c42b49d337156be8d60df5aa2555bea86eaa14..a186d57a1fea5ca7d96b47a04d755921c406aceb 100644 --- a/python/paddle/fluid/tests/unittests/test_backward.py +++ b/python/paddle/fluid/tests/unittests/test_backward.py @@ -56,8 +56,11 @@ class TestBackward(unittest.TestCase): """ def _check_all(self, net): - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) main = fluid.Program() @@ -79,21 +82,28 @@ class TestBackward(unittest.TestCase): no_grad_dict = self._check_stop_gradient(main_program) # 1.2 find_op_path op_path, block_no_grad_set = self._check_op_path( - main_program.block(global_block_idx), [loss], [], no_grad_dict) + main_program.block(global_block_idx), [loss], [], no_grad_dict + ) # 1.3 _find_no_grad_vars no_grad_vars = self._check_find_no_grad_vars( - main_program.block(global_block_idx), op_path, [loss], - block_no_grad_set) + main_program.block(global_block_idx), + op_path, + [loss], + block_no_grad_set, + ) # update no_grad_dict block_no_grad_set.update(no_grad_vars) no_grad_dict[global_block_idx].update( - list(map(fluid.backward._append_grad_suffix_, block_no_grad_set))) + list(map(fluid.backward._append_grad_suffix_, block_no_grad_set)) + ) def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None): - params_grads = fluid.backward.append_backward(loss, parameter_list, - no_grad_set) + params_grads = fluid.backward.append_backward( + loss, parameter_list, no_grad_set + ) params_names = set( - [param_var.name for (param_var, grad_var) in params_grads]) + [param_var.name for (param_var, grad_var) in params_grads] + ) self.assertSetEqual(params_names, self.net.params_names) return params_grads @@ -101,8 +111,10 @@ class TestBackward(unittest.TestCase): def _check_stop_gradient(self, program): no_grad_dict = fluid.backward._get_stop_gradients_(program) if no_grad_dict is not None and isinstance(no_grad_dict, dict): - self.assertSetEqual(no_grad_dict[self.global_block_idx], - self.net.stop_gradient_grad_vars) + self.assertSetEqual( + no_grad_dict[self.global_block_idx], + self.net.stop_gradient_grad_vars, + ) return no_grad_dict @@ -111,26 +123,35 @@ class TestBackward(unittest.TestCase): block_no_grad_set = None else: block_no_grad_set = set( - map(fluid.backward._strip_grad_suffix_, - no_grad_dict[self.global_block_idx])) - op_path = fluid.backward._find_op_path_(root_block, outputs, inputs, - block_no_grad_set) + map( + fluid.backward._strip_grad_suffix_, + no_grad_dict[self.global_block_idx], + ) + ) + op_path = fluid.backward._find_op_path_( + root_block, outputs, inputs, block_no_grad_set + ) op_types = [op.type for op in op_path] self.assertListEqual(op_types, self.net.op_path) return op_path, block_no_grad_set - def _check_find_no_grad_vars(self, root_block, op_path, targets, - block_no_grad_set): + def _check_find_no_grad_vars( + self, 
root_block, op_path, targets, block_no_grad_set + ): no_grad_vars = fluid.backward._find_no_grad_vars( - root_block, op_path, targets, block_no_grad_set) + root_block, op_path, targets, block_no_grad_set + ) self.assertSetEqual(no_grad_vars, self.net.no_grad_vars) return no_grad_vars def _check_error_param_list(self, net, parameter_list): - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) main = fluid.Program() @@ -144,8 +165,11 @@ class TestBackward(unittest.TestCase): exe.run(feed=net.init_data()) def _check_error_no_grad_set(self, net, no_grad_set): - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) main = fluid.Program() @@ -160,13 +184,16 @@ class TestBackward(unittest.TestCase): class SimpleNet(BackwardNet): - def __init__(self): super(SimpleNet, self).__init__() - self.stop_gradient_grad_vars = set([ - u'x_no_grad@GRAD', u'x2_no_grad@GRAD', u'x3_no_grad@GRAD', - u'label_no_grad@GRAD' - ]) + self.stop_gradient_grad_vars = set( + [ + u'x_no_grad@GRAD', + u'x2_no_grad@GRAD', + u'x3_no_grad@GRAD', + u'label_no_grad@GRAD', + ] + ) self.no_grad_vars = set() self.params_names = set([u'w2v', u'fc_predict.b_0', u'fc_w']) self.op_path = [ @@ -178,7 +205,7 @@ class SimpleNet(BackwardNet): u'softmax', # fc u'elementwise_sub', u'square', - u'reduce_mean' + u'reduce_mean', ] # loss self.shape = [16, 50] @@ -192,7 +219,7 @@ class SimpleNet(BackwardNet): 'x_no_grad': x, 'x2_no_grad': x2, 'x3_no_grad': x3, - 'label_no_grad': label + 'label_no_grad': label, } def build_model(self): @@ -200,37 +227,41 @@ class SimpleNet(BackwardNet): x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64') x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64') x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64') - label = fluid.data(name='label_no_grad', - shape=[self.shape[0], 1], - dtype='float32') + label = fluid.data( + name='label_no_grad', shape=[self.shape[0], 1], dtype='float32' + ) # shared layer, the grad of 'w2v' will be summed and renamed. 
# To test _addup_repetitive_outputs_ - x_emb = fluid.embedding(x, - size=[100, 64], - param_attr=fluid.ParamAttr(name='w2v')) - x2_emb = fluid.embedding(x2, - size=[100, 64], - param_attr=fluid.ParamAttr(name='w2v')) - x3_emb = fluid.embedding(x3, - size=[100, 64], - param_attr=fluid.ParamAttr(name='w2v')) + x_emb = fluid.embedding( + x, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + ) + x2_emb = fluid.embedding( + x2, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + ) + x3_emb = fluid.embedding( + x3, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + ) # merge layers x_merge = fluid.layers.elementwise_add(x_emb, x2_emb, name='x_add_x2') - x2_merge = fluid.layers.elementwise_add(x2_emb, - x3_emb, - name='x2_add_x3') + x2_merge = fluid.layers.elementwise_add( + x2_emb, x3_emb, name='x2_add_x3' + ) # shared fc_w - predict = fluid.layers.fc(input=x_merge, - size=1, - act='softmax', - param_attr=fluid.ParamAttr(name='fc_w'), - name='fc_predict') + predict = fluid.layers.fc( + input=x_merge, + size=1, + act='softmax', + param_attr=fluid.ParamAttr(name='fc_w'), + name='fc_predict', + ) # useless layer for calculating loss - fc_no_use = fluid.layers.fc(input=x2_merge, - size=1, - act='sigmoid', - param_attr=fluid.ParamAttr(name='fc_w'), - name='fc_no_use') + fc_no_use = fluid.layers.fc( + input=x2_merge, + size=1, + act='sigmoid', + param_attr=fluid.ParamAttr(name='fc_w'), + name='fc_no_use', + ) # loss cost = fluid.layers.square_error_cost(input=predict, label=label) loss = paddle.mean(cost, name='mean_loss') @@ -239,7 +270,6 @@ class SimpleNet(BackwardNet): class TestSimpleNet(TestBackward): - def test_backward(self): """ Instantiate each NetClass to test backward. @@ -250,7 +280,6 @@ class TestSimpleNet(TestBackward): class TestGradientsError(unittest.TestCase): - def test_error(self): x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32') x.stop_gradient = False @@ -271,7 +300,6 @@ class TestGradientsError(unittest.TestCase): class TestSimpleNetWithErrorParamList(TestBackward): - def test_parameter_list_type_error(self): self.global_block_idx = 0 self.net = SimpleNet() @@ -285,7 +313,6 @@ class TestSimpleNetWithErrorParamList(TestBackward): class TestSimpleNetWithErrorNoGradSet(TestBackward): - def test_no_grad_set_type_error(self): self.global_block_idx = 0 self.net = SimpleNet() @@ -299,7 +326,6 @@ class TestSimpleNetWithErrorNoGradSet(TestBackward): class TestAppendBackwardWithError(unittest.TestCase): - def build_net(self): x = fluid.data(name='x', shape=[None, 13], dtype='int64') y = fluid.data(name='y', shape=[None, 1], dtype='float32') @@ -326,8 +352,9 @@ class TestAppendBackwardWithError(unittest.TestCase): def test_parameter_list_type_error(self): with self.assertRaises(TypeError): self.param_names[0] = np.random.random([10]) - fluid.backward.append_backward(loss=self.avg_loss, - parameter_list=self.param_names) + fluid.backward.append_backward( + loss=self.avg_loss, parameter_list=self.param_names + ) def test_callback_type_error(self): with self.assertRaises(TypeError): @@ -335,18 +362,19 @@ class TestAppendBackwardWithError(unittest.TestCase): def callback(block, context): return - fluid.backward.append_backward(loss=self.avg_loss, - callbacks=callback) + fluid.backward.append_backward( + loss=self.avg_loss, callbacks=callback + ) class TestGradientsWithOptimizer(unittest.TestCase): - def _check_grad_op_name(self, forward_list, optimiezed_list): backward_list = [op + "_grad" for op in reversed(forward_list)] idx = 
optimiezed_list.index(backward_list[0], len(backward_list)) - self.assertListEqual(backward_list, - optimiezed_list[idx:idx + len(backward_list)]) + self.assertListEqual( + backward_list, optimiezed_list[idx : idx + len(backward_list)] + ) def test_gradient_with_optimizer(self): main = fluid.Program() @@ -359,8 +387,12 @@ class TestGradientsWithOptimizer(unittest.TestCase): opt = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9) forward_list = [o.type for o in main.current_block().ops] - optimize_ops, pram_grads = paddle.autograd.backward_mode.gradients_with_optimizer( - main, opt) + ( + optimize_ops, + pram_grads, + ) = paddle.autograd.backward_mode.gradients_with_optimizer( + main, opt + ) optimized_list = [o.type for o in main.current_block().ops] @@ -371,7 +403,6 @@ class TestGradientsWithOptimizer(unittest.TestCase): # TODO(Aurelius84): add conditional network test class ConditionalNet(BackwardNet): - def __init__(self): super(ConditionalNet, self).__init__() diff --git a/python/paddle/fluid/tests/unittests/test_backward_infer_var_data_type_shape.py b/python/paddle/fluid/tests/unittests/test_backward_infer_var_data_type_shape.py index 9f94124fca6fda56077c106add11ead35a4a39d3..2a16d6d57e3ba404e9d63b18c5b95cbc64e387ba 100644 --- a/python/paddle/fluid/tests/unittests/test_backward_infer_var_data_type_shape.py +++ b/python/paddle/fluid/tests/unittests/test_backward_infer_var_data_type_shape.py @@ -20,17 +20,16 @@ import warnings class TestBackwardInferVarDataTypeShape(unittest.TestCase): - def test_backward_infer_var_data_type_shape(self): paddle.enable_static() program = fluid.default_main_program() - dy = program.global_block().create_var(name="Tmp@GRAD", - shape=[1, 1], - dtype=np.float32, - persistable=True) + dy = program.global_block().create_var( + name="Tmp@GRAD", shape=[1, 1], dtype=np.float32, persistable=True + ) # invoke warning - fluid.backward._infer_var_data_type_shape_("Tmp@GRAD", - program.global_block()) + fluid.backward._infer_var_data_type_shape_( + "Tmp@GRAD", program.global_block() + ) res = False with warnings.catch_warnings(): res = True diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index 993bad8363013c52ad5ee09e016165dc5808f5cd..66cf1b489f6e64d76b7cd97c43ff0a376d0c0bcd 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -24,26 +24,23 @@ from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode class L1(fluid.Layer): - def __init__(self): super(L1, self).__init__() self._param_attr = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.1)) - self.w1 = self.create_parameter(attr=self._param_attr, - shape=[2, 2], - dtype='float32', - is_bias=False) - self.w2 = self.create_parameter(attr=self._param_attr, - shape=[2, 2], - dtype='float32', - is_bias=False) + initializer=fluid.initializer.Constant(value=0.1) + ) + self.w1 = self.create_parameter( + attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False + ) + self.w2 = self.create_parameter( + attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False + ) def forward(self): return self.w1 + self.w2 class L2(fluid.Layer): - def __init__(self): super(L2, self).__init__() self.layer1 = L1() @@ -54,7 +51,6 @@ class L2(fluid.Layer): class L3(fluid.Layer): - def __init__(self): super(L3, self).__init__() self.layer1 = L2() @@ -65,7 +61,6 @@ class L3(fluid.Layer): class TestBaseLayer(unittest.TestCase): - def 
func_test_one_level(self): with fluid.dygraph.guard(): l = L1() @@ -75,9 +70,9 @@ class TestBaseLayer(unittest.TestCase): for name, _ in l.named_parameters(prefix='l1'): self.assertEqual(name, expected_names[idx]) idx += 1 - np.testing.assert_allclose(ret.numpy(), - 0.2 * np.ones([2, 2]), - rtol=1e-05) + np.testing.assert_allclose( + ret.numpy(), 0.2 * np.ones([2, 2]), rtol=1e-05 + ) def test_one_level(self): with _test_eager_guard(): @@ -102,9 +97,9 @@ class TestBaseLayer(unittest.TestCase): self.assertEqual(name, expected_names[idx]) idx += 1 ret = l() - np.testing.assert_allclose(ret.numpy(), - 0.8 * np.ones([2, 2]), - rtol=1e-05) + np.testing.assert_allclose( + ret.numpy(), 0.8 * np.ones([2, 2]), rtol=1e-05 + ) def test_three_level(self): with _test_eager_guard(): @@ -143,7 +138,6 @@ class TestBaseLayer(unittest.TestCase): class BufferLayer(fluid.Layer): - def __init__(self): super(BufferLayer, self).__init__() buffer_var = to_variable(np.zeros([2, 4]).astype('int32')) @@ -154,13 +148,12 @@ class BufferLayer(fluid.Layer): class BufferNet(fluid.Layer): - def __init__(self): super(BufferNet, self).__init__() self.buffer_layer = BufferLayer() - self.w1 = self.create_parameter(shape=[2, 2], - dtype='float32', - is_bias=False) + self.w1 = self.create_parameter( + shape=[2, 2], dtype='float32', is_bias=False + ) buffer_var = to_variable(np.ones([2, 4]).astype('int32')) self.register_buffer("net_buffer", buffer_var) @@ -171,9 +164,7 @@ class BufferNet(fluid.Layer): class TestBuffer(unittest.TestCase): - def func_test_buffers_and_named_buffers(self): - def names(named_buffers): return [name for name, _ in named_buffers] @@ -187,11 +178,14 @@ class TestBuffer(unittest.TestCase): self.assertEqual(len(net.buffers()), 3) self.assertEqual( names(net.named_buffers()), - ['net_buffer', 'new_buffer', 'buffer_layer.layer_buffer']) + ['net_buffer', 'new_buffer', 'buffer_layer.layer_buffer'], + ) self.assertEqual(len(net.buffers(include_sublayers=False)), 2) - self.assertEqual(names(net.named_buffers(include_sublayers=False)), - ['net_buffer', 'new_buffer']) + self.assertEqual( + names(net.named_buffers(include_sublayers=False)), + ['net_buffer', 'new_buffer'], + ) def test_buffers_and_named_buffers(self): with _test_eager_guard(): @@ -203,25 +197,31 @@ class TestBuffer(unittest.TestCase): net = fluid.Layer() var = to_variable(np.zeros([1])) - with self.assertRaisesRegexp(TypeError, - "name of buffer should be a string"): + with self.assertRaisesRegexp( + TypeError, "name of buffer should be a string" + ): net.register_buffer(12, var) - with self.assertRaisesRegexp(TypeError, - "buffer should be a Paddle.Tensor"): + with self.assertRaisesRegexp( + TypeError, "buffer should be a Paddle.Tensor" + ): if in_dygraph_mode(): - net.register_buffer("buffer_name", - EagerParamBase([2, 2], 'float32')) + net.register_buffer( + "buffer_name", EagerParamBase([2, 2], 'float32') + ) else: - net.register_buffer("buffer_name", - ParamBase([2, 2], 'float32')) + net.register_buffer( + "buffer_name", ParamBase([2, 2], 'float32') + ) - with self.assertRaisesRegexp(KeyError, - "name of buffer can not contain"): + with self.assertRaisesRegexp( + KeyError, "name of buffer can not contain" + ): net.register_buffer("buffer.name", var) - with self.assertRaisesRegexp(KeyError, - "name of buffer can not be empty"): + with self.assertRaisesRegexp( + KeyError, "name of buffer can not be empty" + ): net.register_buffer("", var) net.attr_name = 10 @@ -358,8 +358,9 @@ class TestBuffer(unittest.TestCase): 
net.register_buffer("buffer_var2", var2, persistable=False) self.assertEqual(len(net.state_dict()), 1) - self.assertEqual([name for name, _ in net.state_dict().items()], - ["buffer_var1"]) + self.assertEqual( + [name for name, _ in net.state_dict().items()], ["buffer_var1"] + ) # load state_dict net_load = fluid.Layer() @@ -379,7 +380,6 @@ class TestBuffer(unittest.TestCase): class BufferNetWithModification(paddle.nn.Layer): - def __init__(self, shape): super(BufferNetWithModification, self).__init__() @@ -397,7 +397,6 @@ class BufferNetWithModification(paddle.nn.Layer): class TestModifiedBuffer(unittest.TestCase): - def funcsetUp(self): paddle.disable_static() self.prog_trans = ProgramTranslator() @@ -418,8 +417,9 @@ class TestModifiedBuffer(unittest.TestCase): st_outs = self._run(True) for i in range(len(dy_outs)): - np.testing.assert_array_equal(dy_outs[i].numpy(), - st_outs[i].numpy()) + np.testing.assert_array_equal( + dy_outs[i].numpy(), st_outs[i].numpy() + ) def test_modified(self): with _test_eager_guard(): @@ -428,7 +428,6 @@ class TestModifiedBuffer(unittest.TestCase): class TestLayerTo(unittest.TestCase): - def funcsetUp(self): paddle.disable_static() self.linear = paddle.nn.Linear(2, 2) @@ -442,30 +441,39 @@ class TestLayerTo(unittest.TestCase): def func_test_to_api(self): self.linear.to(dtype='double') - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) self.linear.to() - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) for p in self.linear.parameters(): if in_dygraph_mode(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase)) + isinstance(p, paddle.fluid.framework.EagerParamBase) + ) else: self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase)) @@ -476,9 +484,11 @@ class TestLayerTo(unittest.TestCase): self.assertTrue(self.linear.buf_name.place.is_gpu_place()) self.assertEqual(self.linear.buf_name.place.gpu_device_id(), 0) self.assertTrue( - self.linear.weight._grad_ivar().place.is_gpu_place()) + self.linear.weight._grad_ivar().place.is_gpu_place() + ) self.assertEqual( - self.linear.weight._grad_ivar().place.gpu_device_id(), 0) + 
self.linear.weight._grad_ivar().place.gpu_device_id(), 0 + ) self.linear.to(device='gpu:0') self.assertTrue(self.linear.weight.place.is_gpu_place()) @@ -486,16 +496,20 @@ class TestLayerTo(unittest.TestCase): self.assertTrue(self.linear.buf_name.place.is_gpu_place()) self.assertEqual(self.linear.buf_name.place.gpu_device_id(), 0) self.assertTrue( - self.linear.weight._grad_ivar().place.is_gpu_place()) + self.linear.weight._grad_ivar().place.is_gpu_place() + ) self.assertEqual( - self.linear.weight._grad_ivar().place.gpu_device_id(), 0) + self.linear.weight._grad_ivar().place.gpu_device_id(), 0 + ) for p in self.linear.parameters(): if in_dygraph_mode(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase)) + isinstance(p, paddle.fluid.framework.EagerParamBase) + ) else: self.assertTrue( - isinstance(p, paddle.fluid.framework.ParamBase)) + isinstance(p, paddle.fluid.framework.ParamBase) + ) self.linear.to(device=paddle.CPUPlace()) self.assertTrue(self.linear.weight.place.is_cpu_place()) @@ -513,59 +527,77 @@ class TestLayerTo(unittest.TestCase): def func_test_to_api_paddle_dtype(self): self.linear.to(dtype=paddle.float64) - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) self.linear.to() - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) for p in self.linear.parameters(): if in_dygraph_mode(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase)) + isinstance(p, paddle.fluid.framework.EagerParamBase) + ) else: self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase)) def func_test_to_api_numpy_dtype(self): self.linear.to(dtype=np.float64) - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, 
paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) self.linear.to() - self.assertEqual(self.linear.weight.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - self.assertEqual(self.linear.buf_name.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) - np.testing.assert_allclose(self.linear.weight.grad.numpy(), - self.new_grad, - rtol=1e-05) - self.assertEqual(self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + self.assertEqual( + self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) + np.testing.assert_allclose( + self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 + ) + self.assertEqual( + self.linear.weight._grad_ivar().dtype, + paddle.fluid.core.VarDesc.VarType.FP64, + ) for p in self.linear.parameters(): if in_dygraph_mode(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase)) + isinstance(p, paddle.fluid.framework.EagerParamBase) + ) else: self.assertTrue(isinstance(p, paddle.fluid.framework.ParamBase)) diff --git a/python/paddle/fluid/tests/unittests/test_basic_gru_api.py b/python/paddle/fluid/tests/unittests/test_basic_gru_api.py index c0719d7643c13aded6a7c11b2b6ce8f3f0a3867c..339323110a116b1aad27798620cc9e5c22f9e592 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_gru_api.py +++ b/python/paddle/fluid/tests/unittests/test_basic_gru_api.py @@ -34,27 +34,28 @@ def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX - return 1. / (1. + np.exp(-y)) + return 1.0 / (1.0 + np.exp(-y)) def tanh(x): - y = -2. * x + y = -2.0 * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT - return (2. / (1. + np.exp(y))) - 1. 
- - -def gru_np(input, - init_h, - hidden_size, - gate_weight, - gate_bias, - candidate_weight, - candidate_bias, - num_layers=1, - batch_first=False, - is_bidirect=False, - sequence_length=None): - + return (2.0 / (1.0 + np.exp(y))) - 1.0 + + +def gru_np( + input, + init_h, + hidden_size, + gate_weight, + gate_bias, + candidate_weight, + candidate_bias, + num_layers=1, + batch_first=False, + is_bidirect=False, + sequence_length=None, +): def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b): concat_1 = np.concatenate([step_in, pre_hidden], 1) @@ -65,8 +66,9 @@ def gru_np(input, r_hidden = r * pre_hidden - candidate = np.matmul(np.concatenate([step_in, r_hidden], 1), - candidate_w) + candidate = np.matmul( + np.concatenate([step_in, r_hidden], 1), candidate_w + ) candidate += candidate_b c = tanh(candidate) @@ -94,8 +96,9 @@ def gru_np(input, if is_bidirect: direc_num = 2 if init_h: - init_h = np.reshape(init_h, - shape=[num_layers, direc_num, -1, hidden_size]) + init_h = np.reshape( + init_h, shape=[num_layers, direc_num, -1, hidden_size] + ) else: init_h = np.zeros([num_layers, direc_num, batch_size, hidden_size]) @@ -117,15 +120,19 @@ def gru_np(input, for i in range(num_layers): new_hidden = step( - step_input, pre_hidden_array[i], + step_input, + pre_hidden_array[i], gate_weight[direc_index * num_layers + i], gate_bias[direc_index * num_layers + i], candidate_weight[direc_index * num_layers + i], - candidate_bias[direc_index * num_layers + i]) + candidate_bias[direc_index * num_layers + i], + ) if mask is not None: - new_hidden = new_hidden * step_mask + ( - 1 - step_mask) * pre_hidden_array[i] + new_hidden = ( + new_hidden * step_mask + + (1 - step_mask) * pre_hidden_array[i] + ) pre_hidden_array[i] = new_hidden @@ -135,14 +142,15 @@ def gru_np(input, rnn_out = np.reshape(rnn_out, [seq_len, -1, hidden_size]) last_hidden_out = np.concatenate(pre_hidden_array, 0) - last_hidden_out = np.reshape(last_hidden_out, - [num_layers, -1, hidden_size]) + last_hidden_out = np.reshape( + last_hidden_out, [num_layers, -1, hidden_size] + ) return rnn_out, last_hidden_out - fw_rnn_out, fw_last_hidden = get_single_direction_output(input, - mask, - direc_index=0) + fw_rnn_out, fw_last_hidden = get_single_direction_output( + input, mask, direc_index=0 + ) if is_bidirect: bw_input = input[::-1] @@ -150,16 +158,17 @@ def gru_np(input, if mask is not None: bw_mask = mask[::-1] - bw_rnn_out, bw_last_hidden = get_single_direction_output(bw_input, - bw_mask, - direc_index=1) + bw_rnn_out, bw_last_hidden = get_single_direction_output( + bw_input, bw_mask, direc_index=1 + ) bw_rnn_out = bw_rnn_out[::-1] rnn_out = np.concatenate([fw_rnn_out, bw_rnn_out], 2) last_hidden = np.concatenate([fw_last_hidden, bw_last_hidden], 1) - last_hidden = np.reshape(last_hidden, - [num_layers * direc_num, -1, hidden_size]) + last_hidden = np.reshape( + last_hidden, [num_layers * direc_num, -1, hidden_size] + ) if batch_first: rnn_out = np.transpose(rnn_out, [1, 0, 2]) @@ -176,7 +185,6 @@ def gru_np(input, class TestBasicGRUApi(unittest.TestCase): - def setUp(self): self.hidden_size = 10 self.batch_size = 5 @@ -186,15 +194,24 @@ class TestBasicGRUApi(unittest.TestCase): self.batch_first = False def test_run(self): - x = layers.data(name='x', - shape=[-1, self.batch_size, self.hidden_size], - dtype='float32') - sequence_length = layers.data(name="sequence_length", - shape=[-1], - dtype='float32') - - rnn_out, last_hidden = basic_gru( x, None, self.hidden_size, num_layers=self.num_layers, \ - batch_first = 
self.batch_first, bidirectional=self.is_bidirect, sequence_length=sequence_length ) + x = layers.data( + name='x', + shape=[-1, self.batch_size, self.hidden_size], + dtype='float32', + ) + sequence_length = layers.data( + name="sequence_length", shape=[-1], dtype='float32' + ) + + rnn_out, last_hidden = basic_gru( + x, + None, + self.hidden_size, + num_layers=self.num_layers, + batch_first=self.batch_first, + bidirectional=self.is_bidirect, + sequence_length=sequence_length, + ) last_hidden.persisbale = True rnn_out.persisbale = True @@ -218,38 +235,52 @@ class TestBasicGRUApi(unittest.TestCase): for i in range(self.num_layers): gate_w_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_0" gate_b_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_0" - candidate_w_name = "basic_gru_layers_" + str( - i) + "/BasicGRUUnit_0.w_1" - candidate_b_name = "basic_gru_layers_" + str( - i) + "/BasicGRUUnit_0.b_1" + candidate_w_name = ( + "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_1" + ) + candidate_b_name = ( + "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_1" + ) gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) candidate_w = np.array( - fluid.global_scope().find_var(candidate_w_name).get_tensor()) + fluid.global_scope().find_var(candidate_w_name).get_tensor() + ) candidate_w = np.random.uniform( - -0.1, 0.1, size=candidate_w.shape).astype('float32') + -0.1, 0.1, size=candidate_w.shape + ).astype('float32') fluid.global_scope().find_var(candidate_w_name).get_tensor().set( - candidate_w, place) + candidate_w, place + ) candidate_b = np.array( - fluid.global_scope().find_var(candidate_b_name).get_tensor()) + fluid.global_scope().find_var(candidate_b_name).get_tensor() + ) candidate_b = np.random.uniform( - -0.1, 0.1, size=candidate_b.shape).astype('float32') + -0.1, 0.1, size=candidate_b.shape + ).astype('float32') fluid.global_scope().find_var(candidate_b_name).get_tensor().set( - candidate_b, place) + candidate_b, place + ) gate_weight.append(gate_w) gate_bias.append(gate_b) @@ -258,42 +289,58 @@ class TestBasicGRUApi(unittest.TestCase): if self.is_bidirect: for i in range(self.num_layers): - gate_w_name = "basic_gru_reverse_layers_" + str( - i) + "/BasicGRUUnit_0.w_0" - gate_b_name = "basic_gru_reverse_layers_" + str( - i) + "/BasicGRUUnit_0.b_0" - candidate_w_name = "basic_gru_reverse_layers_" + str( - i) + "/BasicGRUUnit_0.w_1" - candidate_b_name = "basic_gru_reverse_layers_" + str( - i) + "/BasicGRUUnit_0.b_1" + gate_w_name = ( + "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_0" + ) + gate_b_name = ( + "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.b_0" + ) + candidate_w_name = ( + "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_1" + ) + candidate_b_name = ( + "basic_gru_reverse_layers_" + str(i) + 
"/BasicGRUUnit_0.b_1" + ) gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) - candidate_w = np.array(fluid.global_scope().find_var( - candidate_w_name).get_tensor()) + candidate_w = np.array( + fluid.global_scope().find_var(candidate_w_name).get_tensor() + ) candidate_w = np.random.uniform( - -0.1, 0.1, size=candidate_w.shape).astype('float32') + -0.1, 0.1, size=candidate_w.shape + ).astype('float32') fluid.global_scope().find_var( - candidate_w_name).get_tensor().set(candidate_w, place) + candidate_w_name + ).get_tensor().set(candidate_w, place) - candidate_b = np.array(fluid.global_scope().find_var( - candidate_b_name).get_tensor()) + candidate_b = np.array( + fluid.global_scope().find_var(candidate_b_name).get_tensor() + ) candidate_b = np.random.uniform( - -0.1, 0.1, size=candidate_b.shape).astype('float32') + -0.1, 0.1, size=candidate_b.shape + ).astype('float32') fluid.global_scope().find_var( - candidate_b_name).get_tensor().set(candidate_b, place) + candidate_b_name + ).get_tensor().set(candidate_b, place) gate_weight.append(gate_w) gate_bias.append(gate_b) @@ -301,39 +348,39 @@ class TestBasicGRUApi(unittest.TestCase): candidate_bias.append(candidate_b) step_input_np = np.random.uniform( - -0.1, 0.1, - (self.seq_len, self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.seq_len, self.batch_size, self.hidden_size) + ).astype('float32') sequence_length_np = np.random.randint( - self.seq_len // 2, self.seq_len, - size=(self.batch_size)).astype('int64') + self.seq_len // 2, self.seq_len, size=(self.batch_size) + ).astype('int64') - out = exe.run(feed={ - 'x': step_input_np, - 'sequence_length': sequence_length_np - }, - fetch_list=[rnn_out, last_hidden]) + out = exe.run( + feed={'x': step_input_np, 'sequence_length': sequence_length_np}, + fetch_list=[rnn_out, last_hidden], + ) api_rnn_out = out[0] api_last_hidden = out[1] - np_out = gru_np(step_input_np, - None, - self.hidden_size, - gate_weight, - gate_bias, - candidate_weight, - candidate_bias, - num_layers=self.num_layers, - batch_first=self.batch_first, - is_bidirect=self.is_bidirect, - sequence_length=sequence_length_np) + np_out = gru_np( + step_input_np, + None, + self.hidden_size, + gate_weight, + gate_bias, + candidate_weight, + candidate_bias, + num_layers=self.num_layers, + batch_first=self.batch_first, + is_bidirect=self.is_bidirect, + sequence_length=sequence_length_np, + ) np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001, atol=0) - np.testing.assert_allclose(api_last_hidden, - np_out[1], - rtol=0.0001, - atol=0) + np.testing.assert_allclose( + api_last_hidden, np_out[1], rtol=0.0001, atol=0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py 
b/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py index ae3e6dc4f77d2653909e2ea5e62135ab3859e314..660aa184f578802db6a49439e57fe01200163a0f 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py @@ -34,13 +34,13 @@ def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX - return 1. / (1. + np.exp(-y)) + return 1.0 / (1.0 + np.exp(-y)) def tanh(x): - y = -2. * x + y = -2.0 * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT - return (2. / (1. + np.exp(y))) - 1. + return (2.0 / (1.0 + np.exp(y))) - 1.0 def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b): @@ -64,16 +64,15 @@ def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b): class TestBasicGRUUnit(unittest.TestCase): - def setUp(self): self.hidden_size = 5 self.batch_size = 5 def test_run(self): x = layers.data(name='x', shape=[-1, self.hidden_size], dtype='float32') - pre_hidden = layers.data(name="pre_hidden", - shape=[-1, self.hidden_size], - dtype='float32') + pre_hidden = layers.data( + name="pre_hidden", shape=[-1, self.hidden_size], dtype='float32' + ) gru_unit = BasicGRUUnit("gru_unit", self.hidden_size) new_hidden = gru_unit(x, pre_hidden) @@ -98,48 +97,67 @@ class TestBasicGRUUnit(unittest.TestCase): candidate_b_name = "gru_unit/BasicGRUUnit_0.b_1" gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) candidate_w = np.array( - fluid.global_scope().find_var(candidate_w_name).get_tensor()) + fluid.global_scope().find_var(candidate_w_name).get_tensor() + ) candidate_w = np.random.uniform( - -0.1, 0.1, size=candidate_w.shape).astype('float32') + -0.1, 0.1, size=candidate_w.shape + ).astype('float32') fluid.global_scope().find_var(candidate_w_name).get_tensor().set( - candidate_w, place) + candidate_w, place + ) candidate_b = np.array( - fluid.global_scope().find_var(candidate_b_name).get_tensor()) + fluid.global_scope().find_var(candidate_b_name).get_tensor() + ) candidate_b = np.random.uniform( - -0.1, 0.1, size=candidate_b.shape).astype('float32') + -0.1, 0.1, size=candidate_b.shape + ).astype('float32') fluid.global_scope().find_var(candidate_b_name).get_tensor().set( - candidate_b, place) + candidate_b, place + ) step_input_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') pre_hidden_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') - out = exe.run(feed={ - 'x': step_input_np, - 'pre_hidden': pre_hidden_np - }, - fetch_list=[new_hidden]) + out = exe.run( + feed={'x': 
step_input_np, 'pre_hidden': pre_hidden_np}, + fetch_list=[new_hidden], + ) api_out = out[0] - np_out = step(step_input_np, pre_hidden_np, gate_w, gate_b, candidate_w, - candidate_b) + np_out = step( + step_input_np, + pre_hidden_np, + gate_w, + gate_b, + candidate_w, + candidate_b, + ) np.testing.assert_allclose(api_out, np_out, rtol=0.0001, atol=0) diff --git a/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py b/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py index 9574a1ea973ff920717c6d9adc2c45c23ff3bfe1..befc7e2c5ffa5c9c93e912b3e4a86d5ab3afc826 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py +++ b/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py @@ -34,27 +34,28 @@ def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX - return 1. / (1. + np.exp(-y)) + return 1.0 / (1.0 + np.exp(-y)) def tanh(x): - y = -2. * x + y = -2.0 * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT - return (2. / (1. + np.exp(y))) - 1. - - -def lstm_np(input, - init_h, - init_c, - hidden_size, - gate_weight, - gate_bias, - num_layers=1, - batch_first=False, - is_bidirect=False, - sequence_length=None, - forget_bias=1.0): - + return (2.0 / (1.0 + np.exp(y))) - 1.0 + + +def lstm_np( + input, + init_h, + init_c, + hidden_size, + gate_weight, + gate_bias, + num_layers=1, + batch_first=False, + is_bidirect=False, + sequence_length=None, + forget_bias=1.0, +): def step(step_in, pre_hidden, pre_cell, gate_w, gate_b): concat_1 = np.concatenate([step_in, pre_hidden], 1) @@ -112,23 +113,27 @@ def lstm_np(input, if mask is not None: step_mask = mask[i] step_mask = np.reshape(step_mask, [-1, 1]) - #print("np mask", step_mask.shape ) + # print("np mask", step_mask.shape ) for i in range(num_layers): new_hidden, new_cell = step( - step_input, pre_hidden_array[i], pre_cell_array[i], + step_input, + pre_hidden_array[i], + pre_cell_array[i], gate_weight[direc_index * num_layers + i], - gate_bias[direc_index * num_layers + i]) + gate_bias[direc_index * num_layers + i], + ) if mask is not None: new_hidden = np.multiply( - new_hidden, step_mask) - np.multiply( - pre_hidden_array[i], (step_mask - 1.0)) - #new_hidden = new_hidden * step_mask - pre_hidden_array[i] * ( step_mask -1 ) - #new_cell = new_cell * step_mask - pre_cell_array[i] * (step_mask -1) + new_hidden, step_mask + ) - np.multiply(pre_hidden_array[i], (step_mask - 1.0)) + # new_hidden = new_hidden * step_mask - pre_hidden_array[i] * ( step_mask -1 ) + # new_cell = new_cell * step_mask - pre_cell_array[i] * (step_mask -1) new_cell = np.multiply(new_cell, step_mask) - np.multiply( - pre_cell_array[i], (step_mask - 1.0)) + pre_cell_array[i], (step_mask - 1.0) + ) pre_hidden_array[i] = new_hidden pre_cell_array[i] = new_cell @@ -139,8 +144,9 @@ def lstm_np(input, rnn_out = np.reshape(rnn_out, [seq_len, -1, hidden_size]) last_hidden_out = np.concatenate(pre_hidden_array, 0) - last_hidden_out = np.reshape(last_hidden_out, - [num_layers, -1, hidden_size]) + last_hidden_out = np.reshape( + last_hidden_out, [num_layers, -1, hidden_size] + ) last_cell_out = np.concatenate(pre_cell_array, 0) last_cell_out = np.reshape(last_cell_out, [num_layers, -1, hidden_size]) @@ -148,7 +154,8 @@ def lstm_np(input, return rnn_out, last_hidden_out, last_cell_out fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output( - input, mask, direc_index=0) + input, mask, direc_index=0 + ) if is_bidirect: bw_input = input[::-1] @@ -157,18 +164,21 @@ def lstm_np(input, 
bw_mask = mask[::-1] bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output( - bw_input, bw_mask, direc_index=1) + bw_input, bw_mask, direc_index=1 + ) bw_rnn_out = bw_rnn_out[::-1] rnn_out = np.concatenate([fw_rnn_out, bw_rnn_out], 2) last_hidden = np.concatenate([fw_last_hidden, bw_last_hidden], 1) - last_hidden = np.reshape(last_hidden, - [num_layers * direc_num, -1, hidden_size]) + last_hidden = np.reshape( + last_hidden, [num_layers * direc_num, -1, hidden_size] + ) last_cell = np.concatenate([fw_last_cell, bw_last_cell], 1) - last_cell = np.reshape(last_cell, - [num_layers * direc_num, -1, hidden_size]) + last_cell = np.reshape( + last_cell, [num_layers * direc_num, -1, hidden_size] + ) if batch_first: rnn_out = np.transpose(rnn_out, [1, 0, 2]) @@ -186,7 +196,6 @@ def lstm_np(input, class TestBasicLSTMApi(unittest.TestCase): - def setUp(self): self.hidden_size = 10 self.batch_size = 5 @@ -197,15 +206,26 @@ class TestBasicLSTMApi(unittest.TestCase): self.forget_bias = 1.0 def test_run(self): - x = layers.data(name='x', - shape=[-1, self.batch_size, self.hidden_size], - dtype='float32') - sequence_length = layers.data(name="sequence_length", - shape=[-1], - dtype='float32') - - rnn_out, last_hidden, last_cell = basic_lstm( x, None, None, self.hidden_size, num_layers=self.num_layers, \ - batch_first = self.batch_first, bidirectional=self.is_bidirect, sequence_length=sequence_length, forget_bias = self.forget_bias ) + x = layers.data( + name='x', + shape=[-1, self.batch_size, self.hidden_size], + dtype='float32', + ) + sequence_length = layers.data( + name="sequence_length", shape=[-1], dtype='float32' + ) + + rnn_out, last_hidden, last_cell = basic_lstm( + x, + None, + None, + self.hidden_size, + num_layers=self.num_layers, + batch_first=self.batch_first, + bidirectional=self.is_bidirect, + sequence_length=sequence_length, + forget_bias=self.forget_bias, + ) last_hidden.persisbale = True rnn_out.persisbale = True @@ -228,83 +248,100 @@ class TestBasicLSTMApi(unittest.TestCase): gate_b_name = "basic_lstm_layers_" + str(i) + "/BasicLSTMUnit_0.b_0" gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) gate_weight.append(gate_w) gate_bias.append(gate_b) if self.is_bidirect: for i in range(self.num_layers): - gate_w_name = "basic_lstm_reverse_layers_" + str( - i) + "/BasicLSTMUnit_0.w_0" - gate_b_name = "basic_lstm_reverse_layers_" + str( - i) + "/BasicLSTMUnit_0.b_0" + gate_w_name = ( + "basic_lstm_reverse_layers_" + + str(i) + + "/BasicLSTMUnit_0.w_0" + ) + gate_b_name = ( + "basic_lstm_reverse_layers_" + + str(i) + + "/BasicLSTMUnit_0.b_0" + ) gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + 
fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) gate_weight.append(gate_w) gate_bias.append(gate_b) step_input_np = np.random.uniform( - -0.1, 0.1, - (self.seq_len, self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.seq_len, self.batch_size, self.hidden_size) + ).astype('float32') sequence_length_np = np.random.randint( - self.seq_len // 2, self.seq_len, - size=(self.batch_size)).astype('int64') + self.seq_len // 2, self.seq_len, size=(self.batch_size) + ).astype('int64') - out = exe.run(feed={ - 'x': step_input_np, - 'sequence_length': sequence_length_np - }, - fetch_list=[rnn_out, last_hidden, last_cell]) + out = exe.run( + feed={'x': step_input_np, 'sequence_length': sequence_length_np}, + fetch_list=[rnn_out, last_hidden, last_cell], + ) api_rnn_out = out[0] api_last_hidden = out[1] api_last_cell = out[2] - np_out = lstm_np(step_input_np, - None, - None, - self.hidden_size, - gate_weight, - gate_bias, - num_layers=self.num_layers, - batch_first=self.batch_first, - is_bidirect=self.is_bidirect, - sequence_length=sequence_length_np) + np_out = lstm_np( + step_input_np, + None, + None, + self.hidden_size, + gate_weight, + gate_bias, + num_layers=self.num_layers, + batch_first=self.batch_first, + is_bidirect=self.is_bidirect, + sequence_length=sequence_length_np, + ) np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001, atol=0) - np.testing.assert_allclose(api_last_hidden, - np_out[1], - rtol=0.0001, - atol=0) - np.testing.assert_allclose(api_last_cell, - np_out[2], - rtol=0.0001, - atol=0) + np.testing.assert_allclose( + api_last_hidden, np_out[1], rtol=0.0001, atol=0 + ) + np.testing.assert_allclose( + api_last_cell, np_out[2], rtol=0.0001, atol=0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py b/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py index 50ef22467dfe17ba60eaefc79147fa14cd7949e1..cf32b95d76f36952317fafd5fffc8800cd1c4ef6 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py @@ -34,13 +34,13 @@ def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX - return 1. / (1. + np.exp(-y)) + return 1.0 / (1.0 + np.exp(-y)) def tanh(x): - y = -2. * x + y = -2.0 * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT - return (2. / (1. + np.exp(y))) - 1. 
+ return (2.0 / (1.0 + np.exp(y))) - 1.0 def step(step_in, pre_hidden, pre_cell, gate_w, gate_b, forget_bias=1.0): @@ -57,19 +57,18 @@ def step(step_in, pre_hidden, pre_cell, gate_w, gate_b, forget_bias=1.0): class TestBasicGRUUnit(unittest.TestCase): - def setUp(self): self.hidden_size = 5 self.batch_size = 5 def test_run(self): x = layers.data(name='x', shape=[-1, self.hidden_size], dtype='float32') - pre_hidden = layers.data(name="pre_hidden", - shape=[-1, self.hidden_size], - dtype='float32') - pre_cell = layers.data(name="pre_cell", - shape=[-1, self.hidden_size], - dtype='float32') + pre_hidden = layers.data( + name="pre_hidden", shape=[-1, self.hidden_size], dtype='float32' + ) + pre_cell = layers.data( + name="pre_cell", shape=[-1, self.hidden_size], dtype='float32' + ) lstm_unit = BasicLSTMUnit("lstm_unit", self.hidden_size) @@ -94,44 +93,57 @@ class TestBasicGRUUnit(unittest.TestCase): gate_b_name = "lstm_unit/BasicLSTMUnit_0.b_0" gate_w = np.array( - fluid.global_scope().find_var(gate_w_name).get_tensor()) - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float32') + fluid.global_scope().find_var(gate_w_name).get_tensor() + ) + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_w_name).get_tensor().set( - gate_w, place) + gate_w, place + ) gate_b = np.array( - fluid.global_scope().find_var(gate_b_name).get_tensor()) - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float32') + fluid.global_scope().find_var(gate_b_name).get_tensor() + ) + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float32' + ) fluid.global_scope().find_var(gate_b_name).get_tensor().set( - gate_b, place) + gate_b, place + ) step_input_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') pre_hidden_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') pre_cell_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') - - out = exe.run( feed={ 'x' : step_input_np, 'pre_hidden' : pre_hidden_np, \ - 'pre_cell' : pre_cell_np }, - fetch_list=[ new_hidden, new_cell]) + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') + + out = exe.run( + feed={ + 'x': step_input_np, + 'pre_hidden': pre_hidden_np, + 'pre_cell': pre_cell_np, + }, + fetch_list=[new_hidden, new_cell], + ) api_hidden_out = out[0] api_cell_out = out[1] - np_hidden_out, np_cell_out = step(step_input_np, pre_hidden_np, - pre_cell_np, gate_w, gate_b) - - np.testing.assert_allclose(api_hidden_out, - np_hidden_out, - rtol=0.0001, - atol=0) - np.testing.assert_allclose(api_cell_out, - np_cell_out, - rtol=0.0001, - atol=0) + np_hidden_out, np_cell_out = step( + step_input_np, pre_hidden_np, pre_cell_np, gate_w, gate_b + ) + + np.testing.assert_allclose( + api_hidden_out, np_hidden_out, rtol=0.0001, atol=0 + ) + np.testing.assert_allclose( + api_cell_out, np_cell_out, rtol=0.0001, atol=0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_rnn_name.py b/python/paddle/fluid/tests/unittests/test_basic_rnn_name.py index ca56cd62a46b55a788cf8ca8337b56c0c4db4de5..17bec037e842b9d91ec0c9c6ee904e247f34af1a 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_rnn_name.py +++ b/python/paddle/fluid/tests/unittests/test_basic_rnn_name.py @@ 
-20,14 +20,19 @@ from test_imperative_base import new_program_scope class TestBasicGRUApiName(unittest.TestCase): - def setUp(self): - self.name_set = set([ - "test1_fw_w_0_gate", "test1_fw_w_0_candidate", "test1_fw_b_0_gate", - "test1_fw_b_0_candidate", "test1_bw_w_0_gate", - "test1_bw_w_0_candidate", "test1_bw_b_0_gate", - "test1_bw_b_0_candidate" - ]) + self.name_set = set( + [ + "test1_fw_w_0_gate", + "test1_fw_w_0_candidate", + "test1_fw_b_0_gate", + "test1_fw_b_0_candidate", + "test1_bw_w_0_gate", + "test1_bw_w_0_candidate", + "test1_bw_b_0_gate", + "test1_bw_b_0_candidate", + ] + ) def test_name(self): batch_size = 20 @@ -39,35 +44,54 @@ class TestBasicGRUApiName(unittest.TestCase): batch_first = False with new_program_scope(): - input = layers.data(name="input", - shape=[-1, batch_size, input_size], - dtype='float32') - pre_hidden = layers.data(name="pre_hidden", - shape=[-1, hidden_size], - dtype='float32') - sequence_length = layers.data(name="sequence_length", - shape=[-1], - dtype='int32') - - - rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \ - sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \ - batch_first = batch_first, param_attr=fluid.ParamAttr( name ="test1"), bias_attr=fluid.ParamAttr( name="test1"), name="basic_gru") + input = layers.data( + name="input", + shape=[-1, batch_size, input_size], + dtype='float32', + ) + pre_hidden = layers.data( + name="pre_hidden", shape=[-1, hidden_size], dtype='float32' + ) + sequence_length = layers.data( + name="sequence_length", shape=[-1], dtype='int32' + ) + + rnn_out, last_hidden = basic_gru( + input, + pre_hidden, + hidden_size, + num_layers=num_layers, + sequence_length=sequence_length, + dropout_prob=dropout, + bidirectional=bidirectional, + batch_first=batch_first, + param_attr=fluid.ParamAttr(name="test1"), + bias_attr=fluid.ParamAttr(name="test1"), + name="basic_gru", + ) var_list = fluid.io.get_program_parameter( - fluid.default_main_program()) + fluid.default_main_program() + ) for var in var_list: self.assertTrue(var.name in self.name_set) class TestBasicLSTMApiName(unittest.TestCase): - def setUp(self): - self.name_set = set([ - "test1_fw_w_0", "test1_fw_b_0", "test1_fw_w_1", "test1_fw_b_1", - "test1_bw_w_0", "test1_bw_b_0", "test1_bw_w_1", "test1_bw_b_1" - ]) + self.name_set = set( + [ + "test1_fw_w_0", + "test1_fw_b_0", + "test1_fw_w_1", + "test1_fw_b_1", + "test1_bw_w_0", + "test1_bw_b_0", + "test1_bw_w_1", + "test1_bw_b_1", + ] + ) def test_name(self): batch_size = 20 @@ -79,27 +103,38 @@ class TestBasicLSTMApiName(unittest.TestCase): batch_first = False with new_program_scope(): - input = layers.data(name="input", - shape=[-1, batch_size, input_size], - dtype='float32') - pre_hidden = layers.data(name="pre_hidden", - shape=[-1, hidden_size], - dtype='float32') - pre_cell = layers.data(name="pre_cell", - shape=[-1, hidden_size], - dtype='float32') - sequence_length = layers.data(name="sequence_length", - shape=[-1], - dtype='int32') - - rnn_out, last_hidden, last_cell = basic_lstm( input, pre_hidden, pre_cell, \ - hidden_size, num_layers = num_layers, \ - sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \ - param_attr=fluid.ParamAttr( name ="test1"), bias_attr=fluid.ParamAttr( name = "test1"), \ - batch_first = batch_first) + input = layers.data( + name="input", + shape=[-1, batch_size, input_size], + dtype='float32', + ) + pre_hidden = layers.data( + name="pre_hidden", shape=[-1, hidden_size], 
dtype='float32' + ) + pre_cell = layers.data( + name="pre_cell", shape=[-1, hidden_size], dtype='float32' + ) + sequence_length = layers.data( + name="sequence_length", shape=[-1], dtype='int32' + ) + + rnn_out, last_hidden, last_cell = basic_lstm( + input, + pre_hidden, + pre_cell, + hidden_size, + num_layers=num_layers, + sequence_length=sequence_length, + dropout_prob=dropout, + bidirectional=bidirectional, + param_attr=fluid.ParamAttr(name="test1"), + bias_attr=fluid.ParamAttr(name="test1"), + batch_first=batch_first, + ) var_list = fluid.io.get_program_parameter( - fluid.default_main_program()) + fluid.default_main_program() + ) for var in var_list: self.assertTrue(var.name in self.name_set) diff --git a/python/paddle/fluid/tests/unittests/test_batch_fc_op.py b/python/paddle/fluid/tests/unittests/test_batch_fc_op.py index 6214859931f801b7bec6089cfa7731af894606dc..2414514b2bd25037ad823fcc612b5ef9d8432981 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_fc_op.py @@ -32,7 +32,6 @@ def np_cal_batchfc(input, w, bias): class TestBatchFCOp(OpTest): - def config(self): self.slot_pairs_num = 10 self.batch_size = 5 @@ -42,12 +41,15 @@ class TestBatchFCOp(OpTest): def setUp(self): self.config() - self.input = np.random.random((self.slot_pairs_num, self.batch_size, - self.in_dim)).astype(self.dtype) + self.input = np.random.random( + (self.slot_pairs_num, self.batch_size, self.in_dim) + ).astype(self.dtype) self.w = np.random.random( - (self.slot_pairs_num, self.in_dim, self.out_dim)).astype(self.dtype) + (self.slot_pairs_num, self.in_dim, self.out_dim) + ).astype(self.dtype) self.bias = np.random.random( - (self.slot_pairs_num, self.out_dim)).astype(self.dtype) + (self.slot_pairs_num, self.out_dim) + ).astype(self.dtype) self.op_type = "batch_fc" np_out = np_cal_batchfc(self.input, self.w, self.bias) np_out = np_out.astype(self.dtype) @@ -60,12 +62,12 @@ class TestBatchFCOp(OpTest): def test_check_grad_gpu(self): if core.is_compiled_with_cuda(): - self.check_grad_with_place(core.CUDAPlace(0), - ["Bias", "W", "Input"], "Out") + self.check_grad_with_place( + core.CUDAPlace(0), ["Bias", "W", "Input"], "Out" + ) class TestBatchFCOp1(OpTest): - def config(self): self.slot_pairs_num = 10 self.batch_size = 5 @@ -75,12 +77,15 @@ class TestBatchFCOp1(OpTest): def setUp(self): self.config() - self.input = np.random.random((self.slot_pairs_num, self.batch_size, - self.in_dim)).astype(self.dtype) + self.input = np.random.random( + (self.slot_pairs_num, self.batch_size, self.in_dim) + ).astype(self.dtype) self.w = np.random.random( - (self.slot_pairs_num, self.in_dim, self.out_dim)).astype(self.dtype) + (self.slot_pairs_num, self.in_dim, self.out_dim) + ).astype(self.dtype) self.bias = np.random.random( - (self.slot_pairs_num, self.out_dim)).astype(self.dtype) + (self.slot_pairs_num, self.out_dim) + ).astype(self.dtype) self.op_type = "batch_fc" np_out = np_cal_batchfc(self.input, self.w, self.bias) np_out = np_out.astype(self.dtype) @@ -95,8 +100,9 @@ class TestBatchFCOp1(OpTest): def test_check_grad_cpu(self): try: - self.check_grad_with_place(core.CPUPlace(), ["Bias", "W", "Input"], - "Out") + self.check_grad_with_place( + core.CPUPlace(), ["Bias", "W", "Input"], "Out" + ) except: print("do not support cpu test, skip") diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 
33265b33bd1b77acac68cf35a7492293b7870485..381640621cb1cf43c03bb87150bf6e7ed0174ae3 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -154,12 +154,20 @@ def _reference_grad(x, y_grad, scale, mean, var, epsilon, data_format): x = np.transpose(x, (0, 2, 3, 1)) y_grad = np.transpose(y_grad, (0, 2, 3, 1)) - x_grad = scale * (y_grad - np.mean(y_grad, axis=(0, 1, 2)) - - (x - mean) * np.mean(y_grad * - (x - mean), axis=(0, 1, 2)) / - (var + epsilon)) / np.sqrt(var + epsilon) - grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), - axis=(0, 1, 2)) + x_grad = ( + scale + * ( + y_grad + - np.mean(y_grad, axis=(0, 1, 2)) + - (x - mean) + * np.mean(y_grad * (x - mean), axis=(0, 1, 2)) + / (var + epsilon) + ) + / np.sqrt(var + epsilon) + ) + grad_scale = np.sum( + y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2) + ) grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W @@ -184,7 +192,6 @@ def create_or_get_tensor(scope, var_name, var, place): def set_output_grad(scope, outputs, place, feed_dict=None): - def __set_tensor__(name, data=None): out_tensor = scope.find_var(name).get_tensor() grad_tensor = scope.var(grad_var_name(name)).get_tensor() @@ -206,7 +213,6 @@ def set_output_grad(scope, outputs, place, feed_dict=None): class TestBatchNormOpInference(unittest.TestCase): - def setUp(self): self.dtype = np.float32 self.use_mkldnn = False @@ -214,11 +220,9 @@ class TestBatchNormOpInference(unittest.TestCase): self.init_kernel_type() def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) def check_with_place(self, place, data_layout, dtype, shape): epsilon = 0.00001 @@ -244,34 +248,39 @@ class TestBatchNormOpInference(unittest.TestCase): mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) - y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, - epsilon, data_layout).astype(dtype) + y_out = _reference_testing( + x_val, scale_val, bias_val, mean, variance, epsilon, data_layout + ).astype(dtype) if self.fuse_with_relu: y_out = np.maximum(y_out, 0) scope = core.Scope() # create input - x_tensor = create_or_get_tensor(scope, "x_val", - OpTest.np_dtype_to_fluid_dtype(x_val), - place) + x_tensor = create_or_get_tensor( + scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place + ) scale_tensor = create_or_get_tensor( - scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val), - place) + scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val), place + ) bias_tensor = create_or_get_tensor( - scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place) - mean_tensor = create_or_get_tensor(scope, "mean", - OpTest.np_dtype_to_fluid_dtype(mean), - place) + scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place + ) + mean_tensor = create_or_get_tensor( + scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place + ) variance_tensor = create_or_get_tensor( - scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place) + scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place + ) # create output y_tensor = create_or_get_tensor(scope, "y_out", None, place) - saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None, - place) - saved_variance_tensor = 
create_or_get_tensor(scope, "saved_variance", - None, place) + saved_mean_tensor = create_or_get_tensor( + scope, "saved_mean", None, place + ) + saved_variance_tensor = create_or_get_tensor( + scope, "saved_variance", None, place + ) mean_out_tensor = mean_tensor variance_out_tensor = variance_tensor @@ -294,7 +303,8 @@ class TestBatchNormOpInference(unittest.TestCase): data_layout=data_layout, use_mkldnn=self.use_mkldnn, fuse_with_relu=self.fuse_with_relu, - epsilon=epsilon) + epsilon=epsilon, + ) batch_norm_op.run(scope, place) @@ -314,12 +324,19 @@ class TestBatchNormOpInference(unittest.TestCase): y_tensor._set_dims(dims) # check inference result - self.__assert_close(y_tensor, - y_out, - "inference output are different at " + str(place) + - ", " + data_layout + ", " + str(np.dtype(dtype)) + - str(np.array(y_tensor)) + str(y_out), - atol=1e-3) + self.__assert_close( + y_tensor, + y_out, + "inference output are different at " + + str(place) + + ", " + + data_layout + + ", " + + str(np.dtype(dtype)) + + str(np.array(y_tensor)) + + str(y_out), + atol=1e-3, + ) def test_check_output(self): places = [core.CPUPlace()] @@ -328,8 +345,9 @@ class TestBatchNormOpInference(unittest.TestCase): for place in places: for data_format in ["NCHW", "NHWC"]: - self.check_with_place(place, data_format, self.dtype, - [2, 3, 4, 5]) + self.check_with_place( + place, data_format, self.dtype, [2, 3, 4, 5] + ) self.check_with_place(place, data_format, self.dtype, [2, 3]) def init_kernel_type(self): @@ -337,7 +355,6 @@ class TestBatchNormOpInference(unittest.TestCase): class TestFP16BatchNormOpInference(TestBatchNormOpInference): - def setUp(self): self.dtype = np.float16 self.use_mkldnn = False @@ -351,15 +368,15 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): if core.is_float16_supported(place): places.append(place) for place in places: - #for data_format in ["NCHW", "NHWC"]: + # for data_format in ["NCHW", "NHWC"]: for data_format in ["NCHW"]: - self.check_with_place(place, data_format, self.dtype, - [2, 3, 4, 5]) + self.check_with_place( + place, data_format, self.dtype, [2, 3, 4, 5] + ) self.check_with_place(place, data_format, self.dtype, [2, 3]) class TestBatchNormOpTraining(unittest.TestCase): - def setUp(self): self.use_mkldnn = False self.fuse_with_relu = False @@ -374,27 +391,54 @@ class TestBatchNormOpTraining(unittest.TestCase): self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def __assert_close(self, tensor, np_array, msg, atol=1e-4): np.allclose(np.array(tensor), np_array, atol=atol) - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): # run forward - y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon, - data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. 
/ np.sqrt(var_ref + epsilon) + y, saved_mean, var_ref = _reference_training( + x, scale, bias, epsilon, data_layout + ) + mean_out = saved_mean * (1.0 - momentum) + momentum * mean + variance_out = var_ref * (1.0 - momentum) + momentum * variance + saved_variance = 1.0 / np.sqrt(var_ref + epsilon) # run backward - x_grad, scale_grad, bias_grad = _reference_grad(x, y_grad, scale, - saved_mean, var_ref, - epsilon, data_layout) - - return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout + ) + + return ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) def set_mean_variance(self, scale_shape, x, data_layout): mean, variance = _cal_mean_variance(x, self.epsilon, data_layout) @@ -403,12 +447,11 @@ class TestBatchNormOpTraining(unittest.TestCase): # computing global mean/variance for one step if self.use_global_stats: mom = self.momentum - mean = mean * (1. - mom) + mom * mean_pre - variance = variance * (1. - mom) + mom * variance_pre + mean = mean * (1.0 - mom) + mom * mean_pre + variance = variance * (1.0 - mom) + mom * variance_pre return mean, variance def test_forward_backward(self): - def test_with_place(place, data_layout, shape): # attr epsilon = self.epsilon @@ -427,9 +470,27 @@ class TestBatchNormOpTraining(unittest.TestCase): y_grad = np.random.random_sample(shape).astype(np.float32) momentum_var = np.array([momentum]).astype(np.float32) - y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward( - x, y_grad, scale, bias, mean, variance, epsilon, momentum, - shape, data_layout) + ( + y, + mean_out, + variance_out, + saved_mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) = self.ref_forward_backward( + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -438,8 +499,15 @@ class TestBatchNormOpTraining(unittest.TestCase): var_dict['bias@GRAD'] = bias_grad var_names = [ - 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean', - 'saved_variance', 'momentum_var' + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y', + 'saved_mean', + 'saved_variance', + 'momentum_var', ] ground_truth = {name: var_dict[name] for name in var_names} @@ -447,15 +515,17 @@ class TestBatchNormOpTraining(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = { "X": block.var('x'), "Scale": block.var('scale'), "Bias": block.var('bias'), "Mean": block.var('mean'), - "Variance": block.var('variance') + "Variance": block.var('variance'), } attrs = { "epsilon": epsilon, @@ -463,7 +533,7 @@ class TestBatchNormOpTraining(unittest.TestCase): "data_layout": data_layout, "use_mkldnn": self.use_mkldnn, "fuse_with_relu": self.fuse_with_relu, - "use_global_stats": self.use_global_stats + "use_global_stats": self.use_global_stats, } if self.use_momentum_variable: inputs['MomentumTensor'] = block.var('momentum_var') @@ -475,19 +545,22 @@ class TestBatchNormOpTraining(unittest.TestCase): "MeanOut": block.var('mean'), # share memory "VarianceOut": block.var('variance'), # share memory "SavedMean": 
block.var('saved_mean'), - "SavedVariance": block.var('saved_variance') + "SavedVariance": block.var('saved_variance'), } block.create_var(name="reserve_space", dtype='float32') outputs["ReserveSpace"] = block.var('reserve_space') - bn_op = block.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + bn_op = block.append_op( + type="batch_norm", + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - bn_op.desc, self.no_grad_set, []) + bn_op.desc, self.no_grad_set, [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -502,22 +575,28 @@ class TestBatchNormOpTraining(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in [ - 'x', 'scale', 'bias', 'mean', 'variance', - 'y@GRAD', 'momentum_var' - ] - }, - fetch_list=self.fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in [ + 'x', + 'scale', + 'bias', + 'mean', + 'variance', + 'y@GRAD', + 'momentum_var', + ] + }, + fetch_list=self.fetch_list, + ) for id, name in enumerate(self.fetch_list): if name == 'variance': - self.__assert_close(var_dict[name], - out[id], - name, - atol=1e-3) + self.__assert_close( + var_dict[name], out[id], name, atol=1e-3 + ) continue self.__assert_close(var_dict[name], out[id], name) print("op test forward passed: ", str(place), data_layout) @@ -536,7 +615,6 @@ class TestBatchNormOpTraining(unittest.TestCase): class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -544,19 +622,23 @@ class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining): class TestBatchNormOpTrainingCase2(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = "1" class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = False self.no_grad_set = set(['x@GRAD']) @@ -564,24 +646,33 @@ class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining): class TestBatchNormOpTrainingMomentumVariable(TestBatchNormOpTraining): - def init_test_case(self): self.use_momentum_variable = True self.use_global_stats = False self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD', - 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'saved_mean', + 'saved_variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): - def init_test_case(self): self.use_global_stats = True self.no_grad_set = set() self.fetch_list = [ - 'y', 'mean', 'variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD' + 'y', + 'mean', + 'variance', + 'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def reference_grad(self, x, y_grad, scale, mean, var, epsilon, data_format): @@ -590,8 +681,9 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): y_grad = np.transpose(y_grad, (0, 2, 3, 1)) x_grad = scale * y_grad / 
np.sqrt(var + epsilon) - grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), - axis=(0, 1, 2)) + grad_scale = np.sum( + y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2) + ) grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W @@ -602,8 +694,19 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): return x_grad, grad_scale, grad_offset - def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, - epsilon, momentum, shape, data_layout): + def ref_forward_backward( + self, + x, + y_grad, + scale, + bias, + mean, + variance, + epsilon, + momentum, + shape, + data_layout, + ): if data_layout != "NCHW" and data_layout != "NHWC": raise ValueError("Unknown data order.") @@ -621,17 +724,27 @@ class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining): mean_out = mean variance_out = variance - saved_variance = 1. / np.sqrt(variance + epsilon) + saved_variance = 1.0 / np.sqrt(variance + epsilon) # run backward x_grad, scale_grad, bias_grad = self.reference_grad( - x, y_grad, scale, mean, variance, epsilon, data_layout) + x, y_grad, scale, mean, variance, epsilon, data_layout + ) - return y, mean_out, variance_out, mean, saved_variance, x_grad, scale_grad, bias_grad + return ( + y, + mean_out, + variance_out, + mean, + saved_variance, + x_grad, + scale_grad, + bias_grad, + ) class TestBatchNormOpFreezeStatsAndScaleBiasTraining( - TestBatchNormOpFreezeStatsTraining): - + TestBatchNormOpFreezeStatsTraining +): def init_test_case(self): self.use_global_stats = True self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -639,12 +752,12 @@ class TestBatchNormOpFreezeStatsAndScaleBiasTraining( class TestBatchNormOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of batch_norm must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.batch_norm, x1) # the input dtype of batch_norm must be float16 or float32 or float64 @@ -654,13 +767,13 @@ class TestBatchNormOpError(unittest.TestCase): class TestDygraphBatchNormAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_norm = fluid.dygraph.BatchNorm(10) # the input of BatchNorm must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, batch_norm, x1) # the input dtype of BatchNorm must be float16 or float32 or float64 @@ -670,7 +783,6 @@ class TestDygraphBatchNormAPIError(unittest.TestCase): class TestDygraphBatchNormTrainableStats(unittest.TestCase): - def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -683,7 +795,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) y = bn(fluid.dygraph.to_variable(x)) return y.numpy() @@ -705,7 +818,8 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) @@ -719,7 +833,6 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): - def test_reservespace(self): with program_guard(Program(), Program()): paddle.enable_static() @@ -734,5 +847,6 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index ed3b0560b52949355562f0e2f88e142f610c92ce..778489eb668dce30edafd6934cff425313a6548e 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -23,7 +23,6 @@ import paddle class TestBatchNorm(unittest.TestCase): - def test_name(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -37,7 +36,7 @@ class TestBatchNorm(unittest.TestCase): if core.is_compiled_with_cuda(): places.append(fluid.CUDAPlace(0)) for p in places: - #paddle.disable_static() + # paddle.disable_static() x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') @@ -80,7 +79,6 @@ class TestBatchNorm(unittest.TestCase): self.assertRaises(ValueError, error3d_dataformat) def test_large_batch(self): - def compute_baseline(x): with fluid.dygraph.guard(p): bn = fluid.dygraph.BatchNorm(shape[1]) @@ -130,7 +128,7 @@ class TestBatchNorm(unittest.TestCase): def compute_v1(x): with fluid.dygraph.guard(p): bn = fluid.dygraph.BatchNorm(shape[1]) - #bn = paddle.nn.BatchNorm2D(shape[1]) + # bn = paddle.nn.BatchNorm2D(shape[1]) x1 = paddle.to_tensor(x) x1.stop_gradient = False y = bn(x1) @@ -166,7 +164,8 @@ class TestBatchNorm(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) y = bn(paddle.to_tensor(x)) return y.numpy() @@ -188,19 +187,22 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(1.0), - trainable=False), + trainable=False, + ), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(0.0), - trainable=False), - trainable_statistics=trainable_statistics) + trainable=False, + ), + 
trainable_statistics=trainable_statistics, + ) y = bn(paddle.to_tensor(x)) return y.numpy() def compute_v4(x): with fluid.dygraph.guard(p): - bn = paddle.nn.BatchNorm2D(shape[1], - weight_attr=False, - bias_attr=False) + bn = paddle.nn.BatchNorm2D( + shape[1], weight_attr=False, bias_attr=False + ) y = bn(paddle.to_tensor(x)) return y.numpy() @@ -225,7 +227,8 @@ class TestBatchNorm(unittest.TestCase): bn = fluid.dygraph.BatchNorm( shape[1], is_test=is_test, - trainable_statistics=trainable_statistics) + trainable_statistics=trainable_statistics, + ) x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) @@ -248,7 +251,6 @@ class TestBatchNorm(unittest.TestCase): class TestBatchNormChannelLast(unittest.TestCase): - def setUp(self): self.original_dtyep = paddle.get_default_dtype() # MIOPEN not support data type of double @@ -277,14 +279,13 @@ class TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) else: - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05 + ) def test_2d(self): for p in self.places: @@ -300,14 +301,13 @@ class TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 3, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) else: - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05 + ) def test_3d(self): for p in self.places: @@ -323,14 +323,13 @@ class TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 3, 4, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07 + ) else: - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05 + ) def test_1d_opt(self): with fluid.dygraph.guard(): @@ -352,18 +351,18 @@ class TestBatchNormChannelLast(unittest.TestCase): y.backward() y2.backward() - assert np.allclose(y.numpy().flatten(), - y2.numpy().flatten(), - atol=1e-5, - rtol=1e-5) - assert np.allclose(bn1d.weight.grad.numpy().flatten(), - bn2d.weight.grad.numpy().flatten(), - atol=1e-5, - rtol=1e-5) + assert np.allclose( + y.numpy().flatten(), y2.numpy().flatten(), atol=1e-5, rtol=1e-5 + ) + assert np.allclose( + bn1d.weight.grad.numpy().flatten(), + bn2d.weight.grad.numpy().flatten(), + atol=1e-5, + rtol=1e-5, + ) class TestBatchNormUseGlobalStats(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -382,11 +381,14 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): net1 = paddle.fluid.dygraph.BatchNorm( 6, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.0)), + initializer=fluid.initializer.Constant(1.0) + ), use_global_stats=self.use_global_stats, - trainable_statistics=self.trainable_statistics) + trainable_statistics=self.trainable_statistics, + ) net2 = 
paddle.nn.BatchNorm2D( - 6, use_global_stats=self.use_global_stats) + 6, use_global_stats=self.use_global_stats + ) net2.weight = net1.weight net2.bias = net1.bias if self.trainable_statistics == True: @@ -420,5 +422,6 @@ class TestBatchNormUseGlobalStatsCase3(TestBatchNormUseGlobalStats): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_batch_sampler.py b/python/paddle/fluid/tests/unittests/test_batch_sampler.py index 833d96e8e3c4d7ae3325ea080cfdfe642bebb86a..a848898c53f89bf6ad35a36080ff224aa222ec34 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_sampler.py +++ b/python/paddle/fluid/tests/unittests/test_batch_sampler.py @@ -15,14 +15,19 @@ import unittest import numpy as np -from paddle.io import BatchSampler, Dataset, Sampler, SequenceSampler, \ - RandomSampler, WeightedRandomSampler +from paddle.io import ( + BatchSampler, + Dataset, + Sampler, + SequenceSampler, + RandomSampler, + WeightedRandomSampler, +) IMAGE_SIZE = 32 class RandomDataset(Dataset): - def __init__(self, sample_num, class_num): self.sample_num = sample_num self.class_num = class_num @@ -30,7 +35,7 @@ class RandomDataset(Dataset): def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, self.class_num - 1, (1, )).astype('int64') + label = np.random.randint(0, self.class_num - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -38,7 +43,6 @@ class RandomDataset(Dataset): class TestSampler(unittest.TestCase): - def test_main(self): dataset = RandomDataset(100, 10) sampler = Sampler(dataset) @@ -50,7 +54,6 @@ class TestSampler(unittest.TestCase): class TestSequenceSampler(unittest.TestCase): - def test_main(self): dataset = RandomDataset(100, 10) sampler = SequenceSampler(dataset) @@ -61,7 +64,6 @@ class TestSequenceSampler(unittest.TestCase): class TestRandomSampler(unittest.TestCase): - def test_main(self): dataset = RandomDataset(100, 10) sampler = RandomSampler(dataset) @@ -96,10 +98,9 @@ class TestRandomSampler(unittest.TestCase): def test_with_generator_num_samples(self): dataset = RandomDataset(100, 10) generator = iter(range(0, 60)) - sampler = RandomSampler(dataset, - generator=generator, - num_samples=50, - replacement=True) + sampler = RandomSampler( + dataset, generator=generator, num_samples=50, replacement=True + ) assert len(sampler) == 50 rets = [] @@ -109,7 +110,6 @@ class TestRandomSampler(unittest.TestCase): class TestBatchSampler(unittest.TestCase): - def setUp(self): self.num_samples = 1000 self.num_classes = 10 @@ -119,17 +119,20 @@ class TestBatchSampler(unittest.TestCase): def init_batch_sampler(self): dataset = RandomDataset(self.num_samples, self.num_classes) - bs = BatchSampler(dataset=dataset, - batch_size=self.batch_size, - shuffle=self.shuffle, - drop_last=self.drop_last) + bs = BatchSampler( + dataset=dataset, + batch_size=self.batch_size, + shuffle=self.shuffle, + drop_last=self.drop_last, + ) return bs def test_main(self): bs = self.init_batch_sampler() # length check - bs_len = (self.num_samples + int(not self.drop_last) \ - * (self.batch_size - 1)) // self.batch_size + bs_len = ( + self.num_samples + int(not self.drop_last) * (self.batch_size - 1) + ) // self.batch_size self.assertTrue(bs_len == len(bs)) # output indices check @@ -142,7 +145,6 @@ class TestBatchSampler(unittest.TestCase): class TestBatchSamplerDropLast(TestBatchSampler): - def setUp(self): self.num_samples = 1000 self.num_classes 
= 10 @@ -152,7 +154,6 @@ class TestBatchSamplerDropLast(TestBatchSampler): class TestBatchSamplerShuffle(TestBatchSampler): - def setUp(self): self.num_samples = 1000 self.num_classes = 10 @@ -162,18 +163,18 @@ class TestBatchSamplerShuffle(TestBatchSampler): class TestBatchSamplerWithSampler(TestBatchSampler): - def init_batch_sampler(self): dataset = RandomDataset(1000, 10) sampler = SequenceSampler(dataset) - bs = BatchSampler(sampler=sampler, - batch_size=self.batch_size, - drop_last=self.drop_last) + bs = BatchSampler( + sampler=sampler, + batch_size=self.batch_size, + drop_last=self.drop_last, + ) return bs class TestBatchSamplerWithSamplerDropLast(unittest.TestCase): - def setUp(self): self.num_samples = 1000 self.num_classes = 10 @@ -183,7 +184,6 @@ class TestBatchSamplerWithSamplerDropLast(unittest.TestCase): class TestBatchSamplerWithSamplerShuffle(unittest.TestCase): - def setUp(self): self.num_samples = 1000 self.num_classes = 10 @@ -195,20 +195,21 @@ class TestBatchSamplerWithSamplerShuffle(unittest.TestCase): try: dataset = RandomDataset(self.num_samples, self.num_classes) sampler = RandomSampler(dataset) - bs = BatchSampler(sampler=sampler, - shuffle=self.shuffle, - batch_size=self.batch_size, - drop_last=self.drop_last) + bs = BatchSampler( + sampler=sampler, + shuffle=self.shuffle, + batch_size=self.batch_size, + drop_last=self.drop_last, + ) self.assertTrue(False) except AssertionError: pass class TestWeightedRandomSampler(unittest.TestCase): - def init_probs(self, total, pos): - pos_probs = np.random.random((pos, )).astype('float32') - probs = np.zeros((total, )).astype('float32') + pos_probs = np.random.random((pos,)).astype('float32') + probs = np.zeros((total,)).astype('float32') probs[:pos] = pos_probs np.random.shuffle(probs) return probs @@ -218,7 +219,7 @@ class TestWeightedRandomSampler(unittest.TestCase): sampler = WeightedRandomSampler(probs, 30, True) assert len(sampler) == 30 for idx in iter(sampler): - assert probs[idx] > 0. + assert probs[idx] > 0.0 def test_no_replacement(self): probs = self.init_probs(20, 10) @@ -226,13 +227,13 @@ class TestWeightedRandomSampler(unittest.TestCase): assert len(sampler) == 10 idxs = [] for idx in iter(sampler): - assert probs[idx] > 0. 
+ assert probs[idx] > 0.0 idxs.append(idx) assert len(set(idxs)) == len(idxs) def test_assert(self): # all zeros - probs = np.zeros((10, )).astype('float32') + probs = np.zeros((10,)).astype('float32') sampler = WeightedRandomSampler(probs, 10, True) try: for idx in iter(sampler): @@ -252,7 +253,7 @@ class TestWeightedRandomSampler(unittest.TestCase): self.assertTrue(True) # neg probs - probs = -1.0 * np.ones((10, )).astype('float32') + probs = -1.0 * np.ones((10,)).astype('float32') sampler = WeightedRandomSampler(probs, 10, True) try: for idx in iter(sampler): diff --git a/python/paddle/fluid/tests/unittests/test_bce_loss.py b/python/paddle/fluid/tests/unittests/test_bce_loss.py index 417f4795c4e7e49ea84b8d976bee84a366b191fc..c77196c1d0d94ef1ad8ecd4e4b414b30997a89d3 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_loss.py @@ -19,88 +19,76 @@ import unittest from op_test import OpTest -def test_static_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float64') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float64') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float64' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float64' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float64') - bce_loss = paddle.nn.loss.BCELoss(weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float64' + ) + bce_loss = paddle.nn.loss.BCELoss( + weight=weight, reduction=reduction + ) else: bce_loss = paddle.nn.loss.BCELoss(reduction=reduction) res = bce_loss(input, label) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result -def test_static_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_static_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=input_np.shape, - dtype='float64') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float64') + input = paddle.fluid.data( + name='input', shape=input_np.shape, dtype='float64' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float64' + ) if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float64') - res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float64' + ) + res = paddle.nn.functional.binary_cross_entropy( + input, 
label, weight=weight, reduction=reduction + ) else: - res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + res = paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - } if weight_np is None else { - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np} + if weight_np is None + else {"input": input_np, "label": label_np, "weight": weight_np}, + fetch_list=[res], + ) return static_result -def test_dygraph_layer(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_layer( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static() if weight_np is not None: weight = paddle.to_tensor(weight_np) @@ -113,25 +101,22 @@ def test_dygraph_layer(place, return dy_result -def test_dygraph_functional(place, - input_np, - label_np, - reduction='mean', - weight_np=None): +def test_dygraph_functional( + place, input_np, label_np, reduction='mean', weight_np=None +): paddle.disable_static() input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) if weight_np is not None: weight = paddle.to_tensor(weight_np) - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - weight=weight, - reduction=reduction) + dy_res = paddle.nn.functional.binary_cross_entropy( + input, label, weight=weight, reduction=reduction + ) else: - dy_res = paddle.nn.functional.binary_cross_entropy(input, - label, - reduction=reduction) + dy_res = paddle.nn.functional.binary_cross_entropy( + input, label, reduction=reduction + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -139,11 +124,19 @@ def test_dygraph_functional(place, def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): if weight_np is None: - expected = -1 * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. - input_np)) + expected = -1 * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) else: - expected = -1 * weight_np * (label_np * np.log(input_np) + - (1. - label_np) * np.log(1. 
- input_np)) + expected = ( + -1 + * weight_np + * ( + label_np * np.log(input_np) + + (1.0 - label_np) * np.log(1.0 - input_np) + ) + ) if reduction == 'mean': expected = np.mean(expected) @@ -156,7 +149,6 @@ def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None): class TestBCELoss(unittest.TestCase): - def test_BCELoss(self): input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) @@ -166,89 +158,90 @@ class TestBCELoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - static_result = test_static_layer(place, input_np, label_np, - reduction) - dy_result = test_dygraph_layer(place, input_np, label_np, - reduction) + static_result = test_static_layer( + place, input_np, label_np, reduction + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction + ) expected = calc_bceloss(input_np, label_np, reduction) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) static_functional = test_static_functional( - place, input_np, label_np, reduction) - dy_functional = test_dygraph_functional(place, input_np, - label_np, reduction) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) + place, input_np, label_np, reduction + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) def test_BCELoss_weight(self): - input_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float64) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float64) + input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float64 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float64 + ) weight_np = np.random.random(size=(3, 4, 10)).astype(np.float64) - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) for reduction in ['sum', 'mean', 'none']: - static_result = test_static_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - dy_result = test_dygraph_layer(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - expected = calc_bceloss(input_np, - label_np, - reduction, - weight_np=weight_np) + static_result = test_static_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction, weight_np=weight_np + ) + expected = calc_bceloss( + input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static_functional(place, - input_np, - label_np, - reduction, - weight_np=weight_np) - dy_functional = test_dygraph_functional(place, - input_np, - label_np, - reduction, - weight_np=weight_np) + static_functional = test_static_functional( + place, 
input_np, label_np, reduction, weight_np=weight_np + ) + dy_functional = test_dygraph_functional( + place, input_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) def test_BCELoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.loss.BCELoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction" + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.binary_cross_entropy, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.binary_cross_entropy, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() def bce_loss(input, label): - return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input)) + return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input)) class TestBceLossOp(OpTest): - def setUp(self): self.init_test_case() self.op_type = "bce_loss" @@ -270,13 +263,11 @@ class TestBceLossOp(OpTest): class TestBceLossOpCase1(OpTest): - def init_test_cast(self): self.shape = [2, 3, 4, 5] class TestBceLossOpCase2(OpTest): - def init_test_cast(self): self.shape = [2, 3, 20] diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py index 55da8aea9b560d9e7b80ee86a23032373fb57e82..8d921215bfa322e7a19eecb05fd2b1c2fc1c5098 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py @@ -19,76 +19,78 @@ import unittest from paddle.fluid.framework import _test_eager_guard -def call_bce_layer(logit, - label, - weight=None, - reduction='mean', - pos_weight=None): - bce_logit_loss = paddle.nn.loss.BCEWithLogitsLoss(weight=weight, - reduction=reduction, - pos_weight=pos_weight) +def call_bce_layer( + logit, label, weight=None, reduction='mean', pos_weight=None +): + bce_logit_loss = paddle.nn.loss.BCEWithLogitsLoss( + weight=weight, reduction=reduction, pos_weight=pos_weight + ) res = bce_logit_loss(logit, label) return res -def call_bce_functional(logit, - label, - weight=None, - reduction='mean', - pos_weight=None): +def call_bce_functional( + logit, label, weight=None, reduction='mean', pos_weight=None +): res = paddle.nn.functional.binary_cross_entropy_with_logits( - logit, label, weight=weight, reduction=reduction, pos_weight=pos_weight) + logit, label, weight=weight, reduction=reduction, pos_weight=pos_weight + ) return res -def test_static(place, - logit_np, - label_np, - weight_np=None, - reduction='mean', - pos_weight_np=None, - functional=False): +def test_static( + place, + logit_np, + label_np, + weight_np=None, + reduction='mean', + pos_weight_np=None, + functional=False, +): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data(name='logit', - shape=logit_np.shape, - dtype='float64') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float64') + logit = 
paddle.fluid.data( + name='logit', shape=logit_np.shape, dtype='float64' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float64' + ) feed_dict = {"logit": logit_np, "label": label_np} pos_weight = None weight = None if pos_weight_np is not None: - pos_weight = paddle.fluid.data(name='pos_weight', - shape=pos_weight_np.shape, - dtype='float64') + pos_weight = paddle.fluid.data( + name='pos_weight', shape=pos_weight_np.shape, dtype='float64' + ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data(name='weight', - shape=weight_np.shape, - dtype='float64') + weight = paddle.fluid.data( + name='weight', shape=weight_np.shape, dtype='float64' + ) feed_dict["weight"] = weight_np if functional: - res = call_bce_functional(logit, label, weight, reduction, - pos_weight) + res = call_bce_functional( + logit, label, weight, reduction, pos_weight + ) else: res = call_bce_layer(logit, label, weight, reduction, pos_weight) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res]) + (static_result,) = exe.run(prog, feed=feed_dict, fetch_list=[res]) return static_result -def test_dygraph(place, - logit_np, - label_np, - weight_np=None, - reduction='mean', - pos_weight_np=None, - functional=False): +def test_dygraph( + place, + logit_np, + label_np, + weight_np=None, + reduction='mean', + pos_weight_np=None, + functional=False, +): with paddle.fluid.dygraph.base.guard(): logit = paddle.to_tensor(logit_np) label = paddle.to_tensor(label_np) @@ -99,22 +101,23 @@ def test_dygraph(place, if pos_weight_np is not None: pos_weight = paddle.to_tensor(pos_weight_np) if functional: - dy_res = call_bce_functional(logit, label, weight, reduction, - pos_weight) + dy_res = call_bce_functional( + logit, label, weight, reduction, pos_weight + ) else: dy_res = call_bce_layer(logit, label, weight, reduction, pos_weight) dy_result = dy_res.numpy() return dy_result -def calc_bce_with_logits_loss(logit_np, - label_np, - reduction='mean', - weight_np=None, - pos_weight=None): - expected = np.maximum( - logit_np, - 0) - logit_np * label_np + np.log(1 + np.exp(-np.abs(logit_np))) +def calc_bce_with_logits_loss( + logit_np, label_np, reduction='mean', weight_np=None, pos_weight=None +): + expected = ( + np.maximum(logit_np, 0) + - logit_np * label_np + + np.log(1 + np.exp(-np.abs(logit_np))) + ) if pos_weight is not None: expected = expected * ((pos_weight - 1) * label_np + 1) if weight_np is not None: @@ -131,7 +134,6 @@ def calc_bce_with_logits_loss(logit_np, class TestBCEWithLogitsLoss(unittest.TestCase): - def test_BCEWithLogitsLoss(self): logit_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) @@ -141,141 +143,174 @@ class TestBCEWithLogitsLoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - static_result = test_static(place, - logit_np, - label_np, - reduction=reduction) - dy_result = test_dygraph(place, - logit_np, - label_np, - reduction=reduction) - expected = calc_bce_with_logits_loss(logit_np, label_np, - reduction) + static_result = test_static( + place, logit_np, label_np, reduction=reduction + ) + dy_result = test_dygraph( + place, logit_np, label_np, reduction=reduction + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, 
dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static(place, - logit_np, - label_np, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - reduction=reduction, - functional=True) + static_functional = test_static( + place, + logit_np, + label_np, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + reduction=reduction, + functional=True, + ) with _test_eager_guard(): - eager_functional = test_dygraph(place, - logit_np, - label_np, - reduction=reduction, - functional=True) + eager_functional = test_dygraph( + place, + logit_np, + label_np, + reduction=reduction, + functional=True, + ) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) - np.testing.assert_allclose(eager_functional, - expected, - rtol=1e-05) + np.testing.assert_allclose( + eager_functional, expected, rtol=1e-05 + ) def test_BCEWithLogitsLoss_weight(self): - logit_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float64) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float64) + logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float64 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float64 + ) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64) - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) for reduction in ['sum', 'mean', 'none']: - static_result = test_static(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction) - dy_result = test_dygraph(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction) - expected = calc_bce_with_logits_loss(logit_np, - label_np, - reduction, - weight_np=weight_np) + static_result = test_static( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + ) + dy_result = test_dygraph( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction, weight_np=weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - weight_np=weight_np, - reduction=reduction, - functional=True) + static_functional = test_static( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + weight_np=weight_np, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) 
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) def test_BCEWithLogitsLoss_pos_weight(self): - logit_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float64) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float64) + logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float64 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float64 + ) pos_weight_np = np.random.random(size=(3, 4, 10)).astype(np.float64) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64) - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) reduction = "mean" - static_result = test_static(place, logit_np, label_np, weight_np, - reduction, pos_weight_np) - dy_result = test_dygraph(place, logit_np, label_np, weight_np, - reduction, pos_weight_np) - expected = calc_bce_with_logits_loss(logit_np, label_np, reduction, - weight_np, pos_weight_np) + static_result = test_static( + place, logit_np, label_np, weight_np, reduction, pos_weight_np + ) + dy_result = test_dygraph( + place, logit_np, label_np, weight_np, reduction, pos_weight_np + ) + expected = calc_bce_with_logits_loss( + logit_np, label_np, reduction, weight_np, pos_weight_np + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static(place, - logit_np, - label_np, - weight_np, - reduction, - pos_weight_np, - functional=True) - dy_functional = test_dygraph(place, - logit_np, - label_np, - weight_np, - reduction, - pos_weight_np, - functional=True) + static_functional = test_static( + place, + logit_np, + label_np, + weight_np, + reduction, + pos_weight_np, + functional=True, + ) + dy_functional = test_dygraph( + place, + logit_np, + label_np, + weight_np, + reduction, + pos_weight_np, + functional=True, + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-05) np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) def test_BCEWithLogitsLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.BCEWithLogitsLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.BCEWithLogitsLoss, + reduction="unsupport reduction", + ) logit = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.binary_cross_entropy_with_logits, - logit=logit, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.binary_cross_entropy_with_logits, + logit=logit, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py index c2a379f9982ca7e4ea82696c114fb58b3efc26f7..ee1e2fd64585f667d8b683fbf913bfb4a1df3ad1 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py @@ -41,28 +41,41 @@ class TestBeamSearchDecodeOp(unittest.TestCase): # beam_size = 2, end_id = 1 # start with start_id [ - self.append_lod_tensor(array, [[0, 1, 2], [0, 
1, 2]], - np.array([0, 0], dtype=dtype)) + self.append_lod_tensor( + array, [[0, 1, 2], [0, 1, 2]], np.array([0, 0], dtype=dtype) + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 1, 2], [0, 2, 4]], - np.array([2, 3, 4, 5], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 1, 2], [0, 2, 4]], + np.array([2, 3, 4, 5], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 2, 2, 4, 4]], - np.array([3, 1, 5, 4], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 2, 4], [0, 2, 2, 4, 4]], + np.array([3, 1, 5, 4], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 1, 2, 3, 4]], - np.array([1, 1, 3, 5], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 2, 4], [0, 1, 2, 3, 4]], + np.array([1, 1, 3, 5], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] [ - self.append_lod_tensor(array, [[0, 2, 4], [0, 0, 0, 2, 2]], - np.array([5, 1], dtype=dtype)) + self.append_lod_tensor( + array, + [[0, 2, 4], [0, 0, 0, 2, 2]], + np.array([5, 1], dtype=dtype), + ) for array, dtype in ((ids, "int64"), (scores, "float32")) ] @@ -88,22 +101,22 @@ class TestBeamSearchDecodeOp(unittest.TestCase): self.assertEqual(sentence_scores.lod(), expected_lod) expected_data = np.array( - [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64") + [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64" + ) np.testing.assert_array_equal(np.array(sentence_ids), expected_data) np.testing.assert_array_equal(np.array(sentence_scores), expected_data) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestBeamSearchDecodeOpGPU(TestBeamSearchDecodeOp): - def setUp(self): self.scope = core.Scope() self.place = core.CUDAPlace(0) class TestBeamSearchDecodeOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -111,10 +124,9 @@ class TestBeamSearchDecodeOpError(unittest.TestCase): # the input pre_ids must be Variable test_ids = np.random.randint(1, 5, [5, 1]).astype("int64") scores = fluid.layers.create_array(dtype='float32') - fluid.layers.beam_search_decode(test_ids, - scores, - beam_size=5, - end_id=0) + fluid.layers.beam_search_decode( + test_ids, scores, beam_size=5, end_id=0 + ) self.assertRaises(TypeError, test_id_Variable) @@ -122,10 +134,9 @@ class TestBeamSearchDecodeOpError(unittest.TestCase): # the input pre_scores must be Variable ids = fluid.layers.create_array(dtype='int64') test_scores = np.random.uniform(1, 5, [5, 1]).astype("float32") - fluid.layers.beam_search_decode(ids, - test_scores, - beam_size=5, - end_id=0) + fluid.layers.beam_search_decode( + ids, test_scores, beam_size=5, end_id=0 + ) self.assertRaises(TypeError, test_score_Variable) @@ -133,10 +144,9 @@ class TestBeamSearchDecodeOpError(unittest.TestCase): # the dtype of input pre_ids must be int64 type_ids = fluid.layers.create_array(dtype='float32') scores = fluid.layers.create_array(dtype='float32') - fluid.layers.beam_search_decode(type_ids, - scores, - beam_size=5, - end_id=0) + fluid.layers.beam_search_decode( + type_ids, scores, beam_size=5, end_id=0 + ) self.assertRaises(TypeError, test_id_dtype) @@ -144,10 +154,9 @@ class TestBeamSearchDecodeOpError(unittest.TestCase): # the dtype of input pre_scores must be float32 ids = 
fluid.layers.create_array(dtype='int64') type_scores = fluid.layers.create_array(dtype='int64') - fluid.layers.beam_search_decode(ids, - type_scores, - beam_size=5, - end_id=0) + fluid.layers.beam_search_decode( + ids, type_scores, beam_size=5, end_id=0 + ) self.assertRaises(TypeError, test_score_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index a274ae92d3b265cd62d9ab7819687a1db087b57f..7d99fad64229261a1694d3fb00343fb19ef4f044 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -41,32 +41,34 @@ class BeamSearchOpTester(unittest.TestCase): self.scope.var('parent_idx').get_tensor() def test_run(self): - op = Operator('beam_search', - pre_ids='pre_ids', - pre_scores='pre_scores', - ids='ids', - scores='scores', - selected_ids='selected_ids', - selected_scores='selected_scores', - parent_idx='parent_idx', - level=0, - beam_size=self.beam_size, - end_id=0, - is_accumulated=self.is_accumulated) + op = Operator( + 'beam_search', + pre_ids='pre_ids', + pre_scores='pre_scores', + ids='ids', + scores='scores', + selected_ids='selected_ids', + selected_scores='selected_scores', + parent_idx='parent_idx', + level=0, + beam_size=self.beam_size, + end_id=0, + is_accumulated=self.is_accumulated, + ) op.run(self.scope, core.CPUPlace()) selected_ids = self.scope.find_var("selected_ids").get_tensor() selected_scores = self.scope.find_var("selected_scores").get_tensor() parent_idx = self.scope.find_var("parent_idx").get_tensor() - np.testing.assert_allclose(np.array(selected_ids), - self.output_ids, - rtol=1e-05) - np.testing.assert_allclose(np.array(selected_scores), - self.output_scores, - rtol=1e-05) + np.testing.assert_allclose( + np.array(selected_ids), self.output_ids, rtol=1e-05 + ) + np.testing.assert_allclose( + np.array(selected_scores), self.output_scores, rtol=1e-05 + ) self.assertEqual(selected_ids.lod(), self.output_lod) - np.testing.assert_allclose(np.array(parent_idx), - self.output_parent_idx, - rtol=1e-05) + np.testing.assert_allclose( + np.array(parent_idx), self.output_parent_idx, rtol=1e-05 + ) def _create_pre_ids(self): np_data = np.array([[1, 2, 3, 4]], dtype='int64') @@ -78,19 +80,22 @@ class BeamSearchOpTester(unittest.TestCase): def _create_ids(self): self.lod = [[0, 2, 4], [0, 1, 2, 3, 4]] - np_data = np.array([[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], - dtype='int64') + np_data = np.array( + [[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], dtype='int64' + ) tensor = create_tensor(self.scope, "ids", np_data) tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.5, 0.3, 0.2], - [0.6, 0.3, 0.1], - [0.9, 0.5, 0.1], - [0.7, 0.5, 0.1], - ], - dtype='float32') + np_data = np.array( + [ + [0.5, 0.3, 0.2], + [0.6, 0.3, 0.1], + [0.9, 0.5, 0.1], + [0.7, 0.5, 0.1], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -104,7 +109,6 @@ class BeamSearchOpTester(unittest.TestCase): class BeamSearchOpTester2(BeamSearchOpTester): - def _create_pre_ids(self): np_data = np.array([[1], [2], [3], [4]], dtype='int64') tensor = create_tensor(self.scope, 'pre_ids', np_data) @@ -120,13 +124,15 @@ class BeamSearchOpTester2(BeamSearchOpTester): tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + np_data = np.array( + [ + [0.6, 0.9], + [0.5, 
0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -156,13 +162,15 @@ class BeamSearchOpTester3(BeamSearchOpTester): tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.6, 0.7], - ], - dtype='float32') + np_data = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.6, 0.7], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -192,13 +200,15 @@ class BeamSearchOpTester4(BeamSearchOpTester): tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.6, 0.7], - ], - dtype='float32') + np_data = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.6, 0.7], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -228,13 +238,15 @@ class BeamSearchOpTester5(BeamSearchOpTester): tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + np_data = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -242,8 +254,9 @@ class BeamSearchOpTester5(BeamSearchOpTester): self.beam_size = 2 self.is_accumulated = False self.output_ids = np.array([7, 3, 3, 1])[:, np.newaxis] - self.output_scores = np.array([1.50685, 0.996027, 0.194639, - 0.043325])[:, np.newaxis] + self.output_scores = np.array([1.50685, 0.996027, 0.194639, 0.043325])[ + :, np.newaxis + ] self.output_lod = [[0, 2, 4], [0, 0, 2, 3, 4]] self.output_parent_idx = np.array([1, 1, 2, 3]) @@ -265,13 +278,15 @@ class BeamSearchOpTester6(BeamSearchOpTester): tensor.set_lod(self.lod) def _create_scores(self): - np_data = np.array([ - [0.6, 0.9], - [0.5, 0.3], - [0.9, 0.5], - [0.1, 0.7], - ], - dtype='float32') + np_data = np.array( + [ + [0.6, 0.9], + [0.5, 0.3], + [0.9, 0.5], + [0.1, 0.7], + ], + dtype='float32', + ) tensor = create_tensor(self.scope, "scores", np_data) tensor.set_lod(self.lod) @@ -285,100 +300,115 @@ class BeamSearchOpTester6(BeamSearchOpTester): class TestBeamSearchOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - pre_ids = fluid.data(name='pre_id', - shape=[1], - lod_level=2, - dtype='int64') - pre_scores = fluid.data(name='pre_scores', - shape=[1], - lod_level=2, - dtype='float32') + pre_ids = fluid.data( + name='pre_id', shape=[1], lod_level=2, dtype='int64' + ) + pre_scores = fluid.data( + name='pre_scores', shape=[1], lod_level=2, dtype='float32' + ) probs = fluid.data(name='probs', shape=[10000], dtype='float32') topk_scores, topk_indices = fluid.layers.topk(probs, k=4) accu_scores = fluid.layers.elementwise_add( x=fluid.layers.log(x=topk_scores), y=fluid.layers.reshape(pre_scores, shape=[-1]), - axis=0) + axis=0, + ) def test_preids_Variable(): # the input pre_ids must be Variable preids_data = np.random.randint(1, 5, [5, 1]).astype("int64") - fluid.layers.beam_search(pre_ids=preids_data, - pre_scores=pre_scores, - ids=topk_indices, - scores=accu_scores, - beam_size=4, - end_id=1) + fluid.layers.beam_search( + pre_ids=preids_data, + pre_scores=pre_scores, + ids=topk_indices, + scores=accu_scores, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_preids_Variable) def test_prescores_Variable(): # the 
input pre_scores must be Variable - prescores_data = np.random.uniform(1, 5, - [5, 1]).astype("float32") - fluid.layers.beam_search(pre_ids=pre_ids, - pre_scores=prescores_data, - ids=topk_indices, - scores=accu_scores, - beam_size=4, - end_id=1) + prescores_data = np.random.uniform(1, 5, [5, 1]).astype( + "float32" + ) + fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=prescores_data, + ids=topk_indices, + scores=accu_scores, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_prescores_Variable) def test_ids_Variable(): # the input ids must be Variable or None ids_data = np.random.randint(1, 5, [5, 1]).astype("int64") - fluid.layers.beam_search(pre_ids=pre_ids, - pre_scores=pre_scores, - ids=ids_data, - scores=accu_scores, - beam_size=4, - end_id=1) + fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=pre_scores, + ids=ids_data, + scores=accu_scores, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_ids_Variable) def test_scores_Variable(): # the input scores must be Variable scores_data = np.random.uniform(1, 5, [5, 1]).astype("float32") - fluid.layers.beam_search(pre_ids=pre_ids, - pre_scores=pre_scores, - ids=topk_indices, - scores=scores_data, - beam_size=4, - end_id=1) + fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=pre_scores, + ids=topk_indices, + scores=scores_data, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_scores_Variable) def test_preids_dtype(): # the dtype of input pre_ids must be int64 - preids_type_data = fluid.data(name='preids_type_data', - shape=[1], - lod_level=2, - dtype='float32') - fluid.layers.beam_search(pre_ids=preids_type_data, - pre_scores=pre_scores, - ids=topk_indices, - scores=accu_scores, - beam_size=4, - end_id=1) + preids_type_data = fluid.data( + name='preids_type_data', + shape=[1], + lod_level=2, + dtype='float32', + ) + fluid.layers.beam_search( + pre_ids=preids_type_data, + pre_scores=pre_scores, + ids=topk_indices, + scores=accu_scores, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_preids_dtype) def test_prescores_dtype(): # the dtype of input pre_scores must be float32 - prescores_type_data = fluid.data(name='prescores_type_data', - shape=[1], - lod_level=2, - dtype='int64') - fluid.layers.beam_search(pre_ids=pre_ids, - pre_scores=prescores_type_data, - ids=topk_indices, - scores=accu_scores, - beam_size=4, - end_id=1) + prescores_type_data = fluid.data( + name='prescores_type_data', + shape=[1], + lod_level=2, + dtype='int64', + ) + fluid.layers.beam_search( + pre_ids=pre_ids, + pre_scores=prescores_type_data, + ids=topk_indices, + scores=accu_scores, + beam_size=4, + end_id=1, + ) self.assertRaises(TypeError, test_prescores_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py index 662fbfffe88e245bb70f2b9ec8983f04fa7bd6b4..027afdb177a29e8db92a29c5d66fc59b336272a3 100644 --- a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py +++ b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py @@ -27,7 +27,6 @@ def output_hist(out): class TestBernoulliOp(OpTest): - def setUp(self): self.op_type = "bernoulli" self.inputs = {"X": np.random.uniform(size=(1000, 784))} @@ -43,7 +42,6 @@ class TestBernoulliOp(OpTest): class TestBernoulliApi(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() x = paddle.rand([1024, 1024]) @@ -56,14 +54,14 @@ class TestBernoulliApi(unittest.TestCase): x = paddle.rand([1024, 1024]) out = paddle.bernoulli(x) exe = 
paddle.static.Executor(paddle.CPUPlace()) - out = exe.run(paddle.static.default_main_program(), - fetch_list=[out.name]) + out = exe.run( + paddle.static.default_main_program(), fetch_list=[out.name] + ) hist, prob = output_hist(out[0]) np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -83,7 +81,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index0), 260028995) self.assertEqual(np.sum(index1), 8582429431) self.assertEqual(np.sum(index2), 8581445798) - expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.] + expect = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0] np.testing.assert_array_equal(y[16, 500, 500:510], expect) x = paddle.to_tensor(x_np, dtype='float32') @@ -92,7 +90,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index0), 260092343) self.assertEqual(np.sum(index1), 8583509076) self.assertEqual(np.sum(index2), 8582778540) - expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.] + expect = [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0] np.testing.assert_array_equal(y[16, 500, 500:510], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_bfgs.py b/python/paddle/fluid/tests/unittests/test_bfgs.py index f7023fba86b577b1409765c6cd6bce07b067f450..07b15d3aefca7f980d14fc54a90cd47a1b4b97e3 100644 --- a/python/paddle/fluid/tests/unittests/test_bfgs.py +++ b/python/paddle/fluid/tests/unittests/test_bfgs.py @@ -45,37 +45,35 @@ def test_static_graph_H0(func, x0, H0, dtype='float32'): startup = paddle.static.Program() with paddle.static.program_guard(main, startup): X = paddle.static.data(name='x', shape=[x0.shape[0]], dtype=dtype) - H = paddle.static.data(name='h', - shape=[H0.shape[0], H0.shape[1]], - dtype=dtype) - Y = minimize_bfgs(func, - X, - initial_inverse_hessian_estimate=H, - dtype=dtype) + H = paddle.static.data( + name='h', shape=[H0.shape[0], H0.shape[1]], dtype=dtype + ) + Y = minimize_bfgs( + func, X, initial_inverse_hessian_estimate=H, dtype=dtype + ) exe = paddle.static.Executor() exe.run(startup) return exe.run(main, feed={'x': x0, 'h': H0}, fetch_list=[Y]) -def test_dynamic_graph(func, - x0, - H0=None, - line_search_fn='strong_wolfe', - dtype='float32'): +def test_dynamic_graph( + func, x0, H0=None, line_search_fn='strong_wolfe', dtype='float32' +): paddle.disable_static() x0 = paddle.to_tensor(x0) if H0 is not None: H0 = paddle.to_tensor(H0) - return minimize_bfgs(func, - x0, - initial_inverse_hessian_estimate=H0, - line_search_fn=line_search_fn, - dtype=dtype) + return minimize_bfgs( + func, + x0, + initial_inverse_hessian_estimate=H0, + line_search_fn=line_search_fn, + dtype=dtype, + ) class TestBfgs(unittest.TestCase): - def test_quadratic_nd(self): for dimension in [1, 10]: minimum = np.random.random(size=[dimension]).astype('float32') @@ -85,20 +83,19 @@ class TestBfgs(unittest.TestCase): minimum_ = paddle.assign(minimum) scale_ = paddle.assign(scale) return paddle.sum( - paddle.multiply(scale_, (F.square_error_cost(x, minimum_)))) + paddle.multiply(scale_, (F.square_error_cost(x, minimum_))) + ) x0 = np.random.random(size=[dimension]).astype('float32') results = test_static_graph(func=func, x0=x0) - np.testing.assert_allclose(minimum, - results[2], - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + minimum, results[2], rtol=1e-05, atol=1e-8 + ) results = 
test_dynamic_graph(func=func, x0=x0) - np.testing.assert_allclose(minimum, - results[2].numpy(), - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + minimum, results[2].numpy(), rtol=1e-05, atol=1e-8 + ) def test_inf_minima(self): extream_point = np.array([-1, 2]).astype('float32') @@ -106,16 +103,17 @@ class TestBfgs(unittest.TestCase): def func(x): # df = 3(x - 1.01)(x - 0.99) # f = x^3 - 3x^2 + 3*1.01*0.99x - return x * x * x / 3.0 - ( - extream_point[0] + extream_point[1] - ) * x * x / 2 + extream_point[0] * extream_point[1] * x + return ( + x * x * x / 3.0 + - (extream_point[0] + extream_point[1]) * x * x / 2 + + extream_point[0] * extream_point[1] * x + ) x0 = np.array([-1.7]).astype('float32') results = test_static_graph(func, x0) self.assertFalse(results[0][0]) def test_multi_minima(self): - def func(x): # df = 12(x + 1.1)(x - 0.2)(x - 0.8) # f = 3*x^4+0.4*x^3-5.46*x^2+2.112*x @@ -138,7 +136,7 @@ class TestBfgs(unittest.TestCase): # f(x, y) = (a - x)^2 + b (y - x^2)^2 # minimum = (a, a^2) x, y = position[0], position[1] - c = (a - x)**2 + b * (y - x**2)**2 + c = (a - x) ** 2 + b * (y - x**2) ** 2 # the return cant be np array[1], or in jacobin will cause flat error return c[0] @@ -153,7 +151,6 @@ class TestBfgs(unittest.TestCase): self.func_rosenbrock() def test_exception(self): - def func(x): return paddle.dot(x, x) @@ -162,10 +159,9 @@ class TestBfgs(unittest.TestCase): # test initial_inverse_hessian_estimate is good results = test_static_graph_H0(func, x0, H0, dtype='float32') - np.testing.assert_allclose([0.0, 0.0], - results[2], - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + [0.0, 0.0], results[2], rtol=1e-05, atol=1e-8 + ) self.assertTrue(results[0][0]) # test initial_inverse_hessian_estimate is bad @@ -173,11 +169,13 @@ class TestBfgs(unittest.TestCase): self.assertRaises(ValueError, test_dynamic_graph, func, x0, H0=H1) # test line_search_fn is bad - self.assertRaises(NotImplementedError, - test_static_graph, - func, - x0, - line_search_fn='other') + self.assertRaises( + NotImplementedError, + test_static_graph, + func, + x0, + line_search_fn='other', + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py index ed8bc9113784a798cea83948c3a27e971bb84281..886441429c25dd98d5206a4437ce004f3b981283 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py @@ -47,13 +47,15 @@ def value_bound(input, w, h, x, y): return input[:, :, access_y, access_x] -def bicubic_interp_np(input, - out_h, - out_w, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='kNCHW'): +def bicubic_interp_np( + input, + out_h, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='kNCHW', +): """trilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -67,13 +69,13 @@ def bicubic_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: ratio_w = 1.0 * in_w / out_w @@ -81,14 +83,14 @@ def bicubic_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for k in range(out_h): - if (align_corners): + if align_corners: h = ratio_h * k else: 
h = ratio_h * (k + 0.5) - 0.5 input_y = np.floor(h) y_t = h - input_y for l in range(out_w): - if (align_corners): + if align_corners: w = ratio_w * l else: w = ratio_w * (l + 0.5) - 0.5 @@ -107,20 +109,23 @@ def bicubic_interp_np(input, coefficients[ii] = cubic_interp1d( input[i, j, access_y, access_x_0], input[i, j, access_y, access_x_1], - input[i, j, access_y, - access_x_2], input[i, j, access_y, - access_x_3], x_t) - out[i, j, k, - l] = cubic_interp1d(coefficients[0], coefficients[1], - coefficients[2], coefficients[3], - y_t) + input[i, j, access_y, access_x_2], + input[i, j, access_y, access_x_3], + x_t, + ) + out[i, j, k, l] = cubic_interp1d( + coefficients[0], + coefficients[1], + coefficients[2], + coefficients[3], + y_t, + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC return out.astype(input.dtype) class TestBicubicInterpOp(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -146,9 +151,15 @@ class TestBicubicInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bicubic_interp_np(input_np, out_h, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.data_layout) + output_np = bicubic_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -163,7 +174,7 @@ class TestBicubicInterpOp(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } self.outputs = {'Out': output_np} @@ -171,80 +182,73 @@ class TestBicubicInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 3, 5, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True class TestBicubicInterpCase1(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestBicubicInterpCase2(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [3, 3, 9, 6] self.out_h = 10 self.out_w = 8 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestBicubicInterpCase3(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = False class TestBicubicInterpCase4(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True class TestBicubicInterpCase5(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [3, 3, 9, 6] self.out_h = 11 self.out_w = 11 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([6, 4]).astype("int32") self.align_corners = False class TestBicubicInterpCase6(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [1, 1, 32, 64] @@ -256,31 +260,28 @@ class TestBicubicInterpCase6(TestBicubicInterpOp): class TestBicubicInterpSame(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestBicubicInterpDataLayout(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 5, 5, 3] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.data_layout = "NHWC" class TestBicubicInterpOpAPI(unittest.TestCase): - def test_case(self): np.random.seed(200) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -291,103 +292,101 @@ class TestBicubicInterpOpAPI(unittest.TestCase): prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data(name="shape_tensor", - shape=[2], - dtype="int32") - actual_size = fluid.data(name="actual_size", - shape=[2], - dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) - out2 = interpolate(x, - size=[12, dim], - mode='bicubic', - align_corners=False) - out3 = interpolate(x, - size=shape_tensor, - mode='bicubic', - align_corners=False) - out4 = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) - out5 = interpolate(x, - scale_factor=scale_tensor, - mode='bicubic', - align_corners=False) + shape_tensor = fluid.data( + name="shape_tensor", shape=[2], dtype="int32" + ) + actual_size = fluid.data( + name="actual_size", shape=[2], dtype="int32" + ) + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) + out2 = interpolate( + x, size=[12, dim], mode='bicubic', align_corners=False + ) + out3 = interpolate( + x, size=shape_tensor, mode='bicubic', align_corners=False + ) + out4 = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) + out5 = interpolate( + x, + scale_factor=scale_tensor, + mode='bicubic', + align_corners=False, + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = bicubic_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=False) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = 
bicubic_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) - interp = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) + interp = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) dy_result = interp.numpy() - expect = bicubic_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=False) + expect = bicubic_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) np.testing.assert_allclose(dy_result, expect, rtol=1e-05) class TestBicubicOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of interpoalte must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, interpolate, x1) def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=[12, 12], - mode='UNKONWN', - align_corners=False) + out = interpolate( + x, size=[12, 12], mode='UNKONWN', align_corners=False + ) def test_input_shape(): x = fluid.data(name="x", shape=[2], dtype="float32") - out = interpolate(x, - size=[12, 12], - mode='BICUBIC', - align_corners=False) + out = interpolate( + x, size=[12, 12], mode='BICUBIC', align_corners=False + ) def test_align_corcers(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") @@ -395,77 +394,88 @@ class TestBicubicOpError(unittest.TestCase): def test_out_shape(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=[12], - mode='bicubic', - align_corners=False) + out = interpolate( + x, size=[12], mode='bicubic', align_corners=False + ) def test_attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data(name="input", - shape=[2, 3, 6, 9, 4], - dtype="float32") - out = interpolate(input, - size=[4, 8, 4, 5], - mode='trilinear', - data_format='NHWC') + input = fluid.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) + out = interpolate( + input, + size=[4, 8, 4, 5], + mode='trilinear', + data_format='NHWC', + ) def test_actual_shape(): # the actual_shape must be Variable. - x = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - out = interpolate(x, - size=[12, 12], - mode='BICUBIC', - align_corners=False) + x = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + out = interpolate( + x, size=[12, 12], mode='BICUBIC', align_corners=False + ) def test_scale_value(): # the scale must be greater than zero. 
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='BICUBIC', - align_corners=False, - scale_factor=-2.0) + out = interpolate( + x, + size=None, + mode='BICUBIC', + align_corners=False, + scale_factor=-2.0, + ) def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data(name="input", - shape=[2, 3, 6, 9, 4], - dtype="float32") - out = interpolate(input, - size=[4, 8, 4, 5], - mode='trilinear', - data_format='NDHWC') + input = fluid.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) + out = interpolate( + input, + size=[4, 8, 4, 5], + mode='trilinear', + data_format='NDHWC', + ) def test_scale_type(): # the scale must be greater than zero. x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - scale = fluid.create_lod_tensor(np.array([-1, 3, 5, - 5]), [[1, 1, 1, 1]], - fluid.CPUPlace()) - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=scale) + scale = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=scale, + ) def test_align_mode(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='nearest', - align_corners=False, - align_mode=2, - scale_factor=1.0) + out = interpolate( + x, + size=None, + mode='nearest', + align_corners=False, + align_mode=2, + scale_factor=1.0, + ) def test_outshape_and_scale(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=None) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=None, + ) self.assertRaises(ValueError, test_mode_type) self.assertRaises(ValueError, test_input_shape) diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index 8a58f8ec3694a98fb8633fee2b7bb4a4d8dd2f36..2bfe40abe53cbd8259cd81aaffa2b8f5092c4be3 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -22,18 +22,20 @@ from paddle.fluid.framework import _test_eager_guard from paddle.nn.functional import interpolate -def bicubic_interp_test(x, - OutSize=None, - SizeTensor=None, - Scale=None, - data_layout='kNCHW', - out_d=-1, - out_h=-1, - out_w=-1, - scale=[], - interp_method='bicubic', - align_corners=True, - align_mode=0): +def bicubic_interp_test( + x, + OutSize=None, + SizeTensor=None, + Scale=None, + data_layout='kNCHW', + out_d=-1, + out_h=-1, + out_w=-1, + scale=[], + interp_method='bicubic', + align_corners=True, + align_mode=0, +): if isinstance(scale, float) or isinstance(scale, int): scale_list = [] for _ in range(len(x.shape) - 2): @@ -43,12 +45,23 @@ def bicubic_interp_test(x, scale = list(map(float, scale)) if SizeTensor is not None: if not isinstance(SizeTensor, list) and not isinstance( - SizeTensor, tuple): + SizeTensor, tuple + ): SizeTensor = [SizeTensor] - return paddle._C_ops.bicubic_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, scale, - interp_method, align_corners, - align_mode) + return paddle._C_ops.bicubic_interp( + x, + OutSize, + SizeTensor, + Scale, + data_layout, + out_d, + out_h, + out_w, + scale, + interp_method, + align_corners, + align_mode, + ) def 
cubic_1(x, a): @@ -77,15 +90,17 @@ def value_bound(input, w, h, x, y): return input[:, :, access_y, access_x] -def bicubic_interp_np(input, - out_h, - out_w, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='kNCHW'): +def bicubic_interp_np( + input, + out_h, + out_w, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='kNCHW', +): """trilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -99,7 +114,7 @@ def bicubic_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: @@ -108,7 +123,7 @@ def bicubic_interp_np(input, ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -119,14 +134,14 @@ def bicubic_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for k in range(out_h): - if (align_corners): + if align_corners: h = ratio_h * k else: h = ratio_h * (k + 0.5) - 0.5 input_y = np.floor(h) y_t = h - input_y for l in range(out_w): - if (align_corners): + if align_corners: w = ratio_w * l else: w = ratio_w * (l + 0.5) - 0.5 @@ -145,20 +160,23 @@ def bicubic_interp_np(input, coefficients[ii] = cubic_interp1d( input[i, j, access_y, access_x_0], input[i, j, access_y, access_x_1], - input[i, j, access_y, - access_x_2], input[i, j, access_y, - access_x_3], x_t) - out[i, j, k, - l] = cubic_interp1d(coefficients[0], coefficients[1], - coefficients[2], coefficients[3], - y_t) + input[i, j, access_y, access_x_2], + input[i, j, access_y, access_x_3], + x_t, + ) + out[i, j, k, l] = cubic_interp1d( + coefficients[0], + coefficients[1], + coefficients[2], + coefficients[3], + y_t, + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC return out.astype(input.dtype) class TestBicubicInterpOp(OpTest): - def setUp(self): self.python_api = bicubic_interp_test self.out_size = None @@ -181,7 +199,7 @@ class TestBicubicInterpOp(OpTest): if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: scale_h = scale_w = float(self.scale) if isinstance(self.scale, list) and len(self.scale) == 1: scale_w = scale_h = self.scale[0] @@ -194,9 +212,17 @@ class TestBicubicInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bicubic_interp_np(input_np, out_h, out_w, scale_h, scale_w, - self.out_size, self.actual_shape, - self.align_corners, self.data_layout) + output_np = bicubic_interp_np( + input_np, + out_h, + out_w, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -210,11 +236,11 @@ class TestBicubicInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: self.scale = [self.scale] if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] @@ -225,10 +251,9 @@ class TestBicubicInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - 
self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'bicubic' @@ -241,7 +266,6 @@ class TestBicubicInterpOp(OpTest): class TestBicubicInterpCase1(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [4, 1, 7, 8] @@ -252,7 +276,6 @@ class TestBicubicInterpCase1(TestBicubicInterpOp): class TestBicubicInterpCase2(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [3, 3, 9, 6] @@ -263,7 +286,6 @@ class TestBicubicInterpCase2(TestBicubicInterpOp): class TestBicubicInterpCase3(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [1, 1, 32, 64] @@ -274,7 +296,6 @@ class TestBicubicInterpCase3(TestBicubicInterpOp): class TestBicubicInterpCase4(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [4, 1, 7, 8] @@ -286,7 +307,6 @@ class TestBicubicInterpCase4(TestBicubicInterpOp): class TestBicubicInterpCase5(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [3, 3, 9, 6] @@ -298,7 +318,6 @@ class TestBicubicInterpCase5(TestBicubicInterpOp): class TestBicubicInterpCase6(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [1, 1, 32, 64] @@ -310,7 +329,6 @@ class TestBicubicInterpCase6(TestBicubicInterpOp): class TestBicubicInterpSame(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 3, 32, 64] @@ -321,18 +339,16 @@ class TestBicubicInterpSame(TestBicubicInterpOp): class TestBicubicInterpScale(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = [1., 1.] 
+ self.scale = [1.0, 1.0] self.align_corners = True class TestBicubicInterpDataLayout(TestBicubicInterpOp): - def init_test_case(self): self.interp_method = 'bicubic' self.input_shape = [2, 5, 5, 3] @@ -345,7 +361,6 @@ class TestBicubicInterpDataLayout(TestBicubicInterpOp): class TestBicubicInterpOpAPI(unittest.TestCase): - def test_imperative_case(self): with _test_eager_guard(): self.func_case() @@ -361,52 +376,51 @@ class TestBicubicInterpOpAPI(unittest.TestCase): prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data(name="shape_tensor", - shape=[2], - dtype="int32") - actual_size = fluid.data(name="actual_size", - shape=[2], - dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) - out2 = interpolate(x, - size=[12, dim], - mode='bicubic', - align_corners=False) - out3 = interpolate(x, - size=shape_tensor, - mode='bicubic', - align_corners=False) - out4 = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) - out5 = interpolate(x, - scale_factor=scale_tensor, - mode='bicubic', - align_corners=False) - out6 = interpolate(x, - scale_factor=2.0, - mode='bicubic', - align_corners=False) - out7 = interpolate(x, - scale_factor=[2.0, 2.0], - mode='bicubic', - align_corners=False) + shape_tensor = fluid.data( + name="shape_tensor", shape=[2], dtype="int32" + ) + actual_size = fluid.data( + name="actual_size", shape=[2], dtype="int32" + ) + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) + out2 = interpolate( + x, size=[12, dim], mode='bicubic', align_corners=False + ) + out3 = interpolate( + x, size=shape_tensor, mode='bicubic', align_corners=False + ) + out4 = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) + out5 = interpolate( + x, + scale_factor=scale_tensor, + mode='bicubic', + align_corners=False, + ) + out6 = interpolate( + x, scale_factor=2.0, mode='bicubic', align_corners=False + ) + out7 = interpolate( + x, scale_factor=[2.0, 2.0], mode='bicubic', align_corners=False + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -417,55 +431,51 @@ class TestBicubicInterpOpAPI(unittest.TestCase): "dim": dim_data, "shape_tensor": shape_data, "actual_size": actual_size_data, - "scale_tensor": scale_data + "scale_tensor": scale_data, }, fetch_list=[out1, out2, out3, out4, out5, out6, out7], - return_numpy=True) + return_numpy=True, + ) - expect_res = bicubic_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=False) + expect_res = bicubic_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) - interp = interpolate(x, - size=[12, 12], - mode='bicubic', - align_corners=False) + interp = interpolate( + x, size=[12, 12], mode='bicubic', align_corners=False + ) dy_result = interp.numpy() - expect = bicubic_interp_np(x_data, - out_h=12, - out_w=12, - 
align_corners=False) + expect = bicubic_interp_np( + x_data, out_h=12, out_w=12, align_corners=False + ) np.testing.assert_allclose(dy_result, expect, rtol=1e-05) class TestBicubicOpError(unittest.TestCase): - def test_imperative_errors(self): # the input of interpoalte must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, interpolate, x1) def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=[12, 12], - mode='UNKONWN', - align_corners=False) + out = interpolate( + x, size=[12, 12], mode='UNKONWN', align_corners=False + ) def test_input_shape(): x = fluid.data(name="x", shape=[2], dtype="float32") - out = interpolate(x, - size=[12, 12], - mode='BICUBIC', - align_corners=False) + out = interpolate( + x, size=[12, 12], mode='BICUBIC', align_corners=False + ) def test_align_corcers(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") @@ -477,123 +487,138 @@ class TestBicubicOpError(unittest.TestCase): def test_attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data(name="input", - shape=[2, 3, 6, 9, 4], - dtype="float32") - out = interpolate(input, - size=[4, 8, 4, 5], - mode='trilinear', - data_format='NHWC') + input = fluid.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) + out = interpolate( + input, size=[4, 8, 4, 5], mode='trilinear', data_format='NHWC' + ) def test_actual_shape(): # the actual_shape must be Variable. - x = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace()) - out = interpolate(x, - size=[12, 12], - mode='BICUBIC', - align_corners=False) + x = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + out = interpolate( + x, size=[12, 12], mode='BICUBIC', align_corners=False + ) def test_scale_value(): # the scale must be greater than zero. x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='BICUBIC', - align_corners=False, - scale_factor=-2.0) + out = interpolate( + x, + size=None, + mode='BICUBIC', + align_corners=False, + scale_factor=-2.0, + ) def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data(name="input", - shape=[2, 3, 6, 9, 4], - dtype="float32") - out = interpolate(input, - size=[4, 8, 4, 5], - mode='trilinear', - data_format='NDHWC') + input = fluid.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) + out = interpolate( + input, size=[4, 8, 4, 5], mode='trilinear', data_format='NDHWC' + ) def test_scale_type(): # the scale must be greater than zero. 
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - scale = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=scale) + scale = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=scale, + ) def test_align_mode(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='nearest', - align_corners=False, - align_mode=2, - scale_factor=1.0) + out = interpolate( + x, + size=None, + mode='nearest', + align_corners=False, + align_mode=2, + scale_factor=1.0, + ) def test_outshape_and_scale(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=None) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=None, + ) def test_align_corners_and_nearest(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='nearest', - align_corners=True, - scale_factor=None) + out = interpolate( + x, + size=None, + mode='nearest', + align_corners=True, + scale_factor=None, + ) def test_scale_shape(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='nearest', - align_corners=False, - scale_factor=[1, 2, 2]) + out = interpolate( + x, + size=None, + mode='nearest', + align_corners=False, + scale_factor=[1, 2, 2], + ) def test_scale_value_1(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=[1, 2, 2]) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=[1, 2, 2], + ) def test_size_and_scale(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size=None, - mode='bicubic', - align_corners=False, - scale_factor=None) + out = interpolate( + x, + size=None, + mode='bicubic', + align_corners=False, + scale_factor=None, + ) def test_size_and_scale2(): x = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32") - out = interpolate(x, - size=[2, 2, 2], - mode='trilinear', - align_corners=False, - scale_factor=2.0) + out = interpolate( + x, + size=[2, 2, 2], + mode='trilinear', + align_corners=False, + scale_factor=2.0, + ) def test_size_type(): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") - out = interpolate(x, - size={2, 2}, - mode='bicubic', - align_corners=False) + out = interpolate( + x, size={2, 2}, mode='bicubic', align_corners=False + ) def test_input_shape_1(): x = fluid.data(name="x", shape=[2, 1, 0, 0], dtype="float32") - out = interpolate(x, - size=[3, 3], - mode="bicubic", - align_corners=False) + out = interpolate( + x, size=[3, 3], mode="bicubic", align_corners=False + ) self.assertRaises(ValueError, test_mode_type) self.assertRaises(ValueError, test_input_shape) @@ -619,10 +644,10 @@ class TestBicubicOpError(unittest.TestCase): self.test_imperative_errors() -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestBicubicInterpOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 
'bicubic' self.input_shape = [2, 3, 5, 5] @@ -635,11 +660,13 @@ class TestBicubicInterpOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - size=self.out_size.tolist(), - mode=self.interp_method, - align_corners=self.align_corners, - data_format=self.data_layout) + y = interpolate( + x, + size=self.out_size.tolist(), + mode=self.interp_method, + align_corners=self.align_corners, + data_format=self.data_layout, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py index 976e7df60b80ce7592d7fb52beb80e817a78a628..db0b8c182de9213f5707d7697cf6ee3c5236cf07 100644 --- a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py @@ -20,7 +20,6 @@ import math class Gsz: - def __init__(self, h, w, gd, gh, gw, input_chans): self.h = h self.w = w @@ -53,8 +52,9 @@ def d_weight_z(x): return d_diff_abs(x) -def naive_bilateral_slice_forward(output, grid, guide, input, gsz, has_offset, - total_count, output_chans): +def naive_bilateral_slice_forward( + output, grid, guide, input, gsz, has_offset, total_count, output_chans +): h = gsz.h w = gsz.w gd = gsz.gd @@ -72,7 +72,7 @@ def naive_bilateral_slice_forward(output, grid, guide, input, gsz, has_offset, x = idx % w y = idx // w % h out_c = (idx // (h * w)) % output_chans - b = (idx // (output_chans * w * h)) + b = idx // (output_chans * w * h) gx = (x + 0.5) * gw / (1.0 * w) gy = (y + 0.5) * gh / (1.0 * h) @@ -99,11 +99,12 @@ def naive_bilateral_slice_forward(output, grid, guide, input, gsz, has_offset, wz = weight_z(zz + 0.5 - gz) c_ = coeff_stride * out_c + in_c - coeff_sample += grid[int(b), - int(c_), - int(z_), - int(y_), - int(x_)] * wx * wy * wz + coeff_sample += ( + grid[int(b), int(c_), int(z_), int(y_), int(x_)] + * wx + * wy + * wz + ) if in_c < input_chans: value += coeff_sample * input[int(b), int(in_c), int(y), int(x)] @@ -132,15 +133,16 @@ def naive_bilateral_slice(x, guide, grid, has_offset): gsz = Gsz(h, w, gd, gh, gw, input_chans) total_count = bs * h * w * output.shape[1] - naive_bilateral_slice_forward(output, grid, guide, x, gsz, has_offset, - total_count, output.shape[1]) + naive_bilateral_slice_forward( + output, grid, guide, x, gsz, has_offset, total_count, output.shape[1] + ) return output -@unittest.skipIf(not paddle.fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not paddle.fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestBilateralSliceOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'bilateral_slice' @@ -177,27 +179,26 @@ class TestBilateralSliceOp(OpTest): self.data_type = 'float64' -@unittest.skipIf(not paddle.fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not paddle.fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestBilateralSliceOp1(TestBilateralSliceOp): - def initTestCase(self): self.has_offset = True self.data_type = 'float32' class TestBilateralSliceApi(unittest.TestCase): - def test_api(self): - x = paddle.fluid.data(name='x', - shape=[None, 3, 25, 15], - dtype='float32') - guide = paddle.fluid.data(name='guide', - shape=[None, 25, 15], - dtype='float32') - grid = paddle.fluid.data(name='grid', - shape=[None, None, 8, 5, 3], - 
dtype='float32') + x = paddle.fluid.data( + name='x', shape=[None, 3, 25, 15], dtype='float32' + ) + guide = paddle.fluid.data( + name='guide', shape=[None, 25, 15], dtype='float32' + ) + grid = paddle.fluid.data( + name='grid', shape=[None, None, 8, 5, 3], dtype='float32' + ) paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False) if not paddle.fluid.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_api.py b/python/paddle/fluid/tests/unittests/test_bilinear_api.py index 031163cdc435d796917ce3e2cb273171b912f5fd..dcdb03d05bad90ebd4721d55446df5140a69479c 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_api.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_api.py @@ -21,10 +21,10 @@ import numpy as np class TestBilinearAPI(unittest.TestCase): - def test_api(self): - with fluid.program_guard(fluid.default_startup_program(), - fluid.default_main_program()): + with fluid.program_guard( + fluid.default_startup_program(), fluid.default_main_program() + ): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -37,29 +37,26 @@ class TestBilinearAPI(unittest.TestCase): layer1 = np.random.random((5, 5)).astype('float32') layer2 = np.random.random((5, 4)).astype('float32') - bilinear = paddle.nn.Bilinear(in1_features=5, - in2_features=4, - out_features=1000) + bilinear = paddle.nn.Bilinear( + in1_features=5, in2_features=4, out_features=1000 + ) ret = bilinear(data1, data2) exe.run(fluid.default_startup_program()) - ret_fetch = exe.run(feed={ - 'X1': layer1, - 'X2': layer2 - }, - fetch_list=[ret.name]) + ret_fetch = exe.run( + feed={'X1': layer1, 'X2': layer2}, fetch_list=[ret.name] + ) self.assertEqual(ret_fetch[0].shape, (5, 1000)) class TestBilinearAPIDygraph(unittest.TestCase): - def test_api(self): paddle.disable_static() layer1 = np.random.random((5, 5)).astype('float32') layer2 = np.random.random((5, 4)).astype('float32') - bilinear = paddle.nn.Bilinear(in1_features=5, - in2_features=4, - out_features=1000) + bilinear = paddle.nn.Bilinear( + in1_features=5, in2_features=4, out_features=1000 + ) ret = bilinear(paddle.to_tensor(layer1), paddle.to_tensor(layer2)) self.assertEqual(ret.shape, [5, 1000]) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index bb46558c09395bd38591b163a0fae3a2bfd02c29..b281d30a2221f2f07aad061df939620fb2659425 100755 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -22,14 +22,16 @@ import paddle paddle.enable_static() -def bilinear_interp_np(input, - out_h, - out_w, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): +def bilinear_interp_np( + input, + out_h, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -43,12 +45,12 @@ def bilinear_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: ratio_w = 1.0 * in_w / out_w @@ -56,37 +58,40 @@ def bilinear_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for i in range(out_h): - 
if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (i + 0.5) - 0.5) else: h = int(ratio_h * i) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: h1lambda = ratio_h * i - h h2lambda = 1.0 - h1lambda for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + - w1lambda*input[:, :, h, w+wid]) + \ - h1lambda*(w2lambda*input[:, :, h+hid, w] + - w1lambda*input[:, :, h+hid, w+wid]) + out[:, :, i, j] = h2lambda * ( + w2lambda * input[:, :, h, w] + + w1lambda * input[:, :, h, w + wid] + ) + h1lambda * ( + w2lambda * input[:, :, h + hid, w] + + w1lambda * input[:, :, h + hid, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -95,7 +100,6 @@ def bilinear_interp_np(input, class TestBilinearInterpOp(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -121,9 +125,16 @@ class TestBilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -139,7 +150,7 @@ class TestBilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } self.outputs = {'Out': output_np} @@ -147,130 +158,120 @@ class TestBilinearInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase5(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase6(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 33]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpSame(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpActualShape(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpDataLayout(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 5, 5, 3] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -278,15 +279,15 @@ class TestBilinearInterpDataLayout(TestBilinearInterpOp): class TestBilinearInterpOpUint8(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None self.init_test_case() self.op_type = "bilinear_interp" self.check_eager = True - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale > 0: out_h = int(self.input_shape[2] * self.scale) @@ -295,9 +296,15 @@ class TestBilinearInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -309,97 +316,89 @@ class TestBilinearInterpOpUint8(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=self.check_eager) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 5 self.out_w = 13 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([6, 15]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestBilinearInterpWithMethod2(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestBilinearInterpWithMethod3(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestBilinearInterpScale1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 1. + self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -411,7 +410,6 @@ class TestBilinearInterpScale3(TestBilinearInterpOp): class TestBilinearInterpZero(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -423,7 +421,6 @@ class TestBilinearInterpZero(TestBilinearInterpOp): class TestBilinearInterpOp_attr_tensor(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -456,58 +453,62 @@ class TestBilinearInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.check_eager = False self.attrs['out_h'] = self.out_h self.attrs['out_w'] = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, self.out_size, - self.actual_shape, self.align_corners) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 5] self.out_h = 3 self.out_w = 3 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = [3, 3] self.align_corners = True # out_size is a 1-D tensor class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = True # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.shape_by_1Dtensor = True @@ -515,7 +516,6 @@ class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] @@ -528,23 +528,22 @@ class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): class TestBilinearInterpOpAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12]) out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim]) out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_bilinear(x, - out_shape=[4, 4], - actual_shape=actual_size) + out4 = fluid.layers.resize_bilinear( + x, out_shape=[4, 4], actual_shape=actual_size + ) out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -559,21 +558,22 @@ class TestBilinearInterpOpAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = bilinear_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=True) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = bilinear_interp_np( + x_data, out_h=12, out_w=12, align_corners=True + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py index e25ffeaee53a966681dd25b76ba6f1badbee1503..1a3786dbdd5d5495d11d39a139cdacdf45246db1 100755 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py @@ -21,18 +21,20 @@ from paddle.nn.functional import interpolate import paddle -def 
bilinear_interp_test(x, - OutSize=None, - SizeTensor=None, - Scale=None, - data_layout='NCHW', - out_d=-1, - out_h=-1, - out_w=-1, - scale=[], - interp_method='bilinear', - align_corners=True, - align_mode=0): +def bilinear_interp_test( + x, + OutSize=None, + SizeTensor=None, + Scale=None, + data_layout='NCHW', + out_d=-1, + out_h=-1, + out_w=-1, + scale=[], + interp_method='bilinear', + align_corners=True, + align_mode=0, +): if isinstance(scale, float) or isinstance(scale, int): scale_list = [] for _ in range(len(x.shape) - 2): @@ -42,24 +44,37 @@ def bilinear_interp_test(x, scale = list(map(float, scale)) if SizeTensor is not None: if not isinstance(SizeTensor, list) and not isinstance( - SizeTensor, tuple): + SizeTensor, tuple + ): SizeTensor = [SizeTensor] - return paddle._C_ops.bilinear_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, - scale, interp_method, align_corners, - align_mode) - - -def bilinear_interp_np(input, - out_h, - out_w, - scale_w=0, - scale_h=0, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): + return paddle._C_ops.bilinear_interp( + x, + OutSize, + SizeTensor, + Scale, + data_layout, + out_d, + out_h, + out_w, + scale, + interp_method, + align_corners, + align_mode, + ) + + +def bilinear_interp_np( + input, + out_h, + out_w, + scale_w=0, + scale_h=0, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -73,7 +88,7 @@ def bilinear_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: @@ -81,7 +96,7 @@ def bilinear_interp_np(input, else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -92,37 +107,40 @@ def bilinear_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for i in range(out_h): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (i + 0.5) - 0.5) else: h = int(ratio_h * i) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: h1lambda = ratio_h * i - h h2lambda = 1.0 - h1lambda for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + - w1lambda*input[:, :, h, w+wid]) + \ - h1lambda*(w2lambda*input[:, :, h+hid, w] + - w1lambda*input[:, :, h+hid, w+wid]) + out[:, :, i, j] = h2lambda * ( + w2lambda * input[:, :, h, w] + + w1lambda * input[:, :, h, w + wid] + ) + h1lambda * ( + w2lambda * input[:, :, h + hid, w] + + w1lambda * input[:, :, h + hid, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -131,7 +149,6 @@ def bilinear_interp_np(input, class TestBilinearInterpOp(OpTest): - def 
setUp(self): self.python_api = bilinear_interp_test self.out_size = None @@ -151,7 +168,7 @@ class TestBilinearInterpOp(OpTest): scale_w = 0 if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: scale_h = scale_w = float(self.scale) if isinstance(self.scale, list) and len(self.scale) == 1: scale_w = scale_h = self.scale[0] @@ -164,10 +181,18 @@ class TestBilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode, - self.data_layout) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -180,11 +205,11 @@ class TestBilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: self.scale = [self.scale] if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] @@ -209,7 +234,6 @@ class TestBilinearInterpOp(OpTest): class TestBilinearInterpCase1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] @@ -221,7 +245,6 @@ class TestBilinearInterpCase1(TestBilinearInterpOp): class TestBilinearInterpCase2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] @@ -233,7 +256,6 @@ class TestBilinearInterpCase2(TestBilinearInterpOp): class TestBilinearInterpCase3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] @@ -245,7 +267,6 @@ class TestBilinearInterpCase3(TestBilinearInterpOp): class TestBilinearInterpCase4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] @@ -258,7 +279,6 @@ class TestBilinearInterpCase4(TestBilinearInterpOp): class TestBilinearInterpCase5(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] @@ -271,7 +291,6 @@ class TestBilinearInterpCase5(TestBilinearInterpOp): class TestBilinearInterpCase6(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] @@ -284,7 +303,6 @@ class TestBilinearInterpCase6(TestBilinearInterpOp): class TestBilinearInterpCase7(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] @@ -296,7 +314,6 @@ class TestBilinearInterpCase7(TestBilinearInterpOp): class TestBilinearInterpSame(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] @@ -308,7 +325,6 @@ class TestBilinearInterpSame(TestBilinearInterpOp): class TestBilinearInterpActualShape(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] @@ -321,7 +337,6 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp): class TestBilinearInterpDataLayout(TestBilinearInterpOp): - def init_test_case(self): 
self.interp_method = 'bilinear' self.input_shape = [2, 5, 5, 3] @@ -335,15 +350,15 @@ class TestBilinearInterpDataLayout(TestBilinearInterpOp): class TestBilinearInterpOpUint8(OpTest): - def setUp(self): self.python_api = bilinear_interp_test self.out_size = None self.actual_shape = None self.init_test_case() self.op_type = "bilinear_interp_v2" - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -360,9 +375,17 @@ class TestBilinearInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -372,7 +395,7 @@ class TestBilinearInterpOpUint8(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -384,9 +407,9 @@ class TestBilinearInterpOpUint8(OpTest): self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=True) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=True + ) def init_test_case(self): self.interp_method = 'bilinear' @@ -399,7 +422,6 @@ class TestBilinearInterpOpUint8(OpTest): class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] @@ -411,7 +433,6 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] @@ -424,52 +445,46 @@ class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestBilinearInterpWithMethod2(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestBilinearInterpWithMethod3(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestBilinearInterpScale1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 1. 
+ self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -481,7 +496,6 @@ class TestBilinearInterpScale3(TestBilinearInterpOp): class TestBilinearInterpScale4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -493,7 +507,6 @@ class TestBilinearInterpScale4(TestBilinearInterpOp): class TestBilinearInterpZero(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -505,7 +518,6 @@ class TestBilinearInterpZero(TestBilinearInterpOp): class TestBilinearInterpOp_attr_tensor(OpTest): - def setUp(self): self.python_api = bilinear_interp_test self.out_size = None @@ -544,8 +556,9 @@ class TestBilinearInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -557,9 +570,16 @@ class TestBilinearInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -580,7 +600,6 @@ class TestBilinearInterpOp_attr_tensor(OpTest): # out_size is a 1-D tensor class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] @@ -593,7 +612,6 @@ class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] @@ -607,7 +625,6 @@ class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] @@ -620,23 +637,22 @@ class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor): class TestBilinearInterpOpAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12]) out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim]) out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_bilinear(x, - out_shape=[4, 4], - actual_shape=actual_size) + out4 = fluid.layers.resize_bilinear( + x, out_shape=[4, 4], 
actual_shape=actual_size + ) out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -651,29 +667,30 @@ class TestBilinearInterpOpAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = bilinear_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=True) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = bilinear_interp_np( + x_data, out_h=12, out_w=12, align_corners=True + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -681,21 +698,19 @@ class TestBilinearInterpOpAPI_dy(unittest.TestCase): with fluid.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - size=[12, 12], - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, size=[12, 12], mode="bilinear", align_corners=False + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy2(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -705,21 +720,19 @@ class TestBilinearInterpOpAPI_dy2(unittest.TestCase): size_np = np.array([12, 12]).astype("int64") input_x = paddle.to_tensor(input_data) size = paddle.to_tensor(size_np) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - size=size, - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, size=size, mode="bilinear", align_corners=False + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy3(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -729,21 +742,22 @@ class TestBilinearInterpOpAPI_dy3(unittest.TestCase): size_1 = np.array([12]).astype("int64") input_x = paddle.to_tensor(input_data) size = paddle.to_tensor(size_1) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - size=[size, size], - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + size=[size, size], + mode="bilinear", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy4(unittest.TestCase): - def test_case(self): import paddle + if 
core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -753,21 +767,22 @@ class TestBilinearInterpOpAPI_dy4(unittest.TestCase): scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) scale = paddle.to_tensor(scale_np) - expect_res = bilinear_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - scale_factor=scale, - mode="bilinear", - align_corners=False) + expect_res = bilinear_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + scale_factor=scale, + mode="bilinear", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestBilinearInterpOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 5] @@ -781,12 +796,14 @@ class TestBilinearInterpOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - size=self.out_size.tolist(), - mode=self.interp_method, - align_mode=self.align_mode, - align_corners=self.align_corners, - data_format=self.data_layout) + y = interpolate( + x, + size=self.out_size.tolist(), + mode=self.interp_method, + align_mode=self.align_mode, + align_corners=self.align_corners, + data_format=self.data_layout, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py index 6ffbbfdb99911a628a81a6f0b73dc3e4b3e51a72..79ef2ffabbc1c43523013ada3f44d302ea04f194 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py @@ -20,15 +20,15 @@ import paddle class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - layer = fluid.dygraph.nn.BilinearTensorProduct(input1_dim=5, - input2_dim=4, - output_dim=1000) + layer = fluid.dygraph.nn.BilinearTensorProduct( + input1_dim=5, input2_dim=4, output_dim=1000 + ) # the input must be Variable. 
- x0 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x0 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, layer, x0) # the input dtype must be float32 or float64 x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16") @@ -37,7 +37,6 @@ class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): class TestBilinearTensorProductOp(OpTest): - def setUp(self): self.op_type = "bilinear_tensor_product" self.python_api = paddle.nn.functional.bilinear diff --git a/python/paddle/fluid/tests/unittests/test_bincount_op.py b/python/paddle/fluid/tests/unittests/test_bincount_op.py index e191b09fe4291b6cf674ef11a3d5e05529794ca6..c5e39f1b13fa34b05868582c174395f8b46e1ae9 100644 --- a/python/paddle/fluid/tests/unittests/test_bincount_op.py +++ b/python/paddle/fluid/tests/unittests/test_bincount_op.py @@ -42,16 +42,17 @@ class TestBincountOpAPI(unittest.TestCase): exe.run(startup_program) img = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64) w = np.array([0, 1, 1, 2, 2, 1, 0]).astype(np.int64) - res = exe.run(train_program, - feed={ - 'input': img, - 'weights': w - }, - fetch_list=[output]) + res = exe.run( + train_program, + feed={'input': img, 'weights': w}, + fetch_list=[output], + ) actual = np.array(res[0]) expected = np.bincount(img, weights=w) - self.assertTrue((actual == expected).all(), - msg='bincount output is wrong, out =' + str(actual)) + self.assertTrue( + (actual == expected).all(), + msg='bincount output is wrong, out =' + str(actual), + ) def test_dygraph(self): with fluid.dygraph.guard(): @@ -61,7 +62,8 @@ class TestBincountOpAPI(unittest.TestCase): expected = np.bincount(inputs) self.assertTrue( (actual.numpy() == expected).all(), - msg='bincount output is wrong, out =' + str(actual.numpy())) + msg='bincount output is wrong, out =' + str(actual.numpy()), + ) class TestBincountOpError(unittest.TestCase): @@ -112,7 +114,7 @@ class TestBincountOpError(unittest.TestCase): """Test input tensor should only contain non-negative ints.""" def net_func(): - input_value = paddle.to_tensor([1., 2., 3., 4., 5.]) + input_value = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0]) paddle.bincount(input_value) with self.assertRaises(TypeError): @@ -161,12 +163,13 @@ class TestCase1(TestBincountOp): def init_test_case(self): self.minlength = 0 - self.np_weights = np.random.randint(low=0, high=20, - size=10).astype(np.float32) + self.np_weights = np.random.randint(low=0, high=20, size=10).astype( + np.float32 + ) self.np_input = np.random.randint(low=0, high=20, size=10) - self.Out = np.bincount(self.np_input, - weights=self.np_weights, - minlength=self.minlength).astype(np.float32) + self.Out = np.bincount( + self.np_input, weights=self.np_weights, minlength=self.minlength + ).astype(np.float32) class TestCase2(TestBincountOp): @@ -183,9 +186,9 @@ class TestCase2(TestBincountOp): self.minlength = 0 self.np_weights = np.random.randint(low=0, high=20, size=10) self.np_input = np.random.randint(low=0, high=20, size=10) - self.Out = np.bincount(self.np_input, - weights=self.np_weights, - minlength=self.minlength) + self.Out = np.bincount( + self.np_input, weights=self.np_weights, minlength=self.minlength + ) class TestCase3(TestBincountOp): @@ -200,8 +203,9 @@ class TestCase4(TestBincountOp): # with input(INT32) def init_test_case(self): self.minlength = 0 - self.np_input = np.random.randint(low=0, high=20, - size=10).astype(np.int32) + self.np_input = np.random.randint(low=0, high=20, 
size=10).astype( + np.int32 + ) self.Out = np.bincount(self.np_input, minlength=self.minlength) @@ -214,23 +218,27 @@ class TestCase5(TestBincountOp): class TestTensorMinlength(unittest.TestCase): - def setUp(self): paddle.disable_static() paddle.seed(2022) self.temp_dir = tempfile.TemporaryDirectory() - self.save_path = os.path.join(self.temp_dir.name, - 'tensor_minlength_bincount') - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.save_path = os.path.join( + self.temp_dir.name, 'tensor_minlength_bincount' + ) + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def test_dygraph(self): paddle.disable_static() x = np.random.randint(0, 10, [20]) minlength = 2 np_out = np.bincount(x, minlength=minlength) - pd_out = paddle.bincount(paddle.to_tensor(x), - minlength=paddle.to_tensor([2], dtype='int32')) + pd_out = paddle.bincount( + paddle.to_tensor(x), minlength=paddle.to_tensor([2], dtype='int32') + ) np.testing.assert_allclose(np_out, pd_out.numpy()) def test_static_and_infer(self): @@ -245,8 +253,9 @@ class TestTensorMinlength(unittest.TestCase): linear_out = linear(x) relu_out = paddle.nn.functional.relu(linear_out) minlength = paddle.full([1], 3, dtype='int32') - out = paddle.bincount(paddle.cast(relu_out, 'int32'), - minlength=minlength) + out = paddle.bincount( + paddle.cast(relu_out, 'int32'), minlength=minlength + ) exe = paddle.static.Executor(self.place) exe.run(starup_prog) @@ -254,8 +263,9 @@ class TestTensorMinlength(unittest.TestCase): # run infer paddle.static.save_inference_model(self.save_path, [x], [out], exe) - config = paddle_infer.Config(self.save_path + '.pdmodel', - self.save_path + '.pdiparams') + config = paddle_infer.Config( + self.save_path + '.pdmodel', self.save_path + '.pdiparams' + ) if paddle.is_compiled_with_cuda(): config.enable_use_gpu(100, 0) else: diff --git a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py index 39bed077ebae216521950f5d825a34712586fd36..0412c04d8ca0d33df554b6bd68e56193a00fa6e6 100644 --- a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py +++ b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import unittest import numpy as np @@ -34,7 +34,7 @@ def bipartite_match(distance, match_indices, match_dist): match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True) - row_indices = -1 * np.ones((row, ), dtype=np.int_) + row_indices = -1 * np.ones((row,), dtype=np.int_) idx = 0 for i, j, dist in match_sorted: @@ -72,18 +72,25 @@ def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None): match_dist = np.zeros((n, m), dtype=np.float32) cur_offset = 0 for i in range(n): - if lod[i] == 0: continue - bipartite_match(distance[cur_offset:(cur_offset + lod[i]), :], - match_indices[i, :], match_dist[i, :]) + if lod[i] == 0: + continue + bipartite_match( + distance[cur_offset : (cur_offset + lod[i]), :], + match_indices[i, :], + match_dist[i, :], + ) if match_type == 'per_prediction': - argmax_match(distance[cur_offset:(cur_offset + lod[i]), :], - match_indices[i, :], match_dist[i, :], dist_threshold) + argmax_match( + distance[cur_offset : (cur_offset + lod[i]), :], + match_indices[i, :], + match_dist[i, :], + dist_threshold, + ) cur_offset += lod[i] return match_indices, match_dist class TestBipartiteMatchOpWithLoD(OpTest): - def setUp(self): self.op_type = 'bipartite_match' lod = [[5, 6, 12]] @@ -101,7 +108,6 @@ class TestBipartiteMatchOpWithLoD(OpTest): class TestBipartiteMatchOpWithoutLoD(OpTest): - def setUp(self): self.op_type = 'bipartite_match' lod = [[8]] @@ -119,7 +125,6 @@ class TestBipartiteMatchOpWithoutLoD(OpTest): class TestBipartiteMatchOpWithoutLoDLargeScaleInput(OpTest): - def setUp(self): self.op_type = 'bipartite_match' lod = [[300]] @@ -137,13 +142,13 @@ class TestBipartiteMatchOpWithoutLoDLargeScaleInput(OpTest): class TestBipartiteMatchOpWithPerPredictionType(OpTest): - def setUp(self): self.op_type = 'bipartite_match' lod = [[5, 6, 12]] dist = np.random.random((23, 237)).astype('float32') match_indices, match_dist = batch_bipartite_match( - dist, lod[0], 'per_prediction', 0.5) + dist, lod[0], 'per_prediction', 0.5 + ) self.inputs = {'DistMat': (dist, lod)} self.outputs = { @@ -160,7 +165,6 @@ class TestBipartiteMatchOpWithPerPredictionType(OpTest): class TestBipartiteMatchOpWithEmptyLoD(OpTest): - def setUp(self): self.op_type = 'bipartite_match' lod = [[5, 6, 0, 12]] diff --git a/python/paddle/fluid/tests/unittests/test_bitwise_op.py b/python/paddle/fluid/tests/unittests/test_bitwise_op.py index c387555ccda64fd8ec892e0279779b2513570955..6a7b039380b9ef3fc83fdb6c12275bdc1f20acde 100644 --- a/python/paddle/fluid/tests/unittests/test_bitwise_op.py +++ b/python/paddle/fluid/tests/unittests/test_bitwise_op.py @@ -22,21 +22,18 @@ paddle.enable_static() ################## TEST OP: BitwiseAnd ################## class TestBitwiseAnd(OpTest): - def setUp(self): self.op_type = "bitwise_and" self.init_dtype() self.init_shape() self.init_bound() - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_and(x, y) self.inputs = {'X': x, 'Y': y} @@ -61,7 +58,6 @@ class TestBitwiseAnd(OpTest): class TestBitwiseAndUInt8(TestBitwiseAnd): - def init_dtype(self): self.dtype = np.uint8 @@ -71,7 +67,6 @@ class TestBitwiseAndUInt8(TestBitwiseAnd): class TestBitwiseAndInt8(TestBitwiseAnd): - def init_dtype(self): self.dtype = np.int8 @@ -81,7 +76,6 @@ class 
TestBitwiseAndInt8(TestBitwiseAnd): class TestBitwiseAndInt16(TestBitwiseAnd): - def init_dtype(self): self.dtype = np.int16 @@ -91,7 +85,6 @@ class TestBitwiseAndInt16(TestBitwiseAnd): class TestBitwiseAndInt64(TestBitwiseAnd): - def init_dtype(self): self.dtype = np.int64 @@ -101,7 +94,6 @@ class TestBitwiseAndInt64(TestBitwiseAnd): class TestBitwiseAndBool(TestBitwiseAnd): - def setUp(self): self.op_type = "bitwise_and" self.init_shape() @@ -116,21 +108,18 @@ class TestBitwiseAndBool(TestBitwiseAnd): ################## TEST OP: BitwiseOr ################## class TestBitwiseOr(OpTest): - def setUp(self): self.op_type = "bitwise_or" self.init_dtype() self.init_shape() self.init_bound() - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_or(x, y) self.inputs = {'X': x, 'Y': y} @@ -155,7 +144,6 @@ class TestBitwiseOr(OpTest): class TestBitwiseOrUInt8(TestBitwiseOr): - def init_dtype(self): self.dtype = np.uint8 @@ -165,7 +153,6 @@ class TestBitwiseOrUInt8(TestBitwiseOr): class TestBitwiseOrInt8(TestBitwiseOr): - def init_dtype(self): self.dtype = np.int8 @@ -175,7 +162,6 @@ class TestBitwiseOrInt8(TestBitwiseOr): class TestBitwiseOrInt16(TestBitwiseOr): - def init_dtype(self): self.dtype = np.int16 @@ -185,7 +171,6 @@ class TestBitwiseOrInt16(TestBitwiseOr): class TestBitwiseOrInt64(TestBitwiseOr): - def init_dtype(self): self.dtype = np.int64 @@ -195,7 +180,6 @@ class TestBitwiseOrInt64(TestBitwiseOr): class TestBitwiseOrBool(TestBitwiseOr): - def setUp(self): self.op_type = "bitwise_or" self.init_shape() @@ -210,21 +194,18 @@ class TestBitwiseOrBool(TestBitwiseOr): ################## TEST OP: BitwiseXor ################## class TestBitwiseXor(OpTest): - def setUp(self): self.op_type = "bitwise_xor" self.init_dtype() self.init_shape() self.init_bound() - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_xor(x, y) self.inputs = {'X': x, 'Y': y} @@ -249,7 +230,6 @@ class TestBitwiseXor(OpTest): class TestBitwiseXorUInt8(TestBitwiseXor): - def init_dtype(self): self.dtype = np.uint8 @@ -259,7 +239,6 @@ class TestBitwiseXorUInt8(TestBitwiseXor): class TestBitwiseXorInt8(TestBitwiseXor): - def init_dtype(self): self.dtype = np.int8 @@ -269,7 +248,6 @@ class TestBitwiseXorInt8(TestBitwiseXor): class TestBitwiseXorInt16(TestBitwiseXor): - def init_dtype(self): self.dtype = np.int16 @@ -279,7 +257,6 @@ class TestBitwiseXorInt16(TestBitwiseXor): class TestBitwiseXorInt64(TestBitwiseXor): - def init_dtype(self): self.dtype = np.int64 @@ -289,7 +266,6 @@ class TestBitwiseXorInt64(TestBitwiseXor): class TestBitwiseXorBool(TestBitwiseXor): - def setUp(self): self.op_type = "bitwise_xor" self.init_shape() @@ -304,17 +280,15 @@ class TestBitwiseXorBool(TestBitwiseXor): ################## TEST OP: BitwiseNot ################## class TestBitwiseNot(OpTest): - def setUp(self): self.op_type = "bitwise_not" self.init_dtype() self.init_shape() self.init_bound() - x = np.random.randint(self.low, - self.high, - self.x_shape, - 
dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) out = np.bitwise_not(x) self.inputs = {'X': x} @@ -338,7 +312,6 @@ class TestBitwiseNot(OpTest): class TestBitwiseNotUInt8(TestBitwiseNot): - def init_dtype(self): self.dtype = np.uint8 @@ -348,7 +321,6 @@ class TestBitwiseNotUInt8(TestBitwiseNot): class TestBitwiseNotInt8(TestBitwiseNot): - def init_dtype(self): self.dtype = np.int8 @@ -357,7 +329,6 @@ class TestBitwiseNotInt8(TestBitwiseNot): class TestBitwiseNotInt16(TestBitwiseNot): - def init_dtype(self): self.dtype = np.int16 @@ -367,7 +338,6 @@ class TestBitwiseNotInt16(TestBitwiseNot): class TestBitwiseNotInt64(TestBitwiseNot): - def init_dtype(self): self.dtype = np.int64 @@ -376,7 +346,6 @@ class TestBitwiseNotInt64(TestBitwiseNot): class TestBitwiseNotBool(TestBitwiseNot): - def setUp(self): self.op_type = "bitwise_not" self.init_shape() diff --git a/python/paddle/fluid/tests/unittests/test_block_rename_var.py b/python/paddle/fluid/tests/unittests/test_block_rename_var.py index 322cb8bc4471f591377bdaa4006039880609e347..02908052278ebca691d87d9fd033b8df8e8c6136 100644 --- a/python/paddle/fluid/tests/unittests/test_block_rename_var.py +++ b/python/paddle/fluid/tests/unittests/test_block_rename_var.py @@ -18,16 +18,15 @@ import paddle class TestBlockRenameVar(unittest.TestCase): - def setUp(self): self.program = paddle.static.Program() self.block = self.program.current_block() - self.var = self.block.create_var(name="X", - shape=[-1, 23, 48], - dtype='float32') - self.op = self.block.append_op(type="abs", - inputs={"X": [self.var]}, - outputs={"Out": [self.var]}) + self.var = self.block.create_var( + name="X", shape=[-1, 23, 48], dtype='float32' + ) + self.op = self.block.append_op( + type="abs", inputs={"X": [self.var]}, outputs={"Out": [self.var]} + ) self.new_var_name = self.get_new_var_name() def get_new_var_name(self): @@ -35,19 +34,20 @@ class TestBlockRenameVar(unittest.TestCase): def test_rename_var(self): self.block._rename_var(self.var.name, self.new_var_name) - new_var_name_str = self.new_var_name if isinstance( - self.new_var_name, str) else self.new_var_name.decode() + new_var_name_str = ( + self.new_var_name + if isinstance(self.new_var_name, str) + else self.new_var_name.decode() + ) self.assertTrue(new_var_name_str in self.block.vars) class TestBlockRenameVarStrCase2(TestBlockRenameVar): - def get_new_var_name(self): return "ABC" class TestBlockRenameVarBytes(TestBlockRenameVar): - def get_new_var_name(self): return b"Y" diff --git a/python/paddle/fluid/tests/unittests/test_bmm_op.py b/python/paddle/fluid/tests/unittests/test_bmm_op.py index 44a6be83b32eed4363ddddfc855353470521fdf3..5c99a2e62cc59bdf71039a18078abf89872145a1 100644 --- a/python/paddle/fluid/tests/unittests/test_bmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_bmm_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid class TestBmmOp(OpTest): - def setUp(self): self.op_type = "bmm" self.python_api = paddle.tensor.bmm @@ -38,36 +37,40 @@ class TestBmmOp(OpTest): class API_TestBmm(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', - shape=[-1, 3, 4], - dtype='float64') - data2 = fluid.layers.data('data2', - shape=[-1, 4, 5], - dtype='float64') + data1 = fluid.layers.data( + 'data1', shape=[-1, 3, 4], dtype='float64' + ) + data2 = fluid.layers.data( + 'data2', shape=[-1, 4, 5], dtype='float64' + ) result_bmm = paddle.bmm(data1, data2) place = fluid.CPUPlace() exe = 
fluid.Executor(place) input1 = np.random.random([10, 3, 4]).astype('float64') input2 = np.random.random([10, 4, 5]).astype('float64') - result, = exe.run(feed={ - "data1": input1, - "data2": input2 - }, - fetch_list=[result_bmm]) + (result,) = exe.run( + feed={"data1": input1, "data2": input2}, fetch_list=[result_bmm] + ) expected_result = np.matmul(input1, input2) np.testing.assert_allclose(expected_result, result, rtol=1e-05) class API_TestDygraphBmm(unittest.TestCase): - def test_out(self): - input1 = np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], - [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) - input2 = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], - [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]]) + input1 = np.array( + [ + [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], + [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]], + ] + ) + input2 = np.array( + [ + [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], + [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]], + ] + ) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(input1) y = fluid.dygraph.to_variable(input2) @@ -78,7 +81,6 @@ class API_TestDygraphBmm(unittest.TestCase): class TestBmmAPIError(unittest.TestCase): - def test_api_error(self): x_data = np.arange(24, dtype='float32').reshape((2, 3, 4)) y_data = np.arange(16, dtype='float32').reshape((2, 4, 2)) diff --git a/python/paddle/fluid/tests/unittests/test_box_clip_op.py b/python/paddle/fluid/tests/unittests/test_box_clip_op.py index e3c9c8e6eb4d699e7dd1b2b7f55cda24b51f4a9b..2ee356976f6c62f4ade79f042f8146e14b06e653 100644 --- a/python/paddle/fluid/tests/unittests/test_box_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_clip_op.py @@ -20,14 +20,18 @@ from op_test import OpTest def box_clip(input_box, im_info, output_box): im_w = round(im_info[1] / im_info[2]) im_h = round(im_info[0] / im_info[2]) - output_box[:, :, 0] = np.maximum(np.minimum(input_box[:, :, 0], im_w - 1), - 0) - output_box[:, :, 1] = np.maximum(np.minimum(input_box[:, :, 1], im_h - 1), - 0) - output_box[:, :, 2] = np.maximum(np.minimum(input_box[:, :, 2], im_w - 1), - 0) - output_box[:, :, 3] = np.maximum(np.minimum(input_box[:, :, 3], im_h - 1), - 0) + output_box[:, :, 0] = np.maximum( + np.minimum(input_box[:, :, 0], im_w - 1), 0 + ) + output_box[:, :, 1] = np.maximum( + np.minimum(input_box[:, :, 1], im_h - 1), 0 + ) + output_box[:, :, 2] = np.maximum( + np.minimum(input_box[:, :, 2], im_w - 1), 0 + ) + output_box[:, :, 3] = np.maximum( + np.minimum(input_box[:, :, 3], im_h - 1), 0 + ) def batch_box_clip(input_boxes, im_info, lod): @@ -36,15 +40,16 @@ def batch_box_clip(input_boxes, im_info, lod): output_boxes = np.zeros((n, m, 4), dtype=np.float32) cur_offset = 0 for i in range(len(lod)): - box_clip(input_boxes[cur_offset:(cur_offset + lod[i]), :, :], - im_info[i, :], - output_boxes[cur_offset:(cur_offset + lod[i]), :, :]) + box_clip( + input_boxes[cur_offset : (cur_offset + lod[i]), :, :], + im_info[i, :], + output_boxes[cur_offset : (cur_offset + lod[i]), :, :], + ) cur_offset += lod[i] return output_boxes class TestBoxClipOp(OpTest): - def test_check_output(self): self.check_output() @@ -52,7 +57,7 @@ class TestBoxClipOp(OpTest): self.op_type = "box_clip" lod = [[1, 2, 3]] input_boxes = np.random.random((6, 10, 4)) * 5 - im_info = np.array([[5, 8, 1.], [6, 6, 1.], [7, 5, 1.]]) + im_info = np.array([[5, 8, 1.0], [6, 6, 1.0], [7, 5, 1.0]]) output_boxes = batch_box_clip(input_boxes, im_info, lod[0]) self.inputs = { diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 
0e70f9904b50aa7d07ed494aa458174ca7d36a9c..6813a88070d214cb6ea140f7042017fe7dbe274e 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -32,9 +32,11 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): pb_y = pb_y.reshape(shape) if pb_v.ndim == 2: - var_shape = (1, pb_v.shape[0], - pb_v.shape[1]) if axis == 0 else (pb_v.shape[0], 1, - pb_v.shape[1]) + var_shape = ( + (1, pb_v.shape[0], pb_v.shape[1]) + if axis == 0 + else (pb_v.shape[0], 1, pb_v.shape[1]) + ) pb_v = pb_v.reshape(var_shape) if pb_v.ndim == 1: tb_x = pb_v[0] * t_box[:, :, 0] * pb_w + pb_x @@ -90,18 +92,21 @@ def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0): output_box = np.zeros((n, m, 4), dtype=np.float32) cur_offset = 0 for i in range(len(lod)): - if (code_type == "EncodeCenterSize"): - box_encoder(t_box[cur_offset:(cur_offset + lod[i]), :], p_box, pb_v, - output_box[cur_offset:(cur_offset + lod[i]), :, :], - norm) - elif (code_type == "DecodeCenterSize"): + if code_type == "EncodeCenterSize": + box_encoder( + t_box[cur_offset : (cur_offset + lod[i]), :], + p_box, + pb_v, + output_box[cur_offset : (cur_offset + lod[i]), :, :], + norm, + ) + elif code_type == "DecodeCenterSize": box_decoder(t_box, p_box, pb_v, output_box, norm, axis) cur_offset += lod[i] return output_box class TestBoxCoderOp(OpTest): - def test_check_output(self): self.check_output(check_eager=True) @@ -114,8 +119,14 @@ class TestBoxCoderOp(OpTest): target_box = np.random.random((20, 81, 4)).astype('float32') code_type = "DecodeCenterSize" box_normalized = False - output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type, box_normalized) + output_box = batch_box_coder( + prior_box, + prior_box_var, + target_box, + lod[0], + code_type, + box_normalized, + ) self.inputs = { 'PriorBox': prior_box, 'PriorBoxVar': prior_box_var, @@ -123,13 +134,12 @@ class TestBoxCoderOp(OpTest): } self.attrs = { 'code_type': 'decode_center_size', - 'box_normalized': False + 'box_normalized': False, } self.outputs = {'OutputBox': output_box} class TestBoxCoderOpWithoutBoxVar(OpTest): - def test_check_output(self): self.check_output(check_eager=True) @@ -142,8 +152,14 @@ class TestBoxCoderOpWithoutBoxVar(OpTest): target_box = np.random.random((20, 81, 4)).astype('float32') code_type = "DecodeCenterSize" box_normalized = False - output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type, box_normalized) + output_box = batch_box_coder( + prior_box, + prior_box_var, + target_box, + lod[0], + code_type, + box_normalized, + ) self.inputs = { 'PriorBox': prior_box, @@ -152,13 +168,12 @@ class TestBoxCoderOpWithoutBoxVar(OpTest): } self.attrs = { 'code_type': 'decode_center_size', - 'box_normalized': False + 'box_normalized': False, } self.outputs = {'OutputBox': output_box} class TestBoxCoderOpWithLoD(OpTest): - def test_check_output(self): self.check_output(check_eager=True) @@ -171,8 +186,14 @@ class TestBoxCoderOpWithLoD(OpTest): target_box = np.random.random((50, 4)).astype('float32') code_type = "EncodeCenterSize" box_normalized = True - output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type, box_normalized) + output_box = batch_box_coder( + prior_box, + prior_box_var, + target_box, + lod[0], + code_type, + box_normalized, + ) self.inputs = { 'PriorBox': prior_box, @@ -184,7 +205,6 @@ class TestBoxCoderOpWithLoD(OpTest): class TestBoxCoderOpWithAxis(OpTest): - def 
test_check_output(self): self.check_output(check_eager=True) @@ -198,8 +218,15 @@ class TestBoxCoderOpWithAxis(OpTest): code_type = "DecodeCenterSize" box_normalized = False axis = 1 - output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type, box_normalized, axis) + output_box = batch_box_coder( + prior_box, + prior_box_var, + target_box, + lod[0], + code_type, + box_normalized, + axis, + ) self.inputs = { 'PriorBox': prior_box, @@ -209,13 +236,12 @@ class TestBoxCoderOpWithAxis(OpTest): self.attrs = { 'code_type': 'decode_center_size', 'box_normalized': False, - 'axis': axis + 'axis': axis, } self.outputs = {'OutputBox': output_box} class TestBoxCoderOpWithVariance(OpTest): - def test_check_output(self): self.check_output() @@ -228,8 +254,15 @@ class TestBoxCoderOpWithVariance(OpTest): code_type = "DecodeCenterSize" box_normalized = False axis = 1 - output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type, box_normalized, axis) + output_box = batch_box_coder( + prior_box, + prior_box_var, + target_box, + lod[0], + code_type, + box_normalized, + axis, + ) self.inputs = { 'PriorBox': prior_box, @@ -239,13 +272,12 @@ class TestBoxCoderOpWithVariance(OpTest): 'code_type': 'decode_center_size', 'box_normalized': False, 'variance': prior_box_var.astype(np.float64).flatten(), - 'axis': axis + 'axis': axis, } self.outputs = {'OutputBox': output_box} class TestBoxCoderOpWithVarianceDygraphAPI(unittest.TestCase): - def setUp(self): self.lod = [[1, 1, 1, 1, 1]] self.prior_box = np.random.random((30, 4)).astype('float32') @@ -254,16 +286,20 @@ class TestBoxCoderOpWithVarianceDygraphAPI(unittest.TestCase): self.code_type = "DecodeCenterSize" self.box_normalized = False self.axis = 1 - self.output_ref = batch_box_coder(self.prior_box, self.prior_box_var, - self.target_box, self.lod[0], - self.code_type, self.box_normalized, - self.axis) + self.output_ref = batch_box_coder( + self.prior_box, + self.prior_box_var, + self.target_box, + self.lod[0], + self.code_type, + self.box_normalized, + self.axis, + ) self.place = [paddle.CPUPlace()] if core.is_compiled_with_cuda(): self.place.append(paddle.CUDAPlace(0)) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) output_box = paddle.fluid.layers.box_coder( @@ -272,10 +308,11 @@ class TestBoxCoderOpWithVarianceDygraphAPI(unittest.TestCase): paddle.to_tensor(self.target_box), "decode_center_size", self.box_normalized, - axis=self.axis) - np.testing.assert_allclose(np.sum(self.output_ref), - np.sum(output_box.numpy()), - rtol=1e-05) + axis=self.axis, + ) + np.testing.assert_allclose( + np.sum(self.output_ref), np.sum(output_box.numpy()), rtol=1e-05 + ) paddle.enable_static() for place in self.place: diff --git a/python/paddle/fluid/tests/unittests/test_box_decoder_and_assign_op.py b/python/paddle/fluid/tests/unittests/test_box_decoder_and_assign_op.py index 41158533a62d8a32e3e576054ee0aef57f9e88ed..0b826e362e7bd061dbb91b257a51c76f27b32f0e 100644 --- a/python/paddle/fluid/tests/unittests/test_box_decoder_and_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_decoder_and_assign_op.py @@ -60,7 +60,6 @@ def box_decoder_and_assign(deltas, weights, boxes, box_score, box_clip): class TestBoxDecoderAndAssignOpWithLoD(OpTest): - def test_check_output(self): self.check_output() @@ -74,7 +73,8 @@ class TestBoxDecoderAndAssignOpWithLoD(OpTest): box_score = np.random.random((20, num_classes)).astype('float32') box_clip = 4.135 output_box, output_assign_box = 
box_decoder_and_assign( - target_box, prior_box_var, prior_box, box_score, box_clip) + target_box, prior_box_var, prior_box, box_score, box_clip + ) self.inputs = { 'PriorBox': (prior_box, lod), @@ -85,7 +85,7 @@ class TestBoxDecoderAndAssignOpWithLoD(OpTest): self.attrs = {'box_clip': box_clip} self.outputs = { 'DecodeBox': output_box, - 'OutputAssignBox': output_assign_box + 'OutputAssignBox': output_assign_box, } diff --git a/python/paddle/fluid/tests/unittests/test_boxps.py b/python/paddle/fluid/tests/unittests/test_boxps.py index a3d3607fd8e0f935f33c9b48fea270a2330a9c9a..a51f86a7b2b725994627e7ef5c1fb9c70f899b52 100644 --- a/python/paddle/fluid/tests/unittests/test_boxps.py +++ b/python/paddle/fluid/tests/unittests/test_boxps.py @@ -21,7 +21,7 @@ from paddle.fluid.transpiler import collective class TestTranspile(unittest.TestCase): - """ TestCases for BoxPS Preload """ + """TestCases for BoxPS Preload""" def get_transpile(self, mode, trainers="127.0.0.1:6174"): config = fluid.DistributeTranspilerConfig() @@ -34,44 +34,52 @@ class TestTranspile(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() t = self.get_transpile("single_process_multi_thread") - t.transpile(trainer_id=0, - startup_program=startup_program, - trainers="127.0.0.1:6174", - program=main_program) + t.transpile( + trainer_id=0, + startup_program=startup_program, + trainers="127.0.0.1:6174", + program=main_program, + ) t = self.get_transpile("grad_allreduce") try: - t.transpile(trainer_id=0, - startup_program=startup_program, - trainers="127.0.0.1:6174", - program=main_program) + t.transpile( + trainer_id=0, + startup_program=startup_program, + trainers="127.0.0.1:6174", + program=main_program, + ) except ValueError as e: print(e) def test_single_trainers(self): transpiler = collective.GradAllReduce(0) try: - transpiler.transpile(startup_program=fluid.Program(), - main_program=fluid.Program(), - rank=1, - endpoints="127.0.0.1:6174", - current_endpoint="127.0.0.1:6174", - wait_port="6174") + transpiler.transpile( + startup_program=fluid.Program(), + main_program=fluid.Program(), + rank=1, + endpoints="127.0.0.1:6174", + current_endpoint="127.0.0.1:6174", + wait_port="6174", + ) except ValueError as e: print(e) transpiler = collective.LocalSGD(0) try: - transpiler.transpile(startup_program=fluid.Program(), - main_program=fluid.Program(), - rank=1, - endpoints="127.0.0.1:6174", - current_endpoint="127.0.0.1:6174", - wait_port="6174") + transpiler.transpile( + startup_program=fluid.Program(), + main_program=fluid.Program(), + rank=1, + endpoints="127.0.0.1:6174", + current_endpoint="127.0.0.1:6174", + wait_port="6174", + ) except ValueError as e: print(e) class TestRunCmd(unittest.TestCase): - """ TestCases for run_cmd""" + """TestCases for run_cmd""" def test_run_cmd(self): ret1 = int(core.run_cmd("ls; echo $?").strip().split('\n')[-1]) @@ -81,20 +89,18 @@ class TestRunCmd(unittest.TestCase): class TestPullBoxSparseOP(unittest.TestCase): - """ TestCases for _pull_box_sparse op""" + """TestCases for _pull_box_sparse op""" def test_pull_box_sparse_op(self): paddle.enable_static() program = fluid.Program() with fluid.program_guard(program): - x = fluid.layers.data(name='x', - shape=[1], - dtype='int64', - lod_level=0) - y = fluid.layers.data(name='y', - shape=[1], - dtype='int64', - lod_level=0) + x = fluid.layers.data( + name='x', shape=[1], dtype='int64', lod_level=0 + ) + y = fluid.layers.data( + name='y', shape=[1], dtype='int64', lod_level=0 + ) emb_x, emb_y = _pull_box_sparse([x, y], 
size=1) diff --git a/python/paddle/fluid/tests/unittests/test_bpr_loss_op.py b/python/paddle/fluid/tests/unittests/test_bpr_loss_op.py index d527f4579ed732afa5b2357fb35cdd5c9da8c8c1..5f8953aaea3631a9ec5cc08b718a2fa534655180 100644 --- a/python/paddle/fluid/tests/unittests/test_bpr_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_bpr_loss_op.py @@ -18,8 +18,7 @@ from op_test import OpTest, randomize_probability class TestBprLossOp1(OpTest): - """Test BprLoss with discrete one-hot labels. - """ + """Test BprLoss with discrete one-hot labels.""" def setUp(self): self.op_type = "bpr_loss" @@ -33,7 +32,7 @@ class TestBprLossOp1(OpTest): for j in range(class_num): if j == label[i][0]: continue - sum += (-np.log(1.0 + np.exp(X[i][j] - X[i][label[i][0]]))) + sum += -np.log(1.0 + np.exp(X[i][j] - X[i][label[i][0]])) bpr_loss_result.append(-sum / (class_num - 1)) bpr_loss = np.asmatrix([[x] for x in bpr_loss_result], dtype="float64") self.inputs = {"X": X, "Label": label} diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_error.py b/python/paddle/fluid/tests/unittests/test_broadcast_error.py index bc1d026815856fe9e3e817e518a1230dbdac5d22..517de67fd6dddf1d0a74df6ffed659720862b20c 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_error.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_error.py @@ -19,7 +19,6 @@ import paddle.fluid.core as core class TestBroadcastOpCpu(OpTest): - def setUp(self): self.op_type = "broadcast" input = np.random.random((100, 2)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_shape.py b/python/paddle/fluid/tests/unittests/test_broadcast_shape.py index fedcb9bb465d5b55a7556580e8aa3017e5ac169f..1afb046b68b467ac469e8822c677591deed91a23 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_shape.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_shape.py @@ -17,18 +17,19 @@ import paddle class TestBroadcastShape(unittest.TestCase): - def test_result(self): shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1]) self.assertEqual(shape, [2, 3, 3]) shape = paddle.broadcast_shape( - [-1, 1, 3], [1, 3, 1]) #support compile time infershape + [-1, 1, 3], [1, 3, 1] + ) # support compile time infershape self.assertEqual(shape, [-1, 3, 3]) def test_error(self): - self.assertRaises(ValueError, paddle.broadcast_shape, [2, 1, 3], - [3, 3, 1]) + self.assertRaises( + ValueError, paddle.broadcast_shape, [2, 1, 3], [3, 3, 1] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py index 3c262048561f5b34671776e06738654a7dd99d17..3872dadebd82d9cbdd36680a01cc68a9f98e8f10 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py @@ -80,7 +80,6 @@ def gen_mixed_tensors_test(dtype): class TestCPUBroadcastTensorsOp(OpTest): - def set_place(self): self.place = core.CPUPlace() @@ -92,7 +91,9 @@ class TestCPUBroadcastTensorsOp(OpTest): self.use_mkldnn = False self.attrs = {'use_mkldnn': self.use_mkldnn} self.test_gen_func_list = [ - gen_rank_diff_test, gen_no_broadcast_test, gen_mixed_tensors_test + gen_rank_diff_test, + gen_no_broadcast_test, + gen_mixed_tensors_test, ] self.set_place() self.set_dtypes() @@ -117,35 +118,38 @@ class TestCPUBroadcastTensorsOp(OpTest): test_func(**args) def test_check_output(self): - self.run_dual_test(self.check_output_with_place, { - "place": self.place, - "atol": 1e-1, 
- "check_eager": True - }) + self.run_dual_test( + self.check_output_with_place, + {"place": self.place, "atol": 1e-1, "check_eager": True}, + ) def test_check_grad_normal(self): self.run_dual_test( - self.check_grad_with_place, { + self.check_grad_with_place, + { "place": self.place, "inputs_to_check": ['x0', 'x1'], "output_names": ['out0', 'out1'], "max_relative_error": 0.05, - "check_eager": True - }) + "check_eager": True, + }, + ) self.run_triple_in_test( - self.check_grad_with_place, { + self.check_grad_with_place, + { "place": self.place, "inputs_to_check": ['x0', 'x1', 'x2'], "output_names": ['out0', 'out1', "out2"], "max_relative_error": 0.05, - "check_eager": True - }) + "check_eager": True, + }, + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDABroadcastTensorsOp(TestCPUBroadcastTensorsOp): - def set_place(self): self.place = core.CUDAPlace(0) @@ -156,17 +160,15 @@ class TestCUDABroadcastTensorsOp(TestCPUBroadcastTensorsOp): class TestBroadcastTensorsAPI(unittest.TestCase): - def test_api(self): - def test_static(): inputs = [ - paddle.fluid.layers.data(shape=[4, 1, 4, 1], - dtype='float32', - name="x0"), - paddle.fluid.layers.data(shape=[1, 4, 1, 4], - dtype='float32', - name="x1") + paddle.fluid.layers.data( + shape=[4, 1, 4, 1], dtype='float32', name="x0" + ), + paddle.fluid.layers.data( + shape=[1, 4, 1, 4], dtype='float32', name="x1" + ), ] paddle.broadcast_tensors(inputs) @@ -175,9 +177,11 @@ class TestBroadcastTensorsAPI(unittest.TestCase): try: inputs = [ paddle.to_tensor( - np.random.random([4, 1, 4, 1]).astype("float32")), + np.random.random([4, 1, 4, 1]).astype("float32") + ), paddle.to_tensor( - np.random.random([1, 4, 1, 4]).astype("float32")) + np.random.random([1, 4, 1, 4]).astype("float32") + ), ] paddle.broadcast_tensors(inputs) finally: @@ -188,39 +192,37 @@ class TestBroadcastTensorsAPI(unittest.TestCase): class TestRaiseBroadcastTensorsError(unittest.TestCase): - def test_errors(self): - def test_type(): inputs = [ - paddle.fluid.layers.data(shape=[1, 1, 1, 1], - dtype='float32', - name="x4"), - paddle.fluid.layers.data(shape=[1, 4, 1, 1], - dtype='float64', - name="x5") + paddle.fluid.layers.data( + shape=[1, 1, 1, 1], dtype='float32', name="x4" + ), + paddle.fluid.layers.data( + shape=[1, 4, 1, 1], dtype='float64', name="x5" + ), ] paddle.broadcast_tensors(inputs) def test_dtype(): inputs = [ - paddle.fluid.layers.data(shape=[1, 1, 1, 1], - dtype='int8', - name="x6"), - paddle.fluid.layers.data(shape=[1, 4, 1, 1], - dtype='int8', - name="x7") + paddle.fluid.layers.data( + shape=[1, 1, 1, 1], dtype='int8', name="x6" + ), + paddle.fluid.layers.data( + shape=[1, 4, 1, 1], dtype='int8', name="x7" + ), ] paddle.broadcast_tensors(inputs) def test_bcast_semantics(): inputs = [ - paddle.fluid.layers.data(shape=[1, 3, 1, 1], - dtype='float32', - name="x9"), - paddle.fluid.layers.data(shape=[1, 8, 1, 1], - dtype='float32', - name="x10") + paddle.fluid.layers.data( + shape=[1, 3, 1, 1], dtype='float32', name="x9" + ), + paddle.fluid.layers.data( + shape=[1, 8, 1, 1], dtype='float32', name="x10" + ), ] paddle.broadcast_tensors(inputs) @@ -230,33 +232,37 @@ class TestRaiseBroadcastTensorsError(unittest.TestCase): class TestRaiseBroadcastTensorsErrorDyGraph(unittest.TestCase): - def test_errors(self): - def test_type(): inputs = [ paddle.to_tensor( - np.ones(shape=[1, 1, 1, 1], dtype='float32', name="x4")), + np.ones(shape=[1, 
1, 1, 1], dtype='float32', name="x4") + ), paddle.to_tensor( - np.ones(shape=[1, 4, 1, 1], dtype='float64', name="x5")) + np.ones(shape=[1, 4, 1, 1], dtype='float64', name="x5") + ), ] paddle.broadcast_tensors(inputs) def test_dtype(): inputs = [ paddle.to_tensor( - np.ones(shape=[1, 1, 1, 1], dtype='int8', name="x6")), + np.ones(shape=[1, 1, 1, 1], dtype='int8', name="x6") + ), paddle.to_tensor( - np.ones(shape=[1, 4, 1, 1], dtype='int8', name="x7")) + np.ones(shape=[1, 4, 1, 1], dtype='int8', name="x7") + ), ] paddle.broadcast_tensors(inputs) def test_bcast_semantics(): inputs = [ paddle.to_tensor( - np.ones(shape=[1, 3, 1, 1], dtype='float32', name="x9")), + np.ones(shape=[1, 3, 1, 1], dtype='float32', name="x9") + ), paddle.to_tensor( - np.ones(shape=[1, 8, 1, 1], dtype='float32', name="x10")) + np.ones(shape=[1, 8, 1, 1], dtype='float32', name="x10") + ), ] paddle.broadcast_tensors(inputs) diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py index aa97e316a3d491021a9ea857ae8cbac5217afedd..e8e876766c332612819900597260cd2953ed5f15 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py @@ -22,11 +22,11 @@ paddle.enable_static() class TestBroadcastToError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.broadcast_to, x1, shape) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -38,19 +38,19 @@ class TestBroadcastToError(unittest.TestCase): # Test python API class TestBroadcastToAPI(unittest.TestCase): - def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data(name="expand_shape", - shape=[2], - append_batch_size=False, - dtype="int32") + expand_shape = fluid.layers.data( + name="expand_shape", + shape=[2], + append_batch_size=False, + dtype="int32", + ) out_1 = paddle.broadcast_to(x, shape=[12, 14]) out_2 = paddle.broadcast_to(x, shape=[positive_2, 14]) @@ -59,14 +59,14 @@ class TestBroadcastToAPI(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_shape": - np.array([12, 14]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + "expand_shape": np.array([12, 14]).astype("int32"), + }, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) assert np.array_equal(res_3, np.tile(input, (1, 1))) diff --git a/python/paddle/fluid/tests/unittests/test_bucketize_api.py b/python/paddle/fluid/tests/unittests/test_bucketize_api.py index e62c1a6028f424634235b459b8a15794f5da3f5d..e82d6e9d15c862e8a8efbf7505a5ea91f19a9601 100644 --- a/python/paddle/fluid/tests/unittests/test_bucketize_api.py +++ 
b/python/paddle/fluid/tests/unittests/test_bucketize_api.py @@ -38,20 +38,20 @@ class TestBucketizeAPI(unittest.TestCase): sorted_sequence = paddle.static.data( 'SortedSequence', shape=self.sorted_sequence.shape, - dtype="float64") + dtype="float64", + ) x = paddle.static.data('x', shape=self.x.shape, dtype="float64") out1 = paddle.bucketize(x, sorted_sequence) out2 = paddle.bucketize(x, sorted_sequence, right=True) exe = paddle.static.Executor(place) - res = exe.run(feed={ - 'SortedSequence': self.sorted_sequence, - 'x': self.x - }, - fetch_list=[out1, out2]) + res = exe.run( + feed={'SortedSequence': self.sorted_sequence, 'x': self.x}, + fetch_list=[out1, out2], + ) out_ref = np.searchsorted(self.sorted_sequence, self.x) - out_ref1 = np.searchsorted(self.sorted_sequence, - self.x, - side='right') + out_ref1 = np.searchsorted( + self.sorted_sequence, self.x, side='right' + ) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) np.testing.assert_allclose(out_ref1, res[1], rtol=1e-05) @@ -59,7 +59,6 @@ class TestBucketizeAPI(unittest.TestCase): run(place) def test_api_dygraph(self): - def run(place): paddle.disable_static(place) sorted_sequence = paddle.to_tensor(self.sorted_sequence) @@ -67,9 +66,9 @@ class TestBucketizeAPI(unittest.TestCase): out1 = paddle.bucketize(x, sorted_sequence) out2 = paddle.bucketize(x, sorted_sequence, right=True) out_ref1 = np.searchsorted(self.sorted_sequence, self.x) - out_ref2 = np.searchsorted(self.sorted_sequence, - self.x, - side='right') + out_ref2 = np.searchsorted( + self.sorted_sequence, self.x, side='right' + ) np.testing.assert_allclose(out_ref1, out1.numpy(), rtol=1e-05) np.testing.assert_allclose(out_ref2, out2.numpy(), rtol=1e-05) paddle.enable_static() @@ -86,9 +85,9 @@ class TestBucketizeAPI(unittest.TestCase): def test_bucketize_dims_error(self): with paddle.static.program_guard(paddle.static.Program()): - sorted_sequence = paddle.static.data('SortedSequence', - shape=[2, 2], - dtype="float64") + sorted_sequence = paddle.static.data( + 'SortedSequence', shape=[2, 2], dtype="float64" + ) x = paddle.static.data('x', shape=[2, 5], dtype="float64") self.assertRaises(ValueError, paddle.bucketize, x, sorted_sequence) @@ -96,16 +95,18 @@ class TestBucketizeAPI(unittest.TestCase): for place in self.place: paddle.disable_static(place) sorted_sequence = paddle.to_tensor(self.sorted_sequence) - self.assertRaises(ValueError, paddle.bucketize, self.x, - sorted_sequence) + self.assertRaises( + ValueError, paddle.bucketize, self.x, sorted_sequence + ) def test_empty_input_error(self): for place in self.place: paddle.disable_static(place) sorted_sequence = paddle.to_tensor(self.sorted_sequence) x = paddle.to_tensor(self.x) - self.assertRaises(ValueError, paddle.bucketize, None, - sorted_sequence) + self.assertRaises( + ValueError, paddle.bucketize, None, sorted_sequence + ) self.assertRaises(AttributeError, paddle.bucketize, x, None) diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py index e7d9b8e485d54ffea19fed17fc768e93a15b8df6..5534ff67915b2bf90f5dc82dc6ed35fa6c5e7087 100644 --- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py +++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py @@ -22,16 +22,14 @@ import unittest batch_size = 32 feed_dict = { - 'image': - np.random.random([batch_size, 784]).astype('float32'), - 'label': - np.random.random_integers(low=0, high=9, size=[batch_size, - 
1]).astype('int64') + 'image': np.random.random([batch_size, 784]).astype('float32'), + 'label': np.random.random_integers( + low=0, high=9, size=[batch_size, 1] + ).astype('int64'), } class InplaceTestBase(unittest.TestCase): - def initParameter(self): self.use_cuda = True self.fuse_all_optimizer_ops = False @@ -61,8 +59,10 @@ class InplaceTestBase(unittest.TestCase): with fluid.scope_guard(scope): exe = fluid.Executor( - fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace( - )) + fluid.CUDAPlace(0) + if self.use_cuda + else fluid.CPUPlace() + ) exe.run(startup_program) return main_program, scope, exe, loss @@ -93,11 +93,14 @@ class InplaceTestBase(unittest.TestCase): build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = memory_optimize build_strategy.enable_inplace = enable_inplace - build_strategy.fuse_all_optimizer_ops = self.fuse_all_optimizer_ops + build_strategy.fuse_all_optimizer_ops = ( + self.fuse_all_optimizer_ops + ) compiled_prog = fluid.CompiledProgram(prog).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - places=self.place) + places=self.place, + ) compiled_programs.append(compiled_prog) all_vars_name = self.get_all_vars(prog1) @@ -107,24 +110,26 @@ class InplaceTestBase(unittest.TestCase): for fetch_var in repeated_var_names[:4]: for _ in range(2): with fluid.scope_guard(scope1): - fetch_val1, = exe.run(prog1, - feed=feed_dict, - fetch_list=[fetch_var]) + (fetch_val1,) = exe.run( + prog1, feed=feed_dict, fetch_list=[fetch_var] + ) for scope, compiled_prog in zip(scopes, compiled_programs): with fluid.scope_guard(scope): - fetch_val2, = exe.run(compiled_prog, - feed=feed_dict, - fetch_list=[fetch_var]) + (fetch_val2,) = exe.run( + compiled_prog, + feed=feed_dict, + fetch_list=[fetch_var], + ) np.testing.assert_array_equal( fetch_val1, fetch_val2, - err_msg= - 'error var name: {}, fetch_val1: {}, fetch_val2: {}' - .format( + err_msg='error var name: {}, fetch_val1: {}, fetch_val2: {}'.format( fetch_var, fetch_val1[~np.equal(fetch_val1, fetch_val2)], - fetch_val2[~np.equal(fetch_val1, fetch_val2)])) + fetch_val2[~np.equal(fetch_val1, fetch_val2)], + ), + ) def check_multi_card_fetch_var(self): if self.is_invalid_test(): @@ -146,11 +151,16 @@ class InplaceTestBase(unittest.TestCase): build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = memory_optimize build_strategy.enable_inplace = enable_inplace - build_strategy.fuse_all_optimizer_ops = self.fuse_all_optimizer_ops + build_strategy.fuse_all_optimizer_ops = ( + self.fuse_all_optimizer_ops + ) compiled_program = fluid.CompiledProgram( - prog).with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=places) + prog + ).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + places=places, + ) compiled_programs.append(compiled_program) repeated_var_names = self.get_all_vars(prog1) @@ -161,9 +171,11 @@ class InplaceTestBase(unittest.TestCase): fetch_vals = [] for scope, compiled_prog in zip(scopes, compiled_programs): with fluid.scope_guard(scope): - fetch_val, = exe.run(compiled_prog, - feed=feed_dict, - fetch_list=[fetch_var]) + (fetch_val,) = exe.run( + compiled_prog, + feed=feed_dict, + fetch_list=[fetch_var], + ) fetch_vals.append(fetch_val) for item in fetch_vals: @@ -171,14 +183,15 @@ class InplaceTestBase(unittest.TestCase): np.testing.assert_array_equal( fetch_vals[0], item, - err_msg='error var name: {}, fetch_vals[0]: {}, item: {}' - .format(fetch_var, - fetch_vals[0][~np.equal(fetch_vals[0], item)], - 
item[~np.equal(fetch_vals[0], item)])) + err_msg='error var name: {}, fetch_vals[0]: {}, item: {}'.format( + fetch_var, + fetch_vals[0][~np.equal(fetch_vals[0], item)], + item[~np.equal(fetch_vals[0], item)], + ), + ) class CUDAInplaceTest(InplaceTestBase): - def initParameter(self): self.use_cuda = True self.fuse_all_optimizer_ops = False @@ -191,7 +204,6 @@ class CUDAInplaceTest(InplaceTestBase): class CPUInplaceTest(InplaceTestBase): - def initParameter(self): self.use_cuda = False self.fuse_all_optimizer_ops = False diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py index 6ce5e64b0ee5757daea3ff736600ee3a95ada668..e9e62bee006801e98b3171d1c90e58160d50385f 100644 --- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py +++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py @@ -17,7 +17,6 @@ import unittest class CUDAInplaceTestWithFuseOptimizationOps(InplaceTestBase): - def initParameter(self): self.use_cuda = True self.fuse_all_optimizer_ops = True @@ -31,7 +30,6 @@ class CUDAInplaceTestWithFuseOptimizationOps(InplaceTestBase): class CPUInplaceTestWithFuseOptimizationOps(InplaceTestBase): - def initParameter(self): self.use_cuda = False self.fuse_all_optimizer_ops = True diff --git a/python/paddle/fluid/tests/unittests/test_c_comm_init_all_op.py b/python/paddle/fluid/tests/unittests/test_c_comm_init_all_op.py index eb03c13602fd028bf8e9d21d44ec9930a904ecc9..7f4eac0d3fb36ac83d2a27776d56777f396896ed 100644 --- a/python/paddle/fluid/tests/unittests/test_c_comm_init_all_op.py +++ b/python/paddle/fluid/tests/unittests/test_c_comm_init_all_op.py @@ -18,7 +18,6 @@ import paddle.fluid as fluid class TestCCommInitAllOp(unittest.TestCase): - def setUp(self): self.place = fluid.CUDAPlace(0) self.exe = fluid.Executor(self.place) @@ -39,11 +38,9 @@ class TestCCommInitAllOp(unittest.TestCase): def test_specifying_devices(self): program = fluid.Program() block = program.global_block() - block.append_op(type='c_comm_init_all', - attrs={ - 'devices': [0], - 'ring_id': 1 - }) + block.append_op( + type='c_comm_init_all', attrs={'devices': [0], 'ring_id': 1} + ) self.exe.run(program) diff --git a/python/paddle/fluid/tests/unittests/test_c_embedding_op.py b/python/paddle/fluid/tests/unittests/test_c_embedding_op.py index 8b32a1857b2557a8871546fa5d0cd4e65b574af8..66c68097127a2d42cddde46225b99f25e00381f0 100644 --- a/python/paddle/fluid/tests/unittests/test_c_embedding_op.py +++ b/python/paddle/fluid/tests/unittests/test_c_embedding_op.py @@ -13,7 +13,11 @@ # limitations under the License. 
import unittest -from paddle.fluid.tests.unittests.c_embedding_op_base import TestCEmbeddingCPU, TestCEmbeddingOpBase, TestCEmbeddingOpFP32 +from paddle.fluid.tests.unittests.c_embedding_op_base import ( + TestCEmbeddingCPU, + TestCEmbeddingOpBase, + TestCEmbeddingOpFP32, +) TestCEmbeddingCPU() diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 350f30aaf1a559a74b4c050c3bb43cbf600769e5..59bc1f574e443aed65c5723d9942dc7cd58a594a 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -21,7 +21,6 @@ from paddle.fluid.backward import calc_gradient class TestCalcGradient(unittest.TestCase): - def test_calc_gradient(self): main = fluid.Program() startup = fluid.Program() @@ -39,7 +38,6 @@ class TestCalcGradient(unittest.TestCase): class TestDoubleGrad(unittest.TestCase): - def test1(self): main = fluid.Program() startup = fluid.Program() @@ -49,10 +47,11 @@ class TestDoubleGrad(unittest.TestCase): name='x', shape=[1], dtype='float32', - default_initializer=fluid.initializer.Constant(3)) - grad1, = fluid.gradients(net(x), x) # 2x = 6 + default_initializer=fluid.initializer.Constant(3), + ) + (grad1,) = fluid.gradients(net(x), x) # 2x = 6 z = net(x - grad1) - grad2, = fluid.gradients(z, x) # gradients( (x - 2x)^2) = 2x = 6 + (grad2,) = fluid.gradients(z, x) # gradients( (x - 2x)^2) = 2x = 6 place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -69,21 +68,21 @@ class TestDoubleGrad(unittest.TestCase): name='x', shape=[1], dtype='float32', - default_initializer=fluid.initializer.Constant(1)) + default_initializer=fluid.initializer.Constant(1), + ) y = x * x - dx1, = fluid.gradients(y, x) + (dx1,) = fluid.gradients(y, x) z = dx1 * dx1 + y * y - dx2, = fluid.gradients(z, x) + (dx2,) = fluid.gradients(z, x) place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup) - out, = exe.run(main, fetch_list=[dx2]) + (out,) = exe.run(main, fetch_list=[dx2]) self.assertEqual(12, out[0]) class TestGradientWithPrune(unittest.TestCase): - def test_prune(self): with paddle.fluid.scope_guard(paddle.static.Scope()): x = fluid.data(name='x', shape=[3], dtype='float32') @@ -95,14 +94,15 @@ class TestGradientWithPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) main = fluid.default_main_program() exe.run(fluid.default_startup_program()) - out = exe.run(main, - feed={'x': np.ones([3]).astype('float32')}, - fetch_list=[x1_grad]) + out = exe.run( + main, + feed={'x': np.ones([3]).astype('float32')}, + fetch_list=[x1_grad], + ) np.testing.assert_array_equal(out[0], [2.0, 0.0, 0.0]) class TestDoubleGradient(unittest.TestCase): - def build_program(self): start_prog = paddle.static.Program() main_prog = paddle.static.Program() @@ -128,16 +128,17 @@ class TestDoubleGradient(unittest.TestCase): start_prog, main_prog, fetch_list = self.build_program() exe = paddle.static.Executor() exe.run(start_prog) - ans = exe.run(main_prog, - feed={'x': np.ones([2, 2]).astype(np.float32)}, - fetch_list=fetch_list) + ans = exe.run( + main_prog, + feed={'x': np.ones([2, 2]).astype(np.float32)}, + fetch_list=fetch_list, + ) self.assertEqual(len(ans), 2) - self.assertListEqual(ans[0].tolist(), [[0., 0.], [0., 0.]]) - self.assertListEqual(ans[1].tolist(), [[2., 2.], [2., 2.]]) + self.assertListEqual(ans[0].tolist(), [[0.0, 0.0], [0.0, 0.0]]) + self.assertListEqual(ans[1].tolist(), [[2.0, 2.0], [2.0, 2.0]]) class TestDoubleGradient2(unittest.TestCase): - def 
build_program(self): start_prog = paddle.static.Program() main_prog = paddle.static.Program() @@ -156,8 +157,9 @@ class TestDoubleGradient2(unittest.TestCase): grad_x = paddle.static.gradients(y, x, grad_y) grad_x2 = paddle.static.gradients(y2, x, grad_y) # test with multi targets - jvp = paddle.static.gradients([grad_x[0], grad_x2[0]], grad_y, - [v, v]) + jvp = paddle.static.gradients( + [grad_x[0], grad_x2[0]], grad_y, [v, v] + ) return start_prog, main_prog, [grad_x, jvp] @@ -166,12 +168,14 @@ class TestDoubleGradient2(unittest.TestCase): start_prog, main_prog, fetch_list = self.build_program() exe = paddle.static.Executor() exe.run(start_prog) - ans = exe.run(main_prog, - feed={'x': np.ones([2, 2]).astype(np.float32)}, - fetch_list=fetch_list) + ans = exe.run( + main_prog, + feed={'x': np.ones([2, 2]).astype(np.float32)}, + fetch_list=fetch_list, + ) self.assertEqual(len(ans), 2) - self.assertListEqual(ans[0].tolist(), [[0., 0.], [0., 0.]]) - self.assertListEqual(ans[1].tolist(), [[5., 5.], [5., 5.]]) + self.assertListEqual(ans[0].tolist(), [[0.0, 0.0], [0.0, 0.0]]) + self.assertListEqual(ans[1].tolist(), [[5.0, 5.0], [5.0, 5.0]]) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index e550adc6a9ae1179ae9117f13b97a652cf47e862..62a3898fb9789386a872cbee29cdfdf4553ac887 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -25,9 +25,7 @@ import paddle.fluid.optimizer as optimizer class TestAPICase(unittest.TestCase): - def test_return_single_var(self): - def fn_1(): return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) @@ -47,16 +45,19 @@ class TestAPICase(unittest.TestCase): pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 # call fn_1 - out_0 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_1, fn_2)], - default=fn_3) + out_0 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_1, fn_2)], default=fn_3 + ) # call fn_2 - out_1 = layers.case(pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], - default=fn_3) + out_1 = layers.case( + pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], default=fn_3 + ) # call default fn_3 - out_2 = layers.case(pred_fn_pairs=((pred_2, fn_1), (pred_2, fn_2)), - default=fn_3) + out_2 = layers.case( + pred_fn_pairs=((pred_2, fn_1), (pred_2, fn_2)), default=fn_3 + ) # no default, call fn_2 out_3 = layers.case(pred_fn_pairs=[(pred_1, fn_2)]) @@ -64,12 +65,16 @@ class TestAPICase(unittest.TestCase): # no default, call fn_2. 
but pred_2 is false out_4 = layers.case(pred_fn_pairs=[(pred_2, fn_2)]) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(main_program, - fetch_list=[out_0, out_1, out_2, out_3, out_4]) + res = exe.run( + main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] + ) np.testing.assert_allclose(res[0], 1, rtol=1e-05) np.testing.assert_allclose(res[1], 2, rtol=1e-05) @@ -78,27 +83,20 @@ class TestAPICase(unittest.TestCase): np.testing.assert_allclose(res[4], 2, rtol=1e-05) def test_return_var_tuple(self): - def fn_1(): - return layers.fill_constant(shape=[1, 2], dtype='int32', - value=1), layers.fill_constant( - shape=[2, 3], - dtype='float32', - value=2) + return layers.fill_constant( + shape=[1, 2], dtype='int32', value=1 + ), layers.fill_constant(shape=[2, 3], dtype='float32', value=2) def fn_2(): - return layers.fill_constant(shape=[3, 4], dtype='int32', - value=3), layers.fill_constant( - shape=[4, 5], - dtype='float32', - value=4) + return layers.fill_constant( + shape=[3, 4], dtype='int32', value=3 + ), layers.fill_constant(shape=[4, 5], dtype='float32', value=4) def fn_3(): - return layers.fill_constant(shape=[5], dtype='int32', - value=5), layers.fill_constant( - shape=[5, 6], - dtype='float32', - value=6) + return layers.fill_constant( + shape=[5], dtype='int32', value=5 + ), layers.fill_constant(shape=[5, 6], dtype='float32', value=6) main_program = Program() startup_program = Program() @@ -112,56 +110,87 @@ class TestAPICase(unittest.TestCase): out = layers.case(((pred_1, fn_1), (pred_2, fn_2)), fn_3) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=out) - np.testing.assert_allclose(np.asarray(ret[0]), - np.full((1, 2), 1, np.int32), - rtol=1e-05) - np.testing.assert_allclose(np.asarray(ret[1]), - np.full((2, 3), 2, np.float32), - rtol=1e-05) + np.testing.assert_allclose( + np.asarray(ret[0]), np.full((1, 2), 1, np.int32), rtol=1e-05 + ) + np.testing.assert_allclose( + np.asarray(ret[1]), np.full((2, 3), 2, np.float32), rtol=1e-05 + ) class TestAPICase_Nested(unittest.TestCase): - def test_nested_case(self): - def fn_1(x=1): var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) - out = layers.case(pred_fn_pairs=[ - (var_5 < var_6, - partial( - layers.fill_constant, shape=[1], dtype='int32', value=x)), - (var_5 == var_6, - partial( - layers.fill_constant, shape=[2], dtype='int32', value=x)) - ]) + out = layers.case( + pred_fn_pairs=[ + ( + var_5 < var_6, + partial( + layers.fill_constant, + shape=[1], + dtype='int32', + value=x, + ), + ), + ( + var_5 == var_6, + partial( + layers.fill_constant, + shape=[2], + dtype='int32', + value=x, + ), + ), + ] + ) return out def fn_2(x=2): var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) - out = layers.case(pred_fn_pairs=[ - (var_5 < var_6, partial(fn_1, x=x)), - (var_5 == var_6, - partial( - layers.fill_constant, shape=[2], dtype='int32', value=x)) - ]) + out = layers.case( + pred_fn_pairs=[ + (var_5 < var_6, partial(fn_1, x=x)), + ( + var_5 == var_6, + partial( + layers.fill_constant, + shape=[2], + dtype='int32', + 
value=x, + ), + ), + ] + ) return out def fn_3(): var_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) var_6 = layers.fill_constant(shape=[1], dtype='int32', value=6) - out = layers.case(pred_fn_pairs=[ - (var_5 < var_6, partial(fn_2, x=3)), - (var_5 == var_6, - partial( - layers.fill_constant, shape=[2], dtype='int32', value=7)) - ]) + out = layers.case( + pred_fn_pairs=[ + (var_5 < var_6, partial(fn_2, x=3)), + ( + var_5 == var_6, + partial( + layers.fill_constant, + shape=[2], + dtype='int32', + value=7, + ), + ), + ] + ) return out main_program = Program() @@ -173,17 +202,23 @@ class TestAPICase_Nested(unittest.TestCase): pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 - out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], - default=fn_3) + out_1 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3 + ) - out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], - default=fn_3) + out_2 = layers.case( + pred_fn_pairs=[(pred_2, fn_1), (pred_1, fn_2)], default=fn_3 + ) - out_3 = layers.case(pred_fn_pairs=[(x == y, fn_1), (x == z, fn_2)], - default=fn_3) + out_3 = layers.case( + pred_fn_pairs=[(x == y, fn_1), (x == z, fn_2)], default=fn_3 + ) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) res = exe.run(main_program, fetch_list=[out_1, out_2, out_3]) @@ -194,9 +229,7 @@ class TestAPICase_Nested(unittest.TestCase): class TestAPICase_Error(unittest.TestCase): - def test_error(self): - def fn_1(): return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) @@ -246,18 +279,17 @@ class TestAPICase_Error(unittest.TestCase): # when optimizer in case class TestMutiTask(unittest.TestCase): - def test_optimizer_in_case(self): BATCH_SIZE = 1 INPUT_SIZE = 784 EPOCH_NUM = 2 - x = fluid.data(name='x', - shape=[BATCH_SIZE, INPUT_SIZE], - dtype='float32') - y = fluid.data(name='y', - shape=[BATCH_SIZE, INPUT_SIZE], - dtype='float32') + x = fluid.data( + name='x', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' + ) + y = fluid.data( + name='y', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' + ) switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32') @@ -282,16 +314,19 @@ class TestMutiTask(unittest.TestCase): for epoch in range(EPOCH_NUM): np.random.seed(epoch) - feed_image = np.random.random( - size=[BATCH_SIZE, INPUT_SIZE]).astype('float32') + feed_image = np.random.random(size=[BATCH_SIZE, INPUT_SIZE]).astype( + 'float32' + ) main_program = fluid.default_main_program() - out = exe.run(main_program, - feed={ - 'x': feed_image, - 'y': feed_image, - 'switch_id': np.array([epoch]).astype('int32') - }, - fetch_list=[]) + out = exe.run( + main_program, + feed={ + 'x': feed_image, + 'y': feed_image, + 'switch_id': np.array([epoch]).astype('int32'), + }, + fetch_list=[], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index f4cabd6519c2e7b62ac8b25af0bc5c05c1e98ba8..8665e285fa5cfb8256737a13362d4571f98e7388 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -27,14 +27,13 @@ import paddle.fluid.layers as layers class TestCastOpFp32ToFp64(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} 
self.outputs = {'Out': ipt.astype('float64')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.FP64) + 'out_dtype': int(core.VarDesc.VarType.FP64), } self.op_type = 'cast' @@ -46,14 +45,13 @@ class TestCastOpFp32ToFp64(OpTest): class TestCastOpFp16ToFp32(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float16')} self.outputs = {'Out': ipt.astype('float32')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP16), - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'cast' self.__class__.no_need_check_grad = True @@ -63,14 +61,13 @@ class TestCastOpFp16ToFp32(OpTest): class TestCastOpFp32ToFp16(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float16')} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.FP16) + 'out_dtype': int(core.VarDesc.VarType.FP16), } self.op_type = 'cast' self.__class__.no_need_check_grad = True @@ -80,14 +77,13 @@ class TestCastOpFp32ToFp16(OpTest): class TestCastOpBf16ToFp32(OpTest): - def setUp(self): ipt = np.array(np.random.randint(10, size=[10, 10])).astype('uint16') self.inputs = {'X': ipt} self.outputs = {'Out': convert_uint16_to_float(ipt)} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.BF16), - 'out_dtype': int(core.VarDesc.VarType.FP32) + 'out_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'cast' self.__class__.no_need_check_grad = True @@ -97,14 +93,13 @@ class TestCastOpBf16ToFp32(OpTest): class TestCastOpFp32ToBf16(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]).astype('float32') self.inputs = {'X': ipt} self.outputs = {'Out': convert_float_to_uint16(ipt)} self.attrs = { 'in_dtype': int(core.VarDesc.VarType.FP32), - 'out_dtype': int(core.VarDesc.VarType.BF16) + 'out_dtype': int(core.VarDesc.VarType.BF16), } self.op_type = 'cast' self.__class__.no_need_check_grad = True @@ -114,32 +109,31 @@ class TestCastOpFp32ToBf16(OpTest): class TestCastOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of cast_op must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32') class TestCastOpEager(unittest.TestCase): - def test_eager(self): with paddle.fluid.dygraph.base.guard(): with _test_eager_guard(): x = paddle.ones([2, 2], dtype="float16") x.stop_gradient = False out = paddle.cast(x, "float32") - np.testing.assert_array_equal(out, - np.ones([2, 2]).astype('float32')) + np.testing.assert_array_equal( + out, np.ones([2, 2]).astype('float32') + ) out.backward() np.testing.assert_array_equal(x.gradient(), x.numpy()) self.assertTrue(x.gradient().dtype == np.float16) class TestCastDoubleGradCheck(unittest.TestCase): - def cast_wrapper(self, x): return paddle.cast(x[0], 'float64') @@ -154,17 +148,13 @@ class TestCastDoubleGradCheck(unittest.TestCase): out = paddle.cast(data, 'float64') data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.cast_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.cast_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -176,7 +166,6 @@ class TestCastDoubleGradCheck(unittest.TestCase): class TestCastTripleGradCheck(unittest.TestCase): - def cast_wrapper(self, x): return paddle.cast(x[0], 'float64') @@ -191,17 +180,13 @@ class TestCastTripleGradCheck(unittest.TestCase): out = paddle.cast(data, 'float64') data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.cast_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.cast_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_center_loss.py b/python/paddle/fluid/tests/unittests/test_center_loss.py index 59c8b417d10024ace0c12650ba1f1b7e8eebe99e..b7eda71c0217b03e95ce9d057806366a4cf506e3 100644 --- a/python/paddle/fluid/tests/unittests/test_center_loss.py +++ b/python/paddle/fluid/tests/unittests/test_center_loss.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestCenterLossOp(OpTest): - def setUp(self): self.op_type = "center_loss" self.dtype = np.float64 @@ -47,7 +46,7 @@ class TestCenterLossOp(OpTest): cout[labels[i]] += 1 var_sum[labels[i]] += output[i] for i in range(cluster_num): - var_sum[i] /= (1 + cout[i]) + var_sum[i] /= 1 + cout[i] var_sum *= 0.1 result = centers + var_sum rate = np.array([0.1]).astype(np.float64) @@ -56,20 +55,20 @@ class TestCenterLossOp(OpTest): 'X': feat, 'Label': labels, 'Centers': centers, - 'CenterUpdateRate': rate + 'CenterUpdateRate': rate, } if self.need_update == True: self.outputs = { 'SampleCenterDiff': output, 'Loss': loss, - 'CentersOut': result + 'CentersOut': result, } else: self.outputs = { 'SampleCenterDiff': output, 
'Loss': loss, - 'CentersOut': centers + 'CentersOut': centers, } def config(self): @@ -86,35 +85,34 @@ class TestCenterLossOp(OpTest): class TestCenterLossOpNoUpdate(TestCenterLossOp): - def config(self): self.need_update = False class BadInputTestCenterLoss(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): data = [[1, 2, 3, 4], [5, 6, 7, 8]] - label = fluid.layers.data(name='label', - shape=[2, 1], - dtype='int32') + label = fluid.layers.data( + name='label', shape=[2, 1], dtype='int32' + ) res = fluid.layers.center_loss( data, label, num_classes=1000, alpha=0.2, param_attr=fluid.initializer.Xavier(uniform=False), - update_center=True) + update_center=True, + ) self.assertRaises(TypeError, test_bad_x) def test_bad_y(): - data = fluid.layers.data(name='data', - shape=[2, 32], - dtype='float32') + data = fluid.layers.data( + name='data', shape=[2, 32], dtype='float32' + ) label = [[2], [3]] res = fluid.layers.center_loss( data, @@ -122,30 +120,38 @@ class BadInputTestCenterLoss(unittest.TestCase): num_classes=1000, alpha=0.2, param_attr=fluid.initializer.Xavier(uniform=False), - update_center=True) + update_center=True, + ) self.assertRaises(TypeError, test_bad_y) def test_bad_alpha(): - data = fluid.layers.data(name='data2', - shape=[2, 32], - dtype='float32', - append_batch_size=False) - label = fluid.layers.data(name='label2', - shape=[2, 1], - dtype='int32', - append_batch_size=False) - alpha = fluid.layers.data(name='alpha', - shape=[1], - dtype='int64', - append_batch_size=False) + data = fluid.layers.data( + name='data2', + shape=[2, 32], + dtype='float32', + append_batch_size=False, + ) + label = fluid.layers.data( + name='label2', + shape=[2, 1], + dtype='int32', + append_batch_size=False, + ) + alpha = fluid.layers.data( + name='alpha', + shape=[1], + dtype='int64', + append_batch_size=False, + ) res = fluid.layers.center_loss( data, label, num_classes=1000, alpha=alpha, param_attr=fluid.initializer.Xavier(uniform=False), - update_center=True) + update_center=True, + ) self.assertRaises(TypeError, test_bad_alpha) diff --git a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py index 50df6bbc024cf6ebc6b76e30a845b9f255c56224..0da2a651477e2bad0f74dcb6ae2836913535ec16 100644 --- a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py +++ b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py @@ -42,7 +42,6 @@ def channel_shuffle_np(x, groups, data_format="NCHW"): class TestChannelShuffleOp(OpTest): - def setUp(self): self.op_type = "channel_shuffle" self.init_data_format() @@ -73,13 +72,11 @@ class TestChannelShuffleOp(OpTest): class TestChannelLast(TestChannelShuffleOp): - def init_data_format(self): self.format = "NHWC" class TestChannelShuffleAPI(unittest.TestCase): - def setUp(self): self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float64") self.x_2_np = np.random.random([2, 4, 4, 9]).astype("float64") @@ -87,47 +84,53 @@ class TestChannelShuffleAPI(unittest.TestCase): self.out_2_np = channel_shuffle_np(self.x_2_np, 3, "NHWC") def test_static_graph_functional(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 9, 4, 4], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 
4, 4, 9], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 9, 4, 4], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 4, 4, 9], dtype="float64" + ) out_1 = F.channel_shuffle(x_1, 3) out_2 = F.channel_shuffle(x_2, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, self.out_1_np) assert np.allclose(res_2, self.out_2_np) # same test between layer and functional in this op. def test_static_graph_layer(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 9, 4, 4], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 4, 4, 9], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 9, 4, 4], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 4, 4, 9], dtype="float64" + ) # init instance ps_1 = paddle.nn.ChannelShuffle(3) ps_2 = paddle.nn.ChannelShuffle(3, "NHWC") @@ -137,15 +140,19 @@ class TestChannelShuffleAPI(unittest.TestCase): out_2_np = channel_shuffle_np(self.x_2_np, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, out_1_np) assert np.allclose(res_2, out_2_np) @@ -163,23 +170,26 @@ class TestChannelShuffleAPI(unittest.TestCase): npresult = channel_shuffle_np(x, groups, data_format) - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) - channel_shuffle = paddle.nn.ChannelShuffle(groups, - data_format=data_format) + channel_shuffle = paddle.nn.ChannelShuffle( + groups, data_format=data_format + ) result = channel_shuffle(paddle.to_tensor(x)) np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) - result_functional = F.channel_shuffle(paddle.to_tensor(x), 3, - data_format) - np.testing.assert_allclose(result_functional.numpy(), - npresult, - rtol=1e-05) + result_functional = F.channel_shuffle( + paddle.to_tensor(x), 3, data_format + ) + np.testing.assert_allclose( + result_functional.numpy(), npresult, rtol=1e-05 + ) channel_shuffle_str = 'groups={}'.format(groups) if data_format != 'NCHW': @@ -194,9 +204,7 @@ class TestChannelShuffleAPI(unittest.TestCase): class TestChannelShuffleError(unittest.TestCase): - def test_error_functional(self): - def 
error_input(): with paddle.fluid.dygraph.guard(): x = np.random.random([9, 4, 4]).astype("float64") @@ -221,13 +229,13 @@ class TestChannelShuffleError(unittest.TestCase): def error_data_format(): with paddle.fluid.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") - channel_shuffle = F.channel_shuffle(paddle.to_tensor(x), 3, - "WOW") + channel_shuffle = F.channel_shuffle( + paddle.to_tensor(x), 3, "WOW" + ) self.assertRaises(ValueError, error_data_format) def test_error_layer(self): - def error_input_layer(): with paddle.fluid.dygraph.guard(): x = np.random.random([9, 4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_check_import_scipy.py b/python/paddle/fluid/tests/unittests/test_check_import_scipy.py index 1707ff232a1a4e488abc666a9403fe25b757ebf8..f06b5c97e3f904b59734aa28124880d8add76c7b 100644 --- a/python/paddle/fluid/tests/unittests/test_check_import_scipy.py +++ b/python/paddle/fluid/tests/unittests/test_check_import_scipy.py @@ -21,7 +21,6 @@ def my_import(name, globals=None, locals=None, fromlist=(), level=0): class importTest(unittest.TestCase): - def test_import(self): testOsName = 'nt' old_import = builtins.__import__ diff --git a/python/paddle/fluid/tests/unittests/test_checkpoint_saver.py b/python/paddle/fluid/tests/unittests/test_checkpoint_saver.py index b9851a283fe09b7daab67c76b6094ae23bf19a49..f3b539d8057aeb3b5ee0314f32ffc86d5d416e4a 100644 --- a/python/paddle/fluid/tests/unittests/test_checkpoint_saver.py +++ b/python/paddle/fluid/tests/unittests/test_checkpoint_saver.py @@ -20,7 +20,6 @@ from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver class CheckpointerSaverTest(unittest.TestCase): - def test(self): fs = HDFSClient("/usr/local/hadoop-2.7.7", None) dir_path = "./checkpointsaver_test" diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_op.py index b903cd9159da2ea94bacd460ad932d57ec582d6c..1f5d60843139aa0943431d10cf40401ec439941f 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_op.py @@ -24,16 +24,15 @@ from decorator_helper import prog_scope @skip_check_grad_ci( - reason= - "The input of cholesky_op should always be symmetric positive-definite. " + reason="The input of cholesky_op should always be symmetric positive-definite. " "However, OpTest calculates the numeric gradient of each element in input " "via small finite difference, which makes the input no longer symmetric " "positive-definite thus can not compute the Cholesky decomposition. 
" "While we can use the gradient_checker.grad_check to perform gradient " "check of cholesky_op, since it supports check gradient with a program " - "and we can construct symmetric positive-definite matrices in the program") + "and we can construct symmetric positive-definite matrices in the program" +) class TestCholeskyOp(OpTest): - def setUp(self): self.op_type = "cholesky" self._input_shape = (2, 32, 32) @@ -41,12 +40,14 @@ class TestCholeskyOp(OpTest): self.init_config() self.trans_dims = list(range(len(self._input_shape) - 2)) + [ len(self._input_shape) - 1, - len(self._input_shape) - 2 + len(self._input_shape) - 2, ] self.root_data = np.random.random(self._input_shape).astype("float64") # construct symmetric positive-definite matrice - input_data = np.matmul( - self.root_data, self.root_data.transpose(self.trans_dims)) + 1e-05 + input_data = ( + np.matmul(self.root_data, self.root_data.transpose(self.trans_dims)) + + 1e-05 + ) output_data = np.linalg.cholesky(input_data).astype("float64") if self._upper: output_data = output_data.transpose(self.trans_dims) @@ -70,8 +71,9 @@ class TestCholeskyOp(OpTest): root_data = self.root_data[..., :3, :3] prog = fluid.Program() with fluid.program_guard(prog): - root = layers.create_parameter(dtype=root_data.dtype, - shape=root_data.shape) + root = layers.create_parameter( + dtype=root_data.dtype, shape=root_data.shape + ) root_t = layers.transpose(root, self.trans_dims) x = layers.matmul(x=root, y=root_t) + 1e-05 out = paddle.cholesky(x, upper=self.attrs["upper"]) @@ -82,19 +84,16 @@ class TestCholeskyOp(OpTest): class TestCholeskyOpLower(TestCholeskyOp): - def init_config(self): self._upper = False class TestCholeskyOp2D(TestCholeskyOp): - def init_config(self): self._input_shape = (64, 64) class TestDygraph(unittest.TestCase): - def test_dygraph(self): if core.is_compiled_with_rocm(): paddle.disable_static(place=fluid.CPUPlace()) @@ -108,7 +107,6 @@ class TestDygraph(unittest.TestCase): class TestCholeskySingularAPI(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and (not core.is_compiled_with_rocm()): @@ -123,9 +121,11 @@ class TestCholeskySingularAPI(unittest.TestCase): exe = fluid.Executor(place) try: - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) except RuntimeError as ex: print("The mat is singular") except ValueError as ex: @@ -138,9 +138,12 @@ class TestCholeskySingularAPI(unittest.TestCase): def test_dygraph(self): for place in self.places: with fluid.dygraph.guard(place): - input_np = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], - [16, 17, 18]]]).astype("float64") + input_np = np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + ] + ).astype("float64") input = fluid.dygraph.to_variable(input_np) try: result = paddle.cholesky(input) diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py index fda558f13bcb45d50cb140ee238830c73da932fa..5ed62fe2e0076de9b41bae6023f81614a01e1b27 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py @@ -28,7 +28,7 @@ from paddle.fluid import Program, program_guard, core paddle.enable_static() -#cholesky_solve implement 1 +# cholesky_solve implement 
1 def cholesky_solution(X, B, upper=True): if upper: A = np.triu(X) @@ -39,10 +39,11 @@ def cholesky_solution(X, B, upper=True): L = A U = A.T return scipy.linalg.solve_triangular( - U, scipy.linalg.solve_triangular(L, B, lower=True)) + U, scipy.linalg.solve_triangular(L, B, lower=True) + ) -#cholesky_solve implement 2 +# cholesky_solve implement 2 def scipy_cholesky_solution(X, B, upper=True): if upper: umat = np.triu(X) @@ -54,7 +55,7 @@ def scipy_cholesky_solution(X, B, upper=True): return scipy.linalg.cho_solve(K, B) -#broadcast function used by cholesky_solve +# broadcast function used by cholesky_solve def broadcast_shape(matA, matB): shapeA = matA.shape shapeB = matB.shape @@ -67,14 +68,16 @@ def broadcast_shape(matA, matB): Broadshape.append(max(shapeA[idx], shapeB[idx])) else: raise Exception( - 'shapeA and shapeB should be broadcasted, but got {} and {}'. - format(shapeA, shapeB)) + 'shapeA and shapeB should be broadcasted, but got {} and {}'.format( + shapeA, shapeB + ) + ) bsA = Broadshape + list(shapeA[-2:]) bsB = Broadshape + list(shapeB[-2:]) return np.broadcast_to(matA, bsA), np.broadcast_to(matB, bsB) -#cholesky_solve implement in batch +# cholesky_solve implement in batch def scipy_cholesky_solution_batch(bumat, bB, upper=True): bumat, bB = broadcast_shape(bumat, bB) ushape = bumat.shape @@ -99,19 +102,21 @@ class TestCholeskySolveOp(OpTest): case 1 """ - #test condition set + # test condition set def config(self): self.y_shape = [15, 15] self.x_shape = [15, 5] self.upper = False - self.dtype = np.float64 #Here cholesky_solve Op only supports float64/float32 type, please check others if Op supports more types. + self.dtype = ( + np.float64 + ) # Here cholesky_solve Op only supports float64/float32 type, please check others if Op supports more types. 
- #get scipy result + # get scipy result def set_output(self): umat = self.inputs['Y'] - self.output = scipy_cholesky_solution_batch(umat, - self.inputs['X'], - upper=self.upper) + self.output = scipy_cholesky_solution_batch( + umat, self.inputs['X'], upper=self.upper + ) def setUp(self): self.op_type = "cholesky_solve" @@ -124,17 +129,17 @@ class TestCholeskySolveOp(OpTest): self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': umat + 'Y': umat, } self.attrs = {'upper': self.upper} self.set_output() self.outputs = {'Out': self.output} - #check Op forward result + # check Op forward result def test_check_output(self): self.check_output() - #check Op grad + # check Op grad def test_check_grad_normal(self): self.check_grad(['Y'], 'Out', max_relative_error=0.01) @@ -152,9 +157,8 @@ class TestCholeskySolveOp3(TestCholeskySolveOp): self.dtype = np.float64 -#API function test +# API function test class TestCholeskySolveAPI(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -180,22 +184,20 @@ class TestCholeskySolveAPI(unittest.TestCase): z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "x": x_np, - "y": umat - }, - fetch_list=[z]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": x_np, "y": umat}, + fetch_list=[z], + ) np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05) - #test in static mode + # test in static mode def test_static(self): for place in self.place: self.check_static_result(place=place) - #test in dynamic mode + # test in dynamic mode def test_dygraph(self): - def run(place): paddle.disable_static(place) x_np = np.random.random([20, 2]).astype(self.dtype) @@ -213,9 +215,8 @@ class TestCholeskySolveAPI(unittest.TestCase): for idx, place in enumerate(self.place): run(place) - #test input with broadcast + # test input with broadcast def test_broadcast(self): - def run(place): paddle.disable_static() x_np = np.random.random([1, 30, 2]).astype(self.dtype) @@ -234,17 +235,18 @@ class TestCholeskySolveAPI(unittest.TestCase): run(place) -#test condition out of bounds +# test condition out of bounds class TestCholeskySolveOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of solve_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1) # The data type of input must be float32 or float64. 
diff --git a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py index b6110a296f031c8961f2c57c3bc97d5ee9c0099b..b82404e01a403f6f7dad4bbd16a9874ccc30a77c 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py @@ -21,15 +21,17 @@ from paddle import fluid class Segment(object): - def __init__(self, chunk_type, start_idx, end_idx): self.chunk_type = chunk_type self.start_idx = start_idx self.end_idx = end_idx def __str__(self): - return '(Segment: %s, %s, %s)' % (self.chunk_type, self.start_idx, - self.end_idx) + return '(Segment: %s, %s, %s)' % ( + self.chunk_type, + self.start_idx, + self.end_idx, + ) __repr__ = __str__ @@ -48,17 +50,24 @@ class TestChunkEvalOp(OpTest): for chunk in chunks: if self.scheme == 'IOB': data[chunk.start_idx] = chunk.chunk_type * self.num_tag_types - data[chunk.start_idx + 1:chunk. - end_idx] = chunk.chunk_type * self.num_tag_types + ( - self.num_tag_types - 1) - data[chunk.end_idx] = chunk.chunk_type * self.num_tag_types + ( + data[ + chunk.start_idx + 1 : chunk.end_idx + ] = chunk.chunk_type * self.num_tag_types + ( self.num_tag_types - 1 - ) if chunk.start_idx < chunk.end_idx else data[chunk.start_idx] + ) + data[chunk.end_idx] = ( + chunk.chunk_type * self.num_tag_types + + (self.num_tag_types - 1) + if chunk.start_idx < chunk.end_idx + else data[chunk.start_idx] + ) elif self.scheme == 'IOE': - data[chunk.start_idx:chunk. - end_idx] = chunk.chunk_type * self.num_tag_types + data[chunk.start_idx : chunk.end_idx] = ( + chunk.chunk_type * self.num_tag_types + ) data[chunk.end_idx] = chunk.chunk_type * self.num_tag_types + ( - self.num_tag_types - 1) + self.num_tag_types - 1 + ) def rand_chunks(self, starts, num_chunks): if num_chunks < 0: @@ -66,15 +75,17 @@ class TestChunkEvalOp(OpTest): chunks = [] # generate chunk beginnings chunk_begins = sorted( - np.random.choice(list(range(starts[-1])), num_chunks, - replace=False)) + np.random.choice(list(range(starts[-1])), num_chunks, replace=False) + ) seq_chunk_begins = [] begin_idx = 0 # divide chunks into sequences for i in range(len(starts) - 1): tmp_chunk_begins = [] - while begin_idx < len( - chunk_begins) and chunk_begins[begin_idx] < starts[i + 1]: + while ( + begin_idx < len(chunk_begins) + and chunk_begins[begin_idx] < starts[i + 1] + ): tmp_chunk_begins.append(chunk_begins[begin_idx]) begin_idx += 1 seq_chunk_begins.append(tmp_chunk_begins) @@ -83,9 +94,11 @@ class TestChunkEvalOp(OpTest): for i in range(len(seq_chunk_begins)): for j in range(len(seq_chunk_begins[i])): low = seq_chunk_begins[i][j] - high = seq_chunk_begins[i][ - j + 1] if j < len(seq_chunk_begins[i]) - 1 else starts[i + - 1] + high = ( + seq_chunk_begins[i][j + 1] + if j < len(seq_chunk_begins[i]) - 1 + else starts[i + 1] + ) chunk_ends.append(np.random.randint(low, high)) # generate chunks for chunk_pos in zip(chunk_begins, chunk_ends): @@ -95,20 +108,25 @@ class TestChunkEvalOp(OpTest): def gen_chunks(self, infer, label, starts): chunks = self.rand_chunks( - starts, self.num_infer_chunks + self.num_label_chunks - - self.num_correct_chunks) - correct_chunks = np.random.choice(list(range(len(chunks))), - self.num_correct_chunks, - replace=False) + starts, + self.num_infer_chunks + + self.num_label_chunks + - self.num_correct_chunks, + ) + correct_chunks = np.random.choice( + list(range(len(chunks))), self.num_correct_chunks, replace=False + ) infer_chunks = np.random.choice( [x for x in 
range(len(chunks)) if x not in correct_chunks], self.num_infer_chunks - self.num_correct_chunks, - replace=False) + replace=False, + ) infer_chunks = sorted(correct_chunks.tolist() + infer_chunks.tolist()) label_chunks = np.random.choice( [x for x in range(len(chunks)) if x not in infer_chunks], self.num_label_chunks - self.num_correct_chunks, - replace=False) + replace=False, + ) label_chunks = sorted(correct_chunks.tolist() + label_chunks.tolist()) self.fill_with_chunks(infer, [chunks[idx] for idx in infer_chunks]) self.fill_with_chunks(label, [chunks[idx] for idx in label_chunks]) @@ -123,7 +141,11 @@ class TestChunkEvalOp(OpTest): for idx in label_chunks: if chunks[idx].chunk_type in self.excluded_chunk_types: self.num_label_chunks -= 1 - return self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks + return ( + self.num_correct_chunks, + self.num_infer_chunks, + self.num_label_chunks, + ) def set_confs(self): # Use the IOB scheme and labels with 2 chunk types @@ -134,43 +156,63 @@ class TestChunkEvalOp(OpTest): self.attrs = { 'num_chunk_types': self.num_chunk_types, 'chunk_scheme': self.scheme, - 'excluded_chunk_types': self.excluded_chunk_types + 'excluded_chunk_types': self.excluded_chunk_types, } self.parse_scheme() - self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = 4, 5, 9 + ( + self.num_correct_chunks, + self.num_infer_chunks, + self.num_label_chunks, + ) = (4, 5, 9) def set_data(self): - infer = np.zeros((self.batch_size, )).astype('int64') + infer = np.zeros((self.batch_size,)).astype('int64') infer.fill(self.num_chunk_types * self.num_tag_types) label = np.copy(infer) - starts = np.random.choice(list(range(1, self.batch_size)), - self.num_sequences - 1, - replace=False).tolist() + starts = np.random.choice( + list(range(1, self.batch_size)), + self.num_sequences - 1, + replace=False, + ).tolist() starts.extend([0, self.batch_size]) starts = sorted(starts) - self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = self.gen_chunks( - infer, label, starts) + ( + self.num_correct_chunks, + self.num_infer_chunks, + self.num_label_chunks, + ) = self.gen_chunks(infer, label, starts) lod = [] for i in range(len(starts) - 1): lod.append(starts[i + 1] - starts[i]) self.set_input(infer, label, lod) - precision = float( - self.num_correct_chunks - ) / self.num_infer_chunks if self.num_infer_chunks else 0 - recall = float(self.num_correct_chunks - ) / self.num_label_chunks if self.num_label_chunks else 0 - f1 = float(2 * precision * recall) / ( - precision + recall) if self.num_correct_chunks else 0 + precision = ( + float(self.num_correct_chunks) / self.num_infer_chunks + if self.num_infer_chunks + else 0 + ) + recall = ( + float(self.num_correct_chunks) / self.num_label_chunks + if self.num_label_chunks + else 0 + ) + f1 = ( + float(2 * precision * recall) / (precision + recall) + if self.num_correct_chunks + else 0 + ) self.outputs = { 'Precision': np.asarray([precision], dtype='float32'), 'Recall': np.asarray([recall], dtype='float32'), 'F1-Score': np.asarray([f1], dtype='float32'), - 'NumInferChunks': np.asarray([self.num_infer_chunks], - dtype='int64'), - 'NumLabelChunks': np.asarray([self.num_label_chunks], - dtype='int64'), - 'NumCorrectChunks': np.asarray([self.num_correct_chunks], - dtype='int64') + 'NumInferChunks': np.asarray( + [self.num_infer_chunks], dtype='int64' + ), + 'NumLabelChunks': np.asarray( + [self.num_label_chunks], dtype='int64' + ), + 'NumCorrectChunks': np.asarray( + [self.num_correct_chunks], dtype='int64' + 
), } def set_input(self, infer, label, lod): @@ -186,7 +228,6 @@ class TestChunkEvalOp(OpTest): class TestChunkEvalOpWithExclude(TestChunkEvalOp): - def set_confs(self): # Use the IOE scheme and labels with 3 chunk types self.scheme = 'IOE' @@ -196,14 +237,17 @@ class TestChunkEvalOpWithExclude(TestChunkEvalOp): self.attrs = { 'num_chunk_types': self.num_chunk_types, 'chunk_scheme': self.scheme, - 'excluded_chunk_types': self.excluded_chunk_types + 'excluded_chunk_types': self.excluded_chunk_types, } self.parse_scheme() - self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = 15, 18, 20 + ( + self.num_correct_chunks, + self.num_infer_chunks, + self.num_label_chunks, + ) = (15, 18, 20) class TestChunkEvalOpWithTensorInput(TestChunkEvalOp): - def set_input(self, infer, label, lod): max_len = np.max(lod) pad_infer = [] @@ -212,13 +256,21 @@ class TestChunkEvalOpWithTensorInput(TestChunkEvalOp): for i in range(len(lod)): end = lod[i] + start pad_infer.append( - np.pad(infer[start:end], (0, max_len - lod[i]), - 'constant', - constant_values=(-1, ))) + np.pad( + infer[start:end], + (0, max_len - lod[i]), + 'constant', + constant_values=(-1,), + ) + ) pad_label.append( - np.pad(label[start:end], (0, max_len - lod[i]), - 'constant', - constant_values=(-1, ))) + np.pad( + label[start:end], + (0, max_len - lod[i]), + 'constant', + constant_values=(-1,), + ) + ) start = end pad_infer = np.expand_dims(np.array(pad_infer, dtype='int64'), 2) @@ -227,46 +279,51 @@ class TestChunkEvalOpWithTensorInput(TestChunkEvalOp): self.inputs = { 'Inference': pad_infer, 'Label': pad_label, - 'SeqLength': lod + 'SeqLength': lod, } class TestChunkEvalOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_input(): input_data = np.random.random(1, 1).astype("int64") label_data = np.random.random(1).astype("int64") - fluid.layers.chunk_eval(input=input_data, - label=label_data, - chunk_scheme="IOB", - num_chunk_types=3) + fluid.layers.chunk_eval( + input=input_data, + label=label_data, + chunk_scheme="IOB", + num_chunk_types=3, + ) self.assertRaises(TypeError, test_input) def test_label(): - input_ = fluid.data(name="input", - shape=[None, 1], - dtype="int64") + input_ = fluid.data( + name="input", shape=[None, 1], dtype="int64" + ) label_data = np.random.random(1).astype("int64") - fluid.layers.chunk_eval(input=input_, - label=label_data, - chunk_scheme="IOB", - num_chunk_types=3) + fluid.layers.chunk_eval( + input=input_, + label=label_data, + chunk_scheme="IOB", + num_chunk_types=3, + ) self.assertRaises(TypeError, test_label) def test_type(): - in_data = fluid.data(name="input_", - shape=[None, 1], - dtype="int32") + in_data = fluid.data( + name="input_", shape=[None, 1], dtype="int32" + ) label = fluid.data(name="label_", shape=[1], dtype="int64") - fluid.layers.chunk_eval(input=in_data, - label=label, - chunk_scheme="IOB", - num_chunk_types=3) + fluid.layers.chunk_eval( + input=in_data, + label=label, + chunk_scheme="IOB", + num_chunk_types=3, + ) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py index 896da7d4c1d2ea3d6d12bba752ce31dbc6d90eee..f29d90ceb73dc5ccea8ff8938c3151e823eda3e8 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py @@ -21,7 +21,6 @@ import paddle class TestChunkOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), 
Program()): # The type of axis in chunk_op should be int or Variable. @@ -54,7 +53,6 @@ class TestChunkOpError(unittest.TestCase): class API_TestChunk(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') @@ -64,11 +62,9 @@ class API_TestChunk(unittest.TestCase): exe = paddle.static.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') input2 = np.array([2]).astype('int32') - r0, r1, r2, = exe.run(feed={ - "data1": input1, - "data2": input2 - }, - fetch_list=[x0, x1, x2]) + r0, r1, r2, = exe.run( + feed={"data1": input1, "data2": input2}, fetch_list=[x0, x1, x2] + ) ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) @@ -76,7 +72,6 @@ class API_TestChunk(unittest.TestCase): class API_TestChunk1(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') @@ -84,8 +79,11 @@ class API_TestChunk1(unittest.TestCase): place = paddle.CPUPlace() exe = paddle.static.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') - r0, r1, r2, = exe.run(feed={"data1": input1}, - fetch_list=[x0, x1, x2]) + ( + r0, + r1, + r2, + ) = exe.run(feed={"data1": input1}, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) @@ -93,7 +91,6 @@ class API_TestChunk1(unittest.TestCase): class API_TestDygraphChunk(unittest.TestCase): - def test_out1(self): with fluid.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/test_class_center_sample_op.py b/python/paddle/fluid/tests/unittests/test_class_center_sample_op.py index 28b9ffd88b4065fc31724a9996fe53a0d90e67b9..53b9cfad3453012b62e53d889a82487c09b6b831 100644 --- a/python/paddle/fluid/tests/unittests/test_class_center_sample_op.py +++ b/python/paddle/fluid/tests/unittests/test_class_center_sample_op.py @@ -28,10 +28,13 @@ def class_center_sample_numpy(label, classes_list, num_samples): unique_label_per_device = [] for i in range(nranks): - index = np.logical_and(unique_label >= class_interval[i], - unique_label < class_interval[i + 1]) - pos_class_center_per_device.append(unique_label[index] - - class_interval[i]) + index = np.logical_and( + unique_label >= class_interval[i], + unique_label < class_interval[i + 1], + ) + pos_class_center_per_device.append( + unique_label[index] - class_interval[i] + ) unique_label_per_device.append(unique_label[index]) num_samples_per_device = [] @@ -41,8 +44,9 @@ def class_center_sample_numpy(label, classes_list, num_samples): remapped_dict = {} for i in range(nranks): - for idx, v in enumerate(unique_label_per_device[i], - sampled_class_interval[i]): + for idx, v in enumerate( + unique_label_per_device[i], sampled_class_interval[i] + ): remapped_dict[v] = idx remapped_label = [] @@ -62,14 +66,12 @@ def python_api( fix_seed=False, seed=0, ): - return paddle.nn.functional.class_center_sample(label, - num_classes=num_classes, - num_samples=num_samples, - group=None) + return paddle.nn.functional.class_center_sample( + label, num_classes=num_classes, num_samples=num_samples, group=None + ) class TestClassCenterSampleOp(OpTest): - def initParams(self): self.op_type = "class_center_sample" 
self.python_api = python_api @@ -88,17 +90,18 @@ class TestClassCenterSampleOp(OpTest): self.initParams() self.init_dtype() self.init_fix_seed() - label = np.random.randint(0, - self.num_classes, (self.batch_size, ), - dtype=self.dtype) + label = np.random.randint( + 0, self.num_classes, (self.batch_size,), dtype=self.dtype + ) remapped_label, sampled_class_center = class_center_sample_numpy( - label, [self.num_classes], self.num_samples) + label, [self.num_classes], self.num_samples + ) self.inputs = {'Label': label} self.outputs = { 'RemappedLabel': remapped_label.astype(self.dtype), - 'SampledLocalClassCenter': sampled_class_center.astype(self.dtype) + 'SampledLocalClassCenter': sampled_class_center.astype(self.dtype), } self.attrs = { @@ -109,24 +112,22 @@ class TestClassCenterSampleOp(OpTest): } def test_check_output(self): - self.check_output(no_check_set=['SampledLocalClassCenter'], - check_eager=True) + self.check_output( + no_check_set=['SampledLocalClassCenter'], check_eager=True + ) class TestClassCenterSampleOpINT32(TestClassCenterSampleOp): - def init_dtype(self): self.dtype = np.int32 class TestClassCenterSampleOpFixSeed(TestClassCenterSampleOp): - def init_fix_seed(self): self.fix_seed = True class TestClassCenterSampleV2(unittest.TestCase): - def setUp(self): self.initParams() np.random.seed(self.seed) @@ -151,27 +152,37 @@ class TestClassCenterSampleV2(unittest.TestCase): def check_static_result(self, place): with program_guard(Program(), Program()): - label_np = np.random.randint(0, - self.num_classes, (self.batch_size, ), - dtype=self.dtype) - - label = paddle.static.data(name='label', - shape=[self.batch_size], - dtype=self.dtype) - remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample( - label, self.num_classes, self.num_samples) - - remapped_label_np, sampled_class_center_np = class_center_sample_numpy( - label_np, [self.num_classes], self.num_samples) + label_np = np.random.randint( + 0, self.num_classes, (self.batch_size,), dtype=self.dtype + ) + + label = paddle.static.data( + name='label', shape=[self.batch_size], dtype=self.dtype + ) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, self.num_classes, self.num_samples + ) + + ( + remapped_label_np, + sampled_class_center_np, + ) = class_center_sample_numpy( + label_np, [self.num_classes], self.num_samples + ) exe = paddle.fluid.Executor(place) - [remapped_label_res, sampled_class_index_res - ] = exe.run(paddle.fluid.default_main_program(), - feed={'label': label_np}, - fetch_list=[remapped_label, sampled_class_index]) + [remapped_label_res, sampled_class_index_res] = exe.run( + paddle.fluid.default_main_program(), + feed={'label': label_np}, + fetch_list=[remapped_label, sampled_class_index], + ) np.testing.assert_allclose(remapped_label_res, remapped_label_np) np.testing.assert_allclose( - sampled_class_index_res[:len(sampled_class_center_np[0])], - sampled_class_center_np[0]) + sampled_class_index_res[: len(sampled_class_center_np[0])], + sampled_class_center_np[0], + ) def test_dynamic(self): for place in self.places: @@ -179,33 +190,40 @@ class TestClassCenterSampleV2(unittest.TestCase): def check_dynamic_result(self, place): with paddle.fluid.dygraph.guard(place): - label_np = np.random.randint(0, - self.num_classes, (self.batch_size, ), - dtype=self.dtype) + label_np = np.random.randint( + 0, self.num_classes, (self.batch_size,), dtype=self.dtype + ) label = paddle.to_tensor(label_np, dtype=self.dtype) - remapped_label, 
sampled_class_index = paddle.nn.functional.class_center_sample( - label, self.num_classes, self.num_samples) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, self.num_classes, self.num_samples + ) - remapped_label_np, sampled_class_center_np = class_center_sample_numpy( - label_np, [self.num_classes], self.num_samples) + ( + remapped_label_np, + sampled_class_center_np, + ) = class_center_sample_numpy( + label_np, [self.num_classes], self.num_samples + ) remapped_label_res = remapped_label.numpy() sampled_class_index_res = sampled_class_index.numpy() np.testing.assert_allclose(remapped_label_res, remapped_label_np) np.testing.assert_allclose( - sampled_class_index_res[:len(sampled_class_center_np[0])], - sampled_class_center_np[0]) + sampled_class_index_res[: len(sampled_class_center_np[0])], + sampled_class_center_np[0], + ) class TestClassCenterSampleV2INT32(TestClassCenterSampleV2): - def init_dtype(self): self.dtype = np.int32 class TestClassCenterSampleAPIError(unittest.TestCase): - def setUp(self): self.initParams() np.random.seed(self.seed) @@ -224,24 +242,28 @@ class TestClassCenterSampleAPIError(unittest.TestCase): self.dtype = np.int64 def test_dynamic_errors(self): - def test_num_samples(): for place in self.places: with paddle.fluid.dygraph.guard(place): - label_np = np.random.randint(0, - self.num_classes, - (self.batch_size, ), - dtype=self.dtype) + label_np = np.random.randint( + 0, + self.num_classes, + (self.batch_size,), + dtype=self.dtype, + ) label = paddle.to_tensor(label_np) - remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample( - label, self.num_classes, self.num_samples) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, self.num_classes, self.num_samples + ) self.assertRaises(ValueError, test_num_samples) class TestClassCenterSampleAPIError1(unittest.TestCase): - def setUp(self): self.initParams() np.random.seed(self.seed) @@ -260,26 +282,35 @@ class TestClassCenterSampleAPIError1(unittest.TestCase): self.dtype = np.int64 def test_dynamic_errors(self): - def test_empty_label(): for place in self.places: with paddle.fluid.dygraph.guard(place): label = paddle.to_tensor(np.array([], dtype=self.dtype)) - remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample( - label, self.num_classes, self.num_samples) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, self.num_classes, self.num_samples + ) def test_group_value(): for place in self.places: with paddle.fluid.dygraph.guard(place): - label_np = np.random.randint(0, - self.num_classes, - (self.batch_size, ), - dtype=self.dtype) + label_np = np.random.randint( + 0, + self.num_classes, + (self.batch_size,), + dtype=self.dtype, + ) label = paddle.to_tensor(label_np) - remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample( - label, self.num_classes, self.num_samples, group=True) + ( + remapped_label, + sampled_class_index, + ) = paddle.nn.functional.class_center_sample( + label, self.num_classes, self.num_samples, group=True + ) self.assertRaises(ValueError, test_empty_label) self.assertRaises(ValueError, test_group_value) diff --git a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py index ec95442a37cb37cc39c8a11d71df27e4276c9607..d7b9c6cd528552fd8f1a37418389407293bf6033 100644 --- 
a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py @@ -22,7 +22,6 @@ import paddle.fluid.core as core class TestClipByNormOp(OpTest): - def setUp(self): self.max_relative_error = 0.006 self.python_api = fluid.layers.clip_by_norm @@ -47,7 +46,7 @@ class TestClipByNormOp(OpTest): self.check_output(check_eager=True) def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1.0 def init_dtype(self): @@ -55,28 +54,24 @@ class TestClipByNormOp(OpTest): class TestCase1(TestClipByNormOp): - def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1e20 class TestCase2(TestClipByNormOp): - def initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestCase3(TestClipByNormOp): - def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 class TestClipByNormOpFp16(TestClipByNormOp): - def init_dtype(self): self.dtype = np.float16 @@ -84,34 +79,30 @@ class TestClipByNormOpFp16(TestClipByNormOp): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_output_with_place(place, - atol=0.001, - check_eager=True) + self.check_output_with_place( + place, atol=0.001, check_eager=True + ) class TestClipByNormOpFp16Case1(TestClipByNormOpFp16): - def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1e20 class TestClipByNormOpFp16Case2(TestClipByNormOpFp16): - def initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestClipByNormOpFp16Case3(TestClipByNormOpFp16): - def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 class TestClipByNormOpWithSelectedRows(unittest.TestCase): - def check_with_place(self, place): self.config_test_case() scope = core.Scope() @@ -128,10 +119,9 @@ class TestClipByNormOpWithSelectedRows(unittest.TestCase): out_selected_rows = scope.var('Out').get_selected_rows() # run clip_by_norm_op - clip_by_norm_op = fluid.op.Operator("clip_by_norm", - max_norm=self.max_norm, - X='X', - Out='Out') + clip_by_norm_op = fluid.op.Operator( + "clip_by_norm", max_norm=self.max_norm, X='X', Out='Out' + ) clip_by_norm_op.run(scope, place) # check output @@ -146,11 +136,13 @@ class TestClipByNormOpWithSelectedRows(unittest.TestCase): output = self.max_norm * y_np / norm else: output = y_np - np.testing.assert_allclose(np.array(out_tensor), - output, - rtol=1e-05, - atol=1e-05, - equal_nan=False) + np.testing.assert_allclose( + np.array(out_tensor), + output, + rtol=1e-05, + atol=1e-05, + equal_nan=False, + ) def test_clip_by_norm_with_selected_ros(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py index a8e3b90af90f8e74a0d29a1678e45d6cffd763ba..e955bd71a643077fe2f30c832aa891c1cdf8db0b 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_op.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestClipOp(OpTest): - def setUp(self): self.max_relative_error = 0.006 self.python_api = paddle.clip @@ -70,7 +69,6 @@ class TestClipOp(OpTest): class TestCase1(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (8, 16, 8) @@ -79,7 +77,6 @@ class TestCase1(TestClipOp): class TestCase2(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (8, 16) @@ -88,7 +85,6 @@ class TestCase2(TestClipOp): class TestCase3(TestClipOp): 
- def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 16) @@ -97,7 +93,6 @@ class TestCase3(TestClipOp): class TestCase4(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 8) @@ -108,7 +103,6 @@ class TestCase4(TestClipOp): class TestCase5(TestClipOp): - def initTestCase(self): self.dtype = np.float32 self.shape = (4, 8, 16) @@ -117,7 +111,6 @@ class TestCase5(TestClipOp): class TestCase6(TestClipOp): - def initTestCase(self): self.dtype == np.float16 self.shape = (4, 8, 8) @@ -128,7 +121,6 @@ class TestCase6(TestClipOp): class TestClipOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -148,7 +140,6 @@ class TestClipOpError(unittest.TestCase): class TestClipAPI(unittest.TestCase): - def _executed_api(self, x, min=None, max=None): return paddle.clip(x, min, max) @@ -160,8 +151,11 @@ class TestClipAPI(unittest.TestCase): min = fluid.data(name='min', shape=[1], dtype='float32') max = fluid.data(name='max', shape=[1], dtype='float32') - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) out_1 = self._executed_api(images, min=min, max=max) @@ -170,29 +164,51 @@ class TestClipAPI(unittest.TestCase): out_4 = self._executed_api(images, max=0.7) out_5 = self._executed_api(images, min=min) out_6 = self._executed_api(images, max=max) - out_7 = self._executed_api(images, max=-1.) + out_7 = self._executed_api(images, max=-1.0) out_8 = self._executed_api(images) - out_9 = self._executed_api(paddle.cast(images, 'float64'), - min=0.2, - max=0.9) - out_10 = self._executed_api(paddle.cast(images * 10, 'int32'), - min=2, - max=8) - out_11 = self._executed_api(paddle.cast(images * 10, 'int64'), - min=2, - max=8) - - res1, res2, res3, res4, res5, res6, res7, res8, res9, res10, res11 = exe.run( + out_9 = self._executed_api( + paddle.cast(images, 'float64'), min=0.2, max=0.9 + ) + out_10 = self._executed_api( + paddle.cast(images * 10, 'int32'), min=2, max=8 + ) + out_11 = self._executed_api( + paddle.cast(images * 10, 'int64'), min=2, max=8 + ) + + ( + res1, + res2, + res3, + res4, + res5, + res6, + res7, + res8, + res9, + res10, + res11, + ) = exe.run( fluid.default_main_program(), feed={ "image": data, "min": np.array([0.2]).astype('float32'), - "max": np.array([0.8]).astype('float32') + "max": np.array([0.8]).astype('float32'), }, fetch_list=[ - out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8, out_9, - out_10, out_11 - ]) + out_1, + out_2, + out_3, + out_4, + out_5, + out_6, + out_7, + out_8, + out_9, + out_10, + out_11, + ], + ) np.testing.assert_allclose(res1, data.clip(0.2, 0.8), rtol=1e-05) np.testing.assert_allclose(res2, data.clip(0.2, 0.9), rtol=1e-05) @@ -202,21 +218,24 @@ class TestClipAPI(unittest.TestCase): np.testing.assert_allclose(res6, data.clip(max=0.8), rtol=1e-05) np.testing.assert_allclose(res7, data.clip(max=-1), rtol=1e-05) np.testing.assert_allclose(res8, data, rtol=1e-05) - np.testing.assert_allclose(res9, - data.astype(np.float64).clip(0.2, 0.9), - rtol=1e-05) - np.testing.assert_allclose(res10, - (data * 10).astype(np.int32).clip(2, 8), - rtol=1e-05) - np.testing.assert_allclose(res11, - (data * 10).astype(np.int64).clip(2, 8), - rtol=1e-05) + np.testing.assert_allclose( + res9, data.astype(np.float64).clip(0.2, 0.9), rtol=1e-05 + ) + np.testing.assert_allclose( + res10, (data * 
10).astype(np.int32).clip(2, 8), rtol=1e-05 + ) + np.testing.assert_allclose( + res11, (data * 10).astype(np.int64).clip(2, 8), rtol=1e-05 + ) paddle.disable_static() def func_clip_dygraph(self): paddle.disable_static() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.disable_static(place) data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') @@ -230,33 +249,33 @@ class TestClipAPI(unittest.TestCase): images = paddle.to_tensor(data, dtype='float32') out_3 = self._executed_api(images, min=v_min, max=v_max) - out_4 = self._executed_api(paddle.cast(images * 10, 'int32'), - min=2, - max=8) - out_5 = self._executed_api(paddle.cast(images * 10, 'int64'), - min=2, - max=8) + out_4 = self._executed_api( + paddle.cast(images * 10, 'int32'), min=2, max=8 + ) + out_5 = self._executed_api( + paddle.cast(images * 10, 'int64'), min=2, max=8 + ) # test with numpy.generic out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8)) - np.testing.assert_allclose(out_1.numpy(), - data.clip(0.2, 0.8), - rtol=1e-05) - np.testing.assert_allclose(out_2.numpy(), - data.clip(0.2, 0.9), - rtol=1e-05) - np.testing.assert_allclose(out_3.numpy(), - data.clip(0.2, 0.8), - rtol=1e-05) - np.testing.assert_allclose(out_4.numpy(), - (data * 10).astype(np.int32).clip(2, 8), - rtol=1e-05) - np.testing.assert_allclose(out_5.numpy(), - (data * 10).astype(np.int64).clip(2, 8), - rtol=1e-05) - np.testing.assert_allclose(out_6.numpy(), - data.clip(0.2, 0.8), - rtol=1e-05) + np.testing.assert_allclose( + out_1.numpy(), data.clip(0.2, 0.8), rtol=1e-05 + ) + np.testing.assert_allclose( + out_2.numpy(), data.clip(0.2, 0.9), rtol=1e-05 + ) + np.testing.assert_allclose( + out_3.numpy(), data.clip(0.2, 0.8), rtol=1e-05 + ) + np.testing.assert_allclose( + out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8), rtol=1e-05 + ) + np.testing.assert_allclose( + out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8), rtol=1e-05 + ) + np.testing.assert_allclose( + out_6.numpy(), data.clip(0.2, 0.8), rtol=1e-05 + ) def test_clip_dygraph(self): with _test_eager_guard(): @@ -292,7 +311,6 @@ class TestClipAPI(unittest.TestCase): class TestInplaceClipAPI(TestClipAPI): - def _executed_api(self, x, min=None, max=None): return x.clip_(min, max) diff --git a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py index 2bbac7a09e9b303ce432216a75dc9c10f6232062..bcd9eb412bc9965005529def81b5f5620dbd9407 100644 --- a/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py @@ -20,30 +20,40 @@ import paddle.fluid as fluid import paddle -def coalesce_tensor_eager_api(Input, - datatype=core.VarDesc.VarType.FP32, - copy_data=False, - set_constant=False, - persist_output=False, - constant=0.0, - use_align=True, - align_size=-1, - user_defined_size_of_dtype=-1, - concated_shapes=[], - concated_ranks=[]): +def coalesce_tensor_eager_api( + Input, + datatype=core.VarDesc.VarType.FP32, + copy_data=False, + set_constant=False, + persist_output=False, + constant=0.0, + use_align=True, + align_size=-1, + user_defined_size_of_dtype=-1, + concated_shapes=[], + concated_ranks=[], +): if datatype == int(core.VarDesc.VarType.FP32): datatype = core.VarDesc.VarType.FP32 - return paddle._C_ops.coalesce_tensor(Input, datatype, copy_data, - set_constant, 
persist_output, constant, - use_align, align_size, - user_defined_size_of_dtype, - concated_shapes, concated_ranks) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + return paddle._C_ops.coalesce_tensor( + Input, + datatype, + copy_data, + set_constant, + persist_output, + constant, + use_align, + align_size, + user_defined_size_of_dtype, + concated_shapes, + concated_ranks, + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAllocContinuousSpace(OpTest): - def setUp(self): self.python_api = coalesce_tensor_eager_api self.op_type = "coalesce_tensor" @@ -51,7 +61,8 @@ class TestAllocContinuousSpace(OpTest): self.attrs = self.init_attr() self.Inputs = self.init_input() self.Outputs, self.FusedOutput = self.init_output( - self.Inputs, self.attrs["set_constant"], self.attrs["constant"]) + self.Inputs, self.attrs["set_constant"], self.attrs["constant"] + ) self.inputs = {'Input': self.Inputs} self.outputs = {'Output': self.Outputs, 'FusedOutput': self.FusedOutput} @@ -73,7 +84,7 @@ class TestAllocContinuousSpace(OpTest): "copy_data": True, "set_constant": False, "constant": 0.0, - "dtype": self.fluid_dtype + "dtype": self.fluid_dtype, } def init_output(self, input_list, set_constant, constant): @@ -94,9 +105,10 @@ class TestAllocContinuousSpace(OpTest): coalesce_tensor_var = np.concatenate([input for input in inputs]) if set_constant: coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant - outputs = [(out[0], - np.ones(out[1].shape).astype(self.dtype) * constant) - for out in outputs] + outputs = [ + (out[0], np.ones(out[1].shape).astype(self.dtype) * constant) + for out in outputs + ] return outputs, coalesce_tensor_var def verify_output(self, place): @@ -109,54 +121,65 @@ class TestAllocContinuousSpace(OpTest): tensor_input, datatype=self.attrs["dtype"], copy_data=self.attrs["copy_data"] - if "copy_data" in self.attrs else False, + if "copy_data" in self.attrs + else False, set_constant=self.attrs["set_constant"] - if "set_constant" in self.attrs else False, + if "set_constant" in self.attrs + else False, persist_output=False, constant=self.attrs["constant"] - if "constant" in self.attrs else 0.0, + if "constant" in self.attrs + else 0.0, use_align=True, align_size=-1, - user_defined_size_of_dtype=self. 
- attrs["user_defined_size_of_dtype"] - if "user_defined_size_of_dtype" in self.attrs else -1, + user_defined_size_of_dtype=self.attrs[ + "user_defined_size_of_dtype" + ] + if "user_defined_size_of_dtype" in self.attrs + else -1, concated_shapes=[], - concated_ranks=[]) + concated_ranks=[], + ) for idx, (expected, eager_output) in enumerate( - zip(self.outputs['Output'], eager_outputs)): - np.testing.assert_allclose(expected[1], - eager_output, - atol=1e-5, - err_msg=f'not equal {idx}') - np.testing.assert_allclose(self.outputs['FusedOutput'], - eager_fused_output, - atol=1e-5, - err_msg='not equal fusedoutput') + zip(self.outputs['Output'], eager_outputs) + ): + np.testing.assert_allclose( + expected[1], + eager_output, + atol=1e-5, + err_msg=f'not equal {idx}', + ) + np.testing.assert_allclose( + self.outputs['FusedOutput'], + eager_fused_output, + atol=1e-5, + err_msg='not equal fusedoutput', + ) def test_check_output(self): - self.check_output_with_place(place=core.CUDAPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5 + ) self.verify_output(core.CUDAPlace(0)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestAllocContinuousSpace2(TestAllocContinuousSpace): - def init_attr(self): return { "copy_data": False, "set_constant": True, "constant": 0.5, "dtype": self.fluid_dtype, - "user_defined_size_of_dtype": 2 + "user_defined_size_of_dtype": 2, } def test_check_output(self): - self.check_output_with_place(place=core.CUDAPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.CUDAPlace(0), no_check_set=["FusedOutput"], atol=1e-5 + ) self.verify_output(core.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py b/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py index f06102fb2f2b6279d453a5626925bc4f1f8d1da8..06011ee62d8dfe4e0b6b556272583c8a43d50020 100644 --- a/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py +++ b/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py @@ -18,27 +18,29 @@ from op_test import OpTest class TestCollectFPNProposalstOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() - self.scores_input = [('y%d' % i, (self.scores[i].reshape(-1, 1), - self.rois_lod[i])) - for i in range(self.num_level)] + self.scores_input = [ + ('y%d' % i, (self.scores[i].reshape(-1, 1), self.rois_lod[i])) + for i in range(self.num_level) + ] self.rois, self.lod = self.calc_rois_collect() - inputs_x = [('x%d' % i, (self.roi_inputs[i][:, 1:], self.rois_lod[i])) - for i in range(self.num_level)] + inputs_x = [ + ('x%d' % i, (self.roi_inputs[i][:, 1:], self.rois_lod[i])) + for i in range(self.num_level) + ] self.inputs = { 'MultiLevelRois': inputs_x, "MultiLevelScores": self.scores_input, - 'MultiLevelRoIsNum': [] + 'MultiLevelRoIsNum': [], } self.attrs = { 'post_nms_topN': self.post_nms_top_n, } self.outputs = { 'FpnRois': (self.rois, [self.lod]), - 'RoisNum': np.array(self.lod).astype('int32') + 'RoisNum': np.array(self.lod).astype('int32'), } def init_test_case(self): @@ -61,7 +63,7 @@ class TestCollectFPNProposalstOp(OpTest): def calc_rois_collect(self): roi_inputs = np.concatenate(self.roi_inputs) scores = np.concatenate(self.scores) - inds = np.argsort(-scores)[:self.post_nms_top_n] + inds = 
np.argsort(-scores)[: self.post_nms_top_n] rois = roi_inputs[inds, :] new_rois, new_lod = self.resort_roi_by_batch_id(rois) return new_rois, new_lod @@ -100,31 +102,34 @@ class TestCollectFPNProposalstOp(OpTest): class TestCollectFPNProposalstOpWithRoisNum(TestCollectFPNProposalstOp): - def set_data(self): self.init_test_case() self.make_rois() - self.scores_input = [('y%d' % i, (self.scores[i].reshape(-1, 1), - self.rois_lod[i])) - for i in range(self.num_level)] + self.scores_input = [ + ('y%d' % i, (self.scores[i].reshape(-1, 1), self.rois_lod[i])) + for i in range(self.num_level) + ] self.rois, self.lod = self.calc_rois_collect() - inputs_x = [('x%d' % i, (self.roi_inputs[i][:, 1:], self.rois_lod[i])) - for i in range(self.num_level)] - rois_num_per_level = [('rois%d' % i, - np.array(self.rois_lod[i][0]).astype('int32')) - for i in range(self.num_level)] + inputs_x = [ + ('x%d' % i, (self.roi_inputs[i][:, 1:], self.rois_lod[i])) + for i in range(self.num_level) + ] + rois_num_per_level = [ + ('rois%d' % i, np.array(self.rois_lod[i][0]).astype('int32')) + for i in range(self.num_level) + ] self.inputs = { 'MultiLevelRois': inputs_x, "MultiLevelScores": self.scores_input, - 'MultiLevelRoIsNum': rois_num_per_level + 'MultiLevelRoIsNum': rois_num_per_level, } self.attrs = { 'post_nms_topN': self.post_nms_top_n, } self.outputs = { 'FpnRois': (self.rois, [self.lod]), - 'RoisNum': np.array(self.lod).astype('int32') + 'RoisNum': np.array(self.lod).astype('int32'), } diff --git a/python/paddle/fluid/tests/unittests/test_collective_api_base.py b/python/paddle/fluid/tests/unittests/test_collective_api_base.py index 3a3e5d99ca7095786288d02859de3c27d4dbeaea..4f4be4e2dcf4d5ec36af3908aeaef37a75210013 100644 --- a/python/paddle/fluid/tests/unittests/test_collective_api_base.py +++ b/python/paddle/fluid/tests/unittests/test_collective_api_base.py @@ -82,7 +82,12 @@ def create_test_data(shape=None, dtype=None, seed=None): return create_float_test_data(shape=shape, dtype=bfloat16, seed=seed) elif dtype == "bool": return create_bool_test_data(shape=shape, seed=seed) - elif dtype == "int32" or dtype == "int64" or dtype == "int8" or dtype == "uint8": + elif ( + dtype == "int32" + or dtype == "int64" + or dtype == "int8" + or dtype == "uint8" + ): return create_int_test_data(shape=shape, dtype=dtype, seed=seed) elif dtype == "complex64" or dtype == "complex128": return create_complex_test_data(shape=shape, dtype=dtype, seed=seed) @@ -95,15 +100,12 @@ def create_test_data(shape=None, dtype=None, seed=None): class TestCollectiveAPIRunnerBase(object): - - def get_model(self, - train_prog, - startup_prog, - rank, - indata=None, - dtype=None): + def get_model( + self, train_prog, startup_prog, rank, indata=None, dtype=None + ): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def run_trainer(self, args): train_prog = fluid.Program() @@ -116,15 +118,16 @@ class TestCollectiveAPIRunnerBase(object): if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace( - device_id) #if args.use_gpu else fluid.CPUPlace() + device_id + ) # if args.use_gpu else fluid.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) place = fluid.XPUPlace(device_id) else: place = fluid.CPUPlace() - indata = create_test_data(shape=(10, 1000), - dtype=args["dtype"], - seed=os.getpid()) + indata = create_test_data( + shape=(10, 1000), dtype=args["dtype"], seed=os.getpid() + ) if args['static_mode']: result = self.get_model(train_prog, startup_prog, rank) exe = fluid.Executor(place) @@ -132,12 +135,12 @@ class TestCollectiveAPIRunnerBase(object): fetch_list = [] for elem in result: fetch_list.append(elem.name) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=fetch_list) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=fetch_list + ) else: out = self.get_model(train_prog, startup_prog, rank, indata) - #print(out, sys.stderr) + # print(out, sys.stderr) sys.stdout.buffer.write(pickle.dumps(out)) @@ -161,12 +164,13 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable self.temp_dir = tempfile.TemporaryDirectory() @@ -176,18 +180,20 @@ class TestDistBase(unittest.TestCase): nccl_version_str = subprocess.check_output( r"ldconfig -v | grep 'libnccl.so' | tail -n1 | sed -r 's/^.*\.so\.//'", stderr=subprocess.DEVNULL, - shell=True).decode('utf-8') - self._nccl_version = int("".join( - nccl_version_str.split("."))) if nccl_version_str else 0 + shell=True, + ).decode('utf-8') + self._nccl_version = ( + int("".join(nccl_version_str.split("."))) if nccl_version_str else 0 + ) def tearDown(self): self.temp_dir.cleanup() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -200,14 +206,14 @@ class TestDistBase(unittest.TestCase): def _run_cluster(self, model_file, envs): worker_endpoints = self._ps_endpoints.split(",") w0_ep, w1_ep = worker_endpoints - #print("w0_ep:",w0_ep," w1_ep:",w1_ep) + # print("w0_ep:",w0_ep," w1_ep:",w1_ep) if core.is_compiled_with_cuda(): env0 = { "FLAGS_selected_gpus": "0", "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -215,7 +221,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } elif core.is_compiled_with_xpu(): env0 = { @@ -223,7 +229,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -231,9 +237,9 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", 
"PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) if os.getenv('WITH_COVERAGE', 'OFF') == 'ON': @@ -242,22 +248,28 @@ class TestDistBase(unittest.TestCase): tr_cmd = "%s %s" tr0_cmd = tr_cmd % (self._python_interp, model_file) tr1_cmd = tr_cmd % (self._python_interp, model_file) - path0 = os.path.join(self.temp_dir.name, - "/tmp/tr0_err_%d.log" % os.getpid()) - path1 = os.path.join(self.temp_dir.name, - "/tmp/tr1_err_%d.log" % os.getpid()) + path0 = os.path.join( + self.temp_dir.name, "/tmp/tr0_err_%d.log" % os.getpid() + ) + path1 = os.path.join( + self.temp_dir.name, "/tmp/tr1_err_%d.log" % os.getpid() + ) tr0_pipe = open(path0, "w") tr1_pipe = open(path1, "w") - #print(tr0_cmd) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) - - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + # print(tr0_cmd) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) + + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -270,19 +282,25 @@ class TestDistBase(unittest.TestCase): sys.stderr.write('trainer 0 stderr file: %s\n' % f.read()) with open(path1, "r") as f: sys.stderr.write('trainer 1 stderr file: %s\n' % f.read()) - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid - - def check_with_place(self, - model_file, - col_type, - backend="nccl", - path_id="0", - static_mode="1", - check_error_log=False, - need_envs={}, - eager_mode=True, - dtype=None): + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) + + def check_with_place( + self, + model_file, + col_type, + backend="nccl", + path_id="0", + static_mode="1", + check_error_log=False, + need_envs={}, + eager_mode=True, + dtype=None, + ): if backend == "nccl" or backend == "bkcl": with_gloo = '0' else: @@ -296,7 +314,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_DISTRI_BACKEND": backend, "BACKEND": backend, "PATH_ID": path_id, - "DTYPE": dtype + "DTYPE": dtype, } required_envs.update(additional_envs) required_envs.update(need_envs) @@ -307,7 +325,8 @@ class TestDistBase(unittest.TestCase): if os.getenv('NVIDIA_TF32_OVERRIDE', '') is not None: required_envs['NVIDIA_TF32_OVERRIDE'] = os.getenv( - 'NVIDIA_TF32_OVERRIDE', '') + 'NVIDIA_TF32_OVERRIDE', '' + ) if eager_mode: required_envs["FLAGS_enable_eager_mode"] = "%d" % 1 @@ -315,7 +334,8 @@ class TestDistBase(unittest.TestCase): required_envs["FLAGS_enable_eager_mode"] = "%d" % 0 tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) input1 = create_test_data(shape=(10, 1000), dtype=dtype, seed=pid0) input2 = create_test_data(shape=(10, 1000), dtype=dtype, seed=pid1) # cast bfloat16 to float32 for numeric comparison @@ -347,14 +367,14 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr0_out[0], need_result, rtol=rtol) elif col_type == "scatter": need_result = input2 - need_result1 = need_result[0:need_result.shape[0] // 2] - need_result2 = need_result[need_result.shape[0] // 2:] + need_result1 = need_result[0 : need_result.shape[0] // 2] + need_result2 = 
need_result[need_result.shape[0] // 2 :] np.testing.assert_allclose(tr0_out[0], need_result1, rtol=1e-05) np.testing.assert_allclose(tr1_out[0], need_result2, rtol=1e-05) elif col_type == "reduce_scatter": need_result = input1 + input2 - need_result1 = need_result[0:need_result.shape[0] // 2] - need_result2 = need_result[need_result.shape[0] // 2:] + need_result1 = need_result[0 : need_result.shape[0] // 2] + need_result2 = need_result[need_result.shape[0] // 2 :] if dtype == "bfloat16": rtol = 8e-03 else: @@ -369,14 +389,12 @@ class TestDistBase(unittest.TestCase): else: rtol = 1e-05 atol = 1e-05 - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=rtol, - atol=atol) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=rtol, - atol=atol) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=rtol, atol=atol + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=rtol, atol=atol + ) elif col_type == "parallel_embedding": result_data = tr0_out[0] np.random.seed(2020) @@ -384,48 +402,51 @@ class TestDistBase(unittest.TestCase): for i in range(result_data.shape[0]): for j in range(result_data.shape[1]): data = result_data[i][j] - assert np.allclose(tr0_out[1][i][j], - need_result[data], - atol=1e-08) + assert np.allclose( + tr0_out[1][i][j], need_result[data], atol=1e-08 + ) elif col_type == "row_parallel_linear": result_data = tr0_out[0] np.random.seed(2020) weight = np.random.rand(1000, 16) need_result = np.matmul(input1, weight) - np.testing.assert_allclose(result_data, - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + result_data, need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "column_parallel_linear": result_data = tr0_out[0] np.random.seed(2020) weight = np.random.rand(1000, 16) need_result = np.matmul(input1, weight) - np.testing.assert_allclose(result_data, - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + result_data, need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "alltoall": - need_result1 = np.vstack((input1[0:input1.shape[0] // 2, :], - input2[0:input2.shape[0] // 2, :])) - need_result2 = np.vstack((input1[input1.shape[0] // 2:, :], - input2[input2.shape[0] // 2:, :])) + need_result1 = np.vstack( + ( + input1[0 : input1.shape[0] // 2, :], + input2[0 : input2.shape[0] // 2, :], + ) + ) + need_result2 = np.vstack( + ( + input1[input1.shape[0] // 2 :, :], + input2[input2.shape[0] // 2 :, :], + ) + ) tr0_out = np.vstack(tr0_out) tr1_out = np.vstack(tr1_out) - np.testing.assert_allclose(tr0_out, - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out, - need_result2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out, need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out, need_result2, rtol=1e-05, atol=1e-05 + ) elif col_type == "sendrecv": result_data = tr1_out[0] - np.testing.assert_allclose(input1, - result_data, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + input1, result_data, rtol=1e-05, atol=1e-05 + ) elif col_type == "global_gather": in_feat = 2 n_expert = 2 @@ -434,7 +455,8 @@ class TestDistBase(unittest.TestCase): np.random.seed(pid0) local_expert_count1 = np.random.randint( - 1, 4, size=tot_expert).astype("int") + 1, 4, size=tot_expert + ).astype("int") expert_ptr1 = np.ones(tot_expert, dtype=np.int32) expert_ptr1[0] = 0 for i in range(1, tot_expert): @@ -442,7 +464,8 @@ class TestDistBase(unittest.TestCase): np.random.seed(pid1) local_expert_count2 = np.random.randint( - 1, 4, 
size=tot_expert).astype("int") + 1, 4, size=tot_expert + ).astype("int") expert_ptr2 = np.ones(tot_expert, dtype=np.int32) expert_ptr2[0] = 0 for i in range(1, tot_expert): @@ -457,12 +480,14 @@ class TestDistBase(unittest.TestCase): np.random.seed(pid0) fwd_expert_count = sum(global_expert_count1).astype("int") - local_input_buf1 = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf1 = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) np.random.seed(pid1) fwd_expert_count = sum(global_expert_count2).astype("int") - local_input_buf2 = np.random.rand(fwd_expert_count, - in_feat).astype("float32") + local_input_buf2 = np.random.rand(fwd_expert_count, in_feat).astype( + "float32" + ) output1 = [[], [], [], []] output2 = [[], [], [], []] send_ptr1 = 0 @@ -472,17 +497,21 @@ class TestDistBase(unittest.TestCase): for j in range(world_size): idx = j * n_expert + i if j == 0: - output1_part1 = local_input_buf1[send_ptr1: \ - send_ptr1 + global_expert_count1[idx], :] - output1_part2 = local_input_buf2[send_ptr2: \ - send_ptr2 + global_expert_count2[idx], :] + output1_part1 = local_input_buf1[ + send_ptr1 : send_ptr1 + global_expert_count1[idx], : + ] + output1_part2 = local_input_buf2[ + send_ptr2 : send_ptr2 + global_expert_count2[idx], : + ] output1[i].extend(output1_part1) output1[i + n_expert].extend(output1_part2) else: - output2_part1 = local_input_buf1[send_ptr1: \ - send_ptr1 + global_expert_count1[idx]] - output2_part2 = local_input_buf2[send_ptr2: \ - send_ptr2 + global_expert_count2[idx]] + output2_part1 = local_input_buf1[ + send_ptr1 : send_ptr1 + global_expert_count1[idx] + ] + output2_part2 = local_input_buf2[ + send_ptr2 : send_ptr2 + global_expert_count2[idx] + ] output2[i].extend(output2_part1) output2[i + n_expert].extend(output2_part2) send_ptr1 = send_ptr1 + global_expert_count1[idx] @@ -503,12 +532,14 @@ class TestDistBase(unittest.TestCase): output1 = np.array([]) else: output1 = np.concatenate(result1, axis=0).reshape( - sum(local_expert_count1), in_feat) + sum(local_expert_count1), in_feat + ) if result2 == []: output2 = np.array([]) else: output2 = np.concatenate(result2, axis=0).reshape( - sum(local_expert_count2), in_feat) + sum(local_expert_count2), in_feat + ) if tr0_out[0] is None or tr0_out[0].shape[0] == 0: tr0_out[0] = np.array([]) @@ -516,30 +547,27 @@ class TestDistBase(unittest.TestCase): if tr1_out[0] is None or tr1_out[0].shape[0] == 0: tr1_out[0] = np.array([]) - np.testing.assert_allclose(tr0_out[0], - output1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - output2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], output1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], output2, rtol=1e-05, atol=1e-05 + ) if static_mode == 0: - np.testing.assert_allclose(tr0_out[1], - 2 * local_input_buf1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[1], - 2 * local_input_buf2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[1], 2 * local_input_buf1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[1], 2 * local_input_buf2, rtol=1e-05, atol=1e-05 + ) elif col_type == "global_scatter": np.random.seed(pid0) local_expert_count1 = np.random.randint(1, 4, size=4).astype("int") fwd_expert_count = sum(local_expert_count1) - local_input_buf1 = np.random.rand(fwd_expert_count, - 2).astype("float32") + local_input_buf1 = np.random.rand(fwd_expert_count, 2).astype( + "float32" + ) expert_ptr1 = np.ones(4, 
dtype=np.int32) expert_ptr1[0] = 0 for i in range(1, 4): @@ -547,8 +575,9 @@ class TestDistBase(unittest.TestCase): np.random.seed(pid1) local_expert_count2 = np.random.randint(1, 4, size=4).astype("int") fwd_expert_count = sum(local_expert_count2) - local_input_buf2 = np.random.rand(fwd_expert_count, - 2).astype("float32") + local_input_buf2 = np.random.rand(fwd_expert_count, 2).astype( + "float32" + ) expert_ptr2 = np.ones(4, dtype=np.int32) expert_ptr2[0] = 0 for i in range(1, 4): @@ -561,15 +590,31 @@ class TestDistBase(unittest.TestCase): idx = j * 2 + i if j == 0: # send data to 0 card - output1.append(local_input_buf1[expert_ptr1[idx]: \ - expert_ptr1[idx]+local_expert_count1[idx]]) - output1.append(local_input_buf2[expert_ptr2[idx]:\ - expert_ptr2[idx]+local_expert_count2[idx]]) + output1.append( + local_input_buf1[ + expert_ptr1[idx] : expert_ptr1[idx] + + local_expert_count1[idx] + ] + ) + output1.append( + local_input_buf2[ + expert_ptr2[idx] : expert_ptr2[idx] + + local_expert_count2[idx] + ] + ) else: - output2.append(local_input_buf1[expert_ptr1[idx]: \ - expert_ptr1[idx]+local_expert_count1[idx]]) - output2.append(local_input_buf2[expert_ptr2[idx]:\ - expert_ptr2[idx]+local_expert_count2[idx]]) + output2.append( + local_input_buf1[ + expert_ptr1[idx] : expert_ptr1[idx] + + local_expert_count1[idx] + ] + ) + output2.append( + local_input_buf2[ + expert_ptr2[idx] : expert_ptr2[idx] + + local_expert_count2[idx] + ] + ) if output1 == []: output1 = np.array([]) else: @@ -585,22 +630,18 @@ class TestDistBase(unittest.TestCase): if tr1_out[0] is None or tr1_out[0].shape[0] == 0: tr1_out[0] = np.array([]) - np.testing.assert_allclose(tr0_out[0], - output1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - output2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], output1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], output2, rtol=1e-05, atol=1e-05 + ) if static_mode == 0: - np.testing.assert_allclose(tr0_out[1], - 2 * local_input_buf1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[1], - 2 * local_input_buf2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[1], 2 * local_input_buf1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[1], 2 * local_input_buf2, rtol=1e-05, atol=1e-05 + ) else: pass diff --git a/python/paddle/fluid/tests/unittests/test_collective_base.py b/python/paddle/fluid/tests/unittests/test_collective_base.py index 7300c0d13dc48bd634b18b34284230173f124aab..2104f1865f4a9d664959a8439136deb63f2bc779 100644 --- a/python/paddle/fluid/tests/unittests/test_collective_base.py +++ b/python/paddle/fluid/tests/unittests/test_collective_base.py @@ -27,10 +27,10 @@ from paddle.fluid import core class TestCollectiveRunnerBase(object): - def get_model(self, train_prog, startup_prog): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." 
+ ) def wait_server_ready(self, endpoints): while True: @@ -38,13 +38,15 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -52,44 +54,51 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break + # endpoints should be ["ip1:port1","ip2:port2"] -#endpoints should be ["ip1:port1","ip2:port2"] - - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - nccl_id_var = block.create_var(name=nameGen.generate('nccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) + nccl_id_var = block.create_var( + name=nameGen.generate('nccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': nccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': nccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) - block.append_op(type='c_comm_init', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': self.global_ring_id - }) + block.append_op( + type='c_comm_init', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': self.global_ring_id, + }, + ) def run_trainer(self, args): train_prog = fluid.Program() @@ -98,20 +107,22 @@ class TestCollectiveRunnerBase(object): rank = args["trainerid"] current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) self.rank = rank result = self.get_model(train_prog, startup_prog) device_id = int(os.getenv("FLAGS_selected_gpus", "0")) place = fluid.CUDAPlace( - device_id) #if args.use_gpu else fluid.CPUPlace() + device_id + ) # if args.use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_prog) np.random.seed(os.getpid()) indata = np.random.random((10, 1000)) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=[result.name]) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=[result.name] + ) sys.stdout.buffer.write(pickle.dumps(out)) @@ -132,12 +143,13 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set 
= set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable self.temp_dir = tempfile.TemporaryDirectory() @@ -146,10 +158,10 @@ class TestDistBase(unittest.TestCase): self.temp_dir.cleanup() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -162,13 +174,13 @@ class TestDistBase(unittest.TestCase): def _run_cluster(self, model_file, envs): worker_endpoints = self._ps_endpoints.split(",") w0_ep, w1_ep = worker_endpoints - #print("w0_ep:",w0_ep," w1_ep:",w1_ep) + # print("w0_ep:",w0_ep," w1_ep:",w1_ep) env0 = { "FLAGS_selected_gpus": "0", "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -176,9 +188,9 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) tr_cmd = "%s %s" @@ -188,16 +200,20 @@ class TestDistBase(unittest.TestCase): path1 = os.path.join(self.temp_dir.name, "/tmp/tr1_err.log") tr0_pipe = open(path0, "wb") tr1_pipe = open(path1, "wb") - #print(tr0_cmd) - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) + # print(tr0_cmd) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) - tr1_proc = subprocess.Popen(tr0_cmd.strip().split(), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + tr1_proc = subprocess.Popen( + tr0_cmd.strip().split(), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -206,14 +222,16 @@ class TestDistBase(unittest.TestCase): # close trainer file tr0_pipe.close() tr1_pipe.close() - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) - def check_with_place(self, - model_file, - col_type, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, col_type, check_error_log=False, need_envs={} + ): required_envs = { "FLAGS_fraction_of_gpu_memory_to_use": "0.15", "FLAGS_eager_delete_tensor_gb": "0.0", @@ -222,14 +240,15 @@ class TestDistBase(unittest.TestCase): "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "LD_PRELOAD": os.getenv("LD_PRELOAD", ""), "GLOG_v": "3", - "NCCL_P2P_DISABLE": "1" + "NCCL_P2P_DISABLE": "1", } required_envs.update(need_envs) if check_error_log: required_envs["GLOG_v"] = "3" required_envs["GLOG_logtostderr"] = "1" tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) np.random.seed(pid0) input1 = np.random.random((10, 1000)) np.random.seed(pid1) @@ -247,38 +266,33 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out[0], need_result, rtol=1e-05) elif col_type == "scatter": need_result = input2 - need_result1 = need_result[0:need_result.shape[0] // 2] - 
need_result2 = need_result[need_result.shape[0] // 2:] + need_result1 = need_result[0 : need_result.shape[0] // 2] + need_result2 = need_result[need_result.shape[0] // 2 :] np.testing.assert_allclose(tr0_out[0], need_result1, rtol=1e-05) np.testing.assert_allclose(tr1_out[0], need_result2, rtol=1e-05) elif col_type == "allreduce": need_result = input1 + input2 - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "reduce_scatter": tmp = input1 + input2 - need_result1 = tmp[0:tmp.shape[0] // 2] - need_result2 = tmp[tmp.shape[0] // 2:] - np.testing.assert_allclose(tr0_out[0], - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result2, - rtol=1e-05, - atol=1e-05) + need_result1 = tmp[0 : tmp.shape[0] // 2] + need_result2 = tmp[tmp.shape[0] // 2 :] + np.testing.assert_allclose( + tr0_out[0], need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result2, rtol=1e-05, atol=1e-05 + ) elif col_type == "sendrecv": need_result = input1 - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "identity": need_result1 = input1 need_result2 = input2 @@ -296,35 +310,29 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out, need_result2, rtol=1e-05) elif col_type == "concat": need_result = np.concatenate((input1, input2), axis=1) - np.testing.assert_allclose(tr0_out[0], - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "split": need_result1 = np.split(input1, 2, axis=1)[0] need_result2 = np.split(input2, 2, axis=1)[1] - np.testing.assert_allclose(tr0_out[0], - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0], - need_result2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out[0], need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0], need_result2, rtol=1e-05, atol=1e-05 + ) elif col_type == "sendrecv_array": need_result1 = np.array([[0, 1, 2]]) need_result2 = np.array([[3, 4, 5]]) - np.testing.assert_allclose(tr1_out[0][0], - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0][1], - need_result2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr1_out[0][0], need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0][1], need_result2, rtol=1e-05, atol=1e-05 + ) else: pass diff --git a/python/paddle/fluid/tests/unittests/test_communicator_async.py b/python/paddle/fluid/tests/unittests/test_communicator_async.py index 0908dbc8df1ee9b44eaacaf7c82301c0135147ae..33a98a5c894c2e5d926dada60f3ad372835e84be 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_async.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_async.py @@ -26,7 +26,6 @@ import paddle.distributed.fleet as fleet class TestCommunicator(unittest.TestCase): - def net(self): x = fluid.layers.data(name='x', 
shape=[1], dtype='float32') y = fluid.layers.data(name='y', shape=[1], dtype='float32') @@ -40,7 +39,8 @@ class TestCommunicator(unittest.TestCase): current_id=0, role=role_maker.Role.WORKER, worker_num=2, - server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"], + ) fleet.init(role) avg_cost = self.net() diff --git a/python/paddle/fluid/tests/unittests/test_communicator_geo.py b/python/paddle/fluid/tests/unittests/test_communicator_geo.py index 6c99eaac737d9a4a4f8b2360cb1c965c69a90790..0efea743f3314473f0c4416bddc17117c3ba420f 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_geo.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_geo.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestCommunicatorGeoEnd2End(unittest.TestCase): - def net(self): x = fluid.layers.data(name='x', shape=[13], dtype='float32') x1 = fluid.layers.data(name='x1', shape=[1], dtype='int64', lod_level=1) @@ -41,8 +40,10 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase): size=[10000, 10], param_attr=fluid.ParamAttr( name="embedding", - initializer=fluid.initializer.Constant(value=0.01)), - is_sparse=True) + initializer=fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) pool = fluid.layers.sequence_pool(input=emb, pool_type="sum") z = fluid.layers.concat(input=[x, pool], axis=1) @@ -54,7 +55,6 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase): return avg_cost, x, x1, y def fake_reader(self): - def reader(): for i in range(10000): x = numpy.random.random((1, 13)).astype('float32') @@ -91,9 +91,11 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase): feeder = fluid.DataFeeder(place=place, feed_list=[x, z, y]) for batch_id, data in enumerate(train_reader()): - exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[]) + exe.run( + fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[], + ) fleet.stop_worker() @@ -166,9 +168,11 @@ half_run_server.run_ut() ps_cmd = "{} {}".format(_python, server_file) - ps_proc = subprocess.Popen(ps_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) time.sleep(5) diff --git a/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py b/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py index 23af083af566f4620a77d06147f1b1d3ed7ee0eb..328874b3f55656857f71fb76d79fdee53e4fa465 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py @@ -27,7 +27,6 @@ import paddle.distributed.fleet as fleet class TestCommunicator(unittest.TestCase): - def test_communicator_ps_gpu(self): temp_dir = tempfile.TemporaryDirectory() path = os.path.join(temp_dir.name, "test_communicator_ps_gpu.txt") @@ -42,9 +41,11 @@ class TestCommunicator(unittest.TestCase): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002,127.0.0.2:36002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36002,127.0.0.2:36002" os.environ["TRAINING_ROLE"] = "TRAINER" os.environ["FLAGS_selected_gpus"] = "0" role = role_maker.PaddleCloudRoleMaker() @@ -71,10 +72,9 @@ class TestCommunicator(unittest.TestCase): 
optimizer.minimize(avg_cost) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=1, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars + ) dataset.set_filelist(["test_communicator_ps_gpu.txt"]) dataset.set_date("20211111") dataset.load_into_memory(is_shuffle=True) diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index a06ac3a9b02183bbb5a4dba8dedca8e7ef613fc5..d9636972a13e30b6d6c501700b247f7e4e7334e0 100755 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -23,9 +23,7 @@ from paddle.fluid import Program, program_guard def create_test_class(op_type, typename, callback): - class Cls(op_test.OpTest): - def setUp(self): a = numpy.random.random(size=(10, 7)).astype(typename) b = numpy.random.random(size=(10, 7)).astype(typename) @@ -45,11 +43,9 @@ def create_test_class(op_type, typename, callback): y = fluid.layers.data(name='y', shape=[2], dtype='int32') a = fluid.layers.data(name='a', shape=[2], dtype='int16') if self.op_type == "less_than": - self.assertRaises(TypeError, - fluid.layers.less_than, - x=x, - y=y, - force_cpu=1) + self.assertRaises( + TypeError, fluid.layers.less_than, x=x, y=y, force_cpu=1 + ) op = eval("fluid.layers.%s" % self.op_type) self.assertRaises(TypeError, op, x=x, y=y, cond=1) self.assertRaises(TypeError, op, x=x, y=a) @@ -75,9 +71,7 @@ for _type_name in {'float32', 'float64', 'int32', 'int64', 'float16'}: def create_paddle_case(op_type, callback): - class PaddleCls(unittest.TestCase): - def setUp(self): self.op_type = op_type self.input_x = np.array([1, 2, 3, 4]).astype(np.int64) @@ -95,11 +89,10 @@ def create_paddle_case(op_type, callback): op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": self.input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[out], + ) self.assertEqual((res == self.real_result).all(), True) def test_api_float(self): @@ -111,11 +104,9 @@ def create_paddle_case(op_type, callback): op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": 1.0 - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": self.input_x, "y": 1.0}, fetch_list=[out] + ) self.real_result = np.array([1, 0, 0, 0]).astype(np.int64) self.assertEqual((res == self.real_result).all(), True) @@ -160,31 +151,36 @@ def create_paddle_case(op_type, callback): self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_dynamic_api_inf_2(self): if self.op_type == "equal": paddle.disable_static() - x1 = np.array([1, float('inf'), - float('inf')]).astype(np.float32) + x1 = np.array([1, float('inf'), float('inf')]).astype( + np.float32 + ) x = paddle.to_tensor(x1) - y1 = np.array([1, float('-inf'), - float('inf')]).astype(np.float32) + y1 = np.array([1, float('-inf'), float('inf')]).astype( + np.float32 + ) y = paddle.to_tensor(y1) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_dynamic_api_inf_3(self): if 
self.op_type == "equal": paddle.disable_static() - x1 = np.array([1, float('inf'), - float('-inf')]).astype(np.float32) + x1 = np.array([1, float('inf'), float('-inf')]).astype( + np.float32 + ) x = paddle.to_tensor(x1) y1 = np.array([1, 2, 3]).astype(np.float32) y = paddle.to_tensor(y1) @@ -193,7 +189,8 @@ def create_paddle_case(op_type, callback): self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_dynamic_api_nan_1(self): @@ -208,31 +205,36 @@ def create_paddle_case(op_type, callback): self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_dynamic_api_nan_2(self): if self.op_type == "equal": paddle.disable_static() - x1 = np.array([1, float('nan'), - float('nan')]).astype(np.float32) + x1 = np.array([1, float('nan'), float('nan')]).astype( + np.float32 + ) x = paddle.to_tensor(x1) - y1 = np.array([1, float('-nan'), - float('nan')]).astype(np.float32) + y1 = np.array([1, float('-nan'), float('nan')]).astype( + np.float32 + ) y = paddle.to_tensor(y1) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_dynamic_api_nan_3(self): if self.op_type == "equal": paddle.disable_static() - x1 = np.array([1, float('-nan'), - float('nan')]).astype(np.float32) + x1 = np.array([1, float('-nan'), float('nan')]).astype( + np.float32 + ) x = paddle.to_tensor(x1) y1 = np.array([1, 2, 1]).astype(np.float32) y = paddle.to_tensor(y1) @@ -241,16 +243,19 @@ def create_paddle_case(op_type, callback): self.real_result = (x1 == y1).astype(np.int64) self.assertEqual( (out.numpy().astype(np.int64) == self.real_result).all(), - True) + True, + ) paddle.enable_static() def test_not_equal(self): if self.op_type == "not_equal": paddle.disable_static() - x = paddle.to_tensor(np.array([1.2e-8, 2, 2, 1]), - dtype="float32") - y = paddle.to_tensor(np.array([1.1e-8, 2, 2, 1]), - dtype="float32") + x = paddle.to_tensor( + np.array([1.2e-8, 2, 2, 1]), dtype="float32" + ) + y = paddle.to_tensor( + np.array([1.1e-8, 2, 2, 1]), dtype="float32" + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) self.real_result = np.array([0, 0, 0, 0]).astype(np.int64) @@ -258,7 +263,6 @@ def create_paddle_case(op_type, callback): paddle.enable_static() def test_assert(self): - def test_dynamic_api_string(self): if self.op_type == "equal": paddle.disable_static() @@ -282,9 +286,9 @@ def create_paddle_case(op_type, callback): def test_broadcast_api_1(self): paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.static.data(name='x', - shape=[1, 2, 1, 3], - dtype='int32') + x = paddle.static.data( + name='x', shape=[1, 2, 1, 3], dtype='int32' + ) y = paddle.static.data(name='y', shape=[1, 2, 3], dtype='int32') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) @@ -292,31 +296,27 @@ def create_paddle_case(op_type, callback): input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32) input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) def 
test_broadcast_api_2(self): paddle.enable_static() with program_guard(Program(), Program()): x = paddle.static.data(name='x', shape=[1, 2, 3], dtype='int32') - y = paddle.static.data(name='y', - shape=[1, 2, 1, 3], - dtype='int32') + y = paddle.static.data( + name='y', shape=[1, 2, 1, 3], dtype='int32' + ) op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) input_x = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32) input_y = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) def test_broadcast_api_3(self): @@ -330,11 +330,9 @@ def create_paddle_case(op_type, callback): input_x = np.arange(0, 5).reshape((5)).astype(np.int32) input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(np.int32) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) def test_bool_api_4(self): @@ -348,11 +346,9 @@ def create_paddle_case(op_type, callback): input_x = np.array([True, False, True]).astype(np.bool_) input_y = np.array([True, True, False]).astype(np.bool_) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) def test_bool_broadcast_api_4(self): @@ -366,11 +362,9 @@ def create_paddle_case(op_type, callback): input_x = np.array([True, False, True]).astype(np.bool_) input_y = np.array([True]).astype(np.bool_) real_result = callback(input_x, input_y) - res, = exe.run(feed={ - "x": input_x, - "y": input_y - }, - fetch_list=[out]) + (res,) = exe.run( + feed={"x": input_x, "y": input_y}, fetch_list=[out] + ) self.assertEqual((res == real_result).all(), True) def test_attr_name(self): @@ -396,19 +390,18 @@ create_paddle_case('not_equal', lambda _a, _b: _a != _b) class TestCompareOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input x and y of compare_op must be Variable. 
x = fluid.layers.data(name='x', shape=[1], dtype="float32") - y = fluid.create_lod_tensor(numpy.array([[-1]]), [[1]], - fluid.CPUPlace()) + y = fluid.create_lod_tensor( + numpy.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.greater_equal, x, y) class API_TestElementwise_Equal(unittest.TestCase): - def test_api(self): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -417,7 +410,7 @@ class API_TestElementwise_Equal(unittest.TestCase): out = paddle.equal(x=label, y=limit) place = fluid.CPUPlace() exe = fluid.Executor(place) - res, = exe.run(fetch_list=[out]) + (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([True, False])).all(), True) with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -426,12 +419,11 @@ class API_TestElementwise_Equal(unittest.TestCase): out = paddle.equal(x=label, y=limit) place = fluid.CPUPlace() exe = fluid.Executor(place) - res, = exe.run(fetch_list=[out]) + (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([True, True])).all(), True) class TestCompareOpPlace(unittest.TestCase): - def test_place_1(self): paddle.enable_static() place = paddle.CPUPlace() @@ -441,7 +433,7 @@ class TestCompareOpPlace(unittest.TestCase): limit = fluid.layers.assign(np.array([3, 2], dtype="int32")) out = fluid.layers.less_than(label, limit, force_cpu=True) exe = fluid.Executor(place) - res, = exe.run(fetch_list=[out]) + (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([False, False])).all(), True) def test_place_2(self): diff --git a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py index 5508f17a3bc888dc98178cb7593a4780c254b746..bc88cba96eb6e659e6494a297eb5de5c40902a65 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py @@ -20,9 +20,7 @@ import paddle.fluid as fluid def create_test_not_equal_class(op_type, typename, callback): - class Cls(op_test.OpTest): - def setUp(self): x = np.random.random(size=(10, 7)).astype(typename) y = np.random.random(size=(10, 7)).astype(typename) @@ -41,9 +39,7 @@ def create_test_not_equal_class(op_type, typename, callback): def create_test_not_shape_equal_class(op_type, typename, callback): - class Cls(op_test.OpTest): - def setUp(self): x = np.random.random(size=(10, 7)).astype(typename) y = np.random.random(size=(10)).astype(typename) @@ -62,9 +58,7 @@ def create_test_not_shape_equal_class(op_type, typename, callback): def create_test_equal_class(op_type, typename, callback): - class Cls(op_test.OpTest): - def setUp(self): x = y = np.random.random(size=(10, 7)).astype(typename) z = callback(x, y) @@ -82,9 +76,7 @@ def create_test_equal_class(op_type, typename, callback): def create_test_dim1_class(op_type, typename, callback): - class Cls(op_test.OpTest): - def setUp(self): x = y = np.random.random(size=(1)).astype(typename) x = np.array([True, False, True]).astype(typename) @@ -112,7 +104,6 @@ for _type_name in {'float32', 'float64', 'int32', 'int64', 'bool'}: class TestEqualReduceAPI(unittest.TestCase): - def test_name(self): x = fluid.layers.assign(np.array([3, 4], dtype="int32")) y = fluid.layers.assign(np.array([3, 4], dtype="int32")) diff --git a/python/paddle/fluid/tests/unittests/test_compat.py b/python/paddle/fluid/tests/unittests/test_compat.py index e4fedede47ce3dfcb5e78408c8edff845798ebb0..d4ad8a8274e3a8b36f4eb195fae2bade5716b280 100644 --- 
a/python/paddle/fluid/tests/unittests/test_compat.py +++ b/python/paddle/fluid/tests/unittests/test_compat.py @@ -17,7 +17,6 @@ import paddle.compat as cpt class TestCompatible(unittest.TestCase): - def test_to_text(self): self.assertIsNone(cpt.to_text(None)) diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 9406d4d898ee199efc53a708fee436f4a9b6f744..8418de88fd86589574588b9caebfb3ac4374e6a3 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -22,84 +22,87 @@ from simple_nets import simple_fc_net class TestCompiledProgram(unittest.TestCase): - def setUp(self): self.seed = 100 self.img = np.random.random(size=(16, 784)).astype('float32') - self.label = np.random.randint(low=0, - high=10, - size=[16, 1], - dtype=np.int64) + self.label = np.random.randint( + low=0, high=10, size=[16, 1], dtype=np.int64 + ) with new_program_scope(): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) loss = simple_fc_net() exe.run(fluid.default_startup_program()) - loss_data, = exe.run(fluid.default_main_program(), - feed={ - "image": self.img, - "label": self.label - }, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + fluid.default_main_program(), + feed={"image": self.img, "label": self.label}, + fetch_list=[loss.name], + ) self.loss = loss_data[0] def test_compiled_program_base(self): with new_program_scope(): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) loss = simple_fc_net() exe.run(fluid.default_startup_program()) compiled_prog = fluid.CompiledProgram(fluid.default_main_program()) - loss_data, = exe.run(compiled_prog, - feed={ - "image": self.img, - "label": self.label - }, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + compiled_prog, + feed={"image": self.img, "label": self.label}, + fetch_list=[loss.name], + ) np.testing.assert_array_equal(loss_data[0], self.loss) def test_compiled_program_with_data_parallel(self): with new_program_scope(): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) loss = simple_fc_net() exe.run(fluid.default_startup_program()) compiled_prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, places=[place]) - - loss_data, = exe.run(compiled_prog, - feed={ - "image": self.img, - "label": self.label - }, - fetch_list=[loss.name]) + fluid.default_main_program() + ).with_data_parallel(loss_name=loss.name, places=[place]) + + (loss_data,) = exe.run( + compiled_prog, + feed={"image": self.img, "label": self.label}, + fetch_list=[loss.name], + ) np.testing.assert_array_equal(loss_data[0], self.loss) class TestCompiledProgramError(unittest.TestCase): - def test_program_or_graph_error(self): self.assertRaises(TypeError, fluid.CompiledProgram, 
"program") def build_simple_model(self): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') prediction = fluid.layers.fc(input=img, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -112,7 +115,8 @@ class TestCompiledProgramError(unittest.TestCase): # compile program program = fluid.default_main_program() compiled_program = fluid.CompiledProgram( - program).with_data_parallel() + program + ).with_data_parallel() return compiled_program def compile_program(self): @@ -148,7 +152,8 @@ class TestCompiledProgramError(unittest.TestCase): # compile program program = fluid.default_main_program() compiled_program = fluid.CompiledProgram( - program).with_data_parallel(share_vars_from=source_program) + program + ).with_data_parallel(share_vars_from=source_program) scope = fluid.global_scope() place = fluid.CPUPlace() with self.assertRaises(ValueError): @@ -161,7 +166,8 @@ class TestCompiledProgramError(unittest.TestCase): # compile program program = fluid.default_main_program() compiled_program = fluid.CompiledProgram( - program).with_data_parallel(share_vars_from=source_program) + program + ).with_data_parallel(share_vars_from=source_program) scope = fluid.global_scope() place = fluid.CPUPlace() with self.assertRaises(ValueError): diff --git a/python/paddle/fluid/tests/unittests/test_complex_abs.py b/python/paddle/fluid/tests/unittests/test_complex_abs.py index e61732eba57b8e92a60b368dbe392d834b94b280..968bcb79613180781315cadf8f09abcd7f627c89 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_abs.py +++ b/python/paddle/fluid/tests/unittests/test_complex_abs.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexAbsOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = paddle.abs @@ -37,7 +36,8 @@ class TestComplexAbsOp(OpTest): def init_input_output(self): self.x = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.out = np.abs(self.x) def init_grad_input_output(self): @@ -48,15 +48,16 @@ class TestComplexAbsOp(OpTest): self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) class TestComplexAbsOpZeroValues(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "abs" @@ -70,8 +71,9 @@ class TestComplexAbsOpZeroValues(OpTest): self.outputs = {'Out': self.out} def init_input_output(self): - self.x = np.zeros(self.shape).astype( - self.dtype) + 1J * np.zeros(self.shape).astype(self.dtype) + self.x = np.zeros(self.shape).astype(self.dtype) + 1j * np.zeros( + self.shape + ).astype(self.dtype) self.out = np.abs(self.x) def init_grad_input_output(self): @@ -82,15 +84,16 @@ class TestComplexAbsOpZeroValues(OpTest): self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + 
user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) class TestAbs(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -111,7 +114,6 @@ class TestAbs(unittest.TestCase): class TestRealAbsOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = paddle.abs @@ -136,11 +138,13 @@ class TestRealAbsOp(OpTest): self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_complex_cast.py b/python/paddle/fluid/tests/unittests/test_complex_cast.py index 015c56965df64447f4a89a7379806655840912be..9065b2d008d33328d91ec34d792eed0cbd126bfb 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_cast.py +++ b/python/paddle/fluid/tests/unittests/test_complex_cast.py @@ -20,12 +20,11 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexCastOp(unittest.TestCase): - def test_complex_to_real(self): r = np.random.random(size=[10, 10]) * 10 i = np.random.random(size=[10, 10]) - c_t = paddle.to_tensor(r + i * 1J, dtype='complex64') + c_t = paddle.to_tensor(r + i * 1j, dtype='complex64') self.assertEqual(c_t.cast('int64').dtype, paddle.int64) self.assertEqual(c_t.cast('int32').dtype, paddle.int32) @@ -33,21 +32,21 @@ class TestComplexCastOp(unittest.TestCase): self.assertEqual(c_t.cast('float64').dtype, paddle.float64) self.assertEqual(c_t.cast('bool').dtype, paddle.bool) - np.testing.assert_allclose(c_t.cast('int64').numpy(), - r.astype('int64'), - rtol=1e-05) - np.testing.assert_allclose(c_t.cast('int32').numpy(), - r.astype('int32'), - rtol=1e-05) - np.testing.assert_allclose(c_t.cast('float32').numpy(), - r.astype('float32'), - rtol=1e-05) - np.testing.assert_allclose(c_t.cast('float64').numpy(), - r.astype('float64'), - rtol=1e-05) - np.testing.assert_allclose(c_t.cast('bool').numpy(), - r.astype('bool'), - rtol=1e-05) + np.testing.assert_allclose( + c_t.cast('int64').numpy(), r.astype('int64'), rtol=1e-05 + ) + np.testing.assert_allclose( + c_t.cast('int32').numpy(), r.astype('int32'), rtol=1e-05 + ) + np.testing.assert_allclose( + c_t.cast('float32').numpy(), r.astype('float32'), rtol=1e-05 + ) + np.testing.assert_allclose( + c_t.cast('float64').numpy(), r.astype('float64'), rtol=1e-05 + ) + np.testing.assert_allclose( + c_t.cast('bool').numpy(), r.astype('bool'), rtol=1e-05 + ) def test_real_to_complex(self): r = np.random.random(size=[10, 10]) * 10 @@ -56,29 +55,29 @@ class TestComplexCastOp(unittest.TestCase): self.assertEqual(r_t.cast('complex64').dtype, paddle.complex64) self.assertEqual(r_t.cast('complex128').dtype, paddle.complex128) - np.testing.assert_allclose(r_t.cast('complex64').real().numpy(), - r, - rtol=1e-05) - np.testing.assert_allclose(r_t.cast('complex128').real().numpy(), - r, - rtol=1e-05) + np.testing.assert_allclose( + r_t.cast('complex64').real().numpy(), r, rtol=1e-05 + ) + np.testing.assert_allclose( + r_t.cast('complex128').real().numpy(), r, rtol=1e-05 + ) def test_complex64_complex128(self): r = np.random.random(size=[10, 10]) i = np.random.random(size=[10, 10]) - c = r + i * 1J + c = r + i * 1j c_64 = paddle.to_tensor(c, dtype='complex64') c_128 = paddle.to_tensor(c, dtype='complex128') 
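# Standalone illustration (not part of the patch): black lowercases the letter
# part of numeric literals, which is why the 1J spellings in
# test_complex_cast.py become 1j in this hunk. The two literals denote the same
# complex value:
assert 1J == 1j == complex(0.0, 1.0)
z = 3 + 1j * 4
print(z.real, z.imag)  # prints: 3.0 4.0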
self.assertTrue(c_64.cast('complex128').dtype, paddle.complex128) self.assertTrue(c_128.cast('complex128').dtype, paddle.complex64) - np.testing.assert_allclose(c_64.cast('complex128').numpy(), - c_128.numpy(), - rtol=1e-05) - np.testing.assert_allclose(c_128.cast('complex128').numpy(), - c_64.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + c_64.cast('complex128').numpy(), c_128.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + c_128.cast('complex128').numpy(), c_64.numpy(), rtol=1e-05 + ) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py index cd66246bfd5e7ee09798d81b228dc181bf09d649..14bdec6101ab2d19adffe510a95c8d785eceed18 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py +++ b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py @@ -30,7 +30,6 @@ paddle_apis = { class TestComplexElementwiseLayers(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -48,21 +47,27 @@ class TestComplexElementwiseLayers(unittest.TestCase): pd_result, np_result, rtol=1e-05, - err_msg= - '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n'. - format(place, pd_result[~np.isclose(pd_result, np_result)], - np_result[~np.isclose(pd_result, np_result)])) + err_msg='\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n'.format( + place, + pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)], + ), + ) def compare_by_basic_api(self, x, y): for place in self._places: - self.assert_check(self.paddle_calc(x, y, "add", place), x + y, - place) - self.assert_check(self.paddle_calc(x, y, "sub", place), x - y, - place) - self.assert_check(self.paddle_calc(x, y, "mul", place), x * y, - place) - self.assert_check(self.paddle_calc(x, y, "div", place), x / y, - place) + self.assert_check( + self.paddle_calc(x, y, "add", place), x + y, place + ) + self.assert_check( + self.paddle_calc(x, y, "sub", place), x - y, place + ) + self.assert_check( + self.paddle_calc(x, y, "mul", place), x * y, place + ) + self.assert_check( + self.paddle_calc(x, y, "div", place), x / y, place + ) def compare_op_by_basic_api(self, x, y): for place in self._places: @@ -76,18 +81,21 @@ class TestComplexElementwiseLayers(unittest.TestCase): def test_complex_xy(self): for dtype in self._dtypes: - x = rand([2, 3, 4, 5 - ]).astype(dtype) + 1j * rand([2, 3, 4, 5]).astype(dtype) - y = rand([2, 3, 4, 5 - ]).astype(dtype) + 1j * rand([2, 3, 4, 5]).astype(dtype) + x = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5] + ).astype(dtype) + y = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5] + ).astype(dtype) self.compare_by_basic_api(x, y) self.compare_op_by_basic_api(x, y) def test_complex_x_real_y(self): for dtype in self._dtypes: - x = rand([2, 3, 4, 5 - ]).astype(dtype) + 1j * rand([2, 3, 4, 5]).astype(dtype) + x = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand( + [2, 3, 4, 5] + ).astype(dtype) y = rand([4, 5]).astype(dtype) # promote types cases diff --git a/python/paddle/fluid/tests/unittests/test_complex_getitem.py b/python/paddle/fluid/tests/unittests/test_complex_getitem.py index 2d5428ce933531d0604abe05711e66f0a3926c4f..7446e8f6dfefa58a4bd7bcbb6a570627e77862d3 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_getitem.py +++ 
b/python/paddle/fluid/tests/unittests/test_complex_getitem.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexGetitemLayer(unittest.TestCase): - def setUp(self): self._places = [fluid.CPUPlace()] if fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_grad_accumulated.py b/python/paddle/fluid/tests/unittests/test_complex_grad_accumulated.py index 10c4eed62fdd2e4ada24a4494c1a0bfd3fd701d1..9505ea3011afcae329136446c34f01823f752366 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_grad_accumulated.py +++ b/python/paddle/fluid/tests/unittests/test_complex_grad_accumulated.py @@ -22,52 +22,56 @@ from paddle.fluid.framework import _test_eager_guard class Optimization_ex1(paddle.nn.Layer): - - def __init__(self, - shape, - dtype, - param_attr=paddle.nn.initializer.Uniform(low=-5., high=5.)): + def __init__( + self, + shape, + dtype, + param_attr=paddle.nn.initializer.Uniform(low=-5.0, high=5.0), + ): super(Optimization_ex1, self).__init__() - self.theta0 = self.create_parameter(shape=shape, - attr=param_attr, - dtype=dtype, - is_bias=False) - self.theta1 = self.create_parameter(shape=shape, - attr=param_attr, - dtype=dtype, - is_bias=False) + self.theta0 = self.create_parameter( + shape=shape, attr=param_attr, dtype=dtype, is_bias=False + ) + self.theta1 = self.create_parameter( + shape=shape, attr=param_attr, dtype=dtype, is_bias=False + ) self.A = paddle.to_tensor( - np.random.random((4, 4)).astype(dtype) + - np.random.random((4, 4)).astype(dtype) * 1j) - self.B = paddle.to_tensor(np.random.random( - (4, 4)).astype(dtype) + np.random.random((4, 4)).astype(dtype) * 1j, - stop_gradient=False) + np.random.random((4, 4)).astype(dtype) + + np.random.random((4, 4)).astype(dtype) * 1j + ) + self.B = paddle.to_tensor( + np.random.random((4, 4)).astype(dtype) + + np.random.random((4, 4)).astype(dtype) * 1j, + stop_gradient=False, + ) def forward(self, mode=1): jj = paddle.to_tensor(np.array([1j]).astype(np.complex64)) if mode == 1: # run all calc in one step loss = paddle.sum(self.A + (self.theta0 + self.theta1 * jj)) * ( - paddle.sum(self.A + (self.theta0 + self.theta1 * jj)).conj()) + paddle.sum(self.A + (self.theta0 + self.theta1 * jj)).conj() + ) return loss.real() elif mode == 2: # run in two step self.theta = self.theta0 + self.theta1 * jj loss = paddle.sum(self.A + self.theta) * ( - paddle.sum(self.A + self.theta).conj()) + paddle.sum(self.A + self.theta).conj() + ) return loss.real() elif mode == 3: # run without param - loss = paddle.sum(self.A + self.B) * (paddle.sum(self.A + - self.B).conj()) + loss = paddle.sum(self.A + self.B) * ( + paddle.sum(self.A + self.B).conj() + ) return loss.real() else: raise NotImplementedError class TestComplexGradAccumulated(unittest.TestCase): - def setUp(self): self.devices = ['cpu'] if core.is_compiled_with_cuda(): @@ -81,8 +85,9 @@ class TestComplexGradAccumulated(unittest.TestCase): paddle.set_device(device) myLayer = Optimization_ex1(self.theta_size, dtype) - optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate, - parameters=myLayer.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=self.learning_rate, parameters=myLayer.parameters() + ) for iter in range(self.iter): loss = myLayer(mode) @@ -95,8 +100,9 @@ class TestComplexGradAccumulated(unittest.TestCase): paddle.set_device(device) myLayer = Optimization_ex1(self.theta_size, dtype) - optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate, - 
parameters=myLayer.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=self.learning_rate, parameters=myLayer.parameters() + ) for iter in range(self.iter): loss = myLayer(mode) diff --git a/python/paddle/fluid/tests/unittests/test_complex_kron.py b/python/paddle/fluid/tests/unittests/test_complex_kron.py index f216e6d037b254f5a6f83df8ceca86d1ce471419..7be6a3737edb4b9f0f022ddca5573428963d4c13 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_kron.py +++ b/python/paddle/fluid/tests/unittests/test_complex_kron.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class ComplexKronTestCase(unittest.TestCase): - def __init__(self, methodName='runTest', x=None, y=None): super(ComplexKronTestCase, self).__init__(methodName) self.x = x @@ -43,9 +42,9 @@ class ComplexKronTestCase(unittest.TestCase): x_var = dg.to_variable(self.x) y_var = dg.to_variable(self.y) out_var = paddle.kron(x_var, y_var) - np.testing.assert_allclose(out_var.numpy(), - self.ref_result, - rtol=1e-05) + np.testing.assert_allclose( + out_var.numpy(), self.ref_result, rtol=1e-05 + ) def test_eager(self, place): with _test_eager_guard(): @@ -56,23 +55,35 @@ def load_tests(loader, standard_tests, pattern): suite = unittest.TestSuite() for dtype in ["float32", "float64"]: suite.addTest( - ComplexKronTestCase(x=np.random.randn(2, 2).astype(dtype) + - 1j * np.random.randn(2, 2).astype(dtype), - y=np.random.randn(3, 3).astype(dtype) + - 1j * np.random.randn(3, 3).astype(dtype))) + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + + 1j * np.random.randn(2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype) + + 1j * np.random.randn(3, 3).astype(dtype), + ) + ) suite.addTest( - ComplexKronTestCase(x=np.random.randn(2, 2).astype(dtype), - y=np.random.randn(3, 3).astype(dtype) + - 1j * np.random.randn(3, 3).astype(dtype))) + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype) + + 1j * np.random.randn(3, 3).astype(dtype), + ) + ) suite.addTest( - ComplexKronTestCase(x=np.random.randn(2, 2).astype(dtype) + - 1j * np.random.randn(2, 2).astype(dtype), - y=np.random.randn(3, 3).astype(dtype))) + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + + 1j * np.random.randn(2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype), + ) + ) suite.addTest( - ComplexKronTestCase(x=np.random.randn(2, 2).astype(dtype) + - 1j * np.random.randn(2, 2).astype(dtype), - y=np.random.randn(2, 2, 3).astype(dtype))) + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + + 1j * np.random.randn(2, 2).astype(dtype), + y=np.random.randn(2, 2, 3).astype(dtype), + ) + ) return suite diff --git a/python/paddle/fluid/tests/unittests/test_complex_matmul.py b/python/paddle/fluid/tests/unittests/test_complex_matmul.py index 55cf8516078fab55883cb0218ca05348fa4fb75d..f803930d2ad3e86bd17d1eeee614456ccea83c85 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_matmul.py +++ b/python/paddle/fluid/tests/unittests/test_complex_matmul.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexMatMulLayer(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [fluid.CPUPlace()] @@ -39,10 +38,12 @@ class TestComplexMatMulLayer(unittest.TestCase): pd_result, np_result, rtol=1e-05, - err_msg= - '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n' - .format(place, pd_result[~np.isclose(pd_result, np_result)], - np_result[~np.isclose(pd_result, np_result)])) 
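# Standalone sketch (not part of the patch) of the comparison idiom being
# reformatted here: np.testing.assert_allclose with an err_msg that reports only
# the mismatching elements, selected with a boolean mask from np.isclose. The
# helper name below is illustrative, not taken from the test file.
import numpy as np


def assert_close_report_diffs(pd_result, np_result, rtol=1e-05):
    mask = ~np.isclose(pd_result, np_result, rtol=rtol)
    np.testing.assert_allclose(
        pd_result,
        np_result,
        rtol=rtol,
        err_msg='paddle diff result:\n {}\nnumpy diff result:\n {}\n'.format(
            pd_result[mask], np_result[mask]
        ),
    )


assert_close_report_diffs(np.ones(3), np.ones(3))  # equal inputs: no error raised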
+ err_msg='\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n'.format( + place, + pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)], + ), + ) def compare_op_by_basic_api(self, x, y, np_result): for place in self._places: @@ -55,19 +56,21 @@ class TestComplexMatMulLayer(unittest.TestCase): pd_result, np_result, rtol=1e-05, - err_msg= - '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n' - .format(place, pd_result[~np.isclose(pd_result, np_result)], - np_result[~np.isclose(pd_result, np_result)])) + err_msg='\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n'.format( + place, + pd_result[~np.isclose(pd_result, np_result)], + np_result[~np.isclose(pd_result, np_result)], + ), + ) def test_complex_xy(self): for dtype in self._dtypes: - x = np.random.random( - (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(dtype) - y = np.random.random( - (2, 3, 5, 4)).astype(dtype) + 1J * np.random.random( - (2, 3, 5, 4)).astype(dtype) + x = np.random.random((2, 3, 4, 5)).astype( + dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(dtype) + y = np.random.random((2, 3, 5, 4)).astype( + dtype + ) + 1j * np.random.random((2, 3, 5, 4)).astype(dtype) np_result = np.matmul(x, y) @@ -76,9 +79,9 @@ class TestComplexMatMulLayer(unittest.TestCase): def test_complex_x_real_y(self): for dtype in self._dtypes: - x = np.random.random( - (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(dtype) + x = np.random.random((2, 3, 4, 5)).astype( + dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(dtype) y = np.random.random((2, 3, 5, 4)).astype(dtype) np_result = np.matmul(x, y) @@ -90,9 +93,9 @@ class TestComplexMatMulLayer(unittest.TestCase): def test_real_x_complex_y(self): for dtype in self._dtypes: x = np.random.random((2, 3, 4, 5)).astype(dtype) - y = np.random.random( - (2, 3, 5, 4)).astype(dtype) + 1J * np.random.random( - (2, 3, 5, 4)).astype(dtype) + y = np.random.random((2, 3, 5, 4)).astype( + dtype + ) + 1j * np.random.random((2, 3, 5, 4)).astype(dtype) np_result = np.matmul(x, y) @@ -103,11 +106,12 @@ class TestComplexMatMulLayer(unittest.TestCase): # for coverage def test_complex_xy_gemv(self): for dtype in self._dtypes: - x = np.random.random( - (2, 1, 100)).astype(dtype) + 1J * np.random.random( - (2, 1, 100)).astype(dtype) - y = np.random.random((100)).astype(dtype) + 1J * np.random.random( - (100)).astype(dtype) + x = np.random.random((2, 1, 100)).astype( + dtype + ) + 1j * np.random.random((2, 1, 100)).astype(dtype) + y = np.random.random((100)).astype(dtype) + 1j * np.random.random( + (100) + ).astype(dtype) np_result = np.matmul(x, y) @@ -117,12 +121,12 @@ class TestComplexMatMulLayer(unittest.TestCase): # for coverage def test_complex_xy_gemm(self): for dtype in self._dtypes: - x = np.random.random( - (1, 2, 50)).astype(dtype) + 1J * np.random.random( - (1, 2, 50)).astype(dtype) - y = np.random.random( - (1, 50, 2)).astype(dtype) + 1J * np.random.random( - (1, 50, 2)).astype(dtype) + x = np.random.random((1, 2, 50)).astype( + dtype + ) + 1j * np.random.random((1, 2, 50)).astype(dtype) + y = np.random.random((1, 50, 2)).astype( + dtype + ) + 1j * np.random.random((1, 50, 2)).astype(dtype) np_result = np.matmul(x, y) diff --git a/python/paddle/fluid/tests/unittests/test_complex_op.py b/python/paddle/fluid/tests/unittests/test_complex_op.py index 498e8b43a9e2fc6fc90430d7cafd34bc79cde539..01c6b55921bd21ea521d174e7b101b2f7c8fa615 100644 --- 
a/python/paddle/fluid/tests/unittests/test_complex_op.py +++ b/python/paddle/fluid/tests/unittests/test_complex_op.py @@ -48,7 +48,6 @@ def ref_complex_grad(x, y, dout): class TestComplexOp(OpTest): - def init_spec(self): self.x_shape = [10, 10] self.y_shape = [10, 10] @@ -61,8 +60,9 @@ class TestComplexOp(OpTest): x = np.random.randn(*self.x_shape).astype(self.dtype) y = np.random.randn(*self.y_shape).astype(self.dtype) out_ref = ref_complex(x, y) - self.out_grad = np.random.randn(*self.x_shape).astype(self.dtype) \ - + 1j * np.random.randn(*self.y_shape).astype(self.dtype) + self.out_grad = np.random.randn(*self.x_shape).astype( + self.dtype + ) + 1j * np.random.randn(*self.y_shape).astype(self.dtype) self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': out_ref} @@ -71,41 +71,49 @@ class TestComplexOp(OpTest): def test_check_grad(self): dout = self.out_grad - dx, dy = ref_complex_grad(self.inputs['X'], self.inputs['Y'], - self.out_grad) - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[dx, dy], - user_defined_grad_outputs=[dout], - check_eager=True) + dx, dy = ref_complex_grad( + self.inputs['X'], self.inputs['Y'], self.out_grad + ) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[dx, dy], + user_defined_grad_outputs=[dout], + check_eager=True, + ) def test_check_grad_ignore_x(self): dout = self.out_grad - dx, dy = ref_complex_grad(self.inputs['X'], self.inputs['Y'], - self.out_grad) + dx, dy = ref_complex_grad( + self.inputs['X'], self.inputs['Y'], self.out_grad + ) self.assertTupleEqual(dx.shape, tuple(self.x_shape)) self.assertTupleEqual(dy.shape, tuple(self.y_shape)) - self.check_grad(['Y'], - 'Out', - no_grad_set=set('X'), - user_defined_grads=[dy], - user_defined_grad_outputs=[dout], - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set('X'), + user_defined_grads=[dy], + user_defined_grad_outputs=[dout], + check_eager=True, + ) def test_check_grad_ignore_y(self): dout = self.out_grad - dx, dy = ref_complex_grad(self.inputs['X'], self.inputs['Y'], - self.out_grad) - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[dx], - user_defined_grad_outputs=[dout], - check_eager=True) + dx, dy = ref_complex_grad( + self.inputs['X'], self.inputs['Y'], self.out_grad + ) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[dx], + user_defined_grad_outputs=[dout], + check_eager=True, + ) class TestComplexOpBroadcast1(TestComplexOp): - def init_spec(self): self.x_shape = [10, 3, 1, 4] self.y_shape = [100, 1] @@ -113,7 +121,6 @@ class TestComplexOpBroadcast1(TestComplexOp): class TestComplexOpBroadcast2(TestComplexOp): - def init_spec(self): self.x_shape = [100, 1] self.y_shape = [10, 3, 1, 4] @@ -121,7 +128,6 @@ class TestComplexOpBroadcast2(TestComplexOp): class TestComplexOpBroadcast3(TestComplexOp): - def init_spec(self): self.x_shape = [1, 100] self.y_shape = [100] @@ -129,7 +135,6 @@ class TestComplexOpBroadcast3(TestComplexOp): class TestComplexAPI(unittest.TestCase): - def setUp(self): self.x = np.random.randn(10, 10) self.y = np.random.randn(10, 10) @@ -151,12 +156,9 @@ class TestComplexAPI(unittest.TestCase): exe = static.Executor() exe.run(sp) - [out_np] = exe.run(mp, - feed={ - "x": self.x, - "y": self.y - }, - fetch_list=[out]) + [out_np] = exe.run( + mp, feed={"x": self.x, "y": self.y}, fetch_list=[out] + ) np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_eager(self): diff --git a/python/paddle/fluid/tests/unittests/test_complex_reshape.py 
b/python/paddle/fluid/tests/unittests/test_complex_reshape.py index 29bdb5d7c89ace3aa6027fca04387735bfbac996..0e8398cf816eab5783ba6fefaae9eb331decdb98 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_reshape.py +++ b/python/paddle/fluid/tests/unittests/test_complex_reshape.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexReshape(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -30,24 +29,24 @@ class TestComplexReshape(unittest.TestCase): def test_shape_norm_dims(self): for dtype in self._dtypes: - x_np = np.random.randn( - 2, 3, - 4).astype(dtype) + 1j * np.random.randn(2, 3, 4).astype(dtype) + x_np = np.random.randn(2, 3, 4).astype( + dtype + ) + 1j * np.random.randn(2, 3, 4).astype(dtype) shape = (2, -1) for place in self._places: with dg.guard(place): x_var = dg.to_variable(x_np) y_var = paddle.reshape(x_var, shape) y_np = y_var.numpy() - np.testing.assert_allclose(np.reshape(x_np, shape), - y_np, - rtol=1e-05) + np.testing.assert_allclose( + np.reshape(x_np, shape), y_np, rtol=1e-05 + ) def test_shape_omit_dims(self): for dtype in self._dtypes: - x_np = np.random.randn( - 2, 3, - 4).astype(dtype) + 1j * np.random.randn(2, 3, 4).astype(dtype) + x_np = np.random.randn(2, 3, 4).astype( + dtype + ) + 1j * np.random.randn(2, 3, 4).astype(dtype) shape = (0, -1) shape_ = (2, 12) for place in self._places: @@ -55,9 +54,9 @@ class TestComplexReshape(unittest.TestCase): x_var = dg.to_variable(x_np) y_var = paddle.reshape(x_var, shape) y_np = y_var.numpy() - np.testing.assert_allclose(np.reshape(x_np, shape_), - y_np, - rtol=1e-05) + np.testing.assert_allclose( + np.reshape(x_np, shape_), y_np, rtol=1e-05 + ) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_simplenet.py b/python/paddle/fluid/tests/unittests/test_complex_simplenet.py index ead80c158905de63fadde2ec783a1253d2118b8f..125b21888985ab2318a73f590fd1049063e7d6a6 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_simplenet.py +++ b/python/paddle/fluid/tests/unittests/test_complex_simplenet.py @@ -22,19 +22,20 @@ from paddle.fluid.framework import _test_eager_guard class Optimization_ex1(paddle.nn.Layer): - - def __init__(self, - shape, - param_attr=paddle.nn.initializer.Uniform(low=-5., high=5.), - dtype='float32'): + def __init__( + self, + shape, + param_attr=paddle.nn.initializer.Uniform(low=-5.0, high=5.0), + dtype='float32', + ): super(Optimization_ex1, self).__init__() - self.theta = self.create_parameter(shape=shape, - attr=param_attr, - dtype=dtype, - is_bias=False) + self.theta = self.create_parameter( + shape=shape, attr=param_attr, dtype=dtype, is_bias=False + ) self.A = paddle.to_tensor( - np.random.randn(4, 4) + np.random.randn(4, 4) * 1j) + np.random.randn(4, 4) + np.random.randn(4, 4) * 1j + ) def forward(self): loss = paddle.add(self.theta, self.A) @@ -42,7 +43,6 @@ class Optimization_ex1(paddle.nn.Layer): class TestComplexSimpleNet(unittest.TestCase): - def setUp(self): self.devices = ['cpu'] if core.is_compiled_with_cuda(): @@ -55,8 +55,9 @@ class TestComplexSimpleNet(unittest.TestCase): paddle.set_device(device) myLayer = Optimization_ex1(self.theta_size) - optimizer = paddle.optimizer.Adam(learning_rate=self.learning_rate, - parameters=myLayer.parameters()) + optimizer = paddle.optimizer.Adam( + learning_rate=self.learning_rate, parameters=myLayer.parameters() + ) for itr in range(self.iter): loss = myLayer() diff --git 
a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py index 8dc3b433cd55abb880ba3dd67dbf0983928291b7..7de44b33b0fce1194db33d31ba769ddbf56aaca2 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py +++ b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexSumLayer(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -32,9 +31,9 @@ class TestComplexSumLayer(unittest.TestCase): def test_complex_basic_api(self): for dtype in self._dtypes: - input = rand([ - 2, 10, 10 - ]).astype(dtype) + 1j * rand([2, 10, 10]).astype(dtype) + input = rand([2, 10, 10]).astype(dtype) + 1j * rand( + [2, 10, 10] + ).astype(dtype) for place in self._places: with dg.guard(place): var_x = dg.to_variable(input) diff --git a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py index c0400d6fc5d1f490cee893a328bd16d457a0638d..bb8bc7f445c19f68fe89fc8105828f9bc7e6e3b8 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py +++ b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexTraceLayer(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [fluid.CPUPlace()] @@ -31,14 +30,15 @@ class TestComplexTraceLayer(unittest.TestCase): def test_basic_api(self): for dtype in self._dtypes: - input = rand([ - 2, 20, 2, 3 - ]).astype(dtype) + 1j * rand([2, 20, 2, 3]).astype(dtype) + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3] + ).astype(dtype) for place in self._places: with dg.guard(place): var_x = dg.to_variable(input) - result = tensor.trace(var_x, offset=1, axis1=0, - axis2=2).numpy() + result = tensor.trace( + var_x, offset=1, axis1=0, axis2=2 + ).numpy() target = np.trace(input, offset=1, axis1=0, axis2=2) np.testing.assert_allclose(result, target, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_complex_transpose.py b/python/paddle/fluid/tests/unittests/test_complex_transpose.py index cfb8040674fa35d2912c7441022a006f2f6c426d..b08f7e1be0716d6ff409db90dea87c85f71da0b3 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_complex_transpose.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexTransposeLayer(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -30,9 +29,9 @@ class TestComplexTransposeLayer(unittest.TestCase): def test_transpose_by_complex_api(self): for dtype in self._dtypes: - data = np.random.random( - (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(dtype) + data = np.random.random((2, 3, 4, 5)).astype( + dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(dtype) perm = [3, 2, 0, 1] np_trans = np.transpose(data, perm) for place in self._places: diff --git a/python/paddle/fluid/tests/unittests/test_complex_variable.py b/python/paddle/fluid/tests/unittests/test_complex_variable.py index c7173a7b814b97adcb8e7060a2c074af0a1ef1d1..b3afc1d9df9df260eaab478f168794473419be60 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_variable.py +++ 
b/python/paddle/fluid/tests/unittests/test_complex_variable.py @@ -23,10 +23,10 @@ from paddle.fluid.framework import _test_eager_guard class TestComplexVariable(unittest.TestCase): - def compare(self): - a = np.array([[1.0 + 1.0j, 2.0 + 1.0j], - [3.0 + 1.0j, 4.0 + 1.0j]]).astype(self._dtype) + a = np.array( + [[1.0 + 1.0j, 2.0 + 1.0j], [3.0 + 1.0j, 4.0 + 1.0j]] + ).astype(self._dtype) b = np.array([[1.0 + 1.0j, 1.0 + 1.0j]]).astype(self._dtype) with dg.guard(): @@ -46,16 +46,22 @@ class TestComplexVariable(unittest.TestCase): self.compare() def test_convert_np_dtype_to_dtype(self): - self.assertEqual(convert_np_dtype_to_dtype_(np.complex64), - core.VarDesc.VarType.COMPLEX64) - self.assertEqual(convert_np_dtype_to_dtype_(np.complex64), - core.VarDesc.VarType.COMPLEX64) + self.assertEqual( + convert_np_dtype_to_dtype_(np.complex64), + core.VarDesc.VarType.COMPLEX64, + ) + self.assertEqual( + convert_np_dtype_to_dtype_(np.complex64), + core.VarDesc.VarType.COMPLEX64, + ) def test_convert_dtype(self): - self.assertEqual(convert_dtype(core.VarDesc.VarType.COMPLEX64), - "complex64") - self.assertEqual(convert_dtype(core.VarDesc.VarType.COMPLEX128), - "complex128") + self.assertEqual( + convert_dtype(core.VarDesc.VarType.COMPLEX64), "complex64" + ) + self.assertEqual( + convert_dtype(core.VarDesc.VarType.COMPLEX128), "complex128" + ) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_view_op.py b/python/paddle/fluid/tests/unittests/test_complex_view_op.py index ad283a058396b041583752cd8bebdb8aa67a55d0..451469dfa21010715346611dc7930c3159d4cb22 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_view_op.py +++ b/python/paddle/fluid/tests/unittests/test_complex_view_op.py @@ -34,14 +34,14 @@ def ref_view_as_real(x): class TestViewAsComplexOp(OpTest): - def setUp(self): self.op_type = "as_complex" self.python_api = paddle.as_complex x = np.random.randn(10, 10, 2).astype("float64") out_ref = ref_view_as_complex(x) - self.out_grad = np.ones( - [10, 10], dtype="float64") + 1j * np.ones([10, 10], dtype="float64") + self.out_grad = np.ones([10, 10], dtype="float64") + 1j * np.ones( + [10, 10], dtype="float64" + ) self.inputs = {'X': x} self.outputs = {'Out': out_ref} @@ -49,15 +49,16 @@ class TestViewAsComplexOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[ref_view_as_real(self.out_grad)], - user_defined_grad_outputs=[self.out_grad], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[ref_view_as_real(self.out_grad)], + user_defined_grad_outputs=[self.out_grad], + check_eager=True, + ) class TestViewAsRealOp(OpTest): - def setUp(self): self.op_type = "as_real" real = np.random.randn(10, 10).astype("float64") @@ -73,15 +74,16 @@ class TestViewAsRealOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[ref_view_as_complex(self.out_grad)], - user_defined_grad_outputs=[self.out_grad], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[ref_view_as_complex(self.out_grad)], + user_defined_grad_outputs=[self.out_grad], + check_eager=True, + ) class TestViewAsComplexAPI(unittest.TestCase): - def setUp(self): self.x = np.random.randn(10, 10, 2) self.out = ref_view_as_complex(self.x) @@ -109,7 +111,6 @@ class TestViewAsComplexAPI(unittest.TestCase): class TestViewAsRealAPI(unittest.TestCase): - def setUp(self): self.x = 
np.random.randn(10, 10) + 1j * np.random.randn(10, 10) self.out = ref_view_as_real(self.x) diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 76397a2083bc52f065f36afacdda1fc13ab2caab..624f2be4a9f8ec6e2903273796f5e2bf142b4e81 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -14,7 +14,11 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + skip_check_grad_ci, + convert_float_to_uint16, +) import paddle.fluid as fluid from paddle.fluid import Program, core, program_guard from paddle.fluid.framework import _test_eager_guard @@ -25,7 +29,6 @@ import paddle.fluid.layers as layers class TestConcatOp(OpTest): - def setUp(self): self.op_type = "concat" self.python_api = paddle.concat @@ -40,8 +43,9 @@ class TestConcatOp(OpTest): self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } def get_dtype(self): @@ -81,7 +85,6 @@ class TestConcatOp(OpTest): class TestConcatOp2(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -90,9 +93,9 @@ class TestConcatOp2(TestConcatOp): @skip_check_grad_ci( - reason="The function 'check_grad' for large inputs is too slow.") + reason="The function 'check_grad' for large inputs is too slow." +) class TestConcatOp3(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype) self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype) @@ -104,11 +107,9 @@ class TestConcatOp3(TestConcatOp): @skip_check_grad_ci( - reason= - "This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." + reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." 
) class TestConcatOp4(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) @@ -120,7 +121,6 @@ class TestConcatOp4(TestConcatOp): class TestConcatOp5(TestConcatOp): - def init_test_data(self): self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype) self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype) @@ -129,7 +129,6 @@ class TestConcatOp5(TestConcatOp): class TestConcatOp6(TestConcatOp): - def setUp(self): self.op_type = "concat" self.dtype = self.get_dtype() @@ -138,8 +137,11 @@ class TestConcatOp6(TestConcatOp): self.lod = [[20, 80]] self.out_lod = [[20, 80, 20, 80, 20, 80]] self.inputs = { - 'X': [('x0', (self.x0, self.lod)), ('x1', (self.x1, self.lod)), - ('x2', (self.x2, self.lod))] + 'X': [ + ('x0', (self.x0, self.lod)), + ('x1', (self.x1, self.lod)), + ('x2', (self.x2, self.lod)), + ] } self.attrs = {'axis': self.axis} if self.axis < 0: @@ -166,9 +168,7 @@ class TestConcatOp6(TestConcatOp): def create_test_AxisTensor(parent): - class TestConcatAxisTensor(parent): - def setUp(self): self.op_type = "concat" self.python_api = paddle.concat @@ -177,20 +177,22 @@ def create_test_AxisTensor(parent): self.inputs = { 'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)], - 'AxisTensor': np.array([self.axis]).astype("int32") + 'AxisTensor': np.array([self.axis]).astype("int32"), } self.attrs = {} if self.axis < 0: self.actual_axis = self.axis + len(self.x0.shape) - self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0 + self.actual_axis = ( + self.actual_axis if self.actual_axis > 0 else 0 + ) else: self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), - axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor") @@ -205,13 +207,11 @@ create_test_AxisTensor(TestConcatOp4) create_test_AxisTensor(TestConcatOp5) create_test_AxisTensor(TestConcatOp6) -#----------------Concat Fp16---------------- +# ----------------Concat Fp16---------------- def create_test_fp16(parent): - class TestConcatFp16(parent): - def get_dtype(self): return np.float16 @@ -228,13 +228,12 @@ create_test_fp16(TestConcatOp5) create_test_fp16(TestConcatOp6) -#----------------Concat Bf16---------------- +# ----------------Concat Bf16---------------- def create_test_bf16(parent): - - @unittest.skipIf(not paddle.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestConcatBf16(parent): - def get_dtype(self): return np.uint16 @@ -247,17 +246,18 @@ create_test_bf16(TestConcatOp) class TestConcatOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of concat_op should be list. x1 = fluid.layers.data(shape=[4], dtype='int32', name='x1') fluid.layers.concat(x1) # The item in input must be Variable. - x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x2 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + x3 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. 
x4 = fluid.layers.data(shape=[4], dtype='uint8', name='x4') @@ -281,7 +281,6 @@ class TestConcatOpError(unittest.TestCase): class TestConcatAPI(unittest.TestCase): - def test_fluid_api(self): paddle.enable_static() x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1') @@ -298,22 +297,20 @@ class TestConcatAPI(unittest.TestCase): out_3 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int64) exe = fluid.Executor(place=fluid.CPUPlace()) - [res_1, res_2, res_3] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_2, - "x_2": input_2, - "x_3": input_3 - }, - fetch_list=[out_1, out_2, out_3]) + [res_1, res_2, res_3] = exe.run( + fluid.default_main_program(), + feed={"x_1": input_2, "x_2": input_2, "x_3": input_3}, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1)) assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1)) assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1)) def test_api(self): paddle.enable_static() - x_1 = paddle.fluid.data(shape=[None, 1, 4, 5], - dtype='int32', - name='x_1') + x_1 = paddle.fluid.data( + shape=[None, 1, 4, 5], dtype='int32', name='x_1' + ) paddle.concat([x_1, x_1], 0) input_2 = np.random.random([2, 1, 4, 5]).astype("int32") @@ -329,14 +326,11 @@ class TestConcatAPI(unittest.TestCase): out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64) exe = paddle.static.Executor(place=paddle.CPUPlace()) - [res_1, res_2, res_3, - res_4] = exe.run(paddle.static.default_main_program(), - feed={ - "x_1": input_2, - "x_2": input_2, - "x_3": input_3 - }, - fetch_list=[out_1, out_2, out_3, out_4]) + [res_1, res_2, res_3, res_4] = exe.run( + paddle.static.default_main_program(), + feed={"x_1": input_2, "x_2": input_2, "x_3": input_3}, + fetch_list=[out_1, out_2, out_3, out_4], + ) assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1)) assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1)) assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1)) @@ -367,10 +361,12 @@ class TestConcatAPI(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The item in input must be Variable. - x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x2 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + x3 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. 
x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4') @@ -404,8 +400,11 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): self.iter_num = 3 self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") - self.place = fluid.CUDAPlace(0) \ - if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def set_program(self, use_fluid_api): paddle.enable_static() @@ -414,9 +413,9 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): with fluid.program_guard(self.program): input = fluid.layers.assign(self.x) tensor_array = fluid.layers.create_array(dtype='float32') - zero = fluid.layers.fill_constant(shape=[1], - value=0, - dtype="int64") + zero = fluid.layers.fill_constant( + shape=[1], value=0, dtype="int64" + ) for i in range(self.iter_num): fluid.layers.array_write(input, zero + i, tensor_array) @@ -449,11 +448,11 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( - res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis)) + res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis) + ) class TestConcatDoubleGradCheck(unittest.TestCase): - def concat_wrapper(self, x): return paddle.concat(x) @@ -470,17 +469,21 @@ class TestConcatDoubleGradCheck(unittest.TestCase): out = paddle.concat([data1, data2]) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype) - gradient_checker.double_grad_check([data1, data2], - out, - x_init=[data1_arr, data2_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data1, data2], + out, + x_init=[data1_arr, data2_arr], + place=place, + eps=eps, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) gradient_checker.double_grad_check_for_dygraph( - self.concat_wrapper, [data1, data2], + self.concat_wrapper, + [data1, data2], out, x_init=[data1_arr, data2_arr], - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() @@ -492,7 +495,6 @@ class TestConcatDoubleGradCheck(unittest.TestCase): class TestConcatTripleGradCheck(unittest.TestCase): - def concat_wrapper(self, x): return paddle.concat(x, 1) @@ -509,17 +511,21 @@ class TestConcatTripleGradCheck(unittest.TestCase): out = paddle.concat([data1, data2], 1) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype) - gradient_checker.double_grad_check([data1, data2], - out, - x_init=[data1_arr, data2_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data1, data2], + out, + x_init=[data1_arr, data2_arr], + place=place, + eps=eps, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) gradient_checker.double_grad_check_for_dygraph( - self.concat_wrapper, [data1, data2], + self.concat_wrapper, + [data1, data2], out, x_init=[data1_arr, data2_arr], - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index f0c5516eef46ae50ec86b495284f6ec9603eb562..3d05d7694fa217f39ef1ffcfd32a4da7d264911c 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -29,7 +29,6 @@ np.random.seed(123) class TestCondInputOutput(unittest.TestCase): - 
def test_return_single_var(self): """ pseudocode: @@ -57,13 +56,16 @@ class TestCondInputOutput(unittest.TestCase): out = layers.cond(pred, true_func, false_func) # out is one tensor - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - ret, = exe.run(main_program, fetch_list=[out.name]) - np.testing.assert_allclose(np.asarray(ret), - np.full((3, 2), -1, np.int32), - rtol=1e-05) + (ret,) = exe.run(main_program, fetch_list=[out.name]) + np.testing.assert_allclose( + np.asarray(ret), np.full((3, 2), -1, np.int32), rtol=1e-05 + ) def test_return_var_tuple(self): """ @@ -78,18 +80,14 @@ class TestCondInputOutput(unittest.TestCase): paddle.enable_static() def true_func(): - return layers.fill_constant(shape=[1, 2], dtype='int32', - value=1), layers.fill_constant( - shape=[2, 3], - dtype='bool', - value=True) + return layers.fill_constant( + shape=[1, 2], dtype='int32', value=1 + ), layers.fill_constant(shape=[2, 3], dtype='bool', value=True) def false_func(): - return layers.fill_constant(shape=[3, 4], dtype='float32', - value=3), layers.fill_constant( - shape=[4, 5], - dtype='int64', - value=2) + return layers.fill_constant( + shape=[3, 4], dtype='float32', value=3 + ), layers.fill_constant(shape=[4, 5], dtype='int64', value=2) main_program = Program() startup_program = Program() @@ -98,16 +96,19 @@ class TestCondInputOutput(unittest.TestCase): out = layers.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=out) - np.testing.assert_allclose(np.asarray(ret[0]), - np.full((1, 2), 1, np.int32), - rtol=1e-05) - np.testing.assert_allclose(np.asarray(ret[1]), - np.full((2, 3), True, bool), - rtol=1e-05) + np.testing.assert_allclose( + np.asarray(ret[0]), np.full((1, 2), 1, np.int32), rtol=1e-05 + ) + np.testing.assert_allclose( + np.asarray(ret[1]), np.full((2, 3), True, bool), rtol=1e-05 + ) def test_pass_and_modify_var(self): """ @@ -135,20 +136,28 @@ class TestCondInputOutput(unittest.TestCase): with program_guard(main_program, startup_program): a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7) i = fluid.data(name="i", shape=[1], dtype='int32') - pred = ((i % 2) == 0) - a = layers.cond(pred, lambda: true_func(a, i), - lambda: false_func(a, i)) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + pred = (i % 2) == 0 + a = layers.cond( + pred, lambda: true_func(a, i), lambda: false_func(a, i) + ) + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) for feed_i in range(5): expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i - ret, = exe.run(main_program, - feed={'i': np.full((1), feed_i, np.int32)}, - fetch_list=[a]) - np.testing.assert_allclose(np.asarray(ret), - np.full((3, 2, 1), expected_a, np.int32), - rtol=1e-05) + (ret,) = exe.run( + main_program, + feed={'i': np.full((1), feed_i, np.int32)}, + fetch_list=[a], + ) + np.testing.assert_allclose( + np.asarray(ret), + np.full((3, 2, 1), expected_a, np.int32), + rtol=1e-05, + ) def test_return_none(self): """ @@ -172,12 +181,15 @@ class TestCondInputOutput(unittest.TestCase): startup_program = 
Program() with program_guard(main_program, startup_program): i = fluid.data(name="i", shape=[1], dtype='int32') - pred = ((i % 2) == 0) + pred = (i % 2) == 0 out1 = layers.cond(pred, true_func, false_func) out2 = layers.cond(pred, None, false_func) out3 = layers.cond(pred, true_func, None) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) for feed_i in range(5): # Test that output is None is runnable @@ -200,17 +212,15 @@ class TestCondInputOutput(unittest.TestCase): return layers.fill_constant(shape=[2, 7], dtype='int32', value=3) def func_return_two_tensors(): - return layers.fill_constant(shape=[3, 1], dtype='int32', - value=7), layers.fill_constant( - shape=[3, 1], - dtype='int32', - value=8) + return layers.fill_constant( + shape=[3, 1], dtype='int32', value=7 + ), layers.fill_constant(shape=[3, 1], dtype='int32', value=8) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): i = fluid.data(name="i", shape=[1], dtype='int32') - pred = ((i % 2) == 0) + pred = (i % 2) == 0 with self.assertRaises(TypeError): out = layers.cond(pred, i, func_return_one_tensor) @@ -218,47 +228,57 @@ class TestCondInputOutput(unittest.TestCase): out = layers.cond(pred, func_return_one_tensor, np.asarray([3])) with self.assertRaises(Exception) as e: - out = layers.cond(pred, func_return_none, - func_return_one_tensor) + out = layers.cond( + pred, func_return_none, func_return_one_tensor + ) self.assertTrue( - "Incompatible return values of true_fn and false_fn in cond" in - str(e.exception)) + "Incompatible return values of true_fn and false_fn in cond" + in str(e.exception) + ) with self.assertRaises(Exception) as e: - out = layers.cond(pred, func_return_two_tensors, - func_return_none) + out = layers.cond( + pred, func_return_two_tensors, func_return_none + ) self.assertTrue( - "Incompatible return values of true_fn and false_fn in cond" in - str(e.exception)) + "Incompatible return values of true_fn and false_fn in cond" + in str(e.exception) + ) with self.assertRaises(Exception) as e: - out = layers.cond(pred, func_return_one_tensor, - func_return_two_tensors) + out = layers.cond( + pred, func_return_one_tensor, func_return_two_tensors + ) self.assertTrue( "true fn returns 1 vars, but false fn returns 2 vars, which is not equals" - in str(e.exception)) + in str(e.exception) + ) def test_extremely_simple_net_with_op_in_condition(self): paddle.enable_static() main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - a = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=1.23) + a = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.23 + ) a.stop_gradient = False - b = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=1.25) + b = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.25 + ) b.stop_gradient = False out = layers.cond(a - b < -1.0, lambda: a, lambda: b) append_backward(out) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - ret = exe.run(main_program, - fetch_list=[out, b, a.grad_name, b.grad_name]) + ret = exe.run( + main_program, fetch_list=[out, b, a.grad_name, b.grad_name] + ) # Note: fill_constant has loss of 
precision, you have to assertEqual # with values doens't lose precision in float-point number. self.assertEqual(ret[0][0], ret[1][0]) @@ -267,7 +287,6 @@ class TestCondInputOutput(unittest.TestCase): class TestCondNestedControlFlow(unittest.TestCase): - def test_cond_inside_cond(self): """ pseudocode: @@ -288,25 +307,37 @@ class TestCondNestedControlFlow(unittest.TestCase): paddle.enable_static() def less_than_branch(i, a): - return layers.cond(i >= 3.0, lambda: layers.elementwise_add(a, a), - lambda: layers.elementwise_sub(a, a)) + return layers.cond( + i >= 3.0, + lambda: layers.elementwise_add(a, a), + lambda: layers.elementwise_sub(a, a), + ) def greater_equal_branch(i, a): - return layers.cond(i < 8.0, lambda: layers.elementwise_mul(a, a), - lambda: layers.elementwise_div(a, a)) + return layers.cond( + i < 8.0, + lambda: layers.elementwise_mul(a, a), + lambda: layers.elementwise_div(a, a), + ) main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): i = fluid.data(name="i", shape=[1], dtype='float32') a = 2.0 * i - out = layers.cond(i < 5.0, lambda: less_than_branch(i, a), - lambda: greater_equal_branch(i, a)) + out = layers.cond( + i < 5.0, + lambda: less_than_branch(i, a), + lambda: greater_equal_branch(i, a), + ) mean = paddle.mean(out) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) for feed_i in range(0, 10): expected_a = 2.0 * feed_i @@ -316,9 +347,11 @@ class TestCondNestedControlFlow(unittest.TestCase): else: expected_ret = expected_a * expected_a if feed_i < 8 else 1.0 expected_a_grad = 2.0 * expected_a if feed_i < 8 else 0.0 - ret = exe.run(main_program, - feed={'i': np.full((1), feed_i, np.float32)}, - fetch_list=[out.name, a.grad_name]) + ret = exe.run( + main_program, + feed={'i': np.full((1), feed_i, np.float32)}, + fetch_list=[out.name, a.grad_name], + ) self.assertEqual(ret[0][0], expected_ret) self.assertEqual(ret[1][0], expected_a_grad) @@ -328,24 +361,34 @@ class TestCondNestedControlFlow(unittest.TestCase): startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - a = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=1.23) + a = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.23 + ) a.stop_gradient = False - b = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=1.24) + b = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=1.24 + ) b.stop_gradient = False out = fluid.layers.cond( - a < b, lambda: fluid.layers.cond( - a - b < -1.0, lambda: fluid.layers.elementwise_add(a, b), - lambda: fluid.layers.elementwise_mul(a, b)), lambda: - fluid.layers.cond(a == b, lambda: fluid.layers.elementwise_sub( - a, b), lambda: fluid.layers.elementwise_pow(a, b))) + a < b, + lambda: fluid.layers.cond( + a - b < -1.0, + lambda: fluid.layers.elementwise_add(a, b), + lambda: fluid.layers.elementwise_mul(a, b), + ), + lambda: fluid.layers.cond( + a == b, + lambda: fluid.layers.elementwise_sub(a, b), + lambda: fluid.layers.elementwise_pow(a, b), + ), + ) append_backward(out) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=[out, a.grad_name, b.grad_name]) # Note: 
fill_constant has loss of precision, so we assertAlmostEqual. @@ -355,7 +398,6 @@ class TestCondNestedControlFlow(unittest.TestCase): class TestCondBackward(unittest.TestCase): - def backward_value_helper(self, cond_func, use_cuda, use_parallel_exe): """ Helper function that compares calculated backward value is close to dy/dx @@ -379,70 +421,76 @@ class TestCondBackward(unittest.TestCase): num_devices = 1 if use_parallel_exe: os.environ['CPU_NUM'] = str(2) - exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=main_program, - loss_name=loss.name) + exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + main_program=main_program, + loss_name=loss.name, + ) num_devices = exe.device_count delta = 0.005 for feed_i in range(0, 10): feed_img = np.random.random(size=[1, 9]).astype(np.float32) - feed_label = np.random.randint(low=0, - high=10, - size=[1, 1], - dtype=np.int64) + feed_label = np.random.randint( + low=0, high=10, size=[1, 1], dtype=np.int64 + ) if use_parallel_exe: img_grad, loss_value = exe.run( feed={ 'i': np.full((num_devices), feed_i, np.int32), 'image': np.repeat(feed_img, num_devices, axis=0), - 'label': np.repeat(feed_label, num_devices, axis=0) + 'label': np.repeat(feed_label, num_devices, axis=0), }, - fetch_list=[img.grad_name, loss.name]) + fetch_list=[img.grad_name, loss.name], + ) else: img_grad, loss_value = exe.run( main_program, feed={ 'i': np.full((1), feed_i, np.int32), 'image': feed_img, - 'label': feed_label + 'label': feed_label, }, - fetch_list=[img.grad_name, loss.name]) + fetch_list=[img.grad_name, loss.name], + ) numerical_grad = np.zeros(shape=[num_devices, 9], dtype=np.float32) feed_img_delta = np.copy(feed_img) for j in range(9): feed_img_delta[0][j] = feed_img[0][j] + delta if use_parallel_exe: - loss_delta = exe.run(feed={ - 'i': - np.full((num_devices), feed_i, np.int32), - 'image': - np.repeat(feed_img_delta, num_devices, axis=0), - 'label': - np.repeat(feed_label, num_devices, axis=0) - }, - fetch_list=[loss.name]) - multi_device_grad = (loss_delta[0] - - loss_value[0]) / delta / num_devices + loss_delta = exe.run( + feed={ + 'i': np.full((num_devices), feed_i, np.int32), + 'image': np.repeat( + feed_img_delta, num_devices, axis=0 + ), + 'label': np.repeat(feed_label, num_devices, axis=0), + }, + fetch_list=[loss.name], + ) + multi_device_grad = ( + (loss_delta[0] - loss_value[0]) / delta / num_devices + ) for d in range(num_devices): numerical_grad[d][j] = multi_device_grad[d] else: - loss_delta = exe.run(main_program, - feed={ - 'i': np.full((1), feed_i, - np.int32), - 'image': feed_img_delta, - 'label': feed_label - }, - fetch_list=[loss.name]) - numerical_grad[0][j] = (loss_delta[0] - - loss_value[0]) / delta + loss_delta = exe.run( + main_program, + feed={ + 'i': np.full((1), feed_i, np.int32), + 'image': feed_img_delta, + 'label': feed_label, + }, + fetch_list=[loss.name], + ) + numerical_grad[0][j] = ( + loss_delta[0] - loss_value[0] + ) / delta feed_img_delta[0][j] = feed_img[0][j] - np.testing.assert_allclose(img_grad, - numerical_grad, - rtol=0.05, - atol=0.05) + np.testing.assert_allclose( + img_grad, numerical_grad, rtol=0.05, atol=0.05 + ) def add_optimizer_helper(self, cond_func, use_cuda, use_parallel_exe): """ @@ -463,43 +511,49 @@ class TestCondBackward(unittest.TestCase): exe.run(startup_program) if use_parallel_exe: os.environ['CPU_NUM'] = str(2) - exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=main_program, - loss_name=loss.name) + exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + 
main_program=main_program, + loss_name=loss.name, + ) num_devices = exe.device_count for feed_i in range(0, 10): feed_img = np.random.random(size=[16, 784]).astype(np.float32) - feed_label = np.random.randint(low=0, - high=10, - size=[16, 1], - dtype=np.int64) + feed_label = np.random.randint( + low=0, high=10, size=[16, 1], dtype=np.int64 + ) if use_parallel_exe: - exe.run(feed={ - 'i': np.full((num_devices), feed_i, np.int32), - 'image': np.repeat(feed_img, num_devices, axis=0), - 'label': np.repeat(feed_label, num_devices, axis=0) - }, - fetch_list=[loss.name]) + exe.run( + feed={ + 'i': np.full((num_devices), feed_i, np.int32), + 'image': np.repeat(feed_img, num_devices, axis=0), + 'label': np.repeat(feed_label, num_devices, axis=0), + }, + fetch_list=[loss.name], + ) else: - exe.run(main_program, - feed={ - 'i': np.full((1), feed_i, np.int32), - 'image': feed_img, - 'label': feed_label - }, - fetch_list=[loss]) + exe.run( + main_program, + feed={ + 'i': np.full((1), feed_i, np.int32), + 'image': feed_img, + 'label': feed_label, + }, + fetch_list=[loss], + ) def test_cond_backward(self): paddle.enable_static() def cond_func(i, img, label): - predicate = ((i % 2) == 0) + predicate = (i % 2) == 0 return layers.cond( predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10), - lambda: batchnorm_fc_with_inputs(img, label, class_num=10)) + lambda: batchnorm_fc_with_inputs(img, label, class_num=10), + ) for use_parallel_exe in [False, True]: if use_parallel_exe and os.name == "nt": @@ -508,10 +562,12 @@ class TestCondBackward(unittest.TestCase): ) continue - self.backward_value_helper(cond_func, core.is_compiled_with_cuda(), - use_parallel_exe) - self.add_optimizer_helper(cond_func, core.is_compiled_with_cuda(), - use_parallel_exe) + self.backward_value_helper( + cond_func, core.is_compiled_with_cuda(), use_parallel_exe + ) + self.add_optimizer_helper( + cond_func, core.is_compiled_with_cuda(), use_parallel_exe + ) def test_half_nested_cond_backward(self): paddle.enable_static() @@ -520,15 +576,18 @@ class TestCondBackward(unittest.TestCase): return layers.cond( (i % 2) == 0, lambda: simple_fc_net_with_inputs(img, label, class_num=10), - lambda: batchnorm_fc_with_inputs(img, label, class_num=10)) + lambda: batchnorm_fc_with_inputs(img, label, class_num=10), + ) def cond_func_simple_net_at_true(i, img, label): - return layers.cond(i < 5, lambda: branch(i, img, label), - lambda: paddle.mean(img)) + return layers.cond( + i < 5, lambda: branch(i, img, label), lambda: paddle.mean(img) + ) def cond_func_simple_net_at_false(i, img, label): - return layers.cond(i < 5, lambda: paddle.mean(img), - lambda: branch(i, img, label)) + return layers.cond( + i < 5, lambda: paddle.mean(img), lambda: branch(i, img, label) + ) for use_parallel_exe in [False, True]: if use_parallel_exe and os.name == "nt": @@ -537,35 +596,47 @@ class TestCondBackward(unittest.TestCase): ) continue - self.backward_value_helper(cond_func_simple_net_at_true, - core.is_compiled_with_cuda(), - use_parallel_exe) - self.add_optimizer_helper(cond_func_simple_net_at_true, - core.is_compiled_with_cuda(), - use_parallel_exe) - self.backward_value_helper(cond_func_simple_net_at_false, - core.is_compiled_with_cuda(), - use_parallel_exe) - self.add_optimizer_helper(cond_func_simple_net_at_false, - core.is_compiled_with_cuda(), - use_parallel_exe) + self.backward_value_helper( + cond_func_simple_net_at_true, + core.is_compiled_with_cuda(), + use_parallel_exe, + ) + self.add_optimizer_helper( + cond_func_simple_net_at_true, + 
core.is_compiled_with_cuda(), + use_parallel_exe, + ) + self.backward_value_helper( + cond_func_simple_net_at_false, + core.is_compiled_with_cuda(), + use_parallel_exe, + ) + self.add_optimizer_helper( + cond_func_simple_net_at_false, + core.is_compiled_with_cuda(), + use_parallel_exe, + ) def test_nested_cond_backward(self): paddle.enable_static() def branch(i, img, label, mod_two): if mod_two: - predicate = ((i % 2) == 0) + predicate = (i % 2) == 0 else: - predicate = ((i % 2) != 0) + predicate = (i % 2) != 0 return layers.cond( predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10), - lambda: batchnorm_fc_with_inputs(img, label, class_num=10)) + lambda: batchnorm_fc_with_inputs(img, label, class_num=10), + ) def cond_func(i, img, label): - return layers.cond(i < 5, lambda: branch(i, img, label, True), - lambda: branch(i, img, label, False)) + return layers.cond( + i < 5, + lambda: branch(i, img, label, True), + lambda: branch(i, img, label, False), + ) for use_parallel_exe in [False, True]: if use_parallel_exe and os.name == "nt": @@ -573,14 +644,15 @@ class TestCondBackward(unittest.TestCase): "Skip use_parallel_exe=True in Windows because of flaky test when using PE under old Windows machine" ) continue - self.backward_value_helper(cond_func, core.is_compiled_with_cuda(), - use_parallel_exe) - self.add_optimizer_helper(cond_func, core.is_compiled_with_cuda(), - use_parallel_exe) + self.backward_value_helper( + cond_func, core.is_compiled_with_cuda(), use_parallel_exe + ) + self.add_optimizer_helper( + cond_func, core.is_compiled_with_cuda(), use_parallel_exe + ) class TestCondWithError(unittest.TestCase): - def test_input_type_error(self): paddle.enable_static() main_program = framework.Program() diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index 6c0a290794cc516798354c6742f5dbbcedc987b1..fc8d926070e3e99a77eee4feb6fde67c805de213 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -24,7 +24,6 @@ from paddle.fluid.layers.control_flow import ConditionalBlock class ConditionalBlockTest(unittest.TestCase): - def test_forward(self): main_program = fluid.Program() startup_program = fluid.Program() @@ -50,12 +49,12 @@ class ConditionalBlockTest(unittest.TestCase): outs = exe.run( main_program, feed={'X': x}, - fetch_list=[main_program.block(0).var(data.name + "@GRAD")])[0] + fetch_list=[main_program.block(0).var(data.name + "@GRAD")], + )[0] print(outs) class TestConditionalBlockOpInferShape(unittest.TestCase): - def test_infer_shape(self): main_program = fluid.Program() startup_program = fluid.Program() @@ -64,24 +63,21 @@ class TestConditionalBlockOpInferShape(unittest.TestCase): sub_block = main_program._create_block() main_program._rollback() step_scope = global_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) - cond_var = layers.fill_constant(shape=[1], - dtype='bool', - value=False) + type=core.VarDesc.VarType.STEP_SCOPES + ) + cond_var = layers.fill_constant( + shape=[1], dtype='bool', value=False + ) - op = global_block.append_op(type='conditional_block', - inputs={ - 'Cond': [cond_var], - 'Input': [], - }, - outputs={ - 'Out': [], - 'Scope': [step_scope] - }, - attrs={ - 'sub_block': sub_block, - 'is_scalar_condition': True - }) + op = global_block.append_op( + type='conditional_block', + inputs={ + 'Cond': [cond_var], + 'Input': [], + }, + outputs={'Out': [], 'Scope': 
[step_scope]}, + attrs={'sub_block': sub_block, 'is_scalar_condition': True}, + ) op.desc.infer_shape(global_block.desc) diff --git a/python/paddle/fluid/tests/unittests/test_conj_op.py b/python/paddle/fluid/tests/unittests/test_conj_op.py index 577d8afbef036ef00c5ba5cf3fe3c51833bcddb0..67647458f5da7eb2b292654b58c6f7972ae93275 100644 --- a/python/paddle/fluid/tests/unittests/test_conj_op.py +++ b/python/paddle/fluid/tests/unittests/test_conj_op.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestConjOp(OpTest): - def setUp(self): self.op_type = "conj" self.python_api = paddle.tensor.conj @@ -39,31 +38,34 @@ class TestConjOp(OpTest): self.dtype = np.complex64 def init_input_output(self): - x = (np.random.random((12, 14)) + 1j * np.random.random( - (12, 14))).astype(self.dtype) + x = ( + np.random.random((12, 14)) + 1j * np.random.random((12, 14)) + ).astype(self.dtype) out = np.conj(x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} def init_grad_input_output(self): - self.grad_out = (np.ones((12, 14)) + 1j * np.ones( - (12, 14))).astype(self.dtype) + self.grad_out = (np.ones((12, 14)) + 1j * np.ones((12, 14))).astype( + self.dtype + ) self.grad_in = np.conj(self.grad_out) def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_in], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_in], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) class TestComplexConjOp(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -72,9 +74,9 @@ class TestComplexConjOp(unittest.TestCase): def test_conj_api(self): for dtype in self._dtypes: - input = rand([ - 2, 20, 2, 3 - ]).astype(dtype) + 1j * rand([2, 20, 2, 3]).astype(dtype) + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3] + ).astype(dtype) for place in self._places: with dg.guard(place): var_x = paddle.to_tensor(input) @@ -84,9 +86,9 @@ class TestComplexConjOp(unittest.TestCase): def test_conj_operator(self): for dtype in self._dtypes: - input = rand([ - 2, 20, 2, 3 - ]).astype(dtype) + 1j * rand([2, 20, 2, 3]).astype(dtype) + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3] + ).astype(dtype) for place in self._places: with dg.guard(place): var_x = paddle.to_tensor(input) @@ -95,21 +97,22 @@ class TestComplexConjOp(unittest.TestCase): np.testing.assert_array_equal(result, target) def test_conj_static_mode(self): - def init_input_output(dtype): - input = rand([ - 2, 20, 2, 3 - ]).astype(dtype) + 1j * rand([2, 20, 2, 3]).astype(dtype) + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3] + ).astype(dtype) return {'x': input}, np.conj(input) for dtype in self._dtypes: input_dict, np_res = init_input_output(dtype) for place in self._places: with static.program_guard(static.Program()): - x_dtype = np.complex64 if dtype == "float32" else np.complex128 - x = static.data(name="x", - shape=[2, 20, 2, 3], - dtype=x_dtype) + x_dtype = ( + np.complex64 if dtype == "float32" else np.complex128 + ) + x = static.data( + name="x", shape=[2, 20, 2, 3], dtype=x_dtype + ) out = paddle.conj(x) exe = static.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_const_value.py b/python/paddle/fluid/tests/unittests/test_const_value.py index 
e346f24ba6849b9cd1ea3f44531b1e843326c8c4..58ac6fa0a9a30a08a831111513777cca59062724 100644 --- a/python/paddle/fluid/tests/unittests/test_const_value.py +++ b/python/paddle/fluid/tests/unittests/test_const_value.py @@ -17,7 +17,6 @@ import paddle.fluid.framework as framework class ConstantTest(unittest.TestCase): - def test_const_value(self): self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD") self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@") diff --git a/python/paddle/fluid/tests/unittests/test_context_manager.py b/python/paddle/fluid/tests/unittests/test_context_manager.py index 93c1d7fa1a499ffdd6c36c7d1cc40fb24e6af35b..9ae5630b0aff92dcac937e2a8a1246989f019a9d 100644 --- a/python/paddle/fluid/tests/unittests/test_context_manager.py +++ b/python/paddle/fluid/tests/unittests/test_context_manager.py @@ -19,7 +19,6 @@ import unittest class TestContextManagerRaiseException(unittest.TestCase): # When exception raised in 'with' context, we should safely exit the context def test_func1(self): - def foo(): with fluid.dygraph.guard(): print("raise error in context manager") diff --git a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py index 0c1ffc6ddbeeb4d00ee85527e8facffcd51bd397..bc0353218443de5350e403479e01654515a91238 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py @@ -21,22 +21,23 @@ import unittest class Conv1DTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=4, - spartial_shape=(16, ), - num_channels=6, - num_filters=8, - filter_size=3, - padding=0, - padding_mode="zeros", - stride=1, - dilation=1, - groups=1, - no_bias=False, - dtype="float32", - data_format="NCL"): + def __init__( + self, + methodName='runTest', + batch_size=4, + spartial_shape=(16,), + num_channels=6, + num_filters=8, + filter_size=3, + padding=0, + padding_mode="zeros", + stride=1, + dilation=1, + groups=1, + no_bias=False, + dtype="float32", + data_format="NCL", + ): super(Conv1DTestCase, self).__init__(methodName) self.batch_size = batch_size self.num_channels = num_channels @@ -44,7 +45,7 @@ class Conv1DTestCase(unittest.TestCase): self.spartial_shape = spartial_shape self.filter_size = filter_size self.data_format = data_format - self.channel_last = (self.data_format == "NLC") + self.channel_last = self.data_format == "NLC" self.padding = padding self.padding_mode = padding_mode @@ -55,23 +56,28 @@ class Conv1DTestCase(unittest.TestCase): self.dtype = dtype def setUp(self): - input_shape = (self.batch_size, self.num_channels - ) + self.spartial_shape if not self.channel_last else ( - self.batch_size, ) + self.spartial_shape + ( - self.num_channels, ) + input_shape = ( + (self.batch_size, self.num_channels) + self.spartial_shape + if not self.channel_last + else (self.batch_size,) + self.spartial_shape + (self.num_channels,) + ) self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.num_filters, self.num_channels - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.num_filters, + self.num_channels // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if not self.no_bias: self.bias = 
np.random.uniform( - -1, 1, size=(self.num_filters, )).astype(self.dtype) + -1, 1, size=(self.num_filters,) + ).astype(self.dtype) else: self.bias = None @@ -80,42 +86,49 @@ class Conv1DTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, self.num_channels, - -1) if not self.channel_last else ( - -1, -1, self.num_channels) + input_shape = ( + (-1, self.num_channels, -1) + if not self.channel_last + else (-1, -1, self.num_channels) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.num_filters, ), - dtype=self.dtype) - y_var = F.conv1d(x_var, - w_var, - b_var if not self.no_bias else None, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.num_filters,), dtype=self.dtype + ) + y_var = F.conv1d( + x_var, + w_var, + b_var if not self.no_bias else None, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): x_var = paddle.to_tensor(self.input) - conv = nn.Conv1D(self.num_channels, - self.num_filters, - self.filter_size, - padding=self.padding, - padding_mode=self.padding_mode, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv1D( + self.num_channels, + self.num_filters, + self.filter_size, + padding=self.padding, + padding_mode=self.padding_mode, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ -139,7 +152,6 @@ class Conv1DTestCase(unittest.TestCase): class Conv1DErrorTestCase(Conv1DTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -148,7 +160,6 @@ class Conv1DErrorTestCase(Conv1DTestCase): class Conv1DTypeErrorTestCase(Conv1DTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -161,58 +172,75 @@ def add_cases(suite): suite.addTest(Conv1DTestCase(methodName='runTest', stride=[1], dilation=2)) suite.addTest(Conv1DTestCase(methodName='runTest', stride=2, dilation=(1))) suite.addTest( - Conv1DTestCase(methodName='runTest', padding="same", no_bias=True)) + Conv1DTestCase(methodName='runTest', padding="same", no_bias=True) + ) suite.addTest( - Conv1DTestCase(methodName='runTest', filter_size=3, padding='valid')) + Conv1DTestCase(methodName='runTest', filter_size=3, padding='valid') + ) suite.addTest( - Conv1DTestCase(methodName='runTest', num_filters=512, padding='valid')) + Conv1DTestCase(methodName='runTest', num_filters=512, padding='valid') + ) suite.addTest( - Conv1DTestCase(methodName='runTest', num_filters=512, padding=[1, 2])) + Conv1DTestCase(methodName='runTest', num_filters=512, padding=[1, 2]) + ) suite.addTest( - Conv1DTestCase(methodName='runTest', padding=2, data_format='NLC')) + Conv1DTestCase(methodName='runTest', padding=2, 
data_format='NLC') + ) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1])) suite.addTest(Conv1DTestCase(methodName='runTest', padding=[1, 2])) suite.addTest( - Conv1DTestCase(methodName='runTest', padding=[1, 2], data_format='NLC')) + Conv1DTestCase(methodName='runTest', padding=[1, 2], data_format='NLC') + ) suite.addTest(Conv1DTestCase(methodName='runTest', padding=2)) suite.addTest(Conv1DTestCase(methodName='runTest')) suite.addTest( - Conv1DTestCase(methodName='runTest', groups=2, padding="valid")) + Conv1DTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv1DTestCase(methodName='runTest', - num_filters=6, - num_channels=3, - groups=3, - padding="valid", - data_format='NLC')) + Conv1DTestCase( + methodName='runTest', + num_filters=6, + num_channels=3, + groups=3, + padding="valid", + data_format='NLC', + ) + ) def add_error_cases(suite): suite.addTest( - Conv1DTypeErrorTestCase(methodName='runTest', - padding_mode="reflect", - padding="valid")) - suite.addTest(Conv1DErrorTestCase(methodName='runTest', - data_format="VALID")) + Conv1DTypeErrorTestCase( + methodName='runTest', padding_mode="reflect", padding="valid" + ) + ) + suite.addTest( + Conv1DErrorTestCase(methodName='runTest', data_format="VALID") + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', padding_mode="VALID")) + Conv1DErrorTestCase(methodName='runTest', padding_mode="VALID") + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', num_channels=5, groups=2)) + Conv1DErrorTestCase(methodName='runTest', num_channels=5, groups=2) + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', - num_filters=8, - num_channels=15, - groups=3)) + Conv1DErrorTestCase( + methodName='runTest', num_filters=8, num_channels=15, groups=3 + ) + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', padding=[1, 2, 3, 4, 5])) + Conv1DErrorTestCase(methodName='runTest', padding=[1, 2, 3, 4, 5]) + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', - padding=[1, 2, 3, 4, 5], - data_format='NLC')) + Conv1DErrorTestCase( + methodName='runTest', padding=[1, 2, 3, 4, 5], data_format='NLC' + ) + ) suite.addTest( - Conv1DErrorTestCase(methodName='runTest', - num_filters=512, - padding=[1, 2, 3, 4, 5])) + Conv1DErrorTestCase( + methodName='runTest', num_filters=512, padding=[1, 2, 3, 4, 5] + ) + ) suite.addTest(Conv1DErrorTestCase(methodName='runTest', dilation=-10)) diff --git a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py index fbd65ebbf7c90d096aa8675c7a76275e64d39334..ec8b7ee6ef78aa92df39dfba4efd74980cd9eb44 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py @@ -21,23 +21,24 @@ import unittest class Conv1DTransposeTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=4, - spartial_shape=16, - in_channels=6, - out_channels=8, - filter_size=3, - output_size=None, - padding=0, - output_padding=0, - stride=1, - dilation=1, - groups=1, - no_bias=False, - data_format="NCL", - dtype="float32"): + def __init__( + self, + methodName='runTest', + batch_size=4, + spartial_shape=16, + in_channels=6, + out_channels=8, + filter_size=3, + output_size=None, + padding=0, + output_padding=0, + stride=1, + dilation=1, + groups=1, + no_bias=False, + data_format="NCL", + dtype="float32", + ): super(Conv1DTransposeTestCase, self).__init__(methodName) self.batch_size = 
batch_size self.in_channels = in_channels @@ -58,25 +59,32 @@ class Conv1DTransposeTestCase(unittest.TestCase): def setUp(self): self.channel_last = False if self.data_format == "NCL" else True - input_shape = (self.batch_size, self.in_channels, - self.spartial_shape) if not self.channel_last else ( - self.batch_size, - self.spartial_shape, - self.in_channels, - ) + input_shape = ( + (self.batch_size, self.in_channels, self.spartial_shape) + if not self.channel_last + else ( + self.batch_size, + self.spartial_shape, + self.in_channels, + ) + ) self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.in_channels, self.out_channels - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.in_channels, + self.out_channels // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if not self.no_bias: self.bias = np.random.uniform( - -1, 1, size=(self.out_channels, )).astype(self.dtype) + -1, 1, size=(self.out_channels,) + ).astype(self.dtype) else: self.bias = None @@ -85,44 +93,51 @@ class Conv1DTransposeTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, self.in_channels, - -1) if not self.channel_last else ( - -1, -1, self.in_channels) + input_shape = ( + (-1, self.in_channels, -1) + if not self.channel_last + else (-1, -1, self.in_channels) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.out_channels, ), - dtype=self.dtype) - y_var = F.conv1d_transpose(x_var, - w_var, - None if self.no_bias else b_var, - output_size=self.output_size, - padding=self.padding, - output_padding=self.output_padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.out_channels,), dtype=self.dtype + ) + y_var = F.conv1d_transpose( + x_var, + w_var, + None if self.no_bias else b_var, + output_size=self.output_size, + padding=self.padding, + output_padding=self.output_padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): x_var = paddle.to_tensor(self.input) - conv = nn.Conv1DTranspose(self.in_channels, - self.out_channels, - self.filter_size, - padding=self.padding, - output_padding=self.output_padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv1DTranspose( + self.in_channels, + self.out_channels, + self.filter_size, + padding=self.padding, + output_padding=self.output_padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ 
-146,7 +161,6 @@ class Conv1DTransposeTestCase(unittest.TestCase): class Conv1DTransposeErrorTestCase(Conv1DTransposeTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -157,64 +171,84 @@ class Conv1DTransposeErrorTestCase(Conv1DTransposeTestCase): def add_cases(suite): suite.addTest(Conv1DTransposeTestCase(methodName='runTest')) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', - stride=[2], - no_bias=True, - dilation=2)) + Conv1DTransposeTestCase( + methodName='runTest', stride=[2], no_bias=True, dilation=2 + ) + ) + suite.addTest( + Conv1DTransposeTestCase( + methodName='runTest', + filter_size=(3), + output_size=[36], + stride=[2], + dilation=2, + ) + ) + suite.addTest( + Conv1DTransposeTestCase(methodName='runTest', stride=2, dilation=(2)) + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', - filter_size=(3), - output_size=[36], - stride=[2], - dilation=2)) + Conv1DTransposeTestCase(methodName='runTest', padding="valid") + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', stride=2, dilation=(2))) - suite.addTest(Conv1DTransposeTestCase(methodName='runTest', - padding="valid")) - suite.addTest(Conv1DTransposeTestCase(methodName='runTest', - padding='valid')) + Conv1DTransposeTestCase(methodName='runTest', padding='valid') + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', filter_size=1, padding=3)) + Conv1DTransposeTestCase(methodName='runTest', filter_size=1, padding=3) + ) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[2])) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', data_format="NLC")) + Conv1DTransposeTestCase(methodName='runTest', data_format="NLC") + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', groups=2, - padding="valid")) + Conv1DTransposeTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', - out_channels=6, - in_channels=3, - groups=3, - padding="valid")) + Conv1DTransposeTestCase( + methodName='runTest', + out_channels=6, + in_channels=3, + groups=3, + padding="valid", + ) + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', - data_format="NLC", - spartial_shape=16, - output_size=18)) + Conv1DTransposeTestCase( + methodName='runTest', + data_format="NLC", + spartial_shape=16, + output_size=18, + ) + ) suite.addTest( - Conv1DTransposeTestCase(methodName='runTest', - data_format="NLC", - stride=3, - output_padding=2)) + Conv1DTransposeTestCase( + methodName='runTest', data_format="NLC", stride=3, output_padding=2 + ) + ) suite.addTest(Conv1DTransposeTestCase(methodName='runTest', padding=[1, 2])) def add_error_cases(suite): suite.addTest( - Conv1DTransposeErrorTestCase(methodName='runTest', - data_format="not_valid")) + Conv1DTransposeErrorTestCase( + methodName='runTest', data_format="not_valid" + ) + ) suite.addTest( - Conv1DTransposeErrorTestCase(methodName='runTest', - in_channels=5, - groups=2)) + Conv1DTransposeErrorTestCase( + methodName='runTest', in_channels=5, groups=2 + ) + ) suite.addTest( - Conv1DTransposeErrorTestCase(methodName='runTest', - stride=2, - output_padding=3)) + Conv1DTransposeErrorTestCase( + methodName='runTest', stride=2, output_padding=3 + ) + ) suite.addTest( - Conv1DTransposeErrorTestCase(methodName='runTest', - output_size="not_valid")) + Conv1DTransposeErrorTestCase( + methodName='runTest', output_size="not_valid" + ) + ) def load_tests(loader, standard_tests, pattern): diff --git 
a/python/paddle/fluid/tests/unittests/test_conv2d_api.py b/python/paddle/fluid/tests/unittests/test_conv2d_api.py index d7b9339c39cfcdfc9b04652f05a3fc312be4cf2c..b95bf27cdc8841c198842fbb7221d4f78a08b281 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_api.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_api.py @@ -23,251 +23,295 @@ import paddle.fluid as fluid class TestConv2DAPI(unittest.TestCase): - def test_api(self): - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") - - fluid.layers.conv2d(input=input_NHWC, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=1, - data_format="NCHW") - - fluid.layers.conv2d(input=input_NCHW, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=[1, 2, 1, 0], - dilation=[1, 1], - groups=1, - data_format="NCHW") - - fluid.layers.conv2d(input=input_NCHW, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=[[0, 0], [0, 0], [1, 1], [1, 1]], - dilation=[1, 1], - groups=1, - data_format="NCHW") - - fluid.layers.conv2d(input=input_NHWC, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=[[0, 0], [1, 1], [1, 1], [0, 0]], - dilation=[1, 1], - groups=1, - data_format="NHWC") - - fluid.layers.conv2d(input=input_NCHW, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding="SAME", - dilation=[1, 1], - groups=1, - data_format="NCHW") - - fluid.layers.conv2d(input=input_NCHW, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding="VALID", - dilation=[1, 1], - groups=1, - data_format="NCHW") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) + + fluid.layers.conv2d( + input=input_NHWC, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) + + fluid.layers.conv2d( + input=input_NCHW, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=[1, 2, 1, 0], + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) + + fluid.layers.conv2d( + input=input_NCHW, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=[[0, 0], [0, 0], [1, 1], [1, 1]], + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) + + fluid.layers.conv2d( + input=input_NHWC, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + dilation=[1, 1], + groups=1, + data_format="NHWC", + ) + + fluid.layers.conv2d( + input=input_NCHW, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding="SAME", + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) + + fluid.layers.conv2d( + input=input_NCHW, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding="VALID", + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) def test_depthwise_conv2d(self): - x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.) 
- conv = paddle.nn.Conv2D(in_channels=4, - out_channels=4, - kernel_size=(3, 3), - groups=4, - data_format='NHWC') + x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1.0, max=1.0) + conv = paddle.nn.Conv2D( + in_channels=4, + out_channels=4, + kernel_size=(3, 3), + groups=4, + data_format='NHWC', + ) y_var = conv(x_var) class TestConv2DAPI_Error(unittest.TestCase): - def test_api(self): - input = fluid.layers.data(name="input", - shape=[2, 5, 5, 5], - append_batch_size=False, - dtype="float32") + input = fluid.layers.data( + name="input", + shape=[2, 5, 5, 5], + append_batch_size=False, + dtype="float32", + ) # ValueError: cudnn def run_1(): - fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=1, - use_cudnn=[0], - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=1, + use_cudnn=[0], + data_format="NCHW", + ) self.assertRaises(ValueError, run_1) # ValueError: data_format def run_2(): - fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=1, - use_cudnn=False, - data_format="NCHWC") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=1, + use_cudnn=False, + data_format="NCHWC", + ) self.assertRaises(ValueError, run_2) # ValueError: padding def run_3(): - fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding="SAMEE", - dilation=[1, 1], - groups=1, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding="SAMEE", + dilation=[1, 1], + groups=1, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_3) def run_4(): - fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=[[0, 1], [0, 1], [0, 1], [0, 1]], - dilation=[1, 1], - groups=1, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=[[0, 1], [0, 1], [0, 1], [0, 1]], + dilation=[1, 1], + groups=1, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_4) def run_5(): - fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=[[0, 1], [0, 1], [0, 1], [0, 1]], - dilation=[1, 1], - groups=1, - use_cudnn=False, - data_format="NHWC") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=[[0, 1], [0, 1], [0, 1], [0, 1]], + dilation=[1, 1], + groups=1, + use_cudnn=False, + data_format="NHWC", + ) self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data(name="x", - shape=[2, 5, 5, -1], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name="x", + shape=[2, 5, 5, -1], + append_batch_size=False, + dtype="float32", + ) def run_6(): - fluid.layers.conv2d(input=x, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=1, - use_cudnn=False, - data_format="NHWC") + fluid.layers.conv2d( + input=x, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=1, + use_cudnn=False, + data_format="NHWC", + ) self.assertRaises(ValueError, run_6) # ValueError: groups def run_7(): 
- fluid.layers.conv2d(input=input, - num_filters=3, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=3, - use_cudnn=False, - data_format="NHWC") + fluid.layers.conv2d( + input=input, + num_filters=3, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=3, + use_cudnn=False, + data_format="NHWC", + ) self.assertRaises(ValueError, run_7) # ValueError: filter num def run_8(): - fluid.layers.conv2d(input=input, - num_filters=0, - filter_size=0, - stride=0, - padding=0, - dilation=0, - groups=1, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=0, + filter_size=0, + stride=0, + padding=0, + dilation=0, + groups=1, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_8) # ValueError: groups def run_9(): - fluid.layers.conv2d(input=input, - num_filters=0, - filter_size=0, - stride=0, - padding=0, - dilation=0, - groups=0, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=0, + filter_size=0, + stride=0, + padding=0, + dilation=0, + groups=0, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_9) # ValueError: stride def run_10(): - fluid.layers.conv2d(input=input, - num_filters=1, - filter_size=1, - stride=0, - padding=0, - dilation=0, - groups=1, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=1, + filter_size=1, + stride=0, + padding=0, + dilation=0, + groups=1, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_10) def test_api_with_error_input(self): - input = fluid.layers.data(name="error_input", - shape=[1], - append_batch_size=False, - dtype="float32") + input = fluid.layers.data( + name="error_input", + shape=[1], + append_batch_size=False, + dtype="float32", + ) # ValueError: cudnn def run_1(): - fluid.layers.conv2d(input=input, - num_filters=0, - filter_size=0, - stride=0, - padding=0, - dilation=0, - groups=0, - use_cudnn=False, - data_format="NCHW") + fluid.layers.conv2d( + input=input, + num_filters=0, + filter_size=0, + stride=0, + padding=0, + dilation=0, + groups=0, + use_cudnn=False, + data_format="NCHW", + ) self.assertRaises(ValueError, run_1) @@ -275,36 +319,44 @@ class TestConv2DAPI_Error(unittest.TestCase): # --------- test environment variable ------ @unittest.skipIf( not (core.is_compiled_with_cuda() or core.is_compiled_with_rocm()), - "core is not compiled with CUDA or ROCM") + "core is not compiled with CUDA or ROCM", +) class TestConv2DEnviron(unittest.TestCase): - def run1(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - inputs = fluid.layers.data(shape=[2, 3, 5, 5], - append_batch_size=False, - name="inputs", - dtype="float32") - result = fluid.layers.conv2d(input=inputs, - num_filters=4, - filter_size=[3, 3], - stride=[1, 1], - padding=0, - dilation=[1, 1], - groups=1, - data_format="NCHW") + inputs = fluid.layers.data( + shape=[2, 3, 5, 5], + append_batch_size=False, + name="inputs", + dtype="float32", + ) + result = fluid.layers.conv2d( + input=inputs, + num_filters=4, + filter_size=[3, 3], + stride=[1, 1], + padding=0, + dilation=[1, 1], + groups=1, + data_format="NCHW", + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - fetches = exe.run(fluid.default_main_program(), - feed={"inputs": self.input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"inputs": self.input_np}, + fetch_list=[result], + ) def run2(self, 
place): with fluid.dygraph.guard(place): inputs = fluid.dygraph.to_variable(self.input_np) - conv = paddle.nn.Conv2D(in_channels=3, - out_channels=4, - kernel_size=(3, 3), - data_format="NCHW") + conv = paddle.nn.Conv2D( + in_channels=3, + out_channels=4, + kernel_size=(3, 3), + data_format="NCHW", + ) result = conv(inputs) def run3(self, place): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py index f92c0c7090ee6c88cc73be11edb6129a58bf6456..7e4a01331578be14582f89c20065ad34c1d980b8 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py @@ -22,9 +22,7 @@ from test_conv2d_op import conv2d_forward_naive def create_test_padding_SAME_class(parent): - class TestPaddingSAMECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -35,9 +33,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -48,7 +44,6 @@ def create_test_padding_VALID_class(parent): class TestConv2DFusionOp(OpTest): - def setUp(self): self.op_type = "conv2d_fusion" self.exhaustive_search = False @@ -71,32 +66,37 @@ class TestConv2DFusionOp(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) bias = np.random.random(self.filter_size[0]).astype(self.dtype) - self.output, _, _, _, _ = conv2d_forward_naive(input, filter, - self.groups, - conv2d_param, - self.padding_algorithm, - self.data_format) + self.output, _, _, _, _ = conv2d_forward_naive( + input, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) self.output = self.output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias) + 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), } if self.add_residual_data: residual_data = np.random.random(self.output.shape).astype( - self.dtype) + self.dtype + ) self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( - residual_data) + residual_data + ) self.output += residual_data # Add bias @@ -114,7 +114,7 @@ class TestConv2DFusionOp(OpTest): 'data_format': self.data_format, 'exhaustive_search': self.exhaustive_search, 'activation': self.activation, - 'padding_algorithm': self.padding_algorithm + 'padding_algorithm': self.padding_algorithm, } if self.split_channels is not None: self.attrs['split_channels'] = self.split_channels @@ -163,32 +163,27 @@ class TestConv2DFusionOp(OpTest): class TestWithoutResidual(TestConv2DFusionOp): - def init_residual(self): self.add_residual_data = False class TestIdentityActivation(TestConv2DFusionOp): - def init_activation(self): self.activation = 'identity' class TestIdentityActivation1(TestConv2DFusionOp): - def init_activation(self): self.activation = 'identity' self.add_residual_data = False class TestWithGroup(TestConv2DFusionOp): - def init_group(self): self.groups = 3 class TestWithDilation(TestConv2DFusionOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -205,13 +200,11 @@ class TestWithDilation(TestConv2DFusionOp): class 
TestCUDNNExhaustiveSearch(TestConv2DFusionOp): - def set_search_method(self): self.exhaustive_search = True class TestMultipleOutputs(TestConv2DFusionOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -228,14 +221,12 @@ class TestMultipleOutputs(TestConv2DFusionOp): class TestAsyPadding(TestConv2DFusionOp): - def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW @@ -249,7 +240,6 @@ class TestWithPad_AsyPadding(TestConv2DFusionOp): class TestWithStride_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW @@ -263,7 +253,6 @@ class TestWithStride_AsyPadding(TestConv2DFusionOp): class TestWith1x1_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -280,13 +269,11 @@ class TestWith1x1_AsyPadding(TestConv2DFusionOp): class TestWithGroup_AsyPadding(TestConv2DFusionOp): - def init_group(self): self.groups = 3 class TestWithDepthWise3x3_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW @@ -306,7 +293,6 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2DFusionOp): class TestWithDepthWise5x5_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW @@ -323,7 +309,6 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2DFusionOp): class TestWithDepthWise7x7_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW @@ -340,7 +325,6 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2DFusionOp): class TestWithDilation_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW @@ -360,7 +344,6 @@ class TestWithDilation_AsyPadding(TestConv2DFusionOp): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DFusionOp): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 1, 1] # NCHW diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py index 8782ee84a26a059acf159d4af659d0401a7e877a..22e6f0a56342c20e6a7fa0081cd3ae9b7d79a2dd 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py @@ -27,22 +27,23 @@ def _reverse_repeat_list(t, n): class Conv2DTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=4, - spartial_shape=(16, 16), - num_channels=6, - num_filters=8, - filter_size=3, - padding=0, - padding_mode='zeros', - stride=1, - dilation=1, - groups=1, - no_bias=False, - data_format="NCHW", - dtype="float32"): + def __init__( + self, + methodName='runTest', + batch_size=4, + spartial_shape=(16, 16), + num_channels=6, + num_filters=8, + filter_size=3, + padding=0, + padding_mode='zeros', + stride=1, + dilation=1, + groups=1, + no_bias=False, + data_format="NCHW", + dtype="float32", + ): super(Conv2DTestCase, self).__init__(methodName) self.batch_size = batch_size self.num_channels = num_channels @@ -53,9 +54,11 @@ class Conv2DTestCase(unittest.TestCase): self.padding = padding if padding_mode in {'reflect', 'replicate', 'circular'}: _paired_padding = fluid.layers.utils.convert_to_list( - padding, 2, 'padding') + padding, 2, 'padding' + ) 
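Another change visible all through the test classes above: black drops the single blank line that yapf left between a class statement and its first method, which is why many hunks here consist only of a removed empty line. A tiny self-contained illustration (the base class is made up for the example):

class _Base:
    groups = 1


# Before: yapf kept a blank line right after the class header.
class TestWithGroupBefore(_Base):

    def init_group(self):
        self.groups = 3


# After: black removes the leading blank line inside the class body.
class TestWithGroupAfter(_Base):
    def init_group(self):
        self.groups = 3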
self._reversed_padding_repeated_twice = _reverse_repeat_list( - _paired_padding, 2) + _paired_padding, 2 + ) self.padding_mode = padding_mode self.stride = stride self.dilation = dilation @@ -67,24 +70,31 @@ class Conv2DTestCase(unittest.TestCase): def setUp(self): self.channel_last = self.data_format == "NHWC" if self.channel_last: - input_shape = (self.batch_size, ) + self.spartial_shape + ( - self.num_channels, ) + input_shape = ( + (self.batch_size,) + self.spartial_shape + (self.num_channels,) + ) else: - input_shape = (self.batch_size, - self.num_channels) + self.spartial_shape + input_shape = ( + self.batch_size, + self.num_channels, + ) + self.spartial_shape self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] * 2 else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.num_filters, self.num_channels - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.num_filters, + self.num_channels // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if not self.no_bias: self.bias = np.random.uniform( - -1, 1, size=(self.num_filters, )).astype(self.dtype) + -1, 1, size=(self.num_filters,) + ).astype(self.dtype) else: self.bias = None @@ -93,8 +103,11 @@ class Conv2DTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1,self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1) + input_shape = ( + (-1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) weight_attr = I.NumpyArrayInitializer(self.weight) if self.bias is None: @@ -102,29 +115,33 @@ class Conv2DTestCase(unittest.TestCase): else: bias_attr = I.NumpyArrayInitializer(self.bias) if self.padding_mode != 'zeros': - x_var = F.pad(x_var, - self._reversed_padding_repeated_twice, - mode=self.padding_mode, - data_format=self.data_format) + x_var = F.pad( + x_var, + self._reversed_padding_repeated_twice, + mode=self.padding_mode, + data_format=self.data_format, + ) padding = 0 else: padding = self.padding - y_var = fluid.layers.conv2d(x_var, - self.num_filters, - self.filter_size, - padding=padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - param_attr=weight_attr, - bias_attr=bias_attr, - data_format=self.data_format) + y_var = fluid.layers.conv2d( + x_var, + self.num_filters, + self.filter_size, + padding=padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + param_attr=weight_attr, + bias_attr=bias_attr, + data_format=self.data_format, + ) feed_dict = {"input": self.input} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -132,52 +149,62 @@ class Conv2DTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1,self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1) + input_shape = ( + (-1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1) + ) x_var = fluid.data("input", 
input_shape, dtype=self.dtype) - w_var = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.num_filters, ), - dtype=self.dtype) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.num_filters,), dtype=self.dtype + ) if self.padding_mode != 'zeros': - x_var = F.pad(x_var, - self._reversed_padding_repeated_twice, - mode=self.padding_mode, - data_format=self.data_format) + x_var = F.pad( + x_var, + self._reversed_padding_repeated_twice, + mode=self.padding_mode, + data_format=self.data_format, + ) padding = 0 else: padding = self.padding - y_var = F.conv2d(x_var, - w_var, - b_var if not self.no_bias else None, - padding=padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y_var = F.conv2d( + x_var, + w_var, + b_var if not self.no_bias else None, + padding=padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): x_var = paddle.to_tensor(self.input) x_var.stop_gradient = False - conv = nn.Conv2D(self.num_channels, - self.num_filters, - self.filter_size, - padding=self.padding, - padding_mode=self.padding_mode, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv2D( + self.num_channels, + self.num_filters, + self.filter_size, + padding=self.padding, + padding_mode=self.padding_mode, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ -210,7 +237,6 @@ class Conv2DTestCase(unittest.TestCase): class Conv2DErrorTestCase(Conv2DTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -221,63 +247,86 @@ class Conv2DErrorTestCase(Conv2DTestCase): def add_cases(suite): suite.addTest(Conv2DTestCase(methodName='runTest')) suite.addTest( - Conv2DTestCase(methodName='runTest', stride=[1, 2], dilation=2)) + Conv2DTestCase(methodName='runTest', stride=[1, 2], dilation=2) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', stride=2, dilation=(2, 1))) + Conv2DTestCase(methodName='runTest', stride=2, dilation=(2, 1)) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', padding="same", no_bias=True)) + Conv2DTestCase(methodName='runTest', padding="same", no_bias=True) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', - filter_size=(3, 3), - padding='valid')) + Conv2DTestCase( + methodName='runTest', filter_size=(3, 3), padding='valid' + ) + ) suite.addTest(Conv2DTestCase(methodName='runTest', padding=(2, 3))) suite.addTest(Conv2DTestCase(methodName='runTest', padding=[1, 2, 2, 1])) suite.addTest( - Conv2DTestCase(methodName='runTest', - padding=[[0, 0], [0, 0], [1, 2], [2, 1]])) + Conv2DTestCase( + methodName='runTest', padding=[[0, 0], [0, 0], [1, 2], [2, 1]] + ) + ) suite.addTest(Conv2DTestCase(methodName='runTest', data_format="NHWC")) suite.addTest( - Conv2DTestCase(methodName='runTest', - data_format="NHWC", - padding=[[0, 0], [1, 1], [2, 2], [0, 0]])) + Conv2DTestCase( + methodName='runTest', + data_format="NHWC", + padding=[[0, 
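As part of the black conversion, the `y_np, = exe.run(...)` lines above become `(y_np,) = exe.run(...)`: single-target tuple unpacking gains explicit parentheses, which is easier to spot than a lone trailing comma. Both forms are equivalent Python; a small sketch with a stand-in for exe.run (the fetch_outputs name is mine):

def fetch_outputs():
    # Stand-in for exe.run(..., fetch_list=[y_var]), which returns one
    # array per fetched variable in a list.
    return [42]


y_np, = fetch_outputs()      # pre-black spelling
(y_np,) = fetch_outputs()    # black's spelling
assert y_np == 42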
0], [1, 1], [2, 2], [0, 0]], + ) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', groups=2, padding="valid")) + Conv2DTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv2DTestCase(methodName='runTest', - num_filters=6, - num_channels=3, - groups=3, - padding="valid")) + Conv2DTestCase( + methodName='runTest', + num_filters=6, + num_channels=3, + groups=3, + padding="valid", + ) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', - filter_size=(3, 3), - padding=1, - padding_mode='reflect')) + Conv2DTestCase( + methodName='runTest', + filter_size=(3, 3), + padding=1, + padding_mode='reflect', + ) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', - filter_size=(3, 3), - padding=1, - padding_mode='replicate')) + Conv2DTestCase( + methodName='runTest', + filter_size=(3, 3), + padding=1, + padding_mode='replicate', + ) + ) suite.addTest( - Conv2DTestCase(methodName='runTest', - filter_size=(3, 3), - padding=1, - padding_mode='circular')) + Conv2DTestCase( + methodName='runTest', + filter_size=(3, 3), + padding=1, + padding_mode='circular', + ) + ) def add_error_cases(suite): suite.addTest( - Conv2DErrorTestCase(methodName='runTest', num_channels=5, groups=2)) + Conv2DErrorTestCase(methodName='runTest', num_channels=5, groups=2) + ) suite.addTest( - Conv2DErrorTestCase(methodName='runTest', - num_channels=5, - groups=2, - stride=0)) + Conv2DErrorTestCase( + methodName='runTest', num_channels=5, groups=2, stride=0 + ) + ) suite.addTest( - Conv2DErrorTestCase(methodName='runTest', - num_channels=5, - groups=2, - padding=[-1, -1])) + Conv2DErrorTestCase( + methodName='runTest', num_channels=5, groups=2, padding=[-1, -1] + ) + ) def load_tests(loader, standard_tests, pattern): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 056a7c52efb9f8687c924ba98c7506bac1d2ca6e..34a34f062efb8b7684a2e6c5da87729e43038b1d 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -19,29 +19,36 @@ import paddle import paddle import paddle.fluid.core as core import paddle.fluid as fluid -from paddle.fluid.tests.unittests.op_test import (OpTest, - convert_float_to_uint16, - get_numeric_gradient) +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + get_numeric_gradient, +) from paddle.fluid.tests.unittests.testsuite import create_op from paddle.fluid import Program, program_guard -def conv2d_forward_naive(input, - filter, - group, - conv_param, - padding_algorithm='EXPLICIT', - data_format='NCHW'): +def conv2d_forward_naive( + input, + filter, + group, + conv_param, + padding_algorithm='EXPLICIT', + data_format='NCHW', +): if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Unknown Attr(data_format): '%s' ." - "It can only be 'NCHW' or 'NHWC'." % str(data_format)) + raise ValueError( + "Unknown Attr(data_format): '%s' ." + "It can only be 'NCHW' or 'NHWC'." 
% str(data_format) + ) - channel_last = (data_format == "NHWC") + channel_last = data_format == "NHWC" if channel_last: input = np.transpose(input, [0, 3, 1, 2]) @@ -54,17 +61,22 @@ def conv2d_forward_naive(input, sub_out_c = out_c // group sub_f_n = f_n // group - stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[ - 'dilation'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilation'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -84,39 +96,52 @@ def conv2d_forward_naive(input, if len(pad) == 4: pad_h_0, pad_h_1 = pad[0], pad[1] pad_w_0, pad_w_1 = pad[2], pad[3] - out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * - (f_h - 1) + 1)) // stride[0] - out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * - (f_w - 1) + 1)) // stride[1] + out_h = ( + 1 + + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * (f_h - 1) + 1)) + // stride[0] + ) + out_w = ( + 1 + + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * (f_w - 1) + 1)) + // stride[1] + ) out = np.zeros((out_n, out_c, out_h, out_w)) - d_bolck_h = (dilation[0] * (f_h - 1) + 1) - d_bolck_w = (dilation[1] * (f_w - 1) + 1) + d_bolck_h = dilation[0] * (f_h - 1) + 1 + d_bolck_w = dilation[1] * (f_w - 1) + 1 - input_pad = np.pad(input, - ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), - mode='constant', - constant_values=0) + input_pad = np.pad( + input, + ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), + mode='constant', + constant_values=0, + ) filter_dilation = np.zeros((f_n, f_c, d_bolck_h, d_bolck_w)) - filter_dilation[:, :, 0:d_bolck_h:dilation[0], - 0:d_bolck_w:dilation[1]] = filter + filter_dilation[ + :, :, 0 : d_bolck_h : dilation[0], 0 : d_bolck_w : dilation[1] + ] = filter for i in range(out_h): for j in range(out_w): for g in range(group): - input_pad_masked = \ - input_pad[:, g * f_c:(g + 1) * f_c, - i * stride[0]:i * stride[0] + d_bolck_h, - j * stride[1]:j * stride[1] + d_bolck_w] - - f_sub = filter_dilation[g * sub_f_n:(g + 1) * sub_f_n, :, :, :] + input_pad_masked = input_pad[ + :, + g * f_c : (g + 1) * f_c, + i * stride[0] : i * stride[0] + d_bolck_h, + j * stride[1] : j * stride[1] + d_bolck_w, + ] + + f_sub = filter_dilation[ + g * sub_f_n : (g + 1) * sub_f_n, :, :, : + ] # sub_f_n == sub_out_c for k in range(sub_out_c): # Multiplication of Corresponding Elements, then sum all - out[:, g * sub_out_c + k, i, j] = \ - np.sum(input_pad_masked * f_sub[k, :, :, :], - axis=(1, 2, 3)) + out[:, g * sub_out_c + k, i, j] = np.sum( + input_pad_masked * f_sub[k, :, :, :], axis=(1, 2, 3) + ) if channel_last: out = np.transpose(out, [0, 2, 3, 1]) @@ -125,15 +150,15 @@ def conv2d_forward_naive(input, def create_test_cudnn_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if 
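conv2d_forward_naive above is the reference the reformatted tests compare the operator against, and its arithmetic is conventional: SAME padding chooses the total padding so the output has ceil(input/stride) positions, and the explicit-padding branch sizes the output using the dilated kernel extent dilation*(k-1)+1. A small restatement of the same formulas outside the diff, with helper names of my own:

def same_padding_1d(input_size, filter_size, stride):
    # Mirrors _get_padding_with_SAME above for one spatial dimension.
    out_size = int((input_size + stride - 1) / stride)  # ceil(input / stride)
    pad_sum = max((out_size - 1) * stride + filter_size - input_size, 0)
    pad_0 = pad_sum // 2
    pad_1 = pad_sum - pad_0
    return pad_0, pad_1


def conv_out_size_1d(input_size, filter_size, stride, pad_0, pad_1, dilation):
    # Same formula as out_h / out_w above: a dilated kernel spans
    # dilation * (k - 1) + 1 input positions.
    effective_filter = dilation * (filter_size - 1) + 1
    return 1 + (input_size + pad_0 + pad_1 - effective_filter) // stride


# Example: width 10, kernel 3, stride 2 -> SAME pads (0, 1) and yields width 5.
pad_0, pad_1 = same_padding_1d(10, 3, 2)
assert (pad_0, pad_1) == (0, 1)
assert conv_out_size_1d(10, 3, 2, pad_0, pad_1, dilation=1) == 5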
core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNN") TestCUDNNCase.__name__ = cls_name @@ -141,11 +166,10 @@ def create_test_cudnn_class(parent): def create_test_cudnn_fp16_class(parent, grad_check=True): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestConv2DCUDNNFp16(parent): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -159,16 +183,16 @@ def create_test_cudnn_fp16_class(parent, grad_check=True): def test_check_grad_no_filter(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: - self.check_grad_with_place(place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad_no_input(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: - self.check_grad_with_place(place, ['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad_with_place( + place, ['Filter'], 'Output', no_grad_set=set(['Input']) + ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16") TestConv2DCUDNNFp16.__name__ = cls_name @@ -176,20 +200,21 @@ def create_test_cudnn_fp16_class(parent, grad_check=True): def create_test_cudnn_bf16_class(parent): - @unittest.skipIf( not core.is_compiled_with_cuda() or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA and do not support bfloat16") + "core is not compiled with CUDA and do not support bfloat16", + ) class TestConv2DCUDNNBF16(parent): - def get_numeric_grad(self, place, check_name): scope = core.Scope() self._check_grad_helper() - op = create_op(scope, self.op_type, self.inputs, self.outputs, - self.attrs) - return get_numeric_gradient(place, scope, op, self.inputs_fp32, - check_name, ['Output']) + op = create_op( + scope, self.op_type, self.inputs, self.outputs, self.attrs + ) + return get_numeric_gradient( + place, scope, op, self.inputs_fp32, check_name, ['Output'] + ) def init_kernel_type(self): self.use_cudnn = True @@ -203,18 +228,24 @@ def create_test_cudnn_bf16_class(parent): def test_check_grad_no_filter(self): place = core.CUDAPlace(0) numeric_grads = self.get_numeric_grad(place, 'Input') - self.check_grad_with_place(place, ['Input'], - 'Output', - no_grad_set=set(['Filter']), - user_defined_grads=[numeric_grads]) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + no_grad_set=set(['Filter']), + user_defined_grads=[numeric_grads], + ) def test_check_grad_no_input(self): place = core.CUDAPlace(0) numeric_grads = self.get_numeric_grad(place, 'Filter') - self.check_grad_with_place(place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - user_defined_grads=[numeric_grads]) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + user_defined_grads=[numeric_grads], + ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNBF16") TestConv2DCUDNNBF16.__name__ = cls_name @@ -222,9 +253,7 @@ def create_test_cudnn_bf16_class(parent): def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NHWC" @@ -238,15 +267,15 @@ def create_test_channel_last_class(parent): def create_test_cudnn_channel_last_class(parent): - - 
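create_test_cudnn_bf16_class above supplies user_defined_grads computed by get_numeric_gradient from the float32 copy of the inputs, presumably because bfloat16 is too coarse for finite differences taken against the op's own inputs. The underlying idea is ordinary central differencing; a framework-free sketch of that idea (function names are mine, not Paddle's):

import numpy as np


def numeric_gradient(f, x, eps=1e-3):
    # Central finite differences of a scalar-valued f with respect to x.
    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps
        f_plus = f(x)
        flat_x[i] = orig - eps
        f_minus = f(x)
        flat_x[i] = orig
        flat_g[i] = (f_plus - f_minus) / (2.0 * eps)
    return grad


x = np.array([1.0, 2.0, 3.0])
g = numeric_gradient(lambda v: float(np.sum(v**2)), x)
assert np.allclose(g, 2.0 * x, atol=1e-4)  # d/dx sum(x^2) = 2x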
@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCudnnChannelLastCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_data_format(self): self.data_format = "NHWC" @@ -261,11 +290,10 @@ def create_test_cudnn_channel_last_class(parent): def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCudnnChannelLastFp16(parent): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -279,16 +307,16 @@ def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True): def test_check_grad_no_filter(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: - self.check_grad_with_place(place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad_no_input(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: - self.check_grad_with_place(place, ['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad_with_place( + place, ['Filter'], 'Output', no_grad_set=set(['Input']) + ) def init_data_format(self): self.data_format = "NHWC" @@ -303,9 +331,7 @@ def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -316,9 +342,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -329,15 +353,15 @@ def create_test_padding_VALID_class(parent): def create_test_cudnn_padding_SAME_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingSMAECase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_paddings(self): self.pad = [1, 1] @@ -349,15 +373,15 @@ def create_test_cudnn_padding_SAME_class(parent): def create_test_cudnn_padding_VALID_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingVALIDCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_paddings(self): self.pad = [1, 1] @@ -369,7 +393,6 @@ def create_test_cudnn_padding_VALID_class(parent): class TestConv2DOp(OpTest): - def setUp(self): self.op_type = "conv2d" self.use_cudnn = False @@ -387,17 +410,19 @@ class 
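Much of this file is the create_test_*_class factories being reflowed. The pattern they implement is: build a subclass of the given parent inside the factory, override one or two init_* hooks, rename it so unittest reports a distinct test name, and register it at module scope. The registration line sits in unchanged context and is not visible in these hunks, so the globals() call below is an assumption about how Paddle's op tests usually do it rather than a quotation; everything else mirrors the hunks above:

import unittest


class BaseCase(unittest.TestCase):
    padding_algorithm = "EXPLICIT"

    def init_paddings(self):
        pass

    def test_padding(self):
        self.init_paddings()
        self.assertIn(self.padding_algorithm, {"EXPLICIT", "SAME", "VALID"})


def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase  # assumed registration step


create_test_padding_SAME_class(BaseCase)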
TestConv2DOp(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } if self.is_bfloat16_op(): input = np.random.random(self.input_size).astype(np.float32) - filter = np.random.uniform(-1, 1, - self.filter_size).astype(np.float32) + filter = np.random.uniform(-1, 1, self.filter_size).astype( + np.float32 + ) else: input = np.random.random(self.input_size).astype(self.dtype) - filter = np.random.uniform(-1, 1, - self.filter_size).astype(self.dtype) + filter = np.random.uniform(-1, 1, self.filter_size).astype( + self.dtype + ) if not self.has_cuda(): self.fuse_relu_before_depthwise_conv = False @@ -409,24 +434,25 @@ class TestConv2DOp(OpTest): else: input2 = input - output, _, _, _, _ = conv2d_forward_naive(input2, filter, self.groups, - conv2d_param) + output, _, _, _, _ = conv2d_forward_naive( + input2, filter, self.groups, conv2d_param + ) if self.is_bfloat16_op(): output = output.astype(np.float32) self.inputs = { 'Input': convert_float_to_uint16(input), - 'Filter': convert_float_to_uint16(filter) + 'Filter': convert_float_to_uint16(filter), } self.inputs_fp32 = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } else: output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { @@ -437,56 +463,71 @@ class TestConv2DOp(OpTest): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, - 'fuse_relu_before_depthwise_conv': - self.fuse_relu_before_depthwise_conv, - 'exhaustive_search': self.exhaustive_search + 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, + 'exhaustive_search': self.exhaustive_search, } self.outputs = {'Output': output} def has_cuda(self): - return core.is_compiled_with_cuda() and (self.use_cudnn - or self.use_cuda) + return core.is_compiled_with_cuda() and ( + self.use_cudnn or self.use_cuda + ) def test_check_output(self): place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output_with_place(place, - atol=1e-5, - check_dygraph=(self.use_mkldnn == False)) + self.check_output_with_place( + place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + ) def test_check_grad(self): - if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if self.dtype == np.float16 or ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad_with_place(place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.02, + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_filter(self): - if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if self.dtype == np.float16 or ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - 
self.check_grad_with_place(place, ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter']), + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_input(self): - if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if self.dtype == np.float16 or ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad_with_place(place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + check_dygraph=(self.use_mkldnn == False), + ) def init_test_case(self): self.pad = [0, 0] @@ -510,7 +551,6 @@ class TestConv2DOp(OpTest): class TestWithPad(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -521,7 +561,6 @@ class TestWithPad(TestConv2DOp): class TestWithStride(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -532,7 +571,6 @@ class TestWithStride(TestConv2DOp): class TestWithGroup(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -544,7 +582,6 @@ class TestWithGroup(TestConv2DOp): class TestWith1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -558,7 +595,6 @@ class TestWith1x1(TestConv2DOp): class TestWithDepthWise3x3(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -575,7 +611,6 @@ class TestWithDepthWise3x3(TestConv2DOp): class TestWithDepthWise5x5(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -589,7 +624,6 @@ class TestWithDepthWise5x5(TestConv2DOp): class TestWithDepthWise7x7(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -603,7 +637,6 @@ class TestWithDepthWise7x7(TestConv2DOp): class TestWithDilation(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -620,7 +653,6 @@ class TestWithDilation(TestConv2DOp): class TestWithInput1x1Filter1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -642,7 +674,7 @@ create_test_cudnn_class(TestWithGroup) create_test_cudnn_class(TestWith1x1) create_test_cudnn_class(TestWithInput1x1Filter1x1) -#----------------Conv2DCUDNN fp16---------------- +# ----------------Conv2DCUDNN fp16---------------- create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False) create_test_cudnn_fp16_class(TestWithPad, grad_check=False) @@ -651,7 +683,7 @@ create_test_cudnn_fp16_class(TestWithGroup, grad_check=False) create_test_cudnn_fp16_class(TestWith1x1, grad_check=False) create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False) -#----------------Conv2DCUDNN bf16---------------- +# ----------------Conv2DCUDNN bf16---------------- create_test_cudnn_bf16_class(TestConv2DOp) create_test_cudnn_bf16_class(TestWithPad) @@ -662,7 +694,6 @@ create_test_cudnn_bf16_class(TestWithInput1x1Filter1x1) class TestCUDNNExhaustiveSearch(TestConv2DOp): - def init_kernel_type(self): self.use_cudnn = True self.exhaustive_search = True @@ -670,14 +701,14 @@ class TestCUDNNExhaustiveSearch(TestConv2DOp): class 
TestConv2DOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of conv2d must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) fluid.layers.conv2d(x1, 1, 1) self.assertRaises(TypeError, test_Variable) @@ -685,9 +716,9 @@ class TestConv2DOpError(unittest.TestCase): def test_dtype(): # the input dtype of conv2d must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="int32") + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="int32" + ) fluid.layers.conv2d(x2, 1, 1) self.assertRaises(TypeError, test_dtype) @@ -703,7 +734,6 @@ class TestConv2DOpError(unittest.TestCase): class TestConv2DOp_v2(OpTest): - def setUp(self): self.op_type = "conv2d" self.use_cudnn = False @@ -723,7 +753,7 @@ class TestConv2DOp_v2(OpTest): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) @@ -737,15 +767,19 @@ class TestConv2DOp_v2(OpTest): else: input2 = input filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input2, filter, self.groups, - conv2d_param, - self.padding_algorithm, - self.data_format) + output, _, _, _, _ = conv2d_forward_naive( + input2, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -756,53 +790,62 @@ class TestConv2DOp_v2(OpTest): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, - 'fuse_relu_before_depthwise_conv': - self.fuse_relu_before_depthwise_conv, - 'exhaustive_search': self.exhaustive_search + 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, + 'exhaustive_search': self.exhaustive_search, } self.outputs = {'Output': output} def has_cuda(self): - return core.is_compiled_with_cuda() and (self.use_cudnn - or self.use_cuda) + return core.is_compiled_with_cuda() and ( + self.use_cudnn or self.use_cuda + ) def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - self.check_output_with_place(place, - atol=1e-5, - check_dygraph=(self.use_mkldnn == False)) + self.check_output_with_place( + place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + ) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - self.check_grad_with_place(place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.02, + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_filter(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - self.check_grad_with_place(place, 
['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter']), + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_input(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() - self.check_grad_with_place(place, ['Filter'], - 'Output', - no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + no_grad_set=set(['Input']), + check_dygraph=(self.use_mkldnn == False), + ) def init_test_case(self): self.pad = [0, 0] @@ -833,14 +876,12 @@ class TestConv2DOp_v2(OpTest): class TestConv2DOp_AsyPadding(TestConv2DOp_v2): - def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -854,7 +895,6 @@ class TestWithPad_AsyPadding(TestConv2DOp_v2): class TestWithStride_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW @@ -868,7 +908,6 @@ class TestWithStride_AsyPadding(TestConv2DOp_v2): class TestWithGroup_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 2] @@ -880,7 +919,6 @@ class TestWithGroup_AsyPadding(TestConv2DOp_v2): class TestWith1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -897,7 +935,6 @@ class TestWith1x1_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW @@ -917,7 +954,6 @@ class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW @@ -934,7 +970,6 @@ class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW @@ -951,7 +986,6 @@ class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): class TestWithDilation_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW @@ -971,7 +1005,6 @@ class TestWithDilation_AsyPadding(TestConv2DOp_v2): class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [40, 3, 1, 1] # NCHW @@ -994,7 +1027,7 @@ create_test_cudnn_class(TestWithGroup_AsyPadding) create_test_cudnn_class(TestWith1x1_AsyPadding) create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding) -#---------- test SAME VALID ----------- +# ---------- test SAME VALID ----------- create_test_padding_SAME_class(TestConv2DOp_AsyPadding) create_test_padding_SAME_class(TestWithPad_AsyPadding) create_test_padding_SAME_class(TestWithStride_AsyPadding) @@ -1032,16 +1065,21 @@ create_test_cudnn_channel_last_class(TestWithStride_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding) create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding) -create_test_cudnn_channel_last_fp16_class(TestConv2DOp_AsyPadding, - grad_check=False) 
-create_test_cudnn_channel_last_fp16_class(TestWithPad_AsyPadding, - grad_check=False) -create_test_cudnn_channel_last_fp16_class(TestWithStride_AsyPadding, - grad_check=False) -create_test_cudnn_channel_last_fp16_class(TestWithGroup_AsyPadding, - grad_check=False) -create_test_cudnn_channel_last_fp16_class(TestWithDilation_AsyPadding, - grad_check=False) +create_test_cudnn_channel_last_fp16_class( + TestConv2DOp_AsyPadding, grad_check=False +) +create_test_cudnn_channel_last_fp16_class( + TestWithPad_AsyPadding, grad_check=False +) +create_test_cudnn_channel_last_fp16_class( + TestWithStride_AsyPadding, grad_check=False +) +create_test_cudnn_channel_last_fp16_class( + TestWithGroup_AsyPadding, grad_check=False +) +create_test_cudnn_channel_last_fp16_class( + TestWithDilation_AsyPadding, grad_check=False +) if __name__ == '__main__': paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py b/python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py index 73f9b9e6ced53f4069f305792f89fdb88ecc1bcd..d6abdc0bcf41f1ead0b7671531d6b3670a13ec46 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op_depthwise_conv.py @@ -19,13 +19,20 @@ import paddle paddle.enable_static() import paddle.fluid.core as core -from test_conv2d_op import TestConv2DOp, TestConv2DOp_v2, create_test_padding_SAME_class, create_test_padding_VALID_class, create_test_channel_last_class, create_test_cudnn_padding_SAME_class, create_test_cudnn_channel_last_class +from test_conv2d_op import ( + TestConv2DOp, + TestConv2DOp_v2, + create_test_padding_SAME_class, + create_test_padding_VALID_class, + create_test_channel_last_class, + create_test_cudnn_padding_SAME_class, + create_test_cudnn_channel_last_class, +) -#----------------TestDepthwiseConv ----- +# ----------------TestDepthwiseConv ----- class TestDepthwiseConv(TestConv2DOp): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -39,7 +46,6 @@ class TestDepthwiseConv(TestConv2DOp): class TestDepthwiseConv2(TestConv2DOp): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -53,7 +59,6 @@ class TestDepthwiseConv2(TestConv2DOp): class TestDepthwiseConv3(TestConv2DOp): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -67,7 +72,6 @@ class TestDepthwiseConv3(TestConv2DOp): class TestDepthwiseConvWithDilation(TestConv2DOp): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -82,7 +86,6 @@ class TestDepthwiseConvWithDilation(TestConv2DOp): class TestDepthwiseConvWithDilation2(TestConv2DOp): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -97,7 +100,6 @@ class TestDepthwiseConvWithDilation2(TestConv2DOp): class TestDepthwiseConvandFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -112,7 +114,6 @@ class TestDepthwiseConvandFuse(TestConv2DOp): class TestDepthwiseConv2andFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -127,7 +128,6 @@ class TestDepthwiseConv2andFuse(TestConv2DOp): class TestDepthwiseConv3andFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -142,7 +142,6 @@ class TestDepthwiseConv3andFuse(TestConv2DOp): class TestDepthwiseConvWithDilationandFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True 
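The TestDepthwiseConv* cases that begin here exercise the depthwise special case of grouped convolution, where groups equals the number of input channels so each filter slice sees exactly one input channel. That is consistent with the naive implementations elsewhere in this patch, in which the filter's second dimension is in_channels // groups (the f_c used to slice input_pad). A shape-level sketch with made-up numbers:

in_channels, groups, kernel = 8, 8, 3
multiplier = 2  # output channels produced per input channel
filter_shape = (in_channels * multiplier, in_channels // groups, kernel, kernel)
assert filter_shape == (16, 1, 3, 3)  # depthwise: one input channel per filter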
self.use_cuda = True @@ -158,7 +157,6 @@ class TestDepthwiseConvWithDilationandFuse(TestConv2DOp): class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -174,7 +172,6 @@ class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp): class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = True self.stride = [2, 2] @@ -191,7 +188,6 @@ class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = True self.stride = [1, 1] @@ -208,7 +204,6 @@ class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = True self.stride = [1, 1] @@ -225,7 +220,6 @@ class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -244,7 +238,6 @@ class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = True self.pad = [1, 1] @@ -263,7 +256,6 @@ class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -282,7 +274,6 @@ class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -301,7 +292,6 @@ class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -320,7 +310,6 @@ class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -340,7 +329,6 @@ class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2): class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True @@ -382,12 +370,14 @@ create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) if core.is_compiled_with_rocm(): create_test_cudnn_padding_SAME_class(TestDepthwiseConv_AsyPadding) create_test_cudnn_padding_SAME_class( - TestDepthwiseConvWithDilation_AsyPadding) + TestDepthwiseConvWithDilation_AsyPadding + ) create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding) create_test_cudnn_channel_last_class(TestDepthwiseConv_AsyPadding) create_test_cudnn_channel_last_class( - TestDepthwiseConvWithDilation2_AsyPadding) + TestDepthwiseConvWithDilation2_AsyPadding + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py index 74d50c545c658a19965225e3679f687b3b223ffd..8e6bdefc5adeb48e58780aa1913398bc31d121f8 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py +++ 
b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py @@ -21,23 +21,24 @@ import unittest class Conv2DTransposeTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=4, - spartial_shape=(16, 16), - num_channels=6, - num_filters=8, - filter_size=3, - output_size=None, - output_padding=0, - padding=0, - stride=1, - dilation=1, - groups=1, - no_bias=False, - data_format="NCHW", - dtype="float32"): + def __init__( + self, + methodName='runTest', + batch_size=4, + spartial_shape=(16, 16), + num_channels=6, + num_filters=8, + filter_size=3, + output_size=None, + output_padding=0, + padding=0, + stride=1, + dilation=1, + groups=1, + no_bias=False, + data_format="NCHW", + dtype="float32", + ): super(Conv2DTransposeTestCase, self).__init__(methodName) self.batch_size = batch_size self.num_channels = num_channels @@ -58,24 +59,31 @@ class Conv2DTransposeTestCase(unittest.TestCase): def setUp(self): self.channel_last = self.data_format == "NHWC" if self.channel_last: - input_shape = (self.batch_size, ) + self.spartial_shape + ( - self.num_channels, ) + input_shape = ( + (self.batch_size,) + self.spartial_shape + (self.num_channels,) + ) else: - input_shape = (self.batch_size, - self.num_channels) + self.spartial_shape + input_shape = ( + self.batch_size, + self.num_channels, + ) + self.spartial_shape self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] * 2 else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.num_channels, self.num_filters - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.num_channels, + self.num_filters // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if not self.no_bias: self.bias = np.random.uniform( - -1, 1, size=(self.num_filters, )).astype(self.dtype) + -1, 1, size=(self.num_filters,) + ).astype(self.dtype) else: self.bias = None @@ -84,8 +92,11 @@ class Conv2DTransposeTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1,self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1) + input_shape = ( + (-1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) weight_attr = I.NumpyArrayInitializer(self.weight) if self.bias is None: @@ -104,11 +115,12 @@ class Conv2DTransposeTestCase(unittest.TestCase): groups=self.groups, param_attr=weight_attr, bias_attr=bias_attr, - data_format=self.data_format) + data_format=self.data_format, + ) feed_dict = {"input": self.input} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -116,36 +128,42 @@ class Conv2DTransposeTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1,self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1) + input_shape = ( + (-1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = 
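The setUp above builds the input shape differently for channels-last and channels-first layouts, using the constructor defaults batch_size=4, spartial_shape=(16, 16), num_channels=6. A compact restatement of that shape logic with plain local names:

batch_size, spatial_shape, num_channels = 4, (16, 16), 6

nhwc_shape = (batch_size,) + spatial_shape + (num_channels,)   # channels last
nchw_shape = (batch_size, num_channels) + spatial_shape        # channels first
assert nhwc_shape == (4, 16, 16, 6)
assert nchw_shape == (4, 6, 16, 16)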
fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.num_filters, ), - dtype=self.dtype) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.num_filters,), dtype=self.dtype + ) if self.output_padding != 0: output_size = None else: output_size = self.output_size - y_var = F.conv2d_transpose(x_var, - w_var, - None if self.no_bias else b_var, - output_size=output_size, - padding=self.padding, - output_padding=self.output_padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y_var = F.conv2d_transpose( + x_var, + w_var, + None if self.no_bias else b_var, + output_size=output_size, + padding=self.padding, + output_padding=self.output_padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): @@ -156,15 +174,17 @@ class Conv2DTransposeTestCase(unittest.TestCase): else: output_size = self.output_size - conv = nn.Conv2DTranspose(self.num_channels, - self.num_filters, - self.filter_size, - padding=self.padding, - output_padding=self.output_padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv2DTranspose( + self.num_channels, + self.num_filters, + self.filter_size, + padding=self.padding, + output_padding=self.output_padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ -194,7 +214,6 @@ class Conv2DTransposeTestCase(unittest.TestCase): class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -205,46 +224,61 @@ class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase): def add_cases(suite): suite.addTest(Conv2DTransposeTestCase(methodName='runTest')) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - stride=[1, 2], - no_bias=True, - dilation=2)) + Conv2DTransposeTestCase( + methodName='runTest', stride=[1, 2], no_bias=True, dilation=2 + ) + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - filter_size=(3, 3), - output_size=[20, 36], - stride=[1, 2], - dilation=2)) + Conv2DTransposeTestCase( + methodName='runTest', + filter_size=(3, 3), + output_size=[20, 36], + stride=[1, 2], + dilation=2, + ) + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', stride=2, - dilation=(2, 1))) - suite.addTest(Conv2DTransposeTestCase(methodName='runTest', - padding="valid")) + Conv2DTransposeTestCase(methodName='runTest', stride=2, dilation=(2, 1)) + ) + suite.addTest( + Conv2DTransposeTestCase(methodName='runTest', padding="valid") + ) suite.addTest(Conv2DTransposeTestCase(methodName='runTest', padding="same")) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - filter_size=1, - padding=(2, 3))) + Conv2DTransposeTestCase( + methodName='runTest', filter_size=1, padding=(2, 3) + ) + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', padding=[1, 2, 2, 1])) + Conv2DTransposeTestCase(methodName='runTest', padding=[1, 2, 2, 1]) + ) 
suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - padding=[[0, 0], [0, 0], [1, 2], [2, 1]])) + Conv2DTransposeTestCase( + methodName='runTest', padding=[[0, 0], [0, 0], [1, 2], [2, 1]] + ) + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', data_format="NHWC")) + Conv2DTransposeTestCase(methodName='runTest', data_format="NHWC") + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - data_format="NHWC", - padding=[[0, 0], [1, 1], [2, 2], [0, 0]])) + Conv2DTransposeTestCase( + methodName='runTest', + data_format="NHWC", + padding=[[0, 0], [1, 1], [2, 2], [0, 0]], + ) + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', groups=2, - padding="valid")) + Conv2DTransposeTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv2DTransposeTestCase(methodName='runTest', - num_filters=6, - num_channels=3, - groups=3, - padding="valid")) + Conv2DTransposeTestCase( + methodName='runTest', + num_filters=6, + num_channels=3, + groups=3, + padding="valid", + ) + ) suite.addTest( Conv2DTransposeTestCase( methodName='runTest', @@ -257,17 +291,21 @@ def add_cases(suite): stride=2, output_size=[14, 14], output_padding=[1, 1], - )) + ) + ) def add_error_cases(suite): suite.addTest( - Conv2DTransposeErrorTestCase(methodName='runTest', - num_channels=5, - groups=2)) + Conv2DTransposeErrorTestCase( + methodName='runTest', num_channels=5, groups=2 + ) + ) suite.addTest( - Conv2DTransposeErrorTestCase(methodName='runTest', - output_size="not_valid")) + Conv2DTransposeErrorTestCase( + methodName='runTest', output_size="not_valid" + ) + ) def load_tests(loader, standard_tests, pattern): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index da822c3b124d5cc04da2a65e45f1f65d356320b8..29ffbd80d3623f2e26fca6d50dca110f05251535 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -30,9 +30,10 @@ from op_test import OpTest def conv2dtranspose_forward_naive(input_, filter_, attrs): padding_algorithm = attrs['padding_algorithm'] if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." 
% str(padding_algorithm) + ) if attrs['data_format'] == 'NHWC': input_ = np.transpose(input_, [0, 3, 1, 2]) @@ -43,18 +44,22 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): out_c = f_out_c * groups sub_in_c = in_c // groups - stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ - 'dilations'] + stride, pad, dilations = ( + attrs['strides'], + attrs['paddings'], + attrs['dilations'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, - kernel_size, - kernel_stride): + for input_size, filter_size, stride_size in zip( + input_shape, kernel_size, kernel_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -88,37 +93,49 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): if 'output_padding' in attrs: out_pad_h = attrs['output_padding'][0] out_pad_w = attrs['output_padding'][1] - out = np.zeros((in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), - dtype=input_.dtype) + out = np.zeros( + (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype + ) for n in range(in_n): for i in range(in_h): for j in range(in_w): for g in range(groups): - input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i, - j] # (c) + input_masked = input_[ + n, g * sub_in_c : (g + 1) * sub_in_c, i, j + ] # (c) input_masked = np.reshape(input_masked, (sub_in_c, 1, 1)) input_masked = np.tile(input_masked, (1, f_h, f_w)) for k in range(f_out_c): tmp_out = np.sum( - input_masked * - filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :], - axis=0) + input_masked + * filter_[ + g * sub_in_c : (g + 1) * sub_in_c, k, :, : + ], + axis=0, + ) i1, i2 = i * stride[0], i * stride[0] + d_bolck_h j1, j2 = j * stride[1], j * stride[1] + d_bolck_w - out[n, g * f_out_c + k, i1:i2:dilations[0], - j1:j2:dilations[1]] += tmp_out - - out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h, - pad_w_0:out_w - pad_w_1 + out_pad_w] + out[ + n, + g * f_out_c + k, + i1 : i2 : dilations[0], + j1 : j2 : dilations[1], + ] += tmp_out + + out = out[ + :, + :, + pad_h_0 : out_h - pad_h_1 + out_pad_h, + pad_w_0 : out_w - pad_w_1 + out_pad_w, + ] if attrs['data_format'] == 'NHWC': out = np.transpose(out, [0, 2, 3, 1]) return out class TestConv2DTransposeOp(OpTest): - def setUp(self): # init as conv transpose self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 @@ -147,7 +164,7 @@ class TestConv2DTransposeOp(OpTest): 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format + 'data_format': self.data_format, } if self.output_size is not None: self.attrs['output_size'] = self.output_size @@ -155,8 +172,9 @@ class TestConv2DTransposeOp(OpTest): if len(self.output_padding) > 0: self.attrs['output_padding'] = self.output_padding - output = conv2dtranspose_forward_naive(input_, filter_, - self.attrs).astype(self.dtype) + output = conv2dtranspose_forward_naive( + input_, filter_, self.attrs + ).astype(self.dtype) self.outputs = {'Output': output} @@ -165,7 +183,8 @@ class TestConv2DTransposeOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False)) + place, atol=1e-5, 
check_dygraph=(self.use_mkldnn == False) + ) else: self.check_output(check_dygraph=(self.use_mkldnn == False)) @@ -173,39 +192,44 @@ class TestConv2DTransposeOp(OpTest): if self.need_check_grad: if self.use_cudnn: place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input']), + ) else: - self.check_grad(['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad( + ['Filter'], 'Output', no_grad_set=set(['Input']) + ) def test_check_grad_no_filter(self): if self.need_check_grad: if self.use_cudnn: place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) else: - self.check_grad(['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad( + ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad(self): if self.need_check_grad: if self.use_cudnn: place = core.CUDAPlace(0) - self.check_grad_with_place(place, - set(['Input', 'Filter']), - 'Output', - max_relative_error=0.02) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.02, + ) else: - self.check_grad(set(['Input', 'Filter']), - 'Output', - max_relative_error=0.02) + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.02 + ) def init_test_case(self): self.pad = [0, 0] @@ -221,7 +245,6 @@ class TestConv2DTransposeOp(OpTest): class TestWithSymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -233,7 +256,6 @@ class TestWithSymmetricPad(TestConv2DTransposeOp): class TestWithAsymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -245,7 +267,6 @@ class TestWithAsymmetricPad(TestConv2DTransposeOp): class TestWithSAMEPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [2, 1] self.dilations = [1, 2] @@ -257,7 +278,6 @@ class TestWithSAMEPad(TestConv2DTransposeOp): class TestWithVALIDPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -269,7 +289,6 @@ class TestWithVALIDPad(TestConv2DTransposeOp): class TestWithGroups(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -281,7 +300,6 @@ class TestWithGroups(TestConv2DTransposeOp): class TestWithStride(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -293,7 +311,6 @@ class TestWithStride(TestConv2DTransposeOp): class TestWithDilation(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -305,7 +322,6 @@ class TestWithDilation(TestConv2DTransposeOp): class TestWithEvenUpsample(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -318,7 +334,6 @@ class TestWithEvenUpsample(TestConv2DTransposeOp): class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -331,7 +346,6 @@ class TestWithEvenUpsampleOutputPadding(TestConv2DTransposeOp): class Test_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -344,7 +358,6 @@ class Test_NHWC(TestConv2DTransposeOp): class 
TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -357,7 +370,6 @@ class TestWithSymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -370,7 +382,6 @@ class TestWithAsymmetricPad_NHWC(TestConv2DTransposeOp): class TestWithGroups_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -383,7 +394,6 @@ class TestWithGroups_NHWC(TestConv2DTransposeOp): class TestWithStride_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -396,7 +406,6 @@ class TestWithStride_NHWC(TestConv2DTransposeOp): class TestWithDilation_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -409,7 +418,6 @@ class TestWithDilation_NHWC(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -423,7 +431,6 @@ class TestWithEvenUpsample_NHWC(TestConv2DTransposeOp): class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -437,19 +444,19 @@ class TestWithEvenUpsample_NHWC_output_padding(TestConv2DTransposeOp): # ------------ test_cudnn ------------ -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN(TestConv2DTransposeOp): - def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSymmetricPad(TestWithSymmetricPad): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -464,10 +471,10 @@ class TestCUDNNWithSymmetricPad(TestWithSymmetricPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -482,10 +489,10 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSAMEPad(TestWithSAMEPad): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 2] @@ -500,10 +507,10 @@ class TestCUDNNWithSAMEPad(TestWithSAMEPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithVALIDPad(TestWithVALIDPad): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -518,10 +525,10 @@ class TestCUDNNWithVALIDPad(TestWithVALIDPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class 
TestCUDNNWithStride(TestWithStride): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -536,10 +543,10 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithGroups(TestWithGroups): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -555,10 +562,10 @@ class TestCUDNNWithGroups(TestWithGroups): # ------------ test_cudnn ------------ -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithEvenUpsample(TestWithEvenUpsample): - def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" @@ -579,10 +586,10 @@ class TestCUDNNWithEvenUpsample(TestWithEvenUpsample): # self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN_NHWC(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -598,10 +605,10 @@ class TestCUDNN_NHWC(TestConv2DTransposeOp): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -617,10 +624,10 @@ class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithAsymmetricPad_NHWC(TestWithSymmetricPad): - def init_test_case(self): self.pad = [1, 0, 2, 3] self.stride = [2, 2] @@ -636,10 +643,10 @@ class TestCUDNNWithAsymmetricPad_NHWC(TestWithSymmetricPad): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithStride_NHWC(TestWithStride): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -655,10 +662,10 @@ class TestCUDNNWithStride_NHWC(TestWithStride): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithGroups_NHWC(TestWithGroups): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -674,10 +681,10 @@ class TestCUDNNWithGroups_NHWC(TestWithGroups): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample): - def init_test_case(self): self.pad = [2, 2] self.stride = [2, 2] @@ -694,10 +701,10 @@ class TestCUDNNWithEvenUpsample_NHWC(TestWithEvenUpsample): self.op_type = "conv2d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not 
compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN_FP16(TestConv2DTransposeOp): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 1] @@ -717,15 +724,16 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=0.02, check_dygraph=(self.use_mkldnn == False)) + place, atol=0.02, check_dygraph=(self.use_mkldnn == False) + ) else: self.check_output(check_dygraph=(self.use_mkldnn == False)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [0, 0] @@ -738,10 +746,10 @@ class TestCUDNN_NHWC_FP16(TestCUDNN_FP16): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 1] @@ -754,10 +762,10 @@ class TestCUDNNWithSymmetricPad_NHWC_FP16(TestCUDNN_FP16): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 0, 2, 3] @@ -770,10 +778,10 @@ class TestCUDNNWithAsymmetricPad_NHWC_FP16(TestCUDNN_FP16): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 1] @@ -786,10 +794,10 @@ class TestCUDNNWithStride_NHWC_FP16(TestCUDNN_FP16): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [1, 1] @@ -802,10 +810,10 @@ class TestCUDNNWithGroups_NHWC_FP16(TestCUDNN_FP16): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16): - def init_test_case(self): self.dtype = np.float16 self.pad = [2, 2] @@ -820,56 +828,67 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16): class TestConv2DTransposeAPI(unittest.TestCase): - def test_case1(self): - data1 = fluid.layers.data(name='data1', - shape=[3, 5, 5], - dtype='float32') - data2 = fluid.layers.data(name='data2', - shape=[5, 5, 3], - dtype='float32') - out1 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - data_format='NCHW') - out2 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - data_format='NHWC') - out3 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - 
padding=[[0, 0], [1, 1], [1, 1], - [0, 0]], - data_format='NHWC') - out4 = fluid.layers.conv2d_transpose(input=data1, - groups=3, - num_filters=6, - filter_size=3, - padding=[[0, 0], [0, 0], [2, 1], - [0, 0]], - data_format='NCHW') - out5 = fluid.layers.conv2d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - padding='SAME', - data_format='NCHW') - out6 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding='VALID', - data_format='NHWC') - out7 = fluid.layers.conv2d_transpose(input=data1, - groups=1, - num_filters=6, - output_size=[7, 7], - padding=[0, 0], - data_format='NHWC') + data1 = fluid.layers.data( + name='data1', shape=[3, 5, 5], dtype='float32' + ) + data2 = fluid.layers.data( + name='data2', shape=[5, 5, 3], dtype='float32' + ) + out1 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + data_format='NCHW', + ) + out2 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + data_format='NHWC', + ) + out3 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + data_format='NHWC', + ) + out4 = fluid.layers.conv2d_transpose( + input=data1, + groups=3, + num_filters=6, + filter_size=3, + padding=[[0, 0], [0, 0], [2, 1], [0, 0]], + data_format='NCHW', + ) + out5 = fluid.layers.conv2d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + padding='SAME', + data_format='NCHW', + ) + out6 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + padding='VALID', + data_format='NHWC', + ) + out7 = fluid.layers.conv2d_transpose( + input=data1, + groups=1, + num_filters=6, + output_size=[7, 7], + padding=[0, 0], + data_format='NHWC', + ) data1_np = np.random.random((2, 3, 5, 5)).astype("float32") data2_np = np.random.random((2, 5, 5, 3)).astype("float32") @@ -880,13 +899,12 @@ class TestConv2DTransposeAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2, out3, out4, out5, out6, out7], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2, out3, out4, out5, out6, out7], + return_numpy=True, + ) self.assertIsNotNone(results[0]) self.assertIsNotNone(results[1]) self.assertIsNotNone(results[2]) @@ -897,76 +915,81 @@ class TestConv2DTransposeAPI(unittest.TestCase): class TestConv2DTransposeOpException(unittest.TestCase): - def test_exception(self): data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") def attr_data_format(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - data_format="NCDHW") + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + data_format="NCDHW", + ) self.assertRaises(ValueError, attr_data_format) def attr_padding_str(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding='Vald') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding='Vald', + ) self.assertRaises(ValueError, attr_padding_str) def attr_padding_list(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - 
num_filters=6, - filter_size=3, - padding=[[1, 1], [1, 1], [0, 0], - [0, 0]]) + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [1, 1], [0, 0], [0, 0]], + ) self.assertRaises(ValueError, attr_padding_list) def attr_padding_with_data_format(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding=[[1, 1], [0, 0], [0, 0], - [1, 1]], - data_format='NHWC') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + data_format='NHWC', + ) self.assertRaises(ValueError, attr_padding_with_data_format) - error_input = fluid.layers.data(name='error_data', - shape=[1], - dtype="float32") + error_input = fluid.layers.data( + name='error_data', shape=[1], dtype="float32" + ) def error_input_size(): - out = fluid.layers.conv2d_transpose(input=error_input, - groups=1, - num_filters=6, - filter_size=3) + out = fluid.layers.conv2d_transpose( + input=error_input, groups=1, num_filters=6, filter_size=3 + ) self.assertRaises(ValueError, error_input_size) def error_groups(): - out = fluid.layers.conv2d_transpose(input=data, - groups=0, - num_filters=6, - filter_size=3, - data_format='NHWC') + out = fluid.layers.conv2d_transpose( + input=data, + groups=0, + num_filters=6, + filter_size=3, + data_format='NHWC', + ) self.assertRaises(ValueError, error_groups) class TestConv2DTransposeRepr(unittest.TestCase): - def test_case(self): paddle.disable_static() - x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.) + x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1.0, max=1.0) conv = nn.Conv2DTranspose(4, 6, (3, 3), output_padding=1, stride=2) print(conv) y_var = conv(x_var) @@ -976,7 +999,6 @@ class TestConv2DTransposeRepr(unittest.TestCase): class TestTensorOutputSize1(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 8, 8]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -991,7 +1013,8 @@ class TestTensorOutputSize1(UnittestBase): w_var = paddle.randn((3, 6, 3, 3), dtype='float32') output_size = paddle.assign([17]) out = paddle.paddle.nn.functional.conv2d_transpose( - x, w_var, stride=2, output_size=output_size) + x, w_var, stride=2, output_size=output_size + ) return out def test_static(self): @@ -1013,15 +1036,15 @@ class TestTensorOutputSize1(UnittestBase): res = exe.run(fetch_list=[feat, out]) np.testing.assert_allclose(res[1].shape, (2, 6, 17, 17)) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) # Test for Inference Predictor infer_outs = self.infer_prog() np.testing.assert_allclose(infer_outs[1].shape, (2, 6, 17, 17)) class TestTensorOutputSize2(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size2' @@ -1029,43 +1052,37 @@ class TestTensorOutputSize2(TestTensorOutputSize1): w_var = paddle.randn((3, 6, 3, 3), dtype='float32') output_size = [17, paddle.assign([17])] out = paddle.paddle.nn.functional.conv2d_transpose( - x, w_var, stride=2, output_size=output_size) + x, w_var, stride=2, output_size=output_size + ) return out class TestTensorOutputSize3(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size3' def call_func(self, x): w_var = paddle.randn((3, 6, 3, 3), dtype='float32') output_size = paddle.assign([17]) - out = paddle.fluid.layers.conv2d_transpose(x, 
- num_filters=6, - output_size=output_size, - filter_size=3, - stride=2) + out = paddle.fluid.layers.conv2d_transpose( + x, num_filters=6, output_size=output_size, filter_size=3, stride=2 + ) return out class TestTensorOutputSize4(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size4' def call_func(self, x): output_size = [17, paddle.assign([17])] - out = paddle.fluid.layers.conv2d_transpose(x, - num_filters=6, - output_size=output_size, - filter_size=3, - stride=2) + out = paddle.fluid.layers.conv2d_transpose( + x, num_filters=6, output_size=output_size, filter_size=3, stride=2 + ) return out class TestTensorOutputSize5(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size5' @@ -1077,13 +1094,13 @@ class TestTensorOutputSize5(TestTensorOutputSize1): num_filters=6, filter_size=3, output_size=output_size, - stride=2) + stride=2, + ) out = conv2d_trans(x) return out class TestTensorOutputSize6(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size6' @@ -1098,13 +1115,13 @@ class TestTensorOutputSize6(TestTensorOutputSize1): num_filters=6, filter_size=3, output_size=output_size, - stride=2) + stride=2, + ) out = conv2d_trans(x) return out class TestTensorOutputSize7(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size7' @@ -1119,13 +1136,13 @@ class TestTensorOutputSize7(TestTensorOutputSize1): num_filters=6, filter_size=3, output_size=output_size, - stride=2) + stride=2, + ) out = conv2d_trans(x) return out class TestTensorOutputSize8(TestTensorOutputSize1): - def path_prefix(self): return 'conv2d_transpose_tensor_output_size8' @@ -1140,7 +1157,8 @@ class TestTensorOutputSize8(TestTensorOutputSize1): num_filters=6, filter_size=3, output_size=output_size, - stride=2) + stride=2, + ) out = conv2d_trans(x) return out diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op_depthwise_conv.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op_depthwise_conv.py index b837efaa783f4b9e337a84368b22eebac7661f0c..a50b0e2cb967307c24ffb8dd6ef95f4315f7c59f 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op_depthwise_conv.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op_depthwise_conv.py @@ -22,7 +22,6 @@ from test_conv2d_transpose_op import TestConv2DTransposeOp class TestDepthwiseConvTranspose(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -36,7 +35,6 @@ class TestDepthwiseConvTranspose(TestConv2DTransposeOp): class TestDepthwiseConvTransposeAsymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1, 2] self.stride = [1, 1] @@ -51,7 +49,6 @@ class TestDepthwiseConvTransposeAsymmetricPad(TestConv2DTransposeOp): class TestDepthwiseConvTransposeSAMEPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -65,7 +62,6 @@ class TestDepthwiseConvTransposeSAMEPad(TestConv2DTransposeOp): class TestDepthwiseConvTransposeVALIDPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -79,7 +75,6 @@ class TestDepthwiseConvTransposeVALIDPad(TestConv2DTransposeOp): class TestDepthwiseConvTranspose_NHWC_3x3kernel(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py 
index 5672fe0dd88e40fd8553ae34bff48a82faa5d0cd..053bbb641665e396efd1f023014e65c56c74c922 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py @@ -23,21 +23,22 @@ import unittest class Conv3DTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=4, - spartial_shape=(8, 8, 8), - num_channels=6, - num_filters=8, - filter_size=3, - padding=0, - stride=1, - dilation=1, - groups=1, - no_bias=False, - data_format="NCDHW", - dtype="float32"): + def __init__( + self, + methodName='runTest', + batch_size=4, + spartial_shape=(8, 8, 8), + num_channels=6, + num_filters=8, + filter_size=3, + padding=0, + stride=1, + dilation=1, + groups=1, + no_bias=False, + data_format="NCDHW", + dtype="float32", + ): super(Conv3DTestCase, self).__init__(methodName) self.batch_size = batch_size self.num_channels = num_channels @@ -56,24 +57,31 @@ class Conv3DTestCase(unittest.TestCase): def setUp(self): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - input_shape = (self.batch_size, ) + self.spartial_shape + ( - self.num_channels, ) + input_shape = ( + (self.batch_size,) + self.spartial_shape + (self.num_channels,) + ) else: - input_shape = (self.batch_size, - self.num_channels) + self.spartial_shape + input_shape = ( + self.batch_size, + self.num_channels, + ) + self.spartial_shape self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] * 3 else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.num_filters, self.num_channels - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.num_filters, + self.num_channels // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if not self.no_bias: self.bias = np.random.uniform( - -1, 1, size=(self.num_filters, )).astype(self.dtype) + -1, 1, size=(self.num_filters,) + ).astype(self.dtype) else: self.bias = None @@ -82,28 +90,33 @@ class Conv3DTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1, -1, self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1, -1) + input_shape = ( + (-1, -1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) weight_attr = I.NumpyArrayInitializer(self.weight) if self.bias is None: bias_attr = False else: bias_attr = I.NumpyArrayInitializer(self.bias) - y_var = fluid.layers.conv3d(x_var, - self.num_filters, - self.filter_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - param_attr=weight_attr, - bias_attr=bias_attr, - data_format=self.data_format) + y_var = fluid.layers.conv3d( + x_var, + self.num_filters, + self.filter_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + param_attr=weight_attr, + bias_attr=bias_attr, + data_format=self.data_format, + ) feed_dict = {"input": self.input} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -111,41 +124,49 @@ class 
Conv3DTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1, -1, self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1, -1) + input_shape = ( + (-1, -1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.num_filters, ), - dtype=self.dtype) - y_var = F.conv3d(x_var, - w_var, - None if self.no_bias else b_var, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.num_filters,), dtype=self.dtype + ) + y_var = F.conv3d( + x_var, + w_var, + None if self.no_bias else b_var, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): x_var = paddle.to_tensor(self.input) x_var.stop_gradient = False - conv = nn.Conv3D(self.num_channels, - self.num_filters, - self.filter_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv3D( + self.num_channels, + self.num_filters, + self.filter_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ -178,7 +199,6 @@ class Conv3DTestCase(unittest.TestCase): class Conv3DErrorTestCase(Conv3DTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -189,44 +209,60 @@ class Conv3DErrorTestCase(Conv3DTestCase): def add_cases(suite): suite.addTest(Conv3DTestCase(methodName='runTest')) suite.addTest( - Conv3DTestCase(methodName='runTest', stride=[1, 2, 1], dilation=2)) + Conv3DTestCase(methodName='runTest', stride=[1, 2, 1], dilation=2) + ) suite.addTest( - Conv3DTestCase(methodName='runTest', stride=2, dilation=(2, 1, 2))) + Conv3DTestCase(methodName='runTest', stride=2, dilation=(2, 1, 2)) + ) suite.addTest( - Conv3DTestCase(methodName='runTest', padding="same", no_bias=True)) + Conv3DTestCase(methodName='runTest', padding="same", no_bias=True) + ) suite.addTest( - Conv3DTestCase(methodName='runTest', - filter_size=(3, 2, 3), - padding='valid')) + Conv3DTestCase( + methodName='runTest', filter_size=(3, 2, 3), padding='valid' + ) + ) suite.addTest(Conv3DTestCase(methodName='runTest', padding=(2, 3, 1))) suite.addTest( - Conv3DTestCase(methodName='runTest', padding=[1, 2, 2, 1, 2, 3])) + Conv3DTestCase(methodName='runTest', padding=[1, 2, 2, 1, 2, 3]) + ) suite.addTest( - Conv3DTestCase(methodName='runTest', - padding=[[0, 0], [0, 0], [1, 2], [2, 1], [2, 2]])) + Conv3DTestCase( + methodName='runTest', + padding=[[0, 0], [0, 0], [1, 2], [2, 1], [2, 2]], + ) + ) suite.addTest(Conv3DTestCase(methodName='runTest', data_format="NDHWC")) suite.addTest( - Conv3DTestCase(methodName='runTest', - 
data_format="NDHWC", - padding=[[0, 0], [1, 1], [3, 3], [2, 2], [0, 0]])) + Conv3DTestCase( + methodName='runTest', + data_format="NDHWC", + padding=[[0, 0], [1, 1], [3, 3], [2, 2], [0, 0]], + ) + ) suite.addTest( - Conv3DTestCase(methodName='runTest', groups=2, padding="valid")) + Conv3DTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv3DTestCase(methodName='runTest', - num_filters=6, - num_channels=3, - groups=3, - padding="valid")) + Conv3DTestCase( + methodName='runTest', + num_filters=6, + num_channels=3, + groups=3, + padding="valid", + ) + ) def add_error_cases(suite): suite.addTest( - Conv3DErrorTestCase(methodName='runTest', num_channels=5, groups=2)) + Conv3DErrorTestCase(methodName='runTest', num_channels=5, groups=2) + ) suite.addTest( - Conv3DErrorTestCase(methodName='runTest', - num_channels=5, - groups=2, - padding=[-1, 1, 3])) + Conv3DErrorTestCase( + methodName='runTest', num_channels=5, groups=2, padding=[-1, 1, 3] + ) + ) def load_tests(loader, standard_tests, pattern): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index a65d1c7310411a3cecd9307014dd2ec01202a2a7..eaa6ba04c64e6dc2d1388c04c3c04acc87a543d9 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -21,24 +21,28 @@ import paddle.fluid as fluid import paddle -def conv3d_forward_naive(input, - filter, - group, - conv_param, - padding_algorithm='EXPLICIT', - data_format="NCDHW"): +def conv3d_forward_naive( + input, + filter, + group, + conv_param, + padding_algorithm='EXPLICIT', + data_format="NCDHW", +): if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if data_format not in ["NCDHW", "NDHWC"]: - raise ValueError("Unknown Attr(data_format): '%s' ." - "It can only be 'NCDHW' or 'NDHWC'." % - str(data_format)) + raise ValueError( + "Unknown Attr(data_format): '%s' ." + "It can only be 'NCDHW' or 'NDHWC'." 
% str(data_format) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" if channel_last: input = np.transpose(input, [0, 4, 1, 2, 3]) @@ -52,17 +56,22 @@ def conv3d_forward_naive(input, sub_out_c = out_c // group sub_f_n = f_n // group - stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[ - 'dilations'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilations'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -85,59 +94,85 @@ def conv3d_forward_naive(input, pad_h_0, pad_h_1 = pad[2], pad[3] pad_w_0, pad_w_1 = pad[4], pad[5] - out_d = 1 + (in_d + pad_d_0 + pad_d_1 - (dilation[0] * - (f_d - 1) + 1)) // stride[0] - out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[1] * - (f_h - 1) + 1)) // stride[1] - out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[2] * - (f_w - 1) + 1)) // stride[2] + out_d = ( + 1 + + (in_d + pad_d_0 + pad_d_1 - (dilation[0] * (f_d - 1) + 1)) + // stride[0] + ) + out_h = ( + 1 + + (in_h + pad_h_0 + pad_h_1 - (dilation[1] * (f_h - 1) + 1)) + // stride[1] + ) + out_w = ( + 1 + + (in_w + pad_w_0 + pad_w_1 - (dilation[2] * (f_w - 1) + 1)) + // stride[2] + ) out = np.zeros((in_n, out_c, out_d, out_h, out_w)) - d_bolck_d = (dilation[0] * (f_d - 1) + 1) - d_bolck_h = (dilation[1] * (f_h - 1) + 1) - d_bolck_w = (dilation[2] * (f_w - 1) + 1) - - input_pad = np.pad(input, ((0, 0), (0, 0), (pad_d_0, pad_d_1), - (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), - mode='constant', - constant_values=0) + d_bolck_d = dilation[0] * (f_d - 1) + 1 + d_bolck_h = dilation[1] * (f_h - 1) + 1 + d_bolck_w = dilation[2] * (f_w - 1) + 1 + + input_pad = np.pad( + input, + ( + (0, 0), + (0, 0), + (pad_d_0, pad_d_1), + (pad_h_0, pad_h_1), + (pad_w_0, pad_w_1), + ), + mode='constant', + constant_values=0, + ) filter_dilation = np.zeros((f_n, f_c, d_bolck_d, d_bolck_h, d_bolck_w)) - filter_dilation[:, :, 0:d_bolck_d:dilation[0], 0:d_bolck_h:dilation[1], - 0:d_bolck_w:dilation[2]] = filter + filter_dilation[ + :, + :, + 0 : d_bolck_d : dilation[0], + 0 : d_bolck_h : dilation[1], + 0 : d_bolck_w : dilation[2], + ] = filter for d in range(out_d): for i in range(out_h): for j in range(out_w): for g in range(group): - input_pad_masked = \ - input_pad[:, g * f_c:(g + 1) * f_c, - d * stride[0]:d * stride[0] + d_bolck_d, - i * stride[1]:i * stride[1] + d_bolck_h, - j * stride[2]:j * stride[2] + d_bolck_w] - - f_sub = filter_dilation[g * sub_f_n:(g + 1) * - sub_f_n, :, :, :, :] + input_pad_masked = input_pad[ + :, + g * f_c : (g + 1) * f_c, + d * stride[0] : d * stride[0] + d_bolck_d, + i * stride[1] : i * stride[1] + d_bolck_h, + j * stride[2] : j * stride[2] + d_bolck_w, + ] + + f_sub = filter_dilation[ + g * sub_f_n : (g + 1) * sub_f_n, :, :, :, : + ] for k in range(sub_out_c): - out[:, g * sub_out_c + k, d, i, j] = \ - np.sum(input_pad_masked * f_sub[k, :, :, :, :], - axis=(1, 2, 3, 4)) + out[:, g * sub_out_c + k, d, i, j] = np.sum( + input_pad_masked * f_sub[k, :, :, :, :], + axis=(1, 2, 3, 4), 
+ ) if channel_last: out = np.transpose(out, [0, 2, 3, 4, 1]) return out def create_test_cudnn_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNN") TestCUDNNCase.__name__ = cls_name @@ -145,9 +180,7 @@ def create_test_cudnn_class(parent): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0, 0] self.padding_algorithm = "SAME" @@ -158,9 +191,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1, 1] self.padding_algorithm = "VALID" @@ -171,15 +202,15 @@ def create_test_padding_VALID_class(parent): def create_test_cudnn_padding_SAME_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingSMAECase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_paddings(self): self.pad = [1, 1, 1] @@ -191,15 +222,15 @@ def create_test_cudnn_padding_SAME_class(parent): def create_test_cudnn_padding_VALID_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingVALIDCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_paddings(self): self.pad = [1, 1, 1] @@ -211,9 +242,7 @@ def create_test_cudnn_padding_VALID_class(parent): def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NDHWC" @@ -227,15 +256,15 @@ def create_test_channel_last_class(parent): def create_test_cudnn_channel_last_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCudnnChannelLastCase(parent): - def init_kernel_type(self): self.use_cudnn = True - self.dtype = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.dtype = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def init_data_format(self): self.data_format = "NDHWC" @@ -250,7 +279,6 @@ def create_test_cudnn_channel_last_class(parent): class TestConv3DOp(OpTest): - def setUp(self): self.op_type = "conv3d" self.use_cudnn = False @@ -265,7 +293,7 @@ class TestConv3DOp(OpTest): conv3d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) @@ -279,7 +307,7 @@ class TestConv3DOp(OpTest): self.inputs = { 'Input': 
OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -288,7 +316,7 @@ class TestConv3DOp(OpTest): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format + 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -298,41 +326,50 @@ class TestConv3DOp(OpTest): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - self.check_output_with_place(place, - atol=1e-5, - check_dygraph=(self.use_mkldnn == False)) + self.check_output_with_place( + place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + ) def test_check_grad(self): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad_with_place(place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.03, + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad_with_place(place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad_with_place(place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + check_dygraph=(self.use_mkldnn == False), + ) def init_test_case(self): self.pad = [0, 0, 0] @@ -356,7 +393,6 @@ class TestConv3DOp(OpTest): class TestCase1(TestConv3DOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -367,19 +403,16 @@ class TestCase1(TestConv3DOp): class TestWithGroup1(TestConv3DOp): - def init_group(self): self.groups = 3 class TestWithGroup2(TestCase1): - def init_group(self): self.groups = 3 class TestWith1x1(TestConv3DOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] @@ -396,7 +429,6 @@ class TestWith1x1(TestConv3DOp): class TestWithInput1x1Filter1x1(TestConv3DOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] @@ -413,7 +445,6 @@ class TestWithInput1x1Filter1x1(TestConv3DOp): class TestWithDilation(TestConv3DOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] @@ -429,22 +460,22 @@ class TestWithDilation(TestConv3DOp): self.groups = 3 -#---------------- Conv3DCUDNN ---------------- +# ---------------- Conv3DCUDNN ---------------- -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) 
class TestCUDNN(TestConv3DOp): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16CUDNN(TestConv3DOp): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -456,19 +487,19 @@ class TestFP16CUDNN(TestConv3DOp): self.check_output_with_place(place, atol=2e-2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestWithGroup1CUDNN(TestWithGroup1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16WithGroup1CUDNN(TestWithGroup1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -480,19 +511,19 @@ class TestFP16WithGroup1CUDNN(TestWithGroup1): self.check_output_with_place(place, atol=2e-2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestWithGroup2CUDNN(TestWithGroup2): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16WithGroup2CUDNN(TestWithGroup2): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -504,19 +535,19 @@ class TestFP16WithGroup2CUDNN(TestWithGroup2): self.check_output_with_place(place, atol=2e-2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestWith1x1CUDNN(TestWith1x1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16With1x1CUDNN(TestWith1x1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -528,19 +559,19 @@ class TestFP16With1x1CUDNN(TestWith1x1): self.check_output_with_place(place, atol=2e-2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -553,7 +584,6 @@ class 
TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): class TestCUDNNExhaustiveSearch(TestCUDNN): - def init_kernel_type(self): self.use_cudnn = True self.exhaustive_search = True @@ -564,7 +594,6 @@ class TestCUDNNExhaustiveSearch(TestCUDNN): class TestConv3DOp_2(OpTest): - def setUp(self): self.op_type = "conv3d" self.use_cudnn = False @@ -583,18 +612,23 @@ class TestConv3DOp_2(OpTest): conv3d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilations': self.dilations + 'dilations': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = conv3d_forward_naive(input, filter, self.groups, conv3d_param, - self.padding_algorithm, - self.data_format).astype(self.dtype) + output = conv3d_forward_naive( + input, + filter, + self.groups, + conv3d_param, + self.padding_algorithm, + self.data_format, + ).astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -604,7 +638,7 @@ class TestConv3DOp_2(OpTest): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format + 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -619,27 +653,33 @@ class TestConv3DOp_2(OpTest): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - self.check_grad_with_place(place, {'Input', 'Filter'}, - 'Output', - max_relative_error=0.03) + self.check_grad_with_place( + place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03 + ) def test_check_grad_no_filter(self): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - self.check_grad_with_place(place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + ) def test_check_grad_no_input(self): if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() - self.check_grad_with_place(place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + ) def init_test_case(self): self.stride = [1, 1, 1] @@ -669,7 +709,6 @@ class TestConv3DOp_2(OpTest): class TestConv3DOp_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 2] self.input_size = [2, 3, 4, 4, 4] # NCDHW @@ -683,7 +722,6 @@ class TestConv3DOp_AsyPadding(TestConv3DOp_2): class TestConv3DOp_DiffDataInDiffDim(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 2] self.input_size = [2, 3, 4, 5, 5] # NCDHW @@ -702,7 +740,6 @@ create_test_channel_last_class(TestConv3DOp_DiffDataInDiffDim) class TestCase1_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 1] self.input_size = [2, 3, 4, 4, 4] # NCDHW @@ -716,7 +753,6 @@ class TestCase1_AsyPadding(TestConv3DOp_2): class TestWithGroup1_AsyPadding(TestConv3DOp_2): - def init_group(self): self.groups = 3 @@ -726,7 +762,6 @@ class TestWithGroup1_AsyPadding(TestConv3DOp_2): class TestWithGroup2_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 1] self.input_size = [2, 3, 4, 4, 4] # NCDHW @@ -743,7 
+778,6 @@ class TestWithGroup2_AsyPadding(TestConv3DOp_2): class TestWith1x1_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 1] self.input_size = [2, 3, 4, 4, 4] @@ -763,7 +797,6 @@ class TestWith1x1_AsyPadding(TestConv3DOp_2): class TestWithDilation_AsyPadding(TestConv3DOp_2): - def init_test_case(self): self.stride = [1, 1, 1] self.input_size = [2, 3, 6, 6, 6] @@ -829,196 +862,228 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) # --------- test python API --------------- class TestConv3DAPI(unittest.TestCase): - def test_api(self): - input_NDHWC = fluid.layers.data(name="input_NDHWC", - shape=[2, 5, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCDHW = fluid.layers.data(name="input_NCDHW", - shape=[2, 3, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - fluid.layers.conv3d(input=input_NDHWC, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=0, - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[1, 2, 1, 0, 1, 0], - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]], - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NDHWC, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], - dilation=[1, 1, 1], - groups=1, - data_format="NDHWC") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding="SAME", - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") - - fluid.layers.conv3d(input=input_NCDHW, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding="VALID", - dilation=[1, 1, 1], - groups=1, - data_format="NCDHW") + input_NDHWC = fluid.layers.data( + name="input_NDHWC", + shape=[2, 5, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCDHW = fluid.layers.data( + name="input_NCDHW", + shape=[2, 3, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + fluid.layers.conv3d( + input=input_NDHWC, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=0, + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[1, 2, 1, 0, 1, 0], + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]], + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NDHWC, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], + dilation=[1, 1, 1], + groups=1, + data_format="NDHWC", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding="SAME", + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) + + fluid.layers.conv3d( + input=input_NCDHW, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding="VALID", + dilation=[1, 1, 1], + groups=1, + data_format="NCDHW", + ) class TestConv3DAPI_Error(unittest.TestCase): - def test_api(self): - input = 
fluid.layers.data(name="input", - shape=[2, 5, 5, 5, 4], - append_batch_size=False, - dtype="float32") + input = fluid.layers.data( + name="input", + shape=[2, 5, 5, 5, 4], + append_batch_size=False, + dtype="float32", + ) # ValueError: cudnn def run_1(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=1, - use_cudnn=[0], - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + use_cudnn=[0], + data_format="NCDHW", + ) self.assertRaises(ValueError, run_1) # ValueError: data_format def run_2(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=[3, 3, 3], - stride=[1, 1, 1], - padding=0, - dilation=[1, 1, 1], - groups=1, - use_cudnn=False, - data_format="NCHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=[3, 3, 3], + stride=[1, 1, 1], + padding=0, + dilation=[1, 1, 1], + groups=1, + use_cudnn=False, + data_format="NCHWC", + ) self.assertRaises(ValueError, run_2) # ValueError: padding def run_3(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding="SAMEE", - dilation=1, - groups=1, - use_cudnn=False, - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding="SAMEE", + dilation=1, + groups=1, + use_cudnn=False, + data_format="NCDHW", + ) self.assertRaises(ValueError, run_3) def run_4(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, - 1]], - dilation=1, - groups=1, - use_cudnn=False, - data_format="NCDHW") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]], + dilation=1, + groups=1, + use_cudnn=False, + data_format="NCDHW", + ) self.assertRaises(ValueError, run_4) def run_5(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=0, - stride=0, - padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, - 1]], - dilation=1, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=0, + stride=0, + padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]], + dilation=1, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data(name="x", - shape=[2, 5, 5, 5, -1], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name="x", + shape=[2, 5, 5, 5, -1], + append_batch_size=False, + dtype="float32", + ) def run_6(): - fluid.layers.conv3d(input=x, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=x, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_6) # ValueError: groups def run_7(): - fluid.layers.conv3d(input=input, - num_filters=3, - filter_size=3, - stride=1, - padding=0, - dilation=1, - groups=3, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=3, + filter_size=3, + stride=1, + padding=0, + dilation=1, + groups=3, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_7) # ValueError: filter num def run_8(): - fluid.layers.conv3d(input=input, - num_filters=0, - filter_size=0, - 
stride=0, - padding=0, - dilation=0, - groups=1, - use_cudnn=False, - data_format="NDHWC") + fluid.layers.conv3d( + input=input, + num_filters=0, + filter_size=0, + stride=0, + padding=0, + dilation=0, + groups=1, + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_8) diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py index 9ad3eaaccfcf21e7bdea986be14f850eb7227039..fa2138956b9498230cdab88a8b9624ca25b4c48a 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py @@ -21,22 +21,23 @@ import unittest class Conv3DTransposeTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - batch_size=2, - spartial_shape=(8, 8, 8), - num_channels=6, - num_filters=8, - filter_size=3, - output_size=None, - padding=0, - stride=1, - dilation=1, - groups=1, - no_bias=False, - data_format="NCDHW", - dtype="float32"): + def __init__( + self, + methodName='runTest', + batch_size=2, + spartial_shape=(8, 8, 8), + num_channels=6, + num_filters=8, + filter_size=3, + output_size=None, + padding=0, + stride=1, + dilation=1, + groups=1, + no_bias=False, + data_format="NCDHW", + dtype="float32", + ): super(Conv3DTransposeTestCase, self).__init__(methodName) self.batch_size = batch_size self.num_channels = num_channels @@ -56,34 +57,44 @@ class Conv3DTransposeTestCase(unittest.TestCase): def setUp(self): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - input_shape = (self.batch_size, ) + self.spartial_shape + ( - self.num_channels, ) + input_shape = ( + (self.batch_size,) + self.spartial_shape + (self.num_channels,) + ) else: - input_shape = (self.batch_size, - self.num_channels) + self.spartial_shape + input_shape = ( + self.batch_size, + self.num_channels, + ) + self.spartial_shape self.input = np.random.randn(*input_shape).astype(self.dtype) if isinstance(self.filter_size, int): filter_size = [self.filter_size] * 3 else: filter_size = self.filter_size - self.weight_shape = weight_shape = (self.num_channels, self.num_filters - // self.groups) + tuple(filter_size) - self.weight = np.random.uniform(-1, 1, - size=weight_shape).astype(self.dtype) + self.weight_shape = weight_shape = ( + self.num_channels, + self.num_filters // self.groups, + ) + tuple(filter_size) + self.weight = np.random.uniform(-1, 1, size=weight_shape).astype( + self.dtype + ) if self.no_bias: self.bias = None else: self.bias = np.random.uniform( - -1, 1, size=(self.num_filters, )).astype(self.dtype) + -1, 1, size=(self.num_filters,) + ).astype(self.dtype) def fluid_layer(self, place): main = fluid.Program() start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1, -1, self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1, -1) + input_shape = ( + (-1, -1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) weight_attr = I.NumpyArrayInitializer(self.weight) if self.bias is None: @@ -101,11 +112,12 @@ class Conv3DTransposeTestCase(unittest.TestCase): groups=self.groups, param_attr=weight_attr, bias_attr=bias_attr, - data_format=self.data_format) + data_format=self.data_format, + ) feed_dict = {"input": self.input} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, 
fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -113,41 +125,49 @@ class Conv3DTransposeTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - input_shape = (-1, -1, -1, -1, self.num_channels) \ - if self.channel_last else (-1, self.num_channels, -1, -1, -1) + input_shape = ( + (-1, -1, -1, -1, self.num_channels) + if self.channel_last + else (-1, self.num_channels, -1, -1, -1) + ) x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) - b_var = fluid.data("bias", (self.num_filters, ), - dtype=self.dtype) - y_var = F.conv3d_transpose(x_var, - w_var, - None if self.no_bias else b_var, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + w_var = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) + b_var = fluid.data( + "bias", (self.num_filters,), dtype=self.dtype + ) + y_var = F.conv3d_transpose( + x_var, + w_var, + None if self.no_bias else b_var, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_nn_layer(self): x_var = dg.to_variable(self.input) - conv = nn.Conv3DTranspose(self.num_channels, - self.num_filters, - self.filter_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + conv = nn.Conv3DTranspose( + self.num_channels, + self.num_filters, + self.filter_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) conv.weight.set_value(self.weight) if not self.no_bias: conv.bias.set_value(self.bias) @@ -174,7 +194,6 @@ class Conv3DTransposeTestCase(unittest.TestCase): class Conv3DTransposeErrorTestCase(Conv3DTransposeTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -185,65 +204,85 @@ class Conv3DTransposeErrorTestCase(Conv3DTransposeTestCase): def add_cases(suite): suite.addTest(Conv3DTransposeTestCase(methodName='runTest')) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - stride=[1, 2, 1], - dilation=2, - no_bias=True)) + Conv3DTransposeTestCase( + methodName='runTest', stride=[1, 2, 1], dilation=2, no_bias=True + ) + ) + suite.addTest( + Conv3DTransposeTestCase( + methodName='runTest', + output_size=[12, 19, 12], + stride=[1, 2, 1], + dilation=2, + ) + ) + suite.addTest( + Conv3DTransposeTestCase( + methodName='runTest', stride=2, dilation=(2, 1, 2) + ) + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - output_size=[12, 19, 12], - stride=[1, 2, 1], - dilation=2)) + Conv3DTransposeTestCase(methodName='runTest', padding="valid") + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - stride=2, - dilation=(2, 1, 2))) - suite.addTest(Conv3DTransposeTestCase(methodName='runTest', - padding="valid")) - suite.addTest(Conv3DTransposeTestCase(methodName='runTest', - padding='valid')) + 
Conv3DTransposeTestCase(methodName='runTest', padding='valid') + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - filter_size=1, - padding=(2, 3, 1))) + Conv3DTransposeTestCase( + methodName='runTest', filter_size=1, padding=(2, 3, 1) + ) + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - padding=[1, 2, 2, 3, 2, 1])) + Conv3DTransposeTestCase( + methodName='runTest', padding=[1, 2, 2, 3, 2, 1] + ) + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - padding=[[0, 0], [0, 0], [2, 3], [1, 2], [2, - 1]])) + Conv3DTransposeTestCase( + methodName='runTest', + padding=[[0, 0], [0, 0], [2, 3], [1, 2], [2, 1]], + ) + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', data_format="NDHWC")) + Conv3DTransposeTestCase(methodName='runTest', data_format="NDHWC") + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - data_format="NDHWC", - padding=[[0, 0], [1, 1], [2, 2], [3, 3], [0, - 0]])) + Conv3DTransposeTestCase( + methodName='runTest', + data_format="NDHWC", + padding=[[0, 0], [1, 1], [2, 2], [3, 3], [0, 0]], + ) + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', groups=2, - padding="valid")) + Conv3DTransposeTestCase(methodName='runTest', groups=2, padding="valid") + ) suite.addTest( - Conv3DTransposeTestCase(methodName='runTest', - num_filters=6, - num_channels=3, - groups=3, - padding="valid")) + Conv3DTransposeTestCase( + methodName='runTest', + num_filters=6, + num_channels=3, + groups=3, + padding="valid", + ) + ) def add_error_cases(suite): suite.addTest( - Conv3DTransposeErrorTestCase(methodName='runTest', - num_channels=5, - groups=2)) + Conv3DTransposeErrorTestCase( + methodName='runTest', num_channels=5, groups=2 + ) + ) suite.addTest( - Conv3DTransposeErrorTestCase(methodName='runTest', - output_size="not_valid")) + Conv3DTransposeErrorTestCase( + methodName='runTest', output_size="not_valid" + ) + ) suite.addTest( - Conv3DTransposeErrorTestCase(methodName='runTest', - num_channels=5, - groups=2, - padding=[-1, 1, 3])) + Conv3DTransposeErrorTestCase( + methodName='runTest', num_channels=5, groups=2, padding=[-1, 1, 3] + ) + ) def load_tests(loader, standard_tests, pattern): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py index ccda74b498ab380d1f0d4a72e5567f7efe369e39..869d1d5f29a1329fb4e2725fcc718b0c88d85031 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py @@ -25,9 +25,10 @@ from op_test import OpTest def conv3dtranspose_forward_naive(input_, filter_, attrs): padding_algorithm = attrs['padding_algorithm'] if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." 
% str(padding_algorithm) + ) if attrs['data_format'] == 'NHWC': input_ = np.transpose(input_, [0, 4, 1, 2, 3]) @@ -38,17 +39,21 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): out_c = f_out_c * groups sub_in_c = in_c // groups - stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ - 'dilations'] + stride, pad, dilations = ( + attrs['strides'], + attrs['paddings'], + attrs['dilations'], + ) def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, - kernel_size, - kernel_stride): + for input_size, filter_size, stride_size in zip( + input_shape, kernel_size, kernel_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -84,34 +89,50 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): for i in range(in_h): for j in range(in_w): for g in range(groups): - input_masked = input_[n, - g * sub_in_c:(g + 1) * sub_in_c, - d, i, j] # (c) - input_masked = np.reshape(input_masked, - (sub_in_c, 1, 1, 1)) + input_masked = input_[ + n, g * sub_in_c : (g + 1) * sub_in_c, d, i, j + ] # (c) + input_masked = np.reshape( + input_masked, (sub_in_c, 1, 1, 1) + ) input_masked = np.tile(input_masked, (1, f_d, f_h, f_w)) for k in range(f_out_c): - tmp_out = np.sum(input_masked * - filter_[g * sub_in_c:(g + 1) * - sub_in_c, k, :, :, :], - axis=0) + tmp_out = np.sum( + input_masked + * filter_[ + g * sub_in_c : (g + 1) * sub_in_c, + k, + :, + :, + :, + ], + axis=0, + ) d1, d2 = d * stride[0], d * stride[0] + d_bolck_d i1, i2 = i * stride[1], i * stride[1] + d_bolck_h j1, j2 = j * stride[2], j * stride[2] + d_bolck_w - out[n, g * f_out_c + k, d1:d2:dilations[0], - i1:i2:dilations[1], - j1:j2:dilations[2]] += tmp_out - - out = out[:, :, pad_d_0:out_d - pad_d_1, pad_h_0:out_h - pad_h_1, - pad_w_0:out_w - pad_w_1] + out[ + n, + g * f_out_c + k, + d1 : d2 : dilations[0], + i1 : i2 : dilations[1], + j1 : j2 : dilations[2], + ] += tmp_out + + out = out[ + :, + :, + pad_d_0 : out_d - pad_d_1, + pad_h_0 : out_h - pad_h_1, + pad_w_0 : out_w - pad_w_1, + ] if attrs['data_format'] == 'NHWC': out = np.transpose(out, [0, 2, 3, 4, 1]) return out class TestConv3DTransposeOp(OpTest): - def setUp(self): # init as conv transpose self.use_cudnn = False @@ -134,11 +155,12 @@ class TestConv3DTransposeOp(OpTest): 'dilations': self.dilations, 'groups': self.groups, 'use_cudnn': self.use_cudnn, - 'data_format': self.data_format + 'data_format': self.data_format, } - output = conv3dtranspose_forward_naive(input_, filter_, - self.attrs).astype("float32") + output = conv3dtranspose_forward_naive( + input_, filter_, self.attrs + ).astype("float32") self.outputs = {'Output': output} @@ -152,40 +174,52 @@ class TestConv3DTransposeOp(OpTest): def test_check_grad(self): if self.use_cudnn: place = core.CUDAPlace(0) - self.check_grad_with_place(place, - set(['Input', 'Filter']), - 'Output', - max_relative_error=0.03) + self.check_grad_with_place( + place, + set(['Input', 'Filter']), + 'Output', + max_relative_error=0.03, + ) else: - self.check_grad(set(['Input', 'Filter']), - 'Output', - max_relative_error=0.03) + self.check_grad( + set(['Input', 'Filter']), 'Output', max_relative_error=0.03 + ) def test_check_grad_no_filter(self): if self.use_cudnn: place = core.CUDAPlace(0) - 
self.check_grad_with_place(place, ['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + place, + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + ) elif self.check_no_filter: - self.check_grad(['Input'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Filter'])) + self.check_grad( + ['Input'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Filter']), + ) def test_check_grad_no_input(self): if self.use_cudnn: place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input'])) + self.check_grad_with_place( + place, + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + ) elif self.check_no_input: - self.check_grad(['Filter'], - 'Output', - max_relative_error=0.03, - no_grad_set=set(['Input'])) + self.check_grad( + ['Filter'], + 'Output', + max_relative_error=0.03, + no_grad_set=set(['Input']), + ) def init_test_case(self): self.pad = [0, 0, 0] @@ -201,7 +235,6 @@ class TestConv3DTransposeOp(OpTest): class TestWithSymmetricPad(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_input = True self.pad = [1, 1, 1] @@ -214,7 +247,6 @@ class TestWithSymmetricPad(TestConv3DTransposeOp): class TestWithAsymmetricPad(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 0, 1, 2] self.stride = [1, 1, 1] @@ -226,7 +258,6 @@ class TestWithAsymmetricPad(TestConv3DTransposeOp): class TestWithSAMEPad(TestConv3DTransposeOp): - def init_test_case(self): self.stride = [1, 1, 2] self.dilations = [1, 2, 1] @@ -238,7 +269,6 @@ class TestWithSAMEPad(TestConv3DTransposeOp): class TestWithVALIDPad(TestConv3DTransposeOp): - def init_test_case(self): self.stride = [2, 1, 1] self.dilations = [1, 1, 1] @@ -250,7 +280,6 @@ class TestWithVALIDPad(TestConv3DTransposeOp): class TestWithStride(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_filter = True self.pad = [1, 1, 1] @@ -263,7 +292,6 @@ class TestWithStride(TestConv3DTransposeOp): class TestWithGroups(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -275,7 +303,6 @@ class TestWithGroups(TestConv3DTransposeOp): class TestWithDilation(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -287,7 +314,6 @@ class TestWithDilation(TestConv3DTransposeOp): class Test_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] @@ -300,19 +326,19 @@ class Test_NHWC(TestConv3DTransposeOp): # ------------ test_cudnn ------------ -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN(TestConv3DTransposeOp): - def init_op_type(self): self.use_cudnn = True self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSymmetricPad(TestWithSymmetricPad): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -327,10 +353,10 @@ class TestCUDNNWithSymmetricPad(TestWithSymmetricPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), 
"core is not compiled with CUDA" +) class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad): - def init_test_case(self): self.pad = [1, 1, 1, 0, 0, 2] self.stride = [1, 1, 1] @@ -345,10 +371,10 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSAMEPad(TestWithSAMEPad): - def init_test_case(self): self.stride = [1, 1, 2] self.dilations = [1, 2, 1] @@ -363,10 +389,10 @@ class TestCUDNNWithSAMEPad(TestWithSAMEPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithVALIDPad(TestWithVALIDPad): - def init_test_case(self): self.stride = [1, 1, 1] self.dilations = [1, 1, 1] @@ -381,10 +407,10 @@ class TestCUDNNWithVALIDPad(TestWithVALIDPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithStride(TestWithStride): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] @@ -399,10 +425,10 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithGroups(TestWithGroups): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -432,10 +458,10 @@ class TestCUDNNWithGroups(TestWithGroups): # self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNN_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [0, 0, 0] self.stride = [1, 1, 1] @@ -451,10 +477,10 @@ class TestCUDNN_NHWC(TestConv3DTransposeOp): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -470,10 +496,10 @@ class TestCUDNNWithSymmetricPad_NHWC(TestWithSymmetricPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad): - def init_test_case(self): self.pad = [1, 0, 1, 0, 0, 2] self.stride = [1, 1, 1] @@ -489,10 +515,10 @@ class TestCUDNNWithAsymmetricPad_NHWC(TestWithAsymmetricPad): self.op_type = "conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithStride_NHWC(TestWithStride): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] @@ -508,10 +534,10 @@ class TestCUDNNWithStride_NHWC(TestWithStride): self.op_type = 
"conv3d_transpose" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNWithGroups_NHWC(TestWithGroups): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py index 12d21378215993a4b5065b98901ef611bb76c1bb..0aed6c1c515be8f24c14a970c1a10be86e979c21 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py @@ -21,7 +21,6 @@ from test_conv3d_transpose_op import TestConv3DTransposeOp class TestWithSymmetricPad_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] @@ -34,7 +33,6 @@ class TestWithSymmetricPad_NHWC(TestConv3DTransposeOp): class TestWithAsymmetricPad_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 0, 1, 2] self.stride = [1, 1, 1] @@ -47,7 +45,6 @@ class TestWithAsymmetricPad_NHWC(TestConv3DTransposeOp): class TestWithGroups_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_filter = True self.pad = [1, 1, 1] @@ -61,7 +58,6 @@ class TestWithGroups_NHWC(TestConv3DTransposeOp): class TestWithStride_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] @@ -74,7 +70,6 @@ class TestWithStride_NHWC(TestConv3DTransposeOp): class TestWithDilation_NHWC(TestConv3DTransposeOp): - def init_test_case(self): self.check_no_input = True self.pad = [1, 1, 1] @@ -88,57 +83,68 @@ class TestWithDilation_NHWC(TestConv3DTransposeOp): class TestConv3DTransposeAPI(unittest.TestCase): - def test_case1(self): - data1 = fluid.layers.data(name='data1', - shape=[3, 5, 5, 5], - dtype='float32') - data2 = fluid.layers.data(name='data2', - shape=[5, 5, 5, 3], - dtype='float32') - - out1 = fluid.layers.conv3d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - data_format='NCDHW') - out2 = fluid.layers.conv3d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - data_format='NDHWC') - out3 = fluid.layers.conv3d_transpose(input=data1, - groups=1, - num_filters=6, - filter_size=3, - padding=[[0, 0], [0, 0], [1, 1], - [0, 0], [1, 1]], - data_format='NCDHW') - out4 = fluid.layers.conv3d_transpose(input=data2, - groups=3, - num_filters=6, - filter_size=3, - padding=[[0, 0], [0, 0], [1, 1], - [1, 2], [0, 0]], - data_format='NDHWC') - out5 = fluid.layers.conv3d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - padding='SAME', - data_format='NCDHW') - out6 = fluid.layers.conv3d_transpose(input=data2, - groups=1, - num_filters=6, - filter_size=3, - padding='VALID', - data_format='NDHWC') - out7 = fluid.layers.conv3d_transpose(input=data2, - groups=1, - num_filters=6, - output_size=[7, 7, 7], - padding=[0, 0, 0], - data_format='NDHWC') + data1 = fluid.layers.data( + name='data1', shape=[3, 5, 5, 5], dtype='float32' + ) + data2 = fluid.layers.data( + name='data2', shape=[5, 5, 5, 3], dtype='float32' + ) + + out1 = fluid.layers.conv3d_transpose( + input=data1, + groups=1, + num_filters=6, + filter_size=3, + data_format='NCDHW', + ) + out2 = fluid.layers.conv3d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + data_format='NDHWC', + ) + out3 = fluid.layers.conv3d_transpose( + input=data1, + 
groups=1, + num_filters=6, + filter_size=3, + padding=[[0, 0], [0, 0], [1, 1], [0, 0], [1, 1]], + data_format='NCDHW', + ) + out4 = fluid.layers.conv3d_transpose( + input=data2, + groups=3, + num_filters=6, + filter_size=3, + padding=[[0, 0], [0, 0], [1, 1], [1, 2], [0, 0]], + data_format='NDHWC', + ) + out5 = fluid.layers.conv3d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + padding='SAME', + data_format='NCDHW', + ) + out6 = fluid.layers.conv3d_transpose( + input=data2, + groups=1, + num_filters=6, + filter_size=3, + padding='VALID', + data_format='NDHWC', + ) + out7 = fluid.layers.conv3d_transpose( + input=data2, + groups=1, + num_filters=6, + output_size=[7, 7, 7], + padding=[0, 0, 0], + data_format='NDHWC', + ) data1_np = np.random.random((2, 3, 5, 5, 5)).astype("float32") data2_np = np.random.random((2, 5, 5, 5, 3)).astype("float32") @@ -149,13 +155,12 @@ class TestConv3DTransposeAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2, out3, out4, out5, out6, out7], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2, out3, out4, out5, out6, out7], + return_numpy=True, + ) self.assertIsNotNone(results[0]) self.assertIsNotNone(results[1]) self.assertIsNotNone(results[2]) @@ -166,48 +171,53 @@ class TestConv3DTransposeAPI(unittest.TestCase): class TestConv3DTransposeOpException(unittest.TestCase): - def test_exception(self): - data = fluid.layers.data(name='data', - shape=[3, 5, 5, 5], - dtype="float32") + data = fluid.layers.data( + name='data', shape=[3, 5, 5, 5], dtype="float32" + ) def attr_data_format(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - data_format="NCDW") + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + data_format="NCDW", + ) self.assertRaises(ValueError, attr_data_format) def attr_padding_str(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding='Vald') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding='Vald', + ) self.assertRaises(ValueError, attr_padding_str) def attr_padding_list(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding=[[1, 1], [1, 1], [0, 0], - [0, 0], [1, 1]]) + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [1, 1], [0, 0], [0, 0], [1, 1]], + ) self.assertRaises(ValueError, attr_padding_list) def attr_padding_with_data_format(): - out = fluid.layers.conv2d_transpose(input=data, - groups=1, - num_filters=6, - filter_size=3, - padding=[[1, 1], [0, 0], [0, 0], - [1, 0], [1, 1]], - data_format='NDHWC') + out = fluid.layers.conv2d_transpose( + input=data, + groups=1, + num_filters=6, + filter_size=3, + padding=[[1, 1], [0, 0], [0, 0], [1, 0], [1, 1]], + data_format='NDHWC', + ) self.assertRaises(ValueError, attr_padding_with_data_format) diff --git a/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py b/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py index 99d457c073e376e686677fd163ced93fa65663ab..b09a86f5bfa0b7abc0ab1c045d21d5285dbc10c1 100644 --- 
a/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py @@ -25,7 +25,6 @@ from decorator_helper import prog_scope class TestConvDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 4, 3, 3] @@ -39,11 +38,9 @@ class TestConvDoubleGradCheck(unittest.TestCase): w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -55,7 +52,6 @@ class TestConvDoubleGradCheck(unittest.TestCase): class TestConvDoubleGradCheckTest0(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 4, 3, 3] @@ -69,11 +65,9 @@ class TestConvDoubleGradCheckTest0(unittest.TestCase): w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -84,7 +78,6 @@ class TestConvDoubleGradCheckTest0(unittest.TestCase): class TestConvDoubleGradCheckTest1(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 3, 3, 3] @@ -98,11 +91,9 @@ class TestConvDoubleGradCheckTest1(unittest.TestCase): w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -113,7 +104,6 @@ class TestConvDoubleGradCheckTest1(unittest.TestCase): class TestConv3DDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 4, 3, 4, 2] @@ -127,14 +117,12 @@ class TestConv3DDoubleGradCheck(unittest.TestCase): w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): - #places = [fluid.CPUPlace()] + # places = [fluid.CPUPlace()] places = [] if core.is_compiled_with_cuda(): places.append(fluid.CUDAPlace(0)) @@ -143,7 +131,6 @@ class TestConv3DDoubleGradCheck(unittest.TestCase): class TestConv3DDoubleGradCheckTest1(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 4, 5, 3, 2] @@ -157,11 +144,9 @@ class TestConv3DDoubleGradCheckTest1(unittest.TestCase): w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -172,30 +157,29 @@ class TestConv3DDoubleGradCheckTest1(unittest.TestCase): class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = 
layers.conv2d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 0, 0, 1], - bias_attr=False, - use_cudnn=True) + y = layers.conv2d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 0, 0, 1], + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -206,30 +190,29 @@ class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase): class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv2d(input=x, - num_filters=2, - filter_size=1, - padding="SAME", - bias_attr=False, - use_cudnn=True) + y = layers.conv2d( + input=x, + num_filters=2, + filter_size=1, + padding="SAME", + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -240,30 +223,29 @@ class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase): class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv2d(input=x, - num_filters=2, - filter_size=1, - padding="VALID", - bias_attr=False, - use_cudnn=True) + y = layers.conv2d( + input=x, + num_filters=2, + filter_size=1, + padding="VALID", + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -274,32 +256,31 @@ class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase): class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv2d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 1], - bias_attr=False, - use_cudnn=True, - groups=1, - data_format="NHWC") + y = layers.conv2d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 1], + bias_attr=False, + use_cudnn=True, + groups=1, + data_format="NHWC", + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] 
for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -310,32 +291,31 @@ class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase): class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv2d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 0, 1, 0], - bias_attr=False, - use_cudnn=True, - groups=1, - data_format="NHWC") + y = layers.conv2d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 0, 1, 0], + bias_attr=False, + use_cudnn=True, + groups=1, + data_format="NHWC", + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -346,30 +326,29 @@ class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 2, 2, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv3d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 0, 0, 1, 1, 2], - bias_attr=False, - use_cudnn=True) + y = layers.conv3d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 0, 0, 1, 1, 2], + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -380,31 +359,30 @@ class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase): class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 2, 2, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv3d(input=x, - num_filters=2, - filter_size=1, - padding="SAME", - groups=1, - bias_attr=False, - use_cudnn=True) + y = layers.conv3d( + input=x, + num_filters=2, + filter_size=1, + padding="SAME", + groups=1, + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def 
test_grad(self): places = [fluid.CPUPlace()] @@ -415,30 +393,29 @@ class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase): class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 3, 3, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv3d(input=x, - num_filters=2, - filter_size=1, - padding="VALID", - bias_attr=False, - use_cudnn=True) + y = layers.conv3d( + input=x, + num_filters=2, + filter_size=1, + padding="VALID", + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -449,32 +426,31 @@ class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase): class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 2, 2, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv3d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 1, 1], - bias_attr=False, - use_cudnn=True, - groups=1, - data_format="NDHWC") + y = layers.conv3d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 1, 1], + bias_attr=False, + use_cudnn=True, + groups=1, + data_format="NDHWC", + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -485,32 +461,31 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase): class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 2, 2, 2, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 x = layers.data('x', shape, False, dtype) - y = layers.conv3d(input=x, - num_filters=2, - filter_size=1, - padding=[1, 0, 1, 0, 1, 0], - bias_attr=False, - use_cudnn=True, - groups=1, - data_format="NDHWC") + y = layers.conv3d( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 0, 1, 0, 1, 0], + bias_attr=False, + use_cudnn=True, + groups=1, + data_format="NDHWC", + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -521,7 +496,6 @@ class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): class TestDepthWiseConvDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [2, 4, 
3, 3] @@ -533,23 +507,18 @@ class TestDepthWiseConvDoubleGradCheck(unittest.TestCase): # use_cudnn == False # groups == filters # num_filters % num_channels == 0 - y = layers.conv2d(x, - shape[1], - 1, - groups=shape[1], - bias_attr=False, - use_cudnn=False) + y = layers.conv2d( + x, shape[1], 1, groups=shape[1], bias_attr=False, use_cudnn=False + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) def test_grad(self): places = [] @@ -560,7 +529,6 @@ class TestDepthWiseConvDoubleGradCheck(unittest.TestCase): class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase): - def depthwise_conv2d_wrapper(self, x): return paddle.nn.functional.conv2d(x[0], x[1], groups=4) @@ -582,16 +550,16 @@ class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase): x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype) - gradient_checker.double_grad_check([x, w], - y, - x_init=[x_arr, w_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, w], y, x_init=[x_arr, w_arr], place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.depthwise_conv2d_wrapper, [x, w], + self.depthwise_conv2d_wrapper, + [x, w], y, x_init=[x_arr, w_arr], - place=place) + place=place, + ) def test_grad(self): places = [] @@ -602,7 +570,6 @@ class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase): class TestConv3DDoubleGradCheck_NN(unittest.TestCase): - def conv3d_wrapper(self, x): return paddle.nn.functional.conv3d(x[0], x[1]) @@ -620,16 +587,12 @@ class TestConv3DDoubleGradCheck_NN(unittest.TestCase): x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) w_arr = np.random.uniform(-1, 1, w_shape).astype(dtype) - gradient_checker.double_grad_check([x, w], - y, - x_init=[x_arr, w_arr], - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.conv3d_wrapper, - [x, w], - y, - x_init=[x_arr, w_arr], - place=place) + gradient_checker.double_grad_check( + [x, w], y, x_init=[x_arr, w_arr], place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.conv3d_wrapper, [x, w], y, x_init=[x_arr, w_arr], place=place + ) def test_grad(self): places = [] diff --git a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py index aa9d612c5db092218192b5d262cba462042afc5a..57a2038d3f59bbb037d0d3bd5537488fa501a833 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py @@ -29,7 +29,6 @@ def conv_shift_forward(x, y): class TestConvShiftOp(OpTest): - def setUp(self): self.op_type = "conv_shift" diff --git a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py index 235f4a64f858ea65facadc93bbe28612768f9966..f4c139a5463eeb8e8d8ab3eb4f09f2d7af461a5a 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py @@ -25,7 +25,6 @@ from decorator_helper import prog_scope class TestConvTransposeDoubleGradCheck(unittest.TestCase): - def 
conv_transpose_wrapper(self, x): return paddle.nn.functional.conv2d_transpose(x[0], x[1], groups=1) @@ -37,11 +36,9 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase): if core.is_compiled_with_rocm(): dtype = np.float32 x = layers.data('x', shape, False, dtype) - y = layers.conv2d_transpose(x, - 2, - filter_size=1, - groups=1, - bias_attr=False) + y = layers.conv2d_transpose( + x, 2, filter_size=1, groups=1, bias_attr=False + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() @@ -50,23 +47,25 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase): w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) if core.is_compiled_with_rocm(): # HIP will sometimes fail if no atol - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps, - atol=1e-4) + gradient_checker.double_grad_check( + [x] + w, + y, + x_init=[x_arr] + w_arr, + place=place, + eps=eps, + atol=1e-4, + ) else: - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.conv_transpose_wrapper, [x] + w, + self.conv_transpose_wrapper, + [x] + w, y, x_init=[x_arr] + w_arr, - place=place) + place=place, + ) def test_grad(self): places = [] @@ -78,13 +77,12 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase): class TestConvTranspose2DoubleGradCheck_AsyPadding( - TestConvTransposeDoubleGradCheck): - + TestConvTransposeDoubleGradCheck +): def conv_transpose_wrapper(self, x): - return paddle.nn.functional.conv2d_transpose(x[0], - x[1], - groups=1, - padding=[1, 0, 0, 1]) + return paddle.nn.functional.conv2d_transpose( + x[0], x[1], groups=1, padding=[1, 0, 0, 1] + ) @prog_scope() def func(self, place): @@ -94,12 +92,14 @@ class TestConvTranspose2DoubleGradCheck_AsyPadding( if core.is_compiled_with_rocm(): dtype = np.float32 x = layers.data('x', shape, False, dtype) - y = layers.conv2d_transpose(input=x, - num_filters=2, - filter_size=1, - padding=[1, 0, 0, 1], - bias_attr=False, - use_cudnn=True) + y = layers.conv2d_transpose( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 0, 0, 1], + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() @@ -108,33 +108,34 @@ class TestConvTranspose2DoubleGradCheck_AsyPadding( w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) if core.is_compiled_with_rocm(): # HIP will sometimes fail if no atol - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps, - atol=1e-4) + gradient_checker.double_grad_check( + [x] + w, + y, + x_init=[x_arr] + w_arr, + place=place, + eps=eps, + atol=1e-4, + ) else: - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.conv_transpose_wrapper, [x] + w, + self.conv_transpose_wrapper, + [x] + w, y, x_init=[x_arr] + w_arr, - place=place) + place=place, + ) class TestConvTranspose2DoubleGradCheck_PaddingSAME( - TestConvTransposeDoubleGradCheck): - + TestConvTransposeDoubleGradCheck +): def conv_transpose_wrapper(self, x): - return paddle.nn.functional.conv2d_transpose(x[0], - 
x[1], - groups=1, - padding="SAME") + return paddle.nn.functional.conv2d_transpose( + x[0], x[1], groups=1, padding="SAME" + ) @prog_scope() def func(self, place): @@ -144,12 +145,14 @@ class TestConvTranspose2DoubleGradCheck_PaddingSAME( if core.is_compiled_with_rocm(): dtype = np.float32 x = layers.data('x', shape, False, dtype) - y = layers.conv2d_transpose(input=x, - num_filters=2, - filter_size=1, - padding="SAME", - bias_attr=False, - use_cudnn=True) + y = layers.conv2d_transpose( + input=x, + num_filters=2, + filter_size=1, + padding="SAME", + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() @@ -158,33 +161,34 @@ class TestConvTranspose2DoubleGradCheck_PaddingSAME( w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) if core.is_compiled_with_rocm(): # HIP will sometimes fail if no atol - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps, - atol=1e-4) + gradient_checker.double_grad_check( + [x] + w, + y, + x_init=[x_arr] + w_arr, + place=place, + eps=eps, + atol=1e-4, + ) else: - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.conv_transpose_wrapper, [x] + w, + self.conv_transpose_wrapper, + [x] + w, y, x_init=[x_arr] + w_arr, - place=place) + place=place, + ) class TestConvTranspose2DoubleGradCheck_PaddingVALID( - TestConvTransposeDoubleGradCheck): - + TestConvTransposeDoubleGradCheck +): def conv_transpose_wrapper(self, x): - return paddle.nn.functional.conv2d_transpose(x[0], - x[1], - groups=1, - padding="VALID") + return paddle.nn.functional.conv2d_transpose( + x[0], x[1], groups=1, padding="VALID" + ) @prog_scope() def func(self, place): @@ -194,12 +198,14 @@ class TestConvTranspose2DoubleGradCheck_PaddingVALID( if core.is_compiled_with_rocm(): dtype = np.float32 x = layers.data('x', shape, False, dtype) - y = layers.conv2d_transpose(input=x, - num_filters=2, - filter_size=1, - padding="VALID", - bias_attr=False, - use_cudnn=True) + y = layers.conv2d_transpose( + input=x, + num_filters=2, + filter_size=1, + padding="VALID", + bias_attr=False, + use_cudnn=True, + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() @@ -208,34 +214,34 @@ class TestConvTranspose2DoubleGradCheck_PaddingVALID( w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) if core.is_compiled_with_rocm(): # HIP will sometimes fail if no atol - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps, - atol=1e-4) + gradient_checker.double_grad_check( + [x] + w, + y, + x_init=[x_arr] + w_arr, + place=place, + eps=eps, + atol=1e-4, + ) else: - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.conv_transpose_wrapper, [x] + w, + self.conv_transpose_wrapper, + [x] + w, y, x_init=[x_arr] + w_arr, - place=place) + place=place, + ) class TestConvTranspose2DoubleGradCheck_ChannelLast( - TestConvTransposeDoubleGradCheck): - + TestConvTransposeDoubleGradCheck +): def conv_transpose_wrapper(self, x): - return 
paddle.nn.functional.conv2d_transpose(x[0], - x[1], - groups=1, - padding=[1, 1], - data_format="NHWC") + return paddle.nn.functional.conv2d_transpose( + x[0], x[1], groups=1, padding=[1, 1], data_format="NHWC" + ) @prog_scope() def func(self, place): @@ -245,14 +251,16 @@ class TestConvTranspose2DoubleGradCheck_ChannelLast( if core.is_compiled_with_rocm(): dtype = np.float32 x = layers.data('x', shape, False, dtype) - y = layers.conv2d_transpose(input=x, - num_filters=2, - filter_size=1, - padding=[1, 1], - bias_attr=False, - use_cudnn=True, - groups=1, - data_format="NHWC") + y = layers.conv2d_transpose( + input=x, + num_filters=2, + filter_size=1, + padding=[1, 1], + bias_attr=False, + use_cudnn=True, + groups=1, + data_format="NHWC", + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) w = fluid.default_main_program().global_block().all_parameters() @@ -261,23 +269,25 @@ class TestConvTranspose2DoubleGradCheck_ChannelLast( w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) if core.is_compiled_with_rocm(): # HIP will sometimes fail if no atol - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps, - atol=1e-4) + gradient_checker.double_grad_check( + [x] + w, + y, + x_init=[x_arr] + w_arr, + place=place, + eps=eps, + atol=1e-4, + ) else: - gradient_checker.double_grad_check([x] + w, - y, - x_init=[x_arr] + w_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.conv_transpose_wrapper, [x] + w, + self.conv_transpose_wrapper, + [x] + w, y, x_init=[x_arr] + w_arr, - place=place) + place=place, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_corr.py b/python/paddle/fluid/tests/unittests/test_corr.py index 252948d8eed4885a26c898e395896c8dd95f3b7b..82f28d3cd0046bf6632a5c3e8a5f1652907666ea 100644 --- a/python/paddle/fluid/tests/unittests/test_corr.py +++ b/python/paddle/fluid/tests/unittests/test_corr.py @@ -28,7 +28,6 @@ def numpy_corr(np_arr, rowvar=True, dtype='float64'): class Corr_Test(unittest.TestCase): - def setUp(self): self.shape = [4, 5] @@ -49,14 +48,13 @@ class Corr_Test(unittest.TestCase): corr = paddle.linalg.corrcoef(tensor) np_corr = numpy_corr(np_arr, rowvar=True, dtype=dtype) if dtype == 'float32': - np.testing.assert_allclose(np_corr, - corr.numpy(), - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + np_corr, corr.numpy(), rtol=1e-05, atol=1e-05 + ) else: - np.testing.assert_allclose(np_corr, - corr.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np_corr, corr.numpy(), rtol=1e-05 + ) def test_tensor_corr_rowvar(self): typelist = ['float64', 'float32'] @@ -76,37 +74,32 @@ class Corr_Test(unittest.TestCase): corr = paddle.linalg.corrcoef(tensor, rowvar=False) np_corr = numpy_corr(np_arr, rowvar=False, dtype=dtype) if dtype == 'float32': - np.testing.assert_allclose(np_corr, - corr.numpy(), - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + np_corr, corr.numpy(), rtol=1e-05, atol=1e-05 + ) else: - np.testing.assert_allclose(np_corr, - corr.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np_corr, corr.numpy(), rtol=1e-05 + ) # Input(x) only support N-D (1<=N<=2) tensor class Corr_Test2(Corr_Test): - def setUp(self): self.shape = [10] class Corr_Test3(Corr_Test): - def setUp(self): self.shape = [4, 5] # Input(x) only support N-D (1<=N<=2) tensor class Corr_Test4(unittest.TestCase): - def setUp(self): self.shape = [2, 5, 
2] def test_errors(self): - def test_err(): np_arr = np.random.rand(*self.shape).astype('float64') tensor = paddle.to_tensor(np_arr) @@ -117,7 +110,6 @@ class Corr_Test4(unittest.TestCase): # test unsupported complex input class Corr_Comeplex_Test(unittest.TestCase): - def setUp(self): self.dtype = 'complex128' @@ -129,7 +121,6 @@ class Corr_Comeplex_Test(unittest.TestCase): class Corr_Test5(Corr_Comeplex_Test): - def setUp(self): self.dtype = 'complex64' diff --git a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py index 5943214e9a3241f6807e79163ccccf933c6a71fa..86d1e0e1a8ce7a4ddf2b13e2d90eb9395727af9d 100644 --- a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py +++ b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py @@ -20,21 +20,23 @@ from paddle.fluid import Program, program_guard class TestCosSimOp(OpTest): - def setUp(self): self.op_type = "cos_sim" self.inputs = { 'X': np.random.random((6, 20)).astype("float32"), - 'Y': np.random.random((6, 20)).astype("float32") + 'Y': np.random.random((6, 20)).astype("float32"), } expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1) expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1) - expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \ - expect_x_norm / expect_y_norm + expect_out = ( + (self.inputs['X'] * self.inputs['Y']).sum(axis=1) + / expect_x_norm + / expect_y_norm + ) self.outputs = { 'XNorm': np.expand_dims(expect_x_norm, 1), 'YNorm': np.expand_dims(expect_y_norm, 1), - 'Out': np.expand_dims(expect_out, 1) + 'Out': np.expand_dims(expect_out, 1), } def test_check_output(self): @@ -44,84 +46,89 @@ class TestCosSimOp(OpTest): self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.06) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.06, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.06, no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.06, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Y') + ) class TestCosSimOp2(TestCosSimOp): - def setUp(self): self.op_type = "cos_sim" self.inputs = { 'X': np.random.random((6, 100)).astype("float32"), - 'Y': np.random.random((1, 100)).astype("float32") + 'Y': np.random.random((1, 100)).astype("float32"), } expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1) expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1) - expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \ - expect_x_norm / expect_y_norm + expect_out = ( + (self.inputs['X'] * self.inputs['Y']).sum(axis=1) + / expect_x_norm + / expect_y_norm + ) self.outputs = { 'XNorm': np.expand_dims(expect_x_norm, 1), 'YNorm': np.expand_dims(expect_y_norm, 1), - 'Out': np.expand_dims(expect_out, 1) + 'Out': np.expand_dims(expect_out, 1), } class TestCosSimOp3(TestCosSimOp): - def setUp(self): self.op_type = "cos_sim" self.inputs = { 'X': np.random.random((6, 5, 4)).astype("float32"), - 'Y': np.random.random((6, 5, 4)).astype("float32") + 'Y': np.random.random((6, 5, 4)).astype("float32"), } expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2)) expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2)) - expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \ - expect_x_norm / expect_y_norm + expect_out = ( + (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) + / expect_x_norm + / expect_y_norm + ) self.outputs = { 
'XNorm': np.expand_dims(expect_x_norm, 1), 'YNorm': np.expand_dims(expect_y_norm, 1), - 'Out': np.expand_dims(expect_out, 1) + 'Out': np.expand_dims(expect_out, 1), } class TestCosSimOp4(TestCosSimOp): - def setUp(self): self.op_type = "cos_sim" self.inputs = { 'X': np.random.random((6, 5, 20)).astype("float32"), - 'Y': np.random.random((1, 5, 20)).astype("float32") + 'Y': np.random.random((1, 5, 20)).astype("float32"), } expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2)) expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2)) - expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \ - expect_x_norm / expect_y_norm + expect_out = ( + (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) + / expect_x_norm + / expect_y_norm + ) self.outputs = { 'XNorm': np.expand_dims(expect_x_norm, 1), 'YNorm': np.expand_dims(expect_y_norm, 1), - 'Out': np.expand_dims(expect_out, 1) + 'Out': np.expand_dims(expect_out, 1), } class TestCosSimOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of batch_norm must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - x2 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + x2 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.cos_sim, x1, x2) # the input dtype of batch_norm must be float32 diff --git a/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py b/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py index 765ed8a67ac72e4352cc7fb7f949cce37380e1b8..784e583b4f5f83f113e9533547b2a2bbdffbf2ff 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py @@ -39,7 +39,6 @@ def cosine_embedding_loss(input1, input2, label, margin=0.5, reduction='mean'): class TestFunctionCosineEmbeddingLoss(unittest.TestCase): - def setUp(self): self.input1_np = np.random.random(size=(5, 3)).astype(np.float64) self.input2_np = np.random.random(size=(5, 3)).astype(np.float64) @@ -52,43 +51,43 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): input1 = paddle.to_tensor(self.input1_np) input2 = paddle.to_tensor(self.input2_np) label = paddle.to_tensor(self.label_np) - dy_result = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') - expected1 = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='mean') + dy_result = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) + expected1 = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='mean', + ) np.testing.assert_allclose(dy_result.numpy(), expected1, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) - dy_result = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='sum') - expected2 = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='sum') + dy_result = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='sum' + ) + expected2 = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='sum', + 
) np.testing.assert_allclose(dy_result.numpy(), expected2, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) - dy_result = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='none') - expected3 = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='none') + dy_result = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='none' + ) + expected3 = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='none', + ) np.testing.assert_allclose(dy_result.numpy(), expected3, rtol=1e-05) self.assertTrue(dy_result.shape, [5]) @@ -97,50 +96,52 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): input1 = static.data(name='input1', shape=[5, 3], dtype='float64') input2 = static.data(name='input2', shape=[5, 3], dtype='float64') label = static.data(name='label', shape=[5], dtype='int32') - result0 = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='none') - result1 = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='sum') - result2 = paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + result0 = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='none' + ) + result1 = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='sum' + ) + result2 = paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() exe = static.Executor(place) exe.run(static.default_startup_program()) - static_result = exe.run(feed={ - "input1": self.input1_np, - "input2": self.input2_np, - "label": self.label_np - }, - fetch_list=[result0, result1, result2]) - expected = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='none') + static_result = exe.run( + feed={ + "input1": self.input1_np, + "input2": self.input2_np, + "label": self.label_np, + }, + fetch_list=[result0, result1, result2], + ) + expected = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='none', + ) np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) - expected = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='sum') + expected = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='sum', + ) np.testing.assert_allclose(static_result[1], expected, rtol=1e-05) - expected = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='mean') + expected = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='mean', + ) np.testing.assert_allclose(static_result[2], expected, rtol=1e-05) @@ -171,72 +172,62 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): def test_label_shape_error(): label = paddle.to_tensor( - np.random.randint(low=0, high=2, size=(2, 3))) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + np.random.randint(low=0, high=2, size=(2, 3)) + ) + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, 
test_label_shape_error) def test_input_different_shape_error(): input1 = paddle.to_tensor(self.input1_np[0]) label = paddle.to_tensor(np.ndarray([1])) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, test_input_different_shape_error) def test_input_shape2D_error(): input1 = paddle.to_tensor( - np.random.random(size=(2, 3, 4)).astype(np.float64)) + np.random.random(size=(2, 3, 4)).astype(np.float64) + ) input2 = paddle.to_tensor( - np.random.random(size=(2, 3, 4)).astype(np.float64)) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + np.random.random(size=(2, 3, 4)).astype(np.float64) + ) + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, test_input_shape2D_error) def test_label_value_error(): label = paddle.to_tensor(np.ndarray([-1, -2])) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, test_label_value_error) def test_input_type_error(): input1 = paddle.to_tensor(self.input1_np.astype(np.int64)) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, test_input_type_error) def test_label_type_error(): label = paddle.to_tensor(self.label_np.astype(np.int16)) - paddle.nn.functional.cosine_embedding_loss(input1, - input2, - label, - margin=0.5, - reduction='mean') + paddle.nn.functional.cosine_embedding_loss( + input1, input2, label, margin=0.5, reduction='mean' + ) self.assertRaises(ValueError, test_label_type_error) class TestClassCosineEmbeddingLoss(unittest.TestCase): - def setUp(self): self.input1_np = np.random.random(size=(10, 3)).astype(np.float32) self.input2_np = np.random.random(size=(10, 3)).astype(np.float32) @@ -252,14 +243,17 @@ class TestClassCosineEmbeddingLoss(unittest.TestCase): input1 = paddle.to_tensor(self.input1_np) input2 = paddle.to_tensor(self.input2_np) label = paddle.to_tensor(self.label_np) - CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss(margin=0.5, - reduction='mean') + CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss( + margin=0.5, reduction='mean' + ) dy_result = CosineEmbeddingLoss(input1, input2, label) - expected1 = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='mean') + expected1 = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='mean', + ) np.testing.assert_allclose(dy_result.numpy(), expected1, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) @@ -267,35 +261,42 @@ class TestClassCosineEmbeddingLoss(unittest.TestCase): input2_1D = paddle.to_tensor(self.input2_np_1D) label_1D = paddle.to_tensor(self.label_np_1D) dy_result = CosineEmbeddingLoss(input1_1D, input2_1D, label_1D) - expected2 = cosine_embedding_loss(self.input1_np_1D, - self.input2_np_1D, - self.label_np_1D, - margin=0.5, - reduction='mean') + expected2 = cosine_embedding_loss( + self.input1_np_1D, + self.input2_np_1D, + self.label_np_1D, + margin=0.5, + reduction='mean', + ) 
np.testing.assert_allclose(dy_result.numpy(), expected2, rtol=1e-05) def run_static(self): input1 = static.data(name='input1', shape=[10, 3], dtype='float32') input2 = static.data(name='input2', shape=[10, 3], dtype='float32') label = static.data(name='label', shape=[10], dtype='int64') - CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss(margin=0.5, - reduction='mean') + CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss( + margin=0.5, reduction='mean' + ) result = CosineEmbeddingLoss(input1, input2, label) place = paddle.CPUPlace() exe = static.Executor(place) exe.run(static.default_startup_program()) - static_result = exe.run(feed={ - "input1": self.input1_np, - "input2": self.input2_np, - "label": self.label_np - }, - fetch_list=[result]) - expected = cosine_embedding_loss(self.input1_np, - self.input2_np, - self.label_np, - margin=0.5, - reduction='mean') + static_result = exe.run( + feed={ + "input1": self.input1_np, + "input2": self.input2_np, + "label": self.label_np, + }, + fetch_list=[result], + ) + expected = cosine_embedding_loss( + self.input1_np, + self.input2_np, + self.label_np, + margin=0.5, + reduction='mean', + ) np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) @@ -308,16 +309,17 @@ class TestClassCosineEmbeddingLoss(unittest.TestCase): self.run_static() def test_errors(self): - def test_margin_error(): CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss( - margin=2, reduction='mean') + margin=2, reduction='mean' + ) self.assertRaises(ValueError, test_margin_error) def test_reduction_error(): CosineEmbeddingLoss = paddle.nn.CosineEmbeddingLoss( - margin=2, reduction='reduce_mean') + margin=2, reduction='reduce_mean' + ) self.assertRaises(ValueError, test_reduction_error) diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py index db0bb0147cdeb0ca9f8009b67290122aacb980ca..1c658d0ecc45836d69b4dfcd757d738e7c761187 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py @@ -23,7 +23,6 @@ from paddle.fluid import Program, program_guard, Executor, default_main_program class TestCosineSimilarityAPI(unittest.TestCase): - def setUp(self): self.places = [paddle.CPUPlace()] if core.is_compiled_with_cuda(): @@ -52,12 +51,11 @@ class TestCosineSimilarityAPI(unittest.TestCase): x2 = paddle.fluid.data(name="x2", shape=shape) result = F.cosine_similarity(x1, x2, axis=axis, eps=eps) exe = Executor(place) - fetches = exe.run(default_main_program(), - feed={ - "x1": np_x1, - "x2": np_x2 - }, - fetch_list=[result]) + fetches = exe.run( + default_main_program(), + feed={"x1": np_x1, "x2": np_x2}, + fetch_list=[result], + ) np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps) np.testing.assert_allclose(fetches[0], np_out, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_cost_model.py b/python/paddle/fluid/tests/unittests/test_cost_model.py index ba9b4c3572147232debf6f254954bd6a661a52a3..92fa062ad7a8aa15b67718036812b6603ac32def 100644 --- a/python/paddle/fluid/tests/unittests/test_cost_model.py +++ b/python/paddle/fluid/tests/unittests/test_cost_model.py @@ -24,13 +24,13 @@ device = "gpu" if core.is_compiled_with_cuda() else "cpu" class TestCostModel(unittest.TestCase): - def test_profiler_measure_empty_program(self): cost_model = core.CostModel() empty_program = paddle.static.Program() startup_program = paddle.static.Program() - cost_data = 
cost_model.profile_measure(empty_program, startup_program, - device, ["time"]) + cost_data = cost_model.profile_measure( + empty_program, startup_program, device, ["time"] + ) self.assertEqual(cost_data.get_whole_time_ms(), 0) def test_profiler_measure_program(self): @@ -42,14 +42,16 @@ class TestCostModel(unittest.TestCase): hidden = paddle.static.nn.fc(data, 10) loss = paddle.mean(hidden) cost_model = core.CostModel() - cost_data = cost_model.profile_measure(main_program, startup_program, - device, ["time"]) + cost_data = cost_model.profile_measure( + main_program, startup_program, device, ["time"] + ) fc_op_time = cost_data.get_op_time_ms(0) mean_op_time = cost_data.get_op_time_ms(1) self.assertGreater(fc_op_time, 0) self.assertGreater(mean_op_time, 0) - self.assertGreaterEqual(cost_data.get_whole_time_ms(), - fc_op_time + mean_op_time) + self.assertGreaterEqual( + cost_data.get_whole_time_ms(), fc_op_time + mean_op_time + ) def test_static_op_benchmark_cost_model(self): op_name = "abs" @@ -70,16 +72,18 @@ class TestCostModel(unittest.TestCase): print("conv2d_op_time:", conv2d_op_time) print("conv2d_op_config:", conv2d_op_config) - conv2d_backward_op_cost = cost_model.get_static_op_time("conv2d", - forward=False) + conv2d_backward_op_cost = cost_model.get_static_op_time( + "conv2d", forward=False + ) conv2d_backward_op_time = conv2d_backward_op_cost["op_time"] conv2d_backward_op_config = conv2d_backward_op_cost["config"] self.assertGreater(float(conv2d_backward_op_time), 0) print("conv2d_backward_op_time:", conv2d_backward_op_time) print("conv2d_backward_op_config:", conv2d_backward_op_config) - conv2d_fp16_op_cost = cost_model.get_static_op_time("conv2d", - dtype="float16") + conv2d_fp16_op_cost = cost_model.get_static_op_time( + "conv2d", dtype="float16" + ) conv2d_fp16_op_time = conv2d_fp16_op_cost["op_time"] conv2d_fp16_op_config = conv2d_fp16_op_cost["config"] self.assertGreater(float(conv2d_fp16_op_time), 0) diff --git a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py index 48a0f08fd4aa4f69fc9e7a94096b7518c5445a3e..55f34dd261218732ad7c89ae21457ff411577e12 100644 --- a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py @@ -26,8 +26,11 @@ class TestCountNonzeroAPI(unittest.TestCase): def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_api_static(self): paddle.enable_static() @@ -40,8 +43,9 @@ class TestCountNonzeroAPI(unittest.TestCase): out4 = paddle.count_nonzero(x, axis) out5 = paddle.count_nonzero(x, tuple(axis)) exe = paddle.static.Executor(self.place) - res = exe.run(feed={'X': self.x}, - fetch_list=[out1, out2, out3, out4, out5]) + res = exe.run( + feed={'X': self.x}, fetch_list=[out1, out2, out3, out4, out5] + ) out_ref = np.count_nonzero(self.x) for out in res: np.testing.assert_allclose(out, out_ref, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_cov.py b/python/paddle/fluid/tests/unittests/test_cov.py index d7fec9028ab034282684201ba49133fcbfbac6df..b00fc5b3fe21109be203d5c0d3e9a64f009689e9 100644 --- a/python/paddle/fluid/tests/unittests/test_cov.py +++ b/python/paddle/fluid/tests/unittests/test_cov.py @@ -20,15 +20,16 @@ from paddle.fluid.framework import 
_test_eager_guard def numpy_cov(np_arr, rowvar=True, ddof=1, fweights=None, aweights=None): - return np.cov(np_arr, - rowvar=rowvar, - ddof=int(ddof), - fweights=fweights, - aweights=aweights) + return np.cov( + np_arr, + rowvar=rowvar, + ddof=int(ddof), + fweights=fweights, + aweights=aweights, + ) class Cov_Test(unittest.TestCase): - def setUp(self): self.shape = [20, 10] self.weightshape = [10] @@ -48,16 +49,12 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) tensor = paddle.to_tensor(np_arr, place=p) - cov = paddle.linalg.cov(tensor, - rowvar=True, - ddof=True, - fweights=None, - aweights=None) - np_cov = numpy_cov(np_arr, - rowvar=True, - ddof=1, - fweights=None, - aweights=None) + cov = paddle.linalg.cov( + tensor, rowvar=True, ddof=True, fweights=None, aweights=None + ) + np_cov = numpy_cov( + np_arr, rowvar=True, ddof=1, fweights=None, aweights=None + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_default(self): @@ -80,16 +77,16 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) tensor = paddle.to_tensor(np_arr, place=p) - cov = paddle.linalg.cov(tensor, - rowvar=False, - ddof=True, - fweights=None, - aweights=None) - np_cov = numpy_cov(np_arr, - rowvar=False, - ddof=1, - fweights=None, - aweights=None) + cov = paddle.linalg.cov( + tensor, + rowvar=False, + ddof=True, + fweights=None, + aweights=None, + ) + np_cov = numpy_cov( + np_arr, rowvar=False, ddof=1, fweights=None, aweights=None + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_rowvar(self): @@ -112,16 +109,16 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) tensor = paddle.to_tensor(np_arr, place=p) - cov = paddle.linalg.cov(tensor, - rowvar=True, - ddof=False, - fweights=None, - aweights=None) - np_cov = numpy_cov(np_arr, - rowvar=True, - ddof=0, - fweights=None, - aweights=None) + cov = paddle.linalg.cov( + tensor, + rowvar=True, + ddof=False, + fweights=None, + aweights=None, + ) + np_cov = numpy_cov( + np_arr, rowvar=True, ddof=0, fweights=None, aweights=None + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_ddof(self): @@ -143,20 +140,21 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) - np_fw = np.random.randint(10, - size=self.weightshape).astype('int32') + np_fw = np.random.randint(10, size=self.weightshape).astype( + 'int32' + ) tensor = paddle.to_tensor(np_arr, place=p) fweights = paddle.to_tensor(np_fw, place=p) - cov = paddle.linalg.cov(tensor, - rowvar=True, - ddof=True, - fweights=fweights, - aweights=None) - np_cov = numpy_cov(np_arr, - rowvar=True, - ddof=1, - fweights=np_fw, - aweights=None) + cov = paddle.linalg.cov( + tensor, + rowvar=True, + ddof=True, + fweights=fweights, + aweights=None, + ) + np_cov = numpy_cov( + np_arr, rowvar=True, ddof=1, fweights=np_fw, aweights=None + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_fweights(self): @@ -178,20 +176,21 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) - np_aw = np.random.randint(10, - size=self.weightshape).astype('int32') + np_aw = np.random.randint(10, size=self.weightshape).astype( + 'int32' + ) tensor = paddle.to_tensor(np_arr, place=p) aweights = paddle.to_tensor(np_aw, place=p) - cov = 
paddle.linalg.cov(tensor, - rowvar=True, - ddof=True, - fweights=None, - aweights=aweights) - np_cov = numpy_cov(np_arr, - rowvar=True, - ddof=1, - fweights=None, - aweights=np_aw) + cov = paddle.linalg.cov( + tensor, + rowvar=True, + ddof=True, + fweights=None, + aweights=aweights, + ) + np_cov = numpy_cov( + np_arr, rowvar=True, ddof=1, fweights=None, aweights=np_aw + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_aweights(self): @@ -213,22 +212,23 @@ class Cov_Test(unittest.TestCase): for dtype in typelist: np_arr = np.random.rand(*self.shape).astype(dtype) - np_fw = np.random.randint(10, - size=self.weightshape).astype('int64') + np_fw = np.random.randint(10, size=self.weightshape).astype( + 'int64' + ) np_aw = np.random.rand(*self.weightshape).astype('float64') tensor = paddle.to_tensor(np_arr, place=p) fweights = paddle.to_tensor(np_fw, place=p) aweights = paddle.to_tensor(np_aw, place=p) - cov = paddle.linalg.cov(tensor, - rowvar=True, - ddof=True, - fweights=fweights, - aweights=aweights) - np_cov = numpy_cov(np_arr, - rowvar=True, - ddof=1, - fweights=np_fw, - aweights=np_aw) + cov = paddle.linalg.cov( + tensor, + rowvar=True, + ddof=True, + fweights=fweights, + aweights=aweights, + ) + np_cov = numpy_cov( + np_arr, rowvar=True, ddof=1, fweights=np_fw, aweights=np_aw + ) np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_weights(self): @@ -238,7 +238,6 @@ class Cov_Test(unittest.TestCase): class Cov_Test2(Cov_Test): - def setUp(self): self.shape = [10] self.weightshape = [10] @@ -246,30 +245,32 @@ class Cov_Test2(Cov_Test): # Input(x) only support N-D (1<=N<=2) tensor class Cov_Test3(unittest.TestCase): - def setUp(self): self.shape = [2, 5, 10] self.fweightshape = [10] self.aweightshape = [10] - self.fw_s = 1. - self.aw_s = 1. + self.fw_s = 1.0 + self.aw_s = 1.0 def func_test_errors(self): - def test_err(): np_arr = np.random.rand(*self.shape).astype('float64') - np_fw = self.fw_s * np.random.rand( - *self.fweightshape).astype('int32') - np_aw = self.aw_s * np.random.rand( - *self.aweightshape).astype('float64') + np_fw = self.fw_s * np.random.rand(*self.fweightshape).astype( + 'int32' + ) + np_aw = self.aw_s * np.random.rand(*self.aweightshape).astype( + 'float64' + ) tensor = paddle.to_tensor(np_arr) fweights = paddle.to_tensor(np_fw) aweights = paddle.to_tensor(np_aw) - cov = paddle.linalg.cov(tensor, - rowvar=True, - ddof=True, - fweights=fweights, - aweights=aweights) + cov = paddle.linalg.cov( + tensor, + rowvar=True, + ddof=True, + fweights=fweights, + aweights=aweights, + ) self.assertRaises(ValueError, test_err) @@ -279,70 +280,64 @@ class Cov_Test3(unittest.TestCase): self.func_test_errors() -#Input(fweights) only support N-D (N<=1) tensor +# Input(fweights) only support N-D (N<=1) tensor class Cov_Test4(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [2, 10] self.aweightshape = [10] - self.fw_s = 1. - self.aw_s = 1. + self.fw_s = 1.0 + self.aw_s = 1.0 -#The number of Input(fweights) should equal to x's dim[1] +# The number of Input(fweights) should equal to x's dim[1] class Cov_Test5(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [5] self.aweightshape = [10] - self.fw_s = 1. - self.aw_s = 1. + self.fw_s = 1.0 + self.aw_s = 1.0 -#The value of Input(fweights) cannot be negtive +# The value of Input(fweights) cannot be negtive class Cov_Test6(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [10] self.aweightshape = [10] - self.fw_s = -1. 
- self.aw_s = 1. + self.fw_s = -1.0 + self.aw_s = 1.0 -#Input(aweights) only support N-D (N<=1) tensor +# Input(aweights) only support N-D (N<=1) tensor class Cov_Test7(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [10] self.aweightshape = [2, 10] - self.fw_s = 1. - self.aw_s = 1. + self.fw_s = 1.0 + self.aw_s = 1.0 -#The number of Input(aweights) should equal to x's dim[1] +# The number of Input(aweights) should equal to x's dim[1] class Cov_Test8(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [10] self.aweightshape = [5] - self.fw_s = 1. - self.aw_s = 1. + self.fw_s = 1.0 + self.aw_s = 1.0 -#The value of Input(aweights) cannot be negtive +# The value of Input(aweights) cannot be negtive class Cov_Test9(Cov_Test3): - def setUp(self): self.shape = [5, 10] self.fweightshape = [10] self.aweightshape = [10] - self.fw_s = 1. - self.aw_s = -1. + self.fw_s = 1.0 + self.aw_s = -1.0 if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cpuonly_spawn.py b/python/paddle/fluid/tests/unittests/test_cpuonly_spawn.py index 3f5fadff3af7c8df9b85100451cbf2f49cc78807..c0e6d173f5771b28489624552233dd27d4babab3 100644 --- a/python/paddle/fluid/tests/unittests/test_cpuonly_spawn.py +++ b/python/paddle/fluid/tests/unittests/test_cpuonly_spawn.py @@ -21,7 +21,6 @@ import paddle.distributed as dist class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear1 = nn.Linear(10, 10) @@ -58,7 +57,6 @@ def train(print_result=False): class TestSpawn(unittest.TestCase): - def test_spawn(self): dist.spawn(train, backend='gloo', nprocs=4) diff --git a/python/paddle/fluid/tests/unittests/test_create_global_var.py b/python/paddle/fluid/tests/unittests/test_create_global_var.py index 6a9f188b363195441ff6ec594e871e879aec7408..45dc1e5202285364ed40e847e3459963d8cd2e80 100644 --- a/python/paddle/fluid/tests/unittests/test_create_global_var.py +++ b/python/paddle/fluid/tests/unittests/test_create_global_var.py @@ -19,7 +19,6 @@ from paddle.fluid import Program, program_guard class TestCreateGlobalVarError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): diff --git a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py index af9b162e9093985cda8f41c55c750febd992d3d3..07c89eefc32fab37ce093e91d96fbe4471ecddc6 100644 --- a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py +++ b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py @@ -17,7 +17,6 @@ import paddle.fluid.layers as layers class TestDocString(unittest.TestCase): - def test_layer_doc_string(self): print(layers.dropout.__doc__) diff --git a/python/paddle/fluid/tests/unittests/test_create_parameter.py b/python/paddle/fluid/tests/unittests/test_create_parameter.py index 53260374c5d20d06fdb5837477a8d7fa9e3b3781..d70e06f184356ec8dc547b56531f27456b2ded5f 100644 --- a/python/paddle/fluid/tests/unittests/test_create_parameter.py +++ b/python/paddle/fluid/tests/unittests/test_create_parameter.py @@ -20,7 +20,6 @@ import paddle class TestCreateParameterError(unittest.TestCase): - def func_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -36,18 +35,18 @@ class TestCreateParameterError(unittest.TestCase): self.assertRaises(TypeError, test_shape_item) def test_attr(): - fluid.layers.create_parameter([1, 2, 3], - np.float32, - attr=np.array( - [i for i in range(6)])) + fluid.layers.create_parameter( + [1, 
2, 3], np.float32, attr=np.array([i for i in range(6)]) + ) self.assertRaises(TypeError, test_attr) def test_default_initializer(): - fluid.layers.create_parameter([1, 2, 3], - np.float32, - default_initializer=np.array( - [i for i in range(6)])) + fluid.layers.create_parameter( + [1, 2, 3], + np.float32, + default_initializer=np.array([i for i in range(6)]), + ) self.assertRaises(TypeError, test_default_initializer) diff --git a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py index 2dc0853f7190c0b38554bd4ac6c40292ce1418fc..8336bad6de1bf447df029ba882842b62cf06220a 100644 --- a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py @@ -20,10 +20,10 @@ from op_test import OpTest class CRFDecoding(object): - - def __init__(self, emission_weights, transition_weights, - seq_start_positions): - assert (emission_weights.shape[0] == sum(seq_start_positions)) + def __init__( + self, emission_weights, transition_weights, seq_start_positions + ): + assert emission_weights.shape[0] == sum(seq_start_positions) self.tag_num = emission_weights.shape[1] self.seq_num = len(seq_start_positions) @@ -34,10 +34,12 @@ class CRFDecoding(object): self.b = transition_weights[1, :] self.w = transition_weights[2:, :] - self.track = np.zeros((sum(seq_start_positions), self.tag_num), - dtype="int64") - self.decoded_path = np.zeros((sum(seq_start_positions), 1), - dtype="int64") + self.track = np.zeros( + (sum(seq_start_positions), self.tag_num), dtype="int64" + ) + self.decoded_path = np.zeros( + (sum(seq_start_positions), 1), dtype="int64" + ) def _decode_one_sequence(self, decoded_path, x): seq_len, tag_num = x.shape @@ -77,8 +79,9 @@ class CRFDecoding(object): start = cur_pos cur_pos += self.seq_start_positions[i] end = cur_pos - self._decode_one_sequence(self.decoded_path[start:end, :], - self.x[start:end, :]) + self._decode_one_sequence( + self.decoded_path[start:end, :], self.x[start:end, :] + ) return self.decoded_path @@ -98,10 +101,12 @@ class TestCRFDecodingOp1(OpTest): for i in range(SEQ_NUM): lod[-1].append(random.randint(1, MAX_SEQ_LEN)) total_len += lod[-1][-1] - emission = np.random.uniform(-1, 1, - [total_len, TAG_NUM]).astype("float64") - transition = np.random.uniform(-0.5, 0.5, - [TAG_NUM + 2, TAG_NUM]).astype("float64") + emission = np.random.uniform(-1, 1, [total_len, TAG_NUM]).astype( + "float64" + ) + transition = np.random.uniform( + -0.5, 0.5, [TAG_NUM + 2, TAG_NUM] + ).astype("float64") self.inputs = { "Emission": (emission, lod), @@ -136,27 +141,29 @@ class TestCRFDecodingOp2(OpTest): self.init_lod() total_len = sum(self.lod[-1]) - transition = np.repeat(np.arange(TAG_NUM, - dtype="float64").reshape(1, TAG_NUM), - TAG_NUM + 2, - axis=0) - emission = np.repeat(np.arange(TAG_NUM, - dtype="float64").reshape(1, TAG_NUM), - total_len, - axis=0) - - labels = np.random.randint(low=0, - high=TAG_NUM, - size=(total_len, 1), - dtype="int64") - predicted_labels = np.ones( - (total_len, 1), dtype="int64") * (TAG_NUM - 1) + transition = np.repeat( + np.arange(TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + TAG_NUM + 2, + axis=0, + ) + emission = np.repeat( + np.arange(TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + total_len, + axis=0, + ) + + labels = np.random.randint( + low=0, high=TAG_NUM, size=(total_len, 1), dtype="int64" + ) + predicted_labels = np.ones((total_len, 1), dtype="int64") * ( + TAG_NUM - 1 + ) expected_output = (labels == 
predicted_labels).astype("int64") self.inputs = { "Emission": (emission, self.lod), "Transition": transition, - "Label": (labels, self.lod) + "Label": (labels, self.lod), } self.outputs = {"ViterbiPath": expected_output} @@ -166,13 +173,11 @@ class TestCRFDecodingOp2(OpTest): class TestCRFDecodingOp3(TestCRFDecodingOp2): - def init_lod(self): self.lod = [[1, 0, 0, 4]] class TestCRFDecodingOp4(TestCRFDecodingOp2): - def init_lod(self): self.lod = [[0, 2, 3, 0]] @@ -183,7 +188,7 @@ def seq_pad(data, length): padded = np.zeros(shape).astype(data.dtype) offset = 0 for i, l in enumerate(length): - padded[i, 0:l] = data[offset:offset + l] + padded[i, 0:l] = data[offset : offset + l] offset += l return np.squeeze(padded) @@ -204,10 +209,12 @@ class TestCRFDecodingOp5(OpTest): for i in range(SEQ_NUM): lod[-1].append(random.randint(1, MAX_SEQ_LEN)) total_len += lod[-1][-1] - emission = np.random.uniform(-1, 1, - [total_len, TAG_NUM]).astype("float64") - transition = np.random.uniform(-0.5, 0.5, - [TAG_NUM + 2, TAG_NUM]).astype("float64") + emission = np.random.uniform(-1, 1, [total_len, TAG_NUM]).astype( + "float64" + ) + transition = np.random.uniform( + -0.5, 0.5, [TAG_NUM + 2, TAG_NUM] + ).astype("float64") self.inputs = { "Emission": seq_pad(emission, lod[0]), @@ -229,7 +236,6 @@ class TestCRFDecodingOp5(OpTest): class TestCRFDecodingOp6(OpTest): - def init_lod(self): self.lod = [[1, 2, 3, 4]] @@ -239,21 +245,23 @@ class TestCRFDecodingOp6(OpTest): self.init_lod() total_len = sum(self.lod[-1]) - transition = np.repeat(np.arange(TAG_NUM, - dtype="float64").reshape(1, TAG_NUM), - TAG_NUM + 2, - axis=0) - emission = np.repeat(np.arange(TAG_NUM, - dtype="float64").reshape(1, TAG_NUM), - total_len, - axis=0) - - labels = np.random.randint(low=0, - high=TAG_NUM, - size=(total_len, 1), - dtype="int64") - predicted_labels = np.ones( - (total_len, 1), dtype="int64") * (TAG_NUM - 1) + transition = np.repeat( + np.arange(TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + TAG_NUM + 2, + axis=0, + ) + emission = np.repeat( + np.arange(TAG_NUM, dtype="float64").reshape(1, TAG_NUM), + total_len, + axis=0, + ) + + labels = np.random.randint( + low=0, high=TAG_NUM, size=(total_len, 1), dtype="int64" + ) + predicted_labels = np.ones((total_len, 1), dtype="int64") * ( + TAG_NUM - 1 + ) expected_output = (labels == predicted_labels).astype("int64") self.inputs = { diff --git a/python/paddle/fluid/tests/unittests/test_crop_op.py b/python/paddle/fluid/tests/unittests/test_crop_op.py index 32e31e5366fdf93940f1103b79632f85bc87a6d5..f495be8cbefaee5817e054ab9d72f89c85d94b80 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid def crop(data, offsets, crop_shape): - def indexOf(shape, index): result = [] for dim in reversed(shape): @@ -34,15 +33,17 @@ def crop(data, offsets, crop_shape): selected = True if len(index) == len(offsets): for j, offset in enumerate(offsets): - selected = selected and index[j] >= offset and index[ - j] < crop_shape[j] + offset + selected = ( + selected + and index[j] >= offset + and index[j] < crop_shape[j] + offset + ) if selected: result.append(value) return np.array(result).reshape(crop_shape) class TestCropOp(OpTest): - def setUp(self): self.op_type = "crop" self.crop_by_input = False @@ -52,7 +53,7 @@ class TestCropOp(OpTest): if self.crop_by_input: self.inputs = { 'X': np.random.random(self.x_shape).astype("float64"), - 'Y': np.random.random(self.crop_shape).astype("float64") 
+ 'Y': np.random.random(self.crop_shape).astype("float64"), } else: self.attrs['shape'] = self.crop_shape @@ -85,7 +86,6 @@ class TestCropOp(OpTest): class TestCase1(TestCropOp): - def initTestCase(self): self.x_shape = (16, 8, 32) self.crop_shape = [2, 2, 3] @@ -93,7 +93,6 @@ class TestCase1(TestCropOp): class TestCase2(TestCropOp): - def initTestCase(self): self.x_shape = (15, 8) self.crop_shape = [15, 8] @@ -101,7 +100,6 @@ class TestCase2(TestCropOp): class TestCase3(TestCropOp): - def initTestCase(self): self.x_shape = (4, 8, 16) self.crop_shape = [2, 2, 3] @@ -110,7 +108,6 @@ class TestCase3(TestCropOp): class TestCase4(TestCropOp): - def initTestCase(self): self.x_shape = (10, 10) self.crop_shape = [10, 10] @@ -119,7 +116,6 @@ class TestCase4(TestCropOp): class TestCase5(TestCropOp): - def initTestCase(self): self.x_shape = (3, 4, 10) self.crop_shape = [2, 2, 3] @@ -128,7 +124,6 @@ class TestCase5(TestCropOp): class TestCase6(TestCropOp): - def initTestCase(self): self.x_shape = (10, 9, 14) self.crop_shape = [3, 3, 5] @@ -138,7 +133,6 @@ class TestCase6(TestCropOp): class TestCropNoneOffset(unittest.TestCase): - def test_crop_none_offset(self): x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") crop_shape = [2, 2, 2] @@ -147,7 +141,6 @@ class TestCropNoneOffset(unittest.TestCase): class TestCropNoneShape(unittest.TestCase): - def test_crop_none_shape(self): x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") crop = paddle.crop(x) @@ -156,5 +149,6 @@ class TestCropNoneShape(unittest.TestCase): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py index 8af23c6be5ee66f0b172e1bcd1497088047f0858..aeb0cfb6414a88043414980d7cdc725502c80ff3 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid def crop(data, offsets, crop_shape): - def indexOf(shape, index): result = [] for dim in reversed(shape): @@ -34,15 +33,17 @@ def crop(data, offsets, crop_shape): selected = True if len(index) == len(offsets): for j, offset in enumerate(offsets): - selected = selected and index[j] >= offset and index[ - j] < crop_shape[j] + offset + selected = ( + selected + and index[j] >= offset + and index[j] < crop_shape[j] + offset + ) if selected: result.append(value) return np.array(result).reshape(crop_shape) class TestCropTensorOp(OpTest): - def setUp(self): self.op_type = "crop_tensor" self.shape_by_input = False @@ -55,7 +56,7 @@ class TestCropTensorOp(OpTest): if self.shape_by_input: self.inputs = { 'X': np.random.random(self.x_shape).astype("float64"), - 'Shape': np.array(self.crop_shape).astype("int32") + 'Shape': np.array(self.crop_shape).astype("int32"), } else: self.attrs['shape'] = self.crop_shape @@ -86,15 +87,13 @@ class TestCropTensorOp(OpTest): class TestCase1(TestCropTensorOp): - def initTestCase(self): - self.x_shape = (100) + self.x_shape = 100 self.crop_shape = [64] self.offsets = [13] class TestCase2(TestCropTensorOp): - def initTestCase(self): self.x_shape = (12, 24) self.crop_shape = [-1, 8] @@ -102,7 +101,6 @@ class TestCase2(TestCropTensorOp): class TestCase3(TestCropTensorOp): - def initTestCase(self): self.x_shape = (4, 8, 16) self.crop_shape = [2, 2, 3] @@ -111,7 +109,6 @@ class TestCase3(TestCropTensorOp): class TestCase4(TestCropTensorOp): - def initTestCase(self): 
self.x_shape = (8, 3, 6, 6) self.crop_shape = [-1, 3, -1, 4] @@ -120,7 +117,6 @@ class TestCase4(TestCropTensorOp): class TestCase5(TestCropTensorOp): - def initTestCase(self): self.x_shape = (2, 4, 5, 8, 8) self.crop_shape = [1, 1, 2, 4, 4] @@ -129,7 +125,6 @@ class TestCase5(TestCropTensorOp): class TestCase6(TestCropTensorOp): - def initTestCase(self): self.x_shape = (2, 2, 4, 4, 4, 2) self.crop_shape = [1, 1, 4, 2, 2, 2] @@ -139,7 +134,6 @@ class TestCase6(TestCropTensorOp): class TestCropTensorOpTensorAttr(OpTest): - def setUp(self): self.op_type = "crop_tensor" self.OffsetsTensor = False @@ -151,22 +145,24 @@ class TestCropTensorOpTensorAttr(OpTest): if self.ShapeTensor: shape_tensor = [] for index, ele in enumerate(self.crop_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.x_shape).astype("float64"), - 'ShapeTensor': shape_tensor + 'ShapeTensor': shape_tensor, } self.attrs['shape'] = self.shape_attr if self.OffsetsTensor: offsets_tensor = [] for index, ele in enumerate(self.offsets): - offsets_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + offsets_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.x_shape).astype("float64"), - 'OffsetsTensor': offsets_tensor + 'OffsetsTensor': offsets_tensor, } self.attrs['offsets'] = self.offsets_attr @@ -192,7 +188,6 @@ class TestCropTensorOpTensorAttr(OpTest): class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr): - def initTestCase(self): self.x_shape = (16, 8, 32) self.crop_shape = [-1, -1, 3] @@ -201,7 +196,6 @@ class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr): class TestCropTensorOpTensorAttrCase2(TestCropTensorOpTensorAttr): - def initTestCase(self): self.x_shape = (4, 8, 16, 8) self.crop_shape = [2, 2, 3, 4] @@ -210,7 +204,6 @@ class TestCropTensorOpTensorAttrCase2(TestCropTensorOpTensorAttr): class TestCropTensorOpTensorAttrCase3(TestCropTensorOpTensorAttr): - def initTestCase(self): self.x_shape = (16, 8, 32) self.crop_shape = [2, 2, 3] @@ -221,7 +214,6 @@ class TestCropTensorOpTensorAttrCase3(TestCropTensorOpTensorAttr): class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr): - def initTestCase(self): self.x_shape = (16, 8, 32) self.crop_shape = [2, 2, 3] @@ -232,7 +224,6 @@ class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr): class TestCropTensorException(unittest.TestCase): - def test_exception(self): input1 = fluid.data(name="input1", shape=[2, 3, 6, 6], dtype="float32") input2 = fluid.data(name="input2", shape=[2, 3, 6, 6], dtype="float16") @@ -255,14 +246,14 @@ class TestCropTensorException(unittest.TestCase): out = paddle.crop(input1, shape=[2, 2, 3, 3], offsets=0) def attr_offsets_dtype(): - out = paddle.crop(input1, - shape=[2, 2, 3, 3], - offsets=[0, 1.0, 0, 0]) + out = paddle.crop( + input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0] + ) def attr_offsets_value(): - out = paddle.crop(input1, - shape=[2, 2, 3, 3], - offsets=[0, -1, offset, 0]) + out = paddle.crop( + input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0] + ) def input_dtype(): out = paddle.crop(input2, shape=[2, 2, 3, 3]) @@ -279,5 +270,6 @@ class TestCropTensorException(unittest.TestCase): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy2_op.py 
b/python/paddle/fluid/tests/unittests/test_cross_entropy2_op.py index 13fbd933899b89e2b54b50ebc1f8ee1c2a2e41e9..9b8acb884ab4581bd9b91af432f8ab3a4aa7f550 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy2_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy2_op.py @@ -18,7 +18,6 @@ import numpy as np class CrossEntropy2OpTestBase(OpTest): - def initParameters(self): return [32, 64], 'float64', -100, False @@ -33,26 +32,33 @@ class CrossEntropy2OpTestBase(OpTest): return ret, match_x def setUp(self): - self.shape, self.dtype, self.ignore_index, self.drop_last_dim = self.initParameters( - ) + ( + self.shape, + self.dtype, + self.ignore_index, + self.drop_last_dim, + ) = self.initParameters() self.op_type = 'cross_entropy2' feature_size = int(self.shape[-1]) batch_size = int(np.prod(self.shape) / feature_size) logits = (np.random.random(size=self.shape) + 1).astype(self.dtype) - label_shape = self.shape[ - 0:-1] if self.drop_last_dim else self.shape[0:-1] + [1] - label = np.random.random_integers(low=0, - high=feature_size - 1, - size=label_shape).astype('int64') + label_shape = ( + self.shape[0:-1] if self.drop_last_dim else self.shape[0:-1] + [1] + ) + label = np.random.random_integers( + low=0, high=feature_size - 1, size=label_shape + ).astype('int64') outputs, match_x = self.calc_output( np.reshape(logits, [batch_size, feature_size]), - np.reshape(label, [batch_size, 1]), self.ignore_index) + np.reshape(label, [batch_size, 1]), + self.ignore_index, + ) self.inputs = {'X': logits, 'Label': label} out_shape = label_shape self.outputs = { 'Y': np.reshape(outputs, out_shape), 'MatchX': np.reshape(match_x, self.shape[:-1] + [1]), - 'XShape': np.zeros(shape=logits.shape, dtype=logits.dtype) + 'XShape': np.zeros(shape=logits.shape, dtype=logits.dtype), } self.attrs = {'ignore_index': self.ignore_index} @@ -60,37 +66,34 @@ class CrossEntropy2OpTestBase(OpTest): self.check_output(no_check_set=['XShape']) def test_check_grad(self): - self.check_grad(inputs_to_check=['X'], - output_names=['Y'], - no_grad_set=['XShape', 'MatchX', 'Label']) + self.check_grad( + inputs_to_check=['X'], + output_names=['Y'], + no_grad_set=['XShape', 'MatchX', 'Label'], + ) class CrossEntropy2OpTest2(CrossEntropy2OpTestBase): - def initParameters(self): return [32, 64], 'float64', 3, False class CrossEntropy2OpTest2RemoveLastDim(CrossEntropy2OpTestBase): - def initParameters(self): return [32, 64], 'float64', 3, True class CrossEntropy2OpTest3(CrossEntropy2OpTestBase): - def initParameters(self): return [4, 8, 16, 32], 'float64', -100, False class CrossEntropy2OpTest3RemoveLastDim(CrossEntropy2OpTestBase): - def initParameters(self): return [4, 8, 16, 32], 'float64', -100, True class CrossEntropy2OpTest4(CrossEntropy2OpTestBase): - def initParameters(self): return [4, 8, 16, 32], 'float64', 3, False diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py index 4ee9b8037fec76c7fe655210bf064fe512d9a469..f3e66f9bb8dbf501cf96081304650ba56cde5574 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py @@ -27,11 +27,9 @@ def log_softmax(x, axis=-1): return np.log(softmax_out) -def cross_entropy_loss_1d(input, - label, - weight=None, - reduction='mean', - ignore_index=-100): +def cross_entropy_loss_1d( + input, label, weight=None, reduction='mean', ignore_index=-100 +): log_softmax_out = log_softmax(input) input_shape = 
log_softmax_out.shape N = input_shape[0] @@ -59,11 +57,9 @@ def cross_entropy_loss_1d(input, return out -def cross_entropy_loss_2d(input, - label, - weight=None, - reduction='mean', - ignore_index=-100): +def cross_entropy_loss_2d( + input, label, weight=None, reduction='mean', ignore_index=-100 +): log_softmax_out = log_softmax(input) input_shape = log_softmax_out.shape N = input_shape[0] @@ -81,8 +77,9 @@ def cross_entropy_loss_2d(input, continue cur_weight = weight[cur_target] if weight is not None else 1 total_weight += cur_weight - out[i][h][ - w] = -log_softmax_out[i][h][w][cur_target] * cur_weight + out[i][h][w] = ( + -log_softmax_out[i][h][w][cur_target] * cur_weight + ) if reduction == 'sum': return np.sum(out), np.array([total_weight]).astype('float64') elif reduction == 'mean': @@ -92,27 +89,20 @@ def cross_entropy_loss_2d(input, return out -def cross_entropy_soft(softmax, - label, - axis, - N, - weight=None, - reduction='mean', - ignore_index=-100): - #1.loss +def cross_entropy_soft( + softmax, label, axis, N, weight=None, reduction='mean', ignore_index=-100 +): + # 1.loss loss = cross_entropy( - softmax, - label, - True, #soft_label, - axis, - ignore_index) + softmax, label, True, axis, ignore_index # soft_label, + ) if weight is None and reduction == 'none': return loss - #2.weight + # 2.weight weighted_loss = loss - total_weight = N #for weight is None + total_weight = N # for weight is None if weight is not None: weighted_loss = np.zeros_like(loss).astype(np.float64) total_weight = 0 @@ -122,7 +112,7 @@ def cross_entropy_soft(softmax, total_weight += cur_weight weighted_loss[i] = loss[i] * cur_weight - #3.reduce + # 3.reduce if reduction == 'none': return weighted_loss @@ -136,29 +126,28 @@ def cross_entropy_soft(softmax, return weighted_loss_sum -def cross_entropy_soft_2d(softmax, - label, - axis, - N, - H, - W, - weight=None, - reduction='mean', - ignore_index=-100): - #1.loss +def cross_entropy_soft_2d( + softmax, + label, + axis, + N, + H, + W, + weight=None, + reduction='mean', + ignore_index=-100, +): + # 1.loss loss = cross_entropy( - softmax, - label, - True, #soft_label, - axis, - ignore_index) + softmax, label, True, axis, ignore_index # soft_label, + ) if weight is None and reduction == 'none': return loss - #2.weight + # 2.weight weighted_loss = loss - total_weight = N #for weight is None + total_weight = N # for weight is None if weight is not None: weighted_loss = np.zeros_like(loss).astype(np.float64) total_weight = 0 @@ -170,7 +159,7 @@ def cross_entropy_soft_2d(softmax, total_weight += cur_weight weighted_loss[i][h][w] = loss[i][h][w] * cur_weight - #3.reduce + # 3.reduce if reduction == 'none': return weighted_loss @@ -185,19 +174,20 @@ def cross_entropy_soft_2d(softmax, class CrossEntropyLoss(unittest.TestCase): - def setUp(self): - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) ###test for deprecated softmax_with_cross_entropy def test_softmax_with_cross_entropy(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 4 self.C = 3 self.shape = [self.N, self.C] @@ -205,20 +195,24 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 
'none' self.weight = None self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - expected = cross_entropy_soft(softmax, - self.labels, - self.axis, - self.N, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + expected = cross_entropy_soft( + softmax, + self.labels, + self.axis, + self.N, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") @@ -227,7 +221,8 @@ class CrossEntropyLoss(unittest.TestCase): fluid.dygraph.to_variable(self.logits), fluid.dygraph.to_variable(self.labels), soft_label=True, - axis=self.axis) + axis=self.axis, + ) paddle_loss_ce = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), @@ -235,12 +230,14 @@ class CrossEntropyLoss(unittest.TestCase): soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight) - if self.weight is not None else None, - reduction=self.reduction) - - np.testing.assert_allclose(paddle_loss_swce.numpy(), - expected, - rtol=1e-05) + if self.weight is not None + else None, + reduction=self.reduction, + ) + + np.testing.assert_allclose( + paddle_loss_swce.numpy(), expected, rtol=1e-05 + ) np.testing.assert_allclose(paddle_loss_ce.numpy(), expected, rtol=1e-05) ###soft_label test start @@ -248,10 +245,11 @@ class CrossEntropyLoss(unittest.TestCase): def test_cross_entropy_loss_soft_1d(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 4 self.C = 3 self.shape = [self.N, self.C] @@ -259,24 +257,28 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'none' self.weight = None self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - expected = cross_entropy_soft(softmax, - self.labels, - self.axis, - self.N, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + expected = cross_entropy_soft( + softmax, + self.labels, + self.axis, + self.N, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") - #2. dygraph + # 2. dygraph paddle.disable_static() paddle_loss_none_weight = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), @@ -284,35 +286,43 @@ class CrossEntropyLoss(unittest.TestCase): soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight) - if self.weight is not None else None, - reduction=self.reduction) + if self.weight is not None + else None, + reduction=self.reduction, + ) dy_ret_value = paddle_loss_none_weight.numpy() - #3. static + # 3. 
static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[self.N, self.C], dtype=self.dtype + ) + label = fluid.data( + name='label', shape=[self.N, self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction=self.reduction, soft_label=True) + reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': self.logits, + 'label': self.labels, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -323,10 +333,11 @@ class CrossEntropyLoss(unittest.TestCase): def test_cross_entropy_loss_soft_1d_weight(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 4 self.C = 3 self.shape = [self.N, self.C] @@ -334,34 +345,38 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'none' self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype) self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) if self.soft_label: - self.labels = np.random.uniform(0.1, 1.0, - self.shape).astype(self.dtype) + self.labels = np.random.uniform(0.1, 1.0, self.shape).astype( + self.dtype + ) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) else: axis_dim = self.shape[self.axis] self.shape[self.axis] = 1 - self.labels = np.random.randint(0, - axis_dim, - self.shape, - dtype="int64") - - #1. numpy - expected = cross_entropy_soft(softmax, - self.labels, - self.axis, - self.N, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + self.labels = np.random.randint( + 0, axis_dim, self.shape, dtype="int64" + ) + + # 1. numpy + expected = cross_entropy_soft( + softmax, + self.labels, + self.axis, + self.N, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") - #2. dygraph + # 2. 
dygraph paddle.disable_static() paddle_loss_none_weight = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), @@ -369,36 +384,43 @@ class CrossEntropyLoss(unittest.TestCase): soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight), - reduction=self.reduction) + reduction=self.reduction, + ) dy_ret_value = paddle_loss_none_weight.numpy() # 3.static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[self.N, self.C], dtype=self.dtype + ) + label = fluid.data( + name='label', shape=[self.N, self.C], dtype=self.dtype + ) weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction=self.reduction, soft_label=True) + weight=weight, reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels, - "weight": self.weight - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': self.logits, + 'label': self.labels, + "weight": self.weight, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -409,10 +431,11 @@ class CrossEntropyLoss(unittest.TestCase): def test_cross_entropy_loss_soft_1d_mean(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 4 self.C = 3 self.shape = [self.N, self.C] @@ -420,25 +443,29 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'mean' self.weight = None self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - #1. numpy - expected = cross_entropy_soft(softmax, - self.labels, - self.axis, - self.N, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + # 1. numpy + expected = cross_entropy_soft( + softmax, + self.labels, + self.axis, + self.N, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") - #2 dygraph + # 2 dygraph paddle.disable_static() paddle_loss_mean = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), @@ -446,34 +473,38 @@ class CrossEntropyLoss(unittest.TestCase): soft_label=True, axis=self.axis, weight=self.weight, - reduction=self.reduction) + reduction=self.reduction, + ) dy_ret_value = paddle_loss_mean.numpy() - #3. static + # 3. 
static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[self.N, self.C], dtype=self.dtype + ) + label = fluid.data( + name='label', shape=[self.N, self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction=self.reduction, soft_label=True) + reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={'input': self.logits, 'label': self.labels}, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -484,10 +515,11 @@ class CrossEntropyLoss(unittest.TestCase): def test_cross_entropy_loss_soft_1d_weight_mean(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 4 self.C = 3 self.shape = [self.N, self.C] @@ -495,61 +527,72 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'mean' self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype) self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - #1. numpy - expected = cross_entropy_soft(softmax, - self.labels, - self.axis, - self.N, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + # 1. numpy + expected = cross_entropy_soft( + softmax, + self.labels, + self.axis, + self.N, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") paddle.disable_static() - #2. dygraph + # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), fluid.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight), - reduction=self.reduction) + reduction=self.reduction, + ) dy_ret_value = paddle_loss_none_weight.numpy() - #3. static + # 3. 
static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[self.N, self.C], dtype=self.dtype + ) + label = fluid.data( + name='label', shape=[self.N, self.C], dtype=self.dtype + ) weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction=self.reduction, soft_label=True) + weight=weight, reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels, - "weight": self.weight - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': self.logits, + 'label': self.labels, + "weight": self.weight, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -558,14 +601,14 @@ class CrossEntropyLoss(unittest.TestCase): ###soft_label test 5 def test_cross_entropy_loss_soft_2d(self): - def inner_cross_entropy_loss_soft_2d(soft_label): self.numeric_stable_mode = False self.soft_label = soft_label - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 3 self.H = 2 self.W = 2 @@ -575,64 +618,82 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'none' self.weight = None self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) - softmax = np.apply_along_axis(stable_softmax, self.axis, - self.logits) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) + softmax = np.apply_along_axis( + stable_softmax, self.axis, self.logits + ) - self.labels = np.random.uniform(0.1, 1.0, - self.shape).astype(self.dtype) + self.labels = np.random.uniform(0.1, 1.0, self.shape).astype( + self.dtype + ) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - #1. numpy - expected = cross_entropy_soft_2d(softmax, - self.labels, - self.axis, - self.N, - self.H, - self.W, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + # 1. numpy + expected = cross_entropy_soft_2d( + softmax, + self.labels, + self.axis, + self.N, + self.H, + self.W, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") paddle.disable_static() - #2. dygraph + # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), fluid.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight) - if self.weight is not None else None, - reduction=self.reduction) + if self.weight is not None + else None, + reduction=self.reduction, + ) dy_ret_value = paddle_loss_none_weight.numpy() - #3. static + # 3. 
static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.H, self.W, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.H, self.W, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', + shape=[self.N, self.H, self.W, self.C], + dtype=self.dtype, + ) + label = fluid.data( + name='label', + shape=[self.N, self.H, self.W, self.C], + dtype=self.dtype, + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction=self.reduction, soft_label=True) + reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': self.logits, + 'label': self.labels, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -647,10 +708,11 @@ class CrossEntropyLoss(unittest.TestCase): def test_cross_entropy_loss_soft_2d_weight_mean(self): self.numeric_stable_mode = False self.soft_label = True - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.axis = -1 - self.ignore_index = -100 #should not be changed + self.ignore_index = -100 # should not be changed self.N = 3 self.H = 2 self.W = 2 @@ -660,63 +722,78 @@ class CrossEntropyLoss(unittest.TestCase): self.reduction = 'mean' self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype) self.logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits) self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True) - #1. numpy - expected = cross_entropy_soft_2d(softmax, - self.labels, - self.axis, - self.N, - self.H, - self.W, - weight=self.weight, - reduction=self.reduction, - ignore_index=self.ignore_index) + # 1. numpy + expected = cross_entropy_soft_2d( + softmax, + self.labels, + self.axis, + self.N, + self.H, + self.W, + weight=self.weight, + reduction=self.reduction, + ignore_index=self.ignore_index, + ) paddle.set_device("cpu") paddle.disable_static() - #2. dygraph + # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( fluid.dygraph.to_variable(self.logits), fluid.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, weight=fluid.dygraph.to_variable(self.weight), - reduction=self.reduction) + reduction=self.reduction, + ) dy_ret_value = paddle_loss_none_weight.numpy() - #3. static + # 3. 
static paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[self.N, self.H, self.W, self.C], - dtype=self.dtype) - label = fluid.data(name='label', - shape=[self.N, self.H, self.W, self.C], - dtype=self.dtype) + input = fluid.data( + name='input', + shape=[self.N, self.H, self.W, self.C], + dtype=self.dtype, + ) + label = fluid.data( + name='label', + shape=[self.N, self.H, self.W, self.C], + dtype=self.dtype, + ) weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction=self.reduction, soft_label=True) + weight=weight, reduction=self.reduction, soft_label=True + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': self.logits, - 'label': self.labels, - "weight": self.weight - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': self.logits, + 'label': self.labels, + "weight": self.weight, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) paddle.disable_static() @@ -732,8 +809,11 @@ class CrossEntropyLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) label = fluid.data(name='label', shape=[2], dtype='int64') @@ -741,20 +821,25 @@ class CrossEntropyLoss(unittest.TestCase): ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) expected = cross_entropy_loss_1d(input_np, label_np)[0] with fluid.dygraph.guard(): - cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(axis=1, - ignore_index=0) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( + axis=1, ignore_index=0 + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=0)[0] @@ -770,28 +855,37 @@ class CrossEntropyLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) label = fluid.data(name='label', shape=[N], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - ignore_index=-1) + ignore_index=-1 + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = 
exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - axis=1, ignore_index=-1) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + axis=1, ignore_index=-1 + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=-1)[0] @@ -809,40 +903,49 @@ class CrossEntropyLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) label = fluid.data(name='label', shape=[N], dtype='int64') - weight = fluid.data(name='weight', shape=[C], - dtype=self.dtype) #weight for each class - cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight, - ignore_index=0) + weight = fluid.data( + name='weight', shape=[C], dtype=self.dtype + ) # weight for each class + cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( + weight=weight, ignore_index=0 + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=fluid.dygraph.to_variable(weight_np), axis=1, - ignore_index=0) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + ignore_index=0, + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, - label_np, - weight=weight_np, - ignore_index=0)[0] + expected = cross_entropy_loss_1d( + input_np, label_np, weight=weight_np, ignore_index=0 + )[0] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) @@ -858,172 +961,205 @@ class CrossEntropyLoss(unittest.TestCase): with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), ignore_index=255) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), ignore_index=255 + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, - label_np, - weight=weight_np, - ignore_index=255)[0] + expected = cross_entropy_loss_1d( + input_np, label_np, weight=weight_np, ignore_index=255 + )[0] 
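            # Illustrative aside (a hedged numpy sketch, not code taken from this patch):
            # the essence of the cross_entropy_loss_1d reference used for `expected` above,
            # showing how per-class weights and ignore_index interact under reduction='mean'.
            # The helper name and signature below are assumptions made for illustration.
            import numpy as np

            def weighted_mean_nll_1d(log_softmax_out, label, weight=None, ignore_index=-100):
                # log_softmax_out: [N, C] log-probabilities; label: [N] int64 class ids
                N = label.shape[0]
                out = np.zeros(N, dtype='float64')
                total_weight = 0.0
                for i in range(N):
                    cur_target = label[i]
                    if cur_target == ignore_index:
                        out[i] = 0.0  # ignored samples contribute neither loss nor weight
                        continue
                    cur_weight = weight[cur_target] if weight is not None else 1.0
                    total_weight += cur_weight
                    out[i] = -log_softmax_out[i][cur_target] * cur_weight
                # 'mean' divides by the accumulated class weights, not by the batch size N
                return np.sum(out) / total_weight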
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_mean(self): input_np = np.random.random([2, 4]).astype(self.dtype) label_np = np.random.randint(0, 4, size=(2)).astype(np.int64) - weight_np = np.random.random([4]).astype(self.dtype) #shape:C + weight_np = np.random.random([4]).astype(self.dtype) # shape:C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data(name='weight', shape=[4], - dtype=self.dtype) #weight for each class + weight = fluid.data( + name='weight', shape=[4], dtype=self.dtype + ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) - expected = cross_entropy_loss_1d(input_np, label_np, - weight=weight_np)[0] + expected = cross_entropy_loss_1d(input_np, label_np, weight=weight_np)[ + 0 + ] with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), axis=1) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), axis=1 + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, label_np, - weight=weight_np)[0] + expected = cross_entropy_loss_1d(input_np, label_np, weight=weight_np)[ + 0 + ] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_sum(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1 - weight_np = np.random.random([200]).astype(self.dtype) #C + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 + weight_np = np.random.random([200]).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='sum') + weight=weight, reduction='sum' + ) ret = cross_entropy_loss(input, label) exe = 
fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), reduction='sum' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='sum')[0] + expected = cross_entropy_loss_1d( + input_np, label_np, weight=weight_np, reduction='sum' + )[0] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_none(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1 - weight_np = np.random.random([200]).astype(self.dtype) #C + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 + weight_np = np.random.random([200]).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='none') + weight=weight, reduction='none' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), reduction='none' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='none') + expected = cross_entropy_loss_1d( + input_np, label_np, weight=weight_np, reduction='none' + ) np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret, expected, 
rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_none_func(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N - weight_np = np.random.random([200]).astype(self.dtype) #C + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N + weight_np = np.random.random([200]).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) - ret = paddle.nn.functional.cross_entropy(input, - label, - weight=weight, - reduction='none') + ret = paddle.nn.functional.cross_entropy( + input, label, weight=weight, reduction='none' + ) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): @@ -1031,43 +1167,47 @@ class CrossEntropyLoss(unittest.TestCase): fluid.dygraph.to_variable(input_np), fluid.dygraph.to_variable(label_np), weight=fluid.dygraph.to_variable(weight_np), - reduction='none') + reduction='none', + ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='none') + expected = cross_entropy_loss_1d( + input_np, label_np, weight=weight_np, reduction='none' + ) np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret, expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_mean(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1 + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={'input': input_np, 'label': label_np}, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() 
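            # Illustrative aside (a hedged sketch, not code taken from this patch): the
            # static-graph half of these tests repeats the same feed/fetch pattern shown
            # above; it is condensed here with assumed shapes and a CPU place. The dygraph
            # result computed next is then compared against it with rtol=1e-05.
            import numpy as np
            import paddle
            import paddle.fluid as fluid

            paddle.enable_static()
            prog, startup = fluid.Program(), fluid.Program()
            with fluid.program_guard(prog, startup):
                x = fluid.data(name='input', shape=[100, 200], dtype='float64')  # N, C
                y = fluid.data(name='label', shape=[100], dtype='int64')  # N
                loss = paddle.nn.loss.CrossEntropyLoss(reduction='mean')(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            (static_out,) = exe.run(
                prog,
                feed={
                    'input': np.random.random([100, 200]).astype('float64'),
                    'label': np.random.randint(0, 200, size=(100,)).astype('int64'),
                },
                fetch_list=[loss],
            )
            paddle.disable_static()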
- dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np)[0] @@ -1076,32 +1216,38 @@ class CrossEntropyLoss(unittest.TestCase): np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_sum(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1 + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='sum') + reduction='sum' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={'input': input_np, 'label': label_np}, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='sum') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + reduction='sum' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, reduction='sum')[0] @@ -1110,33 +1256,39 @@ class CrossEntropyLoss(unittest.TestCase): np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_none(self): - input_np = np.random.random([100, 200]).astype(self.dtype) #N,C - label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1 + input_np = np.random.random([100, 200]).astype(self.dtype) # N,C + label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) label = fluid.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='none') + reduction='none' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={'input': input_np, 'label': label_np}, + fetch_list=[ret], + ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - 
reduction='none') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + reduction='none' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) @@ -1146,97 +1298,122 @@ class CrossEntropyLoss(unittest.TestCase): np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_none(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW1 - weight_np = np.random.random(size=(3, )).astype(self.dtype) #C + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW1 + weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='none') + weight=weight, reduction='none' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), reduction='none' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(input_np, - label_np, - weight=weight_np, - reduction='none') + expected = cross_entropy_loss_2d( + input_np, label_np, weight=weight_np, reduction='none' + ) np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret, expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_axis_change_mean(self): - input_np = np.random.random(size=(2, 3, 2, 2)).astype(self.dtype) #NCHW - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW - weight_np = np.random.random(size=(3, )).astype(self.dtype) #C + input_np = np.random.random(size=(2, 3, 2, 2)).astype( + self.dtype + ) # NCHW + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW + weight_np = np.random.random(size=(3,)).astype(self.dtype) # 
C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 3, 2, 2], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 3, 2, 2], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='mean', axis=1) + weight=weight, reduction='mean', axis=1 + ) # specify the class channels to axis 1 ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=fluid.dygraph.to_variable(weight_np), reduction='mean', - axis=1) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + axis=1, + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(np.transpose(input_np, [0, 2, 3, 1]), - label_np, - weight=weight_np, - reduction='mean')[0] + expected = cross_entropy_loss_2d( + np.transpose(input_np, [0, 2, 3, 1]), + label_np, + weight=weight_np, + reduction='mean', + )[0] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) @@ -1252,137 +1429,173 @@ class CrossEntropyLoss(unittest.TestCase): weight_np = np.random.random([C]).astype(self.dtype) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), ignore_index=255) - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), ignore_index=255 + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(input_np, - label_np, - weight=weight_np, - ignore_index=255)[0] + expected = cross_entropy_loss_2d( + input_np, label_np, weight=weight_np, ignore_index=255 + )[0] np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_mean(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW - weight_np = np.random.random(size=(3, )).astype(self.dtype) #C + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW + weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if 
fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='mean') + weight=weight, reduction='mean' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='mean') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), reduction='mean' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(input_np, - label_np, - weight=weight_np, - reduction='mean')[0] + expected = cross_entropy_loss_2d( + input_np, label_np, weight=weight_np, reduction='mean' + )[0] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_sum(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW - weight_np = np.random.random(size=(3, )).astype(self.dtype) #C + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW + weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight, reduction='sum') + weight=weight, reduction='sum' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - 
weight=fluid.dygraph.to_variable(weight_np), reduction='sum') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + weight=fluid.dygraph.to_variable(weight_np), reduction='sum' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(input_np, - label_np, - weight=weight_np, - reduction='sum')[0] + expected = cross_entropy_loss_2d( + input_np, label_np, weight=weight_np, reduction='sum' + )[0] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_none(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='none') + reduction='none' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='none') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + reduction='none' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) @@ -1392,75 +1605,100 @@ class CrossEntropyLoss(unittest.TestCase): np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_mean(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype 
+ ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='mean') + reduction='mean' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='mean') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + reduction='mean' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) - expected = cross_entropy_loss_2d(input_np, label_np, - reduction='mean')[0] + expected = cross_entropy_loss_2d(input_np, label_np, reduction='mean')[ + 0 + ] np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_sum(self): - input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC - label_np = np.random.randint(0, 3, - size=(2, 2, 2)).astype(np.int64) #NHW + input_np = np.random.random(size=(2, 2, 2, 3)).astype( + self.dtype + ) # NHWC + label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype( + np.int64 + ) # NHW paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 2, 2, 3], - dtype=self.dtype) + input = fluid.data( + name='input', shape=[2, 2, 2, 3], dtype=self.dtype + ) label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='sum') + reduction='sum' + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - reduction='sum') - dy_ret = cross_entropy_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + reduction='sum' + ) + dy_ret = cross_entropy_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_2d(input_np, label_np, reduction='sum')[0] @@ -1486,8 +1724,7 @@ class CrossEntropyLoss(unittest.TestCase): self.test_cross_entropy_loss_1d_with_mean_ignore() self.test_cross_entropy_loss_1d_with_mean_ignore_negative() self.test_cross_entropy_loss_1d_with_weight_mean_ignore() - self.test_cross_entropy_loss_1d_with_weight_mean_ignore_exceedlabel( - ) + self.test_cross_entropy_loss_1d_with_weight_mean_ignore_exceedlabel() self.test_cross_entropy_loss_1d_with_weight_mean() self.test_cross_entropy_loss_1d_with_weight_sum() 
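        # Illustrative aside (a hedged sketch, not code taken from this patch): the 2-D
        # cases invoked below accept the class axis in two layouts. axis=-1 expects NHWC
        # logits while axis=1 expects NCHW, and the reference check in
        # test_cross_entropy_loss_2d_with_weight_axis_change_mean simply transposes NCHW
        # back to NHWC. The shapes below mirror that test; the comparison itself is an
        # assumption about how the two calls should relate.
        import numpy as np
        import paddle

        logits_nchw = np.random.random((2, 3, 2, 2)).astype('float64')    # N, C, H, W
        labels = np.random.randint(0, 3, size=(2, 2, 2)).astype('int64')  # N, H, W
        loss_axis1 = paddle.nn.functional.cross_entropy(
            paddle.to_tensor(logits_nchw), paddle.to_tensor(labels), axis=1
        )
        loss_nhwc = paddle.nn.functional.cross_entropy(
            paddle.to_tensor(np.transpose(logits_nchw, [0, 2, 3, 1])),
            paddle.to_tensor(labels),
            axis=-1,
        )
        # Both default to reduction='mean' and are expected to agree within rtol=1e-05.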
self.test_cross_entropy_loss_1d_with_weight_none() @@ -1497,8 +1734,7 @@ class CrossEntropyLoss(unittest.TestCase): self.test_cross_entropy_loss_1d_none() self.test_cross_entropy_loss_2d_with_weight_none() self.test_cross_entropy_loss_2d_with_weight_axis_change_mean() - self.test_cross_entropy_loss_2d_with_weight_mean_ignore_exceedlabel( - ) + self.test_cross_entropy_loss_2d_with_weight_mean_ignore_exceedlabel() self.test_cross_entropy_loss_2d_with_weight_mean() self.test_cross_entropy_loss_2d_with_weight_sum() self.test_cross_entropy_loss_2d_none() @@ -1507,51 +1743,53 @@ class CrossEntropyLoss(unittest.TestCase): class TestCrossEntropyFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_WeightLength_NotEqual(): input_data = paddle.rand(shape=[20, 100]) - label_data = paddle.randint(0, - 100, - shape=[20, 1], - dtype="int64") + label_data = paddle.randint( + 0, 100, shape=[20, 1], dtype="int64" + ) weight_data = paddle.rand([100 + 1]) - paddle.nn.functional.cross_entropy(input=input_data, - label=label_data, - weight=weight_data, - ignore_index=-100) + paddle.nn.functional.cross_entropy( + input=input_data, + label=label_data, + weight=weight_data, + ignore_index=-100, + ) self.assertRaises(ValueError, test_WeightLength_NotEqual) def test_LabelValue_ExceedMax(): input_data = paddle.rand(shape=[20, 100]) - label_data = paddle.randint(0, - 100, - shape=[20, 1], - dtype="int64") + label_data = paddle.randint( + 0, 100, shape=[20, 1], dtype="int64" + ) label_data[0] = 100 weight_data = paddle.rand([100]) - paddle.nn.functional.cross_entropy(input=input_data, - label=label_data, - weight=weight_data, - ignore_index=-100) + paddle.nn.functional.cross_entropy( + input=input_data, + label=label_data, + weight=weight_data, + ignore_index=-100, + ) self.assertRaises(ValueError, test_LabelValue_ExceedMax) def test_LabelValue_ExceedMin(): input_data = paddle.rand(shape=[20, 100]) - label_data = paddle.randint(0, - 100, - shape=[20, 1], - dtype="int64") + label_data = paddle.randint( + 0, 100, shape=[20, 1], dtype="int64" + ) label_data[0] = -1 weight_data = paddle.rand([100]) - paddle.nn.functional.cross_entropy(input=input_data, - label=label_data, - weight=weight_data, - ignore_index=-100) + paddle.nn.functional.cross_entropy( + input=input_data, + label=label_data, + weight=weight_data, + ignore_index=-100, + ) self.assertRaises(ValueError, test_LabelValue_ExceedMin) @@ -1562,28 +1800,34 @@ class TestCrossEntropyFAPIError(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[2, 4], - dtype='float32') + input = fluid.data( + name='input', shape=[2, 4], dtype='float32' + ) label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data(name='weight', - shape=[3], - dtype='float32') #weight for each class + weight = fluid.data( + name='weight', shape=[3], dtype='float32' + ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=weight) + weight=weight + ) ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) - static_ret = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - "weight": weight_np - }, - fetch_list=[ret]) + static_ret = exe.run( + prog, + 
feed={ + 'input': input_np, + 'label': label_np, + "weight": weight_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) self.assertRaises(ValueError, static_test_WeightLength_NotEqual) diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index a0f74302628870b35a4c827d13cdda4fe411f58c..c9fe88f318e88a1ad88c07035451744cc39bb11c 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -21,8 +21,7 @@ from paddle.fluid import Program, program_guard class TestCrossEntropyOp(OpTest): - """Test cross-entropy with discrete one-hot labels. - """ + """Test cross-entropy with discrete one-hot labels.""" def setUp(self): self.op_type = "cross_entropy" @@ -43,24 +42,27 @@ class TestCrossEntropyOp(OpTest): self.outputs = {"Y": self.cross_entropy} self.attrs = { "soft_label": self.soft_label, - "ignore_index": self.ignore_index + "ignore_index": self.ignore_index, } def init_x(self): - self.x = randomize_probability(self.batch_size, - self.class_num, - dtype=self.dtype) + self.x = randomize_probability( + self.batch_size, self.class_num, dtype=self.dtype + ) def init_label(self): - self.label = np.random.randint(0, - self.class_num, (self.batch_size, 1), - dtype="int64") + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64" + ) def get_cross_entropy(self): self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label[i][0]])] - for i in range(self.x.shape[0])], - dtype="float64") + [ + [-np.log(self.x[i][self.label[i][0]])] + for i in range(self.x.shape[0]) + ], + dtype="float64", + ) def init_attr_type(self): pass @@ -79,32 +81,35 @@ class TestCrossEntropyOp(OpTest): class TestCrossEntropyOpRemoveLastDim(TestCrossEntropyOp): - """Test cross-entropy with discrete one-hot labels with shape [batch_size] - """ + """Test cross-entropy with discrete one-hot labels with shape [batch_size]""" def init_label(self): - self.label = np.random.randint(0, - self.class_num, (self.batch_size), - dtype="int64") + self.label = np.random.randint( + 0, self.class_num, (self.batch_size), dtype="int64" + ) def get_cross_entropy(self): self.cross_entropy = np.asmatrix( [-np.log(self.x[i][self.label[i]]) for i in range(self.x.shape[0])], - dtype="float64") + dtype="float64", + ) class TestCrossEntropyOp2(TestCrossEntropyOp): - """Test cross-entropy with vectorized soft labels. - """ + """Test cross-entropy with vectorized soft labels.""" def init_label(self): self.label = np.random.uniform( - 0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype) + 0.1, 1.0, [self.batch_size, self.class_num] + ).astype(self.dtype) self.label /= self.label.sum(axis=1, keepdims=True) def get_cross_entropy(self): - self.cross_entropy = (-self.label * np.log(self.x)).sum( - axis=1, keepdims=True).astype(self.dtype) + self.cross_entropy = ( + (-self.label * np.log(self.x)) + .sum(axis=1, keepdims=True) + .astype(self.dtype) + ) def init_attr_type(self): self.soft_label = True @@ -117,26 +122,28 @@ class TestCrossEntropyOp2(TestCrossEntropyOp): self.class_num = 37 def test_check_grad(self): - self.check_grad(["X"], - "Y", - max_relative_error=0.05, - numeric_grad_delta=0.001) + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001 + ) class TestCrossEntropyOp3(TestCrossEntropyOp): - """Test cross-entropy with vectorized one-hot representation of labels. 
- """ + """Test cross-entropy with vectorized one-hot representation of labels.""" def init_label(self): - self.label_index = np.random.randint(0, self.class_num, - (self.batch_size)) + self.label_index = np.random.randint( + 0, self.class_num, (self.batch_size) + ) self.label = np.zeros(self.x.shape).astype(self.dtype) self.label[np.arange(self.batch_size), self.label_index] = 1 def get_cross_entropy(self): self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label_index[i]])] - for i in range(self.x.shape[0])]).astype(self.dtype) + [ + [-np.log(self.x[i][self.label_index[i]])] + for i in range(self.x.shape[0]) + ] + ).astype(self.dtype) def init_attr_type(self): self.soft_label = True @@ -149,35 +156,38 @@ class TestCrossEntropyOp3(TestCrossEntropyOp): self.class_num = 27 def test_check_grad(self): - self.check_grad(["X"], - "Y", - max_relative_error=0.05, - numeric_grad_delta=0.001) + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001 + ) class TestCrossEntropyOp4(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with discrete one-hot labels. - """ + """Test high rank tensor cross-entropy with discrete one-hot labels.""" def init_x(self): self.shape = [10, 2, 4] self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) + self.X_2d = randomize_probability(self.ins_num, self.class_num).astype( + self.dtype + ) self.x = self.X_2d.reshape(self.shape + [self.class_num]) def init_label(self): - self.label_2d = np.random.randint(0, - self.class_num, (self.ins_num, 1), - dtype="int64") + self.label_2d = np.random.randint( + 0, self.class_num, (self.ins_num, 1), dtype="int64" + ) self.label = self.label_2d.reshape(self.shape + [1]) def get_cross_entropy(self): cross_entropy_2d = np.asmatrix( - [[-np.log(self.X_2d[i][self.label_2d[i][0]])] - for i in range(self.X_2d.shape[0])]).astype(self.dtype) - self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + - [1]) + [ + [-np.log(self.X_2d[i][self.label_2d[i][0]])] + for i in range(self.X_2d.shape[0]) + ] + ).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape( + self.shape + [1] + ) def init_attr_type(self): self.soft_label = False @@ -190,44 +200,51 @@ class TestCrossEntropyOp4(TestCrossEntropyOp): class TestCrossEntropyOp4RemoveLastDim(TestCrossEntropyOp4): - """Test high rank tensor cross-entropy with discrete one-hot labels with shape [batch_size] - """ + """Test high rank tensor cross-entropy with discrete one-hot labels with shape [batch_size]""" def init_label(self): - self.label_2d = np.random.randint(0, - self.class_num, (self.ins_num, 1), - dtype="int64") + self.label_2d = np.random.randint( + 0, self.class_num, (self.ins_num, 1), dtype="int64" + ) self.label = self.label_2d.reshape(self.shape) def get_cross_entropy(self): cross_entropy_2d = np.asmatrix( - [[-np.log(self.X_2d[i][self.label_2d[i][0]])] - for i in range(self.X_2d.shape[0])]).astype(self.dtype) + [ + [-np.log(self.X_2d[i][self.label_2d[i][0]])] + for i in range(self.X_2d.shape[0]) + ] + ).astype(self.dtype) self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape) class TestCrossEntropyOp5(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with vectorized soft labels. 
- """ + """Test high rank tensor cross-entropy with vectorized soft labels.""" def init_x(self): self.shape = [4, 3] self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) + self.X_2d = randomize_probability(self.ins_num, self.class_num).astype( + self.dtype + ) self.x = self.X_2d.reshape(self.shape + [self.class_num]) def init_label(self): self.label_2d = np.random.uniform( - 0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype) + 0.1, 1.0, [self.ins_num, self.class_num] + ).astype(self.dtype) self.label_2d /= self.label_2d.sum(axis=1, keepdims=True) self.label = self.label_2d.reshape(self.shape + [self.class_num]) def get_cross_entropy(self): - cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum( - axis=1, keepdims=True).astype(self.dtype) - self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + - [1]) + cross_entropy_2d = ( + (-self.label_2d * np.log(self.X_2d)) + .sum(axis=1, keepdims=True) + .astype(self.dtype) + ) + self.cross_entropy = np.array(cross_entropy_2d).reshape( + self.shape + [1] + ) def init_attr_type(self): self.soft_label = True @@ -239,39 +256,44 @@ class TestCrossEntropyOp5(TestCrossEntropyOp): self.class_num = 37 def test_check_grad(self): - self.check_grad(["X"], - "Y", - max_relative_error=0.05, - numeric_grad_delta=0.001) + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001 + ) class TestCrossEntropyOp6(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with vectorized one-hot representation of labels. - """ + """Test high rank tensor cross-entropy with vectorized one-hot representation of labels.""" def init_x(self): self.shape = [4, 3, 2] self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) + self.X_2d = randomize_probability(self.ins_num, self.class_num).astype( + self.dtype + ) self.x = self.X_2d.reshape(self.shape + [self.class_num]) def init_label(self): - self.label_index_2d = np.random.randint(0, - self.class_num, (self.ins_num), - dtype="int64") + self.label_index_2d = np.random.randint( + 0, self.class_num, (self.ins_num), dtype="int64" + ) label_2d = np.zeros(self.X_2d.shape) label_2d[np.arange(self.ins_num), self.label_index_2d] = 1 self.label = label_2d.reshape(self.shape + [self.class_num]).astype( - self.dtype) + self.dtype + ) def get_cross_entropy(self): cross_entropy_2d = np.asmatrix( - [[-np.log(self.X_2d[i][self.label_index_2d[i]])] - for i in range(self.X_2d.shape[0])]) - self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + - [1]).astype( - self.dtype) + [ + [-np.log(self.X_2d[i][self.label_index_2d[i]])] + for i in range(self.X_2d.shape[0]) + ] + ) + self.cross_entropy = ( + np.array(cross_entropy_2d) + .reshape(self.shape + [1]) + .astype(self.dtype) + ) def init_attr_type(self): self.soft_label = True @@ -283,26 +305,28 @@ class TestCrossEntropyOp6(TestCrossEntropyOp): self.class_num = 17 def test_check_grad(self): - self.check_grad(["X"], - "Y", - max_relative_error=0.05, - numeric_grad_delta=0.001) + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001 + ) class TestCrossEntropyOp7(TestCrossEntropyOp): - """Test cross-entropy with ignore index. 
- """ + """Test cross-entropy with ignore index.""" def init_label(self): - self.label = np.random.randint(0, - self.class_num, (self.batch_size, 1), - dtype="int64") + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64" + ) def get_cross_entropy(self): self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label[i][0]])] - if self.label[i][0] != self.ignore_index else [0] - for i in range(self.x.shape[0])]).astype(self.dtype) + [ + [-np.log(self.x[i][self.label[i][0]])] + if self.label[i][0] != self.ignore_index + else [0] + for i in range(self.x.shape[0]) + ] + ).astype(self.dtype) def init_attr_type(self): self.soft_label = False @@ -317,30 +341,35 @@ class TestCrossEntropyOp7(TestCrossEntropyOp): class TestCrossEntropyOp7RemoveLastDim(TestCrossEntropyOp7): - """Test cross-entropy with ignore index with shape [batch_size]. - """ + """Test cross-entropy with ignore index with shape [batch_size].""" def init_label(self): - self.label = np.random.randint(0, - self.class_num, (self.batch_size), - dtype="int64") + self.label = np.random.randint( + 0, self.class_num, (self.batch_size), dtype="int64" + ) def get_cross_entropy(self): self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label[i]])] - if self.label[i] != self.ignore_index else [0] - for i in range(self.x.shape[0])]).astype(self.dtype) - self.cross_entropy = np.array(self.cross_entropy).reshape( - [self.batch_size]).astype(self.dtype) + [ + [-np.log(self.x[i][self.label[i]])] + if self.label[i] != self.ignore_index + else [0] + for i in range(self.x.shape[0]) + ] + ).astype(self.dtype) + self.cross_entropy = ( + np.array(self.cross_entropy) + .reshape([self.batch_size]) + .astype(self.dtype) + ) # Add Fp16 test def create_test_class(parent, cls_name): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCrossEntropyFP16Op(parent): - def init_dtype_type(self): return np.float16 @@ -352,9 +381,9 @@ def create_test_class(parent, cls_name): def test_check_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X'], - 'Y', - max_relative_error=0.9) + self.check_grad_with_place( + place, ['X'], 'Y', max_relative_error=0.9 + ) cls_name = "{0}".format(cls_name) TestCrossEntropyFP16Op.__name__ = cls_name @@ -362,29 +391,32 @@ def create_test_class(parent, cls_name): create_test_class(TestCrossEntropyOp, "TestCrossEntropyF16Op") -#create_test_class(TestCrossEntropyOp2, "TestCrossEntropyF16Op2") +# create_test_class(TestCrossEntropyOp2, "TestCrossEntropyF16Op2") create_test_class(TestCrossEntropyOp3, "TestCrossEntropyF16Op3") create_test_class(TestCrossEntropyOp4, "TestCrossEntropyF16Op4") -create_test_class(TestCrossEntropyOp4RemoveLastDim, - "TestCrossEntropyF16Op4RemoveLastDim") -#create_test_class(TestCrossEntropyOp5, "TestCrossEntropyF16Op5") +create_test_class( + TestCrossEntropyOp4RemoveLastDim, "TestCrossEntropyF16Op4RemoveLastDim" +) +# create_test_class(TestCrossEntropyOp5, "TestCrossEntropyF16Op5") create_test_class(TestCrossEntropyOp6, "TestCrossEntropyF16Op6") create_test_class(TestCrossEntropyOp7, "TestCrossEntropyF16Op7") -create_test_class(TestCrossEntropyOp7RemoveLastDim, - "TestCrossEntropyF16Op7RemoveLastDim") +create_test_class( + TestCrossEntropyOp7RemoveLastDim, "TestCrossEntropyF16Op7RemoveLastDim" +) class TestCrossEntropyOpError(unittest.TestCase): - def test_errors(self): 
with program_guard(Program(), Program()): def test_Variable(): # the input of cross_entropy must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - lab1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + lab1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) fluid.layers.cross_entropy(x1, lab1) self.assertRaises(TypeError, test_Variable) @@ -392,12 +424,12 @@ class TestCrossEntropyOpError(unittest.TestCase): def test_dtype(): # the input dtype of cross_entropy must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="int32") - lab2 = fluid.layers.data(name='lab2', - shape=[3, 4, 5, 6], - dtype="int32") + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="int32" + ) + lab2 = fluid.layers.data( + name='lab2', shape=[3, 4, 5, 6], dtype="int32" + ) fluid.layers.cross_entropy(x2, lab2) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_cross_op.py b/python/paddle/fluid/tests/unittests/test_cross_op.py index 7ff724d2b5a53bffee45b4a40eefa82f97597460..73580d6fc8fcd7888ae7505833dd3a9c9ce7ce10 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_op.py @@ -21,14 +21,13 @@ from paddle.fluid import Program, program_guard class TestCrossOp(OpTest): - def setUp(self): self.op_type = "cross" self.python_api = paddle.cross self.initTestCase() self.inputs = { 'X': np.random.random(self.shape).astype(self.dtype), - 'Y': np.random.random(self.shape).astype(self.dtype) + 'Y': np.random.random(self.shape).astype(self.dtype), } self.init_output() @@ -53,7 +52,6 @@ class TestCrossOp(OpTest): class TestCrossOpCase1(TestCrossOp): - def initTestCase(self): self.shape = (2048, 3) self.dtype = np.float32 @@ -66,12 +64,13 @@ class TestCrossOpCase1(TestCrossOp): class TestCrossAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], - [3.0, 3.0, 3.0]]) - self.data_y = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], - [1.0, 1.0, 1.0]]) + self.data_x = np.array( + [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]] + ) + self.data_y = np.array( + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] + ) def test_cross_api(self): self.input_data() @@ -82,14 +81,14 @@ class TestCrossAPI(unittest.TestCase): y = fluid.layers.data(name='y', shape=[-1, 3]) z = paddle.cross(x, y, axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'y': self.data_y - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]]) + (res,) = exe.run( + feed={'x': self.data_x, 'y': self.data_y}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: @@ -98,14 +97,14 @@ class TestCrossAPI(unittest.TestCase): y = fluid.layers.data(name='y', shape=[-1, 3]) z = paddle.cross(x, y) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'y': self.data_y - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], - [-1.0, -1.0, -1.0]]) + (res,) = 
exe.run( + feed={'x': self.data_x, 'y': self.data_y}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], [-1.0, -1.0, -1.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 3: @@ -134,8 +133,9 @@ class TestCrossAPI(unittest.TestCase): y = fluid.dygraph.to_variable(self.data_y) z = paddle.cross(x, y, axis=1) np_z = z.numpy() - expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]]) + expect_out = np.array( + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_crypto.py b/python/paddle/fluid/tests/unittests/test_crypto.py index 120a4588773a62a6e2cfa889cb5bc2eea685190e..147615905fd8532d94d845607348cd443452b706 100644 --- a/python/paddle/fluid/tests/unittests/test_crypto.py +++ b/python/paddle/fluid/tests/unittests/test_crypto.py @@ -19,7 +19,6 @@ import unittest class CipherUtilsTestCase(unittest.TestCase): - def test_gen_key(self): key1 = CipherUtils.gen_key(256) key2 = CipherUtils.gen_key_to_file(256, "paddle_aes_test.keyfile") @@ -31,7 +30,6 @@ class CipherUtilsTestCase(unittest.TestCase): class CipherTestCase(unittest.TestCase): - def test_aes_cipher(self): plaintext = "hello world" key = CipherUtils.gen_key(256) diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py index 38e4a0721c6efd2b20e4635a73f68f6398295143..b11dfe0254a252713b664c6bfabfd9718d9231fb 100644 --- a/python/paddle/fluid/tests/unittests/test_ctc_align.py +++ b/python/paddle/fluid/tests/unittests/test_ctc_align.py @@ -28,8 +28,9 @@ def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None): prev_token = -1 for j in range(cur_offset, cur_offset + lod0[i]): token = input[j][0] - if (token != blank) and not (merge_repeated - and token == prev_token): + if (token != blank) and not ( + merge_repeated and token == prev_token + ): result.append(token) prev_token = token cur_offset += lod0[i] @@ -44,37 +45,44 @@ def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None): prev_token = -1 for j in range(input_length[i][0]): token = input[i][j] - if (token != blank) and not (merge_repeated - and token == prev_token): + if (token != blank) and not ( + merge_repeated and token == prev_token + ): result[i].append(token) prev_token = token start = len(result[i]) output_length.append([start]) for j in range(start, len(input[i])): result[i].append(padding) - result = np.array(result).reshape([len(input), - len(input[0])]).astype("int32") - output_length = np.array(output_length).reshape([len(input), - 1]).astype("int32") + result = ( + np.array(result) + .reshape([len(input), len(input[0])]) + .astype("int32") + ) + output_length = ( + np.array(output_length).reshape([len(input), 1]).astype("int32") + ) return result, output_length class TestCTCAlignOp(OpTest): - def config(self): self.op_type = "ctc_align" self.input_lod = [[11, 7]] self.blank = 0 self.merge_repeated = False - self.input = np.array( - [0, 1, 2, 2, 0, 4, 0, 4, 5, 0, 6, 6, 0, 0, 7, 7, 7, - 0]).reshape([18, 1]).astype("int32") + self.input = ( + np.array([0, 1, 2, 2, 0, 4, 0, 4, 5, 0, 6, 6, 0, 0, 7, 7, 7, 0]) + .reshape([18, 1]) + .astype("int32") + ) def setUp(self): self.config() - output = CTCAlign(self.input, self.input_lod, self.blank, - self.merge_repeated) + output = CTCAlign( + self.input, self.input_lod, self.blank, self.merge_repeated + ) 
self.inputs = { "Input": (self.input, self.input_lod), @@ -82,7 +90,7 @@ class TestCTCAlignOp(OpTest): self.outputs = {"Output": output} self.attrs = { "blank": self.blank, - "merge_repeated": self.merge_repeated + "merge_repeated": self.merge_repeated, } def test_check_output(self): @@ -90,19 +98,19 @@ class TestCTCAlignOp(OpTest): class TestCTCAlignOpCase1(TestCTCAlignOp): - def config(self): self.op_type = "ctc_align" self.input_lod = [[11, 8]] self.blank = 0 self.merge_repeated = True - self.input = np.array( - [0, 1, 2, 2, 0, 4, 0, 4, 5, 0, 6, 6, 0, 0, 7, 7, 7, 0, - 0]).reshape([19, 1]).astype("int32") + self.input = ( + np.array([0, 1, 2, 2, 0, 4, 0, 4, 5, 0, 6, 6, 0, 0, 7, 7, 7, 0, 0]) + .reshape([19, 1]) + .astype("int32") + ) class TestCTCAlignOpCase2(TestCTCAlignOp): - def config(self): self.op_type = "ctc_align" self.input_lod = [[4]] @@ -112,32 +120,43 @@ class TestCTCAlignOpCase2(TestCTCAlignOp): class TestCTCAlignPaddingOp(OpTest): - def config(self): self.op_type = "ctc_align" self.input_lod = [] self.blank = 0 self.padding_value = 0 self.merge_repeated = True - self.input = np.array([[0, 2, 4, 4, 0, 6, 3, 6, 6, 0, 0], - [1, 1, 3, 0, 0, 4, 5, 6, 0, 0, - 0]]).reshape([2, 11]).astype("int32") + self.input = ( + np.array( + [ + [0, 2, 4, 4, 0, 6, 3, 6, 6, 0, 0], + [1, 1, 3, 0, 0, 4, 5, 6, 0, 0, 0], + ] + ) + .reshape([2, 11]) + .astype("int32") + ) self.input_length = np.array([[9], [8]]).reshape([2, 1]).astype("int32") def setUp(self): self.config() - output, output_length = CTCAlign(self.input, self.input_lod, self.blank, - self.merge_repeated, - self.padding_value, self.input_length) + output, output_length = CTCAlign( + self.input, + self.input_lod, + self.blank, + self.merge_repeated, + self.padding_value, + self.input_length, + ) self.inputs = { "Input": (self.input, self.input_lod), - "InputLength": self.input_length + "InputLength": self.input_length, } self.outputs = {"Output": output, "OutputLength": output_length} self.attrs = { "blank": self.blank, "merge_repeated": self.merge_repeated, - "padding_value": self.padding_value + "padding_value": self.padding_value, } def test_check_output(self): @@ -145,18 +164,22 @@ class TestCTCAlignPaddingOp(OpTest): class TestCTCAlignOpCase3(TestCTCAlignPaddingOp): - def config(self): self.op_type = "ctc_align" self.blank = 0 self.input_lod = [] self.merge_repeated = True self.padding_value = 0 - self.input = np.array([[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], - [0, 7, 7, 7, 0, 0]]).reshape([3, - 6]).astype("int32") - self.input_length = np.array([[6], [5], - [4]]).reshape([3, 1]).astype("int32") + self.input = ( + np.array( + [[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], [0, 7, 7, 7, 0, 0]] + ) + .reshape([3, 6]) + .astype("int32") + ) + self.input_length = ( + np.array([[6], [5], [4]]).reshape([3, 1]).astype("int32") + ) class TestCTCAlignOpCase4(TestCTCAlignPaddingOp): @@ -170,30 +193,38 @@ class TestCTCAlignOpCase4(TestCTCAlignPaddingOp): self.input_lod = [] self.merge_repeated = False self.padding_value = 0 - self.input = np.array([[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], - [0, 7, 7, 7, 0, 0]]).reshape([3, - 6]).astype("int32") - self.input_length = np.array([[6], [5], - [4]]).reshape([3, 1]).astype("int32") + self.input = ( + np.array( + [[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], [0, 7, 7, 7, 0, 0]] + ) + .reshape([3, 6]) + .astype("int32") + ) + self.input_length = ( + np.array([[6], [5], [4]]).reshape([3, 1]).astype("int32") + ) class TestCTCAlignOpCase5(TestCTCAlignPaddingOp): - def config(self): self.op_type = "ctc_align" 
self.blank = 0 self.input_lod = [] self.merge_repeated = False self.padding_value = 1 - self.input = np.array([[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], - [0, 7, 1, 7, 0, 0]]).reshape([3, - 6]).astype("int32") - self.input_length = np.array([[6], [5], - [4]]).reshape([3, 1]).astype("int32") + self.input = ( + np.array( + [[0, 1, 2, 2, 0, 4], [0, 4, 5, 0, 6, 0], [0, 7, 1, 7, 0, 0]] + ) + .reshape([3, 6]) + .astype("int32") + ) + self.input_length = ( + np.array([[6], [5], [4]]).reshape([3, 1]).astype("int32") + ) class TestCTCAlignOpApi(unittest.TestCase): - def test_api(self): x = fluid.layers.data('x', shape=[4], dtype='float32') y = fluid.layers.ctc_greedy_decoder(x, blank=0) @@ -201,11 +232,13 @@ class TestCTCAlignOpApi(unittest.TestCase): x_pad = fluid.layers.data('x_pad', shape=[4, 4], dtype='float32') x_pad_len = fluid.layers.data('x_pad_len', shape=[1], dtype='int64') y_pad, y_pad_len = fluid.layers.ctc_greedy_decoder( - x_pad, blank=0, input_length=x_pad_len) + x_pad, blank=0, input_length=x_pad_len + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(8, 4).astype("float32"), [[4, 4]], place) + np.random.rand(8, 4).astype("float32"), [[4, 4]], place + ) x_pad_tensor = np.random.rand(2, 4, 4).astype("float32") x_pad_len_tensor = np.array([[4], [4]]).reshape([2, 1]).astype("int64") @@ -213,17 +246,18 @@ class TestCTCAlignOpApi(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x_tensor, - 'x_pad': x_pad_tensor, - 'x_pad_len': x_pad_len_tensor - }, - fetch_list=[y, y_pad, y_pad_len], - return_numpy=False) + ret = exe.run( + feed={ + 'x': x_tensor, + 'x_pad': x_pad_tensor, + 'x_pad_len': x_pad_len_tensor, + }, + fetch_list=[y, y_pad, y_pad_len], + return_numpy=False, + ) class BadInputTestCTCAlignr(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_cudnn_version.py b/python/paddle/fluid/tests/unittests/test_cuda_cudnn_version.py index 36637971f9e48e0f40cab308da4666d5a0c8a185..d8229247a817f66eb19da35a6d5506059701af2e 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_cudnn_version.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_cudnn_version.py @@ -17,7 +17,6 @@ import paddle class TestCPUVersion(unittest.TestCase): - def test_cuda_cudnn_version_in_cpu_package(self): if not paddle.is_compiled_with_cuda(): self.assertEqual(paddle.version.cuda(), 'False') diff --git a/python/paddle/fluid/tests/unittests/test_cuda_device_count.py b/python/paddle/fluid/tests/unittests/test_cuda_device_count.py index 482a3413caf464fa883cd07141ae3859137e3edf..7789bcd944dedcfb8043b0722a97a3e29ad004d8 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_device_count.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_device_count.py @@ -17,7 +17,6 @@ import unittest class TestDeviceCount(unittest.TestCase): - def test_device_count(self): s = paddle.device.cuda.device_count() self.assertIsNotNone(s) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_device_name_capability.py b/python/paddle/fluid/tests/unittests/test_cuda_device_name_capability.py index 0d749c5d17729941c5c3f3fdde1f683d7b95ae7a..07ef914f0a311cce60098afa16af956a3abe5bd8 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_device_name_capability.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_device_name_capability.py @@ -17,7 +17,6 @@ import unittest class TestDeviceName(unittest.TestCase): - def 
test_device_name_default(self): if paddle.is_compiled_with_cuda(): name = paddle.device.cuda.get_device_name() @@ -35,7 +34,6 @@ class TestDeviceName(unittest.TestCase): class TestDeviceCapability(unittest.TestCase): - def test_device_capability_default(self): if paddle.is_compiled_with_cuda(): capability = paddle.device.cuda.get_device_capability() @@ -49,7 +47,8 @@ class TestDeviceCapability(unittest.TestCase): def test_device_capability_CUDAPlace(self): if paddle.is_compiled_with_cuda(): capability = paddle.device.cuda.get_device_capability( - paddle.CUDAPlace(0)) + paddle.CUDAPlace(0) + ) self.assertIsNotNone(capability) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_empty_cache.py b/python/paddle/fluid/tests/unittests/test_cuda_empty_cache.py index 0ec066eb7cdcee36c5c02f245804c5abde8504b0..4aefb234bbfc144304c1687b4dd21387bb91a2a9 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_empty_cache.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_empty_cache.py @@ -17,7 +17,6 @@ import unittest class TestEmptyCache(unittest.TestCase): - def test_empty_cache(self): x = paddle.randn((2, 10, 12)).astype('float32') del x diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph.py b/python/paddle/fluid/tests/unittests/test_cuda_graph.py index 035c71b1dad564faea56da151b2e079e1da2b226..ce446264f39d52554106713f0919360a9d90a56e 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph.py @@ -28,19 +28,21 @@ def can_use_cuda_graph(): class TestCUDAGraph(unittest.TestCase): - def setUp(self): if can_use_cuda_graph(): - paddle.set_flags({ - 'FLAGS_allocator_strategy': 'auto_growth', - 'FLAGS_sync_nccl_allreduce': False, - 'FLAGS_cudnn_deterministic': True, - 'FLAGS_use_stream_safe_cuda_allocator': False, - }) + paddle.set_flags( + { + 'FLAGS_allocator_strategy': 'auto_growth', + 'FLAGS_sync_nccl_allreduce': False, + 'FLAGS_cudnn_deterministic': True, + 'FLAGS_use_stream_safe_cuda_allocator': False, + } + ) def random_tensor(self, shape): return paddle.to_tensor( - np.random.randint(low=0, high=10, size=shape).astype("float32")) + np.random.randint(low=0, high=10, size=shape).astype("float32") + ) @switch_to_static_graph def test_cuda_graph_static_graph(self): @@ -48,10 +50,12 @@ class TestCUDAGraph(unittest.TestCase): return seed = 100 - loss_cuda_graph = self.cuda_graph_static_graph_main(seed, - use_cuda_graph=True) + loss_cuda_graph = self.cuda_graph_static_graph_main( + seed, use_cuda_graph=True + ) loss_no_cuda_graph = self.cuda_graph_static_graph_main( - seed, use_cuda_graph=False) + seed, use_cuda_graph=False + ) self.assertEqual(loss_cuda_graph, loss_no_cuda_graph) def cuda_graph_static_graph_main(self, seed, use_cuda_graph): @@ -65,18 +69,19 @@ class TestCUDAGraph(unittest.TestCase): startup = paddle.static.Program() main = paddle.static.Program() with paddle.static.program_guard(main, startup): - image = paddle.static.data(name="image", - shape=image_shape, - dtype='float32') - label = paddle.static.data(name="label", - shape=label_shape, - dtype='int64') + image = paddle.static.data( + name="image", shape=image_shape, dtype='float32' + ) + label = paddle.static.data( + name="label", shape=label_shape, dtype='int64' + ) image.persistable = True label.persistable = True loss = simple_fc_net_with_inputs(image, label, class_num) loss.persistable = True lr = paddle.optimizer.lr.PiecewiseDecay( - boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04]) + boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 
0.04] + ) optimizer = paddle.optimizer.SGD(learning_rate=lr) optimizer.minimize(loss) place = paddle.CUDAPlace(0) @@ -89,9 +94,10 @@ class TestCUDAGraph(unittest.TestCase): build_strategy.fix_op_run_order = True build_strategy.fuse_all_optimizer_ops = True compiled_program = paddle.static.CompiledProgram( - main).with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=place) + main + ).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy, places=place + ) image_t = scope.var(image.name).get_tensor() label_t = scope.var(label.name).get_tensor() loss_t = scope.var(loss.name).get_tensor() @@ -101,12 +107,14 @@ class TestCUDAGraph(unittest.TestCase): cuda_graph = None for batch_id in range(20): image_t.set( - np.random.rand(*image_shape).astype('float32'), place) + np.random.rand(*image_shape).astype('float32'), place + ) label_t.set( - np.random.randint(low=0, - high=class_num, - size=label_shape, - dtype='int64'), place) + np.random.randint( + low=0, high=class_num, size=label_shape, dtype='int64' + ), + place, + ) if batch_id == 1 and use_cuda_graph: cuda_graph = CUDAGraph(place, mode="global") @@ -195,7 +203,6 @@ class TestCUDAGraph(unittest.TestCase): return class AutoIncDataset(paddle.io.Dataset): - def __init__(self, n, dtype): self.n = n self.dtype = dtype @@ -209,10 +216,9 @@ class TestCUDAGraph(unittest.TestCase): n = 100 dtype = 'int64' dataset = AutoIncDataset(n, dtype) - data_loader = paddle.io.DataLoader(dataset, - batch_size=1, - num_workers=2, - use_buffer_reader=True) + data_loader = paddle.io.DataLoader( + dataset, batch_size=1, num_workers=2, use_buffer_reader=True + ) x = None y = None diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py index ecd3f406e088db556a87c74425e24ed3a4db3697..7ae6a9f8e1998958b5e04ef56565da24d7ae563a 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py @@ -20,7 +20,6 @@ from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported class SimpleModel(nn.Layer): - def __init__(self, in_size, out_size): super(SimpleModel, self).__init__() self.linear = nn.Linear(in_size, out_size) @@ -39,7 +38,6 @@ class SimpleModel(nn.Layer): class TestSimpleModel(unittest.TestCase): - def setUp(self): paddle.set_flags({'FLAGS_eager_delete_tensor_gb': 0.0}) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static.py b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static.py index b45a862ffbf30970f5231a9a584420f47ed4a027..077caebc03f872ed42bccd0c81db37e9f04335f8 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static.py @@ -21,7 +21,6 @@ paddle.enable_static() class SimpleModel(nn.Layer): - def __init__(self, in_size, out_size): super(SimpleModel, self).__init__() self.linear = nn.Linear(in_size, out_size) @@ -40,7 +39,6 @@ class SimpleModel(nn.Layer): class TestCudaGraphAttrAll(unittest.TestCase): - def test_all_program(self): if not is_cuda_graph_supported(): return @@ -59,8 +57,10 @@ class TestCudaGraphAttrAll(unittest.TestCase): if op._cuda_graph_attr is None: # the loss and opt are not wrapped assert op.type in [ - 'sgd', 'reduce_mean', 'fill_constant', - 'reduce_mean_grad' + 'sgd', + 'reduce_mean', + 'fill_constant', + 'reduce_mean_grad', ] 
else: assert op._cuda_graph_attr == 'thread_local;0;0' diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static_run.py b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static_run.py index 445211d35a1d944db624c607682ac7050a2463c1..0d2b737816bca5c02f2092c8c641cf8afe392a0e 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static_run.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph_static_run.py @@ -16,13 +16,16 @@ import paddle import paddle.nn as nn import unittest import numpy as np -from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform +from paddle.device.cuda.graphs import ( + wrap_cuda_graph, + is_cuda_graph_supported, + cuda_graph_transform, +) paddle.enable_static() class SimpleModel(nn.Layer): - def __init__(self, in_size, out_size): super(SimpleModel, self).__init__() self.linear = nn.Linear(in_size, out_size) @@ -41,7 +44,6 @@ class SimpleModel(nn.Layer): class TestCudaGraphAttrAll(unittest.TestCase): - def setUp(self): paddle.set_flags({'FLAGS_eager_delete_tensor_gb': 0.0}) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_max_memory_allocated.py b/python/paddle/fluid/tests/unittests/test_cuda_max_memory_allocated.py index 5f047977ed354561e72333f4401c367efb5421fb..909b9a44b11b74d2a0465850934826c10052baa6 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_max_memory_allocated.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_max_memory_allocated.py @@ -15,12 +15,15 @@ import paddle import unittest from paddle.fluid import core -from paddle.device.cuda import device_count, memory_allocated, max_memory_allocated +from paddle.device.cuda import ( + device_count, + memory_allocated, + max_memory_allocated, +) from paddle.fluid.framework import _test_eager_guard class TestMaxMemoryAllocated(unittest.TestCase): - def func_test_max_memory_allocated(self, device=None): if core.is_compiled_with_cuda(): alloc_time = 100 @@ -29,13 +32,15 @@ class TestMaxMemoryAllocated(unittest.TestCase): for i in range(alloc_time): shape = paddle.randint(max_alloc_size) tensor = paddle.zeros(shape) - peak_memory_allocated_size = max(peak_memory_allocated_size, - memory_allocated(device)) + peak_memory_allocated_size = max( + peak_memory_allocated_size, memory_allocated(device) + ) del shape del tensor - self.assertEqual(peak_memory_allocated_size, - max_memory_allocated(device)) + self.assertEqual( + peak_memory_allocated_size, max_memory_allocated(device) + ) def test_max_memory_allocated(self): with _test_eager_guard(): @@ -60,7 +65,11 @@ class TestMaxMemoryAllocated(unittest.TestCase): if core.is_compiled_with_cuda(): wrong_device = [ core.CPUPlace(), - device_count() + 1, -2, 0.5, "gpu1", "npu" + device_count() + 1, + -2, + 0.5, + "gpu1", + "npu", ] for device in wrong_device: with self.assertRaises(BaseException): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_max_memory_reserved.py b/python/paddle/fluid/tests/unittests/test_cuda_max_memory_reserved.py index 936a084abb70420e8583ea0c724ed5968e65a047..e067d293074ab8425085c24f6b0c4679daa344eb 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_max_memory_reserved.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_max_memory_reserved.py @@ -15,11 +15,14 @@ import paddle import unittest from paddle.fluid import core -from paddle.device.cuda import device_count, memory_reserved, max_memory_reserved +from paddle.device.cuda import ( + device_count, + memory_reserved, + 
max_memory_reserved, +) class TestMaxMemoryreserved(unittest.TestCase): - def test_max_memory_reserved(self, device=None): if core.is_compiled_with_cuda(): alloc_time = 100 @@ -28,13 +31,15 @@ class TestMaxMemoryreserved(unittest.TestCase): for i in range(alloc_time): shape = paddle.randint(max_alloc_size) tensor = paddle.zeros(shape) - peak_memory_reserved_size = max(peak_memory_reserved_size, - memory_reserved(device)) + peak_memory_reserved_size = max( + peak_memory_reserved_size, memory_reserved(device) + ) del shape del tensor - self.assertEqual(peak_memory_reserved_size, - max_memory_reserved(device)) + self.assertEqual( + peak_memory_reserved_size, max_memory_reserved(device) + ) def test_max_memory_reserved_for_all_places(self): if core.is_compiled_with_cuda(): @@ -49,7 +54,11 @@ class TestMaxMemoryreserved(unittest.TestCase): if core.is_compiled_with_cuda(): wrong_device = [ core.CPUPlace(), - device_count() + 1, -2, 0.5, "gpu1", "npu" + device_count() + 1, + -2, + 0.5, + "gpu1", + "npu", ] for device in wrong_device: with self.assertRaises(BaseException): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_memory_allocated.py b/python/paddle/fluid/tests/unittests/test_cuda_memory_allocated.py index 4e82ab7326cb5a8d0030a8a049e1188fab760e10..5b21a9b07e88f0db1fc877307bfda150719a7f0f 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_memory_allocated.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_memory_allocated.py @@ -19,7 +19,6 @@ from paddle.device.cuda import device_count, memory_allocated class TestMemoryAllocated(unittest.TestCase): - def test_memory_allocated(self, device=None): if core.is_compiled_with_cuda(): tensor = paddle.zeros(shape=[256]) @@ -40,7 +39,11 @@ class TestMemoryAllocated(unittest.TestCase): if core.is_compiled_with_cuda(): wrong_device = [ core.CPUPlace(), - device_count() + 1, -2, 0.5, "gpu1", "npu" + device_count() + 1, + -2, + 0.5, + "gpu1", + "npu", ] for device in wrong_device: with self.assertRaises(BaseException): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_memory_reserved.py b/python/paddle/fluid/tests/unittests/test_cuda_memory_reserved.py index f5db61a1e65dec468dad4434ce5de2a32d49d096..6b2cdc45f1924200beaa680a9083a1e05563f553 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_memory_reserved.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_memory_reserved.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TestMemoryreserved(unittest.TestCase): - def func_test_memory_reserved(self, device=None): if core.is_compiled_with_cuda(): tensor = paddle.zeros(shape=[256]) @@ -51,7 +50,11 @@ class TestMemoryreserved(unittest.TestCase): if core.is_compiled_with_cuda(): wrong_device = [ core.CPUPlace(), - device_count() + 1, -2, 0.5, "gpu1", "npu" + device_count() + 1, + -2, + 0.5, + "gpu1", + "npu", ] for device in wrong_device: with self.assertRaises(BaseException): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py b/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py index f12d3a5a4d64d40a45ac3e5de2b7f25c67ab0e33..d9bef511c9c51686afaac60772fed0411dc77802 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py @@ -24,8 +24,9 @@ import shutil import tempfile -@unittest.skipIf(not core.is_compiled_with_cuda(), - "Only test cuda Random Generator") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Only test cuda Random Generator" +) class 
TestGeneratorSeed(unittest.TestCase): """ Test cases for cpu generator seed. @@ -39,18 +40,15 @@ class TestGeneratorSeed(unittest.TestCase): gen.manual_seed(111111111) st = paddle.get_cuda_rng_state() - x = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) - x_again = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) - x_third = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) + x = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) + x_again = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) + x_third = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) print("x: {}".format(x.numpy())) print("x_again: {}".format(x_again.numpy())) x = x + x_again + x_third @@ -58,18 +56,15 @@ class TestGeneratorSeed(unittest.TestCase): paddle.set_cuda_rng_state(st) - x1 = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) - x1_again = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) - x1_third = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) + x1 = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) + x1_again = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) + x1_third = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) x1 = x1 + x1_again + x1_third y1 = fluid.layers.dropout(x1, 0.5) y_np = y.numpy() @@ -136,26 +131,30 @@ class TestGeneratorSeed(unittest.TestCase): result_1 = fluid.layers.fc( input=x, size=10, - param_attr=fluid.initializer.TruncatedNormal(loc=0.0, - scale=2.0)) + param_attr=fluid.initializer.TruncatedNormal( + loc=0.0, scale=2.0 + ), + ) result_2 = fluid.layers.fc( input=x, size=10, - param_attr=fluid.initializer.TruncatedNormal(loc=0.0, - scale=2.0)) + param_attr=fluid.initializer.TruncatedNormal( + loc=0.0, scale=2.0 + ), + ) exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) paddle.seed(123123143) with fluid.program_guard(train_program, startup_program): exe.run(startup_program) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py b/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py index 099f19bd03ef082bf944320dea7b1e71fc1cb9b0..600cd04d1970f81e02c01658e312ebfcbbdd239d 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py @@ -21,7 +21,6 @@ import numpy as np class TestCurrentStream(unittest.TestCase): - def test_current_stream(self): if paddle.is_compiled_with_cuda(): s = cuda.current_stream() @@ -39,7 +38,6 @@ class TestCurrentStream(unittest.TestCase): class TestSynchronize(unittest.TestCase): - def test_synchronize(self): if paddle.is_compiled_with_cuda(): self.assertIsNone(cuda.synchronize()) @@ -50,7 +48,6 @@ class TestSynchronize(unittest.TestCase): class TestCUDAStream(unittest.TestCase): - def test_cuda_stream(self): if paddle.is_compiled_with_cuda(): s = paddle.device.cuda.Stream() @@ -88,7 +85,6 @@ class TestCUDAStream(unittest.TestCase): 
class TestCUDAEvent(unittest.TestCase): - def test_cuda_event(self): if paddle.is_compiled_with_cuda(): e = paddle.device.cuda.Event(True, False, False) @@ -158,14 +154,15 @@ class TestStreamGuard(unittest.TestCase): def test_set_current_stream_raise_error(self): if paddle.is_compiled_with_cuda(): - self.assertRaises(TypeError, paddle.device.cuda._set_current_stream, - np.zeros(5)) - self.assertRaises(TypeError, paddle.device.cuda._set_current_stream, - None) + self.assertRaises( + TypeError, paddle.device.cuda._set_current_stream, np.zeros(5) + ) + self.assertRaises( + TypeError, paddle.device.cuda._set_current_stream, None + ) class TestRawStream(unittest.TestCase): - def test_cuda_stream(self): if paddle.is_compiled_with_cuda(): cuda_stream = paddle.device.cuda.current_stream().cuda_stream diff --git a/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py b/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py index d82da93ba04d93ed5afdc573321da415f39a3dfa..39c25c909fdc6dc986c5f2e94d23bdac6847a6c8 100644 --- a/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py +++ b/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py @@ -23,15 +23,16 @@ np.random.seed = 123 def sigmoid(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) def tanh(x): - return 2. * sigmoid(2. * x) - 1. + return 2.0 * sigmoid(2.0 * x) - 1.0 -def cudnn_step(step_input_np, pre_hidden_np, weight_ih, bias_ih, weight_hh, - bias_hh): +def cudnn_step( + step_input_np, pre_hidden_np, weight_ih, bias_ih, weight_hh, bias_hh +): igates = np.matmul(step_input_np, weight_ih.transpose(1, 0)) igates += bias_ih hgates = np.matmul(pre_hidden_np, weight_hh.transpose(1, 0)) @@ -55,8 +56,9 @@ def cudnn_step(step_input_np, pre_hidden_np, weight_ih, bias_ih, weight_hh, return new_hidden -def non_cudnn_step(step_in, pre_hidden, gate_w, gate_b, candidate_w, - candidate_b): +def non_cudnn_step( + step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b +): concat_1 = np.concatenate([step_in, pre_hidden], 1) gate_input = np.matmul(concat_1, gate_w) @@ -77,7 +79,6 @@ def non_cudnn_step(step_in, pre_hidden, gate_w, gate_b, candidate_w, class TestCudnnGRU(unittest.TestCase): - def setUp(self): self.input_size = 100 self.hidden_size = 200 @@ -93,8 +94,9 @@ class TestCudnnGRU(unittest.TestCase): with fluid.dygraph.guard(place): param_attr = fluid.ParamAttr(name="param_attr") bias_attr = fluid.ParamAttr(name="bias_attr") - named_cudnn_gru = GRUCell(self.hidden_size, self.input_size, - param_attr, bias_attr) + named_cudnn_gru = GRUCell( + self.hidden_size, self.input_size, param_attr, bias_attr + ) cudnn_gru = GRUCell(self.hidden_size, self.input_size) param_list = cudnn_gru.state_dict() @@ -109,51 +111,55 @@ class TestCudnnGRU(unittest.TestCase): weight_ih = param_list[weight_ih_name].numpy() weight_ih = np.random.uniform( - -0.1, 0.1, size=weight_ih.shape).astype('float64') + -0.1, 0.1, size=weight_ih.shape + ).astype('float64') param_list[weight_ih_name].set_value(weight_ih) named_param_list[weight_ih_name].set_value(weight_ih) bias_ih = param_list[bias_ih_name].numpy() - bias_ih = np.random.uniform(-0.1, 0.1, - size=bias_ih.shape).astype('float64') + bias_ih = np.random.uniform(-0.1, 0.1, size=bias_ih.shape).astype( + 'float64' + ) param_list[bias_ih_name].set_value(bias_ih) named_param_list[bias_ih_name].set_value(bias_ih) weight_hh = param_list[weight_hh_name].numpy() weight_hh = np.random.uniform( - -0.1, 0.1, size=weight_hh.shape).astype('float64') + -0.1, 0.1, size=weight_hh.shape + ).astype('float64') 
param_list[weight_hh_name].set_value(weight_hh) named_param_list[weight_hh_name].set_value(weight_hh) bias_hh = param_list[bias_hh_name].numpy() - bias_hh = np.random.uniform(-0.1, 0.1, - size=bias_hh.shape).astype('float64') + bias_hh = np.random.uniform(-0.1, 0.1, size=bias_hh.shape).astype( + 'float64' + ) param_list[bias_hh_name].set_value(bias_hh) named_param_list[bias_hh_name].set_value(bias_hh) step_input_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float64') pre_hidden_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') step_input_var = fluid.dygraph.to_variable(step_input_np) pre_hidden_var = fluid.dygraph.to_variable(pre_hidden_np) api_out = cudnn_gru(step_input_var, pre_hidden_var) named_api_out = named_cudnn_gru(step_input_var, pre_hidden_var) - np_out = cudnn_step(step_input_np, pre_hidden_np, weight_ih, bias_ih, - weight_hh, bias_hh) + np_out = cudnn_step( + step_input_np, pre_hidden_np, weight_ih, bias_ih, weight_hh, bias_hh + ) np.testing.assert_allclose(api_out.numpy(), np_out, rtol=1e-05, atol=0) - np.testing.assert_allclose(named_api_out.numpy(), - np_out, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + named_api_out.numpy(), np_out, rtol=1e-05, atol=0 + ) class TestNonCudnnGRU(unittest.TestCase): - def setUp(self): self.input_size = 100 self.hidden_size = 200 @@ -169,14 +175,16 @@ class TestNonCudnnGRU(unittest.TestCase): with fluid.dygraph.guard(place): param_attr = fluid.ParamAttr(name="param_attr") bias_attr = fluid.ParamAttr(name="bias_attr") - named_non_cudnn_gru = GRUCell(self.hidden_size, - self.input_size, - param_attr, - bias_attr, - use_cudnn_impl=False) - non_cudnn_gru = GRUCell(self.hidden_size, - self.input_size, - use_cudnn_impl=False) + named_non_cudnn_gru = GRUCell( + self.hidden_size, + self.input_size, + param_attr, + bias_attr, + use_cudnn_impl=False, + ) + non_cudnn_gru = GRUCell( + self.hidden_size, self.input_size, use_cudnn_impl=False + ) param_list = non_cudnn_gru.state_dict() named_param_list = named_non_cudnn_gru.state_dict() @@ -189,48 +197,58 @@ class TestNonCudnnGRU(unittest.TestCase): candidate_b_name = "_candidate_bias" gate_w = param_list[gate_w_name].numpy() - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float64') + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float64' + ) param_list[gate_w_name].set_value(gate_w) named_param_list[gate_w_name].set_value(gate_w) gate_b = param_list[gate_b_name].numpy() - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float64') + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float64' + ) param_list[gate_b_name].set_value(gate_b) named_param_list[gate_b_name].set_value(gate_b) candidate_w = param_list[candidate_w_name].numpy() candidate_w = np.random.uniform( - -0.1, 0.1, size=candidate_w.shape).astype('float64') + -0.1, 0.1, size=candidate_w.shape + ).astype('float64') param_list[candidate_w_name].set_value(candidate_w) named_param_list[candidate_w_name].set_value(candidate_w) candidate_b = param_list[candidate_b_name].numpy() candidate_b = np.random.uniform( - -0.1, 0.1, size=candidate_b.shape).astype('float64') + -0.1, 0.1, size=candidate_b.shape + ).astype('float64') param_list[candidate_b_name].set_value(candidate_b) named_param_list[candidate_b_name].set_value(candidate_b) step_input_np = 
np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float64') pre_hidden_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') step_input_var = fluid.dygraph.to_variable(step_input_np) pre_hidden_var = fluid.dygraph.to_variable(pre_hidden_np) api_out = non_cudnn_gru(step_input_var, pre_hidden_var) named_api_out = named_non_cudnn_gru(step_input_var, pre_hidden_var) - np_out = non_cudnn_step(step_input_np, pre_hidden_np, gate_w, gate_b, - candidate_w, candidate_b) + np_out = non_cudnn_step( + step_input_np, + pre_hidden_np, + gate_w, + gate_b, + candidate_w, + candidate_b, + ) np.testing.assert_allclose(api_out.numpy(), np_out, rtol=1e-05, atol=0) - np.testing.assert_allclose(named_api_out.numpy(), - np_out, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + named_api_out.numpy(), np_out, rtol=1e-05, atol=0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py b/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py index d21ec3033ed687616ddf50d29bd9b8a20c7d43a5..4eacff27d1432498fa759b510debd05e5bd57eb1 100644 --- a/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py +++ b/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py @@ -23,19 +23,16 @@ np.random.seed = 123 def sigmoid(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) def tanh(x): - return 2. * sigmoid(2. * x) - 1. + return 2.0 * sigmoid(2.0 * x) - 1.0 -def non_cudnn_step(step_in, - pre_hidden, - pre_cell, - gate_w, - gate_b, - forget_bias=1.0): +def non_cudnn_step( + step_in, pre_hidden, pre_cell, gate_w, gate_b, forget_bias=1.0 +): concat_1 = np.concatenate([step_in, pre_hidden], 1) gate_input = np.matmul(concat_1, gate_w) @@ -48,8 +45,15 @@ def non_cudnn_step(step_in, return new_hidden, new_cell -def cudnn_step(step_input_np, pre_hidden_np, pre_cell_np, weight_ih, bias_ih, - weight_hh, bias_hh): +def cudnn_step( + step_input_np, + pre_hidden_np, + pre_cell_np, + weight_ih, + bias_ih, + weight_hh, + bias_hh, +): igates = np.matmul(step_input_np, weight_ih.transpose(1, 0)) igates = igates + bias_ih @@ -78,7 +82,6 @@ def cudnn_step(step_input_np, pre_hidden_np, pre_cell_np, weight_ih, bias_ih, class TestCudnnLSTM(unittest.TestCase): - def setUp(self): self.input_size = 100 self.hidden_size = 200 @@ -93,8 +96,9 @@ class TestCudnnLSTM(unittest.TestCase): with fluid.dygraph.guard(place): param_attr = fluid.ParamAttr(name="param_attr") bias_attr = fluid.ParamAttr(name="bias_attr") - named_cudnn_lstm = LSTMCell(self.hidden_size, self.input_size, - param_attr, bias_attr) + named_cudnn_lstm = LSTMCell( + self.hidden_size, self.input_size, param_attr, bias_attr + ) cudnn_lstm = LSTMCell(self.hidden_size, self.input_size) param_list = cudnn_lstm.state_dict() @@ -108,73 +112,79 @@ class TestCudnnLSTM(unittest.TestCase): bias_hh_name = "_bias_hh" weight_ih = param_list[weight_ih_name].numpy() weight_ih = np.random.uniform( - -0.1, 0.1, size=weight_ih.shape).astype('float64') + -0.1, 0.1, size=weight_ih.shape + ).astype('float64') param_list[weight_ih_name].set_value(weight_ih) named_param_list[weight_ih_name].set_value(weight_ih) bias_ih = param_list[bias_ih_name].numpy() - bias_ih = np.random.uniform(-0.1, 0.1, - size=bias_ih.shape).astype('float64') + bias_ih = np.random.uniform(-0.1, 0.1, size=bias_ih.shape).astype( + 'float64' + ) 
param_list[bias_ih_name].set_value(bias_ih) named_param_list[bias_ih_name].set_value(bias_ih) weight_hh = param_list[weight_hh_name].numpy() weight_hh = np.random.uniform( - -0.1, 0.1, size=weight_hh.shape).astype('float64') + -0.1, 0.1, size=weight_hh.shape + ).astype('float64') param_list[weight_hh_name].set_value(weight_hh) named_param_list[weight_hh_name].set_value(weight_hh) bias_hh = param_list[bias_hh_name].numpy() - bias_hh = np.random.uniform(-0.1, 0.1, - size=bias_hh.shape).astype('float64') + bias_hh = np.random.uniform(-0.1, 0.1, size=bias_hh.shape).astype( + 'float64' + ) param_list[bias_hh_name].set_value(bias_hh) named_param_list[bias_hh_name].set_value(bias_hh) step_input_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float64') pre_hidden_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') pre_cell_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') step_input_var = fluid.dygraph.to_variable(step_input_np) pre_hidden_var = fluid.dygraph.to_variable(pre_hidden_np) pre_cell_var = fluid.dygraph.to_variable(pre_cell_np) api_out = cudnn_lstm(step_input_var, pre_hidden_var, pre_cell_var) - named_api_out = named_cudnn_lstm(step_input_var, pre_hidden_var, - pre_cell_var) + named_api_out = named_cudnn_lstm( + step_input_var, pre_hidden_var, pre_cell_var + ) api_hidden_out = api_out[0] api_cell_out = api_out[1] named_api_hidden_out = named_api_out[0] named_api_cell_out = named_api_out[1] - np_hidden_out, np_cell_out = cudnn_step(step_input_np, - pre_hidden_np, pre_cell_np, - weight_ih, bias_ih, - weight_hh, bias_hh) - np.testing.assert_allclose(api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(api_cell_out.numpy(), - np_cell_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(named_api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(named_api_cell_out.numpy(), - np_cell_out, - rtol=1e-05, - atol=0) + np_hidden_out, np_cell_out = cudnn_step( + step_input_np, + pre_hidden_np, + pre_cell_np, + weight_ih, + bias_ih, + weight_hh, + bias_hh, + ) + np.testing.assert_allclose( + api_hidden_out.numpy(), np_hidden_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + api_cell_out.numpy(), np_cell_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + named_api_hidden_out.numpy(), np_hidden_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + named_api_cell_out.numpy(), np_cell_out, rtol=1e-05, atol=0 + ) class TestNonCudnnLSTM(unittest.TestCase): - def setUp(self): self.input_size = 100 self.hidden_size = 200 @@ -189,14 +199,16 @@ class TestNonCudnnLSTM(unittest.TestCase): with fluid.dygraph.guard(place): param_attr = fluid.ParamAttr(name="param_attr") bias_attr = fluid.ParamAttr(name="bias_attr") - named_cudnn_lstm = LSTMCell(self.hidden_size, - self.input_size, - param_attr, - bias_attr, - use_cudnn_impl=False) - cudnn_lstm = LSTMCell(self.hidden_size, - self.input_size, - use_cudnn_impl=False) + named_cudnn_lstm = LSTMCell( + self.hidden_size, + self.input_size, + param_attr, + bias_attr, + use_cudnn_impl=False, + ) + cudnn_lstm = LSTMCell( + self.hidden_size, self.input_size, use_cudnn_impl=False + ) param_list = cudnn_lstm.state_dict() named_param_list = 
named_cudnn_lstm.state_dict() @@ -207,59 +219,58 @@ class TestNonCudnnLSTM(unittest.TestCase): gate_b_name = "_bias" gate_w = param_list[gate_w_name].numpy() - gate_w = np.random.uniform(-0.1, 0.1, - size=gate_w.shape).astype('float64') + gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype( + 'float64' + ) param_list[gate_w_name].set_value(gate_w) named_param_list[gate_w_name].set_value(gate_w) gate_b = param_list[gate_b_name].numpy() - gate_b = np.random.uniform(-0.1, 0.1, - size=gate_b.shape).astype('float64') + gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype( + 'float64' + ) param_list[gate_b_name].set_value(gate_b) named_param_list[gate_b_name].set_value(gate_b) step_input_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float64') pre_hidden_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') pre_cell_np = np.random.uniform( - -0.1, 0.1, - (self.batch_size, self.hidden_size)).astype('float64') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float64') step_input_var = fluid.dygraph.to_variable(step_input_np) pre_hidden_var = fluid.dygraph.to_variable(pre_hidden_np) pre_cell_var = fluid.dygraph.to_variable(pre_cell_np) api_out = cudnn_lstm(step_input_var, pre_hidden_var, pre_cell_var) - named_api_out = named_cudnn_lstm(step_input_var, pre_hidden_var, - pre_cell_var) + named_api_out = named_cudnn_lstm( + step_input_var, pre_hidden_var, pre_cell_var + ) api_hidden_out = api_out[0] api_cell_out = api_out[1] named_api_hidden_out = named_api_out[0] named_api_cell_out = named_api_out[1] - np_hidden_out, np_cell_out = non_cudnn_step(step_input_np, - pre_hidden_np, - pre_cell_np, gate_w, - gate_b) - - np.testing.assert_allclose(api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(api_cell_out.numpy(), - np_cell_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(named_api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-05, - atol=0) - np.testing.assert_allclose(named_api_cell_out.numpy(), - np_cell_out, - rtol=1e-05, - atol=0) + np_hidden_out, np_cell_out = non_cudnn_step( + step_input_np, pre_hidden_np, pre_cell_np, gate_w, gate_b + ) + + np.testing.assert_allclose( + api_hidden_out.numpy(), np_hidden_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + api_cell_out.numpy(), np_cell_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + named_api_hidden_out.numpy(), np_hidden_out, rtol=1e-05, atol=0 + ) + np.testing.assert_allclose( + named_api_cell_out.numpy(), np_cell_out, rtol=1e-05, atol=0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cumprod_op.py b/python/paddle/fluid/tests/unittests/test_cumprod_op.py index c4c6724468f22c9bd2a8d2e6aaf350c5501d0c80..a6aea5e33fd3391f109097fd2cd63e7cd3b7f89a 100644 --- a/python/paddle/fluid/tests/unittests/test_cumprod_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumprod_op.py @@ -47,8 +47,9 @@ def cumprod_grad(x, y, dy, dx, shape, dim): else: elem = dy[pos] * y[index - inner_dim] if pos > index: - for m in range(index + inner_dim, pos + inner_dim, - inner_dim): + for m in range( + index + inner_dim, pos + inner_dim, inner_dim + ): elem *= x[m] elif pos < index: elem = 0 @@ -57,7 +58,6 @@ def cumprod_grad(x, y, dy, dx, shape, dim): # test function. 
class TestCumprod(OpTest): - def init_params(self): self.shape = (2, 3, 4, 5) self.zero_nums = [0, 10, 20, 30, int(np.prod(self.shape))] @@ -98,8 +98,9 @@ class TestCumprod(OpTest): if self.dtype == np.complex128 or self.dtype == np.complex64: reshape_x = np.conj(reshape_x) out_data = np.conj(out_data) - cumprod_grad(reshape_x, out_data, self.grad_out, self.grad_x, - self.shape, dim) + cumprod_grad( + reshape_x, out_data, self.grad_out, self.grad_x, self.shape, dim + ) self.grad_x = self.grad_x.reshape(self.shape) self.grad_out = self.grad_out.reshape(self.shape) @@ -119,37 +120,35 @@ class TestCumprod(OpTest): if self.dtype == np.float64: self.check_grad(['X'], 'Out', check_eager=True) else: - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) # test float32 case. class TestCumprod_float32(TestCumprod): - def init_dtype(self): self.dtype = np.float32 # test complex64 case. class TestCumprod_complex64(TestCumprod): - def init_dtype(self): self.dtype = np.complex64 # test complex128 case. class TestCumprod_complex128(TestCumprod): - def init_dtype(self): self.dtype = np.complex128 # test api. class TestCumprodAPI(unittest.TestCase): - def init_dtype(self): self.dtype = 'float64' self.shape = [2, 3, 10, 10] @@ -182,7 +181,6 @@ class TestCumprodAPI(unittest.TestCase): # test dynamic graph api. def test_dygraph_api(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index e63252c2c0897d2347668b3112573de408115514..b28c2863d187256891c67883221360c17cc6b07f 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -27,7 +27,6 @@ import paddle.fluid.layers as layers class TestCumsumOp(unittest.TestCase): - def run_cases(self): data_np = np.arange(12).reshape(3, 4) data = paddle.to_tensor(data_np) @@ -68,11 +67,17 @@ class TestCumsumOp(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - out = exe.run(feed={'X': data_np}, - fetch_list=[ - y.name, y2.name, y3.name, y4.name, y5.name, - y6.name - ]) + out = exe.run( + feed={'X': data_np}, + fetch_list=[ + y.name, + y2.name, + y3.name, + y4.name, + y5.name, + y6.name, + ], + ) z = np.cumsum(data_np) np.testing.assert_allclose(z, out[0], rtol=1e-05) @@ -109,7 +114,6 @@ class TestCumsumOp(unittest.TestCase): class TestSumOp1(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2} @@ -124,14 +128,14 @@ class TestSumOp1(OpTest): class TestSumOp2(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': -1, 'reverse': True} self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.outputs = { - 'Out': np.flip(np.flip(self.inputs['X'], axis=2).cumsum(axis=2), - axis=2) + 'Out': np.flip( + np.flip(self.inputs['X'], axis=2).cumsum(axis=2), axis=2 + ) } def test_check_output(self): @@ -142,7 +146,6 @@ class TestSumOp2(OpTest): class TestSumOp3(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 1} @@ -157,7 +160,6 @@ class TestSumOp3(OpTest): class TestSumOp4(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 0} @@ -172,7 +174,6 
@@ class TestSumOp4(OpTest): class TestSumOp5(OpTest): - def setUp(self): self.op_type = "cumsum" self.inputs = {'X': np.random.random((5, 20)).astype("float64")} @@ -186,7 +187,6 @@ class TestSumOp5(OpTest): class TestSumOp7(OpTest): - def setUp(self): self.op_type = "cumsum" self.inputs = {'X': np.random.random((100)).astype("float64")} @@ -200,7 +200,6 @@ class TestSumOp7(OpTest): class TestCumsumFP16(unittest.TestCase): - def check_main(self, x_np, dtype): paddle.disable_static() x = paddle.to_tensor(x_np.astype(dtype)) @@ -226,17 +225,19 @@ class TestCumsumFP16(unittest.TestCase): class TestSumOpExclusive1(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 65)).astype("float64") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -244,17 +245,19 @@ class TestSumOpExclusive1(OpTest): class TestSumOpExclusive2(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 888)).astype("float64") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -262,17 +265,19 @@ class TestSumOpExclusive2(OpTest): class TestSumOpExclusive3(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 888)).astype("float32") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -280,17 +285,19 @@ class TestSumOpExclusive3(OpTest): class TestSumOpExclusive4(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((1, 1, 3049)).astype("float64") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (1, 1, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((1, 1, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -298,17 +305,19 @@ class TestSumOpExclusive4(OpTest): class TestSumOpExclusive5(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True} a = np.random.random((4, 5, 3096)).astype("float64") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -316,17 +325,19 @@ class TestSumOpExclusive5(OpTest): class TestSumOpExclusiveFP16(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"} a = np.random.random((4, 5, 3096)).astype("float64") self.inputs = {'X': a} self.outputs = { - 'Out': - np.concatenate((np.zeros( - (4, 5, 1), 
dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), - axis=2) + 'Out': np.concatenate( + ( + np.zeros((4, 5, 1), dtype=np.float64), + a[:, :, :-1].cumsum(axis=2), + ), + axis=2, + ) } def test_check_output(self): @@ -334,7 +345,6 @@ class TestSumOpExclusiveFP16(OpTest): class TestSumOpReverseExclusive(OpTest): - def setUp(self): self.op_type = "cumsum" self.attrs = {'axis': 2, 'reverse': True, "exclusive": True} @@ -342,11 +352,13 @@ class TestSumOpReverseExclusive(OpTest): self.inputs = {'X': a} a = np.flip(a, axis=2) self.outputs = { - 'Out': - np.concatenate( - (np.flip(a[:, :, :-1].cumsum(axis=2), - axis=2), np.zeros((4, 5, 1), dtype=np.float64)), - axis=2) + 'Out': np.concatenate( + ( + np.flip(a[:, :, :-1].cumsum(axis=2), axis=2), + np.zeros((4, 5, 1), dtype=np.float64), + ), + axis=2, + ) } def test_check_output(self): @@ -354,7 +366,6 @@ class TestSumOpReverseExclusive(OpTest): class BadInputTest(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): @@ -366,21 +377,24 @@ class BadInputTest(unittest.TestCase): class TestTensorAxis(unittest.TestCase): - def setUp(self): paddle.seed(2022) self.temp_dir = tempfile.TemporaryDirectory() self.save_path = os.path.join(self.temp_dir.name, 'tensor_axis_cumsum') - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def test_dygraph(self): paddle.disable_static() x = np.random.randn(5, 6) axis = 1 np_out = np.cumsum(x, axis) - pd_out = paddle.cumsum(paddle.to_tensor(x), - axis=paddle.to_tensor([axis], dtype='int32')) + pd_out = paddle.cumsum( + paddle.to_tensor(x), axis=paddle.to_tensor([axis], dtype='int32') + ) np.testing.assert_allclose(np_out, pd_out.numpy()) def test_static_and_infer(self): @@ -398,7 +412,7 @@ class TestTensorAxis(unittest.TestCase): axis = paddle.full([1], 2, dtype='int64') out = paddle.cumsum(relu_out, axis=axis) loss = paddle.mean(out) - sgd = paddle.optimizer.SGD(learning_rate=0.) 
+ sgd = paddle.optimizer.SGD(learning_rate=0.0) sgd.minimize(paddle.mean(out)) exe = paddle.static.Executor(self.place) @@ -407,8 +421,9 @@ class TestTensorAxis(unittest.TestCase): # run infer paddle.static.save_inference_model(self.save_path, [x], [out], exe) - config = paddle_infer.Config(self.save_path + '.pdmodel', - self.save_path + '.pdiparams') + config = paddle_infer.Config( + self.save_path + '.pdmodel', self.save_path + '.pdiparams' + ) if paddle.is_compiled_with_cuda(): config.enable_use_gpu(100, 0) else: @@ -428,7 +443,6 @@ class TestTensorAxis(unittest.TestCase): class TestCumsumDoubleGradCheck(unittest.TestCase): - def cumsum_wrapper(self, x): return paddle.cumsum(x[0], 0) @@ -443,17 +457,13 @@ class TestCumsumDoubleGradCheck(unittest.TestCase): out = paddle.cumsum(data, 0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.cumsum_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.cumsum_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -465,7 +475,6 @@ class TestCumsumDoubleGradCheck(unittest.TestCase): class TestCumsumTripleGradCheck(unittest.TestCase): - def cumsum_wrapper(self, x): return paddle.cumsum(x[0], 0) @@ -480,17 +489,13 @@ class TestCumsumTripleGradCheck(unittest.TestCase): out = paddle.cumsum(data, 0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.cumsum_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.cumsum_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_custom_grad_input.py b/python/paddle/fluid/tests/unittests/test_custom_grad_input.py index 562290c41cfb68510c1b992efcbab61b418c64d0..74f63f03f451f32f5e1351897fcb92aa6504678b 100644 --- a/python/paddle/fluid/tests/unittests/test_custom_grad_input.py +++ b/python/paddle/fluid/tests/unittests/test_custom_grad_input.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTensorBackward(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -45,9 +44,9 @@ class TestTensorBackward(unittest.TestCase): x_grad = np.matmul(grad, y.T) - np.testing.assert_allclose(x_grad, - x_tensor.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_grad, x_tensor.grad.numpy(), rtol=1e-05 + ) def test_tensor_backward(self): with _test_eager_guard(): @@ -56,7 +55,6 @@ class TestTensorBackward(unittest.TestCase): class TestBackwardAPI(unittest.TestCase): - def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] @@ -77,14 +75,15 @@ class TestBackwardAPI(unittest.TestCase): z_tensor2 = paddle.matmul(x_tensor, y_tensor) grad_tensor = paddle.to_tensor(grad) - 
paddle.autograd.backward([z_tensor1, z_tensor2], - [grad_tensor, grad_tensor], True) + paddle.autograd.backward( + [z_tensor1, z_tensor2], [grad_tensor, grad_tensor], True + ) x_grad = np.matmul(grad, y.T) - np.testing.assert_allclose(x_grad * 2, - x_tensor.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_grad * 2, x_tensor.grad.numpy(), rtol=1e-05 + ) def test_backward_api(self): with _test_eager_guard(): @@ -108,9 +107,9 @@ class TestBackwardAPI(unittest.TestCase): x_grad = np.matmul(grad, y.T) - np.testing.assert_allclose(x_grad, - x_tensor.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_grad, x_tensor.grad.numpy(), rtol=1e-05 + ) def test_backward_single_tensor(self): with _test_eager_guard(): @@ -133,9 +132,9 @@ class TestBackwardAPI(unittest.TestCase): x_grad = np.matmul(grad, y.T) - np.testing.assert_allclose(x_grad, - x_tensor.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_grad, x_tensor.grad.numpy(), rtol=1e-05 + ) def test_backward_none_grad_tensor(self): with _test_eager_guard(): @@ -144,15 +143,21 @@ class TestBackwardAPI(unittest.TestCase): def func_backward_accumulator_with_init_grad(self): for dtype in self._dtypes: - x = np.random.random([ - 10, - ]).astype(dtype) - y_grad = np.random.random([ - 10, - ]).astype(dtype) - z_grad = np.random.random([ - 10, - ]).astype(dtype) + x = np.random.random( + [ + 10, + ] + ).astype(dtype) + y_grad = np.random.random( + [ + 10, + ] + ).astype(dtype) + z_grad = np.random.random( + [ + 10, + ] + ).astype(dtype) self._places = [paddle.CPUPlace()] for place in self._places: with dg.guard(place): @@ -162,16 +167,17 @@ class TestBackwardAPI(unittest.TestCase): y_grad_tensor = paddle.to_tensor(y_grad) z_grad_tensor = paddle.to_tensor(z_grad) - paddle.autograd.backward([y_tensor, z_tensor], - [y_grad_tensor, z_grad_tensor]) + paddle.autograd.backward( + [y_tensor, z_tensor], [y_grad_tensor, z_grad_tensor] + ) y = x**2 z = x**3 x_grad = 2 * x * (y_grad + 3 * y * y * z_grad) - np.testing.assert_allclose(x_grad, - x_tensor.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + x_grad, x_tensor.grad.numpy(), rtol=1e-05 + ) def test_backward_accumulator_with_init_grad(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_cvm_op.py b/python/paddle/fluid/tests/unittests/test_cvm_op.py index b4f8af82b3c3395edc732c0a07a30c397925cb44..a6197e7d7261104c07e1d7b1180cdcbea35fc473 100644 --- a/python/paddle/fluid/tests/unittests/test_cvm_op.py +++ b/python/paddle/fluid/tests/unittests/test_cvm_op.py @@ -52,7 +52,7 @@ def cvm_grad_compute(DY, CVM, item_width, use_cvm): class TestCVMOpWithLodTensor(OpTest): """ - Test cvm op with discrete one-hot labels. + Test cvm op with discrete one-hot labels. 
""" def setUp(self): @@ -64,11 +64,13 @@ class TestCVMOpWithLodTensor(OpTest): lod = [[1]] self.inputs = { - 'X': (np.random.uniform( - 0, 1, - [self.batch_size, self.item_width]).astype("float32"), lod), - 'CVM': - np.array([[0.6, 0.4]]).astype("float32"), + 'X': ( + np.random.uniform( + 0, 1, [self.batch_size, self.item_width] + ).astype("float32"), + lod, + ), + 'CVM': np.array([[0.6, 0.4]]).astype("float32"), } self.attrs = {'use_cvm': False} out = [] @@ -80,15 +82,16 @@ class TestCVMOpWithLodTensor(OpTest): self.check_output(check_dygraph=False) def test_check_grad(self): - user_grads = np.array( - [1.0 / (self.item_width - 2)] * self.item_width).reshape( - (self.batch_size, self.item_width)).astype("float32") + user_grads = ( + np.array([1.0 / (self.item_width - 2)] * self.item_width) + .reshape((self.batch_size, self.item_width)) + .astype("float32") + ) user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2) user_grads = [user_grads] - self.check_grad(['X'], - 'Y', - user_defined_grads=user_grads, - check_dygraph=False) + self.check_grad( + ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False + ) class TestCVMOpWithOutLodTensor1(OpTest): @@ -104,10 +107,14 @@ class TestCVMOpWithOutLodTensor1(OpTest): self.item_width = 11 input = np.random.uniform( - 0, 1, (self.batch_size, self.item_width)).astype('float32') + 0, 1, (self.batch_size, self.item_width) + ).astype('float32') output = cvm_compute(input, self.item_width, self.use_cvm) - cvm = np.array([[0.6, 0.4] * self.batch_size]).reshape( - (self.batch_size, 2)).astype("float32") + cvm = ( + np.array([[0.6, 0.4] * self.batch_size]) + .reshape((self.batch_size, 2)) + .astype("float32") + ) self.inputs = {'X': input, 'CVM': cvm} self.attrs = {'use_cvm': self.use_cvm} @@ -118,14 +125,16 @@ class TestCVMOpWithOutLodTensor1(OpTest): def test_check_grad(self): numel = self.batch_size * self.item_width - user_grads = np.array([1.0 / numel] * numel).reshape( - (self.batch_size, self.item_width)).astype("float32") + user_grads = ( + np.array([1.0 / numel] * numel) + .reshape((self.batch_size, self.item_width)) + .astype("float32") + ) user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2) user_grads = [user_grads] - self.check_grad(['X'], - 'Y', - user_defined_grads=user_grads, - check_dygraph=False) + self.check_grad( + ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False + ) class TestCVMOpWithOutLodTensor2(OpTest): @@ -141,10 +150,14 @@ class TestCVMOpWithOutLodTensor2(OpTest): self.item_width = 11 input = np.random.uniform( - 0, 1, (self.batch_size, self.item_width)).astype('float32') + 0, 1, (self.batch_size, self.item_width) + ).astype('float32') output = cvm_compute(input, self.item_width, self.use_cvm) - cvm = np.array([[0.6, 0.4] * self.batch_size]).reshape( - (self.batch_size, 2)).astype("float32") + cvm = ( + np.array([[0.6, 0.4] * self.batch_size]) + .reshape((self.batch_size, 2)) + .astype("float32") + ) self.inputs = {'X': input, 'CVM': cvm} self.attrs = {'use_cvm': self.use_cvm} @@ -155,15 +168,16 @@ class TestCVMOpWithOutLodTensor2(OpTest): def test_check_grad(self): numel = self.batch_size * self.item_width - user_grads = np.array( - [1.0 / (self.batch_size * (self.item_width - 2))] * numel).reshape( - (self.batch_size, self.item_width)).astype("float32") + user_grads = ( + np.array([1.0 / (self.batch_size * (self.item_width - 2))] * numel) + .reshape((self.batch_size, self.item_width)) + .astype("float32") + ) user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2) 
user_grads = [user_grads] - self.check_grad(['X'], - 'Y', - user_defined_grads=user_grads, - check_dygraph=False) + self.check_grad( + ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cyclic_cifar_dataset.py b/python/paddle/fluid/tests/unittests/test_cyclic_cifar_dataset.py index e014a25ab791bd20d46fd77293f6ccd1238b90d5..01a588c4058a4cc17bfc1d3e593086809ac893c9 100644 --- a/python/paddle/fluid/tests/unittests/test_cyclic_cifar_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_cyclic_cifar_dataset.py @@ -17,7 +17,6 @@ import unittest class TestCifar10(unittest.TestCase): - def test_main(self): reader = paddle.dataset.cifar.train10(cycle=False) sample_num = 0 diff --git a/python/paddle/fluid/tests/unittests/test_data.py b/python/paddle/fluid/tests/unittests/test_data.py index 9beb89205f56bfd0c80cfc8c971a0fbfb9076f19..bcb956e25c38a330d7b32fadffd2c343dad312ba 100644 --- a/python/paddle/fluid/tests/unittests/test_data.py +++ b/python/paddle/fluid/tests/unittests/test_data.py @@ -22,7 +22,6 @@ import paddle.fluid.core as core class TestApiDataError(unittest.TestCase): - def test_fluid_data(self): with program_guard(Program(), Program()): @@ -55,7 +54,6 @@ class TestApiDataError(unittest.TestCase): class TestApiStaticDataError(unittest.TestCase): - def test_fluid_dtype(self): with program_guard(Program(), Program()): x1 = paddle.static.data(name="x1", shape=[2, 25]) @@ -100,19 +98,16 @@ class TestApiStaticDataError(unittest.TestCase): class TestApiErrorWithDynamicMode(unittest.TestCase): - def test_error(self): with program_guard(Program(), Program()): paddle.disable_static() self.assertRaises(AssertionError, fluid.data, 'a', [2, 25]) - self.assertRaises(AssertionError, - fluid.layers.data, - 'b', - shape=[2, 25]) - self.assertRaises(AssertionError, - paddle.static.data, - 'c', - shape=[2, 25]) + self.assertRaises( + AssertionError, fluid.layers.data, 'b', shape=[2, 25] + ) + self.assertRaises( + AssertionError, paddle.static.data, 'c', shape=[2, 25] + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_data_generator.py b/python/paddle/fluid/tests/unittests/test_data_generator.py index 6134a1dd86cf3d49907c05764edc407d6218877c..b804e6a96025eeba6658137c7ae066282aded7c9 100644 --- a/python/paddle/fluid/tests/unittests/test_data_generator.py +++ b/python/paddle/fluid/tests/unittests/test_data_generator.py @@ -15,9 +15,7 @@ import paddle.distributed.fleet as fleet class MyMultiSlotDataGenerator(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -28,9 +26,7 @@ class MyMultiSlotDataGenerator(fleet.MultiSlotDataGenerator): class MyMultiSlotStringDataGenerator(fleet.MultiSlotStringDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -41,9 +37,7 @@ class MyMultiSlotStringDataGenerator(fleet.MultiSlotStringDataGenerator): class MyMultiSlotDataGenerator_error(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -54,9 +48,7 @@ class MyMultiSlotDataGenerator_error(fleet.MultiSlotDataGenerator): class MyMultiSlotDataGenerator_error_2(fleet.MultiSlotStringDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -67,9 +59,7 @@ class MyMultiSlotDataGenerator_error_2(fleet.MultiSlotStringDataGenerator): class 
MyMultiSlotDataGenerator_error_3(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -80,9 +70,7 @@ class MyMultiSlotDataGenerator_error_3(fleet.MultiSlotDataGenerator): class MyMultiSlotDataGenerator_error_4(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -93,9 +81,7 @@ class MyMultiSlotDataGenerator_error_4(fleet.MultiSlotDataGenerator): class MyMultiSlotDataGenerator_error_5(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -106,9 +92,7 @@ class MyMultiSlotDataGenerator_error_5(fleet.MultiSlotDataGenerator): class MyMultiSlotStringDataGenerator_zip(fleet.MultiSlotStringDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -121,9 +105,7 @@ class MyMultiSlotStringDataGenerator_zip(fleet.MultiSlotStringDataGenerator): class MyMultiSlotDataGenerator_zip(fleet.MultiSlotDataGenerator): - def generate_sample(self, line): - def data_iter(): for i in range(40): if i == 1: @@ -136,7 +118,6 @@ class MyMultiSlotDataGenerator_zip(fleet.MultiSlotDataGenerator): class TestMultiSlotDataGenerator(unittest.TestCase): - def test_MultiSlotDataGenerator_basic(self): my_ms_dg = MyMultiSlotDataGenerator() my_ms_dg.set_batch(1) @@ -144,7 +125,6 @@ class TestMultiSlotDataGenerator(unittest.TestCase): class TestMultiSlotStringDataGenerator(unittest.TestCase): - def test_MyMultiSlotStringDataGenerator_basic(self): my_ms_dg = MyMultiSlotStringDataGenerator() my_ms_dg.set_batch(1) @@ -152,7 +132,6 @@ class TestMultiSlotStringDataGenerator(unittest.TestCase): class TestMultiSlotDataGenerator_error(unittest.TestCase): - def test_MultiSlotDataGenerator_error(self): with self.assertRaises(ValueError): my_ms_dg = MyMultiSlotDataGenerator_error() @@ -161,7 +140,6 @@ class TestMultiSlotDataGenerator_error(unittest.TestCase): class TestMultiSlotDataGenerator_error_2(unittest.TestCase): - def test_MultiSlotDataGenerator_error(self): with self.assertRaises(ValueError): my_ms_dg = MyMultiSlotDataGenerator_error_2() @@ -170,7 +148,6 @@ class TestMultiSlotDataGenerator_error_2(unittest.TestCase): class TestMultiSlotDataGenerator_error_3(unittest.TestCase): - def test_MultiSlotDataGenerator_error(self): with self.assertRaises(ValueError): my_ms_dg = MyMultiSlotDataGenerator_error_3() @@ -179,7 +156,6 @@ class TestMultiSlotDataGenerator_error_3(unittest.TestCase): class TestMultiSlotDataGenerator_error_4(unittest.TestCase): - def test_MultiSlotDataGenerator_error(self): with self.assertRaises(ValueError): my_ms_dg = MyMultiSlotDataGenerator_error_4() @@ -188,7 +164,6 @@ class TestMultiSlotDataGenerator_error_4(unittest.TestCase): class TestMultiSlotDataGenerator_error_5(unittest.TestCase): - def test_MultiSlotDataGenerator_error(self): with self.assertRaises(ValueError): my_ms_dg = MyMultiSlotDataGenerator_error_5() @@ -197,7 +172,6 @@ class TestMultiSlotDataGenerator_error_5(unittest.TestCase): class TestMultiSlotStringDataGeneratorZip(unittest.TestCase): - def test_MultiSlotStringDataGenerator_zip(self): my_ms_dg = MyMultiSlotStringDataGenerator_zip() my_ms_dg.set_batch(1) @@ -205,7 +179,6 @@ class TestMultiSlotStringDataGeneratorZip(unittest.TestCase): class TestMultiSlotDataGeneratorZip(unittest.TestCase): - def test_MultiSlotDataGenerator_zip(self): my_ms_dg = MyMultiSlotDataGenerator_zip() my_ms_dg.set_batch(1) diff --git 
a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index cf56d57173481a48645d968158d3b350ef5f6f7e..1f32feb35276ea373bbfd34af203f007cb47f9cc 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -38,8 +38,9 @@ def _reference_testing(x, batch_size, batch_sum, batch_square_sum, slot_dim=-1): for j in range(0, x_shape[1], slot_dim): if x[i][j] <= -min_precision or x[i][j] >= min_precision: for k in range(0, slot_dim): - y[i][j + k] = (x[i][j + k] - - means_arr[j + k]) * scales_arr[j + k] + y[i][j + k] = ( + x[i][j + k] - means_arr[j + k] + ) * scales_arr[j + k] return y @@ -66,19 +67,19 @@ class TestDataNormOpInference(unittest.TestCase): self.use_mkldnn = False def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) - - def check_with_place(self, - place, - data_layout, - dtype, - shape, - slot_dim=-1, - enable_scale_and_shift=False): + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) + + def check_with_place( + self, + place, + data_layout, + dtype, + shape, + slot_dim=-1, + enable_scale_and_shift=False, + ): """ do forward and check @@ -109,24 +110,31 @@ class TestDataNormOpInference(unittest.TestCase): batch_square_sum = np.ones(scale_shape).astype(np.float32) batch_square_sum *= 1e4 - y_out = _reference_testing(x_val, batch_size, batch_sum, - batch_square_sum, slot_dim).astype(dtype) + y_out = _reference_testing( + x_val, batch_size, batch_sum, batch_square_sum, slot_dim + ).astype(dtype) scope = core.Scope() # create input - x_tensor = create_or_get_tensor(scope, "x_val", - OpTest.np_dtype_to_fluid_dtype(x_val), - place) + x_tensor = create_or_get_tensor( + scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place + ) batch_size_tensor = create_or_get_tensor( - scope, "batch_size", OpTest.np_dtype_to_fluid_dtype(batch_size), - place) + scope, + "batch_size", + OpTest.np_dtype_to_fluid_dtype(batch_size), + place, + ) batch_sum_tensor = create_or_get_tensor( - scope, "batch_sum", OpTest.np_dtype_to_fluid_dtype(batch_sum), - place) + scope, "batch_sum", OpTest.np_dtype_to_fluid_dtype(batch_sum), place + ) batch_square_sum_tensor = create_or_get_tensor( - scope, "batch_square_sum", - OpTest.np_dtype_to_fluid_dtype(batch_square_sum), place) + scope, + "batch_square_sum", + OpTest.np_dtype_to_fluid_dtype(batch_square_sum), + place, + ) # create output y_tensor = create_or_get_tensor(scope, "y_out", None, place) @@ -149,15 +157,17 @@ class TestDataNormOpInference(unittest.TestCase): epsilon=epsilon, use_mkldnn=self.use_mkldnn, slot_dim=slot_dim, - enable_scale_and_shift=False) + enable_scale_and_shift=False, + ) else: scale_w = np.ones(scale_shape).astype(np.float32) bias = np.zeros(scale_shape).astype(np.float32) scale_w_tensor = create_or_get_tensor( - scope, "scale_w", OpTest.np_dtype_to_fluid_dtype(scale_w), - place) + scope, "scale_w", OpTest.np_dtype_to_fluid_dtype(scale_w), place + ) bias_tensor = create_or_get_tensor( - scope, "bias", OpTest.np_dtype_to_fluid_dtype(bias), place) + scope, "bias", OpTest.np_dtype_to_fluid_dtype(bias), place + ) data_norm_op = Operator( "data_norm", # inputs @@ -175,17 +185,25 @@ class TestDataNormOpInference(unittest.TestCase): epsilon=epsilon, use_mkldnn=self.use_mkldnn, slot_dim=slot_dim, - enable_scale_and_shift=True) + enable_scale_and_shift=True, + ) 
data_norm_op.run(scope, place) # check inference result - self.__assert_close(y_tensor, - y_out, - "inference output are different at " + str(place) + - ", " + data_layout + ", " + str(np.dtype(dtype)) + - str(np.array(y_tensor)) + str(y_out), - atol=1e-3) + self.__assert_close( + y_tensor, + y_out, + "inference output are different at " + + str(place) + + ", " + + data_layout + + ", " + + str(np.dtype(dtype)) + + str(np.array(y_tensor)) + + str(y_out), + atol=1e-3, + ) def test_check_output(self): """ @@ -199,9 +217,11 @@ class TestDataNormOpInference(unittest.TestCase): self.check_with_place( place, data_format, - self.dtype, [2, 3], + self.dtype, + [2, 3], slot_dim=slot_dim, - enable_scale_and_shift=enable_scale_and_shift) + enable_scale_and_shift=enable_scale_and_shift, + ) class TestDataNormOp(OpTest): @@ -237,7 +257,7 @@ class TestDataNormOp(OpTest): "X": x_val, "BatchSize": batch_size, "BatchSum": batch_sum, - "BatchSquareSum": batch_square_sum + "BatchSquareSum": batch_square_sum, } self.outputs = {"Y": y, "Means": mean, "Scales": scale} self.attrs = {"epsilon": epsilon, "use_mkldnn": self.use_mkldnn} @@ -294,14 +314,14 @@ class TestDataNormOpWithEnableScaleAndShift(OpTest): "BatchSum": batch_sum, "BatchSquareSum": batch_square_sum, "scale_w": scale_w, - "bias": bias + "bias": bias, } self.outputs = {"Y": y, "Means": mean, "Scales": scale} self.attrs = { "epsilon": epsilon, "use_mkldnn": self.use_mkldnn, "slot_dim": slot_dim, - "enable_scale_and_shift": True + "enable_scale_and_shift": True, } def test_check_output(self): @@ -356,7 +376,7 @@ class TestDataNormOpWithoutEnableScaleAndShift(OpTest): "BatchSum": batch_sum, "BatchSquareSum": batch_square_sum, "scale_w": scale_w, - "bias": bias + "bias": bias, } self.outputs = {"Y": y, "Means": mean, "Scales": scale} self.attrs = {"epsilon": epsilon, "use_mkldnn": self.use_mkldnn} @@ -413,14 +433,14 @@ class TestDataNormOpWithEnableScaleAndShift_1(OpTest): "BatchSum": batch_sum, "BatchSquareSum": batch_square_sum, "scale_w": scale_w, - "bias": bias + "bias": bias, } self.outputs = {"Y": y, "Means": mean, "Scales": scale} self.attrs = { "epsilon": epsilon, "use_mkldnn": self.use_mkldnn, "slot_dim": slot_dim, - "enable_scale_and_shift": True + "enable_scale_and_shift": True, } def test_check_output(self): @@ -470,13 +490,13 @@ class TestDataNormOpWithSlotDim(OpTest): "X": x_val, "BatchSize": batch_size, "BatchSum": batch_sum, - "BatchSquareSum": batch_square_sum + "BatchSquareSum": batch_square_sum, } self.outputs = {"Y": y, "Means": mean, "Scales": scale} self.attrs = { "epsilon": epsilon, "use_mkldnn": self.use_mkldnn, - "slot_dim": slot_dim + "slot_dim": slot_dim, } def test_check_output(self): @@ -493,14 +513,13 @@ class TestDataNormOpWithSlotDim(OpTest): class TestDataNormOpErrorr(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x2 = fluid.layers.data(name='x2', shape=[3, 4], dtype="int32") - #self.assertRaises(TypeError, fluid.data_norm, x2) - fluid.layers.data_norm(input=x2, - param_attr={}, - enable_scale_and_shift=True) + # self.assertRaises(TypeError, fluid.data_norm, x2) + fluid.layers.data_norm( + input=x2, param_attr={}, enable_scale_and_shift=True + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_autotune.py b/python/paddle/fluid/tests/unittests/test_dataloader_autotune.py index 29169ca6d765408a50c24d9ecaba4f5c450fcce0..00630a398223f1337b1f39e9f3bbc80195c45f46 100755 --- a/python/paddle/fluid/tests/unittests/test_dataloader_autotune.py +++ 
b/python/paddle/fluid/tests/unittests/test_dataloader_autotune.py @@ -25,13 +25,12 @@ import os class RandomDataset(Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([10]).astype('float32') - label = np.random.randint(0, 10 - 1, (1, )).astype('int64') + label = np.random.randint(0, 10 - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -39,7 +38,6 @@ class RandomDataset(Dataset): class SimpleNet(nn.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.fc = nn.Linear(10, 10) @@ -49,20 +47,22 @@ class SimpleNet(nn.Layer): class TestAutoTune(unittest.TestCase): - def setUp(self): self.batch_size = 1 self.dataset = RandomDataset(10) def test_dataloader_use_autotune(self): paddle.incubate.autotune.set_config( - config={"dataloader": { - "enable": True, - "tuning_steps": 1, - }}) - loader = DataLoader(self.dataset, - batch_size=self.batch_size, - num_workers=0) + config={ + "dataloader": { + "enable": True, + "tuning_steps": 1, + } + } + ) + loader = DataLoader( + self.dataset, batch_size=self.batch_size, num_workers=0 + ) def test_dataloader_disable_autotune(self): config = {"dataloader": {"enable": False, "tuning_steps": 1}} @@ -71,29 +71,32 @@ class TestAutoTune(unittest.TestCase): tfile.close() paddle.incubate.autotune.set_config(tfile.name) os.remove(tfile.name) - loader = DataLoader(self.dataset, - batch_size=self.batch_size, - num_workers=2) - if (sys.platform == 'darwin' or sys.platform == 'win32'): + loader = DataLoader( + self.dataset, batch_size=self.batch_size, num_workers=2 + ) + if sys.platform == 'darwin' or sys.platform == 'win32': self.assertEqual(loader.num_workers, 0) else: self.assertEqual(loader.num_workers, 2) def test_distributer_batch_sampler_autotune(self): paddle.incubate.autotune.set_config( - config={"dataloader": { - "enable": True, - "tuning_steps": 1, - }}) + config={ + "dataloader": { + "enable": True, + "tuning_steps": 1, + } + } + ) batch_sampler = paddle.io.DistributedBatchSampler( - self.dataset, batch_size=self.batch_size) - loader = DataLoader(self.dataset, - batch_sampler=batch_sampler, - num_workers=2) + self.dataset, batch_size=self.batch_size + ) + loader = DataLoader( + self.dataset, batch_sampler=batch_sampler, num_workers=2 + ) class TestAutoTuneAPI(unittest.TestCase): - def test_set_config_warnings(self): with warnings.catch_warnings(record=True) as w: config = {"kernel": {"enable": 1, "tuning_range": True}} diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_dataset.py b/python/paddle/fluid/tests/unittests/test_dataloader_dataset.py index 6b3ab060c3dbd82b6215464bbf92ba54293c30c4..2c4bcfa444b0fc451c5f6297d92bade56566d199 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_dataset.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDatasetAbstract(unittest.TestCase): - def func_test_main(self): dataset = Dataset() try: @@ -44,20 +43,21 @@ class TestDatasetAbstract(unittest.TestCase): class TestDatasetWithDiffOutputPlace(unittest.TestCase): - def get_dataloader(self, num_workers): dataset = paddle.vision.datasets.MNIST( mode='test', - transform=transforms.Compose([ - transforms.CenterCrop(20), - transforms.RandomResizedCrop(14), - transforms.Normalize(), - transforms.ToTensor() - ])) - loader = paddle.io.DataLoader(dataset, - batch_size=32, - num_workers=num_workers, - shuffle=True) + transform=transforms.Compose( + [ 
+ transforms.CenterCrop(20), + transforms.RandomResizedCrop(14), + transforms.Normalize(), + transforms.ToTensor(), + ] + ), + ) + loader = paddle.io.DataLoader( + dataset, batch_size=32, num_workers=num_workers, shuffle=True + ) return loader def run_check_on_cpu(self): diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py index 3c03a289728a2f4ee0bbc254a438d8795d8ccbd0..fa0bac6c5bda905db3ce508fb669c158355f7f8d 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py @@ -25,7 +25,6 @@ def infinite_reader(): class TestDataLoaderEarlyReset(unittest.TestCase): - def setUp(self): self.stop_batch = 10 self.iterable = True @@ -45,9 +44,9 @@ class TestDataLoaderEarlyReset(unittest.TestCase): def create_data_loader(self): self.x = fluid.data(name='x', shape=[None, 32], dtype='float32') - return fluid.io.DataLoader.from_generator(feed_list=[self.x], - capacity=10, - iterable=self.iterable) + return fluid.io.DataLoader.from_generator( + feed_list=[self.x], capacity=10, iterable=self.iterable + ) def test_main(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -68,7 +67,7 @@ class TestDataLoaderEarlyReset(unittest.TestCase): batch_id = 0 if loader.iterable: for data in loader(): - x_val, = exe.run(prog, feed=data, fetch_list=[self.x]) + (x_val,) = exe.run(prog, feed=data, fetch_list=[self.x]) self.assertTrue(np.all(x_val == batch_id)) batch_id += 1 if batch_id >= self.stop_batch: @@ -89,7 +88,6 @@ class TestDataLoaderEarlyReset(unittest.TestCase): class TestDataLoaderEarlyReset2(TestDataLoaderEarlyReset): - def setUp(self): self.stop_batch = 20 self.iterable = False diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py index 5d07586c7e62c0f80c129ebc52e53c116518fba0..f2dd18c008082e755bec72af166c06fc991b0af1 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py @@ -19,7 +19,6 @@ import os def create_reader(shape, batch_number): - def __impl__(): idx = 0 for _ in range(batch_number): @@ -30,7 +29,6 @@ def create_reader(shape, batch_number): class DataLoaderKeepOrderTestBase(unittest.TestCase): - def initParameters(self): self.iterable = False self.break_num = 100 @@ -43,15 +41,17 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): def build_network(self, places): input_data = fluid.data(shape=self.shape, dtype='float32', name="input") - loader = fluid.io.DataLoader.from_generator(capacity=16, - feed_list=[input_data], - iterable=self.iterable) + loader = fluid.io.DataLoader.from_generator( + capacity=16, feed_list=[input_data], iterable=self.iterable + ) fc = fluid.layers.fc(input_data, size=10) loss = fluid.layers.reduce_mean(fc) - loader.set_batch_generator(create_reader(self.shape, self.batch_num), - places=places if loader.iterable else None) + loader.set_batch_generator( + create_reader(self.shape, self.batch_num), + places=places if loader.iterable else None, + ) return input_data, loss, loader @@ -65,12 +65,15 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): self.assertTrue((input_tensor == start_val).all()) start_val += 1 else: - self.assertEqual(list(input_data.shape), - [self.shape[0] * dev_cnt] + self.shape[1:]) + self.assertEqual( + list(input_data.shape), + [self.shape[0] * dev_cnt] + 
self.shape[1:], + ) start_val = dev_cnt * batch_id for idx in range(dev_cnt): - data_part = input_data[idx * self.shape[0]:(idx + 1) * - self.shape[0], :] + data_part = input_data[ + idx * self.shape[0] : (idx + 1) * self.shape[0], : + ] self.assertTrue((data_part == start_val).all()) start_val += 1 @@ -81,8 +84,8 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): place_list.extend([fluid.cuda_places(0)]) else: place_list.extend( - [fluid.cuda_places(0), - fluid.cuda_places([0, 1])]) + [fluid.cuda_places(0), fluid.cuda_places([0, 1])] + ) return place_list def test_main(self): @@ -107,11 +110,12 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): main_program = fluid.default_main_program() if use_compiled_program: main_program = fluid.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name, - places=places) + main_program + ).with_data_parallel(loss_name=loss.name, places=places) - max_batch_num = min(self.break_num, - int(self.batch_num / dev_cnt)) + max_batch_num = min( + self.break_num, int(self.batch_num / dev_cnt) + ) if loader.iterable: early_break = False @@ -123,9 +127,11 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): early_break = True break self.assertInputData(batch_id, data, dev_cnt) - fetch_val, = exe.run(program=main_program, - feed=data, - fetch_list=fetch_list) + (fetch_val,) = exe.run( + program=main_program, + feed=data, + fetch_list=fetch_list, + ) self.assertInputData(batch_id, fetch_val, dev_cnt) batch_id += 1 @@ -142,10 +148,12 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): if batch_id >= self.break_num: loader.reset() break - fetch_val, = exe.run(program=main_program, - fetch_list=fetch_list) - self.assertInputData(batch_id, fetch_val, - dev_cnt) + (fetch_val,) = exe.run( + program=main_program, fetch_list=fetch_list + ) + self.assertInputData( + batch_id, fetch_val, dev_cnt + ) batch_id += 1 except fluid.core.EOFException: loader.reset() @@ -154,35 +162,30 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): class IterableDataLoaderKeepOrderTest2(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 100 class IterableDataLoaderKeepOrderTest3(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = False self.break_num = 2 class IterableDataLoaderKeepOrderTest4(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 2 class IterableDataLoaderKeepOrderTest5(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = False self.break_num = 0 class IterableDataLoaderKeepOrderTest6(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 0 diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py index 236ffe19b6e2bb1737a5bb2fe9e166eb10ae661b..8e121ba401ee22fd9f2c17241247c7a310acd66d 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py @@ -22,7 +22,6 @@ keep_data_loader_order(False) def create_reader(shape, batch_number): - def __impl__(): idx = 0 for _ in range(batch_number): @@ -33,7 +32,6 @@ def create_reader(shape, batch_number): class DataLoaderKeepOrderTestBase(unittest.TestCase): - def initParameters(self): self.iterable = False self.break_num = 10000 @@ -49,23 +47,23 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): def build_network(self, places): input_data = 
fluid.data(shape=self.shape, dtype='float32', name="input") - loader = fluid.io.DataLoader.from_generator(capacity=16, - feed_list=[input_data], - iterable=self.iterable) + loader = fluid.io.DataLoader.from_generator( + capacity=16, feed_list=[input_data], iterable=self.iterable + ) fc = fluid.layers.fc(input_data, size=10) loss = fluid.layers.reduce_mean(fc) - loader.set_batch_generator(create_reader(self.shape, self.batch_num), - places=places if loader.iterable else None) + loader.set_batch_generator( + create_reader(self.shape, self.batch_num), + places=places if loader.iterable else None, + ) return input_data, loss, loader - def assertInputData(self, - batch_id, - input_data, - dev_cnt, - check_visited=True): + def assertInputData( + self, batch_id, input_data, dev_cnt, check_visited=True + ): if isinstance(input_data, list): self.assertTrue(len(input_data), dev_cnt) start_val = dev_cnt * batch_id @@ -82,12 +80,15 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): start_val += 1 else: - self.assertEqual(list(input_data.shape), - [self.shape[0] * dev_cnt] + self.shape[1:]) + self.assertEqual( + list(input_data.shape), + [self.shape[0] * dev_cnt] + self.shape[1:], + ) start_val = dev_cnt * batch_id for idx in range(dev_cnt): - data_part = input_data[idx * self.shape[0]:(idx + 1) * - self.shape[0], :] + data_part = input_data[ + idx * self.shape[0] : (idx + 1) * self.shape[0], : + ] num = data_part.flatten()[0] self.assertTrue((data_part == num).all()) if check_visited: @@ -103,8 +104,8 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): place_list.extend([fluid.cuda_places(0)]) else: place_list.extend( - [fluid.cuda_places(0), - fluid.cuda_places([0, 1])]) + [fluid.cuda_places(0), fluid.cuda_places([0, 1])] + ) return place_list def test_main(self): @@ -129,11 +130,12 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): main_program = fluid.default_main_program() if use_compiled_program: main_program = fluid.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name, - places=places) + main_program + ).with_data_parallel(loss_name=loss.name, places=places) - max_batch_num = min(self.break_num, - int(self.batch_num / dev_cnt)) + max_batch_num = min( + self.break_num, int(self.batch_num / dev_cnt) + ) if loader.iterable: early_break = False @@ -145,13 +147,14 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): if batch_id >= self.break_num: early_break = True break - self.assertInputData(batch_id, - data, - dev_cnt, - check_visited=False) - fetch_val, = exe.run(program=main_program, - feed=data, - fetch_list=fetch_list) + self.assertInputData( + batch_id, data, dev_cnt, check_visited=False + ) + (fetch_val,) = exe.run( + program=main_program, + feed=data, + fetch_list=fetch_list, + ) self.assertInputData(batch_id, fetch_val, dev_cnt) batch_id += 1 @@ -172,10 +175,12 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): if batch_id >= self.break_num: loader.reset() break - fetch_val, = exe.run(program=main_program, - fetch_list=fetch_list) - self.assertInputData(batch_id, fetch_val, - dev_cnt) + (fetch_val,) = exe.run( + program=main_program, fetch_list=fetch_list + ) + self.assertInputData( + batch_id, fetch_val, dev_cnt + ) batch_id += 1 except fluid.core.EOFException: loader.reset() @@ -187,35 +192,30 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): class IterableDataLoaderKeepOrderTest2(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 10000 class 
IterableDataLoaderKeepOrderTest3(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = False self.break_num = 2 class IterableDataLoaderKeepOrderTest4(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 2 class IterableDataLoaderKeepOrderTest5(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = False self.break_num = 0 class IterableDataLoaderKeepOrderTest6(DataLoaderKeepOrderTestBase): - def initParameters(self): self.iterable = True self.break_num = 0 diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index 8e09e8f75c90c8a89498a289f3db1a6f647e4e5a..5c0dc9db6a24443f8d901ec034ff91814799be5d 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -25,7 +25,7 @@ import unittest class TestDataset(unittest.TestCase): - """ TestCases for Dataset. """ + """TestCases for Dataset.""" def setUp(self): self.use_data_loader = False @@ -33,7 +33,7 @@ class TestDataset(unittest.TestCase): self.drop_last = False def test_dataset_create(self): - """ Testcase for dataset create. """ + """Testcase for dataset create.""" try: dataset = paddle.distributed.InMemoryDataset() except: @@ -99,22 +99,22 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset.update_settings(pipe_command="cat1") - dataset._init_distributed_settings(parse_ins_id=True, - parse_content=True, - fea_eval=True, - candidate_size=10000) + dataset._init_distributed_settings( + parse_ins_id=True, + parse_content=True, + fea_eval=True, + candidate_size=10000, + ) dataset.set_filelist([dump_a_path, dump_b_path]) dataset.load_into_memory() dataset.local_shuffle() @@ -136,7 +136,7 @@ class TestDataset(unittest.TestCase): temp_dir.cleanup() def test_dataset_config(self): - """ Testcase for dataset configuration. """ + """Testcase for dataset configuration.""" dataset = fluid.core.Dataset("MultiSlotDataset") dataset.set_thread_num(12) dataset.set_filelist(["a.txt", "b.txt", "c.txt"]) @@ -169,10 +169,12 @@ class TestDataset(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "afs:test_in_memory_dataset_run_a.txt") - filename2 = os.path.join(temp_dir.name, - "afs:test_in_memory_dataset_run_b.txt") + filename1 = os.path.join( + temp_dir.name, "afs:test_in_memory_dataset_run_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "afs:test_in_memory_dataset_run_b.txt" + ) with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -189,18 +191,19 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - download_cmd="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, + thread_num=3, + pipe_command="cat", + download_cmd="cat", + use_var=slots_vars, + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() paddle.enable_static() @@ -212,7 +215,8 @@ class TestDataset(unittest.TestCase): exe.run(startup_program) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(main_program, feed=data) @@ -230,10 +234,12 @@ class TestDataset(unittest.TestCase): Testcase for InMemoryDataset from create to run. """ temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset_run_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset_run_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset_run_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset_run_b.txt" + ) with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -250,17 +256,15 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset._init_distributed_settings(fea_eval=True, candidate_size=1) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() @@ -272,15 +276,17 @@ class TestDataset(unittest.TestCase): exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset) + exe.train_from_dataset( + fluid.default_main_program(), dataset + ) except Exception as e: self.assertTrue(False) @@ -291,10 +297,12 @@ class TestDataset(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset_masterpatch_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset_masterpatch_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset_masterpatch_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset_masterpatch_b.txt" + ) with open(filename1, "w") as f: data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -320,28 +328,27 @@ class TestDataset(unittest.TestCase): startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): for slot in slots[:2]: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) for slot in slots[2:]: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="float32", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=1, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars + ) dataset._init_distributed_settings(parse_ins_id=True) - dataset.set_filelist([ - "test_in_memory_dataset_masterpatch_a.txt", - "test_in_memory_dataset_masterpatch_b.txt" - ]) + dataset.set_filelist( + [ + "test_in_memory_dataset_masterpatch_a.txt", + "test_in_memory_dataset_masterpatch_b.txt", + ] + ) dataset.load_into_memory() dataset.local_shuffle() @@ -356,7 +363,7 @@ class TestDataset(unittest.TestCase): except Exception as e: self.assertTrue(False) - #dataset._set_merge_by_lineid(2) + # dataset._set_merge_by_lineid(2) dataset.update_settings(merge_size=2) dataset.dataset.merge_by_lineid() @@ -367,10 +374,12 @@ class TestDataset(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset_masterpatch1_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset_masterpatch1_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset_masterpatch1_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset_masterpatch1_b.txt" + ) with open(filename1, "w") as f: data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -394,34 +403,31 @@ class TestDataset(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - var1 = fluid.layers.data(name="slot1", - shape=[1], - dtype="int64", - lod_level=0) - var2 = fluid.layers.data(name="slot2", - shape=[1], - dtype="int64", - lod_level=0) - var3 = fluid.layers.data(name="slot3", - shape=[1], - dtype="float32", - lod_level=0) - var4 = fluid.layers.data(name="slot4", - shape=[1], - dtype="float32", - lod_level=0) + var1 = fluid.layers.data( + name="slot1", shape=[1], dtype="int64", lod_level=0 + ) + var2 = fluid.layers.data( + name="slot2", shape=[1], dtype="int64", lod_level=0 + ) + var3 = fluid.layers.data( + name="slot3", shape=[1], dtype="float32", lod_level=0 + ) + var4 = fluid.layers.data( + name="slot4", shape=[1], dtype="float32", lod_level=0 + ) slots_vars = [var1, var2, var3, var4] dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=1, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars + ) dataset._init_distributed_settings(parse_ins_id=True) - dataset.set_filelist([ - "test_in_memory_dataset_masterpatch1_a.txt", - "test_in_memory_dataset_masterpatch1_b.txt" - ]) + dataset.set_filelist( + [ + "test_in_memory_dataset_masterpatch1_a.txt", + "test_in_memory_dataset_masterpatch1_b.txt", + ] + ) dataset.load_into_memory() dataset.local_shuffle() @@ -448,10 +454,12 @@ class TestDataset(unittest.TestCase): Use float type id """ temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset_run_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset_run_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset_run_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset_run_b.txt" + ) with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -468,43 +476,44 @@ class TestDataset(unittest.TestCase): slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="float32", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() dataset.local_shuffle() - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(fluid.default_startup_program()) for i in range(2): try: exe.train_from_dataset(fluid.default_main_program(), dataset) - 
exe.train_from_dataset(fluid.default_main_program(), - dataset, - thread=1) - exe.train_from_dataset(fluid.default_main_program(), - dataset, - thread=2) - exe.train_from_dataset(fluid.default_main_program(), - dataset, - thread=2) - exe.train_from_dataset(fluid.default_main_program(), - dataset, - thread=3) - exe.train_from_dataset(fluid.default_main_program(), - dataset, - thread=4) + exe.train_from_dataset( + fluid.default_main_program(), dataset, thread=1 + ) + exe.train_from_dataset( + fluid.default_main_program(), dataset, thread=2 + ) + exe.train_from_dataset( + fluid.default_main_program(), dataset, thread=2 + ) + exe.train_from_dataset( + fluid.default_main_program(), dataset, thread=3 + ) + exe.train_from_dataset( + fluid.default_main_program(), dataset, thread=4 + ) except ImportError as e: pass except Exception as e: @@ -512,15 +521,17 @@ class TestDataset(unittest.TestCase): if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset) + exe.train_from_dataset( + fluid.default_main_program(), dataset + ) except Exception as e: self.assertTrue(False) @@ -536,20 +547,22 @@ class TestDataset(unittest.TestCase): dataset._set_parse_ins_id(False) dataset.load_into_memory() dataset.dataset.merge_by_lineid() - dataset.update_settings(batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=[], - fs_name="", - fs_ugi="", - download_cmd="cat", - merge_size=-1, - parse_ins_id=False, - parse_content=False, - fleet_send_batch_size=2, - fleet_send_sleep_seconds=2, - fea_eval=True) + dataset.update_settings( + batch_size=1, + thread_num=2, + input_type=1, + pipe_command="cat", + use_var=[], + fs_name="", + fs_ugi="", + download_cmd="cat", + merge_size=-1, + parse_ins_id=False, + parse_content=False, + fleet_send_batch_size=2, + fleet_send_sleep_seconds=2, + fea_eval=True, + ) fleet_ptr = fluid.core.Fleet() fleet_ptr.set_client2client_config(1, 1, 1) fleet_ptr.get_cache_threshold(0) @@ -579,40 +592,39 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.QueueDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset.set_filelist([filename1, filename2]) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset) + exe.train_from_dataset( + fluid.default_main_program(), dataset + ) except Exception as e: self.assertTrue(False) dataset2 = paddle.distributed.QueueDataset() - dataset2.init(batch_size=32, - thread_num=3, - pipe_command="cat", - 
use_var=slots_vars) + dataset2.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset.set_filelist([]) try: exe.train_from_dataset(fluid.default_main_program(), dataset2) @@ -648,33 +660,36 @@ class TestDataset(unittest.TestCase): slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="float32", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.QueueDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) dataset.set_filelist([filename1, filename2]) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset) + exe.train_from_dataset( + fluid.default_main_program(), dataset + ) except Exception as e: self.assertTrue(False) @@ -706,35 +721,41 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.data(name=slot, - shape=[None, 1], - dtype="int64", - lod_level=1) + var = fluid.data( + name=slot, shape=[None, 1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=1, + thread_num=2, + input_type=1, + pipe_command="cat", + use_var=slots_vars, + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset) + exe.train_from_dataset( + fluid.default_main_program(), dataset + ) except Exception as e: self.assertTrue(False) @@ -764,22 +785,25 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=1, - pipe_command="cat", - data_feed_type="SlotRecordInMemoryDataFeed", - use_var=slots_vars) - 
dataset._init_distributed_settings(parse_ins_id=True, - parse_content=True, - fea_eval=True, - candidate_size=10000) + dataset.init( + batch_size=32, + thread_num=1, + pipe_command="cat", + data_feed_type="SlotRecordInMemoryDataFeed", + use_var=slots_vars, + ) + dataset._init_distributed_settings( + parse_ins_id=True, + parse_content=True, + fea_eval=True, + candidate_size=10000, + ) dataset.set_filelist([dump_a_path, dump_b_path]) dataset.load_into_memory() @@ -827,10 +851,9 @@ class TestDatasetWithFetchHandler(unittest.TestCase): slots_vars = [] poolings = [] for slot in slots: - data = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) var = fluid.layers.cast(x=data, dtype='float32') pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE') @@ -850,10 +873,9 @@ class TestDatasetWithFetchHandler(unittest.TestCase): files(list): files of get_dataset """ dataset = paddle.distributed.QueueDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=inputs) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=inputs + ) dataset.set_filelist(files) return dataset @@ -862,10 +884,12 @@ class TestDatasetWithFetchHandler(unittest.TestCase): Test Dataset With Fetch Handler. TestCases. """ self.temp_dir = tempfile.TemporaryDirectory() - self.filename1 = os.path.join(self.temp_dir.name, - "test_queue_dataset_run_a.txt") - self.filename2 = os.path.join(self.temp_dir.name, - "test_queue_dataset_run_b.txt") + self.filename1 = os.path.join( + self.temp_dir.name, "test_queue_dataset_run_a.txt" + ) + self.filename2 = os.path.join( + self.temp_dir.name, "test_queue_dataset_run_b.txt" + ) with open(self.filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -940,9 +964,11 @@ class TestDatasetWithFetchHandler(unittest.TestCase): fh.help() try: - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset, - fetch_handler=fh) + exe.train_from_dataset( + program=fluid.default_main_program(), + dataset=dataset, + fetch_handler=fh, + ) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: @@ -953,10 +979,10 @@ class TestDatasetWithFetchHandler(unittest.TestCase): class TestDataset2(unittest.TestCase): - """ TestCases for Dataset. """ + """TestCases for Dataset.""" def setUp(self): - """ TestCases for Dataset. """ + """TestCases for Dataset.""" self.use_data_loader = False self.epoch_num = 10 self.drop_last = False @@ -966,10 +992,12 @@ class TestDataset2(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run_b.txt" + ) self.skipTest("parameter server will add pslib UT later") @@ -988,16 +1016,21 @@ class TestDataset2(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, + ) + with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = fluid.layers.data(\ - name=slot, shape=[1], dtype="float32", lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) - fake_cost = \ - fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) + fake_cost = fluid.layers.elementwise_sub( + slots_vars[0], slots_vars[-1] + ) fake_cost = paddle.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() @@ -1017,10 +1050,12 @@ class TestDataset2(unittest.TestCase): exe.run(startup_program) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, + thread_num=3, + pipe_command="cat", + use_var=slots_vars, + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() fleet._opt_info = None @@ -1033,10 +1068,12 @@ class TestDataset2(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run2_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run2_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run2_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run2_b.txt" + ) with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -1054,15 +1091,18 @@ class TestDataset2(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = fluid.layers.data(\ - name=slot, shape=[1], dtype="float32", lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) - fake_cost = \ - fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) + fake_cost = fluid.layers.elementwise_sub( + slots_vars[0], slots_vars[-1] + ) fake_cost = paddle.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() @@ -1073,17 +1113,15 @@ class TestDataset2(unittest.TestCase): print("warning: no mpi4py") adam = fluid.optimizer.Adam(learning_rate=0.000005) try: - adam = fleet.distributed_optimizer(adam, - strategy={ - "fs_uri": - "fs_uri_xxx", - "fs_user": - "fs_user_xxx", - "fs_passwd": - "fs_passwd_xxx", - "fs_hadoop_bin": - "fs_hadoop_bin_xxx" - }) + adam = fleet.distributed_optimizer( + adam, + strategy={ + "fs_uri": "fs_uri_xxx", + "fs_user": "fs_user_xxx", + "fs_passwd": "fs_passwd_xxx", + "fs_hadoop_bin": "fs_hadoop_bin_xxx", + }, + ) adam.minimize([fake_cost], [scope]) except AttributeError as e: print("warning: no mpi") @@ -1091,10 +1129,12 @@ class TestDataset2(unittest.TestCase): print("warning: no mpi4py") exe.run(startup_program) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, + thread_num=3, + pipe_command="cat", + use_var=slots_vars, + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() try: @@ -1132,7 +1172,7 @@ class TestDataset2(unittest.TestCase): dataset.global_shuffle() except: print("warning: catch expected error") - #dataset.get_pv_data_size() + # dataset.get_pv_data_size() dataset.get_memory_data_size() dataset.get_shuffle_data_size() dataset = paddle.distributed.QueueDataset() @@ -1161,10 +1201,12 @@ class TestDataset2(unittest.TestCase): Testcase for InMemoryDataset from create to run. 
""" temp_dir = tempfile.TemporaryDirectory() - filename1 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run2_a.txt") - filename2 = os.path.join(temp_dir.name, - "test_in_memory_dataset2_run2_b.txt") + filename1 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run2_a.txt" + ) + filename2 = os.path.join( + temp_dir.name, "test_in_memory_dataset2_run2_b.txt" + ) with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" @@ -1182,15 +1224,18 @@ class TestDataset2(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet + with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = fluid.layers.data(\ - name=slot, shape=[1], dtype="float32", lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="float32", lod_level=1 + ) slots_vars.append(var) - fake_cost = \ - fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) + fake_cost = fluid.layers.elementwise_sub( + slots_vars[0], slots_vars[-1] + ) fake_cost = paddle.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() @@ -1201,17 +1246,15 @@ class TestDataset2(unittest.TestCase): print("warning: no mpi4py") adam = fluid.optimizer.Adam(learning_rate=0.000005) try: - adam = fleet.distributed_optimizer(adam, - strategy={ - "fs_uri": - "fs_uri_xxx", - "fs_user": - "fs_user_xxx", - "fs_passwd": - "fs_passwd_xxx", - "fs_hadoop_bin": - "fs_hadoop_bin_xxx" - }) + adam = fleet.distributed_optimizer( + adam, + strategy={ + "fs_uri": "fs_uri_xxx", + "fs_user": "fs_user_xxx", + "fs_passwd": "fs_passwd_xxx", + "fs_hadoop_bin": "fs_hadoop_bin_xxx", + }, + ) adam.minimize([fake_cost], [scope]) except AttributeError as e: print("warning: no mpi") @@ -1219,10 +1262,12 @@ class TestDataset2(unittest.TestCase): print("warning: no mpi4py") exe.run(startup_program) dataset = paddle.distributed.fleet.BoxPSDataset() - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) + dataset.init( + batch_size=32, + thread_num=3, + pipe_command="cat", + use_var=slots_vars, + ) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() try: @@ -1232,14 +1277,16 @@ class TestDataset2(unittest.TestCase): fleet._opt_info = None fleet._fleet_ptr = None dataset = paddle.distributed.fleet.BoxPSDataset() - dataset.init(rank_offset="", - pv_batch_size=1, - fs_name="", - fs_ugi="", - data_feed_type="MultiSlotInMemoryDataFeed", - parse_logkey=True, - merge_by_sid=True, - enable_pv_merge=True) + dataset.init( + rank_offset="", + pv_batch_size=1, + fs_name="", + fs_ugi="", + data_feed_type="MultiSlotInMemoryDataFeed", + parse_logkey=True, + merge_by_sid=True, + enable_pv_merge=True, + ) d = paddle.distributed.fleet.DatasetBase() try: dataset._set_feed_type("MultiSlotInMemoryDataFeed") @@ -1270,7 +1317,7 @@ class TestDataset2(unittest.TestCase): dataset.global_shuffle() except: print("warning: catch expected error") - #dataset.get_pv_data_size() + # dataset.get_pv_data_size() dataset.get_memory_data_size() dataset.get_shuffle_data_size() temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py b/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py index 46ba155f017534afc8fc2bab17255ff884f5bc26..51463e7ff6287b9c4141f2d319540cf2c52502ef 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py +++ 
b/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py @@ -23,46 +23,57 @@ import tempfile import unittest import paddle.fluid.incubate.data_generator as dg -#paddle.enable_static() +# paddle.enable_static() # fluid.disable_dygraph() fluid.disable_dygraph() url_schema_len = 5 query_schema = [ - 'Q_query_basic', 'Q_query_phrase', 'Q_quq', 'Q_timelevel', - 'Q_context_title_basic1', 'Q_context_title_basic2', - 'Q_context_title_basic3', 'Q_context_title_basic4', - 'Q_context_title_basic5', 'Q_context_title_phrase1', - 'Q_context_title_phrase2', 'Q_context_title_phrase3', - 'Q_context_title_phrase4', 'Q_context_title_phrase5', 'Q_context_site1', - 'Q_context_site2', 'Q_context_site3', 'Q_context_site4', 'Q_context_site5' + 'Q_query_basic', + 'Q_query_phrase', + 'Q_quq', + 'Q_timelevel', + 'Q_context_title_basic1', + 'Q_context_title_basic2', + 'Q_context_title_basic3', + 'Q_context_title_basic4', + 'Q_context_title_basic5', + 'Q_context_title_phrase1', + 'Q_context_title_phrase2', + 'Q_context_title_phrase3', + 'Q_context_title_phrase4', + 'Q_context_title_phrase5', + 'Q_context_site1', + 'Q_context_site2', + 'Q_context_site3', + 'Q_context_site4', + 'Q_context_site5', ] class CTRDataset(dg.MultiSlotDataGenerator): - def __init__(self, mode): self.test = mode def generate_sample(self, line): - def reader(): ins = line.strip().split(';') label_pos_num = int(ins[1].split(' ')[0]) label_neg_num = int(ins[1].split(' ')[1]) - #query fea parse + # query fea parse bias = 2 query_len = 0 sparse_query_feature = [] for index in range(len(query_schema)): pos = index + bias sparse_query_feature.append( - [int(x) for x in ins[pos].split(' ')]) + [int(x) for x in ins[pos].split(' ')] + ) if index == 0: query_len = len(ins[pos].split(' ')) query_len = 1.0 / (1 + pow(2.7182818, 3 - 1.0 * query_len)) - #positive url fea parse + # positive url fea parse bias = 2 + len(query_schema) pos_url_feas = [] pos_click_feas = [] @@ -73,14 +84,24 @@ class CTRDataset(dg.MultiSlotDataGenerator): for index in range(url_schema_len - 1): pos = bias + k * (url_schema_len) + index pos_url_fea.append([int(x) for x in ins[pos].split(' ')]) - #click info - if (ins[pos + 1] == ''): + # click info + if ins[pos + 1] == '': continue item = ins[pos + 1].split(' ') if len(item) != 17: continue - stat_fea = [[max(float(item[i]), 0.0)] for i in range(len(item)) \ - if not (i == 5 or i == 9 or i == 13 or i == 14 or i ==15 or i ==16)] + stat_fea = [ + [max(float(item[i]), 0.0)] + for i in range(len(item)) + if not ( + i == 5 + or i == 9 + or i == 13 + or i == 14 + or i == 15 + or i == 16 + ) + ] pos_url_feas.append(pos_url_fea) pos_click_feas.append(stat_fea) @@ -90,7 +111,7 @@ class CTRDataset(dg.MultiSlotDataGenerator): pos_context_fea = [[query_serach], [query_len]] pos_context_feas.append(pos_context_fea) - #negative url fea parse + # negative url fea parse bias = 2 + len(query_schema) + label_pos_num * (url_schema_len) neg_url_feas = [] neg_click_feas = [] @@ -101,15 +122,25 @@ class CTRDataset(dg.MultiSlotDataGenerator): for index in range(url_schema_len - 1): pos = bias + k * (url_schema_len) + index neg_url_fea.append([int(x) for x in ins[pos].split(' ')]) - if (ins[pos + 1] == ''): + if ins[pos + 1] == '': continue item = ins[pos + 1].split(' ') - #zdf_tmp + # zdf_tmp if len(item) != 17: continue - #print ins[pos + 1] - stat_fea = [[max(float(item[i]), 0.0)] for i in range(len(item)) \ - if not (i == 5 or i == 9 or i == 13 or i == 14 or i == 15 or i == 16)] + # print ins[pos + 1] + stat_fea = [ + 
[max(float(item[i]), 0.0)] + for i in range(len(item)) + if not ( + i == 5 + or i == 9 + or i == 13 + or i == 14 + or i == 15 + or i == 16 + ) + ] neg_click_feas.append(stat_fea) neg_url_feas.append(neg_url_fea) @@ -119,7 +150,7 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_context_fea = [[query_serach], [query_len]] neg_context_feas.append(neg_context_fea) - #make train data + # make train data if self.test == 1: for p in range(len(pos_url_feas)): # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] @@ -129,9 +160,17 @@ class CTRDataset(dg.MultiSlotDataGenerator): pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] - yield zip(feature_name, [[1]] + sparse_query_feature + - pos_url_fea + pos_click_fea + pos_context_fea + - pos_url_fea + pos_click_fea + pos_context_fea) + yield zip( + feature_name, + [[1]] + + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + pos_url_fea + + pos_click_fea + + pos_context_fea, + ) for n in range(len(neg_url_feas)): feature_name = ["click"] for i in range(1, 54): @@ -139,18 +178,26 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - yield zip(feature_name, [[0]] + sparse_query_feature + - neg_url_fea + neg_click_fea + neg_context_fea + - neg_url_fea + neg_click_fea + neg_context_fea) + yield zip( + feature_name, + [[0]] + + sparse_query_feature + + neg_url_fea + + neg_click_fea + + neg_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) elif self.test == 0: for p in range(len(pos_url_feas)): - #feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] + # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] feature_name = ["click"] for i in range(1, 54): feature_name.append(str(i)) - #print("#######") - #print(feature_name) - #print("#######") + # print("#######") + # print(feature_name) + # print("#######") pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] @@ -160,21 +207,32 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - #print("q:", query_feas) - #print("pos:", pos_url_fea) - #print("neg:", neg_url_fea) + # print("q:", query_feas) + # print("pos:", pos_url_fea) + # print("neg:", neg_url_fea) # yield zip(feature_name[:3], sparse_query_feature[:3]) - yield list(zip(feature_name, [[1]] + sparse_query_feature + pos_url_fea + pos_click_fea + pos_context_fea + \ - neg_url_fea + neg_click_fea + neg_context_fea)) + yield list( + zip( + feature_name, + [[1]] + + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) + ) elif self.test == 2: for p in range(len(pos_url_feas)): - #feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] + # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] feature_name = ["click"] for i in range(1, 54): 
feature_name.append(str(i)) - #print("#######") - #print(feature_name) - #print("#######") + # print("#######") + # print(feature_name) + # print("#######") pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] @@ -184,21 +242,32 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - #print("q:", query_feas) - #print("pos:", pos_url_fea) - #print("neg:", neg_url_fea) + # print("q:", query_feas) + # print("pos:", pos_url_fea) + # print("neg:", neg_url_fea) # yield zip(feature_name[:3], sparse_query_feature[:3]) - yield list(zip(feature_name, [[1], [2]] + sparse_query_feature + pos_url_fea + pos_click_fea + pos_context_fea + \ - neg_url_fea + neg_click_fea + neg_context_fea)) + yield list( + zip( + feature_name, + [[1], [2]] + + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) + ) elif self.test == 3: for p in range(len(pos_url_feas)): - #feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] + # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] feature_name = ["click"] for i in range(1, 54): feature_name.append(str(i)) - #print("#######") - #print(feature_name) - #print("#######") + # print("#######") + # print(feature_name) + # print("#######") pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] @@ -208,21 +277,32 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - #print("q:", query_feas) - #print("pos:", pos_url_fea) - #print("neg:", neg_url_fea) + # print("q:", query_feas) + # print("pos:", pos_url_fea) + # print("neg:", neg_url_fea) # yield zip(feature_name[:3], sparse_query_feature[:3]) - yield list(zip(feature_name, [[1], [2.0]] + sparse_query_feature + pos_url_fea + pos_click_fea + pos_context_fea + \ - neg_url_fea + neg_click_fea + neg_context_fea)) + yield list( + zip( + feature_name, + [[1], [2.0]] + + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) + ) elif self.test == 4: for p in range(len(pos_url_feas)): - #feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] + # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] feature_name = ["click"] for i in range(1, 54): feature_name.append(str(i)) - #print("#######") - #print(feature_name) - #print("#######") + # print("#######") + # print(feature_name) + # print("#######") pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] @@ -232,21 +312,32 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - #print("q:", query_feas) - #print("pos:", pos_url_fea) - #print("neg:", neg_url_fea) + # print("q:", query_feas) + # print("pos:", pos_url_fea) + # print("neg:", neg_url_fea) # yield 
zip(feature_name[:3], sparse_query_feature[:3]) - yield list(zip(feature_name, [[], [2.0]] + sparse_query_feature + pos_url_fea + pos_click_fea + pos_context_fea + \ - neg_url_fea + neg_click_fea + neg_context_fea)) + yield list( + zip( + feature_name, + [[], [2.0]] + + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) + ) elif self.test == 5: for p in range(len(pos_url_feas)): - #feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] + # feature_name = ["click"] + query_schema + url_schema[:4] + click_info_schema[:11] + context_schema[:2] + url_schema[4:] + click_info_schema[11:] + context_schema[2:] feature_name = ["click"] for i in range(1, 54): feature_name.append(str(i)) - #print("#######") - #print(feature_name) - #print("#######") + # print("#######") + # print(feature_name) + # print("#######") pos_url_fea = pos_url_feas[p] pos_click_fea = pos_click_feas[p] pos_context_fea = pos_context_feas[p] @@ -256,18 +347,28 @@ class CTRDataset(dg.MultiSlotDataGenerator): neg_url_fea = neg_url_feas[n] neg_click_fea = neg_click_feas[n] neg_context_fea = neg_context_feas[n] - #print("q:", query_feas) - #print("pos:", pos_url_fea) - #print("neg:", neg_url_fea) + # print("q:", query_feas) + # print("pos:", pos_url_fea) + # print("neg:", neg_url_fea) # yield zip(feature_name[:3], sparse_query_feature[:3]) - yield list(zip(feature_name, sparse_query_feature + pos_url_fea + pos_click_fea + pos_context_fea + \ - neg_url_fea + neg_click_fea + neg_context_fea)) + yield list( + zip( + feature_name, + sparse_query_feature + + pos_url_fea + + pos_click_fea + + pos_context_fea + + neg_url_fea + + neg_click_fea + + neg_context_fea, + ) + ) return reader class TestDataset(unittest.TestCase): - """ TestCases for Dataset. 
""" + """TestCases for Dataset.""" def setUp(self): pass @@ -292,74 +393,80 @@ class TestDataset(unittest.TestCase): f.write(data) slot_data = [] - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) slot_data.append(label) # sprase_query_feat_names len_sparse_query = 19 for feat_name in range(1, len_sparse_query + 1): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='int64', - lod_level=1)) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + ) + ) # sparse_url_feat_names for feat_name in range(len_sparse_query + 1, len_sparse_query + 5): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='int64', - lod_level=1)) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + ) + ) # dense_feat_names for feat_name in range(len_sparse_query + 5, len_sparse_query + 16): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='float32')) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='float32' + ) + ) # context_feat_namess for feat_name in range(len_sparse_query + 16, len_sparse_query + 18): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='float32')) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='float32' + ) + ) # neg sparse_url_feat_names for feat_name in range(len_sparse_query + 18, len_sparse_query + 22): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='int64', - lod_level=1)) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + ) + ) # neg dense_feat_names for feat_name in range(len_sparse_query + 22, len_sparse_query + 33): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='float32')) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='float32' + ) + ) # neg context_feat_namess for feat_name in range(len_sparse_query + 33, len_sparse_query + 35): slot_data.append( - fluid.layers.data(name=str(feat_name), - shape=[1], - dtype='float32')) + fluid.layers.data( + name=str(feat_name), shape=[1], dtype='float32' + ) + ) dataset = paddle.distributed.InMemoryDataset() print("========================================") generator_class = CTRDataset(mode=0) try: - dataset._check_use_var_with_data_generator(slot_data, - generator_class, - dump_a_path) + dataset._check_use_var_with_data_generator( + slot_data, generator_class, dump_a_path + ) print("case 1: check passed!") except Exception as e: print("warning: catch expected error") @@ -370,9 +477,9 @@ class TestDataset(unittest.TestCase): print("========================================") generator_class = CTRDataset(mode=2) try: - dataset._check_use_var_with_data_generator(slot_data, - generator_class, - dump_a_path) + dataset._check_use_var_with_data_generator( + slot_data, generator_class, dump_a_path + ) except Exception as e: print("warning: case 2 catch expected error") print(e) @@ -382,9 +489,9 @@ class TestDataset(unittest.TestCase): print("========================================") generator_class = CTRDataset(mode=3) try: - dataset._check_use_var_with_data_generator(slot_data, - generator_class, - dump_a_path) + dataset._check_use_var_with_data_generator( + slot_data, generator_class, dump_a_path + ) except Exception as e: print("warning: case 3 catch expected 
error") print(e) @@ -394,9 +501,9 @@ class TestDataset(unittest.TestCase): print("========================================") generator_class = CTRDataset(mode=4) try: - dataset._check_use_var_with_data_generator(slot_data, - generator_class, - dump_a_path) + dataset._check_use_var_with_data_generator( + slot_data, generator_class, dump_a_path + ) except Exception as e: print("warning: case 4 catch expected error") print(e) @@ -406,9 +513,9 @@ class TestDataset(unittest.TestCase): print("========================================") generator_class = CTRDataset(mode=5) try: - dataset._check_use_var_with_data_generator(slot_data, - generator_class, - dump_a_path) + dataset._check_use_var_with_data_generator( + slot_data, generator_class, dump_a_path + ) except Exception as e: print("warning: case 5 catch expected error") print(e) diff --git a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py index ec61ee3a838125039d56f205dd6a21e811f90eaf..5c7c97991d63a99f6052a55c713b33faca4062f1 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py @@ -44,9 +44,12 @@ def write_reader_data_to_file(filename, reader): with open(filename, 'w') as fid: for instance_list in reader(): for i, instance in enumerate(instance_list): - instance = np.reshape(instance, [ - instance.size, - ]) + instance = np.reshape( + instance, + [ + instance.size, + ], + ) fid.write(str(instance.size) + ' ') fid.write(' '.join(map(str, instance))) fid.write(' ') @@ -55,21 +58,20 @@ def write_reader_data_to_file(filename, reader): def fake_reader(batch_size=BATCH_SIZE, batch_num=BATCH_NUM): - def __reader__(): iteration = BATCH_SIZE * BATCH_NUM iteration = int(iteration + BATCH_SIZE / 2) for _ in range(iteration): image = np.random.random(size=IMAGE_SHAPE).astype('float32') - label = np.random.random_integers(size=LABEL_SHAPE, low=0, - high=9).astype('int64') + label = np.random.random_integers( + size=LABEL_SHAPE, low=0, high=9 + ).astype('int64') yield image, label return __reader__ class DatasetLoaderTestBase(unittest.TestCase): - def setUp(self): self.dataset_name = "QueueDataset" self.drop_last = False @@ -82,12 +84,12 @@ class DatasetLoaderTestBase(unittest.TestCase): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data(name='image', - shape=IMAGE_SHAPE, - dtype='float32') - label = fluid.layers.data(name='label', - shape=LABEL_SHAPE, - dtype='int64') + image = fluid.layers.data( + name='image', shape=IMAGE_SHAPE, dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=LABEL_SHAPE, dtype='int64' + ) simple_fc_net_with_inputs(image, label) @@ -114,19 +116,23 @@ class DatasetLoaderTestBase(unittest.TestCase): filelist = [] if file_num > 1 and randomize_batch_num: random_delta_batch_size = np.random.random_integers( - low=-BATCH_NUM / 2, high=BATCH_NUM / 2, size=[file_num]) + low=-BATCH_NUM / 2, high=BATCH_NUM / 2, size=[file_num] + ) random_delta_batch_size[-1] = -int( - np.sum(random_delta_batch_size[0:-1])) + np.sum(random_delta_batch_size[0:-1]) + ) else: random_delta_batch_size = np.zeros(shape=[file_num]) for i in range(file_num): - filename = os.path.join(self.temp_dir.name, - 'dataset_test_{}.txt'.format(i)) + filename = os.path.join( + self.temp_dir.name, 'dataset_test_{}.txt'.format(i) + ) filelist.append(filename) write_reader_data_to_file( filename, - 
fake_reader(batch_num=BATCH_NUM + random_delta_batch_size[i])) + fake_reader(batch_num=BATCH_NUM + random_delta_batch_size[i]), + ) dataset.set_filelist(filelist) dataset._set_use_var(feeds) @@ -134,9 +140,9 @@ class DatasetLoaderTestBase(unittest.TestCase): if self.dataset_name == 'InMemoryDataset': dataset.load_into_memory() - dataloader = fluid.io.DataLoader.from_dataset(dataset=dataset, - places=places, - drop_last=self.drop_last) + dataloader = fluid.io.DataLoader.from_dataset( + dataset=dataset, places=places, drop_last=self.drop_last + ) prog = fluid.CompiledProgram(main_prog).with_data_parallel() exe = fluid.Executor(place) @@ -159,22 +165,29 @@ class DatasetLoaderTestBase(unittest.TestCase): batch_size = BATCH_SIZE self.assertEquals(image.shape()[1:], IMAGE_SHAPE) - self.assertTrue(image._place()._equals(places[idx]), - msg=get_place_string(image._place()) + - ' vs ' + get_place_string(places[idx])) + self.assertTrue( + image._place()._equals(places[idx]), + msg=get_place_string(image._place()) + + ' vs ' + + get_place_string(places[idx]), + ) if self.drop_last: self.assertEquals(image.shape()[0], BATCH_SIZE) else: - self.assertTrue(image.shape()[0] == BATCH_SIZE - or image.shape()[0] == BATCH_SIZE / 2) + self.assertTrue( + image.shape()[0] == BATCH_SIZE + or image.shape()[0] == BATCH_SIZE / 2 + ) self.assertEquals(label.shape()[1:], LABEL_SHAPE) self.assertTrue(label._place()._equals(places[idx])) if self.drop_last: self.assertEquals(label.shape()[0], BATCH_SIZE) else: - self.assertTrue(label.shape()[0] == BATCH_SIZE - or label.shape()[0] == BATCH_SIZE / 2) + self.assertTrue( + label.shape()[0] == BATCH_SIZE + or label.shape()[0] == BATCH_SIZE / 2 + ) self.assertEquals(image.shape()[0], label.shape()[0]) @@ -203,7 +216,6 @@ class DatasetLoaderTestBase(unittest.TestCase): class QueueDatasetTestWithoutDropLast(DatasetLoaderTestBase): - def setUp(self): self.dataset_name = "QueueDataset" self.drop_last = True @@ -211,7 +223,6 @@ class QueueDatasetTestWithoutDropLast(DatasetLoaderTestBase): class InMemoryDatasetTestWithoutDropLast(DatasetLoaderTestBase): - def setUp(self): self.dataset_name = "InMemoryDataset" self.drop_last = False @@ -219,7 +230,6 @@ class InMemoryDatasetTestWithoutDropLast(DatasetLoaderTestBase): class InMemoryDatasetTestWithDropLast(DatasetLoaderTestBase): - def setUp(self): self.dataset_name = "InMemoryDataset" self.drop_last = True diff --git a/python/paddle/fluid/tests/unittests/test_dataset_download.py b/python/paddle/fluid/tests/unittests/test_dataset_download.py index 06f015edf95611e344590c6a4a32539674db2e18..f1fba215b931f092406a82a988e9e20265a978c9 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_download.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_download.py @@ -18,7 +18,6 @@ from paddle.dataset.common import download, DATA_HOME, md5file class TestDataSetDownload(unittest.TestCase): - def setUp(self): flower_path = DATA_HOME + "/flowers/imagelabels.mat" diff --git a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py index 7e2e40e4ed04e3cc264c4cff780977f18a636f73..23beb7f1a638b80c7d251ed7972dcc0f503874ab 100644 --- a/python/paddle/fluid/tests/unittests/test_debugger.py +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -19,42 +19,41 @@ from paddle.fluid.framework import Program class TestDebugger(unittest.TestCase): - def test_debug_str(self): p = Program() b = p.current_block() - #selected_rows - b.create_var(name='selected_rows', - dtype="float32", - shape=[5, 
10], - type=core.VarDesc.VarType.SELECTED_ROWS) + # selected_rows + b.create_var( + name='selected_rows', + dtype="float32", + shape=[5, 10], + type=core.VarDesc.VarType.SELECTED_ROWS, + ) - #tensor array - b.create_var(name='tensor_array', - shape=[5, 10], - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY) + # tensor array + b.create_var( + name='tensor_array', + shape=[5, 10], + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ) - #operator - mul_x = b.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x") - mul_y = b.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = b.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - b.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) + # operator + mul_x = b.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x" + ) + mul_y = b.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = b.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + b.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) print(debugger.pprint_program_codes(p)) diff --git a/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py index 5025186349bddf60b1727f313c5bd81b14f8d548..dae5fd7b7b787460030127d6db976837c7feb0de 100644 --- a/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py @@ -18,8 +18,7 @@ from op_test import OpTest class TestDecayedAdagradOp1(OpTest): - ''' Test DecayedAdagrad operator with explicit attributes - ''' + '''Test DecayedAdagrad operator with explicit attributes''' def setUp(self): self.op_type = "decayed_adagrad" @@ -35,7 +34,7 @@ class TestDecayedAdagradOp1(OpTest): 'Param': param, 'Grad': grad, 'Moment': moment, - 'LearningRate': np.array([lr]).astype("float32") + 'LearningRate': np.array([lr]).astype("float32"), } self.attrs = {'decay': decay, 'epsilon': epsilon} @@ -50,8 +49,7 @@ class TestDecayedAdagradOp1(OpTest): class TestDecayedAdagradOp2(OpTest): - ''' Test DecayedAdagrad operator with default attributes - ''' + '''Test DecayedAdagrad operator with default attributes''' def setUp(self): self.op_type = "decayed_adagrad" @@ -67,7 +65,7 @@ class TestDecayedAdagradOp2(OpTest): 'Param': param, 'Grad': grad, 'Moment': moment, - 'LearningRate': np.array([lr]).astype("float32") + 'LearningRate': np.array([lr]).astype("float32"), } self.attrs = {'decay': decay, 'epsilon': epsilon} @@ -83,5 +81,6 @@ class TestDecayedAdagradOp2(OpTest): if __name__ == "__main__": import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index 9b5ed11d9b9bfde9b9430a1a4a89e58466448ab7..d7a5d3714317cdf94711e914e68244e12c8aa320 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -40,14 +40,16 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data(name='image', - shape=[784], - dtype='float32') + image = fluid.layers.data( + name='image', shape=[784], dtype='float32' + ) 
label = fluid.layers.data(name='label', shape=[1], dtype='int64') - py_reader = fluid.io.PyReader(feed_list=[image, label], - capacity=4, - iterable=not use_legacy_py_reader, - use_double_buffer=use_double_buffer) + py_reader = fluid.io.PyReader( + feed_list=[image, label], + capacity=4, + iterable=not use_legacy_py_reader, + use_double_buffer=use_double_buffer, + ) hidden = image for hidden_size in [10, 20, 30]: hidden = fluid.layers.fc( @@ -55,13 +57,16 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): size=hidden_size, act='tanh', bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0))) + initializer=fluid.initializer.Constant(value=1.0) + ), + ) - predict_label = fluid.layers.fc(hidden, - size=CLASS_NUM, - act='softmax') + predict_label = fluid.layers.fc( + hidden, size=CLASS_NUM, act='softmax' + ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -69,28 +74,35 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): class TestBase(unittest.TestCase): - - def run_main(self, use_legacy_py_reader, with_data_parallel, places, - use_double_buffer): + def run_main( + self, + use_legacy_py_reader, + with_data_parallel, + places, + use_double_buffer, + ): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, py_reader, loss = simple_fc_net( - places, use_legacy_py_reader, use_double_buffer) + places, use_legacy_py_reader, use_double_buffer + ) reader = paddle.batch(random_reader, batch_size=BATCH_SIZE) ps = places if use_double_buffer else fluid.cpu_places(len(places)) py_reader.decorate_sample_list_generator( - reader, places=ps if py_reader.iterable else None) + reader, places=ps if py_reader.iterable else None + ) exe = fluid.Executor(place=places[0]) exe.run(startup_prog) prog = fluid.CompiledProgram(main_prog) if with_data_parallel: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) step = 0 step_list = [] @@ -102,9 +114,11 @@ class TestBase(unittest.TestCase): py_reader.start() while True: try: - L, = exe.run(program=prog, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 except fluid.core.EOFException: @@ -123,10 +137,12 @@ class TestBase(unittest.TestCase): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(ps[i]) assert label._place()._equals(ps[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -134,7 +150,7 @@ class TestBase(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } return ret @@ -163,11 +179,13 @@ class TestBase(unittest.TestCase): use_legacy_py_reader=use_legacy_py_reader, with_data_parallel=with_data_parallel, places=p, - use_double_buffer=use_double_buffer) + use_double_buffer=use_double_buffer, + ) results.append(ret) if not use_double_buffer: diff = np.max( - np.abs(results[0]['loss'] - results[1]['loss'])) + np.abs(results[0]['loss'] - results[1]['loss']) + ) self.assertLess(diff, 1e-3) diff --git 
a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py index 0d271981276834740fd2c96e344766c9702de5ab..107792a892de341d07aead7c6b88b7cc236df0fc 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py @@ -19,7 +19,6 @@ import unittest class TestClass(unittest.TestCase): - def setUp(self): self.use_double_buffer = True self.use_py_reader = True @@ -34,7 +33,8 @@ class TestClass(unittest.TestCase): for _ in range(batch_size * batch_num): img = np.random.random(size=img_shape).astype('float32') label = np.random.random_integers( - low=0, high=9, size=label_shape).astype('int64') + low=0, high=9, size=label_shape + ).astype('int64') yield img, label reader = paddle.reader.cache(fake_reader) @@ -48,18 +48,20 @@ class TestClass(unittest.TestCase): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - img = fluid.layers.data(shape=img_shape, - dtype='float32', - name='image') - label = fluid.layers.data(shape=label_shape, - dtype='int64', - name='label') + img = fluid.layers.data( + shape=img_shape, dtype='float32', name='image' + ) + label = fluid.layers.data( + shape=label_shape, dtype='int64', name='label' + ) feeder = fluid.DataFeeder(feed_list=[img, label], place=p) use_double_buffer = self.use_double_buffer - if p._type() != fluid.CPUPlace()._type( - ) and not use_double_buffer: + if ( + p._type() != fluid.CPUPlace()._type() + and not use_double_buffer + ): use_double_buffer = True if self.use_py_reader: @@ -67,15 +69,17 @@ class TestClass(unittest.TestCase): feed_list=[img, label], capacity=4, iterable=True, - use_double_buffer=use_double_buffer) - py_reader.decorate_sample_list_generator(batch_reader, - places=p) + use_double_buffer=use_double_buffer, + ) + py_reader.decorate_sample_list_generator( + batch_reader, places=p + ) else: py_reader = fluid.io.DataLoader.from_generator( feed_list=[img, label], capacity=4, iterable=True, - use_double_buffer=use_double_buffer + use_double_buffer=use_double_buffer, ).set_sample_list_generator(batch_reader, places=p) for break_beforehand in [True, False]: @@ -97,7 +101,8 @@ class TestClass(unittest.TestCase): batch_id += 1 if break_beforehand and batch_id >= int( - batch_num / 2): + batch_num / 2 + ): break if break_beforehand: @@ -107,21 +112,18 @@ class TestClass(unittest.TestCase): class TestClass2(TestClass): - def setUp(self): self.use_double_buffer = False self.use_py_reader = True class TestClass3(TestClass): - def setUp(self): self.use_double_buffer = True self.use_py_reader = False class TestClass4(TestClass): - def setUp(self): self.use_double_buffer = False self.use_py_reader = False diff --git a/python/paddle/fluid/tests/unittests/test_default_dtype.py b/python/paddle/fluid/tests/unittests/test_default_dtype.py index 70c23d62073fcf899d1583b08611007c2d81e19b..ca95c820a718e5cdb413b9d87f819e009ba9cc1a 100644 --- a/python/paddle/fluid/tests/unittests/test_default_dtype.py +++ b/python/paddle/fluid/tests/unittests/test_default_dtype.py @@ -18,7 +18,6 @@ from paddle.framework import set_default_dtype, get_default_dtype class TestDefaultType(unittest.TestCase): - def check_default(self): self.assertEqual("float32", get_default_dtype()) @@ -45,7 +44,6 @@ class TestDefaultType(unittest.TestCase): class TestRaiseError(unittest.TestCase): - def test_error(self): 
self.assertRaises(TypeError, set_default_dtype, "int32") self.assertRaises(TypeError, set_default_dtype, np.int32) diff --git a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py index beae3124ced823ecfd98a813e6af639168057203..88686585853f5f2b069a706dc9941f22f44c4a98 100644 --- a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py +++ b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py @@ -12,12 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid.default_scope_funcs import enter_local_scope, find_var, get_cur_scope, leave_local_scope, scoped_function, var +from paddle.fluid.default_scope_funcs import ( + enter_local_scope, + find_var, + get_cur_scope, + leave_local_scope, + scoped_function, + var, +) import unittest class TestDefaultScopeFuncs(unittest.TestCase): - def test_cur_scope(self): self.assertIsNotNone(get_cur_scope()) @@ -33,7 +39,6 @@ class TestDefaultScopeFuncs(unittest.TestCase): leave_local_scope() def test_var_get_int(self): - def __new_scope__(): i = var("var_i") self.assertFalse(i.is_int()) diff --git a/python/paddle/fluid/tests/unittests/test_deform_conv2d.py b/python/paddle/fluid/tests/unittests/test_deform_conv2d.py index 53eea9ac4ab27bb9acf3170c8c273d8267885481..80bf06fa6db39ab6c5ef822b7fc297b681a47a9d 100644 --- a/python/paddle/fluid/tests/unittests/test_deform_conv2d.py +++ b/python/paddle/fluid/tests/unittests/test_deform_conv2d.py @@ -40,45 +40,70 @@ class TestDeformConv2D(TestCase): np.random.seed(1) paddle.seed(1) if isinstance(self.kernel_size, int): - filter_shape = (self.kernel_size, ) * 2 + filter_shape = (self.kernel_size,) * 2 else: filter_shape = tuple(self.kernel_size) self.filter_shape = filter_shape self.weight = np.random.uniform( - -1, 1, (self.out_channels, self.in_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.out_channels, self.in_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, (self.out_channels,)).astype( + self.dtype + ) - def out_size(in_size, pad_size, dilation_size, kernel_size, - stride_size): - return (in_size + 2 * pad_size - - (dilation_size * (kernel_size - 1) + 1)) / stride_size + 1 + def out_size( + in_size, pad_size, dilation_size, kernel_size, stride_size + ): + return ( + in_size + 2 * pad_size - (dilation_size * (kernel_size - 1) + 1) + ) / stride_size + 1 out_h = int( - out_size(self.spatial_shape[0], self.padding[0], self.dilation[0], - self.kernel_size[0], self.stride[0])) + out_size( + self.spatial_shape[0], + self.padding[0], + self.dilation[0], + self.kernel_size[0], + self.stride[0], + ) + ) out_w = int( - out_size(self.spatial_shape[1], self.padding[1], self.dilation[1], - self.kernel_size[1], self.stride[1])) + out_size( + self.spatial_shape[1], + self.padding[1], + self.dilation[1], + self.kernel_size[1], + self.stride[1], + ) + ) out_shape = (out_h, out_w) - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.offset_shape = (self.batch_size, self.deformable_groups * 2 * - filter_shape[0] * filter_shape[1]) + out_shape + self.offset_shape = ( + self.batch_size, + self.deformable_groups * 2 * filter_shape[0] * filter_shape[1], + ) + 
out_shape - self.mask_shape = (self.batch_size, self.deformable_groups * - filter_shape[0] * filter_shape[1]) + out_shape + self.mask_shape = ( + self.batch_size, + self.deformable_groups * filter_shape[0] * filter_shape[1], + ) + out_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) - self.offset = np.random.uniform(-1, 1, - self.offset_shape).astype(self.dtype) + self.offset = np.random.uniform(-1, 1, self.offset_shape).astype( + self.dtype + ) self.mask = np.random.uniform(-1, 1, self.mask_shape).astype(self.dtype) @@ -87,16 +112,34 @@ class TestDeformConv2D(TestCase): start = paddle.static.Program() paddle.enable_static() with paddle.static.program_guard(main, start): - x = paddle.static.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) + x = paddle.static.data( + "input", (-1, self.in_channels, -1, -1), dtype=self.dtype + ) offset = paddle.static.data( - "offset", (-1, self.deformable_groups * 2 * - self.filter_shape[0] * self.filter_shape[1], -1, -1), - dtype=self.dtype) + "offset", + ( + -1, + self.deformable_groups + * 2 + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) mask = paddle.static.data( - "mask", (-1, self.deformable_groups * self.filter_shape[0] * - self.filter_shape[1], -1, -1), - dtype=self.dtype) + "mask", + ( + -1, + self.deformable_groups + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) y_v1 = paddle.fluid.layers.deformable_conv( input=x, @@ -112,7 +155,8 @@ class TestDeformConv2D(TestCase): im2col_step=1, param_attr=I.Assign(self.weight), bias_attr=False if self.no_bias else I.Assign(self.bias), - modulated=False) + modulated=False, + ) y_v2 = paddle.fluid.layers.deformable_conv( input=x, @@ -127,17 +171,20 @@ class TestDeformConv2D(TestCase): deformable_groups=self.deformable_groups, im2col_step=1, param_attr=I.Assign(self.weight), - bias_attr=False if self.no_bias else I.Assign(self.bias)) + bias_attr=False if self.no_bias else I.Assign(self.bias), + ) exe = paddle.static.Executor(self.place) exe.run(start) - out_v1, out_v2 = exe.run(main, - feed={ - "input": self.input, - "offset": self.offset, - "mask": self.mask - }, - fetch_list=[y_v1, y_v2]) + out_v1, out_v2 = exe.run( + main, + feed={ + "input": self.input, + "offset": self.offset, + "mask": self.mask, + }, + fetch_list=[y_v1, y_v2], + ) return out_v1, out_v2 def dygraph_case_dcn(self): @@ -158,7 +205,8 @@ class TestDeformConv2D(TestCase): deformable_groups=self.deformable_groups, groups=self.groups, weight_attr=I.Assign(self.weight), - bias_attr=False if self.no_bias else I.Assign(self.bias)) + bias_attr=False if self.no_bias else I.Assign(self.bias), + ) y_v1 = deform_conv2d(x, offset) y_v2 = deform_conv2d(x, offset, mask) @@ -208,45 +256,70 @@ class TestDeformConv2DFunctional(TestCase): np.random.seed(1) paddle.seed(1) if isinstance(self.kernel_size, int): - filter_shape = (self.kernel_size, ) * 2 + filter_shape = (self.kernel_size,) * 2 else: filter_shape = tuple(self.kernel_size) self.filter_shape = filter_shape self.weight = np.random.uniform( - -1, 1, (self.out_channels, self.in_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.out_channels, self.in_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, 
(self.out_channels,)).astype( + self.dtype + ) - def out_size(in_size, pad_size, dilation_size, kernel_size, - stride_size): - return (in_size + 2 * pad_size - - (dilation_size * (kernel_size - 1) + 1)) / stride_size + 1 + def out_size( + in_size, pad_size, dilation_size, kernel_size, stride_size + ): + return ( + in_size + 2 * pad_size - (dilation_size * (kernel_size - 1) + 1) + ) / stride_size + 1 out_h = int( - out_size(self.spatial_shape[0], self.padding[0], self.dilation[0], - self.kernel_size[0], self.stride[0])) + out_size( + self.spatial_shape[0], + self.padding[0], + self.dilation[0], + self.kernel_size[0], + self.stride[0], + ) + ) out_w = int( - out_size(self.spatial_shape[1], self.padding[1], self.dilation[1], - self.kernel_size[1], self.stride[1])) + out_size( + self.spatial_shape[1], + self.padding[1], + self.dilation[1], + self.kernel_size[1], + self.stride[1], + ) + ) out_shape = (out_h, out_w) - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.offset_shape = (self.batch_size, self.deformable_groups * 2 * - filter_shape[0] * filter_shape[1]) + out_shape + self.offset_shape = ( + self.batch_size, + self.deformable_groups * 2 * filter_shape[0] * filter_shape[1], + ) + out_shape - self.mask_shape = (self.batch_size, self.deformable_groups * - filter_shape[0] * filter_shape[1]) + out_shape + self.mask_shape = ( + self.batch_size, + self.deformable_groups * filter_shape[0] * filter_shape[1], + ) + out_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) - self.offset = np.random.uniform(-1, 1, - self.offset_shape).astype(self.dtype) + self.offset = np.random.uniform(-1, 1, self.offset_shape).astype( + self.dtype + ) self.mask = np.random.uniform(-1, 1, self.mask_shape).astype(self.dtype) @@ -255,16 +328,34 @@ class TestDeformConv2DFunctional(TestCase): start = paddle.static.Program() paddle.enable_static() with paddle.static.program_guard(main, start): - x = paddle.static.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) + x = paddle.static.data( + "input", (-1, self.in_channels, -1, -1), dtype=self.dtype + ) offset = paddle.static.data( - "offset", (-1, self.deformable_groups * 2 * - self.filter_shape[0] * self.filter_shape[1], -1, -1), - dtype=self.dtype) + "offset", + ( + -1, + self.deformable_groups + * 2 + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) mask = paddle.static.data( - "mask", (-1, self.deformable_groups * self.filter_shape[0] * - self.filter_shape[1], -1, -1), - dtype=self.dtype) + "mask", + ( + -1, + self.deformable_groups + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) y_v1 = paddle.fluid.layers.deformable_conv( input=x, @@ -280,7 +371,8 @@ class TestDeformConv2DFunctional(TestCase): im2col_step=1, param_attr=I.Assign(self.weight), bias_attr=False if self.no_bias else I.Assign(self.bias), - modulated=False) + modulated=False, + ) y_v2 = paddle.fluid.layers.deformable_conv( input=x, @@ -295,17 +387,20 @@ class TestDeformConv2DFunctional(TestCase): deformable_groups=self.deformable_groups, im2col_step=1, param_attr=I.Assign(self.weight), - bias_attr=False if self.no_bias else I.Assign(self.bias)) + bias_attr=False if self.no_bias else I.Assign(self.bias), + ) exe = paddle.static.Executor(self.place) exe.run(start) - out_v1, 
out_v2 = exe.run(main, - feed={ - "input": self.input, - "offset": self.offset, - "mask": self.mask - }, - fetch_list=[y_v1, y_v2]) + out_v1, out_v2 = exe.run( + main, + feed={ + "input": self.input, + "offset": self.offset, + "mask": self.mask, + }, + fetch_list=[y_v1, y_v2], + ) return out_v1, out_v2 def dygraph_case_dcn(self): @@ -351,20 +446,38 @@ class TestDeformConv2DFunctional(TestCase): start = paddle.static.Program() paddle.enable_static() with paddle.static.program_guard(main, start): - x = paddle.static.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) + x = paddle.static.data( + "input", (-1, self.in_channels, -1, -1), dtype=self.dtype + ) offset = paddle.static.data( - "offset", (-1, self.deformable_groups * 2 * - self.filter_shape[0] * self.filter_shape[1], -1, -1), - dtype=self.dtype) + "offset", + ( + -1, + self.deformable_groups + * 2 + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) mask = paddle.static.data( - "mask", (-1, self.deformable_groups * self.filter_shape[0] * - self.filter_shape[1], -1, -1), - dtype=self.dtype) + "mask", + ( + -1, + self.deformable_groups + * self.filter_shape[0] + * self.filter_shape[1], + -1, + -1, + ), + dtype=self.dtype, + ) - weight = paddle.static.data("weight", - list(self.weight.shape), - dtype=self.dtype) + weight = paddle.static.data( + "weight", list(self.weight.shape), dtype=self.dtype + ) if not self.no_bias: bias = paddle.static.data("bias", [-1], dtype=self.dtype) @@ -400,7 +513,7 @@ class TestDeformConv2DFunctional(TestCase): "input": self.input, "offset": self.offset, "mask": self.mask, - "weight": self.weight + "weight": self.weight, } if not self.no_bias: feed_dict["bias"] = self.bias @@ -412,8 +525,10 @@ class TestDeformConv2DFunctional(TestCase): self.prepare() static_dcn_v1, static_dcn_v2 = self.static_graph_case_dcn() dy_dcn_v1, dy_dcn_v2 = self.dygraph_case_dcn() - new_static_dcn_v1, new_static_dcn_v2 = self.new_api_static_graph_case_dcn( - ) + ( + new_static_dcn_v1, + new_static_dcn_v2, + ) = self.new_api_static_graph_case_dcn() np.testing.assert_array_almost_equal(static_dcn_v1, dy_dcn_v1) np.testing.assert_array_almost_equal(static_dcn_v2, dy_dcn_v2) np.testing.assert_array_almost_equal(static_dcn_v1, new_static_dcn_v1) @@ -434,7 +549,6 @@ class TestDeformConv2DFunctional(TestCase): # testcases for DeformConv2D class TestDeformConv2DWithPadding(TestDeformConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -448,7 +562,6 @@ class TestDeformConv2DWithPadding(TestDeformConv2D): class TestDeformConv2DWithBias(TestDeformConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -462,7 +575,6 @@ class TestDeformConv2DWithBias(TestDeformConv2D): class TestDeformConv2DWithAsynPadding(TestDeformConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -476,7 +588,6 @@ class TestDeformConv2DWithAsynPadding(TestDeformConv2D): class TestDeformConv2DWithDilation(TestDeformConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -490,7 +601,6 @@ class TestDeformConv2DWithDilation(TestDeformConv2D): class TestDeformConv2DWithStride(TestDeformConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -504,7 +614,6 @@ class TestDeformConv2DWithStride(TestDeformConv2D): class TestDeformConv2DWithDeformable_Groups(TestDeformConv2D): - def setUp(self): self.in_channels = 5 self.out_channels = 5 @@ -518,7 +627,6 @@ class TestDeformConv2DWithDeformable_Groups(TestDeformConv2D): class 
TestDeformConv2DWithGroups(TestDeformConv2D): - def setUp(self): self.in_channels = 5 self.out_channels = 5 @@ -533,7 +641,6 @@ class TestDeformConv2DWithGroups(TestDeformConv2D): # testcases for deform_conv2d class TestDeformConv2DFunctionalWithPadding(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -547,7 +654,6 @@ class TestDeformConv2DFunctionalWithPadding(TestDeformConv2DFunctional): class TestDeformConv2DFunctionalWithBias(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -561,7 +667,6 @@ class TestDeformConv2DFunctionalWithBias(TestDeformConv2DFunctional): class TestDeformConv2DFunctionalWithAsynPadding(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -575,7 +680,6 @@ class TestDeformConv2DFunctionalWithAsynPadding(TestDeformConv2DFunctional): class TestDeformConv2DFunctionalWithDilation(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -589,7 +693,6 @@ class TestDeformConv2DFunctionalWithDilation(TestDeformConv2DFunctional): class TestDeformConv2DFunctionalWithStride(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -602,9 +705,9 @@ class TestDeformConv2DFunctionalWithStride(TestDeformConv2DFunctional): self.no_bias = False -class TestDeformConv2DFunctionalWithDeformable_Groups(TestDeformConv2DFunctional - ): - +class TestDeformConv2DFunctionalWithDeformable_Groups( + TestDeformConv2DFunctional +): def setUp(self): self.in_channels = 5 self.out_channels = 5 @@ -618,7 +721,6 @@ class TestDeformConv2DFunctionalWithDeformable_Groups(TestDeformConv2DFunctional class TestDeformConv2DFunctionalWithGroups(TestDeformConv2DFunctional): - def setUp(self): self.in_channels = 5 self.out_channels = 5 diff --git a/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py b/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py index 6c0d7360e58db68e865f3362e4042bf0a642c96b..f89ef98e95056b7cfcd25e38f9f7cf80511d832a 100644 --- a/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py @@ -61,8 +61,11 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): assert f_c * group == in_c assert np.mod(out_c, group) == 0 - stride, pad, dilation = conv_param['stride'], conv_param['pad'],\ - conv_param['dilation'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilation'], + ) out_h = 1 + (in_h + 2 * pad[0] - (dilation[0] * (f_h - 1) + 1)) // stride[0] out_w = 1 + (in_w + 2 * pad[1] - (dilation[1] * (f_w - 1) + 1)) // stride[1] assert out_h == in_h @@ -75,31 +78,47 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): for w in range(out_w): for kh in range(f_h): for kw in range(f_w): - offset_h_table = \ - offset[n, ::2, h, w].reshape(f_h, f_w) - offset_w_table = \ - offset[n, 1::2, h, w].reshape(f_h, f_w) - mask_table = \ - mask[n, :, h, w].reshape(f_h, f_w) + offset_h_table = offset[n, ::2, h, w].reshape( + f_h, f_w + ) + offset_w_table = offset[n, 1::2, h, w].reshape( + f_h, f_w + ) + mask_table = mask[n, :, h, w].reshape(f_h, f_w) offset_h = offset_h_table[kh, kw] offset_w = offset_w_table[kh, kw] val = 0 - im_h = h * stride[0] + kh * dilation[0] \ - + offset_h - pad[0] - im_w = w * stride[0] + kw * dilation[0] \ - + offset_w - pad[1] - if im_h > -1 and im_w > -1 and \ - im_h < in_h and im_w < in_h: - val = 
dmc_bilinear(input[n, c], in_h, in_w, - im_h, im_w) + im_h = ( + h * stride[0] + + kh * dilation[0] + + offset_h + - pad[0] + ) + im_w = ( + w * stride[0] + + kw * dilation[0] + + offset_w + - pad[1] + ) + if ( + im_h > -1 + and im_w > -1 + and im_h < in_h + and im_w < in_h + ): + val = dmc_bilinear( + input[n, c], in_h, in_w, im_h, im_w + ) val_out = val * mask_table[kh, kw] - col_buffer[n, c * f_h * f_w + kh * f_w + kw, - h * in_w + w] = val_out + col_buffer[ + n, c * f_h * f_w + kh * f_w + kw, h * in_w + w + ] = val_out out = np.zeros((in_n, group, int(out_c // group), out_h * out_w)) weight = filter.reshape(group, int(out_c // group), f_c * f_h * f_w) col_buffer = col_buffer.reshape( - (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w)) + (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w) + ) for n in range(in_n): for g in range(group): out[n, g] = np.matmul(weight[g], col_buffer[n, g]) @@ -107,23 +126,33 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): return out -def deform_conv2d_wrapper(x, - offset, - weight, - mask=None, - stride=1, - padding=0, - dilation=1, - deformable_groups=1, - groups=1, - im2col_step=1): - return paddle.vision.ops.deform_conv2d(x, offset, weight, None, stride, - padding, dilation, deformable_groups, - groups, mask) +def deform_conv2d_wrapper( + x, + offset, + weight, + mask=None, + stride=1, + padding=0, + dilation=1, + deformable_groups=1, + groups=1, + im2col_step=1, +): + return paddle.vision.ops.deform_conv2d( + x, + offset, + weight, + None, + stride, + padding, + dilation, + deformable_groups, + groups, + mask, + ) class TestModulatedDeformableConvOp(OpTest): - def setUp(self): self.python_api = deform_conv2d_wrapper self.op_type = "deformable_conv" @@ -135,7 +164,7 @@ class TestModulatedDeformableConvOp(OpTest): conv_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) @@ -143,15 +172,16 @@ class TestModulatedDeformableConvOp(OpTest): mask = 10 * np.random.random(self.mask_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = dconv_im2col_gemm(input, offset, mask, filter, self.groups, - conv_param) + output = dconv_im2col_gemm( + input, offset, mask, filter, self.groups, conv_param + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), 'Mask': OpTest.np_dtype_to_fluid_dtype(mask), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -167,10 +197,12 @@ class TestModulatedDeformableConvOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad({'Input', 'Offset', 'Mask', 'Filter'}, - 'Output', - max_relative_error=0.05, - check_eager=True) + self.check_grad( + {'Input', 'Offset', 'Mask', 'Filter'}, + 'Output', + max_relative_error=0.05, + check_eager=True, + ) def init_test_case(self): self.pad = [1, 1] @@ -182,15 +214,26 @@ class TestModulatedDeformableConvOp(OpTest): self.filter_size = [4, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + 
self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -204,7 +247,6 @@ class TestModulatedDeformableConvOp(OpTest): class TestWithStride(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [3, 3] self.stride = [2, 2] @@ -214,20 +256,30 @@ class TestWithStride(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] class TestWithDilation(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [1, 1] @@ -237,15 +289,26 @@ class TestWithDilation(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -253,7 +316,6 @@ class TestWithDilation(TestModulatedDeformableConvOp): class TestWith3x3(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -263,26 +325,35 @@ class TestWith3x3(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] class TestWithGroup(TestModulatedDeformableConvOp): - 
def init_group(self): self.groups = 2 class TestWithDouble(TestModulatedDeformableConvOp): - def init_type(self): self.dtype = np.float64 @@ -296,74 +367,77 @@ class TestWithDouble(TestModulatedDeformableConvOp): self.filter_size = [4, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups * self.filter_size[2] * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] class TestModulatedDeformableConvInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): paddle.enable_static() input = [1, 3, 32, 32] - offset = fluid.data(name='offset', - shape=[None, 3, 32, 32], - dtype='float32') - mask = fluid.data(name='mask', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask, - num_filters=4, - filter_size=1) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + mask = fluid.data( + name='mask', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, offset, mask, num_filters=4, filter_size=1 + ) self.assertRaises(TypeError, test_invalid_input) def test_invalid_offset(): paddle.enable_static() - input = fluid.data(name='input', - shape=[None, 3, 32, 32], - dtype='int32') - offset = fluid.data(name='offset', - shape=[None, 3, 32, 32], - dtype='float32') - mask = fluid.data(name='mask', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask, - num_filters=4, - filter_size=1) + input = fluid.data( + name='input', shape=[None, 3, 32, 32], dtype='int32' + ) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + mask = fluid.data( + name='mask', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, offset, mask, num_filters=4, filter_size=1 + ) self.assertRaises(TypeError, test_invalid_offset) def test_invalid_filter(): paddle.enable_static() - input = fluid.data(name='input_filter', - shape=[None, 3, 32, 32], - dtype='float32') - offset = fluid.data(name='offset_filter', - shape=[None, 3, 32, 32], - dtype='float32') - mask = fluid.data(name='mask_filter', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask, - num_filters=4, - filter_size=0) + input = fluid.data( + name='input_filter', shape=[None, 3, 32, 32], dtype='float32' + ) + offset = fluid.data( + name='offset_filter', shape=[None, 3, 32, 32], dtype='float32' + ) + mask = fluid.data( + name='mask_filter', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, offset, mask, num_filters=4, filter_size=0 + ) self.assertRaises(ValueError, test_invalid_filter) @@ -373,45 +447,39 @@ class TestModulatedDeformableConvInvalidInput(unittest.TestCase): class TestDeformConv2DAPI(unittest.TestCase): - def test_api(self): - def test_deform_conv2d_v1(): paddle.enable_static() - input = 
paddle.static.data(name='input_v1', - shape=[None, 3, 32, 32], - dtype='float32') - offset = paddle.static.data(name='offset_v1', - shape=[None, 4, 32, 32], - dtype='float32') - out = paddle.static.nn.deform_conv2d(input, - offset, - None, - num_filters=4, - filter_size=1) - - assert (out.shape == (-1, 4, 32, 32)) + input = paddle.static.data( + name='input_v1', shape=[None, 3, 32, 32], dtype='float32' + ) + offset = paddle.static.data( + name='offset_v1', shape=[None, 4, 32, 32], dtype='float32' + ) + out = paddle.static.nn.deform_conv2d( + input, offset, None, num_filters=4, filter_size=1 + ) + + assert out.shape == (-1, 4, 32, 32) test_deform_conv2d_v1() def test_deform_conv2d_v2(): paddle.enable_static() - input = paddle.static.data(name='input_v2', - shape=[None, 3, 32, 32], - dtype='float32') - offset = paddle.static.data(name='offset_v2', - shape=[None, 4, 32, 32], - dtype='float32') - mask = paddle.static.data(name='mask_v2', - shape=[None, 2, 32, 32], - dtype='float32') - out = paddle.static.nn.deform_conv2d(input, - offset, - mask, - num_filters=4, - filter_size=1) - - assert (out.shape == (-1, 4, 32, 32)) + input = paddle.static.data( + name='input_v2', shape=[None, 3, 32, 32], dtype='float32' + ) + offset = paddle.static.data( + name='offset_v2', shape=[None, 4, 32, 32], dtype='float32' + ) + mask = paddle.static.data( + name='mask_v2', shape=[None, 2, 32, 32], dtype='float32' + ) + out = paddle.static.nn.deform_conv2d( + input, offset, mask, num_filters=4, filter_size=1 + ) + + assert out.shape == (-1, 4, 32, 32) test_deform_conv2d_v2() diff --git a/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py b/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py index eacf6dba27312bdcd86c03560d04d192b742144a..19a81e21a6a11ebe6d39cc326cbed121abb6a49a 100644 --- a/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py +++ b/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py @@ -58,8 +58,11 @@ def dconv_im2col_gemm(input, offset, filter, group, conv_param): assert f_c * group == in_c assert np.mod(out_c, group) == 0 - stride, pad, dilation = conv_param['stride'], conv_param['pad'],\ - conv_param['dilation'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilation'], + ) out_h = 1 + (in_h + 2 * pad[0] - (dilation[0] * (f_h - 1) + 1)) // stride[0] out_w = 1 + (in_w + 2 * pad[1] - (dilation[1] * (f_w - 1) + 1)) // stride[1] assert out_h == in_h @@ -72,30 +75,47 @@ def dconv_im2col_gemm(input, offset, filter, group, conv_param): for w in range(out_w): for kh in range(f_h): for kw in range(f_w): - offset_h_table = \ - offset[n, ::2, h, w].reshape(f_h, f_w) - offset_w_table = \ - offset[n, 1::2, h, w].reshape(f_h, f_w) + offset_h_table = offset[n, ::2, h, w].reshape( + f_h, f_w + ) + offset_w_table = offset[n, 1::2, h, w].reshape( + f_h, f_w + ) offset_h = offset_h_table[kh, kw] offset_w = offset_w_table[kh, kw] val = 0 - im_h = h * stride[0] + kh * dilation[0] \ - + offset_h - pad[0] - im_w = w * stride[0] + kw * dilation[0] \ - + offset_w - pad[1] - if im_h > -1 and im_w > -1 and \ - im_h < in_h and im_w < in_h: - val = dmc_bilinear(input[n, c], in_h, in_w, - im_h, im_w) + im_h = ( + h * stride[0] + + kh * dilation[0] + + offset_h + - pad[0] + ) + im_w = ( + w * stride[0] + + kw * dilation[0] + + offset_w + - pad[1] + ) + if ( + im_h > -1 + and im_w > -1 + and im_h < in_h + and im_w < in_h + ): + val = dmc_bilinear( + input[n, c], in_h, in_w, im_h, im_w + ) val_out = val - col_buffer[n, c * f_h * f_w 
+ kh * f_w + kw, - h * in_w + w] = val_out + col_buffer[ + n, c * f_h * f_w + kh * f_w + kw, h * in_w + w + ] = val_out out = np.zeros((in_n, group, int(out_c // group), out_h * out_w)) weight = filter.reshape(group, int(out_c // group), f_c * f_h * f_w) col_buffer = col_buffer.reshape( - (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w)) + (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w) + ) for n in range(in_n): for g in range(group): out[n, g] = np.matmul(weight[g], col_buffer[n, g]) @@ -103,23 +123,33 @@ def dconv_im2col_gemm(input, offset, filter, group, conv_param): return out -def deform_conv2d_wrapper(x, - offset, - weight, - mask=None, - stride=1, - padding=0, - dilation=1, - deformable_groups=1, - groups=1, - im2col_step=1): - return paddle.vision.ops.deform_conv2d(x, offset, weight, None, stride, - padding, dilation, deformable_groups, - groups, mask) +def deform_conv2d_wrapper( + x, + offset, + weight, + mask=None, + stride=1, + padding=0, + dilation=1, + deformable_groups=1, + groups=1, + im2col_step=1, +): + return paddle.vision.ops.deform_conv2d( + x, + offset, + weight, + None, + stride, + padding, + dilation, + deformable_groups, + groups, + mask, + ) class TestModulatedDeformableConvOp(OpTest): - def setUp(self): self.python_api = deform_conv2d_wrapper self.op_type = "deformable_conv_v1" @@ -131,20 +161,21 @@ class TestModulatedDeformableConvOp(OpTest): conv_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) offset = 10 * np.random.random(self.offset_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = dconv_im2col_gemm(input, offset, filter, self.groups, - conv_param) + output = dconv_im2col_gemm( + input, offset, filter, self.groups, conv_param + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -160,17 +191,21 @@ class TestModulatedDeformableConvOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['Input', 'Offset', 'Filter'], - 'Output', - max_relative_error=0.05, - check_eager=True) + self.check_grad( + ['Input', 'Offset', 'Filter'], + 'Output', + max_relative_error=0.05, + check_eager=True, + ) def test_check_grad_no_filter(self): - self.check_grad(['Input', 'Offset'], - 'Output', - max_relative_error=0.1, - no_grad_set=set(['Filter']), - check_eager=True) + self.check_grad( + ['Input', 'Offset'], + 'Output', + max_relative_error=0.1, + no_grad_set=set(['Filter']), + check_eager=True, + ) def init_test_case(self): self.pad = [1, 1] @@ -182,10 +217,17 @@ class TestModulatedDeformableConvOp(OpTest): self.filter_size = [4, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -199,7 +241,6 @@ class TestModulatedDeformableConvOp(OpTest): class TestWithStride(TestModulatedDeformableConvOp): - def init_test_case(self): 
self.pad = [3, 3] self.stride = [2, 2] @@ -209,15 +250,21 @@ class TestWithStride(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] class TestWithDilation(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [1, 1] @@ -227,10 +274,17 @@ class TestWithDilation(TestModulatedDeformableConvOp): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -238,7 +292,6 @@ class TestWithDilation(TestModulatedDeformableConvOp): class TestWith1x1(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -248,15 +301,21 @@ class TestWith1x1(TestModulatedDeformableConvOp): self.filter_size = [40, f_c, 1, 1] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] class TestWithGroup(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -267,10 +326,17 @@ class TestWithGroup(TestModulatedDeformableConvOp): self.filter_size = [4, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] def init_group(self): @@ -278,42 +344,43 @@ class TestWithGroup(TestModulatedDeformableConvOp): class TestWithDouble(TestModulatedDeformableConvOp): - def init_type(self): self.dtype = np.float64 class TestModulatedDeformableConvV1InvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): input = [1, 3, 32, 32] - offset = fluid.data(name='offset', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask=None, - num_filters=4, - filter_size=1, - modulated=False) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, + offset, + mask=None, + num_filters=4, + filter_size=1, + modulated=False, + ) self.assertRaises(TypeError, test_invalid_input) def test_invalid_offset(): - input = fluid.data(name='input', - shape=[None, 3, 32, 32], - dtype='int32') - offset = fluid.data(name='offset', - shape=[None, 3, 32, 32], - dtype='float32') - loss = 
fluid.layers.deformable_conv(input, - offset, - mask=None, - num_filters=4, - filter_size=1, - modulated=False) + input = fluid.data( + name='input', shape=[None, 3, 32, 32], dtype='int32' + ) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, + offset, + mask=None, + num_filters=4, + filter_size=1, + modulated=False, + ) self.assertRaises(TypeError, test_invalid_offset) diff --git a/python/paddle/fluid/tests/unittests/test_deformable_psroi_pooling.py b/python/paddle/fluid/tests/unittests/test_deformable_psroi_pooling.py index 4bf8a99137b416c1b9732de458b3b067146eb203..8dbe2dfa32252bea4011b216a3322753e83cee02 100644 --- a/python/paddle/fluid/tests/unittests/test_deformable_psroi_pooling.py +++ b/python/paddle/fluid/tests/unittests/test_deformable_psroi_pooling.py @@ -24,9 +24,17 @@ def set_input(input, rois, trans): return inputs -def set_attrs(no_trans, spatial_scale, output_channels, group_size, - pooled_height, pooled_width, part_size, sample_per_part, - trans_std): +def set_attrs( + no_trans, + spatial_scale, + output_channels, + group_size, + pooled_height, + pooled_width, + part_size, + sample_per_part, + trans_std, +): attrs = { 'no_trans': no_trans, 'spatial_scale': spatial_scale, @@ -36,7 +44,7 @@ def set_attrs(no_trans, spatial_scale, output_channels, group_size, 'pooled_width': pooled_width, 'part_size': part_size, 'sample_per_part': sample_per_part, - 'trans_std': trans_std + 'trans_std': trans_std, } return attrs @@ -44,13 +52,12 @@ def set_attrs(no_trans, spatial_scale, output_channels, group_size, def set_outputs(output, top_count): outputs = { 'Output': output.astype('float32'), - 'TopCount': top_count.astype('float32') + 'TopCount': top_count.astype('float32'), } return outputs class TestDeformablePSROIPoolOp(OpTest): - def set_data(self): self.start_test1() self.start_test2() @@ -77,9 +84,17 @@ class TestDeformablePSROIPoolOp(OpTest): sample_per_part = self.sample_per_part trans_std = self.trans_std - self.attrs = set_attrs(no_trans, spatial_scale, output_channels, - group_size, pooled_height, pooled_width, - part_size, sample_per_part, trans_std) + self.attrs = set_attrs( + no_trans, + spatial_scale, + output_channels, + group_size, + pooled_height, + pooled_width, + part_size, + sample_per_part, + trans_std, + ) output = self.out.astype('float32') top_count = self.top_count.astype('float32') @@ -105,9 +120,17 @@ class TestDeformablePSROIPoolOp(OpTest): sample_per_part = self.sample_per_part trans_std = self.trans_std - self.attrs = set_attrs(no_trans, spatial_scale, output_channels, - group_size, pooled_height, pooled_width, - part_size, sample_per_part, trans_std) + self.attrs = set_attrs( + no_trans, + spatial_scale, + output_channels, + group_size, + pooled_height, + pooled_width, + part_size, + sample_per_part, + trans_std, + ) output = self.out.astype('float32') top_count = self.top_count.astype('float32') @@ -133,9 +156,17 @@ class TestDeformablePSROIPoolOp(OpTest): sample_per_part = self.sample_per_part trans_std = self.trans_std - self.attrs = set_attrs(no_trans, spatial_scale, output_channels, - group_size, pooled_height, pooled_width, - part_size, sample_per_part, trans_std) + self.attrs = set_attrs( + no_trans, + spatial_scale, + output_channels, + group_size, + pooled_height, + pooled_width, + part_size, + sample_per_part, + trans_std, + ) output = self.out.astype('float32') top_count = self.top_count.astype('float32') @@ -161,9 +192,17 @@ class 
TestDeformablePSROIPoolOp(OpTest): sample_per_part = self.sample_per_part trans_std = self.trans_std - self.attrs = set_attrs(no_trans, spatial_scale, output_channels, - group_size, pooled_height, pooled_width, - part_size, sample_per_part, trans_std) + self.attrs = set_attrs( + no_trans, + spatial_scale, + output_channels, + group_size, + pooled_height, + pooled_width, + part_size, + sample_per_part, + trans_std, + ) output = self.out.astype('float32') top_count = self.top_count.astype('float32') @@ -175,7 +214,10 @@ class TestDeformablePSROIPoolOp(OpTest): self.height = 12 self.width = 12 self.input_dim = [ - self.batch_size, self.channels, self.height, self.width + self.batch_size, + self.channels, + self.height, + self.width, ] self.no_trans = False self.spatial_scale = 1.0 / 4.0 @@ -194,7 +236,10 @@ class TestDeformablePSROIPoolOp(OpTest): self.height = 12 self.width = 12 self.input_dim = [ - self.batch_size, self.channels, self.height, self.width + self.batch_size, + self.channels, + self.height, + self.width, ] self.no_trans = True self.spatial_scale = 1.0 / 2.0 @@ -213,7 +258,10 @@ class TestDeformablePSROIPoolOp(OpTest): self.height = 12 self.width = 12 self.input_dim = [ - self.batch_size, self.channels, self.height, self.width + self.batch_size, + self.channels, + self.height, + self.width, ] self.no_trans = False self.spatial_scale = 1.0 / 4.0 @@ -232,7 +280,10 @@ class TestDeformablePSROIPoolOp(OpTest): self.height = 12 self.width = 12 self.input_dim = [ - self.batch_size, self.channels, self.height, self.width + self.batch_size, + self.channels, + self.height, + self.width, ] self.no_trans = True self.spatial_scale = 1.0 / 2.0 @@ -252,13 +303,17 @@ class TestDeformablePSROIPoolOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x_1 = np.random.randint( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y_1 = np.random.randint( - 0, self.height // self.spatial_scale - self.pooled_height) - x_2 = np.random.randint(x_1 + self.pooled_width, - self.width // self.spatial_scale) - y_2 = np.random.randint(y_1 + self.pooled_height, - self.height // self.spatial_scale) + 0, self.height // self.spatial_scale - self.pooled_height + ) + x_2 = np.random.randint( + x_1 + self.pooled_width, self.width // self.spatial_scale + ) + y_2 = np.random.randint( + y_1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x_1, y_1, x_2, y_2] rois.append(roi) self.rois_num = len(rois) @@ -290,20 +345,38 @@ class TestDeformablePSROIPoolOp(OpTest): return val def calc_deformable_psroi_pooling(self): - output_shape = (self.rois_num, self.output_channels, self.pooled_height, - self.pooled_width) + output_shape = ( + self.rois_num, + self.output_channels, + self.pooled_height, + self.pooled_width, + ) self.out = np.zeros(output_shape) - self.trans = np.random.rand(self.rois_num, 2, self.part_size[0], - self.part_size[1]).astype('float32') + self.trans = np.random.rand( + self.rois_num, 2, self.part_size[0], self.part_size[1] + ).astype('float32') self.top_count = np.random.random((output_shape)).astype('float32') - count = self.rois_num * self.output_channels * self.pooled_height * self.pooled_width + count = ( + self.rois_num + * self.output_channels + * self.pooled_height + * self.pooled_width + ) for index in range(count): p_w = int(index % self.pooled_width) p_h = int(index / self.pooled_width % self.pooled_height) - ctop = int(index / self.pooled_width / self.pooled_height % - self.output_channels) 
- n_out = int(index / self.pooled_width / self.pooled_height / - self.output_channels) + ctop = int( + index + / self.pooled_width + / self.pooled_height + % self.output_channels + ) + n_out = int( + index + / self.pooled_width + / self.pooled_height + / self.output_channels + ) roi = self.rois[n_out] roi_batch_id = int(roi[0]) roi_start_w = int(np.round(roi[1])) * self.spatial_scale - 0.5 @@ -339,15 +412,22 @@ class TestDeformablePSROIPoolOp(OpTest): for i_h in range(self.sample_per_part): w_sample = wstart + i_w * sub_bin_size_w h_sample = hstart + i_h * sub_bin_size_h - if w_sample < -0.5 or w_sample > self.width - 0.5 or \ - h_sample < -0.5 or h_sample > self.height - 0.5: + if ( + w_sample < -0.5 + or w_sample > self.width - 0.5 + or h_sample < -0.5 + or h_sample > self.height - 0.5 + ): continue - w_sample = min(max(w_sample, 0.), self.width - 1.) - h_sample = min(max(h_sample, 0.), self.height - 1.) - c_sample = int((ctop * self.group_size[0] + g_h) * - self.group_size[1] + g_w) - val = self.dmc_bilinear(input_i[c_sample], h_sample, - w_sample) + w_sample = min(max(w_sample, 0.0), self.width - 1.0) + h_sample = min(max(h_sample, 0.0), self.height - 1.0) + c_sample = int( + (ctop * self.group_size[0] + g_h) * self.group_size[1] + + g_w + ) + val = self.dmc_bilinear( + input_i[c_sample], h_sample, w_sample + ) sum = sum + val num_sample = num_sample + 1 if num_sample == 0: @@ -368,131 +448,144 @@ class TestDeformablePSROIPoolOp(OpTest): class TestDeformablePSROIPoolOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - input1 = fluid.data(name="input1", - shape=[2, 192, 64, 64], - dtype='float32') - rois1 = fluid.data(name="rois1", - shape=[-1, 4], - dtype='float32', - lod_level=1) - trans1 = fluid.data(name="trans1", - shape=[2, 384, 64, 64], - dtype='float32') + input1 = fluid.data( + name="input1", shape=[2, 192, 64, 64], dtype='float32' + ) + rois1 = fluid.data( + name="rois1", shape=[-1, 4], dtype='float32', lod_level=1 + ) + trans1 = fluid.data( + name="trans1", shape=[2, 384, 64, 64], dtype='float32' + ) # The `input` must be Variable and the data type of `input` Tensor must be one of float32 and float64. def test_input_type(): - fluid.layers.deformable_roi_pooling(input=[3, 4], - rois=rois1, - trans=trans1, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + fluid.layers.deformable_roi_pooling( + input=[3, 4], + rois=rois1, + trans=trans1, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_input_type) def test_input_tensor_dtype(): - input2 = fluid.data(name="input2", - shape=[2, 192, 64, 64], - dtype='int32') - fluid.layers.deformable_roi_pooling(input=input2, - rois=rois1, - trans=trans1, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + input2 = fluid.data( + name="input2", shape=[2, 192, 64, 64], dtype='int32' + ) + fluid.layers.deformable_roi_pooling( + input=input2, + rois=rois1, + trans=trans1, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_input_tensor_dtype) # The `rois` must be Variable and the data type of `rois` Tensor must be one of float32 and float64. 
def test_rois_type(): - fluid.layers.deformable_roi_pooling(input=input1, - rois=2, - trans=trans1, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=2, + trans=trans1, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_rois_type) def test_rois_tensor_dtype(): - rois2 = fluid.data(name="rois2", - shape=[-1, 4], - dtype='int32', - lod_level=1) - fluid.layers.deformable_roi_pooling(input=input1, - rois=rois2, - trans=trans1, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + rois2 = fluid.data( + name="rois2", shape=[-1, 4], dtype='int32', lod_level=1 + ) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=rois2, + trans=trans1, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_rois_tensor_dtype) # The `trans` must be Variable and the data type of `trans` Tensor must be one of float32 and float64. def test_trans_type(): - fluid.layers.deformable_roi_pooling(input=input1, - rois=rois1, - trans=[2], - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=rois1, + trans=[2], + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_trans_type) def test_trans_tensor_dtype(): - trans2 = fluid.data(name="trans2", - shape=[2, 384, 64, 64], - dtype='int32') - fluid.layers.deformable_roi_pooling(input=input1, - rois=rois1, - trans=trans2, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + trans2 = fluid.data( + name="trans2", shape=[2, 384, 64, 64], dtype='int32' + ) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=rois1, + trans=trans2, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_trans_tensor_dtype) # The `group_size` must be one of list and tuple. # Each element must be int. def test_group_size_type(): - fluid.layers.deformable_roi_pooling(input=input1, - rois=rois1, - trans=trans1, - group_size=1, - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - position_sensitive=True) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=rois1, + trans=trans1, + group_size=1, + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_group_size_type) # The `part_size` must be one of list, tuple and None. # Each element must be int. 
def test_part_size_type(): - fluid.layers.deformable_roi_pooling(input=input1, - rois=rois1, - trans=trans1, - pooled_height=8, - pooled_width=8, - part_size=8, - sample_per_part=4, - position_sensitive=True) + fluid.layers.deformable_roi_pooling( + input=input1, + rois=rois1, + trans=trans1, + pooled_height=8, + pooled_width=8, + part_size=8, + sample_per_part=4, + position_sensitive=True, + ) self.assertRaises(TypeError, test_part_size_type) diff --git a/python/paddle/fluid/tests/unittests/test_deg2rad.py b/python/paddle/fluid/tests/unittests/test_deg2rad.py index 4c9ec1becc4a2cd44f30eb5990c5a4f17118d659..79e4541b635e1432cbf066be1c8e243706e251eb 100644 --- a/python/paddle/fluid/tests/unittests/test_deg2rad.py +++ b/python/paddle/fluid/tests/unittests/test_deg2rad.py @@ -22,11 +22,11 @@ paddle.enable_static() class TestDeg2radAPI(unittest.TestCase): - def setUp(self): self.x_dtype = 'float64' - self.x_np = np.array([180.0, -180.0, 360.0, -360.0, 90.0, - -90.0]).astype(np.float64) + self.x_np = np.array( + [180.0, -180.0, 360.0, -360.0, 90.0, -90.0] + ).astype(np.float64) self.x_shape = [6] self.out_np = np.deg2rad(self.x_np) @@ -37,12 +37,17 @@ class TestDeg2radAPI(unittest.TestCase): x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape) out = paddle.deg2rad(x) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(fluid.default_main_program(), - feed={'input': self.x_np}, - fetch_list=[out]) + res = exe.run( + fluid.default_main_program(), + feed={'input': self.x_np}, + fetch_list=[out], + ) self.assertTrue((np.array(out[0]) == self.out_np).all()) def test_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py index ec747d5aef2e75091739c9b97b37c1dcbd198088..7254f9cc2f8bcecbecd55f6f2ef9145d748a36e5 100644 --- a/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestDensityPriorBoxOp(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() @@ -35,7 +34,7 @@ class TestDensityPriorBoxOp(OpTest): 'densities': self.densities, 'fixed_sizes': self.fixed_sizes, 'fixed_ratios': self.fixed_ratios, - 'flatten_to_2d': self.flatten_to_2d + 'flatten_to_2d': self.flatten_to_2d, } self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} @@ -74,18 +73,19 @@ class TestDensityPriorBoxOp(OpTest): if len(self.fixed_sizes) > 0 and len(self.densities) > 0: for density in self.densities: if len(self.fixed_ratios) > 0: - self.num_priors += len(self.fixed_ratios) * (pow( - density, 2)) + self.num_priors += len(self.fixed_ratios) * ( + pow(density, 2) + ) self.offset = 0.5 def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_w, - self.image_h)).astype('float32') + (self.batch_size, self.image_channels, self.image_w, self.image_h) + ).astype('float32') self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_w, - self.layer_h)).astype('float32') + (self.batch_size, self.input_channels, self.layer_w, self.layer_h) + ).astype('float32') def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -99,32 +99,56 @@ class TestDensityPriorBoxOp(OpTest): c_x = (w 
+ self.offset) * self.step_w c_y = (h + self.offset) * self.step_h # Generate density prior boxes with fixed size - for density, fixed_size in zip(self.densities, - self.fixed_sizes): - if (len(self.fixed_ratios) > 0): + for density, fixed_size in zip( + self.densities, self.fixed_sizes + ): + if len(self.fixed_ratios) > 0: for ar in self.fixed_ratios: shift = int(step_average / density) box_width_ratio = fixed_size * math.sqrt(ar) box_height_ratio = fixed_size / math.sqrt(ar) for di in range(density): for dj in range(density): - c_x_temp = c_x - step_average / 2.0 + shift / 2.0 + dj * shift - c_y_temp = c_y - step_average / 2.0 + shift / 2.0 + di * shift + c_x_temp = ( + c_x + - step_average / 2.0 + + shift / 2.0 + + dj * shift + ) + c_y_temp = ( + c_y + - step_average / 2.0 + + shift / 2.0 + + di * shift + ) out_boxes[h, w, idx, :] = [ - max((c_x_temp - box_width_ratio / 2.0) / - self.image_w, 0), - max((c_y_temp - box_height_ratio / 2.0) - / self.image_h, 0), - min((c_x_temp + box_width_ratio / 2.0) / - self.image_w, 1), - min((c_y_temp + box_height_ratio / 2.0) - / self.image_h, 1) + max( + (c_x_temp - box_width_ratio / 2.0) + / self.image_w, + 0, + ), + max( + (c_y_temp - box_height_ratio / 2.0) + / self.image_h, + 0, + ), + min( + (c_x_temp + box_width_ratio / 2.0) + / self.image_w, + 1, + ), + min( + (c_y_temp + box_height_ratio / 2.0) + / self.image_h, + 1, + ), ] idx += 1 if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) - out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype('float32') self.out_var = out_var.astype('float32') if self.flatten_to_2d: @@ -133,7 +157,6 @@ class TestDensityPriorBoxOp(OpTest): class TestDensityPriorBox(TestDensityPriorBoxOp): - def set_density(self): self.densities = [3, 4] self.fixed_sizes = [1.0, 2.0] diff --git a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py index 9933a2243fb0432f60b74e9fd063e7ded0bcfe98..ae24334fea572117729ce82116f6ba1c6334045c 100755 --- a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py @@ -51,9 +51,11 @@ def get_warning_index(api): doc_lst = api.__doc__.splitlines() for idx, val in enumerate(doc_lst): - if val.startswith("Warning: ") and val.endswith( - " instead." - ) and "and will be removed in future versions." in val: + if ( + val.startswith("Warning: ") + and val.endswith(" instead.") + and "and will be removed in future versions." 
in val + ): return idx return ERROR_WARNING_POSTION @@ -152,7 +154,7 @@ class TestDeprecatedDocorator(unittest.TestCase): def test_tensor_gradient(self): paddle.__version__ = '2.1.0' - x = paddle.to_tensor(5., stop_gradient=False) + x = paddle.to_tensor(5.0, stop_gradient=False) y = paddle.pow(x, 4.0) y.backward() @@ -160,7 +162,8 @@ class TestDeprecatedDocorator(unittest.TestCase): grad = x.gradient() assert ( 'API "paddle.fluid.dygraph.varbase_patch_methods.gradient" is ' - 'deprecated since 2.1.0') in str(w[-1].message) + 'deprecated since 2.1.0' + ) in str(w[-1].message) def test_softmax_with_cross_entropy(self): paddle.__version__ = '2.0.0' @@ -173,11 +176,13 @@ class TestDeprecatedDocorator(unittest.TestCase): x = linear(data) with warnings.catch_warnings(record=True) as w: - out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, - label=label) + out = paddle.nn.functional.softmax_with_cross_entropy( + logits=x, label=label + ) assert ( 'API "paddle.nn.functional.loss.softmax_with_cross_entropy" is ' - 'deprecated since 2.0.0') in str(w[-1].message) + 'deprecated since 2.0.0' + ) in str(w[-1].message) def test_deprecated_error(self): paddle.__version__ = '2.1.0' diff --git a/python/paddle/fluid/tests/unittests/test_deprecated_memory_optimize_interfaces.py b/python/paddle/fluid/tests/unittests/test_deprecated_memory_optimize_interfaces.py index bd91e14e34d38500dd8161cb68ea6e4144a10c04..c3a21ba0bcbb656ccbf6945e778b0f80f18045c6 100644 --- a/python/paddle/fluid/tests/unittests/test_deprecated_memory_optimize_interfaces.py +++ b/python/paddle/fluid/tests/unittests/test_deprecated_memory_optimize_interfaces.py @@ -18,7 +18,6 @@ from simple_nets import simple_fc_net class DeprecatedMemoryOptimizationInterfaceTest(unittest.TestCase): - def setUp(self): self.method = fluid.memory_optimize @@ -61,7 +60,6 @@ class DeprecatedMemoryOptimizationInterfaceTest(unittest.TestCase): class ReleaseMemoryTest(DeprecatedMemoryOptimizationInterfaceTest): - def setUp(self): self.method = fluid.release_memory diff --git a/python/paddle/fluid/tests/unittests/test_dequantize_abs_max_op.py b/python/paddle/fluid/tests/unittests/test_dequantize_abs_max_op.py index 039686777bec6cd364f03feca8a812d3f1c7a547..c4806866eff6e4bb367a493f10040a6060be12ea 100644 --- a/python/paddle/fluid/tests/unittests/test_dequantize_abs_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_dequantize_abs_max_op.py @@ -30,7 +30,6 @@ def dequantize_max_abs(x, scale, max_range): class TestDequantizeMaxAbsOp(OpTest): - def set_args(self): self.num_bits = 8 self.max_range = math.pow(2, self.num_bits - 1) - 1 @@ -45,7 +44,7 @@ class TestDequantizeMaxAbsOp(OpTest): self.inputs = { 'X': np.array(yq).astype(self.data_type), - 'Scale': np.array(scale).astype('float32') + 'Scale': np.array(scale).astype('float32'), } self.attrs = {'max_range': self.max_range} self.outputs = {'Out': ydq} @@ -55,7 +54,6 @@ class TestDequantizeMaxAbsOp(OpTest): class TestDequantizeMaxAbsOp5Bits(TestDequantizeMaxAbsOp): - def set_args(self): self.num_bits = 5 self.max_range = math.pow(2, self.num_bits - 1) - 1 @@ -63,7 +61,6 @@ class TestDequantizeMaxAbsOp5Bits(TestDequantizeMaxAbsOp): class TestDequantizeMaxAbsOpInt16(TestDequantizeMaxAbsOp): - def set_args(self): self.num_bits = 16 self.max_range = math.pow(2, self.num_bits - 1) - 1 diff --git a/python/paddle/fluid/tests/unittests/test_dequantize_log_op.py b/python/paddle/fluid/tests/unittests/test_dequantize_log_op.py index 
939d5d6afcfb6b80596e7b91439ce036a0b0310f..5fb9d07d77a42e06f82152d20f0c94d6028d3e15 100644 --- a/python/paddle/fluid/tests/unittests/test_dequantize_log_op.py +++ b/python/paddle/fluid/tests/unittests/test_dequantize_log_op.py @@ -30,7 +30,6 @@ def dequantize_log(x, dict_data): class TestDequantizeLogOp(OpTest): - def setUp(self): self.op_type = "dequantize_log" x = np.random.randint(low=-128, high=127, size=(20, 10)).astype('int8') @@ -39,7 +38,7 @@ class TestDequantizeLogOp(OpTest): self.inputs = { 'X': np.array(x).astype('int8'), - 'Dict': np.array(dict_data).astype('float32') + 'Dict': np.array(dict_data).astype('float32'), } self.outputs = {'Out': xdq} diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py index 2744487b5883be8de14c5463a9fed91a21f65e3c..ad0e95d4bbcde2e8c683c9d975048ffd44f4331c 100644 --- a/python/paddle/fluid/tests/unittests/test_desc_clone.py +++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py @@ -27,33 +27,41 @@ paddle.dataset.mnist.fetch() # random seed must set before configuring the network. # fluid.default_startup_program().random_seed = SEED def cnn_model(data): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=data, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=data, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) # TODO(dzhwinter) : refine the initializer and random seed settting SIZE = 10 input_shape = conv_pool_2.shape - param_shape = [functools.reduce(lambda a, b: a * b, input_shape[1:], 1) - ] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + param_shape = [functools.reduce(lambda a, b: a * b, input_shape[1:], 1)] + [ + SIZE + ] + scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 predict = fluid.layers.fc( input=conv_pool_2, size=SIZE, act="softmax", param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer(loc=0.0, - scale=scale))) + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), + ) return predict @@ -69,23 +77,32 @@ def get_model(batch_size): # Evaluator batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size_tensor) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor + ) inference_program = fluid.default_main_program().clone() # Optimization - opt = fluid.optimizer.AdamOptimizer(learning_rate=0.001, - beta1=0.9, - beta2=0.999) + opt = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999 + ) # Reader - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) - test_reader = paddle.batch(paddle.dataset.mnist.test(), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size + ) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size + ) opt.minimize(avg_cost) - return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + return ( + inference_program, + avg_cost, + train_reader, + test_reader, + 
batch_acc, + predict, + ) def operator_equal(a, b): @@ -93,8 +110,9 @@ def operator_equal(a, b): raise ValueError("In operator_equal not equal\n") for k, v in a.__dict__.items(): - if isinstance(v, fluid.framework.Program) or \ - isinstance(v, fluid.framework.Block): + if isinstance(v, fluid.framework.Program) or isinstance( + v, fluid.framework.Block + ): continue elif isinstance(v, core.OpDesc): @@ -107,7 +125,7 @@ def operator_equal(a, b): if v0 != v1: raise ValueError("In operator_equal not equal:{0}\n".format(k)) - elif (v != b.__dict__[k]): + elif v != b.__dict__[k]: raise ValueError("In operator_equal not equal:{0}\n".format(k)) return True @@ -115,12 +133,15 @@ def operator_equal(a, b): def block_equal(a, b): for k, v in a.__dict__.items(): - if isinstance(v, core.ProgramDesc) or isinstance( - v, fluid.framework.Program) or isinstance(v, core.BlockDesc): + if ( + isinstance(v, core.ProgramDesc) + or isinstance(v, fluid.framework.Program) + or isinstance(v, core.BlockDesc) + ): continue elif k == "ops": - assert (len(a.ops) == len(b.ops)) + assert len(a.ops) == len(b.ops) for i in range(0, len(a.ops)): if not operator_equal(a.ops[i], b.ops[i]): raise ValueError("In block_equal not equal:{0}\n".format(k)) @@ -130,7 +151,7 @@ def block_equal(a, b): if str(value) != str(b.__dict__[k][key]): raise ValueError("In block_equal not equal:{0}\n".format(k)) - elif (v != b.__dict__[k]): + elif v != b.__dict__[k]: raise ValueError("In block_equal not equal:{0}\n".format(k)) return True @@ -145,19 +166,19 @@ def program_equal(a, b): for i in range(0, len(a.blocks)): if not block_equal(a.blocks[i], b.blocks[i]): raise ValueError( - "In operator_equal not equal:{0}\n".format(k)) + "In operator_equal not equal:{0}\n".format(k) + ) return False - assert (len(a.blocks) == len(b.blocks)) + assert len(a.blocks) == len(b.blocks) elif k == '_auto_checkpoint_name': continue - elif (v != b.__dict__[k]): + elif v != b.__dict__[k]: raise ValueError("In program_equal not equal:{0}\n".format(k)) return True class TestCloneWithStopGradient(unittest.TestCase): - def test_clone_with_stop_gradient(self): train_program = fluid.Program() startup_program = fluid.Program() @@ -168,18 +189,20 @@ class TestCloneWithStopGradient(unittest.TestCase): hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.5) loss = fluid.layers.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), - label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + ) avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) self.assertEqual( - test_program.block(0).var(hidden1.name).stop_gradient, True) + test_program.block(0).var(hidden1.name).stop_gradient, True + ) self.assertEqual( - test_program.block(0).var(hidden2.name).stop_gradient, False) + test_program.block(0).var(hidden2.name).stop_gradient, False + ) class TestCloneWithStopGradientInSubBlock(unittest.TestCase): - def test_clone_with_stop_gradient(self): train_program = fluid.Program() startup_program = fluid.Program() @@ -204,12 +227,14 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): loss = fluid.layers.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), - label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + ) avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) self.assertEqual( - 
test_program.block(0).var(hidden1.name).stop_gradient, True) + test_program.block(0).var(hidden1.name).stop_gradient, True + ) for var in test_program.block(1).vars.values(): var2 = train_program.block(1).var(var.name) self.assertEqual(var.stop_gradient, var2.stop_gradient) @@ -219,7 +244,6 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): class TestCloneWithRaise(unittest.TestCase): - def test_clone_with_stop_gradient(self): train_program = fluid.Program() startup_program = fluid.Program() @@ -243,14 +267,19 @@ class TestCloneWithRaise(unittest.TestCase): hidden2 = fluid.layers.cond(cond, true_fn, false_fn) loss = fluid.layers.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), - label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + ) avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) - self.assertRaises(ValueError, train_program._copy_data_info_from, - startup_program) - self.assertRaises(TypeError, train_program._copy_data_info_from, - startup_program.block(0)) + self.assertRaises( + ValueError, train_program._copy_data_info_from, startup_program + ) + self.assertRaises( + TypeError, + train_program._copy_data_info_from, + startup_program.block(0), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_detach.py b/python/paddle/fluid/tests/unittests/test_detach.py index 901f35a756bca938331e04c8c85215490fbbb789..48abc41e3c11c140732bd993567d1176d194de99 100644 --- a/python/paddle/fluid/tests/unittests/test_detach.py +++ b/python/paddle/fluid/tests/unittests/test_detach.py @@ -23,39 +23,51 @@ import unittest class Test_Detach(unittest.TestCase): - def generate_Data(self): - data = np.array([[1, 8, 3, 9], [7, 20, 9, 6], [4, 6, 8, - 10]]).astype('float32') + data = np.array([[1, 8, 3, 9], [7, 20, 9, 6], [4, 6, 8, 10]]).astype( + 'float32' + ) return data def no_detach_multi(self): data = self.generate_Data() with fluid.dygraph.guard(): linear_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(5.0)) + initializer=fluid.initializer.Constant(5.0) + ) linear_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(6.0)) - linear = Linear(4, - 10, - param_attr=linear_w_param_attrs, - bias_attr=linear_b_param_attrs) + initializer=fluid.initializer.Constant(6.0) + ) + linear = Linear( + 4, + 10, + param_attr=linear_w_param_attrs, + bias_attr=linear_b_param_attrs, + ) linear1_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(7.0)) + initializer=fluid.initializer.Constant(7.0) + ) linear1_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(8.0)) - linear1 = Linear(10, - 1, - param_attr=linear1_w_param_attrs, - bias_attr=linear1_b_param_attrs) + initializer=fluid.initializer.Constant(8.0) + ) + linear1 = Linear( + 10, + 1, + param_attr=linear1_w_param_attrs, + bias_attr=linear1_b_param_attrs, + ) linear2_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(9.0)) + initializer=fluid.initializer.Constant(9.0) + ) linear2_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(10.0)) - linear2 = Linear(10, - 1, - param_attr=linear2_w_param_attrs, - bias_attr=linear2_b_param_attrs) + initializer=fluid.initializer.Constant(10.0) + ) + linear2 = Linear( + 10, + 1, + param_attr=linear2_w_param_attrs, + bias_attr=linear2_b_param_attrs, + ) data = to_variable(data) x = linear(data) x1 = linear1(x) @@ -69,21 +81,29 @@ 
class Test_Detach(unittest.TestCase): data = self.generate_Data() with fluid.dygraph.guard(): linear_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(5.0)) + initializer=fluid.initializer.Constant(5.0) + ) linear_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(6.0)) - linear = Linear(4, - 10, - param_attr=linear_w_param_attrs, - bias_attr=linear_b_param_attrs) + initializer=fluid.initializer.Constant(6.0) + ) + linear = Linear( + 4, + 10, + param_attr=linear_w_param_attrs, + bias_attr=linear_b_param_attrs, + ) linear1_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(7.0)) + initializer=fluid.initializer.Constant(7.0) + ) linear1_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(8.0)) - linear1 = Linear(10, - 1, - param_attr=linear1_w_param_attrs, - bias_attr=linear1_b_param_attrs) + initializer=fluid.initializer.Constant(8.0) + ) + linear1 = Linear( + 10, + 1, + param_attr=linear1_w_param_attrs, + bias_attr=linear1_b_param_attrs, + ) data = to_variable(data) x = linear(data) x1 = linear1(x) @@ -96,29 +116,41 @@ class Test_Detach(unittest.TestCase): data = self.generate_Data() with fluid.dygraph.guard(): linear_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(5.0)) + initializer=fluid.initializer.Constant(5.0) + ) linear_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(6.0)) - linear = Linear(4, - 10, - param_attr=linear_w_param_attrs, - bias_attr=linear_b_param_attrs) + initializer=fluid.initializer.Constant(6.0) + ) + linear = Linear( + 4, + 10, + param_attr=linear_w_param_attrs, + bias_attr=linear_b_param_attrs, + ) linear1_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(7.0)) + initializer=fluid.initializer.Constant(7.0) + ) linear1_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(8.0)) - linear1 = Linear(10, - 1, - param_attr=linear1_w_param_attrs, - bias_attr=linear1_b_param_attrs) + initializer=fluid.initializer.Constant(8.0) + ) + linear1 = Linear( + 10, + 1, + param_attr=linear1_w_param_attrs, + bias_attr=linear1_b_param_attrs, + ) linear2_w_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(9.0)) + initializer=fluid.initializer.Constant(9.0) + ) linear2_b_param_attrs = fluid.ParamAttr( - initializer=fluid.initializer.Constant(10.0)) - linear2 = Linear(10, - 1, - param_attr=linear2_w_param_attrs, - bias_attr=linear2_b_param_attrs) + initializer=fluid.initializer.Constant(10.0) + ) + linear2 = Linear( + 10, + 1, + param_attr=linear2_w_param_attrs, + bias_attr=linear2_b_param_attrs, + ) data = to_variable(data) x = linear(data) x_detach = x.detach() @@ -144,7 +176,6 @@ class Test_Detach(unittest.TestCase): class TestInplace(unittest.TestCase): - def test_forward_version(self): with paddle.fluid.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) @@ -180,9 +211,11 @@ class TestInplace(unittest.TestCase): loss = paddle.nn.functional.relu(var_c + var_d) with self.assertRaisesRegexp( - RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}". 
- format(1, 0)): + RuntimeError, + "received tensor_version:{} != wrapper_version_snapshot:{}".format( + 1, 0 + ), + ): loss.backward() diff --git a/python/paddle/fluid/tests/unittests/test_detection_map_op.py b/python/paddle/fluid/tests/unittests/test_detection_map_op.py index 68afa3feed908e22f525d06a955003a05d9cc6b6..00b2c0700ca8053f144aeac4494b691b87b83f24 100644 --- a/python/paddle/fluid/tests/unittests/test_detection_map_op.py +++ b/python/paddle/fluid/tests/unittests/test_detection_map_op.py @@ -20,7 +20,6 @@ from op_test import OpTest class TestDetectionMAPOp(OpTest): - def set_data(self): self.class_num = 4 self.init_test_case() @@ -30,8 +29,9 @@ class TestDetectionMAPOp(OpTest): self.mAP = np.array(self.mAP).astype('float32') if len(self.class_pos_count) > 0: - self.class_pos_count = np.array( - self.class_pos_count).astype('int32') + self.class_pos_count = np.array(self.class_pos_count).astype( + 'int32' + ) self.true_pos = np.array(self.true_pos).astype('float32') self.false_pos = np.array(self.false_pos).astype('float32') self.has_state = np.array([1]).astype('int32') @@ -42,7 +42,7 @@ class TestDetectionMAPOp(OpTest): 'HasState': self.has_state, 'PosCount': self.class_pos_count, 'TruePos': (self.true_pos, self.true_pos_lod), - 'FalsePos': (self.false_pos, self.false_pos_lod) + 'FalsePos': (self.false_pos, self.false_pos_lod), } else: self.inputs = { @@ -54,11 +54,12 @@ class TestDetectionMAPOp(OpTest): 'overlap_threshold': self.overlap_threshold, 'evaluate_difficult': self.evaluate_difficult, 'ap_type': self.ap_type, - 'class_num': self.class_num + 'class_num': self.class_num, } - self.out_class_pos_count = np.array( - self.out_class_pos_count).astype('int') + self.out_class_pos_count = np.array(self.out_class_pos_count).astype( + 'int' + ) self.out_true_pos = np.array(self.out_true_pos).astype('float32') self.out_false_pos = np.array(self.out_false_pos).astype('float32') @@ -66,7 +67,7 @@ class TestDetectionMAPOp(OpTest): 'MAP': self.mAP, 'AccumPosCount': self.out_class_pos_count, 'AccumTruePos': (self.out_true_pos, self.out_true_pos_lod), - 'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod) + 'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod), } def init_test_case(self): @@ -76,24 +77,36 @@ class TestDetectionMAPOp(OpTest): self.label_lod = [[2, 2]] # label difficult xmin ymin xmax ymax - self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8], - [2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]] + self.label = [ + [1, 0, 0.1, 0.1, 0.3, 0.3], + [1, 1, 0.6, 0.6, 0.8, 0.8], + [2, 0, 0.3, 0.3, 0.6, 0.5], + [1, 0, 0.7, 0.1, 0.9, 0.3], + ] # label score xmin ymin xmax ymax difficult self.detect_lod = [[3, 4]] - self.detect = [[1, 0.3, 0.1, 0.0, 0.4, - 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3], - [1, 0.9, 0.7, 0.6, 0.8, - 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4], - [2, 0.1, 0.4, 0.3, 0.7, - 0.5], [1, 0.2, 0.8, 0.1, 1.0, 0.3], - [3, 0.2, 0.8, 0.1, 1.0, 0.3]] + self.detect = [ + [1, 0.3, 0.1, 0.0, 0.4, 0.3], + [1, 0.7, 0.0, 0.1, 0.2, 0.3], + [1, 0.9, 0.7, 0.6, 0.8, 0.8], + [2, 0.8, 0.2, 0.1, 0.4, 0.4], + [2, 0.1, 0.4, 0.3, 0.7, 0.5], + [1, 0.2, 0.8, 0.1, 1.0, 0.3], + [3, 0.2, 0.8, 0.1, 1.0, 0.3], + ] # label score true_pos false_pos self.tf_pos_lod = [[3, 4]] - self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1], - [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0], - [3, 0.2, 0, 1]] + self.tf_pos = [ + [1, 0.9, 1, 0], + [1, 0.7, 1, 0], + [1, 0.3, 0, 1], + [1, 0.2, 1, 0], + [2, 0.8, 0, 1], + [2, 0.1, 1, 0], + [3, 0.2, 0, 1], + ] self.class_pos_count 
= [] self.true_pos_lod = [[]] @@ -105,8 +118,9 @@ class TestDetectionMAPOp(OpTest): mAP = 0.0 count = 0 - def get_input_pos(class_pos_count, true_pos, true_pos_lod, false_pos, - false_pos_lod): + def get_input_pos( + class_pos_count, true_pos, true_pos_lod, false_pos, false_pos_lod + ): class_pos_count_dict = collections.Counter() true_pos_dict = collections.defaultdict(list) false_pos_dict = collections.defaultdict(list) @@ -149,9 +163,13 @@ class TestDetectionMAPOp(OpTest): out_false_pos += false_pos_list out_false_pos_lod.append(len(false_pos_list)) - return out_class_pos_count, out_true_pos, [ - out_true_pos_lod - ], out_false_pos, [out_false_pos_lod] + return ( + out_class_pos_count, + out_true_pos, + [out_true_pos_lod], + out_false_pos, + [out_false_pos_lod], + ) def get_accumulation(pos_list): sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True) @@ -163,8 +181,12 @@ class TestDetectionMAPOp(OpTest): return accu_list label_count, true_pos, false_pos = get_input_pos( - self.class_pos_count, self.true_pos, self.true_pos_lod, - self.false_pos, self.false_pos_lod) + self.class_pos_count, + self.true_pos, + self.true_pos_lod, + self.false_pos, + self.false_pos_lod, + ) for v in self.label: label = v[0] difficult = False if len(v) == 5 else v[1] @@ -178,7 +200,8 @@ class TestDetectionMAPOp(OpTest): false_pos[label].append([score, fp]) for (label, label_pos_num) in label_count.items(): - if label_pos_num == 0: continue + if label_pos_num == 0: + continue if label not in true_pos: count += 1 continue @@ -193,8 +216,9 @@ class TestDetectionMAPOp(OpTest): for i in range(len(accu_tp_sum)): precision.append( - float(accu_tp_sum[i]) / - float(accu_tp_sum[i] + accu_fp_sum[i])) + float(accu_tp_sum[i]) + / float(accu_tp_sum[i] + accu_fp_sum[i]) + ) recall.append(float(accu_tp_sum[i]) / label_pos_num) if self.ap_type == "11point": @@ -218,14 +242,16 @@ class TestDetectionMAPOp(OpTest): prev_recall = 0.0 for i in range(len(accu_tp_sum)): if math.fabs(recall[i] - prev_recall) > 1e-6: - average_precisions += precision[i] * \ - math.fabs(recall[i] - prev_recall) + average_precisions += precision[i] * math.fabs( + recall[i] - prev_recall + ) prev_recall = recall[i] mAP += average_precisions count += 1 - pcnt, tp, tp_lod, fp, fp_lod = get_output_pos(label_count, true_pos, - false_pos) + pcnt, tp, tp_lod, fp, fp_lod = get_output_pos( + label_count, true_pos, false_pos + ) self.out_class_pos_count = pcnt self.out_true_pos = tp self.out_true_pos_lod = tp_lod @@ -244,7 +270,6 @@ class TestDetectionMAPOp(OpTest): class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp): - def init_test_case(self): super(TestDetectionMAPOpSkipDiff, self).init_test_case() @@ -252,22 +277,30 @@ class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp): self.tf_pos_lod = [[2, 4]] # label score true_pos false_pos - self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0], - [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]] + self.tf_pos = [ + [1, 0.7, 1, 0], + [1, 0.3, 0, 1], + [1, 0.2, 1, 0], + [2, 0.8, 0, 1], + [2, 0.1, 1, 0], + [3, 0.2, 0, 1], + ] class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp): - def init_test_case(self): super(TestDetectionMAPOpWithoutDiff, self).init_test_case() # label xmin ymin xmax ymax - self.label = [[1, 0.1, 0.1, 0.3, 0.3], [1, 0.6, 0.6, 0.8, 0.8], - [2, 0.3, 0.3, 0.6, 0.5], [1, 0.7, 0.1, 0.9, 0.3]] + self.label = [ + [1, 0.1, 0.1, 0.3, 0.3], + [1, 0.6, 0.6, 0.8, 0.8], + [2, 0.3, 0.3, 0.6, 0.5], + [1, 0.7, 0.1, 0.9, 0.3], + ] class TestDetectionMAPOp11Point(TestDetectionMAPOp): - def 
init_test_case(self): super(TestDetectionMAPOp11Point, self).init_test_case() @@ -275,18 +308,28 @@ class TestDetectionMAPOp11Point(TestDetectionMAPOp): class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp): - def init_test_case(self): super(TestDetectionMAPOpMultiBatch, self).init_test_case() self.class_pos_count = [0, 2, 1, 0] self.true_pos_lod = [[0, 3, 2]] - self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]] + self.true_pos = [ + [0.7, 1.0], + [0.3, 0.0], + [0.2, 1.0], + [0.8, 0.0], + [0.1, 1.0], + ] self.false_pos_lod = [[0, 3, 2]] - self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]] + self.false_pos = [ + [0.7, 0.0], + [0.3, 1.0], + [0.2, 0.0], + [0.8, 1.0], + [0.1, 0.0], + ] class TestDetectionMAPOp11PointWithClassNoTP(TestDetectionMAPOp): - def init_test_case(self): self.overlap_threshold = 0.3 self.evaluate_difficult = True diff --git a/python/paddle/fluid/tests/unittests/test_determinant_op.py b/python/paddle/fluid/tests/unittests/test_determinant_op.py index abd44669b25e67b0ab321d4a86fa46ae426ece57..730864eb23097de38cf0f765e020bf37a907d840 100644 --- a/python/paddle/fluid/tests/unittests/test_determinant_op.py +++ b/python/paddle/fluid/tests/unittests/test_determinant_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestDeterminantOp(OpTest): - def setUp(self): self.python_api = paddle.linalg.det self.init_data() @@ -43,7 +42,6 @@ class TestDeterminantOp(OpTest): class TestDeterminantOpCase1(TestDeterminantOp): - def init_data(self): np.random.seed(0) self.case = np.random.rand(10, 10).astype('float32') @@ -52,7 +50,6 @@ class TestDeterminantOpCase1(TestDeterminantOp): class TestDeterminantOpCase2(TestDeterminantOp): - def init_data(self): np.random.seed(0) # not invertible matrix @@ -62,7 +59,6 @@ class TestDeterminantOpCase2(TestDeterminantOp): class TestDeterminantAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) self.shape = [3, 3, 5, 5] @@ -95,7 +91,6 @@ class TestDeterminantAPI(unittest.TestCase): class TestSlogDeterminantOp(OpTest): - def setUp(self): self.op_type = "slogdeterminant" self.python_api = paddle.linalg.slogdet @@ -107,9 +102,9 @@ class TestSlogDeterminantOp(OpTest): def test_check_grad(self): # the slog det's grad value is always huge - self.check_grad(['Input'], ['Out'], - max_relative_error=0.1, - check_eager=True) + self.check_grad( + ['Input'], ['Out'], max_relative_error=0.1, check_eager=True + ) def init_data(self): np.random.seed(0) @@ -119,7 +114,6 @@ class TestSlogDeterminantOp(OpTest): class TestSlogDeterminantOpCase1(TestSlogDeterminantOp): - def init_data(self): np.random.seed(0) self.case = np.random.rand(2, 2, 5, 5).astype(np.float32) @@ -128,7 +122,6 @@ class TestSlogDeterminantOpCase1(TestSlogDeterminantOp): class TestSlogDeterminantAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) self.shape = [3, 3, 5, 5] diff --git a/python/paddle/fluid/tests/unittests/test_device.py b/python/paddle/fluid/tests/unittests/test_device.py index 3d1653f123ffd71b8789e3db18fb29aaa9082a94..cf95ea96f7bada8077cdfe243624cb2e5fc8f61f 100644 --- a/python/paddle/fluid/tests/unittests/test_device.py +++ b/python/paddle/fluid/tests/unittests/test_device.py @@ -21,7 +21,6 @@ import paddle.fluid.framework as framework class TestStaticDeviceManage(unittest.TestCase): - def _test_device(self, device_name, device_class): paddle.set_device(device_name) @@ -54,7 +53,6 @@ class TestStaticDeviceManage(unittest.TestCase): class TestImperativeDeviceManage(unittest.TestCase): - def test_cpu(self): with 
fluid.dygraph.guard(): paddle.set_device('cpu') @@ -64,7 +62,8 @@ class TestImperativeDeviceManage(unittest.TestCase): device = paddle.get_device() self.assertEqual( isinstance(framework._current_expected_place(), core.CPUPlace), - True) + True, + ) self.assertEqual(device, "cpu") def test_gpu(self): @@ -76,8 +75,11 @@ class TestImperativeDeviceManage(unittest.TestCase): out3 = paddle.concat(x=[out1, out2], axis=0) device = paddle.get_device() self.assertEqual( - isinstance(framework._current_expected_place(), - core.CUDAPlace), True) + isinstance( + framework._current_expected_place(), core.CUDAPlace + ), + True, + ) self.assertEqual(device, "gpu:0") def test_xpu(self): @@ -86,8 +88,11 @@ class TestImperativeDeviceManage(unittest.TestCase): out = paddle.to_tensor([1, 2]) device = paddle.get_device() self.assertEqual( - isinstance(framework._current_expected_place(), - core.XPUPlace), True) + isinstance( + framework._current_expected_place(), core.XPUPlace + ), + True, + ) self.assertTrue(out.place.is_xpu_place()) self.assertEqual(device, "xpu:0") @@ -100,8 +105,11 @@ class TestImperativeDeviceManage(unittest.TestCase): out3 = paddle.concat(x=[out1, out2], axis=0) device = paddle.get_device() self.assertEqual( - isinstance(framework._current_expected_place(), - core.NPUPlace), True) + isinstance( + framework._current_expected_place(), core.NPUPlace + ), + True, + ) self.assertTrue(out1.place.is_npu_place()) self.assertTrue(out2.place.is_npu_place()) self.assertTrue(out3.place.is_npu_place()) diff --git a/python/paddle/fluid/tests/unittests/test_device_guard.py b/python/paddle/fluid/tests/unittests/test_device_guard.py index f3ac33a4f854cc3d756e7da142b7f5f9d379f797..b43fbb6e87f37937bb417aba9d4ebe699b35b679 100644 --- a/python/paddle/fluid/tests/unittests/test_device_guard.py +++ b/python/paddle/fluid/tests/unittests/test_device_guard.py @@ -39,17 +39,16 @@ def get_vaild_warning_num(warning, w): class TestDeviceGuard(unittest.TestCase): - def test_device_guard(self): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.full(shape=[1, 3, 8, 8], - fill_value=0.5, - dtype='float32') - data2 = paddle.full(shape=[1, 3, 5, 5], - fill_value=0.5, - dtype='float32') + data1 = paddle.full( + shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32' + ) + data2 = paddle.full( + shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32' + ) shape = paddle.shape(data2) with paddle.static.device_guard("cpu"): shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) @@ -70,12 +69,12 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.full(shape=[1, 3, 8, 8], - fill_value=0.5, - dtype='float32') - data2 = paddle.full(shape=[1, 3, 5, 5], - fill_value=0.5, - dtype='float32') + data1 = paddle.full( + shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32' + ) + data2 = paddle.full( + shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32' + ) shape = paddle.shape(data2) with paddle.static.device_guard("cpu"): shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) @@ -96,32 +95,50 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - x = paddle.full(shape=[2, 255, 13, 13], - fill_value=0.3, - dtype='float32') - gt_box = 
paddle.full(shape=[2, 6, 4], - fill_value=0.5, - dtype='float32') + x = paddle.full( + shape=[2, 255, 13, 13], fill_value=0.3, dtype='float32' + ) + gt_box = paddle.full( + shape=[2, 6, 4], fill_value=0.5, dtype='float32' + ) gt_label = paddle.full(shape=[2, 6], fill_value=1.0, dtype='int32') - gt_score = paddle.full(shape=[2, 6], - fill_value=0.5, - dtype='float32') + gt_score = paddle.full( + shape=[2, 6], fill_value=0.5, dtype='float32' + ) anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, - 198, 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] anchor_mask = [0, 1, 2] with paddle.static.device_guard("gpu"): # yolov3_loss only has cpu kernel, so its cpu kernel will be executed - loss = fluid.layers.yolov3_loss(x=x, - gt_box=gt_box, - gt_label=gt_label, - gt_score=gt_score, - anchors=anchors, - anchor_mask=anchor_mask, - class_num=80, - ignore_thresh=0.7, - downsample_ratio=32) + loss = fluid.layers.yolov3_loss( + x=x, + gt_box=gt_box, + gt_label=gt_label, + gt_score=gt_score, + anchors=anchors, + anchor_mask=anchor_mask, + class_num=80, + ignore_thresh=0.7, + downsample_ratio=32, + ) execute(main_program, startup_program) @@ -154,7 +171,6 @@ class TestDeviceGuard(unittest.TestCase): execute(main_program, startup_program) def test_error(self): - def device_attr(): with paddle.static.device_guard("cpu1"): out = paddle.full(shape=[1], fill_value=0.2, dtype='float32') @@ -171,17 +187,18 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.static.data(name="data_1", - shape=[4, 2], - dtype="float32") - label = paddle.static.data(name="label", - shape=[4, 1], - dtype="int64") + data1 = paddle.static.data( + name="data_1", shape=[4, 2], dtype="float32" + ) + label = paddle.static.data( + name="label", shape=[4, 1], dtype="int64" + ) fc1 = paddle.static.nn.fc(x=data1, size=10) fc2 = paddle.static.nn.fc(x=fc1, size=10) with paddle.static.device_guard("gpu"): out = paddle.nn.functional.softmax_with_cross_entropy( - logits=fc1 + fc2, label=label) + logits=fc1 + fc2, label=label + ) loss = paddle.mean(out) opt = paddle.optimizer.SGD(0.1) opt.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_diag.py b/python/paddle/fluid/tests/unittests/test_diag.py index 5148efe1b08f2983070fc4cc410472265e289ca6..4135db34218ba7c1f7af40c9cb183f28c56c3292 100644 --- a/python/paddle/fluid/tests/unittests/test_diag.py +++ b/python/paddle/fluid/tests/unittests/test_diag.py @@ -21,7 +21,6 @@ from paddle.fluid import Program, program_guard class TestDiagOp(OpTest): - def setUp(self): self.op_type = "diag" self.init_config() @@ -38,13 +37,11 @@ class TestDiagOp(OpTest): class TestDiagOpCase1(TestDiagOp): - def init_config(self): self.case = np.array([3], dtype='int32') class TestDiagError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): diff --git a/python/paddle/fluid/tests/unittests/test_diag_embed.py b/python/paddle/fluid/tests/unittests/test_diag_embed.py index 8eeb7849da22d6683af4b459057953a65e4b3518..c91f5d9b86c9c72c16f572ff20b068467738e052 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_embed.py +++ b/python/paddle/fluid/tests/unittests/test_diag_embed.py @@ -21,7 +21,6 @@ import paddle.fluid.core as core class TestDiagEmbedOp(OpTest): - def setUp(self): self.op_type = 
"diag_embed" self.python_api = F.diag_embed @@ -39,17 +38,16 @@ class TestDiagEmbedOp(OpTest): class TestDiagEmbedOpCase1(TestDiagEmbedOp): - def init_config(self): self.case = np.random.randn(2, 3).astype('float32') self.inputs = {'Input': self.case} self.attrs = {'offset': -1, 'dim1': 0, 'dim2': 2} - self.target = np.stack([np.diag(r, -1) for r in self.inputs['Input']], - 1) + self.target = np.stack( + [np.diag(r, -1) for r in self.inputs['Input']], 1 + ) class TestDiagEmbedAPICase(unittest.TestCase): - def test_case1(self): diag_embed = np.random.randn(2, 3, 4).astype('float32') data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32') @@ -58,14 +56,18 @@ class TestDiagEmbedAPICase(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - results = exe.run(fluid.default_main_program(), - feed={"data1": diag_embed}, - fetch_list=[out1, out2], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": diag_embed}, + fetch_list=[out1, out2], + return_numpy=True, + ) target1 = np.stack( - [np.stack([np.diag(s, 0) for s in r], 0) for r in diag_embed], 0) + [np.stack([np.diag(s, 0) for s in r], 0) for r in diag_embed], 0 + ) target2 = np.stack( - [np.stack([np.diag(s, 1) for s in r], 0) for r in diag_embed], 0) + [np.stack([np.diag(s, 1) for s in r], 0) for r in diag_embed], 0 + ) np.testing.assert_allclose(results[0], target1, rtol=1e-05) np.testing.assert_allclose(results[1], target2, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_diag_v2.py b/python/paddle/fluid/tests/unittests/test_diag_v2.py index 5fb93f3b19a10a939376f4269db55e0e3519e703..f1ff2b77d40411e1ed52da33a562a31bacf14b74 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_v2.py +++ b/python/paddle/fluid/tests/unittests/test_diag_v2.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDiagV2Op(OpTest): - def setUp(self): self.op_type = "diag_v2" self.python_api = paddle.diag @@ -35,7 +34,7 @@ class TestDiagV2Op(OpTest): self.inputs = {'X': self.x} self.attrs = { 'offset': self.offset, - 'padding_value': self.padding_value + 'padding_value': self.padding_value, } self.outputs = {'Out': self.out} @@ -52,38 +51,36 @@ class TestDiagV2Op(OpTest): class TestDiagV2OpCase1(TestDiagV2Op): - def init_config(self): self.offset = 1 self.out = np.diag(self.x, self.offset) class TestDiagV2OpCase2(TestDiagV2Op): - def init_config(self): self.offset = -1 self.out = np.diag(self.x, self.offset) class TestDiagV2OpCase3(TestDiagV2Op): - def init_config(self): self.x = np.random.randint(-10, 10, size=(10, 10)).astype("float64") self.out = np.diag(self.x, self.offset) class TestDiagV2OpCase4(TestDiagV2Op): - def init_config(self): self.x = np.random.rand(100) self.padding_value = 2 n = self.x.size - self.out = self.padding_value * np.ones((n, n)) + np.diag( - self.x, self.offset) - np.diag(self.padding_value * np.ones(n)) + self.out = ( + self.padding_value * np.ones((n, n)) + + np.diag(self.x, self.offset) + - np.diag(self.padding_value * np.ones(n)) + ) class TestDiagV2Error(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -104,7 +101,6 @@ class TestDiagV2Error(unittest.TestCase): class TestDiagV2API(unittest.TestCase): - def setUp(self): self.input_np = np.random.random(size=(10, 10)).astype(np.float32) self.expected0 = np.diag(self.input_np) @@ -115,21 +111,27 @@ class TestDiagV2API(unittest.TestCase): self.offset = 0 self.padding_value = 8 n = self.input_np2.size - 
self.expected3 = self.padding_value * np.ones( - (n, n)) + np.diag(self.input_np2, self.offset) - np.diag( - self.padding_value * np.ones(n)) + self.expected3 = ( + self.padding_value * np.ones((n, n)) + + np.diag(self.input_np2, self.offset) + - np.diag(self.padding_value * np.ones(n)) + ) self.input_np3 = np.random.randint(-10, 10, size=(100)).astype(np.int64) self.padding_value = 8.0 n = self.input_np3.size - self.expected4 = self.padding_value * np.ones( - (n, n)) + np.diag(self.input_np3, self.offset) - np.diag( - self.padding_value * np.ones(n)) + self.expected4 = ( + self.padding_value * np.ones((n, n)) + + np.diag(self.input_np3, self.offset) + - np.diag(self.padding_value * np.ones(n)) + ) self.padding_value = -8 - self.expected5 = self.padding_value * np.ones( - (n, n)) + np.diag(self.input_np3, self.offset) - np.diag( - self.padding_value * np.ones(n)) + self.expected5 = ( + self.padding_value * np.ones((n, n)) + + np.diag(self.input_np3, self.offset) + - np.diag(self.padding_value * np.ones(n)) + ) self.input_np4 = np.random.random(size=(2000, 2000)).astype(np.float32) self.expected6 = np.diag(self.input_np4) @@ -194,13 +196,13 @@ class TestDiagV2API(unittest.TestCase): x = paddle.static.data(name='input', shape=[10, 10], dtype='float32') x2 = paddle.static.data(name='input2', shape=[100], dtype='float64') x3 = paddle.static.data(name='input3', shape=[100], dtype='int64') - x4 = paddle.static.data(name='input4', - shape=[2000, 2000], - dtype='float32') + x4 = paddle.static.data( + name='input4', shape=[2000, 2000], dtype='float32' + ) x5 = paddle.static.data(name='input5', shape=[2000], dtype='float32') - x6 = paddle.static.data(name='input6', - shape=[2000, 1500], - dtype='float32') + x6 = paddle.static.data( + name='input6', shape=[2000, 1500], dtype='float32' + ) result0 = paddle.diag(x) result1 = paddle.diag(x, offset=1) result2 = paddle.diag(x, offset=-1) @@ -219,19 +221,45 @@ class TestDiagV2API(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - res0, res1, res2, res4, res5, res6, res7, res8, res9, res10, res11, res12, res13 = exe.run( + ( + res0, + res1, + res2, + res4, + res5, + res6, + res7, + res8, + res9, + res10, + res11, + res12, + res13, + ) = exe.run( feed={ "input": self.input_np, "input2": self.input_np2, 'input3': self.input_np3, 'input4': self.input_np4, 'input5': self.input_np5, - 'input6': self.input_np6 + 'input6': self.input_np6, }, fetch_list=[ - result0, result1, result2, result4, result5, result6, result7, - result8, result9, result10, result11, result12, result13 - ]) + result0, + result1, + result2, + result4, + result5, + result6, + result7, + result8, + result9, + result10, + result11, + result12, + result13, + ], + ) np.testing.assert_allclose(res0, self.expected0, rtol=1e-05) np.testing.assert_allclose(res1, self.expected1, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_diagflat.py b/python/paddle/fluid/tests/unittests/test_diagflat.py index ec6bf982991888d85215ce0e4dfc95676dc66d2d..f23c5051ddbbc9ec586f3a9b79837d58fafbd5ca 100644 --- a/python/paddle/fluid/tests/unittests/test_diagflat.py +++ b/python/paddle/fluid/tests/unittests/test_diagflat.py @@ -19,7 +19,6 @@ from paddle.static import Program, program_guard class TestDiagFlatError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -35,7 +34,6 @@ class TestDiagFlatError(unittest.TestCase): class 
TestDiagFlatAPI(unittest.TestCase): - def setUp(self): self.input_np = np.random.random(size=(10, 10)).astype(np.float64) self.expected0 = np.diagflat(self.input_np) @@ -77,11 +75,10 @@ class TestDiagFlatAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) - res0, res3 = exe.run(feed={ - "input": self.input_np, - 'input2': self.input_np2 - }, - fetch_list=[result0, result3]) + res0, res3 = exe.run( + feed={"input": self.input_np, 'input2': self.input_np2}, + fetch_list=[result0, result3], + ) np.testing.assert_allclose(res0, self.expected0, rtol=1e-05) np.testing.assert_allclose(res3, self.expected3, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_diagonal_op.py b/python/paddle/fluid/tests/unittests/test_diagonal_op.py index c7976856bd667e3a703f6b37b5a3d9bda14069ae..bdc64827fd2a821d1f9f112e4da4d4e9fc14a0ac 100644 --- a/python/paddle/fluid/tests/unittests/test_diagonal_op.py +++ b/python/paddle/fluid/tests/unittests/test_diagonal_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestDiagonalOp(OpTest): - def setUp(self): self.op_type = "diagonal" self.python_api = paddle.diagonal @@ -39,62 +38,68 @@ class TestDiagonalOp(OpTest): self.case = np.random.randn(10, 5, 2).astype('float64') self.inputs = {'Input': self.case} self.attrs = {'offset': 0, 'axis1': 0, 'axis2': 1} - self.target = np.diagonal(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.diagonal( + self.inputs['Input'], + offset=self.attrs['offset'], + axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) class TestDiagonalOpCase1(TestDiagonalOp): - def init_config(self): self.case = np.random.randn(4, 2, 4, 4).astype('float32') self.inputs = {'Input': self.case} self.attrs = {'offset': -2, 'axis1': 3, 'axis2': 0} - self.target = np.diagonal(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.diagonal( + self.inputs['Input'], + offset=self.attrs['offset'], + axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) class TestDiagonalOpCase2(TestDiagonalOp): - def init_config(self): self.case = np.random.randn(100, 100).astype('int64') self.inputs = {'Input': self.case} self.attrs = {'offset': 0, 'axis1': 0, 'axis2': 1} - self.target = np.diagonal(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.diagonal( + self.inputs['Input'], + offset=self.attrs['offset'], + axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) self.grad_x = np.eye(100).astype('int64') self.grad_out = np.ones(100).astype('int64') def test_check_grad(self): - self.check_grad(['Input'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['Input'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) class TestDiagonalOpCase3(TestDiagonalOp): - def init_config(self): self.case = np.random.randint(0, 2, (4, 2, 4, 4)).astype('bool') self.inputs = {'Input': self.case} self.attrs = {'offset': -2, 'axis1': 3, 'axis2': 0} - self.target = np.diagonal(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.diagonal( + self.inputs['Input'], + offset=self.attrs['offset'], + 
axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) def test_check_grad(self): pass class TestDiagonalAPI(unittest.TestCase): - def setUp(self): self.shape = [10, 3, 4] self.x = np.random.random((10, 3, 4)).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_diff_op.py b/python/paddle/fluid/tests/unittests/test_diff_op.py index 8111d1b57b3b032b5510e09cd23228a6472d3bb4..4b3981075a18d2b48ce29e05619c01a2a18b14bd 100644 --- a/python/paddle/fluid/tests/unittests/test_diff_op.py +++ b/python/paddle/fluid/tests/unittests/test_diff_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDiffOp(unittest.TestCase): - def set_args(self): self.input = np.array([1, 4, 5, 2]).astype('float32') self.n = 1 @@ -31,21 +30,21 @@ class TestDiffOp(unittest.TestCase): def get_output(self): if self.prepend is not None and self.append is not None: - self.output = np.diff(self.input, - n=self.n, - axis=self.axis, - prepend=self.prepend, - append=self.append) + self.output = np.diff( + self.input, + n=self.n, + axis=self.axis, + prepend=self.prepend, + append=self.append, + ) elif self.prepend is not None: - self.output = np.diff(self.input, - n=self.n, - axis=self.axis, - prepend=self.prepend) + self.output = np.diff( + self.input, n=self.n, axis=self.axis, prepend=self.prepend + ) elif self.append is not None: - self.output = np.diff(self.input, - n=self.n, - axis=self.axis, - append=self.append) + self.output = np.diff( + self.input, n=self.n, axis=self.axis, append=self.append + ) else: self.output = np.diff(self.input, n=self.n, axis=self.axis) @@ -64,11 +63,13 @@ class TestDiffOp(unittest.TestCase): self.prepend = paddle.to_tensor(self.prepend, place=place) if self.append is not None: self.append = paddle.to_tensor(self.append, place=place) - out = paddle.diff(x, - n=self.n, - axis=self.axis, - prepend=self.prepend, - append=self.append) + out = paddle.diff( + x, + n=self.n, + axis=self.axis, + prepend=self.prepend, + append=self.append, + ) self.assertTrue((out.numpy() == self.output).all(), True) def test_dygraph(self): @@ -85,36 +86,40 @@ class TestDiffOp(unittest.TestCase): places.append(fluid.CUDAPlace(0)) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): - x = paddle.fluid.data(name="input", - shape=self.input.shape, - dtype=self.input.dtype) + x = paddle.fluid.data( + name="input", shape=self.input.shape, dtype=self.input.dtype + ) has_pend = False prepend = None append = None if self.prepend is not None: has_pend = True - prepend = paddle.fluid.data(name="prepend", - shape=self.prepend.shape, - dtype=self.prepend.dtype) + prepend = paddle.fluid.data( + name="prepend", + shape=self.prepend.shape, + dtype=self.prepend.dtype, + ) if self.append is not None: has_pend = True - append = paddle.fluid.data(name="append", - shape=self.append.shape, - dtype=self.append.dtype) + append = paddle.fluid.data( + name="append", + shape=self.append.shape, + dtype=self.append.dtype, + ) exe = fluid.Executor(place) - out = paddle.diff(x, - n=self.n, - axis=self.axis, - prepend=prepend, - append=append) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input": self.input, - "prepend": self.prepend, - "append": self.append - }, - fetch_list=[out]) + out = paddle.diff( + x, n=self.n, axis=self.axis, prepend=prepend, append=append + ) + fetches = exe.run( + fluid.default_main_program(), + feed={ + "input": self.input, + "prepend": self.prepend, + "append": self.append, + }, + fetch_list=[out], + ) 
self.assertTrue((fetches[0] == self.output).all(), True) def func_grad(self): @@ -124,11 +129,13 @@ class TestDiffOp(unittest.TestCase): self.prepend = paddle.to_tensor(self.prepend, place=place) if self.append is not None: self.append = paddle.to_tensor(self.append, place=place) - out = paddle.diff(x, - n=self.n, - axis=self.axis, - prepend=self.prepend, - append=self.append) + out = paddle.diff( + x, + n=self.n, + axis=self.axis, + prepend=self.prepend, + append=self.append, + ) try: out.backward() x_grad = x.grad @@ -144,7 +151,6 @@ class TestDiffOp(unittest.TestCase): class TestDiffOpAxis(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 @@ -154,7 +160,6 @@ class TestDiffOpAxis(TestDiffOp): class TestDiffOpNDim(TestDiffOp): - def set_args(self): self.input = np.random.rand(10, 10).astype('float32') self.n = 1 @@ -164,7 +169,6 @@ class TestDiffOpNDim(TestDiffOp): class TestDiffOpBool(TestDiffOp): - def set_args(self): self.input = np.array([0, 1, 1, 0, 1, 0]).astype('bool') self.n = 1 @@ -174,7 +178,6 @@ class TestDiffOpBool(TestDiffOp): class TestDiffOpPrepend(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 @@ -184,18 +187,17 @@ class TestDiffOpPrepend(TestDiffOp): class TestDiffOpPrependAxis(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 self.axis = 0 - self.prepend = np.array([[0, 2, 3, 4], [1, 3, 5, 7], - [2, 5, 8, 0]]).astype('float32') + self.prepend = np.array( + [[0, 2, 3, 4], [1, 3, 5, 7], [2, 5, 8, 0]] + ).astype('float32') self.append = None class TestDiffOpAppend(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 @@ -205,7 +207,6 @@ class TestDiffOpAppend(TestDiffOp): class TestDiffOpAppendAxis(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 @@ -215,7 +216,6 @@ class TestDiffOpAppendAxis(TestDiffOp): class TestDiffOpPreAppend(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 @@ -225,7 +225,6 @@ class TestDiffOpPreAppend(TestDiffOp): class TestDiffOpPreAppendAxis(TestDiffOp): - def set_args(self): self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32') self.n = 1 diff --git a/python/paddle/fluid/tests/unittests/test_digamma_op.py b/python/paddle/fluid/tests/unittests/test_digamma_op.py index 546744485b084c511b77842848798a808f3757c5..ff9e2b182759a9121d4f2e4735eb2cbf8d6d9399 100644 --- a/python/paddle/fluid/tests/unittests/test_digamma_op.py +++ b/python/paddle/fluid/tests/unittests/test_digamma_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDigammaOp(OpTest): - def setUp(self): # switch to static paddle.enable_static() @@ -49,7 +48,6 @@ class TestDigammaOp(OpTest): class TestDigammaOpFp32(TestDigammaOp): - def init_dtype_type(self): self.dtype = np.float32 @@ -58,7 +56,6 @@ class TestDigammaOpFp32(TestDigammaOp): class TestDigammaAPI(unittest.TestCase): - def setUp(self): # switch to static paddle.enable_static() @@ -70,7 +67,6 @@ class TestDigammaAPI(unittest.TestCase): self._shape = [8, 3, 32, 32] def test_in_static_mode(self): - def init_input_output(dtype): input = np.random.random(self._shape).astype(dtype) return {'x': input}, psi(input) diff --git a/python/paddle/fluid/tests/unittests/test_directory_migration.py 
b/python/paddle/fluid/tests/unittests/test_directory_migration.py index 46a2c53a73112d82cb12a60f45585e3a6c0fa4b8..408ad42379e6204dae573a86c5ea5e8fa045685a 100644 --- a/python/paddle/fluid/tests/unittests/test_directory_migration.py +++ b/python/paddle/fluid/tests/unittests/test_directory_migration.py @@ -20,7 +20,6 @@ import unittest class TestDirectory(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -38,14 +37,25 @@ class TestDirectory(unittest.TestCase): def test_new_directory(self): new_directory = [ - 'paddle.enable_static', 'paddle.disable_static', - 'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad', - 'paddle.no_grad', 'paddle.static.save', 'paddle.static.load', - 'paddle.distributed.ParallelEnv', 'paddle.DataParallel', - 'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static', - 'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer', - 'paddle.jit.save', 'paddle.jit.load', - 'paddle.optimizer.lr.LRScheduler', 'paddle.optimizer.lr.NoamDecay', + 'paddle.enable_static', + 'paddle.disable_static', + 'paddle.in_dynamic_mode', + 'paddle.to_tensor', + 'paddle.grad', + 'paddle.no_grad', + 'paddle.static.save', + 'paddle.static.load', + 'paddle.distributed.ParallelEnv', + 'paddle.DataParallel', + 'paddle.jit', + 'paddle.jit.TracedLayer', + 'paddle.jit.to_static', + 'paddle.jit.ProgramTranslator', + 'paddle.jit.TranslatedLayer', + 'paddle.jit.save', + 'paddle.jit.load', + 'paddle.optimizer.lr.LRScheduler', + 'paddle.optimizer.lr.NoamDecay', 'paddle.optimizer.lr.PiecewiseDecay', 'paddle.optimizer.lr.NaturalExpDecay', 'paddle.optimizer.lr.ExponentialDecay', @@ -53,30 +63,47 @@ class TestDirectory(unittest.TestCase): 'paddle.optimizer.lr.PolynomialDecay', 'paddle.optimizer.lr.CosineAnnealingDecay', 'paddle.optimizer.lr.MultiStepDecay', - 'paddle.optimizer.lr.StepDecay', 'paddle.optimizer.lr.LambdaDecay', + 'paddle.optimizer.lr.StepDecay', + 'paddle.optimizer.lr.LambdaDecay', 'paddle.optimizer.lr.ReduceOnPlateau', - 'paddle.optimizer.lr.LinearWarmup', 'paddle.static.Executor', - 'paddle.static.global_scope', 'paddle.static.scope_guard', - 'paddle.static.append_backward', 'paddle.static.gradients', - 'paddle.static.BuildStrategy', 'paddle.static.CompiledProgram', + 'paddle.optimizer.lr.LinearWarmup', + 'paddle.static.Executor', + 'paddle.static.global_scope', + 'paddle.static.scope_guard', + 'paddle.static.append_backward', + 'paddle.static.gradients', + 'paddle.static.BuildStrategy', + 'paddle.static.CompiledProgram', 'paddle.static.ExecutionStrategy', 'paddle.static.default_main_program', - 'paddle.static.default_startup_program', 'paddle.static.Program', - 'paddle.static.name_scope', 'paddle.static.program_guard', - 'paddle.static.Print', 'paddle.static.py_func', + 'paddle.static.default_startup_program', + 'paddle.static.Program', + 'paddle.static.name_scope', + 'paddle.static.program_guard', + 'paddle.static.Print', + 'paddle.static.py_func', 'paddle.static.ParallelExecutor', - 'paddle.static.WeightNormParamAttr', 'paddle.static.nn.fc', + 'paddle.static.WeightNormParamAttr', + 'paddle.static.nn.fc', 'paddle.static.nn.batch_norm', 'paddle.static.nn.bilinear_tensor_product', - 'paddle.static.nn.conv2d', 'paddle.static.nn.conv2d_transpose', - 'paddle.static.nn.conv3d', 'paddle.static.nn.conv3d_transpose', + 'paddle.static.nn.conv2d', + 'paddle.static.nn.conv2d_transpose', + 'paddle.static.nn.conv3d', + 'paddle.static.nn.conv3d_transpose', 'paddle.static.nn.create_parameter', - 'paddle.static.nn.crf_decoding', 
'paddle.static.nn.data_norm', - 'paddle.static.nn.deform_conv2d', 'paddle.static.nn.group_norm', - 'paddle.static.nn.instance_norm', 'paddle.static.nn.layer_norm', - 'paddle.static.nn.multi_box_head', 'paddle.static.nn.nce', - 'paddle.static.nn.prelu', 'paddle.static.nn.row_conv', - 'paddle.static.nn.spectral_norm', 'paddle.static.nn.embedding' + 'paddle.static.nn.crf_decoding', + 'paddle.static.nn.data_norm', + 'paddle.static.nn.deform_conv2d', + 'paddle.static.nn.group_norm', + 'paddle.static.nn.instance_norm', + 'paddle.static.nn.layer_norm', + 'paddle.static.nn.multi_box_head', + 'paddle.static.nn.nce', + 'paddle.static.nn.prelu', + 'paddle.static.nn.row_conv', + 'paddle.static.nn.spectral_norm', + 'paddle.static.nn.embedding', ] import_file = os.path.join(self.temp_dir.name, 'run_import_modules.py') @@ -89,58 +116,89 @@ class TestDirectory(unittest.TestCase): _python = sys.executable ps_cmd = "{} {}".format(_python, import_file) - ps_proc = subprocess.Popen(ps_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) stdout, stderr = ps_proc.communicate() - self.assertFalse("Error" in str(stderr), - "ErrorMessage:\n{}".format(bytes.decode(stderr))) + self.assertFalse( + "Error" in str(stderr), + "ErrorMessage:\n{}".format(bytes.decode(stderr)), + ) def test_old_directory(self): old_directory = [ - 'paddle.enable_imperative', 'paddle.disable_imperative', - 'paddle.in_imperative_mode', 'paddle.imperative.to_variable', - 'paddle.imperative.enable', 'paddle.imperative.guard', - 'paddle.imperative.grad', 'paddle.imperative.no_grad', - 'paddle.imperative.save', 'paddle.imperative.load', + 'paddle.enable_imperative', + 'paddle.disable_imperative', + 'paddle.in_imperative_mode', + 'paddle.imperative.to_variable', + 'paddle.imperative.enable', + 'paddle.imperative.guard', + 'paddle.imperative.grad', + 'paddle.imperative.no_grad', + 'paddle.imperative.save', + 'paddle.imperative.load', 'paddle.imperative.ParallelEnv', 'paddle.imperative.prepare_context', - 'paddle.imperative.DataParalell', 'paddle.imperative.jit', - 'paddle.imperative.TracedLayer', 'paddle.imperative.declarative', + 'paddle.imperative.DataParalell', + 'paddle.imperative.jit', + 'paddle.imperative.TracedLayer', + 'paddle.imperative.declarative', 'paddle.imperative.ProgramTranslator', - 'paddle.imperative.TranslatedLayer', 'paddle.imperative.jit.save', - 'paddle.imperative.jit.load', 'paddle.imperative.NoamDecay' - 'paddle.imperative.PiecewiseDecay', + 'paddle.imperative.TranslatedLayer', + 'paddle.imperative.jit.save', + 'paddle.imperative.jit.load', + 'paddle.imperative.NoamDecay' 'paddle.imperative.PiecewiseDecay', 'paddle.imperative.NaturalExpDecay', 'paddle.imperative.ExponentialDecay', 'paddle.imperative.InverseTimeDecay', 'paddle.imperative.PolynomialDecay', - 'paddle.imperative.CosineDecay', 'paddle.Executor', - 'paddle.global_scope', 'paddle.scope_guard', - 'paddle.append_backward', 'paddle.gradients', - 'paddle.BuildStrategy', 'paddle.CompiledProgram', - 'paddle.ExecutionStrategy', 'paddle.name_scope', - 'paddle.program_guard', 'paddle.Print', 'paddle.py_func', - 'paddle.ParallelExecutor', 'paddle.default_main_program', - 'paddle.default_startup_program', 'paddle.Program', - 'paddle.WeightNormParamAttr', 'paddle.declarative.fc', + 'paddle.imperative.CosineDecay', + 'paddle.Executor', + 'paddle.global_scope', + 'paddle.scope_guard', + 'paddle.append_backward', + 'paddle.gradients', + 
'paddle.BuildStrategy', + 'paddle.CompiledProgram', + 'paddle.ExecutionStrategy', + 'paddle.name_scope', + 'paddle.program_guard', + 'paddle.Print', + 'paddle.py_func', + 'paddle.ParallelExecutor', + 'paddle.default_main_program', + 'paddle.default_startup_program', + 'paddle.Program', + 'paddle.WeightNormParamAttr', + 'paddle.declarative.fc', 'paddle.declarative.batch_norm', 'paddle.declarative.bilinear_tensor_product', - 'paddle.declarative.conv2d', 'paddle.declarative.conv2d_transpose', - 'paddle.declarative.conv3d', 'paddle.declarative.conv3d_transpose', + 'paddle.declarative.conv2d', + 'paddle.declarative.conv2d_transpose', + 'paddle.declarative.conv3d', + 'paddle.declarative.conv3d_transpose', 'paddle.declarative.create_parameter', - 'paddle.declarative.crf_decoding', 'paddle.declarative.data_norm', + 'paddle.declarative.crf_decoding', + 'paddle.declarative.data_norm', 'paddle.declarative.deformable_conv', - 'paddle.declarative.group_norm', 'paddle.declarative.hsigmoid', - 'paddle.declarative.instance_norm', 'paddle.declarative.layer_norm', - 'paddle.declarative.multi_box_head', 'paddle.declarative.nce', - 'paddle.declarative.prelu', 'paddle.declarative.row_conv', - 'paddle.declarative.spectral_norm', 'paddle.declarative.embedding' + 'paddle.declarative.group_norm', + 'paddle.declarative.hsigmoid', + 'paddle.declarative.instance_norm', + 'paddle.declarative.layer_norm', + 'paddle.declarative.multi_box_head', + 'paddle.declarative.nce', + 'paddle.declarative.prelu', + 'paddle.declarative.row_conv', + 'paddle.declarative.spectral_norm', + 'paddle.declarative.embedding', ] - import_file = os.path.join(self.temp_dir.name, - 'run_old_import_modules.py') + import_file = os.path.join( + self.temp_dir.name, 'run_old_import_modules.py' + ) with open(import_file, "w") as wb: cmd_context_count = """ @@ -159,22 +217,26 @@ else: err_module = "{module}" """ cmd_context_loop = cmd_context_loop_template.format( - run_cmd=run_cmd, module=module) + run_cmd=run_cmd, module=module + ) wb.write(cmd_context_loop) cmd_context_print_template = """ if count != {len_old_directory}: print("Error: Module " + err_module + " should not be imported") """ cmd_context_print = cmd_context_print_template.format( - len_old_directory=str(len(old_directory))) + len_old_directory=str(len(old_directory)) + ) wb.write(cmd_context_print) _python = sys.executable ps_cmd = "{} {}".format(_python, import_file) - ps_proc = subprocess.Popen(ps_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) stdout, stderr = ps_proc.communicate() self.assertFalse("Error" in str(stdout), bytes.decode(stdout)) diff --git a/python/paddle/fluid/tests/unittests/test_disable_signal_handler.py b/python/paddle/fluid/tests/unittests/test_disable_signal_handler.py index 50867ad8b0e03457d36a2f437545f084810f4b6b..f0de021eaadf0ef6f43f0a4ecb2793b72117afb0 100644 --- a/python/paddle/fluid/tests/unittests/test_disable_signal_handler.py +++ b/python/paddle/fluid/tests/unittests/test_disable_signal_handler.py @@ -17,21 +17,27 @@ import signal import subprocess SignalsToTest = { - signal.SIGTERM, signal.SIGBUS, signal.SIGABRT, signal.SIGSEGV, - signal.SIGILL, signal.SIGFPE + signal.SIGTERM, + signal.SIGBUS, + signal.SIGABRT, + signal.SIGSEGV, + signal.SIGILL, + signal.SIGFPE, } class TestSignOpError(unittest.TestCase): - def test_errors(self): try: for sig in SignalsToTest: - output = subprocess.check_output([ - "python", 
"-c", - f"import paddle; import signal,os; paddle.disable_signal_handler(); os.kill(os.getpid(), {sig})" - ], - stderr=subprocess.STDOUT) + output = subprocess.check_output( + [ + "python", + "-c", + f"import paddle; import signal,os; paddle.disable_signal_handler(); os.kill(os.getpid(), {sig})", + ], + stderr=subprocess.STDOUT, + ) except Exception as e: # If paddle signal handler is enabled # One would expect "paddle::framework::SignalHandle" in STDERR diff --git a/python/paddle/fluid/tests/unittests/test_dist_allreduce_op.py b/python/paddle/fluid/tests/unittests/test_dist_allreduce_op.py index af2c8f1aee7104f55f6be0ec00c3001590127bc3..c2469ab92b7e5e90f44ea7f02365f73f94b8573d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_dist_allreduce_op.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistNCCL2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -30,6 +29,7 @@ class TestDistMnistNCCL2(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_allreduce_op.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index d5287270769549064204ec96e8d37f0803ca47b3..4c109feaef2358659ec9ab7de37147db376d6dee 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -53,26 +53,30 @@ def eprint(*args, **kwargs): class TestDistRunnerBase(object): - - def get_model(self, - batch_size=DEFAULT_BATCH_SIZE, - lr=0.1, - single_device=False, - use_dgc=False, - dist_strategy=None): + def get_model( + self, + batch_size=DEFAULT_BATCH_SIZE, + lr=0.1, + single_device=False, + use_dgc=False, + dist_strategy=None, + ): raise NotImplementedError( - "get_model should be implemented by child classes.") + "get_model should be implemented by child classes." + ) @staticmethod - def get_transpiler(trainer_id, - main_program, - pserver_endpoints, - trainers, - sync_mode, - dc_asgd=False, - current_endpoint=None, - nccl_comm_num=1, - hogwild_mode=False): + def get_transpiler( + trainer_id, + main_program, + pserver_endpoints, + trainers, + sync_mode, + dc_asgd=False, + current_endpoint=None, + nccl_comm_num=1, + hogwild_mode=False, + ): # NOTE: import fluid until runtime, or else forking processes will cause error. 
config = fluid.DistributeTranspilerConfig() config.enable_dc_asgd = dc_asgd @@ -83,12 +87,14 @@ class TestDistRunnerBase(object): config.nccl_comm_num = nccl_comm_num # config.runtime_split_send_recv = True t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id=trainer_id, - program=main_program, - pservers=pserver_endpoints, - trainers=trainers, - sync_mode=sync_mode, - current_endpoint=current_endpoint) + t.transpile( + trainer_id=trainer_id, + program=main_program, + pservers=pserver_endpoints, + trainers=trainers, + sync_mode=sync_mode, + current_endpoint=current_endpoint, + ) return t @staticmethod @@ -96,6 +102,7 @@ class TestDistRunnerBase(object): lr_sheduler = None if hasattr(program, 'lr_sheduler'): from paddle.optimizer.lr import LRScheduler + lr_sheduler = program.lr_sheduler assert isinstance(lr_sheduler, LRScheduler), "must be LRScheduler" return lr_sheduler @@ -105,16 +112,19 @@ class TestDistRunnerBase(object): self.get_model(batch_size=args.batch_size) # NOTE: pserver should not call memory optimize - t = self.get_transpiler(trainer_id=args.trainer_id, - main_program=fluid.default_main_program(), - pserver_endpoints=args.endpoints, - trainers=args.trainers, - sync_mode=args.sync_mode, - dc_asgd=args.dc_asgd, - hogwild_mode=args.hogwild) + t = self.get_transpiler( + trainer_id=args.trainer_id, + main_program=fluid.default_main_program(), + pserver_endpoints=args.endpoints, + trainers=args.trainers, + sync_mode=args.sync_mode, + dc_asgd=args.dc_asgd, + hogwild_mode=args.hogwild, + ) pserver_prog = t.get_pserver_program(args.current_endpoint) - startup_prog = t.get_startup_program(args.current_endpoint, - pserver_prog) + startup_prog = t.get_startup_program( + args.current_endpoint, pserver_prog + ) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -127,8 +137,17 @@ class TestDistRunnerBase(object): self.lr = args.lr dist_strategy = DistributedStrategy() - test_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader = \ - self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + data_loader, + ) = self.get_model( + batch_size=args.batch_size, dist_strategy=dist_strategy + ) device_id = int(os.getenv("FLAGS_selected_gpus", "0")) eprint(type(self).__name__, "device_id: %d." 
% device_id) @@ -169,8 +188,14 @@ class TestDistRunnerBase(object): self.lr = args.lr print_to_err("use_fleet 2.0", "fleet.node_num:") - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=args.batch_size) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model(batch_size=args.batch_size) if fluid.core.is_compiled_with_cuda(): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) @@ -188,8 +213,8 @@ class TestDistRunnerBase(object): eprint(type(self).__name__, "run worker startup program done.") feed_var_list = [ - var for var in - fluid.default_main_program().global_block().vars.values() + var + for var in fluid.default_main_program().global_block().vars.values() if var.is_data ] @@ -203,8 +228,10 @@ class TestDistRunnerBase(object): def get_data(): origin_batch = next(reader_generator) - if paddle.distributed.get_world_size( - ) == 1 and args.update_method == 'gloo': # Gloo single mode + if ( + paddle.distributed.get_world_size() == 1 + and args.update_method == 'gloo' + ): # Gloo single mode return origin_batch elif args.update_method != "local" and args.use_reader_alloc: @@ -219,9 +246,11 @@ class TestDistRunnerBase(object): print_to_err(type(self).__name__, "begin to train on trainer") out_losses = [] for i in range(RUN_STEP): - loss, = exe.run(fluid.default_main_program(), - fetch_list=[avg_cost.name], - feed=feeder.feed(get_data())) + (loss,) = exe.run( + fluid.default_main_program(), + fetch_list=[avg_cost.name], + feed=feeder.feed(get_data()), + ) out_losses.append(loss[0]) print_to_err(type(self).__name__, "run step %d finished" % i) print_to_err(type(self).__name__, "trainer run finished") @@ -254,8 +283,16 @@ class TestDistRunnerBase(object): # "fleet.node_id:", fleet.node_id(), # "fleet.trainer_num:", fleet.worker_num()) - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model( + batch_size=args.batch_size, dist_strategy=dist_strategy + ) trainer_prog = fleet._origin_program dist_prog = fleet.main_program @@ -276,7 +313,8 @@ class TestDistRunnerBase(object): eprint(type(self).__name__, "run worker startup program done.") feed_var_list = [ - var for var in trainer_prog.global_block().vars.values() + var + for var in trainer_prog.global_block().vars.values() if var.is_data ] @@ -304,9 +342,11 @@ class TestDistRunnerBase(object): print_to_err(type(self).__name__, "begin to train on trainer") out_losses = [] for i in range(RUN_STEP): - loss, = exe.run(dist_prog, - fetch_list=[avg_cost.name], - feed=feeder.feed(get_data())) + (loss,) = exe.run( + dist_prog, + fetch_list=[avg_cost.name], + feed=feeder.feed(get_data()), + ) out_losses.append(loss[0]) print_to_err(type(self).__name__, "run step %d finished" % i) print_to_err(type(self).__name__, "trainer run finished") @@ -316,86 +356,131 @@ class TestDistRunnerBase(object): if args.save_model: model_save_dir = "/tmp" if fleet.worker_index() == 0: - model_save_dir_fluid = os.path.join(model_save_dir, - "fluid_persistables") - model_save_dir_fleet = os.path.join(model_save_dir, - "fleet_persistables") - infer_save_dir_fluid = os.path.join(model_save_dir, - "fluid_infer") - infer_save_dir_fleet = os.path.join(model_save_dir, - "fleet_infer") + model_save_dir_fluid = os.path.join( + model_save_dir, "fluid_persistables" + ) + 
model_save_dir_fleet = os.path.join( + model_save_dir, "fleet_persistables" + ) + infer_save_dir_fluid = os.path.join( + model_save_dir, "fluid_infer" + ) + infer_save_dir_fleet = os.path.join( + model_save_dir, "fleet_infer" + ) else: - model_save_dir_fluid = os.path.join(model_save_dir, - "fluid_persistables_2") - model_save_dir_fleet = os.path.join(model_save_dir, - "fleet_persistables_2") - infer_save_dir_fluid = os.path.join(model_save_dir, - "fluid_infer_2") - infer_save_dir_fleet = os.path.join(model_save_dir, - "fleet_infer_2") - fluid.io.save_persistables(exe, model_save_dir_fluid, - fleet._origin_program) + model_save_dir_fluid = os.path.join( + model_save_dir, "fluid_persistables_2" + ) + model_save_dir_fleet = os.path.join( + model_save_dir, "fleet_persistables_2" + ) + infer_save_dir_fluid = os.path.join( + model_save_dir, "fluid_infer_2" + ) + infer_save_dir_fleet = os.path.join( + model_save_dir, "fleet_infer_2" + ) + fluid.io.save_persistables( + exe, model_save_dir_fluid, fleet._origin_program + ) fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet) feeded_var_names = [var.name for var in feed_var_list] - fluid.io.save_inference_model(infer_save_dir_fluid, - feeded_var_names, [avg_cost], exe, - fleet._origin_program) - fleet.save_inference_model(exe, infer_save_dir_fleet, - feeded_var_names, [avg_cost]) + fluid.io.save_inference_model( + infer_save_dir_fluid, + feeded_var_names, + [avg_cost], + exe, + fleet._origin_program, + ) + fleet.save_inference_model( + exe, infer_save_dir_fleet, feeded_var_names, [avg_cost] + ) def run_trainer(self, args): self.lr = args.lr if args.nccl2_reduce_layer_local_run: - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=args.batch_size, single_device=True) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model(batch_size=args.batch_size, single_device=True) elif args.use_dgc: - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc) else: - test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ - self.get_model(batch_size=args.batch_size) + ( + test_program, + avg_cost, + train_reader, + test_reader, + batch_acc, + predict, + ) = self.get_model(batch_size=args.batch_size) if args.update_method == "pserver": print_to_err( type(self).__name__, - "begin to run transpile on trainer with pserver mode") - t = self.get_transpiler(trainer_id=args.trainer_id, - main_program=fluid.default_main_program(), - pserver_endpoints=args.endpoints, - trainers=args.trainers, - sync_mode=args.sync_mode, - dc_asgd=args.dc_asgd, - hogwild_mode=args.hogwild) + "begin to run transpile on trainer with pserver mode", + ) + t = self.get_transpiler( + trainer_id=args.trainer_id, + main_program=fluid.default_main_program(), + pserver_endpoints=args.endpoints, + trainers=args.trainers, + sync_mode=args.sync_mode, + dc_asgd=args.dc_asgd, + hogwild_mode=args.hogwild, + ) trainer_prog = t.get_trainer_program() print_to_err( type(self).__name__, - "get trainer program done with pserver mode.") - elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer": + "get trainer program done with pserver mode.", + ) + elif ( + args.update_method == "nccl2" + or args.update_method == 
"nccl2_reduce_layer" + ): # transpile for nccl2 config = fluid.DistributeTranspilerConfig() config.mode = "nccl2" config.nccl_comm_num = args.nccl_comm_num if args.use_hallreduce: config.use_hierarchical_allreduce = True - config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks + config.hierarchical_allreduce_inter_nranks = ( + args.hallreduce_inter_nranks + ) print_to_err( type(self).__name__, - "begin to run transpile on trainer with nccl2 mode") + "begin to run transpile on trainer with nccl2 mode", + ) nccl2_t = fluid.DistributeTranspiler(config=config) - nccl2_t.transpile(args.trainer_id, - program=fluid.default_main_program(), - startup_program=fluid.default_startup_program(), - trainers=args.endpoints, - current_endpoint=args.current_endpoint) + nccl2_t.transpile( + args.trainer_id, + program=fluid.default_main_program(), + startup_program=fluid.default_startup_program(), + trainers=args.endpoints, + current_endpoint=args.current_endpoint, + ) print_to_err( - type(self).__name__, - "get trainer program done. with nccl2 mode") + type(self).__name__, "get trainer program done. with nccl2 mode" + ) trainer_prog = fluid.default_main_program() else: print_to_err( type(self).__name__, - "do nothing about main program, just use it") + "do nothing about main program, just use it", + ) trainer_prog = fluid.default_main_program() print_to_err(type(self).__name__, "use main program done.") @@ -431,9 +516,13 @@ class TestDistRunnerBase(object): build_stra.enable_backward_optimizer_op_deps = True if args.use_reduce: - build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + build_stra.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) else: - build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + build_stra.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.AllReduce + ) pass_builder = None if args.batch_merge_repeat > 1: @@ -441,7 +530,10 @@ class TestDistRunnerBase(object): mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass") mypass.set("num_repeats", args.batch_merge_repeat) - if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer": + if ( + args.update_method == "nccl2" + or args.update_method == "nccl2_reduce_layer" + ): build_stra.num_trainers = len(args.endpoints.split(",")) build_stra.trainer_id = args.trainer_id else: @@ -453,11 +545,13 @@ class TestDistRunnerBase(object): binary = compiler.CompiledProgram(trainer_prog).with_data_parallel( loss_name=avg_cost.name, build_strategy=build_stra, - exec_strategy=exec_strategy) + exec_strategy=exec_strategy, + ) print_to_err(type(self).__name__, "program compiled with data parallel") feed_var_list = [ - var for var in trainer_prog.global_block().vars.values() + var + for var in trainer_prog.global_block().vars.values() if var.is_data ] @@ -479,9 +573,9 @@ class TestDistRunnerBase(object): print_to_err(type(self).__name__, "begin to train on trainer") out_losses = [] for i in range(RUN_STEP): - loss, = exe.run(binary, - fetch_list=[avg_cost.name], - feed=feeder.feed(get_data())) + (loss,) = exe.run( + binary, fetch_list=[avg_cost.name], feed=feeder.feed(get_data()) + ) out_losses.append(loss[0]) print_to_err(type(self).__name__, "run step %d finished" % i) if lr_scheduler is not None: @@ -493,18 +587,21 @@ class TestDistRunnerBase(object): class TestParallelDyGraphRunnerBase(object): - def get_model(self): raise NotImplementedError( - "get_model should be implemented by child classes.") + "get_model should be implemented by child 
classes." + ) def run_one_loop(self, model, opt, data): raise NotImplementedError( - "train_one_loop should be implemented by the child classes.") + "train_one_loop should be implemented by the child classes." + ) def _get_data(self, batch, args): - if paddle.distributed.get_world_size( - ) == 1 and args.update_method == 'gloo': # Gloo single mode + if ( + paddle.distributed.get_world_size() == 1 + and args.update_method == 'gloo' + ): # Gloo single mode return batch elif args.update_method != "local": new_batch = [] @@ -514,8 +611,9 @@ class TestParallelDyGraphRunnerBase(object): # the second rank will get [3,4,5]. # this function is for test sparse_embedding_differ_length if hasattr(args, "diff_batch") and args.diff_batch: - assert len( - batch) > 2, "in differ_batch mode, len(batch) must > 2." + assert ( + len(batch) > 2 + ), "in differ_batch mode, len(batch) must > 2." if paddle.distributed.get_rank() == 0: new_batch.append(batch[0]) elif paddle.distributed.get_rank() == 1: @@ -550,19 +648,25 @@ class TestParallelDyGraphRunnerBase(object): device_id = int(os.getenv("FLAGS_selected_mlus", "0")) place = fluid.MLUPlace(device_id) else: - assert ("Only support CUDAPlace or XPUPlace or CPU(Gloo) for now.") + assert "Only support CUDAPlace or XPUPlace or CPU(Gloo) for now." with fluid.dygraph.guard(place): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed np.random.seed(seed) import random + random.seed(seed) model, train_reader, opt = self.get_model() nranks = len(args.endpoints.split(",")) if args.endpoints else 1 - #if args.update_method == "nccl2": - if args.update_method == "nccl2" or args.update_method == "bkcl" or args.update_method == "hccl" or args.update_method == "cncl": + # if args.update_method == "nccl2": + if ( + args.update_method == "nccl2" + or args.update_method == "bkcl" + or args.update_method == "hccl" + or args.update_method == "cncl" + ): strategy = dygraph.parallel.ParallelStrategy() strategy.nranks = nranks strategy.local_rank = args.trainer_id @@ -571,24 +675,29 @@ class TestParallelDyGraphRunnerBase(object): paddle.distributed.init_parallel_env() print_to_err( type(self).__name__, - "begin to prepare context in dygraph with nccl2") + "begin to prepare context in dygraph with nccl2", + ) dygraph.parallel.prepare_context(strategy) if not args.find_unused_parameters: model = dygraph.parallel.DataParallel( - model, strategy, find_unused_parameters=False) + model, strategy, find_unused_parameters=False + ) else: model = dygraph.parallel.DataParallel( - model, strategy, find_unused_parameters=True) + model, strategy, find_unused_parameters=True + ) print_to_err(type(self).__name__, "model built in dygraph") elif args.update_method == "gloo": paddle.distributed.init_parallel_env() if not args.find_unused_parameters: model = dygraph.parallel.DataParallel( - model, find_unused_parameters=False) + model, find_unused_parameters=False + ) else: model = dygraph.parallel.DataParallel( - model, find_unused_parameters=True) + model, find_unused_parameters=True + ) out_losses = [] print_to_err(type(self).__name__, "begin to run dygraph training") @@ -600,7 +709,8 @@ class TestParallelDyGraphRunnerBase(object): if step_id % 10 == 0: print_to_err( type(self).__name__, - "loss at step %d: %f" % (step_id, loss.numpy())) + "loss at step %d: %f" % (step_id, loss.numpy()), + ) out_losses.append(loss.numpy()) loss.backward() @@ -632,7 +742,8 @@ class TestParallelDyGraphRunnerBase(object): model, train_reader, opt = self.get_model() if 
args.update_method in ["nccl2", "gloo"]: model = paddle.DataParallel( - model, find_unused_parameters=args.find_unused_parameters) + model, find_unused_parameters=args.find_unused_parameters + ) out_losses = [] for step_id, data in enumerate(train_reader()): @@ -650,6 +761,7 @@ class TestParallelDyGraphRunnerBase(object): def run_use_fleet_api_trainer(self, args): import paddle.distributed.fleet as fleet + # 1. enable dygraph paddle.disable_static() @@ -696,18 +808,25 @@ class TestParallelDyGraphRunnerBase(object): def runtime_main(test_class): parser = argparse.ArgumentParser(description='Run dist test.') - parser.add_argument('--role', - type=str, - required=True, - choices=['pserver', 'trainer']) + parser.add_argument( + '--role', type=str, required=True, choices=['pserver', 'trainer'] + ) parser.add_argument('--endpoints', type=str, required=False, default="") - parser.add_argument('--update_method', - type=str, - default="local", - choices=[ - "pserver", "nccl2", "bkcl", "local", - "nccl2_reduce_layer", "gloo", "hccl", "cncl" - ]) + parser.add_argument( + '--update_method', + type=str, + default="local", + choices=[ + "pserver", + "nccl2", + "bkcl", + "local", + "nccl2_reduce_layer", + "gloo", + "hccl", + "cncl", + ], + ) parser.add_argument('--trainer_id', type=int, required=False, default=0) parser.add_argument('--trainers', type=int, required=False, default=1) parser.add_argument('--nccl_comm_num', type=int, required=False, default=1) @@ -719,14 +838,12 @@ def runtime_main(test_class): parser.add_argument('--use_local_sgd', action='store_true') parser.add_argument('--diff_batch', action='store_true') parser.add_argument('--ut4grad_allreduce', action='store_true') - parser.add_argument('--hallreduce_inter_nranks', - type=int, - required=False, - default=2) - parser.add_argument('--current_endpoint', - type=str, - required=False, - default="") + parser.add_argument( + '--hallreduce_inter_nranks', type=int, required=False, default=2 + ) + parser.add_argument( + '--current_endpoint', type=str, required=False, default="" + ) parser.add_argument('--sync_mode', action='store_true') parser.add_argument('--use_cuda', action='store_true') parser.add_argument('--use_cpu', action='store_true') @@ -740,24 +857,24 @@ def runtime_main(test_class): parser.add_argument('--dc_asgd', action='store_true') parser.add_argument('--hogwild', action='store_true') parser.add_argument('--save_model', action='store_true') - parser.add_argument('--use_reader_alloc', - action='store_true', - required=False) + parser.add_argument( + '--use_reader_alloc', action='store_true', required=False + ) parser.add_argument('--batch_size', required=False, type=int, default=2) parser.add_argument('--lr', required=False, type=float, default=0.001) - parser.add_argument('--batch_merge_repeat', - required=False, - type=int, - default=1) - parser.add_argument('--nccl2_reduce_layer_local_run', - required=False, - type=bool, - default=False) + parser.add_argument( + '--batch_merge_repeat', required=False, type=int, default=1 + ) + parser.add_argument( + '--nccl2_reduce_layer_local_run', + required=False, + type=bool, + default=False, + ) parser.add_argument('--sync_batch_norm', action='store_true') - parser.add_argument('--fuse_all_reduce', - required=False, - type=ast.literal_eval, - default=None) + parser.add_argument( + '--fuse_all_reduce', required=False, type=ast.literal_eval, default=None + ) args = parser.parse_args() @@ -782,7 +899,6 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def 
_setup_config(self): raise NotImplementedError("tests should have _setup_config implemented") @@ -872,10 +988,14 @@ class TestDistBase(unittest.TestCase): if DIST_UT_PORT == 0: self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) else: self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT, DIST_UT_PORT + 1) + DIST_UT_PORT, + DIST_UT_PORT + 1, + ) DIST_UT_PORT += 2 self._dist_port = DIST_UT_PORT @@ -887,13 +1007,14 @@ class TestDistBase(unittest.TestCase): self.temp_dir.cleanup() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) print_to_err( - type(self).__name__, "socket name: %s" % s.getsockname()[1]) + type(self).__name__, "socket name: %s" % s.getsockname()[1] + ) return s.getsockname()[1] while True: @@ -902,11 +1023,9 @@ class TestDistBase(unittest.TestCase): self._port_set.add(port) return port - def start_pserver(self, - model_file, - check_error_log, - required_envs, - log_name=""): + def start_pserver( + self, model_file, check_error_log, required_envs, log_name="" + ): ps0_ep, ps1_ep = self._ps_endpoints.split(",") ps_cmd = "%s" @@ -916,12 +1035,20 @@ class TestDistBase(unittest.TestCase): ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver" - ps0_cmd = ps_cmd % \ - (self._python_interp, model_file, self._ps_endpoints, ps0_ep, - self._trainers) - ps1_cmd = ps_cmd % \ - (self._python_interp, model_file, self._ps_endpoints, ps1_ep, - self._trainers) + ps0_cmd = ps_cmd % ( + self._python_interp, + model_file, + self._ps_endpoints, + ps0_ep, + self._trainers, + ) + ps1_cmd = ps_cmd % ( + self._python_interp, + model_file, + self._ps_endpoints, + ps1_ep, + self._trainers, + ) if self._sync_mode: ps0_cmd += " --sync_mode" @@ -935,26 +1062,32 @@ class TestDistBase(unittest.TestCase): ps1_pipe = open(path1, "wb") print_to_err(type(self).__name__, "going to start pserver process 0") - ps0_proc = subprocess.Popen(ps0_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps0_pipe, - env=required_envs) + ps0_proc = subprocess.Popen( + ps0_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps0_pipe, + env=required_envs, + ) print_to_err(type(self).__name__, "going to start pserver process 1") - ps1_proc = subprocess.Popen(ps1_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps1_pipe, - env=required_envs) + ps1_proc = subprocess.Popen( + ps1_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps1_pipe, + env=required_envs, + ) return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe - def _run_local(self, - model, - envs, - check_error_log=False, - batch_size=DEFAULT_BATCH_SIZE, - batch_merge_repeat=1, - log_name="", - devices="1"): + def _run_local( + self, + model, + envs, + check_error_log=False, + batch_size=DEFAULT_BATCH_SIZE, + batch_merge_repeat=1, + log_name="", + devices="1", + ): cmd = self._python_interp @@ -962,8 +1095,10 @@ class TestDistBase(unittest.TestCase): envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '') cmd += " -m coverage run --branch -p" - cmd += " %s --role trainer --update_method local --lr %f" % (model, - self._lr) + cmd += " %s --role trainer --update_method local --lr %f" % ( + model, + self._lr, + ) if batch_size != DEFAULT_BATCH_SIZE: cmd += " --batch_size %d" % batch_size @@ -977,21 
+1112,21 @@ class TestDistBase(unittest.TestCase): env_local = { "CUDA_VISIBLE_DEVICES": devices, "PADDLE_TRAINERS_NUM": "1", - "PADDLE_TRAINER_ID": "0" + "PADDLE_TRAINER_ID": "0", } elif self.__use_xpu: cmd += " --use_xpu" env_local = { "FLAGS_selected_xpus": devices, "PADDLE_TRAINERS_NUM": "1", - "PADDLE_TRAINER_ID": "0" + "PADDLE_TRAINER_ID": "0", } elif self.__use_npu: cmd += " --use_npu" env_local = { "FLAGS_selected_npus": devices, "PADDLE_TRAINERS_NUM": "1", - "PADDLE_TRAINER_ID": "0" + "PADDLE_TRAINER_ID": "0", } else: env_local = {'CPU_NUM': '1'} @@ -1012,15 +1147,19 @@ class TestDistBase(unittest.TestCase): if check_error_log: path = os.path.join(self.temp_dir.name, log_name + "_local.log") err_log = open(path, "wb") - local_proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=err_log, - env=env_local) + local_proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=err_log, + env=env_local, + ) else: - local_proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env_local) + local_proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env_local, + ) local_out, local_err = local_proc.communicate() @@ -1032,27 +1171,29 @@ class TestDistBase(unittest.TestCase): return pickle.loads(local_out) - def _run_local_gloo(self, - model, - envs, - check_error_log=False, - batch_size=DEFAULT_BATCH_SIZE, - batch_merge_repeat=1, - log_name="", - devices="0"): + def _run_local_gloo( + self, + model, + envs, + check_error_log=False, + batch_size=DEFAULT_BATCH_SIZE, + batch_merge_repeat=1, + log_name="", + devices="0", + ): saved_endpoints = self._ps_endpoints self._ps_endpoints = self._ps_endpoints.split(',')[0] - result = self._run_cluster_gloo(model, envs, 'gloo', check_error_log, - log_name) + result = self._run_cluster_gloo( + model, envs, 'gloo', check_error_log, log_name + ) self._ps_endpoints = saved_endpoints return result def _run_cluster(self, model, envs, check_error_log, log_name): # Run dist train to compare with local results - ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model, - check_error_log, - envs, - log_name=log_name) + ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver( + model, check_error_log, envs, log_name=log_name + ) ps0_ep, ps1_ep = self._ps_endpoints.split(",") @@ -1064,12 +1205,24 @@ class TestDistBase(unittest.TestCase): tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f" - tr0_cmd = tr_cmd % \ - (self._python_interp, model, self._ps_endpoints, - 0, ps0_ep, self._trainers, self._lr) - tr1_cmd = tr_cmd % \ - (self._python_interp, model, self._ps_endpoints, - 1, ps1_ep, self._trainers, self._lr) + tr0_cmd = tr_cmd % ( + self._python_interp, + model, + self._ps_endpoints, + 0, + ps0_ep, + self._trainers, + self._lr, + ) + tr1_cmd = tr_cmd % ( + self._python_interp, + model, + self._ps_endpoints, + 1, + ps1_ep, + self._trainers, + self._lr, + ) if self._sync_mode: tr0_cmd += " --sync_mode" @@ -1104,15 +1257,19 @@ class TestDistBase(unittest.TestCase): tr1_pipe = open(path1, "wb") print_to_err(type(self).__name__, "going to start trainer process 0") - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=env0) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0, + ) print_to_err(type(self).__name__, "going to start trainer process 1") - 
tr1_proc = subprocess.Popen(tr1_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=env1) + tr1_proc = subprocess.Popen( + tr1_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=env1, + ) # Wait until trainer process terminate while True: @@ -1140,8 +1297,9 @@ class TestDistBase(unittest.TestCase): return pickle.loads(tr0_out), pickle.loads(tr1_out) - def _get_gloo_trainer_cmd(self, model, ep, update_method, trainer_id, - trainer_num): + def _get_gloo_trainer_cmd( + self, model, ep, update_method, trainer_id, trainer_num + ): env = {} tr_cmd = "%s -u" @@ -1150,16 +1308,22 @@ class TestDistBase(unittest.TestCase): tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f" - tr_cmd = tr_cmd % \ - (self._python_interp, model, self._ps_endpoints, - trainer_id, ep, update_method, self._lr) + tr_cmd = tr_cmd % ( + self._python_interp, + model, + self._ps_endpoints, + trainer_id, + ep, + update_method, + self._lr, + ) if self._use_reduce: tr_cmd += " --use_reduce" if self._use_reader_alloc: tr_cmd += " --use_reader_alloc" - #assert self._use_reduce == False, "gloo not support _use_reduce" - #assert self._use_reader_alloc == False, "gloo not support _use_reduce" + # assert self._use_reduce == False, "gloo not support _use_reduce" + # assert self._use_reader_alloc == False, "gloo not support _use_reduce" if self._save_model: tr_cmd += " --save_model" if self._diff_batch: @@ -1169,15 +1333,17 @@ class TestDistBase(unittest.TestCase): assert self.__use_cuda == False, "gloo not support use cuda" assert self.__use_xpu == False, "gloo not support use xpu" tr_cmd += " --use_cpu" - env.update({ - "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), - "PADDLE_TRAINER_ID": "{}".format(trainer_id), - "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": ep, - "PADDLE_CURRENT_ENDPOINT": ep, - "PADDLE_DISTRI_BACKEND": "gloo", - "GLOG_v": "2", - }) + env.update( + { + "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), + "PADDLE_TRAINER_ID": "{}".format(trainer_id), + "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, + "PADDLE_CURRENT_ENDPOINT": ep, + "PADDLE_CURRENT_ENDPOINT": ep, + "PADDLE_DISTRI_BACKEND": "gloo", + "GLOG_v": "2", + } + ) assert self._use_dgc == False, "gloo not support use dgc" @@ -1199,8 +1365,9 @@ class TestDistBase(unittest.TestCase): assert self._use_fleet_api_20 == False, "gloo not support use fleet api" return tr_cmd, env - def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id, - trainer_num): + def _get_nccl2_trainer_cmd( + self, model, ep, update_method, trainer_id, trainer_num + ): env = {} tr_cmd = "%s -u" @@ -1209,9 +1376,15 @@ class TestDistBase(unittest.TestCase): tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f" - tr_cmd = tr_cmd % \ - (self._python_interp, model, self._ps_endpoints, - trainer_id, ep, update_method, self._lr) + tr_cmd = tr_cmd % ( + self._python_interp, + model, + self._ps_endpoints, + trainer_id, + ep, + update_method, + self._lr, + ) if self._use_reduce: tr_cmd += " --use_reduce" @@ -1221,47 +1394,55 @@ class TestDistBase(unittest.TestCase): tr_cmd += " --save_model" if self.__use_cuda: tr_cmd += " --use_cuda" - env.update({ - "FLAGS_selected_gpus": "{}".format(0), - "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id), - "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), - "PADDLE_TRAINER_ID": "{}".format(trainer_id), - "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - 
"PADDLE_CURRENT_ENDPOINT": ep, - }) + env.update( + { + "FLAGS_selected_gpus": "{}".format(0), + "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id), + "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), + "PADDLE_TRAINER_ID": "{}".format(trainer_id), + "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, + "PADDLE_CURRENT_ENDPOINT": ep, + } + ) # TODO(liuyuhui):XPU_VISIBLE_DEVICES is not working right now, # will update it after Badiu Kunlun partners' support. elif self.__use_xpu: tr_cmd += " --use_xpu" - env.update({ - "FLAGS_selected_xpus": "{}".format(trainer_id), - #"XPU_VISIBLE_DEVICES": "{}".format(trainer_id + 1), - "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), - "PADDLE_TRAINER_ID": "{}".format(trainer_id), - "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": ep, - "GLOG_v": "2", - }) + env.update( + { + "FLAGS_selected_xpus": "{}".format(trainer_id), + # "XPU_VISIBLE_DEVICES": "{}".format(trainer_id + 1), + "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), + "PADDLE_TRAINER_ID": "{}".format(trainer_id), + "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, + "PADDLE_CURRENT_ENDPOINT": ep, + "GLOG_v": "2", + } + ) elif self.__use_npu: tr_cmd += " --use_npu" - env.update({ - "FLAGS_selected_npus": "{}".format(trainer_id), - "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), - "PADDLE_TRAINER_ID": "{}".format(trainer_id), - "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": ep, - "GLOG_v": "2", - }) + env.update( + { + "FLAGS_selected_npus": "{}".format(trainer_id), + "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), + "PADDLE_TRAINER_ID": "{}".format(trainer_id), + "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, + "PADDLE_CURRENT_ENDPOINT": ep, + "GLOG_v": "2", + } + ) elif self._use_mlu: tr_cmd += " --use_mlu" - env.update({ - "FLAGS_selected_mlus": "{}".format(trainer_id), - "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), - "PADDLE_TRAINER_ID": "{}".format(trainer_id), - "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": ep, - "GLOG_v": "4", - }) + env.update( + { + "FLAGS_selected_mlus": "{}".format(trainer_id), + "PADDLE_TRAINERS_NUM": "{}".format(trainer_num), + "PADDLE_TRAINER_ID": "{}".format(trainer_id), + "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, + "PADDLE_CURRENT_ENDPOINT": ep, + "GLOG_v": "4", + } + ) else: env.update({'CPU_NUM': '1'}) @@ -1292,7 +1473,11 @@ class TestDistBase(unittest.TestCase): tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce) if self._use_fleet_api: - tr_cmd += " --use_fleet_api_20" if self._use_fleet_api_20 else " --use_fleet_api" + tr_cmd += ( + " --use_fleet_api_20" + if self._use_fleet_api_20 + else " --use_fleet_api" + ) if self._use_local_sgd: tr_cmd += " --use_local_sgd" if self._ut4grad_allreduce: @@ -1305,10 +1490,16 @@ class TestDistBase(unittest.TestCase): return tr_cmd, env - def _run_cluster_gloo(self, model, envs, update_method, check_error_log, - log_name): - assert update_method == "gloo", "_run_cluster_gloo must have update_method: gloo, but get %s" % update_method - assert not self._use_hallreduce, "_run_cluster_gloo must have _use_hallreduce = false" + def _run_cluster_gloo( + self, model, envs, update_method, check_error_log, log_name + ): + assert update_method == "gloo", ( + "_run_cluster_gloo must have update_method: gloo, but get %s" + % update_method + ) + assert ( + not self._use_hallreduce + ), "_run_cluster_gloo must have _use_hallreduce = false" worker_endpoints = self._ps_endpoints.split(",") @@ -1317,27 +1508,33 @@ class 
TestDistBase(unittest.TestCase): procs = [] pipes = [] for i in range(0, trainer_num): - tr_cmd, tr_env = self._get_gloo_trainer_cmd(model, - worker_endpoints[i], - update_method, i, - trainer_num) + tr_cmd, tr_env = self._get_gloo_trainer_cmd( + model, worker_endpoints[i], update_method, i, trainer_num + ) tr_env.update(envs) tr_env["GLOG_vmodule"] = 'gloo_context=4' tr_env["GLOG_v"] = '3' - print("use_hallreduce:{} tr_cmd:{}, env: {}".format( - self._use_hallreduce, tr_cmd, tr_env)) + print( + "use_hallreduce:{} tr_cmd:{}, env: {}".format( + self._use_hallreduce, tr_cmd, tr_env + ) + ) - path = os.path.join(self.temp_dir.name, - log_name + "_tr{}_err.log".format(i)) + path = os.path.join( + self.temp_dir.name, log_name + "_tr{}_err.log".format(i) + ) tr_pipe = open(path, "wb") print_to_err( type(self).__name__, - "going to start process {} with nccl2".format(i)) - tr_proc = subprocess.Popen(tr_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr_pipe, - env=tr_env) + "going to start process {} with nccl2".format(i), + ) + tr_proc = subprocess.Popen( + tr_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr_pipe, + env=tr_env, + ) procs.append(tr_proc) pipes.append(tr_pipe) @@ -1350,7 +1547,8 @@ class TestDistBase(unittest.TestCase): sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err)) if trainer_num == 1: - if check_error_log: print("outs[0]:", outs[0]) + if check_error_log: + print("outs[0]:", outs[0]) return pickle.loads(outs[0]) else: @@ -1359,8 +1557,9 @@ class TestDistBase(unittest.TestCase): print("outs[1]:", outs[1]) return pickle.loads(outs[0]), pickle.loads(outs[1]) - def _run_cluster_nccl2(self, model, envs, update_method, check_error_log, - log_name): + def _run_cluster_nccl2( + self, model, envs, update_method, check_error_log, log_name + ): if self._use_hallreduce: self._ps_endpoints = "" @@ -1369,7 +1568,8 @@ class TestDistBase(unittest.TestCase): # NOTE(wangxi). 
hallreduce test must use 4cards after nccl>=2.7 for i in range(0, 4): self._ps_endpoints += "127.0.0.1:%s," % ( - self._find_free_port()) + self._find_free_port() + ) else: for i in range(0, 4): self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i) @@ -1385,22 +1585,30 @@ class TestDistBase(unittest.TestCase): pipes = [] for i in range(0, trainer_num): tr_cmd, tr_env = self._get_nccl2_trainer_cmd( - model, worker_endpoints[i], update_method, i, trainer_num) + model, worker_endpoints[i], update_method, i, trainer_num + ) tr_env.update(envs) - print("use_hallreduce:{} tr_cmd:{}, env: {}".format( - self._use_hallreduce, tr_cmd, tr_env)) + print( + "use_hallreduce:{} tr_cmd:{}, env: {}".format( + self._use_hallreduce, tr_cmd, tr_env + ) + ) - path = os.path.join(self.temp_dir.name, - log_name + "_tr{}_err.log".format(i)) + path = os.path.join( + self.temp_dir.name, log_name + "_tr{}_err.log".format(i) + ) tr_pipe = open(path, "wb") print_to_err( type(self).__name__, - "going to start process {} with nccl2".format(i)) - tr_proc = subprocess.Popen(tr_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr_pipe, - env=tr_env) + "going to start process {} with nccl2".format(i), + ) + tr_proc = subprocess.Popen( + tr_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr_pipe, + env=tr_env, + ) procs.append(tr_proc) pipes.append(tr_pipe) @@ -1429,7 +1637,8 @@ class TestDistBase(unittest.TestCase): pipes = [] for i in range(0, trainer_num): tr_cmd, tr_env = self._get_nccl2_trainer_cmd( - model, worker_endpoints[i], update_method, i, trainer_num) + model, worker_endpoints[i], update_method, i, trainer_num + ) tr_env.update(envs) tr_env['CUDA_VISIBLE_DEVICES'] = "0,1" tr_env['NCCL_SHM_DISABLE'] = '1' @@ -1442,11 +1651,14 @@ class TestDistBase(unittest.TestCase): print_to_err( type(self).__name__, - "going to start process {} with nccl2".format(i)) - tr_proc = subprocess.Popen(tr_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr_pipe, - env=tr_env) + "going to start process {} with nccl2".format(i), + ) + tr_proc = subprocess.Popen( + tr_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr_pipe, + env=tr_env, + ) procs.append(tr_proc) pipes.append(tr_pipe) @@ -1477,67 +1689,79 @@ class TestDistBase(unittest.TestCase): "http_proxy": "", "NCCL_P2P_DISABLE": "1", "NCCL_SHM_DISABLE": "1", - "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1" + "FLAGS_CONVERT_GRAPH_TO_PROGRAM": "1", } if check_error_log: - required_envs["GLOG_vmodule"] = \ - "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \ - "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \ - "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,gen_nccl_id_op_help=10,nccl_helper=10,grpc_client=10," \ + required_envs["GLOG_vmodule"] = ( + "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," + "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," + "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,gen_nccl_id_op_help=10,nccl_helper=10,grpc_client=10," "grpc_server=10,request_handler_impl=10,section_worker=10" + ) required_envs["GLOG_logtostderr"] = "1" if os.getenv('NVIDIA_TF32_OVERRIDE', '') is not None: required_envs['NVIDIA_TF32_OVERRIDE'] = os.getenv( - 'NVIDIA_TF32_OVERRIDE', '') + 'NVIDIA_TF32_OVERRIDE', '' + ) required_envs.update(need_envs) return required_envs - def check_with_place(self, - model_file, 
- delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + def check_with_place( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): if self._dygraph and (self._gloo_mode or self._nccl2_mode): need_envs.update({"FLAGS_enable_eager_mode": "1"}) with _test_eager_guard(): - self.check_with_place_func(model_file=model_file, - delta=delta, - check_error_log=check_error_log, - need_envs=need_envs, - log_name=log_name) + self.check_with_place_func( + model_file=model_file, + delta=delta, + check_error_log=check_error_log, + need_envs=need_envs, + log_name=log_name, + ) need_envs.update({"FLAGS_enable_eager_mode": "0"}) - self.check_with_place_func(model_file=model_file, - delta=delta, - check_error_log=check_error_log, - need_envs=need_envs, - log_name=log_name) + self.check_with_place_func( + model_file=model_file, + delta=delta, + check_error_log=check_error_log, + need_envs=need_envs, + log_name=log_name, + ) else: - self.check_with_place_func(model_file=model_file, - delta=delta, - check_error_log=check_error_log, - need_envs=need_envs, - log_name=log_name) - - def check_with_place_func(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + self.check_with_place_func( + model_file=model_file, + delta=delta, + check_error_log=check_error_log, + need_envs=need_envs, + log_name=log_name, + ) + + def check_with_place_func( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): required_envs = self._get_required_envs(check_error_log, need_envs) if self._gloo_mode: - local_losses \ - = self._run_local_gloo(model_file, required_envs, - check_error_log, log_name=log_name) + local_losses = self._run_local_gloo( + model_file, required_envs, check_error_log, log_name=log_name + ) else: - local_losses \ - = self._run_local(model_file, required_envs, - check_error_log, log_name=log_name) + local_losses = self._run_local( + model_file, required_envs, check_error_log, log_name=log_name + ) if self._nccl2_mode: if self._nccl2_reduce_layer: @@ -1546,21 +1770,24 @@ class TestDistBase(unittest.TestCase): required_envs, update_method="nccl2_reduce_layer", check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) else: tr0_losses, tr1_losses = self._run_cluster_nccl2( model_file, required_envs, update_method='nccl2', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) elif self._bkcl_mode: tr0_losses, tr1_losses = self._run_cluster_nccl2( model_file, required_envs, update_method='bkcl', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) elif self._gloo_mode: # gloo mode, cpu only parallel train @xiongkun03 tr0_losses, tr1_losses = self._run_cluster_gloo( @@ -1568,31 +1795,32 @@ class TestDistBase(unittest.TestCase): required_envs, update_method='gloo', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) elif self._hccl_mode: tr0_losses, tr1_losses = self._run_cluster_nccl2( model_file, required_envs, update_method='hccl', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) elif self._cncl_mode: tr0_losses, tr1_losses = self._run_cluster_nccl2( model_file, required_envs, update_method='cncl', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) elif self._pipeline_mode: - tr0_losses, tr1_losses = self._run_pipeline(model_file, - required_envs, - check_error_log, - log_name=log_name) + tr0_losses, tr1_losses = 
self._run_pipeline( + model_file, required_envs, check_error_log, log_name=log_name + ) else: - tr0_losses, tr1_losses = self._run_cluster(model_file, - required_envs, - check_error_log, - log_name=log_name) + tr0_losses, tr1_losses = self._run_cluster( + model_file, required_envs, check_error_log, log_name=log_name + ) for step_id in range(RUN_STEP): local_loss = local_losses[step_id] @@ -1605,12 +1833,14 @@ class TestDistBase(unittest.TestCase): print("=======", local_loss, ":", dist_loss[0], "=======") self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta) - def check_with_place_multi_cards(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + def check_with_place_multi_cards( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): # need open p2p or shm otherwise multi cards mode will hang need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"}) @@ -1618,19 +1848,22 @@ class TestDistBase(unittest.TestCase): required_envs = self._get_required_envs(check_error_log, need_envs) if self._use_dgc: - multi_cards_losses = self._run_local(model_file, - required_envs, - check_error_log, - log_name=log_name + - "_dgc_2cards", - devices="0,1") + multi_cards_losses = self._run_local( + model_file, + required_envs, + check_error_log, + log_name=log_name + "_dgc_2cards", + devices="0,1", + ) self._use_dgc = False - base_losses = self._run_local(model_file, - required_envs, - check_error_log, - log_name=log_name + "_base_2cards", - devices="0,1") + base_losses = self._run_local( + model_file, + required_envs, + check_error_log, + log_name=log_name + "_base_2cards", + devices="0,1", + ) self._use_dgc = True diff --git a/python/paddle/fluid/tests/unittests/test_dist_dygraph_apis.py b/python/paddle/fluid/tests/unittests/test_dist_dygraph_apis.py index a99d8221fe19e2b7f461642e5698232a50b93c02..aa5e1e93064fb94545ca3bfe3337167efe4e7216 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_dygraph_apis.py +++ b/python/paddle/fluid/tests/unittests/test_dist_dygraph_apis.py @@ -17,7 +17,6 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestDygraphFleetApi(TestMultipleGpus): - def test_dygraph_fleet_api(self): self.run_mnist_2gpu('dygraph_fleet_api.py') diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py index b269e61ec7842d3292a55962aa35d1685cb737b6..4aaf596d5709031e086277c521c30af9ad5a6b56 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -30,8 +29,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py 
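# --------------------------------------------------------------------------
# Editor's note (simplified sketch, not part of the patch): independent of the
# reformatting, the convergence check in TestDistBase.check_with_place_func
# above amounts to running the model once locally and once distributed, then
# comparing per-step losses within a tolerance. A minimal standalone version
# of that comparison loop (RUN_STEP, the loss values, and delta are made up
# here; the real test derives a single dist loss per step from both trainers):
RUN_STEP = 5
local_losses = [0.90, 0.80, 0.72, 0.65, 0.59]
tr0_losses = [0.90, 0.81, 0.72, 0.66, 0.59]
tr1_losses = [0.91, 0.80, 0.73, 0.65, 0.60]
delta = 1e-1

for step_id in range(RUN_STEP):
    for dist_loss in (tr0_losses[step_id], tr1_losses[step_id]):
        # mirrors self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)
        assert abs(local_losses[step_id] - dist_loss) <= delta, (
            "step %d: local %f vs dist %f"
            % (step_id, local_losses[step_id], dist_loss)
        )
print("all %d steps within delta=%g" % (RUN_STEP, delta))
# --------------------------------------------------------------------------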
index f0cf60296cace12dac2606d381aefcbfb891d6f4..523170367fb3ce992e0b065a76fd186c01b478fa 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -29,8 +28,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_a_sync_optimizer1(self): os.environ["TRAINING_ROLE"] = "TRAINER" @@ -43,16 +43,17 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) os.environ["FLAGS_LAUNCH_BARRIER"] = "0" diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py index dded4e7c7ba206e15ae1137bfb85a6e68359d0e4..a51b01209b1d7ab80abc8ea481a5cd6529a85e15 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -31,8 +30,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_a_sync_optimizer3(self): os.environ["TRAINING_ROLE"] = "TRAINER" @@ -45,26 +45,31 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data(name="x", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) + input_x = paddle.fluid.layers.data( + name="x", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) x_embedding = paddle.fluid.layers.embedding( is_distributed=False, input=input_x, size=[1000000000, 100000], param_attr=paddle.fluid.ParamAttr( name="embedding", - initializer=paddle.fluid.initializer.Constant(value=0.01)), - is_sparse=True) + 
initializer=paddle.fluid.initializer.Constant(value=0.01), + ), + is_sparse=True, + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=x_embedding, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) os.environ["FLAGS_LAUNCH_BARRIER"] = "0" diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py index 035506607e3116adbb34ba33b74afb98043ab3ba..e6f618b63a6582defc2984b77df0ae9006c8c8b6 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -30,8 +29,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_a_sync_optimizer2(self): os.environ["TRAINING_ROLE"] = "TRAINER" @@ -48,15 +48,16 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input_x = paddle.fluid.layers.data(name="x", shape=[1], dtype='int64') input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') - emb = paddle.fluid.layers.embedding(input=input_x, - size=[100, 10], - is_sparse=True) + emb = paddle.fluid.layers.embedding( + input=input_x, size=[100, 10], is_sparse=True + ) fc_1 = paddle.fluid.layers.fc(input=emb, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) os.environ["FLAGS_LAUNCH_BARRIER"] = "0" strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py index 45d3b315bcde5bd03a64f6f6db0e4a2da762a732..05abc40a1d04184712df28a8c1c85d0e73c1a3ec 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -29,8 +28,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = 
"127.0.0.1:36001,127.0.0.2:36001" def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" @@ -43,16 +43,17 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -75,16 +76,17 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py index 753635e69043787916db2db005cc764e4977f1c0..837ea65d54f48be723cf3118f9298c842d0ff2cb 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" @@ -31,8 +30,9 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): os.environ["TRAINING_ROLE"] = "TRAINER" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_gradient_merge_optimizer(self): fleet.init(role_maker.PaddleCloudRoleMaker()) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py index c8ae35d3d55ae773237216ab5d61ff3b30502c48..6686fb03a01feac78ce1368c69f5f227f0b37904 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py @@ -17,6 +17,7 @@ import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker import paddle.fluid as fluid import paddle + """ high level unit test for distribute fleet. 
""" @@ -46,9 +47,9 @@ DIST_UT_PORT = 0 class FleetDistRunnerBase(object): """ - run_pserver,run_trainer : after init role, using transpiler split program - net : implment by child class, the network of model - do training : exe run program + run_pserver,run_trainer : after init role, using transpiler split program + net : implment by child class, the network of model + do training : exe run program """ def __init__(self): @@ -64,7 +65,8 @@ class FleetDistRunnerBase(object): current_id=args.current_id, role=role_maker.Role.SERVER, worker_endpoints=args.trainer_endpoints.split(","), - server_endpoints=args.endpoints.split(",")) + server_endpoints=args.endpoints.split(","), + ) else: role = role_maker.UserDefinedRoleMaker( is_collective=False, @@ -73,7 +75,8 @@ class FleetDistRunnerBase(object): current_id=args.current_id, role=role_maker.Role.WORKER, worker_endpoints=args.trainer_endpoints.split(","), - server_endpoints=args.endpoints.split(",")) + server_endpoints=args.endpoints.split(","), + ) self.role = role return role @@ -100,14 +103,13 @@ class FleetDistRunnerBase(object): debug = int(os.getenv("Debug", "0")) # TODO(update strategy to support dump params) if False: # debug: - self.strategy.set_debug_opt({ - "dump_param": - self.dump_param, - "dump_fields": - self.dump_fields, - "dump_fields_path": - self.dump_fields_path - }) + self.strategy.set_debug_opt( + { + "dump_param": self.dump_param, + "dump_fields": self.dump_fields, + "dump_fields_path": self.dump_fields_path, + } + ) return self.strategy @@ -126,7 +128,8 @@ class FleetDistRunnerBase(object): use_decay = int(os.getenv("USE_DECAY", "0")) if use_decay: scheduler = paddle.optimizer.lr.ExponentialDecay( - learning_rate=LEARNING_RATE, gamma=0.999, verbose=True) + learning_rate=LEARNING_RATE, gamma=0.999, verbose=True + ) optimizer = fluid.optimizer.SGD(scheduler, grad_clip=grad_clip) """ # learning rate decay method before 2.0 @@ -154,7 +157,8 @@ class FleetDistRunnerBase(object): def net(self, args, batch_size=4, lr=0.01): raise NotImplementedError( - "get_model should be implemented by child classes.") + "get_model should be implemented by child classes." + ) def get_executor(self): if self._exe is None: @@ -168,21 +172,24 @@ class FleetDistRunnerBase(object): def do_dataset_training(self, fleet): raise NotImplementedError( - "do_dataset_training should be implemented by child classes.") + "do_dataset_training should be implemented by child classes." + ) def do_pyreader_training(self, fleet): raise NotImplementedError( - "do_pyreader_training should be implemented by child classes.") + "do_pyreader_training should be implemented by child classes." + ) def do_distributed_testing(self, fleet): raise NotImplementedError( - "do_distributed_testing should be implemented by child classes.") + "do_distributed_testing should be implemented by child classes." 
+ ) class TestFleetBase(unittest.TestCase): """ - start_pserver,start_trainer : add start cmd to test - run_cluster : using multi process to test distribute program + start_pserver,start_trainer : add start cmd to test + run_cluster : using multi process to test distribute program """ def _setup_config(self): @@ -210,15 +217,23 @@ class TestFleetBase(unittest.TestCase): if DIST_UT_PORT: print("set begin_port:", DIST_UT_PORT) self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT, DIST_UT_PORT + 1) + DIST_UT_PORT, + DIST_UT_PORT + 1, + ) self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT + 2, DIST_UT_PORT + 3) + DIST_UT_PORT + 2, + DIST_UT_PORT + 3, + ) DIST_UT_PORT += 4 else: self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable self._geo_sgd_need_push_nums = 5 @@ -226,10 +241,10 @@ class TestFleetBase(unittest.TestCase): self._setup_config() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -259,18 +274,24 @@ class TestFleetBase(unittest.TestCase): ps0_out = open(ps0_out_log, "wb+") ps1_out = open(ps1_out_log, "wb+") - ps0_proc = subprocess.Popen(ps0_cmd.strip().split(" "), - stdout=ps0_out, - stderr=ps0_err, - env=required_envs) - - ps1_proc = subprocess.Popen(ps1_cmd.strip().split(" "), - stdout=ps1_out, - stderr=ps1_err, - env=required_envs) - - return ((ps0_proc, ps0_out, ps0_err, ps0_out_log, ps0_err_log), - (ps1_proc, ps1_out, ps1_err, ps1_out_log, ps1_err_log)) + ps0_proc = subprocess.Popen( + ps0_cmd.strip().split(" "), + stdout=ps0_out, + stderr=ps0_err, + env=required_envs, + ) + + ps1_proc = subprocess.Popen( + ps1_cmd.strip().split(" "), + stdout=ps1_out, + stderr=ps1_err, + env=required_envs, + ) + + return ( + (ps0_proc, ps0_out, ps0_err, ps0_out_log, ps0_err_log), + (ps1_proc, ps1_out, ps1_err, ps1_out_log, ps1_err_log), + ) def _start_trainer(self, cmd, required_envs): tr0_cmd, tr1_cmd = cmd.format(0), cmd.format(1) @@ -292,18 +313,24 @@ class TestFleetBase(unittest.TestCase): tr0_out = open(tr0_out_log, "wb+") tr1_out = open(tr1_out_log, "wb+") - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(" "), - stdout=tr0_out, - stderr=tr0_err, - env=required_envs) - - tr1_proc = subprocess.Popen(tr1_cmd.strip().split(" "), - stdout=tr1_out, - stderr=tr1_err, - env=required_envs) - - return ((tr0_proc, tr0_out, tr0_err, tr0_out_log, tr0_err_log), - (tr1_proc, tr1_out, tr1_err, tr1_out_log, tr1_err_log)) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(" "), + stdout=tr0_out, + stderr=tr0_err, + env=required_envs, + ) + + tr1_proc = subprocess.Popen( + tr1_cmd.strip().split(" "), + stdout=tr1_out, + stderr=tr1_err, + env=required_envs, + ) + + return ( + (tr0_proc, tr0_out, tr0_err, tr0_out_log, tr0_err_log), + (tr1_proc, tr1_out, tr1_err, tr1_out_log, tr1_err_log), + ) def _run_cluster(self, model, envs): env = {'GRAD_CLIP': str(self._grad_clip_mode), 'WITH_DISTRIBUTE': 'ON'} @@ -316,14 +343,30 @@ class TestFleetBase(unittest.TestCase): env.update(envs) tr_cmd = "{0} {1} --role trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} 
--mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --test {9}".format( - python_path, model, self._ps_endpoints, self._tr_endpoints, - self._trainers, self._mode, self._geo_sgd_need_push_nums, - self._reader, gloo_path, self._need_test) + python_path, + model, + self._ps_endpoints, + self._tr_endpoints, + self._trainers, + self._mode, + self._geo_sgd_need_push_nums, + self._reader, + gloo_path, + self._need_test, + ) ps_cmd = "{0} {1} --role pserver --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --test {9}".format( - python_path, model, self._ps_endpoints, self._tr_endpoints, - self._trainers, self._mode, self._geo_sgd_need_push_nums, - self._reader, gloo_path, self._need_test) + python_path, + model, + self._ps_endpoints, + self._tr_endpoints, + self._trainers, + self._mode, + self._geo_sgd_need_push_nums, + self._reader, + gloo_path, + self._need_test, + ) if self._model_dir: tr_cmd += " --model_dir {}".format(self._model_dir) @@ -340,7 +383,7 @@ class TestFleetBase(unittest.TestCase): tr1_proc, tr1_out, tr1_err, tr1_out_log, tr1_err_log = tr1 # Wait until trainer process terminate - #time_out = 120 + # time_out = 120 time_out = 60 cur_time = 0 @@ -383,27 +426,41 @@ class TestFleetBase(unittest.TestCase): def catlog(logx): basename = os.path.basename(logx) - print("\n================== Error {} begin =====================". - format(basename)) + print( + "\n================== Error {} begin =====================".format( + basename + ) + ) os.system("cat {}".format(logx)) - print("================== Error {} end =====================\n". - format(basename)) + print( + "================== Error {} end =====================\n".format( + basename + ) + ) if tr0_ret != 0 or tr1_ret != 0: if is_listen_failed(ps0_err) or is_listen_failed(ps1_err): print("find parameter server port bind failed, skip the error") tr0_ret, tr1_ret = 0, 0 else: - for out, err in [(ps0_out_log, ps0_err_log), - (ps1_out_log, ps1_err_log), - (tr0_out_log, tr0_err_log), - (tr1_out_log, tr1_err_log)]: + for out, err in [ + (ps0_out_log, ps0_err_log), + (ps1_out_log, ps1_err_log), + (tr0_out_log, tr0_err_log), + (tr1_out_log, tr1_err_log), + ]: catlog(out) catlog(err) for pipe in [ - tr0_err, tr0_out, tr1_err, tr1_out, ps0_err, ps0_out, ps1_err, - ps1_out + tr0_err, + tr0_out, + tr1_err, + tr1_out, + ps0_err, + ps0_out, + ps1_err, + ps1_out, ]: pipe.close() @@ -414,17 +471,15 @@ class TestFleetBase(unittest.TestCase): return 0, 0 - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "FLAGS_rpc_deadline": "5000", # 5sec to fail fast - "http_proxy": "" + "http_proxy": "", } required_envs.update(need_envs) @@ -438,23 +493,20 @@ class TestFleetBase(unittest.TestCase): def runtime_main(test_class): parser = argparse.ArgumentParser(description='Run Fleet test.') - parser.add_argument('--role', - type=str, - required=True, - choices=['pserver', 'trainer']) + parser.add_argument( + '--role', type=str, required=True, choices=['pserver', 'trainer'] + ) parser.add_argument('--endpoints', type=str, required=False, default="") - parser.add_argument('--trainer_endpoints', - type=str, - required=False, - default="") + parser.add_argument( + 
'--trainer_endpoints', type=str, required=False, default="" + ) parser.add_argument('--gloo_path', type=str, required=False, default="") parser.add_argument('--current_id', type=int, required=False, default=0) parser.add_argument('--trainers', type=int, required=False, default=1) parser.add_argument('--mode', type=str, required=False, default='geo') - parser.add_argument('--geo_sgd_need_push_nums', - type=int, - required=False, - default=2) + parser.add_argument( + '--geo_sgd_need_push_nums', type=int, required=False, default=2 + ) parser.add_argument('--reader', type=str, required=False, default='dataset') parser.add_argument('--test', type=int, required=False, default=0) parser.add_argument('--model_dir', type=str, required=False, default="") @@ -467,14 +519,17 @@ def runtime_main(test_class): if args.test and args.model_dir != "": avg_cost = model.net(args, is_train=False) dist_infer = DistributedInfer() - dist_infer.init_distributed_infer_env(exe=model.get_executor(), - loss=model.avg_cost, - role_maker=role, - dirname=args.model_dir) + dist_infer.init_distributed_infer_env( + exe=model.get_executor(), + loss=model.avg_cost, + role_maker=role, + dirname=args.model_dir, + ) if fleet.is_worker(): with paddle.static.program_guard( - main_program=dist_infer.get_dist_infer_program()): + main_program=dist_infer.get_dist_infer_program() + ): model.do_distributed_testing(fleet) fleet.stop_worker() return @@ -499,13 +554,17 @@ def runtime_main(test_class): test_origin_program = paddle.static.Program() test_startup_program = paddle.static.Program() with paddle.static.program_guard( - main_program=test_origin_program, - startup_program=test_startup_program): + main_program=test_origin_program, + startup_program=test_startup_program, + ): with paddle.utils.unique_name.guard(): avg_cost = model.net(args, is_train=False) - dist_infer = DistributedInfer(main_program=test_origin_program, - startup_program=test_startup_program) + dist_infer = DistributedInfer( + main_program=test_origin_program, + startup_program=test_startup_program, + ) with paddle.static.program_guard( - main_program=dist_infer.get_dist_infer_program()): + main_program=dist_infer.get_dist_infer_program() + ): model.do_distributed_testing(fleet) fleet.stop_worker() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py index 06d57c86bd70a49ee7c04c3163bfdd16db1f3cf4..33006b9d9e82c9915ade150a0c0a653d89dbb391 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py @@ -18,17 +18,14 @@ from test_dist_fleet_base import TestFleetBase class TestDistMnistAsyncInMemoryDataset2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" - #self._reader = "pyreader" + # self._reader = "pyreader" self._reader = "dataset" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -38,14 +35,10 @@ class TestDistMnistAsyncInMemoryDataset2x2(TestFleetBase): "CPU_NUM": "2", "LOG_DIRNAME": "/tmp", "SAVE_DIRNAME": "/tmp/TestDistMnistAsyncInMemoryDataset2x2/model", - "SAVE_CACHE_DIRNAME": - "/tmp/TestDistMnistAsyncInMemoryDataset2x2/cache_model", - "SAVE_DENSE_PARAM_DIRNAME": - "/tmp/TestDistMnistAsyncInMemoryDataset2x2/dense_param", - "SAVE_ONE_TABLE_DIRNAME": - 
"/tmp/TestDistMnistAsyncInMemoryDataset2x2/table_0", - "SAVE_PATCH_DIRNAME": - "/tmp/TestDistMnistAsyncInMemoryDataset2x2/patch_model", + "SAVE_CACHE_DIRNAME": "/tmp/TestDistMnistAsyncInMemoryDataset2x2/cache_model", + "SAVE_DENSE_PARAM_DIRNAME": "/tmp/TestDistMnistAsyncInMemoryDataset2x2/dense_param", + "SAVE_ONE_TABLE_DIRNAME": "/tmp/TestDistMnistAsyncInMemoryDataset2x2/table_0", + "SAVE_PATCH_DIRNAME": "/tmp/TestDistMnistAsyncInMemoryDataset2x2/patch_model", "LOG_PREFIX": self.__class__.__name__, } @@ -58,22 +51,19 @@ class TestDistMnistAsyncInMemoryDataset2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) class TestDistMnistAsync2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -94,22 +84,19 @@ class TestDistMnistAsync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) class TestDistCtrHalfAsync2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -133,9 +120,9 @@ class TestDistCtrHalfAsync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr2.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr2.py index b86fabde6209e48b2d651c49f56620ae41e12f57..d9c725c3a89c4886a4dd6bc6d889b42303bb8d40 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr2.py @@ -20,17 +20,14 @@ from test_dist_fleet_base import TestFleetBase @unittest.skip(reason="Skip unstable ut, need paddle sync mode fix") class TestDistMnistSync2x2(TestFleetBase): - def _setup_config(self): self._mode = "sync" self._reader = "pyreader" self._need_test = 1 - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -51,23 +48,20 @@ class TestDistMnistSync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + 
"dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) # @unittest.skip(reason="Skip unstable ut, reader need to be rewrite") class TestDistMnistAsyncDataset2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "dataset" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -92,9 +86,9 @@ class TestDistMnistAsyncDataset2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_decay.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_decay.py index a5e2e7073d0a9c91bffe3862948a0af9213b8be5..9d5ac645b6136ac8bdc6f0a30f4d3cb6c501f88b 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_decay.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_decay.py @@ -32,18 +32,17 @@ batch_size = 4 class TestNoamDecay(unittest.TestCase): - def net(self): - input_data = paddle.static.data(name="sparse_input", - shape=[None, 1], - dtype="int64") - input_label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + input_data = paddle.static.data( + name="sparse_input", shape=[None, 1], dtype="int64" + ) + input_label = paddle.static.data( + name="label", shape=[None, 1], dtype="int64" + ) label = paddle.cast(input_label, dtype="float32") - embedding = paddle.static.nn.embedding(input_data, - is_sparse=True, - size=[1000, 128]) + embedding = paddle.static.nn.embedding( + input_data, is_sparse=True, size=[1000, 128] + ) fc1 = paddle.static.nn.fc(embedding, size=1024, activation="relu") fc2 = paddle.static.nn.fc(fc1, size=512, activation="relu") @@ -56,20 +55,24 @@ class TestNoamDecay(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.WORKER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.WORKER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss = self.net() - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100, - verbose=True) + scheduler = paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100, verbose=True + ) optimizer = fluid.optimizer.Adam(scheduler) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_geo.py index 37ed07534d96d70d86fe30ee6fc5c1d4276b743d..2e63a28cd247840de4a28f26d5ceefb6648451c0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_geo.py @@ -28,17 +28,14 @@ paddle.enable_static() class TestDistGeoCtr_2x2(TestFleetBase): - def _setup_config(self): self._mode = "geo" self._reader = "pyreader" self._geo_sgd_need_push_nums = 5 - def check_with_place(self, - model_file, - delta=1e-3, - 
check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -58,19 +55,19 @@ class TestDistGeoCtr_2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) class TestGeoSgdTranspiler(unittest.TestCase): - def test_pserver(self): role = role_maker.UserDefinedRoleMaker( current_id=0, role=role_maker.Role.SERVER, worker_num=2, - server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) + server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"], + ) fleet.init(role) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_gloo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_gloo.py index 26590f5a75e534478f10b850c2fbaffe7ff224c0..dd31dc8ef0db86a892433cff4d399c507390d3c5 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_gloo.py @@ -18,25 +18,25 @@ import tempfile import unittest import subprocess import time -#import paddle.fluid.incubate.fleet.base.role_maker as role_maker + +# import paddle.fluid.incubate.fleet.base.role_maker as role_maker from test_dist_fleet_base import TestFleetBase -#from dist_simnet_bow import train_network +# from dist_simnet_bow import train_network class TestDistGloo_2x2(TestFleetBase): - def _setup_config(self): self._mode = "sync" self._reader = "pyreader" self._path = "./tmp4" - if (os.path.exists(self._path)): + if os.path.exists(self._path): shutil.rmtree(self._path) # if not os.path.exists(self._path): # os.mkdir(self._path) def _start_pserver(self, cmd, required_envs): - #env.update(required_envs) + # env.update(required_envs) ps0_cmd = cmd ps1_cmd = cmd @@ -46,24 +46,28 @@ class TestDistGloo_2x2(TestFleetBase): required_envs["POD_IP"] = "127.0.0.1" required_envs["PADDLE_PSERVER_ID"] = "0" required_envs["PADDLE_PORT"] = "36011" - ps0_proc = subprocess.Popen(ps0_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps0_pipe, - env=required_envs) + ps0_proc = subprocess.Popen( + ps0_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps0_pipe, + env=required_envs, + ) print("PADDLE_PSERVER_ID=0:") print(required_envs) required_envs["PADDLE_PSERVER_ID"] = "1" required_envs["PADDLE_PORT"] = "36012" - ps1_proc = subprocess.Popen(ps1_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps1_pipe, - env=required_envs) + ps1_proc = subprocess.Popen( + ps1_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps1_pipe, + env=required_envs, + ) print("PADDLE_PSERVER_ID=1:") print(required_envs) return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe def _start_trainer(self, cmd, required_envs): - #env.update(required_envs) + # env.update(required_envs) tr0_cmd = cmd tr1_cmd = cmd @@ -71,17 +75,21 @@ class TestDistGloo_2x2(TestFleetBase): tr0_pipe = open(tempfile.gettempdir() + "/tr0_err.log", "wb+") tr1_pipe = open(tempfile.gettempdir() + "/tr1_err.log", "wb+") required_envs["PADDLE_TRAINER_ID"] = "0" - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr0_pipe, - env=required_envs) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=required_envs, + ) 
print("PADDLE_TRAINER_ID=0:") print(required_envs) required_envs["PADDLE_TRAINER_ID"] = "1" - tr1_proc = subprocess.Popen(tr1_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=tr1_pipe, - env=required_envs) + tr1_proc = subprocess.Popen( + tr1_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=tr1_pipe, + env=required_envs, + ) print("PADDLE_TRAINER_ID=1:") print(required_envs) return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe @@ -138,11 +146,9 @@ class TestDistGloo_2x2(TestFleetBase): return 0, 0 - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -150,16 +156,16 @@ class TestDistGloo_2x2(TestFleetBase): "FLAGS_rpc_deadline": "5000", # 5sec to fail fast "http_proxy": "", "CPU_NUM": "2", - #PSERVER + # PSERVER "PADDLE_PSERVERS_IP_PORT_LIST": "127.0.0.1:36011,127.0.0.1:36012", - #"PADDLE_PSERVER_PORT_ARRAY":"(36011 36012)", + # "PADDLE_PSERVER_PORT_ARRAY":"(36011 36012)", "PADDLE_PSERVER_NUMS": "2", "PADDLE_TRAINER_ID": "0", - #TRAINER + # TRAINER "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:36013,127.0.0.1:36014", "PADDLE_TRAINERS_NUM": "2", "PADDLE_PSERVER_ID": "0", - #GLOO FLAG + # GLOO FLAG "PADDLE_WITH_GLOO": "1", } @@ -173,9 +179,9 @@ class TestDistGloo_2x2(TestFleetBase): def test_dist_train(self): print("path is not delete", os.path.exists("./tmp4")) - self.check_with_place("dist_fleet_debug_gloo.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_debug_gloo.py", delta=1e-5, check_error_log=True + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_base.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_base.py index 13178ad233e2e52c78d545c5d8edf56078bb2d91..18c20ef675473a98cc4a065f7af9128bbd0c5cfb 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_base.py @@ -41,9 +41,9 @@ DIST_UT_PORT = 0 class FleetDistHeterRunnerBase(object): """ - run_pserver,run_trainer : after init role, using transpiler split program - net : implment by child class, the network of model - do training : exe run program + run_pserver,run_trainer : after init role, using transpiler split program + net : implment by child class, the network of model + do training : exe run program """ def build_role(self, args): @@ -54,33 +54,45 @@ class FleetDistHeterRunnerBase(object): environs["PADDLE_PSERVERS_IP_PORT_LIST"] = args.endpoints environs["PADDLE_TRAINER_ENDPOINTS"] = args.trainer_endpoints environs[ - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST"] = all_heter_trainer_endpoints - environs["POD_IP"] = args.endpoints.split(",")[int( - args.current_id)].split(":")[0] - environs["PADDLE_PORT"] = args.endpoints.split(",")[int( - args.current_id)].split(":")[1] + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST" + ] = all_heter_trainer_endpoints + environs["POD_IP"] = args.endpoints.split(",")[ + int(args.current_id) + ].split(":")[0] + environs["PADDLE_PORT"] = args.endpoints.split(",")[ + int(args.current_id) + ].split(":")[1] environs["TRAINING_ROLE"] = args.role.upper() environs["PADDLE_TRAINERS_NUM"] = args.trainers elif args.role.upper() == "HETER_TRAINER": - previous_endpoints = args.trainer_endpoints if args.stage_id == 2 else heter_trainer_endpoints[ - 0] - next_endpoints = heter_trainer_endpoints[ - 1] if 
args.stage_id == 2 else "" - heter_device = args.heter_trainer_device.split(";")[args.stage_id - - 2] + previous_endpoints = ( + args.trainer_endpoints + if args.stage_id == 2 + else heter_trainer_endpoints[0] + ) + next_endpoints = ( + heter_trainer_endpoints[1] if args.stage_id == 2 else "" + ) + heter_device = args.heter_trainer_device.split(";")[ + args.stage_id - 2 + ] environs["PADDLE_PSERVERS_IP_PORT_LIST"] = args.endpoints environs["PADDLE_TRAINER_ENDPOINTS"] = args.trainer_endpoints environs["PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST"] = next_endpoints environs[ - "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST"] = previous_endpoints + "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST" + ] = previous_endpoints environs[ - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST"] = all_heter_trainer_endpoints + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST" + ] = all_heter_trainer_endpoints environs["HETER_DEVICE_TYPE"] = heter_device environs["TRAINING_ROLE"] = args.role.upper() - environs["POD_IP"] = all_heter_trainer_endpoints.split(",")[int( - args.current_id)].split(":")[0] + environs["POD_IP"] = all_heter_trainer_endpoints.split(",")[ + int(args.current_id) + ].split(":")[0] environs["PADDLE_PORT"] = all_heter_trainer_endpoints.split(",")[ - int(args.current_id)].split(":")[1] + int(args.current_id) + ].split(":")[1] environs["PADDLE_TRAINERS_NUM"] = args.trainers environs["PADDLE_STAGE_TRAINERS_NUM"] = [2, 2, 2] environs["FLAGS_selected_gpus"] = 0 @@ -93,18 +105,21 @@ class FleetDistHeterRunnerBase(object): environs["PADDLE_PSERVERS_IP_PORT_LIST"] = args.endpoints environs["PADDLE_TRAINER_ENDPOINTS"] = args.trainer_endpoints environs[ - "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST"] = heter_trainer_endpoints[ - 0] + "PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST" + ] = heter_trainer_endpoints[0] environs["PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST"] = "" environs[ - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST"] = all_heter_trainer_endpoints + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST" + ] = all_heter_trainer_endpoints environs["HETER_DEVICE_TYPE"] = "cpu" environs["TRAINING_ROLE"] = args.role.upper() environs["PADDLE_TRAINER_ID"] = args.current_id - environs["POD_IP"] = args.trainer_endpoints.split(",")[int( - args.current_id)].split(":")[0] - environs["PADDLE_PORT"] = args.trainer_endpoints.split(",")[int( - args.current_id)].split(":")[1] + environs["POD_IP"] = args.trainer_endpoints.split(",")[ + int(args.current_id) + ].split(":")[0] + environs["PADDLE_PORT"] = args.trainer_endpoints.split(",")[ + int(args.current_id) + ].split(":")[1] environs["PADDLE_TRAINERS_NUM"] = args.trainers environs["PADDLE_STAGE_TRAINERS_NUM"] = [2, 2, 2] environs["FLAGS_selected_gpus"] = 0 @@ -126,12 +141,12 @@ class FleetDistHeterRunnerBase(object): self.strategy.a_sync = True self.strategy.a_sync_configs = { "launch_barrier": True, - "heter_worker_device_guard": 'gpu' + "heter_worker_device_guard": 'gpu', } self.strategy.pipeline = True self.strategy.pipeline_configs = { "accumulate_steps": 1, - "micro_batch_size": 2048 + "micro_batch_size": 2048, } return self.strategy @@ -152,21 +167,24 @@ class FleetDistHeterRunnerBase(object): def net(self, args, batch_size=4, lr=0.01): raise NotImplementedError( - "get_model should be implemented by child classes.") + "get_model should be implemented by child classes." + ) def do_dataset_training(self, fleet): raise NotImplementedError( - "do_dataset_training should be implemented by child classes.") + "do_dataset_training should be implemented by child classes." 
+ ) def do_dataset_heter_training(self, fleet): raise NotImplementedError( - "do_dataset_heter_training should be implemented by child classes.") + "do_dataset_heter_training should be implemented by child classes." + ) class TestFleetHeterBase(unittest.TestCase): """ - start_pserver,start_trainer : add start cmd to test - run_cluster : using multi process to test distribute program + start_pserver,start_trainer : add start cmd to test + run_cluster : using multi process to test distribute program """ def _setup_config(self): @@ -194,23 +212,39 @@ class TestFleetHeterBase(unittest.TestCase): if DIST_UT_PORT: print("set begin_port:", DIST_UT_PORT) self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT, DIST_UT_PORT + 1) + DIST_UT_PORT, + DIST_UT_PORT + 1, + ) self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT + 2, DIST_UT_PORT + 3) + DIST_UT_PORT + 2, + DIST_UT_PORT + 3, + ) self._heter_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT + 4, DIST_UT_PORT + 5) + DIST_UT_PORT + 4, + DIST_UT_PORT + 5, + ) self._heter_endpoints_2 = "127.0.0.1:%s,127.0.0.1:%s" % ( - DIST_UT_PORT + 6, DIST_UT_PORT + 7) + DIST_UT_PORT + 6, + DIST_UT_PORT + 7, + ) DIST_UT_PORT += 8 else: self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._heter_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._heter_endpoints_2 = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable self._geo_sgd_need_push_nums = 5 @@ -218,10 +252,10 @@ class TestFleetHeterBase(unittest.TestCase): self._setup_config() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -237,14 +271,18 @@ class TestFleetHeterBase(unittest.TestCase): ps0_pipe = open(tempfile.gettempdir() + "/ps0_err.log", "wb+") ps1_pipe = open(tempfile.gettempdir() + "/ps1_err.log", "wb+") - ps0_proc = subprocess.Popen(ps0_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps0_pipe, - env=required_envs) - ps1_proc = subprocess.Popen(ps1_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=ps1_pipe, - env=required_envs) + ps0_proc = subprocess.Popen( + ps0_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps0_pipe, + env=required_envs, + ) + ps1_proc = subprocess.Popen( + ps1_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=ps1_pipe, + env=required_envs, + ) return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe def _start_trainer(self, cmd, required_envs): @@ -256,20 +294,28 @@ class TestFleetHeterBase(unittest.TestCase): tr0_out = open(tempfile.gettempdir() + "/tr0_out.log", "wb+") tr1_out = open(tempfile.gettempdir() + "/tr1_out.log", "wb+") - tr0_proc = subprocess.Popen(tr0_cmd.strip().split(" "), - stdout=tr0_out, - stderr=tr0_pipe, - env=required_envs) - tr1_proc = subprocess.Popen(tr1_cmd.strip().split(" "), - stdout=tr1_out, - stderr=tr1_pipe, - env=required_envs) + tr0_proc = subprocess.Popen( + tr0_cmd.strip().split(" "), + 
stdout=tr0_out, + stderr=tr0_pipe, + env=required_envs, + ) + tr1_proc = subprocess.Popen( + tr1_cmd.strip().split(" "), + stdout=tr1_out, + stderr=tr1_pipe, + env=required_envs, + ) return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe def _start_heter_trainer(self, cmd, required_envs): - heter0_cmd, heter1_cmd, heter2_cmd, heter3_cmd = cmd.format( - 0, 2), cmd.format(1, 2), cmd.format(2, 3), cmd.format(3, 3) + heter0_cmd, heter1_cmd, heter2_cmd, heter3_cmd = ( + cmd.format(0, 2), + cmd.format(1, 2), + cmd.format(2, 3), + cmd.format(3, 3), + ) heter0_pipe = open(tempfile.gettempdir() + "/heter0_err.log", "wb+") heter1_pipe = open(tempfile.gettempdir() + "/heter1_err.log", "wb+") @@ -280,29 +326,46 @@ class TestFleetHeterBase(unittest.TestCase): heter2_out = open(tempfile.gettempdir() + "/heter2_out.log", "wb+") heter3_out = open(tempfile.gettempdir() + "/heter3_out.log", "wb+") - heter0_proc = subprocess.Popen(heter0_cmd.strip().split(" "), - stdout=heter0_out, - stderr=heter0_pipe, - env=required_envs) - heter1_proc = subprocess.Popen(heter1_cmd.strip().split(" "), - stdout=heter1_out, - stderr=heter1_pipe, - env=required_envs) - heter2_proc = subprocess.Popen(heter2_cmd.strip().split(" "), - stdout=heter2_out, - stderr=heter2_pipe, - env=required_envs) - heter3_proc = subprocess.Popen(heter3_cmd.strip().split(" "), - stdout=heter3_out, - stderr=heter3_pipe, - env=required_envs) - - return heter0_proc, heter1_proc, heter2_proc, heter3_proc, heter0_pipe, heter1_pipe, heter2_pipe, heter3_pipe + heter0_proc = subprocess.Popen( + heter0_cmd.strip().split(" "), + stdout=heter0_out, + stderr=heter0_pipe, + env=required_envs, + ) + heter1_proc = subprocess.Popen( + heter1_cmd.strip().split(" "), + stdout=heter1_out, + stderr=heter1_pipe, + env=required_envs, + ) + heter2_proc = subprocess.Popen( + heter2_cmd.strip().split(" "), + stdout=heter2_out, + stderr=heter2_pipe, + env=required_envs, + ) + heter3_proc = subprocess.Popen( + heter3_cmd.strip().split(" "), + stdout=heter3_out, + stderr=heter3_pipe, + env=required_envs, + ) + + return ( + heter0_proc, + heter1_proc, + heter2_proc, + heter3_proc, + heter0_pipe, + heter1_pipe, + heter2_pipe, + heter3_pipe, + ) def _run_cluster(self, model, envs): env = { 'GRAD_CLIP': str(self._grad_clip_mode), - 'FLAGS_eager_delete_tensor_gb': str(-1) + 'FLAGS_eager_delete_tensor_gb': str(-1), } python_path = self._python_interp gloo_path = tempfile.mkdtemp() @@ -312,31 +375,64 @@ class TestFleetHeterBase(unittest.TestCase): python_path += " -m coverage run --branch -p" env.update(envs) self._all_heter_endpoints = ";".join( - (self._heter_endpoints, self._heter_endpoints_2)) + (self._heter_endpoints, self._heter_endpoints_2) + ) tr_cmd = "{0} {1} --role trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --heter_trainer_endpoints {9} --heter_trainer_device {10}".format( - python_path, model, self._ps_endpoints, self._tr_endpoints, - self._trainers, self._mode, self._geo_sgd_need_push_nums, - self._reader, gloo_path, self._all_heter_endpoints, - self._heter_device) + python_path, + model, + self._ps_endpoints, + self._tr_endpoints, + self._trainers, + self._mode, + self._geo_sgd_need_push_nums, + self._reader, + gloo_path, + self._all_heter_endpoints, + self._heter_device, + ) ps_cmd = "{0} {1} --role pserver --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} 
--heter_trainer_endpoints {9} --heter_trainer_device {10}".format( - python_path, model, self._ps_endpoints, self._tr_endpoints, - self._trainers, self._mode, self._geo_sgd_need_push_nums, - self._reader, gloo_path, self._all_heter_endpoints, - self._heter_device) + python_path, + model, + self._ps_endpoints, + self._tr_endpoints, + self._trainers, + self._mode, + self._geo_sgd_need_push_nums, + self._reader, + gloo_path, + self._all_heter_endpoints, + self._heter_device, + ) heter_cmd = "{0} {1} --role heter_trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --stage_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --heter_trainer_endpoints {9} --heter_trainer_device {10}".format( - python_path, model, self._ps_endpoints, self._tr_endpoints, - self._trainers, self._mode, self._geo_sgd_need_push_nums, - self._reader, gloo_path, self._all_heter_endpoints, - self._heter_device) + python_path, + model, + self._ps_endpoints, + self._tr_endpoints, + self._trainers, + self._mode, + self._geo_sgd_need_push_nums, + self._reader, + gloo_path, + self._all_heter_endpoints, + self._heter_device, + ) # Run dist train to compare with local results ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env) tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env) - heter0, heter1, heter2, heter3, heter0_pipe, heter1_pipe, heter2_pipe, heter3_pipe = self._start_heter_trainer( - heter_cmd, env) + ( + heter0, + heter1, + heter2, + heter3, + heter0_pipe, + heter1_pipe, + heter2_pipe, + heter3_pipe, + ) = self._start_heter_trainer(heter_cmd, env) # Wait until trainer process terminate while True: @@ -379,17 +475,15 @@ class TestFleetHeterBase(unittest.TestCase): shutil.rmtree(gloo_path) return 0, 0 - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "FLAGS_rpc_deadline": "5000", # 5sec to fail fast - "http_proxy": "" + "http_proxy": "", } required_envs.update(need_envs) @@ -403,32 +497,30 @@ class TestFleetHeterBase(unittest.TestCase): def runtime_main(test_class): parser = argparse.ArgumentParser(description='Run Fleet test.') - parser.add_argument('--role', - type=str, - required=True, - choices=['pserver', 'trainer', 'heter_trainer']) + parser.add_argument( + '--role', + type=str, + required=True, + choices=['pserver', 'trainer', 'heter_trainer'], + ) parser.add_argument('--endpoints', type=str, required=False, default="") - parser.add_argument('--trainer_endpoints', - type=str, - required=False, - default="") - parser.add_argument('--heter_trainer_endpoints', - type=str, - required=False, - default="") - parser.add_argument('--heter_trainer_device', - type=str, - required=False, - default="gpu") + parser.add_argument( + '--trainer_endpoints', type=str, required=False, default="" + ) + parser.add_argument( + '--heter_trainer_endpoints', type=str, required=False, default="" + ) + parser.add_argument( + '--heter_trainer_device', type=str, required=False, default="gpu" + ) parser.add_argument('--gloo_path', type=str, required=False, default="") parser.add_argument('--current_id', type=int, required=False, default=0) parser.add_argument('--trainers', type=int, required=False, default=1) parser.add_argument('--stage_id', type=int, required=False, default=1) 
parser.add_argument('--mode', type=str, required=False, default='async') - parser.add_argument('--geo_sgd_need_push_nums', - type=int, - required=False, - default=2) + parser.add_argument( + '--geo_sgd_need_push_nums', type=int, required=False, default=2 + ) parser.add_argument('--reader', type=str, required=False, default='dataset') args = parser.parse_args() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py index bf7ced9ce601378a2d9816224df3a136d52e4488..251a2bf25d9db74becd80d9d3e5809f173ff4dc0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py @@ -25,16 +25,18 @@ paddle.enable_static() class TestDistFleetHeterProgram(unittest.TestCase): - def build_role(self): environs = {} environs[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36012,127.0.0.1:36013" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36012,127.0.0.1:36013" environs["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36014,127.0.0.1:36015" environs[ - "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST"] = "127.0.0.1:36016,127.0.0.1:36017" + "PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST" + ] = "127.0.0.1:36016,127.0.0.1:36017" environs[ - "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST"] = "127.0.0.1:36014,127.0.0.1:36015" + "PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST" + ] = "127.0.0.1:36014,127.0.0.1:36015" environs["PADDLE_HETER_TRAINER_DEVICE"] = "gpu" environs["TRAINING_ROLE"] = "HETER_TRAINER" environs["STAGE_ID"] = 2 @@ -58,20 +60,20 @@ class TestDistFleetHeterProgram(unittest.TestCase): self.strategy.a_sync = True self.strategy.a_sync_configs = { "launch_barrier": False, - "heter_worker_device_guard": "gpu" + "heter_worker_device_guard": "gpu", } return self.strategy def build_input(self): - dense_input = fluid.layers.data(name="dense_input", - shape=[10], - dtype="float32") + dense_input = fluid.layers.data( + name="dense_input", shape=[10], dtype="float32" + ) sparse_input_ids = [ - fluid.layers.data(name="C" + str(i), - shape=[1], - lod_level=1, - dtype="int64") for i in range(1, 27) + fluid.layers.data( + name="C" + str(i), shape=[1], lod_level=1, dtype="int64" + ) + for i in range(1, 27) ] label = fluid.layers.data(name="label", shape=[1], dtype="float32") @@ -80,7 +82,6 @@ class TestDistFleetHeterProgram(unittest.TestCase): return inputs def build_net(self, inputs): - def embedding_layer(input): return fluid.layers.embedding( input=input, @@ -88,7 +89,8 @@ class TestDistFleetHeterProgram(unittest.TestCase): size=[100001, 10], param_attr=fluid.ParamAttr( name="SparseFeatFactors", - initializer=fluid.initializer.Uniform()), + initializer=fluid.initializer.Uniform(), + ), ) sparse_embed_seq = list(map(embedding_layer, inputs[1:-1])) @@ -100,35 +102,50 @@ class TestDistFleetHeterProgram(unittest.TestCase): input=concated, size=400, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(concated.shape[1]))), - name="fc1") + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(concated.shape[1]) + ) + ), + name="fc1", + ) with fluid.device_guard("cpu"): fc2 = fluid.layers.fc( input=fc1, size=400, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc1.shape[1]))), - name="fc2") + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc1.shape[1]) + ) + ), + name="fc2", + ) with 
fluid.device_guard("gpu"): fc3 = fluid.layers.fc( input=fc2, size=400, act="relu", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc2.shape[1]))), - name="fc3") + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc2.shape[1]) + ) + ), + name="fc3", + ) with fluid.device_guard("cpu"): predict = fluid.layers.fc( input=fc3, size=2, act="softmax", - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(fc3.shape[1]))), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(fc3.shape[1]) + ) + ), ) with fluid.device_guard("gpu"): diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_infer.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_infer.py index 362474e27678f6e75336dbad85768731ad7042c8..c0c7070cf803bf61ff2a1e7899581a91b31b4603 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_infer.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_infer.py @@ -20,16 +20,13 @@ from test_dist_fleet_base import TestFleetBase class TestDistCtrInfer(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -54,35 +51,34 @@ class TestDistCtrInfer(TestFleetBase): def test_dist_infer(self): model_dirname = tempfile.mkdtemp() - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False, - need_envs={ - "SAVE_DIRNAME": model_dirname, - }) + self.check_with_place( + "dist_fleet_ctr.py", + delta=1e-5, + check_error_log=False, + need_envs={ + "SAVE_DIRNAME": model_dirname, + }, + ) self._need_test = 1 self._model_dir = model_dirname - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) shutil.rmtree(model_dirname) class TestDistCtrTrainInfer(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" self._need_test = 1 - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), @@ -106,9 +102,9 @@ class TestDistCtrTrainInfer(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train_infer(self): - self.check_with_place("dist_fleet_ctr.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_fleet_ctr.py", delta=1e-5, check_error_log=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py index 137beef5f2dff33c4c3e99f9debdc7918f70f7fe..8d5ac58d62aaa1d7d38a79bbe2d5d249a3ac5099 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py @@ -34,35 +34,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - 
acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -70,10 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.layers.embedding( input=q, @@ -82,8 +80,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -95,14 +95,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.layers.embedding( input=pt, @@ -111,8 +112,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -124,13 +127,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.layers.embedding( input=nt, @@ -139,8 +143,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) nt_emb = fluid.layers.reshape(nt_emb, 
[-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -152,8 +158,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -164,14 +172,18 @@ class TestPSPassWithBow(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = fleet.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = fleet.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss, acc, _ = self.net() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py index 332f314106b7e91e58cdc30c7eeb0ac94ce757bd..db553b990343b2c216cca549ef362c650e5b99f4 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps10.py @@ -35,18 +35,17 @@ batch_size = 4 class TestExponentialDecay(unittest.TestCase): - def net(self): - input_data = paddle.static.data(name="sparse_input", - shape=[None, 1], - dtype="int64") - input_label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + input_data = paddle.static.data( + name="sparse_input", shape=[None, 1], dtype="int64" + ) + input_label = paddle.static.data( + name="label", shape=[None, 1], dtype="int64" + ) label = paddle.cast(input_label, dtype="float32") - embedding = paddle.static.nn.embedding(input_data, - is_sparse=True, - size=[1000, 128]) + embedding = paddle.static.nn.embedding( + input_data, is_sparse=True, size=[1000, 128] + ) fc1 = paddle.static.nn.fc(embedding, size=1024, activation="relu") fc2 = paddle.static.nn.fc(fc1, size=512, activation="relu") @@ -59,20 +58,24 @@ class TestExponentialDecay(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss = self.net() - scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=base_lr, - gamma=0.999, - verbose=True) + scheduler = paddle.optimizer.lr.InverseTimeDecay( + learning_rate=base_lr, gamma=0.999, verbose=True + ) optimizer = fluid.optimizer.Adam(scheduler) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py index 38fccfa5d886508f281584e2f4b3fc5a8bf61495..b0d8df316a8ab1216e99e85c0983783c8565a0c4 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py @@ -34,35 +34,34 @@ batch_size = 4 class 
TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -78,7 +77,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -90,7 +91,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt @@ -102,7 +105,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -114,8 +119,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) # embedding @@ -125,7 +132,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -137,8 +146,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -155,9 +166,11 @@ class TestPSPassWithBow(unittest.TestCase): os.environ["PADDLE_TRAINER_ID"] = "0" 
os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002,127.0.0.2:36002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36002,127.0.0.2:36002" os.environ["TRAINING_ROLE"] = "TRAINER" os.environ["FLAGS_selected_gpus"] = "0" role = role_maker.PaddleCloudRoleMaker() @@ -191,22 +204,22 @@ class TestPSPassWithBow(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset._set_use_ps_gpu(True) - dataset.init(batch_size=32, - thread_num=3, - pipe_command="cat", - use_var=slots_vars) - dataset.set_filelist([ - "test_in_memory_dataset_run_a.txt", - "test_in_memory_dataset_run_b.txt" - ]) + dataset.init( + batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars + ) + dataset.set_filelist( + [ + "test_in_memory_dataset_run_a.txt", + "test_in_memory_dataset_run_b.txt", + ] + ) os.remove("./test_in_memory_dataset_run_a.txt") os.remove("./test_in_memory_dataset_run_b.txt") diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py index 814e49247dcaf9be91b035cd4570378b0023fceb..b4c10116a55c3f3d05aea497296ef97fb81e33ac 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py @@ -37,35 +37,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -81,7 +80,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -93,7 +94,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), 
name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt @@ -105,7 +108,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -117,8 +122,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) # embedding @@ -128,7 +135,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -140,8 +149,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -158,7 +169,8 @@ class TestPSPassWithBow(unittest.TestCase): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ["TRAINING_ROLE"] = "PSERVER" role = role_maker.PaddleCloudRoleMaker() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py index 99dd076e82913766b569f04529faf25ddc426a37..47cbaefd68d7b98e4f071d2a93ea5b56226207c7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py @@ -38,35 +38,34 @@ batch_size = 4 # this unittest is tested for SparseSharedAdamSGDRule class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - 
fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = fluid.layers.mean(loss_op3) return avg_cost @@ -74,10 +73,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -85,7 +83,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -97,14 +97,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -112,7 +113,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -124,13 +127,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, @@ -138,7 +142,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -150,8 +156,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -168,7 +176,8 @@ class TestPSPassWithBow(unittest.TestCase): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ["TRAINING_ROLE"] = "PSERVER" role = 
role_maker.PaddleCloudRoleMaker() @@ -180,10 +189,8 @@ class TestPSPassWithBow(unittest.TestCase): configs = {} configs['__emb__'] = { - "table_parameters.__emb__.accessor.embed_sgd_param.name": - "SparseSharedAdamSGDRule", - "table_parameters.__emb__.accessor.embedx_sgd_param.name": - "SparseSharedAdamSGDRule", + "table_parameters.__emb__.accessor.embed_sgd_param.name": "SparseSharedAdamSGDRule", + "table_parameters.__emb__.accessor.embedx_sgd_param.name": "SparseSharedAdamSGDRule", } strategy.sparse_table_configs = configs optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py index 0ab59f7ca88d5d7a70411b5fe17d3d7e54920b87..a4cdcb32bd4120f0f82b7c394838db711e7dbd7c 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py @@ -37,35 +37,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -73,10 +72,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -84,7 +82,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -97,14 +97,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -112,7 +113,9 @@ class 
TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -124,13 +127,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, @@ -138,7 +142,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -150,8 +156,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -168,7 +176,8 @@ class TestPSPassWithBow(unittest.TestCase): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" os.environ["TRAINING_ROLE"] = "PSERVER" role = role_maker.PaddleCloudRoleMaker() @@ -180,14 +189,10 @@ class TestPSPassWithBow(unittest.TestCase): configs = {} configs['__emb__'] = { - "table_parameters.__emb__.enable_sparse_table_cache": - True, - "table_parameters.__emb__.shard_merge_rate": - 1, - "table_parameters.__emb__.accessor.embed_sgd_param.name": - "SparseNaiveSGDRule", - "table_parameters.__emb__.accessor.embedx_sgd_param.name": - "SparseAdamSGDRule", + "table_parameters.__emb__.enable_sparse_table_cache": True, + "table_parameters.__emb__.shard_merge_rate": 1, + "table_parameters.__emb__.accessor.embed_sgd_param.name": "SparseNaiveSGDRule", + "table_parameters.__emb__.accessor.embedx_sgd_param.name": "SparseAdamSGDRule", } strategy.sparse_table_configs = configs optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py index 8a7556a26855932f1e852f72a0ebbe9ab4b1f6a8..2eb62770184415dc40d2f709a2928269298f5c57 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py @@ -34,35 +34,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - 
dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -70,10 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = False # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.layers.embedding( input=q, @@ -82,8 +80,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -95,14 +95,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.layers.embedding( input=pt, @@ -111,8 +112,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -124,13 +127,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.layers.embedding( input=nt, @@ -139,8 +143,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -152,8 +158,10 @@ 
class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -164,14 +172,18 @@ class TestPSPassWithBow(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = fleet.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.WORKER, - worker_num=2, - server_endpoints=endpoints) + role = fleet.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.WORKER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss, acc, _ = self.net() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py index fae0cd926187ae48649d3c57da358d7f1cb12f9a..0cb4ee6e3af2f6ea19976a5d4602b453da30a282 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py @@ -34,35 +34,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -70,10 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -81,7 +79,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -93,14 +93,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data 
label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -108,7 +109,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -120,13 +123,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, @@ -134,7 +138,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -146,8 +152,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -158,14 +166,18 @@ class TestPSPassWithBow(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss, acc, _ = self.net() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py index c91d86d89ca6b1f2dab85aefd5e955960855cf11..fbb640fc8bbc84d8845fcab8ac0a4b1030a2787d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py @@ -34,35 +34,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def 
get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -70,10 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.layers.embedding( input=q, @@ -82,8 +80,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -95,14 +95,15 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.layers.embedding( input=pt, @@ -111,8 +112,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr), - is_sparse=is_sparse) + learning_rate=emb_lr, + ), + is_sparse=is_sparse, + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -124,13 +127,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.layers.embedding( input=nt, @@ -139,8 +143,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__tmp_", - learning_rate=emb_lr), - is_sparse=False) + learning_rate=emb_lr, + ), + is_sparse=False, + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -152,8 +158,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + 
learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -164,23 +172,30 @@ class TestPSPassWithBow(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss, acc, _ = self.net() optimizer = fluid.optimizer.Adam( - learning_rate=fluid.layers.exponential_decay(learning_rate=base_lr, - decay_steps=500, - decay_rate=0.969, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=base_lr, + decay_steps=500, + decay_rate=0.969, + staircase=True, + ) + ) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py index 21dfcee7e8ed5c2e5ee83cd10cb168a8ff0e4b87..e0b73b4344c68431b1a8466670df680ba6d956a2 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py @@ -34,35 +34,34 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): - def net(self): - def get_acc(cos_q_nt, cos_q_pt, batch_size): cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = fluid.layers.reduce_sum(cond) - acc = fluid.layers.elementwise_div(cond_3, - fluid.layers.fill_constant( - shape=[1], - value=batch_size * 1.0, - dtype='float64'), - name="simnet_acc") + acc = fluid.layers.elementwise_div( + cond_3, + fluid.layers.fill_constant( + shape=[1], value=batch_size * 1.0, dtype='float64' + ), + name="simnet_acc", + ) return acc def get_loss(cos_q_pt, cos_q_nt): loss_op1 = fluid.layers.elementwise_sub( - fluid.layers.fill_constant_batch_size_like(input=cos_q_pt, - shape=[-1, 1], - value=margin, - dtype='float32'), - cos_q_pt) + fluid.layers.fill_constant_batch_size_like( + input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32' + ), + cos_q_pt, + ) loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt) loss_op3 = fluid.layers.elementwise_max( - fluid.layers.fill_constant_batch_size_like(input=loss_op2, - shape=[-1, 1], - value=0.0, - dtype='float32'), - loss_op2) + fluid.layers.fill_constant_batch_size_like( + input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32' + ), + loss_op2, + ) avg_cost = paddle.mean(loss_op3) return avg_cost @@ -70,10 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="query_ids", - shape=[1], - dtype="int64", - lod_level=1) + q = fluid.layers.data( + name="query_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -81,7 +79,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) q_emb = fluid.layers.reshape(q_emb, [-1, emb_dim]) # vsum q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum') @@ -93,14 +93,15 @@ class 
TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__q_fc__", - learning_rate=base_lr)) + learning_rate=base_lr, + ), + ) # label data label = fluid.layers.data(name="label", shape=[1], dtype="int64") # pt - pt = fluid.layers.data(name="pos_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + pt = fluid.layers.data( + name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -108,7 +109,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) pt_emb = fluid.layers.reshape(pt_emb, [-1, emb_dim]) # vsum pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum') @@ -120,13 +123,14 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) # nt - nt = fluid.layers.data(name="neg_title_ids", - shape=[1], - dtype="int64", - lod_level=1) + nt = fluid.layers.data( + name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, @@ -134,7 +138,9 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__emb__", - learning_rate=emb_lr)) + learning_rate=emb_lr, + ), + ) nt_emb = fluid.layers.reshape(nt_emb, [-1, emb_dim]) # vsum nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum') @@ -146,8 +152,10 @@ class TestPSPassWithBow(unittest.TestCase): param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.01), name="__fc__", - learning_rate=base_lr), - bias_attr=fluid.ParamAttr(name="__fc_b__")) + learning_rate=base_lr, + ), + bias_attr=fluid.ParamAttr(name="__fc_b__"), + ) cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc) cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc) # loss @@ -158,14 +166,18 @@ class TestPSPassWithBow(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss, acc, _ = self.net() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps7.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps7.py index bdba0d90bb0f8c5fe778671494f6ccb2bcd974b3..9351791260b2af9ba84d6e5abc30c7cb078a81ad 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps7.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps7.py @@ -36,18 +36,17 @@ batch_size = 4 class TestNaturalExpDecay(unittest.TestCase): - def net(self): - input_data = paddle.static.data(name="sparse_input", - shape=[None, 1], - dtype="int64") - input_label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + input_data = paddle.static.data( + name="sparse_input", shape=[None, 1], dtype="int64" + ) + input_label = paddle.static.data( + 
name="label", shape=[None, 1], dtype="int64" + ) label = paddle.cast(input_label, dtype="float32") - embedding = paddle.static.nn.embedding(input_data, - is_sparse=True, - size=[1000, 128]) + embedding = paddle.static.nn.embedding( + input_data, is_sparse=True, size=[1000, 128] + ) fc1 = paddle.static.nn.fc(embedding, size=1024, activation="relu") fc2 = paddle.static.nn.fc(fc1, size=512, activation="relu") @@ -60,20 +59,24 @@ class TestNaturalExpDecay(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss = self.net() - scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=base_lr, - gamma=0.999, - verbose=True) + scheduler = paddle.optimizer.lr.NaturalExpDecay( + learning_rate=base_lr, gamma=0.999, verbose=True + ) optimizer = fluid.optimizer.Adam(scheduler) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps8.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps8.py index 3dcb16f5444bce7b569c6e6927284040794472cd..cbf12f19714ff5ead1cc75380e4b8f9c563344f9 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps8.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps8.py @@ -35,18 +35,17 @@ batch_size = 4 class TestNoamDecay(unittest.TestCase): - def net(self): - input_data = paddle.static.data(name="sparse_input", - shape=[None, 1], - dtype="int64") - input_label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + input_data = paddle.static.data( + name="sparse_input", shape=[None, 1], dtype="int64" + ) + input_label = paddle.static.data( + name="label", shape=[None, 1], dtype="int64" + ) label = paddle.cast(input_label, dtype="float32") - embedding = paddle.static.nn.embedding(input_data, - is_sparse=True, - size=[1000, 128]) + embedding = paddle.static.nn.embedding( + input_data, is_sparse=True, size=[1000, 128] + ) fc1 = paddle.static.nn.fc(embedding, size=1024, activation="relu") fc2 = paddle.static.nn.fc(fc1, size=512, activation="relu") @@ -59,20 +58,24 @@ class TestNoamDecay(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss = self.net() - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100, - verbose=True) + scheduler = paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100, verbose=True + ) optimizer = fluid.optimizer.Adam(scheduler) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps9.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps9.py index 958ad4c6052f0af0a81b9ae1db6ae19972550214..bebc6bbb96536af1ad9f1e54d32cc8d1c54413b4 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps9.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps9.py @@ -35,18 +35,17 @@ batch_size = 4 class TestExponentialDecay(unittest.TestCase): - def net(self): - input_data = paddle.static.data(name="sparse_input", - shape=[None, 1], - dtype="int64") - input_label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + input_data = paddle.static.data( + name="sparse_input", shape=[None, 1], dtype="int64" + ) + input_label = paddle.static.data( + name="label", shape=[None, 1], dtype="int64" + ) label = paddle.cast(input_label, dtype="float32") - embedding = paddle.static.nn.embedding(input_data, - is_sparse=True, - size=[1000, 128]) + embedding = paddle.static.nn.embedding( + input_data, is_sparse=True, size=[1000, 128] + ) fc1 = paddle.static.nn.fc(embedding, size=1024, activation="relu") fc2 = paddle.static.nn.fc(fc1, size=512, activation="relu") @@ -59,20 +58,24 @@ class TestExponentialDecay(unittest.TestCase): def test(self): endpoints = [ - "127.0.0.1:36004", "127.0.0.1:36005", "127.0.0.1:36006", - "127.0.0.1:36007" + "127.0.0.1:36004", + "127.0.0.1:36005", + "127.0.0.1:36006", + "127.0.0.1:36007", ] - role = role_maker.UserDefinedRoleMaker(current_id=0, - role=role_maker.Role.SERVER, - worker_num=2, - server_endpoints=endpoints) + role = role_maker.UserDefinedRoleMaker( + current_id=0, + role=role_maker.Role.SERVER, + worker_num=2, + server_endpoints=endpoints, + ) fleet.init(role) loss = self.net() - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=base_lr, - gamma=0.999, - verbose=True) + scheduler = paddle.optimizer.lr.ExponentialDecay( + learning_rate=base_lr, gamma=0.999, verbose=True + ) optimizer = fluid.optimizer.Adam(scheduler) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer.py index b49a2599b76a5f52eb83d3d144a058bc04908258..ca3c5badc0922e1c0a2977555efd5445726750c9 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer.py @@ -22,7 +22,6 @@ flag_name = os.path.splitext(__file__)[0] class TestFleetMetaOptimizerPrecision(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -34,11 +33,14 @@ class TestFleetMetaOptimizerPrecision(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_fleet_raw_program_optimizer.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_fleet_raw_program_optimizer.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py index be85ea71040abc203d4565dbb9e127c4c7b68fd5..a6b994dde6d785a569f6bf01f3e3e8bccac75fc7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py @@ -22,7 +22,6 @@ flag_name = os.path.splitext(__file__)[0] class TestFleetMetaOptimizerAllReduceFusePrecision(TestDistBase): - def _setup_config(self): self._sync_mode = 
True self._use_reduce = False @@ -34,12 +33,14 @@ class TestFleetMetaOptimizerAllReduceFusePrecision(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place( "dist_fleet_raw_program_optimizer_fuse_allreduce.py", delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_simnet.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_simnet.py index b2bf73511ce0cb1d4f9d1d741e23e62f3f3dbe8f..27c76bee6ed9c233401987e23a2fcaea37bb3f74 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_simnet.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_simnet.py @@ -21,23 +21,20 @@ paddle.enable_static() class TestDistSimnetASync2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), "FLAGS_rpc_deadline": "5000", # 5sec to fail fast "http_proxy": "", - "CPU_NUM": "2" + "CPU_NUM": "2", } required_envs.update(need_envs) @@ -49,9 +46,9 @@ class TestDistSimnetASync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_simnet_bow.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_simnet_bow.py", delta=1e-5, check_error_log=True + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py index 7fc0cc60aecfd71cd1595c8fe176378e5c465cf4..13eaa703ddc0c7b20267f8c95f18ab280481c6b4 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py @@ -28,16 +28,13 @@ from dist_fleet_sparse_embedding_ctr import fake_ctr_reader @unittest.skip(reason="Skip unstable ut, need paddle sync mode fix") class TestDistMnistSync2x2(TestFleetBase): - def _setup_config(self): self._mode = "sync" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -58,22 +55,21 @@ class TestDistMnistSync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_sparse_embedding_ctr.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_sparse_embedding_ctr.py", + delta=1e-5, + check_error_log=True, + ) class TestDistMnistAsync2x2(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -94,22 +90,21 @@ 
class TestDistMnistAsync2x2(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_sparse_embedding_ctr.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_sparse_embedding_ctr.py", + delta=1e-5, + check_error_log=True, + ) class TestDistMnistAsync2x2WithDecay(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -131,22 +126,21 @@ class TestDistMnistAsync2x2WithDecay(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_sparse_embedding_ctr.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_sparse_embedding_ctr.py", + delta=1e-5, + check_error_log=True, + ) class TestDistMnistAsync2x2WithUnifrom(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), @@ -168,20 +162,20 @@ class TestDistMnistAsync2x2WithUnifrom(TestFleetBase): tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) def test_dist_train(self): - self.check_with_place("dist_fleet_sparse_embedding_ctr.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_sparse_embedding_ctr.py", + delta=1e-5, + check_error_log=True, + ) @unittest.skip(reason="Skip unstable ut, need tensor table to enhance") class TestDistMnistAsync2x2WithGauss(TestFleetBase): - def _setup_config(self): self._mode = "async" self._reader = "pyreader" def _run_local_infer(self, model_file): - def net(): """ network definition @@ -194,21 +188,27 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): """ dnn_input_dim, lr_input_dim = 10, 10 - dnn_data = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - lr_data = fluid.layers.data(name="lr_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) - label = fluid.layers.data(name="click", - shape=[-1, 1], - dtype="int64", - lod_level=0, - append_batch_size=False) + dnn_data = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + lr_data = fluid.layers.data( + name="lr_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=0, + append_batch_size=False, + ) datas = [dnn_data, lr_data, label] @@ -220,10 +220,13 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], is_test=inference, - param_attr=fluid.ParamAttr(name="deep_embedding", - initializer=init)) - dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding, - pool_type="sum") + param_attr=fluid.ParamAttr( + name="deep_embedding", initializer=init + ), + ) + dnn_pool = fluid.layers.sequence_pool( + 
input=dnn_embedding, pool_type="sum" + ) dnn_out = dnn_pool for i, dim in enumerate(dnn_layer_dims[1:]): fc = fluid.layers.fc( @@ -231,8 +234,10 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): size=dim, act="relu", param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.01)), - name='dnn-fc-%d' % i) + initializer=fluid.initializer.Constant(value=0.01) + ), + name='dnn-fc-%d' % i, + ) dnn_out = fc # build lr model @@ -242,10 +247,13 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): is_test=inference, param_attr=fluid.ParamAttr( name="wide_embedding", - initializer=fluid.initializer.Constant(value=0.01))) + initializer=fluid.initializer.Constant(value=0.01), + ), + ) - lr_pool = fluid.layers.sequence_pool(input=lr_embbding, - pool_type="sum") + lr_pool = fluid.layers.sequence_pool( + input=lr_embbding, pool_type="sum" + ) merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax') return datas, predict @@ -259,15 +267,15 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): fluid.io.load_persistables(exe, model_file) for batch_id, data in enumerate(reader()): - score = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[predict]) - - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + score = exe.run( + fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[predict], + ) + + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): model_dir = tempfile.mkdtemp() required_envs = { @@ -293,9 +301,11 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): shutil.rmtree(model_dir) def test_dist_train(self): - self.check_with_place("dist_fleet_sparse_embedding_ctr.py", - delta=1e-5, - check_error_log=True) + self.check_with_place( + "dist_fleet_sparse_embedding_ctr.py", + delta=1e-5, + check_error_log=True, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py index 0090d6bef159777500c27c3c1be20bc137b7e5cb..e64b7d8010ef59f29ac5507594ff9daf6375e3df 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py @@ -23,15 +23,15 @@ paddle.enable_static() class TestDistStrategyTrainerDescConfig(unittest.TestCase): - def setUp(self): os.environ["PADDLE_PSERVER_NUMS"] = "2" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_PORT"] = "36001" os.environ["PADDLE_TRAINER_ID"] = "0" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_trainer_desc_config(self): os.environ["TRAINING_ROLE"] = "TRAINER" @@ -50,7 +50,7 @@ class TestDistStrategyTrainerDescConfig(unittest.TestCase): config = { "dump_fields_path": "dump_data", "dump_fields": ["xxx", "yyy"], - "dump_param": ['zzz'] + "dump_param": ['zzz'], } strategy.trainer_desc_configs = config @@ -62,8 +62,10 @@ class TestDistStrategyTrainerDescConfig(unittest.TestCase): self.assertEqual(program._fleet_opt["dump_fields_path"], "dump_data") self.assertEqual(len(program._fleet_opt["dump_fields"]), 2) self.assertEqual(len(program._fleet_opt["dump_param"]), 1) - self.assertEqual(program._fleet_opt["mpi_size"], - 
int(os.environ["PADDLE_TRAINERS_NUM"])) + self.assertEqual( + program._fleet_opt["mpi_size"], + int(os.environ["PADDLE_TRAINERS_NUM"]), + ) optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -73,8 +75,10 @@ class TestDistStrategyTrainerDescConfig(unittest.TestCase): self.assertEqual(program._fleet_opt["dump_fields_path"], "dump_data") self.assertEqual(len(program._fleet_opt["dump_fields"]), 2) self.assertEqual(len(program._fleet_opt["dump_param"]), 1) - self.assertEqual(program._fleet_opt["mpi_size"], - int(os.environ["PADDLE_TRAINERS_NUM"])) + self.assertEqual( + program._fleet_opt["mpi_size"], + int(os.environ["PADDLE_TRAINERS_NUM"]), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_lookup_sparse_table_fuse_ops.py b/python/paddle/fluid/tests/unittests/test_dist_lookup_sparse_table_fuse_ops.py index 8a753e52de7c138b1f8ff73f51f67896b6462fc8..f64a7e6882e12fde9f643deba68e745fe9cbc386 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_lookup_sparse_table_fuse_ops.py +++ b/python/paddle/fluid/tests/unittests/test_dist_lookup_sparse_table_fuse_ops.py @@ -25,7 +25,6 @@ paddle.enable_static() @unittest.skip("do not need currently") class TestLookupTableFuseOp(unittest.TestCase): - def test_fuse(self): places = [core.CPUPlace()] # currently only support CPU @@ -44,20 +43,23 @@ class TestLookupTableFuseOp(unittest.TestCase): persistable=True, type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[1], - dtype="float32") + dtype="float32", + ) ids = init_program.global_block().create_var( name="Ids", persistable=True, type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[100], - dtype="int64") + dtype="int64", + ) output = init_program.global_block().create_var( name="output", type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[100, 8], - dtype="float32") + dtype="float32", + ) metas = [] metas.append( @@ -71,27 +73,30 @@ class TestLookupTableFuseOp(unittest.TestCase): type="lookup_sparse_table_init", inputs=None, outputs=None, - attrs={"large_scale_metas": metas}) - - init_program.global_block().append_op(type="lookup_sparse_table_read", - inputs={"Ids": ids}, - outputs={"Out": output}, - attrs={ - "tablename": - "embedding_1.block0", - "init": True, - "value_names": ["Param"], - }) - - init_program.global_block().append_op(type="lookup_sparse_table_read", - inputs={"Ids": ids}, - outputs={"Out": output}, - attrs={ - "tablename": - "embedding_2.block0", - "init": True, - "value_names": ["Param"], - }) + attrs={"large_scale_metas": metas}, + ) + + init_program.global_block().append_op( + type="lookup_sparse_table_read", + inputs={"Ids": ids}, + outputs={"Out": output}, + attrs={ + "tablename": "embedding_1.block0", + "init": True, + "value_names": ["Param"], + }, + ) + + init_program.global_block().append_op( + type="lookup_sparse_table_read", + inputs={"Ids": ids}, + outputs={"Out": output}, + attrs={ + "tablename": "embedding_2.block0", + "init": True, + "value_names": ["Param"], + }, + ) executor = fluid.Executor(place) executor.run(init_program) @@ -99,9 +104,11 @@ class TestLookupTableFuseOp(unittest.TestCase): training_program = fluid.Program() scope.var('Beta1Pow').get_tensor().set( - np.array([0]).astype("float32"), place) + np.array([0]).astype("float32"), place + ) scope.var('Beta2Pow').get_tensor().set( - np.array([0]).astype("float32"), place) + np.array([0]).astype("float32"), place + ) rows = [0, 1, 2, 3, 4, 5, 6] row_numel = 8 @@ -119,28 +126,32 @@ class 
TestLookupTableFuseOp(unittest.TestCase): persistable=True, type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[1], - dtype="float32") + dtype="float32", + ) grads = training_program.global_block().create_var( name="Grad", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS, shape=[100, 8], - dtype="float32") + dtype="float32", + ) beta1 = training_program.global_block().create_var( name="Beta1Pow", persistable=True, type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[1], - dtype="float32") + dtype="float32", + ) beta2 = training_program.global_block().create_var( name="Beta2Pow", persistable=True, type=fluid.core.VarDesc.VarType.LOD_TENSOR, shape=[1], - dtype="float32") + dtype="float32", + ) training_program.global_block().append_op( type="lookup_sparse_table_fuse_adam", @@ -150,27 +161,23 @@ class TestLookupTableFuseOp(unittest.TestCase): "Beta1Pow": beta1, "Beta2Pow": beta2, }, - outputs={ - "Beta1PowOut": beta1, - "Beta2PowOut": beta2 - }, + outputs={"Beta1PowOut": beta1, "Beta2PowOut": beta2}, attrs={ "is_entry": False, "tablename": "embedding_1.block0", "value_names": ["Param", "Moment1", "Moment2"], - }) + }, + ) training_program.global_block().append_op( type="lookup_sparse_table_fuse_sgd", - inputs={ - "Grad": grads, - "LearningRate": lr - }, + inputs={"Grad": grads, "LearningRate": lr}, attrs={ "is_entry": False, "tablename": "embedding_2.block0", "value_names": ["Param"], - }) + }, + ) executor.run(training_program) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_backward_deps.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_backward_deps.py index 425e8c21aae3b30abff38627d6c51535db788f5e..ba1037b05730c4cdb8342e9a97e8b420c395a51e 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_backward_deps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_backward_deps.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistNCCL2BackWardDeps(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -30,6 +29,7 @@ class TestDistMnistNCCL2BackWardDeps(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_batch_merge.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_batch_merge.py index b21e7b208b2969d777fa8e1dd50158a93e64c9f1..a480b987317557d7837fc79a59b6dab6a96fd0da 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_batch_merge.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_batch_merge.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistMnist2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -31,11 +30,9 @@ class TestDistMnist2x2(TestDistBase): def test_dist_train(self): self.check_with_place("dist_mnist_batch_merge.py", delta=1e-5) - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}): + def check_with_place( + self, model_file, delta=1e-3, check_error_log=False, need_envs={} + ): # TODO(typhoonzero): should auto adapt GPU count on the machine. 
required_envs = { "PATH": os.getenv("PATH", ""), @@ -48,22 +45,27 @@ class TestDistMnist2x2(TestDistBase): required_envs.update(need_envs) if check_error_log: - required_envs["GLOG_vmodule"] = \ - "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" + required_envs[ + "GLOG_vmodule" + ] = "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" required_envs["GLOG_logtostderr"] = "1" - no_merge_losses = self._run_local(model_file, - required_envs, - check_error_log=check_error_log, - batch_size=4, - log_name=flag_name) + no_merge_losses = self._run_local( + model_file, + required_envs, + check_error_log=check_error_log, + batch_size=4, + log_name=flag_name, + ) - batch_merge_losses = self._run_local(model_file, - required_envs, - check_error_log=check_error_log, - batch_size=2, - batch_merge_repeat=2, - log_name=flag_name) + batch_merge_losses = self._run_local( + model_file, + required_envs, + check_error_log=check_error_log, + batch_size=2, + batch_merge_repeat=2, + log_name=flag_name, + ) # Ensure both result have values. self.assertGreater(len(no_merge_losses), 1) self.assertEqual(len(no_merge_losses), len(batch_merge_losses)) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleet_save.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleet_save.py index a83a3e83eca0618a72142fdb899f550c8f2dcd93..9a1afd250d8ef857c23b60146f405574c4937491 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleet_save.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleet_save.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestDistMnistFleetSave(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -78,12 +77,14 @@ class TestDistMnistFleetSave(TestDistBase): self._rm_temp_files(dirname) return True - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + def check_with_place( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): required_envs = self._get_required_envs(check_error_log, need_envs) tr0_losses, tr1_losses = self._run_cluster_nccl2( @@ -91,13 +92,15 @@ class TestDistMnistFleetSave(TestDistBase): required_envs, update_method='nccl2', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) dirname = '/tmp' self._test_saved_files(dirname) def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py index 6766b1c1baea22d48b3d66d7a553af881856457c..fbfdccfeff41cb129521049d48bb3dd8a8296a2a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistNCCL2FleetApi(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -31,20 +30,24 @@ class TestDistMnistNCCL2FleetApi(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist.py", 
delta=1e-5, check_error_log=True, - need_envs={'FLAGS_allreduce_record_one_event': '1'}) + need_envs={'FLAGS_allreduce_record_one_event': '1'}, + ) class FleetCollectiveTest(unittest.TestCase): - def test_open_sync_batch_norm(self): import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker - from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy + from paddle.fluid.incubate.fleet.collective import ( + fleet, + DistributedStrategy, + ) if not fluid.core.is_compiled_with_cuda(): # Operator "gen_nccl_id" has not been registered @@ -62,8 +65,9 @@ class FleetCollectiveTest(unittest.TestCase): dist_strategy = DistributedStrategy() dist_strategy.sync_batch_norm = True - dist_optimizer = fleet.distributed_optimizer(optimizer, - strategy=dist_strategy) + dist_optimizer = fleet.distributed_optimizer( + optimizer, strategy=dist_strategy + ) dist_optimizer.minimize(loss) self.assertEqual(dist_strategy.exec_strategy.num_threads, 1) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py index cff295d5802c5ee6bc7fb18f33d83a1638d25e98..ce74d92a7b81af2ca73e2e9e793554e3c621294e 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py @@ -17,7 +17,6 @@ from test_dist_base import TestDistBase class TestDistMnist2x2FP16AllReduce(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -25,6 +24,7 @@ class TestDistMnist2x2FP16AllReduce(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist_fp16_allreduce.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_hallreduce.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_hallreduce.py index 93640b06ee6f15827ddefc91deda1b5571516d81..ca1b7299b8c63df418b6e254a4cc31c7bd57dc5a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_hallreduce.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_hallreduce.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistMnistNCCL2HAllreduce(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -34,11 +33,14 @@ class TestDistMnistNCCL2HAllreduce(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_mnist.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_lars.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_lars.py index 03a4da1f45e388d7a2aa833587b1b918adfef79e..81b96e7c8991cd7d5c3521b357b66f532e543d99 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_lars.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_lars.py @@ -17,7 +17,6 @@ from test_dist_base import TestDistBase class TestDistMnist2x2Lars(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_multi_comm.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_multi_comm.py index 
57d89b2d23d31c6c69b09d7ef4b0ce3356a85d43..82f09bcfae1d0120f42de0c7e1205a52ee5166c7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_multi_comm.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_multi_comm.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistMnistNCCL2MultiNCCLComm(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -33,11 +32,14 @@ class TestDistMnistNCCL2MultiNCCLComm(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_mnist.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_pg.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_pg.py index b813e96971eecd1ad6735cbde198937c7e9f86e6..e517e38978e48f1ff5737f109278bd8347c0a425 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_pg.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_pg.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistNCCL2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -29,13 +28,16 @@ class TestDistMnistNCCL2(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_mnist.py", - delta=1, - need_envs={ - "FLAGS_enable_parallel_graph": "1", - "FLAGS_sync_nccl_allreduce": "1" - }) + self.check_with_place( + "dist_mnist.py", + delta=1, + need_envs={ + "FLAGS_enable_parallel_graph": "1", + "FLAGS_sync_nccl_allreduce": "1", + }, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py index c455dbe270a361456e677064b81084dc9b97550b..40ae6686ed98ce8c18d672f39c53ba216543d9c7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistNCCL2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -29,6 +28,7 @@ class TestDistMnistNCCL2(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_train.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_train.py index af2392b68528b5612b13b64f653c9176bb3058ee..f6dfa739846605580c418ccf99f4ef00522303ce 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_train.py @@ -21,55 +21,53 @@ flag_name = os.path.splitext(__file__)[0] class TestDistMnist2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False def test_dist_train(self): - self.check_with_place("dist_mnist.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestDistMnist2x2WithMemopt(TestDistBase): - def _setup_config(self): self._sync_mode = True self._mem_opt = True def test_dist_train(self): - self.check_with_place("dist_mnist.py", - 
delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestDistMnistAsync(TestDistBase): - def _setup_config(self): self._sync_mode = False self._use_reduce = False def test_dist_train(self): - self.check_with_place("dist_mnist.py", - delta=200, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", delta=200, check_error_log=True, log_name=flag_name + ) class TestDistMnistDcAsgd(TestDistBase): - def _setup_config(self): self._sync_mode = False self._dc_asgd = True def test_se_resnext(self): - self.check_with_place("dist_mnist.py", - delta=200, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_mnist.py", delta=200, check_error_log=True, log_name=flag_name + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_with_program.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_with_program.py index 4475b52ee79e41e0cc371129c1b0cffba2363f18..83c9a340e32cb0bca1450a036eeb4e6b42684a71 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_with_program.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_with_program.py @@ -20,7 +20,6 @@ paddle.enable_static() class TestDistMnistLocalSGDFleetApi(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -31,12 +30,12 @@ class TestDistMnistLocalSGDFleetApi(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) class TestDistMnistGradAllReduceFleetApi(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -47,6 +46,7 @@ class TestDistMnistGradAllReduceFleetApi(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_oneps.py b/python/paddle/fluid/tests/unittests/test_dist_oneps.py index 7704a4c715efcf638e5d1fbf65fcacc3af482461..c6b25263936ac6f70972a530003cccc5383d01a8 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_oneps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_oneps.py @@ -22,7 +22,6 @@ from paddle.distributed.fleet.runtime.the_one_ps import Table class TestTable(unittest.TestCase): - def test_table_tensor(self): table = Table() table.id = 1001 diff --git a/python/paddle/fluid/tests/unittests/test_dist_op.py b/python/paddle/fluid/tests/unittests/test_dist_op.py index 37835d54176f3c9a6117d4186316edb0a01b8bc3..e627567b1afd9adf05428a8e3382582572823153 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_op.py +++ b/python/paddle/fluid/tests/unittests/test_dist_op.py @@ -23,7 +23,7 @@ paddle.enable_static() def dist(x, y, p): - if p == 0.: + if p == 0.0: out = np.count_nonzero(x - y) elif p == float("inf"): out = np.max(np.abs(x - y)) @@ -35,7 +35,6 @@ def dist(x, y, p): class TestDistOp(OpTest): - def setUp(self): self.op_type = 'dist' self.python_api = paddle.dist @@ -44,7 +43,7 @@ class TestDistOp(OpTest): self.init_data_type() self.inputs = { "X": np.random.random(self.x_shape).astype(self.data_type), - "Y": np.random.random(self.y_shape).astype(self.data_type) + "Y": np.random.random(self.y_shape).astype(self.data_type), } self.attrs["p"] = self.p @@ -54,13 +53,14 @@ class TestDistOp(OpTest): self.gradient = self.calc_gradient() def 
init_case(self): - self.x_shape = (120) - self.y_shape = (120) - self.p = 0. + self.x_shape = 120 + self.y_shape = 120 + self.p = 0.0 def init_data_type(self): - self.data_type = np.float32 if core.is_compiled_with_rocm( - ) else np.float64 + self.data_type = ( + np.float32 if core.is_compiled_with_rocm() else np.float64 + ) def calc_gradient(self): x = self.inputs["X"] @@ -75,8 +75,11 @@ class TestDistOp(OpTest): grad[x_minux_y_abs != norm] = 0 else: norm = dist(x, y, p) - grad = np.power(norm, 1 - p) * np.power(np.abs(x - y), - p - 1) * np.sign(x - y) + grad = ( + np.power(norm, 1 - p) + * np.power(np.abs(x - y), p - 1) + * np.sign(x - y) + ) def get_reduce_dims(x, y): x_reduce_dims = [] @@ -111,30 +114,29 @@ class TestDistOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(["X", "Y"], - "Out", - user_defined_grads=self.gradient, - check_eager=True) + self.check_grad( + ["X", "Y"], + "Out", + user_defined_grads=self.gradient, + check_eager=True, + ) class TestDistOpCase1(TestDistOp): - def init_case(self): self.x_shape = (3, 5, 5, 6) self.y_shape = (5, 5, 6) - self.p = 1. + self.p = 1.0 class TestDistOpCase2(TestDistOp): - def init_case(self): self.x_shape = (10, 10) self.y_shape = (4, 10, 10) - self.p = 2. + self.p = 2.0 class TestDistOpCase3(TestDistOp): - def init_case(self): self.x_shape = (15, 10) self.y_shape = (15, 10) @@ -142,7 +144,6 @@ class TestDistOpCase3(TestDistOp): class TestDistOpCase4(TestDistOp): - def init_case(self): self.x_shape = (2, 3, 4, 5, 8) self.y_shape = (3, 1, 5, 8) @@ -150,7 +151,6 @@ class TestDistOpCase4(TestDistOp): class TestDistOpCase5(TestDistOp): - def init_case(self): self.x_shape = (4, 1, 4, 8) self.y_shape = (2, 2, 1, 4, 4, 8) @@ -158,10 +158,10 @@ class TestDistOpCase5(TestDistOp): class TestDistAPI(unittest.TestCase): - def init_data_type(self): - self.data_type = 'float32' if core.is_compiled_with_rocm( - ) else 'float64' + self.data_type = ( + 'float32' if core.is_compiled_with_rocm() else 'float64' + ) def test_api(self): self.init_data_type() @@ -174,15 +174,17 @@ class TestDistAPI(unittest.TestCase): x_i = np.random.random((2, 3, 4, 5)).astype(self.data_type) y_i = np.random.random((3, 1, 5)).astype(self.data_type) result = paddle.dist(x, y, p) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={ - 'x': x_i, - 'y': y_i - }, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'x': x_i, 'y': y_i}, + fetch_list=[result], + ) np.testing.assert_allclose(dist(x_i, y_i, p), out[0], rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_dist_save_load.py b/python/paddle/fluid/tests/unittests/test_dist_save_load.py index bee267b941a247ed07105e214f6454445162a1fc..d4de87866227fc63b911bec13320fc09fc26754a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_dist_save_load.py @@ -27,29 +27,31 @@ flag_name = os.path.splitext(__file__)[0] class TestDistSaveLoadDense2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._enforce_place = "CPU" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + def check_with_place( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): required_envs = 
{ "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), - "http_proxy": "" + "http_proxy": "", } required_envs.update(need_envs) if check_error_log: - required_envs["GLOG_vmodule"] = \ - "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" + required_envs[ + "GLOG_vmodule" + ] = "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" required_envs["GLOG_logtostderr"] = "1" model_dir = tempfile.mkdtemp() @@ -65,10 +67,9 @@ class TestDistSaveLoadDense2x2(TestDistBase): cluster_env.update(required_envs) local_var = self._run_local(model_file, local_env, check_error_log) - tr0_var, tr1_var = self._run_cluster(model_file, - cluster_env, - check_error_log, - log_name=flag_name) + tr0_var, tr1_var = self._run_cluster( + model_file, cluster_env, check_error_log, log_name=flag_name + ) shutil.rmtree(model_dir) @@ -87,36 +88,40 @@ class TestDistSaveLoadDense2x2(TestDistBase): 'IS_SELF_CONTAINED_LR': '1', 'SAVE_MODE': 'LOCAL', } - self.check_with_place("dist_save_load.py", - delta=0, - check_error_log=False, - need_envs=need_envs) + self.check_with_place( + "dist_save_load.py", + delta=0, + check_error_log=False, + need_envs=need_envs, + ) class TestDistSaveLoadWithPServerStateDense2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._enforce_place = "CPU" - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=False, - need_envs={}, - log_name=""): + def check_with_place( + self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}, + log_name="", + ): required_envs = { "PATH": os.getenv("PATH", ""), "PYTHONPATH": os.getenv("PYTHONPATH", ""), "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), - "http_proxy": "" + "http_proxy": "", } required_envs.update(need_envs) if check_error_log: - required_envs["GLOG_vmodule"] = \ - "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" + required_envs[ + "GLOG_vmodule" + ] = "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10" required_envs["GLOG_logtostderr"] = "1" model_dir = tempfile.mkdtemp() @@ -127,19 +132,17 @@ class TestDistSaveLoadWithPServerStateDense2x2(TestDistBase): save_env["MODEL_DIR"] = model_dir save_env.update(required_envs) - tr0_var_1, tr1_var_1 = self._run_cluster(model_file, - save_env, - check_error_log, - log_name=flag_name) + tr0_var_1, tr1_var_1 = self._run_cluster( + model_file, save_env, check_error_log, log_name=flag_name + ) load_env = {} load_env["LOAD"] = "1" load_env["MODEL_DIR"] = model_dir load_env.update(required_envs) - tr0_var_2, tr1_var_2 = self._run_cluster(model_file, - load_env, - check_error_log, - log_name=flag_name) + tr0_var_2, tr1_var_2 = self._run_cluster( + model_file, load_env, check_error_log, log_name=flag_name + ) shutil.rmtree(model_dir) @@ -158,13 +161,15 @@ class TestDistSaveLoadWithPServerStateDense2x2(TestDistBase): 'IS_SELF_CONTAINED_LR': '1', 'SAVE_MODE': 'DIST', 'OPTIMIZER': 'ADAM', - 'SKIP_STEPS': str(np.random.randint(2, 6)) + 'SKIP_STEPS': 
str(np.random.randint(2, 6)), } - self.check_with_place("dist_save_load.py", - delta=0, - check_error_log=True, - need_envs=need_envs, - log_name=flag_name) + self.check_with_place( + "dist_save_load.py", + delta=0, + check_error_log=True, + need_envs=need_envs, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_se_resnext_nccl.py b/python/paddle/fluid/tests/unittests/test_dist_se_resnext_nccl.py index aeacf2c00a6d73b81d615e8dcd48809ae95f85dc..35ffbbb80ca04367210c52d3d867e9f39b2e8e31 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_se_resnext_nccl.py +++ b/python/paddle/fluid/tests/unittests/test_dist_se_resnext_nccl.py @@ -24,7 +24,6 @@ flag_name = os.path.splitext(__file__)[0] class TestDistSeResneXtNCCL(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reader_alloc = False @@ -32,15 +31,17 @@ class TestDistSeResneXtNCCL(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_se_resnext.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_se_resnext.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestDistSeResneXtNCCLMP(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reader_alloc = False @@ -49,12 +50,15 @@ class TestDistSeResneXtNCCLMP(TestDistBase): def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("dist_se_resnext.py", - delta=1e-5, - check_error_log=True, - need_envs={"NCCL_P2P_DISABLE": "1"}, - log_name=flag_name) + self.check_with_place( + "dist_se_resnext.py", + delta=1e-5, + check_error_log=True, + need_envs={"NCCL_P2P_DISABLE": "1"}, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_se_resnext_sync.py b/python/paddle/fluid/tests/unittests/test_dist_se_resnext_sync.py index 2d288df2ae4a3ed6960c7f962137a5475e071eb1..cc5905506e6d0714ff5de3d6bbb50ef59f4b0cc9 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_se_resnext_sync.py +++ b/python/paddle/fluid/tests/unittests/test_dist_se_resnext_sync.py @@ -22,17 +22,18 @@ flag_name = os.path.splitext(__file__)[0] class TestDistSeResneXt2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reader_alloc = False @unittest.skip(reason="Skip unstable ci") def test_dist_train(self): - self.check_with_place("dist_se_resnext.py", - delta=1e-7, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_se_resnext.py", + delta=1e-7, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_sharding_save.py b/python/paddle/fluid/tests/unittests/test_dist_sharding_save.py index 43c849d8a8c38728dbc9b7ae6ac24211b21f61d2..29442a9fb6838932ba1702b20015b3094478cdd6 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_sharding_save.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sharding_save.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestDistMnistFleetSave(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -40,11 +39,19 @@ class TestDistMnistFleetSave(TestDistBase): sharding_save_files = sorted(os.listdir(dirname)) check_files = [ - 'fc_0.b_0', 'fc_0.b_0_velocity_0', 'fc_0.w_0', - 'fc_0.w_0_velocity_0', 'fc_1.b_0', 'fc_1.b_0_velocity_0', - 'fc_1.w_0', 
'fc_1.w_0_velocity_0', 'fc_2.b_0', - 'fc_2.b_0_velocity_0', 'fc_2.w_0', 'fc_2.w_0_velocity_0', - 'learning_rate_0' + 'fc_0.b_0', + 'fc_0.b_0_velocity_0', + 'fc_0.w_0', + 'fc_0.w_0_velocity_0', + 'fc_1.b_0', + 'fc_1.b_0_velocity_0', + 'fc_1.w_0', + 'fc_1.w_0_velocity_0', + 'fc_2.b_0', + 'fc_2.b_0_velocity_0', + 'fc_2.w_0', + 'fc_2.w_0_velocity_0', + 'learning_rate_0', ] if sharding_save_files != check_files: @@ -54,12 +61,14 @@ class TestDistMnistFleetSave(TestDistBase): return True - def check_with_place(self, - model_file, - delta=1e-3, - check_error_log=True, - need_envs={}, - log_name=""): + def check_with_place( + self, + model_file, + delta=1e-3, + check_error_log=True, + need_envs={}, + log_name="", + ): required_envs = self._get_required_envs(check_error_log, need_envs) tr0_losses, tr1_losses = self._run_cluster_nccl2( @@ -67,13 +76,15 @@ class TestDistMnistFleetSave(TestDistBase): required_envs, update_method='nccl2', check_error_log=check_error_log, - log_name=log_name) + log_name=log_name, + ) dirname = './ut_sharding_save_model' self._test_saved_files(dirname) def test_dist_train(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place("dist_sharding_save.py", delta=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py index b4aa24bc9e3f22ab013a5082c17108ce2ff0b43a..bff5754df1fe880d73dc9d91a22b4d0ea0888d63 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py @@ -24,8 +24,7 @@ from paddle.distributed.fleet import fleet class SparseLoadOp(unittest.TestCase): - """ Test load operator. - """ + """Test load operator.""" def net(self, emb_array, fc_array): with fluid.unique_name.guard(): @@ -38,7 +37,9 @@ class SparseLoadOp(unittest.TestCase): param_attr=fluid.ParamAttr( name="embedding", initializer=fluid.initializer.NumpyArrayInitializer( - emb_array)), + emb_array + ), + ), ) fc1 = fluid.layers.fc( @@ -48,7 +49,10 @@ class SparseLoadOp(unittest.TestCase): param_attr=fluid.ParamAttr( name='fc', initializer=fluid.initializer.NumpyArrayInitializer( - fc_array))) + fc_array + ), + ), + ) loss = fluid.layers.reduce_mean(fc1) return loss @@ -70,7 +74,6 @@ class SparseLoadOp(unittest.TestCase): @unittest.skip(reason="Skip unstable ut, need rewrite with new implement") class TestSparseLoadOpCase1(SparseLoadOp): - def test_2ps_0_load(self): # init No.0 server env env = {} @@ -110,7 +113,8 @@ class TestSparseLoadOpCase1(SparseLoadOp): fc_w = np.array(fluid.global_scope().find_var("fc").get_tensor()) emb = np.array( - fluid.global_scope().find_var("embedding.block0").get_tensor()) + fluid.global_scope().find_var("embedding.block0").get_tensor() + ) assert fc_w.all() == fc_array.all() assert emb.all() == emb_array[::2].all() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps1.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps1.py index bbbddd5d7c1ba0b1c354332b3d7f1f47cfda9711..1152ff3e82b2073935b851a1fdc9a8cfaad6662c 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps1.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps1.py @@ -25,7 +25,6 @@ from test_dist_sparse_load_ps0 import SparseLoadOp @unittest.skip(reason="Skip unstable ut, need rewrite with new implement") class TestSparseLoadOpCase2(SparseLoadOp): - def test_2ps_0_load(self): # init No.1 server env env = {} @@ -64,7 +63,8 @@ 
class TestSparseLoadOpCase2(SparseLoadOp): optimizer.minimize(loss) fleet.init_server(model_path) emb = np.array( - fluid.global_scope().find_var("embedding.block1").get_tensor()) + fluid.global_scope().find_var("embedding.block1").get_tensor() + ) assert emb.all() == emb_array[1::2].all() shutil.rmtree(model_path) diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adagrad.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adagrad.py index 32e5a0ac1f3e0e3f06f69743ebdf147eb68a49ba..5fca7c3afa7b700788b9b8c95762090679351d92 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adagrad.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adagrad.py @@ -29,8 +29,9 @@ class TestSparseLoadProgramAdagrad(TestSparseLoadProgram): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.Adam(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adam.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adam.py index 292e6e64fb5c3a1c435cc2a3daf4a28786781375..8305ade7b3e981b11841eda0fcee4e64ded88e5f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adam.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_adam.py @@ -29,8 +29,9 @@ class TestSparseLoadProgramAdam(TestSparseLoadProgram): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.Adam(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_ftrl.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_ftrl.py index 5bee7b73ef58acafa8076e106117c6fb8ff5de0e..8193e20037a38730619f99848bc43ab7ae33adb4 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_ftrl.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_ftrl.py @@ -29,8 +29,9 @@ class TestSparseLoadProgramFtrl(TestSparseLoadProgram): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.SGD(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_momentum.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_momentum.py index e3b01ee0b50028afc7be652678e3ac5d9cd5be10..83fc785bddd9f887e6a351b70bece31809593c42 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_momentum.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_momentum.py @@ -29,8 +29,9 @@ class TestSparseLoadProgramMomentum(TestSparseLoadProgram): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.SGD(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) 
fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_rmsprop.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_rmsprop.py index b3d9cde8b0a9d68c7f7c57eb481218799f646d17..049a9043cf1a231d68d23e99afa81ff2953a8b5d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_rmsprop.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_rmsprop.py @@ -29,8 +29,9 @@ class TestSparseLoadProgramRmsprop(TestSparseLoadProgram): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.SGD(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py index b3b20df47daec4c41b2d3b537c0d2f529fcf10a9..42737d3d69b4f1f1c3130b04ff47c31434042c45 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py @@ -27,7 +27,8 @@ class TestSparseLoadProgram(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ["PADDLE_TRAINERS_NUM"] = str(2) os.environ["TRAINING_ROLE"] = "PSERVER" os.environ["PADDLE_PORT"] = "4001" @@ -45,9 +46,9 @@ class TestSparseLoadProgram(unittest.TestCase): with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): inputs = fluid.data('input', shape=[None, 1], dtype="int64") - emb = fluid.layers.embedding(inputs, - is_sparse=True, - size=[10000, 128]) + emb = fluid.layers.embedding( + inputs, is_sparse=True, size=[10000, 128] + ) fc1 = fluid.layers.fc(input=emb, size=128, act="relu") fc2 = fluid.layers.fc(input=fc1, size=64, act="relu") loss = fluid.layers.reduce_mean(fc2) @@ -55,14 +56,14 @@ class TestSparseLoadProgram(unittest.TestCase): class TestSparseLoadProgramSGD(TestSparseLoadProgram): - def test_server_init(self): scope, train_program, startup_program, loss = self.net() with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): optimizer = fluid.optimizer.SGD(1e-3) - optimizer = fleet.distributed_optimizer(optimizer, - self.strategy) + optimizer = fleet.distributed_optimizer( + optimizer, self.strategy + ) optimizer.minimize(loss) fleet.init_server() diff --git a/python/paddle/fluid/tests/unittests/test_dist_text_classification.py b/python/paddle/fluid/tests/unittests/test_dist_text_classification.py index 7b16aa3f20040bf0321289efe1a99473009d3f27..36215f4e26c487c824dd64df981c5c7f175d64e7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_text_classification.py +++ b/python/paddle/fluid/tests/unittests/test_dist_text_classification.py @@ -22,29 +22,31 @@ flag_name = os.path.splitext(__file__)[0] class TestDistTextClassification2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._enforce_place = "CPU" def test_text_classification(self): - self.check_with_place("dist_text_classification.py", - delta=1e-6, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_text_classification.py", + delta=1e-6, + check_error_log=True, + log_name=flag_name, + ) class 
TestDistTextClassification2x2Async(TestDistBase): - def _setup_config(self): self._sync_mode = False self._enforce_place = "CPU" def test_se_resnext(self): - self.check_with_place("dist_text_classification.py", - delta=100, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_text_classification.py", + delta=100, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_train.py b/python/paddle/fluid/tests/unittests/test_dist_train.py index 7299854241907972a6cc97dad725554facb68796..65753803282fa977bbc410629070ef55d29d7026 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_train.py @@ -30,19 +30,19 @@ from dist_test_utils import remove_ps_flag from paddle.fluid import core -RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( -) +RPC_OP_ROLE_ATTR_NAME = ( + op_role_attr_name +) = core.op_proto_and_checker_maker.kOpRoleAttrName() RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC class TestSendOp(unittest.TestCase): - def test_send(self): remove_ps_flag(os.getpid()) # Run init_serv in a thread place = fluid.CPUPlace() # NOTE: python thread will not work here due to GIL. - p = Process(target=self.init_serv, args=(place, )) + p = Process(target=self.init_serv, args=(place,)) p.daemon = True p.start() @@ -79,14 +79,18 @@ class TestSendOp(unittest.TestCase): with fluid.program_guard(main): serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False) with serv.do(): - out_var = main.global_block().create_var(name="scale_0.tmp_0", - psersistable=True, - dtype="float32", - shape=[32, 32]) - x = layers.data(shape=[32, 32], - dtype='float32', - name="X", - append_batch_size=False) + out_var = main.global_block().create_var( + name="scale_0.tmp_0", + psersistable=True, + dtype="float32", + shape=[32, 32], + ) + x = layers.data( + shape=[32, 32], + dtype='float32', + name="X", + append_batch_size=False, + ) fluid.initializer.Constant(value=1.0)(x, main.global_block()) ops._scale(x=x, scale=10.0, out=out_var) @@ -96,20 +100,22 @@ class TestSendOp(unittest.TestCase): def init_client(self, place, port): main = fluid.Program() with fluid.program_guard(main): - main.global_block().append_op(type="fetch_barrier", - inputs={}, - outputs={"Out": []}, - attrs={ - "endpoints": - ["127.0.0.1:{0}".format(port)], - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) - - x = layers.data(shape=[32, 32], - dtype='float32', - name='X', - append_batch_size=False) + main.global_block().append_op( + type="fetch_barrier", + inputs={}, + outputs={"Out": []}, + attrs={ + "endpoints": ["127.0.0.1:{0}".format(port)], + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) + + x = layers.data( + shape=[32, 32], + dtype='float32', + name='X', + append_batch_size=False, + ) x.persistable = True fluid.initializer.Constant(value=2.3)(x, main.global_block()) @@ -117,7 +123,8 @@ class TestSendOp(unittest.TestCase): name="scale_0.tmp_0", # server side var dtype="float32", persistable=False, - shape=[32, 32]) + shape=[32, 32], + ) fluid.initializer.Constant(value=2.3)(get_var, main.global_block()) # NOTE(zjl): `Send` is async send, which means that the sent @@ -136,10 +143,12 @@ class TestSendOp(unittest.TestCase): def run_local(self, place): main = fluid.Program() with fluid.program_guard(main): - x = layers.data(shape=[32, 32], - dtype='float32', - name='X', - append_batch_size=False) + x = 
layers.data( + shape=[32, 32], + dtype='float32', + name='X', + append_batch_size=False, + ) fluid.initializer.Constant(value=2.3)(x, main.global_block()) o = layers.scale(x=x, scale=10.0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_dist_transformer.py b/python/paddle/fluid/tests/unittests/test_dist_transformer.py index a4d5a5a2cbfae0338f4a3e375a98867d408002c6..470468f7fe3a15c3024bf8640de3f4194cc3c12a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transformer.py @@ -22,59 +22,65 @@ def download_files(): url_prefix = 'http://paddle-unittest-data.bj.bcebos.com/dist_transformer/' vocab_url = url_prefix + 'vocab.bpe.32000' vocab_md5 = 'a86d345ca6e27f6591d0dccb1b9be853' - paddle.dataset.common.download(vocab_url, 'test_dist_transformer', - vocab_md5) + paddle.dataset.common.download( + vocab_url, 'test_dist_transformer', vocab_md5 + ) local_train_url = url_prefix + 'train.tok.clean.bpe.32000.en-de' local_train_md5 = '033eb02b9449e6dd823f050782ac8914' - paddle.dataset.common.download(local_train_url, 'test_dist_transformer', - local_train_md5) + paddle.dataset.common.download( + local_train_url, 'test_dist_transformer', local_train_md5 + ) train0_url = url_prefix + 'train.tok.clean.bpe.32000.en-de.train_0' train0_md5 = 'ddce7f602f352a0405267285379a38b1' - paddle.dataset.common.download(train0_url, 'test_dist_transformer', - train0_md5) + paddle.dataset.common.download( + train0_url, 'test_dist_transformer', train0_md5 + ) train1_url = url_prefix + 'train.tok.clean.bpe.32000.en-de.train_1' train1_md5 = '8757798200180285b1a619cd7f408747' - paddle.dataset.common.download(train1_url, 'test_dist_transformer', - train1_md5) + paddle.dataset.common.download( + train1_url, 'test_dist_transformer', train1_md5 + ) test_url = url_prefix + 'newstest2013.tok.bpe.32000.en-de' test_md5 = '9dd74a266dbdb25314183899f269b4a2' paddle.dataset.common.download(test_url, 'test_dist_transformer', test_md5) # cut test data for faster CI - orig_path = os.path.join(paddle.dataset.common.DATA_HOME, - "test_dist_transformer", - "newstest2013.tok.bpe.32000.en-de") - head_path = os.path.join(paddle.dataset.common.DATA_HOME, - "test_dist_transformer", - "newstest2013.tok.bpe.32000.en-de.cut") + orig_path = os.path.join( + paddle.dataset.common.DATA_HOME, + "test_dist_transformer", + "newstest2013.tok.bpe.32000.en-de", + ) + head_path = os.path.join( + paddle.dataset.common.DATA_HOME, + "test_dist_transformer", + "newstest2013.tok.bpe.32000.en-de.cut", + ) os.system("head -n10 %s > %s" % (orig_path, head_path)) class TestDistTransformer2x2Sync(TestDistBase): - def _setup_config(self): self._sync_mode = True def test_dist_train(self): download_files() - self.check_with_place("dist_transformer.py", - delta=1e-5, - check_error_log=False) + self.check_with_place( + "dist_transformer.py", delta=1e-5, check_error_log=False + ) class TestDistTransformer2x2Async(TestDistBase): - def _setup_config(self): self._sync_mode = False def test_dist_train(self): download_files() - self.check_with_place("dist_transformer.py", - delta=1.0, - check_error_log=False) + self.check_with_place( + "dist_transformer.py", delta=1.0, check_error_log=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index 8fc636af2de054fe36a549861bc66d4c00878c40..b5419eca108e9f7ec3db38f4b6d6e2b2b418cd9e 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -27,7 +27,6 @@ import paddle.fluid as fluid class TranspilerTest(unittest.TestCase): - def setUp(self): self.trainer_id = 0 self.trainers = 2 @@ -41,11 +40,13 @@ class TranspilerTest(unittest.TestCase): def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -68,8 +69,8 @@ class TranspilerTest(unittest.TestCase): trainer_main = t.get_trainer_program(wait_port=False) trainer_startup = fluid.default_startup_program() - assert (src.num_blocks == 1) - assert (trainer_startup.num_blocks == src.num_blocks) + assert src.num_blocks == 1 + assert trainer_startup.num_blocks == src.num_blocks return trainer_main, trainer_startup @@ -83,11 +84,13 @@ class TranspilerTest(unittest.TestCase): if not self.transpiler: main = self.get_main_program() self.transpiler = fluid.DistributeTranspiler(config=config) - self.transpiler.transpile(self.trainer_id, - program=main, - pservers=self.pserver_eps, - trainers=self.trainers, - sync_mode=sync_mode) + self.transpiler.transpile( + self.trainer_id, + program=main, + pservers=self.pserver_eps, + trainers=self.trainers, + sync_mode=sync_mode, + ) return self.transpiler @@ -109,7 +112,6 @@ class TranspilerTest(unittest.TestCase): class TestBasicModel(TranspilerTest): - def transpiler_test_impl(self): pserver, startup = self.get_pserver(self.pserver1_ep) pserver2, startup2 = self.get_pserver(self.pserver2_ep) @@ -125,28 +127,57 @@ class TestBasicModel(TranspilerTest): self.assertTrue("fc_b@GRAD" not in trainer_startup.global_block().vars) src = [op.type for op in trainer_startup.global_block().ops] - dst = ['fill_constant', 'fill_constant', 'uniform_random', 'recv', 'recv', \ - 'fetch_barrier', 'concat'] + dst = [ + 'fill_constant', + 'fill_constant', + 'uniform_random', + 'recv', + 'recv', + 'fetch_barrier', + 'concat', + ] self.assertEqual(src, dst) - self.assertEqual([op.type for op in trainer.global_block().ops], [ - 'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean', - 'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad', - 'elementwise_add_grad', 'send', 'mul_grad', 'split_byref', 'send', - 'send_barrier', 'recv', 'recv', 'fetch_barrier', 'concat' - ]) + self.assertEqual( + [op.type for op in trainer.global_block().ops], + [ + 'mul', + 'elementwise_add', + 'elementwise_sub', + 'square', + 'mean', + 'fill_constant', + 'mean_grad', + 'square_grad', + 'elementwise_sub_grad', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'split_byref', + 'send', + 'send_barrier', + 'recv', + 'recv', + 'fetch_barrier', + 'concat', + ], + ) self.assertEqual(len(pserver.blocks), 3) # block0: listen_and_serv - self.assertEqual([op.type for op in pserver.blocks[0].ops], - ["listen_and_serv"]) + self.assertEqual( + [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"] + ) # block1~2: optimize pass - self.assertEqual([op.type for op in pserver.blocks[1].ops], - ["sum", "scale", "sgd"]) + self.assertEqual( + [op.type for op in 
pserver.blocks[1].ops], ["sum", "scale", "sgd"] + ) # confirm startup program - self.assertEqual([op.type for op in startup.global_block().ops], - ["fill_constant", "fill_constant", "uniform_random"]) + self.assertEqual( + [op.type for op in startup.global_block().ops], + ["fill_constant", "fill_constant", "uniform_random"], + ) # the variable #fc_w will be split into two blocks fc_w_var = startup.global_block().var("fc_w.block1") self.assertEqual(fc_w_var.shape, (500, 1000)) @@ -172,7 +203,6 @@ class TestBasicModel(TranspilerTest): class TestBasicModelWithLargeBlockSize(TranspilerTest): - def transpiler_test_impl(self): config = fluid.DistributeTranspilerConfig() config.min_block_size = 1048576 @@ -182,23 +212,43 @@ class TestBasicModelWithLargeBlockSize(TranspilerTest): trainer, _ = self.get_trainer(config) - self.assertEqual([op.type for op in trainer.global_block().ops], [ - 'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean', - 'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad', - 'elementwise_add_grad', 'send', 'mul_grad', 'send', 'send_barrier', - 'recv', 'recv', 'fetch_barrier' - ]) + self.assertEqual( + [op.type for op in trainer.global_block().ops], + [ + 'mul', + 'elementwise_add', + 'elementwise_sub', + 'square', + 'mean', + 'fill_constant', + 'mean_grad', + 'square_grad', + 'elementwise_sub_grad', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'send_barrier', + 'recv', + 'recv', + 'fetch_barrier', + ], + ) self.assertEqual(len(pserver.blocks), 2) # block0: listen_and_serv - self.assertEqual([op.type for op in pserver.blocks[0].ops], - ["listen_and_serv"]) + self.assertEqual( + [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"] + ) # block1~2: optimize pass - self.assertEqual([op.type for op in pserver.blocks[1].ops], - ["sum", "scale", "sgd"]) + self.assertEqual( + [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"] + ) # confirm startup program - self.assertEqual([op.type for op in startup.global_block().ops], - ["fill_constant", "fill_constant"]) + self.assertEqual( + [op.type for op in startup.global_block().ops], + ["fill_constant", "fill_constant"], + ) # the variable #fc_w will be split into two blocks fc_w_var = startup2.global_block().var("fc_w") self.assertEqual(fc_w_var.shape, (1000, 1000)) @@ -224,7 +274,6 @@ class TestBasicModelWithLargeBlockSize(TranspilerTest): class TestNoSliceVar(TranspilerTest): - def setUp(self): super(TestNoSliceVar, self).setUp() @@ -244,22 +293,26 @@ class TestNoSliceVar(TranspilerTest): class TestLRDecay(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay(learning_rate=1.0, - decay_steps=2100, - decay_rate=0.1, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=1.0, + decay_steps=2100, + decay_rate=0.1, + staircase=True, + ) + ) sgd_optimizer.minimize(avg_cost) def transpiler_test_impl(self): @@ -268,30 +321,35 @@ class TestLRDecay(TranspilerTest): 
self.assertEqual(len(pserver.blocks), 4) lr_decay_ops = [op.type for op in pserver.blocks[1].ops] - self.assertEqual(lr_decay_ops, [ - "increment", "cast", "fill_constant", "elementwise_div", "floor", - "fill_constant", "elementwise_pow", "fill_constant", - "elementwise_mul" - ]) + self.assertEqual( + lr_decay_ops, + [ + "increment", + "cast", + "fill_constant", + "elementwise_div", + "floor", + "fill_constant", + "elementwise_pow", + "fill_constant", + "elementwise_mul", + ], + ) class TestFakeInit(TranspilerTest): - def net_conf(self): dict_size, embedding_size, neg_num = 10000, 8, 5 - input_word = fluid.layers.data(name="input_word", - shape=[1], - dtype='int64', - lod_level=1) - true_word = fluid.layers.data(name='true_label', - shape=[1], - dtype='int64', - lod_level=1) - neg_word = fluid.layers.data(name="neg_label", - shape=[1], - dtype='int64', - lod_level=1) + input_word = fluid.layers.data( + name="input_word", shape=[1], dtype='int64', lod_level=1 + ) + true_word = fluid.layers.data( + name='true_label', shape=[1], dtype='int64', lod_level=1 + ) + neg_word = fluid.layers.data( + name="neg_label", shape=[1], dtype='int64', lod_level=1 + ) inputs = [input_word, true_word, neg_word] init_width = 0.5 / embedding_size @@ -299,82 +357,99 @@ class TestFakeInit(TranspilerTest): input=inputs[0], is_sparse=True, size=[dict_size, embedding_size], - param_attr=fluid.ParamAttr(name='emb', - initializer=fluid.initializer.Uniform( - -init_width, init_width))) + param_attr=fluid.ParamAttr( + name='emb', + initializer=fluid.initializer.Uniform(-init_width, init_width), + ), + ) true_emb_w = fluid.layers.embedding( input=inputs[1], is_sparse=True, size=[dict_size, embedding_size], param_attr=fluid.ParamAttr( - name='emb_w', - initializer=fluid.initializer.Constant(value=0.0))) + name='emb_w', initializer=fluid.initializer.Constant(value=0.0) + ), + ) true_emb_b = fluid.layers.embedding( input=inputs[1], is_sparse=True, size=[dict_size, 1], param_attr=fluid.ParamAttr( - name='emb_b', - initializer=fluid.initializer.Constant(value=0.0))) + name='emb_b', initializer=fluid.initializer.Constant(value=0.0) + ), + ) neg_word_reshape = fluid.layers.reshape(inputs[2], shape=[-1, 1]) neg_word_reshape.stop_gradient = True - neg_emb_w = fluid.layers.embedding(input=neg_word_reshape, - is_sparse=True, - size=[dict_size, embedding_size], - param_attr=fluid.ParamAttr( - name='emb_w', learning_rate=1.0)) + neg_emb_w = fluid.layers.embedding( + input=neg_word_reshape, + is_sparse=True, + size=[dict_size, embedding_size], + param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0), + ) - neg_emb_w_re = fluid.layers.reshape(neg_emb_w, - shape=[-1, neg_num, embedding_size]) + neg_emb_w_re = fluid.layers.reshape( + neg_emb_w, shape=[-1, neg_num, embedding_size] + ) - neg_emb_b = fluid.layers.embedding(input=neg_word_reshape, - is_sparse=True, - size=[dict_size, 1], - param_attr=fluid.ParamAttr( - name='emb_b', learning_rate=1.0)) + neg_emb_b = fluid.layers.embedding( + input=neg_word_reshape, + is_sparse=True, + size=[dict_size, 1], + param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0), + ) neg_emb_b_vec = fluid.layers.reshape(neg_emb_b, shape=[-1, neg_num]) true_logits = fluid.layers.elementwise_add( - fluid.layers.reduce_sum(fluid.layers.elementwise_mul( - input_emb, true_emb_w), - dim=1, - keep_dim=True), true_emb_b) - - input_emb_re = fluid.layers.reshape(input_emb, - shape=[-1, 1, embedding_size]) - - neg_matmul = fluid.layers.matmul(input_emb_re, - neg_emb_w_re, - transpose_y=True) + 
fluid.layers.reduce_sum( + fluid.layers.elementwise_mul(input_emb, true_emb_w), + dim=1, + keep_dim=True, + ), + true_emb_b, + ) + + input_emb_re = fluid.layers.reshape( + input_emb, shape=[-1, 1, embedding_size] + ) + + neg_matmul = fluid.layers.matmul( + input_emb_re, neg_emb_w_re, transpose_y=True + ) neg_matmul_re = fluid.layers.reshape(neg_matmul, shape=[-1, neg_num]) neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec) # nce loss - label_ones = fluid.layers.fill_constant_batch_size_like(true_logits, - shape=[-1, 1], - value=1.0, - dtype='float32') + label_ones = fluid.layers.fill_constant_batch_size_like( + true_logits, shape=[-1, 1], value=1.0, dtype='float32' + ) label_zeros = fluid.layers.fill_constant_batch_size_like( - true_logits, shape=[-1, neg_num], value=0.0, dtype='float32') + true_logits, shape=[-1, neg_num], value=0.0, dtype='float32' + ) true_xent = fluid.layers.sigmoid_cross_entropy_with_logits( - true_logits, label_ones) + true_logits, label_ones + ) neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits( - neg_logits, label_zeros) + neg_logits, label_zeros + ) cost = fluid.layers.elementwise_add( fluid.layers.reduce_sum(true_xent, dim=1), - fluid.layers.reduce_sum(neg_xent, dim=1)) + fluid.layers.reduce_sum(neg_xent, dim=1), + ) avg_cost = fluid.layers.reduce_mean(cost) sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay(learning_rate=1.0, - decay_steps=2100, - decay_rate=0.1, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=1.0, + decay_steps=2100, + decay_rate=0.1, + staircase=True, + ) + ) sgd_optimizer.minimize(avg_cost) def transpiler_test_impl(self): @@ -389,14 +464,15 @@ class TestFakeInit(TranspilerTest): class TestDecayedAdagrad(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -409,14 +485,15 @@ class TestDecayedAdagrad(TranspilerTest): class TestFtrl(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -429,20 +506,23 @@ class TestFtrl(TranspilerTest): class TestLRDecayConditional(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = 
fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.piecewise_decay([10000, 20000], - [1.0, 0.5, 1.0])) + learning_rate=fluid.layers.piecewise_decay( + [10000, 20000], [1.0, 0.5, 1.0] + ) + ) sgd_optimizer.minimize(avg_cost) def transpiler_test_impl(self): @@ -460,13 +540,27 @@ class TestLRDecayConditional(TranspilerTest): self.assertEqual(len(pserver.blocks), 7) lr_decay_ops = [op.type for op in pserver.blocks[1].ops] - self.assertEqual(lr_decay_ops, [ - "increment", "cast", "fill_constant", "fill_constant", "less_than", - "logical_not", "conditional_block", "fill_constant", - "fill_constant", "less_than", "logical_not", "logical_and", - "logical_and", "conditional_block", "fill_constant", - "conditional_block" - ]) + self.assertEqual( + lr_decay_ops, + [ + "increment", + "cast", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "conditional_block", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "logical_and", + "logical_and", + "conditional_block", + "fill_constant", + "conditional_block", + ], + ) # test the condition blocks for b in sub_blocks: if b == 0: @@ -476,16 +570,17 @@ class TestLRDecayConditional(TranspilerTest): class TestL2Decay(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') y_predict = fluid.layers.fc( input=x, size=1000, act=None, - param_attr=fluid.ParamAttr(name='fc_w', - regularizer=fluid.regularizer.L2Decay()), - bias_attr=fluid.ParamAttr(name='fc_b')) + param_attr=fluid.ParamAttr( + name='fc_w', regularizer=fluid.regularizer.L2Decay() + ), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -502,22 +597,27 @@ class TestL2Decay(TranspilerTest): trainer, _ = self.get_trainer() self.assertEqual(len(pserver.blocks), 3) - self.assertEqual([op.type for op in pserver.blocks[1].ops], - ["sum", "scale", "clip", "sgd"]) - self.assertEqual([op.type for op in pserver.blocks[2].ops], - ["sum", "scale", "clip", "scale", "sum", "sgd"]) + self.assertEqual( + [op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "clip", "sgd"], + ) + self.assertEqual( + [op.type for op in pserver.blocks[2].ops], + ["sum", "scale", "clip", "scale", "sum", "sgd"], + ) # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer class TestL2DecayWithPiecewise(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -525,10 +625,12 @@ class TestL2DecayWithPiecewise(TranspilerTest): bd = [1, 10, 20, 30] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] sgd_optimizer = fluid.optimizer.Momentum( - learning_rate=fluid.layers.piecewise_decay(boundaries=bd, - values=lr), + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr + ), momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) + 
regularization=fluid.regularizer.L2Decay(1e-4), + ) sgd_optimizer.minimize(avg_cost) def transpiler_test_impl(self): @@ -536,33 +638,62 @@ class TestL2DecayWithPiecewise(TranspilerTest): trainer, _ = self.get_trainer() self.assertEqual(len(pserver.blocks), 9) - self.assertEqual([op.type for op in pserver.blocks[1].ops], [ - "increment", "cast", "fill_constant", "fill_constant", "less_than", - "logical_not", "conditional_block", "fill_constant", - "fill_constant", "less_than", "logical_not", "logical_and", - "logical_and", "conditional_block", "fill_constant", - "fill_constant", "less_than", "logical_not", "logical_and", - "logical_and", "conditional_block", "fill_constant", - "fill_constant", "less_than", "logical_not", "logical_and", - "logical_and", "conditional_block", "fill_constant", - "conditional_block" - ]) - self.assertEqual([op.type for op in pserver.blocks[7].ops], - ["sum", "scale", "scale", "sum", "momentum"]) - self.assertEqual([op.type for op in pserver.blocks[8].ops], - ["sum", "scale", "scale", "sum", "momentum"]) + self.assertEqual( + [op.type for op in pserver.blocks[1].ops], + [ + "increment", + "cast", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "conditional_block", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "logical_and", + "logical_and", + "conditional_block", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "logical_and", + "logical_and", + "conditional_block", + "fill_constant", + "fill_constant", + "less_than", + "logical_not", + "logical_and", + "logical_and", + "conditional_block", + "fill_constant", + "conditional_block", + ], + ) + self.assertEqual( + [op.type for op in pserver.blocks[7].ops], + ["sum", "scale", "scale", "sum", "momentum"], + ) + self.assertEqual( + [op.type for op in pserver.blocks[8].ops], + ["sum", "scale", "scale", "sum", "momentum"], + ) class TestEmptyPserverOptimizeBlocks(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') # only one parameter - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=False) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=False, + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -580,44 +711,45 @@ class TestEmptyPserverOptimizeBlocks(TranspilerTest): class TestDistLookupTableBase(TranspilerTest): - def network_with_table(self, is_sparse, is_distributed): self.table_size = 1000 self.emb_size = 64 self.lookup_table_name = 'shared_w' def emb_pool(ids, table_name, is_distributed): - emb = fluid.layers.embedding(input=ids, - size=[self.table_size, self.emb_size], - dtype='float32', - param_attr=table_name, - is_sparse=is_sparse, - is_distributed=is_distributed) + emb = fluid.layers.embedding( + input=ids, + size=[self.table_size, self.emb_size], + dtype='float32', + param_attr=table_name, + is_sparse=is_sparse, + is_distributed=is_distributed, + ) pool = fluid.layers.sequence_pool(input=emb, pool_type='average') return pool - title_ids = fluid.layers.data(name='title_ids', - shape=[1], - dtype='int64', - lod_level=1) - brand_ids = fluid.layers.data(name='brand_ids', - shape=[1], - dtype='int64', - lod_level=1) - profile_ids = fluid.layers.data(name='brand_ids', - shape=[1], - dtype='int64', - lod_level=1) + title_ids = fluid.layers.data( + 
name='title_ids', shape=[1], dtype='int64', lod_level=1 + ) + brand_ids = fluid.layers.data( + name='brand_ids', shape=[1], dtype='int64', lod_level=1 + ) + profile_ids = fluid.layers.data( + name='brand_ids', shape=[1], dtype='int64', lod_level=1 + ) title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed) brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed) profile_emb = emb_pool(profile_ids, "profile_emb", False) - fc0 = fluid.layers.concat(input=[title_emb, brand_emb, profile_emb], - axis=1) - predict = fluid.layers.fc(input=fc0, - size=2, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + fc0 = fluid.layers.concat( + input=[title_emb, brand_emb, profile_emb], axis=1 + ) + predict = fluid.layers.fc( + input=fc0, + size=2, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) @@ -627,7 +759,6 @@ class TestDistLookupTableBase(TranspilerTest): class TestLocalLookupTable(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=False) @@ -637,36 +768,66 @@ class TestLocalLookupTable(TestDistLookupTableBase): self.assertEqual(len(pserver1.blocks), 4) # 0 listen_and_serv # 1 optimize for fc_w or fc_b adam - self.assertEqual([op.type for op in pserver1.blocks[1].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[1].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 2 optimize for table adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[2].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[2].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 3 optimize for table 2 adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[3].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[3].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) trainer, _ = self.get_trainer() self.assertEqual(len(trainer.blocks), 1) ops = [ - 'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool', - 'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add', - 'cross_entropy2', 'mean', 'fill_constant', 'mean_grad', - 'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad', - 'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'split_selected_rows', 'send', 'sequence_pool_grad', - 'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'sum', 'split_selected_rows', 'send', 'send_barrier', 'recv', - 'recv', 'fetch_barrier' + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'concat', + 'mul', + 'elementwise_add', + 'cross_entropy2', + 'mean', + 'fill_constant', + 'mean_grad', + 'cross_entropy_grad2', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'concat_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'split_selected_rows', + 'send', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sum', + 'split_selected_rows', + 'send', + 'send_barrier', + 'recv', + 'recv', + 'fetch_barrier', ] 
self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) class TestDistLookupTable(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=True) @@ -676,48 +837,97 @@ class TestDistLookupTable(TestDistLookupTableBase): self.assertEqual(len(pserver1.blocks), 6) # 0 listen_and_serv # 1 optimize for fc_w or fc_b adam - self.assertEqual([op.type for op in pserver1.blocks[1].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[1].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 4 prefetch -> lookup_sparse_table_read for data0 - self.assertEqual([op.type for op in pserver1.blocks[2].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[2].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 2 optimize for table sgd - self.assertEqual([op.type for op in pserver1.blocks[3].ops], - ["sum", "sgd"]) + self.assertEqual( + [op.type for op in pserver1.blocks[3].ops], ["sum", "sgd"] + ) # 3 prefetch -> lookup_sparse_table_read for data0 - self.assertEqual([op.type for op in pserver1.blocks[4].ops], - ["lookup_sparse_table_read"]) + self.assertEqual( + [op.type for op in pserver1.blocks[4].ops], + ["lookup_sparse_table_read"], + ) # 5 save table self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"]) trainer, trainer_startup = self.get_trainer() self.assertEqual(len(trainer.blocks), 1) ops = [ - 'split_ids', 'prefetch', 'merge_ids', 'sequence_pool', - 'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul', - 'elementwise_add', 'cross_entropy2', 'mean', 'fill_constant', - 'mean_grad', 'cross_entropy_grad2', 'elementwise_add_grad', 'send', - 'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad', - 'lookup_table_grad', 'split_selected_rows', 'send', - 'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad', - 'lookup_table_grad', 'sum', 'split_ids', 'send', 'send_barrier', - 'recv', 'recv', 'fetch_barrier' + 'split_ids', + 'prefetch', + 'merge_ids', + 'sequence_pool', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'concat', + 'mul', + 'elementwise_add', + 'cross_entropy2', + 'mean', + 'fill_constant', + 'mean_grad', + 'cross_entropy_grad2', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'concat_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'split_selected_rows', + 'send', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sum', + 'split_ids', + 'send', + 'send_barrier', + 'recv', + 'recv', + 'fetch_barrier', ] self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) startup_ops = [ - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'uniform_random', - 'uniform_random', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat', - 'fake_init' + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'uniform_random', + 'uniform_random', + 'recv', + 'recv', + 'recv', + 'fetch_barrier', + 'concat', + 'fake_init', ] - self.assertEqual([op.type for op in trainer_startup.blocks[0].ops], - startup_ops) + self.assertEqual( + 
[op.type for op in trainer_startup.blocks[0].ops], startup_ops + ) class TestAsyncLocalLookupTable(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=False) @@ -728,34 +938,63 @@ class TestAsyncLocalLookupTable(TestDistLookupTableBase): self.assertEqual(len(pserver1.blocks), 4) # 0 listen_and_serv # 1 optimize for fc_w or fc_b adam - self.assertEqual([op.type for op in pserver1.blocks[1].ops], - ["adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[1].ops], + ["adam", "scale", "scale"], + ) # 2 optimize for table adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[2].ops], - ["adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[2].ops], + ["adam", "scale", "scale"], + ) # 3 optimize for table adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[3].ops], - ["adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[3].ops], + ["adam", "scale", "scale"], + ) trainer, _ = self.get_trainer(config) self.assertEqual(len(trainer.blocks), 1) ops = [ - 'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool', - 'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add', - 'cross_entropy2', 'mean', 'fill_constant', 'mean_grad', - 'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad', - 'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'split_selected_rows', 'send', 'sequence_pool_grad', - 'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'sum', 'split_selected_rows', 'send', 'recv', 'recv' + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'concat', + 'mul', + 'elementwise_add', + 'cross_entropy2', + 'mean', + 'fill_constant', + 'mean_grad', + 'cross_entropy_grad2', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'concat_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'split_selected_rows', + 'send', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sum', + 'split_selected_rows', + 'send', + 'recv', + 'recv', ] self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) class TestAsyncDistLookupTable(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=True) @@ -767,46 +1006,93 @@ class TestAsyncDistLookupTable(TestDistLookupTableBase): self.assertEqual(len(pserver1.blocks), 6) # 0 listen_and_serv # 1 optimize for fc_w or fc_b adam - self.assertEqual([op.type for op in pserver1.blocks[1].ops], - ["adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[1].ops], + ["adam", "scale", "scale"], + ) # 2 optimize for table adam - self.assertEqual([op.type for op in pserver1.blocks[2].ops], - ["adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[2].ops], + ["adam", "scale", "scale"], + ) # 3 optimize for table sgd self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"]) # 4 prefetch -> lookup_sparse_table_read for data0 - self.assertEqual([op.type for op in pserver1.blocks[4].ops], - ["lookup_sparse_table_read"]) + self.assertEqual( + [op.type for op in pserver1.blocks[4].ops], + ["lookup_sparse_table_read"], + ) # 5 save table self.assertEqual([op.type for op in 
pserver1.blocks[5].ops], ["save"]) trainer, trainer_startup = self.get_trainer(config) self.assertEqual(len(trainer.blocks), 1) ops = [ - 'split_ids', 'prefetch', 'merge_ids', 'sequence_pool', - 'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul', - 'elementwise_add', 'cross_entropy2', 'mean', 'fill_constant', - 'mean_grad', 'cross_entropy_grad2', 'elementwise_add_grad', 'send', - 'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad', - 'lookup_table_grad', 'split_selected_rows', 'send', - 'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad', - 'lookup_table_grad', 'sum', 'split_ids', 'send', 'recv', 'recv' + 'split_ids', + 'prefetch', + 'merge_ids', + 'sequence_pool', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'concat', + 'mul', + 'elementwise_add', + 'cross_entropy2', + 'mean', + 'fill_constant', + 'mean_grad', + 'cross_entropy_grad2', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'concat_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'split_selected_rows', + 'send', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sum', + 'split_ids', + 'send', + 'recv', + 'recv', ] self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) startup_ops = [ - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant', - 'fill_constant', 'fill_constant', 'uniform_random', - 'uniform_random', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat', - 'fake_init' + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'fill_constant', + 'uniform_random', + 'uniform_random', + 'recv', + 'recv', + 'recv', + 'fetch_barrier', + 'concat', + 'fake_init', ] - self.assertEqual([op.type for op in trainer_startup.blocks[0].ops], - startup_ops) + self.assertEqual( + [op.type for op in trainer_startup.blocks[0].ops], startup_ops + ) class TestDistLookupTableSliceSize(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=True) @@ -816,14 +1102,14 @@ class TestDistLookupTableSliceSize(TestDistLookupTableBase): self.assertTrue(self.transpiler.has_distributed_lookup_table) lookup_table_var = pserver1.global_block().vars[ - self.transpiler.table_name] + self.transpiler.table_name + ] row_size = lookup_table_var.shape[0] calc_row_size = int(math.ceil(self.table_size / self.pservers)) self.assertEqual(row_size, calc_row_size) class TestDistArgsInProgram(TestDistLookupTableBase): - def net_conf(self): self.network_with_table(is_sparse=True, is_distributed=True) @@ -832,21 +1118,24 @@ class TestDistArgsInProgram(TestDistLookupTableBase): self.assertTrue(trainer._is_distributed) self.assertTrue(trainer._is_chief) - self.assertEqual(trainer._distributed_lookup_table, - self.lookup_table_name) - self.assertEqual(trainer._endpoints, - [self.pserver1_ep, self.pserver2_ep]) + self.assertEqual( + trainer._distributed_lookup_table, self.lookup_table_name + ) + self.assertEqual( + trainer._endpoints, [self.pserver1_ep, self.pserver2_ep] + ) class TestRMSPropOptimizer(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - 
param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -859,8 +1148,10 @@ class TestRMSPropOptimizer(TranspilerTest): self.assertEqual(len(pserver.blocks), 3) # block1~2: optimize pass - self.assertEqual([op.type for op in pserver.blocks[1].ops], - ["sum", "scale", "rmsprop"]) + self.assertEqual( + [op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "rmsprop"], + ) # the variable #fc_w will be split into two blocks fc_w_var = startup.global_block().var("fc_w.block1") self.assertEqual(fc_w_var.shape, (500, 1000)) @@ -869,14 +1160,15 @@ class TestRMSPropOptimizer(TranspilerTest): class TestLoadSliceVar(TranspilerTest): - def net_conf(self): x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b')) + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b'), + ) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -888,9 +1180,11 @@ class TestLoadSliceVar(TranspilerTest): pserver2, _ = self.get_pserver(self.pserver2_ep) vars_ps1 = pserver._parameters_on_pservers.get_distributed_vars_by_ep( - self.pserver1_ep) + self.pserver1_ep + ) vars_ps2 = pserver._parameters_on_pservers.get_distributed_vars_by_ep( - self.pserver2_ep) + self.pserver2_ep + ) self.assertTrue(vars_ps1) self.assertTrue(vars_ps2) @@ -902,10 +1196,12 @@ class TestLoadSliceVar(TranspilerTest): ps1_var = vars_ps1[idx] if not ps1_var.is_slice: - total_numel = functools.reduce(lambda x, y: x * y, - vars_ps1[idx].origin.shape) - ps1_numel = functools.reduce(lambda x, y: x * y, - vars_ps1[idx].slice.shape) + total_numel = functools.reduce( + lambda x, y: x * y, vars_ps1[idx].origin.shape + ) + ps1_numel = functools.reduce( + lambda x, y: x * y, vars_ps1[idx].slice.shape + ) else: ps2_var = None for var in vars_ps2: @@ -913,18 +1209,20 @@ class TestLoadSliceVar(TranspilerTest): ps2_var = var break - total_numel = functools.reduce(lambda x, y: x * y, - ps1_var.origin.shape) - ps1_numel = functools.reduce(lambda x, y: x * y, - ps1_var.slice.shape) - ps2_numel = functools.reduce(lambda x, y: x * y, - ps2_var.slice.shape) + total_numel = functools.reduce( + lambda x, y: x * y, ps1_var.origin.shape + ) + ps1_numel = functools.reduce( + lambda x, y: x * y, ps1_var.slice.shape + ) + ps2_numel = functools.reduce( + lambda x, y: x * y, ps2_var.slice.shape + ) self.assertEqual(total_numel, ps1_numel + ps2_numel) class TestNCCL2Transpile(TranspilerTest): - def test_nccl2_transpile(self): if fluid.core.is_compiled_with_cuda(): # test nccl2 only with cuda main = fluid.Program() @@ -936,10 +1234,12 @@ class TestNCCL2Transpile(TranspilerTest): config.mode = "nccl2" config.wait_port = False t = fluid.DistributeTranspiler(config=config) - t.transpile(0, - trainers="127.0.0.1:6174,127.0.0.1:6175", - current_endpoint="127.0.0.1:6174", - startup_program=startup) + t.transpile( + 0, + trainers="127.0.0.1:6174,127.0.0.1:6175", + current_endpoint="127.0.0.1:6174", + 
startup_program=startup, + ) print([op.type for op in startup.global_block().ops]) self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id") self.assertIsNotNone(startup.global_block().vars.get("NCCLID")) @@ -950,9 +1250,9 @@ class TestNCCL2Transpile(TranspilerTest): # test for remote prefetch class TestRemoteLookupTable(TestDistLookupTableBase): - def net_conf(self): import os + os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1" self.network_with_table(is_sparse=True, is_distributed=False) @@ -962,37 +1262,67 @@ class TestRemoteLookupTable(TestDistLookupTableBase): self.assertEqual(len(pserver1.blocks), 4) # 0 listen_and_serv # 1 optimize for fc_w or fc_b adam - self.assertEqual([op.type for op in pserver1.blocks[1].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[1].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 2 optimize for table adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[2].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[2].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) # 3 optimize for table 2 adam # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num - self.assertEqual([op.type for op in pserver1.blocks[3].ops], - ["sum", "scale", "adam", "scale", "scale"]) + self.assertEqual( + [op.type for op in pserver1.blocks[3].ops], + ["sum", "scale", "adam", "scale", "scale"], + ) trainer, _ = self.get_trainer() self.assertEqual(len(trainer.blocks), 1) ops = [ - 'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool', - 'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add', - 'cross_entropy2', 'mean', 'fill_constant', 'mean_grad', - 'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad', - 'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'split_selected_rows', 'send', 'sequence_pool_grad', - 'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad', - 'sum', 'split_selected_rows', 'send', 'send_barrier', 'recv', - 'recv', 'fetch_barrier' + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'lookup_table', + 'sequence_pool', + 'concat', + 'mul', + 'elementwise_add', + 'cross_entropy2', + 'mean', + 'fill_constant', + 'mean_grad', + 'cross_entropy_grad2', + 'elementwise_add_grad', + 'send', + 'mul_grad', + 'send', + 'concat_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'split_selected_rows', + 'send', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sequence_pool_grad', + 'lookup_table_grad', + 'sum', + 'split_selected_rows', + 'send', + 'send_barrier', + 'recv', + 'recv', + 'fetch_barrier', ] self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) # test for remote prefetch class TestRemoteNce(TestDistLookupTableBase): - def network_with_table(self, is_sparse, is_distributed): num_total_classes = 20 @@ -1002,28 +1332,40 @@ class TestRemoteNce(TestDistLookupTableBase): input = fluid.layers.data(name="input", shape=[10], dtype="float32") label = fluid.layers.data(name="label", shape=[1], dtype="int64") - w_param = fluid.default_main_program().global_block().create_parameter( - shape=[num_total_classes, 10], - dtype='float32', - name='nce_w', - initializer=fluid.initializer.ConstantInitializer()) - b_param = fluid.default_main_program().global_block().create_parameter( - shape=[num_total_classes, 1], - dtype='float32', - 
name='nce_b', - initializer=fluid.initializer.ConstantInitializer()) - - cost = fluid.layers.nce(input=input, - label=label, - num_total_classes=num_total_classes, - sampler=sampler, - custom_dist=nid_freq_arr.tolist(), - sample_weight=None, - param_attr='nce_w', - bias_attr='nce_b', - seed=1, - num_neg_samples=5, - is_sparse=is_sparse) + w_param = ( + fluid.default_main_program() + .global_block() + .create_parameter( + shape=[num_total_classes, 10], + dtype='float32', + name='nce_w', + initializer=fluid.initializer.ConstantInitializer(), + ) + ) + b_param = ( + fluid.default_main_program() + .global_block() + .create_parameter( + shape=[num_total_classes, 1], + dtype='float32', + name='nce_b', + initializer=fluid.initializer.ConstantInitializer(), + ) + ) + + cost = fluid.layers.nce( + input=input, + label=label, + num_total_classes=num_total_classes, + sampler=sampler, + custom_dist=nid_freq_arr.tolist(), + sample_weight=None, + param_attr='nce_w', + bias_attr='nce_b', + seed=1, + num_neg_samples=5, + is_sparse=is_sparse, + ) avg_cost = paddle.mean(cost) # optimizer optimizer = fluid.optimizer.Adam(learning_rate=0.003) @@ -1031,6 +1373,7 @@ class TestRemoteNce(TestDistLookupTableBase): def net_conf(self): import os + os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1" self.network_with_table(is_sparse=True, is_distributed=False) @@ -1055,44 +1398,59 @@ class TestRemoteNce(TestDistLookupTableBase): # test for remote prefetch class TestRemoteHsigmoid(TestDistLookupTableBase): - def network_with_table(self, is_sparse, is_distributed): num_total_classes = 3 input = fluid.layers.data(name="input", shape=[1], dtype="float32") label = fluid.layers.data(name="label", shape=[1], dtype="int64") - path_table = fluid.layers.data(name='path_table', - shape=[3], - dtype='int64') - path_code = fluid.layers.data(name='path_code', - shape=[3], - dtype='int64') - w_param = fluid.default_main_program().global_block().create_parameter( - shape=[num_total_classes, 10], - dtype='float32', - name='hs_w', - initializer=fluid.initializer.ConstantInitializer()) - b_param = fluid.default_main_program().global_block().create_parameter( - shape=[3, 1], - dtype='float32', - name='hs_b', - initializer=fluid.initializer.ConstantInitializer()) + path_table = fluid.layers.data( + name='path_table', shape=[3], dtype='int64' + ) + path_code = fluid.layers.data( + name='path_code', shape=[3], dtype='int64' + ) + w_param = ( + fluid.default_main_program() + .global_block() + .create_parameter( + shape=[num_total_classes, 10], + dtype='float32', + name='hs_w', + initializer=fluid.initializer.ConstantInitializer(), + ) + ) + b_param = ( + fluid.default_main_program() + .global_block() + .create_parameter( + shape=[3, 1], + dtype='float32', + name='hs_b', + initializer=fluid.initializer.ConstantInitializer(), + ) + ) emb = fluid.layers.embedding( input=input, is_sparse=is_sparse, size=[3, 3], - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(num_total_classes)))) - - cost = fluid.layers.hsigmoid(input=emb, - label=label, - num_classes=num_total_classes, - path_table=path_table, - path_code=path_code, - is_custom=True, - is_sparse=is_sparse) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal( + scale=1 / math.sqrt(num_total_classes) + ) + ), + ) + + cost = fluid.layers.hsigmoid( + input=emb, + label=label, + num_classes=num_total_classes, + path_table=path_table, + path_code=path_code, + is_custom=True, + is_sparse=is_sparse, + ) avg_cost = paddle.mean(cost) # optimizer 
optimizer = fluid.optimizer.SGD(learning_rate=0.003) @@ -1100,6 +1458,7 @@ class TestRemoteHsigmoid(TestDistLookupTableBase): def net_conf(self): import os + os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1" self.network_with_table(is_sparse=True, is_distributed=False) diff --git a/python/paddle/fluid/tests/unittests/test_dist_tree_index.py b/python/paddle/fluid/tests/unittests/test_dist_tree_index.py index 0fae4fe09773e06e0a1e066ed9263e0ff4d616c8..d3d3d249c41254704312ebe9cd33fff4ca33a297 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_tree_index.py +++ b/python/paddle/fluid/tests/unittests/test_dist_tree_index.py @@ -24,35 +24,32 @@ paddle.enable_static() def create_feeds(): - user_input = fluid.layers.data(name="item_id", - shape=[1], - dtype="int64", - lod_level=1) - - item = fluid.layers.data(name="unit_id", - shape=[1], - dtype="int64", - lod_level=1) - - label = fluid.layers.data(name="label", - shape=[1], - dtype="int64", - lod_level=1) - labels = fluid.layers.data(name="labels", - shape=[1], - dtype="int64", - lod_level=1) + user_input = fluid.layers.data( + name="item_id", shape=[1], dtype="int64", lod_level=1 + ) + + item = fluid.layers.data( + name="unit_id", shape=[1], dtype="int64", lod_level=1 + ) + + label = fluid.layers.data( + name="label", shape=[1], dtype="int64", lod_level=1 + ) + labels = fluid.layers.data( + name="labels", shape=[1], dtype="int64", lod_level=1 + ) feed_list = [user_input, item, label, labels] return feed_list class TestTreeIndex(unittest.TestCase): - def test_tree_index(self): path = download( "https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb", - "tree_index_unittest", "e2ba4561c2e9432b532df40546390efa") + "tree_index_unittest", + "e2ba4561c2e9432b532df40546390efa", + ) ''' path = download( "https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb", @@ -72,7 +69,8 @@ class TestTreeIndex(unittest.TestCase): for i in range(tree.height()): layer_node_codes.append(tree.get_layer_codes(i)) layer_node_ids.append( - [node.id() for node in tree.get_nodes(layer_node_codes[-1])]) + [node.id() for node in tree.get_nodes(layer_node_codes[-1])] + ) all_leaf_ids = [node.id() for node in tree.get_all_leafs()] self.assertEqual(sum(all_leaf_ids), sum(layer_node_ids[-1])) @@ -97,8 +95,9 @@ class TestTreeIndex(unittest.TestCase): self.assertEqual(pi_relation[all_leaf_ids[0]], ancestor_codes[0]) # get_travel_path - travel_path_codes = tree.get_travel_path(travel_codes[0], - travel_codes[-1]) + travel_path_codes = tree.get_travel_path( + travel_codes[0], travel_codes[-1] + ) travel_path_ids = [ node.id() for node in tree.get_nodes(travel_path_codes) ] @@ -113,7 +112,6 @@ class TestTreeIndex(unittest.TestCase): class TestIndexSampler(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -123,14 +121,17 @@ class TestIndexSampler(unittest.TestCase): def test_layerwise_sampler(self): path = download( "https://paddlerec.bj.bcebos.com/tree-based/data/mini_tree.pb", - "tree_index_unittest", "e2ba4561c2e9432b532df40546390efa") + "tree_index_unittest", + "e2ba4561c2e9432b532df40546390efa", + ) tdm_layer_counts = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - #tree = TreeIndex("demo", path) - file_name = os.path.join(self.temp_dir.name, - "test_in_memory_dataset_tdm_sample_run.txt") + # tree = TreeIndex("demo", path) + file_name = os.path.join( + self.temp_dir.name, "test_in_memory_dataset_tdm_sample_run.txt" + ) with open(file_name, "w") as f: - #data = "29 d 29 d 29 29 29 29 29 29 29 29 29 29 29 29\n" + # data = 
"29 d 29 d 29 29 29 29 29 29 29 29 29 29 29 29\n" data = "1 1 1 15 15 15\n" data += "1 1 1 15 15 15\n" f.write(data) @@ -142,26 +143,30 @@ class TestIndexSampler(unittest.TestCase): slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() - dataset.init(batch_size=1, - pipe_command="cat", - download_cmd="cat", - use_var=slots_vars) + dataset.init( + batch_size=1, + pipe_command="cat", + download_cmd="cat", + use_var=slots_vars, + ) dataset.set_filelist([file_name]) - #dataset.update_settings(pipe_command="cat") - #dataset._init_distributed_settings( + # dataset.update_settings(pipe_command="cat") + # dataset._init_distributed_settings( # parse_ins_id=True, # parse_content=True, # fea_eval=True, # candidate_size=10000) dataset.load_into_memory() - dataset.tdm_sample('demo', - tree_path=path, - tdm_layer_counts=tdm_layer_counts, - start_sample_layer=1, - with_hierachy=False, - seed=0, - id_slot=2) + dataset.tdm_sample( + 'demo', + tree_path=path, + tdm_layer_counts=tdm_layer_counts, + start_sample_layer=1, + with_hierachy=False, + seed=0, + id_slot=2, + ) self.assertTrue(dataset.get_shuffle_data_size() == 8) diff --git a/python/paddle/fluid/tests/unittests/test_dist_word2vec.py b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py index 6d9727f6f3ac52a92f9d963a29d7d34e893eba25..be100d7699f0ff0d94f077017b4657d36f16e6d8 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_word2vec.py +++ b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py @@ -21,43 +21,46 @@ flag_name = os.path.splitext(__file__)[0] class TestDistW2V2x2(TestDistBase): - def _setup_config(self): self._sync_mode = True self._enforce_place = "CPU" def test_dist_train(self): - self.check_with_place("dist_word2vec.py", - delta=1e-4, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_word2vec.py", + delta=1e-4, + check_error_log=True, + log_name=flag_name, + ) class TestDistW2V2x2WithMemOpt(TestDistBase): - def _setup_config(self): self._sync_mode = True self._mem_opt = True self._enforce_place = "CPU" def test_dist_train(self): - self.check_with_place("dist_word2vec.py", - delta=1e-4, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_word2vec.py", + delta=1e-4, + check_error_log=True, + log_name=flag_name, + ) class TestDistW2V2x2Async(TestDistBase): - def _setup_config(self): self._sync_mode = False self._enforce_place = "CPU" def test_dist_train(self): - self.check_with_place("dist_word2vec.py", - delta=100, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "dist_word2vec.py", + delta=100, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py b/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py index 29bd02a578e18dbaee12e1b8c972e2617ef00b94..354a630bc3847a8a5c3d26fa6a1537e28cc52ae8 100644 --- a/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py +++ b/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py @@ -19,18 +19,26 @@ import paddle from op_test import OpTest -def distribute_fpn_proposals_wrapper(fpn_rois, rois_num, min_level, max_level, - refer_level, refer_scale, pixel_offset): - return paddle.vision.ops.distribute_fpn_proposals(fpn_rois=fpn_rois, - min_level=min_level, - max_level=max_level, - refer_level=refer_level, - refer_scale=refer_scale, - rois_num=rois_num) +def distribute_fpn_proposals_wrapper( + fpn_rois, + rois_num, + min_level, + 
max_level, + refer_level, + refer_scale, + pixel_offset, +): + return paddle.vision.ops.distribute_fpn_proposals( + fpn_rois=fpn_rois, + min_level=min_level, + max_level=max_level, + refer_level=refer_level, + refer_scale=refer_scale, + rois_num=rois_num, + ) class TestDistributeFPNProposalsOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -43,8 +51,9 @@ class TestDistributeFPNProposalsOp(OpTest): 'refer_level': self.canonical_level, 'pixel_offset': self.pixel_offset, } - output = [('out%d' % i, self.rois_fpn[i]) - for i in range(len(self.rois_fpn))] + output = [ + ('out%d' % i, self.rois_fpn[i]) for i in range(len(self.rois_fpn)) + ] self.outputs = { 'MultiFpnRois': output, @@ -63,8 +72,8 @@ class TestDistributeFPNProposalsOp(OpTest): def boxes_area(self, boxes): offset = 1 if self.pixel_offset else 0 - w = (boxes[:, 2] - boxes[:, 0] + offset) - h = (boxes[:, 3] - boxes[:, 1] + offset) + w = boxes[:, 2] - boxes[:, 0] + offset + h = boxes[:, 3] - boxes[:, 1] + offset areas = w * h assert np.all(areas >= 0), 'Negative areas founds' return areas @@ -85,7 +94,7 @@ class TestDistributeFPNProposalsOp(OpTest): return sub_lod def add_multilevel_roi(self, rois, target_lvls, lvl_min, lvl_max): - rois_idx_order = np.empty((0, )) + rois_idx_order = np.empty((0,)) rois_fpn = [] for lvl in range(lvl_min, lvl_max + 1): idx_lvl = np.where(target_lvls == lvl)[0] @@ -95,17 +104,20 @@ class TestDistributeFPNProposalsOp(OpTest): sub_lod = self.get_sub_lod(rois[idx_lvl, 0]) rois_fpn.append((rois[idx_lvl, 1:], [sub_lod])) rois_idx_order = np.concatenate((rois_idx_order, idx_lvl)) - rois_idx_restore = np.argsort(rois_idx_order).astype(np.int32, - copy=False) + rois_idx_restore = np.argsort(rois_idx_order).astype( + np.int32, copy=False + ) return rois_fpn, rois_idx_restore def calc_rois_distribute(self): lvl_min = self.roi_min_level lvl_max = self.roi_max_level - target_lvls = self.map_rois_to_fpn_levels(self.rois[:, 1:5], lvl_min, - lvl_max) + target_lvls = self.map_rois_to_fpn_levels( + self.rois[:, 1:5], lvl_min, lvl_max + ) rois_fpn, rois_idx_restore = self.add_multilevel_roi( - self.rois, target_lvls, lvl_min, lvl_max) + self.rois, target_lvls, lvl_min, lvl_max + ) return rois_fpn, rois_idx_restore def make_rois(self): @@ -133,14 +145,13 @@ class TestDistributeFPNProposalsOp(OpTest): class TestDistributeFPNProposalsOpWithRoisNum(TestDistributeFPNProposalsOp): - def set_data(self): self.init_test_case() self.make_rois() self.rois_fpn, self.rois_idx_restore = self.calc_rois_distribute() self.inputs = { 'FpnRois': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': np.array(self.rois_lod[0]).astype('int32') + 'RoisNum': np.array(self.rois_lod[0]).astype('int32'), } self.attrs = { 'max_level': self.roi_max_level, @@ -149,26 +160,30 @@ class TestDistributeFPNProposalsOpWithRoisNum(TestDistributeFPNProposalsOp): 'refer_level': self.canonical_level, 'pixel_offset': self.pixel_offset, } - output = [('out%d' % i, self.rois_fpn[i]) - for i in range(len(self.rois_fpn))] - rois_num_per_level = [('rois_num%d' % i, - np.array(self.rois_fpn[i][1][0]).astype('int32')) - for i in range(len(self.rois_fpn))] + output = [ + ('out%d' % i, self.rois_fpn[i]) for i in range(len(self.rois_fpn)) + ] + rois_num_per_level = [ + ('rois_num%d' % i, np.array(self.rois_fpn[i][1][0]).astype('int32')) + for i in range(len(self.rois_fpn)) + ] self.outputs = { 'MultiFpnRois': output, 'RestoreIndex': self.rois_idx_restore.reshape(-1, 1), - 'MultiLevelRoIsNum': rois_num_per_level + 'MultiLevelRoIsNum': 
rois_num_per_level, } self.python_api = distribute_fpn_proposals_wrapper self.python_out_sig = [ - 'MultiFpnRois', 'MultiLevelRoIsNum', 'RestoreIndex' + 'MultiFpnRois', + 'MultiLevelRoIsNum', + 'RestoreIndex', ] class TestDistributeFPNProposalsOpNoOffset( - TestDistributeFPNProposalsOpWithRoisNum): - + TestDistributeFPNProposalsOpWithRoisNum +): def init_test_case(self): self.roi_max_level = 5 self.roi_min_level = 2 @@ -179,7 +194,6 @@ class TestDistributeFPNProposalsOpNoOffset( class TestDistributeFpnProposalsAPI(unittest.TestCase): - def setUp(self): np.random.seed(678) self.rois_np = np.random.rand(10, 4).astype('float32') @@ -188,26 +202,30 @@ class TestDistributeFpnProposalsAPI(unittest.TestCase): def test_dygraph_with_static(self): paddle.enable_static() rois = paddle.static.data(name='rois', shape=[10, 4], dtype='float32') - rois_num = paddle.static.data(name='rois_num', - shape=[None], - dtype='int32') - multi_rois, restore_ind, rois_num_per_level = paddle.vision.ops.distribute_fpn_proposals( + rois_num = paddle.static.data( + name='rois_num', shape=[None], dtype='int32' + ) + ( + multi_rois, + restore_ind, + rois_num_per_level, + ) = paddle.vision.ops.distribute_fpn_proposals( fpn_rois=rois, min_level=2, max_level=5, refer_level=4, refer_scale=224, - rois_num=rois_num) + rois_num=rois_num, + ) fetch_list = multi_rois + [restore_ind] + rois_num_per_level exe = paddle.static.Executor() - output_stat = exe.run(paddle.static.default_main_program(), - feed={ - 'rois': self.rois_np, - 'rois_num': self.rois_num_np - }, - fetch_list=fetch_list, - return_numpy=False) + output_stat = exe.run( + paddle.static.default_main_program(), + feed={'rois': self.rois_np, 'rois_num': self.rois_num_np}, + fetch_list=fetch_list, + return_numpy=False, + ) output_stat_np = [] for output in output_stat: output_np = np.array(output) @@ -217,13 +235,18 @@ class TestDistributeFpnProposalsAPI(unittest.TestCase): paddle.disable_static() rois_dy = paddle.to_tensor(self.rois_np) rois_num_dy = paddle.to_tensor(self.rois_num_np) - multi_rois_dy, restore_ind_dy, rois_num_per_level_dy = paddle.vision.ops.distribute_fpn_proposals( + ( + multi_rois_dy, + restore_ind_dy, + rois_num_per_level_dy, + ) = paddle.vision.ops.distribute_fpn_proposals( fpn_rois=rois_dy, min_level=2, max_level=5, refer_level=4, refer_scale=224, - rois_num=rois_num_dy) + rois_num=rois_num_dy, + ) output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy output_dy_np = [] for output in output_dy: diff --git a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_clip.py b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_clip.py index fac4e79ce139c13cb436287a4e323ab4d0814f4e..7638f7cf2436fc28d763839455d7f04d5d75522b 100644 --- a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_clip.py +++ b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_clip.py @@ -35,10 +35,12 @@ def remove_file_if_exists(file_name): shutil.rmtree(file_name) -def run_test(clip_after_allreduce=True, - max_global_norm=-1.0, - gradient_merge_steps=1, - use_master_acc_grad=True): +def run_test( + clip_after_allreduce=True, + max_global_norm=-1.0, + gradient_merge_steps=1, + use_master_acc_grad=True, +): temp_dir = tempfile.TemporaryDirectory() if not paddle.is_compiled_with_cuda(): return @@ -66,17 +68,18 @@ def run_test(clip_after_allreduce=True, touch_file_env = 'SUCCESS_TOUCH_FILE' touch_file_name = os.path.join( temp_dir.name, - 
'distributed_fused_lamb_touch_file_{}'.format(os.getpid())) + 'distributed_fused_lamb_touch_file_{}'.format(os.getpid()), + ) os.environ[touch_file_env] = touch_file_name try: assert os.system(cmd) == 0 and os.path.exists( - touch_file_name), 'Test failed when {}'.format(args) + touch_file_name + ), 'Test failed when {}'.format(args) finally: temp_dir.cleanup() class TestDistributedFusedLambWithClip(unittest.TestCase): - def test_1(self): run_test(clip_after_allreduce=True, max_global_norm=0.01) diff --git a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_gradient_merge.py b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_gradient_merge.py index 01ca09916a1e65755d1c16ee2a9b6a941a54a5d9..37b315e61a808aeee593b7f188551c8e8b5d2b7b 100644 --- a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_gradient_merge.py +++ b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_with_gradient_merge.py @@ -17,17 +17,20 @@ import unittest class TestDistributedFusedLambGradientMerge(unittest.TestCase): - def test_gm(self): - run_test(clip_after_allreduce=True, - max_global_norm=-1.0, - gradient_merge_steps=2) + run_test( + clip_after_allreduce=True, + max_global_norm=-1.0, + gradient_merge_steps=2, + ) def test_gm_with_fp16_acc_grad(self): - run_test(clip_after_allreduce=True, - max_global_norm=-1.0, - gradient_merge_steps=2, - use_master_acc_grad=False) + run_test( + clip_after_allreduce=True, + max_global_norm=-1.0, + gradient_merge_steps=2, + use_master_acc_grad=False, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_without_clip.py b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_without_clip.py index 8d4dfa84d2f1bd982a780af9b5b0f03544dc9e68..6e9d8ddea172879eb5b61c9cf130df2004740635 100644 --- a/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_without_clip.py +++ b/python/paddle/fluid/tests/unittests/test_distributed_fused_lamb_op_without_clip.py @@ -17,7 +17,6 @@ import unittest class TestDistributedFusedLambWithoutClip(unittest.TestCase): - def test_1(self): run_test(clip_after_allreduce=True, max_global_norm=-1.0) diff --git a/python/paddle/fluid/tests/unittests/test_distributions.py b/python/paddle/fluid/tests/unittests/test_distributions.py index d79e8784e2b22037fe109ac9e751ad6b59121842..a032b74f796bd1452fdc7917fcdf06b4fa1c827d 100644 --- a/python/paddle/fluid/tests/unittests/test_distributions.py +++ b/python/paddle/fluid/tests/unittests/test_distributions.py @@ -16,13 +16,18 @@ import numpy as np import unittest from paddle import fluid from paddle.fluid import layers -from paddle.fluid.layers.distributions import Categorical, MultivariateNormalDiag, Normal, Uniform +from paddle.fluid.layers.distributions import ( + Categorical, + MultivariateNormalDiag, + Normal, + Uniform, +) import math -class DistributionNumpy(): +class DistributionNumpy: """ - Distribution is the abstract base class for probability distributions. + Distribution is the abstract base class for probability distributions. 
""" def sample(self): @@ -43,15 +48,15 @@ class DistributionNumpy(): class UniformNumpy(DistributionNumpy): - def __init__(self, low, high): self.low = np.array(low).astype('float32') self.high = np.array(high).astype('float32') def sample(self, shape): shape = tuple(shape) + (self.low + self.high).shape - return self.low + (np.random.uniform(size=shape) * - (self.high - self.low)) + return self.low + ( + np.random.uniform(size=shape) * (self.high - self.low) + ) def log_prob(self, value): lb = np.less(self.low, value).astype('float32') @@ -63,7 +68,6 @@ class UniformNumpy(DistributionNumpy): class NormalNumpy(DistributionNumpy): - def __init__(self, loc, scale): self.loc = np.array(loc).astype('float32') self.scale = np.array(scale).astype('float32') @@ -75,24 +79,28 @@ class NormalNumpy(DistributionNumpy): def log_prob(self, value): var = self.scale * self.scale log_scale = np.log(self.scale) - return -((value - self.loc) * - (value - self.loc)) / (2. * var) - log_scale - math.log( - math.sqrt(2. * math.pi)) + return ( + -((value - self.loc) * (value - self.loc)) / (2.0 * var) + - log_scale + - math.log(math.sqrt(2.0 * math.pi)) + ) def entropy(self): - return 0.5 + 0.5 * np.log(np.array( - 2. * math.pi).astype('float32')) + np.log(self.scale) + return ( + 0.5 + + 0.5 * np.log(np.array(2.0 * math.pi).astype('float32')) + + np.log(self.scale) + ) def kl_divergence(self, other): - var_ratio = (self.scale / other.scale) + var_ratio = self.scale / other.scale var_ratio = var_ratio * var_ratio - t1 = ((self.loc - other.loc) / other.scale) - t1 = (t1 * t1) + t1 = (self.loc - other.loc) / other.scale + t1 = t1 * t1 return 0.5 * (var_ratio + t1 - 1 - np.log(var_ratio)) class CategoricalNumpy(DistributionNumpy): - def __init__(self, logits): self.logits = np.array(logits).astype('float32') @@ -101,23 +109,28 @@ class CategoricalNumpy(DistributionNumpy): e_logits = np.exp(logits) z = np.sum(e_logits, axis=-1, keepdims=True) prob = e_logits / z - return -1. 
* np.sum(prob * (logits - np.log(z)), axis=-1, keepdims=True) + return -1.0 * np.sum( + prob * (logits - np.log(z)), axis=-1, keepdims=True + ) def kl_divergence(self, other): logits = self.logits - np.max(self.logits, axis=-1, keepdims=True) other_logits = other.logits - np.max( - other.logits, axis=-1, keepdims=True) + other.logits, axis=-1, keepdims=True + ) e_logits = np.exp(logits) other_e_logits = np.exp(other_logits) z = np.sum(e_logits, axis=-1, keepdims=True) other_z = np.sum(other_e_logits, axis=-1, keepdims=True) prob = e_logits / z - return np.sum(prob * (logits - np.log(z) - other_logits \ - + np.log(other_z)), axis=-1, keepdims=True) + return np.sum( + prob * (logits - np.log(z) - other_logits + np.log(other_z)), + axis=-1, + keepdims=True, + ) class MultivariateNormalDiagNumpy(DistributionNumpy): - def __init__(self, loc, scale): self.loc = np.array(loc).astype('float32') self.scale = np.array(scale).astype('float32') @@ -139,14 +152,17 @@ class MultivariateNormalDiagNumpy(DistributionNumpy): return inv_diag def entropy(self): - return 0.5 * (self.scale.shape[0] * - (1.0 + np.log(np.array(2 * math.pi).astype('float32'))) + - np.log(self._det(self.scale))) + return 0.5 * ( + self.scale.shape[0] + * (1.0 + np.log(np.array(2 * math.pi).astype('float32'))) + + np.log(self._det(self.scale)) + ) def kl_divergence(self, other): tr_cov_matmul = np.sum(self._inv(other.scale) * self.scale) - loc_matmul_cov = np.matmul((other.loc - self.loc), - self._inv(other.scale)) + loc_matmul_cov = np.matmul( + (other.loc - self.loc), self._inv(other.scale) + ) tri_matmul = np.matmul(loc_matmul_cov, (other.loc - self.loc)) k = list(self.scale.shape)[0] ln_cov = np.log(self._det(other.scale)) - np.log(self._det(self.scale)) @@ -156,7 +172,6 @@ class MultivariateNormalDiagNumpy(DistributionNumpy): class DistributionTest(unittest.TestCase): - def setUp(self, use_gpu=False): self.use_gpu = use_gpu if not use_gpu: @@ -167,20 +182,31 @@ class DistributionTest(unittest.TestCase): self.gpu_id = 0 self.executor = fluid.Executor(place) - def build_normal_program(self, test_program, batch_size, dims, loc_float, - scale_float, other_loc_float, other_scale_float, - scale_np, other_scale_np, loc_np, other_loc_np, - values_np): + def build_normal_program( + self, + test_program, + batch_size, + dims, + loc_float, + scale_float, + other_loc_float, + other_scale_float, + scale_np, + other_scale_np, + loc_np, + other_loc_np, + values_np, + ): with fluid.program_guard(test_program): loc = layers.data(name='loc', shape=[dims], dtype='float32') scale = layers.data(name='scale', shape=[dims], dtype='float32') - other_loc = layers.data(name='other_loc', - shape=[dims], - dtype='float32') - other_scale = layers.data(name='other_scale', - shape=[dims], - dtype='float32') + other_loc = layers.data( + name='other_loc', shape=[dims], dtype='float32' + ) + other_scale = layers.data( + name='other_scale', shape=[dims], dtype='float32' + ) values = layers.data(name='values', shape=[dims], dtype='float32') @@ -188,8 +214,9 @@ class DistributionTest(unittest.TestCase): other_normal_float = Normal(other_loc_float, other_scale_float) normal_float_np_broadcast = Normal(loc_float, scale_np) - other_normal_float_np_broadcast = Normal(other_loc_float, - other_scale_np) + other_normal_float_np_broadcast = Normal( + other_loc_float, other_scale_np + ) normal_np = Normal(loc_np, scale_np) other_normal_np = Normal(other_loc_np, other_scale_np) @@ -199,7 +226,8 @@ class DistributionTest(unittest.TestCase): sample_float = 
normal_float.sample([batch_size, dims]) sample_float_np_broadcast = normal_float_np_broadcast.sample( - [batch_size, dims]) + [batch_size, dims] + ) sample_np = normal_np.sample([batch_size, dims]) sample_variable = normal_variable.sample([batch_size, dims]) @@ -214,22 +242,34 @@ class DistributionTest(unittest.TestCase): kl_float = normal_float.kl_divergence(other_normal_float) kl_float_np_broadcast = normal_float_np_broadcast.kl_divergence( - other_normal_float_np_broadcast) + other_normal_float_np_broadcast + ) kl_np = normal_np.kl_divergence(other_normal_np) kl_variable = normal_variable.kl_divergence(other_normal_variable) fetch_list = [ - sample_float, sample_float_np_broadcast, sample_np, sample_variable, - entropy_float, entropy_float_np_broadcast, entropy_np, - entropy_variable, lp_float_np_broadcast, lp_np, lp_variable, - kl_float, kl_float_np_broadcast, kl_np, kl_variable + sample_float, + sample_float_np_broadcast, + sample_np, + sample_variable, + entropy_float, + entropy_float_np_broadcast, + entropy_np, + entropy_variable, + lp_float_np_broadcast, + lp_np, + lp_variable, + kl_float, + kl_float_np_broadcast, + kl_np, + kl_variable, ] feed_vars = { 'loc': loc_np, 'scale': scale_np, 'other_loc': other_loc_np, 'other_scale': other_scale_np, - 'values': values_np + 'values': values_np, } return feed_vars, fetch_list @@ -255,117 +295,176 @@ class DistributionTest(unittest.TestCase): scale_np = np.random.randn(batch_size, dims).astype('float32') while not np.all(other_scale_np > 0): other_scale_np = np.random.randn(batch_size, dims).astype('float32') - return loc_np, other_loc_np, loc_float, scale_float, other_loc_float, \ - other_scale_float, scale_np, other_scale_np, values_np + return ( + loc_np, + other_loc_np, + loc_float, + scale_float, + other_loc_float, + other_scale_float, + scale_np, + other_scale_np, + values_np, + ) def test_normal_distribution(self, batch_size=2, dims=3, tolerance=1e-6): test_program = fluid.Program() - loc_np, other_loc_np, loc_float, scale_float, other_loc_float, other_scale_float, scale_np, other_scale_np, values_np = self.get_normal_random_input( - batch_size, dims) + ( + loc_np, + other_loc_np, + loc_float, + scale_float, + other_loc_float, + other_scale_float, + scale_np, + other_scale_np, + values_np, + ) = self.get_normal_random_input(batch_size, dims) feed_vars, fetch_list = self.build_normal_program( - test_program, batch_size, dims, loc_float, scale_float, - other_loc_float, other_scale_float, scale_np, other_scale_np, - loc_np, other_loc_np, values_np) + test_program, + batch_size, + dims, + loc_float, + scale_float, + other_loc_float, + other_scale_float, + scale_np, + other_scale_np, + loc_np, + other_loc_np, + values_np, + ) self.executor.run(fluid.default_startup_program()) np_normal_float = NormalNumpy(loc_float, scale_float) np_other_normal_float = NormalNumpy(other_loc_float, other_scale_float) np_normal_float_np_broadcast = NormalNumpy(loc_float, scale_np) np_other_normal_float_np_broadcast = NormalNumpy( - other_loc_float, other_scale_np) + other_loc_float, other_scale_np + ) np_normal = NormalNumpy(loc_np, scale_np) np_other_normal = NormalNumpy(other_loc_np, other_scale_np) gt_sample_float = np_normal_float.sample([batch_size, dims]) gt_sample_float_np_broadcast = np_normal_float_np_broadcast.sample( - [batch_size, dims]) + [batch_size, dims] + ) gt_sample_np = np_normal.sample([batch_size, dims]) gt_entropy_float = np_normal_float.entropy() gt_entropy_float_np_broadcast = np_normal_float_np_broadcast.entropy() gt_entropy = 
np_normal.entropy() gt_lp_float_np_broadcast = np_normal_float_np_broadcast.log_prob( - values_np) + values_np + ) gt_lp = np_normal.log_prob(values_np) gt_kl_float = np_normal_float.kl_divergence(np_other_normal_float) gt_kl_float_np_broadcast = np_normal_float_np_broadcast.kl_divergence( - np_other_normal_float_np_broadcast) + np_other_normal_float_np_broadcast + ) gt_kl = np_normal.kl_divergence(np_other_normal) [ - output_sample_float, output_sample_float_np_broadcast, - output_sample_np, output_sample_variable, output_entropy_float, - output_entropy_float_np_broadcast, output_entropy_np, - output_entropy_variable, output_lp_float_np_broadcast, output_lp_np, - output_lp_variable, output_kl_float, output_kl_float_np_broadcast, - output_kl_np, output_kl_variable - ] = self.executor.run(program=test_program, - feed=feed_vars, - fetch_list=fetch_list) - - np.testing.assert_allclose(output_sample_float.shape, - gt_sample_float.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_float_np_broadcast.shape, - gt_sample_float_np_broadcast.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_np.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_variable.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_float, - gt_entropy_float, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_float_np_broadcast, - gt_entropy_float_np_broadcast, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_np, - gt_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_variable, - gt_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_float_np_broadcast, - gt_lp_float_np_broadcast, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_np, - gt_lp, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_variable, - gt_lp, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_float, - gt_kl_float, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_float_np_broadcast, - gt_kl_float_np_broadcast, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_np, - gt_kl, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_variable, - gt_kl, - rtol=tolerance, - atol=tolerance) - - def build_uniform_program(self, test_program, batch_size, dims, low_float, - high_float, high_np, low_np, values_np): + output_sample_float, + output_sample_float_np_broadcast, + output_sample_np, + output_sample_variable, + output_entropy_float, + output_entropy_float_np_broadcast, + output_entropy_np, + output_entropy_variable, + output_lp_float_np_broadcast, + output_lp_np, + output_lp_variable, + output_kl_float, + output_kl_float_np_broadcast, + output_kl_np, + output_kl_variable, + ] = self.executor.run( + program=test_program, feed=feed_vars, fetch_list=fetch_list + ) + + np.testing.assert_allclose( + output_sample_float.shape, + gt_sample_float.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_float_np_broadcast.shape, + gt_sample_float_np_broadcast.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_np.shape, + gt_sample_np.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_variable.shape, + 
gt_sample_np.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_float, + gt_entropy_float, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_float_np_broadcast, + gt_entropy_float_np_broadcast, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_np, gt_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_entropy_variable, gt_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_lp_float_np_broadcast, + gt_lp_float_np_broadcast, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_lp_np, gt_lp, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_lp_variable, gt_lp, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_kl_float, gt_kl_float, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_kl_float_np_broadcast, + gt_kl_float_np_broadcast, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_kl_np, gt_kl, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_kl_variable, gt_kl, rtol=tolerance, atol=tolerance + ) + + def build_uniform_program( + self, + test_program, + batch_size, + dims, + low_float, + high_float, + high_np, + low_np, + values_np, + ): with fluid.program_guard(test_program): low = layers.data(name='low', shape=[dims], dtype='float32') high = layers.data(name='high', shape=[dims], dtype='float32') @@ -379,7 +478,8 @@ class DistributionTest(unittest.TestCase): sample_float = uniform_float.sample([batch_size, dims]) sample_float_np_broadcast = uniform_float_np_broadcast.sample( - [batch_size, dims]) + [batch_size, dims] + ) sample_np = uniform_np.sample([batch_size, dims]) sample_variable = uniform_variable.sample([batch_size, dims]) @@ -393,9 +493,17 @@ class DistributionTest(unittest.TestCase): lp_variable = uniform_variable.log_prob(values) fetch_list = [ - sample_float, sample_float_np_broadcast, sample_np, sample_variable, - entropy_float, entropy_float_np_broadcast, entropy_np, - entropy_variable, lp_float_np_broadcast, lp_np, lp_variable + sample_float, + sample_float_np_broadcast, + sample_np, + sample_variable, + entropy_float, + entropy_float_np_broadcast, + entropy_np, + entropy_variable, + lp_float_np_broadcast, + lp_np, + lp_variable, ] feed_vars = {'low': low_np, 'high': high_np, 'values': values_np} return feed_vars, fetch_list @@ -406,13 +514,21 @@ class DistributionTest(unittest.TestCase): low_np = np.random.randn(batch_size, dims).astype('float32') low_float = np.random.uniform(-2, 1) high_float = np.random.uniform(1, 3) - high_np = np.random.uniform(-5.0, 5.0, - (batch_size, dims)).astype('float32') + high_np = np.random.uniform(-5.0, 5.0, (batch_size, dims)).astype( + 'float32' + ) values_np = np.random.randn(batch_size, dims).astype('float32') feed_vars, fetch_list = self.build_uniform_program( - test_program, batch_size, dims, low_float, high_float, high_np, - low_np, values_np) + test_program, + batch_size, + dims, + low_float, + high_float, + high_np, + low_np, + values_np, + ) self.executor.run(fluid.default_startup_program()) @@ -422,75 +538,92 @@ class DistributionTest(unittest.TestCase): gt_sample_float = np_uniform_float.sample([batch_size, dims]) gt_sample_float_np_broadcast = np_uniform_float_np_broadcast.sample( - [batch_size, dims]) + [batch_size, dims] + ) gt_sample_np = np_uniform.sample([batch_size, dims]) gt_entropy_float = 
np_uniform_float.entropy() gt_entropy_float_np_broadcast = np_uniform_float_np_broadcast.entropy() gt_entropy = np_uniform.entropy() gt_lp_float_np_broadcast = np_uniform_float_np_broadcast.log_prob( - values_np) + values_np + ) gt_lp = np_uniform.log_prob(values_np) # result calculated by paddle [ - output_sample_float, output_sample_float_np_broadcast, - output_sample_np, output_sample_variable, output_entropy_float, - output_entropy_float_np_broadcast, output_entropy_np, - output_entropy_variable, output_lp_float_np_broadcast, output_lp_np, - output_lp_variable - ] = self.executor.run(program=test_program, - feed=feed_vars, - fetch_list=fetch_list) - - np.testing.assert_allclose(output_sample_float.shape, - gt_sample_float.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_float_np_broadcast.shape, - gt_sample_float_np_broadcast.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_np.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_sample_variable.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_float, - gt_entropy_float, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_float_np_broadcast, - gt_entropy_float_np_broadcast, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_np, - gt_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_entropy_variable, - gt_entropy, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_float_np_broadcast, - gt_lp_float_np_broadcast, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_np, - gt_lp, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_lp_variable, - gt_lp, - rtol=tolerance, - atol=tolerance) - - def test_categorical_distribution(self, - batch_size=2, - dims=3, - tolerance=1e-6): + output_sample_float, + output_sample_float_np_broadcast, + output_sample_np, + output_sample_variable, + output_entropy_float, + output_entropy_float_np_broadcast, + output_entropy_np, + output_entropy_variable, + output_lp_float_np_broadcast, + output_lp_np, + output_lp_variable, + ] = self.executor.run( + program=test_program, feed=feed_vars, fetch_list=fetch_list + ) + + np.testing.assert_allclose( + output_sample_float.shape, + gt_sample_float.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_float_np_broadcast.shape, + gt_sample_float_np_broadcast.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_np.shape, + gt_sample_np.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_sample_variable.shape, + gt_sample_np.shape, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_float, + gt_entropy_float, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_float_np_broadcast, + gt_entropy_float_np_broadcast, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_entropy_np, gt_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_entropy_variable, gt_entropy, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_lp_float_np_broadcast, + gt_lp_float_np_broadcast, + rtol=tolerance, + atol=tolerance, + ) + np.testing.assert_allclose( + output_lp_np, gt_lp, rtol=tolerance, atol=tolerance + ) + 
np.testing.assert_allclose( + output_lp_variable, gt_lp, rtol=tolerance, atol=tolerance + ) + + def test_categorical_distribution( + self, batch_size=2, dims=3, tolerance=1e-6 + ): test_program = fluid.Program() logits_np = np.random.randn(batch_size, dims).astype('float32') @@ -498,9 +631,9 @@ class DistributionTest(unittest.TestCase): with fluid.program_guard(test_program): logits = layers.data(name='logits', shape=[dims], dtype='float32') - other_logits = layers.data(name='other_logits', - shape=[dims], - dtype='float32') + other_logits = layers.data( + name='other_logits', shape=[dims], dtype='float32' + ) categorical_np = Categorical(logits_np) other_categorical_np = Categorical(other_logits_np) @@ -516,55 +649,74 @@ class DistributionTest(unittest.TestCase): gt_kl_np = np_categorical.kl_divergence(np_other_categorical) # result calculated by paddle - [output_entropy_np, - output_kl_np] = self.executor.run(program=test_program, - feed={'logits': logits_np}, - fetch_list=[entropy_np, kl_np]) - np.testing.assert_allclose(output_entropy_np, - gt_entropy_np, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_np, - gt_kl_np, - rtol=tolerance, - atol=tolerance) - - def test_multivariateNormalDiag_distribution(self, - batch_size=2, - tolerance=1e-6): + [output_entropy_np, output_kl_np] = self.executor.run( + program=test_program, + feed={'logits': logits_np}, + fetch_list=[entropy_np, kl_np], + ) + np.testing.assert_allclose( + output_entropy_np, gt_entropy_np, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_kl_np, gt_kl_np, rtol=tolerance, atol=tolerance + ) + + def test_multivariateNormalDiag_distribution( + self, batch_size=2, tolerance=1e-6 + ): test_program = fluid.Program() - loc_np = np.random.random(batch_size, ).astype('float32') - scale_np = np.diag(np.random.random(batch_size, )).astype('float32') - other_loc_np = np.random.random(batch_size, ).astype('float32') - other_scale_np = np.diag(np.random.random( - batch_size, )).astype('float32') + loc_np = np.random.random( + batch_size, + ).astype('float32') + scale_np = np.diag( + np.random.random( + batch_size, + ) + ).astype('float32') + other_loc_np = np.random.random( + batch_size, + ).astype('float32') + other_scale_np = np.diag( + np.random.random( + batch_size, + ) + ).astype('float32') with fluid.program_guard(test_program): - loc = layers.data(name='loc', - shape=[ - batch_size, - ], - dtype='float32', - append_batch_size=False) - scale = layers.data(name='scale', - shape=[batch_size, batch_size], - dtype='float32', - append_batch_size=False) - other_loc = layers.data(name='other_loc', - shape=[ - batch_size, - ], - dtype='float32', - append_batch_size=False) - other_scale = layers.data(name='other_scale', - shape=[batch_size, batch_size], - dtype='float32', - append_batch_size=False) + loc = layers.data( + name='loc', + shape=[ + batch_size, + ], + dtype='float32', + append_batch_size=False, + ) + scale = layers.data( + name='scale', + shape=[batch_size, batch_size], + dtype='float32', + append_batch_size=False, + ) + other_loc = layers.data( + name='other_loc', + shape=[ + batch_size, + ], + dtype='float32', + append_batch_size=False, + ) + other_scale = layers.data( + name='other_scale', + shape=[batch_size, batch_size], + dtype='float32', + append_batch_size=False, + ) multivariate_np = MultivariateNormalDiag(loc, scale) other_multivariate_np = MultivariateNormalDiag( - other_loc, other_scale) + other_loc, other_scale + ) entropy_np = multivariate_np.entropy() 
other_entropy_np = other_multivariate_np.entropy() @@ -574,32 +726,31 @@ class DistributionTest(unittest.TestCase): np_multivariate = MultivariateNormalDiagNumpy(loc_np, scale_np) np_other_multivariate = MultivariateNormalDiagNumpy( - other_loc_np, other_scale_np) + other_loc_np, other_scale_np + ) gt_entropy_np = np_multivariate.entropy() gt_kl_np = np_multivariate.kl_divergence(np_other_multivariate) # result calculated by paddle - [output_entropy_np, - output_kl_np] = self.executor.run(program=test_program, - feed={ - 'loc': loc_np, - 'scale': scale_np, - 'other_loc': other_loc_np, - 'other_scale': other_scale_np - }, - fetch_list=[entropy_np, kl_np]) - np.testing.assert_allclose(output_entropy_np, - gt_entropy_np, - rtol=tolerance, - atol=tolerance) - np.testing.assert_allclose(output_kl_np, - gt_kl_np, - rtol=tolerance, - atol=tolerance) + [output_entropy_np, output_kl_np] = self.executor.run( + program=test_program, + feed={ + 'loc': loc_np, + 'scale': scale_np, + 'other_loc': other_loc_np, + 'other_scale': other_scale_np, + }, + fetch_list=[entropy_np, kl_np], + ) + np.testing.assert_allclose( + output_entropy_np, gt_entropy_np, rtol=tolerance, atol=tolerance + ) + np.testing.assert_allclose( + output_kl_np, gt_kl_np, rtol=tolerance, atol=tolerance + ) class DistributionTestError(unittest.TestCase): - def test_normal_error(self): loc = int(1) scale = int(1) @@ -658,8 +809,9 @@ class DistributionTestError(unittest.TestCase): categorical_other = Normal(1.0, 2.0) # type of other must be an instance of Normal - self.assertRaises(TypeError, categorical.kl_divergence, - categorical_other) + self.assertRaises( + TypeError, categorical.kl_divergence, categorical_other + ) def test_multivariate_normal_diag_error(self): loc = 1.0 diff --git a/python/paddle/fluid/tests/unittests/test_dot_op.py b/python/paddle/fluid/tests/unittests/test_dot_op.py index d9850a88d0dc4da2cc5353b222ddc80259f1032d..006c91d60e64b1274533a411371ce7b45f41de07 100644 --- a/python/paddle/fluid/tests/unittests/test_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_dot_op.py @@ -22,7 +22,6 @@ from paddle.fluid import Program, program_guard class DotOp(OpTest): - def setUp(self): self.op_type = "dot" self.python_api = paddle.dot @@ -31,7 +30,7 @@ class DotOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {} @@ -45,35 +44,38 @@ class DotOp(OpTest): ['X', 'Y'], 'Out', user_defined_grads=[self.inputs['Y'], self.inputs['X']], - check_eager=True) + check_eager=True, + ) else: self.check_grad(['X', 'Y'], 'Out', check_eager=True) def test_check_grad_ingore_x(self): if core.is_compiled_with_rocm(): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.inputs['X']], - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.inputs['X']], + check_eager=True, + ) else: - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], 'Out', no_grad_set=set("X"), check_eager=True + ) def test_check_grad_ingore_y(self): if core.is_compiled_with_rocm(): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.inputs['Y']], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.inputs['Y']], + check_eager=True, + ) else: - self.check_grad(['X'], - 'Out', - 
no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], 'Out', no_grad_set=set('Y'), check_eager=True + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [121]).astype(self.dtype) @@ -85,12 +87,15 @@ class DotOp(OpTest): class DotOpBatch(DotOp): - def init_input_output(self): - self.x = np.random.uniform(0.1, 1, - [132]).astype(self.dtype).reshape([11, 12]) - self.y = np.random.uniform(1, 3, - [132]).astype(self.dtype).reshape([11, 12]) + self.x = ( + np.random.uniform(0.1, 1, [132]) + .astype(self.dtype) + .reshape([11, 12]) + ) + self.y = ( + np.random.uniform(1, 3, [132]).astype(self.dtype).reshape([11, 12]) + ) self.out = np.sum(self.x * self.y, axis=1).reshape([11, 1]) def test_check_grad_normal(self): @@ -104,7 +109,6 @@ class DotOpBatch(DotOp): class TestDotOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -124,25 +128,26 @@ class TestDotOpError(unittest.TestCase): class TestDygraph(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(): x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32)) y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32)) - np.testing.assert_allclose(paddle.dot(x1, y1).numpy(), - np.array([17]), - rtol=1e-05) + np.testing.assert_allclose( + paddle.dot(x1, y1).numpy(), np.array([17]), rtol=1e-05 + ) x1 = fluid.dygraph.to_variable( - np.array([[1, 3], [3, 5]]).astype(np.float32)) + np.array([[1, 3], [3, 5]]).astype(np.float32) + ) y1 = fluid.dygraph.to_variable( - np.array([[2, 5], [6, 8]]).astype(np.float32)) + np.array([[2, 5], [6, 8]]).astype(np.float32) + ) np.testing.assert_array_equal( - paddle.dot(x1, y1).numpy(), np.array([[17], [58]])) + paddle.dot(x1, y1).numpy(), np.array([[17], [58]]) + ) class TestComplexDotOp(OpTest): - def setUp(self): self.op_type = "dot" self.python_api = paddle.dot @@ -152,7 +157,7 @@ class TestComplexDotOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} @@ -161,13 +166,15 @@ class TestComplexDotOp(OpTest): def init_input_output(self): self.x = np.random.random(100).astype( - self.dtype) + 1J * np.random.random(100).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(100).astype(self.dtype) self.y = np.random.random(100).astype( - self.dtype) + 1J * np.random.random(100).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(100).astype(self.dtype) self.out = np.dot(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones(1, self.dtype) + 1J * np.ones(1, self.dtype) + self.grad_out = np.ones(1, self.dtype) + 1j * np.ones(1, self.dtype) self.grad_x = self.grad_out * np.conj(self.y) self.grad_y = self.grad_out * np.conj(self.x) @@ -175,31 +182,36 @@ class TestComplexDotOp(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + 
user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) class TestComplexDotOp2D(OpTest): - def setUp(self): self.op_type = "dot" self.init_base_dtype() @@ -208,7 +220,7 @@ class TestComplexDotOp2D(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} @@ -216,17 +228,18 @@ class TestComplexDotOp2D(OpTest): self.dtype = np.float64 def init_input_output(self): - self.x = np.random.random( - (2, 100)).astype(self.dtype) + 1J * np.random.random( - (2, 100)).astype(self.dtype) - self.y = np.random.random( - (2, 100)).astype(self.dtype) + 1J * np.random.random( - (2, 100)).astype(self.dtype) + self.x = np.random.random((2, 100)).astype( + self.dtype + ) + 1j * np.random.random((2, 100)).astype(self.dtype) + self.y = np.random.random((2, 100)).astype( + self.dtype + ) + 1j * np.random.random((2, 100)).astype(self.dtype) self.out = np.diag(np.dot(self.x, self.y.T)).reshape(-1, 1) def init_grad_input_output(self): - self.grad_out = np.ones((2, 1), self.dtype) + 1J * np.ones( - (2, 1), self.dtype) + self.grad_out = np.ones((2, 1), self.dtype) + 1j * np.ones( + (2, 1), self.dtype + ) self.grad_x = self._get_grad(self.grad_out, self.y) self.grad_y = self._get_grad(self.grad_out, self.x) @@ -240,24 +253,30 @@ class TestComplexDotOp2D(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_downpoursgd.py b/python/paddle/fluid/tests/unittests/test_downpoursgd.py index f241a854c89c3ddd79fadc59303d7d15abd1cda2..6626f0ebfa0478c4b1d580c8d9b77b4b8fc432bd 100644 --- a/python/paddle/fluid/tests/unittests/test_downpoursgd.py +++ b/python/paddle/fluid/tests/unittests/test_downpoursgd.py @@ -18,7 +18,10 @@ import paddle.fluid as fluid import os import unittest import sys -from paddle.fluid.incubate.fleet.parameter_server.pslib.node import DownpourWorker, DownpourServer +from paddle.fluid.incubate.fleet.parameter_server.pslib.node import ( + DownpourWorker, + DownpourServer, +) from google.protobuf import text_format import paddle.fluid.incubate.fleet.parameter_server.pslib.ps_pb2 as pslib 
from paddle.fluid.trainer_factory import TrainerFactory @@ -40,15 +43,17 @@ class TestListenAndServOp(unittest.TestCase): pass else: print(sys.platform) - if not os.path.exists('{}/{}'.format(cache_path, - 'fleet_desc.prototxt')): + if not os.path.exists( + '{}/{}'.format(cache_path, 'fleet_desc.prototxt') + ): cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format( - cache_path) + cache_path + ) os.system(cmd) x = fluid.layers.data(name='x', shape=[1], dtype='int64') - x_emb = fluid.layers.embedding(input=x, - size=[1, 2], - is_distributed=True) + x_emb = fluid.layers.embedding( + input=x, size=[1, 2], is_distributed=True + ) y_predict = fluid.layers.fc(input=x_emb, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) @@ -67,7 +72,7 @@ class TestListenAndServOp(unittest.TestCase): program_configs = {} program_configs[program_id] = { "pull_sparse": [0], - "push_sparse": [0] + "push_sparse": [0], } program_configs[program_id]["pull_dense"] = [1] program_configs[program_id]["push_dense"] = [1] @@ -100,15 +105,17 @@ class TestListenAndServOp(unittest.TestCase): pass else: print(sys.platform) - if not os.path.exists('{}/{}'.format(cache_path, - 'fleet_desc.prototxt')): + if not os.path.exists( + '{}/{}'.format(cache_path, 'fleet_desc.prototxt') + ): cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format( - cache_path) + cache_path + ) os.system(cmd) x = fluid.layers.data(name='x', shape=[1], dtype='int64') - x_emb = fluid.layers.embedding(input=x, - size=[1, 2], - is_distributed=True) + x_emb = fluid.layers.embedding( + input=x, size=[1, 2], is_distributed=True + ) y_predict = fluid.layers.fc(input=x_emb, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) @@ -127,7 +134,7 @@ class TestListenAndServOp(unittest.TestCase): program_configs = {} program_configs[program_id] = { "pull_sparse": [0], - "push_sparse": [0] + "push_sparse": [0], } program_configs[program_id]["pull_dense"] = [1] program_configs[program_id]["push_dense"] = [1] @@ -158,15 +165,17 @@ class TestListenAndServOp(unittest.TestCase): pass else: print(sys.platform) - if not os.path.exists('{}/{}'.format(cache_path, - 'fleet_desc.prototxt')): + if not os.path.exists( + '{}/{}'.format(cache_path, 'fleet_desc.prototxt') + ): cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format( - cache_path) + cache_path + ) os.system(cmd) x = fluid.layers.data(name='x', shape=[1], dtype='int64') - x_emb = fluid.layers.embedding(input=x, - size=[1, 2], - is_distributed=True) + x_emb = fluid.layers.embedding( + input=x, size=[1, 2], is_distributed=True + ) y_predict = fluid.layers.fc(input=x_emb, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) @@ -185,7 +194,7 @@ class TestListenAndServOp(unittest.TestCase): program_configs = {} program_configs[program_id] = { "pull_sparse": [0], - "push_sparse": [0] + "push_sparse": [0], } program_configs[program_id]["pull_dense"] = [1] program_configs[program_id]["push_dense"] = [1] diff --git a/python/paddle/fluid/tests/unittests/test_dpsgd_op.py b/python/paddle/fluid/tests/unittests/test_dpsgd_op.py index 1b529d1760128421c908d79eb3c1bbc4e79e0763..2e505c05a891f419064b4faacf15a531d77d8865 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dpsgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_dpsgd_op.py @@ -18,10 +18,8 @@ from op_test import OpTest class TestDpsgdOp(OpTest): - def setUp(self): - '''Test Dpsgd Operator with supplied attributes - ''' + '''Test Dpsgd Operator with supplied attributes''' self.op_type = "dpsgd" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -34,7 +32,7 @@ class TestDpsgdOp(OpTest): self.inputs = { 'Param': param, 'Grad': grad, - 'LearningRate': np.array([learning_rate]).astype("float32") + 'LearningRate': np.array([learning_rate]).astype("float32"), } self.attrs = {'clip': clip, 'batch_size': batch_size, 'sigma': sigma} @@ -70,5 +68,6 @@ def dpsgd_step(inputs, attributes): if __name__ == "__main__": import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py index d558032dd37276cbe815c556080c879b92db0078..a21ec9625f927873a5cfe6c3120288b37642ffb4 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py @@ -24,34 +24,44 @@ from paddle import _legacy_C_ops from paddle.static import default_main_program -def dropout_nd(x, - p=0.5, - axis=None, - training=True, - mode="upscale_in_train", - name=None): +def dropout_nd( + x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None +): drop_axes = [axis] if isinstance(axis, int) else list(axis) seed = None - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if _non_static_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed - out, mask = _legacy_C_ops.dropout_nd(x, 'dropout_prob', p, 'is_test', - not training, 'fix_seed', seed - is not None, 'seed', - seed if seed is not None else 0, - 'dropout_implementation', mode, - 'axis', drop_axes) + out, mask = _legacy_C_ops.dropout_nd( + x, + 'dropout_prob', + p, + 'is_test', + not training, + 'fix_seed', + seed is not None, + 'seed', + seed if seed is not None else 0, + 'dropout_implementation', + mode, + 'axis', + drop_axes, + ) return out helper = LayerHelper('dropout_nd', **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'dropout') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'dropout' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) mask = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) def get_attrs(prog, dropout_prob, is_test, seed): if (seed is None or seed == 0) and prog.random_seed != 0: @@ -62,19 +72,18 @@ def dropout_nd(x, 'fix_seed': seed is not None, 'seed': seed if seed is not None else 0, 'dropout_implementation': mode, - 'axis': drop_axes + 'axis': drop_axes, } return attrs attrs = get_attrs(helper.main_program, p, not training, seed) - helper.append_op(type='dropout_nd', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'Mask': [mask] - }, - attrs=attrs) + helper.append_op( + type='dropout_nd', + inputs={'X': [x]}, + outputs={'Out': [out], 'Mask': [mask]}, + attrs=attrs, + ) return out @@ -82,7 +91,6 @@ paddle.enable_static() class TestDropoutNdOp(OpTest): - def setUp(self): self.op_type = 
"dropout_nd" self.inputs = {'X': np.random.random((4, 32, 16)).astype("float64")} @@ -90,11 +98,11 @@ class TestDropoutNdOp(OpTest): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'axis': [1] + 'axis': [1], } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((1, 32, 1)).astype('uint8') + 'Mask': np.ones((1, 32, 1)).astype('uint8'), } def test_check_output(self): @@ -105,7 +113,6 @@ class TestDropoutNdOp(OpTest): class TestDropoutNdAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -118,7 +125,7 @@ class TestDropoutNdAPI(unittest.TestCase): with fluid.dygraph.guard(place): in_np = np.random.random([4, 32, 16]).astype("float32") input = paddle.to_tensor(in_np) - res1 = dropout_nd(x=input, p=0., axis=[0, 1]) + res1 = dropout_nd(x=input, p=0.0, axis=[0, 1]) res2 = dropout_nd(x=input, p=0.5, axis=[0, 1]) np.testing.assert_allclose(res1.numpy(), in_np, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index b7da098182b9fd930e5bbd653755c1b08d5ae34c..862ca0c1f9135ac1c574b4b3fe1b746a5a6d204d 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -26,14 +26,13 @@ from paddle import _C_ops class TestDropoutOp(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64)).astype('uint8') + 'Mask': np.ones((32, 64)).astype('uint8'), } def test_check_output(self): @@ -44,14 +43,13 @@ class TestDropoutOp(OpTest): class TestDropoutOpInput1d(OpTest): - def setUp(self): self.op_type = "dropout" - self.inputs = {'X': np.random.random((2000, )).astype("float32")} + self.inputs = {'X': np.random.random((2000,)).astype("float32")} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((2000)).astype('uint8') + 'Mask': np.ones((2000)).astype('uint8'), } def test_check_output(self): @@ -62,32 +60,29 @@ class TestDropoutOpInput1d(OpTest): class TestDropoutOp2(TestDropoutOp): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': np.zeros((32, 64)).astype('float32'), - 'Mask': np.zeros((32, 64)).astype('uint8') + 'Mask': np.zeros((32, 64)).astype('uint8'), } class TestDropoutOp3(TestDropoutOp): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")} self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False} self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64, 2)).astype('uint8') + 'Mask': np.ones((32, 64, 2)).astype('uint8'), } @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp4(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} @@ -102,7 +97,6 @@ class TestDropoutOp4(OpTest): @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp5(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")} @@ -116,7 +110,6 @@ class TestDropoutOp5(OpTest): class 
TestDropoutOp6(TestDropoutOp): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} @@ -124,16 +117,15 @@ class TestDropoutOp6(TestDropoutOp): 'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': np.zeros((32, 64)).astype('float32'), - 'Mask': np.zeros((32, 64)).astype('uint8') + 'Mask': np.zeros((32, 64)).astype('uint8'), } class TestDropoutOp7(TestDropoutOp): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")} @@ -141,17 +133,16 @@ class TestDropoutOp7(TestDropoutOp): 'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64, 2)).astype('uint8') + 'Mask': np.ones((32, 64, 2)).astype('uint8'), } @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp8(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} @@ -159,7 +150,7 @@ class TestDropoutOp8(OpTest): 'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} @@ -169,14 +160,13 @@ class TestDropoutOp8(OpTest): @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestDropoutOp9(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")} self.attrs = { 'dropout_prob': 0.75, 'is_test': True, - 'dropout_implementation': 'upscale_in_train' + 'dropout_implementation': 'upscale_in_train', } self.outputs = {'Out': self.inputs['X']} @@ -185,19 +175,18 @@ class TestDropoutOp9(OpTest): class TestDropoutOpWithSeed(OpTest): - def setUp(self): self.op_type = "dropout" self.inputs = { "X": np.random.random((32, 64)).astype("float32"), - "Seed": np.asarray([125], dtype="int32") + "Seed": np.asarray([125], dtype="int32"), } self.attrs = { 'dropout_prob': 0.0, } self.outputs = { 'Out': self.inputs['X'], - 'Mask': np.ones((32, 64)).astype('uint8') + 'Mask': np.ones((32, 64)).astype('uint8'), } def test_check_output(self): @@ -207,13 +196,12 @@ class TestDropoutOpWithSeed(OpTest): self.check_grad(['X'], 'Out', max_relative_error=0.05) -@unittest.skipIf(not core.is_compiled_with_cuda() - or not core.op_support_gpu("dropout"), - "core is not compiled with CUDA or core is not support dropout" - ) +@unittest.skipIf( + not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"), + "core is not compiled with CUDA or core is not support dropout", +) @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestFP16DropoutOp(OpTest): - def setUp(self): self.op_type = "dropout" self.init_test_case() @@ -224,7 +212,7 @@ class TestFP16DropoutOp(OpTest): self.attrs = { 'dropout_prob': self.prob, 'fix_seed': self.fix_seed, - 'is_test': True + 'is_test': True, } self.outputs = {'Out': out} @@ -237,13 +225,12 @@ class TestFP16DropoutOp(OpTest): self.check_output_with_place(core.CUDAPlace(0), atol=1e-3) -@unittest.skipIf(not core.is_compiled_with_cuda() - or not core.op_support_gpu("dropout"), - "core is not compiled with CUDA or core is not support dropout" - ) +@unittest.skipIf( + not core.is_compiled_with_cuda() or not 
core.op_support_gpu("dropout"), + "core is not compiled with CUDA or core is not support dropout", +) @skip_check_grad_ci(reason="For inference, check_grad is not required.") class TestFP16DropoutOp2(TestFP16DropoutOp): - def init_test_case(self): self.input_size = [32, 64, 3] self.prob = 0.75 @@ -251,7 +238,6 @@ class TestFP16DropoutOp2(TestFP16DropoutOp): class TestBF16DropoutOp(OpTest): - def setUp(self): self.op_type = "dropout" self.dtype = np.uint16 @@ -260,9 +246,10 @@ class TestBF16DropoutOp(OpTest): self.inputs = {'X': convert_float_to_uint16(x)} self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False} self.outputs = { - 'Out': - convert_float_to_uint16(np.zeros((32, 64)).astype('float32')), - 'Mask': np.zeros((32, 64)).astype('uint8') + 'Out': convert_float_to_uint16( + np.zeros((32, 64)).astype('float32') + ), + 'Mask': np.zeros((32, 64)).astype('uint8'), } def test_check_output(self): @@ -273,7 +260,6 @@ class TestBF16DropoutOp(OpTest): class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase): - def test_seed_cpu_place(self): paddle.enable_static() main_program = Program() @@ -288,51 +274,52 @@ class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase): shape=[1], dtype='int32', persistable=False, - stop_gradient=True) + stop_gradient=True, + ) x_out_var = main_program.global_block().create_var( name=x_out_var, shape=[40, 40], dtype='float32', persistable=False, - stop_gradient=True) - x_var = main_program.global_block().create_var(name=x_var_name, - shape=[40, 40], - dtype='float32', - persistable=False, - stop_gradient=True) + stop_gradient=True, + ) + x_var = main_program.global_block().create_var( + name=x_var_name, + shape=[40, 40], + dtype='float32', + persistable=False, + stop_gradient=True, + ) mask_var = main_program.global_block().create_var( name=mask_var_name, shape=[1], dtype='int', persistable=False, - stop_gradient=True) - - main_program.global_block().append_op(type="fill_constant", - outputs={"Out": x_var_name}, - attrs={ - "shape": [40, 40], - "dtype": x_var.dtype, - "value": 1.0, - "place_type": 0 - }) + stop_gradient=True, + ) + + main_program.global_block().append_op( + type="fill_constant", + outputs={"Out": x_var_name}, + attrs={ + "shape": [40, 40], + "dtype": x_var.dtype, + "value": 1.0, + "place_type": 0, + }, + ) main_program.global_block().append_op( type='seed', inputs={}, outputs={'Out': seed_input_var}, - attrs={ - 'seed': 1, - 'force_cpu': True - }) - main_program.global_block().append_op(type='dropout', - inputs={ - 'X': x_var, - 'Seed': seed_input_var - }, - attrs={'dropout_prob': 0.}, - outputs={ - 'Out': x_out_var, - 'Mask': mask_var - }) + attrs={'seed': 1, 'force_cpu': True}, + ) + main_program.global_block().append_op( + type='dropout', + inputs={'X': x_var, 'Seed': seed_input_var}, + attrs={'dropout_prob': 0.0}, + outputs={'Out': x_out_var, 'Mask': mask_var}, + ) place = fluid.CPUPlace() if core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) @@ -340,20 +327,21 @@ class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase): x_out, mask_out = exe.run( main_program, feed={}, - fetch_list=[x_out_var.name, mask_var.name]) + fetch_list=[x_out_var.name, mask_var.name], + ) x_in_np = np.ones([40, 40]).astype("float32") np.testing.assert_allclose(x_out, x_in_np, rtol=1e-05) class TestDropoutOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of dropout must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) fluid.layers.dropout(x1, dropout_prob=0.5) self.assertRaises(TypeError, test_Variable) @@ -361,16 +349,15 @@ class TestDropoutOpError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="int32") + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="int32" + ) fluid.layers.dropout(x2, dropout_prob=0.5) self.assertRaises(TypeError, test_dtype) class TestDropoutFAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -380,60 +367,64 @@ class TestDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): input = fluid.data(name="input", shape=[-1, -1], dtype="float32") - res1 = paddle.nn.functional.dropout(x=input, p=0., training=False) - res2 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='upscale_in_train') - res3 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='downscale_in_infer') - res4 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='upscale_in_train') - res5 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='downscale_in_infer') - res6 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=True, - mode='upscale_in_train') - res7 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=True, - mode='downscale_in_infer') - res8 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='upscale_in_train') - res9 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='downscale_in_infer') - res10 = paddle.nn.functional.dropout(x=input, p=1., training=True) - res11 = paddle.fluid.layers.dropout(x=input, dropout_prob=0.) 
- res12 = paddle.nn.functional.dropout(x=input, - p=0., - axis=(0, 1), - training=False, - mode='upscale_in_train') - - res13 = paddle.nn.functional.dropout(x=input, - p=0.7, - axis=1, - training=True, - mode='upscale_in_train') + res1 = paddle.nn.functional.dropout(x=input, p=0.0, training=False) + res2 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=True, mode='upscale_in_train' + ) + res3 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=True, mode='downscale_in_infer' + ) + res4 = paddle.nn.functional.dropout( + x=input, p=0.0, axis=0, training=False, mode='upscale_in_train' + ) + res5 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=0, + training=False, + mode='downscale_in_infer', + ) + res6 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='upscale_in_train', + ) + res7 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='downscale_in_infer', + ) + res8 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + mode='upscale_in_train', + ) + res9 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + mode='downscale_in_infer', + ) + res10 = paddle.nn.functional.dropout(x=input, p=1.0, training=True) + res11 = paddle.fluid.layers.dropout(x=input, dropout_prob=0.0) + res12 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=(0, 1), + training=False, + mode='upscale_in_train', + ) + + res13 = paddle.nn.functional.dropout( + x=input, p=0.7, axis=1, training=True, mode='upscale_in_train' + ) in_np = np.ones([40, 40]).astype("float32") res_np = in_np @@ -441,21 +432,36 @@ class TestDropoutFAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [ - res1, res2, res3, res4, res5, res6, res7, res8, res9, res11, - res12 + res1, + res2, + res3, + res4, + res5, + res6, + res7, + res8, + res9, + res11, + res12, ] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) - fetches2 = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res10]) + fetches2 = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res10], + ) np.testing.assert_allclose(fetches2[0], res_np2, rtol=1e-05) - fetches3 = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res13]) + fetches3 = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res13], + ) def test_static(self): for place in self.places: @@ -469,68 +475,99 @@ class TestDropoutFAPI(unittest.TestCase): res_np2 = np.zeros_like(in_np) input = fluid.dygraph.to_variable(in_np) - res1 = paddle.nn.functional.dropout(x=input, - p=0., - training=False) - res2 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='upscale_in_train') - res3 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=True, - mode='downscale_in_infer') - res4 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='upscale_in_train') - res5 = paddle.nn.functional.dropout(x=input, - p=0., - axis=0, - training=False, - mode='downscale_in_infer') - res6 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=True, - mode='upscale_in_train') - res7 = paddle.nn.functional.dropout(x=input, - p=0., - 
axis=[0, 1], - training=True, - mode='downscale_in_infer') - res8 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='upscale_in_train') - res9 = paddle.nn.functional.dropout(x=input, - p=0., - axis=[0, 1], - training=False, - mode='downscale_in_infer') - res10 = paddle.nn.functional.dropout(x=input, - p=1., - training=True) - dropout = paddle.fluid.dygraph.Dropout(p=0, ) + res1 = paddle.nn.functional.dropout( + x=input, p=0.0, training=False + ) + res2 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=0, + training=True, + mode='upscale_in_train', + ) + res3 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=0, + training=True, + mode='downscale_in_infer', + ) + res4 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=0, + training=False, + mode='upscale_in_train', + ) + res5 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=0, + training=False, + mode='downscale_in_infer', + ) + res6 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='upscale_in_train', + ) + res7 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=True, + mode='downscale_in_infer', + ) + res8 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + mode='upscale_in_train', + ) + res9 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=[0, 1], + training=False, + mode='downscale_in_infer', + ) + res10 = paddle.nn.functional.dropout( + x=input, p=1.0, training=True + ) + dropout = paddle.fluid.dygraph.Dropout( + p=0, + ) res11 = dropout(input) - res12 = paddle.nn.functional.dropout(x=input, - p=0., - axis=(0, 1), - training=False, - mode='upscale_in_train') - res13 = paddle.nn.functional.dropout(x=input, - p=0.5, - axis=1, - training=True, - mode='upscale_in_train') + res12 = paddle.nn.functional.dropout( + x=input, + p=0.0, + axis=(0, 1), + training=False, + mode='upscale_in_train', + ) + res13 = paddle.nn.functional.dropout( + x=input, + p=0.5, + axis=1, + training=True, + mode='upscale_in_train', + ) res_list = [ - res1, res2, res3, res4, res5, res6, res7, res8, res9, res11, - res12 + res1, + res2, + res3, + res4, + res5, + res6, + res7, + res8, + res9, + res11, + res12, ] for res in res_list: np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05) @@ -538,22 +575,23 @@ class TestDropoutFAPI(unittest.TestCase): class TestDropoutFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) paddle.nn.functional.dropout(x1, p=0.5) self.assertRaises(TypeError, test_Variable) def test_Variable2(): # the input of dropout must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) paddle.nn.functional.dropout(x1, p=0.5, axis=0) self.assertRaises(TypeError, test_Variable2) @@ -617,7 +655,6 @@ class TestDropoutFAPIError(unittest.TestCase): class TestDropoutCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -630,16 +667,15 @@ class TestDropoutCAPI(unittest.TestCase): input_np = np.random.random([40, 40]).astype("float32") result_np = input_np input = fluid.dygraph.to_variable(input_np) - m = paddle.nn.Dropout(p=0.) + m = paddle.nn.Dropout(p=0.0) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - result_np, - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), result_np, rtol=1e-05 + ) class TestDropout2DFAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -648,17 +684,15 @@ class TestDropout2DFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 4, 5], - dtype="float32") - res1 = paddle.nn.functional.dropout2d(x=input, - p=0., - training=False, - data_format='NCHW') - res2 = paddle.nn.functional.dropout2d(x=input, - p=0., - training=False, - data_format='NHWC') + input = fluid.data( + name="input", shape=[2, 3, 4, 5], dtype="float32" + ) + res1 = paddle.nn.functional.dropout2d( + x=input, p=0.0, training=False, data_format='NCHW' + ) + res2 = paddle.nn.functional.dropout2d( + x=input, p=0.0, training=False, data_format='NHWC' + ) in_np = np.random.random([2, 3, 4, 5]).astype("float32") res_np = in_np @@ -666,9 +700,11 @@ class TestDropout2DFAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [res1, res2] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) def test_static(self): @@ -682,14 +718,12 @@ class TestDropout2DFAPI(unittest.TestCase): res_np = in_np input = fluid.dygraph.to_variable(in_np) - res1 = paddle.nn.functional.dropout2d(x=input, - p=0., - training=False, - data_format='NCHW') - res2 = paddle.nn.functional.dropout2d(x=input, - p=0., - training=False, - data_format='NHWC') + res1 = paddle.nn.functional.dropout2d( + x=input, p=0.0, training=False, data_format='NCHW' + ) + res2 = paddle.nn.functional.dropout2d( + x=input, p=0.0, training=False, data_format='NHWC' + ) res_list = [res1, res2] for res in res_list: @@ -697,7 +731,6 @@ class TestDropout2DFAPI(unittest.TestCase): class TestDropout2DFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -717,7 +750,6 @@ class TestDropout2DFAPIError(unittest.TestCase): class TestDropout2DCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -730,16 +762,15 @@ class TestDropout2DCAPI(unittest.TestCase): input_np = np.random.random([2, 3, 4, 5]).astype("float32") result_np = input_np input = fluid.dygraph.to_variable(input_np) - m = paddle.nn.Dropout2D(p=0.) 
+ m = paddle.nn.Dropout2D(p=0.0) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - result_np, - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), result_np, rtol=1e-05 + ) class TestDropout3DFAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -748,17 +779,15 @@ class TestDropout3DFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 4, 5, 6], - dtype="float32") - res1 = paddle.nn.functional.dropout3d(x=input, - p=0., - training=False, - data_format='NCDHW') - res2 = paddle.nn.functional.dropout3d(x=input, - p=0., - training=False, - data_format='NDHWC') + input = fluid.data( + name="input", shape=[2, 3, 4, 5, 6], dtype="float32" + ) + res1 = paddle.nn.functional.dropout3d( + x=input, p=0.0, training=False, data_format='NCDHW' + ) + res2 = paddle.nn.functional.dropout3d( + x=input, p=0.0, training=False, data_format='NDHWC' + ) in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32") res_np = in_np @@ -766,9 +795,11 @@ class TestDropout3DFAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [res1, res2] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) def test_static(self): @@ -782,14 +813,12 @@ class TestDropout3DFAPI(unittest.TestCase): res_np = in_np input = fluid.dygraph.to_variable(in_np) - res1 = paddle.nn.functional.dropout3d(x=input, - p=0., - training=False, - data_format='NCDHW') - res2 = paddle.nn.functional.dropout3d(x=input, - p=0., - training=False, - data_format='NDHWC') + res1 = paddle.nn.functional.dropout3d( + x=input, p=0.0, training=False, data_format='NCDHW' + ) + res2 = paddle.nn.functional.dropout3d( + x=input, p=0.0, training=False, data_format='NDHWC' + ) res_list = [res1, res2] for res in res_list: @@ -797,7 +826,6 @@ class TestDropout3DFAPI(unittest.TestCase): class TestDropout3DFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -817,7 +845,6 @@ class TestDropout3DFAPIError(unittest.TestCase): class TestDropout3DCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -830,16 +857,15 @@ class TestDropout3DCAPI(unittest.TestCase): input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32") result_np = input_np input = fluid.dygraph.to_variable(input_np) - m = paddle.nn.Dropout3D(p=0.) + m = paddle.nn.Dropout3D(p=0.0) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - result_np, - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), result_np, rtol=1e-05 + ) class TestAlphaDropoutFAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -849,11 +875,11 @@ class TestAlphaDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): input = fluid.data(name="input", shape=[40, 40], dtype="float32") - res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.) - res2 = paddle.nn.functional.alpha_dropout(x=input, - p=0., - training=False) - res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.) 
+ res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0) + res2 = paddle.nn.functional.alpha_dropout( + x=input, p=0.0, training=False + ) + res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.0) in_np = np.random.random([40, 40]).astype("float32") res_np = in_np @@ -862,13 +888,17 @@ class TestAlphaDropoutFAPI(unittest.TestCase): exe = fluid.Executor(place) res_list = [res1, res2] for res in res_list: - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res], + ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res3]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res3], + ) np.testing.assert_allclose(fetches[0], res_np3, rtol=1e-05) def test_static(self): @@ -883,11 +913,11 @@ class TestAlphaDropoutFAPI(unittest.TestCase): res_np3 = np.zeros_like(in_np) input = fluid.dygraph.to_variable(in_np) - res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.) - res2 = paddle.nn.functional.alpha_dropout(x=input, - p=0., - training=False) - res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.) + res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0) + res2 = paddle.nn.functional.alpha_dropout( + x=input, p=0.0, training=False + ) + res3 = paddle.nn.functional.alpha_dropout(x=input, p=1.0) res_list = [res1, res2] for res in res_list: @@ -896,14 +926,14 @@ class TestAlphaDropoutFAPI(unittest.TestCase): class TestAlphaDropoutFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) paddle.nn.functional.alpha_dropout(x1, p=0.5) self.assertRaises(TypeError, test_Variable) @@ -931,7 +961,6 @@ class TestAlphaDropoutFAPIError(unittest.TestCase): class TestAlphaDropoutCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -944,16 +973,15 @@ class TestAlphaDropoutCAPI(unittest.TestCase): input_np = np.random.random([40, 40]).astype("float32") result_np = input_np input = fluid.dygraph.to_variable(input_np) - m = paddle.nn.AlphaDropout(p=0.) 
+ m = paddle.nn.AlphaDropout(p=0.0) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - result_np, - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), result_np, rtol=1e-05 + ) class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): - def setUp(self): paddle.framework.random.set_random_seed_generator('seed0', 123) paddle.framework.random.set_random_seed_generator('seed1', 123) @@ -964,19 +992,26 @@ class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): self.places.append(paddle.CUDAPlace(0)) def check_static_result(self, place): - from paddle.distributed.fleet.meta_parallel.parallel_layers.random import dropout + from paddle.distributed.fleet.meta_parallel.parallel_layers.random import ( + dropout, + ) + with static.program_guard(static.Program(), static.Program()): input = static.data(name="input", shape=[40, 40], dtype="float32") - res1 = dropout(input, - p=0.3, - training=True, - mode='upscale_in_train', - rng_name='seed0') - res2 = dropout(input, - p=0.3, - training=True, - mode='upscale_in_train', - rng_name='seed1') + res1 = dropout( + input, + p=0.3, + training=True, + mode='upscale_in_train', + rng_name='seed0', + ) + res2 = dropout( + input, + p=0.3, + training=True, + mode='upscale_in_train', + rng_name='seed1', + ) res3 = dropout(input, p=0.3) in_np = np.random.random([40, 40]).astype("float32") @@ -984,9 +1019,11 @@ class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): exe = static.Executor(place) res_list = [res1, res2] for i in range(2): - out1, out2 = exe.run(static.default_main_program(), - feed={"input": in_np}, - fetch_list=res_list) + out1, out2 = exe.run( + static.default_main_program(), + feed={"input": in_np}, + fetch_list=res_list, + ) np.testing.assert_allclose(out1, out2, rtol=1e-05) def test_static(self): @@ -995,7 +1032,6 @@ class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): class TestDropoutBackward(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -1020,7 +1056,8 @@ class TestDropoutBackward(unittest.TestCase): np.testing.assert_array_equal( input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy())) + self.cal_grad_downscale_in_infer(mask.numpy()), + ) def test_backward_downscale_in_infer_eager(self): for place in self.places: @@ -1028,12 +1065,14 @@ class TestDropoutBackward(unittest.TestCase): with _test_eager_guard(): input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False - out, mask = _C_ops.dropout(input, None, 0.5, False, - "downgrade_in_infer", 0, False) + out, mask = _C_ops.dropout( + input, None, 0.5, False, "downgrade_in_infer", 0, False + ) out.backward() np.testing.assert_array_equal( input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy())) + self.cal_grad_downscale_in_infer(mask.numpy()), + ) def test_backward_upscale_train(self): _enable_legacy_dygraph() @@ -1043,15 +1082,20 @@ class TestDropoutBackward(unittest.TestCase): prob = 0.5 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False - out, mask = core.ops.dropout(input, 'dropout_prob', prob, - "dropout_implementation", - "upscale_in_train") + out, mask = core.ops.dropout( + input, + 'dropout_prob', + prob, + "dropout_implementation", + "upscale_in_train", + ) out.backward() - np.testing.assert_allclose(input.gradient(), - self.cal_grad_upscale_train( - mask.numpy(), prob), - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_upscale_train(mask.numpy(), prob), + rtol=1e-05, + ) 
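The expected gradient above comes from cal_grad_upscale_train, a helper defined earlier in this test file and not visible in this hunk. Under the 'upscale_in_train' convention the kept elements are scaled by 1/(1 - p) in the forward pass, so the gradient of the output with respect to the input is simply mask / (1 - p). A short NumPy sketch of that convention (the names and shapes here are illustrative, not taken from the helper itself):

import numpy as np

rng = np.random.default_rng(0)
p = 0.5
x = rng.random((40, 40)).astype('float32')
# mask keeps each element with probability 1 - p.
mask = (rng.random(x.shape) >= p).astype('float32')

# Forward pass in training mode: kept values are scaled up by 1 / (1 - p)
# so that the expected activation matches inference.
y = x * mask / (1.0 - p)

# Gradient of sum(y) with respect to x is the same scaled mask.
grad_x = mask / (1.0 - p)

assert np.allclose(y, x * grad_x)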
def test_backward_upscale_train_eager(self): for place in self.places: @@ -1060,14 +1104,16 @@ class TestDropoutBackward(unittest.TestCase): prob = 0.5 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False - out, mask = _C_ops.dropout(input, None, 0.5, False, - "upscale_in_train", 0, False) + out, mask = _C_ops.dropout( + input, None, 0.5, False, "upscale_in_train", 0, False + ) out.backward() - np.testing.assert_allclose(input.gradient(), - self.cal_grad_upscale_train( - mask.numpy(), prob), - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_upscale_train(mask.numpy(), prob), + rtol=1e-05, + ) def test_backward_upscale_train_2(self): _enable_legacy_dygraph() @@ -1077,15 +1123,20 @@ class TestDropoutBackward(unittest.TestCase): prob = 0.3 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False - out, mask = core.ops.dropout(input, 'dropout_prob', prob, - "dropout_implementation", - "upscale_in_train") + out, mask = core.ops.dropout( + input, + 'dropout_prob', + prob, + "dropout_implementation", + "upscale_in_train", + ) out.backward() - np.testing.assert_allclose(input.gradient(), - self.cal_grad_upscale_train( - mask.numpy(), prob), - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_upscale_train(mask.numpy(), prob), + rtol=1e-05, + ) def test_backward_upscale_train_2_eager(self): for place in self.places: @@ -1095,24 +1146,28 @@ class TestDropoutBackward(unittest.TestCase): prob = 0.3 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False - out, mask = _C_ops.dropout(input, None, 0.3, False, - "upscale_in_train", 0, False) + out, mask = _C_ops.dropout( + input, None, 0.3, False, "upscale_in_train", 0, False + ) out.backward() - np.testing.assert_allclose(input.gradient(), - self.cal_grad_upscale_train( - mask.numpy(), prob), - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), + self.cal_grad_upscale_train(mask.numpy(), prob), + rtol=1e-05, + ) class TestDropOutWithProbTensor(unittest.TestCase): - def setUp(self): self.init_info() self.input = np.random.random(self.shape).astype("float32") - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def init_info(self): self.shape = [10, 10] @@ -1151,21 +1206,18 @@ class TestDropOutWithProbTensor(unittest.TestCase): class TestDropOut2DWithProbTensor(TestDropOutWithProbTensor): - def init_info(self): self.shape = [2, 3, 10, 10] self.api = paddle.nn.functional.dropout2d class TestDropOut3DWithProbTensor(TestDropOutWithProbTensor): - def init_info(self): self.shape = [2, 3, 8, 8, 8] self.api = paddle.nn.functional.dropout3d class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -1188,8 +1240,16 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index2), 12872777397) self.assertEqual(np.sum(out), 16778744.0) expect = [ - 0.6914956, 0.5294584, 0.19032137, 0.6996228, 0.3338527, 0.8442094, - 0.96965003, 1.1726775, 0., 0.28037727 + 0.6914956, + 0.5294584, + 0.19032137, + 0.6996228, + 0.3338527, + 0.8442094, + 0.96965003, + 1.1726775, + 0.0, + 0.28037727, ] np.testing.assert_allclose(out[10, 100, 500:510], expect, rtol=1e-05) @@ -1201,8 +1261,16 @@ class TestRandomValue(unittest.TestCase): 
self.assertEqual(np.sum(index2), 8582219962) self.assertEqual(np.sum(out), 16778396.563660286) expect = [ - 1.28587354, 0.15563703, 0., 0.28799703, 0., 0., 0., 0.54964, - 0.51355682, 0.33818988 + 1.28587354, + 0.15563703, + 0.0, + 0.28799703, + 0.0, + 0.0, + 0.0, + 0.54964, + 0.51355682, + 0.33818988, ] np.testing.assert_allclose(out[20, 100, 500:510], expect, rtol=1e-05) @@ -1212,7 +1280,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index0), 130086900) self.assertEqual(np.sum(index1), 4291190105) self.assertEqual(np.sum(index2), 4292243807) - expect = [0., 0., 0., 0., 0., 0., 0., 0., 4., 4.] + expect = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0] np.testing.assert_allclose(out[0, 100, 500:510], expect, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py b/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py index 30109d430be9b1cfe3c0cab5820338fefea9bf1d..0d435691560d4043f52e9416cc0850d8f0fc4fec 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py @@ -22,46 +22,51 @@ from paddle.fluid.framework import _test_eager_guard class SimpleImgConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=False, - dtype='float32', - param_attr=None, - bias_attr=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + dtype='float32', + param_attr=None, + bias_attr=None, + ): super(SimpleImgConvPool, self).__init__() - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=param_attr, - bias_attr=bias_attr, - use_cudnn=use_cudnn, - dtype=dtype, - act=act) - - self._pool2d = Pool2D(pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=param_attr, + bias_attr=bias_attr, + use_cudnn=use_cudnn, + dtype=dtype, + act=act, + ) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -70,39 +75,45 @@ class SimpleImgConvPool(fluid.dygraph.Layer): class MNIST(fluid.dygraph.Layer): - def __init__(self, dtype="float32"): super(MNIST, self).__init__() - self._simple_img_conv_pool_1 = SimpleImgConvPool(num_channels=3, - num_filters=20, - filter_size=5, - pool_size=2, - pool_stride=2, - act="relu", - dtype=dtype, - use_cudnn=True) - - self._simple_img_conv_pool_2 = SimpleImgConvPool(num_channels=20, - num_filters=50, - filter_size=5, - pool_size=2, - pool_stride=2, - act="relu", - dtype=dtype, - use_cudnn=True) + self._simple_img_conv_pool_1 = 
SimpleImgConvPool( + num_channels=3, + num_filters=20, + filter_size=5, + pool_size=2, + pool_stride=2, + act="relu", + dtype=dtype, + use_cudnn=True, + ) + + self._simple_img_conv_pool_2 = SimpleImgConvPool( + num_channels=20, + num_filters=50, + filter_size=5, + pool_size=2, + pool_stride=2, + act="relu", + dtype=dtype, + use_cudnn=True, + ) self.pool_2_shape = 50 * 53 * 53 SIZE = 10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 + scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5 self._linear = Linear( self.pool_2_shape, 10, param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer(loc=0.0, - scale=scale)), + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), act="softmax", - dtype=dtype) + dtype=dtype, + ) def forward(self, inputs, label): x = self._simple_img_conv_pool_1(inputs) @@ -115,7 +126,6 @@ class MNIST(fluid.dygraph.Layer): class TestMnist(unittest.TestCase): - def func_mnist_fp16(self): if not fluid.is_compiled_with_cuda(): return diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_mode_of_unittest.py b/python/paddle/fluid/tests/unittests/test_dygraph_mode_of_unittest.py index ce8bb122f637f9f701c31cbc3cd72732d4230a5e..8da813c00ef447c65ab737b6f048b0e12c3c91e0 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_mode_of_unittest.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_mode_of_unittest.py @@ -17,11 +17,10 @@ import paddle class TestDygraphModeOfUnittest(unittest.TestCase): - def test_dygraph_mode(self): self.assertTrue( paddle.in_dynamic_mode(), - 'Default Mode of Unittest should be dygraph mode, but get static mode.' + 'Default Mode of Unittest should be dygraph mode, but get static mode.', ) diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index 559424c7fb440fa1f51a22eefafb1c0645e155fb..4486da7eb3039b4961d473226bfdeb022076833f 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -27,43 +27,48 @@ SEED = 123123111 class SimpleImgConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=False, - param_attr=None, - bias_attr=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None, + ): super(SimpleImgConvPool, self).__init__() - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=None, - bias_attr=None, - use_cudnn=use_cudnn) - - self._pool2d = Pool2D(pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + use_cudnn=use_cudnn, 
+ ) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -72,33 +77,30 @@ class SimpleImgConvPool(fluid.dygraph.Layer): class MNIST(fluid.dygraph.Layer): - def __init__(self): super(MNIST, self).__init__() - self._simple_img_conv_pool_1 = SimpleImgConvPool(1, - 20, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu" + ) - self._simple_img_conv_pool_2 = SimpleImgConvPool(20, - 50, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu" + ) self.pool_2_shape = 50 * 4 * 4 - SIZE = 100 #10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 - self._fc = Linear(self.pool_2_shape, - SIZE, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale)), - act="softmax") + SIZE = 100 # 10 + scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5 + self._fc = Linear( + self.pool_2_shape, + SIZE, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), + act="softmax", + ) def forward(self, inputs): x = self._simple_img_conv_pool_1(inputs) @@ -109,7 +111,6 @@ class MNIST(fluid.dygraph.Layer): class TestDygraphMultiForward(unittest.TestCase): - def test_mnist_forward_float32(self): epoch_num = 1 @@ -117,21 +118,25 @@ class TestDygraphMultiForward(unittest.TestCase): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) mnist = MNIST() - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=mnist.parameters()) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=mnist.parameters() + ) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True + ) dy_param_init_value = {} mnist.eval() for epoch in range(epoch_num): for batch_id, data in enumerate(train_reader()): - dy_x_data = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(128, 1) + dy_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(128, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) @@ -150,18 +155,21 @@ class TestDygraphMultiForward(unittest.TestCase): with new_program_scope(): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) mnist = MNIST() sgd = SGDOptimizer(learning_rate=1e-3) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True + ) - img = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) loss = fluid.layers.cross_entropy(cost, label) @@ -173,38 +181,42 @@ class TestDygraphMultiForward(unittest.TestCase): for 
param in mnist.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] for epoch in range(epoch_num): for batch_id, data in enumerate(train_reader()): - static_x_data = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([128, 1]) + static_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([128, 1]) + ) fetch_list = [avg_loss.name] - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_out = out[0] - np.testing.assert_allclose(dy_x_data.all(), - static_x_data.all(), - rtol=1e-05) + np.testing.assert_allclose( + dy_x_data.all(), static_x_data.all(), rtol=1e-05 + ) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py index 9c3e6e1529e4322a7d21e592830594be62b41ebf..b865a1b1799339376f1c2b4155f5df0c4fb6245d 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py @@ -21,14 +21,13 @@ from paddle.nn.utils import spectral_norm class TestDygraphSpectralNorm(unittest.TestCase): - def setUp(self): self.init_test_case() self.set_data() def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 12, 12]], ) + self.data_desc = (['x', [2, 12, 12]],) self.n_power_iterations = 1 self.eps = 1e-12 self.dim = None @@ -38,8 +37,9 @@ class TestDygraphSpectralNorm(unittest.TestCase): for desc in self.data_desc: data_name = desc[0] data_shape = desc[1] - data_value = np.random.random(size=[self.batch_size] + - data_shape).astype('float32') + data_value = np.random.random( + size=[self.batch_size] + data_shape + ).astype('float32') self.data[data_name] = data_value def spectral_normalize(self, weight, u, v, dim, power_iters, eps): @@ -68,18 +68,27 @@ class TestDygraphSpectralNorm(unittest.TestCase): linear = paddle.nn.Conv2D(2, 1, 3) before_weight = linear.weight.numpy().copy() if self.dim == None: - if isinstance(linear, (nn.Conv1DTranspose, nn.Conv2DTranspose, - nn.Conv3DTranspose, nn.Linear)): + if isinstance( + linear, + ( + nn.Conv1DTranspose, + nn.Conv2DTranspose, + nn.Conv3DTranspose, + nn.Linear, + ), + ): self.dim = 1 else: self.dim = 0 else: self.dim = (self.dim + len(before_weight)) % len(before_weight) - sn = spectral_norm(linear, - n_power_iterations=self.n_power_iterations, - eps=self.eps, - dim=self.dim) + sn = spectral_norm( + linear, + n_power_iterations=self.n_power_iterations, + eps=self.eps, + dim=self.dim, + ) u = sn.weight_u.numpy().copy() v = sn.weight_v.numpy().copy() outputs = [] @@ -88,52 +97,47 @@ class TestDygraphSpectralNorm(unittest.TestCase): 
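The hunk above wraps a small Conv2D with paddle.nn.utils.spectral_norm, which re-parameterises the layer's weight so that its largest singular value is driven towards one and stores the power-iteration vectors as weight_u and weight_v on the wrapped layer. A minimal usage sketch under the same assumptions as the test (a Conv2D(2, 1, 3) layer and a [3, 2, 12, 12] float32 input; the random data is illustrative):

import numpy as np
import paddle
import paddle.nn as nn
from paddle.nn.utils import spectral_norm

paddle.seed(123)
conv = nn.Conv2D(2, 1, 3)
# dim=0 normalises over the output-channel axis, as the test does for Conv2D.
sn_conv = spectral_norm(conv, n_power_iterations=1, eps=1e-12, dim=0)

u = sn_conv.weight_u.numpy().copy()  # left singular vector estimate
v = sn_conv.weight_v.numpy().copy()  # right singular vector estimate

x = paddle.to_tensor(np.random.random((3, 2, 12, 12)).astype('float32'))
y = sn_conv(x)  # each forward pass refines u and v by one power iteration
print(y.shape)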
outputs.append(output.numpy()) self.actual_outputs = linear.weight.numpy() - expect_output = self.spectral_normalize(before_weight, u, v, self.dim, - self.n_power_iterations, - self.eps) + expect_output = self.spectral_normalize( + before_weight, u, v, self.dim, self.n_power_iterations, self.eps + ) for expect, actual in zip(expect_output, self.actual_outputs): - np.testing.assert_allclose(np.array(actual), - np.array(expect), - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + np.array(actual), np.array(expect), rtol=1e-05, atol=0.001 + ) class TestDygraphWeightNormCase(TestDygraphSpectralNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.n_power_iterations = 1 self.eps = 1e-12 self.dim = None class TestDygraphWeightNormWithIterations(TestDygraphSpectralNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.n_power_iterations = 2 self.eps = 1e-12 self.dim = None class TestDygraphWeightNormWithDim(TestDygraphSpectralNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.n_power_iterations = 1 self.eps = 1e-12 self.dim = 1 class TestDygraphWeightNormWithEps(TestDygraphSpectralNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.n_power_iterations = 1 self.eps = 1e-10 self.dim = None diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py index 270a1afe04b4a015b6a2934943a360a868df9e83..ebae140f88c257469c6c2fb76bceaf3d2a49e91e 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py @@ -22,14 +22,13 @@ from paddle.nn.utils import weight_norm, remove_weight_norm class TestDygraphWeightNorm(unittest.TestCase): - def setUp(self): self.init_test_case() self.set_data() def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = None def set_data(self): @@ -37,8 +36,9 @@ class TestDygraphWeightNorm(unittest.TestCase): for desc in self.data_desc: data_name = desc[0] data_shape = desc[1] - data_value = np.random.random(size=[self.batch_size] + - data_shape).astype('float32') + data_value = np.random.random( + size=[self.batch_size] + data_shape + ).astype('float32') self.data[data_name] = data_value def norm_except_dim(self, w, dim=None): @@ -88,14 +88,22 @@ class TestDygraphWeightNorm(unittest.TestCase): perm[dim] = 0 p_transposed = np.transpose(v, perm) transposed_shape = p_transposed.shape - transposed_shape_numel = reduce(lambda x, y: x * y, - transposed_shape) + transposed_shape_numel = reduce( + lambda x, y: x * y, transposed_shape + ) p_matrix = np.reshape( - p_transposed, (p_transposed.shape[0], - transposed_shape_numel // p_transposed.shape[0])) - v_norm = v / np.expand_dims(np.expand_dims( - np.linalg.norm(p_matrix, axis=1, keepdims=True), axis=0), - axis=(ndims - 1)) + p_transposed, + ( + p_transposed.shape[0], + transposed_shape_numel // p_transposed.shape[0], + ), + ) + v_norm = v / np.expand_dims( + np.expand_dims( + np.linalg.norm(p_matrix, axis=1, keepdims=True), axis=0 + ), + axis=(ndims - 1), + ) v_norm = np.reshape(v_norm, transposed_shape) v_norm = np.transpose(v_norm, perm) g = 
np.squeeze(g, axis=1) @@ -103,9 +111,10 @@ class TestDygraphWeightNorm(unittest.TestCase): eaxis = 2 elif dim == 2: eaxis = 1 - g_mul = np.expand_dims(np.expand_dims(np.expand_dims(g, axis=0), - axis=eaxis), - axis=(ndims - 1)) + g_mul = np.expand_dims( + np.expand_dims(np.expand_dims(g, axis=0), axis=eaxis), + axis=(ndims - 1), + ) w = g_mul * v_norm return g, v @@ -129,52 +138,46 @@ class TestDygraphWeightNorm(unittest.TestCase): expect_output = self.weight_normalize(before_weight, self.dim) for expect, actual in zip(expect_output, self.actual_outputs): - np.testing.assert_allclose(np.array(actual), - expect, - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + np.array(actual), expect, rtol=1e-05, atol=0.001 + ) class TestDygraphWeightNormCase1(TestDygraphWeightNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = 0 class TestDygraphWeightNormCase2(TestDygraphWeightNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = 1 class TestDygraphWeightNormCase3(TestDygraphWeightNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = 3 class TestDygraphWeightNormCase4(TestDygraphWeightNorm): - def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = -3 class TestDygraphRemoveWeightNorm(unittest.TestCase): - def setUp(self): self.init_test_case() def init_test_case(self): self.batch_size = 3 - self.data_desc = (['x', [2, 3, 3]], ) + self.data_desc = (['x', [2, 3, 3]],) self.dim = None def test_check_output(self): @@ -184,10 +187,9 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase): wn = weight_norm(linear, dim=self.dim) rwn = remove_weight_norm(linear) after_weight = linear.weight - np.testing.assert_allclose(before_weight.numpy(), - after_weight.numpy(), - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + before_weight.numpy(), after_weight.numpy(), rtol=1e-05, atol=0.001 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index de03749769d8d9692fce6de54bb4943a73d7a78b..e4f0a3a173a3f34242f81e6e26175028fc83baab 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -29,20 +29,21 @@ numpy.random.seed(2020) class TestDynamicRNN(unittest.TestCase): - def setUp(self): self.word_dict_len = 5147 self.BATCH_SIZE = 2 reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100) self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE) - def _train(self, - main_program, - startup_program, - feed_list, - fetch_list, - is_nested=False, - max_iters=1): + def _train( + self, + main_program, + startup_program, + feed_list, + fetch_list, + is_nested=False, + max_iters=1, + ): place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_program) @@ -50,10 +51,12 @@ class TestDynamicRNN(unittest.TestCase): data = next(self.train_data()) for iter_id in range(max_iters): - fetch_outs = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=fetch_list, - return_numpy=False) + fetch_outs = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=fetch_list, + return_numpy=False, + ) if len(fetch_list) == 3: rnn_in_seq = fetch_outs[0] rnn_out_seq = 
fetch_outs[1] @@ -65,9 +68,9 @@ class TestDynamicRNN(unittest.TestCase): loss_i = numpy.array(fetch_outs[2]) elif len(fetch_list) == 1: loss_i = numpy.array(fetch_outs[0]) - #print(loss_i) + # print(loss_i) - self.assertEqual((1, ), loss_i.shape) + self.assertEqual((1,), loss_i.shape) self.assertFalse(numpy.isnan(loss_i)) if iter_id == 0: loss_0 = loss_i @@ -81,13 +84,12 @@ class TestDynamicRNN(unittest.TestCase): startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - sentence = fluid.layers.data(name='word', - shape=[1], - dtype='int64', - lod_level=1) - sent_emb = fluid.layers.embedding(input=sentence, - size=[self.word_dict_len, 32], - dtype='float32') + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=1 + ) + sent_emb = fluid.layers.embedding( + input=sentence, size=[self.word_dict_len, 32], dtype='float32' + ) rank_table = lod_rank_table(x=sent_emb) sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table) @@ -100,7 +102,8 @@ class TestDynamicRNN(unittest.TestCase): input=fluid.layers.array_read(array=sent_emb_array, i=i), value=0, shape=[-1, 100], - dtype='float32') + dtype='float32', + ) boot_mem.stop_gradient = False mem_array = fluid.layers.array_write(x=boot_mem, i=i) @@ -127,8 +130,9 @@ class TestDynamicRNN(unittest.TestCase): logits = fluid.layers.fc(input=last, size=1, act=None) label = fluid.layers.data(name='label', shape=[1], dtype='float32') - loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, - label=label) + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label + ) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(1e-4) sgd.minimize(loss=loss) @@ -136,12 +140,14 @@ class TestDynamicRNN(unittest.TestCase): # Check for lod_level set in compile-time. self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level) - self._train(main_program=main_program, - startup_program=startup_program, - feed_list=[sentence, label], - fetch_list=[sent_emb, result_all_timesteps, loss], - is_nested=False, - max_iters=1) + self._train( + main_program=main_program, + startup_program=startup_program, + feed_list=[sentence, label], + fetch_list=[sent_emb, result_all_timesteps, loss], + is_nested=False, + max_iters=1, + ) def test_train_dynamic_rnn(self): main_program = fluid.Program() @@ -149,13 +155,12 @@ class TestDynamicRNN(unittest.TestCase): main_program.random_seed = 10 startup_program.random_seed = 10 with fluid.program_guard(main_program, startup_program): - sentence = fluid.layers.data(name='word', - shape=[1], - dtype='int64', - lod_level=1) - sent_emb = fluid.layers.embedding(input=sentence, - size=[self.word_dict_len, 32], - dtype='float32') + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=1 + ) + sent_emb = fluid.layers.embedding( + input=sentence, size=[self.word_dict_len, 32], dtype='float32' + ) drnn = fluid.layers.DynamicRNN() with drnn.block(): @@ -170,8 +175,9 @@ class TestDynamicRNN(unittest.TestCase): logits = fluid.layers.fc(input=last, size=1, act=None) label = fluid.layers.data(name='label', shape=[1], dtype='float32') - loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, - label=label) + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label + ) loss = paddle.mean(loss) sgd = fluid.optimizer.Adam(1e-3) sgd.minimize(loss=loss) @@ -179,12 +185,14 @@ class TestDynamicRNN(unittest.TestCase): # Check for lod_level set in compile-time. 
self.assertEqual(sent_emb.lod_level, drnn_result.lod_level) - self._train(main_program=main_program, - startup_program=startup_program, - feed_list=[sentence, label], - fetch_list=[sent_emb, drnn_result, loss], - is_nested=False, - max_iters=100) + self._train( + main_program=main_program, + startup_program=startup_program, + feed_list=[sentence, label], + fetch_list=[sent_emb, drnn_result, loss], + is_nested=False, + max_iters=100, + ) def _fake_reader(self): seq_len, label = [[2, 2]], [0, 1] @@ -205,30 +213,30 @@ class TestDynamicRNN(unittest.TestCase): main_program.random_seed = 10 startup_program.random_seed = 10 with fluid.program_guard(main_program, startup_program): - sentence = fluid.layers.data(name='word', - shape=[1], - dtype='int64', - lod_level=2) - label = fluid.layers.data(name='label', - shape=[1], - dtype='float32', - lod_level=1) + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=2 + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='float32', lod_level=1 + ) drnn0 = fluid.layers.DynamicRNN() with drnn0.block(): in_0 = drnn0.step_input(sentence) assert in_0.lod_level == 1, "the lod level of in_ should be 1" - sentence_emb = fluid.layers.embedding(input=in_0, - size=[len(word_dict), 32], - dtype='float32') - out_0 = fluid.layers.fc(input=sentence_emb, - size=100, - act='tanh') + sentence_emb = fluid.layers.embedding( + input=in_0, size=[len(word_dict), 32], dtype='float32' + ) + out_0 = fluid.layers.fc( + input=sentence_emb, size=100, act='tanh' + ) drnn1 = fluid.layers.DynamicRNN() with drnn1.block(): in_1 = drnn1.step_input(out_0) - assert in_1.lod_level == 0, "the lod level of in_1 should be 0" + assert ( + in_1.lod_level == 0 + ), "the lod level of in_1 should be 0" out_1 = fluid.layers.fc(input=[in_1], size=100, act='tanh') drnn1.output(out_1) @@ -238,20 +246,23 @@ class TestDynamicRNN(unittest.TestCase): last = drnn0() logits = fluid.layers.fc(input=last, size=1, act=None) - loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, - label=label) + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label + ) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(1e-3) sgd.minimize(loss=loss) train_data_orig = self.train_data self.train_data = paddle.batch(self._fake_reader, batch_size=2) - self._train(main_program=main_program, - startup_program=startup_program, - feed_list=[sentence, label], - fetch_list=[loss], - is_nested=True, - max_iters=100) + self._train( + main_program=main_program, + startup_program=startup_program, + feed_list=[sentence, label], + fetch_list=[loss], + is_nested=True, + max_iters=100, + ) self.train_data = train_data_orig # this unit test is just used to the two layer nested dyn_rnn. 
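The DynamicRNN tests above (and the nested variant that follows) all build the recurrence the same way: declare a LoD-level-1 input, open drnn.block(), pull one time step with step_input, carry state with memory/update_memory, and emit per-step results with output. A condensed static-graph sketch of that pattern using the same layer calls as the tests; the sizes and names are illustrative:

import paddle
import paddle.fluid as fluid

paddle.enable_static()

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # Variable-length sequences of 32-d features (LoD level 1).
    sentence = fluid.data(
        name='sentence', shape=[None, 32], dtype='float32', lod_level=1
    )

    drnn = fluid.layers.DynamicRNN()
    with drnn.block():
        # One time step of every sequence still alive at this step.
        word = drnn.step_input(sentence)
        # Recurrent state, initialised to zeros before the first step.
        memory = drnn.memory(shape=[10], dtype='float32', value=0)
        hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
        drnn.update_memory(ex_mem=memory, new_mem=hidden)
        drnn.output(hidden)

    # Step outputs are re-assembled into a sequence; keep the last step of each.
    rnn_out = drnn()
    last = fluid.layers.sequence_pool(input=rnn_out, pool_type='last')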
@@ -264,14 +275,12 @@ class TestDynamicRNN(unittest.TestCase): main_program.random_seed = 10 startup_program.random_seed = 10 with fluid.program_guard(main_program, startup_program): - sentence = fluid.layers.data(name='word', - shape=[1], - dtype='int64', - lod_level=2) - label = fluid.layers.data(name='label', - shape=[1], - dtype='float32', - lod_level=1) + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=2 + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='float32', lod_level=1 + ) drnn0 = fluid.layers.DynamicRNN() with drnn0.block(): @@ -279,14 +288,19 @@ class TestDynamicRNN(unittest.TestCase): sentence_emb = fluid.layers.embedding( input=in_0, size=[len(word_dict), hidden_size], - dtype='float32') - input_forward_proj = fluid.layers.fc(input=sentence_emb, - size=hidden_size * 4, - act=None, - bias_attr=False) - forward, _ = fluid.layers.dynamic_lstm(input=input_forward_proj, - size=hidden_size * 4, - use_peepholes=False) + dtype='float32', + ) + input_forward_proj = fluid.layers.fc( + input=sentence_emb, + size=hidden_size * 4, + act=None, + bias_attr=False, + ) + forward, _ = fluid.layers.dynamic_lstm( + input=input_forward_proj, + size=hidden_size * 4, + use_peepholes=False, + ) drnn1 = fluid.layers.DynamicRNN() with drnn1.block(): @@ -299,33 +313,34 @@ class TestDynamicRNN(unittest.TestCase): last = drnn0() logits = fluid.layers.fc(input=last, size=1, act=None) - loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, - label=label) + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label + ) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(1e-3) sgd.minimize(loss=loss) train_data_orig = self.train_data self.train_data = paddle.batch(self._fake_reader, batch_size=2) - self._train(main_program=main_program, - startup_program=startup_program, - feed_list=[sentence, label], - fetch_list=[loss], - is_nested=True, - max_iters=100) + self._train( + main_program=main_program, + startup_program=startup_program, + feed_list=[sentence, label], + fetch_list=[loss], + is_nested=True, + max_iters=100, + ) self.train_data = train_data_orig class TestDynamicRNNErrors(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): init = fluid.layers.zeros(shape=[1], dtype='float32') shape = 'shape' - sentence = fluid.data(name='sentence', - shape=[None, 32], - dtype='float32', - lod_level=1) + sentence = fluid.data( + name='sentence', shape=[None, 32], dtype='float32', lod_level=1 + ) # The type of Input(shape) in API(memory) must be list or tuple def input_shape_type_of_memory(): @@ -341,9 +356,9 @@ class TestDynamicRNNErrors(unittest.TestCase): with drnn.block(): word = drnn.step_input(sentence) memory = drnn.memory(shape=[10], dtype='float32', value=0) - hidden = fluid.layers.fc(input=[word, memory], - size=10, - act='tanh') + hidden = fluid.layers.fc( + input=[word, memory], size=10, act='tanh' + ) out = numpy.ones(1).astype('float32') drnn.update_memory(ex_mem=memory, new_mem=hidden) drnn.output(hidden, out) diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index 0a186b0c8afbada1ca9ffc679de37fb3ccd8e140..3922f6a8c229d9a13f177195715d77a0ed467545 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -24,16 +24,15 @@ def build_and_run_program(place, batch_size, beam_size, 
stop_gradient=False): np.random.seed(2) x = layers.assign( - np.random.rand(batch_size, beam_size, 32).astype("float32")) + np.random.rand(batch_size, beam_size, 32).astype("float32") + ) indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices") - step_idx = layers.fill_constant(shape=[1], - dtype="int64", - value=0, - force_cpu=True) - max_len = layers.fill_constant(shape=[1], - dtype="int64", - value=10, - force_cpu=True) + step_idx = layers.fill_constant( + shape=[1], dtype="int64", value=0, force_cpu=True + ) + max_len = layers.fill_constant( + shape=[1], dtype="int64", value=10, force_cpu=True + ) cond = layers.less_than(x=step_idx, y=max_len) while_op = layers.While(cond) scores = layers.array_write(x, step_idx) @@ -44,7 +43,8 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): bs.stop_gradient = stop_gradient batch_pos = layers.expand( layers.unsqueeze(layers.range(0, bs, 1, dtype=bs.dtype), [1]), - [1, beam_size]) + [1, beam_size], + ) topk_coordinates = layers.stack([batch_pos, indices], axis=2) topk_coordinates.stop_gradient = stop_gradient score = layers.gather_nd(x, topk_coordinates) @@ -58,17 +58,15 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): opt = fluid.optimizer.Adam(0.01) opt.minimize(loss) exe = fluid.Executor(place) - data = np.random.random_integers(low=0, - high=beam_size - 1, - size=(batch_size, - beam_size)).astype("int64") - loss_val, = exe.run(feed={"indices": data}, fetch_list=[loss]) + data = np.random.random_integers( + low=0, high=beam_size - 1, size=(batch_size, beam_size) + ).astype("int64") + (loss_val,) = exe.run(feed={"indices": data}, fetch_list=[loss]) return loss_val class TestDynRNNStopGradient(unittest.TestCase): - def setUp(self): self.batch_size = 20 self.beam_size = 64 @@ -76,10 +74,12 @@ class TestDynRNNStopGradient(unittest.TestCase): def run_main(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.Scope()): - value1 = build_and_run_program(place, self.batch_size, - self.beam_size, False) - value2 = build_and_run_program(place, self.batch_size, - self.beam_size, True) + value1 = build_and_run_program( + place, self.batch_size, self.beam_size, False + ) + value2 = build_and_run_program( + place, self.batch_size, self.beam_size, True + ) np.testing.assert_array_equal(value1, value2) diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index 7b7921d91c22acb99a3f95ae501f020a24f7e609..01cfedf063ecef265d9c849f24c9ddb62f008fd9 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -22,7 +22,6 @@ from decorator_helper import prog_scope class Memory(object): - def __init__(self, shape, dtype='float32'): self.ex = np.zeros(shape=shape, dtype=dtype) self.cur = None @@ -45,7 +44,6 @@ class Memory(object): class Output(object): - def __init__(self): self.outs = [] @@ -60,7 +58,6 @@ class Output(object): class BaseRNN(object): - def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15): self.num_seq = num_seq self.inputs = collections.defaultdict(list) @@ -159,8 +156,10 @@ class BaseRNN(object): def get_numeric_gradient_of_param(self, param_name, delta=0.001): p = self.params[param_name] if len(p.shape) != 2: - raise ValueError("Not support get numeric gradient of an parameter," - " which is not matrix") + raise ValueError( + 
"Not support get numeric gradient of an parameter," + " which is not matrix" + ) g = np.zeros(shape=p.shape, dtype=p.dtype) for i in range(p.shape[0]): @@ -174,10 +173,9 @@ class BaseRNN(object): g[i][j] = (pos - neg) / (delta * 2) return g - def get_numeric_gradient_of_input(self, - input_name, - delta=0.001, - return_one_tensor=True): + def get_numeric_gradient_of_input( + self, input_name, delta=0.001, return_one_tensor=True + ): ipt = self.inputs[input_name] grad = [] @@ -213,7 +211,6 @@ class BaseRNN(object): class SeedFixedTestCase(unittest.TestCase): - @classmethod def setUpClass(cls): """Fix random seeds to remove randomness from tests""" @@ -238,17 +235,18 @@ class TestSimpleMul(SeedFixedTestCase): OUT_NAME = 'Out' class SimpleMul(BaseRNN): - def __init__(self): base = TestSimpleMul - super(base.SimpleMul, - self).__init__({base.DATA_NAME: { - 'shape': [base.DATA_WIDTH] - }}, {}, { - base.PARAM_NAME: { - 'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH] - } - }, [base.OUT_NAME]) + super(base.SimpleMul, self).__init__( + {base.DATA_NAME: {'shape': [base.DATA_WIDTH]}}, + {}, + { + base.PARAM_NAME: { + 'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH] + } + }, + [base.OUT_NAME], + ) def step(self, X, W, Out): Out.out(np.matmul(X, W)) @@ -258,19 +256,21 @@ class TestSimpleMul(SeedFixedTestCase): @prog_scope() def test_forward_backward(self): py_rnn = TestSimpleMul.SimpleMul() - dat = fluid.layers.data(name=self.DATA_NAME, - shape=[self.DATA_WIDTH], - lod_level=1) + dat = fluid.layers.data( + name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1 + ) dat.stop_gradient = False rnn = fluid.layers.DynamicRNN() with rnn.block(): d = rnn.step_input(dat) - o = fluid.layers.fc(input=d, - param_attr=self.PARAM_NAME, - bias_attr=False, - size=self.HIDDEN_WIDTH, - act=None) + o = fluid.layers.fc( + input=d, + param_attr=self.PARAM_NAME, + bias_attr=False, + size=self.HIDDEN_WIDTH, + act=None, + ) rnn.output(o) out = rnn() @@ -283,18 +283,24 @@ class TestSimpleMul(SeedFixedTestCase): out, w_g, i_g = list( map( np.array, - exe.run(feed=py_rnn.to_feed(cpu), - fetch_list=[ - out, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False))) + exe.run( + feed=py_rnn.to_feed(cpu), + fetch_list=[ + out, + self.PARAM_NAME + "@GRAD", + self.DATA_NAME + "@GRAD", + ], + return_numpy=False, + ), + ) + ) out_by_python = py_rnn.exe()[self.OUT_NAME] np.testing.assert_allclose(out, out_by_python, rtol=1e-05) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) np.testing.assert_allclose(w_g_num, w_g, rtol=0.05) i_g_num = py_rnn.get_numeric_gradient_of_input( - input_name=self.DATA_NAME) + input_name=self.DATA_NAME + ) i_g_num = i_g_num.reshape(i_g.shape) np.testing.assert_allclose(i_g_num, i_g, rtol=0.05) @@ -306,23 +312,24 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): PARAM_NAME = 'W' class SimpleMulWithMemory(BaseRNN): - def __init__(self): super(TestSimpleMulWithMemory.SimpleMulWithMemory, self).__init__( { TestSimpleMulWithMemory.DATA_NAME: { 'shape': [TestSimpleMulWithMemory.DATA_WIDTH] } - }, {'Mem': { - 'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH] - }}, { + }, + {'Mem': {'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH]}}, + { TestSimpleMulWithMemory.PARAM_NAME: { 'shape': [ TestSimpleMulWithMemory.DATA_WIDTH, - TestSimpleMulWithMemory.HIDDEN_WIDTH + TestSimpleMulWithMemory.HIDDEN_WIDTH, ] } - }, ['Out']) + }, + ['Out'], + ) def step(self, X, Mem, W, Out): o = np.matmul(X, W) @@ -337,19 +344,21 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): @prog_scope() def 
test_forward_backward(self): py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() - data = fluid.layers.data(name=self.DATA_NAME, - shape=[self.DATA_WIDTH], - lod_level=1) + data = fluid.layers.data( + name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1 + ) data.stop_gradient = False rnn = fluid.layers.DynamicRNN() with rnn.block(): d = rnn.step_input(data) mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH]) - hidden = fluid.layers.fc(input=d, - size=self.HIDDEN_WIDTH, - param_attr=self.PARAM_NAME, - bias_attr=False, - act=None) + hidden = fluid.layers.fc( + input=d, + size=self.HIDDEN_WIDTH, + param_attr=self.PARAM_NAME, + bias_attr=False, + act=None, + ) o = fluid.layers.elementwise_add(x=hidden, y=mem) rnn.update_memory(mem, o) rnn.output(o) @@ -365,13 +374,18 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): last_np, w_g, i_g = list( map( np.array, - exe.run(feed=feed, - fetch_list=[ - last, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False))) - last_by_py, = list(py_rnn.exe().values()) + exe.run( + feed=feed, + fetch_list=[ + last, + self.PARAM_NAME + "@GRAD", + self.DATA_NAME + "@GRAD", + ], + return_numpy=False, + ), + ) + ) + (last_by_py,) = list(py_rnn.exe().values()) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) np.testing.assert_allclose(last_np, last_by_py, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index 3bbf9ff3266b2652cfcbb83e5b617625b86a8cd3..2f0a99247564c4ac851b9cbfe5bb9a5fa0769b0e 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -27,7 +27,6 @@ np.random.seed(1) class TestDyRnnStaticInput(unittest.TestCase): - def setUp(self): self._delta = 0.005 self._max_sequence_len = 3 @@ -58,14 +57,14 @@ class TestDyRnnStaticInput(unittest.TestCase): self.static_input_tensor.set(self.static_input_data, self.place) def fetch_value(self, var): - fetch_outs = self.exe.run(feed={ - 'x_tensor': - self.x_tensor, - 'static_input_tensor': - self.static_input_tensor - }, - fetch_list=[var], - return_numpy=False) + fetch_outs = self.exe.run( + feed={ + 'x_tensor': self.x_tensor, + 'static_input_tensor': self.static_input_tensor, + }, + fetch_list=[var], + return_numpy=False, + ) return self._lodtensor_to_ndarray(fetch_outs[0]) def _lodtensor_to_ndarray(self, lod_tensor): @@ -76,24 +75,28 @@ class TestDyRnnStaticInput(unittest.TestCase): return ndarray, lod_tensor.recursive_sequence_lengths() def build_graph(self, only_forward=False): - x_tensor = fluid.layers.data(name='x_tensor', - shape=[self.x_tensor_dim], - dtype='float32', - lod_level=1) + x_tensor = fluid.layers.data( + name='x_tensor', + shape=[self.x_tensor_dim], + dtype='float32', + lod_level=1, + ) x_tensor.stop_gradient = False static_input_tensor = fluid.layers.data( name='static_input_tensor', shape=[self.static_input_tensor_dim], dtype='float32', - lod_level=1) + lod_level=1, + ) static_input_tensor.stop_gradient = False if only_forward: static_input_out_array = self._program.global_block().create_var( name='static_input_out_array', type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype='float32') + dtype='float32', + ) static_input_out_array.stop_gradient = True rnn = fluid.layers.DynamicRNN() @@ -101,25 +104,30 @@ class TestDyRnnStaticInput(unittest.TestCase): step_x = rnn.step_input(x_tensor) step_static_input = rnn.static_input(static_input_tensor) if 
only_forward: - fluid.layers.array_write(x=step_static_input, - i=rnn.step_idx, - array=static_input_out_array) - last = fluid.layers.sequence_pool(input=step_static_input, - pool_type='last') - projected = fluid.layers.fc(input=[step_x, last], - size=self.output_dim) + fluid.layers.array_write( + x=step_static_input, + i=rnn.step_idx, + array=static_input_out_array, + ) + last = fluid.layers.sequence_pool( + input=step_static_input, pool_type='last' + ) + projected = fluid.layers.fc( + input=[step_x, last], size=self.output_dim + ) rnn.output(projected) if only_forward: static_input_step_outs = [] - step_idx = fluid.layers.fill_constant(shape=[1], - dtype='int64', - value=0) + step_idx = fluid.layers.fill_constant( + shape=[1], dtype='int64', value=0 + ) step_idx.stop_gradient = True for i in range(self._max_sequence_len): - step_out = fluid.layers.array_read(static_input_out_array, - step_idx) + step_out = fluid.layers.array_read( + static_input_out_array, step_idx + ) step_out.stop_gradient = True static_input_step_outs.append(step_out) fluid.layers.increment(x=step_idx, value=1.0, in_place=True) @@ -131,7 +139,8 @@ class TestDyRnnStaticInput(unittest.TestCase): loss = paddle.mean(last) append_backward(loss) static_input_grad = self._program.global_block().var( - framework.grad_var_name('static_input_tensor')) + framework.grad_var_name('static_input_tensor') + ) return static_input_grad, loss def get_expected_static_step_outs(self): @@ -145,8 +154,10 @@ class TestDyRnnStaticInput(unittest.TestCase): cur_offset = 0 for i in range(len(static_lod[0])): static_sliced.append( - self.static_input_data[cur_offset:(cur_offset + - static_lod[0][i])]) + self.static_input_data[ + cur_offset : (cur_offset + static_lod[0][i]) + ] + ) cur_offset += static_lod[0][i] static_seq_len = static_lod[0] static_reordered = [] @@ -170,7 +181,8 @@ class TestDyRnnStaticInput(unittest.TestCase): static_step_lods.append([lod]) end = total_len static_step_outs.append( - np.array(static_reordered[:end]).astype('float32')) + np.array(static_reordered[:end]).astype('float32') + ) return static_step_outs, static_step_lods @@ -203,13 +215,14 @@ class TestDyRnnStaticInput(unittest.TestCase): y_neg = self.fetch_value(loss)[0][0] self.static_input_tensor._set_float_element(i, origin) numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2 - np.testing.assert_allclose(actual_gradients, - numeric_gradients, - rtol=0.001) + np.testing.assert_allclose( + actual_gradients, numeric_gradients, rtol=0.001 + ) np.testing.assert_allclose( actual_lod, self.static_input_tensor.recursive_sequence_lengths(), - rtol=1e-05) + rtol=1e-05, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py index b6052d33c95cb7018d692ba9fea06e330def482b..eebc321b1a8f6b8c9b4e6b93dcfcf750999c5d21 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py @@ -39,8 +39,10 @@ def simple_fc_net(): hidden, size=200, act='tanh', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) prediction = fluid.layers.fc(hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) loss = paddle.mean(loss) @@ -65,7 +67,6 @@ def get_persistables_and_non_persistables(prog, 
fetch_list): class TestExecutor(unittest.TestCase): - def test_executor_main(self): places = [fluid.CPUPlace()] if fluid.core.is_compiled_with_cuda(): @@ -87,12 +88,13 @@ class TestExecutor(unittest.TestCase): def prepare_feed(self, image, label, dev_cnt=1): batch_size = 32 * dev_cnt - image_shape = (batch_size, ) + tuple(image.shape[1:]) - label_shape = (batch_size, ) + tuple(label.shape[1:]) + image_shape = (batch_size,) + tuple(image.shape[1:]) + label_shape = (batch_size,) + tuple(label.shape[1:]) image_np = np.random.random(size=image_shape).astype('float32') - label_np = np.random.random_integers(low=0, high=9, - size=label_shape).astype('int64') + label_np = np.random.random_integers( + low=0, high=9, size=label_shape + ).astype('int64') return image_np, label_np @@ -113,10 +115,16 @@ class TestExecutor(unittest.TestCase): if t._is_initialized(): outline_np_vars.append(name) - print('Non-alive persistable vars {} in {}'.format( - outline_p_vars, persitables)) - print('Alive non-persistable vars {} in {}'.format( - outline_np_vars, non_persistables)) + print( + 'Non-alive persistable vars {} in {}'.format( + outline_p_vars, persitables + ) + ) + print( + 'Alive non-persistable vars {} in {}'.format( + outline_np_vars, non_persistables + ) + ) self.assertEqual(len(outline_p_vars), 0) self.assertEqual(len(outline_np_vars), 0) @@ -130,12 +138,14 @@ class TestExecutor(unittest.TestCase): image, label, loss = simple_fc_net() loss.persistable = False persistables, non_persistables = get_persistables_and_non_persistables( - fluid.default_main_program(), [loss.name]) + fluid.default_main_program(), [loss.name] + ) print('Non-persistable var number {}'.format(len(non_persistables))) print(non_persistables) - self.assert_gc_vars(fluid.default_main_program(), [loss.name], - non_persistables) + self.assert_gc_vars( + fluid.default_main_program(), [loss.name], non_persistables + ) exe = fluid.Executor(self.place) exe.run(fluid.default_startup_program()) @@ -147,23 +157,34 @@ class TestExecutor(unittest.TestCase): for _ in range(10): image_np, label_np = self.prepare_feed(image, label) fluid.global_scope().var(image.name).get_tensor().set( - image_np, self.place) + image_np, self.place + ) fluid.global_scope().var(label.name).get_tensor().set( - label_np, self.place) + label_np, self.place + ) # exe.run would not create local scope # so that we can detect whether gc clears temporary variables - exe.run(fluid.default_main_program().desc, fluid.global_scope(), 0, - False, True, [loss.name]) - self.assertScopeVar(fluid.global_scope(), persistables, - non_persistables) + exe.run( + fluid.default_main_program().desc, + fluid.global_scope(), + 0, + False, + True, + [loss.name], + ) + self.assertScopeVar( + fluid.global_scope(), persistables, non_persistables + ) def pe_main(self): image, label, loss = simple_fc_net() loss.persistable = False persistables, non_persistables = get_persistables_and_non_persistables( - fluid.default_main_program(), [loss.name]) - self.assert_gc_vars(fluid.default_main_program(), [loss.name], - non_persistables) + fluid.default_main_program(), [loss.name] + ) + self.assert_gc_vars( + fluid.default_main_program(), [loss.name], non_persistables + ) exe = fluid.Executor(self.place) exe.run(fluid.default_startup_program()) @@ -176,11 +197,14 @@ class TestExecutor(unittest.TestCase): build_strategy.enable_inplace = False prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, exec_strategy=exec_strategy) + 
fluid.default_main_program() + ).with_data_parallel(loss_name=loss.name, exec_strategy=exec_strategy) - dev_cnt = fluid.core.get_cuda_device_count() if isinstance(self.place, fluid.CUDAPlace) \ + dev_cnt = ( + fluid.core.get_cuda_device_count() + if isinstance(self.place, fluid.CUDAPlace) else int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + ) for idx in range(10): image_np, label_np = self.prepare_feed(image, label, dev_cnt) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py index 6e1ed17a3b67931ce8742aee5a5b26775b3fb9cf..86952d74e397b4d943c3e84639141e05d7b31ce5 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py @@ -40,10 +40,9 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): reader = fake_imdb_reader(word_dict_size, batch_size * 40) train_reader = paddle.batch(reader, batch_size=batch_size) - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") @@ -54,8 +53,9 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() feeder = fluid.DataFeeder(feed_list=[data, label], place=place) - reader = feeder.decorate_reader(train_reader, - multi_devices=use_parallel_executor) + reader = feeder.decorate_reader( + train_reader, multi_devices=use_parallel_executor + ) exe = fluid.Executor(place) fluid.default_startup_program().random_seed = 1 @@ -65,8 +65,8 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): train_cp = fluid.default_main_program() if use_parallel_executor: train_cp = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=cost.name) + fluid.default_main_program() + ).with_data_parallel(loss_name=cost.name) fetch_list = [cost.name] else: fetch_list = [cost] @@ -74,16 +74,17 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): for pass_id in range(pass_num): batch_id = 0 for data in reader(): - exe.run(train_cp, - feed=data, - fetch_list=fetch_list if batch_id % 4 == 0 else []) + exe.run( + train_cp, + feed=data, + fetch_list=fetch_list if batch_id % 4 == 0 else [], + ) batch_id += 1 if batch_id > 16: break class TestBase(unittest.TestCase): - def setUp(self): self.net = None @@ -93,9 +94,11 @@ class TestBase(unittest.TestCase): for use_cuda in [True, False]: for use_parallel_executor in [False, True]: - print('network: {}, use_cuda: {}, use_parallel_executor: {}'. 
- format(self.net.__name__, use_cuda, - use_parallel_executor)) + print( + 'network: {}, use_cuda: {}, use_parallel_executor: {}'.format( + self.net.__name__, use_cuda, use_parallel_executor + ) + ) with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(core.Scope()): train(self.net, use_cuda, use_parallel_executor) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py index ac501a43ca75e0d6c470ce9b8bde14de458e9718..f785936dcb99f5815ac3c930375ac364b04bd0f8 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py @@ -20,18 +20,21 @@ import paddle.fluid as fluid fluid.core._set_eager_deletion_mode(0.0, 1.0, True) -def gru_net(data, - label, - dict_dim, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2, - emb_lr=400.0): +def gru_net( + data, + label, + dict_dim, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2, + emb_lr=400.0, +): emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr(learning_rate=emb_lr)) + param_attr=fluid.ParamAttr(learning_rate=emb_lr), + ) fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3) gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False) gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max') @@ -44,7 +47,6 @@ def gru_net(data, class GRUTest(TestBase): - def setUp(self): self.net = gru_net diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py index bb6f60820157392d485b558784f06972be9b8e31..7709460111ac742fa98bb4ccb172f85e37bc9b17 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py @@ -20,22 +20,25 @@ import unittest fluid.core._set_eager_deletion_mode(0.0, 1.0, True) -def lstm_net(data, - label, - dict_dim, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2, - emb_lr=30.0): +def lstm_net( + data, + label, + dict_dim, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2, + emb_lr=30.0, +): emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr(learning_rate=emb_lr)) + param_attr=fluid.ParamAttr(learning_rate=emb_lr), + ) fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4) - lstm_h, c = fluid.layers.dynamic_lstm(input=fc0, - size=hid_dim * 4, - is_reverse=False) + lstm_h, c = fluid.layers.dynamic_lstm( + input=fc0, size=hid_dim * 4, is_reverse=False + ) lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max_tanh = fluid.layers.tanh(lstm_max) fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') @@ -46,7 +49,6 @@ def lstm_net(data, class LSTMTest(TestBase): - def setUp(self): self.net = lstm_net diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py index e31595a4b016d7a0817f9a915f84925b7309e236..e48a8056d030ff124bcba3ef58802347561c2b75 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py @@ -28,7 +28,6 @@ os.environ["CPU_NUM"] = "1" class RNNConfig(object): - def __init__(self, model_type, rnn_model): self.model_type = model_type self.rnn_model = rnn_model @@ -95,26 
+94,30 @@ class RNNConfig(object): # Fake data reader for test class Reader(object): - def get_data_iter(self, rnn_config): for i in range(rnn_config.max_epoch): - x = np.zeros(shape=(rnn_config.batch_size, rnn_config.num_steps), - dtype='int64') - y = np.ones(shape=(rnn_config.batch_size, rnn_config.num_steps), - dtype='int64') + x = np.zeros( + shape=(rnn_config.batch_size, rnn_config.num_steps), + dtype='int64', + ) + y = np.ones( + shape=(rnn_config.batch_size, rnn_config.num_steps), + dtype='int64', + ) yield (x, y) # Model from PaddleNLP/models/language_model/lm_model.py in Paddle Models repo -def lm_model(hidden_size, - vocab_size, - batch_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None, - rnn_model='static'): - +def lm_model( + hidden_size, + vocab_size, + batch_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + rnn_model='static', +): def padding_rnn(input_embedding, len=3, init_hidden=None, init_cell=None): weight_1_arr = [] weight_2_arr = [] @@ -128,23 +131,24 @@ def lm_model(hidden_size, dtype="float32", name="fc_weight1_" + str(i), default_initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale)) + low=-init_scale, high=init_scale + ), + ) weight_1_arr.append(weight_1) bias_1 = layers.create_parameter( [hidden_size * 4], dtype="float32", name="fc_bias1_" + str(i), - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) bias_arr.append(bias_1) - pre_hidden = layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) + pre_hidden = layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) pre_hidden = layers.reshape(pre_hidden, shape=[-1, hidden_size]) pre_cell = layers.reshape(pre_cell, shape=[-1, hidden_size]) hidden_array.append(pre_hidden) @@ -165,25 +169,31 @@ def lm_model(hidden_size, gate_input = layers.matmul(x=nn, y=weight_1) gate_input = layers.elementwise_add(gate_input, bias) - i = layers.slice(gate_input, - axes=[1], - starts=[0], - ends=[hidden_size]) - j = layers.slice(gate_input, - axes=[1], - starts=[hidden_size], - ends=[hidden_size * 2]) - f = layers.slice(gate_input, - axes=[1], - starts=[hidden_size * 2], - ends=[hidden_size * 3]) - o = layers.slice(gate_input, - axes=[1], - starts=[hidden_size * 3], - ends=[hidden_size * 4]) + i = layers.slice( + gate_input, axes=[1], starts=[0], ends=[hidden_size] + ) + j = layers.slice( + gate_input, + axes=[1], + starts=[hidden_size], + ends=[hidden_size * 2], + ) + f = layers.slice( + gate_input, + axes=[1], + starts=[hidden_size * 2], + ends=[hidden_size * 3], + ) + o = layers.slice( + gate_input, + axes=[1], + starts=[hidden_size * 3], + ends=[hidden_size * 4], + ) c = pre_cell * layers.sigmoid(f) + layers.sigmoid( - i) * layers.tanh(j) + i + ) * layers.tanh(j) m = layers.tanh(c) * layers.sigmoid(o) rnn.update_memory(pre_hidden, m) @@ -198,7 +208,8 @@ def lm_model(hidden_size, input = layers.dropout( input, dropout_prob=dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn.step_output(input) rnnout = rnn() @@ -211,15 +222,13 @@ def lm_model(hidden_size, c = rnnout[i * 2 + 1] m.stop_gradient = True c.stop_gradient = True - last_h = layers.slice(m, - axes=[0], - starts=[num_steps - 1], - ends=[num_steps]) + last_h = layers.slice( + m, axes=[0], starts=[num_steps - 
1], ends=[num_steps] + ) last_hidden_array.append(last_h) - last_c = layers.slice(c, - axes=[0], - starts=[num_steps - 1], - ends=[num_steps]) + last_c = layers.slice( + c, axes=[0], starts=[num_steps - 1], ends=[num_steps] + ) last_cell_array.append(last_c) real_res = layers.transpose(x=real_res, perm=[1, 0, 2]) last_hidden = layers.concat(last_hidden_array, 0) @@ -227,10 +236,9 @@ def lm_model(hidden_size, return real_res, last_hidden, last_cell - def encoder_static(input_embedding, - len=3, - init_hidden=None, - init_cell=None): + def encoder_static( + input_embedding, len=3, init_hidden=None, init_cell=None + ): weight_1_arr = [] weight_2_arr = [] @@ -244,36 +252,37 @@ def lm_model(hidden_size, dtype="float32", name="fc_weight1_" + str(i), default_initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale)) + low=-init_scale, high=init_scale + ), + ) weight_1_arr.append(weight_1) bias_1 = layers.create_parameter( [hidden_size * 4], dtype="float32", name="fc_bias1_" + str(i), - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) bias_arr.append(bias_1) - pre_hidden = layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_hidden = layers.reshape(pre_hidden, - shape=[-1, hidden_size], - inplace=True) - pre_cell = layers.reshape(pre_cell, - shape=[-1, hidden_size], - inplace=True) + pre_hidden = layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) + pre_hidden = layers.reshape( + pre_hidden, shape=[-1, hidden_size], inplace=True + ) + pre_cell = layers.reshape( + pre_cell, shape=[-1, hidden_size], inplace=True + ) hidden_array.append(pre_hidden) cell_array.append(pre_cell) res = [] - sliced_inputs = layers.split(input_embedding, - num_or_sections=len, - dim=1) + sliced_inputs = layers.split( + input_embedding, num_or_sections=len, dim=1 + ) for index in range(len): input = sliced_inputs[index] @@ -291,7 +300,8 @@ def lm_model(hidden_size, i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1) c = pre_cell * layers.sigmoid(f) + layers.sigmoid( - i) * layers.tanh(j) + i + ) * layers.tanh(j) m = layers.tanh(c) * layers.sigmoid(o) hidden_array[k] = m @@ -302,55 +312,67 @@ def lm_model(hidden_size, input = layers.dropout( input, dropout_prob=dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append(input) last_hidden = layers.concat(hidden_array, 1) - last_hidden = layers.reshape(last_hidden, - shape=[-1, num_layers, hidden_size], - inplace=True) + last_hidden = layers.reshape( + last_hidden, shape=[-1, num_layers, hidden_size], inplace=True + ) last_hidden = layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = layers.concat(cell_array, 1) - last_cell = layers.reshape(last_cell, - shape=[-1, num_layers, hidden_size]) + last_cell = layers.reshape( + last_cell, shape=[-1, num_layers, hidden_size] + ) last_cell = layers.transpose(x=last_cell, perm=[1, 0, 2]) real_res = layers.concat(res, 0) - real_res = layers.reshape(real_res, - shape=[len, -1, hidden_size], - inplace=True) + real_res = layers.reshape( + real_res, shape=[len, -1, hidden_size], inplace=True + ) real_res = layers.transpose(x=real_res, perm=[1, 0, 2]) return real_res, last_hidden, last_cell batch_size_each = batch_size - x = layers.data(name="x", - shape=[batch_size_each, num_steps, 1], - 
dtype='int64', - append_batch_size=False) - y = layers.data(name="y", - shape=[batch_size_each * num_steps, 1], - dtype='int64', - append_batch_size=False) - - init_hidden = layers.data(name="init_hidden", - shape=[num_layers, batch_size_each, hidden_size], - dtype='float32', - append_batch_size=False) - init_cell = layers.data(name="init_cell", - shape=[num_layers, batch_size_each, hidden_size], - dtype='float32', - append_batch_size=False) + x = layers.data( + name="x", + shape=[batch_size_each, num_steps, 1], + dtype='int64', + append_batch_size=False, + ) + y = layers.data( + name="y", + shape=[batch_size_each * num_steps, 1], + dtype='int64', + append_batch_size=False, + ) + + init_hidden = layers.data( + name="init_hidden", + shape=[num_layers, batch_size_each, hidden_size], + dtype='float32', + append_batch_size=False, + ) + init_cell = layers.data( + name="init_cell", + shape=[num_layers, batch_size_each, hidden_size], + dtype='float32', + append_batch_size=False, + ) init_cell.persistable = True init_hidden.persistable = True - init_hidden_reshape = layers.reshape(init_hidden, - shape=[num_layers, -1, hidden_size]) - init_cell_reshape = layers.reshape(init_cell, - shape=[num_layers, -1, hidden_size]) + init_hidden_reshape = layers.reshape( + init_hidden, shape=[num_layers, -1, hidden_size] + ) + init_cell_reshape = layers.reshape( + init_cell, shape=[num_layers, -1, hidden_size] + ) x_emb = layers.embedding( input=x, @@ -359,29 +381,36 @@ def lm_model(hidden_size, is_sparse=False, param_attr=fluid.ParamAttr( name='embedding_para', - initializer=fluid.initializer.UniformInitializer(low=-init_scale, - high=init_scale))) - - x_emb = layers.reshape(x_emb, - shape=[-1, num_steps, hidden_size], - inplace=True) + initializer=fluid.initializer.UniformInitializer( + low=-init_scale, high=init_scale + ), + ), + ) + + x_emb = layers.reshape( + x_emb, shape=[-1, num_steps, hidden_size], inplace=True + ) if dropout != None and dropout > 0.0: - x_emb = layers.dropout(x_emb, - dropout_prob=dropout, - dropout_implementation='upscale_in_train') + x_emb = layers.dropout( + x_emb, + dropout_prob=dropout, + dropout_implementation='upscale_in_train', + ) if rnn_model == "padding": rnn_out, last_hidden, last_cell = padding_rnn( x_emb, len=num_steps, init_hidden=init_hidden_reshape, - init_cell=init_cell_reshape) + init_cell=init_cell_reshape, + ) elif rnn_model == "static": rnn_out, last_hidden, last_cell = encoder_static( x_emb, len=num_steps, init_hidden=init_hidden_reshape, - init_cell=init_cell_reshape) + init_cell=init_cell_reshape, + ) elif rnn_model == "cudnn": x_emb = layers.transpose(x_emb, perm=[1, 0, 2]) rnn_out, last_hidden, last_cell = layers.lstm( @@ -393,44 +422,61 @@ def lm_model(hidden_size, num_layers, is_bidirec=False, default_initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale)) + low=-init_scale, high=init_scale + ), + ) rnn_out = layers.transpose(rnn_out, perm=[1, 0, 2]) elif rnn_model == "basic_lstm": - rnn_out, last_hidden, last_cell = basic_lstm( x_emb, init_hidden, init_cell, hidden_size, \ - num_layers=num_layers, batch_first=True, dropout_prob=dropout, \ - param_attr = ParamAttr( initializer=fluid.initializer.UniformInitializer(low=-init_scale, high=init_scale) ), \ - bias_attr = ParamAttr( initializer = fluid.initializer.Constant(0.0) ), \ - forget_bias = 0.0) + rnn_out, last_hidden, last_cell = basic_lstm( + x_emb, + init_hidden, + init_cell, + hidden_size, + num_layers=num_layers, + batch_first=True, + dropout_prob=dropout, + 
param_attr=ParamAttr( + initializer=fluid.initializer.UniformInitializer( + low=-init_scale, high=init_scale + ) + ), + bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0)), + forget_bias=0.0, + ) else: print("type not support") return - rnn_out = layers.reshape(rnn_out, - shape=[-1, num_steps, hidden_size], - inplace=True) + rnn_out = layers.reshape( + rnn_out, shape=[-1, num_steps, hidden_size], inplace=True + ) softmax_weight = layers.create_parameter( [hidden_size, vocab_size], dtype="float32", name="softmax_weight", default_initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale)) + low=-init_scale, high=init_scale + ), + ) softmax_bias = layers.create_parameter( [vocab_size], dtype="float32", name='softmax_bias', default_initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale)) + low=-init_scale, high=init_scale + ), + ) projection = layers.matmul(rnn_out, softmax_weight) projection = layers.elementwise_add(projection, softmax_bias) - projection = layers.reshape(projection, - shape=[-1, vocab_size], - inplace=True) + projection = layers.reshape( + projection, shape=[-1, vocab_size], inplace=True + ) - loss = layers.softmax_with_cross_entropy(logits=projection, - label=y, - soft_label=False) + loss = layers.softmax_with_cross_entropy( + logits=projection, label=y, soft_label=False + ) loss = layers.reshape(loss, shape=[-1, num_steps], inplace=True) loss = layers.reduce_mean(loss, dim=[0]) @@ -452,7 +498,6 @@ def lm_model(hidden_size, class PaddingRNNTestBase(unittest.TestCase): - def setUp(self): self.reader = Reader() self.device_count = 1 @@ -485,64 +530,91 @@ class PaddingRNNTestBase(unittest.TestCase): self.startup_program = fluid.Program() with fluid.program_guard(self.main_program, self.startup_program): with fluid.unique_name.guard(): - res_vars = lm_model(config.hidden_size, - config.vocab_size, - config.batch_size, - num_layers=config.num_layers, - num_steps=config.num_steps, - init_scale=config.init_scale, - dropout=config.dropout, - rnn_model=config.rnn_model) - self.loss, self.last_hidden, self.last_cell, self.feed_order = res_vars + res_vars = lm_model( + config.hidden_size, + config.vocab_size, + config.batch_size, + num_layers=config.num_layers, + num_steps=config.num_steps, + init_scale=config.init_scale, + dropout=config.dropout, + rnn_model=config.rnn_model, + ) + ( + self.loss, + self.last_hidden, + self.last_cell, + self.feed_order, + ) = res_vars fluid.clip.set_gradient_clip( clip=fluid.clip.GradientClipByGlobalNorm( - clip_norm=config.max_grad_norm)) + clip_norm=config.max_grad_norm + ) + ) self.learning_rate = fluid.layers.create_global_var( name="learning_rate", shape=[1], value=1.0, dtype='float32', - persistable=True) + persistable=True, + ) optimizer = fluid.optimizer.SGD( - learning_rate=self.learning_rate) + learning_rate=self.learning_rate + ) optimizer.minimize(self.loss) self.exe.run(self.startup_program) if parallel: self.train_program = fluid.compiler.CompiledProgram( - self.main_program).with_data_parallel( - loss_name=self.loss.name, - build_strategy=self.build_strategy, - exec_strategy=self.exec_strategy) + self.main_program + ).with_data_parallel( + loss_name=self.loss.name, + build_strategy=self.build_strategy, + exec_strategy=self.exec_strategy, + ) else: self.train_program = self.main_program def _generate_init_data(self): - init_hidden = np.zeros((self.config.num_layers, self.config.batch_size, - self.config.hidden_size), - dtype='float32') - init_cell = 
np.zeros((self.config.num_layers, self.config.batch_size, - self.config.hidden_size), - dtype='float32') + init_hidden = np.zeros( + ( + self.config.num_layers, + self.config.batch_size, + self.config.hidden_size, + ), + dtype='float32', + ) + init_cell = np.zeros( + ( + self.config.num_layers, + self.config.batch_size, + self.config.hidden_size, + ), + dtype='float32', + ) return init_hidden, init_cell def _generate_new_lr(self, epoch_id=0, device_count=1): - new_lr = self.config.base_learning_rate * (self.config.lr_decay**max( - epoch_id + 1 - self.config.epoch_start_decay, 0.0)) + new_lr = self.config.base_learning_rate * ( + self.config.lr_decay + ** max(epoch_id + 1 - self.config.epoch_start_decay, 0.0) + ) lr = np.ones((self.device_count), dtype='float32') * new_lr return lr - def _prepare_input(self, - batch, - init_hidden=None, - init_cell=None, - epoch_id=0, - with_lr=True, - device_count=1): + def _prepare_input( + self, + batch, + init_hidden=None, + init_cell=None, + epoch_id=0, + with_lr=True, + device_count=1, + ): x, y = batch x = x.reshape((-1, self.config.num_steps, 1)) y = y.reshape((-1, 1)) @@ -573,16 +645,20 @@ class PaddingRNNTestBase(unittest.TestCase): init_cell=init_cell, epoch_id=epoch_id, with_lr=True, - device_count=self.device_count) - - fetch_outs = self.exe.run(self.train_program, - feed=input_data_feed, - fetch_list=[ - self.loss.name, "learning_rate", - self.last_hidden.name, - self.last_cell.name - ], - use_program_cache=use_program_cache) + device_count=self.device_count, + ) + + fetch_outs = self.exe.run( + self.train_program, + feed=input_data_feed, + fetch_list=[ + self.loss.name, + "learning_rate", + self.last_hidden.name, + self.last_cell.name, + ], + use_program_cache=use_program_cache, + ) cost_train = np.array(fetch_outs[0]) lr = np.array(fetch_outs[1]) @@ -607,9 +683,9 @@ class PaddingRNNTestBase(unittest.TestCase): ppl = np.append(ppl, train_ppl) return ppl - def compare_padding_static_mode(self, - parallel=True, - use_program_cache=True): + def compare_padding_static_mode( + self, parallel=True, use_program_cache=True + ): ''' Test that train ppl of padding mode is same to that of static mode ''' @@ -623,7 +699,6 @@ class PaddingRNNTestBase(unittest.TestCase): class EagerDeletionPaddingRNNTest(PaddingRNNTestBase): - def test_padding_mode_no_eager_deletion(self): ''' Test that train ppl of padding mode is same to that of static mode without eager deletion diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py index 018ab42c50a55602da8c948f9758b4889f22a09b..b67a72a5da2fdaed5404275df60e7f87000ff5a0 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py @@ -33,7 +33,6 @@ fluid.core._set_eager_deletion_mode(0.0, 1.0, True) class PyRNNBase(object): - def __init__(self, input_shape, output_shape): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") @@ -51,13 +50,13 @@ class PyRNNBase(object): class PySimpleRNN1(PyRNNBase): - def __init__(self, input_shape, output_shape): super(PySimpleRNN1, self).__init__(input_shape, output_shape) seq_len, batch_size, input_dim = input_shape - self.h_boot = np.random.normal(size=(batch_size, - input_dim)).astype("float32") + self.h_boot = np.random.normal(size=(batch_size, input_dim)).astype( + "float32" + ) self.scale = 1.0 / 2.0 men_dim = (seq_len, 
batch_size, input_dim) @@ -73,7 +72,6 @@ class PySimpleRNN1(PyRNNBase): class PySimpleRNN2(PyRNNBase): - def __init__(self, input_shape, output_shape): super(PySimpleRNN2, self).__init__(input_shape, output_shape) @@ -94,7 +92,7 @@ class PySimpleRNN2(PyRNNBase): hU = np.matmul(pre_mem, self.U).astype("float32") def py_sigmoid(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) self.mems[step_id] = py_sigmoid(xW + hU) self.y[step_id] = self.mems[step_id] @@ -140,14 +138,16 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype='float32', - name='h_boot') + h_boot = layers.data( + shape=[self.input_dim], dtype='float32', name='h_boot' + ) h_boot.stop_gradient = False rnn = layers.StaticRNN() @@ -155,8 +155,10 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) - h = layers.scale(x=layers.elementwise_add(x=h_pre, y=x_t), - scale=self.py_rnn.scale) + h = layers.scale( + x=layers.elementwise_add(x=h_pre, y=x_t), + scale=self.py_rnn.scale, + ) rnn.update_memory(h_pre, h) rnn.output(h) @@ -164,17 +166,18 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): return rnn() def forward(self): - gc_vars = core._get_eager_deletion_vars(self.main_program.desc, - [self.output.name]) + gc_vars = core._get_eager_deletion_vars( + self.main_program.desc, [self.output.name] + ) self.assertEqual(len(gc_vars), self.main_program.num_blocks) self.feed_map = { x: create_tensor(getattr(self.py_rnn, x), self.place) for x in self.data_field } exe = Executor(self.place) - out = exe.run(self.main_program, - feed=self.feed_map, - fetch_list=[self.output]) + out = exe.run( + self.main_program, feed=self.feed_map, fetch_list=[self.output] + ) return out[0] @@ -189,14 +192,17 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): ] gc_vars = core._get_eager_deletion_vars( - self.main_program.desc, [var.name for var in fetch_list]) + self.main_program.desc, [var.name for var in fetch_list] + ) self.assertEqual(len(gc_vars), self.main_program.num_blocks) exe = Executor(self.place) - return exe.run(self.main_program, - feed=self.feed_map, - fetch_list=fetch_list, - return_numpy=False) + return exe.run( + self.main_program, + feed=self.feed_map, + fetch_list=fetch_list, + return_numpy=False, + ) def test_backward(self, rtol=0.01): self.check_forward() @@ -213,10 +219,18 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): num_grad[idx], ana_grad[idx], rtol=rtol, - err_msg='num_grad (' + name + ') has diff at ' + - str(self.place) + '\nExpect ' + str(num_grad[idx]) + '\n' + - 'But Got' + str(ana_grad[idx]) + ' in class ' + - self.__class__.__name__) + err_msg='num_grad (' + + name + + ') has diff at ' + + str(self.place) + + '\nExpect ' + + str(num_grad[idx]) + + '\n' + + 'But Got' + + str(ana_grad[idx]) + + ' in class ' + + self.__class__.__name__, + ) def check_forward(self): pd_output = self.forward() @@ -277,14 +291,16 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, 
self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype='float32', - name='h_boot') + h_boot = layers.data( + shape=[self.input_dim], dtype='float32', name='h_boot' + ) h_boot.stop_gradient = False rnn = layers.StaticRNN() @@ -297,15 +313,19 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1): size=self.input_dim, param_attr=ParamAttr( name='W', - initializer=fluid.initializer.ConstantInitializer(1.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(1.0), + ), + bias_attr=False, + ) temp_r = layers.fc( input=h_pre, size=self.input_dim, param_attr=ParamAttr( name='U', - initializer=fluid.initializer.ConstantInitializer(0.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(0.0), + ), + bias_attr=False, + ) h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r)) @@ -334,16 +354,18 @@ class EagerDeletionRecurrentOpMultipleMemoryTest(EagerDeletionRecurrentOpTest1): ''' class PySimpleRNN3(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(EagerDeletionRecurrentOpMultipleMemoryTest.PySimpleRNN3, - self).__init__(input_shape, output_shape) + super( + EagerDeletionRecurrentOpMultipleMemoryTest.PySimpleRNN3, self + ).__init__(input_shape, output_shape) seq_len, batch_size, input_dim = input_shape - self.h_boot1 = np.random.normal(size=(batch_size, - input_dim)).astype("float32") - self.h_boot2 = np.random.normal(size=(batch_size, - input_dim)).astype("float32") + self.h_boot1 = np.random.normal( + size=(batch_size, input_dim) + ).astype("float32") + self.h_boot2 = np.random.normal( + size=(batch_size, input_dim) + ).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems1 = np.zeros(shape=men_dim).astype("float32") @@ -372,26 +394,33 @@ class EagerDeletionRecurrentOpMultipleMemoryTest(EagerDeletionRecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = EagerDeletionRecurrentOpMultipleMemoryTest.PySimpleRNN3( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot1 = layers.data(shape=[self.batch_size, self.input_dim], - dtype='float32', - name='h_boot1', - append_batch_size=False) + h_boot1 = layers.data( + shape=[self.batch_size, self.input_dim], + dtype='float32', + name='h_boot1', + append_batch_size=False, + ) h_boot1.stop_gradient = False - h_boot2 = layers.data(shape=[self.batch_size, self.input_dim], - dtype='float32', - name='h_boot2', - append_batch_size=False) + h_boot2 = layers.data( + shape=[self.batch_size, self.input_dim], + dtype='float32', + name='h_boot2', + append_batch_size=False, + ) h_boot2.stop_gradient = False rnn = layers.StaticRNN() @@ -426,10 +455,10 @@ class EagerDeletionRecurrentOpNoMemBootTest(EagerDeletionRecurrentOpTest1): ''' class 
PySimpleRNN4(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(EagerDeletionRecurrentOpNoMemBootTest.PySimpleRNN4, - self).__init__(input_shape, output_shape) + super( + EagerDeletionRecurrentOpNoMemBootTest.PySimpleRNN4, self + ).__init__(input_shape, output_shape) men_dim = input_shape self.mems = np.zeros(shape=men_dim).astype("float32") @@ -453,16 +482,19 @@ class EagerDeletionRecurrentOpNoMemBootTest(EagerDeletionRecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = EagerDeletionRecurrentOpNoMemBootTest.PySimpleRNN4( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False rnn = layers.StaticRNN() @@ -496,10 +528,10 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1): ''' class PySimpleRNN5(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(EagerDeletionTwoRecurrentOpsTest.PySimpleRNN5, - self).__init__(input_shape, output_shape) + super(EagerDeletionTwoRecurrentOpsTest.PySimpleRNN5, self).__init__( + input_shape, output_shape + ) self.mem_0 = np.zeros(shape=input_shape).astype("float32") self.mem_1 = np.zeros(shape=input_shape).astype("float32") self.rnn_0_output = np.zeros(shape=input_shape).astype("float32") @@ -508,13 +540,15 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1): # First Rnn for step in range(self.x.shape[0]): x_t = self.x[step] - pre_mem = np.zeros_like(x_t) if step == 0 else self.mem_0[step - - 1] + pre_mem = ( + np.zeros_like(x_t) if step == 0 else self.mem_0[step - 1] + ) self.mem_0[step] = x_t + pre_mem self.rnn_0_output[step] = self.mem_0[step] # Second RNN - pre_mem = np.zeros_like(x) if step_id == 0 else self.mem_1[step_id - - 1] + pre_mem = ( + np.zeros_like(x) if step_id == 0 else self.mem_1[step_id - 1] + ) self.mem_1[step_id] = x + np.sum(self.rnn_0_output) self.y[step_id] = self.mem_1[step_id] + pre_mem @@ -530,16 +564,19 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = EagerDeletionTwoRecurrentOpsTest.PySimpleRNN5( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False rnn_0 = layers.StaticRNN() @@ -563,8 +600,9 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1): return rnn_1() -class EagerDeletionRecurrentOpParallelExecutorTest(EagerDeletionRecurrentOpTest1 - ): +class EagerDeletionRecurrentOpParallelExecutorTest( + EagerDeletionRecurrentOpTest1 +): ''' Test RNNOp 
with ParallelExecutor equation: @@ -586,10 +624,12 @@ class EagerDeletionRecurrentOpParallelExecutorTest(EagerDeletionRecurrentOpTest1 build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = True exec_strategy = fluid.ExecutionStrategy() - parallel_exe = fluid.ParallelExecutor(use_cuda=False, - main_program=self.main_program, - build_strategy=build_strategy, - exec_strategy=exec_strategy) + parallel_exe = fluid.ParallelExecutor( + use_cuda=False, + main_program=self.main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + ) out = parallel_exe.run(feed=self.feed_map, fetch_list=[self.output]) return out[0] @@ -606,20 +646,23 @@ class EagerDeletionRecurrentOpParallelExecutorTest(EagerDeletionRecurrentOpTest1 build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = True exec_strategy = fluid.ExecutionStrategy() - parallel_exe = fluid.ParallelExecutor(use_cuda=False, - loss_name=self.output.name, - main_program=self.main_program, - build_strategy=build_strategy, - exec_strategy=exec_strategy) - return parallel_exe.run(feed=self.feed_map, - fetch_list=fetch_list, - return_numpy=False) + parallel_exe = fluid.ParallelExecutor( + use_cuda=False, + loss_name=self.output.name, + main_program=self.main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy, + ) + return parallel_exe.run( + feed=self.feed_map, fetch_list=fetch_list, return_numpy=False + ) class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( - EagerDeletionRecurrentOpTest1): + EagerDeletionRecurrentOpTest1 +): ''' - Test one forward only RNN and one backward RNN in one program + Test one forward only RNN and one backward RNN in one program ''' def setUp(self): @@ -635,11 +678,12 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False) + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype='float32', - name='h_boot') + h_boot = layers.data( + shape=[self.input_dim], dtype='float32', name='h_boot' + ) h_boot.stop_gradient = False forward_only_rnn = layers.StaticRNN() @@ -647,8 +691,10 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( h_pre = forward_only_rnn.memory(init=h_boot) x_t = forward_only_rnn.step_input(x) - h = layers.scale(x=layers.elementwise_add(x=h_pre, y=x_t), - scale=self.py_rnn.scale) + h = layers.scale( + x=layers.elementwise_add(x=h_pre, y=x_t), + scale=self.py_rnn.scale, + ) forward_only_rnn.update_memory(h_pre, h) forward_only_rnn.output(h) @@ -661,8 +707,10 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) - h = layers.scale(x=layers.elementwise_add(x=h_pre, y=x_t), - scale=self.py_rnn.scale) + h = layers.scale( + x=layers.elementwise_add(x=h_pre, y=x_t), + scale=self.py_rnn.scale, + ) rnn.update_memory(h_pre, h) rnn.output(h) @@ -675,9 +723,11 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( for x in self.data_field } exe = Executor(self.place) - out = exe.run(self.main_program, - feed=self.feed_map, - fetch_list=[self.forward_only_output, self.output]) + out = exe.run( + self.main_program, + feed=self.feed_map, + fetch_list=[self.forward_only_output, self.output], + ) return out[0], out[1] @@ -691,7 +741,6 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( class RecurrentNet(paddle.nn.Layer): - def __init__(self): super(RecurrentNet, self).__init__() self.cell = paddle.nn.SimpleRNNCell(16, 32) @@ -703,7 
+752,6 @@ class RecurrentNet(paddle.nn.Layer): class TestDy2StRecurrentOpBackward(unittest.TestCase): - def setUp(self): paddle.disable_static() paddle.seed(100) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index 5aa5ff4eeccd3a53107eeca2d28d540d8c967d7e..1793d69f48f18f605618e9a48c753e619acf58a9 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -32,7 +32,6 @@ fluid.core._set_eager_deletion_mode(0.0, 1.0, True) class TestEagerDeletionWhileOpBase(unittest.TestCase): - def test_main(self): places = [ core.CPUPlace(), @@ -51,29 +50,30 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): self.with_data_parallel = with_data_parallel if not core.is_compiled_with_cuda() and isinstance( - self.place, core.CUDAPlace): + self.place, core.CUDAPlace + ): return if isinstance(self.place, core.CUDAPlace): - device_cnt = core.get_cuda_device_count( - ) if self.with_data_parallel else 1 + device_cnt = ( + core.get_cuda_device_count() if self.with_data_parallel else 1 + ) else: - device_cnt = int( - os.environ.get('CPU_NUM', multiprocessing.cpu_count()) - ) if self.with_data_parallel else 1 - - d0 = layers.data("d0", - shape=[10], - append_batch_size=False, - dtype='float32') - d1 = layers.data("d1", - shape=[10], - append_batch_size=False, - dtype='float32') - d2 = layers.data("d2", - shape=[10], - append_batch_size=False, - dtype='float32') + device_cnt = ( + int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + if self.with_data_parallel + else 1 + ) + + d0 = layers.data( + "d0", shape=[10], append_batch_size=False, dtype='float32' + ) + d1 = layers.data( + "d1", shape=[10], append_batch_size=False, dtype='float32' + ) + d2 = layers.data( + "d2", shape=[10], append_batch_size=False, dtype='float32' + ) i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True @@ -136,7 +136,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): optim.minimize(loss) gc_vars = core._get_eager_deletion_vars( - fluid.default_main_program().desc, [loss.name]) + fluid.default_main_program().desc, [loss.name] + ) self.assertEqual(len(gc_vars), 5) exe = Executor(self.place) @@ -145,8 +146,8 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): prog = fluid.default_main_program() if self.with_data_parallel: prog = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name) + fluid.default_main_program() + ).with_data_parallel(loss_name=loss.name) for _ in range(5): d = [] @@ -157,13 +158,11 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): else: d.append(numpy.array([tmp] * device_cnt)) - outs = exe.run(program=prog, - feed={ - 'd0': d[0], - 'd1': d[1], - 'd2': d[2] - }, - fetch_list=[sum_result]) + outs = exe.run( + program=prog, + feed={'d0': d[0], 'd1': d[1], 'd2': d[2]}, + fetch_list=[sum_result], + ) self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) diff --git a/python/paddle/fluid/tests/unittests/test_eager_run_program.py b/python/paddle/fluid/tests/unittests/test_eager_run_program.py index df151f27ec21a5ffd9026ac7e9a52a4cfb76a7d6..847bdc3d1a8fc3374ffe52954538108e6a961fec 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_run_program.py +++ b/python/paddle/fluid/tests/unittests/test_eager_run_program.py @@ -15,11 +15,18 @@ import paddle import numpy as np from paddle import _legacy_C_ops -from 
paddle.fluid.framework import _test_eager_guard, Variable, _in_legacy_dygraph +from paddle.fluid.framework import ( + _test_eager_guard, + Variable, + _in_legacy_dygraph, +) from paddle.fluid import core from paddle.fluid.layers.utils import _hash_with_id from paddle.fluid.dygraph.base import switch_to_static_graph -from paddle.fluid.executor import _is_enable_standalone_executor, _is_dy2st_enable_standalone_executor +from paddle.fluid.executor import ( + _is_enable_standalone_executor, + _is_dy2st_enable_standalone_executor, +) import unittest @@ -60,11 +67,21 @@ def _create_out(var): var_desc = var.desc varbase = None if _in_legacy_dygraph(): - var_base = core.VarBase(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), False) + var_base = core.VarBase( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) else: - var_base = core.eager.Tensor(var_desc.dtype(), var_desc.shape(), - var_desc.name(), var_desc.type(), False) + var_base = core.eager.Tensor( + var_desc.dtype(), + var_desc.shape(), + var_desc.name(), + var_desc.type(), + False, + ) return var_base @@ -72,16 +89,17 @@ def _create_out(var): def _add_build_strategy_for(input_program, start_op_index, end_op_index): compiled_program = paddle.static.CompiledProgram( core.Graph(input_program.desc, start_op_index, end_op_index), - build_strategy=paddle.static.BuildStrategy()) - compiled_program._compile(core.Scope(), - paddle.framework._current_expected_place()) + build_strategy=paddle.static.BuildStrategy(), + ) + compiled_program._compile( + core.Scope(), paddle.framework._current_expected_place() + ) ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph) builded_program = ir_graph.to_program() return builded_program class TestRunProgram(unittest.TestCase): - def test_eager(self): paddle.set_device('cpu') paddle.enable_static() @@ -95,12 +113,13 @@ class TestRunProgram(unittest.TestCase): main_program = paddle.static.default_main_program() program = _append_backward_desc(main_program, [out]) forward_program = _add_build_strategy_for( - program, 0, - main_program.desc.block(0).op_size()) + program, 0, main_program.desc.block(0).op_size() + ) backward_program = _add_build_strategy_for( program, main_program.desc.block(0).op_size() + 2, - program.desc.block(0).op_size()) + program.desc.block(0).op_size(), + ) paddle.disable_static('cpu') # step 2: call run_program in eager mode @@ -120,31 +139,52 @@ class TestRunProgram(unittest.TestCase): scope = core.Scope() attrs = [ 'global_block', - program.desc.block(0), 'start_op_index', 0, 'end_op_index', - main_program.desc.block(0).op_size(), 'is_test', False, + program.desc.block(0), + 'start_op_index', + 0, + 'end_op_index', + main_program.desc.block(0).op_size(), + 'is_test', + False, 'program_id', - _hash_with_id(program) + _hash_with_id(program), ] - use_interpretorcore = _is_enable_standalone_executor( - ) and _is_dy2st_enable_standalone_executor() + use_interpretorcore = ( + _is_enable_standalone_executor() + and _is_dy2st_enable_standalone_executor() + ) attrs.extend(('use_interpretorcore', use_interpretorcore)) if use_interpretorcore: attrs.extend( - ('forward_global_block', forward_program.desc.block(0), - 'backward_global_block', backward_program.desc.block(0))) - - _legacy_C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope], - [fake_var], None, *attrs) + ( + 'forward_global_block', + forward_program.desc.block(0), + 'backward_global_block', + backward_program.desc.block(0), + ) + ) + + 
_legacy_C_ops.run_program( + [x_t, y_t], + [fake_var], + [out_t], + [scope], + [fake_var], + None, + *attrs + ) loss = paddle.mean(out_t) loss.backward() np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy()) np.testing.assert_array_equal( - np.ones([2, 4]) * 0.5, x_t.grad.numpy()) + np.ones([2, 4]) * 0.5, x_t.grad.numpy() + ) np.testing.assert_array_equal( - np.ones([4, 2]) * 0.5, y_t.grad.numpy()) + np.ones([4, 2]) * 0.5, y_t.grad.numpy() + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_eager_trace_op.py b/python/paddle/fluid/tests/unittests/test_eager_trace_op.py index 771395cd2239f4573be40b9a7654ba146540bff0..6f93c4bd19dc16649edc1f0294c1a8d0dac26e99 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_trace_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_trace_op.py @@ -19,26 +19,25 @@ from paddle.fluid.framework import _test_eager_guard class TestEagerTraceOp(unittest.TestCase): - def test_branches(self): with _test_eager_guard(): data = np.random.random([1, 1]).astype(np.float32) x = paddle.to_tensor(data) paddle.fluid.framework._dygraph_tracer().trace_op( - 'broadcast_tensors', { - 'X': [x, x], - 'Out': [x, x] - }, {'Out': [x, x]}, {}) + 'broadcast_tensors', + {'X': [x, x], 'Out': [x, x]}, + {'Out': [x, x]}, + {}, + ) paddle.fluid.framework._dygraph_tracer().trace_op( - 'scale', {'X': x}, {'Out': x}, {'scale': 0.5}) + 'scale', {'X': x}, {'Out': x}, {'scale': 0.5} + ) scale = paddle.to_tensor(np.random.random([1]).astype(np.float32)) paddle.fluid.framework._dygraph_tracer().trace_op( - 'instance_norm', { - 'Scale': [scale], - 'X': [x] - }, {'Y': [x]}, {}) + 'instance_norm', {'Scale': [scale], 'X': [x]}, {'Y': [x]}, {} + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py index 0a245d851cf0359c74c8ee33f61c0fde3a041afa..c737ed2e41ca3582313aa0de3f9f2d563ed99097 100644 --- a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py +++ b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py @@ -18,23 +18,26 @@ from op_test import OpTest import paddle -def python_edit_distance(input, - label, - input_length=None, - label_length=None, - normalized=True, - ignored_tokens=None): +def python_edit_distance( + input, + label, + input_length=None, + label_length=None, + normalized=True, + ignored_tokens=None, +): return paddle.nn.functional.loss.edit_distance( input, label, normalized=normalized, ignored_tokens=ignored_tokens, input_length=input_length, - label_length=label_length) + label_length=label_length, + ) def Levenshtein(hyp, ref): - """ Compute the Levenshtein distance between two strings. + """Compute the Levenshtein distance between two strings. 
:param hyp: hypothesis string in index :type hyp: list @@ -65,7 +68,6 @@ def Levenshtein(hyp, ref): class TestEditDistanceOp(OpTest): - def setUp(self): self.op_type = "edit_distance" self.python_api = python_edit_distance @@ -85,8 +87,9 @@ class TestEditDistanceOp(OpTest): x2_offset = 0 for i in range(0, num_strs): distance[i] = Levenshtein( - hyp=x1[x1_offset:(x1_offset + self.x1_lod[i])], - ref=x2[x2_offset:(x2_offset + self.x2_lod[i])]) + hyp=x1[x1_offset : (x1_offset + self.x1_lod[i])], + ref=x2[x2_offset : (x2_offset + self.x2_lod[i])], + ) x1_offset += self.x1_lod[i] x2_offset += self.x2_lod[i] if normalized is True: @@ -102,7 +105,6 @@ class TestEditDistanceOp(OpTest): class TestEditDistanceOpNormalizedCase0(OpTest): - def reset_config(self): pass @@ -130,8 +132,9 @@ class TestEditDistanceOpNormalizedCase0(OpTest): x2_offset = 0 for i in range(0, num_strs): distance[i] = Levenshtein( - hyp=self.x1[x1_offset:(x1_offset + self.x1_lod[i])], - ref=self.x2[x2_offset:(x2_offset + self.x2_lod[i])]) + hyp=self.x1[x1_offset : (x1_offset + self.x1_lod[i])], + ref=self.x2[x2_offset : (x2_offset + self.x2_lod[i])], + ) x1_offset += self.x1_lod[i] x2_offset += self.x2_lod[i] if normalized is True: @@ -141,7 +144,7 @@ class TestEditDistanceOpNormalizedCase0(OpTest): self.attrs = {'normalized': normalized} self.inputs = { 'Hyps': (self.x1, [self.x1_lod]), - 'Refs': (self.x2, [self.x2_lod]) + 'Refs': (self.x2, [self.x2_lod]), } self.outputs = {'Out': distance, 'SequenceNum': sequence_num} @@ -152,21 +155,18 @@ class TestEditDistanceOpNormalizedCase0(OpTest): class TestEditDistanceOpNormalizedCase1(TestEditDistanceOpNormalizedCase0): - def reset_config(self): self.x1_lod = [0, 6, 0] self.x2_lod = [2, 1, 2] class TestEditDistanceOpNormalizedCase2(TestEditDistanceOpNormalizedCase0): - def reset_config(self): self.x1_lod = [0, 0, 6] self.x2_lod = [2, 2, 1] class TestEditDistanceOpNormalizedTensor(OpTest): - def reset_config(self): self.x1 = np.array([[10, 3, 0, 0], [6, 5, 8, 2]], dtype=np.int64) self.x2 = np.array([[10, 4, 0], [6, 7, 8]], dtype=np.int64) @@ -185,8 +185,10 @@ class TestEditDistanceOpNormalizedTensor(OpTest): sequence_num = np.array(num_strs).astype("int64") for i in range(0, num_strs): - distance[i] = Levenshtein(hyp=self.x1[i][0:self.x1_lod[i]], - ref=self.x2[i][0:self.x2_lod[i]]) + distance[i] = Levenshtein( + hyp=self.x1[i][0 : self.x1_lod[i]], + ref=self.x2[i][0 : self.x2_lod[i]], + ) if normalized is True: len_ref = self.x2_lod[i] distance[i] = distance[i] / len_ref @@ -196,7 +198,7 @@ class TestEditDistanceOpNormalizedTensor(OpTest): 'Hyps': self.x1, 'Refs': self.x2, 'HypsLength': self.x1_lod, - 'RefsLength': self.x2_lod + 'RefsLength': self.x2_lod, } self.outputs = {'Out': distance, 'SequenceNum': sequence_num} diff --git a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py index 3ed4962cff66c0d922480b19a41ef9ac11b0ac39..aea56537654da58ff393ad9f94d125179d8a70b5 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py @@ -19,7 +19,6 @@ import unittest class EagerOpAPIGenerateTestCase(unittest.TestCase): - def test_elementwise_add(self): with _test_eager_guard(): paddle.set_device("cpu") @@ -35,8 +34,9 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): def test_sum(self): with _test_eager_guard(): - x_data = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, - 0.7]]).astype('float32') + x_data = np.array( 
+ [[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]] + ).astype('float32') x = paddle.to_tensor(x_data, 'float32') out = paddle.sum(x, axis=0) out_arr = out.numpy() @@ -61,8 +61,8 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): out = paddle.nn.functional.sigmoid(x) out_arr = out.numpy() out_arr_expected = np.array( - [0.40131234, 0.450166, 0.52497919, - 0.57444252]).astype('float32') + [0.40131234, 0.450166, 0.52497919, 0.57444252] + ).astype('float32') np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py index 82881ee0da6190be9d6d39f0ef2b6280bd637a3c..eee9e8eac4b0e303127ab7247cdf9a5e35208929 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py @@ -15,13 +15,18 @@ import paddle.fluid.core as core import paddle import numpy as np -from paddle.fluid.framework import EagerParamBase, _current_expected_place, _disable_legacy_dygraph, _test_eager_guard, in_dygraph_mode +from paddle.fluid.framework import ( + EagerParamBase, + _current_expected_place, + _disable_legacy_dygraph, + _test_eager_guard, + in_dygraph_mode, +) import unittest import copy class EagerScaleTestCase(unittest.TestCase): - def test_scale_base(self): with _test_eager_guard(): paddle.set_device("cpu") @@ -40,8 +45,9 @@ class EagerScaleTestCase(unittest.TestCase): paddle.set_device("cpu") input_data = np.ones([4, 16, 16, 32]).astype('float32') - data_eager = paddle.to_tensor(input_data, 'float32', - core.CPUPlace(), False) + data_eager = paddle.to_tensor( + input_data, 'float32', core.CPUPlace(), False + ) grad_data = np.ones([4, 16, 16, 32]).astype('float32') grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace()) @@ -59,32 +65,34 @@ class EagerScaleTestCase(unittest.TestCase): paddle.set_device("cpu") input_data = np.ones([4, 16, 16, 32]).astype('float32') - data_eager = paddle.to_tensor(input_data, 'float32', - core.CPUPlace(), False) + data_eager = paddle.to_tensor( + input_data, 'float32', core.CPUPlace(), False + ) grad_data = np.ones([4, 16, 16, 32]).astype('float32') grad_data2 = np.ones([4, 16]).astype('float32') grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace()) - grad_eager2 = paddle.to_tensor(grad_data2, 'float32', - core.CPUPlace()) + grad_eager2 = paddle.to_tensor( + grad_data2, 'float32', core.CPUPlace() + ) data_eager.retain_grads() out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True) self.assertIsNone(data_eager.grad) with self.assertRaisesRegexp( - AssertionError, - "The type of grad_tensor must be paddle.Tensor"): + AssertionError, "The type of grad_tensor must be paddle.Tensor" + ): out_eager.backward(grad_data, False) with self.assertRaisesRegexp( - AssertionError, - "Tensor shape not match, Tensor of grad_tensor /*"): + AssertionError, + "Tensor shape not match, Tensor of grad_tensor /*", + ): out_eager.backward(grad_eager2, False) class EagerDtypeTestCase(unittest.TestCase): - def check_to_tesnsor_and_numpy(self, dtype, proto_dtype): with _test_eager_guard(): arr = np.random.random([4, 16, 16, 32]).astype(dtype) @@ -103,14 +111,15 @@ class EagerDtypeTestCase(unittest.TestCase): self.check_to_tesnsor_and_numpy('float16', core.VarDesc.VarType.FP16) self.check_to_tesnsor_and_numpy('float32', core.VarDesc.VarType.FP32) self.check_to_tesnsor_and_numpy('float64', core.VarDesc.VarType.FP64) - self.check_to_tesnsor_and_numpy('complex64', - 
core.VarDesc.VarType.COMPLEX64) - self.check_to_tesnsor_and_numpy('complex128', - core.VarDesc.VarType.COMPLEX128) + self.check_to_tesnsor_and_numpy( + 'complex64', core.VarDesc.VarType.COMPLEX64 + ) + self.check_to_tesnsor_and_numpy( + 'complex128', core.VarDesc.VarType.COMPLEX128 + ) class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): - def constructor(self, place): egr_tensor = core.eager.Tensor() self.assertEqual(egr_tensor.persistable, False) @@ -119,17 +128,22 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor.stop_gradient, True) - egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32, - [4, 16, 16, 32], "test_eager_tensor", - core.VarDesc.VarType.LOD_TENSOR, True) + egr_tensor0 = core.eager.Tensor( + core.VarDesc.VarType.FP32, + [4, 16, 16, 32], + "test_eager_tensor", + core.VarDesc.VarType.LOD_TENSOR, + True, + ) self.assertEqual(egr_tensor0.persistable, True) self.assertEqual(egr_tensor0.name, "test_eager_tensor") self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) arr0 = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor1 = core.eager.Tensor(arr0, place, True, False, - "numpy_tensor1", False) + egr_tensor1 = core.eager.Tensor( + arr0, place, True, False, "numpy_tensor1", False + ) self.assertEqual(egr_tensor1.persistable, True) self.assertEqual(egr_tensor1.name, "numpy_tensor1") self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) @@ -139,8 +153,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): np.testing.assert_array_equal(egr_tensor1.numpy(), arr0) arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64) - egr_tensor2 = core.eager.Tensor(arr1, place, False, True, - "numpy_tensor2", True) + egr_tensor2 = core.eager.Tensor( + arr1, place, False, True, "numpy_tensor2", True + ) self.assertEqual(egr_tensor2.persistable, False) self.assertEqual(egr_tensor2.name, "numpy_tensor2") self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) @@ -158,7 +173,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor3.stop_gradient, True) self.assertTrue( egr_tensor3.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) np.testing.assert_array_equal(egr_tensor3.numpy(), arr2) egr_tensor3.stop_gradient = False @@ -170,7 +187,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor4.stop_gradient, True) self.assertTrue( egr_tensor4.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) np.testing.assert_array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()) arr4 = np.random.rand(4, 16, 16, 32).astype('float32') @@ -253,16 +272,18 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(zero_dim_param.shape, []) with self.assertRaisesRegexp( - ValueError, "The shape of Parameter should not be None"): + ValueError, "The shape of Parameter should not be None" + ): eager_param = EagerParamBase(shape=None, dtype="float32") with self.assertRaisesRegexp( - ValueError, "The dtype of Parameter should not be None"): + ValueError, "The dtype of Parameter should not be None" + ): eager_param = EagerParamBase(shape=[1, 1], dtype=None) with self.assertRaisesRegexp( - ValueError, - "Each dimension of shape for Parameter 
must be greater than 0, but received /*" + ValueError, + "Each dimension of shape for Parameter must be greater than 0, but received /*", ): eager_param = EagerParamBase(shape=[-1], dtype="float32") @@ -271,19 +292,19 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): eager_param.trainable = False self.assertFalse(eager_param.trainable) with self.assertRaisesRegexp( - ValueError, - "The type of trainable MUST be bool, but the type is /*"): + ValueError, "The type of trainable MUST be bool, but the type is /*" + ): eager_param.trainable = "False" - eager_param_2 = EagerParamBase(shape=paddle.shape( - paddle.to_tensor([1, 2, 3, 4])), - dtype="float32") + eager_param_2 = EagerParamBase( + shape=paddle.shape(paddle.to_tensor([1, 2, 3, 4])), dtype="float32" + ) self.assertTrue(eager_param_2.trainable) eager_param_2.trainable = False self.assertFalse(eager_param_2.trainable) with self.assertRaisesRegexp( - ValueError, - "The type of trainable MUST be bool, but the type is /*"): + ValueError, "The type of trainable MUST be bool, but the type is /*" + ): eager_param_2.trainable = "False" def test_constructor(self): @@ -306,7 +327,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor0.stop_gradient, True) @@ -326,9 +349,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor2.stop_gradient, True) - egr_tensor3 = core.eager.Tensor(arr, - place=place, - name="new_eager_tensor") + egr_tensor3 = core.eager.Tensor( + arr, place=place, name="new_eager_tensor" + ) self.assertEqual(egr_tensor3.persistable, False) self.assertTrue("new_eager_tensor" in egr_tensor3.name) self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32]) @@ -336,10 +359,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor3.stop_gradient, True) - egr_tensor4 = core.eager.Tensor(arr, - place=place, - persistable=True, - name="new_eager_tensor") + egr_tensor4 = core.eager.Tensor( + arr, place=place, persistable=True, name="new_eager_tensor" + ) self.assertEqual(egr_tensor4.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor4.name) self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32]) @@ -347,11 +369,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor4.stop_gradient, True) - egr_tensor5 = core.eager.Tensor(arr, - core.CPUPlace(), - persistable=True, - name="new_eager_tensor", - zero_copy=True) + egr_tensor5 = core.eager.Tensor( + arr, + core.CPUPlace(), + persistable=True, + name="new_eager_tensor", + zero_copy=True, + ) self.assertEqual(egr_tensor5.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor5.name) self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32]) @@ -359,11 +383,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.stop_gradient, True) - egr_tensor6 = core.eager.Tensor(arr, - place=core.CPUPlace(), - persistable=True, - 
name="new_eager_tensor", - zero_copy=True) + egr_tensor6 = core.eager.Tensor( + arr, + place=core.CPUPlace(), + persistable=True, + name="new_eager_tensor", + zero_copy=True, + ) self.assertEqual(egr_tensor6.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor6.name) self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32]) @@ -371,11 +397,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.stop_gradient, True) - egr_tensor7 = core.eager.Tensor(arr, - place=place, - persistable=True, - name="new_eager_tensor", - zero_copy=True) + egr_tensor7 = core.eager.Tensor( + arr, + place=place, + persistable=True, + name="new_eager_tensor", + zero_copy=True, + ) self.assertEqual(egr_tensor7.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor7.name) self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32]) @@ -383,12 +411,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.stop_gradient, True) - egr_tensor8 = core.eager.Tensor(arr, - place=place, - persistable=True, - name="new_eager_tensor", - zero_copy=True, - stop_gradient=False) + egr_tensor8 = core.eager.Tensor( + arr, + place=place, + persistable=True, + name="new_eager_tensor", + zero_copy=True, + stop_gradient=False, + ) self.assertEqual(egr_tensor8.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor8.name) self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32]) @@ -396,12 +426,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.stop_gradient, False) - egr_tensor9 = core.eager.Tensor(arr, - place, - True, - True, - "new_eager_tensor", - stop_gradient=False) + egr_tensor9 = core.eager.Tensor( + arr, place, True, True, "new_eager_tensor", stop_gradient=False + ) self.assertEqual(egr_tensor9.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor9.name) self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32]) @@ -409,12 +436,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.stop_gradient, False) - egr_tensor10 = core.eager.Tensor(arr, - place, - True, - True, - name="new_eager_tensor", - stop_gradient=False) + egr_tensor10 = core.eager.Tensor( + arr, place, True, True, name="new_eager_tensor", stop_gradient=False + ) self.assertEqual(egr_tensor10.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor10.name) self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32]) @@ -422,12 +446,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.stop_gradient, False) - egr_tensor11 = core.eager.Tensor(arr, - place, - True, - zero_copy=True, - name="new_eager_tensor", - stop_gradient=False) + egr_tensor11 = core.eager.Tensor( + arr, + place, + True, + zero_copy=True, + name="new_eager_tensor", + stop_gradient=False, + ) self.assertEqual(egr_tensor11.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor11.name) self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32]) @@ -435,12 +461,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) 
self.assertEqual(egr_tensor11.stop_gradient, False) - egr_tensor12 = core.eager.Tensor(arr, - place, - persistable=True, - zero_copy=True, - name="new_eager_tensor", - stop_gradient=False) + egr_tensor12 = core.eager.Tensor( + arr, + place, + persistable=True, + zero_copy=True, + name="new_eager_tensor", + stop_gradient=False, + ) self.assertEqual(egr_tensor12.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor12.name) self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32]) @@ -448,12 +476,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, False) - egr_tensor13 = core.eager.Tensor(value=arr, - place=place, - persistable=True, - zero_copy=True, - name="new_eager_tensor", - stop_gradient=False) + egr_tensor13 = core.eager.Tensor( + value=arr, + place=place, + persistable=True, + zero_copy=True, + name="new_eager_tensor", + stop_gradient=False, + ) self.assertEqual(egr_tensor13.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor13.name) self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32]) @@ -462,11 +492,13 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor13.stop_gradient, False) # special case - egr_tensor14 = core.eager.Tensor(dtype=core.VarDesc.VarType.FP32, - dims=[4, 16, 16, 32], - name="special_eager_tensor", - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=True) + egr_tensor14 = core.eager.Tensor( + dtype=core.VarDesc.VarType.FP32, + dims=[4, 16, 16, 32], + name="special_eager_tensor", + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=True, + ) self.assertEqual(egr_tensor14.persistable, True) self.assertEqual(egr_tensor14.name, "special_eager_tensor") self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32]) @@ -481,11 +513,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor15.stop_gradient, True) self.assertTrue( egr_tensor15.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) np.testing.assert_array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()) - egr_tensor16 = core.eager.Tensor(value=egr_tensor4, - name="new_eager_tensor") + egr_tensor16 = core.eager.Tensor( + value=egr_tensor4, name="new_eager_tensor" + ) self.assertEqual(egr_tensor16.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor16.name) self.assertEqual(egr_tensor16.shape, egr_tensor4.shape) @@ -493,7 +528,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor16.stop_gradient, True) self.assertTrue( egr_tensor16.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) np.testing.assert_array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()) egr_tensor17 = core.eager.Tensor( @@ -547,7 +584,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor20.stop_gradient, True) self.assertTrue( egr_tensor20.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) np.testing.assert_array_equal(egr_tensor20.numpy(), x) egr_tensor21 = core.eager.Tensor(value=t, place=place) @@ -577,9 +616,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor23.place._equals(place)) 
np.testing.assert_array_equal(egr_tensor23.numpy(), x) - egr_tensor24 = core.eager.Tensor(value=t, - place=place, - name="from_framework_tensor") + egr_tensor24 = core.eager.Tensor( + value=t, place=place, name="from_framework_tensor" + ) self.assertEqual(egr_tensor24.persistable, False) self.assertTrue("from_framework_tensor" in egr_tensor24.name) self.assertEqual(egr_tensor24.shape, [3, 3]) @@ -609,15 +648,18 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( - [4, 16, 16, 32]).astype('float32') - tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, - core.CPUPlace()) + [4, 16, 16, 32] + ).astype('float32') + tensor = paddle.to_tensor( + arr, core.VarDesc.VarType.FP32, core.CPUPlace() + ) self.assertEqual(tensor.stop_gradient, True) tensor.stop_gradient = False print("Set persistable") tensor.persistable = False - tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor1 = paddle.to_tensor( + arr1, core.VarDesc.VarType.FP32, core.CPUPlace() + ) tensor1.persistable = True self.assertEqual(tensor1.stop_gradient, True) np.testing.assert_array_equal(tensor.numpy(), arr) @@ -629,8 +671,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): np.testing.assert_array_equal(tensor.numpy(), arr1) print("Test _copy_to") - tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor2 = paddle.to_tensor( + arr2, core.VarDesc.VarType.FP32, core.CPUPlace() + ) np.testing.assert_array_equal(tensor2.numpy(), arr2) self.assertTrue(tensor2.place.is_cpu_place()) tensor2.persistable = True @@ -656,8 +699,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned') tensor11 = tensor10._copy_to(core.CUDAPlace(0), True) - np.testing.assert_array_equal(tensor10.numpy(), - tensor11.numpy()) + np.testing.assert_array_equal( + tensor10.numpy(), tensor11.numpy() + ) else: tensor3 = tensor2._copy_to(core.CPUPlace(), True) np.testing.assert_array_equal(tensor3.numpy(), arr2) @@ -676,18 +720,22 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( - [4, 16, 16, 32]).astype('float32') + [4, 16, 16, 32] + ).astype('float32') tensor = None tensor2 = None - tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor = paddle.to_tensor( + arr, core.VarDesc.VarType.FP32, core.CPUPlace() + ) tensor3 = core.eager.Tensor(value=tensor, place=core.CPUPlace()) if core.is_compiled_with_cuda(): - tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, - core.CUDAPlace(0)) + tensor2 = paddle.to_tensor( + arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0) + ) else: - tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor2 = paddle.to_tensor( + arr2, core.VarDesc.VarType.FP32, core.CPUPlace() + ) np.testing.assert_array_equal(tensor.numpy(), arr) np.testing.assert_array_equal(tensor2.numpy(), arr2) tensor2._share_buffer_to(tensor) @@ -704,18 +752,22 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( - [4, 16, 16, 
32]).astype('float32') + [4, 16, 16, 32] + ).astype('float32') tensor = None tensor2 = None - tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor = paddle.to_tensor( + arr, core.VarDesc.VarType.FP32, core.CPUPlace() + ) tensor3 = core.eager.Tensor() if core.is_compiled_with_cuda(): - tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, - core.CUDAPlace(0)) + tensor2 = paddle.to_tensor( + arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0) + ) else: - tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor2 = paddle.to_tensor( + arr2, core.VarDesc.VarType.FP32, core.CPUPlace() + ) np.testing.assert_array_equal(tensor.numpy(), arr) np.testing.assert_array_equal(tensor2.numpy(), arr2) tensor2._share_underline_tensor_to(tensor) @@ -732,8 +784,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): with _test_eager_guard(): paddle.set_device("cpu") arr = np.ones([4, 16, 16, 32]).astype('float32') - tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, - core.CPUPlace()) + tensor = paddle.to_tensor( + arr, core.VarDesc.VarType.FP32, core.CPUPlace() + ) self.assertEqual(tensor.shape, [4, 16, 16, 32]) tensor.name = 'tensor_name_test' self.assertEqual(tensor.name, 'tensor_name_test') @@ -764,14 +817,14 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): paddle.set_device("gpu:0") with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue( - isinstance(_current_expected_place(), - type(core.CPUPlace()))) + isinstance(_current_expected_place(), type(core.CPUPlace())) + ) else: paddle.set_device("cpu") with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue( - isinstance(_current_expected_place(), - type(core.CPUPlace()))) + isinstance(_current_expected_place(), type(core.CPUPlace())) + ) def test_value(self): with _test_eager_guard(): @@ -783,13 +836,19 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( - paddle.fluid.framework._current_expected_place())) + paddle.fluid.framework._current_expected_place() + ) + ) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64) self.assertEqual(egr_tensor0.stop_gradient, True) - self.assertTrue(egr_tensor0.value().get_tensor()._dtype(), - core.VarDesc.VarType.FP64) - self.assertTrue(egr_tensor0.value().get_tensor()._place(), - paddle.fluid.framework._current_expected_place()) + self.assertTrue( + egr_tensor0.value().get_tensor()._dtype(), + core.VarDesc.VarType.FP64, + ) + self.assertTrue( + egr_tensor0.value().get_tensor()._place(), + paddle.fluid.framework._current_expected_place(), + ) self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized()) def test_set_value(self): @@ -813,8 +872,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): def test_sharding_related_api(self): with _test_eager_guard(): arr0 = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor1 = core.eager.Tensor(arr0, core.CPUPlace(), True, False, - "numpy_tensor1", False) + egr_tensor1 = core.eager.Tensor( + arr0, core.CPUPlace(), True, False, "numpy_tensor1", False + ) self.assertEqual(egr_tensor1._numel(), 32768) self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384) @@ -839,7 +899,6 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): class EagerParamBaseUsageTestCase(unittest.TestCase): - def test_print(self): with _test_eager_guard(): linear = 
paddle.nn.Linear(3, 3, bias_attr=False) @@ -850,10 +909,12 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): linear = paddle.nn.Linear(1, 3) linear_copy = copy.deepcopy(linear) linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True) - np.testing.assert_array_equal(linear.weight.numpy(), - linear_copy.weight.numpy()) - np.testing.assert_array_equal(linear.weight.numpy(), - linear_copy2.numpy()) + np.testing.assert_array_equal( + linear.weight.numpy(), linear_copy.weight.numpy() + ) + np.testing.assert_array_equal( + linear.weight.numpy(), linear_copy2.numpy() + ) def func_fp16_initilaizer(self): paddle.set_default_dtype("float16") @@ -862,22 +923,25 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): 1, 3, bias_attr=False, - weight_attr=paddle.fluid.initializer.Uniform()) + weight_attr=paddle.fluid.initializer.Uniform(), + ) linear3 = paddle.nn.Linear( 1, 3, bias_attr=False, - weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer()) + weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer(), + ) linear4 = paddle.nn.Linear( 1, 3, bias_attr=False, - weight_attr=paddle.fluid.initializer.MSRAInitializer()) + weight_attr=paddle.fluid.initializer.MSRAInitializer(), + ) res = [ linear1.weight.numpy(), linear2.weight.numpy(), linear3.weight.numpy(), - linear4.weight.numpy() + linear4.weight.numpy(), ] paddle.set_default_dtype("float32") return res @@ -896,7 +960,8 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): def func_layer_helper_base(self, value): base = paddle.fluid.layer_helper_base.LayerHelperBase( - "test_layer", "test_layer") + "test_layer", "test_layer" + ) return base.to_variable(value).numpy() def func_base_to_variable(self, value): @@ -945,7 +1010,6 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): class EagerGuardTestCase(unittest.TestCase): - def test__test_eager_guard(self): tracer = paddle.fluid.dygraph.tracer.Tracer() with _test_eager_guard(tracer): diff --git a/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py b/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py index e0a1058e6189487b970c9d8d404f6743f3529b8a..b25e40149bac70c001dd452ba382747c9344481e 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py @@ -19,13 +19,15 @@ import unittest class EagerStringTensorTestCase(unittest.TestCase): - def setUp(self): - self.str_arr = np.array([ - ["15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错" - ], # From ChnSentiCorp - ["One of the very best Three Stooges shorts ever."] - ]) # From IMDB + self.str_arr = np.array( + [ + [ + "15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错" + ], # From ChnSentiCorp + ["One of the very best Three Stooges shorts ever."], + ] + ) # From IMDB def test_constructor_with_args(self): with _test_eager_guard(): @@ -38,8 +40,9 @@ class EagerStringTensorTestCase(unittest.TestCase): ST2 = core.eager.StringTensor(shape, "ST2") # constructor 2 self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.shape, shape) - np.testing.assert_array_equal(ST2.numpy(), - np.empty(shape, dtype=np.unicode_)) + np.testing.assert_array_equal( + ST2.numpy(), np.empty(shape, dtype=np.unicode_) + ) ST3 = core.eager.StringTensor(self.str_arr, "ST3") # constructor 3 self.assertEqual(ST3.name, "ST3") @@ -68,15 +71,18 @@ class EagerStringTensorTestCase(unittest.TestCase): def test_constructor_with_kwargs(self): with _test_eager_guard(): shape = [2, 3] - ST1 = core.eager.StringTensor(dims=shape, - 
name="ST1") # constructor 2 + ST1 = core.eager.StringTensor( + dims=shape, name="ST1" + ) # constructor 2 self.assertEqual(ST1.name, "ST1") self.assertEqual(ST1.shape, shape) - np.testing.assert_array_equal(ST1.numpy(), - np.empty(shape, dtype=np.unicode_)) + np.testing.assert_array_equal( + ST1.numpy(), np.empty(shape, dtype=np.unicode_) + ) - ST2 = core.eager.StringTensor(self.str_arr, - name="ST2") # constructor 3 + ST2 = core.eager.StringTensor( + self.str_arr, name="ST2" + ) # constructor 3 self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.shape, list(self.str_arr.shape)) np.testing.assert_array_equal(ST2.numpy(), self.str_arr) @@ -86,8 +92,9 @@ class EagerStringTensorTestCase(unittest.TestCase): self.assertEqual(ST3.shape, list(self.str_arr.shape)) np.testing.assert_array_equal(ST3.numpy(), self.str_arr) - ST4 = core.eager.StringTensor(value=ST2, - name="ST4") # constructor 6 + ST4 = core.eager.StringTensor( + value=ST2, name="ST4" + ) # constructor 6 self.assertEqual(ST4.name, "ST4") self.assertEqual(ST4.shape, list(self.str_arr.shape)) np.testing.assert_array_equal(ST4.numpy(), self.str_arr) diff --git a/python/paddle/fluid/tests/unittests/test_eig_op.py b/python/paddle/fluid/tests/unittests/test_eig_op.py index d8af7048c2a64bbc48aac17c617dddfcac8d06c0..338eb3512e57a3c3e64944959b79ce028d3dea3e 100644 --- a/python/paddle/fluid/tests/unittests/test_eig_op.py +++ b/python/paddle/fluid/tests/unittests/test_eig_op.py @@ -22,9 +22,9 @@ import unittest # cast output to complex for numpy.linalg.eig def cast_to_complex(input, output): - if (input.dtype == np.float32): + if input.dtype == np.float32: output = output.astype(np.complex64) - elif (input.dtype == np.float64): + elif input.dtype == np.float64: output = output.astype(np.complex128) return output @@ -57,7 +57,6 @@ def eig_backward(w, v, grad_w, grad_v): class TestEigOp(OpTest): - def setUp(self): paddle.enable_static() paddle.device.set_device("cpu") @@ -72,8 +71,10 @@ class TestEigOp(OpTest): self.set_dims() self.x = np.random.random(self.shape).astype(self.dtype) self.out = np.linalg.eig(self.x) - self.out = (cast_to_complex(self.x, self.out[0]), - cast_to_complex(self.x, self.out[1])) + self.out = ( + cast_to_complex(self.x, self.out[0]), + cast_to_complex(self.x, self.out[1]), + ) # for the real input, a customized checker is needed def checker(self, outs): @@ -84,13 +85,17 @@ class TestEigOp(OpTest): length_w = len(expect_out_w) act_w_real = np.sort( - np.array([np.abs(actual_out_w[i].real) for i in range(length_w)])) + np.array([np.abs(actual_out_w[i].real) for i in range(length_w)]) + ) act_w_imag = np.sort( - np.array([np.abs(actual_out_w[i].imag) for i in range(length_w)])) + np.array([np.abs(actual_out_w[i].imag) for i in range(length_w)]) + ) exp_w_real = np.sort( - np.array([np.abs(expect_out_w[i].real) for i in range(length_w)])) + np.array([np.abs(expect_out_w[i].real) for i in range(length_w)]) + ) exp_w_imag = np.sort( - np.array([np.abs(expect_out_w[i].imag) for i in range(length_w)])) + np.array([np.abs(expect_out_w[i].imag) for i in range(length_w)]) + ) for i in range(length_w): np.testing.assert_allclose( @@ -98,25 +103,37 @@ class TestEigOp(OpTest): exp_w_real[i], rtol=1e-06, atol=1e-05, - err_msg='The eigenvalues real part have diff: \nExpected ' + - str(act_w_real[i]) + '\n' + 'But got: ' + str(exp_w_real[i])) + err_msg='The eigenvalues real part have diff: \nExpected ' + + str(act_w_real[i]) + + '\n' + + 'But got: ' + + str(exp_w_real[i]), + ) np.testing.assert_allclose( act_w_imag[i], exp_w_imag[i], 
rtol=1e-06, atol=1e-05, - err_msg='The eigenvalues image part have diff: \nExpected ' + - str(act_w_imag[i]) + '\n' + 'But got: ' + str(exp_w_imag[i])) + err_msg='The eigenvalues image part have diff: \nExpected ' + + str(act_w_imag[i]) + + '\n' + + 'But got: ' + + str(exp_w_imag[i]), + ) length_v = len(expect_out_v) act_v_real = np.sort( - np.array([np.abs(actual_out_v[i].real) for i in range(length_v)])) + np.array([np.abs(actual_out_v[i].real) for i in range(length_v)]) + ) act_v_imag = np.sort( - np.array([np.abs(actual_out_v[i].imag) for i in range(length_v)])) + np.array([np.abs(actual_out_v[i].imag) for i in range(length_v)]) + ) exp_v_real = np.sort( - np.array([np.abs(expect_out_v[i].real) for i in range(length_v)])) + np.array([np.abs(expect_out_v[i].real) for i in range(length_v)]) + ) exp_v_imag = np.sort( - np.array([np.abs(expect_out_v[i].imag) for i in range(length_v)])) + np.array([np.abs(expect_out_v[i].imag) for i in range(length_v)]) + ) for i in range(length_v): np.testing.assert_allclose( @@ -124,15 +141,23 @@ class TestEigOp(OpTest): exp_v_real[i], rtol=1e-06, atol=1e-05, - err_msg='The eigenvectors real part have diff: \nExpected ' + - str(act_v_real[i]) + '\n' + 'But got: ' + str(exp_v_real[i])) + err_msg='The eigenvectors real part have diff: \nExpected ' + + str(act_v_real[i]) + + '\n' + + 'But got: ' + + str(exp_v_real[i]), + ) np.testing.assert_allclose( act_v_imag[i], exp_v_imag[i], rtol=1e-06, atol=1e-05, - err_msg='The eigenvectors image part have diff: \nExpected ' + - str(act_v_imag[i]) + '\n' + 'But got: ' + str(exp_v_imag[i])) + err_msg='The eigenvectors image part have diff: \nExpected ' + + str(act_v_imag[i]) + + '\n' + + 'But got: ' + + str(exp_v_imag[i]), + ) def set_dtype(self): self.dtype = np.complex64 @@ -149,32 +174,34 @@ class TestEigOp(OpTest): gtype = np.complex128 self.grad_w = np.ones(self.out[0].shape, gtype) self.grad_v = np.ones(self.out[1].shape, gtype) - self.grad_x = eig_backward(self.out[0], self.out[1], self.grad_w, - self.grad_v) + self.grad_x = eig_backward( + self.out[0], self.out[1], self.grad_w, self.grad_v + ) def test_check_output(self): - self.check_output_with_place_customized(checker=self.checker, - place=core.CPUPlace()) + self.check_output_with_place_customized( + checker=self.checker, place=core.CPUPlace() + ) def test_check_grad(self): self.init_grad() - self.check_grad(['X'], ['Eigenvalues', 'Eigenvectors'], - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_w, self.grad_v]) + self.check_grad( + ['X'], + ['Eigenvalues', 'Eigenvectors'], + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_w, self.grad_v], + ) class TestComplex128(TestEigOp): - def set_dtype(self): self.dtype = np.complex128 @skip_check_grad_ci( - reason= - "For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the grad computation may be not the same with paddle.linalg.eig" + reason="For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the grad computation may be not the same with paddle.linalg.eig" ) class TestDouble(TestEigOp): - def set_dtype(self): self.dtype = np.float64 @@ -183,11 +210,9 @@ class TestDouble(TestEigOp): @skip_check_grad_ci( - reason= - "For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the grad computation may be not the same with paddle.linalg.eig" + reason="For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the 
grad computation may be not the same with paddle.linalg.eig" ) class TestEigBatchMarices(TestEigOp): - def set_dtype(self): self.dtype = np.float64 @@ -199,11 +224,9 @@ class TestEigBatchMarices(TestEigOp): @skip_check_grad_ci( - reason= - "For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the grad computation may be not the same with paddle.linalg.eig" + reason="For float dtype, numpy.linalg.eig forward outputs real or complex when input is real, therefore the grad computation may be not the same with paddle.linalg.eig" ) class TestFloat(TestEigOp): - def set_dtype(self): self.dtype = np.float32 @@ -212,7 +235,6 @@ class TestFloat(TestEigOp): class TestEigStatic(TestEigOp): - def test_check_output_with_place(self): paddle.enable_static() place = core.CPUPlace() @@ -223,28 +245,36 @@ class TestEigStatic(TestEigOp): act_val, act_vec = paddle.linalg.eig(input) exe = fluid.Executor(place) - fetch_val, fetch_vec = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[act_val, act_vec]) + fetch_val, fetch_vec = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[act_val, act_vec], + ) np.testing.assert_allclose( expect_val, fetch_val, rtol=1e-06, atol=1e-06, - err_msg='The eigen values have diff: \nExpected ' + - str(expect_val) + '\n' + 'But got: ' + str(fetch_val)) + err_msg='The eigen values have diff: \nExpected ' + + str(expect_val) + + '\n' + + 'But got: ' + + str(fetch_val), + ) np.testing.assert_allclose( np.abs(expect_vec), np.abs(fetch_vec), rtol=1e-06, atol=1e-06, - err_msg='The eigen vectors have diff: \nExpected ' + - str(np.abs(expect_vec)) + '\n' + 'But got: ' + - str(np.abs(fetch_vec))) + err_msg='The eigen vectors have diff: \nExpected ' + + str(np.abs(expect_vec)) + + '\n' + + 'But got: ' + + str(np.abs(fetch_vec)), + ) class TestEigDyGraph(unittest.TestCase): - def test_check_output_with_place(self): input_np = np.random.random([3, 3]).astype('complex') expect_val, expect_vec = np.linalg.eig(input_np) @@ -260,16 +290,23 @@ class TestEigDyGraph(unittest.TestCase): fetch_val.numpy(), rtol=1e-06, atol=1e-06, - err_msg='The eigen values have diff: \nExpected ' + - str(expect_val) + '\n' + 'But got: ' + str(fetch_val)) + err_msg='The eigen values have diff: \nExpected ' + + str(expect_val) + + '\n' + + 'But got: ' + + str(fetch_val), + ) np.testing.assert_allclose( np.abs(expect_vec), np.abs(fetch_vec.numpy()), rtol=1e-06, atol=1e-06, - err_msg='The eigen vectors have diff: \nExpected ' + - str(np.abs(expect_vec)) + '\n' + 'But got: ' + - str(np.abs(fetch_vec.numpy()))) + err_msg='The eigen vectors have diff: \nExpected ' + + str(np.abs(expect_vec)) + + '\n' + + 'But got: ' + + str(np.abs(fetch_vec.numpy())), + ) def test_check_grad(self): test_shape = [3, 3] @@ -289,17 +326,20 @@ class TestEigDyGraph(unittest.TestCase): w, v = paddle.linalg.eig(x) (w.sum() + v.sum()).backward() - np.testing.assert_allclose(np.abs(x.grad.numpy()), - np.abs(grad_x), - rtol=1e-05, - atol=1e-05, - err_msg='The grad x have diff: \nExpected ' + - str(np.abs(grad_x)) + '\n' + 'But got: ' + - str(np.abs(x.grad.numpy()))) + np.testing.assert_allclose( + np.abs(x.grad.numpy()), + np.abs(grad_x), + rtol=1e-05, + atol=1e-05, + err_msg='The grad x have diff: \nExpected ' + + str(np.abs(grad_x)) + + '\n' + + 'But got: ' + + str(np.abs(x.grad.numpy())), + ) class TestEigWrongDimsError(unittest.TestCase): - def test_error(self): paddle.device.set_device("cpu") paddle.disable_static() @@ -309,7 +349,6 @@ class 
TestEigWrongDimsError(unittest.TestCase): class TestEigNotSquareError(unittest.TestCase): - def test_error(self): paddle.device.set_device("cpu") paddle.disable_static() @@ -319,7 +358,6 @@ class TestEigNotSquareError(unittest.TestCase): class TestEigUnsupportedDtypeError(unittest.TestCase): - def test_error(self): paddle.device.set_device("cpu") paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_eigh_op.py b/python/paddle/fluid/tests/unittests/test_eigh_op.py index 6118b761283a84d3177f0c5a10e78e07826e2990..928234a138f13b38cb4f8a3ed5a13d02dcf9d2c9 100644 --- a/python/paddle/fluid/tests/unittests/test_eigh_op.py +++ b/python/paddle/fluid/tests/unittests/test_eigh_op.py @@ -57,8 +57,8 @@ def valid_single_eigh_result(A, eigh_value, eigh_vector, uplo): # ||A - Q*T*Q'|| / (N*||A||) < rtol np.testing.assert_array_less( - np.linalg.norm(residual, np.inf) / (N * np.linalg.norm(A, np.inf)), - rtol) + np.linalg.norm(residual, np.inf) / (N * np.linalg.norm(A, np.inf)), rtol + ) # ||I - Q*Q'|| / M < rtol residual = np.eye(M) - eigh_vector @ np.linalg.inv(eigh_vector) @@ -66,7 +66,6 @@ def valid_single_eigh_result(A, eigh_value, eigh_vector, uplo): class TestEighOp(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "eigh" @@ -94,13 +93,11 @@ class TestEighOp(OpTest): class TestEighUPLOCase(TestEighOp): - def init_config(self): self.UPLO = 'U' class TestEighGPUCase(unittest.TestCase): - def setUp(self): self.x_shape = [32, 32] self.dtype = "float32" @@ -113,19 +110,22 @@ class TestEighGPUCase(unittest.TestCase): paddle.disable_static(place=paddle.CUDAPlace(0)) input_real_data = paddle.to_tensor(self.x_np) actual_w, actual_v = paddle.linalg.eigh(input_real_data, self.UPLO) - valid_eigh_result(self.x_np, actual_w.numpy(), actual_v.numpy(), - self.UPLO) + valid_eigh_result( + self.x_np, actual_w.numpy(), actual_v.numpy(), self.UPLO + ) class TestEighAPI(unittest.TestCase): - def setUp(self): self.init_input_data() self.UPLO = 'L' self.rtol = 1e-5 # for test_eigh_grad self.atol = 1e-5 # for test_eigh_grad - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) np.random.seed(123) def init_input_shape(self): @@ -136,26 +136,31 @@ class TestEighAPI(unittest.TestCase): self.dtype = "float32" self.real_data = np.random.random(self.x_shape).astype(self.dtype) complex_data = np.random.random(self.x_shape).astype( - self.dtype) + 1J * np.random.random(self.x_shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.x_shape).astype(self.dtype) self.trans_dims = list(range(len(self.x_shape) - 2)) + [ - len(self.x_shape) - 1, len(self.x_shape) - 2 + len(self.x_shape) - 1, + len(self.x_shape) - 2, ] - #build a random conjugate matrix + # build a random conjugate matrix self.complex_symm = np.divide( - complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2) + complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2 + ) def check_static_float_result(self): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): - input_x = paddle.static.data('input_x', - shape=self.x_shape, - dtype=self.dtype) + input_x = paddle.static.data( + 'input_x', shape=self.x_shape, dtype=self.dtype + ) output_w, output_v = paddle.linalg.eigh(input_x) exe = paddle.static.Executor(self.place) - actual_w, actual_v = exe.run(main_prog, - feed={"input_x": self.real_data}, - 
fetch_list=[output_w, output_v]) + actual_w, actual_v = exe.run( + main_prog, + feed={"input_x": self.real_data}, + fetch_list=[output_w, output_v], + ) valid_eigh_result(self.real_data, actual_w, actual_v, self.UPLO) def check_static_complex_result(self): @@ -163,14 +168,16 @@ class TestEighAPI(unittest.TestCase): startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): x_dtype = np.complex64 if self.dtype == "float32" else np.complex128 - input_x = paddle.static.data('input_x', - shape=self.x_shape, - dtype=x_dtype) + input_x = paddle.static.data( + 'input_x', shape=self.x_shape, dtype=x_dtype + ) output_w, output_v = paddle.linalg.eigh(input_x) exe = paddle.static.Executor(self.place) - actual_w, actual_v = exe.run(main_prog, - feed={"input_x": self.complex_symm}, - fetch_list=[output_w, output_v]) + actual_w, actual_v = exe.run( + main_prog, + feed={"input_x": self.complex_symm}, + fetch_list=[output_w, output_v], + ) valid_eigh_result(self.complex_symm, actual_w, actual_v, self.UPLO) def test_in_static_mode(self): @@ -182,61 +189,62 @@ class TestEighAPI(unittest.TestCase): paddle.disable_static() input_real_data = paddle.to_tensor(self.real_data) actual_w, actual_v = paddle.linalg.eigh(input_real_data) - valid_eigh_result(self.real_data, actual_w.numpy(), actual_v.numpy(), - self.UPLO) + valid_eigh_result( + self.real_data, actual_w.numpy(), actual_v.numpy(), self.UPLO + ) input_complex_data = paddle.to_tensor(self.complex_symm) actual_w, actual_v = paddle.linalg.eigh(input_complex_data) - valid_eigh_result(self.complex_symm, actual_w.numpy(), actual_v.numpy(), - self.UPLO) + valid_eigh_result( + self.complex_symm, actual_w.numpy(), actual_v.numpy(), self.UPLO + ) def test_eigh_grad(self): paddle.disable_static() x = paddle.to_tensor(self.complex_symm, stop_gradient=False) w, v = paddle.linalg.eigh(x) (w.sum() + paddle.abs(v).sum()).backward() - np.testing.assert_allclose(abs(x.grad.numpy()), - abs(x.grad.numpy().conj().transpose( - self.trans_dims)), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + abs(x.grad.numpy()), + abs(x.grad.numpy().conj().transpose(self.trans_dims)), + rtol=self.rtol, + atol=self.atol, + ) class TestEighBatchAPI(TestEighAPI): - def init_input_shape(self): self.x_shape = [2, 5, 5] class TestEighAPIError(unittest.TestCase): - def test_error(self): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): - #input maxtrix must greater than 2 dimensions - input_x = paddle.static.data(name='x_1', - shape=[12], - dtype='float32') + # input maxtrix must greater than 2 dimensions + input_x = paddle.static.data( + name='x_1', shape=[12], dtype='float32' + ) self.assertRaises(ValueError, paddle.linalg.eigh, input_x) - #input matrix must be square matrix - input_x = paddle.static.data(name='x_2', - shape=[12, 32], - dtype='float32') + # input matrix must be square matrix + input_x = paddle.static.data( + name='x_2', shape=[12, 32], dtype='float32' + ) self.assertRaises(ValueError, paddle.linalg.eigh, input_x) - #uplo must be in 'L' or 'U' - input_x = paddle.static.data(name='x_3', - shape=[4, 4], - dtype="float32") + # uplo must be in 'L' or 'U' + input_x = paddle.static.data( + name='x_3', shape=[4, 4], dtype="float32" + ) uplo = 'R' self.assertRaises(ValueError, paddle.linalg.eigh, input_x, uplo) - #x_data cannot be integer - input_x = paddle.static.data(name='x_4', - shape=[4, 4], - dtype="int32") + # x_data cannot be integer + 
input_x = paddle.static.data( + name='x_4', shape=[4, 4], dtype="int32" + ) self.assertRaises(TypeError, paddle.linalg.eigh, input_x) diff --git a/python/paddle/fluid/tests/unittests/test_eigvals_op.py b/python/paddle/fluid/tests/unittests/test_eigvals_op.py index a361ad24024598da740a1e617bc780706db1c477..df8d6b001cffbf5990a9d0cad92560037173ad2a 100644 --- a/python/paddle/fluid/tests/unittests/test_eigvals_op.py +++ b/python/paddle/fluid/tests/unittests/test_eigvals_op.py @@ -23,7 +23,7 @@ np.set_printoptions(threshold=np.inf) def np_eigvals(a): res = np.linalg.eigvals(a) - if (a.dtype == np.float32 or a.dtype == np.complex64): + if a.dtype == np.float32 or a.dtype == np.complex64: res = res.astype(np.complex64) else: res = res.astype(np.complex128) @@ -32,7 +32,6 @@ def np_eigvals(a): class TestEigvalsOp(OpTest): - def setUp(self): np.random.seed(0) paddle.enable_static() @@ -53,111 +52,121 @@ class TestEigvalsOp(OpTest): self.input_dims = (5, 5) def set_input_data(self): - if (self.dtype == np.float32 or self.dtype == np.float64): + if self.dtype == np.float32 or self.dtype == np.float64: self.input_data = np.random.random(self.input_dims).astype( - self.dtype) + self.dtype + ) else: - self.input_data = (np.random.random(self.input_dims) + - np.random.random(self.input_dims) * 1j).astype( - self.dtype) + self.input_data = ( + np.random.random(self.input_dims) + + np.random.random(self.input_dims) * 1j + ).astype(self.dtype) def test_check_output(self): self.__class__.no_need_check_grad = True - self.check_output_with_place_customized(checker=self.verify_output, - place=core.CPUPlace()) + self.check_output_with_place_customized( + checker=self.verify_output, place=core.CPUPlace() + ) def verify_output(self, outs): actual_outs = np.sort(np.array(outs[0])) expect_outs = np.sort(np.array(self.outputs['Out'])) self.assertTrue( - actual_outs.shape == expect_outs.shape, "Output shape has diff.\n" - "Expect shape " + str(expect_outs.shape) + "\n" + "But Got" + - str(actual_outs.shape) + " in class " + self.__class__.__name__) + actual_outs.shape == expect_outs.shape, + "Output shape has diff.\n" + "Expect shape " + + str(expect_outs.shape) + + "\n" + + "But Got" + + str(actual_outs.shape) + + " in class " + + self.__class__.__name__, + ) n_dim = actual_outs.shape[-1] - for actual_row, expect_row in zip(actual_outs.reshape((-1, n_dim)), - expect_outs.reshape((-1, n_dim))): - is_mapped_index = np.zeros((n_dim, )) + for actual_row, expect_row in zip( + actual_outs.reshape((-1, n_dim)), expect_outs.reshape((-1, n_dim)) + ): + is_mapped_index = np.zeros((n_dim,)) for i in range(n_dim): is_mapped = False for j in range(n_dim): if is_mapped_index[j] == 0 and np.isclose( - np.array(actual_row[i]), - np.array(expect_row[j]), - atol=1e-5): + np.array(actual_row[i]), + np.array(expect_row[j]), + atol=1e-5, + ): is_mapped_index[j] = True is_mapped = True break self.assertTrue( is_mapped, - "Output has diff in class " + self.__class__.__name__ + - "\nExpect " + str(expect_outs) + "\n" + "But Got" + - str(actual_outs) + "\nThe data " + str(actual_row[i]) + - " in " + str(actual_row) + " mismatch.") + "Output has diff in class " + + self.__class__.__name__ + + "\nExpect " + + str(expect_outs) + + "\n" + + "But Got" + + str(actual_outs) + + "\nThe data " + + str(actual_row[i]) + + " in " + + str(actual_row) + + " mismatch.", + ) class TestEigvalsOpFloat64(TestEigvalsOp): - def set_dtype(self): self.dtype = np.float64 class TestEigvalsOpComplex64(TestEigvalsOp): - def set_dtype(self): self.dtype = np.complex64 
class TestEigvalsOpComplex128(TestEigvalsOp): - def set_dtype(self): self.dtype = np.complex128 class TestEigvalsOpLargeScare(TestEigvalsOp): - def set_input_dims(self): self.input_dims = (128, 128) class TestEigvalsOpLargeScareFloat64(TestEigvalsOpLargeScare): - def set_dtype(self): self.dtype = np.float64 class TestEigvalsOpLargeScareComplex64(TestEigvalsOpLargeScare): - def set_dtype(self): self.dtype = np.complex64 class TestEigvalsOpLargeScareComplex128(TestEigvalsOpLargeScare): - def set_dtype(self): self.dtype = np.complex128 class TestEigvalsOpBatch1(TestEigvalsOp): - def set_input_dims(self): self.input_dims = (1, 2, 3, 4, 4) class TestEigvalsOpBatch2(TestEigvalsOp): - def set_input_dims(self): self.input_dims = (3, 1, 4, 5, 5) class TestEigvalsOpBatch3(TestEigvalsOp): - def set_input_dims(self): self.input_dims = (6, 2, 9, 6, 6) class TestEigvalsAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) @@ -183,42 +192,62 @@ class TestEigvalsAPI(unittest.TestCase): self.dtype = np.float32 def set_input_data(self): - if (self.dtype == np.float32 or self.dtype == np.float64): + if self.dtype == np.float32 or self.dtype == np.float64: self.input_data = np.random.random(self.input_dims).astype( - self.dtype) + self.dtype + ) else: - self.input_data = (np.random.random(self.input_dims) + - np.random.random(self.input_dims) * 1j).astype( - self.dtype) + self.input_data = ( + np.random.random(self.input_dims) + + np.random.random(self.input_dims) * 1j + ).astype(self.dtype) def verify_output(self, actural_outs, expect_outs): actual_outs = np.array(actural_outs) expect_outs = np.array(expect_outs) self.assertTrue( - actual_outs.shape == expect_outs.shape, "Output shape has diff." - "\nExpect shape " + str(expect_outs.shape) + "\n" + "But Got" + - str(actual_outs.shape) + " in class " + self.__class__.__name__) + actual_outs.shape == expect_outs.shape, + "Output shape has diff." 
+ "\nExpect shape " + + str(expect_outs.shape) + + "\n" + + "But Got" + + str(actual_outs.shape) + + " in class " + + self.__class__.__name__, + ) n_dim = actual_outs.shape[-1] - for actual_row, expect_row in zip(actual_outs.reshape((-1, n_dim)), - expect_outs.reshape((-1, n_dim))): - is_mapped_index = np.zeros((n_dim, )) + for actual_row, expect_row in zip( + actual_outs.reshape((-1, n_dim)), expect_outs.reshape((-1, n_dim)) + ): + is_mapped_index = np.zeros((n_dim,)) for i in range(n_dim): is_mapped = False for j in range(n_dim): if is_mapped_index[j] == 0 and np.isclose( - np.array(actual_row[i]), - np.array(expect_row[j]), - atol=1e-5): + np.array(actual_row[i]), + np.array(expect_row[j]), + atol=1e-5, + ): is_mapped_index[j] = True is_mapped = True break self.assertTrue( is_mapped, - "Output has diff in class " + self.__class__.__name__ + - "\nExpect " + str(expect_outs) + "\n" + "But Got" + - str(actual_outs) + "\nThe data " + str(actual_row[i]) + - " in " + str(actual_row) + " mismatch.") + "Output has diff in class " + + self.__class__.__name__ + + "\nExpect " + + str(expect_outs) + + "\n" + + "But Got" + + str(actual_outs) + + "\nThe data " + + str(actual_row[i]) + + " in " + + str(actual_row) + + " mismatch.", + ) def run_dygraph(self, place): paddle.disable_static() @@ -241,24 +270,28 @@ class TestEigvalsAPI(unittest.TestCase): def run_static(self, place): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - small_input_tensor = paddle.static.data(name='small_x', - shape=self.small_dims, - dtype=self.dtype) - large_input_tensor = paddle.static.data(name='large_x', - shape=self.large_dims, - dtype=self.dtype) - batch_input_tensor = paddle.static.data(name='batch_x', - shape=self.batch_dims, - dtype=self.dtype) - - small_outs = paddle.linalg.eigvals(small_input_tensor, - name='small_x') - large_outs = paddle.linalg.eigvals(large_input_tensor, - name='large_x') - batch_outs = paddle.linalg.eigvals(batch_input_tensor, - name='batch_x') + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + small_input_tensor = paddle.static.data( + name='small_x', shape=self.small_dims, dtype=self.dtype + ) + large_input_tensor = paddle.static.data( + name='large_x', shape=self.large_dims, dtype=self.dtype + ) + batch_input_tensor = paddle.static.data( + name='batch_x', shape=self.batch_dims, dtype=self.dtype + ) + + small_outs = paddle.linalg.eigvals( + small_input_tensor, name='small_x' + ) + large_outs = paddle.linalg.eigvals( + large_input_tensor, name='large_x' + ) + batch_outs = paddle.linalg.eigvals( + batch_input_tensor, name='batch_x' + ) exe = paddle.static.Executor(place) @@ -266,9 +299,10 @@ class TestEigvalsAPI(unittest.TestCase): feed={ "small_x": self.small_input, "large_x": self.large_input, - "batch_x": self.batch_input + "batch_x": self.batch_input, }, - fetch_list=[small_outs, large_outs, batch_outs]) + fetch_list=[small_outs, large_outs, batch_outs], + ) np_outs = np_eigvals(self.small_input) self.verify_output(paddle_outs[0], np_outs) @@ -281,7 +315,7 @@ class TestEigvalsAPI(unittest.TestCase): def test_cases(self): places = [core.CPUPlace()] - #if core.is_compiled_with_cuda(): + # if core.is_compiled_with_cuda(): # places.append(core.CUDAPlace(0)) for place in places: self.run_dygraph(place) @@ -301,19 +335,16 @@ class TestEigvalsAPI(unittest.TestCase): class TestEigvalsAPIFloat64(TestEigvalsAPI): - def set_dtype(self): self.dtype = np.float64 class 
TestEigvalsAPIComplex64(TestEigvalsAPI): - def set_dtype(self): self.dtype = np.complex64 class TestEigvalsAPIComplex128(TestEigvalsAPI): - def set_dtype(self): self.dtype = np.complex128 diff --git a/python/paddle/fluid/tests/unittests/test_eigvalsh_op.py b/python/paddle/fluid/tests/unittests/test_eigvalsh_op.py index 08d3da2103cfefa359fa69ed20d05b39010f28ff..2dc378a0810dc5e24588b044d2bc3ee760849e61 100644 --- a/python/paddle/fluid/tests/unittests/test_eigvalsh_op.py +++ b/python/paddle/fluid/tests/unittests/test_eigvalsh_op.py @@ -34,7 +34,11 @@ def valid_eigenvalues(actual, expected): FP32_MAX_RELATIVE_ERR = 5e-5 FP64_MAX_RELATIVE_ERR = 1e-14 - rtol = FP32_MAX_RELATIVE_ERR if actual.dtype == np.single else FP64_MAX_RELATIVE_ERR + rtol = ( + FP32_MAX_RELATIVE_ERR + if actual.dtype == np.single + else FP64_MAX_RELATIVE_ERR + ) diff = np.abs(expected - actual) max_diff = np.max(diff) @@ -44,7 +48,6 @@ def valid_eigenvalues(actual, expected): class TestEigvalshOp(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "eigvalsh" @@ -75,13 +78,11 @@ class TestEigvalshOp(OpTest): class TestEigvalshUPLOCase(TestEigvalshOp): - def init_config(self): self.UPLO = 'U' class TestEigvalshGPUCase(unittest.TestCase): - def setUp(self): self.x_shape = [32, 32] self.dtype = "float32" @@ -98,14 +99,16 @@ class TestEigvalshGPUCase(unittest.TestCase): class TestEigvalshAPI(unittest.TestCase): - def setUp(self): self.dtype = "float32" self.UPLO = 'L' self.rtol = 1e-5 # test_eigvalsh_grad self.atol = 1e-5 # test_eigvalsh_grad - self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) np.random.seed(123) self.init_input_shape() self.init_input_data() @@ -116,25 +119,30 @@ class TestEigvalshAPI(unittest.TestCase): def init_input_data(self): self.real_data = np.random.random(self.x_shape).astype(self.dtype) complex_data = np.random.random(self.x_shape).astype( - self.dtype) + 1J * np.random.random(self.x_shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.x_shape).astype(self.dtype) self.trans_dims = list(range(len(self.x_shape) - 2)) + [ - len(self.x_shape) - 1, len(self.x_shape) - 2 + len(self.x_shape) - 1, + len(self.x_shape) - 2, ] self.complex_symm = np.divide( - complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2) + complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2 + ) def check_static_float_result(self): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): - input_x = paddle.static.data('input_x', - shape=self.x_shape, - dtype=self.dtype) + input_x = paddle.static.data( + 'input_x', shape=self.x_shape, dtype=self.dtype + ) output_w = paddle.linalg.eigvalsh(input_x) exe = paddle.static.Executor(self.place) - actual_w = exe.run(main_prog, - feed={"input_x": self.real_data}, - fetch_list=[output_w]) + actual_w = exe.run( + main_prog, + feed={"input_x": self.real_data}, + fetch_list=[output_w], + ) expected_w = np.linalg.eigvalsh(self.real_data) compare_result(actual_w[0], expected_w) @@ -144,14 +152,16 @@ class TestEigvalshAPI(unittest.TestCase): startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): x_dtype = np.complex64 if self.dtype == "float32" else np.complex128 - input_x = paddle.static.data('input_x', - shape=self.x_shape, - dtype=x_dtype) + input_x = paddle.static.data( + 'input_x', shape=self.x_shape, 
dtype=x_dtype + ) output_w = paddle.linalg.eigvalsh(input_x) exe = paddle.static.Executor(self.place) - actual_w = exe.run(main_prog, - feed={"input_x": self.complex_symm}, - fetch_list=[output_w]) + actual_w = exe.run( + main_prog, + feed={"input_x": self.complex_symm}, + fetch_list=[output_w], + ) expected_w = np.linalg.eigvalsh(self.complex_symm) compare_result(actual_w[0], expected_w) @@ -177,48 +187,47 @@ class TestEigvalshAPI(unittest.TestCase): x = paddle.to_tensor(self.complex_symm, stop_gradient=False) w = paddle.linalg.eigvalsh(x) (w.sum()).backward() - np.testing.assert_allclose(abs(x.grad.numpy()), - abs(x.grad.numpy().conj().transpose( - self.trans_dims)), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + abs(x.grad.numpy()), + abs(x.grad.numpy().conj().transpose(self.trans_dims)), + rtol=self.rtol, + atol=self.atol, + ) class TestEigvalshBatchAPI(TestEigvalshAPI): - def init_input_shape(self): self.x_shape = [2, 5, 5] class TestEigvalshAPIError(unittest.TestCase): - def test_error(self): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): - #input maxtrix must greater than 2 dimensions - input_x = paddle.static.data(name='x_1', - shape=[12], - dtype='float32') + # input maxtrix must greater than 2 dimensions + input_x = paddle.static.data( + name='x_1', shape=[12], dtype='float32' + ) self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x) - #input matrix must be square matrix - input_x = paddle.static.data(name='x_2', - shape=[12, 32], - dtype='float32') + # input matrix must be square matrix + input_x = paddle.static.data( + name='x_2', shape=[12, 32], dtype='float32' + ) self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x) - #uplo must be in 'L' or 'U' - input_x = paddle.static.data(name='x_3', - shape=[4, 4], - dtype="float32") + # uplo must be in 'L' or 'U' + input_x = paddle.static.data( + name='x_3', shape=[4, 4], dtype="float32" + ) uplo = 'R' self.assertRaises(ValueError, paddle.linalg.eigvalsh, input_x, uplo) - #x_data cannot be integer - input_x = paddle.static.data(name='x_4', - shape=[4, 4], - dtype="int32") + # x_data cannot be integer + input_x = paddle.static.data( + name='x_4', shape=[4, 4], dtype="int32" + ) self.assertRaises(TypeError, paddle.linalg.eigvalsh, input_x) diff --git a/python/paddle/fluid/tests/unittests/test_einsum.py b/python/paddle/fluid/tests/unittests/test_einsum.py index 40583c01f34c5600502445403312cf7dd8f5d3c2..2e8a0a6c7cb5e54046e1f447c9b49b731f5da286 100644 --- a/python/paddle/fluid/tests/unittests/test_einsum.py +++ b/python/paddle/fluid/tests/unittests/test_einsum.py @@ -23,85 +23,109 @@ os.environ['FLAGS_new_einsum'] = "0" class TestErrors(unittest.TestCase): - def setUp(self): pass def test_diagonalize_errors(self): a = np.arange(4 * 3 * 4 * 4).reshape(4, 3, 4, 4).astype('float') a = paddle.to_tensor(a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): paddle.einsum('...ii->...i', a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): paddle.einsum('i...i', a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): 
paddle.einsum('i...i->i...', a) def test_param_errors(self): a = np.arange(4 * 3 * 4 * 4).reshape(4, 3, 4, 4).astype('float') a = paddle.to_tensor(a) - with self.assertRaisesRegex(AssertionError, - ('At least one operand is expected.')): + with self.assertRaisesRegex( + AssertionError, ('At least one operand is expected.') + ): paddle.einsum('ijk') with self.assertRaisesRegex( - AssertionError, - ('Invalid equation: multiple `->` were found.')): + AssertionError, ('Invalid equation: multiple `->` were found.') + ): paddle.einsum('i -> j -> k', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 2, " - "but found 3 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 2, " + "but found 3 segments in the label equation." + ), + ): paddle.einsum('i,j,k', a, a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 2, " - "but found 1 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 2, " + "but found 1 segments in the label equation." + ), + ): paddle.einsum('ij -> k', a, a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 1, " - "but found 2 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 1, " + "but found 2 segments in the label equation." + ), + ): paddle.einsum('i, -> k', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the label string '' misses dimensions.")): + AssertionError, + ("Invalid equation: the label string '' misses dimensions."), + ): paddle.einsum('->', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the label string 'i' misses dimensions.")): + AssertionError, + ("Invalid equation: the label string 'i' misses dimensions."), + ): paddle.einsum('i', a) with self.assertRaisesRegex( - AssertionError, ("Invalid equation: _ is not a valid label, " - "which should be letters.")): + AssertionError, + ( + "Invalid equation: _ is not a valid label, " + "which should be letters." + ), + ): paddle.einsum('i_', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: `.` is found outside of an ellipsis.")): + AssertionError, + ("Invalid equation: `.` is found outside of an ellipsis."), + ): paddle.einsum('i..j', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: `.` is found outside of an ellipsis.")): + AssertionError, + ("Invalid equation: `.` is found outside of an ellipsis."), + ): paddle.einsum('...k...', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: missing ellipsis in output labels.")): + AssertionError, + ("Invalid equation: missing ellipsis in output labels."), + ): paddle.einsum('i...->i', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: duplicate output labels are found.")): + AssertionError, + ("Invalid equation: duplicate output labels are found."), + ): paddle.einsum('i...->i...i', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid operands: label i " - "corresponds to non-broadcastable dimensions.")): + AssertionError, + ( + "Invalid operands: label i " + "corresponds to non-broadcastable dimensions." 
+ ), + ): paddle.einsum('ij...,ji...', a, a) class TestEinsum(unittest.TestCase): - @classmethod def setUpClass(cls): np.random.seed(12345) @@ -132,15 +156,17 @@ class TestEinsum(unittest.TestCase): return core.CUDAPlace(0) return core.CPUPlace() - def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): + def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}' - np.testing.assert_allclose(actual, - expect, - rtol=rtol, - atol=atol, - err_msg=error_msg.format( - paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose( + actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, self.__class__.__name__ + ), + ) def setUp(self): self.sample = {"paradigm": "i->", "data": ["x"]} @@ -153,7 +179,8 @@ class TestEinsum(unittest.TestCase): equation = self.sample["paradigm"] with paddle.fluid.dygraph.guard( - self._get_place(force_to_use_cpu=False)): + self._get_place(force_to_use_cpu=False) + ): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -165,163 +192,136 @@ class TestEinsum(unittest.TestCase): class TestEinsumVectorDot(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,i->", "data": ["x", "x"]} class TestEinsumVectorMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,i->i", "data": ["x", "x"]} class TestEinsumVectorOuter(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,j->ij", "data": ["x", "y"]} class TestEinsumMatrixTranspose(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->ji", "data": ["A"]} class TestEinsumMatrixRowSum(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->j", "data": ["A"]} class TestEinsumMatrixColSum(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->i", "data": ["A"]} class TestEinsumMatrixEleMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,ij->ij", "data": ["A", "A"]} class TestEinsumDegenerateMatrixVecMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,j", "data": ["a", "b"]} class TestEinsumMatrixVecMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,j->i", "data": ["A", "x"]} class TestEinsumMatrixMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,kj->ik", "data": ["A", "B"]} class TestEinsumMatrixOuter(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,kl->ijkl", "data": ["A", "C"]} class TestEinsumTensorBMM(TestEinsum): - def setUp(self): self.sample = {"paradigm": "bij,bjk->bik", "data": ["D", "E"]} class TestEinsumTensorContract1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->i", "data": ["D", "A"]} class TestEinsumTensorContract2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,lk->ijl", "data": ["D", "B"]} class TestEinsumTensorContract3(TestEinsum): - def setUp(self): self.sample = {"paradigm": "abcd,dfg->abcfg", "data": ["F", "D"]} class TestEinsumTensorContract4(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->ik", "data": ["D", "A"]} class TestEinsumTensorContract5(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->ij", "data": ["D", "A"]} class TestEinsumTensorContract6(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ik, ijk->j", "data": ["A", "G"]} class TestEinsumTensorContract7(TestEinsum): - def 
setUp(self): self.sample = {"paradigm": "ijk, ik->jk", "data": ["G", "A"]} class TestEinsumEllipsis1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i...->...", "data": ["G"]} class TestEinsumEllipsis2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,...i->j...", "data": ["A", "H"]} class TestEinsumEllipsis3(TestEinsum): - def setUp(self): self.sample = {"paradigm": "k...,jk", "data": ["F", "I"]} class TestEinsumTestEinsumBilinear(TestEinsum): - def setUp(self): self.sample = {"paradigm": "bn,anm,bm->ba", "data": ["B", "E", "I"]} class TestEinsumTestEinsumOthers1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijkl, lmn->kmn", "data": ["F", "H"]} class TestEinsumTestEinsumOthers2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijkl, lmn->ijn", "data": ["F", "H"]} class TestEinsumBatch1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "blq,bhlk->bhlqk", "data": ["J", "K"]} class TestNumpyTests(unittest.TestCase): - def setUp(self): pass @@ -333,20 +333,23 @@ class TestNumpyTests(unittest.TestCase): return core.CUDAPlace(0) return core.CPUPlace() - def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): + def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}' - np.testing.assert_allclose(actual, - expect, - rtol=rtol, - atol=atol, - err_msg=error_msg.format( - paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose( + actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, self.__class__.__name__ + ), + ) def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) with paddle.fluid.dygraph.guard( - self._get_place(force_to_use_cpu=False)): + self._get_place(force_to_use_cpu=False) + ): pd_operands = [paddle.to_tensor(op) for op in ops] actual = paddle.einsum(eqn, *pd_operands) self.check_output_equal(actual.numpy(), expect) @@ -421,8 +424,8 @@ class TestNumpyTests(unittest.TestCase): q = np.ones((1, 2)).astype('float') self.check_output('ij,ij->j', p, q) - x = np.array([2., 3.]).astype('float') - y = np.array([4.]).astype('float') + x = np.array([2.0, 3.0]).astype('float') + y = np.array([4.0]).astype('float') self.check_output("i, i", x, y) p = np.ones((1, 5)) / 2 @@ -453,21 +456,21 @@ class TestNumpyTests(unittest.TestCase): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): - a = paddle.static.data(name='a', - shape=[3, None, None, None], - dtype='float') - b = paddle.static.data(name='b', - shape=[2, None, None, None], - dtype='float') - c = paddle.static.data(name='c', - shape=[None, None, 2, None], - dtype='float') - d = paddle.static.data(name='d', - shape=[None, None, 5], - dtype='float') - e = paddle.static.data(name='e', - shape=[None, 2, None], - dtype='float') + a = paddle.static.data( + name='a', shape=[3, None, None, None], dtype='float' + ) + b = paddle.static.data( + name='b', shape=[2, None, None, None], dtype='float' + ) + c = paddle.static.data( + name='c', shape=[None, None, 2, None], dtype='float' + ) + d = paddle.static.data( + name='d', shape=[None, None, 5], dtype='float' + ) + e = paddle.static.data( + name='e', shape=[None, 2, None], dtype='float' + ) outs = [] outs.append(paddle.einsum("ibnd,jbnd->bnij", a, b)) diff --git a/python/paddle/fluid/tests/unittests/test_einsum_op.py b/python/paddle/fluid/tests/unittests/test_einsum_op.py index 
f3f7b9093664cf1348b0252fd79d7eb96fbe5cf1..9db367a23357f8dc8266741c8a0b0a039bcd58bc 100644 --- a/python/paddle/fluid/tests/unittests/test_einsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_einsum_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestEinsumBinary(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "einsum" @@ -34,12 +33,15 @@ class TestEinsumBinary(OpTest): self.inputs = {"Operands": self.operands} self.attrs = {"equation": self.equation} self.outputs = { - 'Out': - out, - "InnerCache": [('cache_' + str(i), np.array([1.0])) - for i in range(len(self.operands))], - "XShape": [('xshape_' + str(i), np.array([1.0])) - for i in range(len(self.operands))], + 'Out': out, + "InnerCache": [ + ('cache_' + str(i), np.array([1.0])) + for i in range(len(self.operands)) + ], + "XShape": [ + ('xshape_' + str(i), np.array([1.0])) + for i in range(len(self.operands)) + ], } def init_input(self): @@ -62,7 +64,6 @@ class TestEinsumBinary(OpTest): class TestEinsum1(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(20, 3, 3), (20, 3, 3)] self.types = [np.float64, np.float64] @@ -70,7 +71,6 @@ class TestEinsum1(TestEinsumBinary): class TestEinsum2(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(20, 3, 3), (20, 3, 3)] self.types = [np.float64, np.float64] @@ -78,7 +78,6 @@ class TestEinsum2(TestEinsumBinary): class TestEinsum3(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 10), (10, 10)] self.types = [np.float64, np.float64] @@ -86,7 +85,6 @@ class TestEinsum3(TestEinsumBinary): class TestEinsumWithReduction(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 3, 5), (5, 30)] self.types = [np.float64, np.float64] @@ -94,7 +92,6 @@ class TestEinsumWithReduction(TestEinsumBinary): class TestEinsumWithReduction1(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 3, 3, 5), (10, 5, 10, 10)] self.types = [np.float64, np.float64] @@ -102,7 +99,6 @@ class TestEinsumWithReduction1(TestEinsumBinary): class TestEinsumWithUnary(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 10, 3, 5)] self.types = [np.float64] @@ -110,7 +106,6 @@ class TestEinsumWithUnary(TestEinsumBinary): class TestEinsumWithUnary1(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(5, 10, 3, 3), (3, 6, 3, 10)] self.types = [np.float64, np.float64] @@ -118,7 +113,6 @@ class TestEinsumWithUnary1(TestEinsumBinary): class TestEinsumWithBroadcast1(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(5, 10, 3, 3)] self.types = [np.float64] @@ -126,7 +120,6 @@ class TestEinsumWithBroadcast1(TestEinsumBinary): class TestEinsumWithBroadcast2(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 11), (3, 4, 5, 10)] self.types = [np.float64, np.float64] @@ -134,7 +127,6 @@ class TestEinsumWithBroadcast2(TestEinsumBinary): class TestEinsumWithBroadcast3(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 3, 2, 3, 4), (12, 10)] self.types = [np.float64, np.float64] @@ -142,7 +134,6 @@ class TestEinsumWithBroadcast3(TestEinsumBinary): class TestEinsumWithBroadcast4(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(10, 3, 2, 3, 4), (12, 10)] self.types = [np.float64, np.float64] @@ -150,7 +141,6 @@ class TestEinsumWithBroadcast4(TestEinsumBinary): class TestEinsumWithBroadcast5(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(3, 2, 2, 10), (10, 3, 2, 2)] self.types = [np.float64, np.float64] @@ -158,7 +148,6 @@ class TestEinsumWithBroadcast5(TestEinsumBinary): class 
TestEinsumWithBroadcast6(TestEinsumBinary): - def set_mandatory(self): self.shapes = [(100), (100)] self.types = [np.float64, np.float64] diff --git a/python/paddle/fluid/tests/unittests/test_einsum_v2.py b/python/paddle/fluid/tests/unittests/test_einsum_v2.py index 0cfe6bdece91eea13c43d16237ae8a7950ca3dbb..e7b041124c25771419b2e26a494b116724b36013 100644 --- a/python/paddle/fluid/tests/unittests/test_einsum_v2.py +++ b/python/paddle/fluid/tests/unittests/test_einsum_v2.py @@ -31,91 +31,117 @@ def error_trans(func, *args, **kargs): out = func(*args, **kargs) except ValueError as e: if "Same label have different shapes" in str(e): - raise AssertionError("Invalid operands: label i " - "corresponds to non-broadcastable dimensions.") + raise AssertionError( + "Invalid operands: label i " + "corresponds to non-broadcastable dimensions." + ) class TestErrors(unittest.TestCase): - def setUp(self): pass def test_diagonalize_errors(self): a = np.arange(4 * 3 * 4 * 4).reshape(4, 3, 4, 4).astype('float') a = paddle.to_tensor(a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): paddle.einsum('...ii->...i', a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): paddle.einsum('i...i', a) - with self.assertRaisesRegex(AssertionError, - ('Duplicate labels are not supported.')): + with self.assertRaisesRegex( + AssertionError, ('Duplicate labels are not supported.') + ): paddle.einsum('i...i->i...', a) def test_param_errors(self): a = np.arange(4 * 3 * 4 * 4).reshape(4, 3, 4, 4).astype('float') a = paddle.to_tensor(a) with self.assertRaisesRegex( - AssertionError, - ("Required at least one operand in Einsum API, but received 0 ")): + AssertionError, + ("Required at least one operand in Einsum API, but received 0 "), + ): paddle.einsum('ijk') with self.assertRaisesRegex( - AssertionError, - ('Invalid equation: multiple `->` were found.')): + AssertionError, ('Invalid equation: multiple `->` were found.') + ): paddle.einsum('i -> j -> k', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 2, " - "but found 3 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 2, " + "but found 3 segments in the label equation." + ), + ): paddle.einsum('i,j,k', a, a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 2, " - "but found 1 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 2, " + "but found 1 segments in the label equation." + ), + ): paddle.einsum('ij -> k', a, a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the number of operands is 1, " - "but found 2 segments in the label equation.")): + AssertionError, + ( + "Invalid equation: the number of operands is 1, " + "but found 2 segments in the label equation." 
+ ), + ): paddle.einsum('i, -> k', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the label string '' misses dimensions.")): + AssertionError, + ("Invalid equation: the label string '' misses dimensions."), + ): paddle.einsum('->', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: the label string 'i' misses dimensions.")): + AssertionError, + ("Invalid equation: the label string 'i' misses dimensions."), + ): paddle.einsum('i', a) with self.assertRaisesRegex( - AssertionError, ("Invalid equation: _ is not a valid label, " - "which should be letters.")): + AssertionError, + ( + "Invalid equation: _ is not a valid label, " + "which should be letters." + ), + ): paddle.einsum('i_', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: `.` is found outside of an ellipsis.")): + AssertionError, + ("Invalid equation: `.` is found outside of an ellipsis."), + ): paddle.einsum('i..j', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: `.` is found outside of an ellipsis.")): + AssertionError, + ("Invalid equation: `.` is found outside of an ellipsis."), + ): paddle.einsum('...k...', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: missing ellipsis in output labels.")): + AssertionError, + ("Invalid equation: missing ellipsis in output labels."), + ): paddle.einsum('i...->i', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid equation: duplicate output labels are found.")): + AssertionError, + ("Invalid equation: duplicate output labels are found."), + ): paddle.einsum('i...->i...i', a) with self.assertRaisesRegex( - AssertionError, - ("Invalid operands: label i " - "corresponds to non-broadcastable dimensions.")): + AssertionError, + ( + "Invalid operands: label i " + "corresponds to non-broadcastable dimensions." + ), + ): error_trans(paddle.einsum, 'ij...,ji...', a, a) class TestEinsum(unittest.TestCase): - @classmethod def setUpClass(cls): np.random.seed(12345) @@ -146,15 +172,17 @@ class TestEinsum(unittest.TestCase): return core.CUDAPlace(0) return core.CPUPlace() - def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): + def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): error_msg = 'Output has diff at place:{}. 
\nExpect: {} \nBut Got: {} in class {}' - np.testing.assert_allclose(actual, - expect, - rtol=rtol, - atol=atol, - err_msg=error_msg.format( - paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose( + actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, self.__class__.__name__ + ), + ) def setUp(self): self.sample = {"paradigm": "i->", "data": ["x"]} @@ -167,7 +195,8 @@ class TestEinsum(unittest.TestCase): equation = self.sample["paradigm"] with paddle.fluid.dygraph.guard( - self._get_place(force_to_use_cpu=False)): + self._get_place(force_to_use_cpu=False) + ): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -179,163 +208,136 @@ class TestEinsum(unittest.TestCase): class TestEinsumVectorDot(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,i->", "data": ["x", "x"]} class TestEinsumVectorMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,i->i", "data": ["x", "x"]} class TestEinsumVectorOuter(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i,j->ij", "data": ["x", "y"]} class TestEinsumMatrixTranspose(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->ji", "data": ["A"]} class TestEinsumMatrixRowSum(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->j", "data": ["A"]} class TestEinsumMatrixColSum(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij->i", "data": ["A"]} class TestEinsumMatrixEleMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,ij->ij", "data": ["A", "A"]} class TestEinsumDegenerateMatrixVecMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,j", "data": ["a", "b"]} class TestEinsumMatrixVecMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,j->i", "data": ["A", "x"]} class TestEinsumMatrixMul(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,kj->ik", "data": ["A", "B"]} class TestEinsumMatrixOuter(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,kl->ijkl", "data": ["A", "C"]} class TestEinsumTensorBMM(TestEinsum): - def setUp(self): self.sample = {"paradigm": "bij,bjk->bik", "data": ["D", "E"]} class TestEinsumTensorContract1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->i", "data": ["D", "A"]} class TestEinsumTensorContract2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,lk->ijl", "data": ["D", "B"]} class TestEinsumTensorContract3(TestEinsum): - def setUp(self): self.sample = {"paradigm": "abcd,dfg->abcfg", "data": ["F", "D"]} class TestEinsumTensorContract4(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->ik", "data": ["D", "A"]} class TestEinsumTensorContract5(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk,jk->ij", "data": ["D", "A"]} class TestEinsumTensorContract6(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ik, ijk->j", "data": ["A", "G"]} class TestEinsumTensorContract7(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijk, ik->jk", "data": ["G", "A"]} class TestEinsumEllipsis1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "i...->...", "data": ["G"]} class TestEinsumEllipsis2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ij,...i->j...", "data": ["A", "H"]} class TestEinsumEllipsis3(TestEinsum): - def setUp(self): self.sample = {"paradigm": "k...,jk", "data": ["F", "I"]} class 
TestEinsumTestEinsumBilinear(TestEinsum): - def setUp(self): self.sample = {"paradigm": "bn,anm,bm->ba", "data": ["B", "E", "I"]} class TestEinsumTestEinsumOthers1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijkl, lmn->kmn", "data": ["F", "H"]} class TestEinsumTestEinsumOthers2(TestEinsum): - def setUp(self): self.sample = {"paradigm": "ijkl, lmn->ijn", "data": ["F", "H"]} class TestEinsumBatch1(TestEinsum): - def setUp(self): self.sample = {"paradigm": "blq,bhlk->bhlqk", "data": ["J", "K"]} class TestNumpyTests(unittest.TestCase): - def setUp(self): pass @@ -347,20 +349,23 @@ class TestNumpyTests(unittest.TestCase): return core.CUDAPlace(0) return core.CPUPlace() - def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): + def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}' - np.testing.assert_allclose(actual, - expect, - rtol=rtol, - atol=atol, - err_msg=error_msg.format( - paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose( + actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, self.__class__.__name__ + ), + ) def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) with paddle.fluid.dygraph.guard( - self._get_place(force_to_use_cpu=False)): + self._get_place(force_to_use_cpu=False) + ): pd_operands = [paddle.to_tensor(op) for op in ops] actual = paddle.einsum(eqn, *pd_operands) self.check_output_equal(actual.numpy(), expect) @@ -432,20 +437,20 @@ class TestNumpyTests(unittest.TestCase): self.check_output("i,i", a, a) # TODO(@xiongkun): explict broadcast in EinsumOp is not supported, it's not recommend to use einsum like this. - #p = np.ones((10, 2)).astype('float') - #q = np.ones((1, 2)).astype('float') - #self.check_output('ij,ij->j', p, q) + # p = np.ones((10, 2)).astype('float') + # q = np.ones((1, 2)).astype('float') + # self.check_output('ij,ij->j', p, q) # TODO(@xiongkun): explict-label-broadcast in EinsumOp is not supported, it's not recommend to use einsum like this. - #x = np.array([2., 3.]).astype('float') - #y = np.array([4.]).astype('float') - #self.check_output("i, i", x, y) + # x = np.array([2., 3.]).astype('float') + # y = np.array([4.]).astype('float') + # self.check_output("i, i", x, y) # TODO(@xiongkun): explict-label-broadcast in EinsumOp is not supported, it's not recommend to use einsum like this. - #p = np.ones((1, 5)) / 2 - #q = np.ones((5, 5)) / 2 - #self.check_output("...ij,...jk->...ik", p, p) - #self.check_output("...ij,...jk->...ik", p, q) + # p = np.ones((1, 5)) / 2 + # q = np.ones((5, 5)) / 2 + # self.check_output("...ij,...jk->...ik", p, p) + # self.check_output("...ij,...jk->...ik", p, q) x = np.eye(2).astype('float') y = np.ones(2).astype('float') @@ -456,11 +461,11 @@ class TestNumpyTests(unittest.TestCase): def test_large_nops(self): pass # TODO(@xiongkun): explict broadcast in EinsumOp is not supported, it's not recommend to use einsum like this. 
- #a = np.arange(4 * 3 * 1 * 4).reshape(4, 3, 1, 4).astype('float') - #self.check_output('a...b,b...c,c...d', a, a, a) - #self.check_output('a...b,b...c,c...a', a, a, a) - #self.check_output('a...b,b...c,c...a', a, a, a) - #self.check_output('...ab,...ba,...ab,...ab', a, a, a, a) + # a = np.arange(4 * 3 * 1 * 4).reshape(4, 3, 1, 4).astype('float') + # self.check_output('a...b,b...c,c...d', a, a, a) + # self.check_output('a...b,b...c,c...a', a, a, a) + # self.check_output('a...b,b...c,c...a', a, a, a) + # self.check_output('...ab,...ba,...ab,...ab', a, a, a, a) def test_static_graph(self): paddle.enable_static() @@ -472,21 +477,21 @@ class TestNumpyTests(unittest.TestCase): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): - a = paddle.static.data(name='a', - shape=[3, None, None, None], - dtype='float') - b = paddle.static.data(name='b', - shape=[2, None, None, None], - dtype='float') - c = paddle.static.data(name='c', - shape=[None, None, 2, None], - dtype='float') - d = paddle.static.data(name='d', - shape=[None, None, 5], - dtype='float') - e = paddle.static.data(name='e', - shape=[None, 2, None], - dtype='float') + a = paddle.static.data( + name='a', shape=[3, None, None, None], dtype='float' + ) + b = paddle.static.data( + name='b', shape=[2, None, None, None], dtype='float' + ) + c = paddle.static.data( + name='c', shape=[None, None, 2, None], dtype='float' + ) + d = paddle.static.data( + name='d', shape=[None, None, 5], dtype='float' + ) + e = paddle.static.data( + name='e', shape=[None, 2, None], dtype='float' + ) outs = [] outs.append(paddle.einsum("ibnd,jbnd->bnij", a, b)) @@ -514,7 +519,6 @@ class TestNumpyTests(unittest.TestCase): class TestStaticGraphShape(unittest.TestCase): - def setUp(self): paddle.enable_static() @@ -528,9 +532,11 @@ class TestStaticGraphShape(unittest.TestCase): self.assertEqual(C.shape, (-1, 384)) -@unittest.skipIf(not core.is_compiled_with_cuda() - or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA or not support the bfloat16") +@unittest.skipIf( + not core.is_compiled_with_cuda() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support the bfloat16", +) class TestBF16(unittest.TestCase): """ EinsumOp support bfloat16 type, add unittest here for the correctness. @@ -539,8 +545,7 @@ class TestBF16(unittest.TestCase): def test_shape(self): cuda_major = paddle.version.cuda().split('.')[0].strip() if int(cuda_major) >= 11: - """ MatmulKernel support bfloat16 only if cuda_major > 11.0. 
- """ + """MatmulKernel support bfloat16 only if cuda_major > 11.0.""" A = paddle.to_tensor(np.array([1.0, 2.0])).astype(paddle.bfloat16) A = A.cuda() B = paddle.to_tensor(np.array([2.0, 3.0])).astype(paddle.bfloat16) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index f99f8ea1c8e35850c3373a2edafabbc0c2b42be7..ae516bc44cad3e645964193809ee3ca7d490d412 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -16,14 +16,17 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + skip_check_grad_ci, + convert_float_to_uint16, +) import paddle.fluid as fluid from paddle.fluid import Program, program_guard from paddle.fluid.framework import _test_eager_guard class TestElementwiseAddOp(OpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -37,47 +40,55 @@ class TestElementwiseAddOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} def check_eager(self): - return (self.use_mkldnn == False and self.axis == -1) + return self.use_mkldnn == False and self.axis == -1 def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output(check_dygraph=(self.use_mkldnn == False), - check_eager=self.check_eager()) + self.check_output( + check_dygraph=(self.use_mkldnn == False), + check_eager=self.check_eager(), + ) def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return - self.check_grad(['X', 'Y'], - 'Out', - check_dygraph=(self.use_mkldnn == False), - check_eager=self.check_eager()) + self.check_grad( + ['X', 'Y'], + 'Out', + check_dygraph=(self.use_mkldnn == False), + check_eager=self.check_eager(), + ) def test_check_grad_ingore_x(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), - check_eager=self.check_eager()) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + check_dygraph=(self.use_mkldnn == False), + check_eager=self.check_eager(), + ) def test_check_grad_ingore_y(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), - check_eager=self.check_eager()) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + check_dygraph=(self.use_mkldnn == False), + check_eager=self.check_eager(), + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -91,10 +102,10 @@ class TestElementwiseAddOp(OpTest): self.axis = -1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16ElementwiseAddOp(TestElementwiseAddOp): - def init_dtype(self): self.dtype = np.float16 @@ -104,16 +115,17 @@ class 
TestFP16ElementwiseAddOp(TestElementwiseAddOp): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place( - place, atol=1e-3, check_dygraph=(self.use_mkldnn == False)) + place, atol=1e-3, check_dygraph=(self.use_mkldnn == False) + ) @unittest.skipIf( - not core.is_compiled_with_cuda() or core.cudnn_version() < 8100 + not core.is_compiled_with_cuda() + or core.cudnn_version() < 8100 or paddle.device.cuda.get_device_capability()[0] < 8, - "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0" + "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0", ) class TestBF16ElementwiseAddOp(OpTest): - def setUp(self): self.op_type = "elementwise_add" self.dtype = np.uint16 @@ -125,9 +137,12 @@ class TestBF16ElementwiseAddOp(OpTest): self.axis = -1 self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)), - 'Y': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y)) + 'X': OpTest.np_dtype_to_fluid_dtype( + convert_float_to_uint16(self.x) + ), + 'Y': OpTest.np_dtype_to_fluid_dtype( + convert_float_to_uint16(self.y) + ), } self.attrs = {'axis': self.axis, 'use_mkldnn': False} self.outputs = {'Out': convert_float_to_uint16(self.out)} @@ -142,23 +157,21 @@ class TestBF16ElementwiseAddOp(OpTest): def test_check_grad_ingore_x(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['Y'], - 'Out', - no_grad_set=set("X"), - check_eager=False) + self.check_grad_with_place( + place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False + ) def test_check_grad_ingore_y(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - no_grad_set=set('Y'), - check_eager=False) + self.check_grad_with_place( + place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -166,9 +179,9 @@ class TestElementwiseAddOp_scalar(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -176,9 +189,9 @@ class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." +) class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -186,9 +199,9 @@ class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." 
+) class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) @@ -196,23 +209,20 @@ class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -223,7 +233,6 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -234,7 +243,6 @@ class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -245,7 +253,6 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -256,7 +263,6 @@ class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -264,7 +270,6 @@ class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -272,7 +277,6 @@ class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -283,7 +287,6 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -294,7 +297,6 @@ class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -305,7 +307,6 @@ class 
TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -316,7 +317,6 @@ class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -324,7 +324,6 @@ class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) @@ -332,7 +331,6 @@ class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -340,7 +338,6 @@ class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) @@ -348,7 +345,6 @@ class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) @@ -356,7 +352,6 @@ class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -367,7 +362,6 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -378,9 +372,9 @@ class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -391,9 +385,9 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -404,7 +398,6 @@ class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -415,7 +408,6 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -426,7 +418,6 @@ class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -437,7 +428,6 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -448,7 +438,6 @@ class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp): class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) @@ -459,7 +448,6 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype) @@ -470,7 +458,6 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 1, 12).astype(self.dtype) self.y = np.random.rand(10, 2, 12).astype(self.dtype) @@ -481,14 +468,15 @@ class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp): class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_add must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1) # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 @@ -499,7 +487,6 @@ class TestElementwiseAddOpError(unittest.TestCase): class TestAddApi(unittest.TestCase): - def _executed_api(self, x, y, name=None): return paddle.add(x, y, name) @@ -517,7 +504,7 @@ class TestAddApi(unittest.TestCase): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -527,7 +514,7 @@ class TestAddApi(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -538,18 +525,16 @@ class TestAddApi(unittest.TestCase): y = fluid.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((np_z == z_expected).all(), True) class TestAddInplaceApi(TestAddApi): - def _executed_api(self, x, y, name=None): return x.add_(y, name) class TestAddInplaceBroadcastSuccess(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 4).astype('float') self.y_numpy = np.random.rand(3, 4).astype('float') @@ -566,21 +551,18 @@ class TestAddInplaceBroadcastSuccess(unittest.TestCase): class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float') self.y_numpy = np.random.rand(3, 1).astype('float') class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float') self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float') class TestAddInplaceBroadcastError(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(3, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') @@ -599,21 +581,18 @@ class TestAddInplaceBroadcastError(unittest.TestCase): class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') class TestComplexElementwiseAddOp(OpTest): - def setUp(self): self.op_type = "elementwise_add" self.dtype = np.float64 @@ -623,7 +602,7 @@ class TestComplexElementwiseAddOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -633,14 +612,17 @@ class TestComplexElementwiseAddOp(OpTest): def init_input_output(self): self.x = 
np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.y = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.out = self.x + self.y def init_grad_input_output(self): - self.grad_out = np.ones( - self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype) + self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones( + self.shape, self.dtype + ) self.grad_x = self.grad_out self.grad_y = self.grad_out @@ -648,43 +630,49 @@ class TestComplexElementwiseAddOp(OpTest): self.check_output(check_eager=False) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + ) class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp): - def init_input_output(self): self.x = np.random.random(self.shape).astype(self.dtype) self.y = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.out = self.x + self.y def init_grad_input_output(self): - self.grad_out = np.ones( - self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype) + self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones( + self.shape, self.dtype + ) self.grad_x = np.real(self.grad_out) self.grad_y = self.grad_out class TestBoolAddFloatElementwiseAddop(unittest.TestCase): - def test_static_add(self): paddle.enable_static() a = 1.5 diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index 3cea2fdf5cd0d188c3f88414685082b90d0984d7..16bf0df5af38b686249007d496f938f4509edd95 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -21,7 +21,6 @@ from paddle.fluid import core class ElementwiseDivOp(OpTest): - def setUp(self): self.op_type = "elementwise_div" self.python_api = paddle.divide @@ -34,8 +33,9 @@ class ElementwiseDivOp(OpTest): out = self.compute_output(x, y).astype(self.val_dtype) grad_out = np.ones(out.shape).astype(self.val_dtype) grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype) - grad_y = self.compute_gradient_y(grad_out, out, - y).astype(self.val_dtype) + grad_y = self.compute_gradient_y(grad_out, out, y).astype( + self.val_dtype + ) # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP if self.dtype == np.uint16: @@ -84,28 +84,26 @@ class 
ElementwiseDivOp(OpTest): def test_check_gradient(self): check_list = [] - check_list.append({ - 'grad': ['X', 'Y'], - 'no_grad': None, - 'val_grad': [self.grad_x, self.grad_y] - }) - check_list.append({ - 'grad': ['Y'], - 'no_grad': set('X'), - 'val_grad': [self.grad_y] - }) - check_list.append({ - 'grad': ['X'], - 'no_grad': set('Y'), - 'val_grad': [self.grad_x] - }) + check_list.append( + { + 'grad': ['X', 'Y'], + 'no_grad': None, + 'val_grad': [self.grad_x, self.grad_y], + } + ) + check_list.append( + {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]} + ) + check_list.append( + {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]} + ) for check_option in check_list: check_args = [check_option['grad'], 'Out'] check_kwargs = { 'no_grad_set': check_option['no_grad'], 'user_defined_grads': check_option['val_grad'], 'user_defined_grad_outputs': [self.grad_out], - 'check_dygraph': self.check_dygraph + 'check_dygraph': self.check_dygraph, } if self.place is None: self.check_grad(*check_args, **check_kwargs) @@ -114,11 +112,12 @@ class ElementwiseDivOp(OpTest): self.check_grad_with_place(*check_args, **check_kwargs) -@unittest.skipIf(not core.is_compiled_with_cuda() - or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA or not support the bfloat16") +@unittest.skipIf( + not core.is_compiled_with_cuda() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support the bfloat16", +) class TestElementwiseDivOpBF16(ElementwiseDivOp): - def init_args(self): # In due to output data type inconsistence of bfloat16 paddle op, we disable the dygraph check. self.check_dygraph = False @@ -134,9 +133,9 @@ class TestElementwiseDivOpBF16(ElementwiseDivOp): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseDivOpScalar(ElementwiseDivOp): - def init_shape(self): self.x_shape = [20, 3, 4] self.y_shape = [1] @@ -146,14 +145,12 @@ class TestElementwiseDivOpScalar(ElementwiseDivOp): class TestElementwiseDivOpVector(ElementwiseDivOp): - def init_shape(self): self.x_shape = [100] self.y_shape = [100] class TestElementwiseDivOpBroadcast0(ElementwiseDivOp): - def init_shape(self): self.x_shape = [100, 3, 4] self.y_shape = [100] @@ -170,7 +167,6 @@ class TestElementwiseDivOpBroadcast0(ElementwiseDivOp): class TestElementwiseDivOpBroadcast1(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 100, 4] self.y_shape = [100] @@ -187,7 +183,6 @@ class TestElementwiseDivOpBroadcast1(ElementwiseDivOp): class TestElementwiseDivOpBroadcast2(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 3, 100] self.y_shape = [100] @@ -203,7 +198,6 @@ class TestElementwiseDivOpBroadcast2(ElementwiseDivOp): class TestElementwiseDivOpBroadcast3(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 10, 12, 5] self.y_shape = [10, 12] @@ -216,12 +210,12 @@ class TestElementwiseDivOpBroadcast3(ElementwiseDivOp): return grad_out / y.reshape(1, 10, 12, 1) def compute_gradient_y(self, grad_out, out, y): - return np.sum(-1 * grad_out * out / y.reshape(1, 10, 12, 1), - axis=(0, 3)) + return np.sum( + -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3) + ) class TestElementwiseDivOpBroadcast4(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 3, 50] self.y_shape = [2, 1, 50] @@ -231,7 +225,6 @@ class TestElementwiseDivOpBroadcast4(ElementwiseDivOp): class TestElementwiseDivOpBroadcast5(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 3, 4, 20] self.y_shape = [2, 3, 1, 20] @@ -241,7 +234,6 @@ class TestElementwiseDivOpBroadcast5(ElementwiseDivOp): class TestElementwiseDivOpCommonuse1(ElementwiseDivOp): - def init_shape(self): self.x_shape = [2, 3, 100] self.y_shape = [1, 1, 100] @@ -251,7 +243,6 @@ class TestElementwiseDivOpCommonuse1(ElementwiseDivOp): class TestElementwiseDivOpCommonuse2(ElementwiseDivOp): - def init_shape(self): self.x_shape = [30, 3, 1, 5] self.y_shape = [30, 1, 4, 1] @@ -264,7 +255,6 @@ class TestElementwiseDivOpCommonuse2(ElementwiseDivOp): class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp): - def init_shape(self): self.x_shape = [10, 12] self.y_shape = [2, 3, 10, 12] @@ -275,7 +265,6 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp): class TestElementwiseDivOpInt(ElementwiseDivOp): - def init_dtype(self): self.dtype = np.int32 self.val_dtype = np.int32 @@ -287,32 +276,30 @@ class TestElementwiseDivOpInt(ElementwiseDivOp): return x // y -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestElementwiseDivOpFp16(ElementwiseDivOp): - def init_dtype(self): self.dtype = np.float16 self.val_dtype = np.float16 class TestElementwiseDivBroadcast(unittest.TestCase): - def test_shape_with_batch_sizes(self): with fluid.program_guard(fluid.Program()): - x_var = fluid.data(name='x', - dtype='float32', - shape=[None, 3, None, None]) - one = 2. 
+ x_var = fluid.data( + name='x', dtype='float32', shape=[None, 3, None, None] + ) + one = 2.0 out = one / x_var exe = fluid.Executor(fluid.CPUPlace()) x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32") - out_result, = exe.run(feed={'x': x}, fetch_list=[out]) + (out_result,) = exe.run(feed={'x': x}, fetch_list=[out]) self.assertEqual((out_result == (2 / x)).all(), True) class TestDivideOp(unittest.TestCase): - def test_name(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2, 3], dtype="float32") @@ -329,12 +316,11 @@ class TestDivideOp(unittest.TestCase): y = paddle.to_tensor(np_y) z = paddle.divide(x, y) np_z = z.numpy() - z_expected = np.array([2., 0.6, 2.]) + z_expected = np.array([2.0, 0.6, 2.0]) self.assertEqual((np_z == z_expected).all(), True) class TestComplexElementwiseDivOp(OpTest): - def setUp(self): self.op_type = "elementwise_div" self.python_api = paddle.divide @@ -344,7 +330,7 @@ class TestComplexElementwiseDivOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -353,17 +339,18 @@ class TestComplexElementwiseDivOp(OpTest): self.dtype = np.float64 def init_input_output(self): - self.x = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) - self.y = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + self.x = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) + self.y = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) self.out = self.x / self.y def init_grad_input_output(self): - self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones( - (2, 3, 4, 5), self.dtype) + self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones( + (2, 3, 4, 5), self.dtype + ) self.grad_x = self.grad_out / np.conj(self.y) self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y) @@ -371,38 +358,44 @@ class TestComplexElementwiseDivOp(OpTest): self.check_output(check_eager=False) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + ) class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp): - def init_input_output(self): self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype) - self.y = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 
self.y = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) self.out = self.x / self.y def init_grad_input_output(self): - self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones( - (2, 3, 4, 5), self.dtype) + self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones( + (2, 3, 4, 5), self.dtype + ) self.grad_x = np.real(self.grad_out / np.conj(self.y)) self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py index 746bb8886695110e29850305a06e1b047ae864b2..de058ed2b3b09e1fc1e14580cbc080f0987c033e 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py @@ -22,7 +22,6 @@ import random class TestElementwiseModOp(OpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -38,7 +37,7 @@ class TestElementwiseModOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -59,7 +58,6 @@ class TestElementwiseModOp(OpTest): class TestElementwiseModOp_scalar(TestElementwiseModOp): - def init_input_output(self): scale_x = random.randint(0, 100000000) scale_y = random.randint(1, 100000000) @@ -69,7 +67,6 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp): class TestElementwiseModOpInverse(TestElementwiseModOp): - def init_input_output(self): self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype) self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype) @@ -77,7 +74,6 @@ class TestElementwiseModOpInverse(TestElementwiseModOp): class TestFloorDivideOp(unittest.TestCase): - def test_name(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2, 3], dtype="int64") diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py index 823714a9a59e9be6eae2ec014e52e6881e83edba..91401ff574f6a2c7be1882cdc289b47f4a7358c3 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py @@ -20,16 +20,12 @@ import paddle.fluid as fluid class TestElementWiseAddOp(unittest.TestCase): - def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) def check_forward_backward(self): - def test_with_place(place): out_grad = np.random.random_sample(self.x.shape).astype(np.float32) x_grad = out_grad @@ -52,25 +48,29 @@ class TestElementWiseAddOp(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) - elementwise_add_op = block.append_op(type="elementwise_add", - inputs={ - "X": block.var('x'), - "Y": block.var('y'), - }, - outputs={ - "Out": - block.var('out'), - }, - attrs={ - "axis": self.axis, - }) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) + elementwise_add_op = block.append_op( + 
type="elementwise_add", + inputs={ + "X": block.var('x'), + "Y": block.var('y'), + }, + outputs={ + "Out": block.var('out'), + }, + attrs={ + "axis": self.axis, + }, + ) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - elementwise_add_op.desc, set(), []) + elementwise_add_op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -83,18 +83,20 @@ class TestElementWiseAddOp(unittest.TestCase): grad_var.set_dtype(core.VarDesc.VarType.FP32) exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in ['x', 'y', 'out@GRAD'] - }, - fetch_list=['x@GRAD', 'y@GRAD']) + out = exe.run( + program, + feed={ + name: var_dict[name] for name in ['x', 'y', 'out@GRAD'] + }, + fetch_list=['x@GRAD', 'y@GRAD'], + ) self.__assert_close(x_grad, out[0], "x@GRAD") self.__assert_close(y_grad, out[1], "y@GRAD", atol=1.4) places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "elementwise_add"): + "elementwise_add" + ): places.append(core.CUDAPlace(0)) for place in places: diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py index 7789f872d4ccbd5c536dc8e7a7782019777d76e0..aebf1c3f4fe82e95a0d2a8d86f3cd56f2c64b823 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py @@ -26,7 +26,6 @@ def Heaviside_grad(x, y, dout): class TestElementwiseOp(OpTest): - def setUp(self): self.op_type = "elementwise_heaviside" x = np.random.random((13, 17)).astype("float64") @@ -48,7 +47,6 @@ class TestElementwiseOp(OpTest): class TestHeavisideBroadcast(unittest.TestCase): - def setUp(self): self.input_1 = np.random.rand(2, 100, 13, 17).astype("float32") self.input_2 = np.random.rand(100, 13, 17).astype("float32") @@ -87,7 +85,6 @@ class TestHeavisideBroadcast(unittest.TestCase): class TestHeavisideAPI_float64(unittest.TestCase): - def setUp(self): self.x_np = np.random.random((13, 17)).astype("float64") self.y_np = np.random.random((13, 17)).astype("float64") @@ -95,45 +92,49 @@ class TestHeavisideAPI_float64(unittest.TestCase): self.dtype = "float64" def test_static(self): - for use_cuda in ([False, True] - if paddle.device.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if paddle.device.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() prog = paddle.static.Program() with paddle.static.program_guard(prog): - x = paddle.static.data(name=f"x_{self.dtype}", - shape=[13, 17], - dtype=self.dtype) - y = paddle.static.data(name=f"y_{self.dtype}", - shape=[13, 17], - dtype=self.dtype) + x = paddle.static.data( + name=f"x_{self.dtype}", shape=[13, 17], dtype=self.dtype + ) + y = paddle.static.data( + name=f"y_{self.dtype}", shape=[13, 17], dtype=self.dtype + ) out = paddle.heaviside(x, y) exe = paddle.static.Executor(place=place) - res, = exe.run(prog, - feed={ - f"x_{self.dtype}": self.x_np, - f"y_{self.dtype}": self.y_np - }, - fetch_list=out, - use_prune=True) + (res,) = exe.run( + prog, + feed={ + f"x_{self.dtype}": self.x_np, + f"y_{self.dtype}": self.y_np, + }, + fetch_list=out, + use_prune=True, + ) np.testing.assert_allclose(res, self.out_np, rtol=1e-05) def test_dygraph(self): - for use_cuda in ([False, True] - if paddle.device.is_compiled_with_cuda() 
else [False]): + for use_cuda in ( + [False, True] if paddle.device.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) - result = paddle.heaviside(paddle.to_tensor(self.x_np), - paddle.to_tensor(self.y_np)) + result = paddle.heaviside( + paddle.to_tensor(self.x_np), paddle.to_tensor(self.y_np) + ) np.testing.assert_allclose(result.numpy(), self.out_np, rtol=1e-05) class TestHeavisideAPI_float32(TestHeavisideAPI_float64): - def setUp(self): self.x_np = np.random.random((13, 17)).astype("float32") self.y_np = np.random.random((13, 17)).astype("float32") @@ -142,7 +143,6 @@ class TestHeavisideAPI_float32(TestHeavisideAPI_float64): class TestHeavisideAPI_int64(TestHeavisideAPI_float64): - def setUp(self): self.x_np = np.random.random((13, 17)).astype("int64") self.y_np = np.random.random((13, 17)).astype("int64") @@ -151,7 +151,6 @@ class TestHeavisideAPI_int64(TestHeavisideAPI_float64): class TestHeavisideAPI_int32(TestHeavisideAPI_float64): - def setUp(self): self.x_np = np.random.random((13, 17)).astype("int32") self.y_np = np.random.random((13, 17)).astype("int32") @@ -160,14 +159,13 @@ class TestHeavisideAPI_int32(TestHeavisideAPI_float64): class TestHeavisideAPI_float16(OpTest): - def setUp(self): self.dtype = np.float16 self.op_type = "elementwise_heaviside" self.python_api = paddle.heaviside self.inputs = { 'X': np.random.uniform(1, 2, [20, 5]).astype("float16"), - 'Y': np.random.uniform(1, 2, [20, 5]).astype("float16") + 'Y': np.random.uniform(1, 2, [20, 5]).astype("float16"), } self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])} @@ -175,16 +173,17 @@ class TestHeavisideAPI_float16(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=Heaviside_grad( - self.inputs['X'], self.inputs['Y'], - 1 / self.inputs['X'].size), - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=Heaviside_grad( + self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size + ), + check_eager=True, + ) class TestHeavisideError(unittest.TestCase): - def test_input(self): paddle.disable_static() @@ -199,8 +198,9 @@ class TestHeavisideError(unittest.TestCase): self.assertRaises(ValueError, test_input_y) def test_input_xy(): - paddle.heaviside(paddle.randn([100], 'float32'), - paddle.randn([100], 'float64')) + paddle.heaviside( + paddle.randn([100], 'float32'), paddle.randn([100], 'float64') + ) self.assertRaises(ValueError, test_input_xy) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py index d44eac529b4be2e9bf19d8c244b696ea29950bc3..018a44c2be96494e2b0b19d82d1c263d9a82eef1 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py @@ -20,7 +20,6 @@ import paddle class TestElementwiseOp(OpTest): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum @@ -46,25 +45,25 @@ class TestElementwiseOp(OpTest): self.check_grad(['X', 'Y'], 'Out', check_eager=True) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) - - 
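The check_grad hunks just above are representative of the whole patch: no behavioral change, only yapf's parenthesis-aligned continuations rewritten into black's layout (hanging indents, closing parenthesis on its own line, trailing commas when a call is exploded, plus literal normalizations such as 2. -> 2.0, 1J -> 1j and (100, ) -> (100,)). A minimal, self-contained illustration of the call-wrapping rule follows; check_grad here is a stand-in with the same keyword arguments as the OpTest helper used in these tests, not the real method.

# Illustrative only, not part of the patch.
def check_grad(inputs, output, max_relative_error=0.005, no_grad_set=None):
    # placeholder body so the snippet runs on its own
    return inputs, output, max_relative_error, no_grad_set


# yapf (before): continuation arguments aligned under the opening parenthesis.
check_grad(['Y'],
           'Out',
           max_relative_error=0.005,
           no_grad_set=set("X"))

# black (after): if the arguments fit on one indented line, keep them together...
check_grad(
    ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
)

# ...otherwise put one argument per line and add a trailing comma.
check_grad(
    ['Y'],
    'Out',
    max_relative_error=0.005,
    no_grad_set=set("X"),
)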
-@unittest.skipIf(core.is_compiled_with_cuda() and ( - core.cudnn_version() < 8100 - or paddle.device.cuda.get_device_capability()[0] < 8 -), "run test when gpu is availble and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0." - ) + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y') + ) + + +@unittest.skipIf( + core.is_compiled_with_cuda() + and ( + core.cudnn_version() < 8100 + or paddle.device.cuda.get_device_capability()[0] < 8 + ), + "run test when gpu is availble and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.", +) class TestElementwiseBF16Op(OpTest): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum @@ -77,7 +76,7 @@ class TestElementwiseBF16Op(OpTest): y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32) self.inputs = { 'X': convert_float_to_uint16(x), - 'Y': convert_float_to_uint16(y) + 'Y': convert_float_to_uint16(y), } self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))} @@ -101,9 +100,9 @@ class TestElementwiseBF16Op(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMaxOp_scalar(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum @@ -114,97 +113,98 @@ class TestElementwiseMaxOp_scalar(TestElementwiseOp): class TestElementwiseMaxOp_Vector(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum - x = np.random.random((100, )).astype("float64") - sgn = np.random.choice([-1, 1], (100, )).astype("float64") - y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64") + x = np.random.random((100,)).astype("float64") + sgn = np.random.choice([-1, 1], (100,)).astype("float64") + y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64") self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 0} self.outputs = { - 'Out': np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class 
TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64) sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (50, 2)).astype(np.float64) + y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1) + ) } class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_max" self.python_api = paddle.maximum x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64) sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64) + y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64) self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py index e4c08742b01c3275f4341365a1b8a4b3fb36b497..5a2cdc691faeb903ec83fd2c8b473f0083d5f2d1 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestElementwiseOp(OpTest): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum @@ -49,22 +48,20 @@ class TestElementwiseOp(OpTest): self.check_grad(['X', 'Y'], 'Out', check_eager=True) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y') + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseMinOp_scalar(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum @@ -75,104 +72,104 @@ class TestElementwiseMinOp_scalar(TestElementwiseOp): class TestElementwiseMinOp_Vector(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum - x = np.random.random((100, )).astype("float64") - sgn = np.random.choice([-1, 1], (100, )).astype("float64") - y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64") + x = np.random.random((100,)).astype("float64") + sgn = np.random.choice([-1, 1], (100,)).astype("float64") + y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64") self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMinOp_broadcast_0(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 0} self.outputs = { - 'Out': np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwiseMinOp_broadcast_1(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class TestElementwiseMinOp_broadcast_2(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float64) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float64) + sgn = np.random.choice([-1, 1], (100,)).astype(np.float64) + y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestElementwiseMinOp_broadcast_3(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64) sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (25, 4)).astype(np.float64) + y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype( + np.float64 + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 
1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1) + ) } class TestElementwiseMinOp_broadcast_4(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_min" self.python_api = paddle.minimum x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float64) sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float64) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float64) + y = x + sgn * np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float64) self.inputs = {'X': x, 'Y': y} self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} class TestElementwiseMinOpFP16(unittest.TestCase): - def get_out_and_grad(self, x_np, y_np, axis, place, use_fp32=False): assert x_np.dtype == np.float16 assert y_np.dtype == np.float16 @@ -188,8 +185,11 @@ class TestElementwiseMinOpFP16(unittest.TestCase): y.stop_gradient = False z = fluid.layers.elementwise_min(x, y, axis) x_g, y_g = paddle.grad([z], [x, y]) - return z.numpy().astype(dtype), x_g.numpy().astype( - dtype), y_g.numpy().astype(dtype) + return ( + z.numpy().astype(dtype), + x_g.numpy().astype(dtype), + y_g.numpy().astype(dtype), + ) def check_main(self, x_shape, y_shape, axis=-1): if not paddle.is_compiled_with_cuda(): @@ -201,8 +201,9 @@ class TestElementwiseMinOpFP16(unittest.TestCase): x_np = np.random.random(size=x_shape).astype(np.float16) y_np = np.random.random(size=y_shape).astype(np.float16) - z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place, - False) + z_1, x_g_1, y_g_1 = self.get_out_and_grad( + x_np, y_np, axis, place, False + ) z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True) np.testing.assert_array_equal(z_1, z_2) np.testing.assert_array_equal(x_g_1, x_g_2) @@ -210,11 +211,11 @@ class TestElementwiseMinOpFP16(unittest.TestCase): def test_main(self): self.check_main((13, 17), (13, 17)) - self.check_main((10, 3, 4), (1, )) - self.check_main((100, ), (100, )) - self.check_main((100, 3, 2), (100, ), 0) - self.check_main((2, 100, 3), (100, ), 1) - self.check_main((2, 3, 100), (100, )) + self.check_main((10, 3, 4), (1,)) + self.check_main((100,), (100,)) + self.check_main((100, 3, 2), (100,), 0) + self.check_main((2, 100, 3), (100,), 1) + self.check_main((2, 3, 100), (100,)) self.check_main((2, 25, 4, 1), (25, 4), 1) self.check_main((2, 10, 2, 5), (2, 10, 1, 5)) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py index 8c4ba799171fd693508313349a9e33f693b9362c..8969d76ce5165e42a5dca8682a8878ca6b63f67f 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py @@ -22,7 +22,6 @@ import random class TestElementwiseModOp(OpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -37,7 +36,7 @@ class TestElementwiseModOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -61,7 +60,6 @@ class TestElementwiseModOp(OpTest): class TestElementwiseModOp_scalar(TestElementwiseModOp): - def init_input_output(self): scale_x = random.randint(0, 100000000) scale_y = random.randint(1, 100000000) @@ -71,7 +69,6 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp): class TestElementwiseModOpFloat(TestElementwiseModOp): - def init_dtype(self): 
self.dtype = np.float32 @@ -88,7 +85,6 @@ class TestElementwiseModOpFloat(TestElementwiseModOp): class TestElementwiseModOpFp16(TestElementwiseModOp): - def init_dtype(self): self.dtype = np.float16 @@ -105,13 +101,11 @@ class TestElementwiseModOpFp16(TestElementwiseModOp): class TestElementwiseModOpDouble(TestElementwiseModOpFloat): - def init_dtype(self): self.dtype = np.float64 class TestRemainderOp(unittest.TestCase): - def _executed_api(self, x, y, name=None): return paddle.remainder(x, y, name) @@ -135,7 +129,7 @@ class TestRemainderOp(unittest.TestCase): self.assertEqual((np_z == z_expected).all(), True) np_x = np.array([-3.3, 11.5, -2, 3.5]) - np_y = np.array([-1.2, 2., 3.3, -2.3]) + np_y = np.array([-1.2, 2.0, 3.3, -2.3]) x = paddle.to_tensor(np_x) y = paddle.to_tensor(np_y) z = x % y @@ -152,13 +146,11 @@ class TestRemainderOp(unittest.TestCase): class TestRemainderInplaceOp(TestRemainderOp): - def _executed_api(self, x, y, name=None): return x.remainder_(y, name) class TestRemainderInplaceBroadcastSuccess(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 4).astype('float') self.y_numpy = np.random.rand(3, 4).astype('float') @@ -174,17 +166,17 @@ class TestRemainderInplaceBroadcastSuccess(unittest.TestCase): paddle.enable_static() -class TestRemainderInplaceBroadcastSuccess2(TestRemainderInplaceBroadcastSuccess - ): - +class TestRemainderInplaceBroadcastSuccess2( + TestRemainderInplaceBroadcastSuccess +): def init_data(self): self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float') self.y_numpy = np.random.rand(3, 1).astype('float') -class TestRemainderInplaceBroadcastSuccess3(TestRemainderInplaceBroadcastSuccess - ): - +class TestRemainderInplaceBroadcastSuccess3( + TestRemainderInplaceBroadcastSuccess +): def init_data(self): self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float') self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float') diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index 05ed9cc9ded0387cd3d8c4dbc2e3e492c440bacb..cc3cd9be8236c144df5d501e933a38450c8f99d1 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -20,11 +20,14 @@ import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard -from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + skip_check_grad_ci, + convert_float_to_uint16, +) class ElementwiseMulOp(OpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -39,7 +42,7 @@ class ElementwiseMulOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -50,23 +53,27 @@ class ElementwiseMulOp(OpTest): def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad(['X', 'Y'], - 'Out', - check_dygraph=(self.use_mkldnn == False)) + self.check_grad( + ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False) + ) def test_check_grad_ingore_x(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad( + ['Y'], 
+ 'Out', + no_grad_set=set("X"), + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_ingore_y(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False)) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + check_dygraph=(self.use_mkldnn == False), + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -81,7 +88,6 @@ class ElementwiseMulOp(OpTest): class TestBF16ElementwiseMulOp(OpTest): - def setUp(self): self.op_type = "elementwise_mul" self.dtype = np.uint16 @@ -93,9 +99,12 @@ class TestBF16ElementwiseMulOp(OpTest): self.axis = -1 self.inputs = { - 'X': - OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)), - 'Y': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y)) + 'X': OpTest.np_dtype_to_fluid_dtype( + convert_float_to_uint16(self.x) + ), + 'Y': OpTest.np_dtype_to_fluid_dtype( + convert_float_to_uint16(self.y) + ), } self.outputs = {'Out': convert_float_to_uint16(self.out)} self.attrs = {'axis': self.axis, 'use_mkldnn': False} @@ -114,33 +123,31 @@ class TestBF16ElementwiseMulOp(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseMulOp_scalar(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 3, 4).astype(np.float64), - 'Y': np.random.rand(1).astype(np.float64) + 'Y': np.random.rand(1).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_Vector(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.random((100, )).astype("float64"), - 'Y': np.random.random((100, )).astype("float64") + 'X': np.random.random((100,)).astype("float64"), + 'Y': np.random.random((100,)).astype("float64"), } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} self.init_kernel_type() class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -151,12 +158,11 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 100, 3).astype(np.float64), - 'Y': np.random.rand(100).astype(np.float64) + 'Y': np.random.rand(100).astype(np.float64), } self.attrs = {'axis': 1} @@ -167,12 +173,11 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float64), - 'Y': np.random.rand(100).astype(np.float64) + 'Y': np.random.rand(100).astype(np.float64), } self.outputs = { @@ -182,12 +187,11 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(np.float64), - 'Y': np.random.rand(10, 12).astype(np.float64) + 'Y': np.random.rand(10, 12).astype(np.float64), } self.attrs = {'axis': 1} @@ -198,68 +202,63 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): class 
TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 2, 11).astype(np.float64), - 'Y': np.random.rand(10, 1, 11).astype(np.float64) + 'Y': np.random.rand(10, 1, 11).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 4, 2, 3).astype(np.float64), - 'Y': np.random.rand(10, 4, 1, 3).astype(np.float64) + 'Y': np.random.rand(10, 4, 1, 3).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestElementwiseMulOpFp16(ElementwiseMulOp): - def init_dtype(self): self.dtype = np.float16 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float64), - 'Y': np.random.rand(1, 1, 100).astype(np.float64) + 'Y': np.random.rand(1, 1, 100).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(30, 3, 1, 5).astype(np.float64), - 'Y': np.random.rand(30, 1, 4, 1).astype(np.float64) + 'Y': np.random.rand(30, 1, 4, 1).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): - def setUp(self): self.op_type = "elementwise_mul" self.inputs = { 'X': np.random.rand(10, 10).astype(np.float64), - 'Y': np.random.rand(2, 2, 10, 10).astype(np.float64) + 'Y': np.random.rand(2, 2, 10, 10).astype(np.float64), } self.attrs = {'axis': 2} @@ -271,14 +270,15 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): class TestElementwiseMulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_mul must be Variable. 
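The complex-valued cases later in this file (TestComplexElementwiseMulOp and its real/complex variant), like the elementwise_div counterparts earlier in the patch, keep supplying the same analytic gradients after reformatting. The sketch below is illustrative and not taken from the patch; it only restates that conjugate convention in standalone NumPy, with arbitrary shapes and values.

# Illustrative sketch of the user_defined_grads the complex tests pass to check_grad.
import numpy as np

rng = np.random.default_rng(0)
x = rng.random((2, 3)) + 1j * rng.random((2, 3))
y = rng.random((2, 3)) + 1j * rng.random((2, 3))
grad_out = np.ones_like(x)  # unit upstream gradient, as in the tests

# out = x * y
grad_x_mul = grad_out * np.conj(y)
grad_y_mul = grad_out * np.conj(x)

# out = x / y
grad_x_div = grad_out / np.conj(y)
grad_y_div = -grad_out * np.conj(x / y / y)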
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1) # the input dtype of elementwise_mul must be float16 or float32 or float64 or int32 or int64 @@ -289,7 +289,6 @@ class TestElementwiseMulOpError(unittest.TestCase): class TestComplexElementwiseMulOp(OpTest): - def setUp(self): self.op_type = "elementwise_mul" self.init_base_dtype() @@ -298,7 +297,7 @@ class TestComplexElementwiseMulOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -307,17 +306,18 @@ class TestComplexElementwiseMulOp(OpTest): self.dtype = np.float64 def init_input_output(self): - self.x = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) - self.y = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + self.x = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) + self.y = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) self.out = self.x * self.y def init_grad_input_output(self): - self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones( - (2, 3, 4, 5), self.dtype) + self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones( + (2, 3, 4, 5), self.dtype + ) self.grad_x = self.grad_out * np.conj(self.y) self.grad_y = self.grad_out * np.conj(self.x) @@ -325,38 +325,44 @@ class TestComplexElementwiseMulOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + ) class TestRealComplexElementwiseMulOp(TestComplexElementwiseMulOp): - def init_input_output(self): self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype) - self.y = np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( - (2, 3, 4, 5)).astype(self.dtype) + self.y = np.random.random((2, 3, 4, 5)).astype( + self.dtype + ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype) self.out = self.x * self.y def init_grad_input_output(self): - self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * 
np.ones( - (2, 3, 4, 5), self.dtype) + self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones( + (2, 3, 4, 5), self.dtype + ) self.grad_x = np.real(self.grad_out * np.conj(self.y)) self.grad_y = self.grad_out * np.conj(self.x) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py index 875da9df79bfe9266bb249a4220a02731ca784f3..3f816b616501348f49abee214cfa5550e070f46a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py @@ -25,7 +25,6 @@ from decorator_helper import prog_scope class TestElementwiseMulDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -41,11 +40,9 @@ class TestElementwiseMulDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -57,7 +54,6 @@ class TestElementwiseMulDoubleGradCheck(unittest.TestCase): class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -73,11 +69,9 @@ class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -89,7 +83,6 @@ class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase): class TestElementwiseAddDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -105,11 +98,9 @@ class TestElementwiseAddDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -121,7 +112,6 @@ class TestElementwiseAddDoubleGradCheck(unittest.TestCase): class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. 
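The broadcast variants in this file (here and in the triple-grad checks further down) build y with shape[:-1], so the checker exercises the reduction that broadcasting imposes on the analytic gradient. A small illustrative NumPy sketch of that rule, using arbitrary sizes rather than the ones the tests pick:

# Illustrative only: gradients w.r.t. a broadcast operand must be summed
# over the broadcast axes so they come back in that operand's shape.
import numpy as np

x = np.random.uniform(-1, 1, (2, 3, 4))
y = np.random.uniform(-1, 1, (2, 3))        # plays the role of shape[:-1]

out = x + y[..., np.newaxis]                # y broadcast along the last axis
grad_out = np.ones_like(out)                # upstream gradient of sum(out)

grad_x = grad_out                           # matches x's shape directly
grad_y = grad_out.sum(axis=-1)              # reduce over the broadcast axis

assert grad_x.shape == x.shape
assert grad_y.shape == y.shape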
@@ -137,11 +127,9 @@ class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -153,7 +141,6 @@ class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase): class TestElementwiseSubDoubleGradCheck(unittest.TestCase): - def subtract_wrapper(self, x): return paddle.subtract(x[0], x[1]) @@ -172,16 +159,16 @@ class TestElementwiseSubDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.subtract_wrapper, - [x, y], - out, - x_init=[x_arr, y_arr], - place=place) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.subtract_wrapper, + [x, y], + out, + x_init=[x_arr, y_arr], + place=place, + ) def test_grad(self): paddle.enable_static() @@ -193,7 +180,6 @@ class TestElementwiseSubDoubleGradCheck(unittest.TestCase): class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -209,11 +195,9 @@ class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -225,7 +209,6 @@ class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase): class TestElementwiseDivDoubleGradCheck(unittest.TestCase): - def divide_wrapper(self, x): return paddle.divide(x[0], x[1]) @@ -245,18 +228,17 @@ class TestElementwiseDivDoubleGradCheck(unittest.TestCase): y_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr[np.abs(y_arr) < 0.005] = 0.02 - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps, - atol=1e-3) - gradient_checker.double_grad_check_for_dygraph(self.divide_wrapper, - [x, y], - out, - x_init=[x_arr, y_arr], - place=place, - atol=1e-3) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3 + ) + gradient_checker.double_grad_check_for_dygraph( + self.divide_wrapper, + [x, y], + out, + x_init=[x_arr, y_arr], + place=place, + atol=1e-3, + ) def test_grad(self): paddle.enable_static() @@ -268,7 +250,6 @@ class TestElementwiseDivDoubleGradCheck(unittest.TestCase): class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. 
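Both elementwise_div double-grad tests in this file nudge denominators away from zero (y_arr[np.abs(y_arr) < 0.005] = 0.02) and loosen atol to 1e-3. The sketch below is not from the patch and uses an assumed step size; it only shows why a finite-difference check is meaningless once the step can straddle the pole of x / y.

# Illustrative only; eps = 0.005 is an assumption for the demo, not the
# step the gradient checker actually uses.
def central_diff(f, y, eps):
    return (f(y + eps) - f(y - eps)) / (2.0 * eps)


def f(y, x=1.0):
    return x / y


eps = 0.005
for y in (0.002, 0.02):                     # below vs. above the clamp
    fd = central_diff(f, y, eps)
    analytic = -1.0 / y**2
    print("y=%g  finite-diff=%.1f  analytic=%.1f" % (y, fd, analytic))
# y=0.002 straddles the pole (wrong sign, wrong magnitude);
# y=0.02 lands close to the analytic derivative.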
@@ -285,12 +266,9 @@ class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase): y_arr = np.random.uniform(-1, 1, shape[1:-1]).astype(dtype) y_arr[np.abs(y_arr) < 0.005] = 0.02 - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps, - atol=1e-3) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3 + ) def test_grad(self): paddle.enable_static() @@ -302,7 +280,6 @@ class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase): class TestElementwiseAddTripleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -318,11 +295,9 @@ class TestElementwiseAddTripleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -334,7 +309,6 @@ class TestElementwiseAddTripleGradCheck(unittest.TestCase): class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -350,11 +324,9 @@ class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -366,7 +338,6 @@ class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase): class TestElementwiseMulTripleGradCheck(unittest.TestCase): - def multiply_wrapper(self, x): return paddle.multiply(x[0], x[1]) @@ -385,17 +356,17 @@ class TestElementwiseMulTripleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.multiply_wrapper, - [x, y], - out, - x_init=[x_arr, y_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.multiply_wrapper, + [x, y], + out, + x_init=[x_arr, y_arr], + place=place, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_grad(self): @@ -408,7 +379,6 @@ class TestElementwiseMulTripleGradCheck(unittest.TestCase): class TestElementwiseMulBroadcastTripleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. 
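The dygraph variants above (double_grad_check_for_dygraph, triple_grad_check_for_dygraph) are handed a wrapper such as multiply_wrapper rather than the paddle API itself; judging from the wrappers defined in this file, that callable receives the whole input list as a single argument. A minimal illustration, not taken from the patch:

# Illustrative only: mirrors multiply_wrapper, lifted out of the test class
# so it can be run on its own.
import paddle


def multiply_wrapper(xs):
    # xs is the list of input tensors the checker forwards as one argument
    return paddle.multiply(xs[0], xs[1])


a = paddle.to_tensor([1.0, 2.0, 3.0])
b = paddle.to_tensor([4.0, 5.0, 6.0])
print(multiply_wrapper([a, b]))  # elementwise product of a and b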
@@ -424,11 +394,9 @@ class TestElementwiseMulBroadcastTripleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, shape).astype(dtype) y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py index 4050e4a4c607d6cf5060139b793cf74498457a59..53cb18f8aa33bc542e09fb18de7de3c2717086c8 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py @@ -26,13 +26,12 @@ def pow_grad(x, y, dout): class TestElementwisePowOp(OpTest): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(1, 2, [20, 5]).astype("float64"), - 'Y': np.random.uniform(1, 2, [20, 5]).astype("float64") + 'Y': np.random.uniform(1, 2, [20, 5]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} @@ -50,75 +49,70 @@ class TestElementwisePowOp(OpTest): class TestElementwisePowOp_big_shape_1(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(1, 2, [10, 10]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_big_shape_2(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(1, 2, [10, 10]).astype("float64"), - 'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float64") + 'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwisePowOp_scalar(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float64), - 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64) + 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_tensor(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [100]).astype("float64"), - 'Y': np.random.uniform(1, 3, [100]).astype("float64") + 'Y': np.random.uniform(1, 3, [100]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_broadcast_0(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_broadcast_1(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64"), } self.attrs = {'axis': 1} self.outputs = { @@ -127,51 +121,49 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp): class TestElementwisePowOp_broadcast_2(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64"), } self.attrs = {'axis': 0} self.outputs = { - 'Out': np.power(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.power( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwisePowOp_broadcast_3(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64"), } self.attrs = {'axis': 1} self.outputs = { - 'Out': np.power(self.inputs['X'], - self.inputs['Y'].reshape(1, 20, 5, 1)) + 'Out': np.power( + self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1) + ) } class TestElementwisePowOp_broadcast_4(TestElementwisePowOp): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float64"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOpInt(OpTest): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow @@ -186,7 +178,6 @@ class TestElementwisePowOpInt(OpTest): class TestElementwisePowGradOpInt(unittest.TestCase): - def setUp(self): self.x = np.asarray([1, 3, 6]) self.y = np.asarray([1, 1, 1]) @@ -194,11 +185,13 @@ class 
TestElementwisePowGradOpInt(unittest.TestCase): # dout = 1 self.grad_res = np.asarray([1, 1, 1]) # dx = dout * y * pow(x, y-1) - self.grad_x = self.grad_res * self.y * (self.x - **(self.y - 1)).astype("int") + self.grad_x = ( + self.grad_res * self.y * (self.x ** (self.y - 1)).astype("int") + ) # dy = dout * log(x) * pow(x, y) - self.grad_y = (self.grad_res * np.log(self.x) * - (self.x**self.y)).astype("int") + self.grad_y = ( + self.grad_res * np.log(self.x) * (self.x**self.y) + ).astype("int") def test_grad(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -220,13 +213,12 @@ class TestElementwisePowGradOpInt(unittest.TestCase): class TestElementwisePowOpFP16(OpTest): - def setUp(self): self.op_type = "elementwise_pow" self.python_api = paddle.pow self.inputs = { 'X': np.random.uniform(1, 2, [20, 5]).astype("float16"), - 'Y': np.random.uniform(1, 2, [20, 5]).astype("float16") + 'Y': np.random.uniform(1, 2, [20, 5]).astype("float16"), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} @@ -237,12 +229,14 @@ class TestElementwisePowOpFP16(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=pow_grad(self.inputs['X'], - self.inputs['Y'], - 1 / self.inputs['X'].size), - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=pow_grad( + self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size + ), + check_eager=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py index 9c8a0002bd03a81bce0eb64eabc8df1ad28ac50b..f8f050d6f6b086e21e353ecdae0c4bdd50b8679f 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py @@ -21,12 +21,11 @@ from paddle.fluid.framework import _test_eager_guard class TestElementwiseOp(OpTest): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64") + 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} @@ -37,20 +36,17 @@ class TestElementwiseOp(OpTest): self.check_grad(['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y') + ) class TestBF16ElementwiseOp(OpTest): - def setUp(self): self.op_type = "elementwise_sub" self.dtype = np.uint16 @@ -60,7 +56,7 @@ class TestBF16ElementwiseOp(OpTest): self.inputs = { 'X': convert_float_to_uint16(x), - 'Y': convert_float_to_uint16(y) + 'Y': convert_float_to_uint16(y), } self.outputs = {'Out': convert_float_to_uint16(out)} @@ -78,36 +74,34 @@ class TestBF16ElementwiseOp(OpTest): @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+) class TestElementwiseSubOp_scalar(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 3, 4).astype(np.float64), - 'Y': np.random.rand(1).astype(np.float64) + 'Y': np.random.rand(1).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_Vector(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { - 'X': np.random.random((100, )).astype("float64"), - 'Y': np.random.random((100, )).astype("float64") + 'X': np.random.random((100,)).astype("float64"), + 'Y': np.random.random((100,)).astype("float64"), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(100, 3, 2).astype(np.float64), - 'Y': np.random.rand(100).astype(np.float64) + 'Y': np.random.rand(100).astype(np.float64), } self.attrs = {'axis': 0} @@ -117,12 +111,11 @@ class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 100, 3).astype(np.float64), - 'Y': np.random.rand(100).astype(np.float64) + 'Y': np.random.rand(100).astype(np.float64), } self.attrs = {'axis': 1} @@ -132,12 +125,11 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float64), - 'Y': np.random.rand(100).astype(np.float64) + 'Y': np.random.rand(100).astype(np.float64), } self.outputs = { @@ -146,12 +138,11 @@ class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(np.float64), - 'Y': np.random.rand(10, 12).astype(np.float64) + 'Y': np.random.rand(10, 12).astype(np.float64), } self.attrs = {'axis': 1} @@ -161,45 +152,41 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): class TestElementwiseSubOp_broadcast_4(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 5, 3, 12).astype(np.float64), - 'Y': np.random.rand(2, 5, 1, 12).astype(np.float64) + 'Y': np.random.rand(2, 5, 1, 12).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_1(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(2, 3, 100).astype(np.float64), - 'Y': np.random.rand(1, 1, 100).astype(np.float64) + 'Y': np.random.rand(1, 1, 100).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_2(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 3, 1, 4).astype(np.float64), - 'Y': np.random.rand(10, 1, 12, 1).astype(np.float64) + 'Y': np.random.rand(10, 1, 12, 1).astype(np.float64), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp): - def setUp(self): self.op_type = "elementwise_sub" self.inputs = { 'X': np.random.rand(10, 12).astype(np.float64), - 'Y': np.random.rand(2, 3, 10, 12).astype(np.float64) + 'Y': np.random.rand(2, 3, 10, 
12).astype(np.float64), } self.attrs = {'axis': 2} @@ -210,7 +197,6 @@ class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp): class TestComplexElementwiseSubOp(OpTest): - def setUp(self): self.op_type = "elementwise_sub" self.dtype = np.float64 @@ -220,7 +206,7 @@ class TestComplexElementwiseSubOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -230,14 +216,17 @@ class TestComplexElementwiseSubOp(OpTest): def init_input_output(self): self.x = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.y = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.out = self.x - self.y def init_grad_input_output(self): - self.grad_out = np.ones( - self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype) + self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones( + self.shape, self.dtype + ) self.grad_x = self.grad_out self.grad_y = -self.grad_out @@ -245,43 +234,49 @@ class TestComplexElementwiseSubOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out]) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + ) class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp): - def init_input_output(self): self.x = np.random.random(self.shape).astype(self.dtype) self.y = np.random.random(self.shape).astype( - self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.shape).astype(self.dtype) self.out = self.x - self.y def init_grad_input_output(self): - self.grad_out = np.ones( - self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype) + self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones( + self.shape, self.dtype + ) self.grad_x = np.real(self.grad_out) self.grad_y = -self.grad_out class TestSubtractApi(unittest.TestCase): - def _executed_api(self, x, y, name=None): return paddle.subtract(x, y, name) @@ -299,7 +294,7 @@ class TestSubtractApi(unittest.TestCase): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -308,7 +303,7 @@ class TestSubtractApi(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = 
exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([1., -2., 2.]) + z_expected = np.array([1.0, -2.0, 2.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -319,18 +314,16 @@ class TestSubtractApi(unittest.TestCase): y = fluid.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy() - z_expected = np.array([1., -2., 2.]) + z_expected = np.array([1.0, -2.0, 2.0]) self.assertEqual((np_z == z_expected).all(), True) class TestSubtractInplaceApi(TestSubtractApi): - def _executed_api(self, x, y, name=None): return x.subtract_(y, name) class TestSubtractInplaceBroadcastSuccess(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 4).astype('float') self.y_numpy = np.random.rand(3, 4).astype('float') @@ -347,21 +340,18 @@ class TestSubtractInplaceBroadcastSuccess(unittest.TestCase): class TestSubtractInplaceBroadcastSuccess2(TestSubtractInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float') self.y_numpy = np.random.rand(3, 1).astype('float') class TestSubtractInplaceBroadcastSuccess3(TestSubtractInplaceBroadcastSuccess): - def init_data(self): self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float') self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float') class TestSubtractInplaceBroadcastError(unittest.TestCase): - def init_data(self): self.x_numpy = np.random.rand(3, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') @@ -380,21 +370,18 @@ class TestSubtractInplaceBroadcastError(unittest.TestCase): class TestSubtractInplaceBroadcastError2(TestSubtractInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') class TestSubtractInplaceBroadcastError3(TestSubtractInplaceBroadcastError): - def init_data(self): self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float') self.y_numpy = np.random.rand(2, 3, 4).astype('float') class TestFloatElementwiseSubop(unittest.TestCase): - def func_dygraph_sub(self): paddle.disable_static() @@ -407,26 +394,23 @@ class TestFloatElementwiseSubop(unittest.TestCase): # normal case: tensor - tensor expect_out = np_a - np_b actual_out = tensor_a - tensor_b - np.testing.assert_allclose(actual_out, - expect_out, - rtol=1e-07, - atol=1e-07) + np.testing.assert_allclose( + actual_out, expect_out, rtol=1e-07, atol=1e-07 + ) # normal case: tensor - scalar expect_out = np_a - 1 actual_out = tensor_a - 1 - np.testing.assert_allclose(actual_out, - expect_out, - rtol=1e-07, - atol=1e-07) + np.testing.assert_allclose( + actual_out, expect_out, rtol=1e-07, atol=1e-07 + ) # normal case: scalar - tenor expect_out = 1 - np_a actual_out = 1 - tensor_a - np.testing.assert_allclose(actual_out, - expect_out, - rtol=1e-07, - atol=1e-07) + np.testing.assert_allclose( + actual_out, expect_out, rtol=1e-07, atol=1e-07 + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_ema.py b/python/paddle/fluid/tests/unittests/test_ema.py index 1e2434a6a52564d119945204fa1570cc79c99bec..fe4178c825980573d2be6dff933401b54d980613 100644 --- a/python/paddle/fluid/tests/unittests/test_ema.py +++ b/python/paddle/fluid/tests/unittests/test_ema.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestExponentialMovingAverage(unittest.TestCase): - def setUp(self): self._places = [fluid.CPUPlace()] if fluid.core.is_compiled_with_cuda(): @@ -32,19 +31,21 @@ class TestExponentialMovingAverage(unittest.TestCase): with 
fluid.program_guard(self._train_program, self._startup_prog): with fluid.unique_name.guard(): data = fluid.data(name='x', shape=[-1, 5], dtype='float32') - hidden = fluid.layers.fc(input=data, - size=10, - param_attr=self._param_name) + hidden = fluid.layers.fc( + input=data, size=10, param_attr=self._param_name + ) cost = paddle.mean(hidden) self._test_program = fluid.default_main_program().clone( - for_test=True) + for_test=True + ) optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimizer.minimize(cost) self._ema = fluid.optimizer.ExponentialMovingAverage( - self._ema_decay) + self._ema_decay + ) self._ema.update() def train(self, place): @@ -55,16 +56,19 @@ class TestExponentialMovingAverage(unittest.TestCase): for pass_id in range(2): for batch_id in range(3): data = np.random.random(size=(10, 5)).astype('float32') - tmp_param = np.array(fluid.global_scope().find_var( - self._param_name).get_tensor()) + tmp_param = np.array( + fluid.global_scope().find_var(self._param_name).get_tensor() + ) exe.run(program=self._train_program, feed={'x': data}) - tmp_param = np.array(fluid.global_scope().find_var( - self._param_name).get_tensor()) + tmp_param = np.array( + fluid.global_scope().find_var(self._param_name).get_tensor() + ) params.append(tmp_param) with self._ema.apply(exe): - final_ema = np.array(fluid.global_scope().find_var( - self._param_name).get_tensor()) + final_ema = np.array( + fluid.global_scope().find_var(self._param_name).get_tensor() + ) data = np.random.random(size=(10, 5)).astype('float32') exe.run(program=self._test_program, feed={'x': data}) return params, final_ema @@ -75,9 +79,11 @@ class TestExponentialMovingAverage(unittest.TestCase): manu_ema = np.zeros_like(final_ema) if len(params) > 0: for param in params: - manu_ema = self._ema_decay * manu_ema + ( - 1 - self._ema_decay) * param - manu_ema = manu_ema / (1.0 - self._ema_decay**len(params)) + manu_ema = ( + self._ema_decay * manu_ema + + (1 - self._ema_decay) * param + ) + manu_ema = manu_ema / (1.0 - self._ema_decay ** len(params)) np.testing.assert_allclose(manu_ema, final_ema, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_ema_fleet.py b/python/paddle/fluid/tests/unittests/test_ema_fleet.py index 206e3cf736277b0f4f5680a9d3e7b842e734c650..8a3a6993d050d648b45f18617071811bd2c947bd 100644 --- a/python/paddle/fluid/tests/unittests/test_ema_fleet.py +++ b/python/paddle/fluid/tests/unittests/test_ema_fleet.py @@ -24,7 +24,6 @@ def gen_data(): class TestFleetStaticEMA(unittest.TestCase): - def setUp(self): self._places = [paddle.CPUPlace()] if paddle.device.is_compiled_with_cuda(): @@ -41,17 +40,19 @@ class TestFleetStaticEMA(unittest.TestCase): with static.program_guard(self._train_program, self._startup_prog): with utils.unique_name.guard(): data = static.data(name='x', shape=[-1, 5], dtype='float32') - hidden = static.nn.fc(x=data, - size=10, - weight_attr=self._param_name) + hidden = static.nn.fc( + x=data, size=10, weight_attr=self._param_name + ) cost = paddle.mean(hidden) self._test_program = static.default_main_program().clone( - for_test=True) + for_test=True + ) optimizer = paddle.optimizer.Adam(learning_rate=0.001) optimizer = paddle.distributed.fleet.distributed_optimizer( - optimizer, strategy) + optimizer, strategy + ) optimizer.minimize(cost) self._ema = static.ExponentialMovingAverage(self._ema_decay) @@ -65,13 +66,19 @@ class TestFleetStaticEMA(unittest.TestCase): for pass_id in range(2): for batch_id in range(3): exe.run(program=self._train_program, feed={'x': gen_data()}) - 
tmp_param = np.array(static.global_scope().find_var( - self._param_name).get_tensor()) + tmp_param = np.array( + static.global_scope() + .find_var(self._param_name) + .get_tensor() + ) params.append(tmp_param) with self._ema.apply(exe, restore): - final_ema = np.array(static.global_scope().find_var( - self._param_name).get_tensor()) + final_ema = np.array( + static.global_scope() + .find_var(self._param_name) + .get_tensor() + ) exe.run(program=self._test_program, feed={'x': gen_data()}) if not restore: self._ema.restore(exe) @@ -85,9 +92,11 @@ class TestFleetStaticEMA(unittest.TestCase): manu_ema = np.zeros_like(final_ema) if len(params) > 0: for param in params: - manu_ema = self._ema_decay * manu_ema + ( - 1 - self._ema_decay) * param - manu_ema = manu_ema / (1.0 - self._ema_decay**len(params)) + manu_ema = ( + self._ema_decay * manu_ema + + (1 - self._ema_decay) * param + ) + manu_ema = manu_ema / (1.0 - self._ema_decay ** len(params)) np.testing.assert_allclose(manu_ema, final_ema, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index db3af060d26452c8f70e5279b328865b7a7a911d..68f9696ffe226ee5d0f37e66386d5f80bc6bb812 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -19,7 +19,6 @@ import unittest class TestEmbeddingIdStopGradientBase(unittest.TestCase): - def setUp(self): self.reshape_times = 1 self.iteration = 10 @@ -70,17 +69,15 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): fetch_val = None for _ in range(self.iteration): - fetch_val = exe.run(feed={ - x_1.name: x1_data, - x_2.name: x2_data - }, - fetch_list=[emb])[0] + fetch_val = exe.run( + feed={x_1.name: x1_data, x_2.name: x2_data}, + fetch_list=[emb], + )[0] return fetch_val class TestEmbeddingIdStopGradient2(TestEmbeddingIdStopGradientBase): - def setUp(self): self.reshape_times = 100 self.iteration = 10 diff --git a/python/paddle/fluid/tests/unittests/test_empty_like_op.py b/python/paddle/fluid/tests/unittests/test_empty_like_op.py index 6c1d179fa4efc31166d8308c6de8cca6794afe1f..4ce4ab6a6d52700d043c8243d94c6e0aed8584b7 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_like_op.py @@ -21,37 +21,43 @@ from paddle.static import program_guard, Program class TestEmptyLikeAPICommon(unittest.TestCase): - def __check_out__(self, out): data_type = convert_dtype(out.dtype) self.assertEqual( - data_type, self.dst_dtype, - 'dtype should be %s, but get %s' % (self.dst_dtype, data_type)) + data_type, + self.dst_dtype, + 'dtype should be %s, but get %s' % (self.dst_dtype, data_type), + ) shape = out.shape self.assertTupleEqual( - shape, self.dst_shape, - 'shape should be %s, but get %s' % (self.dst_shape, shape)) + shape, + self.dst_shape, + 'shape should be %s, but get %s' % (self.dst_shape, shape), + ) if data_type in ['float32', 'float64', 'int32', 'int64']: max_value = np.nanmax(out) min_value = np.nanmin(out) always_non_full_zero = max_value >= min_value always_full_zero = max_value == 0.0 and min_value == 0.0 - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) elif data_type in ['bool']: total_num = out.size true_num = np.sum(out == True) false_num = 
np.sum(out == False) - self.assertTrue(total_num == true_num + false_num, - 'The value should always be True or False.') + self.assertTrue( + total_num == true_num + false_num, + 'The value should always be True or False.', + ) else: self.assertTrue(False, 'invalid data type') class TestEmptyLikeAPI(TestEmptyLikeAPICommon): - def setUp(self): self.init_config() @@ -69,7 +75,6 @@ class TestEmptyLikeAPI(TestEmptyLikeAPICommon): class TestEmptyLikeAPI2(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("float64") self.dtype = self.x.dtype @@ -78,7 +83,6 @@ class TestEmptyLikeAPI2(TestEmptyLikeAPI): class TestEmptyLikeAPI3(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("int") self.dtype = self.x.dtype @@ -87,7 +91,6 @@ class TestEmptyLikeAPI3(TestEmptyLikeAPI): class TestEmptyLikeAPI4(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("int64") self.dtype = self.x.dtype @@ -96,7 +99,6 @@ class TestEmptyLikeAPI4(TestEmptyLikeAPI): class TestEmptyLikeAPI5(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("bool") self.dtype = self.x.dtype @@ -105,7 +107,6 @@ class TestEmptyLikeAPI5(TestEmptyLikeAPI): class TestEmptyLikeAPI6(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("float64") self.dtype = "float32" @@ -114,7 +115,6 @@ class TestEmptyLikeAPI6(TestEmptyLikeAPI): class TestEmptyLikeAPI7(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("int") self.dtype = "float32" @@ -123,7 +123,6 @@ class TestEmptyLikeAPI7(TestEmptyLikeAPI): class TestEmptyLikeAPI8(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("int64") self.dtype = "float32" @@ -132,7 +131,6 @@ class TestEmptyLikeAPI8(TestEmptyLikeAPI): class TestEmptyLikeAPI9(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("bool") self.dtype = "float32" @@ -141,7 +139,6 @@ class TestEmptyLikeAPI9(TestEmptyLikeAPI): class TestEmptyLikeAPI10(TestEmptyLikeAPI): - def init_config(self): self.x = np.random.random((200, 3)).astype("float32") self.dtype = "bool" @@ -150,7 +147,6 @@ class TestEmptyLikeAPI10(TestEmptyLikeAPI): class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon): - def setUp(self): self.init_config() @@ -164,14 +160,17 @@ class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon): with program_guard(train_program, startup_program): x = np.random.random(self.x_shape).astype(dtype) - data_x = paddle.static.data('x', - shape=self.data_x_shape, - dtype=dtype) + data_x = paddle.static.data( + 'x', shape=self.data_x_shape, dtype=dtype + ) out = paddle.empty_like(data_x) - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) res = exe.run(train_program, feed={'x': x}, fetch_list=[out]) @@ -187,16 +186,13 @@ class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon): class TestEmptyLikeAPI_Static2(TestEmptyLikeAPI_Static): - def init_config(self): self.x_shape = (3, 200, 3) self.data_x_shape = [-1, 200, 3] class TestEmptyError(unittest.TestCase): - def test_attr(self): - def test_dtype(): x = np.random.random((200, 3)).astype("float64") dtype = 'uint8' diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index 
d9994110f1d278ddc00ed3ae778ef701139df0d9..11b66325c1f5b8d2e8875482dbc5ee95c0452706 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -1,4 +1,4 @@ -#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ # Situation 1: Attr(shape) is a list(without tensor) class TestEmptyOp(OpTest): - def setUp(self): self.op_type = "empty" self.init_config() @@ -38,14 +37,18 @@ class TestEmptyOp(OpTest): always_full_zero = max_value == 0.0 and min_value == 0.0 always_non_full_zero = max_value >= min_value - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) elif data_type in ['bool']: total_num = outs[0].size true_num = np.sum(outs[0] == True) false_num = np.sum(outs[0] == False) - self.assertTrue(total_num == true_num + false_num, - 'The value should always be True or False.') + self.assertTrue( + total_num == true_num + false_num, + 'The value should always be True or False.', + ) else: self.assertTrue(False, 'invalid data type') @@ -59,7 +62,6 @@ class TestEmptyOp(OpTest): class TestEmptyOp2(TestEmptyOp): - def init_config(self): shape = [500, 3] dtype = 'float64' @@ -70,7 +72,6 @@ class TestEmptyOp2(TestEmptyOp): class TestEmptyOp3(TestEmptyOp): - def init_config(self): shape = [500, 3] dtype = 'int32' @@ -81,7 +82,6 @@ class TestEmptyOp3(TestEmptyOp): class TestEmptyOp4(TestEmptyOp): - def init_config(self): shape = [500, 3] dtype = 'int64' @@ -92,7 +92,6 @@ class TestEmptyOp4(TestEmptyOp): class TestEmptyOp5(TestEmptyOp): - def init_config(self): shape = [500, 3] dtype = 'bool' @@ -104,7 +103,6 @@ class TestEmptyOp5(TestEmptyOp): # Situation 2: shape is a tensor class TestEmptyOp_ShapeTensor(OpTest): - def setUp(self): self.op_type = "empty" self.init_config() @@ -128,21 +126,24 @@ class TestEmptyOp_ShapeTensor(OpTest): always_full_zero = max_value == 0.0 and min_value == 0.0 always_non_full_zero = max_value >= min_value - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) elif data_type in ['bool']: total_num = outs[0].size true_num = np.sum(outs[0] == True) false_num = np.sum(outs[0] == False) - self.assertTrue(total_num == true_num + false_num, - 'The value should always be True or False.') + self.assertTrue( + total_num == true_num + false_num, + 'The value should always be True or False.', + ) else: self.assertTrue(False, 'invalid data type') # Situation 3: Attr(shape) is a list(with tensor) class TestEmptyOp_ShapeTensorList(OpTest): - def setUp(self): self.op_type = "empty" self.init_config() @@ -156,8 +157,9 @@ class TestEmptyOp_ShapeTensorList(OpTest): shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape, 'dtype': dtype_inner} @@ 
-174,27 +176,32 @@ class TestEmptyOp_ShapeTensorList(OpTest): always_full_zero = max_value == 0.0 and min_value == 0.0 always_non_full_zero = max_value >= min_value - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) elif data_type in ['bool']: total_num = outs[0].size true_num = np.sum(outs[0] == True) false_num = np.sum(outs[0] == False) - self.assertTrue(total_num == true_num + false_num, - 'The value should always be True or False.') + self.assertTrue( + total_num == true_num + false_num, + 'The value should always be True or False.', + ) else: self.assertTrue(False, 'invalid data type') class TestEmptyAPI(unittest.TestCase): - def __check_out__(self, out, dtype='float32'): max_value = np.nanmax(np.array(out)) min_value = np.nanmin(np.array(out)) always_non_full_zero = max_value >= min_value always_full_zero = max_value == 0.0 and min_value == 0.0 - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) def test_dygraph_api_out(self): paddle.disable_static() @@ -233,15 +240,15 @@ class TestEmptyAPI(unittest.TestCase): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 3) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 3) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") - shape_tensor_int64 = fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") - shape_tensor_unknown = fluid.data(name="shape_tensor_unknown", - shape=[-1], - dtype="int64") + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) + shape_tensor_unknown = fluid.data( + name="shape_tensor_unknown", shape=[-1], dtype="int64" + ) out_1 = paddle.empty(shape=[200, 3], dtype=dtype) out_2 = paddle.empty(shape=shape_tensor_int32, dtype=dtype) @@ -259,7 +266,8 @@ class TestEmptyAPI(unittest.TestCase): "shape_tensor_int64": np.array([200, 3]).astype("int64"), "shape_tensor_unknown": np.array([200, 3]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6], + ) self.__check_out__(res_1, dtype) self.__check_out__(res_2, dtype) @@ -270,9 +278,7 @@ class TestEmptyAPI(unittest.TestCase): class TestEmptyError(unittest.TestCase): - def test_attr(self): - def test_dtype(): shape = [200, 3] dtype = 'uint8' diff --git a/python/paddle/fluid/tests/unittests/test_entry_attr.py b/python/paddle/fluid/tests/unittests/test_entry_attr.py index eb5bd30d1c2111a5f00c8fad3f7a1f7a0d13cb66..cfbff2113c3046c24a61989d123399efb035d55c 100644 --- a/python/paddle/fluid/tests/unittests/test_entry_attr.py +++ b/python/paddle/fluid/tests/unittests/test_entry_attr.py @@ -18,14 +18,18 @@ paddle.enable_static() import unittest import paddle.fluid as fluid -from paddle.distributed import ProbabilityEntry, CountFilterEntry, ShowClickEntry +from paddle.distributed import ( + ProbabilityEntry, + CountFilterEntry, + ShowClickEntry, +) class EntryAttrChecks(unittest.TestCase): - def base(self): with self.assertRaises(NotImplementedError): from paddle.distributed.entry_attr import EntryAttr + base = EntryAttr() base._to_attr() @@ -62,18 +66,21 @@ class 
EntryAttrChecks(unittest.TestCase): with fluid.scope_guard(scope): with fluid.program_guard(prog): - input = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) + input = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) prob = ProbabilityEntry(0.5) emb = paddle.static.nn.sparse_embedding( input=input, size=[100, 10], is_test=False, entry=prob, - param_attr=fluid.ParamAttr(name="deep_embedding")) + param_attr=fluid.ParamAttr(name="deep_embedding"), + ) pool = fluid.layers.sequence_pool(input=emb, pool_type="sum") predict = fluid.layers.fc(input=pool, size=2, act='softmax') @@ -92,7 +99,6 @@ class EntryAttrChecks(unittest.TestCase): class TestEntryAttrs(EntryAttrChecks): - def test_base(self): self.base() diff --git a/python/paddle/fluid/tests/unittests/test_entry_attr2.py b/python/paddle/fluid/tests/unittests/test_entry_attr2.py index b65fd12eba7d6c86773735fe1178d388dd77b97b..f4f098798ac01dd69c00cf671871cffa73535ed5 100644 --- a/python/paddle/fluid/tests/unittests/test_entry_attr2.py +++ b/python/paddle/fluid/tests/unittests/test_entry_attr2.py @@ -21,24 +21,26 @@ import paddle.fluid as fluid class EntryAttrChecks(unittest.TestCase): - def embedding_layer(self): prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog): - input = fluid.layers.data(name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False) + input = fluid.layers.data( + name="dnn_data", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) emb = fluid.layers.embedding( input=input, size=[100, 10], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="deep_embedding")) + param_attr=fluid.ParamAttr(name="deep_embedding"), + ) pool = fluid.layers.sequence_pool(input=emb, pool_type="sum") predict = fluid.layers.fc(input=pool, size=2, act='softmax') @@ -53,7 +55,6 @@ class EntryAttrChecks(unittest.TestCase): class TestEntryAttrs(EntryAttrChecks): - def test_embedding_layer(self): self.embedding_layer() diff --git a/python/paddle/fluid/tests/unittests/test_erf_op.py b/python/paddle/fluid/tests/unittests/test_erf_op.py index 338ca508db6663b3c796e927cbf7213b01bb432d..089fdc0a0b4bcf3de12c513b24c8ecf86c8653e8 100644 --- a/python/paddle/fluid/tests/unittests/test_erf_op.py +++ b/python/paddle/fluid/tests/unittests/test_erf_op.py @@ -23,7 +23,6 @@ import paddle.fluid.dygraph as dg class TestErfOp(OpTest): - def setUp(self): self.op_type = "erf" self.dtype = self._init_dtype() @@ -44,7 +43,6 @@ class TestErfOp(OpTest): class TestErfLayer(unittest.TestCase): - def _test_case(self, place): x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64) y_ref = erf(x) diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py index 8822b3e07650d9ec2df5ef4b89b2758600d63bad..e605d8e0e62a4d00a965da6515c0c82592aebbea 100644 --- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py @@ -24,7 +24,6 @@ np.random.seed(0) class TestErfinv(OpTest): - def setUp(self): self.op_type = "erfinv" self.python_api = paddle.erfinv @@ -33,8 +32,9 @@ class TestErfinv(OpTest): self.x = np.random.uniform(-1, 1, size=self.shape).astype(self.dtype) self.res_ref = erfinv(self.x).astype(self.dtype) self.grad_out = np.ones(self.shape, self.dtype) - self.gradient = np.sqrt(np.pi) / 2 * 
np.exp(np.square( - self.res_ref)) * self.grad_out + self.gradient = ( + np.sqrt(np.pi) / 2 * np.exp(np.square(self.res_ref)) * self.grad_out + ) self.inputs = {'X': self.x} self.outputs = {'Out': self.res_ref} @@ -45,20 +45,20 @@ class TestErfinv(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.gradient], - user_defined_grad_outputs=self.grad_out) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.gradient], + user_defined_grad_outputs=self.grad_out, + ) class TestErfinvFP32(TestErfinv): - def init_dtype(self): self.dtype = np.float32 class TestErfinvAPI(unittest.TestCase): - def init_dtype(self): self.dtype = 'float32' @@ -86,7 +86,6 @@ class TestErfinvAPI(unittest.TestCase): run(place) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) @@ -98,7 +97,6 @@ class TestErfinvAPI(unittest.TestCase): run(place) def test_inplace_api(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) diff --git a/python/paddle/fluid/tests/unittests/test_exception.py b/python/paddle/fluid/tests/unittests/test_exception.py index 6d39f9f5f41c516d3b46adc76fe076703993c81f..bb53ae950287eb109af1575ee7a09c6cff56e913 100644 --- a/python/paddle/fluid/tests/unittests/test_exception.py +++ b/python/paddle/fluid/tests/unittests/test_exception.py @@ -21,7 +21,6 @@ import paddle.fluid.core as core class TestException(unittest.TestCase): - def test_exception(self): exception = None try: @@ -34,7 +33,6 @@ class TestException(unittest.TestCase): class TestExceptionNoCStack(unittest.TestCase): - def setUp(self): paddle.enable_static() # test no C++ stack format @@ -57,12 +55,11 @@ class TestExceptionNoCStack(unittest.TestCase): y = numpy.random.random(size=(8, 1)).astype('float32') with self.assertRaises(ValueError): - exe.run(fluid.default_main_program(), - feed={ - 'X': x, - 'Y': y - }, - fetch_list=[avg_loss.name]) + exe.run( + fluid.default_main_program(), + feed={'X': x, 'Y': y}, + fetch_list=[avg_loss.name], + ) def test_exception_in_dynamic_mode(self): place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index a9cd5c4f587ae5f84d8442bf07a23368741804d8..f85f7b97ec8578ea8ad450f07192decb71d4342f 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -20,17 +20,15 @@ from paddle.fluid.layers import mul, data, zeros, array_write, increment class TestExecutor(unittest.TestCase): - def test_mul(self): i = zeros(shape=[1], dtype='int64') a = data(name='a', shape=[784], dtype='float32') array = array_write(x=a, i=i) i = increment(i) - b = data(name='b', - shape=[784, 100], - dtype='float32', - append_batch_size=False) + b = data( + name='b', shape=[784, 100], dtype='float32', append_batch_size=False + ) array_write(x=b, i=i, array=array) i = increment(i) @@ -41,11 +39,9 @@ class TestExecutor(unittest.TestCase): b_np = np.random.random((784, 100)).astype('float32') exe = Executor() - res, res_array = exe.run(feed={ - 'a': a_np, - 'b': b_np - }, - fetch_list=[out, array]) + res, res_array = exe.run( + feed={'a': a_np, 'b': b_np}, fetch_list=[out, array] + ) self.assertEqual((100, 100), res.shape) np.testing.assert_allclose(res, np.dot(a_np, b_np), rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py 
b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py index 34f9674f27d89e2d08295b2307b7b2bf637de003..2412794929c055786cd161d7ebd9062721bc9d7a 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py @@ -21,16 +21,17 @@ from test_eager_deletion_padding_rnn import RNNConfig, PaddingRNNTestBase class TestExecutor(unittest.TestCase): - def test_mul(self): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): a = fluid.layers.data(name='a', shape=[784], dtype='float32') - b = fluid.layers.data(name='b', - shape=[784, 100], - dtype='float32', - append_batch_size=False) + b = fluid.layers.data( + name='b', + shape=[784, 100], + dtype='float32', + append_batch_size=False, + ) output = fluid.layers.mul(x=a, y=b) # Compute with numpy @@ -47,13 +48,12 @@ class TestExecutor(unittest.TestCase): run_time = 0.0 for i in range(max_iters): begin = time.time() - outs = exe.run(program=main_program, - feed={ - 'a': a_np, - 'b': b_np - }, - fetch_list=[output.name], - use_program_cache=use_program_cache) + outs = exe.run( + program=main_program, + feed={'a': a_np, 'b': b_np}, + fetch_list=[output.name], + use_program_cache=use_program_cache, + ) end = time.time() run_time += end - begin out = outs[0] @@ -62,29 +62,31 @@ class TestExecutor(unittest.TestCase): return run_time max_iters = 3 - run_time_with_cache = _train(use_program_cache=True, - max_iters=max_iters) + run_time_with_cache = _train( + use_program_cache=True, max_iters=max_iters + ) print("run time with program cache: %f" % run_time_with_cache) - run_time_without_cache = _train(use_program_cache=False, - max_iters=max_iters) + run_time_without_cache = _train( + use_program_cache=False, max_iters=max_iters + ) print("run time without program cache: %f" % run_time_without_cache) - run_time_with_cache = _train(use_program_cache=True, - max_iters=max_iters) + run_time_with_cache = _train( + use_program_cache=True, max_iters=max_iters + ) print("run time with program cache: %f" % run_time_with_cache) - run_time_with_cache = _train(use_program_cache=True, - max_iters=max_iters) + run_time_with_cache = _train( + use_program_cache=True, max_iters=max_iters + ) print("run time with program cache: %f" % run_time_with_cache) class ExecutorPaddingRNNTest(PaddingRNNTestBase): - - def train_and_save_inference_program(self, - rnn_model="static", - parallel=True, - use_program_cache=True): + def train_and_save_inference_program( + self, rnn_model="static", parallel=True, use_program_cache=True + ): config = RNNConfig("test", rnn_model) with fluid.scope_guard(fluid.Scope()): self.train(config, parallel, use_program_cache) @@ -94,55 +96,75 @@ class ExecutorPaddingRNNTest(PaddingRNNTestBase): target_vars=[self.loss, self.last_hidden, self.last_cell], executor=self.exe, dirname="padding_rnn." + rnn_model + ".inference_model", - params_filename="__params__") + params_filename="__params__", + ) def test_inference_output(self): for rnn_model in ["static", "padding"]: # Set parallel to False to use the default executor. 
- self.train_and_save_inference_program(rnn_model=rnn_model, - parallel=True, - use_program_cache=True) + self.train_and_save_inference_program( + rnn_model=rnn_model, parallel=True, use_program_cache=True + ) - x_np = np.random.random((self.config.batch_size, - self.config.num_steps, 1)).astype("int64") + x_np = np.random.random( + (self.config.batch_size, self.config.num_steps, 1) + ).astype("int64") y_np = np.random.random( - (self.config.batch_size * self.config.num_steps, - 1)).astype("int64") + (self.config.batch_size * self.config.num_steps, 1) + ).astype("int64") init_hidden_np = np.random.random( - (self.config.num_layers, self.config.batch_size, - self.config.hidden_size)).astype("float32") + ( + self.config.num_layers, + self.config.batch_size, + self.config.hidden_size, + ) + ).astype("float32") init_cell_np = np.random.random( - (self.config.num_layers, self.config.batch_size, - self.config.hidden_size)).astype("float32") + ( + self.config.num_layers, + self.config.batch_size, + self.config.hidden_size, + ) + ).astype("float32") for use_program_cache in [False, True]: with fluid.scope_guard(fluid.Scope()): - save_dirname = "padding_rnn." + rnn_model + ".inference_model" - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model( - save_dirname, self.exe, params_filename="__params__") - - results = self.exe.run(program=inference_program, - feed={ - "x": x_np, - "y": y_np, - "init_hidden": init_hidden_np, - "init_cell": init_cell_np - }, - fetch_list=fetch_targets, - use_program_cache=use_program_cache) + save_dirname = ( + "padding_rnn." + rnn_model + ".inference_model" + ) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + save_dirname, self.exe, params_filename="__params__" + ) + + results = self.exe.run( + program=inference_program, + feed={ + "x": x_np, + "y": y_np, + "init_hidden": init_hidden_np, + "init_cell": init_cell_np, + }, + fetch_list=fetch_targets, + use_program_cache=use_program_cache, + ) if use_program_cache is True: results_with_cache = results else: results_without_cache = results - self.assertEqual(len(results_with_cache), - len(results_without_cache)) + self.assertEqual( + len(results_with_cache), len(results_without_cache) + ) for i in range(len(results_with_cache)): - self.assertEqual(results_with_cache[i].shape, - results_without_cache[i].shape) - np.testing.assert_allclose(results_with_cache[i], - results_without_cache[i], - rtol=1e-05) + self.assertEqual( + results_with_cache[i].shape, results_without_cache[i].shape + ) + np.testing.assert_allclose( + results_with_cache[i], results_without_cache[i], rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py index cfcd81a0a35802072cbff3a1ac00a9ca24e1f5f1..77122f46bdcf5d099e451011a9e9a2c8b6f8e871 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py +++ b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): - def net(self): lr = fluid.data(name="lr", shape=[1], dtype='float32') x = fluid.data(name="x", shape=[None, 1], dtype='float32') @@ -48,13 +47,12 @@ class TestExecutor(unittest.TestCase): y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 with self.assertRaises(ValueError): - exe.run(feed={ - 'x': train_data, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False, - 
use_prune=True) + exe.run( + feed={'x': train_data, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + use_prune=True, + ) def test_compiled_program_check_feed(self): main_program = fluid.Program() @@ -67,19 +65,19 @@ class TestExecutor(unittest.TestCase): lr, cost = self.net() exe.run(startup_program) compiled_prog = fluid.CompiledProgram( - main_program).with_data_parallel(loss_name=cost.name) + main_program + ).with_data_parallel(loss_name=cost.name) train_data = [[1.0], [2.0], [3.0], [4.0]] y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 with self.assertRaises(ValueError): - exe.run(compiled_prog, - feed={ - 'x': train_data, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False, - use_prune=True) + exe.run( + compiled_prog, + feed={'x': train_data, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + use_prune=True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py b/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py index e91bdf976d311a802803dc597a14c1ac0307d3fd..d6711a612d6930a37b7fe6d5323b72cff5044b79 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py +++ b/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py @@ -20,7 +20,6 @@ import unittest class TestCheckFetchList(unittest.TestCase): - def setUp(self): paddle.enable_static() self.feed = {"x": np.array([[0], [0], [1], [0]], dtype='float32')} @@ -32,10 +31,9 @@ class TestCheckFetchList(unittest.TestCase): main_program = paddle.static.Program() with paddle.static.program_guard(main_program): x = paddle.static.data(name='x', shape=[4, 1], dtype='float32') - output = paddle.unique_consecutive(x, - return_inverse=True, - return_counts=True, - axis=0) + output = paddle.unique_consecutive( + x, return_inverse=True, return_counts=True, axis=0 + ) self.main_program = main_program self.fetch_list = output @@ -46,22 +44,23 @@ class TestCheckFetchList(unittest.TestCase): self.main_program, feed=self.feed, fetch_list=[self.fetch_list], # support single list/tuple - return_numpy=True) + return_numpy=True, + ) np.testing.assert_array_equal(res[0], self.expected) def test_with_error(self): with self.assertRaises(TypeError): fetch_list = [23] - res = self.exe.run(self.main_program, - feed=self.feed, - fetch_list=fetch_list) + res = self.exe.run( + self.main_program, feed=self.feed, fetch_list=fetch_list + ) with self.assertRaises(TypeError): fetch_list = [(self.fetch_list[0], 32)] - res = self.exe.run(self.main_program, - feed=self.feed, - fetch_list=fetch_list) + res = self.exe.run( + self.main_program, feed=self.feed, fetch_list=fetch_list + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py index 6441ba66e578793b63ff125e972175bda83bae44..41b4211a8bcc69019b3952d407c429837e88855a 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): - def net(self): lr = fluid.data(name="lr", shape=[1], dtype='float32') x = fluid.data(name="x", shape=[None, 1], dtype='float32') @@ -45,18 +44,18 @@ class TestExecutor(unittest.TestCase): exe = fluid.Executor(cpu) lr, cost = self.net() exe.run(startup_program) - train_data = numpy.array([[1.0], [2.0], [3.0], - [4.0]]).astype('float32') - y_true = 
numpy.array([[2.0], [4.0], [6.0], - [8.0]]).astype('float32') + train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( + 'float32' + ) + y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype( + 'float32' + ) a = 0.01 - _lr, _ = exe.run(feed={ - 'x': train_data, - 'y': y_true, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False) + _lr, _ = exe.run( + feed={'x': train_data, 'y': y_true, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + ) self.assertEqual(_lr._dtype(), lr.dtype) self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) self.assertEqual(type(a), float) @@ -71,18 +70,18 @@ class TestExecutor(unittest.TestCase): exe = fluid.Executor(cpu) lr, cost = self.net() exe.run(startup_program) - train_data = numpy.array([[1.0], [2.0], [3.0], - [4.0]]).astype('float32') - y_true = numpy.array([[2.0], [4.0], [6.0], - [8.0]]).astype('float32') + train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( + 'float32' + ) + y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype( + 'float32' + ) a = 0 - _lr, _ = exe.run(feed={ - 'x': train_data, - 'y': y_true, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False) + _lr, _ = exe.run( + feed={'x': train_data, 'y': y_true, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + ) self.assertEqual(_lr._dtype(), lr.dtype) self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) self.assertEqual(type(a), int) @@ -100,13 +99,11 @@ class TestExecutor(unittest.TestCase): train_data = [[1.0], [2.0], [3.0], [4.0]] y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 - _lr, _ = exe.run(feed={ - 'x': train_data, - 'y': y_true, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False) + _lr, _ = exe.run( + feed={'x': train_data, 'y': y_true, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + ) self.assertEqual(_lr._dtype(), lr.dtype) self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) self.assertEqual(type(y_true), list) @@ -122,37 +119,39 @@ class TestExecutor(unittest.TestCase): exe = fluid.Executor(cpu) exe.run(startup_program) compiled_prog = fluid.CompiledProgram( - main_program).with_data_parallel(loss_name=cost.name) - train_data = numpy.array([[1.0], [2.0], [3.0], - [4.0]]).astype('float32') - y_true = numpy.array([[2.0], [4.0], [6.0], - [8.0]]).astype('float32') + main_program + ).with_data_parallel(loss_name=cost.name) + train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( + 'float32' + ) + y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype( + 'float32' + ) a = 0.01 - _lr, _ = exe.run(compiled_prog, - feed={ - 'x': train_data, - 'y': y_true, - 'lr': a - }, - fetch_list=[lr, cost], - return_numpy=False) + _lr, _ = exe.run( + compiled_prog, + feed={'x': train_data, 'y': y_true, 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + ) self.assertEqual(_lr._dtype(), lr.dtype) self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) self.assertEqual(type(a), float) class TestAsLodTensor(unittest.TestCase): - def test_as_lodtensor_int32(self): cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor(1.0, cpu, - fluid.core.VarDesc.VarType.INT32) + tensor = fluid.executor._as_lodtensor( + 1.0, cpu, fluid.core.VarDesc.VarType.INT32 + ) self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.INT32) def test_as_lodtensor_fp64(self): cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor(1, cpu, - fluid.core.VarDesc.VarType.FP64) + tensor = fluid.executor._as_lodtensor( + 1, cpu, fluid.core.VarDesc.VarType.FP64 + ) 
self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) def test_as_lodtensor_assertion_error(self): @@ -161,25 +160,37 @@ class TestAsLodTensor(unittest.TestCase): def test_as_lodtensor_type_error(self): cpu = fluid.CPUPlace() - self.assertRaises(TypeError, fluid.executor._as_lodtensor, {"a": 1}, - cpu, fluid.core.VarDesc.VarType.INT32) + self.assertRaises( + TypeError, + fluid.executor._as_lodtensor, + {"a": 1}, + cpu, + fluid.core.VarDesc.VarType.INT32, + ) def test_as_lodtensor_list(self): cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor([1, 2], cpu, - fluid.core.VarDesc.VarType.FP64) + tensor = fluid.executor._as_lodtensor( + [1, 2], cpu, fluid.core.VarDesc.VarType.FP64 + ) self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) def test_as_lodtensor_tuple(self): cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor((1, 2), cpu, - fluid.core.VarDesc.VarType.FP64) + tensor = fluid.executor._as_lodtensor( + (1, 2), cpu, fluid.core.VarDesc.VarType.FP64 + ) self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) def test_as_lodtensor_nested_list(self): cpu = fluid.CPUPlace() - self.assertRaises(TypeError, fluid.executor._as_lodtensor, - [[1], [1, 2]], cpu, fluid.core.VarDesc.VarType.INT32) + self.assertRaises( + TypeError, + fluid.executor._as_lodtensor, + [[1], [1, 2]], + cpu, + fluid.core.VarDesc.VarType.INT32, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py index fbb45e9c90625e5b2bd0a6c740443c2b9616deab..7a3063025d5dc24d13921e7a24487ed4ec75e307 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py +++ b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py @@ -20,7 +20,6 @@ from op_test import OpTest, skip_check_grad_ci @skip_check_grad_ci(reason="Not op test but call the method of class OpTest.") class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): - def setUp(self): pass @@ -30,7 +29,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): self.out = np.add(self.x, self.y) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.op_type = "elementwise_add" @@ -44,7 +43,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): self.out = np.dot(self.x, self.y) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.op_type = "mul" @@ -67,7 +66,6 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): - def setUp(self): pass @@ -78,7 +76,8 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): program = fluid.default_main_program() if parallel: program = fluid.CompiledProgram(program).with_data_parallel( - places=place) + places=place + ) exe = fluid.Executor(place) out = exe.run(program, fetch_list=[out], return_numpy=False) return out @@ -90,7 +89,8 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): program = fluid.default_main_program() if parallel: program = fluid.CompiledProgram(program).with_data_parallel( - places=place) + places=place + ) exe = 
fluid.Executor(place) out = exe.run(program, fetch_list=[out], return_numpy=False) return out diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_op.py index 9815f587ddb3aa431f9e0eb3474537e1db53e6dd..827f7a73a75de94cdeb538ae2ca205f565605c41 100755 --- a/python/paddle/fluid/tests/unittests/test_expand_as_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_as_op.py @@ -29,7 +29,6 @@ def bcast(x, target_tensor): class TestExpandAsOpRank1(OpTest): - def setUp(self): self.op_type = "expand_as" x = np.random.rand(100).astype("float64") @@ -48,7 +47,6 @@ class TestExpandAsOpRank1(OpTest): class TestExpandAsOpRank2(OpTest): - def setUp(self): self.op_type = "expand_as" x = np.random.rand(10, 12).astype("float64") @@ -67,7 +65,6 @@ class TestExpandAsOpRank2(OpTest): class TestExpandAsOpRank3(OpTest): - def setUp(self): self.op_type = "expand_as" x = np.random.rand(2, 3, 20).astype("float64") @@ -86,7 +83,6 @@ class TestExpandAsOpRank3(OpTest): class TestExpandAsOpRank4(OpTest): - def setUp(self): self.op_type = "expand_as" x = np.random.rand(1, 1, 7, 16).astype("float64") @@ -106,9 +102,9 @@ class TestExpandAsOpRank4(OpTest): # Test dygraph API class TestExpandAsDygraphAPI(unittest.TestCase): - def test_api(self): import paddle + paddle.disable_static() np_data_x = np.array([1, 2, 3]).astype('int32') np_data_y = np.array([1, 2, 3, 1, 2, 3]).astype('int32') @@ -122,29 +118,28 @@ class TestExpandAsDygraphAPI(unittest.TestCase): # Test python API class TestExpandAsAPI(unittest.TestCase): - def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([48, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) - y = fluid.layers.data(name='target_tensor', - shape=[48, 14], - append_batch_size=False, - dtype="float32") + y = fluid.layers.data( + name='target_tensor', + shape=[48, 14], + append_batch_size=False, + dtype="float32", + ) out_1 = fluid.layers.expand_as(x, target_tensor=y) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1 = exe.run(fluid.default_main_program(), - feed={ - "x": input1, - "target_tensor": input2 - }, - fetch_list=[out_1]) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": input1, "target_tensor": input2}, + fetch_list=[out_1], + ) assert np.array_equal(res_1[0], np.tile(input1, (4, 1))) diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py index e0506f8eb52538b51c03e33586551c45e4902656..5f4c04470ff937d0dc0d0f22e931cb08c0f57480 100755 --- a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py @@ -20,7 +20,6 @@ import paddle.fluid as fluid class TestExpandAsBasic(OpTest): - def setUp(self): self.op_type = "expand_as_v2" self.python_api = paddle.expand_as @@ -40,7 +39,6 @@ class TestExpandAsBasic(OpTest): class TestExpandAsOpRank2(TestExpandAsBasic): - def setUp(self): self.op_type = "expand_as_v2" self.python_api = paddle.expand_as @@ -54,7 +52,6 @@ class TestExpandAsOpRank2(TestExpandAsBasic): class TestExpandAsOpRank3(TestExpandAsBasic): - def setUp(self): self.op_type = "expand_as_v2" self.python_api = paddle.expand_as @@ -68,7 +65,6 @@ class TestExpandAsOpRank3(TestExpandAsBasic): class TestExpandAsOpRank4(TestExpandAsBasic): - def 
setUp(self): self.op_type = "expand_as_v2" self.python_api = paddle.expand_as @@ -100,7 +96,6 @@ class TestExpandAsOpRank5(TestExpandAsBasic): class TestExpandAsV2Error(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): x1 = fluid.layers.data(name='x1', shape=[4], dtype="uint8") @@ -113,29 +108,28 @@ class TestExpandAsV2Error(unittest.TestCase): # Test python API class TestExpandAsV2API(unittest.TestCase): - def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) - y = fluid.layers.data(name='target_tensor', - shape=[2, 12, 14], - append_batch_size=False, - dtype="float32") + y = fluid.layers.data( + name='target_tensor', + shape=[2, 12, 14], + append_batch_size=False, + dtype="float32", + ) out_1 = paddle.expand_as(x, y=y) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1 = exe.run(fluid.default_main_program(), - feed={ - "x": input1, - "target_tensor": input2 - }, - fetch_list=[out_1]) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": input1, "target_tensor": input2}, + fetch_list=[out_1], + ) assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1))) diff --git a/python/paddle/fluid/tests/unittests/test_expand_op.py b/python/paddle/fluid/tests/unittests/test_expand_op.py index 36be4ab1a9beb67c533c783199e155536b42e825..fd3dac2472e7c8089656d17effd36765431fade4 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_op.py @@ -22,12 +22,12 @@ import paddle # Situation 1: expand_times is a list(without tensor) class TestExpandOpRank1(OpTest): - def setUp(self): self.op_type = "expand" self.init_data() - self.dtype = "float32" if fluid.core.is_compiled_with_rocm( - ) else "float64" + self.dtype = ( + "float32" if fluid.core.is_compiled_with_rocm() else "float64" + ) self.inputs = {'X': np.random.random(self.ori_shape).astype(self.dtype)} self.attrs = {'expand_times': self.expand_times} @@ -46,35 +46,30 @@ class TestExpandOpRank1(OpTest): class TestExpandOpRank2_Corner(TestExpandOpRank1): - def init_data(self): self.ori_shape = [120] self.expand_times = [2] class TestExpandOpRank2(TestExpandOpRank1): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [2, 3] class TestExpandOpRank3_Corner(TestExpandOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.expand_times = (1, 1, 1) class TestExpandOpRank3(TestExpandOpRank1): - def init_data(self): self.ori_shape = (2, 4, 15) self.expand_times = (2, 1, 4) class TestExpandOpRank4(TestExpandOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.expand_times = (3, 2, 1, 2) @@ -82,17 +77,18 @@ class TestExpandOpRank4(TestExpandOpRank1): # Situation 2: expand_times is a list(with tensor) class TestExpandOpRank1_tensor_attr(OpTest): - def setUp(self): self.op_type = "expand" self.init_data() - self.dtype = "float32" if fluid.core.is_compiled_with_rocm( - ) else "float64" + self.dtype = ( + "float32" if fluid.core.is_compiled_with_rocm() else "float64" + ) expand_times_tensor = [] for index, ele in enumerate(self.expand_times): - expand_times_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + expand_times_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs 
= { 'X': np.random.random(self.ori_shape).astype(self.dtype), @@ -115,7 +111,6 @@ class TestExpandOpRank1_tensor_attr(OpTest): class TestExpandOpRank2_Corner_tensor_attr(TestExpandOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -123,7 +118,6 @@ class TestExpandOpRank2_Corner_tensor_attr(TestExpandOpRank1_tensor_attr): class TestExpandOpRank2_attr_tensor(TestExpandOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [2, 3] @@ -132,12 +126,12 @@ class TestExpandOpRank2_attr_tensor(TestExpandOpRank1_tensor_attr): # Situation 3: expand_times is a tensor class TestExpandOpRank1_tensor(OpTest): - def setUp(self): self.op_type = "expand" self.init_data() - self.dtype = "float32" if fluid.core.is_compiled_with_rocm( - ) else "float64" + self.dtype = ( + "float32" if fluid.core.is_compiled_with_rocm() else "float64" + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), @@ -159,7 +153,6 @@ class TestExpandOpRank1_tensor(OpTest): class TestExpandOpRank2_tensor(TestExpandOpRank1_tensor): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [2, 3] @@ -167,7 +160,6 @@ class TestExpandOpRank2_tensor(TestExpandOpRank1_tensor): # Situation 4: input x is Integer class TestExpandOpInteger(OpTest): - def setUp(self): self.op_type = "expand" self.inputs = { @@ -183,7 +175,6 @@ class TestExpandOpInteger(OpTest): # Situation 5: input x is Bool class TestExpandOpBoolean(OpTest): - def setUp(self): self.op_type = "expand" self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")} @@ -197,7 +188,6 @@ class TestExpandOpBoolean(OpTest): # Situation 56: input x is Integer class TestExpandOpInt64_t(OpTest): - def setUp(self): self.op_type = "expand" self.inputs = { @@ -212,11 +202,11 @@ class TestExpandOpInt64_t(OpTest): class TestExpandError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) expand_times = [2, 2] self.assertRaises(TypeError, fluid.layers.expand, x1, expand_times) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -228,18 +218,16 @@ class TestExpandError(unittest.TestCase): # Test python API class TestExpandAPI(unittest.TestCase): - def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) positive_2 = fluid.layers.fill_constant([1], "int32", 2) - expand_times = fluid.layers.data(name="expand_times", - shape=[2], - append_batch_size=False) + expand_times = fluid.layers.data( + name="expand_times", shape=[2], append_batch_size=False + ) out_1 = fluid.layers.expand(x, expand_times=[2, 3]) out_2 = fluid.layers.expand(x, expand_times=[positive_2, 3]) @@ -248,28 +236,24 @@ class TestExpandAPI(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_times": - np.array([1, 3]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={"x": input, "expand_times": np.array([1, 3]).astype("int32")}, + fetch_list=[out_1, out_2, out_3], + ) assert 
np.array_equal(res_1, np.tile(input, (2, 3))) assert np.array_equal(res_2, np.tile(input, (2, 3))) assert np.array_equal(res_3, np.tile(input, (1, 3))) class TestExpandDygraphAPI(unittest.TestCase): - def test_expand_times_is_tensor(self): with paddle.fluid.dygraph.guard(): a = paddle.rand([2, 5]) b = paddle.fluid.layers.expand(a, expand_times=[2, 3]) - c = paddle.fluid.layers.expand(a, - expand_times=paddle.to_tensor( - [2, 3], dtype='int32')) + c = paddle.fluid.layers.expand( + a, expand_times=paddle.to_tensor([2, 3], dtype='int32') + ) np.testing.assert_array_equal(b.numpy(), np.tile(a.numpy(), [2, 3])) np.testing.assert_array_equal(c.numpy(), np.tile(a.numpy(), [2, 3])) diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index d89332f393aa1e19a21df14b3ecb347a43f4ddf7..da27fb397cc595e4046170e22dfc3eeff6a7e22f 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -26,7 +26,6 @@ import paddle.fluid.layers as layers # Situation 1: shape is a list(without tensor) class TestExpandV2OpRank1(OpTest): - def setUp(self): self.op_type = "expand_v2" self.init_data() @@ -50,7 +49,6 @@ class TestExpandV2OpRank1(OpTest): class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = [120] self.shape = [2, 120] @@ -58,7 +56,6 @@ class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1): class TestExpandV2OpRank2(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = [1, 140] self.shape = [12, 140] @@ -66,7 +63,6 @@ class TestExpandV2OpRank2(TestExpandV2OpRank1): class TestExpandV2OpRank3_Corner(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) @@ -74,7 +70,6 @@ class TestExpandV2OpRank3_Corner(TestExpandV2OpRank1): class TestExpandV2OpRank4(TestExpandV2OpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) @@ -83,14 +78,14 @@ class TestExpandV2OpRank4(TestExpandV2OpRank1): # Situation 2: shape is a list(with tensor) class TestExpandV2OpRank1_tensor_attr(OpTest): - def setUp(self): self.op_type = "expand_v2" self.init_data() expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): - expand_shapes_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + expand_shapes_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype("float64"), @@ -114,7 +109,6 @@ class TestExpandV2OpRank1_tensor_attr(OpTest): class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -124,7 +118,6 @@ class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr): # Situation 3: shape is a tensor class TestExpandV2OpRank1_tensor(OpTest): - def setUp(self): self.op_type = "expand_v2" self.init_data() @@ -151,7 +144,6 @@ class TestExpandV2OpRank1_tensor(OpTest): # Situation 4: input x is Integer class TestExpandV2OpInteger(OpTest): - def setUp(self): self.op_type = "expand_v2" self.inputs = { @@ -167,7 +159,6 @@ class TestExpandV2OpInteger(OpTest): # Situation 5: input x is Bool class TestExpandV2OpBoolean(OpTest): - def setUp(self): self.op_type = "expand_v2" self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")} @@ -181,7 +172,6 @@ class TestExpandV2OpBoolean(OpTest): # Situation 56: input x is Integer 
class TestExpandV2OpInt64_t(OpTest): - def setUp(self): self.op_type = "expand_v2" self.inputs = { @@ -196,11 +186,11 @@ class TestExpandV2OpInt64_t(OpTest): class TestExpandV2Error(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -212,19 +202,19 @@ class TestExpandV2Error(unittest.TestCase): # Test python API class TestExpandV2API(unittest.TestCase): - def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data(name="expand_shape", - shape=[2], - append_batch_size=False, - dtype="int32") + expand_shape = fluid.layers.data( + name="expand_shape", + shape=[2], + append_batch_size=False, + dtype="int32", + ) out_1 = paddle.expand(x, shape=[12, 14]) out_2 = paddle.expand(x, shape=[positive_2, 14]) @@ -233,28 +223,28 @@ class TestExpandV2API(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_shape": - np.array([12, 14]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + "expand_shape": np.array([12, 14]).astype("int32"), + }, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) assert np.array_equal(res_3, np.tile(input, (1, 1))) class TestExpandInferShape(unittest.TestCase): - def test_shape_with_var(self): with program_guard(Program(), Program()): x = paddle.static.data(shape=[-1, 1, 3], name='x') fake_var = paddle.randn([2, 3]) target_shape = [ - -1, paddle.shape(fake_var)[0], - paddle.shape(fake_var)[1] + -1, + paddle.shape(fake_var)[0], + paddle.shape(fake_var)[1], ] out = paddle.expand(x, shape=target_shape) self.assertListEqual(list(out.shape), [-1, -1, -1]) @@ -262,7 +252,6 @@ class TestExpandInferShape(unittest.TestCase): # Test python Dygraph API class TestExpandV2DygraphAPI(unittest.TestCase): - def test_expand_times_is_tensor(self): with paddle.fluid.dygraph.guard(): with _test_eager_guard(): @@ -278,15 +267,16 @@ class TestExpandV2DygraphAPI(unittest.TestCase): np_array = np.array([2, 5]) expand_2 = paddle.expand(a, shape=np_array) - np.testing.assert_array_equal(egr_expand_1.numpy(), - egr_expand_2.numpy()) + np.testing.assert_array_equal( + egr_expand_1.numpy(), egr_expand_2.numpy() + ) np.testing.assert_array_equal(expand_1.numpy(), expand_2.numpy()) - np.testing.assert_array_equal(expand_1.numpy(), - egr_expand_1.numpy()) + np.testing.assert_array_equal( + expand_1.numpy(), egr_expand_1.numpy() + ) class TestExpandDoubleGradCheck(unittest.TestCase): - def expand_wrapper(self, x): return paddle.expand(x[0], [2, 3]) @@ -301,17 +291,13 @@ class TestExpandDoubleGradCheck(unittest.TestCase): out = paddle.expand(data, [2, 3]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - 
gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.expand_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.expand_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -323,7 +309,6 @@ class TestExpandDoubleGradCheck(unittest.TestCase): class TestExpandTripleGradCheck(unittest.TestCase): - def expand_wrapper(self, x): return paddle.expand(x[0], [2, 3]) @@ -338,17 +323,13 @@ class TestExpandTripleGradCheck(unittest.TestCase): out = paddle.expand(data, [2, 3]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.expand_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.expand_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py index 54a6befc8edcce779b468dfc88fd7bf629e720ec..458be9f3635b2fa941873f7825ec3680771a5f62 100644 --- a/python/paddle/fluid/tests/unittests/test_exponential_op.py +++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py @@ -21,7 +21,6 @@ paddle.seed(100) class TestExponentialOp1(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "exponential" @@ -43,7 +42,7 @@ class TestExponentialOp1(OpTest): hist1 = hist1.astype("float32") hist1 = hist1 / float(outs[0].size) - data_np = np.random.exponential(1. / self.lam, [1024, 1024]) + data_np = np.random.exponential(1.0 / self.lam, [1024, 1024]) hist2, _ = np.histogram(data_np, range=(0, 5)) hist2 = hist2.astype("float32") hist2 = hist2 / float(data_np.size) @@ -57,34 +56,36 @@ class TestExponentialOp1(OpTest): user_defined_grads=[np.zeros([1024, 1024], dtype=self.dtype)], user_defined_grad_outputs=[ np.random.rand(1024, 1024).astype(self.dtype) - ]) + ], + ) class TestExponentialOp2(TestExponentialOp1): - def config(self): self.lam = 0.25 self.dtype = "float32" class TestExponentialAPI(unittest.TestCase): - def test_static(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - x_np = np.full([10, 10], -1.) 
+ with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + x_np = np.full([10, 10], -1.0) x = paddle.static.data(name="X", shape=[10, 10], dtype='float64') x.exponential_(1.0) exe = paddle.static.Executor() - out = exe.run(paddle.static.default_main_program(), - feed={"X": x_np}, - fetch_list=[x]) + out = exe.run( + paddle.static.default_main_program(), + feed={"X": x_np}, + fetch_list=[x], + ) self.assertTrue(np.min(out) >= 0) def test_dygraph(self): paddle.disable_static() - x = paddle.full([10, 10], -1., dtype='float32') + x = paddle.full([10, 10], -1.0, dtype='float32') x.stop_gradient = False y = 2 * x y.exponential_(0.5) @@ -113,47 +114,94 @@ class TestExponentialAPI(unittest.TestCase): x.exponential_(1.0) x_np = x.numpy() expect = [ - 0.80073667, 0.2249291, 0.07734892, 1.25392, 0.14013891, 0.45736602, - 1.9735607, 0.30490234, 0.57100505, 0.8115938 + 0.80073667, + 0.2249291, + 0.07734892, + 1.25392, + 0.14013891, + 0.45736602, + 1.9735607, + 0.30490234, + 0.57100505, + 0.8115938, ] np.testing.assert_allclose(x_np[0, 0, 0, 0:10], expect, rtol=1e-05) expect = [ - 1.4296371e+00, 9.5411777e-01, 5.2575850e-01, 2.4805880e-01, - 1.2322118e-04, 8.4604341e-01, 2.1111444e-01, 1.4143821e+00, - 2.8194717e-01, 1.1360573e+00 + 1.4296371e00, + 9.5411777e-01, + 5.2575850e-01, + 2.4805880e-01, + 1.2322118e-04, + 8.4604341e-01, + 2.1111444e-01, + 1.4143821e00, + 2.8194717e-01, + 1.1360573e00, ] - np.testing.assert_allclose(x_np[16, 1, 300, 200:210], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[16, 1, 300, 200:210], expect, rtol=1e-05 + ) expect = [ - 1.3448033, 0.35146526, 1.7380928, 0.32012638, 0.10396296, - 0.51344526, 0.15308502, 0.18712929, 0.03888268, 0.20771872 + 1.3448033, + 0.35146526, + 1.7380928, + 0.32012638, + 0.10396296, + 0.51344526, + 0.15308502, + 0.18712929, + 0.03888268, + 0.20771872, ] - np.testing.assert_allclose(x_np[32, 1, 600, 500:510], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[32, 1, 600, 500:510], expect, rtol=1e-05 + ) expect = [ - 0.5107464, 0.20970327, 2.1986802, 1.580056, 0.31036147, 0.43966478, - 0.9056133, 0.30119267, 1.4797124, 1.4319834 + 0.5107464, + 0.20970327, + 2.1986802, + 1.580056, + 0.31036147, + 0.43966478, + 0.9056133, + 0.30119267, + 1.4797124, + 1.4319834, ] - np.testing.assert_allclose(x_np[48, 2, 900, 800:810], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[48, 2, 900, 800:810], expect, rtol=1e-05 + ) expect = [ - 3.4640615, 1.1019983, 0.41195083, 0.22681557, 0.291846, 0.53617656, - 1.5791925, 2.4645927, 0.04094889, 0.9057725 + 3.4640615, + 1.1019983, + 0.41195083, + 0.22681557, + 0.291846, + 0.53617656, + 1.5791925, + 2.4645927, + 0.04094889, + 0.9057725, ] - np.testing.assert_allclose(x_np[63, 2, 1023, 1000:1010], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[63, 2, 1023, 1000:1010], expect, rtol=1e-05 + ) x = paddle.empty([10, 10], dtype="float32") x.exponential_(3.0) x_np = x.numpy() expect = [ - 0.02831675, 0.1691551, 0.6798956, 0.69347525, 0.0243443, 0.22180498, - 0.30574575, 0.9839696, 0.2834912, 0.59420055 + 0.02831675, + 0.1691551, + 0.6798956, + 0.69347525, + 0.0243443, + 0.22180498, + 0.30574575, + 0.9839696, + 0.2834912, + 0.59420055, ] np.testing.assert_allclose(x_np[5, 0:10], expect, rtol=1e-05) @@ -161,51 +209,115 @@ class TestExponentialAPI(unittest.TestCase): x.exponential_(0.25) x_np = x.numpy() expect = [ - 10.0541229, 12.67860643, 1.09850734, 7.35289643, 2.65471225, - 3.86217432, 2.97902086, 2.92744479, 2.67927152, 0.19667352 + 10.0541229, 
+ 12.67860643, + 1.09850734, + 7.35289643, + 2.65471225, + 3.86217432, + 2.97902086, + 2.92744479, + 2.67927152, + 0.19667352, ] np.testing.assert_allclose(x_np[0, 0, 0, 100:110], expect, rtol=1e-05) expect = [ - 0.68328125, 3.1454553, 0.92158376, 1.95842188, 1.05296941, - 12.93242051, 5.20255978, 3.3588624, 1.57377174, 5.73194183 + 0.68328125, + 3.1454553, + 0.92158376, + 1.95842188, + 1.05296941, + 12.93242051, + 5.20255978, + 3.3588624, + 1.57377174, + 5.73194183, ] np.testing.assert_allclose(x_np[4, 0, 300, 190:200], expect, rtol=1e-05) expect = [ - 1.37973974, 3.45036798, 7.94625406, 1.62610973, 0.31032122, - 4.13596493, 1.98494535, 1.13207041, 8.30592769, 2.81460147 + 1.37973974, + 3.45036798, + 7.94625406, + 1.62610973, + 0.31032122, + 4.13596493, + 1.98494535, + 1.13207041, + 8.30592769, + 2.81460147, ] np.testing.assert_allclose(x_np[8, 1, 600, 300:310], expect, rtol=1e-05) expect = [ - 2.27710811, 12.25003028, 2.96409124, 4.72405788, 0.67917249, - 4.35856718, 0.46870976, 2.31120149, 9.61595826, 4.64446271 + 2.27710811, + 12.25003028, + 2.96409124, + 4.72405788, + 0.67917249, + 4.35856718, + 0.46870976, + 2.31120149, + 9.61595826, + 4.64446271, ] - np.testing.assert_allclose(x_np[12, 1, 900, 500:510], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[12, 1, 900, 500:510], expect, rtol=1e-05 + ) expect = [ - 0.95883744, 1.57316361, 15.22524512, 20.49559882, 13.70008548, - 3.29430143, 3.90390424, 0.9146657, 0.80972249, 0.33376219 + 0.95883744, + 1.57316361, + 15.22524512, + 20.49559882, + 13.70008548, + 3.29430143, + 3.90390424, + 0.9146657, + 0.80972249, + 0.33376219, ] - np.testing.assert_allclose(x_np[15, 1, 1023, 750:760], - expect, - rtol=1e-05) + np.testing.assert_allclose( + x_np[15, 1, 1023, 750:760], expect, rtol=1e-05 + ) x = paddle.empty([512, 768], dtype="float64") x.exponential_(0.3) x_np = x.numpy() expect = [ - 8.79266704, 4.79596009, 2.75480243, 6.04670011, 0.35379556, - 0.76864868, 3.17428251, 0.26556859, 12.22485885, 10.51690383 + 8.79266704, + 4.79596009, + 2.75480243, + 6.04670011, + 0.35379556, + 0.76864868, + 3.17428251, + 0.26556859, + 12.22485885, + 10.51690383, ] np.testing.assert_allclose(x_np[0, 200:210], expect, rtol=1e-05) expect = [ - 5.6341126, 0.52243418, 5.36410796, 6.83672002, 11.9243311, - 5.85985566, 5.75169548, 0.13877972, 6.1348385, 3.82436519 + 5.6341126, + 0.52243418, + 5.36410796, + 6.83672002, + 11.9243311, + 5.85985566, + 5.75169548, + 0.13877972, + 6.1348385, + 3.82436519, ] np.testing.assert_allclose(x_np[300, 400:410], expect, rtol=1e-05) expect = [ - 4.94883581, 0.56345306, 0.85841585, 1.92287801, 6.10036656, - 1.19524847, 3.64735434, 5.19618716, 2.57467974, 3.49152791 + 4.94883581, + 0.56345306, + 0.85841585, + 1.92287801, + 6.10036656, + 1.19524847, + 3.64735434, + 5.19618716, + 2.57467974, + 3.49152791, ] np.testing.assert_allclose(x_np[500, 700:710], expect, rtol=1e-05) @@ -213,8 +325,16 @@ class TestExponentialAPI(unittest.TestCase): x.exponential_(4.0) x_np = x.numpy() expect = [ - 0.15713826, 0.56395964, 0.0680941, 0.00316643, 0.27046853, - 0.19852724, 0.12776634, 0.09642974, 0.51977551, 1.33739699 + 0.15713826, + 0.56395964, + 0.0680941, + 0.00316643, + 0.27046853, + 0.19852724, + 0.12776634, + 0.09642974, + 0.51977551, + 1.33739699, ] np.testing.assert_allclose(x_np[5, 0:10], expect, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_eye_op.py b/python/paddle/fluid/tests/unittests/test_eye_op.py index c67388c1339591594a01535b08e129e55ab48c74..e61037ec1afbdcade0f5b52f9f4ec6e88186b879 100644 --- 
a/python/paddle/fluid/tests/unittests/test_eye_op.py +++ b/python/paddle/fluid/tests/unittests/test_eye_op.py @@ -26,7 +26,6 @@ from test_attribute_var import UnittestBase class TestEyeOp(OpTest): - def setUp(self): ''' Test eye op with specified shape @@ -38,7 +37,7 @@ class TestEyeOp(OpTest): self.attrs = { 'num_rows': 219, 'num_columns': 319, - 'dtype': framework.convert_np_dtype_to_dtype_(np.int32) + 'dtype': framework.convert_np_dtype_to_dtype_(np.int32), } self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)} @@ -47,7 +46,6 @@ class TestEyeOp(OpTest): class TestEyeOp1(OpTest): - def setUp(self): ''' Test eye op with default parameters @@ -64,7 +62,6 @@ class TestEyeOp1(OpTest): class TestEyeOp2(OpTest): - def setUp(self): ''' Test eye op with specified shape @@ -81,13 +78,12 @@ class TestEyeOp2(OpTest): class API_TestTensorEye(unittest.TestCase): - def test_out(self): with paddle.static.program_guard(paddle.static.Program()): data = paddle.eye(10) place = fluid.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="float32") self.assertEqual((result == expected_result).all(), True) @@ -95,7 +91,7 @@ class API_TestTensorEye(unittest.TestCase): data = paddle.eye(10, num_columns=7, dtype="float64") place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, 7, dtype="float64") self.assertEqual((result == expected_result).all(), True) @@ -103,7 +99,7 @@ class API_TestTensorEye(unittest.TestCase): data = paddle.eye(10, dtype="int64") place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[data]) + (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) @@ -125,8 +121,9 @@ class API_TestTensorEye(unittest.TestCase): result = tmp_result expected_result = np.stack(result, axis=0) paddle.enable_static() - self.assertEqual(out.numpy().shape == np.array(expected_result).shape, - True) + self.assertEqual( + out.numpy().shape == np.array(expected_result).shape, True + ) self.assertEqual((out.numpy() == expected_result).all(), True) paddle.disable_static() @@ -141,8 +138,9 @@ class API_TestTensorEye(unittest.TestCase): result = tmp_result expected_result = np.stack(result, axis=0) paddle.enable_static() - self.assertEqual(out.numpy().shape == np.array(expected_result).shape, - True) + self.assertEqual( + out.numpy().shape == np.array(expected_result).shape, True + ) self.assertEqual((out.numpy() == expected_result).all(), True) def test_errors(self): @@ -165,7 +163,6 @@ class API_TestTensorEye(unittest.TestCase): class TestEyeRowsCol(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -191,8 +188,9 @@ class TestEyeRowsCol(UnittestBase): res = exe.run(fetch_list=[tmp, out]) gt = np.eye(3, 10) np.testing.assert_allclose(res[0], gt) - paddle.static.save_inference_model(self.save_path, [x], [tmp, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [tmp, out], exe + ) # Test for Inference Predictor infer_outs = self.infer_prog() np.testing.assert_allclose(infer_outs[0], gt) @@ -215,7 +213,6 @@ class TestEyeRowsCol(UnittestBase): class TestEyeRowsCol2(TestEyeRowsCol): - def call_func(self, x): rows = paddle.assign(3) cols = paddle.assign(10) diff --git 
a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py index 239f5a93b1f9f69f9660b7431b4f7411a1d74923..b56ff6edbfe9754784652b269c4cd1ecb78a924b 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py @@ -47,11 +47,9 @@ def channel_wise_quantize_max_abs(x, quant_bit=8, quant_axis=0): return y, scales -def channel_wise_dequantize_max_abs(x, - scales, - quant_bits, - quant_axis, - activation_scale=None): +def channel_wise_dequantize_max_abs( + x, scales, quant_bits, quant_axis, activation_scale=None +): assert quant_axis in [0, 1], "The quant_axis should be 0 or 1." if isinstance(quant_bits, list): @@ -72,7 +70,6 @@ def channel_wise_dequantize_max_abs(x, class TestFakeChannelWiseDequantizeMaxAbsOpTwoScales(OpTest): - def set_args(self): self.quant_bits = [8, 8] self.activation_scale = 0.7861 @@ -86,15 +83,19 @@ class TestFakeChannelWiseDequantizeMaxAbsOpTwoScales(OpTest): self.op_type = "fake_channel_wise_dequantize_max_abs" x = np.random.randn(4, 3, 64, 64).astype(self.dtype) yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0], 1) - ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits, 1, - self.activation_scale) + ydq = channel_wise_dequantize_max_abs( + yq, scales, self.quant_bits, 1, self.activation_scale + ) self.inputs = { - 'X': - yq, - 'Scales': - [("scales0", np.array(scales).astype(self.dtype)), - ("scales1", np.array([self.activation_scale]).astype(self.dtype))] + 'X': yq, + 'Scales': [ + ("scales0", np.array(scales).astype(self.dtype)), + ( + "scales1", + np.array([self.activation_scale]).astype(self.dtype), + ), + ], } self.attrs = {'quant_bits': self.quant_bits} self.outputs = {'Out': ydq} @@ -104,8 +105,8 @@ class TestFakeChannelWiseDequantizeMaxAbsOpTwoScales(OpTest): class TestFakeChannelWiseDequantizeMaxAbsOpTwoScalesFloat16( - TestFakeChannelWiseDequantizeMaxAbsOpTwoScales): - + TestFakeChannelWiseDequantizeMaxAbsOpTwoScales +): def set_dtype(self): self.dtype = np.float16 @@ -114,7 +115,6 @@ class TestFakeChannelWiseDequantizeMaxAbsOpTwoScalesFloat16( class TestFakeChannelWiseDequantizeMaxAbsOpOneScale(OpTest): - def set_args(self): self.quant_bits = [8] self.quant_axis = 0 @@ -127,18 +127,20 @@ class TestFakeChannelWiseDequantizeMaxAbsOpOneScale(OpTest): self.set_dtype() self.op_type = "fake_channel_wise_dequantize_max_abs" x = np.random.randn(4, 3, 64, 64).astype(self.dtype) - yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0], - self.quant_axis) - ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits, - self.quant_axis) + yq, scales = channel_wise_quantize_max_abs( + x, self.quant_bits[0], self.quant_axis + ) + ydq = channel_wise_dequantize_max_abs( + yq, scales, self.quant_bits, self.quant_axis + ) self.inputs = { 'X': yq, - 'Scales': [("scales0", np.array(scales).astype(self.dtype))] + 'Scales': [("scales0", np.array(scales).astype(self.dtype))], } self.attrs = { 'quant_bits': self.quant_bits, - 'quant_axis': self.quant_axis + 'quant_axis': self.quant_axis, } self.outputs = {'Out': ydq} @@ -147,16 +149,16 @@ class TestFakeChannelWiseDequantizeMaxAbsOpOneScale(OpTest): class TestFakeChannelWiseDequantizeMaxAbsOpOneScale1( - TestFakeChannelWiseDequantizeMaxAbsOpOneScale): - + TestFakeChannelWiseDequantizeMaxAbsOpOneScale +): def set_args(self): self.quant_bits = [8] self.quant_axis = 1 class TestFakeChannelWiseDequantizeMaxAbsOpOneScaleFloat16( - 
TestFakeChannelWiseDequantizeMaxAbsOpOneScale): - + TestFakeChannelWiseDequantizeMaxAbsOpOneScale +): def set_dtype(self): self.dtype = np.float16 @@ -165,8 +167,8 @@ class TestFakeChannelWiseDequantizeMaxAbsOpOneScaleFloat16( class TestFakeChannelWiseDequantizeMaxAbsOpOneScale1Float16( - TestFakeChannelWiseDequantizeMaxAbsOpOneScale1): - + TestFakeChannelWiseDequantizeMaxAbsOpOneScale1 +): def set_dtype(self): self.dtype = np.float16 @@ -175,7 +177,6 @@ class TestFakeChannelWiseDequantizeMaxAbsOpOneScale1Float16( class TestFakeDequantizeMaxAbsOp(OpTest): - def set_args(self): self.num_bits = 8 self.max_range = math.pow(2, self.num_bits - 1) - 1 @@ -200,20 +201,17 @@ class TestFakeDequantizeMaxAbsOp(OpTest): class TestFakeDequantizeMaxAbsOpDouble(TestFakeDequantizeMaxAbsOp): - def set_dtype(self): self.dtype = np.float64 class TestFakeDequantizeMaxAbsOp5Bits(TestFakeDequantizeMaxAbsOp): - def set_args(self): self.num_bits = 5 self.max_range = math.pow(2, self.num_bits - 1) - 1 class TestFakeDequantizeMaxAbsOpFloat16(TestFakeDequantizeMaxAbsOp): - def set_dtype(self): self.dtype = np.float16 @@ -222,7 +220,6 @@ class TestFakeDequantizeMaxAbsOpFloat16(TestFakeDequantizeMaxAbsOp): class TestChannelWiseDequantizeOp(OpTest): - def set_args(self): self.bit_length = 8 self.data_type = "float32" @@ -232,17 +229,19 @@ class TestChannelWiseDequantizeOp(OpTest): self.set_args() self.op_type = "dequantize_linear" x = np.random.randn(4, 3, 64, 64).astype(self.data_type) - yq, scale = channel_wise_quantize_max_abs(x, self.bit_length, - self.quant_axis) - ydq = channel_wise_dequantize_max_abs(yq, scale, self.bit_length, - self.quant_axis) + yq, scale = channel_wise_quantize_max_abs( + x, self.bit_length, self.quant_axis + ) + ydq = channel_wise_dequantize_max_abs( + yq, scale, self.bit_length, self.quant_axis + ) scale = np.array(scale).astype(self.data_type) zero_point = np.zeros(scale.shape, dtype="int32") print('TestChannelWiseDequantizeOp:') self.inputs = {'X': yq, 'Scale': scale, 'ZeroPoint': zero_point} self.attrs = { 'bit_length': self.bit_length, - 'quant_axis': self.quant_axis + 'quant_axis': self.quant_axis, } self.outputs = {'Y': ydq} @@ -251,7 +250,6 @@ class TestChannelWiseDequantizeOp(OpTest): class TestChannelWiseDequantizeOp1(TestChannelWiseDequantizeOp): - def set_args(self): self.bit_length = 8 self.data_type = "float32" @@ -259,7 +257,6 @@ class TestChannelWiseDequantizeOp1(TestChannelWiseDequantizeOp): class TestDequantizeOp(OpTest): - def set_args(self): self.bit_length = 8 self.quant_axis = -1 @@ -278,7 +275,7 @@ class TestDequantizeOp(OpTest): self.inputs = {'X': yq, 'Scale': scale, 'ZeroPoint': zero_point} self.attrs = { 'bit_length': self.bit_length, - 'quant_axis': self.quant_axis + 'quant_axis': self.quant_axis, } self.outputs = {'Y': ydq} @@ -287,7 +284,6 @@ class TestDequantizeOp(OpTest): class TestDequantizeOpDouble(TestDequantizeOp): - def set_args(self): self.bit_length = 8 self.max_range = math.pow(2, self.bit_length - 1) - 1 @@ -296,7 +292,6 @@ class TestDequantizeOpDouble(TestDequantizeOp): class TestDequantizeOp5Bits(TestDequantizeOp): - def set_args(self): self.bit_length = 5 self.max_range = math.pow(2, self.bit_length - 1) - 1 diff --git a/python/paddle/fluid/tests/unittests/test_fake_init_op.py b/python/paddle/fluid/tests/unittests/test_fake_init_op.py index 6ff7abe0c228f6cbc6e8925d83e92c5d6ec87319..4a18c09b389ea08d93ca202bbef76559297795c6 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_init_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_fake_init_op.py @@ -19,14 +19,14 @@ from paddle.fluid.op import Operator class TestFakeInitOpSelectedRows(unittest.TestCase): - def check_with_place(self, place, is_selected_rows): scope = core.Scope() out_var_name = 'Out' if is_selected_rows: - out_tensor = scope.var( - out_var_name).get_selected_rows().get_tensor() + out_tensor = ( + scope.var(out_var_name).get_selected_rows().get_tensor() + ) else: out_tensor = scope.var(out_var_name).get_tensor() diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py index 49d6329373be9a5262a5a8893db809b99c53e5f9..5bcb0eec733f5f12e96ca4a5f128a126dc6a7a5a 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py @@ -38,16 +38,13 @@ def get_compute_type(dtype): class TestFakeQuantizeAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_quantize_abs_max' self.attrs = {'bit_length': 8} - def _fake_quantize_abs_max(self, - dtype, - input_shape, - distribution, - round_type='TiesAwayFromZero'): + def _fake_quantize_abs_max( + self, dtype, input_shape, distribution, round_type='TiesAwayFromZero' + ): input_data = distribution(input_shape).astype(dtype) compute_type = get_compute_type(dtype) scale = np.max(np.abs(input_data)) @@ -55,12 +52,14 @@ class TestFakeQuantizeAbsMaxOp(OpTest): inv_scale = 1.0 / (scale + 1e-6) if scale < 1e-30 else 1.0 / scale if round_type == 'TiesToEven': round_out = np.round( - input_data.astype(compute_type) * inv_scale * bnt) + input_data.astype(compute_type) * inv_scale * bnt + ) output_data = np.clip(round_out, -bnt - 1, bnt) self.attrs['round_type'] = 0 else: output_data = round_c( - input_data.astype(compute_type) * inv_scale * bnt) + input_data.astype(compute_type) * inv_scale * bnt + ) self.attrs['round_type'] = 1 self.inputs = {'X': input_data} self.outputs = {'Out': output_data, 'OutScale': scale} @@ -71,9 +70,9 @@ class TestFakeQuantizeAbsMaxOp(OpTest): self._fake_quantize_abs_max(np.float32, (124, 240), np.random.random) def test_fake_quantize_abs_max_round1(self): - self._fake_quantize_abs_max(np.float32, (124, 240), - np.random.random, - round_type='TiesToEven') + self._fake_quantize_abs_max( + np.float32, (124, 240), np.random.random, round_type='TiesToEven' + ) def test_fake_quantize_abs_max_float16(self): self._fake_quantize_abs_max(np.float16, (124, 240), np.random.random) @@ -82,41 +81,45 @@ class TestFakeQuantizeAbsMaxOp(OpTest): self._fake_quantize_abs_max(np.float32, (10, 10), np.zeros) def test_fake_quantize_abs_max_underflow2(self): - self._fake_quantize_abs_max(np.float32, (10, 10), - lambda shape: np.full(shape, 1e-40)) + self._fake_quantize_abs_max( + np.float32, (10, 10), lambda shape: np.full(shape, 1e-40) + ) class TestFakeChannelWiseQuantizeAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_channel_wise_quantize_abs_max' self.attrs = {'bit_length': 8} - def _fake_channel_wise_quantize_abs_max(self, - dtype, - input_shape, - quant_axis, - distribution, - round_type='TiesToEven'): + def _fake_channel_wise_quantize_abs_max( + self, + dtype, + input_shape, + quant_axis, + distribution, + round_type='TiesToEven', + ): assert quant_axis in [0, 1], 'quant_axis should be 0 or 1.' 
input_data = distribution(input_shape).astype(dtype) compute_type = get_compute_type(dtype) bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 - compute_axis = tuple(i for i in range(len(input_shape)) - if i != quant_axis) + compute_axis = tuple( + i for i in range(len(input_shape)) if i != quant_axis + ) scale_broadcast = np.amax(input_data, axis=compute_axis, keepdims=True) if round_type == 'TiesToEven': round_out = np.round( - input_data.astype(compute_type) / scale_broadcast * bnt) + input_data.astype(compute_type) / scale_broadcast * bnt + ) output_data = np.clip(round_out, -bnt - 1, bnt) self.attrs['round_type'] = 0 else: - output_data = round_c(bnt * input_data.astype(compute_type) / - scale_broadcast) + output_data = round_c( + bnt * input_data.astype(compute_type) / scale_broadcast + ) self.attrs['round_type'] = 1 if quant_axis == 1: - scale_broadcast = np.transpose(scale_broadcast, - (1, ) + compute_axis) + scale_broadcast = np.transpose(scale_broadcast, (1,) + compute_axis) scale = scale_broadcast.reshape(input_shape[quant_axis], -1)[:, 0] self.inputs = {'X': input_data} self.outputs = {'Out': output_data, 'OutScale': scale} @@ -126,35 +129,41 @@ class TestFakeChannelWiseQuantizeAbsMaxOp(OpTest): def test_fake_channel_wise_quantize_abs_max(self): dtype_options = [np.float32, np.float16] - input_shape_quant_axis_options = [[(20, 15, 6, 6), 0], - [(20, 15, 6, 6), 1], [(30, 30), 0], - [(30, 30), 1]] + input_shape_quant_axis_options = [ + [(20, 15, 6, 6), 0], + [(20, 15, 6, 6), 1], + [(30, 30), 0], + [(30, 30), 1], + ] round_type_options = ['TiesToEven', 'TiesAwayFromZero'] for dtype, input_shape_quant_axis, round_type in itertools.product( - dtype_options, input_shape_quant_axis_options, - round_type_options): + dtype_options, input_shape_quant_axis_options, round_type_options + ): input_shape, quant_axis = input_shape_quant_axis - with self.subTest(dtype=dtype, - input_shape=input_shape, - quant_axis=quant_axis, - round_type=round_type): + with self.subTest( + dtype=dtype, + input_shape=input_shape, + quant_axis=quant_axis, + round_type=round_type, + ): self._fake_channel_wise_quantize_abs_max( - dtype, input_shape, quant_axis, np.random.random, - round_type) + dtype, input_shape, quant_axis, np.random.random, round_type + ) class TestFakeQuantizeRangeAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_quantize_range_abs_max' self.attrs = {'bit_length': 5, 'window_size': 1} - def _fake_quantize_range_abs_max(self, - dtype, - input_shape, - distribution, - is_test=False, - round_type='TiesToEven'): + def _fake_quantize_range_abs_max( + self, + dtype, + input_shape, + distribution, + is_test=False, + round_type='TiesToEven', + ): input_data = distribution(input_shape).astype(dtype) compute_type = get_compute_type(dtype) bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 @@ -165,7 +174,8 @@ class TestFakeQuantizeRangeAbsMaxOp(OpTest): out_scale[0] = in_scale[0] = out_scale[0] - 1.0 if round_type == 'TiesToEven': round_out = np.round( - input_data.astype(compute_type) / out_scale[0] * bnt) + input_data.astype(compute_type) / out_scale[0] * bnt + ) self.attrs['round_type'] = 0 output_data = np.clip(round_out, -bnt - 1, bnt) else: @@ -174,17 +184,18 @@ class TestFakeQuantizeRangeAbsMaxOp(OpTest): else: clip_data = input_data output_data = round_c( - clip_data.astype(compute_type) / out_scale[0] * bnt) + clip_data.astype(compute_type) / out_scale[0] * bnt + ) self.attrs['round_type'] = 1 self.inputs = { 'X': input_data, 'Iter': np.zeros(1).astype(np.int64), - 'InScale': in_scale + 
'InScale': in_scale, } self.outputs = { 'Out': output_data, 'OutScale': out_scale[0], - 'OutScales': out_scale + 'OutScales': out_scale, } self.dtype = dtype self.attrs['is_test'] = is_test @@ -195,20 +206,22 @@ class TestFakeQuantizeRangeAbsMaxOp(OpTest): is_test_options = [False, True] round_type_options = ['TiesToEven', 'TiesAwayFromZero'] for dtype, is_test, round_type in itertools.product( - dtype_options, is_test_options, round_type_options): + dtype_options, is_test_options, round_type_options + ): self.attrs['bit_length'] = 8 if is_test else 5 - with self.subTest(dtype=dtype, - is_test=is_test, - round_type=round_type): + with self.subTest( + dtype=dtype, is_test=is_test, round_type=round_type + ): self._fake_quantize_range_abs_max( - dtype, (8, 16, 6, 6), + dtype, + (8, 16, 6, 6), lambda shape: (np.random.random(shape) - 0.4) * 10, is_test=is_test, - round_type=round_type) + round_type=round_type, + ) class TestMovingAverageAbsMaxScaleOp(OpTest): - def setUp(self): self.op_type = 'moving_average_abs_max_scale' self.attrs = {'moving_rate': float(0.9), 'is_test': False} @@ -218,41 +231,44 @@ class TestMovingAverageAbsMaxScaleOp(OpTest): in_accum = np.ones(1).astype(dtype) in_state = np.ones(1).astype(dtype) out_accum = self.attrs['moving_rate'] * in_accum[0] + np.max( - np.abs(input_data)) + np.abs(input_data) + ) out_state = self.attrs['moving_rate'] * in_state[0] + 1.0 out_scale = out_accum / out_state self.inputs = { 'X': input_data, 'InAccum': in_accum, - 'InState': in_state + 'InState': in_state, } self.outputs = { 'Out': input_data, 'OutAccum': out_accum, 'OutState': out_state, - 'OutScale': out_scale + 'OutScale': out_scale, } self.dtype = dtype self.check_output() def test_moving_average_abs_max(self): - self._moving_average_abs_max_scale(np.float32, (8, 16, 7, 7), - np.random.random) + self._moving_average_abs_max_scale( + np.float32, (8, 16, 7, 7), np.random.random + ) class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_quantize_moving_average_abs_max' self.attrs = {'bit_length': 5, 'moving_rate': 0.9, 'is_test': False} - def _fake_quantize_moving_average_abs_max(self, - dtype, - input_shape, - distribution, - dequantize=False, - with_gradient=False, - round_type='TiesAwayFromZero'): + def _fake_quantize_moving_average_abs_max( + self, + dtype, + input_shape, + distribution, + dequantize=False, + with_gradient=False, + round_type='TiesAwayFromZero', + ): input_data = distribution(input_shape).astype(dtype) compute_type = get_compute_type(dtype) bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 @@ -263,17 +279,20 @@ class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest): out_state = np.zeros(1).astype(dtype) out_scale = np.zeros(1).astype(dtype) out_accum[0] = self.attrs['moving_rate'] * in_accum[0] + np.max( - np.abs(input_data)) + np.abs(input_data) + ) out_state[0] = self.attrs['moving_rate'] * in_state[0] + 1.0 out_scale = out_accum / out_state if round_type == 'TiesToEven': round_out = np.round( - input_data.astype(compute_type) / out_scale * bnt) + input_data.astype(compute_type) / out_scale * bnt + ) quant_data = np.clip(round_out, -bnt - 1, bnt) self.attrs['round_type'] = 0 else: quant_data = round_c( - input_data.astype(compute_type) / out_scale * bnt) + input_data.astype(compute_type) / out_scale * bnt + ) self.attrs['round_type'] = 1 if dequantize: output_data = (quant_data * out_scale / bnt).astype(dtype) @@ -284,13 +303,13 @@ class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest): 'X': input_data, 'InScale': in_scale, 'InAccum': 
in_accum, - 'InState': in_state + 'InState': in_state, } self.outputs = { 'Out': output_data, 'OutAccum': out_accum, 'OutState': out_state, - 'OutScale': out_scale + 'OutScale': out_scale, } self.dtype = dtype self.check_output() @@ -301,36 +320,38 @@ class TestFakeQuantizeMovingAverageAbsMaxOp(OpTest): self.check_grad(['X'], 'Out', user_defined_grads=gradient) def test_fake_quantize_moving_average_abs_max(self): - self._fake_quantize_moving_average_abs_max(np.float32, (8, 16, 7, 7), - np.random.random) + self._fake_quantize_moving_average_abs_max( + np.float32, (8, 16, 7, 7), np.random.random + ) def test_fake_quantize_moving_average_abs_max_float16(self): - self._fake_quantize_moving_average_abs_max(np.float16, (8, 16, 7, 7), - np.random.random) + self._fake_quantize_moving_average_abs_max( + np.float16, (8, 16, 7, 7), np.random.random + ) def test_fake_quantize_moving_average_abs_max_round1(self): - self._fake_quantize_moving_average_abs_max(np.float32, (8, 16, 7, 7), - np.random.random, - round_type='TiesToEven') + self._fake_quantize_moving_average_abs_max( + np.float32, (8, 16, 7, 7), np.random.random, round_type='TiesToEven' + ) def test_fake_quantize_dequantize_moving_average_abs_max(self): - self._fake_quantize_moving_average_abs_max(np.float32, (8, 16, 7, 7), - np.random.random, - dequantize=True, - with_gradient=True) + self._fake_quantize_moving_average_abs_max( + np.float32, + (8, 16, 7, 7), + np.random.random, + dequantize=True, + with_gradient=True, + ) class TestFakeQuantizeDequantizeAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_quantize_dequantize_abs_max' self.attrs = {'bit_length': 8} - def _fake_quantize_dequantize_abs_max(self, - dtype, - input_shape, - distribution, - round_type='TiesAwayFromZero'): + def _fake_quantize_dequantize_abs_max( + self, dtype, input_shape, distribution, round_type='TiesAwayFromZero' + ): input_data = distribution(input_shape).astype(dtype) scale = np.max(np.abs(input_data)).astype(dtype) bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 @@ -344,7 +365,7 @@ class TestFakeQuantizeDequantizeAbsMaxOp(OpTest): self.inputs = {'X': input_data} self.outputs = { 'Out': output_data, - 'OutScale': np.array(scale).astype(dtype) + 'OutScale': np.array(scale).astype(dtype), } self.dtype = dtype self.check_output() @@ -352,47 +373,53 @@ class TestFakeQuantizeDequantizeAbsMaxOp(OpTest): self.check_grad(['X'], 'Out', user_defined_grads=gradient) def test_fake_quantize_dequantize_abs_max(self): - self._fake_quantize_dequantize_abs_max(np.float32, (124, 240), - np.random.random) + self._fake_quantize_dequantize_abs_max( + np.float32, (124, 240), np.random.random + ) def test_fake_quantize_dequantize_abs_max_round1(self): - self._fake_quantize_dequantize_abs_max(np.float32, (124, 240), - np.random.random, - round_type='TiesToEven') + self._fake_quantize_dequantize_abs_max( + np.float32, (124, 240), np.random.random, round_type='TiesToEven' + ) class TestChannelWiseFakeQuantizeDequantizeAbsMaxOp(OpTest): - def setUp(self): self.op_type = 'fake_channel_wise_quantize_dequantize_abs_max' self.attrs = {'bit_length': 8} - def _fake_channel_wise_quantize_dequantize_abs_max(self, - dtype, - input_shape, - quant_axis, - distribution, - round_type='TiesToEven'): + def _fake_channel_wise_quantize_dequantize_abs_max( + self, + dtype, + input_shape, + quant_axis, + distribution, + round_type='TiesToEven', + ): assert quant_axis in [0, 1], 'quant_axis should be 0 or 1.' 
input_data = distribution(input_shape).astype(dtype) compute_type = get_compute_type(dtype) bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 output_data = input_data.copy().astype(compute_type) - compute_axis = tuple(i for i in range(len(input_shape)) - if i != quant_axis) + compute_axis = tuple( + i for i in range(len(input_shape)) if i != quant_axis + ) scale_broadcast = np.amax(input_data, axis=compute_axis, keepdims=True) if round_type == 'TiesToEven': round_out = np.round(bnt * output_data / scale_broadcast) - output_data = np.clip(round_out, -bnt - 1, - bnt) * scale_broadcast / bnt + output_data = ( + np.clip(round_out, -bnt - 1, bnt) * scale_broadcast / bnt + ) self.attrs['round_type'] = 0 else: - output_data = round_c( - bnt * output_data / scale_broadcast) * scale_broadcast / bnt + output_data = ( + round_c(bnt * output_data / scale_broadcast) + * scale_broadcast + / bnt + ) self.attrs['round_type'] = 1 if quant_axis == 1: - scale_broadcast = np.transpose(scale_broadcast, - (1, ) + compute_axis) + scale_broadcast = np.transpose(scale_broadcast, (1,) + compute_axis) scale = scale_broadcast.reshape(input_shape[quant_axis], -1)[:, 0] self.inputs = {'X': input_data} self.outputs = {'Out': output_data, 'OutScale': scale} @@ -403,22 +430,29 @@ class TestChannelWiseFakeQuantizeDequantizeAbsMaxOp(OpTest): self.check_grad(['X'], 'Out', user_defined_grads=gradient) def test_channel_wise_fake_quant_dequant_abs_max(self): - input_shape_quant_axis_options = [[(3, 4, 64, 64), 0], - [(15, 20, 5, 5), 1], [(30, 15), 0], - [(30, 15), 1]] + input_shape_quant_axis_options = [ + [(3, 4, 64, 64), 0], + [(15, 20, 5, 5), 1], + [(30, 15), 0], + [(30, 15), 1], + ] round_type_options = ['TiesToEven', 'TiesAwayFromZero'] for input_shape_quant_axis, round_type in itertools.product( - input_shape_quant_axis_options, round_type_options): + input_shape_quant_axis_options, round_type_options + ): input_shape, quant_axis = input_shape_quant_axis - with self.subTest(input_shape=input_shape, - quant_axis=quant_axis, - round_type=round_type): + with self.subTest( + input_shape=input_shape, + quant_axis=quant_axis, + round_type=round_type, + ): self._fake_channel_wise_quantize_dequantize_abs_max( np.float32, input_shape, quant_axis, np.random.random, - round_type=round_type) + round_type=round_type, + ) def quantize_max_abs(x, max_range): @@ -446,7 +480,6 @@ def channel_wise_quantize_max_abs(x, quant_bit=8, quant_axis=0): class TestChannelWiseQuantizeOp(OpTest): - def set_args(self): self.bit_length = 8 self.data_type = "float32" @@ -456,15 +489,16 @@ class TestChannelWiseQuantizeOp(OpTest): self.set_args() self.op_type = "quantize_linear" x = np.random.randn(4, 3, 64, 64).astype(self.data_type) - yq, scale = channel_wise_quantize_max_abs(x, self.bit_length, - self.quant_axis) + yq, scale = channel_wise_quantize_max_abs( + x, self.bit_length, self.quant_axis + ) scale = np.array(scale).astype(self.data_type) zero_point = np.zeros(scale.shape, dtype="int32") self.inputs = {'X': x, 'Scale': scale, 'ZeroPoint': zero_point} self.attrs = { 'bit_length': self.bit_length, - 'quant_axis': self.quant_axis + 'quant_axis': self.quant_axis, } self.outputs = {'Y': yq} @@ -473,7 +507,6 @@ class TestChannelWiseQuantizeOp(OpTest): class TestChannelWiseQuantizeOp1(TestChannelWiseQuantizeOp): - def set_args(self): self.bit_length = 8 self.data_type = "float32" @@ -481,7 +514,6 @@ class TestChannelWiseQuantizeOp1(TestChannelWiseQuantizeOp): class TestChannelWiseQuantizeOpTrain(OpTest): - def set_args(self): self.bit_length = 8 
self.data_type = "float32" @@ -492,8 +524,9 @@ class TestChannelWiseQuantizeOpTrain(OpTest): self.set_args() self.op_type = "quantize_linear" x = np.random.randn(4, 3, 64, 64).astype(self.data_type) - yq, scale = channel_wise_quantize_max_abs(x, self.bit_length, - self.quant_axis) + yq, scale = channel_wise_quantize_max_abs( + x, self.bit_length, self.quant_axis + ) scale = np.array(scale).astype(self.data_type) zero_point = np.zeros(scale.shape, dtype="int32") @@ -501,7 +534,7 @@ class TestChannelWiseQuantizeOpTrain(OpTest): self.attrs = { 'bit_length': self.bit_length, 'quant_axis': self.quant_axis, - 'is_test': self.is_test + 'is_test': self.is_test, } self.outputs = {'Y': yq, 'OutScale': scale} @@ -510,7 +543,6 @@ class TestChannelWiseQuantizeOpTrain(OpTest): class TestquantizeOp(OpTest): - def set_args(self): self.bit_length = 8 self.quant_axis = -1 @@ -537,7 +569,6 @@ class TestquantizeOp(OpTest): class TestquantizeOpTrain(TestquantizeOp): - def set_args(self): self.bit_length = 8 self.quant_axis = -1 @@ -552,7 +583,7 @@ class TestquantizeOpTrain(TestquantizeOp): 'bit_length': self.bit_length, 'quant_axis': self.quant_axis, 'moving_rate': 0.9, - 'is_test': self.is_test + 'is_test': self.is_test, } x = np.random.randn(31, 65).astype(self.data_type) @@ -563,7 +594,8 @@ class TestquantizeOpTrain(TestquantizeOp): out_accum = np.zeros(1).astype(self.data_type) out_state = np.zeros(1).astype(self.data_type) out_accum[0] = self.attrs['moving_rate'] * in_accum[0] + np.max( - np.abs(x)) + np.abs(x) + ) out_state[0] = self.attrs['moving_rate'] * in_state[0] + 1.0 out_scale = out_accum / out_state diff --git a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py index 44aa0225059ee01a0f79243f1166ee84f2d155e0..b21f25b75a3643a1d4e20ba16541db4492e8a031 100755 --- a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py +++ b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py @@ -38,8 +38,13 @@ def to_string_tensor(string_values, name): string_values(list[string]): The value will be setted to the tensor. name(string): The name of the tensor. """ - tensor = paddle.Tensor(core.VarDesc.VarType.STRING, [], name, - core.VarDesc.VarType.STRINGS, False) + tensor = paddle.Tensor( + core.VarDesc.VarType.STRING, + [], + name, + core.VarDesc.VarType.STRINGS, + False, + ) tensor.value().set_string_list(string_values) return tensor @@ -54,31 +59,42 @@ def to_map_tensor(string_dict, name): string_dict(dict): The value will be setted to the tensor. name(string): The name of the tensor. 
""" - tensor = paddle.Tensor(core.VarDesc.VarType.RAW, [], name, - core.VarDesc.VarType.VOCAB, True) + tensor = paddle.Tensor( + core.VarDesc.VarType.RAW, [], name, core.VarDesc.VarType.VOCAB, True + ) tensor.value().set_vocab(string_dict) return tensor class FasterTokenizer(nn.Layer): - def __init__(self, vocab_dict): super(FasterTokenizer, self).__init__() vocab_tensor = to_map_tensor(vocab_dict, "vocab") self.register_buffer("vocab", vocab_tensor, persistable=True) - def forward(self, - text, - text_pair=None, - do_lower_case=True, - max_seq_len=-1, - is_split_into_words=False, - pad_to_max_seq_len=False): + def forward( + self, + text, + text_pair=None, + do_lower_case=True, + max_seq_len=-1, + is_split_into_words=False, + pad_to_max_seq_len=False, + ): if _non_static_mode(): input_ids, seg_ids = _legacy_C_ops.faster_tokenizer( - self.vocab, text, text_pair, "do_lower_case", do_lower_case, - "max_seq_len", max_seq_len, "pad_to_max_seq_len", - pad_to_max_seq_len, "is_split_into_words", is_split_into_words) + self.vocab, + text, + text_pair, + "do_lower_case", + do_lower_case, + "max_seq_len", + max_seq_len, + "pad_to_max_seq_len", + pad_to_max_seq_len, + "is_split_into_words", + is_split_into_words, + ) return input_ids, seg_ids attrs = { @@ -91,33 +107,27 @@ class FasterTokenizer(nn.Layer): input_ids = helper.create_variable_for_type_inference(dtype="int64") seg_ids = helper.create_variable_for_type_inference(dtype="int64") if text_pair is None: - helper.append_op(type='faster_tokenizer', - inputs={ - 'Vocab': self.vocab, - 'Text': text - }, - outputs={ - 'InputIds': input_ids, - 'SegmentIds': seg_ids - }, - attrs=attrs) + helper.append_op( + type='faster_tokenizer', + inputs={'Vocab': self.vocab, 'Text': text}, + outputs={'InputIds': input_ids, 'SegmentIds': seg_ids}, + attrs=attrs, + ) else: - helper.append_op(type='faster_tokenizer', - inputs={ - 'Vocab': self.vocab, - 'Text': text, - 'TextPair': text_pair - }, - outputs={ - 'InputIds': input_ids, - 'SegmentIds': seg_ids - }, - attrs=attrs) + helper.append_op( + type='faster_tokenizer', + inputs={ + 'Vocab': self.vocab, + 'Text': text, + 'TextPair': text_pair, + }, + outputs={'InputIds': input_ids, 'SegmentIds': seg_ids}, + attrs=attrs, + ) return input_ids, seg_ids class Predictor(object): - def __init__(self, model_dir): model_file = os.path.join(model_dir, "inference.pdmodel") params_file = os.path.join(model_dir, "inference.pdiparams") @@ -152,7 +162,6 @@ class Predictor(object): class TestBertTokenizerOp(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.bert_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") @@ -177,11 +186,12 @@ class TestBertTokenizerOp(unittest.TestCase): '很好的地理位置,一蹋糊涂的服务,萧条的酒店。', ' 选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,' '但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', - 'Test bert tokenizer. The first text.' + 'Test bert tokenizer. The first text.', ] self.text_pairs = [ - '非常不错,服务很好,位于市中心区,交通方便,不过价格也高!', '房间太小。其他的都一般。。。。。。。。。', - 'Test bert tokenizer. The second text.' + '非常不错,服务很好,位于市中心区,交通方便,不过价格也高!', + '房间太小。其他的都一般。。。。。。。。。', + 'Test bert tokenizer. 
The second text.', ] self.texts_tensor = to_string_tensor(self.texts, "texts") self.text_pairs_tensor = to_string_tensor(self.text_pairs, "text_pairs") @@ -198,7 +208,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -206,15 +217,16 @@ class TestBertTokenizerOp(unittest.TestCase): text=self.text, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( - encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) + encoded_inputs[0]["token_type_ids"] + ).reshape([1, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) # case 2: only one text and one text_pair (batch_size = 1) input_ids, token_type_ids = self.faster_tokenizer( @@ -223,7 +235,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -232,15 +245,16 @@ class TestBertTokenizerOp(unittest.TestCase): text_pair=self.text_pair, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( - encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) + encoded_inputs[0]["token_type_ids"] + ).reshape([1, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) # case 3: only texts (batch_size = 3) input_ids, token_type_ids = self.faster_tokenizer( @@ -248,7 +262,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -256,16 +271,16 @@ class TestBertTokenizerOp(unittest.TestCase): self.texts, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = [i["input_ids"] for i in encoded_inputs] py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs] py_input_ids = np.array(py_input_ids).reshape([3, -1]) py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + 
np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) # case 4: texts and text pairs (batch_size = 3) input_ids, token_type_ids = self.faster_tokenizer( @@ -274,7 +289,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -283,16 +299,16 @@ class TestBertTokenizerOp(unittest.TestCase): self.text_pairs, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = [i["input_ids"] for i in encoded_inputs] py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs] py_input_ids = np.array(py_input_ids).reshape([3, -1]) py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) def test_padding(self): with _test_eager_guard(): @@ -311,7 +327,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -319,15 +336,16 @@ class TestBertTokenizerOp(unittest.TestCase): self.text, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( - encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) + encoded_inputs[0]["token_type_ids"] + ).reshape([1, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) # case 2: only one text and one text_pair (batch_size = 1) input_ids, token_type_ids = self.faster_tokenizer( @@ -336,7 +354,8 @@ class TestBertTokenizerOp(unittest.TestCase): do_lower_case=self.bert_tokenizer.do_lower_case, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() @@ -345,15 +364,16 @@ class TestBertTokenizerOp(unittest.TestCase): self.text_pair, max_seq_len=self.max_seq_len, pad_to_max_seq_len=self.pad_to_max_seq_len, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( - encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) + encoded_inputs[0]["token_type_ids"] + ).reshape([1, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + 
np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) def test_no_padding(self): with _test_eager_guard(): @@ -367,19 +387,21 @@ class TestBertTokenizerOp(unittest.TestCase): input_ids, token_type_ids = self.faster_tokenizer( self.text_tensor, do_lower_case=self.bert_tokenizer.do_lower_case, - is_split_into_words=self.is_split_into_words) + is_split_into_words=self.is_split_into_words, + ) input_ids = input_ids.numpy() token_type_ids = token_type_ids.numpy() encoded_inputs = self.bert_tokenizer( - list(self.text[0]), is_split_into_words=self.is_split_into_words) + list(self.text[0]), is_split_into_words=self.is_split_into_words + ) py_input_ids = np.array(encoded_inputs["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array(encoded_inputs["token_type_ids"]).reshape( - [1, -1]) + [1, -1] + ) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) def test_is_split_into_words(self): with _test_eager_guard(): @@ -398,8 +420,10 @@ class TestBertTokenizerOp(unittest.TestCase): self.faster_tokenizer, input_spec=[ paddle.static.InputSpec( - shape=[None], dtype=core.VarDesc.VarType.STRINGS), # texts - ]) + shape=[None], dtype=core.VarDesc.VarType.STRINGS + ), # texts + ], + ) # Save in static graph model. paddle.jit.save(static_model, self.inference_path) predictor = Predictor(self.save_path) @@ -408,19 +432,19 @@ class TestBertTokenizerOp(unittest.TestCase): encoded_inputs = self.bert_tokenizer(self.text) py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( - encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) + encoded_inputs[0]["token_type_ids"] + ).reshape([1, -1]) np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) - np.testing.assert_allclose(token_type_ids, - py_token_type_ids, - rtol=0, - atol=0.01) + np.testing.assert_allclose( + token_type_ids, py_token_type_ids, rtol=0, atol=0.01 + ) def test_feed_string_var(self): self.init_data() paddle.enable_static() - x = paddle.static.data(name="x", - shape=[-1], - dtype=core.VarDesc.VarType.STRINGS) + x = paddle.static.data( + name="x", shape=[-1], dtype=core.VarDesc.VarType.STRINGS + ) exe = paddle.static.Executor(paddle.framework.CPUPlace()) exe.run(paddle.static.default_main_program(), feed={'x': self.text}) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index ac69b8e8c6ff3b24c4eb1a8f2f185bac007ad385..8003c0f99e84ba072dfc94a54b942a23f4975cc4 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -43,7 +43,6 @@ def fc_refer(matrix, with_bias, with_relu=False): class MatrixGenerate: - def __init__(self, mb, ic, oc, h, w, bias_dims=2): self.input = np.random.random((mb, ic, h, w)).astype("float32") self.weights = np.random.random((ic * h * w, oc)).astype("float32") @@ -54,7 +53,6 @@ class MatrixGenerate: class TestFCOp(OpTest): - def config(self): self.with_bias = True self.with_relu = True @@ -68,7 +66,7 @@ class TestFCOp(OpTest): self.inputs = { 'Input': self.matrix.input, 'W': self.matrix.weights, - 'Bias': self.matrix.bias + 'Bias': self.matrix.bias, } else: self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights} @@ -88,7 +86,6 @@ class TestFCOp(OpTest): class 
TestFCOpNoBias1(TestFCOp): - def config(self): self.with_bias = False self.with_relu = False @@ -96,7 +93,6 @@ class TestFCOpNoBias1(TestFCOp): class TestFCOpNoBias2(TestFCOp): - def config(self): self.with_bias = False self.with_relu = False @@ -104,7 +100,6 @@ class TestFCOpNoBias2(TestFCOp): class TestFCOpNoBias4(TestFCOp): - def config(self): self.with_bias = False self.with_relu = False @@ -112,7 +107,6 @@ class TestFCOpNoBias4(TestFCOp): class TestFCOpWithBias1(TestFCOp): - def config(self): self.with_bias = True self.with_relu = False @@ -120,7 +114,6 @@ class TestFCOpWithBias1(TestFCOp): class TestFCOpWithBias2(TestFCOp): - def config(self): self.with_bias = True self.with_relu = True @@ -128,7 +121,6 @@ class TestFCOpWithBias2(TestFCOp): class TestFCOpWithBias3(TestFCOp): - def config(self): self.with_bias = True self.with_relu = True @@ -136,7 +128,6 @@ class TestFCOpWithBias3(TestFCOp): class TestFCOpWithPadding(TestFCOp): - def config(self): self.with_bias = True self.with_relu = True @@ -144,9 +135,7 @@ class TestFCOpWithPadding(TestFCOp): class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): - def test_api(self): - def run_program(num_flatten_dims): paddle.seed(SEED) np.random.seed(SEED) @@ -155,17 +144,22 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): with program_guard(main_program, startup_program): input = np.random.random([2, 2, 25]).astype("float32") - x = fluid.layers.data(name="x", - shape=[2, 2, 25], - append_batch_size=False, - dtype="float32") - - out = paddle.static.nn.fc(x=x, - size=1, - num_flatten_dims=num_flatten_dims) - - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + x = fluid.layers.data( + name="x", + shape=[2, 2, 25], + append_batch_size=False, + dtype="float32", + ) + + out = paddle.static.nn.fc( + x=x, size=1, num_flatten_dims=num_flatten_dims + ) + + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = fluid.Executor(place=place) exe.run(startup_program) out = exe.run(main_program, feed={"x": input}, fetch_list=[out]) @@ -177,7 +171,6 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): class TestFCOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((2, 4)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py index be58e03f661af9146d09de00eb804698ed32348d..3b885f26683cd45c80c7a25a7f0600d16784933e 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py +++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py @@ -27,8 +27,8 @@ np.random.seed(123) class TestFeedData(unittest.TestCase): ''' - Test paddle.fluid.data feeds with different shape and types. - Note: paddle.fluid.data is not paddle.fluid.layers.data. + Test paddle.fluid.data feeds with different shape and types. + Note: paddle.fluid.data is not paddle.fluid.layers.data. ''' def setUp(self): @@ -38,16 +38,22 @@ class TestFeedData(unittest.TestCase): self.iterations = 5 def _get_device_count(self, use_cuda): - return core.get_cuda_device_count() if use_cuda else int( - os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + return ( + core.get_cuda_device_count() + if use_cuda + else int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + ) def _get_feed_batch_size(self, use_cuda, use_parallel_executor): """ Returns actual fed data size. 
We should multiple the number of devices when it is using ParallelExecutor """ - return self.data_batch_size * self._get_device_count( - use_cuda) if use_parallel_executor else self.data_batch_size + return ( + self.data_batch_size * self._get_device_count(use_cuda) + if use_parallel_executor + else self.data_batch_size + ) def _simple_fc_net(self, in_size, label_size, class_num, hidden_sizes): in_data = fluid.data(name="data", dtype='float32', shape=in_size) @@ -59,26 +65,32 @@ class TestFeedData(unittest.TestCase): predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax') loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) return in_data, label, loss def test(self): - for use_cuda in [True, False - ] if core.is_compiled_with_cuda() else [False]: + for use_cuda in ( + [True, False] if core.is_compiled_with_cuda() else [False] + ): for use_parallel_executor in [False, True]: print('Test Parameters:'), - print({ - 'use_cuda': use_cuda, - 'use_parallel_executor': use_parallel_executor, - }) + print( + { + 'use_cuda': use_cuda, + 'use_parallel_executor': use_parallel_executor, + } + ) # Test feeding without error - self._test_feed_data_match_shape_type(use_cuda, - use_parallel_executor) - self._test_feed_data_contains_neg_one(use_cuda, - use_parallel_executor) + self._test_feed_data_match_shape_type( + use_cuda, use_parallel_executor + ) + self._test_feed_data_contains_neg_one( + use_cuda, use_parallel_executor + ) self._test_feed_lod_tensor(use_cuda, use_parallel_executor) # Test exception message when feeding with error @@ -86,74 +98,110 @@ class TestFeedData(unittest.TestCase): error_shape_list = [self.data_batch_size, 3, 4, 5] with self.assertRaises(ValueError) as shape_mismatch_err: - self._test_feed_data_shape_mismatch(use_cuda, - use_parallel_executor) + self._test_feed_data_shape_mismatch( + use_cuda, use_parallel_executor + ) self.assertEqual( str(shape_mismatch_err.exception), "The fed Variable %r should have dimensions = %r, " - "shape = %r, but received fed shape %r on each device" % - (u'data', len(in_shape_tuple), in_shape_tuple, - error_shape_list)) + "shape = %r, but received fed shape %r on each device" + % ( + u'data', + len(in_shape_tuple), + in_shape_tuple, + error_shape_list, + ), + ) with self.assertRaises(ValueError) as dtype_mismatch_err: - self._test_feed_data_dtype_mismatch(use_cuda, - use_parallel_executor) + self._test_feed_data_dtype_mismatch( + use_cuda, use_parallel_executor + ) self.assertEqual( str(dtype_mismatch_err.exception), "The data type of fed Variable %r must be 'int64', but " - "received 'float64'" % (u'label')) + "received 'float64'" % (u'label'), + ) def _test_feed_data_dtype_mismatch(self, use_cuda, use_parallel_executor): - feed_batch_size = self._get_feed_batch_size(use_cuda, - use_parallel_executor) + feed_batch_size = self._get_feed_batch_size( + use_cuda, use_parallel_executor + ) in_size = [self.data_batch_size, 3, 4, 5] feed_in_data = np.random.uniform( - size=[feed_batch_size, 3, 4, 5]).astype(np.float32) + size=[feed_batch_size, 3, 4, 5] + ).astype(np.float32) label_size = [self.data_batch_size, 1] - feed_label = np.random.randint(low=0, - high=self.class_num, - size=[feed_batch_size, - 1]).astype(np.float64) - self._feed_data_in_executor(in_size, label_size, feed_in_data, - feed_label, use_cuda, use_parallel_executor) + feed_label = np.random.randint( + low=0, 
high=self.class_num, size=[feed_batch_size, 1] + ).astype(np.float64) + self._feed_data_in_executor( + in_size, + label_size, + feed_in_data, + feed_label, + use_cuda, + use_parallel_executor, + ) def _test_feed_data_shape_mismatch(self, use_cuda, use_parallel_executor): batch_size = self._get_feed_batch_size(use_cuda, use_parallel_executor) in_size = [None, 3, 4, 8] feed_in_data = np.random.uniform(size=[batch_size, 3, 4, 5]).astype( - np.float32) + np.float32 + ) label_size = [-1, 1] - feed_label = np.random.randint(low=0, - high=self.class_num, - size=[batch_size, 1]).astype(np.int64) - self._feed_data_in_executor(in_size, label_size, feed_in_data, - feed_label, use_cuda, use_parallel_executor) + feed_label = np.random.randint( + low=0, high=self.class_num, size=[batch_size, 1] + ).astype(np.int64) + self._feed_data_in_executor( + in_size, + label_size, + feed_in_data, + feed_label, + use_cuda, + use_parallel_executor, + ) def _test_feed_data_contains_neg_one(self, use_cuda, use_parallel_executor): batch_size = self._get_feed_batch_size(use_cuda, use_parallel_executor) in_size = [-1, 3, 4, 5] feed_in_data = np.random.uniform(size=[batch_size, 3, 4, 5]).astype( - np.float32) + np.float32 + ) label_size = (None, 1) - feed_label = np.random.randint(low=0, - high=self.class_num, - size=[batch_size, 1]).astype(np.int64) - self._feed_data_in_executor(in_size, label_size, feed_in_data, - feed_label, use_cuda, use_parallel_executor) + feed_label = np.random.randint( + low=0, high=self.class_num, size=[batch_size, 1] + ).astype(np.int64) + self._feed_data_in_executor( + in_size, + label_size, + feed_in_data, + feed_label, + use_cuda, + use_parallel_executor, + ) def _test_feed_data_match_shape_type(self, use_cuda, use_parallel_executor): - feed_batch_size = self._get_feed_batch_size(use_cuda, - use_parallel_executor) + feed_batch_size = self._get_feed_batch_size( + use_cuda, use_parallel_executor + ) in_size = [self.data_batch_size, 3, 4, 5] feed_in_data = np.random.uniform( - size=[feed_batch_size, 3, 4, 5]).astype(np.float32) + size=[feed_batch_size, 3, 4, 5] + ).astype(np.float32) label_size = [self.data_batch_size, 1] - feed_label = np.random.randint(low=0, - high=self.class_num, - size=[feed_batch_size, - 1]).astype(np.int64) - self._feed_data_in_executor(in_size, label_size, feed_in_data, - feed_label, use_cuda, use_parallel_executor) + feed_label = np.random.randint( + low=0, high=self.class_num, size=[feed_batch_size, 1] + ).astype(np.int64) + self._feed_data_in_executor( + in_size, + label_size, + feed_in_data, + feed_label, + use_cuda, + use_parallel_executor, + ) def _test_feed_lod_tensor(self, use_cuda, use_parallel_executor): device_count = self._get_device_count(use_cuda) @@ -164,33 +212,46 @@ class TestFeedData(unittest.TestCase): sum_length = int((device_count + 1) * device_count / 2) feed_in_data = np.random.uniform(size=[sum_length, 3, 4, 5]).astype( - np.float32) + np.float32 + ) feed_data_tensor = fluid.LoDTensor() feed_data_tensor.set(feed_in_data, fluid.CPUPlace()) feed_data_tensor.set_recursive_sequence_lengths(sequence_lengths) label_size = [device_count, 1] feed_label_tensor = fluid.LoDTensor() - feed_label = np.random.randint(low=0, - high=self.class_num, - size=[sum_length, 1]).astype(np.int64) + feed_label = np.random.randint( + low=0, high=self.class_num, size=[sum_length, 1] + ).astype(np.int64) feed_label_tensor.set(feed_label, fluid.CPUPlace()) feed_label_tensor.set_recursive_sequence_lengths(sequence_lengths) - self._feed_data_in_executor(in_size, label_size, 
feed_data_tensor, - feed_label_tensor, use_cuda, - use_parallel_executor) - - def _feed_data_in_executor(self, in_size, label_size, feed_in_data, - feed_label, use_cuda, use_parallel_executor): + self._feed_data_in_executor( + in_size, + label_size, + feed_data_tensor, + feed_label_tensor, + use_cuda, + use_parallel_executor, + ) + + def _feed_data_in_executor( + self, + in_size, + label_size, + feed_in_data, + feed_label, + use_cuda, + use_parallel_executor, + ): startup_program = fluid.Program() main_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - in_data, label, loss = self._simple_fc_net(in_size, label_size, - self.class_num, - self.hidden_sizes) + in_data, label, loss = self._simple_fc_net( + in_size, label_size, self.class_num, self.hidden_sizes + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -200,15 +261,15 @@ class TestFeedData(unittest.TestCase): train_program = main_program if use_parallel_executor: train_program = compiler.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name) + main_program + ).with_data_parallel(loss_name=loss.name) for i in range(self.iterations): - fetches = exe.run(train_program, - feed={ - in_data.name: feed_in_data, - label.name: feed_label - }, - fetch_list=[loss.name]) + fetches = exe.run( + train_program, + feed={in_data.name: feed_in_data, label.name: feed_label}, + fetch_list=[loss.name], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py index 1a6d780fb61fe476f949396f14b3cc109f943969..4054af8fb5192532ed256815ed6310b4cd132e38 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py +++ b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py @@ -18,7 +18,6 @@ import numpy as np class TestFeedFetch(unittest.TestCase): - def test_feed_fetch(self): scope = core.Scope() place = core.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_fetch_handler.py b/python/paddle/fluid/tests/unittests/test_fetch_handler.py index 562c79cc6ebed2c6dd3bcb0e130f966b5c721c8b..6a8b1f689fd7975d84536c903858df88f62c12d0 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_handler.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_handler.py @@ -22,7 +22,6 @@ import paddle.fluid as fluid class TestFetchHandler(unittest.TestCase): - @unittest.skip(reason="Skip unstable ci") def test_fetch_handler(self): place = core.CPUPlace() @@ -36,7 +35,6 @@ class TestFetchHandler(unittest.TestCase): var_emb3 = block.create_var(name='emb3', type=core.VarDesc.VarType.FP32) class FH(fluid.executor.FetchHandler): - def handler(self, fetch_dict): assert len(fetch_dict) == 1 @@ -49,14 +47,13 @@ class TestFetchHandler(unittest.TestCase): time.sleep(3) fm.stop() - default_fh = fluid.executor.FetchHandler(var_dict={ - 'emb': var_emb, - 'emb2': None, - 'emb3': var_emb3 - }, - period_secs=1) + default_fh = fluid.executor.FetchHandler( + var_dict={'emb': var_emb, 'emb2': None, 'emb3': var_emb3}, + period_secs=1, + ) default_fm = fluid.trainer_factory.FetchHandlerMonitor( - scope, default_fh) + scope, default_fh + ) default_fm.start() time.sleep(5) default_fm.stop() diff --git a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py index 2b59c88948ddec40921f10d94fa9b2a9003b8302..c096a4c8a6e02789f441ee576765b5af6e449655 100644 --- 
a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py @@ -21,7 +21,6 @@ from simple_nets import simple_fc_net_with_inputs, simple_fc_net class TestFetchLoDTensorArray(unittest.TestCase): - def build_program(self, main_program, startup_program): with fluid.unique_name.guard(): with fluid.program_guard(main_program, startup_program): @@ -59,27 +58,34 @@ class TestFetchLoDTensorArray(unittest.TestCase): build_strategy = fluid.BuildStrategy() binary = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) device_num = fluid.core.get_cuda_device_count() if use_cuda else 2 for _ in range(3): - loss_v, array_v = exe.run(binary, - feed=feed_dict, - fetch_list=[loss, array], - return_merged=False) + loss_v, array_v = exe.run( + binary, + feed=feed_dict, + fetch_list=[loss, array], + return_merged=False, + ) self.assertEqual(np.array(loss_v).shape, (device_num, 1)) self.assertEqual( - np.array(array_v[0][0]).shape, (batch_size / device_num, 784)) + np.array(array_v[0][0]).shape, (batch_size / device_num, 784) + ) self.assertEqual( - np.array(array_v[0][1]).shape, (batch_size / device_num, 1)) - self.assertEqual(np.array(array_v[0][2]).shape, (1, )) + np.array(array_v[0][1]).shape, (batch_size / device_num, 1) + ) + self.assertEqual(np.array(array_v[0][2]).shape, (1,)) for _ in range(3): - loss_v, array_v = exe.run(binary, - feed=feed_dict, - fetch_list=[loss, array], - return_merged=True) - self.assertEqual(np.array(loss_v).shape, (device_num, )) + loss_v, array_v = exe.run( + binary, + feed=feed_dict, + fetch_list=[loss, array], + return_merged=True, + ) + self.assertEqual(np.array(loss_v).shape, (device_num,)) self.assertEqual(np.array(array_v[0]).shape, (batch_size, 784)) self.assertEqual(np.array(array_v[1]).shape, (batch_size, 1)) np.testing.assert_allclose(loss_v, array_v[2], rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py index e9ce5294c37e7088f3af80cb9fe44f4aaf0f0fd5..0da628db92c1123170e606f29abee330d6a8e66e 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py @@ -22,23 +22,26 @@ os.environ["CPU_NUM"] = "2" class TestFetchUnmerged(unittest.TestCase): - def conv_net(self, img, label): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=8, - pool_size=2, - pool_stride=2, - pool_type='max', - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=8, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=16, - pool_size=2, - pool_stride=2, - pool_type='avg', - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=16, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu", + ) hidden = fluid.layers.fc(input=conv_pool_2, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) @@ -48,12 +51,12 @@ class TestFetchUnmerged(unittest.TestCase): def build_program(self, main, startup, is_test): with 
fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data(name='image', - shape=[1, 28, 28], - dtype='float32') - label = fluid.layers.data(name='label', - shape=[1], - dtype='int64') + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=[1], dtype='int64' + ) loss, prediction = self.conv_net(img, label) if not is_test: opt = fluid.optimizer.Adam(learning_rate=0.001) @@ -63,8 +66,9 @@ class TestFetchUnmerged(unittest.TestCase): def fetch_unmerged(self, use_cuda=True): main_program = fluid.Program() startup_program = fluid.Program() - feeds, loss, prediction = self.build_program(main_program, - startup_program, False) + feeds, loss, prediction = self.build_program( + main_program, startup_program, False + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) @@ -72,34 +76,41 @@ class TestFetchUnmerged(unittest.TestCase): build_strategy = fluid.BuildStrategy() binary = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) iters = 2 batch_size = 16 - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size, + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) device_num = fluid.core.get_cuda_device_count() if use_cuda else 2 for _ in range(iters): data = next(train_reader()) - loss_v, prediction_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss, prediction], - return_merged=False) + loss_v, prediction_v = exe.run( + binary, + feed=feeder.feed(data), + fetch_list=[loss, prediction], + return_merged=False, + ) self.assertEqual(np.array(loss_v).shape, (device_num, 1)) self.assertEqual( np.array(prediction_v).shape, - (device_num, batch_size / device_num, 10)) + (device_num, batch_size / device_num, 10), + ) for _ in range(iters): data = next(train_reader()) - loss_v, prediction_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss, prediction], - return_merged=True) - self.assertEqual(np.array(loss_v).shape, (device_num, )) + loss_v, prediction_v = exe.run( + binary, + feed=feeder.feed(data), + fetch_list=[loss, prediction], + return_merged=True, + ) + self.assertEqual(np.array(loss_v).shape, (device_num,)) self.assertEqual(np.array(prediction_v).shape, (batch_size, 10)) def test_fetch_unmerged(self): diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index 1ce597ef98ad15031edec6a3f58a22ac93987b7c..98b06b31368d976ccf6edbf6acd66af62248ad9e 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -19,7 +19,6 @@ import unittest class TestFetchVar(unittest.TestCase): - def set_input(self): self.val = np.array([1, 3, 5]).astype(np.int32) @@ -35,7 +34,6 @@ class TestFetchVar(unittest.TestCase): class TestFetchNullVar(TestFetchVar): - def set_input(self): self.val = np.array([]).astype(np.int32) diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py index fae7b3c2b0a0a6c5401d41d0656c433dc80e5dc0..1739361080d3b731568a5073ae73b298d23f8c8b 100644 --- 
a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py @@ -20,7 +20,6 @@ from op_test import OpTest, convert_float_to_uint16 class TestFillAnyLikeOp(OpTest): - def setUp(self): self.op_type = "fill_any_like" self.dtype = np.int32 @@ -38,16 +37,15 @@ class TestFillAnyLikeOp(OpTest): class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp): - def init(self): self.dtype = np.float32 self.value = 0.0 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFillAnyLikeOpBfloat16(OpTest): - def setUp(self): self.op_type = "fill_any_like" self.dtype = np.uint16 @@ -55,8 +53,9 @@ class TestFillAnyLikeOpBfloat16(OpTest): self.inputs = {'X': np.random.random((219, 232)).astype(np.float32)} self.attrs = {'value': self.value, 'dtype': core.VarDesc.VarType.BF16} self.outputs = { - 'Out': - convert_float_to_uint16(self.value * np.ones_like(self.inputs["X"])) + 'Out': convert_float_to_uint16( + self.value * np.ones_like(self.inputs["X"]) + ) } def test_check_output(self): @@ -65,25 +64,21 @@ class TestFillAnyLikeOpBfloat16(OpTest): class TestFillAnyLikeOpValue1(TestFillAnyLikeOp): - def init(self): self.value = 1.0 class TestFillAnyLikeOpValue2(TestFillAnyLikeOp): - def init(self): self.value = 1e-10 class TestFillAnyLikeOpValue3(TestFillAnyLikeOp): - def init(self): self.value = 1e-100 class TestFillAnyLikeOpType(TestFillAnyLikeOp): - def setUp(self): self.op_type = "fill_any_like" self.dtype = np.int32 @@ -92,16 +87,15 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp): self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)} self.attrs = { 'value': self.value, - 'dtype': int(core.VarDesc.VarType.FP32) + 'dtype': int(core.VarDesc.VarType.FP32), } self.outputs = { - 'Out': - self.value * np.ones_like(self.inputs["X"]).astype(np.float32) + 'Out': self.value + * np.ones_like(self.inputs["X"]).astype(np.float32) } class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp): - def init(self): self.dtype = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_op.py index 7dbdfe8d58a6c1850067a7a4866312b888d74207..76daed5a5d84a2562c76cad6b25e942b192cfc69 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_any_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_any_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestFillAnyOp(OpTest): - def setUp(self): self.op_type = "fill_any" self.dtype = 'float64' @@ -28,11 +27,11 @@ class TestFillAnyOp(OpTest): self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)} self.attrs = { 'value_float': float(self.value), - 'value_int': int(self.value) + 'value_int': int(self.value), } self.outputs = { - 'Out': - self.value * np.ones_like(self.inputs["X"]).astype(self.dtype) + 'Out': self.value + * np.ones_like(self.inputs["X"]).astype(self.dtype) } def init(self): @@ -46,34 +45,29 @@ class TestFillAnyOp(OpTest): class TestFillAnyOpFloat32(TestFillAnyOp): - def init(self): self.dtype = np.float32 self.value = 0.0 class TestFillAnyOpFloat16(TestFillAnyOp): - def init(self): self.dtype = np.float16 class TestFillAnyOpvalue1(TestFillAnyOp): - def init(self): self.dtype = np.float32 self.value = 111111555 class TestFillAnyOpvalue2(TestFillAnyOp): - def init(self): self.dtype = np.float32 self.value = 11111.1111 class TestFillAnyInplace(unittest.TestCase): - def 
test_fill_any_version(self): with paddle.fluid.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) @@ -91,7 +85,8 @@ class TestFillAnyInplace(unittest.TestCase): def test_fill_any_eqaul(self): with paddle.fluid.dygraph.guard(): tensor = paddle.to_tensor( - np.random.random((20, 30)).astype(np.float32)) + np.random.random((20, 30)).astype(np.float32) + ) target = tensor.numpy() target[...] = 1 @@ -100,7 +95,7 @@ class TestFillAnyInplace(unittest.TestCase): def test_backward(self): with paddle.fluid.dygraph.guard(): - x = paddle.full([10, 10], -1., dtype='float32') + x = paddle.full([10, 10], -1.0, dtype='float32') x.stop_gradient = False y = 2 * x y.fill_(1) diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py index ebe0ed179d2779963693d613fbd608c99821005d..339fafbd0b4e2d168df5c0905ddef61d4b75092c 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like.py @@ -21,16 +21,18 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ paddle.enable_static() -def fill_constant_batch_size_like(input, - shape, - value, - data_type, - input_dim_idx=0, - output_dim_idx=0, - force_cpu=False): +def fill_constant_batch_size_like( + input, + shape, + value, + data_type, + input_dim_idx=0, + output_dim_idx=0, + force_cpu=False, +): return paddle.fluid.layers.fill_constant_batch_size_like( - input, shape, data_type, value, input_dim_idx, output_dim_idx, - force_cpu) + input, shape, data_type, value, input_dim_idx, output_dim_idx, force_cpu + ) class TestFillConstatnBatchSizeLike1(OpTest): @@ -51,7 +53,7 @@ class TestFillConstatnBatchSizeLike1(OpTest): 'value': self.value, 'input_dim_idx': self.input_dim_idx, 'output_dim_idx': self.output_dim_idx, - 'force_cpu': self.force_cpu + 'force_cpu': self.force_cpu, } def init_data(self): diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index a031385aa0a096776b843da42cdfb7d0daaa43d3..838efff490ab813a2cdfb9bae644dc11628db98b 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -26,10 +26,8 @@ from paddle.fluid import Program, program_guard # Situation 1: Attr(shape) is a list(without tensor) class TestFillConstantOp1(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.inputs = {} @@ -41,10 +39,8 @@ class TestFillConstantOp1(OpTest): class TestFillConstantOp2(OpTest): - def setUp(self): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' self.op_type = "fill_constant" self.inputs = {} @@ -56,10 +52,8 @@ class TestFillConstantOp2(OpTest): class TestFillConstantOp3(OpTest): - def setUp(self): - '''Test fill_constant op with specified int64 value - ''' + '''Test fill_constant op with specified int64 value''' self.op_type = "fill_constant" self.inputs = {} @@ -71,10 +65,8 @@ class TestFillConstantOp3(OpTest): class TestFillConstantOp4(OpTest): - def setUp(self): - '''Test fill_constant op with specified int value - ''' + '''Test fill_constant op with specified int value''' self.op_type = "fill_constant" self.inputs = {} @@ -85,20 +77,19 @@ class TestFillConstantOp4(OpTest): 
self.check_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFillConstantBF16Op(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.dtype = np.uint16 self.inputs = {} self.attrs = { 'shape': [123, 92], 'value': 3.8, - 'dtype': core.VarDesc.VarType.BF16 + 'dtype': core.VarDesc.VarType.BF16, } self.outputs = {'Out': convert_float_to_uint16(np.full((123, 92), 3.8))} @@ -108,17 +99,15 @@ class TestFillConstantBF16Op(OpTest): class TestFillConstantOpWithSelectedRows(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() # create Out Variable out = scope.var('Out').get_selected_rows() # create and run fill_constant_op operator - fill_constant_op = Operator("fill_constant", - shape=[123, 92], - value=3.8, - Out='Out') + fill_constant_op = Operator( + "fill_constant", shape=[123, 92], value=3.8, Out='Out' + ) fill_constant_op.run(scope, place) # get result from Out @@ -138,16 +127,15 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase): # Situation 2: Attr(shape) is a list(with tensor) class TestFillConstantOp1_ShapeTensorList(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape, 'value': self.value} @@ -163,16 +151,15 @@ class TestFillConstantOp1_ShapeTensorList(OpTest): class TestFillConstantOp2_ShapeTensorList(OpTest): - def setUp(self): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' self.op_type = "fill_constant" self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape} @@ -187,7 +174,6 @@ class TestFillConstantOp2_ShapeTensorList(OpTest): class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): - def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -195,7 +181,6 @@ class TestFillConstantOp3_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): - def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -204,10 +189,8 @@ class TestFillConstantOp4_ShapeTensorList(TestFillConstantOp1_ShapeTensorList): # Situation 3: shape is a tensor class TestFillConstantOp1_ShapeTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() @@ -225,16 +208,14 @@ class TestFillConstantOp1_ShapeTensor(OpTest): # Situation 4: value is a tensor class TestFillConstantOp1_ValueTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + 
'''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), - 'ValueTensor': np.array([self.value]).astype("float32") + 'ValueTensor': np.array([self.value]).astype("float32"), } self.attrs = {'value': self.value + 1.0} self.outputs = {'Out': np.full(self.shape, self.value)} @@ -250,16 +231,14 @@ class TestFillConstantOp1_ValueTensor(OpTest): # Situation 5: value is a tensor class TestFillConstantOp2_ValueTensor(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), - 'ValueTensor': np.array([self.value]).astype("int32") + 'ValueTensor': np.array([self.value]).astype("int32"), } self.attrs = {'value': self.value, 'dtype': 2} self.outputs = {'Out': np.full(self.shape, self.value)} @@ -275,56 +254,55 @@ class TestFillConstantOp2_ValueTensor(OpTest): # Test python API class TestFillConstantAPI(unittest.TestCase): - def test_api(self): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") - shape_tensor_int64 = fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") - - out_1 = fluid.layers.fill_constant(shape=[1, 2], - dtype="float32", - value=1.1) - - out_2 = fluid.layers.fill_constant(shape=[1, positive_2_int32], - dtype="float32", - value=1.1) - - out_3 = fluid.layers.fill_constant(shape=[1, positive_2_int64], - dtype="float32", - value=1.1) - - out_4 = fluid.layers.fill_constant(shape=shape_tensor_int32, - dtype="float32", - value=1.1) - - out_5 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype="float32", - value=1.1) - - out_6 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=1.1) - - val1 = fluid.layers.fill_constant(shape=[1], - dtype=np.float32, - value=1.1) - val2 = fluid.layers.fill_constant(shape=[1], - dtype=np.float64, - value=1.1) - out_7 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=val1) - - out_8 = fluid.layers.fill_constant(shape=shape_tensor_int64, - dtype=np.float32, - value=val2) + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) + + out_1 = fluid.layers.fill_constant( + shape=[1, 2], dtype="float32", value=1.1 + ) + + out_2 = fluid.layers.fill_constant( + shape=[1, positive_2_int32], dtype="float32", value=1.1 + ) + + out_3 = fluid.layers.fill_constant( + shape=[1, positive_2_int64], dtype="float32", value=1.1 + ) + + out_4 = fluid.layers.fill_constant( + shape=shape_tensor_int32, dtype="float32", value=1.1 + ) + + out_5 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype="float32", value=1.1 + ) + + out_6 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=1.1 + ) + + val1 = fluid.layers.fill_constant( + shape=[1], dtype=np.float32, value=1.1 + ) + val2 = fluid.layers.fill_constant( + shape=[1], dtype=np.float64, value=1.1 + ) + out_7 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=val1 + ) + + out_8 = fluid.layers.fill_constant( + shape=shape_tensor_int64, dtype=np.float32, value=val2 + ) exe = 
fluid.Executor(place=fluid.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8 = exe.run( @@ -333,7 +311,8 @@ class TestFillConstantAPI(unittest.TestCase): "shape_tensor_int32": np.array([1, 2]).astype("int32"), "shape_tensor_int64": np.array([1, 2]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32")) @@ -346,7 +325,6 @@ class TestFillConstantAPI(unittest.TestCase): class TestFillConstantImperative(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): data1 = np.array([1, 2]).astype('int32') @@ -355,26 +333,30 @@ class TestFillConstantImperative(unittest.TestCase): shape = fluid.dygraph.to_variable(data1) val = fluid.dygraph.to_variable(data2) value = fluid.dygraph.to_variable(data3) - res1 = fluid.layers.fill_constant(shape=[1, 2], - dtype='float32', - value=1.1) - res2 = fluid.layers.fill_constant(shape=shape, - dtype='float32', - value=1.1) - res3 = fluid.layers.fill_constant(shape=shape, - dtype='float32', - value=val) - res4 = fluid.layers.fill_constant(shape=shape, - dtype='int32', - value=value) - assert np.array_equal(res1.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(res2.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(res3.numpy(), - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(res4.numpy(), - np.full([1, 2], 88, dtype="int32")) + res1 = fluid.layers.fill_constant( + shape=[1, 2], dtype='float32', value=1.1 + ) + res2 = fluid.layers.fill_constant( + shape=shape, dtype='float32', value=1.1 + ) + res3 = fluid.layers.fill_constant( + shape=shape, dtype='float32', value=val + ) + res4 = fluid.layers.fill_constant( + shape=shape, dtype='int32', value=value + ) + assert np.array_equal( + res1.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res2.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res3.numpy(), np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + res4.numpy(), np.full([1, 2], 88, dtype="int32") + ) def test_nan(self): with fluid.dygraph.guard(): @@ -394,42 +376,49 @@ class TestFillConstantImperative(unittest.TestCase): class TestFillConstantOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - #for ci coverage + # for ci coverage x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16") - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1], - value=5, - dtype='uint4') - - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1.1], - value=5, - dtype='float32', - out=x1) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1], + value=5, + dtype='uint4', + ) + + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1.1], + value=5, + dtype='float32', + out=x1, + ) # The argument dtype of fill_constant_op must be one of bool, float16, - #float32, float64, uint8, int16, int32 or int64 + # float32, float64, uint8, int16, int32 or int64 x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32") - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[1], - value=5, - dtype='float64', - out=x2) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[1], + value=5, + dtype='float64', + out=x2, + ) x3 = 
np.random.randn(100, 100).astype('int32') - self.assertRaises(TypeError, - fluid.layers.fill_constant, - shape=[100, 100], - value=5, - dtype='float64', - out=x3) + self.assertRaises( + TypeError, + fluid.layers.fill_constant, + shape=[100, 100], + value=5, + dtype='float64', + out=x3, + ) # The argument shape's type of fill_constant_op must be list, tuple or Variable. def test_shape_type(): @@ -439,39 +428,37 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. def test_shape_tensor_dtype(): - shape = fluid.data(name="shape_tensor", - shape=[2], - dtype="float32") - fluid.layers.fill_constant(shape=shape, - dtype="float32", - value=1) + shape = fluid.data( + name="shape_tensor", shape=[2], dtype="float32" + ) + fluid.layers.fill_constant( + shape=shape, dtype="float32", value=1 + ) self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data(name="shape_tensor_list", - shape=[1], - dtype="bool") - fluid.layers.fill_constant(shape=[shape, 2], - dtype="float32", - value=1) + shape = fluid.data( + name="shape_tensor_list", shape=[1], dtype="bool" + ) + fluid.layers.fill_constant( + shape=[shape, 2], dtype="float32", value=1 + ) self.assertRaises(TypeError, test_shape_tensor_list_dtype) class TestFillConstantOp_ValueTensorBf16(OpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.op_type = "fill_constant" self.init_data() self.inputs = { - "ShapeTensor": - np.array(self.shape).astype("int32"), - 'ValueTensor': - convert_float_to_uint16(np.array([self.value]).astype("float32")) + "ShapeTensor": np.array(self.shape).astype("int32"), + 'ValueTensor': convert_float_to_uint16( + np.array([self.value]).astype("float32") + ), } self.attrs = {'value': self.value, 'dtype': core.VarDesc.VarType.BF16} self.outputs = {'Out': np.full(self.shape, self.value)} diff --git a/python/paddle/fluid/tests/unittests/test_fill_diagonal_tensor_op.py b/python/paddle/fluid/tests/unittests/test_fill_diagonal_tensor_op.py index 9823f5e25851913f2a759349bdeaf23abc76a408..f412a161eb63c04a1b4c7bd6d57918ab426ed3c7 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_diagonal_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_diagonal_tensor_op.py @@ -34,13 +34,15 @@ def fill_diagonal_ndarray(x, value, offset=0, dim1=0, dim2=1): diagonal = np.lib.stride_tricks.as_strided( x[:, offset:] if dim_sum == 1 else x[:, :, offset:], shape=(shape[dim3], diagdim), - strides=(strides[dim3], strides[dim1] + strides[dim2])) + strides=(strides[dim3], strides[dim1] + strides[dim2]), + ) else: diagdim = min(shape[dim2], shape[dim1] + offset) diagonal = np.lib.stride_tricks.as_strided( x[-offset:, :] if dim_sum in [1, 2] else x[:, -offset:], shape=(shape[dim3], diagdim), - strides=(strides[dim3], strides[dim1] + strides[dim2])) + strides=(strides[dim3], strides[dim1] + strides[dim2]), + ) diagonal[...] 
= value return x @@ -79,13 +81,12 @@ def fill_gt(x, y, offset, dim1, dim2): class TensorFillDiagTensor_Test(OpTest): - def setUp(self): self.op_type = "fill_diagonal_tensor" self.python_api = paddle.tensor.manipulation.fill_diagonal_tensor self.init_kernel_type() x = np.random.random((10, 10)).astype(self.dtype) - y = np.random.random((10, )).astype(self.dtype) + y = np.random.random((10,)).astype(self.dtype) dim1 = 0 dim2 = 1 offset = 0 @@ -106,7 +107,6 @@ class TensorFillDiagTensor_Test(OpTest): class TensorFillDiagTensor_Test2(TensorFillDiagTensor_Test): - def setUp(self): self.op_type = "fill_diagonal_tensor" self.python_api = paddle.tensor.manipulation.fill_diagonal_tensor @@ -127,7 +127,6 @@ class TensorFillDiagTensor_Test2(TensorFillDiagTensor_Test): class TensorFillDiagTensor_Test3(TensorFillDiagTensor_Test): - def setUp(self): self.op_type = "fill_diagonal_tensor" self.python_api = paddle.tensor.manipulation.fill_diagonal_tensor diff --git a/python/paddle/fluid/tests/unittests/test_fill_op.py b/python/paddle/fluid/tests/unittests/test_fill_op.py index 0cfa7ecbed0242ca5b548f36bb720579376fb192..95a432136caca74325d9dc6322da754bd7476b56 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_op.py @@ -20,7 +20,6 @@ from paddle.fluid.op import Operator class TestFillOp1(OpTest): - def setUp(self): self.op_type = "fill" val = np.random.random(size=[100, 200]) @@ -29,7 +28,7 @@ class TestFillOp1(OpTest): 'value': val.flatten().tolist(), 'shape': [100, 200], 'dtype': int(core.VarDesc.VarType.FP64), - 'force_cpu': False + 'force_cpu': False, } self.outputs = {'Out': val.astype('float64')} @@ -38,7 +37,6 @@ class TestFillOp1(OpTest): class TestFillOp2(OpTest): - def setUp(self): self.op_type = "fill" val = np.random.random(size=[100, 200]) @@ -47,7 +45,7 @@ class TestFillOp2(OpTest): 'value': val.flatten().tolist(), 'shape': [100, 200], 'dtype': int(core.VarDesc.VarType.FP64), - 'force_cpu': True + 'force_cpu': True, } self.outputs = {'Out': val.astype('float64')} @@ -56,7 +54,6 @@ class TestFillOp2(OpTest): class TestFillOp3(unittest.TestCase): - def check_with_place(self, place, f_cpu): scope = core.Scope() # create Out Variable @@ -64,12 +61,14 @@ class TestFillOp3(unittest.TestCase): # create and run fill_op operator val = np.random.random(size=[300, 200]) - fill_op = Operator("fill", - value=val.flatten(), - shape=[300, 200], - dtype=int(core.VarDesc.VarType.FP32), - force_cpu=f_cpu, - Out='Out') + fill_op = Operator( + "fill", + value=val.flatten(), + shape=[300, 200], + dtype=int(core.VarDesc.VarType.FP32), + force_cpu=f_cpu, + Out='Out', + ) fill_op.run(scope, place) # get result from Out diff --git a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py index a298bc5c58bc3f280c472c6168b25a4d5435ee7e..f1a1039ea016cc75fe0cffb1ee128c1999520b9f 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_zeros_like2_op.py @@ -20,7 +20,6 @@ from op_test import OpTest class TestFillZerosLike2Op(OpTest): - def setUp(self): self.op_type = "fill_zeros_like2" self.dtype = np.float32 @@ -37,21 +36,17 @@ class TestFillZerosLike2Op(OpTest): class TestFillZerosLike2OpFp16(TestFillZerosLike2Op): - def init_dtype(self): self.dtype = np.float16 class TestFillZerosLike2OpFp64(TestFillZerosLike2Op): - def init_dtype(self): self.dtype = np.float64 class TestZerosError(unittest.TestCase): - def 
test_errors(self):
-
        def test_zeros_like_type_error():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                fluid.layers.zeros_like([10], dtype="float")
diff --git a/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py
index e6515e426e40c9bee684b1bd3c87871426f436b8..7317973e774ab5b9d9d99cad6f2da0334c871105 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_zeros_like_op.py
@@ -18,7 +18,6 @@ from op_test import OpTest
 
 
 class TestFillZerosLikeOp(OpTest):
-
     def setUp(self):
         self.op_type = "fill_zeros_like"
         self.dtype = np.float32
@@ -34,7 +33,6 @@ class TestFillZerosLikeOp(OpTest):
 
 
 class TestFillZerosLikeOpFp16(TestFillZerosLikeOp):
-
     def init_dtype(self):
         self.dtype = np.float16
 
diff --git a/python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py b/python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py
index b60dcbe9c899b1951ad6891d5d0075860a484452..e1136d677a13d932e8ee4d9acf756feb70c1c4c8 100644
--- a/python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py
+++ b/python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py
@@ -16,11 +16,11 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+
 """This is Test Case 1"""
 
 
 class TestFilterByInstagOp(OpTest):
-
     def setUp(self):
         self.op_type = 'filter_by_instag'
         x1 = np.zeros((36, 4), dtype=np.float64)
@@ -48,8 +48,9 @@ class TestFilterByInstagOp(OpTest):
                 out[ln, k] = cur
             ln += 1
 
-        mmap = np.array([[0, 1, 2], [2, 6, 4], [6, 15, 6], [12, 28,
-                                                            8]]).astype('int64')
+        mmap = np.array([[0, 1, 2], [2, 6, 4], [6, 15, 6], [12, 28, 8]]).astype(
+            'int64'
+        )
         mmap_lod = [[1, 1, 1, 1]]
 
         loss_weight = np.array([[1], [1], [1], [1]]).astype('double')
@@ -62,7 +63,7 @@ class TestFilterByInstagOp(OpTest):
         self.outputs = {
             'Out': (out, out_lod),
             'LossWeight': (loss_weight, mmap_lod),
-            'IndexMap': (mmap, mmap_lod)
+            'IndexMap': (mmap, mmap_lod),
         }
 
         self.attrs = {'is_lod': True, 'out_val_if_empty': 0}
@@ -71,16 +72,15 @@ class TestFilterByInstagOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['Ins'],
-                        'Out',
-                        no_grad_set=set(['Ins_tag', 'Filter_tag']))
+        self.check_grad(
+            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
+        )
 
 
 """This is Test Case 2"""
 
 
 class TestFilterByInstagOp2(OpTest):
-
     def setUp(self):
         self.op_type = 'filter_by_instag'
 
@@ -110,7 +110,7 @@ class TestFilterByInstagOp2(OpTest):
         self.outputs = {
             'Out': (out, out_lod),
             'LossWeight': (loss_weight, mmap_lod),
-            'IndexMap': (mmap, mmap_lod)
+            'IndexMap': (mmap, mmap_lod),
         }
         self.attrs = {'is_lod': True, 'out_val_if_empty': 0}
 
@@ -118,16 +118,15 @@ class TestFilterByInstagOp2(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['Ins'],
-                        'Out',
-                        no_grad_set=set(['Ins_tag', 'Filter_tag']))
+        self.check_grad(
+            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
+        )
 
 
 """This is Test Case 3"""
 
 
 class TestFilterByInstagOp3(OpTest):
-
     def setUp(self):
         self.op_type = 'filter_by_instag'
 
@@ -154,7 +153,7 @@ class TestFilterByInstagOp3(OpTest):
         self.outputs = {
             'Out': (out, out_lod),
             'LossWeight': (loss_weight, mmap_lod),
-            'IndexMap': (mmap, mmap_lod)
+            'IndexMap': (mmap, mmap_lod),
         }
         self.attrs = {'is_lod': True, 'out_val_if_empty': 0}
 
@@ -162,16 +161,15 @@ class TestFilterByInstagOp3(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['Ins'],
-                        'Out',
-                        no_grad_set=set(['Ins_tag', 'Filter_tag']))
+        self.check_grad(
+            ['Ins'],
'Out', no_grad_set=set(['Ins_tag', 'Filter_tag']) + ) """This is Test Case 4""" class TestFilterByInstagOp4(OpTest): - def setUp(self): self.op_type = 'filter_by_instag' @@ -197,7 +195,7 @@ class TestFilterByInstagOp4(OpTest): self.outputs = { 'Out': (out, out_lod), 'LossWeight': (loss_weight, mmap_lod), - 'IndexMap': (mmap, mmap_lod) + 'IndexMap': (mmap, mmap_lod), } self.attrs = {'is_lod': False, 'out_val_if_empty': 0} @@ -205,13 +203,12 @@ class TestFilterByInstagOp4(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['Ins'], - 'Out', - no_grad_set=set(['Ins_tag', 'Filter_tag'])) + self.check_grad( + ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag']) + ) class TestFilterByInstagOp6(OpTest): - def setUp(self): self.op_type = 'filter_by_instag' @@ -237,7 +234,7 @@ class TestFilterByInstagOp6(OpTest): self.outputs = { 'Out': (out, out_lod), 'LossWeight': (loss_weight, mmap_lod), - 'IndexMap': (mmap, mmap_lod) + 'IndexMap': (mmap, mmap_lod), } self.attrs = {'is_lod': False, 'out_val_if_empty': 0} @@ -249,7 +246,6 @@ class TestFilterByInstagOp6(OpTest): class TestFilterByInstagOp7(OpTest): - def setUp(self): self.op_type = 'filter_by_instag' @@ -275,7 +271,7 @@ class TestFilterByInstagOp7(OpTest): self.outputs = { 'Out': (out, out_lod), 'LossWeight': (loss_weight, mmap_lod), - 'IndexMap': (mmap, mmap_lod) + 'IndexMap': (mmap, mmap_lod), } self.attrs = {'is_lod': False, 'out_val_if_empty': 0} diff --git a/python/paddle/fluid/tests/unittests/test_flatten2_op.py b/python/paddle/fluid/tests/unittests/test_flatten2_op.py index cbfceb720ad1c0990e19a4a1da14cb4abce1328f..fe5aad118af46e5792a110d7ce5c4d7b94f10d84 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten2_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten2_op.py @@ -20,7 +20,6 @@ from op_test import OpTest class TestFlattenOp(OpTest): - def setUp(self): self.op_type = "flatten2" self.init_test_case() @@ -28,7 +27,7 @@ class TestFlattenOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def test_check_output(self): @@ -47,7 +46,6 @@ class TestFlattenOp(OpTest): class TestFlattenOp1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.axis = 0 @@ -55,7 +53,6 @@ class TestFlattenOp1(TestFlattenOp): class TestFlattenOpWithDefaultAxis(TestFlattenOp): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -65,7 +62,6 @@ class TestFlattenOpWithDefaultAxis(TestFlattenOp): class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 @@ -73,7 +69,6 @@ class TestFlattenOpSixDims(TestFlattenOp): class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): - def execute_api(self, x, axis=1): return fluid.layers.flatten(x, axis=axis) @@ -81,15 +76,14 @@ class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): paddle.enable_static() main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[-1, 3, -1, -1], - dtype='float32') + x = paddle.static.data( + name="x", shape=[-1, 3, -1, -1], dtype='float32' + ) out = self.execute_api(x, axis=2) self.assertTrue((-1, -1) == out.shape) class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_data = 
np.random.random((3, 2, 4, 5)).astype("float64") @@ -102,9 +96,9 @@ class TestFlatten2OpError(unittest.TestCase): def test_type(): # dtype must be float32, float64, int8, int32, int64, uint8. - x2 = fluid.layers.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2 = fluid.layers.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) fluid.layers.flatten(x2, axis=1) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py index b938adfbf1426a519c1a7a30ca6e45ce18d5c8c3..8d1bcc6254d01094308a30e7173b993dbf2d4985 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestFlattenOp(OpTest): - def setUp(self): self.python_api = paddle.flatten self.python_out_sig = ["Out"] @@ -31,7 +30,7 @@ class TestFlattenOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype("float32") + "XShape": np.random.random(self.in_shape).astype("float32"), } def test_check_output(self): @@ -44,17 +43,16 @@ class TestFlattenOp(OpTest): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 self.stop_axis = -1 - self.new_shape = (120) + self.new_shape = 120 def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 1 @@ -64,12 +62,11 @@ class TestFlattenOp_1(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_2(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -79,12 +76,11 @@ class TestFlattenOp_2(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_3(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -94,12 +90,11 @@ class TestFlattenOp_3(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_4(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = -2 @@ -109,12 +104,11 @@ class TestFlattenOp_4(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_5(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 2 @@ -124,12 +118,11 @@ class TestFlattenOp_5(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.start_axis = 3 @@ -139,50 +132,63 @@ class TestFlattenOpSixDims(TestFlattenOp): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * 
image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_ValueError1(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) out = paddle.flatten(x_var, start_axis=2, stop_axis=1) self.assertRaises(ValueError, test_ValueError1) def test_ValueError2(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=10, stop_axis=1) self.assertRaises(ValueError, test_ValueError2) def test_ValueError3(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=2, stop_axis=10) self.assertRaises(ValueError, test_ValueError3) def test_type(): # dtype must be float32, float64, int8, int32, int64, uint8. - x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x2 = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x2 = x2.astype('float16') - x2_var = paddle.fluid.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2_var = paddle.fluid.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) paddle.flatten(x2_var) self.assertRaises(TypeError, test_type) @@ -194,7 +200,6 @@ class TestFlatten2OpError(unittest.TestCase): class TestStaticFlattenPythonAPI(unittest.TestCase): - def execute_api(self, x, start_axis=0, stop_axis=-1): return paddle.flatten(x, start_axis, stop_axis) @@ -204,9 +209,9 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[2, 3, 4, 4], - dtype='float32') + x = paddle.static.data( + name="x", shape=[2, 3, 4, 4], dtype='float32' + ) out = self.execute_api(x, start_axis=-2, stop_axis=-1) exe = paddle.static.Executor(place=paddle.CPUPlace()) @@ -215,7 +220,6 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): - def execute_api(self, x, start_axis=0, stop_axis=-1): return paddle.flatten(x, start_axis, stop_axis) @@ -223,25 +227,30 @@ class TestStaticFlattenInferShapePythonAPI(unittest.TestCase): paddle.enable_static() main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[-1, 3, -1, -1], - dtype='float32') + x = paddle.static.data( + name="x", shape=[-1, 3, -1, -1], dtype='float32' + ) out = self.execute_api(x, start_axis=2, stop_axis=3) self.assertTrue((-1, 3, -1) == out.shape) class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI): - def execute_api(self, x, start_axis=0, stop_axis=-1): return x.flatten_(start_axis, stop_axis) class TestFlattenPython(unittest.TestCase): - def test_python_api(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. 
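# Editor's note, not part of the original patch: a minimal sketch of the
# start_axis/stop_axis semantics the flatten tests in this region exercise.
# Axes in the closed range [start_axis, stop_axis] collapse into a single
# dimension; the shapes below mirror the TestFlattenOp_1 configuration above
# (in_shape (3, 2, 5, 4), start_axis=1, stop_axis=2), and the expected result
# is derived from that rule. Assumes an eager-mode paddle >= 2.0 install.
import numpy as np
import paddle

x = paddle.to_tensor(np.zeros((3, 2, 5, 4), dtype='float32'))
y = paddle.flatten(x, start_axis=1, stop_axis=2)
assert tuple(y.shape) == (3, 10, 4)  # axes 1 and 2 merged: 2 * 5 = 10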
+ x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_InputError(): @@ -260,11 +269,17 @@ class TestFlattenPython(unittest.TestCase): class TestDygraphInplaceFlattenPython(unittest.TestCase): - def test_python_api(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_Negative(): diff --git a/python/paddle/fluid/tests/unittests/test_flatten_op.py b/python/paddle/fluid/tests/unittests/test_flatten_op.py index 9a371465f33e8ab913d8c98fb1d130cb22632a48..a1108d3392ca58756adcff9243fa1dd44454bdc4 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestFlattenOp(OpTest): - def setUp(self): self.op_type = "flatten" self.init_test_case() @@ -43,7 +42,6 @@ class TestFlattenOp(OpTest): class TestFlattenOp1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 2, 10) self.axis = 0 @@ -51,7 +49,6 @@ class TestFlattenOp1(TestFlattenOp): class TestFlattenOpWithDefaultAxis(TestFlattenOp): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -61,7 +58,6 @@ class TestFlattenOpWithDefaultAxis(TestFlattenOp): class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 diff --git a/python/paddle/fluid/tests/unittests/test_fleet.py b/python/paddle/fluid/tests/unittests/test_fleet.py index a06f264750978e95d3c6e890f9144ffc6a7e04d7..a9a75868ee396977c5c8b1b5dd440724678ab8fc 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet.py +++ b/python/paddle/fluid/tests/unittests/test_fleet.py @@ -27,7 +27,8 @@ class TestFleet1(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_pslib_1(self): """Test cases for pslib.""" @@ -42,24 +43,38 @@ class TestFleet1(unittest.TestCase): os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() - #role_maker.generate_role() + # role_maker.generate_role() place = fluid.CPUPlace() exe = fluid.Executor(place) - #fleet.init(role_maker) + # fleet.init(role_maker) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) - emb = fluid.layers.embedding(input=show, size=[1, 1], \ - is_sparse=True, is_distributed=True, \ - param_attr=fluid.ParamAttr(name="embedding")) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + emb = fluid.layers.embedding( + input=show, + size=[1, 1], + is_sparse=True, + is_distributed=True, + param_attr=fluid.ParamAttr(name="embedding"), + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow = fluid.layers.data_norm(input=bow, epsilon=1e-4, name="norm") fc = fluid.layers.fc(input=bow, size=1, act=None) - label = 
fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) try: @@ -70,7 +85,8 @@ class TestFleet1(unittest.TestCase): "embedding": { "sparse_accessor_class": "DownpourSparseValueAccessor" } - }) + }, + ) adam.minimize([cost], [scope]) fleet.run_server() except: diff --git a/python/paddle/fluid/tests/unittests/test_fleet_api_input.py b/python/paddle/fluid/tests/unittests/test_fleet_api_input.py index 56b06e3b8f2e99fb06212c69de2c10e51d1f796c..ac77fc42efdfbf753629995a811d5260b949c71e 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_api_input.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_api_input.py @@ -15,19 +15,24 @@ import unittest import paddle import paddle.fluid as fluid -from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig +from paddle.fluid.transpiler.distribute_transpiler import ( + DistributeTranspilerConfig, +) from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedRoleMaker -from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedCollectiveRoleMaker +from paddle.fluid.incubate.fleet.base.role_maker import ( + UserDefinedCollectiveRoleMaker, +) from paddle.fluid.incubate.fleet.base.role_maker import Role import paddle.fluid.incubate.fleet.base.role_maker as role_maker -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) from paddle.fluid.incubate.fleet.parameter_server import TranspilerOptimizer from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer from dist_simnet_bow import train_network class DistributeTranspilerConfigTest(unittest.TestCase): - def set_runtime_split_send_recv(self, config, value): config.runtime_split_send_recv = value @@ -37,10 +42,12 @@ class DistributeTranspilerConfigTest(unittest.TestCase): def testConfig(self): config = DistributeTranspilerConfig() self.assertRaises(Exception, self.set_sync_mode, config, None) - self.assertRaises(Exception, self.set_runtime_split_send_recv, config, - None) - self.assertRaises(Exception, self.set_runtime_split_send_recv, config, - True) + self.assertRaises( + Exception, self.set_runtime_split_send_recv, config, None + ) + self.assertRaises( + Exception, self.set_runtime_split_send_recv, config, True + ) self.set_sync_mode(config, False) self.assertFalse(config.sync_mode) self.set_runtime_split_send_recv(config, True) @@ -48,7 +55,6 @@ class DistributeTranspilerConfigTest(unittest.TestCase): class FleetTest(unittest.TestCase): - def testInvalidInputs(self): self.assertRaises(Exception, fleet.split_files, "files") self.assertRaises(Exception, fleet.init, "pserver") @@ -61,40 +67,50 @@ class FleetTest(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) pe = fluid.ParallelExecutor(use_cuda=False, loss_name=loss.name) - self.assertRaises(Exception, - fleet.save_inference_model, - dirname='/tmp/', - feeded_var_names=['X'], - target_vars=[loss], - executor=pe) - self.assertRaises(Exception, - fleet.save_inference_model, - dirname='/tmp/', - feeded_var_names=['X'], - target_vars=[loss], - executor="executor") + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['X'], + 
target_vars=[loss], + executor=pe, + ) + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['X'], + target_vars=[loss], + executor="executor", + ) compiled_prog = fluid.compiler.CompiledProgram( - fluid.default_main_program()) - self.assertRaises(Exception, - fleet.save_inference_model, - dirname='/tmp/', - feeded_var_names=['X'], - target_vars=[loss], - executor=exe, - main_program=compiled_prog) - self.assertRaises(Exception, - fleet.save_persistables, - executor=pe, - dirname='/tmp/') - self.assertRaises(Exception, - fleet.save_persistables, - executor="executor", - dirname='/tmp/') - self.assertRaises(Exception, - fleet.save_persistables, - executor=exe, - dirname='/tmp/', - main_program=compiled_prog) + fluid.default_main_program() + ) + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['X'], + target_vars=[loss], + executor=exe, + main_program=compiled_prog, + ) + self.assertRaises( + Exception, fleet.save_persistables, executor=pe, dirname='/tmp/' + ) + self.assertRaises( + Exception, + fleet.save_persistables, + executor="executor", + dirname='/tmp/', + ) + self.assertRaises( + Exception, + fleet.save_persistables, + executor=exe, + dirname='/tmp/', + main_program=compiled_prog, + ) self.assertRaises(Exception, fleet._transpile, "config") def set_program(self, avg_cost, strategy): @@ -108,7 +124,8 @@ class FleetTest(unittest.TestCase): current_id=0, role=role_maker.Role.SERVER, worker_num=2, - server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) + server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"], + ) # for test optimizer without init(role) # fleet.init(role) batch_size = 128 @@ -127,7 +144,8 @@ class FleetTest(unittest.TestCase): current_id=0, role=role_maker.Role.SERVER, worker_num=2, - server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) + server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"], + ) # for test optimizer without init(role) fleet.init(role) batch_size = 128 @@ -145,119 +163,120 @@ class FleetTest(unittest.TestCase): class TranspilerOptimizerTest(unittest.TestCase): - def testInvalidInputs(self): self.assertRaises(Exception, TranspilerOptimizer, "Adam", None) - self.assertRaises(Exception, TranspilerOptimizer, - fluid.optimizer.Adam(0.001), "strategy") + self.assertRaises( + Exception, + TranspilerOptimizer, + fluid.optimizer.Adam(0.001), + "strategy", + ) transpiler = TranspilerOptimizer(fluid.optimizer.Adam(0.001)) self.assertRaises(Exception, transpiler.minimize, loss=[]) data = fluid.layers.data(name='X', shape=[1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = paddle.mean(hidden) - self.assertRaises(Exception, - transpiler.minimize, - loss=loss.name, - startup_program=[]) + self.assertRaises( + Exception, transpiler.minimize, loss=loss.name, startup_program=[] + ) class UserDefinedRoleMakerTest(unittest.TestCase): - - def createRoleMaker(self, - current_id=0, - role=Role.WORKER, - worker_num=1, - server_endpoints=["127.0.0.1:8080"]): - role = UserDefinedRoleMaker(current_id, role, worker_num, - server_endpoints) + def createRoleMaker( + self, + current_id=0, + role=Role.WORKER, + worker_num=1, + server_endpoints=["127.0.0.1:8080"], + ): + role = UserDefinedRoleMaker( + current_id, role, worker_num, server_endpoints + ) def testRoleMaker(self): self.createRoleMaker() # test all invalid server_endpoints self.assertRaises( - Exception, self.createRoleMaker, - server_endpoints=None) # server_endpoints must be as list + 
Exception, self.createRoleMaker, server_endpoints=None + ) # server_endpoints must be as list + self.assertRaises( + Exception, self.createRoleMaker, server_endpoints=[] + ) # server_endpoints can't be empty + self.assertRaises( + Exception, self.createRoleMaker, server_endpoints=[3, []] + ) # element in server_endpoints must be as string self.assertRaises( - Exception, self.createRoleMaker, - server_endpoints=[]) # server_endpoints can't be empty - self.assertRaises(Exception, - self.createRoleMaker, - server_endpoints=[ - 3, [] - ]) # element in server_endpoints must be as string - self.assertRaises(Exception, - self.createRoleMaker, - server_endpoints=[ - "127.0.0.1:8080", "127.0.0.1:8080" - ]) # element in server_endpoints can't be duplicate + Exception, + self.createRoleMaker, + server_endpoints=["127.0.0.1:8080", "127.0.0.1:8080"], + ) # element in server_endpoints can't be duplicate # test all invalid current_id - self.assertRaises(Exception, self.createRoleMaker, - current_id="0") # current_id must be as int self.assertRaises( - Exception, self.createRoleMaker, - current_id=-1) # current_id must be greater than or equal to 0 + Exception, self.createRoleMaker, current_id="0" + ) # current_id must be as int + self.assertRaises( + Exception, self.createRoleMaker, current_id=-1 + ) # current_id must be greater than or equal to 0 self.assertRaises( Exception, self.createRoleMaker, current_id=1, role=Role.SERVER, - server_endpoints=["127.0.0.1:8080"] + server_endpoints=["127.0.0.1:8080"], ) # if role is server, current_id must be less than len(server_endpoints) # test all invalid worker_num - self.assertRaises(Exception, self.createRoleMaker, - worker_num="1") # worker_num must be as int - self.assertRaises(Exception, self.createRoleMaker, - worker_num=0) # worker_num must be greater than 0 + self.assertRaises( + Exception, self.createRoleMaker, worker_num="1" + ) # worker_num must be as int + self.assertRaises( + Exception, self.createRoleMaker, worker_num=0 + ) # worker_num must be greater than 0 # test all invalid role self.assertRaises( - Exception, self.createRoleMaker, - role=3) # role must be as Role(Role.WORKER=1, Role.SERVER=2) + Exception, self.createRoleMaker, role=3 + ) # role must be as Role(Role.WORKER=1, Role.SERVER=2) class UserDefinedCollectiveRoleMakerTest(unittest.TestCase): - - def createRoleMaker(self, - current_id=0, - worker_endpoints=["127.0.0.1:8080"]): + def createRoleMaker( + self, current_id=0, worker_endpoints=["127.0.0.1:8080"] + ): role = UserDefinedCollectiveRoleMaker(current_id, worker_endpoints) def testRoleMaker(self): self.createRoleMaker() # test all invalid worker_endpoints self.assertRaises( - Exception, self.createRoleMaker, - worker_endpoints=None) # worker_endpoints must be as list + Exception, self.createRoleMaker, worker_endpoints=None + ) # worker_endpoints must be as list + self.assertRaises( + Exception, self.createRoleMaker, worker_endpoints=[] + ) # worker_endpoints can't be empty + self.assertRaises( + Exception, self.createRoleMaker, worker_endpoints=[3, []] + ) # element worker_endpoints must be as string self.assertRaises( - Exception, self.createRoleMaker, - worker_endpoints=[]) # worker_endpoints can't be empty - self.assertRaises(Exception, - self.createRoleMaker, - worker_endpoints=[ - 3, [] - ]) # element worker_endpoints must be as string - self.assertRaises(Exception, - self.createRoleMaker, - worker_endpoints=[ - "127.0.0.1:8080", "127.0.0.1:8080" - ]) # element in worker_endpoints can't be duplicate + Exception, + 
self.createRoleMaker, + worker_endpoints=["127.0.0.1:8080", "127.0.0.1:8080"], + ) # element in worker_endpoints can't be duplicate # test all invalid current_id - self.assertRaises(Exception, self.createRoleMaker, - current_id="0") # current_id must be as int self.assertRaises( - Exception, self.createRoleMaker, - current_id=-1) # current_id must be greater than or equal to 0 + Exception, self.createRoleMaker, current_id="0" + ) # current_id must be as int + self.assertRaises( + Exception, self.createRoleMaker, current_id=-1 + ) # current_id must be greater than or equal to 0 self.assertRaises( Exception, self.createRoleMaker, current_id=1, - worker_endpoints=[ - "127.0.0.1:8080" - ]) # current_id must be less than len(worker_endpoints) + worker_endpoints=["127.0.0.1:8080"], + ) # current_id must be less than len(worker_endpoints) class CollectiveOptimizerTest(unittest.TestCase): - def test_ds_as_None(self): optimizer = fluid.optimizer.AdamOptimizer() dist_optimizer = CollectiveOptimizer(optimizer, strategy=None) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_ascend_utils.py b/python/paddle/fluid/tests/unittests/test_fleet_ascend_utils.py index 7ee6ee83f22c9bbce7aa52262ca73a55c87cae5a..03260616cda7d5700e0479a9d67d91bab8b1dfa4 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_ascend_utils.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_ascend_utils.py @@ -18,30 +18,22 @@ import unittest import paddle.distributed.fleet.ascend_utils as ascend_utils RANK_TABLE_JSON = { - "status": - "completed", - "version": - "1.0", - "server_count": - "1", - "server_list": [{ - "server_id": - "127.0.0.1", - "device": [{ - "device_id": "0", - "device_ip": "192.1.184.23", - "rank_id": "0" - }, { - "device_id": "1", - "device_ip": "192.2.21.93", - "rank_id": "1" - }] - }] + "status": "completed", + "version": "1.0", + "server_count": "1", + "server_list": [ + { + "server_id": "127.0.0.1", + "device": [ + {"device_id": "0", "device_ip": "192.1.184.23", "rank_id": "0"}, + {"device_id": "1", "device_ip": "192.2.21.93", "rank_id": "1"}, + ], + } + ], } class TestAscendUtil(unittest.TestCase): - def test_get_cloud_cluster(self): cluster, pod = ascend_utils.get_cloud_cluster() self.assertTrue(cluster) @@ -51,7 +43,8 @@ class TestAscendUtil(unittest.TestCase): json.dump(RANK_TABLE_JSON, f) rank_table_file = "./rank_table_file.json" cluster, pod = ascend_utils.get_cloud_cluster( - rank_table_file=rank_table_file) + rank_table_file=rank_table_file + ) self.assertTrue(cluster) self.assertTrue(pod) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_auto.py b/python/paddle/fluid/tests/unittests/test_fleet_auto.py index 29710dc5885684217734239d746fa7d4aca66e68..608b948d35f06aa3e532438b39a65fdc13ae4b6d 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_auto.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_auto.py @@ -21,26 +21,27 @@ paddle.enable_static() class TestDistributedStrategyAuto(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_distributed_strategy_auto(self): fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = 
paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) strategy = paddle.distributed.fleet.DistributedStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base.py b/python/paddle/fluid/tests/unittests/test_fleet_base.py index aba343b1f2952ba1df5dd1d4cac805e0ede9bad3..49dd5d6928bfd9954f79a0759cb584180510dd5c 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base.py @@ -22,13 +22,13 @@ import numpy as np class TestFleetBase(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36000" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36002" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36002" def test_init(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) @@ -59,8 +59,9 @@ class TestFleetBase(unittest.TestCase): def test_worker_endpoints(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - self.assertEqual("127.0.0.1:36000", - fleet.worker_endpoints(to_string=True)) + self.assertEqual( + "127.0.0.1:36000", fleet.worker_endpoints(to_string=True) + ) self.assertEqual(["127.0.0.1:36000"], fleet.worker_endpoints()) def test_server_num(self): @@ -90,10 +91,13 @@ class TestFleetBase(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker() fleet.init(role) if fleet.is_server(): - self.assertEqual("127.0.0.1:36001,127.0.0.2:36002", - fleet.server_endpoints(to_string=True)) - self.assertEqual(["127.0.0.1:36001", "127.0.0.2:36002"], - fleet.server_endpoints()) + self.assertEqual( + "127.0.0.1:36001,127.0.0.2:36002", + fleet.server_endpoints(to_string=True), + ) + self.assertEqual( + ["127.0.0.1:36001", "127.0.0.2:36002"], fleet.server_endpoints() + ) def test_is_server(self): os.environ["TRAINING_ROLE"] = "PSERVER" @@ -139,14 +143,15 @@ class TestFleetBase(unittest.TestCase): def test_exception(self): import paddle.distributed.fleet as fleet + self.assertRaises(Exception, fleet.init_worker) class TestFleetDygraph(unittest.TestCase): - def setUp(self): os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213,127.0.0.1:36214" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:36213,127.0.0.1:36214" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ["PADDLE_TRAINER_ID"] = "0" @@ -156,8 +161,9 @@ class TestFleetDygraph(unittest.TestCase): value = np.arange(26).reshape(2, 13).astype("float32") a = fluid.dygraph.to_variable(value) layer = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.Adam(learning_rate=0.01, - parameters=layer.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=0.01, parameters=layer.parameters() + ) # remove init cause this UT cannot launch distributed task adam = fleet.distributed_optimizer(adam) try: @@ -169,7 +175,7 @@ class TestFleetDygraph(unittest.TestCase): lr = 0.001 adam.set_lr(lr) cur_lr = adam.get_lr() - assert (lr == cur_lr) + assert lr == cur_lr state_dict = adam.state_dict() adam.set_state_dict(state_dict) 
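# Editor's note, not part of the original patch: the dygraph fleet test above
# reduces to this optimizer round trip. The fleet.distributed_optimizer
# wrapper is dropped here so the sketch runs in a single process; that
# simplification is an assumption for illustration only.
import paddle

layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(
    learning_rate=0.01, parameters=layer.parameters()
)
adam.set_lr(0.001)
assert adam.get_lr() == 0.001
state_dict = adam.state_dict()
adam.set_state_dict(state_dict)  # optimizer state survives the round trip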
@@ -177,22 +183,20 @@ class TestFleetDygraph(unittest.TestCase): class TestFleetBaseSingleError(unittest.TestCase): - def setUp(self): os.environ.pop("PADDLE_TRAINER_ENDPOINTS") def gen_data(self): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def test_single_run_collective_minimize(self): - def test_single_error(): - input_x = paddle.static.data(name="x", - shape=[-1, 32], - dtype='float32') + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' + ) input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') @@ -202,8 +206,10 @@ class TestFleetBaseSingleError(unittest.TestCase): fleet.init(is_collective=True) # in non_distributed mode(use `python` to launch), raise error if has multi cards - if fluid.core.is_compiled_with_cuda( - ) and fluid.core.get_cuda_device_count() > 1: + if ( + fluid.core.is_compiled_with_cuda() + and fluid.core.get_cuda_device_count() > 1 + ): self.assertRaises(ValueError, test_single_error) else: test_single_error() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py index ed914d2866510795c172df5fb76521d6e80c3942..084718c5407a338aa05d4c6bc47b0a433e8f19fb 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py @@ -23,12 +23,11 @@ import paddle.fluid as fluid class TestFleetBase(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_PORT"] = "36000" os.environ["PADDLE_TRAINERS_NUM"] = "1" - #os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ + # os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ # "127.0.0.1:36001,127.0.0.2:36001" def test_ps_minimize(self): @@ -38,23 +37,24 @@ class TestFleetBase(unittest.TestCase): os.environ["TRAINING_ROLE"] = "TRAINER" os.environ["PADDLE_TRAINER_ID"] = "1" - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') - input_slot = paddle.fluid.layers.data(name="slot", - shape=[1], - dtype='int64') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) + input_slot = paddle.fluid.layers.data( + name="slot", shape=[1], dtype='int64' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') - emb = paddle.fluid.layers.embedding(input=input_slot, - size=[10, 9], - is_sparse=True) + emb = paddle.fluid.layers.embedding( + input=input_slot, size=[10, 9], is_sparse=True + ) input_x = paddle.concat(x=[input_x, emb], axis=1) fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) role = fleet.PaddleCloudRoleMaker(is_collective=False) @@ -73,16 +73,17 @@ class TestFleetBase(unittest.TestCase): exe.run(paddle.static.default_startup_program()) pe = fluid.ParallelExecutor(use_cuda=False, loss_name=avg_cost.name) compiled_prog = fluid.compiler.CompiledProgram( - fluid.default_main_program()) + fluid.default_main_program() + ) temp_dir = tempfile.TemporaryDirectory() fleet.init_worker() - fleet.fleet.save(dirname=temp_dir.name, - feed=['x', 'y'], 
- fetch=[avg_cost]) - fleet.fleet.save(dirname=temp_dir.name, - feed=[input_x, input_y], - fetch=[avg_cost]) + fleet.fleet.save( + dirname=temp_dir.name, feed=['x', 'y'], fetch=[avg_cost] + ) + fleet.fleet.save( + dirname=temp_dir.name, feed=[input_x, input_y], fetch=[avg_cost] + ) fleet.fleet.save(dirname=temp_dir.name) fleet.load_model(path=temp_dir.name, mode=0) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py index 40ee3836dd27d4da5cc782164ee2ad736cd1db02..d9014b2c6f858a5b747c94e0e9e9de26df033d77 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py @@ -22,25 +22,26 @@ paddle.enable_static() class TestFleetBase_1(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_collective_minimize(self): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) role = role_maker.PaddleCloudRoleMaker(is_collective=True) @@ -52,25 +53,26 @@ class TestFleetBase_1(unittest.TestCase): class TestFleetBase(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_fleet_get_applied_optimizer(self): - input_x = paddle.fluid.layers.data(name="x", - shape=[32], - dtype='float32') + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32' + ) input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy(input=prediction, - label=input_y) + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) fleet.init(is_collective=True) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_4.py b/python/paddle/fluid/tests/unittests/test_fleet_base_4.py index 5d70193212cf953786b51e57ecd34bf150ee2397..986a48162b28c3f52b9f763ca76538315e8343af 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_4.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_4.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestFleetBase(unittest.TestCase): - def setUp(self): os.environ["POD_IP"] = "127.0.0.1" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" 
os.environ["PADDLE_TRAINERS_NUM"] = "2" - os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ - "127.0.0.1:36001,127.0.0.2:36001" + os.environ[ + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_fleet_init(self): diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_single.py b/python/paddle/fluid/tests/unittests/test_fleet_base_single.py index 776a286d97de4fa88363d70f51c293bd43e41394..13318d2d3df5caa9df98e5b6c406a8b5452df0a1 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_single.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_single.py @@ -28,7 +28,6 @@ import paddle.nn as nn class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear1 = nn.Linear(10, 10) @@ -39,7 +38,6 @@ class LinearNet(nn.Layer): class TestFleetDygraphSingle(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36213" os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213" @@ -52,8 +50,9 @@ class TestFleetDygraphSingle(unittest.TestCase): layer = LinearNet() loss_fn = nn.MSELoss() - adam = paddle.optimizer.Adam(learning_rate=0.001, - parameters=layer.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=0.001, parameters=layer.parameters() + ) adam = fleet.distributed_optimizer(adam) dp_layer = fleet.distributed_model(layer) @@ -68,14 +67,13 @@ class TestFleetDygraphSingle(unittest.TestCase): class TestFleetBaseSingleRunCollective(unittest.TestCase): - def setUp(self): pass def gen_data(self): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def test_single_run_collective_minimize(self): @@ -93,8 +91,11 @@ class TestFleetBaseSingleRunCollective(unittest.TestCase): optimizer = fleet.distributed_optimizer(optimizer) optimizer.minimize(avg_cost) - place = fluid.CUDAPlace( - 0) if paddle.fluid.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if paddle.fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(paddle.static.default_startup_program()) @@ -105,14 +106,13 @@ class TestFleetBaseSingleRunCollective(unittest.TestCase): class TestFleetBaseSingleRunPS(unittest.TestCase): - def setUp(self): pass def gen_data(self): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def test_single_run_ps_minimize(self): @@ -139,11 +139,15 @@ class TestFleetBaseSingleRunPS(unittest.TestCase): exe.run(paddle.static.default_startup_program()) step = 10 for i in range(step): - cost_val = exe.run(program=fluid.default_main_program(), - feed=self.gen_data(), - fetch_list=[avg_cost.name]) - print("worker_index: %d, step%d cost = %f" % - (fleet.worker_index(), i, cost_val[0])) + cost_val = exe.run( + program=fluid.default_main_program(), + feed=self.gen_data(), + fetch_list=[avg_cost.name], + ) + print( + "worker_index: %d, step%d cost = %f" + % (fleet.worker_index(), i, cost_val[0]) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fleet_elastic_collective.py b/python/paddle/fluid/tests/unittests/test_fleet_elastic_collective.py index 8d31fc4efb50c8fdde0470cbdba95f97149db668..2d9a9268ee3fa17ca68fe2080472bb6961795c93 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_elastic_collective.py +++ 
b/python/paddle/fluid/tests/unittests/test_fleet_elastic_collective.py @@ -25,11 +25,11 @@ print("test") class TestCollectiveLauncher(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.code_path = os.path.join(self.temp_dir.name, - "fake_python_for_elastic.py") + self.code_path = os.path.join( + self.temp_dir.name, "fake_python_for_elastic.py" + ) with open(self.code_path, "w") as f: f.write(fake_python_code) @@ -37,7 +37,6 @@ class TestCollectiveLauncher(unittest.TestCase): self.temp_dir.cleanup() def test_launch(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -76,7 +75,6 @@ class TestCollectiveLauncher(unittest.TestCase): pass def test_stop(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" diff --git a/python/paddle/fluid/tests/unittests/test_fleet_elastic_init.py b/python/paddle/fluid/tests/unittests/test_fleet_elastic_init.py index e32f74ffb14591a5c5b703e1a47cb433e27de7b5..5bd3859013cf9aa5cc9621eacfcf40e17cbe07ba 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_elastic_init.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_elastic_init.py @@ -19,9 +19,7 @@ from paddle.distributed.fleet.launch_utils import DistributeMode class TestElasticInit(unittest.TestCase): - def setUp(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" diff --git a/python/paddle/fluid/tests/unittests/test_fleet_elastic_manager.py b/python/paddle/fluid/tests/unittests/test_fleet_elastic_manager.py index 3c07edc78037eb85c7efa3d3312a8ec38debb64c..a4ad3e1a7c4ea61f983d4d4c1a3d0c5d0950ecb6 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_elastic_manager.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_elastic_manager.py @@ -17,17 +17,17 @@ import unittest from paddle.distributed.fleet.elastic.manager import ElasticManager from paddle.distributed.fleet.elastic.manager import LauncherInterface -from paddle.distributed.fleet.elastic.manager import ELASTIC_AUTO_PARALLEL_EXIT_CODE +from paddle.distributed.fleet.elastic.manager import ( + ELASTIC_AUTO_PARALLEL_EXIT_CODE, +) -class MockLease(): - +class MockLease: def refresh(self): pass class MockEtcdClient: - def __init__(self, lease=None): self._lease = lease @@ -66,12 +66,10 @@ class MockEtcdClient: class TestElasticManager(unittest.TestCase): - def setUp(self): self.etcd_client = MockEtcdClient() def test_elastic_manager_init(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -87,8 +85,7 @@ class TestElasticManager(unittest.TestCase): args = Argument() - class _MockLease(): - + class _MockLease: def refresh(self): raise ValueError("valid error, this only for unittest") @@ -96,7 +93,6 @@ class TestElasticManager(unittest.TestCase): elastic = ElasticManager(args, etcd_client=etcd_client) def test_match_faulttolerance(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -117,7 +113,8 @@ class TestElasticManager(unittest.TestCase): hosts = ["10.10.10.1:6001", "10.10.10.2:6001"] os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" self.assertEqual(elastic._match(hosts), True) @@ -127,7 +124,6 @@ class TestElasticManager(unittest.TestCase): self.assertEqual(elastic._match(hosts), False) def test_match_elastic(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -146,16 +142,20 @@ class 
TestElasticManager(unittest.TestCase): args.ips = "10.10.10.1,10.10.10.2,10.10.10.3,10.10.10.4" os.environ['FLAGS_START_PORT'] = "6001" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001,10.10.10.4:6001" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001,10.10.10.4:6001" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001,10.10.10.4:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001,10.10.10.4:6001" elastic = ElasticManager(args, self.etcd_client) hosts = ["10.10.10.1:6001", "10.10.10.2:6001"] self.assertEqual(elastic._match(hosts), False) hosts = [ - "10.10.10.1:6001", "10.10.10.2:6001", "10.10.10.3:6001", - "10.10.10.4:6001" + "10.10.10.1:6001", + "10.10.10.2:6001", + "10.10.10.3:6001", + "10.10.10.4:6001", ] self.assertEqual(elastic._match(hosts), True) @@ -167,19 +167,20 @@ class TestElasticManager(unittest.TestCase): args.ips = "10.10.10.1,10.10.10.2" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" elastic = ElasticManager(args, self.etcd_client) hosts = ["10.10.10.1:6001", "10.10.10.2:6001"] self.assertEqual(elastic._match(hosts), True) # TODO test timeout - #time.sleep(60) - #self.assertEqual(elastic._match(hosts), True) + # time.sleep(60) + # self.assertEqual(elastic._match(hosts), True) def test_update_hosts_for_faulttolerance(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -198,9 +199,11 @@ class TestElasticManager(unittest.TestCase): os.environ['PADDLE_ELASTIC_NP'] = "2" os.environ['PADDLE_TRAINERS'] = "10.10.10.1,10.10.10.2" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" elastic = ElasticManager(args, self.etcd_client) # add 10.10.10.3:6001 os.environ['PADDLE_TRAINER_ID'] = "0" @@ -244,30 +247,38 @@ class TestElasticManager(unittest.TestCase): os.environ['FLAGS_START_PORT'] = "6001" os.environ['PADDLE_TRAINERS'] = "10.10.10.1,10.10.10.2" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.2:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.2:6001" elastic = ElasticManager(args, self.etcd_client) # add 10.10.10.3:6001 elastic.curr_host = "10.10.10.1:6001" elastic.hosts = [ - "10.10.10.1:6001", "10.10.10.2:6001", "10.10.10.3:6001" + "10.10.10.1:6001", + "10.10.10.2:6001", + "10.10.10.3:6001", ] elastic._update_hosts() - #self.assertEqual(elastic.all_host_endpoints, + # self.assertEqual(elastic.all_host_endpoints, # ["10.10.10.1:6001", "10.10.10.2:6001", "10.10.10.3:6001"]) - self.assertEqual(os.getenv('PADDLE_TRAINERS'), - "10.10.10.1,10.10.10.2,10.10.10.3") + self.assertEqual( + os.getenv('PADDLE_TRAINERS'), "10.10.10.1,10.10.10.2,10.10.10.3" + ) ####################### # elastic, scale in # ####################### os.environ[ - 'PADDLE_TRAINERS'] = 
"10.10.10.0,10.10.10.1,10.10.10.2,10.10.10.3" + 'PADDLE_TRAINERS' + ] = "10.10.10.0,10.10.10.1,10.10.10.2,10.10.10.3" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.0:6000,10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.0:6000,10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.0:6000,10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.0:6000,10.10.10.1:6001,10.10.10.2:6001,10.10.10.3:6001" os.environ['POD_IP'] = "10.10.10.1" os.environ['TRAINER_PORTS_NUM'] = "4" os.environ['PADDLE_TRAINER_ID'] = "1" @@ -277,23 +288,31 @@ class TestElasticManager(unittest.TestCase): # remove 10.10.10.1:6001 elastic.curr_host = "10.10.10.1:6001" elastic.hosts = [ - "10.10.10.1:6001", "10.10.10.2:6001", "10.10.10.3:6001" + "10.10.10.1:6001", + "10.10.10.2:6001", + "10.10.10.3:6001", ] elastic._update_hosts() - #self.assertEqual(elastic.all_host_endpoints, + # self.assertEqual(elastic.all_host_endpoints, # ["10.10.10.3:6001", "10.10.10.1:6001", "10.10.10.2:6001"]) - self.assertEqual(os.getenv('PADDLE_TRAINERS'), - "10.10.10.3,10.10.10.1,10.10.10.2") - self.assertEqual(os.getenv('DISTRIBUTED_TRAINER_ENDPOINTS'), - "10.10.10.3:6001,10.10.10.1:6001,10.10.10.2:6001") + self.assertEqual( + os.getenv('PADDLE_TRAINERS'), "10.10.10.3,10.10.10.1,10.10.10.2" + ) + self.assertEqual( + os.getenv('DISTRIBUTED_TRAINER_ENDPOINTS'), + "10.10.10.3:6001,10.10.10.1:6001,10.10.10.2:6001", + ) ############ os.environ[ - 'PADDLE_TRAINERS'] = "10.10.10.1,10.10.10.1,10.10.10.1,10.10.10.1" + 'PADDLE_TRAINERS' + ] = "10.10.10.1,10.10.10.1,10.10.10.1,10.10.10.1" os.environ[ - 'DISTRIBUTED_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.1:6002,10.10.10.1:6003,10.10.10.1:6004" + 'DISTRIBUTED_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.1:6002,10.10.10.1:6003,10.10.10.1:6004" os.environ[ - 'PADDLE_TRAINER_ENDPOINTS'] = "10.10.10.1:6001,10.10.10.1:6002,10.10.10.1:6003,10.10.10.1:6004" + 'PADDLE_TRAINER_ENDPOINTS' + ] = "10.10.10.1:6001,10.10.10.1:6002,10.10.10.1:6003,10.10.10.1:6004" os.environ['POD_IP'] = "10.10.10.1" os.environ['TRAINER_PORTS_NUM'] = "4" os.environ['PADDLE_PORT'] = "6001" @@ -304,14 +323,15 @@ class TestElasticManager(unittest.TestCase): os.environ['PADDLE_TRAINER_ID'] = "-1" elastic.hosts = ["10.10.10.1:6001", "10.10.10.1:6003"] elastic._update_hosts() - #self.assertEqual(elastic.all_host_endpoints, + # self.assertEqual(elastic.all_host_endpoints, # ["10.10.10.1:6001", "10.10.10.1:6001"]) self.assertEqual(os.getenv('PADDLE_TRAINERS'), "10.10.10.1,10.10.10.1") - self.assertEqual(os.getenv('DISTRIBUTED_TRAINER_ENDPOINTS'), - "10.10.10.1:6001,10.10.10.1:6003") + self.assertEqual( + os.getenv('DISTRIBUTED_TRAINER_ENDPOINTS'), + "10.10.10.1:6001,10.10.10.1:6003", + ) def test_exit(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -330,7 +350,6 @@ class TestElasticManager(unittest.TestCase): elastic.exit() def test_pre_hook(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -353,7 +372,6 @@ class TestElasticManager(unittest.TestCase): elastic.pre_hook() def test_watch(self): - class Argument: elastic_server = "127.0.0.1:2379" job_id = "test_job_id_123" @@ -369,7 +387,6 @@ class TestElasticManager(unittest.TestCase): elastic_pre_hook = None class ElasticLauncher: - def watch(self): return ELASTIC_AUTO_PARALLEL_EXIT_CODE @@ -383,14 +400,11 @@ class TestElasticManager(unittest.TestCase): 
elastic.watch() def test_launcher_interface_check_procs(self): - class Proc: - def poll(self): return ELASTIC_AUTO_PARALLEL_EXIT_CODE class ProcList: - def __init__(self): self.proc = Proc() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py index 9bde09ab7243b54e4ba56dcdcbc3084b66e0e270..4e37988f7673eee94776f3eea9edbd1d3c9600f1 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestDistModelRun(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -34,8 +33,9 @@ class TestDistModelRun(unittest.TestCase): def test_dist_model_run(self): # step 0: declare folder to save the model and params - path_prefix = os.path.join(self.temp_dir.name, - "dist_model_run_test/inf") + path_prefix = os.path.join( + self.temp_dir.name, "dist_model_run_test/inf" + ) # step 1: saving the inference model and params x = paddle.static.data(name='x', shape=[28, 28], dtype='float32') @@ -47,12 +47,11 @@ class TestDistModelRun(unittest.TestCase): exe.run(paddle.static.default_startup_program()) x_data = np.random.randn(28, 28).astype('float32') y_data = np.random.randint(0, 9, size=[28, 1]).astype('int64') - exe.run(paddle.static.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[avg_loss]) + exe.run( + paddle.static.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[avg_loss], + ) paddle.static.save_inference_model(path_prefix, [x, y], [avg_loss], exe) print('save model to', path_prefix) @@ -74,21 +73,23 @@ class TestDistModelRun(unittest.TestCase): print("dist model rst:", dist_model_rst) # step 4: use framework's api to inference with fake data - [inference_program, feed_target_names, - fetch_targets] = (paddle.static.load_inference_model(path_prefix, exe)) - results = exe.run(inference_program, - feed={ - 'x': x_tensor, - 'y': y_tensor - }, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(path_prefix, exe) + results = exe.run( + inference_program, + feed={'x': x_tensor, 'y': y_tensor}, + fetch_list=fetch_targets, + ) load_inference_model_rst = results[0] print("load inference model api rst:", load_inference_model_rst) # step 5: compare two results - np.testing.assert_allclose(dist_model_rst, - load_inference_model_rst, - rtol=1e-05) + np.testing.assert_allclose( + dist_model_rst, load_inference_model_rst, rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_tensor.py b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_tensor.py index 98affdfa540716d681e40f128df7f18cf642d139..85da732ab5bd6d7238c32ba146ef5fccefd138e1 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_tensor.py @@ -22,53 +22,67 @@ paddle.enable_static() class TestDistModelTensor(unittest.TestCase): - def test_dist_model_tensor(self): tensor_32 = np.random.randint(10, 20, size=[20, 2]).astype('int32') dist_tensor32 = DistModelTensor(tensor_32, '32_tensor') self.assertEqual(dist_tensor32.dtype, DistModelDataType.INT32) - self.assertEqual(dist_tensor32.data.tolist('int32'), - tensor_32.ravel().tolist()) + self.assertEqual( + dist_tensor32.data.tolist('int32'), 
tensor_32.ravel().tolist() + ) # the length is how many byte the data contains self.assertEqual(dist_tensor32.data.length(), 40 * 4) self.assertEqual(dist_tensor32.name, '32_tensor') dist_tensor32.data.reset(tensor_32) - self.assertEqual(dist_tensor32.as_ndarray().ravel().tolist(), - tensor_32.ravel().tolist()) + self.assertEqual( + dist_tensor32.as_ndarray().ravel().tolist(), + tensor_32.ravel().tolist(), + ) tensor_64 = np.random.randint(10, 20, size=[20, 2]).astype('int64') dist_tensor64 = DistModelTensor(tensor_64, '64_tensor') self.assertEqual(dist_tensor64.dtype, DistModelDataType.INT64) - self.assertEqual(dist_tensor64.data.tolist('int64'), - tensor_64.ravel().tolist()) + self.assertEqual( + dist_tensor64.data.tolist('int64'), tensor_64.ravel().tolist() + ) self.assertEqual(dist_tensor64.data.length(), 40 * 8) self.assertEqual(dist_tensor64.name, '64_tensor') dist_tensor64.data.reset(tensor_64) - self.assertEqual(dist_tensor64.as_ndarray().ravel().tolist(), - tensor_64.ravel().tolist()) + self.assertEqual( + dist_tensor64.as_ndarray().ravel().tolist(), + tensor_64.ravel().tolist(), + ) tensor_float = np.random.randn(20, 2).astype('float32') dist_tensor_float = DistModelTensor(tensor_float, 'float_tensor') self.assertEqual(dist_tensor_float.dtype, DistModelDataType.FLOAT32) - self.assertEqual(dist_tensor_float.data.tolist('float32'), - tensor_float.ravel().tolist()) + self.assertEqual( + dist_tensor_float.data.tolist('float32'), + tensor_float.ravel().tolist(), + ) self.assertEqual(dist_tensor_float.data.length(), 40 * 4) self.assertEqual(dist_tensor_float.name, 'float_tensor') dist_tensor_float.data.reset(tensor_float) - self.assertEqual(dist_tensor_float.as_ndarray().ravel().tolist(), - tensor_float.ravel().tolist()) + self.assertEqual( + dist_tensor_float.as_ndarray().ravel().tolist(), + tensor_float.ravel().tolist(), + ) tensor_float_16 = np.random.randn(20, 2).astype('float16') - dist_tensor_float_16 = DistModelTensor(tensor_float_16, - 'float_tensor_16') + dist_tensor_float_16 = DistModelTensor( + tensor_float_16, 'float_tensor_16' + ) self.assertEqual(dist_tensor_float_16.dtype, DistModelDataType.FLOAT16) - self.assertEqual(dist_tensor_float_16.data.tolist('float16'), - tensor_float_16.ravel().tolist()) + self.assertEqual( + dist_tensor_float_16.data.tolist('float16'), + tensor_float_16.ravel().tolist(), + ) self.assertEqual(dist_tensor_float_16.data.length(), 40 * 2) self.assertEqual(dist_tensor_float_16.name, 'float_tensor_16') dist_tensor_float_16.data.reset(tensor_float_16) - self.assertEqual(dist_tensor_float_16.as_ndarray().ravel().tolist(), - tensor_float_16.ravel().tolist()) + self.assertEqual( + dist_tensor_float_16.as_ndarray().ravel().tolist(), + tensor_float_16.ravel().tolist(), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor.py b/python/paddle/fluid/tests/unittests/test_fleet_executor.py index f8469c8ed26be54716b8e2664a184af5ad3f31e6..502f03784cbcc368a2b635c69fe10d7e0d3ee887 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor.py @@ -21,21 +21,21 @@ paddle.enable_static() class TestFleetExecutor(unittest.TestCase): - def fake_fleet_opt(self): # TODO: Fake for coverage will be removed in the future import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() strategy.sharding_configs = { "dp_degree": 1, "mp_degree": 1, - "pp_degree": 1 + "pp_degree": 1, } strategy.pipeline_configs = {"accumulate_steps": 1} 
fleet_opt = { "dist_strategy": strategy.sharding_configs, "num_micro_batches": strategy.pipeline_configs["accumulate_steps"], - "scheduler": "1F1B" + "scheduler": "1F1B", } return fleet_opt @@ -43,12 +43,12 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = fluid.layers.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = fluid.layers.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = fluid.layers.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) @@ -57,23 +57,24 @@ class TestFleetExecutor(unittest.TestCase): steps_per_pass = 10 bd = [steps_per_pass * p for p in passes] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - lr_val = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr) + lr_val = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr + ) opt = paddle.optimizer.AdamW( learning_rate=lr_val, - grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0), + ) opt.minimize(loss) # TODO: section_program will be removed in the future empty_program._pipeline_opt = { "fleet_opt": self.fake_fleet_opt(), - "section_program": empty_program + "section_program": empty_program, } - res = exe.run(empty_program, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[z.name, a.name]) + res = exe.run( + empty_program, + feed={'x': x_data, 'y': y_data}, + fetch_list=[z.name, a.name], + ) return res def test_executor_on_single_device(self): diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py index c21549c3ce3348e545d0953b4c32c6a553742f7d..5c635e4ce480761d7cd790ff81459bc0c9b23956 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestFleetExecutor(unittest.TestCase): - def run_fleet_executor(self, place, fleet_opt=dict()): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() @@ -30,24 +29,25 @@ class TestFleetExecutor(unittest.TestCase): x = fluid.layers.data(name='x', shape=[1], dtype=paddle.float32) empty_program._pipeline_opt = { "fleet_opt": fleet_opt, - "section_program": empty_program + "section_program": empty_program, } exe.run(empty_program, feed={'x': [1]}) def test_dist_executor_on_multi_devices(self): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ[ - "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005,127.0.0.1:7006,127.0.0.1:7007" + "PADDLE_TRAINER_ENDPOINTS" + ] = "127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003,127.0.0.1:7004,127.0.0.1:7005,127.0.0.1:7006,127.0.0.1:7007" strategy = fleet.DistributedStrategy() strategy.sharding_configs = { "dp_degree": 2, "mp_degree": 2, - "pp_degree": 2 + "pp_degree": 2, } strategy.pipeline_configs = {"accumulate_steps": 8} fleet_opt = { "dist_strategy": strategy.sharding_configs, - "num_micro_batches": strategy.pipeline_configs["accumulate_steps"] + "num_micro_batches": strategy.pipeline_configs["accumulate_steps"], } if fluid.is_compiled_with_cuda(): # TODO: Distribute test case is not supported 
for executor can not stop diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py index 9a71937ddb25fc59626377c52bfc061a39345ecc..cda6be8f0a6be003554e69799651d1a9f9e254c9 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py @@ -21,21 +21,21 @@ paddle.enable_static() class TestFleetExecutor(unittest.TestCase): - def fake_fleet_opt(self): # TODO: Fake for coverage will be removed in the future import paddle.distributed.fleet as fleet + strategy = fleet.DistributedStrategy() strategy.sharding_configs = { "dp_degree": 1, "mp_degree": 1, - "pp_degree": 1 + "pp_degree": 1, } strategy.pipeline_configs = {"accumulate_steps": 1} fleet_opt = { "dist_strategy": strategy.sharding_configs, "num_micro_batches": strategy.pipeline_configs["accumulate_steps"], - "scheduler": "Origin" + "scheduler": "Origin", } return fleet_opt @@ -43,12 +43,12 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = fluid.layers.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = fluid.layers.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = fluid.layers.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) @@ -57,23 +57,24 @@ class TestFleetExecutor(unittest.TestCase): steps_per_pass = 10 bd = [steps_per_pass * p for p in passes] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - lr_val = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr) + lr_val = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr + ) opt = paddle.optimizer.AdamW( learning_rate=lr_val, - grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0), + ) opt.minimize(loss) # TODO: section_program will be removed in the future empty_program._pipeline_opt = { "fleet_opt": self.fake_fleet_opt(), - "section_program": empty_program + "section_program": empty_program, } - res = exe.run(empty_program, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[z.name, a.name]) + res = exe.run( + empty_program, + feed={'x': x_data, 'y': y_data}, + fetch_list=[z.name, a.name], + ) return res def test_executor_on_single_device(self): diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_task_node.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_task_node.py index 07ecf85c3db2eca2649920eefdbbb487d9330f20..03d6be10111f3d1ffc0702fd63d669bc20785c67 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_task_node.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_task_node.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestFleetExecutorTaskNode(unittest.TestCase): - def test_task_node(self): program = paddle.static.Program() task_node_0 = core.TaskNode(program.desc, 0, 1, 1) @@ -31,16 +30,19 @@ class TestFleetExecutorTaskNode(unittest.TestCase): self.assertEqual(task_node_1.task_id(), 1) self.assertEqual(task_node_2.task_id(), 2) self.assertTrue( - task_node_0.add_downstream_task(task_node_1.task_id(), 1)) + task_node_0.add_downstream_task(task_node_1.task_id(), 1) + ) 
self.assertTrue(task_node_1.add_upstream_task(task_node_0.task_id(), 1)) def test_lazy_task_node(self): program = paddle.static.Program() - task = TaskNode(program=program, - rank=0, - max_run_times=1, - max_slot_times=1, - lazy_initialize=True) + task = TaskNode( + program=program, + rank=0, + max_run_times=1, + max_slot_times=1, + lazy_initialize=True, + ) task_node = task.task_node() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_utils.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_utils.py index cbc54e8c70f5c3644f7e34abfd13054917cc2530..fb003f450f6e18aa07c1b0c49f18fc68e73d11b6 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_utils.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_utils.py @@ -20,25 +20,27 @@ paddle.enable_static() class TestFleetExecutorUtils(unittest.TestCase): - def test_construct_program(self): # TODO(liyurui): These functions are not ready now. strategy = paddle.distributed.fleet.DistributedStrategy() strategy.sharding_configs = { "dp_degree": 2, "mp_degree": 2, - "pp_degree": 2 + "pp_degree": 2, } fleet_executor_utils = FleetExecutorUtils( dist_strategy=strategy.sharding_configs, rank=0, nrank=1, - max_run_times=1) + max_run_times=1, + ) op_list = {"lr": [], "fwd": [], "bwd": [], "opt": []} program_map = fleet_executor_utils.convert_op_list_to_program( - op_list, paddle.static.Program()) + op_list, paddle.static.Program() + ) task_node_map = fleet_executor_utils.construct_task_nodes_1f1b( - program_map) + program_map + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py index f80f998c047dc34164b11f0736a1acdb08391279..802ce0cbfe3c975bd2a96929d1d18670ae6dfdc7 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py @@ -22,17 +22,16 @@ paddle.enable_static() class TestFleetExecutor(unittest.TestCase): - def run_fleet_executor(self, place, x_data, y_data): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = fluid.layers.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = fluid.layers.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = fluid.layers.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) @@ -41,11 +40,13 @@ class TestFleetExecutor(unittest.TestCase): steps_per_pass = 10 bd = [steps_per_pass * p for p in passes] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - lr_val = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr) + lr_val = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr + ) opt = paddle.optimizer.AdamW( learning_rate=lr_val, - grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0), + ) opt.minimize(loss) # TODO: section_program will be removed in the future task_node = TaskNode( @@ -55,22 +56,20 @@ class TestFleetExecutor(unittest.TestCase): node_type="Compute", max_run_times=1, max_slot_times=1, - lazy_initialize=True) + lazy_initialize=True, + ) empty_program._pipeline_opt = { "fleet_opt": { 'tasks': [task_node], - 'task_id_to_rank': { - task_node.task_id(): 0 - } + 
'task_id_to_rank': {task_node.task_id(): 0}, }, - "section_program": empty_program + "section_program": empty_program, } - res = exe.run(empty_program, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[z.name, a.name]) + res = exe.run( + empty_program, + feed={'x': x_data, 'y': y_data}, + fetch_list=[z.name, a.name], + ) return res def test_executor_on_single_device(self): diff --git a/python/paddle/fluid/tests/unittests/test_fleet_gradient_scale.py b/python/paddle/fluid/tests/unittests/test_fleet_gradient_scale.py index 6aefb2926e40c88685fef77500b660001970a5b8..36e823a2bc92a39509d160852c6eb01ae45efe6c 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_gradient_scale.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_gradient_scale.py @@ -22,7 +22,6 @@ import os class TestGradientScale(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" @@ -30,18 +29,19 @@ class TestGradientScale(unittest.TestCase): def mlp(self, input_x, input_y, hid_dim=128, label_dim=2): fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh') - prediction = paddle.static.nn.fc(x=[fc_2], - size=label_dim, - activation='softmax') - cost = paddle.nn.functional.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.static.nn.fc( + x=[fc_2], size=label_dim, activation='softmax' + ) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) return avg_cost def gen_data(self): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def test_single_gpu(self): @@ -53,16 +53,17 @@ class TestGradientScale(unittest.TestCase): strategy.gradient_scale_configs = {'scale_strategy': 'sum'} with fluid.program_guard(main_program, startup_program): with fluid.unique_name.guard(): - input_x = paddle.static.data(name="x", - shape=[None, 32], - dtype='float32') - input_y = paddle.static.data(name="y", - shape=[None, 1], - dtype='int64') + input_x = paddle.static.data( + name="x", shape=[None, 32], dtype='float32' + ) + input_y = paddle.static.data( + name="y", shape=[None, 1], dtype='int64' + ) cost = self.mlp(input_x=input_x, input_y=input_y) output_name = cost.name - optimizer = fleet.distributed_optimizer(fluid.optimizer.Adam(), - strategy) + optimizer = fleet.distributed_optimizer( + fluid.optimizer.Adam(), strategy + ) optimizer.minimize(cost) final_strategy = fleet._final_strategy() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_metric.py b/python/paddle/fluid/tests/unittests/test_fleet_metric.py index f4af98e649db270845750455615d8bc9481a5671..c8fd7e0d2e529e8108b180da1faffeacf3544b94 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_metric.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_metric.py @@ -32,7 +32,6 @@ class TestFleetMetric(unittest.TestCase): """Set up, set envs.""" class FakeUtil(UtilBase): - def __init__(self, fake_fleet): super(FakeUtil, self).__init__() self.fleet = fake_fleet @@ -79,16 +78,20 @@ class TestFleetMetric(unittest.TestCase): train = fluid.Program() startup = fluid.Program() with fluid.program_guard(train, startup): - t = fluid.layers.create_global_var(shape=[1, 1], - value=1, - dtype='int64', - persistable=True, - force_cpu=True) - t1 = fluid.layers.create_global_var(shape=[1, 1], - value=1, - 
dtype='int64', - persistable=True, - force_cpu=True) + t = fluid.layers.create_global_var( + shape=[1, 1], + value=1, + dtype='int64', + persistable=True, + force_cpu=True, + ) + t1 = fluid.layers.create_global_var( + shape=[1, 1], + value=1, + dtype='int64', + persistable=True, + force_cpu=True, + ) place = fluid.CPUPlace() exe = fluid.Executor(place) scope = fluid.Scope() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py b/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py index 81daea3af9310cba658487f42db6ce5469e86195..8c92efe323650e7e3dad494042d121367c1db53e 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py @@ -26,7 +26,8 @@ class TestFleet1(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_pslib_1(self): """Test cases for pslib.""" @@ -41,33 +42,48 @@ class TestFleet1(unittest.TestCase): os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() - #role_maker.generate_role() + # role_maker.generate_role() place = fluid.CPUPlace() exe = fluid.Executor(place) - #fleet.init(role_maker) + # fleet.init(role_maker) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) - emb = fluid.layers.embedding(input=show, size=[1, 1], \ - is_sparse=True, is_distributed=True, \ - param_attr=fluid.ParamAttr(name="embedding")) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + emb = fluid.layers.embedding( + input=show, + size=[1, 1], + is_sparse=True, + is_distributed=True, + param_attr=fluid.ParamAttr(name="embedding"), + ) fc = fluid.layers.fc(input=emb, size=1, act=None) - label = fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) try: adam = fluid.optimizer.Adam(learning_rate=0.000005) - adam = fleet.distributed_optimizer(adam, - strategy={ - "embedding": { - "sparse_accessor_class": - "DownpourCtrAccessor" - } - }) + adam = fleet.distributed_optimizer( + adam, + strategy={ + "embedding": { + "sparse_accessor_class": "DownpourCtrAccessor" + } + }, + ) adam.minimize([cost], [scope]) fleet.run_server() except: diff --git a/python/paddle/fluid/tests/unittests/test_fleet_ps.py b/python/paddle/fluid/tests/unittests/test_fleet_ps.py index 687c9a6eaf59e484db7a736f80df49c4c9d40af4..f5f7f81311cfc2db9d851385271e5cf37cd7ea9e 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_ps.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_ps.py @@ -14,15 +14,17 @@ import unittest from paddle.fluid.framework import default_main_program -from paddle.fluid.incubate.fleet.parameter_server.ir.pserver_pass import _get_optimizer_input_shape +from paddle.fluid.incubate.fleet.parameter_server.ir.pserver_pass import ( + _get_optimizer_input_shape, +) main_program = default_main_program() class 
TestFleetPS(unittest.TestCase): - def test_version(self): from paddle.fluid.incubate.fleet.parameter_server import version + transpiler = version.is_transpiler() self.assertEqual(transpiler, True) @@ -43,8 +45,9 @@ class TestFleetPS(unittest.TestCase): for attrs in optimizers: op_type, varkey, orig_shape, param_shape = attrs - new_shape = _get_optimizer_input_shape(op_type, varkey, orig_shape, - param_shape) + new_shape = _get_optimizer_input_shape( + op_type, varkey, orig_shape, param_shape + ) self.assertListEqual(new_shape, param_shape) optimizers = [] @@ -52,8 +55,9 @@ class TestFleetPS(unittest.TestCase): for attrs in optimizers: op_type, varkey, orig_shape, param_shape = attrs - new_shape = _get_optimizer_input_shape(op_type, varkey, orig_shape, - param_shape) + new_shape = _get_optimizer_input_shape( + op_type, varkey, orig_shape, param_shape + ) self.assertListEqual(new_shape, orig_shape) with self.assertRaises(ValueError): @@ -62,8 +66,9 @@ class TestFleetPS(unittest.TestCase): for attrs in optimizers: op_type, varkey, orig_shape, param_shape = attrs - _get_optimizer_input_shape(op_type, varkey, orig_shape, - param_shape) + _get_optimizer_input_shape( + op_type, varkey, orig_shape, param_shape + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py index d22fc3a1b8c438d17f82f22c9f8333331737bfcc..fb4ed901d814bd6912c67e2532d904781f74574c 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py @@ -15,12 +15,15 @@ import unittest import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, +) +from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import ( + StrategyFactory, +) class TestPyramidHashOpApi(unittest.TestCase): - def test_dist_geo_server_transpiler(self): num_voc = 128 embed_dim = 64 @@ -49,7 +52,8 @@ class TestPyramidHashOpApi(unittest.TestCase): ), param_attr_bl=None, distribute_update_vars=["PyramidHash_emb_0"], - name=None) + name=None, + ) cost = fluid.layers.reduce_sum(hash_embd) @@ -57,7 +61,8 @@ class TestPyramidHashOpApi(unittest.TestCase): current_id=0, role=role_maker.Role.SERVER, worker_num=2, - server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) + server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"], + ) fleet.init(role) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py index 9824b59028fad29b7cafd2eec666aed0dfeca232..05732d3cc86199a1ffec7b110b6d1a0a1bda7e3a 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py @@ -27,7 +27,8 @@ class TestCloudRoleMaker(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_tr_rolemaker(self): """Test tr rolenamer.""" @@ -70,20 +71,30 @@ class TestCloudRoleMaker(unittest.TestCase): 
os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() - #print("init rolemaker") - #role_maker.generate_role() + # print("init rolemaker") + # role_maker.generate_role() place = fluid.CPUPlace() exe = fluid.Executor(place) - #fleet.init(role_maker) + # fleet.init(role_maker) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="float32", lod_level=1, append_batch_size=False) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="float32", + lod_level=1, + append_batch_size=False, + ) fc = fluid.layers.fc(input=show, size=1, act=None) - label = fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) try: @@ -95,8 +106,10 @@ class TestCloudRoleMaker(unittest.TestCase): print("do not support pslib test, skip") return fleet.clear_one_table(0) - from paddle.fluid.incubate.fleet.base.role_maker import \ - MPISymetricRoleMaker + from paddle.fluid.incubate.fleet.base.role_maker import ( + MPISymetricRoleMaker, + ) + try: role = MPISymetricRoleMaker() role._all_reduce([1], [2]) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py index b60c43a209a14798fee4a791615fe4de7369e3a2..7d03b00fbdd8b6e6252766890b9bf7130b6da4ce 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py @@ -34,7 +34,9 @@ class TestCloudRoleMaker2(unittest.TestCase): def test_pslib_2(self): """Test cases for pslib.""" import paddle.fluid as fluid - from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet + from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( + fleet, + ) from paddle.fluid.incubate.fleet.base.role_maker import GeneralRoleMaker from paddle.fluid.incubate.fleet.base.role_maker import RoleMakerBase @@ -58,11 +60,21 @@ class TestCloudRoleMaker2(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="float32", lod_level=1, append_batch_size=False) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="float32", + lod_level=1, + append_batch_size=False, + ) fc = fluid.layers.fc(input=show, size=1, act=None) - label = fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) try: @@ -159,8 +171,9 @@ class TestCloudRoleMaker2(unittest.TestCase): role23._get_size() role23._get_size() - path = os.path.join(self.temp_dir.name, - "test_fleet_gloo_role_maker_1.txt") + path = os.path.join( + self.temp_dir.name, "test_fleet_gloo_role_maker_1.txt" + ) with open(path, "w") as f: data = "1 1 1 1\n" f.write(data) @@ -172,7 +185,7 @@ class 
TestCloudRoleMaker2(unittest.TestCase): dataset.get_memory_data_size(fleet) dataset.get_shuffle_data_size(fleet) - class TmpClass(): + class TmpClass: """ dummy tmp class """ @@ -262,13 +275,16 @@ class TestCloudRoleMaker2(unittest.TestCase): tmp.all_reduce_worker([], []) tmp.barrier_worker() from paddle.fluid.incubate.fleet.base.role_maker import GeneralRoleMaker + tmp = RoleMakerBase() tmp.all_gather(1) tmp.all_reduce_worker([], []) tmp.barrier_worker() tmp.barrier_all() - from paddle.fluid.incubate.fleet.base.role_maker import \ - MPISymetricRoleMaker + from paddle.fluid.incubate.fleet.base.role_maker import ( + MPISymetricRoleMaker, + ) + tmp1 = MPISymetricRoleMaker() tmp1.all_gather(1) tmp1.all_gather(1) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py index 2e639a8167984b53a6aca124953f9cb0be9c82d5..0ce1b0743aa92d278da9898e051a14b6a6edacf3 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py @@ -26,7 +26,8 @@ class TestCloudRoleMaker(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_pslib_1(self): """Test cases for pslib.""" @@ -40,22 +41,34 @@ class TestCloudRoleMaker(unittest.TestCase): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" - role_maker = GeneralRoleMaker(init_timeout_seconds=100, - run_timeout_seconds=100, - http_ip_port="127.0.0.1:36003") - #role_maker.generate_role() + role_maker = GeneralRoleMaker( + init_timeout_seconds=100, + run_timeout_seconds=100, + http_ip_port="127.0.0.1:36003", + ) + # role_maker.generate_role() place = fluid.CPUPlace() exe = fluid.Executor(place) - #fleet.init(role_maker) + # fleet.init(role_maker) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="float32", lod_level=1, append_batch_size=False) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="float32", + lod_level=1, + append_batch_size=False, + ) fc = fluid.layers.fc(input=show, size=1, act=None) - label = fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) try: @@ -72,6 +85,7 @@ class TestCloudRoleMaker(unittest.TestCase): return from paddle.fluid.incubate.fleet.base.role_maker import MockBarrier + mb = MockBarrier() mb.barrier() mb.barrier_all() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py index 14d5277446bb0032cdafbaded2bda328bceeced0..2825a1232fc0556fbebab9c6a7889c84bab4a221 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py @@ -26,11 +26,13 @@ class TestCloudRoleMaker(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - 
"PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_pslib_1(self): """Test cases for pslib.""" import threading + try: from paddle.distributed.fleet.utils.http_server import KVHandler from paddle.distributed.fleet.utils.http_server import KVServer @@ -39,7 +41,7 @@ class TestCloudRoleMaker(unittest.TestCase): print("warning: no fleet, skip test_pslib_4") return - class FakeStream(): + class FakeStream: """ it is a fake stream only for test. """ @@ -113,6 +115,7 @@ class TestCloudRoleMaker(unittest.TestCase): fake end header, it will do nothing. """ pass + except: print("warning: no KVHandler, skip test_pslib_4") return @@ -130,6 +133,7 @@ class TestCloudRoleMaker(unittest.TestCase): self.delete_kv = {} self.kv_lock = threading.Lock() self.kv = {} + except: print("warning: no KVHTTPServer, skip test_pslib_4") return @@ -147,6 +151,7 @@ class TestCloudRoleMaker(unittest.TestCase): self.listen_thread = None self.size = {} self.size["a"] = 999 + except: print("warning: no KVServer, skip test_pslib_4") return diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_init.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_init.py index dfc88afaf1f9d1760ee81f2bbb17e4118eb0fd4e..f7f2b11acdcb2d01610d0fdd53b744bc2d6bd080 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_init.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_init.py @@ -25,7 +25,8 @@ class TestPSCloudRoleMakerCase1(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" def test_paddle_trainers_num(self): # PADDLE_TRAINERS_NUM @@ -40,7 +41,8 @@ class TestPSCloudRoleMakerCase2(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ["PADDLE_TRAINERS_NUM"] = str(2) def test_training_role(self): @@ -56,7 +58,8 @@ class TestPSCloudRoleMakerCase3(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ["PADDLE_TRAINERS_NUM"] = str(2) os.environ["TRAINING_ROLE"] = 'TRAINER' @@ -73,7 +76,8 @@ class TestPSCloudRoleMakerCase4(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ["PADDLE_TRAINERS_NUM"] = str(2) os.environ["TRAINING_ROLE"] = 'PSERVER' @@ -90,7 +94,8 @@ class TestPSCloudRoleMakerCase5(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ["PADDLE_TRAINERS_NUM"] = str(2) os.environ["TRAINING_ROLE"] = 'PSERVER' os.environ["PADDLE_PORT"] = str(4001) @@ -108,9 +113,11 @@ class TestPSCloudRoleMakerCase6(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ[ - "PADDLE_HETER_TRAINER_IP_PORT_LIST"] = "127.0.0.1:4003,127.0.0.1:4004" + "PADDLE_HETER_TRAINER_IP_PORT_LIST" + ] = "127.0.0.1:4003,127.0.0.1:4004" os.environ["PADDLE_TRAINERS_NUM"] = str(2) 
os.environ["TRAINING_ROLE"] = 'HETER_TRAINER' @@ -127,9 +134,11 @@ class TestPSCloudRoleMakerCase7(unittest.TestCase): def setUp(self): os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:4001,127.0.0.1:4002" os.environ[ - "PADDLE_HETER_TRAINER_IP_PORT_LIST"] = "127.0.0.1:4003,127.0.0.1:4004" + "PADDLE_HETER_TRAINER_IP_PORT_LIST" + ] = "127.0.0.1:4003,127.0.0.1:4004" os.environ["PADDLE_TRAINERS_NUM"] = str(2) os.environ["TRAINING_ROLE"] = 'HETER_TRAINER' os.environ["PADDLE_PORT"] = str(4003) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py index 21a0afab0126a9323e7679bfcd2f6538f4a22f68..4a415f92ca5a16848c587802fe1a29306a0df710 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py @@ -17,9 +17,9 @@ import paddle class TestFleetRuntime(unittest.TestCase): - def test_fleet_runtime_base(self): import paddle.distributed.fleet.runtime + base = paddle.distributed.fleet.runtime.runtime_base.RuntimeBase() base._run_worker() base._init_server() @@ -30,7 +30,9 @@ class TestFleetRuntime(unittest.TestCase): def test_fleet_collective_runtime(self): import paddle.distributed.fleet.runtime - collective_runtime = paddle.distributed.fleet.runtime.CollectiveRuntime( + + collective_runtime = ( + paddle.distributed.fleet.runtime.CollectiveRuntime() ) collective_runtime._init_worker() collective_runtime._run_worker() @@ -42,20 +44,26 @@ class TestFleetRuntime(unittest.TestCase): def test_fleet_ps_runtime(self): ps_runtime = paddle.distributed.fleet.runtime.ParameterServerRuntime() - self.assertRaises(Exception, ps_runtime._get_optimizer_status, - "test_op", None) + self.assertRaises( + Exception, ps_runtime._get_optimizer_status, "test_op", None + ) reshaped_names, origin_names = ps_runtime._get_optimizer_status( - "adam", "param") + "adam", "param" + ) self.assertTrue( - len(reshaped_names) == 2 and reshaped_names[0] == 'param_moment1_0' - and reshaped_names[1] == 'param_moment2_0') + len(reshaped_names) == 2 + and reshaped_names[0] == 'param_moment1_0' + and reshaped_names[1] == 'param_moment2_0' + ) self.assertTrue( len(origin_names) == 2 and origin_names[0] == 'param_beta1_pow_acc_0' - and origin_names[1] == 'param_beta2_pow_acc_0') + and origin_names[1] == 'param_beta2_pow_acc_0' + ) reshaped_names, origin_names = ps_runtime._get_optimizer_status( - "sgd", "param") + "sgd", "param" + ) self.assertTrue(len(reshaped_names) == 0 and len(origin_names) == 0) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py b/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py index 09cc08dc327c26a9ae4fda0d3f55a7fe46a1b1f1..d1f4df653280027ebd4574f2aced6565badeee94 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py @@ -26,7 +26,8 @@ class TestFleet1(unittest.TestCase): """Set up, set envs.""" os.environ["PADDLE_TRAINERS_NUM"] = "2" os.environ[ - "PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36001,127.0.0.2:36001" + "PADDLE_PSERVERS_IP_PORT_LIST" + ] = "127.0.0.1:36001,127.0.0.2:36001" def test_pslib_1(self): """Test cases for pslib.""" @@ -41,22 +42,36 @@ class TestFleet1(unittest.TestCase): os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() - #role_maker.generate_role() + # 
role_maker.generate_role() place = fluid.CPUPlace() exe = fluid.Executor(place) - #fleet.init(role_maker) + # fleet.init(role_maker) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data(name="show", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) - emb = fluid.layers.embedding(input=show, size=[1, 1], \ - is_sparse=True, is_distributed=True, \ - param_attr=fluid.ParamAttr(name="embedding")) + show = fluid.layers.data( + name="show", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) + emb = fluid.layers.embedding( + input=show, + size=[1, 1], + is_sparse=True, + is_distributed=True, + param_attr=fluid.ParamAttr(name="embedding"), + ) fc = fluid.layers.fc(input=emb, size=1, act=None) - label = fluid.layers.data(name="click", shape=[-1, 1], \ - dtype="int64", lod_level=1, append_batch_size=False) + label = fluid.layers.data( + name="click", + shape=[-1, 1], + dtype="int64", + lod_level=1, + append_batch_size=False, + ) label_cast = fluid.layers.cast(label, dtype='float32') cost = fluid.layers.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_util.py b/python/paddle/fluid/tests/unittests/test_fleet_util.py index 1b6f2cf85527746e63fdeb0d8b174f4f2429c153..ff12ef64b5f613d514a34422cf13c874348a1201 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_util.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_util.py @@ -31,6 +31,7 @@ class TestFleetUtil(unittest.TestCase): def test_util_base(self): import paddle.distributed.fleet as fleet + util = fleet.UtilBase() strategy = fleet.DistributedStrategy() util._set_strategy(strategy) @@ -39,6 +40,7 @@ class TestFleetUtil(unittest.TestCase): def test_util_factory(self): import paddle.distributed.fleet as fleet + factory = fleet.base.util_factory.UtilFactory() strategy = fleet.DistributedStrategy() role_maker = None # should be fleet.PaddleCloudRoleMaker() @@ -53,6 +55,7 @@ class TestFleetUtil(unittest.TestCase): def test_get_util(self): import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker + role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) self.assertNotEqual(fleet.util, None) @@ -61,7 +64,6 @@ class TestFleetUtil(unittest.TestCase): import paddle.distributed.fleet as fleet class UserDefinedUtil(fleet.UtilBase): - def __init__(self): super(UserDefinedUtil, self).__init__() @@ -69,6 +71,7 @@ class TestFleetUtil(unittest.TestCase): return 10 import paddle.distributed.fleet.base.role_maker as role_maker + role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) my_util = UserDefinedUtil() @@ -87,8 +90,9 @@ class TestFleetUtil(unittest.TestCase): fleet.util._set_file_system(fs) def download_files(self): - path = download(self.proto_data_url, self.module_name, - self.proto_data_md5) + path = download( + self.proto_data_url, self.module_name, self.proto_data_md5 + ) print('data is downloaded at ' + path) tar = tarfile.open(path) unzip_folder = tempfile.mkdtemp() @@ -97,6 +101,7 @@ class TestFleetUtil(unittest.TestCase): def test_get_file_shard(self): import paddle.distributed.fleet as fleet + self.assertRaises(Exception, fleet.util.get_file_shard, "files") role = role_maker.UserDefinedRoleMaker( @@ -105,7 +110,8 @@ class TestFleetUtil(unittest.TestCase): current_id=0, role=role_maker.Role.WORKER, worker_endpoints=["127.0.0.1:6003", "127.0.0.1:6004"], 
- server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"]) + server_endpoints=["127.0.0.1:6001", "127.0.0.1:6002"], + ) fleet.init(role) files = fleet.util.get_file_shard(["1", "2", "3"]) @@ -113,21 +119,27 @@ class TestFleetUtil(unittest.TestCase): def test_program_type_trans(self): import paddle.distributed.fleet as fleet + data_dir = self.download_files() program_dir = os.path.join(data_dir, self.pruned_dir) text_program = "pruned_main_program.pbtxt" binary_program = "pruned_main_program.bin" - text_to_binary = fleet.util._program_type_trans(program_dir, - text_program, True) - binary_to_text = fleet.util._program_type_trans(program_dir, - binary_program, False) + text_to_binary = fleet.util._program_type_trans( + program_dir, text_program, True + ) + binary_to_text = fleet.util._program_type_trans( + program_dir, binary_program, False + ) self.assertTrue( - os.path.exists(os.path.join(program_dir, text_to_binary))) + os.path.exists(os.path.join(program_dir, text_to_binary)) + ) self.assertTrue( - os.path.exists(os.path.join(program_dir, binary_to_text))) + os.path.exists(os.path.join(program_dir, binary_to_text)) + ) def test_prams_check(self): import paddle.distributed.fleet as fleet + data_dir = self.download_files() class config: @@ -139,7 +151,7 @@ class TestFleetUtil(unittest.TestCase): feed_config.feeded_vars_types = [np.float32, np.float32] feed_config.feeded_vars_filelist = [ os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_1")), - os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_2")) + os.path.join(data_dir, os.path.join(self.pruned_dir, "concat_2")), ] fetch_config = config() @@ -155,7 +167,9 @@ class TestFleetUtil(unittest.TestCase): conf.save_params_filename = None # test saved var's shape - conf.dump_program_filename = "pruned_main_program.save_var_shape_not_match" + conf.dump_program_filename = ( + "pruned_main_program.save_var_shape_not_match" + ) self.assertRaises(Exception, fleet.util._params_check) @@ -164,10 +178,13 @@ class TestFleetUtil(unittest.TestCase): results = fleet.util._params_check(conf) self.assertTrue(len(results) == 1) np.testing.assert_array_almost_equal( - results[0], np.array([[3.0590223e-07]], dtype=np.float32)) + results[0], np.array([[3.0590223e-07]], dtype=np.float32) + ) # test feed_var's shape - conf.dump_program_filename = "pruned_main_program.feed_var_shape_not_match" + conf.dump_program_filename = ( + "pruned_main_program.feed_var_shape_not_match" + ) self.assertRaises(Exception, fleet.util._params_check) # test correct case with feed_vars_filelist @@ -175,7 +192,8 @@ class TestFleetUtil(unittest.TestCase): results = fleet.util._params_check(conf) self.assertTrue(len(results) == 1) np.testing.assert_array_almost_equal( - results[0], np.array([[3.0590223e-07]], dtype=np.float32)) + results[0], np.array([[3.0590223e-07]], dtype=np.float32) + ) # test correct case without feed_vars_filelist conf.feed_config.feeded_vars_filelist = None @@ -189,6 +207,7 @@ class TestFleetUtil(unittest.TestCase): def test_proto_check(self): import paddle.distributed.fleet as fleet + data_dir = self.download_files() class config: @@ -196,14 +215,17 @@ class TestFleetUtil(unittest.TestCase): conf = config() conf.train_prog_path = os.path.join( - data_dir, os.path.join(self.train_dir, "join_main_program.pbtxt")) + data_dir, os.path.join(self.train_dir, "join_main_program.pbtxt") + ) conf.is_text_train_program = True # test not match conf.pruned_prog_path = os.path.join( data_dir, - os.path.join(self.pruned_dir, - 
"pruned_main_program.save_var_shape_not_match")) + os.path.join( + self.pruned_dir, "pruned_main_program.save_var_shape_not_match" + ), + ) conf.is_text_pruned_program = True conf.draw = False res = fleet.util._proto_check(conf) @@ -211,8 +233,8 @@ class TestFleetUtil(unittest.TestCase): # test match conf.pruned_prog_path = os.path.join( - data_dir, os.path.join(self.pruned_dir, - "pruned_main_program.pbtxt")) + data_dir, os.path.join(self.pruned_dir, "pruned_main_program.pbtxt") + ) if sys.platform == 'win32' or sys.platform == 'sys.platform': conf.draw = False else: @@ -223,13 +245,15 @@ class TestFleetUtil(unittest.TestCase): def test_visualize(self): import paddle.distributed.fleet as fleet + if sys.platform == 'win32' or sys.platform == 'sys.platform': pass else: data_dir = self.download_files() program_path = os.path.join( - data_dir, os.path.join(self.train_dir, - "join_main_program.pbtxt")) + data_dir, + os.path.join(self.train_dir, "join_main_program.pbtxt"), + ) is_text = True program = fleet.util._load_program(program_path, is_text) output_dir = os.path.join(data_dir, self.train_dir) @@ -237,10 +261,14 @@ class TestFleetUtil(unittest.TestCase): fleet.util._visualize_graphviz(program, output_dir, output_filename) self.assertTrue( os.path.exists( - os.path.join(output_dir, output_filename + ".dot"))) + os.path.join(output_dir, output_filename + ".dot") + ) + ) self.assertTrue( os.path.exists( - os.path.join(output_dir, output_filename + ".pdf"))) + os.path.join(output_dir, output_filename + ".pdf") + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py index ddf3534ad336c9208d4cb43b4b8d9aa71a5064db..f5943db87e0c7f8a345cfdb9499258f79c505fd7 100644 --- a/python/paddle/fluid/tests/unittests/test_flip.py +++ b/python/paddle/fluid/tests/unittests/test_flip.py @@ -41,13 +41,15 @@ class TestFlipOp_API(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[3, 2, 1], [6, 5, 4]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='flip output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='flip output is wrong, out =' + str(out_np), + ) def test_dygraph(self): img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -60,11 +62,11 @@ class TestFlipOp_API(unittest.TestCase): self.assertTrue( (ret.numpy() == out_ref).all(), - msg='flip output is wrong, out =' + str(ret.numpy())) + msg='flip output is wrong, out =' + str(ret.numpy()), + ) class TestFlipOp(OpTest): - def setUp(self): self.op_type = 'flip' self.python_api = paddle.tensor.flip @@ -96,49 +98,42 @@ class TestFlipOp(OpTest): class TestFlipOpAxis1(TestFlipOp): - def init_test_case(self): self.in_shape = (2, 4, 4) self.axis = [0] class TestFlipOpAxis2(TestFlipOp): - def init_test_case(self): self.in_shape = (4, 4, 6, 3) self.axis = [0, 2] class TestFlipOpAxis3(TestFlipOp): - def init_test_case(self): self.in_shape = (4, 3, 1) self.axis = [0, 1, 2] class TestFlipOpAxis4(TestFlipOp): - def init_test_case(self): self.in_shape = (6, 4, 2, 2) self.axis = [0, 1, 2, 3] class TestFlipOpEmptyAxis(TestFlipOp): - def init_test_case(self): self.in_shape = (6, 4, 2, 2) self.axis = [] class TestFlipOpNegAxis(TestFlipOp): - 
def init_test_case(self): self.in_shape = (6, 4, 2, 2) self.axis = [-1] class TestFlipDoubleGradCheck(unittest.TestCase): - def flip_wrapper(self, x): return paddle.flip(x[0], [0, 1]) @@ -153,17 +148,13 @@ class TestFlipDoubleGradCheck(unittest.TestCase): out = paddle.flip(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.flip_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.flip_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -175,7 +166,6 @@ class TestFlipDoubleGradCheck(unittest.TestCase): class TestFlipTripleGradCheck(unittest.TestCase): - def flip_wrapper(self, x): return paddle.flip(x[0], [0, 1]) @@ -190,17 +180,13 @@ class TestFlipTripleGradCheck(unittest.TestCase): out = paddle.flip(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.flip_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.flip_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_fmax_op.py b/python/paddle/fluid/tests/unittests/test_fmax_op.py index 16e462754790e57e0b4c45a737eafb539ca4dd96..986417fede626479379a97dec056c170e6fefb7b 100644 --- a/python/paddle/fluid/tests/unittests/test_fmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_fmax_op.py @@ -44,56 +44,56 @@ class ApiFMaxTest(unittest.TestCase): def test_static_api(self): """test_static_api""" paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_fmax = paddle.fmax(data_x, data_y) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": self.input_y - }, - fetch_list=[result_fmax]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[result_fmax], + ) np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_fmax = paddle.fmax(data_x, data_z) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "z": self.input_z - }, - fetch_list=[result_fmax]) + (res,) = exe.run( + feed={"x": self.input_x, "z": self.input_z}, + fetch_list=[result_fmax], + ) np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) - with 
paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_fmax = paddle.fmax(data_a, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "a": self.input_a, - "c": self.input_c - }, - fetch_list=[result_fmax]) + (res,) = exe.run( + feed={"a": self.input_a, "c": self.input_c}, + fetch_list=[result_fmax], + ) np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_fmax = paddle.fmax(data_b, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "b": self.input_b, - "c": self.input_c - }, - fetch_list=[result_fmax]) + (res,) = exe.run( + feed={"b": self.input_b, "c": self.input_c}, + fetch_list=[result_fmax], + ) np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): @@ -151,19 +151,23 @@ class TestElementwiseFmaxOp(OpTest): def test_check_grad_ingore_x(self): """test_check_grad_ingore_x""" - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + check_eager=True, + ) def test_check_grad_ingore_y(self): """test_check_grad_ingore_y""" - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + check_eager=True, + ) class TestElementwiseFmax2Op(OpTest): @@ -194,19 +198,23 @@ class TestElementwiseFmax2Op(OpTest): def test_check_grad_ingore_x(self): """test_check_grad_ingore_x""" - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + check_eager=True, + ) def test_check_grad_ingore_y(self): """test_check_grad_ingore_y""" - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + check_eager=True, + ) class TestElementwiseFmax3Op(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fmin_op.py b/python/paddle/fluid/tests/unittests/test_fmin_op.py index daad19a40593d599acec5ef059427fffdfb115cb..e1a9cf7a912d7adfb6dbe961d6a2d1c7bc45f6c7 100644 --- a/python/paddle/fluid/tests/unittests/test_fmin_op.py +++ b/python/paddle/fluid/tests/unittests/test_fmin_op.py @@ -46,56 +46,56 @@ class ApiFMinTest(unittest.TestCase): def test_static_api(self): """test_static_api""" paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_fmin = paddle.fmin(data_x, data_y) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": 
self.input_y - }, - fetch_list=[result_fmin]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[result_fmin], + ) np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_fmin = paddle.fmin(data_x, data_z) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "z": self.input_z - }, - fetch_list=[result_fmin]) + (res,) = exe.run( + feed={"x": self.input_x, "z": self.input_z}, + fetch_list=[result_fmin], + ) np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_fmin = paddle.fmin(data_a, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "a": self.input_a, - "c": self.input_c - }, - fetch_list=[result_fmin]) + (res,) = exe.run( + feed={"a": self.input_a, "c": self.input_c}, + fetch_list=[result_fmin], + ) np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_fmin = paddle.fmin(data_b, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "b": self.input_b, - "c": self.input_c - }, - fetch_list=[result_fmin]) + (res,) = exe.run( + feed={"b": self.input_b, "c": self.input_c}, + fetch_list=[result_fmin], + ) np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): @@ -153,19 +153,23 @@ class TestElementwiseFminOp(OpTest): def test_check_grad_ingore_x(self): """test_check_grad_ingore_x""" - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + check_eager=True, + ) def test_check_grad_ingore_y(self): """test_check_grad_ingore_y""" - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + check_eager=True, + ) class TestElementwiseFmin2Op(OpTest): @@ -196,19 +200,23 @@ class TestElementwiseFmin2Op(OpTest): def test_check_grad_ingore_x(self): """test_check_grad_ingore_x""" - self.check_grad(['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + check_eager=True, + ) def test_check_grad_ingore_y(self): """test_check_grad_ingore_y""" - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + check_eager=True, + ) class TestElementwiseFmin3Op(OpTest): diff --git 
a/python/paddle/fluid/tests/unittests/test_fold_op.py b/python/paddle/fluid/tests/unittests/test_fold_op.py index 62833173909e3208d3cd3ed5093344c1bd912bb9..b0c9a757585e66570d54267264c249f78d2a569c 100644 --- a/python/paddle/fluid/tests/unittests/test_fold_op.py +++ b/python/paddle/fluid/tests/unittests/test_fold_op.py @@ -42,34 +42,64 @@ class TestFoldOp(OpTest): def calc_fold(self): output_shape = [0] * 4 output_shape[0] = self.batch_size - output_shape[1] = int(self.input_channels / - (self.kernel_sizes[0] * self.kernel_sizes[1])) + output_shape[1] = int( + self.input_channels / (self.kernel_sizes[0] * self.kernel_sizes[1]) + ) output_shape[2] = self.output_sizes[0] output_shape[3] = self.output_sizes[1] dkernel_h = self.dilations[0] * (self.kernel_sizes[0] - 1) + 1 dkernel_w = self.dilations[1] * (self.kernel_sizes[1] - 1) + 1 - col_height = int((self.output_sizes[0] + self.paddings[0] + - self.paddings[2] - dkernel_h) / self.strides[0]) + 1 - col_width = int((self.output_sizes[1] + self.paddings[1] + - self.paddings[3] - dkernel_w) / self.strides[1]) + 1 + col_height = ( + int( + ( + self.output_sizes[0] + + self.paddings[0] + + self.paddings[2] + - dkernel_h + ) + / self.strides[0] + ) + + 1 + ) + col_width = ( + int( + ( + self.output_sizes[1] + + self.paddings[1] + + self.paddings[3] + - dkernel_w + ) + / self.strides[1] + ) + + 1 + ) output = np.zeros(output_shape).astype(np.float64) ############ calculate output ############## for b in range(output_shape[0]): for c in range(self.input_channels): w_offset = int(c % self.kernel_sizes[1]) h_offset = int( - (c / self.kernel_sizes[1]) % self.kernel_sizes[0]) + (c / self.kernel_sizes[1]) % self.kernel_sizes[0] + ) c_out = int(c / self.kernel_sizes[0] / self.kernel_sizes[1]) for h in range(col_height): - h_out = int(h * self.strides[0] - self.paddings[0] + - h_offset * self.dilations[0]) + h_out = int( + h * self.strides[0] + - self.paddings[0] + + h_offset * self.dilations[0] + ) for w in range(col_width): - w_out = int(w * self.strides[1] - self.paddings[1] + - w_offset * self.dilations[1]) + w_out = int( + w * self.strides[1] + - self.paddings[1] + + w_offset * self.dilations[1] + ) if (h_out >= 0 and h_out < self.output_sizes[0]) and ( - w_out >= 0 and w_out < self.output_sizes[1]): - output[b, c_out, h_out, - w_out] += self.x[b, c, w + col_width * h] + w_out >= 0 and w_out < self.output_sizes[1] + ): + output[b, c_out, h_out, w_out] += self.x[ + b, c, w + col_width * h + ] self.outputs = output @@ -82,7 +112,7 @@ class TestFoldOp(OpTest): 'paddings': self.paddings, 'dilations': self.dilations, 'strides': self.strides, - 'output_sizes': self.output_sizes + 'output_sizes': self.output_sizes, } self.outputs = {'Y': self.outputs} @@ -100,7 +130,7 @@ class TestFoldOp(OpTest): class TestFoldAPI(TestFoldOp): - #This is for test on paddle.nn.Fold + # This is for test on paddle.nn.Fold def setUp(self): self.op_type = 'fold' @@ -117,19 +147,19 @@ class TestFoldAPI(TestFoldOp): m = paddle.nn.Fold(**self.attrs) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - self.outputs['Y'], - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), self.outputs['Y'], rtol=1e-05 + ) def test_info(self): str(paddle.nn.Fold(**self.attrs)) class TestFoldOpError(unittest.TestCase): - def test_errors(self): from paddle.nn.functional import fold from paddle.fluid.framework import Program, program_guard + with program_guard(Program(), Program()): def test_input_shape(): @@ -145,59 +175,67 @@ class TestFoldOpError(unittest.TestCase): def 
test_padding_shape(): # padding_size must be 2 or 4 x = paddle.randn(shape=[2, 6, 6], dtype="float32") - out = fold(x, - output_sizes=[2, 3], - kernel_sizes=[2, 2], - paddings=[2, 2, 3]) + out = fold( + x, + output_sizes=[2, 3], + kernel_sizes=[2, 2], + paddings=[2, 2, 3], + ) def test_dilations_shape(): # dialtions_size must be 2 x = paddle.randn(shape=[2, 6, 6], dtype="float32") - out = fold(x, - output_sizes=[2, 3], - kernel_sizes=[2, 2], - dilations=[2, 2, 3]) + out = fold( + x, + output_sizes=[2, 3], + kernel_sizes=[2, 2], + dilations=[2, 2, 3], + ) def test_strides_shape(): # strids_size must be 2 x = paddle.randn(shape=[2, 6, 6], dtype="float32") - out = fold(x, - output_sizes=[2, 3], - kernel_sizes=[2, 2], - strides=[2, 2, 3]) + out = fold( + x, + output_sizes=[2, 3], + kernel_sizes=[2, 2], + strides=[2, 2, 3], + ) def test_output_size(): # im_h * im_w must be L x = paddle.randn(shape=[2, 6, 6], dtype="float32") - out = fold(x, - output_sizes=[6, 6], - kernel_sizes=[2, 2], - strides=[1, 1]) + out = fold( + x, output_sizes=[6, 6], kernel_sizes=[2, 2], strides=[1, 1] + ) def test_output_size_2(): # out_size must GT 1 x = paddle.randn(shape=[2, 6, 6], dtype="float32") - out = fold(x, - output_sizes=[0.1, 0.2], - kernel_sizes=[2, 2], - strides=[1, 1]) + out = fold( + x, + output_sizes=[0.1, 0.2], + kernel_sizes=[2, 2], + strides=[1, 1], + ) def test_block_h_w(): # test_block_h_w GT 0 x = paddle.randn(shape=[2, 1, 1], dtype="float32") - out = fold(x, - output_sizes=[1, 1], - kernel_sizes=[2, 2], - strides=1) + out = fold( + x, output_sizes=[1, 1], kernel_sizes=[2, 2], strides=1 + ) def test_GT_0(): x = paddle.randn(shape=[2, 1, 1], dtype="float32") - out = fold(x, - output_sizes=[0, 0], - kernel_sizes=[0, 0], - dilations=0, - paddings=[0, 0], - strides=0) + out = fold( + x, + output_sizes=[0, 0], + kernel_sizes=[0, 0], + dilations=0, + paddings=[0, 0], + strides=0, + ) self.assertRaises(AssertionError, test_input_shape) self.assertRaises(AssertionError, test_kernel_shape) diff --git a/python/paddle/fluid/tests/unittests/test_frac_api.py b/python/paddle/fluid/tests/unittests/test_frac_api.py index c3f826bed0e9908d0f7b4b357aea0db6d21c8c8f..b455dbbdb4ddcea7288567d6c035f225afc5623a 100644 --- a/python/paddle/fluid/tests/unittests/test_frac_api.py +++ b/python/paddle/fluid/tests/unittests/test_frac_api.py @@ -34,8 +34,11 @@ class TestFracAPI(unittest.TestCase): def setUp(self): self.set_dtype() self.x_np = np.random.uniform(-3, 3, [2, 3]).astype(self.dtype) - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_api_static(self): paddle.enable_static() @@ -46,7 +49,7 @@ class TestFracAPI(unittest.TestCase): if fluid.core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - res, = exe.run(feed={'X': self.x_np}, fetch_list=[out]) + (res,) = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_frac(self.x_np) np.testing.assert_allclose(out_ref, res, rtol=1e-05) @@ -97,8 +100,11 @@ class TestFracError(unittest.TestCase): def setUp(self): self.x_np = np.random.uniform(-3, 3, [2, 3]).astype('int16') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_error(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_frame_op.py b/python/paddle/fluid/tests/unittests/test_frame_op.py 
index 7f38b952665cbc5e2b7b4269fa65caa6665c2391..fc0c092e91e6397b5453b4ecf1186b8a0cabb2f1 100644
--- a/python/paddle/fluid/tests/unittests/test_frame_op.py
+++ b/python/paddle/fluid/tests/unittests/test_frame_op.py
@@ -44,7 +44,6 @@ def frame_from_librosa(x, frame_length, hop_length, axis=-1):
 
 
 class TestFrameOp(OpTest):
-
     def setUp(self):
         self.op_type = "frame"
         self.python_api = paddle.signal.frame
@@ -57,7 +56,7 @@ class TestFrameOp(OpTest):
         }
 
     def initTestCase(self):
-        input_shape = (150, )
+        input_shape = (150,)
         input_type = 'float64'
         attrs = {
             'frame_length': 50,
@@ -78,9 +77,8 @@ class TestFrameOp(OpTest):
 
 
 class TestCase1(TestFrameOp):
-
     def initTestCase(self):
-        input_shape = (150, )
+        input_shape = (150,)
         input_type = 'float64'
         attrs = {
             'frame_length': 50,
@@ -91,7 +89,6 @@ class TestCase1(TestFrameOp):
 
 
 class TestCase2(TestFrameOp):
-
     def initTestCase(self):
         input_shape = (8, 150)
         input_type = 'float64'
@@ -104,7 +101,6 @@ class TestCase2(TestFrameOp):
 
 
 class TestCase3(TestFrameOp):
-
     def initTestCase(self):
         input_shape = (150, 8)
         input_type = 'float64'
@@ -117,7 +113,6 @@ class TestCase3(TestFrameOp):
 
 
 class TestCase4(TestFrameOp):
-
     def initTestCase(self):
         input_shape = (4, 2, 150)
         input_type = 'float64'
@@ -130,7 +125,6 @@ class TestCase4(TestFrameOp):
 
 
 class TestCase5(TestFrameOp):
-
     def initTestCase(self):
         input_shape = (150, 4, 2)
         input_type = 'float64'
diff --git a/python/paddle/fluid/tests/unittests/test_framework_debug_str.py b/python/paddle/fluid/tests/unittests/test_framework_debug_str.py
index d398b66f8fe5524f973fce79ed3e9f4a8d417de9..6a1e72c072ac2d0eafe7a54add0ff645c183c93b 100644
--- a/python/paddle/fluid/tests/unittests/test_framework_debug_str.py
+++ b/python/paddle/fluid/tests/unittests/test_framework_debug_str.py
@@ -17,7 +17,6 @@ from paddle.fluid.framework import Program
 
 
 class TestDebugStringFramework(unittest.TestCase):
-
     def test_debug_str(self):
         p = Program()
         p.current_block().create_var(name='t', shape=[0, 1])
diff --git a/python/paddle/fluid/tests/unittests/test_frexp_api.py b/python/paddle/fluid/tests/unittests/test_frexp_api.py
index a7c4fa6e2f7d8879a84d5c1e10f64449a3915baa..9604080ffd41ef0e8361f9cdd2deb84a7278b77a 100644
--- a/python/paddle/fluid/tests/unittests/test_frexp_api.py
+++ b/python/paddle/fluid/tests/unittests/test_frexp_api.py
@@ -18,13 +18,15 @@ import paddle.fluid
 
 
 class TestFrexpAPI(unittest.TestCase):
-
     def setUp(self):
         np.random.seed(1024)
         self.rtol = 1e-5
         self.atol = 1e-8
-        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
+        self.place = (
+            paddle.CUDAPlace(0)
+            if paddle.is_compiled_with_cuda()
             else paddle.CPUPlace()
+        )
         self.set_input()
 
     def set_input(self):
@@ -35,8 +37,9 @@ class TestFrexpAPI(unittest.TestCase):
         # enable static graph mode
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            input_data = paddle.fluid.data('X', self.x_np.shape,
-                                           self.x_np.dtype)
+            input_data = paddle.fluid.data(
+                'X', self.x_np.shape, self.x_np.dtype
+            )
             out = paddle.frexp(input_data)
             # compute the static graph result
             exe = paddle.static.Executor(self.place)
@@ -65,7 +68,7 @@ class TestFrexpAPI(unittest.TestCase):
 
 class TestSplitsFloat32Case1(TestFrexpAPI):
     """
-    Test num_or_sections which is an integer and data type is float32. 
+    Test num_or_sections which is an integer and data type is float32.
     """
 
     def set_input(self):
@@ -74,7 +77,7 @@ class TestSplitsFloat32Case1(TestFrexpAPI):
 
 class TestSplitsFloat64Case1(TestFrexpAPI):
     """
-    Test num_or_sections which is an integer and data type is float64. 
+ Test num_or_sections which is an integer and data type is float64. """ def set_input(self): @@ -83,7 +86,7 @@ class TestSplitsFloat64Case1(TestFrexpAPI): class TestSplitsFloat64Case2(TestFrexpAPI): """ - Test num_or_sections which is an integer and data type is float64. + Test num_or_sections which is an integer and data type is float64. """ def set_input(self): diff --git a/python/paddle/fluid/tests/unittests/test_fs_interface.py b/python/paddle/fluid/tests/unittests/test_fs_interface.py index e4682e5f731fe86cfb3ab12f276fe379c6b5d98a..4705e44eba62b927947961493258c93a8dd81876 100644 --- a/python/paddle/fluid/tests/unittests/test_fs_interface.py +++ b/python/paddle/fluid/tests/unittests/test_fs_interface.py @@ -19,7 +19,6 @@ from paddle.distributed.fleet.utils.fs import FS class FSTest(unittest.TestCase): - def _test_method(self, func): args = inspect.getfullargspec(func).args diff --git a/python/paddle/fluid/tests/unittests/test_fsp_op.py b/python/paddle/fluid/tests/unittests/test_fsp_op.py index bac40a98d4b17c6f600ddb8c3b7ede64892cc7c6..3a1de0833a6ff0af612dc3be306e7f2b34bfd074 100644 --- a/python/paddle/fluid/tests/unittests/test_fsp_op.py +++ b/python/paddle/fluid/tests/unittests/test_fsp_op.py @@ -27,16 +27,18 @@ def fsp_matrix(a, b): a_t = a.transpose([0, 2, 3, 1]) a_t = a_t.reshape([batch, h * w, a_channel]) b_t = b.transpose([0, 2, 3, 1]).reshape([batch, h * w, b_channel]) - a_r = a_t.repeat(b_channel, - axis=1).reshape([batch, h * w, b_channel, - a_channel]).transpose([0, 1, 3, 2]) - b_r = b_t.repeat(a_channel, - axis=1).reshape([batch, h * w, a_channel, b_channel]) + a_r = ( + a_t.repeat(b_channel, axis=1) + .reshape([batch, h * w, b_channel, a_channel]) + .transpose([0, 1, 3, 2]) + ) + b_r = b_t.repeat(a_channel, axis=1).reshape( + [batch, h * w, a_channel, b_channel] + ) return np.mean(a_r * b_r, axis=1) class TestFSPOp(OpTest): - def setUp(self): self.op_type = "fsp" self.initTestCase() @@ -59,25 +61,24 @@ class TestFSPOp(OpTest): class BadInputTest(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): data = fluid.layers.data(name='data', shape=[3, 32, 32]) feature_map_0 = [1, 2, 3] - feature_map_1 = fluid.layers.conv2d(data, - num_filters=2, - filter_size=3) + feature_map_1 = fluid.layers.conv2d( + data, num_filters=2, filter_size=3 + ) loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1) self.assertRaises(TypeError, test_bad_x) def test_bad_y(): data = fluid.layers.data(name='data', shape=[3, 32, 32]) - feature_map_0 = fluid.layers.conv2d(data, - num_filters=2, - filter_size=3) + feature_map_0 = fluid.layers.conv2d( + data, num_filters=2, filter_size=3 + ) feature_map_1 = [1, 2, 3] loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1) diff --git a/python/paddle/fluid/tests/unittests/test_ftrl_op.py b/python/paddle/fluid/tests/unittests/test_ftrl_op.py index 7feabc4be600d6768c1264da6034923f6bf826f6..e346b4f2d118edbaf1fba221aaff3ea32c6e92b9 100644 --- a/python/paddle/fluid/tests/unittests/test_ftrl_op.py +++ b/python/paddle/fluid/tests/unittests/test_ftrl_op.py @@ -29,24 +29,38 @@ def ftrl_step(param, grad, rows, sq_accum, lin_accum, lr, l1, l2, lr_power): new_accum = sq_accum_hit + grad * grad if lr_power == -0.5: - lin_accum_updated = lin_accum_hit + grad - ( - (np.sqrt(new_accum) - np.sqrt(sq_accum_hit)) / lr) * param_hit + lin_accum_updated = ( + lin_accum_hit + + grad + - ((np.sqrt(new_accum) - np.sqrt(sq_accum_hit)) / lr) * param_hit + ) else: - lin_accum_updated = lin_accum_hit + grad - ( - 
(np.power(new_accum, -lr_power) - np.power(sq_accum_hit, -lr_power)) - / lr) * param_hit + lin_accum_updated = ( + lin_accum_hit + + grad + - ( + ( + np.power(new_accum, -lr_power) + - np.power(sq_accum_hit, -lr_power) + ) + / lr + ) + * param_hit + ) x = l1 * np.sign(lin_accum_updated) - lin_accum_updated if lr_power == -0.5: y = (np.sqrt(new_accum) / lr) + (2 * l2) pre_shrink = x / y param_updated = np.where( - np.abs(lin_accum_updated) > l1, pre_shrink, 0.0) + np.abs(lin_accum_updated) > l1, pre_shrink, 0.0 + ) else: y = (np.power(new_accum, -lr_power) / lr) + (2 * l2) pre_shrink = x / y param_updated = np.where( - np.abs(lin_accum_updated) > l1, pre_shrink, 0.0) + np.abs(lin_accum_updated) > l1, pre_shrink, 0.0 + ) sq_accum_updated = sq_accum_hit + grad * grad @@ -63,7 +77,6 @@ def ftrl_step(param, grad, rows, sq_accum, lin_accum, lr, l1, l2, lr_power): class TestFTRLOp(OpTest): - def setUp(self): self.op_type = "ftrl" rows = 102 @@ -81,22 +94,23 @@ class TestFTRLOp(OpTest): 'SquaredAccumulator': sq_accum, 'LinearAccumulator': linear_accum, 'Grad': g, - 'LearningRate': lr + 'LearningRate': lr, } self.attrs = { 'l1': l1, 'l2': l2, 'lr_power': lr_power, - 'learning_rate': lr + 'learning_rate': lr, } param_out, sq_accum_out, lin_accum_out = ftrl_step( - w, g, range(rows), sq_accum, linear_accum, lr, l1, l2, lr_power) + w, g, range(rows), sq_accum, linear_accum, lr, l1, l2, lr_power + ) self.outputs = { 'ParamOut': param_out, 'SquaredAccumOut': sq_accum_out, - 'LinearAccumOut': lin_accum_out + 'LinearAccumOut': lin_accum_out, } def test_check_output(self): @@ -104,7 +118,6 @@ class TestFTRLOp(OpTest): class TestSparseFTRLOp(unittest.TestCase): - def setUp(self): self.lr_power = -0.5 @@ -150,22 +163,32 @@ class TestSparseFTRLOp(unittest.TestCase): # calculate ground-truth answer param_out, sq_accum_out, lin_accum_out = ftrl_step( - param_array, grad_array, rows, sq_accum_array, lin_accum_array, lr, - l1, l2, lr_power) + param_array, + grad_array, + rows, + sq_accum_array, + lin_accum_array, + lr, + l1, + l2, + lr_power, + ) # create and run operator - op = Operator("ftrl", - Param='Param', - Grad='Grad', - ParamOut='Param', - SquaredAccumulator='SquaredAccumulator', - SquaredAccumOut='SquaredAccumulator', - LinearAccumulator='LinearAccumulator', - LinearAccumOut='LinearAccumulator', - LearningRate='LearningRate', - l1=l1, - l2=l2, - lr_power=lr_power) + op = Operator( + "ftrl", + Param='Param', + Grad='Grad', + ParamOut='Param', + SquaredAccumulator='SquaredAccumulator', + SquaredAccumOut='SquaredAccumulator', + LinearAccumulator='LinearAccumulator', + LinearAccumOut='LinearAccumulator', + LearningRate='LearningRate', + l1=l1, + l2=l2, + lr_power=lr_power, + ) op.run(scope, place) @@ -176,15 +199,15 @@ class TestSparseFTRLOp(unittest.TestCase): for i in range(height): for j in range(row_numel): - self.assertAlmostEqual(param_out[i][j], - param_array[i][j], - places=4) - self.assertAlmostEqual(sq_accum_out[i][j], - sq_accum_array[i][j], - places=4) - self.assertAlmostEqual(lin_accum_out[i][j], - lin_accum_array[i][j], - places=4) + self.assertAlmostEqual( + param_out[i][j], param_array[i][j], places=4 + ) + self.assertAlmostEqual( + sq_accum_out[i][j], sq_accum_array[i][j], places=4 + ) + self.assertAlmostEqual( + lin_accum_out[i][j], lin_accum_array[i][j], places=4 + ) def init_kernel(self): pass @@ -198,12 +221,12 @@ class TestSparseFTRLOp(unittest.TestCase): class TestSparseFTRLOp2(TestSparseFTRLOp): - def init_kernel(self): self.lr_power = -0.6 if __name__ == "__main__": import paddle + 
paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index fe565140e70c473d20192f91cfc50b86709ed188..c24ee6791b14a9d4419c441a91278872118caeb8 100644 --- a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -23,16 +23,16 @@ from paddle.fluid.framework import _test_eager_guard class TestFullOp(unittest.TestCase): - """ Test fill_any_like op(whose API is full_like) for attr out. """ + """Test fill_any_like op(whose API is full_like) for attr out.""" def test_attr_tensor_API(self): startup_program = Program() train_program = Program() with program_guard(train_program, startup_program): fill_value = 2.0 - input = paddle.fluid.data(name='input', - dtype='float32', - shape=[2, 3]) + input = paddle.fluid.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.full_like(input, fill_value) output_dtype = paddle.full_like(input, fill_value, dtype='float32') @@ -44,14 +44,15 @@ class TestFullOp(unittest.TestCase): img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) - self.assertTrue(not (out_np - np.full_like(img, fill_value)).any(), - msg="full_like output is wrong, out = " + - str(out_np)) + self.assertTrue( + not (out_np - np.full_like(img, fill_value)).any(), + msg="full_like output is wrong, out = " + str(out_np), + ) def test_full_like_imperative(self): paddle.disable_static() @@ -73,24 +74,25 @@ class TestFullOp(unittest.TestCase): class TestFullOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - #for ci coverage + # for ci coverage - input_data = paddle.fluid.data(name='input', - dtype='float32', - shape=[2, 3]) + input_data = paddle.fluid.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.full_like(input_data, 2.0) def test_input_dtype(): paddle.full_like - self.assertRaises(TypeError, - paddle.full_like, - x=input_data, - fill_value=2, - dtype='uint4') + self.assertRaises( + TypeError, + paddle.full_like, + x=input_data, + fill_value=2, + dtype='uint4', + ) class TestFullLikeOp1(OpTest): @@ -107,7 +109,7 @@ class TestFullLikeOp1(OpTest): self.outputs = {'Out': out} self.attrs = { 'value': self.fill_value, - 'dtype': convert_np_dtype_to_dtype_(self.dtype) + 'dtype': convert_np_dtype_to_dtype_(self.dtype), } def init_data(self): @@ -120,7 +122,6 @@ class TestFullLikeOp1(OpTest): class TestFullLikeOp2(TestFullLikeOp1): - def init_data(self): self.fill_value = 1000 self.shape = [1024, 1024] @@ -128,25 +129,26 @@ class TestFullLikeOp2(TestFullLikeOp1): class TestFullLikeOp3(TestFullLikeOp1): - def init_data(self): self.fill_value = 8888 self.shape = [5000, 5000] self.dtype = np.int64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFullLikeOp4(unittest.TestCase): - def test_skip_data_transform(self): paddle.disable_static() with _test_eager_guard(): - x = paddle.to_tensor([1., 2., 3., 4.], - place=paddle.CUDAPinnedPlace()) - out = paddle.full_like(x, 1.) 
+ x = paddle.to_tensor( + [1.0, 2.0, 3.0, 4.0], place=paddle.CUDAPinnedPlace() + ) + out = paddle.full_like(x, 1.0) self.assertTrue( - (out.numpy() == np.ones([4]).astype(np.float32)).all(), True) + (out.numpy() == np.ones([4]).astype(np.float32)).all(), True + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py index 1f20cfd6c726c3f196c9cd978e2cd5fda97ea1a2..a8c54188e48bbdbe5e11fce47df10c0032f66a77 100644 --- a/python/paddle/fluid/tests/unittests/test_full_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_op.py @@ -23,45 +23,44 @@ from paddle.fluid.framework import _test_eager_guard # Test python API class TestFullAPI(unittest.TestCase): - def test_api(self): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) - shape_tensor_int64 = fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1) - out_2 = paddle.full(shape=[1, positive_2_int32], - dtype="float32", - fill_value=1.1) + out_2 = paddle.full( + shape=[1, positive_2_int32], dtype="float32", fill_value=1.1 + ) - out_3 = paddle.full(shape=[1, positive_2_int64], - dtype="float32", - fill_value=1.1) + out_3 = paddle.full( + shape=[1, positive_2_int64], dtype="float32", fill_value=1.1 + ) - out_4 = paddle.full(shape=shape_tensor_int32, - dtype="float32", - fill_value=1.2) + out_4 = paddle.full( + shape=shape_tensor_int32, dtype="float32", fill_value=1.2 + ) - out_5 = paddle.full(shape=shape_tensor_int64, - dtype="float32", - fill_value=1.1) + out_5 = paddle.full( + shape=shape_tensor_int64, dtype="float32", fill_value=1.1 + ) - out_6 = paddle.full(shape=shape_tensor_int64, - dtype=np.float32, - fill_value=1.1) + out_6 = paddle.full( + shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1 + ) val = fluid.layers.fill_constant(shape=[1], dtype=np.float32, value=1.1) - out_7 = paddle.full(shape=shape_tensor_int64, - dtype=np.float32, - fill_value=val) + out_7 = paddle.full( + shape=shape_tensor_int64, dtype=np.float32, fill_value=val + ) exe = fluid.Executor(place=fluid.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( @@ -70,7 +69,8 @@ class TestFullAPI(unittest.TestCase): "shape_tensor_int32": np.array([1, 2]).astype("int32"), "shape_tensor_int64": np.array([1, 2]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], + ) assert np.array_equal(res_1, np.full([1, 2], 1.1, dtype="float32")) assert np.array_equal(res_2, np.full([1, 2], 1.1, dtype="float32")) @@ -85,89 +85,104 @@ class TestFullAPI(unittest.TestCase): with _test_eager_guard(): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2) - positive_4_int64 = fluid.layers.fill_constant([1], "int64", 4, - True) - - out_1 = paddle.full(shape=[1, 2], - dtype="float32", - fill_value=1.1) - - out_2 = paddle.full(shape=[1, positive_2_int32.item()], - dtype="float32", - fill_value=1.1) - - out_3 = paddle.full(shape=[1, positive_2_int64.item()], - dtype="float32", - fill_value=1.1) - - 
out_4 = paddle.full(shape=[1, 2], - dtype="float32", - fill_value=1.2) - - out_5 = paddle.full(shape=[1, 2], - dtype="float32", - fill_value=1.1) - - out_6 = paddle.full(shape=[1, 2], - dtype=np.float32, - fill_value=1.1) - - val = fluid.layers.fill_constant(shape=[1], - dtype=np.float32, - value=1.1) - out_7 = paddle.full(shape=[1, 2], - dtype=np.float32, - fill_value=val) - - out_8 = paddle.full(shape=positive_2_int32, - dtype="float32", - fill_value=1.1) - - out_9 = paddle.full(shape=[ - positive_2_int32, positive_2_int64, positive_4_int64 - ], - dtype="float32", - fill_value=1.1) + positive_4_int64 = fluid.layers.fill_constant( + [1], "int64", 4, True + ) + + out_1 = paddle.full( + shape=[1, 2], dtype="float32", fill_value=1.1 + ) + + out_2 = paddle.full( + shape=[1, positive_2_int32.item()], + dtype="float32", + fill_value=1.1, + ) + + out_3 = paddle.full( + shape=[1, positive_2_int64.item()], + dtype="float32", + fill_value=1.1, + ) + + out_4 = paddle.full( + shape=[1, 2], dtype="float32", fill_value=1.2 + ) + + out_5 = paddle.full( + shape=[1, 2], dtype="float32", fill_value=1.1 + ) + + out_6 = paddle.full( + shape=[1, 2], dtype=np.float32, fill_value=1.1 + ) + + val = fluid.layers.fill_constant( + shape=[1], dtype=np.float32, value=1.1 + ) + out_7 = paddle.full( + shape=[1, 2], dtype=np.float32, fill_value=val + ) + + out_8 = paddle.full( + shape=positive_2_int32, dtype="float32", fill_value=1.1 + ) + + out_9 = paddle.full( + shape=[ + positive_2_int32, + positive_2_int64, + positive_4_int64, + ], + dtype="float32", + fill_value=1.1, + ) # test for numpy.float64 as fill_value - out_10 = paddle.full_like(out_7, - dtype=np.float32, - fill_value=np.abs(1.1)) - - assert np.array_equal(out_1, - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(out_2, - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(out_3, - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(out_4, - np.full([1, 2], 1.2, dtype="float32")) - assert np.array_equal(out_5, - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(out_6, - np.full([1, 2], 1.1, dtype="float32")) - assert np.array_equal(out_7, - np.full([1, 2], 1.1, dtype="float32")) + out_10 = paddle.full_like( + out_7, dtype=np.float32, fill_value=np.abs(1.1) + ) + + assert np.array_equal( + out_1, np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + out_2, np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + out_3, np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + out_4, np.full([1, 2], 1.2, dtype="float32") + ) + assert np.array_equal( + out_5, np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + out_6, np.full([1, 2], 1.1, dtype="float32") + ) + assert np.array_equal( + out_7, np.full([1, 2], 1.1, dtype="float32") + ) assert np.array_equal(out_8, np.full([2], 1.1, dtype="float32")) - assert np.array_equal(out_9, - np.full([2, 2, 4], 1.1, dtype="float32")) - assert np.array_equal(out_10, - np.full([1, 2], 1.1, dtype="float32")) + assert np.array_equal( + out_9, np.full([2, 2, 4], 1.1, dtype="float32") + ) + assert np.array_equal( + out_10, np.full([1, 2], 1.1, dtype="float32") + ) class TestFullOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - #for ci coverage - self.assertRaises(TypeError, - paddle.full, - shape=[1], - fill_value=5, - dtype='uint4') + # for ci coverage + self.assertRaises( + TypeError, paddle.full, shape=[1], fill_value=5, dtype='uint4' + ) # The argument dtype of full must be one 
of bool, float16, - #float32, float64, uint8, int16, int32 or int64 + # float32, float64, uint8, int16, int32 or int64 # The argument shape's type of full_op must be list, tuple or Variable. def test_shape_type(): @@ -177,17 +192,17 @@ class TestFullOpError(unittest.TestCase): # The shape dtype of full op must be int32 or int64. def test_shape_tensor_dtype(): - shape = fluid.data(name="shape_tensor", - shape=[2], - dtype="float32") + shape = fluid.data( + name="shape_tensor", shape=[2], dtype="float32" + ) paddle.full(shape=shape, dtype="float32", fill_value=1) self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data(name="shape_tensor_list", - shape=[1], - dtype="bool") + shape = fluid.data( + name="shape_tensor_list", shape=[1], dtype="bool" + ) paddle.full(shape=[shape, 2], dtype="float32", fill_value=1) self.assertRaises(TypeError, test_shape_tensor_list_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_function_hook.py b/python/paddle/fluid/tests/unittests/test_function_hook.py index b884d62e7d32ed6f4a2bd192c9cebce80f37ddfd..7ba230233a4152cbcdafe4e594cb75da5eff27d0 100644 --- a/python/paddle/fluid/tests/unittests/test_function_hook.py +++ b/python/paddle/fluid/tests/unittests/test_function_hook.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestCapture: - def __init__(self): self.list = [] @@ -40,7 +39,6 @@ def grad_hook(grad): class TestBakcwardFunctionHookError(unittest.TestCase): - def func_hook(self): input_data = np.ones([4, 4]).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv1d.py b/python/paddle/fluid/tests/unittests/test_functional_conv1d.py index 251ac52451d7eeb0e1fa6326f788dc8abd2a2ad3..6869c511b9fbef569b64a5a7b65b290d8d219c28 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv1d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv1d.py @@ -21,7 +21,6 @@ from unittest import TestCase class TestFunctionalConv1DError(TestCase): - def setUp(self): self.input = [] self.filter = [] @@ -36,16 +35,21 @@ class TestFunctionalConv1DError(TestCase): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv1d(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv1d( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_exception(self): with self.assertRaises(ValueError): @@ -53,7 +57,6 @@ class TestFunctionalConv1DError(TestCase): class TestFunctionalConv1DErrorCase1(TestFunctionalConv1DError): - def setUp(self): self.input = np.random.randn(1, 3, 3) self.filter = np.random.randn(3, 3, 1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv1d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv1d_transpose.py index 98daff216a760c34f2c4684134ee34fd88024979..431c940f0b4a57d7b07c0872b0100208d83cf190 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv1d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv1d_transpose.py @@ -21,7 +21,6 @@ from unittest import TestCase class 
TestFunctionalConv1DError(TestCase): - def setUp(self): self.input = [] self.filter = [] @@ -36,16 +35,21 @@ class TestFunctionalConv1DError(TestCase): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv1d_transpose(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv1d_transpose( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_exception(self): with self.assertRaises(ValueError): @@ -53,7 +57,6 @@ class TestFunctionalConv1DError(TestCase): class TestFunctionalConv1DErrorCase1(TestFunctionalConv1DError): - def setUp(self): self.input = np.random.randn(1, 3, 3) self.filter = np.random.randn(3, 3, 1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py index 6c0f526f236cb0a632f534a4657bc5ce335c598a..1a92a9babb727590a958bd4e06d2e97baf4c89df 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py @@ -41,27 +41,34 @@ class TestFunctionalConv2D(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 2 + filter_shape = (self.filter_shape,) * 2 else: filter_shape = tuple(self.filter_shape) self.weight = np.random.uniform( - -1, 1, (self.out_channels, self.in_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.out_channels, self.in_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, (self.out_channels,)).astype( + self.dtype + ) - self.channel_last = (self.data_format == "NHWC") + self.channel_last = self.data_format == "NHWC" if self.channel_last: - self.input_shape = (self.batch_size, ) + self.spatial_shape + ( - self.in_channels, ) + self.input_shape = ( + (self.batch_size,) + self.spatial_shape + (self.in_channels,) + ) else: - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) def static_graph_case_1(self): main = fluid.Program() @@ -69,11 +76,17 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) y = fluid.layers.conv2d( x, self.out_channels, @@ -84,12 +97,14 @@ class TestFunctionalConv2D(TestCase): groups=self.groups, param_attr=I.NumpyArrayInitializer(self.weight), bias_attr=False - if self.no_bias else I.NumpyArrayInitializer(self.bias), + 
if self.no_bias + else I.NumpyArrayInitializer(self.bias), act=self.act, - data_format=self.data_format) + data_format=self.data_format, + ) exe = fluid.Executor(self.place) exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): @@ -98,24 +113,32 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight.shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight.shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) - y = F.conv2d(x, - weight, - None if self.no_bias else bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d( + x, + weight, + None if self.no_bias else bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) @@ -125,7 +148,7 @@ class TestFunctionalConv2D(TestCase): feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: feed_dict["bias"] = self.bias - out, = exe.run(main, feed=feed_dict, fetch_list=[y]) + (out,) = exe.run(main, feed=feed_dict, fetch_list=[y]) return out def dygraph_case(self): @@ -133,14 +156,16 @@ class TestFunctionalConv2D(TestCase): x = dg.to_variable(self.input) weight = dg.to_variable(self.weight) bias = None if self.no_bias else dg.to_variable(self.bias) - y = F.conv2d(x, - weight, - bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d( + x, + weight, + bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) @@ -160,8 +185,9 @@ class TestFunctionalConv2D(TestCase): self.place = fluid.CPUPlace() self._test_identity() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu(self): self.place = fluid.CUDAPlace(0) self._test_identity() @@ -191,12 +217,14 @@ class TestFunctionalConv2DError(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 2 + filter_shape = (self.filter_shape,) * 2 else: filter_shape = tuple(self.filter_shape) - self.weight_shape = (self.out_channels, - self.in_channels // self.groups) + filter_shape - self.bias_shape = (self.out_channels, ) + self.weight_shape = ( + self.out_channels, + self.in_channels // self.groups, + ) + filter_shape + self.bias_shape = (self.out_channels,) def static_graph_case(self): main = fluid.Program() @@ -205,28 +233,35 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = 
fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) - y = F.conv2d(x, - weight, - None if self.no_bias else bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d( + x, + weight, + None if self.no_bias else bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) class TestFunctionalConv2DCase2(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -242,7 +277,6 @@ class TestFunctionalConv2DCase2(TestFunctionalConv2D): class TestFunctionalConv2DCase3(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -258,7 +292,6 @@ class TestFunctionalConv2DCase3(TestFunctionalConv2D): class TestFunctionalConv2DCase4(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -274,7 +307,6 @@ class TestFunctionalConv2DCase4(TestFunctionalConv2D): class TestFunctionalConv2DCase5(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -290,7 +322,6 @@ class TestFunctionalConv2DCase5(TestFunctionalConv2D): class TestFunctionalConv2DCase6(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -306,7 +337,6 @@ class TestFunctionalConv2DCase6(TestFunctionalConv2D): class TestFunctionalConv2DCase7(TestFunctionalConv2D): - def setUp(self): self.in_channels = 6 self.out_channels = 8 @@ -322,7 +352,6 @@ class TestFunctionalConv2DCase7(TestFunctionalConv2D): class TestFunctionalConv2DCase8(TestFunctionalConv2D): - def setUp(self): self.in_channels = 6 self.out_channels = 12 @@ -338,7 +367,6 @@ class TestFunctionalConv2DCase8(TestFunctionalConv2D): class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -354,7 +382,6 @@ class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -370,7 +397,6 @@ class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 4 self.out_channels = 3 @@ -386,7 +412,6 @@ class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -402,7 +427,6 @@ class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -418,7 +442,6 @@ class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError): - def setUp(self): self.in_channels = -5 self.out_channels = 5 @@ -434,7 +457,6 @@ class 
TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase10(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -450,7 +472,6 @@ class TestFunctionalConv2DErrorCase10(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase11(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -466,7 +487,6 @@ class TestFunctionalConv2DErrorCase11(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase12(TestCase): - def setUp(self): self.input = np.array([]) self.filter = np.array([]) @@ -485,38 +505,45 @@ class TestFunctionalConv2DErrorCase12(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): x = fluid.data("input", self.input.shape, dtype=paddle.float32) - y = fluid.layers.conv2d(x, - self.num_filters, - self.filter_size, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, - groups=self.groups, - param_attr=I.NumpyArrayInitializer( - self.filter), - bias_attr=False if self.bias is None - else I.NumpyArrayInitializer(self.bias), - act=None, - data_format=self.data_format) + y = fluid.layers.conv2d( + x, + self.num_filters, + self.filter_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + param_attr=I.NumpyArrayInitializer(self.filter), + bias_attr=False + if self.bias is None + else I.NumpyArrayInitializer(self.bias), + act=None, + data_format=self.data_format, + ) exe = fluid.Executor() exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def dygraph_case(self): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv2d(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv2d( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_dygraph_exception(self): with self.assertRaises(ValueError): @@ -528,7 +555,6 @@ class TestFunctionalConv2DErrorCase12(TestCase): class TestFunctionalConv2DErrorCase13(TestFunctionalConv2DErrorCase12): - def setUp(self): self.input = np.random.randn(1, 3, 3, 3) self.filter = np.random.randn(3, 3, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py index dce6a37c6bbb8ec2d537d4616ee3d16184082660..d377be24ddf8716e382a9254334e3532ab4be1a0 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py @@ -43,27 +43,34 @@ class TestFunctionalConv2D(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 2 + filter_shape = (self.filter_shape,) * 2 else: filter_shape = tuple(self.filter_shape) self.weight = np.random.uniform( - -1, 1, (self.in_channels, self.out_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.in_channels, self.out_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not 
self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, (self.out_channels,)).astype( + self.dtype + ) - self.channel_last = (self.data_format == "NHWC") + self.channel_last = self.data_format == "NHWC" if self.channel_last: - self.input_shape = (self.batch_size, ) + self.spatial_shape + ( - self.in_channels, ) + self.input_shape = ( + (self.batch_size,) + self.spatial_shape + (self.in_channels,) + ) else: - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) def static_graph_case_1(self): main = fluid.Program() @@ -71,11 +78,17 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) y = fluid.layers.conv2d_transpose( x, self.out_channels, @@ -87,11 +100,13 @@ class TestFunctionalConv2D(TestCase): groups=self.groups, param_attr=I.NumpyArrayInitializer(self.weight), bias_attr=False - if self.no_bias else I.NumpyArrayInitializer(self.bias), - data_format=self.data_format) + if self.no_bias + else I.NumpyArrayInitializer(self.bias), + data_format=self.data_format, + ) exe = fluid.Executor(self.place) exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): @@ -100,31 +115,39 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight.shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight.shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) - y = F.conv2d_transpose(x, - weight, - None if self.no_bias else bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d_transpose( + x, + weight, + None if self.no_bias else bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) exe = fluid.Executor(self.place) exe.run(start) feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: feed_dict["bias"] = self.bias - out, = exe.run(main, feed=feed_dict, fetch_list=[y]) + (out,) = exe.run(main, feed=feed_dict, fetch_list=[y]) return out def dygraph_case(self): @@ -132,15 +155,17 @@ class TestFunctionalConv2D(TestCase): x = 
dg.to_variable(self.input) weight = dg.to_variable(self.weight) bias = None if self.no_bias else dg.to_variable(self.bias) - y = F.conv2d_transpose(x, - weight, - bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d_transpose( + x, + weight, + bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) out = y.numpy() return out @@ -160,14 +185,16 @@ class TestFunctionalConv2D(TestCase): with _test_eager_guard(): self.test_identity_cpu() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu(self): self.place = fluid.CUDAPlace(0) self._test_identity() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu_check_eager(self): with _test_eager_guard(): self.test_identity_gpu() @@ -198,12 +225,14 @@ class TestFunctionalConv2DError(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 2 + filter_shape = (self.filter_shape,) * 2 else: filter_shape = tuple(self.filter_shape) - self.weight_shape = (self.in_channels, - self.out_channels // self.groups) + filter_shape - self.bias_shape = (self.out_channels, ) + self.weight_shape = ( + self.in_channels, + self.out_channels // self.groups, + ) + filter_shape + self.bias_shape = (self.out_channels,) def static_graph_case(self): main = fluid.Program() @@ -212,29 +241,36 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = fluid.data("input", (-1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) - y = F.conv2d_transpose(x, - weight, - None if self.no_bias else bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv2d_transpose( + x, + weight, + None if self.no_bias else bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) class TestFunctionalConv2DCase2(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -248,7 +284,6 @@ class TestFunctionalConv2DCase2(TestFunctionalConv2D): class TestFunctionalConv2DCase3(TestFunctionalConv2D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -262,7 +297,6 @@ class TestFunctionalConv2DCase3(TestFunctionalConv2D): class TestFunctionalConv2DCase4(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -276,7 +310,6 @@ 
class TestFunctionalConv2DCase4(TestFunctionalConv2D): class TestFunctionalConv2DCase5(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -290,7 +323,6 @@ class TestFunctionalConv2DCase5(TestFunctionalConv2D): class TestFunctionalConv2DCase6(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -304,7 +336,6 @@ class TestFunctionalConv2DCase6(TestFunctionalConv2D): class TestFunctionalConv2DCase7(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 4 @@ -318,7 +349,6 @@ class TestFunctionalConv2DCase7(TestFunctionalConv2D): class TestFunctionalConv2DCase8(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 4 @@ -333,7 +363,6 @@ class TestFunctionalConv2DCase8(TestFunctionalConv2D): class TestFunctionalConv2DCase9(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -347,7 +376,6 @@ class TestFunctionalConv2DCase9(TestFunctionalConv2D): class TestFunctionalConv2DCase10(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -361,7 +389,6 @@ class TestFunctionalConv2DCase10(TestFunctionalConv2D): class TestFunctionalConv2DCase11(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -375,7 +402,6 @@ class TestFunctionalConv2DCase11(TestFunctionalConv2D): class TestFunctionalConv2DCase12(TestFunctionalConv2D): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -389,7 +415,6 @@ class TestFunctionalConv2DCase12(TestFunctionalConv2D): class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -403,7 +428,6 @@ class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -417,7 +441,6 @@ class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -431,7 +454,6 @@ class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase5(TestFunctionalConv2DError): - def setUp(self): self.in_channels = -2 self.out_channels = 5 @@ -445,7 +467,6 @@ class TestFunctionalConv2DErrorCase5(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 4 self.out_channels = 5 @@ -460,7 +481,6 @@ class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 4 self.out_channels = 5 @@ -474,7 +494,6 @@ class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError): - def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -488,7 +507,6 @@ class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError): class TestFunctionalConv2DErrorCase10(TestCase): - def setUp(self): self.input = np.array([]) self.filter = np.array([]) @@ -507,38 +525,45 @@ class TestFunctionalConv2DErrorCase10(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): x = fluid.data("input", self.input.shape, dtype=paddle.float32) - y = fluid.layers.conv2d(x, - self.num_filters, - self.filter_size, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, - 
groups=self.groups, - param_attr=I.NumpyArrayInitializer( - self.filter), - bias_attr=False if self.bias is None - else I.NumpyArrayInitializer(self.bias), - act=None, - data_format=self.data_format) + y = fluid.layers.conv2d( + x, + self.num_filters, + self.filter_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + param_attr=I.NumpyArrayInitializer(self.filter), + bias_attr=False + if self.bias is None + else I.NumpyArrayInitializer(self.bias), + act=None, + data_format=self.data_format, + ) exe = fluid.Executor() exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def dygraph_case(self): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv2d_transpose(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv2d_transpose( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_dygraph_exception(self): with self.assertRaises(ValueError): @@ -554,7 +579,6 @@ class TestFunctionalConv2DErrorCase10(TestCase): class TestFunctionalConv2DErrorCase11(TestFunctionalConv2DErrorCase10): - def setUp(self): self.input = np.random.randn(1, 3, 3, 3) self.filter = np.random.randn(3, 3, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py index 9ecbf2bf46c5d11d7bad2aa3244186b3dd171e2f..d2d7074a4b1a3343e5c44aaf0566c24f71757ef2 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py @@ -41,27 +41,34 @@ class TestFunctionalConv3D(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 3 + filter_shape = (self.filter_shape,) * 3 else: filter_shape = tuple(self.filter_shape) self.weight = np.random.uniform( - -1, 1, (self.out_channels, self.in_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.out_channels, self.in_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, (self.out_channels,)).astype( + self.dtype + ) - self.channel_last = (self.data_format == "NDHWC") + self.channel_last = self.data_format == "NDHWC" if self.channel_last: - self.input_shape = (self.batch_size, ) + self.spatial_shape + ( - self.in_channels, ) + self.input_shape = ( + (self.batch_size,) + self.spatial_shape + (self.in_channels,) + ) else: - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) def static_graph_case_1(self): main = fluid.Program() @@ -69,11 +76,17 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with 
fluid.program_guard(main, start): if self.channel_last: - x = fluid.data("input", (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) y = fluid.layers.conv3d( x, self.out_channels, @@ -84,12 +97,14 @@ class TestFunctionalConv3D(TestCase): groups=self.groups, param_attr=I.NumpyArrayInitializer(self.weight), bias_attr=False - if self.no_bias else I.NumpyArrayInitializer(self.bias), + if self.no_bias + else I.NumpyArrayInitializer(self.bias), act=self.act, - data_format=self.data_format) + data_format=self.data_format, + ) exe = fluid.Executor(self.place) exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): @@ -98,25 +113,32 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data("input", - (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight.shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight.shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) - y = F.conv3d(x, - weight, - None if self.no_bias else bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv3d( + x, + weight, + None if self.no_bias else bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) @@ -126,7 +148,7 @@ class TestFunctionalConv3D(TestCase): feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: feed_dict["bias"] = self.bias - out, = exe.run(main, feed=feed_dict, fetch_list=[y]) + (out,) = exe.run(main, feed=feed_dict, fetch_list=[y]) return out def dygraph_case(self): @@ -134,14 +156,16 @@ class TestFunctionalConv3D(TestCase): x = dg.to_variable(self.input) weight = dg.to_variable(self.weight) bias = None if self.no_bias else dg.to_variable(self.bias) - y = F.conv3d(x, - weight, - bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv3d( + x, + weight, + bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) @@ -161,8 +185,9 @@ class TestFunctionalConv3D(TestCase): self.place = fluid.CPUPlace() self._test_identity() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu(self): self.place = fluid.CUDAPlace(0) self._test_identity() @@ -192,12 +217,14 @@ class TestFunctionalConv3DError(TestCase): def prepare(self): if 
isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 3 + filter_shape = (self.filter_shape,) * 3 else: filter_shape = tuple(self.filter_shape) - self.weight_shape = (self.out_channels, - self.in_channels // self.groups) + filter_shape - self.bias_shape = (self.out_channels, ) + self.weight_shape = ( + self.out_channels, + self.in_channels // self.groups, + ) + filter_shape + self.bias_shape = (self.out_channels,) def static_graph_case(self): main = fluid.Program() @@ -206,32 +233,38 @@ class TestFunctionalConv3DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data("input", - (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) - y = F.conv3d(x, - weight, - None if self.no_bias else bias, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv3d( + x, + weight, + None if self.no_bias else bias, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) class TestFunctionalConv3DCase2(TestFunctionalConv3D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -246,7 +279,6 @@ class TestFunctionalConv3DCase2(TestFunctionalConv3D): class TestFunctionalConv3DCase3(TestFunctionalConv3D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -261,7 +293,6 @@ class TestFunctionalConv3DCase3(TestFunctionalConv3D): class TestFunctionalConv3DCase4(TestFunctionalConv3D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -276,7 +307,6 @@ class TestFunctionalConv3DCase4(TestFunctionalConv3D): class TestFunctionalConv3DCase5(TestFunctionalConv3D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -291,7 +321,6 @@ class TestFunctionalConv3DCase5(TestFunctionalConv3D): class TestFunctionalConv3DCase6(TestFunctionalConv3D): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -306,7 +335,6 @@ class TestFunctionalConv3DCase6(TestFunctionalConv3D): class TestFunctionalConv3DCase7(TestFunctionalConv3D): - def setUp(self): self.in_channels = 6 self.out_channels = 8 @@ -321,7 +349,6 @@ class TestFunctionalConv3DCase7(TestFunctionalConv3D): class TestFunctionalConv3DCase8(TestFunctionalConv3D): - def setUp(self): self.in_channels = 6 self.out_channels = 12 @@ -337,7 +364,6 @@ class TestFunctionalConv3DCase8(TestFunctionalConv3D): class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -352,7 +378,6 @@ class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError): - def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -367,7 +392,6 @@ class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError): - def setUp(self): self.in_channels 
= 4 self.out_channels = 3 @@ -382,7 +406,6 @@ class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -397,7 +420,6 @@ class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -412,7 +434,6 @@ class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError): - def setUp(self): self.in_channels = -5 self.out_channels = 5 @@ -427,7 +448,6 @@ class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError): - def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -442,7 +462,6 @@ class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError): class TestFunctionalConv3DErrorCase11(TestCase): - def setUp(self): self.input = np.array([]) self.filter = np.array([]) @@ -461,38 +480,45 @@ class TestFunctionalConv3DErrorCase11(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): x = fluid.data("input", self.input.shape, dtype=paddle.float32) - y = fluid.layers.conv3d(x, - self.num_filters, - self.filter_size, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, - groups=self.groups, - param_attr=I.NumpyArrayInitializer( - self.filter), - bias_attr=False if self.bias is None - else I.NumpyArrayInitializer(self.bias), - act=None, - data_format=self.data_format) + y = fluid.layers.conv3d( + x, + self.num_filters, + self.filter_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + param_attr=I.NumpyArrayInitializer(self.filter), + bias_attr=False + if self.bias is None + else I.NumpyArrayInitializer(self.bias), + act=None, + data_format=self.data_format, + ) exe = fluid.Executor() exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def dygraph_case(self): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv3d(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv3d( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_dygraph_exception(self): with self.assertRaises(ValueError): @@ -504,7 +530,6 @@ class TestFunctionalConv3DErrorCase11(TestCase): class TestFunctionalConv3DErrorCase12(TestFunctionalConv3DErrorCase11): - def setUp(self): self.input = np.random.randn(1, 3, 3, 3, 3) self.filter = np.random.randn(3, 3, 1, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py index 0190779a021c939e8d14961a077b21bb06dc2a8e..d9ee04953592bc5a7ddbef93018c7eab14b799ef 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py @@ -43,27 
+43,34 @@ class TestFunctionalConv3DTranspose(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 3 + filter_shape = (self.filter_shape,) * 3 else: filter_shape = tuple(self.filter_shape) self.weight = np.random.uniform( - -1, 1, (self.in_channels, self.out_channels // self.groups) + - filter_shape).astype(self.dtype) + -1, + 1, + (self.in_channels, self.out_channels // self.groups) + filter_shape, + ).astype(self.dtype) if not self.no_bias: - self.bias = np.random.uniform(-1, 1, (self.out_channels, )).astype( - self.dtype) + self.bias = np.random.uniform(-1, 1, (self.out_channels,)).astype( + self.dtype + ) - self.channel_last = (self.data_format == "NDHWC") + self.channel_last = self.data_format == "NDHWC" if self.channel_last: - self.input_shape = (self.batch_size, ) + self.spatial_shape + ( - self.in_channels, ) + self.input_shape = ( + (self.batch_size,) + self.spatial_shape + (self.in_channels,) + ) else: - self.input_shape = (self.batch_size, - self.in_channels) + self.spatial_shape + self.input_shape = ( + self.batch_size, + self.in_channels, + ) + self.spatial_shape - self.input = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.input = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) def static_graph_case_1(self): main = fluid.Program() @@ -71,11 +78,17 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data("input", (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) y = fluid.layers.conv3d_transpose( x, self.out_channels, @@ -87,12 +100,14 @@ class TestFunctionalConv3DTranspose(TestCase): groups=self.groups, param_attr=I.NumpyArrayInitializer(self.weight), bias_attr=False - if self.no_bias else I.NumpyArrayInitializer(self.bias), + if self.no_bias + else I.NumpyArrayInitializer(self.bias), act=self.act, - data_format=self.data_format) + data_format=self.data_format, + ) exe = fluid.Executor(self.place) exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): @@ -101,26 +116,33 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data("input", - (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight.shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight.shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) - y = F.conv3d_transpose(x, - weight, - None if self.no_bias else bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv3d_transpose( + x, + weight, + None if 
self.no_bias else bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) exe = fluid.Executor(self.place) @@ -128,7 +150,7 @@ class TestFunctionalConv3DTranspose(TestCase): feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: feed_dict["bias"] = self.bias - out, = exe.run(main, feed=feed_dict, fetch_list=[y]) + (out,) = exe.run(main, feed=feed_dict, fetch_list=[y]) return out def dygraph_case(self): @@ -136,15 +158,17 @@ class TestFunctionalConv3DTranspose(TestCase): x = dg.to_variable(self.input) weight = dg.to_variable(self.weight) bias = None if self.no_bias else dg.to_variable(self.bias) - y = F.conv3d_transpose(x, - weight, - bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = F.conv3d_transpose( + x, + weight, + bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) out = y.numpy() @@ -166,14 +190,16 @@ class TestFunctionalConv3DTranspose(TestCase): with _test_eager_guard(): self.test_identity_cpu() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu(self): self.place = fluid.CUDAPlace(0) self._test_identity() - @unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def test_identity_gpu_check_eager(self): with _test_eager_guard(): self.test_identity_gpu() @@ -204,12 +230,14 @@ class TestFunctionalConv3DTransposeError(TestCase): def prepare(self): if isinstance(self.filter_shape, int): - filter_shape = (self.filter_shape, ) * 3 + filter_shape = (self.filter_shape,) * 3 else: filter_shape = tuple(self.filter_shape) - self.weight_shape = (self.in_channels, - self.out_channels // self.groups) + filter_shape - self.bias_shape = (self.out_channels, ) + self.weight_shape = ( + self.in_channels, + self.out_channels // self.groups, + ) + filter_shape + self.bias_shape = (self.out_channels,) def static_graph_case(self): main = fluid.Program() @@ -218,32 +246,38 @@ class TestFunctionalConv3DTransposeError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data("input", - (-1, -1, -1, -1, self.in_channels), - dtype=self.dtype) + x = x = fluid.data( + "input", + (-1, -1, -1, -1, self.in_channels), + dtype=self.dtype, + ) else: - x = fluid.data("input", (-1, self.in_channels, -1, -1, -1), - dtype=self.dtype) - weight = fluid.data("weight", - self.weight_shape, - dtype=self.dtype) + x = fluid.data( + "input", + (-1, self.in_channels, -1, -1, -1), + dtype=self.dtype, + ) + weight = fluid.data( + "weight", self.weight_shape, dtype=self.dtype + ) if not self.no_bias: bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) - y = F.conv3d_transpose(x, - weight, - None if self.no_bias else bias, - output_size=self.output_size, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + y = 
F.conv3d_transpose( + x, + weight, + None if self.no_bias else bias, + output_size=self.output_size, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) if self.act == 'sigmoid': y = F.sigmoid(y) class TestFunctionalConv3DTransposeCase2(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -258,7 +292,6 @@ class TestFunctionalConv3DTransposeCase2(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase3(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -273,7 +306,6 @@ class TestFunctionalConv3DTransposeCase3(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase4(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -288,7 +320,6 @@ class TestFunctionalConv3DTransposeCase4(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase5(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -303,7 +334,6 @@ class TestFunctionalConv3DTransposeCase5(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase6(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 4 @@ -318,7 +348,6 @@ class TestFunctionalConv3DTransposeCase6(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase7(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 4 @@ -334,7 +363,6 @@ class TestFunctionalConv3DTransposeCase7(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase8(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -349,7 +377,6 @@ class TestFunctionalConv3DTransposeCase8(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase9(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -364,7 +391,6 @@ class TestFunctionalConv3DTransposeCase9(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase10(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -379,7 +405,6 @@ class TestFunctionalConv3DTransposeCase10(TestFunctionalConv3DTranspose): class TestFunctionalConv3DTransposeCase11(TestFunctionalConv3DTranspose): - def setUp(self): self.in_channels = 4 self.out_channels = 6 @@ -393,9 +418,9 @@ class TestFunctionalConv3DTransposeCase11(TestFunctionalConv3DTranspose): self.data_format = "NCDHW" -class TestFunctionalConv3DTransposeErrorCase2(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase2( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -409,9 +434,9 @@ class TestFunctionalConv3DTransposeErrorCase2(TestFunctionalConv3DTransposeError self.data_format = "NDHWC" -class TestFunctionalConv3DTransposeErrorCase3(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase3( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 3 self.out_channels = 5 @@ -425,9 +450,9 @@ class TestFunctionalConv3DTransposeErrorCase3(TestFunctionalConv3DTransposeError self.data_format = "NDHWC" -class TestFunctionalConv3DTransposeErrorCase4(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase4( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 3 self.out_channels = 5 
@@ -441,9 +466,9 @@ class TestFunctionalConv3DTransposeErrorCase4(TestFunctionalConv3DTransposeError self.data_format = "NCDHW" -class TestFunctionalConv3DTransposeErrorCase5(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase5( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = -2 self.out_channels = 5 @@ -457,9 +482,9 @@ class TestFunctionalConv3DTransposeErrorCase5(TestFunctionalConv3DTransposeError self.data_format = "NCDHW" -class TestFunctionalConv3DTransposeErrorCase7(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase7( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 4 self.out_channels = 5 @@ -474,9 +499,9 @@ class TestFunctionalConv3DTransposeErrorCase7(TestFunctionalConv3DTransposeError self.data_format = "NCDHW" -class TestFunctionalConv3DTransposeErrorCase8(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase8( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 4 self.out_channels = 5 @@ -490,9 +515,9 @@ class TestFunctionalConv3DTransposeErrorCase8(TestFunctionalConv3DTransposeError self.data_format = "not_valid" -class TestFunctionalConv3DTransposeErrorCase9(TestFunctionalConv3DTransposeError - ): - +class TestFunctionalConv3DTransposeErrorCase9( + TestFunctionalConv3DTransposeError +): def setUp(self): self.in_channels = 3 self.out_channels = 4 @@ -507,7 +532,6 @@ class TestFunctionalConv3DTransposeErrorCase9(TestFunctionalConv3DTransposeError class TestFunctionalConv3DTransposeErrorCase10(TestCase): - def setUp(self): self.input = np.array([]) self.filter = np.array([]) @@ -535,29 +559,36 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase): dilation=self.dilation, groups=self.groups, param_attr=I.NumpyArrayInitializer(self.filter), - bias_attr=False if self.bias is None else - I.NumpyArrayInitializer(self.bias), + bias_attr=False + if self.bias is None + else I.NumpyArrayInitializer(self.bias), act=None, - data_format=self.data_format) + data_format=self.data_format, + ) exe = fluid.Executor() exe.run(start) - out, = exe.run(main, feed={"input": self.input}, fetch_list=[y]) + (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def dygraph_case(self): with dg.guard(): x = dg.to_variable(self.input, dtype=paddle.float32) w = dg.to_variable(self.filter, dtype=paddle.float32) - b = None if self.bias is None else dg.to_variable( - self.bias, dtype=paddle.float32) - y = F.conv3d_transpose(x, - w, - b, - padding=self.padding, - stride=self.stride, - dilation=self.dilation, - groups=self.groups, - data_format=self.data_format) + b = ( + None + if self.bias is None + else dg.to_variable(self.bias, dtype=paddle.float32) + ) + y = F.conv3d_transpose( + x, + w, + b, + padding=self.padding, + stride=self.stride, + dilation=self.dilation, + groups=self.groups, + data_format=self.data_format, + ) def test_dygraph_exception(self): with self.assertRaises(ValueError): @@ -573,8 +604,8 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase): class TestFunctionalConv3DTransposeErrorCase11( - TestFunctionalConv3DTransposeErrorCase10): - + TestFunctionalConv3DTransposeErrorCase10 +): def setUp(self): self.input = np.random.randn(1, 3, 3, 3, 3) self.filter = np.random.randn(3, 3, 1, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py index 
67729d6633d0e153d1ff67c9d7011456cff35ba7..9431bcac4e34acbf7e572e3c624e12869b9f0358 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py @@ -26,18 +26,19 @@ paddle.enable_static() class TestFuseAllReduceOpsBase(TestParallelExecutorBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - def compare_fuse_all_reduce_ops(self, - model, - use_device, - init_feed_dict=None, - get_data_from_feeder=None, - optimizer=None, - fuse_all_optimizer_ops=False): + def compare_fuse_all_reduce_ops( + self, + model, + use_device, + init_feed_dict=None, + get_data_from_feeder=None, + optimizer=None, + fuse_all_optimizer_ops=False, + ): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return if use_device == DeviceType.XPU and not core.is_compiled_with_xpu(): @@ -48,22 +49,32 @@ class TestFuseAllReduceOpsBase(TestParallelExecutorBase): img, label = init_feed_dict() feed_dict_data = {"image": img, "label": label} - not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence( + ( + not_fuse_op_first_loss, + not_fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, feed_dict=feed_dict_data, get_data_from_feeder=get_data_from_feeder, use_device=use_device, fuse_all_reduce_ops=False, fuse_all_optimizer_ops=fuse_all_optimizer_ops, - optimizer=optimizer) - fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence( + optimizer=optimizer, + ) + ( + fuse_op_first_loss, + fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, feed_dict=feed_dict_data, get_data_from_feeder=get_data_from_feeder, use_device=use_device, fuse_all_reduce_ops=True, fuse_all_optimizer_ops=fuse_all_optimizer_ops, - optimizer=optimizer) + optimizer=optimizer, + ) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) @@ -73,18 +84,20 @@ class TestFuseAllReduceOpsBase(TestParallelExecutorBase): def optimizer(self, learning_rate=1e-3): optimizer = fluid.optimizer.SGD( learning_rate=learning_rate, - regularization=fluid.regularizer.L2Decay(1e-3)) + regularization=fluid.regularizer.L2Decay(1e-3), + ) return optimizer class TestFuseAllReduceOps(TestFuseAllReduceOpsBase): - def _decorate_compare_fused_all_reduce(self, model, use_device): - self.compare_fuse_all_reduce_ops(model, - use_device, - init_feed_dict=init_data, - optimizer=self.optimizer, - fuse_all_optimizer_ops=True) + self.compare_fuse_all_reduce_ops( + model, + use_device, + init_feed_dict=init_data, + optimizer=self.optimizer, + fuse_all_optimizer_ops=True, + ) def test_simple_fc_with_fuse_all_reduce(self): self._decorate_compare_fused_all_reduce(simple_fc_net, DeviceType.CUDA) @@ -92,27 +105,29 @@ class TestFuseAllReduceOps(TestFuseAllReduceOpsBase): self._decorate_compare_fused_all_reduce(simple_fc_net, DeviceType.CPU) def test_batchnorm_fc_with_fuse_all_reduce(self): - self._decorate_compare_fused_all_reduce(fc_with_batchnorm, - DeviceType.CUDA) + self._decorate_compare_fused_all_reduce( + fc_with_batchnorm, DeviceType.CUDA + ) # TODO(wangxi): xpu batch_norm op only support dim = 4 # self._decorate_compare_fused_all_reduce(fc_with_batchnorm, # DeviceType.XPU) - self._decorate_compare_fused_all_reduce(fc_with_batchnorm, - DeviceType.CPU) + self._decorate_compare_fused_all_reduce( + fc_with_batchnorm, DeviceType.CPU + ) class TestFuseAllReduceOpsAndOptiOps(TestFuseAllReduceOps): - def _decorate_compare_fused_all_reduce(self, 
model, use_device): - self.compare_fuse_all_reduce_ops(model, - use_device, - init_feed_dict=init_data, - optimizer=self.optimizer, - fuse_all_optimizer_ops=True) + self.compare_fuse_all_reduce_ops( + model, + use_device, + init_feed_dict=init_data, + optimizer=self.optimizer, + fuse_all_optimizer_ops=True, + ) class TestFuseAllReduceOpsWithSparseGrad(TestFuseAllReduceOpsBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -132,7 +147,8 @@ class TestFuseAllReduceOpsWithSparseGrad(TestFuseAllReduceOpsBase): model, use_device, get_data_from_feeder=self.get_data_from_feeder, - optimizer=self.optimizer) + optimizer=self.optimizer, + ) def test_simple_bow_net_with_fuse_all_reduce(self): model = partial(bow_net, dict_dim=self.word_dict_len, is_sparse=True) diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index d3a18ad28ca54b5d7b89e8b7798993a79e188c8f..5018b7bd5e43b6d8bd69615b55a1974f7b0b651f 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -18,41 +18,47 @@ import unittest class TestFuseBatchNormActPass(unittest.TestCase): - def build_program(self, main_program, startup_program, use_cuda, seed=1): with fluid.program_guard(main_program, startup_program): x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') y = fluid.layers.data(name="y", shape=[1], dtype='int64') - hidden1 = fluid.layers.conv2d(input=x, - filter_size=3, - num_filters=16, - stride=1, - padding=1, - act=None, - bias_attr=False, - data_format='NHWC') + hidden1 = fluid.layers.conv2d( + input=x, + filter_size=3, + num_filters=16, + stride=1, + padding=1, + act=None, + bias_attr=False, + data_format='NHWC', + ) param_attr = fluid.ParamAttr( name='batch_norm_w', - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0), + ) bias_attr = fluid.ParamAttr( name='batch_norm_b', - initializer=fluid.initializer.Constant(value=0.0)) - hidden2 = fluid.layers.batch_norm(input=hidden1, - param_attr=param_attr, - bias_attr=bias_attr, - act='relu', - data_layout='NHWC') + initializer=fluid.initializer.Constant(value=0.0), + ) + hidden2 = fluid.layers.batch_norm( + input=hidden1, + param_attr=param_attr, + bias_attr=bias_attr, + act='relu', + data_layout='NHWC', + ) hidden3 = fluid.layers.fc(input=hidden2, size=32, act='relu') - hidden4 = fluid.layers.batch_norm(input=hidden3, - act='relu', - data_layout='NHWC') + hidden4 = fluid.layers.batch_norm( + input=hidden3, act='relu', data_layout='NHWC' + ) prediction = fluid.layers.fc(input=hidden4, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=y) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(learning_rate=0.001) if use_cuda: sgd = fluid.contrib.mixed_precision.decorate( - sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0) + sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0 + ) sgd.minimize(loss) return x, y, loss @@ -71,36 +77,40 @@ class TestFuseBatchNormActPass(unittest.TestCase): build_strategy = fluid.BuildStrategy() build_strategy.fuse_bn_act_ops = False binary = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) + loss_name=loss.name, build_strategy=build_strategy + ) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), 
batch_size=batch_size + ) loss_vals = [] scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) for _ in range(iters): data = next(train_reader()) - loss_v = exe.run(binary, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary, feed=feeder.feed(data), fetch_list=[loss] + ) loss_vals.append(loss_v[0][0]) # open fused_bn_act_ops build_strategy_fused = fluid.BuildStrategy() build_strategy_fused.fuse_bn_act_ops = True binary_fused = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy_fused) - train_reader_fused = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size) + loss_name=loss.name, build_strategy=build_strategy_fused + ) + train_reader_fused = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size + ) loss_vals_fused = [] scope_fused = fluid.Scope() with fluid.scope_guard(scope_fused): exe.run(startup_program) for _ in range(iters): data = next(train_reader_fused()) - loss_v = exe.run(binary_fused, - feed=feeder.feed(data), - fetch_list=[loss]) + loss_v = exe.run( + binary_fused, feed=feeder.feed(data), fetch_list=[loss] + ) loss_vals_fused.append(loss_v[0][0]) # check loss diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py index 67c4879a05ada507d72ed4b19f0613fca4dcf997..47c2a1a5f16cdb94993c2ca9f0e2729049f2e663 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py @@ -21,130 +21,151 @@ from paddle.fluid import core paddle.enable_static() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "Paddle core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Paddle core is not compiled with CUDA" +) class TestFusedBnAddActAPI(unittest.TestCase): - def setUp(self): self.conv_param_attr1 = fluid.ParamAttr( name='conv2d_1.weight', initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) self.conv_param_attr2 = fluid.ParamAttr( name='conv2d_2.weight', initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) self.bn_param_attr1 = fluid.ParamAttr( name='batch_norm_w_1', - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0), + ) self.bn_bias_attr1 = fluid.ParamAttr( name='batch_norm_b_1', - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0), + ) self.bn_param_attr2 = fluid.ParamAttr( name='batch_norm_w_2', - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0), + ) self.bn_bias_attr2 = fluid.ParamAttr( name='batch_norm_b_2', - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0), + ) self.fc_param_attr = fluid.ParamAttr( name='fc.weight', - initializer=fluid.initializer.Xavier(uniform=False)) + initializer=fluid.initializer.Xavier(uniform=False), + ) - def build_fused_program(self, - main_program, - startup_program, - use_cuda, - seed=1): + def build_fused_program( + self, main_program, startup_program, use_cuda, seed=1 + ): with fluid.program_guard(main_program, startup_program): x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') y = fluid.layers.data(name="y", shape=[1], dtype='int64') - conv1_1 = fluid.layers.conv2d(input=x, - filter_size=3, - 
num_filters=32, - stride=1, - padding=1, - act=None, - param_attr=self.conv_param_attr1, - bias_attr=False, - data_format='NHWC') - conv1_2 = fluid.layers.conv2d(input=x, - filter_size=3, - num_filters=32, - stride=1, - padding=1, - act=None, - param_attr=self.conv_param_attr2, - bias_attr=False, - data_format='NHWC') - bn = fluid.layers.batch_norm(input=conv1_1, - param_attr=self.bn_param_attr1, - bias_attr=self.bn_bias_attr1, - act=None, - data_layout='NHWC') + conv1_1 = fluid.layers.conv2d( + input=x, + filter_size=3, + num_filters=32, + stride=1, + padding=1, + act=None, + param_attr=self.conv_param_attr1, + bias_attr=False, + data_format='NHWC', + ) + conv1_2 = fluid.layers.conv2d( + input=x, + filter_size=3, + num_filters=32, + stride=1, + padding=1, + act=None, + param_attr=self.conv_param_attr2, + bias_attr=False, + data_format='NHWC', + ) + bn = fluid.layers.batch_norm( + input=conv1_1, + param_attr=self.bn_param_attr1, + bias_attr=self.bn_bias_attr1, + act=None, + data_layout='NHWC', + ) fused_bn_add_act = fluid.contrib.layers.fused_bn_add_act( conv1_2, bn, param_attr=self.bn_param_attr2, - bias_attr=self.bn_bias_attr2) - prediction = fluid.layers.fc(input=fused_bn_add_act, - size=10, - act='softmax', - param_attr=self.fc_param_attr) + bias_attr=self.bn_bias_attr2, + ) + prediction = fluid.layers.fc( + input=fused_bn_add_act, + size=10, + act='softmax', + param_attr=self.fc_param_attr, + ) loss = fluid.layers.cross_entropy(input=prediction, label=y) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(learning_rate=0.001) sgd = fluid.contrib.mixed_precision.decorate( - sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0) + sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0 + ) sgd.minimize(loss) return x, y, loss - def build_origin_program(self, - main_program, - startup_program, - use_cuda, - seed=1): + def build_origin_program( + self, main_program, startup_program, use_cuda, seed=1 + ): with fluid.program_guard(main_program, startup_program): x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') y = fluid.layers.data(name="y", shape=[1], dtype='int64') - conv1_1 = fluid.layers.conv2d(input=x, - filter_size=3, - num_filters=32, - stride=1, - padding=1, - act=None, - param_attr=self.conv_param_attr1, - bias_attr=False, - data_format='NHWC') - bn1 = fluid.layers.batch_norm(input=conv1_1, - param_attr=self.bn_param_attr1, - bias_attr=self.bn_bias_attr1, - act=None, - data_layout='NHWC') - conv1_2 = fluid.layers.conv2d(input=conv1_1, - filter_size=1, - num_filters=32, - stride=1, - act=None, - param_attr=self.conv_param_attr2, - bias_attr=False, - data_format='NHWC') - bn2 = fluid.layers.batch_norm(input=conv1_1, - param_attr=self.bn_param_attr2, - bias_attr=self.bn_bias_attr2, - act=None, - data_layout='NHWC') + conv1_1 = fluid.layers.conv2d( + input=x, + filter_size=3, + num_filters=32, + stride=1, + padding=1, + act=None, + param_attr=self.conv_param_attr1, + bias_attr=False, + data_format='NHWC', + ) + bn1 = fluid.layers.batch_norm( + input=conv1_1, + param_attr=self.bn_param_attr1, + bias_attr=self.bn_bias_attr1, + act=None, + data_layout='NHWC', + ) + conv1_2 = fluid.layers.conv2d( + input=conv1_1, + filter_size=1, + num_filters=32, + stride=1, + act=None, + param_attr=self.conv_param_attr2, + bias_attr=False, + data_format='NHWC', + ) + bn2 = fluid.layers.batch_norm( + input=conv1_1, + param_attr=self.bn_param_attr2, + bias_attr=self.bn_bias_attr2, + act=None, + data_layout='NHWC', + ) out = bn1 + bn2 out = fluid.layers.relu(out) - prediction = 
fluid.layers.fc(input=out, - size=10, - act='softmax', - param_attr=self.fc_param_attr) + prediction = fluid.layers.fc( + input=out, size=10, act='softmax', param_attr=self.fc_param_attr + ) loss = fluid.layers.cross_entropy(input=prediction, label=y) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(learning_rate=0.001) sgd = fluid.contrib.mixed_precision.decorate( - sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0) + sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0 + ) sgd.minimize(loss) return loss @@ -158,12 +179,14 @@ class TestFusedBnAddActAPI(unittest.TestCase): # build_fused_program: turn on fuse_bn_add_act_ops main_program = fluid.Program() startup_program = fluid.Program() - loss = self.build_origin_program(main_program, startup_program, - use_cuda) + loss = self.build_origin_program( + main_program, startup_program, use_cuda + ) build_strategy_fused = fluid.BuildStrategy() build_strategy_fused.fuse_bn_add_act_ops = True binary_fused = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy_fused) + loss_name=loss.name, build_strategy=build_strategy_fused + ) exe = fluid.Executor(place) loss_vals_fused = [] x_data = [] @@ -176,30 +199,27 @@ class TestFusedBnAddActAPI(unittest.TestCase): y = np.random.random((batch_size, 1)).astype("int64") x_data.append(x) y_data.append(y) - loss_v = exe.run(binary_fused, - feed={ - "x": x, - "y": y - }, - fetch_list=[loss]) + loss_v = exe.run( + binary_fused, feed={"x": x, "y": y}, fetch_list=[loss] + ) loss_vals_fused.append(loss_v[0][0]) # build_origin_program: turn off fused_bn_act_ops build_strategy = fluid.BuildStrategy() build_strategy.fuse_bn_add_act_ops = False binary = fluid.CompiledProgram(main_program).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy_fused) + loss_name=loss.name, build_strategy=build_strategy_fused + ) loss_vals = [] scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) for i in range(iters): - loss_v = exe.run(binary, - feed={ - "x": x_data[i], - "y": y_data[i] - }, - fetch_list=[loss]) + loss_v = exe.run( + binary, + feed={"x": x_data[i], "y": y_data[i]}, + fetch_list=[loss], + ) loss_vals.append(loss_v[0][0]) # check loss @@ -215,9 +235,9 @@ class TestFusedBnAddActAPI(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() place = fluid.CUDAPlace(0) - x, y, loss = self.build_fused_program(main_program, - startup_program, - use_cuda=True) + x, y, loss = self.build_fused_program( + main_program, startup_program, use_cuda=True + ) exe = fluid.Executor(place) scope = fluid.Scope() with fluid.scope_guard(scope): @@ -225,12 +245,9 @@ class TestFusedBnAddActAPI(unittest.TestCase): for _ in range(5): x = np.random.random((4, 1, 28, 28)).astype("float32") y = np.random.random((4, 1)).astype("int64") - loss_v = exe.run(main_program, - feed={ - "x": x, - "y": y - }, - fetch_list=[loss]) + loss_v = exe.run( + main_program, feed={"x": x, "y": y}, fetch_list=[loss] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py index 15b79bf0a7fd8515a4f5c35d86aaba7b4909c12b..bc052ec1cf7225ed711617d1d8f2e07dbcf470cf 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py @@ -21,7 +21,6 @@ import os class TestMNIST(TestParallelExecutorBase): - @classmethod def 
setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -34,7 +33,8 @@ class TestMNIST(TestParallelExecutorBase): def _optimizer(learning_rate=1e-6): optimizer = fluid.optimizer.SGD( learning_rate=learning_rate, - regularization=fluid.regularizer.L2Decay(1e-6)) + regularization=fluid.regularizer.L2Decay(1e-6), + ) return optimizer # NOTE(dzh): @@ -42,28 +42,32 @@ class TestMNIST(TestParallelExecutorBase): # FIXME (liuwei12) # the new memory optimize strategy will crash this unittest # add enable_inplace=False here to force pass the unittest - not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence( + ( + not_fuse_op_first_loss, + not_fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, fuse_elewise_add_act_ops=False, use_ir_memory_optimize=False, enable_inplace=False, - optimizer=_optimizer) - fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence( + optimizer=_optimizer, + ) + ( + fuse_op_first_loss, + fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, fuse_elewise_add_act_ops=True, use_ir_memory_optimize=False, enable_inplace=False, - optimizer=_optimizer) + optimizer=_optimizer, + ) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) @@ -75,13 +79,16 @@ class TestMNIST(TestParallelExecutorBase): self._compare_fuse_elewise_add_act_ops(simple_fc_net, DeviceType.CPU) def test_batchnorm_fc_with_fuse_op(self): - self._compare_fuse_elewise_add_act_ops(fc_with_batchnorm, - DeviceType.CUDA) - self._compare_fuse_elewise_add_act_ops(fc_with_batchnorm, - DeviceType.CPU) + self._compare_fuse_elewise_add_act_ops( + fc_with_batchnorm, DeviceType.CUDA + ) + self._compare_fuse_elewise_add_act_ops( + fc_with_batchnorm, DeviceType.CPU + ) if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py index 1d7dc0e70d4aa0238893ca957c948c85d1a16dd3..3d95b77bae6ce8fbad105a5f95f153659bdd769f 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py @@ -45,7 +45,6 @@ def verify_node_count(graph, node_name, target_count): class MultiFCLayer(paddle.nn.Layer): - def __init__(self, hidden, Activation): super(MultiFCLayer, self).__init__() self.linear1 = paddle.nn.Linear(hidden, 4 * hidden) @@ -72,10 +71,10 @@ class MultiFCLayer(paddle.nn.Layer): return output -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueFWDBase(unittest.TestCase): - def setUp(self): self.batch = 64 self.seqlen = 128 @@ -87,29 +86,42 @@ class TestFuseGemmEpilogueFWDBase(unittest.TestCase): self.startup_prog = paddle.static.Program() with paddle.static.program_guard(self.main_prog, self.startup_prog): - data = paddle.static.data(name="_data", - shape=[-1, self.seqlen, self.hidden], - dtype='float32') - matmul_y = paddle.static.data(name="_matmul_y", - shape=[1, self.hidden, self.hidden], - dtype='float32') - ele_y = paddle.static.data(name="_ele_y", - shape=[ - 
self.hidden, - ], - dtype='float32') + data = paddle.static.data( + name="_data", + shape=[-1, self.seqlen, self.hidden], + dtype='float32', + ) + matmul_y = paddle.static.data( + name="_matmul_y", + shape=[1, self.hidden, self.hidden], + dtype='float32', + ) + ele_y = paddle.static.data( + name="_ele_y", + shape=[ + self.hidden, + ], + dtype='float32', + ) multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0]) with paddle.static.amp.fp16_guard(): out = multi_layer(data, matmul_y, ele_y) self.loss = paddle.mean(out) - self.data_arr = np.random.random( - (self.batch, self.seqlen, self.hidden)).astype("float32") - 0.5 - self.matmul_y_arr = np.random.random( - (1, self.hidden, self.hidden)).astype("float32") - 0.5 - self.ele_y_arr = np.random.random( - (self.hidden, )).astype("float32") - 0.5 + self.data_arr = ( + np.random.random((self.batch, self.seqlen, self.hidden)).astype( + "float32" + ) + - 0.5 + ) + self.matmul_y_arr = ( + np.random.random((1, self.hidden, self.hidden)).astype("float32") + - 0.5 + ) + self.ele_y_arr = ( + np.random.random((self.hidden,)).astype("float32") - 0.5 + ) self.place = paddle.CUDAPlace(0) self.exe = paddle.static.Executor(self.place) @@ -120,37 +132,45 @@ class TestFuseGemmEpilogueFWDBase(unittest.TestCase): self.feed = { "_data": self.data_arr, "_matmul_y": self.matmul_y_arr, - "_ele_y": self.ele_y_arr + "_ele_y": self.ele_y_arr, } - self.reference = self.exe.run(self.main_prog, - feed=self.feed, - fetch_list=[self.loss.name]) + self.reference = self.exe.run( + self.main_prog, feed=self.feed, fetch_list=[self.loss.name] + ) - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def _test_output(self): build_strategy = paddle.static.BuildStrategy() build_strategy.fuse_gemm_epilogue = True program = paddle.static.CompiledProgram(self.main_prog) - program = program.with_data_parallel(loss_name=self.loss.name, - build_strategy=build_strategy, - places=paddle.static.cuda_places()) - - result = self.exe.run(program, - feed=self.feed, - fetch_list=[self.loss.name]) + program = program.with_data_parallel( + loss_name=self.loss.name, + build_strategy=build_strategy, + places=paddle.static.cuda_places(), + ) + + result = self.exe.run( + program, feed=self.feed, fetch_list=[self.loss.name] + ) self.assertTrue( compare(self.reference, result, self.atol, self.rtol), - "[{}] outputs are miss-matched.".format(type(self).__name__)) + "[{}] outputs are miss-matched.".format(type(self).__name__), + ) self.assertTrue( verify_node_count(program._graph, "fused_gemm_epilogue", 3), - "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph." - .format(type(self).__name__)) + "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph.".format( + type(self).__name__ + ), + ) act_fwd_name = self._get_act_type()[1] self.assertTrue( verify_node_count(program._graph, act_fwd_name, 1), - "[{}] The number of {} is miss-matched in the computing graph.". 
- format(type(self).__name__, act_fwd_name)) + "[{}] The number of {} is miss-matched in the computing graph.".format( + type(self).__name__, act_fwd_name + ), + ) def _pre_test_hooks(self): self.atol = 1e-4 @@ -160,10 +180,10 @@ class TestFuseGemmEpilogueFWDBase(unittest.TestCase): return paddle.nn.ReLU, "relu" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueReluFWDFP32(TestFuseGemmEpilogueFWDBase): - def _pre_test_hooks(self): self.atol = 1e-3 self.rtol = 1e-2 @@ -175,27 +195,28 @@ class TestFuseGemmEpilogueReluFWDFP32(TestFuseGemmEpilogueFWDBase): self._test_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueReluFWDFP16(TestFuseGemmEpilogueReluFWDFP32): - def _pre_test_hooks(self): self.atol = 1e-3 self.rtol = 1e-2 fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog) paddle.static.amp.cast_parameters_to_fp16( - self.place, self.main_prog, to_fp16_var_names=fp16_var_list) + self.place, self.main_prog, to_fp16_var_names=fp16_var_list + ) self.data_arr = self.data_arr.astype("float16") self.matmul_y_arr = self.matmul_y_arr.astype("float16") self.ele_y_arr = self.ele_y_arr.astype("float16") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGeluFWDFP32(TestFuseGemmEpilogueFWDBase): - def _pre_test_hooks(self): self.atol = 1e-4 self.rtol = 1e-3 @@ -207,27 +228,28 @@ class TestFuseGemmEpilogueGeluFWDFP32(TestFuseGemmEpilogueFWDBase): self._test_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGeluFWDFP16(TestFuseGemmEpilogueGeluFWDFP32): - def _pre_test_hooks(self): self.atol = 1e-3 self.rtol = 1e-2 fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog) paddle.static.amp.cast_parameters_to_fp16( - self.place, self.main_prog, to_fp16_var_names=fp16_var_list) + self.place, self.main_prog, to_fp16_var_names=fp16_var_list + ) self.data_arr = self.data_arr.astype("float16") self.matmul_y_arr = self.matmul_y_arr.astype("float16") self.ele_y_arr = self.ele_y_arr.astype("float16") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueBWDBase(unittest.TestCase): - def setUp(self): self.batch = 64 self.seqlen = 128 @@ -239,17 +261,23 @@ class TestFuseGemmEpilogueBWDBase(unittest.TestCase): self.startup_prog = paddle.static.Program() with paddle.static.program_guard(self.main_prog, self.startup_prog): - data = paddle.static.data(name="_data", - shape=[-1, self.seqlen, self.hidden], - dtype='float32') - matmul_y = paddle.static.data(name="_matmul_y", - shape=[1, self.hidden, self.hidden], - dtype='float32') - ele_y = paddle.static.data(name="_ele_y", - shape=[ - self.hidden, - ], - dtype='float32') + data = paddle.static.data( + name="_data", + shape=[-1, self.seqlen, self.hidden], + dtype='float32', + ) + matmul_y = paddle.static.data( + name="_matmul_y", + shape=[1, self.hidden, self.hidden], + 
dtype='float32', + ) + ele_y = paddle.static.data( + name="_ele_y", + shape=[ + self.hidden, + ], + dtype='float32', + ) multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0]) with paddle.static.amp.fp16_guard(): @@ -257,12 +285,19 @@ class TestFuseGemmEpilogueBWDBase(unittest.TestCase): self.loss = paddle.mean(out) paddle.static.append_backward(loss=self.loss) - self.data_arr = np.random.random( - (self.batch, self.seqlen, self.hidden)).astype("float32") - 0.5 - self.matmul_y_arr = np.random.random( - (1, self.hidden, self.hidden)).astype("float32") - 0.5 - self.ele_y_arr = np.random.random( - (self.hidden, )).astype("float32") - 0.5 + self.data_arr = ( + np.random.random((self.batch, self.seqlen, self.hidden)).astype( + "float32" + ) + - 0.5 + ) + self.matmul_y_arr = ( + np.random.random((1, self.hidden, self.hidden)).astype("float32") + - 0.5 + ) + self.ele_y_arr = ( + np.random.random((self.hidden,)).astype("float32") - 0.5 + ) self.place = paddle.CUDAPlace(0) self.exe = paddle.static.Executor(self.place) @@ -273,7 +308,7 @@ class TestFuseGemmEpilogueBWDBase(unittest.TestCase): self.feed = { "_data": self.data_arr, "_matmul_y": self.matmul_y_arr, - "_ele_y": self.ele_y_arr + "_ele_y": self.ele_y_arr, } self.fetch = [ @@ -283,46 +318,58 @@ class TestFuseGemmEpilogueBWDBase(unittest.TestCase): '{}.w_0@GRAD'.format(multi_layer.linear2.full_name()), '{}.b_0@GRAD'.format(multi_layer.linear2.full_name()), '{}.w_0@GRAD'.format(multi_layer.linear3.full_name()), - '{}.b_0@GRAD'.format(multi_layer.linear3.full_name()) + '{}.b_0@GRAD'.format(multi_layer.linear3.full_name()), ] - self.outs_ref = self.exe.run(self.main_prog, - feed=self.feed, - fetch_list=self.fetch) + self.outs_ref = self.exe.run( + self.main_prog, feed=self.feed, fetch_list=self.fetch + ) - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) def _test_output(self): build_strategy = paddle.static.BuildStrategy() build_strategy.fuse_gemm_epilogue = True program = paddle.static.CompiledProgram(self.main_prog) - program = program.with_data_parallel(loss_name=self.loss.name, - build_strategy=build_strategy, - places=paddle.static.cuda_places()) + program = program.with_data_parallel( + loss_name=self.loss.name, + build_strategy=build_strategy, + places=paddle.static.cuda_places(), + ) outs_res = self.exe.run(program, feed=self.feed, fetch_list=self.fetch) for ref, res in zip(self.outs_ref, outs_res): self.assertTrue( compare(ref, res, self.atol, self.rtol), - "[{}] output is miss-matched.".format(type(self).__name__)) + "[{}] output is miss-matched.".format(type(self).__name__), + ) self.assertTrue( verify_node_count(program._graph, "fused_gemm_epilogue", 3), - "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph." - .format(type(self).__name__)) + "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph.".format( + type(self).__name__ + ), + ) self.assertTrue( verify_node_count(program._graph, "fused_gemm_epilogue_grad", 3), - "[{}] The number of fused_gemm_epilogue_grad is miss-matched in the computing graph." 
- .format(type(self).__name__)) + "[{}] The number of fused_gemm_epilogue_grad is miss-matched in the computing graph.".format( + type(self).__name__ + ), + ) _, act_fwd_name, act_bwd_name = self._get_act_type() self.assertTrue( verify_node_count(program._graph, act_fwd_name, 1), - "[{}] The number of {} is miss-matched in the computing graph.". - format(type(self).__name__, act_fwd_name)) + "[{}] The number of {} is miss-matched in the computing graph.".format( + type(self).__name__, act_fwd_name + ), + ) self.assertTrue( verify_node_count(program._graph, act_bwd_name, 2), - "[{}] The number of {} is miss-matched in the computing graph.". - format(type(self).__name__, act_bwd_name)) + "[{}] The number of {} is miss-matched in the computing graph.".format( + type(self).__name__, act_bwd_name + ), + ) def _pre_test_hooks(self): self.atol = 1e-4 @@ -332,10 +379,10 @@ class TestFuseGemmEpilogueBWDBase(unittest.TestCase): return paddle.nn.ReLU, "relu", "relu_grad" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueReLUBWDFP32(TestFuseGemmEpilogueBWDBase): - def _pre_test_hooks(self): self.atol = 1e-4 self.rtol = 1e-3 @@ -347,27 +394,28 @@ class TestFuseGemmEpilogueReLUBWDFP32(TestFuseGemmEpilogueBWDBase): self._test_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueReLUBWDFP16(TestFuseGemmEpilogueReLUBWDFP32): - def _pre_test_hooks(self): self.atol = 1e-3 self.rtol = 1e-2 fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog) paddle.static.amp.cast_parameters_to_fp16( - self.place, self.main_prog, to_fp16_var_names=fp16_var_list) + self.place, self.main_prog, to_fp16_var_names=fp16_var_list + ) self.data_arr = self.data_arr.astype("float16") self.matmul_y_arr = self.matmul_y_arr.astype("float16") self.ele_y_arr = self.ele_y_arr.astype("float16") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGeLUBWDFP32(TestFuseGemmEpilogueBWDBase): - def _pre_test_hooks(self): self.atol = 5e-4 self.rtol = 1e-3 @@ -379,17 +427,18 @@ class TestFuseGemmEpilogueGeLUBWDFP32(TestFuseGemmEpilogueBWDBase): self._test_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGeLUBWDFP16(TestFuseGemmEpilogueGeLUBWDFP32): - def _pre_test_hooks(self): self.atol = 1e-3 self.rtol = 1e-2 fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog) paddle.static.amp.cast_parameters_to_fp16( - self.place, self.main_prog, to_fp16_var_names=fp16_var_list) + self.place, self.main_prog, to_fp16_var_names=fp16_var_list + ) self.data_arr = self.data_arr.astype("float16") self.matmul_y_arr = self.matmul_y_arr.astype("float16") diff --git a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py index 5021870e9b62db8c109222578478d632e932cbbf..9fd6fcc276ecfdab5dac43ee2313bb3c8ffc1fc5 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py +++ 
b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py @@ -24,7 +24,6 @@ import os class TestFuseOptimizationOps(TestParallelExecutorBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -33,72 +32,84 @@ class TestFuseOptimizationOps(TestParallelExecutorBase): img, label = init_data() return {"image": img, "label": label} - def _compare_fused_optimizer_ops(self, - model, - use_device, - feed_dict=None, - get_data_from_feeder=None, - optimizer=fluid.optimizer.Adam): + def _compare_fused_optimizer_ops( + self, + model, + use_device, + feed_dict=None, + get_data_from_feeder=None, + optimizer=fluid.optimizer.Adam, + ): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return - not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence( + ( + not_fuse_op_first_loss, + not_fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, feed_dict=feed_dict, get_data_from_feeder=get_data_from_feeder, use_device=use_device, fuse_all_optimizer_ops=False, - optimizer=optimizer) - fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence( + optimizer=optimizer, + ) + ( + fuse_op_first_loss, + fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, feed_dict=feed_dict, get_data_from_feeder=get_data_from_feeder, use_device=use_device, fuse_all_optimizer_ops=True, - optimizer=optimizer) + optimizer=optimizer, + ) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) - def _decorate_compare_fused_optimizer_ops(self, model, use_device, - optimizer): - self._compare_fused_optimizer_ops(model, - use_device, - feed_dict=self._get_feed_dict(), - optimizer=optimizer) + def _decorate_compare_fused_optimizer_ops( + self, model, use_device, optimizer + ): + self._compare_fused_optimizer_ops( + model, + use_device, + feed_dict=self._get_feed_dict(), + optimizer=optimizer, + ) class TestFuseAdamOps(TestFuseOptimizationOps): - def optimizer(self, learning_rate=1e-4): return fluid.optimizer.Adam(learning_rate=learning_rate) def test_batchnorm_fc_with_fuse_op(self): - self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm, - DeviceType.CUDA, - optimizer=self.optimizer) - self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm, - DeviceType.CPU, - optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, DeviceType.CUDA, optimizer=self.optimizer + ) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, DeviceType.CPU, optimizer=self.optimizer + ) class TestFuseSGDOps(TestFuseAdamOps): - def optimizer(self, learning_rate=1e-3): return fluid.optimizer.SGD(learning_rate=learning_rate) class TestFuseMomentumOps(TestFuseAdamOps): - def optimizer(self, learning_rate=1e-3): - return fluid.optimizer.Momentum(learning_rate=learning_rate, - momentum=0.1) + return fluid.optimizer.Momentum( + learning_rate=learning_rate, momentum=0.1 + ) class TestSpareFuseAdamOps(TestFuseOptimizationOps): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -113,85 +124,87 @@ class TestSpareFuseAdamOps(TestFuseOptimizationOps): feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place) return feeder.feed(self.train_data) - def _decorate_compare_fused_optimizer_ops(self, model, use_device, - optimizer): + def _decorate_compare_fused_optimizer_ops( + self, model, use_device, optimizer 
+ ): self._compare_fused_optimizer_ops( model, use_device, get_data_from_feeder=self._get_data_from_feeder, - optimizer=optimizer) + optimizer=optimizer, + ) def optimizer(self, learning_rate=1e-4): return fluid.optimizer.Adam(learning_rate=learning_rate) def test_simple_bow_net_with_fuse_op(self): model = partial(bow_net, dict_dim=self.word_dict_len, is_sparse=True) - self._decorate_compare_fused_optimizer_ops(model, - DeviceType.CUDA, - optimizer=self.optimizer) - self._decorate_compare_fused_optimizer_ops(model, - DeviceType.CPU, - optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + model, DeviceType.CUDA, optimizer=self.optimizer + ) + self._decorate_compare_fused_optimizer_ops( + model, DeviceType.CPU, optimizer=self.optimizer + ) class TestSpareFuseSGDOps(TestSpareFuseAdamOps): - def optimizer(self, learning_rate=1e-3): return fluid.optimizer.SGD(learning_rate=learning_rate) class TestSpareFuseMomentumOps(TestSpareFuseAdamOps): - def optimizer(self, learning_rate=1e-3): - return fluid.optimizer.Momentum(learning_rate=learning_rate, - momentum=0.1) + return fluid.optimizer.Momentum( + learning_rate=learning_rate, momentum=0.1 + ) class TestPassConflictBase(TestFuseAdamOps): - - def _compare_fused_optimizer_ops(self, - model, - use_device, - feed_dict=None, - get_data_from_feeder=None, - optimizer=fluid.optimizer.Adam): + def _compare_fused_optimizer_ops( + self, + model, + use_device, + feed_dict=None, + get_data_from_feeder=None, + optimizer=fluid.optimizer.Adam, + ): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return - self.check_pass_conflict(model, - feed_dict=feed_dict, - get_data_from_feeder=get_data_from_feeder, - use_device=use_device, - fuse_all_optimizer_ops=True, - optimizer=optimizer, - enable_sequential_execution=True) + self.check_pass_conflict( + model, + feed_dict=feed_dict, + get_data_from_feeder=get_data_from_feeder, + use_device=use_device, + fuse_all_optimizer_ops=True, + optimizer=optimizer, + enable_sequential_execution=True, + ) class TestFuseAdamOpsPassConflict(TestPassConflictBase): - def optimizer(self, learning_rate=1e-4): return fluid.optimizer.Adam(learning_rate=learning_rate) def test_batchnorm_fc_with_fuse_op(self): - self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm, - DeviceType.CPU, - optimizer=self.optimizer) - self._decorate_compare_fused_optimizer_ops(fc_with_batchnorm, - DeviceType.CUDA, - optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, DeviceType.CPU, optimizer=self.optimizer + ) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, DeviceType.CUDA, optimizer=self.optimizer + ) class TestFuseSGDOpsPassConflict(TestFuseAdamOpsPassConflict): - def optimizer(self, learning_rate=1e-3): return fluid.optimizer.SGD(learning_rate=learning_rate) class TestFuseMomentumOpsPassConflict(TestFuseAdamOpsPassConflict): - def optimizer(self, learning_rate=1e-3): - return fluid.optimizer.Momentum(learning_rate=learning_rate, - momentum=0.1) + return fluid.optimizer.Momentum( + learning_rate=learning_rate, momentum=0.1 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py index a93c1380c0b316603aea085c5f57e284798638e5..686a5c1e41088e4542fd936dd09fa955962413c0 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py +++ 
b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py @@ -26,27 +26,27 @@ def norm(*args, **kargs): def sep_conv(input, channel, stride, filter, dilation=1, act=None): # with scope('depthwise'): - input = fluid.layers.conv2d(input, - input.shape[1], - filter, - stride, - groups=input.shape[1], - padding=(filter // 2) * dilation, - dilation=dilation, - use_cudnn=False, - bias_attr=False) + input = fluid.layers.conv2d( + input, + input.shape[1], + filter, + stride, + groups=input.shape[1], + padding=(filter // 2) * dilation, + dilation=dilation, + use_cudnn=False, + bias_attr=False, + ) input = norm(input) - if act: input = act(input) + if act: + input = act(input) # with scope('pointwise'): - input = fluid.layers.conv2d(input, - channel, - 1, - 1, - groups=1, - padding=0, - bias_attr=False) + input = fluid.layers.conv2d( + input, channel, 1, 1, groups=1, padding=0, bias_attr=False + ) input = norm(input) - if act: input = act(input) + if act: + input = act(input) return input @@ -65,7 +65,6 @@ def simple_depthwise_net(use_feed): class TestMNIST(TestParallelExecutorBase): - def _init_data(self, random=True): np.random.seed(5) if random: @@ -83,31 +82,36 @@ class TestMNIST(TestParallelExecutorBase): def _optimizer(learning_rate=1e-6): optimizer = fluid.optimizer.SGD( learning_rate=learning_rate, - regularization=fluid.regularizer.L2Decay(1e-6)) + regularization=fluid.regularizer.L2Decay(1e-6), + ) return optimizer if only_forward: _optimizer = None - fuse_op_first_loss, fuse_op_last_loss, _ = self.check_network_convergence( + ( + fuse_op_first_loss, + fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, fuse_relu_depthwise_conv=True, use_ir_memory_optimize=True, - optimizer=_optimizer) - not_fuse_op_first_loss, not_fuse_op_last_loss, _ = self.check_network_convergence( + optimizer=_optimizer, + ) + ( + not_fuse_op_first_loss, + not_fuse_op_last_loss, + _, + ) = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, fuse_relu_depthwise_conv=False, - optimizer=_optimizer) + optimizer=_optimizer, + ) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) diff --git a/python/paddle/fluid/tests/unittests/test_fused_attention_op.py b/python/paddle/fluid/tests/unittests/test_fused_attention_op.py index 1e27005499a093143d4e9550d85aa7314a64edea..6e3c0014422670a54de62d78d51708e506eacf34 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_attention_op.py @@ -30,7 +30,6 @@ default_main_program().random_seed = 42 class TestFusedAttentionOp(OpTest): - def setUp(self): self.config() self.generate_input_data() @@ -50,22 +49,30 @@ class TestFusedAttentionOp(OpTest): self.__class__.op_type = "fused_attention" # use autograd to check grad in this unittest. 
self.__class__.no_need_check_grad = True - self.q_proj = Linear(self.embed_dim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.k_proj = Linear(self.kdim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.v_proj = Linear(self.vdim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.out_proj = Linear(self.embed_dim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) + self.q_proj = Linear( + self.embed_dim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.k_proj = Linear( + self.kdim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.v_proj = Linear( + self.vdim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.out_proj = Linear( + self.embed_dim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) paddle.set_default_dtype(np.float32) self.norm1 = LayerNorm(self.embed_dim) self.norm2 = LayerNorm(self.embed_dim) @@ -92,40 +99,57 @@ class TestFusedAttentionOp(OpTest): self.weight_attr = None self.bias_attr = None self.kdim, self.vdim = self.embed_dim, self.embed_dim - self.key_length, self.value_length = self.query_length, self.query_length + self.key_length, self.value_length = ( + self.query_length, + self.query_length, + ) def generate_input_data(self): - self.query = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) + self.query = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) out_seq_len = self.key_length if self.has_cache_kv: assert self.training is False, ValueError( - 'cache_kv can only used in inference') - self.cache_kv = np.random.rand(2, self.batch_size, self.num_heads, - self.cache_length, - self.head_dim).astype(self.x_type) + 'cache_kv can only used in inference' + ) + self.cache_kv = np.random.rand( + 2, + self.batch_size, + self.num_heads, + self.cache_length, + self.head_dim, + ).astype(self.x_type) out_seq_len += self.cache_length else: self.cache_kv = None if self.has_attn_mask: # [B, n_head, seq_len, out_seq_len] - self.attn_mask = np.ones((self.batch_size, self.num_heads, - self.query_length, out_seq_len), - dtype=self.attn_mask_type) + self.attn_mask = np.ones( + ( + self.batch_size, + self.num_heads, + self.query_length, + out_seq_len, + ), + dtype=self.attn_mask_type, + ) if self.attn_mask_type == np.int64: self.attn_mask = np.tril(self.attn_mask) elif self.attn_mask_type == np.float64: self.attn_mask = (np.tril(self.attn_mask) - 1.0) * 1e9 else: raise ValueError( - "'attn_mask_type' should be 'int64' or 'float64'.") + "'attn_mask_type' should be 'int64' or 'float64'." 
+ ) else: self.attn_mask = None self.key, self.value = self.query, self.query - self.dout = np.random.random((self.batch_size, self.query_length, - self.embed_dim)).astype(self.x_type) + self.dout = np.random.random( + (self.batch_size, self.query_length, self.embed_dim) + ).astype(self.x_type) def GetBaselineOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) @@ -167,10 +191,9 @@ class TestFusedAttentionOp(OpTest): # [B, n_head, seq_len, head_dim] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, out_seq_len] - qk_out = layers.matmul(x=q_out, - y=k_out, - transpose_y=True, - alpha=self.head_dim**-0.5) + qk_out = layers.matmul( + x=q_out, y=k_out, transpose_y=True, alpha=self.head_dim**-0.5 + ) if attn_mask is not None: attn_mask = _convert_attention_mask(attn_mask, qk_out.dtype) @@ -180,10 +203,12 @@ class TestFusedAttentionOp(OpTest): softmax_out = F.softmax(qk_out) if self.dropout_prob: - dropout_out = F.dropout(softmax_out, - self.dropout_prob, - training=self.training, - mode="upscale_in_train") + dropout_out = F.dropout( + softmax_out, + self.dropout_prob, + training=self.training, + mode="upscale_in_train", + ) # [B, n_head, seq_len, out_seq_len] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, head_dim] qktv_out = tensor.matmul(dropout_out, v_out) @@ -192,7 +217,8 @@ class TestFusedAttentionOp(OpTest): fmha_out = tensor.transpose(qktv_out, perm=[0, 2, 1, 3]) out_linear_in = tensor.reshape( - x=fmha_out, shape=[0, 0, fmha_out.shape[2] * fmha_out.shape[3]]) + x=fmha_out, shape=[0, 0, fmha_out.shape[2] * fmha_out.shape[3]] + ) out = self.out_proj(out_linear_in) residual_out = residual + self.dropout(out) @@ -204,37 +230,47 @@ class TestFusedAttentionOp(OpTest): if self.has_cache_kv: return final_out - paddle.autograd.backward([final_out], [paddle.to_tensor(self.dout)], - retain_graph=True) + paddle.autograd.backward( + [final_out], [paddle.to_tensor(self.dout)], retain_graph=True + ) return final_out, tensor_query.grad def GetFusedAttentionOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) - q_proj_weight = paddle.to_tensor(self.q_proj.weight, - stop_gradient=False) - k_proj_weight = paddle.to_tensor(self.k_proj.weight, - stop_gradient=False) - v_proj_weight = paddle.to_tensor(self.v_proj.weight, - stop_gradient=False) - out_linear_weight = paddle.to_tensor(self.out_proj.weight, - stop_gradient=False) + q_proj_weight = paddle.to_tensor( + self.q_proj.weight, stop_gradient=False + ) + k_proj_weight = paddle.to_tensor( + self.k_proj.weight, stop_gradient=False + ) + v_proj_weight = paddle.to_tensor( + self.v_proj.weight, stop_gradient=False + ) + out_linear_weight = paddle.to_tensor( + self.out_proj.weight, stop_gradient=False + ) if self.bias_attr is False: qkv_bias_tensor = None out_linear_bias = None else: - q_proj_bias = paddle.to_tensor(self.q_proj.bias, - stop_gradient=False) - k_proj_bias = paddle.to_tensor(self.k_proj.bias, - stop_gradient=False) - v_proj_bias = paddle.to_tensor(self.v_proj.bias, - stop_gradient=False) + q_proj_bias = paddle.to_tensor( + self.q_proj.bias, stop_gradient=False + ) + k_proj_bias = paddle.to_tensor( + self.k_proj.bias, stop_gradient=False + ) + v_proj_bias = paddle.to_tensor( + self.v_proj.bias, stop_gradient=False + ) qkv_bias = np.concatenate( - (q_proj_bias.numpy(), k_proj_bias.numpy(), v_proj_bias.numpy())) + (q_proj_bias.numpy(), k_proj_bias.numpy(), v_proj_bias.numpy()) + ) qkv_bias = qkv_bias.reshape((3, self.num_heads, self.head_dim)) qkv_bias_tensor = paddle.to_tensor(qkv_bias, 
stop_gradient=False) - out_linear_bias = paddle.to_tensor(self.out_proj.bias, - stop_gradient=False) + out_linear_bias = paddle.to_tensor( + self.out_proj.bias, stop_gradient=False + ) ln1_scale = paddle.to_tensor(self.norm1.weight, stop_gradient=False) ln1_bias = paddle.to_tensor(self.norm1.bias, stop_gradient=False) @@ -245,9 +281,11 @@ class TestFusedAttentionOp(OpTest): k_proj_weight = k_proj_weight.numpy().transpose((1, 0)) v_proj_weight = v_proj_weight.numpy().transpose((1, 0)) qkv_weight = np.concatenate( - (q_proj_weight, k_proj_weight, v_proj_weight)) + (q_proj_weight, k_proj_weight, v_proj_weight) + ) qkv_weight = qkv_weight.reshape( - (3, self.num_heads, self.head_dim, self.embed_dim)) + (3, self.num_heads, self.head_dim, self.embed_dim) + ) x = paddle.to_tensor(self.query, stop_gradient=False) cache_kv = None @@ -264,47 +302,56 @@ class TestFusedAttentionOp(OpTest): if attn_mask is not None: attn_mask = _convert_attention_mask(attn_mask, x.dtype) final_out = incubate_f.fused_multi_head_attention( - x, qkv_weight_tensor, out_linear_weight, self.pre_layer_norm, - ln1_scale, ln1_bias, ln2_scale, ln2_bias, epsilon, qkv_bias_tensor, - out_linear_bias, cache_kv, attn_mask, self.dropout_prob, - self.attn_dropout_prob, ln2_epsilon) + x, + qkv_weight_tensor, + out_linear_weight, + self.pre_layer_norm, + ln1_scale, + ln1_bias, + ln2_scale, + ln2_bias, + epsilon, + qkv_bias_tensor, + out_linear_bias, + cache_kv, + attn_mask, + self.dropout_prob, + self.attn_dropout_prob, + ln2_epsilon, + ) if self.has_cache_kv: return final_out[0], final_out[1] - paddle.autograd.backward([final_out], [paddle.to_tensor(self.dout)], - retain_graph=True) + paddle.autograd.backward( + [final_out], [paddle.to_tensor(self.dout)], retain_graph=True + ) return final_out, x.grad def test_fused_attention_op(self): final_out_ref, x_grad_ref = self.GetBaselineOut() final_out, x_grad = self.GetFusedAttentionOut() - np.testing.assert_allclose(final_out_ref, - final_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(x_grad_ref, - x_grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out.numpy(), rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + x_grad_ref, x_grad.numpy(), rtol=self.rtol, atol=self.atol + ) class TestFusedAttentionOpBiasIsNone(TestFusedAttentionOp): - def config(self): super().config() self.bias_attr = False class TestFusedAttentionOpPreLn(TestFusedAttentionOp): - def config(self): super().config() self.pre_layer_norm = True class TestFusedAttentionOpNoneAttnMask(TestFusedAttentionOp): - def config(self): super().config() self.pre_layer_norm = True @@ -312,7 +359,6 @@ class TestFusedAttentionOpNoneAttnMask(TestFusedAttentionOp): class TestFusedAttentionOpFp16(TestFusedAttentionOp): - def config(self): super().config() self.x_type = np.float16 @@ -320,18 +366,15 @@ class TestFusedAttentionOpFp16(TestFusedAttentionOp): def test_fused_attention_op(self): final_out_ref, x_grad_ref = self.GetBaselineOut() final_out, x_grad = self.GetFusedAttentionOut() - np.testing.assert_allclose(final_out_ref, - final_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(x_grad_ref, - x_grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out.numpy(), rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + x_grad_ref, x_grad.numpy(), rtol=self.rtol, atol=self.atol + ) class TestFusedAttentionOpCacheKV(TestFusedAttentionOp): - def config(self): 
super().config() self.has_cache_kv = True @@ -343,10 +386,9 @@ class TestFusedAttentionOpCacheKV(TestFusedAttentionOp): with paddle.no_grad(): final_out_ref = self.GetBaselineOut() final_out, cache_kv_out = self.GetFusedAttentionOut() - np.testing.assert_allclose(final_out_ref, - final_out.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out.numpy(), rtol=self.rtol, atol=self.atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fused_attention_op_api.py b/python/paddle/fluid/tests/unittests/test_fused_attention_op_api.py index ee8efdc81d0b3492772abd964fe03a9d9c3aeeaa..3a3872bb6726f429c9b55e5f57b1db7b88e6d63d 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_attention_op_api.py +++ b/python/paddle/fluid/tests/unittests/test_fused_attention_op_api.py @@ -39,8 +39,9 @@ def softmax(x): def batch_matmul(x, y): assert x.shape[0] == y.shape[0] assert x.shape[1] == y.shape[1] - retval = np.zeros((x.shape[0], x.shape[1], x.shape[2], y.shape[3]), - dtype=np.float64) + retval = np.zeros( + (x.shape[0], x.shape[1], x.shape[2], y.shape[3]), dtype=np.float64 + ) for i in range(x.shape[0]): for j in range(x.shape[1]): retval[i, j, :, :] = np.matmul(x[i, j, :, :], y[i, j, :, :]) @@ -52,24 +53,34 @@ def layer_norm(x, has_scale, has_bias, weight, bias, epsilon=1e-05): x = x.reshape((batch_size * src_len, d_model)) mu = np.mean(x, axis=1, keepdims=True) sigma_squar = np.sum(np.square(x - mu), axis=1) / d_model - x1_up = (x - mu) + x1_up = x - mu x1_down_1 = sigma_squar + epsilon x1_down = np.sqrt(x1_down_1) x1_down = x1_down.reshape((x1_down.shape[0], 1)) x1 = x1_up / x1_down x_scaled = x1 - if (has_scale): + if has_scale: x_scaled = weight * x1 x_scaled_bias = x_scaled - if (has_bias): + if has_bias: x_scaled_bias = x_scaled + bias x_scaled_bias = x_scaled_bias.reshape((batch_size, src_len, d_model)) return x_scaled_bias -def compute_reference(pre_layer_norm, query, attn_mask, ln_scale, ln_bias, - ln_2_scale, ln_2_bias, qkv_weight, qkv_bias, - out_linear_weight, out_linear_bias): +def compute_reference( + pre_layer_norm, + query, + attn_mask, + ln_scale, + ln_bias, + ln_2_scale, + ln_2_bias, + qkv_weight, + qkv_bias, + out_linear_weight, + out_linear_bias, +): batch_size = query.shape[0] seq_len = query.shape[1] embed_dim = query.shape[2] @@ -87,11 +98,13 @@ def compute_reference(pre_layer_norm, query, attn_mask, ln_scale, ln_bias, qkv_weight = qkv_weight.transpose((3, 0, 1, 2)) qkv_weight = qkv_weight.reshape( qkv_weight.shape[0], - qkv_weight.shape[1] * qkv_weight.shape[2] * qkv_weight.shape[3]) + qkv_weight.shape[1] * qkv_weight.shape[2] * qkv_weight.shape[3], + ) if qkv_bias is not None: - qkv_bias = qkv_bias.reshape(qkv_bias.shape[0] * qkv_bias.shape[1] * - qkv_bias.shape[2]) + qkv_bias = qkv_bias.reshape( + qkv_bias.shape[0] * qkv_bias.shape[1] * qkv_bias.shape[2] + ) if pre_layer_norm: ln_out = ln_out.reshape(batch_size * seq_len, embed_dim) qkv = fc(ln_out, qkv_weight) @@ -109,22 +122,25 @@ def compute_reference(pre_layer_norm, query, attn_mask, ln_scale, ln_bias, qkv_bias_out = qkv query = query.reshape(batch_size, seq_len, embed_dim) - qkv_bias_out = qkv_bias_out.reshape(batch_size, seq_len, 3, num_head, - head_dim) + qkv_bias_out = qkv_bias_out.reshape( + batch_size, seq_len, 3, num_head, head_dim + ) # q*k^t qkv_bias_out = qkv_bias_out.transpose( - (2, 0, 1, 3, 4)) # 3, batch_size, seq_len, num_head, head_dim + (2, 0, 1, 3, 4) + ) # 3, batch_size, seq_len, num_head, head_dim qkv_bias_out = 
qkv_bias_out.transpose( - (0, 1, 3, 2, 4)) # 3, batch_size, num_head, seq_len, head_dim + (0, 1, 3, 2, 4) + ) # 3, batch_size, num_head, seq_len, head_dim q = qkv_bias_out[0:1, ::] q = q.reshape(batch_size, num_head, seq_len, head_dim) - k = qkv_bias_out[1:2, ::] #[1, batch_size, num_head, seq_len, head_dim] + k = qkv_bias_out[1:2, ::] # [1, batch_size, num_head, seq_len, head_dim] k = k.reshape(batch_size, num_head, seq_len, head_dim) v = qkv_bias_out[2::] v = v.reshape(batch_size, num_head, seq_len, head_dim) - k = k.transpose([0, 1, 3, 2]) #[batch_size, num_head, head_dim, seq_len] + k = k.transpose([0, 1, 3, 2]) # [batch_size, num_head, head_dim, seq_len] qkt = batch_matmul(q, k / np.sqrt(head_dim, dtype=np.float64)) if attn_mask is not None: @@ -139,11 +155,13 @@ def compute_reference(pre_layer_norm, query, attn_mask, ln_scale, ln_bias, attn_heads = batch_matmul(softmax_out, v) attn_heads = attn_heads.transpose( - (0, 2, 1, 3)) # [batch_size, seq_len, num_head, head_dim] + (0, 2, 1, 3) + ) # [batch_size, seq_len, num_head, head_dim] # out_linear - out_linear_input = attn_heads.reshape(batch_size, seq_len, - num_head * head_dim) + out_linear_input = attn_heads.reshape( + batch_size, seq_len, num_head * head_dim + ) out_linear_out = fc(out_linear_input, out_linear_weight) # bias add, dropout, residual add, layer_norm. @@ -155,13 +173,16 @@ def compute_reference(pre_layer_norm, query, attn_mask, ln_scale, ln_bias, out_linear_bias_dropout_residual_out = query + out_linear_bias_dropout_out if not pre_layer_norm: out_linear_bias_dropout_residual_out = layer_norm( - out_linear_bias_dropout_residual_out, True, has_bias, ln_2_scale, - ln_2_bias) + out_linear_bias_dropout_residual_out, + True, + has_bias, + ln_2_scale, + ln_2_bias, + ) return out_linear_bias_dropout_residual_out class TestFusedAttentionAPI(unittest.TestCase): - def setUp(self): self.setXType() self.setPreLn() @@ -209,22 +230,33 @@ class TestFusedAttentionAPI(unittest.TestCase): self.weight_attr = None self.kdim, self.vdim = self.embed_dim, self.embed_dim - self.key_length, self.value_length = self.query_length, self.query_length + self.key_length, self.value_length = ( + self.query_length, + self.query_length, + ) def generate_input_data(self): - self.query = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) + self.query = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) if self.has_attn_mask: - self.attn_mask = np.ones((self.batch_size, self.num_heads, - self.query_length, self.key_length), - dtype=self.attn_mask_type) + self.attn_mask = np.ones( + ( + self.batch_size, + self.num_heads, + self.query_length, + self.key_length, + ), + dtype=self.attn_mask_type, + ) if self.attn_mask_type == np.int64: self.attn_mask = np.tril(self.attn_mask) elif self.attn_mask_type == np.float64: self.attn_mask = (np.tril(self.attn_mask) - 1.0) * 1e9 else: raise ValueError( - "'attn_mask_type' should be 'int64' or 'float64'.") + "'attn_mask_type' should be 'int64' or 'float64'." 
+ ) else: self.attn_mask = None self.key, self.value = self.query, self.query @@ -235,18 +267,34 @@ class TestFusedAttentionAPI(unittest.TestCase): else: attn_mask_tensor = None fused_attn = FusedMultiHeadAttention( - self.embed_dim, self.num_heads, self.dropout_prob, - self.attn_dropout_prob, self.kdim, self.vdim, self.pre_layer_norm, - self.need_weight, self.weight_attr, self.bias_attr, - self.weight_attr, self.bias_attr, self.weight_attr, self.bias_attr, - self.weight_attr, self.bias_attr) + self.embed_dim, + self.num_heads, + self.dropout_prob, + self.attn_dropout_prob, + self.kdim, + self.vdim, + self.pre_layer_norm, + self.need_weight, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + ) if self.bias_attr is not False: - qkv_bias = np.random.random( - fused_attn.qkv_bias.shape).astype('float32') + qkv_bias = np.random.random(fused_attn.qkv_bias.shape).astype( + 'float32' + ) fused_attn.qkv_bias.set_value(paddle.to_tensor(qkv_bias)) - out = fused_attn(paddle.to_tensor(self.query), - paddle.to_tensor(self.query), - paddle.to_tensor(self.query), attn_mask_tensor) + out = fused_attn( + paddle.to_tensor(self.query), + paddle.to_tensor(self.query), + paddle.to_tensor(self.query), + attn_mask_tensor, + ) fused_attn_qkv_bias = None fused_attn_linear_bias = None @@ -263,38 +311,58 @@ class TestFusedAttentionAPI(unittest.TestCase): fused_attn_ln_bias = fused_attn.ln_bias.numpy() ref_out = compute_reference( - self.pre_layer_norm, self.query, self.attn_mask, + self.pre_layer_norm, + self.query, + self.attn_mask, fused_attn.pre_ln_scale.numpy() if self.pre_layer_norm else None, fused_attn_pre_ln_bias, fused_attn.ln_scale.numpy() if not self.pre_layer_norm else None, fused_attn_ln_bias, - fused_attn.qkv_weight.numpy(), fused_attn_qkv_bias, - fused_attn.linear_weight.numpy(), fused_attn_linear_bias) - np.testing.assert_allclose(ref_out, - out.numpy(), - rtol=self.rtol, - atol=self.atol) + fused_attn.qkv_weight.numpy(), + fused_attn_qkv_bias, + fused_attn.linear_weight.numpy(), + fused_attn_linear_bias, + ) + np.testing.assert_allclose( + ref_out, out.numpy(), rtol=self.rtol, atol=self.atol + ) def run_static(self): fused_attn = FusedMultiHeadAttention( - self.embed_dim, self.num_heads, self.dropout_prob, - self.attn_dropout_prob, self.kdim, self.vdim, self.pre_layer_norm, - self.need_weight, self.weight_attr, self.bias_attr, - self.weight_attr, self.bias_attr, self.weight_attr, self.bias_attr, - self.weight_attr, self.bias_attr) + self.embed_dim, + self.num_heads, + self.dropout_prob, + self.attn_dropout_prob, + self.kdim, + self.vdim, + self.pre_layer_norm, + self.need_weight, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + self.weight_attr, + self.bias_attr, + ) x = paddle.static.data( name='X', shape=[self.batch_size, self.query_length, self.embed_dim], - dtype=self.x_type) + dtype=self.x_type, + ) if self.has_attn_mask: - attn_mask = paddle.static.data(name='SrcMask', - shape=[ - self.batch_size, self.num_heads, - self.query_length, - self.key_length - ], - dtype=self.attn_mask_type) + attn_mask = paddle.static.data( + name='SrcMask', + shape=[ + self.batch_size, + self.num_heads, + self.query_length, + self.key_length, + ], + dtype=self.attn_mask_type, + ) final_out = fused_attn(x, x, x, attn_mask) else: final_out = fused_attn(x, x, x) @@ -314,35 +382,38 @@ class TestFusedAttentionAPI(unittest.TestCase): if 
self.pre_layer_norm: out, qkv_weight, out_linear_weight, ln_scale = exe.run( paddle.static.default_main_program(), - feed={ - "X": self.query, - "SrcMask": self.attn_mask - }, + feed={"X": self.query, "SrcMask": self.attn_mask}, fetch_list=[ final_out, fused_attn.qkv_weight, fused_attn.linear_weight, fused_attn.pre_ln_scale, - ]) + ], + ) else: out, qkv_weight, out_linear_weight, ln_2_scale = exe.run( paddle.static.default_main_program(), - feed={ - "X": self.query, - "SrcMask": self.attn_mask - }, + feed={"X": self.query, "SrcMask": self.attn_mask}, fetch_list=[ - final_out, fused_attn.qkv_weight, - fused_attn.linear_weight, fused_attn.ln_scale - ]) + final_out, + fused_attn.qkv_weight, + fused_attn.linear_weight, + fused_attn.ln_scale, + ], + ) else: if self.pre_layer_norm: - out, qkv_weight, qkv_bias, out_linear_weight, linear_bias, ln_scale, ln_bias = exe.run( + ( + out, + qkv_weight, + qkv_bias, + out_linear_weight, + linear_bias, + ln_scale, + ln_bias, + ) = exe.run( paddle.static.default_main_program(), - feed={ - "X": self.query, - "SrcMask": self.attn_mask - }, + feed={"X": self.query, "SrcMask": self.attn_mask}, fetch_list=[ final_out, fused_attn.qkv_weight, @@ -351,20 +422,30 @@ class TestFusedAttentionAPI(unittest.TestCase): fused_attn.linear_bias, fused_attn.pre_ln_scale, fused_attn.pre_ln_bias, - ]) + ], + ) else: - out, qkv_weight, qkv_bias, out_linear_weight, linear_bias, ln_2_scale, ln_2_bias = exe.run( + ( + out, + qkv_weight, + qkv_bias, + out_linear_weight, + linear_bias, + ln_2_scale, + ln_2_bias, + ) = exe.run( paddle.static.default_main_program(), - feed={ - "X": self.query, - "SrcMask": self.attn_mask - }, + feed={"X": self.query, "SrcMask": self.attn_mask}, fetch_list=[ - final_out, fused_attn.qkv_weight, - fused_attn.qkv_bias, fused_attn.linear_weight, - fused_attn.linear_bias, fused_attn.ln_scale, - fused_attn.ln_bias - ]) + final_out, + fused_attn.qkv_weight, + fused_attn.qkv_bias, + fused_attn.linear_weight, + fused_attn.linear_bias, + fused_attn.ln_scale, + fused_attn.ln_bias, + ], + ) else: if self.bias_attr is False: if self.pre_layer_norm: @@ -378,7 +459,8 @@ class TestFusedAttentionAPI(unittest.TestCase): fused_attn.qkv_weight, fused_attn.linear_weight, fused_attn.pre_ln_scale, - ]) + ], + ) else: out, qkv_weight, out_linear_weight, ln_2_scale = exe.run( paddle.static.default_main_program(), @@ -386,12 +468,23 @@ class TestFusedAttentionAPI(unittest.TestCase): "X": self.query, }, fetch_list=[ - final_out, fused_attn.qkv_weight, - fused_attn.linear_weight, fused_attn.ln_scale - ]) + final_out, + fused_attn.qkv_weight, + fused_attn.linear_weight, + fused_attn.ln_scale, + ], + ) else: if self.pre_layer_norm: - out, qkv_weight, qkv_bias, out_linear_weight, linear_bias, ln_scale, ln_bias = exe.run( + ( + out, + qkv_weight, + qkv_bias, + out_linear_weight, + linear_bias, + ln_scale, + ln_bias, + ) = exe.run( paddle.static.default_main_program(), feed={ "X": self.query, @@ -404,30 +497,71 @@ class TestFusedAttentionAPI(unittest.TestCase): fused_attn.linear_bias, fused_attn.pre_ln_scale, fused_attn.pre_ln_bias, - ]) + ], + ) else: - out, qkv_weight, qkv_bias, out_linear_weight, linear_bias, ln_2_scale, ln_2_bias = exe.run( + ( + out, + qkv_weight, + qkv_bias, + out_linear_weight, + linear_bias, + ln_2_scale, + ln_2_bias, + ) = exe.run( paddle.static.default_main_program(), feed={ "X": self.query, }, fetch_list=[ - final_out, fused_attn.qkv_weight, - fused_attn.qkv_bias, fused_attn.linear_weight, - fused_attn.linear_bias, fused_attn.ln_scale, - fused_attn.ln_bias - 
]) - return out, qkv_weight, qkv_bias, out_linear_weight, linear_bias, ln_scale, ln_bias, ln_2_scale, ln_2_bias + final_out, + fused_attn.qkv_weight, + fused_attn.qkv_bias, + fused_attn.linear_weight, + fused_attn.linear_bias, + fused_attn.ln_scale, + fused_attn.ln_bias, + ], + ) + return ( + out, + qkv_weight, + qkv_bias, + out_linear_weight, + linear_bias, + ln_scale, + ln_bias, + ln_2_scale, + ln_2_bias, + ) def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(Program()): - out, qkv_weight, qkv_bias, linear_weight, linear_bias, ln_scale, ln_bias, ln_2_scale, ln_2_bias = self.run_static( - ) - ref_out = compute_reference(self.pre_layer_norm, self.query, - self.attn_mask, ln_scale, ln_bias, - ln_2_scale, ln_2_bias, qkv_weight, qkv_bias, - linear_weight, linear_bias) + ( + out, + qkv_weight, + qkv_bias, + linear_weight, + linear_bias, + ln_scale, + ln_bias, + ln_2_scale, + ln_2_bias, + ) = self.run_static() + ref_out = compute_reference( + self.pre_layer_norm, + self.query, + self.attn_mask, + ln_scale, + ln_bias, + ln_2_scale, + ln_2_bias, + qkv_weight, + qkv_bias, + linear_weight, + linear_bias, + ) np.testing.assert_allclose(ref_out, out, rtol=self.rtol, atol=self.atol) def test_dynamic_api(self): @@ -436,7 +570,6 @@ class TestFusedAttentionAPI(unittest.TestCase): class TestFusedAttentionAPINoneAttnMask(TestFusedAttentionAPI): - def setAttnMask(self): self.has_attn_mask = False @@ -445,7 +578,6 @@ class TestFusedAttentionAPINoneAttnMask(TestFusedAttentionAPI): class TestFusedAttentionAPIBiasIsNone(TestFusedAttentionAPI): - def setBiasAttr(self): self.bias_attr = False diff --git a/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op.py index 7572a1dfe14ded0c7c6e9cb16e938bbdf482ea86..8ae92fe194f35b7c1d5bd36a50d11be020deae35 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op.py @@ -28,7 +28,6 @@ default_main_program().random_seed = 42 class TestFusedBiasDropoutResidualLayerNormOp(OpTest): - def setUp(self): self.config() self.generate_input_data() @@ -53,23 +52,28 @@ class TestFusedBiasDropoutResidualLayerNormOp(OpTest): self.bias_attr = None def generate_input_data(self): - self.x = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) - self.residual = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) + self.x = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) + self.residual = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) self.linear_bias = np.random.rand(self.embed_dim).astype(self.x_type) - self.dout = np.random.random((self.batch_size, self.query_length, - self.embed_dim)).astype(self.x_type) + self.dout = np.random.random( + (self.batch_size, self.query_length, self.embed_dim) + ).astype(self.x_type) if self.bias_attr is False: self.tensor_linear_bias = None else: - self.tensor_linear_bias = paddle.to_tensor(self.linear_bias, - stop_gradient=False) + self.tensor_linear_bias = paddle.to_tensor( + self.linear_bias, stop_gradient=False + ) self.tensor_x = paddle.to_tensor(self.x, stop_gradient=False) - self.tensor_residual = paddle.to_tensor(self.residual, - stop_gradient=False) + self.tensor_residual = paddle.to_tensor( + self.residual, 
stop_gradient=False + ) def GetBaselineOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) @@ -82,14 +86,20 @@ class TestFusedBiasDropoutResidualLayerNormOp(OpTest): residual_out = self.tensor_residual + self.dropout(out) final_out = self.norm1(residual_out) - paddle.autograd.backward([final_out], [paddle.to_tensor(self.dout)], - retain_graph=True) + paddle.autograd.backward( + [final_out], [paddle.to_tensor(self.dout)], retain_graph=True + ) if self.tensor_linear_bias is not None: tensor_linear_bias_grad = self.tensor_linear_bias.grad else: tensor_linear_bias_grad = None - return final_out, self.tensor_x.grad, self.tensor_residual.grad, tensor_linear_bias_grad + return ( + final_out, + self.tensor_x.grad, + self.tensor_residual.grad, + tensor_linear_bias_grad, + ) def GetFusedBiasDropoutResidualLayerNormOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) @@ -99,52 +109,71 @@ class TestFusedBiasDropoutResidualLayerNormOp(OpTest): epsilon = 1e-05 final_out = incubate_f.fused_bias_dropout_residual_layer_norm( - self.tensor_x, self.tensor_residual, self.tensor_linear_bias, - ln_scale, ln_bias, self.dropout_prob, epsilon) + self.tensor_x, + self.tensor_residual, + self.tensor_linear_bias, + ln_scale, + ln_bias, + self.dropout_prob, + epsilon, + ) - paddle.autograd.backward([final_out], [paddle.to_tensor(self.dout)], - retain_graph=True) + paddle.autograd.backward( + [final_out], [paddle.to_tensor(self.dout)], retain_graph=True + ) if self.tensor_linear_bias is not None: tensor_linear_bias_grad = self.tensor_linear_bias.grad else: tensor_linear_bias_grad = None - return final_out, self.tensor_x.grad, self.tensor_residual.grad, tensor_linear_bias_grad + return ( + final_out, + self.tensor_x.grad, + self.tensor_residual.grad, + tensor_linear_bias_grad, + ) def test_fused_op(self): - out_ref, x_grad_ref, residual_grad_ref, linear_bias_grad_ref = self.GetBaselineOut( + ( + out_ref, + x_grad_ref, + residual_grad_ref, + linear_bias_grad_ref, + ) = self.GetBaselineOut() + ( + out, + x_grad, + residual_grad, + linear_bias_grad, + ) = self.GetFusedBiasDropoutResidualLayerNormOut() + np.testing.assert_allclose( + out_ref, out.numpy(), rtol=1e-5, atol=self.atol + ) + np.testing.assert_allclose( + x_grad_ref, x_grad.numpy(), rtol=1e-5, atol=self.atol ) - out, x_grad, residual_grad, linear_bias_grad = self.GetFusedBiasDropoutResidualLayerNormOut( + np.testing.assert_allclose( + residual_grad_ref, residual_grad.numpy(), rtol=1e-5, atol=self.atol ) - np.testing.assert_allclose(out_ref, - out.numpy(), - rtol=1e-5, - atol=self.atol) - np.testing.assert_allclose(x_grad_ref, - x_grad.numpy(), - rtol=1e-5, - atol=self.atol) - np.testing.assert_allclose(residual_grad_ref, - residual_grad.numpy(), - rtol=1e-5, - atol=self.atol) if linear_bias_grad_ref is not None: - np.testing.assert_allclose(linear_bias_grad_ref, - linear_bias_grad.numpy(), - rtol=1e-5, - atol=self.atol) + np.testing.assert_allclose( + linear_bias_grad_ref, + linear_bias_grad.numpy(), + rtol=1e-5, + atol=self.atol, + ) class TestFusedBiasDropoutResidualLayerNormOpBiasIsNone( - TestFusedBiasDropoutResidualLayerNormOp): - + TestFusedBiasDropoutResidualLayerNormOp +): def config(self): super().config() self.bias_attr = False class TestFusedBiasDropoutResidualLayerNormOpFp16( - TestFusedBiasDropoutResidualLayerNormOp): - + TestFusedBiasDropoutResidualLayerNormOp +): def config(self): super().config() self.x_type = np.float16 diff --git a/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op_api.py 
b/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op_api.py index aa54860e0996bee1c75d6718827c050630983e1f..261a2686b271cd18da4c4883f5b42f592ab627db 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op_api.py +++ b/python/paddle/fluid/tests/unittests/test_fused_bias_dropout_residual_layer_norm_op_api.py @@ -15,7 +15,9 @@ import numpy as np import paddle -from paddle.incubate.nn.layer.fused_transformer import FusedBiasDropoutResidualLayerNorm +from paddle.incubate.nn.layer.fused_transformer import ( + FusedBiasDropoutResidualLayerNorm, +) from paddle.static import Program import unittest @@ -25,16 +27,16 @@ def layer_norm(x, has_scale, has_bias, weight, bias, epsilon=1e-05): x = x.reshape((batch_size * src_len, d_model)) mu = np.mean(x, axis=1, keepdims=True) sigma_squar = np.sum(np.square(x - mu), axis=1) / d_model - x1_up = (x - mu) + x1_up = x - mu x1_down_1 = sigma_squar + epsilon x1_down = np.sqrt(x1_down_1) x1_down = x1_down.reshape((x1_down.shape[0], 1)) x1 = x1_up / x1_down x_scaled = x1 - if (has_scale): + if has_scale: x_scaled = weight * x1 x_scaled_bias = x_scaled - if (has_bias): + if has_bias: x_scaled_bias = x_scaled + bias x_scaled_bias = x_scaled_bias.reshape((batch_size, src_len, d_model)) return x_scaled_bias @@ -56,12 +58,12 @@ def compute_reference(x, residual, ln_scale, ln_bias, linear_bias): linear_bias_dropout_out = linear_bias_out linear_bias_dropout_residual_out = residual + linear_bias_dropout_out linear_bias_dropout_residual_ln_out = layer_norm( - linear_bias_dropout_residual_out, True, has_bias, ln_scale, ln_bias) + linear_bias_dropout_residual_out, True, has_bias, ln_scale, ln_bias + ) return linear_bias_dropout_residual_ln_out class TestFusedBiasDropoutResidualLayerNormAPI(unittest.TestCase): - def setUp(self): self.setXType() self.setBiasAttr() @@ -84,50 +86,56 @@ class TestFusedBiasDropoutResidualLayerNormAPI(unittest.TestCase): self.weight_attr = None def generate_input_data(self): - self.x = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) - self.residual = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) + self.x = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) + self.residual = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) def run_imperative(self): fused_bias_dropout_residual_ln = FusedBiasDropoutResidualLayerNorm( - self.embed_dim, self.dropout_prob, self.weight_attr, self.bias_attr) + self.embed_dim, self.dropout_prob, self.weight_attr, self.bias_attr + ) linear_bias = None if self.bias_attr is not False: linear_bias = np.random.random( - fused_bias_dropout_residual_ln.linear_bias.shape).astype( - 'float32') + fused_bias_dropout_residual_ln.linear_bias.shape + ).astype('float32') fused_bias_dropout_residual_ln.linear_bias.set_value( - paddle.to_tensor(linear_bias)) - out = fused_bias_dropout_residual_ln(paddle.to_tensor(self.x), - paddle.to_tensor(self.residual)) + paddle.to_tensor(linear_bias) + ) + out = fused_bias_dropout_residual_ln( + paddle.to_tensor(self.x), paddle.to_tensor(self.residual) + ) ln_bias = None if self.bias_attr is not False: ln_bias = fused_bias_dropout_residual_ln.ln_bias.numpy() - ln_scale = fused_bias_dropout_residual_ln.ln_scale.numpy(), - ref_out = compute_reference(self.x, self.residual, ln_scale, ln_bias, - linear_bias) - np.testing.assert_allclose(ref_out, - out.numpy(), - 
rtol=1e-5, - atol=self.atol) + ln_scale = (fused_bias_dropout_residual_ln.ln_scale.numpy(),) + ref_out = compute_reference( + self.x, self.residual, ln_scale, ln_bias, linear_bias + ) + np.testing.assert_allclose( + ref_out, out.numpy(), rtol=1e-5, atol=self.atol + ) def run_static(self): - fused_op = FusedBiasDropoutResidualLayerNorm(self.embed_dim, - self.dropout_prob, - self.weight_attr, - self.bias_attr) + fused_op = FusedBiasDropoutResidualLayerNorm( + self.embed_dim, self.dropout_prob, self.weight_attr, self.bias_attr + ) x = paddle.static.data( name='X', shape=[self.batch_size, self.query_length, self.embed_dim], - dtype=self.x_type) + dtype=self.x_type, + ) residual = paddle.static.data( name='Residual', shape=[self.batch_size, self.query_length, self.embed_dim], - dtype=self.x_type) + dtype=self.x_type, + ) final_out = fused_op(x, residual) place = paddle.CUDAPlace(0) @@ -137,31 +145,31 @@ class TestFusedBiasDropoutResidualLayerNormAPI(unittest.TestCase): linear_bias = None ln_bias = None if self.bias_attr is False: - out, ln_scale = exe.run(paddle.static.default_main_program(), - feed={ - "X": self.x, - "Residual": self.residual - }, - fetch_list=[final_out, fused_op.ln_scale]) + out, ln_scale = exe.run( + paddle.static.default_main_program(), + feed={"X": self.x, "Residual": self.residual}, + fetch_list=[final_out, fused_op.ln_scale], + ) else: out, linear_bias, ln_scale, ln_bias = exe.run( paddle.static.default_main_program(), - feed={ - "X": self.x, - "Residual": self.residual - }, + feed={"X": self.x, "Residual": self.residual}, fetch_list=[ - final_out, fused_op.linear_bias, fused_op.ln_scale, - fused_op.ln_bias - ]) + final_out, + fused_op.linear_bias, + fused_op.ln_scale, + fused_op.ln_bias, + ], + ) return out, linear_bias, ln_scale, ln_bias def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(Program()): out, linear_bias, ln_scale, ln_bias = self.run_static() - ref_out = compute_reference(self.x, self.residual, ln_scale, ln_bias, - linear_bias) + ref_out = compute_reference( + self.x, self.residual, ln_scale, ln_bias, linear_bias + ) np.testing.assert_allclose(ref_out, out, rtol=1e-5, atol=self.atol) def test_dynamic_api(self): @@ -170,8 +178,8 @@ class TestFusedBiasDropoutResidualLayerNormAPI(unittest.TestCase): class TestFusedBiasDropoutResidualLayerNormAPIBiasIsNone( - TestFusedBiasDropoutResidualLayerNormAPI): - + TestFusedBiasDropoutResidualLayerNormAPI +): def setBiasAttr(self): self.bias_attr = False diff --git a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py index d44c6720d8bb5bc64dc894f88ee8a2456b09ce0e..cc672f5a16f02f3c1c4b91f9f3c24e76348c42b9 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py @@ -32,14 +32,10 @@ from op_test import OpTest # TestFusedElementwiseActivationOp_channelwise_add -def create_test_class(test_case, - callback, - attrs, - dtype=np.float32, - grad_chek=True): - +def create_test_class( + test_case, callback, attrs, dtype=np.float32, grad_chek=True +): class TestFusedElementwiseActivationOp_base(OpTest): - def setUp(self): self.op_type = "fused_elemwise_activation" self.dtype = dtype @@ -54,12 +50,12 @@ def create_test_class(test_case, self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } if 
self.attrs["save_intermediate_out"]: self.outputs = { 'Out': self.out, - "IntermediateOut": self.intermediate_out + "IntermediateOut": self.intermediate_out, } else: self.outputs = {'Out': self.out} @@ -70,8 +66,9 @@ def create_test_class(test_case, self.axis = -1 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y + ) def init_attr(self): self.attrs = { @@ -101,133 +98,152 @@ def create_test_class(test_case, if not grad_chek: return if self.attrs["save_intermediate_out"]: - self.check_grad(['Y'], ['Out'], - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad( + ['Y'], + ['Out'], + max_relative_error=0.005, + no_grad_set=set("X"), + ) else: - self.check_grad(['Y'], ['Out'], - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad( + ['Y'], + ['Out'], + max_relative_error=0.005, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): if not grad_chek: return if self.attrs["save_intermediate_out"]: - self.check_grad(['X'], ['Out'], - max_relative_error=0.005, - no_grad_set=set("Y")) + self.check_grad( + ['X'], + ['Out'], + max_relative_error=0.005, + no_grad_set=set("Y"), + ) else: - self.check_grad(['X'], ['Out'], - max_relative_error=0.005, - no_grad_set=set("Y")) + self.check_grad( + ['X'], + ['Out'], + max_relative_error=0.005, + no_grad_set=set("Y"), + ) class TestFusedElementwiseActivationOp_scalar( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) class TestFusedElementwiseActivationOp_scalar2( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) class TestFusedElementwiseActivationOp_Vector( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): - self.x = np.random.random((32, )).astype(self.dtype) - self.y = np.random.random((32, )).astype(self.dtype) + self.x = np.random.random((32,)).astype(self.dtype) + self.y = np.random.random((32,)).astype(self.dtype) class TestFusedElementwiseActivationOp_broadcast_0( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(2).astype(self.dtype) self.axis = 0 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(2, 1, 1)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(2, 1, 1) + ) class TestFusedElementwiseActivationOp_broadcast_1( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(3).astype(self.dtype) self.axis = 1 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(1, 3, 1)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(1, 3, 1) + ) class TestFusedElementwiseActivationOp_broadcast_2( - TestFusedElementwiseActivationOp_base): - + 
TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(4).astype(self.dtype) def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(1, 1, 4)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(1, 1, 4) + ) class TestFusedElementwiseActivationOp_broadcast_3( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) self.y = np.random.rand(3, 4).astype(self.dtype) self.axis = 1 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(1, 3, 4, 1)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(1, 3, 4, 1) + ) class TestFusedElementwiseActivationOp_broadcast_4( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) self.y = np.random.rand(2, 1).astype(self.dtype) self.axis = 0 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(2, 1, 1, 1)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(2, 1, 1, 1) + ) class TestFusedElementwiseActivationOp_rowwise_add_0( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(3, 4).astype(self.dtype) self.axis = 1 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(1, 3, 4)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(1, 3, 4) + ) class TestFusedElementwiseActivationOp_rowwise_add_1( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(2, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) self.axis = 1 def init_output(self): - self.x, self.y, self.intermediate_out, self.out = \ - callback(self.x, self.y, self.x, self.y.reshape(1, 1)) + self.x, self.y, self.intermediate_out, self.out = callback( + self.x, self.y, self.x, self.y.reshape(1, 1) + ) class TestFusedElementwiseActivationOp_channelwise_add( - TestFusedElementwiseActivationOp_base): - + TestFusedElementwiseActivationOp_base + ): def init_input(self): self.x = np.random.rand(3, 20, 20).astype(self.dtype) self.y = np.random.rand(3, 1, 1).astype(self.dtype) @@ -236,36 +252,59 @@ def create_test_class(test_case, TestFusedElementwiseActivationOp_scalar.__name__ = test_case + "_scalar" TestFusedElementwiseActivationOp_scalar2.__name__ = test_case + "_scalar2" TestFusedElementwiseActivationOp_Vector.__name__ = test_case + "_Vector" - TestFusedElementwiseActivationOp_broadcast_0.__name__ = test_case + "_broadcast_0" - TestFusedElementwiseActivationOp_broadcast_1.__name__ = test_case + "_broadcast_1" - TestFusedElementwiseActivationOp_broadcast_2.__name__ = test_case + "_broadcast_2" - TestFusedElementwiseActivationOp_broadcast_3.__name__ = test_case + "_broadcast_3" - TestFusedElementwiseActivationOp_broadcast_4.__name__ = test_case + "_broadcast_4" - 
TestFusedElementwiseActivationOp_rowwise_add_0.__name__ = test_case + "_rowwise_add_0" - TestFusedElementwiseActivationOp_rowwise_add_1.__name__ = test_case + "_rowwise_add_1" - TestFusedElementwiseActivationOp_channelwise_add.__name__ = test_case + "_channelwise_add" + TestFusedElementwiseActivationOp_broadcast_0.__name__ = ( + test_case + "_broadcast_0" + ) + TestFusedElementwiseActivationOp_broadcast_1.__name__ = ( + test_case + "_broadcast_1" + ) + TestFusedElementwiseActivationOp_broadcast_2.__name__ = ( + test_case + "_broadcast_2" + ) + TestFusedElementwiseActivationOp_broadcast_3.__name__ = ( + test_case + "_broadcast_3" + ) + TestFusedElementwiseActivationOp_broadcast_4.__name__ = ( + test_case + "_broadcast_4" + ) + TestFusedElementwiseActivationOp_rowwise_add_0.__name__ = ( + test_case + "_rowwise_add_0" + ) + TestFusedElementwiseActivationOp_rowwise_add_1.__name__ = ( + test_case + "_rowwise_add_1" + ) + TestFusedElementwiseActivationOp_channelwise_add.__name__ = ( + test_case + "_channelwise_add" + ) globals()[test_case + "_base"] = TestFusedElementwiseActivationOp_base globals()[test_case + "_scalar"] = TestFusedElementwiseActivationOp_scalar globals()[test_case + "_scalar2"] = TestFusedElementwiseActivationOp_scalar2 globals()[test_case + "_Vector"] = TestFusedElementwiseActivationOp_Vector - globals()[test_case + - "_broadcast_0"] = TestFusedElementwiseActivationOp_broadcast_0 - globals()[test_case + - "_broadcast_1"] = TestFusedElementwiseActivationOp_broadcast_1 - globals()[test_case + - "_broadcast_2"] = TestFusedElementwiseActivationOp_broadcast_2 - globals()[test_case + - "_broadcast_3"] = TestFusedElementwiseActivationOp_broadcast_3 - globals()[test_case + - "_broadcast_4"] = TestFusedElementwiseActivationOp_broadcast_4 - globals()[test_case + - "_rowwise_add_0"] = TestFusedElementwiseActivationOp_rowwise_add_0 - globals()[test_case + - "_rowwise_add_1"] = TestFusedElementwiseActivationOp_rowwise_add_1 globals()[ - test_case + - "_channelwise_add"] = TestFusedElementwiseActivationOp_channelwise_add + test_case + "_broadcast_0" + ] = TestFusedElementwiseActivationOp_broadcast_0 + globals()[ + test_case + "_broadcast_1" + ] = TestFusedElementwiseActivationOp_broadcast_1 + globals()[ + test_case + "_broadcast_2" + ] = TestFusedElementwiseActivationOp_broadcast_2 + globals()[ + test_case + "_broadcast_3" + ] = TestFusedElementwiseActivationOp_broadcast_3 + globals()[ + test_case + "_broadcast_4" + ] = TestFusedElementwiseActivationOp_broadcast_4 + globals()[ + test_case + "_rowwise_add_0" + ] = TestFusedElementwiseActivationOp_rowwise_add_0 + globals()[ + test_case + "_rowwise_add_1" + ] = TestFusedElementwiseActivationOp_rowwise_add_1 + globals()[ + test_case + "_channelwise_add" + ] = TestFusedElementwiseActivationOp_channelwise_add def scale_add_func(x, y, x_bcast, y_bcast, scale, mode=0): @@ -338,97 +377,129 @@ for mode in {0, 1}: gelu_add_func = partial(gelu_add_func, mode=mode) for save_intermediate_out in {True, False}: - suffix = ("_save_intermediate_out" if save_intermediate_out else "") \ - + ("_mode_"+ str(mode)) + suffix = ("_save_intermediate_out" if save_intermediate_out else "") + ( + "_mode_" + str(mode) + ) create_test_class( - 'scale_add' + suffix, scale_add_func, { + 'scale_add' + suffix, + scale_add_func, + { 'scale': scale, 'functor_list': ["scale", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) create_test_class( - 'add_scale' + suffix, add_scale_func, { + 'add_scale' + suffix, + add_scale_func, + { 'scale': scale, 
'functor_list': ["elementwise_add", "scale"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) create_test_class( - 'add_relu' + suffix, add_relu_func, { + 'add_relu' + suffix, + add_relu_func, + { 'functor_list': ["elementwise_add", "relu"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) create_test_class( - 'relu_add' + suffix, relu_add_func, { + 'relu_add' + suffix, + relu_add_func, + { 'functor_list': ["relu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) create_test_class( - 'mul_scale' + suffix, mul_scale_func, { + 'mul_scale' + suffix, + mul_scale_func, + { 'scale': scale, 'functor_list': ["elementwise_mul", "scale"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) create_test_class( - 'gelu_add' + suffix, gelu_add_func, { + 'gelu_add' + suffix, + gelu_add_func, + { 'functor_list': ["gelu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, - }) + }, + ) if core.is_compiled_with_cuda(): create_test_class( 'scale_add_fp16' + suffix, - scale_add_func, { + scale_add_func, + { 'scale': scale, 'functor_list': ["scale", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) create_test_class( 'add_scale_fp16' + suffix, - add_scale_func, { + add_scale_func, + { 'scale': scale, 'functor_list': ["elementwise_add", "scale"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) create_test_class( 'add_relu_fp16' + suffix, - add_relu_func, { + add_relu_func, + { 'functor_list': ["elementwise_add", "relu"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) create_test_class( 'relu_add_fp16' + suffix, - relu_add_func, { + relu_add_func, + { 'functor_list': ["relu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) create_test_class( 'mul_scale_fp16' + suffix, - mul_scale_func, { + mul_scale_func, + { 'scale': scale, 'functor_list': ["elementwise_mul", "scale"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) create_test_class( 'gelu_add_fp16' + suffix, - gelu_add_func, { + gelu_add_func, + { 'functor_list': ["gelu", "elementwise_add"], 'save_intermediate_out': save_intermediate_out, }, dtype=np.float16, - grad_chek=False) + grad_chek=False, + ) if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py index 5963ef07e92a57ddefe76cb12893f7731bacee69..392c04f730a3f74f1a8844a0b6db81004555114b 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py @@ -19,27 +19,34 @@ from op_test import OpTest, skip_check_grad_ci import paddle.version as ver -@skip_check_grad_ci(reason="check_grad is called when ver.mkl() == ON" - "and 'Linux' in platform.platform().") +@skip_check_grad_ci( + reason="check_grad is called when ver.mkl() == ON" + "and 'Linux' in platform.platform()." 
+) class TestFusedEmbeddingSeqPoolOp(OpTest): - def setUp(self): self.op_type = "fused_embedding_seq_pool" self.emb_size = 6 self.table = np.random.random((17, self.emb_size)).astype("float64") - self.ids = np.array([[[4], [3]], [[4], [3]], [[2], [1]], - [[16], [1]]]).astype("int64") + self.ids = np.array( + [[[4], [3]], [[4], [3]], [[2], [1]], [[16], [1]]] + ).astype("int64") ids_expand = np.expand_dims(self.ids, axis=1) self.lod = [[3, 1]] self.attrs = {'is_sparse': True} self.inputs = {'W': self.table, 'Ids': (ids_expand, self.lod)} self.outputs = { - 'Out': - np.reshape( - np.array([ - self.table[[4, 3]] + self.table[[4, 3]] + - self.table[[2, 1]], self.table[[16, 1]] - ]), [len(self.lod[0]), 2 * self.emb_size]) + 'Out': np.reshape( + np.array( + [ + self.table[[4, 3]] + + self.table[[4, 3]] + + self.table[[2, 1]], + self.table[[16, 1]], + ] + ), + [len(self.lod[0]), 2 * self.emb_size], + ) } def test_check_output(self): @@ -50,14 +57,12 @@ class TestFusedEmbeddingSeqPoolOp(OpTest): # TODO(wangzhongpu): support lod in dygraph mode if ver.mkl() == "ON" and 'Linux' in platform.platform(): self.attrs = {'is_sparse': False} - self.check_grad(['W'], - 'Out', - no_grad_set=['Ids'], - check_dygraph=False) + self.check_grad( + ['W'], 'Out', no_grad_set=['Ids'], check_dygraph=False + ) class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp): - def test_check_output(self): if ver.mkl() == "ON" and 'Linux' in platform.platform(): ids = np.squeeze(self.ids, axis=2) @@ -65,18 +70,20 @@ class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp): output = list() index = 0 for count in self.lod[0]: - arr = ids[index:count + index] - out = np.reshape(self.table[arr.flatten()], - [arr.shape[0], arr.shape[1], self.emb_size]) + arr = ids[index : count + index] + out = np.reshape( + self.table[arr.flatten()], + [arr.shape[0], arr.shape[1], self.emb_size], + ) idx = np.argwhere(arr == padding_idx) for item in idx: out[item[0], item[1], :] = np.zeros(self.emb_size) output.append(np.sum(out, 0)) index += count self.outputs = { - 'Out': - np.reshape(np.array(output), - [len(self.lod[0]), 2 * self.emb_size]) + 'Out': np.reshape( + np.array(output), [len(self.lod[0]), 2 * self.emb_size] + ) } self.attrs = {'padding_idx': int(padding_idx)} # TODO(wangzhongpu): support lod in dygraph mode @@ -88,30 +95,28 @@ class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp): padding_idx = np.random.choice(ids.flatten(), 1)[0] self.attrs = {'padding_idx': int(padding_idx), 'is_sparse': False} # TODO(wangzhongpu): support lod in dygraph mode - self.check_grad(['W'], - 'Out', - no_grad_set=['Ids'], - check_dygraph=False) + self.check_grad( + ['W'], 'Out', no_grad_set=['Ids'], check_dygraph=False + ) class TestFusedEmbeddingSeqPoolApi(unittest.TestCase): - def test_api(self): if ver.mkl() == "ON" and 'Linux' in platform.platform(): import paddle.fluid as fluid dict_size = 20 - data_t = fluid.layers.data(name='word', - shape=[1], - dtype='int64', - lod_level=1) + data_t = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=1 + ) padding_idx = np.random.randint(1, 10) out = fluid.contrib.fused_embedding_seq_pool( input=data_t, size=[dict_size, 32], param_attr='w', padding_idx=padding_idx, - is_sparse=False) + is_sparse=False, + ) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py b/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py index 
4d1b38e54eee3d8d1ac10f1001534e6ec550351c..54fc4d77b7921557341dff2b46202f69437492b6 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py @@ -23,30 +23,41 @@ def fc(x, w, b): def fused_embedded_fc_lstm( - ids, # T x 1 - lod, # 1 x N - embeddings=None, # Dict_size x M - wx=None, # M x 4D - bx=None, # 1 x 4D - h0=None, # N x D - c0=None, # N x D - w_h=None, # D x 4D - w_b=None, # 1 x 4D - w_c=None, # 1 x 3D - is_reverse=False, - act_gate=None, - act_cell=None, - act_cand=None): + ids, # T x 1 + lod, # 1 x N + embeddings=None, # Dict_size x M + wx=None, # M x 4D + bx=None, # 1 x 4D + h0=None, # N x D + c0=None, # N x D + w_h=None, # D x 4D + w_b=None, # 1 x 4D + w_c=None, # 1 x 3D + is_reverse=False, + act_gate=None, + act_cell=None, + act_cand=None, +): # Make a lookup for embeddings and pass result into lstm reference T = ids.shape[0] M = embeddings.shape[1] x = embeddings[ids].reshape([T, M]) - return lstm(fc(x, wx, bx), lod, h0, c0, w_h, w_b, w_c, is_reverse, act_gate, - act_cell, act_cand) + return lstm( + fc(x, wx, bx), + lod, + h0, + c0, + w_h, + w_b, + w_c, + is_reverse, + act_gate, + act_cell, + act_cand, + ) class TestFusionLSTMOp(OpTest): - def set_conf(self): pass @@ -76,22 +87,24 @@ class TestFusionLSTMOp(OpTest): b = np.random.normal(size=(1, 7 * self.D)).astype('float32') else: b = np.random.normal(size=(1, 4 * self.D)).astype('float32') - w_b = np.copy(b[:, 0:4 * self.D]) - w_c = b[:, 4 * self.D:] if self.use_peepholes else None + w_b = np.copy(b[:, 0 : 4 * self.D]) + w_c = b[:, 4 * self.D :] if self.use_peepholes else None # low is 0 , high is voc_size - 1 - ids = np.random.randint(low=0, high=self.dict_size - 1, - size=(T, 1)).astype("int64") + ids = np.random.randint( + low=0, high=self.dict_size - 1, size=(T, 1) + ).astype("int64") # embeddings as they were trained , so each entry is of M size - embeddings = np.random.random( - (self.dict_size, self.M)).astype("float32") + embeddings = np.random.random((self.dict_size, self.M)).astype( + "float32" + ) # multiply embeddings via Weights fc_embeddings = np.dot(embeddings, wx) # bias should be manually added into the bias of this fused embedding fc LSTM - b[0, 0:4 * self.D] += bx[0, :] - combined_biases = b[:, 0:4 * self.D] + b[0, 0 : 4 * self.D] += bx[0, :] + combined_biases = b[:, 0 : 4 * self.D] # So let broadcast it , so they can be added ones = np.ones([self.dict_size, 1]) broadcasted_biases = np.dot(ones, combined_biases) @@ -107,17 +120,28 @@ class TestFusionLSTMOp(OpTest): wh = np.random.normal(size=(self.D, 4 * self.D)).astype('float32') - h, c = fused_embedded_fc_lstm(ids, self.lod, embeddings, wx, bx, h0, c0, - wh, w_b, w_c, self.is_reverse, - ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand]) + h, c = fused_embedded_fc_lstm( + ids, + self.lod, + embeddings, + wx, + bx, + h0, + c0, + wh, + w_b, + w_c, + self.is_reverse, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) self.inputs = { 'Ids': (ids, self.lod), 'Embeddings': fc_embeddings, 'WeightH': wh, - 'Bias': b + 'Bias': b, } if self.has_initial_state: @@ -133,7 +157,7 @@ class TestFusionLSTMOp(OpTest): 'is_reverse': self.is_reverse, 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, - 'candidate_activation': self.act_cand + 'candidate_activation': self.act_cand, } def test_check_output(self): @@ -143,74 +167,63 @@ class TestFusionLSTMOp(OpTest): class 
TestFusionLSTMOpInit(TestFusionLSTMOp): - def set_conf(self): self.has_initial_state = True class TestFusionLSTMOpReverse(TestFusionLSTMOp): - def set_conf(self): self.is_reverse = True class TestFusionLSTMOpInitReverse(TestFusionLSTMOp): - def set_conf(self): self.has_initial_state = True self.is_reverse = True class TestFusionLSTMOpMD1(TestFusionLSTMOp): - def set_conf(self): self.M = 36 self.D = 8 class TestFusionLSTMOpMD2(TestFusionLSTMOp): - def set_conf(self): self.M = 8 self.D = 8 class TestFusionLSTMOpMD3(TestFusionLSTMOp): - def set_conf(self): self.M = 15 self.D = 3 class TestFusionLSTMOpBS1(TestFusionLSTMOp): - def set_conf(self): self.lod = [[3]] self.D = 16 class TestFusionLSTMOpPeepholes(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True class TestFusionLSTMOpPeepholesInit(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.has_initial_state = True class TestFusionLSTMOpPeepholesReverse(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.is_reverse = True class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.has_initial_state = True @@ -218,7 +231,6 @@ class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp): class TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.lod = [[2]] diff --git a/python/paddle/fluid/tests/unittests/test_fused_fc_elementwise_layernorm_op.py b/python/paddle/fluid/tests/unittests/test_fused_fc_elementwise_layernorm_op.py index fa9948128dee26fb5715bff3faf03a5645518e44..09797b038491988c149ae2816d4a5d893b9134bf 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_fc_elementwise_layernorm_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_fc_elementwise_layernorm_op.py @@ -22,10 +22,10 @@ from test_layer_norm_op import _reference_layer_norm_naive np.random.random(123) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "Paddle core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Paddle core is not compiled with CUDA" +) class TestFusedFCElementwiseLayerNormOp(OpTest): - def config(self): self.matrix = MatrixGenerate(1, 10, 15, 3, 3, 2) self.y_shape = [1, 15] @@ -44,11 +44,12 @@ class TestFusedFCElementwiseLayerNormOp(OpTest): y = np.random.random_sample(self.y_shape).astype(np.float32) add_out = fc_out + y # layer_norm - scale_shape = [np.prod(self.y_shape[self.begin_norm_axis:])] + scale_shape = [np.prod(self.y_shape[self.begin_norm_axis :])] scale = np.random.random_sample(scale_shape).astype(np.float32) bias_1 = np.random.random_sample(scale_shape).astype(np.float32) out, mean, variance = _reference_layer_norm_naive( - add_out, scale, bias_1, epsilon, self.begin_norm_axis) + add_out, scale, bias_1, epsilon, self.begin_norm_axis + ) self.inputs = { "X": self.matrix.input, @@ -56,12 +57,12 @@ class TestFusedFCElementwiseLayerNormOp(OpTest): "Bias0": self.matrix.bias, "Y": y, "Scale": scale, - "Bias1": bias_1 + "Bias1": bias_1, } self.attrs = { "activation_type": "relu", "epsilon": epsilon, - "begin_norm_axis": self.begin_norm_axis + "begin_norm_axis": self.begin_norm_axis, } self.outputs = {"Out": out, "Mean": mean, "Variance": variance} @@ -71,7 +72,6 @@ class TestFusedFCElementwiseLayerNormOp(OpTest): class TestFusedFCElementwiseLayerNormOp2(TestFusedFCElementwiseLayerNormOp): - def config(self): self.matrix = MatrixGenerate(4, 5, 6, 2, 2, 1) self.y_shape = [4, 6] diff --git 
a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py index 85b7ee9285d869950cf86ce7ec16d3655cff1feb..71bf2bad2cc47634d3467191b0859d43d0a8f474 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py @@ -25,7 +25,6 @@ from paddle.fluid.framework import default_main_program class TestFusedFFNOp(OpTest): - def getDtype(self): self.dtype = "float32" self.layer_norm_dtype = "float32" @@ -54,7 +53,7 @@ class TestFusedFFNOp(OpTest): def setUp(self): paddle.disable_static() self.__class__.op_type = "fused_feedforward" - #check grad in test_out_and_grad() + # check grad in test_out_and_grad() self.__class__.no_need_check_grad = True self.getDtype() self.getShape() @@ -66,17 +65,23 @@ class TestFusedFFNOp(OpTest): self.bias_attr = None self.weight_attrs = transformer._convert_param_attr_to_list( - self.weight_attr, 2) + self.weight_attr, 2 + ) self.bias_attrs = transformer._convert_param_attr_to_list( - self.bias_attr, 2) - self.linear1 = Linear(self.d_model, - self.dim_feedforward, - self.weight_attrs[1], - bias_attr=self.bias_attrs[1]) - self.linear2 = Linear(self.dim_feedforward, - self.d_model, - self.weight_attrs[1], - bias_attr=self.bias_attrs[1]) + self.bias_attr, 2 + ) + self.linear1 = Linear( + self.d_model, + self.dim_feedforward, + self.weight_attrs[1], + bias_attr=self.bias_attrs[1], + ) + self.linear2 = Linear( + self.dim_feedforward, + self.d_model, + self.weight_attrs[1], + bias_attr=self.bias_attrs[1], + ) paddle.set_default_dtype(self.layer_norm_dtype) self.norm1 = LayerNorm(self.d_model) @@ -86,10 +91,12 @@ class TestFusedFFNOp(OpTest): self.dropout2 = Dropout(0.0, mode="upscale_in_train") self.activation = getattr(F, self.act_method) - self.src = np.random.random((self.batch_size, self.query_length, - self.d_model)).astype(self.dtype) - self.dout = np.random.random((self.batch_size, self.query_length, - self.d_model)).astype(self.dtype) + self.src = np.random.random( + (self.batch_size, self.query_length, self.d_model) + ).astype(self.dtype) + self.dout = np.random.random( + (self.batch_size, self.query_length, self.d_model) + ).astype(self.dtype) def Base(self): paddle.disable_static() @@ -98,46 +105,54 @@ class TestFusedFFNOp(OpTest): if self.pre_layer_norm: ln1_out = self.norm1(tensor_src) linear2_out = self.linear2( - self.dropout(self.activation(self.linear1(ln1_out)))) + self.dropout(self.activation(self.linear1(ln1_out))) + ) dropout2_out = residual + self.dropout2(linear2_out) - paddle.autograd.backward([dropout2_out], - [paddle.to_tensor(self.dout)], True) + paddle.autograd.backward( + [dropout2_out], [paddle.to_tensor(self.dout)], True + ) return dropout2_out, tensor_src.grad else: linear2_out = self.linear2( - self.dropout(self.activation(self.linear1(tensor_src)))) + self.dropout(self.activation(self.linear1(tensor_src))) + ) dropout2_out = residual + self.dropout2(linear2_out) dropout2_out = self.norm2(dropout2_out) - paddle.autograd.backward([dropout2_out], - [paddle.to_tensor(self.dout)], True) + paddle.autograd.backward( + [dropout2_out], [paddle.to_tensor(self.dout)], True + ) return dropout2_out, tensor_src.grad def FusedFFN(self): paddle.disable_static() - linear1_weight = paddle.to_tensor(self.linear1.weight, - stop_gradient=False) + linear1_weight = paddle.to_tensor( + self.linear1.weight, stop_gradient=False + ) linear1_bias = paddle.to_tensor(self.linear1.bias, stop_gradient=False) - 
linear2_weight = paddle.to_tensor(self.linear2.weight, - stop_gradient=False) + linear2_weight = paddle.to_tensor( + self.linear2.weight, stop_gradient=False + ) linear2_bias = paddle.to_tensor(self.linear2.bias, stop_gradient=False) ln1_scale = paddle.to_tensor(self.norm1.weight, stop_gradient=False) ln1_bias = paddle.to_tensor(self.norm1.bias, stop_gradient=False) ln2_scale = paddle.to_tensor(self.norm2.weight, stop_gradient=False) ln2_bias = paddle.to_tensor(self.norm2.bias, stop_gradient=False) x = paddle.to_tensor(self.src, stop_gradient=False) - out = incubate_f.fused_feedforward(x, - linear1_weight, - linear2_weight, - linear1_bias, - linear2_bias, - ln1_scale, - ln1_bias, - ln2_scale, - ln2_bias, - 0.0, - 0.0, - activation=self.act_method, - pre_layer_norm=self.pre_layer_norm) + out = incubate_f.fused_feedforward( + x, + linear1_weight, + linear2_weight, + linear1_bias, + linear2_bias, + ln1_scale, + ln1_bias, + ln2_scale, + ln2_bias, + 0.0, + 0.0, + activation=self.act_method, + pre_layer_norm=self.pre_layer_norm, + ) paddle.autograd.backward([out], [paddle.to_tensor(self.dout)]) return out, x.grad @@ -145,18 +160,18 @@ class TestFusedFFNOp(OpTest): default_main_program().random_seed = 42 base_out, base_grad = self.Base() fused_out, fused_grad = self.FusedFFN() - np.testing.assert_allclose(base_out.numpy(), - fused_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(base_grad.numpy(), - fused_grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + base_out.numpy(), fused_out.numpy(), rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + base_grad.numpy(), + fused_grad.numpy(), + rtol=self.rtol, + atol=self.atol, + ) class TestFusedFFNOpFp16(TestFusedFFNOp): - def getDtype(self): self.dtype = "float16" self.layer_norm_dtype = "float32" @@ -173,20 +188,17 @@ class TestFusedFFNOpFp16(TestFusedFFNOp): class TestFusedFFNOpFp64(TestFusedFFNOp): - def getDtype(self): self.dtype = "float64" self.layer_norm_dtype = "float64" class TestFusedFFNOpActivation(TestFusedFFNOp): - def getActivation(self): self.act_method = "relu" class TestFusedFFNOpNormalizeBefore(TestFusedFFNOp): - def getNormalizeBefore(self): self.pre_layer_norm = True @@ -198,7 +210,6 @@ class TestFusedFFNOpNormalizeBefore(TestFusedFFNOp): class APITestStaticFusedFFN(unittest.TestCase): - def test_static(self): paddle.enable_static() default_main_program().random_seed = 42 @@ -208,36 +219,39 @@ class APITestStaticFusedFFN(unittest.TestCase): d_model = 8 dim_feedforward = 8 - x = paddle.static.data(name='x', - shape=[batch_size, d_model, dim_feedforward], - dtype=dtype) - linear1_weight = paddle.static.data(name='linear1_weight', - shape=[d_model, dim_feedforward], - dtype=dtype) - linear1_bias = paddle.static.data(name='linear1_bias', - shape=[dim_feedforward]) - linear2_weight = paddle.static.data(name='linear2_weight', - shape=[dim_feedforward, d_model], - dtype=dtype) + x = paddle.static.data( + name='x', shape=[batch_size, d_model, dim_feedforward], dtype=dtype + ) + linear1_weight = paddle.static.data( + name='linear1_weight', shape=[d_model, dim_feedforward], dtype=dtype + ) + linear1_bias = paddle.static.data( + name='linear1_bias', shape=[dim_feedforward] + ) + linear2_weight = paddle.static.data( + name='linear2_weight', shape=[dim_feedforward, d_model], dtype=dtype + ) linear2_bias = paddle.static.data(name='linear2_bias', shape=[d_model]) ln1_scale = paddle.static.data(name='ln1_scale', shape=[d_model]) ln1_bias = paddle.static.data(name='ln1_scale', 
shape=[d_model]) ln2_scale = paddle.static.data(name='ln2_scale', shape=[d_model]) ln2_bias = paddle.static.data(name='ln2_scale', shape=[d_model]) - fused_out = incubate_f.fused_feedforward(x, - linear1_weight, - linear2_weight, - linear1_bias, - linear2_bias, - ln1_scale, - ln1_bias, - ln2_scale, - ln2_bias, - 0.0, - 0.0, - activation="relu", - pre_layer_norm=False) + fused_out = incubate_f.fused_feedforward( + x, + linear1_weight, + linear2_weight, + linear1_bias, + linear2_bias, + ln1_scale, + ln1_bias, + ln2_scale, + ln2_bias, + 0.0, + 0.0, + activation="relu", + pre_layer_norm=False, + ) ######base ffn###### linear1_out = F.linear(x, linear1_weight, linear1_bias) @@ -245,21 +259,26 @@ class APITestStaticFusedFFN(unittest.TestCase): dropout1_out = F.dropout(x=act_out, p=0.0, training=False) linear2_out = F.linear(dropout1_out, linear2_weight, linear2_bias) dropout2_out = x + F.dropout(x=linear2_out, p=0.0, training=False) - ln_out = F.layer_norm(dropout2_out, - normalized_shape=list([d_model]), - weight=ln2_scale, - bias=ln2_bias) + ln_out = F.layer_norm( + dropout2_out, + normalized_shape=list([d_model]), + weight=ln2_scale, + bias=ln2_bias, + ) ######base ffn###### exe = paddle.static.Executor(paddle.CUDAPlace(0)) x_data = np.random.random( - (batch_size, d_model, dim_feedforward)).astype(dtype) + (batch_size, d_model, dim_feedforward) + ).astype(dtype) linear1_weight_data = np.random.random( - (d_model, dim_feedforward)).astype(dtype) + (d_model, dim_feedforward) + ).astype(dtype) linear1_bias_data = np.zeros((dim_feedforward)).astype(dtype) linear2_weight_data = np.random.random( - (dim_feedforward, d_model)).astype(dtype) + (dim_feedforward, d_model) + ).astype(dtype) linear2_bias_data = np.zeros((d_model)).astype(dtype) ln1_scale_data = np.ones((d_model)).astype(layer_norm_dtype) @@ -271,94 +290,92 @@ class APITestStaticFusedFFN(unittest.TestCase): real_res = [] for res in res_list: - fetch = exe.run(feed={ - 'x': x_data, - 'linear1_weight': linear1_weight_data, - 'linear1_bias': linear1_bias_data, - 'linear2_weight': linear2_weight_data, - 'linear2_bias': linear2_bias_data, - 'ln1_scale': ln1_scale_data, - 'ln1_bias': ln1_bias_data, - 'ln2_scale': ln2_scale_data, - 'ln2_bias': ln2_bias_data - }, - fetch_list=[res]) + fetch = exe.run( + feed={ + 'x': x_data, + 'linear1_weight': linear1_weight_data, + 'linear1_bias': linear1_bias_data, + 'linear2_weight': linear2_weight_data, + 'linear2_bias': linear2_bias_data, + 'ln1_scale': ln1_scale_data, + 'ln1_bias': ln1_bias_data, + 'ln2_scale': ln2_scale_data, + 'ln2_bias': ln2_bias_data, + }, + fetch_list=[res], + ) real_res.append(fetch) - np.testing.assert_allclose(real_res[0], - real_res[1], - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + real_res[0], real_res[1], rtol=1e-05, atol=0.001 + ) class TestFusedFFNOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): def test_dtype(): - x = paddle.static.data(name='x', - shape=[1, 10, 10], - dtype="int32") - linear1_weight = paddle.static.data(name='linear1_weight', - shape=[1, 10, 10], - dtype="float32") - linear2_weight = paddle.static.data(name='linear2_weight', - shape=[1, 10, 10], - dtype="float32") + x = paddle.static.data( + name='x', shape=[1, 10, 10], dtype="int32" + ) + linear1_weight = paddle.static.data( + name='linear1_weight', shape=[1, 10, 10], dtype="float32" + ) + 
linear2_weight = paddle.static.data( + name='linear2_weight', shape=[1, 10, 10], dtype="float32" + ) incubate_f.fused_feedforward(x, linear1_weight, linear2_weight) self.assertRaises(TypeError, test_dtype) def test_dropout_rate_type(): - x = paddle.static.data(name='x1', - shape=[1, 10, 10], - dtype="float32") - linear1_weight = paddle.static.data(name='linear1_weight1', - shape=[10, 10], - dtype="float32") - linear2_weight = paddle.static.data(name='linear2_weight1', - shape=[10, 10], - dtype="float32") - incubate_f.fused_feedforward(x, - linear1_weight, - linear2_weight, - dropout1_rate="a") + x = paddle.static.data( + name='x1', shape=[1, 10, 10], dtype="float32" + ) + linear1_weight = paddle.static.data( + name='linear1_weight1', shape=[10, 10], dtype="float32" + ) + linear2_weight = paddle.static.data( + name='linear2_weight1', shape=[10, 10], dtype="float32" + ) + incubate_f.fused_feedforward( + x, linear1_weight, linear2_weight, dropout1_rate="a" + ) self.assertRaises(TypeError, test_dropout_rate_type) def test_dropout_rate_value(): - x = paddle.static.data(name='x2', - shape=[1, 10, 10], - dtype="float32") - linear1_weight = paddle.static.data(name='linear1_weight2', - shape=[10, 10], - dtype="float32") - linear2_weight = paddle.static.data(name='linear2_weight2', - shape=[10, 10], - dtype="float32") - incubate_f.fused_feedforward(x, - linear1_weight, - linear2_weight, - dropout2_rate=-1) + x = paddle.static.data( + name='x2', shape=[1, 10, 10], dtype="float32" + ) + linear1_weight = paddle.static.data( + name='linear1_weight2', shape=[10, 10], dtype="float32" + ) + linear2_weight = paddle.static.data( + name='linear2_weight2', shape=[10, 10], dtype="float32" + ) + incubate_f.fused_feedforward( + x, linear1_weight, linear2_weight, dropout2_rate=-1 + ) self.assertRaises(ValueError, test_dropout_rate_value) def test_dropout_mode(): - x = paddle.static.data(name='x3', - shape=[1, 10, 10], - dtype="float32") - linear1_weight = paddle.static.data(name='linear1_weight3', - shape=[10, 10], - dtype="float32") - linear2_weight = paddle.static.data(name='linear2_weight3', - shape=[10, 10], - dtype="float32") - incubate_f.fused_feedforward(x, - linear1_weight, - linear2_weight, - mode='test') + x = paddle.static.data( + name='x3', shape=[1, 10, 10], dtype="float32" + ) + linear1_weight = paddle.static.data( + name='linear1_weight3', shape=[10, 10], dtype="float32" + ) + linear2_weight = paddle.static.data( + name='linear2_weight3', shape=[10, 10], dtype="float32" + ) + incubate_f.fused_feedforward( + x, linear1_weight, linear2_weight, mode='test' + ) self.assertRaises(ValueError, test_dropout_mode) diff --git a/python/paddle/fluid/tests/unittests/test_fused_gate_attention_op.py b/python/paddle/fluid/tests/unittests/test_fused_gate_attention_op.py index 2baab279462d0379ccee873aca36297e7642b782..1eed59690d88dd57db9c0c6d4cb70a6036dc0fd3 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_gate_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_gate_attention_op.py @@ -28,10 +28,10 @@ from paddle import _legacy_C_ops from paddle.fluid import core -@unittest.skipIf(not core.is_compiled_with_cuda(), - "Paddle is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Paddle is not compiled with CUDA" +) class TestFusedGateAttentionOp(OpTest): - def setUp(self): self.__class__.op_type = "fused_gate_attention" # use autograd to check grad in this unittest. 
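Aside for readers skimming the reformatted gate-attention test that begins above (this note is not part of the patch): the reference path that TestFusedGateAttentionOp checks the fused kernel against is ordinary scaled dot-product attention with an optional sigmoid gate on the per-head output. The NumPy sketch below is illustrative only; the function and variable names are hypothetical, it assumes separate projection weights of shape [q_dim, num_heads, head_dim] as in the test, and it omits the optional nonbatched bias term for brevity.

import numpy as np

def _softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def gate_attention_ref(query, q_w, k_w, v_w, gating_w, gating_b, out_w, out_b, mask):
    # query: [B, S, R, Q]; q_w/k_w/v_w/gating_w: [Q, H, D]
    # out_w: [H, D, O]; out_b: [O]; mask: [B, S, 1, 1, R] (broadcasts over heads and rows)
    c = q_w.shape[-1] ** -0.5
    q = np.einsum('nbqa,ahc->nbqhc', query, q_w) * c        # scaled queries
    k = np.einsum('nbka,ahc->nbkhc', query, k_w)            # keys (self-attention case)
    v = np.einsum('nbka,ahc->nbkhc', query, v_w)            # values
    logits = np.einsum('nbqhc,nbkhc->nbhqk', q, k) + mask   # [B, S, H, R, R]
    probs = _softmax(logits)
    fmha = np.einsum('nbhqk,nbkhc->nbqhc', probs, v)        # per-head attention output
    gate = 1.0 / (1.0 + np.exp(-(np.einsum('nbqa,ahc->nbqhc', query, gating_w) + gating_b)))
    return np.einsum('nbqhc,hco->nbqo', fmha * gate, out_w) + out_b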
@@ -55,7 +55,6 @@ class TestFusedGateAttentionOp(OpTest): self.bias_attr = True def generate_input_data(self): - def _random(shape): if self.dtype == "bfloat16": data = np.random.random(shape).astype("float32") @@ -65,7 +64,8 @@ class TestFusedGateAttentionOp(OpTest): np.random.seed(123) self.query = _random( - (self.batch_size, self.msa_len, self.res_len, self.q_dim)) + (self.batch_size, self.msa_len, self.res_len, self.q_dim) + ) self.q_weight = _random((self.q_dim, self.num_heads, self.head_dim)) self.k_weight = _random((self.kv_dim, self.num_heads, self.head_dim)) self.v_weight = _random((self.kv_dim, self.num_heads, self.head_dim)) @@ -78,15 +78,18 @@ class TestFusedGateAttentionOp(OpTest): self.qkv_weight = np.stack([q_weight_t, k_weight_t, v_weight_t]) else: self.key = _random( - (self.batch_size, self.msa_len, self.m_size, self.kv_dim)) + (self.batch_size, self.msa_len, self.m_size, self.kv_dim) + ) self.qkv_weight = None self.attn_mask = _random( - (self.batch_size, self.msa_len, 1, 1, self.m_size)) + (self.batch_size, self.msa_len, 1, 1, self.m_size) + ) if self.bias_attr: self.nonbatched_bias = _random( - (self.batch_size, 1, self.num_heads, self.res_len, self.m_size)) + (self.batch_size, 1, self.num_heads, self.res_len, self.m_size) + ) if self.has_gating: self.gating_w = _random((self.q_dim, self.num_heads, self.head_dim)) @@ -96,12 +99,17 @@ class TestFusedGateAttentionOp(OpTest): self.output_b = _random((self.out_dim)) self.dout = _random( - (self.batch_size, self.msa_len, self.res_len, self.q_dim)) + (self.batch_size, self.msa_len, self.res_len, self.q_dim) + ) def collect_outputs(self, query, key, softmax_out, fmha_out, gate_out, out): outputs = [ - softmax_out, fmha_out, gate_out if self.has_gating else None, out, - query.grad, None if self.merge_qkv else key.grad + softmax_out, + fmha_out, + gate_out if self.has_gating else None, + out, + query.grad, + None if self.merge_qkv else key.grad, ] return outputs @@ -109,14 +117,17 @@ class TestFusedGateAttentionOp(OpTest): paddle.disable_static(place=paddle.CUDAPlace(0)) query = paddle.to_tensor(self.query, stop_gradient=False) - key = query if self.merge_qkv else paddle.to_tensor(self.key, - stop_gradient=False) + key = ( + query + if self.merge_qkv + else paddle.to_tensor(self.key, stop_gradient=False) + ) q_weight = paddle.to_tensor(self.q_weight, stop_gradient=False) k_weight = paddle.to_tensor(self.k_weight, stop_gradient=False) v_weight = paddle.to_tensor(self.v_weight, stop_gradient=False) src_mask = paddle.to_tensor(self.attn_mask, stop_gradient=True) - c = self.head_dim**(-0.5) + c = self.head_dim ** (-0.5) # [batch_size, msa_len, res_len, q_dim], [q_dim, num_heads, head_dim] # -> [batch_size, msa_len, res_len, num_heads, head_dim] q = paddle.einsum('nbqa,ahc->nbqhc', query, q_weight) * c @@ -134,8 +145,9 @@ class TestFusedGateAttentionOp(OpTest): # -> [batch_size, msa_len, num_heads, res_len, m_size] logits = logits + src_mask if self.bias_attr: - nonbatched_bias = paddle.to_tensor(self.nonbatched_bias, - stop_gradient=False) + nonbatched_bias = paddle.to_tensor( + self.nonbatched_bias, stop_gradient=False + ) # [batch_size, msa_len, num_heads, res_len, m_size], [batch_size, 1, num_heads, res_len, m_size] # -> [batch_size, msa_len, num_heads, res_len, m_size] logits = logits + nonbatched_bias @@ -157,14 +169,22 @@ class TestFusedGateAttentionOp(OpTest): # gate_values = paddle.einsum('nbqc,chv->nbqhv', query, # gating_w) + gating_b gating_w_2d = paddle.reshape( - gating_w, shape=[self.q_dim, self.num_heads * 
self.head_dim]) + gating_w, shape=[self.q_dim, self.num_heads * self.head_dim] + ) gate_values_4d = paddle.matmul(query, gating_w_2d) - gate_values = paddle.reshape( - gate_values_4d, - shape=[ - self.batch_size, self.msa_len, self.res_len, self.num_heads, - self.head_dim - ]) + gating_b + gate_values = ( + paddle.reshape( + gate_values_4d, + shape=[ + self.batch_size, + self.msa_len, + self.res_len, + self.num_heads, + self.head_dim, + ], + ) + + gating_b + ) gate_values = nn.functional.sigmoid(gate_values) gate_out = fmha_out * gate_values else: @@ -181,20 +201,32 @@ class TestFusedGateAttentionOp(OpTest): gate_out, shape=[ self.batch_size * self.msa_len * self.res_len, - self.num_heads * self.head_dim - ]) + self.num_heads * self.head_dim, + ], + ) output_w_2d = paddle.reshape( - output_w, shape=[self.num_heads * self.head_dim, self.out_dim]) + output_w, shape=[self.num_heads * self.head_dim, self.out_dim] + ) out_2d = paddle.matmul(gate_out_2d, output_w_2d) - out = paddle.reshape( - out_2d, - shape=[self.batch_size, self.msa_len, self.res_len, self.out_dim - ]) + output_b - - paddle.autograd.backward([out], [paddle.to_tensor(self.dout)], - retain_graph=True) - return self.collect_outputs(query, key, softmax_out, fmha_out, gate_out, - out) + out = ( + paddle.reshape( + out_2d, + shape=[ + self.batch_size, + self.msa_len, + self.res_len, + self.out_dim, + ], + ) + + output_b + ) + + paddle.autograd.backward( + [out], [paddle.to_tensor(self.dout)], retain_graph=True + ) + return self.collect_outputs( + query, key, softmax_out, fmha_out, gate_out, out + ) def get_fused_gate_attention_out(self): paddle.disable_static(place=paddle.CUDAPlace(0)) @@ -216,8 +248,9 @@ class TestFusedGateAttentionOp(OpTest): src_mask = paddle.to_tensor(self.attn_mask, stop_gradient=True) if self.bias_attr: - nonbatched_bias = paddle.to_tensor(self.nonbatched_bias, - stop_gradient=False) + nonbatched_bias = paddle.to_tensor( + self.nonbatched_bias, stop_gradient=False + ) else: nonbatched_bias = None if self.has_gating: @@ -230,18 +263,42 @@ class TestFusedGateAttentionOp(OpTest): output_w = paddle.to_tensor(self.output_w, stop_gradient=False) output_b = paddle.to_tensor(self.output_b, stop_gradient=False) - _, _, _, _, softmax_out, fmha_out, gate_out, out = _legacy_C_ops.fused_gate_attention( - query, key, q_weight, k_weight, v_weight, qkv_weight, - nonbatched_bias, src_mask, gating_w, gating_b, output_w, output_b, - 'has_gating', self.has_gating, 'merge_qkv', self.merge_qkv) - - paddle.autograd.backward([out], [paddle.to_tensor(self.dout)], - retain_graph=True) - return self.collect_outputs(query, key, softmax_out, fmha_out, gate_out, - out) + ( + _, + _, + _, + _, + softmax_out, + fmha_out, + gate_out, + out, + ) = _legacy_C_ops.fused_gate_attention( + query, + key, + q_weight, + k_weight, + v_weight, + qkv_weight, + nonbatched_bias, + src_mask, + gating_w, + gating_b, + output_w, + output_b, + 'has_gating', + self.has_gating, + 'merge_qkv', + self.merge_qkv, + ) + + paddle.autograd.backward( + [out], [paddle.to_tensor(self.dout)], retain_graph=True + ) + return self.collect_outputs( + query, key, softmax_out, fmha_out, gate_out, out + ) def check(self, ref, out, atol, rtol, check_equal, name): - def _convert(value): if self.dtype == "bfloat16": return convert_uint16_to_float(value) @@ -250,19 +307,25 @@ class TestFusedGateAttentionOp(OpTest): if check_equal: self.assertTrue( np.equal(_convert(ref), _convert(out)).all(), - "Checking < {} > failed!".format(name)) + "Checking < {} > failed!".format(name), + ) 
else: np.testing.assert_allclose( _convert(ref), _convert(out), atol=atol, rtol=rtol, - err_msg="Checking < {} > failed!".format(name)) + err_msg="Checking < {} > failed!".format(name), + ) def check_output_and_grad(self, atol, rtol): output_names = [ - "softmax_out", "fmha_out", "gate_out", "out", "query_grad", - "key_grad" + "softmax_out", + "fmha_out", + "gate_out", + "out", + "query_grad", + "key_grad", ] outputs_ref = self.get_reference_out() outputs_fused = self.get_fused_gate_attention_out() @@ -278,22 +341,26 @@ class TestFusedGateAttentionOp(OpTest): # that in fused ops, check_equal is set to False and we use allclose # to check the correctness. check_equal = False - self.check(ref_res.numpy(), fused_res.numpy(), atol, rtol, - check_equal, output_names[i]) + self.check( + ref_res.numpy(), + fused_res.numpy(), + atol, + rtol, + check_equal, + output_names[i], + ) def test_output_and_grad(self): self.check_output_and_grad(atol=1e-5, rtol=1e-6) class TestMergeQKVLargeBatchSizeCase(TestFusedGateAttentionOp): - def config(self): super().config() self.batch_size = 2 class TestSeparatedQKVCase(TestFusedGateAttentionOp): - def config(self): self.dtype = "float32" self.has_gating = False @@ -310,7 +377,6 @@ class TestSeparatedQKVCase(TestFusedGateAttentionOp): class TestMergeQKVNoBiasGatingCase(TestFusedGateAttentionOp): - def config(self): super().config() self.has_gating = False @@ -318,7 +384,6 @@ class TestMergeQKVNoBiasGatingCase(TestFusedGateAttentionOp): class TestMergeQKVFp16Case(TestFusedGateAttentionOp): - def config(self): super().config() self.dtype = "float16" @@ -330,7 +395,6 @@ class TestMergeQKVFp16Case(TestFusedGateAttentionOp): class TestMergeQKVLargeBatchSizeFp16Case(TestMergeQKVFp16Case): - def config(self): super().config() self.batch_size = 2 @@ -338,10 +402,9 @@ class TestMergeQKVLargeBatchSizeFp16Case(TestMergeQKVFp16Case): @unittest.skipIf( not core.is_compiled_with_cuda() or get_cuda_version() < 11000, - "core is not compiled with CUDA and cuda version need larger than or equal to 11.3" + "core is not compiled with CUDA and cuda version need larger than or equal to 11.3", ) class TestMergeQKVBF16Case(TestFusedGateAttentionOp): - def config(self): super().config() self.dtype = "bfloat16" @@ -351,7 +414,6 @@ class TestMergeQKVBF16Case(TestFusedGateAttentionOp): class TestMergeQKVLargeBatchSizeBF16Case(TestMergeQKVBF16Case): - def config(self): super().config() self.batch_size = 2 diff --git a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py index ceefa72631c2ef3a0cb373905508b5d8d643131f..322e63dd4fd45ac3dd2f6107b9546ba868faf405 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py @@ -29,10 +29,10 @@ def get_outputs(DOut, X, Y): @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest): - def setUp(self): self.op_type = "fused_gemm_epilogue_grad" self.place = core.CUDAPlace(0) @@ -41,13 +41,14 @@ class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': 
np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - DX, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + DX, DY, DBias = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DX': DX, 'DY': DY, 'DBias': DBias} def init_dtype_type(self): @@ -56,38 +57,41 @@ class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYBiasFP32( - TestFuseGemmEpilogueGradOpDXYBiasFP16): - + TestFuseGemmEpilogueGradOpDXYBiasFP16 +): def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYBiasFP64( - TestFuseGemmEpilogueGradOpDXYBiasFP16): - + TestFuseGemmEpilogueGradOpDXYBiasFP16 +): def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest): - def setUp(self): self.op_type = "fused_gemm_epilogue_grad" self.place = core.CUDAPlace(0) @@ -96,13 +100,14 @@ class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - _, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + _, DY, DBias = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DY': DY, 'DBias': DBias} def init_dtype_type(self): @@ -111,38 +116,41 @@ class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestFuseGemmEpilogueGradOpDYBiasFP32(TestFuseGemmEpilogueGradOpDYBiasFP16 - ): - +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) +class TestFuseGemmEpilogueGradOpDYBiasFP32( + TestFuseGemmEpilogueGradOpDYBiasFP16 +): def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestFuseGemmEpilogueGradOpDYBiasFP64(TestFuseGemmEpilogueGradOpDYBiasFP16 - ): - +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) +class TestFuseGemmEpilogueGradOpDYBiasFP64( + TestFuseGemmEpilogueGradOpDYBiasFP16 +): 
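Aside on the fused_gemm_epilogue_grad cases being reformatted here (not part of the patch): for the 'none' activation and untransposed operands used in these tests, the expected DX/DY/DBias outputs reduce to the standard dense-layer backward. The helper below is a minimal sketch under that assumption; the test file's own get_outputs body is not shown in this hunk, and the name gemm_epilogue_grad_ref is hypothetical.

import numpy as np

def gemm_epilogue_grad_ref(d_out, x, y):
    # Backward of out = x @ y + bias with identity ('none') activation:
    # d_x = d_out @ y^T, d_y = x^T @ d_out, d_bias = column-wise sum of d_out.
    d_x = np.dot(d_out, y.T)
    d_y = np.dot(x.T, d_out)
    d_bias = d_out.sum(axis=0)
    return d_x, d_y, d_bias

# Shapes matching the FP16/FP32/FP64 cases above: DOut (8, 128), X (8, 4), Y (4, 128).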
def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDYFP16(OpTest): - def setUp(self): self.op_type = "fused_gemm_epilogue_grad" self.place = core.CUDAPlace(0) @@ -151,13 +159,14 @@ class TestFuseGemmEpilogueGradOpDYFP16(OpTest): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - _, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + _, DY, _ = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DY': DY} def init_dtype_type(self): @@ -166,36 +175,37 @@ class TestFuseGemmEpilogueGradOpDYFP16(OpTest): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDYFP32(TestFuseGemmEpilogueGradOpDYFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDYFP64(TestFuseGemmEpilogueGradOpDYFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYFP16(OpTest): - def setUp(self): self.op_type = "fused_gemm_epilogue_grad" self.place = core.CUDAPlace(0) @@ -204,13 +214,14 @@ class TestFuseGemmEpilogueGradOpDXYFP16(OpTest): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - DX, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + DX, DY, _ = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DX': DX, 'DY': DY} def init_dtype_type(self): @@ -219,26 +230,27 @@ class TestFuseGemmEpilogueGradOpDXYFP16(OpTest): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYFP32(TestFuseGemmEpilogueGradOpDXYFP16): - def init_dtype_type(self): self.dtype = 
np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueGradOpDXYFP64(TestFuseGemmEpilogueGradOpDXYFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 diff --git a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py index 66ea9d7fa833ca48d3ea6fe7d8bf81c159c54211..a3af9cc194f73b3c1e902b433c6c5c6fecdec68b 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py @@ -28,8 +28,11 @@ def is_fused_gemm_epilogue_supported(): def gelu(x): - y_ref = 0.5 * x * ( - 1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + y_ref = ( + 0.5 + * x + * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + ) return y_ref.astype(x.dtype) @@ -54,10 +57,10 @@ class TestFuseGemmBase(OpTest): @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -66,12 +69,12 @@ class TestFuseGemmEpilogueOpReluMMFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], - 'relu') + 'Out': get_output( + self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], 'relu' + ) } self.attrs = {"activation": 'relu'} @@ -81,36 +84,37 @@ class TestFuseGemmEpilogueOpReluMMFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP32(TestFuseGemmEpilogueOpReluMMFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP64(TestFuseGemmEpilogueOpReluMMFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -119,12 +123,15 @@ class TestFuseGemmEpilogueOpReluMTMFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((4, 
8)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'].T, self.inputs['Y'], - self.inputs['Bias'], 'relu') + 'Out': get_output( + self.inputs['X'].T, + self.inputs['Y'], + self.inputs['Bias'], + 'relu', + ) } self.attrs = {'trans_x': True, "activation": 'relu'} @@ -134,36 +141,37 @@ class TestFuseGemmEpilogueOpReluMTMFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP32(TestFuseGemmEpilogueOpReluMTMFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP64(TestFuseGemmEpilogueOpReluMTMFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMTFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -172,12 +180,15 @@ class TestFuseGemmEpilogueOpReluMMTFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, 'Y': np.random.random((128, 4)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'], self.inputs['Y'].T, - self.inputs['Bias'], 'relu') + 'Out': get_output( + self.inputs['X'], + self.inputs['Y'].T, + self.inputs['Bias'], + 'relu', + ) } self.attrs = {'trans_y': True, "activation": 'relu'} @@ -187,36 +198,37 @@ class TestFuseGemmEpilogueOpReluMMTFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMTFP32(TestFuseGemmEpilogueOpReluMMTFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMTFP64(TestFuseGemmEpilogueOpReluMMTFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not 
compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMTFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -225,12 +237,15 @@ class TestFuseGemmEpilogueOpReluMTMTFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((4, 8)).astype(self.dtype) - 0.5, 'Y': np.random.random((128, 4)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'].T, self.inputs['Y'].T, - self.inputs['Bias'], 'relu') + 'Out': get_output( + self.inputs['X'].T, + self.inputs['Y'].T, + self.inputs['Bias'], + 'relu', + ) } self.attrs = {'trans_x': True, 'trans_y': True, "activation": 'relu'} @@ -240,36 +255,37 @@ class TestFuseGemmEpilogueOpReluMTMTFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMTFP32(TestFuseGemmEpilogueOpReluMTMTFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMTFP64(TestFuseGemmEpilogueOpReluMTMTFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP16MultiDimX(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -278,12 +294,15 @@ class TestFuseGemmEpilogueOpReluMMFP16MultiDimX(TestFuseGemmBase): self.inputs = { 'X': np.random.random((2, 2, 8, 4)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'].reshape((-1, 4)), self.inputs['Y'], - self.inputs['Bias'], 'relu').reshape((2, 2, 8, 128)) + 'Out': get_output( + self.inputs['X'].reshape((-1, 4)), + self.inputs['Y'], + self.inputs['Bias'], + 'relu', + ).reshape((2, 2, 8, 128)) } self.attrs = {"activation": 'relu'} @@ -293,38 +312,41 @@ class TestFuseGemmEpilogueOpReluMMFP16MultiDimX(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP32MultiDimX( - TestFuseGemmEpilogueOpReluMMFP16MultiDimX): - + 
TestFuseGemmEpilogueOpReluMMFP16MultiDimX +): def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMMFP64MultiDimX( - TestFuseGemmEpilogueOpReluMMFP16MultiDimX): - + TestFuseGemmEpilogueOpReluMMFP16MultiDimX +): def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP16MultiDimX(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -333,12 +355,15 @@ class TestFuseGemmEpilogueOpReluMTMFP16MultiDimX(TestFuseGemmBase): self.inputs = { 'X': np.random.random((4, 2, 2, 8)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.outputs = { - 'Out': - get_output(self.inputs['X'].reshape((4, -1)).T, self.inputs['Y'], - self.inputs['Bias'], 'relu').reshape((2, 2, 8, 128)) + 'Out': get_output( + self.inputs['X'].reshape((4, -1)).T, + self.inputs['Y'], + self.inputs['Bias'], + 'relu', + ).reshape((2, 2, 8, 128)) } self.attrs = {'trans_x': True, "activation": 'relu'} @@ -348,38 +373,41 @@ class TestFuseGemmEpilogueOpReluMTMFP16MultiDimX(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP32MultiDimX( - TestFuseGemmEpilogueOpReluMTMFP16MultiDimX): - + TestFuseGemmEpilogueOpReluMTMFP16MultiDimX +): def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpReluMTMFP64MultiDimX( - TestFuseGemmEpilogueOpReluMTMFP16MultiDimX): - + TestFuseGemmEpilogueOpReluMTMFP16MultiDimX +): def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpGeluMMFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -388,15 +416,15 @@ class TestFuseGemmEpilogueOpGeluMMFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'gelu'} self.outputs = { - 'Out': - 
get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], - 'gelu') + 'Out': get_output( + self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], 'gelu' + ) } def init_dtype_type(self): @@ -405,36 +433,37 @@ class TestFuseGemmEpilogueOpGeluMMFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpGeluMMFP32(TestFuseGemmEpilogueOpGeluMMFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpGeluMMFP64(TestFuseGemmEpilogueOpGeluMMFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpNoneMMFP16(TestFuseGemmBase): - def setUp(self): self.op_type = "fused_gemm_epilogue" self.place = core.CUDAPlace(0) @@ -443,15 +472,15 @@ class TestFuseGemmEpilogueOpNoneMMFP16(TestFuseGemmBase): self.inputs = { 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, - 'Bias': np.random.random((128, )).astype(self.dtype) - 0.5 + 'Bias': np.random.random((128,)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} self.outputs = { - 'Out': - get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], - 'none') + 'Out': get_output( + self.inputs['X'], self.inputs['Y'], self.inputs['Bias'], 'none' + ) } def init_dtype_type(self): @@ -460,26 +489,27 @@ class TestFuseGemmEpilogueOpNoneMMFP16(TestFuseGemmBase): def test_check_output(self): if self.dtype == np.float16 and not core.is_float16_supported( - self.place): + self.place + ): return self.check_output_with_place(self.place, atol=self.atol) @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpNoneMMFP32(TestFuseGemmEpilogueOpNoneMMFP16): - def init_dtype_type(self): self.dtype = np.single self.atol = 1e-6 @skip_check_grad_ci(reason="no grap op") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFuseGemmEpilogueOpNoneMMFP64(TestFuseGemmEpilogueOpNoneMMFP16): - def init_dtype_type(self): self.dtype = np.double self.atol = 1e-6 @@ -522,9 +552,9 @@ def matmul_grad(x, y, bias, dz, trans_x, trans_y): @unittest.skipIf( not is_fused_gemm_epilogue_supported(), - "fused_gemm_epilogue is only supported when CUDA version >= 11.6") + "fused_gemm_epilogue is only supported when CUDA version >= 11.6", +) class TestEagerFusedGemmEpilogue(unittest.TestCase): - def setUp(self): paddle.set_device('gpu') @@ -532,22 +562,22 @@ class 
TestEagerFusedGemmEpilogue(unittest.TestCase): paddle.disable_static() x_np = np.random.random((8, 4)).astype(np.float64) - 0.5 y_np = np.random.random((4, 128)).astype(np.float64) - 0.5 - bias_np = np.random.random((128, )).astype(np.float64) - 0.5 + bias_np = np.random.random((128,)).astype(np.float64) - 0.5 x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) bias = paddle.to_tensor(bias_np) x.stop_gradient = False y.stop_gradient = False - out1 = core.eager.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, - 'activation', 'none') - out2 = core.eager.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, - 'activation', 'relu') - out3 = core.eager.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, - 'activation', 'gelu') + out1 = core.eager.ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'none' + ) + out2 = core.eager.ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'relu' + ) + out3 = core.eager.ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'gelu' + ) out_np1 = get_output(x_np, y_np, bias_np, 'none') out_np2 = get_output(x_np, y_np, bias_np, 'relu') @@ -557,13 +587,16 @@ class TestEagerFusedGemmEpilogue(unittest.TestCase): np.testing.assert_allclose(out2, out_np2, rtol=1e-05) np.testing.assert_allclose(out3, out_np3, rtol=1e-05) - out_grad_np1 = np.random.randint(low=-20, high=20, - size=out_np1.shape).astype(np.float64) - paddle.autograd.backward(out1, - grad_tensors=[paddle.to_tensor(out_grad_np1)]) + out_grad_np1 = np.random.randint( + low=-20, high=20, size=out_np1.shape + ).astype(np.float64) + paddle.autograd.backward( + out1, grad_tensors=[paddle.to_tensor(out_grad_np1)] + ) x_grad_np, y_grad_np, bias_grad_np = matmul_grad( - x_np, y_np, bias_np, out_grad_np1, False, False) + x_np, y_np, bias_np, out_grad_np1, False, False + ) np.testing.assert_allclose(x.grad.numpy(), x_grad_np, rtol=1e-05) self.assertEqual(y_grad_np.shape, y_np.shape) np.testing.assert_allclose(y.grad.numpy(), y_grad_np, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py index b26b5e2f04634e5d9c1a5c731b9193230f8bbc04..a4d152c75b997a5e0f2360bb74ba06c46b40120d 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py +++ b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py @@ -64,9 +64,9 @@ def matmul_grad(x, y, bias, dz, trans_x, trans_y): @unittest.skipIf( not is_fused_matmul_bias_supported(), - "fused_gemm_epilogue is only supported when CUDA version >= 11.6") + "fused_gemm_epilogue is only supported when CUDA version >= 11.6", +) class TestFusedMatmulBias(unittest.TestCase): - def setUp(self): paddle.set_device('gpu') @@ -103,7 +103,8 @@ class TestFusedMatmulBias(unittest.TestCase): paddle.autograd.backward(z, grad_tensors=[paddle.to_tensor(z_grad_np)]) x_grad_np, y_grad_np, bias_grad_np = matmul_grad( - x_np, y_np, bias_np, z_grad_np, trans_x, trans_y) + x_np, y_np, bias_np, z_grad_np, trans_x, trans_y + ) np.testing.assert_array_equal(x.grad.numpy(), x_grad_np) self.assertEqual(y_grad_np.shape, y_np.shape) np.testing.assert_array_equal(y.grad.numpy(), y_grad_np) @@ -118,8 +119,9 @@ class TestFusedMatmulBias(unittest.TestCase): for trans_x in [False, True]: for trans_y in [False, True]: for need_bias in [False, True]: - self.rand_test_base(m, n, k, trans_x, trans_y, need_bias, - dtype, seed) + 
self.rand_test_base( + m, n, k, trans_x, trans_y, need_bias, dtype, seed + ) def test_fp32(self): self.rand_test(30, 40, 50, np.float32) @@ -130,9 +132,9 @@ class TestFusedMatmulBias(unittest.TestCase): @unittest.skipIf( not is_fused_matmul_bias_supported(), - "fused_gemm_epilogue is only supported when CUDA version >= 11.6") + "fused_gemm_epilogue is only supported when CUDA version >= 11.6", +) class TestFusedLinear(unittest.TestCase): - def check_fused_linear(self, transpose): x = paddle.randn([30, 40]) linear = FusedLinear(40, 50, transpose_weight=transpose) @@ -149,9 +151,9 @@ class TestFusedLinear(unittest.TestCase): @unittest.skipIf( not is_fused_matmul_bias_supported(), - "fused_gemm_epilogue is only supported when CUDA version >= 11.6") + "fused_gemm_epilogue is only supported when CUDA version >= 11.6", +) class TestStaticGraph(unittest.TestCase): - def test_static_graph(self): paddle.enable_static() x = paddle.static.data(name='x', dtype='float32', shape=[-1, 100]) diff --git a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_int8_op.py b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_int8_op.py index 3384e86668eaee9cb1b0ca452aedcefa02a494f3..e3fb41412fd3900d26a942aee130afe65d943b01 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_int8_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_int8_op.py @@ -68,27 +68,69 @@ def fused_multi_transformer_int8( ffn1_in_scale=[], ffn2_in_scale=[], ): - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer cache_kv_out, final_out = _legacy_C_ops.fused_multi_transformer_int8( - x, ln_scales, ln_biases, qkv_weights, qkv_biases, cache_kvs, time_step, - attn_mask, linear_weights, linear_biases, ffn_ln_scales, ffn_ln_biases, - ffn1_weights, ffn1_biases, ffn2_weights, ffn2_biases, qkv_out_scales, - out_linear_out_scales, ffn1_out_scales, ffn2_out_scales, cache_kvs, - 'num_head', num_head, 'dim_head', dim_head, 'dim_ffn', dim_ffn, - 'qkv_in_scale', qkv_in_scale, 'out_linear_in_scale', - out_linear_in_scale, 'ffn1_in_scale', ffn1_in_scale, 'ffn2_in_scale', - ffn2_in_scale, 'pre_layer_norm', pre_layer_norm, 'epsilon', epsilon, - 'dropout_rate', dropout_rate, 'is_test', not training, - 'dropout_implementation', mode, 'act_method', activation, 'trans_qkvw', - trans_qkvw, 'ring_id', ring_id) + x, + ln_scales, + ln_biases, + qkv_weights, + qkv_biases, + cache_kvs, + time_step, + attn_mask, + linear_weights, + linear_biases, + ffn_ln_scales, + ffn_ln_biases, + ffn1_weights, + ffn1_biases, + ffn2_weights, + ffn2_biases, + qkv_out_scales, + out_linear_out_scales, + ffn1_out_scales, + ffn2_out_scales, + cache_kvs, + 'num_head', + num_head, + 'dim_head', + dim_head, + 'dim_ffn', + dim_ffn, + 'qkv_in_scale', + qkv_in_scale, + 'out_linear_in_scale', + out_linear_in_scale, + 'ffn1_in_scale', + ffn1_in_scale, + 'ffn2_in_scale', + ffn2_in_scale, + 'pre_layer_norm', + pre_layer_norm, + 'epsilon', + epsilon, + 'dropout_rate', + dropout_rate, + 'is_test', + not training, + 'dropout_implementation', + mode, + 'act_method', + activation, + 'trans_qkvw', + trans_qkvw, + 'ring_id', + ring_id, + ) if cache_kvs is not None: return final_out, cache_kv_out return final_out class TestFusedMultiTransformerInt8Op(unittest.TestCase): - def setUp(self): self.config() self.generate_input_data() @@ -110,12 +152,12 @@ class 
TestFusedMultiTransformerInt8Op(unittest.TestCase): self.__class__.no_need_check_grad = True paddle.set_default_dtype(np.float32) - self.norm = LayerNorm(self.embed_dim, - weight_attr=False, - bias_attr=False) - self.ffn_norm = LayerNorm(self.embed_dim, - weight_attr=False, - bias_attr=False) + self.norm = LayerNorm( + self.embed_dim, weight_attr=False, bias_attr=False + ) + self.ffn_norm = LayerNorm( + self.embed_dim, weight_attr=False, bias_attr=False + ) paddle.set_default_dtype(self.x_type) self.dropout = Dropout(self.dropout_prob, mode="upscale_in_train") @@ -127,7 +169,7 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): self.x_type = np.float32 self.attn_mask_type = np.float64 - #self.attn_mask_type = np.bool + # self.attn_mask_type = np.bool self.pre_layer_norm = True self.has_attn_mask = True @@ -154,31 +196,38 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): self.weight_attr = None self.bias_attr = None self.kdim, self.vdim = self.embed_dim, self.embed_dim - self.key_length, self.value_length = self.query_length, self.query_length + self.key_length, self.value_length = ( + self.query_length, + self.query_length, + ) def generate_input_data(self): - self.query = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) - q_weight = np.random.randint(-64, 64, [self.embed_dim, self.embed_dim], - np.int32).astype('float64') - k_weight = np.random.randint(-64, 64, [self.kdim, self.embed_dim], - np.int32).astype('float64') - v_weight = np.random.randint(-64, 64, [self.vdim, self.embed_dim], - np.int32).astype('float64') + self.query = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) + q_weight = np.random.randint( + -64, 64, [self.embed_dim, self.embed_dim], np.int32 + ).astype('float64') + k_weight = np.random.randint( + -64, 64, [self.kdim, self.embed_dim], np.int32 + ).astype('float64') + v_weight = np.random.randint( + -64, 64, [self.vdim, self.embed_dim], np.int32 + ).astype('float64') self.q_weight_tensor = paddle.to_tensor(q_weight) self.k_weight_tensor = paddle.to_tensor(k_weight) self.v_weight_tensor = paddle.to_tensor(v_weight) - out_weight = np.random.randint(-64, 64, - [self.embed_dim, self.embed_dim], - np.int32).astype('float64') - ffn1_weight = np.random.randint(-64, 64, - [self.embed_dim, 4 * self.embed_dim], - np.int32).astype('float64') - ffn2_weight = np.random.randint(-64, 64, - [4 * self.embed_dim, self.embed_dim], - np.int32).astype('float64') + out_weight = np.random.randint( + -64, 64, [self.embed_dim, self.embed_dim], np.int32 + ).astype('float64') + ffn1_weight = np.random.randint( + -64, 64, [self.embed_dim, 4 * self.embed_dim], np.int32 + ).astype('float64') + ffn2_weight = np.random.randint( + -64, 64, [4 * self.embed_dim, self.embed_dim], np.int32 + ).astype('float64') self.out_weight_tensor = paddle.to_tensor(out_weight) self.ffn1_weight_tensor = paddle.to_tensor(ffn1_weight) @@ -193,12 +242,14 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): self.v_proj_bias_tensor = paddle.to_tensor(v_proj_bias) out_linear_proj_bias = np.random.rand(self.embed_dim).astype( - self.x_type) + self.x_type + ) ffn1_proj_bias = np.random.rand(4 * self.embed_dim).astype(self.x_type) ffn2_proj_bias = np.random.rand(self.embed_dim).astype(self.x_type) self.out_linear_proj_bias_tensor = paddle.to_tensor( - out_linear_proj_bias) + out_linear_proj_bias + ) self.ffn1_proj_bias_tensor = paddle.to_tensor(ffn1_proj_bias) self.ffn2_proj_bias_tensor = 
paddle.to_tensor(ffn2_proj_bias) @@ -214,9 +265,13 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): self.ffn2_out_scales = [] if self.has_cache_kv: - self.cache_kv = np.random.rand(2, self.batch_size, self.num_heads, - self.cache_length, - self.head_dim).astype(self.x_type) + self.cache_kv = np.random.rand( + 2, + self.batch_size, + self.num_heads, + self.cache_length, + self.head_dim, + ).astype(self.x_type) if self.gen_cache_kv: self.cache_kv[:] = 0 @@ -229,7 +284,8 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): # [B, n_head, seq_len, out_seq_len] self.attn_mask = np.ones( (self.batch_size, 1, self.query_length, out_seq_len), - dtype=self.attn_mask_type) + dtype=self.attn_mask_type, + ) if self.attn_mask_type == np.int64: self.attn_mask = np.tril(self.attn_mask) elif self.attn_mask_type == np.float64: @@ -246,7 +302,8 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): self.attn_mask = np.tril(self.attn_mask) else: raise ValueError( - "'attn_mask_type' should be 'int64' or 'float64'.") + "'attn_mask_type' should be 'int64' or 'float64'." + ) else: self.attn_mask = None @@ -289,8 +346,11 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): q = paddle.nn.functional.linear(ln1_out, self.q_weight_tensor) # de quant q = paddle.cast( - paddle.cast(q, 'float32') * self.qkv_in_scales[i] / - self.qkv_out_scales[i], self.x_type) + paddle.cast(q, 'float32') + * self.qkv_in_scales[i] + / self.qkv_out_scales[i], + self.x_type, + ) q = q + self.q_proj_bias_tensor q = tensor.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim]) @@ -298,13 +358,19 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): k = paddle.nn.functional.linear(ln1_out, self.k_weight_tensor) k = paddle.cast( - paddle.cast(k, 'float32') * self.qkv_in_scales[i] / - self.qkv_out_scales[i], self.x_type) + paddle.cast(k, 'float32') + * self.qkv_in_scales[i] + / self.qkv_out_scales[i], + self.x_type, + ) k = k + self.k_proj_bias_tensor v = paddle.nn.functional.linear(ln1_out, self.v_weight_tensor) v = paddle.cast( - paddle.cast(v, 'float32') * self.qkv_in_scales[i] / - self.qkv_out_scales[i], self.x_type) + paddle.cast(v, 'float32') + * self.qkv_in_scales[i] + / self.qkv_out_scales[i], + self.x_type, + ) v = v + self.v_proj_bias_tensor k = tensor.reshape(x=k, shape=[0, 0, self.num_heads, self.head_dim]) @@ -332,10 +398,9 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): # [B, n_head, seq_len, head_dim] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, out_seq_len] - qk_out = layers.matmul(x=q_out, - y=k_out, - transpose_y=True, - alpha=self.head_dim**-0.5) + qk_out = layers.matmul( + x=q_out, y=k_out, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.debug: print('qk out is') @@ -355,10 +420,12 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): print('softmax out is') print(softmax_out[0][0][0]) if self.dropout_prob: - dropout_out = F.dropout(softmax_out, - self.dropout_prob, - training=self.training, - mode="upscale_in_train") + dropout_out = F.dropout( + softmax_out, + self.dropout_prob, + training=self.training, + mode="upscale_in_train", + ) # [B, n_head, seq_len, out_seq_len] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, head_dim] qktv_out = tensor.matmul(dropout_out, v_out) @@ -370,23 +437,30 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): print('fmha out is') print(fmha_out[0][0][0]) out_linear_in = tensor.reshape( - x=fmha_out, shape=[0, 0, fmha_out.shape[2] * fmha_out.shape[3]]) + x=fmha_out, shape=[0, 0, 
fmha_out.shape[2] * fmha_out.shape[3]] + ) - max_v = paddle.max(paddle.abs(paddle.cast(out_linear_in, - 'float32')))[0] + max_v = paddle.max( + paddle.abs(paddle.cast(out_linear_in, 'float32')) + )[0] # self.out_linear_in_scales.append(127.0 / max_v) self.out_linear_in_scales.append(max_v) self.out_linear_out_scales.append((127.0 * 127.0)) - out_linear_in = self.fake_quant(out_linear_in, - self.out_linear_in_scales[i]) + out_linear_in = self.fake_quant( + out_linear_in, self.out_linear_in_scales[i] + ) - out = paddle.nn.functional.linear(out_linear_in, - self.out_weight_tensor) + out = paddle.nn.functional.linear( + out_linear_in, self.out_weight_tensor + ) out = paddle.cast( - paddle.cast(out, 'float32') * self.out_linear_in_scales[i] / - self.out_linear_out_scales[i], self.x_type) + paddle.cast(out, 'float32') + * self.out_linear_in_scales[i] + / self.out_linear_out_scales[i], + self.x_type, + ) out = out + self.out_linear_proj_bias_tensor @@ -400,18 +474,23 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): if self.pre_layer_norm: ffn_ln_out = self.ffn_norm(attn_out) - max_v = paddle.max(paddle.abs(paddle.cast(ffn_ln_out, - 'float32')))[0] + max_v = paddle.max(paddle.abs(paddle.cast(ffn_ln_out, 'float32')))[ + 0 + ] self.ffn1_in_scales.append(max_v) self.ffn1_out_scales.append((127.0 * 127.0)) ffn_ln_out = self.fake_quant(ffn_ln_out, self.ffn1_in_scales[i]) - ffn1_out = paddle.nn.functional.linear(ffn_ln_out, - self.ffn1_weight_tensor) + ffn1_out = paddle.nn.functional.linear( + ffn_ln_out, self.ffn1_weight_tensor + ) ffn1_out = paddle.cast( - paddle.cast(ffn1_out, 'float32') * self.ffn1_in_scales[i] / - self.ffn1_out_scales[i], self.x_type) + paddle.cast(ffn1_out, 'float32') + * self.ffn1_in_scales[i] + / self.ffn1_out_scales[i], + self.x_type, + ) ffn1_out = ffn1_out + self.ffn1_proj_bias_tensor ffn1_out = self.dropout(self.activation(ffn1_out)) @@ -423,12 +502,16 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): # print('ffn2_in_scales ', i, self.ffn2_in_scales[i]) ffn1_out = self.fake_quant(ffn1_out, self.ffn2_in_scales[i]) - ffn2_out = paddle.nn.functional.linear(ffn1_out, - self.ffn2_weight_tensor) + ffn2_out = paddle.nn.functional.linear( + ffn1_out, self.ffn2_weight_tensor + ) ffn2_out = paddle.cast( - paddle.cast(ffn2_out, 'float32') * self.ffn2_in_scales[i] / - self.ffn2_out_scales[i], self.x_type) + paddle.cast(ffn2_out, 'float32') + * self.ffn2_in_scales[i] + / self.ffn2_out_scales[i], + self.x_type, + ) ffn2_out = ffn2_out + self.ffn2_proj_bias_tensor residual_out = attn_out + self.dropout(ffn2_out) @@ -456,26 +539,35 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): k_proj_weight = self.k_weight_tensor.numpy().transpose((1, 0)) v_proj_weight = self.v_weight_tensor.numpy().transpose((1, 0)) qkv_weight = np.concatenate( - (q_proj_weight, k_proj_weight, v_proj_weight)) + (q_proj_weight, k_proj_weight, v_proj_weight) + ) qkv_weight = qkv_weight.reshape( - (3, self.num_heads, self.head_dim, self.embed_dim)) + (3, self.num_heads, self.head_dim, self.embed_dim) + ) qkv_weight_tensor = paddle.to_tensor(qkv_weight) qkv_weight_tensor = paddle.cast(qkv_weight_tensor, 'int8') out_weight_tensor = paddle.cast( paddle.to_tensor(self.out_weight_tensor.numpy().transpose((1, 0))), - 'int8') + 'int8', + ) ffn1_weight_tensor = paddle.cast( paddle.to_tensor(self.ffn1_weight_tensor.numpy().transpose((1, 0))), - 'int8') + 'int8', + ) ffn2_weight_tensor = paddle.cast( paddle.to_tensor(self.ffn2_weight_tensor.numpy().transpose((1, 0))), - 'int8') + 'int8', + ) 
qkv_bias = np.concatenate( - (self.q_proj_bias_tensor.numpy(), self.k_proj_bias_tensor.numpy(), - self.v_proj_bias_tensor.numpy())) + ( + self.q_proj_bias_tensor.numpy(), + self.k_proj_bias_tensor.numpy(), + self.v_proj_bias_tensor.numpy(), + ) + ) qkv_bias = qkv_bias.reshape((3, self.num_heads, self.head_dim)) qkv_bias_tensor = paddle.to_tensor(qkv_bias) @@ -486,11 +578,16 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): cache_kvs = [] max_seq_length = (self.cache_length + 128) // 128 * 128 - cache_kv = np.zeros([ - 2, self.batch_size, self.num_heads, max_seq_length, - self.head_dim - ], - dtype=self.x_type) + cache_kv = np.zeros( + [ + 2, + self.batch_size, + self.num_heads, + max_seq_length, + self.head_dim, + ], + dtype=self.x_type, + ) elems = 4 if self.x_type is np.float16: @@ -502,25 +599,36 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): # [B, num_head, 128, head_dim] # cache_k_tmp = self.cache_kv[0, :] # [B, num_head, 128, head_dim / 4, 4] - cache_k_tmp = self.cache_kv[0].reshape([ - self.batch_size, self.num_heads, self.cache_length, v_elems, - elems - ]) + cache_k_tmp = self.cache_kv[0].reshape( + [ + self.batch_size, + self.num_heads, + self.cache_length, + v_elems, + elems, + ] + ) # [B, num_head, head_dim / 4, 128, 4] cache_k_tmp = cache_k_tmp.transpose([0, 1, 3, 2, 4]) - cache_kv[0, :].reshape([ - self.batch_size, self.num_heads, v_elems, max_seq_length, elems - ])[:, :, :, :self.cache_length, :] = cache_k_tmp - - cache_kv[1, :, :, :self.cache_length, :] = self.cache_kv[1] + cache_kv[0, :].reshape( + [ + self.batch_size, + self.num_heads, + v_elems, + max_seq_length, + elems, + ] + )[:, :, :, : self.cache_length, :] = cache_k_tmp + + cache_kv[1, :, :, : self.cache_length, :] = self.cache_kv[1] if self.gen_cache_kv: assert self.query_length == self.cache_length cache_kv[:] = 0 else: - time_step = paddle.to_tensor([self.cache_length], - dtype='int32', - place=paddle.CPUPlace()) + time_step = paddle.to_tensor( + [self.cache_length], dtype='int32', place=paddle.CPUPlace() + ) if self.has_attn_mask: attn_mask = paddle.to_tensor(self.attn_mask, stop_gradient=True) else: @@ -542,14 +650,18 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): ffn1_in_scale = [] ffn2_in_scale = [] - qkv_out_scales_tensor = paddle.ones([self.layers, 3 * self.embed_dim], - 'float32') + qkv_out_scales_tensor = paddle.ones( + [self.layers, 3 * self.embed_dim], 'float32' + ) out_linear_out_scales_tensor = paddle.ones( - [self.layers, self.embed_dim], 'float32') - ffn1_out_scales_tensor = paddle.ones([self.layers, 4 * self.embed_dim], - 'float32') - ffn2_out_scales_tensor = paddle.ones([self.layers, self.embed_dim], - 'float32') + [self.layers, self.embed_dim], 'float32' + ) + ffn1_out_scales_tensor = paddle.ones( + [self.layers, 4 * self.embed_dim], 'float32' + ) + ffn2_out_scales_tensor = paddle.ones( + [self.layers, self.embed_dim], 'float32' + ) for i in range(self.layers): qkv_weights.append(qkv_weight_tensor) @@ -612,7 +724,8 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): qkv_in_scale=qkv_in_scale, out_linear_in_scale=out_linear_in_scale, ffn1_in_scale=ffn1_in_scale, - ffn2_in_scale=ffn2_in_scale) + ffn2_in_scale=ffn2_in_scale, + ) if self.has_cache_kv: return final_out[0], final_out[1] @@ -634,9 +747,11 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): if self.debug: print("cache_k out timestep=128") - print(cache_kv_out[0].reshape( - [2, bsz, num_head, v_elems, max_seq_len, - elems])[0, 0, 0, :, self.cache_length, :]) + print( + 
cache_kv_out[0].reshape( + [2, bsz, num_head, v_elems, max_seq_len, elems] + )[0, 0, 0, :, self.cache_length, :] + ) print("cache_v out timestep=128") print(cache_kv_out[0][1, 0, 0, self.cache_length, :]) @@ -649,33 +764,31 @@ class TestFusedMultiTransformerInt8Op(unittest.TestCase): cache_k = cache_kv_out[i][0, :] cache_k = cache_k.reshape( - [bsz, num_head, v_elems, max_seq_len, elems]) - cache_k = cache_k[:, :, :, :self.cache_length, :] + [bsz, num_head, v_elems, max_seq_len, elems] + ) + cache_k = cache_k[:, :, :, : self.cache_length, :] cache_k = cache_k.transpose([0, 1, 3, 2, 4]) cache_k = cache_k.reshape( - [bsz, num_head, self.cache_length, head_dim]) - - cache_v = cache_kv_out[i][1, :, :, :self.cache_length, :] - - np.testing.assert_allclose(cache_k_ref, - cache_k, - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(cache_v_ref, - cache_v, - rtol=self.rtol, - atol=self.atol) + [bsz, num_head, self.cache_length, head_dim] + ) + + cache_v = cache_kv_out[i][1, :, :, : self.cache_length, :] + + np.testing.assert_allclose( + cache_k_ref, cache_k, rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + cache_v_ref, cache_v, rtol=self.rtol, atol=self.atol + ) if i == 0: break - np.testing.assert_allclose(final_out_ref, - final_out, - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out, rtol=self.rtol, atol=self.atol + ) class TestFusedMultiTransformerInt8OpFp16(TestFusedMultiTransformerInt8Op): - def config(self): super().config() self.x_type = np.float16 @@ -683,7 +796,6 @@ class TestFusedMultiTransformerInt8OpFp16(TestFusedMultiTransformerInt8Op): class TestFusedMultiTransformerInt8OpCacheKV(TestFusedMultiTransformerInt8Op): - def config(self): super().config() super().generate_input_data() @@ -693,9 +805,9 @@ class TestFusedMultiTransformerInt8OpCacheKV(TestFusedMultiTransformerInt8Op): self.layers = 3 # odd layers -class TestFusedMultiTransformerInt8OpCacheKVFp16(TestFusedMultiTransformerInt8Op - ): - +class TestFusedMultiTransformerInt8OpCacheKVFp16( + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True @@ -704,9 +816,9 @@ class TestFusedMultiTransformerInt8OpCacheKVFp16(TestFusedMultiTransformerInt8Op self.x_type = np.float16 -class TestFusedMultiTransformerInt8OpGenCacheKV(TestFusedMultiTransformerInt8Op - ): - +class TestFusedMultiTransformerInt8OpGenCacheKV( + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True @@ -714,8 +826,8 @@ class TestFusedMultiTransformerInt8OpGenCacheKV(TestFusedMultiTransformerInt8Op class TestFusedMultiTransformerInt8OpGenCacheKVFp16( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True @@ -725,8 +837,8 @@ class TestFusedMultiTransformerInt8OpGenCacheKVFp16( class TestFusedMultiTransformerInt8OpPostLayerNormFp16( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.x_type = np.float16 @@ -735,8 +847,8 @@ class TestFusedMultiTransformerInt8OpPostLayerNormFp16( class TestFusedMultiTransformerInt8OpCacheKVPostLayerNorm( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True @@ -747,8 +859,8 @@ class TestFusedMultiTransformerInt8OpCacheKVPostLayerNorm( class TestFusedMultiTransformerInt8OpCacheKVPostLayerNormFp16( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op 
+): def config(self): super().config() self.has_cache_kv = True @@ -759,8 +871,8 @@ class TestFusedMultiTransformerInt8OpCacheKVPostLayerNormFp16( class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNorm( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True @@ -769,8 +881,8 @@ class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNorm( class TestFusedMultiTransformerInt8OpGenCacheKVPostLayerNormFp16( - TestFusedMultiTransformerInt8Op): - + TestFusedMultiTransformerInt8Op +): def config(self): super().config() self.has_cache_kv = True diff --git a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py index dcfb72940403cebb2c192136d91f20cf1b994889..0daf8342294f4f74e8cdd4591b1ba1f22363cff8 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py @@ -32,7 +32,6 @@ default_main_program().random_seed = 42 class TestFusedMultiTransformerOp(OpTest): - def setUp(self): self.config() self.generate_input_data() @@ -54,34 +53,47 @@ class TestFusedMultiTransformerOp(OpTest): self.__class__.no_need_check_grad = False bias_attr = paddle.fluid.ParamAttr( - initializer=paddle.fluid.initializer.Constant(value=0.0005)) - self.q_proj = Linear(self.embed_dim, - self.embed_dim, - self.weight_attr, - bias_attr=bias_attr) - #bias_attr=self.bias_attr) - - self.k_proj = Linear(self.kdim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.v_proj = Linear(self.vdim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.out_proj = Linear(self.embed_dim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - - self.ffn1_proj = Linear(self.embed_dim, - 4 * self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) - self.ffn2_proj = Linear(4 * self.embed_dim, - self.embed_dim, - self.weight_attr, - bias_attr=self.bias_attr) + initializer=paddle.fluid.initializer.Constant(value=0.0005) + ) + self.q_proj = Linear( + self.embed_dim, + self.embed_dim, + self.weight_attr, + bias_attr=bias_attr, + ) + # bias_attr=self.bias_attr) + + self.k_proj = Linear( + self.kdim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.v_proj = Linear( + self.vdim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.out_proj = Linear( + self.embed_dim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + + self.ffn1_proj = Linear( + self.embed_dim, + 4 * self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) + self.ffn2_proj = Linear( + 4 * self.embed_dim, + self.embed_dim, + self.weight_attr, + bias_attr=self.bias_attr, + ) paddle.set_default_dtype(np.float32) self.norm = LayerNorm(self.embed_dim) @@ -97,7 +109,7 @@ class TestFusedMultiTransformerOp(OpTest): self.x_type = np.float32 self.attn_mask_type = np.float64 - #self.attn_mask_type = np.bool + # self.attn_mask_type = np.bool self.pre_layer_norm = True self.has_attn_mask = True @@ -126,18 +138,27 @@ class TestFusedMultiTransformerOp(OpTest): self.weight_attr = None self.bias_attr = None self.kdim, self.vdim = self.embed_dim, self.embed_dim - self.key_length, self.value_length = self.query_length, self.query_length + self.key_length, self.value_length = ( + self.query_length, + self.query_length, + ) def generate_input_data(self): - self.query = 
np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.x_type) + self.query = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.x_type) out_seq_len = self.key_length if self.has_cache_kv: assert self.training is False, ValueError( - 'cache_kv can only used in inference') - self.cache_kv = np.random.rand(2, self.batch_size, self.num_heads, - self.cache_length, - self.head_dim).astype(self.x_type) + 'cache_kv can only used in inference' + ) + self.cache_kv = np.random.rand( + 2, + self.batch_size, + self.num_heads, + self.cache_length, + self.head_dim, + ).astype(self.x_type) if self.gen_cache_kv: self.cache_kv[:] = 0 else: @@ -148,14 +169,19 @@ class TestFusedMultiTransformerOp(OpTest): if self.has_pre_cache: out_seq_len += self.pre_cache_num self.pre_cache_kv = np.random.rand( - 2, self.batch_size, self.num_heads, self.pre_cache_num, - self.head_dim).astype(self.x_type) + 2, + self.batch_size, + self.num_heads, + self.pre_cache_num, + self.head_dim, + ).astype(self.x_type) if self.has_attn_mask: # [B, n_head, seq_len, out_seq_len] self.attn_mask = np.ones( (self.batch_size, 1, self.query_length, out_seq_len), - dtype=self.attn_mask_type) + dtype=self.attn_mask_type, + ) if self.attn_mask_type == np.int64: self.attn_mask = np.tril(self.attn_mask) elif self.attn_mask_type == np.float64: @@ -172,13 +198,15 @@ class TestFusedMultiTransformerOp(OpTest): self.attn_mask = np.tril(self.attn_mask) else: raise ValueError( - "'attn_mask_type' should be 'int64' or 'float64'.") + "'attn_mask_type' should be 'int64' or 'float64'." + ) else: self.attn_mask = None self.key, self.value = self.query, self.query - self.dout = np.random.random((self.batch_size, self.query_length, - self.embed_dim)).astype(self.x_type) + self.dout = np.random.random( + (self.batch_size, self.query_length, self.embed_dim) + ).astype(self.x_type) def GetBaselineOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) @@ -190,8 +218,9 @@ class TestFusedMultiTransformerOp(OpTest): cache_kv = paddle.to_tensor(self.cache_kv, stop_gradient=False) if self.has_pre_cache: - pre_cache_kv = paddle.to_tensor(self.pre_cache_kv, - stop_gradient=False) + pre_cache_kv = paddle.to_tensor( + self.pre_cache_kv, stop_gradient=False + ) if self.has_attn_mask: attn_mask = paddle.to_tensor(self.attn_mask, stop_gradient=False) @@ -241,10 +270,9 @@ class TestFusedMultiTransformerOp(OpTest): # [B, n_head, seq_len, head_dim] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, out_seq_len] - qk_out = layers.matmul(x=q_out, - y=k_out, - transpose_y=True, - alpha=self.head_dim**-0.5) + qk_out = layers.matmul( + x=q_out, y=k_out, transpose_y=True, alpha=self.head_dim**-0.5 + ) if self.debug: print('qk out is') @@ -264,10 +292,12 @@ class TestFusedMultiTransformerOp(OpTest): print('softmax out is') print(softmax_out[0][0][0]) if self.dropout_prob: - dropout_out = F.dropout(softmax_out, - self.dropout_prob, - training=self.training, - mode="upscale_in_train") + dropout_out = F.dropout( + softmax_out, + self.dropout_prob, + training=self.training, + mode="upscale_in_train", + ) # [B, n_head, seq_len, out_seq_len] * [B, n_head, out_seq_len, head_dim] # --> [B, n_head, seq_len, head_dim] qktv_out = tensor.matmul(dropout_out, v_out) @@ -279,7 +309,8 @@ class TestFusedMultiTransformerOp(OpTest): print('fmha out is') print(fmha_out[0][0][0]) out_linear_in = tensor.reshape( - x=fmha_out, shape=[0, 0, fmha_out.shape[2] * fmha_out.shape[3]]) + x=fmha_out, shape=[0, 0, fmha_out.shape[2] * 
fmha_out.shape[3]] + ) out = self.out_proj(out_linear_in) residual_out = residual + self.dropout(out) @@ -309,53 +340,69 @@ class TestFusedMultiTransformerOp(OpTest): def GetFusedMultiTransformerOut(self): paddle.disable_static(place=paddle.CUDAPlace(0)) - q_proj_weight = paddle.to_tensor(self.q_proj.weight, - stop_gradient=False) - k_proj_weight = paddle.to_tensor(self.k_proj.weight, - stop_gradient=False) - v_proj_weight = paddle.to_tensor(self.v_proj.weight, - stop_gradient=False) - out_linear_weight = paddle.to_tensor(self.out_proj.weight, - stop_gradient=False) - ffn1_weight = paddle.to_tensor(self.ffn1_proj.weight, - stop_gradient=False) - ffn2_weight = paddle.to_tensor(self.ffn2_proj.weight, - stop_gradient=False) + q_proj_weight = paddle.to_tensor( + self.q_proj.weight, stop_gradient=False + ) + k_proj_weight = paddle.to_tensor( + self.k_proj.weight, stop_gradient=False + ) + v_proj_weight = paddle.to_tensor( + self.v_proj.weight, stop_gradient=False + ) + out_linear_weight = paddle.to_tensor( + self.out_proj.weight, stop_gradient=False + ) + ffn1_weight = paddle.to_tensor( + self.ffn1_proj.weight, stop_gradient=False + ) + ffn2_weight = paddle.to_tensor( + self.ffn2_proj.weight, stop_gradient=False + ) if self.bias_attr is False: qkv_bias_tensor = None out_linear_bias = None else: - q_proj_bias = paddle.to_tensor(self.q_proj.bias, - stop_gradient=False) - k_proj_bias = paddle.to_tensor(self.k_proj.bias, - stop_gradient=False) - v_proj_bias = paddle.to_tensor(self.v_proj.bias, - stop_gradient=False) + q_proj_bias = paddle.to_tensor( + self.q_proj.bias, stop_gradient=False + ) + k_proj_bias = paddle.to_tensor( + self.k_proj.bias, stop_gradient=False + ) + v_proj_bias = paddle.to_tensor( + self.v_proj.bias, stop_gradient=False + ) qkv_bias = np.concatenate( - (q_proj_bias.numpy(), k_proj_bias.numpy(), v_proj_bias.numpy())) + (q_proj_bias.numpy(), k_proj_bias.numpy(), v_proj_bias.numpy()) + ) qkv_bias = qkv_bias.reshape((3, self.num_heads, self.head_dim)) qkv_bias_tensor = paddle.to_tensor(qkv_bias, stop_gradient=False) - out_linear_bias = paddle.to_tensor(self.out_proj.bias, - stop_gradient=False) - ffn1_bias = paddle.to_tensor(self.ffn1_proj.bias, - stop_gradient=False) - ffn2_bias = paddle.to_tensor(self.ffn2_proj.bias, - stop_gradient=False) + out_linear_bias = paddle.to_tensor( + self.out_proj.bias, stop_gradient=False + ) + ffn1_bias = paddle.to_tensor( + self.ffn1_proj.bias, stop_gradient=False + ) + ffn2_bias = paddle.to_tensor( + self.ffn2_proj.bias, stop_gradient=False + ) ln_scale = paddle.to_tensor(self.norm.weight, stop_gradient=False) ln_bias = paddle.to_tensor(self.norm.bias, stop_gradient=False) - ffn_ln_scale = paddle.to_tensor(self.ffn_norm.weight, - stop_gradient=False) + ffn_ln_scale = paddle.to_tensor( + self.ffn_norm.weight, stop_gradient=False + ) ffn_ln_bias = paddle.to_tensor(self.ffn_norm.bias, stop_gradient=False) q_proj_weight = q_proj_weight.numpy().transpose((1, 0)) k_proj_weight = k_proj_weight.numpy().transpose((1, 0)) v_proj_weight = v_proj_weight.numpy().transpose((1, 0)) qkv_weight = np.concatenate( - (q_proj_weight, k_proj_weight, v_proj_weight)) + (q_proj_weight, k_proj_weight, v_proj_weight) + ) qkv_weight = qkv_weight.reshape( - (3, self.num_heads, self.head_dim, self.embed_dim)) + (3, self.num_heads, self.head_dim, self.embed_dim) + ) x = paddle.to_tensor(self.query, stop_gradient=False) cache_kvs, cache_kv = None, None @@ -365,11 +412,16 @@ class TestFusedMultiTransformerOp(OpTest): cache_kvs = [] max_seq_length = (self.cache_length + 128) // 
128 * 128 - cache_kv = np.zeros([ - 2, self.batch_size, self.num_heads, max_seq_length, - self.head_dim - ], - dtype=self.x_type) + cache_kv = np.zeros( + [ + 2, + self.batch_size, + self.num_heads, + max_seq_length, + self.head_dim, + ], + dtype=self.x_type, + ) elems = 4 if self.x_type is np.float16: @@ -381,35 +433,52 @@ class TestFusedMultiTransformerOp(OpTest): # [B, num_head, 128, head_dim] # cache_k_tmp = self.cache_kv[0, :] # [B, num_head, 128, head_dim / 4, 4] - cache_k_tmp = self.cache_kv[0].reshape([ - self.batch_size, self.num_heads, self.cache_length, v_elems, - elems - ]) + cache_k_tmp = self.cache_kv[0].reshape( + [ + self.batch_size, + self.num_heads, + self.cache_length, + v_elems, + elems, + ] + ) # [B, num_head, head_dim / 4, 128, 4] cache_k_tmp = cache_k_tmp.transpose([0, 1, 3, 2, 4]) - cache_kv[0, :].reshape([ - self.batch_size, self.num_heads, v_elems, max_seq_length, elems - ])[:, :, :, :self.cache_length, :] = cache_k_tmp - - cache_kv[1, :, :, :self.cache_length, :] = self.cache_kv[1] + cache_kv[0, :].reshape( + [ + self.batch_size, + self.num_heads, + v_elems, + max_seq_length, + elems, + ] + )[:, :, :, : self.cache_length, :] = cache_k_tmp + + cache_kv[1, :, :, : self.cache_length, :] = self.cache_kv[1] if self.gen_cache_kv: assert self.query_length == self.cache_length cache_kv[:] = 0 else: - time_step = paddle.to_tensor([self.cache_length], - dtype='int32', - place=paddle.CPUPlace()) + time_step = paddle.to_tensor( + [self.cache_length], dtype='int32', place=paddle.CPUPlace() + ) if self.has_pre_cache: cache_kvs = [] - max_seq_length = (self.cache_length + - 128) // 128 * 128 + self.pre_cache_num - cache_kv = np.zeros([ - 2, self.batch_size, self.num_heads, max_seq_length, - self.head_dim - ], - dtype=self.x_type) + max_seq_length = ( + self.cache_length + 128 + ) // 128 * 128 + self.pre_cache_num + cache_kv = np.zeros( + [ + 2, + self.batch_size, + self.num_heads, + max_seq_length, + self.head_dim, + ], + dtype=self.x_type, + ) pre_caches = [] if self.has_attn_mask: @@ -443,35 +512,40 @@ class TestFusedMultiTransformerOp(OpTest): ffn_ln_scales.append(ffn_ln_scale) ffn_ln_biases.append(ffn_ln_bias) if self.has_cache_kv: - cache_kvs.append(paddle.to_tensor(cache_kv, - stop_gradient=False)) + cache_kvs.append( + paddle.to_tensor(cache_kv, stop_gradient=False) + ) if self.has_pre_cache: - cache_kvs.append(paddle.to_tensor(cache_kv, - stop_gradient=False)) + cache_kvs.append( + paddle.to_tensor(cache_kv, stop_gradient=False) + ) pre_caches.append( - paddle.to_tensor(self.pre_cache_kv, stop_gradient=False)) - - final_out = fused_multi_transformer(x, - ln_scales, - ln_biases, - qkv_weights, - qkv_biases, - out_weights, - out_biases, - ffn_ln_scales, - ffn_ln_biases, - ffn1_weights, - ffn1_biases, - ffn2_weights, - ffn2_biases, - pre_layer_norm=self.pre_layer_norm, - epsilon=epsilon, - cache_kvs=cache_kvs, - pre_caches=pre_caches, - time_step=time_step, - attn_mask=attn_mask, - dropout_rate=self.dropout_prob, - training=self.training) + paddle.to_tensor(self.pre_cache_kv, stop_gradient=False) + ) + + final_out = fused_multi_transformer( + x, + ln_scales, + ln_biases, + qkv_weights, + qkv_biases, + out_weights, + out_biases, + ffn_ln_scales, + ffn_ln_biases, + ffn1_weights, + ffn1_biases, + ffn2_weights, + ffn2_biases, + pre_layer_norm=self.pre_layer_norm, + epsilon=epsilon, + cache_kvs=cache_kvs, + pre_caches=pre_caches, + time_step=time_step, + attn_mask=attn_mask, + dropout_rate=self.dropout_prob, + training=self.training, + ) if self.has_cache_kv: return 
final_out[0], final_out[1] @@ -492,11 +566,16 @@ class TestFusedMultiTransformerOp(OpTest): cache_kvs = [] max_seq_length = (self.cache_length + 128) // 128 * 128 - cache_kv = np.zeros([ - 2, self.batch_size, self.num_heads, max_seq_length, - self.head_dim - ], - dtype=self.x_type) + cache_kv = np.zeros( + [ + 2, + self.batch_size, + self.num_heads, + max_seq_length, + self.head_dim, + ], + dtype=self.x_type, + ) elems = 4 if self.x_type is np.float16: @@ -504,37 +583,53 @@ class TestFusedMultiTransformerOp(OpTest): assert self.head_dim % elems == 0 v_elems = self.head_dim // elems - cache_k_tmp = self.cache_kv[0].reshape([ - self.batch_size, self.num_heads, self.cache_length, v_elems, - elems - ]) + cache_k_tmp = self.cache_kv[0].reshape( + [ + self.batch_size, + self.num_heads, + self.cache_length, + v_elems, + elems, + ] + ) # [B, num_head, head_dim / 4, 128, 4] cache_k_tmp = cache_k_tmp.transpose([0, 1, 3, 2, 4]) - cache_kv[0, :].reshape([ - self.batch_size, self.num_heads, v_elems, max_seq_length, elems - ])[:, :, :, :self.cache_length, :] = cache_k_tmp - - cache_kv[1, :, :, :self.cache_length, :] = self.cache_kv[1] + cache_kv[0, :].reshape( + [ + self.batch_size, + self.num_heads, + v_elems, + max_seq_length, + elems, + ] + )[:, :, :, : self.cache_length, :] = cache_k_tmp + + cache_kv[1, :, :, : self.cache_length, :] = self.cache_kv[1] if self.gen_cache_kv: assert self.query_length == self.cache_length cache_kv[:] = 0 else: - time_step = layers.fill_constant(shape=[1], - dtype="int32", - value=0, - force_cpu=True) + time_step = layers.fill_constant( + shape=[1], dtype="int32", value=0, force_cpu=True + ) time_step_feed = self.cache_length if self.has_pre_cache: cache_kvs = [] - max_seq_length = (self.cache_length + - 128) // 128 * 128 + self.pre_cache_num - cache_kv = np.zeros([ - 2, self.batch_size, self.num_heads, max_seq_length, - self.head_dim - ], - dtype=self.x_type) + max_seq_length = ( + self.cache_length + 128 + ) // 128 * 128 + self.pre_cache_num + cache_kv = np.zeros( + [ + 2, + self.batch_size, + self.num_heads, + max_seq_length, + self.head_dim, + ], + dtype=self.x_type, + ) pre_caches = [] attn_mask = None @@ -585,35 +680,43 @@ class TestFusedMultiTransformerOp(OpTest): ffn1_weight_attrs=ffn1_weights_attr, ffn1_bias_attrs=ffn1_biases_attr, ffn2_weight_attrs=ffn2_weights_attr, - ffn2_bias_attrs=ffn2_biases_attr) + ffn2_bias_attrs=ffn2_biases_attr, + ) transformer.eval() for i in range(self.layers): if self.has_cache_kv: cache_kvs.append( - layers.fill_constant(shape=cache_kv.shape, - dtype=cache_kv.dtype, - value=0)) + layers.fill_constant( + shape=cache_kv.shape, dtype=cache_kv.dtype, value=0 + ) + ) cache_kvs_feed.append(cache_kv) if self.has_pre_cache: cache_kvs.append( - layers.fill_constant(shape=cache_kv.shape, - dtype=cache_kv.dtype, - value=0)) + layers.fill_constant( + shape=cache_kv.shape, dtype=cache_kv.dtype, value=0 + ) + ) cache_kvs_feed.append(cache_kv) pre_caches.append( - layers.fill_constant(shape=self.pre_cache_kv.shape, - dtype=self.pre_cache_kv.dtype, - value=0)) + layers.fill_constant( + shape=self.pre_cache_kv.shape, + dtype=self.pre_cache_kv.dtype, + value=0, + ) + ) pre_caches_feed.append(self.pre_cache_kv) - final_out = transformer(x, - attn_mask=attn_mask, - caches=cache_kvs, - pre_caches=pre_caches, - time_step=time_step)[0] + final_out = transformer( + x, + attn_mask=attn_mask, + caches=cache_kvs, + pre_caches=pre_caches, + time_step=time_step, + )[0] exe = paddle.static.Executor(place=paddle.CUDAPlace(0)) 
exe.run(paddle.static.default_startup_program()) feed_data = { @@ -621,11 +724,13 @@ class TestFusedMultiTransformerOp(OpTest): 'cache_kvs': cache_kvs_feed, 'pre_caches': pre_caches_feed, 'time_step': time_step_feed, - 'attn_mask': attn_mask + 'attn_mask': attn_mask, } - out = exe.run(paddle.fluid.default_main_program(), - feed=feed_data, - fetch_list=[final_out]) + out = exe.run( + paddle.fluid.default_main_program(), + feed=feed_data, + fetch_list=[final_out], + ) paddle.disable_static() return out[0] @@ -644,9 +749,11 @@ class TestFusedMultiTransformerOp(OpTest): if self.debug: print("cache_k out timestep=128") - print(cache_kv_out[0].reshape( - [2, bsz, num_head, v_elems, max_seq_len, - elems])[0, 0, 0, :, self.cache_length, :]) + print( + cache_kv_out[0].reshape( + [2, bsz, num_head, v_elems, max_seq_len, elems] + )[0, 0, 0, :, self.cache_length, :] + ) print("cache_v out timestep=128") print(cache_kv_out[0][1, 0, 0, self.cache_length, :]) @@ -659,33 +766,31 @@ class TestFusedMultiTransformerOp(OpTest): cache_k = cache_kv_out[i][0, :] cache_k = cache_k.reshape( - [bsz, num_head, v_elems, max_seq_len, elems]) - cache_k = cache_k[:, :, :, :self.cache_length, :] + [bsz, num_head, v_elems, max_seq_len, elems] + ) + cache_k = cache_k[:, :, :, : self.cache_length, :] cache_k = cache_k.transpose([0, 1, 3, 2, 4]) cache_k = cache_k.reshape( - [bsz, num_head, self.cache_length, head_dim]) - - cache_v = cache_kv_out[i][1, :, :, :self.cache_length, :] - - np.testing.assert_allclose(cache_k_ref, - cache_k, - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(cache_v_ref, - cache_v, - rtol=self.rtol, - atol=self.atol) + [bsz, num_head, self.cache_length, head_dim] + ) + + cache_v = cache_kv_out[i][1, :, :, : self.cache_length, :] + + np.testing.assert_allclose( + cache_k_ref, cache_k, rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + cache_v_ref, cache_v, rtol=self.rtol, atol=self.atol + ) if i == 0: break - np.testing.assert_allclose(final_out_ref, - final_out, - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out, rtol=self.rtol, atol=self.atol + ) class TestFusedMultiTransformerOpFp16(TestFusedMultiTransformerOp): - def config(self): super().config() self.x_type = np.float16 @@ -693,7 +798,6 @@ class TestFusedMultiTransformerOpFp16(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpCacheKV(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_cache_kv = True @@ -703,7 +807,6 @@ class TestFusedMultiTransformerOpCacheKV(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpCacheKVFp16(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_cache_kv = True @@ -713,7 +816,6 @@ class TestFusedMultiTransformerOpCacheKVFp16(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpGenCacheKV(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_cache_kv = True @@ -721,7 +823,6 @@ class TestFusedMultiTransformerOpGenCacheKV(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpGenCacheKVFp16(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_cache_kv = True @@ -731,7 +832,6 @@ class TestFusedMultiTransformerOpGenCacheKVFp16(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpPostLayerNormFp16(TestFusedMultiTransformerOp): - def config(self): super().config() self.x_type = np.float16 @@ -740,8 +840,8 @@ class TestFusedMultiTransformerOpPostLayerNormFp16(TestFusedMultiTransformerOp): class 
TestFusedMultiTransformerOpCacheKVPostLayerNorm( - TestFusedMultiTransformerOp): - + TestFusedMultiTransformerOp +): def config(self): super().config() self.has_cache_kv = True @@ -752,8 +852,8 @@ class TestFusedMultiTransformerOpCacheKVPostLayerNorm( class TestFusedMultiTransformerOpCacheKVPostLayerNormFp16( - TestFusedMultiTransformerOp): - + TestFusedMultiTransformerOp +): def config(self): super().config() self.has_cache_kv = True @@ -764,8 +864,8 @@ class TestFusedMultiTransformerOpCacheKVPostLayerNormFp16( class TestFusedMultiTransformerOpGenCacheKVPostLayerNorm( - TestFusedMultiTransformerOp): - + TestFusedMultiTransformerOp +): def config(self): super().config() self.has_cache_kv = True @@ -774,8 +874,8 @@ class TestFusedMultiTransformerOpGenCacheKVPostLayerNorm( class TestFusedMultiTransformerOpGenCacheKVPostLayerNormFp16( - TestFusedMultiTransformerOp): - + TestFusedMultiTransformerOp +): def config(self): super().config() self.has_cache_kv = True @@ -786,7 +886,6 @@ class TestFusedMultiTransformerOpGenCacheKVPostLayerNormFp16( class TestFusedMultiTransformerOpPreCache(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_pre_cache = True @@ -794,29 +893,31 @@ class TestFusedMultiTransformerOpPreCache(TestFusedMultiTransformerOp): class TestFusedMultiTransformerOpPreCacheStatic(TestFusedMultiTransformerOp): - def config(self): super().config() self.has_pre_cache = True self.has_attn_mask = False self.x_type = np.float32 self.weight_attr = paddle.ParamAttr( - initializer=paddle.fluid.initializer.Constant(0.)) + initializer=paddle.fluid.initializer.Constant(0.0) + ) self.bias_attr = paddle.ParamAttr( - initializer=paddle.fluid.initializer.Constant(0.0005)) + initializer=paddle.fluid.initializer.Constant(0.0005) + ) self.ln_w_attr = paddle.ParamAttr( - initializer=paddle.fluid.initializer.Constant(1.)) + initializer=paddle.fluid.initializer.Constant(1.0) + ) self.ln_b_attr = paddle.ParamAttr( - initializer=paddle.fluid.initializer.Constant(0.)) + initializer=paddle.fluid.initializer.Constant(0.0) + ) def test_fused_multi_transformer_op(self): final_out_ref = self.GetBaselineOut() final_out = self.GetFusedMultiTransformerOutStatic() - np.testing.assert_allclose(final_out_ref, - final_out, - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + final_out_ref, final_out, rtol=self.rtol, atol=self.atol + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fused_multihead_matmul_op.py b/python/paddle/fluid/tests/unittests/test_fused_multihead_matmul_op.py index 717a98abd50bbde1381b756f2da13f8930907f0c..e2b53903b6d72a8de4568673c42ad2417ab0f669 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multihead_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multihead_matmul_op.py @@ -24,15 +24,15 @@ def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) 
+ shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "Paddle core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "Paddle core is not compiled with CUDA" +) class TestFusedMultiheadMatmulOp(OpTest): - def config(self): self.seq_len = 128 self.size_per_head = 64 @@ -45,13 +45,15 @@ class TestFusedMultiheadMatmulOp(OpTest): self.config() h = self.seq_len w = self.head_number * self.size_per_head - self.Input = np.random.random( - (self.batch_size, h, w)).astype("float32") - 0.5 + self.Input = ( + np.random.random((self.batch_size, h, w)).astype("float32") - 0.5 + ) self.WQ = np.random.random((w, w)).astype("float32") self.KQ = np.random.random((w, w)).astype("float32") self.VQ = np.random.random((w, w)).astype("float32") self.CombinedW = np.hstack((self.WQ, self.KQ, self.VQ)).reshape( - (w, 3, w)) + (w, 3, w) + ) self.Q = np.dot(self.Input, self.WQ) self.K = np.dot(self.Input, self.KQ) self.V = np.dot(self.Input, self.VQ) @@ -61,18 +63,32 @@ class TestFusedMultiheadMatmulOp(OpTest): self.BiasV = np.random.random((1, w)).astype("float32") self.CombinedB = np.vstack((self.BiasQ, self.BiasK, self.BiasV)) self.BiasQK = np.random.random( - (self.batch_size, self.head_number, self.seq_len, - self.seq_len)).astype("float32") + (self.batch_size, self.head_number, self.seq_len, self.seq_len) + ).astype("float32") # Compute Q path fc_q = self.Q + self.BiasQ - reshape_q = np.reshape(fc_q, (self.batch_size, self.seq_len, - self.head_number, self.size_per_head)) + reshape_q = np.reshape( + fc_q, + ( + self.batch_size, + self.seq_len, + self.head_number, + self.size_per_head, + ), + ) transpose_q = np.transpose(reshape_q, (0, 2, 1, 3)) scale_q = self.scale * transpose_q # Compute K path fc_k = self.K + self.BiasK - reshape_k = np.reshape(fc_k, (self.batch_size, self.seq_len, - self.head_number, self.size_per_head)) + reshape_k = np.reshape( + fc_k, + ( + self.batch_size, + self.seq_len, + self.head_number, + self.size_per_head, + ), + ) transpose_k = np.transpose(reshape_k, (0, 2, 3, 1)) # Compute Q*K @@ -81,8 +97,15 @@ class TestFusedMultiheadMatmulOp(OpTest): softmax_qk = np.apply_along_axis(stable_softmax, 3, eltadd_qk) # Compute V path fc_v = self.V + self.BiasV - reshape_v = np.reshape(fc_v, (self.batch_size, self.seq_len, - self.head_number, self.size_per_head)) + reshape_v = np.reshape( + fc_v, + ( + self.batch_size, + self.seq_len, + self.head_number, + self.size_per_head, + ), + ) transpose_v = np.transpose(reshape_v, (0, 2, 1, 3)) # Compute QK*V @@ -94,14 +117,14 @@ class TestFusedMultiheadMatmulOp(OpTest): "Input": self.Input, "W": self.CombinedW, "Bias": self.CombinedB, - "BiasQK": self.BiasQK + "BiasQK": self.BiasQK, } self.attrs = { "transpose_Q": False, "transpose_K": True, "transpose_V": False, "head_number": self.head_number, - "alpha": self.scale + "alpha": self.scale, } self.outputs = {"Out": reshape_qkv} @@ -111,7 +134,6 @@ class TestFusedMultiheadMatmulOp(OpTest): class TestFusedMultiHeadMatmulOp2(TestFusedMultiheadMatmulOp): - def config(self): self.seq_len = 256 self.size_per_head = 32 diff --git a/python/paddle/fluid/tests/unittests/test_fused_token_prune_op.py b/python/paddle/fluid/tests/unittests/test_fused_token_prune_op.py index adfb9e0917776394db750059717be1ff96329534..22d0fe4ba761dfe13069804c7c879a067427217b 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_token_prune_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_token_prune_op.py 
@@ -18,10 +18,10 @@ from op_test import OpTest from paddle.framework import core -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFusedTokenPruneOp(OpTest): - def setDtype(self): self.dtype = np.float32 @@ -30,19 +30,21 @@ class TestFusedTokenPruneOp(OpTest): attn = np.array(attn, dtype=self.dtype) attn = np.expand_dims(attn, axis=0) self.attn = np.expand_dims( - attn, axis=0) # [1,1,2,2] bsz = 1, nd_head=1, max_seq_len=2 + attn, axis=0 + ) # [1,1,2,2] bsz = 1, nd_head=1, max_seq_len=2 mask = [[1, 1], [-1, -1]] mask = np.array(mask, dtype=self.dtype) mask = np.expand_dims(mask, axis=0) self.mask = np.expand_dims(mask, axis=0) # same as attn x = [[1, 2, 3], [4, 5, 6]] x = np.array(x, dtype=self.dtype) - self.x = np.expand_dims(x, - axis=0) # [1, 2, 3] bsz = 1, max_seq_len=2, c=3 + self.x = np.expand_dims( + x, axis=0 + ) # [1, 2, 3] bsz = 1, max_seq_len=2, c=3 new_mask = [[1]] new_mask = np.array(new_mask, dtype=self.dtype) new_mask = np.expand_dims(new_mask, axis=0) - self.new_mask = np.expand_dims(new_mask, axis=0) #[1, 1, 1, 1] + self.new_mask = np.expand_dims(new_mask, axis=0) # [1, 1, 1, 1] out_slimmedx_py = [[[1, 2, 3]]] self.out_slimmedx_py = np.array(out_slimmedx_py, dtype=self.dtype) @@ -58,49 +60,68 @@ class TestFusedTokenPruneOp(OpTest): 'Attn': self.attn, 'Mask': self.mask, 'X': self.x, - 'NewMask': self.new_mask + 'NewMask': self.new_mask, } self.outputs = { 'SlimmedX': self.out_slimmedx_py, - 'CLSInds': self.out_cls_inds_py + 'CLSInds': self.out_cls_inds_py, } def test_check_output(self): self.check_output_with_place(core.CUDAPlace(0)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFusedTokenPruneOpFloat64(TestFusedTokenPruneOp): - def setDtype(self): self.dtype = np.float64 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFusedTokenPruneOp2(TestFusedTokenPruneOp): - def setInouts(self): - attn = [[[[1, 2, 3, 4], [4, 3, 2, 1], [5, 9, 5, 4], [9, 6, 5, 4]], - [[8, 5, 2, 0], [1, 0, 2, 3], [2, 2, 3, 2], [7, 4, 1, 8]]]] + attn = [ + [ + [[1, 2, 3, 4], [4, 3, 2, 1], [5, 9, 5, 4], [9, 6, 5, 4]], + [[8, 5, 2, 0], [1, 0, 2, 3], [2, 2, 3, 2], [7, 4, 1, 8]], + ] + ] self.attn = np.array( - attn, - dtype=self.dtype) # [1,2,4,4] bsz = 1, nd_head=2, max_seq_len=4 - mask = [[[[-1, -1, -1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], - [-1, -1, 1, 1]], - [[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], - [-1, -1, 1, 1]]]] + attn, dtype=self.dtype + ) # [1,2,4,4] bsz = 1, nd_head=2, max_seq_len=4 + mask = [ + [ + [ + [-1, -1, -1, 1], + [-1, -1, 1, 1], + [-1, -1, 1, 1], + [-1, -1, 1, 1], + ], + [ + [-1, -1, 1, 1], + [-1, -1, 1, 1], + [-1, -1, 1, 1], + [-1, -1, 1, 1], + ], + ] + ] self.mask = np.array(mask, dtype=self.dtype) # same as attn - x = [[[1.1, 1.1, 1.1], [2.2, 2.2, 2.2], [3.3, 3.3, 3.3], - [4.4, 4.4, 4.4]]] + x = [ + [[1.1, 1.1, 1.1], [2.2, 2.2, 2.2], [3.3, 3.3, 3.3], [4.4, 4.4, 4.4]] + ] self.x = np.array( - x, dtype=self.dtype) # [1, 4, 3] bsz = 1, max_seq_len=4, c=3 - self.new_mask = np.random.rand(1, 2, 2, - 2).astype(self.dtype) #[1, 2, 2, 2] + x, dtype=self.dtype + ) # [1, 4, 3] bsz = 1, max_seq_len=4, c=3 + self.new_mask = np.random.rand(1, 2, 2, 2).astype( + self.dtype + ) 
# [1, 2, 2, 2] - out_slimmedx_py = [[[1.1, 1.1, 1.1], [4.4, 4.4, 4.4]]] #[1, 2, 3] + out_slimmedx_py = [[[1.1, 1.1, 1.1], [4.4, 4.4, 4.4]]] # [1, 2, 3] self.out_slimmedx_py = np.array(out_slimmedx_py, dtype=self.dtype) out_cls_inds_py = [[0, 3]] diff --git a/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py b/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py index 50632b3f76f8f495266e9722aeaa812f4e60053d..a8a349265b46ce7521e8d3ac4df10796ae989994 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py +++ b/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py @@ -21,7 +21,6 @@ import unittest class TestFusedTransformerEncoderLayer(unittest.TestCase): - def setActivation(self): self.activation = 'gelu' @@ -61,8 +60,8 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): def fused_weight(self, weight, num_head): a = paddle.transpose(weight, perm=[1, 0]) return paddle.reshape( - a, shape=[1, num_head, - int(a.shape[0] / num_head), a.shape[1]]) + a, shape=[1, num_head, int(a.shape[0] / num_head), a.shape[1]] + ) def fused_qkv(self, q, k, v, num_head): fq = self.fused_weight(q, num_head) @@ -75,16 +74,29 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): return default_main_program().random_seed = 42 base_encoder = TransformerEncoderLayer( - self.d_model, self.nhead, self.dim_feedforward, self.dropout_rate, - self.activation, self.attn_dropout_rate, self.act_dropout_rate, - self.pre_layer_norm) - src = np.random.rand(self.batch_size, self.query_length, - self.embed_dim).astype(self.dtype) + self.d_model, + self.nhead, + self.dim_feedforward, + self.dropout_rate, + self.activation, + self.attn_dropout_rate, + self.act_dropout_rate, + self.pre_layer_norm, + ) + src = np.random.rand( + self.batch_size, self.query_length, self.embed_dim + ).astype(self.dtype) if self.has_attn_mask: - attn_mask = np.ones((self.batch_size, self.num_heads, - self.query_length, self.key_length), - dtype=self.attn_mask_type) + attn_mask = np.ones( + ( + self.batch_size, + self.num_heads, + self.query_length, + self.key_length, + ), + dtype=self.attn_mask_type, + ) attn_mask_tensor = paddle.to_tensor(attn_mask) else: attn_mask = None @@ -92,14 +104,21 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): dout = np.random.random(src.shape).astype(self.dtype) - base_out = base_encoder(paddle.to_tensor(src, stop_gradient=False), - attn_mask_tensor) + base_out = base_encoder( + paddle.to_tensor(src, stop_gradient=False), attn_mask_tensor + ) paddle.autograd.backward([base_out], [paddle.to_tensor(dout)], True) fused_encoder = FusedTransformerEncoderLayer( - self.d_model, self.nhead, self.dim_feedforward, self.dropout_rate, - self.activation, self.attn_dropout_rate, self.act_dropout_rate, - self.pre_layer_norm) + self.d_model, + self.nhead, + self.dim_feedforward, + self.dropout_rate, + self.activation, + self.attn_dropout_rate, + self.act_dropout_rate, + self.pre_layer_norm, + ) fused_encoder.ffn._linear1_weight.set_value(base_encoder.linear1.weight) fused_encoder.ffn._linear1_bias.set_value(base_encoder.linear1.bias) @@ -113,17 +132,22 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): fused_encoder.ffn._ln2_bias.set_value(base_encoder.norm2.bias) fused_encoder.fused_attn.linear_weight.set_value( - base_encoder.self_attn.out_proj.weight) + base_encoder.self_attn.out_proj.weight + ) fused_encoder.fused_attn.linear_bias.set_value( - base_encoder.self_attn.out_proj.bias) + 
base_encoder.self_attn.out_proj.bias + ) if self.pre_layer_norm: fused_encoder.fused_attn.pre_ln_scale.set_value( - base_encoder.norm1.weight) + base_encoder.norm1.weight + ) fused_encoder.fused_attn.pre_ln_bias.set_value( - base_encoder.norm1.bias) + base_encoder.norm1.bias + ) else: fused_encoder.fused_attn.ln_scale.set_value( - base_encoder.norm1.weight) + base_encoder.norm1.weight + ) fused_encoder.fused_attn.ln_bias.set_value(base_encoder.norm1.bias) q = base_encoder.self_attn.q_proj.weight @@ -138,59 +162,74 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): tmp = paddle.concat(x=[q_bias, k_bias, v_bias], axis=0) qkv_bias = paddle.reshape( tmp, - shape=[3, self.num_heads, - int(tmp.shape[0] / 3 / self.num_heads)]) + shape=[3, self.num_heads, int(tmp.shape[0] / 3 / self.num_heads)], + ) fused_encoder.fused_attn.qkv_bias.set_value(qkv_bias) - fused_out = fused_encoder(paddle.to_tensor(src, stop_gradient=False), - attn_mask_tensor) + fused_out = fused_encoder( + paddle.to_tensor(src, stop_gradient=False), attn_mask_tensor + ) paddle.autograd.backward([fused_out], [paddle.to_tensor(dout)], True) correct_ffn_str = 'd_model={}, dim_feedforward={}, dropout_rate={}, epsilon={}, activation={}, act_dropout_rate={}, normalize_before={}, dtype={}'.format( - self.d_model, self.dim_feedforward, self.dropout_rate, - fused_encoder.ffn._epsilon, self.activation, self.dropout_rate, - self.pre_layer_norm, self.dtype) + self.d_model, + self.dim_feedforward, + self.dropout_rate, + fused_encoder.ffn._epsilon, + self.activation, + self.dropout_rate, + self.pre_layer_norm, + self.dtype, + ) self.assertTrue(fused_encoder.ffn.extra_repr(), correct_ffn_str) correct_attn_str = 'embed_dim={}, num_heads={}, dropout_rate={}, attn_dropout_rate={}, epsilon={}, kdim={}, vdim={}, normalize_before={}, need_weights={}, dtype={}'.format( - self.embed_dim, self.num_heads, self.dropout_rate, - self.dropout_rate, fused_encoder.fused_attn._epsilon, None, None, - self.pre_layer_norm, False, self.dtype) + self.embed_dim, + self.num_heads, + self.dropout_rate, + self.dropout_rate, + fused_encoder.fused_attn._epsilon, + None, + None, + self.pre_layer_norm, + False, + self.dtype, + ) self.assertTrue(fused_encoder.fused_attn.extra_repr(), correct_attn_str) - np.testing.assert_allclose(fused_out.numpy(), - base_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(fused_out.grad.numpy(), - base_out.grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + fused_out.numpy(), base_out.numpy(), rtol=self.rtol, atol=self.atol + ) + np.testing.assert_allclose( + fused_out.grad.numpy(), + base_out.grad.numpy(), + rtol=self.rtol, + atol=self.atol, + ) class TestFusedTransformerEncoderLayerAct(TestFusedTransformerEncoderLayer): - def setActivation(self): self.activation = 'relu' class TestFusedTransformerEncoderLayerPreLayerNorm( - TestFusedTransformerEncoderLayer): - + TestFusedTransformerEncoderLayer +): def setPreLayerNorm(self): self.pre_layer_norm = True class TestFusedTransformerEncoderLayerAttnMaskIsNone( - TestFusedTransformerEncoderLayer): - + TestFusedTransformerEncoderLayer +): def setAttnMask(self): self.has_attn_mask = False class TestFusedTransformerEncoderLayerPreLnTrueAttnMaskIsNone( - TestFusedTransformerEncoderLayer): - + TestFusedTransformerEncoderLayer +): def setPreLayerNorm(self): self.pre_layer_norm = True diff --git a/python/paddle/fluid/tests/unittests/test_fused_transformer_with_amp_decorator.py 
b/python/paddle/fluid/tests/unittests/test_fused_transformer_with_amp_decorator.py index f0173d9632fffbccb935bc9bc8cf43e7436c3ea6..212db2406aaa8d02997073dc8af1807f5ad54eec 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_transformer_with_amp_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_fused_transformer_with_amp_decorator.py @@ -14,12 +14,14 @@ import paddle import paddle.nn as nn -from paddle.incubate.nn.layer.fused_transformer import FusedMultiHeadAttention, FusedFeedForward +from paddle.incubate.nn.layer.fused_transformer import ( + FusedMultiHeadAttention, + FusedFeedForward, +) import unittest class PreModel(nn.Layer): - def __init__(self): super(PreModel, self).__init__() self.attn = FusedMultiHeadAttention( @@ -27,9 +29,9 @@ class PreModel(nn.Layer): num_heads=16, normalize_before=False, ) - self.ffn = FusedFeedForward(d_model=1024, - dim_feedforward=4096, - normalize_before=False) + self.ffn = FusedFeedForward( + d_model=1024, dim_feedforward=4096, normalize_before=False + ) def forward(self, x): x = self.attn(x) @@ -37,7 +39,6 @@ class PreModel(nn.Layer): class PostModel(nn.Layer): - def __init__(self): super(PostModel, self).__init__() self.attn = FusedMultiHeadAttention( @@ -45,9 +46,9 @@ class PostModel(nn.Layer): num_heads=16, normalize_before=True, ) - self.ffn = FusedFeedForward(d_model=1024, - dim_feedforward=4096, - normalize_before=True) + self.ffn = FusedFeedForward( + d_model=1024, dim_feedforward=4096, normalize_before=True + ) def forward(self, x): x = self.attn(x) @@ -55,19 +56,18 @@ class PostModel(nn.Layer): class TestFusedTransformerWithAmpDecorator(unittest.TestCase): - def get_model(self): self.pre_model = PreModel() self.post_model = PostModel() def test_run(self): self.get_model() - pre_model = paddle.amp.decorate(models=self.pre_model, - level='O2', - save_dtype='float32') - post_model = paddle.amp.decorate(models=self.post_model, - level='O2', - save_dtype='float32') + pre_model = paddle.amp.decorate( + models=self.pre_model, level='O2', save_dtype='float32' + ) + post_model = paddle.amp.decorate( + models=self.post_model, level='O2', save_dtype='float32' + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py b/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py index e2aa2fc901e0674120152907535ab17565237ead..68777ca256757f3757a50a935ec1ee8b4a2628c4 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py @@ -20,29 +20,31 @@ from paddle.fluid.tests.unittests.test_fusion_lstm_op import fc, ACTIVATION def fusion_gru( - x, # T x M - lod, # 1 x N - h0, # N x D - wx, # M x 3D - wh, # D x 3D - bias, # 1 x 3D + x, # T x M + lod, # 1 x N + h0, # N x D + wx, # M x 3D + wh, # D x 3D + bias, # 1 x 3D + is_reverse, + origin_mode, + act_state, + act_gate, +): + return gru( + fc(x, wx, bias), + lod, + h0, + wh, + np.zeros((1, wh.shape[1]), dtype='float32'), is_reverse, - origin_mode, act_state, - act_gate): - return gru(fc(x, wx, bias), - lod, - h0, - wh, - np.zeros((1, wh.shape[1]), dtype='float32'), - is_reverse, - act_state, - act_gate, - origin_mode=origin_mode) + act_gate, + origin_mode=origin_mode, + ) class TestFusionGRUOp(OpTest): - def set_confs(self): pass @@ -66,17 +68,29 @@ class TestFusionGRUOp(OpTest): x = np.random.rand(T, self.M).astype('float32') wx = np.random.rand(self.M, 3 * self.D).astype('float32') wh = np.random.rand(self.D, 3 * self.D).astype('float32') - bias = np.random.rand( - 1, 3 * 
self.D).astype('float32') if self.with_bias else np.zeros( - (1, 3 * self.D), dtype='float32') - h0 = np.random.rand( - N, self.D).astype('float32') if self.with_h0 else np.zeros( - (N, self.D), dtype='float32') - - _, _, _, hidden = fusion_gru(x, self.lod, h0, wx, wh, bias, - self.is_reverse, self.origin_mode, - ACTIVATION[self.act_state], - ACTIVATION[self.act_gate]) + bias = ( + np.random.rand(1, 3 * self.D).astype('float32') + if self.with_bias + else np.zeros((1, 3 * self.D), dtype='float32') + ) + h0 = ( + np.random.rand(N, self.D).astype('float32') + if self.with_h0 + else np.zeros((N, self.D), dtype='float32') + ) + + _, _, _, hidden = fusion_gru( + x, + self.lod, + h0, + wx, + wh, + bias, + self.is_reverse, + self.origin_mode, + ACTIVATION[self.act_state], + ACTIVATION[self.act_gate], + ) self.inputs = {'X': (x, self.lod), 'WeightX': wx, 'WeightH': wh} @@ -93,7 +107,7 @@ class TestFusionGRUOp(OpTest): 'gate_activation': self.act_gate, 'is_reverse': self.is_reverse, 'origin_mode': self.origin_mode, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } def test_check_output(self): @@ -103,46 +117,39 @@ class TestFusionGRUOp(OpTest): class TestFusionGRUOpNoInitial(TestFusionGRUOp): - def set_confs(self): self.with_h0 = False class TestFusionGRUOpNoBias(TestFusionGRUOp): - def set_confs(self): self.with_bias = False class TestFusionGRUOpReverse(TestFusionGRUOp): - def set_confs(self): self.is_reverse = True class TestFusionGRUOpMD1(TestFusionGRUOp): - def set_confs(self): self.M = 36 self.D = 8 class TestFusionGRUOpMD2(TestFusionGRUOp): - def set_confs(self): self.M = 8 self.D = 8 class TestFusionGRUOpMD3(TestFusionGRUOp): - def set_confs(self): self.M = 17 self.D = 15 class TestFusionGRUOpBS1(TestFusionGRUOp): - def set_confs(self): self.lod = [[3]] self.D = 16 @@ -150,5 +157,6 @@ class TestFusionGRUOpBS1(TestFusionGRUOp): if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py b/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py index 9b1d9dfffc6aaeca0b8dc49c6c388f20b8134395..fcde3d2f5db6643bc7612b438b0d4e907b1d2022 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py @@ -23,25 +23,36 @@ def fc(x, w, b): def fusion_lstm( - x, # T x M - lod, # 1 x N - wx=None, # M x 4D - bx=None, # 1 x 4D - h0=None, # N x D - c0=None, # N x D - w_h=None, # D x 4D - w_b=None, # 1 x 4D - w_c=None, # 1 x 3D - is_reverse=False, - act_gate=None, - act_cell=None, - act_cand=None): - return lstm(fc(x, wx, bx), lod, h0, c0, w_h, w_b, w_c, is_reverse, act_gate, - act_cell, act_cand) + x, # T x M + lod, # 1 x N + wx=None, # M x 4D + bx=None, # 1 x 4D + h0=None, # N x D + c0=None, # N x D + w_h=None, # D x 4D + w_b=None, # 1 x 4D + w_c=None, # 1 x 3D + is_reverse=False, + act_gate=None, + act_cell=None, + act_cand=None, +): + return lstm( + fc(x, wx, bx), + lod, + h0, + c0, + w_h, + w_b, + w_c, + is_reverse, + act_gate, + act_cell, + act_cand, + ) class TestFusionLSTMOp(OpTest): - def set_conf(self): pass @@ -76,24 +87,36 @@ class TestFusionLSTMOp(OpTest): b = np.random.normal(size=(1, 7 * self.D)).astype('float32') else: b = np.random.normal(size=(1, 4 * self.D)).astype('float32') - w_b = np.copy(b[:, 0:4 * self.D]) - w_c = b[:, 4 * self.D:] if self.use_peepholes else None + w_b = np.copy(b[:, 0 : 4 * self.D]) + w_c = b[:, 4 * self.D :] if self.use_peepholes else None # this is the weight of fc wx = 
np.random.normal(size=(self.M, 4 * self.D)).astype('float32') # this is the bias of fc # and it should be manually added into the bias of this fusion LSTM bx = np.random.normal(size=(1, 4 * self.D)).astype('float32') - b[0, 0:4 * self.D] += bx[0, :] - h, c = fusion_lstm(x, self.lod, wx, bx, h0, c0, wh, w_b, w_c, - self.is_reverse, ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], ACTIVATION[self.act_cand]) + b[0, 0 : 4 * self.D] += bx[0, :] + h, c = fusion_lstm( + x, + self.lod, + wx, + bx, + h0, + c0, + wh, + w_b, + w_c, + self.is_reverse, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) self.inputs = { 'X': (x, self.lod), 'WeightX': wx, 'WeightH': wh, - 'Bias': b + 'Bias': b, } if self.has_initial_state: @@ -110,7 +133,7 @@ class TestFusionLSTMOp(OpTest): 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, 'candidate_activation': self.act_cand, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } def test_check_output(self): @@ -120,74 +143,63 @@ class TestFusionLSTMOp(OpTest): class TestFusionLSTMOpInit(TestFusionLSTMOp): - def set_conf(self): self.has_initial_state = True class TestFusionLSTMOpReverse(TestFusionLSTMOp): - def set_conf(self): self.is_reverse = True class TestFusionLSTMOpInitReverse(TestFusionLSTMOp): - def set_conf(self): self.has_initial_state = True self.is_reverse = True class TestFusionLSTMOpMD1(TestFusionLSTMOp): - def set_conf(self): self.M = 36 self.D = 8 class TestFusionLSTMOpMD2(TestFusionLSTMOp): - def set_conf(self): self.M = 8 self.D = 8 class TestFusionLSTMOpMD3(TestFusionLSTMOp): - def set_conf(self): self.M = 15 self.D = 3 class TestFusionLSTMOpBS1(TestFusionLSTMOp): - def set_conf(self): self.lod = [[3]] self.D = 16 class TestFusionLSTMOpPeepholes(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True class TestFusionLSTMOpPeepholesInit(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.has_initial_state = True class TestFusionLSTMOpPeepholesReverse(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.is_reverse = True class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.has_initial_state = True @@ -195,7 +207,6 @@ class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp): class TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp): - def set_conf(self): self.use_peepholes = True self.lod = [[2]] @@ -204,5 +215,6 @@ class TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp): if __name__ == '__main__': from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fusion_repeated_fc_relu_op.py b/python/paddle/fluid/tests/unittests/test_fusion_repeated_fc_relu_op.py index c1b117dfe57d4d85dbb306effff4a40b5fca8d2f..c58570d9de9927282bb1a2149b9f82958e056a90 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_repeated_fc_relu_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_repeated_fc_relu_op.py @@ -19,7 +19,6 @@ from test_fc_op import fc_refer, MatrixGenerate class TestFusionRepeatedFCReluOp(OpTest): - def setUp(self): self.bs = 3 self.ic = 9 @@ -28,7 +27,7 @@ class TestFusionRepeatedFCReluOp(OpTest): self.set_conf() self.op_type = 'fusion_repeated_fc_relu' sz = len(self.oc) - ics = [self.ic] + self.oc[0:sz - 1] + ics = [self.ic] + self.oc[0 : sz - 1] assert len(ics) == len(self.oc) weights = [] biases = [] @@ -38,23 +37,32 @@ class TestFusionRepeatedFCReluOp(OpTest): matrix = MatrixGenerate(self.bs, 
ics[i], self.oc[i], 1, 1) inp = np.reshape(matrix.input, [self.bs, ics[i]]) weights.append( - ('W_{0}'.format(i), np.reshape(matrix.weights, - [ics[i], self.oc[i]]))) + ( + 'W_{0}'.format(i), + np.reshape(matrix.weights, [ics[i], self.oc[i]]), + ) + ) biases.append(('B_{0}'.format(i), matrix.bias)) outs.append( - np.reshape(np.maximum(fc_refer(matrix, True), 0), - [self.bs, self.oc[i]])) + np.reshape( + np.maximum(fc_refer(matrix, True), 0), [self.bs, self.oc[i]] + ) + ) for i in range(sz - 1): matrix = MatrixGenerate(self.bs, ics[i + 1], self.oc[i + 1], 1, 1) matrix.input = np.reshape(outs[i], [self.bs, ics[i + 1], 1, 1]) out = fc_refer(matrix, True) - weights.append(('W_{0}'.format(i + 1), - np.reshape(matrix.weights, - [ics[i + 1], self.oc[i + 1]]))) + weights.append( + ( + 'W_{0}'.format(i + 1), + np.reshape(matrix.weights, [ics[i + 1], self.oc[i + 1]]), + ) + ) biases.append(('B_{0}'.format(i + 1), matrix.bias)) outs.append( - np.reshape(np.maximum(out, 0), [self.bs, self.oc[i + 1]])) + np.reshape(np.maximum(out, 0), [self.bs, self.oc[i + 1]]) + ) relu_outs = [] for i in range(sz - 1): @@ -76,7 +84,6 @@ class TestFusionRepeatedFCReluOp(OpTest): class TestFusionRepeatedFCReluOpBS1(TestFusionRepeatedFCReluOp): - def set_conf(self): self.bs = 1 self.oc = [4, 2, 7, 5, 512, 1024] diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py index cceced75adc8ee5e88e75aee5d8ad41f81062e96..91ed3f564429e1505a8d47ddb602782dfca415b1 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py @@ -19,7 +19,6 @@ from sequence.test_sequence_conv import seqconv class TestSeqConvEltAddRelu(OpTest): - def set_conf(self): pass @@ -38,8 +37,8 @@ class TestSeqConvEltAddRelu(OpTest): T = sum(self.lod[0]) x = np.random.uniform(-1, 1, [T, self.in_fea_size]).astype('float32') w = np.random.uniform( - -1, 1, [self.in_fea_size * self.context_length, self.out_fea_size - ]).astype('float32') + -1, 1, [self.in_fea_size * self.context_length, self.out_fea_size] + ).astype('float32') b = np.random.uniform(-2, 1, [1, self.out_fea_size]).astype('float32') out = seqconv(x, self.lod, w, self.context_length, self.context_start) out = np.maximum(out + b, 0) @@ -48,7 +47,7 @@ class TestSeqConvEltAddRelu(OpTest): self.attrs = { 'contextStart': self.context_start, 'contextLength': self.context_length, - 'contextStride': self.context_stride + 'contextStride': self.context_stride, } self.outputs = {'Out': out} @@ -57,19 +56,16 @@ class TestSeqConvEltAddRelu(OpTest): class TestSeqConvEltAddReluBS1(TestSeqConvEltAddRelu): - def set_conf(self): self.lod = [[10]] class TestSeqConvEltAddReluBS1Case2(TestSeqConvEltAddRelu): - def set_conf(self): self.lod = [[2]] class TestSeqConvEltAddReluCase1(TestSeqConvEltAddRelu): - def set_conf(self): self.lod = [[3, 5, 1, 6]] self.context_length = 3 @@ -77,7 +73,6 @@ class TestSeqConvEltAddReluCase1(TestSeqConvEltAddRelu): class TestSeqConvEltAddReluCase2(TestSeqConvEltAddRelu): - def set_conf(self): self.lod = [[10, 1, 2, 4, 1, 5, 6]] self.in_fea_size = 2 @@ -86,7 +81,6 @@ class TestSeqConvEltAddReluCase2(TestSeqConvEltAddRelu): class TestSeqConvEltAddReluCase3(TestSeqConvEltAddRelu): - def set_conf(self): self.lod = [[10, 1, 2, 4, 1, 5, 6]] self.context_length = 5 diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py 
b/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py index be3065ec86a2366755c28863614b936a1a7c65af..10a8623ec9c1d3786e11d2077280018561eb8b6a 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py @@ -45,7 +45,6 @@ def fusion_seqexpand_concat_fc(xs, lod, w, b, fc_act): class TestFusionSeqExpandConcatFCOp(OpTest): - def set_conf(self): pass @@ -65,19 +64,24 @@ class TestFusionSeqExpandConcatFCOp(OpTest): x0 = np.random.normal(size=(T, self.inputs_M[0])).astype('float32') xs = [x0] for i in range(num_inputs - 1): - xi = np.random.normal(size=(bs, - self.inputs_M[i + 1])).astype('float32') + xi = np.random.normal(size=(bs, self.inputs_M[i + 1])).astype( + 'float32' + ) xs.append(xi) # fc weight and bias - w = np.random.normal(size=(sum(self.inputs_M), - self.D)).astype('float32') - b = np.random.normal( - size=(1, self.D)).astype('float32') if self.with_bias else np.zeros( - (1, self.D)).astype('float32') - - out = fusion_seqexpand_concat_fc(xs, self.lod, w, b, - ACTIVATION[self.fc_act]) + w = np.random.normal(size=(sum(self.inputs_M), self.D)).astype( + 'float32' + ) + b = ( + np.random.normal(size=(1, self.D)).astype('float32') + if self.with_bias + else np.zeros((1, self.D)).astype('float32') + ) + + out = fusion_seqexpand_concat_fc( + xs, self.lod, w, b, ACTIVATION[self.fc_act] + ) self.inputs = {'X': [('x0', (x0, self.lod))], 'FCWeight': w} normal_lod = [[1] * bs] @@ -95,47 +99,40 @@ class TestFusionSeqExpandConcatFCOp(OpTest): class TestFusionSECFCOpNonBias(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.with_bias = False class TestFusionSECFCOpNonAct(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.fc_act = 'identity' class TestFusionSECFCOpMD1(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.inputs_M = [3, 4, 2, 1, 5] self.D = 8 class TestFusionSECFCOpMD2(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.lod = [[5, 6]] self.inputs_M = [1, 1] class TestFusionSECFCOpBS1_1(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.lod = [[1]] self.inputs_M = [3, 4, 2] class TestFusionSECFCOpBS1_2(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.lod = [[1]] self.inputs_M = [3, 4] class TestFusionSECFCOpBS1_3(TestFusionSeqExpandConcatFCOp): - def set_conf(self): self.lod = [[5]] self.inputs_M = [6, 3] diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py index c634d8d2b58f6c27f87bf8ca85723e045dad6f24..ceb69451c9bf8e56eb04e22fffadb1c8c408120f 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py @@ -16,11 +16,14 @@ import unittest import numpy as np from op_test import OpTest from test_reorder_lod_tensor import convert_to_offset -from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt +from sequence.test_sequence_pool import ( + compute_seqpool_sum, + compute_seqpool_avg, + compute_seqpool_sqrt, +) class TestFusionSeqPoolConcatOp(OpTest): - def setUp(self): self.w = 11 self.lods = [[[2, 3, 5]], [[1, 5, 2]]] @@ -34,8 +37,9 @@ class TestFusionSeqPoolConcatOp(OpTest): i = 0 for lod in self.lods: assert bs == len(lod[0]), 'All lod size should be equal' - x = np.random.uniform(0.1, 1, - [sum(lod[0]), self.w]).astype('float32') + x = np.random.uniform(0.1, 1, 
[sum(lod[0]), self.w]).astype( + 'float32' + ) offset = convert_to_offset(lod) out = np.zeros((bs, self.w)).astype('float32') if self.pooltype == "SUM": @@ -68,26 +72,22 @@ class TestFusionSeqPoolConcatOp(OpTest): class TestFusionSeqPoolConcatOpCase1(TestFusionSeqPoolConcatOp): - def set_conf(self): self.lods = [[[1]]] class TestFusionSeqPoolConcatOpCase2(TestFusionSeqPoolConcatOp): - def set_conf(self): self.lods = [[[1]], [[1]], [[1]]] class TestFusionSeqPoolConcatOpCase3(TestFusionSeqPoolConcatOp): - def set_conf(self): self.lods = [[[1, 3, 4, 6]]] self.w = 10 class TestFusionSeqPoolConcatOpCase4(TestFusionSeqPoolConcatOp): - def set_conf(self): self.lods = [[[2, 13, 4]], [[1, 1, 1]], [[5, 3, 1]], [[9, 10, 3]]] self.w = 3 @@ -95,14 +95,11 @@ class TestFusionSeqPoolConcatOpCase4(TestFusionSeqPoolConcatOp): ## test avg pool and sqrt def create_test_avg_sqrt_class(parent): - class TestSeqPoolAvgCase(parent): - def set_pooltype(self): self.pooltype = "AVERAGE" class TestSeqPoolSqrtCase(parent): - def set_pooltype(self): self.pooltype = "SQRT" diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py index be298c155357a6efea5c10a17cec6c4d57c1373d..9d9324cefe1c4efb97e2454813c672ba53dcfe0d 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py @@ -16,12 +16,15 @@ import unittest import numpy as np from op_test import OpTest from test_reorder_lod_tensor import convert_to_offset -from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt +from sequence.test_sequence_pool import ( + compute_seqpool_sum, + compute_seqpool_avg, + compute_seqpool_sqrt, +) from test_cvm_op import cvm_compute class TestFusionSeqPoolCVMConcatOp(OpTest): - def setUp(self): self.w = 11 self.use_cvm = True @@ -38,8 +41,9 @@ class TestFusionSeqPoolCVMConcatOp(OpTest): i = 0 for lod in self.lods: assert bs == len(lod[0]), 'All lod size should be equal' - x = np.random.uniform(0.1, 1, - [sum(lod[0]), self.w]).astype('float32') + x = np.random.uniform(0.1, 1, [sum(lod[0]), self.w]).astype( + 'float32' + ) offset = convert_to_offset(lod) out = np.zeros((bs, self.w)).astype('float32') if self.pooltype == "SUM": @@ -75,26 +79,22 @@ class TestFusionSeqPoolCVMConcatOp(OpTest): class TestFusionSeqPoolCVMConcatOpCase1(TestFusionSeqPoolCVMConcatOp): - def set_conf(self): self.lods = [[[1]]] class TestFusionSeqPoolCVMConcatOpCase2(TestFusionSeqPoolCVMConcatOp): - def set_conf(self): self.lods = [[[1]], [[1]], [[1]]] class TestFusionSeqPoolCVMConcatOpCase3(TestFusionSeqPoolCVMConcatOp): - def set_conf(self): self.lods = [[[1, 3, 4, 6]]] self.w = 10 class TestFusionSeqPoolCVMConcatOpCase4(TestFusionSeqPoolCVMConcatOp): - def set_conf(self): self.lods = [[[2, 13, 4]], [[1, 1, 1]], [[5, 3, 1]], [[9, 10, 3]]] self.w = 3 @@ -102,14 +102,11 @@ class TestFusionSeqPoolCVMConcatOpCase4(TestFusionSeqPoolCVMConcatOp): ## test avg pool and sqrt def create_test_avg_sqrt_class(parent): - class TestSeqPoolAvgCase(parent): - def set_pooltype(self): self.pooltype = "AVERAGE" class TestSeqPoolSqrtCase(parent): - def set_pooltype(self): self.pooltype = "SQRT" diff --git a/python/paddle/fluid/tests/unittests/test_fusion_squared_mat_sub_op.py b/python/paddle/fluid/tests/unittests/test_fusion_squared_mat_sub_op.py index a5dd503e43afd970c05cf1b59f3c0cae13ecfceb..2a6098a8a6a066ed35c76ce5a1e694969f887e3d 
100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_squared_mat_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_squared_mat_sub_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestFusionSquaredMatSubOp(OpTest): - def setUp(self): self.op_type = 'fusion_squared_mat_sub' self.m = 11 @@ -31,8 +30,8 @@ class TestFusionSquaredMatSubOp(OpTest): self.inputs = {'X': matx, 'Y': maty} self.outputs = { - 'Out': - (np.dot(matx, maty)**2 - np.dot(matx**2, maty**2)) * self.scalar + 'Out': (np.dot(matx, maty) ** 2 - np.dot(matx**2, maty**2)) + * self.scalar } self.attrs = { 'scalar': self.scalar, @@ -46,7 +45,6 @@ class TestFusionSquaredMatSubOp(OpTest): class TestFusionSquaredMatSubOpCase1(TestFusionSquaredMatSubOp): - def set_conf(self): self.scalar = -0.3 diff --git a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py index f60357fdfa91bb438a1020ae5b3c12fd80f31de7..768bbec552dbfe5a6c3288d0cdbe986b51206aed 100644 --- a/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_fusion_transpose_flatten_concat_op.py @@ -18,10 +18,10 @@ from op_test import OpTest import paddle.fluid.core as core -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFusionTransposeFlattenConcationOp(OpTest): - def setUp(self): self.init_test_case() self.op_type = "fusion_transpose_flatten_concat" @@ -34,8 +34,10 @@ class TestFusionTransposeFlattenConcationOp(OpTest): ins.append(("x%d" % i, a)) b = a.transpose(self.trans_axis) - flat_shape = (np.prod(b.shape[:self.flatten_axis]), - np.prod(b.shape[self.flatten_axis:])) + flat_shape = ( + np.prod(b.shape[: self.flatten_axis]), + np.prod(b.shape[self.flatten_axis :]), + ) c = b.reshape(flat_shape) flats.append(c) out = np.concatenate(flats, axis=self.concat_axis) @@ -44,7 +46,7 @@ class TestFusionTransposeFlattenConcationOp(OpTest): self.attrs = { 'trans_axis': list(self.trans_axis), 'flatten_axis': self.flatten_axis, - 'concat_axis': self.concat_axis + 'concat_axis': self.concat_axis, } self.outputs = {'Out': out} @@ -59,10 +61,10 @@ class TestFusionTransposeFlattenConcationOp(OpTest): self.concat_axis = 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCase1(TestFusionTransposeFlattenConcationOp): - def init_test_case(self): self.shapes = [(3, 4, 18, 17), (3, 8, 18, 7), (6, 12, 9, 5)] self.trans_axis = (0, 2, 3, 1) @@ -70,10 +72,10 @@ class TestCase1(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCase2(TestFusionTransposeFlattenConcationOp): - def init_test_case(self): self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)] self.trans_axis = (0, 2, 3, 1) @@ -81,10 +83,10 @@ class TestCase2(TestFusionTransposeFlattenConcationOp): self.concat_axis = 0 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCase3(TestFusionTransposeFlattenConcationOp): - def 
init_test_case(self): self.shapes = [(3, 8, 20, 17), (3, 8, 19, 17), (3, 8, 40, 17)] self.trans_axis = (0, 3, 2, 1) @@ -92,10 +94,10 @@ class TestCase3(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCase4(TestFusionTransposeFlattenConcationOp): - def init_test_case(self): self.shapes = [(3, 8, 9, 17), (8, 3, 9, 17), (4, 6, 9, 17)] self.trans_axis = (0, 2, 1, 3) @@ -103,10 +105,10 @@ class TestCase4(TestFusionTransposeFlattenConcationOp): self.concat_axis = 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCase5(TestFusionTransposeFlattenConcationOp): - def init_test_case(self): self.shapes = [(3, 8, 9, 17, 2), (3, 8, 2, 17, 9), (3, 17, 9, 8, 2)] self.trans_axis = (0, 2, 1, 4, 3) diff --git a/python/paddle/fluid/tests/unittests/test_gast_with_compatibility.py b/python/paddle/fluid/tests/unittests/test_gast_with_compatibility.py index 17d5fb11ea94fdaed8ef1d83a599d35c91609c1c..b098dcf4e16486473177ed1836d12b4371e98c14 100644 --- a/python/paddle/fluid/tests/unittests/test_gast_with_compatibility.py +++ b/python/paddle/fluid/tests/unittests/test_gast_with_compatibility.py @@ -20,7 +20,6 @@ import unittest class GastNodeTransformer(gast.NodeTransformer): - def __init__(self, root): self.root = root @@ -122,7 +121,6 @@ def code_ast(source): class TestPythonCompatibility(unittest.TestCase): - def _check_compatibility(self, source, target): source_dump = code_gast_ast(source) target_dump = code_ast(target) diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py index 5154d7feb77586a7d9bd89f375acd3718cdb6935..176012e96a2262504a314881037c5c38106d38bb 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py @@ -39,7 +39,6 @@ class TestGatherNdOpWithEmptyIndex(OpTest): class TestGatherNdOpWithIndex1(OpTest): - def setUp(self): self.op_type = "gather_nd" self.python_api = paddle.gather_nd @@ -55,7 +54,7 @@ class TestGatherNdOpWithIndex1(OpTest): class TestGatherNdOpWithLowIndex(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.op_type = "gather_nd" @@ -65,7 +64,9 @@ class TestGatherNdOpWithLowIndex(OpTest): self.inputs = {'X': xnp, 'Index': index} - self.outputs = {'Out': xnp[tuple(index.T)]} #[[14, 25, 1], [76, 22, 3]] + self.outputs = { + 'Out': xnp[tuple(index.T)] + } # [[14, 25, 1], [76, 22, 3]] def test_check_output(self): self.check_output(check_eager=False) @@ -75,7 +76,7 @@ class TestGatherNdOpWithLowIndex(OpTest): class TestGatherNdOpIndex1(OpTest): - #Index has low rank, X has high rank + # Index has low rank, X has high rank def setUp(self): self.op_type = "gather_nd" @@ -95,7 +96,7 @@ class TestGatherNdOpIndex1(OpTest): class TestGatherNdOpWithSameIndexAsX(OpTest): - #Index has same rank as X's rank + # Index has same rank as X's rank def setUp(self): self.op_type = "gather_nd" @@ -104,7 +105,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest): index = np.array([[1, 1], [2, 1]]).astype("int64") self.inputs = {'X': xnp, 'Index': index} - self.outputs = {'Out': xnp[tuple(index.T)]} #[25, 22] + self.outputs = {'Out': xnp[tuple(index.T)]} # [25, 22] def 
test_check_output(self): self.check_output(check_eager=False) @@ -114,7 +115,7 @@ class TestGatherNdOpWithSameIndexAsX(OpTest): class TestGatherNdOpWithHighRankSame(OpTest): - #Both Index and X have high rank, and Rank(Index) = Rank(X) + # Both Index and X have high rank, and Rank(Index) = Rank(X) def setUp(self): self.op_type = "gather_nd" @@ -134,7 +135,7 @@ class TestGatherNdOpWithHighRankSame(OpTest): class TestGatherNdOpWithHighRankDiff(OpTest): - #Both Index and X have high rank, and Rank(Index) < Rank(X) + # Both Index and X have high rank, and Rank(Index) < Rank(X) def setUp(self): self.op_type = "gather_nd" @@ -154,13 +155,12 @@ class TestGatherNdOpWithHighRankDiff(OpTest): self.check_grad(['X'], 'Out', check_eager=False) -#Test Python API +# Test Python API class TestGatherNdOpAPI(unittest.TestCase): - def test_case1(self): - x1 = fluid.layers.data(name='x1', - shape=[30, 40, 50, 60], - dtype='float32') + x1 = fluid.layers.data( + name='x1', shape=[30, 40, 50, 60], dtype='float32' + ) index1 = fluid.layers.data(name='index1', shape=[2, 4], dtype='int32') output1 = fluid.layers.gather_nd(x1, index1) @@ -175,23 +175,20 @@ class TestGatherNdOpAPI(unittest.TestCase): output3 = fluid.layers.gather_nd(x3, index3, name="gather_nd_layer") -#Test Raise Index Error +# Test Raise Index Error class TestGatherNdOpRaise(unittest.TestCase): - def test_check_raise(self): - def check_raise_is_test(): try: - x = fluid.layers.data(name='x', - shape=[3, 4, 5], - dtype='float32') - index = fluid.layers.data(name='index', - shape=[2, 10], - dtype='int32') + x = fluid.layers.data( + name='x', shape=[3, 4, 5], dtype='float32' + ) + index = fluid.layers.data( + name='index', shape=[2, 10], dtype='int32' + ) output = fluid.layers.gather_nd(x, index) except Exception as e: - t = \ - "Input(Index).shape[-1] should be no greater than Input(X).rank" + t = "Input(Index).shape[-1] should be no greater than Input(X).rank" if t in str(e): raise IndexError @@ -199,17 +196,17 @@ class TestGatherNdOpRaise(unittest.TestCase): class TestGatherNdError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='float32', name='x') index = paddle.fluid.data(shape=shape, dtype='bool', name='index') - index_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='index_float') + index_float = paddle.fluid.data( + shape=shape, dtype='float32', name='index_float' + ) np_x = np.random.random(shape).astype('float32') np_index = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -230,7 +227,6 @@ class TestGatherNdError(unittest.TestCase): class TestGatherNdAPI2(unittest.TestCase): - def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64') @@ -240,11 +236,9 @@ class TestGatherNdAPI2(unittest.TestCase): exe = fluid.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([[1]]) - result, = exe.run(feed={ - "data1": input, - "index": index_1 - }, - fetch_list=[out]) + (result,) = exe.run( + feed={"data1": input, "index": index_1}, fetch_list=[out] + ) expected_output = np.array([[3, 4]]) np.testing.assert_allclose(result, expected_output, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 
9e2dab2608173be6becbc537a0aaeafaa4c78187..c6301d3e472cd242fd82ad994b01ce7d1ea76c4f 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -29,7 +29,6 @@ def gather_numpy(x, index, axis): class TestGatherOp(OpTest): - def setUp(self): self.op_type = "gather" self.python_api = paddle.gather @@ -37,7 +36,7 @@ class TestGatherOp(OpTest): xnp = np.random.random(self.x_shape).astype(self.x_type) self.inputs = { 'X': xnp, - 'Index': np.array(self.index).astype(self.index_type) + 'Index': np.array(self.index).astype(self.index_type), } self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} @@ -58,31 +57,28 @@ class TestGatherOp(OpTest): class TestCase1(TestGatherOp): - def config(self): """ For one dimension input """ - self.x_shape = (100) + self.x_shape = 100 self.x_type = "float64" self.index = [1, 3, 5] self.index_type = "int32" class TestCase2(TestGatherOp): - def config(self): """ For int64_t index type """ - self.x_shape = (100) + self.x_shape = 100 self.x_type = "float64" self.index = [1, 3, 5] self.index_type = "int64" class TestCase3(TestGatherOp): - def config(self): """ For other input type @@ -94,7 +90,6 @@ class TestCase3(TestGatherOp): class TestCase4(TestGatherOp): - def config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': False} @@ -104,7 +99,6 @@ class TestCase4(TestGatherOp): class TestCase5(TestGatherOp): - def config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': False} @@ -114,7 +108,6 @@ class TestCase5(TestGatherOp): class TestCase6(TestGatherOp): - def config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': True} @@ -124,7 +117,6 @@ class TestCase6(TestGatherOp): class TestGatherBF16Op(OpTest): - def setUp(self): self.op_type = "gather" self.python_api = paddle.gather @@ -136,7 +128,7 @@ class TestGatherBF16Op(OpTest): self.inputs = { 'X': convert_float_to_uint16(xnp), 'Index': index_np, - 'Axis': axis_np + 'Axis': axis_np, } out = gather_numpy(self.inputs['X'], index_np, axis_np[0]) self.outputs = {'Out': out} @@ -159,7 +151,6 @@ class TestGatherBF16Op(OpTest): class TestGatherOp1(OpTest): - def setUp(self): self.op_type = "gather" self.python_api = paddle.gather @@ -190,7 +181,6 @@ class TestGatherOp1(OpTest): class TestGatherOp2(TestGatherOp1): - def config(self): """ For multi-dimension input @@ -204,7 +194,6 @@ class TestGatherOp2(TestGatherOp1): class TestGatherOp3(TestGatherOp1): - def config(self): """ For multi-dimension input @@ -218,7 +207,6 @@ class TestGatherOp3(TestGatherOp1): class TestGatherOp4(TestGatherOp1): - def config(self): """ For multi-dimension input @@ -233,7 +221,6 @@ class TestGatherOp4(TestGatherOp1): class API_TestGather(unittest.TestCase): - def test_out1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64') @@ -243,17 +230,16 @@ class API_TestGather(unittest.TestCase): exe = fluid.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([1, 2]) - result, = exe.run(feed={ - "data1": input, - "index": index_1 - }, - fetch_list=[out]) + (result,) = exe.run( + feed={"data1": input, "index": index_1}, fetch_list=[out] + ) expected_output = np.array([[3, 4], [5, 6]]) np.testing.assert_allclose(result, expected_output, rtol=1e-05) def test_out2(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = 
paddle.fluid.data('x', shape=[-1, 2], dtype='float64') index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') axis = paddle.fluid.data('axis', shape=[1], dtype='int32') @@ -263,18 +249,15 @@ class API_TestGather(unittest.TestCase): x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float64') index_np = np.array([1, 1]).astype('int32') axis_np = np.array([1]).astype('int32') - result, = exe.run(feed={ - "x": x_np, - "index": index_np, - 'axis': axis_np - }, - fetch_list=[out]) + (result,) = exe.run( + feed={"x": x_np, "index": index_np, 'axis': axis_np}, + fetch_list=[out], + ) expected_output = gather_numpy(x_np, index_np, axis_np[0]) np.testing.assert_allclose(result, expected_output, rtol=1e-05) class API_TestDygraphGather(unittest.TestCase): - def test_out1(self): paddle.disable_static() input_1 = np.array([[1, 2], [3, 4], [5, 6]]) @@ -319,18 +302,20 @@ class API_TestDygraphGather(unittest.TestCase): def test_dygraph(): with fluid.dygraph.guard(): - gpu_out = paddle.gather(paddle.to_tensor(x), - paddle.to_tensor(index)) + gpu_out = paddle.gather( + paddle.to_tensor(x), paddle.to_tensor(index) + ) return gpu_out.numpy() @switch_to_static_graph def test_static_graph(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x_t = paddle.static.data(name="x", dtype=x.dtype, shape=x.shape) - index_t = paddle.static.data(name="index", - dtype=index.dtype, - shape=index.shape) + index_t = paddle.static.data( + name="index", dtype=index.dtype, shape=index.shape + ) out_t = paddle.gather(x_t, index_t) feed = {x_t.name: x, index_t.name: index} fetch = [out_t] @@ -343,18 +328,18 @@ class API_TestDygraphGather(unittest.TestCase): class TestGathertError(unittest.TestCase): - def test_error1(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='int8', name='x') axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='index_float') + index_float = paddle.fluid.data( + shape=shape, dtype='float32', name='index_float' + ) def test_x_type(): paddle.gather(x, index) @@ -382,9 +367,9 @@ class TestGathertError(unittest.TestCase): shape = [8, 9, 6] x = fluid.data(shape=shape, dtype='int8', name='x') index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data(shape=shape, - dtype='float32', - name='index_float') + index_float = fluid.data( + shape=shape, dtype='float32', name='index_float' + ) def test_x_type(): paddle.fluid.layers.gather(x, index) @@ -398,7 +383,6 @@ class TestGathertError(unittest.TestCase): class TestCheckOutType(unittest.TestCase): - def test_out_type(self): data = paddle.static.data(shape=[16, 10], dtype='int64', name='x') index = paddle.static.data(shape=[4], dtype='int64', name='index') diff --git a/python/paddle/fluid/tests/unittests/test_gather_tree_op.py b/python/paddle/fluid/tests/unittests/test_gather_tree_op.py index 9d0d193e496e3a4781f3cfcfc77270162e5c89ba..18fb61c69f1ebb2a902e9fd43d457ad558240369 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_tree_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_tree_op.py @@ -21,17 +21,16 @@ from paddle.fluid.framework 
import program_guard, Program class TestGatherTreeOp(OpTest): - def setUp(self): self.op_type = "gather_tree" self.python_api = paddle.nn.functional.gather_tree max_length, batch_size, beam_size = 5, 2, 2 - ids = np.random.randint(0, - high=10, - size=(max_length, batch_size, beam_size)) - parents = np.random.randint(0, - high=beam_size, - size=(max_length, batch_size, beam_size)) + ids = np.random.randint( + 0, high=10, size=(max_length, batch_size, beam_size) + ) + parents = np.random.randint( + 0, high=beam_size, size=(max_length, batch_size, beam_size) + ) self.inputs = {"Ids": ids, "Parents": parents} self.outputs = {'Out': self.backtrace(ids, parents)} @@ -44,8 +43,9 @@ class TestGatherTreeOp(OpTest): (max_length, batch_size, beam_size) = ids.shape for batch in range(batch_size): for beam in range(beam_size): - out[max_length - 1, batch, beam] = ids[max_length - 1, batch, - beam] + out[max_length - 1, batch, beam] = ids[ + max_length - 1, batch, beam + ] parent = parents[max_length - 1, batch, beam] for step in range(max_length - 2, -1, -1): out[step, batch, beam] = ids[step, batch, parent] @@ -54,41 +54,46 @@ class TestGatherTreeOp(OpTest): class TestGatherTreeOpAPI(unittest.TestCase): - def test_case(self): paddle.enable_static() - ids = fluid.layers.data(name='ids', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False) - parents = fluid.layers.data(name='parents', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False) + ids = fluid.layers.data( + name='ids', shape=[5, 2, 2], dtype='int64', append_batch_size=False + ) + parents = fluid.layers.data( + name='parents', + shape=[5, 2, 2], + dtype='int64', + append_batch_size=False, + ) final_sequences = fluid.layers.gather_tree(ids, parents) paddle.disable_static() def test_case2(self): - ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], - [[0, 1], [9, 0]]]) - parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], - [[0, 0], [0, 1]]]) + ids = paddle.to_tensor( + [[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]] + ) + parents = paddle.to_tensor( + [[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]] + ) final_sequences = paddle.nn.functional.gather_tree(ids, parents) class TestGatherTreeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - ids = fluid.layers.data(name='ids', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False) - parents = fluid.layers.data(name='parents', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False) + ids = fluid.layers.data( + name='ids', + shape=[5, 2, 2], + dtype='int64', + append_batch_size=False, + ) + parents = fluid.layers.data( + name='parents', + shape=[5, 2, 2], + dtype='int64', + append_batch_size=False, + ) def test_Variable_ids(): # the input type must be Variable @@ -106,38 +111,46 @@ class TestGatherTreeOpError(unittest.TestCase): def test_type_ids(): # dtype must be int32 or int64 - bad_ids = fluid.layers.data(name='bad_ids', - shape=[5, 2, 2], - dtype='float32', - append_batch_size=False) + bad_ids = fluid.layers.data( + name='bad_ids', + shape=[5, 2, 2], + dtype='float32', + append_batch_size=False, + ) fluid.layers.gather_tree(bad_ids, parents) self.assertRaises(TypeError, test_type_ids) def test_type_parents(): # dtype must be int32 or int64 - bad_parents = fluid.layers.data(name='bad_parents', - shape=[5, 2, 2], - dtype='float32', - append_batch_size=False) + bad_parents = fluid.layers.data( + name='bad_parents', + shape=[5, 2, 2], + dtype='float32', + 
append_batch_size=False, + ) fluid.layers.gather_tree(ids, bad_parents) self.assertRaises(TypeError, test_type_parents) def test_ids_ndim(): - bad_ids = fluid.layers.data(name='bad_test_ids', - shape=[5, 2], - dtype='int64', - append_batch_size=False) + bad_ids = fluid.layers.data( + name='bad_test_ids', + shape=[5, 2], + dtype='int64', + append_batch_size=False, + ) paddle.nn.functional.gather_tree(bad_ids, parents) self.assertRaises(ValueError, test_ids_ndim) def test_parents_ndim(): - bad_parents = fluid.layers.data(name='bad_test_parents', - shape=[5, 2], - dtype='int64', - append_batch_size=False) + bad_parents = fluid.layers.data( + name='bad_test_parents', + shape=[5, 2], + dtype='int64', + append_batch_size=False, + ) paddle.nn.functional.gather_tree(ids, bad_parents) self.assertRaises(ValueError, test_parents_ndim) diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 16e34dc67898db78c689146c36d5d1503b757e81..c3ecf7303c17587835f42440b3c2900d80299b7e 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -23,7 +23,6 @@ import paddle class TestGaussianRandomOp(OpTest): - def setUp(self): self.op_type = "gaussian_random" self.python_api = paddle.normal @@ -35,7 +34,7 @@ class TestGaussianRandomOp(OpTest): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -43,7 +42,7 @@ class TestGaussianRandomOp(OpTest): def set_attrs(self): self.mean = 1.0 - self.std = 2. + self.std = 2.0 def test_check_output(self): self.check_output_customized(self.verify_output) @@ -64,10 +63,10 @@ class TestGaussianRandomOp(OpTest): np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestGaussianRandomBF16Op(OpTest): - def setUp(self): self.op_type = "gaussian_random" self.python_api = paddle.normal @@ -80,7 +79,7 @@ class TestGaussianRandomBF16Op(OpTest): "std": self.std, "seed": 10, "dtype": paddle.fluid.core.VarDesc.VarType.BF16, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -88,11 +87,12 @@ class TestGaussianRandomBF16Op(OpTest): def set_attrs(self): self.mean = 1.0 - self.std = 2. 
+ self.std = 2.0 def test_check_output(self): - self.check_output_with_place_customized(self.verify_output, - place=core.CUDAPlace(0)) + self.check_output_with_place_customized( + self.verify_output, place=core.CUDAPlace(0) + ) def test_eager(self): with _test_eager_guard(): @@ -112,7 +112,6 @@ class TestGaussianRandomBF16Op(OpTest): class TestMeanStdAreInt(TestGaussianRandomOp): - def set_attrs(self): self.mean = 1 self.std = 2 @@ -120,23 +119,22 @@ class TestMeanStdAreInt(TestGaussianRandomOp): # Situation 2: Attr(shape) is a list(with tensor) class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp): - def setUp(self): - '''Test gaussian_random op with specified value - ''' + '''Test gaussian_random op with specified value''' self.op_type = "gaussian_random" self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.attrs = { 'shape': self.infer_shape, 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.inputs = {"ShapeTensorList": shape_tensor_list} @@ -154,9 +152,9 @@ class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp): self.check_output_customized(self.verify_output) -class TestGaussianRandomOp2_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList - ): - +class TestGaussianRandomOp2_ShapeTensorList( + TestGaussianRandomOp_ShapeTensorList +): def init_data(self): self.shape = [123, 92] self.infer_shape = [-1, -1] @@ -166,9 +164,9 @@ class TestGaussianRandomOp2_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList self.seed = 10 -class TestGaussianRandomOp3_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList - ): - +class TestGaussianRandomOp3_ShapeTensorList( + TestGaussianRandomOp_ShapeTensorList +): def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -178,9 +176,9 @@ class TestGaussianRandomOp3_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList self.seed = 10 -class TestGaussianRandomOp4_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList - ): - +class TestGaussianRandomOp4_ShapeTensorList( + TestGaussianRandomOp_ShapeTensorList +): def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -192,10 +190,8 @@ class TestGaussianRandomOp4_ShapeTensorList(TestGaussianRandomOp_ShapeTensorList # Situation 3: shape is a tensor class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp): - def setUp(self): - '''Test gaussian_random op with specified value - ''' + '''Test gaussian_random op with specified value''' self.op_type = "gaussian_random" self.init_data() self.use_mkldnn = False @@ -205,7 +201,7 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': np.zeros((123, 92), dtype='float32')} @@ -219,54 +215,61 @@ class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp): # Test python API class TestGaussianRandomAPI(unittest.TestCase): - def test_api(self): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") - - shape_tensor_int64 = fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") - - out_1 = 
fluid.layers.gaussian_random(shape=[2000, 500], - dtype="float32", - mean=0.0, - std=1.0, - seed=10) - - out_2 = fluid.layers.gaussian_random(shape=[2000, positive_2_int32], - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_3 = fluid.layers.gaussian_random(shape=[2000, positive_2_int64], - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_4 = fluid.layers.gaussian_random(shape=shape_tensor_int32, - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_5 = fluid.layers.gaussian_random(shape=shape_tensor_int64, - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_6 = fluid.layers.gaussian_random(shape=shape_tensor_int64, - dtype=np.float32, - mean=0., - std=1.0, - seed=10) + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) + + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) + + out_1 = fluid.layers.gaussian_random( + shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10 + ) + + out_2 = fluid.layers.gaussian_random( + shape=[2000, positive_2_int32], + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_3 = fluid.layers.gaussian_random( + shape=[2000, positive_2_int64], + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_4 = fluid.layers.gaussian_random( + shape=shape_tensor_int32, + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_5 = fluid.layers.gaussian_random( + shape=shape_tensor_int64, + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_6 = fluid.layers.gaussian_random( + shape=shape_tensor_int64, + dtype=np.float32, + mean=0.0, + std=1.0, + seed=10, + ) exe = fluid.Executor(place=fluid.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( @@ -275,20 +278,21 @@ class TestGaussianRandomAPI(unittest.TestCase): "shape_tensor_int32": np.array([2000, 500]).astype("int32"), "shape_tensor_int64": np.array([2000, 500]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6], + ) self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_1), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_2), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_3), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_5), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_5), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_6), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1) def test_default_dtype(self): paddle.disable_static() @@ -316,7 +320,6 @@ class TestGaussianRandomAPI(unittest.TestCase): class TestStandardNormalDtype(unittest.TestCase): - def test_default_dtype(self): paddle.disable_static() @@ -343,7 +346,6 @@ class TestStandardNormalDtype(unittest.TestCase): class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not 
paddle.is_compiled_with_cuda(): @@ -356,9 +358,9 @@ class TestRandomValue(unittest.TestCase): def _check_random_value(dtype, expect, expect_mean, expect_std): x = paddle.randn([32, 3, 1024, 1024], dtype=dtype) actual = x.numpy() - np.testing.assert_allclose(actual[2, 1, 512, 1000:1010], - expect, - rtol=1e-05) + np.testing.assert_allclose( + actual[2, 1, 512, 1000:1010], expect, rtol=1e-05 + ) self.assertTrue(np.mean(actual), expect_mean) self.assertTrue(np.std(actual), expect_std) @@ -367,22 +369,42 @@ class TestRandomValue(unittest.TestCase): paddle.set_device('gpu') paddle.seed(2021) expect = [ - -0.79037829, -0.54411126, -0.32266671, 0.35791815, 1.44169267, - -0.87785644, -1.23909874, -2.18194139, 0.49489656, 0.40703062 + -0.79037829, + -0.54411126, + -0.32266671, + 0.35791815, + 1.44169267, + -0.87785644, + -1.23909874, + -2.18194139, + 0.49489656, + 0.40703062, ] - expect_mean = -0.0000053026194133403266873214888799115129813799285329878330230713 + expect_mean = ( + -0.0000053026194133403266873214888799115129813799285329878330230713 + ) expect_std = 0.99999191058126390974081232343451119959354400634765625 - _check_random_value(core.VarDesc.VarType.FP64, expect, expect_mean, - expect_std) + _check_random_value( + core.VarDesc.VarType.FP64, expect, expect_mean, expect_std + ) expect = [ - -0.7988942, 1.8644791, 0.02782744, 1.3692524, 0.6419724, 0.12436751, - 0.12058455, -1.9984808, 1.5635862, 0.18506318 + -0.7988942, + 1.8644791, + 0.02782744, + 1.3692524, + 0.6419724, + 0.12436751, + 0.12058455, + -1.9984808, + 1.5635862, + 0.18506318, ] expect_mean = -0.00004762359094456769526004791259765625 expect_std = 0.999975681304931640625 - _check_random_value(core.VarDesc.VarType.FP32, expect, expect_mean, - expect_std) + _check_random_value( + core.VarDesc.VarType.FP32, expect, expect_mean, expect_std + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index d5ef16cea423cf574bf73fabf706c5763d7a060e..7a2d02dcb81128354a891188e883bcdd7959b728 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestGcdAPI(unittest.TestCase): - def setUp(self): self.x_np = 12 self.y_np = 20 @@ -37,32 +36,34 @@ class TestGcdAPI(unittest.TestCase): y = fluid.data(name='input2', dtype='int32', shape=self.y_shape) out = paddle.gcd(x, y) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(fluid.default_main_program(), - feed={ - 'input1': self.x_np, - 'input2': self.y_np - }, - fetch_list=[out]) - self.assertTrue((np.array(res[0]) == np.gcd(self.x_np, - self.y_np)).all()) + res = exe.run( + fluid.default_main_program(), + feed={'input1': self.x_np, 'input2': self.y_np}, + fetch_list=[out], + ) + self.assertTrue( + (np.array(res[0]) == np.gcd(self.x_np, self.y_np)).all() + ) def test_dygraph(self): paddle.disable_static() x = paddle.to_tensor(self.x_np) y = paddle.to_tensor(self.y_np) result = paddle.gcd(x, y) - np.testing.assert_allclose(np.gcd(self.x_np, self.y_np), - result.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np.gcd(self.x_np, self.y_np), result.numpy(), rtol=1e-05 + ) paddle.enable_static() class TestGcdAPI2(TestGcdAPI): - def setUp(self): self.x_np = np.arange(6).astype(np.int32) self.y_np = np.array([20]).astype(np.int32) @@ -71,7 
+72,6 @@ class TestGcdAPI2(TestGcdAPI): class TestGcdAPI3(TestGcdAPI): - def setUp(self): self.x_np = 0 self.y_np = 20 @@ -80,7 +80,6 @@ class TestGcdAPI3(TestGcdAPI): class TestGcdAPI4(TestGcdAPI): - def setUp(self): self.x_np = 0 self.y_np = 0 @@ -89,7 +88,6 @@ class TestGcdAPI4(TestGcdAPI): class TestGcdAPI5(TestGcdAPI): - def setUp(self): self.x_np = 12 self.y_np = -20 diff --git a/python/paddle/fluid/tests/unittests/test_gelu_op.py b/python/paddle/fluid/tests/unittests/test_gelu_op.py index 6af627be73b34bc5cae9a056b1d65fd6788dacb2..203e2517cd42511e48cd78c740164d3f5e6f3d65 100644 --- a/python/paddle/fluid/tests/unittests/test_gelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_gelu_op.py @@ -24,15 +24,20 @@ from paddle.fluid.framework import _test_eager_guard def gelu(x, approximate): if approximate: - y_ref = 0.5 * x * ( - 1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + y_ref = ( + 0.5 + * x + * ( + 1.0 + + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))) + ) + ) else: y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2))) return y_ref.astype(x.dtype) class TestGeluOp(unittest.TestCase): - def _test_case1_cpu(self, approximate): x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32) y_ref = gelu(x, approximate) @@ -87,10 +92,9 @@ class TestGeluOp(unittest.TestCase): y_ref, x_g_ref = run_gelu_op(True) np.testing.assert_allclose(y_ref, y_fast_math, rtol=1e-05, atol=0.0005) - np.testing.assert_allclose(x_g_ref, - x_g_fast_math, - rtol=1e-05, - atol=0.0005) + np.testing.assert_allclose( + x_g_ref, x_g_fast_math, rtol=1e-05, atol=0.0005 + ) def test_fast_math_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py b/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py index 18a13197044ac31d6fb5d58a9a3b1f9ebf7cc987..a2121ee3ef50b26bd7af3d2042d1e5f8ade25694 100644 --- a/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py +++ b/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py @@ -16,6 +16,7 @@ import unittest import numpy as np import math from op_test import OpTest + ''' # Equivalent code rles = mask_util.frPyObjects([segm], im_h, im_w) @@ -34,15 +35,17 @@ def decode(cnts, m): def poly2mask(xy, k, h, w): - scale = 5. 
+ scale = 5.0 x = [int(scale * p + 0.5) for p in xy[::2]] x = x + [x[0]] y = [int(scale * p + 0.5) for p in xy[1::2]] y = y + [y[0]] - m = sum([ - int(max(abs(x[j] - x[j + 1]), abs(y[j] - y[j + 1]))) + int(1) - for j in range(k) - ]) + m = sum( + [ + int(max(abs(x[j] - x[j + 1]), abs(y[j] - y[j + 1]))) + int(1) + for j in range(k) + ] + ) u, v = [], [] for j in range(k): @@ -58,20 +61,22 @@ def poly2mask(xy, k, h, w): ys, ye = ye, ys if dx >= dy: - if (dx == 0): assert ye - ys == 0 + if dx == 0: + assert ye - ys == 0 s = 0 if dx == 0 else float(ye - ys) / dx else: - if (dy == 0): assert xe - xs == 0 + if dy == 0: + assert xe - xs == 0 s = 0 if dy == 0 else float(xe - xs) / dy if dx >= dy: ts = [dx - d if flip else d for d in range(dx + 1)] u.extend([xs + t for t in ts]) - v.extend([int(ys + s * t + .5) for t in ts]) + v.extend([int(ys + s * t + 0.5) for t in ts]) else: ts = [dy - d if flip else d for d in range(dy + 1)] v.extend([t + ys for t in ts]) - u.extend([int(xs + s * t + .5) for t in ts]) + u.extend([int(xs + s * t + 0.5) for t in ts]) k = len(u) x = np.zeros((k), np.int_) @@ -80,11 +85,11 @@ def poly2mask(xy, k, h, w): for j in range(1, k): if u[j] != u[j - 1]: xd = float(u[j] if (u[j] < u[j - 1]) else (u[j] - 1)) - xd = (xd + .5) / scale - .5 - if (math.floor(xd) != xd or xd < 0 or xd > (w - 1)): + xd = (xd + 0.5) / scale - 0.5 + if math.floor(xd) != xd or xd < 0 or xd > (w - 1): continue yd = float(v[j] if v[j] < v[j - 1] else v[j - 1]) - yd = (yd + .5) / scale - .5 + yd = (yd + 0.5) / scale - 0.5 yd = math.ceil(0 if yd < 0 else (h if yd > h else yd)) x[m] = int(xd) y[m] = int(yd) @@ -93,21 +98,21 @@ def poly2mask(xy, k, h, w): a = [int(x[i] * h + y[i]) for i in range(k)] a.append(h * w) a.sort() - b = [0] + a[:len(a) - 1] + b = [0] + a[: len(a) - 1] a = [c - d for (c, d) in zip(a, b)] k += 1 b = [0 for i in range(k)] b[0] = a[0] m, j = 1, 1 - while (j < k): + while j < k: if a[j] > 0: b[m] = a[j] m += 1 j += 1 else: j += 1 - if (j < k): + if j < k: b[m - 1] += a[j] j += 1 mask = decode(b, m) @@ -134,19 +139,28 @@ def bbox_overlaps(boxes, query_boxes): K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): - box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) *\ - (query_boxes[k, 3] - query_boxes[k, 1] + 1) + box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * ( + query_boxes[k, 3] - query_boxes[k, 1] + 1 + ) for n in range(N): - iw = min(boxes[n, 2], query_boxes[k, 2]) -\ - max(boxes[n, 0], query_boxes[k, 0]) + 1 + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) + - max(boxes[n, 0], query_boxes[k, 0]) + + 1 + ) if iw > 0: - ih = min(boxes[n, 3], query_boxes[k, 3]) -\ - max(boxes[n, 1], query_boxes[k, 1]) + 1 + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) + - max(boxes[n, 1], query_boxes[k, 1]) + + 1 + ) if ih > 0: ua = float( - (boxes[n, 2] - boxes[n, 0] + 1) *\ - (boxes[n, 3] - boxes[n, 1] + 1) +\ - box_area - iw * ih) + (boxes[n, 2] - boxes[n, 0] + 1) + * (boxes[n, 3] - boxes[n, 1] + 1) + + box_area + - iw * ih + ) overlaps[n, k] = iw * ih / ua return overlaps @@ -191,7 +205,8 @@ def expand_mask_targets(masks, mask_class_labels, resolution, num_classes): # Target values of -1 are "don't care" / ignore labels mask_targets = -np.ones( - (masks.shape[0], num_classes * resolution**2), dtype=np.int32) + (masks.shape[0], num_classes * resolution**2), dtype=np.int32 + ) for i in range(masks.shape[0]): cls = int(mask_class_labels[i]) start = resolution**2 * cls @@ -203,9 +218,18 @@ def expand_mask_targets(masks, mask_class_labels, resolution, 
num_classes): return mask_targets -def generate_mask_labels(num_classes, im_info, gt_classes, is_crowd, - label_int32, gt_polys, resolution, rois, roi_lod, - gt_lod): +def generate_mask_labels( + num_classes, + im_info, + gt_classes, + is_crowd, + label_int32, + gt_polys, + resolution, + rois, + roi_lod, + gt_lod, +): mask_rois = [] roi_has_mask_int32 = [] mask_int32 = [] @@ -215,9 +239,16 @@ def generate_mask_labels(num_classes, im_info, gt_classes, is_crowd, roi_e = roi_lod[i + 1] gt_s = gt_lod[i] gt_e = gt_lod[i + 1] - mask_blob = _sample_mask(num_classes, im_info[i], gt_classes[gt_s:gt_e], - is_crowd[gt_s:gt_e], label_int32[roi_s:roi_e], - gt_polys[i], resolution, rois[roi_s:roi_e]) + mask_blob = _sample_mask( + num_classes, + im_info[i], + gt_classes[gt_s:gt_e], + is_crowd[gt_s:gt_e], + label_int32[roi_s:roi_e], + gt_polys[i], + resolution, + rois[roi_s:roi_e], + ) new_lod.append(mask_blob['mask_rois'].shape[0]) mask_rois.append(mask_blob['mask_rois']) roi_has_mask_int32.append(mask_blob['roi_has_mask_int32']) @@ -226,14 +257,15 @@ def generate_mask_labels(num_classes, im_info, gt_classes, is_crowd, def _sample_mask( - num_classes, - im_info, - gt_classes, - is_crowd, - label_int32, - gt_polys, # [[[], []], []] - resolution, - rois): + num_classes, + im_info, + gt_classes, + is_crowd, + label_int32, + gt_polys, # [[[], []], []] + resolution, + rois, +): mask_blob = {} im_scale = im_info[2] sample_boxes = rois @@ -248,7 +280,8 @@ def _sample_mask( masks = np.zeros((fg_inds.shape[0], resolution**2), dtype=np.int32) rois_fg = sample_boxes[fg_inds] overlaps_bbfg_bbpolys = bbox_overlaps( - rois_fg.astype(np.float32), boxes_from_polys.astype(np.float32)) + rois_fg.astype(np.float32), boxes_from_polys.astype(np.float32) + ) fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1) for i in range(rois_fg.shape[0]): fg_polys_ind = fg_polys_inds[i] @@ -261,10 +294,11 @@ def _sample_mask( bg_inds = np.where(label_int32 == 0)[0] rois_fg = sample_boxes[bg_inds[0]].reshape((1, -1)) masks = -np.ones((1, resolution**2), dtype=np.int32) - mask_class_labels = np.zeros((1, )) + mask_class_labels = np.zeros((1,)) roi_has_mask = np.append(roi_has_mask, 0) - masks = expand_mask_targets(masks, mask_class_labels, resolution, - num_classes) + masks = expand_mask_targets( + masks, mask_class_labels, resolution, num_classes + ) rois_fg *= im_scale mask_blob['mask_rois'] = rois_fg mask_blob['roi_has_mask_int32'] = roi_has_mask @@ -280,7 +314,6 @@ def trans_lod(lod): class TestGenerateMaskLabels(OpTest): - def set_data(self): self.init_test_case() self.make_generate_proposal_labels_out() @@ -293,16 +326,16 @@ class TestGenerateMaskLabels(OpTest): 'IsCrowd': (self.is_crowd.astype(np.int32), self.gt_lod), 'LabelsInt32': (self.label_int32.astype(np.int32), self.rois_lod), 'GtSegms': (self.gt_polys.astype(np.float32), self.masks_lod), - 'Rois': (self.rois.astype(np.float32), self.rois_lod) + 'Rois': (self.rois.astype(np.float32), self.rois_lod), } self.attrs = { 'num_classes': self.num_classes, - 'resolution': self.resolution + 'resolution': self.resolution, } self.outputs = { 'MaskRois': (self.mask_rois, [self.new_lod]), 'RoiHasMaskInt32': (self.roi_has_mask_int32, [self.new_lod]), - 'MaskInt32': (self.mask_int32, [self.new_lod]) + 'MaskInt32': (self.mask_int32, [self.new_lod]), } def init_test_case(self): @@ -358,9 +391,9 @@ class TestGenerateMaskLabels(OpTest): lod1.append(poly_num) pts = [] for j in range(poly_num): - poly_size = np.random.randint(min_poly_size, - max_poly_size, - size=1)[0] + poly_size = 
np.random.randint( + min_poly_size, max_poly_size, size=1 + )[0] x = np.random.rand(poly_size, 1) * w y = np.random.rand(poly_size, 1) * h xy = np.concatenate((x, y), axis=1) @@ -392,18 +425,27 @@ class TestGenerateMaskLabels(OpTest): def init_test_output(self): roi_lod = trans_lod(self.rois_lod[0]) gt_lod = trans_lod(self.gt_lod[0]) - outs = generate_mask_labels(self.num_classes, self.im_info, - self.gt_classes, self.is_crowd, - self.label_int32, self.gt_polys_list, - self.resolution, self.rois, roi_lod, gt_lod) + outs = generate_mask_labels( + self.num_classes, + self.im_info, + self.gt_classes, + self.is_crowd, + self.label_int32, + self.gt_polys_list, + self.resolution, + self.rois, + roi_lod, + gt_lod, + ) self.mask_rois = outs[0] self.roi_has_mask_int32 = outs[1] self.mask_int32 = outs[2] self.new_lod = outs[3] self.mask_rois = np.vstack(self.mask_rois) - self.roi_has_mask_int32 = np.hstack(self.roi_has_mask_int32)[:, - np.newaxis] + self.roi_has_mask_int32 = np.hstack(self.roi_has_mask_int32)[ + :, np.newaxis + ] self.mask_int32 = np.vstack(self.mask_int32) def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py b/python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py index 4b5a5ebc2ba99073ce3500c00081cdb86dd776cb..8027234ce5bbc70d6756fa83c755c7965c7a9446 100644 --- a/python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py +++ b/python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py @@ -17,22 +17,24 @@ import numpy as np from op_test import OpTest -def generate_proposal_labels_in_python(rpn_rois, - gt_classes, - is_crowd, - gt_boxes, - im_info, - batch_size_per_im, - fg_fraction, - fg_thresh, - bg_thresh_hi, - bg_thresh_lo, - bbox_reg_weights, - class_nums, - use_random, - is_cls_agnostic, - is_cascade_rcnn, - max_overlaps=None): +def generate_proposal_labels_in_python( + rpn_rois, + gt_classes, + is_crowd, + gt_boxes, + im_info, + batch_size_per_im, + fg_fraction, + fg_thresh, + bg_thresh_hi, + bg_thresh_lo, + bbox_reg_weights, + class_nums, + use_random, + is_cls_agnostic, + is_cascade_rcnn, + max_overlaps=None, +): rois = [] labels_int32 = [] bbox_targets = [] @@ -41,16 +43,29 @@ def generate_proposal_labels_in_python(rpn_rois, max_overlap_with_gt = [] lod = [] assert len(rpn_rois) == len( - im_info), 'batch size of rpn_rois and ground_truth is not matched' + im_info + ), 'batch size of rpn_rois and ground_truth is not matched' for im_i in range(len(im_info)): max_overlap = max_overlaps[im_i] if is_cascade_rcnn else None - frcn_blobs = _sample_rois(rpn_rois[im_i], gt_classes[im_i], - is_crowd[im_i], gt_boxes[im_i], im_info[im_i], - batch_size_per_im, fg_fraction, fg_thresh, - bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, - class_nums, use_random, is_cls_agnostic, - is_cascade_rcnn, max_overlap) + frcn_blobs = _sample_rois( + rpn_rois[im_i], + gt_classes[im_i], + is_crowd[im_i], + gt_boxes[im_i], + im_info[im_i], + batch_size_per_im, + fg_fraction, + fg_thresh, + bg_thresh_hi, + bg_thresh_lo, + bbox_reg_weights, + class_nums, + use_random, + is_cls_agnostic, + is_cascade_rcnn, + max_overlap, + ) lod.append(frcn_blobs['rois'].shape[0]) rois.append(frcn_blobs['rois']) labels_int32.append(frcn_blobs['labels_int32']) @@ -59,7 +74,15 @@ def generate_proposal_labels_in_python(rpn_rois, bbox_outside_weights.append(frcn_blobs['bbox_outside_weights']) max_overlap_with_gt.append(frcn_blobs['max_overlap']) - return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, 
max_overlap_with_gt, lod + return ( + rois, + labels_int32, + bbox_targets, + bbox_inside_weights, + bbox_outside_weights, + max_overlap_with_gt, + lod, + ) def filter_roi(rois, max_overlap): @@ -71,16 +94,30 @@ def filter_roi(rois, max_overlap): return np.zeros((1, 4)).astype('float32') -def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, - batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi, - bg_thresh_lo, bbox_reg_weights, class_nums, use_random, - is_cls_agnostic, is_cascade_rcnn, max_overlap): +def _sample_rois( + rpn_rois, + gt_classes, + is_crowd, + gt_boxes, + im_info, + batch_size_per_im, + fg_fraction, + fg_thresh, + bg_thresh_hi, + bg_thresh_lo, + bbox_reg_weights, + class_nums, + use_random, + is_cls_agnostic, + is_cascade_rcnn, + max_overlap, +): rois_per_image = int(batch_size_per_im) fg_rois_per_im = int(np.round(fg_fraction * rois_per_image)) # Roidb im_scale = im_info[2] - inv_im_scale = 1. / im_scale + inv_im_scale = 1.0 / im_scale rpn_rois = rpn_rois * inv_im_scale if is_cascade_rcnn: @@ -97,12 +134,14 @@ def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, # Boxes which with non-zero overlap with gt boxes overlapped_boxes_ind = np.where(overlaps_max > 0)[0] overlapped_boxes_gt_classes = gt_classes[ - overlaps_argmax[overlapped_boxes_ind]] + overlaps_argmax[overlapped_boxes_ind] + ] gt_overlaps[ - overlapped_boxes_ind, - overlapped_boxes_gt_classes] = overlaps_max[overlapped_boxes_ind] + overlapped_boxes_ind, overlapped_boxes_gt_classes + ] = overlaps_max[overlapped_boxes_ind] box_to_gt_ind_map[overlapped_boxes_ind] = overlaps_argmax[ - overlapped_boxes_ind] + overlapped_boxes_ind + ] crowd_ind = np.where(is_crowd)[0] gt_overlaps[crowd_ind] = -1.0 @@ -112,8 +151,9 @@ def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, if is_cascade_rcnn: # Cascade RCNN Decode Filter fg_inds = np.where(max_overlaps >= fg_thresh)[0] - bg_inds = np.where((max_overlaps < bg_thresh_hi) - & (max_overlaps >= bg_thresh_lo))[0] + bg_inds = np.where( + (max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo) + )[0] fg_rois_per_this_image = fg_inds.shape[0] bg_rois_per_this_image = bg_inds.shape[0] else: @@ -122,21 +162,23 @@ def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, fg_rois_per_this_image = np.minimum(fg_rois_per_im, fg_inds.shape[0]) # Sample foreground if there are too many if (fg_inds.shape[0] > fg_rois_per_this_image) and use_random: - fg_inds = np.random.choice(fg_inds, - size=fg_rois_per_this_image, - replace=False) + fg_inds = np.random.choice( + fg_inds, size=fg_rois_per_this_image, replace=False + ) fg_inds = fg_inds[:fg_rois_per_this_image] # Background - bg_inds = np.where((max_overlaps < bg_thresh_hi) - & (max_overlaps >= bg_thresh_lo))[0] + bg_inds = np.where( + (max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo) + )[0] bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image - bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, - bg_inds.shape[0]) + bg_rois_per_this_image = np.minimum( + bg_rois_per_this_image, bg_inds.shape[0] + ) # Sample background if there are too many if (bg_inds.shape[0] > bg_rois_per_this_image) and use_random: - bg_inds = np.random.choice(bg_inds, - size=bg_rois_per_this_image, - replace=False) + bg_inds = np.random.choice( + bg_inds, size=bg_rois_per_this_image, replace=False + ) bg_inds = bg_inds[:bg_rois_per_this_image] keep_inds = np.append(fg_inds, bg_inds) @@ -146,22 +188,27 @@ def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, 
sampled_max_overlap = max_overlaps[keep_inds] sampled_gts = gt_boxes[box_to_gt_ind_map[keep_inds]] sampled_gts[fg_rois_per_this_image:, :] = gt_boxes[0] - bbox_label_targets = _compute_targets(sampled_boxes, sampled_gts, - sampled_labels, bbox_reg_weights) + bbox_label_targets = _compute_targets( + sampled_boxes, sampled_gts, sampled_labels, bbox_reg_weights + ) bbox_targets, bbox_inside_weights = _expand_bbox_targets( - bbox_label_targets, class_nums, is_cls_agnostic) - bbox_outside_weights = np.array(bbox_inside_weights > 0, - dtype=bbox_inside_weights.dtype) + bbox_label_targets, class_nums, is_cls_agnostic + ) + bbox_outside_weights = np.array( + bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype + ) # Scale rois sampled_rois = sampled_boxes * im_scale # Faster RCNN blobs - frcn_blobs = dict(rois=sampled_rois, - labels_int32=sampled_labels, - bbox_targets=bbox_targets, - bbox_inside_weights=bbox_inside_weights, - bbox_outside_weights=bbox_outside_weights, - max_overlap=sampled_max_overlap) + frcn_blobs = dict( + rois=sampled_rois, + labels_int32=sampled_labels, + bbox_targets=bbox_targets, + bbox_inside_weights=bbox_inside_weights, + bbox_outside_weights=bbox_outside_weights, + max_overlap=sampled_max_overlap, + ) return frcn_blobs @@ -195,12 +242,13 @@ def _compute_targets(roi_boxes, gt_boxes, labels, bbox_reg_weights): targets = np.zeros(roi_boxes.shape) bbox_reg_weights = np.asarray(bbox_reg_weights) - targets = _box_to_delta(ex_boxes=roi_boxes, - gt_boxes=gt_boxes, - weights=bbox_reg_weights) + targets = _box_to_delta( + ex_boxes=roi_boxes, gt_boxes=gt_boxes, weights=bbox_reg_weights + ) - return np.hstack([labels[:, np.newaxis], targets]).astype(np.float32, - copy=False) + return np.hstack([labels[:, np.newaxis], targets]).astype( + np.float32, copy=False + ) def _box_to_delta(ex_boxes, gt_boxes, weights): @@ -230,8 +278,12 @@ def _expand_bbox_targets(bbox_targets_input, class_nums, is_cls_agnostic): # class_labels = [1 if ll > 0 else 0 for ll in class_labels] # class_labels = np.array(class_labels, dtype=np.int32) # class_nums = 2 - bbox_targets = np.zeros((class_labels.shape[0], - 4 * class_nums if not is_cls_agnostic else 4 * 2)) + bbox_targets = np.zeros( + ( + class_labels.shape[0], + 4 * class_nums if not is_cls_agnostic else 4 * 2, + ) + ) bbox_inside_weights = np.zeros(bbox_targets.shape) for ind in fg_inds: class_label = int(class_labels[ind]) if not is_cls_agnostic else 1 @@ -243,9 +295,8 @@ def _expand_bbox_targets(bbox_targets_input, class_nums, is_cls_agnostic): class TestGenerateProposalLabelsOp(OpTest): - def set_data(self): - #self.use_random = False + # self.use_random = False self.init_use_random() self.init_test_params() self.init_test_input() @@ -260,8 +311,10 @@ class TestGenerateProposalLabelsOp(OpTest): 'ImInfo': self.im_info, } if self.max_overlaps is not None: - self.inputs['MaxOverlap'] = (self.max_overlaps[0], - self.rpn_rois_lod) + self.inputs['MaxOverlap'] = ( + self.max_overlaps[0], + self.rpn_rois_lod, + ) self.attrs = { 'batch_size_per_im': self.batch_size_per_im, @@ -273,7 +326,7 @@ class TestGenerateProposalLabelsOp(OpTest): 'class_nums': self.class_nums, 'use_random': self.use_random, 'is_cls_agnostic': self.is_cls_agnostic, - 'is_cascade_rcnn': self.is_cascade_rcnn + 'is_cascade_rcnn': self.is_cascade_rcnn, } self.outputs = { 'Rois': (self.rois, [self.lod]), @@ -291,7 +344,9 @@ class TestGenerateProposalLabelsOp(OpTest): self.op_type = 'generate_proposal_labels' self.set_data() - def init_test_cascade(self, ): + def init_test_cascade( + 
self, + ): self.is_cascade_rcnn = False self.max_overlaps = None @@ -317,28 +372,46 @@ class TestGenerateProposalLabelsOp(OpTest): for i in range(len(images_shape)): self.im_info[i, 0] = images_shape[i][0] self.im_info[i, 1] = images_shape[i][1] - self.im_info[i, 2] = 0.8 #scale + self.im_info[i, 2] = 0.8 # scale self.rpn_rois, self.rpn_rois_lod = _generate_proposals( - images_shape, proposal_nums) + images_shape, proposal_nums + ) ground_truth, self.gts_lod = _generate_groundtruth( - images_shape, self.class_nums, gt_nums) + images_shape, self.class_nums, gt_nums + ) self.gt_classes = [gt['gt_classes'] for gt in ground_truth] self.gt_boxes = [gt['boxes'] for gt in ground_truth] self.is_crowd = [gt['is_crowd'] for gt in ground_truth] def init_test_output(self): - self.rois, self.labels_int32, self.bbox_targets, \ - self.bbox_inside_weights, self.bbox_outside_weights, \ - self.max_overlap_with_gt, \ - self.lod = generate_proposal_labels_in_python( - self.rpn_rois, self.gt_classes, self.is_crowd, self.gt_boxes, self.im_info, - self.batch_size_per_im, self.fg_fraction, - self.fg_thresh, self.bg_thresh_hi, self.bg_thresh_lo, - self.bbox_reg_weights, self.class_nums, self.use_random, - self.is_cls_agnostic, self.is_cascade_rcnn, self.max_overlaps - ) + ( + self.rois, + self.labels_int32, + self.bbox_targets, + self.bbox_inside_weights, + self.bbox_outside_weights, + self.max_overlap_with_gt, + self.lod, + ) = generate_proposal_labels_in_python( + self.rpn_rois, + self.gt_classes, + self.is_crowd, + self.gt_boxes, + self.im_info, + self.batch_size_per_im, + self.fg_fraction, + self.fg_thresh, + self.bg_thresh_hi, + self.bg_thresh_lo, + self.bbox_reg_weights, + self.class_nums, + self.use_random, + self.is_cls_agnostic, + self.is_cascade_rcnn, + self.max_overlaps, + ) self.rois = np.vstack(self.rois) self.labels_int32 = np.hstack(self.labels_int32) self.labels_int32 = self.labels_int32[:, np.newaxis] @@ -349,19 +422,17 @@ class TestGenerateProposalLabelsOp(OpTest): class TestCascade(TestGenerateProposalLabelsOp): - def init_test_cascade(self): self.is_cascade_rcnn = True roi_num = len(self.rpn_rois[0]) self.max_overlaps = [] max_overlap = np.random.rand(roi_num).astype('float32') # Make GT samples with overlap = 1 - max_overlap[max_overlap > 0.9] = 1. 
+ max_overlap[max_overlap > 0.9] = 1.0 self.max_overlaps.append(max_overlap) class TestUseRandom(TestGenerateProposalLabelsOp): - def init_use_random(self): self.use_random = True self.is_cascade_rcnn = False @@ -384,7 +455,6 @@ class TestUseRandom(TestGenerateProposalLabelsOp): class TestClsAgnostic(TestCascade): - def init_test_params(self): self.batch_size_per_im = 512 self.fg_fraction = 0.25 @@ -397,7 +467,6 @@ class TestClsAgnostic(TestCascade): class TestOnlyGT(TestCascade): - def init_test_input(self): np.random.seed(0) gt_nums = 6 # Keep same with batch_size_per_im for unittest @@ -407,10 +476,11 @@ class TestOnlyGT(TestCascade): for i in range(len(images_shape)): self.im_info[i, 0] = images_shape[i][0] self.im_info[i, 1] = images_shape[i][1] - self.im_info[i, 2] = 0.8 #scale + self.im_info[i, 2] = 0.8 # scale ground_truth, self.gts_lod = _generate_groundtruth( - images_shape, self.class_nums, gt_nums) + images_shape, self.class_nums, gt_nums + ) self.gt_classes = [gt['gt_classes'] for gt in ground_truth] self.gt_boxes = [gt['boxes'] for gt in ground_truth] @@ -420,7 +490,6 @@ class TestOnlyGT(TestCascade): class TestOnlyGT2(TestCascade): - def init_test_cascade(self): self.is_cascade_rcnn = True roi_num = len(self.rpn_rois[0]) @@ -447,13 +516,15 @@ def _generate_groundtruth(images_shape, class_nums, gt_nums): num_gts = 0 for i, image_shape in enumerate(images_shape): # Avoid background - gt_classes = np.random.randint(low=1, high=class_nums, - size=gt_nums).astype(np.int32) + gt_classes = np.random.randint( + low=1, high=class_nums, size=gt_nums + ).astype(np.int32) gt_boxes = _generate_boxes(image_shape, gt_nums) is_crowd = np.zeros((gt_nums), dtype=np.int32) is_crowd[0] = 1 ground_truth.append( - dict(gt_classes=gt_classes, boxes=gt_boxes, is_crowd=is_crowd)) + dict(gt_classes=gt_classes, boxes=gt_boxes, is_crowd=is_crowd) + ) num_gts += len(gt_classes) gts_lod.append(num_gts) return ground_truth, [gts_lod] @@ -467,8 +538,12 @@ def _generate_boxes(image_size, box_nums): wh = xywh[:, [2, 3]] * (image_size - xy1) xy2 = xy1 + wh boxes = np.hstack([xy1, xy2]) - boxes[:, [0, 2]] = np.minimum(width - 1., np.maximum(0., boxes[:, [0, 2]])) - boxes[:, [1, 3]] = np.minimum(height - 1., np.maximum(0., boxes[:, [1, 3]])) + boxes[:, [0, 2]] = np.minimum( + width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) + ) + boxes[:, [1, 3]] = np.minimum( + height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) + ) return boxes.astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_generate_proposals_op.py b/python/paddle/fluid/tests/unittests/test_generate_proposals_op.py index d5fbca5be9ecbfcb3c14ae7dd0077931ddeece22..8a23973ede38ac3f2c2b2145f4d3747b7c0eba62 100644 --- a/python/paddle/fluid/tests/unittests/test_generate_proposals_op.py +++ b/python/paddle/fluid/tests/unittests/test_generate_proposals_op.py @@ -21,9 +21,18 @@ from test_anchor_generator_op import anchor_generator_in_python import copy -def generate_proposals_in_python(scores, bbox_deltas, im_info, anchors, - variances, pre_nms_topN, post_nms_topN, - nms_thresh, min_size, eta): +def generate_proposals_in_python( + scores, + bbox_deltas, + im_info, + anchors, + variances, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, +): all_anchors = anchors.reshape(-1, 4) rois = np.empty((0, 5), dtype=np.float32) roi_probs = np.empty((0, 1), dtype=np.float32) @@ -34,9 +43,17 @@ def generate_proposals_in_python(scores, bbox_deltas, im_info, anchors, num_images = scores.shape[0] for img_idx in range(num_images): img_i_boxes, 
img_i_probs = proposal_for_one_image( - im_info[img_idx, :], all_anchors, variances, - bbox_deltas[img_idx, :, :, :], scores[img_idx, :, :, :], - pre_nms_topN, post_nms_topN, nms_thresh, min_size, eta) + im_info[img_idx, :], + all_anchors, + variances, + bbox_deltas[img_idx, :, :, :], + scores[img_idx, :, :, :], + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + ) rois_num.append(img_i_probs.shape[0]) rpn_rois.append(img_i_boxes) rpn_roi_probs.append(img_i_probs) @@ -44,9 +61,18 @@ def generate_proposals_in_python(scores, bbox_deltas, im_info, anchors, return rpn_rois, rpn_roi_probs, rois_num -def proposal_for_one_image(im_info, all_anchors, variances, bbox_deltas, scores, - pre_nms_topN, post_nms_topN, nms_thresh, min_size, - eta): +def proposal_for_one_image( + im_info, + all_anchors, + variances, + bbox_deltas, + scores, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, +): # Transpose and reshape predicted bbox transformations to get them # into the same order as the anchors: # - bbox deltas will be (4 * A, H, W) format from conv output @@ -94,10 +120,9 @@ def proposal_for_one_image(im_info, all_anchors, variances, bbox_deltas, scores, # take post_nms_topN (e.g. 1000) # return the top proposals if nms_thresh > 0: - keep = nms(boxes=proposals, - scores=scores, - nms_threshold=nms_thresh, - eta=eta) + keep = nms( + boxes=proposals, scores=scores, nms_threshold=nms_thresh, eta=eta + ) if post_nms_topN > 0 and post_nms_topN < len(keep): keep = keep[:post_nms_topN] proposals = proposals[keep, :] @@ -111,10 +136,10 @@ def box_coder(all_anchors, bbox_deltas, variances, pixel_offset=True): Decode proposals by anchors and bbox_deltas from RPN """ offset = 1 if pixel_offset else 0 - #proposals: xmin, ymin, xmax, ymax + # proposals: xmin, ymin, xmax, ymax proposals = np.zeros_like(bbox_deltas, dtype=np.float32) - #anchor_loc: width, height, center_x, center_y + # anchor_loc: width, height, center_x, center_y anchor_loc = np.zeros_like(bbox_deltas, dtype=np.float32) anchor_loc[:, 0] = all_anchors[:, 2] - all_anchors[:, 0] + offset @@ -122,34 +147,52 @@ def box_coder(all_anchors, bbox_deltas, variances, pixel_offset=True): anchor_loc[:, 2] = all_anchors[:, 0] + 0.5 * anchor_loc[:, 0] anchor_loc[:, 3] = all_anchors[:, 1] + 0.5 * anchor_loc[:, 1] - #predicted bbox: bbox_center_x, bbox_center_y, bbox_width, bbox_height + # predicted bbox: bbox_center_x, bbox_center_y, bbox_width, bbox_height pred_bbox = np.zeros_like(bbox_deltas, dtype=np.float32) if variances is not None: for i in range(bbox_deltas.shape[0]): - pred_bbox[i, 0] = variances[i, 0] * bbox_deltas[i, 0] * anchor_loc[ - i, 0] + anchor_loc[i, 2] - pred_bbox[i, 1] = variances[i, 1] * bbox_deltas[i, 1] * anchor_loc[ - i, 1] + anchor_loc[i, 3] - pred_bbox[i, 2] = math.exp( - min(variances[i, 2] * bbox_deltas[i, 2], math.log( - 1000 / 16.0))) * anchor_loc[i, 0] - pred_bbox[i, 3] = math.exp( - min(variances[i, 3] * bbox_deltas[i, 3], math.log( - 1000 / 16.0))) * anchor_loc[i, 1] + pred_bbox[i, 0] = ( + variances[i, 0] * bbox_deltas[i, 0] * anchor_loc[i, 0] + + anchor_loc[i, 2] + ) + pred_bbox[i, 1] = ( + variances[i, 1] * bbox_deltas[i, 1] * anchor_loc[i, 1] + + anchor_loc[i, 3] + ) + pred_bbox[i, 2] = ( + math.exp( + min( + variances[i, 2] * bbox_deltas[i, 2], + math.log(1000 / 16.0), + ) + ) + * anchor_loc[i, 0] + ) + pred_bbox[i, 3] = ( + math.exp( + min( + variances[i, 3] * bbox_deltas[i, 3], + math.log(1000 / 16.0), + ) + ) + * anchor_loc[i, 1] + ) else: for i in range(bbox_deltas.shape[0]): - pred_bbox[i, - 
0] = bbox_deltas[i, 0] * anchor_loc[i, 0] + anchor_loc[i, - 2] - pred_bbox[i, - 1] = bbox_deltas[i, 1] * anchor_loc[i, 1] + anchor_loc[i, - 3] - pred_bbox[i, 2] = math.exp( - min(bbox_deltas[i, 2], math.log(1000 / 16.0))) * anchor_loc[i, - 0] - pred_bbox[i, 3] = math.exp( - min(bbox_deltas[i, 3], math.log(1000 / 16.0))) * anchor_loc[i, - 1] + pred_bbox[i, 0] = ( + bbox_deltas[i, 0] * anchor_loc[i, 0] + anchor_loc[i, 2] + ) + pred_bbox[i, 1] = ( + bbox_deltas[i, 1] * anchor_loc[i, 1] + anchor_loc[i, 3] + ) + pred_bbox[i, 2] = ( + math.exp(min(bbox_deltas[i, 2], math.log(1000 / 16.0))) + * anchor_loc[i, 0] + ) + pred_bbox[i, 3] = ( + math.exp(min(bbox_deltas[i, 3], math.log(1000 / 16.0))) + * anchor_loc[i, 1] + ) proposals[:, 0] = pred_bbox[:, 0] - pred_bbox[:, 2] / 2 proposals[:, 1] = pred_bbox[:, 1] - pred_bbox[:, 3] / 2 proposals[:, 2] = pred_bbox[:, 0] + pred_bbox[:, 2] / 2 - offset @@ -161,33 +204,33 @@ def box_coder(all_anchors, bbox_deltas, variances, pixel_offset=True): def clip_tiled_boxes(boxes, im_shape, pixel_offset=True): """Clip boxes to image boundaries. im_shape is [height, width] and boxes has shape (N, 4 * num_tiled_boxes).""" - assert boxes.shape[1] % 4 == 0, \ - 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( + assert ( + boxes.shape[1] % 4 == 0 + ), 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( boxes.shape[1] ) offset = 1 if pixel_offset else 0 # x1 >= 0 - boxes[:, - 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - offset), - 0) + boxes[:, 0::4] = np.maximum( + np.minimum(boxes[:, 0::4], im_shape[1] - offset), 0 + ) # y1 >= 0 - boxes[:, - 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - offset), - 0) + boxes[:, 1::4] = np.maximum( + np.minimum(boxes[:, 1::4], im_shape[0] - offset), 0 + ) # x2 < im_shape[1] - boxes[:, - 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - offset), - 0) + boxes[:, 2::4] = np.maximum( + np.minimum(boxes[:, 2::4], im_shape[1] - offset), 0 + ) # y2 < im_shape[0] - boxes[:, - 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - offset), - 0) + boxes[:, 3::4] = np.maximum( + np.minimum(boxes[:, 3::4], im_shape[0] - offset), 0 + ) return boxes def filter_boxes(boxes, min_size, im_info, pixel_offset=True): - """Only keep boxes with both sides >= min_size and center within the image. - """ + """Only keep boxes with both sides >= min_size and center within the image.""" # Scale min_size to match image scale im_scale = im_info[2] min_size = max(min_size, 1.0) @@ -197,11 +240,14 @@ def filter_boxes(boxes, min_size, im_info, pixel_offset=True): if pixel_offset: ws_orig_scale = (boxes[:, 2] - boxes[:, 0]) / im_scale + 1 hs_orig_scale = (boxes[:, 3] - boxes[:, 1]) / im_scale + 1 - x_ctr = boxes[:, 0] + ws / 2. - y_ctr = boxes[:, 1] + hs / 2. 
- keep = np.where((ws_orig_scale >= min_size) - & (hs_orig_scale >= min_size) & (x_ctr < im_info[1]) - & (y_ctr < im_info[0]))[0] + x_ctr = boxes[:, 0] + ws / 2.0 + y_ctr = boxes[:, 1] + hs / 2.0 + keep = np.where( + (ws_orig_scale >= min_size) + & (hs_orig_scale >= min_size) + & (x_ctr < im_info[1]) + & (y_ctr < im_info[0]) + )[0] else: keep = np.where((ws >= min_size) & (hs >= min_size))[0] return keep @@ -263,9 +309,9 @@ def nms(boxes, scores, nms_threshold, eta=1.0, pixel_offset=True): for k in range(len(selected_indices)): if keep: kept_idx = selected_indices[k] - overlap = iou(boxes[idx], - boxes[kept_idx], - pixel_offset=pixel_offset) + overlap = iou( + boxes[idx], boxes[kept_idx], pixel_offset=pixel_offset + ) keep = True if overlap <= adaptive_threshold else False else: break @@ -277,7 +323,6 @@ def nms(boxes, scores, nms_threshold, eta=1.0, pixel_offset=True): class TestGenerateProposalsOp(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() @@ -287,7 +332,7 @@ class TestGenerateProposalsOp(OpTest): 'BboxDeltas': self.bbox_deltas, 'ImInfo': self.im_info.astype(np.float32), 'Anchors': self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -295,7 +340,7 @@ class TestGenerateProposalsOp(OpTest): 'post_nms_topN': self.post_nms_topN, 'nms_thresh': self.nms_thresh, 'min_size': self.min_size, - 'eta': self.eta + 'eta': self.eta, } self.outputs = { @@ -315,7 +360,7 @@ class TestGenerateProposalsOp(OpTest): self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 3.0 - self.eta = 1. + self.eta = 1.0 def init_test_input(self): batch_size = 1 @@ -323,30 +368,47 @@ class TestGenerateProposalsOp(OpTest): layer_h = 16 layer_w = 16 input_feat = np.random.random( - (batch_size, input_channels, layer_h, layer_w)).astype('float32') + (batch_size, input_channels, layer_h, layer_w) + ).astype('float32') self.anchors, self.variances = anchor_generator_in_python( input_feat=input_feat, - anchor_sizes=[16., 32.], + anchor_sizes=[16.0, 32.0], aspect_ratios=[0.5, 1.0], variances=[1.0, 1.0, 1.0, 1.0], stride=[16.0, 16.0], - offset=0.5) - self.im_info = np.array([[64., 64., 8.]]) #im_height, im_width, scale + offset=0.5, + ) + self.im_info = np.array( + [[64.0, 64.0, 8.0]] + ) # im_height, im_width, scale num_anchors = self.anchors.shape[2] self.scores = np.random.random( - (batch_size, num_anchors, layer_h, layer_w)).astype('float32') + (batch_size, num_anchors, layer_h, layer_w) + ).astype('float32') self.bbox_deltas = np.random.random( - (batch_size, num_anchors * 4, layer_h, layer_w)).astype('float32') + (batch_size, num_anchors * 4, layer_h, layer_w) + ).astype('float32') def init_test_output(self): - self.rpn_rois, self.rpn_roi_probs, self.rois_num = generate_proposals_in_python( - self.scores, self.bbox_deltas, self.im_info, self.anchors, - self.variances, self.pre_nms_topN, self.post_nms_topN, - self.nms_thresh, self.min_size, self.eta) + ( + self.rpn_rois, + self.rpn_roi_probs, + self.rois_num, + ) = generate_proposals_in_python( + self.scores, + self.bbox_deltas, + self.im_info, + self.anchors, + self.variances, + self.pre_nms_topN, + self.post_nms_topN, + self.nms_thresh, + self.min_size, + self.eta, + ) class TestGenerateProposalsOutLodOp(TestGenerateProposalsOp): - def set_data(self): self.init_test_params() self.init_test_input() @@ -356,7 +418,7 @@ class TestGenerateProposalsOutLodOp(TestGenerateProposalsOp): 'BboxDeltas': self.bbox_deltas, 'ImInfo': self.im_info.astype(np.float32), 'Anchors': 
self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -365,24 +427,23 @@ class TestGenerateProposalsOutLodOp(TestGenerateProposalsOp): 'nms_thresh': self.nms_thresh, 'min_size': self.min_size, 'eta': self.eta, - 'return_rois_num': True + 'return_rois_num': True, } self.outputs = { 'RpnRois': (self.rpn_rois[0], [self.rois_num]), 'RpnRoiProbs': (self.rpn_roi_probs[0], [self.rois_num]), - 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)) + 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)), } class TestGenerateProposalsOpNoBoxLeft(TestGenerateProposalsOp): - def init_test_params(self): self.pre_nms_topN = 12000 # train 12000, test 2000 self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 1000.0 - self.eta = 1. + self.eta = 1.0 if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py b/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py index 06db10c5bd5128d3f7d7e56ce4328a33a3cdbaba..f0b5d56114167a0c56c48f2cb7da286cce798166 100644 --- a/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py @@ -34,7 +34,11 @@ def python_generate_proposals_v2( pixel_offset=False, return_rois_num=True, ): - rpn_rois, rpn_roi_probs, rpn_rois_num = paddle.vision.ops.generate_proposals( + ( + rpn_rois, + rpn_roi_probs, + rpn_rois_num, + ) = paddle.vision.ops.generate_proposals( scores, bbox_deltas, img_size, @@ -46,13 +50,24 @@ def python_generate_proposals_v2( min_size=min_size, eta=eta, pixel_offset=pixel_offset, - return_rois_num=return_rois_num) + return_rois_num=return_rois_num, + ) return rpn_rois, rpn_roi_probs -def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, - variances, pre_nms_topN, post_nms_topN, - nms_thresh, min_size, eta, pixel_offset): +def generate_proposals_v2_in_python( + scores, + bbox_deltas, + im_shape, + anchors, + variances, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, +): all_anchors = anchors.reshape(-1, 4) rois = np.empty((0, 5), dtype=np.float32) roi_probs = np.empty((0, 1), dtype=np.float32) @@ -63,10 +78,18 @@ def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, num_images = scores.shape[0] for img_idx in range(num_images): img_i_boxes, img_i_probs = proposal_for_one_image( - im_shape[img_idx, :], all_anchors, variances, - bbox_deltas[img_idx, :, :, :], scores[img_idx, :, :, :], - pre_nms_topN, post_nms_topN, nms_thresh, min_size, eta, - pixel_offset) + im_shape[img_idx, :], + all_anchors, + variances, + bbox_deltas[img_idx, :, :, :], + scores[img_idx, :, :, :], + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, + ) rois_num.append(img_i_probs.shape[0]) rpn_rois.append(img_i_boxes) rpn_roi_probs.append(img_i_probs) @@ -74,9 +97,19 @@ def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, return rpn_rois, rpn_roi_probs, rois_num -def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, - scores, pre_nms_topN, post_nms_topN, nms_thresh, - min_size, eta, pixel_offset): +def proposal_for_one_image( + im_shape, + all_anchors, + variances, + bbox_deltas, + scores, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, +): # Transpose and reshape predicted bbox transformations to get them # into the same order as the anchors: # - bbox deltas will be (4 * A, H, W) 
format from conv output @@ -124,11 +157,13 @@ def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, # take post_nms_topN (e.g. 1000) # return the top proposals if nms_thresh > 0: - keep = nms(boxes=proposals, - scores=scores, - nms_threshold=nms_thresh, - eta=eta, - pixel_offset=pixel_offset) + keep = nms( + boxes=proposals, + scores=scores, + nms_threshold=nms_thresh, + eta=eta, + pixel_offset=pixel_offset, + ) if post_nms_topN > 0 and post_nms_topN < len(keep): keep = keep[:post_nms_topN] proposals = proposals[keep, :] @@ -138,25 +173,27 @@ def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, def filter_boxes(boxes, min_size, im_shape, pixel_offset=True): - """Only keep boxes with both sides >= min_size and center within the image. - """ + """Only keep boxes with both sides >= min_size and center within the image.""" # Scale min_size to match image scale min_size = max(min_size, 1.0) offset = 1 if pixel_offset else 0 ws = boxes[:, 2] - boxes[:, 0] + offset hs = boxes[:, 3] - boxes[:, 1] + offset if pixel_offset: - x_ctr = boxes[:, 0] + ws / 2. - y_ctr = boxes[:, 1] + hs / 2. - keep = np.where((ws >= min_size) & (hs >= min_size) - & (x_ctr < im_shape[1]) & (y_ctr < im_shape[0]))[0] + x_ctr = boxes[:, 0] + ws / 2.0 + y_ctr = boxes[:, 1] + hs / 2.0 + keep = np.where( + (ws >= min_size) + & (hs >= min_size) + & (x_ctr < im_shape[1]) + & (y_ctr < im_shape[0]) + )[0] else: keep = np.where((ws >= min_size) & (hs >= min_size))[0] return keep class TestGenerateProposalsV2Op(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() @@ -166,7 +203,7 @@ class TestGenerateProposalsV2Op(OpTest): 'BboxDeltas': self.bbox_deltas, 'ImShape': self.im_shape.astype(np.float32), 'Anchors': self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -196,7 +233,7 @@ class TestGenerateProposalsV2Op(OpTest): self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 3.0 - self.eta = 1. 
+ self.eta = 1.0 self.pixel_offset = True def init_test_input(self): @@ -205,26 +242,43 @@ class TestGenerateProposalsV2Op(OpTest): layer_h = 16 layer_w = 16 input_feat = np.random.random( - (batch_size, input_channels, layer_h, layer_w)).astype('float32') + (batch_size, input_channels, layer_h, layer_w) + ).astype('float32') self.anchors, self.variances = anchor_generator_in_python( input_feat=input_feat, - anchor_sizes=[16., 32.], + anchor_sizes=[16.0, 32.0], aspect_ratios=[0.5, 1.0], variances=[1.0, 1.0, 1.0, 1.0], stride=[16.0, 16.0], - offset=0.5) + offset=0.5, + ) self.im_shape = np.array([[64, 64]]).astype('float32') num_anchors = self.anchors.shape[2] self.scores = np.random.random( - (batch_size, num_anchors, layer_h, layer_w)).astype('float32') + (batch_size, num_anchors, layer_h, layer_w) + ).astype('float32') self.bbox_deltas = np.random.random( - (batch_size, num_anchors * 4, layer_h, layer_w)).astype('float32') + (batch_size, num_anchors * 4, layer_h, layer_w) + ).astype('float32') def init_test_output(self): - self.rpn_rois, self.rpn_roi_probs, self.rois_num = generate_proposals_v2_in_python( - self.scores, self.bbox_deltas, self.im_shape, self.anchors, - self.variances, self.pre_nms_topN, self.post_nms_topN, - self.nms_thresh, self.min_size, self.eta, self.pixel_offset) + ( + self.rpn_rois, + self.rpn_roi_probs, + self.rois_num, + ) = generate_proposals_v2_in_python( + self.scores, + self.bbox_deltas, + self.im_shape, + self.anchors, + self.variances, + self.pre_nms_topN, + self.post_nms_topN, + self.nms_thresh, + self.min_size, + self.eta, + self.pixel_offset, + ) # class TestGenerateProposalsV2OpNoBoxLeft(TestGenerateProposalsV2Op): diff --git a/python/paddle/fluid/tests/unittests/test_generator.py b/python/paddle/fluid/tests/unittests/test_generator.py index 445c7b050d02600b4faa7848d21074d0fdfd17c2..5adef46fc5de2659c86749ebeadeb570c851486e 100644 --- a/python/paddle/fluid/tests/unittests/test_generator.py +++ b/python/paddle/fluid/tests/unittests/test_generator.py @@ -33,9 +33,9 @@ class TestGenerator(unittest.TestCase): def test_basic_generator_error(self): if paddle.fluid.core.is_compiled_with_cuda(): - self.assertRaises(ValueError, - generator.Generator, - place=paddle.CUDAPlace(0)) + self.assertRaises( + ValueError, generator.Generator, place=paddle.CUDAPlace(0) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py index dbc8169bd1131b2193f271a26b26945b0b06c62d..67b2c37accb703153c94a9cd4eb94ae24196ae17 100644 --- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py @@ -41,15 +41,16 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data(name='image', - shape=[784], - dtype='float32') + image = fluid.layers.data( + name='image', shape=[784], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') py_reader = fluid.io.DataLoader.from_generator( feed_list=[image, label], capacity=4, iterable=not use_legacy_py_reader, - use_double_buffer=use_double_buffer) + use_double_buffer=use_double_buffer, + ) hidden = image for hidden_size in [10, 20, 30]: hidden = fluid.layers.fc( @@ -57,13 +58,16 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): size=hidden_size, act='tanh', bias_attr=fluid.ParamAttr( - 
initializer=fluid.initializer.Constant(value=1.0))) + initializer=fluid.initializer.Constant(value=1.0) + ), + ) - predict_label = fluid.layers.fc(hidden, - size=CLASS_NUM, - act='softmax') + predict_label = fluid.layers.fc( + hidden, size=CLASS_NUM, act='softmax' + ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -71,28 +75,35 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): class TestBase(unittest.TestCase): - - def run_main(self, use_legacy_py_reader, with_data_parallel, places, - use_double_buffer): + def run_main( + self, + use_legacy_py_reader, + with_data_parallel, + places, + use_double_buffer, + ): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, py_reader, loss = simple_fc_net( - places, use_legacy_py_reader, use_double_buffer) + places, use_legacy_py_reader, use_double_buffer + ) reader = paddle.batch(random_reader, batch_size=BATCH_SIZE) ps = places if use_double_buffer else fluid.cpu_places(len(places)) py_reader.set_sample_list_generator( - reader, places=ps if py_reader.iterable else None) + reader, places=ps if py_reader.iterable else None + ) exe = fluid.Executor(place=places[0]) exe.run(startup_prog) prog = fluid.CompiledProgram(main_prog) if with_data_parallel: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) step = 0 step_list = [] @@ -104,9 +115,11 @@ class TestBase(unittest.TestCase): py_reader.start() while True: try: - L, = exe.run(program=prog, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 except fluid.core.EOFException: @@ -118,7 +131,8 @@ class TestBase(unittest.TestCase): step = 0 for d in py_reader(): assert len(d) == len(places), "{} != {}".format( - len(d), len(places)) + len(d), len(places) + ) for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -126,10 +140,12 @@ class TestBase(unittest.TestCase): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(ps[i]) assert label._place()._equals(ps[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -137,7 +153,7 @@ class TestBase(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } return ret @@ -167,16 +183,17 @@ class TestBase(unittest.TestCase): use_legacy_py_reader=use_legacy_py_reader, with_data_parallel=with_data_parallel, places=p, - use_double_buffer=use_double_buffer) + use_double_buffer=use_double_buffer, + ) results.append(ret) if not use_double_buffer: diff = np.max( - np.abs(results[0]['loss'] - results[1]['loss'])) + np.abs(results[0]['loss'] - results[1]['loss']) + ) self.assertLess(diff, 1e-3) class TestDataLoaderBaseAbstract(unittest.TestCase): - def test_main(self): loader = DataLoaderBase() try: diff --git a/python/paddle/fluid/tests/unittests/test_get_all_op_or_kernel_names.py b/python/paddle/fluid/tests/unittests/test_get_all_op_or_kernel_names.py index 999912494ad3018b1108ebc8f7ab4e435bec868b..47af7355cb7e2ac41bbec86ac04b24b06f5773a1 100644 --- 
a/python/paddle/fluid/tests/unittests/test_get_all_op_or_kernel_names.py +++ b/python/paddle/fluid/tests/unittests/test_get_all_op_or_kernel_names.py @@ -38,15 +38,15 @@ class TestGetAllRegisteredOpKernels(unittest.TestCase): class TestGetAllOpNames(unittest.TestCase): - def test_get_all_op_names(self): all_op_names = core.get_all_op_names() all_op_with_phi_kernels = core.get_all_op_names("phi") all_op_with_fluid_kernels = core.get_all_op_names("fluid") self.assertTrue( - len(all_op_names) > len( - set(all_op_with_phi_kernels) | set(all_op_with_fluid_kernels))) + len(all_op_names) + > len(set(all_op_with_phi_kernels) | set(all_op_with_fluid_kernels)) + ) self.assertTrue("scale" in all_op_with_phi_kernels) self.assertTrue("scale" in all_op_with_phi_kernels) diff --git a/python/paddle/fluid/tests/unittests/test_get_device_properties.py b/python/paddle/fluid/tests/unittests/test_get_device_properties.py index e29b565ff459c921f5c12b7420b1b77a058e69f8..3fea41dbadd67fa58cbd49f12741338e7b6ca700 100644 --- a/python/paddle/fluid/tests/unittests/test_get_device_properties.py +++ b/python/paddle/fluid/tests/unittests/test_get_device_properties.py @@ -18,7 +18,6 @@ from paddle.device.cuda import device_count, get_device_properties class TestGetDeviceProperties(unittest.TestCase): - def test_get_device_properties_default(self): if core.is_compiled_with_cuda(): props = get_device_properties() @@ -44,7 +43,6 @@ class TestGetDeviceProperties(unittest.TestCase): class TestGetDevicePropertiesError(unittest.TestCase): - def test_error_api(self): if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py b/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py index d19ccea8ea038ef56b27a99bb7e40516600a5956..b419679403207f6f5459791284c6696218e81cda 100644 --- a/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py +++ b/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestGetInputsOutputsInBlock(unittest.TestCase): - def test_ordered(self): # Program variable names may be different when test order is different # This helper makes the test ordered. 
@@ -49,7 +48,8 @@ class TestGetInputsOutputsInBlock(unittest.TestCase): sub_block = main_program.block(1) inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block( - sub_block) + sub_block + ) # 'assign_0.tmp_0', 'assign_1.tmp_0' are name of i and ten in program self.assertTrue(inner_inputs == {'assign_0.tmp_0', 'assign_1.tmp_0'}) # 'tmp_0', 'assign_0.tmp_0' are name of i < ten and i in program @@ -66,7 +66,8 @@ class TestGetInputsOutputsInBlock(unittest.TestCase): sub_block = main_program.block(1) inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block( - sub_block) + sub_block + ) #'fill_constant_1.tmp_0', 'tmp_3' are names of a, c self.assertTrue(inner_inputs == {'fill_constant_1.tmp_0', 'tmp_3'}) #'_generated_var_1', is name of a + c diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py index 93b03d029cc0674da5a320b4474e52c5078c44d1..d51e36f2514cd5718c23149fed8db4d365b09a13 100644 --- a/python/paddle/fluid/tests/unittests/test_get_places_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_places_op.py @@ -20,7 +20,6 @@ import unittest class TestGetPlaces(unittest.TestCase): - @prog_scope() def check_get_cpu_places(self): places = get_places() diff --git a/python/paddle/fluid/tests/unittests/test_get_set_flags.py b/python/paddle/fluid/tests/unittests/test_get_set_flags.py index 80300eb7dfcb495662c89f5cf831e4705187ebd3..dad58ae08ba96f9ba27ca19a18ee4266065b2ee1 100644 --- a/python/paddle/fluid/tests/unittests/test_get_set_flags.py +++ b/python/paddle/fluid/tests/unittests/test_get_set_flags.py @@ -17,11 +17,10 @@ import unittest as unittest class TestGetAndSetFlags(unittest.TestCase): - def test_api(self): flags = { 'FLAGS_eager_delete_tensor_gb': 1.0, - 'FLAGS_check_nan_inf': True + 'FLAGS_check_nan_inf': True, } fluid.set_flags(flags) @@ -38,7 +37,6 @@ class TestGetAndSetFlags(unittest.TestCase): class TestGetAndSetFlagsErrors(unittest.TestCase): - def test_errors(self): flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf'] flag = 1 diff --git a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py index 425cca109f02471f6acbc9e6a4bca4c9be72bae4..17bf702b88607348b77ad3e8ade20ffe0af547a1 100644 --- a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py @@ -40,7 +40,6 @@ class TestGetTensorFromSelectedRowsError(unittest.TestCase): class TestGetTensorFromSelectedRows(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py index 90fe9988ac2d89ddb3e0394225aee2f9aca3a1da..3394a08de8b197c59745edeea8953fe3ec6a2488 100644 --- a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py +++ b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py @@ -17,7 +17,6 @@ import unittest as unittest class VarInfo(object): - def __init__(self, var_name, var_type, writable): self.name = var_name self.type = var_type @@ -25,7 +24,6 @@ class VarInfo(object): class TestGlobalVarGetterSetter(unittest.TestCase): - def test_main(self): var_infos = [ VarInfo("FLAGS_free_idle_chunk", bool, False), diff --git a/python/paddle/fluid/tests/unittests/test_glu.py 
b/python/paddle/fluid/tests/unittests/test_glu.py index c8f0098456cbdc08b9a5f2ff622bd0143d3293cd..25f1975db0c5290ab538fa0204e38141d1077bbf 100644 --- a/python/paddle/fluid/tests/unittests/test_glu.py +++ b/python/paddle/fluid/tests/unittests/test_glu.py @@ -32,7 +32,6 @@ def glu(x, dim=-1): class TestGLUCase(unittest.TestCase): - def setUp(self): self.x = np.random.randn(5, 20) self.dim = -1 @@ -53,7 +52,6 @@ class TestGLUCase(unittest.TestCase): class TestGLUV2(unittest.TestCase): - def setUp(self): self.x = np.random.randn(5, 20) self.dim = -1 diff --git a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py index fc130426aef84b512286e182d6db579a8de69b59..ac786dea529f40d5829e673c838511678c48804f 100644 --- a/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py +++ b/python/paddle/fluid/tests/unittests/test_gpu_package_without_gpu_device.py @@ -21,7 +21,6 @@ from paddle.fluid import core class TestGPUPackagePaddle(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -34,8 +33,9 @@ class TestGPUPackagePaddle(unittest.TestCase): os.environ['HIP_VISIBLE_DEVICES'] = '' else: os.environ['CUDA_VISIBLE_DEVICES'] = '' - test_file = os.path.join(self.temp_dir.name, - 'test_no_gpu_run_rand.py') + test_file = os.path.join( + self.temp_dir.name, 'test_no_gpu_run_rand.py' + ) with open(test_file, 'w') as wb: cmd_test = """ import paddle @@ -48,10 +48,12 @@ assert x.place.is_gpu_place() is False, "There is no CUDA device, but Tensor's p _python = sys.executable ps_cmd = '{} {}'.format(_python, test_file) - ps_proc = subprocess.Popen(ps_cmd.strip().split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=os.environ) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=os.environ, + ) stdout, stderr = ps_proc.communicate() assert 'CPU device will be used by default' in str( diff --git a/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py b/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py index fc8c1c946bbec16d8820515d210088321d445416..ad1fc3ad1836980cec9d29900e8a0fea355935e9 100644 --- a/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py +++ b/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py @@ -19,11 +19,14 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.base import to_variable -from paddle.fluid.clip import GradientClipByValue, GradientClipByNorm, GradientClipByGlobalNorm +from paddle.fluid.clip import ( + GradientClipByValue, + GradientClipByNorm, + GradientClipByGlobalNorm, +) class TestGradClipByGlobalNorm(unittest.TestCase): - def init_value(self): self.max_global_norm = 5.0 self.init_scale = 1.0 @@ -35,10 +38,15 @@ class TestGradClipByGlobalNorm(unittest.TestCase): self.para_and_grad = [] for i in range(10): self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) + ( + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + ) + ) def get_numpy_global_norm_result(self): gloabl_norm = 0.0 @@ -98,7 +106,6 @@ class TestGradClipByGlobalNorm(unittest.TestCase): class TestGradClipByNorm(unittest.TestCase): - def init_value(self): self.max_norm 
= 5.0 self.init_scale = 1.0 @@ -110,10 +117,15 @@ class TestGradClipByNorm(unittest.TestCase): self.para_and_grad = [] for i in range(10): self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) + ( + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + ) + ) def get_numpy_norm_result(self): @@ -169,7 +181,6 @@ class TestGradClipByNorm(unittest.TestCase): class TestGradClipByValue(unittest.TestCase): - def init_value(self): self.max_value = 0.8 self.min_value = -0.1 @@ -182,10 +193,15 @@ class TestGradClipByValue(unittest.TestCase): self.para_and_grad = [] for i in range(10): self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) + ( + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + np.random.uniform( + -self.init_scale, self.init_scale, self.shape + ).astype('float32'), + ) + ) def get_numpy_clip_result(self): @@ -197,8 +213,9 @@ class TestGradClipByValue(unittest.TestCase): def get_dygrap_clip_result(self): with fluid.dygraph.guard(): - value_clip = GradientClipByValue(max=self.max_value, - min=self.min_value) + value_clip = GradientClipByValue( + max=self.max_value, min=self.min_value + ) p_g_var = [] for p, g in self.para_and_grad: new_p = to_variable(p) diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index 825e84f9f6d9a1646f927c73f1ef7aa72cc23a1f..1687d7e59839ff485ed8ef2b1b174dedc2419432 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -23,21 +23,17 @@ from paddle.fluid.clip import _allow_pure_fp16_global_norm_clip paddle.enable_static() -def bow_net(data, - label, - dict_dim, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2): +def bow_net( + data, label, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2 +): """ BOW net This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - emb = fluid.layers.embedding(input=data, - is_sparse=True, - size=[dict_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, is_sparse=True, size=[dict_dim, emb_dim] + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -50,7 +46,6 @@ def bow_net(data, class TestGradientClip(unittest.TestCase): - def setUp(self): self.word_dict_len = 5147 self.BATCH_SIZE = 2 @@ -74,8 +69,9 @@ class TestGradientClip(unittest.TestCase): def check_gradient_clip(self, place, dtype='float32'): prog = fluid.Program() startup_program = fluid.Program() - with fluid.program_guard(main_program=prog, - startup_program=startup_program): + with fluid.program_guard( + main_program=prog, startup_program=startup_program + ): image = fluid.data(name="a", shape=[-1, 784], dtype='float32') label = fluid.data(name="b", shape=[-1, 1], dtype='int64') if dtype != 'float32': @@ -96,8 +92,9 @@ class TestGradientClip(unittest.TestCase): p_g = sorted(p_g, key=lambda x: x[0].name) p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name) 
- with fluid.program_guard(main_program=prog_clip, - startup_program=startup_program): + with fluid.program_guard( + main_program=prog_clip, startup_program=startup_program + ): p_g_clip = self.clip_gradient(p_g_clip) grad_list = [elem[1] for elem in p_g] @@ -110,20 +107,20 @@ class TestGradientClip(unittest.TestCase): data = next(train_reader()) out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list) - out_clip = exe.run(prog_clip, - feed=feeder.feed(data), - fetch_list=grad_clip_list) + out_clip = exe.run( + prog_clip, feed=feeder.feed(data), fetch_list=grad_clip_list + ) self.check_clip_result(out, out_clip) def check_sparse_gradient_clip(self, place): prog = fluid.Program() startup_program = fluid.Program() - with fluid.program_guard(main_program=prog, - startup_program=startup_program): - data = fluid.data(name="words", - shape=[-1, 1], - dtype="int64", - lod_level=1) + with fluid.program_guard( + main_program=prog, startup_program=startup_program + ): + data = fluid.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) label = fluid.data(name="label", shape=[-1, 1], dtype="int64") cost = bow_net(data, label, self.word_dict_len) @@ -135,7 +132,7 @@ class TestGradientClip(unittest.TestCase): data = next(self.train_data()) val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0] - self.assertEqual((1, ), val.shape) + self.assertEqual((1,), val.shape) self.assertFalse(np.isnan(val)) def backward_and_optimize(self, cost): @@ -143,7 +140,6 @@ class TestGradientClip(unittest.TestCase): class TestGradientClipByGlobalNorm(TestGradientClip): - def init(self): self.clip_norm = 0.2 @@ -163,13 +159,13 @@ class TestGradientClipByGlobalNorm(TestGradientClip): v, rtol=1e-05, atol=1e-08, - err_msg= - 'gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}' - .format(u, v, u - v)) + err_msg='gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}'.format( + u, v, u - v + ), + ) # test whether the output is right when use 'set_gradient_clip' def test_old_gradient_clip(self): - def func(params_grads): clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm) fluid.clip.set_gradient_clip(clip) @@ -180,7 +176,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip): # test whether the output is right when use grad_clip def test_new_gradient_clip(self): - def func(params_grads): clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm) return clip(params_grads) @@ -190,7 +185,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip): # test whether the output is right when use grad_clip under float64 def test_new_gradient_clip_fp64(self): - def func(params_grads): clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm) return clip(params_grads) @@ -200,12 +194,12 @@ class TestGradientClipByGlobalNorm(TestGradientClip): # invoke 'set_gradient_clip' in a wrong order def test_wrong_API_order(self): - def backward_func(cost): clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0) fluid.clip.set_gradient_clip(clip) - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01, - grad_clip=clip) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.01, grad_clip=clip + ) # if 'set_gradient_clip' and 'optimize(grad_clip)' together, 'set_gradient_clip' will be ineffective sgd_optimizer.minimize(cost) # 'set_gradient_clip' must before 'minimize', otherwise, 'set_gradient_clip' will be ineffective @@ -219,43 +213,74 @@ class TestGradientClipByGlobalNorm(TestGradientClip): def test_tpyeError(self): # the type of 
optimizer(grad_clip=) must be an instance of GradientClipBase's derived class with self.assertRaises(TypeError): - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1, - grad_clip="test") + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=0.1, grad_clip="test" + ) # if grad is None or not need clip def test_none_grad_fp32(self): ops = self._test_none_grad_helper("float32") - self.assertListEqual(ops, [ - 'squared_l2_norm', 'squared_l2_norm', 'sum', 'sqrt', - 'fill_constant', 'elementwise_max', 'elementwise_div', - 'elementwise_mul', 'elementwise_mul' - ]) + self.assertListEqual( + ops, + [ + 'squared_l2_norm', + 'squared_l2_norm', + 'sum', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'elementwise_mul', + 'elementwise_mul', + ], + ) def test_none_grad_fp16(self): ops = self._test_none_grad_helper("float16") - self.assertListEqual(ops, [ - 'square', 'reduce_sum', 'square', 'reduce_sum', 'sum', 'cast', - 'sqrt', 'fill_constant', 'elementwise_max', 'elementwise_div', - 'cast', 'elementwise_mul', 'cast', 'elementwise_mul' - ]) + self.assertListEqual( + ops, + [ + 'square', + 'reduce_sum', + 'square', + 'reduce_sum', + 'sum', + 'cast', + 'sqrt', + 'fill_constant', + 'elementwise_max', + 'elementwise_div', + 'cast', + 'elementwise_mul', + 'cast', + 'elementwise_mul', + ], + ) def _test_none_grad_helper(self, dtype): prog = fluid.Program() startup_program = fluid.Program() - with fluid.program_guard(main_program=prog, - startup_program=startup_program): + with fluid.program_guard( + main_program=prog, startup_program=startup_program + ): clip = fluid.clip.GradientClipByGlobalNorm(self.clip_norm) - x = fluid.default_main_program().global_block().create_parameter( - name="x", shape=[2, 3], dtype=dtype) - y = fluid.default_main_program().global_block().create_parameter( - name="y", shape=[2, 3], dtype=dtype) + x = ( + fluid.default_main_program() + .global_block() + .create_parameter(name="x", shape=[2, 3], dtype=dtype) + ) + y = ( + fluid.default_main_program() + .global_block() + .create_parameter(name="y", shape=[2, 3], dtype=dtype) + ) # (x, None) should not be returned params_grads = [(x, None), (x, y), (y, x)] params_grads = clip(params_grads) self.assertTrue( len(params_grads) == 2, - "ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!" 
+ "ClipByGlobalNorm: when grad is None, it shouldn't be returned by gradient clip!", ) ops = [op.type for op in x.block.ops] @@ -263,7 +288,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip): class TestGradientClipByNorm(TestGradientClip): - def init(self): self.clip_norm = 0.2 @@ -277,11 +301,11 @@ class TestGradientClipByNorm(TestGradientClip): v, rtol=1e-05, atol=1e-08, - err_msg='gradient clip by norm has wrong results!') + err_msg='gradient clip by norm has wrong results!', + ) # test whether the output is right when use grad_clip def test_gradient_clip(self): - def func(params_grads): clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm) return clip(params_grads) @@ -292,25 +316,35 @@ class TestGradientClipByNorm(TestGradientClip): # if grad is None or not need clip def test_none_grad(self): clip = fluid.clip.GradientClipByNorm(self.clip_norm) - x = fluid.default_main_program().global_block().create_parameter( - name="x", shape=[2, 3], dtype="float32", need_clip=False) - y = fluid.default_main_program().global_block().create_parameter( - name="y", shape=[2, 3], dtype="float32", need_clip=False) + x = ( + fluid.default_main_program() + .global_block() + .create_parameter( + name="x", shape=[2, 3], dtype="float32", need_clip=False + ) + ) + y = ( + fluid.default_main_program() + .global_block() + .create_parameter( + name="y", shape=[2, 3], dtype="float32", need_clip=False + ) + ) # (x, None) should not be returned params_grads = [(x, None), (x, y)] params_grads = clip(params_grads) self.assertTrue( len(clip(params_grads)) == 1, - "ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!" + "ClipGradByNorm: when grad is None, it shouldn't be returned by gradient clip!", ) self.assertTrue( params_grads[0][1].name == 'y', - "ClipGradByNorm: grad should not be clipped when filtered out!") + "ClipGradByNorm: grad should not be clipped when filtered out!", + ) class TestGradientClipByValue(TestGradientClip): - def init(self): self.max = 0.2 self.min = 0.1 @@ -325,11 +359,11 @@ class TestGradientClipByValue(TestGradientClip): v, rtol=1e-06, atol=1e-08, - err_msg='gradient clip by value has wrong results!') + err_msg='gradient clip by value has wrong results!', + ) # test whether the output is right when use grad_clip def test_gradient_clip(self): - def func(params_grads): clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min) return clip(params_grads) @@ -340,37 +374,49 @@ class TestGradientClipByValue(TestGradientClip): # if grad is None or not need clip def test_none_grad(self): clip = fluid.clip.GradientClipByValue(self.max, self.min) - x = fluid.default_main_program().global_block().create_parameter( - name="x", shape=[2, 3], dtype="float32", need_clip=False) - y = fluid.default_main_program().global_block().create_parameter( - name="y", shape=[2, 3], dtype="float32", need_clip=False) + x = ( + fluid.default_main_program() + .global_block() + .create_parameter( + name="x", shape=[2, 3], dtype="float32", need_clip=False + ) + ) + y = ( + fluid.default_main_program() + .global_block() + .create_parameter( + name="y", shape=[2, 3], dtype="float32", need_clip=False + ) + ) # (x, None) should not be returned params_grads = [(x, None), (x, y)] params_grads = clip(params_grads) self.assertTrue( len(clip(params_grads)) == 1, - "ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!" 
+ "ClipGradByValue: when grad is None, it shouldn't be returned by gradient clip!", ) self.assertTrue( params_grads[0][1].name == 'y', - "ClipGradByValue: grad should not be clipped when filtered out!") + "ClipGradByValue: grad should not be clipped when filtered out!", + ) class TestDygraphGradientClip(unittest.TestCase): - def test_gradient_clip(self): with fluid.dygraph.guard(): linear = fluid.dygraph.Linear(5, 5) - inputs = fluid.layers.uniform_random([16, 5], min=-10, - max=10).astype('float32') + inputs = fluid.layers.uniform_random( + [16, 5], min=-10, max=10 + ).astype('float32') out = linear(fluid.dygraph.to_variable(inputs)) loss = fluid.layers.reduce_mean(out) loss.backward() sgd_optimizer = fluid.optimizer.SGD( learning_rate=0.0, parameter_list=linear.parameters(), - grad_clip=fluid.clip.GradientClipByGlobalNorm(0.1)) + grad_clip=fluid.clip.GradientClipByGlobalNorm(0.1), + ) self.check_clip_result(loss, sgd_optimizer) def check_clip_result(self, loss, optimizer): @@ -378,20 +424,23 @@ class TestDygraphGradientClip(unittest.TestCase): class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip): - def setUp(self): self.clip_norm = 0.8 self.clip1 = fluid.clip.GradientClipByGlobalNorm( - clip_norm=self.clip_norm) + clip_norm=self.clip_norm + ) self.clip2 = fluid.clip.GradientClipByGlobalNorm( - clip_norm=self.clip_norm) + clip_norm=self.clip_norm + ) def check_clip_result(self, loss, optimizer): # if grad is None - x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32"), - name="x") - y = fluid.dygraph.to_variable(np.array([3, 4]).astype("float32"), - name="y") + x = fluid.dygraph.to_variable( + np.array([2, 3]).astype("float32"), name="x" + ) + y = fluid.dygraph.to_variable( + np.array([3, 4]).astype("float32"), name="y" + ) assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2 # get params and grads from network opt, params_grads = optimizer.minimize(loss) @@ -416,11 +465,11 @@ class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip): self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b)) + % (a, b), + ) class TestDygraphGradientClipByNorm(TestDygraphGradientClip): - def setUp(self): self.clip_norm = 0.8 self.clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm) @@ -445,11 +494,11 @@ class TestDygraphGradientClipByNorm(TestDygraphGradientClip): self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), "gradient clip by norm has wrong results, expetcd:%f, but received:%f" - % (a, b)) + % (a, b), + ) class TestDygraphGradientClipByValue(TestDygraphGradientClip): - def setUp(self): self.max = 0.2 self.min = 0.1 @@ -472,11 +521,11 @@ class TestDygraphGradientClipByValue(TestDygraphGradientClip): v, rtol=1e-06, atol=1e-08, - err_msg='gradient clip by value has wrong results!') + err_msg='gradient clip by value has wrong results!', + ) class SimpleNet(paddle.nn.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.linear = paddle.nn.Linear(5, 5) @@ -489,19 +538,21 @@ class SimpleNet(paddle.nn.Layer): class TestDygraphGradientClipFP16(unittest.TestCase): - def test_gradient_clip(self): if fluid.core.is_compiled_with_cuda(): with fluid.dygraph.guard(): paddle.seed(10) model = SimpleNet() sgd_optimizer = paddle.optimizer.SGD( - learning_rate=0.0, parameters=model.parameters()) + learning_rate=0.0, parameters=model.parameters() + ) model, sgd_optimizer = paddle.amp.decorate( - models=model, optimizers=sgd_optimizer, level='O2') + 
models=model, optimizers=sgd_optimizer, level='O2' + ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - inputs = fluid.layers.uniform_random([1, 5], min=-10, - max=10).astype('float32') + inputs = fluid.layers.uniform_random( + [1, 5], min=-10, max=10 + ).astype('float32') with paddle.amp.auto_cast(level='O2'): out = model(fluid.dygraph.to_variable(inputs)) loss = fluid.layers.reduce_mean(out) @@ -540,15 +591,16 @@ class TestDygraphGradientClipFP16(unittest.TestCase): self.assertTrue( np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8), "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b)) + % (a, b), + ) class TestDygraphGradientClipFP64(unittest.TestCase): - def test_gradient_clip(self): with fluid.dygraph.guard(): - inputs = fluid.layers.uniform_random([16, 5], min=-10, - max=10).astype('float64') + inputs = fluid.layers.uniform_random( + [16, 5], min=-10, max=10 + ).astype('float64') linear = fluid.dygraph.Linear(5, 5, dtype="float64") out = linear(fluid.dygraph.to_variable(inputs)) loss = fluid.layers.reduce_mean(out) @@ -586,11 +638,11 @@ class TestDygraphGradientClipFP64(unittest.TestCase): self.assertTrue( np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), "gradient clip by global norm has wrong results, expetcd:%f, but received:%f" - % (a, b)) + % (a, b), + ) class TestPureFP16ClipGradByGlobalNorm(unittest.TestCase): - def check_main(self, expected_has_cast_op): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -601,12 +653,12 @@ class TestPureFP16ClipGradByGlobalNorm(unittest.TestCase): param_and_grads = [] main_block = main_prog.global_block() for name, shape in zip(names, shapes): - p = main_block.create_parameter(name=name, - shape=shape, - dtype='float16') - g = main_block.create_parameter(name=p.name + '@GRAD', - shape=p.shape, - dtype=p.dtype) + p = main_block.create_parameter( + name=name, shape=shape, dtype='float16' + ) + g = main_block.create_parameter( + name=p.name + '@GRAD', shape=p.shape, dtype=p.dtype + ) param_and_grads.append((p, g)) clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) diff --git a/python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py b/python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py index 57b8209d8d69b0f6621b25d29c19f240859821f1..b2f57af35de90af2e27cad225218f56ef118816f 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py +++ b/python/paddle/fluid/tests/unittests/test_graph_khop_sampler.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestGraphKhopSampler(unittest.TestCase): - def setUp(self): num_nodes = 20 edges = np.random.randint(num_nodes, size=(100, 2)) @@ -42,8 +41,9 @@ class TestGraphKhopSampler(unittest.TestCase): self.row = sorted_edges[:, 0].astype("int64") self.colptr = colptr.astype("int64") self.sorted_eid = sorted_eid.astype("int64") - self.nodes = np.unique(np.random.randint(num_nodes, - size=5)).astype("int64") + self.nodes = np.unique(np.random.randint(num_nodes, size=5)).astype( + "int64" + ) self.sample_sizes = [5, 5] self.dst_src_dict = dst_src_dict @@ -53,10 +53,14 @@ class TestGraphKhopSampler(unittest.TestCase): colptr = paddle.to_tensor(self.colptr) nodes = paddle.to_tensor(self.nodes) - edge_src, edge_dst, sample_index, reindex_nodes = \ - paddle.incubate.graph_khop_sampler(row, colptr, - nodes, self.sample_sizes, - return_eids=False) + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + ) = paddle.incubate.graph_khop_sampler( + row, colptr, nodes, self.sample_sizes, return_eids=False + ) # Reindex 
edge_src and edge_dst to original index. edge_src = edge_src.reshape([-1]) edge_dst = edge_dst.reshape([-1]) @@ -72,10 +76,13 @@ class TestGraphKhopSampler(unittest.TestCase): continue # Ensure no repetitive sample neighbors. self.assertTrue( - edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0]) + edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0] + ) # Ensure the correct sample size. - self.assertTrue(edge_src_n.shape[0] == self.sample_sizes[0] - or edge_src_n.shape[0] == len(self.dst_src_dict[n])) + self.assertTrue( + edge_src_n.shape[0] == self.sample_sizes[0] + or edge_src_n.shape[0] == len(self.dst_src_dict[n]) + ) in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n]) # Ensure the correct sample neighbors. self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) @@ -91,22 +98,35 @@ class TestGraphKhopSampler(unittest.TestCase): row = None if fluid.framework.in_dygraph_mode(): row = paddle.fluid.core.eager.to_uva_tensor( - self.row.astype(self.row.dtype), 0) + self.row.astype(self.row.dtype), 0 + ) sorted_eid = paddle.fluid.core.eager.to_uva_tensor( - self.sorted_eid.astype(self.sorted_eid.dtype), 0) + self.sorted_eid.astype(self.sorted_eid.dtype), 0 + ) else: row = paddle.fluid.core.to_uva_tensor( - self.row.astype(self.row.dtype)) + self.row.astype(self.row.dtype) + ) sorted_eid = paddle.fluid.core.to_uva_tensor( - self.sorted_eid.astype(self.sorted_eid.dtype)) + self.sorted_eid.astype(self.sorted_eid.dtype) + ) colptr = paddle.to_tensor(self.colptr) nodes = paddle.to_tensor(self.nodes) - edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \ - paddle.incubate.graph_khop_sampler(row, colptr, - nodes, self.sample_sizes, - sorted_eids=sorted_eid, - return_eids=True) + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + edge_eids, + ) = paddle.incubate.graph_khop_sampler( + row, + colptr, + nodes, + self.sample_sizes, + sorted_eids=sorted_eid, + return_eids=True, + ) edge_src = edge_src.reshape([-1]) edge_dst = edge_dst.reshape([-1]) sample_index = sample_index.reshape([-1]) @@ -120,10 +140,12 @@ class TestGraphKhopSampler(unittest.TestCase): if edge_src_n.shape[0] == 0: continue self.assertTrue( - edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0]) + edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0] + ) self.assertTrue( edge_src_n.shape[0] == self.sample_sizes[0] - or edge_src_n.shape[0] == len(self.dst_src_dict[n])) + or edge_src_n.shape[0] == len(self.dst_src_dict[n]) + ) in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) @@ -135,31 +157,40 @@ class TestGraphKhopSampler(unittest.TestCase): def test_sample_result_static_with_eids(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - sorted_eids = paddle.static.data(name="eids", - shape=self.sorted_eid.shape, - dtype=self.sorted_eid.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) - - edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \ - paddle.incubate.graph_khop_sampler(row, colptr, - nodes, self.sample_sizes, - sorted_eids, True) + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + sorted_eids = paddle.static.data( + name="eids", + shape=self.sorted_eid.shape, + 
dtype=self.sorted_eid.dtype, + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) + + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + edge_eids, + ) = paddle.incubate.graph_khop_sampler( + row, colptr, nodes, self.sample_sizes, sorted_eids, True + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'eids': self.sorted_eid, - 'colptr': self.colptr, - 'nodes': self.nodes - }, - fetch_list=[edge_src, edge_dst, sample_index]) + ret = exe.run( + feed={ + 'row': self.row, + 'eids': self.sorted_eid, + 'colptr': self.colptr, + 'nodes': self.nodes, + }, + fetch_list=[edge_src, edge_dst, sample_index], + ) edge_src, edge_dst, sample_index = ret edge_src = edge_src.reshape([-1]) @@ -175,35 +206,44 @@ class TestGraphKhopSampler(unittest.TestCase): if edge_src_n.shape[0] == 0: continue self.assertTrue( - edge_src_n.shape[0] == np.unique(edge_src_n).shape[0]) + edge_src_n.shape[0] == np.unique(edge_src_n).shape[0] + ) self.assertTrue( edge_src_n.shape[0] == self.sample_sizes[0] - or edge_src_n.shape[0] == len(self.dst_src_dict[n])) + or edge_src_n.shape[0] == len(self.dst_src_dict[n]) + ) in_neighbors = np.isin(edge_src_n, self.dst_src_dict[n]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) def test_sample_result_static_without_eids(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) - edge_src, edge_dst, sample_index, reindex_nodes = \ - paddle.incubate.graph_khop_sampler(row, colptr, - nodes, self.sample_sizes) + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + ) = paddle.incubate.graph_khop_sampler( + row, colptr, nodes, self.sample_sizes + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'colptr': self.colptr, - 'nodes': self.nodes - }, - fetch_list=[edge_src, edge_dst, sample_index]) + ret = exe.run( + feed={ + 'row': self.row, + 'colptr': self.colptr, + 'nodes': self.nodes, + }, + fetch_list=[edge_src, edge_dst, sample_index], + ) edge_src, edge_dst, sample_index = ret edge_src = edge_src.reshape([-1]) edge_dst = edge_dst.reshape([-1]) @@ -218,10 +258,12 @@ class TestGraphKhopSampler(unittest.TestCase): if edge_src_n.shape[0] == 0: continue self.assertTrue( - edge_src_n.shape[0] == np.unique(edge_src_n).shape[0]) + edge_src_n.shape[0] == np.unique(edge_src_n).shape[0] + ) self.assertTrue( edge_src_n.shape[0] == self.sample_sizes[0] - or edge_src_n.shape[0] == len(self.dst_src_dict[n])) + or edge_src_n.shape[0] == len(self.dst_src_dict[n]) + ) in_neighbors = np.isin(edge_src_n, self.dst_src_dict[n]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) diff --git a/python/paddle/fluid/tests/unittests/test_graph_reindex.py b/python/paddle/fluid/tests/unittests/test_graph_reindex.py index 
db1d7d3cff99ebb83f163bf87dab16f1d8e36a35..318deda61f2a7be87a9eb7b373326f981bd7518c 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_reindex.py +++ b/python/paddle/fluid/tests/unittests/test_graph_reindex.py @@ -18,7 +18,6 @@ import paddle class TestGraphReindex(unittest.TestCase): - def setUp(self): self.x = np.arange(5).astype("int64") self.neighbors = np.random.randint(100, size=20).astype("int64") @@ -32,7 +31,8 @@ class TestGraphReindex(unittest.TestCase): self.out_nodes = np.array(out_nodes, dtype="int64") reindex_dict = {node: ind for ind, node in enumerate(self.out_nodes)} self.reindex_src = np.array( - [reindex_dict[node] for node in self.neighbors]) + [reindex_dict[node] for node in self.neighbors] + ) reindex_dst = [] for node, c in zip(self.x, self.count): for i in range(c): @@ -48,16 +48,21 @@ class TestGraphReindex(unittest.TestCase): value_buffer = paddle.full([self.num_nodes], -1, dtype="int32") index_buffer = paddle.full([self.num_nodes], -1, dtype="int32") - reindex_src, reindex_dst, out_nodes = \ - paddle.incubate.graph_reindex(x, neighbors, count) + reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex( + x, neighbors, count + ) np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) - reindex_src, reindex_dst, out_nodes = \ - paddle.incubate.graph_reindex(x, neighbors, count, - value_buffer, index_buffer, - flag_buffer_hashtable=True) + reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex( + x, + neighbors, + count, + value_buffer, + index_buffer, + flag_buffer_hashtable=True, + ) np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) @@ -70,20 +75,21 @@ class TestGraphReindex(unittest.TestCase): count = paddle.to_tensor(self.count) count = paddle.concat([count, count]) - reindex_src, reindex_dst, out_nodes = \ - paddle.incubate.graph_reindex(x, neighbors, count) - np.testing.assert_allclose(self.reindex_src, - reindex_src[:self.neighbors.shape[0]], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_src, - reindex_src[self.neighbors.shape[0]:], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst[:self.neighbors.shape[0]], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst[self.neighbors.shape[0]:], - rtol=1e-05) + reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex( + x, neighbors, count + ) + np.testing.assert_allclose( + self.reindex_src, reindex_src[: self.neighbors.shape[0]], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_src, reindex_src[self.neighbors.shape[0] :], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst[: self.neighbors.shape[0]], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst[self.neighbors.shape[0] :], rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) def test_heter_reindex_result_v2(self): @@ -111,10 +117,11 @@ class TestGraphReindex(unittest.TestCase): reindex_dst.append(reindex_dict[node]) reindex_dst = np.array(reindex_dst, dtype="int64") - reindex_src_, reindex_dst_, out_nodes_ = \ - paddle.incubate.graph_reindex(paddle.to_tensor(x), - paddle.to_tensor(neighbors), - paddle.to_tensor(counts)) + reindex_src_, reindex_dst_, out_nodes_ = 
paddle.incubate.graph_reindex( + paddle.to_tensor(x), + paddle.to_tensor(neighbors), + paddle.to_tensor(counts), + ) np.testing.assert_allclose(reindex_src, reindex_src_, rtol=1e-05) np.testing.assert_allclose(reindex_dst, reindex_dst_, rtol=1e-05) np.testing.assert_allclose(out_nodes, out_nodes_, rtol=1e-05) @@ -122,66 +129,89 @@ class TestGraphReindex(unittest.TestCase): def test_reindex_result_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.static.data(name="x", - shape=self.x.shape, - dtype=self.x.dtype) - neighbors = paddle.static.data(name="neighbors", - shape=self.neighbors.shape, - dtype=self.neighbors.dtype) - count = paddle.static.data(name="count", - shape=self.count.shape, - dtype=self.count.dtype) - value_buffer = paddle.static.data(name="value_buffer", - shape=[self.num_nodes], - dtype="int32") - index_buffer = paddle.static.data(name="index_buffer", - shape=[self.num_nodes], - dtype="int32") - - reindex_src_1, reindex_dst_1, out_nodes_1 = \ - paddle.incubate.graph_reindex(x, neighbors, count) - reindex_src_2, reindex_dst_2, out_nodes_2 = \ - paddle.incubate.graph_reindex(x, neighbors, count, - value_buffer, index_buffer, - flag_buffer_hashtable=True) + x = paddle.static.data( + name="x", shape=self.x.shape, dtype=self.x.dtype + ) + neighbors = paddle.static.data( + name="neighbors", + shape=self.neighbors.shape, + dtype=self.neighbors.dtype, + ) + count = paddle.static.data( + name="count", shape=self.count.shape, dtype=self.count.dtype + ) + value_buffer = paddle.static.data( + name="value_buffer", shape=[self.num_nodes], dtype="int32" + ) + index_buffer = paddle.static.data( + name="index_buffer", shape=[self.num_nodes], dtype="int32" + ) + + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + ) = paddle.incubate.graph_reindex(x, neighbors, count) + ( + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = paddle.incubate.graph_reindex( + x, + neighbors, + count, + value_buffer, + index_buffer, + flag_buffer_hashtable=True, + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'x': - self.x, - 'neighbors': - self.neighbors, - 'count': - self.count, - 'value_buffer': - np.full([self.num_nodes], -1, dtype="int32"), - 'index_buffer': - np.full([self.num_nodes], -1, dtype="int32") - }, - fetch_list=[ - reindex_src_1, reindex_dst_1, out_nodes_1, - reindex_src_2, reindex_dst_2, out_nodes_2 - ]) - reindex_src_1, reindex_dst_1, out_nodes_1, reindex_src_2, \ - reindex_dst_2, out_nodes_2 = ret - np.testing.assert_allclose(self.reindex_src, - reindex_src_1, - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst_1, - rtol=1e-05) + ret = exe.run( + feed={ + 'x': self.x, + 'neighbors': self.neighbors, + 'count': self.count, + 'value_buffer': np.full( + [self.num_nodes], -1, dtype="int32" + ), + 'index_buffer': np.full( + [self.num_nodes], -1, dtype="int32" + ), + }, + fetch_list=[ + reindex_src_1, + reindex_dst_1, + out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ], + ) + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = ret + np.testing.assert_allclose( + self.reindex_src, reindex_src_1, rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst_1, rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes_1, rtol=1e-05) - np.testing.assert_allclose(self.reindex_src, - reindex_src_2, - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst_2, - rtol=1e-05) + 
np.testing.assert_allclose( + self.reindex_src, reindex_src_2, rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst_2, rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes_2, rtol=1e-05) class TestGeometricGraphReindex(unittest.TestCase): - def setUp(self): self.x = np.arange(5).astype("int64") self.neighbors = np.random.randint(100, size=20).astype("int64") @@ -195,7 +225,8 @@ class TestGeometricGraphReindex(unittest.TestCase): self.out_nodes = np.array(out_nodes, dtype="int64") reindex_dict = {node: ind for ind, node in enumerate(self.out_nodes)} self.reindex_src = np.array( - [reindex_dict[node] for node in self.neighbors]) + [reindex_dict[node] for node in self.neighbors] + ) reindex_dst = [] for node, c in zip(self.x, self.count): for i in range(c): @@ -211,15 +242,16 @@ class TestGeometricGraphReindex(unittest.TestCase): value_buffer = paddle.full([self.num_nodes], -1, dtype="int32") index_buffer = paddle.full([self.num_nodes], -1, dtype="int32") - reindex_src, reindex_dst, out_nodes = \ - paddle.geometric.reindex_graph(x, neighbors, count) + reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph( + x, neighbors, count + ) np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) - reindex_src, reindex_dst, out_nodes = \ - paddle.geometric.reindex_graph(x, neighbors, count, - value_buffer, index_buffer) + reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph( + x, neighbors, count, value_buffer, index_buffer + ) np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) @@ -232,20 +264,21 @@ class TestGeometricGraphReindex(unittest.TestCase): count = paddle.to_tensor(self.count) count = paddle.concat([count, count]) - reindex_src, reindex_dst, out_nodes = \ - paddle.geometric.reindex_graph(x, neighbors, count) - np.testing.assert_allclose(self.reindex_src, - reindex_src[:self.neighbors.shape[0]], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_src, - reindex_src[self.neighbors.shape[0]:], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst[:self.neighbors.shape[0]], - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst[self.neighbors.shape[0]:], - rtol=1e-05) + reindex_src, reindex_dst, out_nodes = paddle.geometric.reindex_graph( + x, neighbors, count + ) + np.testing.assert_allclose( + self.reindex_src, reindex_src[: self.neighbors.shape[0]], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_src, reindex_src[self.neighbors.shape[0] :], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst[: self.neighbors.shape[0]], rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst[self.neighbors.shape[0] :], rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) def test_heter_reindex_result_v2(self): @@ -273,10 +306,11 @@ class TestGeometricGraphReindex(unittest.TestCase): reindex_dst.append(reindex_dict[node]) reindex_dst = np.array(reindex_dst, dtype="int64") - reindex_src_, reindex_dst_, out_nodes_ = \ - paddle.geometric.reindex_graph(paddle.to_tensor(x), - paddle.to_tensor(neighbors), - paddle.to_tensor(counts)) + reindex_src_, reindex_dst_, out_nodes_ = paddle.geometric.reindex_graph( + 
paddle.to_tensor(x), + paddle.to_tensor(neighbors), + paddle.to_tensor(counts), + ) np.testing.assert_allclose(reindex_src, reindex_src_, rtol=1e-05) np.testing.assert_allclose(reindex_dst, reindex_dst_, rtol=1e-05) np.testing.assert_allclose(out_nodes, out_nodes_, rtol=1e-05) @@ -308,9 +342,13 @@ class TestGeometricGraphReindex(unittest.TestCase): neighbors = [paddle.to_tensor(neighbors1), paddle.to_tensor(neighbors2)] count = [paddle.to_tensor(count1), paddle.to_tensor(count2)] - reindex_src_, reindex_dst_, out_nodes_ = \ - paddle.geometric.reindex_heter_graph(paddle.to_tensor(x), - neighbors, count) + ( + reindex_src_, + reindex_dst_, + out_nodes_, + ) = paddle.geometric.reindex_heter_graph( + paddle.to_tensor(x), neighbors, count + ) np.testing.assert_allclose(reindex_src, reindex_src_, rtol=1e-05) np.testing.assert_allclose(reindex_dst, reindex_dst_, rtol=1e-05) np.testing.assert_allclose(out_nodes, out_nodes_, rtol=1e-05) @@ -318,60 +356,80 @@ class TestGeometricGraphReindex(unittest.TestCase): def test_reindex_result_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.static.data(name="x", - shape=self.x.shape, - dtype=self.x.dtype) - neighbors = paddle.static.data(name="neighbors", - shape=self.neighbors.shape, - dtype=self.neighbors.dtype) - count = paddle.static.data(name="count", - shape=self.count.shape, - dtype=self.count.dtype) - value_buffer = paddle.static.data(name="value_buffer", - shape=[self.num_nodes], - dtype="int32") - index_buffer = paddle.static.data(name="index_buffer", - shape=[self.num_nodes], - dtype="int32") - - reindex_src_1, reindex_dst_1, out_nodes_1 = \ - paddle.geometric.reindex_graph(x, neighbors, count) - reindex_src_2, reindex_dst_2, out_nodes_2 = \ - paddle.geometric.reindex_graph(x, neighbors, count, - value_buffer, index_buffer) + x = paddle.static.data( + name="x", shape=self.x.shape, dtype=self.x.dtype + ) + neighbors = paddle.static.data( + name="neighbors", + shape=self.neighbors.shape, + dtype=self.neighbors.dtype, + ) + count = paddle.static.data( + name="count", shape=self.count.shape, dtype=self.count.dtype + ) + value_buffer = paddle.static.data( + name="value_buffer", shape=[self.num_nodes], dtype="int32" + ) + index_buffer = paddle.static.data( + name="index_buffer", shape=[self.num_nodes], dtype="int32" + ) + + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + ) = paddle.geometric.reindex_graph(x, neighbors, count) + ( + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = paddle.geometric.reindex_graph( + x, neighbors, count, value_buffer, index_buffer + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'x': - self.x, - 'neighbors': - self.neighbors, - 'count': - self.count, - 'value_buffer': - np.full([self.num_nodes], -1, dtype="int32"), - 'index_buffer': - np.full([self.num_nodes], -1, dtype="int32") - }, - fetch_list=[ - reindex_src_1, reindex_dst_1, out_nodes_1, - reindex_src_2, reindex_dst_2, out_nodes_2 - ]) - reindex_src_1, reindex_dst_1, out_nodes_1, reindex_src_2, \ - reindex_dst_2, out_nodes_2 = ret - np.testing.assert_allclose(self.reindex_src, - reindex_src_1, - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst_1, - rtol=1e-05) + ret = exe.run( + feed={ + 'x': self.x, + 'neighbors': self.neighbors, + 'count': self.count, + 'value_buffer': np.full( + [self.num_nodes], -1, dtype="int32" + ), + 'index_buffer': np.full( + [self.num_nodes], -1, dtype="int32" + ), + }, + fetch_list=[ + reindex_src_1, + reindex_dst_1, + 
out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ], + ) + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = ret + np.testing.assert_allclose( + self.reindex_src, reindex_src_1, rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst_1, rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes_1, rtol=1e-05) - np.testing.assert_allclose(self.reindex_src, - reindex_src_2, - rtol=1e-05) - np.testing.assert_allclose(self.reindex_dst, - reindex_dst_2, - rtol=1e-05) + np.testing.assert_allclose( + self.reindex_src, reindex_src_2, rtol=1e-05 + ) + np.testing.assert_allclose( + self.reindex_dst, reindex_dst_2, rtol=1e-05 + ) np.testing.assert_allclose(self.out_nodes, out_nodes_2, rtol=1e-05) def test_heter_reindex_result_static(self): @@ -401,48 +459,69 @@ class TestGeometricGraphReindex(unittest.TestCase): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data(name="x", shape=[5], dtype="int64") - neighbors1 = paddle.static.data(name="neighbors1", - shape=[20], - dtype="int64") + neighbors1 = paddle.static.data( + name="neighbors1", shape=[20], dtype="int64" + ) count1 = paddle.static.data(name="count1", shape=[5], dtype="int32") - neighbors2 = paddle.static.data(name="neighbors2", - shape=[20], - dtype="int64") + neighbors2 = paddle.static.data( + name="neighbors2", shape=[20], dtype="int64" + ) count2 = paddle.static.data(name="count2", shape=[5], dtype="int32") - value_buffer = paddle.static.data(name="value_buffer", - shape=[5], - dtype="int32") - index_buffer = paddle.static.data(name="index_buffer", - shape=[5], - dtype="int32") - - reindex_src_1, reindex_dst_1, out_nodes_1 = \ - paddle.geometric.reindex_heter_graph(x, - [neighbors1, neighbors2], - [count1, count2]) - reindex_src_2, reindex_dst_2, out_nodes_2 = \ - paddle.geometric.reindex_heter_graph(x, - [neighbors1, neighbors2], - [count1, count2], - value_buffer, index_buffer) + value_buffer = paddle.static.data( + name="value_buffer", shape=[5], dtype="int32" + ) + index_buffer = paddle.static.data( + name="index_buffer", shape=[5], dtype="int32" + ) + + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + ) = paddle.geometric.reindex_heter_graph( + x, [neighbors1, neighbors2], [count1, count2] + ) + ( + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = paddle.geometric.reindex_heter_graph( + x, + [neighbors1, neighbors2], + [count1, count2], + value_buffer, + index_buffer, + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'x': np_x, - 'neighbors1': np_neighbors1, - 'count1': np_count1, - 'neighbors2': np_neighbors2, - 'count2': np_count2, - 'value_buffer': np.full([5], -1, dtype="int32"), - 'index_buffer': np.full([5], -1, dtype="int32") - }, - fetch_list=[ - reindex_src_1, reindex_dst_1, out_nodes_1, - reindex_src_2, reindex_dst_2, out_nodes_2 - ]) - - reindex_src_1, reindex_dst_1, out_nodes_1, reindex_src_2, \ - reindex_dst_2, out_nodes_2 = ret + ret = exe.run( + feed={ + 'x': np_x, + 'neighbors1': np_neighbors1, + 'count1': np_count1, + 'neighbors2': np_neighbors2, + 'count2': np_count2, + 'value_buffer': np.full([5], -1, dtype="int32"), + 'index_buffer': np.full([5], -1, dtype="int32"), + }, + fetch_list=[ + reindex_src_1, + reindex_dst_1, + out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ], + ) + + ( + reindex_src_1, + reindex_dst_1, + out_nodes_1, + reindex_src_2, + reindex_dst_2, + out_nodes_2, + ) = ret np.testing.assert_allclose(reindex_src, 
reindex_src_1, rtol=1e-05) np.testing.assert_allclose(reindex_dst, reindex_dst_1, rtol=1e-05) np.testing.assert_allclose(out_nodes, out_nodes_1, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_graph_sample_neighbors.py b/python/paddle/fluid/tests/unittests/test_graph_sample_neighbors.py index 557d41993b8784bdb443aabdf04d6872724aab0d..20366b80e2ad73ece80fa3a293fdd89947ca3d47 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_sample_neighbors.py +++ b/python/paddle/fluid/tests/unittests/test_graph_sample_neighbors.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestGraphSampleNeighbors(unittest.TestCase): - def setUp(self): num_nodes = 20 edges = np.random.randint(num_nodes, size=(100, 2)) @@ -40,8 +39,9 @@ class TestGraphSampleNeighbors(unittest.TestCase): self.row = sorted_edges[:, 0].astype("int64") self.colptr = colptr.astype("int64") - self.nodes = np.unique(np.random.randint(num_nodes, - size=5)).astype("int64") + self.nodes = np.unique(np.random.randint(num_nodes, size=5)).astype( + "int64" + ) self.sample_size = 5 self.dst_src_dict = dst_src_dict @@ -52,24 +52,29 @@ class TestGraphSampleNeighbors(unittest.TestCase): nodes = paddle.to_tensor(self.nodes) out_neighbors, out_count = paddle.incubate.graph_sample_neighbors( - row, colptr, nodes, sample_size=self.sample_size) + row, colptr, nodes, sample_size=self.sample_size + ) out_count_cumsum = paddle.cumsum(out_count) for i in range(len(out_count)): if i == 0: - neighbors = out_neighbors[0:out_count_cumsum[i]] + neighbors = out_neighbors[0 : out_count_cumsum[i]] else: neighbors = out_neighbors[ - out_count_cumsum[i - 1]:out_count_cumsum[i]] + out_count_cumsum[i - 1] : out_count_cumsum[i] + ] # Ensure the correct sample size. self.assertTrue( out_count[i] == self.sample_size - or out_count[i] == len(self.dst_src_dict[self.nodes[i]])) + or out_count[i] == len(self.dst_src_dict[self.nodes[i]]) + ) # Ensure no repetitive sample neighbors. self.assertTrue( - neighbors.shape[0] == paddle.unique(neighbors).shape[0]) + neighbors.shape[0] == paddle.unique(neighbors).shape[0] + ) # Ensure the correct sample neighbors. - in_neighbors = np.isin(neighbors.numpy(), - self.dst_src_dict[self.nodes[i]]) + in_neighbors = np.isin( + neighbors.numpy(), self.dst_src_dict[self.nodes[i]] + ) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) def test_sample_result_fisher_yates_sampling(self): @@ -86,57 +91,69 @@ class TestGraphSampleNeighbors(unittest.TestCase): nodes, perm_buffer=perm_buffer, sample_size=self.sample_size, - flag_perm_buffer=True) + flag_perm_buffer=True, + ) out_count_cumsum = paddle.cumsum(out_count) for i in range(len(out_count)): if i == 0: - neighbors = out_neighbors[0:out_count_cumsum[i]] + neighbors = out_neighbors[0 : out_count_cumsum[i]] else: neighbors = out_neighbors[ - out_count_cumsum[i - 1]:out_count_cumsum[i]] + out_count_cumsum[i - 1] : out_count_cumsum[i] + ] # Ensure the correct sample size. self.assertTrue( out_count[i] == self.sample_size - or out_count[i] == len(self.dst_src_dict[self.nodes[i]])) + or out_count[i] == len(self.dst_src_dict[self.nodes[i]]) + ) # Ensure no repetitive sample neighbors. self.assertTrue( - neighbors.shape[0] == paddle.unique(neighbors).shape[0]) + neighbors.shape[0] == paddle.unique(neighbors).shape[0] + ) # Ensure the correct sample neighbors. 
- in_neighbors = np.isin(neighbors.numpy(), - self.dst_src_dict[self.nodes[i]]) + in_neighbors = np.isin( + neighbors.numpy(), self.dst_src_dict[self.nodes[i]] + ) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) def test_sample_result_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) out_neighbors, out_count = paddle.incubate.graph_sample_neighbors( - row, colptr, nodes, sample_size=self.sample_size) + row, colptr, nodes, sample_size=self.sample_size + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'colptr': self.colptr, - 'nodes': self.nodes - }, - fetch_list=[out_neighbors, out_count]) + ret = exe.run( + feed={ + 'row': self.row, + 'colptr': self.colptr, + 'nodes': self.nodes, + }, + fetch_list=[out_neighbors, out_count], + ) out_neighbors, out_count = ret out_count_cumsum = np.cumsum(out_count) out_neighbors = np.split(out_neighbors, out_count_cumsum)[:-1] - for neighbors, node, count in zip(out_neighbors, self.nodes, - out_count): - self.assertTrue(count == self.sample_size - or count == len(self.dst_src_dict[node])) + for neighbors, node, count in zip( + out_neighbors, self.nodes, out_count + ): + self.assertTrue( + count == self.sample_size + or count == len(self.dst_src_dict[node]) + ) self.assertTrue( - neighbors.shape[0] == np.unique(neighbors).shape[0]) + neighbors.shape[0] == np.unique(neighbors).shape[0] + ) in_neighbors = np.isin(neighbors, self.dst_src_dict[node]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) @@ -147,18 +164,22 @@ class TestGraphSampleNeighbors(unittest.TestCase): nodes = paddle.to_tensor(self.nodes) def check_eid_error(): - paddle.incubate.graph_sample_neighbors(row, - colptr, - nodes, - sample_size=self.sample_size, - return_eids=True) + paddle.incubate.graph_sample_neighbors( + row, + colptr, + nodes, + sample_size=self.sample_size, + return_eids=True, + ) def check_perm_buffer_error(): - paddle.incubate.graph_sample_neighbors(row, - colptr, - nodes, - sample_size=self.sample_size, - flag_perm_buffer=True) + paddle.incubate.graph_sample_neighbors( + row, + colptr, + nodes, + sample_size=self.sample_size, + flag_perm_buffer=True, + ) self.assertRaises(ValueError, check_eid_error) self.assertRaises(ValueError, check_perm_buffer_error) @@ -171,15 +192,24 @@ class TestGraphSampleNeighbors(unittest.TestCase): eids = paddle.to_tensor(self.edges_id) perm_buffer = paddle.to_tensor(self.edges_id) - out_neighbors, out_count, out_eids = paddle.incubate.graph_sample_neighbors( + ( + out_neighbors, + out_count, + out_eids, + ) = paddle.incubate.graph_sample_neighbors( row, colptr, nodes, eids=eids, sample_size=self.sample_size, - return_eids=True) + return_eids=True, + ) - out_neighbors, out_count, out_eids = paddle.incubate.graph_sample_neighbors( + ( + out_neighbors, + out_count, + out_eids, + ) = paddle.incubate.graph_sample_neighbors( row, colptr, nodes, @@ -187,42 +217,49 @@ class 
TestGraphSampleNeighbors(unittest.TestCase): perm_buffer=perm_buffer, sample_size=self.sample_size, return_eids=True, - flag_perm_buffer=True) + flag_perm_buffer=True, + ) paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) - eids = paddle.static.data(name="eids", - shape=self.edges_id.shape, - dtype=self.nodes.dtype) - - out_neighbors, out_count, out_eids = paddle.incubate.graph_sample_neighbors( + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) + eids = paddle.static.data( + name="eids", shape=self.edges_id.shape, dtype=self.nodes.dtype + ) + + ( + out_neighbors, + out_count, + out_eids, + ) = paddle.incubate.graph_sample_neighbors( row, colptr, nodes, eids, sample_size=self.sample_size, - return_eids=True) + return_eids=True, + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'colptr': self.colptr, - 'nodes': self.nodes, - 'eids': self.edges_id - }, - fetch_list=[out_neighbors, out_count, out_eids]) + ret = exe.run( + feed={ + 'row': self.row, + 'colptr': self.colptr, + 'nodes': self.nodes, + 'eids': self.edges_id, + }, + fetch_list=[out_neighbors, out_count, out_eids], + ) class TestGeometricGraphSampleNeighbors(unittest.TestCase): - def setUp(self): num_nodes = 20 edges = np.random.randint(num_nodes, size=(100, 2)) @@ -243,8 +280,9 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): self.row = sorted_edges[:, 0].astype("int64") self.colptr = colptr.astype("int64") - self.nodes = np.unique(np.random.randint(num_nodes, - size=5)).astype("int64") + self.nodes = np.unique(np.random.randint(num_nodes, size=5)).astype( + "int64" + ) self.sample_size = 5 self.dst_src_dict = dst_src_dict @@ -255,24 +293,29 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): nodes = paddle.to_tensor(self.nodes) out_neighbors, out_count = paddle.geometric.sample_neighbors( - row, colptr, nodes, sample_size=self.sample_size) + row, colptr, nodes, sample_size=self.sample_size + ) out_count_cumsum = paddle.cumsum(out_count) for i in range(len(out_count)): if i == 0: - neighbors = out_neighbors[0:out_count_cumsum[i]] + neighbors = out_neighbors[0 : out_count_cumsum[i]] else: neighbors = out_neighbors[ - out_count_cumsum[i - 1]:out_count_cumsum[i]] + out_count_cumsum[i - 1] : out_count_cumsum[i] + ] # Ensure the correct sample size. self.assertTrue( out_count[i] == self.sample_size - or out_count[i] == len(self.dst_src_dict[self.nodes[i]])) + or out_count[i] == len(self.dst_src_dict[self.nodes[i]]) + ) # Ensure no repetitive sample neighbors. self.assertTrue( - neighbors.shape[0] == paddle.unique(neighbors).shape[0]) + neighbors.shape[0] == paddle.unique(neighbors).shape[0] + ) # Ensure the correct sample neighbors. 
- in_neighbors = np.isin(neighbors.numpy(), - self.dst_src_dict[self.nodes[i]]) + in_neighbors = np.isin( + neighbors.numpy(), self.dst_src_dict[self.nodes[i]] + ) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) def test_sample_result_fisher_yates_sampling(self): @@ -288,57 +331,69 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): colptr, nodes, perm_buffer=perm_buffer, - sample_size=self.sample_size) + sample_size=self.sample_size, + ) out_count_cumsum = paddle.cumsum(out_count) for i in range(len(out_count)): if i == 0: - neighbors = out_neighbors[0:out_count_cumsum[i]] + neighbors = out_neighbors[0 : out_count_cumsum[i]] else: neighbors = out_neighbors[ - out_count_cumsum[i - 1]:out_count_cumsum[i]] + out_count_cumsum[i - 1] : out_count_cumsum[i] + ] # Ensure the correct sample size. self.assertTrue( out_count[i] == self.sample_size - or out_count[i] == len(self.dst_src_dict[self.nodes[i]])) + or out_count[i] == len(self.dst_src_dict[self.nodes[i]]) + ) # Ensure no repetitive sample neighbors. self.assertTrue( - neighbors.shape[0] == paddle.unique(neighbors).shape[0]) + neighbors.shape[0] == paddle.unique(neighbors).shape[0] + ) # Ensure the correct sample neighbors. - in_neighbors = np.isin(neighbors.numpy(), - self.dst_src_dict[self.nodes[i]]) + in_neighbors = np.isin( + neighbors.numpy(), self.dst_src_dict[self.nodes[i]] + ) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) def test_sample_result_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) out_neighbors, out_count = paddle.geometric.sample_neighbors( - row, colptr, nodes, sample_size=self.sample_size) + row, colptr, nodes, sample_size=self.sample_size + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'colptr': self.colptr, - 'nodes': self.nodes - }, - fetch_list=[out_neighbors, out_count]) + ret = exe.run( + feed={ + 'row': self.row, + 'colptr': self.colptr, + 'nodes': self.nodes, + }, + fetch_list=[out_neighbors, out_count], + ) out_neighbors, out_count = ret out_count_cumsum = np.cumsum(out_count) out_neighbors = np.split(out_neighbors, out_count_cumsum)[:-1] - for neighbors, node, count in zip(out_neighbors, self.nodes, - out_count): - self.assertTrue(count == self.sample_size - or count == len(self.dst_src_dict[node])) + for neighbors, node, count in zip( + out_neighbors, self.nodes, out_count + ): self.assertTrue( - neighbors.shape[0] == np.unique(neighbors).shape[0]) + count == self.sample_size + or count == len(self.dst_src_dict[node]) + ) + self.assertTrue( + neighbors.shape[0] == np.unique(neighbors).shape[0] + ) in_neighbors = np.isin(neighbors, self.dst_src_dict[node]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0]) @@ -349,11 +404,13 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): nodes = paddle.to_tensor(self.nodes) def check_eid_error(): - paddle.geometric.sample_neighbors(row, - colptr, - nodes, 
- sample_size=self.sample_size, - return_eids=True) + paddle.geometric.sample_neighbors( + row, + colptr, + nodes, + sample_size=self.sample_size, + return_eids=True, + ) self.assertRaises(ValueError, check_eid_error) @@ -371,7 +428,8 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): nodes, eids=eids, sample_size=self.sample_size, - return_eids=True) + return_eids=True, + ) out_neighbors, out_count, out_eids = paddle.geometric.sample_neighbors( row, @@ -380,38 +438,46 @@ class TestGeometricGraphSampleNeighbors(unittest.TestCase): eids=eids, perm_buffer=perm_buffer, sample_size=self.sample_size, - return_eids=True) + return_eids=True, + ) paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - row = paddle.static.data(name="row", - shape=self.row.shape, - dtype=self.row.dtype) - colptr = paddle.static.data(name="colptr", - shape=self.colptr.shape, - dtype=self.colptr.dtype) - nodes = paddle.static.data(name="nodes", - shape=self.nodes.shape, - dtype=self.nodes.dtype) - eids = paddle.static.data(name="eids", - shape=self.edges_id.shape, - dtype=self.nodes.dtype) - - out_neighbors, out_count, out_eids = paddle.geometric.sample_neighbors( + row = paddle.static.data( + name="row", shape=self.row.shape, dtype=self.row.dtype + ) + colptr = paddle.static.data( + name="colptr", shape=self.colptr.shape, dtype=self.colptr.dtype + ) + nodes = paddle.static.data( + name="nodes", shape=self.nodes.shape, dtype=self.nodes.dtype + ) + eids = paddle.static.data( + name="eids", shape=self.edges_id.shape, dtype=self.nodes.dtype + ) + + ( + out_neighbors, + out_count, + out_eids, + ) = paddle.geometric.sample_neighbors( row, colptr, nodes, sample_size=self.sample_size, eids=eids, - return_eids=True) + return_eids=True, + ) exe = paddle.static.Executor(paddle.CPUPlace()) - ret = exe.run(feed={ - 'row': self.row, - 'colptr': self.colptr, - 'nodes': self.nodes, - 'eids': self.edges_id - }, - fetch_list=[out_neighbors, out_count, out_eids]) + ret = exe.run( + feed={ + 'row': self.row, + 'colptr': self.colptr, + 'nodes': self.nodes, + 'eids': self.edges_id, + }, + fetch_list=[out_neighbors, out_count, out_eids], + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py b/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py index 40d1673438fa72cfd0fbf672b1272fbe06cb1057..a5344a96a319e72e4d2d827f9dc8c54f59db8490 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py +++ b/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py @@ -21,18 +21,15 @@ from paddle.fluid.framework import _test_eager_guard from op_test import OpTest -def graph_send_recv_wrapper(x, - src_index, - dst_index, - reduce_op="sum", - out_size=None, - name=None): - return paddle.geometric.send_u_recv(x, src_index, dst_index, - reduce_op.lower(), out_size, name) +def graph_send_recv_wrapper( + x, src_index, dst_index, reduce_op="sum", out_size=None, name=None +): + return paddle.geometric.send_u_recv( + x, src_index, dst_index, reduce_op.lower(), out_size, name + ) class TestGraphSendRecvMaxOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_recv_wrapper @@ -48,21 +45,20 @@ class TestGraphSendRecvMaxOp(OpTest): self.attrs = {'reduce_op': 'MAX'} out, self.gradient = compute_graph_send_recv_for_min_max( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out} def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - 
self.check_grad(['X'], - 'Out', - user_defined_grads=[self.gradient], - check_eager=True) + self.check_grad( + ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True + ) class TestGraphSendRecvMinOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_recv_wrapper @@ -78,7 +74,8 @@ class TestGraphSendRecvMinOp(OpTest): self.attrs = {'reduce_op': 'MIN'} out, self.gradient = compute_graph_send_recv_for_min_max( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out} @@ -86,14 +83,12 @@ class TestGraphSendRecvMinOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.gradient], - check_eager=True) + self.check_grad( + ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True + ) class TestGraphSendRecvSumOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_recv_wrapper @@ -120,7 +115,6 @@ class TestGraphSendRecvSumOp(OpTest): class TestGraphSendRecvMeanOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_recv_wrapper @@ -136,7 +130,8 @@ class TestGraphSendRecvMeanOp(OpTest): self.attrs = {'reduce_op': 'MEAN'} out, dst_count = compute_graph_send_recv_for_sum_mean( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out, 'Dst_count': dst_count} @@ -197,8 +192,9 @@ def compute_graph_send_recv_for_min_max(inputs, attributes): results[s_id, :] += gather_x[index, :] first_set.add(s_id) else: - results[s_id, :] = np.maximum(results[s_id, :], - gather_x[index, :]) + results[s_id, :] = np.maximum( + results[s_id, :], gather_x[index, :] + ) elif reduce_op == "MIN": first_set = set() for index, s_id in enumerate(dst_index): @@ -206,8 +202,9 @@ def compute_graph_send_recv_for_min_max(inputs, attributes): results[s_id, :] += gather_x[index, :] first_set.add(s_id) else: - results[s_id, :] = np.minimum(results[s_id, :], - gather_x[index, :]) + results[s_id, :] = np.minimum( + results[s_id, :], gather_x[index, :] + ) else: raise ValueError("Invalid reduce_op, only MAX, MIN supported!") @@ -216,14 +213,14 @@ def compute_graph_send_recv_for_min_max(inputs, attributes): for i in range(index_size): forward_src_idx = src_index[i] forward_dst_idx = dst_index[i] - gradient[forward_src_idx] += 1 * (x[forward_src_idx] - == results[forward_dst_idx]) + gradient[forward_src_idx] += 1 * ( + x[forward_src_idx] == results[forward_dst_idx] + ) return results, gradient / results.size class API_GraphSendRecvOpTest(unittest.TestCase): - def test_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): @@ -231,53 +228,64 @@ class API_GraphSendRecvOpTest(unittest.TestCase): src_index = paddle.static.data(name="src", shape=[4], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[4], dtype="int32") - res_sum = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "sum") - res_mean = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "mean") - res_max = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "max") - res_min = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "min") + res_sum = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "sum" + ) + res_mean = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "mean" + ) + res_max = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "max" + ) + res_min = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "min" + ) exe = 
paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype='float32') data2 = np.array([0, 1, 2, 0], dtype="int32") data3 = np.array([1, 2, 1, 0], dtype="int32") - np_sum = np.array([[0, 2, 3], [2, 8, 10], [1, 4, 5]], - dtype="float32") - np_mean = np.array([[0, 2, 3], [1, 4, 5], [1, 4, 5]], - dtype="float32") - np_max = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], - dtype="float32") - np_min = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], - dtype="float32") - - ret = exe.run(feed={ - 'x': data1, - 'src': data2, - 'dst': data3 - }, - fetch_list=[res_sum, res_mean, res_max, res_min]) + np_sum = np.array( + [[0, 2, 3], [2, 8, 10], [1, 4, 5]], dtype="float32" + ) + np_mean = np.array( + [[0, 2, 3], [1, 4, 5], [1, 4, 5]], dtype="float32" + ) + np_max = np.array( + [[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32" + ) + np_min = np.array( + [[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float32" + ) + + ret = exe.run( + feed={'x': data1, 'src': data2, 'dst': data3}, + fetch_list=[res_sum, res_mean, res_max, res_min], + ) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_dygraph(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_sum = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "sum") - res_mean = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "mean") - res_max = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "max") - res_min = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "min") + res_sum = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "sum" + ) + res_mean = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "mean" + ) + res_max = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "max" + ) + res_min = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "min" + ) np_sum = np.array([[0, 2, 3], [2, 8, 10], [1, 4, 5]], dtype="float32") np_mean = np.array([[0, 2, 3], [1, 4, 5], [1, 4, 5]], dtype="float32") @@ -291,18 +299,23 @@ class API_GraphSendRecvOpTest(unittest.TestCase): def test_int32_input(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), - dtype="int32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), dtype="int32" + ) src_index = paddle.to_tensor(np.array([0, 1, 2, 0, 1]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0, 1]), dtype="int32") - res_sum = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "sum") - res_mean = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "mean") - res_max = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "max") - res_min = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "min") + res_sum = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "sum" + ) + res_mean = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "mean" + ) + res_max = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "max" + ) + res_min = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "min" + ) np_sum = np.array([[0, 2, 3], [3, 12, 14], [1, 4, 5]], dtype="int32") np_mean = np.array([[0, 2, 3], [1, 4, 4], [1, 4, 
5]], dtype="int32") @@ -316,23 +329,24 @@ class API_GraphSendRecvOpTest(unittest.TestCase): def test_set_outsize_gpu(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), dtype="float32" + ) src_index = paddle.to_tensor(np.array([0, 0, 1]), dtype="int32") dst_index = paddle.to_tensor(np.array([0, 1, 1]), dtype="int32") res = paddle.incubate.graph_send_recv(x, src_index, dst_index, "sum") out_size = paddle.max(dst_index) + 1 res_set_outsize = paddle.incubate.graph_send_recv( - x, src_index, dst_index, "sum", out_size) + x, src_index, dst_index, "sum", out_size + ) np_res = np.array([[0, 2, 3], [1, 6, 8], [0, 0, 0]], dtype="float32") np_res_set_outsize = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") np.testing.assert_allclose(np_res, res, rtol=1e-05, atol=1e-06) - np.testing.assert_allclose(np_res_set_outsize, - res_set_outsize, - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res_set_outsize, res_set_outsize, rtol=1e-05, atol=1e-06 + ) def test_out_size_tensor_static(self): paddle.enable_static() @@ -340,12 +354,13 @@ class API_GraphSendRecvOpTest(unittest.TestCase): x = paddle.static.data(name="x", shape=[3, 3], dtype="float32") src_index = paddle.static.data(name="src", shape=[3], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[3], dtype="int32") - out_size = paddle.static.data(name="out_size", - shape=[1], - dtype="int32") + out_size = paddle.static.data( + name="out_size", shape=[1], dtype="int32" + ) - res_sum = paddle.incubate.graph_send_recv(x, src_index, dst_index, - "sum", out_size) + res_sum = paddle.incubate.graph_send_recv( + x, src_index, dst_index, "sum", out_size + ) exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]], dtype='float32') @@ -355,13 +370,15 @@ class API_GraphSendRecvOpTest(unittest.TestCase): np_sum = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") - ret = exe.run(feed={ - 'x': data1, - 'src': data2, - 'dst': data3, - 'out_size': data4, - }, - fetch_list=[res_sum]) + ret = exe.run( + feed={ + 'x': data1, + 'src': data2, + 'dst': data3, + 'out_size': data4, + }, + fetch_list=[res_sum], + ) np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06) def test_api_eager_dygraph(self): @@ -372,7 +389,6 @@ class API_GraphSendRecvOpTest(unittest.TestCase): class API_GeometricSendURecvTest(unittest.TestCase): - def test_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): @@ -380,43 +396,50 @@ class API_GeometricSendURecvTest(unittest.TestCase): src_index = paddle.static.data(name="src", shape=[4], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[4], dtype="int32") - res_sum = paddle.geometric.send_u_recv(x, src_index, dst_index, - "sum") - res_mean = paddle.geometric.send_u_recv(x, src_index, dst_index, - "mean") - res_max = paddle.geometric.send_u_recv(x, src_index, dst_index, - "max") - res_min = paddle.geometric.send_u_recv(x, src_index, dst_index, - "min") + res_sum = paddle.geometric.send_u_recv( + x, src_index, dst_index, "sum" + ) + res_mean = paddle.geometric.send_u_recv( + x, src_index, dst_index, "mean" + ) + res_max = paddle.geometric.send_u_recv( + x, src_index, dst_index, "max" + ) + res_min = paddle.geometric.send_u_recv( + x, src_index, dst_index, "min" + ) exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype='float32') 
data2 = np.array([0, 1, 2, 0], dtype="int32") data3 = np.array([1, 2, 1, 0], dtype="int32") - np_sum = np.array([[0, 2, 3], [2, 8, 10], [1, 4, 5]], - dtype="float32") - np_mean = np.array([[0, 2, 3], [1, 4, 5], [1, 4, 5]], - dtype="float32") - np_max = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], - dtype="float32") - np_min = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], - dtype="float32") - - ret = exe.run(feed={ - 'x': data1, - 'src': data2, - 'dst': data3 - }, - fetch_list=[res_sum, res_mean, res_max, res_min]) + np_sum = np.array( + [[0, 2, 3], [2, 8, 10], [1, 4, 5]], dtype="float32" + ) + np_mean = np.array( + [[0, 2, 3], [1, 4, 5], [1, 4, 5]], dtype="float32" + ) + np_max = np.array( + [[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32" + ) + np_min = np.array( + [[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float32" + ) + + ret = exe.run( + feed={'x': data1, 'src': data2, 'dst': data3}, + fetch_list=[res_sum, res_mean, res_max, res_min], + ) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_dygraph(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") res_sum = paddle.geometric.send_u_recv(x, src_index, dst_index, "sum") @@ -436,8 +459,9 @@ class API_GeometricSendURecvTest(unittest.TestCase): def test_int32_input(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), - dtype="int32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), dtype="int32" + ) src_index = paddle.to_tensor(np.array([0, 1, 2, 0, 1]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0, 1]), dtype="int32") res_sum = paddle.geometric.send_u_recv(x, src_index, dst_index, "sum") @@ -457,23 +481,24 @@ class API_GeometricSendURecvTest(unittest.TestCase): def test_set_outsize_gpu(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]]), dtype="float32" + ) src_index = paddle.to_tensor(np.array([0, 0, 1]), dtype="int32") dst_index = paddle.to_tensor(np.array([0, 1, 1]), dtype="int32") res = paddle.geometric.send_u_recv(x, src_index, dst_index, "sum") out_size = paddle.max(dst_index) + 1 - res_set_outsize = paddle.geometric.send_u_recv(x, src_index, dst_index, - "sum", out_size) + res_set_outsize = paddle.geometric.send_u_recv( + x, src_index, dst_index, "sum", out_size + ) np_res = np.array([[0, 2, 3], [1, 6, 8], [0, 0, 0]], dtype="float32") np_res_set_outsize = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") np.testing.assert_allclose(np_res, res, rtol=1e-05, atol=1e-06) - np.testing.assert_allclose(np_res_set_outsize, - res_set_outsize, - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res_set_outsize, res_set_outsize, rtol=1e-05, atol=1e-06 + ) def test_out_size_tensor_static(self): paddle.enable_static() @@ -481,12 +506,13 @@ class API_GeometricSendURecvTest(unittest.TestCase): x = paddle.static.data(name="x", shape=[3, 3], dtype="float32") src_index = paddle.static.data(name="src", shape=[3], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[3], dtype="int32") - out_size = paddle.static.data(name="out_size", 
- shape=[1], - dtype="int32") + out_size = paddle.static.data( + name="out_size", shape=[1], dtype="int32" + ) - res_sum = paddle.geometric.send_u_recv(x, src_index, dst_index, - "sum", out_size) + res_sum = paddle.geometric.send_u_recv( + x, src_index, dst_index, "sum", out_size + ) exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]], dtype='float32') @@ -496,13 +522,15 @@ class API_GeometricSendURecvTest(unittest.TestCase): np_sum = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") - ret = exe.run(feed={ - 'x': data1, - 'src': data2, - 'dst': data3, - 'out_size': data4, - }, - fetch_list=[res_sum]) + ret = exe.run( + feed={ + 'x': data1, + 'src': data2, + 'dst': data3, + 'out_size': data4, + }, + fetch_list=[res_sum], + ) np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06) def test_api_eager_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_graph_send_ue_recv_op.py b/python/paddle/fluid/tests/unittests/test_graph_send_ue_recv_op.py index bf74885ba5a4913147412cd264ef825167190600..c3bb651a3de391d3e907401d83828e56a0f8fb3e 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_send_ue_recv_op.py +++ b/python/paddle/fluid/tests/unittests/test_graph_send_ue_recv_op.py @@ -40,7 +40,6 @@ def get_broadcast_shape(shp1, shp2): class BroadCastInfo(object): - def __init__(self, x_shape, y_shape): self.x_shape = x_shape self.y_shape = y_shape @@ -71,16 +70,24 @@ class BroadCastInfo(object): lhs_offset = [0] rhs_offset = [0] for j in range(0, max_ndim): - dl = 1 if (len(self.x_shape) - 1 - j) < 1 \ - else self.x_shape[len(self.x_shape) - 1 - j] - dr = 1 if (len(self.y_shape) - 1 - j) < 1 \ - else self.y_shape[len(self.y_shape) - 1 - j] + dl = ( + 1 + if (len(self.x_shape) - 1 - j) < 1 + else self.x_shape[len(self.x_shape) - 1 - j] + ) + dr = ( + 1 + if (len(self.y_shape) - 1 - j) < 1 + else self.y_shape[len(self.y_shape) - 1 - j] + ) for i in range(1, max(dl, dr)): for k in range(0, out_len): - lhs_offset.append(lhs_offset[k] + i * - (i < dl) * stride_l) - rhs_offset.append(rhs_offset[k] + i * - (i < dr) * stride_r) + lhs_offset.append( + lhs_offset[k] + i * (i < dl) * stride_l + ) + rhs_offset.append( + rhs_offset[k] + i * (i < dr) * stride_r + ) out_len *= max(dl, dr) stride_l *= dl @@ -176,16 +183,18 @@ def compute_graph_send_ue_recv_for_max_min(inputs, attributes): results[s_id, :] += x_compute_y[index, :] first_set.add(s_id) else: - results[s_id, :] = np.maximum(results[s_id, :], - x_compute_y[index, :]) + results[s_id, :] = np.maximum( + results[s_id, :], x_compute_y[index, :] + ) elif reduce_op == 'MIN': for index, s_id in enumerate(dst_index): if s_id not in first_set: results[s_id, :] += x_compute_y[index, :] first_set.add(s_id) else: - results[s_id, :] = np.minimum(results[s_id, :], - x_compute_y[index, :]) + results[s_id, :] = np.minimum( + results[s_id, :], x_compute_y[index, :] + ) else: raise ValueError("Invalid reduce_op, only MAX, MIN supported!") @@ -220,9 +229,11 @@ def compute_graph_send_ue_recv_for_max_min(inputs, attributes): out_add_1 = int(j % out_off.shape[1]) val = x_off[x_add_0][x_add_1] + y_off[y_add_0][y_add_1] x_grad_off[x_add_0][x_add_1] += 1 * ( - val == out_off[out_add_0][out_add_1]) + val == out_off[out_add_0][out_add_1] + ) y_grad_off[y_add_0][y_add_1] += 1 * ( - val == out_off[out_add_0][out_add_1]) + val == out_off[out_add_0][out_add_1] + ) elif message_op == 'MUL': if len(x_off.shape) == 1 and len(y_off.shape) == 1: val = x_off[x_add] * y_off[y_add] @@ -237,33 +248,45 @@ def 
compute_graph_send_ue_recv_for_max_min(inputs, attributes): out_add_0 = int(j / out_off.shape[1]) out_add_1 = int(j % out_off.shape[1]) val = x_off[x_add_0][x_add_1] * y_off[y_add_0][y_add_1] - x_grad_off[x_add_0][x_add_1] += 1 * ( - val == out_off[out_add_0][out_add_1] - ) * y_off[y_add_0][y_add_1] - y_grad_off[y_add_0][y_add_1] += 1 * ( - val == out_off[out_add_0][out_add_1] - ) * x_off[x_add_0][x_add_1] + x_grad_off[x_add_0][x_add_1] += ( + 1 + * (val == out_off[out_add_0][out_add_1]) + * y_off[y_add_0][y_add_1] + ) + y_grad_off[y_add_0][y_add_1] += ( + 1 + * (val == out_off[out_add_0][out_add_1]) + * x_off[x_add_0][x_add_1] + ) gradients = [x_gradient / results.size, y_gradient / results.size] return results, gradients -def graph_send_ue_recv_wrapper(x, - y, - src_index, - dst_index, - message_op="add", - reduce_op="sum", - out_size=None, - name=None): - return paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - message_op.lower(), reduce_op.lower(), - out_size, name) +def graph_send_ue_recv_wrapper( + x, + y, + src_index, + dst_index, + message_op="add", + reduce_op="sum", + out_size=None, + name=None, +): + return paddle.geometric.send_ue_recv( + x, + y, + src_index, + dst_index, + message_op.lower(), + reduce_op.lower(), + out_size, + name, + ) class TestGraphSendUERecvSumOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_ue_recv_wrapper @@ -274,7 +297,7 @@ class TestGraphSendUERecvSumOp(OpTest): 'X': self.x, 'Y': self.y, 'Src_index': self.src_index, - 'Dst_index': self.dst_index + 'Dst_index': self.dst_index, } self.attrs = {'message_op': self.message_op, 'reduce_op': 'SUM'} @@ -298,7 +321,6 @@ class TestGraphSendUERecvSumOp(OpTest): class TestSumCase1(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -309,7 +331,6 @@ class TestSumCase1(TestGraphSendUERecvSumOp): class TestSumCase2(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -320,7 +341,6 @@ class TestSumCase2(TestGraphSendUERecvSumOp): class TestSumCase3(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -331,7 +351,6 @@ class TestSumCase3(TestGraphSendUERecvSumOp): class TestSumCase4(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -342,7 +361,6 @@ class TestSumCase4(TestGraphSendUERecvSumOp): class TestSumCase5(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -353,7 +371,6 @@ class TestSumCase5(TestGraphSendUERecvSumOp): class TestSumCase6(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -364,7 +381,6 @@ class TestSumCase6(TestGraphSendUERecvSumOp): class TestSumCase7(TestGraphSendUERecvSumOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -375,7 +391,6 @@ class TestSumCase7(TestGraphSendUERecvSumOp): class TestGraphSendUERecvMeanOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_ue_recv_wrapper 
@@ -386,12 +401,13 @@ class TestGraphSendUERecvMeanOp(OpTest): 'X': self.x, 'Y': self.y, 'Src_index': self.src_index, - 'Dst_index': self.dst_index + 'Dst_index': self.dst_index, } self.attrs = {'message_op': self.message_op, 'reduce_op': 'MEAN'} out, dst_count = compute_graph_send_ue_recv_for_mean( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out, 'Dst_count': dst_count} @@ -411,7 +427,6 @@ class TestGraphSendUERecvMeanOp(OpTest): class TestMeanCase1(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -422,7 +437,6 @@ class TestMeanCase1(TestGraphSendUERecvMeanOp): class TestMeanCase2(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -433,7 +447,6 @@ class TestMeanCase2(TestGraphSendUERecvMeanOp): class TestMeanCase3(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -444,7 +457,6 @@ class TestMeanCase3(TestGraphSendUERecvMeanOp): class TestMeanCase4(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -455,7 +467,6 @@ class TestMeanCase4(TestGraphSendUERecvMeanOp): class TestMeanCase5(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -466,7 +477,6 @@ class TestMeanCase5(TestGraphSendUERecvMeanOp): class TestMeanCase6(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -477,7 +487,6 @@ class TestMeanCase6(TestGraphSendUERecvMeanOp): class TestMeanCase7(TestGraphSendUERecvMeanOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -488,7 +497,6 @@ class TestMeanCase7(TestGraphSendUERecvMeanOp): class TestGraphSendUERecvMaxOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_ue_recv_wrapper @@ -499,12 +507,13 @@ class TestGraphSendUERecvMaxOp(OpTest): 'X': self.x, 'Y': self.y, 'Src_index': self.src_index, - 'Dst_index': self.dst_index + 'Dst_index': self.dst_index, } self.attrs = {'message_op': self.message_op, 'reduce_op': 'MAX'} out, self.gradients = compute_graph_send_ue_recv_for_max_min( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out} @@ -520,14 +529,15 @@ class TestGraphSendUERecvMaxOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=self.gradients, - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=self.gradients, + check_eager=True, + ) class TestMaxCase1(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -538,7 +548,6 @@ class TestMaxCase1(TestGraphSendUERecvMaxOp): class TestMaxCase2(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -549,7 +558,6 @@ class TestMaxCase2(TestGraphSendUERecvMaxOp): class 
TestMaxCase3(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -560,7 +568,6 @@ class TestMaxCase3(TestGraphSendUERecvMaxOp): class TestMaxCase4(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -571,7 +578,6 @@ class TestMaxCase4(TestGraphSendUERecvMaxOp): class TestMaxCase5(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -582,7 +588,6 @@ class TestMaxCase5(TestGraphSendUERecvMaxOp): class TestMaxCase6(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -593,7 +598,6 @@ class TestMaxCase6(TestGraphSendUERecvMaxOp): class TestMaxCase7(TestGraphSendUERecvMaxOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -604,7 +608,6 @@ class TestMaxCase7(TestGraphSendUERecvMaxOp): class TestGraphSendUERecvMinOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_ue_recv_wrapper @@ -615,12 +618,13 @@ class TestGraphSendUERecvMinOp(OpTest): 'X': self.x, 'Y': self.y, 'Src_index': self.src_index, - 'Dst_index': self.dst_index + 'Dst_index': self.dst_index, } self.attrs = {'message_op': self.message_op, 'reduce_op': 'MIN'} out, self.gradients = compute_graph_send_ue_recv_for_max_min( - self.inputs, self.attrs) + self.inputs, self.attrs + ) self.outputs = {'Out': out} @@ -636,14 +640,15 @@ class TestGraphSendUERecvMinOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=self.gradients, - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=self.gradients, + check_eager=True, + ) class TestMinCase1(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -654,7 +659,6 @@ class TestMinCase1(TestGraphSendUERecvMinOp): class TestMinCase2(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -665,7 +669,6 @@ class TestMinCase2(TestGraphSendUERecvMinOp): class TestMinCase3(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((150, 1)).astype("float64") @@ -676,7 +679,6 @@ class TestMinCase3(TestGraphSendUERecvMinOp): class TestMinCase4(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -687,7 +689,6 @@ class TestMinCase4(TestGraphSendUERecvMinOp): class TestMinCase5(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((10, 8, 5)).astype("float64") self.y = np.random.random((15, 8, 1)).astype("float64") @@ -698,7 +699,6 @@ class TestMinCase5(TestGraphSendUERecvMinOp): class TestMinCase6(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -709,7 +709,6 @@ class TestMinCase6(TestGraphSendUERecvMinOp): class 
TestMinCase7(TestGraphSendUERecvMinOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((15, 20)).astype("float64") @@ -720,23 +719,27 @@ class TestMinCase7(TestGraphSendUERecvMinOp): class API_GeometricSendUERecvTest(unittest.TestCase): - def test_compute_all_with_sum(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) y = paddle.ones(shape=[4, 1], dtype="float32") src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "add", "sum") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "sub", "sum") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "mul", "sum") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "div", "sum") + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "sum" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "sum" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "sum" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "sum" + ) res = [res_add, res_sub, res_mul, res_div] np_add = np.array([[1, 3, 4], [4, 10, 12], [2, 5, 6]], dtype="float32") @@ -750,25 +753,32 @@ class API_GeometricSendUERecvTest(unittest.TestCase): paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. - format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_with_mean(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) y = paddle.ones(shape=[4, 1], dtype="float32") src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "add", "mean") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "sub", "mean") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "mul", "mean") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "div", "mean") + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "mean" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "mean" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "mean" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "mean" + ) res = [res_add, res_sub, res_mul, res_div] np_add = np.array([[1, 3, 4], [2, 5, 6], [2, 5, 6]], dtype="float32") @@ -782,25 +792,32 @@ class API_GeometricSendUERecvTest(unittest.TestCase): paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. 
- format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_with_max(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) y = paddle.ones(shape=[4, 1], dtype="float32") src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "add", "max") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "sub", "max") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "mul", "max") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "div", "max") + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "max" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "max" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "max" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "max" + ) res = [res_add, res_sub, res_mul, res_div] np_add = np.array([[1, 3, 4], [3, 7, 8], [2, 5, 6]], dtype="float32") @@ -815,73 +832,91 @@ class API_GeometricSendUERecvTest(unittest.TestCase): paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. - format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_with_max_fp16(self): paddle.disable_static() if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, - 7]]), - dtype="float16") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float16" + ) y = paddle.ones(shape=[4, 1], dtype="float16") - src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), - dtype="int32") - dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), - dtype="int32") - - res_add = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "add", "max") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "sub", "max") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "mul", "max") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "div", "max") + src_index = paddle.to_tensor( + np.array([0, 1, 2, 0]), dtype="int32" + ) + dst_index = paddle.to_tensor( + np.array([1, 2, 1, 0]), dtype="int32" + ) + + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "max" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "max" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "max" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "max" + ) res = [res_add, res_sub, res_mul, res_div] - np_add = np.array([[1, 3, 4], [3, 7, 8], [2, 5, 6]], - dtype="float16") - np_sub = np.array([[-1, 1, 2], [1, 5, 6], [0, 3, 4]], - dtype="float16") - np_mul = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], - dtype="float16") - np_div = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], - dtype="float16") - - np.testing.assert_allclose(np_sub, - res_sub, - rtol=1e-05, - atol=1e-06) - for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], - res): + np_add = np.array( + [[1, 3, 4], [3, 7, 8], 
[2, 5, 6]], dtype="float16" + ) + np_sub = np.array( + [[-1, 1, 2], [1, 5, 6], [0, 3, 4]], dtype="float16" + ) + np_mul = np.array( + [[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float16" + ) + np_div = np.array( + [[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float16" + ) + + np.testing.assert_allclose( + np_sub, res_sub, rtol=1e-05, atol=1e-06 + ) + for np_res, paddle_res in zip( + [np_add, np_sub, np_mul, np_div], res + ): np.testing.assert_allclose( np_res, paddle_res, rtol=1e-05, atol=1e-06, - err_msg= - 'two value is {}\n{}, check diff!' - .format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_with_min(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) y = paddle.ones(shape=[4, 1], dtype="float32") src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "add", "min") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "sub", "min") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "mul", "min") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "div", "min") + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "min" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "min" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "min" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "min" + ) res = [res_add, res_sub, res_mul, res_div] np_add = np.array([[1, 3, 4], [1, 3, 4], [2, 5, 6]], dtype="float32") @@ -895,71 +930,90 @@ class API_GeometricSendUERecvTest(unittest.TestCase): paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. 
- format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_with_min_fp16(self): paddle.disable_static() if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, - 7]]), - dtype="float16") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float16" + ) y = paddle.ones(shape=[4, 1], dtype="float16") - src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), - dtype="int32") - dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), - dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "add", "min") - res_sub = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "sub", "min") - res_mul = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "mul", "min") - res_div = paddle.geometric.send_ue_recv(x, y, src_index, - dst_index, "div", "min") + src_index = paddle.to_tensor( + np.array([0, 1, 2, 0]), dtype="int32" + ) + dst_index = paddle.to_tensor( + np.array([1, 2, 1, 0]), dtype="int32" + ) + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "min" + ) + res_sub = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "sub", "min" + ) + res_mul = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "min" + ) + res_div = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "div", "min" + ) res = [res_add, res_sub, res_mul, res_div] - np_add = np.array([[1, 3, 4], [1, 3, 4], [2, 5, 6]], - dtype="float16") - np_sub = np.array([[-1, 1, 2], [-1, 1, 2], [0, 3, 4]], - dtype="float16") - np_mul = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], - dtype="float16") - np_div = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], - dtype="float16") - - for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], - res): + np_add = np.array( + [[1, 3, 4], [1, 3, 4], [2, 5, 6]], dtype="float16" + ) + np_sub = np.array( + [[-1, 1, 2], [-1, 1, 2], [0, 3, 4]], dtype="float16" + ) + np_mul = np.array( + [[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float16" + ) + np_div = np.array( + [[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float16" + ) + + for np_res, paddle_res in zip( + [np_add, np_sub, np_mul, np_div], res + ): np.testing.assert_allclose( np_res, paddle_res, rtol=1e-05, atol=1e-06, - err_msg= - 'two value is {}\n{}, check diff!' - .format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_reshape_lhs_rhs(self): paddle.disable_static() - x = paddle.to_tensor(np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), - dtype="float32") + x = paddle.to_tensor( + np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]]), dtype="float32" + ) x = x.reshape(shape=[3, 3, 1]) y = paddle.ones([4, 1], dtype="float32") src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "add", "min") - np_add = np.array([[1, 3, 4], [1, 3, 4], [2, 5, 6]], - dtype="float16").reshape([3, 3, 1]) + res_add = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "add", "min" + ) + np_add = np.array( + [[1, 3, 4], [1, 3, 4], [2, 5, 6]], dtype="float16" + ).reshape([3, 3, 1]) np.testing.assert_allclose( np_add, res_add, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. 
- format(np_add, res_add)) + err_msg='two value is {}\n{}, check diff!'.format( + np_add, res_add + ), + ) def test_out_size_tensor_static(self): paddle.enable_static() @@ -968,12 +1022,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase): y = paddle.static.data(name="y", shape=[3], dtype="float32") src_index = paddle.static.data(name="src", shape=[3], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[3], dtype="int32") - out_size = paddle.static.data(name="out_size", - shape=[1], - dtype="int32") + out_size = paddle.static.data( + name="out_size", shape=[1], dtype="int32" + ) - res_sum = paddle.geometric.send_ue_recv(x, y, src_index, dst_index, - "mul", "sum", out_size) + res_sum = paddle.geometric.send_ue_recv( + x, y, src_index, dst_index, "mul", "sum", out_size + ) exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 6]], dtype="float32") @@ -984,21 +1039,25 @@ class API_GeometricSendUERecvTest(unittest.TestCase): np_sum = np.array([[0, 2, 3], [3, 16, 21]], dtype="float32") - ret = exe.run(feed={ - 'x': data1, - 'y': data2, - 'src': data3, - 'dst': data4, - 'out_size': data5, - }, - fetch_list=[res_sum]) + ret = exe.run( + feed={ + 'x': data1, + 'y': data2, + 'src': data3, + 'dst': data4, + 'out_size': data5, + }, + fetch_list=[res_sum], + ) np.testing.assert_allclose( np_sum, ret[0], rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. - format(np_sum, ret[0])) + err_msg='two value is {}\n{}, check diff!'.format( + np_sum, ret[0] + ), + ) def test_api_eager_dygraph(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_graph_send_uv_op.py b/python/paddle/fluid/tests/unittests/test_graph_send_uv_op.py index df22555d357ba696af01a49f3adfa7b03896adac..9dd252b17c53197cbbbab4f46f8c13fbed05e436 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_send_uv_op.py +++ b/python/paddle/fluid/tests/unittests/test_graph_send_uv_op.py @@ -40,12 +40,12 @@ def compute_graph_send_uv(inputs, attributes): def graph_send_uv_wrapper(x, y, src_index, dst_index, message_op="add"): - return paddle.geometric.send_uv(x, y, src_index, dst_index, - message_op.lower()) + return paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op.lower() + ) class TestGraphSendUVOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = graph_send_uv_wrapper @@ -56,7 +56,7 @@ class TestGraphSendUVOp(OpTest): 'x': self.x, 'y': self.y, 'src_index': self.src_index, - 'dst_index': self.dst_index + 'dst_index': self.dst_index, } self.attrs = {'message_op': self.message_op} out = compute_graph_send_uv(self.inputs, self.attrs) @@ -78,7 +78,6 @@ class TestGraphSendUVOp(OpTest): class TestCase1(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((10, 20)).astype("float64") self.y = np.random.random((10, 20)).astype("float64") @@ -89,7 +88,6 @@ class TestCase1(TestGraphSendUVOp): class TestCase2(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((100, 20)).astype("float64") @@ -100,7 +98,6 @@ class TestCase2(TestGraphSendUVOp): class TestCase3(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((100, 20)).astype("float64") self.y = np.random.random((100, 1)).astype("float64") @@ -111,7 +108,6 @@ class TestCase3(TestGraphSendUVOp): class TestCase4(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((100, 1)).astype("float64") self.y = np.random.random((100, 
20)).astype("float64") @@ -122,7 +118,6 @@ class TestCase4(TestGraphSendUVOp): class TestCase5(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((100, 20)).astype("float64") self.y = np.random.random((100, 1)).astype("float64") @@ -133,7 +128,6 @@ class TestCase5(TestGraphSendUVOp): class TestCase6(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((10, 10, 1)).astype("float64") self.y = np.random.random((10, 10, 10)) @@ -144,7 +138,6 @@ class TestCase6(TestGraphSendUVOp): class TestCase7(TestGraphSendUVOp): - def set_config(self): self.x = np.random.random((10, 10, 1)).astype("float64") self.y = np.random.random((10, 10, 10)) @@ -155,7 +148,6 @@ class TestCase7(TestGraphSendUVOp): class API_GeometricSendUVTest(unittest.TestCase): - def test_compute_all_dygraph(self): paddle.disable_static() x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32") @@ -163,37 +155,33 @@ class API_GeometricSendUVTest(unittest.TestCase): src_index = paddle.to_tensor(np.array([0, 1, 2, 0]), dtype="int32") dst_index = paddle.to_tensor(np.array([1, 2, 1, 0]), dtype="int32") - res_add = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="add") - res_sub = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="sub") - res_mul = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="mul") - res_div = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="div") + res_add = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="add" + ) + res_sub = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="sub" + ) + res_mul = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="mul" + ) + res_div = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="div" + ) res = [res_add, res_sub, res_mul, res_div] - np_add = np.array([[2, 5, 7], [5, 9, 11], [4, 9, 11], [1, 3, 5]], - dtype="float32") - np_sub = np.array([[-2, -1, -1], [-3, -1, -1], [0, 3, 3], [-1, 1, 1]], - dtype="float32") - np_mul = np.array([[0, 6, 12], [4, 20, 30], [4, 18, 28], [0, 2, 6]], - dtype="float32") + np_add = np.array( + [[2, 5, 7], [5, 9, 11], [4, 9, 11], [1, 3, 5]], dtype="float32" + ) + np_sub = np.array( + [[-2, -1, -1], [-3, -1, -1], [0, 3, 3], [-1, 1, 1]], dtype="float32" + ) + np_mul = np.array( + [[0, 6, 12], [4, 20, 30], [4, 18, 28], [0, 2, 6]], dtype="float32" + ) np_div = np.array( [[0, 2 / 3, 0.75], [0.25, 0.8, 5 / 6], [1, 2, 7 / 4], [0, 2, 1.5]], - dtype="float32") + dtype="float32", + ) for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res): np.testing.assert_allclose( @@ -201,8 +189,10 @@ class API_GeometricSendUVTest(unittest.TestCase): paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!'. 
- format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_compute_all_static(self): paddle.enable_static() @@ -211,26 +201,18 @@ class API_GeometricSendUVTest(unittest.TestCase): y = paddle.static.data(name="y", shape=[3, 3], dtype="float32") src_index = paddle.static.data(name="src", shape=[4], dtype="int32") dst_index = paddle.static.data(name="dst", shape=[4], dtype="int32") - res_add = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="add") - res_sub = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="sub") - res_mul = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="mul") - res_div = paddle.geometric.send_uv(x, - y, - src_index, - dst_index, - message_op="div") + res_add = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="add" + ) + res_sub = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="sub" + ) + res_mul = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="mul" + ) + res_div = paddle.geometric.send_uv( + x, y, src_index, dst_index, message_op="div" + ) exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.array([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32") @@ -238,33 +220,48 @@ class API_GeometricSendUVTest(unittest.TestCase): data3 = np.array([0, 1, 2, 0], dtype="int32") data4 = np.array([1, 2, 1, 0], dtype="int32") - np_add = np.array([[2, 5, 7], [5, 9, 11], [4, 9, 11], [1, 3, 5]], - dtype="float32") + np_add = np.array( + [[2, 5, 7], [5, 9, 11], [4, 9, 11], [1, 3, 5]], dtype="float32" + ) np_sub = np.array( [[-2, -1, -1], [-3, -1, -1], [0, 3, 3], [-1, 1, 1]], - dtype="float32") - np_mul = np.array([[0, 6, 12], [4, 20, 30], [4, 18, 28], [0, 2, 6]], - dtype="float32") - np_div = np.array([[0, 2 / 3, 0.75], [0.25, 0.8, 5 / 6], - [1, 2, 7 / 4], [0, 2, 1.5]], - dtype="float32") - - ret = exe.run(feed={ - 'x': data1, - 'y': data2, - 'src': data3, - 'dst': data4, - }, - fetch_list=[res_add, res_sub, res_mul, res_div]) - for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], - ret): + dtype="float32", + ) + np_mul = np.array( + [[0, 6, 12], [4, 20, 30], [4, 18, 28], [0, 2, 6]], + dtype="float32", + ) + np_div = np.array( + [ + [0, 2 / 3, 0.75], + [0.25, 0.8, 5 / 6], + [1, 2, 7 / 4], + [0, 2, 1.5], + ], + dtype="float32", + ) + + ret = exe.run( + feed={ + 'x': data1, + 'y': data2, + 'src': data3, + 'dst': data4, + }, + fetch_list=[res_add, res_sub, res_mul, res_div], + ) + for np_res, paddle_res in zip( + [np_add, np_sub, np_mul, np_div], ret + ): np.testing.assert_allclose( np_res, paddle_res, rtol=1e-05, atol=1e-06, - err_msg='two value is {}\n{}, check diff!' 
- .format(np_res, paddle_res)) + err_msg='two value is {}\n{}, check diff!'.format( + np_res, paddle_res + ), + ) def test_api_eager_dygraph(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py index 7255f82bc07571695fa6348a3b7ad16e097896d1..75001091855325c5225e69176a8c57caf333fb90 100644 --- a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py +++ b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py @@ -21,14 +21,15 @@ import unittest class GridSampleTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - x_shape=[2, 2, 3, 3], - grid_shape=[2, 3, 3, 2], - mode="bilinear", - padding_mode="zeros", - align_corners=False): + def __init__( + self, + methodName='runTest', + x_shape=[2, 2, 3, 3], + grid_shape=[2, 3, 3, 2], + mode="bilinear", + padding_mode="zeros", + align_corners=False, + ): super(GridSampleTestCase, self).__init__(methodName) self.padding_mode = padding_mode self.x_shape = x_shape @@ -49,25 +50,29 @@ class GridSampleTestCase(unittest.TestCase): with fluid.program_guard(main, start): x = fluid.data("x", self.x_shape, dtype=self.dtype) grid = fluid.data("grid", self.grid_shape, dtype=self.dtype) - y_var = F.grid_sample(x, - grid, - mode=self.mode, - padding_mode=self.padding_mode, - align_corners=self.align_corners) + y_var = F.grid_sample( + x, + grid, + mode=self.mode, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + ) feed_dict = {"x": self.x, "grid": self.grid} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def dynamic_functional(self): x_t = paddle.to_tensor(self.x) grid_t = paddle.to_tensor(self.grid) - y_t = F.grid_sample(x_t, - grid_t, - mode=self.mode, - padding_mode=self.padding_mode, - align_corners=self.align_corners) + y_t = F.grid_sample( + x_t, + grid_t, + mode=self.mode, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + ) y_np = y_t.numpy() return y_np @@ -87,7 +92,6 @@ class GridSampleTestCase(unittest.TestCase): class GridSampleErrorTestCase(GridSampleTestCase): - def runTest(self): place = fluid.CPUPlace() with self.assertRaises(ValueError): @@ -97,22 +101,30 @@ class GridSampleErrorTestCase(GridSampleTestCase): def add_cases(suite): suite.addTest(GridSampleTestCase(methodName='runTest')) suite.addTest( - GridSampleTestCase(methodName='runTest', - mode='bilinear', - padding_mode='reflection', - align_corners=True)) + GridSampleTestCase( + methodName='runTest', + mode='bilinear', + padding_mode='reflection', + align_corners=True, + ) + ) suite.addTest( - GridSampleTestCase(methodName='runTest', - mode='bilinear', - padding_mode='zeros', - align_corners=True)) + GridSampleTestCase( + methodName='runTest', + mode='bilinear', + padding_mode='zeros', + align_corners=True, + ) + ) def add_error_cases(suite): suite.addTest( - GridSampleErrorTestCase(methodName='runTest', padding_mode="VALID")) + GridSampleErrorTestCase(methodName='runTest', padding_mode="VALID") + ) suite.addTest( - GridSampleErrorTestCase(methodName='runTest', align_corners="VALID")) + GridSampleErrorTestCase(methodName='runTest', align_corners="VALID") + ) suite.addTest(GridSampleErrorTestCase(methodName='runTest', mode="VALID")) @@ -124,7 +136,6 @@ def load_tests(loader, standard_tests, pattern): class TestGridSampleAPI(unittest.TestCase): - def 
test_errors(self): with self.assertRaises(ValueError): x = paddle.randn([1, 1, 3, 3]) diff --git a/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py b/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py index c162da6152800f57eb04d5282fbf0b3f8971838a..0245383045b855c933dcab17a4384af3dfd80a7c 100644 --- a/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py +++ b/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py @@ -25,12 +25,15 @@ def AffineGrid(theta, grid_shape): n = grid_shape[0] h = grid_shape[1] w = grid_shape[2] - h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, - axis=0).T[:, :, np.newaxis] - w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, - axis=0)[:, :, np.newaxis] - grid = np.concatenate([w_idx, h_idx, np.ones([h, w, 1])], - axis=2) # h * w * 3 + h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[ + :, :, np.newaxis + ] + w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[ + :, :, np.newaxis + ] + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2 + ) # h * w * 3 grid = np.repeat(grid[np.newaxis, :], n, axis=0) # n * h * w *3 ret = np.zeros([n, h * w, 2]) @@ -50,13 +53,17 @@ def getGridPointValue(data, x, y): out_H = x.shape[1] out_W = x.shape[2] - #out = np.zeros(data_shape, dtype='float64') + # out = np.zeros(data_shape, dtype='float64') out = np.zeros([N, C, out_H, out_W], dtype='float64') for i in range(N): for j in range(out_H): for k in range(out_W): - if y[i, j, k] < 0 or y[i, j, k] > in_H - 1 or x[ - i, j, k] < 0 or x[i, j, k] > in_W - 1: + if ( + y[i, j, k] < 0 + or y[i, j, k] > in_H - 1 + or x[i, j, k] < 0 + or x[i, j, k] > in_W - 1 + ): out[i, :, j, k] = 0 else: out[i, :, j, k] = data[i, :, y[i, j, k], x[i, j, k]] @@ -69,26 +76,24 @@ def AffineGrid3D(theta, grid_shape): d = grid_shape[1] h = grid_shape[2] w = grid_shape[3] - d_idx = np.repeat(np.repeat(np.linspace(-1, 1, d)[:, np.newaxis, - np.newaxis], - h, - axis=1), - w, - axis=2)[:, :, :, np.newaxis] - h_idx = np.repeat(np.repeat(np.linspace(-1, 1, h)[np.newaxis, :, - np.newaxis], - w, - axis=2), - d, - axis=0)[:, :, :, np.newaxis] - w_idx = np.repeat(np.repeat(np.linspace(-1, 1, w)[np.newaxis, - np.newaxis, :], - h, - axis=1), - d, - axis=0)[:, :, :, np.newaxis] + d_idx = np.repeat( + np.repeat(np.linspace(-1, 1, d)[:, np.newaxis, np.newaxis], h, axis=1), + w, + axis=2, + )[:, :, :, np.newaxis] + h_idx = np.repeat( + np.repeat(np.linspace(-1, 1, h)[np.newaxis, :, np.newaxis], w, axis=2), + d, + axis=0, + )[:, :, :, np.newaxis] + w_idx = np.repeat( + np.repeat(np.linspace(-1, 1, w)[np.newaxis, np.newaxis, :], h, axis=1), + d, + axis=0, + )[:, :, :, np.newaxis] grid = np.concatenate( - [w_idx, h_idx, d_idx, np.ones([d, h, w, 1])], axis=3) # d * h * w * 4 + [w_idx, h_idx, d_idx, np.ones([d, h, w, 1])], axis=3 + ) # d * h * w * 4 grid = np.repeat(grid[np.newaxis, :], n, axis=0) # n * d * h * w *4 ret = np.zeros([n, d * h * w, 3]) theta = theta.transpose([0, 2, 1]) @@ -114,13 +119,19 @@ def getGridPointValue3D(data, x, y, z): for j in range(out_D): for k in range(out_H): for l in range(out_W): - if y[i, j, k, l] < 0 or y[i, j, k, l] > in_H - 1 or x[ - i, j, k, l] < 0 or x[i, j, k, l] > in_W - 1 or z[ - i, j, k, l] < 0 or z[i, j, k, l] > in_D - 1: + if ( + y[i, j, k, l] < 0 + or y[i, j, k, l] > in_H - 1 + or x[i, j, k, l] < 0 + or x[i, j, k, l] > in_W - 1 + or z[i, j, k, l] < 0 + or z[i, j, k, l] > in_D - 1 + ): out[i, :, j, k, l] = 0 else: - out[i, :, j, k, l] = data[i, :, z[i, j, k, l], - y[i, j, k, l], x[i, j, k, 
l]] + out[i, :, j, k, l] = data[ + i, :, z[i, j, k, l], y[i, j, k, l], x[i, j, k, l] + ] return out @@ -133,27 +144,28 @@ def unnormalizeAndClip(grid_slice, max_val, align_corners, padding_mode): if align_corners: grid_slice = 0.5 * ((grid_slice.astype('float64') + 1.0) * max_val) else: - grid_slice = 0.5 * ((grid_slice.astype('float64') + 1.0) * - (max_val + 1)) - 0.5 + grid_slice = ( + 0.5 * ((grid_slice.astype('float64') + 1.0) * (max_val + 1)) - 0.5 + ) if padding_mode == "border": grid_slice = clip(grid_slice, 0, max_val) elif padding_mode == "reflection": double_range = 2 * max_val if align_corners else (max_val + 1) * 2 - grid_abs = np.abs(grid_slice) if align_corners else np.abs(grid_slice + - 0.5) + grid_abs = ( + np.abs(grid_slice) if align_corners else np.abs(grid_slice + 0.5) + ) extra = grid_abs - np.floor(grid_abs / double_range) * double_range grid_slice = np.minimum(extra, double_range - extra) - grid_slice = grid_slice if align_corners else clip( - grid_slice - 0.5, 0, max_val) + grid_slice = ( + grid_slice if align_corners else clip(grid_slice - 0.5, 0, max_val) + ) return grid_slice -def GridSampler(data, - grid, - align_corners=True, - mode="bilinear", - padding_mode="zeros"): +def GridSampler( + data, grid, align_corners=True, mode="bilinear", padding_mode="zeros" +): dims = data.shape N = dims[0] in_C = dims[1] @@ -177,14 +189,18 @@ def GridSampler(data, y0 = np.floor(y).astype('int32') y1 = y0 + 1 - wa = np.tile(((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wb = np.tile(((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wc = np.tile(((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wd = np.tile(((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) + wa = np.tile( + ((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wb = np.tile( + ((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wc = np.tile( + ((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wd = np.tile( + ((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) va = getGridPointValue(data, x0, y0) vb = getGridPointValue(data, x0, y1) @@ -199,11 +215,9 @@ def GridSampler(data, return out -def GridSampler3D(data, - grid, - align_corners=True, - mode="bilinear", - padding_mode="zeros"): +def GridSampler3D( + data, grid, align_corners=True, mode="bilinear", padding_mode="zeros" +): dims = data.shape N = dims[0] in_C = dims[1] @@ -235,22 +249,54 @@ def GridSampler3D(data, z0 = np.floor(z).astype('int32') z1 = z0 + 1 - w_tnw = np.tile(((x1 - x) * (y1 - y) * (z1 - z)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_tne = np.tile(((x - x0) * (y1 - y) * (z1 - z)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_tsw = np.tile(((x1 - x) * (y - y0) * (z1 - z)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_tse = np.tile(((x - x0) * (y - y0) * (z1 - z)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_bnw = np.tile(((x1 - x) * (y1 - y) * (z - z0)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_bne = np.tile(((x - x0) * (y1 - y) * (z - z0)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_bsw = np.tile(((x1 - x) * (y - y0) * (z - z0)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) - w_bse = np.tile(((x - x0) * (y - y0) * (z - z0)).reshape( - (N, 1, out_D, out_H, out_W)), (1, in_C, 1, 1, 1)) + w_tnw = np.tile( + ((x1 - x) * (y1 - y) * 
(z1 - z)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_tne = np.tile( + ((x - x0) * (y1 - y) * (z1 - z)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_tsw = np.tile( + ((x1 - x) * (y - y0) * (z1 - z)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_tse = np.tile( + ((x - x0) * (y - y0) * (z1 - z)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_bnw = np.tile( + ((x1 - x) * (y1 - y) * (z - z0)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_bne = np.tile( + ((x - x0) * (y1 - y) * (z - z0)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_bsw = np.tile( + ((x1 - x) * (y - y0) * (z - z0)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) + w_bse = np.tile( + ((x - x0) * (y - y0) * (z - z0)).reshape( + (N, 1, out_D, out_H, out_W) + ), + (1, in_C, 1, 1, 1), + ) v_tnw = getGridPointValue3D(data, x0, y0, z0) v_tne = getGridPointValue3D(data, x1, y0, z0) @@ -261,9 +307,16 @@ def GridSampler3D(data, v_bsw = getGridPointValue3D(data, x0, y1, z1) v_bse = getGridPointValue3D(data, x1, y1, z1) - out = (w_tnw * v_tnw + w_tne * v_tne + w_tsw * v_tsw + w_tse * v_tse + - w_bnw * v_bnw + w_bne * v_bne + w_bsw * v_bsw + - w_bse * v_bse).astype('float64') + out = ( + w_tnw * v_tnw + + w_tne * v_tne + + w_tsw * v_tsw + + w_tse * v_tse + + w_bnw * v_bnw + + w_bne * v_bne + + w_bsw * v_bsw + + w_bse * v_bse + ).astype('float64') elif mode == "nearest": x = np.round(x).astype('int32') @@ -274,7 +327,6 @@ def GridSampler3D(data, class TestGridSamplerOp(OpTest): - def setUp(self): self.use_cudnn = False self.numeric_grad_delta = 0.0001 @@ -298,12 +350,12 @@ class TestGridSamplerOp(OpTest): 'use_cudnn': self.use_cudnn, "align_corners": self.align_corners, "padding_mode": self.padding_mode, - "mode": self.mode + "mode": self.mode, } self.outputs = { - 'Output': - GridSampler(x, grid, self.align_corners, self.mode, - self.padding_mode) + 'Output': GridSampler( + x, grid, self.align_corners, self.mode, self.padding_mode + ) } else: for i in range(self.theta_shape[0]): @@ -316,23 +368,25 @@ class TestGridSamplerOp(OpTest): 'use_cudnn': self.use_cudnn, "align_corners": self.align_corners, "padding_mode": self.padding_mode, - "mode": self.mode + "mode": self.mode, } self.outputs = { - 'Output': - GridSampler3D(x, grid, self.align_corners, self.mode, - self.padding_mode) + 'Output': GridSampler3D( + x, grid, self.align_corners, self.mode, self.padding_mode + ) } def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Grid'], - 'Output', - max_relative_error=0.01, - numeric_grad_delta=self.numeric_grad_delta, - check_eager=True) + self.check_grad( + ['X', 'Grid'], + 'Output', + max_relative_error=0.01, + numeric_grad_delta=self.numeric_grad_delta, + check_eager=True, + ) def initTestCase(self): self.x_shape = (2, 3, 8, 8) @@ -345,7 +399,6 @@ class TestGridSamplerOp(OpTest): class Case1(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -356,7 +409,6 @@ class Case1(TestGridSamplerOp): class Case1_(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -367,7 +419,6 @@ class Case1_(TestGridSamplerOp): class Case2(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -378,7 +429,6 @@ class Case2(TestGridSamplerOp): class 
Case3(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -389,7 +439,6 @@ class Case3(TestGridSamplerOp): class Case4(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -400,10 +449,11 @@ class Case4(TestGridSamplerOp): self.numeric_grad_delta = 0.0001 -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class LargeInputCase(TestGridSamplerOp): - def get_places(self): places = [] if core.is_compiled_with_cuda(): @@ -423,10 +473,11 @@ class LargeInputCase(TestGridSamplerOp): pass -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class Case5(LargeInputCase): - def initTestCase(self): self.no_need_check_grad = True self.x_shape = (2, 3, 128, 128) @@ -439,7 +490,6 @@ class Case5(LargeInputCase): class Case6(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6, 7) self.grid_shape = (2, 8, 9, 10, 3) @@ -451,7 +501,6 @@ class Case6(TestGridSamplerOp): class Case6_(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 4, 5, 6) self.grid_shape = (2, 7, 8, 9, 3) @@ -463,7 +512,6 @@ class Case6_(TestGridSamplerOp): class Case7(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 4, 5, 6) self.grid_shape = (2, 7, 8, 9, 3) @@ -475,7 +523,6 @@ class Case7(TestGridSamplerOp): class Case8(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 4, 5, 6) self.grid_shape = (2, 7, 8, 9, 3) @@ -487,7 +534,6 @@ class Case8(TestGridSamplerOp): class Case9(TestGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 4, 5, 6) self.grid_shape = (2, 7, 8, 9, 3) @@ -498,10 +544,11 @@ class Case9(TestGridSamplerOp): self.numeric_grad_delta = 0.000001 -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class LargeInput3DCase(TestGridSamplerOp): - def get_places(self): places = [] if core.is_compiled_with_cuda(): @@ -523,10 +570,11 @@ class LargeInput3DCase(TestGridSamplerOp): pass -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class Case10(LargeInput3DCase): - def initTestCase(self): self.no_need_check_grad = True self.x_shape = (2, 3, 24, 24, 12) diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py index 3ae2bf0a391f77ac7aa786d72996e9fa6d080a72..480efd5686e7cb6cf8c8dd6aeb6de100c4f73191 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -32,14 +32,14 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): var = np.var(x, axis=1, keepdims=True) output = (x - mean) / np.sqrt(var + epsilon) output = output.reshape((N, C, H, 
W)) * scale.reshape( - (-1, 1, 1)) + bias.reshape((-1, 1, 1)) + (-1, 1, 1) + ) + bias.reshape((-1, 1, 1)) if data_layout == "NHWC": output = np.transpose(output, (0, 2, 3, 1)) # NCHW => NHWC return output, mean.reshape((N, G)), var.reshape((N, G)) class TestGroupNormOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -51,9 +51,9 @@ class TestGroupNormOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[2, 100, 3, 5], - dtype='int32') + x2 = fluid.layers.data( + name='x2', shape=[2, 100, 3, 5], dtype='int32' + ) groups = 2 fluid.layers.group_norm(x2, groups) @@ -61,7 +61,6 @@ class TestGroupNormOpError(unittest.TestCase): class TestGroupNormOp(OpTest): - def setUp(self): self.op_type = "group_norm" self.data_format = "NCHW" @@ -76,15 +75,19 @@ class TestGroupNormOp(OpTest): input = np.transpose(input, (0, 2, 3, 1)) scale = np.random.random([self.shape[1]]).astype(self.dtype) bias = np.random.random([self.shape[1]]).astype(self.dtype) - output, mean, var = group_norm_naive(input, scale, bias, - self.attrs['epsilon'], - self.attrs['groups'], - self.data_format) + output, mean, var = group_norm_naive( + input, + scale, + bias, + self.attrs['epsilon'], + self.attrs['groups'], + self.data_format, + ) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(input), 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias) + 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), } self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} self.attrs['data_layout'] = self.data_format @@ -106,28 +109,37 @@ class TestGroupNormOp(OpTest): # Set to inplace_atol to 0, which means the absolute error is 0, and the # relative error is 1e-05 in numpy.allclose by default. 
# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html - self.check_output_with_place(place, - atol=atol, - inplace_atol=inplace_atol) + self.check_output_with_place( + place, atol=atol, inplace_atol=inplace_atol + ) def do_compare_between_place(self): - if not core.is_compiled_with_cuda(): return + if not core.is_compiled_with_cuda(): + return place = core.CPUPlace() place2 = core.CUDAPlace(0) self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else dict() - self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, - op_attrs) + self.op = create_op( + self.scope, self.op_type, op_inputs, op_outputs, op_attrs + ) inputs_to_check = set(['X', 'Scale', 'Bias']) output_names = 'Y' - cpu_grads = self._get_gradient(inputs_to_check, place, output_names, - None) - gpu_grads = self._get_gradient(inputs_to_check, place2, output_names, - None) - self._assert_is_close(cpu_grads, gpu_grads, inputs_to_check, 0.005, - "Gradient Check On %s" % str(place)) + cpu_grads = self._get_gradient( + inputs_to_check, place, output_names, None + ) + gpu_grads = self._get_gradient( + inputs_to_check, place2, output_names, None + ) + self._assert_is_close( + cpu_grads, + gpu_grads, + inputs_to_check, + 0.005, + "Gradient Check On %s" % str(place), + ) def test_check_grad(self): if self.compare_between_place: @@ -149,44 +161,37 @@ class TestGroupNormOp(OpTest): class TestGroupNormOp1(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 class TestGroupNormOp2(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 class TestGroupNormOpBigEps1(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps2(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 class TestGroupNormOpBigEps3(TestGroupNormOp): - def init_test_case(self): self.attrs['epsilon'] = 0.5 @skip_check_grad_ci( - reason= - '''This test case is used to ensure whether the gradient checking results between CPU and GPU + reason='''This test case is used to ensure whether the gradient checking results between CPU and GPU are consistent when using the same inputs, thus, it doesn't need to call check_grad.''' ) class TestGroupNormOpLargeData(TestGroupNormOp): - def init_test_case(self): self.shape = (2, 32, 64, 64) self.attrs['groups'] = 8 @@ -194,21 +199,18 @@ class TestGroupNormOpLargeData(TestGroupNormOp): class TestGroupNormOp1_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.data_format = "NHWC" class TestGroupNormOp2_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.data_format = "NHWC" class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 1 self.attrs['epsilon'] = 0.5 @@ -216,7 +218,6 @@ class TestGroupNormOpBigEps1_With_NHWC(TestGroupNormOp): class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['groups'] = 4 self.attrs['epsilon'] = 0.5 @@ -224,19 +225,16 @@ class TestGroupNormOpBigEps2_With_NHWC(TestGroupNormOp): class TestGroupNormOpBigEps3_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.attrs['epsilon'] = 0.5 self.data_format = "NHWC" @skip_check_grad_ci( - reason= - '''This test case is used to ensure whether the gradient checking results 
between CPU and GPU + reason='''This test case is used to ensure whether the gradient checking results between CPU and GPU are consistent when using the same inputs, thus, it doesn't need to call check_grad.''' ) class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp): - def init_test_case(self): self.shape = (2, 64, 32, 32) # NCHW self.attrs['groups'] = 8 @@ -245,16 +243,15 @@ class TestGroupNormOpLargeData_With_NHWC(TestGroupNormOp): class TestGroupNormAPI_With_NHWC(unittest.TestCase): - def test_case1(self): data1 = fluid.data(name='data1', shape=[None, 3, 3, 4], dtype='float64') - out1 = fluid.layers.group_norm(input=data1, - groups=2, - data_layout="NHWC") + out1 = fluid.layers.group_norm( + input=data1, groups=2, data_layout="NHWC" + ) data2 = fluid.data(name='data2', shape=[None, 4, 3, 3], dtype='float64') - out2 = fluid.layers.group_norm(input=data2, - groups=2, - data_layout="NCHW") + out2 = fluid.layers.group_norm( + input=data2, groups=2, data_layout="NCHW" + ) data1_np = np.random.random((2, 3, 3, 4)).astype("float64") data2_np = np.random.random((2, 4, 3, 3)).astype("float64") @@ -263,25 +260,18 @@ class TestGroupNormAPI_With_NHWC(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2], - return_numpy=True) - expect_res1 = group_norm_naive(data1_np, - scale, - bias, - epsilon=1e-5, - groups=2, - data_layout="NHWC") - expect_res2 = group_norm_naive(data2_np, - scale, - bias, - epsilon=1e-5, - groups=2, - data_layout="NCHW") + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2], + return_numpy=True, + ) + expect_res1 = group_norm_naive( + data1_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NHWC" + ) + expect_res2 = group_norm_naive( + data2_np, scale, bias, epsilon=1e-5, groups=2, data_layout="NCHW" + ) np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05) np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05) @@ -292,15 +282,14 @@ class TestGroupNormException(unittest.TestCase): data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64") def attr_data_format(): - out = fluid.layers.group_norm(input=data, - groups=2, - data_layout="NDHW") + out = fluid.layers.group_norm( + input=data, groups=2, data_layout="NDHW" + ) self.assertRaises(ValueError, attr_data_format) class TestGroupNormEager(unittest.TestCase): - def test_dygraph_api(self): self.dtype = np.float64 self.shape = (8, 32, 32) @@ -315,13 +304,17 @@ class TestGroupNormEager(unittest.TestCase): with _test_eager_guard(): tensor_eager_1 = fluid.dygraph.to_variable(input) tensor_eager_1.stop_gradient = False - groupNorm_eager = fluid.dygraph.nn.GroupNorm(channels=32, - groups=4) + groupNorm_eager = fluid.dygraph.nn.GroupNorm( + channels=32, groups=4 + ) ret2 = groupNorm_eager(tensor_eager_1) ret2.backward() - self.assertEqual(( - tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(), - True) + self.assertEqual( + ( + tensor_1.grad.numpy() == tensor_eager_1.grad.numpy() + ).all(), + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py index 52889f757df72fba9543a7528d1283feb8d38e07..63b69e6107b7828d80c01086fab3e828c8d02ec2 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py +++ 
b/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py @@ -33,18 +33,23 @@ def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups): var = np.var(x, axis=1, keepdims=True) output = (x - mean) / np.sqrt(var + epsilon) output = output.reshape(input_shape) * scale.reshape( - (-1, 1, 1)) + bias.reshape((-1, 1, 1)) + (-1, 1, 1) + ) + bias.reshape((-1, 1, 1)) return output class TestDygraphGroupNormv2(unittest.TestCase): - def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"): places.append(fluid.CUDAPlace(0)) - shapes = [[2, 2, 2, 2], [2, 2, 4], [4, 2], [4, 2, 6, 6, 2], - [2, 2, 2, 2, 2, 2]] + shapes = [ + [2, 2, 2, 2], + [2, 2, 4], + [4, 2], + [4, 2, 6, 6, 2], + [2, 2, 2, 2, 2, 2], + ] for p in places: def compute_v1(x): @@ -61,18 +66,20 @@ class TestDygraphGroupNormv2(unittest.TestCase): def test_weight_bias_false(): with fluid.dygraph.guard(p): - gn = paddle.nn.GroupNorm(num_channels=2, - num_groups=2, - weight_attr=False, - bias_attr=False) + gn = paddle.nn.GroupNorm( + num_channels=2, + num_groups=2, + weight_attr=False, + bias_attr=False, + ) def test_nn_exception(): with fluid.dygraph.guard(p): def attr_data_format(): - out = paddle.nn.GroupNorm(num_groups=2, - num_channels=2, - data_format="NHWC") + out = paddle.nn.GroupNorm( + num_groups=2, num_channels=2, data_format="NHWC" + ) self.assertRaises(ValueError, attr_data_format) @@ -92,8 +99,13 @@ class TestDygraphGroupNormv2(unittest.TestCase): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"): places.append(fluid.CUDAPlace(0)) - shapes = [[2, 6, 2, 2], [2, 6, 4], [4, 6], [4, 6, 6, 6, 2], - [4, 6, 2, 2, 2, 2]] + shapes = [ + [2, 6, 2, 2], + [2, 6, 4], + [4, 6], + [4, 6, 6, 6, 2], + [4, 6, 2, 2, 2, 2], + ] for p in places: exe = fluid.Executor(p) @@ -127,11 +139,15 @@ class TestDygraphGroupNormv2(unittest.TestCase): class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase): - def test_numerical_accuracy(self): paddle.disable_static() - shapes = [(2, 6), (2, 6, 4), (2, 6, 4, 4), (2, 6, 6, 6, 2), - (2, 6, 6, 6, 2, 3)] + shapes = [ + (2, 6), + (2, 6, 4), + (2, 6, 4, 4), + (2, 6, 6, 6, 2), + (2, 6, 6, 6, 2, 3), + ] np.random.seed(10) places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"): @@ -143,9 +159,11 @@ class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase): bias = np.array([0]).astype("float32") data = np.random.random(shape).astype("float32") expect_res1 = group_norm_naive_for_general_dimension( - data, scale, bias, epsilon=1e-5, groups=6) + data, scale, bias, epsilon=1e-5, groups=6 + ) expect_res2 = group_norm_naive_for_general_dimension( - data, scale, bias, epsilon=1e-5, groups=2) + data, scale, bias, epsilon=1e-5, groups=2 + ) gn1 = paddle.nn.GroupNorm(num_channels=6, num_groups=6) gn2 = paddle.nn.GroupNorm(num_channels=6, num_groups=2) @@ -161,9 +179,7 @@ class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase): class TestGroupNormDimException(unittest.TestCase): - def test_exception(self): - def test_empty_input_static_API(): x = paddle.to_tensor([], dtype='float32') paddle.static.nn.group_norm(x, 3) @@ -171,7 +187,7 @@ class TestGroupNormDimException(unittest.TestCase): self.assertRaises(ValueError, test_empty_input_static_API) def test_one_dim_input_static_API(): - x = paddle.randn((3, ), dtype='float32') + x = paddle.randn((3,), dtype='float32') paddle.static.nn.group_norm(x, 3) self.assertRaises(ValueError, 
test_one_dim_input_static_API) diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py index 4e51c1da0e8b2f7b0016a870c8ed117930f19ad6..d2753dfd205f81bbd1e83d31a0b5e6db87d9643f 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_op.py @@ -22,17 +22,17 @@ from paddle.fluid import Program, program_guard def gru( - input, # T x 3D - lod, # 1 x N - h0, # N x D - weight, # D x 3D - bias, # 1 x 3D - is_reverse, - act_state, - act_gate, - dtype='float32', - origin_mode=False): - + input, # T x 3D + lod, # 1 x N + h0, # N x D + weight, # D x 3D + bias, # 1 x 3D + is_reverse, + act_state, + act_gate, + dtype='float32', + origin_mode=False, +): def _seq_to_batch(lod, is_reverse): idx_in_seq_list = [] seq_lens = lod[0] @@ -41,16 +41,19 @@ def gru( seq_starts.append(seq_starts[-1] + seq_lens[i]) sorted_seqs = sorted( list(range(len(seq_lens))), - key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x])) + key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]), + ) num_batch = seq_lens[sorted_seqs[0]] for batch_idx in range(num_batch): idx_in_seq = [] for i in range(len(seq_lens)): if seq_lens[sorted_seqs[i]] <= batch_idx: break - idx = (seq_starts[sorted_seqs[i] + 1] - 1 - - batch_idx) if is_reverse else ( - seq_starts[sorted_seqs[i]] + batch_idx) + idx = ( + (seq_starts[sorted_seqs[i] + 1] - 1 - batch_idx) + if is_reverse + else (seq_starts[sorted_seqs[i]] + batch_idx) + ) idx_in_seq.append(idx) idx_in_seq_list.append(idx_in_seq) return idx_in_seq_list, sorted_seqs @@ -59,13 +62,13 @@ def gru( T = x.shape[0] D = w.shape[0] g = x + np.tile(b, (T, 1)) - w_u_r = w.flatten()[:D * D * 2].reshape((D, D * 2)) - u_r = act_gate(np.dot(h_p, w_u_r) + g[:, :D * 2]) + w_u_r = w.flatten()[: D * D * 2].reshape((D, D * 2)) + u_r = act_gate(np.dot(h_p, w_u_r) + g[:, : D * 2]) u = u_r[:, :D] - r = u_r[:, D:D * 2] + r = u_r[:, D : D * 2] r_h_p = r * h_p - w_c = w.flatten()[D * D * 2:].reshape((D, D)) - c = act_state(np.dot(r_h_p, w_c) + g[:, D * 2:]) + w_c = w.flatten()[D * D * 2 :].reshape((D, D)) + c = act_state(np.dot(r_h_p, w_c) + g[:, D * 2 :]) g = np.hstack((u_r, c)) if origin_mode: h = (1 - u) * c + u * h_p @@ -90,7 +93,7 @@ def gru( x = input[idx_in_seq_list[batch_idx]] g, r_h_p, h = _step(x, h_p, weight, bias, act_state, act_gate) if batch_idx < (max_seq_len - 1): - h_p = h[:len(idx_in_seq_list[batch_idx + 1])] + h_p = h[: len(idx_in_seq_list[batch_idx + 1])] start_idx = end_idx end_idx = start_idx + len(idx_in_seq_list[batch_idx]) batch_gate[start_idx:end_idx] = g @@ -101,7 +104,6 @@ def gru( class TestGRUOp(OpTest): - def set_confs(self): pass @@ -126,17 +128,29 @@ class TestGRUOp(OpTest): N = len(self.lod[0]) input = np.random.rand(T, 3 * self.D).astype(self.dtype) weight = np.random.rand(self.D, 3 * self.D).astype(self.dtype) - bias = np.random.rand(1, 3 * self.D).astype( - self.dtype) if self.with_bias else np.zeros( - (1, 3 * self.D), dtype=self.dtype) - h0 = np.random.rand(N, self.D).astype( - self.dtype) if self.with_h0 else np.zeros( - (N, self.D), dtype=self.dtype) + bias = ( + np.random.rand(1, 3 * self.D).astype(self.dtype) + if self.with_bias + else np.zeros((1, 3 * self.D), dtype=self.dtype) + ) + h0 = ( + np.random.rand(N, self.D).astype(self.dtype) + if self.with_h0 + else np.zeros((N, self.D), dtype=self.dtype) + ) batch_gate, batch_reset_hidden_prev, batch_hidden, hidden = gru( - input, self.lod, h0, weight, bias, self.is_reverse, - 
ACTIVATION[self.act_state], ACTIVATION[self.act_gate], self.dtype, - self.origin_mode) + input, + self.lod, + h0, + weight, + bias, + self.is_reverse, + ACTIVATION[self.act_state], + ACTIVATION[self.act_gate], + self.dtype, + self.origin_mode, + ) self.inputs = {'Input': (input, self.lod), 'Weight': weight} if self.with_bias: @@ -157,45 +171,41 @@ class TestGRUOp(OpTest): 'gate_activation': self.act_gate, 'is_reverse': self.is_reverse, 'origin_mode': self.origin_mode, - 'is_test': self.is_test + 'is_test': self.is_test, } def test_check_output(self): self.check_output(atol=1e-8, check_dygraph=False) def test_check_grad(self): - self.check_grad(['Input', 'H0', 'Weight', 'Bias'], ['Hidden'], - check_dygraph=False) + self.check_grad( + ['Input', 'H0', 'Weight', 'Bias'], ['Hidden'], check_dygraph=False + ) class TestGRUOriginMode(TestGRUOp): - def set_confs(self): self.origin_mode = True class TestGRUOp2(TestGRUOp): - def set_confs(self): self.dtype = 'float64' class TestGRUOp2Len0(TestGRUOp): - def set_confs(self): self.lod = [[2, 0, 4]] self.dtype = 'float64' class TestGRUOp2OriginMode(TestGRUOp): - def set_confs(self): self.dtype = 'float64' self.origin_mode = True class TestGRUOp2OriginModeLen0(TestGRUOp): - def set_confs(self): self.lod = [[0, 3, 4]] self.dtype = 'float64' @@ -203,7 +213,6 @@ class TestGRUOp2OriginModeLen0(TestGRUOp): class TestGRUOp2OriginModeLastLen0(TestGRUOp): - def set_confs(self): self.lod = [[0, 3, 0]] self.dtype = 'float64' @@ -211,40 +220,37 @@ class TestGRUOp2OriginModeLastLen0(TestGRUOp): class TestGRUOpNoInitial(TestGRUOp): - def set_confs(self): self.with_h0 = False def test_check_grad(self): - self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden'], - check_dygraph=False) + self.check_grad( + ['Input', 'Weight', 'Bias'], ['Hidden'], check_dygraph=False + ) class TestGRUOpNoBias(TestGRUOp): - def set_confs(self): self.with_bias = False def test_check_grad(self): - self.check_grad(['Input', 'H0', 'Weight'], ['Hidden'], - check_dygraph=False) + self.check_grad( + ['Input', 'H0', 'Weight'], ['Hidden'], check_dygraph=False + ) class TestGRUOpReverse(TestGRUOp): - def set_confs(self): self.is_reverse = True class TestGRUOpReverseOriginMode(TestGRUOp): - def set_confs(self): self.is_reverse = True self.origin_mode = True class TestGRUOpInference(TestGRUOp): - def set_is_test(self): self.is_test = True @@ -260,7 +266,6 @@ class TestGRUOpInference(TestGRUOp): class TestGruOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -271,9 +276,9 @@ class TestGruOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) def test_h_0(): - in_data = fluid.data(name="input", - shape=[None, 1536], - dtype="float32") + in_data = fluid.data( + name="input", shape=[None, 1536], dtype="float32" + ) h = fluid.data(name="h", shape=[None, 512], dtype="int32") fluid.layers.dynamic_gru(input=in_data, size=512, h_0=h) diff --git a/python/paddle/fluid/tests/unittests/test_gru_rnn_op.py b/python/paddle/fluid/tests/unittests/test_gru_rnn_op.py index 0a10eb74f5511a6ca13ce506adc47f68b58de83a..89379ac87f3493ee75305ce504d04ec4735b95cb 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_rnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_rnn_op.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestGRUOp(OpTest): - def get_weight_names(self): weight_names = [] for i in range(self.num_layers): @@ -45,13 +44,16 @@ class TestGRUOp(OpTest): def setUp(self): self.op_type = "rnn" self.dtype = "float32" if 
core.is_compiled_with_rocm() else "float64" - self.sequence_length = None if core.is_compiled_with_rocm( - ) else np.array([12, 11, 10, 9, 8, 7, 6, 5], dtype=np.int32) + self.sequence_length = ( + None + if core.is_compiled_with_rocm() + else np.array([12, 11, 10, 9, 8, 7, 6, 5], dtype=np.int32) + ) self.num_layers = 1 self.is_bidirec = False self.is_test = False self.mode = "GRU" - self.dropout = 0. + self.dropout = 0.0 seq_length = 12 batch_size = 8 input_size = 4 @@ -61,10 +63,9 @@ class TestGRUOp(OpTest): self.direction_num = 2 if self.is_bidirec else 1 direction = "bidirectional" if self.is_bidirec else "forward" - input = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_length, batch_size, - input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, high=0.1, size=(seq_length, batch_size, input_size) + ).astype(self.dtype) if self.sequence_length is not None: input[3][1:][:] = 0 @@ -72,13 +73,15 @@ class TestGRUOp(OpTest): input[2][3:][:] = 0 input[1][4:][:] = 0 - rnn1 = GRU(input_size, - self.hidden_size, - num_layers=self.num_layers, - time_major=True, - direction=direction, - dropout=self.dropout, - dtype=self.dtype) + rnn1 = GRU( + input_size, + self.hidden_size, + num_layers=self.num_layers, + time_major=True, + direction=direction, + dropout=self.dropout, + dtype=self.dtype, + ) flat_w = get_params_for_net(rnn1) @@ -92,8 +95,9 @@ class TestGRUOp(OpTest): self._get_places = rocm_rnn_get_place - init_h = np.zeros((self.num_layers * self.direction_num, batch_size, - self.hidden_size)).astype(self.dtype) + init_h = np.zeros( + (self.num_layers * self.direction_num, batch_size, self.hidden_size) + ).astype(self.dtype) state_out = np.ndarray((300)).astype("uint8") @@ -101,7 +105,7 @@ class TestGRUOp(OpTest): 'Input': input, 'WeightList': flat_w, 'PreState': [('init_h', init_h)], - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if self.sequence_length is None: self.inputs = { @@ -116,13 +120,13 @@ class TestGRUOp(OpTest): 'hidden_size': self.hidden_size, 'num_layers': self.num_layers, 'is_test': self.is_test, - 'mode': self.mode + 'mode': self.mode, } self.outputs = { 'Out': output, 'State': [('last_hidden', last_hidden)], 'Reserve': np.ndarray((400)).astype("uint8"), - 'DropoutState': state_out + 'DropoutState': state_out, } def set_attrs(self): @@ -140,27 +144,23 @@ class TestGRUOp(OpTest): class TestGRUOp1(TestGRUOp): - def set_attrs(self): self.sequence_length = None class TestGRUOp2(TestGRUOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True class TestGRUOp3(TestGRUOp): - def set_attrs(self): self.sequence_length = None self.is_test = True class TestGRUOp4(TestGRUOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True @@ -168,7 +168,6 @@ class TestGRUOp4(TestGRUOp): class TestGRUOpAvx(TestGRUOp): - def set_attrs(self): self.dtype = "float32" self.hidden_size = 8 diff --git a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py index 96c277416107a4e0e50337421f3cda09135865ba..7111e4f8b9cd3b737c5efb5b686277d92a076ad1 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py @@ -23,14 +23,14 @@ from paddle.fluid.framework import program_guard, Program class TestGRUUnitAPIError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): D = 5 layer = fluid.dygraph.nn.GRUUnit(size=D * 3) # the input must be 
Variable. - x0 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x0 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, layer, x0) # the input dtype must be float32 or float64 x = fluid.data(name='x', shape=[-1, D * 3], dtype='float16') @@ -50,11 +50,11 @@ def identity(x): def sigmoid(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) def tanh(x): - return 2. * sigmoid(2. * x) - 1. + return 2.0 * sigmoid(2.0 * x) - 1.0 def relu(x): @@ -62,21 +62,22 @@ def relu(x): class TestGRUUnitOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size = 5 hidden_dim = 40 - input = fluid.data(name='input', - shape=[None, hidden_dim * 3], - dtype='float32') - pre_hidden = fluid.data(name='pre_hidden', - shape=[None, hidden_dim], - dtype='float32') + input = fluid.data( + name='input', shape=[None, hidden_dim * 3], dtype='float32' + ) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, hidden_dim], dtype='float32' + ) np_input = np.random.uniform( - -0.1, 0.1, (batch_size, hidden_dim * 3)).astype('float64') + -0.1, 0.1, (batch_size, hidden_dim * 3) + ).astype('float64') np_pre_hidden = np.random.uniform( - -0.1, 0.1, (batch_size, hidden_dim)).astype('float64') + -0.1, 0.1, (batch_size, hidden_dim) + ).astype('float64') def test_input_Variable(): gru_unit(np_input, pre_hidden, hidden_dim * 3) @@ -89,17 +90,21 @@ class TestGRUUnitOpError(unittest.TestCase): self.assertRaises(TypeError, test_pre_hidden_Variable) def test_input_type(): - error_input = fluid.data(name='error_input', - shape=[None, hidden_dim * 3], - dtype='int32') + error_input = fluid.data( + name='error_input', + shape=[None, hidden_dim * 3], + dtype='int32', + ) gru_unit(error_input, pre_hidden, hidden_dim * 3) self.assertRaises(TypeError, test_input_type) def test_pre_hidden_type(): - error_pre_hidden = fluid.data(name='error_pre_hidden', - shape=[None, hidden_dim], - dtype='int32') + error_pre_hidden = fluid.data( + name='error_pre_hidden', + shape=[None, hidden_dim], + dtype='int32', + ) gru_unit(input, error_pre_hidden, hidden_dim * 3) self.assertRaises(TypeError, test_pre_hidden_type) @@ -120,21 +125,22 @@ class TestGRUUnitOp(OpTest): frame_size = self.frame_size self.op_type = 'gru_unit' self.inputs = { - 'Input': - np.random.uniform(-0.1, 0.1, - (batch_size, frame_size * 3)).astype(self.dtype), - 'HiddenPrev': - np.random.uniform(-0.1, 0.1, - (batch_size, frame_size)).astype(self.dtype), - 'Weight': - np.random.uniform(-1. / math.sqrt(frame_size), - 1. 
/ math.sqrt(frame_size), - (frame_size, frame_size * 3)).astype(self.dtype), + 'Input': np.random.uniform( + -0.1, 0.1, (batch_size, frame_size * 3) + ).astype(self.dtype), + 'HiddenPrev': np.random.uniform( + -0.1, 0.1, (batch_size, frame_size) + ).astype(self.dtype), + 'Weight': np.random.uniform( + -1.0 / math.sqrt(frame_size), + 1.0 / math.sqrt(frame_size), + (frame_size, frame_size * 3), + ).astype(self.dtype), } self.attrs = { 'activation': GRUActivationType.tanh, 'gate_activation': GRUActivationType.sigmoid, - 'origin_mode': origin_mode + 'origin_mode': origin_mode, } def set_outputs(self, origin_mode=False): @@ -144,21 +150,27 @@ class TestGRUUnitOp(OpTest): x = self.inputs['Input'] h_p = self.inputs['HiddenPrev'] w = self.inputs['Weight'] - b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros( - (1, frame_size * 3)) + b = ( + self.inputs['Bias'] + if 'Bias' in self.inputs + else np.zeros((1, frame_size * 3)) + ) g = x + np.tile(b, (batch_size, 1)) - w_u_r = w.flatten()[:frame_size * frame_size * 2].reshape( - (frame_size, frame_size * 2)) - u_r = self.activate[self.attrs['gate_activation']](np.dot(h_p, w_u_r) + - g[:, :frame_size * - 2]) + w_u_r = w.flatten()[: frame_size * frame_size * 2].reshape( + (frame_size, frame_size * 2) + ) + u_r = self.activate[self.attrs['gate_activation']]( + np.dot(h_p, w_u_r) + g[:, : frame_size * 2] + ) u = u_r[:, :frame_size] - r = u_r[:, frame_size:frame_size * 2] + r = u_r[:, frame_size : frame_size * 2] r_h_p = r * h_p - w_c = w.flatten()[frame_size * frame_size * 2:].reshape( - (frame_size, frame_size)) - c = self.activate[self.attrs['activation']](np.dot(r_h_p, w_c) + - g[:, frame_size * 2:]) + w_c = w.flatten()[frame_size * frame_size * 2 :].reshape( + (frame_size, frame_size) + ) + c = self.activate[self.attrs['activation']]( + np.dot(r_h_p, w_c) + g[:, frame_size * 2 :] + ) g = np.hstack((u_r, c)) if origin_mode: h = (1 - u) * c + u * h_p @@ -167,12 +179,13 @@ class TestGRUUnitOp(OpTest): self.outputs = { 'Gate': g.astype(self.dtype), 'ResetHiddenPrev': r_h_p.astype(self.dtype), - 'Hidden': h.astype(self.dtype) + 'Hidden': h.astype(self.dtype), } def setUp(self): - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.set_inputs() self.set_outputs() @@ -184,41 +197,44 @@ class TestGRUUnitOp(OpTest): class TestGRUUnitOpOriginMode(TestGRUUnitOp): - def setUp(self): - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.set_inputs(origin_mode=True) self.set_outputs(origin_mode=True) class TestGRUUnitOpWithBias(TestGRUUnitOp): - def set_inputs(self, origin_mode=False): batch_size = self.batch_size frame_size = self.frame_size super(TestGRUUnitOpWithBias, self).set_inputs() self.inputs['Bias'] = np.random.uniform( - -0.1, 0.1, (1, frame_size * 3)).astype(self.dtype) + -0.1, 0.1, (1, frame_size * 3) + ).astype(self.dtype) self.attrs = { 'activation': GRUActivationType.identity, 'gate_activation': GRUActivationType.sigmoid, - 'origin_mode': origin_mode + 'origin_mode': origin_mode, } def test_check_grad(self): self.check_grad(['Input', 'HiddenPrev', 'Weight', 'Bias'], ['Hidden']) def test_check_grad_ingore_input(self): - self.check_grad(['HiddenPrev', 'Weight', 'Bias'], ['Hidden'], - no_grad_set=set('Input')) + self.check_grad( + ['HiddenPrev', 'Weight', 'Bias'], + ['Hidden'], + no_grad_set=set('Input'), + ) 
class TestGRUUnitOpWithBiasOriginMode(TestGRUUnitOpWithBias): - def setUp(self): - self.dtype = 'float32' if fluid.core.is_compiled_with_rocm( - ) else 'float64' + self.dtype = ( + 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + ) self.set_inputs(origin_mode=True) self.set_outputs(origin_mode=True) diff --git a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py index 90de83fce248f88d570ab6ea5efc69724ea4f8b6..de7f5d46352368dcc94d8756d0d0039428656193 100644 --- a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py @@ -21,7 +21,6 @@ paddle.enable_static() class TestGumbelSoftmaxOp(OpTest): - def init_attrs(self): self.shape = [20, 10] self.attrs = {"hard": True, "axis": -1} @@ -51,7 +50,6 @@ class TestGumbelSoftmaxOp(OpTest): class TestGumbelSoftmaxOp2(TestGumbelSoftmaxOp): - def init_attrs(self): self.shape = [20, 10] self.attrs = {"hard": True, "axis": 0} @@ -60,7 +58,6 @@ class TestGumbelSoftmaxOp2(TestGumbelSoftmaxOp): class TestGumbelSoftmaxOp3(TestGumbelSoftmaxOp): - def init_attrs(self): self.shape = [100] self.attrs = {"hard": True, "axis": -1} @@ -69,7 +66,6 @@ class TestGumbelSoftmaxOp3(TestGumbelSoftmaxOp): class TestGumbelSoftmaxOp4(TestGumbelSoftmaxOp): - def init_attrs(self): self.shape = [20, 10, 5] self.attrs = {"hard": True, "axis": -1} @@ -78,7 +74,6 @@ class TestGumbelSoftmaxOp4(TestGumbelSoftmaxOp): class TestGumbelSoftmaxOp5(TestGumbelSoftmaxOp): - def init_attrs(self): self.shape = [20, 10, 5] self.attrs = {"hard": True, "axis": 1} @@ -87,7 +82,6 @@ class TestGumbelSoftmaxOp5(TestGumbelSoftmaxOp): class TestGumbelSoftmaxOpSampleDistribution(OpTest): - def softmax(self, x): x_row_max = x.max(axis=-1) x_row_max = x_row_max.reshape(list(x.shape)[:-1] + [1]) @@ -139,7 +133,6 @@ class TestGumbelSoftmaxOpSampleDistribution(OpTest): class TestGumbelSoftmaxOpGrad(unittest.TestCase): - def init_attrs(self): self.shape = [20, 10] self.dtype = "float64" @@ -159,22 +152,22 @@ class TestGumbelSoftmaxOpGrad(unittest.TestCase): out_hard.sum().backward() out_soft.sum().backward() - np.testing.assert_allclose(x_hard.grad.numpy(), - x_soft.grad.numpy(), - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + x_hard.grad.numpy(), x_soft.grad.numpy(), rtol=1e-5, atol=1e-8 + ) paddle.enable_static() class TestGumbelSoftmaxAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] - self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32) + self.x = np.random.uniform(-1.0, 1.0, self.x_shape).astype(np.float32) self.count_expected = 24 - self.place = paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_check_api(self): # test static api @@ -201,13 +194,13 @@ class TestGumbelSoftmaxAPI(unittest.TestCase): class TestGumbelSoftmaxOpError(unittest.TestCase): - def test_errors(self): paddle.disable_static() def test_Variable(): - x1 = fluid.create_lod_tensor(np.zeros((100, 784)), - [[10, 10, 10, 70]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + ) paddle.nn.functional.gumbel_softmax(x1) self.assertRaises(ValueError, test_Variable) @@ -234,9 +227,9 @@ class TestGumbelSoftmaxOpError(unittest.TestCase): def test_dtype(): with paddle.static.program_guard(paddle.static.Program()): - x_int32 = 
paddle.fluid.data(name='x_int32', - shape=[2, 3], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 3], dtype='int32' + ) paddle.nn.functional.gumbel_softmax(x_int32) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_hash_op.py b/python/paddle/fluid/tests/unittests/test_hash_op.py index fe0762909488a1fb47b8442fbe9ecaae10619ffc..0070a3fadc75ad78b45eec9f5517ccaeac822129 100644 --- a/python/paddle/fluid/tests/unittests/test_hash_op.py +++ b/python/paddle/fluid/tests/unittests/test_hash_op.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestHashOp(OpTest): - def setUp(self): self.op_type = "hash" self.init_test_case() @@ -31,9 +30,16 @@ class TestHashOp(OpTest): np.random.seed(1) self.in_seq = np.random.randint(0, 10, (8, 1)).astype("int32") self.lod = [[2, 6]] - self.out_seq = [[[3481], [7475]], [[1719], [5986]], [[8473], [694]], - [[3481], [7475]], [[4372], [9456]], [[4372], [9456]], - [[6897], [3218]], [[9038], [7951]]] + self.out_seq = [ + [[3481], [7475]], + [[1719], [5986]], + [[8473], [694]], + [[3481], [7475]], + [[4372], [9456]], + [[4372], [9456]], + [[6897], [3218]], + [[9038], [7951]], + ] self.out_seq = np.array(self.out_seq) def test_check_output(self): @@ -41,7 +47,6 @@ class TestHashOp(OpTest): class TestHashNotLoDOp(TestHashOp): - def setUp(self): self.op_type = "hash" self.init_test_case() @@ -52,9 +57,16 @@ class TestHashNotLoDOp(TestHashOp): def init_test_case(self): np.random.seed(1) self.in_seq = np.random.randint(0, 10, (8, 1)).astype("int32") - self.out_seq = [[[3481], [7475]], [[1719], [5986]], [[8473], [694]], - [[3481], [7475]], [[4372], [9456]], [[4372], [9456]], - [[6897], [3218]], [[9038], [7951]]] + self.out_seq = [ + [[3481], [7475]], + [[1719], [5986]], + [[8473], [694]], + [[3481], [7475]], + [[4372], [9456]], + [[4372], [9456]], + [[6897], [3218]], + [[9038], [7951]], + ] self.out_seq = np.array(self.out_seq) def test_check_output(self): @@ -98,15 +110,15 @@ class TestHashOp3(TestHashOp): def init_test_case(self): self.in_seq = np.array([10, 5]).reshape((2, 1)).astype("int64") - self.out_seq = np.array([1204014882, 393011615, 3586283837, - 2814821595]).reshape((2, 2, 1)) + self.out_seq = np.array( + [1204014882, 393011615, 3586283837, 2814821595] + ).reshape((2, 2, 1)) def test_check_output(self): self.check_output() class TestHashOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_data = np.random.randint(0, 10, (8, 1)).astype("int32") @@ -119,30 +131,27 @@ class TestHashOpError(unittest.TestCase): def test_type(): # dtype must be int32, int64. - x2 = fluid.layers.data(name='x2', - shape=[1], - dtype="float32", - lod_level=1) + x2 = fluid.layers.data( + name='x2', shape=[1], dtype="float32", lod_level=1 + ) fluid.layers.hash(input=x2, hash_size=2**32) self.assertRaises(TypeError, test_type) def test_hash_size_type(): # hash_size dtype must be int32, int64. - x3 = fluid.layers.data(name='x3', - shape=[1], - dtype="int32", - lod_level=1) + x3 = fluid.layers.data( + name='x3', shape=[1], dtype="int32", lod_level=1 + ) fluid.layers.hash(input=x3, hash_size=1024.5) self.assertRaises(TypeError, test_hash_size_type) def test_num_hash_type(): # num_hash dtype must be int32, int64. 
- x4 = fluid.layers.data(name='x4', - shape=[1], - dtype="int32", - lod_level=1) + x4 = fluid.layers.data( + name='x4', shape=[1], dtype="int32", lod_level=1 + ) fluid.layers.hash(input=x4, hash_size=2**32, num_hash=2.5) self.assertRaises(TypeError, test_num_hash_type) diff --git a/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py b/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py index 8088ca7c363c2e6965b38d6639de8fe805fad8d8..68e7b4210a6926aaa2b40ce8e22f35924f92679a 100644 --- a/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py +++ b/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py @@ -21,8 +21,9 @@ np.random.seed(42) def calc_hinge_embedding_loss(input, label, margin=1.0, reduction='mean'): - result = np.where(label == -1., np.maximum(0., margin - input), 0.) + \ - np.where(label == 1., input, 0.) + result = np.where( + label == -1.0, np.maximum(0.0, margin - input), 0.0 + ) + np.where(label == 1.0, input, 0.0) if reduction == 'none': return result elif reduction == 'sum': @@ -32,13 +33,12 @@ def calc_hinge_embedding_loss(input, label, margin=1.0, reduction='mean'): class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): - def setUp(self): self.margin = 1.0 self.shape = (10, 10, 5) self.input_np = np.random.random(size=self.shape).astype(np.float64) # get label elem in {1., -1.} - self.label_np = 2 * np.random.randint(0, 2, size=self.shape) - 1. + self.label_np = 2 * np.random.randint(0, 2, size=self.shape) - 1.0 def run_dynamic_check(self, place=paddle.CPUPlace()): paddle.disable_static(place=place) @@ -50,45 +50,45 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) - dy_result = paddle.nn.functional.hinge_embedding_loss(input, - label, - reduction='sum') - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction='sum') + dy_result = paddle.nn.functional.hinge_embedding_loss( + input, label, reduction='sum' + ) + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction='sum' + ) np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) - dy_result = paddle.nn.functional.hinge_embedding_loss(input, - label, - reduction='none') - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction='none') + dy_result = paddle.nn.functional.hinge_embedding_loss( + input, label, reduction='none' + ) + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction='none' + ) np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, self.shape) def run_static_check(self, place=paddle.CPUPlace): paddle.enable_static() for reduction in ['none', 'mean', 'sum']: - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction=reduction) + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction=reduction + ) with program_guard(Program(), Program()): - input = paddle.static.data(name="input", - shape=self.shape, - dtype=paddle.float64) - label = paddle.static.data(name="label", - shape=self.shape, - dtype=paddle.float64) + input = paddle.static.data( + name="input", shape=self.shape, dtype=paddle.float64 + ) + label = paddle.static.data( + name="label", shape=self.shape, dtype=paddle.float64 + ) st_result = paddle.nn.functional.hinge_embedding_loss( - input, label, reduction=reduction) + input, label, 
reduction=reduction + ) exe = paddle.static.Executor(place) - result_numpy, = exe.run(feed={ - "input": self.input_np, - "label": self.label_np - }, - fetch_list=[st_result]) + (result_numpy,) = exe.run( + feed={"input": self.input_np, "label": self.label_np}, + fetch_list=[st_result], + ) np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def test_cpu(self): @@ -103,22 +103,21 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): # test case the raise message def test_reduce_errors(self): - def test_value_error(): loss = paddle.nn.functional.hinge_embedding_loss( - self.input_np, self.label_np, reduction='reduce_mean') + self.input_np, self.label_np, reduction='reduce_mean' + ) self.assertRaises(ValueError, test_value_error) class TestClassHingeEmbeddingLoss(unittest.TestCase): - def setUp(self): self.margin = 1.0 self.shape = (10, 10, 5) self.input_np = np.random.random(size=self.shape).astype(np.float64) # get label elem in {1., -1.} - self.label_np = 2 * np.random.randint(0, 2, size=self.shape) - 1. + self.label_np = 2 * np.random.randint(0, 2, size=self.shape) - 1.0 def run_dynamic_check(self, place=paddle.CPUPlace()): paddle.disable_static(place=place) @@ -131,45 +130,47 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): self.assertTrue(dy_result.shape, [1]) hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( - reduction='sum') + reduction='sum' + ) dy_result = hinge_embedding_loss(input, label) - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction='sum') + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction='sum' + ) np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( - reduction='none') + reduction='none' + ) dy_result = hinge_embedding_loss(input, label) - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction='none') + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction='none' + ) np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, self.shape) def run_static_check(self, place=paddle.CPUPlace): paddle.enable_static() for reduction in ['none', 'mean', 'sum']: - expected = calc_hinge_embedding_loss(self.input_np, - self.label_np, - reduction=reduction) + expected = calc_hinge_embedding_loss( + self.input_np, self.label_np, reduction=reduction + ) with program_guard(Program(), Program()): - input = paddle.static.data(name="input", - shape=self.shape, - dtype=paddle.float64) - label = paddle.static.data(name="label", - shape=self.shape, - dtype=paddle.float64) + input = paddle.static.data( + name="input", shape=self.shape, dtype=paddle.float64 + ) + label = paddle.static.data( + name="label", shape=self.shape, dtype=paddle.float64 + ) hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( - reduction=reduction) + reduction=reduction + ) st_result = hinge_embedding_loss(input, label) exe = paddle.static.Executor(place) - result_numpy, = exe.run(feed={ - "input": self.input_np, - "label": self.label_np - }, - fetch_list=[st_result]) + (result_numpy,) = exe.run( + feed={"input": self.input_np, "label": self.label_np}, + fetch_list=[st_result], + ) np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def test_cpu(self): @@ -184,10 +185,10 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): # test case the raise message def test_reduce_errors(self): - def 
test_value_error(): hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( - reduction='reduce_mean') + reduction='reduce_mean' + ) loss = hinge_embedding_loss(self.input_np, self.label_np) self.assertRaises(ValueError, test_value_error) diff --git a/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py b/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py index c2fbd5d915009f92bd739bd32dcb7e9df54a3b5f..1b320f864491ab4ea0d5c41c5b4da4b8786ffc36 100644 --- a/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_hinge_loss_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestHingeLossOp(OpTest): - def setUp(self): self.op_type = 'hinge_loss' samples_num = 100 diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py index 725b6c02362010fbe523c7a57608b79eedc40452..516bc53a34aca86c147fa4ed42414d5d32f1e528 100644 --- a/python/paddle/fluid/tests/unittests/test_histogram_op.py +++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py @@ -36,14 +36,15 @@ class TestHistogramOpAPI(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup_program) img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) actual = np.array(res[0]) expected = np.array([0, 3, 0, 2, 1]).astype(np.int64) self.assertTrue( (actual == expected).all(), - msg='histogram output is wrong, out =' + str(actual)) + msg='histogram output is wrong, out =' + str(actual), + ) def test_dygraph(self): with fluid.dygraph.guard(): @@ -53,15 +54,18 @@ class TestHistogramOpAPI(unittest.TestCase): expected = np.array([0, 3, 0, 2, 1]).astype(np.int64) self.assertTrue( (actual.numpy() == expected).all(), - msg='histogram output is wrong, out =' + str(actual.numpy())) + msg='histogram output is wrong, out =' + str(actual.numpy()), + ) with _test_eager_guard(): inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64) inputs = paddle.to_tensor(inputs_np) actual = paddle.histogram(inputs, bins=5, min=1, max=5) - self.assertTrue((actual.numpy() == expected).all(), - msg='histogram output is wrong, out =' + - str(actual.numpy())) + self.assertTrue( + (actual.numpy() == expected).all(), + msg='histogram output is wrong, out =' + + str(actual.numpy()), + ) class TestHistogramOpError(unittest.TestCase): @@ -79,9 +83,9 @@ class TestHistogramOpError(unittest.TestCase): """Test bins should be greater than or equal to 1.""" def net_func(): - input_value = paddle.fluid.layers.fill_constant(shape=[3, 4], - dtype='float32', - value=3.0) + input_value = paddle.fluid.layers.fill_constant( + shape=[3, 4], dtype='float32', value=3.0 + ) paddle.histogram(input=input_value, bins=-1, min=1, max=5) with self.assertRaises(IndexError): @@ -91,9 +95,9 @@ class TestHistogramOpError(unittest.TestCase): """Test max must be larger or equal to min.""" def net_func(): - input_value = paddle.fluid.layers.fill_constant(shape=[3, 4], - dtype='float32', - value=3.0) + input_value = paddle.fluid.layers.fill_constant( + shape=[3, 4], dtype='float32', value=3.0 + ) paddle.histogram(input=input_value, bins=1, min=5, max=1) with self.assertRaises(ValueError): @@ -103,9 +107,9 @@ class TestHistogramOpError(unittest.TestCase): """Test range of min, max is not finite""" def net_func(): - input_value = paddle.fluid.layers.fill_constant(shape=[3, 4], - dtype='float32', - value=3.0) + 
input_value = paddle.fluid.layers.fill_constant( + shape=[3, 4], dtype='float32', value=3.0 + ) paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5) with self.assertRaises(TypeError): @@ -114,24 +118,17 @@ class TestHistogramOpError(unittest.TestCase): def test_type_errors(self): with program_guard(Program()): # The input type must be Variable. - self.assertRaises(TypeError, - paddle.histogram, - 1, - bins=5, - min=1, - max=5) + self.assertRaises( + TypeError, paddle.histogram, 1, bins=5, min=1, max=5 + ) # The input type must be 'int32', 'int64', 'float32', 'float64' x_bool = fluid.data(name='x_bool', shape=[4, 3], dtype='bool') - self.assertRaises(TypeError, - paddle.histogram, - x_bool, - bins=5, - min=1, - max=5) + self.assertRaises( + TypeError, paddle.histogram, x_bool, bins=5, min=1, max=5 + ) class TestHistogramOp(OpTest): - def setUp(self): self.op_type = "histogram" self.init_test_case() @@ -139,9 +136,9 @@ class TestHistogramOp(OpTest): self.python_api = paddle.histogram self.inputs = {"X": np_input} self.init_attrs() - Out, _ = np.histogram(np_input, - bins=self.bins, - range=(self.min, self.max)) + Out, _ = np.histogram( + np_input, bins=self.bins, range=(self.min, self.max) + ) self.outputs = {"Out": Out.astype(np.int64)} def init_test_case(self): diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index 66cb6ac427a543a840e6e0168c74fde63de2f3b0..a905b9c2ae0246d2b61d80d31813f864fe708993 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -31,7 +31,6 @@ def find_latest_set(num): class CodeTable(object): - def __init__(self, num_classes, code): self.c = num_classes + code @@ -46,7 +45,6 @@ class CodeTable(object): class CodeTableWithCustomTree(object): - def __init__(self, path_table, path_code, index): self.ptable_ = path_table self.pcode_ = path_code @@ -129,8 +127,9 @@ def hsigmoid_grad(x, w, label, bias, num_classes): return [dx, dw, db] -def hsigmoidWithCustomTree(x, w, path_table, path_code, label, bias, - num_classes): +def hsigmoidWithCustomTree( + x, w, path_table, path_code, label, bias, num_classes +): batch_size = x.shape[0] code_length = len(path_table[0]) code_table = [0 for _ in range(code_length)] @@ -169,26 +168,36 @@ def hsigmoidWithCustomTree(x, w, path_table, path_code, label, bias, return pre_output, out -def python_api(input, - weight, - label, - path_table=None, - path_code=None, - bias=None, - num_classes=-1, - is_sparse=False, - remote_prefetch=False): - assert is_sparse == remote_prefetch, "is_sparse is equal to remote_prefetch in dygraph." - return paddle.nn.functional.hsigmoid_loss(input, label, num_classes, weight, - bias, path_table, path_code, - is_sparse) +def python_api( + input, + weight, + label, + path_table=None, + path_code=None, + bias=None, + num_classes=-1, + is_sparse=False, + remote_prefetch=False, +): + assert ( + is_sparse == remote_prefetch + ), "is_sparse is equal to remote_prefetch in dygraph." 
+ return paddle.nn.functional.hsigmoid_loss( + input, + label, + num_classes, + weight, + bias, + path_table, + path_code, + is_sparse, + ) python_out_sig = ["Out"] class TestHSigmoidOp(OpTest): - def setUp(self): self.op_type = "hierarchical_sigmoid" self.python_api = python_api @@ -196,12 +205,15 @@ class TestHSigmoidOp(OpTest): num_classes = 101 feature_size = 5 batch_size = 20 - x = np.random.uniform(-1, 1, - (batch_size, feature_size)).astype('float64') - w = np.random.uniform(-1, 1, - (num_classes - 1, feature_size)).astype('float64') - label = np.random.randint(0, num_classes, - (batch_size, 1)).astype('int64') + x = np.random.uniform(-1, 1, (batch_size, feature_size)).astype( + 'float64' + ) + w = np.random.uniform(-1, 1, (num_classes - 1, feature_size)).astype( + 'float64' + ) + label = np.random.randint(0, num_classes, (batch_size, 1)).astype( + 'int64' + ) bias = np.random.uniform(-1, 1, (num_classes - 1, 1)).astype('float64') self.attrs = {'num_classes': num_classes, 'is_sparse': False} self.inputs = {'X': x, 'W': w, 'Label': label, 'Bias': bias} @@ -213,35 +225,48 @@ class TestHSigmoidOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'W', 'Bias'], ['Out'], - user_defined_grads=self.user_grads, - check_eager=True) + self.check_grad( + ['X', 'W', 'Bias'], + ['Out'], + user_defined_grads=self.user_grads, + check_eager=True, + ) @skip_check_grad_ci( - reason= - "For 'TestHSigmoidOpSparse', check_grad is separately calculated by 'TestHSigmoidOpWithSparseGrad'." + reason="For 'TestHSigmoidOpSparse', check_grad is separately calculated by 'TestHSigmoidOpWithSparseGrad'." ) class TestHSigmoidOpSparse(OpTest): - def setUp(self): self.op_type = "hierarchical_sigmoid" self.python_api = python_api self.python_out_sig = python_out_sig - num_classes = 6 #using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample + num_classes = 6 # using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample feature_size = 8 batch_size = 4 x = np.random.random((batch_size, feature_size)) w = np.random.random((num_classes - 1, feature_size)) label = np.array([0, 1, 4, 5]).astype('int64') - path_table = np.array([ - (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), - (0, 2, -1, -1, -1) - ]).astype( - 'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf) - path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), - (1, 0, 0, -1, -1), (0, 1, -1, -1, -1) - ]).astype('int64') #np.array to store + path_table = np.array( + [ + (0, 2, -1, -1, -1), + (0, 1, 3, -1, -1), + (0, 1, 4, -1, -1), + (0, 2, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store 1,2,5,6s' non-leaf path(root -> leaf) + path_code = np.array( + [ + (0, 0, -1, -1, -1), + (1, 1, 1, -1, -1), + (1, 0, 0, -1, -1), + (0, 1, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store bias = np.random.random((num_classes - 1, 1)) self.attrs = {'num_classes': num_classes, 'is_sparse': True} self.inputs = { @@ -250,10 +275,11 @@ class TestHSigmoidOpSparse(OpTest): 'PathTable': path_table, 'PathCode': path_code, 'Label': label, - 'Bias': bias + 'Bias': bias, } - pre_output, out = hsigmoidWithCustomTree(x, w, path_table, path_code, - label, bias, num_classes) + pre_output, out = hsigmoidWithCustomTree( + x, w, path_table, path_code, label, bias, num_classes + ) self.outputs = {'PreOut': pre_output, 'Out': out} def test_check_output(self): @@ -261,15 +287,14 @@ class TestHSigmoidOpSparse(OpTest): class TestHSigmoidOpWithSparseGrad(unittest.TestCase): - def 
hs_net_conf(self, is_sparse): input_word = fluid.layers.data(name="x", shape=[1], dtype='int64') - path_table = fluid.layers.data(name='path_table', - shape=[3], - dtype='int64') - path_code = fluid.layers.data(name='path_code', - shape=[3], - dtype='int64') + path_table = fluid.layers.data( + name='path_table', shape=[3], dtype='int64' + ) + path_code = fluid.layers.data( + name='path_code', shape=[3], dtype='int64' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') data_list = [input_word, path_table, path_code, label] @@ -278,17 +303,21 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase): input=input_word, is_sparse=is_sparse, size=[3, 3], - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal( - scale=1 / math.sqrt(3)))) - - cost = fluid.layers.hsigmoid(input=emb, - label=label, - bias_attr=True, - num_classes=3, - path_table=path_table, - path_code=path_code, - is_custom=True, - is_sparse=is_sparse) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal(scale=1 / math.sqrt(3)) + ), + ) + + cost = fluid.layers.hsigmoid( + input=emb, + label=label, + bias_attr=True, + num_classes=3, + path_table=path_table, + path_code=path_code, + is_custom=True, + is_sparse=is_sparse, + ) avg_cost = fluid.layers.reduce_mean(cost) @@ -315,45 +344,61 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase): exe.run(start_up) result = list() for i in range(10): - data = [([[x[i % 2]]], [list(path_table[i % 2])], - [list(path_code[i % 2])], [label[i % 2]])] - - loss_val = exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[loss]) + data = [ + ( + [[x[i % 2]]], + [list(path_table[i % 2])], + [list(path_code[i % 2])], + [label[i % 2]], + ) + ] + + loss_val = exe.run( + main_program, feed=feeder.feed(data), fetch_list=[loss] + ) result.append(loss_val) return result def test_hs_grad_with_sparse(self): dense_result = self.training_test(is_sparse=False) sparse_result = self.training_test(is_sparse=True) - assert (dense_result == sparse_result) + assert dense_result == sparse_result @skip_check_grad_ci( - reason= - "[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape." + reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape." 
) class TestHSigmoidOpWithCostumTree(OpTest): - def setUp(self): self.op_type = "hierarchical_sigmoid" self.python_api = python_api self.python_out_sig = python_out_sig - num_classes = 6 #using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample + num_classes = 6 # using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample feature_size = 8 batch_size = 4 x = np.random.uniform(-1, 1, (batch_size, feature_size)) w = np.random.uniform(-1, 1, (num_classes - 1, feature_size)) label = np.array([0, 1, 4, 5]).astype('int64') - path_table = np.array([ - (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), - (0, 2, -1, -1, -1) - ]).astype( - 'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf) - path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), - (1, 0, 0, -1, -1), (0, 1, -1, -1, -1) - ]).astype('int64') #np.array to store + path_table = np.array( + [ + (0, 2, -1, -1, -1), + (0, 1, 3, -1, -1), + (0, 1, 4, -1, -1), + (0, 2, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store 1,2,5,6s' non-leaf path(root -> leaf) + path_code = np.array( + [ + (0, 0, -1, -1, -1), + (1, 1, 1, -1, -1), + (1, 0, 0, -1, -1), + (0, 1, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store bias = np.random.random((num_classes - 1, 1)) self.attrs = {'num_classes': num_classes, 'is_sparse': False} self.inputs = { @@ -362,45 +407,59 @@ class TestHSigmoidOpWithCostumTree(OpTest): 'PathTable': path_table, 'PathCode': path_code, 'Label': label, - 'Bias': bias + 'Bias': bias, } - pre_output, out = hsigmoidWithCustomTree(x, w, path_table, path_code, - label, bias, num_classes) + pre_output, out = hsigmoidWithCustomTree( + x, w, path_table, path_code, label, bias, num_classes + ) self.outputs = {'PreOut': pre_output, 'Out': out} def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['Bias', 'X', 'W'], ['Out'], - no_grad_set=set('Label'), - check_eager=True) + self.check_grad( + ['Bias', 'X', 'W'], + ['Out'], + no_grad_set=set('Label'), + check_eager=True, + ) @skip_check_grad_ci( - reason= - "[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape." + reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape." 
) class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest): - def setUp(self): self.op_type = "hierarchical_sigmoid" self.python_api = python_api self.python_out_sig = python_out_sig - num_classes = 6 #using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample + num_classes = 6 # using 1,2,3,4,5,6 to build a huffman tree and select 1,2,5,6 as sample feature_size = 8 batch_size = 4 x = np.random.uniform(-1, 1, (batch_size, feature_size)) w = np.random.uniform(-1, 1, (num_classes - 1, feature_size)) label = np.array([0, 1, 4, 5]).astype('int64') - path_table = np.array([ - (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), - (0, 2, -1, -1, -1) - ]).astype( - 'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf) - path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), - (1, 0, 0, -1, -1), (0, 1, -1, -1, -1) - ]).astype('int64') #np.array to store + path_table = np.array( + [ + (0, 2, -1, -1, -1), + (0, 1, 3, -1, -1), + (0, 1, 4, -1, -1), + (0, 2, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store 1,2,5,6s' non-leaf path(root -> leaf) + path_code = np.array( + [ + (0, 0, -1, -1, -1), + (1, 1, 1, -1, -1), + (1, 0, 0, -1, -1), + (0, 1, -1, -1, -1), + ] + ).astype( + 'int64' + ) # np.array to store # bias = np.random.random((num_classes - 1, 1)).astype("float32") self.attrs = {'num_classes': num_classes, 'is_sparse': False} self.inputs = { @@ -410,22 +469,24 @@ class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest): 'PathCode': path_code, 'Label': label, } - pre_output, out = hsigmoidWithCustomTree(x=x, - w=w, - path_table=path_table, - path_code=path_code, - label=label, - bias=None, - num_classes=num_classes) + pre_output, out = hsigmoidWithCustomTree( + x=x, + w=w, + path_table=path_table, + path_code=path_code, + label=label, + bias=None, + num_classes=num_classes, + ) self.outputs = {'PreOut': pre_output, 'Out': out} def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'W'], ['Out'], - no_grad_set=set('Label'), - check_eager=True) + self.check_grad( + ['X', 'W'], ['Out'], no_grad_set=set('Label'), check_eager=True + ) class TestHSigmoidLossAPI(unittest.TestCase): @@ -441,27 +502,38 @@ class TestHSigmoidLossAPI(unittest.TestCase): paddle.set_default_dtype(self.dtype) self.x_np = np.random.uniform( - -1, 1, [self.batch_size, self.feature_size]).astype(self.dtype) - self.labels_np = np.random.randint(self.num_classes, - size=(self.batch_size, 1), - dtype='int64') + -1, 1, [self.batch_size, self.feature_size] + ).astype(self.dtype) + self.labels_np = np.random.randint( + self.num_classes, size=(self.batch_size, 1), dtype='int64' + ) self.weight_np = np.random.uniform( - -1, 1, [self.num_classes - 1, self.feature_size]).astype(self.dtype) - self.bias_np = np.random.uniform( - -1, 1, (self.num_classes - 1, )).astype(self.dtype) + -1, 1, [self.num_classes - 1, self.feature_size] + ).astype(self.dtype) + self.bias_np = np.random.uniform(-1, 1, (self.num_classes - 1,)).astype( + self.dtype + ) self.path_table_np = None self.path_code_np = None - _, self.out_np = hsigmoid(self.x_np, self.weight_np, self.labels_np, - self.bias_np, self.num_classes) + _, self.out_np = hsigmoid( + self.x_np, + self.weight_np, + self.labels_np, + self.bias_np, + self.num_classes, + ) self.set_attrs() if self.is_custom: - _, self.out_np = hsigmoidWithCustomTree(self.x_np, self.weight_np, - self.path_table_np, - self.path_code_np, - self.labels_np, - self.bias_np.reshape(-1, 1), - self.num_classes) + _, self.out_np = 
hsigmoidWithCustomTree( + self.x_np, + self.weight_np, + self.path_table_np, + self.path_code_np, + self.labels_np, + self.bias_np.reshape(-1, 1), + self.num_classes, + ) def set_attrs(self): pass @@ -477,13 +549,19 @@ class TestHSigmoidLossAPI(unittest.TestCase): if self.is_custom: path_table = paddle.to_tensor(self.path_table_np) path_code = paddle.to_tensor(self.path_code_np) - out1 = F.hsigmoid_loss(x, labels, self.num_classes, weight, bias, - path_table, path_code) + out1 = F.hsigmoid_loss( + x, labels, self.num_classes, weight, bias, path_table, path_code + ) weight_attr = I.NumpyArrayInitializer(self.weight_np) bias_attr = I.NumpyArrayInitializer(self.bias_np) - m = paddle.nn.HSigmoidLoss(self.feature_size, self.num_classes, - weight_attr, bias_attr, self.is_custom) + m = paddle.nn.HSigmoidLoss( + self.feature_size, + self.num_classes, + weight_attr, + bias_attr, + self.is_custom, + ) out2 = m(x, labels, path_table, path_code) for out in [out1, out2]: @@ -497,23 +575,34 @@ class TestHSigmoidLossAPI(unittest.TestCase): x = paddle.static.data('x', [-1, self.feature_size]) labels = paddle.static.data('labels', [-1, 1], 'int64') weight = paddle.static.data('weight', [-1, self.feature_size]) - bias = paddle.static.data('bias', [ - -1, - ]) + bias = paddle.static.data( + 'bias', + [ + -1, + ], + ) path_table = None path_code = None if self.is_custom: path_table = paddle.static.data('path_table', [-1, -1], 'int64') path_code = paddle.static.data('path_code', [-1, -1], 'int64') - out1 = F.hsigmoid_loss(x, labels, self.num_classes, weight, bias, - path_table, path_code) + out1 = F.hsigmoid_loss( + x, labels, self.num_classes, weight, bias, path_table, path_code + ) weight_attr = paddle.framework.ParamAttr( - initializer=I.NumpyArrayInitializer(self.weight_np)) + initializer=I.NumpyArrayInitializer(self.weight_np) + ) bias_attr = paddle.framework.ParamAttr( - initializer=I.NumpyArrayInitializer(self.bias_np)) - m = paddle.nn.HSigmoidLoss(self.feature_size, self.num_classes, - weight_attr, bias_attr, self.is_custom) + initializer=I.NumpyArrayInitializer(self.bias_np) + ) + m = paddle.nn.HSigmoidLoss( + self.feature_size, + self.num_classes, + weight_attr, + bias_attr, + self.is_custom, + ) out2 = m(x, labels, path_table, path_code) exe = paddle.static.Executor(self.place) @@ -522,14 +611,14 @@ class TestHSigmoidLossAPI(unittest.TestCase): 'x': self.x_np, 'labels': self.labels_np, 'weight': self.weight_np, - 'bias': self.bias_np + 'bias': self.bias_np, } if self.is_custom: feed_dict["path_code"] = self.path_code_np feed_dict["path_table"] = self.path_table_np - ret1, ret2 = exe.run(train_program, - feed=feed_dict, - fetch_list=[out1, out2]) + ret1, ret2 = exe.run( + train_program, feed=feed_dict, fetch_list=[out1, out2] + ) for ret in [ret1, ret2]: np.testing.assert_allclose(self.out_np, ret, rtol=1e-05) @@ -547,9 +636,17 @@ class TestHSigmoidLossAPI(unittest.TestCase): path_code = fluid.data('path_code', [-1, -1], 'int64') weight_attr = I.NumpyArrayInitializer(self.weight_np) bias_attr = I.NumpyArrayInitializer(self.bias_np) - out = fluid.layers.hsigmoid(x, labels, self.num_classes, - weight_attr, bias_attr, 'out', - path_table, path_code, self.is_custom) + out = fluid.layers.hsigmoid( + x, + labels, + self.num_classes, + weight_attr, + bias_attr, + 'out', + path_table, + path_code, + self.is_custom, + ) exe = fluid.Executor(self.place) exe.run(startup_program) @@ -557,13 +654,14 @@ class TestHSigmoidLossAPI(unittest.TestCase): if self.is_custom: feed_dict["path_code"] = self.path_code_np 
feed_dict["path_table"] = self.path_table_np - ret, = exe.run(train_program, feed=feed_dict, fetch_list=[out]) + (ret,) = exe.run(train_program, feed=feed_dict, fetch_list=[out]) np.testing.assert_allclose(ret, self.out_np, rtol=1e-05) def test_errors(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): # test paddle.nn.HSigmoidLoss self.assertRaises(ValueError, paddle.nn.HSigmoidLoss, 6, 1) @@ -574,46 +672,52 @@ class TestHSigmoidLossAPI(unittest.TestCase): bias = paddle.static.data('bias', [7]) x_int32 = paddle.static.data('x_int32', [4, 6], 'int32') - self.assertRaises(TypeError, F.hsigmoid_loss, x_int32, label, 8, - weight) + self.assertRaises( + TypeError, F.hsigmoid_loss, x_int32, label, 8, weight + ) - label_float32 = paddle.static.data('label_float32', [4, 1], - 'float32') - self.assertRaises(TypeError, F.hsigmoid_loss, x, label_float32, 8, - weight) + label_float32 = paddle.static.data( + 'label_float32', [4, 1], 'float32' + ) + self.assertRaises( + TypeError, F.hsigmoid_loss, x, label_float32, 8, weight + ) weight_int32 = paddle.static.data('weight_int32', [7, 6], 'int32') - self.assertRaises(TypeError, F.hsigmoid_loss, x, label, 8, - weight_int32) + self.assertRaises( + TypeError, F.hsigmoid_loss, x, label, 8, weight_int32 + ) bias_int32 = paddle.static.data('bias_int32', [7], 'int32') - self.assertRaises(TypeError, - F.hsigmoid_loss, - x, - label, - 8, - weight, - bias=bias_int32) - - path_table_int32 = paddle.static.data('path_table_int32', [7], - 'int32') - self.assertRaises(TypeError, - F.hsigmoid_loss, - x, - label, - 8, - weight, - path_table=path_table_int32) - - path_code_int32 = paddle.static.data('path_code_int32', [7], - 'int32') - self.assertRaises(TypeError, - F.hsigmoid_loss, - x, - label, - 8, - weight, - path_code=path_code_int32) + self.assertRaises( + TypeError, F.hsigmoid_loss, x, label, 8, weight, bias=bias_int32 + ) + + path_table_int32 = paddle.static.data( + 'path_table_int32', [7], 'int32' + ) + self.assertRaises( + TypeError, + F.hsigmoid_loss, + x, + label, + 8, + weight, + path_table=path_table_int32, + ) + + path_code_int32 = paddle.static.data( + 'path_code_int32', [7], 'int32' + ) + self.assertRaises( + TypeError, + F.hsigmoid_loss, + x, + label, + 8, + weight, + path_code=path_code_int32, + ) # test paddle.nn.HSigmoidLoss paddle.disable_static(self.place) @@ -636,8 +740,9 @@ class TestHSigmoidLossAPI(unittest.TestCase): self.assertRaises(TypeError, fluid.layers.hsigmoid, 1, label, 2) # The input dtype must be float16, float32, float64. x_int32 = fluid.data(name='x_int32', shape=[4, 3], dtype='int32') - self.assertRaises(TypeError, fluid.layers.hsigmoid, x_int32, label, - 2) + self.assertRaises( + TypeError, fluid.layers.hsigmoid, x_int32, label, 2 + ) # support the input dtype is float32 x_fp32 = fluid.data(name='x_fp32', shape=[4, 3], dtype='float32') fluid.layers.hsigmoid(x_fp32, label, 2) @@ -646,20 +751,30 @@ class TestHSigmoidLossAPI(unittest.TestCase): self.assertRaises(TypeError, fluid.layers.hsigmoid, x_fp32, 1, 2) # The label dtype must be int64. 
label_int32 = fluid.data('label_int32', [4, 1], 'int32') - self.assertRaises(TypeError, fluid.layers.hsigmoid, x_fp32, - label_int32, 2) + self.assertRaises( + TypeError, fluid.layers.hsigmoid, x_fp32, label_int32, 2 + ) class TestHSigmoidLossAPICustom(TestHSigmoidLossAPI): - def set_attrs(self): self.is_custom = True - self.path_table_np = np.array([(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), - (0, 1, 4, -1, -1), - (0, 2, -1, -1, -1)]).astype(np.int64) - self.path_code_np = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), - (1, 0, 0, -1, -1), - (0, 1, -1, -1, -1)]).astype(np.int64) + self.path_table_np = np.array( + [ + (0, 2, -1, -1, -1), + (0, 1, 3, -1, -1), + (0, 1, 4, -1, -1), + (0, 2, -1, -1, -1), + ] + ).astype(np.int64) + self.path_code_np = np.array( + [ + (0, 0, -1, -1, -1), + (1, 1, 1, -1, -1), + (1, 0, 0, -1, -1), + (0, 1, -1, -1, -1), + ] + ).astype(np.int64) def test_errors(self): pass diff --git a/python/paddle/fluid/tests/unittests/test_huber_loss_op.py b/python/paddle/fluid/tests/unittests/test_huber_loss_op.py index d679d2b719f430c80596ade173b8c63065e66347..778fedfd4aa08cd658ba70b03d0fdcf638c7b4ac 100644 --- a/python/paddle/fluid/tests/unittests/test_huber_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_huber_loss_op.py @@ -29,7 +29,6 @@ def huber_loss_forward(val, delta): class TestHuberLossOp(OpTest): - def setUp(self): self.op_type = 'huber_loss' self.python_api = paddle.fluid.layers.huber_loss @@ -38,16 +37,17 @@ class TestHuberLossOp(OpTest): self.init_input() shape = self.set_shape() residual = self.inputs['Y'] - self.inputs['X'] - loss = np.vectorize(huber_loss_forward)(residual, - self.delta).astype('float32') + loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype( + 'float32' + ) self.attrs = {'delta': self.delta} self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)} def init_input(self): shape = self.set_shape() self.inputs = { - 'X': np.random.uniform(0, 1., shape).astype('float32'), - 'Y': np.random.uniform(0, 1., shape).astype('float32'), + 'X': np.random.uniform(0, 1.0, shape).astype('float32'), + 'Y': np.random.uniform(0, 1.0, shape).astype('float32'), } def set_shape(self): @@ -60,38 +60,32 @@ class TestHuberLossOp(OpTest): self.check_grad(['X', 'Y'], 'Out', check_eager=True) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.008, - no_grad_set=set("residual")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.008, no_grad_set=set("residual") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.008, - no_grad_set=set('residual')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.008, no_grad_set=set('residual') + ) def TestHuberLossOp1(TestHuberLossOp): - def set_shape(self): - return (64) + return 64 def TestHuberLossOp2(TestHuberLossOp): - def set_shape(self): return (6, 6) def TestHuberLossOp3(TestHuberLossOp): - def set_shape(self): return (6, 6, 1) class TestHuberLossOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input and label must be Variable @@ -106,10 +100,12 @@ class TestHuberLossOpError(unittest.TestCase): # the dtype of input and label must be float32 or float64 xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32") lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32") - self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr, - delta) - self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2, - delta) + self.assertRaises( + TypeError, 
fluid.layers.huber_loss, xw2, lr, delta + ) + self.assertRaises( + TypeError, fluid.layers.huber_loss, xr, lw2, delta + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_hybrid_parallel_topology.py b/python/paddle/fluid/tests/unittests/test_hybrid_parallel_topology.py index 00b1f10ede850269d1255540b1cfee8995d232da..d9210c128dc5c7fe85e62694bec0abeb699fadc6 100644 --- a/python/paddle/fluid/tests/unittests/test_hybrid_parallel_topology.py +++ b/python/paddle/fluid/tests/unittests/test_hybrid_parallel_topology.py @@ -18,7 +18,6 @@ import numpy as np class TestCommunicateTopology(unittest.TestCase): - def test_topology(self): topo = fleet.CommunicateTopology(["dp", "mp", "pp"], [2, 2, 2]) @@ -33,8 +32,9 @@ class TestCommunicateTopology(unittest.TestCase): # test get_hybrid_group_names parallel_names = ["dp", "mp", "pp"] - np.testing.assert_array_equal(parallel_names, - topo.get_hybrid_group_names()) + np.testing.assert_array_equal( + parallel_names, topo.get_hybrid_group_names() + ) # test get_dims np.testing.assert_array_equal(2, topo.get_dim("dp")) @@ -78,29 +78,64 @@ class TestCommunicateTopology(unittest.TestCase): self.assertEqual(topo.get_dim_size("pp"), 2) def test_topology_4D(self): - topo = fleet.CommunicateTopology(["dp", "pp", "sharding", "mp"], - [2, 2, 2, 2]) + topo = fleet.CommunicateTopology( + ["dp", "pp", "sharding", "mp"], [2, 2, 2, 2] + ) # test get_comm_list - dp_comm_list = [[0, 8], [1, 9], [2, 10], [3, 11], [4, 12], [5, 13], - [6, 14], [7, 15]] - mp_comm_list = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], - [12, 13], [14, 15]] - pp_comm_list = [[0, 4], [1, 5], [2, 6], [3, 7], [8, 12], [9, 13], - [10, 14], [11, 15]] - sharding_comm_list = [[0, 2], [1, 3], [4, 6], [5, 7], [8, 10], [9, 11], - [12, 14], [13, 15]] + dp_comm_list = [ + [0, 8], + [1, 9], + [2, 10], + [3, 11], + [4, 12], + [5, 13], + [6, 14], + [7, 15], + ] + mp_comm_list = [ + [0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9], + [10, 11], + [12, 13], + [14, 15], + ] + pp_comm_list = [ + [0, 4], + [1, 5], + [2, 6], + [3, 7], + [8, 12], + [9, 13], + [10, 14], + [11, 15], + ] + sharding_comm_list = [ + [0, 2], + [1, 3], + [4, 6], + [5, 7], + [8, 10], + [9, 11], + [12, 14], + [13, 15], + ] np.testing.assert_array_equal(dp_comm_list, topo.get_comm_list("dp")) np.testing.assert_array_equal(mp_comm_list, topo.get_comm_list("mp")) np.testing.assert_array_equal(pp_comm_list, topo.get_comm_list("pp")) - np.testing.assert_array_equal(sharding_comm_list, - topo.get_comm_list("sharding")) + np.testing.assert_array_equal( + sharding_comm_list, topo.get_comm_list("sharding") + ) # test get_hybrid_group_names parallel_names = ["dp", "pp", "sharding", "mp"] - np.testing.assert_array_equal(parallel_names, - topo.get_hybrid_group_names()) + np.testing.assert_array_equal( + parallel_names, topo.get_hybrid_group_names() + ) # test get_dims np.testing.assert_array_equal(2, topo.get_dim("dp")) @@ -149,20 +184,27 @@ class TestCommunicateTopology(unittest.TestCase): # test get_axis_list self.assertEqual(topo.get_axis_list("dp", 0), [0, 1, 2, 3, 4, 5, 6, 7]) - self.assertEqual(topo.get_axis_list("dp", 1), - [8, 9, 10, 11, 12, 13, 14, 15]) - self.assertEqual(topo.get_axis_list("mp", 0), - [0, 2, 4, 6, 8, 10, 12, 14]) - self.assertEqual(topo.get_axis_list("mp", 1), - [1, 3, 5, 7, 9, 11, 13, 15]) - self.assertEqual(topo.get_axis_list("pp", 0), - [0, 1, 2, 3, 8, 9, 10, 11]) - self.assertEqual(topo.get_axis_list("pp", 1), - [4, 5, 6, 7, 12, 13, 14, 15]) - self.assertEqual(topo.get_axis_list("sharding", 0), - 
[0, 1, 4, 5, 8, 9, 12, 13]) - self.assertEqual(topo.get_axis_list("sharding", 1), - [2, 3, 6, 7, 10, 11, 14, 15]) + self.assertEqual( + topo.get_axis_list("dp", 1), [8, 9, 10, 11, 12, 13, 14, 15] + ) + self.assertEqual( + topo.get_axis_list("mp", 0), [0, 2, 4, 6, 8, 10, 12, 14] + ) + self.assertEqual( + topo.get_axis_list("mp", 1), [1, 3, 5, 7, 9, 11, 13, 15] + ) + self.assertEqual( + topo.get_axis_list("pp", 0), [0, 1, 2, 3, 8, 9, 10, 11] + ) + self.assertEqual( + topo.get_axis_list("pp", 1), [4, 5, 6, 7, 12, 13, 14, 15] + ) + self.assertEqual( + topo.get_axis_list("sharding", 0), [0, 1, 4, 5, 8, 9, 12, 13] + ) + self.assertEqual( + topo.get_axis_list("sharding", 1), [2, 3, 6, 7, 10, 11, 14, 15] + ) # test get_dim_size self.assertEqual(topo.get_dim_size("dp"), 2) diff --git a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py index de81c16da7dfab737a5881275de2234bb6dbc83d..0d808e7be3440d26771f7256f2090aa26af6888c 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestIdentityLossOp(OpTest): - def setUp(self): self.max_relative_error = 0.006 self.python_api = paddle.incubate.identity_loss @@ -61,34 +60,29 @@ class TestIdentityLossOp(OpTest): class TestCase1(TestIdentityLossOp): - def initTestCase(self): self.shape = (8, 16, 8) self.reduction = 0 class TestCase2(TestIdentityLossOp): - def initTestCase(self): self.shape = (8, 16) self.reduction = 1 class TestCase3(TestIdentityLossOp): - def initTestCase(self): self.shape = (4, 8, 16) self.reduction = 2 class TestIdentityLossFloat32(TestIdentityLossOp): - def set_attrs(self): self.dtype = 'float32' class TestIdentityLossOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -100,8 +94,9 @@ class TestIdentityLossOpError(unittest.TestCase): self.assertRaises(Exception, test_int) def test_string(): - paddle.incubate.identity_loss(x=input_data, - reduction="wrongkey") + paddle.incubate.identity_loss( + x=input_data, reduction="wrongkey" + ) self.assertRaises(Exception, test_string) @@ -114,7 +109,6 @@ class TestIdentityLossOpError(unittest.TestCase): class TestIdentityLossAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) @@ -138,13 +132,14 @@ class TestIdentityLossAPI(unittest.TestCase): out4 = paddle.incubate.identity_loss(x, reduction=2) exe = paddle.static.Executor(self.place) - res = exe.run(feed={'X': self.x}, - fetch_list=[out1, out2, out3, out4]) + res = exe.run( + feed={'X': self.x}, fetch_list=[out1, out2, out3, out4] + ) ref = [ self.identity_loss_ref(self.x, 2), self.identity_loss_ref(self.x, 0), self.identity_loss_ref(self.x, 1), - self.identity_loss_ref(self.x, 2) + self.identity_loss_ref(self.x, 2), ] for out, out_ref in zip(res, ref): np.testing.assert_allclose(out, out_ref, rtol=0.0001) @@ -172,8 +167,9 @@ class TestIdentityLossAPI(unittest.TestCase): x = paddle.to_tensor(x) self.assertRaises(Exception, paddle.incubate.identity_loss, x, -1) self.assertRaises(Exception, paddle.incubate.identity_loss, x, 3) - self.assertRaises(Exception, paddle.incubate.identity_loss, x, - "wrongkey") + self.assertRaises( + Exception, paddle.incubate.identity_loss, x, "wrongkey" + ) paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): x = 
paddle.fluid.data('X', [10, 12], 'int32') diff --git a/python/paddle/fluid/tests/unittests/test_identity_op.py b/python/paddle/fluid/tests/unittests/test_identity_op.py index f257db187eba67264c76ab8508c6ede6fcb64c3d..f798b421cfb02a856feab5852a05e10c9f3725e6 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_op.py @@ -18,7 +18,6 @@ import paddle class TestIdentityAPI(unittest.TestCase): - def setUp(self): self.shape = [4, 4] self.x = np.random.random((4, 4)).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_iinfo_and_finfo.py b/python/paddle/fluid/tests/unittests/test_iinfo_and_finfo.py index 9debbccdb3d7edd7fd7b8d4a434cd991888c08fd..bacb13a0981b7ad8ffd070140dc28fa0d48c1523 100644 --- a/python/paddle/fluid/tests/unittests/test_iinfo_and_finfo.py +++ b/python/paddle/fluid/tests/unittests/test_iinfo_and_finfo.py @@ -18,21 +18,27 @@ import numpy as np class TestIInfoAndFInfoAPI(unittest.TestCase): - def test_invalid_input(self): for dtype in [ - paddle.float16, paddle.float32, paddle.float64, paddle.bfloat16, - paddle.complex64, paddle.complex128, paddle.bool + paddle.float16, + paddle.float32, + paddle.float64, + paddle.bfloat16, + paddle.complex64, + paddle.complex128, + paddle.bool, ]: with self.assertRaises(ValueError): _ = paddle.iinfo(dtype) def test_iinfo(self): - for paddle_dtype, np_dtype in [(paddle.int64, np.int64), - (paddle.int32, np.int32), - (paddle.int16, np.int16), - (paddle.int8, np.int8), - (paddle.uint8, np.uint8)]: + for paddle_dtype, np_dtype in [ + (paddle.int64, np.int64), + (paddle.int32, np.int32), + (paddle.int16, np.int16), + (paddle.int8, np.int8), + (paddle.uint8, np.uint8), + ]: xinfo = paddle.iinfo(paddle_dtype) xninfo = np.iinfo(np_dtype) self.assertEqual(xinfo.bits, xninfo.bits) diff --git a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py index 5a90f79dd7ece3636c4130200c96a6a4764ed4ca..24a5e696a10c880b177f8029509983c067bcbdc0 100644 --- a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py +++ b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import unittest import numpy as np @@ -39,26 +39,58 @@ def get_output_shape(attrs, in_shape, img_real_size): imgreal_w = img_real_size[index, 1] / out_stride[1] else: imgreal_w = img_real_size[index, 0] / out_stride[1] + 1 - output_height[0,index] = \ - 1 + \ - (imgreal_h + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \ - strides[0] - - output_width[0,index] = \ - 1 + \ - (imgreal_w + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \ - strides[1] + output_height[0, index] = ( + 1 + + ( + imgreal_h + + paddings[0] + + paddings[2] + - kernels[0] + + strides[0] + - 1 + ) + / strides[0] + ) + + output_width[0, index] = ( + 1 + + ( + imgreal_w + + paddings[1] + + paddings[3] + - kernels[1] + + strides[1] + - 1 + ) + / strides[1] + ) else: for index in range(batchsize): - output_height[0,index] = \ - 1 + \ - (img_height + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \ - strides[0] - - output_width[0,index] = \ - 1 + \ - (img_width + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \ - strides[1] + output_height[0, index] = ( + 1 + + ( + img_height + + paddings[0] + + paddings[2] + - kernels[0] + + strides[0] + - 1 + ) + / strides[0] + ) + + output_width[0, index] = ( + 1 + + ( + img_width + + paddings[1] + + paddings[3] + - kernels[1] + + strides[1] + - 1 + ) + / strides[1] + ) return output_height, output_width @@ -80,50 +112,70 @@ def im2col(attrs, im, col): for channel in range(0, input_channels): for filter_row_idx in range(0, filter_height): for filter_col_idx in range(0, filter_width): - im_row_offset = col_row_idx * stride_height \ - + filter_row_idx - padding_height - - im_col_offset = col_col_idx * stride_width \ - + filter_col_idx - padding_width - - if (im_row_offset < 0 or im_row_offset >= input_height - or im_col_offset < 0 - or im_col_offset >= input_width): - col[col_row_idx][col_col_idx][channel][\ - filter_row_idx][filter_col_idx] = 0.0 + im_row_offset = ( + col_row_idx * stride_height + + filter_row_idx + - padding_height + ) + + im_col_offset = ( + col_col_idx * stride_width + + filter_col_idx + - padding_width + ) + + if ( + im_row_offset < 0 + or im_row_offset >= input_height + or im_col_offset < 0 + or im_col_offset >= input_width + ): + col[col_row_idx][col_col_idx][channel][ + filter_row_idx + ][filter_col_idx] = 0.0 else: - im_offset = (channel * input_height + im_row_offset \ - ) * input_width + im_col_offset + im_offset = ( + channel * input_height + im_row_offset + ) * input_width + im_col_offset - col[col_row_idx][col_col_idx][channel][\ - filter_row_idx][filter_col_idx] = im[channel][ \ - im_row_offset][im_col_offset] + col[col_row_idx][col_col_idx][channel][ + filter_row_idx + ][filter_col_idx] = im[channel][im_row_offset][ + im_col_offset + ] def Im2Sequence(inputs, img_real_size, attrs): - output_height, output_width = get_output_shape(attrs, inputs.shape, - img_real_size) + output_height, output_width = get_output_shape( + attrs, inputs.shape, img_real_size + ) img_channels = inputs.shape[1] batch_size = inputs.shape[0] out = [] for index in range(batch_size): - tmp = np.zeros([ - output_height[0, index], output_width[0, index], img_channels, - attrs['kernels'][0], attrs['kernels'][1] - ]).astype("float32") + tmp = np.zeros( + [ + output_height[0, index], + output_width[0, index], + img_channels, + attrs['kernels'][0], + attrs['kernels'][1], + ] + ).astype("float32") out.append(tmp) for index in range(len(inputs)): im2col(attrs, inputs[index], out[index]) - out[index] = out[index].reshape([ - output_height[0, 
index] * output_width[0, index], - img_channels * attrs['kernels'][0] * attrs['kernels'][1] - ]) + out[index] = out[index].reshape( + [ + output_height[0, index] * output_width[0, index], + img_channels * attrs['kernels'][0] * attrs['kernels'][1], + ] + ) out = np.concatenate(out, axis=0) return out class TestBlockExpandOp(OpTest): - def config(self): self.batch_size = 1 self.img_channels = 3 @@ -138,9 +190,16 @@ class TestBlockExpandOp(OpTest): def setUp(self): self.config() self.op_type = "im2sequence" - x = np.random.uniform(0.1, 1, [ - self.batch_size, self.img_channels, self.img_height, self.img_width - ]).astype("float32") + x = np.random.uniform( + 0.1, + 1, + [ + self.batch_size, + self.img_channels, + self.img_height, + self.img_width, + ], + ).astype("float32") real_size = np.array([]).astype("float32") out = Im2Sequence(x, real_size, self.attrs) self.inputs = {'X': x} @@ -154,7 +213,6 @@ class TestBlockExpandOp(OpTest): class TestBlockExpandOpCase2(TestBlockExpandOp): - def config(self): self.batch_size = 2 self.img_channels = 3 @@ -168,7 +226,6 @@ class TestBlockExpandOpCase2(TestBlockExpandOp): class TestBlockExpandOpCase3(TestBlockExpandOp): - def config(self): self.batch_size = 6 self.img_channels = 1 @@ -182,7 +239,6 @@ class TestBlockExpandOpCase3(TestBlockExpandOp): class TestBlockExpandOpCase4(TestBlockExpandOp): - def config(self): self.batch_size = 6 self.img_channels = 2 @@ -196,11 +252,9 @@ class TestBlockExpandOpCase4(TestBlockExpandOp): @skip_check_grad_ci( - reason= - "Since 'real_size' is used just in forward computation, we don't test the gradient here." + reason="Since 'real_size' is used just in forward computation, we don't test the gradient here." ) class TestBlockExpandOpCase5(OpTest): - def config(self): self.batch_size = 1 self.img_channels = 3 @@ -217,9 +271,16 @@ class TestBlockExpandOpCase5(OpTest): def setUp(self): self.config() self.op_type = "im2sequence" - x = np.random.uniform(0.1, 1, [ - self.batch_size, self.img_channels, self.img_height, self.img_width - ]).astype("float32") + x = np.random.uniform( + 0.1, + 1, + [ + self.batch_size, + self.img_channels, + self.img_height, + self.img_width, + ], + ).astype("float32") out = np.array(Im2Sequence(x, self.real_size, self.attrs)) self.inputs = {'X': x, 'Y': self.real_size} self.outputs = {'Out': out} @@ -229,7 +290,6 @@ class TestBlockExpandOpCase5(OpTest): class TestBlockExpandOpCase6(TestBlockExpandOpCase5): - def config(self): self.batch_size = 3 self.img_channels = 1 @@ -245,7 +305,6 @@ class TestBlockExpandOpCase6(TestBlockExpandOpCase5): class TestBlockExpandOpCase7(TestBlockExpandOpCase6): - def config(self): self.batch_size = 2 self.img_channels = 2 diff --git a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py index fc3fee1ec65b6d25c2c7f56212e023ef0441de15..fbcf634634b34f490cc88a6c4f8bf2e8b852e636 100644 --- a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py +++ b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py @@ -20,26 +20,27 @@ from paddle.fluid.framework import Program def conv_block(input, num_filter, groups, dropouts): - return nets.img_conv_group(input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') + return nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + 
conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max', + ) class TestLayer(unittest.TestCase): - def test_batch_norm_layer(self): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data(name='pixel', - shape=[3, 48, 48], - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32' + ) hidden1 = fluid.layers.batch_norm(input=images) hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu') fluid.layers.batch_norm(input=hidden2) @@ -50,9 +51,9 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data(name='pixel', - shape=[3, 48, 48], - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32' + ) fluid.layers.dropout(x=images, dropout_prob=0.5) print(str(main_program)) @@ -62,9 +63,9 @@ class TestLayer(unittest.TestCase): startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data(name='pixel', - shape=[3, 48, 48], - dtype='float32') + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32' + ) conv1 = conv_block(images, 64, 2, [0.3, 0]) conv_block(conv1, 256, 3, [0.4, 0.4, 0]) @@ -74,12 +75,12 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - image1 = fluid.layers.data(name='pixel1', - shape=[3, 48, 48], - dtype='float32') - image2 = fluid.layers.data(name='pixel2', - shape=[3, 48, 48], - dtype='float32') + image1 = fluid.layers.data( + name='pixel1', shape=[3, 48, 48], dtype='float32' + ) + image2 = fluid.layers.data( + name='pixel2', shape=[3, 48, 48], dtype='float32' + ) fluid.layers.elementwise_add(x=image1, y=image2, act='relu') print(main_program) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py index 0cd97cbf3288779a6f132af831c29d91c1bacadc..3cda7d5d216fd14ba9e4f63b41ffa1d09385708a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py @@ -20,19 +20,20 @@ from paddle.fluid.framework import _test_eager_guard class AutoPruneLayer0(fluid.Layer): - def __init__(self, input_size): super(AutoPruneLayer0, self).__init__() self.linear1 = fluid.dygraph.Linear( input_size, 5, param_attr=fluid.initializer.ConstantInitializer(value=2), - bias_attr=False) + bias_attr=False, + ) self.linear2 = fluid.dygraph.Linear( 5, 5, param_attr=fluid.initializer.ConstantInitializer(value=2), - bias_attr=False) + bias_attr=False, + ) def forward(self, x, y): a = self.linear1(x) @@ -43,19 +44,20 @@ class AutoPruneLayer0(fluid.Layer): class AutoPruneLayer1(fluid.Layer): - def __init__(self, input_size): super(AutoPruneLayer1, self).__init__() self.linear1 = fluid.dygraph.Linear( input_size, 5, param_attr=fluid.initializer.ConstantInitializer(value=2), - bias_attr=False) + bias_attr=False, + ) self.linear2 = fluid.dygraph.Linear( 5, 5, param_attr=fluid.initializer.ConstantInitializer(value=2), - bias_attr=False) + bias_attr=False, + ) def forward(self, x, y): a = self.linear1(x) @@ -67,7 +69,6 @@ class AutoPruneLayer1(fluid.Layer): class 
AutoPruneLayer2(fluid.Layer): - def __init__(self, input_size): super(AutoPruneLayer2, self).__init__() self.linear = fluid.dygraph.Linear(input_size, 10, act=None) @@ -85,16 +86,15 @@ class AutoPruneLayer2(fluid.Layer): class AutoPruneLayer3(fluid.Layer): - def __init__(self, input_size): super(AutoPruneLayer3, self).__init__() self.linear = fluid.dygraph.Linear(input_size, 20, act=None) def forward(self, x, label, test_num): feature = self.linear(x) - part1, part2 = fluid.layers.split(feature, - num_or_sections=[10, 10], - dim=1) + part1, part2 = fluid.layers.split( + feature, num_or_sections=[10, 10], dim=1 + ) # Note that: part2 is not used. loss = fluid.layers.cross_entropy(input=part1, label=label) loss = paddle.mean(loss) @@ -105,7 +105,6 @@ class AutoPruneLayer3(fluid.Layer): class MyLayer(fluid.Layer): - def __init__(self, input_size, vocab_size, size, dtype="float32"): super(MyLayer, self).__init__(dtype=dtype) self.embed0 = fluid.Embedding(size=(vocab_size, size)) @@ -128,7 +127,6 @@ class MyLayer(fluid.Layer): class MyLayer2(fluid.Layer): - def __init__(self, input_size, vocab_size, size, dtype="float32"): super(MyLayer2, self).__init__(dtype=dtype) self.embed0 = fluid.Embedding(size=(vocab_size, size)) @@ -140,8 +138,9 @@ class MyLayer2(fluid.Layer): # mind the difference with MyLayer # In this example, the forward method involes all params loss = fluid.layers.reduce_mean( - self.linear_0(self.embed0(indices)) + - self.linear_1(self.embed1(indices))) + self.linear_0(self.embed0(indices)) + + self.linear_1(self.embed1(indices)) + ) return loss def linear0(self, x): @@ -154,7 +153,6 @@ class MyLayer2(fluid.Layer): class TestImperativeAutoPrune(unittest.TestCase): - def func_auto_prune(self): with fluid.dygraph.guard(): case1 = AutoPruneLayer0(input_size=5) @@ -312,12 +310,15 @@ class TestImperativeAutoPrune(unittest.TestCase): out2.backward() optimizer = fluid.optimizer.SGD( learning_rate=0.003, - parameter_list=(linear.parameters() + linear2.parameters())) + parameter_list=(linear.parameters() + linear2.parameters()), + ) optimizer.minimize(out2) - np.testing.assert_array_equal(linear2_origin, - linear2.weight.numpy()) + np.testing.assert_array_equal( + linear2_origin, linear2.weight.numpy() + ) self.assertFalse( - np.array_equal(linear_origin, linear.weight.numpy())) + np.array_equal(linear_origin, linear.weight.numpy()) + ) def test_auto_prune8(self): with _test_eager_guard(): @@ -342,10 +343,12 @@ class TestImperativeAutoPrune(unittest.TestCase): out2.backward() optimizer = fluid.optimizer.SGD( learning_rate=0.003, - parameter_list=(linear.parameters() + linear2.parameters())) + parameter_list=(linear.parameters() + linear2.parameters()), + ) optimizer.minimize(out2) - np.testing.assert_array_equal(linear2_origin, - linear2.weight.numpy()) + np.testing.assert_array_equal( + linear2_origin, linear2.weight.numpy() + ) np.testing.assert_array_equal(linear_origin, linear.weight.numpy()) try: linear2.weight.gradient() @@ -371,7 +374,7 @@ class TestImperativeAutoPrune(unittest.TestCase): out2 = linear2(b) out1.stop_gradient = True out = fluid.layers.concat(input=[out1, out2, c], axis=1) - #TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore. + # TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore. 
fluid.set_flags({'FLAGS_sort_sum_gradient': True}) out.backward() self.assertTrue(linear.weight.gradient() is None) @@ -387,8 +390,9 @@ class TestImperativeAutoPrune(unittest.TestCase): size = 20 batch_size = 16 - indices = np.random.randint(low=0, high=100, - size=(batch_size, 1)).astype("int64") + indices = np.random.randint( + low=0, high=100, size=(batch_size, 1) + ).astype("int64") embed = np.random.randn(batch_size, size).astype("float32") place = fluid.CPUPlace() @@ -396,7 +400,8 @@ class TestImperativeAutoPrune(unittest.TestCase): model = MyLayer(size, vocab_size, size) grad_clip = fluid.clip.GradientClipByGlobalNorm(0.001) optimizer = fluid.optimizer.AdamOptimizer( - 0.001, parameter_list=model.parameters(), grad_clip=grad_clip) + 0.001, parameter_list=model.parameters(), grad_clip=grad_clip + ) indices = fluid.dygraph.to_variable(indices) embed = fluid.dygraph.to_variable(embed) dummy_loss = model(embed) @@ -414,7 +419,8 @@ class TestImperativeAutoPrune(unittest.TestCase): model = MyLayer2(size, vocab_size, size) grad_clip = fluid.clip.GradientClipByGlobalNorm(0.001) optimizer = fluid.optimizer.AdamOptimizer( - 0.001, parameter_list=model.parameters(), grad_clip=grad_clip) + 0.001, parameter_list=model.parameters(), grad_clip=grad_clip + ) indices = fluid.dygraph.to_variable(indices) emebd = fluid.dygraph.to_variable(embed) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 3c8088a1055574c0732827c72b1fae52e9b714b8..62afffb59abca601ed58adc5657183d8f8d96c81 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -27,7 +27,6 @@ from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard class MyLayer(fluid.Layer): - def __init__(self): super(MyLayer, self).__init__() @@ -40,23 +39,28 @@ class MyLayer(fluid.Layer): class MLP(fluid.Layer): - def __init__(self, input_size): super(MLP, self).__init__() self._linear1 = Linear( input_size, 3, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + ) self._linear2 = Linear( 3, 4, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + ) def forward(self, inputs): x = self._linear1(inputs) @@ -66,7 +70,6 @@ class MLP(fluid.Layer): class SimpleRNNCell(fluid.Layer): - def __init__(self, step_input_size, hidden_size, output_size, param_attr): super(SimpleRNNCell, self).__init__() self.step_input_size = step_input_size @@ -79,18 +82,24 @@ class SimpleRNNCell(fluid.Layer): h2h_param_shape = [self.hidden_size, self.hidden_size] h2o_param_shape = [self.output_size, self.hidden_size] self._i2h_w = None - self._i2h_w = self.create_parameter(attr=self.param_attr, - shape=i2h_param_shape, - dtype=self._dtype, - is_bias=False) - self._h2h_w = self.create_parameter(attr=self.param_attr, - shape=h2h_param_shape, - dtype=self._dtype, - is_bias=False) - self._h2o_w = 
self.create_parameter(attr=self.param_attr, - shape=h2o_param_shape, - dtype=self._dtype, - is_bias=False) + self._i2h_w = self.create_parameter( + attr=self.param_attr, + shape=i2h_param_shape, + dtype=self._dtype, + is_bias=False, + ) + self._h2h_w = self.create_parameter( + attr=self.param_attr, + shape=h2h_param_shape, + dtype=self._dtype, + is_bias=False, + ) + self._h2o_w = self.create_parameter( + attr=self.param_attr, + shape=h2o_param_shape, + dtype=self._dtype, + is_bias=False, + ) def forward(self, input, pre_hidden): tmp_i2h = paddle.fluid.layers.nn.mul(input, self._i2h_w) @@ -104,29 +113,33 @@ class SimpleRNNCell(fluid.Layer): class SimpleRNN(fluid.Layer): - def __init__(self): super(SimpleRNN, self).__init__() self.seq_len = 4 self._cell = SimpleRNNCell( - 3, 3, 3, - fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1))) + 3, + 3, + 3, + fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)), + ) def forward(self, inputs): outs = list() pre_hiddens = list() - init_hidden = self.create_parameter(attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.1)), - shape=[1, 3], - dtype='float32', - is_bias=False) + init_hidden = self.create_parameter( + attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + shape=[1, 3], + dtype='float32', + is_bias=False, + ) pre_hidden = init_hidden for i in range(self.seq_len): - input = fluid.layers.slice(inputs, - axes=[1], - starts=[i], - ends=[i + 1]) + input = fluid.layers.slice( + inputs, axes=[1], starts=[i], ends=[i + 1] + ) input = fluid.layers.reshape(input, shape=[1, 3]) out_softmax, pre_hidden = self._cell(input, pre_hidden) outs.append(out_softmax) @@ -135,7 +148,6 @@ class SimpleRNN(fluid.Layer): class TestImperative(unittest.TestCase): - def functional_dygraph_context(self): self.assertFalse(fluid.dygraph.enabled()) fluid.enable_dygraph() @@ -220,8 +232,9 @@ class TestImperative(unittest.TestCase): t = fluid.Tensor() t.set(x, fluid.CPUPlace()) if not _in_legacy_dygraph(): - egr_tmp = fluid.core.eager.Tensor(value=x, - place=fluid.core.CPUPlace()) + egr_tmp = fluid.core.eager.Tensor( + value=x, place=fluid.core.CPUPlace() + ) egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace()) egr_tmp3 = paddle.to_tensor(x) egr_tmp4 = fluid.core.eager.Tensor(y) @@ -359,13 +372,13 @@ class TestImperative(unittest.TestCase): cur_block = cur_program.current_block() # Normally, we don't allow tensor with -1 shape being created in dygraph mode, this test is not good. if _in_legacy_dygraph(): - new_variable = cur_block.create_var(name="X", - shape=[-1, 23, 48], - dtype='float32') + new_variable = cur_block.create_var( + name="X", shape=[-1, 23, 48], dtype='float32' + ) else: - new_variable = cur_block.create_var(name="X", - shape=[1, 23, 48], - dtype='float32') + new_variable = cur_block.create_var( + name="X", shape=[1, 23, 48], dtype='float32' + ) try: new_variable.numpy() except Exception as e: @@ -400,13 +413,13 @@ class TestImperative(unittest.TestCase): cur_block = cur_program.current_block() # Normally, we don't allow tensor with -1 shape being created in dygraph mode, this test is not good. 
if _in_legacy_dygraph(): - new_variable = cur_block.create_var(name="X", - shape=[-1, 23, 48], - dtype='float32') + new_variable = cur_block.create_var( + name="X", shape=[-1, 23, 48], dtype='float32' + ) else: - new_variable = cur_block.create_var(name="X", - shape=[1, 23, 48], - dtype='float32') + new_variable = cur_block.create_var( + name="X", shape=[1, 23, 48], dtype='float32' + ) try: new_variable.gradient() except Exception as e: @@ -464,19 +477,24 @@ class TestImperative(unittest.TestCase): dy_grad2 = l2._x_for_debug.gradient() with new_program_scope(): - inp = fluid.layers.data(name="inp", - shape=[3], - append_batch_size=False) + inp = fluid.layers.data( + name="inp", shape=[3], append_batch_size=False + ) l = MyLayer() x = l(inp)[0] param_grads = fluid.backward.append_backward( - x, parameter_list=[l._x_for_debug.name])[0] - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + x, parameter_list=[l._x_for_debug.name] + )[0] + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) static_out, static_grad = exe.run( feed={inp.name: np_inp}, - fetch_list=[x.name, param_grads[1].name]) + fetch_list=[x.name, param_grads[1].name], + ) np.testing.assert_array_equal(dy_out, static_out) np.testing.assert_array_equal(dy_grad, static_grad) @@ -510,20 +528,25 @@ class TestImperative(unittest.TestCase): dy_grad2 = mlp2._linear1.weight.gradient() with new_program_scope(): - inp = fluid.layers.data(name="inp", - shape=[2, 2], - append_batch_size=False) + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False + ) mlp = MLP(input_size=2) out = mlp(inp) param_grads = fluid.backward.append_backward( - out, parameter_list=[mlp._linear1.weight.name])[0] - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + out, parameter_list=[mlp._linear1.weight.name] + )[0] + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(fluid.default_startup_program()) static_out, static_grad = exe.run( feed={inp.name: np_inp}, - fetch_list=[out.name, param_grads[1].name]) + fetch_list=[out.name, param_grads[1].name], + ) np.testing.assert_allclose(dy_out, static_out, rtol=1e-05) np.testing.assert_allclose(dy_grad, static_grad, rtol=1e-05) @@ -548,28 +571,27 @@ class TestImperative(unittest.TestCase): self.func_mlp() def test_gradient_accumulation(self): - def test_single_api(sort_sum_gradient): fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient}) - x = paddle.to_tensor(5., stop_gradient=False) + x = paddle.to_tensor(5.0, stop_gradient=False) for i in range(10): y = paddle.pow(x, 4.0) y.backward() self.assertEqual(x.grad.numpy(), (i + 1) * 500) x.clear_gradient() - self.assertEqual(x.grad.numpy(), 0.) + self.assertEqual(x.grad.numpy(), 0.0) for i in range(10): y = paddle.pow(x, 4.0) y.backward() self.assertEqual(x.grad.numpy(), (i + 1) * 500) x.clear_grad() - self.assertEqual(x.grad.numpy(), 0.) 
+ self.assertEqual(x.grad.numpy(), 0.0) def test_simple_net(sort_sum_gradient): fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient}) - x = paddle.to_tensor(5., stop_gradient=False) - y = paddle.to_tensor(2., stop_gradient=False) - z = paddle.to_tensor(3., stop_gradient=False) + x = paddle.to_tensor(5.0, stop_gradient=False) + y = paddle.to_tensor(2.0, stop_gradient=False) + z = paddle.to_tensor(3.0, stop_gradient=False) def fun(x, y, z): loss1 = x * x * y @@ -618,59 +640,72 @@ class TestImperative(unittest.TestCase): # generate the gradient of each step mlp2 = MLP(input_size=input_size) - expected_weight1_grad = 0. - expected_bias1_grad = 0. - expected_weight2_grad = 0. - expected_bias2_grad = 0. + expected_weight1_grad = 0.0 + expected_bias1_grad = 0.0 + expected_weight2_grad = 0.0 + expected_bias2_grad = 0.0 for batch_id in range(100): x = paddle.uniform([10, input_size]) detach_x = x.detach() clear_loss = mlp2(detach_x) clear_loss.backward() - expected_weight1_grad = (expected_weight1_grad + - mlp2._linear1.weight.grad.numpy()) - expected_bias1_grad = (expected_bias1_grad + - mlp2._linear1.bias.grad.numpy()) - expected_weight2_grad = (expected_weight2_grad + - mlp2._linear2.weight.grad.numpy()) - expected_bias2_grad = (expected_bias2_grad + - mlp2._linear2.bias.grad.numpy()) + expected_weight1_grad = ( + expected_weight1_grad + mlp2._linear1.weight.grad.numpy() + ) + expected_bias1_grad = ( + expected_bias1_grad + mlp2._linear1.bias.grad.numpy() + ) + expected_weight2_grad = ( + expected_weight2_grad + mlp2._linear2.weight.grad.numpy() + ) + expected_bias2_grad = ( + expected_bias2_grad + mlp2._linear2.bias.grad.numpy() + ) loss = mlp1(x) loss.backward() np.testing.assert_array_equal(loss.grad.numpy(), [1]) - np.testing.assert_allclose(mlp1._linear1.weight.grad.numpy(), - expected_weight1_grad, - rtol=1e-05) - np.testing.assert_allclose(mlp1._linear1.bias.grad.numpy(), - expected_bias1_grad, - rtol=1e-05) - np.testing.assert_allclose(mlp1._linear2.weight.grad.numpy(), - expected_weight2_grad, - rtol=1e-05) - np.testing.assert_allclose(mlp1._linear2.bias.grad.numpy(), - expected_bias2_grad, - rtol=1e-05) + np.testing.assert_allclose( + mlp1._linear1.weight.grad.numpy(), + expected_weight1_grad, + rtol=1e-05, + ) + np.testing.assert_allclose( + mlp1._linear1.bias.grad.numpy(), + expected_bias1_grad, + rtol=1e-05, + ) + np.testing.assert_allclose( + mlp1._linear2.weight.grad.numpy(), + expected_weight2_grad, + rtol=1e-05, + ) + np.testing.assert_allclose( + mlp1._linear2.bias.grad.numpy(), + expected_bias2_grad, + rtol=1e-05, + ) mlp2.clear_gradients() np.testing.assert_array_equal(clear_loss.grad.numpy(), [1]) if ((batch_id + 1) % 10) % 2 == 0: mlp1.clear_gradients() - expected_weight1_grad = 0. - expected_bias1_grad = 0. - expected_weight2_grad = 0. - expected_bias2_grad = 0. + expected_weight1_grad = 0.0 + expected_bias1_grad = 0.0 + expected_weight2_grad = 0.0 + expected_bias2_grad = 0.0 elif ((batch_id + 1) % 10) % 2 == 1: mlp1.clear_gradients() mlp1._linear1.weight._set_grad_ivar( - paddle.ones([input_size, 3])) + paddle.ones([input_size, 3]) + ) mlp1._linear2.weight._set_grad_ivar(paddle.ones([3, 4])) - expected_weight1_grad = 1. - expected_bias1_grad = 0. - expected_weight2_grad = 1. - expected_bias2_grad = 0. 
+ expected_weight1_grad = 1.0 + expected_bias1_grad = 0.0 + expected_weight2_grad = 1.0 + expected_bias2_grad = 0.0 with fluid.dygraph.guard(): test_single_api(False) @@ -696,19 +731,25 @@ class TestImperative(unittest.TestCase): # static graph with new_program_scope(): - inp_data1 = fluid.layers.data(name='inp1', - shape=[3, 3], - dtype=np.float32) - inp_data2 = fluid.layers.data(name='inp2', - shape=[3, 3], - dtype=np.float32) + inp_data1 = fluid.layers.data( + name='inp1', shape=[3, 3], dtype=np.float32 + ) + inp_data2 = fluid.layers.data( + name='inp2', shape=[3, 3], dtype=np.float32 + ) a = fluid.layers.expand( - fluid.layers.reshape(fluid.layers.reduce_sum(inp_data1), - [1, 1]), [4, 1]) + fluid.layers.reshape( + fluid.layers.reduce_sum(inp_data1), [1, 1] + ), + [4, 1], + ) b = fluid.layers.expand( - fluid.layers.reshape(fluid.layers.reduce_sum(inp_data2), - [1, 1]), [4, 1]) + fluid.layers.reshape( + fluid.layers.reduce_sum(inp_data2), [1, 1] + ), + [4, 1], + ) cond = fluid.layers.less_than(x=a, y=b) ie = fluid.layers.IfElse(cond) @@ -725,14 +766,16 @@ class TestImperative(unittest.TestCase): ie.output(d3) out = ie() - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) - static_result = exe.run(fluid.default_main_program(), - feed={ - 'inp1': np_inp1, - 'inp2': np_inp2 - }, - fetch_list=out)[0] + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + static_result = exe.run( + fluid.default_main_program(), + feed={'inp1': np_inp1, 'inp2': np_inp2}, + fetch_list=out, + )[0] np.testing.assert_allclose(dygraph_result, static_result, rtol=1e-05) def test_dygraph_vs_static(self): @@ -741,8 +784,14 @@ class TestImperative(unittest.TestCase): self.func_dygraph_vs_static() def func_rnn(self): - np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], - [10.0, 11.0, 12.0]]) + np_inp = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0], + [10.0, 11.0, 12.0], + ] + ) np_inp = np_inp.reshape((1, 4, 3)) np_inp = np_inp.astype(np.float32) with fluid.dygraph.guard(): @@ -769,20 +818,28 @@ class TestImperative(unittest.TestCase): dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient() with new_program_scope(): - inp = fluid.layers.data(name="inp", - shape=[1, 4, 3], - append_batch_size=False) + inp = fluid.layers.data( + name="inp", shape=[1, 4, 3], append_batch_size=False + ) simple_rnn = SimpleRNN() outs, pre_hiddens = simple_rnn(inp) param_grads = fluid.backward.append_backward(outs[3]) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) - static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run( + ( + static_out, + static_grad_h2o, + static_grad_h2h, + static_grad_i2h, + ) = exe.run( feed={inp.name: np_inp}, fetch_list=[ - outs[3].name, param_grads[0][1].name, - param_grads[1][1].name, param_grads[2][1].name - ]) + outs[3].name, + param_grads[0][1].name, + param_grads[1][1].name, + param_grads[2][1].name, + ], + ) np.testing.assert_array_equal(dy_out, static_out) np.testing.assert_array_equal(dy_grad_h2o, static_grad_h2o) @@ -825,7 +882,6 @@ class TestImperative(unittest.TestCase): class TestDygraphUtils(unittest.TestCase): - def func_append_activation_in_dygraph_exception(self): with new_program_scope(): np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32) @@ -883,9 +939,9 @@ class TestDygraphUtils(unittest.TestCase): def func_append_activation_in_dygraph_use_mkldnn(self): a_np = np.random.uniform(-2, 2, (10, 
20, 30)).astype(np.float32) - helper = LayerHelper(fluid.unique_name.generate("test"), - act="relu", - use_mkldnn=True) + helper = LayerHelper( + fluid.unique_name.generate("test"), act="relu", use_mkldnn=True + ) func = helper.append_activation with fluid.dygraph.guard(): a = paddle.to_tensor(a_np) @@ -945,12 +1001,12 @@ class TestDygraphUtils(unittest.TestCase): class TestDygraphGuardWithError(unittest.TestCase): - def func_without_guard(self): with fluid.dygraph.guard(): x = paddle.to_tensor(np.zeros([10, 10])) - with self.assertRaisesRegexp(TypeError, - "Please use `with fluid.dygraph.guard()"): + with self.assertRaisesRegexp( + TypeError, "Please use `with fluid.dygraph.guard()" + ): y = fluid.layers.matmul(x, x) def test_without_guard(self): @@ -960,16 +1016,17 @@ class TestDygraphGuardWithError(unittest.TestCase): class TestMetaclass(unittest.TestCase): - def func_metaclass(self): self.assertEqual(type(MyLayer).__name__, 'type') self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type') if not _in_legacy_dygraph(): self.assertEqual( - type(paddle.fluid.core.eager.Tensor).__name__, 'type') + type(paddle.fluid.core.eager.Tensor).__name__, 'type' + ) else: self.assertEqual( - type(paddle.fluid.core.VarBase).__name__, 'pybind11_type') + type(paddle.fluid.core.VarBase).__name__, 'pybind11_type' + ) def test_metaclass(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_container_layerdict.py b/python/paddle/fluid/tests/unittests/test_imperative_container_layerdict.py index 2e7bd103ec501d00d3b0b485c3deb91f4dd9616d..d81cef00f7bcae0b4a134ae7b2d3d9631b9173ab 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_container_layerdict.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_container_layerdict.py @@ -19,12 +19,13 @@ from paddle.fluid.framework import _test_eager_guard class TestLayerDict(unittest.TestCase): - def func_layer_dict(self): - layers = OrderedDict([ - ('conv1d', paddle.nn.Conv1D(3, 2, 3)), - ('conv2d', paddle.nn.Conv2D(3, 2, 3)), - ]) + layers = OrderedDict( + [ + ('conv1d', paddle.nn.Conv1D(3, 2, 3)), + ('conv2d', paddle.nn.Conv2D(3, 2, 3)), + ] + ) layers_dicts = paddle.nn.LayerDict(sublayers=layers) @@ -58,10 +59,12 @@ class TestLayerDict(unittest.TestCase): layers_dicts['linear'] = layers['linear'] check_layer_dict() - sublayer = OrderedDict([ - ('sigmod', paddle.nn.Sigmoid()), - ('relu', paddle.nn.ReLU()), - ]) + sublayer = OrderedDict( + [ + ('sigmod', paddle.nn.Sigmoid()), + ('relu', paddle.nn.ReLU()), + ] + ) layers.update(sublayer) layers_dicts.update(sublayer) check_layer_dict() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_container_layerlist.py b/python/paddle/fluid/tests/unittests/test_imperative_container_layerlist.py index d4d2bd0e5197ae61dc9da10bf698631d8fb7a674..89d92b3820554a3f69d70e4303596c420aa1fc8b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_container_layerlist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_container_layerlist.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class MyLayer(fluid.Layer): - def __init__(self, layerlist): super(MyLayer, self).__init__() self.layerlist = layerlist @@ -32,53 +31,58 @@ class MyLayer(fluid.Layer): class TestImperativeContainer(unittest.TestCase): - def fluid_dygraph_list(self): return fluid.dygraph.LayerList( - [fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)]) + [fluid.dygraph.Linear(2**i, 2 ** (i + 1)) for i in range(6)] + ) def 
paddle_imperative_list(self): return paddle.nn.LayerList( - [fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)]) + [fluid.dygraph.Linear(2**i, 2 ** (i + 1)) for i in range(6)] + ) def layer_list(self, use_fluid_api): data_np = np.random.uniform(-1, 1, [5, 1]).astype('float32') with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(data_np) - layerlist = self.fluid_dygraph_list( - ) if use_fluid_api else self.paddle_imperative_list() + layerlist = ( + self.fluid_dygraph_list() + if use_fluid_api + else self.paddle_imperative_list() + ) size = len(layerlist) model = MyLayer(layerlist) res1 = model(x) self.assertListEqual(res1.shape, [5, 2**size]) - model.layerlist[size - 1] = fluid.dygraph.Linear(2**(size - 1), 5) + model.layerlist[size - 1] = fluid.dygraph.Linear(2 ** (size - 1), 5) res2 = model(x) self.assertListEqual(res2.shape, [5, 5]) del model.layerlist[size - 1] res3 = model(x) - self.assertListEqual(res3.shape, [5, 2**(size - 1)]) - model.layerlist.append(fluid.dygraph.Linear(2**(size - 1), 3)) + self.assertListEqual(res3.shape, [5, 2 ** (size - 1)]) + model.layerlist.append(fluid.dygraph.Linear(2 ** (size - 1), 3)) res4 = model(x) self.assertListEqual(res4.shape, [5, 3]) res4.backward() model2 = MyLayer(layerlist[:-1]) res5 = model2(x) - self.assertListEqual(res5.shape, [5, 2**(size - 1)]) + self.assertListEqual(res5.shape, [5, 2 ** (size - 1)]) del model2.layerlist[1:] res6 = model2(x) - self.assertListEqual(res6.shape, [5, 2**(0 + 1)]) + self.assertListEqual(res6.shape, [5, 2 ** (0 + 1)]) res6.backward() model3 = MyLayer(layerlist[:-2]) model3.layerlist.append(fluid.dygraph.Linear(3, 1)) - model3.layerlist.insert(size - 2, - fluid.dygraph.Linear(2**(size - 2), 3)) + model3.layerlist.insert( + size - 2, fluid.dygraph.Linear(2 ** (size - 2), 3) + ) res7 = model3(x) self.assertListEqual(res7.shape, [5, 1]) to_be_extended = [ - fluid.dygraph.Linear(3**i, 3**(i + 1)) for i in range(3) + fluid.dygraph.Linear(3**i, 3 ** (i + 1)) for i in range(3) ] model3.layerlist.extend(to_be_extended) res8 = model3(x) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_container_parameterlist.py b/python/paddle/fluid/tests/unittests/test_imperative_container_parameterlist.py index 2101e84858ab8e087de3248a8602bc6453279a67..55e666c10aa8851eaa2f890db1692b586d280f92 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_container_parameterlist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_container_parameterlist.py @@ -21,24 +21,26 @@ from paddle.fluid.framework import _test_eager_guard class MyLayer(fluid.Layer): - def __init__(self, num_stacked_param, use_fluid_api): super(MyLayer, self).__init__() # create ParameterList with iterable Parameters - self.params = self.fluid_dygraph_ParameterList( - num_stacked_param - ) if use_fluid_api else self.paddle_imperative_ParameterList( - num_stacked_param) + self.params = ( + self.fluid_dygraph_ParameterList(num_stacked_param) + if use_fluid_api + else self.paddle_imperative_ParameterList(num_stacked_param) + ) def fluid_dygraph_ParameterList(self, num_stacked_param): return fluid.dygraph.ParameterList( - [fluid.layers.create_parameter(shape=[2, 2], dtype='float32')] * - num_stacked_param) + [fluid.layers.create_parameter(shape=[2, 2], dtype='float32')] + * num_stacked_param + ) def paddle_imperative_ParameterList(self, num_stacked_param): return paddle.nn.ParameterList( - [fluid.layers.create_parameter(shape=[2, 2], dtype='float32')] * - num_stacked_param) + [fluid.layers.create_parameter(shape=[2, 2], 
dtype='float32')] + * num_stacked_param + ) def forward(self, x): for i, p in enumerate(self.params): @@ -47,7 +49,6 @@ class MyLayer(fluid.Layer): class TestImperativeContainerParameterList(unittest.TestCase): - def paramter_list(self, use_fluid_api): data_np = np.random.uniform(-1, 1, [5, 2]).astype('float32') with fluid.dygraph.guard(): @@ -61,11 +62,13 @@ class TestImperativeContainerParameterList(unittest.TestCase): loss.backward() model.params[num_stacked_param - 1] = fluid.layers.create_parameter( - shape=[2, 3], dtype='float32') + shape=[2, 3], dtype='float32' + ) res = model(x) self.assertListEqual(res.shape, [5, 3]) model.params.append( - fluid.layers.create_parameter(shape=[3, 4], dtype='float32')) + fluid.layers.create_parameter(shape=[3, 4], dtype='float32') + ) self.assertEqual(len(model.params), num_stacked_param + 1) res = model(x) self.assertListEqual(res.shape, [5, 4]) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_container_sequential.py b/python/paddle/fluid/tests/unittests/test_imperative_container_sequential.py index c0b4ed44e5b87ccacb7b991d783c7d94ec527a12..c1b0a42675f6af7857ab3f4bdc519689adb7a595 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_container_sequential.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_container_sequential.py @@ -19,13 +19,13 @@ from paddle.fluid.framework import _test_eager_guard class TestImperativeContainerSequential(unittest.TestCase): - def func_sequential(self): data = np.random.uniform(-1, 1, [5, 10]).astype('float32') with fluid.dygraph.guard(): data = fluid.dygraph.to_variable(data) - model1 = fluid.dygraph.Sequential(fluid.Linear(10, 1), - fluid.Linear(1, 2)) + model1 = fluid.dygraph.Sequential( + fluid.Linear(10, 1), fluid.Linear(1, 2) + ) res1 = model1(data) self.assertListEqual(res1.shape, [5, 2]) model1[1] = fluid.Linear(1, 3) @@ -64,8 +64,9 @@ class TestImperativeContainerSequential(unittest.TestCase): data = np.random.uniform(-1, 1, [5, 10]).astype('float32') with fluid.dygraph.guard(): data = fluid.dygraph.to_variable(data) - model1 = fluid.dygraph.Sequential(fluid.Linear(10, 1), - fluid.Linear(1, 2)) + model1 = fluid.dygraph.Sequential( + fluid.Linear(10, 1), fluid.Linear(1, 2) + ) res1 = model1(data) self.assertListEqual(res1.shape, [5, 2]) model1[1] = fluid.Linear(1, 3) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_base.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_base.py index 5ae1b3f00144c649d9b6f55565dcc40c10de9488..da15dcf29b570032b6c25f1decd0ef08b07c7a6d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_base.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_base.py @@ -26,7 +26,6 @@ def get_random_images_and_labels(image_shape, label_shape): def sample_generator_creator(batch_size, batch_num): - def __reader__(): for _ in range(batch_num * batch_size): image, label = get_random_images_and_labels([784], [1]) @@ -36,7 +35,6 @@ def sample_generator_creator(batch_size, batch_num): class TestDygraphDataLoader(unittest.TestCase): - def setUp(self): self.batch_size = 8 self.batch_num = 4 @@ -53,13 +51,14 @@ class TestDygraphDataLoader(unittest.TestCase): def func_test_single_process_loader(self): with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - iterable=False, - use_multiprocess=False) - loader.set_sample_generator(sample_generator_creator( - self.batch_size, self.batch_num), - batch_size=self.batch_size, - 
places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, iterable=False, use_multiprocess=False + ) + loader.set_sample_generator( + sample_generator_creator(self.batch_size, self.batch_num), + batch_size=self.batch_size, + places=fluid.CPUPlace(), + ) self.iter_loader_data(loader) def test_single_process_loader(self): @@ -69,12 +68,14 @@ class TestDygraphDataLoader(unittest.TestCase): def func_test_multi_process_loader(self): with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - use_multiprocess=True) - loader.set_sample_generator(sample_generator_creator( - self.batch_size, self.batch_num), - batch_size=self.batch_size, - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, use_multiprocess=True + ) + loader.set_sample_generator( + sample_generator_creator(self.batch_size, self.batch_num), + batch_size=self.batch_size, + places=fluid.CPUPlace(), + ) self.iter_loader_data(loader) def test_multi_process_loader(self): @@ -85,9 +86,10 @@ class TestDygraphDataLoader(unittest.TestCase): def func_test_generator_no_places(self): with fluid.dygraph.guard(): loader = fluid.io.DataLoader.from_generator(capacity=self.capacity) - loader.set_sample_generator(sample_generator_creator( - self.batch_size, self.batch_num), - batch_size=self.batch_size) + loader.set_sample_generator( + sample_generator_creator(self.batch_size, self.batch_num), + batch_size=self.batch_size, + ) self.iter_loader_data(loader) def test_generator_no_places(self): @@ -98,13 +100,14 @@ class TestDygraphDataLoader(unittest.TestCase): def func_test_set_pin_memory(self): with fluid.dygraph.guard(): use_pinned_memory(False) - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - iterable=False, - use_multiprocess=False) - loader.set_sample_generator(sample_generator_creator( - self.batch_size, self.batch_num), - batch_size=self.batch_size, - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, iterable=False, use_multiprocess=False + ) + loader.set_sample_generator( + sample_generator_creator(self.batch_size, self.batch_num), + batch_size=self.batch_size, + places=fluid.CPUPlace(), + ) self.iter_loader_data(loader) use_pinned_memory(True) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py index 1389b93c38e7835c3604142b45a106aa588db9c9..eac20ea03c788c88f3fccc441406f960abbfc9d2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exception.py @@ -27,7 +27,6 @@ def get_random_images_and_labels(image_shape, label_shape): class TestDygraphDataLoaderWithException(unittest.TestCase): - def setUp(self): self.batch_size = 8 self.batch_num = 4 @@ -36,8 +35,9 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): def func_test_not_capacity(self): with fluid.dygraph.guard(): - with self.assertRaisesRegexp(ValueError, - "Please give value to capacity."): + with self.assertRaisesRegexp( + ValueError, "Please give value to capacity." 
+ ): fluid.io.DataLoader.from_generator() def test_not_capacity(self): @@ -46,9 +46,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): self.func_test_not_capacity() def func_test_single_process_with_thread_expection(self): - def error_sample_genarator(batch_num): - def __reader__(): for _ in range(batch_num): yield [[[1, 2], [1]]] @@ -56,11 +54,12 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): return __reader__ with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - iterable=False, - use_multiprocess=False) - loader.set_batch_generator(error_sample_genarator(self.batch_num), - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, iterable=False, use_multiprocess=False + ) + loader.set_batch_generator( + error_sample_genarator(self.batch_num), places=fluid.CPUPlace() + ) exception = None try: for _ in loader(): @@ -76,9 +75,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): self.func_test_single_process_with_thread_expection() def func_test_multi_process_with_process_expection(self): - def error_sample_genarator(batch_num): - def __reader__(): for _ in range(batch_num): yield [[[1, 2], [1]]] @@ -86,10 +83,12 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): return __reader__ with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - use_multiprocess=True) - loader.set_batch_generator(error_sample_genarator(self.batch_num), - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, use_multiprocess=True + ) + loader.set_batch_generator( + error_sample_genarator(self.batch_num), places=fluid.CPUPlace() + ) exception = None try: for _ in loader(): @@ -104,24 +103,25 @@ class TestDygraphDataLoaderWithException(unittest.TestCase): self.func_test_multi_process_with_process_expection() def func_test_multi_process_with_get_timeout(self): - def slow_batch_generator_creator(batch_size, batch_num): - def __reader__(): for _ in range(batch_num): time.sleep(80) batch_image, batch_label = get_random_images_and_labels( - [batch_size, 784], [batch_size, 1]) + [batch_size, 784], [batch_size, 1] + ) yield batch_image, batch_label return __reader__ with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - use_multiprocess=True) - loader.set_batch_generator(slow_batch_generator_creator( - self.batch_size, self.batch_num), - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, use_multiprocess=True + ) + loader.set_batch_generator( + slow_batch_generator_creator(self.batch_size, self.batch_num), + places=fluid.CPUPlace(), + ) exception = None try: for _ in range(self.epoch_num): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py index ec59d473c13df406e6c98338bc2386cf612aacdf..ff9ae7343b38b632eeaee6804d87373cb4c44af0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_exit_func.py @@ -21,14 +21,17 @@ from paddle.fluid.framework import _test_eager_guard import queue -from paddle.fluid.reader import multiprocess_queue_set, _cleanup, CleanupFuncRegistrar +from paddle.fluid.reader import ( + multiprocess_queue_set, + _cleanup, + CleanupFuncRegistrar, +) # NOTE: These special 
functions cannot be detected by the existing coverage mechanism, # so the following unittests are added for these internal functions. class TestDygraphDataLoaderCleanUpFunc(unittest.TestCase): - def setUp(self): self.capacity = 10 @@ -65,8 +68,9 @@ class TestRegisterExitFunc(unittest.TestCase): self.func_test_not_callable_func() def func_test_old_handler_for_sigint(self): - CleanupFuncRegistrar.register(function=self.none_func, - signals=[signal.SIGINT]) + CleanupFuncRegistrar.register( + function=self.none_func, signals=[signal.SIGINT] + ) def test_old_handler_for_sigint(self): with _test_eager_guard(): @@ -78,8 +82,9 @@ class TestRegisterExitFunc(unittest.TestCase): def __test_process__(): pass - CleanupFuncRegistrar.register(function=self.none_func, - signals=[signal.SIGCHLD]) + CleanupFuncRegistrar.register( + function=self.none_func, signals=[signal.SIGCHLD] + ) exception = None try: diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_fds_clear.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_fds_clear.py index f0bdc8dacba9ae62ad1311c121d6116e537fe41d..355f35ce5e9e62bf749019bb6f777bbd05efbbe5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_fds_clear.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_fds_clear.py @@ -26,25 +26,24 @@ def get_random_images_and_labels(image_shape, label_shape): def batch_generator_creator(batch_size, batch_num): - def __reader__(): for _ in range(batch_num): batch_image, batch_label = get_random_images_and_labels( - [batch_size, 784], [batch_size, 1]) + [batch_size, 784], [batch_size, 1] + ) yield batch_image, batch_label return __reader__ class RandomDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([784]).astype('float32') - label = np.random.randint(0, 9, (1, )).astype('int64') + label = np.random.randint(0, 9, (1,)).astype('int64') return image, label def __len__(self): @@ -52,7 +51,6 @@ class RandomDataset(Dataset): class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase): - def setUp(self): self.batch_size = 8 self.batch_num = 100 @@ -60,11 +58,13 @@ class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase): self.capacity = 50 def prepare_data_loader(self): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity, - use_multiprocess=True) - loader.set_batch_generator(batch_generator_creator( - self.batch_size, self.batch_num), - places=fluid.CPUPlace()) + loader = fluid.io.DataLoader.from_generator( + capacity=self.capacity, use_multiprocess=True + ) + loader.set_batch_generator( + batch_generator_creator(self.batch_size, self.batch_num), + places=fluid.CPUPlace(), + ) return loader def run_one_epoch_with_break(self, loader): @@ -102,16 +102,17 @@ class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase): class TestMultiProcessDataLoaderMmapFdsClear(TestDygraphDataLoaderMmapFdsClear): - def prepare_data_loader(self): place = fluid.CPUPlace() with fluid.dygraph.guard(place): dataset = RandomDataset(self.batch_size * self.batch_num) - loader = DataLoader(dataset, - places=place, - batch_size=self.batch_size, - drop_last=True, - num_workers=2) + loader = DataLoader( + dataset, + places=place, + batch_size=self.batch_size, + drop_last=True, + num_workers=2, + ) return loader diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_process.py b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_process.py 
index 94322a67959597d616b6771e569783369c01b88a..c03b9b3bbf16b6c832a9dee695ada3dfb0a38bf0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_loader_process.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_loader_process.py @@ -29,11 +29,11 @@ def get_random_images_and_labels(image_shape, label_shape): def batch_generator_creator(batch_size, batch_num): - def __reader__(): for _ in range(batch_num): batch_image, batch_label = get_random_images_and_labels( - [batch_size, 784], [batch_size, 1]) + [batch_size, 784], [batch_size, 1] + ) yield batch_image, batch_label return __reader__ @@ -42,7 +42,6 @@ def batch_generator_creator(batch_size, batch_num): # NOTE: coverage CI can't cover child process code, so need these test. # Here test child process loop function in main process class TestDygraphDataLoaderProcess(unittest.TestCase): - def setUp(self): self.batch_size = 8 self.batch_num = 4 @@ -60,10 +59,12 @@ class TestDygraphDataLoaderProcess(unittest.TestCase): with fluid.dygraph.guard(): loader = fluid.io.DataLoader.from_generator( - capacity=self.batch_num + 1, use_multiprocess=True) - loader.set_batch_generator(batch_generator_creator( - self.batch_size, self.batch_num), - places=fluid.CPUPlace()) + capacity=self.batch_num + 1, use_multiprocess=True + ) + loader.set_batch_generator( + batch_generator_creator(self.batch_size, self.batch_num), + places=fluid.CPUPlace(), + ) loader._data_queue = queue.Queue(self.batch_num + 1) _reader_process_loop(loader._batch_reader, loader._data_queue) # For clean memory mapped files @@ -73,8 +74,9 @@ class TestDygraphDataLoaderProcess(unittest.TestCase): util_queue.put(data) # Clean up memory mapped files - clear_process = multiprocessing.Process(target=__clear_process__, - args=(util_queue, )) + clear_process = multiprocessing.Process( + target=__clear_process__, args=(util_queue,) + ) clear_process.start() def test_reader_process_loop(self): @@ -83,9 +85,7 @@ class TestDygraphDataLoaderProcess(unittest.TestCase): self.func_test_reader_process_loop() def func_test_reader_process_loop_simple_none(self): - def none_sample_genarator(batch_num): - def __reader__(): for _ in range(batch_num): yield None @@ -94,9 +94,11 @@ class TestDygraphDataLoaderProcess(unittest.TestCase): with fluid.dygraph.guard(): loader = fluid.io.DataLoader.from_generator( - capacity=self.batch_num + 1, use_multiprocess=True) - loader.set_batch_generator(none_sample_genarator(self.batch_num), - places=fluid.CPUPlace()) + capacity=self.batch_num + 1, use_multiprocess=True + ) + loader.set_batch_generator( + none_sample_genarator(self.batch_num), places=fluid.CPUPlace() + ) loader._data_queue = queue.Queue(self.batch_num + 1) exception = None try: diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py index 4d61d2a67fd87f4b551eb9b46d907330d27cc5a2..9118788617d88065b4123bd27c5e2487e84e3de2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py @@ -23,7 +23,6 @@ import paddle.fluid.core as core class MLP(fluid.Layer): - def __init__(self, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -37,7 +36,6 @@ class MLP(fluid.Layer): class TestDataParallelStateDict(unittest.TestCase): - def test_data_parallel_state_dict(self): with fluid.dygraph.guard(): strategy = dygraph.parallel.prepare_context() @@ -48,13 +46,17 @@ class 
TestDataParallelStateDict(unittest.TestCase): parallel_state = parallel_mlp.state_dict() base_para = {} - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) for k, v in single_state.items(): self.assertTrue(k in parallel_state) - np.testing.assert_array_equal(v.numpy(), - parallel_state[k].numpy()) + np.testing.assert_array_equal( + v.numpy(), parallel_state[k].numpy() + ) base_para[k] = v.numpy() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_decorator.py b/python/paddle/fluid/tests/unittests/test_imperative_decorator.py index f783b18b2b2e955bd9437e467e4d7d988b2175d7..42407c0b36f3fc2b3fe2af0f47ab1a6519813c3c 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_decorator.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTracerMode(unittest.TestCase): - def setUp(self): self.init_mode = True @@ -60,8 +59,9 @@ class TestTracerMode(unittest.TestCase): decorated_func = fluid.dygraph.no_grad(need_no_grad_func) self.assertTrue( - str(inspect.getfullargspec(decorated_func)) == str( - inspect.getfullargspec(need_no_grad_func))) + str(inspect.getfullargspec(decorated_func)) + == str(inspect.getfullargspec(need_no_grad_func)) + ) self.assertEqual(self.tracer._train_mode, self.init_mode) @@ -79,13 +79,11 @@ class TestTracerMode(unittest.TestCase): class TestTracerMode2(TestTracerMode): - def setUp(self): self.init_mode = False class TestNoGradClass(unittest.TestCase): - @paddle.no_grad() def no_grad_func(self, a): self.assertEqual(self.tracer._train_mode, True) @@ -105,8 +103,10 @@ class TestNoGradClass(unittest.TestCase): return a + b decorated_func = paddle.no_grad()(need_no_grad_func) - self.assertEqual(str(inspect.getfullargspec(decorated_func)), - str(inspect.getfullargspec(need_no_grad_func))) + self.assertEqual( + str(inspect.getfullargspec(decorated_func)), + str(inspect.getfullargspec(need_no_grad_func)), + ) def test_gen(): for i in range(3): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index 3da576045c58711eb0cf9390d79ae53360ade09c..361792f5077f84c80e5d25effc12e423568cbcf3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -28,7 +28,6 @@ from paddle.fluid.framework import _test_eager_guard class DMF(fluid.Layer): - def __init__(self): super(DMF, self).__init__() self._user_latent = Linear(1000, 256) @@ -41,15 +40,23 @@ class DMF(fluid.Layer): self._user_layers.append( self.add_sublayer( 'user_layer_%d' % i, - Linear(256 if i == 0 else self._hid_sizes[i - 1], - self._hid_sizes[i], - act='relu'))) + Linear( + 256 if i == 0 else self._hid_sizes[i - 1], + self._hid_sizes[i], + act='relu', + ), + ) + ) self._item_layers.append( self.add_sublayer( 'item_layer_%d' % i, - Linear(256 if i == 0 else self._hid_sizes[i - 1], - self._hid_sizes[i], - act='relu'))) + Linear( + 256 if i == 0 else self._hid_sizes[i - 1], + self._hid_sizes[i], + act='relu', + ), + ) + ) def forward(self, users, items): users = self._user_latent(users) @@ -62,7 +69,6 @@ class DMF(fluid.Layer): class MLP(fluid.Layer): - def __init__(self): super(MLP, self).__init__() self._user_latent = Linear(1000, 256) @@ -73,22 +79,26 @@ class MLP(fluid.Layer): self._match_layers.append( 
self.add_sublayer( 'match_layer_%d' % i, - Linear(256 * 2 if i == 0 else self._hid_sizes[i - 1], - self._hid_sizes[i], - act='relu'))) + Linear( + 256 * 2 if i == 0 else self._hid_sizes[i - 1], + self._hid_sizes[i], + act='relu', + ), + ) + ) def forward(self, users, items): users = self._user_latent(users) items = self._item_latent(items) - match_vec = fluid.layers.concat([users, items], - axis=len(users.shape) - 1) + match_vec = fluid.layers.concat( + [users, items], axis=len(users.shape) - 1 + ) for l in self._match_layers: match_vec = l(match_vec) return match_vec class DeepCF(fluid.Layer): - def __init__(self, num_users, num_items, matrix): super(DeepCF, self).__init__() self._num_users = num_users @@ -98,7 +108,8 @@ class DeepCF(fluid.Layer): shape=matrix.shape, dtype=matrix.dtype, is_bias=False, - default_initializer=fluid.initializer.NumpyArrayInitializer(matrix)) + default_initializer=fluid.initializer.NumpyArrayInitializer(matrix), + ) self._rating_matrix.stop_gradient = True self._mlp = MLP() @@ -110,20 +121,21 @@ class DeepCF(fluid.Layer): # items_emb = self._item_emb(items) users_emb = fluid.layers.gather(self._rating_matrix, users) items_emb = fluid.layers.gather( - fluid.layers.transpose(self._rating_matrix, [1, 0]), items) + fluid.layers.transpose(self._rating_matrix, [1, 0]), items + ) users_emb.stop_gradient = True items_emb.stop_gradient = True mlp_predictive = self._mlp(users_emb, items_emb) dmf_predictive = self._dmf(users_emb, items_emb) - predictive = fluid.layers.concat([mlp_predictive, dmf_predictive], - axis=len(mlp_predictive.shape) - 1) + predictive = fluid.layers.concat( + [mlp_predictive, dmf_predictive], axis=len(mlp_predictive.shape) - 1 + ) prediction = self._match_fc(predictive) return prediction class TestDygraphDeepCF(unittest.TestCase): - def setUp(self): # Can use Amusic dataset as the DeepCF describes. 
self.data_path = os.environ.get('DATA_PATH', '') @@ -152,9 +164,14 @@ class TestDygraphDeepCF(unittest.TestCase): users_np = np.array(user_ids, dtype=np.int32)[indices] items_np = np.array(item_ids, dtype=np.int32)[indices] labels_np = np.array(labels, dtype=np.float32)[indices] - return np.expand_dims(users_np, -1), \ - np.expand_dims(items_np, -1), \ - np.expand_dims(labels_np, -1), NUM_USERS, NUM_ITEMS, matrix + return ( + np.expand_dims(users_np, -1), + np.expand_dims(items_np, -1), + np.expand_dims(labels_np, -1), + NUM_USERS, + NUM_ITEMS, + matrix, + ) def load_data(self): sys.stderr.write('loading from %s\n' % self.data_path) @@ -194,18 +211,35 @@ class TestDygraphDeepCF(unittest.TestCase): users_np = np.array(user_ids, dtype=np.int32)[indices] items_np = np.array(item_ids, dtype=np.int32)[indices] labels_np = np.array(labels, dtype=np.float32)[indices] - return np.expand_dims(users_np, -1), \ - np.expand_dims(items_np, -1), \ - np.expand_dims(labels_np, -1), num_users, num_items, matrix + return ( + np.expand_dims(users_np, -1), + np.expand_dims(items_np, -1), + np.expand_dims(labels_np, -1), + num_users, + num_items, + matrix, + ) def test_deefcf(self): seed = 90 if self.data_path: - (users_np, items_np, labels_np, num_users, num_items, - matrix) = self.load_data() + ( + users_np, + items_np, + labels_np, + num_users, + num_items, + matrix, + ) = self.load_data() else: - (users_np, items_np, labels_np, num_users, num_items, - matrix) = self.get_data() + ( + users_np, + items_np, + labels_np, + num_users, + num_items, + matrix, + ) = self.get_data() paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) startup = fluid.Program() @@ -220,28 +254,39 @@ class TestDygraphDeepCF(unittest.TestCase): deepcf = DeepCF(num_users, num_items, matrix) prediction = deepcf(users, items) loss = fluid.layers.reduce_sum( - fluid.layers.log_loss(prediction, labels)) + fluid.layers.log_loss(prediction, labels) + ) adam = fluid.optimizer.AdamOptimizer(0.01) adam.minimize(loss) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(startup) for e in range(self.num_epoches): sys.stderr.write('epoch %d\n' % e) - for slice in range(0, self.batch_size * self.num_batches, - self.batch_size): + for slice in range( + 0, self.batch_size * self.num_batches, self.batch_size + ): if slice + self.batch_size >= users_np.shape[0]: break static_loss = exe.run( main, feed={ - users.name: users_np[slice:slice + self.batch_size], - items.name: items_np[slice:slice + self.batch_size], - labels.name: - labels_np[slice:slice + self.batch_size] + users.name: users_np[ + slice : slice + self.batch_size + ], + items.name: items_np[ + slice : slice + self.batch_size + ], + labels.name: labels_np[ + slice : slice + self.batch_size + ], }, - fetch_list=[loss])[0] + fetch_list=[loss], + )[0] sys.stderr.write('static loss %s\n' % static_loss) with fluid.dygraph.guard(): @@ -250,21 +295,27 @@ class TestDygraphDeepCF(unittest.TestCase): deepcf = DeepCF(num_users, num_items, matrix) adam = fluid.optimizer.AdamOptimizer( - 0.01, parameter_list=deepcf.parameters()) + 0.01, parameter_list=deepcf.parameters() + ) for e in range(self.num_epoches): sys.stderr.write('epoch %d\n' % e) - for slice in range(0, self.batch_size * self.num_batches, - self.batch_size): + for slice in range( + 0, self.batch_size * self.num_batches, self.batch_size + ): if slice + 
self.batch_size >= users_np.shape[0]: break prediction = deepcf( - to_variable(users_np[slice:slice + self.batch_size]), - to_variable(items_np[slice:slice + self.batch_size])) + to_variable(users_np[slice : slice + self.batch_size]), + to_variable(items_np[slice : slice + self.batch_size]), + ) loss = fluid.layers.reduce_sum( fluid.layers.log_loss( prediction, - to_variable(labels_np[slice:slice + - self.batch_size]))) + to_variable( + labels_np[slice : slice + self.batch_size] + ), + ) + ) loss.backward() adam.minimize(loss) deepcf.clear_gradients() @@ -277,28 +328,35 @@ class TestDygraphDeepCF(unittest.TestCase): deepcf2 = DeepCF(num_users, num_items, matrix) adam2 = fluid.optimizer.AdamOptimizer( - 0.01, parameter_list=deepcf2.parameters()) + 0.01, parameter_list=deepcf2.parameters() + ) fluid.set_flags({'FLAGS_sort_sum_gradient': True}) for e in range(self.num_epoches): sys.stderr.write('epoch %d\n' % e) - for slice in range(0, self.batch_size * self.num_batches, - self.batch_size): + for slice in range( + 0, self.batch_size * self.num_batches, self.batch_size + ): if slice + self.batch_size >= users_np.shape[0]: break prediction2 = deepcf2( - to_variable(users_np[slice:slice + self.batch_size]), - to_variable(items_np[slice:slice + self.batch_size])) + to_variable(users_np[slice : slice + self.batch_size]), + to_variable(items_np[slice : slice + self.batch_size]), + ) loss2 = fluid.layers.reduce_sum( fluid.layers.log_loss( prediction2, - to_variable(labels_np[slice:slice + - self.batch_size]))) + to_variable( + labels_np[slice : slice + self.batch_size] + ), + ) + ) loss2.backward() adam2.minimize(loss2) deepcf2.clear_gradients() dy_loss2 = loss2.numpy() - sys.stderr.write('dynamic loss: %s %s\n' % - (slice, dy_loss2)) + sys.stderr.write( + 'dynamic loss: %s %s\n' % (slice, dy_loss2) + ) with fluid.dygraph.guard(): with _test_eager_guard(): @@ -309,30 +367,39 @@ class TestDygraphDeepCF(unittest.TestCase): deepcf = DeepCF(num_users, num_items, matrix) adam = fluid.optimizer.AdamOptimizer( - 0.01, parameter_list=deepcf.parameters()) + 0.01, parameter_list=deepcf.parameters() + ) for e in range(self.num_epoches): sys.stderr.write('epoch %d\n' % e) - for slice in range(0, self.batch_size * self.num_batches, - self.batch_size): + for slice in range( + 0, self.batch_size * self.num_batches, self.batch_size + ): if slice + self.batch_size >= users_np.shape[0]: break prediction = deepcf( - to_variable(users_np[slice:slice + - self.batch_size]), - to_variable(items_np[slice:slice + - self.batch_size])) + to_variable( + users_np[slice : slice + self.batch_size] + ), + to_variable( + items_np[slice : slice + self.batch_size] + ), + ) loss = fluid.layers.reduce_sum( fluid.layers.log_loss( prediction, - to_variable(labels_np[slice:slice + - self.batch_size]))) + to_variable( + labels_np[slice : slice + self.batch_size] + ), + ) + ) loss.backward() adam.minimize(loss) deepcf.clear_gradients() eager_loss = loss.numpy() - sys.stderr.write('eager loss: %s %s\n' % - (slice, eager_loss)) + sys.stderr.write( + 'eager loss: %s %s\n' % (slice, eager_loss) + ) self.assertEqual(static_loss, dy_loss) self.assertEqual(static_loss, dy_loss2) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index 1d77c5132399b9748689df9eab2d2a18fd9a096a..140f1191c96444642990f1bce44c3ebc6e35a4da 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard def _dygraph_guard_(func): - def __impl__(*args, **kwargs): if fluid._non_static_mode(): return func(*args, **kwargs) @@ -43,7 +42,6 @@ def random_var(size, low=-1, high=1, dtype='float32'): class TestEagerGrad(TestCase): - def func_simple_example_eager_grad(self): np.random.seed(2021) paddle.set_device('cpu') @@ -184,8 +182,9 @@ class TestEagerGrad(TestCase): out4 = paddle.mean(out3) egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3]) - np.testing.assert_array_equal(dout2_record_by_hook[0].numpy(), - np.array([1.0, 2.0])) + np.testing.assert_array_equal( + dout2_record_by_hook[0].numpy(), np.array([1.0, 2.0]) + ) x1 = paddle.to_tensor([1.0, 2.0]) x1.stop_gradient = False @@ -205,27 +204,30 @@ class TestEagerGrad(TestCase): class TestDygraphDoubleGrad(TestCase): - def setUp(self): self.sort_sum_gradient = False self.shape = [5, 10] - def grad(self, - outputs, - inputs, - grad_outputs=None, - no_grad_vars=None, - retain_graph=None, - create_graph=False, - allow_unused=False): + def grad( + self, + outputs, + inputs, + grad_outputs=None, + no_grad_vars=None, + retain_graph=None, + create_graph=False, + allow_unused=False, + ): fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad(outputs=outputs, - inputs=inputs, - grad_outputs=grad_outputs, - no_grad_vars=no_grad_vars, - retain_graph=retain_graph, - create_graph=create_graph, - allow_unused=allow_unused) + return fluid.dygraph.grad( + outputs=outputs, + inputs=inputs, + grad_outputs=grad_outputs, + no_grad_vars=no_grad_vars, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=allow_unused, + ) @dygraph_guard def func_exception(self): @@ -247,12 +249,16 @@ class TestDygraphDoubleGrad(TestCase): self.grad([random_var(shape)], [1]) with self.assertRaises(AssertionError): - self.grad([random_var(shape), random_var(shape)], - [random_var(shape)], [random_var(shape)]) + self.grad( + [random_var(shape), random_var(shape)], + [random_var(shape)], + [random_var(shape)], + ) with self.assertRaises(AssertionError): - self.grad([random_var(shape)], [random_var(shape)], - no_grad_vars=[1]) + self.grad( + [random_var(shape)], [random_var(shape)], no_grad_vars=[1] + ) with self.assertRaises(AssertionError): self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1) @@ -269,31 +275,33 @@ class TestDygraphDoubleGrad(TestCase): y = x + 1 for create_graph in [False, True]: - dx, = self.grad([x], [x], - create_graph=create_graph, - retain_graph=True) + (dx,) = self.grad( + [x], [x], create_graph=create_graph, retain_graph=True + ) self.assertEqual(dx.shape, x.shape) self.assertTrue(np.all(dx.numpy() == 1)) self.assertNotEqual(dx.stop_gradient, create_graph) - dx_mul_2, = self.grad([y, x], [x], - create_graph=create_graph, - retain_graph=True) + (dx_mul_2,) = self.grad( + [y, x], [x], create_graph=create_graph, retain_graph=True + ) self.assertEqual(dx_mul_2.shape, x.shape) self.assertTrue(np.all(dx_mul_2.numpy() == 2)) self.assertNotEqual(dx_mul_2.stop_gradient, create_graph) - none_grad, = self.grad([x], [y], - create_graph=create_graph, - allow_unused=True) + (none_grad,) = self.grad( + [x], [y], create_graph=create_graph, allow_unused=True + ) self.assertTrue(none_grad is None) - grad_with_none_and_not_none, = self.grad([x, y], [y], - create_graph=create_graph) + (grad_with_none_and_not_none,) = self.grad( + [x, y], [y], 
create_graph=create_graph + ) self.assertTrue(grad_with_none_and_not_none.shape, x.shape) self.assertTrue(np.all(grad_with_none_and_not_none.numpy() == 1)) - self.assertNotEqual(grad_with_none_and_not_none.stop_gradient, - create_graph) + self.assertNotEqual( + grad_with_none_and_not_none.stop_gradient, create_graph + ) def test_simple_example(self): with _test_eager_guard(): @@ -315,15 +323,20 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y1, z, w - dx_actual, = self.grad([w_mean], [x], - create_graph=True, - no_grad_vars=[y2]) + (dx_actual,) = self.grad( + [w_mean], [x], create_graph=True, no_grad_vars=[y2] + ) self.assertFalse(y2.stop_gradient) self.assertFalse(dx_actual.stop_gradient) - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 + / float(numel) + * (np.maximum(x_np, 0) + y2.numpy()) + * (x_np > 0) + * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) @@ -340,11 +353,12 @@ class TestDygraphDoubleGrad(TestCase): half_numel = int(numel / 2) half_x_positive = np.random.uniform(low=1, high=2, size=[half_numel]) - half_x_negative = np.random.uniform(low=-2, - high=-1, - size=[numel - half_numel]) - x_np = np.array(list(half_x_positive) + - list(half_x_negative)).astype('float32') + half_x_negative = np.random.uniform( + low=-2, high=-1, size=[numel - half_numel] + ) + x_np = np.array(list(half_x_positive) + list(half_x_negative)).astype( + 'float32' + ) np.random.shuffle(x_np) x = fluid.dygraph.to_variable(x_np) @@ -359,8 +373,9 @@ class TestDygraphDoubleGrad(TestCase): relu_x_np = np.maximum(x_np, alpha * x_np).astype('float32') relu_x_grad_np = ((x_np > 0) + (x_np < 0) * alpha).astype('float32') dy_expected = (relu_x_np * relu_x_grad_np * 2).astype('float32') - dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np * - 4).astype('float32') + dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np * 4).astype( + 'float32' + ) random_grad_y = random_var(y.shape, low=1, high=2) random_grad_z = random_var(z.shape, low=1, high=2) @@ -373,31 +388,39 @@ class TestDygraphDoubleGrad(TestCase): for grad_y in [random_grad_y]: for grad_z in [random_grad_z]: for create_graph in [False, True]: - dx_actual, = self.grad(outputs=[y, z], - inputs=[x], - grad_outputs=[grad_y, grad_z], - create_graph=create_graph, - retain_graph=True) + (dx_actual,) = self.grad( + outputs=[y, z], + inputs=[x], + grad_outputs=[grad_y, grad_z], + create_graph=create_graph, + retain_graph=True, + ) - grad_y_np = ones_grad_y if grad_y is None else grad_y.numpy( + grad_y_np = ( + ones_grad_y if grad_y is None else grad_y.numpy() ) - grad_z_np = ones_grad_z if grad_z is None else grad_z.numpy( + grad_z_np = ( + ones_grad_z if grad_z is None else grad_z.numpy() ) - dx_expected = dy_expected * grad_y_np + dz_expected * grad_z_np - np.testing.assert_allclose(dx_actual.numpy(), - dx_expected, - rtol=1e-05) + dx_expected = ( + dy_expected * grad_y_np + dz_expected * grad_z_np + ) + np.testing.assert_allclose( + dx_actual.numpy(), dx_expected, rtol=1e-05 + ) if grad_y is not None: self.assertTrue(grad_y.stop_gradient) - np.testing.assert_array_equal(grad_y.numpy(), - original_random_grad_y) + np.testing.assert_array_equal( + grad_y.numpy(), original_random_grad_y + ) if grad_z is not None: self.assertTrue(grad_z.stop_gradient) - np.testing.assert_array_equal(grad_z.numpy(), - original_random_grad_z) + np.testing.assert_array_equal( + grad_z.numpy(), 
original_random_grad_z + ) def test_none_one_initial_gradient(self): with _test_eager_guard(): @@ -418,35 +441,39 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y, z, w - dx_actual, = self.grad([w_mean], [x], create_graph=True) + (dx_actual,) = self.grad([w_mean], [x], create_graph=True) del w_mean self.assertFalse(dx_actual.stop_gradient) # Theoritical result based on math calculation - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x) loss.backward(retain_graph=True) x_grad_actual = x.gradient() - x_grad_expected = (2.0 / float(numel) * - (x_np + dx_expected * - (x_np > 0) * 2 / float(numel))).astype('float32') + x_grad_expected = ( + 2.0 + / float(numel) + * (x_np + dx_expected * (x_np > 0) * 2 / float(numel)) + ).astype('float32') np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05) for i in range(5): loss.backward(retain_graph=True) x_grad_actual = x.gradient() - x_grad_expected = ( - i + 2) * (2.0 / float(numel) * - (x_np + dx_expected * - (x_np > 0) * 2 / float(numel))).astype('float32') - np.testing.assert_allclose(x_grad_actual, - x_grad_expected, - rtol=1e-05) + x_grad_expected = (i + 2) * ( + 2.0 + / float(numel) + * (x_np + dx_expected * (x_np > 0) * 2 / float(numel)) + ).astype('float32') + np.testing.assert_allclose( + x_grad_actual, x_grad_expected, rtol=1e-05 + ) def test_example_with_gradient_accumulation_and_create_graph(self): with _test_eager_guard(): @@ -468,25 +495,35 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y1, z, w - dx_actual, = self.grad([w_mean], [x], - retain_graph=True, - create_graph=True, - no_grad_vars=[y2]) + (dx_actual,) = self.grad( + [w_mean], + [x], + retain_graph=True, + create_graph=True, + no_grad_vars=[y2], + ) self.assertFalse(y2.stop_gradient) self.assertFalse(dx_actual.stop_gradient) - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 + / float(numel) + * (np.maximum(x_np, 0) + y2.numpy()) + * (x_np > 0) + * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x) loss.backward() x_grad_actual = x.gradient() - x_grad_expected = (2.0 / float(numel) * - (x_np + dx_expected * - (x_np > 0) * 4 / float(numel))).astype('float32') + x_grad_expected = ( + 2.0 + / float(numel) + * (x_np + dx_expected * (x_np > 0) * 4 / float(numel)) + ).astype('float32') np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05) def test_example_with_gradient_accumulation_and_no_grad_vars(self): @@ -508,13 +545,14 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y, z, w - dx_actual, = self.grad([w_mean], [x], create_graph=False) + (dx_actual,) = self.grad([w_mean], [x], create_graph=False) del w_mean self.assertTrue(dx_actual.stop_gradient) - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) @@ -532,17 +570,18 @@ class 
TestDygraphDoubleGrad(TestCase): class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad): - def setUp(self): self.sort_sum_gradient = True self.shape = [5, 10] class TestDygraphDoubleGradVisitedUniq(TestCase): - def func_compare(self): - value = np.random.uniform(-0.5, 0.5, 100).reshape(10, 2, - 5).astype("float32") + value = ( + np.random.uniform(-0.5, 0.5, 100) + .reshape(10, 2, 5) + .astype("float32") + ) def model_f(input): linear = fluid.dygraph.Linear(5, 3, bias_attr=False) @@ -563,11 +602,13 @@ class TestDygraphDoubleGradVisitedUniq(TestCase): out = model_f(a) - dx = fluid.dygraph.grad(outputs=[out], - inputs=[a], - create_graph=False, - only_inputs=True, - allow_unused=False) + dx = fluid.dygraph.grad( + outputs=[out], + inputs=[a], + create_graph=False, + only_inputs=True, + allow_unused=False, + ) grad_1 = dx[0].numpy() @@ -591,17 +632,15 @@ class TestDygraphDoubleGradVisitedUniq(TestCase): class TestRaiseNoDoubleGradOp(TestCase): - def raise_no_grad_op(self): with fluid.dygraph.guard(): x = fluid.layers.ones(shape=[2, 3, 2, 2], dtype='float32') x.stop_gradient = False y = paddle.fluid.layers.group_norm(x, groups=1) - dx = fluid.dygraph.grad(outputs=[y], - inputs=[x], - create_graph=True, - retain_graph=True)[0] + dx = fluid.dygraph.grad( + outputs=[y], inputs=[x], create_graph=True, retain_graph=True + )[0] loss = fluid.layers.reduce_mean(dx) loss.backward() @@ -611,7 +650,6 @@ class TestRaiseNoDoubleGradOp(TestCase): class TestDoubleGradResNet(TestCase): - def setUp(self): paddle.seed(123) paddle.framework.random._manual_program_seed(123) @@ -626,7 +664,8 @@ class TestDoubleGradResNet(TestCase): egr_out = model(egr_data) egr_preds = paddle.argmax(egr_out, axis=1) egr_label_onehot = paddle.nn.functional.one_hot( - paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]) + paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1] + ) egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1) egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0] @@ -638,8 +677,9 @@ class TestDoubleGradResNet(TestCase): data.stop_gradient = False out = model(data) preds = paddle.argmax(out, axis=1) - label_onehot = paddle.nn.functional.one_hot(paddle.to_tensor(preds), - num_classes=out.shape[1]) + label_onehot = paddle.nn.functional.one_hot( + paddle.to_tensor(preds), num_classes=out.shape[1] + ) target = paddle.sum(out * label_onehot, axis=1) g = paddle.grad(outputs=target, inputs=out)[0] @@ -658,7 +698,8 @@ class TestDoubleGradResNet(TestCase): egr_out = model(egr_data) egr_preds = paddle.argmax(egr_out, axis=1) egr_label_onehot = paddle.nn.functional.one_hot( - paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1]) + paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1] + ) egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1) egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0] @@ -670,8 +711,9 @@ class TestDoubleGradResNet(TestCase): data.stop_gradient = False out = model(data) preds = paddle.argmax(out, axis=1) - label_onehot = paddle.nn.functional.one_hot(paddle.to_tensor(preds), - num_classes=out.shape[1]) + label_onehot = paddle.nn.functional.one_hot( + paddle.to_tensor(preds), num_classes=out.shape[1] + ) target = paddle.sum(out * label_onehot, axis=1) g = paddle.grad(outputs=target, inputs=out)[0] @@ -683,24 +725,23 @@ class TestDoubleGradResNet(TestCase): class TestDoubleGradBasics(TestCase): - def test_matmul(self): input_numpy = np.ones([3, 3]) * 2 with _test_eager_guard(): - x = paddle.to_tensor(input_numpy, - stop_gradient=False, - dtype='float32') 
- y = paddle.to_tensor(input_numpy, - stop_gradient=False, - dtype='float32') - grad_out = paddle.to_tensor(np.ones([3, 3]), - stop_gradient=False, - dtype='float32') + x = paddle.to_tensor( + input_numpy, stop_gradient=False, dtype='float32' + ) + y = paddle.to_tensor( + input_numpy, stop_gradient=False, dtype='float32' + ) + grad_out = paddle.to_tensor( + np.ones([3, 3]), stop_gradient=False, dtype='float32' + ) out = paddle.matmul(x, y, False, False) - new_x_g, new_y_g = paddle.grad([out], [x, y], [grad_out], - retain_graph=True, - create_graph=True) + new_x_g, new_y_g = paddle.grad( + [out], [x, y], [grad_out], retain_graph=True, create_graph=True + ) new_x_g.backward() out_ref = np.ones([3, 3]) * 12.0 @@ -718,8 +759,9 @@ class TestDoubleGradBasics(TestCase): np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref) grad_out_grad_ref = np.ones([3, 3]) * 6.0 - np.testing.assert_array_equal(grad_out.grad.numpy(), - grad_out_grad_ref) + np.testing.assert_array_equal( + grad_out.grad.numpy(), grad_out_grad_ref + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_framework.py b/python/paddle/fluid/tests/unittests/test_imperative_framework.py index 3a1e7c9cd127094011438936afbe5dc4aab27af2..dc23f8b0a97b0bc70d19376a3a3357ed7eced5fe 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_framework.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_framework.py @@ -20,23 +20,28 @@ from paddle.fluid.framework import _test_eager_guard class MLP(fluid.Layer): - def __init__(self, input_size): super(MLP, self).__init__() self._linear1 = fluid.dygraph.Linear( input_size, 3, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + ) self._linear2 = fluid.dygraph.Linear( 3, 4, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.1))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1) + ), + ) def forward(self, inputs): x = self._linear1(inputs) @@ -46,19 +51,18 @@ class MLP(fluid.Layer): class TestDygraphFramework(unittest.TestCase): - def func_test_dygraph_backward(self): with new_program_scope(): mlp = MLP(input_size=2) - var_inp = fluid.layers.data("input", - shape=[2, 2], - dtype="float32", - append_batch_size=False) + var_inp = fluid.layers.data( + "input", shape=[2, 2], dtype="float32", append_batch_size=False + ) out = mlp(var_inp) try: out.backward() raise AssertionError( - "backward should not be usable in static graph mode") + "backward should not be usable in static graph mode" + ) except AssertionError as e: self.assertTrue((e is not None)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 9edecc89989562a23d859677b0ab8efb754b0b6a..8cfe9befb629dfb0052aaa9d80c9e3652c1dc403 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -26,7 +26,6 @@ from paddle.fluid.framework import _test_eager_guard class Discriminator(fluid.Layer): - def __init__(self): 
super(Discriminator, self).__init__() self._fc1 = Linear(1, 32, act='elu') @@ -39,7 +38,6 @@ class Discriminator(fluid.Layer): class Generator(fluid.Layer): - def __init__(self): super(Generator, self).__init__() self._fc1 = Linear(2, 64, act='elu') @@ -54,7 +52,6 @@ class Generator(fluid.Layer): class TestDygraphGAN(unittest.TestCase): - def func_test_gan_float32(self): seed = 90 paddle.seed(1) @@ -64,34 +61,38 @@ class TestDygraphGAN(unittest.TestCase): generate_p = fluid.Program() scope = fluid.core.Scope() - with new_program_scope(main=discriminate_p, - startup=startup, - scope=scope): + with new_program_scope( + main=discriminate_p, startup=startup, scope=scope + ): discriminator = Discriminator() generator = Generator() - img = fluid.layers.data(name="img", - shape=[2, 1], - append_batch_size=False) - noise = fluid.layers.data(name="noise", - shape=[2, 2], - append_batch_size=False) + img = fluid.layers.data( + name="img", shape=[2, 1], append_batch_size=False + ) + noise = fluid.layers.data( + name="noise", shape=[2, 2], append_batch_size=False + ) d_real = discriminator(img) d_loss_real = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( x=d_real, - label=fluid.layers.fill_constant(shape=[2, 1], - dtype='float32', - value=1.0))) + label=fluid.layers.fill_constant( + shape=[2, 1], dtype='float32', value=1.0 + ), + ) + ) d_fake = discriminator(generator(noise)) d_loss_fake = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( x=d_fake, - label=fluid.layers.fill_constant(shape=[2, 1], - dtype='float32', - value=0.0))) + label=fluid.layers.fill_constant( + shape=[2, 1], dtype='float32', value=0.0 + ), + ) + ) d_loss = d_loss_real + d_loss_fake @@ -102,42 +103,47 @@ class TestDygraphGAN(unittest.TestCase): discriminator = Discriminator() generator = Generator() - noise = fluid.layers.data(name="noise", - shape=[2, 2], - append_batch_size=False) + noise = fluid.layers.data( + name="noise", shape=[2, 2], append_batch_size=False + ) d_fake = discriminator(generator(noise)) g_loss = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( x=d_fake, - label=fluid.layers.fill_constant(shape=[2, 1], - dtype='float32', - value=1.0))) + label=fluid.layers.fill_constant( + shape=[2, 1], dtype='float32', value=1.0 + ), + ) + ) sgd = SGDOptimizer(learning_rate=1e-3) sgd.minimize(g_loss) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) static_params = dict() with fluid.scope_guard(scope): img = np.ones([2, 1], np.float32) noise = np.ones([2, 2], np.float32) exe.run(startup) - static_d_loss = exe.run(discriminate_p, - feed={ - 'img': img, - 'noise': noise - }, - fetch_list=[d_loss])[0] - static_g_loss = exe.run(generate_p, - feed={'noise': noise}, - fetch_list=[g_loss])[0] + static_d_loss = exe.run( + discriminate_p, + feed={'img': img, 'noise': noise}, + fetch_list=[d_loss], + )[0] + static_g_loss = exe.run( + generate_p, feed={'noise': noise}, fetch_list=[g_loss] + )[0] # generate_p contains all parameters needed. 
for param in generate_p.global_block().all_parameters(): static_params[param.name] = np.array( - scope.find_var(param.name).get_tensor()) + scope.find_var(param.name).get_tensor() + ) dy_params = dict() with fluid.dygraph.guard(): @@ -146,20 +152,28 @@ class TestDygraphGAN(unittest.TestCase): discriminator = Discriminator() generator = Generator() - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=(discriminator.parameters() + - generator.parameters())) + sgd = SGDOptimizer( + learning_rate=1e-3, + parameter_list=( + discriminator.parameters() + generator.parameters() + ), + ) d_real = discriminator(to_variable(np.ones([2, 1], np.float32))) d_loss_real = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_real, label=to_variable(np.ones([2, 1], np.float32)))) + x=d_real, label=to_variable(np.ones([2, 1], np.float32)) + ) + ) d_fake = discriminator( - generator(to_variable(np.ones([2, 2], np.float32)))) + generator(to_variable(np.ones([2, 2], np.float32))) + ) d_loss_fake = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_fake, label=to_variable(np.zeros([2, 1], np.float32)))) + x=d_fake, label=to_variable(np.zeros([2, 1], np.float32)) + ) + ) d_loss = d_loss_real + d_loss_fake d_loss.backward() @@ -168,10 +182,13 @@ class TestDygraphGAN(unittest.TestCase): generator.clear_gradients() d_fake = discriminator( - generator(to_variable(np.ones([2, 2], np.float32)))) + generator(to_variable(np.ones([2, 2], np.float32))) + ) g_loss = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_fake, label=to_variable(np.ones([2, 1], np.float32)))) + x=d_fake, label=to_variable(np.ones([2, 1], np.float32)) + ) + ) g_loss.backward() sgd.minimize(g_loss) for p in discriminator.parameters(): @@ -189,20 +206,28 @@ class TestDygraphGAN(unittest.TestCase): paddle.framework.random._manual_program_seed(1) discriminator2 = Discriminator() generator2 = Generator() - sgd2 = SGDOptimizer(learning_rate=1e-3, - parameter_list=(discriminator2.parameters() + - generator2.parameters())) + sgd2 = SGDOptimizer( + learning_rate=1e-3, + parameter_list=( + discriminator2.parameters() + generator2.parameters() + ), + ) d_real2 = discriminator2(to_variable(np.ones([2, 1], np.float32))) d_loss_real2 = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_real2, label=to_variable(np.ones([2, 1], np.float32)))) + x=d_real2, label=to_variable(np.ones([2, 1], np.float32)) + ) + ) d_fake2 = discriminator2( - generator2(to_variable(np.ones([2, 2], np.float32)))) + generator2(to_variable(np.ones([2, 2], np.float32))) + ) d_loss_fake2 = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_fake2, label=to_variable(np.zeros([2, 1], np.float32)))) + x=d_fake2, label=to_variable(np.zeros([2, 1], np.float32)) + ) + ) d_loss2 = d_loss_real2 + d_loss_fake2 d_loss2.backward() @@ -211,10 +236,13 @@ class TestDygraphGAN(unittest.TestCase): generator2.clear_gradients() d_fake2 = discriminator2( - generator2(to_variable(np.ones([2, 2], np.float32)))) + generator2(to_variable(np.ones([2, 2], np.float32))) + ) g_loss2 = fluid.layers.reduce_mean( fluid.layers.sigmoid_cross_entropy_with_logits( - x=d_fake2, label=to_variable(np.ones([2, 1], np.float32)))) + x=d_fake2, label=to_variable(np.ones([2, 1], np.float32)) + ) + ) g_loss2.backward() sgd2.minimize(g_loss2) for p in discriminator2.parameters(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py 
b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py index f001e7c4e0c6c064b0cceed851d08c9d2ed1a271..1d2e61b0863ac752a2a0c9621dcc1649d3e86c3a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py @@ -30,7 +30,6 @@ def gen_data(): class GraphConv(fluid.Layer): - def __init__(self, name_scope, in_features, out_features): super(GraphConv, self).__init__(name_scope) @@ -39,10 +38,11 @@ class GraphConv(fluid.Layer): self.weight = self.create_parameter( attr=None, dtype='float32', - shape=[self._in_features, self._out_features]) - self.bias = self.create_parameter(attr=None, - dtype='float32', - shape=[self._out_features]) + shape=[self._in_features, self._out_features], + ) + self.bias = self.create_parameter( + attr=None, dtype='float32', shape=[self._out_features] + ) def forward(self, features, adj): support = fluid.layers.matmul(features, self.weight) @@ -51,7 +51,6 @@ class GraphConv(fluid.Layer): class GCN(fluid.Layer): - def __init__(self, name_scope, num_hidden): super(GCN, self).__init__(name_scope) self.gc = GraphConv(self.full_name(), num_hidden, 32) @@ -63,7 +62,6 @@ class GCN(fluid.Layer): class TestDygraphGNN(unittest.TestCase): - def func_gnn_float32(self): paddle.seed(90) paddle.framework.random._manual_program_seed(90) @@ -72,19 +70,25 @@ class TestDygraphGNN(unittest.TestCase): scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): - features = fluid.layers.data(name='features', - shape=[1, 100, 50], - dtype='float32', - append_batch_size=False) + features = fluid.layers.data( + name='features', + shape=[1, 100, 50], + dtype='float32', + append_batch_size=False, + ) # Use selected rows when it's supported. - adj = fluid.layers.data(name='adj', - shape=[1, 100, 100], - dtype='float32', - append_batch_size=False) - labels = fluid.layers.data(name='labels', - shape=[100, 1], - dtype='int64', - append_batch_size=False) + adj = fluid.layers.data( + name='adj', + shape=[1, 100, 100], + dtype='float32', + append_batch_size=False, + ) + labels = fluid.layers.data( + name='labels', + shape=[100, 1], + dtype='int64', + append_batch_size=False, + ) model = GCN('test_gcn', 50) logits = model(features, adj) @@ -96,21 +100,24 @@ class TestDygraphGNN(unittest.TestCase): adam = AdamOptimizer(learning_rate=1e-3) adam.minimize(loss) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe.run(startup) - static_loss = exe.run(feed={ - 'features': - np.ones([1, 100, 50], dtype=np.float32), - 'adj': - np.ones([1, 100, 100], dtype=np.float32), - 'labels': - np.ones([100, 1], dtype=np.int64) - }, - fetch_list=[loss])[0] + static_loss = exe.run( + feed={ + 'features': np.ones([1, 100, 50], dtype=np.float32), + 'adj': np.ones([1, 100, 100], dtype=np.float32), + 'labels': np.ones([100, 1], dtype=np.int64), + }, + fetch_list=[loss], + )[0] static_weight = np.array( - scope.find_var(model.gc.weight.name).get_tensor()) + scope.find_var(model.gc.weight.name).get_tensor() + ) with fluid.dygraph.guard(): paddle.seed(90) @@ -127,11 +134,13 @@ class TestDygraphGNN(unittest.TestCase): # In other example, it's nll with log_softmax. However, paddle's # log_loss only supports binary classification now. 
loss = fluid.layers.softmax_with_cross_entropy( - logits, to_variable(labels)) + logits, to_variable(labels) + ) loss = fluid.layers.reduce_sum(loss) loss.backward() - adam = AdamOptimizer(learning_rate=1e-3, - parameter_list=model.parameters()) + adam = AdamOptimizer( + learning_rate=1e-3, parameter_list=model.parameters() + ) adam.minimize(loss) model.clear_gradients() @@ -153,24 +162,26 @@ class TestDygraphGNN(unittest.TestCase): # In other example, it's nll with log_softmax. However, paddle's # log_loss only supports binary classification now. loss2 = fluid.layers.softmax_with_cross_entropy( - logits2, to_variable(labels2)) + logits2, to_variable(labels2) + ) loss2 = fluid.layers.reduce_sum(loss2) loss2.backward() - adam2 = AdamOptimizer(learning_rate=1e-3, - parameter_list=model2.parameters()) + adam2 = AdamOptimizer( + learning_rate=1e-3, parameter_list=model2.parameters() + ) adam2.minimize(loss2) model2.clear_gradients() loss2_value = loss2.numpy() model2_gc_weight_value = model2.gc.weight.numpy() self.assertEqual(static_loss, loss_value) - np.testing.assert_allclose(static_weight, - model_gc_weight_value, - rtol=1e-05) + np.testing.assert_allclose( + static_weight, model_gc_weight_value, rtol=1e-05 + ) self.assertEqual(static_loss, loss2_value) - np.testing.assert_allclose(static_weight, - model2_gc_weight_value, - rtol=1e-05) + np.testing.assert_allclose( + static_weight, model2_gc_weight_value, rtol=1e-05 + ) sys.stderr.write('%s %s\n' % (static_loss, loss_value)) def test_gnn_float32(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_group.py b/python/paddle/fluid/tests/unittests/test_imperative_group.py index f67592312c84a01caeea0dbd5c925c0828775abb..672dee430f8896d32211d2ecc9f709553679cbc3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_group.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_group.py @@ -17,11 +17,14 @@ import unittest import paddle import paddle.fluid.core as core -from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph, in_dygraph_mode +from paddle.fluid.framework import ( + _test_eager_guard, + _in_legacy_dygraph, + in_dygraph_mode, +) class TestDataParallelGroup(unittest.TestCase): - def create_varbase(self, dtype, shape): return paddle.rand(shape=shape, dtype=dtype) @@ -38,8 +41,9 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [2, 100])) var_list.append(self.create_varbase("float32", [2, 50])) var_list.append(self.create_varbase("float32", [2, 25])) - res = self.assign_group_by_size(var_list, [False, False, False, False], - [400]) + res = self.assign_group_by_size( + var_list, [False, False, False, False], [400] + ) self.assertEqual([[0], [1], [2], [3]], res) def test_construct_group1(self): @@ -52,7 +56,8 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [False, False, False, False, False, False], [400]) + var_list, [False, False, False, False, False, False], [400] + ) self.assertEqual([[0, 2], [1, 3], [4], [5]], res) def test_construct_group2(self): @@ -62,8 +67,9 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [2, 50])) var_list.append(self.create_varbase("float32", [2, 50])) var_list.append(self.create_varbase("float32", [2, 50])) - res = self.assign_group_by_size(var_list, [False, False, False, False], - [400, 800]) + res = 
self.assign_group_by_size( + var_list, [False, False, False, False], [400, 800] + ) self.assertEqual([[0], [1, 2], [3]], res) def test_construct_group3(self): @@ -76,7 +82,8 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [False, False, False, False, False, False], [200, 400]) + var_list, [False, False, False, False, False, False], [200, 400] + ) self.assertEqual([[0], [1], [2, 4], [3, 5]], res) def test_construct_group4(self): @@ -89,7 +96,8 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [False, False, False, False, False, False], [0]) + var_list, [False, False, False, False, False, False], [0] + ) self.assertEqual([[0], [1], [2], [3], [4], [5]], res) def test_construct_group5(self): @@ -102,23 +110,27 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [False, False, False, False, False, False], [10000]) + var_list, [False, False, False, False, False, False], [10000] + ) self.assertEqual([[0, 2, 4], [1, 3, 5]], res) def test_construct_group6(self): # multi dtype & limit capability & multi tensor type var_list = [] - var_list.append(self.create_varbase( - "float32", - [1, 50], - )) + var_list.append( + self.create_varbase( + "float32", + [1, 50], + ) + ) var_list.append(self.create_varbase("float64", [1, 25])) var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [True, False, False, False, False, True], [400]) + var_list, [True, False, False, False, False, True], [400] + ) self.assertEqual([[0], [1, 3], [2, 4], [5]], res) def test_construct_group7(self): @@ -131,7 +143,8 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [1, 50])) var_list.append(self.create_varbase("float64", [1, 25])) res = self.assign_group_by_size( - var_list, [True, False, False, False, False, True], [200, 400]) + var_list, [True, False, False, False, False, True], [200, 400] + ) self.assertEqual([[0], [1], [2], [3], [4], [5]], res) def test_construct_group8(self): @@ -141,8 +154,9 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [2, 100])) var_list.append(self.create_varbase("float32", [2, 50])) var_list.append(self.create_varbase("float32", [2, 25])) - res = self.assign_group_by_size(var_list, [False, False, False, False], - [400], [3, 0, 1, 2]) + res = self.assign_group_by_size( + var_list, [False, False, False, False], [400], [3, 0, 1, 2] + ) self.assertEqual([[3, 0], [1], [2]], res) def test_construct_group9(self): @@ -152,8 +166,9 @@ class TestDataParallelGroup(unittest.TestCase): var_list.append(self.create_varbase("float32", [2, 25])) var_list.append(self.create_varbase("float32", [2, 25])) var_list.append(self.create_varbase("float32", [2, 1000])) - res = self.assign_group_by_size(var_list, [False, False, False, True], - [300], [1, 0, 2, 3]) + res = self.assign_group_by_size( + var_list, [False, False, False, True], [300], 
[1, 0, 2, 3] + ) self.assertEqual([[1, 0], [3], [2]], res) def test_construct_group_in_legacy_mode(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py b/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py index 1f60bf20e09c8c41735e9af39a3dc6ff859befc5..462f614defaf278488e314e4637742d7113ef052 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py @@ -60,27 +60,37 @@ class Test_Forward_Hook(unittest.TestCase): fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) - input_word = np.array( - [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, - 8]).reshape(6, 3).astype('int64') + input_word = ( + np.array( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8] + ) + .reshape(6, 3) + .astype('int64') + ) input_word1 = input_word * 2 input_word = input_word.reshape((-1, 3, 1)) input_word1 = input_word1.reshape((-1, 3, 1)) - y_data = np.array( - [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, - 9]).reshape(6, 3).astype('int64') + y_data = ( + np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9] + ) + .reshape(6, 3) + .astype('int64') + ) y_data = y_data.reshape((-1, 1)) input = base.to_variable(input_word) input1 = base.to_variable(input_word1) y = base.to_variable(y_data) - simplenet = SimpleNet(hidden_size=20, - vocab_size=32, - num_steps=3, - init_scale=0.1, - is_sparse=False, - dtype="float32") + simplenet = SimpleNet( + hidden_size=20, + vocab_size=32, + num_steps=3, + init_scale=0.1, + is_sparse=False, + dtype="float32", + ) # origin, don't register any hook outs_origin = simplenet(input, y) @@ -88,29 +98,35 @@ class Test_Forward_Hook(unittest.TestCase): # register forward_pre_hook forward_pre_hook_handle1 = simplenet.register_forward_pre_hook( - forward_pre_hook1) + forward_pre_hook1 + ) outs_pre_hook = simplenet(input, y) - np.testing.assert_array_equal(outs_pre_hook.numpy(), - outs_origin1.numpy()) + np.testing.assert_array_equal( + outs_pre_hook.numpy(), outs_origin1.numpy() + ) # remove forward_pre_hook forward_pre_hook_handle1.remove() outs_pre_hook = simplenet(input, y) - np.testing.assert_array_equal(outs_pre_hook.numpy(), - outs_origin.numpy()) + np.testing.assert_array_equal( + outs_pre_hook.numpy(), outs_origin.numpy() + ) # register forward_posst_hook - forward_post_hook_handle1 = simplenet.register_forward_post_hook( - forward_post_hook1) + forward_post_hook_handle1 = ( + simplenet.register_forward_post_hook(forward_post_hook1) + ) outs_forward_hook = simplenet(input, y) - np.testing.assert_array_equal(outs_forward_hook.numpy(), - outs_origin.numpy() * 2) + np.testing.assert_array_equal( + outs_forward_hook.numpy(), outs_origin.numpy() * 2 + ) # remove forward_post_hook forward_post_hook_handle1.remove() outs_forward_hook = simplenet(input, y) - np.testing.assert_array_equal(outs_forward_hook.numpy(), - outs_origin.numpy()) + np.testing.assert_array_equal( + outs_forward_hook.numpy(), outs_origin.numpy() + ) # test forward_pre_hook and forward_post_hook that don't have return value def func_forward_hook(self): @@ -129,24 +145,34 @@ class Test_Forward_Hook(unittest.TestCase): global call_forward_post_hook global call_forward_pre_hook - input_word = np.array( - [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, - 8]).reshape(6, 3).astype('int64') + input_word = ( + np.array( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8] + ) + .reshape(6, 3) + 
.astype('int64') + ) input_word = input_word.reshape((-1, 3, 1)) - y_data = np.array( - [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, - 9]).reshape(6, 3).astype('int64') + y_data = ( + np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9] + ) + .reshape(6, 3) + .astype('int64') + ) y_data = y_data.reshape((-1, 1)) input = base.to_variable(input_word) y = base.to_variable(y_data) - simplenet = SimpleNet(hidden_size=20, - vocab_size=32, - num_steps=3, - init_scale=0.1, - is_sparse=False, - dtype="float32") + simplenet = SimpleNet( + hidden_size=20, + vocab_size=32, + num_steps=3, + init_scale=0.1, + is_sparse=False, + dtype="float32", + ) # origin, don't register any hook outs_origin = simplenet(input, y) @@ -155,9 +181,11 @@ class Test_Forward_Hook(unittest.TestCase): # register forward_post_hook and forward_pre_hook forward_post_hook_handle = simplenet.register_forward_post_hook( - forward_post_hook) + forward_post_hook + ) forward_pre_hook_handle = simplenet.register_forward_pre_hook( - forward_pre_hook) + forward_pre_hook + ) outs_hook = simplenet(input, y) self.assertTrue(call_forward_post_hook) self.assertTrue(call_forward_pre_hook) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py index 58e56fc6a6d8c195469be88ee5df0fe8a11409e0..40e24abeccdbd6b1f83e44dc0e33c623c00beac3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py @@ -23,21 +23,25 @@ from paddle.fluid.framework import _test_eager_guard class LeNetDygraph(fluid.dygraph.Layer): - def __init__(self, num_classes=10, classifier_activation='softmax'): super(LeNetDygraph, self).__init__() self.num_classes = num_classes - self.features = nn.Sequential(nn.Conv2D(1, 6, 3, stride=1, padding=1), - nn.ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2), - nn.Conv2D(6, 16, 5, stride=1, padding=0), - nn.ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2)) + self.features = nn.Sequential( + nn.Conv2D(1, 6, 3, stride=1, padding=1), + nn.ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + nn.Conv2D(6, 16, 5, stride=1, padding=0), + nn.ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + ) if num_classes > 0: - self.fc = nn.Sequential(nn.Linear(400, 120), nn.Linear(120, 84), - nn.Linear(84, 10), - nn.Softmax()) #Todo: accept any activation + self.fc = nn.Sequential( + nn.Linear(400, 120), + nn.Linear(120, 84), + nn.Linear(84, 10), + nn.Softmax(), + ) # Todo: accept any activation def forward(self, inputs): x = self.features(inputs) @@ -50,27 +54,26 @@ class LeNetDygraph(fluid.dygraph.Layer): def init_weights(layer): if type(layer) == nn.Linear: - new_weight = paddle.fluid.layers.fill_constant(layer.weight.shape, - layer.weight.dtype, - value=0.9) + new_weight = paddle.fluid.layers.fill_constant( + layer.weight.shape, layer.weight.dtype, value=0.9 + ) layer.weight.set_value(new_weight) - new_bias = paddle.fluid.layers.fill_constant(layer.bias.shape, - layer.bias.dtype, - value=-0.1) + new_bias = paddle.fluid.layers.fill_constant( + layer.bias.shape, layer.bias.dtype, value=-0.1 + ) layer.bias.set_value(new_bias) elif type(layer) == nn.Conv2D: - new_weight = paddle.fluid.layers.fill_constant(layer.weight.shape, - layer.weight.dtype, - value=0.7) + new_weight = paddle.fluid.layers.fill_constant( + layer.weight.shape, layer.weight.dtype, value=0.7 + ) layer.weight.set_value(new_weight) - new_bias = 
paddle.fluid.layers.fill_constant(layer.bias.shape, - layer.bias.dtype, - value=-0.2) + new_bias = paddle.fluid.layers.fill_constant( + layer.bias.shape, layer.bias.dtype, value=-0.2 + ) layer.bias.set_value(new_bias) class TestLayerApply(unittest.TestCase): - def func_apply_init_weight(self): with fluid.dygraph.guard(): net = LeNetDygraph() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py index ded833bd9a8362cf5da475fd931a366c4008522c..4eeaf8da857848cfc8a3e6f13e8b31f8213d2238 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py @@ -23,15 +23,16 @@ from paddle.fluid.framework import _test_eager_guard class LeNetDygraph(fluid.dygraph.Layer): - def __init__(self): super(LeNetDygraph, self).__init__() - self.features = nn.Sequential(nn.Conv2D(1, 6, 3, stride=1, padding=1), - nn.ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2), - nn.Conv2D(6, 16, 5, stride=1, padding=0), - nn.ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2)) + self.features = nn.Sequential( + nn.Conv2D(1, 6, 3, stride=1, padding=1), + nn.ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + nn.Conv2D(6, 16, 5, stride=1, padding=0), + nn.ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + ) def forward(self, inputs): x = self.features(inputs) @@ -39,7 +40,6 @@ class LeNetDygraph(fluid.dygraph.Layer): class TestLayerChildren(unittest.TestCase): - def func_apply_init_weight(self): with fluid.dygraph.guard(): net = LeNetDygraph() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py index c359d99c81946a127182faf36ed23711df64817c..b0dcfd653fb750d7a1cdb4a9851d424bc92f6331 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestImperativeLayerTrainable(unittest.TestCase): - def func_set_trainable(self): with fluid.dygraph.guard(): label = np.random.uniform(-1, 1, [10, 10]).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layers.py b/python/paddle/fluid/tests/unittests/test_imperative_layers.py index e2b9855af41c10a567d226bbb8b9e4ffcae262d1..e0cd3d4f88f64bc45917b4c244d4a3dcf3a5201e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layers.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layers.py @@ -18,7 +18,6 @@ from paddle.fluid.framework import _test_eager_guard class TestLayerPrint(unittest.TestCase): - def func_test_layer_str(self): module = nn.ELU(0.2) self.assertEqual(str(module), 'ELU(alpha=0.2)') @@ -39,13 +38,14 @@ class TestLayerPrint(unittest.TestCase): self.assertEqual(str(module), 'Tanh(name=Tanh)') module = nn.Hardtanh(name="Hardtanh") - self.assertEqual(str(module), - 'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)') + self.assertEqual( + str(module), 'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)' + ) module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW") self.assertEqual( str(module), - 'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)' + 'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)', ) module = nn.ReLU() @@ -57,7 +57,8 @@ class TestLayerPrint(unittest.TestCase): module = nn.SELU() 
self.assertEqual( str(module), - 'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)') + 'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)', + ) module = nn.LeakyReLU() self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)') @@ -101,32 +102,35 @@ class TestLayerPrint(unittest.TestCase): module = nn.Linear(2, 4, name='linear') self.assertEqual( str(module), - 'Linear(in_features=2, out_features=4, dtype=float32, name=linear)') + 'Linear(in_features=2, out_features=4, dtype=float32, name=linear)', + ) module = nn.Upsample(size=[12, 12]) self.assertEqual( str(module), - 'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)' + 'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)', ) module = nn.UpsamplingNearest2D(size=[12, 12]) self.assertEqual( - str(module), 'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)') + str(module), 'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)' + ) module = nn.UpsamplingBilinear2D(size=[12, 12]) self.assertEqual( - str(module), - 'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)') + str(module), 'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)' + ) module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000) self.assertEqual( str(module), - 'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)' + 'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)', ) module = nn.Dropout(p=0.5) - self.assertEqual(str(module), - 'Dropout(p=0.5, axis=None, mode=upscale_in_train)') + self.assertEqual( + str(module), 'Dropout(p=0.5, axis=None, mode=upscale_in_train)' + ) module = nn.Dropout2D(p=0.5) self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)') @@ -140,22 +144,24 @@ class TestLayerPrint(unittest.TestCase): module = nn.Pad1D(padding=[1, 2], mode='constant') self.assertEqual( str(module), - 'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)') + 'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)', + ) module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant') self.assertEqual( str(module), - 'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)' + 'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)', ) module = nn.ZeroPad2D(padding=[1, 0, 1, 2]) - self.assertEqual(str(module), - 'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)') + self.assertEqual( + str(module), 'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)' + ) module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant') self.assertEqual( str(module), - 'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)' + 'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)', ) module = nn.CosineSimilarity(axis=0) @@ -165,107 +171,127 @@ class TestLayerPrint(unittest.TestCase): self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)') module = nn.Conv1D(3, 2, 3) - self.assertEqual(str(module), - 'Conv1D(3, 2, kernel_size=[3], data_format=NCL)') + self.assertEqual( + str(module), 'Conv1D(3, 2, kernel_size=[3], data_format=NCL)' + ) module = nn.Conv1DTranspose(2, 1, 2) self.assertEqual( str(module), - 'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)') + 'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)', + ) module = nn.Conv2D(4, 6, (3, 3)) - self.assertEqual(str(module), - 'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)') + self.assertEqual( + str(module), 'Conv2D(4, 6, kernel_size=[3, 3], 
data_format=NCHW)' + ) module = nn.Conv2DTranspose(4, 6, (3, 3)) self.assertEqual( str(module), - 'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)') + 'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)', + ) module = nn.Conv3D(4, 6, (3, 3, 3)) self.assertEqual( str(module), - 'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)') + 'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)', + ) module = nn.Conv3DTranspose(4, 6, (3, 3, 3)) self.assertEqual( str(module), - 'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)') + 'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)', + ) module = nn.PairwiseDistance() self.assertEqual(str(module), 'PairwiseDistance(p=2.0)') module = nn.InstanceNorm1D(2) - self.assertEqual(str(module), - 'InstanceNorm1D(num_features=2, epsilon=1e-05)') + self.assertEqual( + str(module), 'InstanceNorm1D(num_features=2, epsilon=1e-05)' + ) module = nn.InstanceNorm2D(2) - self.assertEqual(str(module), - 'InstanceNorm2D(num_features=2, epsilon=1e-05)') + self.assertEqual( + str(module), 'InstanceNorm2D(num_features=2, epsilon=1e-05)' + ) module = nn.InstanceNorm3D(2) - self.assertEqual(str(module), - 'InstanceNorm3D(num_features=2, epsilon=1e-05)') + self.assertEqual( + str(module), 'InstanceNorm3D(num_features=2, epsilon=1e-05)' + ) module = nn.GroupNorm(num_channels=6, num_groups=6) self.assertEqual( str(module), - 'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)') + 'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)', + ) module = nn.LayerNorm([2, 2, 3]) self.assertEqual( - str(module), 'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)') + str(module), 'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)' + ) module = nn.BatchNorm1D(1) self.assertEqual( str(module), - 'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)' + 'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)', ) module = nn.BatchNorm2D(1) self.assertEqual( str(module), - 'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)') + 'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)', + ) module = nn.BatchNorm3D(1) self.assertEqual( str(module), - 'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)' + 'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)', ) module = nn.SyncBatchNorm(2) self.assertEqual( str(module), - 'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)') + 'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)', + ) module = nn.LocalResponseNorm(size=5) self.assertEqual( str(module), - 'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)') + 'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)', + ) module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'AvgPool1D(kernel_size=2, stride=2, padding=0)') + self.assertEqual( + str(module), 'AvgPool1D(kernel_size=2, stride=2, padding=0)' + ) module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'AvgPool2D(kernel_size=2, stride=2, padding=0)') + self.assertEqual( + str(module), 'AvgPool2D(kernel_size=2, stride=2, padding=0)' + ) module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'AvgPool3D(kernel_size=2, stride=2, padding=0)') + self.assertEqual( + str(module), 'AvgPool3D(kernel_size=2, stride=2, padding=0)' + ) module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'MaxPool1D(kernel_size=2, 
stride=2, padding=0)') + self.assertEqual( + str(module), 'MaxPool1D(kernel_size=2, stride=2, padding=0)' + ) module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'MaxPool2D(kernel_size=2, stride=2, padding=0)') + self.assertEqual( + str(module), 'MaxPool2D(kernel_size=2, stride=2, padding=0)' + ) module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0) - self.assertEqual(str(module), - 'MaxPool3D(kernel_size=2, stride=2, padding=0)') + self.assertEqual( + str(module), 'MaxPool3D(kernel_size=2, stride=2, padding=0)' + ) module = nn.AdaptiveAvgPool1D(output_size=16) self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)') @@ -277,16 +303,19 @@ class TestLayerPrint(unittest.TestCase): self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)') module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True) - self.assertEqual(str(module), - 'AdaptiveMaxPool1D(output_size=16, return_mask=True)') + self.assertEqual( + str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)' + ) module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True) - self.assertEqual(str(module), - 'AdaptiveMaxPool2D(output_size=3, return_mask=True)') + self.assertEqual( + str(module), 'AdaptiveMaxPool2D(output_size=3, return_mask=True)' + ) module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True) - self.assertEqual(str(module), - 'AdaptiveMaxPool3D(output_size=3, return_mask=True)') + self.assertEqual( + str(module), 'AdaptiveMaxPool3D(output_size=3, return_mask=True)' + ) module = nn.SimpleRNNCell(16, 32) self.assertEqual(str(module), 'SimpleRNNCell(16, 32)') @@ -303,49 +332,56 @@ class TestLayerPrint(unittest.TestCase): module = nn.SimpleRNN(16, 32, 2) self.assertEqual( str(module), - 'SimpleRNN(16, 32, num_layers=2\n (0): RNN(\n (cell): SimpleRNNCell(16, 32)\n )\n (1): RNN(\n (cell): SimpleRNNCell(32, 32)\n )\n)' + 'SimpleRNN(16, 32, num_layers=2\n (0): RNN(\n (cell): SimpleRNNCell(16, 32)\n )\n (1): RNN(\n (cell): SimpleRNNCell(32, 32)\n )\n)', ) module = nn.LSTM(16, 32, 2) self.assertEqual( str(module), - 'LSTM(16, 32, num_layers=2\n (0): RNN(\n (cell): LSTMCell(16, 32)\n )\n (1): RNN(\n (cell): LSTMCell(32, 32)\n )\n)' + 'LSTM(16, 32, num_layers=2\n (0): RNN(\n (cell): LSTMCell(16, 32)\n )\n (1): RNN(\n (cell): LSTMCell(32, 32)\n )\n)', ) module = nn.GRU(16, 32, 2) self.assertEqual( str(module), - 'GRU(16, 32, num_layers=2\n (0): RNN(\n (cell): GRUCell(16, 32)\n )\n (1): RNN(\n (cell): GRUCell(32, 32)\n )\n)' + 'GRU(16, 32, num_layers=2\n (0): RNN(\n (cell): GRUCell(16, 32)\n )\n (1): RNN(\n (cell): GRUCell(32, 32)\n )\n)', ) module1 = nn.Sequential( - ('conv1', nn.Conv2D(1, 20, 5)), ('relu1', nn.ReLU()), - ('conv2', nn.Conv2D(20, 64, 5)), ('relu2', nn.ReLU())) + ('conv1', nn.Conv2D(1, 20, 5)), + ('relu1', nn.ReLU()), + ('conv2', nn.Conv2D(20, 64, 5)), + ('relu2', nn.ReLU()), + ) self.assertEqual( str(module1), - 'Sequential(\n '\ - '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n '\ - '(relu1): ReLU()\n '\ - '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n '\ - '(relu2): ReLU()\n)' + 'Sequential(\n ' + '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n ' + '(relu1): ReLU()\n ' + '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n ' + '(relu2): ReLU()\n)', ) module2 = nn.Sequential( nn.Conv3DTranspose(4, 6, (3, 3, 3)), nn.AvgPool3D(kernel_size=2, stride=2, padding=0), - nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)), - nn.MaxPool3D(kernel_size=2, stride=2, padding=0), 
nn.GELU(True)) + nn.Tanh(name="Tanh"), + module1, + nn.Conv3D(4, 6, (3, 3, 3)), + nn.MaxPool3D(kernel_size=2, stride=2, padding=0), + nn.GELU(True), + ) self.assertEqual( str(module2), - 'Sequential(\n '\ - '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n '\ - '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n '\ - '(2): Tanh(name=Tanh)\n '\ - '(3): Sequential(\n (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n (relu1): ReLU()\n'\ - ' (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n (relu2): ReLU()\n )\n '\ - '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n '\ - '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n '\ - '(6): GELU(approximate=True)\n)' + 'Sequential(\n ' + '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n ' + '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n ' + '(2): Tanh(name=Tanh)\n ' + '(3): Sequential(\n (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n (relu1): ReLU()\n' + ' (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n (relu2): ReLU()\n )\n ' + '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n ' + '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n ' + '(6): GELU(approximate=True)\n)', ) def test_layer_str(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py index e61671e39f1698cf8e6db760238d123212cbec09..27a27c3ce70f0e8314f913872a5b6adde6366ea4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py @@ -15,14 +15,23 @@ import unittest import paddle.fluid as fluid import paddle.fluid.framework as framework -from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Conv3D, Embedding, GroupNorm, LayerNorm, Linear, NCE, PRelu +from paddle.fluid.dygraph.nn import ( + BatchNorm, + Conv2D, + Conv3D, + Embedding, + GroupNorm, + LayerNorm, + Linear, + NCE, + PRelu, +) import numpy as np import os import tempfile class TestDygraphLoadStatic(unittest.TestCase): - def testLoadStaticModel(self): # static mode temp_dir = tempfile.TemporaryDirectory() @@ -32,30 +41,26 @@ class TestDygraphLoadStatic(unittest.TestCase): fc_out1 = fluid.layers.fc(a, 10) fc_out2 = fluid.layers.fc(a, 20) - conv_out_1 = fluid.layers.conv2d(conv_in, - num_filters=10, - filter_size=5, - act="relu") - conv_out_2 = fluid.layers.conv2d(conv_in, - num_filters=10, - filter_size=5, - act="relu") - - conv3d_in = fluid.data(name='conv3d_in', - shape=[None, 3, 12, 32, 32], - dtype='float32') - conv3d_out_1 = fluid.layers.conv3d(input=conv3d_in, - num_filters=2, - filter_size=3, - act="relu") - conv3d_out_2 = fluid.layers.conv3d(input=conv3d_in, - num_filters=2, - filter_size=3, - act="relu") - - batchnorm_in = fluid.data(name="batchnorm_in", - shape=[None, 10], - dtype='float32') + conv_out_1 = fluid.layers.conv2d( + conv_in, num_filters=10, filter_size=5, act="relu" + ) + conv_out_2 = fluid.layers.conv2d( + conv_in, num_filters=10, filter_size=5, act="relu" + ) + + conv3d_in = fluid.data( + name='conv3d_in', shape=[None, 3, 12, 32, 32], dtype='float32' + ) + conv3d_out_1 = fluid.layers.conv3d( + input=conv3d_in, num_filters=2, filter_size=3, act="relu" + ) + conv3d_out_2 = fluid.layers.conv3d( + input=conv3d_in, num_filters=2, filter_size=3, act="relu" + ) + + batchnorm_in = fluid.data( + name="batchnorm_in", shape=[None, 10], dtype='float32' + ) 
batchnorm_out_1 = fluid.layers.batch_norm(batchnorm_in) batchnorm_out_2 = fluid.layers.batch_norm(batchnorm_in) @@ -68,53 +73,56 @@ class TestDygraphLoadStatic(unittest.TestCase): layernorm_2 = fluid.layers.layer_norm(layernorm) nce_in = fluid.data(name="nce_in", shape=[None, 100], dtype='float32') - nce_label = fluid.data(name="nce_label", - shape=[None, 10], - dtype='int64') + nce_label = fluid.data( + name="nce_label", shape=[None, 10], dtype='int64' + ) nce_out_1 = fluid.layers.nce(nce_in, nce_label, 10000) nce_out_2 = fluid.layers.nce(nce_in, nce_label, 10000) - prelu_in = fluid.data(name="prelu_in", - shape=[None, 5, 10, 10], - dtype='float32') + prelu_in = fluid.data( + name="prelu_in", shape=[None, 5, 10, 10], dtype='float32' + ) prelu_out_1 = fluid.layers.prelu(prelu_in, "channel") prelu_out_2 = fluid.layers.prelu(prelu_in, "channel") - bilinear_tensor_pro_x = fluid.data("t1", - shape=[None, 5], - dtype="float32") - bilinear_tensor_pro_y = fluid.data("t2", - shape=[None, 4], - dtype="float32") + bilinear_tensor_pro_x = fluid.data( + "t1", shape=[None, 5], dtype="float32" + ) + bilinear_tensor_pro_y = fluid.data( + "t2", shape=[None, 4], dtype="float32" + ) bilinear_tensor_pro_out_1 = fluid.layers.bilinear_tensor_product( - x=bilinear_tensor_pro_x, y=bilinear_tensor_pro_y, size=1000) + x=bilinear_tensor_pro_x, y=bilinear_tensor_pro_y, size=1000 + ) bilinear_tensor_pro_out_2 = fluid.layers.bilinear_tensor_product( - x=bilinear_tensor_pro_x, y=bilinear_tensor_pro_y, size=1000) - - conv2d_trans_in = fluid.data(name="conv2d_trans_in", - shape=[None, 10, 10, 10]) - - conv2d_trans_out_1 = fluid.layers.conv2d_transpose(conv2d_trans_in, - num_filters=10, - filter_size=5, - act="relu") - conv2d_trans_out_2 = fluid.layers.conv2d_transpose(conv2d_trans_in, - num_filters=10, - filter_size=5, - act="relu") - - conv3d_trans_in = fluid.data(name='conv3d_trans_in', - shape=[None, 3, 12, 32, 32], - dtype='float32') + x=bilinear_tensor_pro_x, y=bilinear_tensor_pro_y, size=1000 + ) + + conv2d_trans_in = fluid.data( + name="conv2d_trans_in", shape=[None, 10, 10, 10] + ) + + conv2d_trans_out_1 = fluid.layers.conv2d_transpose( + conv2d_trans_in, num_filters=10, filter_size=5, act="relu" + ) + conv2d_trans_out_2 = fluid.layers.conv2d_transpose( + conv2d_trans_in, num_filters=10, filter_size=5, act="relu" + ) + + conv3d_trans_in = fluid.data( + name='conv3d_trans_in', shape=[None, 3, 12, 32, 32], dtype='float32' + ) conv3d_trans_out_1 = fluid.layers.conv3d_transpose( - input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu") + input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu" + ) conv3d_trans_out_2 = fluid.layers.conv3d_transpose( - input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu") + input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu" + ) - groupnorm_in = fluid.data(name='groupnorm_in', - shape=[None, 8, 32, 32], - dtype='float32') + groupnorm_in = fluid.data( + name='groupnorm_in', shape=[None, 8, 32, 32], dtype='float32' + ) groupnorm_out1 = fluid.layers.group_norm(input=groupnorm_in, groups=4) groupnorm_out2 = fluid.layers.group_norm(input=groupnorm_in, groups=4) ''' @@ -123,39 +131,47 @@ class TestDygraphLoadStatic(unittest.TestCase): spe_norm_out_2 = fluid.layers.spectral_norm(weight=spec_norm, dim=1, power_iters=2) ''' - nodes_vector = fluid.data(name='vectors', - shape=[None, 10, 5], - dtype='float32') - edge_set = fluid.data(name='edge_set', - shape=[None, 10, 2], - dtype='float32') - tree_conv_out1 = fluid.contrib.layers.tree_conv(nodes_vector, 
edge_set, - 6, 1, 2) - tree_conv_out2 = fluid.contrib.layers.tree_conv(nodes_vector, edge_set, - 6, 1, 2) - - para1 = fluid.layers.create_parameter([100, 100], - 'float32', - name="weight_test_1") - para2 = fluid.layers.create_parameter([20, 200], - 'float32', - name="weight_test_2") + nodes_vector = fluid.data( + name='vectors', shape=[None, 10, 5], dtype='float32' + ) + edge_set = fluid.data( + name='edge_set', shape=[None, 10, 2], dtype='float32' + ) + tree_conv_out1 = fluid.contrib.layers.tree_conv( + nodes_vector, edge_set, 6, 1, 2 + ) + tree_conv_out2 = fluid.contrib.layers.tree_conv( + nodes_vector, edge_set, 6, 1, 2 + ) + + para1 = fluid.layers.create_parameter( + [100, 100], 'float32', name="weight_test_1" + ) + para2 = fluid.layers.create_parameter( + [20, 200], 'float32', name="weight_test_2" + ) para_list = fluid.default_main_program().list_vars() - exe = fluid.Executor(fluid.CPUPlace( - ) if not fluid.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not fluid.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) out = exe.run(framework.default_startup_program()) - fluid.save(framework.default_main_program(), - os.path.join(temp_dir.name, "test_1")) + fluid.save( + framework.default_main_program(), + os.path.join(temp_dir.name, "test_1"), + ) para_dict = fluid.load_program_state( - os.path.join(temp_dir.name, "test_1")) + os.path.join(temp_dir.name, "test_1") + ) new_dict = {} for k, v in para_dict.items(): - #print( k, v.shape ) + # print( k, v.shape ) if k.startswith("fc"): name = k.replace("fc", "linear", 1) new_dict[name] = v @@ -165,30 +181,31 @@ class TestDygraphLoadStatic(unittest.TestCase): with fluid.dygraph.guard(): class MyTest(fluid.dygraph.Layer): - def __init__(self): super(MyTest, self).__init__() self.linear1 = Linear(10, 10) self.lienar2 = Linear(10, 20) - self.conv2d_1 = Conv2D(num_channels=10, - num_filters=10, - filter_size=5, - act="relu") - self.conv2d_2 = Conv2D(num_channels=10, - num_filters=10, - filter_size=5, - act="relu") - - self.conv3d_1 = Conv3D(num_channels=3, - num_filters=2, - filter_size=3, - act="relu") - self.conv3d_2 = Conv3D(num_channels=3, - num_filters=2, - filter_size=3, - act="relu") + self.conv2d_1 = Conv2D( + num_channels=10, + num_filters=10, + filter_size=5, + act="relu", + ) + self.conv2d_2 = Conv2D( + num_channels=10, + num_filters=10, + filter_size=5, + act="relu", + ) + + self.conv3d_1 = Conv3D( + num_channels=3, num_filters=2, filter_size=3, act="relu" + ) + self.conv3d_2 = Conv3D( + num_channels=3, num_filters=2, filter_size=3, act="relu" + ) self.batch_norm_1 = BatchNorm(10) self.batch_norm_2 = BatchNorm(10) @@ -208,12 +225,12 @@ class TestDygraphLoadStatic(unittest.TestCase): self.group_norm1 = GroupNorm(8, 4) self.gourp_norm2 = GroupNorm(8, 4) - self.w_1 = self.create_parameter([100, 100], - dtype='float32', - attr="weight_test_1") - self.w_2 = self.create_parameter([20, 200], - dtype='float32', - attr="weight_test_2") + self.w_1 = self.create_parameter( + [100, 100], dtype='float32', attr="weight_test_1" + ) + self.w_2 = self.create_parameter( + [20, 200], dtype='float32', attr="weight_test_2" + ) my_test = MyTest() my_test.set_dict(new_dict, use_structured_name=False) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index a359d43d9c6aae3bc9bd143b5b11dc7f8b3d4437..3e74ba69d52214bb44805438458188ba7ce4f3f5 100644 --- 
a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -26,14 +26,15 @@ from paddle.fluid.framework import _test_eager_guard class SimpleNet(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dtype='float32'): + def __init__( + self, + hidden_size, + vocab_size, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dtype='float32', + ): super(SimpleNet, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -46,24 +47,31 @@ class SimpleNet(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label): x_emb = self.embedding(input) projection = fluid.layers.matmul( - x_emb, fluid.layers.transpose(self.embedding.weight, perm=[1, 0])) + x_emb, fluid.layers.transpose(self.embedding.weight, perm=[1, 0]) + ) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -72,7 +80,6 @@ class SimpleNet(fluid.Layer): class TestDygraphSimpleNet(unittest.TestCase): - def func_simple_net(self): for is_sparse in [True, False]: dtype_list = ["float32"] @@ -105,22 +112,27 @@ class TestDygraphSimpleNet(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - simple_net = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse, - dtype=dtype) - - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=simple_net.parameters()) + simple_net = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + dtype=dtype, + ) + + sgd = SGDOptimizer( + learning_rate=1e-3, + parameter_list=simple_net.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None helper = DyGraphProgramDescTracerTestHelper(self) fluid.set_flags( - {'FLAGS_sort_sum_gradient': is_sort_sum_gradient}) + {'FLAGS_sort_sum_gradient': is_sort_sum_gradient} + ) for i in range(batch_num): x_data = np.arange(12).reshape(4, 3).astype('int64') @@ -147,17 +159,19 @@ class TestDygraphSimpleNet(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - simple_net = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - is_sparse=is_sparse, - dtype=dtype) + simple_net = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + is_sparse=is_sparse, + dtype=dtype, + ) exe = fluid.Executor(place) sgd = 
SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype) static_loss = simple_net(x, y) @@ -168,8 +182,10 @@ class TestDygraphSimpleNet(unittest.TestCase): for param in simple_net.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] static_loss_value = None @@ -180,22 +196,22 @@ class TestDygraphSimpleNet(unittest.TestCase): y_data = y_data.reshape((-1, 1)) fetch_list = [static_loss] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"x": x_data, "y": y_data}, + fetch_list=fetch_list, + ) static_loss_value = out[0] if i == batch_num - 1: for k in range(3, len(out)): - static_param_updated[static_param_name_list[ - k - 1]] = out[k] + static_param_updated[ + static_param_name_list[k - 1] + ] = out[k] - np.testing.assert_allclose(static_loss_value, - dy_loss_value, - rtol=0.001) + np.testing.assert_allclose( + static_loss_value, dy_loss_value, rtol=0.001 + ) for key, value in static_param_init.items(): np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in static_param_updated.items(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py index 22954d11e283b34103d0ae1951a4c4a807e41b4a..41b0b365e7afdb25ee37fc2c3f0cf880b910010e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py @@ -26,43 +26,48 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class SimpleImgConvPool(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - pool_size, - pool_stride, - pool_padding=0, - pool_type='max', - global_pooling=False, - conv_stride=1, - conv_padding=0, - conv_dilation=1, - conv_groups=1, - act=None, - use_cudnn=False, - param_attr=None, - bias_attr=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None, + ): super(SimpleImgConvPool, self).__init__() - self._conv2d = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=conv_stride, - padding=conv_padding, - dilation=conv_dilation, - groups=conv_groups, - param_attr=None, - bias_attr=None, - use_cudnn=use_cudnn) - - self._pool2d = Pool2D(pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn) + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + use_cudnn=use_cudnn, + ) + + self._pool2d = Pool2D( + 
pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn, + ) def forward(self, inputs): x = self._conv2d(inputs) @@ -71,33 +76,30 @@ class SimpleImgConvPool(fluid.dygraph.Layer): class MNIST(fluid.dygraph.Layer): - def __init__(self): super(MNIST, self).__init__() - self._simple_img_conv_pool_1 = SimpleImgConvPool(1, - 20, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu" + ) - self._simple_img_conv_pool_2 = SimpleImgConvPool(20, - 50, - 5, - 2, - 2, - act="relu") + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu" + ) self.pool_2_shape = 50 * 4 * 4 SIZE = 10 - scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5 - self._fc = Linear(self.pool_2_shape, - 10, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale)), - act="softmax") + scale = (2.0 / (self.pool_2_shape**2 * SIZE)) ** 0.5 + self._fc = Linear( + self.pool_2_shape, + 10, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale + ) + ), + act="softmax", + ) def forward(self, inputs): x = self._simple_img_conv_pool_1(inputs) @@ -108,9 +110,7 @@ class MNIST(fluid.dygraph.Layer): class TestImperativeMnist(unittest.TestCase): - def reader_decorator(self, reader): - def _reader_imple(): for item in reader(): image = np.array(item[0]).reshape(1, 28, 28) @@ -132,16 +132,19 @@ class TestImperativeMnist(unittest.TestCase): fluid.default_main_program().random_seed = seed mnist = MNIST() - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=mnist.parameters()) + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=mnist.parameters() + ) batch_py_reader = fluid.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( - paddle.batch(self.reader_decorator( - paddle.dataset.mnist.train()), - batch_size=batch_size, - drop_last=True), - places=fluid.CPUPlace()) + paddle.batch( + self.reader_decorator(paddle.dataset.mnist.train()), + batch_size=batch_size, + drop_last=True, + ), + places=fluid.CPUPlace(), + ) mnist.train() dy_param_init_value = {} @@ -159,12 +162,14 @@ class TestImperativeMnist(unittest.TestCase): if batch_id % 10 == 0 and _in_legacy_dygraph(): cost, traced_layer = paddle.jit.TracedLayer.trace( - mnist, inputs=img) + mnist, inputs=img + ) if program is not None: self.assertTrue(program, traced_layer.program) program = traced_layer.program traced_layer.save_inference_model( - './infer_imperative_mnist') + './infer_imperative_mnist' + ) else: cost = mnist(img) @@ -193,18 +198,23 @@ class TestImperativeMnist(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) mnist = MNIST() sgd = SGDOptimizer(learning_rate=1e-3) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=batch_size, - drop_last=True) - - img = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + train_reader = paddle.batch( + paddle.dataset.mnist.train(), + batch_size=batch_size, + drop_last=True, + ) + + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], 
dtype='int64') cost = mnist(img) loss = fluid.layers.cross_entropy(cost, label) @@ -217,8 +227,10 @@ class TestImperativeMnist(unittest.TestCase): for param in mnist.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -227,12 +239,14 @@ class TestImperativeMnist(unittest.TestCase): for batch_id, data in enumerate(train_reader()): if batch_id >= batch_num: break - static_x_data = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape( - [batch_size, 1]) + static_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([batch_size, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) @@ -240,35 +254,34 @@ class TestImperativeMnist(unittest.TestCase): if traced_layer is not None: traced_layer([static_x_data]) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_out = out[0] for i in range(1, len(out)): - static_param_value[static_param_name_list[i - - 1]] = out[i] + static_param_value[static_param_name_list[i - 1]] = out[ + i + ] - np.testing.assert_allclose(dy_x_data.all(), - static_x_data.all(), - rtol=1e-05) + np.testing.assert_allclose( + dy_x_data.all(), static_x_data.all(), rtol=1e-05 + ) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) for key, value in static_param_value.items(): - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=1e-05 + ) def test_mnist_float32(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py index 214214f871020c937f90f8b8ce3fac0099da71f0..1a077311e0541b6689fdbaf1154dbe1debcc20fe 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py @@ -26,7 +26,6 @@ from paddle.fluid.framework import _test_eager_guard class TestImperativeMnistSortGradient(unittest.TestCase): - def func_test_mnist_sort_gradient_float32(self): seed = 90 epoch_num = 1 @@ -37,21 +36,25 @@ class TestImperativeMnistSortGradient(unittest.TestCase): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) mnist2 = MNIST() - sgd2 = SGDOptimizer(learning_rate=1e-3, - parameter_list=mnist2.parameters()) - train_reader2 = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + sgd2 = SGDOptimizer( + learning_rate=1e-3, parameter_list=mnist2.parameters() + ) + train_reader2 = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True + ) mnist2.train() 
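# --- illustrative aside (not part of the patch) -------------------------------
# The hunks above rewrap long chained NumPy calls inside parentheses so that
# each method sits on its own line, e.g. np.array(...).astype(...).reshape(...).
# A minimal, self-contained sketch of that batch-preparation pattern, using a
# hypothetical two-sample batch instead of the real MNIST reader:
import numpy as np

data = [(np.zeros((1, 28, 28)), 7), (np.ones((1, 28, 28)), 3)]  # (image, label) pairs
batch_size = len(data)

static_x = np.array([x[0].reshape(1, 28, 28) for x in data]).astype('float32')
y = (
    np.array([x[1] for x in data])   # gather the integer labels
    .astype('int64')                 # dtype expected by the cross-entropy label input
    .reshape([batch_size, 1])        # one label per row
)
print(static_x.shape, y.shape)  # (2, 1, 28, 28) (2, 1)
# ------------------------------------------------------------------------------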
dy_param_init_value2 = {} for epoch in range(epoch_num): for batch_id, data in enumerate(train_reader2()): - dy_x_data2 = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data2 = np.array([x[1] for x in data - ]).astype('int64').reshape(128, 1) + dy_x_data2 = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data2 = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(128, 1) + ) img2 = to_variable(dy_x_data2) label2 = to_variable(y_data2) @@ -81,18 +84,21 @@ class TestImperativeMnistSortGradient(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) mnist = MNIST() sgd = SGDOptimizer(learning_rate=1e-3) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True + ) - img = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) loss = fluid.layers.cross_entropy(cost, label) @@ -105,53 +111,57 @@ class TestImperativeMnistSortGradient(unittest.TestCase): for param in mnist.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] for epoch in range(epoch_num): for batch_id, data in enumerate(train_reader()): - static_x_data = np.array([ - x[0].reshape(1, 28, 28) for x in data - ]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([128, 1]) + static_x_data = np.array( + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([128, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_out = out[0] for i in range(1, len(out)): - static_param_value[static_param_name_list[i - - 1]] = out[i] + static_param_value[static_param_name_list[i - 1]] = out[ + i + ] if batch_id == 20: break - np.testing.assert_allclose(dy_x_data2.all(), - static_x_data.all(), - rtol=1e-05) + np.testing.assert_allclose( + dy_x_data2.all(), static_x_data.all(), rtol=1e-05 + ) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value2[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value2[key], rtol=1e-05 + ) np.testing.assert_allclose(static_out, dy_out2, rtol=1e-05) for key, value in static_param_value.items(): - np.testing.assert_allclose(value, - dy_param_value2[key], - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + value, dy_param_value2[key], rtol=1e-05, atol=1e-05 + ) def 
test_mnist_sort_gradient_float32(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_named_members.py b/python/paddle/fluid/tests/unittests/test_imperative_named_members.py index 57fad812bf8eca2f6178640ae820263b8856393a..7043eb0c2e7788b775124eb26feb7632637ef770 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_named_members.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_named_members.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class MyLayer(fluid.Layer): - def __init__(self, num_channel, dim, num_filter=5): super(MyLayer, self).__init__() self.fc = fluid.dygraph.Linear(dim, dim) @@ -33,7 +32,6 @@ class MyLayer(fluid.Layer): class TestImperativeNamedSubLayers(unittest.TestCase): - def func_test_named_sublayers(self): with fluid.dygraph.guard(): fc1 = fluid.Linear(10, 3) @@ -45,21 +43,22 @@ class TestImperativeNamedSubLayers(unittest.TestCase): expected_sublayers = [fc1, fc2, custom, custom.fc, custom.conv] self.assertEqual(len(list_named_sublayers), len(expected_sublayers)) - for (name, - sublayer), expected_sublayer in zip(list_named_sublayers, - expected_sublayers): + for (name, sublayer), expected_sublayer in zip( + list_named_sublayers, expected_sublayers + ): self.assertEqual(sublayer, expected_sublayer) list_sublayers = list(model.sublayers()) self.assertEqual(len(list_named_sublayers), len(list_sublayers)) - for (name, - sublayer), expected_sublayer in zip(list_named_sublayers, - list_sublayers): + for (name, sublayer), expected_sublayer in zip( + list_named_sublayers, list_sublayers + ): self.assertEqual(sublayer, expected_sublayer) self.assertListEqual( [l for _, l in list(model.named_sublayers(include_self=True))], - [model] + expected_sublayers) + [model] + expected_sublayers, + ) def test_named_sublayers(self): with _test_eager_guard(): @@ -68,7 +67,6 @@ class TestImperativeNamedSubLayers(unittest.TestCase): class TestImperativeNamedParameters(unittest.TestCase): - def func_test_named_parameters(self): with fluid.dygraph.guard(): fc1 = fluid.Linear(10, 3) @@ -80,7 +78,8 @@ class TestImperativeNamedParameters(unittest.TestCase): expected_named_parameters = list() for prefix, layer in model.named_sublayers(): for name, param in layer.named_parameters( - include_sublayers=False): + include_sublayers=False + ): full_name = prefix + ('.' 
if prefix else '') + name expected_named_parameters.append((full_name, param)) @@ -95,7 +94,6 @@ class TestImperativeNamedParameters(unittest.TestCase): with fluid.dygraph.guard(): class Mymodel(fluid.dygraph.Layer): - def __init__(self): super(Mymodel, self).__init__() self.linear1 = fluid.dygraph.Linear(10, 10) @@ -103,28 +101,42 @@ class TestImperativeNamedParameters(unittest.TestCase): self.conv2d = fluid.dygraph.Conv2D(3, 2, 3) self.embedding = fluid.dygraph.Embedding(size=[128, 16]) self.h_0 = fluid.dygraph.to_variable( - np.zeros([10, 10]).astype('float32')) - self.weight = self.create_parameter(shape=[2, 3], - attr=fluid.ParamAttr(), - dtype="float32", - is_bias=False) + np.zeros([10, 10]).astype('float32') + ) + self.weight = self.create_parameter( + shape=[2, 3], + attr=fluid.ParamAttr(), + dtype="float32", + is_bias=False, + ) model = Mymodel() expected_members = dir(model) - self.assertTrue("linear1" in expected_members, - "model should contain Layer: linear1") - self.assertTrue("linear2" in expected_members, - "model should contain Layer: linear2") - self.assertTrue("conv2d" in expected_members, - "model should contain Layer: conv2d") - self.assertTrue("embedding" in expected_members, - "model should contain Layer: embedding") - self.assertTrue("h_0" in expected_members, - "model should contain buffer: h_0") - self.assertTrue("weight" in expected_members, - "model should contain parameter: weight") + self.assertTrue( + "linear1" in expected_members, + "model should contain Layer: linear1", + ) + self.assertTrue( + "linear2" in expected_members, + "model should contain Layer: linear2", + ) + self.assertTrue( + "conv2d" in expected_members, + "model should contain Layer: conv2d", + ) + self.assertTrue( + "embedding" in expected_members, + "model should contain Layer: embedding", + ) + self.assertTrue( + "h_0" in expected_members, "model should contain buffer: h_0" + ) + self.assertTrue( + "weight" in expected_members, + "model should contain parameter: weight", + ) def test_dir_layer(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py index a93471a09c9c314ff65b276f5578c4ed8b853c4e..4e333f7ca4e0ff6e8d0dfed6b406c8c476c4a743 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py @@ -21,15 +21,16 @@ from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard class TestImperativeNumpyBridge(unittest.TestCase): - def func_tensor_from_numpy(self): data_np = np.array([[2, 3, 1]]).astype('float32') with fluid.dygraph.guard(fluid.CPUPlace()): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") var = fluid.dygraph.to_variable(data_np, zero_copy=True) - assert "Currently, zero_copy is not supported, and it will be discarded." in str( - w[-1].message) + assert ( + "Currently, zero_copy is not supported, and it will be discarded." 
+ in str(w[-1].message) + ) # Temporally diable zero_copy # var = fluid.dygraph.to_variable(data_np, zero_copy=True) # np.testing.assert_array_equal(var.numpy(), data_np) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index eb582310df46bb3757aa0c12472fc7e4f03c04b5..7971bdd34cd4c358de80e0d22c90b1975af046dc 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -17,7 +17,14 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm, Embedding, GRUUnit +from paddle.fluid.dygraph.nn import ( + Conv2D, + Pool2D, + Linear, + BatchNorm, + Embedding, + GRUUnit, +) from paddle.fluid.dygraph.base import to_variable from test_imperative_base import new_program_scope from paddle.fluid.framework import _test_eager_guard @@ -27,6 +34,7 @@ class Config(object): ''' config for training ''' + # encoder rnn hidden_size encoder_size = 8 # decoder size for decoder stage @@ -58,53 +66,62 @@ class Config(object): class ConvBNPool(fluid.dygraph.Layer): - - def __init__(self, - group, - out_ch, - channels, - act="relu", - is_test=False, - pool=True, - use_cudnn=True): + def __init__( + self, + group, + out_ch, + channels, + act="relu", + is_test=False, + pool=True, + use_cudnn=True, + ): super(ConvBNPool, self).__init__() self.group = group self.pool = pool filter_size = 3 - conv_std_0 = (2.0 / (filter_size**2 * channels[0]))**0.5 + conv_std_0 = (2.0 / (filter_size**2 * channels[0])) ** 0.5 conv_param_0 = fluid.ParamAttr( - initializer=fluid.initializer.Normal(0.0, conv_std_0)) + initializer=fluid.initializer.Normal(0.0, conv_std_0) + ) - conv_std_1 = (2.0 / (filter_size**2 * channels[1]))**0.5 + conv_std_1 = (2.0 / (filter_size**2 * channels[1])) ** 0.5 conv_param_1 = fluid.ParamAttr( - initializer=fluid.initializer.Normal(0.0, conv_std_1)) - - self.conv_0_layer = Conv2D(channels[0], - out_ch[0], - 3, - padding=1, - param_attr=conv_param_0, - bias_attr=False, - act=None, - use_cudnn=use_cudnn) + initializer=fluid.initializer.Normal(0.0, conv_std_1) + ) + + self.conv_0_layer = Conv2D( + channels[0], + out_ch[0], + 3, + padding=1, + param_attr=conv_param_0, + bias_attr=False, + act=None, + use_cudnn=use_cudnn, + ) self.bn_0_layer = BatchNorm(out_ch[0], act=act, is_test=is_test) - self.conv_1_layer = Conv2D(out_ch[0], - num_filters=out_ch[1], - filter_size=3, - padding=1, - param_attr=conv_param_1, - bias_attr=False, - act=None, - use_cudnn=use_cudnn) + self.conv_1_layer = Conv2D( + out_ch[0], + num_filters=out_ch[1], + filter_size=3, + padding=1, + param_attr=conv_param_1, + bias_attr=False, + act=None, + use_cudnn=use_cudnn, + ) self.bn_1_layer = BatchNorm(out_ch[1], act=act, is_test=is_test) if self.pool: - self.pool_layer = Pool2D(pool_size=2, - pool_type='max', - pool_stride=2, - use_cudnn=use_cudnn, - ceil_mode=True) + self.pool_layer = Pool2D( + pool_size=2, + pool_type='max', + pool_stride=2, + use_cudnn=use_cudnn, + ceil_mode=True, + ) def forward(self, inputs): conv_0 = self.conv_0_layer(inputs) @@ -118,22 +135,25 @@ class ConvBNPool(fluid.dygraph.Layer): class OCRConv(fluid.dygraph.Layer): - def __init__(self, is_test=False, use_cudnn=True): super(OCRConv, self).__init__() - self.conv_bn_pool_1 = ConvBNPool(2, [8, 8], [1, 8], - is_test=is_test, - use_cudnn=use_cudnn) - 
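# --- illustrative aside (not part of the patch) -------------------------------
# ConvBNPool above seeds its Conv2D weights with a He-style standard deviation,
# std = sqrt(2 / (k*k*C_in)), written as
# (2.0 / (filter_size**2 * channels[0])) ** 0.5. Note the spacing after the
# reformat: `**` stays tight between simple operands (filter_size**2) and is
# spaced when an operand is a parenthesized expression. A standalone check of
# the arithmetic, with assumed values k=3 and C_in=8 as in the later blocks:
filter_size = 3      # kernel size used in ConvBNPool
in_channels = 8      # channels[0] for the later conv blocks
conv_std = (2.0 / (filter_size**2 * in_channels)) ** 0.5
print(round(conv_std, 4))  # 0.1667
# ------------------------------------------------------------------------------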
self.conv_bn_pool_2 = ConvBNPool(2, [8, 8], [8, 8], - is_test=is_test, - use_cudnn=use_cudnn) - self.conv_bn_pool_3 = ConvBNPool(2, [8, 8], [8, 8], - is_test=is_test, - use_cudnn=use_cudnn) - self.conv_bn_pool_4 = ConvBNPool(2, [16, 16], [8, 16], - is_test=is_test, - pool=False, - use_cudnn=use_cudnn) + self.conv_bn_pool_1 = ConvBNPool( + 2, [8, 8], [1, 8], is_test=is_test, use_cudnn=use_cudnn + ) + self.conv_bn_pool_2 = ConvBNPool( + 2, [8, 8], [8, 8], is_test=is_test, use_cudnn=use_cudnn + ) + self.conv_bn_pool_3 = ConvBNPool( + 2, [8, 8], [8, 8], is_test=is_test, use_cudnn=use_cudnn + ) + self.conv_bn_pool_4 = ConvBNPool( + 2, + [16, 16], + [8, 16], + is_test=is_test, + pool=False, + use_cudnn=use_cudnn, + ) def forward(self, inputs): inputs_1 = self.conv_bn_pool_1(inputs) @@ -145,24 +165,27 @@ class OCRConv(fluid.dygraph.Layer): class DynamicGRU(fluid.dygraph.Layer): - - def __init__(self, - size, - param_attr=None, - bias_attr=None, - is_reverse=False, - gate_activation='sigmoid', - candidate_activation='tanh', - h_0=None, - origin_mode=False): + def __init__( + self, + size, + param_attr=None, + bias_attr=None, + is_reverse=False, + gate_activation='sigmoid', + candidate_activation='tanh', + h_0=None, + origin_mode=False, + ): super(DynamicGRU, self).__init__() - self.gru_unit = GRUUnit(size * 3, - param_attr=param_attr, - bias_attr=bias_attr, - activation=candidate_activation, - gate_activation=gate_activation, - origin_mode=origin_mode) + self.gru_unit = GRUUnit( + size * 3, + param_attr=param_attr, + bias_attr=bias_attr, + activation=candidate_activation, + gate_activation=gate_activation, + origin_mode=origin_mode, + ) self.h_0 = h_0 self.is_reverse = is_reverse @@ -174,15 +197,16 @@ class DynamicGRU(fluid.dygraph.Layer): for i in range(inputs.shape[1]): if self.is_reverse: i = inputs.shape[1] - 1 - i - input_ = fluid.layers.slice(inputs, - axes=[1], - starts=[i], - ends=[i + 1]) - input_ = fluid.layers.reshape(input_, [-1, input_.shape[2]], - inplace=False) + input_ = fluid.layers.slice( + inputs, axes=[1], starts=[i], ends=[i + 1] + ) + input_ = fluid.layers.reshape( + input_, [-1, input_.shape[2]], inplace=False + ) hidden, reset, gate = self.gru_unit(input_, hidden) - hidden_ = fluid.layers.reshape(hidden, [-1, 1, hidden.shape[1]], - inplace=False) + hidden_ = fluid.layers.reshape( + hidden, [-1, 1, hidden.shape[1]], inplace=False + ) if self.is_reverse: res = [hidden_] + res else: @@ -192,75 +216,85 @@ class DynamicGRU(fluid.dygraph.Layer): class EncoderNet(fluid.dygraph.Layer): - - def __init__(self, - rnn_hidden_size=Config.encoder_size, - is_test=False, - use_cudnn=True): + def __init__( + self, rnn_hidden_size=Config.encoder_size, is_test=False, use_cudnn=True + ): super(EncoderNet, self).__init__() self.rnn_hidden_size = rnn_hidden_size para_attr = fluid.ParamAttr( - initializer=fluid.initializer.Normal(0.0, 0.02)) - bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Normal( - 0.0, 0.02), - learning_rate=2.0) + initializer=fluid.initializer.Normal(0.0, 0.02) + ) + bias_attr = fluid.ParamAttr( + initializer=fluid.initializer.Normal(0.0, 0.02), learning_rate=2.0 + ) if fluid.framework._non_static_mode(): - h_0 = np.zeros((Config.batch_size, rnn_hidden_size), - dtype="float32") + h_0 = np.zeros( + (Config.batch_size, rnn_hidden_size), dtype="float32" + ) h_0 = to_variable(h_0) else: h_0 = fluid.layers.fill_constant( shape=[Config.batch_size, rnn_hidden_size], dtype='float32', - value=0) + value=0, + ) self.ocr_convs = OCRConv(is_test=is_test, use_cudnn=use_cudnn) - 
self.fc_1_layer = Linear(32, - rnn_hidden_size * 3, - param_attr=para_attr, - bias_attr=False) - self.fc_2_layer = Linear(32, - rnn_hidden_size * 3, - param_attr=para_attr, - bias_attr=False) - self.gru_forward_layer = DynamicGRU(size=rnn_hidden_size, - h_0=h_0, - param_attr=para_attr, - bias_attr=bias_attr, - candidate_activation='relu') - self.gru_backward_layer = DynamicGRU(size=rnn_hidden_size, - h_0=h_0, - param_attr=para_attr, - bias_attr=bias_attr, - candidate_activation='relu', - is_reverse=True) - - self.encoded_proj_fc = Linear(rnn_hidden_size * 2, - Config.decoder_size, - bias_attr=False) + self.fc_1_layer = Linear( + 32, rnn_hidden_size * 3, param_attr=para_attr, bias_attr=False + ) + self.fc_2_layer = Linear( + 32, rnn_hidden_size * 3, param_attr=para_attr, bias_attr=False + ) + self.gru_forward_layer = DynamicGRU( + size=rnn_hidden_size, + h_0=h_0, + param_attr=para_attr, + bias_attr=bias_attr, + candidate_activation='relu', + ) + self.gru_backward_layer = DynamicGRU( + size=rnn_hidden_size, + h_0=h_0, + param_attr=para_attr, + bias_attr=bias_attr, + candidate_activation='relu', + is_reverse=True, + ) + + self.encoded_proj_fc = Linear( + rnn_hidden_size * 2, Config.decoder_size, bias_attr=False + ) def forward(self, inputs): conv_features = self.ocr_convs(inputs) - #sliced_feature = fluid.layers.im2sequence( + # sliced_feature = fluid.layers.im2sequence( # input=conv_features, # stride=[1, 1], # filter_size=[conv_features.shape[2], 1]) - transpose_conv_features = fluid.layers.transpose(conv_features, - perm=[0, 3, 1, 2]) - sliced_feature = fluid.layers.reshape(transpose_conv_features, [ - -1, 8, - transpose_conv_features.shape[2] * transpose_conv_features.shape[3] - ], - inplace=False) + transpose_conv_features = fluid.layers.transpose( + conv_features, perm=[0, 3, 1, 2] + ) + sliced_feature = fluid.layers.reshape( + transpose_conv_features, + [ + -1, + 8, + transpose_conv_features.shape[2] + * transpose_conv_features.shape[3], + ], + inplace=False, + ) fc_1 = self.fc_1_layer(sliced_feature) fc_2 = self.fc_2_layer(sliced_feature) gru_forward = self.gru_forward_layer(fc_1) gru_backward = self.gru_backward_layer(fc_2) - encoded_vector = fluid.layers.concat(input=[gru_forward, gru_backward], - axis=2) + encoded_vector = fluid.layers.concat( + input=[gru_forward, gru_backward], axis=2 + ) encoded_proj = self.encoded_proj_fc(encoded_vector) @@ -268,79 +302,80 @@ class EncoderNet(fluid.dygraph.Layer): class SimpleAttention(fluid.dygraph.Layer): - def __init__(self, decoder_size): super(SimpleAttention, self).__init__() - self.fc_1 = Linear(decoder_size, - decoder_size, - act=None, - bias_attr=False) + self.fc_1 = Linear( + decoder_size, decoder_size, act=None, bias_attr=False + ) self.fc_2 = Linear(decoder_size, 1, act=None, bias_attr=False) def forward(self, encoder_vec, encoder_proj, decoder_state): decoder_state_fc = self.fc_1(decoder_state) decoder_state_proj_reshape = fluid.layers.reshape( - decoder_state_fc, [-1, 1, decoder_state_fc.shape[1]], inplace=False) + decoder_state_fc, [-1, 1, decoder_state_fc.shape[1]], inplace=False + ) decoder_state_expand = fluid.layers.expand( - decoder_state_proj_reshape, [1, encoder_proj.shape[1], 1]) - concated = fluid.layers.elementwise_add(encoder_proj, - decoder_state_expand) + decoder_state_proj_reshape, [1, encoder_proj.shape[1], 1] + ) + concated = fluid.layers.elementwise_add( + encoder_proj, decoder_state_expand + ) concated = fluid.layers.tanh(x=concated) attention_weight = self.fc_2(concated) weights_reshape = fluid.layers.reshape( 
x=attention_weight, shape=[attention_weight.shape[0], attention_weight.shape[1]], - inplace=False) + inplace=False, + ) weights_reshape = fluid.layers.softmax(weights_reshape) - scaled = fluid.layers.elementwise_mul(x=encoder_vec, - y=weights_reshape, - axis=0) + scaled = fluid.layers.elementwise_mul( + x=encoder_vec, y=weights_reshape, axis=0 + ) context = fluid.layers.reduce_sum(scaled, dim=1) return context class GRUDecoderWithAttention(fluid.dygraph.Layer): - def __init__(self, decoder_size, num_classes): super(GRUDecoderWithAttention, self).__init__() self.simple_attention = SimpleAttention(decoder_size) - self.fc_1_layer = Linear(Config.encoder_size * 2, - decoder_size * 3, - bias_attr=False) - self.fc_2_layer = Linear(decoder_size, - decoder_size * 3, - bias_attr=False) - self.gru_unit = GRUUnit(size=decoder_size * 3, - param_attr=None, - bias_attr=None) - self.out_layer = Linear(decoder_size, - num_classes + 2, - bias_attr=None, - act='softmax') + self.fc_1_layer = Linear( + Config.encoder_size * 2, decoder_size * 3, bias_attr=False + ) + self.fc_2_layer = Linear( + decoder_size, decoder_size * 3, bias_attr=False + ) + self.gru_unit = GRUUnit( + size=decoder_size * 3, param_attr=None, bias_attr=None + ) + self.out_layer = Linear( + decoder_size, num_classes + 2, bias_attr=None, act='softmax' + ) self.decoder_size = decoder_size - def forward(self, target_embedding, encoder_vec, encoder_proj, - decoder_boot): + def forward( + self, target_embedding, encoder_vec, encoder_proj, decoder_boot + ): res = [] hidden_mem = decoder_boot for i in range(target_embedding.shape[1]): - current_word = fluid.layers.slice(target_embedding, - axes=[1], - starts=[i], - ends=[i + 1]) - current_word = fluid.layers.reshape(current_word, - [-1, current_word.shape[2]], - inplace=False) - - context = self.simple_attention(encoder_vec, encoder_proj, - hidden_mem) + current_word = fluid.layers.slice( + target_embedding, axes=[1], starts=[i], ends=[i + 1] + ) + current_word = fluid.layers.reshape( + current_word, [-1, current_word.shape[2]], inplace=False + ) + + context = self.simple_attention( + encoder_vec, encoder_proj, hidden_mem + ) fc_1 = self.fc_1_layer(context) fc_2 = self.fc_2_layer(current_word) decoder_inputs = fluid.layers.elementwise_add(x=fc_1, y=fc_2) @@ -356,45 +391,48 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer): class OCRAttention(fluid.dygraph.Layer): - def __init__(self): super(OCRAttention, self).__init__() self.encoder_net = EncoderNet() - self.fc = Linear(Config.encoder_size, - Config.decoder_size, - bias_attr=False, - act='relu') + self.fc = Linear( + Config.encoder_size, + Config.decoder_size, + bias_attr=False, + act='relu', + ) self.embedding = Embedding( - [Config.num_classes + 2, Config.word_vector_dim], dtype='float32') + [Config.num_classes + 2, Config.word_vector_dim], dtype='float32' + ) self.gru_decoder_with_attention = GRUDecoderWithAttention( - Config.decoder_size, Config.num_classes) + Config.decoder_size, Config.num_classes + ) def forward(self, inputs, label_in): gru_backward, encoded_vector, encoded_proj = self.encoder_net(inputs) - backward_first = fluid.layers.slice(gru_backward, - axes=[1], - starts=[0], - ends=[1]) - backward_first = fluid.layers.reshape(backward_first, - [-1, backward_first.shape[2]], - inplace=False) + backward_first = fluid.layers.slice( + gru_backward, axes=[1], starts=[0], ends=[1] + ) + backward_first = fluid.layers.reshape( + backward_first, [-1, backward_first.shape[2]], inplace=False + ) decoder_boot = self.fc(backward_first) 
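# --- illustrative aside (not part of the patch) -------------------------------
# The rewritten constructor calls above follow one wrapping rule: a call that no
# longer fits on a single line is exploded so its arguments move onto indented
# line(s), and when every argument gets its own line a trailing comma is kept
# after the last one. A minimal sketch of that layout with a hypothetical
# stand-in for a layer constructor (not a Paddle API):
def linear_cfg(in_dim, out_dim, bias_attr=None, act=None, param_attr=None):
    """Collect the constructor arguments into a plain dict for inspection."""
    return {
        "in_dim": in_dim,
        "out_dim": out_dim,
        "bias_attr": bias_attr,
        "act": act,
        "param_attr": param_attr,
    }

decoder_size, num_classes = 8, 20   # small assumed values, mirroring Config
out_layer = linear_cfg(
    decoder_size,
    num_classes + 2,                # +2, as in GRUDecoderWithAttention above
    bias_attr=None,
    act='softmax',
)
print(out_layer["out_dim"])  # 22
# ------------------------------------------------------------------------------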
label_in = fluid.layers.reshape(label_in, [-1], inplace=False) trg_embedding = self.embedding(label_in) trg_embedding = fluid.layers.reshape( - trg_embedding, [-1, Config.max_length, trg_embedding.shape[1]], - inplace=False) + trg_embedding, + [-1, Config.max_length, trg_embedding.shape[1]], + inplace=False, + ) - prediction = self.gru_decoder_with_attention(trg_embedding, - encoded_vector, - encoded_proj, decoder_boot) + prediction = self.gru_decoder_with_attention( + trg_embedding, encoded_vector, encoded_proj, decoder_boot + ) return prediction class TestDygraphOCRAttention(unittest.TestCase): - def test_ocr_test(self): seed = 90 epoch_num = 1 @@ -403,26 +441,41 @@ class TestDygraphOCRAttention(unittest.TestCase): else: batch_num = 2 np.random.seed = seed - image_np = np.random.randn(Config.batch_size, Config.DATA_SHAPE[0], - Config.DATA_SHAPE[1], - Config.DATA_SHAPE[2]).astype('float32') - label_in_np = np.arange(0, Config.max_length, - dtype='int64').reshape([1, Config.max_length]) + image_np = np.random.randn( + Config.batch_size, + Config.DATA_SHAPE[0], + Config.DATA_SHAPE[1], + Config.DATA_SHAPE[2], + ).astype('float32') + label_in_np = np.arange(0, Config.max_length, dtype='int64').reshape( + [1, Config.max_length] + ) for i in range(2, Config.batch_size + 1): label_in_np = np.vstack( - (label_in_np, - np.arange((i - 1) * Config.max_length, - i * Config.max_length, - dtype='int64').reshape([1, Config.max_length]))) + ( + label_in_np, + np.arange( + (i - 1) * Config.max_length, + i * Config.max_length, + dtype='int64', + ).reshape([1, Config.max_length]), + ) + ) - label_out_np = np.arange(0, Config.max_length, - dtype='int64').reshape([1, Config.max_length]) + label_out_np = np.arange(0, Config.max_length, dtype='int64').reshape( + [1, Config.max_length] + ) for i in range(2, Config.batch_size + 1): label_out_np = np.vstack( - (label_out_np, - np.arange((i - 1) * Config.max_length, - i * Config.max_length, - dtype='int64').reshape([1, Config.max_length]))) + ( + label_out_np, + np.arange( + (i - 1) * Config.max_length, + i * Config.max_length, + dtype='int64', + ).reshape([1, Config.max_length]), + ) + ) def run_dygraph(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) @@ -432,11 +485,13 @@ class TestDygraphOCRAttention(unittest.TestCase): if Config.learning_rate_decay == "piecewise_decay": learning_rate = fluid.layers.piecewise_decay( - [50000], [Config.LR, Config.LR * 0.01]) + [50000], [Config.LR, Config.LR * 0.01] + ) else: learning_rate = Config.LR optimizer = fluid.optimizer.SGD( - learning_rate=0.001, parameter_list=ocr_attention.parameters()) + learning_rate=0.001, parameter_list=ocr_attention.parameters() + ) dy_param_init_value = {} for param in ocr_attention.parameters(): dy_param_init_value[param.name] = param.numpy() @@ -447,12 +502,15 @@ class TestDygraphOCRAttention(unittest.TestCase): label_out.stop_gradient = True img = to_variable(image_np) dy_prediction = ocr_attention(img, label_in) - label_out = fluid.layers.reshape(label_out, [-1, 1], - inplace=False) + label_out = fluid.layers.reshape( + label_out, [-1, 1], inplace=False + ) dy_prediction = fluid.layers.reshape( - dy_prediction, [label_out.shape[0], -1], inplace=False) - loss = fluid.layers.cross_entropy(input=dy_prediction, - label=label_out) + dy_prediction, [label_out.shape[0], -1], inplace=False + ) + loss = fluid.layers.cross_entropy( + input=dy_prediction, label=label_out + ) avg_loss = fluid.layers.reduce_sum(loss) dy_out = avg_loss.numpy() @@ -466,9 +524,11 @@ class 
TestDygraphOCRAttention(unittest.TestCase): for param in ocr_attention.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + core.grad_var_suffix() + ] = np_array optimizer.minimize(avg_loss) ocr_attention.clear_gradients() @@ -483,45 +543,52 @@ class TestDygraphOCRAttention(unittest.TestCase): with fluid.dygraph.guard(): with _test_eager_guard(): - eager_out, eager_param_init_value, eager_param_value = run_dygraph( - ) + ( + eager_out, + eager_param_init_value, + eager_param_value, + ) = run_dygraph() with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) ocr_attention = OCRAttention() if Config.learning_rate_decay == "piecewise_decay": learning_rate = fluid.layers.piecewise_decay( - [50000], [Config.LR, Config.LR * 0.01]) + [50000], [Config.LR, Config.LR * 0.01] + ) else: learning_rate = Config.LR optimizer = fluid.optimizer.SGD(learning_rate=0.001) - images = fluid.layers.data(name='pixel', - shape=Config.DATA_SHAPE, - dtype='float32') - static_label_in = fluid.layers.data(name='label_in', - shape=[1], - dtype='int64', - lod_level=0) - static_label_out = fluid.layers.data(name='label_out', - shape=[1], - dtype='int64', - lod_level=0) + images = fluid.layers.data( + name='pixel', shape=Config.DATA_SHAPE, dtype='float32' + ) + static_label_in = fluid.layers.data( + name='label_in', shape=[1], dtype='int64', lod_level=0 + ) + static_label_out = fluid.layers.data( + name='label_out', shape=[1], dtype='int64', lod_level=0 + ) static_label_out.stop_gradient = True static_label_out.trainable = False static_prediction = ocr_attention(images, static_label_in) static_prediction = fluid.layers.reshape( - static_prediction, shape=[-1, Config.num_classes + 2]) + static_prediction, shape=[-1, Config.num_classes + 2] + ) - cost = fluid.layers.cross_entropy(input=static_prediction, - label=static_label_out) + cost = fluid.layers.cross_entropy( + input=static_prediction, label=static_label_out + ) static_avg_loss = fluid.layers.reduce_sum(cost) # param_grad_list = fluid.backward.append_backward(static_avg_loss) optimizer.minimize(static_avg_loss) @@ -532,11 +599,14 @@ class TestDygraphOCRAttention(unittest.TestCase): for param in ocr_attention.parameters(): static_param_name_list.append(param.name) if param.trainable: - static_grad_name_list.append(param.name + - core.grad_var_suffix()) + static_grad_name_list.append( + param.name + core.grad_var_suffix() + ) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -549,24 +619,30 @@ class TestDygraphOCRAttention(unittest.TestCase): static_label_in = label_in_np static_label_out = label_out_np static_label_out = static_label_out.reshape((-1, 1)) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": image_np, - "label_in": static_label_in, - "label_out": static_label_out - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "pixel": image_np, + 
"label_in": static_label_in, + "label_out": static_label_out, + }, + fetch_list=fetch_list, + ) static_param_value = {} static_grad_value = {} static_out = out[0] for i in range(1, len(static_param_name_list) + 1): - static_param_value[static_param_name_list[i - - 1]] = out[i] + static_param_value[static_param_name_list[i - 1]] = out[ + i + ] grad_start_pos = len(static_param_name_list) + 1 - for i in range(grad_start_pos, - len(static_grad_name_list) + grad_start_pos): - static_grad_value[static_grad_name_list[ - i - grad_start_pos]] = out[i] + for i in range( + grad_start_pos, + len(static_grad_name_list) + grad_start_pos, + ): + static_grad_value[ + static_grad_name_list[i - grad_start_pos] + ] = out[i] np.testing.assert_allclose(static_out, dy_out, rtol=1e-05, atol=1e-8) @@ -574,10 +650,9 @@ class TestDygraphOCRAttention(unittest.TestCase): np.testing.assert_array_equal(value, dy_param_init_value[key]) for key, value in static_param_value.items(): - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=1e-8 + ) # check eager here np.testing.assert_allclose(static_out, eager_out, rtol=1e-05, atol=1e-8) @@ -586,10 +661,9 @@ class TestDygraphOCRAttention(unittest.TestCase): np.testing.assert_array_equal(value, eager_param_init_value[key]) for key, value in static_param_value.items(): - np.testing.assert_allclose(value, - eager_param_value[key], - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + value, eager_param_value[key], rtol=1e-05, atol=1e-8 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index ba1f73bcbcb42a119c82d9a926b360fa2270eb30..0219cc9947975bb15ddfd591bd34845de57ba528 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -19,8 +19,28 @@ import itertools import paddle import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.optimizer import SGDOptimizer, Adam, MomentumOptimizer, LarsMomentumOptimizer, AdagradOptimizer, AdamaxOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, AdadeltaOptimizer, RMSPropOptimizer, FtrlOptimizer, LambOptimizer -from paddle.fluid.optimizer import ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer +from paddle.fluid.optimizer import ( + SGDOptimizer, + Adam, + MomentumOptimizer, + LarsMomentumOptimizer, + AdagradOptimizer, + AdamaxOptimizer, + DpsgdOptimizer, + DecayedAdagradOptimizer, + AdadeltaOptimizer, + RMSPropOptimizer, + FtrlOptimizer, + LambOptimizer, +) +from paddle.fluid.optimizer import ( + ModelAverage, + DGCMomentumOptimizer, + ExponentialMovingAverage, + PipelineOptimizer, + LookaheadOptimizer, + RecomputeOptimizer, +) from paddle.fluid.dygraph import Linear from test_imperative_base import new_program_scope from paddle.fluid.framework import _test_eager_guard @@ -30,7 +50,6 @@ from paddle.fluid.framework import _test_eager_guard class MLP(fluid.Layer): - def __init__(self, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -44,7 +63,6 @@ class MLP(fluid.Layer): class TestImperativeOptimizerBase(unittest.TestCase): - def setUp(self): self.batch_num = 20 @@ -55,7 +73,6 @@ class TestImperativeOptimizerBase(unittest.TestCase): raise NotImplementedError() def reader_decorator(self, reader): - def 
_reader_imple(): for item in reader(): image = np.array(item[0]).reshape(1, 784) @@ -68,8 +85,11 @@ class TestImperativeOptimizerBase(unittest.TestCase): seed = 90 batch_size = 128 if place == None: - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): try: @@ -77,7 +97,8 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( - parameter_list=mlp.parameters()) + parameter_list=mlp.parameters() + ) except Exception as e: assert str(e) == exception_message @@ -86,8 +107,11 @@ class TestImperativeOptimizerBase(unittest.TestCase): batch_size = 128 if place == None: - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) with fluid.dygraph.guard(place): paddle.seed(seed) @@ -95,15 +119,18 @@ class TestImperativeOptimizerBase(unittest.TestCase): mlp = MLP() optimizer = self.get_optimizer_dygraph( - parameter_list=mlp.parameters()) + parameter_list=mlp.parameters() + ) batch_py_reader = fluid.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( - paddle.batch(self.reader_decorator( - paddle.dataset.mnist.train()), - batch_size=batch_size, - drop_last=True), - places=fluid.CPUPlace()) + paddle.batch( + self.reader_decorator(paddle.dataset.mnist.train()), + batch_size=batch_size, + drop_last=True, + ), + places=fluid.CPUPlace(), + ) dy_param_init_value = {} for batch_id, data in enumerate(batch_py_reader()): @@ -135,20 +162,23 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) if place == None: - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = fluid.Executor(place) mlp = MLP() optimizer = self.get_optimizer() - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True + ) - img = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') img = fluid.layers.reshape(img, shape=[batch_size, 784]) cost = mlp(img) @@ -161,8 +191,10 @@ class TestImperativeOptimizerBase(unittest.TestCase): for param in mlp.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -172,18 +204,21 @@ class TestImperativeOptimizerBase(unittest.TestCase): break static_x_data = np.array( - [x[0].reshape(1, 28, 28) for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([128, 1]) + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([128, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) - out = 
exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_out = out[0] @@ -191,43 +226,48 @@ class TestImperativeOptimizerBase(unittest.TestCase): static_param_value[static_param_name_list[i - 1]] = out[i] for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) if core.is_compiled_with_rocm(): - np.testing.assert_allclose(static_out, - dy_out, - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + static_out, dy_out, rtol=1e-05, atol=0.001 + ) else: np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) for key, value in static_param_value.items(): if core.is_compiled_with_rocm(): - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=0.001 + ) else: - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05 + ) class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): bd = [3, 6, 9] - optimizer = SGDOptimizer(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=[0.1 * (0.1**i) for i in range(len(bd) + 1)]), - parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, + values=[0.1 * (0.1**i) for i in range(len(bd) + 1)], + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): bd = [3, 6, 9] - optimizer = SGDOptimizer(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=[0.1 * (0.1**i) for i in range(len(bd) + 1)])) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, + values=[0.1 * (0.1**i) for i in range(len(bd) + 1)], + ) + ) return optimizer def func_test_sgd(self): @@ -240,22 +280,27 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = SGDOptimizer(learning_rate=fluid.layers.natural_exp_decay( - learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True), - parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.natural_exp_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = SGDOptimizer( - learning_rate=fluid.layers.natural_exp_decay(learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.natural_exp_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ) + ) return optimizer def func_test_sgd(self): @@ -268,22 +313,27 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = SGDOptimizer(learning_rate=fluid.layers.exponential_decay( - learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True), - 
parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = SGDOptimizer( - learning_rate=fluid.layers.exponential_decay(learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ) + ) return optimizer def func_test_sgd(self): @@ -296,22 +346,27 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = Adam(learning_rate=fluid.layers.inverse_time_decay( - learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True), - parameter_list=parameter_list) + optimizer = Adam( + learning_rate=fluid.layers.inverse_time_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = Adam( - learning_rate=fluid.layers.inverse_time_decay(learning_rate=0.1, - decay_steps=10000, - decay_rate=0.5, - staircase=True)) + learning_rate=fluid.layers.inverse_time_decay( + learning_rate=0.1, + decay_steps=10000, + decay_rate=0.5, + staircase=True, + ) + ) return optimizer def func_test_adam(self): @@ -324,16 +379,21 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = SGDOptimizer(learning_rate=fluid.layers.polynomial_decay( - learning_rate=0.1, decay_steps=5, cycle=self.cycle), - parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.polynomial_decay( + learning_rate=0.1, decay_steps=5, cycle=self.cycle + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = SGDOptimizer(learning_rate=fluid.layers.polynomial_decay( - learning_rate=0.1, decay_steps=5, cycle=self.cycle)) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.polynomial_decay( + learning_rate=0.1, decay_steps=5, cycle=self.cycle + ) + ) return optimizer def func_test_sgd_cycle(self): @@ -356,16 +416,21 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = SGDOptimizer(learning_rate=fluid.layers.cosine_decay( - learning_rate=0.1, step_each_epoch=10000, epochs=120), - parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.cosine_decay( + learning_rate=0.1, step_each_epoch=10000, epochs=120 + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = SGDOptimizer(learning_rate=fluid.layers.cosine_decay( - learning_rate=0.1, step_each_epoch=10000, epochs=120)) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.cosine_decay( + learning_rate=0.1, step_each_epoch=10000, epochs=120 + ) + ) return optimizer def func_test_sgd(self): @@ -378,16 +443,21 @@ class TestImperativeOptimizerCosineDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, 
parameter_list): - optimizer = SGDOptimizer(learning_rate=fluid.layers.noam_decay( - d_model=512, warmup_steps=8000), - parameter_list=parameter_list) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.noam_decay( + d_model=512, warmup_steps=8000 + ), + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = SGDOptimizer(learning_rate=fluid.layers.noam_decay( - d_model=512, warmup_steps=8000)) + optimizer = SGDOptimizer( + learning_rate=fluid.layers.noam_decay( + d_model=512, warmup_steps=8000 + ) + ) return optimizer def func_test_sgd(self): @@ -400,7 +470,6 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase): class TestOptimizerLearningRate(unittest.TestCase): - def func_test_constant_lr(self): with fluid.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") @@ -413,13 +482,13 @@ class TestOptimizerLearningRate(unittest.TestCase): loss = fluid.layers.reduce_mean(b) - adam = fluid.optimizer.Adam(0.001, - parameter_list=linear.parameters()) + adam = fluid.optimizer.Adam( + 0.001, parameter_list=linear.parameters() + ) - np.testing.assert_allclose(adam.current_step_lr(), - 0.001, - rtol=1e-06, - atol=0.0) + np.testing.assert_allclose( + adam.current_step_lr(), 0.001, rtol=1e-06, atol=0.0 + ) for i in range(10): adam.minimize(loss) @@ -447,14 +516,14 @@ class TestOptimizerLearningRate(unittest.TestCase): bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] - adam = fluid.optimizer.Adam(fluid.dygraph.PiecewiseDecay( - bd, value, 0), - parameter_list=linear.parameters()) + adam = fluid.optimizer.Adam( + fluid.dygraph.PiecewiseDecay(bd, value, 0), + parameter_list=linear.parameters(), + ) - np.testing.assert_allclose(adam.current_step_lr(), - 0.2, - rtol=1e-06, - atol=0.0) + np.testing.assert_allclose( + adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0 + ) ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): @@ -481,17 +550,19 @@ class TestOptimizerLearningRate(unittest.TestCase): loss = fluid.layers.reduce_mean(b) base_lr = 1.0 - adam = fluid.optimizer.Adam(fluid.dygraph.NaturalExpDecay( - learning_rate=base_lr, - decay_steps=3, - decay_rate=0.5, - staircase=True), - parameter_list=linear.parameters()) + adam = fluid.optimizer.Adam( + fluid.dygraph.NaturalExpDecay( + learning_rate=base_lr, + decay_steps=3, + decay_rate=0.5, + staircase=True, + ), + parameter_list=linear.parameters(), + ) - np.testing.assert_allclose(adam.current_step_lr(), - 1.0, - rtol=1e-06, - atol=0.0) + np.testing.assert_allclose( + adam.current_step_lr(), 1.0, rtol=1e-06, atol=0.0 + ) ret = [1.0, 1.0, 1.0, np.exp(-0.5), np.exp(-0.5)] for i in range(5): @@ -526,21 +597,24 @@ class TestOptimizerLearningRate(unittest.TestCase): lr = adam.current_step_lr() np.testing.assert_allclose(lr, lr_list[i], rtol=1e-06, atol=0.0) - lr_var = fluid.layers.create_global_var(shape=[1], - value=0.7, - dtype='float32') + lr_var = fluid.layers.create_global_var( + shape=[1], value=0.7, dtype='float32' + ) adam.set_lr(lr_var) adam.minimize(loss) lr = adam.current_step_lr() np.testing.assert_allclose(lr, 0.7, rtol=1e-06, atol=0.0) with self.assertRaises(RuntimeError): - adam = fluid.optimizer.Adam(fluid.dygraph.NaturalExpDecay( - learning_rate=0.1, - decay_steps=3, - decay_rate=0.5, - staircase=True), - parameter_list=linear.parameters()) + adam = fluid.optimizer.Adam( + fluid.dygraph.NaturalExpDecay( + learning_rate=0.1, + decay_steps=3, + decay_rate=0.5, + staircase=True, + ), + parameter_list=linear.parameters(), + ) 
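# --- illustrative aside (not part of the patch) -------------------------------
# The PiecewiseDecay check above expects the learning rate to step through
# `ret` as training advances past each boundary in `bd`. A tiny standalone
# helper reproducing that piecewise-constant schedule (a sketch of the
# semantics only, not the Paddle implementation):
import bisect

def piecewise_lr(step, boundaries, values):
    # `values` has one more entry than `boundaries`; pick the segment `step` falls into.
    return values[bisect.bisect_right(boundaries, step)]

bd = [2, 4, 6, 8]
value = [0.2, 0.4, 0.6, 0.8, 1.0]
print([piecewise_lr(i, bd, value) for i in range(12)])
# -> [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
# ------------------------------------------------------------------------------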
adam.set_lr(0.01) def test_set_lr(self): @@ -550,11 +624,10 @@ class TestOptimizerLearningRate(unittest.TestCase): class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = MomentumOptimizer(learning_rate=0.001, - momentum=0.9, - parameter_list=parameter_list) + optimizer = MomentumOptimizer( + learning_rate=0.001, momentum=0.9, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -571,11 +644,10 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = LarsMomentumOptimizer(learning_rate=0.001, - momentum=0.9, - parameter_list=parameter_list) + optimizer = LarsMomentumOptimizer( + learning_rate=0.001, momentum=0.9, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -592,10 +664,10 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdagradOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = AdagradOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -612,10 +684,10 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase): class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdamaxOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = AdamaxOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -632,21 +704,21 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase): class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DpsgdOptimizer(learning_rate=0.01, - clip=10.0, - batch_size=16.0, - sigma=1.0, - parameter_list=parameter_list) + optimizer = DpsgdOptimizer( + learning_rate=0.01, + clip=10.0, + batch_size=16.0, + sigma=1.0, + parameter_list=parameter_list, + ) optimizer._seed = 100 return optimizer def get_optimizer(self): - optimizer = DpsgdOptimizer(learning_rate=0.01, - clip=10.0, - batch_size=16.0, - sigma=1.0) + optimizer = DpsgdOptimizer( + learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0 + ) optimizer._seed = 100 return optimizer @@ -660,10 +732,10 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase): class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DecayedAdagradOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = DecayedAdagradOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -680,18 +752,19 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase): class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdadeltaOptimizer(learning_rate=0.0003, - epsilon=1.0e-6, - rho=0.95, - parameter_list=parameter_list) + optimizer = AdadeltaOptimizer( + learning_rate=0.0003, + epsilon=1.0e-6, + rho=0.95, + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = AdadeltaOptimizer(learning_rate=0.0003, - epsilon=1.0e-6, 
- rho=0.95) + optimizer = AdadeltaOptimizer( + learning_rate=0.0003, epsilon=1.0e-6, rho=0.95 + ) return optimizer def func_test_adadelta(self): @@ -704,10 +777,10 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase): class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = RMSPropOptimizer(learning_rate=0.1, - parameter_list=parameter_list) + optimizer = RMSPropOptimizer( + learning_rate=0.1, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -724,10 +797,10 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase): class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = FtrlOptimizer(learning_rate=0.1, - parameter_list=parameter_list) + optimizer = FtrlOptimizer( + learning_rate=0.1, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -748,16 +821,18 @@ def exclude_fn(param): class TestImperativeLambOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = LambOptimizer(learning_rate=0.002, - exclude_from_weight_decay_fn=exclude_fn, - parameter_list=parameter_list) + optimizer = LambOptimizer( + learning_rate=0.002, + exclude_from_weight_decay_fn=exclude_fn, + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = LambOptimizer(learning_rate=0.002, - exclude_from_weight_decay_fn=exclude_fn) + optimizer = LambOptimizer( + learning_rate=0.002, exclude_from_weight_decay_fn=exclude_fn + ) return optimizer # should fix: may fail in CI-windows @@ -766,11 +841,10 @@ class TestImperativeLambOptimizer(TestImperativeOptimizerBase): class TestImperativeModelAverage(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = ModelAverage(0.15, - min_average_window=10000, - max_average_window=12500) + optimizer = ModelAverage( + 0.15, min_average_window=10000, max_average_window=12500 + ) return optimizer def func_test_modelaverage(self): @@ -784,13 +858,14 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase): class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DGCMomentumOptimizer(learning_rate=0.0001, - momentum=0.9, - rampup_step=1000, - rampup_begin_step=1252, - sparsity=[0.999, 0.999]) + optimizer = DGCMomentumOptimizer( + learning_rate=0.0001, + momentum=0.9, + rampup_step=1000, + rampup_begin_step=1252, + sparsity=[0.999, 0.999], + ) return optimizer def func_test_dgcmomentum(self): @@ -804,13 +879,14 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = ExponentialMovingAverage(0.999) return optimizer def func_test_exponentialmoving(self): - exception_message = "In dygraph, don't support ExponentialMovingAverage." + exception_message = ( + "In dygraph, don't support ExponentialMovingAverage." 
+ ) self._check_exception(exception_message) def test_exponentialmoving(self): @@ -820,10 +896,10 @@ class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase): class TestImperativePipelineOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = fluid.optimizer.SGD(learning_rate=0.5, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.5, parameter_list=parameter_list + ) optimizer = PipelineOptimizer(optimizer) return optimizer @@ -838,10 +914,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase): class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = fluid.optimizer.SGD(learning_rate=0.5, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.5, parameter_list=parameter_list + ) optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5) return optimizer @@ -856,10 +932,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase): class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = fluid.optimizer.SGD(learning_rate=0.5, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.5, parameter_list=parameter_list + ) optimizer = RecomputeOptimizer(optimizer) return optimizer @@ -874,16 +950,17 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase): class TestImperativeOptimizerList(unittest.TestCase): - def func_test_parameter_list(self): with fluid.dygraph.guard(): linear_1 = Linear(10, 10) linear_2 = Linear(10, 10) - sgd = SGDOptimizer(1.0, - parameter_list=itertools.chain( - linear_1.parameters(), - linear_2.parameters())) + sgd = SGDOptimizer( + 1.0, + parameter_list=itertools.chain( + linear_1.parameters(), linear_2.parameters() + ), + ) in_np = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") in_data = fluid.dygraph.to_variable(in_np) @@ -895,8 +972,9 @@ class TestImperativeOptimizerList(unittest.TestCase): sgd.minimize(loss) self.assertTrue( - len(sgd._parameter_list) == len(linear_1.parameters() + - linear_2.parameters())) + len(sgd._parameter_list) + == len(linear_1.parameters() + linear_2.parameters()) + ) def test_parameter_list(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 3b0ba3572cc541f404fd2ef9c57781cd76763f87..251ac5b6b9338144ac3273c4e23e67c70a89173d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -19,8 +19,25 @@ import itertools import paddle import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.optimizer import MomentumOptimizer, LarsMomentumOptimizer, AdagradOptimizer, AdamaxOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, AdadeltaOptimizer, RMSPropOptimizer, FtrlOptimizer -from paddle.fluid.optimizer import ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer +from paddle.fluid.optimizer import ( + MomentumOptimizer, + LarsMomentumOptimizer, + AdagradOptimizer, + AdamaxOptimizer, + DpsgdOptimizer, + DecayedAdagradOptimizer, + AdadeltaOptimizer, + RMSPropOptimizer, + FtrlOptimizer, +) +from paddle.fluid.optimizer import ( + ModelAverage, + DGCMomentumOptimizer, + 
ExponentialMovingAverage, + PipelineOptimizer, + LookaheadOptimizer, + RecomputeOptimizer, +) from paddle.fluid.dygraph import Linear from test_imperative_base import new_program_scope from paddle.fluid.framework import _test_eager_guard @@ -30,7 +47,6 @@ from paddle.fluid.framework import _test_eager_guard class MLP(fluid.Layer): - def __init__(self, param_attr=None, bias_attr=None): super(MLP, self).__init__() @@ -44,7 +60,6 @@ class MLP(fluid.Layer): class TestImperativeOptimizerBase(unittest.TestCase): - def setUp(self): self.batch_num = 20 @@ -55,7 +70,6 @@ class TestImperativeOptimizerBase(unittest.TestCase): raise NotImplementedError() def reader_decorator(self, reader): - def _reader_imple(): for item in reader(): image = np.array(item[0]).reshape(1, 784) @@ -68,8 +82,11 @@ class TestImperativeOptimizerBase(unittest.TestCase): seed = 90 batch_size = 128 if place == None: - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) try: paddle.disable_static() @@ -77,7 +94,8 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( - parameter_list=mlp.parameters()) + parameter_list=mlp.parameters() + ) except Exception as e: assert str(e) == exception_message finally: @@ -88,8 +106,11 @@ class TestImperativeOptimizerBase(unittest.TestCase): batch_size = 128 if place == None: - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) paddle.disable_static(place) paddle.seed(seed) @@ -99,11 +120,14 @@ class TestImperativeOptimizerBase(unittest.TestCase): optimizer = self.get_optimizer_dygraph(parameter_list=mlp.parameters()) batch_py_reader = fluid.io.PyReader(capacity=1) - batch_py_reader.decorate_sample_list_generator(paddle.batch( - self.reader_decorator(paddle.dataset.mnist.train()), - batch_size=batch_size, - drop_last=True), - places=fluid.CPUPlace()) + batch_py_reader.decorate_sample_list_generator( + paddle.batch( + self.reader_decorator(paddle.dataset.mnist.train()), + batch_size=batch_size, + drop_last=True, + ), + places=fluid.CPUPlace(), + ) dy_param_init_value = {} for batch_id, data in enumerate(batch_py_reader()): @@ -126,10 +150,13 @@ class TestImperativeOptimizerBase(unittest.TestCase): avg_loss.backward() optimizer.minimize(avg_loss) - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.ReduceOnPlateau): + if isinstance( + optimizer._learning_rate, paddle.optimizer.lr.LRScheduler + ): + if isinstance( + optimizer._learning_rate, + paddle.optimizer.lr.ReduceOnPlateau, + ): optimizer._learning_rate.step(avg_loss) else: optimizer._learning_rate.step() @@ -144,20 +171,23 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) if place == None: - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = fluid.Executor(place) mlp = MLP() optimizer = self.get_optimizer() - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=128, - drop_last=True) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True 
+ ) - img = fluid.layers.data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') img = fluid.layers.reshape(img, shape=[batch_size, 784]) cost = mlp(img) @@ -170,8 +200,10 @@ class TestImperativeOptimizerBase(unittest.TestCase): for param in mlp.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -181,22 +213,28 @@ class TestImperativeOptimizerBase(unittest.TestCase): break static_x_data = np.array( - [x[0].reshape(1, 28, 28) for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([128, 1]) + [x[0].reshape(1, 28, 28) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([128, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.ReduceOnPlateau): + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) + if isinstance( + optimizer._learning_rate, paddle.optimizer.lr.LRScheduler + ): + if isinstance( + optimizer._learning_rate, + paddle.optimizer.lr.ReduceOnPlateau, + ): optimizer._learning_rate.step(out[0]) else: optimizer._learning_rate.step() @@ -207,39 +245,38 @@ class TestImperativeOptimizerBase(unittest.TestCase): static_param_value[static_param_name_list[i - 1]] = out[i] for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) if core.is_compiled_with_rocm(): - np.testing.assert_allclose(static_out, - dy_out, - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + static_out, dy_out, rtol=1e-05, atol=0.001 + ) else: np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) for key, value in static_param_value.items(): if core.is_compiled_with_rocm(): - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=0.001 + ) else: - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05 + ) class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): bd = [3, 6, 9] optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, - values=[0.1 * (0.1**i) for i in range(len(bd) + 1)]), - parameters=parameter_list) + values=[0.1 * (0.1**i) for i in range(len(bd) + 1)], + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): @@ -247,7 +284,9 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, - values=[0.1 * 
(0.1**i) for i in range(len(bd) + 1)])) + values=[0.1 * (0.1**i) for i in range(len(bd) + 1)], + ) + ) return optimizer def func_test_sgd(self): @@ -260,18 +299,21 @@ class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, - gamma=0.9), - parameters=parameter_list) + learning_rate=paddle.optimizer.lr.NaturalExpDecay( + learning_rate=0.5, gamma=0.9 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, - gamma=0.9)) + learning_rate=paddle.optimizer.lr.NaturalExpDecay( + learning_rate=0.5, gamma=0.9 + ) + ) return optimizer def func_test_sgd(self): @@ -284,18 +326,21 @@ class TestImperativeOptimizerNaturalExpDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.ExponentialDecay( - learning_rate=0.5, gamma=0.9), - parameters=parameter_list) + learning_rate=0.5, gamma=0.9 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.ExponentialDecay( - learning_rate=0.5, gamma=0.9)) + learning_rate=0.5, gamma=0.9 + ) + ) return optimizer def func_test_sgd(self): @@ -308,18 +353,21 @@ class TestImperativeOptimizerExponentialDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.Adam( learning_rate=paddle.optimizer.lr.InverseTimeDecay( - learning_rate=0.5, gamma=0.9), - parameters=parameter_list) + learning_rate=0.5, gamma=0.9 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.Adam( learning_rate=paddle.optimizer.lr.InverseTimeDecay( - learning_rate=0.5, gamma=0.9)) + learning_rate=0.5, gamma=0.9 + ) + ) return optimizer def func_test_adam(self): @@ -332,19 +380,21 @@ class TestImperativeOptimizerInverseTimeDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, - decay_steps=5, - cycle=self.cycle), - parameters=parameter_list) + learning_rate=paddle.optimizer.lr.PolynomialDecay( + learning_rate=0.5, decay_steps=5, cycle=self.cycle + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.PolynomialDecay( - learning_rate=0.5, decay_steps=5, cycle=self.cycle)) + learning_rate=0.5, decay_steps=5, cycle=self.cycle + ) + ) return optimizer def func_test_sgd_cycle(self): @@ -367,18 +417,21 @@ class TestImperativeOptimizerPolynomialDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=0.5, T_max=5), - parameters=parameter_list) + learning_rate=0.5, T_max=5 + ), + 
parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=0.5, T_max=5)) + learning_rate=0.5, T_max=5 + ) + ) return optimizer def func_test_sgd(self): @@ -391,19 +444,21 @@ class TestImperativeOptimizerCosineAnnealingDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100, - verbose=True), - parameters=parameter_list) + learning_rate=paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100, verbose=True + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.NoamDecay(d_model=0.01, - warmup_steps=100)) + learning_rate=paddle.optimizer.lr.NoamDecay( + d_model=0.01, warmup_steps=100 + ) + ) return optimizer def func_test_sgd(self): @@ -416,18 +471,21 @@ class TestImperativeOptimizerNoamDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.LambdaDecay( - learning_rate=0.5, lr_lambda=lambda epoch: 0.9**epoch), - parameters=parameter_list) + learning_rate=0.5, lr_lambda=lambda epoch: 0.9**epoch + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.LambdaDecay( - learning_rate=0.5, lr_lambda=lambda epoch: 0.9**epoch)) + learning_rate=0.5, lr_lambda=lambda epoch: 0.9**epoch + ) + ) return optimizer def func_test_sgd(self): @@ -440,23 +498,25 @@ class TestImperativeOptimizerLambdaDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.LinearWarmup(learning_rate=0.5, - warmup_steps=20, - start_lr=0, - end_lr=0.5), - parameters=parameter_list) + learning_rate=paddle.optimizer.lr.LinearWarmup( + learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.LinearWarmup(learning_rate=0.5, - warmup_steps=20, - start_lr=0, - end_lr=0.5, - verbose=True)) + learning_rate=paddle.optimizer.lr.LinearWarmup( + learning_rate=0.5, + warmup_steps=20, + start_lr=0, + end_lr=0.5, + verbose=True, + ) + ) return optimizer def func_test_sgd(self): @@ -469,18 +529,21 @@ class TestImperativeOptimizerLinearWarmup(TestImperativeOptimizerBase): class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.MultiStepDecay( - learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8), - parameters=parameter_list) + learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.MultiStepDecay( - learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8)) + learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8 + ) + ) return optimizer def func_test_sgd(self): @@ -493,19 +556,21 
@@ class TestImperativeOptimizerMultiStepDecay(TestImperativeOptimizerBase): class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.StepDecay(learning_rate=0.5, - step_size=5, - gamma=0.8), - parameters=parameter_list) + learning_rate=paddle.optimizer.lr.StepDecay( + learning_rate=0.5, step_size=5, gamma=0.8 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.StepDecay( - learning_rate=0.5, step_size=5, gamma=0.8)) + learning_rate=0.5, step_size=5, gamma=0.8 + ) + ) return optimizer def func_test_sgd(self): @@ -518,18 +583,19 @@ class TestImperativeOptimizerStepLR(TestImperativeOptimizerBase): class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.SGD( learning_rate=paddle.optimizer.lr.ReduceOnPlateau( - learning_rate=0.5), - parameters=parameter_list) + learning_rate=0.5 + ), + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.SGD( - learning_rate=paddle.optimizer.lr.ReduceOnPlateau( - learning_rate=0.5)) + learning_rate=paddle.optimizer.lr.ReduceOnPlateau(learning_rate=0.5) + ) return optimizer def func_test_sgd(self): @@ -542,7 +608,6 @@ class TestImperativeOptimizerReduceOnPlateau(TestImperativeOptimizerBase): class TestOptimizerLearningRate(unittest.TestCase): - def func_test_constant_lr(self): with fluid.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") @@ -557,10 +622,9 @@ class TestOptimizerLearningRate(unittest.TestCase): adam = paddle.optimizer.Adam(0.001, parameters=linear.parameters()) - np.testing.assert_allclose(adam.get_lr(), - 0.001, - rtol=1e-06, - atol=0.0) + np.testing.assert_allclose( + adam.get_lr(), 0.001, rtol=1e-06, atol=0.0 + ) for i in range(10): adam.minimize(loss) @@ -589,8 +653,9 @@ class TestOptimizerLearningRate(unittest.TestCase): value = [0.2, 0.4, 0.6, 0.8, 1.0] scheduler = paddle.optimizer.lr.PiecewiseDecay(bd, value) - adam = paddle.optimizer.Adam(scheduler, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + scheduler, parameters=linear.parameters() + ) np.testing.assert_allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) @@ -618,8 +683,9 @@ class TestOptimizerLearningRate(unittest.TestCase): base_lr = 1.0 scheduler = paddle.optimizer.lr.NaturalExpDecay(1.0, gamma=0.5) - adam = paddle.optimizer.Adam(scheduler, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + scheduler, parameters=linear.parameters() + ) np.testing.assert_allclose(adam.get_lr(), 1.0, rtol=1e-06, atol=0.0) @@ -657,16 +723,18 @@ class TestOptimizerLearningRate(unittest.TestCase): np.testing.assert_allclose(lr, lr_list[i], rtol=1e-06, atol=0.0) with self.assertRaises(TypeError): - lr_var = fluid.layers.create_global_var(shape=[1], - value=0.7, - dtype='float32') + lr_var = fluid.layers.create_global_var( + shape=[1], value=0.7, dtype='float32' + ) adam.set_lr(lr_var) with self.assertRaises(RuntimeError): adam = paddle.optimizer.Adam( - paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.1, - gamma=0.5), - parameters=linear.parameters()) + paddle.optimizer.lr.NaturalExpDecay( + learning_rate=0.1, gamma=0.5 + ), + parameters=linear.parameters(), + ) adam.set_lr(0.01) def test_set_lr(self): @@ -676,11 +744,10 @@ class 
TestOptimizerLearningRate(unittest.TestCase): class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = MomentumOptimizer(learning_rate=0.001, - momentum=0.9, - parameter_list=parameter_list) + optimizer = MomentumOptimizer( + learning_rate=0.001, momentum=0.9, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -697,11 +764,10 @@ class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = LarsMomentumOptimizer(learning_rate=0.001, - momentum=0.9, - parameter_list=parameter_list) + optimizer = LarsMomentumOptimizer( + learning_rate=0.001, momentum=0.9, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -718,10 +784,10 @@ class TestImperativeLarsMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdagradOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = AdagradOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -738,10 +804,10 @@ class TestImperativeAdagradOptimizer(TestImperativeOptimizerBase): class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdamaxOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = AdamaxOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -758,21 +824,21 @@ class TestImperativeAdamaxOptimizer(TestImperativeOptimizerBase): class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DpsgdOptimizer(learning_rate=0.01, - clip=10.0, - batch_size=16.0, - sigma=1.0, - parameter_list=parameter_list) + optimizer = DpsgdOptimizer( + learning_rate=0.01, + clip=10.0, + batch_size=16.0, + sigma=1.0, + parameter_list=parameter_list, + ) optimizer._seed = 100 return optimizer def get_optimizer(self): - optimizer = DpsgdOptimizer(learning_rate=0.01, - clip=10.0, - batch_size=16.0, - sigma=1.0) + optimizer = DpsgdOptimizer( + learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0 + ) optimizer._seed = 100 return optimizer @@ -786,10 +852,10 @@ class TestImperativeDpsgdOptimizer(TestImperativeOptimizerBase): class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DecayedAdagradOptimizer(learning_rate=0.2, - parameter_list=parameter_list) + optimizer = DecayedAdagradOptimizer( + learning_rate=0.2, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -806,18 +872,19 @@ class TestImperativeDecayedAdagradOptimizer(TestImperativeOptimizerBase): class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = AdadeltaOptimizer(learning_rate=0.0003, - epsilon=1.0e-6, - rho=0.95, - parameter_list=parameter_list) + optimizer = AdadeltaOptimizer( + learning_rate=0.0003, + epsilon=1.0e-6, + rho=0.95, + parameter_list=parameter_list, + ) return optimizer def get_optimizer(self): - optimizer = AdadeltaOptimizer(learning_rate=0.0003, - epsilon=1.0e-6, - rho=0.95) + optimizer = AdadeltaOptimizer( + learning_rate=0.0003, 
epsilon=1.0e-6, rho=0.95 + ) return optimizer def func_test_adadelta(self): @@ -830,10 +897,10 @@ class TestImperativeAdadeltaOptimizer(TestImperativeOptimizerBase): class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = RMSPropOptimizer(learning_rate=0.1, - parameter_list=parameter_list) + optimizer = RMSPropOptimizer( + learning_rate=0.1, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -850,10 +917,10 @@ class TestImperativeRMSPropOptimizer(TestImperativeOptimizerBase): class TestImperativeFtrlOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = FtrlOptimizer(learning_rate=0.1, - parameter_list=parameter_list) + optimizer = FtrlOptimizer( + learning_rate=0.1, parameter_list=parameter_list + ) return optimizer def get_optimizer(self): @@ -874,17 +941,18 @@ def exclude_fn(param): class TestImperativeLambOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = paddle.optimizer.Lamb( learning_rate=0.002, exclude_from_weight_decay_fn=exclude_fn, - parameters=parameter_list) + parameters=parameter_list, + ) return optimizer def get_optimizer(self): optimizer = paddle.optimizer.Lamb( - learning_rate=0.002, exclude_from_weight_decay_fn=exclude_fn) + learning_rate=0.002, exclude_from_weight_decay_fn=exclude_fn + ) return optimizer # should fix: may fail in CI-windows @@ -893,11 +961,10 @@ class TestImperativeLambOptimizer(TestImperativeOptimizerBase): class TestImperativeModelAverage(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = ModelAverage(0.15, - min_average_window=10000, - max_average_window=12500) + optimizer = ModelAverage( + 0.15, min_average_window=10000, max_average_window=12500 + ) return optimizer def func_test_modelaverage(self): @@ -911,13 +978,14 @@ class TestImperativeModelAverage(TestImperativeOptimizerBase): class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = DGCMomentumOptimizer(learning_rate=0.0001, - momentum=0.9, - rampup_step=1000, - rampup_begin_step=1252, - sparsity=[0.999, 0.999]) + optimizer = DGCMomentumOptimizer( + learning_rate=0.0001, + momentum=0.9, + rampup_step=1000, + rampup_begin_step=1252, + sparsity=[0.999, 0.999], + ) return optimizer def func_test_dgcmomentum(self): @@ -931,13 +999,14 @@ class TestImperativeDGCMomentumOptimizer(TestImperativeOptimizerBase): class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): optimizer = ExponentialMovingAverage(0.999) return optimizer def func_test_exponentialmoving(self): - exception_message = "In dygraph, don't support ExponentialMovingAverage." + exception_message = ( + "In dygraph, don't support ExponentialMovingAverage." 
+ ) self._check_exception(exception_message) def test_exponentialmoving(self): @@ -947,10 +1016,10 @@ class TestImperativeExponentialMovingAverage(TestImperativeOptimizerBase): class TestImperativePipelineOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = paddle.optimizer.SGD(learning_rate=0.5, - parameters=parameter_list) + optimizer = paddle.optimizer.SGD( + learning_rate=0.5, parameters=parameter_list + ) optimizer = PipelineOptimizer(optimizer) return optimizer @@ -965,10 +1034,10 @@ class TestImperativePipelineOptimizer(TestImperativeOptimizerBase): class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = paddle.optimizer.SGD(learning_rate=0.5, - parameters=parameter_list) + optimizer = paddle.optimizer.SGD( + learning_rate=0.5, parameters=parameter_list + ) optimizer = LookaheadOptimizer(optimizer, alpha=0.5, k=5) return optimizer @@ -983,10 +1052,10 @@ class TestImperativeLookaheadOptimizer(TestImperativeOptimizerBase): class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase): - def get_optimizer_dygraph(self, parameter_list): - optimizer = paddle.optimizer.SGD(learning_rate=0.5, - parameters=parameter_list) + optimizer = paddle.optimizer.SGD( + learning_rate=0.5, parameters=parameter_list + ) optimizer = RecomputeOptimizer(optimizer) return optimizer @@ -1001,16 +1070,17 @@ class TestImperativeRecomputeOptimizer(TestImperativeOptimizerBase): class TestImperativeOptimizerList(unittest.TestCase): - def func_test_parameter_list(self): with fluid.dygraph.guard(): linear_1 = Linear(10, 10) linear_2 = Linear(10, 10) - sgd = paddle.optimizer.SGD(1.0, - parameters=itertools.chain( - linear_1.parameters(), - linear_2.parameters())) + sgd = paddle.optimizer.SGD( + 1.0, + parameters=itertools.chain( + linear_1.parameters(), linear_2.parameters() + ), + ) in_np = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") in_data = fluid.dygraph.to_variable(in_np) @@ -1022,8 +1092,9 @@ class TestImperativeOptimizerList(unittest.TestCase): sgd.minimize(loss) self.assertTrue( - len(sgd._parameter_list) == len(linear_1.parameters() + - linear_2.parameters())) + len(sgd._parameter_list) + == len(linear_1.parameters() + linear_2.parameters()) + ) def test_parameter_list(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py b/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py index 7c6d13399c0e08ad14dc551fb4f410997749c99b..300a195f6dce38b5ba56630fb2987bdd685a6baf 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py @@ -20,11 +20,14 @@ import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.dygraph.parallel import DataParallel from paddle.fluid.dygraph.base import to_variable -from paddle.fluid.dygraph.parallel import _coalesce_tensors, _split_tensors, _reshape_inplace +from paddle.fluid.dygraph.parallel import ( + _coalesce_tensors, + _split_tensors, + _reshape_inplace, +) class MyLayer(fluid.Layer): - def __init__(self, name_scope): super(MyLayer, self).__init__(name_scope) @@ -36,7 +39,6 @@ class MyLayer(fluid.Layer): class TestImperativeParallelCoalesceSplit(unittest.TestCase): - def test_coalesce_split(self): with fluid.dygraph.guard(): test_layer = MyLayer("test_layer") @@ -47,8 +49,9 @@ class 
TestImperativeParallelCoalesceSplit(unittest.TestCase): vars = [] vars.append(to_variable(np.random.random([2, 3]).astype("float32"))) vars.append(to_variable(np.random.random([4, 9]).astype("float32"))) - vars.append(to_variable( - np.random.random([10, 1]).astype("float32"))) + vars.append( + to_variable(np.random.random([10, 1]).astype("float32")) + ) var_groups = OrderedDict() var_groups.setdefault(0, vars) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py b/python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py index f966b2ba9a8e31a159965cd1c8d9e10ca1e5065c..7f723a5d665d5c389ad525ca40bc954656e97d86 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_partitial_backward.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import _test_eager_guard class TestImperativePartitialBackward(unittest.TestCase): - def func_partitial_backward(self): with fluid.dygraph.guard(): x = np.random.randn(2, 4, 5).astype("float32") @@ -39,11 +38,14 @@ class TestImperativePartitialBackward(unittest.TestCase): self.assertIsNone(param._grad_ivar()) optimizer = fluid.optimizer.AdamOptimizer( - parameter_list=(linear1.parameters() + linear2.parameters())) + parameter_list=(linear1.parameters() + linear2.parameters()) + ) _, params_grads = optimizer.minimize(loss) - self.assertListEqual(sorted([p.name for p in linear1.parameters()]), - sorted([p_g[0].name for p_g in params_grads])) + self.assertListEqual( + sorted([p.name for p in linear1.parameters()]), + sorted([p_g[0].name for p_g in params_grads]), + ) linear1.clear_gradients() linear2.clear_gradients() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 0588016ce2765d82b184a3cff97fd8b7b55929c5..ecf81ed54336cc2a3e7bd7f7c1a954b8dc5294e6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -28,13 +28,9 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class SimpleLSTMRNN(fluid.Layer): - - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -56,19 +52,26 @@ class SimpleLSTMRNN(fluid.Layer): weight_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, 
init_hidden=None, init_cell=None): @@ -76,29 +79,29 @@ class SimpleLSTMRNN(fluid.Layer): self.hidden_array = [] for i in range(self._num_layers): - pre_hidden = fluid.layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = fluid.layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_hidden = fluid.layers.reshape(pre_hidden, - shape=[-1, self._hidden_size]) - pre_cell = fluid.layers.reshape(pre_cell, - shape=[-1, self._hidden_size]) + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size] + ) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size] + ) self.hidden_array.append(pre_hidden) self.cell_array.append(pre_cell) res = [] for index in range(self._num_steps): - self._input = fluid.layers.slice(input_embedding, - axes=[1], - starts=[index], - ends=[index + 1]) - self._input = fluid.layers.reshape(self._input, - shape=[-1, self._hidden_size]) + self._input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1] + ) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size] + ) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] pre_cell = self.cell_array[k] @@ -109,11 +112,12 @@ class SimpleLSTMRNN(fluid.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split(gate_input, - num_or_sections=4, - dim=-1) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1 + ) c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * fluid.layers.tanh(j) + i + ) * fluid.layers.tanh(j) m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) self.hidden_array[k] = m self.cell_array[k] = c @@ -123,33 +127,39 @@ class SimpleLSTMRNN(fluid.Layer): self._input = fluid.layers.dropout( self._input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append( - fluid.layers.reshape(self._input, - shape=[1, -1, self._hidden_size])) + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size] + ) + ) real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dropout=None): + def __init__( + self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -157,11 +167,13 @@ class PtbModel(fluid.Layer): self.num_layers = num_layers self.num_steps = 
num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = Embedding( size=[vocab_size, hidden_size], dtype='float32', @@ -169,46 +181,60 @@ class PtbModel(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) x_emb = self.embedding(input) x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = fluid.layers.dropout( x_emb, dropout_prob=self.drop_out, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) rnn_out = fluid.layers.reshape( - rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + rnn_out, shape=[-1, self.num_steps, self.hidden_size] + ) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -217,7 +243,6 @@ class PtbModel(fluid.Layer): class TestDygraphPtbRnn(unittest.TestCase): - def func_test_ptb_rnn(self): for is_sparse in [True, False]: self.ptb_rnn_cpu_float32(is_sparse) @@ -242,15 +267,18 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse) - - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + 
num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + ) + + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -265,27 +293,32 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) if i % 5 == 0 and _in_legacy_dygraph(): outs, traced_layer = TracedLayer.trace( - ptb_model, [x, y, init_hidden, init_cell]) + ptb_model, [x, y, init_hidden, init_cell] + ) outs_static = traced_layer([x, y, init_hidden, init_cell]) helper.assertEachVar(outs, outs_static) if program is not None: self.assertTrue( - is_equal_program(traced_layer.program, program)) + is_equal_program(traced_layer.program, program) + ) program = traced_layer.program traced_layer.save_inference_model( - './infe_imperative_ptb_rnn', feed=list(range(4))) + './infe_imperative_ptb_rnn', feed=list(range(4)) + ) else: outs = ptb_model(x, y, init_hidden, init_cell) @@ -308,29 +341,35 @@ class TestDygraphPtbRnn(unittest.TestCase): with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse) - - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + ) + + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd.minimize(static_loss) static_param_updated = dict() static_param_init = dict() @@ -338,8 +377,10 @@ class TestDygraphPtbRnn(unittest.TestCase): for param in ptb_model.parameters(): static_param_name_list.append(param.name) - out = exe.run(framework.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + framework.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] static_loss_value = None @@ -351,33 +392,40 @@ class 
TestDygraphPtbRnn(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] if i == batch_num - 1: for k in range(3, len(out)): - static_param_updated[static_param_name_list[k - - 3]] = out[k] + static_param_updated[ + static_param_name_list[k - 3] + ] = out[k] np.testing.assert_array_equal(static_loss_value, dy_loss_value) - np.testing.assert_array_equal(static_last_cell_value, - dy_last_cell_value) - np.testing.assert_array_equal(static_last_hidden_value, - dy_last_hidden_value) + np.testing.assert_array_equal( + static_last_cell_value, dy_last_cell_value + ) + np.testing.assert_array_equal( + static_last_hidden_value, dy_last_hidden_value + ) for key, value in static_param_init.items(): np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in static_param_updated.items(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index 286049500c2968f2996d3bbbfe93b15e6cb772e6..38c8b9ca73dd464c0a845e346052f9a73e562325 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -26,7 +26,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDygraphPtbRnnSortGradient(unittest.TestCase): - def func_ptb_rnn_sort_gradient(self): for is_sparse in [True, False]: self.ptb_rnn_sort_gradient_cpu_float32(is_sparse) @@ -47,15 +46,18 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse) - - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + ) + + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -68,15 +70,18 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) 
+ init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -95,29 +100,35 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse) - - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + ) + + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps, 1], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps, 1], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd.minimize(static_loss) static_param_updated = dict() static_param_init = dict() @@ -125,8 +136,10 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): for param in ptb_model.parameters(): static_param_name_list.append(param.name) - out = exe.run(framework.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + framework.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] static_loss_value = None @@ -138,33 +151,40 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] if i == batch_num - 1: for k in range(3, len(out)): - 
static_param_updated[static_param_name_list[k - - 3]] = out[k] + static_param_updated[ + static_param_name_list[k - 3] + ] = out[k] np.testing.assert_array_equal(static_loss_value, dy_loss_value) - np.testing.assert_array_equal(static_last_cell_value, - dy_last_cell_value) - np.testing.assert_array_equal(static_last_hidden_value, - dy_last_hidden_value) + np.testing.assert_array_equal( + static_last_cell_value, dy_last_cell_value + ) + np.testing.assert_array_equal( + static_last_hidden_value, dy_last_hidden_value + ) for key, value in static_param_init.items(): np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in static_param_updated.items(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py index 622d174eefcc0004794757548ed4e6a02a0fa8c2..123803731da1bd942041a440d68b82ce8ce5281e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py @@ -23,7 +23,6 @@ import numpy as np class RecurrentTest(fluid.Layer): - def __init__(self, name_scope): super(RecurrentTest, self).__init__(name_scope) @@ -34,7 +33,6 @@ class RecurrentTest(fluid.Layer): class TestRecurrentFeed(unittest.TestCase): - def test_recurrent_feed(self): seed = 90 @@ -84,28 +82,33 @@ class TestRecurrentFeed(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - in1 = fluid.layers.data(name="inp1", - shape=[2, 2], - append_batch_size=False) - in2 = fluid.layers.data(name="inp2", - shape=[2, 2], - append_batch_size=False) + in1 = fluid.layers.data( + name="inp1", shape=[2, 2], append_batch_size=False + ) + in2 = fluid.layers.data( + name="inp2", shape=[2, 2], append_batch_size=False + ) rt1 = RecurrentTest("RecurrentTest") static_sum_out, static_out = rt1(in1, in2) fluid.backward.append_backward(static_sum_out) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) - - static_dout = fluid.default_main_program().block( - 0)._find_var_recursive(static_out.name + "@GRAD") + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + + static_dout = ( + fluid.default_main_program() + .block(0) + ._find_var_recursive(static_out.name + "@GRAD") + ) fetch_list = [static_sum_out, static_out, static_dout] for i in range(3): - out = exe.run(fluid.default_main_program(), - feed={ - "inp1": original_np1, - "inp2": original_np2 - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"inp1": original_np1, "inp2": original_np2}, + fetch_list=fetch_list, + ) static_out_value = out[1] static_sum_out = out[0] static_dout = out[2] diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index a831737d75242ed87201ebfcd2fefb7e7a0ac52f..5e52815bbb30d51e6b19f0ed18158199e9551e3b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -25,7 +25,6 @@ from paddle.fluid.framework import _test_eager_guard class Policy(fluid.dygraph.Layer): - def __init__(self, input_size): super(Policy, self).__init__() @@ -46,7 +45,6 @@ class Policy(fluid.dygraph.Layer): class TestImperativeMnist(unittest.TestCase): - def 
test_mnist_float32(self): seed = 90 epoch_num = 1 @@ -83,8 +81,9 @@ class TestImperativeMnist(unittest.TestCase): loss_probs = fluid.layers.elementwise_mul(dy_reward, loss_probs) loss = fluid.layers.reduce_sum(loss_probs) - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=policy.parameters()) + sgd = SGDOptimizer( + learning_rate=1e-3, parameter_list=policy.parameters() + ) dy_param_init_value = {} @@ -108,29 +107,35 @@ class TestImperativeMnist(unittest.TestCase): with fluid.dygraph.guard(): with _test_eager_guard(): - eager_out, eager_param_init_value, eager_param_value = run_dygraph( - ) + ( + eager_out, + eager_param_init_value, + eager_param_value, + ) = run_dygraph() with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) policy = Policy(input_size=4) st_sgd = SGDOptimizer(learning_rate=1e-3) - st_state = fluid.layers.data(name='st_state', - shape=[4], - dtype='float32') - st_reward = fluid.layers.data(name='st_reward', - shape=[1], - dtype='float32') - st_mask = fluid.layers.data(name='st_mask', - shape=[2], - dtype='float32') + st_state = fluid.layers.data( + name='st_state', shape=[4], dtype='float32' + ) + st_reward = fluid.layers.data( + name='st_reward', shape=[1], dtype='float32' + ) + st_mask = fluid.layers.data( + name='st_mask', shape=[2], dtype='float32' + ) st_loss_probs = policy(st_state) @@ -139,7 +144,8 @@ class TestImperativeMnist(unittest.TestCase): st_loss_probs = fluid.layers.reduce_sum(st_loss_probs, dim=-1) st_loss_probs = fluid.layers.elementwise_mul( - st_reward, st_loss_probs) + st_reward, st_loss_probs + ) st_loss = fluid.layers.reduce_sum(st_loss_probs) st_sgd.minimize(st_loss) @@ -150,8 +156,10 @@ class TestImperativeMnist(unittest.TestCase): for param in policy.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -159,13 +167,11 @@ class TestImperativeMnist(unittest.TestCase): fetch_list = [st_loss.name] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "st_state": state, - "st_reward": reward, - "st_mask": mask - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"st_state": state, "st_reward": reward, "st_mask": mask}, + fetch_list=fetch_list, + ) static_param_value = {} static_out = out[0] diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 55a51347283a811cafe189a8ce34499342ba9b4e..657246795f7b57986ddf37c02d15d7a4d0a7c5c3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -26,7 +26,7 @@ from utils import DyGraphProgramDescTracerTestHelper, is_equal_program from paddle.fluid.dygraph import TracedLayer from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph -#NOTE(zhiqiu): run with FLAGS_cudnn_deterministic=1 +# NOTE(zhiqiu): run with FLAGS_cudnn_deterministic=1 batch_size = 8 train_parameters = { @@ -37,7 +37,7 @@ train_parameters 
= { "name": "piecewise_decay", "batch_size": batch_size, "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] + "steps": [0.1, 0.01, 0.001, 0.0001], }, "batch_size": batch_size, "lr": 0.1, @@ -60,8 +60,9 @@ def optimizer_setting(params, parameter_list=None): lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] if fluid._non_static_mode(): - optimizer = fluid.optimizer.SGD(learning_rate=0.01, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.01, parameter_list=parameter_list + ) else: optimizer = fluid.optimizer.SGD(learning_rate=0.01) # TODO(minqiyang): Add learning rate scheduler support to dygraph mode @@ -76,26 +77,29 @@ def optimizer_setting(params, parameter_list=None): class ConvBNLayer(fluid.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None, - use_cudnn=False): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + use_cudnn=False, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=False, - use_cudnn=use_cudnn) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False, + use_cudnn=use_cudnn, + ) self._batch_norm = BatchNorm(num_filters, act=act) @@ -107,38 +111,42 @@ class ConvBNLayer(fluid.Layer): class BottleneckBlock(fluid.Layer): - - def __init__(self, - num_channels, - num_filters, - stride, - shortcut=True, - use_cudnn=False): + def __init__( + self, num_channels, num_filters, stride, shortcut=True, use_cudnn=False + ): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1, - act='relu', - use_cudnn=use_cudnn) - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - act='relu', - use_cudnn=use_cudnn) - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 4, - filter_size=1, - act=None, - use_cudnn=use_cudnn) + self.conv0 = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=1, + act='relu', + use_cudnn=use_cudnn, + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + act='relu', + use_cudnn=use_cudnn, + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act=None, + use_cudnn=use_cudnn, + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 4, - filter_size=1, - stride=stride, - use_cudnn=use_cudnn) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + use_cudnn=use_cudnn, + ) self.shortcut = shortcut @@ -159,14 +167,16 @@ class BottleneckBlock(fluid.Layer): class ResNet(fluid.Layer): - def __init__(self, layers=50, class_dim=102, use_cudnn=True): super(ResNet, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + 
supported_layers, layers + ) if layers == 50: depth = [3, 4, 6, 3] @@ -177,16 +187,17 @@ class ResNet(fluid.Layer): num_channels = [64, 256, 512, 1024] num_filters = [64, 128, 256, 512] - self.conv = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu', - use_cudnn=use_cudnn) - self.pool2d_max = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + use_cudnn=use_cudnn, + ) + self.pool2d_max = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] for block in range(len(depth)): @@ -194,22 +205,27 @@ class ResNet(fluid.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - use_cudnn=use_cudnn)) + BottleneckBlock( + num_channels=num_channels[block] + if i == 0 + else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + use_cudnn=use_cudnn, + ), + ) self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) self.pool2d_avg_output = num_filters[-1] * 4 * 1 * 1 import math + stdv = 1.0 / math.sqrt(2048 * 1.0) self.out = Linear( @@ -217,7 +233,9 @@ class ResNet(fluid.Layer): class_dim, act='softmax', param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + ) def forward(self, inputs): y = self.conv(inputs) @@ -231,9 +249,7 @@ class ResNet(fluid.Layer): class TestDygraphResnet(unittest.TestCase): - def reader_decorator(self, reader): - def _reader_imple(): for item in reader(): doc = np.array(item[0]).reshape(3, 224, 224) @@ -255,13 +271,15 @@ class TestDygraphResnet(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet() - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) dy_param_init_value = {} for param in resnet.parameters(): @@ -274,10 +292,14 @@ class TestDygraphResnet(unittest.TestCase): if batch_id >= batch_num: break - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) @@ -288,10 +310,12 @@ class TestDygraphResnet(unittest.TestCase): out, traced_layer = TracedLayer.trace(resnet, img) if program is not None: self.assertTrue( - is_equal_program(program, traced_layer.program)) + is_equal_program(program, traced_layer.program) + ) traced_layer.save_inference_model( - './infer_imperative_resnet') + './infer_imperative_resnet' + ) program = 
traced_layer.program else: @@ -322,9 +346,11 @@ class TestDygraphResnet(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + core.grad_var_suffix() + ] = np_array optimizer.minimize(avg_loss) resnet.clear_gradients() @@ -337,8 +363,11 @@ class TestDygraphResnet(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) resnet = ResNet() optimizer = optimizer_setting(train_parameters) @@ -346,11 +375,12 @@ class TestDygraphResnet(unittest.TestCase): np.random.seed(seed) train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) - img = fluid.layers.data(name='pixel', - shape=[3, 224, 224], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[3, 224, 224], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) loss = fluid.layers.cross_entropy(input=out, label=label) @@ -365,11 +395,14 @@ class TestDygraphResnet(unittest.TestCase): static_param_name_list.append(param.name) for param in resnet.parameters(): if param.trainable: - static_grad_name_list.append(param.name + - core.grad_var_suffix()) + static_grad_name_list.append( + param.name + core.grad_var_suffix() + ) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -379,9 +412,13 @@ class TestDygraphResnet(unittest.TestCase): break static_x_data = np.array( - [x[0].reshape(3, 224, 224) for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([batch_size, 1]) + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([batch_size, 1]) + ) if traced_layer is not None: traced_layer([static_x_data]) @@ -389,26 +426,30 @@ class TestDygraphResnet(unittest.TestCase): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_grad_value = {} static_out = out[0] param_start_pos = 1 grad_start_pos = len(static_param_name_list) + param_start_pos - for i in range(param_start_pos, - len(static_param_name_list) + param_start_pos): - static_param_value[static_param_name_list[ - i - param_start_pos]] = out[i] - for i in range(grad_start_pos, - len(static_grad_name_list) + grad_start_pos): - static_grad_value[static_grad_name_list[ - i - grad_start_pos]] = out[i] + for i in range( + param_start_pos, + len(static_param_name_list) + param_start_pos, + ): + static_param_value[ + static_param_name_list[i - param_start_pos] + ] = out[i] + for i in range( + grad_start_pos, 
len(static_grad_name_list) + grad_start_pos + ): + static_grad_value[ + static_grad_name_list[i - grad_start_pos] + ] = out[i] print("static", static_out) print("dygraph", dy_out) @@ -417,9 +458,9 @@ class TestDygraphResnet(unittest.TestCase): self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index e28f2c1408620564c9450fa0f92f3a63718d4a3a..9e203092dc77b46a333f9b883d24dfdd8ee1db61 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -32,7 +32,7 @@ train_parameters = { "name": "piecewise_decay", "batch_size": batch_size, "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] + "steps": [0.1, 0.01, 0.001, 0.0001], }, "batch_size": batch_size, "lr": 0.1, @@ -55,8 +55,9 @@ def optimizer_setting(params, parameter_list=None): lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] if fluid._non_static_mode(): - optimizer = fluid.optimizer.SGD(learning_rate=0.01, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.01, parameter_list=parameter_list + ) else: optimizer = fluid.optimizer.SGD(learning_rate=0.01) # TODO(minqiyang): Add learning rate scheduler support to dygraph mode @@ -71,7 +72,6 @@ def optimizer_setting(params, parameter_list=None): class TestDygraphResnetSortGradient(unittest.TestCase): - def func_test_resnet_sort_gradient_float32(self): seed = 90 @@ -83,14 +83,17 @@ class TestDygraphResnetSortGradient(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) resnet = ResNet() - optimizer = optimizer_setting(train_parameters, - parameter_list=resnet.parameters()) + optimizer = optimizer_setting( + train_parameters, parameter_list=resnet.parameters() + ) np.random.seed(seed) import random + random.seed = seed train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) dy_param_init_value = {} for param in resnet.parameters(): @@ -100,10 +103,14 @@ class TestDygraphResnetSortGradient(unittest.TestCase): if batch_id >= batch_num: break - dy_x_data = np.array([x[0].reshape(3, 224, 224) - for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape(batch_size, 1) + dy_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape(batch_size, 1) + ) img = to_variable(dy_x_data) label = to_variable(y_data) @@ -126,9 +133,11 @@ class TestDygraphResnetSortGradient(unittest.TestCase): for param in resnet.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + core.grad_var_suffix() + ] = np_array optimizer.minimize(avg_loss) resnet.clear_gradients() @@ -141,22 +150,27 @@ class TestDygraphResnetSortGradient(unittest.TestCase): paddle.seed(seed) 
paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) resnet = ResNet() optimizer = optimizer_setting(train_parameters) np.random.seed(seed) import random + random.seed = seed train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), - batch_size=batch_size) + batch_size=batch_size, + ) - img = fluid.layers.data(name='pixel', - shape=[3, 224, 224], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[3, 224, 224], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) loss = fluid.layers.cross_entropy(input=out, label=label) @@ -171,11 +185,14 @@ class TestDygraphResnetSortGradient(unittest.TestCase): static_param_name_list.append(param.name) for param in resnet.parameters(): if param.trainable: - static_grad_name_list.append(param.name + - core.grad_var_suffix()) + static_grad_name_list.append( + param.name + core.grad_var_suffix() + ) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -185,42 +202,50 @@ class TestDygraphResnetSortGradient(unittest.TestCase): break static_x_data = np.array( - [x[0].reshape(3, 224, 224) for x in data]).astype('float32') - y_data = np.array([x[1] for x in data - ]).astype('int64').reshape([batch_size, 1]) + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([batch_size, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_grad_value = {} static_out = out[0] param_start_pos = 1 grad_start_pos = len(static_param_name_list) + param_start_pos - for i in range(param_start_pos, - len(static_param_name_list) + param_start_pos): - static_param_value[static_param_name_list[ - i - param_start_pos]] = out[i] - for i in range(grad_start_pos, - len(static_grad_name_list) + grad_start_pos): - static_grad_value[static_grad_name_list[ - i - grad_start_pos]] = out[i] + for i in range( + param_start_pos, + len(static_param_name_list) + param_start_pos, + ): + static_param_value[ + static_param_name_list[i - param_start_pos] + ] = out[i] + for i in range( + grad_start_pos, len(static_grad_name_list) + grad_start_pos + ): + static_grad_value[ + static_grad_name_list[i - grad_start_pos] + ] = out[i] np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py 
b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index 7933d4f15456ee0cc30d1ebbb8bf304bdbade316..55cc00b12ab91718934f1f3e972f25722780f2bb 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -26,13 +26,9 @@ from paddle.fluid.framework import _test_eager_guard class SimpleLSTMRNN(fluid.Layer): - - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -51,19 +47,26 @@ class SimpleLSTMRNN(fluid.Layer): weight_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): @@ -71,29 +74,29 @@ class SimpleLSTMRNN(fluid.Layer): self.hidden_array = [] for i in range(self._num_layers): - pre_hidden = fluid.layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = fluid.layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_hidden = fluid.layers.reshape(pre_hidden, - shape=[-1, self._hidden_size]) - pre_cell = fluid.layers.reshape(pre_cell, - shape=[-1, self._hidden_size]) + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size] + ) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size] + ) self.hidden_array.append(pre_hidden) self.cell_array.append(pre_cell) res = [] for index in range(self._num_steps): - self._input = fluid.layers.slice(input_embedding, - axes=[1], - starts=[index], - ends=[index + 1]) - self._input = fluid.layers.reshape(self._input, - shape=[-1, self._hidden_size]) + self._input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1] + ) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size] + ) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] pre_cell = self.cell_array[k] @@ -104,11 +107,12 @@ class SimpleLSTMRNN(fluid.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split(gate_input, - num_or_sections=4, - dim=-1) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1 + ) c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * 
fluid.layers.tanh(j) + i + ) * fluid.layers.tanh(j) m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) self.hidden_array[k] = m self.cell_array[k] = c @@ -118,32 +122,38 @@ class SimpleLSTMRNN(fluid.Layer): self._input = fluid.layers.dropout( self._input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append( - fluid.layers.reshape(self._input, - shape=[1, -1, self._hidden_size])) + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size] + ) + ) real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): + def __init__( + self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -151,11 +161,13 @@ class PtbModel(fluid.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = Embedding( size=[vocab_size, hidden_size], dtype='float32', @@ -163,48 +175,62 @@ class PtbModel(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) x_emb = self.embedding(input) x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = fluid.layers.dropout( x_emb, dropout_prob=self.drop_out, - 
dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) rnn_out = fluid.layers.reshape( - rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + rnn_out, shape=[-1, self.num_steps, self.hidden_size] + ) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -213,7 +239,6 @@ class PtbModel(fluid.Layer): class TestDygraphPtbRnn(unittest.TestCase): - def func_setUp(self): seed = 90 hidden_size = 10 @@ -228,11 +253,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -242,11 +269,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=lr_arr), - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr_arr + ), + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -258,15 +291,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -312,11 +348,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = 
[] lr_arr = [1.0] @@ -326,11 +364,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=lr_arr), - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr_arr + ), + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -342,15 +386,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -382,8 +429,9 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -420,11 +468,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -434,11 +484,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=lr_arr), - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr_arr + ), + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -450,15 +506,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = 
to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -487,8 +546,9 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -525,11 +585,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -539,11 +601,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=lr_arr), - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr_arr + ), + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -555,15 +623,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -596,8 +667,9 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -634,18 +706,25 @@ class TestDygraphPtbRnn(unittest.TestCase): with fluid.dygraph.guard(): # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) - - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=0.0, - beta1=0.8, - beta2=0.6, - parameter_list=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) + + place = ( + fluid.CPUPlace() + if 
not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=0.0, + beta1=0.8, + beta2=0.6, + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -660,15 +739,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() adam.minimize(dy_loss) @@ -677,15 +759,18 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name] + 1) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) state_dict = ptb_model.state_dict() @@ -709,11 +794,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [0.0] @@ -724,12 +811,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 0.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=0.0, - beta1=0.8, - beta2=0.6, - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=0.0, + beta1=0.8, + beta2=0.6, + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -745,15 +837,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() adam.minimize(dy_loss) @@ -762,15 +857,18 @@ class 
TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name] + 1) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) # check parameter @@ -797,11 +895,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [0.0] @@ -812,13 +912,19 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 0.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=fluid.layers.piecewise_decay( - boundaries=bd, values=lr_arr), - beta1=0.8, - beta2=0.6, - parameter_list=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=fluid.layers.piecewise_decay( + boundaries=bd, values=lr_arr + ), + beta1=0.8, + beta2=0.6, + parameter_list=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -844,15 +950,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() adam.minimize(dy_loss) @@ -861,15 +970,18 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name] + 1) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) # check parameter @@ -888,15 +1000,18 @@ class TestDygraphPtbRnn(unittest.TestCase): fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy')) para_state_dict, opti_state_dict = fluid.load_dygraph( - os.path.join('saved_dy', 'emb_dy')) + os.path.join('saved_dy', 'emb_dy') + ) self.assertTrue(opti_state_dict == None) para_state_dict, opti_state_dict = 
fluid.load_dygraph( - os.path.join('saved_dy', 'emb_dy.pdparams')) + os.path.join('saved_dy', 'emb_dy.pdparams') + ) para_state_dict, opti_state_dict = fluid.load_dygraph( - os.path.join('saved_dy', 'emb_dy.pdopt')) + os.path.join('saved_dy', 'emb_dy.pdopt') + ) def func_test_load_compatible_with_keep_name_table(self): with fluid.dygraph.guard(): @@ -905,7 +1020,8 @@ class TestDygraphPtbRnn(unittest.TestCase): fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy')) para_state_dict, opti_state_dict = fluid.load_dygraph( - os.path.join('saved_dy', 'emb_dy'), keep_name_table=True) + os.path.join('saved_dy', 'emb_dy'), keep_name_table=True + ) self.assertTrue(para_state_dict != None) self.assertTrue(opti_state_dict == None) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index 0e051381adf7194fad1792f5ce9cdcf2f1dd9317..795870e3d01873217970e10c80bb055c40fdb399 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -28,13 +28,9 @@ import tempfile class SimpleLSTMRNN(fluid.Layer): - - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -53,19 +49,26 @@ class SimpleLSTMRNN(fluid.Layer): weight_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): @@ -73,29 +76,29 @@ class SimpleLSTMRNN(fluid.Layer): self.hidden_array = [] for i in range(self._num_layers): - pre_hidden = fluid.layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = fluid.layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_hidden = fluid.layers.reshape(pre_hidden, - shape=[-1, self._hidden_size]) - pre_cell = fluid.layers.reshape(pre_cell, - shape=[-1, self._hidden_size]) + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size] + ) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size] + ) self.hidden_array.append(pre_hidden) self.cell_array.append(pre_cell) res = [] for index in range(self._num_steps): - self._input = fluid.layers.slice(input_embedding, - axes=[1], - starts=[index], 
- ends=[index + 1]) - self._input = fluid.layers.reshape(self._input, - shape=[-1, self._hidden_size]) + self._input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1] + ) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size] + ) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] pre_cell = self.cell_array[k] @@ -106,11 +109,12 @@ class SimpleLSTMRNN(fluid.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split(gate_input, - num_or_sections=4, - dim=-1) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1 + ) c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * fluid.layers.tanh(j) + i + ) * fluid.layers.tanh(j) m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) self.hidden_array[k] = m self.cell_array[k] = c @@ -120,32 +124,38 @@ class SimpleLSTMRNN(fluid.Layer): self._input = fluid.layers.dropout( self._input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append( - fluid.layers.reshape(self._input, - shape=[1, -1, self._hidden_size])) + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size] + ) + ) real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): + def __init__( + self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -153,11 +163,13 @@ class PtbModel(fluid.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = Embedding( size=[vocab_size, hidden_size], dtype='float32', @@ -165,48 +177,62 @@ class PtbModel(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype="float32", 
default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) x_emb = self.embedding(input) x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = fluid.layers.dropout( x_emb, dropout_prob=self.drop_out, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) rnn_out = fluid.layers.reshape( - rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + rnn_out, shape=[-1, self.num_steps, self.hidden_size] + ) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -215,7 +241,6 @@ class PtbModel(fluid.Layer): class TestDygraphPtbRnn(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -236,11 +261,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -250,12 +277,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr_arr) - adam = Adam(learning_rate=scheduler, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr_arr + ) + adam = Adam( + learning_rate=scheduler, parameters=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -267,15 +299,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data 
= np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -298,8 +333,10 @@ class TestDygraphPtbRnn(unittest.TestCase): else: self.base_opti[k] = v - paddle.save(self.opti_dict, - os.path.join(self.temp_dir.name, "test_dy_v2.pdopt")) + paddle.save( + self.opti_dict, + os.path.join(self.temp_dir.name, "test_dy_v2.pdopt"), + ) self.state_dict = ptb_model.state_dict() @@ -308,8 +345,10 @@ class TestDygraphPtbRnn(unittest.TestCase): np_t = v.numpy() self.model_base[k] = np_t - paddle.save(self.state_dict, - os.path.join(self.temp_dir.name, "test_dy_v2.pdparams")) + paddle.save( + self.state_dict, + os.path.join(self.temp_dir.name, "test_dy_v2.pdparams"), + ) def func_testLoadAndSetVarBase(self): seed = 90 @@ -325,11 +364,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -339,12 +380,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr_arr) - adam = Adam(learning_rate=scheduler, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr_arr + ) + adam = Adam( + learning_rate=scheduler, parameters=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -356,15 +402,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -388,16 +437,19 @@ class TestDygraphPtbRnn(unittest.TestCase): self.assertTrue(np.sum(np.abs(v.numpy())) == 0) para_state_dict = paddle.load( - os.path.join(self.temp_dir.name, "test_dy_v2.pdparams")) + os.path.join(self.temp_dir.name, "test_dy_v2.pdparams") + ) opti_state_dict = paddle.load( - os.path.join(self.temp_dir.name, "test_dy_v2.pdopt")) + os.path.join(self.temp_dir.name, "test_dy_v2.pdopt") + ) adam.set_state_dict(opti_state_dict) opti_dict = adam.state_dict() for k, v in opti_dict.items(): 
if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -434,11 +486,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -448,12 +502,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr_arr) - adam = Adam(learning_rate=scheduler, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr_arr + ) + adam = Adam( + learning_rate=scheduler, parameters=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -465,15 +524,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -503,8 +565,9 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -541,11 +604,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [1.0] @@ -555,12 +620,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 1.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr_arr) - adam = Adam(learning_rate=scheduler, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + scheduler = 
paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr_arr + ) + adam = Adam( + learning_rate=scheduler, parameters=ptb_model.parameters() + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -572,15 +642,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) if i == 0: for param in ptb_model.parameters(): dy_param_init[param.name] = param.numpy() @@ -614,8 +687,9 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name]) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + ) else: self.assertEqual(v, self.base_opti[k]) @@ -654,18 +728,25 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) - - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=0.0, - beta1=0.8, - beta2=0.6, - parameters=ptb_model.parameters()) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) + + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=0.0, + beta1=0.8, + beta2=0.6, + parameters=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -680,15 +761,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() adam.minimize(dy_loss) @@ -697,15 +781,18 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name] + 1) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) 
if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) state_dict = ptb_model.state_dict() @@ -729,11 +816,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [0.0] @@ -744,12 +833,17 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 0.0 lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - adam = Adam(learning_rate=0.0, - beta1=0.8, - beta2=0.6, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + adam = Adam( + learning_rate=0.0, + beta1=0.8, + beta2=0.6, + parameters=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -757,7 +851,8 @@ class TestDygraphPtbRnn(unittest.TestCase): last_cell = None state_dict, opti_dict = fluid.load_dygraph( - os.path.join(self.temp_dir.name, "test_dy_v2")) + os.path.join(self.temp_dir.name, "test_dy_v2") + ) adam.set_state_dict(opti_dict) ptb_model.set_dict(state_dict) @@ -766,15 +861,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() adam.minimize(dy_loss) @@ -783,15 +881,18 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - np.testing.assert_array_equal(v.numpy(), - self.base_opti[v.name] + 1) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) # check parameter @@ -817,11 +918,13 @@ class TestDygraphPtbRnn(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to - ptb_model = PtbModel(hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) bd = [] lr_arr = [0.0] @@ -832,14 +935,20 @@ class TestDygraphPtbRnn(unittest.TestCase): new_lr = 0.0 
lr_arr.append(new_lr) - place = fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, - values=lr_arr) - adam = Adam(learning_rate=scheduler, - beta1=0.8, - beta2=0.6, - parameters=ptb_model.parameters()) + place = ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) + scheduler = paddle.optimizer.lr.PiecewiseDecay( + boundaries=bd, values=lr_arr + ) + adam = Adam( + learning_rate=scheduler, + beta1=0.8, + beta2=0.6, + parameters=ptb_model.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None @@ -865,15 +974,18 @@ class TestDygraphPtbRnn(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) x = to_variable(x_data) y = to_variable(y_data) init_hidden = to_variable(init_hidden_data) init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) dy_loss.backward() scheduler.step() @@ -884,14 +996,17 @@ class TestDygraphPtbRnn(unittest.TestCase): for k, v in opti_dict.items(): if k == "LR_Scheduler": np.testing.assert_array_equal( - v['last_epoch'], self.base_opti[k]['last_epoch'] + 1) + v['last_epoch'], self.base_opti[k]['last_epoch'] + 1 + ) if k.find("beta1_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta1) + v.numpy(), self.base_opti[v.name] * adam._beta1 + ) if k.find("beta2_pow_acc_0") > 0: np.testing.assert_array_equal( - v.numpy(), self.base_opti[v.name] * adam._beta2) + v.numpy(), self.base_opti[v.name] * adam._beta2 + ) # check parameter @@ -909,10 +1024,12 @@ class TestDygraphPtbRnn(unittest.TestCase): state_dict = emb.state_dict() paddle.save( state_dict, - os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')) + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'), + ) para_state_dict = paddle.load( - os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')) + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams') + ) def func_test_no_state_in_input_dict(self): with fluid.dygraph.guard(): @@ -920,10 +1037,12 @@ class TestDygraphPtbRnn(unittest.TestCase): state_dict = emb.state_dict() paddle.save( state_dict, - os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')) + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'), + ) para_state_dict = paddle.load( - os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')) + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams') + ) para_state_dict.pop('weight') emb.set_state_dict(para_state_dict) @@ -934,14 +1053,16 @@ class TestDygraphPtbRnn(unittest.TestCase): state_dict = emb.state_dict() paddle.save( state_dict, - os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams')) + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'), + ) - para_state_dict = paddle.load(os.path.join(self.temp_dir.name, - 'saved_dy', - 'emb_dy.pdparams'), - return_numpy=True) + para_state_dict = paddle.load( + os.path.join(self.temp_dir.name, 'saved_dy', 'emb_dy.pdparams'), + return_numpy=True, + ) 
para_state_dict['weight'] = np.expand_dims( - para_state_dict['weight'], axis=-1) + para_state_dict['weight'], axis=-1 + ) emb.set_state_dict(para_state_dict) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index c04f3adcadd53092b5fc4a94eaa5a2df0c4ecca8..1f995817c0f77d7a308186f306ac710d997e7b05 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -35,7 +35,7 @@ train_parameters = { "name": "piecewise_decay", "batch_size": batch_size, "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] + "steps": [0.1, 0.01, 0.001, 0.0001], }, "batch_size": batch_size, "lr": 0.1, @@ -51,15 +51,16 @@ def optimizer_setting(params, parameter_list=None): else: total_images = params["total_images"] # TODO(Yancey1989): using lr decay if it is ready. - #batch_size = ls["batch_size"] - #step = int(total_images / batch_size + 1) + # batch_size = ls["batch_size"] + # step = int(total_images / batch_size + 1) - #bd = [step * e for e in ls["epochs"]] - #base_lr = params["lr"] - #lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] + # bd = [step * e for e in ls["epochs"]] + # base_lr = params["lr"] + # lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] if fluid._non_static_mode(): - optimizer = fluid.optimizer.SGD(learning_rate=0.01, - parameter_list=parameter_list) + optimizer = fluid.optimizer.SGD( + learning_rate=0.01, parameter_list=parameter_list + ) else: optimizer = fluid.optimizer.SGD(learning_rate=0.01) @@ -67,24 +68,27 @@ def optimizer_setting(params, parameter_list=None): class ConvBNLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - act=None): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None, + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - act=None, - bias_attr=None) + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=None, + ) self._batch_norm = BatchNorm(num_filters, act=act) @@ -96,7 +100,6 @@ class ConvBNLayer(fluid.dygraph.Layer): class SqueezeExcitation(fluid.dygraph.Layer): - def __init__(self, num_channels, reduction_ratio): super(SqueezeExcitation, self).__init__() @@ -105,15 +108,19 @@ class SqueezeExcitation(fluid.dygraph.Layer): self._squeeze = Linear( num_channels, num_channels // reduction_ratio, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05)), - act='relu') + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + act='relu', + ) self._excitation = Linear( num_channels // reduction_ratio, num_channels, - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.05)), - act='sigmoid') + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.05) + ), + act='sigmoid', + ) def forward(self, input): y = self._pool(input) @@ -125,37 +132,45 @@ class SqueezeExcitation(fluid.dygraph.Layer): class BottleneckBlock(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters, - stride, - cardinality, - reduction_ratio, - shortcut=True): + 
def __init__( + self, + num_channels, + num_filters, + stride, + cardinality, + reduction_ratio, + shortcut=True, + ): super(BottleneckBlock, self).__init__() - self.conv0 = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=1) - self.conv1 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=stride, - groups=cardinality) - self.conv2 = ConvBNLayer(num_channels=num_filters, - num_filters=num_filters * 4, - filter_size=1, - act='relu') - - self.scale = SqueezeExcitation(num_channels=num_filters * 4, - reduction_ratio=reduction_ratio) + self.conv0 = ConvBNLayer( + num_channels=num_channels, num_filters=num_filters, filter_size=1 + ) + self.conv1 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=stride, + groups=cardinality, + ) + self.conv2 = ConvBNLayer( + num_channels=num_filters, + num_filters=num_filters * 4, + filter_size=1, + act='relu', + ) + + self.scale = SqueezeExcitation( + num_channels=num_filters * 4, reduction_ratio=reduction_ratio + ) if not shortcut: - self.short = ConvBNLayer(num_channels=num_channels, - num_filters=num_filters * 4, - filter_size=1, - stride=stride) + self.short = ConvBNLayer( + num_channels=num_channels, + num_filters=num_filters * 4, + filter_size=1, + stride=stride, + ) self.shortcut = shortcut @@ -180,67 +195,76 @@ class BottleneckBlock(fluid.dygraph.Layer): class SeResNeXt(fluid.dygraph.Layer): - def __init__(self, layers=50, class_dim=102): super(SeResNeXt, self).__init__() self.layers = layers supported_layers = [50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) if layers == 50: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 6, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) elif layers == 101: cardinality = 32 reduction_ratio = 16 depth = [3, 4, 23, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=7, - stride=2, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) elif layers == 152: cardinality = 64 reduction_ratio = 16 depth = [3, 8, 36, 3] num_filters = [128, 256, 512, 1024] - self.conv0 = ConvBNLayer(num_channels=3, - num_filters=64, - filter_size=3, - stride=2, - act='relu') - self.conv1 = ConvBNLayer(num_channels=64, - num_filters=64, - filter_size=3, - stride=2, - act='relu') - self.conv2 = ConvBNLayer(num_channels=64, - num_filters=128, - filter_size=3, - stride=1, - act='relu') - self.pool = Pool2D(pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + self.conv0 = ConvBNLayer( + num_channels=3, + num_filters=64, + filter_size=3, + stride=2, + act='relu', + ) + self.conv1 = ConvBNLayer( + num_channels=64, + 
num_filters=64, + filter_size=3, + stride=2, + act='relu', + ) + self.conv2 = ConvBNLayer( + num_channels=64, + num_filters=128, + filter_size=3, + stride=1, + act='relu', + ) + self.pool = Pool2D( + pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + ) self.bottleneck_block_list = [] num_channels = 64 @@ -251,20 +275,24 @@ class SeResNeXt(fluid.dygraph.Layer): for i in range(depth[block]): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), - BottleneckBlock(num_channels=num_channels, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - cardinality=cardinality, - reduction_ratio=reduction_ratio, - shortcut=shortcut)) + BottleneckBlock( + num_channels=num_channels, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio, + shortcut=shortcut, + ), + ) num_channels = bottleneck_block._num_channels_out self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = Pool2D(pool_size=7, - pool_type='avg', - global_pooling=True) + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True + ) import math + stdv = 1.0 / math.sqrt(2048 * 1.0) self.pool2d_avg_output = num_filters[-1] * 4 * 1 * 1 @@ -274,7 +302,9 @@ class SeResNeXt(fluid.dygraph.Layer): class_dim, act='softmax', param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + initializer=fluid.initializer.Uniform(-stdv, stdv) + ), + ) def forward(self, inputs): if self.layers == 50 or self.layers == 101: @@ -295,9 +325,7 @@ class SeResNeXt(fluid.dygraph.Layer): class TestImperativeResneXt(unittest.TestCase): - def reader_decorator(self, reader): - def _reader_imple(): for item in reader(): doc = np.array(item[0]).reshape(3, 224, 224) @@ -319,16 +347,21 @@ class TestImperativeResneXt(unittest.TestCase): se_resnext = SeResNeXt() optimizer = optimizer_setting( - train_parameters, parameter_list=se_resnext.parameters()) + train_parameters, parameter_list=se_resnext.parameters() + ) np.random.seed(seed) batch_py_reader = fluid.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( - paddle.batch(self.reader_decorator( - paddle.dataset.flowers.train(use_xmap=False)), - batch_size=batch_size, - drop_last=True), - places=fluid.CPUPlace()) + paddle.batch( + self.reader_decorator( + paddle.dataset.flowers.train(use_xmap=False) + ), + batch_size=batch_size, + drop_last=True, + ), + places=fluid.CPUPlace(), + ) dy_param_init_value = {} for param in se_resnext.parameters(): @@ -346,8 +379,9 @@ class TestImperativeResneXt(unittest.TestCase): out = se_resnext(img) softmax_out = fluid.layers.softmax(out, use_cudnn=False) - loss = fluid.layers.cross_entropy(input=softmax_out, - label=label) + loss = fluid.layers.cross_entropy( + input=softmax_out, label=label + ) avg_loss = paddle.mean(x=loss) dy_out = avg_loss.numpy() @@ -362,9 +396,11 @@ class TestImperativeResneXt(unittest.TestCase): for param in se_resnext.parameters(): if param.trainable: np_array = np.array( - param._grad_ivar().value().get_tensor()) - dy_grad_value[param.name + - core.grad_var_suffix()] = np_array + param._grad_ivar().value().get_tensor() + ) + dy_grad_value[ + param.name + core.grad_var_suffix() + ] = np_array optimizer.minimize(avg_loss) se_resnext.clear_gradients() @@ -373,23 +409,39 @@ class TestImperativeResneXt(unittest.TestCase): for param in se_resnext.parameters(): dy_param_value[param.name] = param.numpy() - return dy_out, dy_param_init_value, 
dy_param_value, dy_grad_value + return ( + dy_out, + dy_param_init_value, + dy_param_value, + dy_grad_value, + ) with fluid.dygraph.guard(): - dy_out, dy_param_init_value, dy_param_value, dy_grad_value = run_dygraph( - ) + ( + dy_out, + dy_param_init_value, + dy_param_value, + dy_grad_value, + ) = run_dygraph() with fluid.dygraph.guard(): with _test_eager_guard(): - eager_out, eager_param_init_value, eager_param_value, eager_grad_value = run_dygraph( - ) + ( + eager_out, + eager_param_init_value, + eager_param_value, + eager_grad_value, + ) = run_dygraph() with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) se_resnext = SeResNeXt() optimizer = optimizer_setting(train_parameters) @@ -398,11 +450,12 @@ class TestImperativeResneXt(unittest.TestCase): train_reader = paddle.batch( paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size, - drop_last=True) + drop_last=True, + ) - img = fluid.layers.data(name='pixel', - shape=[3, 224, 224], - dtype='float32') + img = fluid.layers.data( + name='pixel', shape=[3, 224, 224], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = se_resnext(img) softmax_out = fluid.layers.softmax(out, use_cudnn=False) @@ -418,11 +471,14 @@ class TestImperativeResneXt(unittest.TestCase): static_param_name_list.append(param.name) for param in se_resnext.parameters(): if param.trainable: - static_grad_name_list.append(param.name + - core.grad_var_suffix()) + static_grad_name_list.append( + param.name + core.grad_var_suffix() + ) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init_value[static_param_name_list[i]] = out[i] @@ -431,47 +487,54 @@ class TestImperativeResneXt(unittest.TestCase): if batch_id >= batch_num and batch_num != -1: break - static_x_data = np.array([ - x[0].reshape(3, 224, 224) for x in data - ]).astype('float32') - y_data = np.array([x[1] - for x in data]).astype('int64').reshape( - [batch_size, 1]) + static_x_data = np.array( + [x[0].reshape(3, 224, 224) for x in data] + ).astype('float32') + y_data = ( + np.array([x[1] for x in data]) + .astype('int64') + .reshape([batch_size, 1]) + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "pixel": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) static_param_value = {} static_grad_value = {} static_out = out[0] param_start_pos = 1 - grad_start_pos = len( - static_param_name_list) + param_start_pos + grad_start_pos = ( + len(static_param_name_list) + param_start_pos + ) + for i in range( + param_start_pos, + len(static_param_name_list) + param_start_pos, + ): + static_param_value[ + static_param_name_list[i - param_start_pos] + ] = out[i] for i in range( - param_start_pos, - len(static_param_name_list) + param_start_pos): - static_param_value[static_param_name_list[ - i - param_start_pos]] = out[i] - for i in range(grad_start_pos, - len(static_grad_name_list) + 
grad_start_pos): - static_grad_value[static_grad_name_list[ - i - grad_start_pos]] = out[i] + grad_start_pos, + len(static_grad_name_list) + grad_start_pos, + ): + static_grad_value[ + static_grad_name_list[i - grad_start_pos] + ] = out[i] np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - dy_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, dy_param_init_value[key], rtol=1e-05 + ) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) @@ -491,13 +554,14 @@ class TestImperativeResneXt(unittest.TestCase): # check eager np.testing.assert_allclose(static_out, eager_out, rtol=1e-05) - self.assertEqual(len(eager_param_init_value), - len(static_param_init_value)) + self.assertEqual( + len(eager_param_init_value), len(static_param_init_value) + ) for key, value in static_param_init_value.items(): - np.testing.assert_allclose(value, - eager_param_init_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, eager_param_init_value[key], rtol=1e-05 + ) self.assertEqual(len(eager_grad_value), len(static_grad_value)) @@ -506,9 +570,9 @@ class TestImperativeResneXt(unittest.TestCase): self.assertEqual(len(eager_param_value), len(static_param_value)) for key, value in static_param_value.items(): - np.testing.assert_allclose(value, - eager_param_value[key], - rtol=1e-05) + np.testing.assert_allclose( + value, eager_param_value[key], rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py index 602d2e07ce55eb31cde5e2a8db6cfd27e040f528..93a04a36115c616677bf801ea4f770a5e08e31e3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py @@ -23,13 +23,14 @@ from paddle.fluid.framework import _test_eager_guard class SimpleNet(paddle.nn.Layer): - def __init__(self, vocab_size, hidden_size, dtype): super(SimpleNet, self).__init__() - self.emb = fluid.dygraph.Embedding(size=[vocab_size, hidden_size], - dtype=dtype, - param_attr='emb.w', - is_sparse=True) + self.emb = fluid.dygraph.Embedding( + size=[vocab_size, hidden_size], + dtype=dtype, + param_attr='emb.w', + is_sparse=True, + ) def forward(self, input): input_emb = self.emb(input) @@ -37,7 +38,6 @@ class SimpleNet(paddle.nn.Layer): class TestSimpleNet(unittest.TestCase): - def func_selectedrows_gradient1(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -48,16 +48,18 @@ class TestSimpleNet(unittest.TestCase): for sort_sum_gradient in [True, False]: paddle.disable_static(place) fluid.set_flags( - {'FLAGS_sort_sum_gradient': sort_sum_gradient}) + {'FLAGS_sort_sum_gradient': sort_sum_gradient} + ) # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0) input_word = np.array([[1, 2], [2, 1]]).astype('int64') input = paddle.to_tensor(input_word) simplenet = SimpleNet(20, 32, dtype) - adam = SGDOptimizer(learning_rate=0.001, - parameter_list=simplenet.parameters() - ) # grad_clip=grad_clip + adam = SGDOptimizer( + learning_rate=0.001, + parameter_list=simplenet.parameters(), + ) # grad_clip=grad_clip input_emb, emb = simplenet(input) self.assertTrue(emb.weight.gradient() is None) @@ -90,16 +92,19 @@ class TestSimpleNet(unittest.TestCase): for sort_sum_gradient in [True, False]: with 
fluid.dygraph.guard(place): fluid.set_flags( - {'FLAGS_sort_sum_gradient': sort_sum_gradient}) + {'FLAGS_sort_sum_gradient': sort_sum_gradient} + ) grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0) input_word = np.array([[1, 2], [2, 1]]).astype('int64') input = to_variable(input_word) simplenet = SimpleNet(20, 32, "float32") - adam = SGDOptimizer(learning_rate=0.001, - parameter_list=simplenet.parameters(), - grad_clip=grad_clip) + adam = SGDOptimizer( + learning_rate=0.001, + parameter_list=simplenet.parameters(), + grad_clip=grad_clip, + ) input_emb, emb = simplenet(input) self.assertTrue(emb.weight.gradient() is None) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index 35d863b774b0200fa063ad413192ea89237287cb..023753322d1df54b7a6182577510ab417ef22a14 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -26,14 +26,15 @@ from paddle.fluid.framework import _test_eager_guard class SimpleNet(fluid.Layer): - - def __init__(self, - hidden_size, - vocab_size, - num_steps=20, - init_scale=0.1, - is_sparse=False, - dtype='float32'): + def __init__( + self, + hidden_size, + vocab_size, + num_steps=20, + init_scale=0.1, + is_sparse=False, + dtype='float32', + ): super(SimpleNet, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -46,31 +47,40 @@ class SimpleNet(fluid.Layer): param_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.hidden_size], dtype=dtype, default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size], dtype=dtype, default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label): x_emb = self.embedding(input) fc = fluid.layers.matmul(x_emb, self.softmax_weight) fc = fluid.layers.elementwise_add(fc, self.softmax_bias) projection = fluid.layers.matmul( - fc, fluid.layers.transpose(self.embedding.weight, perm=[1, 0])) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + fc, fluid.layers.transpose(self.embedding.weight, perm=[1, 0]) + ) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -79,7 +89,6 @@ class SimpleNet(fluid.Layer): class TestDygraphSimpleNet(unittest.TestCase): - def func_simple_net(self): for is_sparse in [True, False]: dtype_list = ["float32"] @@ -113,21 +122,26 @@ class TestDygraphSimpleNet(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - simple_net = SimpleNet(hidden_size=hidden_size, 
- vocab_size=vocab_size, - num_steps=num_steps, - init_scale=init_scale, - is_sparse=is_sparse, - dtype=dtype) - - sgd = SGDOptimizer(learning_rate=1e-3, - parameter_list=simple_net.parameters()) + simple_net = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + init_scale=init_scale, + is_sparse=is_sparse, + dtype=dtype, + ) + + sgd = SGDOptimizer( + learning_rate=1e-3, + parameter_list=simple_net.parameters(), + ) dy_param_updated = dict() dy_param_init = dict() dy_loss = None fluid.set_flags( - {'FLAGS_sort_sum_gradient': is_sort_sum_gradient}) + {'FLAGS_sort_sum_gradient': is_sort_sum_gradient} + ) for i in range(batch_num): x_data = np.arange(12).reshape(4, 3).astype('int64') @@ -154,17 +168,19 @@ class TestDygraphSimpleNet(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - simple_net = SimpleNet(hidden_size=hidden_size, - vocab_size=vocab_size, - num_steps=num_steps, - is_sparse=is_sparse, - dtype=dtype) + simple_net = SimpleNet( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_steps=num_steps, + is_sparse=is_sparse, + dtype=dtype, + ) exe = fluid.Executor(place) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype) static_loss = simple_net(x, y) @@ -175,8 +191,10 @@ class TestDygraphSimpleNet(unittest.TestCase): for param in simple_net.parameters(): static_param_name_list.append(param.name) - out = exe.run(framework.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + framework.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] static_loss_value = None @@ -187,18 +205,18 @@ class TestDygraphSimpleNet(unittest.TestCase): y_data = y_data.reshape((-1, 1)) fetch_list = [static_loss] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"x": x_data, "y": y_data}, + fetch_list=fetch_list, + ) static_loss_value = out[0] if i == batch_num - 1: for k in range(3, len(out)): - static_param_updated[static_param_name_list[ - k - 1]] = out[k] + static_param_updated[ + static_param_name_list[k - 1] + ] = out[k] np.testing.assert_array_equal(static_loss_value, dy_loss_value) for key, value in static_param_init.items(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py index 977cb9cc2576d6ca13d7ea2842447ff316950c12..488500cb3802a552333da64ae869b57c85dd0c40 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py @@ -38,9 +38,7 @@ def set_child_signal_handler(self, child_pid): class DygraphDataLoaderSingalHandler(unittest.TestCase): - def func_child_process_exit_with_error(self): - def __test_process__(): core._set_process_signal_handler() sys.exit(1) @@ -73,7 +71,6 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): self.func_child_process_exit_with_error() def func_child_process_killed_by_sigsegv(self): - def __test_process__(): core._set_process_signal_handler() os.kill(os.getpid(), signal.SIGSEGV) @@ -106,7 
+103,6 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): self.func_child_process_killed_by_sigsegv() def func_child_process_killed_by_sigbus(self): - def __test_process__(): core._set_process_signal_handler() os.kill(os.getpid(), signal.SIGBUS) @@ -139,7 +135,6 @@ class DygraphDataLoaderSingalHandler(unittest.TestCase): self.func_child_process_killed_by_sigbus() def func_child_process_killed_by_sigterm(self): - def __test_process__(): core._set_process_signal_handler() time.sleep(10) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index 4b394ba87fdd1ea534697ae5f8759092b9544739..3a43709997ed50b85cc75c6290590a25723b51e2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -24,7 +24,6 @@ if fluid.is_compiled_with_cuda(): class Config(object): - def __init__(self, place, sort_sum_gradient=True): self.place = place @@ -60,7 +59,6 @@ class Config(object): def create_mnist_dataset(cfg): - def create_target_label(label): return label # return (label + 1) % cfg.c_dim # fake label target @@ -107,7 +105,6 @@ def create_mnist_dataset(cfg): class InstanceNorm(fluid.dygraph.Layer): - def __init__(self, num_channels, epsilon=1e-5): super(InstanceNorm, self).__init__() self.epsilon = epsilon @@ -117,36 +114,40 @@ class InstanceNorm(fluid.dygraph.Layer): def forward(self, input): if fluid._non_static_mode(): - out, _, _ = _legacy_C_ops.instance_norm(input, self.scale, - self.bias, 'epsilon', - self.epsilon) + out, _, _ = _legacy_C_ops.instance_norm( + input, self.scale, self.bias, 'epsilon', self.epsilon + ) return out else: return fluid.layers.instance_norm( input, epsilon=self.epsilon, param_attr=fluid.ParamAttr(self.scale.name), - bias_attr=fluid.ParamAttr(self.bias.name)) + bias_attr=fluid.ParamAttr(self.bias.name), + ) class Conv2DLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters=64, - filter_size=7, - stride=1, - padding=0, - norm=None, - use_bias=False, - relufactor=None): + def __init__( + self, + num_channels, + num_filters=64, + filter_size=7, + stride=1, + padding=0, + norm=None, + use_bias=False, + relufactor=None, + ): super(Conv2DLayer, self).__init__() - self._conv = fluid.dygraph.Conv2D(num_channels=num_channels, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=padding, - bias_attr=None if use_bias else False) + self._conv = fluid.dygraph.Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + bias_attr=None if use_bias else False, + ) if norm is not None: self._norm = InstanceNorm(num_filters) @@ -168,16 +169,17 @@ class Conv2DLayer(fluid.dygraph.Layer): class Deconv2DLayer(fluid.dygraph.Layer): - - def __init__(self, - num_channels, - num_filters=64, - filter_size=7, - stride=1, - padding=0, - norm=None, - use_bias=False, - relufactor=None): + def __init__( + self, + num_channels, + num_filters=64, + filter_size=7, + stride=1, + padding=0, + norm=None, + use_bias=False, + relufactor=None, + ): super(Deconv2DLayer, self).__init__() self._deconv = fluid.dygraph.Conv2DTranspose( @@ -186,7 +188,8 @@ class Deconv2DLayer(fluid.dygraph.Layer): filter_size=filter_size, stride=stride, padding=padding, - bias_attr=None if use_bias else False) + bias_attr=None if use_bias else 
False, + ) if norm is not None: self._norm = InstanceNorm(num_filters) @@ -208,24 +211,27 @@ class Deconv2DLayer(fluid.dygraph.Layer): class ResidualBlock(fluid.dygraph.Layer): - def __init__(self, num_channels, num_filters): super(ResidualBlock, self).__init__() - self._conv0 = Conv2DLayer(num_channels=num_channels, - num_filters=num_filters, - filter_size=3, - stride=1, - padding=1, - norm=True, - relufactor=0) - - self._conv1 = Conv2DLayer(num_channels=num_filters, - num_filters=num_filters, - filter_size=3, - stride=1, - padding=1, - norm=True, - relufactor=None) + self._conv0 = Conv2DLayer( + num_channels=num_channels, + num_filters=num_filters, + filter_size=3, + stride=1, + padding=1, + norm=True, + relufactor=0, + ) + + self._conv1 = Conv2DLayer( + num_channels=num_filters, + num_filters=num_filters, + filter_size=3, + stride=1, + padding=1, + norm=True, + relufactor=None, + ) def forward(self, input): conv0 = self._conv0(input) @@ -234,27 +240,30 @@ class ResidualBlock(fluid.dygraph.Layer): class Generator(fluid.dygraph.Layer): - def __init__(self, cfg, num_channels=3): super(Generator, self).__init__() - conv_base = Conv2DLayer(num_channels=cfg.c_dim + num_channels, - num_filters=cfg.g_base_dims, - filter_size=7, - stride=1, - padding=3, - norm=True, - relufactor=0) + conv_base = Conv2DLayer( + num_channels=cfg.c_dim + num_channels, + num_filters=cfg.g_base_dims, + filter_size=7, + stride=1, + padding=3, + norm=True, + relufactor=0, + ) sub_layers = [conv_base] cur_channels = cfg.g_base_dims for i in range(2): - sub_layer = Conv2DLayer(num_channels=cur_channels, - num_filters=cur_channels * 2, - filter_size=4, - stride=2, - padding=1, - norm=True, - relufactor=0) + sub_layer = Conv2DLayer( + num_channels=cur_channels, + num_filters=cur_channels * 2, + filter_size=4, + stride=2, + padding=1, + norm=True, + relufactor=0, + ) cur_channels *= 2 sub_layers.append(sub_layer) @@ -264,8 +273,9 @@ class Generator(fluid.dygraph.Layer): repeat_num = cfg.g_repeat_num sub_layers = [] for i in range(repeat_num): - res_block = ResidualBlock(num_channels=cur_channels, - num_filters=cfg.g_base_dims * 4) + res_block = ResidualBlock( + num_channels=cur_channels, num_filters=cfg.g_base_dims * 4 + ) sub_layers.append(res_block) self._res_block = fluid.dygraph.Sequential(*sub_layers) @@ -273,32 +283,38 @@ class Generator(fluid.dygraph.Layer): cur_channels = cfg.g_base_dims * 4 sub_layers = [] for i in range(2): - rate = 2**(1 - i) - deconv = Deconv2DLayer(num_channels=cur_channels, - num_filters=cfg.g_base_dims * rate, - filter_size=4, - stride=2, - padding=1, - relufactor=0, - norm=True) + rate = 2 ** (1 - i) + deconv = Deconv2DLayer( + num_channels=cur_channels, + num_filters=cfg.g_base_dims * rate, + filter_size=4, + stride=2, + padding=1, + relufactor=0, + norm=True, + ) cur_channels = cfg.g_base_dims * rate sub_layers.append(deconv) self._deconv = fluid.dygraph.Sequential(*sub_layers) - self._conv1 = Conv2DLayer(num_channels=cur_channels, - num_filters=3, - filter_size=7, - stride=1, - padding=3, - relufactor=None) + self._conv1 = Conv2DLayer( + num_channels=cur_channels, + num_filters=3, + filter_size=7, + stride=1, + padding=3, + relufactor=None, + ) def forward(self, input, label_trg): shape = input.shape - label_trg_e = fluid.layers.reshape(label_trg, - [-1, label_trg.shape[1], 1, 1]) + label_trg_e = fluid.layers.reshape( + label_trg, [-1, label_trg.shape[1], 1, 1] + ) label_trg_e = fluid.layers.expand( - x=label_trg_e, expand_times=[1, 1, shape[2], shape[3]]) + x=label_trg_e, 
expand_times=[1, 1, shape[2], shape[3]] + ) input1 = fluid.layers.concat([input, label_trg_e], 1) @@ -311,28 +327,31 @@ class Generator(fluid.dygraph.Layer): class Discriminator(fluid.dygraph.Layer): - def __init__(self, cfg, num_channels=3): super(Discriminator, self).__init__() cur_dim = cfg.d_base_dims - conv_base = Conv2DLayer(num_channels=num_channels, - num_filters=cur_dim, - filter_size=4, - stride=2, - padding=1, - relufactor=0.2) + conv_base = Conv2DLayer( + num_channels=num_channels, + num_filters=cur_dim, + filter_size=4, + stride=2, + padding=1, + relufactor=0.2, + ) repeat_num = cfg.d_repeat_num sub_layers = [conv_base] for i in range(1, repeat_num): - sub_layer = Conv2DLayer(num_channels=cur_dim, - num_filters=cur_dim * 2, - filter_size=4, - stride=2, - padding=1, - relufactor=0.2) + sub_layer = Conv2DLayer( + num_channels=cur_dim, + num_filters=cur_dim * 2, + filter_size=4, + stride=2, + padding=1, + relufactor=0.2, + ) cur_dim *= 2 sub_layers.append(sub_layer) @@ -340,15 +359,17 @@ class Discriminator(fluid.dygraph.Layer): kernel_size = int(cfg.image_size / np.power(2, repeat_num)) - self._conv1 = Conv2DLayer(num_channels=cur_dim, - num_filters=1, - filter_size=3, - stride=1, - padding=1) + self._conv1 = Conv2DLayer( + num_channels=cur_dim, + num_filters=1, + filter_size=3, + stride=1, + padding=1, + ) - self._conv2 = Conv2DLayer(num_channels=cur_dim, - num_filters=cfg.c_dim, - filter_size=kernel_size) + self._conv2 = Conv2DLayer( + num_channels=cur_dim, num_filters=cfg.c_dim, filter_size=kernel_size + ) def forward(self, input): conv = self._conv0(input) @@ -359,38 +380,41 @@ class Discriminator(fluid.dygraph.Layer): def loss_cls(cls, label, cfg): cls_shape = cls.shape - cls = fluid.layers.reshape(cls, - [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]]) - return fluid.layers.reduce_sum( - fluid.layers.sigmoid_cross_entropy_with_logits(cls, - label)) / cfg.batch_size + cls = fluid.layers.reshape( + cls, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]] + ) + return ( + fluid.layers.reduce_sum( + fluid.layers.sigmoid_cross_entropy_with_logits(cls, label) + ) + / cfg.batch_size + ) def calc_gradients(outputs, inputs, no_grad_set): if fluid._non_static_mode(): - return fluid.dygraph.grad(outputs=outputs, - inputs=inputs, - no_grad_vars=no_grad_set, - create_graph=True) + return fluid.dygraph.grad( + outputs=outputs, + inputs=inputs, + no_grad_vars=no_grad_set, + create_graph=True, + ) else: - return fluid.gradients(targets=outputs, - inputs=inputs, - no_grad_set=no_grad_set) + return fluid.gradients( + targets=outputs, inputs=inputs, no_grad_set=no_grad_set + ) def gradient_penalty(f, real, fake, no_grad_set, cfg): - def _interpolate(a, b): shape = [a.shape[0]] - alpha = fluid.layers.uniform_random_batch_size_like(input=a, - shape=shape, - min=0.1, - max=1.0, - seed=cfg.seed) + alpha = fluid.layers.uniform_random_batch_size_like( + input=a, shape=shape, min=0.1, max=1.0, seed=cfg.seed + ) inner = fluid.layers.elementwise_mul( - b, 1.0 - alpha, axis=0) + fluid.layers.elementwise_mul( - a, alpha, axis=0) + b, 1.0 - alpha, axis=0 + ) + fluid.layers.elementwise_mul(a, alpha, axis=0) return inner x = _interpolate(real, fake) @@ -398,9 +422,9 @@ def gradient_penalty(f, real, fake, no_grad_set, cfg): if isinstance(pred, tuple): pred = pred[0] - gradient = calc_gradients(outputs=[pred], - inputs=[x], - no_grad_set=no_grad_set) + gradient = calc_gradients( + outputs=[pred], inputs=[x], no_grad_set=no_grad_set + ) if gradient is None: return None @@ -409,22 +433,26 @@ def 
gradient_penalty(f, real, fake, no_grad_set, cfg): grad_shape = gradient.shape gradient = fluid.layers.reshape( - gradient, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]]) + gradient, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]] + ) epsilon = 1e-16 norm = fluid.layers.sqrt( - fluid.layers.reduce_sum(fluid.layers.square(gradient), dim=1) + epsilon) + fluid.layers.reduce_sum(fluid.layers.square(gradient), dim=1) + epsilon + ) gp = fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0)) return gp -def get_generator_loss(image_real, label_org, label_trg, generator, - discriminator, cfg): +def get_generator_loss( + image_real, label_org, label_trg, generator, discriminator, cfg +): fake_img = generator(image_real, label_trg) rec_img = generator(fake_img, label_org) g_loss_rec = fluid.layers.reduce_mean( - fluid.layers.abs(fluid.layers.elementwise_sub(image_real, rec_img))) + fluid.layers.abs(fluid.layers.elementwise_sub(image_real, rec_img)) + ) pred_fake, cls_fake = discriminator(fake_img) @@ -434,8 +462,9 @@ def get_generator_loss(image_real, label_org, label_trg, generator, return g_loss -def get_discriminator_loss(image_real, label_org, label_trg, generator, - discriminator, cfg): +def get_discriminator_loss( + image_real, label_org, label_trg, generator, discriminator, cfg +): fake_img = generator(image_real, label_trg) pred_real, cls_real = discriminator(image_real) pred_fake, _ = discriminator(fake_img) @@ -444,8 +473,13 @@ def get_discriminator_loss(image_real, label_org, label_trg, generator, d_loss_real = -paddle.mean(pred_real) d_loss = d_loss_real + d_loss_fake + d_loss_cls - d_loss_gp = gradient_penalty(discriminator, image_real, fake_img, - set(discriminator.parameters()), cfg) + d_loss_gp = gradient_penalty( + discriminator, + image_real, + fake_img, + set(discriminator.parameters()), + cfg, + ) if d_loss_gp is not None: d_loss += cfg.lambda_gp * d_loss_gp @@ -457,21 +491,22 @@ def build_optimizer(layer, cfg, loss=None): beta1 = 0.5 beta2 = 0.999 if fluid._non_static_mode(): - return fluid.optimizer.Adam(learning_rate=learning_rate, - beta1=beta1, - beta2=beta2, - parameter_list=layer.parameters()) + return fluid.optimizer.Adam( + learning_rate=learning_rate, + beta1=beta1, + beta2=beta2, + parameter_list=layer.parameters(), + ) else: - optimizer = fluid.optimizer.Adam(learning_rate=learning_rate, - beta1=beta1, - beta2=beta2) + optimizer = fluid.optimizer.Adam( + learning_rate=learning_rate, beta1=beta1, beta2=beta2 + ) optimizer.minimize(loss, parameter_list=layer.parameters()) return optimizer class DyGraphTrainModel(object): - def __init__(self, cfg): paddle.seed(1) paddle.framework.random._manual_program_seed(1) @@ -498,18 +533,28 @@ class DyGraphTrainModel(object): label_org = fluid.dygraph.to_variable(label_org) label_trg = fluid.dygraph.to_variable(label_trg) - g_loss = get_generator_loss(image_real, label_org, label_trg, - self.generator, self.discriminator, - self.cfg) + g_loss = get_generator_loss( + image_real, + label_org, + label_trg, + self.generator, + self.discriminator, + self.cfg, + ) g_loss.backward() if self.g_optimizer: self.g_optimizer.minimize(g_loss) self.clear_gradients() - d_loss = get_discriminator_loss(image_real, label_org, label_trg, - self.generator, self.discriminator, - self.cfg) + d_loss = get_discriminator_loss( + image_real, + label_org, + label_trg, + self.generator, + self.discriminator, + self.cfg, + ) d_loss.backward() if self.d_optimizer: self.d_optimizer.minimize(d_loss) @@ -520,7 +565,6 @@ class DyGraphTrainModel(object): 
class StaticGraphTrainModel(object): - def __init__(self, cfg): self.cfg = cfg @@ -528,13 +572,14 @@ class StaticGraphTrainModel(object): image_real = fluid.data( shape=[None, 3, cfg.image_size, cfg.image_size], dtype='float32', - name='image_real') - label_org = fluid.data(shape=[None, cfg.c_dim], - dtype='float32', - name='label_org') - label_trg = fluid.data(shape=[None, cfg.c_dim], - dtype='float32', - name='label_trg') + name='image_real', + ) + label_org = fluid.data( + shape=[None, cfg.c_dim], dtype='float32', name='label_org' + ) + label_trg = fluid.data( + shape=[None, cfg.c_dim], dtype='float32', name='label_trg' + ) return image_real, label_org, label_trg paddle.seed(cfg.seed) @@ -547,8 +592,14 @@ class StaticGraphTrainModel(object): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) discriminator = Discriminator(cfg) - g_loss = get_generator_loss(image_real, label_org, label_trg, - generator, discriminator, cfg) + g_loss = get_generator_loss( + image_real, + label_org, + label_trg, + generator, + discriminator, + cfg, + ) build_optimizer(generator, cfg, loss=g_loss) self.dis_program = fluid.Program() @@ -558,9 +609,14 @@ class StaticGraphTrainModel(object): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) discriminator = Discriminator(cfg) - d_loss = get_discriminator_loss(image_real, label_org, - label_trg, generator, - discriminator, cfg) + d_loss = get_discriminator_loss( + image_real, + label_org, + label_trg, + generator, + discriminator, + cfg, + ) build_optimizer(discriminator, cfg, loss=d_loss) self.executor = fluid.Executor(cfg.place) @@ -577,20 +633,19 @@ class StaticGraphTrainModel(object): feed = { 'image_real': image_real, 'label_org': label_org, - 'label_trg': label_trg + 'label_trg': label_trg, } with fluid.scope_guard(self.scope): - g_loss_val = self.executor.run(self.gen_program, - feed=feed, - fetch_list=[self.g_loss])[0] - d_loss_val = self.executor.run(self.dis_program, - feed=feed, - fetch_list=[self.d_loss])[0] + g_loss_val = self.executor.run( + self.gen_program, feed=feed, fetch_list=[self.g_loss] + )[0] + d_loss_val = self.executor.run( + self.dis_program, feed=feed, fetch_list=[self.d_loss] + )[0] return g_loss_val[0], d_loss_val[0] class TestStarGANWithGradientPenalty(unittest.TestCase): - def func_main(self): self.place_test(fluid.CPUPlace()) @@ -606,8 +661,9 @@ class TestStarGANWithGradientPenalty(unittest.TestCase): fluid_dygraph_loss = [] with fluid.dygraph.guard(cfg.place): fluid_dygraph_model = DyGraphTrainModel(cfg) - for batch_id, (image_real, label_org, - label_trg) in enumerate(dataset()): + for batch_id, (image_real, label_org, label_trg) in enumerate( + dataset() + ): loss = fluid_dygraph_model.run(image_real, label_org, label_trg) fluid_dygraph_loss.append(loss) @@ -615,15 +671,17 @@ class TestStarGANWithGradientPenalty(unittest.TestCase): with _test_eager_guard(): with fluid.dygraph.guard(cfg.place): eager_dygraph_model = DyGraphTrainModel(cfg) - for batch_id, (image_real, label_org, - label_trg) in enumerate(dataset()): - loss = eager_dygraph_model.run(image_real, label_org, - label_trg) + for batch_id, (image_real, label_org, label_trg) in enumerate( + dataset() + ): + loss = eager_dygraph_model.run( + image_real, label_org, label_trg + ) eager_dygraph_loss.append(loss) - for (g_loss_f, d_loss_f), (g_loss_e, - d_loss_e) in zip(fluid_dygraph_loss, - eager_dygraph_loss): + for (g_loss_f, d_loss_f), (g_loss_e, d_loss_e) in zip( + fluid_dygraph_loss, eager_dygraph_loss + ): 
self.assertEqual(g_loss_f, g_loss_e) self.assertEqual(d_loss_f, d_loss_e) @@ -632,7 +690,6 @@ class TestStarGANWithGradientPenalty(unittest.TestCase): class TestStarGANWithGradientPenaltyLegacy(unittest.TestCase): - def func_main(self): self.place_test(fluid.CPUPlace()) @@ -647,21 +704,24 @@ class TestStarGANWithGradientPenaltyLegacy(unittest.TestCase): static_graph_model = StaticGraphTrainModel(cfg) static_loss = [] - for batch_id, (image_real, label_org, - label_trg) in enumerate(dataset()): + for batch_id, (image_real, label_org, label_trg) in enumerate( + dataset() + ): loss = static_graph_model.run(image_real, label_org, label_trg) static_loss.append(loss) dygraph_loss = [] with fluid.dygraph.guard(cfg.place): dygraph_model = DyGraphTrainModel(cfg) - for batch_id, (image_real, label_org, - label_trg) in enumerate(dataset()): + for batch_id, (image_real, label_org, label_trg) in enumerate( + dataset() + ): loss = dygraph_model.run(image_real, label_org, label_trg) dygraph_loss.append(loss) - for (g_loss_s, d_loss_s), (g_loss_d, - d_loss_d) in zip(static_loss, dygraph_loss): + for (g_loss_s, d_loss_s), (g_loss_d, d_loss_d) in zip( + static_loss, dygraph_loss + ): self.assertEqual(g_loss_s, g_loss_d) self.assertEqual(d_loss_s, d_loss_d) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py index cc204e6b3d42cce4f9352d12f5e5fde17795d1c0..825144cd16ade1927948dc5351c5e015e3703b75 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py @@ -25,19 +25,23 @@ LOADED_VAR_SUFFIX = ".load_0" def convolutional_neural_network(img): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') return prediction @@ -55,7 +59,6 @@ def static_train_net(img, label): class TestImperativeStaticModelRunnerMnist(unittest.TestCase): - def setUp(self): self.seed = 90 self.epoch_num = 1 @@ -63,7 +66,6 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.batch_num = 50 def reader_decorator(self, reader): - def _reader_impl(): for item in reader(): image = np.array(item[0]).reshape(1, 28, 28) @@ -77,44 +79,58 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): startup_program = fluid.default_startup_program() main_program = fluid.default_main_program() - img = fluid.data(name='img', - shape=[None, 1, 28, 28], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 1, 28, 28], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') prediction, avg_loss = static_train_net(img, label) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) 
feeder = fluid.DataFeeder(feed_list=[img, label], place=place) exe.run(startup_program) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=100), - batch_size=self.batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=100 + ), + batch_size=self.batch_size, + ) for _ in range(0, self.epoch_num): for batch_id, data in enumerate(train_reader()): - exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_loss]) + exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_loss], + ) if batch_id > self.batch_num: break - fluid.io.save_inference_model(self.save_dirname, ["img"], - [prediction], - exe, - model_filename=self.model_filename, - params_filename=self.params_filename, - clip_extra=False) + fluid.io.save_inference_model( + self.save_dirname, + ["img"], + [prediction], + exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + clip_extra=False, + ) def load_and_train_dygraph(self): - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): fluid.default_startup_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed @@ -123,22 +139,26 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): mnist = fluid.dygraph.static_runner.StaticModelRunner( model_dir=self.save_dirname, model_filename=self.model_filename, - params_filename=self.params_filename) + params_filename=self.params_filename, + ) suffix_varname_dict = mnist._program_holder_dict[ - 'forward']._suffix_varname_dict + 'forward' + ]._suffix_varname_dict dict_old_new = {v: k for k, v in suffix_varname_dict.items()} dy_param_init_value = {} for param in mnist.parameters(): dy_param_init_value[param.name] = param.numpy() - sgd = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=mnist.parameters()) + sgd = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=mnist.parameters() + ) - train_reader = paddle.batch(self.reader_decorator( - paddle.dataset.mnist.train()), - batch_size=self.batch_size, - drop_last=True) + train_reader = paddle.batch( + self.reader_decorator(paddle.dataset.mnist.train()), + batch_size=self.batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator(capacity=10) train_loader.set_sample_list_generator(train_reader, places=place) @@ -169,58 +189,71 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): for param in mnist.parameters(): dy_param_value[param.name] = param.numpy() - return dy_x_data, dy_out, dy_param_init_value, dy_param_value, dict_old_new + return ( + dy_x_data, + dy_out, + dy_param_init_value, + dy_param_value, + dict_old_new, + ) def load_and_train_static(self): with new_program_scope(): fluid.default_startup_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed - img = fluid.data(name='img', - shape=[None, 1, 28, 28], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 1, 28, 28], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') prediction, avg_loss = static_train_net(img, label) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - 
fluid.io.load_params(exe, - self.save_dirname, - main_program=fluid.default_main_program(), - filename=self.params_filename) + fluid.io.load_params( + exe, + self.save_dirname, + main_program=fluid.default_main_program(), + filename=self.params_filename, + ) static_param_init_value = {} static_param_name_list = [] for param in fluid.default_main_program().all_parameters(): static_param_name_list.append(param.name) static_param_init_value[param.name] = fluid.executor._fetch_var( - param.name) + param.name + ) - train_reader = paddle.batch(self.reader_decorator( - paddle.dataset.mnist.train()), - batch_size=self.batch_size, - drop_last=True) + train_reader = paddle.batch( + self.reader_decorator(paddle.dataset.mnist.train()), + batch_size=self.batch_size, + drop_last=True, + ) for epoch in range(self.epoch_num): for batch_id, data in enumerate(train_reader()): static_x_data = np.array([x[0] for x in data]) - y_data = np.array([x[1] for x in data - ]).reshape([self.batch_size, 1]) + y_data = np.array([x[1] for x in data]).reshape( + [self.batch_size, 1] + ) fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed={ - "img": static_x_data, - "label": y_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={"img": static_x_data, "label": y_data}, + fetch_list=fetch_list, + ) if batch_id >= self.batch_num: break @@ -230,21 +263,31 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): for i in range(1, len(out)): static_param_value[static_param_name_list[i - 1]] = out[i] - return static_x_data, static_out, static_param_init_value, static_param_value + return ( + static_x_data, + static_out, + static_param_init_value, + static_param_value, + ) def load_and_infer_dygraph(self): - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): fluid.default_main_program().random_seed = self.seed mnist = fluid.dygraph.static_runner.StaticModelRunner( - model_dir=self.save_dirname, model_filename=self.model_filename) - - train_reader = paddle.batch(self.reader_decorator( - paddle.dataset.mnist.test()), - batch_size=self.batch_size, - drop_last=True) + model_dir=self.save_dirname, model_filename=self.model_filename + ) + + train_reader = paddle.batch( + self.reader_decorator(paddle.dataset.mnist.test()), + batch_size=self.batch_size, + drop_last=True, + ) train_loader = fluid.io.DataLoader.from_generator(capacity=10) train_loader.set_sample_list_generator(train_reader, places=place) @@ -264,24 +307,33 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): def load_and_infer_static(self): with new_program_scope(): - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - [infer_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(self.save_dirname, exe) + [ + infer_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model(self.save_dirname, exe) infer_program.random_seed = self.seed - train_reader = paddle.batch(self.reader_decorator( - paddle.dataset.mnist.test()), - batch_size=self.batch_size, - drop_last=True) + train_reader = paddle.batch( + self.reader_decorator(paddle.dataset.mnist.test()), + batch_size=self.batch_size, + 
drop_last=True, + ) for batch_id, data in enumerate(train_reader()): static_x_data = np.array([x[0] for x in data]) - out = exe.run(infer_program, - feed={feed_target_names[0]: static_x_data}, - fetch_list=fetch_targets) + out = exe.run( + infer_program, + feed={feed_target_names[0]: static_x_data}, + fetch_list=fetch_targets, + ) if batch_id >= 1: break @@ -300,11 +352,20 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): # Phase 2. load model & train dygraph - dy_x_data, dy_out, dy_param_init_value, dy_param_value, dict_old_new_init= \ - self.load_and_train_dygraph() - - static_x_data, static_out, static_param_init_value, static_param_value = \ - self.load_and_train_static() + ( + dy_x_data, + dy_out, + dy_param_init_value, + dy_param_value, + dict_old_new_init, + ) = self.load_and_train_dygraph() + + ( + static_x_data, + static_out, + static_param_init_value, + static_param_value, + ) = self.load_and_train_static() # Phase 3. compare np.testing.assert_array_equal(static_x_data, dy_x_data) @@ -318,10 +379,9 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): for key, value in static_param_value.items(): key = dict_old_new_init[key] - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=1e-4) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=1e-4 + ) def test_mnist_train_with_params_filename(self): self.save_dirname = "mnist.inference.model" @@ -331,11 +391,20 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.train_and_save_model() # Phase 2. load model & train dygraph - dy_x_data, dy_out, dy_param_init_value, dy_param_value, dict_old_new_init= \ - self.load_and_train_dygraph() - - static_x_data, static_out, static_param_init_value, static_param_value = \ - self.load_and_train_static() + ( + dy_x_data, + dy_out, + dy_param_init_value, + dy_param_value, + dict_old_new_init, + ) = self.load_and_train_dygraph() + + ( + static_x_data, + static_out, + static_param_init_value, + static_param_value, + ) = self.load_and_train_static() # Phase 3. compare np.testing.assert_array_equal(static_x_data, dy_x_data) @@ -348,10 +417,9 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): for key, value in static_param_value.items(): key = dict_old_new_init[key] - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=1e-4) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=1e-4 + ) def test_mnist_infer_no_params_filename(self): self.save_dirname = "mnist.inference.model.noname" @@ -361,11 +429,9 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.train_and_save_model() # Phase 2. load model & train dygraph - dy_x_data, dy_out = \ - self.load_and_infer_dygraph() + dy_x_data, dy_out = self.load_and_infer_dygraph() - static_x_data, static_out = \ - self.load_and_infer_static() + static_x_data, static_out = self.load_and_infer_static() # Phase 3. 
compare np.testing.assert_array_equal(static_x_data, dy_x_data) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py index b12d8e6a3db484a4a8aab5700ab916d926f7eadd..d163350f136b7e4c9079a45b03fab296b46e464c 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py @@ -27,7 +27,6 @@ LOADED_VAR_SUFFIX = ".load_0" def while_softmax_regression(img): - def cond(i, times, pred): return i < times @@ -39,14 +38,13 @@ def while_softmax_regression(img): i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) times = fluid.layers.fill_constant(shape=[1], dtype='int64', value=5) pred = fluid.layers.fc(input=img, size=10, act='softmax') - i, times, pred = fluid.layers.while_loop(cond=cond, - body=body, - loop_vars=[i, times, pred]) + i, times, pred = fluid.layers.while_loop( + cond=cond, body=body, loop_vars=[i, times, pred] + ) return pred class TestImperativeStaticModelRunnerWhile(unittest.TestCase): - def setUp(self): self.seed = 90 self.batch_size = 32 @@ -56,7 +54,6 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): self.params_filename = None def _random_batch_reader(self): - def _get_random_images_and_labels(image_shape, label_shape): image = np.random.random(size=image_shape).astype('float32') label = np.random.random(size=label_shape).astype('int64') @@ -65,7 +62,8 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): def __reader__(): for _ in range(self.batch_num): batch_image, batch_label = _get_random_images_and_labels( - [self.batch_size, 784], [self.batch_size, 1]) + [self.batch_size, 784], [self.batch_size, 1] + ) yield batch_image, batch_label return __reader__ @@ -85,29 +83,39 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimizer.minimize(avg_loss) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(startup_program) - loader = fluid.io.DataLoader.from_generator(feed_list=[img, label], - capacity=5, - iterable=True) + loader = fluid.io.DataLoader.from_generator( + feed_list=[img, label], capacity=5, iterable=True + ) loader.set_batch_generator(self._random_batch_reader(), places=place) for data in loader(): exe.run(main_program, feed=data, fetch_list=[avg_loss]) - fluid.io.save_inference_model(self.save_dirname, ["img"], [pred], - exe, - model_filename=self.model_filename, - params_filename=self.params_filename, - clip_extra=False) + fluid.io.save_inference_model( + self.save_dirname, + ["img"], + [pred], + exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + clip_extra=False, + ) def load_and_train_dygraph(self): - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.dygraph.guard(place): fluid.default_startup_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed @@ -115,18 +123,21 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) while_net = fluid.dygraph.static_runner.StaticModelRunner( - self.save_dirname) + self.save_dirname + 
) dy_param_init_value = {} for param in while_net.parameters(): dy_param_init_value[param.name] = param.numpy() - sgd = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=while_net.parameters()) + sgd = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=while_net.parameters() + ) train_loader = fluid.io.DataLoader.from_generator(capacity=10) - train_loader.set_batch_generator(self._random_batch_reader(), - places=place) + train_loader.set_batch_generator( + self._random_batch_reader(), places=place + ) while_net.train() @@ -168,37 +179,46 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimizer.minimize(avg_loss) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - fluid.io.load_params(exe, - self.save_dirname, - main_program=fluid.default_main_program(), - filename=self.params_filename) + fluid.io.load_params( + exe, + self.save_dirname, + main_program=fluid.default_main_program(), + filename=self.params_filename, + ) static_param_init_value = {} static_param_name_list = [] for param in fluid.default_main_program().all_parameters(): static_param_name_list.append(param.name) static_param_init_value[param.name] = fluid.executor._fetch_var( - param.name) + param.name + ) - loader = fluid.io.DataLoader.from_generator(feed_list=[img, label], - capacity=5, - iterable=True) - loader.set_batch_generator(self._random_batch_reader(), - places=place) + loader = fluid.io.DataLoader.from_generator( + feed_list=[img, label], capacity=5, iterable=True + ) + loader.set_batch_generator( + self._random_batch_reader(), places=place + ) for data in loader(): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed=data, - fetch_list=[avg_loss]) + out = exe.run( + fluid.default_main_program(), + feed=data, + fetch_list=[avg_loss], + ) static_param_value = {} static_out = out[0] @@ -213,17 +233,24 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): # # Phase 2. load model & train dygraph with unique_name.guard(): - dy_out, dy_param_init_value, dy_param_value = \ - self.load_and_train_dygraph() + ( + dy_out, + dy_param_init_value, + dy_param_value, + ) = self.load_and_train_dygraph() with unique_name.guard(): - static_out, static_param_init_value, static_param_value = \ - self.load_and_train_static() + ( + static_out, + static_param_init_value, + static_param_value, + ) = self.load_and_train_static() # Phase 3. 
compare with unique_name.guard(): dict_old_new_init = rename_var_with_generator( - static_param_init_value.keys()) + static_param_init_value.keys() + ) for key, value in static_param_init_value.items(): key = dict_old_new_init[key] np.testing.assert_array_equal(value, dy_param_init_value[key]) @@ -232,10 +259,9 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): for key, value in static_param_value.items(): key += LOADED_VAR_SUFFIX - np.testing.assert_allclose(value, - dy_param_value[key], - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + value, dy_param_value[key], rtol=1e-05, atol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py index 55879293734ada1e78174eb7b069c1f9cffd19d5..0445e09232f2c27f2b593b58cddca2a445ee88a8 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py @@ -21,7 +21,6 @@ import numpy as np def _dygraph_guard_(func): - def __impl__(*args, **kwargs): if fluid._non_static_mode(): return func(*args, **kwargs) @@ -36,7 +35,6 @@ dygraph_guard = wrap_decorator(_dygraph_guard_) class TestDygraphClearGradient(TestCase): - def setUp(self): self.input_shape = [10, 2] diff --git a/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py index 5e8a54ca3901496d9b68fde01e39808d9010535f..7acbc2d339c1c7edba0d4df3ca624b7250538058 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_thread_local_has_grad.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class SimpleNet(nn.Layer): - def __init__(self, in_dim, out_dim): super(SimpleNet, self).__init__() self.fc = nn.Linear(in_dim, out_dim) @@ -32,7 +31,6 @@ class SimpleNet(nn.Layer): class TestCases(unittest.TestCase): - @paddle.no_grad() def thread_1_main(self): time.sleep(8) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py b/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py index a346bb2298588ad7c8fd3b487e9b062d9b7c5ff4..66a1e7f84119c6405ecc31da3574b5d1b50e748d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py @@ -19,12 +19,12 @@ import os class SimpleFCLayer(fluid.dygraph.Layer): - def __init__(self, feature_size, batch_size, fc_size): super(SimpleFCLayer, self).__init__() self._linear = fluid.dygraph.Linear(feature_size, fc_size) self._offset = fluid.dygraph.to_variable( - np.random.random((batch_size, fc_size)).astype('float32')) + np.random.random((batch_size, fc_size)).astype('float32') + ) def forward(self, x): fc = self._linear(x) @@ -32,7 +32,6 @@ class SimpleFCLayer(fluid.dygraph.Layer): class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): - def test_main(self): if fluid.framework.in_dygraph_mode(): return @@ -42,21 +41,28 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): batch_size = 4 fc_size = 2 layer = SimpleFCLayer(feature_size, batch_size, fc_size) - optimizer = fluid.optimizer.SGD(learning_rate=1e-3, - parameter_list=layer.parameters()) - - expected_persistable_vars = 
set([ - layer._linear.weight.name, layer._linear.bias.name, - layer._offset.name - ]) + optimizer = fluid.optimizer.SGD( + learning_rate=1e-3, parameter_list=layer.parameters() + ) + + expected_persistable_vars = set( + [ + layer._linear.weight.name, + layer._linear.bias.name, + layer._offset.name, + ] + ) for _ in range(10): in_x = fluid.dygraph.to_variable( - np.random.random( - (batch_size, feature_size)).astype('float32')) + np.random.random((batch_size, feature_size)).astype( + 'float32' + ) + ) if traced_layer is None: dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - layer, [in_x]) + layer, [in_x] + ) else: dygraph_out = layer(in_x) dygraph_out_numpy = dygraph_out.numpy() @@ -79,11 +85,15 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): self.assertEqual(actual_persistable_vars, expected_persistable_vars) traced_layer.save_inference_model( - path='./traced_layer_test_non_persistable_vars') - self.assertTrue('traced_layer_test_non_persistable_vars.pdmodel' in - os.listdir('./')) - self.assertTrue('traced_layer_test_non_persistable_vars.pdiparams' in - os.listdir('./')) + path='./traced_layer_test_non_persistable_vars' + ) + self.assertTrue( + 'traced_layer_test_non_persistable_vars.pdmodel' in os.listdir('./') + ) + self.assertTrue( + 'traced_layer_test_non_persistable_vars.pdiparams' + in os.listdir('./') + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index 3ba02fe8dafcd04aee6e3279ddc299cf5cde5cb7..22189d88d3102d246622a042e048fbbf7ac96f27 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -149,12 +149,15 @@ def position_encoding_init(n_position, d_pos_vec): channels = d_pos_vec position = np.arange(n_position) num_timescales = channels // 2 - log_timescale_increment = (np.log(float(1e4) / float(1)) / - (num_timescales - 1)) - inv_timescales = np.exp( - np.arange(num_timescales)) * -log_timescale_increment + log_timescale_increment = np.log(float(1e4) / float(1)) / ( + num_timescales - 1 + ) + inv_timescales = ( + np.exp(np.arange(num_timescales)) * -log_timescale_increment + ) scaled_time = np.expand_dims(position, 1) * np.expand_dims( - inv_timescales, 0) + inv_timescales, 0 + ) signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant') position_enc = signal @@ -164,21 +167,27 @@ def position_encoding_init(n_position, d_pos_vec): def create_data(is_static=False): if is_static: return [ - src_word_np, src_pos_np, src_slf_attn_bias_np, trg_word_np, - trg_pos_np, trg_slf_attn_bias_np, trg_src_attn_bias_np, lbl_word_np, - lbl_weight_np + src_word_np, + src_pos_np, + src_slf_attn_bias_np, + trg_word_np, + trg_pos_np, + trg_slf_attn_bias_np, + trg_src_attn_bias_np, + lbl_word_np, + lbl_weight_np, ] else: enc_inputs = [ to_variable(src_word_np, name='src_word'), to_variable(src_pos_np, name='src_pos'), - to_variable(src_slf_attn_bias_np, name='src_slf_attn_bias') + to_variable(src_slf_attn_bias_np, name='src_slf_attn_bias'), ] dec_inputs = [ to_variable(trg_word_np, name='trg_word'), to_variable(trg_pos_np, name='trg_pos'), to_variable(trg_slf_attn_bias_np, name='trg_slf_attn_bias'), - to_variable(trg_src_attn_bias_np, name='trg_src_attn_bias') + 
to_variable(trg_src_attn_bias_np, name='trg_src_attn_bias'), ] label = to_variable(lbl_word_np, name='lbl_word') weight = to_variable(lbl_weight_np, name='lbl_weight') @@ -187,11 +196,18 @@ def create_data(is_static=False): def create_feed_dict_list(data, init=False): if init: - data_input_names = encoder_data_input_fields + \ - decoder_data_input_fields[:-1] + label_data_input_fields + pos_enc_param_names + data_input_names = ( + encoder_data_input_fields + + decoder_data_input_fields[:-1] + + label_data_input_fields + + pos_enc_param_names + ) else: - data_input_names = encoder_data_input_fields + \ - decoder_data_input_fields[:-1] + label_data_input_fields + data_input_names = ( + encoder_data_input_fields + + decoder_data_input_fields[:-1] + + label_data_input_fields + ) feed_dict_list = dict() for i in range(len(data_input_names)): feed_dict_list[data_input_names[i]] = data[i] @@ -204,12 +220,15 @@ def make_all_inputs(input_fields): """ inputs = [] for input_field in input_fields: - input_var = fluid.layers.data(name=input_field, - shape=input_descs[input_field][0], - dtype=input_descs[input_field][1], - lod_level=input_descs[input_field][2] if - len(input_descs[input_field]) == 3 else 0, - append_batch_size=False) + input_var = fluid.layers.data( + name=input_field, + shape=input_descs[input_field][0], + dtype=input_descs[input_field][1], + lod_level=input_descs[input_field][2] + if len(input_descs[input_field]) == 3 + else 0, + append_batch_size=False, + ) inputs.append(input_var) return inputs @@ -234,12 +253,17 @@ input_descs = { # encoder. # The actual data shape of src_slf_attn_bias is: # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch] - "src_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "src_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # The actual data shape of trg_word is: # [batch_size, max_trg_len_in_batch] - "trg_word": [(batch_size, seq_len), "int64", - 2], # lod_level is only used in fast decoder. + "trg_word": [ + (batch_size, seq_len), + "int64", + 2, + ], # lod_level is only used in fast decoder. # The actual data shape of trg_pos is: # [batch_size, max_trg_len_in_batch] "trg_pos": [(batch_size, seq_len), "int64"], @@ -247,14 +271,18 @@ input_descs = { # subsequent words in the decoder. # The actual data shape of trg_slf_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch] - "trg_slf_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_slf_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used to remove attention weights on paddings of the source # input in the encoder-decoder attention. # The actual data shape of trg_src_attn_bias is: # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch] - "trg_src_attn_bias": - [(batch_size, ModelHyperParams.n_head, seq_len, seq_len), "float32"], + "trg_src_attn_bias": [ + (batch_size, ModelHyperParams.n_head, seq_len, seq_len), + "float32", + ], # This input is used in independent decoder program for inference. 
# The actual data shape of enc_output is: # [batch_size, max_src_len_in_batch, d_model] @@ -270,7 +298,7 @@ input_descs = { "init_score": [(batch_size, 1), "float32", 2], # This input is used in beam-search decoder for the first gather # (cell states updation) - "init_idx": [(batch_size, ), "int32"], + "init_idx": [(batch_size,), "int32"], } # Names of word embedding table which might be reused for weight sharing. @@ -318,44 +346,52 @@ sync = False batch_num = 5 np.random.seed(90) -src_word_np = np.arange(1, TrainTaskConfig.batch_size * seq_len + 1).reshape( - [TrainTaskConfig.batch_size, seq_len]).astype('int64') -src_pos_np = np.random.randint(1, - seq_len, - size=(TrainTaskConfig.batch_size, seq_len), - dtype='int64') -src_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, - ModelHyperParams.n_head, seq_len, - seq_len).astype('float32') - -trg_word_np = np.arange(1, TrainTaskConfig.batch_size * seq_len + 1).reshape( - [TrainTaskConfig.batch_size, seq_len]).astype('int64') -trg_pos_np = np.random.randint(1, - seq_len, - size=(TrainTaskConfig.batch_size, seq_len), - dtype='int64') -trg_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, - ModelHyperParams.n_head, seq_len, - seq_len).astype('float32') -trg_src_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size, - ModelHyperParams.n_head, seq_len, - seq_len).astype('float32') - -lbl_word_np = np.random.randint(1, - ModelHyperParams.src_vocab_size - 1, - size=(TrainTaskConfig.batch_size * seq_len, 1), - dtype='int64') -lbl_weight_np = np.random.randn(TrainTaskConfig.batch_size * seq_len, - 1).astype('float32') - -pos_inp1 = position_encoding_init(ModelHyperParams.max_length, - ModelHyperParams.d_model) -pos_inp2 = position_encoding_init(ModelHyperParams.max_length, - ModelHyperParams.d_model) +src_word_np = ( + np.arange(1, TrainTaskConfig.batch_size * seq_len + 1) + .reshape([TrainTaskConfig.batch_size, seq_len]) + .astype('int64') +) +src_pos_np = np.random.randint( + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len), dtype='int64' +) +src_slf_attn_bias_np = np.random.randn( + TrainTaskConfig.batch_size, ModelHyperParams.n_head, seq_len, seq_len +).astype('float32') + +trg_word_np = ( + np.arange(1, TrainTaskConfig.batch_size * seq_len + 1) + .reshape([TrainTaskConfig.batch_size, seq_len]) + .astype('int64') +) +trg_pos_np = np.random.randint( + 1, seq_len, size=(TrainTaskConfig.batch_size, seq_len), dtype='int64' +) +trg_slf_attn_bias_np = np.random.randn( + TrainTaskConfig.batch_size, ModelHyperParams.n_head, seq_len, seq_len +).astype('float32') +trg_src_attn_bias_np = np.random.randn( + TrainTaskConfig.batch_size, ModelHyperParams.n_head, seq_len, seq_len +).astype('float32') + +lbl_word_np = np.random.randint( + 1, + ModelHyperParams.src_vocab_size - 1, + size=(TrainTaskConfig.batch_size * seq_len, 1), + dtype='int64', +) +lbl_weight_np = np.random.randn(TrainTaskConfig.batch_size * seq_len, 1).astype( + 'float32' +) +pos_inp1 = position_encoding_init( + ModelHyperParams.max_length, ModelHyperParams.d_model +) +pos_inp2 = position_encoding_init( + ModelHyperParams.max_length, ModelHyperParams.d_model +) -class PrePostProcessLayer(Layer): +class PrePostProcessLayer(Layer): def __init__(self, d_model, process_cmd, shape_len=None): super(PrePostProcessLayer, self).__init__() for cmd in process_cmd: @@ -363,11 +399,14 @@ class PrePostProcessLayer(Layer): self._layer_norm = LayerNorm( normalized_shape=d_model, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.)), + 
initializer=fluid.initializer.Constant(1.0) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.))) + initializer=fluid.initializer.Constant(0.0) + ), + ) - def forward(self, prev_out, out, process_cmd, dropout_rate=0.): + def forward(self, prev_out, out, process_cmd, dropout_rate=0.0): for cmd in process_cmd: if cmd == "a": # add residual connection out = out + prev_out if prev_out is not None else out @@ -379,12 +418,12 @@ class PrePostProcessLayer(Layer): out, dropout_prob=dropout_rate, seed=ModelHyperParams.dropout_seed, - is_test=False) + is_test=False, + ) return out class PositionwiseFeedForwardLayer(Layer): - def __init__(self, d_inner_hid, d_hid, dropout_rate): super(PositionwiseFeedForwardLayer, self).__init__() self._i2h = Linear(d_hid, d_inner_hid, act="relu") @@ -394,25 +433,28 @@ class PositionwiseFeedForwardLayer(Layer): def forward(self, x): hidden = self._i2h(x) if self._dropout_rate: - hidden = fluid.layers.dropout(hidden, - dropout_prob=self._dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) + hidden = fluid.layers.dropout( + hidden, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) out = self._h2o(hidden) return out class MultiHeadAttentionLayer(Layer): - - def __init__(self, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0., - cache=None, - gather_idx=None, - static_kv=False): + def __init__( + self, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.0, + cache=None, + gather_idx=None, + static_kv=False, + ): super(MultiHeadAttentionLayer, self).__init__() self._n_head = n_head self._d_key = d_key @@ -435,20 +477,25 @@ class MultiHeadAttentionLayer(Layer): # split head reshaped_q = fluid.layers.reshape( - x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False) + x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False + ) transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3]) reshaped_k = fluid.layers.reshape( - x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False) + x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False + ) transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3]) reshaped_v = fluid.layers.reshape( - x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False) + x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False + ) transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) # scale dot product attention - product = fluid.layers.matmul(x=transpose_q, - y=transpose_k, - transpose_y=True, - alpha=self._d_model**-0.5) + product = fluid.layers.matmul( + x=transpose_q, + y=transpose_k, + transpose_y=True, + alpha=self._d_model**-0.5, + ) if attn_bias is not None: product += attn_bias weights = fluid.layers.softmax(product) @@ -457,7 +504,8 @@ class MultiHeadAttentionLayer(Layer): weights, dropout_prob=self._dropout_rate, seed=ModelHyperParams.dropout_seed, - is_test=False) + is_test=False, + ) out = fluid.layers.matmul(weights_droped, transpose_v) else: out = fluid.layers.matmul(weights, transpose_v) @@ -469,7 +517,8 @@ class MultiHeadAttentionLayer(Layer): final_out = fluid.layers.reshape( x=trans_x, shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]], - inplace=False) + inplace=False, + ) # fc to output proj_out = self._proj_fc(final_out) @@ -477,118 +526,150 @@ class MultiHeadAttentionLayer(Layer): class EncoderSubLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - 
preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(EncoderSubLayer, self).__init__() self._preprocess_cmd = preprocess_cmd self._postprocess_cmd = postprocess_cmd self._prepostprocess_dropout = prepostprocess_dropout - self._preprocess_layer = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + self._preprocess_layer = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) self._multihead_attention_layer = MultiHeadAttentionLayer( - d_key, d_value, d_model, n_head, attention_dropout) - self._postprocess_layer = PrePostProcessLayer(d_model, - self._postprocess_cmd, - None) - self._preprocess_layer2 = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + d_key, d_value, d_model, n_head, attention_dropout + ) + self._postprocess_layer = PrePostProcessLayer( + d_model, self._postprocess_cmd, None + ) + self._preprocess_layer2 = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) self._positionwise_feed_forward = PositionwiseFeedForwardLayer( - d_inner_hid, d_model, relu_dropout) - self._postprocess_layer2 = PrePostProcessLayer(d_model, - self._postprocess_cmd, - None) + d_inner_hid, d_model, relu_dropout + ) + self._postprocess_layer2 = PrePostProcessLayer( + d_model, self._postprocess_cmd, None + ) def forward(self, enc_input, attn_bias): pre_process_multihead = self._preprocess_layer( - None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout) - attn_output = self._multihead_attention_layer(pre_process_multihead, - None, None, attn_bias) - attn_output = self._postprocess_layer(enc_input, attn_output, - self._postprocess_cmd, - self._prepostprocess_dropout) + None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout + ) + attn_output = self._multihead_attention_layer( + pre_process_multihead, None, None, attn_bias + ) + attn_output = self._postprocess_layer( + enc_input, + attn_output, + self._postprocess_cmd, + self._prepostprocess_dropout, + ) pre_process2_output = self._preprocess_layer2( - None, attn_output, self._preprocess_cmd, - self._prepostprocess_dropout) + None, + attn_output, + self._preprocess_cmd, + self._prepostprocess_dropout, + ) ffd_output = self._positionwise_feed_forward(pre_process2_output) - return self._postprocess_layer2(attn_output, ffd_output, - self._postprocess_cmd, - self._prepostprocess_dropout) + return self._postprocess_layer2( + attn_output, + ffd_output, + self._postprocess_cmd, + self._prepostprocess_dropout, + ) class EncoderLayer(Layer): - - def __init__(self, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd="n", - postprocess_cmd="da"): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da", + ): super(EncoderLayer, self).__init__() self._preprocess_cmd = preprocess_cmd self._encoder_sublayers = list() self._prepostprocess_dropout = prepostprocess_dropout self._n_layer = n_layer - self._preprocess_layer = PrePostProcessLayer(d_model, - self._preprocess_cmd, 3) + self._preprocess_layer = PrePostProcessLayer( + d_model, self._preprocess_cmd, 3 + ) for i in range(n_layer): self._encoder_sublayers.append( self.add_sublayer( 'esl_%d' % i, - EncoderSubLayer(n_head, d_key, d_value, d_model, 
- d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, - preprocess_cmd, postprocess_cmd))) + EncoderSubLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ), + ) + ) def forward(self, enc_input, attn_bias): for i in range(self._n_layer): enc_output = self._encoder_sublayers[i](enc_input, attn_bias) enc_input = enc_output - return self._preprocess_layer(None, enc_output, self._preprocess_cmd, - self._prepostprocess_dropout) + return self._preprocess_layer( + None, enc_output, self._preprocess_cmd, self._prepostprocess_dropout + ) class PrepareEncoderDecoderLayer(Layer): - - def __init__(self, - src_vocab_size, - src_emb_dim, - src_max_len, - dropout_rate, - is_sparse=False, - word_emb_param_name=None, - pos_enc_param_name=None): + def __init__( + self, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate, + is_sparse=False, + word_emb_param_name=None, + pos_enc_param_name=None, + ): super(PrepareEncoderDecoderLayer, self).__init__() self._src_max_len = src_max_len self._src_emb_dim = src_emb_dim self._src_vocab_size = src_vocab_size self._dropout_rate = dropout_rate - self._input_emb = Embedding(size=[src_vocab_size, src_emb_dim], - is_sparse=is_sparse, - padding_idx=0, - param_attr=fluid.ParamAttr( - name=word_emb_param_name, - initializer=fluid.initializer.Normal( - 0., src_emb_dim**-0.5))) + self._input_emb = Embedding( + size=[src_vocab_size, src_emb_dim], + is_sparse=is_sparse, + padding_idx=0, + param_attr=fluid.ParamAttr( + name=word_emb_param_name, + initializer=fluid.initializer.Normal(0.0, src_emb_dim**-0.5), + ), + ) if pos_enc_param_name is pos_enc_param_names[0]: pos_inp = pos_inp1 @@ -600,7 +681,9 @@ class PrepareEncoderDecoderLayer(Layer): param_attr=fluid.ParamAttr( name=pos_enc_param_name, initializer=fluid.initializer.NumpyArrayInitializer(pos_inp), - trainable=False)) + trainable=False, + ), + ) # use in dygraph_mode to fit different length batch # self._pos_emb._w = to_variable( @@ -608,37 +691,44 @@ class PrepareEncoderDecoderLayer(Layer): def forward(self, src_word, src_pos): src_word_emb = self._input_emb(src_word) - src_word_emb = fluid.layers.scale(x=src_word_emb, - scale=self._src_emb_dim**0.5) + src_word_emb = fluid.layers.scale( + x=src_word_emb, scale=self._src_emb_dim**0.5 + ) # # TODO change this to fit dynamic length input src_pos_emb = self._pos_emb(src_pos) src_pos_emb.stop_gradient = True enc_input = src_word_emb + src_pos_emb - return fluid.layers.dropout( - enc_input, - dropout_prob=self._dropout_rate, - seed=ModelHyperParams.dropout_seed, - is_test=False) if self._dropout_rate else enc_input + return ( + fluid.layers.dropout( + enc_input, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False, + ) + if self._dropout_rate + else enc_input + ) class WrapEncoderLayer(Layer): - - def __init__(self, - src_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=False): + def __init__( + self, + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=False, + ): """ The wrapper assembles together all needed layers for the encoder. 
""" @@ -651,11 +741,21 @@ class WrapEncoderLayer(Layer): prepostprocess_dropout, is_sparse=is_sparse, word_emb_param_name=word_emb_param_names[0], - pos_enc_param_name=pos_enc_param_names[0]) - self._encoder = EncoderLayer(n_layer, n_head, d_key, d_value, d_model, - d_inner_hid, prepostprocess_dropout, - attention_dropout, relu_dropout, - preprocess_cmd, postprocess_cmd) + pos_enc_param_name=pos_enc_param_names[0], + ) + self._encoder = EncoderLayer( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + ) def forward(self, enc_inputs): src_word, src_pos, src_slf_attn_bias = enc_inputs @@ -665,26 +765,28 @@ class WrapEncoderLayer(Layer): class DecoderSubLayer(Layer): - - def __init__(self, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - cache=None, - gather_idx=None): + def __init__( + self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None, + gather_idx=None, + ): super(DecoderSubLayer, self).__init__() self._postprocess_cmd = postprocess_cmd self._preprocess_cmd = preprocess_cmd self._prepostprcess_dropout = prepostprocess_dropout - self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + self._pre_process_layer = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._multihead_attention_layer = MultiHeadAttentionLayer( d_key, d_value, @@ -692,11 +794,14 @@ class DecoderSubLayer(Layer): n_head, attention_dropout, cache=cache, - gather_idx=gather_idx) - self._post_process_layer = PrePostProcessLayer(d_model, postprocess_cmd, - None) - self._pre_process_layer2 = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + gather_idx=gather_idx, + ) + self._post_process_layer = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) + self._pre_process_layer2 = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._multihead_attention_layer2 = MultiHeadAttentionLayer( d_key, d_value, @@ -705,63 +810,86 @@ class DecoderSubLayer(Layer): attention_dropout, cache=cache, gather_idx=gather_idx, - static_kv=True) - self._post_process_layer2 = PrePostProcessLayer(d_model, - postprocess_cmd, None) - self._pre_process_layer3 = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + static_kv=True, + ) + self._post_process_layer2 = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) + self._pre_process_layer3 = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer( - d_inner_hid, d_model, relu_dropout) - self._post_process_layer3 = PrePostProcessLayer(d_model, - postprocess_cmd, None) + d_inner_hid, d_model, relu_dropout + ) + self._post_process_layer3 = PrePostProcessLayer( + d_model, postprocess_cmd, None + ) def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias): - pre_process_rlt = self._pre_process_layer(None, dec_input, - self._preprocess_cmd, - self._prepostprcess_dropout) + pre_process_rlt = self._pre_process_layer( + None, dec_input, self._preprocess_cmd, self._prepostprcess_dropout + ) slf_attn_output = self._multihead_attention_layer( - pre_process_rlt, None, None, slf_attn_bias) + pre_process_rlt, None, None, slf_attn_bias + ) slf_attn_output_pp = self._post_process_layer( - dec_input, slf_attn_output, self._postprocess_cmd, - 
self._prepostprcess_dropout) - pre_process_rlt2 = self._pre_process_layer2(None, slf_attn_output_pp, - self._preprocess_cmd, - self._prepostprcess_dropout) + dec_input, + slf_attn_output, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) + pre_process_rlt2 = self._pre_process_layer2( + None, + slf_attn_output_pp, + self._preprocess_cmd, + self._prepostprcess_dropout, + ) enc_attn_output_pp = self._multihead_attention_layer2( - pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias) - enc_attn_output = self._post_process_layer2(slf_attn_output_pp, - enc_attn_output_pp, - self._postprocess_cmd, - self._prepostprcess_dropout) - pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output, - self._preprocess_cmd, - self._prepostprcess_dropout) + pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias + ) + enc_attn_output = self._post_process_layer2( + slf_attn_output_pp, + enc_attn_output_pp, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) + pre_process_rlt3 = self._pre_process_layer3( + None, + enc_attn_output, + self._preprocess_cmd, + self._prepostprcess_dropout, + ) ffd_output = self._positionwise_feed_forward_layer(pre_process_rlt3) - dec_output = self._post_process_layer3(enc_attn_output, ffd_output, - self._postprocess_cmd, - self._prepostprcess_dropout) + dec_output = self._post_process_layer3( + enc_attn_output, + ffd_output, + self._postprocess_cmd, + self._prepostprcess_dropout, + ) return dec_output class DecoderLayer(Layer): - - def __init__(self, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - caches=None, - gather_idx=None): + def __init__( + self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=None, + gather_idx=None, + ): super(DecoderLayer, self).__init__() - self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd, - 3) + self._pre_process_layer = PrePostProcessLayer( + d_model, preprocess_cmd, 3 + ) self._decoder_sub_layers = list() self._n_layer = n_layer self._preprocess_cmd = preprocess_cmd @@ -770,53 +898,62 @@ class DecoderLayer(Layer): self._decoder_sub_layers.append( self.add_sublayer( 'dsl_%d' % i, - DecoderSubLayer(n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - cache=None if caches is None else caches[i], - gather_idx=gather_idx))) - - def forward(self, dec_input, enc_output, dec_slf_attn_bias, - dec_enc_attn_bias): + DecoderSubLayer( + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None if caches is None else caches[i], + gather_idx=gather_idx, + ), + ) + ) + + def forward( + self, dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias + ): for i in range(self._n_layer): - tmp_dec_output = self._decoder_sub_layers[i](dec_input, enc_output, - dec_slf_attn_bias, - dec_enc_attn_bias) + tmp_dec_output = self._decoder_sub_layers[i]( + dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias + ) dec_input = tmp_dec_output - dec_output = self._pre_process_layer(None, tmp_dec_output, - self._preprocess_cmd, - self._prepostprocess_dropout) + dec_output = self._pre_process_layer( + None, + tmp_dec_output, + self._preprocess_cmd, + 
self._prepostprocess_dropout, + ) return dec_output class WrapDecoderLayer(Layer): - - def __init__(self, - trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - caches=None, - gather_idx=None, - is_sparse=False): + def __init__( + self, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + caches=None, + gather_idx=None, + is_sparse=False, + ): """ The wrapper assembles together all needed layers for the encoder. """ @@ -829,20 +966,23 @@ class WrapDecoderLayer(Layer): prepostprocess_dropout, is_sparse=is_sparse, word_emb_param_name=word_emb_param_names[1], - pos_enc_param_name=pos_enc_param_names[1]) - self._decoder_layer = DecoderLayer(n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - caches=caches, - gather_idx=gather_idx) + pos_enc_param_name=pos_enc_param_names[1], + ) + self._decoder_layer = DecoderLayer( + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=caches, + gather_idx=gather_idx, + ) self._weight_sharing = weight_sharing if not weight_sharing: self._fc = Linear(d_model, trg_vocab_size, bias_attr=False) @@ -850,17 +990,20 @@ class WrapDecoderLayer(Layer): def forward(self, dec_inputs=None, enc_output=None): trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs dec_input = self._prepare_decoder_layer(trg_word, trg_pos) - dec_output = self._decoder_layer(dec_input, enc_output, - trg_slf_attn_bias, trg_src_attn_bias) + dec_output = self._decoder_layer( + dec_input, enc_output, trg_slf_attn_bias, trg_src_attn_bias + ) dec_output_reshape = fluid.layers.reshape( - dec_output, shape=[-1, dec_output.shape[-1]], inplace=False) + dec_output, shape=[-1, dec_output.shape[-1]], inplace=False + ) if self._weight_sharing: predict = fluid.layers.matmul( x=dec_output_reshape, y=self._prepare_decoder_layer._input_emb.weight, - transpose_y=True) + transpose_y=True, + ) else: predict = self._fc(dec_output_reshape) @@ -872,81 +1015,91 @@ class WrapDecoderLayer(Layer): class TransFormer(Layer): - - def __init__(self, - src_vocab_size, - trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - label_smooth_eps, - use_py_reader=False, - is_test=False, - is_sparse=False): + def __init__( + self, + src_vocab_size, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + label_smooth_eps, + use_py_reader=False, + is_test=False, + is_sparse=False, + ): super(TransFormer, self).__init__() self._label_smooth_eps = label_smooth_eps self._trg_vocab_size = trg_vocab_size if weight_sharing: - assert src_vocab_size == trg_vocab_size, ( - "Vocabularies in source and target should be same for weight sharing." 
- ) - self._wrap_encoder_layer = WrapEncoderLayer(src_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=is_sparse) - self._wrap_decoder_layer = WrapDecoderLayer(trg_vocab_size, - max_length, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - prepostprocess_dropout, - attention_dropout, - relu_dropout, - preprocess_cmd, - postprocess_cmd, - weight_sharing, - is_sparse=is_sparse) + assert ( + src_vocab_size == trg_vocab_size + ), "Vocabularies in source and target should be same for weight sharing." + self._wrap_encoder_layer = WrapEncoderLayer( + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=is_sparse, + ) + self._wrap_decoder_layer = WrapDecoderLayer( + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + is_sparse=is_sparse, + ) if weight_sharing: - self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight + self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = ( + self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight + ) def forward(self, enc_inputs, dec_inputs, label, weights): enc_output = self._wrap_encoder_layer(enc_inputs) predict = self._wrap_decoder_layer(dec_inputs, enc_output) if self._label_smooth_eps: label_out = fluid.layers.label_smooth( - label=fluid.layers.one_hot(input=label, - depth=self._trg_vocab_size), - epsilon=self._label_smooth_eps) + label=fluid.layers.one_hot( + input=label, depth=self._trg_vocab_size + ), + epsilon=self._label_smooth_eps, + ) cost = fluid.layers.softmax_with_cross_entropy( logits=predict, label=label_out, - soft_label=True if self._label_smooth_eps else False) + soft_label=True if self._label_smooth_eps else False, + ) weighted_cost = cost * weights sum_cost = fluid.layers.reduce_sum(weighted_cost) token_num = fluid.layers.reduce_sum(weights) @@ -956,7 +1109,6 @@ class TransFormer(Layer): class TestDygraphTransformerSortGradient(unittest.TestCase): - def test_transformer_sort_gradient(self): for is_sparse in [True, False]: self.transformer_sort_gradient_float32(is_sparse) @@ -969,28 +1121,31 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): fluid.set_flags({'FLAGS_new_executor_use_inplace': False}) paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - transformer = TransFormer(ModelHyperParams.src_vocab_size, - ModelHyperParams.trg_vocab_size, - ModelHyperParams.max_length + 1, - ModelHyperParams.n_layer, - ModelHyperParams.n_head, - ModelHyperParams.d_key, - ModelHyperParams.d_value, - ModelHyperParams.d_model, - ModelHyperParams.d_inner_hid, - ModelHyperParams.prepostprocess_dropout, - ModelHyperParams.attention_dropout, - ModelHyperParams.relu_dropout, - ModelHyperParams.preprocess_cmd, - ModelHyperParams.postprocess_cmd, - ModelHyperParams.weight_sharing, - TrainTaskConfig.label_smooth_eps, - use_py_reader=use_py_reader, - is_test=False, - is_sparse=is_sparse) + transformer = TransFormer( + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + 
ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.prepostprocess_dropout, + ModelHyperParams.attention_dropout, + ModelHyperParams.relu_dropout, + ModelHyperParams.preprocess_cmd, + ModelHyperParams.postprocess_cmd, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + use_py_reader=use_py_reader, + is_test=False, + is_sparse=is_sparse, + ) if sync: lr_decay = fluid.layers.learning_rate_scheduler.noam_decay( - ModelHyperParams.d_model, TrainTaskConfig.warmup_steps) + ModelHyperParams.d_model, TrainTaskConfig.warmup_steps + ) with fluid.default_main_program()._lr_schedule_guard(): learning_rate = lr_decay * TrainTaskConfig.learning_rate optimizer = fluid.optimizer.Adam( @@ -998,11 +1153,12 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): beta1=TrainTaskConfig.beta1, beta2=TrainTaskConfig.beta2, epsilon=TrainTaskConfig.eps, - parameter_list=transformer.parameters()) + parameter_list=transformer.parameters(), + ) else: optimizer = fluid.optimizer.SGD( - learning_rate=0.003, - parameter_list=transformer.parameters()) + learning_rate=0.003, parameter_list=transformer.parameters() + ) dy_param_init = dict() dy_param_updated = dict() @@ -1013,20 +1169,23 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): enc_inputs, dec_inputs, label, weights = create_data() if False: outs, traced_layer = TracedLayer.trace( - transformer, [enc_inputs, dec_inputs, label, weights]) + transformer, [enc_inputs, dec_inputs, label, weights] + ) ins_static = enc_inputs + dec_inputs + [label, weights] outs_static = traced_layer(ins_static) helper.assertEachVar(outs, outs_static) if program is not None: self.assertTrue( - is_equal_program(program, traced_layer.program)) + is_equal_program(program, traced_layer.program) + ) program = traced_layer.program traced_layer.save_inference_model( './infer_imperative_transformer', feed=list(range(len(ins_static))), - fetch=list(range(len(outs_static)))) + fetch=list(range(len(outs_static))), + ) else: outs = transformer(enc_inputs, dec_inputs, label, weights) @@ -1049,61 +1208,88 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): dy_predict_value = dy_predict.numpy() dy_token_num_value = dy_token_num.numpy() - return dy_avg_cost_value, dy_sum_cost_value, dy_predict_value, dy_token_num_value, \ - dy_param_init, dy_param_updated + return ( + dy_avg_cost_value, + dy_sum_cost_value, + dy_predict_value, + dy_token_num_value, + dy_param_init, + dy_param_updated, + ) with guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) if _in_legacy_dygraph(): - dy_avg_cost_value, dy_sum_cost_value, dy_predict_value, dy_token_num_value, \ - dy_param_init, dy_param_updated = run_dygraph() + ( + dy_avg_cost_value, + dy_sum_cost_value, + dy_predict_value, + dy_token_num_value, + dy_param_init, + dy_param_updated, + ) = run_dygraph() with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - transformer = TransFormer(ModelHyperParams.src_vocab_size, - ModelHyperParams.trg_vocab_size, - ModelHyperParams.max_length + 1, - ModelHyperParams.n_layer, - ModelHyperParams.n_head, - ModelHyperParams.d_key, - ModelHyperParams.d_value, - ModelHyperParams.d_model, - ModelHyperParams.d_inner_hid, - ModelHyperParams.prepostprocess_dropout, - ModelHyperParams.attention_dropout, - ModelHyperParams.relu_dropout, - ModelHyperParams.preprocess_cmd, - 
ModelHyperParams.postprocess_cmd, - ModelHyperParams.weight_sharing, - TrainTaskConfig.label_smooth_eps, - use_py_reader=use_py_reader, - is_test=False, - is_sparse=is_sparse) - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + transformer = TransFormer( + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.prepostprocess_dropout, + ModelHyperParams.attention_dropout, + ModelHyperParams.relu_dropout, + ModelHyperParams.preprocess_cmd, + ModelHyperParams.postprocess_cmd, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + use_py_reader=use_py_reader, + is_test=False, + is_sparse=is_sparse, + ) + exe = fluid.Executor( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) optimizer = fluid.optimizer.SGD(learning_rate=0.003) - data_input_names = encoder_data_input_fields + decoder_data_input_fields[: - -1] + label_data_input_fields + data_input_names = ( + encoder_data_input_fields + + decoder_data_input_fields[:-1] + + label_data_input_fields + ) all_inputs = make_all_inputs(data_input_names) enc_inputs_len = len(encoder_data_input_fields) dec_inputs_len = len(decoder_data_input_fields[:-1]) enc_inputs = all_inputs[0:enc_inputs_len] - dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len + - dec_inputs_len] + dec_inputs = all_inputs[ + enc_inputs_len : enc_inputs_len + dec_inputs_len + ] label = all_inputs[-2] weights = all_inputs[-1] static_param_updated = dict() static_param_init = dict() static_param_name_list = list() - static_sum_cost, static_avg_cost, static_predict, static_token_num = transformer( - enc_inputs, dec_inputs, label, weights) + ( + static_sum_cost, + static_avg_cost, + static_predict, + static_token_num, + ) = transformer(enc_inputs, dec_inputs, label, weights) optimizer.minimize(static_avg_cost) for param in transformer.parameters(): static_param_name_list.append(param.name) - out = exe.run(fluid.default_startup_program(), - fetch_list=static_param_name_list) + out = exe.run( + fluid.default_startup_program(), + fetch_list=static_param_name_list, + ) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] static_sum_cost_value = None @@ -1113,31 +1299,40 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): for i in range(batch_num): feed_dict = create_feed_dict_list(create_data(True)) fetch_list = [ - static_sum_cost, static_avg_cost, static_predict, - static_token_num + static_sum_cost, + static_avg_cost, + static_predict, + static_token_num, ] fetch_list.extend(static_param_name_list) - out = exe.run(fluid.default_main_program(), - feed=feed_dict, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed=feed_dict, + fetch_list=fetch_list, + ) static_sum_cost_value = out[0] static_avg_cost_value = out[1] static_predict_value = out[2] static_token_num_value = out[3] if i == batch_num - 1: for k in range(4, len(out)): - static_param_updated[static_param_name_list[k - - 4]] = out[k] + static_param_updated[ + static_param_name_list[k - 4] + ] = out[k] if _in_legacy_dygraph(): - np.testing.assert_array_equal(static_avg_cost_value, - dy_avg_cost_value) - np.testing.assert_array_equal(static_sum_cost_value, - dy_sum_cost_value) - 
np.testing.assert_array_equal(static_predict_value, - dy_predict_value) - np.testing.assert_array_equal(static_token_num_value, - dy_token_num_value) + np.testing.assert_array_equal( + static_avg_cost_value, dy_avg_cost_value + ) + np.testing.assert_array_equal( + static_sum_cost_value, dy_sum_cost_value + ) + np.testing.assert_array_equal( + static_predict_value, dy_predict_value + ) + np.testing.assert_array_equal( + static_token_num_value, dy_token_num_value + ) for key, value in static_param_init.items(): np.testing.assert_array_equal(value, dy_param_init[key]) @@ -1147,33 +1342,45 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): # compare eager result with imperative result with guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': False}) - dy_avg_cost_value, dy_sum_cost_value, dy_predict_value, dy_token_num_value, \ - dy_param_init, dy_param_updated = run_dygraph() + ( + dy_avg_cost_value, + dy_sum_cost_value, + dy_predict_value, + dy_token_num_value, + dy_param_init, + dy_param_updated, + ) = run_dygraph() with guard(): with _test_eager_guard(): - eager_avg_cost_value, eager_sum_cost_value, eager_predict_value, eager_token_num_value, \ - eager_param_init, eager_param_updated = run_dygraph() - np.testing.assert_allclose(dy_avg_cost_value, - eager_avg_cost_value, - rtol=1e-05) - np.testing.assert_allclose(dy_sum_cost_value, - eager_sum_cost_value, - rtol=1e-05) - - np.testing.assert_allclose(dy_predict_value, - eager_predict_value, - rtol=1e-05) - np.testing.assert_allclose(dy_token_num_value, - eager_token_num_value, - rtol=1e-05) + ( + eager_avg_cost_value, + eager_sum_cost_value, + eager_predict_value, + eager_token_num_value, + eager_param_init, + eager_param_updated, + ) = run_dygraph() + np.testing.assert_allclose( + dy_avg_cost_value, eager_avg_cost_value, rtol=1e-05 + ) + np.testing.assert_allclose( + dy_sum_cost_value, eager_sum_cost_value, rtol=1e-05 + ) + + np.testing.assert_allclose( + dy_predict_value, eager_predict_value, rtol=1e-05 + ) + np.testing.assert_allclose( + dy_token_num_value, eager_token_num_value, rtol=1e-05 + ) for key, value in static_param_init.items(): np.testing.assert_array_equal(value, eager_param_init[key]) for key, value in dy_param_updated.items(): - np.testing.assert_allclose(value, - eager_param_updated[key], - rtol=1e-05) + np.testing.assert_allclose( + value, eager_param_updated[key], rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py index 7f0ea807271bec02bffab980256adbedc0eed341..ebebe754b217652598b219f81da746443aeed270 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard def _dygraph_guard_(func): - def __impl__(*args, **kwargs): if fluid._non_static_mode(): return func(*args, **kwargs) @@ -43,36 +42,37 @@ def random_var(size, low=-1, high=1, dtype='float32'): class TestDygraphTripleGradMatmul(TestCase): - def test_matmul_triple_grad(self): input_numpy = np.ones([3, 3]) * 2 with _test_eager_guard(): - x = paddle.to_tensor(input_numpy, - stop_gradient=False, - dtype='float32') - y = paddle.to_tensor(input_numpy, - stop_gradient=False, - dtype='float32') + x = paddle.to_tensor( + input_numpy, stop_gradient=False, dtype='float32' + ) + y = paddle.to_tensor( + input_numpy, stop_gradient=False, dtype='float32' + ) out = 
paddle.matmul(x, y, False, False) - new_out_g = paddle.to_tensor(np.ones([3, 3]), - stop_gradient=False, - dtype='float32') - new_x_g, new_y_g = paddle.grad([out], [x, y], [new_out_g], - retain_graph=True, - create_graph=True) - - new_x_g_g = paddle.to_tensor(np.ones([3, 3]), - stop_gradient=False, - dtype='float32') - new_y_g_g = paddle.to_tensor(np.ones([3, 3]), - stop_gradient=False, - dtype='float32') - new_a, new_b, new_c = paddle.grad([new_x_g, new_y_g], - [x, y, new_out_g], - [new_x_g_g, new_y_g_g], - retain_graph=True, - create_graph=True) + new_out_g = paddle.to_tensor( + np.ones([3, 3]), stop_gradient=False, dtype='float32' + ) + new_x_g, new_y_g = paddle.grad( + [out], [x, y], [new_out_g], retain_graph=True, create_graph=True + ) + + new_x_g_g = paddle.to_tensor( + np.ones([3, 3]), stop_gradient=False, dtype='float32' + ) + new_y_g_g = paddle.to_tensor( + np.ones([3, 3]), stop_gradient=False, dtype='float32' + ) + new_a, new_b, new_c = paddle.grad( + [new_x_g, new_y_g], + [x, y, new_out_g], + [new_x_g_g, new_y_g_g], + retain_graph=True, + create_graph=True, + ) new_a.backward() @@ -108,27 +108,30 @@ class TestDygraphTripleGradMatmul(TestCase): class TestDygraphTripleGrad(TestCase): - def setUp(self): self.sort_sum_gradient = False self.shape = [5, 5] - def grad(self, - outputs, - inputs, - grad_outputs=None, - no_grad_vars=None, - retain_graph=None, - create_graph=False, - allow_unused=False): + def grad( + self, + outputs, + inputs, + grad_outputs=None, + no_grad_vars=None, + retain_graph=None, + create_graph=False, + allow_unused=False, + ): fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad(outputs=outputs, - inputs=inputs, - grad_outputs=grad_outputs, - no_grad_vars=no_grad_vars, - retain_graph=retain_graph, - create_graph=create_graph, - allow_unused=allow_unused) + return fluid.dygraph.grad( + outputs=outputs, + inputs=inputs, + grad_outputs=grad_outputs, + no_grad_vars=no_grad_vars, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=allow_unused, + ) @dygraph_guard def func_exception(self): @@ -150,12 +153,16 @@ class TestDygraphTripleGrad(TestCase): self.grad([random_var(shape)], [1]) with self.assertRaises(AssertionError): - self.grad([random_var(shape), random_var(shape)], - [random_var(shape)], [random_var(shape)]) + self.grad( + [random_var(shape), random_var(shape)], + [random_var(shape)], + [random_var(shape)], + ) with self.assertRaises(AssertionError): - self.grad([random_var(shape)], [random_var(shape)], - no_grad_vars=[1]) + self.grad( + [random_var(shape)], [random_var(shape)], no_grad_vars=[1] + ) with self.assertRaises(AssertionError): self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1) @@ -178,34 +185,41 @@ class TestDygraphTripleGrad(TestCase): out = fluid.layers.sigmoid(paddle.matmul(x, y) + z) out_np = out.numpy() - dx_actual, = self.grad([out], [x], create_graph=True) + (dx_actual,) = self.grad([out], [x], create_graph=True) # Theoritical result based on math calculation dout = np.ones(self.shape).astype('float32') - dx_expected = np.matmul(dout * out_np * (1 - out_np), - np.transpose(y_np)) + dx_expected = np.matmul( + dout * out_np * (1 - out_np), np.transpose(y_np) + ) np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) - ddx_actual, = self.grad([dx_actual], [x], create_graph=True) + (ddx_actual,) = self.grad([dx_actual], [x], create_graph=True) # Theoritical result based on math calculation DDY = np.zeros(self.shape).astype('float32') DDX = 
np.ones(self.shape).astype('float32') - double_grad_tmp1 = np.matmul(dout * out_np * (1 - out_np), - np.transpose(DDY)) + double_grad_tmp1 = np.matmul( + dout * out_np * (1 - out_np), np.transpose(DDY) + ) double_grad_tmp2 = np.matmul(DDX, y_np) + np.matmul(x_np, DDY) double_grad_tmp3 = ( - 1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) - ddx_expected = double_grad_tmp1 + np.matmul(double_grad_tmp3, - np.transpose(y_np)) + (1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) + ) + ddx_expected = double_grad_tmp1 + np.matmul( + double_grad_tmp3, np.transpose(y_np) + ) np.testing.assert_allclose(ddx_actual.numpy(), ddx_expected, rtol=1e-05) # Theoritical result based on math calculation d_ddout = np.zeros(self.shape).astype('float32') tmp0 = np.matmul(DDX, y_np) + np.matmul(x_np, DDY) tmp1 = (1 - 2 * out_np) * ((1 - 2 * out_np) * dout * tmp0 * tmp0) - tmp2 = tmp0 * (1 - 2 * out_np) * d_ddout - 2 * dout * ( - 1 - out_np) * out_np * tmp0 * tmp0 - dddx_expected = np.matmul(((tmp1 + tmp2) * out_np * (1 - out_np)), - np.transpose(y_np)) + tmp2 = ( + tmp0 * (1 - 2 * out_np) * d_ddout + - 2 * dout * (1 - out_np) * out_np * tmp0 * tmp0 + ) + dddx_expected = np.matmul( + ((tmp1 + tmp2) * out_np * (1 - out_np)), np.transpose(y_np) + ) ddx_actual.backward() dddx_grad_actual = x.gradient() @@ -222,29 +236,32 @@ class TestDygraphTripleGrad(TestCase): class TestDygraphTripleGradBradcastCase(TestCase): - def setUp(self): self.sort_sum_gradient = False self.x_shape = [3, 2, 2] self.y_shape = [1, 2, 2] self.z_shape = [2, 2] - def grad(self, - outputs, - inputs, - grad_outputs=None, - no_grad_vars=None, - retain_graph=None, - create_graph=False, - allow_unused=False): + def grad( + self, + outputs, + inputs, + grad_outputs=None, + no_grad_vars=None, + retain_graph=None, + create_graph=False, + allow_unused=False, + ): fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad(outputs=outputs, - inputs=inputs, - grad_outputs=grad_outputs, - no_grad_vars=no_grad_vars, - retain_graph=retain_graph, - create_graph=create_graph, - allow_unused=allow_unused) + return fluid.dygraph.grad( + outputs=outputs, + inputs=inputs, + grad_outputs=grad_outputs, + no_grad_vars=no_grad_vars, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=allow_unused, + ) @dygraph_guard def func_example_with_gradient_and_create_graph(self): @@ -264,34 +281,42 @@ class TestDygraphTripleGradBradcastCase(TestCase): out = fluid.layers.sigmoid(paddle.matmul(x, y) + z) out_np = out.numpy() - dx_actual, = self.grad([out], [x], create_graph=True) + (dx_actual,) = self.grad([out], [x], create_graph=True) # Theoritical result based on math calculation dout = np.ones(self.x_shape).astype('float32') - dx_expected = np.matmul(dout * out_np * (1 - out_np), - np.transpose(y_np, axes=(0, 2, 1))) + dx_expected = np.matmul( + dout * out_np * (1 - out_np), np.transpose(y_np, axes=(0, 2, 1)) + ) np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) - ddx_actual, = self.grad([dx_actual], [x], create_graph=True) + (ddx_actual,) = self.grad([dx_actual], [x], create_graph=True) # Theoritical result based on math calculation DDY = np.zeros(self.y_shape).astype('float32') DDX = np.ones(self.x_shape).astype('float32') - double_grad_tmp1 = np.matmul(dout * out_np * (1 - out_np), - np.transpose(DDY, axes=(0, 2, 1))) + double_grad_tmp1 = np.matmul( + dout * out_np * (1 - out_np), np.transpose(DDY, axes=(0, 2, 1)) + ) double_grad_tmp2 = np.matmul(DDX, y_np) + 
np.matmul(x_np, DDY) double_grad_tmp3 = ( - 1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) + (1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) + ) ddx_expected = double_grad_tmp1 + np.matmul( - double_grad_tmp3, np.transpose(y_np, axes=(0, 2, 1))) + double_grad_tmp3, np.transpose(y_np, axes=(0, 2, 1)) + ) np.testing.assert_allclose(ddx_actual.numpy(), ddx_expected, rtol=1e-05) # Theoritical result based on math calculation d_ddout = np.zeros(self.x_shape).astype('float32') tmp0 = np.matmul(DDX, y_np) + np.matmul(x_np, DDY) tmp1 = (1 - 2 * out_np) * ((1 - 2 * out_np) * dout * tmp0 * tmp0) - tmp2 = tmp0 * (1 - 2 * out_np) * d_ddout - 2 * dout * ( - 1 - out_np) * out_np * tmp0 * tmp0 - dddx_expected = np.matmul(((tmp1 + tmp2) * out_np * (1 - out_np)), - np.transpose(y_np, axes=(0, 2, 1))) + tmp2 = ( + tmp0 * (1 - 2 * out_np) * d_ddout + - 2 * dout * (1 - out_np) * out_np * tmp0 * tmp0 + ) + dddx_expected = np.matmul( + ((tmp1 + tmp2) * out_np * (1 - out_np)), + np.transpose(y_np, axes=(0, 2, 1)), + ) ddx_actual.backward() dddx_grad_actual = x.gradient() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py index 2cc157ae050cf9edc9406e37f86641b324600f99..9d7ece633604d1e09503f78ca9469793e6923a15 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestImperativeUsingNonZeroGpu(unittest.TestCase): - def run_main(self, np_arr, place): with guard(place): var = to_variable(np_arr) diff --git a/python/paddle/fluid/tests/unittests/test_increment.py b/python/paddle/fluid/tests/unittests/test_increment.py index 34c7af4ac081e315c1694b8ef917fc2f08febfa0..4e435e55794e6a9dc3b19e704cda15d9bb8c94b5 100755 --- a/python/paddle/fluid/tests/unittests/test_increment.py +++ b/python/paddle/fluid/tests/unittests/test_increment.py @@ -20,12 +20,11 @@ import paddle.fluid as fluid class TestIncrement(unittest.TestCase): - def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.layers.fill_constant(shape=[1], - dtype='int64', - value=5) + input = fluid.layers.fill_constant( + shape=[1], dtype='int64', value=5 + ) expected_result = np.array([8], dtype='int64') output = paddle.tensor.math.increment(input, value=3) @@ -41,7 +40,6 @@ class TestIncrement(unittest.TestCase): class TestInplaceApiWithDataTransform(unittest.TestCase): - def test_increment(self): if fluid.core.is_compiled_with_cuda(): paddle.enable_static() @@ -50,7 +48,7 @@ class TestInplaceApiWithDataTransform(unittest.TestCase): with paddle.fluid.device_guard("cpu"): x = paddle.increment(x) exe = paddle.static.Executor(paddle.CUDAPlace(0)) - a, = exe.run(paddle.static.default_main_program(), fetch_list=[x]) + (a,) = exe.run(paddle.static.default_main_program(), fetch_list=[x]) paddle.disable_static() self.assertEqual(a[0], 1) diff --git a/python/paddle/fluid/tests/unittests/test_index_add_op.py b/python/paddle/fluid/tests/unittests/test_index_add_op.py index f3b799f0d3501a94f81352640bd9dcc34be0a49e..c54479b3529683ef47fc8ed02e0f399d12261635 100644 --- a/python/paddle/fluid/tests/unittests/test_index_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_add_op.py @@ -19,8 +19,9 @@ from op_test import OpTest from paddle.fluid import Program -def compute_index_add_ref(axis, x_shape, 
x_np, add_value_shape, add_value_np, - index_size, index_np): +def compute_index_add_ref( + axis, x_shape, x_np, add_value_shape, add_value_np, index_size, index_np +): if axis < 0: axis = axis + len(x_shape) if axis != 0: @@ -28,11 +29,13 @@ def compute_index_add_ref(axis, x_shape, x_np, add_value_shape, add_value_np, x_reshape = [outer_loop] + list(x_shape[axis:]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) - add_value_reshape = [np.prod(add_value_shape[:axis]).astype(int) - ] + list(add_value_shape[axis:]) + add_value_reshape = [ + np.prod(add_value_shape[:axis]).astype(int) + ] + list(add_value_shape[axis:]) - add_value_np_reshape = np.reshape(add_value_np, - tuple(add_value_reshape)) + add_value_np_reshape = np.reshape( + add_value_np, tuple(add_value_reshape) + ) else: x_np_reshape = x_np add_value_np_reshape = add_value_np @@ -54,23 +57,29 @@ def raw_index_add(x, index, value, axis): class TestIndexAddOp(OpTest): - def setUp(self): self.python_api = raw_index_add self.op_type = "index_add" self.init_dtype_type() - index_np = np.random.randint(low=0, - high=self.x_shape[self.axis], - size=self.index_size) + index_np = np.random.randint( + low=0, high=self.x_shape[self.axis], size=self.index_size + ) x_np = np.random.random(self.x_shape).astype(self.x_type) add_value_np = np.random.random(self.add_value_shape).astype( - self.x_type) + self.x_type + ) self.inputs = {'X': x_np, 'Index': index_np, 'AddValue': add_value_np} self.attrs = {'axis': self.axis} - out = compute_index_add_ref(self.axis, self.x_shape, x_np, - self.add_value_shape, add_value_np, - self.index_size, index_np) + out = compute_index_add_ref( + self.axis, + self.x_shape, + x_np, + self.add_value_shape, + add_value_np, + self.index_size, + index_np, + ) self.outputs = {'Out': out} def init_dtype_type(self): @@ -89,7 +98,6 @@ class TestIndexAddOp(OpTest): class TestIndexAddAPI(unittest.TestCase): - def setUp(self): self.setType() self.setPlace() @@ -126,11 +134,11 @@ class TestIndexAddAPI(unittest.TestCase): self.x_np = np.random.random(self.x_shape).astype(self.x_type) self.add_value_np = np.random.random(self.add_value_shape).astype( - self.x_type) - self.index_np = np.random.randint(low=0, - high=self.x_shape[axis], - size=self.index_size).astype( - self.index_type) + self.x_type + ) + self.index_np = np.random.randint( + low=0, high=self.x_shape[axis], size=self.index_size + ).astype(self.index_type) if self.check_backward: self.dout_np = np.random.random(self.x_shape).astype(self.x_type) @@ -154,36 +162,47 @@ class TestIndexAddAPI(unittest.TestCase): add_value = paddle.to_tensor(self.add_value_np, stop_gradient=False) out = paddle.index_add(input_tensor, index, self.axis, add_value) - ref_out = compute_index_add_ref(self.axis, self.x_shape, self.x_np, - self.add_value_shape, self.add_value_np, - self.index_size, self.index_np) - np.testing.assert_allclose(ref_out, - out.numpy(), - rtol=self.rtol, - atol=self.atol) + ref_out = compute_index_add_ref( + self.axis, + self.x_shape, + self.x_np, + self.add_value_shape, + self.add_value_np, + self.index_size, + self.index_np, + ) + np.testing.assert_allclose( + ref_out, out.numpy(), rtol=self.rtol, atol=self.atol + ) if self.check_backward: dout_tensor = paddle.to_tensor(self.dout_np) paddle.autograd.backward([out], [dout_tensor], retain_graph=True) - ref_x_grad, ref_add_value_grad = self.compute_index_add_backward_ref( + ( + ref_x_grad, + ref_add_value_grad, + ) = self.compute_index_add_backward_ref() + np.testing.assert_allclose( + ref_x_grad, + 
input_tensor.grad.numpy(), + rtol=self.rtol, + atol=self.atol, + ) + np.testing.assert_allclose( + ref_add_value_grad, + add_value.grad.numpy(), + rtol=self.rtol, + atol=self.atol, ) - np.testing.assert_allclose(ref_x_grad, - input_tensor.grad.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(ref_add_value_grad, - add_value.grad.numpy(), - rtol=self.rtol, - atol=self.atol) def run_static(self, device): x = paddle.static.data(name='X', shape=self.x_shape, dtype=self.x_type) - index = paddle.static.data(name='Index', - shape=self.index_shape, - dtype=self.index_type) - add_value = paddle.static.data(name='AddValue', - shape=self.add_value_shape, - dtype=self.x_type) + index = paddle.static.data( + name='Index', shape=self.index_shape, dtype=self.index_type + ) + add_value = paddle.static.data( + name='AddValue', shape=self.add_value_shape, dtype=self.x_type + ) out = paddle.index_add(x, index, self.axis, add_value) @@ -193,19 +212,22 @@ class TestIndexAddAPI(unittest.TestCase): place = paddle.CUDAPlace(0) else: raise TypeError( - "paddle.index_add api only support cpu and gpu device now.") + "paddle.index_add api only support cpu and gpu device now." + ) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) - res = exe.run(paddle.static.default_main_program(), - feed={ - "X": self.x_np, - "Index": self.index_np, - "AddValue": self.add_value_np, - }, - fetch_list=[out.name], - return_numpy=False) + res = exe.run( + paddle.static.default_main_program(), + feed={ + "X": self.x_np, + "Index": self.index_np, + "AddValue": self.add_value_np, + }, + fetch_list=[out.name], + return_numpy=False, + ) return res def test_static(self): @@ -213,14 +235,18 @@ class TestIndexAddAPI(unittest.TestCase): for device in self.place: with paddle.static.program_guard(Program()): out = self.run_static(device) - ref_out = compute_index_add_ref(self.axis, self.x_shape, self.x_np, - self.add_value_shape, - self.add_value_np, self.index_size, - self.index_np) - np.testing.assert_allclose(ref_out, - np.array(out[0]), - rtol=self.rtol, - atol=self.atol) + ref_out = compute_index_add_ref( + self.axis, + self.x_shape, + self.x_np, + self.add_value_shape, + self.add_value_np, + self.index_size, + self.index_np, + ) + np.testing.assert_allclose( + ref_out, np.array(out[0]), rtol=self.rtol, atol=self.atol + ) def test_dynamic(self): paddle.disable_static() @@ -229,14 +255,12 @@ class TestIndexAddAPI(unittest.TestCase): class TestIndexAddAPIMoreType(TestIndexAddAPI): - def setType(self): self.x_type = np.float64 self.index_type = np.int64 class TestIndexAddAPICase2(TestIndexAddAPI): - def config(self): self.axis = 1 self.x_shape = (100, 100, 5) @@ -245,7 +269,6 @@ class TestIndexAddAPICase2(TestIndexAddAPI): class TestIndexAddAPICase3(TestIndexAddAPI): - def config(self): self.axis = 2 self.x_shape = (100, 100, 25) @@ -254,16 +277,14 @@ class TestIndexAddAPICase3(TestIndexAddAPI): class TestIndexAddAPICase4(TestIndexAddAPI): - def config(self): self.axis = 0 - self.x_shape = (10, ) + self.x_shape = (10,) self.index_size = 4 - self.add_value_shape = (4, ) + self.add_value_shape = (4,) class TestIndexAddAPICase5(TestIndexAddAPI): - def config(self): self.axis = -1 self.x_shape = (10, 10) diff --git a/python/paddle/fluid/tests/unittests/test_index_sample_op.py b/python/paddle/fluid/tests/unittests/test_index_sample_op.py index 7c4be22305b8c14449530e7aae53488135b61811..550ddfe344f3eb7961ab72d80f585e2111aefdf0 100644 --- 
a/python/paddle/fluid/tests/unittests/test_index_sample_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_sample_op.py @@ -20,16 +20,14 @@ from op_test import OpTest class TestIndexSampleOp(OpTest): - def setUp(self): self.op_type = "index_sample" self.python_api = paddle.index_sample self.config() xnp = np.random.random(self.x_shape).astype(self.x_type) - indexnp = np.random.randint(low=0, - high=self.x_shape[1], - size=self.index_shape).astype( - self.index_type) + indexnp = np.random.randint( + low=0, high=self.x_shape[1], size=self.index_shape + ).astype(self.index_type) self.inputs = {'X': xnp, 'Index': indexnp} index_array = [] for i in range(self.index_shape[0]): @@ -56,7 +54,6 @@ class TestIndexSampleOp(OpTest): class TestCase1(TestIndexSampleOp): - def config(self): """ For one dimension input @@ -68,7 +65,6 @@ class TestCase1(TestIndexSampleOp): class TestCase2(TestIndexSampleOp): - def config(self): """ For int64_t index type @@ -80,7 +76,6 @@ class TestCase2(TestIndexSampleOp): class TestCase3(TestIndexSampleOp): - def config(self): """ For int index type @@ -92,7 +87,6 @@ class TestCase3(TestIndexSampleOp): class TestCase4(TestIndexSampleOp): - def config(self): """ For int64 index type @@ -104,7 +98,6 @@ class TestCase4(TestIndexSampleOp): class TestIndexSampleShape(unittest.TestCase): - def test_shape(self): paddle.enable_static() # create x value @@ -115,8 +108,9 @@ class TestIndexSampleShape(unittest.TestCase): # create index value index_shape = (2, 3) index_type = "int32" - index_np = np.random.randint(low=0, high=x_shape[1], - size=index_shape).astype(index_type) + index_np = np.random.randint( + low=0, high=x_shape[1], size=index_shape + ).astype(index_type) x = fluid.data(name='x', shape=[-1, 5], dtype='float64') index = fluid.data(name='index', shape=[-1, 3], dtype='int32') @@ -131,18 +125,24 @@ class TestIndexSampleShape(unittest.TestCase): class TestIndexSampleDynamic(unittest.TestCase): - def test_result(self): with fluid.dygraph.guard(): - x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]], - dtype='float32') - index = paddle.to_tensor([[0, 1, 2], [1, 2, 3], [0, 0, 0]], - dtype='int32') + x = paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ], + dtype='float32', + ) + index = paddle.to_tensor( + [[0, 1, 2], [1, 2, 3], [0, 0, 0]], dtype='int32' + ) out_z1 = paddle.index_sample(x, index) - except_output = np.array([[1.0, 2.0, 3.0], [6.0, 7.0, 8.0], - [9.0, 9.0, 9.0]]) + except_output = np.array( + [[1.0, 2.0, 3.0], [6.0, 7.0, 8.0], [9.0, 9.0, 9.0]] + ) assert out_z1.numpy().all() == except_output.all() diff --git a/python/paddle/fluid/tests/unittests/test_index_select_op.py b/python/paddle/fluid/tests/unittests/test_index_select_op.py index 2d73a29e2c936f200a0680520a9095751b3f82fc..edb2dc711822a5978d9d06e554d86c16e4d0ac24 100644 --- a/python/paddle/fluid/tests/unittests/test_index_select_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_select_op.py @@ -21,19 +21,18 @@ from paddle.fluid import Program, program_guard class TestIndexSelectOp(OpTest): - def setUp(self): self.python_api = paddle.index_select self.op_type = "index_select" self.init_dtype_type() - index_np = np.random.randint(low=0, - high=self.x_shape[self.dim], - size=self.index_size) + index_np = np.random.randint( + low=0, high=self.x_shape[self.dim], size=self.index_size + ) x_np = np.random.random(self.x_shape).astype(self.x_type) self.inputs = {'X': x_np, 'Index': index_np} self.attrs = 
{'dim': self.dim} - outer_loop = np.prod(self.x_shape[:self.dim]) - x_reshape = [outer_loop] + list(self.x_shape[self.dim:]) + outer_loop = np.prod(self.x_shape[: self.dim]) + x_reshape = [outer_loop] + list(self.x_shape[self.dim :]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) out_list = [] for i in range(outer_loop): @@ -61,7 +60,6 @@ class TestIndexSelectOp(OpTest): class TestIndexSelectOpCase2(TestIndexSelectOp): - def init_dtype_type(self): self.x_type = np.float32 self.index_type = np.int32 @@ -71,7 +69,6 @@ class TestIndexSelectOpCase2(TestIndexSelectOp): class TestIndexSelectOpCaseSingleThread(TestIndexSelectOp): - def init_dtype_type(self): if fluid.is_compiled_with_cuda(): fluid.set_flags({'FLAGS_cudnn_deterministic': True}) @@ -83,10 +80,14 @@ class TestIndexSelectOpCaseSingleThread(TestIndexSelectOp): class TestIndexSelectAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]) + self.data_x = np.array( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ] + ) self.data_index = np.array([0, 1, 1]).astype('int32') def test_index_select_api(self): @@ -95,39 +96,37 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='index', - shape=[3], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='index', shape=[3], dtype='int32', append_batch_size=False + ) z = paddle.index_select(x, index, axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 10.0]]) + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='index', - shape=[3], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='index', shape=[3], dtype='int32', append_batch_size=False + ) z = paddle.index_select(x, index) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]) + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): @@ -138,8 +137,9 @@ class TestIndexSelectAPI(unittest.TestCase): index = fluid.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]) + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: @@ -148,8 +148,9 @@ class TestIndexSelectAPI(unittest.TestCase): index = 
fluid.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index, axis=1) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 10.0]]) + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py b/python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py index 51f6cd53ca37654e850f43e8587b351a4576742c..659aa36c334d3734fdf235eb69e372d4263c55f4 100644 --- a/python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py +++ b/python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py @@ -20,12 +20,17 @@ import paddle.fluid.core as core class TestInferNoNeedBufferSlots(unittest.TestCase): - def net(self): - x1 = fluid.default_main_program().global_block().create_var( - dtype="float32", shape=[1], lod_level=0, name="x1") - x2 = fluid.default_main_program().global_block().create_var( - dtype="float32", shape=[1], lod_level=0, name="x2") + x1 = ( + fluid.default_main_program() + .global_block() + .create_var(dtype="float32", shape=[1], lod_level=0, name="x1") + ) + x2 = ( + fluid.default_main_program() + .global_block() + .create_var(dtype="float32", shape=[1], lod_level=0, name="x2") + ) x = fluid.layers.elementwise_add(x1, x2) return x @@ -52,18 +57,27 @@ class TestInferNoNeedBufferSlots(unittest.TestCase): if idx == 0: # elementwise_add op self.assertEqual( - core.infer_no_need_buffer_slots(op.type, inputs, outputs, - attrs), set([])) + core.infer_no_need_buffer_slots( + op.type, inputs, outputs, attrs + ), + set([]), + ) elif idx == 1: # fill constant op self.assertEqual( - core.infer_no_need_buffer_slots(op.type, inputs, outputs, - attrs), set([])) + core.infer_no_need_buffer_slots( + op.type, inputs, outputs, attrs + ), + set([]), + ) else: # elementwise_add_grad op self.assertEqual( - core.infer_no_need_buffer_slots(op.type, inputs, outputs, - attrs), set(['Y', 'X'])) + core.infer_no_need_buffer_slots( + op.type, inputs, outputs, attrs + ), + set(['Y', 'X']), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_infer_shape.py b/python/paddle/fluid/tests/unittests/test_infer_shape.py index f4c718bf80ad673e40c221b642ef32f7da0a5e6f..df2ba25afbca448425838179b9ba02dde332e910 100644 --- a/python/paddle/fluid/tests/unittests/test_infer_shape.py +++ b/python/paddle/fluid/tests/unittests/test_infer_shape.py @@ -18,7 +18,6 @@ import paddle.fluid.core as core class TestInferShape(unittest.TestCase): - def test_sum_op(self): prog = core.ProgramDesc() self.assertIsNotNone(prog) diff --git a/python/paddle/fluid/tests/unittests/test_inference_api.py b/python/paddle/fluid/tests/unittests/test_inference_api.py index 27ddf815aa0d09fca4083386bfc0dea56bcd75d9..e62b258fefefde7340e3e0bf4d026d20023d7bec 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_api.py +++ b/python/paddle/fluid/tests/unittests/test_inference_api.py @@ -25,37 +25,46 @@ from paddle.inference import get_trt_compile_version, get_trt_runtime_version class TestInferenceApi(unittest.TestCase): - def test_inference_api(self): tensor32 = np.random.randint(10, 20, size=[20, 2]).astype('int32') paddletensor32 = PaddleTensor(tensor32) dtype32 = paddletensor32.dtype self.assertEqual(dtype32, PaddleDType.INT32) - self.assertEqual(paddletensor32.data.tolist('int32'), - tensor32.ravel().tolist()) + self.assertEqual( + paddletensor32.data.tolist('int32'), 
tensor32.ravel().tolist() + ) paddletensor32.data.reset(tensor32) - self.assertEqual(paddletensor32.as_ndarray().ravel().tolist(), - tensor32.ravel().tolist()) + self.assertEqual( + paddletensor32.as_ndarray().ravel().tolist(), + tensor32.ravel().tolist(), + ) tensor64 = np.random.randint(10, 20, size=[20, 2]).astype('int64') paddletensor64 = PaddleTensor(tensor64) dtype64 = paddletensor64.dtype self.assertEqual(dtype64, PaddleDType.INT64) - self.assertEqual(paddletensor64.data.tolist('int64'), - tensor64.ravel().tolist()) + self.assertEqual( + paddletensor64.data.tolist('int64'), tensor64.ravel().tolist() + ) paddletensor64.data.reset(tensor64) - self.assertEqual(paddletensor64.as_ndarray().ravel().tolist(), - tensor64.ravel().tolist()) + self.assertEqual( + paddletensor64.as_ndarray().ravel().tolist(), + tensor64.ravel().tolist(), + ) tensor_float = np.random.randn(20, 2).astype('float32') paddletensor_float = PaddleTensor(tensor_float) dtype_float = paddletensor_float.dtype self.assertEqual(dtype_float, PaddleDType.FLOAT32) - self.assertEqual(paddletensor_float.data.tolist('float32'), - tensor_float.ravel().tolist()) + self.assertEqual( + paddletensor_float.data.tolist('float32'), + tensor_float.ravel().tolist(), + ) paddletensor_float.data.reset(tensor_float) - self.assertEqual(paddletensor_float.as_ndarray().ravel().tolist(), - tensor_float.ravel().tolist()) + self.assertEqual( + paddletensor_float.as_ndarray().ravel().tolist(), + tensor_float.ravel().tolist(), + ) def get_sample_model(): @@ -66,24 +75,26 @@ def get_sample_model(): startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): data = fluid.data(name="data", shape=[-1, 6, 64, 64], dtype="float32") - conv_out = fluid.layers.conv2d(input=data, - num_filters=3, - filter_size=3, - groups=1, - padding=0, - bias_attr=False, - act=None) + conv_out = fluid.layers.conv2d( + input=data, + num_filters=3, + filter_size=3, + groups=1, + padding=0, + bias_attr=False, + act=None, + ) exe.run(startup_program) - serialized_program = paddle.static.serialize_program(data, - conv_out, - program=main_program) + serialized_program = paddle.static.serialize_program( + data, conv_out, program=main_program + ) serialized_params = paddle.static.serialize_persistables( - data, conv_out, executor=exe, program=main_program) + data, conv_out, executor=exe, program=main_program + ) return serialized_program, serialized_params class TestInferenceBaseAPI(unittest.TestCase): - def get_config(self, model, params): config = Config() config.set_model_buffer(model, len(model), params, len(params)) diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index f63368dffc89037b59249b7ed8090c89aec9a265..bf9ac53f99c53d8927a0370d37712c10bf46c607 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -28,13 +28,16 @@ import paddle.fluid.layers as layers import paddle.fluid.optimizer as optimizer from paddle.fluid.compiler import CompiledProgram from paddle.fluid.framework import Program, program_guard -from paddle.fluid.io import save_inference_model, load_inference_model, save_persistables +from paddle.fluid.io import ( + save_inference_model, + load_inference_model, + save_persistables, +) paddle.enable_static() class InferModel(object): - def __init__(self, list): self.program = list[0] self.feed_var_names = list[1] @@ -42,7 +45,6 @@ class 
InferModel(object): class TestBook(unittest.TestCase): - def test_fit_line_inference_model(self): root_path = tempfile.TemporaryDirectory() MODEL_DIR = os.path.join(root_path.name, "inference_model") @@ -69,31 +71,36 @@ class TestBook(unittest.TestCase): exe.run(init_program, feed={}, fetch_list=[]) for i in range(100): - tensor_x = np.array([[1, 1], [1, 2], [3, 4], [5, - 2]]).astype("float32") + tensor_x = np.array([[1, 1], [1, 2], [3, 4], [5, 2]]).astype( + "float32" + ) tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32") - exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost]) + exe.run( + program, + feed={'x': tensor_x, 'y': tensor_y}, + fetch_list=[avg_cost], + ) # Separated model and unified model save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program) - save_inference_model(UNI_MODEL_DIR, ["x", "y"], [avg_cost], exe, - program, 'model', 'params') + save_inference_model( + UNI_MODEL_DIR, + ["x", "y"], + [avg_cost], + exe, + program, + 'model', + 'params', + ) main_program = program.clone()._prune_with_input( - feeded_var_names=["x", "y"], targets=[avg_cost]) + feeded_var_names=["x", "y"], targets=[avg_cost] + ) params_str = save_persistables(exe, None, main_program, None) - expected = exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost])[0] + expected = exe.run( + program, feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost] + )[0] importlib.reload(executor) # reload to build a new scope @@ -101,15 +108,18 @@ class TestBook(unittest.TestCase): with open(os.path.join(UNI_MODEL_DIR, 'model'), "rb") as f: model_str = f.read() model_1 = InferModel( - load_inference_model(None, exe, model_str, params_str)) + load_inference_model(None, exe, model_str, params_str) + ) for model in [model_0, model_1]: - outs = exe.run(model.program, - feed={ - model.feed_var_names[0]: tensor_x, - model.feed_var_names[1]: tensor_y - }, - fetch_list=model.fetch_vars) + outs = exe.run( + model.program, + feed={ + model.feed_var_names[0]: tensor_x, + model.feed_var_names[1]: tensor_y, + }, + fetch_list=model.fetch_vars, + ) actual = outs[0] self.assertEqual(model.feed_var_names, ["x", "y"]) @@ -119,12 +129,17 @@ class TestBook(unittest.TestCase): root_path.cleanup() - self.assertRaises(ValueError, fluid.io.load_inference_model, None, exe, - model_str, None) + self.assertRaises( + ValueError, + fluid.io.load_inference_model, + None, + exe, + model_str, + None, + ) class TestSaveInferenceModel(unittest.TestCase): - def test_save_inference_model(self): root_path = tempfile.TemporaryDirectory() MODEL_DIR = os.path.join(root_path.name, "inference_model2") @@ -160,8 +175,9 @@ class TestSaveInferenceModel(unittest.TestCase): y = layers.data(name='y', shape=[1], dtype='int32') predict = fluid.layers.fc(input=x, size=2, act='softmax') acc = fluid.layers.accuracy(input=predict, label=y) - auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, - label=y) + auc_var, batch_auc_var, auc_states = fluid.layers.auc( + input=predict, label=y + ) cost = fluid.layers.cross_entropy(input=predict, label=y) avg_cost = paddle.mean(x=cost) @@ -170,8 +186,9 @@ class TestSaveInferenceModel(unittest.TestCase): exe.run(init_program, feed={}, fetch_list=[]) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, - program) + save_inference_model( + MODEL_DIR, ["x", "y"], [avg_cost], exe, program + ) root_path.cleanup() expected_warn = "please 
ensure that you have set the auc states to zeros before saving inference model" self.assertTrue(len(w) > 0) @@ -179,7 +196,6 @@ class TestSaveInferenceModel(unittest.TestCase): class TestInstance(unittest.TestCase): - def test_save_inference_model(self): root_path = tempfile.TemporaryDirectory() MODEL_DIR = os.path.join(root_path.name, "inference_model3") @@ -203,16 +219,19 @@ class TestInstance(unittest.TestCase): # will print warning message cp_prog = CompiledProgram(program).with_data_parallel( - loss_name=avg_cost.name) + loss_name=avg_cost.name + ) save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, cp_prog) - self.assertRaises(TypeError, save_inference_model, - [MODEL_DIR, ["x", "y"], [avg_cost], [], cp_prog]) + self.assertRaises( + TypeError, + save_inference_model, + [MODEL_DIR, ["x", "y"], [avg_cost], [], cp_prog], + ) root_path.cleanup() class TestSaveInferenceModelNew(unittest.TestCase): - def test_save_and_load_inference_model(self): root_path = tempfile.TemporaryDirectory() MODEL_DIR = os.path.join(root_path.name, "inference_model5") @@ -239,86 +258,143 @@ class TestSaveInferenceModelNew(unittest.TestCase): tensor_x = np.array([[1, 1], [1, 2], [5, 2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7]]).astype("float32") for i in range(3): - exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost]) - - self.assertRaises(ValueError, paddle.static.save_inference_model, None, - ['x', 'y'], [avg_cost], exe) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR + "/", [x, y], [avg_cost], exe) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR, ['x', 'y'], [avg_cost], exe) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR, 'x', [avg_cost], exe) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR, [x, y], ['avg_cost'], exe) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR, [x, y], 'avg_cost', exe) + exe.run( + program, + feed={'x': tensor_x, 'y': tensor_y}, + fetch_list=[avg_cost], + ) + + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + None, + ['x', 'y'], + [avg_cost], + exe, + ) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR + "/", + [x, y], + [avg_cost], + exe, + ) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR, + ['x', 'y'], + [avg_cost], + exe, + ) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR, + 'x', + [avg_cost], + exe, + ) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR, + [x, y], + ['avg_cost'], + exe, + ) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR, + [x, y], + 'avg_cost', + exe, + ) model_path = MODEL_DIR + "_isdir.pdmodel" os.makedirs(model_path) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR + "_isdir", [x, y], [avg_cost], exe) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR + "_isdir", + [x, y], + [avg_cost], + exe, + ) os.rmdir(model_path) params_path = MODEL_DIR + "_isdir.pdmodel" os.makedirs(params_path) - self.assertRaises(ValueError, paddle.static.save_inference_model, - MODEL_DIR + "_isdir", [x, y], [avg_cost], exe) + self.assertRaises( + ValueError, + paddle.static.save_inference_model, + MODEL_DIR + "_isdir", + [x, y], + [avg_cost], + exe, + ) os.rmdir(params_path) - 
paddle.static.io.save_inference_model(MODEL_DIR, [x, y], [avg_cost], - exe) + paddle.static.io.save_inference_model( + MODEL_DIR, [x, y], [avg_cost], exe + ) self.assertTrue(os.path.exists(MODEL_DIR + ".pdmodel")) self.assertTrue(os.path.exists(MODEL_DIR + ".pdiparams")) - expected = exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost])[0] + expected = exe.run( + program, feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost] + )[0] importlib.reload(executor) # reload to build a new scope - self.assertRaises(ValueError, paddle.static.load_inference_model, None, - exe) - self.assertRaises(ValueError, paddle.static.load_inference_model, - MODEL_DIR + "/", exe) - self.assertRaises(ValueError, paddle.static.load_inference_model, - [MODEL_DIR], exe) - self.assertRaises(ValueError, - paddle.static.load_inference_model, - MODEL_DIR, - exe, - pserver_endpoints=None) - self.assertRaises(ValueError, - paddle.static.load_inference_model, - MODEL_DIR, - exe, - unsupported_param=None) - self.assertRaises((TypeError, ValueError), - paddle.static.load_inference_model, - None, - exe, - model_filename="illegal", - params_filename="illegal") - - model = InferModel(paddle.static.io.load_inference_model( - MODEL_DIR, exe)) + self.assertRaises( + ValueError, paddle.static.load_inference_model, None, exe + ) + self.assertRaises( + ValueError, paddle.static.load_inference_model, MODEL_DIR + "/", exe + ) + self.assertRaises( + ValueError, paddle.static.load_inference_model, [MODEL_DIR], exe + ) + self.assertRaises( + ValueError, + paddle.static.load_inference_model, + MODEL_DIR, + exe, + pserver_endpoints=None, + ) + self.assertRaises( + ValueError, + paddle.static.load_inference_model, + MODEL_DIR, + exe, + unsupported_param=None, + ) + self.assertRaises( + (TypeError, ValueError), + paddle.static.load_inference_model, + None, + exe, + model_filename="illegal", + params_filename="illegal", + ) + + model = InferModel( + paddle.static.io.load_inference_model(MODEL_DIR, exe) + ) root_path.cleanup() - outs = exe.run(model.program, - feed={ - model.feed_var_names[0]: tensor_x, - model.feed_var_names[1]: tensor_y - }, - fetch_list=model.fetch_vars) + outs = exe.run( + model.program, + feed={ + model.feed_var_names[0]: tensor_x, + model.feed_var_names[1]: tensor_y, + }, + fetch_list=model.fetch_vars, + ) actual = outs[0] self.assertEqual(model.feed_var_names, ["x", "y"]) @@ -359,12 +435,11 @@ class TestSaveInferenceModelNew(unittest.TestCase): tensor_x = np.array([[1, 1], [1, 2], [5, 2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7]]).astype("float32") for i in range(3): - exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost]) + exe.run( + program, + feed={'x': tensor_x, 'y': tensor_y}, + fetch_list=[avg_cost], + ) # test if return type of serialize_program is bytes res1 = paddle.static.io.serialize_program([x, y], [avg_cost]) @@ -375,8 +450,13 @@ class TestSaveInferenceModelNew(unittest.TestCase): # test if variables in program is empty res = paddle.static.io._serialize_persistables(Program(), None) self.assertEqual(res, None) - self.assertRaises(TypeError, paddle.static.io.deserialize_persistables, - None, None, None) + self.assertRaises( + TypeError, + paddle.static.io.deserialize_persistables, + None, + None, + None, + ) def test_normalize_program(self): init_program = fluid.default_startup_program() @@ -402,34 +482,40 @@ class TestSaveInferenceModelNew(unittest.TestCase): tensor_x = np.array([[1, 1], [1, 2], [5, 
2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7]]).astype("float32") for i in range(3): - exe.run(program, - feed={ - 'x': tensor_x, - 'y': tensor_y - }, - fetch_list=[avg_cost]) + exe.run( + program, + feed={'x': tensor_x, 'y': tensor_y}, + fetch_list=[avg_cost], + ) # test if return type of serialize_program is bytes res = paddle.static.normalize_program(program, [x, y], [avg_cost]) self.assertTrue(isinstance(res, Program)) # test program type - self.assertRaises(TypeError, paddle.static.normalize_program, None, - [x, y], [avg_cost]) + self.assertRaises( + TypeError, paddle.static.normalize_program, None, [x, y], [avg_cost] + ) # test feed_vars type - self.assertRaises(TypeError, paddle.static.normalize_program, program, - 'x', [avg_cost]) + self.assertRaises( + TypeError, paddle.static.normalize_program, program, 'x', [avg_cost] + ) # test fetch_vars type - self.assertRaises(TypeError, paddle.static.normalize_program, program, - [x, y], 'avg_cost') + self.assertRaises( + TypeError, + paddle.static.normalize_program, + program, + [x, y], + 'avg_cost', + ) class TestLoadInferenceModelError(unittest.TestCase): - def test_load_model_not_exist(self): place = core.CPUPlace() exe = executor.Executor(place) - self.assertRaises(ValueError, load_inference_model, - './test_not_exist_dir', exe) + self.assertRaises( + ValueError, load_inference_model, './test_not_exist_dir', exe + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index 24a384b3aa0c730132778503e0bfca3ae87be833..4454d7a9bb8d5dd77de14403a331c5d096fcaf85 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -27,9 +27,11 @@ DELTA = 0.00001 def check_cast_op(op): - return op.type == 'cast' and \ - op.attr('in_dtype') == VarDesc.VarType.FP32 and \ - op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16] + return ( + op.type == 'cast' + and op.attr('in_dtype') == VarDesc.VarType.FP32 + and op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16] + ) def output_hist(out): @@ -41,21 +43,21 @@ def output_hist(out): class TestConstantInitializer(unittest.TestCase): - def test_calculate_gain(self): self.assertEqual(paddle.nn.initializer.calculate_gain('sigmoid'), 1) self.assertEqual(paddle.nn.initializer.calculate_gain('linear'), 1) self.assertEqual(paddle.nn.initializer.calculate_gain('conv2d'), 1) self.assertEqual(paddle.nn.initializer.calculate_gain('tanh'), 5.0 / 3) - self.assertEqual(paddle.nn.initializer.calculate_gain('relu'), - math.sqrt(2.0)) - self.assertEqual(paddle.nn.initializer.calculate_gain('leaky_relu', 1), - 1) + self.assertEqual( + paddle.nn.initializer.calculate_gain('relu'), math.sqrt(2.0) + ) + self.assertEqual( + paddle.nn.initializer.calculate_gain('leaky_relu', 1), 1 + ) self.assertEqual(paddle.nn.initializer.calculate_gain('selu'), 3.0 / 4) def test_constant_initializer_default_value(self, dtype="float32"): - """Test the constant initializer with default value - """ + """Test the constant initializer with default value""" program = framework.Program() block = program.global_block() for _ in range(2): @@ -64,7 +66,8 @@ class TestConstantInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.ConstantInitializer()) + initializer=initializer.ConstantInitializer(), + ) num_ops = 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -73,8 +76,7 
@@ class TestConstantInitializer(unittest.TestCase): return block def test_constant_initializer(self, dtype="float32"): - """Test constant initializer with supplied value - """ + """Test constant initializer with supplied value""" program = framework.Program() block = program.global_block() for _ in range(2): @@ -83,7 +85,8 @@ class TestConstantInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.ConstantInitializer(2.3)) + initializer=initializer.ConstantInitializer(2.3), + ) num_ops = 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -92,32 +95,31 @@ class TestConstantInitializer(unittest.TestCase): return block def test_constant_initializer_fp16(self): - """Test constant initializer with float16 - """ + """Test constant initializer with float16""" self.test_constant_initializer_default_value("float16") self.test_constant_initializer("float16") def test_constant_initializer_bf16(self): """Test constant initializer with bfloat16 - No cast operator has been added here + No cast operator has been added here """ self.test_constant_initializer_default_value("uint16") self.test_constant_initializer("uint16") class TestUniformInitializer(unittest.TestCase): - def test_uniform_initializer_default_value(self, dtype="float32"): - """Test the uniform initializer with default value - """ + """Test the uniform initializer with default value""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer()) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -128,40 +130,42 @@ class TestUniformInitializer(unittest.TestCase): return block def test_uniform_initializer_random_seed(self): - """Test the uniform initializer with manually setting seed - """ + """Test the uniform initializer with manually setting seed""" program = framework.Program() program.random_seed = 123 block = program.global_block() for _ in range(2): - block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="param1", - initializer=initializer.UniformInitializer()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param1", + initializer=initializer.UniformInitializer(), + ) block.create_parameter( dtype="float32", shape=[5, 10], lod_level=0, name="param2", - initializer=initializer.UniformInitializer(seed=456)) + initializer=initializer.UniformInitializer(seed=456), + ) init_op = block.ops[1] self.assertEqual(init_op.attr("seed"), 456) init_op1 = block.ops[0] self.assertEqual(init_op1.attr("seed"), 123) def test_uniform_initializer(self, dtype="float32"): - """Test uniform initializer with supplied attributes - """ + """Test uniform initializer with supplied attributes""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer( - -4.2, 3.1, 123)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(-4.2, 3.1, 123), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -172,17 
+176,17 @@ class TestUniformInitializer(unittest.TestCase): return block def test_uniform_initializer_two_op(self, dtype="float32"): - """Test uniform initializer with supplied attributes - """ + """Test uniform initializer with supplied attributes""" program = framework.Program() block = program.global_block() for i in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer( - -4.2, float(i), 123)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(-4.2, float(i), 123), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op0 = block.ops[0] @@ -193,8 +197,7 @@ class TestUniformInitializer(unittest.TestCase): return block def test_uniform_initializer_fp16(self): - """Test uniform initializer with float16 - """ + """Test uniform initializer with float16""" block = self.test_uniform_initializer_default_value("float16") self.assertTrue(check_cast_op(block.ops[1])) block = self.test_uniform_initializer(dtype="float16") @@ -204,7 +207,7 @@ class TestUniformInitializer(unittest.TestCase): def test_uniform_initializer_bf16(self): """Test uniform initializer with bfloat16 - No cast operator has been added here + No cast operator has been added here """ block = self.test_uniform_initializer_default_value("uint16") block = self.test_uniform_initializer(dtype="uint16") @@ -212,18 +215,18 @@ class TestUniformInitializer(unittest.TestCase): class TestNormalInitializer(unittest.TestCase): - def test_normal_initializer_default_value(self): - """Test the normal initializer with default value - """ + """Test the normal initializer with default value""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.NormalInitializer()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -232,17 +235,17 @@ class TestNormalInitializer(unittest.TestCase): self.assertEqual(init_op.attr('seed'), 0) def test_normal_initializer(self, dtype="float32"): - """Test normal initializer with supplied attributes - """ + """Test normal initializer with supplied attributes""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.NormalInitializer( - 2.3, 1.9, 123)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer(2.3, 1.9, 123), + ) num_ops = 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -253,21 +256,18 @@ class TestNormalInitializer(unittest.TestCase): return block def test_normal_initializer_fp16(self): - """Test normal initializer with float16 - """ + """Test normal initializer with float16""" self.test_normal_initializer("float16") def test_normal_initializer_bf16(self): - """Test normal initializer with bfloat16 - """ + """Test normal initializer with bfloat16""" self.test_normal_initializer("uint16") class TestXavierInitializer(unittest.TestCase): - def test_uniform_xavier_initializer(self): """Test Xavier initializer with uniform distribution 
on - for matrix multiply. + for matrix multiply. """ program = framework.Program() block = program.global_block() @@ -277,7 +277,8 @@ class TestXavierInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.XavierInitializer()) + initializer=initializer.XavierInitializer(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -288,7 +289,7 @@ class TestXavierInitializer(unittest.TestCase): def test_uniform_xavier_initializer_conv(self): """Test Xavier initializer with uniform distribution on - for convolutions. + for convolutions. """ program = framework.Program() block = program.global_block() @@ -298,20 +299,22 @@ class TestXavierInitializer(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.XavierInitializer()) + initializer=initializer.XavierInitializer(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') receptive_field_size = float(15 * 20) limit = np.sqrt( - 6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size)) + 6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size) + ) self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA) self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA) self.assertEqual(init_op.attr('seed'), 0) def test_normal_xavier_initializer(self): """Test Xavier initializer with normal distribution on - for matrix multiply. + for matrix multiply. """ program = framework.Program() block = program.global_block() @@ -321,7 +324,8 @@ class TestXavierInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.XavierInitializer(uniform=False)) + initializer=initializer.XavierInitializer(uniform=False), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -332,7 +336,7 @@ class TestXavierInitializer(unittest.TestCase): def test_normal_xavier_initializer_conv(self): """Test Xavier initializer with normal distribution on - for convolutions. + for convolutions. 
""" program = framework.Program() block = program.global_block() @@ -342,36 +346,40 @@ class TestXavierInitializer(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.XavierInitializer(uniform=False)) + initializer=initializer.XavierInitializer(uniform=False), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') receptive_field_size = float(15 * 20) std = np.sqrt( - 2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size)) + 2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size) + ) self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA) self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA) self.assertEqual(init_op.attr('seed'), 0) - def test_xavier_initializer_supplied_arguments(self, - dtype="float32", - uniform=True): - """Test the Xavier initializer with supplied arguments - """ + def test_xavier_initializer_supplied_arguments( + self, dtype="float32", uniform=True + ): + """Test the Xavier initializer with supplied arguments""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer( - uniform=uniform, - fan_in=12, - fan_out=23, - seed=134)) - num_ops = 2 if (dtype == "float16" or - (dtype == "uint16" and not uniform)) else 1 + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer( + uniform=uniform, fan_in=12, fan_out=23, seed=134 + ), + ) + num_ops = ( + 2 + if (dtype == "float16" or (dtype == "uint16" and not uniform)) + else 1 + ) self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] if uniform: @@ -385,25 +393,24 @@ class TestXavierInitializer(unittest.TestCase): return block def test_xavier_initializer_fp16(self): - """Test the Xavier initializer with float16 - """ + """Test the Xavier initializer with float16""" block = self.test_xavier_initializer_supplied_arguments("float16") def test_xavier_initializer_bf16(self): - """Test the Xavier initializer with bfloat16 - """ + """Test the Xavier initializer with bfloat16""" block_uniform = self.test_xavier_initializer_supplied_arguments( - "uint16") + "uint16" + ) self.assertEqual(len(block_uniform.ops), 1) block_gaussian = self.test_xavier_initializer_supplied_arguments( - "uint16", False) + "uint16", False + ) class TestMSRAInitializer(unittest.TestCase): - def test_uniform_msra_initializer(self): """Test MSRA initializer with uniform distribution on - for matrix multiply. + for matrix multiply. """ program = framework.Program() block = program.global_block() @@ -413,7 +420,8 @@ class TestMSRAInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.MSRAInitializer()) + initializer=initializer.MSRAInitializer(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -424,7 +432,7 @@ class TestMSRAInitializer(unittest.TestCase): def test_uniform_msra_initializer_conv(self): """Test MSRA initializer with uniform distribution on - for convolutions. + for convolutions. 
""" program = framework.Program() block = program.global_block() @@ -434,7 +442,8 @@ class TestMSRAInitializer(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.MSRAInitializer()) + initializer=initializer.MSRAInitializer(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -446,7 +455,7 @@ class TestMSRAInitializer(unittest.TestCase): def test_normal_msra_initializer(self): """Test MSRA initializer with normal distribution on - for matrix multiply. + for matrix multiply. """ program = framework.Program() block = program.global_block() @@ -456,7 +465,8 @@ class TestMSRAInitializer(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.MSRAInitializer(uniform=False)) + initializer=initializer.MSRAInitializer(uniform=False), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -467,7 +477,7 @@ class TestMSRAInitializer(unittest.TestCase): def test_normal_msra_initializer_conv(self): """Test MSRA initializer with normal distribution on - for convolutions. + for convolutions. """ program = framework.Program() block = program.global_block() @@ -477,7 +487,8 @@ class TestMSRAInitializer(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.MSRAInitializer(uniform=False)) + initializer=initializer.MSRAInitializer(uniform=False), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -488,17 +499,17 @@ class TestMSRAInitializer(unittest.TestCase): self.assertEqual(init_op.attr('seed'), 0) def test_msra_initializer_supplied_arguments(self, dtype="float32"): - """Test the MSRA initializer with supplied arguments - """ + """Test the MSRA initializer with supplied arguments""" program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer( - fan_in=12, seed=134)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer(fan_in=12, seed=134), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -510,22 +521,18 @@ class TestMSRAInitializer(unittest.TestCase): return block def test_msra_initializer_fp16(self): - """Test the MSRA initializer with float16 - """ + """Test the MSRA initializer with float16""" block = self.test_msra_initializer_supplied_arguments("float16") self.assertTrue(check_cast_op(block.ops[1])) def test_msra_initializer_bf16(self): - """Test the MSRA initializer with bfloat16 - """ + """Test the MSRA initializer with bfloat16""" block = self.test_msra_initializer_supplied_arguments("uint16") class TestBilinearInitializer(unittest.TestCase): - def test_bilinear_initializer(self, dtype="float32"): - """Test the bilinear initializer with supplied arguments - """ + """Test the bilinear initializer with supplied arguments""" program = framework.Program() block = program.global_block() for _ in range(2): @@ -534,7 +541,8 @@ class TestBilinearInitializer(unittest.TestCase): shape=[8, 1, 3, 3], lod_level=0, name="param", - initializer=initializer.BilinearInitializer()) + initializer=initializer.BilinearInitializer(), + ) num_ops = 2 if dtype in ["float16", "uint16", "float64"] else 1 self.assertEqual(len(block.ops), 
num_ops) init_op = block.ops[0] @@ -545,14 +553,12 @@ class TestBilinearInitializer(unittest.TestCase): self.test_bilinear_initializer(dtype='float64') def test_bilinear_initializer_fp16(self): - """Test the bilinear initializer with supplied arguments - """ + """Test the bilinear initializer with supplied arguments""" block = self.test_bilinear_initializer("float16") self.assertTrue(check_cast_op(block.ops[1])) def test_bilinear_initializer_bf16(self): - """Test the bilinear initializer with supplied arguments - """ + """Test the bilinear initializer with supplied arguments""" block = self.test_bilinear_initializer("uint16") self.assertTrue(check_cast_op(block.ops[1])) @@ -561,33 +567,37 @@ class TestBilinearInitializer(unittest.TestCase): class TestBilinearInitializerDygraphAPI(unittest.TestCase): - def func_test_case(self): factor = 2 C = 2 B = 8 H = W = 32 - w_attr = paddle.ParamAttr(learning_rate=0., - regularizer=L2Decay(0.), - initializer=initializer.BilinearInitializer()) + w_attr = paddle.ParamAttr( + learning_rate=0.0, + regularizer=L2Decay(0.0), + initializer=initializer.BilinearInitializer(), + ) data = paddle.rand([B, 3, H, W], dtype='float32') - conv_up = paddle.nn.Conv2DTranspose(3, - out_channels=C, - kernel_size=2 * factor - factor % 2, - padding=int( - math.ceil((factor - 1) / 2.)), - stride=factor, - weight_attr=w_attr, - bias_attr=False) + conv_up = paddle.nn.Conv2DTranspose( + 3, + out_channels=C, + kernel_size=2 * factor - factor % 2, + padding=int(math.ceil((factor - 1) / 2.0)), + stride=factor, + weight_attr=w_attr, + bias_attr=False, + ) x = conv_up(data) return x def func_test_case_fp16(self): paddle.set_default_dtype("float16") paddle.seed(1234) - w_attr = paddle.ParamAttr(learning_rate=0., - regularizer=L2Decay(0.), - initializer=initializer.BilinearInitializer()) + w_attr = paddle.ParamAttr( + learning_rate=0.0, + regularizer=L2Decay(0.0), + initializer=initializer.BilinearInitializer(), + ) conv2d = paddle.nn.Conv2D(1, 2, 3, weight_attr=w_attr) paddle.set_default_dtype("float32") return conv2d.weight @@ -610,11 +620,10 @@ class TestBilinearInitializerDygraphAPI(unittest.TestCase): class TestNumpyArrayInitializer(unittest.TestCase): - def test_numpy_array_initializer(self, dtype="float32"): - """Test the numpy array initializer with supplied arguments - """ + """Test the numpy array initializer with supplied arguments""" import numpy + program = framework.Program() block = program.global_block() np_array = numpy.random.random((10000)).astype(dtype) @@ -624,7 +633,8 @@ class TestNumpyArrayInitializer(unittest.TestCase): shape=np_array.shape, lod_level=0, name="param", - initializer=initializer.NumpyArrayInitializer(np_array)) + initializer=initializer.NumpyArrayInitializer(np_array), + ) num_ops = 2 if dtype in ["float16", "uint16"] else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -633,23 +643,19 @@ class TestNumpyArrayInitializer(unittest.TestCase): return block def test_numpy_array_initializer_fp16(self): - """Test the numpy array initializer with float16 - """ + """Test the numpy array initializer with float16""" block = self.test_numpy_array_initializer("float16") self.assertTrue(block.ops[1]) def test_numpy_array_initializer_bf16(self): - """Test the numpy array initializer with bfloat16 - """ + """Test the numpy array initializer with bfloat16""" block = self.test_numpy_array_initializer("uint16") self.assertTrue(block.ops[1]) class TestSetGlobalInitializer(unittest.TestCase): - def test_set_global_weight_initilizer(self): - 
"""Test Set Global Param initilizer with UniformInitializer - """ + """Test Set Global Param initilizer with UniformInitializer""" main_prog = framework.Program() startup_prog = framework.Program() fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5)) @@ -674,13 +680,13 @@ class TestSetGlobalInitializer(unittest.TestCase): fluid.set_global_initializer(None) def test_set_global_bias_initilizer(self): - """Test Set Global Bias initilizer with NormalInitializer - """ + """Test Set Global Bias initilizer with NormalInitializer""" main_prog = framework.Program() startup_prog = framework.Program() - fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5), - bias_init=initializer.Normal(loc=0.0, - scale=2.0)) + fluid.set_global_initializer( + initializer.Uniform(low=-0.5, high=0.5), + bias_init=initializer.Normal(loc=0.0, scale=2.0), + ) with fluid.program_guard(main_prog, startup_prog): x = fluid.data(name="x", shape=[1, 3, 32, 32]) # default initilizer of bias in layers.conv2d is ConstantInitializer @@ -705,7 +711,6 @@ class TestSetGlobalInitializer(unittest.TestCase): class TestUniformInitializerDygraph(unittest.TestCase): - def func_uniform_initializer(self, dtype="float32"): """ In dygraph mode, we can use initializer directly to initialize a tensor. @@ -714,15 +719,16 @@ class TestUniformInitializerDygraph(unittest.TestCase): tensor = paddle.zeros([1024, 1024, 16]) tensor.stop_gradient = False - np.testing.assert_allclose(np.zeros((1024, 1024, 16)), - tensor.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np.zeros((1024, 1024, 16)), tensor.numpy(), rtol=1e-05 + ) uniform_ = paddle.nn.initializer.Uniform() uniform_(tensor) - self.assertEqual(tensor.stop_gradient, - False) # stop_gradient is not changed + self.assertEqual( + tensor.stop_gradient, False + ) # stop_gradient is not changed hist, prob = output_hist(tensor.numpy()) @@ -737,7 +743,6 @@ class TestUniformInitializerDygraph(unittest.TestCase): class TestXavierInitializerDygraph(unittest.TestCase): - def func_xvarier_initializer(self, dtype="float32"): """ In dygraph mode, we can use initializer directly to initialize a tensor. @@ -747,15 +752,16 @@ class TestXavierInitializerDygraph(unittest.TestCase): tensor = paddle.zeros([1024, 1024, 16]) tensor.stop_gradient = False - xavier_ = paddle.fluid.initializer.XavierInitializer(uniform=False, - fan_in=3, - fan_out=5) + xavier_ = paddle.fluid.initializer.XavierInitializer( + uniform=False, fan_in=3, fan_out=5 + ) xavier_(tensor) hist, _ = output_hist(tensor.numpy()) hist2, _ = output_hist( - np.random.normal(0, np.sqrt(2.0 / (3 + 5)), [1024, 1024, 16])) + np.random.normal(0, np.sqrt(2.0 / (3 + 5)), [1024, 1024, 16]) + ) np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) paddle.enable_static() @@ -767,7 +773,6 @@ class TestXavierInitializerDygraph(unittest.TestCase): class TestMSRAInitializerDygraph(unittest.TestCase): - def func_msra_initializer(self, dtype="float32"): """ In dygraph mode, we can use initializer directly to initialize a tensor. 
@@ -777,14 +782,16 @@ class TestMSRAInitializerDygraph(unittest.TestCase): tensor = paddle.zeros([1024, 1024, 16]) tensor.stop_gradient = False - msra_ = paddle.fluid.initializer.MSRAInitializer(uniform=False, - fan_in=4) + msra_ = paddle.fluid.initializer.MSRAInitializer( + uniform=False, fan_in=4 + ) msra_(tensor) hist, _ = output_hist(tensor.numpy()) hist2, _ = output_hist( - np.random.normal(0, np.sqrt(2.0 / (4)), [1024, 1024, 16])) + np.random.normal(0, np.sqrt(2.0 / (4)), [1024, 1024, 16]) + ) np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) paddle.enable_static() @@ -796,7 +803,6 @@ class TestMSRAInitializerDygraph(unittest.TestCase): class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase): - def func_order(self): paddle.set_device('cpu') SEED = 123 @@ -805,23 +811,26 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase): learning_rate=1.0, trainable=False, regularizer=None, - initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, - std=2.0)) + initializer=paddle.nn.initializer.TruncatedNormal( + mean=0.0, std=2.0 + ), + ) bias_attr = paddle.framework.ParamAttr( name="linear_bias", learning_rate=1.0, trainable=False, regularizer=None, - initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, - std=2.0)) + initializer=paddle.nn.initializer.TruncatedNormal( + mean=0.0, std=2.0 + ), + ) def run_dynamic_graph(): paddle.disable_static() paddle.seed(SEED) - linear = paddle.nn.Linear(1, - 1, - weight_attr=weight_attr, - bias_attr=bias_attr) + linear = paddle.nn.Linear( + 1, 1, weight_attr=weight_attr, bias_attr=bias_attr + ) return linear.weight.numpy(), linear.bias.numpy() paddle.enable_static() @@ -829,12 +838,13 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase): paddle.enable_static() exe = paddle.static.Executor(paddle.CPUPlace()) paddle.seed(SEED) - linear = paddle.nn.Linear(1, - 1, - weight_attr=weight_attr, - bias_attr=bias_attr) - res = exe.run(paddle.static.default_startup_program(), - fetch_list=['linear_weight', 'linear_bias']) + linear = paddle.nn.Linear( + 1, 1, weight_attr=weight_attr, bias_attr=bias_attr + ) + res = exe.run( + paddle.static.default_startup_program(), + fetch_list=['linear_weight', 'linear_bias'], + ) return res[0], res[1] dynamic_res = run_dynamic_graph() @@ -857,7 +867,8 @@ class TestOrthogonalInitializer1(unittest.TestCase): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal(gain=3.0)) + initializer=paddle.nn.initializer.Orthogonal(gain=3.0) + ) self.dtype = "float64" self.in_features = 10 self.out_features = 15 @@ -865,10 +876,9 @@ class TestOrthogonalInitializer1(unittest.TestCase): def check_result(self, a, b): np.testing.assert_array_equal(a, b) - np.testing.assert_allclose(np.matmul(a, a.T), - 9 * np.eye(10), - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + np.matmul(a, a.T), 9 * np.eye(10), rtol=1e-5, atol=1e-8 + ) def func_orthogonal(self): self.config() @@ -876,9 +886,9 @@ class TestOrthogonalInitializer1(unittest.TestCase): paddle.disable_static() paddle.seed(2021) - linear = paddle.nn.Linear(self.in_features, - self.out_features, - weight_attr=self.weight_attr) + linear = paddle.nn.Linear( + self.in_features, self.out_features, weight_attr=self.weight_attr + ) res_dygraph = linear.weight.numpy() paddle.enable_static() @@ -886,9 +896,11 @@ class TestOrthogonalInitializer1(unittest.TestCase): start_prog = paddle.static.Program() main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): - linear = 
paddle.nn.Linear(self.in_features, - self.out_features, - weight_attr=self.weight_attr) + linear = paddle.nn.Linear( + self.in_features, + self.out_features, + weight_attr=self.weight_attr, + ) block = start_prog.global_block() self.assertEqual(len(block.ops), self.num_ops) @@ -919,7 +931,8 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal(gain=2.0)) + initializer=paddle.nn.initializer.Orthogonal(gain=2.0) + ) self.dtype = "float64" self.in_features = 15 self.out_features = 10 @@ -927,10 +940,9 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1): def check_result(self, a, b): np.testing.assert_array_equal(a, b) - np.testing.assert_allclose(np.matmul(a.T, a), - 4 * np.eye(10), - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + np.matmul(a.T, a), 4 * np.eye(10), rtol=1e-5, atol=1e-8 + ) # 2-D Parameter with shape: [10, 10] @@ -941,7 +953,8 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal()) + initializer=paddle.nn.initializer.Orthogonal() + ) self.dtype = "float32" self.in_features = 10 self.out_features = 10 @@ -949,14 +962,12 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1): def check_result(self, a, b): np.testing.assert_array_equal(a, b) - np.testing.assert_allclose(np.matmul(a.T, a), - np.eye(10), - rtol=1e-05, - atol=1e-06) - np.testing.assert_allclose(np.matmul(a, a.T), - np.eye(10), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np.matmul(a.T, a), np.eye(10), rtol=1e-05, atol=1e-06 + ) + np.testing.assert_allclose( + np.matmul(a, a.T), np.eye(10), rtol=1e-05, atol=1e-06 + ) def test_error(self): self.config() @@ -972,7 +983,8 @@ class TestOrthogonalInitializer4(unittest.TestCase): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal(gain=3.0)) + initializer=paddle.nn.initializer.Orthogonal(gain=3.0) + ) self.dtype = "float64" self.in_features = 4 self.out_features = 6 @@ -981,10 +993,9 @@ class TestOrthogonalInitializer4(unittest.TestCase): def check_result(self, a, b): np.testing.assert_array_equal(a, b) a = a.reshape(6, -1) - np.testing.assert_allclose(np.matmul(a, a.T), - 9 * np.eye(6), - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + np.matmul(a, a.T), 9 * np.eye(6), rtol=1e-5, atol=1e-8 + ) def func_orthogonal(self): self.config() @@ -992,10 +1003,12 @@ class TestOrthogonalInitializer4(unittest.TestCase): paddle.disable_static() paddle.seed(2021) - conv2d = paddle.nn.Conv2D(self.in_features, - self.out_features, - self.kernel_size, - weight_attr=self.weight_attr) + conv2d = paddle.nn.Conv2D( + self.in_features, + self.out_features, + self.kernel_size, + weight_attr=self.weight_attr, + ) res_dygraph = conv2d.weight.numpy() paddle.enable_static() @@ -1003,13 +1016,17 @@ class TestOrthogonalInitializer4(unittest.TestCase): start_prog = paddle.static.Program() main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): - conv2d = paddle.nn.Conv2D(self.in_features, - self.out_features, - self.kernel_size, - weight_attr=self.weight_attr) + conv2d = paddle.nn.Conv2D( + self.in_features, + self.out_features, + self.kernel_size, + weight_attr=self.weight_attr, + ) exe = paddle.static.Executor() - res_static = exe.run(paddle.static.default_startup_program(), - fetch_list=[conv2d.weight])[0] + res_static = exe.run( + 
paddle.static.default_startup_program(), + fetch_list=[conv2d.weight], + )[0] self.check_result(res_dygraph, res_static) def test_orthogonal(self): @@ -1026,7 +1043,8 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal(gain=2.0)) + initializer=paddle.nn.initializer.Orthogonal(gain=2.0) + ) self.dtype = "float64" self.in_features = 4 self.out_features = 50 @@ -1035,10 +1053,9 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4): def check_result(self, a, b): np.testing.assert_array_equal(a, b) a = a.reshape(50, -1) - np.testing.assert_allclose(np.matmul(a.T, a), - 4 * np.eye(36), - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + np.matmul(a.T, a), 4 * np.eye(36), rtol=1e-5, atol=1e-8 + ) # 4-D Parameter with shape: [36, 4, 3, 3] @@ -1049,7 +1066,8 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4): def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Orthogonal()) + initializer=paddle.nn.initializer.Orthogonal() + ) self.dtype = "float32" self.in_features = 4 self.out_features = 36 @@ -1058,29 +1076,29 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4): def check_result(self, a, b): np.testing.assert_array_equal(a, b) a = a.reshape(36, -1) - np.testing.assert_allclose(np.matmul(a.T, a), - np.eye(36), - rtol=1e-05, - atol=1e-06) - np.testing.assert_allclose(np.matmul(a, a.T), - np.eye(36), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np.matmul(a.T, a), np.eye(36), rtol=1e-05, atol=1e-06 + ) + np.testing.assert_allclose( + np.matmul(a, a.T), np.eye(36), rtol=1e-05, atol=1e-06 + ) # initialize Conv1D weight class TestDiracInitializer1(unittest.TestCase): - def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Dirac()) + initializer=paddle.nn.initializer.Dirac() + ) self.dtype = "float64" self.in_channels = 3 self.out_channels = 2 self.kernel_size = 3 self.input_shape = [8, self.in_channels, 10] self.conv_layer = paddle.nn.Conv1D - self.num_ops = 8 #fill_constant*2, reshape*2, assign_value*2, scatter, cast + self.num_ops = ( + 8 # fill_constant*2, reshape*2, assign_value*2, scatter, cast + ) def check_result(self, w_dygraph, w_static, conv_in, conv_out): np.testing.assert_array_equal(w_dygraph, w_static) @@ -1091,10 +1109,12 @@ class TestDiracInitializer1(unittest.TestCase): paddle.set_default_dtype(self.dtype) paddle.disable_static() - conv = self.conv_layer(self.in_channels, - self.out_channels, - self.kernel_size, - weight_attr=self.weight_attr) + conv = self.conv_layer( + self.in_channels, + self.out_channels, + self.kernel_size, + weight_attr=self.weight_attr, + ) weight_dygraph = conv.weight.numpy() paddle.enable_static() @@ -1102,10 +1122,12 @@ class TestDiracInitializer1(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): inp = paddle.rand(self.input_shape) - conv = self.conv_layer(self.in_channels, - self.out_channels, - self.kernel_size, - weight_attr=self.weight_attr) + conv = self.conv_layer( + self.in_channels, + self.out_channels, + self.kernel_size, + weight_attr=self.weight_attr, + ) output = conv(inp) block = start_prog.global_block() @@ -1124,8 +1146,9 @@ class TestDiracInitializer1(unittest.TestCase): conv_output = fetch[1] weight_static = fetch[2] - self.check_result(weight_dygraph, weight_static, conv_input, - conv_output) + self.check_result( + weight_dygraph, 
weight_static, conv_input, conv_output + ) def test_dirac(self): with framework._test_eager_guard(): @@ -1135,10 +1158,10 @@ class TestDiracInitializer1(unittest.TestCase): # initialize Conv2D weight class TestDiracInitializer2(TestDiracInitializer1): - def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Dirac(groups=1)) + initializer=paddle.nn.initializer.Dirac(groups=1) + ) self.dtype = "float64" self.in_channels = 4 self.out_channels = 8 @@ -1149,18 +1172,20 @@ class TestDiracInitializer2(TestDiracInitializer1): def check_result(self, w_dygraph, w_static, conv_in, conv_out): np.testing.assert_array_equal(w_dygraph, w_static) - np.testing.assert_array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, - 1:9]) - np.testing.assert_array_equal(conv_out[:, 4:8, :, :], - np.zeros([8, 4, 8, 8])) + np.testing.assert_array_equal( + conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, 1:9] + ) + np.testing.assert_array_equal( + conv_out[:, 4:8, :, :], np.zeros([8, 4, 8, 8]) + ) # initialize Conv3D weight class TestDiracInitializer3(TestDiracInitializer1): - def config(self): self.weight_attr = paddle.ParamAttr( - initializer=paddle.nn.initializer.Dirac(groups=2)) + initializer=paddle.nn.initializer.Dirac(groups=2) + ) self.dtype = "float32" self.in_channels = 5 self.out_channels = 10 @@ -1171,10 +1196,12 @@ class TestDiracInitializer3(TestDiracInitializer1): def check_result(self, w_dygraph, w_static, conv_in, conv_out): np.testing.assert_array_equal(w_dygraph, w_static) - np.testing.assert_array_equal(conv_out[:, 0:5, :, :, :], - conv_in[:, :, 1:9, 1:9, 1:9]) - np.testing.assert_array_equal(conv_out[:, 5:10, :, :, :], - conv_in[:, :, 1:9, 1:9, 1:9]) + np.testing.assert_array_equal( + conv_out[:, 0:5, :, :, :], conv_in[:, :, 1:9, 1:9, 1:9] + ) + np.testing.assert_array_equal( + conv_out[:, 5:10, :, :, :], conv_in[:, :, 1:9, 1:9, 1:9] + ) def test_error(self): self.config() diff --git a/python/paddle/fluid/tests/unittests/test_initializer_nn.py b/python/paddle/fluid/tests/unittests/test_initializer_nn.py index 54063f74a6e845c16d7dfd43a8edde0330376a2b..c9b81720ba94b138d36adc99a4e634fd7efd9e2b 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer_nn.py +++ b/python/paddle/fluid/tests/unittests/test_initializer_nn.py @@ -32,26 +32,28 @@ def get_uniform_min_and_max(weight): def check_cast_op(op): - return op.type == 'cast' and \ - op.attr('in_dtype') == VarDesc.VarType.FP32 and \ - op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16] + return ( + op.type == 'cast' + and op.attr('in_dtype') == VarDesc.VarType.FP32 + and op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16] + ) class TestConstantInitializer(unittest.TestCase): - - def static_test_constant_initializer_common(self, - init_inst, - dtype="float32", - value_target=0.0): + def static_test_constant_initializer_common( + self, init_inst, dtype="float32", value_target=0.0 + ): paddle.enable_static() program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=init_inst) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=init_inst, + ) num_ops = 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -61,46 +63,45 @@ class TestConstantInitializer(unittest.TestCase): return block def test_constant_initializer_default_value_static(self, dtype="float32"): - """Test the constant initializer with 
default value in static graph - """ + """Test the constant initializer with default value in static graph""" block = self.static_test_constant_initializer_common( - init_inst=initializer.Constant(), dtype=dtype, value_target=0.0) + init_inst=initializer.Constant(), dtype=dtype, value_target=0.0 + ) return block def test_constant_initializer_default_value_dygraph(self, dtype="float32"): - """Test constant initializer with supplied value in dygraph - """ + """Test constant initializer with supplied value in dygraph""" with fluid.dygraph.guard(): linear = nn.Linear(2, 4, weight_attr=nn.initializer.Constant()) mat_target = np.ones((2, 4), dtype=dtype) * 0.0 mat_linear = linear.weight.numpy() mismatch = np.sum( - (mat_target - mat_linear) * (mat_target - mat_linear)) + (mat_target - mat_linear) * (mat_target - mat_linear) + ) self.assertAlmostEqual(mismatch, 0.0, delta=DELTA) def test_constant_initializer_static(self, dtype="float32"): - """Test constant initializer with supplied value in static graph - """ + """Test constant initializer with supplied value in static graph""" block = self.static_test_constant_initializer_common( - init_inst=initializer.Constant(2.3), dtype=dtype, value_target=2.3) + init_inst=initializer.Constant(2.3), dtype=dtype, value_target=2.3 + ) return block def test_constant_initializer_dygraph(self, dtype="float32"): - """Test constant initializer with supplied value in dygraph - """ + """Test constant initializer with supplied value in dygraph""" with fluid.dygraph.guard(): - linear = nn.Linear(2, - 4, - weight_attr=nn.initializer.Constant(value=2.0)) + linear = nn.Linear( + 2, 4, weight_attr=nn.initializer.Constant(value=2.0) + ) mat_target = np.ones((2, 4), dtype=dtype) * 2.0 mat_linear = linear.weight.numpy() mismatch = np.sum( - (mat_target - mat_linear) * (mat_target - mat_linear)) + (mat_target - mat_linear) * (mat_target - mat_linear) + ) self.assertAlmostEqual(mismatch, 0.0, delta=DELTA) def test_constant_initializer_fp16(self): - """Test constant initializer with float16 - """ + """Test constant initializer with float16""" block = self.test_constant_initializer_default_value_static("float16") block = self.test_constant_initializer_static("float16") self.test_constant_initializer_default_value_dygraph("float16") @@ -108,29 +109,30 @@ class TestConstantInitializer(unittest.TestCase): def test_constant_initializer_bf16(self): """Test constant initializer with bfloat16 - No cast operator has been added here + No cast operator has been added here """ - self.test_constant_initializer_default_value_static("uint16") #bfloat16 - self.test_constant_initializer_static("uint16") #bfloat16 + self.test_constant_initializer_default_value_static( + "uint16" + ) # bfloat16 + self.test_constant_initializer_static("uint16") # bfloat16 class TestKaimingInitializer(unittest.TestCase): - - def static_test_kaiming_initializer_common(self, - init_inst, - dtype="float32", - uniform=False, - is_conv=False): + def static_test_kaiming_initializer_common( + self, init_inst, dtype="float32", uniform=False, is_conv=False + ): paddle.enable_static() program = framework.Program() block = program.global_block() shape_mat = [5, 10, 15, 20] if is_conv else [5, 10] for _ in range(2): - param = block.create_parameter(dtype="float32", - shape=shape_mat, - lod_level=0, - name="param", - initializer=init_inst) + param = block.create_parameter( + dtype="float32", + shape=shape_mat, + lod_level=0, + name="param", + initializer=init_inst, + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] 
if uniform: @@ -153,75 +155,76 @@ class TestKaimingInitializer(unittest.TestCase): self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA) paddle.disable_static() - def dygraph_test_kaiming_initializer_common(self, - init_inst, - dtype="float32", - uniform=False): + def dygraph_test_kaiming_initializer_common( + self, init_inst, dtype="float32", uniform=False + ): linear = nn.Linear(40, 20, weight_attr=init_inst) def test_kaiming_dygraph(self): self.dygraph_test_kaiming_initializer_common( init_inst=initializer.KaimingUniform(), dtype="float32", - uniform=True) + uniform=True, + ) self.dygraph_test_kaiming_initializer_common( init_inst=initializer.KaimingNormal(), dtype="float32", - uniform=False) + uniform=False, + ) def test_kaiming_uniform_initializer_static(self): - """Test Kaiming unorm initializer for matrix multiply. - """ + """Test Kaiming uniform initializer for matrix multiply.""" self.static_test_kaiming_initializer_common( init_inst=initializer.KaimingUniform(), dtype="float32", uniform=True, - is_conv=False) + is_conv=False, + ) def test_kaiming_uniform_initializer_conv_static(self): - """Test Kaiming unorm initializer for convolutions. - """ + """Test Kaiming uniform initializer for convolutions.""" self.static_test_kaiming_initializer_common( init_inst=initializer.KaimingUniform(), dtype="float32", uniform=True, - is_conv=True) + is_conv=True, + ) def test_kaiming_normal_initializer_static(self): - """Test Kaiming normal initializer for matrix multiply. - """ + """Test Kaiming normal initializer for matrix multiply.""" self.static_test_kaiming_initializer_common( init_inst=initializer.KaimingNormal(), dtype="float32", uniform=False, - is_conv=False) + is_conv=False, + ) def test_kaiming_normal_initializer_conv_static(self): - """Test Kaiming normal initializer for convolutions. 
- """ + """Test Kaiming normal initializer for convolutions.""" self.static_test_kaiming_initializer_common( init_inst=initializer.KaimingNormal(), dtype="float32", uniform=False, - is_conv=True) + is_conv=True, + ) class TestUniform(unittest.TestCase): - def test_uniform_common(self, dtype="float32", seed=0): - """Test the uniform initializer with default value - """ + """Test the uniform initializer with default value""" paddle.enable_static() program = framework.Program() program.random_seed = seed block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Uniform()) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.Uniform(), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -234,24 +237,23 @@ class TestUniform(unittest.TestCase): return block - def test_uniform_initializer_default_value(self, - dtype="float32", - seed=0, - min_value=-1.0, - max_vlaue=1.0): - """Test the uniform initializer with default value - """ + def test_uniform_initializer_default_value( + self, dtype="float32", seed=0, min_value=-1.0, max_vlaue=1.0 + ): + """Test the uniform initializer with default value""" paddle.enable_static() program = framework.Program() program.random_seed = seed block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Uniform()) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.Uniform(), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -264,25 +266,23 @@ class TestUniform(unittest.TestCase): return block - def test_uniform_initializer(self, - dtype="float32", - seed=0, - min_value=-4.2, - max_vlaue=3.1): - """Test uniform initializer with supplied attributes - """ + def test_uniform_initializer( + self, dtype="float32", seed=0, min_value=-4.2, max_vlaue=3.1 + ): + """Test uniform initializer with supplied attributes""" paddle.enable_static() program = framework.Program() program.random_seed = seed block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Uniform( - min_value, max_vlaue)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.Uniform(min_value, max_vlaue), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -294,25 +294,23 @@ class TestUniform(unittest.TestCase): return block - def test_uniform_initializer_two_op(self, - dtype="float32", - seed=123, - min_value=-4.2, - max_vlaue=0.0): - """Test uniform initializer with supplied attributes - """ + def test_uniform_initializer_two_op( + self, dtype="float32", seed=123, min_value=-4.2, max_vlaue=0.0 + ): + """Test uniform initializer with supplied attributes""" paddle.enable_static() program = framework.Program() program.random_seed = seed block = program.global_block() for i in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Uniform( - min_value, float(i))) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + 
initializer=initializer.Uniform(min_value, float(i)), + ) num_ops = 2 if dtype == "float16" else 1 self.assertEqual(len(block.ops), num_ops) init_op0 = block.ops[0] @@ -326,8 +324,7 @@ class TestUniform(unittest.TestCase): return block def test_uniform_initializer_fp16(self): - """Test uniform initializer with float16 - """ + """Test uniform initializer with float16""" block = self.test_uniform_initializer_default_value("float16") self.assertTrue(check_cast_op(block.ops[1])) block = self.test_uniform_initializer(dtype="float16") @@ -336,44 +333,47 @@ class TestUniform(unittest.TestCase): self.assertTrue(check_cast_op(block.ops[1])) def test_uniform_initializer_bf16(self): - """Test uniform initializer with bfloat16 - """ - block = self.test_uniform_initializer_default_value("uint16") #bfloat16 - block = self.test_uniform_initializer(dtype="uint16") #bfloat16 - block = self.test_uniform_initializer_two_op("uint16") #bfloat16 + """Test uniform initializer with bfloat16""" + block = self.test_uniform_initializer_default_value( + "uint16" + ) # bfloat16 + block = self.test_uniform_initializer(dtype="uint16") # bfloat16 + block = self.test_uniform_initializer_two_op("uint16") # bfloat16 def test_uniform_initializer_dygraph(self): - """Test uniform initializer in dygraph model. - """ + """Test uniform initializer in dygraph model.""" paddle.disable_static() weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5)) + initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5), + ) linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr) min_value, max_value = get_uniform_min_and_max(linear.weight.numpy()) - self.assertTrue(min_value >= -0.5, - 'min value {} should >= -0.5'.format(min_value)) - self.assertTrue(max_value <= 0.5, - 'max value {} should <= 0.5'.format(max_value)) + self.assertTrue( + min_value >= -0.5, 'min value {} should >= -0.5'.format(min_value) + ) + self.assertTrue( + max_value <= 0.5, 'max value {} should <= 0.5'.format(max_value) + ) class TestNormal(unittest.TestCase): - def test_normal_initializer_default_value(self): - """Test the normal initializer with default value - """ + """Test the normal initializer with default value""" paddle.enable_static() program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Normal()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.Normal(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -384,18 +384,19 @@ class TestNormal(unittest.TestCase): paddle.disable_static() def test_normal_initializer(self, dtype="float32"): - """Test normal initializer with supplied attributes - """ + """Test normal initializer with supplied attributes""" paddle.enable_static() program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.Normal(2.3, 1.9)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.Normal(2.3, 1.9), + ) num_ops = 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -408,41 +409,39 @@ class TestNormal(unittest.TestCase): return block def test_normal_initializer_fp16(self): - """Test normal 
initializer with float16 - """ + """Test normal initializer with float16""" block = self.test_normal_initializer("float16") def test_normal_initializer_bf16(self): - """Test normal initializer with bfloat16 - """ - block = self.test_normal_initializer("uint16") #bfloat16 + """Test normal initializer with bfloat16""" + block = self.test_normal_initializer("uint16") # bfloat16 def test_normal_initializer_dygraph(self): - """Test normal initializer in dygraph model. - """ + """Test normal initializer in dygraph model.""" paddle.disable_static() weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0)) + initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0), + ) linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr) class TestTruncatedNormal(unittest.TestCase): - def test_truncated_normal_initializer_default_value(self): - """Test the truncated normal initializer with default value - """ + """Test the truncated normal initializer with default value""" paddle.enable_static() program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.TruncatedNormal()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.TruncatedNormal(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'truncated_gaussian_random') @@ -453,19 +452,19 @@ class TestTruncatedNormal(unittest.TestCase): paddle.disable_static() def test_truncated_normal_initializer(self, dtype="float32"): - """Test truncated normal initializer with supplied attributes - """ + """Test truncated normal initializer with supplied attributes""" paddle.enable_static() program = framework.Program() block = program.global_block() for _ in range(2): - block.create_parameter(dtype=dtype, - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.TruncatedNormal( - 2.3, 1.9)) + block.create_parameter( + dtype=dtype, + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.TruncatedNormal(2.3, 1.9), + ) num_ops = 2 if dtype in ["float16", "uint16"] else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -478,38 +477,36 @@ class TestTruncatedNormal(unittest.TestCase): return block def test_truncated_normal_initializer_fp16(self): - """Test truncated normal initializer with float16 - """ + """Test truncated normal initializer with float16""" paddle.enable_static() block = self.test_truncated_normal_initializer("float16") self.assertTrue(check_cast_op(block.ops[1])) def test_truncated_normal_initializer_bf16(self): - """Test truncated normal initializer with bfloat16 - """ + """Test truncated normal initializer with bfloat16""" paddle.enable_static() - block = self.test_truncated_normal_initializer("uint16") #bfloat16 + block = self.test_truncated_normal_initializer("uint16") # bfloat16 self.assertTrue(check_cast_op(block.ops[1])) def test_truncated_normal_initializer_dygraph(self): - """Test truncated normal initializer in dygraph model. 
- """ + """Test truncated normal initializer in dygraph model.""" paddle.disable_static() weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, - std=2.0)) + initializer=paddle.nn.initializer.TruncatedNormal( + mean=0.0, std=2.0 + ), + ) linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr) class TestXavierUniform(unittest.TestCase): - def test_xavier_uniform_initializer(self): """Test Xavier initializer with uniform distribution on - for matrix multiply. + for matrix multiply. """ paddle.enable_static() @@ -521,7 +518,8 @@ class TestXavierUniform(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.XavierUniform()) + initializer=initializer.XavierUniform(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -534,7 +532,7 @@ class TestXavierUniform(unittest.TestCase): def test_xavier_uniform_initializer_conv(self): """Test Xavier initializer with uniform distribution on - for convolutions. + for convolutions. """ paddle.enable_static() @@ -546,33 +544,34 @@ class TestXavierUniform(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.XavierUniform()) + initializer=initializer.XavierUniform(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') receptive_field_size = float(15 * 20) limit = np.sqrt( - 6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size)) + 6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size) + ) self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA) self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA) self.assertEqual(init_op.attr('seed'), 0) def test_xavier_uniform_initializer_dygraph(self): - """Test xavier uniform initializer in dygraph model. - """ + """Test xavier uniform initializer in dygraph model.""" paddle.disable_static() weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.XavierUniform()) + initializer=paddle.nn.initializer.XavierUniform(), + ) linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr) class TestXavierNormal(unittest.TestCase): - def test_xavier_normal_initializer(self): """Test Xavier initializer with normal distribution on - for matrix multiply. + for matrix multiply. """ paddle.enable_static() @@ -584,7 +583,8 @@ class TestXavierNormal(unittest.TestCase): shape=[5, 10], lod_level=0, name="param", - initializer=initializer.XavierNormal()) + initializer=initializer.XavierNormal(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -597,7 +597,7 @@ class TestXavierNormal(unittest.TestCase): def test_xavier_normal_initializer_conv(self): """Test Xavier initializer with normal distribution on - for convolutions. + for convolutions. 
""" paddle.enable_static() @@ -609,13 +609,15 @@ class TestXavierNormal(unittest.TestCase): shape=[5, 10, 15, 20], lod_level=0, name="param", - initializer=initializer.XavierNormal()) + initializer=initializer.XavierNormal(), + ) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') receptive_field_size = float(15 * 20) std = np.sqrt( - 2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size)) + 2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size) + ) self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA) self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA) self.assertEqual(init_op.attr('seed'), 0) @@ -623,33 +625,34 @@ class TestXavierNormal(unittest.TestCase): paddle.disable_static() def test_xavier_normal_initializer_dygraph(self): - """Test xavier normal initializer in dygraph model. - """ + """Test xavier normal initializer in dygraph model.""" paddle.disable_static() weight_attr = paddle.framework.ParamAttr( name="linear_weight", - initializer=paddle.nn.initializer.XavierNormal()) + initializer=paddle.nn.initializer.XavierNormal(), + ) linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr) class TestAssign(unittest.TestCase): - def test_assign_initializer(self, dtype="float32"): - """Test the numpy array initializer with supplied arguments - """ + """Test the numpy array initializer with supplied arguments""" paddle.enable_static() import numpy + program = framework.Program() block = program.global_block() np_array = numpy.random.random((10000)).astype(dtype) for _ in range(2): - block.create_parameter(dtype=np_array.dtype, - shape=np_array.shape, - lod_level=0, - name="param", - initializer=initializer.Assign(np_array)) + block.create_parameter( + dtype=np_array.dtype, + shape=np_array.shape, + lod_level=0, + name="param", + initializer=initializer.Assign(np_array), + ) num_ops = 2 if dtype in ["float16", "uint16"] else 1 self.assertEqual(len(block.ops), num_ops) init_op = block.ops[0] @@ -661,61 +664,59 @@ class TestAssign(unittest.TestCase): return block def test_assign_initializer_fp16(self): - """Test the numpy array initializer with float16 - """ + """Test the numpy array initializer with float16""" block = self.test_assign_initializer("float16") self.assertTrue(block.ops[1]) def test_assign_initializer_bf16(self): - """Test the numpy array initializer with bfloat16 - """ - block = self.test_assign_initializer("uint16") #bfloat16 + """Test the numpy array initializer with bfloat16""" + block = self.test_assign_initializer("uint16") # bfloat16 self.assertTrue(block.ops[1]) def test_assign_initializer_dygraph_1(self): - """Test assign initializer in dygraph model. - """ + """Test assign initializer in dygraph model.""" paddle.disable_static() weight_attr_1 = paddle.framework.ParamAttr( name="linear_weight_1", - initializer=paddle.nn.initializer.Assign(np.array([2, 2]))) + initializer=paddle.nn.initializer.Assign(np.array([2, 2])), + ) linear_1 = paddle.nn.Linear(2, 2, weight_attr=weight_attr_1) self.assertTrue((linear_1.weight.numpy() == [2.0, 2.0]).all(), '') def test_assign_initializer_dygraph_2(self): - """Test assign initializer in dygraph model. 
- """ + """Test assign initializer in dygraph model.""" paddle.disable_static() weight_attr_2 = paddle.framework.ParamAttr( name="linear_weight_2", - initializer=paddle.nn.initializer.Assign([2, 2])) + initializer=paddle.nn.initializer.Assign([2, 2]), + ) linear_2 = paddle.nn.Linear(2, 2, weight_attr=weight_attr_2) self.assertTrue((linear_2.weight.numpy() == [2.0, 2.0]).all(), '') def test_assign_initializer_dygraph_3(self): - """Test assign initializer in dygraph model. - """ + """Test assign initializer in dygraph model.""" paddle.disable_static() weight_attr_3 = paddle.framework.ParamAttr( name="linear_weight_3", - initializer=paddle.nn.initializer.Assign(paddle.full([2], 2))) + initializer=paddle.nn.initializer.Assign(paddle.full([2], 2)), + ) linear_3 = paddle.nn.Linear(2, 2, weight_attr=weight_attr_3) self.assertTrue((linear_3.weight.numpy() == [2.0, 2.0]).all(), '') def test_assign_initializer_dygraph_4(self): - """Test assign initializer in dygraph model. - """ + """Test assign initializer in dygraph model.""" paddle.disable_static() weight_attr_4 = paddle.framework.ParamAttr( name="linear_weight_4", - initializer=paddle.nn.initializer.Assign((2, 2))) + initializer=paddle.nn.initializer.Assign((2, 2)), + ) linear_4 = paddle.nn.Linear(2, 2, weight_attr=weight_attr_4) self.assertTrue((linear_4.weight.numpy() == [2.0, 2.0]).all(), '') diff --git a/python/paddle/fluid/tests/unittests/test_inner.py b/python/paddle/fluid/tests/unittests/test_inner.py index cf631101e8ee8a6552f87f038ef81561ab7b5fed..3e30e9fa2284b7af3757bd06f11c0ecee986468b 100644 --- a/python/paddle/fluid/tests/unittests/test_inner.py +++ b/python/paddle/fluid/tests/unittests/test_inner.py @@ -22,27 +22,28 @@ from paddle.fluid.framework import _test_eager_guard class TestMultiplyApi(unittest.TestCase): - def _run_static_graph_case(self, x_data, y_data): with program_guard(Program(), Program()): paddle.enable_static() - x = paddle.static.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = paddle.static.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = paddle.static.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = paddle.static.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) res = paddle.inner(x, y) - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) - outs = exe.run(paddle.static.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[res]) + outs = exe.run( + paddle.static.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[res], + ) res = outs[0] return res @@ -94,17 +95,21 @@ class TestMultiplyApi(unittest.TestCase): # test dynamic computation graph: 2-d array Complex x_data = np.random.rand(20, 50).astype( - np.float64) + 1J * np.random.rand(20, 50).astype(np.float64) - y_data = np.random.rand(50).astype( - np.float64) + 1J * np.random.rand(50).astype(np.float64) + np.float64 + ) + 1j * np.random.rand(20, 50).astype(np.float64) + y_data = np.random.rand(50).astype(np.float64) + 1j * np.random.rand( + 50 + ).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 3-d array Complex x_data = np.random.rand(5, 10, 10).astype( - np.float64) + 1J * np.random.rand(5, 10, 10).astype(np.float64) - y_data = np.random.rand(2, 10).astype( - 
np.float64) + 1J * np.random.rand(2, 10).astype(np.float64) + np.float64 + ) + 1j * np.random.rand(5, 10, 10).astype(np.float64) + y_data = np.random.rand(2, 10).astype(np.float64) + 1j * np.random.rand( + 2, 10 + ).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05) @@ -115,7 +120,6 @@ class TestMultiplyApi(unittest.TestCase): class TestMultiplyError(unittest.TestCase): - def func_test_errors(self): # test static computation graph: dtype can not be int8 paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_inplace.py b/python/paddle/fluid/tests/unittests/test_inplace.py index 0d88a35ff2453ab57600c9ed9b7d926dad461273..2496ee37a05e4b43ffd37f66ada2216d5acc4dfe 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace.py +++ b/python/paddle/fluid/tests/unittests/test_inplace.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TestInplace(unittest.TestCase): - def func_test_forward_version(self): with paddle.fluid.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) @@ -60,9 +59,11 @@ class TestInplace(unittest.TestCase): loss = paddle.nn.functional.relu(var_c + var_d) with self.assertRaisesRegexp( - RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}". - format(1, 0)): + RuntimeError, + "received tensor_version:{} != wrapper_version_snapshot:{}".format( + 1, 0 + ), + ): loss.backward() def test_backward_error(self): @@ -101,7 +102,9 @@ class TestInplace(unittest.TestCase): var_b[1:2] = 3 # var_b is modified inplace before using it - var_c = var_b + var_b # Here, the grad op of sum doesn't use the value of var_b + var_c = ( + var_b + var_b + ) # Here, the grad op of sum doesn't use the value of var_b loss = var_c.sum() var_b[1:2] = 3 # var_b is modified inplace after using it @@ -115,7 +118,6 @@ class TestInplace(unittest.TestCase): class TestDygraphInplace(unittest.TestCase): - def setUp(self): self.init_data() self.set_np_compare_func() @@ -138,7 +140,7 @@ class TestDygraphInplace(unittest.TestCase): inplace_var = self.inplace_api_processing(var) self.assertTrue(id(var) == id(inplace_var)) - inplace_var[0] = 2. + inplace_var[0] = 2.0 np.testing.assert_array_equal(var.numpy(), inplace_var.numpy()) def test_inplace_api(self): @@ -154,7 +156,7 @@ class TestDygraphInplace(unittest.TestCase): inplace_var = self.inplace_api_processing(var) self.assertEqual(var.inplace_version, 1) - inplace_var[0] = 2. + inplace_var[0] = 2.0 self.assertEqual(var.inplace_version, 2) inplace_var = self.inplace_api_processing(inplace_var) @@ -195,9 +197,11 @@ class TestDygraphInplace(unittest.TestCase): loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegexp( - RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}". 
- format(1, 0)): + RuntimeError, + "received tensor_version:{} != wrapper_version_snapshot:{}".format( + 1, 0 + ), + ): loss.backward() def test_backward_error(self): @@ -215,7 +219,8 @@ class TestDygraphInplace(unittest.TestCase): var_b = var_a**2 var_c = self.inplace_api_processing( - var_b) # var_b is modified inplace before using it + var_b + ) # var_b is modified inplace before using it # Here, the gradient computation will use the value of var_b var_d = var_c**2 @@ -252,9 +257,12 @@ class TestDygraphInplace(unittest.TestCase): var_b = var_a**2 var_c = self.inplace_api_processing( - var_b) # var_b is modified inplace before using it + var_b + ) # var_b is modified inplace before using it - var_d = var_c + var_c # Here, the grad op of sum doesn't use the value of var_b + var_d = ( + var_c + var_c + ) # Here, the grad op of sum doesn't use the value of var_b loss = var_d.sum() loss.backward() @@ -268,7 +276,9 @@ class TestDygraphInplace(unittest.TestCase): var_c = self.non_inplace_api_processing(var_b) - var_d = var_c + var_c # Here, the grad op of sum doesn't use the value of var_b + var_d = ( + var_c + var_c + ) # Here, the grad op of sum doesn't use the value of var_b loss = var_d.sum() loss.backward() @@ -282,7 +292,6 @@ class TestDygraphInplace(unittest.TestCase): class TestDygraphInplaceUnsqueeze(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.unsqueeze(var, -1) @@ -291,7 +300,6 @@ class TestDygraphInplaceUnsqueeze(TestDygraphInplace): class TestDygraphInplaceReshape(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.reshape(var, [-1]) @@ -300,7 +308,6 @@ class TestDygraphInplaceReshape(TestDygraphInplace): class TestDygraphInplaceReshapeTensor(TestDygraphInplace): - def non_inplace_api_processing(self, var): shape = paddle.to_tensor(-1) return paddle.reshape(var, shape) @@ -311,7 +318,6 @@ class TestDygraphInplaceReshapeTensor(TestDygraphInplace): class TestDygraphInplaceFlatten(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.flatten() @@ -320,28 +326,28 @@ class TestDygraphInplaceFlatten(TestDygraphInplace): class TestDygraphInplaceScatter(TestDygraphInplace): - def init_data(self): self.input_var_numpy = np.array([[1, 1], [2, 2], [3, 3]]) self.dtype = "float32" def non_inplace_api_processing(self, var): index = paddle.to_tensor([2, 1, 0, 1], dtype='int64') - updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], - dtype='float32') + updates = paddle.to_tensor( + [[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32' + ) return paddle.scatter(var, index, updates, overwrite=False) def inplace_api_processing(self, var): index = paddle.to_tensor([2, 1, 0, 1], dtype='int64') - updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], - dtype='float32') + updates = paddle.to_tensor( + [[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32' + ) return paddle.scatter_(var, index, updates, overwrite=False) class TestDygraphInplaceElu(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.nn.functional.elu(var) @@ -350,7 +356,6 @@ class TestDygraphInplaceElu(TestDygraphInplace): class TestDygraphInplaceRelu(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.nn.functional.relu(var) @@ -359,7 +364,6 @@ class TestDygraphInplaceRelu(TestDygraphInplace): class TestDygraphInplaceSoftmax(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.nn.functional.softmax(var) @@ -368,7 +372,6 @@ class 
TestDygraphInplaceSoftmax(TestDygraphInplace): class TestDygraphInplaceTanh(TestDygraphInplace): - def non_inplace_api_processing(self, var): return paddle.tanh(var) @@ -377,7 +380,6 @@ class TestDygraphInplaceTanh(TestDygraphInplace): class TestDygraphInplaceCeil(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.ceil() @@ -386,7 +388,6 @@ class TestDygraphInplaceCeil(TestDygraphInplace): class TestDygraphInplaceFloor(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.floor() @@ -395,7 +396,6 @@ class TestDygraphInplaceFloor(TestDygraphInplace): class TestDygraphInplaceExp(TestDygraphInplace): - def set_np_compare_func(self): self.np_compare = np.allclose @@ -407,7 +407,6 @@ class TestDygraphInplaceExp(TestDygraphInplace): class TestDygraphInplaceReciprocal(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.reciprocal() @@ -416,7 +415,6 @@ class TestDygraphInplaceReciprocal(TestDygraphInplace): class TestDygraphInplaceRound(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.round() @@ -425,7 +423,6 @@ class TestDygraphInplaceRound(TestDygraphInplace): class TestDygraphInplaceSqrt(TestDygraphInplace): - def init_data(self): self.input_var_numpy = np.random.uniform(0, 5, [10, 20, 1]) self.dtype = "float32" @@ -438,7 +435,6 @@ class TestDygraphInplaceSqrt(TestDygraphInplace): class TestDygraphInplaceRsqrt(TestDygraphInplaceSqrt): - def non_inplace_api_processing(self, var): return var.rsqrt() @@ -447,7 +443,6 @@ class TestDygraphInplaceRsqrt(TestDygraphInplaceSqrt): class TestDygraphInplaceClip(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.clip(0.6, 1.5) @@ -456,7 +451,6 @@ class TestDygraphInplaceClip(TestDygraphInplace): class TestDygraphInplaceScale(TestDygraphInplace): - def non_inplace_api_processing(self, var): return var.scale(scale=2.0, bias=3.0) @@ -465,7 +459,6 @@ class TestDygraphInplaceScale(TestDygraphInplace): class TestDygraphInplaceAdd(TestDygraphInplace): - def init_data(self): self.input_var_numpy = np.random.rand(2, 3, 4) self.dtype = "float32" @@ -481,7 +474,6 @@ class TestDygraphInplaceAdd(TestDygraphInplace): class TestDygraphInplaceSubtract(TestDygraphInplaceAdd): - def non_inplace_api_processing(self, var): input_var_2 = paddle.to_tensor(self.input_var_numpy_2) return var.subtract(input_var_2) @@ -492,7 +484,6 @@ class TestDygraphInplaceSubtract(TestDygraphInplaceAdd): class TestDygraphInplaceRemainder(TestDygraphInplaceAdd): - def non_inplace_api_processing(self, var): input_var_2 = paddle.to_tensor(self.input_var_numpy_2) return var.remainder(input_var_2) @@ -515,7 +506,6 @@ class TestDygraphInplaceRemainder(TestDygraphInplaceAdd): class TestLossIsInplaceVar(unittest.TestCase): - def func_test_loss_is_inplace_var(self): with paddle.fluid.dygraph.guard(): var_a = paddle.ones((2, 2)) @@ -546,7 +536,6 @@ class TestLossIsInplaceVar(unittest.TestCase): class TestContinuouslyInplace(unittest.TestCase): - def func_test_continuously_inplace(self): a = paddle.rand([2, 3]) a.stop_gradient = False @@ -565,7 +554,6 @@ class TestContinuouslyInplace(unittest.TestCase): class TestGetitemBeforeInplace(unittest.TestCase): - def test_getitem_before_inplace(self): with _test_eager_guard(): a = paddle.ones(shape=[4, 2, 3], dtype="float32") diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py index 
998480a9193ded0437b3a70402dcd1ee0d05f047..1048c6710d27ccdec223e3394d6dc594d8c5b9d9 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py @@ -22,7 +22,6 @@ import paddle class TestInplaceANBOpTraining(unittest.TestCase): - def setUp(self): self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 self.N = 4 @@ -31,26 +30,30 @@ class TestInplaceANBOpTraining(unittest.TestCase): self.W = 9 self.dshape = [self.N, self.C, self.H, self.W] - def build_program(self, - place, - layout, - seed, - only_forward=False, - activation="identity", - alpha=1.0, - use_cuda=False, - inplace=False): + def build_program( + self, + place, + layout, + seed, + only_forward=False, + activation="identity", + alpha=1.0, + use_cuda=False, + inplace=False, + ): main = fluid.Program() startup = fluid.Program() main.random_seed = seed startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data(name='input', - shape=self.dshape, - dtype=self.dtype, - append_batch_size=False, - stop_gradient=False) + data = fluid.layers.data( + name='input', + shape=self.dshape, + dtype=self.dtype, + append_batch_size=False, + stop_gradient=False, + ) if inplace: bn = fluid.layers.inplace_abn( data, @@ -61,7 +64,8 @@ class TestInplaceANBOpTraining(unittest.TestCase): moving_variance_name='bn_moving_variance', data_layout=layout, is_test=only_forward, - act_alpha=alpha) + act_alpha=alpha, + ) else: bn = fluid.layers.batch_norm( data, @@ -71,7 +75,8 @@ class TestInplaceANBOpTraining(unittest.TestCase): moving_variance_name='bn_moving_variance', data_layout=layout, is_test=only_forward, - in_place=inplace) + in_place=inplace, + ) if activation == 'leaky_relu': bn = fluid.layers.leaky_relu(bn, alpha) if activation == 'elu': @@ -80,7 +85,7 @@ class TestInplaceANBOpTraining(unittest.TestCase): # NOTE: in inplace mode input and output of bn # may have same name, multiply 1. to generate # a new Variable for fetch - bn = bn * 1. + bn = bn * 1.0 sigmoid = fluid.layers.sigmoid(bn) out = fluid.layers.reduce_sum(sigmoid) @@ -92,23 +97,28 @@ class TestInplaceANBOpTraining(unittest.TestCase): def compare(self, place, layout, only_forward, activation, alpha, use_cuda): seed = 10 os.environ['FLAGS_cudnn_deterministic'] = "1" - data = np.random.random(size=self.dshape).astype(self.dtype) * 4. 
- 2 + data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2 fetch_outs = [] fetch_names = [] for inplace in [False, True]: - main, startup, outs = self.build_program(place, - layout, - seed, - only_forward, - activation, - alpha, - inplace=inplace) + main, startup, outs = self.build_program( + place, + layout, + seed, + only_forward, + activation, + alpha, + inplace=inplace, + ) exe = fluid.Executor(place) exe.run(startup) fetch_name = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ @@ -124,50 +134,69 @@ class TestInplaceANBOpTraining(unittest.TestCase): fv.persistable = True build_strategy = fluid.BuildStrategy() - build_strategy.sync_batch_norm = use_cuda and \ - fluid.core.get_cuda_device_count() > 1 + build_strategy.sync_batch_norm = ( + use_cuda and fluid.core.get_cuda_device_count() > 1 + ) build_strategy.enable_inplace = inplace exec_strategy = fluid.ExecutionStrategy() exec_strategy.num_threads = 1 if os.name == 'nt' else 0 comp_prog1 = compiler.CompiledProgram(main).with_data_parallel( outs[0].name if not only_forward else None, build_strategy=build_strategy, - exec_strategy=exec_strategy) - bn_fetches = exe.run(program=main, - feed={'input': data}, - fetch_list=fetch_name) + exec_strategy=exec_strategy, + ) + bn_fetches = exe.run( + program=main, feed={'input': data}, fetch_list=fetch_name + ) fetch_outs.append(bn_fetches) fetch_names.append(fetch_name) - for bn_val, inplace_abn_val, name1, name2 in zip(*(fetch_outs + - fetch_names)): + for bn_val, inplace_abn_val, name1, name2 in zip( + *(fetch_outs + fetch_names) + ): np.testing.assert_allclose( bn_val, inplace_abn_val, rtol=1e-05, atol=0.01, - err_msg='Output (' + name1 + ':' + name2 + - ') has diff on {} with {} layout and {} activation. \n'.format( - place, layout, activation) + '\nBN ' + str(bn_val) + - '\n' + 'Inplace ABN ' + str(inplace_abn_val)) + err_msg='Output (' + + name1 + + ':' + + name2 + + ') has diff on {} with {} layout and {} activation. \n'.format( + place, layout, activation + ) + + '\nBN ' + + str(bn_val) + + '\n' + + 'Inplace ABN ' + + str(inplace_abn_val), + ) def test_op(self): use_cudas = [False, True] if core.is_compiled_with_cuda() else [False] - #use_cudas = [False] + # use_cudas = [False] for use_cuda in use_cudas: place = core.CUDAPlace(0) if use_cuda else core.CPUPlace() layouts = ["NCHW", "NHWC"] for layout in layouts: - for activation, alpha in zip([None, 'elu', 'leaky_relu'], - [0., 1., 0.02]): + for activation, alpha in zip( + [None, 'elu', 'leaky_relu'], [0.0, 1.0, 0.02] + ): for infer_only in [True, False]: - self.compare(place, layout, infer_only, activation, - alpha, use_cuda) + self.compare( + place, + layout, + infer_only, + activation, + alpha, + use_cuda, + ) def test_all_branches(self): seed = 10 os.environ['FLAGS_cudnn_deterministic'] = "1" - data = np.random.random(size=self.dshape).astype(self.dtype) * 4. 
- 2 + data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2 use_cudas = [False, True] if core.is_compiled_with_cuda() else [False] alpha = 0.1 layouts = ["NCHW", "NHWC"] @@ -176,8 +205,15 @@ class TestInplaceANBOpTraining(unittest.TestCase): for layout in layouts: for activation in ['identity', 'leaky_relu']: main, startup, outs = self.build_program( - place, layout, seed, False, activation, alpha, use_cuda, - True) + place, + layout, + seed, + False, + activation, + alpha, + use_cuda, + True, + ) exe = fluid.Executor(place) exe.run(startup) exe.run(program=main, feed={'input': data}) diff --git a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py index 825fcbd0620c3745b6d256fe7aff929fbd1b857e..4f0aebf6c3c074c939b0044ecb66a27aae9220e0 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py @@ -20,27 +20,31 @@ import numpy as np class ConvBNLayer(fluid.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1, - data_format="NCHW"): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + data_format="NCHW", + ): super(ConvBNLayer, self).__init__() - self._conv = paddle.nn.Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False, - data_format=data_format) - - self._batch_norm = paddle.nn.BatchNorm(num_filters, - data_layout=data_format) + self._conv = paddle.nn.Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + data_format=data_format, + ) + + self._batch_norm = paddle.nn.BatchNorm( + num_filters, data_layout=data_format + ) def forward(self, inputs): y = self._conv(inputs) @@ -57,10 +61,12 @@ def create_program(data_format="NCHW"): if data_format == "NHWC": x = paddle.transpose(x, [0, 2, 3, 1]) x = fluid.layers.prelu(x, mode="channel") - conv = ConvBNLayer(num_channels=3, - num_filters=3, - filter_size=1, - data_format=data_format) + conv = ConvBNLayer( + num_channels=3, + num_filters=3, + filter_size=1, + data_format=data_format, + ) y = conv(x) + x loss = fluid.layers.reduce_sum(y) @@ -72,9 +78,7 @@ def create_program(data_format="NCHW"): class TestInplaceAddto(unittest.TestCase): - def check_result(self, data_format="NCHW"): - def run_program(enable_addto): np.random.seed(10) paddle.seed(10) @@ -83,22 +87,27 @@ class TestInplaceAddto(unittest.TestCase): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) fluid.set_flags({"FLAGS_max_inplace_grad_add": 2}) loss, main, startup, w = create_program(data_format=data_format) - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) strategy = fluid.BuildStrategy() strategy.enable_addto = enable_addto compiled = fluid.CompiledProgram(main).with_data_parallel( - loss_name=loss.name, build_strategy=strategy) + loss_name=loss.name, build_strategy=strategy + ) exe.run(startup) - img = np.random.uniform(-128, 128, - [8, 3, 224, 224]).astype(np.float32) + img = np.random.uniform(-128, 128, [8, 3, 224, 224]).astype( + np.float32 + ) for i in range(10): - res = 
exe.run(compiled, - feed={'img': img}, - fetch_list=[loss.name, w.name]) + res = exe.run( + compiled, feed={'img': img}, fetch_list=[loss.name, w.name] + ) return res res1, w1 = run_program(True) diff --git a/python/paddle/fluid/tests/unittests/test_inplace_and_clear_gradient.py b/python/paddle/fluid/tests/unittests/test_inplace_and_clear_gradient.py index 7f4bf6a5fefb7d5cb22b394dcb21a0b9a3a65d22..aa7e837b6cd280513815629d37c28d9b8cbdd187 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_and_clear_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_and_clear_gradient.py @@ -21,7 +21,6 @@ paddle.disable_static() def clear_grad(w, a): - @paddle.no_grad() def warp(*_): assert w.grad is not None @@ -32,7 +31,6 @@ def clear_grad(w, a): class TestInplaceAndClearGradient(unittest.TestCase): - def test(self): paddle.set_device('cpu') diff --git a/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py b/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py index 09e8c6112367746e9a8511a3761b4b0b5dc2805d..d8f8b5dbc72ce1a98915a55e3db39fefa77ac6b5 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py @@ -21,7 +21,6 @@ from paddle.static import Program, program_guard # In static mode, inplace strategy will not be used in Inplace APIs. class TestStaticAutoGeneratedAPI(unittest.TestCase): - def setUp(self): paddle.enable_static() self.init_data() @@ -48,23 +47,22 @@ class TestStaticAutoGeneratedAPI(unittest.TestCase): out = self.executed_paddle_api(x) exe = paddle.static.Executor(place=paddle.CPUPlace()) - fetch_x, fetch_out = exe.run(main_prog, - feed={"x": self.np_x}, - fetch_list=[x, out]) + fetch_x, fetch_out = exe.run( + main_prog, feed={"x": self.np_x}, fetch_list=[x, out] + ) np.testing.assert_array_equal(fetch_x, self.np_x) self.assertTrue( - self.np_compare(fetch_out, self.executed_numpy_api(self.np_x))) + self.np_compare(fetch_out, self.executed_numpy_api(self.np_x)) + ) class TestStaticInplaceAutoGeneratedAPI(TestStaticAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.ceil_() class TestStaticFloorAPI(TestStaticAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.floor() @@ -73,13 +71,11 @@ class TestStaticFloorAPI(TestStaticAutoGeneratedAPI): class TestStaticInplaceFloorAPI(TestStaticFloorAPI): - def executed_paddle_api(self, x): return x.floor_() class TestStaticExpAPI(TestStaticAutoGeneratedAPI): - def set_np_compare_func(self): self.np_compare = np.allclose @@ -91,13 +87,11 @@ class TestStaticExpAPI(TestStaticAutoGeneratedAPI): class TestStaticInplaceExpAPI(TestStaticExpAPI): - def executed_paddle_api(self, x): return x.exp_() class TestStaticReciprocalAPI(TestStaticAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.reciprocal() @@ -106,13 +100,11 @@ class TestStaticReciprocalAPI(TestStaticAutoGeneratedAPI): class TestStaticInplaceReciprocalAPI(TestStaticReciprocalAPI): - def executed_paddle_api(self, x): return x.reciprocal_() class TestStaticRoundAPI(TestStaticAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.round() @@ -121,13 +113,11 @@ class TestStaticRoundAPI(TestStaticAutoGeneratedAPI): class TestStaticInplaceRoundAPI(TestStaticRoundAPI): - def executed_paddle_api(self, x): return x.round_() class TestStaticSqrtAPI(TestStaticAutoGeneratedAPI): - def init_data(self): self.dtype = 'float32' self.shape = [10, 20] @@ -144,13 +134,11 @@ class 
TestStaticSqrtAPI(TestStaticAutoGeneratedAPI): class TestStaticInplaceSqrtAPI(TestStaticSqrtAPI): - def executed_paddle_api(self, x): return x.sqrt_() class TestStaticRsqrtAPI(TestStaticSqrtAPI): - def executed_paddle_api(self, x): return x.rsqrt() @@ -159,14 +147,12 @@ class TestStaticRsqrtAPI(TestStaticSqrtAPI): class TestStaticInplaceRsqrtAPI(TestStaticRsqrtAPI): - def executed_paddle_api(self, x): return x.rsqrt_() # In dygraph mode, inplace strategy will be used in Inplace APIs. class TestDygraphAutoGeneratedAPI(unittest.TestCase): - def setUp(self): paddle.disable_static() self.init_data() @@ -191,17 +177,16 @@ class TestDygraphAutoGeneratedAPI(unittest.TestCase): out = self.executed_paddle_api(x) self.assertTrue( - self.np_compare(out.numpy(), self.executed_numpy_api(self.np_x))) + self.np_compare(out.numpy(), self.executed_numpy_api(self.np_x)) + ) class TestDygraphInplaceAutoGeneratedAPI(TestDygraphAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.ceil_() class TestDygraphFloorAPI(TestDygraphAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.floor() @@ -210,13 +195,11 @@ class TestDygraphFloorAPI(TestDygraphAutoGeneratedAPI): class TestDygraphInplaceFloorAPI(TestDygraphFloorAPI): - def executed_paddle_api(self, x): return x.floor_() class TestDygraphExpAPI(TestDygraphAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.exp() @@ -228,13 +211,11 @@ class TestDygraphExpAPI(TestDygraphAutoGeneratedAPI): class TestDygraphInplaceExpAPI(TestDygraphExpAPI): - def executed_paddle_api(self, x): return x.exp_() class TestDygraphReciprocalAPI(TestDygraphAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.reciprocal() @@ -243,13 +224,11 @@ class TestDygraphReciprocalAPI(TestDygraphAutoGeneratedAPI): class TestDygraphInplaceReciprocalAPI(TestDygraphReciprocalAPI): - def executed_paddle_api(self, x): return x.reciprocal_() class TestDygraphRoundAPI(TestDygraphAutoGeneratedAPI): - def executed_paddle_api(self, x): return x.round() @@ -258,13 +237,11 @@ class TestDygraphRoundAPI(TestDygraphAutoGeneratedAPI): class TestDygraphInplaceRoundAPI(TestDygraphRoundAPI): - def executed_paddle_api(self, x): return x.round_() class TestDygraphSqrtAPI(TestDygraphAutoGeneratedAPI): - def init_data(self): self.dtype = 'float32' self.shape = [10, 20] @@ -281,22 +258,19 @@ class TestDygraphSqrtAPI(TestDygraphAutoGeneratedAPI): class TestDygraphInplaceSqrtAPI(TestDygraphSqrtAPI): - def executed_paddle_api(self, x): return x.sqrt_() class TestDygraphRsqrtAPI(TestDygraphSqrtAPI): - def executed_paddle_api(self, x): return x.rsqrt() def executed_numpy_api(self, x): - return 1. 
/ np.sqrt(x) + return 1.0 / np.sqrt(x) class TestDygraphInplaceRsqrtAPI(TestDygraphRsqrtAPI): - def executed_paddle_api(self, x): return x.rsqrt_() diff --git a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py index 759454a2887fc170edcc4799162ef362fc302f8e..8e9b154a41cce03eab7076d9d55414b6ee5bb9c0 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py @@ -18,40 +18,41 @@ import unittest class TestSoftmaxWithXe(unittest.TestCase): - def setUp(self): self.initParameter() - self.m, self.n = np.random.random_integers(low=100, high=2000, - size=[2]).astype('int64') + self.m, self.n = np.random.random_integers( + low=100, high=2000, size=[2] + ).astype('int64') def initParameter(self): self.dtype = 'float32' self.soft_label = False - def softmax_with_xe(self, - x, - y, - place, - inplace=True, - numeric_stable_mode=True): + def softmax_with_xe( + self, x, y, place, inplace=True, numeric_stable_mode=True + ): m, n = x.shape with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.Scope()): - x_d = fluid.layers.data(name='x', - shape=[m, n], - dtype=self.dtype, - append_batch_size=False) + x_d = fluid.layers.data( + name='x', + shape=[m, n], + dtype=self.dtype, + append_batch_size=False, + ) y_d = fluid.layers.data( name='y', shape=[m, 1] if not self.soft_label else [m, n], dtype='int64' if not self.soft_label else self.dtype, - append_batch_size=False) + append_batch_size=False, + ) z_d, s_d = fluid.layers.softmax_with_cross_entropy( x_d, y_d, soft_label=self.soft_label, return_softmax=True, - numeric_stable_mode=numeric_stable_mode) + numeric_stable_mode=numeric_stable_mode, + ) exe = fluid.Executor(place) @@ -60,19 +61,18 @@ class TestSoftmaxWithXe(unittest.TestCase): build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = inplace prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - build_strategy=build_strategy, places=place) + fluid.default_main_program() + ).with_data_parallel( + build_strategy=build_strategy, places=place + ) fetch_list = [z_d.name, s_d.name] print('Inplace is {}'.format("ON" if inplace else "OFF")) - z, s = exe.run(prog, - feed={ - x_d.name: x, - y_d.name: y - }, - fetch_list=fetch_list) + z, s = exe.run( + prog, feed={x_d.name: x, y_d.name: y}, fetch_list=fetch_list + ) return z, s def main_with_place(self, place): @@ -82,39 +82,32 @@ class TestSoftmaxWithXe(unittest.TestCase): for a, b in x_range: x = ((b - a) * x + a).astype(self.dtype) if not self.soft_label: - y = np.random.random_integers(size=[self.m, 1], - low=0, - high=self.n - 1).astype('int64') + y = np.random.random_integers( + size=[self.m, 1], low=0, high=self.n - 1 + ).astype('int64') else: y = np.random.random(size=[self.m, self.n]).astype(self.dtype) - norm_y = np.broadcast_to(np.reshape(np.sum(y, axis=1), [-1, 1]), - y.shape) + norm_y = np.broadcast_to( + np.reshape(np.sum(y, axis=1), [-1, 1]), y.shape + ) y = y / norm_y - z1, s1 = self.softmax_with_xe(x, - y, - place, - inplace=False, - numeric_stable_mode=False) - z2, s2 = self.softmax_with_xe(x, - y, - place, - inplace=True, - numeric_stable_mode=False) + z1, s1 = self.softmax_with_xe( + x, y, place, inplace=False, numeric_stable_mode=False + ) + z2, s2 = self.softmax_with_xe( + x, y, place, inplace=True, numeric_stable_mode=False + ) 
self.assertTrue((z1 == z2).all()) self.assertTrue((s1 == s2).all()) - z1, s1 = self.softmax_with_xe(x, - y, - place, - inplace=False, - numeric_stable_mode=True) - z2, s2 = self.softmax_with_xe(x, - y, - place, - inplace=True, - numeric_stable_mode=True) + z1, s1 = self.softmax_with_xe( + x, y, place, inplace=False, numeric_stable_mode=True + ) + z2, s2 = self.softmax_with_xe( + x, y, place, inplace=True, numeric_stable_mode=True + ) self.assertTrue((z1 == z2).all()) self.assertTrue((s1 == s2).all()) @@ -125,21 +118,18 @@ class TestSoftmaxWithXe(unittest.TestCase): class TestSoftmaxWithXe1(TestSoftmaxWithXe): - def initParameter(self): self.dtype = 'float32' self.soft_label = True class TestSoftmaxWithXe2(TestSoftmaxWithXe): - def initParameter(self): self.dtype = 'float64' self.soft_label = False class TestSoftmaxWithXe3(TestSoftmaxWithXe): - def initParameter(self): self.dtype = 'float64' self.soft_label = True diff --git a/python/paddle/fluid/tests/unittests/test_input_spec.py b/python/paddle/fluid/tests/unittests/test_input_spec.py index bb31c4e7f51e6cddaec7363ffed7dffb7ce8bb96..9d805d64e93cc94dcb382546d605857f9215e878 100644 --- a/python/paddle/fluid/tests/unittests/test_input_spec.py +++ b/python/paddle/fluid/tests/unittests/test_input_spec.py @@ -21,15 +21,17 @@ import paddle import paddle.fluid as fluid from paddle.static import InputSpec from paddle.fluid.framework import convert_np_dtype_to_dtype_ -from paddle.fluid.dygraph.dygraph_to_static.utils import _compatible_non_tensor_spec +from paddle.fluid.dygraph.dygraph_to_static.utils import ( + _compatible_non_tensor_spec, +) class TestInputSpec(unittest.TestCase): - def test_default(self): tensor_spec = InputSpec([3, 4]) - self.assertEqual(tensor_spec.dtype, - convert_np_dtype_to_dtype_('float32')) + self.assertEqual( + tensor_spec.dtype, convert_np_dtype_to_dtype_('float32') + ) self.assertEqual(tensor_spec.name, None) def test_from_tensor(self): @@ -45,15 +47,17 @@ class TestInputSpec(unittest.TestCase): def test_from_numpy(self): x_numpy = np.ones([10, 12]) x_np_spec = InputSpec.from_numpy(x_numpy) - self.assertEqual(x_np_spec.dtype, - convert_np_dtype_to_dtype_(x_numpy.dtype)) + self.assertEqual( + x_np_spec.dtype, convert_np_dtype_to_dtype_(x_numpy.dtype) + ) self.assertEqual(x_np_spec.shape, x_numpy.shape) self.assertEqual(x_np_spec.name, None) x_numpy2 = np.array([1, 2, 3, 4]).astype('int64') x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64') - self.assertEqual(x_np_spec2.dtype, - convert_np_dtype_to_dtype_(x_numpy2.dtype)) + self.assertEqual( + x_np_spec2.dtype, convert_np_dtype_to_dtype_(x_numpy2.dtype) + ) self.assertEqual(x_np_spec2.shape, x_numpy2.shape) self.assertEqual(x_np_spec2.name, 'x_np_int64') @@ -84,7 +88,7 @@ class TestInputSpec(unittest.TestCase): # unbatch unbatch_spec = batch_tensor_spec.unbatch() - self.assertEqual(unbatch_spec.shape, (10, )) + self.assertEqual(unbatch_spec.shape, (10,)) # 1. 
`unbatch` requires len(shape) > 1 with self.assertRaises(ValueError): @@ -116,7 +120,6 @@ class TestInputSpec(unittest.TestCase): class NetWithNonTensorSpec(paddle.nn.Layer): - def __init__(self, in_num, out_num): super(NetWithNonTensorSpec, self).__init__() self.linear_1 = paddle.nn.Linear(in_num, out_num) @@ -157,7 +160,6 @@ class NetWithNonTensorSpec(paddle.nn.Layer): class TestNetWithNonTensorSpec(unittest.TestCase): - def setUp(self): self.in_num = 16 self.out_num = 16 @@ -243,7 +245,6 @@ class TestNetWithNonTensorSpec(unittest.TestCase): class NetWithNonTensorSpecPrune(paddle.nn.Layer): - def __init__(self, in_num, out_num): super(NetWithNonTensorSpecPrune, self).__init__() self.linear_1 = paddle.nn.Linear(in_num, out_num) @@ -263,7 +264,6 @@ class NetWithNonTensorSpecPrune(paddle.nn.Layer): class TestNetWithNonTensorSpecWithPrune(unittest.TestCase): - def setUp(self): self.in_num = 16 self.out_num = 16 @@ -311,7 +311,6 @@ class TestNetWithNonTensorSpecWithPrune(unittest.TestCase): class UnHashableObject: - def __init__(self, val): self.val = val @@ -320,7 +319,6 @@ class UnHashableObject: class TestCompatibleNonTensorSpec(unittest.TestCase): - def test_case(self): self.assertTrue(_compatible_non_tensor_spec([1, 2, 3], [1, 2, 3])) self.assertFalse(_compatible_non_tensor_spec([1, 2, 3], [1, 2])) @@ -328,8 +326,10 @@ class TestCompatibleNonTensorSpec(unittest.TestCase): # not supported unhashable object. self.assertTrue( - _compatible_non_tensor_spec(UnHashableObject(1), - UnHashableObject(1))) + _compatible_non_tensor_spec( + UnHashableObject(1), UnHashableObject(1) + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_install_check.py b/python/paddle/fluid/tests/unittests/test_install_check.py index e51079278db184540066ba56dd9c305620dd8ba0..15f2b5f3b7eedbeef80aa03e3f3d72afc5e607da 100644 --- a/python/paddle/fluid/tests/unittests/test_install_check.py +++ b/python/paddle/fluid/tests/unittests/test_install_check.py @@ -18,7 +18,6 @@ import os class TestInstallCheck(unittest.TestCase): - def test_paddle_fluid(self): paddle.fluid.install_check.run_check() diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py index 88bed96b35203281dae6227fc6c70dbd765b2f6e..50278505f98d66d13003ffb036eb3623a3ca94a9 100644 --- a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py @@ -62,10 +62,19 @@ def _reference_instance_norm_grad(x, d_y, scale, mean, var, epsilon): scale_tile = np.reshape(scale, (1, c, 1, 1)) scale_tile = np.tile(scale_tile, (n, 1, h, w)) - d_x = scale_tile * var_inv * ( - d_y - np.mean(d_y, axis=(2, 3), keepdims=True) - - (x - mean_tile) * var_inv * - np.mean(d_y * (x - mean_tile) * var_inv, axis=(2, 3), keepdims=True)) + d_x = ( + scale_tile + * var_inv + * ( + d_y + - np.mean(d_y, axis=(2, 3), keepdims=True) + - (x - mean_tile) + * var_inv + * np.mean( + d_y * (x - mean_tile) * var_inv, axis=(2, 3), keepdims=True + ) + ) + ) return d_x, d_scale, d_bias @@ -76,7 +85,6 @@ def _cal_mean_variance(x, epsilon, mean_shape): class TestInstanceNormOpTraining(unittest.TestCase): - def setUp(self): self.epsilon = 1e-5 self.init_test_case() @@ -85,23 +93,24 @@ class TestInstanceNormOpTraining(unittest.TestCase): self.shape = [2, 3, 4, 5] self.no_grad_set = set() self.fetch_list = [ - 'y', 'saved_mean', 'saved_variance', 'x@GRAD', 'scale@GRAD', - 'bias@GRAD' + 'y', + 'saved_mean', + 'saved_variance', + 
'x@GRAD', + 'scale@GRAD', + 'bias@GRAD', ] def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) def set_global_mean_var(self, mean_shape, x): mean, variance = _cal_mean_variance(x, self.epsilon, mean_shape) return mean, variance def test_forward_backward(self): - def test_with_place(place, shape): epsilon = self.epsilon n, c, h, w = shape[0], shape[1], shape[2], shape[3] @@ -116,12 +125,14 @@ class TestInstanceNormOpTraining(unittest.TestCase): d_y = np.random.random_sample(shape).astype(np.float32) y, saved_mean, variance_tmp = _reference_instance_norm_naive( - x, scale, bias, epsilon, mean, variance) + x, scale, bias, epsilon, mean, variance + ) saved_variance = 1 / np.sqrt(variance_tmp + epsilon) d_x, d_scale, d_bias = _reference_instance_norm_grad( - x, d_y, scale, saved_mean, saved_variance, epsilon) + x, d_y, scale, saved_mean, saved_variance, epsilon + ) var_dict = locals() var_dict['y@GRAD'] = d_y @@ -130,7 +141,12 @@ class TestInstanceNormOpTraining(unittest.TestCase): var_dict['bias@GRAD'] = d_bias var_names = [ - 'x', 'scale', 'bias', 'y', 'saved_mean', 'saved_variance' + 'x', + 'scale', + 'bias', + 'y', + 'saved_mean', + 'saved_variance', ] ground_truth = {name: var_dict[name] for name in var_names} @@ -138,31 +154,33 @@ class TestInstanceNormOpTraining(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) - in_op = block.append_op(type="instance_norm", - inputs={ - "X": block.var("x"), - "Scale": block.var("scale"), - "Bias": block.var("bias"), - }, - outputs={ - "Y": - block.var("y"), - "SavedMean": - block.var("saved_mean"), - "SavedVariance": - block.var("saved_variance") - }, - attrs={ - "epsilon": epsilon, - }) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) + in_op = block.append_op( + type="instance_norm", + inputs={ + "X": block.var("x"), + "Scale": block.var("scale"), + "Bias": block.var("bias"), + }, + outputs={ + "Y": block.var("y"), + "SavedMean": block.var("saved_mean"), + "SavedVariance": block.var("saved_variance"), + }, + attrs={ + "epsilon": epsilon, + }, + ) block.create_var(name="y@GRAD", dtype='float32', shape=y.shape) grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - in_op.desc, self.no_grad_set, []) + in_op.desc, self.no_grad_set, [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -177,12 +195,14 @@ class TestInstanceNormOpTraining(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in ['x', 'scale', 'bias', 'y@GRAD'] - }, - fetch_list=self.fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=self.fetch_list, + ) for id, name in enumerate(self.fetch_list): self.__assert_close(var_dict[name], out[id], name) @@ -191,14 +211,14 @@ class TestInstanceNormOpTraining(unittest.TestCase): places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): places.append(core.CUDAPlace(0)) for place in places: test_with_place(place, self.shape) class 
TestInstanceNormOpTrainingCase1(TestInstanceNormOpTraining): - def init_test_case(self): self.shape = [2, 3, 4, 5] self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -206,7 +226,6 @@ class TestInstanceNormOpTrainingCase1(TestInstanceNormOpTraining): class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining): - def init_test_case(self): self.shape = [20, 50, 4, 5] self.no_grad_set = set(['scale@GRAD', 'bias@GRAD']) @@ -214,12 +233,12 @@ class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining): class TestInstanceNormOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of instance_norm must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.instance_norm, x1) # the input dtype of instance_norm must be float32 or float64 @@ -228,24 +247,22 @@ class TestInstanceNormOpError(unittest.TestCase): class TestInstanceNormOpErrorCase1(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the first dimension of input for instance_norm must between [2d, 5d] - x = fluid.layers.data(name='x', - shape=[3], - dtype="float32", - append_batch_size=False) + x = fluid.layers.data( + name='x', shape=[3], dtype="float32", append_batch_size=False + ) self.assertRaises(ValueError, paddle.static.nn.instance_norm, x) class TestElasticNormOp(unittest.TestCase): - def init_test_case(self): self.epsilon = 1e-5 self.places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): self.places.append(core.CUDAPlace(0)) def test_norm(self): @@ -258,20 +275,19 @@ class TestElasticNormOp(unittest.TestCase): scale = np.ones(scale_shape).astype(np.float32) bias = np.zeros(scale_shape).astype(np.float32) mean, variance = _cal_mean_variance(inputs, self.epsilon, mean_shape) - out_np, _, _ = _reference_instance_norm_naive(inputs, scale, bias, - self.epsilon, mean, - variance) + out_np, _, _ = _reference_instance_norm_naive( + inputs, scale, bias, self.epsilon, mean, variance + ) for place in self.places: with fluid.dygraph.guard(place): - instance_norm = fluid.dygraph.InstanceNorm(5, - param_attr=False, - bias_attr=False) + instance_norm = fluid.dygraph.InstanceNorm( + 5, param_attr=False, bias_attr=False + ) outputs = instance_norm(to_variable(inputs)) - np.testing.assert_allclose(outputs.numpy(), - out_np, - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + outputs.numpy(), out_np, rtol=1e-05, atol=1e-06 + ) def test_eager_api(self): with _test_eager_guard(): @@ -279,12 +295,12 @@ class TestElasticNormOp(unittest.TestCase): class TestElasticNormOpCase2(unittest.TestCase): - def init_test_case(self): self.epsilon = 1e-5 self.places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): self.places.append(core.CUDAPlace(0)) def test_norm(self): @@ -297,20 +313,19 @@ class TestElasticNormOpCase2(unittest.TestCase): scale = np.ones(scale_shape).astype(np.float32) bias = np.zeros(scale_shape).astype(np.float32) mean, variance = _cal_mean_variance(inputs, self.epsilon, mean_shape) - out_np, _, _ = _reference_instance_norm_naive(inputs, scale, bias, - self.epsilon, mean, - variance) + out_np, _, _ = _reference_instance_norm_naive( + inputs, scale, bias, self.epsilon, mean, variance + ) for place in 
self.places: with fluid.dygraph.guard(place): - instance_norm = fluid.dygraph.InstanceNorm(3, - param_attr=True, - bias_attr=True) + instance_norm = fluid.dygraph.InstanceNorm( + 3, param_attr=True, bias_attr=True + ) outputs = instance_norm(to_variable(inputs)) - np.testing.assert_allclose(outputs.numpy(), - out_np, - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + outputs.numpy(), out_np, rtol=1e-05, atol=1e-06 + ) def test_eager_api(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py index cbb91795a50052e18f8d3abd72535dfc7c867b4b..66bbc3a338714e879cf3da956ff6765d1c780dc6 100644 --- a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py @@ -23,11 +23,11 @@ import paddle class TestInstanceNorm(unittest.TestCase): - def test_error(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): places.append(fluid.CUDAPlace(0)) for p in places: @@ -48,9 +48,9 @@ class TestInstanceNorm(unittest.TestCase): def weight_bias_false(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') - instance_norm3d = paddle.nn.InstanceNorm3D(1, - weight_attr=False, - bias_attr=False) + instance_norm3d = paddle.nn.InstanceNorm3D( + 1, weight_attr=False, bias_attr=False + ) with fluid.dygraph.guard(p): weight_bias_false() @@ -61,7 +61,8 @@ class TestInstanceNorm(unittest.TestCase): def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): places.append(fluid.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] @@ -86,7 +87,8 @@ class TestInstanceNorm(unittest.TestCase): def test_static(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( - "instance_norm"): + "instance_norm" + ): places.append(fluid.CUDAPlace(0)) for p in places: exe = fluid.Executor(p) diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py index b155df0498d093a2e632a0dfa21c94a753bb8408..f06d32170ba9b80e5d55918146869943ce8f3ef9 100644 --- a/python/paddle/fluid/tests/unittests/test_inverse_op.py +++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestInverseOp(OpTest): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -46,7 +45,6 @@ class TestInverseOp(OpTest): class TestInverseOpBatched(TestInverseOp): - def config(self): self.matrix_shape = [8, 4, 4] self.dtype = "float64" @@ -54,35 +52,30 @@ class TestInverseOpBatched(TestInverseOp): class TestInverseOpLarge(TestInverseOp): - def config(self): self.matrix_shape = [32, 32] self.dtype = "float64" self.python_api = paddle.tensor.math.inverse def test_grad(self): - self.check_grad(['Input'], - 'Output', - max_relative_error=1e-6, - check_eager=True) + self.check_grad( + ['Input'], 'Output', max_relative_error=1e-6, check_eager=True + ) class TestInverseOpFP32(TestInverseOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float32" self.python_api = paddle.tensor.math.inverse def test_grad(self): - self.check_grad(['Input'], - 'Output', - max_relative_error=1e-2, - check_eager=True) + self.check_grad( + ['Input'], 'Output', max_relative_error=1e-2, check_eager=True + ) class 
TestInverseOpBatchedFP32(TestInverseOpFP32): - def config(self): self.matrix_shape = [8, 4, 4] self.dtype = "float32" @@ -90,7 +83,6 @@ class TestInverseOpBatchedFP32(TestInverseOpFP32): class TestInverseOpLargeFP32(TestInverseOpFP32): - def config(self): self.matrix_shape = [32, 32] self.dtype = "float32" @@ -98,7 +90,6 @@ class TestInverseOpLargeFP32(TestInverseOpFP32): class TestInverseAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -113,12 +104,14 @@ class TestInverseAPI(unittest.TestCase): result_np = np.linalg.inv(input_np) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) - np.testing.assert_allclose(fetches[0], - np.linalg.inv(input_np), - rtol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.inv(input_np), rtol=1e-05 + ) def test_static(self): for place in self.places: @@ -130,13 +123,12 @@ class TestInverseAPI(unittest.TestCase): input_np = np.random.random([4, 4]).astype("float64") input = fluid.dygraph.to_variable(input_np) result = paddle.inverse(input) - np.testing.assert_allclose(result.numpy(), - np.linalg.inv(input_np), - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), np.linalg.inv(input_np), rtol=1e-05 + ) class TestInverseAPIError(unittest.TestCase): - def test_errors(self): input_np = np.random.random([4, 4]).astype("float64") @@ -159,7 +151,6 @@ class TestInverseAPIError(unittest.TestCase): class TestInverseSingularAPI(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -174,9 +165,11 @@ class TestInverseSingularAPI(unittest.TestCase): exe = fluid.Executor(place) try: - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) except RuntimeError as ex: print("The mat is singular") except ValueError as ex: diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py index f5feb8b78d604591dbadd70c9f6199acd42dd927..80c2ae1f92ff565796e1881528c8a7aa48782b21 100644 --- a/python/paddle/fluid/tests/unittests/test_io_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py @@ -22,7 +22,6 @@ import os class TestSaveLoadAPIError(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.save_dir = os.path.join(self.temp_dir.name, "fake_dir") @@ -51,16 +50,18 @@ class TestSaveLoadAPIError(unittest.TestCase): exe = fluid.Executor(place) # case 1: main_program type error when vars None with self.assertRaises(TypeError): - fluid.io.load_vars(executor=exe, - dirname=self.save_dir, - main_program="program") + fluid.io.load_vars( + executor=exe, dirname=self.save_dir, main_program="program" + ) # case 2: main_program type error when vars not None with self.assertRaises(TypeError): - fluid.io.load_vars(executor=exe, - dirname=self.save_dir, - main_program="program", - vars="vars") + fluid.io.load_vars( + executor=exe, + dirname=self.save_dir, + main_program="program", + vars="vars", + ) def test_load_vars_error(self): with _test_eager_guard(): @@ -69,7 +70,6 @@ class TestSaveLoadAPIError(unittest.TestCase): class TestSaveInferenceModelAPIError(unittest.TestCase): - def setUp(self): self.temp_dir = 
tempfile.TemporaryDirectory() @@ -87,13 +87,15 @@ class TestSaveInferenceModelAPIError(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(start_prog) with self.assertRaisesRegexp( - ValueError, "not involved in the target_vars calculation"): - fluid.io.save_inference_model(dirname=os.path.join( - self.temp_dir.name, 'model'), - feeded_var_names=['x', 'y'], - target_vars=[z], - executor=exe, - main_program=main_prog) + ValueError, "not involved in the target_vars calculation" + ): + fluid.io.save_inference_model( + dirname=os.path.join(self.temp_dir.name, 'model'), + feeded_var_names=['x', 'y'], + target_vars=[z], + executor=exe, + main_program=main_prog, + ) def test_useless_feeded_var_names(self): with _test_eager_guard(): @@ -102,7 +104,6 @@ class TestSaveInferenceModelAPIError(unittest.TestCase): class TestWhenTrainWithNoGrad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index 4d7c66993c0d9dc95da39dc7936c49750585d5c8..4324280efcbb60bf2dfedee20ea207739d9d9adc 100644 --- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestIOUSimilarityOp(OpTest): - def test_check_output(self): self.check_output(check_dygraph=False) @@ -34,7 +33,9 @@ class TestIOUSimilarityOp(OpTest): self.attrs = {"box_normalized": self.box_normalized} self.outputs = {'Out': self.output} - def _compute_iou(self, ): + def _compute_iou( + self, + ): for row in range(self.boxes1.shape[0]): for col in range(self.boxes2.shape[0]): xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] @@ -64,7 +65,6 @@ class TestIOUSimilarityOp(OpTest): class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp): - def test_check_output(self): self.check_output(check_dygraph=False) @@ -81,7 +81,6 @@ class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp): class TestIOUSimilarityOpWithBoxNormalized(TestIOUSimilarityOp): - def test_check_output(self): self.check_output(check_dygraph=False) diff --git a/python/paddle/fluid/tests/unittests/test_ir_graph.py b/python/paddle/fluid/tests/unittests/test_ir_graph.py index 1951db0395fa99cf7cc463e371eb4910b339d4a3..a8effa96920a841a5a0662acc10af10ab4107d8f 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_graph.py +++ b/python/paddle/fluid/tests/unittests/test_ir_graph.py @@ -24,8 +24,9 @@ class TestIRGraph(unittest.TestCase): def test_nodes(self): graph = build_graph() self.assertTrue( - {node.name() - for node in graph.nodes()} == {"x1", "x2", "out", "sum"}) + {node.name() for node in graph.nodes()} + == {"x1", "x2", "out", "sum"} + ) def test_has_set_get(self): graph = build_graph() @@ -85,8 +86,9 @@ class TestIRGraph(unittest.TestCase): graph = build_graph() nodes = graph.release_nodes() self.assertTrue(len(graph.nodes()) == 0) - self.assertTrue({node.name() - for node in nodes} == {"x1", "x2", "out", "sum"}) + self.assertTrue( + {node.name() for node in nodes} == {"x1", "x2", "out", "sum"} + ) def test_remove_node(self): graph = build_graph() @@ -94,8 +96,9 @@ class TestIRGraph(unittest.TestCase): for node in nodes: if node.name() == "sum": break - self.assertTrue({node.name() - for node in nodes} == {"x1", "x2", "out", "sum"}) + self.assertTrue( + {node.name() for node in nodes} == {"x1", "x2", "out", "sum"} + ) nodes.remove(node) self.assertTrue({node.name() for node in nodes} == {"x1", 
"x2", "out"}) diff --git a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py index 3fde50653b66e193b89f7e1139dec2966bc965d6..62ea08307ce1572c77f1a9d3930daf068926cb86 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py @@ -31,8 +31,10 @@ def fc_with_batchnorm(use_feed): hidden, size=200, act='tanh', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) hidden = fluid.layers.batch_norm(input=hidden) prediction = fluid.layers.fc(hidden, size=10, act='softmax') @@ -42,7 +44,6 @@ def fc_with_batchnorm(use_feed): class TestIrInplace(TestParallelExecutorBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -56,13 +57,11 @@ class TestIrInplace(TestParallelExecutorBase): label = np.ones(shape=[32, 1], dtype='int64') self.check_network_convergence( fc_with_batchnorm, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=DeviceType.CUDA, use_ir_memory_optimize=ir_memory_optimize, - enable_inplace=enable_inplace) + enable_inplace=enable_inplace, + ) def test_fc_with_batchnorm(self, delta=1e-3): loss00 = self._fc_with_batchnorm(False, False) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py index 9cc78982aab5793e2a206ea467fda86bace29dd3..657c267767dfa49deefd382a49eb7fef9ea3204a 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py @@ -29,11 +29,9 @@ from paddle.fluid.optimizer import MomentumOptimizer class TestIrMemoryOptimizeIfElseOp(unittest.TestCase): - - def check_network_convergence(self, - use_cuda=True, - use_mem_opt=False, - iter_num=5): + def check_network_convergence( + self, use_cuda=True, use_mem_opt=False, iter_num=5 + ): paddle.seed(100) paddle.framework.random._manual_program_seed(100) prog = Program() @@ -65,14 +63,17 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase): optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) - train_reader = paddle.batch(paddle.dataset.mnist.train(), - batch_size=200) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=200 + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) exec_strategy = fluid.ExecutionStrategy() - exec_strategy._use_device = core.DeviceType.CUDA if use_cuda else core.DeviceType.CPU + exec_strategy._use_device = ( + core.DeviceType.CUDA if use_cuda else core.DeviceType.CPU + ) build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = use_mem_opt @@ -81,7 +82,8 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase): train_cp = train_cp.with_data_parallel( loss_name=avg_loss.name, exec_strategy=exec_strategy, - build_strategy=build_strategy) + build_strategy=build_strategy, + ) fetch_list = [avg_loss.name] exe.run(startup_prog) @@ -94,12 +96,11 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase): y_data = np.array([x[1] for x in data]).astype("int64") y_data = y_data.reshape((y_data.shape[0], 1)) - outs = exe.run(train_cp, - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[avg_loss]) + outs = exe.run( + train_cp, + feed={'x': 
x_data, 'y': y_data}, + fetch_list=[avg_loss], + ) loop += 1 ret.append(outs[0]) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py index ed4ab08a506d7077738ef998de528fcabf2e9ec5..f661db6d15e507903c4323eb6f6051d1bcced8c1 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py @@ -20,23 +20,26 @@ import unittest from ir_memory_optimize_net_base import TestIrMemOptBase -def lstm_net(data, - label, - dict_dim, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2, - emb_lr=30.0): +def lstm_net( + data, + label, + dict_dim, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2, + emb_lr=30.0, +): emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr(learning_rate=emb_lr)) + param_attr=fluid.ParamAttr(learning_rate=emb_lr), + ) fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4) - lstm_h, c = fluid.layers.dynamic_lstm(input=fc0, - size=hid_dim * 4, - is_reverse=False) + lstm_h, c = fluid.layers.dynamic_lstm( + input=fc0, size=hid_dim * 4, is_reverse=False + ) lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max_tanh = fluid.layers.tanh(lstm_max) fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') @@ -47,7 +50,6 @@ def lstm_net(data, class TestIrMemOptRNN(TestIrMemOptBase): - def setUp(self): self.network = lstm_net diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py index d26a0a678940c6a94db708fbc14ea506375dcc84..d21156b43e8cc8f1ff4568d45a9571d98749746c 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py @@ -52,7 +52,6 @@ def fc_with_inplace_net(use_feed): class TestMNIST(TestParallelExecutorBase): - def _dummy_data(self): np.random.seed(5) img = np.random.random(size=[32, 784]).astype(np.float32) @@ -66,20 +65,16 @@ class TestMNIST(TestParallelExecutorBase): img, label = self._dummy_data() first_loss0, last_loss0, _ = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_ir_memory_optimize=False) + use_ir_memory_optimize=False, + ) first_loss1, last_loss1, _ = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_ir_memory_optimize=True) + use_ir_memory_optimize=True, + ) for loss in zip(first_loss0, first_loss1): self.assertAlmostEqual(loss[0], loss[1], delta=1e-6) for loss in zip(last_loss0, last_loss1): diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py index 8b7fe96805fff798dbdd6a86af3246160bdc9e6c..8d1191fddd5b3e545dda1064bba180d6049c877f 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -25,7 +25,6 @@ from test_parallel_executor_transformer import get_feed_data_reader, transformer # NOTE(dzhwinter): test diferent strategy colisions. # open the eager delete tensor strategy by default. 
class TestTransformerWithIR(TestParallelExecutorBase): - def test_main(self): if core.is_compiled_with_cuda(): # check python transpiler @@ -34,14 +33,16 @@ class TestTransformerWithIR(TestParallelExecutorBase): use_device=DeviceType.CUDA, feed_data_reader=get_feed_data_reader(), use_ir_memory_optimize=False, - iter=2) + iter=2, + ) # check IR memory optimize self.check_network_convergence( transformer, use_device=DeviceType.CUDA, feed_data_reader=get_feed_data_reader(), use_ir_memory_optimize=True, - iter=2) + iter=2, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_is_complex.py b/python/paddle/fluid/tests/unittests/test_is_complex.py index a441bd86296704b6246c55b4613602c9c08870cb..fd2e58eff6399bed62ebf73538b11ce7ab91b515 100644 --- a/python/paddle/fluid/tests/unittests/test_is_complex.py +++ b/python/paddle/fluid/tests/unittests/test_is_complex.py @@ -18,7 +18,6 @@ import unittest class TestIsComplex(unittest.TestCase): - def test_for_integer(self): x = paddle.arange(10) self.assertFalse(paddle.is_complex(x)) diff --git a/python/paddle/fluid/tests/unittests/test_is_empty_op.py b/python/paddle/fluid/tests/unittests/test_is_empty_op.py index edbc60c68e5071544985c7305b27336114fef91f..d6235f7abc4bb915af3fc6a5b775e9994e7e45d3 100644 --- a/python/paddle/fluid/tests/unittests/test_is_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_is_empty_op.py @@ -19,7 +19,6 @@ import paddle class TestEmpty(OpTest): - def setUp(self): self.op_type = "is_empty" self.inputs = {'X': np.array([1, 2, 3])} @@ -30,7 +29,6 @@ class TestEmpty(OpTest): class TestNotEmpty(TestEmpty): - def setUp(self): self.op_type = "is_empty" self.inputs = {'X': np.array([])} @@ -38,11 +36,11 @@ class TestNotEmpty(TestEmpty): class TestIsEmptyOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): input_data = np.random.random((3, 2)).astype("float64") def test_Variable(): @@ -53,25 +51,24 @@ class TestIsEmptyOpError(unittest.TestCase): def test_type(): # dtype must be float32, float64, int32, int64 - x3 = paddle.static.data(name="x3", - shape=[4, 32, 32], - dtype="bool") + x3 = paddle.static.data( + name="x3", shape=[4, 32, 32], dtype="bool" + ) res = paddle.is_empty(x=x3) self.assertRaises(TypeError, test_type) def test_name_type(): # name type must be string. 
- x4 = paddle.static.data(name="x4", - shape=[3, 2], - dtype="float32") + x4 = paddle.static.data( + name="x4", shape=[3, 2], dtype="float32" + ) res = paddle.is_empty(x=x4, name=1) self.assertRaises(TypeError, test_name_type) class TestIsEmptyOpDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() input = paddle.rand(shape=[4, 32, 32], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_is_integer.py b/python/paddle/fluid/tests/unittests/test_is_integer.py index a933e9fac6678518529a50e38372c49be575775e..9e0eae91292966069f01963e522378abd69ef912 100644 --- a/python/paddle/fluid/tests/unittests/test_is_integer.py +++ b/python/paddle/fluid/tests/unittests/test_is_integer.py @@ -18,7 +18,6 @@ import unittest class TestIsInteger(unittest.TestCase): - def test_for_integer(self): x = paddle.arange(10) self.assertTrue(paddle.is_integer(x)) diff --git a/python/paddle/fluid/tests/unittests/test_is_tensor.py b/python/paddle/fluid/tests/unittests/test_is_tensor.py index deef1225cd06a47ec3b79fa4bbe7d973b1b35ce7..b91b5aaf3e70227ac9ab0ad4f5ad9fcd4dc687e5 100644 --- a/python/paddle/fluid/tests/unittests/test_is_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_is_tensor.py @@ -19,24 +19,20 @@ DELTA = 0.00001 class TestIsTensorApi(unittest.TestCase): - def test_is_tensor_real(self, dtype="float32"): - """Test is_tensor api with a real tensor - """ + """Test is_tensor api with a real tensor""" paddle.disable_static() x = paddle.rand([3, 2, 4], dtype=dtype) self.assertTrue(paddle.is_tensor(x)) def test_is_tensor_list(self, dtype="float32"): - """Test is_tensor api with a list - """ + """Test is_tensor api with a list""" paddle.disable_static() x = [1, 2, 3] self.assertFalse(paddle.is_tensor(x)) def test_is_tensor_number(self, dtype="float32"): - """Test is_tensor api with a number - """ + """Test is_tensor api with a number""" paddle.disable_static() x = 5 self.assertFalse(paddle.is_tensor(x)) diff --git a/python/paddle/fluid/tests/unittests/test_isclose_op.py b/python/paddle/fluid/tests/unittests/test_isclose_op.py index 04b7fbe54e7eec24027a443ec3b0bdda8725af50..5d53c337ce18cefa1ac795cb0539bd2bd404372f 100644 --- a/python/paddle/fluid/tests/unittests/test_isclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_isclose_op.py @@ -19,9 +19,8 @@ import paddle class TestIscloseOp(OpTest): - def set_args(self): - self.input = np.array([10000., 1e-07]).astype("float32") + self.input = np.array([10000.0, 1e-07]).astype("float32") self.other = np.array([10000.1, 1e-08]).astype("float32") self.rtol = np.array([1e-05]).astype("float64") self.atol = np.array([1e-08]).astype("float64") @@ -36,18 +35,21 @@ class TestIscloseOp(OpTest): 'Input': self.input, 'Other': self.other, "Rtol": self.rtol, - "Atol": self.atol + "Atol": self.atol, } self.attrs = {'equal_nan': self.equal_nan} self.outputs = { - 'Out': - np.array([ - np.isclose(self.inputs['Input'], - self.inputs['Other'], - rtol=self.rtol, - atol=self.atol, - equal_nan=self.equal_nan) - ]) + 'Out': np.array( + [ + np.isclose( + self.inputs['Input'], + self.inputs['Other'], + rtol=self.rtol, + atol=self.atol, + equal_nan=self.equal_nan, + ) + ] + ) } def test_check_output(self): @@ -55,9 +57,7 @@ class TestIscloseOp(OpTest): class TestIscloseOpException(TestIscloseOp): - def test_check_output(self): - def test_rtol_num(): self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64") self.inputs['Atol'] = np.array([1e-08]).astype("float64") @@ -88,9 +88,8 @@ class TestIscloseOpException(TestIscloseOp): class 
TestIscloseOpSmallNum(TestIscloseOp): - def set_args(self): - self.input = np.array([10000., 1e-08]).astype("float32") + self.input = np.array([10000.0, 1e-08]).astype("float32") self.other = np.array([10000.1, 1e-09]).astype("float32") self.rtol = np.array([1e-05]).astype("float64") self.atol = np.array([1e-08]).astype("float64") @@ -98,7 +97,6 @@ class TestIscloseOpSmallNum(TestIscloseOp): class TestIscloseOpNanFalse(TestIscloseOp): - def set_args(self): self.input = np.array([1.0, float('nan')]).astype("float32") self.other = np.array([1.0, float('nan')]).astype("float32") @@ -108,7 +106,6 @@ class TestIscloseOpNanFalse(TestIscloseOp): class TestIscloseOpNanTrue(TestIscloseOp): - def set_args(self): self.input = np.array([1.0, float('nan')]).astype("float32") self.other = np.array([1.0, float('nan')]).astype("float32") @@ -118,7 +115,6 @@ class TestIscloseOpNanTrue(TestIscloseOp): class TestIscloseStatic(unittest.TestCase): - def test_api_case(self): paddle.enable_static() x_data = np.random.rand(10, 10) @@ -127,24 +123,23 @@ class TestIscloseStatic(unittest.TestCase): if paddle.fluid.core.is_compiled_with_cuda(): places.append(paddle.fluid.CUDAPlace(0)) for place in places: - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') result = paddle.isclose(x, y) exe = paddle.fluid.Executor(place) - fetches = exe.run(paddle.fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data - }, - fetch_list=[result]) + fetches = exe.run( + paddle.fluid.default_main_program(), + feed={"x": x_data, "y": y_data}, + fetch_list=[result], + ) expected_out = np.isclose(x_data, y_data) self.assertTrue((fetches[0] == expected_out).all(), True) class TestIscloseDygraph(unittest.TestCase): - def test_api_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -162,13 +157,13 @@ class TestIscloseDygraph(unittest.TestCase): class TestIscloseError(unittest.TestCase): - def test_input_dtype(self): paddle.enable_static() def test_x_dtype(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') result = paddle.isclose(x, y) @@ -176,8 +171,9 @@ class TestIscloseError(unittest.TestCase): self.assertRaises(TypeError, test_x_dtype) def test_y_dtype(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32') result = paddle.isclose(x, y) @@ -206,7 +202,6 @@ class TestIscloseError(unittest.TestCase): class TestIscloseOpFloat32(TestIscloseOp): - def set_args(self): self.input = np.array([10.1]).astype("float32") self.other = np.array([10]).astype("float32") @@ -216,7 +211,6 @@ class TestIscloseOpFloat32(TestIscloseOp): class TestIscloseOpFloat64(TestIscloseOp): - def set_args(self): self.input = np.array([10.1]).astype("float64") self.other = np.array([10]).astype("float64") @@ -229,7 +223,6 @@ class 
TestIscloseOpFloat64(TestIscloseOp): class TestIscloseOpLargeDimInput(TestIscloseOp): - def set_args(self): self.input = np.array(np.zeros([2048, 1024])).astype("float64") self.other = np.array(np.zeros([2048, 1024])).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py index f2fa252ff0a6d01ad35a8845b8f9cc2b438a0b7c..e2fa9f67b53312df45aef154a5d11ee61cac017e 100644 --- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py +++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py @@ -21,7 +21,6 @@ from op_test import OpTest class TestInf(OpTest): - def setUp(self): self.op_type = "isinf" self.dtype = np.float32 @@ -42,9 +41,7 @@ class TestInf(OpTest): class TestRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.isfinite([10]) @@ -57,16 +54,15 @@ class TestRaiseError(unittest.TestCase): self.assertRaises(TypeError, test_dtype) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16Inf(TestInf): - def init_dtype(self): self.dtype = np.float16 class TestNAN(OpTest): - def setUp(self): self.op_type = "isnan" self.dtype = np.float32 @@ -86,16 +82,15 @@ class TestNAN(OpTest): self.check_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16NAN(TestNAN): - def init_dtype(self): self.dtype = np.float16 class TestIsfinite(OpTest): - def setUp(self): self.op_type = "isfinite" self.dtype = np.float32 @@ -116,16 +111,15 @@ class TestIsfinite(OpTest): self.check_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16Isfinite(TestIsfinite): - def init_dtype(self): self.dtype = np.float16 class BadInputTest(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py index 252e43b3423dcf96d1ffb0c6aff9fb04f2963fec..e3d094890ac7c6206bf20cb98a3e988ba4d3c550 100644 --- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py @@ -31,9 +31,9 @@ def run_static(x_np, dtype, op_str, use_gpu=False): x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype) res = getattr(paddle.tensor, op_str)(x) exe.run(startup_program) - static_result = exe.run(main_program, - feed={'x': x_np}, - fetch_list=[res]) + static_result = exe.run( + main_program, feed={'x': x_np}, fetch_list=[res] + ) return static_result @@ -59,15 +59,16 @@ def run_eager(x_np, op_str, use_gpu=True): return dygraph_result -def np_data_generator(low, high, np_shape, type, sv_list, op_str, *args, - **kwargs): +def np_data_generator( + low, high, np_shape, type, sv_list, op_str, *args, **kwargs +): x_np = np.random.uniform(low, high, np_shape).astype(getattr(np, type)) # x_np.shape[0] >= len(sv_list) if type in ['float16', 'float32', 'float64']: for i, v in enumerate(sv_list): x_np[i] = v ori_shape = x_np.shape - x_np = x_np.reshape((np.product(ori_shape), )) + x_np = x_np.reshape((np.product(ori_shape),)) np.random.shuffle(x_np) x_np = x_np.reshape(ori_shape) result_np 
= getattr(np, op_str)(x_np) @@ -80,35 +81,35 @@ TEST_META_DATA = [ 'high': 1, 'np_shape': [8, 17, 5, 6, 7], 'type': 'float16', - 'sv_list': [np.inf, np.nan] + 'sv_list': [np.inf, np.nan], }, { 'low': 0.1, 'high': 1, 'np_shape': [11, 17], 'type': 'float32', - 'sv_list': [np.inf, np.nan] + 'sv_list': [np.inf, np.nan], }, { 'low': 0.1, 'high': 1, 'np_shape': [2, 3, 4, 5], 'type': 'float64', - 'sv_list': [np.inf, np.nan] + 'sv_list': [np.inf, np.nan], }, { 'low': 0, 'high': 100, 'np_shape': [11, 17, 10], 'type': 'int32', - 'sv_list': [np.inf, np.nan] + 'sv_list': [np.inf, np.nan], }, { 'low': 0, 'high': 999, 'np_shape': [132], 'type': 'int64', - 'sv_list': [np.inf, np.nan] + 'sv_list': [np.inf, np.nan], }, ] @@ -127,7 +128,6 @@ def test(test_case, op_str, use_gpu=False): class TestCPUNormal(unittest.TestCase): - def test_inf(self): test(self, 'isinf') @@ -139,7 +139,6 @@ class TestCPUNormal(unittest.TestCase): class TestCUDANormal(unittest.TestCase): - def test_inf(self): test(self, 'isinf', True) @@ -151,7 +150,6 @@ class TestCUDANormal(unittest.TestCase): class TestError(unittest.TestCase): - def test_bad_input(self): paddle.enable_static() with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_jit_layer.py b/python/paddle/fluid/tests/unittests/test_jit_layer.py index 5a03e0ac3b80ee71bfc89857269582487950932e..1713e61f9de618bb63a129bf2233b9731400f679 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_layer.py +++ b/python/paddle/fluid/tests/unittests/test_jit_layer.py @@ -20,13 +20,14 @@ import numpy as np from paddle.static import InputSpec from paddle.fluid.framework import _dygraph_place_guard from paddle.jit.layer import Layer -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, +) paddle.seed(1) class Net(paddle.nn.Layer): - def __init__(self): super(Net, self).__init__() self.fc1 = paddle.nn.Linear(4, 4) @@ -50,7 +51,6 @@ class Net(paddle.nn.Layer): class TestMultiLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -81,20 +81,19 @@ class TestMultiLoad(unittest.TestCase): class SaveLinear(paddle.nn.Layer): - def __init__(self): super().__init__() self.linear = paddle.nn.Linear(80, 80) @paddle.jit.to_static( - input_spec=[InputSpec(shape=[None, 80], dtype='float32')]) + input_spec=[InputSpec(shape=[None, 80], dtype='float32')] + ) def forward(self, x): out = self.linear(x) return out class TestMKLOutput(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() diff --git a/python/paddle/fluid/tests/unittests/test_jit_pre_save_hooks.py b/python/paddle/fluid/tests/unittests/test_jit_pre_save_hooks.py index 1d3a35b6173527f16ba6e30c5b9d313b94b7ac3e..9b9ce8297ba4dfbae22a69c7531d3a40ceb69e18 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_pre_save_hooks.py +++ b/python/paddle/fluid/tests/unittests/test_jit_pre_save_hooks.py @@ -16,15 +16,17 @@ import unittest import paddle -from paddle.fluid.dygraph.jit import _run_save_pre_hooks, _clear_save_pre_hooks, _register_save_pre_hook +from paddle.fluid.dygraph.jit import ( + _run_save_pre_hooks, + _clear_save_pre_hooks, + _register_save_pre_hook, +) _counter = 0 class TestPreSaveHooks(unittest.TestCase): - def test_pre_save_hook_functions(self): - def fake_func(*args, **kwgs): global _counter _counter += 1 @@ -32,13 +34,15 @@ class TestPreSaveHooks(unittest.TestCase): remove_handler = 
_register_save_pre_hook(fake_func) self.assertEqual(len(paddle.fluid.dygraph.jit._save_pre_hooks), 1) self.assertTrue( - paddle.fluid.dygraph.jit._save_pre_hooks[0] is fake_func) + paddle.fluid.dygraph.jit._save_pre_hooks[0] is fake_func + ) # Test of avoiding redundancy hanging remove_handler = _register_save_pre_hook(fake_func) self.assertEqual(len(paddle.fluid.dygraph.jit._save_pre_hooks), 1) self.assertTrue( - paddle.fluid.dygraph.jit._save_pre_hooks[0] is fake_func) + paddle.fluid.dygraph.jit._save_pre_hooks[0] is fake_func + ) remove_handler.remove() self.assertEqual(len(paddle.fluid.dygraph.jit._save_pre_hooks), 0) diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index efb5a196f81835338aed38217ed1b7bd9c649c02..2f6405e7aa6bf9fe6600b1b3e1d78f1c795b8b3b 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -34,7 +34,6 @@ SEED = 10 def random_batch_reader(input_size, label_size): - def _get_random_inputs_and_labels(input_size, label_size): np.random.seed(SEED) input = np.random.random(size=input_size).astype('float32') @@ -44,14 +43,14 @@ def random_batch_reader(input_size, label_size): def __reader__(): for _ in range(BATCH_NUM): batch_input, batch_label = _get_random_inputs_and_labels( - [BATCH_SIZE, input_size], [BATCH_SIZE, label_size]) + [BATCH_SIZE, input_size], [BATCH_SIZE, label_size] + ) yield batch_input, batch_label return __reader__ class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNet, self).__init__() self._linear = Linear(in_size, out_size) @@ -62,7 +61,6 @@ class LinearNet(fluid.dygraph.Layer): class LinearNetWithInputSpec(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetWithInputSpec, self).__init__() self._linear = Linear(in_size, out_size) @@ -73,7 +71,6 @@ class LinearNetWithInputSpec(fluid.dygraph.Layer): class LinearNetNotDeclarative(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetNotDeclarative, self).__init__() self._linear = Linear(in_size, out_size) @@ -83,15 +80,16 @@ class LinearNetNotDeclarative(fluid.dygraph.Layer): class LinerNetWithLabel(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(LinerNetWithLabel, self).__init__() self._linear = Linear(in_size, out_size) - @declarative(input_spec=[ - InputSpec(shape=[None, 784], dtype='float32', name="image"), - InputSpec(shape=[None, 1], dtype='int64', name="label") - ]) + @declarative( + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image"), + InputSpec(shape=[None, 1], dtype='int64', name="label"), + ] + ) def forward(self, x, label): out = self._linear(x) loss = fluid.layers.cross_entropy(out, label) @@ -100,15 +98,16 @@ class LinerNetWithLabel(paddle.nn.Layer): class LinerNetWithPruneInput(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(LinerNetWithPruneInput, self).__init__() self._linear = Linear(in_size, out_size) - @declarative(input_spec=[ - InputSpec(shape=[None, 784], dtype='float32', name="image"), - InputSpec(shape=[None, 1], dtype='int64', name="label") - ]) + @declarative( + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image"), + InputSpec(shape=[None, 1], dtype='int64', name="label"), + ] + ) def forward(self, x, label): out = self._linear(x) loss = fluid.layers.cross_entropy(out, label) @@ -117,22 +116,22 @@ class LinerNetWithPruneInput(paddle.nn.Layer): class 
LinerNetWithUselessInput(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(LinerNetWithUselessInput, self).__init__() self._linear = Linear(in_size, out_size) - @declarative(input_spec=[ - InputSpec(shape=[None, 784], dtype='float32', name="image"), - InputSpec(shape=[None, 1], dtype='int64', name="label") - ]) + @declarative( + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image"), + InputSpec(shape=[None, 1], dtype='int64', name="label"), + ] + ) def forward(self, x, label): out = self._linear(x) return out class LinearNetReturnLoss(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetReturnLoss, self).__init__() self._linear = Linear(in_size, out_size) @@ -146,16 +145,17 @@ class LinearNetReturnLoss(fluid.dygraph.Layer): class LinearNetMultiInput(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetMultiInput, self).__init__() self._linear1 = Linear(in_size, out_size) self._linear2 = Linear(in_size, out_size) - @declarative(input_spec=[ - InputSpec([None, 8], dtype='float32'), - InputSpec([None, 8], dtype='float32') - ]) + @declarative( + input_spec=[ + InputSpec([None, 8], dtype='float32'), + InputSpec([None, 8], dtype='float32'), + ] + ) def forward(self, x, y): x_out = self._linear1(x) y_out = self._linear2(y) @@ -164,14 +164,17 @@ class LinearNetMultiInput(fluid.dygraph.Layer): class LinearNetMultiInput1(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetMultiInput1, self).__init__() self._linear1 = Linear(in_size, out_size) self._linear2 = Linear(in_size, out_size) - @declarative(input_spec=(InputSpec([None, 8], dtype='float32'), - InputSpec([None, 8], dtype='float32'))) + @declarative( + input_spec=( + InputSpec([None, 8], dtype='float32'), + InputSpec([None, 8], dtype='float32'), + ) + ) def forward(self, x, y): x_out = self._linear1(x) y_out = self._linear2(y) @@ -180,7 +183,6 @@ class LinearNetMultiInput1(fluid.dygraph.Layer): class MultiLoadingLinearNet(fluid.dygraph.Layer): - def __init__(self, size, model_path): super(MultiLoadingLinearNet, self).__init__() self._linear = Linear(size, size) @@ -197,7 +199,6 @@ class MultiLoadingLinearNet(fluid.dygraph.Layer): class LinearNetReturnHidden(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetReturnHidden, self).__init__() self._linear_1 = Linear(in_size, out_size) @@ -212,7 +213,6 @@ class LinearNetReturnHidden(fluid.dygraph.Layer): class LinearNetWithNestOut(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetWithNestOut, self).__init__() self._linear_1 = Linear(in_size, out_size) @@ -228,18 +228,16 @@ class LinearNetWithNestOut(fluid.dygraph.Layer): class LinearNetWithDictInput(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(LinearNetWithDictInput, self).__init__() self._linear = Linear(in_size, out_size) - @paddle.jit.to_static(input_spec=[{ - 'img': - InputSpec(shape=[None, 8], dtype='float32', name='img') - }, { - 'label': - InputSpec(shape=[None, 1], dtype='int64', name='label') - }]) + @paddle.jit.to_static( + input_spec=[ + {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')}, + {'label': InputSpec(shape=[None, 1], dtype='int64', name='label')}, + ] + ) def forward(self, img, label): out = self._linear(img['img']) # not return loss to avoid prune output @@ -248,7 +246,6 @@ class LinearNetWithDictInput(paddle.nn.Layer): class LinearNetWithDictInputNoPrune(paddle.nn.Layer): - def __init__(self, in_size, out_size): 
super(LinearNetWithDictInputNoPrune, self).__init__() self._linear = Linear(in_size, out_size) @@ -259,7 +256,6 @@ class LinearNetWithDictInputNoPrune(paddle.nn.Layer): class EmptyLayer(paddle.nn.Layer): - def __init__(self): super(EmptyLayer, self).__init__() @@ -269,7 +265,6 @@ class EmptyLayer(paddle.nn.Layer): class NoParamLayer(paddle.nn.Layer): - def __init__(self): super(NoParamLayer, self).__init__() @@ -279,7 +274,6 @@ class NoParamLayer(paddle.nn.Layer): class LinearNetWithMultiStaticFunc(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): super(LinearNetWithMultiStaticFunc, self).__init__() self._linear_0 = Linear(in_size, out_size) @@ -301,12 +295,14 @@ class LinearNetWithMultiStaticFunc(fluid.dygraph.Layer): def train(layer, input_size=784, label_size=1): # create optimizer - sgd = fluid.optimizer.SGDOptimizer(learning_rate=0.01, - parameter_list=layer.parameters()) + sgd = fluid.optimizer.SGDOptimizer( + learning_rate=0.01, parameter_list=layer.parameters() + ) # create data loader train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader( - input_size, label_size)) + train_loader.set_batch_generator( + random_batch_reader(input_size, label_size) + ) # train for data in train_loader(): img, label = data @@ -325,12 +321,14 @@ def train(layer, input_size=784, label_size=1): def train_with_label(layer, input_size=784, label_size=1): # create optimizer - sgd = fluid.optimizer.SGDOptimizer(learning_rate=0.01, - parameter_list=layer.parameters()) + sgd = fluid.optimizer.SGDOptimizer( + learning_rate=0.01, parameter_list=layer.parameters() + ) # create data loader train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader( - input_size, label_size)) + train_loader.set_batch_generator( + random_batch_reader(input_size, label_size) + ) # train for data in train_loader(): img, label = data @@ -345,11 +343,11 @@ def train_with_label(layer, input_size=784, label_size=1): class TestJitSaveLoad(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "test_jit_save_load/model") + self.model_path = os.path.join( + self.temp_dir.name, "test_jit_save_load/model" + ) # enable dygraph mode fluid.enable_dygraph() # config seed @@ -364,9 +362,9 @@ class TestJitSaveLoad(unittest.TestCase): example_inputs, layer, _ = train(layer) final_model_path = model_path if model_path else self.model_path orig_input_types = [type(x) for x in example_inputs] - paddle.jit.save(layer=layer, - path=final_model_path, - input_spec=example_inputs) + paddle.jit.save( + layer=layer, path=final_model_path, input_spec=example_inputs + ) new_input_types = [type(x) for x in example_inputs] self.assertEqual(orig_input_types, new_input_types) return layer @@ -385,10 +383,11 @@ class TestJitSaveLoad(unittest.TestCase): infer_layer.eval() # inference & compare x = fluid.dygraph.to_variable( - np.random.random((1, 784)).astype('float32')) + np.random.random((1, 784)).astype('float32') + ) np.testing.assert_array_equal( - train_layer(x).numpy(), - infer_layer(x).numpy()) + train_layer(x).numpy(), infer_layer(x).numpy() + ) def load_and_finetune(self, train_layer, load_train_layer): train_layer.train() @@ -396,8 +395,9 @@ class TestJitSaveLoad(unittest.TestCase): # train & compare img0, _, train_loss = train(train_layer) img1, _, load_train_loss = train(load_train_layer) - np.testing.assert_array_equal(train_loss.numpy(), 
- load_train_loss.numpy()) + np.testing.assert_array_equal( + train_loss.numpy(), load_train_loss.numpy() + ) def load_dygraph_state_dict(self, train_layer): train_layer.eval() @@ -411,26 +411,28 @@ class TestJitSaveLoad(unittest.TestCase): new_layer.eval() # inference & compare x = fluid.dygraph.to_variable( - np.random.random((1, 784)).astype('float32')) + np.random.random((1, 784)).astype('float32') + ) np.testing.assert_array_equal( - train_layer(x).numpy(), - new_layer(x).numpy()) + train_layer(x).numpy(), new_layer(x).numpy() + ) def test_load_dygraph_no_path(self): - model_path = os.path.join(self.temp_dir.name, - "test_jit_save_load.no_path/model_path") + model_path = os.path.join( + self.temp_dir.name, "test_jit_save_load.no_path/model_path" + ) with self.assertRaises(ValueError): model_dict, _ = fluid.dygraph.load_dygraph(model_path) def test_jit_load_no_path(self): - path = os.path.join(self.temp_dir.name, - "test_jit_save_load.no_path/model_path") + path = os.path.join( + self.temp_dir.name, "test_jit_save_load.no_path/model_path" + ) with self.assertRaises(ValueError): loaded_layer = paddle.jit.load(path) class TestSaveLoadWithNestOut(unittest.TestCase): - def setUp(self): # enable dygraph mode fluid.enable_dygraph() @@ -441,7 +443,8 @@ class TestSaveLoadWithNestOut(unittest.TestCase): def test_nest_output(self): x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) net = LinearNetWithNestOut(8, 8) dy_outs = flatten(net(x)) @@ -455,13 +458,12 @@ class TestSaveLoadWithNestOut(unittest.TestCase): self.assertTrue(len(dy_outs) == 4) for dy_out, load_out in zip(dy_outs, load_outs): - np.testing.assert_allclose(dy_out.numpy(), - load_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + dy_out.numpy(), load_out.numpy(), rtol=1e-05 + ) class TestSaveLoadWithDictInput(unittest.TestCase): - def test_dict_input(self): # NOTE: This net cannot be executed, it is just # a special case for exporting models in model validation @@ -473,17 +475,17 @@ class TestSaveLoadWithDictInput(unittest.TestCase): # {'label': var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)}) self.assertEqual(len(net.forward.concrete_program.inputs), 3) temp_dir = tempfile.TemporaryDirectory() - path = os.path.join(temp_dir.name, - "test_jit_save_load_with_dict_input/model") + path = os.path.join( + temp_dir.name, "test_jit_save_load_with_dict_input/model" + ) # prune inputs - paddle.jit.save(layer=net, - path=path, - input_spec=[{ - 'img': - InputSpec(shape=[None, 8], - dtype='float32', - name='img') - }]) + paddle.jit.save( + layer=net, + path=path, + input_spec=[ + {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')} + ], + ) img = paddle.randn(shape=[4, 8], dtype='float32') loaded_net = paddle.jit.load(path) @@ -496,25 +498,27 @@ class TestSaveLoadWithDictInput(unittest.TestCase): class TestSaveLoadWithDictInputNoPrune(unittest.TestCase): - def test_dict_input(self): net = LinearNetWithDictInputNoPrune(8, 8) temp_dir = tempfile.TemporaryDirectory() path = os.path.join( - temp_dir.name, "test_jit_save_load_with_dict_input_no_prune/model") + temp_dir.name, "test_jit_save_load_with_dict_input_no_prune/model" + ) # prune inputs - paddle.jit.save(layer=net, - path=path, - input_spec=[{ - 'img': - InputSpec(shape=[None, 8], - dtype='float32', - name='img'), - 'img2': - InputSpec(shape=[None, 8], - dtype='float32', - name='img2') - }]) + paddle.jit.save( + layer=net, + path=path, + input_spec=[ + { + 'img': 
InputSpec( + shape=[None, 8], dtype='float32', name='img' + ), + 'img2': InputSpec( + shape=[None, 8], dtype='float32', name='img2' + ), + } + ], + ) img = paddle.randn(shape=[4, 8], dtype='float32') img2 = paddle.randn(shape=[4, 8], dtype='float32') @@ -526,7 +530,6 @@ class TestSaveLoadWithDictInputNoPrune(unittest.TestCase): class TestSaveLoadWithInputSpec(unittest.TestCase): - def setUp(self): # enable dygraph mode fluid.enable_dygraph() @@ -538,11 +541,13 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): def test_with_input_spec(self): net = LinearNetReturnLoss(8, 8) # set x.shape = [None, 8] - net.forward = declarative(net.forward, - input_spec=[InputSpec([None, 8], name='x')]) + net.forward = declarative( + net.forward, input_spec=[InputSpec([None, 8], name='x')] + ) - model_path = os.path.join(self.temp_dir.name, - "input_spec.output_spec/model") + model_path = os.path.join( + self.temp_dir.name, "input_spec.output_spec/model" + ) # check inputs and outputs self.assertTrue(len(net.forward.inputs) == 1) input_x = net.forward.inputs[0] @@ -556,14 +561,16 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): # 2. load to infer infer_layer = paddle.jit.load(model_path) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) pred = infer_layer(x) def test_multi_in_out(self): net = LinearNetMultiInput(8, 8) - model_path = os.path.join(self.temp_dir.name, - "multi_inout.output_spec1/model") + model_path = os.path.join( + self.temp_dir.name, "multi_inout.output_spec1/model" + ) # 1. check inputs and outputs self.assertTrue(len(net.forward.inputs) == 2) input_x = net.forward.inputs[0] @@ -578,15 +585,18 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): # 3. load to infer infer_layer = paddle.jit.load(model_path) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) y = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) # 4. predict pred_x, pred_y = infer_layer(x, y) # 1. prune y and loss - model_path = os.path.join(self.temp_dir.name, - "multi_inout.output_spec2/model") + model_path = os.path.join( + self.temp_dir.name, "multi_inout.output_spec2/model" + ) output_spec = net.forward.outputs[:1] paddle.jit.save(net, model_path, [input_x], output_spec=output_spec) # 2. load again @@ -600,8 +610,9 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): def test_multi_in_out1(self): net = LinearNetMultiInput1(8, 8) - model_path = os.path.join(self.temp_dir.name, - "multi_inout1.output_spec1/model") + model_path = os.path.join( + self.temp_dir.name, "multi_inout1.output_spec1/model" + ) # 1. check inputs and outputs self.assertTrue(len(net.forward.inputs) == 2) input_x = net.forward.inputs[0] @@ -616,17 +627,20 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): # 3. load to infer infer_layer = paddle.jit.load(model_path) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) y = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) # 4. predict pred_x, pred_y = infer_layer(x, y) # 1. 
prune y and loss - model_path = os.path.join(self.temp_dir.name, - "multi_inout1.output_spec2/model") + model_path = os.path.join( + self.temp_dir.name, "multi_inout1.output_spec2/model" + ) output_spec = net.forward.outputs[:1] - paddle.jit.save(net, model_path, (input_x, ), output_spec=output_spec) + paddle.jit.save(net, model_path, (input_x,), output_spec=output_spec) # 2. load again infer_layer2 = paddle.jit.load(model_path) # 3. predict @@ -637,7 +651,6 @@ class TestSaveLoadWithInputSpec(unittest.TestCase): class TestJitSaveLoadConfig(unittest.TestCase): - def setUp(self): # enable dygraph mode fluid.enable_dygraph() @@ -652,30 +665,36 @@ class TestJitSaveLoadConfig(unittest.TestCase): def test_output_spec(self): train_layer = LinearNetReturnLoss(8, 8) adam = fluid.optimizer.AdamOptimizer( - learning_rate=0.1, parameter_list=train_layer.parameters()) + learning_rate=0.1, parameter_list=train_layer.parameters() + ) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) for i in range(10): out, loss = train_layer(x) loss.backward() adam.minimize(loss) train_layer.clear_gradients() - model_path = os.path.join(self.temp_dir.name, - "save_load_config.output_spec") + model_path = os.path.join( + self.temp_dir.name, "save_load_config.output_spec" + ) output_spec = [out] - paddle.jit.save(layer=train_layer, - path=model_path, - input_spec=[x], - output_spec=output_spec) + paddle.jit.save( + layer=train_layer, + path=model_path, + input_spec=[x], + output_spec=output_spec, + ) train_layer.eval() infer_layer = paddle.jit.load(model_path) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) np.testing.assert_array_equal( - train_layer(x)[0].numpy(), - infer_layer(x).numpy()) + train_layer(x)[0].numpy(), infer_layer(x).numpy() + ) def test_save_no_support_config_error(self): layer = LinearNet(784, 1) @@ -700,12 +719,12 @@ class TestJitSaveLoadConfig(unittest.TestCase): class TestJitMultipleLoading(unittest.TestCase): - def setUp(self): self.linear_size = 4 self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "jit_multi_load/model") + self.model_path = os.path.join( + self.temp_dir.name, "jit_multi_load/model" + ) # enable dygraph mode fluid.enable_dygraph() # config seed @@ -720,13 +739,14 @@ class TestJitMultipleLoading(unittest.TestCase): def train_and_save_orig_model(self): layer = LinearNet(self.linear_size, self.linear_size) example_inputs, layer, _ = train(layer, self.linear_size, 1) - paddle.jit.save(layer=layer, - path=self.model_path, - input_spec=example_inputs) + paddle.jit.save( + layer=layer, path=self.model_path, input_spec=example_inputs + ) def test_load_model_retransform_inference(self): - multi_loaded_layer = MultiLoadingLinearNet(self.linear_size, - self.model_path) + multi_loaded_layer = MultiLoadingLinearNet( + self.linear_size, self.model_path + ) state_dict = multi_loaded_layer.state_dict() name_set = set() for _, var in state_dict.items(): @@ -735,12 +755,12 @@ class TestJitMultipleLoading(unittest.TestCase): class TestJitPruneModelAndLoad(unittest.TestCase): - def setUp(self): self.linear_size = 4 self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "jit_prune_model_and_load/model") + self.model_path = os.path.join( + self.temp_dir.name, "jit_prune_model_and_load/model" + ) # enable dygraph mode fluid.enable_dygraph() # config seed @@ 
-753,9 +773,11 @@ class TestJitPruneModelAndLoad(unittest.TestCase): def train_and_save(self): train_layer = LinearNetReturnHidden(8, 8) adam = fluid.optimizer.AdamOptimizer( - learning_rate=0.1, parameter_list=train_layer.parameters()) + learning_rate=0.1, parameter_list=train_layer.parameters() + ) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) for i in range(10): hidden, loss = train_layer(x) loss.backward() @@ -763,10 +785,12 @@ class TestJitPruneModelAndLoad(unittest.TestCase): train_layer.clear_gradients() output_spec = [hidden] - paddle.jit.save(layer=train_layer, - path=self.model_path, - input_spec=[x], - output_spec=output_spec) + paddle.jit.save( + layer=train_layer, + path=self.model_path, + input_spec=[x], + output_spec=output_spec, + ) return train_layer @@ -777,10 +801,11 @@ class TestJitPruneModelAndLoad(unittest.TestCase): infer_layer = paddle.jit.load(self.model_path) x = fluid.dygraph.to_variable( - np.random.random((4, 8)).astype('float32')) + np.random.random((4, 8)).astype('float32') + ) np.testing.assert_array_equal( - train_layer(x)[0].numpy(), - infer_layer(x).numpy()) + train_layer(x)[0].numpy(), infer_layer(x).numpy() + ) def test_load_var_not_in_extra_var_info(self): self.train_and_save() @@ -798,7 +823,6 @@ class TestJitPruneModelAndLoad(unittest.TestCase): class TestJitSaveMultiCases(unittest.TestCase): - def setUp(self): # enable dygraph mode fluid.enable_dygraph() @@ -810,11 +834,9 @@ class TestJitSaveMultiCases(unittest.TestCase): def tearDown(self): self.temp_dir.cleanup() - def verify_inference_correctness(self, - layer, - model_path, - with_label_and_loss=False, - with_label=False): + def verify_inference_correctness( + self, layer, model_path, with_label_and_loss=False, with_label=False + ): layer.eval() loaded_layer = paddle.jit.load(model_path) loaded_layer.eval() @@ -834,17 +856,19 @@ class TestJitSaveMultiCases(unittest.TestCase): np.testing.assert_array_equal( pred, loaded_pred, - err_msg= - 'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}' - .format(pred, loaded_pred)) + err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format( + pred, loaded_pred + ), + ) def test_no_prune_to_static_after_train(self): layer = LinearNet(784, 1) train(layer) - model_path = os.path.join(self.temp_dir.name, - "test_no_prune_to_static_after_train/model") + model_path = os.path.join( + self.temp_dir.name, "test_no_prune_to_static_after_train/model" + ) paddle.jit.save(layer, model_path) self.verify_inference_correctness(layer, model_path) @@ -852,8 +876,9 @@ class TestJitSaveMultiCases(unittest.TestCase): def test_no_prune_to_static_no_train(self): layer = LinearNetWithInputSpec(784, 1) - model_path = os.path.join(self.temp_dir.name, - "test_no_prune_to_static_no_train/model") + model_path = os.path.join( + self.temp_dir.name, "test_no_prune_to_static_no_train/model" + ) paddle.jit.save(layer, model_path) self.verify_inference_correctness(layer, model_path) @@ -864,11 +889,13 @@ class TestJitSaveMultiCases(unittest.TestCase): train(layer) model_path = os.path.join( - self.temp_dir.name, "test_no_prune_no_to_static_after_train/model") + self.temp_dir.name, "test_no_prune_no_to_static_after_train/model" + ) paddle.jit.save( layer, model_path, - input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) + input_spec=[InputSpec(shape=[None, 784], dtype='float32')], + ) self.verify_inference_correctness(layer, 
model_path) @@ -879,7 +906,8 @@ class TestJitSaveMultiCases(unittest.TestCase): model_path = os.path.join( self.temp_dir.name, - "test_no_prune_no_to_static_after_train_with_examples/model") + "test_no_prune_no_to_static_after_train_with_examples/model", + ) paddle.jit.save(layer=layer, path=model_path, input_spec=example_inputs) self.verify_inference_correctness(layer, model_path) @@ -887,12 +915,14 @@ class TestJitSaveMultiCases(unittest.TestCase): def test_no_prune_no_to_static_no_train(self): layer = LinearNetNotDeclarative(784, 1) - model_path = os.path.join(self.temp_dir.name, - "test_no_prune_no_to_static_no_train/model") + model_path = os.path.join( + self.temp_dir.name, "test_no_prune_no_to_static_no_train/model" + ) paddle.jit.save( layer, model_path, - input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) + input_spec=[InputSpec(shape=[None, 784], dtype='float32')], + ) self.verify_inference_correctness(layer, model_path) @@ -901,54 +931,57 @@ class TestJitSaveMultiCases(unittest.TestCase): out = train_with_label(layer) - model_path = os.path.join(self.temp_dir.name, - "test_prune_to_static_after_train/model") - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name="image") - ], - output_spec=[out]) - - self.verify_inference_correctness(layer, - model_path, - with_label_and_loss=True) + model_path = os.path.join( + self.temp_dir.name, "test_prune_to_static_after_train/model" + ) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image") + ], + output_spec=[out], + ) + + self.verify_inference_correctness( + layer, model_path, with_label_and_loss=True + ) def test_prune_to_static_no_train(self): layer = LinerNetWithLabel(784, 1) - model_path = os.path.join(self.temp_dir.name, - "test_prune_to_static_no_train/model") + model_path = os.path.join( + self.temp_dir.name, "test_prune_to_static_no_train/model" + ) # TODO: no train, cannot get output_spec var here # now only can use index output_spec = layer.forward.outputs[:1] - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name="image") - ], - output_spec=output_spec) - - self.verify_inference_correctness(layer, - model_path, - with_label_and_loss=True) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image") + ], + output_spec=output_spec, + ) + + self.verify_inference_correctness( + layer, model_path, with_label_and_loss=True + ) def test_prune_input_to_static_no_train(self): layer = LinerNetWithPruneInput(784, 1) - model_path = os.path.join(self.temp_dir.name, - "test_prune_input_to_static_no_train/model") - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name="image") - ]) + model_path = os.path.join( + self.temp_dir.name, "test_prune_input_to_static_no_train/model" + ) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image") + ], + ) self.verify_inference_correctness(layer, model_path, with_label=True) @@ -957,14 +990,15 @@ class TestJitSaveMultiCases(unittest.TestCase): model_path = os.path.join( self.temp_dir.name, - "test_prune_useless_input_to_static_no_train/model") - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name="image") - ]) + "test_prune_useless_input_to_static_no_train/model", + ) + paddle.jit.save( + layer, 
+ model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image") + ], + ) self.verify_inference_correctness(layer, model_path, with_label=True) @@ -974,18 +1008,20 @@ class TestJitSaveMultiCases(unittest.TestCase): train(layer) model_path = os.path.join( - self.temp_dir.name, "test_no_prune_input_spec_name_warning/model") + self.temp_dir.name, "test_no_prune_input_spec_name_warning/model" + ) paddle.jit.save( layer, model_path, - input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name='feed_input') - ]) + input_spec=[InputSpec(shape=[None, 784], dtype='float32')], + ) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name='feed_input') + ], + ) self.verify_inference_correctness(layer, model_path) @@ -995,7 +1031,8 @@ class TestJitSaveMultiCases(unittest.TestCase): train(layer) model_path = os.path.join( - self.temp_dir.name, "test_not_prune_output_spec_name_warning/model") + self.temp_dir.name, "test_not_prune_output_spec_name_warning/model" + ) out = paddle.to_tensor(np.random.random((1, 1)).astype('float')) paddle.jit.save(layer, model_path, output_spec=[out]) @@ -1004,47 +1041,52 @@ class TestJitSaveMultiCases(unittest.TestCase): def test_prune_input_spec_name_error(self): layer = LinerNetWithLabel(784, 1) - model_path = os.path.join(self.temp_dir.name, - "test_prune_input_spec_name_error/model") + model_path = os.path.join( + self.temp_dir.name, "test_prune_input_spec_name_error/model" + ) with self.assertRaises(ValueError): paddle.jit.save( layer, model_path, - input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) + input_spec=[InputSpec(shape=[None, 784], dtype='float32')], + ) with self.assertRaises(ValueError): - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name='feed_input') - ]) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec( + shape=[None, 784], dtype='float32', name='feed_input' + ) + ], + ) def test_prune_output_spec_name_error(self): layer = LinerNetWithLabel(784, 1) train_with_label(layer) - model_path = os.path.join(self.temp_dir.name, - "test_prune_to_static_after_train/model") + model_path = os.path.join( + self.temp_dir.name, "test_prune_to_static_after_train/model" + ) out = paddle.to_tensor(np.random.random((1, 1)).astype('float')) with self.assertRaises(ValueError): - paddle.jit.save(layer, - model_path, - input_spec=[ - InputSpec(shape=[None, 784], - dtype='float32', - name="image") - ], - output_spec=[out]) + paddle.jit.save( + layer, + model_path, + input_spec=[ + InputSpec(shape=[None, 784], dtype='float32', name="image") + ], + output_spec=[out], + ) class TestJitSaveLoadEmptyLayer(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "jit_save_load_empty_layer/model") + self.model_path = os.path.join( + self.temp_dir.name, "jit_save_load_empty_layer/model" + ) # enable dygraph mode paddle.disable_static() @@ -1062,11 +1104,11 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase): class TestJitSaveLoadNoParamLayer(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() - self.model_path = os.path.join(self.temp_dir.name, - "jit_save_load_no_param_layer/model") + self.model_path = os.path.join( + self.temp_dir.name, "jit_save_load_no_param_layer/model" + ) # enable dygraph 
mode paddle.disable_static() @@ -1085,7 +1127,6 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase): class TestJitSaveLoadMultiMethods(unittest.TestCase): - def setUp(self): # enable dygraph mode paddle.disable_static() @@ -1096,7 +1137,8 @@ class TestJitSaveLoadMultiMethods(unittest.TestCase): def test_jit_save_load_inference(self): model_path_inference = os.path.join( - self.temp_dir.name, "jit_save_load_multi_methods/model") + self.temp_dir.name, "jit_save_load_multi_methods/model" + ) IMAGE_SIZE = 224 layer = LinearNetWithMultiStaticFunc(IMAGE_SIZE, 10) inps = paddle.randn([1, IMAGE_SIZE]) @@ -1108,21 +1150,26 @@ class TestJitSaveLoadMultiMethods(unittest.TestCase): load_net = paddle.jit.load(model_path_inference) for func, result in result_origin.items(): self.assertTrue( - float((result - - getattr(load_net, func, None)(inps)).abs().max()) < 1e-5) + float( + (result - getattr(load_net, func, None)(inps)).abs().max() + ) + < 1e-5 + ) def test_jit_save_load_multi_methods_inputspec(self): - model_path = os.path.join(self.temp_dir.name, - 'jit_save_load_multi_methods/model') + model_path = os.path.join( + self.temp_dir.name, 'jit_save_load_multi_methods/model' + ) layer = LinearNetWithMultiStaticFunc(784, 1) with self.assertRaises(ValueError): - paddle.jit.save(layer, - model_path, - input_spec=[InputSpec(shape=[None, 784])]) + paddle.jit.save( + layer, model_path, input_spec=[InputSpec(shape=[None, 784])] + ) def test_parse_name(self): - model_path_inference = os.path.join(self.temp_dir.name, - "jit_save_load_parse_name/model") + model_path_inference = os.path.join( + self.temp_dir.name, "jit_save_load_parse_name/model" + ) IMAGE_SIZE = 224 layer = LinearNet(IMAGE_SIZE, 1) inps = paddle.randn([1, IMAGE_SIZE]) @@ -1135,7 +1182,6 @@ class TestJitSaveLoadMultiMethods(unittest.TestCase): class LayerSaved(paddle.nn.Layer): - def __init__(self, in_size, out_size): super(LayerSaved, self).__init__() self.hidden = 100 @@ -1157,7 +1203,6 @@ class LayerSaved(paddle.nn.Layer): class Net(paddle.nn.Layer): - def __init__(self): super().__init__() self.fc1 = paddle.nn.Linear(4, 4) @@ -1210,7 +1255,6 @@ class Net(paddle.nn.Layer): class NetTensor(paddle.nn.Layer): - def __init__(self): super().__init__() self.fc1 = paddle.nn.Linear(4, 4) @@ -1231,7 +1275,6 @@ class NetTensor(paddle.nn.Layer): class TestJitSaveCombineProperty(unittest.TestCase): - def setUp(self): # enable dygraph mode paddle.disable_static() @@ -1241,17 +1284,19 @@ class TestJitSaveCombineProperty(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_combine_property(self): - model_path = os.path.join(self.temp_dir.name, - "test_jit_save_combine/model") + model_path = os.path.join( + self.temp_dir.name, "test_jit_save_combine/model" + ) # Use new namespace with unique_name.guard(): net = Net() - #save + # save paddle.jit.save(net, model_path, combine_params=True) def test_jit_save_tensor_property(self): - model_path = os.path.join(self.temp_dir.name, - "test_jit_save_combine/model") + model_path = os.path.join( + self.temp_dir.name, "test_jit_save_combine/model" + ) # Use new namespace with unique_name.guard(): net = NetTensor() @@ -1260,7 +1305,6 @@ class TestJitSaveCombineProperty(unittest.TestCase): class LayerLoadFinetune(paddle.nn.Layer): - def __init__(self, in_size, out_size, load_path): super(LayerLoadFinetune, self).__init__() # Test duplicate name @@ -1294,7 +1338,6 @@ class LayerLoadFinetune(paddle.nn.Layer): class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase): - def setUp(self): # enable dygraph mode 
paddle.disable_static() @@ -1305,32 +1348,39 @@ class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase): def test_save_load_finetune_load(self): model_path = os.path.join( - self.temp_dir.name, "test_jit_save_load_save_without_running/model") + self.temp_dir.name, "test_jit_save_load_save_without_running/model" + ) IMAGE_SIZE = 224 inps0 = paddle.randn([1, IMAGE_SIZE]) inps1 = paddle.randn([2, IMAGE_SIZE]) # Use new namespace with unique_name.guard(): layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE) - #save - paddle.jit.save(layer_save, - model_path, - input_spec=[ - paddle.static.InputSpec(shape=[None, IMAGE_SIZE], - dtype='float32') - ]) + # save + paddle.jit.save( + layer_save, + model_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None, IMAGE_SIZE], dtype='float32' + ) + ], + ) result_00 = layer_save(inps0) result_01 = layer_save(inps1) - #load and save without running + # load and save without running with unique_name.guard(): layer_load = paddle.jit.load(model_path) - paddle.jit.save(layer_load, - model_path, - input_spec=[ - paddle.static.InputSpec( - shape=[None, IMAGE_SIZE], dtype='float32') - ]) - #reload + paddle.jit.save( + layer_load, + model_path, + input_spec=[ + paddle.static.InputSpec( + shape=[None, IMAGE_SIZE], dtype='float32' + ) + ], + ) + # reload layer_reload = paddle.jit.load(model_path) result_10 = layer_reload(inps0) result_11 = layer_reload(inps1) @@ -1340,7 +1390,6 @@ class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase): class TestJitSaveLoadFinetuneLoad(unittest.TestCase): - def setUp(self): # enable dygraph mode paddle.disable_static() @@ -1350,8 +1399,9 @@ class TestJitSaveLoadFinetuneLoad(unittest.TestCase): self.temp_dir.cleanup() def test_save_load_finetune_load(self): - model_path = os.path.join(self.temp_dir.name, - "test_jit_save_load_finetune_load/model") + model_path = os.path.join( + self.temp_dir.name, "test_jit_save_load_finetune_load/model" + ) IMAGE_SIZE = 224 inps0 = paddle.randn([1, IMAGE_SIZE]) inps1 = paddle.randn([2, IMAGE_SIZE]) @@ -1359,18 +1409,18 @@ class TestJitSaveLoadFinetuneLoad(unittest.TestCase): with unique_name.guard(): layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE) layer_save(inps0) - #save + # save paddle.jit.save(layer_save, model_path) - #load + # load with unique_name.guard(): layer_load = LayerLoadFinetune(IMAGE_SIZE, IMAGE_SIZE, model_path) - #train + # train train(layer_load, input_size=IMAGE_SIZE) result_00 = layer_load(inps0) result_01 = layer_load(inps1) - #save + # save paddle.jit.save(layer_load, model_path) - #load + # load layer_finetune = paddle.jit.load(model_path) result_10 = layer_finetune(inps0) result_11 = layer_finetune(inps1) @@ -1385,7 +1435,6 @@ class TestJitSaveLoadFinetuneLoad(unittest.TestCase): # So divided into three TestCase: TestJitSaveLoadFunctionCase1, # TestJitSaveLoadFunctionCase2, TestJitSaveLoadFunctionCase3. 
class TestJitSaveLoadFunctionCase1(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1394,13 +1443,13 @@ class TestJitSaveLoadFunctionCase1(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_static_function(self): - @paddle.jit.to_static def fun(inputs): return paddle.tanh(inputs) - path = os.path.join(self.temp_dir.name, - 'test_jit_save_load_function_1/func') + path = os.path.join( + self.temp_dir.name, 'test_jit_save_load_function_1/func' + ) inps = paddle.rand([3, 6]) origin = fun(inps) @@ -1412,7 +1461,6 @@ class TestJitSaveLoadFunctionCase1(unittest.TestCase): class TestJitSaveLoadFunctionCase2(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1421,15 +1469,17 @@ class TestJitSaveLoadFunctionCase2(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_function_input_spec(self): - - @paddle.jit.to_static(input_spec=[ - InputSpec(shape=[None, 6], dtype='float32', name='x'), - ]) + @paddle.jit.to_static( + input_spec=[ + InputSpec(shape=[None, 6], dtype='float32', name='x'), + ] + ) def fun(inputs): return paddle.nn.functional.relu(inputs) - path = os.path.join(self.temp_dir.name, - 'test_jit_save_load_function_2/func') + path = os.path.join( + self.temp_dir.name, 'test_jit_save_load_function_2/func' + ) inps = paddle.rand([3, 6]) origin = fun(inps) @@ -1440,7 +1490,6 @@ class TestJitSaveLoadFunctionCase2(unittest.TestCase): class TestJitSaveLoadFunctionCase3(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1449,22 +1498,22 @@ class TestJitSaveLoadFunctionCase3(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_function_function(self): - def fun(inputs): return paddle.tanh(inputs) - path = os.path.join(self.temp_dir.name, - 'test_jit_save_load_function_3/func') + path = os.path.join( + self.temp_dir.name, 'test_jit_save_load_function_3/func' + ) inps = paddle.rand([3, 6]) origin = fun(inps) - paddle.jit.save(fun, - path, - input_spec=[ - InputSpec(shape=[None, 6], - dtype='float32', - name='x'), - ]) + paddle.jit.save( + fun, + path, + input_spec=[ + InputSpec(shape=[None, 6], dtype='float32', name='x'), + ], + ) load_func = paddle.jit.load(path) load_result = load_func(inps) @@ -1472,7 +1521,6 @@ class TestJitSaveLoadFunctionCase3(unittest.TestCase): class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1481,9 +1529,7 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_function(self): - class LinearNet(paddle.nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = paddle.nn.Linear(5, 6) @@ -1499,11 +1545,13 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): inps = paddle.rand([3, 5]) origin = layer.anothor_forward(inps) - func = paddle.jit.to_static(layer.anothor_forward, - [paddle.static.InputSpec(shape=[-1, 5])]) + func = paddle.jit.to_static( + layer.anothor_forward, [paddle.static.InputSpec(shape=[-1, 5])] + ) path = os.path.join( self.temp_dir.name, - 'test_jit_save_load_function_with_params_case1/func') + 'test_jit_save_load_function_with_params_case1/func', + ) paddle.jit.save(func, path) load_func = paddle.jit.load(path) @@ -1512,7 +1560,6 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): class 
TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1521,9 +1568,7 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_function(self): - class LinearNet(paddle.nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = paddle.nn.Linear(5, 6) @@ -1541,19 +1586,20 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): path = os.path.join( self.temp_dir.name, - 'test_jit_save_load_function_with_params_case2/func') + 'test_jit_save_load_function_with_params_case2/func', + ) paddle.jit.save(layer.anothor_forward, path) origin_result = layer.anothor_forward(inps) load_func = paddle.jit.load(path) load_result = load_func(inps) - np.testing.assert_array_equal(origin_result.numpy(), - load_result.numpy()) + np.testing.assert_array_equal( + origin_result.numpy(), load_result.numpy() + ) class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): - def setUp(self): paddle.disable_static() self.temp_dir = tempfile.TemporaryDirectory() @@ -1562,9 +1608,7 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): self.temp_dir.cleanup() def test_jit_save_load_function(self): - class LinearNet(paddle.nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = paddle.nn.Linear(5, 6) @@ -1583,7 +1627,8 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): path = os.path.join( self.temp_dir.name, - 'test_jit_save_load_function_with_params_case3/func') + 'test_jit_save_load_function_with_params_case3/func', + ) paddle.jit.save(layer.anothor_forward, path) load_func = paddle.jit.load(path) @@ -1592,7 +1637,6 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): class TestJitSaveLoadDataParallel(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -1610,18 +1654,20 @@ class TestJitSaveLoadDataParallel(unittest.TestCase): np.testing.assert_array_equal( pred, loaded_pred, - err_msg= - 'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}' - .format(pred, loaded_pred)) + err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format( + pred, loaded_pred + ), + ) def test_jit_save_data_parallel_with_inputspec(self): layer = LinearNetNotDeclarative(784, 1) layer = paddle.DataParallel(layer) - path = os.path.join(self.temp_dir.name, - "jit_save_data_parallel_with_inputspec/model") - paddle.jit.save(layer=layer, - path=path, - input_spec=[InputSpec(shape=[None, 784])]) + path = os.path.join( + self.temp_dir.name, "jit_save_data_parallel_with_inputspec/model" + ) + paddle.jit.save( + layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784])] + ) self.verify_inference_correctness(layer, path) @@ -1629,8 +1675,9 @@ class TestJitSaveLoadDataParallel(unittest.TestCase): layer = LinearNetWithInputSpec(784, 1) layer = paddle.DataParallel(layer) - path = os.path.join(self.temp_dir.name, - "jit_save_data_parallel_with_to_static/model") + path = os.path.join( + self.temp_dir.name, "jit_save_data_parallel_with_to_static/model" + ) paddle.jit.save(layer, path) self.verify_inference_correctness(layer, path) @@ -1641,16 +1688,17 @@ class InputSepcLayer(paddle.nn.Layer): A layer with InputSpec to test InputSpec compatibility ''' - @paddle.jit.to_static(input_spec=[ - InputSpec(shape=[None, 8], dtype='float32', name='x'), - InputSpec(shape=[None, 1], dtype='float64', name='y') 
- ]) + @paddle.jit.to_static( + input_spec=[ + InputSpec(shape=[None, 8], dtype='float32', name='x'), + InputSpec(shape=[None, 1], dtype='float64', name='y'), + ] + ) def forward(self, x, y): return x, y class TestInputSpecCompatibility(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -1662,15 +1710,18 @@ class TestInputSpecCompatibility(unittest.TestCase): input_y = paddle.uniform([8, 1], dtype='float64') expected_result = expect_layer(input_x, input_y) test_result = test_layer(input_x, input_y) - np.testing.assert_allclose(expected_result[0].numpy(), - test_result[0].numpy()) - np.testing.assert_allclose(expected_result[1].numpy(), - test_result[1].numpy()) + np.testing.assert_allclose( + expected_result[0].numpy(), test_result[0].numpy() + ) + np.testing.assert_allclose( + expected_result[1].numpy(), test_result[1].numpy() + ) def test_jit_save_compatible_input_sepc(self): layer = InputSepcLayer() - save_dir = os.path.join(self.temp_dir.name, - "jit_save_compatible_input_spec") + save_dir = os.path.join( + self.temp_dir.name, "jit_save_compatible_input_spec" + ) path = save_dir + "/model" paddle.jit.save(layer=layer, path=path) @@ -1678,68 +1729,74 @@ class TestInputSpecCompatibility(unittest.TestCase): self._assert_input_spec_layer_return(layer, no_input_spec_layer) shutil.rmtree(save_dir) - paddle.jit.save(layer=layer, - path=path, - input_spec=[ - InputSpec(shape=[None, 8], - dtype='float32', - name='x'), - InputSpec(shape=[None, 1], - dtype='float64', - name='y') - ]) + paddle.jit.save( + layer=layer, + path=path, + input_spec=[ + InputSpec(shape=[None, 8], dtype='float32', name='x'), + InputSpec(shape=[None, 1], dtype='float64', name='y'), + ], + ) same_input_spec_layer = paddle.jit.load(path) self._assert_input_spec_layer_return(layer, same_input_spec_layer) shutil.rmtree(save_dir) - paddle.jit.save(layer=layer, - path=path, - input_spec=[ - InputSpec(shape=[8, 8], dtype='float32'), - InputSpec(shape=[8, -1], dtype='float64') - ]) + paddle.jit.save( + layer=layer, + path=path, + input_spec=[ + InputSpec(shape=[8, 8], dtype='float32'), + InputSpec(shape=[8, -1], dtype='float64'), + ], + ) compatible_input_spec_layer = paddle.jit.load(path) self._assert_input_spec_layer_return(layer, compatible_input_spec_layer) shutil.rmtree(save_dir) def test_jit_save_incompatible_input_sepc(self): layer = InputSepcLayer() - save_dir = os.path.join(self.temp_dir.name, - "jit_save_compatible_input_spec") + save_dir = os.path.join( + self.temp_dir.name, "jit_save_compatible_input_spec" + ) path = save_dir + "/model" with self.assertRaises(ValueError): # type mismatch - paddle.jit.save(layer=layer, - path=path, - input_spec=[ - InputSpec(shape=[None, 8], dtype='float64'), - InputSpec(shape=[None, 1], dtype='float64') - ]) + paddle.jit.save( + layer=layer, + path=path, + input_spec=[ + InputSpec(shape=[None, 8], dtype='float64'), + InputSpec(shape=[None, 1], dtype='float64'), + ], + ) with self.assertRaises(ValueError): # shape len mismatch - paddle.jit.save(layer=layer, - path=path, - input_spec=[ - InputSpec(shape=[None, 8, 1], dtype='float32'), - InputSpec(shape=[None, 1], dtype='float64') - ]) + paddle.jit.save( + layer=layer, + path=path, + input_spec=[ + InputSpec(shape=[None, 8, 1], dtype='float32'), + InputSpec(shape=[None, 1], dtype='float64'), + ], + ) with self.assertRaises(ValueError): # shape mismatch - paddle.jit.save(layer=layer, - path=path, - input_spec=[ - InputSpec(shape=[None, 8], dtype='float32'), - InputSpec(shape=[None, 2], dtype='float64') 
- ]) + paddle.jit.save( + layer=layer, + path=path, + input_spec=[ + InputSpec(shape=[None, 8], dtype='float32'), + InputSpec(shape=[None, 2], dtype='float64'), + ], + ) if os.path.exists(save_dir): shutil.rmtree(save_dir) class NotJitForward(paddle.nn.Layer): - def __init__(self): super(NotJitForward, self).__init__() @@ -1748,7 +1805,6 @@ class NotJitForward(paddle.nn.Layer): class TestNotJitForward(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py index 6358854255d918b9fd9ba01b7cf229acca4da427..9fd0868f1e4c77c5fd30420198df67b66af39c30 100644 --- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py @@ -36,7 +36,6 @@ def kldiv_loss(x, target, reduction): class TestKLDivLossOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'kldiv_loss' @@ -57,10 +56,9 @@ class TestKLDivLossOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Loss', - no_grad_set=set(["Target"]), - check_eager=True) + self.check_grad( + ['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True + ) def initTestCase(self): self.x_shape = (4, 5, 5) @@ -68,28 +66,24 @@ class TestKLDivLossOp(OpTest): class TestKLDivLossOp2(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (3, 2, 7, 7) self.reduction = 'none' class TestKLDivLossOp3(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 7, 9) self.reduction = 'mean' class TestKLDivLossOp4(TestKLDivLossOp): - def initTestCase(self): self.x_shape = (5, 20) self.reduction = 'sum' class TestKLDivLossDygraph(unittest.TestCase): - def run_kl_loss(self, reduction, shape=(5, 20)): x = np.random.uniform(-10, 10, shape).astype('float64') target = np.random.uniform(-10, 10, shape).astype('float64') @@ -97,8 +91,9 @@ class TestKLDivLossDygraph(unittest.TestCase): with paddle.fluid.dygraph.guard(): kldiv_criterion = paddle.nn.KLDivLoss(reduction) - pred_loss = kldiv_criterion(paddle.to_tensor(x), - paddle.to_tensor(target)) + pred_loss = kldiv_criterion( + paddle.to_tensor(x), paddle.to_tensor(target) + ) np.testing.assert_allclose(pred_loss.numpy(), gt_loss, rtol=1e-05) def test_kl_loss_batchmean(self): @@ -126,7 +121,6 @@ class TestKLDivLossDygraph(unittest.TestCase): class TestKLDivLossTypePromotion(unittest.TestCase): - def test_kl_div_promotion(self): with paddle.fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_kron_op.py b/python/paddle/fluid/tests/unittests/test_kron_op.py index b29df857b62867b806231195747d66cd5bd50fb3..a2f1657a06e6608eea4d2ddde3f36b0262f4fa9c 100644 --- a/python/paddle/fluid/tests/unittests/test_kron_op.py +++ b/python/paddle/fluid/tests/unittests/test_kron_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestKronOp(OpTest): - def setUp(self): self.op_type = "kron" self.python_api = paddle.kron @@ -51,7 +50,6 @@ class TestKronOp(OpTest): class TestKronOp2(TestKronOp): - def setUp(self): self.op_type = "kron" self.python_api = paddle.kron @@ -64,7 +62,6 @@ class TestKronOp2(TestKronOp): class TestKronOp3(TestKronOp): - def setUp(self): self.op_type = "kron" self.python_api = paddle.kron @@ -77,7 +74,6 @@ class TestKronOp3(TestKronOp): class TestKronLayer(unittest.TestCase): - def test_case(self): a = np.random.randn(10, 10).astype(np.float64) b = np.random.randn(10, 
10).astype(np.float64) @@ -104,7 +100,7 @@ class TestKronLayer(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(start) - c, = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var]) + (c,) = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var]) np.testing.assert_allclose(c, np.kron(a, b)) def test_api_eager_dygraph(self): @@ -114,7 +110,6 @@ class TestKronLayer(unittest.TestCase): class TestComplexKronOp(OpTest): - def setUp(self): self.op_type = "kron" self.python_api = paddle.kron @@ -127,7 +122,7 @@ class TestComplexKronOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -137,14 +132,17 @@ class TestComplexKronOp(OpTest): def init_input_output(self): self.x = np.random.random(self.x_shape).astype( - self.dtype) + 1J * np.random.random(self.x_shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.x_shape).astype(self.dtype) self.y = np.random.random(self.y_shape).astype( - self.dtype) + 1J * np.random.random(self.y_shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.y_shape).astype(self.dtype) self.out = np.kron(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones(self.out_shape, self.dtype) + 1J * np.ones( - self.out_shape, self.dtype) + self.grad_out = np.ones(self.out_shape, self.dtype) + 1j * np.ones( + self.out_shape, self.dtype + ) self.grad_x = self.get_grad_x_by_numpy() self.grad_y = self.get_grad_y_by_numpy() @@ -157,7 +155,8 @@ class TestComplexKronOp(OpTest): idx_i = x_i * self.y_shape[0] + i idx_j = x_j * self.y_shape[1] + j grad_x[x_i][x_j] += self.grad_out[idx_i][ - idx_j] * np.conj(self.y[i][j]) + idx_j + ] * np.conj(self.y[i][j]) return grad_x def get_grad_y_by_numpy(self): @@ -169,47 +168,55 @@ class TestComplexKronOp(OpTest): idx_i = x_i * self.y_shape[0] + y_i idx_j = x_j * self.y_shape[1] + y_j grad_y[y_i][y_j] += self.grad_out[idx_i][ - idx_j] * np.conj(self.x[x_i][x_j]) + idx_j + ] * np.conj(self.x[x_i][x_j]) return grad_y def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) class TestKronOpTypePromotion(TestComplexKronOp): - def init_input_output(self): self.x = np.random.random(self.x_shape).astype(self.dtype) self.y = np.random.random(self.y_shape).astype( - self.dtype) + 1J * 
np.random.random(self.y_shape).astype(self.dtype) + self.dtype + ) + 1j * np.random.random(self.y_shape).astype(self.dtype) self.out = np.kron(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones(self.out_shape, self.dtype) + 1J * np.ones( - self.out_shape, self.dtype) + self.grad_out = np.ones(self.out_shape, self.dtype) + 1j * np.ones( + self.out_shape, self.dtype + ) self.grad_x = self.get_grad_x_by_numpy().real self.grad_y = self.get_grad_y_by_numpy() diff --git a/python/paddle/fluid/tests/unittests/test_kthvalue_op.py b/python/paddle/fluid/tests/unittests/test_kthvalue_op.py index 823efffd3bab8d85305f063b4c7c97f11b930762..f3f5ccf18797d90ccc813b4204c541d74efc062d 100644 --- a/python/paddle/fluid/tests/unittests/test_kthvalue_op.py +++ b/python/paddle/fluid/tests/unittests/test_kthvalue_op.py @@ -33,7 +33,6 @@ def cal_kthvalue(x, k, axis, keepdim=False): class TestKthvalueOp(OpTest): - def init_args(self): self.k = 5 self.axis = -1 @@ -46,9 +45,9 @@ class TestKthvalueOp(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis} - output, indices = cal_kthvalue(self.input_data, - k=self.k, - axis=self.axis) + output, indices = cal_kthvalue( + self.input_data, k=self.k, axis=self.axis + ) self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): @@ -61,7 +60,6 @@ class TestKthvalueOp(OpTest): class TestKthvalueOpWithKeepdim(OpTest): - def init_args(self): self.k = 2 self.axis = 1 @@ -74,10 +72,9 @@ class TestKthvalueOpWithKeepdim(OpTest): self.input_data = np.random.random((1, 3, 2, 4, 10)) self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'keepdim': True} - output, indices = cal_kthvalue(self.input_data, - k=self.k, - axis=self.axis, - keepdim=True) + output, indices = cal_kthvalue( + self.input_data, k=self.k, axis=self.axis, keepdim=True + ) self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): @@ -90,7 +87,6 @@ class TestKthvalueOpWithKeepdim(OpTest): class TestKthvalueOpKernels(unittest.TestCase): - def setUp(self): self.axises = [2, -1] @@ -107,9 +103,9 @@ class TestKthvalueOpKernels(unittest.TestCase): value_expect, indice_expect = cal_kthvalue(inputs, k, axis) v, inds = paddle.kthvalue(tensor, k, axis) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) - np.testing.assert_allclose(inds.numpy(), - indice_expect, - rtol=1e-05) + np.testing.assert_allclose( + inds.numpy(), indice_expect, rtol=1e-05 + ) def test_gpu_kernel(): shape = (2, 30, 250) @@ -121,9 +117,9 @@ class TestKthvalueOpKernels(unittest.TestCase): value_expect, indice_expect = cal_kthvalue(inputs, k, axis) v, inds = paddle.kthvalue(tensor, k, axis) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) - np.testing.assert_allclose(inds.numpy(), - indice_expect, - rtol=1e-05) + np.testing.assert_allclose( + inds.numpy(), indice_expect, rtol=1e-05 + ) test_cpu_kernel() if fluid.core.is_compiled_with_cuda(): @@ -131,13 +127,11 @@ class TestKthvalueOpKernels(unittest.TestCase): class TestKthvalueOpWithNaN(unittest.TestCase): - def setUp(self): paddle.disable_static() self.x = paddle.uniform([2, 200, 10], dtype='float32') def test_errors(self): - def test_nan_in_cpu_kernel(): paddle.set_device('cpu') nan_position = 100 @@ -160,7 +154,6 @@ class TestKthvalueOpWithNaN(unittest.TestCase): class TestKthvalueOpErrors(unittest.TestCase): - def setUp(self): self.x = paddle.uniform([2, 10, 20, 25], dtype='float32') @@ -184,7 +177,6 @@ class 
TestKthvalueOpErrors(unittest.TestCase): class TestModeOpInStatic(unittest.TestCase): - def setUp(self): np.random.seed(666) self.input_data = np.random.random((2, 20, 1, 2, 80)).astype(np.float64) @@ -192,16 +184,18 @@ class TestModeOpInStatic(unittest.TestCase): def test_run_static(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - input_tensor = paddle.static.data(name="x", - shape=[2, 20, 1, 2, 80], - dtype="float64") + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + input_tensor = paddle.static.data( + name="x", shape=[2, 20, 1, 2, 80], dtype="float64" + ) result = paddle.kthvalue(input_tensor, self.k, axis=1) expect_value = cal_kthvalue(self.input_data, self.k, axis=1)[0] exe = paddle.static.Executor(paddle.CPUPlace()) - paddle_result = exe.run(feed={"x": self.input_data}, - fetch_list=[result])[0] + paddle_result = exe.run( + feed={"x": self.input_data}, fetch_list=[result] + )[0] np.testing.assert_allclose(paddle_result, expect_value, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py index bd68a125e3a26a64e8b382a46606ce7b94c95dbc..00b716ce8715481cf7bd41870ec0a0336ecde8ce 100644 --- a/python/paddle/fluid/tests/unittests/test_l1_loss.py +++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py @@ -19,7 +19,6 @@ import unittest class TestFunctionalL1Loss(unittest.TestCase): - def setUp(self): self.input_np = np.random.random(size=(10, 10, 5)).astype(np.float32) self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32) @@ -43,12 +42,12 @@ class TestFunctionalL1Loss(unittest.TestCase): self.assertTrue(dy_result.shape, [10, 10, 5]) def run_static(self, use_gpu=False): - input = paddle.fluid.data(name='input', - shape=[10, 10, 5], - dtype='float32') - label = paddle.fluid.data(name='label', - shape=[10, 10, 5], - dtype='float32') + input = paddle.fluid.data( + name='input', shape=[10, 10, 5], dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=[10, 10, 5], dtype='float32' + ) result0 = paddle.nn.functional.l1_loss(input, label) result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum') result2 = paddle.nn.functional.l1_loss(input, label, reduction='none') @@ -57,11 +56,10 @@ class TestFunctionalL1Loss(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - static_result = exe.run(feed={ - "input": self.input_np, - "label": self.label_np - }, - fetch_list=[result0, result1, result2]) + static_result = exe.run( + feed={"input": self.input_np, "label": self.label_np}, + fetch_list=[result0, result1, result2], + ) expected = np.mean(np.abs(self.input_np - self.label_np)) np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) @@ -93,23 +91,21 @@ class TestFunctionalL1Loss(unittest.TestCase): # test case the raise message def test_errors(self): - def test_value_error(): - input = paddle.fluid.data(name='input', - shape=[10, 10, 5], - dtype='float32') - label = paddle.fluid.data(name='label', - shape=[10, 10, 5], - dtype='float32') - loss = paddle.nn.functional.l1_loss(input, - label, - reduction='reduce_mean') + input = paddle.fluid.data( + name='input', shape=[10, 10, 5], dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=[10, 10, 5], dtype='float32' + ) + loss = paddle.nn.functional.l1_loss( + input, label, reduction='reduce_mean' + ) 
self.assertRaises(ValueError, test_value_error) class TestClassL1Loss(unittest.TestCase): - def setUp(self): self.input_np = np.random.random(size=(10, 10, 5)).astype(np.float32) self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32) @@ -136,12 +132,12 @@ class TestClassL1Loss(unittest.TestCase): self.assertTrue(dy_result.shape, [10, 10, 5]) def run_static(self, use_gpu=False): - input = paddle.fluid.data(name='input', - shape=[10, 10, 5], - dtype='float32') - label = paddle.fluid.data(name='label', - shape=[10, 10, 5], - dtype='float32') + input = paddle.fluid.data( + name='input', shape=[10, 10, 5], dtype='float32' + ) + label = paddle.fluid.data( + name='label', shape=[10, 10, 5], dtype='float32' + ) l1_loss = paddle.nn.loss.L1Loss() result0 = l1_loss(input, label) l1_loss = paddle.nn.loss.L1Loss(reduction='sum') @@ -154,11 +150,10 @@ class TestClassL1Loss(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - static_result = exe.run(feed={ - "input": self.input_np, - "label": self.label_np - }, - fetch_list=[result0, result1, result2]) + static_result = exe.run( + feed={"input": self.input_np, "label": self.label_np}, + fetch_list=[result0, result1, result2], + ) expected = np.mean(np.abs(self.input_np - self.label_np)) np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) @@ -189,7 +184,6 @@ class TestClassL1Loss(unittest.TestCase): # test case the raise message def test_errors(self): - def test_value_error(): loss = paddle.nn.loss.L1Loss(reduction="reduce_mean") diff --git a/python/paddle/fluid/tests/unittests/test_l1_norm_op.py b/python/paddle/fluid/tests/unittests/test_l1_norm_op.py index bc568dd5414a094f358d17e6c1ad424be70eb230..03d69e2334750224a54de7cfedd8cb799b47e117 100644 --- a/python/paddle/fluid/tests/unittests/test_l1_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_l1_norm_op.py @@ -18,8 +18,7 @@ from op_test import OpTest class TestL1NormOp(OpTest): - """Test l1_norm - """ + """Test l1_norm""" def setUp(self): self.op_type = "l1_norm" diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py index c5859b7311659f5af41465ad2d8049e681957da8..12b2f0a47148a613ccc65a7d86f74e779f449644 100644 --- a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py +++ b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py @@ -21,13 +21,14 @@ import unittest class LabelSmoothTestCase(unittest.TestCase): - - def __init__(self, - methodName='runTest', - label_shape=(20, 1), - prior_dist=None, - epsilon=0.1, - dtype="float32"): + def __init__( + self, + methodName='runTest', + label_shape=(20, 1), + prior_dist=None, + epsilon=0.1, + dtype="float32", + ): super(LabelSmoothTestCase, self).__init__(methodName) self.label_shape = label_shape @@ -44,17 +45,19 @@ class LabelSmoothTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - label_var = fluid.data("input", - self.label_shape, - dtype=self.dtype) - y_var = fluid.layers.label_smooth(label_var, - prior_dist=self.prior_dist, - epsilon=self.epsilon, - dtype=self.dtype) + label_var = fluid.data( + "input", self.label_shape, dtype=self.dtype + ) + y_var = fluid.layers.label_smooth( + label_var, + prior_dist=self.prior_dist, + epsilon=self.epsilon, + dtype=self.dtype, + ) feed_dict = {"input": self.label} exe = 
fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): @@ -63,24 +66,24 @@ class LabelSmoothTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - label_var = fluid.data("input", - self.label_shape, - dtype=self.dtype) - y_var = F.label_smooth(label_var, - prior_dist=self.prior_dist, - epsilon=self.epsilon) + label_var = fluid.data( + "input", self.label_shape, dtype=self.dtype + ) + y_var = F.label_smooth( + label_var, prior_dist=self.prior_dist, epsilon=self.epsilon + ) feed_dict = {"input": self.label} exe = fluid.Executor(place) exe.run(start) - y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var]) + (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def paddle_dygraph_layer(self): paddle.disable_static() label_var = dg.to_variable(self.label) - y_var = F.label_smooth(label_var, - prior_dist=self.prior_dist, - epsilon=self.epsilon) + y_var = F.label_smooth( + label_var, prior_dist=self.prior_dist, epsilon=self.epsilon + ) y_np = y_var.numpy() return y_np @@ -101,7 +104,6 @@ class LabelSmoothTestCase(unittest.TestCase): class LabelSmoothErrorTestCase(LabelSmoothTestCase): - def runTest(self): place = fluid.CPUPlace() with dg.guard(place): @@ -112,7 +114,8 @@ class LabelSmoothErrorTestCase(LabelSmoothTestCase): def add_cases(suite): suite.addTest(LabelSmoothTestCase(methodName='runTest')) suite.addTest( - LabelSmoothTestCase(methodName='runTest', label_shape=[2, 3, 1])) + LabelSmoothTestCase(methodName='runTest', label_shape=[2, 3, 1]) + ) def add_error_cases(suite): diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py index 0fe5862f2c9c68cf8e3cb8846ea7daf6fc5bbc07..9166d07c741ed3d1bac1c4eaba64eebcabd369d5 100644 --- a/python/paddle/fluid/tests/unittests/test_label_smooth_op.py +++ b/python/paddle/fluid/tests/unittests/test_label_smooth_op.py @@ -19,7 +19,6 @@ import paddle class TestLabelSmoothOp(OpTest): - def config(self): self.op_type = "label_smooth" self.python_api = paddle.nn.functional.label_smooth @@ -32,7 +31,8 @@ class TestLabelSmoothOp(OpTest): def setUp(self): self.config() smoothed_label = ( - 1 - self.epsilon) * self.label + self.epsilon / self.label_dim + 1 - self.epsilon + ) * self.label + self.epsilon / self.label_dim self.inputs = {'X': self.label} self.attrs = {'epsilon': self.epsilon} self.outputs = {'Out': smoothed_label} @@ -45,7 +45,6 @@ class TestLabelSmoothOp(OpTest): class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): - def setUp(self): self.config() dist = np.random.random((1, self.label_dim)) @@ -56,23 +55,25 @@ class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp): class TestLabelSmoothOp3D(TestLabelSmoothOp): - def setUp(self): super(TestLabelSmoothOp3D, self).setUp() self.inputs['X'] = self.inputs['X'].reshape( - [2, -1, self.inputs['X'].shape[-1]]) + [2, -1, self.inputs['X'].shape[-1]] + ) self.outputs['Out'] = self.outputs['Out'].reshape( - self.inputs['X'].shape) + self.inputs['X'].shape + ) class TestLabelSmoothOpWithPriorDist3D(TestLabelSmoothOpWithPriorDist): - def setUp(self): super(TestLabelSmoothOpWithPriorDist3D, self).setUp() self.inputs['X'] = self.inputs['X'].reshape( - [2, -1, self.inputs['X'].shape[-1]]) + [2, -1, self.inputs['X'].shape[-1]] + ) self.outputs['Out'] = self.outputs['Out'].reshape( - 
self.inputs['X'].shape) + self.inputs['X'].shape + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lamb_op.py b/python/paddle/fluid/tests/unittests/test_lamb_op.py index eaa8db70313085be19f6d64e960944426104f273..9c3bbe6d830bc0e5e8d9b193e5289500227e04d4 100644 --- a/python/paddle/fluid/tests/unittests/test_lamb_op.py +++ b/python/paddle/fluid/tests/unittests/test_lamb_op.py @@ -23,18 +23,16 @@ paddle.enable_static() class TestLambOp1(OpTest): - def set_attrs(self): self.attrs = { 'epsilon': 1e-4, 'beta1': 0.78, 'beta2': 0.836, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } def setUp(self): - '''Test Lamb Op with supplied attributes - ''' + '''Test Lamb Op with supplied attributes''' self.op_type = "lamb" param = np.random.uniform(-1, 1, (102, 105)).astype("float32") grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") @@ -53,19 +51,23 @@ class TestLambOp1(OpTest): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } - - param_out, moment1_out, moment2_out, \ - beta1_pow_out, beta2_pow_out = lamb_step(self.inputs, self.attrs) + ( + param_out, + moment1_out, + moment2_out, + beta1_pow_out, + beta2_pow_out, + ) = lamb_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } def test_check_output(self): @@ -73,38 +75,41 @@ class TestLambOp1(OpTest): class TestLambOp2(TestLambOp1): - def set_attrs(self): self.attrs = { 'epsilon': 1e-8, 'beta1': 0.9, 'beta2': 0.999, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } class TestLambOpMultipleSteps(TestLambOp1): - def set_attrs(self): self.attrs = { 'epsilon': 1e-8, 'beta1': 0.9, 'beta2': 0.999, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } self.num_steps = 10 def test_check_output(self): for i in range(self.num_steps): - param_out, moment1_out, moment2_out, \ - beta1_pow_out, beta2_pow_out = lamb_step(self.inputs, self.attrs) + ( + param_out, + moment1_out, + moment2_out, + beta1_pow_out, + beta2_pow_out, + ) = lamb_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } # Verify output for this step @@ -120,8 +125,9 @@ class TestLambOpMultipleSteps(TestLambOp1): self.inputs['Beta2Pow'] = beta2_pow_out # Randomize gradient for next step - self.inputs['Grad'] = np.random.uniform( - -1, 1, (102, 105)).astype("float32") + self.inputs['Grad'] = np.random.uniform(-1, 1, (102, 105)).astype( + "float32" + ) def lamb_step(inputs, attributes): @@ -152,14 +158,16 @@ def lamb_step(inputs, attributes): moment2_unbiased = moment2_out / (1 - beta2_pow) r_1 = np.linalg.norm(param) - r_2 = np.linalg.norm(moment1_unbiased / - (np.sqrt(moment2_unbiased) + epsilon) + - weight_decay * param) + r_2 = np.linalg.norm( + moment1_unbiased / (np.sqrt(moment2_unbiased) + epsilon) + + weight_decay * param + ) lr_t = lr * r_1 / r_2 - param_out = param - lr_t * (moment1_unbiased / - (np.sqrt(moment2_unbiased) + epsilon) + - weight_decay * param) + param_out = param - lr_t * ( + moment1_unbiased / (np.sqrt(moment2_unbiased) + epsilon) + + weight_decay * param + ) beta1_pow_out = beta1_pow * beta1 beta2_pow_out = beta2_pow * 
beta2 @@ -195,25 +203,32 @@ def lamb_step_sparse(inputs, attributes, height, rows, row_numel, np_grad): moment2_unbiased = np.zeros(shape=[height, row_numel]) def update_mom(row_id, update_value): - moment1_out[row_id] = beta1 * moment1[row_id] + (1 - - beta1) * update_value - moment2_out[row_id] = beta2 * moment2[row_id] + ( - 1 - beta2) * np.square(update_value) - - moment1_out[row_id] = beta1 * moment1[row_id] + (1 - - beta1) * update_value - moment2_out[row_id] = beta2 * moment2[row_id] + ( - 1 - beta2) * np.square(update_value) + moment1_out[row_id] = ( + beta1 * moment1[row_id] + (1 - beta1) * update_value + ) + moment2_out[row_id] = beta2 * moment2[row_id] + (1 - beta2) * np.square( + update_value + ) + + moment1_out[row_id] = ( + beta1 * moment1[row_id] + (1 - beta1) * update_value + ) + moment2_out[row_id] = beta2 * moment2[row_id] + (1 - beta2) * np.square( + update_value + ) def update_param(): r_1 = np.linalg.norm(param) - r_2 = np.linalg.norm(moment1_out / (np.sqrt(moment2_out) + epsilon) + - weight_decay * param) + r_2 = np.linalg.norm( + moment1_out / (np.sqrt(moment2_out) + epsilon) + + weight_decay * param + ) lr_t = lr * r_1 / r_2 - param_out = param - lr_t * (moment1_out / - (np.sqrt(moment2_out) + epsilon) + - weight_decay * param) + param_out = param - lr_t * ( + moment1_out / (np.sqrt(moment2_out) + epsilon) + + weight_decay * param + ) for row_id in range(param_out.shape[0]): update_value = np.zeros(np_grad[0].shape).astype("float32") @@ -229,7 +244,6 @@ def lamb_step_sparse(inputs, attributes, height, rows, row_numel, np_grad): class TestSparseLambOp(unittest.TestCase): - def setup(self, scope, place): beta1 = 0.78 beta2 = 0.836 @@ -246,14 +260,14 @@ class TestSparseLambOp(unittest.TestCase): "Moment2": np.full((height, row_numel), 5.0).astype("float32"), 'Beta1Pow': np.array([beta1]).astype("float32"), 'Beta2Pow': np.array([beta2]).astype("float32"), - "LearningRate": np.full((1), 2.0).astype("float32") + "LearningRate": np.full((1), 2.0).astype("float32"), } self.init_output = np.full((height, row_numel), 0.0).astype("float32") self.attrs = { 'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2, - 'weight_decay': 0.05 + 'weight_decay': 0.05, } grad_selected_rows = scope.var('Grad').get_selected_rows() @@ -269,13 +283,14 @@ class TestSparseLambOp(unittest.TestCase): self.sparse_inputs = ["Grad"] param_out, mom1, mom2, beta1_pow_out, beta2_pow_out = lamb_step_sparse( - self.dense_inputs, self.attrs, height, rows, row_numel, np_array) + self.dense_inputs, self.attrs, height, rows, row_numel, np_array + ) self.outputs = { "ParamOut": param_out, "Moment1Out": mom1, "Moment2Out": mom2, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } def check_with_place(self, place): diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index 1e01f3a4ce3940b407523d65adc5b1f6c9a652d9..475866fb903e8e6ded366d6b1571709fc4651ef3 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -22,39 +22,40 @@ import paddle.fluid.layers as layers class LAMBOptimizer(paddle.optimizer.Lamb): - def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, fluid.framework.Block) block.program._use_lamb = True - m = moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) + m = moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) v = 
self._get_accumulator(self._moment2_acc_str, param_and_grad[0]) - beta_1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta_2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) - - beta_1 = layers.fill_constant(dtype='float32', - shape=[1], - value=self._beta1, - name='lamb_beta_1') - beta_2 = layers.fill_constant(dtype='float32', - shape=[1], - value=self._beta2, - name='lamb_beta_2') - epsilon = layers.fill_constant(dtype='float32', - shape=[1], - value=self._epsilon, - name='epsilon') + beta_1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta_2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) + + beta_1 = layers.fill_constant( + dtype='float32', shape=[1], value=self._beta1, name='lamb_beta_1' + ) + beta_2 = layers.fill_constant( + dtype='float32', shape=[1], value=self._beta2, name='lamb_beta_2' + ) + epsilon = layers.fill_constant( + dtype='float32', shape=[1], value=self._epsilon, name='epsilon' + ) one = paddle.ones(shape=[1]).astype('float32') zero = paddle.zeros(shape=[1]).astype('float32') next_m = paddle.multiply(m, beta_1) + paddle.multiply( - param_and_grad[1], one - beta_1) + param_and_grad[1], one - beta_1 + ) next_v = paddle.multiply(v, beta_2) + paddle.multiply( - paddle.pow(param_and_grad[1], 2), one - beta_2) + paddle.pow(param_and_grad[1], 2), one - beta_2 + ) beta1_correction = one - beta_1_pow_acc beta2_correction = one - beta_2_pow_acc @@ -64,8 +65,10 @@ class LAMBOptimizer(paddle.optimizer.Lamb): update = next_m_unbiased / (paddle.sqrt(next_v_unbiased) + epsilon) - if self._exclude_from_weight_decay_fn is not None and self._exclude_from_weight_decay_fn( - param_and_grad[0]): + if ( + self._exclude_from_weight_decay_fn is not None + and self._exclude_from_weight_decay_fn(param_and_grad[0]) + ): self._lamb_weight_decay = 0.0 update += self._lamb_weight_decay * param_and_grad[0] @@ -76,8 +79,11 @@ class LAMBOptimizer(paddle.optimizer.Lamb): ratio = paddle.where( paddle.greater_than(w_norm, zero), - paddle.where(paddle.greater_than(g_norm, zero), (w_norm / g_norm), - one), one) + paddle.where( + paddle.greater_than(g_norm, zero), (w_norm / g_norm), one + ), + one, + ) update_with_lr = ratio * learning_rate * update next_param = param_and_grad[0] - update_with_lr @@ -92,16 +98,15 @@ class LAMBOptimizer(paddle.optimizer.Lamb): class TestLambOpV2(unittest.TestCase): - def test_lamb_op(self): shape = [2, 4, 8, 8] data = paddle.to_tensor(np.random.random(size=shape).astype("float32")) conv = paddle.nn.Conv2D(4, 6, (3, 3)) data = conv(data) loss = paddle.mean(data) - opt = paddle.optimizer.Lamb(learning_rate=1e-5, - epsilon=1e-8, - parameters=conv.parameters()) + opt = paddle.optimizer.Lamb( + learning_rate=1e-5, epsilon=1e-8, parameters=conv.parameters() + ) loss.backward() opt.minimize(loss) @@ -109,7 +114,6 @@ class TestLambOpV2(unittest.TestCase): class TestLambOpWithCombinedOp(unittest.TestCase): - def test_lamb_op_with_multi_steps(self): paddle.enable_static() @@ -140,12 +144,11 @@ class TestLambOpWithCombinedOp(unittest.TestCase): executor = fluid.Executor(place) executor.run(startup_program) - output = executor.run(program=main_program, - feed={ - 'X': feed_x, - 'Y': feed_y - }, - fetch_list=[avg_loss.name]) + output = executor.run( + program=main_program, + feed={'X': feed_x, 'Y': feed_y}, + fetch_list=[avg_loss.name], + ) main = fluid.Program() startup = fluid.Program() @@ -156,18 +159,16 @@ class 
TestLambOpWithCombinedOp(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup) - out = exe.run(program=main, - feed={ - 'X': feed_x, - 'Y': feed_y - }, - fetch_list=[loss.name]) + out = exe.run( + program=main, + feed={'X': feed_x, 'Y': feed_y}, + fetch_list=[loss.name], + ) np.testing.assert_allclose(out, output, rtol=1e-05) class TestLambOpV2Group(TestLambOpV2): - def test_lamb_op(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -175,16 +176,19 @@ class TestLambOpV2Group(TestLambOpV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.Lamb(learning_rate=0.01, - parameters=[{ - 'params': linear_1.parameters() - }, { - 'params': linear_2.parameters(), - 'lamb_weight_decay': 0.001, - 'beta1': 0.9, - 'beta2': 0.99 - }], - lamb_weight_decay=0.01) + adam = paddle.optimizer.Lamb( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'lamb_weight_decay': 0.001, + 'beta1': 0.9, + 'beta2': 0.99, + }, + ], + lamb_weight_decay=0.01, + ) out = linear_1(a) out = linear_2(out) out.backward() @@ -193,16 +197,15 @@ class TestLambOpV2Group(TestLambOpV2): class TestLambOpMultiPrecision(unittest.TestCase): - def check_main(self, x_np, place, multi_precision=False, seed=10, n=10): main_prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, startup_prog): paddle.seed(seed) with paddle.static.amp.fp16_guard(): - x = paddle.static.data(name='x', - shape=[None, 10], - dtype='float32') + x = paddle.static.data( + name='x', shape=[None, 10], dtype='float32' + ) linear = paddle.nn.Linear(10, 2) hidden = linear(x) loss = paddle.mean(hidden) @@ -210,9 +213,9 @@ class TestLambOpMultiPrecision(unittest.TestCase): original_optimizer = paddle.optimizer.Lamb(learning_rate=1e-3) original_optimizer._multi_precision = multi_precision if multi_precision: - optimizer = paddle.static.amp.decorate(original_optimizer, - use_pure_fp16=True, - use_fp16_guard=True) + optimizer = paddle.static.amp.decorate( + original_optimizer, use_pure_fp16=True, use_fp16_guard=True + ) else: optimizer = original_optimizer optimizer.minimize(loss) @@ -233,8 +236,9 @@ class TestLambOpMultiPrecision(unittest.TestCase): if multi_precision: params[0] = np.array(params[0]) params[1] = np.array(params[1]) - np.testing.assert_array_equal(params[0], - params[1].astype(np.float16)) + np.testing.assert_array_equal( + params[0], params[1].astype(np.float16) + ) return params[0].astype(np.float32) else: self.assertTrue(params[0] is not None) @@ -250,9 +254,9 @@ class TestLambOpMultiPrecision(unittest.TestCase): weight_np, bias_np = None, None for i in range(n): feed_dict = {x.name: x_np} - weight_np, bias_np = exe.run(main_prog, - feed=feed_dict, - fetch_list=[weight, bias]) + weight_np, bias_np = exe.run( + main_prog, feed=feed_dict, fetch_list=[weight, bias] + ) weight_np = weight_np.astype('float32') bias_np = bias_np.astype('float32') np.testing.assert_array_equal(weight_np, get_parameter(weight)) diff --git a/python/paddle/fluid/tests/unittests/test_launch_coverage.py b/python/paddle/fluid/tests/unittests/test_launch_coverage.py index 00db6590d6f3fa4c71411c827967e70aab0160f0..1f00ea9af55639db585d568a3b0d0e35b9bcbff1 100644 --- a/python/paddle/fluid/tests/unittests/test_launch_coverage.py +++ b/python/paddle/fluid/tests/unittests/test_launch_coverage.py @@ -15,7 +15,11 @@ import unittest from 
argparse import ArgumentParser, REMAINDER -from paddle.distributed.utils.launch_utils import _print_arguments, get_gpus, get_cluster_from_args +from paddle.distributed.utils.launch_utils import ( + _print_arguments, + get_gpus, + get_cluster_from_args, +) from paddle.distributed.fleet.launch_utils import find_free_ports @@ -32,72 +36,75 @@ PADDLE_CURRENT_ENDPOINT PADDLE_TRAINERS_NUM PADDLE_TRAINER_ENDPOINTS POD_IP (current node ip address, not needed for local training) -''') +''' + ) - #Optional arguments for the launch helper + # Optional arguments for the launch helper parser.add_argument( "--cluster_node_ips", type=str, default="127.0.0.1", - help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..") - parser.add_argument("--node_ip", - type=str, - default="127.0.0.1", - help="The current node ip. ") + help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..", + ) + parser.add_argument( + "--node_ip", type=str, default="127.0.0.1", help="The current node ip. " + ) parser.add_argument( "--use_paddlecloud", action='store_true', - help= - "wheter to use paddlecloud platform to run your multi-process job. If false, no need to set this argument." + help="wheter to use paddlecloud platform to run your multi-process job. If false, no need to set this argument.", + ) + parser.add_argument( + "--started_port", + type=int, + default=None, + help="The trainer's started port on a single node", ) - parser.add_argument("--started_port", - type=int, - default=None, - help="The trainer's started port on a single node") - parser.add_argument("--print_config", - type=bool, - default=True, - help="Print the config or not") + parser.add_argument( + "--print_config", + type=bool, + default=True, + help="Print the config or not", + ) parser.add_argument( "--selected_gpus", type=str, default=None, - help= - "It's for gpu training and the training process will run on the selected_gpus," - "each process is bound to a single GPU. And if it's not set, this module will use all the gpu cards for training." + help="It's for gpu training and the training process will run on the selected_gpus," + "each process is bound to a single GPU. And if it's not set, this module will use all the gpu cards for training.", ) parser.add_argument( "--log_level", type=int, - default= - 20, # logging.INFO, details are here:https://docs.python.org/3/library/logging.html#levels - help="Logging level, default is logging.INFO") + default=20, # logging.INFO, details are here:https://docs.python.org/3/library/logging.html#levels + help="Logging level, default is logging.INFO", + ) parser.add_argument( "--log_dir", type=str, - help= - "The path for each process's log.If it's not set, the log will printed to default pipe." 
+ help="The path for each process's log.If it's not set, the log will printed to default pipe.", ) - #positional - parser.add_argument("training_script", - type=str, - help="The full path to the single GPU training " - "program/script to be launched in parallel, " - "followed by all the arguments for the " - "training script") + # positional + parser.add_argument( + "training_script", + type=str, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script", + ) - #rest from the training program + # rest from the training program parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args() class TestCoverage(unittest.TestCase): - def test_gpus(self): args = _parse_args() diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index e49cf0b24e77589721a58cd4644cb0bf9c163ffa..17d2730b3e8f7b80e8d87b735014a0d5dd3e5c3a 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -23,7 +23,9 @@ import paddle.nn.functional as F from functools import reduce from op_test import _set_use_system_allocator from paddle.fluid import Program, program_guard -from paddle.fluid.contrib.mixed_precision.fp16_utils import _keep_layer_norm_scale_bias_to_fp32 +from paddle.fluid.contrib.mixed_precision.fp16_utils import ( + _keep_layer_norm_scale_bias_to_fp32, +) paddle.enable_static() @@ -35,13 +37,14 @@ _set_use_system_allocator(True) def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): x_shape = x.shape N = reduce(mul, x_shape[0:begin_norm_axis], 1) - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) x.shape = [N, D] mean = np.mean(x, axis=1) var = np.var(x, axis=1) + epsilon - output = np.divide((x - mean.reshape([N, 1])), - (np.sqrt(var)).reshape([N, 1])) + output = np.divide( + (x - mean.reshape([N, 1])), (np.sqrt(var)).reshape([N, 1]) + ) if scale is not None: output = scale.reshape([1, D]) * output if beta is not None: @@ -51,16 +54,12 @@ def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): return output, mean, var -def _reference_layer_norm_grad(x, - grad_y, - scale, - bias, - mean, - var, - begin_norm_axis=1): +def _reference_layer_norm_grad( + x, grad_y, scale, bias, mean, var, begin_norm_axis=1 +): x_shape = x.shape N = reduce(mul, x_shape[0:begin_norm_axis], 1) - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) if scale is not None: scale_shape = scale.shape @@ -75,38 +74,39 @@ def _reference_layer_norm_grad(x, d_bias = None # d_scale if scale is not None: - d_scale = np.sum(((x - mean) * np.sqrt(1 / var)) * grad_y, - axis=0).reshape([1, D]) + d_scale = np.sum( + ((x - mean) * np.sqrt(1 / var)) * grad_y, axis=0 + ).reshape([1, D]) else: d_scale = None # dx if scale is not None: dx_end = scale * np.sqrt(1.0 / var) * grad_y - d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, - axis=1).reshape([N, 1 - ]) # the second part equals to zero. + d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * scale, axis=1).reshape( + [N, 1] + ) # the second part equals to zero. 
d_mean = 1.0 / D * d_mean_0 - d_std = np.sum(-(1.0 / var) * - (x - mean) * grad_y * scale, axis=1).reshape([N, 1]) * ( - 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * - (x - mean)) + d_std = np.sum( + -(1.0 / var) * (x - mean) * grad_y * scale, axis=1 + ).reshape([N, 1]) * ( + 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean) + ) else: dx_end = 1.0 * np.sqrt(1.0 / var) * grad_y - d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * 1.0, - axis=1).reshape([N, 1 - ]) # the second part equals to zero. + d_mean_0 = np.sum(-np.sqrt(1.0 / var) * grad_y * 1.0, axis=1).reshape( + [N, 1] + ) # the second part equals to zero. d_mean = 1.0 / D * d_mean_0 - d_std = np.sum(-(1.0 / var) * - (x - mean) * grad_y * 1.0, axis=1).reshape([N, 1]) * ( - 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * - (x - mean)) + d_std = np.sum( + -(1.0 / var) * (x - mean) * grad_y * 1.0, axis=1 + ).reshape([N, 1]) * ( + 1.0 / D * np.sqrt(1.0 / var).reshape([N, 1]) * (x - mean) + ) grad_x = dx_end + d_mean + d_std grad_x.shape, x.shape, grad_y.shape = x_shape, x_shape, x_shape - var.shape, mean.shape = [ - N, - ], [ + var.shape, mean.shape = [N,], [ N, ] @@ -116,45 +116,53 @@ def _reference_layer_norm_grad(x, class TestLayerNormOp(unittest.TestCase): - def setUp(self): self.use_cudnn = True def __assert_close(self, tensor, np_array, msg, atol=1e-4): self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def check_forward_backward(self, - shape, - begin_norm_axis, - has_scale=True, - has_bias=True, - y_grad_scale=1.0, - use_mkldnn=False): - - def test_with_place(place, - shape, - begin_norm_axis, - use_mkldnn=use_mkldnn): + def check_forward_backward( + self, + shape, + begin_norm_axis, + has_scale=True, + has_bias=True, + y_grad_scale=1.0, + use_mkldnn=False, + ): + def test_with_place( + place, shape, begin_norm_axis, use_mkldnn=use_mkldnn + ): # attr epsilon = 0.00001 x_shape = shape - D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + D = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) scale_shape = [D] np.random.seed(123) x = np.random.random_sample(x_shape).astype(np.float32) - scale = np.random.random_sample(scale_shape).astype( - np.float32) if has_scale else None - bias = np.random.random_sample(scale_shape).astype( - np.float32) if has_bias else None + scale = ( + np.random.random_sample(scale_shape).astype(np.float32) + if has_scale + else None + ) + bias = ( + np.random.random_sample(scale_shape).astype(np.float32) + if has_bias + else None + ) y_grad = (np.random.random_sample(x_shape) * y_grad_scale).astype( - np.float32) + np.float32 + ) # reference forward & backward y, mean, variance = _reference_layer_norm_naive( - x, scale, bias, epsilon, begin_norm_axis) + x, scale, bias, epsilon, begin_norm_axis + ) x_grad, scale_grad, bias_grad = _reference_layer_norm_grad( - x, y_grad, scale, bias, mean, variance, begin_norm_axis) + x, y_grad, scale, bias, mean, variance, begin_norm_axis + ) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -169,9 +177,11 @@ class TestLayerNormOp(unittest.TestCase): with fluid.program_guard(program): block = program.global_block() for name in ground_truth: - block.create_var(name=name, - dtype='float32', - shape=ground_truth[name].shape) + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape, + ) inputs = {"X": block.var('x')} fetch_list = [ 'y', @@ -191,17 +201,20 @@ class TestLayerNormOp(unittest.TestCase): outputs={ "Y": block.var('y'), "Mean": block.var('mean'), # share the same memory - "Variance": - 
block.var('variance'), # share the same memory + "Variance": block.var( + 'variance' + ), # share the same memory }, attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": use_mkldnn - }) + "use_mkldnn": use_mkldnn, + }, + ) # generate backward op_desc grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - layer_norm_op.desc, set(), []) + layer_norm_op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -215,12 +228,14 @@ class TestLayerNormOp(unittest.TestCase): program._sync_with_cpp() exe = fluid.Executor(place) - out = exe.run(program, - feed={ - name: var_dict[name] - for name in ['x', 'scale', 'bias', 'y@GRAD'] - }, - fetch_list=fetch_list) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=fetch_list, + ) # print(y) # print(out[0]) self.__assert_close(y, out[0], "y") @@ -228,17 +243,25 @@ class TestLayerNormOp(unittest.TestCase): self.__assert_close(variance, out[2], "variance", 1e-3) self.__assert_close(x_grad, out[3], "x_grad") if has_scale: - self.__assert_close(scale_grad, - out[fetch_list.index('scale@GRAD')], - "scale_grad", 1e-3) + self.__assert_close( + scale_grad, + out[fetch_list.index('scale@GRAD')], + "scale_grad", + 1e-3, + ) if has_bias: - self.__assert_close(bias_grad, - out[fetch_list.index('bias@GRAD')], - "bias_grad") + self.__assert_close( + bias_grad, + out[fetch_list.index('bias@GRAD')], + "bias_grad", + ) places = [core.CPUPlace()] - if core.is_compiled_with_cuda() and core.op_support_gpu( - "layer_norm") and self.use_cudnn: + if ( + core.is_compiled_with_cuda() + and core.op_support_gpu("layer_norm") + and self.use_cudnn + ): places.append(core.CUDAPlace(0)) for place in places: @@ -247,87 +270,108 @@ class TestLayerNormOp(unittest.TestCase): def test_check_forward_backward_with_scale_and_bias(self): self.check_forward_backward(shape=[1, 3, 4, 5], begin_norm_axis=1) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=True) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=True, - has_bias=False) - self.check_forward_backward(shape=[2, 3, 4, 5], - begin_norm_axis=1, - has_scale=False, - has_bias=False) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=True, + ) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=True, + has_bias=False, + ) + self.check_forward_backward( + shape=[2, 3, 4, 5], + begin_norm_axis=1, + has_scale=False, + has_bias=False, + ) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) - self.check_forward_backward(shape=[92, 513, 129], - begin_norm_axis=2, - y_grad_scale=0.1) + self.check_forward_backward( + shape=[92, 513, 129], begin_norm_axis=2, y_grad_scale=0.1 + ) self.check_forward_backward(shape=[3, 34, 1134], begin_norm_axis=2) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=False, - has_bias=True, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=True, - has_bias=False, - y_grad_scale=0.1) - self.check_forward_backward(shape=[92, 513, 1134], - begin_norm_axis=2, - has_scale=False, - has_bias=False, - y_grad_scale=0.1) - 
self.check_forward_backward(shape=[512, 1024], - begin_norm_axis=1, - has_scale=True, - has_bias=True) - self.check_forward_backward(shape=[1, 128, 256, 256], - begin_norm_axis=3, - has_scale=True, - has_bias=True) - self.check_forward_backward(shape=[1, 256, 384], - begin_norm_axis=2, - has_scale=True, - has_bias=True) + self.check_forward_backward( + shape=[92, 513, 1134], begin_norm_axis=2, y_grad_scale=0.1 + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=False, + has_bias=True, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=True, + has_bias=False, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[92, 513, 1134], + begin_norm_axis=2, + has_scale=False, + has_bias=False, + y_grad_scale=0.1, + ) + self.check_forward_backward( + shape=[512, 1024], begin_norm_axis=1, has_scale=True, has_bias=True + ) + self.check_forward_backward( + shape=[1, 128, 256, 256], + begin_norm_axis=3, + has_scale=True, + has_bias=True, + ) + self.check_forward_backward( + shape=[1, 256, 384], + begin_norm_axis=2, + has_scale=True, + has_bias=True, + ) class TestLayerNormAPI(unittest.TestCase): - def test_case(self): - x = fluid.layers.data(name='x', - shape=[64, 32, 256], - dtype='float32', - append_batch_size=False) - x = fluid.layers.layer_norm(x, - scale=True, - shift=True, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None) - x = fluid.layers.layer_norm(x, - scale=False, - shift=False, - begin_norm_axis=1, - epsilon=1e-05, - param_attr=None, - bias_attr=None) - x = fluid.layers.layer_norm(x, - scale=False, - shift=False, - begin_norm_axis=1, - epsilon=1e-05, - param_attr="scale", - bias_attr="shift") + x = fluid.layers.data( + name='x', + shape=[64, 32, 256], + dtype='float32', + append_batch_size=False, + ) + x = fluid.layers.layer_norm( + x, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + ) + x = fluid.layers.layer_norm( + x, + scale=False, + shift=False, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + ) + x = fluid.layers.layer_norm( + x, + scale=False, + shift=False, + begin_norm_axis=1, + epsilon=1e-05, + param_attr="scale", + bias_attr="shift", + ) class TestDygraphLayerNormAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): paddle.enable_static() @@ -344,7 +388,6 @@ class TestDygraphLayerNormAPIError(unittest.TestCase): class TestFP16ScaleBiasLayerNorm(unittest.TestCase): - def check_main(self, x_np, weight_np, bias_np, dtype): paddle.disable_static() @@ -375,9 +418,11 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): bias_np = np.random.random([20]).astype('float16') y_np_1, x_g_np_1, w_g_np_1, b_g_np_1 = self.check_main( - x_np, weight_np, bias_np, 'float16') + x_np, weight_np, bias_np, 'float16' + ) y_np_2, x_g_np_2, w_g_np_2, b_g_np_2 = self.check_main( - x_np, weight_np, bias_np, 'float32') + x_np, weight_np, bias_np, 'float32' + ) def assert_equal(x, y): np.testing.assert_array_equal(x, y) @@ -389,7 +434,6 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): class TestBF16ScaleBiasLayerNorm(unittest.TestCase): - def check_main(self, x_np, weight_np, bias_np, dtype): paddle.disable_static() @@ -416,18 +460,22 @@ class TestBF16ScaleBiasLayerNorm(unittest.TestCase): return y_np, x_g_np, w_g_np, b_g_np def test_main(self): - if (not core.is_compiled_with_cuda()) or ( - core.cudnn_version() < - 8100) or 
(paddle.device.cuda.get_device_capability()[0] < 8): + if ( + (not core.is_compiled_with_cuda()) + or (core.cudnn_version() < 8100) + or (paddle.device.cuda.get_device_capability()[0] < 8) + ): return x_np = np.random.random([10, 20]).astype('float32') weight_np = np.random.random([20]).astype('float32') bias_np = np.random.random([20]).astype('float32') y_np_1, x_g_np_1, w_g_np_1, b_g_np_1 = self.check_main( - x_np, weight_np, bias_np, 'float32') + x_np, weight_np, bias_np, 'float32' + ) y_np_2, x_g_np_2, w_g_np_2, b_g_np_2 = self.check_main( - x_np, weight_np, bias_np, 'bfloat16') + x_np, weight_np, bias_np, 'bfloat16' + ) def assert_equal(x, y): np.testing.assert_allclose(x, y, rtol=1e-05, atol=0.1) @@ -439,7 +487,6 @@ class TestBF16ScaleBiasLayerNorm(unittest.TestCase): class TestGetSetKeepLayerNormScaleBiasFP32Flag(unittest.TestCase): - def test_main(self): self.assertTrue(_keep_layer_norm_scale_bias_to_fp32()) _keep_layer_norm_scale_bias_to_fp32(False) diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py index 8d6dfab71990bf10f272b14881c4162c6f8dc5eb..6e9e4931465434a3a354098e6730e09392422a25 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py @@ -23,7 +23,6 @@ import paddle class TestDygraphLayerNormv2(unittest.TestCase): - def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): @@ -114,7 +113,6 @@ class TestDygraphLayerNormv2(unittest.TestCase): class TestLayerNormFunction(unittest.TestCase): - def test_dygraph(self): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): @@ -162,10 +160,12 @@ class TestLayerNormFunction(unittest.TestCase): y4 = compute_v4(x) np.testing.assert_allclose(y3, y4, rtol=1e-05) - self.assertRaises(ValueError, - paddle.nn.functional.layer_norm, - x=x, - normalized_shape=1.0) + self.assertRaises( + ValueError, + paddle.nn.functional.layer_norm, + x=x, + normalized_shape=1.0, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 176a8385c3c973327ddefea994f4badcd44257de..14754e347f9b91963ad37f1c7c242f0dbe136da1 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -36,7 +36,6 @@ from paddle.fluid.framework import _test_eager_guard class LayerTest(unittest.TestCase): - @classmethod def setUpClass(cls): cls.seed = 111 @@ -61,38 +60,36 @@ class LayerTest(unittest.TestCase): paddle.framework.random._manual_program_seed(self.seed) yield - def get_static_graph_result(self, - feed, - fetch_list, - with_lod=False, - force_to_use_cpu=False): + def get_static_graph_result( + self, feed, fetch_list, with_lod=False, force_to_use_cpu=False + ): exe = fluid.Executor(self._get_place(force_to_use_cpu)) exe.run(fluid.default_startup_program()) - return exe.run(fluid.default_main_program(), - feed=feed, - fetch_list=fetch_list, - return_numpy=(not with_lod)) + return exe.run( + fluid.default_main_program(), + feed=feed, + fetch_list=fetch_list, + return_numpy=(not with_lod), + ) @contextlib.contextmanager def dynamic_graph(self, force_to_use_cpu=False): with fluid.dygraph.guard( - self._get_place(force_to_use_cpu=force_to_use_cpu)): + self._get_place(force_to_use_cpu=force_to_use_cpu) + ): paddle.seed(self.seed) 
paddle.framework.random._manual_program_seed(self.seed) yield class TestLayer(LayerTest): - def test_custom_layer_with_kwargs(self): - class CustomLayer(fluid.Layer): - def __init__(self, input_size, linear1_size=4): super(CustomLayer, self).__init__() - self.linear1 = nn.Linear(input_size, - linear1_size, - bias_attr=False) + self.linear1 = nn.Linear( + input_size, linear1_size, bias_attr=False + ) self.linear2 = nn.Linear(linear1_size, 1, bias_attr=False) def forward(self, x, do_linear2=False): @@ -121,37 +118,37 @@ class TestLayer(LayerTest): def test_dropout(self): inp = np.ones([3, 32, 32], dtype='float32') with self.static_graph(): - t = layers.data(name='data', - shape=[3, 32, 32], - dtype='float32', - append_batch_size=False) + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False, + ) dropout = nn.Dropout(p=0.35, seed=1, is_test=False) ret = dropout(t) - ret2 = fluid.layers.dropout(t, - dropout_prob=0.35, - seed=1, - is_test=False) + ret2 = fluid.layers.dropout( + t, dropout_prob=0.35, seed=1, is_test=False + ) static_ret, static_ret2 = self.get_static_graph_result( - feed={'data': inp}, fetch_list=[ret, ret2]) + feed={'data': inp}, fetch_list=[ret, ret2] + ) with self.dynamic_graph(): with _test_eager_guard(): t = base.to_variable(inp) dropout = nn.Dropout(p=0.35, seed=1, is_test=False) dy_eager_ret = dropout(t) - dy_eager_ret2 = fluid.layers.dropout(t, - dropout_prob=0.35, - seed=1, - is_test=False) + dy_eager_ret2 = fluid.layers.dropout( + t, dropout_prob=0.35, seed=1, is_test=False + ) dy_eager_ret_value = dy_eager_ret.numpy() dy_eager_ret2_value = dy_eager_ret2.numpy() t = base.to_variable(inp) dropout = nn.Dropout(p=0.35, seed=1, is_test=False) dy_ret = dropout(t) - dy_ret2 = fluid.layers.dropout(t, - dropout_prob=0.35, - seed=1, - is_test=False) + dy_ret2 = fluid.layers.dropout( + t, dropout_prob=0.35, seed=1, is_test=False + ) dy_ret_value = dy_ret.numpy() dy_ret2_value = dy_ret2.numpy() @@ -165,28 +162,34 @@ class TestLayer(LayerTest): def test_linear(self): inp = np.ones([3, 32, 32], dtype='float32') with self.static_graph(): - t = layers.data(name='data', - shape=[3, 32, 32], - dtype='float32', - append_batch_size=False) + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False, + ) linear = nn.Linear( - 32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)) + 32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1) + ) ret = linear(t) - static_ret = self.get_static_graph_result(feed={'data': inp}, - fetch_list=[ret])[0] + static_ret = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret] + )[0] with self.dynamic_graph(): with _test_eager_guard(): t = base.to_variable(inp) linear = nn.Linear( 32, 4, - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) dy_eager_ret = linear(t) dy_eager_ret_value = dy_eager_ret.numpy() t = base.to_variable(inp) linear = nn.Linear( - 32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)) + 32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1) + ) dy_ret = linear(t) dy_ret_value = dy_ret.numpy() @@ -201,7 +204,8 @@ class TestLayer(LayerTest): linear = nn.Linear( 32, 4, - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) linear_ret1 = linear(inp) self.assertRaises(TypeError, test_Variable) @@ -213,7 +217,8 @@ class TestLayer(LayerTest): linear = nn.Linear( 32, 4, - 
bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) linear_ret2 = linear(inp) self.assertRaises(TypeError, test_type) @@ -221,14 +226,17 @@ class TestLayer(LayerTest): def test_Flatten(self): inp = np.ones([3, 4, 4, 5], dtype='float32') with self.static_graph(): - t = layers.data(name='data', - shape=[3, 4, 4, 5], - dtype='float32', - append_batch_size=False) + t = layers.data( + name='data', + shape=[3, 4, 4, 5], + dtype='float32', + append_batch_size=False, + ) flatten = nn.Flatten() ret = flatten(t) - static_ret = self.get_static_graph_result(feed={'data': inp}, - fetch_list=[ret])[0] + static_ret = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret] + )[0] with self.dynamic_graph(): with _test_eager_guard(): t = base.to_variable(inp) @@ -252,7 +260,8 @@ class TestLayer(LayerTest): linear = nn.Linear( 32, 4, - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) linear_ret1 = linear(inp) self.assertRaises(TypeError, test_Variable) @@ -264,7 +273,8 @@ class TestLayer(LayerTest): linear = nn.Linear( 32, 4, - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) linear_ret2 = linear(inp) self.assertRaises(TypeError, test_type) @@ -272,41 +282,51 @@ class TestLayer(LayerTest): def test_layer_norm(self): inp = np.ones([3, 32, 32], dtype='float32') with self.static_graph(): - t = layers.data(name='data', - shape=[3, 32, 32], - dtype='float32', - append_batch_size=False) + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False, + ) ret = layers.layer_norm( t, bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') - static_ret = self.get_static_graph_result(feed={'data': inp}, - fetch_list=[ret])[0] + act='sigmoid', + ) + static_ret = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret] + )[0] with self.static_graph(): - t = layers.data(name='data', - shape=[3, 32, 32], - dtype='float32', - append_batch_size=False) + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False, + ) lm = nn.LayerNorm( normalized_shape=[32, 32], bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) ret = lm(t) - static_ret2 = self.get_static_graph_result(feed={'data': inp}, - fetch_list=[ret])[0] + static_ret2 = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret] + )[0] with self.dynamic_graph(): with _test_eager_guard(): lm = nn.LayerNorm( normalized_shape=[32, 32], bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) dy_eager_ret = lm(base.to_variable(inp)) dy_eager_ret_value = dy_eager_ret.numpy() lm = nn.LayerNorm( normalized_shape=[32, 32], bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) dy_ret = lm(base.to_variable(inp)) dy_ret_value = dy_ret.numpy() @@ -318,7 +338,8 @@ class TestLayer(LayerTest): scale=False, param_attr=fluid.initializer.ConstantInitializer(value=1), bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) lm(base.to_variable(inp)) self.assertFalse(hasattr(lm, "_scale_w")) @@ -330,7 +351,8 @@ class TestLayer(LayerTest): scale=False, param_attr=fluid.initializer.ConstantInitializer(value=1), bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + 
act='sigmoid', + ) lm(base.to_variable(inp)) self.assertFalse(hasattr(lm, "_scale_w")) @@ -345,14 +367,16 @@ class TestLayer(LayerTest): lm = nn.LayerNorm( normalized_shape=[16, 32], bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) with self.assertRaises(ValueError): lm(base.to_variable(inp)) lm = nn.LayerNorm( normalized_shape=[16, 32], bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) with self.assertRaises(ValueError): lm(base.to_variable(inp)) @@ -364,7 +388,8 @@ class TestLayer(LayerTest): ret = my_sync_bn(t) static_ret = self.get_static_graph_result( feed={'t': np.ones([3, 3, 5, 5], dtype='float32')}, - fetch_list=[ret])[0] + fetch_list=[ret], + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -385,8 +410,8 @@ class TestLayer(LayerTest): t = layers.data(name='t', shape=[3, 3], dtype='float32') ret = layers.relu(t) static_ret = self.get_static_graph_result( - feed={'t': np.ones([3, 3], - dtype='float32')}, fetch_list=[ret])[0] + feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret] + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -406,20 +431,21 @@ class TestLayer(LayerTest): t = layers.data(name='t', shape=[3, 3], dtype='float32') t2 = layers.data(name='t2', shape=[3, 3], dtype='float32') ret = layers.matmul(t, t2) - static_ret = self.get_static_graph_result(feed={ - 't': - np.ones([3, 3], dtype='float32'), - 't2': - np.ones([3, 3], dtype='float32') - }, - fetch_list=[ret])[0] + static_ret = self.get_static_graph_result( + feed={ + 't': np.ones([3, 3], dtype='float32'), + 't2': np.ones([3, 3], dtype='float32'), + }, + fetch_list=[ret], + )[0] with self.dynamic_graph(): with _test_eager_guard(): t = np.ones([3, 3], dtype='float32') t2 = np.ones([3, 3], dtype='float32') - dy_eager_ret = layers.matmul(base.to_variable(t), - base.to_variable(t2)) + dy_eager_ret = layers.matmul( + base.to_variable(t), base.to_variable(t2) + ) dy_eager_ret_value = dy_eager_ret.numpy() t = np.ones([3, 3], dtype='float32') @@ -436,49 +462,55 @@ class TestLayer(LayerTest): ret = layers.conv2d(input=images, num_filters=3, filter_size=[2, 2]) static_ret = self.get_static_graph_result( feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')}, - fetch_list=[ret])[0] + fetch_list=[ret], + )[0] with self.static_graph(): images = layers.data(name='pixel', shape=[3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + conv2d = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) ret = conv2d(images) static_ret2 = self.get_static_graph_result( feed={'pixel': np.ones([2, 3, 5, 5], dtype='float32')}, - fetch_list=[ret])[0] + fetch_list=[ret], + )[0] with self.dynamic_graph(): with _test_eager_guard(): images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + conv2d = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) dy_eager_ret = conv2d(base.to_variable(images)) dy_eager_ret_value = dy_eager_ret.numpy() images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + conv2d = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) dy_ret = conv2d(base.to_variable(images)) dy_ret_value = dy_ret.numpy() with self.dynamic_graph(): with _test_eager_guard(): images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2], - 
bias_attr=False) + conv2d = nn.Conv2D( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + bias_attr=False, + ) dy_ret = conv2d(base.to_variable(images)) self.assertTrue(conv2d.bias is None) images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2], - bias_attr=False) + conv2d = nn.Conv2D( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + bias_attr=False, + ) dy_ret = conv2d(base.to_variable(images)) self.assertTrue(conv2d.bias is None) @@ -486,9 +518,9 @@ class TestLayer(LayerTest): # the input of Conv2D must be Variable. def test_Variable(): images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + conv2d = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) conv2d_ret1 = conv2d(images) self.assertRaises(TypeError, test_Variable) @@ -496,12 +528,12 @@ class TestLayer(LayerTest): # the input dtype of Conv2D must be float16 or float32 or float64 # float16 only can be set on GPU place def test_type(): - images = layers.data(name='pixel', - shape=[3, 5, 5], - dtype='int32') - conv2d = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + images = layers.data( + name='pixel', shape=[3, 5, 5], dtype='int32' + ) + conv2d = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) conv2d_ret2 = conv2d(images) self.assertRaises(TypeError, test_type) @@ -516,26 +548,33 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) - conv2d1 = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) - conv2d2 = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2], - param_attr=weight_attr) + custom_weight + ) + ) + conv2d1 = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) + conv2d2 = nn.Conv2D( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + param_attr=weight_attr, + ) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) self.assertFalse( - np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()) + ) conv2d1_weight_np = conv2d1.weight.numpy() conv2d1_bias = conv2d1.bias self.assertFalse( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()) + ) conv2d2.weight.set_value(conv2d1_weight_np) - np.testing.assert_array_equal(conv2d1_weight_np, - conv2d2.weight.numpy()) + np.testing.assert_array_equal( + conv2d1_weight_np, conv2d2.weight.numpy() + ) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) @@ -543,22 +582,29 @@ class TestLayer(LayerTest): conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - np.testing.assert_array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy()) - np.testing.assert_array_equal(conv2d1.bias.numpy(), - conv2d2.bias.numpy()) + np.testing.assert_array_equal( + conv2d1.weight.numpy(), conv2d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv2d1.bias.numpy(), conv2d2.bias.numpy() + ) images = np.ones([2, 3, 5, 5], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(custom_weight)) - conv2d1 = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2]) - conv2d2 = nn.Conv2D(num_channels=3, - num_filters=3, - filter_size=[2, 2], - param_attr=weight_attr) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) + conv2d1 = nn.Conv2D( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) + conv2d2 = nn.Conv2D( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + param_attr=weight_attr, + ) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) @@ -566,10 +612,12 @@ class TestLayer(LayerTest): conv2d1_weight_np = conv2d1.weight.numpy() conv2d1_bias = conv2d1.bias self.assertFalse( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()) + ) conv2d2.weight.set_value(conv2d1_weight_np) - np.testing.assert_array_equal(conv2d1_weight_np, - conv2d2.weight.numpy()) + np.testing.assert_array_equal( + conv2d1_weight_np, conv2d2.weight.numpy() + ) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) @@ -577,10 +625,12 @@ class TestLayer(LayerTest): conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - np.testing.assert_array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy()) - np.testing.assert_array_equal(conv2d1.bias.numpy(), - conv2d2.bias.numpy()) + np.testing.assert_array_equal( + conv2d1.weight.numpy(), conv2d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv2d1.bias.numpy(), conv2d2.bias.numpy() + ) def test_gru_unit(self): lod = [[2, 4, 3]] @@ -595,114 +645,132 @@ class TestLayer(LayerTest): x = layers.data(name='x', shape=[-1, D * 3], dtype='float32') hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32') updated_hidden, reset_hidden_pre, gate = layers.gru_unit( - input=x, hidden=hidden, size=D * 3) + input=x, hidden=hidden, size=D * 3 + ) static_ret = self.get_static_graph_result( - feed={ - 'x': input, - 'hidden': hidden_input - }, - fetch_list=[updated_hidden, reset_hidden_pre, gate]) + feed={'x': input, 'hidden': hidden_input}, + fetch_list=[updated_hidden, reset_hidden_pre, gate], + ) with self.static_graph(): x = layers.data(name='x', shape=[-1, D * 3], dtype='float32') hidden = layers.data(name='hidden', shape=[-1, D], dtype='float32') updated_hidden, reset_hidden_pre, gate = layers.gru_unit( - input=x, hidden=hidden, size=D * 3) + input=x, hidden=hidden, size=D * 3 + ) gru = nn.GRUUnit(size=D * 3) updated_hidden, reset_hidden_pre, gate = gru(x, hidden) static_ret2 = self.get_static_graph_result( - feed={ - 'x': input, - 'hidden': hidden_input - }, - fetch_list=[updated_hidden, reset_hidden_pre, gate]) + feed={'x': input, 'hidden': hidden_input}, + fetch_list=[updated_hidden, reset_hidden_pre, gate], + ) with self.dynamic_graph(): with _test_eager_guard(): gru = nn.GRUUnit(size=D * 3) - dy_eager_ret = gru(base.to_variable(input), - base.to_variable(hidden_input)) + dy_eager_ret = gru( + base.to_variable(input), base.to_variable(hidden_input) + ) dy_eager_ret_value = [] for i in range(len(static_ret)): dy_eager_ret_value.append(dy_eager_ret[i].numpy()) gru = nn.GRUUnit(size=D * 3) - dy_ret = gru(base.to_variable(input), - base.to_variable(hidden_input)) + dy_ret = gru( + base.to_variable(input), base.to_variable(hidden_input) + ) dy_ret_value = [] for i in range(len(static_ret)): 
dy_ret_value.append(dy_ret[i].numpy()) for i in range(len(static_ret)): - np.testing.assert_allclose(static_ret[i], - static_ret2[i], - rtol=1e-05) - np.testing.assert_allclose(static_ret[i], - dy_ret_value[i], - rtol=1e-05) - np.testing.assert_allclose(static_ret[i], - dy_eager_ret_value[i], - rtol=1e-05) + np.testing.assert_allclose( + static_ret[i], static_ret2[i], rtol=1e-05 + ) + np.testing.assert_allclose( + static_ret[i], dy_ret_value[i], rtol=1e-05 + ) + np.testing.assert_allclose( + static_ret[i], dy_eager_ret_value[i], rtol=1e-05 + ) with self.dynamic_graph(): with _test_eager_guard(): custom_weight = np.random.randn(D, D * 3).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) + custom_weight + ) + ) gru1 = nn.GRUUnit(size=D * 3) gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr) - dy_ret1 = gru1(base.to_variable(input), - base.to_variable(hidden_input)) - dy_ret2 = gru2(base.to_variable(input), - base.to_variable(hidden_input)) + dy_ret1 = gru1( + base.to_variable(input), base.to_variable(hidden_input) + ) + dy_ret2 = gru2( + base.to_variable(input), base.to_variable(hidden_input) + ) self.assertFalse( - np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())) + np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()) + ) for o1, o2 in zip(dy_ret1, dy_ret2): self.assertFalse(np.array_equal(o1.numpy(), o2.numpy())) gru2.weight.set_value(gru1.weight.numpy()) gru2.bias.set_value(gru1.bias) - dy_ret1 = gru1(base.to_variable(input), - base.to_variable(hidden_input)) - dy_ret2 = gru2(base.to_variable(input), - base.to_variable(hidden_input)) + dy_ret1 = gru1( + base.to_variable(input), base.to_variable(hidden_input) + ) + dy_ret2 = gru2( + base.to_variable(input), base.to_variable(hidden_input) + ) for o1, o2 in zip(dy_ret1, dy_ret2): np.testing.assert_array_equal(o1.numpy(), o2.numpy()) gru2.weight = gru1.weight gru2.bias = gru1.bias - np.testing.assert_array_equal(gru1.weight.numpy(), - gru2.weight.numpy()) - np.testing.assert_array_equal(gru1.bias.numpy(), - gru2.bias.numpy()) + np.testing.assert_array_equal( + gru1.weight.numpy(), gru2.weight.numpy() + ) + np.testing.assert_array_equal( + gru1.bias.numpy(), gru2.bias.numpy() + ) custom_weight = np.random.randn(D, D * 3).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(custom_weight)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) gru1 = nn.GRUUnit(size=D * 3) gru2 = nn.GRUUnit(size=D * 3, param_attr=weight_attr) - dy_ret1 = gru1(base.to_variable(input), - base.to_variable(hidden_input)) - dy_ret2 = gru2(base.to_variable(input), - base.to_variable(hidden_input)) + dy_ret1 = gru1( + base.to_variable(input), base.to_variable(hidden_input) + ) + dy_ret2 = gru2( + base.to_variable(input), base.to_variable(hidden_input) + ) self.assertFalse( - np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())) + np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()) + ) for o1, o2 in zip(dy_ret1, dy_ret2): self.assertFalse(np.array_equal(o1.numpy(), o2.numpy())) gru2.weight.set_value(gru1.weight.numpy()) gru2.bias.set_value(gru1.bias) - dy_ret1 = gru1(base.to_variable(input), - base.to_variable(hidden_input)) - dy_ret2 = gru2(base.to_variable(input), - base.to_variable(hidden_input)) + dy_ret1 = gru1( + base.to_variable(input), base.to_variable(hidden_input) + ) + dy_ret2 = gru2( + base.to_variable(input), base.to_variable(hidden_input) + ) for o1, o2 in zip(dy_ret1, dy_ret2): np.testing.assert_array_equal(o1.numpy(), o2.numpy()) gru2.weight = gru1.weight gru2.bias = gru1.bias - np.testing.assert_array_equal(gru1.weight.numpy(), - gru2.weight.numpy()) + np.testing.assert_array_equal( + gru1.weight.numpy(), gru2.weight.numpy() + ) np.testing.assert_array_equal(gru1.bias.numpy(), gru2.bias.numpy()) def test_elementwise_math(self): @@ -727,15 +795,10 @@ class TestLayer(LayerTest): ret = layers.elementwise_sub(ret, t5) ret = layers.elementwise_mul(ret, t6) - static_ret = self.get_static_graph_result(feed={ - 't': n, - 't2': n2, - 't3': n3, - 't4': n4, - 't5': n5, - 't6': n6 - }, - fetch_list=[ret])[0] + static_ret = self.get_static_graph_result( + feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6}, + fetch_list=[ret], + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -762,10 +825,12 @@ class TestLayer(LayerTest): with self.dynamic_graph(): with _test_eager_guard(): - min_eager_ret = layers.elementwise_min(to_variable(n), - to_variable(n2)) - max_eager_ret = layers.elementwise_max(to_variable(n), - to_variable(n2)) + min_eager_ret = layers.elementwise_min( + to_variable(n), to_variable(n2) + ) + max_eager_ret = layers.elementwise_max( + to_variable(n), to_variable(n2) + ) min_eager_ret_value = min_eager_ret.numpy() max_eager_ret_value = max_eager_ret.numpy() @@ -786,39 +851,46 @@ class TestLayer(LayerTest): else: place = core.CPUPlace() with self.static_graph(): - seq = layers.data(name='seq_in', - shape=[3, 4], - dtype='float32', - lod_level=1, - append_batch_size=False) + seq = layers.data( + name='seq_in', + shape=[3, 4], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) out = layers.sequence_conv(seq, 2, act='sigmoid') - static_rlt = self.get_static_graph_result(feed={ - "seq_in": - fluid.create_lod_tensor(data=inp_np, - recursive_seq_lens=[[1, 1, 1]], - place=place) - }, - fetch_list=[out], - with_lod=True)[0] + static_rlt = self.get_static_graph_result( + feed={ + "seq_in": fluid.create_lod_tensor( + data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place + ) + }, + fetch_list=[out], + with_lod=True, + )[0] with self.static_graph(): - seq = layers.data(name='seq_in', - shape=[3, 4], - dtype='float32', - lod_level=1, - append_batch_size=False) + seq = layers.data( + name='seq_in', + shape=[3, 4], + dtype='float32', + lod_level=1, + 
append_batch_size=False, + ) seq_conv = nn.SequenceConv('seq_conv', num_filters=2, act='sigmoid') out = seq_conv(seq) - static_rlt2 = self.get_static_graph_result(feed={ - "seq_in": - fluid.create_lod_tensor(data=inp_np, - recursive_seq_lens=[[1, 1, 1]], - place=place) - }, - fetch_list=[out], - with_lod=True)[0] - np.testing.assert_array_equal(np.array(static_rlt), - np.array(static_rlt2)) + static_rlt2 = self.get_static_graph_result( + feed={ + "seq_in": fluid.create_lod_tensor( + data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place + ) + }, + fetch_list=[out], + with_lod=True, + )[0] + np.testing.assert_array_equal( + np.array(static_rlt), np.array(static_rlt2) + ) def test_conv2d_transpose(self): inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32') @@ -829,9 +901,11 @@ class TestLayer(LayerTest): num_filters=10, filter_size=27, act='sigmoid', - bias_attr=fluid.initializer.ConstantInitializer(value=1)) - static_rlt = self.get_static_graph_result(feed={'pixel': inp_np}, - fetch_list=[out])[0] + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) + static_rlt = self.get_static_graph_result( + feed={'pixel': inp_np}, fetch_list=[out] + )[0] with self.static_graph(): img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32') conv2d_transpose = nn.Conv2DTranspose( @@ -839,10 +913,12 @@ class TestLayer(LayerTest): num_filters=10, filter_size=27, act='sigmoid', - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) out = conv2d_transpose(img) - static_rlt2 = self.get_static_graph_result(feed={'pixel': inp_np}, - fetch_list=[out])[0] + static_rlt2 = self.get_static_graph_result( + feed={'pixel': inp_np}, fetch_list=[out] + )[0] with self.dynamic_graph(): with _test_eager_guard(): conv2d_transpose = nn.Conv2DTranspose( @@ -850,7 +926,8 @@ class TestLayer(LayerTest): num_filters=10, filter_size=27, act='sigmoid', - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np)) dy_eager_rlt_value = dy_eager_rlt.numpy() @@ -859,7 +936,8 @@ class TestLayer(LayerTest): num_filters=10, filter_size=27, act='sigmoid', - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) dy_rlt = conv2d_transpose(base.to_variable(inp_np)) dy_rlt_value = dy_rlt.numpy() np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) @@ -872,26 +950,33 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) - conv2d1 = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2]) - conv2d2 = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2], - param_attr=weight_attr) + custom_weight + ) + ) + conv2d1 = nn.Conv2DTranspose( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) + conv2d2 = nn.Conv2DTranspose( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + param_attr=weight_attr, + ) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) self.assertFalse( - np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()) + ) conv2d1_weight_np = conv2d1.weight.numpy() conv2d1_bias = conv2d1.bias self.assertFalse( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + 
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()) + ) conv2d2.weight.set_value(conv2d1_weight_np) - np.testing.assert_array_equal(conv2d1_weight_np, - conv2d2.weight.numpy()) + np.testing.assert_array_equal( + conv2d1_weight_np, conv2d2.weight.numpy() + ) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) @@ -899,22 +984,29 @@ class TestLayer(LayerTest): conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - np.testing.assert_array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy()) - np.testing.assert_array_equal(conv2d1.bias.numpy(), - conv2d2.bias.numpy()) + np.testing.assert_array_equal( + conv2d1.weight.numpy(), conv2d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv2d1.bias.numpy(), conv2d2.bias.numpy() + ) images = np.ones([2, 3, 5, 5], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(custom_weight)) - conv2d1 = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2]) - conv2d2 = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2], - param_attr=weight_attr) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) + conv2d1 = nn.Conv2DTranspose( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) + conv2d2 = nn.Conv2DTranspose( + num_channels=3, + num_filters=3, + filter_size=[2, 2], + param_attr=weight_attr, + ) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) @@ -922,10 +1014,12 @@ class TestLayer(LayerTest): conv2d1_weight_np = conv2d1.weight.numpy() conv2d1_bias = conv2d1.bias self.assertFalse( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()) + ) conv2d2.weight.set_value(conv2d1_weight_np) - np.testing.assert_array_equal(conv2d1_weight_np, - conv2d2.weight.numpy()) + np.testing.assert_array_equal( + conv2d1_weight_np, conv2d2.weight.numpy() + ) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) @@ -933,19 +1027,21 @@ class TestLayer(LayerTest): conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - np.testing.assert_array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy()) - np.testing.assert_array_equal(conv2d1.bias.numpy(), - conv2d2.bias.numpy()) + np.testing.assert_array_equal( + conv2d1.weight.numpy(), conv2d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv2d1.bias.numpy(), conv2d2.bias.numpy() + ) with self.static_graph(): # the input of Conv2DTranspose must be Variable. 
def test_Variable(): images = np.ones([2, 3, 5, 5], dtype='float32') - conv2d = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + conv2d = nn.Conv2DTranspose( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) conv2d_ret1 = conv2d(images) self.assertRaises(TypeError, test_Variable) @@ -953,12 +1049,12 @@ class TestLayer(LayerTest): # the input dtype of Conv2DTranspose must be float16 or float32 or float64 # float16 only can be set on GPU place def test_type(): - images = layers.data(name='pixel', - shape=[3, 5, 5], - dtype='int32') - conv2d = nn.Conv2DTranspose(num_channels=3, - num_filters=3, - filter_size=[2, 2]) + images = layers.data( + name='pixel', shape=[3, 5, 5], dtype='int32' + ) + conv2d = nn.Conv2DTranspose( + num_channels=3, num_filters=3, filter_size=[2, 2] + ) conv2d_ret2 = conv2d(images) self.assertRaises(TypeError, test_type) @@ -968,48 +1064,42 @@ class TestLayer(LayerTest): inp_np_y = np.array([[4, 5, 6]]).astype('float32') with self.static_graph(): - data_x = layers.data(name='x', - shape=[1, 3], - dtype="float32", - append_batch_size=False) - data_y = layers.data(name='y', - shape=[1, 3], - dtype="float32", - append_batch_size=False) + data_x = layers.data( + name='x', shape=[1, 3], dtype="float32", append_batch_size=False + ) + data_y = layers.data( + name='y', shape=[1, 3], dtype="float32", append_batch_size=False + ) out = layers.bilinear_tensor_product( data_x, data_y, 6, bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) - static_rlt = self.get_static_graph_result(feed={ - 'x': inp_np_x, - 'y': inp_np_y - }, - fetch_list=[out])[0] + static_rlt = self.get_static_graph_result( + feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out] + )[0] with self.static_graph(): - data_x = layers.data(name='x', - shape=[1, 3], - dtype="float32", - append_batch_size=False) - data_y = layers.data(name='y', - shape=[1, 3], - dtype="float32", - append_batch_size=False) + data_x = layers.data( + name='x', shape=[1, 3], dtype="float32", append_batch_size=False + ) + data_y = layers.data( + name='y', shape=[1, 3], dtype="float32", append_batch_size=False + ) btp = nn.BilinearTensorProduct( 3, 3, 6, bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) out = btp(data_x, data_y) - static_rlt2 = self.get_static_graph_result(feed={ - 'x': inp_np_x, - 'y': inp_np_y - }, - fetch_list=[out])[0] + static_rlt2 = self.get_static_graph_result( + feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out] + )[0] with self.dynamic_graph(): with _test_eager_guard(): btp = nn.BilinearTensorProduct( @@ -1017,9 +1107,11 @@ class TestLayer(LayerTest): 3, 6, bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') - dy_eager_rlt = btp(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + act='sigmoid', + ) + dy_eager_rlt = btp( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) dy_eager_rlt_value = dy_eager_rlt.numpy() btp = nn.BilinearTensorProduct( @@ -1027,41 +1119,39 @@ class TestLayer(LayerTest): 3, 6, bias_attr=fluid.initializer.ConstantInitializer(value=1), - act='sigmoid') + act='sigmoid', + ) dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) dy_rlt_value = dy_rlt.numpy() with self.dynamic_graph(): with _test_eager_guard(): btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid') - dy_eager_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + dy_eager_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) 
+ ) dy_eager_rlt2_value = dy_eager_rlt2.numpy() btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid') - dy_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + dy_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) dy_rlt2_value = dy_rlt2.numpy() with self.static_graph(): - data_x2 = layers.data(name='x', - shape=[1, 3], - dtype="float32", - append_batch_size=False) - data_y2 = layers.data(name='y', - shape=[1, 3], - dtype="float32", - append_batch_size=False) - out2 = layers.bilinear_tensor_product(data_x2, - data_y2, - 6, - act='sigmoid') - - static_rlt3 = self.get_static_graph_result(feed={ - 'x': inp_np_x, - 'y': inp_np_y - }, - fetch_list=[out2])[0] + data_x2 = layers.data( + name='x', shape=[1, 3], dtype="float32", append_batch_size=False + ) + data_y2 = layers.data( + name='y', shape=[1, 3], dtype="float32", append_batch_size=False + ) + out2 = layers.bilinear_tensor_product( + data_x2, data_y2, 6, act='sigmoid' + ) + + static_rlt3 = self.get_static_graph_result( + feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out2] + )[0] np.testing.assert_array_equal(dy_rlt2_value, static_rlt3) np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3) @@ -1074,87 +1164,108 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(6, 3, 3).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) + custom_weight + ) + ) btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid') - btp2 = nn.BilinearTensorProduct(3, - 3, - 6, - act='sigmoid', - param_attr=weight_attr) - dy_rlt1 = btp1(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) - dy_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + btp2 = nn.BilinearTensorProduct( + 3, 3, 6, act='sigmoid', param_attr=weight_attr + ) + dy_rlt1 = btp1( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) + dy_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) self.assertFalse( - np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) + np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) + ) btp2.weight.set_value(btp1.weight.numpy()) btp2.bias.set_value(btp1.bias) - dy_rlt1 = btp1(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) - dy_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + dy_rlt1 = btp1( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) + dy_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) btp2.weight = btp1.weight btp2.bias = btp1.bias - np.testing.assert_array_equal(btp1.weight.numpy(), - btp2.weight.numpy()) - np.testing.assert_array_equal(btp1.bias.numpy(), - btp2.bias.numpy()) + np.testing.assert_array_equal( + btp1.weight.numpy(), btp2.weight.numpy() + ) + np.testing.assert_array_equal( + btp1.bias.numpy(), btp2.bias.numpy() + ) custom_weight = np.random.randn(6, 3, 3).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(custom_weight)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid') - btp2 = nn.BilinearTensorProduct(3, - 3, - 6, - act='sigmoid', - param_attr=weight_attr) - dy_rlt1 = btp1(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) - dy_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + btp2 = nn.BilinearTensorProduct( + 3, 3, 6, act='sigmoid', param_attr=weight_attr + ) + dy_rlt1 = btp1( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) + dy_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) btp2.weight.set_value(btp1.weight.numpy()) btp2.bias.set_value(btp1.bias) - dy_rlt1 = btp1(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) - dy_rlt2 = btp2(base.to_variable(inp_np_x), - base.to_variable(inp_np_y)) + dy_rlt1 = btp1( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) + dy_rlt2 = btp2( + base.to_variable(inp_np_x), base.to_variable(inp_np_y) + ) np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) btp2.weight = btp1.weight btp2.bias = btp1.bias - np.testing.assert_array_equal(btp1.weight.numpy(), - btp2.weight.numpy()) + np.testing.assert_array_equal( + btp1.weight.numpy(), btp2.weight.numpy() + ) np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy()) def prelu_test(self, mode): inp_np = np.ones([5, 200, 100, 100]).astype('float32') with self.static_graph(): - data_t = layers.data(name="input", - shape=[5, 200, 100, 100], - dtype="float32", - append_batch_size=False) - out = layers.prelu(data_t, - mode, - param_attr=ParamAttr(initializer=Constant(1.0))) - static_rlt = self.get_static_graph_result(feed={"input": inp_np}, - fetch_list=[out])[0] + data_t = layers.data( + name="input", + shape=[5, 200, 100, 100], + dtype="float32", + append_batch_size=False, + ) + out = layers.prelu( + data_t, mode, param_attr=ParamAttr(initializer=Constant(1.0)) + ) + static_rlt = self.get_static_graph_result( + feed={"input": inp_np}, fetch_list=[out] + )[0] with self.static_graph(): - data_t = layers.data(name="input", - shape=[5, 200, 100, 100], - dtype="float32", - append_batch_size=False) - prelu = nn.PRelu(mode=mode, - channel=inp_np.shape[1], - input_shape=data_t.shape, - param_attr=ParamAttr(initializer=Constant(1.0))) + data_t = layers.data( + name="input", + shape=[5, 200, 100, 100], + dtype="float32", + append_batch_size=False, + ) + prelu = nn.PRelu( + mode=mode, + channel=inp_np.shape[1], + input_shape=data_t.shape, + param_attr=ParamAttr(initializer=Constant(1.0)), + ) out = prelu(data_t) - static_rlt2 = self.get_static_graph_result(feed={"input": inp_np}, - fetch_list=[out])[0] + static_rlt2 = self.get_static_graph_result( + feed={"input": inp_np}, fetch_list=[out] + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -1162,14 +1273,17 @@ class TestLayer(LayerTest): mode=mode, channel=inp_np.shape[1], input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(1.0))) + param_attr=ParamAttr(initializer=Constant(1.0)), + ) dy_eager_rlt = prelu(base.to_variable(inp_np)) dy_eager_rlt_value = dy_eager_rlt.numpy() - prelu = nn.PRelu(mode=mode, - channel=inp_np.shape[1], - input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(1.0))) + prelu = nn.PRelu( + mode=mode, + channel=inp_np.shape[1], + input_shape=inp_np.shape, + 
param_attr=ParamAttr(initializer=Constant(1.0)), + ) dy_rlt = prelu(base.to_variable(inp_np)) dy_rlt_value = dy_rlt.numpy() @@ -1185,42 +1299,51 @@ class TestLayer(LayerTest): mode=mode, channel=inp_np.shape[1], input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(2.0))) + param_attr=ParamAttr(initializer=Constant(2.0)), + ) prelu2 = nn.PRelu( mode=mode, channel=inp_np.shape[1], input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(1.0))) + param_attr=ParamAttr(initializer=Constant(1.0)), + ) dy_rlt1 = prelu1(inp) dy_rlt2 = prelu2(inp) self.assertFalse( - np.array_equal(prelu1.weight.numpy(), - prelu2.weight.numpy())) + np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy()) + ) self.assertFalse( - np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) + np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) + ) prelu2.weight.set_value(prelu1.weight.numpy()) dy_rlt1 = prelu1(inp) dy_rlt2 = prelu2(inp) np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) prelu2.weight = prelu1.weight - np.testing.assert_array_equal(prelu1.weight.numpy(), - prelu2.weight.numpy()) + np.testing.assert_array_equal( + prelu1.weight.numpy(), prelu2.weight.numpy() + ) inp_np = np.random.randn(5, 200, 100, 100).astype("float32") inp = base.to_variable(inp_np) - prelu1 = nn.PRelu(mode=mode, - channel=inp_np.shape[1], - input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(2.0))) - prelu2 = nn.PRelu(mode=mode, - channel=inp_np.shape[1], - input_shape=inp_np.shape, - param_attr=ParamAttr(initializer=Constant(1.0))) + prelu1 = nn.PRelu( + mode=mode, + channel=inp_np.shape[1], + input_shape=inp_np.shape, + param_attr=ParamAttr(initializer=Constant(2.0)), + ) + prelu2 = nn.PRelu( + mode=mode, + channel=inp_np.shape[1], + input_shape=inp_np.shape, + param_attr=ParamAttr(initializer=Constant(1.0)), + ) dy_rlt1 = prelu1(inp) dy_rlt2 = prelu2(inp) self.assertFalse( - np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())) + np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy()) + ) self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) prelu2.weight.set_value(prelu1.weight.numpy()) dy_rlt1 = prelu1(inp) @@ -1228,8 +1351,9 @@ class TestLayer(LayerTest): np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) prelu2.weight = prelu1.weight - np.testing.assert_array_equal(prelu1.weight.numpy(), - prelu2.weight.numpy()) + np.testing.assert_array_equal( + prelu1.weight.numpy(), prelu2.weight.numpy() + ) def test_prelu(self): self.prelu_test("channel") @@ -1241,31 +1365,37 @@ class TestLayer(LayerTest): dict_size = 20 with self.static_graph(): data_t = layers.data(name='word', shape=[1], dtype='int64') - emb = layers.embedding(input=data_t, - size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) - static_rlt = self.get_static_graph_result(feed={'word': inp_word}, - fetch_list=[emb])[0] + emb = layers.embedding( + input=data_t, + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False, + ) + static_rlt = self.get_static_graph_result( + feed={'word': inp_word}, fetch_list=[emb] + )[0] with self.static_graph(): data_t = layers.data(name='word', shape=[1], dtype='int64') - emb2 = nn.Embedding(size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + emb2 = nn.Embedding( + size=[dict_size, 32], param_attr='emb.w', is_sparse=False + ) emb_rlt = emb2(data_t) - static_rlt2 = self.get_static_graph_result(feed={'word': inp_word}, - fetch_list=[emb_rlt])[0] + static_rlt2 = self.get_static_graph_result( + feed={'word': inp_word}, 
fetch_list=[emb_rlt] + )[0] with self.dynamic_graph(): with _test_eager_guard(): - emb2 = nn.Embedding(size=[dict_size, 32], - param_attr='eager_emb.w', - is_sparse=False) + emb2 = nn.Embedding( + size=[dict_size, 32], + param_attr='eager_emb.w', + is_sparse=False, + ) dy_eager_rlt = emb2(base.to_variable(inp_word)) dy_eager_rlt_value = dy_eager_rlt.numpy() - emb2 = nn.Embedding(size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + emb2 = nn.Embedding( + size=[dict_size, 32], param_attr='emb.w', is_sparse=False + ) dy_rlt = emb2(base.to_variable(inp_word)) dy_rlt_value = dy_rlt.numpy() @@ -1278,33 +1408,43 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(dict_size, 32).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) + custom_weight + ) + ) emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False) - emb2 = nn.Embedding(size=[dict_size, 32], - param_attr=weight_attr, - is_sparse=False) + emb2 = nn.Embedding( + size=[dict_size, 32], + param_attr=weight_attr, + is_sparse=False, + ) rep1 = emb1(base.to_variable(inp_word)) rep2 = emb2(base.to_variable(inp_word)) self.assertFalse( - np.array_equal(emb1.weight.numpy(), custom_weight)) - np.testing.assert_array_equal(emb2.weight.numpy(), - custom_weight) + np.array_equal(emb1.weight.numpy(), custom_weight) + ) + np.testing.assert_array_equal( + emb2.weight.numpy(), custom_weight + ) self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy())) emb2.weight.set_value(emb1.weight.numpy()) rep2 = emb2(base.to_variable(inp_word)) np.testing.assert_array_equal(rep1.numpy(), rep2.numpy()) emb2.weight = emb1.weight - np.testing.assert_array_equal(emb1.weight.numpy(), - emb2.weight.numpy()) + np.testing.assert_array_equal( + emb1.weight.numpy(), emb2.weight.numpy() + ) custom_weight = np.random.randn(dict_size, 32).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(custom_weight)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) emb1 = nn.Embedding(size=[dict_size, 32], is_sparse=False) - emb2 = nn.Embedding(size=[dict_size, 32], - param_attr=weight_attr, - is_sparse=False) + emb2 = nn.Embedding( + size=[dict_size, 32], param_attr=weight_attr, is_sparse=False + ) rep1 = emb1(base.to_variable(inp_word)) rep2 = emb2(base.to_variable(inp_word)) self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight)) @@ -1315,8 +1455,9 @@ class TestLayer(LayerTest): np.testing.assert_array_equal(rep1.numpy(), rep2.numpy()) emb2.weight = emb1.weight - np.testing.assert_array_equal(emb1.weight.numpy(), - emb2.weight.numpy()) + np.testing.assert_array_equal( + emb1.weight.numpy(), emb2.weight.numpy() + ) def test_nce(self): window_size = 5 @@ -1329,54 +1470,61 @@ class TestLayer(LayerTest): words = [] for i in range(window_size): words.append( - layers.data(name='word_{0}'.format(i), - shape=[None], - dtype='int64')) - sample_weights = layers.fill_constant(shape=[5, 1], - dtype='float32', - value=1) + layers.data( + name='word_{0}'.format(i), shape=[None], dtype='int64' + ) + ) + sample_weights = layers.fill_constant( + shape=[5, 1], dtype='float32', value=1 + ) embs = [] for i in range(window_size): if i == label_word: continue - emb = fluid.embedding(input=words[i], - size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + emb = fluid.embedding( + input=words[i], + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False, + ) embs.append(emb) embs = layers.concat(input=embs, axis=1) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) - nce_loss = layers.nce(input=embs, - label=wl, - num_total_classes=dict_size, - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='nce.w', - bias_attr='nce.b', - sample_weight=sample_weights) + nce_loss = layers.nce( + input=embs, + label=wl, + num_total_classes=dict_size, + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b', + sample_weight=sample_weights, + ) feed_dict = dict() for i in range(window_size): feed_dict['word_{0}'.format(i)] = inp_word[i] - static_rlt = self.get_static_graph_result(feed=feed_dict, - fetch_list=[nce_loss])[0] + static_rlt = self.get_static_graph_result( + feed=feed_dict, fetch_list=[nce_loss] + )[0] with self.static_graph(): words = [] for i in range(window_size): words.append( - layers.data(name='word_{0}'.format(i), - shape=[None], - dtype='int64')) - sample_weights = layers.fill_constant(shape=[5, 1], - dtype='float32', - value=1) - emb = nn.Embedding(size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + layers.data( + name='word_{0}'.format(i), shape=[None], dtype='int64' + ) + ) + sample_weights = layers.fill_constant( + shape=[5, 1], dtype='float32', value=1 + ) + emb = nn.Embedding( + size=[dict_size, 32], param_attr='emb.w', is_sparse=False + ) embs2 = [] for i in range(window_size): @@ -1387,15 +1535,17 @@ class TestLayer(LayerTest): embs2.append(emb_rlt) embs2 = layers.concat(input=embs2, axis=1) - nce = nn.NCE(num_total_classes=dict_size, - dim=embs2.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='nce.w', - bias_attr='nce.b', - sample_weight=sample_weights) + nce = nn.NCE( + num_total_classes=dict_size, + dim=embs2.shape[1], + num_neg_samples=2, + 
sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b', + sample_weight=sample_weights, + ) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) nce_loss2 = nce(embs2, wl) @@ -1403,21 +1553,23 @@ class TestLayer(LayerTest): for i in range(len(words)): feed_dict['word_{0}'.format(i)] = inp_word[i] - static_rlt2 = self.get_static_graph_result(feed=feed_dict, - fetch_list=[nce_loss2 - ])[0] + static_rlt2 = self.get_static_graph_result( + feed=feed_dict, fetch_list=[nce_loss2] + )[0] with self.dynamic_graph(): with _test_eager_guard(): words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) - sample_weights = layers.fill_constant(shape=[5, 1], - dtype='float32', - value=1) - emb = nn.Embedding(size=[dict_size, 32], - param_attr='eager_emb.w', - is_sparse=False) + sample_weights = layers.fill_constant( + shape=[5, 1], dtype='float32', value=1 + ) + emb = nn.Embedding( + size=[dict_size, 32], + param_attr='eager_emb.w', + is_sparse=False, + ) embs3 = [] for i in range(window_size): @@ -1427,18 +1579,20 @@ class TestLayer(LayerTest): emb_rlt = emb(words[i]) embs3.append(emb_rlt) - embs3 = layers.concat(input=embs3, - axis=fluid.dygraph.to_variable( - np.array([1]))) - nce = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='eager_nce.w', - bias_attr='eager_nce.b', - sample_weight=sample_weights) + embs3 = layers.concat( + input=embs3, axis=fluid.dygraph.to_variable(np.array([1])) + ) + nce = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='eager_nce.w', + bias_attr='eager_nce.b', + sample_weight=sample_weights, + ) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) dy_eager_rlt = nce(embs3, wl) @@ -1447,12 +1601,12 @@ class TestLayer(LayerTest): words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) - sample_weights = layers.fill_constant(shape=[5, 1], - dtype='float32', - value=1) - emb = nn.Embedding(size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + sample_weights = layers.fill_constant( + shape=[5, 1], dtype='float32', value=1 + ) + emb = nn.Embedding( + size=[dict_size, 32], param_attr='emb.w', is_sparse=False + ) embs3 = [] for i in range(window_size): @@ -1462,17 +1616,20 @@ class TestLayer(LayerTest): emb_rlt = emb(words[i]) embs3.append(emb_rlt) - embs3 = layers.concat(input=embs3, - axis=fluid.dygraph.to_variable(np.array([1]))) - nce = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='nce.w', - bias_attr='nce.b', - sample_weight=sample_weights) + embs3 = layers.concat( + input=embs3, axis=fluid.dygraph.to_variable(np.array([1])) + ) + nce = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b', + sample_weight=sample_weights, + ) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) dy_rlt = nce(embs3, wl) @@ -1484,21 +1641,27 @@ class TestLayer(LayerTest): with self.dynamic_graph(): with _test_eager_guard(): - custom_weight = np.random.randn(dict_size, - 128).astype("float32") + custom_weight = np.random.randn(dict_size, 128).astype( + "float32" + ) 
weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) + custom_weight + ) + ) words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) sample_weights = layers.fill_constant( shape=fluid.dygraph.to_variable(np.array([5, 1])), dtype='float32', - value=1) - emb = nn.Embedding(size=[dict_size, 32], - param_attr='eager_emb.w', - is_sparse=False) + value=1, + ) + emb = nn.Embedding( + size=[dict_size, 32], + param_attr='eager_emb.w', + is_sparse=False, + ) embs3 = [] for i in range(window_size): @@ -1509,58 +1672,70 @@ class TestLayer(LayerTest): embs3.append(emb_rlt) embs3 = layers.concat(input=embs3, axis=1) - nce1 = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='eager_nce1.w', - bias_attr='eager_nce1.b', - sample_weight=sample_weights) - - nce2 = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr=weight_attr, - bias_attr='eager_nce2.b', - sample_weight=sample_weights) + nce1 = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='eager_nce1.w', + bias_attr='eager_nce1.b', + sample_weight=sample_weights, + ) + + nce2 = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr=weight_attr, + bias_attr='eager_nce2.b', + sample_weight=sample_weights, + ) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) nce1_loss = nce1(embs3, wl) nce2_loss = nce2(embs3, wl) self.assertFalse( - np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())) + np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()) + ) nce2.weight.set_value(nce1.weight.numpy()) nce2.bias.set_value(nce1.bias) nce1_loss = nce1(embs3, wl) nce2_loss = nce2(embs3, wl) - np.testing.assert_array_equal(nce1_loss.numpy(), - nce2_loss.numpy()) + np.testing.assert_array_equal( + nce1_loss.numpy(), nce2_loss.numpy() + ) nce2.weight = nce1.weight nce2.bias = nce1.bias - np.testing.assert_array_equal(nce1.weight.numpy(), - nce2.weight.numpy()) - np.testing.assert_array_equal(nce1.bias.numpy(), - nce2.bias.numpy()) + np.testing.assert_array_equal( + nce1.weight.numpy(), nce2.weight.numpy() + ) + np.testing.assert_array_equal( + nce1.bias.numpy(), nce2.bias.numpy() + ) custom_weight = np.random.randn(dict_size, 128).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
- NumpyArrayInitializer(custom_weight)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) sample_weights = layers.fill_constant( shape=fluid.dygraph.to_variable(np.array([5, 1])), dtype='float32', - value=1) - emb = nn.Embedding(size=[dict_size, 32], - param_attr='emb.w', - is_sparse=False) + value=1, + ) + emb = nn.Embedding( + size=[dict_size, 32], param_attr='emb.w', is_sparse=False + ) embs3 = [] for i in range(window_size): @@ -1571,31 +1746,36 @@ class TestLayer(LayerTest): embs3.append(emb_rlt) embs3 = layers.concat(input=embs3, axis=1) - nce1 = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr='nce1.w', - bias_attr='nce1.b', - sample_weight=sample_weights) - - nce2 = nn.NCE(num_total_classes=dict_size, - dim=embs3.shape[1], - num_neg_samples=2, - sampler="custom_dist", - custom_dist=nid_freq_arr.tolist(), - seed=seed, - param_attr=weight_attr, - bias_attr='nce2.b', - sample_weight=sample_weights) + nce1 = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce1.w', + bias_attr='nce1.b', + sample_weight=sample_weights, + ) + + nce2 = nn.NCE( + num_total_classes=dict_size, + dim=embs3.shape[1], + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr=weight_attr, + bias_attr='nce2.b', + sample_weight=sample_weights, + ) wl = fluid.layers.unsqueeze(words[label_word], axes=[0]) nce1_loss = nce1(embs3, wl) nce2_loss = nce2(embs3, wl) self.assertFalse( - np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())) + np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()) + ) nce2.weight.set_value(nce1.weight.numpy()) nce2.bias.set_value(nce1.bias) nce1_loss = nce1(embs3, wl) @@ -1604,46 +1784,54 @@ class TestLayer(LayerTest): nce2.weight = nce1.weight nce2.bias = nce1.bias - np.testing.assert_array_equal(nce1.weight.numpy(), - nce2.weight.numpy()) + np.testing.assert_array_equal( + nce1.weight.numpy(), nce2.weight.numpy() + ) np.testing.assert_array_equal(nce1.bias.numpy(), nce2.bias.numpy()) def test_one_hot(self): with self.dynamic_graph(): with _test_eager_guard(): - label = fluid.dygraph.to_variable(np.array([[1], [1], [3], - [0]])) + label = fluid.dygraph.to_variable( + np.array([[1], [1], [3], [0]]) + ) one_hot_label1 = fluid.layers.one_hot(input=label, depth=4) one_hot_label2 = fluid.layers.one_hot( - input=label, depth=fluid.dygraph.to_variable(np.array([4]))) - np.testing.assert_array_equal(one_hot_label1.numpy(), - one_hot_label2.numpy()) + input=label, depth=fluid.dygraph.to_variable(np.array([4])) + ) + np.testing.assert_array_equal( + one_hot_label1.numpy(), one_hot_label2.numpy() + ) label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]])) one_hot_label1 = fluid.layers.one_hot(input=label, depth=4) one_hot_label2 = fluid.layers.one_hot( - input=label, depth=fluid.dygraph.to_variable(np.array([4]))) - np.testing.assert_array_equal(one_hot_label1.numpy(), - one_hot_label2.numpy()) + input=label, depth=fluid.dygraph.to_variable(np.array([4])) + ) + np.testing.assert_array_equal( + one_hot_label1.numpy(), one_hot_label2.numpy() + ) def test_split(self): with self.dynamic_graph(): with _test_eager_guard(): input = 
fluid.dygraph.to_variable(np.random.random((3, 8, 5))) x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1) - x00, x11 = fluid.layers.split(input, - num_or_sections=2, - dim=fluid.dygraph.to_variable( - np.array([1]))) + x00, x11 = fluid.layers.split( + input, + num_or_sections=2, + dim=fluid.dygraph.to_variable(np.array([1])), + ) np.testing.assert_array_equal(x0.numpy(), x00.numpy()) np.testing.assert_array_equal(x1.numpy(), x11.numpy()) input = fluid.dygraph.to_variable(np.random.random((3, 8, 5))) x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1) - x00, x11 = fluid.layers.split(input, - num_or_sections=2, - dim=fluid.dygraph.to_variable( - np.array([1]))) + x00, x11 = fluid.layers.split( + input, + num_or_sections=2, + dim=fluid.dygraph.to_variable(np.array([1])), + ) np.testing.assert_array_equal(x0.numpy(), x00.numpy()) np.testing.assert_array_equal(x1.numpy(), x11.numpy()) @@ -1653,40 +1841,48 @@ class TestLayer(LayerTest): input = fluid.dygraph.to_variable(np.random.random((13, 11))) top5_values1, top5_indices1 = layers.topk(input, k=5) top5_values2, top5_indices2 = layers.topk( - input, k=fluid.dygraph.to_variable(np.array([5]))) - np.testing.assert_array_equal(top5_values1.numpy(), - top5_values2.numpy()) - np.testing.assert_array_equal(top5_indices1.numpy(), - top5_indices2.numpy()) + input, k=fluid.dygraph.to_variable(np.array([5])) + ) + np.testing.assert_array_equal( + top5_values1.numpy(), top5_values2.numpy() + ) + np.testing.assert_array_equal( + top5_indices1.numpy(), top5_indices2.numpy() + ) input = fluid.dygraph.to_variable(np.random.random((13, 11))) top5_values1, top5_indices1 = layers.topk(input, k=5) top5_values2, top5_indices2 = layers.topk( - input, k=fluid.dygraph.to_variable(np.array([5]))) - np.testing.assert_array_equal(top5_values1.numpy(), - top5_values2.numpy()) - np.testing.assert_array_equal(top5_indices1.numpy(), - top5_indices2.numpy()) + input, k=fluid.dygraph.to_variable(np.array([5])) + ) + np.testing.assert_array_equal( + top5_values1.numpy(), top5_values2.numpy() + ) + np.testing.assert_array_equal( + top5_indices1.numpy(), top5_indices2.numpy() + ) def test_conv3d(self): with self.static_graph(): - images = layers.data(name='pixel', - shape=[3, 6, 6, 6], - dtype='float32') + images = layers.data( + name='pixel', shape=[3, 6, 6, 6], dtype='float32' + ) ret = layers.conv3d(input=images, num_filters=3, filter_size=2) static_ret = self.get_static_graph_result( feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')}, - fetch_list=[ret])[0] + fetch_list=[ret], + )[0] with self.static_graph(): - images = layers.data(name='pixel', - shape=[3, 6, 6, 6], - dtype='float32') + images = layers.data( + name='pixel', shape=[3, 6, 6, 6], dtype='float32' + ) conv3d = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2) ret = conv3d(images) static_ret2 = self.get_static_graph_result( feed={'pixel': np.ones([2, 3, 6, 6, 6], dtype='float32')}, - fetch_list=[ret])[0] + fetch_list=[ret], + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -1710,26 +1906,33 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) - conv3d1 = nn.Conv3D(num_channels=3, - num_filters=3, - filter_size=2) - conv3d2 = nn.Conv3D(num_channels=3, - num_filters=3, - filter_size=2, - param_attr=weight_attr) + custom_weight + ) + ) + conv3d1 = nn.Conv3D( + num_channels=3, num_filters=3, filter_size=2 + ) + conv3d2 = 
nn.Conv3D( + num_channels=3, + num_filters=3, + filter_size=2, + param_attr=weight_attr, + ) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) self.assertFalse( - np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()) + ) conv3d1_weight_np = conv3d1.weight.numpy() conv3d1_bias = conv3d1.bias self.assertFalse( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()) + ) conv3d2.weight.set_value(conv3d1_weight_np) - np.testing.assert_array_equal(conv3d1_weight_np, - conv3d2.weight.numpy()) + np.testing.assert_array_equal( + conv3d1_weight_np, conv3d2.weight.numpy() + ) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) @@ -1737,20 +1940,27 @@ class TestLayer(LayerTest): conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - np.testing.assert_array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy()) - np.testing.assert_array_equal(conv3d1.bias.numpy(), - conv3d2.bias.numpy()) + np.testing.assert_array_equal( + conv3d1.weight.numpy(), conv3d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv3d1.bias.numpy(), conv3d2.bias.numpy() + ) images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(custom_weight)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) conv3d1 = nn.Conv3D(num_channels=3, num_filters=3, filter_size=2) - conv3d2 = nn.Conv3D(num_channels=3, - num_filters=3, - filter_size=2, - param_attr=weight_attr) + conv3d2 = nn.Conv3D( + num_channels=3, + num_filters=3, + filter_size=2, + param_attr=weight_attr, + ) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) @@ -1758,10 +1968,12 @@ class TestLayer(LayerTest): conv3d1_weight_np = conv3d1.weight.numpy() conv3d1_bias = conv3d1.bias self.assertFalse( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()) + ) conv3d2.weight.set_value(conv3d1_weight_np) - np.testing.assert_array_equal(conv3d1_weight_np, - conv3d2.weight.numpy()) + np.testing.assert_array_equal( + conv3d1_weight_np, conv3d2.weight.numpy() + ) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) @@ -1769,10 +1981,12 @@ class TestLayer(LayerTest): conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - np.testing.assert_array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy()) - np.testing.assert_array_equal(conv3d1.bias.numpy(), - conv3d2.bias.numpy()) + np.testing.assert_array_equal( + conv3d1.weight.numpy(), conv3d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv3d1.bias.numpy(), conv3d2.bias.numpy() + ) def test_row_conv(self): input = np.arange(15).reshape([3, 5]).astype('float32') @@ -1782,37 +1996,43 @@ class TestLayer(LayerTest): place = core.CPUPlace() with self.static_graph(): - x = layers.data(name='X', - shape=[3, 5], - dtype='float32', - lod_level=1, - append_batch_size=False) + x = layers.data( + name='X', + shape=[3, 5], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) ret = layers.row_conv(input=x, future_context_size=2) - static_ret = 
self.get_static_graph_result(feed={ - 'X': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1, 1]], - place=place) - }, - fetch_list=[ret], - with_lod=True)[0] + static_ret = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1, 1]], place=place + ) + }, + fetch_list=[ret], + with_lod=True, + )[0] with self.static_graph(): - x = layers.data(name='X', - shape=[3, 5], - dtype='float32', - lod_level=1, - append_batch_size=False) + x = layers.data( + name='X', + shape=[3, 5], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) rowConv = nn.RowConv('RowConv', future_context_size=2) ret = rowConv(x) - static_ret2 = self.get_static_graph_result(feed={ - 'X': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1, 1]], - place=place) - }, - fetch_list=[ret], - with_lod=True)[0] + static_ret2 = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1, 1]], place=place + ) + }, + fetch_list=[ret], + with_lod=True, + )[0] # TODO: dygraph can't support LODTensor @@ -1829,52 +2049,61 @@ class TestLayer(LayerTest): input = np.random.random(shape).astype('float32') with self.static_graph(): - X = fluid.layers.data(name='X', - shape=shape, - dtype='float32', - lod_level=1, - append_batch_size=False) + X = fluid.layers.data( + name='X', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False, + ) ret = layers.group_norm( input=X, groups=2, param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5), - bias_attr=fluid.initializer.ConstantInitializer(value=1)) - static_ret = self.get_static_graph_result(feed={ - 'X': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1]], - place=place) - }, - fetch_list=[ret], - with_lod=True)[0] + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) + static_ret = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place + ) + }, + fetch_list=[ret], + with_lod=True, + )[0] with self.static_graph(): - X = fluid.layers.data(name='X', - shape=shape, - dtype='float32', - lod_level=1, - append_batch_size=False) + X = fluid.layers.data( + name='X', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False, + ) groupNorm = nn.GroupNorm( channels=shape[1], groups=2, param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5), - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) ret = groupNorm(X) - static_ret2 = self.get_static_graph_result(feed={ - 'X': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1]], - place=place) - }, - fetch_list=[ret], - with_lod=True)[0] + static_ret2 = self.get_static_graph_result( + feed={ + 'X': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place + ) + }, + fetch_list=[ret], + with_lod=True, + )[0] with self.dynamic_graph(): groupNorm = nn.GroupNorm( channels=shape[1], groups=2, param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5), - bias_attr=fluid.initializer.ConstantInitializer(value=1)) + bias_attr=fluid.initializer.ConstantInitializer(value=1), + ) dy_ret = groupNorm(base.to_variable(input)) dy_rlt_value = dy_ret.numpy() @@ -1897,23 +2126,23 @@ class TestLayer(LayerTest): input = np.random.random(shape).astype('float32') with self.static_graph(): - X = fluid.layers.data(name='X', - shape=shape, - dtype='float32', - append_batch_size=False) + X = 
fluid.layers.data( + name='X', shape=shape, dtype='float32', append_batch_size=False + ) ret = layers.instance_norm(input=X) - static_ret = self.get_static_graph_result(feed={'X': input}, - fetch_list=[ret])[0] + static_ret = self.get_static_graph_result( + feed={'X': input}, fetch_list=[ret] + )[0] with self.static_graph(): - X = fluid.layers.data(name='X', - shape=shape, - dtype='float32', - append_batch_size=False) + X = fluid.layers.data( + name='X', shape=shape, dtype='float32', append_batch_size=False + ) instanceNorm = nn.InstanceNorm(num_channels=shape[1]) ret = instanceNorm(X) - static_ret2 = self.get_static_graph_result(feed={'X': input}, - fetch_list=[ret])[0] + static_ret2 = self.get_static_graph_result( + feed={'X': input}, fetch_list=[ret] + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -1968,37 +2197,43 @@ class TestLayer(LayerTest): input = np.random.random(shape).astype('float32') with self.static_graph(): - Weight = fluid.layers.data(name='Weight', - shape=shape, - dtype='float32', - lod_level=1, - append_batch_size=False) + Weight = fluid.layers.data( + name='Weight', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False, + ) ret = layers.spectral_norm(weight=Weight, dim=1, power_iters=2) - static_ret = self.get_static_graph_result(feed={ - 'Weight': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1]], - place=place), - }, - fetch_list=[ret], - with_lod=True)[0] + static_ret = self.get_static_graph_result( + feed={ + 'Weight': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place + ), + }, + fetch_list=[ret], + with_lod=True, + )[0] with self.static_graph(): - Weight = fluid.layers.data(name='Weight', - shape=shape, - dtype='float32', - lod_level=1, - append_batch_size=False) + Weight = fluid.layers.data( + name='Weight', + shape=shape, + dtype='float32', + lod_level=1, + append_batch_size=False, + ) spectralNorm = nn.SpectralNorm(shape, dim=1, power_iters=2) ret = spectralNorm(Weight) - static_ret2 = self.get_static_graph_result(feed={ - 'Weight': - fluid.create_lod_tensor(data=input, - recursive_seq_lens=[[1, 1]], - place=place) - }, - fetch_list=[ret], - with_lod=True)[0] + static_ret2 = self.get_static_graph_result( + feed={ + 'Weight': fluid.create_lod_tensor( + data=input, recursive_seq_lens=[[1, 1]], place=place + ) + }, + fetch_list=[ret], + with_lod=True, + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -2024,77 +2259,85 @@ class TestLayer(LayerTest): adj = np.tile(adj, (1, 1, 1)) vectors = np.random.random((1, 10, 5)).astype('float32') with self.static_graph(): - NodesVector = fluid.layers.data(name='NodesVector', - shape=(1, 10, 5), - dtype='float32', - lod_level=1, - append_batch_size=False) - EdgeSet = fluid.layers.data(name='EdgeSet', - shape=(1, 9, 2), - dtype='int32', - lod_level=1, - append_batch_size=False) - ret = fluid.contrib.layers.tree_conv(nodes_vector=NodesVector, - edge_set=EdgeSet, - output_size=6, - num_filters=1, - max_depth=2) - static_ret = self.get_static_graph_result(feed={ - 'NodesVector': - fluid.create_lod_tensor(data=vectors, - recursive_seq_lens=[[1]], - place=place), - 'EdgeSet': - fluid.create_lod_tensor(data=adj, - recursive_seq_lens=[[1]], - place=place) - }, - fetch_list=[ret], - with_lod=False)[0] + NodesVector = fluid.layers.data( + name='NodesVector', + shape=(1, 10, 5), + dtype='float32', + lod_level=1, + append_batch_size=False, + ) + EdgeSet = fluid.layers.data( + name='EdgeSet', + shape=(1, 9, 2), + dtype='int32', + lod_level=1, + 
append_batch_size=False, + ) + ret = fluid.contrib.layers.tree_conv( + nodes_vector=NodesVector, + edge_set=EdgeSet, + output_size=6, + num_filters=1, + max_depth=2, + ) + static_ret = self.get_static_graph_result( + feed={ + 'NodesVector': fluid.create_lod_tensor( + data=vectors, recursive_seq_lens=[[1]], place=place + ), + 'EdgeSet': fluid.create_lod_tensor( + data=adj, recursive_seq_lens=[[1]], place=place + ), + }, + fetch_list=[ret], + with_lod=False, + )[0] with self.static_graph(): - NodesVector = fluid.layers.data(name='NodesVector', - shape=(1, 10, 5), - dtype='float32', - lod_level=1, - append_batch_size=False) - EdgeSet = fluid.layers.data(name='EdgeSet', - shape=(1, 9, 2), - dtype='int32', - lod_level=1, - append_batch_size=False) - treeConv = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2) + NodesVector = fluid.layers.data( + name='NodesVector', + shape=(1, 10, 5), + dtype='float32', + lod_level=1, + append_batch_size=False, + ) + EdgeSet = fluid.layers.data( + name='EdgeSet', + shape=(1, 9, 2), + dtype='int32', + lod_level=1, + append_batch_size=False, + ) + treeConv = nn.TreeConv( + feature_size=5, output_size=6, num_filters=1, max_depth=2 + ) ret = treeConv(NodesVector, EdgeSet) - static_ret2 = self.get_static_graph_result(feed={ - 'NodesVector': - fluid.create_lod_tensor(data=vectors, - recursive_seq_lens=[[1]], - place=place), - 'EdgeSet': - fluid.create_lod_tensor(data=adj, - recursive_seq_lens=[[1]], - place=place) - }, - fetch_list=[ret], - with_lod=False)[0] + static_ret2 = self.get_static_graph_result( + feed={ + 'NodesVector': fluid.create_lod_tensor( + data=vectors, recursive_seq_lens=[[1]], place=place + ), + 'EdgeSet': fluid.create_lod_tensor( + data=adj, recursive_seq_lens=[[1]], place=place + ), + }, + fetch_list=[ret], + with_lod=False, + )[0] with self.dynamic_graph(): with _test_eager_guard(): - treeConv = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2) - dy_eager_ret = treeConv(base.to_variable(vectors), - base.to_variable(adj)) + treeConv = nn.TreeConv( + feature_size=5, output_size=6, num_filters=1, max_depth=2 + ) + dy_eager_ret = treeConv( + base.to_variable(vectors), base.to_variable(adj) + ) dy_eager_rlt_value = dy_eager_ret.numpy() - treeConv = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2) + treeConv = nn.TreeConv( + feature_size=5, output_size=6, num_filters=1, max_depth=2 + ) dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj)) dy_rlt_value = dy_ret.numpy() @@ -2107,107 +2350,135 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(5, 3, 6, 1).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) - treeConv1 = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2, - bias_attr='eager_tc1_b') - treeConv2 = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2, - param_attr=weight_attr, - bias_attr='eager_tc2_b') - dy_ret1 = treeConv1(base.to_variable(vectors), - base.to_variable(adj)) - dy_ret2 = treeConv2(base.to_variable(vectors), - base.to_variable(adj)) + custom_weight + ) + ) + treeConv1 = nn.TreeConv( + feature_size=5, + output_size=6, + num_filters=1, + max_depth=2, + bias_attr='eager_tc1_b', + ) + treeConv2 = nn.TreeConv( + feature_size=5, + output_size=6, + num_filters=1, + max_depth=2, + param_attr=weight_attr, + bias_attr='eager_tc2_b', + ) + dy_ret1 = treeConv1( + base.to_variable(vectors), base.to_variable(adj) + ) + 
dy_ret2 = treeConv2( + base.to_variable(vectors), base.to_variable(adj) + ) self.assertFalse( - np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()) + ) treeConv2.weight.set_value(treeConv1.weight.numpy()) treeConv2.bias.set_value(treeConv1.bias) - dy_ret1 = treeConv1(base.to_variable(vectors), - base.to_variable(adj)) - dy_ret2 = treeConv2(base.to_variable(vectors), - base.to_variable(adj)) + dy_ret1 = treeConv1( + base.to_variable(vectors), base.to_variable(adj) + ) + dy_ret2 = treeConv2( + base.to_variable(vectors), base.to_variable(adj) + ) np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) treeConv2.weight = treeConv1.weight treeConv2.bias = treeConv1.bias - np.testing.assert_array_equal(treeConv1.weight.numpy(), - treeConv2.weight.numpy()) - np.testing.assert_array_equal(treeConv1.bias.numpy(), - treeConv2.bias.numpy()) + np.testing.assert_array_equal( + treeConv1.weight.numpy(), treeConv2.weight.numpy() + ) + np.testing.assert_array_equal( + treeConv1.bias.numpy(), treeConv2.bias.numpy() + ) custom_weight = np.random.randn(5, 3, 6, 1).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(custom_weight)) - treeConv1 = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2, - bias_attr='tc1_b') - treeConv2 = nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2, - param_attr=weight_attr, - bias_attr='tc2_b') - dy_ret1 = treeConv1(base.to_variable(vectors), - base.to_variable(adj)) - dy_ret2 = treeConv2(base.to_variable(vectors), - base.to_variable(adj)) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) + treeConv1 = nn.TreeConv( + feature_size=5, + output_size=6, + num_filters=1, + max_depth=2, + bias_attr='tc1_b', + ) + treeConv2 = nn.TreeConv( + feature_size=5, + output_size=6, + num_filters=1, + max_depth=2, + param_attr=weight_attr, + bias_attr='tc2_b', + ) + dy_ret1 = treeConv1( + base.to_variable(vectors), base.to_variable(adj) + ) + dy_ret2 = treeConv2( + base.to_variable(vectors), base.to_variable(adj) + ) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) treeConv2.weight.set_value(treeConv1.weight.numpy()) treeConv2.bias.set_value(treeConv1.bias) - dy_ret1 = treeConv1(base.to_variable(vectors), - base.to_variable(adj)) - dy_ret2 = treeConv2(base.to_variable(vectors), - base.to_variable(adj)) + dy_ret1 = treeConv1( + base.to_variable(vectors), base.to_variable(adj) + ) + dy_ret2 = treeConv2( + base.to_variable(vectors), base.to_variable(adj) + ) np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) treeConv2.weight = treeConv1.weight treeConv2.bias = treeConv1.bias - np.testing.assert_array_equal(treeConv1.weight.numpy(), - treeConv2.weight.numpy()) - np.testing.assert_array_equal(treeConv1.bias.numpy(), - treeConv2.bias.numpy()) + np.testing.assert_array_equal( + treeConv1.weight.numpy(), treeConv2.weight.numpy() + ) + np.testing.assert_array_equal( + treeConv1.bias.numpy(), treeConv2.bias.numpy() + ) def test_conv3d_transpose(self): - input_array = np.arange(0, 48).reshape([2, 3, 2, 2, - 2]).astype('float32') + input_array = ( + np.arange(0, 48).reshape([2, 3, 2, 2, 2]).astype('float32') + ) with self.static_graph(): img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32') - out = layers.conv3d_transpose(input=img, - num_filters=12, - filter_size=12, - use_cudnn=False) + out = layers.conv3d_transpose( + input=img, 
num_filters=12, filter_size=12, use_cudnn=False + ) static_rlt = self.get_static_graph_result( - feed={'pixel': input_array}, fetch_list=[out])[0] + feed={'pixel': input_array}, fetch_list=[out] + )[0] with self.static_graph(): img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32') - conv3d_transpose = nn.Conv3DTranspose(num_channels=3, - num_filters=12, - filter_size=12, - use_cudnn=False) + conv3d_transpose = nn.Conv3DTranspose( + num_channels=3, num_filters=12, filter_size=12, use_cudnn=False + ) out = conv3d_transpose(img) static_rlt2 = self.get_static_graph_result( - feed={'pixel': input_array}, fetch_list=[out])[0] + feed={'pixel': input_array}, fetch_list=[out] + )[0] with self.dynamic_graph(): with _test_eager_guard(): - conv3d_transpose = nn.Conv3DTranspose(num_channels=3, - num_filters=12, - filter_size=12, - use_cudnn=False) + conv3d_transpose = nn.Conv3DTranspose( + num_channels=3, + num_filters=12, + filter_size=12, + use_cudnn=False, + ) dy_eager_rlt = conv3d_transpose(base.to_variable(input_array)) dy_eager_rlt_value = dy_eager_rlt.numpy() - conv3d_transpose = nn.Conv3DTranspose(num_channels=3, - num_filters=12, - filter_size=12, - use_cudnn=False) + conv3d_transpose = nn.Conv3DTranspose( + num_channels=3, num_filters=12, filter_size=12, use_cudnn=False + ) dy_rlt = conv3d_transpose(base.to_variable(input_array)) dy_rlt_value = dy_rlt.numpy() np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) @@ -2220,30 +2491,39 @@ class TestLayer(LayerTest): custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - custom_weight)) - conv3d1 = nn.Conv3DTranspose(num_channels=3, - num_filters=3, - filter_size=2, - bias_attr='eager_conv3d1_b', - use_cudnn=False) - conv3d2 = nn.Conv3DTranspose(num_channels=3, - num_filters=3, - filter_size=2, - param_attr=weight_attr, - bias_attr='eager_conv3d2_b', - use_cudnn=False) + custom_weight + ) + ) + conv3d1 = nn.Conv3DTranspose( + num_channels=3, + num_filters=3, + filter_size=2, + bias_attr='eager_conv3d1_b', + use_cudnn=False, + ) + conv3d2 = nn.Conv3DTranspose( + num_channels=3, + num_filters=3, + filter_size=2, + param_attr=weight_attr, + bias_attr='eager_conv3d2_b', + use_cudnn=False, + ) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) self.assertFalse( - np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()) + ) conv3d1_weight_np = conv3d1.weight.numpy() conv3d1_bias = conv3d1.bias self.assertFalse( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()) + ) conv3d2.weight.set_value(conv3d1_weight_np) - np.testing.assert_array_equal(conv3d1_weight_np, - conv3d2.weight.numpy()) + np.testing.assert_array_equal( + conv3d1_weight_np, conv3d2.weight.numpy() + ) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) @@ -2251,26 +2531,35 @@ class TestLayer(LayerTest): conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - np.testing.assert_array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy()) - np.testing.assert_array_equal(conv3d1.bias.numpy(), - conv3d2.bias.numpy()) + np.testing.assert_array_equal( + conv3d1.weight.numpy(), conv3d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv3d1.bias.numpy(), conv3d2.bias.numpy() + ) images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = 
np.random.randn(3, 3, 2, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(custom_weight)) - conv3d1 = nn.Conv3DTranspose(num_channels=3, - num_filters=3, - filter_size=2, - bias_attr='conv3d1_b', - use_cudnn=False) - conv3d2 = nn.Conv3DTranspose(num_channels=3, - num_filters=3, - filter_size=2, - param_attr=weight_attr, - bias_attr='conv3d2_b', - use_cudnn=False) + weight_attr = fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + custom_weight + ) + ) + conv3d1 = nn.Conv3DTranspose( + num_channels=3, + num_filters=3, + filter_size=2, + bias_attr='conv3d1_b', + use_cudnn=False, + ) + conv3d2 = nn.Conv3DTranspose( + num_channels=3, + num_filters=3, + filter_size=2, + param_attr=weight_attr, + bias_attr='conv3d2_b', + use_cudnn=False, + ) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) @@ -2278,10 +2567,12 @@ class TestLayer(LayerTest): conv3d1_weight_np = conv3d1.weight.numpy() conv3d1_bias = conv3d1.bias self.assertFalse( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()) + ) conv3d2.weight.set_value(conv3d1_weight_np) - np.testing.assert_array_equal(conv3d1_weight_np, - conv3d2.weight.numpy()) + np.testing.assert_array_equal( + conv3d1_weight_np, conv3d2.weight.numpy() + ) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) @@ -2289,10 +2580,12 @@ class TestLayer(LayerTest): conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - np.testing.assert_array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy()) - np.testing.assert_array_equal(conv3d1.bias.numpy(), - conv3d2.bias.numpy()) + np.testing.assert_array_equal( + conv3d1.weight.numpy(), conv3d2.weight.numpy() + ) + np.testing.assert_array_equal( + conv3d1.bias.numpy(), conv3d2.bias.numpy() + ) def test_eye_op(self): np_eye = np.eye(3, 2) @@ -2304,12 +2597,12 @@ class TestLayer(LayerTest): with self.dynamic_graph(): with _test_eager_guard(): eager_eye_tensor = layers.eye(num_rows=3, num_columns=2) - eager_eye_tensor_rlt1 = layers.eye(num_rows=3, - num_columns=2, - batch_shape=[3]) - eager_eye_tensor_rlt2 = layers.eye(num_rows=3, - num_columns=2, - batch_shape=[4, 3]) + eager_eye_tensor_rlt1 = layers.eye( + num_rows=3, num_columns=2, batch_shape=[3] + ) + eager_eye_tensor_rlt2 = layers.eye( + num_rows=3, num_columns=2, batch_shape=[4, 3] + ) eager_diag_tensor = layers.eye(20) eager_eye_tensor_value = eager_eye_tensor.numpy() eager_eye_tensor_rlt1_value = eager_eye_tensor_rlt1.numpy() @@ -2317,12 +2610,12 @@ class TestLayer(LayerTest): eager_diag_tensor_value = eager_diag_tensor.numpy() eye_tensor = layers.eye(num_rows=3, num_columns=2) - eye_tensor_rlt1 = layers.eye(num_rows=3, - num_columns=2, - batch_shape=[3]) - eye_tensor_rlt2 = layers.eye(num_rows=3, - num_columns=2, - batch_shape=[4, 3]) + eye_tensor_rlt1 = layers.eye( + num_rows=3, num_columns=2, batch_shape=[3] + ) + eye_tensor_rlt2 = layers.eye( + num_rows=3, num_columns=2, batch_shape=[4, 3] + ) diag_tensor = layers.eye(20) eye_tensor_value = eye_tensor.numpy() eye_tensor_rlt1_value = eye_tensor_rlt1.numpy() @@ -2330,23 +2623,23 @@ class TestLayer(LayerTest): diag_tensor_value = diag_tensor.numpy() np.testing.assert_allclose(eager_eye_tensor_value, np_eye, rtol=1e-05) - np.testing.assert_allclose(eager_eye_tensor_rlt1_value, - stack_rlt1, - 
rtol=1e-05) - np.testing.assert_allclose(eager_eye_tensor_rlt2_value, - stack_rlt2, - rtol=1e-05) - np.testing.assert_allclose(eager_diag_tensor_value, - np.eye(20), - rtol=1e-05) + np.testing.assert_allclose( + eager_eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05 + ) + np.testing.assert_allclose( + eager_eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05 + ) + np.testing.assert_allclose( + eager_diag_tensor_value, np.eye(20), rtol=1e-05 + ) np.testing.assert_allclose(eye_tensor_value, np_eye, rtol=1e-05) - np.testing.assert_allclose(eye_tensor_rlt1_value, - stack_rlt1, - rtol=1e-05) - np.testing.assert_allclose(eye_tensor_rlt2_value, - stack_rlt2, - rtol=1e-05) + np.testing.assert_allclose( + eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05 + ) + np.testing.assert_allclose( + eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05 + ) np.testing.assert_allclose(diag_tensor_value, np.eye(20), rtol=1e-05) with self.assertRaises(TypeError): @@ -2406,11 +2699,9 @@ class TestLayer(LayerTest): a = layers.data(name='a', shape=[1], dtype='int64') b = layers.data(name='b', shape=[1], dtype='int64') cond = layers.less_than(x=a, y=b) - static_ret = self.get_static_graph_result(feed={ - "a": value_a, - "b": value_b - }, - fetch_list=[cond])[0] + static_ret = self.get_static_graph_result( + feed={"a": value_a, "b": value_b}, fetch_list=[cond] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da = base.to_variable(value_a) @@ -2432,11 +2723,9 @@ class TestLayer(LayerTest): a1 = layers.data(name='a1', shape=[1], dtype='int64') b1 = layers.data(name='b1', shape=[1], dtype='int64') cond1 = layers.less_equal(x=a1, y=b1) - static_ret1 = self.get_static_graph_result(feed={ - "a1": value_a, - "b1": value_b - }, - fetch_list=[cond1])[0] + static_ret1 = self.get_static_graph_result( + feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da1 = base.to_variable(value_a) @@ -2453,16 +2742,14 @@ class TestLayer(LayerTest): for i in range(len(static_ret1)): self.assertTrue(dcond1.numpy()[i] == static_ret1[i]) - #greater than + # greater than with self.static_graph(): a2 = layers.data(name='a2', shape=[1], dtype='int64') b2 = layers.data(name='b2', shape=[1], dtype='int64') cond2 = layers.greater_than(x=a2, y=b2) - static_ret2 = self.get_static_graph_result(feed={ - "a2": value_a, - "b2": value_b - }, - fetch_list=[cond2])[0] + static_ret2 = self.get_static_graph_result( + feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da2 = base.to_variable(value_a) @@ -2479,16 +2766,14 @@ class TestLayer(LayerTest): for i in range(len(static_ret2)): self.assertTrue(dcond2.numpy()[i] == static_ret2[i]) - #greater equal + # greater equal with self.static_graph(): a3 = layers.data(name='a3', shape=[1], dtype='int64') b3 = layers.data(name='b3', shape=[1], dtype='int64') cond3 = layers.greater_equal(x=a3, y=b3) - static_ret3 = self.get_static_graph_result(feed={ - "a3": value_a, - "b3": value_b - }, - fetch_list=[cond3])[0] + static_ret3 = self.get_static_graph_result( + feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da3 = base.to_variable(value_a) @@ -2510,11 +2795,9 @@ class TestLayer(LayerTest): a4 = layers.data(name='a4', shape=[1], dtype='int64') b4 = layers.data(name='b4', shape=[1], dtype='int64') cond4 = layers.equal(x=a4, y=b4) - static_ret4 = self.get_static_graph_result(feed={ - "a4": value_a, - "b4": value_b - }, - 
fetch_list=[cond4])[0] + static_ret4 = self.get_static_graph_result( + feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da4 = base.to_variable(value_a) @@ -2536,11 +2819,9 @@ class TestLayer(LayerTest): a5 = layers.data(name='a5', shape=[1], dtype='int64') b5 = layers.data(name='b5', shape=[1], dtype='int64') cond5 = layers.equal(x=a5, y=b5) - static_ret5 = self.get_static_graph_result(feed={ - "a5": value_a, - "b5": value_b - }, - fetch_list=[cond5])[0] + static_ret5 = self.get_static_graph_result( + feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5] + )[0] with self.dynamic_graph(): with _test_eager_guard(): da5 = base.to_variable(value_a) @@ -2558,7 +2839,6 @@ class TestLayer(LayerTest): self.assertTrue(dcond5.numpy()[i] == static_ret5[i]) def test_cond(self): - def less_than_branch(a, b): return fluid.layers.elementwise_add(a, b) @@ -2566,16 +2846,22 @@ class TestLayer(LayerTest): return fluid.layers.elementwise_sub(a, b) with self.static_graph(): - a = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=0.1) - b = fluid.layers.fill_constant(shape=[1], - dtype='float32', - value=0.23) - out = fluid.layers.cond(a >= b, lambda: greater_equal_branch(a, b), - lambda: less_than_branch(a, b)) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + a = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=0.1 + ) + b = fluid.layers.fill_constant( + shape=[1], dtype='float32', value=0.23 + ) + out = fluid.layers.cond( + a >= b, + lambda: greater_equal_branch(a, b), + lambda: less_than_branch(a, b), + ) + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) ret = exe.run(fetch_list=[out]) static_res = ret[0] @@ -2584,15 +2870,23 @@ class TestLayer(LayerTest): with _test_eager_guard(): a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32')) b = fluid.dygraph.to_variable( - np.array([0.23]).astype('float32')) - out = layers.cond(a < b, lambda: less_than_branch(a, b), - lambda: greater_equal_branch(a, b)) - out2 = layers.cond(a >= b, lambda: greater_equal_branch(a, b), - lambda: less_than_branch(a, b)) + np.array([0.23]).astype('float32') + ) + out = layers.cond( + a < b, + lambda: less_than_branch(a, b), + lambda: greater_equal_branch(a, b), + ) + out2 = layers.cond( + a >= b, + lambda: greater_equal_branch(a, b), + lambda: less_than_branch(a, b), + ) eager_dynamic_res = out.numpy() eager_dynamic_res2 = out2.numpy() - np.testing.assert_array_equal(eager_dynamic_res, - eager_dynamic_res2) + np.testing.assert_array_equal( + eager_dynamic_res, eager_dynamic_res2 + ) with self.assertRaises(TypeError): layers.cond(a < b, 'str', 'str') with self.assertRaises(TypeError): @@ -2600,10 +2894,16 @@ class TestLayer(LayerTest): a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32')) b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32')) - out = layers.cond(a < b, lambda: less_than_branch(a, b), - lambda: greater_equal_branch(a, b)) - out2 = layers.cond(a >= b, lambda: greater_equal_branch(a, b), - lambda: less_than_branch(a, b)) + out = layers.cond( + a < b, + lambda: less_than_branch(a, b), + lambda: greater_equal_branch(a, b), + ) + out2 = layers.cond( + a >= b, + lambda: greater_equal_branch(a, b), + lambda: less_than_branch(a, b), + ) dynamic_res = out.numpy() dynamic_res2 = out2.numpy() np.testing.assert_array_equal(dynamic_res, dynamic_res2) @@ -2616,7 +2916,6 @@ class 
TestLayer(LayerTest): np.testing.assert_array_equal(static_res, eager_dynamic_res) def test_case(self): - def fn_1(): return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) @@ -2635,12 +2934,16 @@ class TestLayer(LayerTest): pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 pred_3 = layers.equal(x, y) # false: 0.3 == 0.1 - out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], - default=fn_3) + out_1 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3 + ) out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2]) @@ -2654,11 +2957,12 @@ class TestLayer(LayerTest): pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 pred_3 = layers.equal(x, y) # false: 0.3 == 0.1 - out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), - (pred_2, fn_2)], - default=fn_3) - out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, - fn_3)]) + out_1 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3 + ) + out_2 = layers.case( + pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)] + ) eager_dynamic_res1 = out_1.numpy() eager_dynamic_res2 = out_2.numpy() @@ -2670,8 +2974,9 @@ class TestLayer(LayerTest): pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 pred_3 = layers.equal(x, y) # false: 0.3 == 0.1 - out_1 = layers.case(pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], - default=fn_3) + out_1 = layers.case( + pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3 + ) out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)]) dynamic_res1 = out_1.numpy() dynamic_res2 = out_2.numpy() @@ -2682,7 +2987,6 @@ class TestLayer(LayerTest): np.testing.assert_array_equal(static_res2, eager_dynamic_res2) def test_switch_case(self): - def fn_1(): return layers.fill_constant(shape=[1, 2], dtype='float32', value=1) @@ -2696,46 +3000,54 @@ class TestLayer(LayerTest): index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1) index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) - out_1 = layers.switch_case(branch_index=index_1, - branch_fns={ - 1: fn_1, - 2: fn_2 - }, - default=fn_3) - out_2 = layers.switch_case(branch_index=index_2, - branch_fns=[(1, fn_1), (2, fn_2)], - default=fn_3) - out_3 = layers.switch_case(branch_index=index_2, - branch_fns=[(0, fn_1), (4, fn_2), - (7, fn_3)]) - - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + out_1 = layers.switch_case( + branch_index=index_1, + branch_fns={1: fn_1, 2: fn_2}, + default=fn_3, + ) + out_2 = layers.switch_case( + branch_index=index_2, + branch_fns=[(1, fn_1), (2, fn_2)], + default=fn_3, + ) + out_3 = layers.switch_case( + branch_index=index_2, + branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)], + ) + + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) static_res1, static_res2, static_res3 = exe.run( - fetch_list=[out_1, out_2, out_3]) + fetch_list=[out_1, out_2, out_3] + ) with self.dynamic_graph(): with _test_eager_guard(): - index_1 = layers.fill_constant(shape=[1], - dtype='int32', - value=1) - index_2 = layers.fill_constant(shape=[1], - dtype='int32', - value=2) - - out_1 = layers.switch_case(branch_index=index_1, - branch_fns={ - 1: fn_1, - 2: fn_2 - }, - default=fn_3) - out_2 = 
layers.switch_case(branch_index=index_2, - branch_fns=[(1, fn_1), (2, fn_2)], - default=fn_3) - out_3 = layers.switch_case(branch_index=index_2, - branch_fns=[(0, fn_1), (4, fn_2), - (7, fn_3)]) + index_1 = layers.fill_constant( + shape=[1], dtype='int32', value=1 + ) + index_2 = layers.fill_constant( + shape=[1], dtype='int32', value=2 + ) + + out_1 = layers.switch_case( + branch_index=index_1, + branch_fns={1: fn_1, 2: fn_2}, + default=fn_3, + ) + out_2 = layers.switch_case( + branch_index=index_2, + branch_fns=[(1, fn_1), (2, fn_2)], + default=fn_3, + ) + out_3 = layers.switch_case( + branch_index=index_2, + branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)], + ) eager_dynamic_res1 = out_1.numpy() eager_dynamic_res2 = out_2.numpy() @@ -2744,18 +3056,20 @@ class TestLayer(LayerTest): index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1) index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) - out_1 = layers.switch_case(branch_index=index_1, - branch_fns={ - 1: fn_1, - 2: fn_2 - }, - default=fn_3) - out_2 = layers.switch_case(branch_index=index_2, - branch_fns=[(1, fn_1), (2, fn_2)], - default=fn_3) - out_3 = layers.switch_case(branch_index=index_2, - branch_fns=[(0, fn_1), (4, fn_2), - (7, fn_3)]) + out_1 = layers.switch_case( + branch_index=index_1, + branch_fns={1: fn_1, 2: fn_2}, + default=fn_3, + ) + out_2 = layers.switch_case( + branch_index=index_2, + branch_fns=[(1, fn_1), (2, fn_2)], + default=fn_3, + ) + out_3 = layers.switch_case( + branch_index=index_2, + branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)], + ) dynamic_res1 = out_1.numpy() dynamic_res2 = out_2.numpy() @@ -2772,32 +3086,32 @@ class TestLayer(LayerTest): with self.static_graph(): x = fluid.layers.data(name="x1", shape=[6, 5, 8]) - dim1 = fluid.layers.data(name="dim1", - shape=[1], - append_batch_size=False) - dim2 = fluid.layers.data(name="dim2", - shape=[1], - append_batch_size=False) + dim1 = fluid.layers.data( + name="dim1", shape=[1], append_batch_size=False + ) + dim2 = fluid.layers.data( + name="dim2", shape=[1], append_batch_size=False + ) crop_shape1 = (1, 2, 4, 4) - crop_shape2 = fluid.layers.data(name="crop_shape", - shape=[4], - append_batch_size=False) + crop_shape2 = fluid.layers.data( + name="crop_shape", shape=[4], append_batch_size=False + ) crop_shape3 = [-1, dim1, dim2, 4] crop_offsets1 = [0, 0, 1, 0] - crop_offsets2 = fluid.layers.data(name="crop_offset", - shape=[4], - append_batch_size=False) + crop_offsets2 = fluid.layers.data( + name="crop_offset", shape=[4], append_batch_size=False + ) crop_offsets3 = [0, dim1, dim2, 0] - out1 = fluid.layers.crop_tensor(x, - shape=crop_shape1, - offsets=crop_offsets1) - out2 = fluid.layers.crop_tensor(x, - shape=crop_shape2, - offsets=crop_offsets2) - out3 = fluid.layers.crop_tensor(x, - shape=crop_shape3, - offsets=crop_offsets3) + out1 = fluid.layers.crop_tensor( + x, shape=crop_shape1, offsets=crop_offsets1 + ) + out2 = fluid.layers.crop_tensor( + x, shape=crop_shape2, offsets=crop_offsets2 + ) + out3 = fluid.layers.crop_tensor( + x, shape=crop_shape3, offsets=crop_offsets3 + ) self.assertIsNotNone(out1) self.assertIsNotNone(out2) @@ -2806,10 +3120,9 @@ class TestLayer(LayerTest): def test_shard_index(self): with self.static_graph(): x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64') - shard_label = fluid.layers.shard_index(input=x, - index_num=20, - nshards=2, - shard_id=0) + shard_label = fluid.layers.shard_index( + input=x, index_num=20, nshards=2, shard_id=0 + ) self.assertIsNotNone(shard_label) @@ -2828,11 +3141,9 @@ class 
TestLayer(LayerTest): exe.run(fluid.default_startup_program()) # x = np.random.rand(3, 32, 32).astype("float32") # y = np.array([[1], [0], [1]]) - static_out = exe.run(feed={ - "input": x, - "label": y - }, - fetch_list=result[0]) + static_out = exe.run( + feed={"input": x, "label": y}, fetch_list=result[0] + ) with self.dynamic_graph(force_to_use_cpu=True): data = base.to_variable(x) @@ -2845,15 +3156,19 @@ class TestLayer(LayerTest): class TestBook(LayerTest): - def setUp(self): self.only_static_set = set({"make_word_embedding"}) - self.not_compare_static_dygraph_set = set({ - "make_gaussian_random", "make_gaussian_random_batch_size_like", - "make_kldiv_loss", "make_prelu", - "make_sampled_softmax_with_cross_entropy", "make_sampling_id", - "make_uniform_random_batch_size_like" - }) + self.not_compare_static_dygraph_set = set( + { + "make_gaussian_random", + "make_gaussian_random_batch_size_like", + "make_kldiv_loss", + "make_prelu", + "make_sampled_softmax_with_cross_entropy", + "make_sampling_id", + "make_uniform_random_batch_size_like", + } + ) self.all_close_compare = set({"make_spectral_norm"}) def func_all_layers(self): @@ -2877,7 +3192,8 @@ class TestBook(LayerTest): static_result = self.get_static_graph_result( feed=self._feed_dict, fetch_list=fetch_list, - force_to_use_cpu=self._force_to_use_cpu) + force_to_use_cpu=self._force_to_use_cpu, + ) else: assert method.__name__ in ('make_get_places') @@ -2898,7 +3214,9 @@ class TestBook(LayerTest): rtol=1e-05, atol=0, err_msg='Result of function [{}] compare failed'.format( - method.__name__)) + method.__name__ + ), + ) continue if method.__name__ not in self.not_compare_static_dygraph_set: @@ -2906,7 +3224,9 @@ class TestBook(LayerTest): static_result[0], dy_result_value, err_msg='Result of function [{}] not equal'.format( - method.__name__)) + method.__name__ + ), + ) def test_all_layers(self): with _test_eager_guard(): @@ -2922,95 +3242,111 @@ class TestBook(LayerTest): elif dtype == 'float64': return np.random.random(shape).astype(dtype) elif dtype == 'int32': - return np.random.randint(self._low_data_bound, - self._high_data_bound, shape).astype(dtype) + return np.random.randint( + self._low_data_bound, self._high_data_bound, shape + ).astype(dtype) elif dtype == 'int64': - return np.random.randint(self._low_data_bound, - self._high_data_bound, shape).astype(dtype) - - def _get_data(self, - name, - shape, - dtype, - set_feed_dict=True, - append_batch_size=True): + return np.random.randint( + self._low_data_bound, self._high_data_bound, shape + ).astype(dtype) + + def _get_data( + self, name, shape, dtype, set_feed_dict=True, append_batch_size=True + ): if base.enabled(): - return base.to_variable(value=self._get_np_data( - shape, dtype, append_batch_size), - name=name, - zero_copy=False) + return base.to_variable( + value=self._get_np_data(shape, dtype, append_batch_size), + name=name, + zero_copy=False, + ) else: if set_feed_dict: self._feed_dict[name] = self._get_np_data( - shape, dtype, append_batch_size) - return layers.data(name=name, - shape=shape, - dtype=dtype, - append_batch_size=append_batch_size) + shape, dtype, append_batch_size + ) + return layers.data( + name=name, + shape=shape, + dtype=dtype, + append_batch_size=append_batch_size, + ) def make_sampled_softmax_with_cross_entropy(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): logits = self._get_data(name='Logits', shape=[256], 
dtype='float32') label = self._get_data(name='Label', shape=[1], dtype='int64') num_samples = 25 output = layers.sampled_softmax_with_cross_entropy( - logits, label, num_samples) - return (output) + logits, label, num_samples + ) + return output def make_fit_a_line(self): - with program_guard(fluid.default_main_program(), - startup_program=fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), + startup_program=fluid.default_startup_program(), + ): x = self._get_data(name='x', shape=[13], dtype='float32') y_predict = layers.fc(input=x, size=1, act=None) y = self._get_data(name='y', shape=[1], dtype='float32') cost = layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) - return (avg_cost) + return avg_cost def make_recognize_digits_mlp(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): # Change g_program, so the rest layers use `g_program` images = self._get_data(name='pixel', shape=[784], dtype='float32') label = self._get_data(name='label', shape=[1], dtype='int64') hidden1 = layers.fc(input=images, size=128, act='relu') hidden2 = layers.fc(input=hidden1, size=64, act='relu') - predict = layers.fc(input=[hidden2, hidden1], - size=10, - act='softmax', - param_attr=["sftmax.w1", "sftmax.w2"]) + predict = layers.fc( + input=[hidden2, hidden1], + size=10, + act='softmax', + param_attr=["sftmax.w1", "sftmax.w2"], + ) cost = layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(cost) - return (avg_cost) + return avg_cost def make_conv2d_transpose(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32') - return layers.conv2d_transpose(input=img, - num_filters=10, - output_size=28) + return layers.conv2d_transpose( + input=img, num_filters=10, output_size=28 + ) def make_recognize_digits_conv(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - images = self._get_data(name='pixel', - shape=[1, 28, 28], - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + images = self._get_data( + name='pixel', shape=[1, 28, 28], dtype='float32' + ) label = self._get_data(name='label', shape=[1], dtype='int64') - conv_pool_1 = nets.simple_img_conv_pool(input=images, - filter_size=5, - num_filters=2, - pool_size=2, - pool_stride=2, - act="relu") - conv_pool_2 = nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=4, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = nets.simple_img_conv_pool( + input=images, + filter_size=5, + num_filters=2, + pool_size=2, + pool_stride=2, + act="relu", + ) + conv_pool_2 = nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=4, + pool_size=2, + pool_stride=2, + act="relu", + ) predict = layers.fc(input=conv_pool_2, size=10, act="softmax") cost = layers.cross_entropy(input=predict, label=label) @@ -3018,185 +3354,212 @@ class TestBook(LayerTest): return avg_cost def make_word_embedding(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): dict_size = 10000 embed_size = 32 first_word = 
self._get_data(name='firstw', shape=[1], dtype='int64') - second_word = self._get_data(name='secondw', - shape=[1], - dtype='int64') + second_word = self._get_data( + name='secondw', shape=[1], dtype='int64' + ) third_word = self._get_data(name='thirdw', shape=[1], dtype='int64') forth_word = self._get_data(name='forthw', shape=[1], dtype='int64') next_word = self._get_data(name='nextw', shape=[1], dtype='int64') - embed_first = layers.embedding(input=first_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr='shared_w') - embed_second = layers.embedding(input=second_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr='shared_w') - - embed_third = layers.embedding(input=third_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr='shared_w') - embed_forth = layers.embedding(input=forth_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr='shared_w') + embed_first = layers.embedding( + input=first_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w', + ) + embed_second = layers.embedding( + input=second_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w', + ) + + embed_third = layers.embedding( + input=third_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w', + ) + embed_forth = layers.embedding( + input=forth_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w', + ) concat_embed = layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], - axis=1) + axis=1, + ) hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid') - predict_word = layers.fc(input=hidden1, - size=dict_size, - act='softmax') + predict_word = layers.fc( + input=hidden1, size=dict_size, act='softmax' + ) cost = layers.cross_entropy(input=predict_word, label=next_word) avg_cost = paddle.mean(cost) - return (avg_cost) + return avg_cost def make_sigmoid_cross_entropy(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): dat = self._get_data(name='data', shape=[10], dtype='float32') lbl = self._get_data(name='label', shape=[10], dtype='float32') ignore_index = -1 - return (layers.sigmoid_cross_entropy_with_logits( - x=dat, label=lbl, ignore_index=ignore_index)) + return layers.sigmoid_cross_entropy_with_logits( + x=dat, label=lbl, ignore_index=ignore_index + ) def make_hsigmoid(self): self._force_to_use_cpu = True with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): x = self._get_data(name='x', shape=[2], dtype='float32') y = self._get_data(name='y', shape=[2], dtype='int64') - return (layers.hsigmoid(input=x, label=y, num_classes=2)) + return layers.hsigmoid(input=x, label=y, num_classes=2) # test hsigmod with custom tree structure program2 = Program() with program_guard(program2): x2 = self._get_data(name='x2', shape=[4, 8], dtype='float32') y2 = self._get_data(name='y2', shape=[4], dtype='int64') - path_table = self._get_data(name='path_table', - shape=[4, 6], - dtype='int64') - path_code = self._get_data(name='path_code', - shape=[4, 6], - dtype='int64') - return (layers.hsigmoid(input=x2, - label=y2, - num_classes=6, - path_table=path_table, - path_code=path_code, - is_custom=True)) + path_table = self._get_data( + name='path_table', shape=[4, 6], dtype='int64' + ) + path_code = self._get_data( + name='path_code', shape=[4, 6], dtype='int64' + ) + return layers.hsigmoid( + 
input=x2, + label=y2, + num_classes=6, + path_table=path_table, + path_code=path_code, + is_custom=True, + ) def make_pool2d(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32') - return (layers.pool2d(x, - pool_size=[5, 3], - pool_stride=[1, 2], - pool_padding=(2, 1))) + return layers.pool2d( + x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1) + ) def make_pool2d_infershape(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): theta = self._get_data("theta", shape=[2, 3], dtype='float32') x = fluid.layers.affine_grid(theta, out_shape=[2, 3, 244, 244]) - return (layers.pool2d(x, - pool_size=[5, 3], - pool_stride=[1, 2], - pool_padding=(2, 1))) + return layers.pool2d( + x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1) + ) def make_pool3d(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x', - shape=[3, 244, 244, 244], - dtype='float32') - return (layers.pool3d(x, - pool_size=[5, 3, 2], - pool_stride=[1, 2, 3], - pool_padding=(2, 1, 1))) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x', shape=[3, 244, 244, 244], dtype='float32' + ) + return layers.pool3d( + x, + pool_size=[5, 3, 2], + pool_stride=[1, 2, 3], + pool_padding=(2, 1, 1), + ) def make_adaptive_pool2d(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32') - return (layers.adaptive_pool2d(x, [3, 3], pool_type='avg')) + return layers.adaptive_pool2d(x, [3, 3], pool_type='avg') pool, mask = layers.adaptive_pool2d(x, [3, 3], require_index=True) - return (pool) - return (mask) - return (layers.adaptive_pool2d(x, 3, pool_type='avg')) + return pool + return mask + return layers.adaptive_pool2d(x, 3, pool_type='avg') pool, mask = layers.adaptive_pool2d(x, 3, require_index=True) - return (pool) - return (mask) + return pool + return mask def make_adaptive_pool3d(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x', - shape=[3, 244, 224, 224], - dtype='float32') - return (layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg')) - pool, mask = layers.adaptive_pool3d(x, [3, 3, 3], - require_index=True) - return (pool) - return (mask) - return (layers.adaptive_pool3d(x, 3, pool_type='avg')) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x', shape=[3, 244, 224, 224], dtype='float32' + ) + return layers.adaptive_pool3d(x, [3, 3, 3], pool_type='avg') + pool, mask = layers.adaptive_pool3d( + x, [3, 3, 3], require_index=True + ) + return pool + return mask + return layers.adaptive_pool3d(x, 3, pool_type='avg') pool, mask = layers.adaptive_pool3d(x, 3, require_index=True) - return (pool) - return (mask) + return pool + return mask def make_lstm_unit(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x_t_data = self._get_data(name='x_t_data', - shape=[10, 10], - dtype='float32') + 
with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x_t_data = self._get_data( + name='x_t_data', shape=[10, 10], dtype='float32' + ) x_t = layers.fc(input=x_t_data, size=10) - prev_hidden_data = self._get_data(name='prev_hidden_data', - shape=[10, 30], - dtype='float32') + prev_hidden_data = self._get_data( + name='prev_hidden_data', shape=[10, 30], dtype='float32' + ) prev_hidden = layers.fc(input=prev_hidden_data, size=30) - prev_cell_data = self._get_data(name='prev_cell', - shape=[10, 30], - dtype='float32') + prev_cell_data = self._get_data( + name='prev_cell', shape=[10, 30], dtype='float32' + ) prev_cell = layers.fc(input=prev_cell_data, size=30) - return (layers.lstm_unit(x_t=x_t, - hidden_t_prev=prev_hidden, - cell_t_prev=prev_cell)) + return layers.lstm_unit( + x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell + ) def make_softmax(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): data = self._get_data(name='data', shape=[10], dtype='float32') hid = layers.fc(input=data, size=20) - return (layers.softmax(hid, axis=1)) + return layers.softmax(hid, axis=1) def make_space_to_depth(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - data = self._get_data(name='data', - shape=[32, 9, 6, 6], - append_batch_size=False, - dtype='float32') - return (layers.space_to_depth(data, 3)) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + data = self._get_data( + name='data', + shape=[32, 9, 6, 6], + append_batch_size=False, + dtype='float32', + ) + return layers.space_to_depth(data, 3) def make_lrn(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32') - return (layers.lrn(data)) + return layers.lrn(data) def make_get_places(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): get_places(device_count=1) @prog_scope() @@ -3205,9 +3568,10 @@ class TestBook(LayerTest): words = [] for i in range(window_size): words.append( - self._get_data(name='word_{0}'.format(i), - shape=[1], - dtype='int64')) + self._get_data( + name='word_{0}'.format(i), shape=[1], dtype='int64' + ) + ) dict_size = 10000 label_word = int(window_size // 2) + 1 @@ -3217,38 +3581,45 @@ class TestBook(LayerTest): if i == label_word: continue - emb = layers.embedding(input=words[i], - size=[dict_size, 32], - param_attr='emb.w', - is_sparse=True) + emb = layers.embedding( + input=words[i], + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=True, + ) embs.append(emb) embs = layers.concat(input=embs, axis=1) - loss = layers.nce(input=embs, - label=words[label_word], - num_total_classes=dict_size, - param_attr='nce.w', - bias_attr='nce.b') + loss = layers.nce( + input=embs, + label=words[label_word], + num_total_classes=dict_size, + param_attr='nce.w', + bias_attr='nce.b', + ) avg_loss = paddle.mean(loss) - return (avg_loss) + return avg_loss def make_multiplex(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): 
x1 = self._get_data(name='x1', shape=[4], dtype='float32') x2 = self._get_data(name='x2', shape=[4], dtype='float32') index = self._get_data(name='index', shape=[1], dtype='int32') out = layers.multiplex(inputs=[x1, x2], index=index) - return (out) + return out def make_softmax_with_cross_entropy(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[16], dtype='float32') y = self._get_data(name='label', shape=[1], dtype='int64') loss, softmax = layers.softmax_with_cross_entropy( - x, y, return_softmax=True) + x, y, return_softmax=True + ) self.assertIsNotNone(loss) self.assertIsNotNone(softmax) @@ -3267,39 +3638,41 @@ class TestBook(LayerTest): self.assertIsNotNone(loss2) self.assertIsNotNone(loss3) self.assertIsNotNone(loss4) - return (loss4) + return loss4 def make_smooth_l1(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[4], dtype='float32') y = self._get_data(name='label', shape=[4], dtype='float32') loss = layers.smooth_l1(x, y) - return (loss) + return loss def make_scatter(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x', - shape=[3, 3], - append_batch_size=False, - dtype='float32') - idx = self._get_data(name='idx', - shape=[2], - append_batch_size=False, - dtype='int32') - updates = self._get_data(name='updates', - shape=[2, 3], - append_batch_size=False, - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x', shape=[3, 3], append_batch_size=False, dtype='float32' + ) + idx = self._get_data( + name='idx', shape=[2], append_batch_size=False, dtype='int32' + ) + updates = self._get_data( + name='updates', + shape=[2, 3], + append_batch_size=False, + dtype='float32', + ) out = layers.scatter(input=x, index=idx, updates=updates) - return (out) + return out def make_one_hot(self): with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): label = self._get_data(name="label", shape=[1], dtype="int32") one_hot_label = layers.one_hot(input=label, depth=10) - return (one_hot_label) + return one_hot_label def make_label_smooth(self): # TODO(minqiyang): support gpu ut @@ -3307,321 +3680,370 @@ class TestBook(LayerTest): with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): label = self._get_data(name="label", shape=[1], dtype="int32") one_hot_label = layers.one_hot(input=label, depth=10) - smooth_label = layers.label_smooth(label=one_hot_label, - epsilon=0.1, - dtype="int32") - return (smooth_label) + smooth_label = layers.label_smooth( + label=one_hot_label, epsilon=0.1, dtype="int32" + ) + return smooth_label def make_topk(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): data = self._get_data(name="label", shape=[200], dtype="float32") values, indices = layers.topk(data, k=5) - return (values) - return (indices) + return values + return indices def make_resize_bilinear(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), 
fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32") output = layers.resize_bilinear(x, out_shape=[12, 12]) - return (output) + return output def make_resize_bilinear_by_scale(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32") output = layers.resize_bilinear(x, scale=1.5) - return (output) + return output def make_resize_nearest(self): try: - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32") output = layers.resize_nearest(x, out_shape=[12, 12]) except ValueError: pass try: - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x2', - shape=[3, 9, 6, 7], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x2', shape=[3, 9, 6, 7], dtype="float32" + ) output = layers.resize_nearest(x, out_shape=[12, 12, 12]) except ValueError: pass - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 9, 6], dtype="float32") output = layers.resize_nearest(x, out_shape=[12, 12]) - return (output) + return output def make_resize_nearest_by_scale(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x1', shape=[3, 9, 6], dtype="float32") output = layers.resize_nearest(x, scale=1.8) - return (output) + return output def make_resize_trilinear(self): try: - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x2', shape=[3, 9, 6], dtype="float32") output = layers.resize_trilinear(x, out_shape=[12, 12, 12]) except ValueError: pass try: - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x', - shape=[3, 9, 6, 7], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x', shape=[3, 9, 6, 7], dtype="float32" + ) output = layers.resize_trilinear(x, out_shape=[12, 12]) except ValueError: pass - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32") output = layers.resize_trilinear(x, out_shape=[12, 12, 12]) - return (output) + return output def make_resize_trilinear_by_scale(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 9, 6, 7], dtype="float32") output = layers.resize_trilinear(x, scale=2.1) - return (output) + return output def make_polygon_box_transform(self): - with 
program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[8, 4, 4], dtype="float32") output = layers.polygon_box_transform(input=x) - return (output) + return output def make_l2_normalize(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32") output = layers.l2_normalize(x, axis=1) return output def make_crop(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 5], dtype="float32") y = self._get_data(name='y', shape=[2, 3], dtype="float32") output = layers.crop(x, shape=y) - return (output) + return output def make_mean_iou(self): with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): x = self._get_data(name='x', shape=[16], dtype='int32') y = self._get_data(name='label', shape=[16], dtype='int32') iou = layers.mean_iou(x, y, self._high_data_bound) - return (iou) + return iou def make_argsort(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): data = self._get_data(name='x', shape=[2, 3, 3], dtype="float32") out, ids = layers.argsort(input=data, axis=1) - return (out) - return (ids) + return out + return ids def make_rank_loss(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - label = self._get_data(name='label', - append_batch_size=False, - shape=[16, 1], - dtype="float32") - left = self._get_data(name='left', - append_batch_size=False, - shape=[16, 1], - dtype="float32") - right = self._get_data(name='right', - append_batch_size=False, - shape=[16, 1], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + label = self._get_data( + name='label', + append_batch_size=False, + shape=[16, 1], + dtype="float32", + ) + left = self._get_data( + name='left', + append_batch_size=False, + shape=[16, 1], + dtype="float32", + ) + right = self._get_data( + name='right', + append_batch_size=False, + shape=[16, 1], + dtype="float32", + ) out = layers.rank_loss(label, left, right, name="rank_loss") - return (out) + return out def make_shape(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[3, 100, 100], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[3, 100, 100], dtype="float32" + ) out = layers.shape(input) - return (out) + return out def make_pad2d(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[3, 100, 100], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[3, 100, 100], dtype="float32" + ) paddings = layers.fill_constant(shape=[4], dtype='int32', value=1) - out = layers.pad2d(input, - paddings=[1, 2, 3, 4], - mode='reflect', - data_format='NCHW', - 
name="shape") - out_1 = layers.pad2d(input, - paddings=paddings, - mode='reflect', - data_format='NCHW', - name="shape") - return (out) - return (out_1) + out = layers.pad2d( + input, + paddings=[1, 2, 3, 4], + mode='reflect', + data_format='NCHW', + name="shape", + ) + out_1 = layers.pad2d( + input, + paddings=paddings, + mode='reflect', + data_format='NCHW', + name="shape", + ) + return out + return out_1 def make_prelu(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[5, 200, 100, 100], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[5, 200, 100, 100], dtype="float32" + ) mode = 'channel' - out = layers.prelu(input, - mode, - param_attr=ParamAttr(initializer=Constant(1.0)), - name='prelu') - return (out) + out = layers.prelu( + input, + mode, + param_attr=ParamAttr(initializer=Constant(1.0)), + name='prelu', + ) + return out def make_soft_relu(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.soft_relu(input, threshold=30.0, name='soft_relu') - return (out) + return out def make_sigmoid(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.sigmoid(input, name='sigmoid') - return (out) + return out def make_exp(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.exp(input, name='exp') - return (out) + return out def make_tanh(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.tanh(input, name='tanh') - return (out) + return out def make_tanh_shrink(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.tanh_shrink(input, name='tanh_shrink') - return (out) + return out def make_sqrt(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.sqrt(input, name='sqrt') - return (out) + return out def make_abs(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.abs(input, name='abs') - return (out) + return out def make_ceil(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + 
fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.ceil(input, name='ceil') - return (out) + return out def make_floor(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.floor(input, name='floor') - return (out) + return out def make_cos(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.cos(input, name='cos') - return (out) + return out def make_sin(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.sin(input, name='sin') - return (out) + return out def make_round(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.round(input, name='round') - return (out) + return out def make_reciprocal(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.reciprocal(input, name='reciprocal') - return (out) + return out def make_square(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.square(input, name='square') - return (out) + return out def make_softplus(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.softplus(input, name='softplus') - return (out) + return out def make_softsign(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.softsign(input, name='softsign') - return (out) + return out def make_mish(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.mish(input, name='mish') - return (out) + return out def make_cross_entropy(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="x", shape=[30, 10], dtype="float32") label = self._get_data(name="label", shape=[30, 1], 
dtype="int64") mode = 'channel' out = layers.cross_entropy(x, label, False, 4) - return (out) + return out def make_bpr_loss(self): self._force_to_use_cpu = True @@ -3629,173 +4051,196 @@ class TestBook(LayerTest): x = self._get_data(name="x", shape=[30, 10], dtype="float32") label = self._get_data(name="label", shape=[30, 1], dtype="int64") out = layers.bpr_loss(x, label) - return (out) + return out def make_expand(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="input", shape=[10], dtype='int32') out = layers.expand(x, [1, 2]) return out def make_uniform_random_batch_size_like(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[13, 11], - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[13, 11], dtype='float32' + ) out = layers.uniform_random_batch_size_like(input, [-1, 11]) - return (out) + return out def make_gaussian_random(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): out = layers.gaussian_random(shape=[20, 30]) - return (out) + return out def make_sampling_id(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name="X", - shape=[13, 11], - dtype='float32', - append_batch_size=False) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name="X", + shape=[13, 11], + dtype='float32', + append_batch_size=False, + ) out = layers.sampling_id(x) - return (out) + return out def make_gaussian_random_batch_size_like(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[13, 11], - dtype='float32') - - out = layers.gaussian_random_batch_size_like(input, - shape=[-1, 11], - mean=1.0, - std=2.0) - return (out) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[13, 11], dtype='float32' + ) + + out = layers.gaussian_random_batch_size_like( + input, shape=[-1, 11], mean=1.0, std=2.0 + ) + return out def make_sum(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[13, 11], - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[13, 11], dtype='float32' + ) out = layers.sum(input) - return (out) + return out def make_slice(self): starts = [1, 0, 2] ends = [3, 3, 4] axes = [0, 1, 2] - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = self._get_data(name="input", - shape=[3, 4, 5, 6], - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[3, 4, 5, 6], dtype='float32' + ) out = layers.slice(input, axes=axes, starts=starts, ends=ends) return out def make_scale_variable(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = 
self._get_data(name="input", - shape=[3, 4, 5, 6], - dtype='float32') - scale_var = self._get_data(name="scale", - shape=[1], - dtype='float32', - append_batch_size=False) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = self._get_data( + name="input", shape=[3, 4, 5, 6], dtype='float32' + ) + scale_var = self._get_data( + name="scale", + shape=[1], + dtype='float32', + append_batch_size=False, + ) out = layers.scale(input, scale=scale_var) return out def make_softshrink(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): input = self._get_data(name="input", shape=[16], dtype="float32") out = layers.softshrink(input, alpha=0.3) - return (out) + return out def make_iou_similarity(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="x", shape=[4], dtype="float32") y = self._get_data(name="y", shape=[4], dtype="float32") out = layers.iou_similarity(x, y, name='iou_similarity') - return (out) + return out def make_grid_sampler(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name='x', shape=[3, 5, 7], dtype='float32') grid = self._get_data(name='grid', shape=[5, 7, 2], dtype='float32') out = layers.grid_sampler(x, grid) - return (out) + return out def make_bilinear_tensor_product_layer(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): data = self._get_data(name='data', shape=[4], dtype="float32") theta = self._get_data(name="theta", shape=[5], dtype="float32") out = layers.bilinear_tensor_product(data, theta, 6) - return (out) + return out def make_batch_norm(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - data = self._get_data(name='data', - shape=[32, 128, 128], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + data = self._get_data( + name='data', shape=[32, 128, 128], dtype="float32" + ) out = layers.batch_norm(data) - return (out) + return out def make_batch_norm_momentum_variable(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - data = self._get_data(name='data', - shape=[32, 128, 128], - dtype="float32") - momentum = self._get_data(name='momentum', - shape=[1], - dtype='float32', - append_batch_size=False) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + data = self._get_data( + name='data', shape=[32, 128, 128], dtype="float32" + ) + momentum = self._get_data( + name='momentum', + shape=[1], + dtype='float32', + append_batch_size=False, + ) out = layers.batch_norm(data, momentum=momentum) - return (out) + return out def make_inplace_abn(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - data = self._get_data(name='data', - shape=[32, 128, 128], - dtype="float32") + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + data = self._get_data( + name='data', shape=[32, 128, 128], 
dtype="float32" + ) out = layers.inplace_abn(data, act='leaky_relu', act_alpha=0.2) - return (out) + return out def make_inplace_abn_momentum_variable(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - data = self._get_data(name='data', - shape=[32, 128, 128], - dtype="float32") - momentum = self._get_data(name='momentum', - shape=[1], - dtype='float32', - append_batch_size=False) - out = layers.inplace_abn(data, - momentum=momentum, - act='elu', - act_alpha=2.0) - return (out) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + data = self._get_data( + name='data', shape=[32, 128, 128], dtype="float32" + ) + momentum = self._get_data( + name='momentum', + shape=[1], + dtype='float32', + append_batch_size=False, + ) + out = layers.inplace_abn( + data, momentum=momentum, act='elu', act_alpha=2.0 + ) + return out def make_range(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): layers.range(0, 10, 2, 'int32') layers.range(0.1, 10.0, 0.2, 'float32') layers.range(0.1, 10.0, 0.2, 'float64') @@ -3806,87 +4251,101 @@ class TestBook(LayerTest): return y def make_spectral_norm(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - weight = self._get_data(name='weight', - shape=[2, 3, 32, 32], - dtype="float32", - append_batch_size=False) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + weight = self._get_data( + name='weight', + shape=[2, 3, 32, 32], + dtype="float32", + append_batch_size=False, + ) out = layers.spectral_norm(weight, dim=1, power_iters=1) - return (out) + return out def make_kldiv_loss(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - x = self._get_data(name='x', - shape=[32, 128, 128], - dtype="float32", - append_batch_size=False) - target = self._get_data(name='target', - shape=[32, 128, 128], - dtype="float32", - append_batch_size=False) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + x = self._get_data( + name='x', + shape=[32, 128, 128], + dtype="float32", + append_batch_size=False, + ) + target = self._get_data( + name='target', + shape=[32, 128, 128], + dtype="float32", + append_batch_size=False, + ) loss = layers.kldiv_loss(x=x, target=target, reduction='batchmean') - return (loss) + return loss def make_temporal_shift(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32") out = layers.temporal_shift(x, seg_num=2, shift_ratio=0.2) - return (out) + return out def make_shuffle_channel(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32") out = layers.shuffle_channel(x, group=4) - return (out) + return out def make_fsp_matrix(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[16, 4, 4], dtype="float32") y = self._get_data(name="Y", 
shape=[8, 4, 4], dtype="float32") out = layers.fsp_matrix(x, y) - return (out) + return out def make_pixel_shuffle(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32") out = layers.pixel_shuffle(x, upscale_factor=3) - return (out) + return out def make_mse_loss(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[1], dtype="float32") y = self._get_data(name="Y", shape=[1], dtype="float32") out = layers.mse_loss(input=x, label=y) - return (out) + return out def make_square_error_cost(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): x = self._get_data(name="X", shape=[1], dtype="float32") y = self._get_data(name="Y", shape=[1], dtype="float32") out = layers.square_error_cost(input=x, label=y) - return (out) + return out def test_dynamic_lstmp(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): hidden_dim, proj_dim = 16, 8 - seq_data = layers.data(name='seq_data', - shape=[10, 10], - dtype='float32', - lod_level=1) + seq_data = layers.data( + name='seq_data', shape=[10, 10], dtype='float32', lod_level=1 + ) fc_out = layers.fc(input=seq_data, size=4 * hidden_dim) self.assertIsNotNone( - layers.dynamic_lstmp(input=fc_out, - size=4 * hidden_dim, - proj_size=proj_dim)) + layers.dynamic_lstmp( + input=fc_out, size=4 * hidden_dim, proj_size=proj_dim + ) + ) def test_linear_chain_crf(self): with self.static_graph(): @@ -3894,63 +4353,71 @@ class TestBook(LayerTest): feature = layers.data(name='feature', shape=[784], dtype='float32') label = layers.data(name='label', shape=[1], dtype='int64') emission = layers.fc(input=feature, size=10) - crf = layers.linear_chain_crf(input=emission, - label=label, - param_attr=ParamAttr(name="crfw")) - crf_decode = layers.crf_decoding(input=emission, - param_attr=ParamAttr(name="crfw")) + crf = layers.linear_chain_crf( + input=emission, label=label, param_attr=ParamAttr(name="crfw") + ) + crf_decode = layers.crf_decoding( + input=emission, param_attr=ParamAttr(name="crfw") + ) self.assertFalse(crf is None) self.assertFalse(crf_decode is None) - return layers.chunk_eval(input=crf_decode, - label=label, - chunk_scheme="IOB", - num_chunk_types=(label_dict_len - 1) // 2) + return layers.chunk_eval( + input=crf_decode, + label=label, + chunk_scheme="IOB", + num_chunk_types=(label_dict_len - 1) // 2, + ) def test_linear_chain_crf_padding(self): with self.static_graph(): label_dict_len, max_len = 10, 20 - feature = layers.data(name='feature', - shape=[max_len, 784], - dtype='float32') + feature = layers.data( + name='feature', shape=[max_len, 784], dtype='float32' + ) label = layers.data(name='label', shape=[max_len], dtype='int64') length = layers.data(name='length', shape=[1], dtype='int64') emission = layers.fc(input=feature, size=10, num_flatten_dims=2) - crf = layers.linear_chain_crf(input=emission, - label=label, - length=length, - param_attr=ParamAttr(name="crfw")) - crf_decode = layers.crf_decoding(input=emission, - length=length, - param_attr=ParamAttr(name="crfw")) + crf = layers.linear_chain_crf( + input=emission, + label=label, + 
length=length, + param_attr=ParamAttr(name="crfw"), + ) + crf_decode = layers.crf_decoding( + input=emission, length=length, param_attr=ParamAttr(name="crfw") + ) self.assertFalse(crf is None) self.assertFalse(crf_decode is None) - return layers.chunk_eval(input=crf_decode, - label=label, - seq_length=length, - chunk_scheme="IOB", - num_chunk_types=(label_dict_len - 1) // 2) + return layers.chunk_eval( + input=crf_decode, + label=label, + seq_length=length, + chunk_scheme="IOB", + num_chunk_types=(label_dict_len - 1) // 2, + ) def test_im2sequence(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[3, 128, 128], dtype='float32') y = layers.data(name='y', shape=[], dtype='float32') - output = layers.im2sequence(input=x, - input_image_size=y, - stride=[1, 1], - filter_size=[2, 2], - out_stride=[1, 1]) - return (output) + output = layers.im2sequence( + input=x, + input_image_size=y, + stride=[1, 1], + filter_size=[2, 2], + out_stride=[1, 1], + ) + return output def test_lod_reset(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): # case 1 x = layers.data(name='x', shape=[10], dtype='float32') - y = layers.data(name='y', - shape=[10, 20], - dtype='float32', - lod_level=2) + y = layers.data( + name='y', shape=[10, 20], dtype='float32', lod_level=2 + ) z = layers.lod_reset(x=x, y=y) self.assertTrue(z.lod_level == 2) # case 2 @@ -3982,153 +4449,152 @@ class TestBook(LayerTest): strides = [1, 1, 1] with self.static_graph(): x = layers.data(name="x", shape=[245, 30, 30], dtype="float32") - out = layers.strided_slice(x, - axes=axes, - starts=starts, - ends=ends, - strides=strides) + out = layers.strided_slice( + x, axes=axes, starts=starts, ends=ends, strides=strides + ) return out def test_fill_constant_batch_size_like(self): with self.static_graph(): - like = fluid.layers.fill_constant(shape=[1, 200], - value=10, - dtype='int64') - out = layers.fill_constant_batch_size_like(input=like, - shape=[2, 3300], - value=1315454564656, - dtype='int64') + like = fluid.layers.fill_constant( + shape=[1, 200], value=10, dtype='int64' + ) + out = layers.fill_constant_batch_size_like( + input=like, shape=[2, 3300], value=1315454564656, dtype='int64' + ) return out def test_psroi_pool(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name="x", shape=[245, 30, 30], dtype="float32") - rois = layers.data(name="rois", - shape=[4], - dtype="float32", - lod_level=1) + rois = layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1 + ) output = layers.psroi_pool(x, rois, 5, 0.25, 7, 7) - return (output) + return output def test_sequence_expand(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[10], dtype='float32') - y = layers.data(name='y', - shape=[10, 20], - dtype='float32', - lod_level=2) - return (layers.sequence_expand(x=x, y=y, ref_level=1)) + y = layers.data( + name='y', shape=[10, 20], dtype='float32', lod_level=2 + ) + return layers.sequence_expand(x=x, y=y, ref_level=1) def test_sequence_reshape(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1) out = layers.sequence_reshape(input=x, new_dim=16) - return (out) + return out def test_sequence_unpad(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[10, 5], dtype='float32') length = 
layers.data(name='length', shape=[], dtype='int64') - return (layers.sequence_unpad(x=x, length=length)) + return layers.sequence_unpad(x=x, length=length) def test_sequence_softmax(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): - seq_data = layers.data(name='seq_data', - shape=[10, 10], - dtype='float32', - lod_level=1) + seq_data = layers.data( + name='seq_data', shape=[10, 10], dtype='float32', lod_level=1 + ) seq = layers.fc(input=seq_data, size=20) - return (layers.sequence_softmax(seq)) + return layers.sequence_softmax(seq) def test_sequence_unsqueeze(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[8, 2], dtype='float32') out = layers.unsqueeze(input=x, axes=[1]) - return (out) + return out def test_sequence_scatter(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): - x = layers.data(name='x', - shape=[3, 6], - append_batch_size=False, - dtype='float32') - idx = layers.data(name='idx', - shape=[12, 1], - append_batch_size=False, - dtype='int32', - lod_level=1) - updates = layers.data(name='updates', - shape=[12, 1], - append_batch_size=False, - dtype='float32', - lod_level=1) + x = layers.data( + name='x', shape=[3, 6], append_batch_size=False, dtype='float32' + ) + idx = layers.data( + name='idx', + shape=[12, 1], + append_batch_size=False, + dtype='int32', + lod_level=1, + ) + updates = layers.data( + name='updates', + shape=[12, 1], + append_batch_size=False, + dtype='float32', + lod_level=1, + ) out = layers.sequence_scatter(input=x, index=idx, updates=updates) - return (out) + return out def test_sequence_slice(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): import numpy as np - seqs = layers.data(name='x', - shape=[10, 5], - dtype='float32', - lod_level=1) + + seqs = layers.data( + name='x', shape=[10, 5], dtype='float32', lod_level=1 + ) offset = layers.assign(input=np.array([[0, 1]]).astype('int32')) length = layers.assign(input=np.array([[2, 1]]).astype('int32')) - out = layers.sequence_slice(input=seqs, - offset=offset, - length=length) - return (out) + out = layers.sequence_slice( + input=seqs, offset=offset, length=length + ) + return out def test_filter_by_instag(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): - x1 = layers.data(name='Ins', - shape=[32, 1], - dtype='float32', - lod_level=0) - x2 = layers.data(name='Ins_tag', - shape=[32, 1], - dtype='int64', - lod_level=0, - stop_gradient=True) - x3 = layers.create_global_var(shape=[1, 1], - value=20, - dtype='int64', - persistable=True, - force_cpu=True, - name='Filter_tag') + x1 = layers.data( + name='Ins', shape=[32, 1], dtype='float32', lod_level=0 + ) + x2 = layers.data( + name='Ins_tag', + shape=[32, 1], + dtype='int64', + lod_level=0, + stop_gradient=True, + ) + x3 = layers.create_global_var( + shape=[1, 1], + value=20, + dtype='int64', + persistable=True, + force_cpu=True, + name='Filter_tag', + ) out1, out2 = layers.filter_by_instag(x1, x2, x3, is_lod=True) def test_shuffle_batch(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): - x = layers.data(name='X', - shape=[4, 50], - dtype='float32', - lod_level=0) + x = layers.data( + name='X', shape=[4, 50], dtype='float32', lod_level=0 + ) out1 = fluid.contrib.layers.shuffle_batch(x) default_main_program().random_seed = 1000 out2 = fluid.contrib.layers.shuffle_batch(x) self.assertIsNotNone(out1) self.assertIsNotNone(out2) - return 
(out1) + return out1 def test_partial_sum(self): with self.static_graph(): x = fluid.data(name="x", shape=[None, 3], dtype="float32") y = fluid.data(name="y", shape=[None, 3], dtype="float32") - sum = fluid.contrib.layers.partial_sum([x, y], - start_index=0, - length=2) - return (sum) + sum = fluid.contrib.layers.partial_sum( + [x, y], start_index=0, length=2 + ) + return sum def test_batch_fc(self): with self.static_graph(): @@ -4139,21 +4605,24 @@ class TestBook(LayerTest): param_attr=fluid.ParamAttr( learning_rate=1.0, name="w_0", - initializer=fluid.initializer.Xavier(uniform=False)), + initializer=fluid.initializer.Xavier(uniform=False), + ), bias_size=[16, 10], bias_attr=fluid.ParamAttr( learning_rate=1.0, name="b_0", - initializer=fluid.initializer.Xavier(uniform=False)), - act="relu") - return (out) + initializer=fluid.initializer.Xavier(uniform=False), + ), + act="relu", + ) + return out def test_rank_attention(self): with self.static_graph(): input = fluid.data(name="input", shape=[None, 2], dtype="float32") - rank_offset = fluid.data(name="rank_offset", - shape=[None, 7], - dtype="int32") + rank_offset = fluid.data( + name="rank_offset", shape=[None, 7], dtype="int32" + ) out = fluid.contrib.layers.rank_attention( input=input, rank_offset=rank_offset, @@ -4161,9 +4630,11 @@ class TestBook(LayerTest): rank_param_attr=fluid.ParamAttr( learning_rate=1.0, name="ubm_rank_param.w_0", - initializer=fluid.initializer.Xavier(uniform=False)), - max_rank=3) - return (out) + initializer=fluid.initializer.Xavier(uniform=False), + ), + max_rank=3, + ) + return out def test_roi_pool(self): x_np = np.random.rand(2, 3, 8, 8).astype('float32') @@ -4175,35 +4646,27 @@ class TestBook(LayerTest): rois = layers.data(name="rois", shape=[4], dtype="float32") rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32") output = layers.roi_pool(x, rois, 4, 4, 0.5, rois_num=rois_num) - static_res = self.get_static_graph_result(feed={ - 'x': x_np, - 'rois': rois_np, - 'rois_num': rois_num_np - }, - fetch_list=[output])[0] + static_res = self.get_static_graph_result( + feed={'x': x_np, 'rois': rois_np, 'rois_num': rois_num_np}, + fetch_list=[output], + )[0] with self.dynamic_graph(): with _test_eager_guard(): x_dy = base.to_variable(x_np) rois_dy = base.to_variable(rois_np) rois_num_dy = base.to_variable(rois_num_np) - dy_eager_res = layers.roi_pool(x_dy, - rois_dy, - 4, - 4, - 0.5, - rois_num=rois_num_dy) + dy_eager_res = layers.roi_pool( + x_dy, rois_dy, 4, 4, 0.5, rois_num=rois_num_dy + ) dy_eager_res_value = dy_eager_res[0].numpy() x_dy = base.to_variable(x_np) rois_dy = base.to_variable(rois_np) rois_num_dy = base.to_variable(rois_num_np) - dy_res = layers.roi_pool(x_dy, - rois_dy, - 4, - 4, - 0.5, - rois_num=rois_num_dy) + dy_res = layers.roi_pool( + x_dy, rois_dy, 4, 4, 0.5, rois_num=rois_num_dy + ) dy_res_value = dy_res[0].numpy() np.testing.assert_array_equal(static_res, dy_res_value) np.testing.assert_array_equal(static_res, dy_eager_res_value) @@ -4224,37 +4687,27 @@ class TestBook(LayerTest): rois = layers.data(name="rois", shape=[4], dtype="float32") rois_num = fluid.data(name="rois_num", shape=[None], dtype="int32") output = layers.roi_align(x, rois, 4, 4, 0.5, 2, rois_num=rois_num) - static_res = self.get_static_graph_result(feed={ - 'x': x_np, - 'rois': rois_np, - 'rois_num': rois_num_np - }, - fetch_list=[output])[0] + static_res = self.get_static_graph_result( + feed={'x': x_np, 'rois': rois_np, 'rois_num': rois_num_np}, + fetch_list=[output], + )[0] with self.dynamic_graph(): with 
_test_eager_guard(): x_dy = base.to_variable(x_np) rois_dy = base.to_variable(rois_np) rois_num_dy = base.to_variable(rois_num_np) - dy_eager_res = layers.roi_align(x_dy, - rois_dy, - 4, - 4, - 0.5, - 2, - rois_num=rois_num_dy) + dy_eager_res = layers.roi_align( + x_dy, rois_dy, 4, 4, 0.5, 2, rois_num=rois_num_dy + ) dy_eager_res_value = dy_eager_res.numpy() x_dy = base.to_variable(x_np) rois_dy = base.to_variable(rois_np) rois_num_dy = base.to_variable(rois_num_np) - dy_res = layers.roi_align(x_dy, - rois_dy, - 4, - 4, - 0.5, - 2, - rois_num=rois_num_dy) + dy_res = layers.roi_align( + x_dy, rois_dy, 4, 4, 0.5, 2, rois_num=rois_num_dy + ) dy_res_value = dy_res.numpy() np.testing.assert_array_equal(static_res, dy_eager_res_value) np.testing.assert_array_equal(static_res, dy_res_value) @@ -4266,18 +4719,16 @@ class TestBook(LayerTest): label_np = np.random.randint(0, num_classes, [2, 3, 1], dtype=np.int64) with self.static_graph(): - input_ = layers.data(name="input", - shape=[None, 3, num_classes], - dtype="float32") - label_ = layers.data(name="label", - shape=[None, 3, 1], - dtype="int64") + input_ = layers.data( + name="input", shape=[None, 3, num_classes], dtype="float32" + ) + label_ = layers.data( + name="label", shape=[None, 3, 1], dtype="int64" + ) output = layers.dice_loss(input_, label_, eps) - static_res = self.get_static_graph_result(feed={ - 'input': input_np, - 'label': label_np - }, - fetch_list=[output])[0] + static_res = self.get_static_graph_result( + feed={'input': input_np, 'label': label_np}, fetch_list=[output] + )[0] with self.dynamic_graph(): with _test_eager_guard(): @@ -4297,46 +4748,47 @@ class TestBook(LayerTest): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name="x", shape=[256, 30, 30], dtype="float32") - rois = layers.data(name="rois", - shape=[8], - dtype="float32", - lod_level=1) + rois = layers.data( + name="rois", shape=[8], dtype="float32", lod_level=1 + ) output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6) - return (output) + return output def test_row_conv(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1) out = layers.row_conv(input=x, future_context_size=2) - return (out) + return out def test_simple_conv2d(self): # TODO(minqiyang): dygraph do not support layers with param now with self.static_graph(): - images = layers.data(name='pixel', - shape=[3, 48, 48], - dtype='float32') - return layers.conv2d(input=images, - num_filters=3, - filter_size=[4, 4]) + images = layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32' + ) + return layers.conv2d( + input=images, num_filters=3, filter_size=[4, 4] + ) def test_squeeze(self): # TODO(minqiyang): dygraph do not support layers with param now with self.static_graph(): x = layers.data(name='x', shape=[1, 1, 4], dtype='float32') out = layers.squeeze(input=x, axes=[2]) - return (out) + return out def test_flatten(self): # TODO(minqiyang): dygraph do not support op without kernel now with self.static_graph(): - x = layers.data(name='x', - append_batch_size=False, - shape=[4, 4, 3], - dtype="float32") + x = layers.data( + name='x', + append_batch_size=False, + shape=[4, 4, 3], + dtype="float32", + ) out = layers.flatten(x, axis=1, name="flatten") - return (out) + return out def test_linspace(self): program = Program() @@ -4347,211 +4799,270 @@ class TestBook(LayerTest): def test_deformable_conv(self): with self.static_graph(): - input = 
layers.data(name='input', - append_batch_size=False, - shape=[2, 3, 32, 32], - dtype="float32") - offset = layers.data(name='offset', - append_batch_size=False, - shape=[2, 18, 32, 32], - dtype="float32") - mask = layers.data(name='mask', - append_batch_size=False, - shape=[2, 9, 32, 32], - dtype="float32") - out = layers.deformable_conv(input=input, - offset=offset, - mask=mask, - num_filters=2, - filter_size=3, - padding=1) - return (out) + input = layers.data( + name='input', + append_batch_size=False, + shape=[2, 3, 32, 32], + dtype="float32", + ) + offset = layers.data( + name='offset', + append_batch_size=False, + shape=[2, 18, 32, 32], + dtype="float32", + ) + mask = layers.data( + name='mask', + append_batch_size=False, + shape=[2, 9, 32, 32], + dtype="float32", + ) + out = layers.deformable_conv( + input=input, + offset=offset, + mask=mask, + num_filters=2, + filter_size=3, + padding=1, + ) + return out def test_deformable_conv2(self): with self.static_graph(): - input = fluid.data(name='input', - shape=[None, 3, None, None], - dtype="float32") - offset = fluid.data(name='offset', - shape=[None, 18, None, None], - dtype="float32") - mask = fluid.data(name='mask', - shape=[None, 9, None, None], - dtype="float32") - out = layers.deformable_conv(input=input, - offset=offset, - mask=mask, - num_filters=2, - filter_size=3, - padding=1) - return (out) + input = fluid.data( + name='input', shape=[None, 3, None, None], dtype="float32" + ) + offset = fluid.data( + name='offset', shape=[None, 18, None, None], dtype="float32" + ) + mask = fluid.data( + name='mask', shape=[None, 9, None, None], dtype="float32" + ) + out = layers.deformable_conv( + input=input, + offset=offset, + mask=mask, + num_filters=2, + filter_size=3, + padding=1, + ) + return out def test_unfold(self): with self.static_graph(): x = layers.data(name='x', shape=[3, 20, 20], dtype='float32') out = layers.unfold(x, [3, 3], 1, 1, 1) - return (out) + return out def test_partial_concat(self): with self.static_graph(): x = fluid.data(name="x", shape=[None, 3], dtype="float32") y = fluid.data(name="y", shape=[None, 3], dtype="float32") - concat1 = fluid.contrib.layers.partial_concat([x, y], - start_index=0, - length=2) - concat2 = fluid.contrib.layers.partial_concat(x, - start_index=0, - length=-1) + concat1 = fluid.contrib.layers.partial_concat( + [x, y], start_index=0, length=2 + ) + concat2 = fluid.contrib.layers.partial_concat( + x, start_index=0, length=-1 + ) return concat1, concat2 def test_deform_roi_pooling(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = layers.data(name='input', - shape=[2, 3, 32, 32], - dtype='float32', - append_batch_size=False) - rois = layers.data(name="rois", - shape=[4], - dtype='float32', - lod_level=1) - trans = layers.data(name="trans", - shape=[2, 3, 32, 32], - dtype='float32', - append_batch_size=False) - out = layers.deformable_roi_pooling(input=input, - rois=rois, - trans=trans, - no_trans=False, - spatial_scale=1.0, - group_size=(1, 1), - pooled_height=8, - pooled_width=8, - part_size=(8, 8), - sample_per_part=4, - trans_std=0.1) - return (out) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = layers.data( + name='input', + shape=[2, 3, 32, 32], + dtype='float32', + append_batch_size=False, + ) + rois = layers.data( + name="rois", shape=[4], dtype='float32', lod_level=1 + ) + trans = layers.data( + name="trans", + shape=[2, 3, 32, 32], + dtype='float32', + append_batch_size=False, + 
) + out = layers.deformable_roi_pooling( + input=input, + rois=rois, + trans=trans, + no_trans=False, + spatial_scale=1.0, + group_size=(1, 1), + pooled_height=8, + pooled_width=8, + part_size=(8, 8), + sample_per_part=4, + trans_std=0.1, + ) + return out def test_deformable_conv_v1(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = layers.data(name='input', - append_batch_size=False, - shape=[2, 3, 32, 32], - dtype="float32") - offset = layers.data(name='offset', - append_batch_size=False, - shape=[2, 18, 32, 32], - dtype="float32") - out = layers.deformable_conv(input=input, - offset=offset, - mask=None, - num_filters=2, - filter_size=3, - padding=1, - modulated=False) - return (out) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = layers.data( + name='input', + append_batch_size=False, + shape=[2, 3, 32, 32], + dtype="float32", + ) + offset = layers.data( + name='offset', + append_batch_size=False, + shape=[2, 18, 32, 32], + dtype="float32", + ) + out = layers.deformable_conv( + input=input, + offset=offset, + mask=None, + num_filters=2, + filter_size=3, + padding=1, + modulated=False, + ) + return out def test_retinanet_target_assign(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - bbox_pred = layers.data(name='bbox_pred', - shape=[1, 100, 4], - append_batch_size=False, - dtype='float32') - cls_logits = layers.data(name='cls_logits', - shape=[1, 100, 10], - append_batch_size=False, - dtype='float32') - anchor_box = layers.data(name='anchor_box', - shape=[100, 4], - append_batch_size=False, - dtype='float32') - anchor_var = layers.data(name='anchor_var', - shape=[100, 4], - append_batch_size=False, - dtype='float32') - gt_boxes = layers.data(name='gt_boxes', - shape=[10, 4], - append_batch_size=False, - dtype='float32') - gt_labels = layers.data(name='gt_labels', - shape=[10, 1], - append_batch_size=False, - dtype='int32') - is_crowd = layers.data(name='is_crowd', - shape=[1], - append_batch_size=False, - dtype='int32') - im_info = layers.data(name='im_info', - shape=[1, 3], - append_batch_size=False, - dtype='float32') - return (layers.retinanet_target_assign(bbox_pred, cls_logits, - anchor_box, anchor_var, - gt_boxes, gt_labels, - is_crowd, im_info, 10)) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + bbox_pred = layers.data( + name='bbox_pred', + shape=[1, 100, 4], + append_batch_size=False, + dtype='float32', + ) + cls_logits = layers.data( + name='cls_logits', + shape=[1, 100, 10], + append_batch_size=False, + dtype='float32', + ) + anchor_box = layers.data( + name='anchor_box', + shape=[100, 4], + append_batch_size=False, + dtype='float32', + ) + anchor_var = layers.data( + name='anchor_var', + shape=[100, 4], + append_batch_size=False, + dtype='float32', + ) + gt_boxes = layers.data( + name='gt_boxes', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) + gt_labels = layers.data( + name='gt_labels', + shape=[10, 1], + append_batch_size=False, + dtype='int32', + ) + is_crowd = layers.data( + name='is_crowd', + shape=[1], + append_batch_size=False, + dtype='int32', + ) + im_info = layers.data( + name='im_info', + shape=[1, 3], + append_batch_size=False, + dtype='float32', + ) + return layers.retinanet_target_assign( + bbox_pred, + cls_logits, + anchor_box, + anchor_var, + gt_boxes, + gt_labels, + is_crowd, + im_info, + 10, + ) def test_sigmoid_focal_loss(self): - with 
program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = layers.data(name='data', - shape=[10, 80], - append_batch_size=False, - dtype='float32') - label = layers.data(name='label', - shape=[10, 1], - append_batch_size=False, - dtype='int32') - fg_num = layers.data(name='fg_num', - shape=[1], - append_batch_size=False, - dtype='int32') - out = fluid.layers.sigmoid_focal_loss(x=input, - label=label, - fg_num=fg_num, - gamma=2., - alpha=0.25) - return (out) + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = layers.data( + name='data', + shape=[10, 80], + append_batch_size=False, + dtype='float32', + ) + label = layers.data( + name='label', + shape=[10, 1], + append_batch_size=False, + dtype='int32', + ) + fg_num = layers.data( + name='fg_num', shape=[1], append_batch_size=False, dtype='int32' + ) + out = fluid.layers.sigmoid_focal_loss( + x=input, label=label, fg_num=fg_num, gamma=2.0, alpha=0.25 + ) + return out def test_addmm(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - input = layers.data(name='input_data', - shape=[3, 3], - append_batch_size=False, - dtype='float32') - x = layers.data(name='x', - shape=[3, 2], - append_batch_size=False, - dtype='float32') - y = layers.data(name='y', - shape=[2, 3], - append_batch_size=False, - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + input = layers.data( + name='input_data', + shape=[3, 3], + append_batch_size=False, + dtype='float32', + ) + x = layers.data( + name='x', shape=[3, 2], append_batch_size=False, dtype='float32' + ) + y = layers.data( + name='y', shape=[2, 3], append_batch_size=False, dtype='float32' + ) out = paddle.addmm(input=input, x=x, y=y) - return (out) + return out def test_retinanet_detection_output(self): - with program_guard(fluid.default_main_program(), - fluid.default_startup_program()): - bboxes = layers.data(name='bboxes', - shape=[1, 21, 4], - append_batch_size=False, - dtype='float32') - scores = layers.data(name='scores', - shape=[1, 21, 10], - append_batch_size=False, - dtype='float32') - anchors = layers.data(name='anchors', - shape=[21, 4], - append_batch_size=False, - dtype='float32') - im_info = layers.data(name="im_info", - shape=[1, 3], - append_batch_size=False, - dtype='float32') + with program_guard( + fluid.default_main_program(), fluid.default_startup_program() + ): + bboxes = layers.data( + name='bboxes', + shape=[1, 21, 4], + append_batch_size=False, + dtype='float32', + ) + scores = layers.data( + name='scores', + shape=[1, 21, 10], + append_batch_size=False, + dtype='float32', + ) + anchors = layers.data( + name='anchors', + shape=[21, 4], + append_batch_size=False, + dtype='float32', + ) + im_info = layers.data( + name="im_info", + shape=[1, 3], + append_batch_size=False, + dtype='float32', + ) nmsed_outs = layers.retinanet_detection_output( bboxes=[bboxes, bboxes], scores=[scores, scores], @@ -4561,38 +5072,39 @@ class TestBook(LayerTest): nms_top_k=1000, keep_top_k=100, nms_threshold=0.3, - nms_eta=1.) 
- return (nmsed_outs) + nms_eta=1.0, + ) + return nmsed_outs def test_warpctc_with_padding(self): # TODO(minqiyang): dygraph do not support lod now with self.static_graph(): - input_length = layers.data(name='logits_length', - shape=[11], - dtype='int64') - label_length = layers.data(name='labels_length', - shape=[12], - dtype='int64') + input_length = layers.data( + name='logits_length', shape=[11], dtype='int64' + ) + label_length = layers.data( + name='labels_length', shape=[12], dtype='int64' + ) label = layers.data(name='label', shape=[12, 1], dtype='int32') - predict = layers.data(name='predict', - shape=[4, 4, 8], - dtype='float32') - output = layers.warpctc(input=predict, - label=label, - input_length=input_length, - label_length=label_length) - return (output) + predict = layers.data( + name='predict', shape=[4, 4, 8], dtype='float32' + ) + output = layers.warpctc( + input=predict, + label=label, + input_length=input_length, + label_length=label_length, + ) + return output def test_edit_distance(self): with self.static_graph(): - predict = layers.data(name='predict', - shape=[-1, 1], - dtype='int64', - lod_level=1) - label = layers.data(name='label', - shape=[-1, 1], - dtype='int64', - lod_level=1) + predict = layers.data( + name='predict', shape=[-1, 1], dtype='int64', lod_level=1 + ) + label = layers.data( + name='label', shape=[-1, 1], dtype='int64', lod_level=1 + ) evaluator = fluid.evaluator.EditDistance(predict, label) return evaluator.metrics @@ -4600,15 +5112,15 @@ class TestBook(LayerTest): input_size = 128 hidden_size = 256 with self.static_graph(): - input = fluid.data(name="input", - shape=[None, None, input_size], - dtype='float32') - pre_hidden = fluid.data(name="pre_hidden", - shape=[None, hidden_size], - dtype='float32') - sequence_length = fluid.data(name="sequence_length", - shape=[None], - dtype='int32') + input = fluid.data( + name="input", shape=[None, None, input_size], dtype='float32' + ) + pre_hidden = fluid.data( + name="pre_hidden", shape=[None, hidden_size], dtype='float32' + ) + sequence_length = fluid.data( + name="sequence_length", shape=[None], dtype='int32' + ) for bidirectional in [True, False]: for batch_first in [True, False]: @@ -4620,30 +5132,35 @@ class TestBook(LayerTest): sequence_length=sequence_length, dropout_prob=0.5, bidirectional=bidirectional, - batch_first=batch_first) + batch_first=batch_first, + ) class TestMetricsDetectionMap(unittest.TestCase): - def test_detection_map(self): program = fluid.Program() with program_guard(program): - detect_res = fluid.layers.data(name='detect_res', - shape=[10, 6], - append_batch_size=False, - dtype='float32') - label = fluid.layers.data(name='label', - shape=[10, 1], - append_batch_size=False, - dtype='float32') - box = fluid.layers.data(name='bbox', - shape=[10, 4], - append_batch_size=False, - dtype='float32') - map_eval = fluid.metrics.DetectionMAP(detect_res, - label, - box, - class_num=21) + detect_res = fluid.layers.data( + name='detect_res', + shape=[10, 6], + append_batch_size=False, + dtype='float32', + ) + label = fluid.layers.data( + name='label', + shape=[10, 1], + append_batch_size=False, + dtype='float32', + ) + box = fluid.layers.data( + name='bbox', + shape=[10, 4], + append_batch_size=False, + dtype='float32', + ) + map_eval = fluid.metrics.DetectionMAP( + detect_res, label, box, class_num=21 + ) cur_map, accm_map = map_eval.get_map_var() self.assertIsNotNone(cur_map) self.assertIsNotNone(accm_map) @@ -4651,11 +5168,11 @@ class TestMetricsDetectionMap(unittest.TestCase): class 
ExampleNet(paddle.nn.Layer): - def __init__(self): super(ExampleNet, self).__init__() self.weight = self.create_parameter( - shape=[1, 1], attr=paddle.ParamAttr(trainable=False)) + shape=[1, 1], attr=paddle.ParamAttr(trainable=False) + ) def forward(self): # only for test parameter trainable attr @@ -4663,7 +5180,6 @@ class ExampleNet(paddle.nn.Layer): class TestLayerParameterTrainableSet(unittest.TestCase): - def test_layer_parameter_set(self): with fluid.dygraph.guard(): net = ExampleNet() @@ -4671,7 +5187,6 @@ class TestLayerParameterTrainableSet(unittest.TestCase): class TestLayerTrainingAttribute(unittest.TestCase): - def test_set_train_eval_in_dynamic_mode(self): with fluid.dygraph.guard(): net = paddle.nn.Dropout() @@ -4689,7 +5204,6 @@ class TestLayerTrainingAttribute(unittest.TestCase): class MyLayer(paddle.nn.Layer): - def __init__(self): super(MyLayer, self).__init__() self._linear = paddle.nn.Linear(1, 1) @@ -4702,7 +5216,6 @@ class MyLayer(paddle.nn.Layer): class MySuperLayer(paddle.nn.Layer): - def __init__(self): super(MySuperLayer, self).__init__() self._mylayer = MyLayer() @@ -4713,7 +5226,6 @@ class MySuperLayer(paddle.nn.Layer): class TestSubLayerCount(unittest.TestCase): - def test_sublayer(self): with fluid.dygraph.guard(): mySuperlayer = MySuperLayer() diff --git a/python/paddle/fluid/tests/unittests/test_layout_autotune.py b/python/paddle/fluid/tests/unittests/test_layout_autotune.py index b7af0464ba9f93f90fbe6d7e27e9d3bca38c32eb..95618edcc38fe8f8563c006f5b76d74d4609b927 100644 --- a/python/paddle/fluid/tests/unittests/test_layout_autotune.py +++ b/python/paddle/fluid/tests/unittests/test_layout_autotune.py @@ -23,7 +23,6 @@ import paddle.nn.functional as F class SimpleNet(paddle.nn.Layer): - def __init__(self, data_format="NCHW", class_num=2): super(SimpleNet, self).__init__() self.conv = paddle.nn.Conv2D(3, 8, (3, 3)) @@ -44,7 +43,6 @@ class SimpleNet(paddle.nn.Layer): class LayoutAutoTune(unittest.TestCase): - def test_config(self): paddle.fluid.core.enable_layout_autotune() if self.use_autoune(): @@ -59,9 +57,8 @@ class LayoutAutoTune(unittest.TestCase): def use_autoune(self): if paddle.is_compiled_with_cuda(): paddle.incubate.autotune.set_config( - config={"layout": { - "enable": True - }}) + config={"layout": {"enable": True}} + ) return paddle.fluid.core.use_layout_autotune() else: config = {"layout": {"enable": False}} @@ -75,11 +72,12 @@ class LayoutAutoTune(unittest.TestCase): def train(self, data_format): model = SimpleNet(data_format="NCHW", class_num=2) data = paddle.rand([1, 3, 16, 16]) - if (data_format == "NHWC"): + if data_format == "NHWC": data = paddle.rand([1, 16, 16, 3]) label_data = paddle.randint(0, 1, shape=[1, 1], dtype="int64") - optimizer = paddle.optimizer.SGD(learning_rate=0.0001, - parameters=model.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.0001, parameters=model.parameters() + ) scaler = paddle.amp.GradScaler() for i in range(2): with paddle.amp.auto_cast(level="O2"): @@ -101,8 +99,9 @@ class LayoutAutoTune(unittest.TestCase): conv = paddle.nn.Conv2D(3, 8, (3, 3)) data = paddle.rand([1, 3, 16, 14]) label_data = paddle.randint(0, 1, shape=[1, 1], dtype="int64") - optimizer = paddle.optimizer.SGD(learning_rate=0.0001, - parameters=conv.parameters()) + optimizer = paddle.optimizer.SGD( + learning_rate=0.0001, parameters=conv.parameters() + ) scaler = paddle.amp.GradScaler() with paddle.amp.auto_cast(level="O2"): conv_out = conv(data) @@ -170,7 +169,6 @@ class LayoutAutoTune(unittest.TestCase): class 
TestAutoTuneAPI(unittest.TestCase): - def test_set_config_warnings(self): with warnings.catch_warnings(record=True) as w: config = {"layout": {"enable": 1}} diff --git a/python/paddle/fluid/tests/unittests/test_lazy_init.py b/python/paddle/fluid/tests/unittests/test_lazy_init.py index 79b3429b10e0bf78852b84462a0db59b55fa476f..914fc1412615241629367b0409356140d1e8d5c4 100644 --- a/python/paddle/fluid/tests/unittests/test_lazy_init.py +++ b/python/paddle/fluid/tests/unittests/test_lazy_init.py @@ -17,12 +17,18 @@ import unittest import numpy as np from paddle import LazyGuard from paddle.nn import Linear, Layer -from paddle.nn.initializer import Constant, Normal, TruncatedNormal, Uniform, XavierNormal, XavierUniform +from paddle.nn.initializer import ( + Constant, + Normal, + TruncatedNormal, + Uniform, + XavierNormal, + XavierUniform, +) from paddle.fluid import unique_name class TestInitializerBase(unittest.TestCase): - def setUp(self): self.set_initializer() self.set_param_attr() @@ -34,11 +40,13 @@ class TestInitializerBase(unittest.TestCase): self.b_initializer = Constant(0.3) def set_param_attr(self): - self.weight_attr = paddle.ParamAttr(name="weight", - initializer=self.w_initializer) + self.weight_attr = paddle.ParamAttr( + name="weight", initializer=self.w_initializer + ) - self.bias_attr = paddle.ParamAttr(name="bias", - initializer=self.b_initializer) + self.bias_attr = paddle.ParamAttr( + name="bias", initializer=self.b_initializer + ) def set_init_ops(self): self.init_ops = ['fill_constant', 'fill_constant'] @@ -48,29 +56,26 @@ class TestInitializerBase(unittest.TestCase): def test_wrapper(self): with LazyGuard(): - fc = Linear(10, - 10, - weight_attr=self.weight_attr, - bias_attr=self.bias_attr) + fc = Linear( + 10, 10, weight_attr=self.weight_attr, bias_attr=self.bias_attr + ) program = fc._startup_program() print(program) self.check_program(program) def check_program(self, program): self.assertEqual(program.block(0).var("weight").shape, (10, 10)) - self.assertEqual(program.block(0).var("bias").shape, (10, )) + self.assertEqual(program.block(0).var("bias").shape, (10,)) ops = [op.type for op in program.block(0).ops] self.assertEqual(ops, self.init_ops) class TestDygraphLazy(TestInitializerBase): - def test_wrapper(self): with LazyGuard(): - fc = Linear(10, - 10, - weight_attr=self.weight_attr, - bias_attr=self.bias_attr) + fc = Linear( + 10, 10, weight_attr=self.weight_attr, bias_attr=self.bias_attr + ) self.check_data(fc) @@ -86,14 +91,15 @@ class TestDygraphLazy(TestInitializerBase): out = model(x) self.assertEqual(out.shape, [2, 10]) - np.testing.assert_allclose(model.weight.numpy(), - np.ones([10, 10], dtype=np.float32) * 0.6) - np.testing.assert_allclose(model.bias.numpy(), - np.ones([10], dtype=np.float32) * 0.3) + np.testing.assert_allclose( + model.weight.numpy(), np.ones([10, 10], dtype=np.float32) * 0.6 + ) + np.testing.assert_allclose( + model.bias.numpy(), np.ones([10], dtype=np.float32) * 0.3 + ) class NestModel(Layer): - def __init__(self, base_model): super(NestModel, self).__init__() self.base_model = base_model @@ -106,13 +112,11 @@ class NestModel(Layer): class TestNestModelLazy(TestInitializerBase): - def test_wrapper(self): with LazyGuard(): - base_model = Linear(10, - 10, - weight_attr=self.weight_attr, - bias_attr=self.bias_attr) + base_model = Linear( + 10, 10, weight_attr=self.weight_attr, bias_attr=self.bias_attr + ) nest_model = NestModel(base_model) self.check_data(nest_model) @@ -130,16 +134,19 @@ class TestNestModelLazy(TestInitializerBase): out = 
model(x) self.assertEqual(out.shape, [2, 10]) - np.testing.assert_allclose(model.base_model.weight.numpy(), - np.ones([10, 10], dtype=np.float32) * 0.6) - np.testing.assert_allclose(model.base_model.bias.numpy(), - np.ones([10], dtype=np.float32) * 0.3) + np.testing.assert_allclose( + model.base_model.weight.numpy(), + np.ones([10, 10], dtype=np.float32) * 0.6, + ) + np.testing.assert_allclose( + model.base_model.bias.numpy(), np.ones([10], dtype=np.float32) * 0.3 + ) def check_program(self, model): # verify nest_model startup_program whole_program = model._startup_program() self.assertEqual(whole_program.block(0).var("weight").shape, (10, 10)) - self.assertEqual(whole_program.block(0).var("bias").shape, (10, )) + self.assertEqual(whole_program.block(0).var("bias").shape, (10,)) ops = [op.type for op in whole_program.block(0).ops] init_ops = self.init_ops + ['uniform_random', 'fill_constant'] self.assertEqual(ops, init_ops) @@ -147,13 +154,12 @@ class TestNestModelLazy(TestInitializerBase): # verify base_model startup_program sub_program = model.base_model._startup_program() self.assertEqual(sub_program.block(0).var("weight").shape, (10, 10)) - self.assertEqual(sub_program.block(0).var("bias").shape, (10, )) + self.assertEqual(sub_program.block(0).var("bias").shape, (10,)) ops = [op.type for op in sub_program.block(0).ops] self.assertEqual(ops, self.init_ops) class TestUniform(TestInitializerBase): - def set_initializer(self): self.w_initializer = Uniform() self.b_initializer = Uniform() @@ -163,7 +169,6 @@ class TestUniform(TestInitializerBase): class TestNormal(TestInitializerBase): - def set_initializer(self): self.w_initializer = Normal() self.b_initializer = Normal() @@ -173,26 +178,24 @@ class TestNormal(TestInitializerBase): class TestTruncatedNormal(TestInitializerBase): - def set_initializer(self): self.w_initializer = TruncatedNormal() self.b_initializer = TruncatedNormal() def set_init_ops(self): self.init_ops = [ - 'truncated_gaussian_random', 'truncated_gaussian_random' + 'truncated_gaussian_random', + 'truncated_gaussian_random', ] class TestXavierNormal(TestNormal): - def set_initializer(self): self.w_initializer = XavierNormal() self.b_initializer = XavierNormal() class TestXavierUniform(TestUniform): - def set_initializer(self): self.w_initializer = XavierUniform() self.b_initializer = XavierUniform() diff --git a/python/paddle/fluid/tests/unittests/test_lbfgs.py b/python/paddle/fluid/tests/unittests/test_lbfgs.py index 36b01d05e78653ea3922c40132e7671bdd8b6401..21d96d17c2ad9fbcecf0399c224403fff82ae559 100644 --- a/python/paddle/fluid/tests/unittests/test_lbfgs.py +++ b/python/paddle/fluid/tests/unittests/test_lbfgs.py @@ -44,37 +44,35 @@ def test_static_graph_H0(func, x0, H0, dtype='float32'): startup = paddle.static.Program() with paddle.static.program_guard(main, startup): X = paddle.static.data(name='x', shape=[x0.shape[0]], dtype=dtype) - H = paddle.static.data(name='h', - shape=[H0.shape[0], H0.shape[1]], - dtype=dtype) - Y = minimize_lbfgs(func, - X, - initial_inverse_hessian_estimate=H, - dtype=dtype) + H = paddle.static.data( + name='h', shape=[H0.shape[0], H0.shape[1]], dtype=dtype + ) + Y = minimize_lbfgs( + func, X, initial_inverse_hessian_estimate=H, dtype=dtype + ) exe = paddle.static.Executor() exe.run(startup) return exe.run(main, feed={'x': x0, 'h': H0}, fetch_list=[Y]) -def test_dynamic_graph(func, - x0, - H0=None, - line_search_fn='strong_wolfe', - dtype='float32'): +def test_dynamic_graph( + func, x0, H0=None, line_search_fn='strong_wolfe', 
dtype='float32' +): paddle.disable_static() x0 = paddle.to_tensor(x0) if H0 is not None: H0 = paddle.to_tensor(H0) - return minimize_lbfgs(func, - x0, - initial_inverse_hessian_estimate=H0, - line_search_fn=line_search_fn, - dtype=dtype) + return minimize_lbfgs( + func, + x0, + initial_inverse_hessian_estimate=H0, + line_search_fn=line_search_fn, + dtype=dtype, + ) class TestLbfgs(unittest.TestCase): - def test_quadratic_nd(self): for dimension in [1, 10]: minimum = np.random.random(size=[dimension]).astype('float32') @@ -84,7 +82,8 @@ class TestLbfgs(unittest.TestCase): minimum_ = paddle.assign(minimum) scale_ = paddle.assign(scale) return paddle.sum( - paddle.multiply(scale_, (F.square_error_cost(x, minimum_)))) + paddle.multiply(scale_, (F.square_error_cost(x, minimum_))) + ) x0 = np.random.random(size=[dimension]).astype('float32') results = test_static_graph(func, x0) @@ -99,16 +98,17 @@ class TestLbfgs(unittest.TestCase): def func(x): # df = 3(x - 1.01)(x - 0.99) # f = x^3 - 3x^2 + 3*1.01*0.99x - return x * x * x / 3.0 - ( - extream_point[0] + extream_point[1] - ) * x * x / 2 + extream_point[0] * extream_point[1] * x + return ( + x * x * x / 3.0 + - (extream_point[0] + extream_point[1]) * x * x / 2 + + extream_point[0] * extream_point[1] * x + ) x0 = np.array([-1.7]).astype('float32') results = test_static_graph(func, x0) self.assertFalse(results[0][0]) def test_multi_minima(self): - def func(x): # df = 12(x + 1.1)(x - 0.2)(x - 0.8) # f = 3*x^4+0.4*x^3-5.46*x^2+2.112*x @@ -131,7 +131,7 @@ class TestLbfgs(unittest.TestCase): # f(x, y) = (a - x)^2 + b (y - x^2)^2 # minimum = (a, a^2) x, y = position[0], position[1] - c = (a - x)**2 + b * (y - x**2)**2 + c = (a - x) ** 2 + b * (y - x**2) ** 2 # the return cant be np array[1], or in jacobin will cause flat error return c[0] @@ -141,7 +141,6 @@ class TestLbfgs(unittest.TestCase): np.testing.assert_allclose(minimum, results[2], rtol=1e-05) def test_exception(self): - def func(x): return paddle.dot(x, x) @@ -150,11 +149,9 @@ class TestLbfgs(unittest.TestCase): # test dtype is not float32 or float64 x1 = np.random.random(size=[2]).astype('int32') - self.assertRaises(ValueError, - test_static_graph, - func, - x1, - dtype='int32') + self.assertRaises( + ValueError, test_static_graph, func, x1, dtype='int32' + ) # test initial_inverse_hessian_estimate is good results = test_static_graph_H0(func, x0, H0, dtype='float32') @@ -164,12 +161,9 @@ class TestLbfgs(unittest.TestCase): # test initial_inverse_hessian_estimate is bad and float64 x2 = np.random.random(size=[2]).astype('float64') H1 = np.array([[1.0, 2.0], [3.0, 1.0]]).astype('float64') - self.assertRaises(ValueError, - test_static_graph_H0, - func, - x2, - H0=H1, - dtype='float64') + self.assertRaises( + ValueError, test_static_graph_H0, func, x2, H0=H1, dtype='float64' + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py index 6f113c13a01a70daa60ef5b5313ede90dedc07aa..83f3f82d7e7a204dff7e45e592253153aefc0bfb 100644 --- a/python/paddle/fluid/tests/unittests/test_lcm.py +++ b/python/paddle/fluid/tests/unittests/test_lcm.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestLcmAPI(unittest.TestCase): - def setUp(self): self.x_np = 12 self.y_np = 20 @@ -37,32 +36,34 @@ class TestLcmAPI(unittest.TestCase): x2 = fluid.data(name='input2', dtype='int32', shape=self.y_shape) out = paddle.lcm(x1, x2) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + 
fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(fluid.default_main_program(), - feed={ - 'input1': self.x_np, - 'input2': self.y_np - }, - fetch_list=[out]) - self.assertTrue((np.array(res[0]) == np.lcm(self.x_np, - self.y_np)).all()) + res = exe.run( + fluid.default_main_program(), + feed={'input1': self.x_np, 'input2': self.y_np}, + fetch_list=[out], + ) + self.assertTrue( + (np.array(res[0]) == np.lcm(self.x_np, self.y_np)).all() + ) def test_dygraph(self): paddle.disable_static() x1 = paddle.to_tensor(self.x_np) x2 = paddle.to_tensor(self.y_np) result = paddle.lcm(x1, x2) - np.testing.assert_allclose(np.lcm(self.x_np, self.y_np), - result.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np.lcm(self.x_np, self.y_np), result.numpy(), rtol=1e-05 + ) paddle.enable_static() class TestLcmAPI2(TestLcmAPI): - def setUp(self): self.x_np = np.arange(6).astype(np.int32) self.y_np = np.array([20]).astype(np.int32) @@ -71,7 +72,6 @@ class TestLcmAPI2(TestLcmAPI): class TestLcmAPI3(TestLcmAPI): - def setUp(self): self.x_np = 0 self.y_np = 20 @@ -80,7 +80,6 @@ class TestLcmAPI3(TestLcmAPI): class TestLcmAPI4(TestLcmAPI): - def setUp(self): self.x_np = 0 self.y_np = 0 @@ -89,7 +88,6 @@ class TestLcmAPI4(TestLcmAPI): class TestLcmAPI5(TestLcmAPI): - def setUp(self): self.x_np = 12 self.y_np = -20 diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py index bd73f628a02986bfca63c7adc7f5a81bde59b843..ffe45f3ec312e97df979eea44660bacb4093d429 100644 --- a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py @@ -23,45 +23,41 @@ import paddle.fluid.framework as framework import paddle.fluid.core as core -def exponential_decay(learning_rate, - global_step, - decay_steps, - decay_rate, - staircase=False): +def exponential_decay( + learning_rate, global_step, decay_steps, decay_rate, staircase=False +): exponent = global_step / decay_steps if staircase: exponent = math.floor(exponent) return learning_rate * decay_rate**exponent -def natural_exp_decay(learning_rate, - global_step, - decay_steps, - decay_rate, - staircase=False): +def natural_exp_decay( + learning_rate, global_step, decay_steps, decay_rate, staircase=False +): exponent = float(global_step) / float(decay_steps) if staircase: exponent = math.floor(exponent) return learning_rate * math.exp(-1 * decay_rate * exponent) -def inverse_time_decay(learning_rate, - global_step, - decay_steps, - decay_rate, - staircase=False): +def inverse_time_decay( + learning_rate, global_step, decay_steps, decay_rate, staircase=False +): temp = float(global_step) / float(decay_steps) if staircase: temp = math.floor(temp) return learning_rate / (1 + decay_rate * temp) -def polynomial_decay(learning_rate, - global_step, - decay_steps, - end_learning_rate=0.0001, - power=1.0, - cycle=False): +def polynomial_decay( + learning_rate, + global_step, + decay_steps, + end_learning_rate=0.0001, + power=1.0, + cycle=False, +): if cycle: div = math.ceil(global_step / float(decay_steps)) if div == 0: @@ -69,8 +65,9 @@ def polynomial_decay(learning_rate, decay_steps = decay_steps * div else: global_step = min(global_step, decay_steps) - return (learning_rate - end_learning_rate) * \ - ((1 - float(global_step) / float(decay_steps)) ** power) + end_learning_rate + return (learning_rate - end_learning_rate) * ( + (1 - 
float(global_step) / float(decay_steps)) ** power + ) + end_learning_rate def piecewise_decay(global_step, boundaries, values): @@ -83,8 +80,9 @@ def piecewise_decay(global_step, boundaries, values): def cosine_decay(global_step, learning_rate, step_each_epoch, epochs): cur_epoch = math.floor(global_step / step_each_epoch) - decayed_lr = learning_rate * 0.5 * (math.cos(cur_epoch * math.pi / epochs) + - 1) + decayed_lr = ( + learning_rate * 0.5 * (math.cos(cur_epoch * math.pi / epochs) + 1) + ) return decayed_lr @@ -119,7 +117,6 @@ def lambda_decay(global_step, learning_rate, lr_lambda): class TestLearningRateDecayDygraph(unittest.TestCase): - def test_LR_state_dict(self): with fluid.dygraph.guard(): x = np.random.uniform(-1, 1, [3, 10]).astype("float32") @@ -130,17 +127,24 @@ class TestLearningRateDecayDygraph(unittest.TestCase): learning_rate=0.1, decay_steps=10000, decay_rate=0.5, - staircase=True) + staircase=True, + ) Step_scheduler = fluid.dygraph.StepDecay(0.5, step_size=3) Reducelr_scheduler = fluid.dygraph.ReduceLROnPlateau( - learning_rate=1.0, decay_rate=0.5, patience=5, cooldown=3) - - adam1 = fluid.optimizer.Adam(learning_rate=Exponential_scheduler, - parameter_list=linear.parameters()) - adam2 = fluid.optimizer.Adam(learning_rate=Step_scheduler, - parameter_list=linear.parameters()) - adam3 = fluid.optimizer.Adam(learning_rate=Reducelr_scheduler, - parameter_list=linear.parameters()) + learning_rate=1.0, decay_rate=0.5, patience=5, cooldown=3 + ) + + adam1 = fluid.optimizer.Adam( + learning_rate=Exponential_scheduler, + parameter_list=linear.parameters(), + ) + adam2 = fluid.optimizer.Adam( + learning_rate=Step_scheduler, parameter_list=linear.parameters() + ) + adam3 = fluid.optimizer.Adam( + learning_rate=Reducelr_scheduler, + parameter_list=linear.parameters(), + ) print(adam3.state_dict()) for epoch in range(10): @@ -161,59 +165,76 @@ class TestLearningRateDecayDygraph(unittest.TestCase): learning_rate=0.1, decay_steps=10000, decay_rate=0.5, - staircase=True) + staircase=True, + ) Step_scheduler_test = fluid.dygraph.StepDecay(0.5, step_size=3) Reducelr_scheduler_test = fluid.dygraph.ReduceLROnPlateau( - learning_rate=1.0, decay_rate=0.5, patience=5, cooldown=3) + learning_rate=1.0, decay_rate=0.5, patience=5, cooldown=3 + ) fluid.dygraph.save_dygraph(adam1.state_dict(), "save_path") _, opt_state = fluid.dygraph.load_dygraph("save_path") adam_test = fluid.optimizer.Adam( learning_rate=Exponential_scheduler_test, - parameter_list=linear.parameters()) + parameter_list=linear.parameters(), + ) adam_test.set_dict(opt_state) self.assertEqual( adam_test._learning_rate.step_num, adam1._learning_rate.step_num, - "epoch_num is different before and after set_dict") + "epoch_num is different before and after set_dict", + ) fluid.dygraph.save_dygraph(adam2.state_dict(), "save_path") _, opt_state = fluid.dygraph.load_dygraph("save_path") - adam_test = fluid.optimizer.Adam(learning_rate=Step_scheduler_test, - parameter_list=linear.parameters()) + adam_test = fluid.optimizer.Adam( + learning_rate=Step_scheduler_test, + parameter_list=linear.parameters(), + ) adam_test.set_dict(opt_state) self.assertEqual( adam_test._learning_rate.epoch_num, adam2._learning_rate.epoch_num, - "epoch_num is different before and after set_dict") + "epoch_num is different before and after set_dict", + ) self.assertEqual( - adam_test._learning_rate(), adam2._learning_rate(), - "current learning rate is different before and after set_dict") + adam_test._learning_rate(), + adam2._learning_rate(), + "current 
learning rate is different before and after set_dict", + ) fluid.dygraph.save_dygraph(adam3.state_dict(), "save_path") _, opt_state = fluid.dygraph.load_dygraph("save_path") adam_test = fluid.optimizer.Adam( learning_rate=Reducelr_scheduler_test, - parameter_list=linear.parameters()) + parameter_list=linear.parameters(), + ) adam_test.set_dict(opt_state) self.assertEqual( adam_test._learning_rate.best_loss, adam3._learning_rate.best_loss.numpy()[0], - "best_loss is different before and after set_dict") + "best_loss is different before and after set_dict", + ) self.assertEqual( adam_test._learning_rate.cooldown_counter, adam3._learning_rate.cooldown_counter, - "cooldown_counter is different before and after set_dict") + "cooldown_counter is different before and after set_dict", + ) self.assertEqual( adam_test._learning_rate.num_bad_epochs, adam3._learning_rate.num_bad_epochs, - "num_bad_epochs is different before and after set_dict") - self.assertEqual(adam_test._learning_rate.epoch_num, - adam3._learning_rate.epoch_num, - "epoch is different before and after set_dict") + "num_bad_epochs is different before and after set_dict", + ) + self.assertEqual( + adam_test._learning_rate.epoch_num, + adam3._learning_rate.epoch_num, + "epoch is different before and after set_dict", + ) self.assertEqual( - adam_test._learning_rate(), adam3._learning_rate(), - "current learning rate is different before and after set_dict") + adam_test._learning_rate(), + adam3._learning_rate(), + "current learning rate is different before and after set_dict", + ) def test_NoamDecay(self): with fluid.dygraph.guard(): @@ -223,42 +244,47 @@ class TestLearningRateDecayDygraph(unittest.TestCase): lr = fluid.layers.noam_decay(d_model, warmup_steps, learning_rate) for step in range(5): step += 1 - right_result = noam_decay(step, d_model, warmup_steps, - learning_rate) + right_result = noam_decay( + step, d_model, warmup_steps, learning_rate + ) fluid_result = lr() self.assertAlmostEqual( right_result, fluid_result[0], - msg= - 'Failed lr scheduler in step {0}, Python result is {1}, Fluid result is {2}' - .format(step, right_result, fluid_result[0])) + msg='Failed lr scheduler in step {0}, Python result is {1}, Fluid result is {2}'.format( + step, right_result, fluid_result[0] + ), + ) def test_LinearLrWarmup(self): with fluid.dygraph.guard(): - lr = fluid.layers.polynomial_decay(learning_rate=1.0, - decay_steps=10, - end_learning_rate=0.0, - power=1.0) - lr = fluid.layers.linear_lr_warmup(learning_rate=lr, - warmup_steps=2, - start_lr=0.0, - end_lr=1.0) + lr = fluid.layers.polynomial_decay( + learning_rate=1.0, + decay_steps=10, + end_learning_rate=0.0, + power=1.0, + ) + lr = fluid.layers.linear_lr_warmup( + learning_rate=lr, warmup_steps=2, start_lr=0.0, end_lr=1.0 + ) right_result = [0.5, 0.9, 0.8, 0.7, 0.6] for i in range(5): t = lr() - np.testing.assert_allclose(t.numpy()[0].item(), - right_result[i], - rtol=1e-05) + np.testing.assert_allclose( + t.numpy()[0].item(), right_result[i], rtol=1e-05 + ) with self.assertRaises(TypeError): - lr = fluid.layers.linear_lr_warmup(learning_rate="fake_lr", - warmup_steps=2, - start_lr=0.0, - end_lr=1.0) + lr = fluid.layers.linear_lr_warmup( + learning_rate="fake_lr", + warmup_steps=2, + start_lr=0.0, + end_lr=1.0, + ) def test_MultiStepDecay(self): with fluid.dygraph.guard(): @@ -267,30 +293,36 @@ class TestLearningRateDecayDygraph(unittest.TestCase): decay_rate = 0.2 linear = fluid.dygraph.Linear(10, 10) - scheduler = fluid.dygraph.MultiStepDecay(learning_rate, milestones, - 
decay_rate) + scheduler = fluid.dygraph.MultiStepDecay( + learning_rate, milestones, decay_rate + ) adam = fluid.optimizer.AdamOptimizer( - learning_rate=scheduler, parameter_list=linear.parameters()) + learning_rate=scheduler, parameter_list=linear.parameters() + ) for epoch in range(10): - right_result = multi_step_decay(epoch, learning_rate, - milestones, decay_rate) + right_result = multi_step_decay( + epoch, learning_rate, milestones, decay_rate + ) fluid_result = adam.current_step_lr() scheduler.epoch() self.assertAlmostEqual( right_result, fluid_result, - msg= - 'Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}' - .format(epoch, right_result, fluid_result)) + msg='Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}'.format( + epoch, right_result, fluid_result + ), + ) with self.assertRaises(ValueError): - lr = fluid.dygraph.MultiStepDecay(learning_rate, [30, 50, 20], - 0.1) + lr = fluid.dygraph.MultiStepDecay( + learning_rate, [30, 50, 20], 0.1 + ) with self.assertRaises(ValueError): - lr = fluid.dygraph.MultiStepDecay(learning_rate, [20, 30, 50], - 1) + lr = fluid.dygraph.MultiStepDecay( + learning_rate, [20, 30, 50], 1 + ) with self.assertRaises(TypeError): lr = fluid.dygraph.MultiStepDecay("test", [20, 30, 50]) @@ -303,19 +335,22 @@ class TestLearningRateDecayDygraph(unittest.TestCase): learning_rate = 0.5 step_size = 3 decay_rate = 0.2 - scheduler = fluid.dygraph.StepDecay(learning_rate, step_size, - decay_rate) + scheduler = fluid.dygraph.StepDecay( + learning_rate, step_size, decay_rate + ) for epoch in range(10): - right_result = step_decay(epoch, learning_rate, step_size, - decay_rate) + right_result = step_decay( + epoch, learning_rate, step_size, decay_rate + ) fluid_result = scheduler().numpy()[0] scheduler.epoch() self.assertAlmostEqual( right_result, fluid_result, - msg= - 'Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}' - .format(epoch, right_result, fluid_result)) + msg='Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}'.format( + epoch, right_result, fluid_result + ), + ) with self.assertRaises(TypeError): lr = fluid.dygraph.StepDecay(learning_rate, "test", 0.1) @@ -330,8 +365,9 @@ class TestLearningRateDecayDygraph(unittest.TestCase): scheduler = fluid.dygraph.LambdaDecay(learning_rate, lr_lambda) linear = fluid.dygraph.nn.Linear(10, 10) - adam = fluid.optimizer.Adam(scheduler, - parameter_list=linear.parameters()) + adam = fluid.optimizer.Adam( + scheduler, parameter_list=linear.parameters() + ) for epoch in range(30): right_result = lambda_decay(epoch, learning_rate, lr_lambda) @@ -340,26 +376,28 @@ class TestLearningRateDecayDygraph(unittest.TestCase): self.assertAlmostEqual( right_result, fluid_result, - msg= - 'Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}' - .format(epoch, right_result, fluid_result)) + msg='Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}'.format( + epoch, right_result, fluid_result + ), + ) with self.assertRaises(TypeError): lr = fluid.dygraph.LambdaDecay(learning_rate, "test") class TestLearningRateDecay(unittest.TestCase): - def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): places.append(fluid.CUDAPlace(0)) for place in places: - self.check_decay_with_place(place, python_decay_fn, fluid_decay_fn, - kwargs) + self.check_decay_with_place( + place, python_decay_fn, fluid_decay_fn, kwargs + ) - def 
check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn, - kwargs): + def check_decay_with_place( + self, place, python_decay_fn, fluid_decay_fn, kwargs + ): main_prog = fluid.Program() startup_prog = fluid.Program() @@ -375,23 +413,27 @@ class TestLearningRateDecay(unittest.TestCase): # Step of NoamDecay starts from 1. if python_decay_fn.__name__ == 'noam_decay': step += 1 - lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) - python_decayed_lr = python_decay_fn(global_step=float(step), - **kwargs) + (lr_val,) = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) + python_decayed_lr = python_decay_fn( + global_step=float(step), **kwargs + ) self.assertAlmostEqual( python_decayed_lr, lr_val[0], - msg= - 'Failed lr scheduler is {0}, step {1}, Python result is {2}, Fluid result is {3}' - .format(python_decay_fn.__name__, str(step), - str(python_decayed_lr), str(lr_val[0]))) + msg='Failed lr scheduler is {0}, step {1}, Python result is {2}, Fluid result is {3}'.format( + python_decay_fn.__name__, + str(step), + str(python_decayed_lr), + str(lr_val[0]), + ), + ) def test_decay(self): common_kwargs_true = { "learning_rate": 1.0, "decay_steps": 5, "decay_rate": 0.5, - "staircase": True + "staircase": True, } common_kwargs_false = copy.deepcopy(common_kwargs_true) common_kwargs_false["staircase"] = False @@ -402,37 +444,47 @@ class TestLearningRateDecay(unittest.TestCase): (natural_exp_decay, layers.natural_exp_decay, common_kwargs_true), (natural_exp_decay, layers.natural_exp_decay, common_kwargs_false), (inverse_time_decay, layers.inverse_time_decay, common_kwargs_true), - (inverse_time_decay, layers.inverse_time_decay, - common_kwargs_false), - (polynomial_decay, layers.polynomial_decay, { - "learning_rate": 1.0, - "decay_steps": 5, - "cycle": True - }), - (polynomial_decay, layers.polynomial_decay, { - "learning_rate": 1.0, - "decay_steps": 5, - "cycle": False - }), - (piecewise_decay, layers.piecewise_decay, { - "boundaries": [3, 6, 9], - "values": [0.1, 0.2, 0.3, 0.4] - }), - (cosine_decay, layers.cosine_decay, { - "learning_rate": 0.1, - "step_each_epoch": 100, - "epochs": 120 - }), - (noam_decay, layers.noam_decay, { - "d_model": 0.01, - "warmup_steps": 200, - "learning_rate": 2.0 - }) + ( + inverse_time_decay, + layers.inverse_time_decay, + common_kwargs_false, + ), + ( + polynomial_decay, + layers.polynomial_decay, + {"learning_rate": 1.0, "decay_steps": 5, "cycle": True}, + ), + ( + polynomial_decay, + layers.polynomial_decay, + {"learning_rate": 1.0, "decay_steps": 5, "cycle": False}, + ), + ( + piecewise_decay, + layers.piecewise_decay, + {"boundaries": [3, 6, 9], "values": [0.1, 0.2, 0.3, 0.4]}, + ), + ( + cosine_decay, + layers.cosine_decay, + {"learning_rate": 0.1, "step_each_epoch": 100, "epochs": 120}, + ), + ( + noam_decay, + layers.noam_decay, + {"d_model": 0.01, "warmup_steps": 200, "learning_rate": 2.0}, + ), ] for py_decay_fn, fluid_decay_fn, kwargs in decay_fns: - print("class=" + self.__class__.__name__ + " decay_fn=" + - py_decay_fn.__name__ + " kwargs=" + str(kwargs)) + print( + "class=" + + self.__class__.__name__ + + " decay_fn=" + + py_decay_fn.__name__ + + " kwargs=" + + str(kwargs) + ) main_program = framework.Program() startup_program = framework.Program() with framework.program_guard(main_program, startup_program): @@ -440,19 +492,20 @@ class TestLearningRateDecay(unittest.TestCase): class TestLinearWamrupLearningRateDecay(unittest.TestCase): - - def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn, - kwargs): + def 
check_decay_with_place( + self, place, python_decay_fn, fluid_decay_fn, kwargs + ): main_prog = fluid.Program() startup_prog = fluid.Program() warmup_steps = 10 - start_lr = 0.1 / 3. + start_lr = 0.1 / 3.0 end_lr = 0.1 with fluid.program_guard(main_prog, startup_prog): - decayed_lr = layers.linear_lr_warmup(fluid_decay_fn(**kwargs), - warmup_steps, start_lr, end_lr) + decayed_lr = layers.linear_lr_warmup( + fluid_decay_fn(**kwargs), warmup_steps, start_lr, end_lr + ) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -462,24 +515,28 @@ class TestLinearWamrupLearningRateDecay(unittest.TestCase): # Step of NoamDecay starts from 1. if fluid_decay_fn.__name__ == 'noam_decay': step += 1 - lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) + (lr_val,) = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) if step < warmup_steps: - python_decayed_lr = linear_lr_warmup(float(step), warmup_steps, - start_lr, end_lr) + python_decayed_lr = linear_lr_warmup( + float(step), warmup_steps, start_lr, end_lr + ) else: - python_decayed_lr = python_decay_fn(global_step=float(step), - **kwargs) + python_decayed_lr = python_decay_fn( + global_step=float(step), **kwargs + ) self.assertAlmostEqual( python_decayed_lr, lr_val[0], - msg= - 'Test {0} Failed, step {1}, Python result is {2}, Fluid result is {3}' - .format(python_decay_fn.__name__, str(step), - str(python_decayed_lr), str(lr_val[0]))) + msg='Test {0} Failed, step {1}, Python result is {2}, Fluid result is {3}'.format( + python_decay_fn.__name__, + str(step), + str(python_decayed_lr), + str(lr_val[0]), + ), + ) class TestLinearWamrupLearningRateDecayWithScalarInput(unittest.TestCase): - def run_scalar_lr(self, place, lr, start_lr, end_lr): main_prog = fluid.Program() startup_prog = fluid.Program() @@ -487,27 +544,30 @@ class TestLinearWamrupLearningRateDecayWithScalarInput(unittest.TestCase): warmup_steps = 10 with fluid.program_guard(main_prog, startup_prog): - decayed_lr = layers.linear_lr_warmup(lr, warmup_steps, start_lr, - end_lr) + decayed_lr = layers.linear_lr_warmup( + lr, warmup_steps, start_lr, end_lr + ) exe = fluid.Executor(place) exe.run(startup_prog) for step in range(20): - lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) + (lr_val,) = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) if step < warmup_steps: - expected_lr = linear_lr_warmup(float(step), warmup_steps, - start_lr, end_lr) + expected_lr = linear_lr_warmup( + float(step), warmup_steps, start_lr, end_lr + ) else: expected_lr = lr self.assertAlmostEqual( expected_lr, lr_val[0], msg='Test failed, step {0}, expected {1}, but got {2}'.format( - step, expected_lr, lr_val[0])) + step, expected_lr, lr_val[0] + ), + ) def test_scalar_lr(self): - def run_places(lr, start_lr, end_lr): places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -517,13 +577,13 @@ class TestLinearWamrupLearningRateDecayWithScalarInput(unittest.TestCase): # float lr = 0.2 - start_lr = 0.1 / 3. + start_lr = 0.1 / 3.0 end_lr = 0.2 run_places(lr, start_lr, end_lr) # int end_lr - lr = 2. - start_lr = 0.1 / 3. 
+ lr = 2.0 + start_lr = 0.1 / 3.0 end_lr = 1 run_places(lr, start_lr, end_lr) diff --git a/python/paddle/fluid/tests/unittests/test_lerp_op.py b/python/paddle/fluid/tests/unittests/test_lerp_op.py index f7f096a8d5a2f7f4e65736d02e783f69b7957bf9..1cae3fbaba9ac9a2015a42dcfd92a3a8f28cb09b 100644 --- a/python/paddle/fluid/tests/unittests/test_lerp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lerp_op.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import unittest import numpy as np from op_test import OpTest @@ -25,14 +23,13 @@ np.random.seed(0) class TestLerp(OpTest): - def setUp(self): self.op_type = "lerp" self.python_api = paddle.lerp self.init_dtype() self.init_shape() - x = np.arange(1., 101.).astype(self.dtype).reshape(self.shape) - y = np.full(100, 10.).astype(self.dtype).reshape(self.shape) + x = np.arange(1.0, 101.0).astype(self.dtype).reshape(self.shape) + y = np.full(100, 10.0).astype(self.dtype).reshape(self.shape) w = np.asarray([0.5]).astype(self.dtype) self.inputs = {'X': x, 'Y': y, 'Weight': w} self.outputs = {'Out': x + w * (y - x)} @@ -51,72 +48,64 @@ class TestLerp(OpTest): class TestLerpWithDim2(TestLerp): - def init_shape(self): self.shape = [2, 50] class TestLerpWithDim3(TestLerp): - def init_shape(self): self.shape = [2, 2, 25] class TestLerpWithDim4(TestLerp): - def init_shape(self): self.shape = [2, 2, 5, 5] class TestLerpWithDim5(TestLerp): - def init_shape(self): self.shape = [2, 1, 2, 5, 5] class TestLerpWithDim6(TestLerp): - def init_shape(self): self.shape = [2, 1, 2, 5, 1, 5] class TestLerpBroadXY(TestLerp): - def setUp(self): self.op_type = "lerp" self.python_api = paddle.lerp self.init_dtype() self.init_shape() - x = np.arange(1., 201.).astype(self.dtype).reshape([2, 1, 2, 50]) - y = np.full(200, 10.).astype(self.dtype).reshape([2, 2, 1, 50]) + x = np.arange(1.0, 201.0).astype(self.dtype).reshape([2, 1, 2, 50]) + y = np.full(200, 10.0).astype(self.dtype).reshape([2, 2, 1, 50]) w = np.asarray([0.5]).astype(self.dtype) self.inputs = {'X': x, 'Y': y, 'Weight': w} self.outputs = {'Out': x + w * (y - x)} class TestLerpBroadWToXY(TestLerp): - def setUp(self): self.op_type = "lerp" self.python_api = paddle.lerp self.init_dtype() self.init_shape() x = np.full(600, 2.5).astype(self.dtype).reshape([50, 2, 2, 3]) - y = np.full(600, 1.).astype(self.dtype).reshape([50, 2, 2, 3]) + y = np.full(600, 1.0).astype(self.dtype).reshape([50, 2, 2, 3]) w = np.random.random([3]).astype(self.dtype) self.inputs = {'X': x, 'Y': y, 'Weight': w} self.outputs = {'Out': x + w * (y - x)} class TestLerpAPI(unittest.TestCase): - def init_dtype(self): self.dtype = 'float32' def setUp(self): self.init_dtype() - self.x = np.arange(1., 5.).astype(self.dtype) - self.y = np.full(4, 10.).astype(self.dtype) + self.x = np.arange(1.0, 5.0).astype(self.dtype) + self.y = np.full(4, 10.0).astype(self.dtype) self.w = np.asarray([0.75]).astype(self.dtype) self.res_ref = self.x + self.w * (self.y - self.x) self.place = [paddle.CPUPlace()] @@ -132,10 +121,12 @@ class TestLerpAPI(unittest.TestCase): y = paddle.fluid.data('y', [1, 4], dtype=self.dtype) out = paddle.lerp(x, y, 0.5) exe = paddle.static.Executor(place) - res = exe.run(feed={ - 'x': self.x.reshape([1, 4]), - 'y': self.y.reshape([1, 4]), - }) + res = exe.run( + feed={ + 'x': self.x.reshape([1, 4]), + 'y': self.y.reshape([1, 4]), + } + ) for r in res: np.testing.assert_allclose(self.res_ref, r, rtol=1e-05) @@ -143,7 +134,6 @@ 
class TestLerpAPI(unittest.TestCase): run(place) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) @@ -157,7 +147,6 @@ class TestLerpAPI(unittest.TestCase): run(place) def test_inplace_api(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) @@ -170,7 +159,6 @@ class TestLerpAPI(unittest.TestCase): run(place) def test_inplace_api_exception(self): - def run(place): paddle.disable_static(place) x = paddle.to_tensor(self.x) @@ -185,8 +173,8 @@ class TestLerpAPI(unittest.TestCase): def test_x_broadcast_y(self): paddle.disable_static() - x = np.arange(1., 21.).astype(self.dtype).reshape([2, 2, 5]) - y = np.full(30, 10.).astype(self.dtype).reshape([3, 2, 1, 5]) + x = np.arange(1.0, 21.0).astype(self.dtype).reshape([2, 2, 5]) + y = np.full(30, 10.0).astype(self.dtype).reshape([3, 2, 1, 5]) out = paddle.lerp(paddle.to_tensor(x), paddle.to_tensor(y), 0.5) res_ref = x + 0.5 * (y - x) np.testing.assert_allclose(res_ref, out.numpy(), rtol=1e-05) @@ -194,11 +182,12 @@ class TestLerpAPI(unittest.TestCase): def test_x_y_broadcast_w(self): paddle.disable_static() - x = np.arange(11., 21.).astype(self.dtype).reshape([2, 5]) + x = np.arange(11.0, 21.0).astype(self.dtype).reshape([2, 5]) y = np.full(20, 7.5).astype(self.dtype).reshape([2, 2, 5]) w = np.full(40, 0.225).astype(self.dtype).reshape([2, 2, 2, 5]) - out = paddle.lerp(paddle.to_tensor(x), paddle.to_tensor(y), - paddle.to_tensor(w)) + out = paddle.lerp( + paddle.to_tensor(x), paddle.to_tensor(y), paddle.to_tensor(w) + ) res_ref = x + w * (y - x) np.testing.assert_allclose(res_ref, out.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_lgamma_op.py b/python/paddle/fluid/tests/unittests/test_lgamma_op.py index 13e420abad348148db4543e028910f643a043eeb..54211aef14ffa4b7e30a7d7878ce2850394d3813 100644 --- a/python/paddle/fluid/tests/unittests/test_lgamma_op.py +++ b/python/paddle/fluid/tests/unittests/test_lgamma_op.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestLgammaOp(OpTest): - def setUp(self): self.op_type = 'lgamma' self.python_api = paddle.lgamma @@ -48,19 +47,16 @@ class TestLgammaOp(OpTest): class TestLgammaOpFp32(TestLgammaOp): - def init_dtype_type(self): self.dtype = np.float32 def test_check_grad_normal(self): - self.check_grad(['X'], - 'Out', - numeric_grad_delta=0.005, - check_eager=True) + self.check_grad( + ['X'], 'Out', numeric_grad_delta=0.005, check_eager=True + ) class TestLgammaOpApi(unittest.TestCase): - def test_lgamma(self): paddle.disable_static() self.dtype = "float32" diff --git a/python/paddle/fluid/tests/unittests/test_limit_by_capacity_op.py b/python/paddle/fluid/tests/unittests/test_limit_by_capacity_op.py index 3e604d25657eed44b0c9fc657a599df3e8b57072..02c71ea3a4147d75f4fc101125ab8bd1dc09ec18 100644 --- a/python/paddle/fluid/tests/unittests/test_limit_by_capacity_op.py +++ b/python/paddle/fluid/tests/unittests/test_limit_by_capacity_op.py @@ -43,17 +43,17 @@ def all_close(exp, out, n_worker): return np.allclose(exp.sum(0), out.sum(0)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLimitByCapacityInt64API(unittest.TestCase): - def init_test_case(self): - self.expert_count = np.random.randint(0, - 1000, - size=(len(self.capacity) * - self.n_worker)) - self.out = limit_by_capacity(self.expert_count, self.capacity, - self.n_worker) + self.expert_count 
= np.random.randint( + 0, 1000, size=(len(self.capacity) * self.n_worker) + ) + self.out = limit_by_capacity( + self.expert_count, self.capacity, self.n_worker + ) self.expert_count = self.expert_count.astype("int64") self.capacity = self.capacity.astype("int64") self.place = paddle.CUDAPlace(0) @@ -66,19 +66,23 @@ class TestLimitByCapacityInt64API(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - capacity = paddle.static.data('capacity', - shape=self.capacity.shape, - dtype="int64") + capacity = paddle.static.data( + 'capacity', shape=self.capacity.shape, dtype="int64" + ) expert_count_tensor = paddle.static.data( - 'ExpertCount', shape=self.expert_count.shape, dtype="int64") - out = utils._limit_by_capacity(expert_count_tensor, capacity, - self.n_worker) + 'ExpertCount', shape=self.expert_count.shape, dtype="int64" + ) + out = utils._limit_by_capacity( + expert_count_tensor, capacity, self.n_worker + ) exe = paddle.static.Executor(self.place) - res = exe.run(feed={ - 'capacity': self.capacity, - 'ExpertCount': self.expert_count, - }, - fetch_list=out) + res = exe.run( + feed={ + 'capacity': self.capacity, + 'ExpertCount': self.expert_count, + }, + fetch_list=out, + ) assert all_close(self.out, res[0], self.n_worker) @@ -86,8 +90,9 @@ class TestLimitByCapacityInt64API(unittest.TestCase): paddle.disable_static(self.place) capacity = paddle.to_tensor(self.capacity) expert_count_tensor = paddle.to_tensor(self.expert_count) - out = utils._limit_by_capacity(expert_count_tensor, capacity, - self.n_worker) + out = utils._limit_by_capacity( + expert_count_tensor, capacity, self.n_worker + ) assert all_close(self.out, out.numpy(), self.n_worker) def test_dygraph_api(self): @@ -96,10 +101,10 @@ class TestLimitByCapacityInt64API(unittest.TestCase): self.func_dygraph_api() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLimitByCapacityInt64API_SmallWorker(TestLimitByCapacityInt64API): - def setUp(self): self.capacity = np.array([100, 12000, 1200, 0, 4700, 1000, 57, 200]) self.n_worker = 1 diff --git a/python/paddle/fluid/tests/unittests/test_linalg_cond.py b/python/paddle/fluid/tests/unittests/test_linalg_cond.py index 175d44cb1cefee43a6ff271cd6232219497af40b..4724bbc6d080203847d0eb3ef3521defeb16fd49 100644 --- a/python/paddle/fluid/tests/unittests/test_linalg_cond.py +++ b/python/paddle/fluid/tests/unittests/test_linalg_cond.py @@ -31,9 +31,9 @@ def test_static_assert_true(self, x_list, p_list): exe = static.Executor() result = exe.run(feed={"X": x}, fetch_list=[output]) expected_output = np.linalg.cond(x, p) - np.testing.assert_allclose(result[0], - expected_output, - rtol=5e-5) + np.testing.assert_allclose( + result[0], expected_output, rtol=5e-5 + ) def test_dygraph_assert_true(self, x_list, p_list): @@ -42,9 +42,9 @@ def test_dygraph_assert_true(self, x_list, p_list): input_tensor = paddle.to_tensor(x) output = paddle.linalg.cond(input_tensor, p) expected_output = np.linalg.cond(x, p) - np.testing.assert_allclose(output.numpy(), - expected_output, - rtol=5e-5) + np.testing.assert_allclose( + output.numpy(), expected_output, rtol=5e-5 + ) def gen_input(): @@ -81,7 +81,6 @@ def gen_empty_input(): class API_TestStaticCond(unittest.TestCase): - def test_out(self): paddle.enable_static() # test calling results of 'cond' in static mode @@ -91,7 +90,6 @@ class 
API_TestStaticCond(unittest.TestCase): class API_TestDygraphCond(unittest.TestCase): - def func_out(self): paddle.disable_static() # test calling results of 'cond' in dynamic mode @@ -106,14 +104,13 @@ class API_TestDygraphCond(unittest.TestCase): class TestCondAPIError(unittest.TestCase): - def func_dygraph_api_error(self): paddle.disable_static() # test raising errors when 'cond' is called in dygraph mode p_list_error = ('fro_', '_nuc', -0.7, 0, 1.5, 3) x_list_n_n, x_list_m_n = gen_input() for p in p_list_error: - for x in (x_list_n_n + x_list_m_n): + for x in x_list_n_n + x_list_m_n: x_tensor = paddle.to_tensor(x) self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p) @@ -133,7 +130,7 @@ class TestCondAPIError(unittest.TestCase): p_list_error = ('f ro', 'fre', 'NUC', -1.6, 0, 5) x_list_n_n, x_list_m_n = gen_input() for p in p_list_error: - for x in (x_list_n_n + x_list_m_n): + for x in x_list_n_n + x_list_m_n: with static.program_guard(static.Program(), static.Program()): x_data = static.data("X", shape=x.shape, dtype=x.dtype) self.assertRaises(ValueError, paddle.linalg.cond, x_data, p) @@ -149,13 +146,13 @@ class TestCondAPIError(unittest.TestCase): paddle.enable_static() x_list_n_n, x_list_m_n = gen_empty_input() - for p in (p_list_n_n + p_list_m_n): + for p in p_list_n_n + p_list_m_n: for x in x_list_n_n: with static.program_guard(static.Program(), static.Program()): x_data = static.data("X", shape=x.shape, dtype=x.dtype) self.assertRaises(ValueError, paddle.linalg.cond, x_data, p) - for p in (p_list_n_n + p_list_m_n): + for p in p_list_n_n + p_list_m_n: for x in x_list_n_n: with static.program_guard(static.Program(), static.Program()): x_data = static.data("X", shape=x.shape, dtype=x.dtype) @@ -163,7 +160,6 @@ class TestCondAPIError(unittest.TestCase): class TestCondEmptyTensorInput(unittest.TestCase): - def func_dygraph_empty_tensor_input(self): paddle.disable_static() # test calling results of 'cond' when input is an empty tensor in dynamic mode diff --git a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py index f4180941be235bd2deb7b228d9a639c0d6f2c4a6..f627057ed242da74a37be1e526b3a9ade6eb0652 100644 --- a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py +++ b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py @@ -20,7 +20,6 @@ import paddle.fluid.core as core class LinalgLstsqTestCase(unittest.TestCase): - def setUp(self): self.devices = ["cpu"] self.init_config() @@ -39,15 +38,17 @@ class LinalgLstsqTestCase(unittest.TestCase): def generate_input(self): self._input_data_1 = np.random.random(self._input_shape_1).astype( - self.dtype) + self.dtype + ) self._input_data_2 = np.random.random(self._input_shape_2).astype( - self.dtype) + self.dtype + ) def generate_output(self): if len(self._input_shape_1) == 2: - out = np.linalg.lstsq(self._input_data_1, - self._input_data_2, - rcond=self.rcond) + out = np.linalg.lstsq( + self._input_data_1, self._input_data_2, rcond=self.rcond + ) self._output_solution = out[0] self._output_residuals = out[1] self._output_rank = out[2] @@ -58,9 +59,11 @@ class LinalgLstsqTestCase(unittest.TestCase): self._output_rank = [] self._output_sg_values = [] for i in range(self._input_shape_1[0]): - out = np.linalg.lstsq(self._input_data_1[i], - self._input_data_2[i], - rcond=self.rcond) + out = np.linalg.lstsq( + self._input_data_1[i], + self._input_data_2[i], + rcond=self.rcond, + ) self._output_solution.append(out[0]) self._output_residuals.append(out[1]) 
self._output_rank.append(out[2]) @@ -72,16 +75,15 @@ class LinalgLstsqTestCase(unittest.TestCase): for dev in self.devices: paddle.set_device(dev) place = paddle.CPUPlace() if dev == "cpu" else paddle.CUDAPlace(0) - x = paddle.to_tensor(self._input_data_1, - place=place, - dtype=self.dtype) - y = paddle.to_tensor(self._input_data_2, - place=place, - dtype=self.dtype) - results = paddle.linalg.lstsq(x, - y, - rcond=self.rcond, - driver=self.driver) + x = paddle.to_tensor( + self._input_data_1, place=place, dtype=self.dtype + ) + y = paddle.to_tensor( + self._input_data_2, place=place, dtype=self.dtype + ) + results = paddle.linalg.lstsq( + x, y, rcond=self.rcond, driver=self.driver + ) self._result_solution = results[0].numpy() self._result_residuals = results[1].numpy() self._result_rank = results[2].numpy() @@ -94,16 +96,15 @@ class LinalgLstsqTestCase(unittest.TestCase): for dev in self.devices: paddle.set_device(dev) place = paddle.CPUPlace() if dev == "cpu" else paddle.CUDAPlace(0) - x = paddle.to_tensor(self._input_data_1, - place=place, - dtype=self.dtype) - y = paddle.to_tensor(self._input_data_2, - place=place, - dtype=self.dtype) - results = paddle.linalg.lstsq(x, - y, - rcond=self.rcond, - driver=self.driver) + x = paddle.to_tensor( + self._input_data_1, place=place, dtype=self.dtype + ) + y = paddle.to_tensor( + self._input_data_2, place=place, dtype=self.dtype + ) + results = paddle.linalg.lstsq( + x, y, rcond=self.rcond, driver=self.driver + ) self._result_solution = results[0].numpy() self._result_residuals = results[1].numpy() self._result_rank = results[2].numpy() @@ -116,23 +117,25 @@ class LinalgLstsqTestCase(unittest.TestCase): paddle.set_device(dev) place = fluid.CPUPlace() if dev == "cpu" else fluid.CUDAPlace(0) with fluid.program_guard(fluid.Program(), fluid.Program()): - x = paddle.fluid.data(name="x", - shape=self._input_shape_1, - dtype=self._input_data_1.dtype) - y = paddle.fluid.data(name="y", - shape=self._input_shape_2, - dtype=self._input_data_2.dtype) - results = paddle.linalg.lstsq(x, - y, - rcond=self.rcond, - driver=self.driver) + x = paddle.fluid.data( + name="x", + shape=self._input_shape_1, + dtype=self._input_data_1.dtype, + ) + y = paddle.fluid.data( + name="y", + shape=self._input_shape_2, + dtype=self._input_data_2.dtype, + ) + results = paddle.linalg.lstsq( + x, y, rcond=self.rcond, driver=self.driver + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "x": self._input_data_1, - "y": self._input_data_2 - }, - fetch_list=[results]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": self._input_data_1, "y": self._input_data_2}, + fetch_list=[results], + ) self._result_solution = fetches[0] self._result_residuals = fetches[1] self._result_rank = fetches[2] @@ -141,44 +144,53 @@ class LinalgLstsqTestCase(unittest.TestCase): def assert_np_close(self): if len(self._input_shape_1) == 2: - np.testing.assert_allclose(self._result_solution, - self._output_solution, - rtol=1e-3) - if self._input_shape_1[-2] > self._input_shape_1[ - -1] and self._output_rank == self._input_shape_1[-1]: - np.testing.assert_allclose(self._result_residuals, - self._output_residuals, - rtol=1e-5) + np.testing.assert_allclose( + self._result_solution, self._output_solution, rtol=1e-3 + ) + if ( + self._input_shape_1[-2] > self._input_shape_1[-1] + and self._output_rank == self._input_shape_1[-1] + ): + np.testing.assert_allclose( + self._result_residuals, self._output_residuals, rtol=1e-5 + ) if self.driver in ("gelsy", 
"gelsd", "gelss"): - np.testing.assert_allclose(self._result_rank, - self._output_rank, - rtol=1e-5) + np.testing.assert_allclose( + self._result_rank, self._output_rank, rtol=1e-5 + ) if self.driver in ("gelsd", "gelss"): - np.testing.assert_allclose(self._result_sg_values, - self._output_sg_values, - rtol=1e-5) + np.testing.assert_allclose( + self._result_sg_values, self._output_sg_values, rtol=1e-5 + ) else: for i in range(len(self._output_solution)): - np.testing.assert_allclose(self._result_solution[i], - self._output_solution[i], - rtol=1e-3) - if self._input_shape_1[-2] > self._input_shape_1[ - -1] and self._output_rank[i] == self._input_shape_1[-1]: - np.testing.assert_allclose(self._result_residuals[i], - self._output_residuals[i], - rtol=1e-5) + np.testing.assert_allclose( + self._result_solution[i], + self._output_solution[i], + rtol=1e-3, + ) + if ( + self._input_shape_1[-2] > self._input_shape_1[-1] + and self._output_rank[i] == self._input_shape_1[-1] + ): + np.testing.assert_allclose( + self._result_residuals[i], + self._output_residuals[i], + rtol=1e-5, + ) if self.driver in ("gelsy", "gelsd", "gelss"): - np.testing.assert_allclose(self._result_rank[i], - self._output_rank[i], - rtol=1e-5) + np.testing.assert_allclose( + self._result_rank[i], self._output_rank[i], rtol=1e-5 + ) if self.driver in ("gelsd", "gelss"): - np.testing.assert_allclose(self._result_sg_values[i], - self._output_sg_values[i], - rtol=1e-5) + np.testing.assert_allclose( + self._result_sg_values[i], + self._output_sg_values[i], + rtol=1e-5, + ) class LinalgLstsqTestCase1(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float32' self.rcond = 1e-15 @@ -188,7 +200,6 @@ class LinalgLstsqTestCase1(LinalgLstsqTestCase): class LinalgLstsqTestCase2(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-15 @@ -198,7 +209,6 @@ class LinalgLstsqTestCase2(LinalgLstsqTestCase): class LinalgLstsqTestCase3(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-15 @@ -208,7 +218,6 @@ class LinalgLstsqTestCase3(LinalgLstsqTestCase): class LinalgLstsqTestCaseRcond(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-7 @@ -218,7 +227,6 @@ class LinalgLstsqTestCaseRcond(LinalgLstsqTestCase): class LinalgLstsqTestCaseGelsFloat32(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float32' self.rcond = None @@ -228,7 +236,6 @@ class LinalgLstsqTestCaseGelsFloat32(LinalgLstsqTestCase): class LinalgLstsqTestCaseGelsFloat64(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float32' self.rcond = None @@ -238,7 +245,6 @@ class LinalgLstsqTestCaseGelsFloat64(LinalgLstsqTestCase): class LinalgLstsqTestCaseGelssFloat64(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = None @@ -248,7 +254,6 @@ class LinalgLstsqTestCaseGelssFloat64(LinalgLstsqTestCase): class LinalgLstsqTestCaseGelsyFloat32(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float32' self.rcond = 1e-15 @@ -258,7 +263,6 @@ class LinalgLstsqTestCaseGelsyFloat32(LinalgLstsqTestCase): class LinalgLstsqTestCaseBatch1(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float32' self.rcond = 1e-15 @@ -268,7 +272,6 @@ class LinalgLstsqTestCaseBatch1(LinalgLstsqTestCase): class LinalgLstsqTestCaseBatch2(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-15 @@ -278,7 +281,6 @@ class LinalgLstsqTestCaseBatch2(LinalgLstsqTestCase): class 
LinalgLstsqTestCaseLarge1(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-15 @@ -288,7 +290,6 @@ class LinalgLstsqTestCaseLarge1(LinalgLstsqTestCase): class LinalgLstsqTestCaseLarge2(LinalgLstsqTestCase): - def init_config(self): self.dtype = 'float64' self.rcond = 1e-15 diff --git a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py index 03e671d883219b7a5f9c75dc24250a569b8765ca..8c39cbb7c9517fdb6997240d3d81ce3c3fa693f7 100644 --- a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py @@ -20,7 +20,6 @@ import paddle.fluid.core as core class LinalgPinvTestCase(unittest.TestCase): - def setUp(self): self.init_config() self.generate_input() @@ -33,11 +32,13 @@ class LinalgPinvTestCase(unittest.TestCase): self._input_shape = (5, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) def generate_output(self): - self._output_data = np.linalg.pinv(self._input_data, \ - rcond=self.rcond, hermitian=self.hermitian) + self._output_data = np.linalg.pinv( + self._input_data, rcond=self.rcond, hermitian=self.hermitian + ) def init_config(self): self.dtype = 'float64' @@ -48,9 +49,9 @@ class LinalgPinvTestCase(unittest.TestCase): for place in self.places: paddle.disable_static(place) x = paddle.to_tensor(self._input_data, place=place) - out = paddle.linalg.pinv(x, - rcond=self.rcond, - hermitian=self.hermitian).numpy() + out = paddle.linalg.pinv( + x, rcond=self.rcond, hermitian=self.hermitian + ).numpy() if (np.abs(out - self._output_data) < 1e-6).any(): pass else: @@ -65,16 +66,20 @@ class LinalgPinvTestCase(unittest.TestCase): places.append(fluid.CUDAPlace(0)) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): - x = paddle.fluid.data(name="input", - shape=self._input_shape, - dtype=self._input_data.dtype) - out = paddle.linalg.pinv(x, - rcond=self.rcond, - hermitian=self.hermitian) + x = paddle.fluid.data( + name="input", + shape=self._input_shape, + dtype=self._input_data.dtype, + ) + out = paddle.linalg.pinv( + x, rcond=self.rcond, hermitian=self.hermitian + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": self._input_data}, - fetch_list=[out]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": self._input_data}, + fetch_list=[out], + ) if (np.abs(fetches[0] - self._output_data) < 1e-6).any(): pass else: @@ -84,12 +89,12 @@ class LinalgPinvTestCase(unittest.TestCase): def test_grad(self): for place in self.places: - x = paddle.to_tensor(self._input_data, - place=place, - stop_gradient=False) - out = paddle.linalg.pinv(x, - rcond=self.rcond, - hermitian=self.hermitian) + x = paddle.to_tensor( + self._input_data, place=place, stop_gradient=False + ) + out = paddle.linalg.pinv( + x, rcond=self.rcond, hermitian=self.hermitian + ) try: out.backward() x_grad = x.grad @@ -99,75 +104,75 @@ class LinalgPinvTestCase(unittest.TestCase): class LinalgPinvTestCase1(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (4, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCase2(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (5, 4) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class 
LinalgPinvTestCaseBatch1(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCaseBatch2(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 4, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCaseBatch3(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 4) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCaseBatch4(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 6, 5, 4) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCaseBatchBig(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (2, 200, 300) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) class LinalgPinvTestCaseFP32(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) def init_config(self): self.dtype = 'float32' @@ -176,12 +181,12 @@ class LinalgPinvTestCaseFP32(LinalgPinvTestCase): class LinalgPinvTestCaseRcond(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) self._input_data = np.random.random(self._input_shape).astype( - self.dtype) + self.dtype + ) def init_config(self): self.dtype = 'float64' @@ -190,12 +195,12 @@ class LinalgPinvTestCaseRcond(LinalgPinvTestCase): class LinalgPinvTestCaseHermitian1(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (5, 5) np.random.seed(123) - x = np.random.random(self._input_shape).astype(self.dtype) + \ - 1J * np.random.random(self._input_shape).astype(self.dtype) + x = np.random.random(self._input_shape).astype( + self.dtype + ) + 1j * np.random.random(self._input_shape).astype(self.dtype) self._input_data = x + x.transpose().conj() def init_config(self): @@ -205,12 +210,12 @@ class LinalgPinvTestCaseHermitian1(LinalgPinvTestCase): class LinalgPinvTestCaseHermitian2(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) - x = np.random.random(self._input_shape).astype(self.dtype) + \ - 1J * np.random.random(self._input_shape).astype(self.dtype) + x = np.random.random(self._input_shape).astype( + self.dtype + ) + 1j * np.random.random(self._input_shape).astype(self.dtype) self._input_data = x + x.transpose((0, 2, 1)).conj() def init_config(self): @@ -220,12 +225,12 @@ class LinalgPinvTestCaseHermitian2(LinalgPinvTestCase): class LinalgPinvTestCaseHermitian3(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) - x = np.random.random(self._input_shape).astype(self.dtype) + \ - 1J * np.random.random(self._input_shape).astype(self.dtype) + x = np.random.random(self._input_shape).astype( + self.dtype + ) + 1j * np.random.random(self._input_shape).astype(self.dtype) self._input_data = x + x.transpose((0, 2, 1)).conj() def init_config(self): @@ -235,7 +240,6 @@ class LinalgPinvTestCaseHermitian3(LinalgPinvTestCase): class LinalgPinvTestCaseHermitian4(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (5, 5) np.random.seed(123) 
@@ -249,7 +253,6 @@ class LinalgPinvTestCaseHermitian4(LinalgPinvTestCase): class LinalgPinvTestCaseHermitian5(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) @@ -263,7 +266,6 @@ class LinalgPinvTestCaseHermitian5(LinalgPinvTestCase): class LinalgPinvTestCaseHermitianFP32(LinalgPinvTestCase): - def generate_input(self): self._input_shape = (3, 5, 5) np.random.seed(123) diff --git a/python/paddle/fluid/tests/unittests/test_linear.py b/python/paddle/fluid/tests/unittests/test_linear.py index eaf2cb528cc2a8e66a7373bfc70bbd3d4b3e709f..0cec046354294cd5f846670c3d7ede07a23ed879 100644 --- a/python/paddle/fluid/tests/unittests/test_linear.py +++ b/python/paddle/fluid/tests/unittests/test_linear.py @@ -21,14 +21,16 @@ import paddle.nn.functional as F class LinearTestCase(unittest.TestCase): - def setUp(self): self.dtype = 'float32' self.input = np.ones((3, 1, 2)).astype(self.dtype) self.weight = np.ones((2, 2)).astype(self.dtype) self.bias = np.ones((2)).astype(self.dtype) - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def functional(self, place): paddle.disable_static(place) @@ -46,17 +48,18 @@ class LinearTestCase(unittest.TestCase): learning_rate=1.0, trainable=False, regularizer=None, - initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0)) + initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0), + ) bias_attr = fluid.ParamAttr( name="linear_bias", learning_rate=1.0, trainable=False, regularizer=None, - initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0)) - linear = paddle.nn.Linear(2, - 2, - weight_attr=weight_attr, - bias_attr=bias_attr) + initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0), + ) + linear = paddle.nn.Linear( + 2, 2, weight_attr=weight_attr, bias_attr=bias_attr + ) y = linear(input) return y.numpy() @@ -75,18 +78,21 @@ class LinearTestCase(unittest.TestCase): if not paddle.is_compiled_with_cuda(): return paddle.seed(100) - linear = paddle.nn.Linear(2, - 3, - weight_attr=paddle.nn.initializer.Normal( - 0, 1.)) + linear = paddle.nn.Linear( + 2, 3, weight_attr=paddle.nn.initializer.Normal(0, 1.0) + ) paddle.nn.utils._stride_column(linear.weight) - expect = [[1.4349908, -0.8099171, -2.64788], - [-1.4981681, -1.1784115, -0.023253186]] + expect = [ + [1.4349908, -0.8099171, -2.64788], + [-1.4981681, -1.1784115, -0.023253186], + ] np.testing.assert_allclose(linear.weight.numpy(), expect, rtol=1e-05) linear = paddle.nn.Linear(2, 3) - expect = [[0.73261100, 0.43836895, 0.07908206], - [0.85075015, -1.04724526, 0.64371765]] + expect = [ + [0.73261100, 0.43836895, 0.07908206], + [0.85075015, -1.04724526, 0.64371765], + ] np.testing.assert_allclose(linear.weight.numpy(), expect, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py index 526601bbe680106ce1111246408c2f79d35fd852..203a4ac62ca49d3650a455e5afd71be27c4e472a 100755 --- a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py @@ -20,9 +20,16 @@ from op_test import OpTest class LinearChainCrfForward(object): - - def __init__(self, seq_start_positions, emission_weights, emission_row_max, - emission_exps, transition_weights, transition_exps, labels): + def __init__( + self, + seq_start_positions, + 
emission_weights, + emission_row_max, + emission_exps, + transition_weights, + transition_exps, + labels, + ): self.tag_num = emission_weights.shape[1] self.seq_num = len(seq_start_positions) - 1 @@ -46,8 +53,9 @@ class LinearChainCrfForward(object): # The output of linear chain crf operator. # alpha is a memo table in dynamic programming to calculate # nomalization factor. - self.alpha = np.zeros((seq_start_positions[-1], self.tag_num), - dtype="float64") + self.alpha = np.zeros( + (seq_start_positions[-1], self.tag_num), dtype="float64" + ) self.log_likelihood = np.zeros((self.seq_num, 1)) def _l1_norm(self, x): @@ -57,7 +65,7 @@ class LinearChainCrfForward(object): def _forward_a_sequence(self, x, x_row_max, x_exps, label, alpha): seq_len = x_row_max.shape[0] - log_likelihood = 0. + log_likelihood = 0.0 for i in range(self.tag_num): alpha[0, i] = self.a_exps[i] * x_exps[0, i] @@ -66,22 +74,21 @@ class LinearChainCrfForward(object): # calculate the unnormalized logits of the normalization factor. for k in range(1, seq_len): for i in range(self.tag_num): - s = 0. + s = 0.0 for j in range(self.tag_num): s += alpha[k - 1, j] * self.w_exps[j, i] alpha[k, i] = x_exps[k, i] * s log_likelihood -= x_row_max[k] + np.log(self._l1_norm(alpha[k, :])) - s = 0. + s = 0.0 for i in range(self.tag_num): s += alpha[-1, i] * self.b_exps[i] log_likelihood -= np.log(s) # calculate the nominator part. - log_likelihood += (self.a[label[0]] + x[0, label[0]] + - self.b[label[-1]]) + log_likelihood += self.a[label[0]] + x[0, label[0]] + self.b[label[-1]] for k in range(1, seq_len): - log_likelihood += (x[k, label[k]] + self.w[label[k - 1], label[k]]) + log_likelihood += x[k, label[k]] + self.w[label[k - 1], label[k]] return -log_likelihood def crf_forward_compute(self): @@ -91,14 +98,16 @@ class LinearChainCrfForward(object): if start >= end: continue self.log_likelihood[i] = self._forward_a_sequence( - self.x[start:end, :], self.x_row_max[start:end, :], - self.x_exps[start:end, :], self.labels[start:end, :], - self.alpha[start:end, :]) + self.x[start:end, :], + self.x_row_max[start:end, :], + self.x_exps[start:end, :], + self.labels[start:end, :], + self.alpha[start:end, :], + ) return self.alpha, self.log_likelihood class TestLinearChainCrfOp(OpTest): - def set_test_data(self): # TODO(caoying) Fix the unittest by: add the boundary cases when # sequence lengths are 1, 2, and 3. 
@@ -114,34 +123,41 @@ class TestLinearChainCrfOp(OpTest): lod[-1].append(random.randint(1, MAX_SEQ_LEN)) seq_start_pos.append(seq_start_pos[-1] + lod[-1][-1]) emission = np.random.uniform( - -1, 1, [seq_start_pos[-1], TAG_NUM]).astype("float64") + -1, 1, [seq_start_pos[-1], TAG_NUM] + ).astype("float64") emission_row_max = np.amax(emission, axis=1, keepdims=True) emission_exps = np.exp(emission - emission_row_max) - transition = np.random.uniform(-0.5, 0.5, - [TAG_NUM + 2, TAG_NUM]).astype("float64") + transition = np.random.uniform( + -0.5, 0.5, [TAG_NUM + 2, TAG_NUM] + ).astype("float64") transition_exps = np.exp(transition) - labels = np.random.randint(low=0, - high=TAG_NUM, - size=(seq_start_pos[-1], 1), - dtype="int64") + labels = np.random.randint( + low=0, high=TAG_NUM, size=(seq_start_pos[-1], 1), dtype="int64" + ) self.inputs = { "Emission": (emission, lod), "Transition": transition, - "Label": (labels, lod) + "Label": (labels, lod), } - crf = LinearChainCrfForward(seq_start_pos, emission, emission_row_max, - emission_exps, transition, transition_exps, - labels) + crf = LinearChainCrfForward( + seq_start_pos, + emission, + emission_row_max, + emission_exps, + transition, + transition_exps, + labels, + ) alpha, log_likelihood = crf.crf_forward_compute() self.outputs = { "Alpha": alpha, "EmissionExps": emission_exps, "TransitionExps": transition_exps, - "LogLikelihood": log_likelihood + "LogLikelihood": log_likelihood, } def setUp(self): @@ -155,20 +171,19 @@ class TestLinearChainCrfOp(OpTest): self.check_grad(["Emission", "Transition"], "LogLikelihood") def test_check_grad_ignore_transition(self): - self.check_grad(["Emission"], - "LogLikelihood", - no_grad_set=set("Transition")) + self.check_grad( + ["Emission"], "LogLikelihood", no_grad_set=set("Transition") + ) class TestLinearChainCrfPaddingTensor(OpTest): - def seq_pad(self, data, length): max_len = np.max(length) shape = [len(length), max_len] + list(data.shape[1:]) padded = np.zeros(shape).astype(data.dtype) offset = 0 for i, l in enumerate(length): - padded[i, 0:l] = data[offset:offset + l] + padded[i, 0:l] = data[offset : offset + l] offset += l return padded @@ -179,7 +194,7 @@ class TestLinearChainCrfPaddingTensor(OpTest): padded = np.ones(shape).astype(data.dtype) offset = 0 for i, l in enumerate(length): - padded[i, 0:l] = data[offset:offset + l] + padded[i, 0:l] = data[offset : offset + l] offset += l return padded @@ -196,32 +211,39 @@ class TestLinearChainCrfPaddingTensor(OpTest): lod[-1].append(random.randint(1, MAX_SEQ_LEN)) seq_start_pos.append(seq_start_pos[-1] + lod[-1][-1]) emission = np.random.uniform( - -1, 1, [seq_start_pos[-1], TAG_NUM]).astype("float64") + -1, 1, [seq_start_pos[-1], TAG_NUM] + ).astype("float64") emission_row_max = np.amax(emission, axis=1, keepdims=True) emission_exps = np.exp(emission - emission_row_max) - transition = np.random.uniform(-0.5, 0.5, - [TAG_NUM + 2, TAG_NUM]).astype("float64") + transition = np.random.uniform( + -0.5, 0.5, [TAG_NUM + 2, TAG_NUM] + ).astype("float64") transition_exps = np.exp(transition) - labels = np.random.randint(low=0, - high=TAG_NUM, - size=(seq_start_pos[-1], 1), - dtype="int64") + labels = np.random.randint( + low=0, high=TAG_NUM, size=(seq_start_pos[-1], 1), dtype="int64" + ) self.inputs = { "Emission": self.seq_pad(emission, lod[0]), "Transition": transition, "Label": self.seq_pad(labels, lod[0]), - "Length": np.array(lod).astype("int64") + "Length": np.array(lod).astype("int64"), } - crf = LinearChainCrfForward(seq_start_pos, emission, 
emission_row_max, - emission_exps, transition, transition_exps, - labels) + crf = LinearChainCrfForward( + seq_start_pos, + emission, + emission_row_max, + emission_exps, + transition, + transition_exps, + labels, + ) alpha, log_likelihood = crf.crf_forward_compute() self.outputs = { "Alpha": self.seq_pad(alpha, lod[0]), "EmissionExps": self.seq_pad_exps(emission_exps, lod[0]), "TransitionExps": transition_exps, - "LogLikelihood": log_likelihood + "LogLikelihood": log_likelihood, } def setUp(self): @@ -235,9 +257,9 @@ class TestLinearChainCrfPaddingTensor(OpTest): self.check_grad(["Emission", "Transition"], "LogLikelihood") def test_check_grad_ignore_transition(self): - self.check_grad(["Emission"], - "LogLikelihood", - no_grad_set=set("Transition")) + self.check_grad( + ["Emission"], "LogLikelihood", no_grad_set=set("Transition") + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py index cd5d799a5bd85d2c4f0006b5363a172cb0448a8e..f52e597d66549fd82c328dc12d8456f74999a7cf 100755 --- a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py @@ -23,13 +23,15 @@ from paddle.fluid import Program, program_guard from paddle.nn.functional import interpolate -def linear_interp_np(input, - out_w, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): +def linear_interp_np( + input, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): if data_layout == "NHWC": input = np.transpose(input, (0, 2, 1)) # NHWC => NCHW if out_size is not None: @@ -40,7 +42,7 @@ def linear_interp_np(input, ratio_w = 0.0 if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: ratio_w = 1.0 * in_w / out_w @@ -48,22 +50,23 @@ def linear_interp_np(input, out = np.zeros((batch_size, channel, out_w)) for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, - j] = w2lambda * input[:, :, w] + w1lambda * input[:, :, w + wid] + out[:, :, j] = ( + w2lambda * input[:, :, w] + w1lambda * input[:, :, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 1)) # NCHW => NHWC @@ -72,7 +75,6 @@ def linear_interp_np(input, class TestLinearInterpOp(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -91,9 +93,15 @@ class TestLinearInterpOp(OpTest): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = linear_interp_np( + input_np, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -106,7 +114,7 @@ class TestLinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } 
self.outputs = {'Out': output_np} @@ -123,59 +131,63 @@ class TestLinearInterpOp(OpTest): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 - self.scale = 0. - self.out_size = np.array([ - 50, - ]).astype("int32") + self.scale = 0.0 + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 1 class TestLinearInterpOpDataLayout(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 - self.scale = 0. - self.out_size = np.array([ - 50, - ]).astype("int32") + self.scale = 0.0 + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 1 self.data_layout = 'NHWC' class TestLinearInterpOpAlignMode(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 - self.scale = 0. - self.out_size = np.array([ - 50, - ]).astype("int32") + self.scale = 0.0 + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 0 class TestLinearInterpOpScale(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 self.scale = 0.5 - self.out_size = np.array([ - 50, - ]).astype("int32") + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 0 class TestLinearInterpOpSizeTensor(TestLinearInterpOp): - def setUp(self): self.out_size = None self.actual_shape = None @@ -196,9 +208,15 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = linear_interp_np( + input_np, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None and self.shape_by_1Dtensor: @@ -208,8 +226,9 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): else: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs = { @@ -218,78 +237,88 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } self.outputs = {'Out': output_np} class TestResizeLinearAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[1, 3, 64], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[1], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[1], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = fluid.layers.resize_linear(x, - out_shape=[ - 128, - ], - align_mode=1, - align_corners=False) - out2 = fluid.layers.resize_linear(x, - out_shape=[128], - align_mode=1, - align_corners=False) - out3 = fluid.layers.resize_linear(x, - out_shape=shape_tensor, - align_mode=1, - align_corners=False) - out4 = fluid.layers.resize_linear(x, - out_shape=[ - 128, - ], - actual_shape=actual_size, - align_mode=1, - 
align_corners=False) - out5 = fluid.layers.resize_linear(x, - scale=scale_tensor, - align_mode=1, - align_corners=False) - - out6 = interpolate(x, - scale_factor=scale_tensor, - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') - out7 = interpolate(x, - size=[ - 128, - ], - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') - out8 = interpolate(x, - size=shape_tensor, - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = fluid.layers.resize_linear( + x, + out_shape=[ + 128, + ], + align_mode=1, + align_corners=False, + ) + out2 = fluid.layers.resize_linear( + x, out_shape=[128], align_mode=1, align_corners=False + ) + out3 = fluid.layers.resize_linear( + x, out_shape=shape_tensor, align_mode=1, align_corners=False + ) + out4 = fluid.layers.resize_linear( + x, + out_shape=[ + 128, + ], + actual_shape=actual_size, + align_mode=1, + align_corners=False, + ) + out5 = fluid.layers.resize_linear( + x, scale=scale_tensor, align_mode=1, align_corners=False + ) + + out6 = interpolate( + x, + scale_factor=scale_tensor, + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) + out7 = interpolate( + x, + size=[ + 128, + ], + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) + out8 = interpolate( + x, + size=shape_tensor, + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) x_data = np.random.random((1, 3, 64)).astype("float32") dim_data = np.array([128]).astype("int32") - shape_data = np.array([ - 128, - ]).astype("int32") - actual_size_data = np.array([ - 128, - ]).astype("int32") + shape_data = np.array( + [ + 128, + ] + ).astype("int32") + actual_size_data = np.array( + [ + 128, + ] + ).astype("int32") scale_data = np.array([2.0]).astype("float32") if core.is_compiled_with_cuda(): @@ -305,46 +334,45 @@ class TestResizeLinearAPI(unittest.TestCase): "dim": dim_data, "shape_tensor": shape_data, "actual_size": actual_size_data, - "scale_tensor": scale_data + "scale_tensor": scale_data, }, fetch_list=[out1, out2, out3, out4, out5, out6, out7, out8], - return_numpy=True) + return_numpy=True, + ) - expect_res = linear_interp_np(x_data, - out_w=128, - align_mode=1, - align_corners=False) + expect_res = linear_interp_np( + x_data, out_w=128, align_mode=1, align_corners=False + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestLinearInterpOpAPI2_0(unittest.TestCase): - def test_case(self): # dygraph x_data = np.random.random((1, 3, 128)).astype("float32") - us_1 = paddle.nn.Upsample(size=[ - 64, - ], - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') + us_1 = paddle.nn.Upsample( + size=[ + 64, + ], + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) interp = us_1(x) - expect = linear_interp_np(x_data, - out_w=64, - align_mode=1, - align_corners=False) + expect = linear_interp_np( + x_data, out_w=64, align_mode=1, align_corners=False + ) np.testing.assert_allclose(interp.numpy(), expect, rtol=1e-05) class TestResizeLinearOpUint8(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -357,9 +385,14 @@ class TestResizeLinearOpUint8(OpTest): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode) + 
output_np = linear_interp_np( + input_np, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -369,7 +402,7 @@ class TestResizeLinearOpUint8(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } self.outputs = {'Out': output_np} @@ -383,42 +416,48 @@ class TestResizeLinearOpUint8(OpTest): self.interp_method = 'linear' self.input_shape = [2, 3, 100] self.out_w = 50 - self.scale = 0. - self.out_size = np.array([ - 50, - ]).astype("int32") + self.scale = 0.0 + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = True self.align_mode = 1 class TestLinearInterpOpException(unittest.TestCase): - def test_exception(self): - def input_shape_error(): x1 = fluid.data(name="x1", shape=[1], dtype="float32") - out = fluid.layers.resize_linear(x1, - out_shape=[ - 256, - ], - data_format='NCW') + out = fluid.layers.resize_linear( + x1, + out_shape=[ + 256, + ], + data_format='NCW', + ) def data_format_error(): x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32") - out = fluid.layers.resize_linear(x2, - out_shape=[ - 256, - ], - data_format='NHWCD') + out = fluid.layers.resize_linear( + x2, + out_shape=[ + 256, + ], + data_format='NHWCD', + ) def out_shape_error(): x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32") - out = fluid.layers.resize_linear(x3, - out_shape=[ - 256, - 256, - ], - data_format='NHWC') + out = fluid.layers.resize_linear( + x3, + out_shape=[ + 256, + 256, + ], + data_format='NHWC', + ) self.assertRaises(ValueError, input_shape_error) self.assertRaises(ValueError, data_format_error) @@ -426,36 +465,41 @@ class TestLinearInterpOpException(unittest.TestCase): class TestLinearInterpOpError(unittest.TestCase): - def test_error(self): with program_guard(Program(), Program()): def input_shape_error(): x1 = fluid.data(name="x1", shape=[1], dtype="float32") - out1 = paddle.nn.Upsample(size=[ - 256, - ], - data_format='NCW', - mode='linear') + out1 = paddle.nn.Upsample( + size=[ + 256, + ], + data_format='NCW', + mode='linear', + ) out1_res = out1(x1) def data_format_error(): x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32") - out2 = paddle.nn.Upsample(size=[ - 256, - ], - data_format='NHWCD', - mode='linear') + out2 = paddle.nn.Upsample( + size=[ + 256, + ], + data_format='NHWCD', + mode='linear', + ) out2_res = out2(x2) def out_shape_error(): x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32") - out3 = paddle.nn.Upsample(size=[ - 256, - 256, - ], - data_format='NHWC', - mode='linear') + out3 = paddle.nn.Upsample( + size=[ + 256, + 256, + ], + data_format='NHWC', + mode='linear', + ) out3_res = out3(x3) self.assertRaises(ValueError, input_shape_error) diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py index 9fbbc8ceaa0fcdb11efe65180bc6e515504651ad..30e7d2ed97b054f239abafee1ee21c370f1f5902 100755 --- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py @@ -23,18 +23,20 @@ from paddle.fluid import Program, program_guard from paddle.nn.functional import interpolate -def linear_interp_test(x, - OutSize=None, - SizeTensor=None, - Scale=None, - data_layout='NCHW', - out_d=-1, - out_h=-1, - out_w=-1, - 
scale=[], - interp_method='linear', - align_corners=True, - align_mode=0): +def linear_interp_test( + x, + OutSize=None, + SizeTensor=None, + Scale=None, + data_layout='NCHW', + out_d=-1, + out_h=-1, + out_w=-1, + scale=[], + interp_method='linear', + align_corners=True, + align_mode=0, +): if isinstance(scale, float) or isinstance(scale, int): scale_list = [] for _ in range(len(x.shape) - 2): @@ -44,21 +46,35 @@ def linear_interp_test(x, scale = list(map(float, scale)) if SizeTensor is not None: if not isinstance(SizeTensor, list) and not isinstance( - SizeTensor, tuple): + SizeTensor, tuple + ): SizeTensor = [SizeTensor] - return paddle._C_ops.linear_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, scale, - interp_method, align_corners, align_mode) - - -def linear_interp_np(input, - out_w, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): + return paddle._C_ops.linear_interp( + x, + OutSize, + SizeTensor, + Scale, + data_layout, + out_d, + out_h, + out_w, + scale, + interp_method, + align_corners, + align_mode, + ) + + +def linear_interp_np( + input, + out_w, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): if data_layout == "NHWC": input = np.transpose(input, (0, 2, 1)) # NHWC => NCHW if out_size is not None: @@ -69,7 +85,7 @@ def linear_interp_np(input, ratio_w = 0.0 if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -80,22 +96,23 @@ def linear_interp_np(input, out = np.zeros((batch_size, channel, out_w)) for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, - j] = w2lambda * input[:, :, w] + w1lambda * input[:, :, w + wid] + out[:, :, j] = ( + w2lambda * input[:, :, w] + w1lambda * input[:, :, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 1)) # NCHW => NHWC @@ -104,7 +121,6 @@ def linear_interp_np(input, class TestLinearInterpOp(OpTest): - def setUp(self): self.python_api = linear_interp_test self.out_size = None @@ -129,9 +145,16 @@ class TestLinearInterpOp(OpTest): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, self.scale, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = linear_interp_np( + input_np, + out_w, + self.scale, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -143,7 +166,7 @@ class TestLinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale > 0: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -165,58 +188,62 @@ class TestLinearInterpOp(OpTest): self.input_shape = [1, 3, 100] self.out_w = 50 self.scale = 0.5 - self.out_size = np.array([ - 50, - ]).astype("int32") + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") 
self.align_corners = False self.align_mode = 1 class TestLinearInterpOpDataLayout(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 100, 3] self.out_w = 50 self.scale = 0.5 - self.out_size = np.array([ - 50, - ]).astype("int32") + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 1 self.data_layout = 'NHWC' class TestLinearInterpOpAlignMode(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 self.scale = 0.5 - self.out_size = np.array([ - 50, - ]).astype("int32") + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 0 class TestLinearInterpOpScale(TestLinearInterpOp): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 100] self.out_w = 50 self.scale = 0.8 - self.out_size = np.array([ - 50, - ]).astype("int32") + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = False self.align_mode = 0 class TestLinearInterpOpSizeTensor(TestLinearInterpOp): - def setUp(self): self.python_api = linear_interp_test self.out_size = None @@ -242,9 +269,16 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, 0, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = linear_interp_np( + input_np, + out_w, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None and self.shape_by_1Dtensor: @@ -254,8 +288,9 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): else: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs = { @@ -263,7 +298,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale > 0: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -275,72 +310,82 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp): class TestResizeLinearAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[1, 3, 64], dtype="float32") dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[1], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[1], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") - - out1 = fluid.layers.resize_linear(x, - out_shape=[ - 128, - ], - align_mode=1, - align_corners=False) - out2 = fluid.layers.resize_linear(x, - out_shape=[128], - align_mode=1, - align_corners=False) - out3 = fluid.layers.resize_linear(x, - out_shape=shape_tensor, - align_mode=1, - align_corners=False) - out4 = fluid.layers.resize_linear(x, - out_shape=[ - 128, - ], - actual_shape=actual_size, - align_mode=1, - align_corners=False) - out5 = fluid.layers.resize_linear(x, - scale=scale_tensor, - align_mode=1, - align_corners=False) - - out6 = interpolate(x, - scale_factor=scale_tensor, - mode='linear', - align_mode=1, - 
align_corners=False, - data_format='NCW') - out7 = interpolate(x, - size=[ - 128, - ], - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') - out8 = interpolate(x, - size=shape_tensor, - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) + + out1 = fluid.layers.resize_linear( + x, + out_shape=[ + 128, + ], + align_mode=1, + align_corners=False, + ) + out2 = fluid.layers.resize_linear( + x, out_shape=[128], align_mode=1, align_corners=False + ) + out3 = fluid.layers.resize_linear( + x, out_shape=shape_tensor, align_mode=1, align_corners=False + ) + out4 = fluid.layers.resize_linear( + x, + out_shape=[ + 128, + ], + actual_shape=actual_size, + align_mode=1, + align_corners=False, + ) + out5 = fluid.layers.resize_linear( + x, scale=scale_tensor, align_mode=1, align_corners=False + ) + + out6 = interpolate( + x, + scale_factor=scale_tensor, + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) + out7 = interpolate( + x, + size=[ + 128, + ], + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) + out8 = interpolate( + x, + size=shape_tensor, + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) x_data = np.random.random((1, 3, 64)).astype("float32") dim_data = np.array([128]).astype("int32") - shape_data = np.array([ - 128, - ]).astype("int32") - actual_size_data = np.array([ - 128, - ]).astype("int32") + shape_data = np.array( + [ + 128, + ] + ).astype("int32") + actual_size_data = np.array( + [ + 128, + ] + ).astype("int32") scale_data = np.array([2.0]).astype("float32") if core.is_compiled_with_cuda(): @@ -356,44 +401,43 @@ class TestResizeLinearAPI(unittest.TestCase): "dim": dim_data, "shape_tensor": shape_data, "actual_size": actual_size_data, - "scale_tensor": scale_data + "scale_tensor": scale_data, }, fetch_list=[out1, out2, out3, out4, out5, out6, out7, out8], - return_numpy=True) + return_numpy=True, + ) - expect_res = linear_interp_np(x_data, - out_w=128, - align_mode=1, - align_corners=False) + expect_res = linear_interp_np( + x_data, out_w=128, align_mode=1, align_corners=False + ) for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestLinearInterpOpAPI2_0(unittest.TestCase): - def test_case(self): # dygraph x_data = np.random.random((1, 3, 128)).astype("float32") - us_1 = paddle.nn.Upsample(size=[64], - mode='linear', - align_mode=1, - align_corners=False, - data_format='NCW') + us_1 = paddle.nn.Upsample( + size=[64], + mode='linear', + align_mode=1, + align_corners=False, + data_format='NCW', + ) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) interp = us_1(x) - expect = linear_interp_np(x_data, - out_w=64, - align_mode=1, - align_corners=False) + expect = linear_interp_np( + x_data, out_w=64, align_mode=1, align_corners=False + ) np.testing.assert_allclose(interp.numpy(), expect, rtol=1e-05) class TestResizeLinearOpUint8(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -410,9 +454,15 @@ class TestResizeLinearOpUint8(OpTest): else: out_w = self.out_w - output_np = linear_interp_np(input_np, out_w, 0, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode) + output_np = linear_interp_np( + input_np, + out_w, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = 
self.out_size @@ -421,7 +471,7 @@ class TestResizeLinearOpUint8(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } if self.scale > 0: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -441,42 +491,48 @@ class TestResizeLinearOpUint8(OpTest): self.interp_method = 'linear' self.input_shape = [2, 3, 100] self.out_w = 50 - self.scale = 0. - self.out_size = np.array([ - 50, - ]).astype("int32") + self.scale = 0.0 + self.out_size = np.array( + [ + 50, + ] + ).astype("int32") self.align_corners = True self.align_mode = 1 class TestLinearInterpOpException(unittest.TestCase): - def test_exception(self): - def input_shape_error(): x1 = fluid.data(name="x1", shape=[1], dtype="float32") - out = fluid.layers.resize_linear(x1, - out_shape=[ - 256, - ], - data_format='NCW') + out = fluid.layers.resize_linear( + x1, + out_shape=[ + 256, + ], + data_format='NCW', + ) def data_format_error(): x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32") - out = fluid.layers.resize_linear(x2, - out_shape=[ - 256, - ], - data_format='NHWCD') + out = fluid.layers.resize_linear( + x2, + out_shape=[ + 256, + ], + data_format='NHWCD', + ) def out_shape_error(): x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32") - out = fluid.layers.resize_linear(x3, - out_shape=[ - 256, - 256, - ], - data_format='NHWC') + out = fluid.layers.resize_linear( + x3, + out_shape=[ + 256, + 256, + ], + data_format='NHWC', + ) self.assertRaises(ValueError, input_shape_error) self.assertRaises(ValueError, data_format_error) @@ -484,29 +540,28 @@ class TestLinearInterpOpException(unittest.TestCase): class TestLinearInterpOpError(unittest.TestCase): - def test_error(self): with program_guard(Program(), Program()): def input_shape_error(): x1 = fluid.data(name="x1", shape=[1], dtype="float32") - out1 = paddle.nn.Upsample(size=[256], - data_format='NCW', - mode='linear') + out1 = paddle.nn.Upsample( + size=[256], data_format='NCW', mode='linear' + ) out1_res = out1(x1) def data_format_error(): x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32") - out2 = paddle.nn.Upsample(size=[256], - data_format='NHWCD', - mode='linear') + out2 = paddle.nn.Upsample( + size=[256], data_format='NHWCD', mode='linear' + ) out2_res = out2(x2) def out_shape_error(): x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32") - out3 = paddle.nn.Upsample(size=[256, 256], - data_format='NHWC', - mode='linear') + out3 = paddle.nn.Upsample( + size=[256, 256], data_format='NHWC', mode='linear' + ) out3_res = out3(x3) self.assertRaises(ValueError, input_shape_error) @@ -514,10 +569,10 @@ class TestLinearInterpOpError(unittest.TestCase): self.assertRaises(ValueError, out_shape_error) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLinearInterpOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 'linear' self.input_shape = [1, 3, 64] @@ -531,12 +586,14 @@ class TestLinearInterpOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - scale_factor=self.scale, - mode=self.interp_method, - align_mode=self.align_mode, - align_corners=self.align_corners, - data_format=self.data_layout) + y = interpolate( + x, + scale_factor=self.scale, + mode=self.interp_method, 
+ align_mode=self.align_mode, + align_corners=self.align_corners, + data_format=self.data_layout, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py index c38d5b1fe591b61a43ae9ae44fd121807d6a314b..3549d46ec9eec9e1780cee01b21e15d5798354c7 100644 --- a/python/paddle/fluid/tests/unittests/test_linspace.py +++ b/python/paddle/fluid/tests/unittests/test_linspace.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestLinspaceOpCommonCase(OpTest): - def setUp(self): self.op_type = "linspace" self.python_api = paddle.linspace @@ -31,7 +30,7 @@ class TestLinspaceOpCommonCase(OpTest): self.inputs = { 'Start': np.array([0]).astype(dtype), 'Stop': np.array([10]).astype(dtype), - 'Num': np.array([11]).astype('int32') + 'Num': np.array([11]).astype('int32'), } self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)} @@ -42,7 +41,6 @@ class TestLinspaceOpCommonCase(OpTest): class TestLinspaceOpReverseCase(OpTest): - def setUp(self): self.op_type = "linspace" self.python_api = paddle.linspace @@ -50,7 +48,7 @@ class TestLinspaceOpReverseCase(OpTest): self.inputs = { 'Start': np.array([10]).astype(dtype), 'Stop': np.array([0]).astype(dtype), - 'Num': np.array([11]).astype('int32') + 'Num': np.array([11]).astype('int32'), } self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)} @@ -61,7 +59,6 @@ class TestLinspaceOpReverseCase(OpTest): class TestLinspaceOpNumOneCase(OpTest): - def setUp(self): self.op_type = "linspace" self.python_api = paddle.linspace @@ -69,7 +66,7 @@ class TestLinspaceOpNumOneCase(OpTest): self.inputs = { 'Start': np.array([10]).astype(dtype), 'Stop': np.array([0]).astype(dtype), - 'Num': np.array([1]).astype('int32') + 'Num': np.array([1]).astype('int32'), } self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)} @@ -80,7 +77,6 @@ class TestLinspaceOpNumOneCase(OpTest): class TestLinspaceAPI(unittest.TestCase): - def test_variable_input1(self): start = paddle.full(shape=[1], fill_value=0, dtype='float32') stop = paddle.full(shape=[1], fill_value=10, dtype='float32') @@ -106,17 +102,16 @@ class TestLinspaceAPI(unittest.TestCase): out_2 = paddle.linspace(0, 10, 5, dtype=np.float32) out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32) exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), fetch_list=[out_1, out_2, out_3] + ) assert np.array_equal(res_1, res_2) def test_name(self): with paddle.static.program_guard(paddle.static.Program()): - out = paddle.linspace(0, - 10, - 5, - dtype='float32', - name='linspace_res') + out = paddle.linspace( + 0, 10, 5, dtype='float32', name='linspace_res' + ) assert 'linspace_res' in out.name def test_imperative(self): @@ -139,7 +134,6 @@ class TestLinspaceAPI(unittest.TestCase): class TestLinspaceOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py index 897689d55971428d2793c05b8af4fb393c014a04..3bac2d40ae3a20c31391df3bdb617f175e20c3bd 100644 --- a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py +++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py @@ -50,18 +50,21 @@ def 
run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id): config = fluid.DistributeTranspilerConfig() config.sync_mode = sync_mode t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id, - pservers=pserver_endpoints, - trainers=trainers, - sync_mode=sync_mode) + t.transpile( + trainer_id, + pservers=pserver_endpoints, + trainers=trainers, + sync_mode=sync_mode, + ) pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) exe.run(pserver_prog) -def run_pserver_with_empty_block(use_cuda, sync_mode, ip, port, trainers, - trainer_id): +def run_pserver_with_empty_block( + use_cuda, sync_mode, ip, port, trainers, trainer_id +): remove_ps_flag(os.getpid()) x = fluid.layers.data(name='x', shape=[1], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None, bias_attr=False) @@ -87,15 +90,17 @@ def run_pserver_with_empty_block(use_cuda, sync_mode, ip, port, trainers, config.slice_var_up = False t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id, - pservers=pserver_endpoints, - trainers=trainers, - sync_mode=sync_mode) + t.transpile( + trainer_id, + pservers=pserver_endpoints, + trainers=trainers, + sync_mode=sync_mode, + ) pserver_prog = t.get_pserver_program(ps2) # pserver2 have no parameter - assert (len(pserver_prog.blocks) == 2) - assert (len(pserver_prog.blocks[1].ops) == 0) + assert len(pserver_prog.blocks) == 2 + assert len(pserver_prog.blocks[1].ops) == 0 pserver_startup = t.get_startup_program(ps2, pserver_prog) exe.run(pserver_startup) @@ -108,7 +113,6 @@ def gen_complete_file_flag(flag_file): class TestListenAndServOp(unittest.TestCase): - def setUp(self): self.ps_timeout = 200 self.ip = "127.0.0.1" @@ -117,9 +121,17 @@ class TestListenAndServOp(unittest.TestCase): self.trainer_id = 0 def _start_pserver(self, use_cuda, sync_mode, pserver_func): - p = Process(target=pserver_func, - args=(use_cuda, sync_mode, self.ip, self.port, - self.trainers, self.trainer_id)) + p = Process( + target=pserver_func, + args=( + use_cuda, + sync_mode, + self.ip, + self.port, + self.trainers, + self.trainer_id, + ), + ) p.daemon = True p.start() return p diff --git a/python/paddle/fluid/tests/unittests/test_load_op.py b/python/paddle/fluid/tests/unittests/test_load_op.py index 1b81f3b8f40965a19e4ea74e76c3e1a022695911..ed123e06a03c55273d4b2339b0f33fe608d00e5b 100644 --- a/python/paddle/fluid/tests/unittests/test_load_op.py +++ b/python/paddle/fluid/tests/unittests/test_load_op.py @@ -21,8 +21,7 @@ import tempfile class TestLoadOp(unittest.TestCase): - """ Test load operator. 
- """ + """Test load operator.""" def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -37,13 +36,17 @@ class TestLoadOp(unittest.TestCase): param_attr=fluid.ParamAttr( name='w', initializer=fluid.initializer.NumpyArrayInitializer( - self.ones))) + self.ones + ), + ), + ) exe = fluid.Executor(fluid.CPUPlace()) exe.run(start_prog) - fluid.io.save_persistables(exe, - dirname=os.path.join(self.temp_dir.name, - "./model"), - main_program=main_prog) + fluid.io.save_persistables( + exe, + dirname=os.path.join(self.temp_dir.name, "./model"), + main_program=main_prog, + ) def tearDown(self): self.temp_dir.cleanup() @@ -53,8 +56,9 @@ class TestLoadOp(unittest.TestCase): start_prog = fluid.Program() with fluid.program_guard(main_prog, start_prog): var = layers.create_tensor(dtype='float32') - layers.load(var, - file_path=os.path.join(self.temp_dir.name, './model/w')) + layers.load( + var, file_path=os.path.join(self.temp_dir.name, './model/w') + ) exe = fluid.Executor(fluid.CPUPlace()) exe.run(start_prog) diff --git a/python/paddle/fluid/tests/unittests/test_load_op_xpu.py b/python/paddle/fluid/tests/unittests/test_load_op_xpu.py index 957fe5aa42075be7c939865b0822d5c0576973c3..05ad3dc77626e81c7ca2c6ca6b1f9a9e0ff21e1c 100644 --- a/python/paddle/fluid/tests/unittests/test_load_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/test_load_op_xpu.py @@ -21,11 +21,11 @@ import paddle.fluid.layers as layers import paddle -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestLoadOpXpu(unittest.TestCase): - """ Test load operator. - """ + """Test load operator.""" def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -41,12 +41,15 @@ class TestLoadOpXpu(unittest.TestCase): param_attr=fluid.ParamAttr( name='w', initializer=fluid.initializer.NumpyArrayInitializer( - self.ones))) + self.ones + ), + ), + ) exe = fluid.Executor(fluid.XPUPlace(0)) exe.run(start_prog) - fluid.io.save_persistables(exe, - dirname=self.model_path, - main_program=main_prog) + fluid.io.save_persistables( + exe, dirname=self.model_path, main_program=main_prog + ) def tearDown(self): self.temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py index 4d47c94f2fabf4821e817337d00f5203943817fc..6bace212953a62bd77f0004fe6dd8498e2659885 100644 --- a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py +++ b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py @@ -24,19 +24,23 @@ import tempfile def convolutional_neural_network(img): - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') return prediction @@ -54,7 +58,6 @@ def static_train_net(img, label): class 
TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.seed = 90 @@ -72,30 +75,38 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): startup_program = fluid.default_startup_program() main_program = fluid.default_main_program() - img = fluid.data(name='img', - shape=[None, 1, 28, 28], - dtype='float32') + img = fluid.data( + name='img', shape=[None, 1, 28, 28], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') prediction, avg_loss = static_train_net(img, label) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[img, label], place=place) exe.run(startup_program) - train_reader = paddle.batch(paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=100), - batch_size=self.batch_size) + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=100 + ), + batch_size=self.batch_size, + ) for _ in range(0, self.epoch_num): for batch_id, data in enumerate(train_reader()): - exe.run(main_program, - feed=feeder.feed(data), - fetch_list=[avg_loss]) + exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_loss], + ) if batch_id > self.batch_num: break @@ -103,18 +114,22 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): static_param_dict = {} for param in fluid.default_main_program().all_parameters(): static_param_dict[param.name] = fluid.executor._fetch_var( - param.name) + param.name + ) if only_params: - fluid.io.save_params(exe, - self.save_dirname, - filename=self.params_filename) + fluid.io.save_params( + exe, self.save_dirname, filename=self.params_filename + ) else: fluid.io.save_inference_model( - self.save_dirname, ["img"], [prediction], + self.save_dirname, + ["img"], + [prediction], exe, model_filename=self.model_filename, - params_filename=self.params_filename) + params_filename=self.params_filename, + ) return static_param_dict @@ -124,7 +139,8 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): def test_load_default(self): self.save_dirname = os.path.join( - self.temp_dir.name, "static_mnist.load_state_dict.default") + self.temp_dir.name, "static_mnist.load_state_dict.default" + ) self.model_filename = None self.params_filename = None orig_param_dict = self.train_and_save_model() @@ -137,38 +153,45 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): def test_load_with_model_filename(self): self.save_dirname = os.path.join( - self.temp_dir.name, "static_mnist.load_state_dict.model_filename") + self.temp_dir.name, "static_mnist.load_state_dict.model_filename" + ) self.model_filename = "static_mnist.model" self.params_filename = None orig_param_dict = self.train_and_save_model() load_param_dict, _ = fluid.load_dygraph( - self.save_dirname, model_filename=self.model_filename) + self.save_dirname, model_filename=self.model_filename + ) self.check_load_state_dict(orig_param_dict, load_param_dict) - new_load_param_dict = paddle.load(self.save_dirname, - model_filename=self.model_filename) + new_load_param_dict = paddle.load( + self.save_dirname, model_filename=self.model_filename + ) self.check_load_state_dict(orig_param_dict, new_load_param_dict) def test_load_with_param_filename(self): self.save_dirname = os.path.join( - self.temp_dir.name, 
"static_mnist.load_state_dict.param_filename") + self.temp_dir.name, "static_mnist.load_state_dict.param_filename" + ) self.model_filename = None self.params_filename = "static_mnist.params" orig_param_dict = self.train_and_save_model() load_param_dict, _ = fluid.load_dygraph( - self.save_dirname, params_filename=self.params_filename) + self.save_dirname, params_filename=self.params_filename + ) self.check_load_state_dict(orig_param_dict, load_param_dict) - new_load_param_dict = paddle.load(self.save_dirname, - params_filename=self.params_filename) + new_load_param_dict = paddle.load( + self.save_dirname, params_filename=self.params_filename + ) self.check_load_state_dict(orig_param_dict, new_load_param_dict) def test_load_with_model_and_param_filename(self): self.save_dirname = os.path.join( self.temp_dir.name, - "static_mnist.load_state_dict.model_and_param_filename") + "static_mnist.load_state_dict.model_and_param_filename", + ) self.model_filename = "static_mnist.model" self.params_filename = "static_mnist.params" orig_param_dict = self.train_and_save_model() @@ -176,17 +199,21 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): load_param_dict, _ = fluid.load_dygraph( self.save_dirname, params_filename=self.params_filename, - model_filename=self.model_filename) + model_filename=self.model_filename, + ) self.check_load_state_dict(orig_param_dict, load_param_dict) - new_load_param_dict = paddle.load(self.save_dirname, - params_filename=self.params_filename, - model_filename=self.model_filename) + new_load_param_dict = paddle.load( + self.save_dirname, + params_filename=self.params_filename, + model_filename=self.model_filename, + ) self.check_load_state_dict(orig_param_dict, new_load_param_dict) def test_load_state_dict_from_save_params(self): self.save_dirname = os.path.join( - self.temp_dir.name, "static_mnist.load_state_dict.save_params") + self.temp_dir.name, "static_mnist.load_state_dict.save_params" + ) self.params_filename = None orig_param_dict = self.train_and_save_model(True) diff --git a/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py b/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py index 2d9bd1ac650909248e84c926edad835f9b675ae6..756b2ab77320ef5e3ed466c43e0773a85e5ad6fa 100644 --- a/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py +++ b/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py @@ -20,7 +20,6 @@ from paddle.fluid.executor import Executor class TestLoadVarsShapeCheck(unittest.TestCase): - def setUp(self): self.model_path = "./model_temp/" diff --git a/python/paddle/fluid/tests/unittests/test_locality_aware_nms_op.py b/python/paddle/fluid/tests/unittests/test_locality_aware_nms_op.py index b79b1bff8b94db95e2410c30743dbbccd89f7ddf..a09707c5767a913e3e97235bc4bde8b43f140b7e 100644 --- a/python/paddle/fluid/tests/unittests/test_locality_aware_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_locality_aware_nms_op.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import numpy as np @@ -25,13 +25,15 @@ def weight_merge(box1, box2, score1, score2): box2[i] = (box1[i] * score1 + box2[i] * score2) / (score1 + score2) -def nms(boxes, - scores, - score_threshold, - nms_threshold, - top_k=200, - normalized=True, - eta=1.0): +def nms( + boxes, + scores, + score_threshold, + nms_threshold, + top_k=200, + normalized=True, + eta=1.0, +): """Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: @@ -48,11 +50,13 @@ def nms(boxes, """ index = -1 for i in range(boxes.shape[0]): - if index > -1 and iou(boxes[i], boxes[index], - normalized) > nms_threshold: + if ( + index > -1 + and iou(boxes[i], boxes[index], normalized) > nms_threshold + ): weight_merge(boxes[i], boxes[index], scores[i], scores[index]) scores[index] += scores[i] - scores[i] = score_threshold - 1. + scores[i] = score_threshold - 1.0 else: index = i @@ -90,8 +94,17 @@ def nms(boxes, return selected_indices -def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, - nms_top_k, keep_top_k, normalized, shared): +def multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared, +): if shared: class_num = scores.shape[0] priorbox_num = scores.shape[1] @@ -102,13 +115,26 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, selected_indices = {} num_det = 0 for c in range(class_num): - if c == background: continue + if c == background: + continue if shared: - indices = nms(boxes, scores[c], score_threshold, nms_threshold, - nms_top_k, normalized) + indices = nms( + boxes, + scores[c], + score_threshold, + nms_threshold, + nms_top_k, + normalized, + ) else: - indices = nms(boxes[:, c, :], scores[:, c], score_threshold, - nms_threshold, nms_top_k, normalized) + indices = nms( + boxes[:, c, :], + scores[:, c], + score_threshold, + nms_threshold, + nms_top_k, + normalized, + ) selected_indices[c] = indices num_det += len(indices) @@ -121,9 +147,9 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, else: score_index.append((scores[idx][c], c, idx)) - sorted_score_index = sorted(score_index, - key=lambda tup: tup[0], - reverse=True) + sorted_score_index = sorted( + score_index, key=lambda tup: tup[0], reverse=True + ) sorted_score_index = sorted_score_index[:keep_top_k] selected_indices = {} @@ -139,29 +165,33 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, return selected_indices, num_det -def batched_multiclass_nms(boxes, - scores, - background, - score_threshold, - nms_threshold, - nms_top_k, - keep_top_k, - normalized=True): +def batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + 
nms_top_k, + keep_top_k, + normalized=True, +): batch_size = scores.shape[0] num_boxes = scores.shape[2] det_outs = [] lod = [] for n in range(batch_size): - nmsed_outs, nmsed_num = multiclass_nms(boxes[n], - scores[n], - background, - score_threshold, - nms_threshold, - nms_top_k, - keep_top_k, - normalized, - shared=True) + nmsed_outs, nmsed_num = multiclass_nms( + boxes[n], + scores[n], + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=True, + ) lod.append(nmsed_num) if nmsed_num == 0: @@ -170,19 +200,25 @@ def batched_multiclass_nms(boxes, for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = boxes[n][idx][:] - tmp_det_out.append([ - c, scores[n][c][idx], xmin, ymin, xmax, ymax, - idx + n * num_boxes - ]) - sorted_det_out = sorted(tmp_det_out, - key=lambda tup: tup[0], - reverse=False) + tmp_det_out.append( + [ + c, + scores[n][c][idx], + xmin, + ymin, + xmax, + ymax, + idx + n * num_boxes, + ] + ) + sorted_det_out = sorted( + tmp_det_out, key=lambda tup: tup[0], reverse=False + ) det_outs.extend(sorted_det_out) return det_outs, lod class TestLocalAwareNMSOp(OpTest): - def set_argument(self): self.score_threshold = 0.01 @@ -203,7 +239,7 @@ class TestLocalAwareNMSOp(OpTest): def softmax(x): # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) + shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -217,10 +253,15 @@ class TestLocalAwareNMSOp(OpTest): boxes_copy = copy.deepcopy(boxes) scores_copy = copy.deepcopy(scores) - det_outs, lod = batched_multiclass_nms(boxes_copy, scores_copy, - background, score_threshold, - nms_threshold, nms_top_k, - keep_top_k) + det_outs, lod = batched_multiclass_nms( + boxes_copy, + scores_copy, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + ) lod = [1] if not det_outs else lod det_outs = [[-1, 0]] if not det_outs else det_outs @@ -245,13 +286,11 @@ class TestLocalAwareNMSOp(OpTest): class TestLocalAwareNMSOpNoBoxes(TestLocalAwareNMSOp): - def set_argument(self): self.score_threshold = 2.0 class TestLocalAwareNMSOp4Points(OpTest): - def set_argument(self): self.score_threshold = 0.01 @@ -266,41 +305,93 @@ class TestLocalAwareNMSOp4Points(OpTest): nms_threshold = 0.3 score_threshold = self.score_threshold - scores = np.array([[[0.76319082, 0.73770091]], - [[0.68513154, 0.45952697]]]) - boxes = np.array([[[ - 0.42078365, 0.58117018, 2.92776169, 3.28557757, 4.24344318, - 0.92196165, 2.72370856, -1.66141214 - ], - [ - 0.13856006, 1.86871034, 2.81287224, 3.61381734, - 4.5505249, 0.51766346, 2.75630304, -1.91459389 - ]], - [[ - 1.57533883, 1.3217477, 3.07904942, 3.89512545, - 4.78680923, 1.96914586, 3.539482, -1.59739244 - ], - [ - 0.55084125, 1.71596215, 2.52476074, 3.18940435, - 5.09035159, 0.91959482, 3.71442385, -0.57299128 - ]]]) - - det_outs = np.array([[ - 0., 1.5008917, 0.28206837, 1.2140071, 2.8712926, 3.4469104, - 4.3943763, 0.7232457, 2.7397292, -1.7858533 - ], - [ - 0., 1.1446586, 1.1640508, 1.4800063, 2.856528, - 3.6118112, 4.908667, 1.5478, 3.609713, - -1.1861432 - ]]) + scores = np.array( + [[[0.76319082, 0.73770091]], [[0.68513154, 0.45952697]]] + ) + boxes = np.array( + [ + [ + [ + 0.42078365, + 0.58117018, + 2.92776169, + 3.28557757, + 4.24344318, + 0.92196165, + 2.72370856, + -1.66141214, + ], + [ + 0.13856006, + 1.86871034, + 2.81287224, + 3.61381734, + 4.5505249, + 0.51766346, + 2.75630304, + -1.91459389, + ], + ], + [ + [ + 
1.57533883, + 1.3217477, + 3.07904942, + 3.89512545, + 4.78680923, + 1.96914586, + 3.539482, + -1.59739244, + ], + [ + 0.55084125, + 1.71596215, + 2.52476074, + 3.18940435, + 5.09035159, + 0.91959482, + 3.71442385, + -0.57299128, + ], + ], + ] + ) + + det_outs = np.array( + [ + [ + 0.0, + 1.5008917, + 0.28206837, + 1.2140071, + 2.8712926, + 3.4469104, + 4.3943763, + 0.7232457, + 2.7397292, + -1.7858533, + ], + [ + 0.0, + 1.1446586, + 1.1640508, + 1.4800063, + 2.856528, + 3.6118112, + 4.908667, + 1.5478, + 3.609713, + -1.1861432, + ], + ] + ) lod = [1, 1] nmsed_outs = det_outs.astype('float32') self.op_type = 'locality_aware_nms' self.inputs = { 'BBoxes': boxes.astype('float32'), - 'Scores': scores.astype('float32') + 'Scores': scores.astype('float32'), } self.outputs = {'Out': (nmsed_outs, [lod])} self.attrs = { @@ -309,7 +400,7 @@ class TestLocalAwareNMSOp4Points(OpTest): 'nms_top_k': nms_top_k, 'keep_top_k': keep_top_k, 'background_label': -1, - 'normalized': False + 'normalized': False, } def test_check_output(self): @@ -317,75 +408,151 @@ class TestLocalAwareNMSOp4Points(OpTest): class TestLocalityAwareNMSAPI(unittest.TestCase): - def test_api(self): boxes = fluid.data(name='bboxes', shape=[None, 81, 8], dtype='float32') scores = fluid.data(name='scores', shape=[None, 1, 81], dtype='float32') - fluid.layers.locality_aware_nms(bboxes=boxes, - scores=scores, - score_threshold=0.5, - nms_top_k=400, - nms_threshold=0.3, - keep_top_k=200, - normalized=False) + fluid.layers.locality_aware_nms( + bboxes=boxes, + scores=scores, + score_threshold=0.5, + nms_top_k=400, + nms_threshold=0.3, + keep_top_k=200, + normalized=False, + ) class TestLocalityAwareNMSError(unittest.TestCase): - def test_error(self): boxes = fluid.data(name='bboxes', shape=[None, 81, 8], dtype='float32') scores = fluid.data(name='scores', shape=[None, 1, 81], dtype='float32') - boxes_int = fluid.data(name='bboxes_int', - shape=[None, 81, 8], - dtype='int32') - scores_int = fluid.data(name='scores_int', - shape=[None, 1, 81], - dtype='int32') + boxes_int = fluid.data( + name='bboxes_int', shape=[None, 81, 8], dtype='int32' + ) + scores_int = fluid.data( + name='scores_int', shape=[None, 1, 81], dtype='int32' + ) boxes_tmp = [1, 2] scores_tmp = [1, 2] # type of boxes and scores must be variable - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes_tmp, - scores, 0.5, 400, 200) - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores_tmp, 0.5, 400, 200) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes_tmp, + scores, + 0.5, + 400, + 200, + ) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores_tmp, + 0.5, + 400, + 200, + ) # dtype of boxes and scores must in ['float32', 'float64'] - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes_int, - scores, 0.5, 400, 200) - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores_int, 0.5, 400, 200) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes_int, + scores, + 0.5, + 400, + 200, + ) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores_int, + 0.5, + 400, + 200, + ) score_threshold = int(1) # type of score_threshold must be float - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, score_threshold, 400, 200) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + score_threshold, + 400, + 200, + ) nms_top_k = 400.5 # type of 
num_top_k must be int - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, 0.5, nms_top_k, 200) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + 0.5, + nms_top_k, + 200, + ) keep_top_k = 200.5 # type of keep_top_k must be int - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, 0.5, 400, keep_top_k) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + 0.5, + 400, + keep_top_k, + ) nms_threshold = int(0) # type of nms_threshold must be int - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, 0.5, 400, 200, nms_threshold) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + 0.5, + 400, + 200, + nms_threshold, + ) nms_eta = int(1) # type of nms_eta must be float - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, 0.5, 400, 200, 0.5, nms_eta) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + 0.5, + 400, + 200, + 0.5, + nms_eta, + ) bg_label = 1.5 # type of background_label must be int - self.assertRaises(TypeError, fluid.layers.locality_aware_nms, boxes, - scores, 0.5, 400, 200, 0.5, 1.0, bg_label) + self.assertRaises( + TypeError, + fluid.layers.locality_aware_nms, + boxes, + scores, + 0.5, + 400, + 200, + 0.5, + 1.0, + bg_label, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lod_append_op.py b/python/paddle/fluid/tests/unittests/test_lod_append_op.py index 23d828b3cd78a4df22c40d2e0c05ba6565fad98e..721a247d5d6009f2622fdd8bca29f575d1e3022d 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_append_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_append_op.py @@ -1,4 +1,4 @@ -#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,15 +19,13 @@ from paddle.fluid import Program class TestLoDAppendAPI(unittest.TestCase): - def test_api(self, use_cuda=False): main_program = Program() with fluid.program_guard(main_program): x = fluid.layers.data(name='x', shape=[6], dtype='float32') - level = fluid.layers.data(name='level', - shape=[3], - dtype='int32', - lod_level=0) + level = fluid.layers.data( + name='level', shape=[3], dtype='int32', lod_level=0 + ) result = fluid.layers.lod_append(x, level) x_i = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype("float32") @@ -38,35 +36,32 @@ class TestLoDAppendAPI(unittest.TestCase): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - [out] = exe.run(fluid.default_main_program(), - feed={ - 'x': x_i, - 'level': level_i - }, - fetch_list=[result], - return_numpy=False) + [out] = exe.run( + fluid.default_main_program(), + feed={'x': x_i, 'level': level_i}, + fetch_list=[result], + return_numpy=False, + ) self.assertEqual(out.recursive_sequence_lengths(), [[2, 4]]) class TestLodAppendOpError(unittest.TestCase): - def test_error(self): # The input(x) must be Variable. x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") level1 = [0, 2, 4] self.assertRaises(TypeError, fluid.layers.lod_append, x1, level1) - #The input(level) must be Variable or list. + # The input(level) must be Variable or list. 
x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32') self.assertRaises(ValueError, fluid.layers.lod_append, x2, 2) # Input(x) dtype must be float32 or float64 or int32 or int64 for dtype in ["bool", "float16"]: x3 = fluid.layers.data(name='x3_' + dtype, shape=[4], dtype=dtype) - level3 = fluid.layers.data(name='level3' + dtype, - shape=[4], - dtype='int32', - lod_level=2) + level3 = fluid.layers.data( + name='level3' + dtype, shape=[4], dtype='int32', lod_level=2 + ) self.assertRaises(TypeError, fluid.layers.lod_append, x3, level3) diff --git a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py index 4129c59edca1e15d05526db6d5bf310b60de6271..4b702d0a0f0ac163027074ededb579e4c3034b99 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py @@ -24,7 +24,6 @@ import numpy class TestLoDArrayLength(unittest.TestCase): - def test_array_length(self): tmp = layers.zeros(shape=[10], dtype='int32') i = layers.fill_constant(shape=[1], dtype='int64', value=10) @@ -37,17 +36,15 @@ class TestLoDArrayLength(unittest.TestCase): class TestLoDArrayLengthOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - #for ci coverage + # for ci coverage x1 = numpy.random.randn(2, 4).astype('int32') self.assertRaises(TypeError, fluid.layers.array_length, array=x1) class TestArrayLengthApi(unittest.TestCase): - def test_api(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py index 7a6b6857ffbfab6d595b21bb676fdde16a502e3c..88ad04a52c20542ec4f1ebc6bca80465b60a27bc 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py +++ b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py @@ -21,7 +21,6 @@ import unittest class TestLoDRankTable(unittest.TestCase): - def test_lod_rank_table(self): x = data(name='x', shape=[100]) cpu = core.CPUPlace() @@ -32,8 +31,9 @@ class TestLoDRankTable(unittest.TestCase): tensor = core.LoDTensor() tensor.set(numpy.random.random(size=(17, 100)), cpu) - tensor.set_recursive_sequence_lengths([[1, 2], [5, 1, 1], - [3, 1, 5, 1, 3, 3, 1]]) + tensor.set_recursive_sequence_lengths( + [[1, 2], [5, 1, 1], [3, 1, 5, 1, 3, 3, 1]] + ) exe.run(scope=scope, feed={'x': tensor}) var = scope.find_var(rank_table.name) table = var.get_lod_rank_table() @@ -41,7 +41,6 @@ class TestLoDRankTable(unittest.TestCase): class TestLoDRankTableError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = numpy.random.random((2, 4)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py index 8c814e984e28cad9e0b05dd3b6cdb42909535734..5e9bd45907a40c67752ac961112e900b034f88f3 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py @@ -20,7 +20,6 @@ from paddle.fluid import Program, program_guard class TestLodResetOpByAttr(OpTest): - def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float64") @@ -44,7 +43,6 @@ class TestLodResetOpByAttr(OpTest): class TestLodResetOpByInput(OpTest): - def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float64") @@ -55,7 +53,7 @@ class TestLodResetOpByInput(OpTest): target_lod = [4, 3, 3] 
self.inputs = { 'X': (x, lod), - 'Y': np.array([target_offset_lod]).astype('int32') + 'Y': np.array([target_offset_lod]).astype('int32'), } self.outputs = {'Out': (x, [target_lod])} @@ -69,7 +67,6 @@ class TestLodResetOpByInput(OpTest): class TestLodResetOpBoth(OpTest): - def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float64") @@ -79,7 +76,7 @@ class TestLodResetOpBoth(OpTest): target_lod_in = [4, 3, 3] self.inputs = { 'X': (x, lod), - 'Y': np.array(target_offset_lod_in).astype('int32') + 'Y': np.array(target_offset_lod_in).astype('int32'), } self.attrs = {'target_lod': target_offset_lod_attr} self.outputs = {'Out': (x, [target_lod_in])} @@ -94,7 +91,6 @@ class TestLodResetOpBoth(OpTest): class TestLodResetOpYIsLoDTensor(OpTest): - def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float64") @@ -114,7 +110,6 @@ class TestLodResetOpYIsLoDTensor(OpTest): class TestLodAppendOpByAttr(OpTest): - def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float64") @@ -138,7 +133,6 @@ class TestLodAppendOpByAttr(OpTest): class TestLodResetOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input must be Variable. @@ -148,13 +142,12 @@ class TestLodResetOpError(unittest.TestCase): # Input(x) dtype must be float32 or float64 or int32 or int64 for dtype in ["bool", "float16"]: - x2 = fluid.layers.data(name='x2' + dtype, - shape=[4], - dtype=dtype) - y2 = fluid.layers.data(name='y2' + dtype, - shape=[4], - dtype='int32', - lod_level=2) + x2 = fluid.layers.data( + name='x2' + dtype, shape=[4], dtype=dtype + ) + y2 = fluid.layers.data( + name='y2' + dtype, shape=[4], dtype='int32', lod_level=2 + ) self.assertRaises(TypeError, fluid.layers.lod_reset, x2, y2) diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py index 685c48e96877ced0fee4339065591c73b9717d6c..c6656ff1caa66b0eb08352093952dc2e006a1ec6 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py @@ -19,7 +19,6 @@ import numpy as np class TestLoDTensorArray(unittest.TestCase): - def test_get_set(self): scope = core.Scope() arr = scope.var('tmp_lod_tensor_array') @@ -49,7 +48,6 @@ class TestLoDTensorArray(unittest.TestCase): class TestCreateArray(unittest.TestCase): - def setUp(self): self.place = paddle.CPUPlace() self.shapes = [[10, 4], [8, 12], [1]] @@ -60,7 +58,8 @@ class TestCreateArray(unittest.TestCase): np.random.random(shape).astype('float32') for shape in self.shapes ] array = paddle.tensor.create_array( - 'float32', [paddle.to_tensor(x) for x in init_data]) + 'float32', [paddle.to_tensor(x) for x in init_data] + ) for res, gt in zip(array, init_data): np.testing.assert_array_equal(res, gt) @@ -82,8 +81,9 @@ class TestCreateArray(unittest.TestCase): # test error with nest list with self.assertRaises(TypeError): - paddle.tensor.create_array('float32', - [init_data[0], [init_data[1]]]) + paddle.tensor.create_array( + 'float32', [init_data[0], [init_data[1]]] + ) # test error with not variable with self.assertRaises(TypeError): diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index 9cc9a5540aa3554ff54d108f8e989080ddc758c7..418c95901edd0db23d48bf072a604f796b09c703 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py 
+++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -28,7 +28,6 @@ from paddle.fluid.layers.control_flow import array_to_lod_tensor class TestCPULoDTensorArrayOps(unittest.TestCase): - def place(self): return core.CPUPlace() @@ -40,10 +39,12 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): np.array(x).astype('int32') for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] ] - self.main(tensor=tensor, - expect_array=expect, - expect_lod=[] * 6, - expect_max_len=6) + self.main( + tensor=tensor, + expect_array=expect, + expect_lod=[] * 6, + expect_max_len=6, + ) def test_lod_tensor_to_array_level_0_empty_seq(self): tensor = core.LoDTensor() @@ -53,10 +54,12 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): np.array(x).astype('int32') for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] ] - self.main(tensor=tensor, - expect_array=expect, - expect_lod=[] * 6, - expect_max_len=6) + self.main( + tensor=tensor, + expect_array=expect, + expect_lod=[] * 6, + expect_max_len=6, + ) def test_lod_tensor_to_array_level_1(self): tensor = core.LoDTensor() @@ -66,65 +69,82 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): expect = [ np.array([9, 10, 0, 1, 2], dtype='int32'), np.array([11, 12, 13, 14, 15, 16, 3, 4, 5, 6, 7, 8], dtype='int32'), - np.array([17, 18, 19], dtype='int32') + np.array([17, 18, 19], dtype='int32'), ] lod = [[[2, 3]], [[6, 6]], [[3]]] - self.main(tensor=tensor, - expect_array=expect, - expect_lod=lod, - expect_max_len=3) + self.main( + tensor=tensor, expect_array=expect, expect_lod=lod, expect_max_len=3 + ) def test_lod_tensor_to_array_level_1_empty_seq(self): tensor = core.LoDTensor() tensor.set(np.arange(31).reshape(31, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( - [[3, 2, 4, 2], [3, 4, 4, 0, 1, 5, 2, 2, 2, 7, 1]]) + [[3, 2, 4, 2], [3, 4, 4, 0, 1, 5, 2, 2, 2, 7, 1]] + ) expect = [ - np.array(item, dtype='int32') for item in [[ - 12, 13, 14, 15, 16, 0, 1, 2, 23, 24, 25, 26, 27, 28, 29 - ], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]] + np.array(item, dtype='int32') + for item in [ + [12, 13, 14, 15, 16, 0, 1, 2, 23, 24, 25, 26, 27, 28, 29], + [17, 18, 3, 4, 5, 6, 11, 30], + [19, 20, 7, 8, 9, 10], + [21, 22], + ] ] lod = [[[5, 3, 0, 7]], [[2, 4, 1, 1]], [[2, 4]], [[2]]] - self.main(tensor=tensor, - expect_array=expect, - expect_lod=lod, - expect_max_len=4) + self.main( + tensor=tensor, expect_array=expect, expect_lod=lod, expect_max_len=4 + ) def test_lod_tensor_to_array_level_2(self): tensor = core.LoDTensor() tensor.set(np.arange(50).reshape(50, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( - [[2, 3, 1], [2, 3, 1, 4, 2, 1], - [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) + [ + [2, 3, 1], + [2, 3, 1, 4, 2, 1], + [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4], + ] + ) expect = [ np.array(item, dtype='int32') - for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], - list(range(22, 39)) + list(range(7, 21)), - list(range(39, 46))] + for item in [ + [21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], + list(range(22, 39)) + list(range(7, 21)), + list(range(39, 46)), + ] ] - lod = [[[1, 2, 1], [1, 3, 4, 4]], [[4, 3], [1, 4, 4, 8, 4, 6, 4]], - [[2], [6, 1]]] - self.main(tensor=tensor, - expect_array=expect, - expect_lod=lod, - expect_max_len=3) + lod = [ + [[1, 2, 1], [1, 3, 4, 4]], + [[4, 3], [1, 4, 4, 8, 4, 6, 4]], + [[2], [6, 1]], + ] + self.main( + tensor=tensor, expect_array=expect, expect_lod=lod, expect_max_len=3 + ) def test_lod_tensor_to_array_level_2_skip_level(self): tensor = core.LoDTensor() 
tensor.set(np.arange(50).reshape(50, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( - [[2, 3, 1], [2, 3, 1, 4, 2, 1], - [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) - self.main(tensor=tensor, - expect_array=None, - expect_lod=None, - expect_max_len=4, - level=1) + [ + [2, 3, 1], + [2, 3, 1, 4, 2, 1], + [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4], + ] + ) + self.main( + tensor=tensor, + expect_array=None, + expect_lod=None, + expect_max_len=4, + level=1, + ) def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0): place = self.place() @@ -151,37 +171,38 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): self.assertEqual( np.array(scope.find_var(max_len.name).get_tensor())[0], - expect_max_len) + expect_max_len, + ) def check_array_same(self, array, expect_tensor, expect_lod): self.assertEqual(len(expect_tensor), len(array)) for i, exp in enumerate(zip(expect_tensor, expect_lod)): exp_tensor, exp_lod = exp exp_tensor = np.expand_dims(exp_tensor, axis=1) - np.testing.assert_allclose(exp_tensor, - np.array(array[i]), - rtol=1e-05) + np.testing.assert_allclose( + exp_tensor, np.array(array[i]), rtol=1e-05 + ) self.assertEqual(exp_lod, array[i].recursive_sequence_lengths()) def check_tensor_same(self, actual, expect): - np.testing.assert_allclose(np.array(actual), - np.array(expect), - rtol=1e-05) - self.assertEqual(actual.recursive_sequence_lengths(), - expect.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual), np.array(expect), rtol=1e-05 + ) + self.assertEqual( + actual.recursive_sequence_lengths(), + expect.recursive_sequence_lengths(), + ) class TestCPULoDTensorArrayOpGrad(unittest.TestCase): - def test_grad(self): place = core.CPUPlace() program = Program() with program_guard(program): - x = layers.data(name='x', - shape=[1], - dtype='float32', - stop_gradient=False) + x = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False + ) table = lod_rank_table(x, level=0) array = lod_tensor_to_array(x, table) result = array_to_lod_tensor(array, table) @@ -198,10 +219,13 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): exe = Executor(place) g_out = [ - np.array(item).sum() for item in exe.run(program, - feed={'x': tensor}, - fetch_list=[g_vars], - return_numpy=False) + np.array(item).sum() + for item in exe.run( + program, + feed={'x': tensor}, + fetch_list=[g_vars], + return_numpy=False, + ) ] g_out_sum = np.array(g_out).sum() @@ -209,7 +233,6 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): class TestLoDTensorArrayError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = np.random.random((10)).astype("float32") @@ -242,7 +265,6 @@ class TestLoDTensorArrayError(unittest.TestCase): class TestArrayLoDTensorError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = np.random.random((10)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_log_loss_op.py b/python/paddle/fluid/tests/unittests/test_log_loss_op.py index 28681da7882c50791d15e187393b04510804cad0..35d31128732bd850fc0433b983440ae50dbf9e73 100644 --- a/python/paddle/fluid/tests/unittests/test_log_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_log_loss_op.py @@ -23,7 +23,6 @@ def sigmoid_array(x): class TestLogLossOp(OpTest): - def setUp(self): self.op_type = 'log_loss' samples_num = 100 @@ -38,8 +37,9 @@ class TestLogLossOp(OpTest): } self.attrs = {'epsilon': epsilon} - loss = -labels * np.log(predicted + epsilon) - ( - 1 - labels) * 
np.log(1 - predicted + epsilon) + loss = -labels * np.log(predicted + epsilon) - (1 - labels) * np.log( + 1 - predicted + epsilon + ) self.outputs = {'Loss': loss} def test_check_output(self): @@ -50,7 +50,6 @@ class TestLogLossOp(OpTest): class TestLogLossOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py index 8f1fb2c1da28f0529fad04d7f68f7e7222ad2a26..4be4319d805f3fc534bb5f0aeb7f7126e04f0c06 100644 --- a/python/paddle/fluid/tests/unittests/test_log_softmax.py +++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py @@ -23,7 +23,7 @@ np.random.seed(10) def ref_log_softmax(x): - shiftx = (x - np.max(x)) + shiftx = x - np.max(x) out = shiftx - np.log(np.exp(shiftx).sum()) return out @@ -33,14 +33,14 @@ def ref_log_softmax_grad(x, axis): axis += len(x.shape) out = np.apply_along_axis(ref_log_softmax, axis, x) axis_dim = x.shape[axis] - dout = np.full_like(x, fill_value=1. / x.size) + dout = np.full_like(x, fill_value=1.0 / x.size) dx = dout - np.exp(out) * dout.copy().sum(axis=axis, keepdims=True).repeat( - axis_dim, axis=axis) + axis_dim, axis=axis + ) return dx class TestLogSoftmaxOp(OpTest): - def setUp(self): self.op_type = 'log_softmax' self.python_api = F.log_softmax @@ -49,7 +49,7 @@ class TestLogSoftmaxOp(OpTest): self.axis = -1 self.set_attrs() - x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype) + x = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) @@ -64,27 +64,25 @@ class TestLogSoftmaxOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], ['Out'], - user_defined_grads=[self.x_grad], - check_eager=True) + self.check_grad( + ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True + ) class TestLogSoftmaxShape(TestLogSoftmaxOp): - def set_attrs(self): self.shape = [12, 10] class TestLogSoftmaxAxis(TestLogSoftmaxOp): - def set_attrs(self): self.axis = 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLogSoftmaxBF16Op(OpTest): - def setUp(self): self.op_type = 'log_softmax' self.python_api = F.log_softmax @@ -92,7 +90,7 @@ class TestLogSoftmaxBF16Op(OpTest): self.shape = [2, 3, 4, 5] self.axis = -1 - x = np.random.uniform(0.1, 1., self.shape).astype(np.float32) + x = np.random.uniform(0.1, 1.0, self.shape).astype(np.float32) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) @@ -106,19 +104,24 @@ class TestLogSoftmaxBF16Op(OpTest): def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], ['Out'], - user_defined_grads=[self.x_grad], - check_eager=True) + self.check_grad_with_place( + place, + ['X'], + ['Out'], + user_defined_grads=[self.x_grad], + check_eager=True, + ) class TestNNLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] - self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.x = np.random.uniform(-1.0, 1.0, self.x_shape).astype(np.float32) + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def 
check_api(self, axis=-1): ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x) @@ -145,13 +148,14 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): - def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def check_api(self, axis=-1, dtype=None): x = self.x.copy() diff --git a/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py b/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py index 6064303a5217883d7521f302adcb4161c2a6c2ff..96f0e4bff6a669574a799ef8b818c5075b7f6bf5 100644 --- a/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py +++ b/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py @@ -26,11 +26,13 @@ def np_naive_logcumsumexp(x: np.ndarray, axis: Optional[int] = None): return np.log(np.cumsum(np.exp(x), axis=axis)) -def np_logcumsumexp(x: np.ndarray, - axis: Optional[int] = None, - flatten: Optional[bool] = None, - reverse: bool = False, - exclusive: bool = False): +def np_logcumsumexp( + x: np.ndarray, + axis: Optional[int] = None, + flatten: Optional[bool] = None, + reverse: bool = False, + exclusive: bool = False, +): # `flatten` aligns with c++ op if flatten: assert axis in [0, None] @@ -76,23 +78,30 @@ def np_logcumsumexp_grad( log_grad_negative = np.where(dout < 0, np.log(-dout), np.finfo(x.dtype).min) output_pos = np.exp( - np_logcumsumexp(log_grad_positive - out, - axis=axis, - flatten=flatten, - reverse=not reverse, - exclusive=exclusive).reshape(x.shape) + x) + np_logcumsumexp( + log_grad_positive - out, + axis=axis, + flatten=flatten, + reverse=not reverse, + exclusive=exclusive, + ).reshape(x.shape) + + x + ) output_neg = np.exp( - np_logcumsumexp(log_grad_negative - out, - axis=axis, - flatten=flatten, - reverse=not reverse, - exclusive=exclusive).reshape(x.shape) + x) + np_logcumsumexp( + log_grad_negative - out, + axis=axis, + flatten=flatten, + reverse=not reverse, + exclusive=exclusive, + ).reshape(x.shape) + + x + ) return output_pos - output_neg class TestLogcumsumexp(unittest.TestCase): - def run_imperative(self): data_np = np.arange(12, dtype=np.float32).reshape(3, 4) data = paddle.to_tensor(data_np) @@ -146,14 +155,16 @@ class TestLogcumsumexp(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - out = exe.run(feed={'X': data_np}, - fetch_list=[ - y.name, - y2.name, - y3.name, - y4.name, - y5.name, - ]) + out = exe.run( + feed={'X': data_np}, + fetch_list=[ + y.name, + y2.name, + y3.name, + y4.name, + y5.name, + ], + ) z = np_logcumsumexp(data_np) np.testing.assert_allclose(z, out[0], rtol=1e-05) @@ -202,9 +213,7 @@ class TestLogcumsumexp(unittest.TestCase): class BaseTestCases: - class BaseOpTest(OpTest): - def setUp(self): self.op_type = "logcumsumexp" input, attrs = self.input_and_attrs() @@ -218,56 +227,55 @@ class BaseTestCases: self.check_output() def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[ - np_logcumsumexp_grad(self.inputs['X'], - 1 / self.inputs['X'].size, - **self.attrs) - ]) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[ + np_logcumsumexp_grad( + self.inputs['X'], + 1 / self.inputs['X'].size, + **self.attrs + ) + ], + ) def input_and_attrs(self): raise 
NotImplementedError() class TestLogcumsumexpOp1(BaseTestCases.BaseOpTest): - def input_and_attrs(self): return np.arange(100, dtype=np.float64).reshape(10, 10), { 'axis': 0, 'flatten': True, - 'reverse': True + 'reverse': True, } class TestLogcumsumexpOp2(BaseTestCases.BaseOpTest): - def input_and_attrs(self): return np.arange(100, dtype=np.float64).reshape(10, 10), { 'axis': 1, - 'reverse': True + 'reverse': True, } class TestLogcumsumexpOp3(BaseTestCases.BaseOpTest): - def input_and_attrs(self): return np.arange(100, dtype=np.float64).reshape(10, 10), {'axis': 1} class TestLogcumsumexpOp4(BaseTestCases.BaseOpTest): - def input_and_attrs(self): return np.arange(100, dtype=np.float64).reshape(10, 10), { 'axis': 0, 'flatten': True, 'reverse': True, - 'exclusive': True + 'exclusive': True, } class TestLogcumsumexpFP16(unittest.TestCase): - def check_main(self, x_np, dtype, axis=None): paddle.disable_static() x = paddle.to_tensor(x_np.astype(dtype)) diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index c36445e7b0710f9cd83b7aed72f650d1c23ea4da..31490961c84ce98a38f8bcdd8fdea168651d5671 100755 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -20,87 +20,41 @@ from paddle.framework import _non_static_mode from paddle.fluid.framework import _test_eager_guard SUPPORTED_DTYPES = [ - bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64 + bool, + np.int8, + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, ] -TEST_META_OP_DATA = [{ - 'op_str': 'logical_and', - 'binary_op': True -}, { - 'op_str': 'logical_or', - 'binary_op': True -}, { - 'op_str': 'logical_xor', - 'binary_op': True -}, { - 'op_str': 'logical_not', - 'binary_op': False -}] +TEST_META_OP_DATA = [ + {'op_str': 'logical_and', 'binary_op': True}, + {'op_str': 'logical_or', 'binary_op': True}, + {'op_str': 'logical_xor', 'binary_op': True}, + {'op_str': 'logical_not', 'binary_op': False}, +] TEST_META_SHAPE_DATA = { - 'XDimLargerThanYDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 5] - }, - 'XDimLargerThanYDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 1] - }, - 'XDimLargerThanYDim3': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [1, 4, 1] - }, - 'XDimLargerThanYDim4': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4, 1] - }, - 'XDimLargerThanYDim5': { - 'x_shape': [2, 3, 1, 5], - 'y_shape': [3, 1, 1] - }, - 'XDimLessThanYDim1': { - 'x_shape': [4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim2': { - 'x_shape': [1, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim3': { - 'x_shape': [3, 4, 1], - 'y_shape': [2, 3, 4, 5] - }, - 'XDimLessThanYDim4': { - 'x_shape': [3, 1, 1], - 'y_shape': [2, 3, 1, 5] - }, - 'XDimLessThanYDim5': { - 'x_shape': [4, 5], - 'y_shape': [2, 3, 4, 5] - }, - 'Axis1InLargerDim': { - 'x_shape': [1, 4, 5], - 'y_shape': [2, 3, 1, 5] - }, - 'EqualDim1': { - 'x_shape': [10, 7], - 'y_shape': [10, 7] - }, - 'EqualDim2': { - 'x_shape': [1, 1, 4, 5], - 'y_shape': [2, 3, 1, 5] - } + 'XDimLargerThanYDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 5]}, + 'XDimLargerThanYDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 1]}, + 'XDimLargerThanYDim3': {'x_shape': [2, 3, 4, 5], 'y_shape': [1, 4, 1]}, + 'XDimLargerThanYDim4': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4, 1]}, + 'XDimLargerThanYDim5': {'x_shape': [2, 3, 1, 5], 'y_shape': [3, 1, 1]}, + 'XDimLessThanYDim1': {'x_shape': [4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim2': 
{'x_shape': [1, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim3': {'x_shape': [3, 4, 1], 'y_shape': [2, 3, 4, 5]}, + 'XDimLessThanYDim4': {'x_shape': [3, 1, 1], 'y_shape': [2, 3, 1, 5]}, + 'XDimLessThanYDim5': {'x_shape': [4, 5], 'y_shape': [2, 3, 4, 5]}, + 'Axis1InLargerDim': {'x_shape': [1, 4, 5], 'y_shape': [2, 3, 1, 5]}, + 'EqualDim1': {'x_shape': [10, 7], 'y_shape': [10, 7]}, + 'EqualDim2': {'x_shape': [1, 1, 4, 5], 'y_shape': [2, 3, 1, 5]}, } TEST_META_WRONG_SHAPE_DATA = { - 'ErrorDim1': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [3, 4] - }, - 'ErrorDim2': { - 'x_shape': [2, 3, 4, 5], - 'y_shape': [4, 3] - } + 'ErrorDim1': {'x_shape': [2, 3, 4, 5], 'y_shape': [3, 4]}, + 'ErrorDim2': {'x_shape': [2, 3, 4, 5], 'y_shape': [4, 3]}, } @@ -175,16 +129,20 @@ def test(unit_test, use_gpu=False, test_error=False): META_DATA = dict(TEST_META_WRONG_SHAPE_DATA) for shape_data in META_DATA.values(): for data_type in SUPPORTED_DTYPES: - meta_data['x_np'] = np_data_generator(shape_data['x_shape'], - dtype=data_type) - meta_data['y_np'] = np_data_generator(shape_data['y_shape'], - dtype=data_type) + meta_data['x_np'] = np_data_generator( + shape_data['x_shape'], dtype=data_type + ) + meta_data['y_np'] = np_data_generator( + shape_data['y_shape'], dtype=data_type + ) if meta_data['binary_op'] and test_error: # catch C++ Exception - unit_test.assertRaises(BaseException, run_static, - **meta_data) - unit_test.assertRaises(BaseException, run_dygraph, - **meta_data) + unit_test.assertRaises( + BaseException, run_static, **meta_data + ) + unit_test.assertRaises( + BaseException, run_dygraph, **meta_data + ) continue static_result = run_static(**meta_data) dygraph_result = run_dygraph(**meta_data) @@ -195,12 +153,12 @@ def test(unit_test, use_gpu=False, test_error=False): np_result = np_op(meta_data['x_np']) unit_test.assertTrue((static_result == np_result).all()) unit_test.assertTrue( - (dygraph_result.numpy() == np_result).all()) + (dygraph_result.numpy() == np_result).all() + ) unit_test.assertTrue((eager_result.numpy() == np_result).all()) def test_type_error(unit_test, use_gpu, type_str_map): - def check_type(op_str, x, y, binary_op): op = getattr(paddle, op_str) error_type = ValueError @@ -235,24 +193,24 @@ def test_type_error(unit_test, use_gpu, type_str_map): startup_program = paddle.static.Program() main_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', - shape=[10], - dtype=type_str_map['x']) - y = paddle.static.data(name='y', - shape=[10], - dtype=type_str_map['y']) + x = paddle.static.data( + name='x', shape=[10], dtype=type_str_map['x'] + ) + y = paddle.static.data( + name='y', shape=[10], dtype=type_str_map['y'] + ) check_type(meta_data['op_str'], x, y, binary_op) def type_map_factory(): - return [{ - 'x': x_type, - 'y': y_type - } for x_type in SUPPORTED_DTYPES for y_type in SUPPORTED_DTYPES] + return [ + {'x': x_type, 'y': y_type} + for x_type in SUPPORTED_DTYPES + for y_type in SUPPORTED_DTYPES + ] class TestCPU(unittest.TestCase): - def test(self): test(self) @@ -266,7 +224,6 @@ class TestCPU(unittest.TestCase): class TestCUDA(unittest.TestCase): - def test(self): test(self, True) diff --git a/python/paddle/fluid/tests/unittests/test_logit_op.py b/python/paddle/fluid/tests/unittests/test_logit_op.py index 7240fad1a441fc04c61489e9b0a5a604dd0ffdb7..74f645fb68d1c8952a289c1ec4d5928111fddaa4 100644 --- a/python/paddle/fluid/tests/unittests/test_logit_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_logit_op.py @@ -22,22 +22,23 @@ np.random.seed(10) def logit(x, eps): - x_min = np.minimum(x, 1. - eps) + x_min = np.minimum(x, 1.0 - eps) x_max = np.maximum(x_min, eps) - return np.log(x_max / (1. - x_max)) + return np.log(x_max / (1.0 - x_max)) def logit_grad(x, eps=1e-8): - tmp_x = np.select([x < eps, x > (1. - eps)], [x * 0., x * 0.], default=-1.0) - x_1 = 1. - x + tmp_x = np.select( + [x < eps, x > (1.0 - eps)], [x * 0.0, x * 0.0], default=-1.0 + ) + x_1 = 1.0 - x _x = np.select([tmp_x == -1.0], [np.reciprocal(x * x_1)], default=0.0) - dout = np.full_like(x, fill_value=1. / _x.size) + dout = np.full_like(x, fill_value=1.0 / _x.size) dx = dout * _x return dx class TestLogitOp(OpTest): - def setUp(self): self.op_type = 'logit' self.python_api = paddle.logit @@ -45,7 +46,7 @@ class TestLogitOp(OpTest): self.shape = [120] self.eps = 1e-8 self.set_attrs() - x = np.random.uniform(-1., 1., self.shape).astype(self.dtype) + x = np.random.uniform(-1.0, 1.0, self.shape).astype(self.dtype) out = logit(x, self.eps) self.x_grad = logit_grad(x, self.eps) self.inputs = {'X': x} @@ -59,31 +60,30 @@ class TestLogitOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], ['Out'], - user_defined_grads=[self.x_grad], - check_eager=True) + self.check_grad( + ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True + ) class TestLogitShape(TestLogitOp): - def set_attrs(self): self.shape = [2, 60] class TestLogitEps(TestLogitOp): - def set_attrs(self): self.eps = 1e-8 class TestLogitAPI(unittest.TestCase): - def setUp(self): self.x_shape = [120] - self.x = np.random.uniform(0., 1., self.x_shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.x = np.random.uniform(0.0, 1.0, self.x_shape).astype(np.float32) + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def check_api(self, eps=1e-8): ref_out = logit(self.x, eps) diff --git a/python/paddle/fluid/tests/unittests/test_logspace.py b/python/paddle/fluid/tests/unittests/test_logspace.py index 8b5501a26ae5d5d5f63159582cb57fe19c2805e5..170d056895a941995065f83820ff010c20466b1a 100644 --- a/python/paddle/fluid/tests/unittests/test_logspace.py +++ b/python/paddle/fluid/tests/unittests/test_logspace.py @@ -19,7 +19,6 @@ import paddle class TestLogspaceOpCommonCase(OpTest): - def setUp(self): self.op_type = "logspace" dtype = 'float32' @@ -38,7 +37,6 @@ class TestLogspaceOpCommonCase(OpTest): class TestLogspaceOpReverseCase(OpTest): - def setUp(self): self.op_type = "logspace" dtype = 'float32' @@ -46,7 +44,7 @@ class TestLogspaceOpReverseCase(OpTest): 'Start': np.array([10]).astype(dtype), 'Stop': np.array([0]).astype(dtype), 'Num': np.array([11]).astype('int32'), - 'Base': np.array([2]).astype(dtype) + 'Base': np.array([2]).astype(dtype), } self.attrs = {'dtype': int(paddle.float32)} @@ -57,7 +55,6 @@ class TestLogspaceOpReverseCase(OpTest): class TestLogspaceOpNumOneCase(OpTest): - def setUp(self): self.op_type = "logspace" dtype = 'float32' @@ -65,7 +62,7 @@ class TestLogspaceOpNumOneCase(OpTest): 'Start': np.array([10]).astype(dtype), 'Stop': np.array([0]).astype(dtype), 'Num': np.array([1]).astype('int32'), - 'Base': np.array([2]).astype(dtype) + 'Base': np.array([2]).astype(dtype), } self.attrs = {'dtype': int(paddle.float32)} @@ -76,7 +73,6 @@ class TestLogspaceOpNumOneCase(OpTest): class TestLogspaceOpMinusBaseCase(OpTest): - def 
setUp(self): self.op_type = "logspace" dtype = 'float32' @@ -95,7 +91,6 @@ class TestLogspaceOpMinusBaseCase(OpTest): class TestLogspaceOpZeroBaseCase(OpTest): - def setUp(self): self.op_type = "logspace" dtype = 'float32' @@ -114,7 +109,6 @@ class TestLogspaceOpZeroBaseCase(OpTest): class TestLogspaceAPI(unittest.TestCase): - def test_variable_input1(self): paddle.enable_static() prog = paddle.static.Program() @@ -156,12 +150,9 @@ class TestLogspaceAPI(unittest.TestCase): def test_name(self): with paddle.static.program_guard(paddle.static.Program()): - out = paddle.logspace(0, - 10, - 5, - 2, - dtype='float32', - name='logspace_res') + out = paddle.logspace( + 0, 10, 5, 2, dtype='float32', name='logspace_res' + ) assert 'logspace_res' in out.name def test_imperative(self): @@ -179,7 +170,6 @@ class TestLogspaceAPI(unittest.TestCase): class TestLogspaceOpError(unittest.TestCase): - def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): @@ -209,9 +199,9 @@ class TestLogspaceOpError(unittest.TestCase): self.assertRaises(TypeError, test_num_type) def test_start_dtype(): - start = paddle.static.data(shape=[1], - dtype="float64", - name="start") + start = paddle.static.data( + shape=[1], dtype="float64", name="start" + ) paddle.logspace(start, 10, 1, 2, dtype="float32") self.assertRaises(ValueError, test_start_dtype) @@ -223,17 +213,17 @@ class TestLogspaceOpError(unittest.TestCase): self.assertRaises(ValueError, test_end_dtype) def test_num_dtype(): - num = paddle.static.data(shape=[1], - dtype="float32", - name="step") + num = paddle.static.data( + shape=[1], dtype="float32", name="step" + ) paddle.logspace(0, 10, num, 2, dtype="float32") self.assertRaises(TypeError, test_num_dtype) def test_base_dtype(): - base = paddle.static.data(shape=[1], - dtype="float64", - name="end") + base = paddle.static.data( + shape=[1], dtype="float64", name="end" + ) paddle.logspace(0, 10, 1, base, dtype="float32") self.assertRaises(ValueError, test_base_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py index ca732d6b0a7663dc2aa6f2a618aa15d7ced154bf..ac29603713e16e9f69b4e6d2e08b220f3564ad45 100644 --- a/python/paddle/fluid/tests/unittests/test_logsumexp.py +++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py @@ -21,7 +21,7 @@ from op_test import OpTest def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False): if isinstance(axis, int): - axis = (axis, ) + axis = (axis,) elif isinstance(axis, list): axis = tuple(axis) if reduce_all: @@ -53,7 +53,6 @@ def logsumexp_ref_grad(x): class TestLogsumexp(OpTest): - def setUp(self): self.op_type = 'logsumexp' self.python_api = logsumexp_wrapper @@ -73,7 +72,7 @@ class TestLogsumexp(OpTest): self.attrs = { 'axis': self.axis, 'keepdim': self.keepdim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } self.user_defined_grads = None self.user_defined_grad_outputs = None @@ -90,10 +89,12 @@ class TestLogsumexp(OpTest): def test_check_grad(self): self.check_grad( - ['X'], ['Out'], + ['X'], + ['Out'], user_defined_grads=self.user_defined_grads, user_defined_grad_outputs=self.user_defined_grad_outputs, - check_eager=True) + check_eager=True, + ) def calc_grad(self): dy = np.ones(1, dtype=self.dtype) @@ -103,19 +104,16 @@ class TestLogsumexp(OpTest): class TestLogsumexp_shape(TestLogsumexp): - def set_attrs(self): self.shape = [4, 5, 6] class TestLogsumexp_axis(TestLogsumexp): - def set_attrs(self): self.axis = [0, -1] class 
TestLogsumexp_axis_all(TestLogsumexp): - def set_attrs(self): self.axis = [0, 1, 2, 3] @@ -126,13 +124,11 @@ class TestLogsumexp_axis_all(TestLogsumexp): class TestLogsumexp_keepdim(TestLogsumexp): - def set_attrs(self): self.keepdim = True class TestLogsumexp_reduce_all(TestLogsumexp): - def set_attrs(self): self.reduce_all = True @@ -143,7 +139,6 @@ class TestLogsumexp_reduce_all(TestLogsumexp): class TestLogsumexp_FP32(TestLogsumexp): - def set_attrs(self): self.dtype = 'float32' @@ -154,10 +149,10 @@ class TestLogsumexp_FP32(TestLogsumexp): np.testing.assert_allclose(x_grad, ref_x_grad, rtol=1e-08, atol=1e-08) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLogsumexp_FP16(TestLogsumexp): - def set_attrs(self): self.dtype = 'float16' @@ -169,10 +164,9 @@ class TestLogsumexp_FP16(TestLogsumexp): tensor_x = paddle.to_tensor(x) out_pad = logsumexp_wrapper(tensor_x) paddle.enable_static() - np.testing.assert_allclose(out_pad.numpy(), - out_ref, - rtol=1e-03, - atol=1e-08) + np.testing.assert_allclose( + out_pad.numpy(), out_ref, rtol=1e-03, atol=1e-08 + ) def test_check_grad(self): self.__class__.dtype = self.dtype @@ -184,7 +178,6 @@ class TestLogsumexp_FP16(TestLogsumexp): class TestLogsumexpError(unittest.TestCase): - def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): self.assertRaises(TypeError, paddle.logsumexp, 1) @@ -193,12 +186,14 @@ class TestLogsumexpError(unittest.TestCase): class TestLogsumexpAPI(unittest.TestCase): - def setUp(self): self.shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def api_case(self, axis=None, keepdim=False): out_ref = ref_logsumexp(self.x, axis, keepdim) diff --git a/python/paddle/fluid/tests/unittests/test_lookahead.py b/python/paddle/fluid/tests/unittests/test_lookahead.py index 10c2ef78b861821babd8433bf1ae73f13832f7c2..6f7c052f9925697666e83dc19fd3307f3201239d 100644 --- a/python/paddle/fluid/tests/unittests/test_lookahead.py +++ b/python/paddle/fluid/tests/unittests/test_lookahead.py @@ -25,7 +25,6 @@ SGD_LR = 1.0 class TestLookAhead(unittest.TestCase): - def test_lookahead_static(self): paddle.enable_static() place = fluid.CPUPlace() @@ -41,7 +40,8 @@ class TestLookAhead(unittest.TestCase): optimizer = paddle.optimizer.SGD(learning_rate=SGD_LR) lookahead = paddle.incubate.optimizer.LookAhead( - optimizer, alpha=LOOKAHEAD_ALPHA, k=LOOKAHEAD_K) + optimizer, alpha=LOOKAHEAD_ALPHA, k=LOOKAHEAD_K + ) lookahead.minimize(loss) exe.run(startup) @@ -49,21 +49,24 @@ class TestLookAhead(unittest.TestCase): fast_param = None for i in range(10): if (i + 1) % LOOKAHEAD_K == 0: - slow_param = slow_param + LOOKAHEAD_ALPHA * (fast_param - - slow_param) + slow_param = slow_param + LOOKAHEAD_ALPHA * ( + fast_param - slow_param + ) x = np.random.random(size=(10, 1)).astype('float32') - latest_b, b_grad = exe.run(program=train_program, - feed={'X': x}, - fetch_list=[ - 'fc_0.b_0', - 'fc_0.b_0@GRAD', - ]) + latest_b, b_grad = exe.run( + program=train_program, + feed={'X': x}, + fetch_list=[ + 'fc_0.b_0', + 'fc_0.b_0@GRAD', + ], + ) if i == 0: slow_param = latest_b if (i + 1) % LOOKAHEAD_K == 0: - self.assertAlmostEqual(slow_param.all(), - latest_b.all(), - delta=5e-3) + 
self.assertAlmostEqual( + slow_param.all(), latest_b.all(), delta=5e-3 + ) fast_param = latest_b - SGD_LR * b_grad def func_test_look_ahead_dygraph(self): @@ -76,21 +79,20 @@ class TestLookAhead(unittest.TestCase): # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, - (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype( + 'int64' + ) return image, label def __len__(self): return self.num_samples class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -110,34 +112,41 @@ class TestLookAhead(unittest.TestCase): out = layer(image) loss = loss_fn(out, label) loss.backward() - fast_param = (layer.bias.numpy() - - SGD_LR * layer.bias.grad.numpy()) + fast_param = ( + layer.bias.numpy() - SGD_LR * layer.bias.grad.numpy() + ) opt.step() if idx == 1: slow_param = fast_param if idx % LOOKAHEAD_K == 0: slow_param = slow_param + LOOKAHEAD_ALPHA * ( - fast_param - slow_param) - self.assertAlmostEqual(np.mean(slow_param), - np.mean(layer.bias.numpy()), - delta=5e-3) + fast_param - slow_param + ) + self.assertAlmostEqual( + np.mean(slow_param), + np.mean(layer.bias.numpy()), + delta=5e-3, + ) opt.clear_grad() layer = LinearNet() loss_fn = nn.CrossEntropyLoss() - optimizer = paddle.optimizer.SGD(learning_rate=SGD_LR, - parameters=layer.parameters()) - lookahead = paddle.incubate.optimizer.LookAhead(optimizer, - alpha=LOOKAHEAD_ALPHA, - k=LOOKAHEAD_K) + optimizer = paddle.optimizer.SGD( + learning_rate=SGD_LR, parameters=layer.parameters() + ) + lookahead = paddle.incubate.optimizer.LookAhead( + optimizer, alpha=LOOKAHEAD_ALPHA, k=LOOKAHEAD_K + ) # create data loader dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) - loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) + loader = paddle.io.DataLoader( + dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) train(layer, loader, loss_fn, lookahead) diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py index 8c6b3421e24a995e212ab0b107f77a1d1d259d5f..4c572622abe7b8c1659eab73638d751d0c4628f7 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py @@ -14,10 +14,12 @@ import unittest import numpy as np -from paddle.fluid.tests.unittests.op_test import (OpTest, - convert_float_to_uint16, - convert_uint16_to_float, - skip_check_grad_ci) +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + convert_uint16_to_float, + skip_check_grad_ci, +) import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator @@ -26,8 +28,11 @@ from paddle import enable_static def _lookup(weights, ids, flat_ids, op_version="lookup_table"): w_shape = weights.shape - out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list( - ids.shape) + out_shape = ( + list(ids.shape[:-1]) + if op_version == "lookup_table" + else list(ids.shape) + ) out_shape.append(w_shape[-1]) out = weights[flat_ids].reshape(out_shape) return out @@ -36,8 +41,11 @@ def _lookup(weights, ids, flat_ids, op_version="lookup_table"): def 
_get_grad(weights, ids, flat_ids, op_version="lookup_table"): w_shape = weights.shape w_grad = np.zeros((w_shape), dtype=weights.dtype) - out_shape = list(ids.shape[:-1]) if op_version == "lookup_table" else list( - ids.shape) + out_shape = ( + list(ids.shape[:-1]) + if op_version == "lookup_table" + else list(ids.shape) + ) out_grad_shape = (np.prod(out_shape), w_shape[-1]) out_grad = weights[flat_ids].reshape(out_grad_shape) for i, idx in enumerate(flat_ids): @@ -45,10 +53,10 @@ def _get_grad(weights, ids, flat_ids, op_version="lookup_table"): return w_grad -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16Op(OpTest): - def init_test(self): self.op_type = "lookup_table" self.ids_shape = (4, 1) @@ -62,11 +70,13 @@ class TestLookupTableBF16Op(OpTest): self.flat_ids = self.ids.flatten() self.w_bf16 = convert_float_to_uint16(table) - self.out_bf16 = _lookup(self.w_bf16, self.ids, self.flat_ids, - self.op_type) + self.out_bf16 = _lookup( + self.w_bf16, self.ids, self.flat_ids, self.op_type + ) self.out_fp32 = _lookup(table, self.ids, self.flat_ids, self.op_type) - self.w_grad_fp32 = _get_grad(table, self.ids, self.flat_ids, - self.op_type) + self.w_grad_fp32 = _get_grad( + table, self.ids, self.flat_ids, self.op_type + ) self.inputs = {'W': self.w_bf16, 'Ids': self.ids} self.outputs = {'Out': self.out_fp32} @@ -75,36 +85,40 @@ class TestLookupTableBF16Op(OpTest): self.check_output_with_place(core.CPUPlace(), check_dygraph=False) def test_check_grad(self): - self.check_grad_with_place(core.CPUPlace(), ['W'], - 'Out', - no_grad_set=set('Ids'), - check_dygraph=False, - max_relative_error=1.5e-2, - user_defined_grads=[self.w_grad_fp32], - user_defined_grad_outputs=[self.out_bf16]) - - -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") + self.check_grad_with_place( + core.CPUPlace(), + ['W'], + 'Out', + no_grad_set=set('Ids'), + check_dygraph=False, + max_relative_error=1.5e-2, + user_defined_grads=[self.w_grad_fp32], + user_defined_grad_outputs=[self.out_bf16], + ) + + +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16OpIds4D(TestLookupTableBF16Op): - def init_test(self): self.op_type = "lookup_table" self.ids_shape = (2, 4, 5, 1) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16OpWIsSelectedRows(unittest.TestCase): - def init_test(self): self.op_type = "lookup_table" self.ids_shape = (10, 1) def setUp(self): self.init_test() - self.ids = np.random.randint(low=0, high=15, - size=self.ids_shape).astype("int64") + self.ids = np.random.randint( + low=0, high=15, size=self.ids_shape + ).astype("int64") self.flat_ids = self.ids.flatten() self.w_fp32 = np.random.random((15, 32)).astype("float32") self.w_bf16 = convert_float_to_uint16(self.w_fp32) @@ -144,11 +158,12 @@ class TestLookupTableBF16OpWIsSelectedRows(unittest.TestCase): self._check_output(ref, result_array) -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16OpWIsSelectedRows4DIds( - TestLookupTableBF16OpWIsSelectedRows): - + 
TestLookupTableBF16OpWIsSelectedRows +): def init_test(self): self.op_type = "lookup_table" self.ids_shape = (3, 4, 5, 1) @@ -161,11 +176,12 @@ class TestLookupTableBF16OpWIsSelectedRows4DIds( @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") + "test the gradient here." +) +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16OpWithPadding(TestLookupTableBF16Op): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -177,11 +193,12 @@ class TestLookupTableBF16OpWithPadding(TestLookupTableBF16Op): @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") + "test the gradient here." +) +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestLookupTableBF16OpIds4DPadding(TestLookupTableBF16OpIds4D): - def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -202,8 +219,9 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): def setUp(self): self.ids_shape = [4, 1] self.w_shape = [10, 64] - self.ids = np.random.randint(low=0, high=9, - size=self.ids_shape).astype("int64") + self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype( + "int64" + ) self.flat_ids = self.ids.flatten() self.value = 3.0 self.w_fp32 = np.full(self.w_shape, self.value) @@ -214,18 +232,20 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): with fluid.program_guard(self.prog, self.startup_prog): x = fluid.layers.data(name='x', shape=self.ids_shape, dtype='int64') - self.emb = fluid.layers.embedding(input=x, - size=self.w_shape, - param_attr=fluid.ParamAttr( - name="emb_weight", - initializer=self.initializer), - is_sparse=False, - dtype="uint16") # bfloat16 + self.emb = fluid.layers.embedding( + input=x, + size=self.w_shape, + param_attr=fluid.ParamAttr( + name="emb_weight", initializer=self.initializer + ), + is_sparse=False, + dtype="uint16", + ) # bfloat16 exe = fluid.Executor(self.place) exe.run(self.startup_prog) - self.result = exe.run(self.prog, - feed={'x': self.ids}, - fetch_list=['emb_weight', self.emb]) + self.result = exe.run( + self.prog, feed={'x': self.ids}, fetch_list=['emb_weight', self.emb] + ) def test_embedding_weights(self): result = convert_uint16_to_float(self.result[0]) diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_dequant_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_dequant_op.py index bd15a3ac1cbf0ee689523d5530f8b73aaeb6d903..42a491344c62cd994f0f40390817f5a07d1a40da 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_dequant_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_dequant_op.py @@ -19,7 +19,6 @@ import struct class TestLookupTableDequantOp(OpTest): - def setUp(self): self.op_type = "lookup_table_dequant" table = np.random.random((17, 32)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index 
2a57ca7e5968f637503d035d194e297e4698fcf7..02fccaaca223281c9a7f35678f06b14c7052abcf 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -23,7 +23,6 @@ import paddle.nn.functional as F class TestLookupTableOp(OpTest): - def setUp(self): self.op_type = "lookup_table" table = np.random.random((17, 31)).astype("float64") @@ -40,12 +39,12 @@ class TestLookupTableOp(OpTest): class TestLookupTableOpWithTensorIds(OpTest): - def setUp(self): self.op_type = "lookup_table" table = np.random.random((17, 31)).astype("float64") - ids = np.random.randint(low=0, high=17, - size=(2, 4, 5, 1)).astype("int64") + ids = np.random.randint(low=0, high=17, size=(2, 4, 5, 1)).astype( + "int64" + ) self.inputs = {'W': table, 'Ids': ids} self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))} @@ -59,9 +58,9 @@ class TestLookupTableOpWithTensorIds(OpTest): @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." +) class TestLookupTableOpWithPadding(TestLookupTableOp): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -73,9 +72,9 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." +) class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): - def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -86,7 +85,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): class TestLookupTableWIsSelectedRows(unittest.TestCase): - def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([[0], [4], [3], [5]]).astype("int64") @@ -139,13 +137,14 @@ class TestLookupTableWIsSelectedRows(unittest.TestCase): self.check_with_place(place) -class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows - ): - +class TestLookupTableWithTensorIdsWIsSelectedRows( + TestLookupTableWIsSelectedRows +): def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() - ids_array = np.random.randint(low=0, high=6, - size=(2, 4, 3, 1)).astype("int64") + ids_array = np.random.randint(low=0, high=6, size=(2, 4, 3, 1)).astype( + "int64" + ) ids_tensor.set(ids_array, place) return ids_array @@ -155,7 +154,6 @@ class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows class TestEmbedOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.randint(0, 10, (4, 1)).astype("int64") @@ -176,9 +174,9 @@ class TestEmbedOpError(unittest.TestCase): def test_param_dtype(): # dtype must be float32 or float64 input2 = fluid.data(name='x2', shape=[4, 1], dtype='int64') - fluid.layers.embedding(input=input2, - size=(10, 64), - dtype='int64') + fluid.layers.embedding( + input=input2, size=(10, 64), dtype='int64' + ) self.assertRaises(TypeError, test_param_dtype) @@ -187,11 +185,11 @@ class TestEmbedOpError(unittest.TestCase): class TestLookupTableOpInt8(OpTest): - def setUp(self): self.op_type = "lookup_table" - table = np.random.randint(low=-128, high=127, - size=(17, 31)).astype("int8") + table = np.random.randint(low=-128, 
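# A short sketch of the padding_idx behaviour that the *WithPadding test cases
# above exercise: ids equal to padding_idx are expected to produce all-zero
# rows in the lookup output.  The helper name and shapes are illustrative, and
# the exact attribute handling of the op is elided in the hunks above.
import numpy as np


def lookup_with_padding(table, ids, padding_idx):
    out = table[ids]                 # row gather (fancy indexing returns a copy)
    out[ids == padding_idx] = 0.0    # padded positions contribute nothing
    return out


table = np.random.random((17, 31)).astype("float64")
ids = np.array([1, 5, 1, 9], dtype="int64")
out = lookup_with_padding(table, ids, padding_idx=1)
assert not out[0].any() and not out[2].any()   # both occurrences of id 1 are zeroed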
high=127, size=(17, 31)).astype( + "int8" + ) ids = np.random.randint(0, 17, 4).astype("int64") ids_expand = np.expand_dims(ids, axis=1) self.inputs = {'W': table, 'Ids': ids_expand} @@ -207,13 +205,14 @@ class TestLookupTableOpInt8(OpTest): class TestLookupTableOpWithTensorIdsInt8(OpTest): - def setUp(self): self.op_type = "lookup_table" - table = np.random.randint(low=-128, high=127, - size=(17, 31)).astype("int8") - ids = np.random.randint(low=0, high=17, - size=(2, 4, 5, 1)).astype("int64") + table = np.random.randint(low=-128, high=127, size=(17, 31)).astype( + "int8" + ) + ids = np.random.randint(low=0, high=17, size=(2, 4, 5, 1)).astype( + "int64" + ) self.inputs = {'W': table, 'Ids': ids} self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))} @@ -227,7 +226,6 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest): class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -242,8 +240,8 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8): class TestLookupTableOpWithTensorIdsAndPaddingInt8( - TestLookupTableOpWithTensorIdsInt8): - + TestLookupTableOpWithTensorIdsInt8 +): def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -259,7 +257,6 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt8( class TestLookupTableWIsSelectedRowsInt8(unittest.TestCase): - def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([[0], [4], [3], [5]]).astype("int64") @@ -313,12 +310,13 @@ class TestLookupTableWIsSelectedRowsInt8(unittest.TestCase): class TestLookupTableWithTensorIdsWIsSelectedRowsInt8( - TestLookupTableWIsSelectedRowsInt8): - + TestLookupTableWIsSelectedRowsInt8 +): def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() - ids_array = np.random.randint(low=0, high=6, - size=(2, 4, 3, 1)).astype("int64") + ids_array = np.random.randint(low=0, high=6, size=(2, 4, 3, 1)).astype( + "int64" + ) ids_tensor.set(ids_array, place) return ids_array @@ -329,11 +327,11 @@ class TestLookupTableWithTensorIdsWIsSelectedRowsInt8( @skip_check_grad_ci(reason="Int16 type only be used in test and inference.") class TestLookupTableOpInt16(OpTest): - def setUp(self): self.op_type = "lookup_table" - table = np.random.randint(low=-128, high=127, - size=(17, 31)).astype("int16") + table = np.random.randint(low=-128, high=127, size=(17, 31)).astype( + "int16" + ) ids = np.random.randint(0, 17, 4).astype("int64") ids_expand = np.expand_dims(ids, axis=1) self.inputs = {'W': table, 'Ids': ids_expand} @@ -345,13 +343,14 @@ class TestLookupTableOpInt16(OpTest): @skip_check_grad_ci(reason="Int16 type only be used in test and inference.") class TestLookupTableOpWithTensorIdsInt16(OpTest): - def setUp(self): self.op_type = "lookup_table" - table = np.random.randint(low=-128, high=127, - size=(17, 31)).astype("int16") - ids = np.random.randint(low=0, high=17, - size=(2, 4, 5, 1)).astype("int64") + table = np.random.randint(low=-128, high=127, size=(17, 31)).astype( + "int16" + ) + ids = np.random.randint(low=0, high=17, size=(2, 4, 5, 1)).astype( + "int64" + ) self.inputs = {'W': table, 'Ids': ids} self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))} @@ -361,7 +360,6 @@ class TestLookupTableOpWithTensorIdsInt16(OpTest): @skip_check_grad_ci(reason="Int16 type only be used in test and inference.") class TestLookupTableOpWithPaddingInt16(TestLookupTableOpInt16): - def 
test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -372,8 +370,8 @@ class TestLookupTableOpWithPaddingInt16(TestLookupTableOpInt16): @skip_check_grad_ci(reason="Int16 type only be used in test and inference.") class TestLookupTableOpWithTensorIdsAndPaddingInt16( - TestLookupTableOpWithTensorIdsInt16): - + TestLookupTableOpWithTensorIdsInt16 +): def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -384,7 +382,6 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt16( class TestLookupTableWIsSelectedRowsInt16(unittest.TestCase): - def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([[0], [4], [3], [5]]).astype("int64") @@ -437,12 +434,13 @@ class TestLookupTableWIsSelectedRowsInt16(unittest.TestCase): class TestLookupTableWithTensorIdsWIsSelectedRowsInt16( - TestLookupTableWIsSelectedRowsInt16): - + TestLookupTableWIsSelectedRowsInt16 +): def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() - ids_array = np.random.randint(low=0, high=6, - size=(2, 4, 3, 1)).astype("int64") + ids_array = np.random.randint(low=0, high=6, size=(2, 4, 3, 1)).astype( + "int64" + ) ids_tensor.set(ids_array, place) return ids_array @@ -452,13 +450,14 @@ class TestLookupTableWithTensorIdsWIsSelectedRowsInt16( class TestOutDtype(unittest.TestCase): - def test_dtype(self): api_fn = F.embedding - check_out_dtype(api_fn, - in_specs=[([10, 16], 'int64'), ([100, 64], )], - expect_dtypes=['float32', 'float64'], - target_index=1) + check_out_dtype( + api_fn, + in_specs=[([10, 16], 'int64'), ([100, 64],)], + expect_dtypes=['float32', 'float64'], + target_index=1, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py index a479693d518b26e5e43995c4be7d3a1fa7fbca2e..e0dbf743430c6625c5917eeec0796e2dc2abdf23 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py @@ -17,23 +17,24 @@ import numpy as np import paddle from paddle.fluid.tests.unittests.op_test import convert_uint16_to_float from paddle.fluid.tests.unittests.test_lookup_table_bf16_op import ( - _lookup, TestLookupTableBF16Op, TestLookupTableBF16OpIds4D, + _lookup, + TestLookupTableBF16Op, + TestLookupTableBF16OpIds4D, TestLookupTableBF16OpWIsSelectedRows, - TestLookupTableBF16OpWIsSelectedRows4DIds) + TestLookupTableBF16OpWIsSelectedRows4DIds, +) import paddle.fluid as fluid import paddle.fluid.core as core class TestLookupTableV2BF16Op(TestLookupTableBF16Op): - def init_test(self): self.op_type = "lookup_table_v2" - self.ids_shape = (4) + self.ids_shape = 4 self.mkldnn_data_type = "bfloat16" class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D): - def init_test(self): self.op_type = "lookup_table_v2" self.ids_shape = (2, 4, 5) @@ -41,23 +42,22 @@ class TestLookupTableV2BF16OpIds4D(TestLookupTableBF16OpIds4D): class TestLookupTableV2BF16OpWIsSelectedRows( - TestLookupTableBF16OpWIsSelectedRows): - + TestLookupTableBF16OpWIsSelectedRows +): def init_test(self): self.op_type = "lookup_table_v2" - self.ids_shape = (10) + self.ids_shape = 10 class TestLookupTableV2BF16OpWIsSelectedRows4DIds( - TestLookupTableBF16OpWIsSelectedRows4DIds): - + TestLookupTableBF16OpWIsSelectedRows4DIds +): def init_test(self): self.op_type = "lookup_table_v2" self.ids_shape = (3, 4, 5) class 
TestLookupTableBF16OpWithPadding(TestLookupTableV2BF16Op): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -67,7 +67,6 @@ class TestLookupTableBF16OpWithPadding(TestLookupTableV2BF16Op): class TestLookupTableBF16OpIds4DPadding(TestLookupTableV2BF16OpIds4D): - def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -89,8 +88,9 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): self.op_type = "lookup_table_v2" self.ids_shape = [4] self.w_shape = [10, 64] - self.ids = np.random.randint(low=0, high=9, - size=self.ids_shape).astype("int64") + self.ids = np.random.randint(low=0, high=9, size=self.ids_shape).astype( + "int64" + ) self.flat_ids = self.ids.flatten() self.value = 3.0 self.w_fp32 = np.full(self.w_shape, self.value) @@ -101,18 +101,20 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): with fluid.program_guard(self.prog, self.startup_prog): x = fluid.layers.data(name='x', shape=self.ids_shape, dtype='int64') - self.emb = fluid.input.embedding(input=x, - size=self.w_shape, - param_attr=fluid.ParamAttr( - name="emb_weight", - initializer=self.initializer), - is_sparse=False, - dtype="uint16") # bfloat16 + self.emb = fluid.input.embedding( + input=x, + size=self.w_shape, + param_attr=fluid.ParamAttr( + name="emb_weight", initializer=self.initializer + ), + is_sparse=False, + dtype="uint16", + ) # bfloat16 exe = fluid.Executor(self.place) exe.run(self.startup_prog) - self.result = exe.run(self.prog, - feed={'x': self.ids}, - fetch_list=['emb_weight', self.emb]) + self.result = exe.run( + self.prog, feed={'x': self.ids}, fetch_list=['emb_weight', self.emb] + ) def test_embedding_weights(self): result = convert_uint16_to_float(self.result[0]) diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py index 4919c9e8c00891d249ff85662f52225e8b3404d5..0f73c052bb51b449dd65c5dff10387f2d68ad7fb 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py @@ -24,7 +24,6 @@ from paddle.fluid import Program, program_guard class TestStaticGraphSupportMultipleInt(unittest.TestCase): - def test_main(self): dtypes = ['uint8', 'int8', 'int16', 'int32', 'int64'] if paddle.in_dynamic_mode(): @@ -33,8 +32,9 @@ class TestStaticGraphSupportMultipleInt(unittest.TestCase): else: disable_static = False for i, dtype in enumerate(dtypes): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.static.data(name='x', shape=[-1, 7, 30], dtype=dtype) emb = paddle.nn.Embedding(10, 20) y = emb(x) @@ -44,7 +44,6 @@ class TestStaticGraphSupportMultipleInt(unittest.TestCase): class TestLookupTableOp(OpTest): - def setUp(self): self.op_type = "lookup_table_v2" self.python_api = paddle.nn.functional.embedding @@ -64,25 +63,21 @@ class TestLookupTableOp(OpTest): class TestLookupTableOpInt16(OpTest): - def id_dtype(self): return "int16" class TestLookupTableOpInt8(OpTest): - def id_dtype(self): return "int8" class TestLookupTableOpUInt8(OpTest): - def id_dtype(self): return "uint8" class TestLookupTableOpWithTensorIds(OpTest): - def setUp(self): self.op_type = "lookup_table_v2" table = np.random.random((17, 31)).astype("float64") @@ -100,9 +95,9 @@ class TestLookupTableOpWithTensorIds(OpTest): 
@skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." +) class TestLookupTableOpWithPadding(TestLookupTableOp): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -114,9 +109,9 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." +) class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): - def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -127,7 +122,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): class TestLookupTableWIsSelectedRows(unittest.TestCase): - def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([0, 4, 3, 5]).astype("int32") @@ -180,13 +174,14 @@ class TestLookupTableWIsSelectedRows(unittest.TestCase): self.check_with_place(place) -class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows - ): - +class TestLookupTableWithTensorIdsWIsSelectedRows( + TestLookupTableWIsSelectedRows +): def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() - ids_array = np.random.randint(low=0, high=6, - size=(2, 4, 3)).astype("int64") + ids_array = np.random.randint(low=0, high=6, size=(2, 4, 3)).astype( + "int64" + ) ids_tensor.set(ids_array, place) return ids_array @@ -196,7 +191,6 @@ class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows class TestLookupTableIsSparse(unittest.TestCase): - def init_data(self): self.x_data = np.array([[1, 3, 0, 4, 7]]).astype("int64") self.y_data = np.array([[0.1, 0.3, 0, 0.4, 0.7]]).astype("float32") @@ -214,8 +208,11 @@ class TestLookupTableIsSparse(unittest.TestCase): name="emb_weight", learning_rate=10, initializer=fluid.initializer.NumpyArrayInitializer( - self.w_data)), - is_sparse=is_sparse) + self.w_data + ), + ), + is_sparse=is_sparse, + ) y = fluid.layers.reduce_sum(emb, dim=-1) loss = fluid.layers.square_error_cost(input=y, label=y_) @@ -227,12 +224,11 @@ class TestLookupTableIsSparse(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': self.x_data, - 'y_': self.y_data - }, - fetch_list=['emb_weight'], - return_numpy=False) + ret = exe.run( + feed={'x': self.x_data, 'y_': self.y_data}, + fetch_list=['emb_weight'], + return_numpy=False, + ) return np.array(ret[0]) def test_w_grad(self): @@ -242,14 +238,12 @@ class TestLookupTableIsSparse(unittest.TestCase): self.check_grad(w_grad, w_grad_with_sparse) def check_grad(self, w_grad1, w_grad2, tolerance=1e-6): - np.testing.assert_allclose(w_grad1, - w_grad2, - rtol=tolerance, - atol=tolerance) + np.testing.assert_allclose( + w_grad1, w_grad2, rtol=tolerance, atol=tolerance + ) class TestLookupTableApi(unittest.TestCase): - def test_api(self): x = fluid.layers.data(name='x', shape=[20], dtype='int64') emb = fluid.embedding(input=x, size=[128, 64]) @@ -259,15 +253,16 @@ class TestLookupTableApi(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x_data, - }, - fetch_list=[emb], - return_numpy=False) + ret = exe.run( + feed={ + 'x': x_data, 
+ }, + fetch_list=[emb], + return_numpy=False, + ) class TestEmbedOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.randint(0, 10, (4, 6)).astype("int64") diff --git a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py index 2d76f141f9ff8ea2a66525a6734a5aec6e54298d..55d26bc50937907de54671a9bea2704b7652e099 100644 --- a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py @@ -21,9 +21,9 @@ import paddle.fluid as fluid import paddle.fluid.core as core -def reduce_lr_on_plateau(decay_rate, threshold, cooldown, patience, m, n, loss, - var_list): - +def reduce_lr_on_plateau( + decay_rate, threshold, cooldown, patience, m, n, loss, var_list +): def is_better(current, best, m, n): if m == 'min' and n == 'rel': return current < best - best * threshold @@ -53,7 +53,6 @@ def reduce_lr_on_plateau(decay_rate, threshold, cooldown, patience, m, n, loss, class TestReduceOnPlateauDecay(object): - def test_ReduceLR(self): # the decay rate must be less than 1.0 with self.assertRaises(ValueError): @@ -63,8 +62,9 @@ class TestReduceOnPlateauDecay(object): paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, mode="test") # the threshold_mode must be "rel" or "abs" with self.assertRaises(ValueError): - paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, - threshold_mode="test") + paddle.optimizer.lr.ReduceOnPlateau( + learning_rate=1.0, threshold_mode="test" + ) with self.assertRaises(TypeError): paddle.optimizer.lr.ReduceOnPlateau(learning_rate="test") with self.assertRaises(TypeError): @@ -75,8 +75,9 @@ class TestReduceOnPlateauDecay(object): places.append(paddle.CUDAPlace(0)) for place in places: - for m, n in zip(['min', 'max', 'min', 'max'], - ['rel', 'rel', 'abs', 'abs']): + for m, n in zip( + ['min', 'max', 'min', 'max'], ['rel', 'rel', 'abs', 'abs'] + ): kwargs = { 'learning_rate': 1.0, 'mode': m, @@ -107,10 +108,9 @@ class TestReduceOnPlateauDecay(object): main_prog = paddle.static.Program() start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): - x = fluid.layers.create_global_var([1], - 1, - 'float32', - persistable=True) + x = fluid.layers.create_global_var( + [1], 1, 'float32', persistable=True + ) paddle.increment(x) loss = paddle.sin(x) scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs) @@ -124,12 +124,19 @@ class TestReduceOnPlateauDecay(object): for epoch in range(20): for batch_id in range(1): - out, actual_lr = exe.run(main_prog, - fetch_list=[loss.name, lr_var.name]) + out, actual_lr = exe.run( + main_prog, fetch_list=[loss.name, lr_var.name] + ) expected_lr = reduce_lr_on_plateau( - kwargs['factor'], kwargs['threshold'], kwargs['cooldown'], - kwargs['patience'], kwargs['mode'], - kwargs['threshold_mode'], out[0], var_list) + kwargs['factor'], + kwargs['threshold'], + kwargs['cooldown'], + kwargs['patience'], + kwargs['mode'], + kwargs['threshold_mode'], + out[0], + var_list, + ) scheduler.step(out[0]) actual_lr = scheduler() @@ -137,12 +144,19 @@ class TestReduceOnPlateauDecay(object): for epoch in range(10): for batch_id in range(1): - out, actual_lr = exe.run(test_prog, - fetch_list=[loss.name, lr_var.name]) + out, actual_lr = exe.run( + test_prog, fetch_list=[loss.name, lr_var.name] + ) expected_lr = reduce_lr_on_plateau( - kwargs['factor'], kwargs['threshold'], kwargs['cooldown'], - kwargs['patience'], kwargs['mode'], - 
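# reduce_lr_on_plateau above is the pure-Python reference that the
# ReduceOnPlateau scheduler is compared against.  The sketch below restates the
# rule for the 'min'/'rel' branch visible in the hunk (an improvement must beat
# `best` by a relative margin); the class name PlateauRule, its attributes and
# the strict `> patience` comparison are illustrative choices, not the test's
# own bookkeeping.
class PlateauRule:
    def __init__(self, lr, factor=0.1, threshold=1e-4, patience=10, cooldown=0):
        self.lr = lr
        self.factor = factor
        self.threshold = threshold
        self.patience = patience
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.num_bad_epochs = 0
        self.best = None

    def step(self, metric):
        # 'min'/'rel': only a relative improvement over `best` resets patience.
        if self.best is None or metric < self.best - self.best * self.threshold:
            self.best = metric
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.cooldown_counter > 0:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0
        elif self.num_bad_epochs > self.patience:
            self.lr *= self.factor
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
        return self.lr


rule = PlateauRule(lr=1.0, factor=0.5, patience=2)
for loss in [1.0, 0.9, 0.9, 0.9, 0.9, 0.9]:
    lr = rule.step(loss)
assert lr == 0.5   # the flat losses exhaust patience once, halving the LR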
kwargs['threshold_mode'], out[0], var_list) + kwargs['factor'], + kwargs['threshold'], + kwargs['cooldown'], + kwargs['patience'], + kwargs['mode'], + kwargs['threshold_mode'], + out[0], + var_list, + ) scheduler.step(out[0]) actual_lr = scheduler() self.assertEqual(actual_lr, np.array(expected_lr)) @@ -158,8 +172,9 @@ class TestReduceOnPlateauDecay(object): linear = paddle.nn.Linear(10, 10) scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs) - adam = paddle.optimizer.Adam(learning_rate=scheduler, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=scheduler, parameters=linear.parameters() + ) for epoch in range(20): for batch_id in range(1): @@ -174,17 +189,25 @@ class TestReduceOnPlateauDecay(object): current_lr = adam.get_lr() # get lr form python expected_lr = reduce_lr_on_plateau( - kwargs['factor'], kwargs['threshold'], kwargs['cooldown'], - kwargs['patience'], kwargs['mode'], kwargs['threshold_mode'], - loss, var_list) + kwargs['factor'], + kwargs['threshold'], + kwargs['cooldown'], + kwargs['patience'], + kwargs['mode'], + kwargs['threshold_mode'], + loss, + var_list, + ) self.assertEqual(current_lr, expected_lr) state_dict = adam.state_dict() scheduler1 = paddle.optimizer.lr.ReduceOnPlateau(**kwargs) - adam1 = paddle.optimizer.Adam(learning_rate=scheduler1, - parameters=linear.parameters()) + adam1 = paddle.optimizer.Adam( + learning_rate=scheduler1, parameters=linear.parameters() + ) adam1.set_state_dict(state_dict) - self.assertEqual(scheduler.cooldown_counter, - scheduler1.cooldown_counter) + self.assertEqual( + scheduler.cooldown_counter, scheduler1.cooldown_counter + ) self.assertEqual(scheduler.best.numpy()[0], scheduler1.best) self.assertEqual(scheduler.num_bad_epochs, scheduler1.num_bad_epochs) self.assertEqual(scheduler.last_epoch, scheduler1.last_epoch) @@ -231,13 +254,15 @@ def inverse_time_lr(epoch_num, learning_rate, gamma, verbose=False): return learning_rate / (1 + gamma * epoch_num) -def polynomial_lr(epoch_num, - learning_rate, - decay_steps, - end_lr=0.0001, - power=1.0, - cycle=False, - verbose=False): +def polynomial_lr( + epoch_num, + learning_rate, + decay_steps, + end_lr=0.0001, + power=1.0, + cycle=False, + verbose=False, +): if cycle: div = math.ceil(epoch_num / float(decay_steps)) @@ -247,53 +272,58 @@ def polynomial_lr(epoch_num, else: epoch_num = min(epoch_num, decay_steps) return (learning_rate - end_lr) * ( - (1 - float(epoch_num) / float(decay_steps))**power) + end_lr + (1 - float(epoch_num) / float(decay_steps)) ** power + ) + end_lr def get_lr(self): if self.last_epoch == 0: return self.base_lr elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: - return self.last_lr + (self.base_lr - self.eta_min) * ( - 1 - math.cos(math.pi / self.T_max)) / 2 + return ( + self.last_lr + + (self.base_lr - self.eta_min) + * (1 - math.cos(math.pi / self.T_max)) + / 2 + ) return (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / ( - 1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * ( - self.last_lr - self.eta_min) + self.eta_min + 1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max) + ) * (self.last_lr - self.eta_min) + self.eta_min cosine_annealing_lr_current = None -def cosine_annealing_lr(epoch_num, - learning_rate, - T_max, - eta_min=0, - verbose=False): +def cosine_annealing_lr( + epoch_num, learning_rate, T_max, eta_min=0, verbose=False +): global cosine_annealing_lr_current if epoch_num == 0: cosine_annealing_lr_current = learning_rate elif (epoch_num - 1 - T_max) % (2 * T_max) == 0: - 
cosine_annealing_lr_current = cosine_annealing_lr_current + ( - learning_rate - eta_min) * (1 - - math.cos(math.pi / float(T_max))) / 2 + cosine_annealing_lr_current = ( + cosine_annealing_lr_current + + (learning_rate - eta_min) + * (1 - math.cos(math.pi / float(T_max))) + / 2 + ) else: cosine_annealing_lr_current = ( - 1 + math.cos(math.pi * epoch_num / float(T_max))) / ( - 1 + math.cos(math.pi * (epoch_num - 1) / float(T_max))) * ( - cosine_annealing_lr_current - eta_min) + eta_min + 1 + math.cos(math.pi * epoch_num / float(T_max)) + ) / (1 + math.cos(math.pi * (epoch_num - 1) / float(T_max))) * ( + cosine_annealing_lr_current - eta_min + ) + eta_min return cosine_annealing_lr_current -def linear_warmup_lr(epoch_num, - learning_rate, - warmup_steps, - start_lr, - end_lr, - verbose=False): +def linear_warmup_lr( + epoch_num, learning_rate, warmup_steps, start_lr, end_lr, verbose=False +): tmp = epoch_num - warmup_steps if tmp < 0: - return start_lr + (end_lr - start_lr) * (float(epoch_num) / - float(warmup_steps)) + return start_lr + (end_lr - start_lr) * ( + float(epoch_num) / float(warmup_steps) + ) elif paddle.in_dynamic_mode(): if tmp < 3: return 0.5 @@ -305,35 +335,36 @@ def linear_warmup_lr(epoch_num, return 0.5 -def multi_step_lr(epoch_num, - learning_rate, - milestones, - gamma=0.1, - verbose=False): +def multi_step_lr( + epoch_num, learning_rate, milestones, gamma=0.1, verbose=False +): for i in range(len(milestones)): if epoch_num < milestones[i]: return learning_rate * (gamma**i) - return learning_rate * (gamma**len(milestones)) + return learning_rate * (gamma ** len(milestones)) def step_lr(epoch_num, learning_rate, step_size, gamma=0.1, verbose=False): return learning_rate * math.pow(gamma, epoch_num // step_size) -def one_cycle_lr(epoch_num, - max_learning_rate, - total_steps, - divide_factor=25, - end_learning_rate=0.0001, - phase_pct=0.3, - anneal_strategy='cos', - three_phase=False, - verbose=False): +def one_cycle_lr( + epoch_num, + max_learning_rate, + total_steps, + divide_factor=25, + end_learning_rate=0.0001, + phase_pct=0.3, + anneal_strategy='cos', + three_phase=False, + verbose=False, +): initial_lr = max_learning_rate / divide_factor if three_phase: _end_steps = [ float(phase_pct * total_steps) - 1, - float(2 * phase_pct * total_steps) - 2, total_steps - 1 + float(2 * phase_pct * total_steps) - 2, + total_steps - 1, ] _schedule_phases = [ { @@ -367,6 +398,7 @@ def one_cycle_lr(epoch_num, def anneal_func(start, end, pct): cos_out = math.cos(math.pi * pct) + 1 return end + (start - end) / 2.0 * cos_out + else: def anneal_func(start, end, pct): @@ -384,24 +416,26 @@ def one_cycle_lr(epoch_num, return computed_lr -def cyclic_lr(epoch_num, - base_learning_rate, - max_learning_rate, - step_size_up, - step_size_down, - mode, - exp_gamma=0.1, - scale_fn=None, - scale_mode='cycle', - verbose=False): +def cyclic_lr( + epoch_num, + base_learning_rate, + max_learning_rate, + step_size_up, + step_size_down, + mode, + exp_gamma=0.1, + scale_fn=None, + scale_mode='cycle', + verbose=False, +): total_steps = step_size_up + step_size_down step_ratio = step_size_up / total_steps def triangular(x): - return 1. + return 1.0 def triangular2(x): - return 1 / (2.**(x - 1)) + return 1 / (2.0 ** (x - 1)) def exp_range(x): return exp_gamma**x @@ -419,7 +453,7 @@ def cyclic_lr(epoch_num, cycle = math.floor(1 + epoch_num / total_steps) iterations = epoch_num - x = 1. 
+ epoch_num / total_steps - cycle + x = 1.0 + epoch_num / total_steps - cycle if x <= step_ratio: scale_factor = x / step_ratio @@ -432,7 +466,6 @@ def cyclic_lr(epoch_num, class TestLRScheduler(unittest.TestCase): - def _test_static(self, python_func, paddle_api, kwarg, place): scheduler = paddle_api(**kwarg) adam = paddle.optimizer.Adam(learning_rate=scheduler) @@ -456,7 +489,8 @@ class TestLRScheduler(unittest.TestCase): out = exe.run( main_prog, feed={'x': np.random.randn(3, 4, 5).astype('float32')}, - fetch_list=lr_var.name) + fetch_list=lr_var.name, + ) self.assertEqual(out, np.array(python_func(num, **kwarg))) scheduler.step() num += 1 @@ -466,22 +500,26 @@ class TestLRScheduler(unittest.TestCase): out = exe.run( test_prog, feed={'x': np.random.randn(3, 4, 5).astype('float32')}, - fetch_list=lr_var.name) + fetch_list=lr_var.name, + ) self.assertEqual(out, np.array(python_func(num, **kwarg))) scheduler.step() num += 1 if isinstance(place, paddle.CPUPlace): compiled_train_prog = paddle.static.CompiledProgram( - main_prog).with_data_parallel(loss_name=loss.name, - places=fluid.cpu_places(4)) + main_prog + ).with_data_parallel( + loss_name=loss.name, places=fluid.cpu_places(4) + ) for epoch in range(5): python_result = python_func(num, **kwarg) for batch_id in range(2): _ = exe.run( compiled_train_prog, feed={'x': np.random.randn(12, 4, 5).astype('float32')}, - fetch_list=lr_var.name) + fetch_list=lr_var.name, + ) scopes = compiled_train_prog._executor.local_scopes() out = np.array(scopes[0].var(lr_var.name).get_tensor()) self.assertEqual(out, np.array(python_result)) @@ -495,17 +533,20 @@ class TestLRScheduler(unittest.TestCase): num += 1 compiled_test_prog = paddle.static.CompiledProgram( - test_prog).with_data_parallel( - loss_name=loss.name, - share_vars_from=compiled_train_prog, - places=fluid.cpu_places(4)) + test_prog + ).with_data_parallel( + loss_name=loss.name, + share_vars_from=compiled_train_prog, + places=fluid.cpu_places(4), + ) for epoch in range(5): python_result = python_func(num, **kwarg) for batch_id in range(2): _ = exe.run( compiled_test_prog, feed={'x': np.random.randn(12, 4, 5).astype('float32')}, - fetch_list=lr_var.name) + fetch_list=lr_var.name, + ) scopes = compiled_test_prog._executor.local_scopes() out = np.array(scopes[0].var(lr_var.name).get_tensor()) self.assertEqual(out, np.array(python_result)) @@ -524,10 +565,12 @@ class TestLRScheduler(unittest.TestCase): linear = paddle.nn.Linear(10, 10) if paddle_api.__name__ == "LinearWarmup": kwarg['learning_rate'] = paddle.optimizer.lr.PiecewiseDecay( - [3, 6], [0.5, 0.2, 0.1]) + [3, 6], [0.5, 0.2, 0.1] + ) scheduler = paddle_api(**kwarg) - adam = paddle.optimizer.Adam(learning_rate=scheduler, - parameters=linear.parameters()) + adam = paddle.optimizer.Adam( + learning_rate=scheduler, parameters=linear.parameters() + ) for epoch in range(20): for batch_id in range(2): x = paddle.to_tensor(x) @@ -545,15 +588,20 @@ class TestLRScheduler(unittest.TestCase): self.assertAlmostEqual(current_lr, expected_lr) state_dict = adam.state_dict() scheduler1 = paddle.optimizer.lr.LinearWarmup(**kwarg) - adam1 = paddle.optimizer.Adam(learning_rate=scheduler1, - parameters=linear.parameters()) + adam1 = paddle.optimizer.Adam( + learning_rate=scheduler1, parameters=linear.parameters() + ) adam1.set_state_dict(state_dict) self.assertEqual(scheduler.last_epoch, scheduler1.last_epoch) self.assertEqual(scheduler.last_lr, scheduler1.last_lr) - self.assertEqual(scheduler.learning_rate.last_lr, - scheduler1.learning_rate.last_lr) - 
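# cyclic_lr above is the reference for the CyclicLR entries in func_api_kwargs.
# The sketch below isolates the 'triangular' mode: the LR climbs linearly from
# base to max over step_size_up steps, then falls back over step_size_down
# steps, repeating every cycle.  The falling-edge expression
# (x - 1) / (step_ratio - 1) is an assumption here; that branch is elided in
# the hunk above.
import math


def triangular_cyclic_lr(step, base_lr, max_lr, step_size_up, step_size_down):
    total = step_size_up + step_size_down
    step_ratio = step_size_up / total
    cycle = math.floor(1 + step / total)
    x = 1.0 + step / total - cycle              # position inside the cycle, in [0, 1)
    if x <= step_ratio:
        scale = x / step_ratio                  # rising edge
    else:
        scale = (x - 1) / (step_ratio - 1)      # falling edge (assumed form)
    return base_lr + (max_lr - base_lr) * scale


# With 15 steps up and 5 steps down, the peak is hit at step 15 and the
# schedule returns to base at step 20 (the start of the next cycle).
lrs = [triangular_cyclic_lr(s, 0.5, 1.0, 15, 5) for s in range(21)]
assert abs(lrs[15] - 1.0) < 1e-12 and abs(lrs[20] - lrs[0]) < 1e-12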
self.assertEqual(scheduler.learning_rate.last_epoch, - scheduler1.learning_rate.last_epoch) + self.assertEqual( + scheduler.learning_rate.last_lr, + scheduler1.learning_rate.last_lr, + ) + self.assertEqual( + scheduler.learning_rate.last_epoch, + scheduler1.learning_rate.last_epoch, + ) scheduler.step() else: self.assertEqual(current_lr, expected_lr) @@ -563,266 +611,355 @@ class TestLRScheduler(unittest.TestCase): with self.assertRaises(NotImplementedError): paddle.optimizer.lr.LRScheduler().step() with self.assertRaises(TypeError): - paddle.optimizer.lr.MultiStepDecay(learning_rate="test", - milestones=[1, 2, 3]) + paddle.optimizer.lr.MultiStepDecay( + learning_rate="test", milestones=[1, 2, 3] + ) with self.assertRaises(TypeError): - paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, - milestones='test') + paddle.optimizer.lr.MultiStepDecay( + learning_rate=0.5, milestones='test' + ) with self.assertRaises(ValueError): - paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, - milestones=[3, 2, 1]) + paddle.optimizer.lr.MultiStepDecay( + learning_rate=0.5, milestones=[3, 2, 1] + ) with self.assertRaises(ValueError): - paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, - milestones=[1, 2, 3], - gamma=2) + paddle.optimizer.lr.MultiStepDecay( + learning_rate=0.5, milestones=[1, 2, 3], gamma=2 + ) # check type of max_learning_rate with self.assertRaises(TypeError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate='test', - total_steps=20) + paddle.optimizer.lr.OneCycleLR( + max_learning_rate='test', total_steps=20 + ) # check value of max_learning_rate with self.assertRaises(ValueError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=-1.5, - total_steps=20) + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=-1.5, total_steps=20 + ) # check type of end_learning_rate with self.assertRaises(TypeError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps=20, - end_learning_rate='test') + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, total_steps=20, end_learning_rate='test' + ) # check value of end_learning_rate with self.assertRaises(ValueError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps=20, - end_learning_rate=-1) + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, total_steps=20, end_learning_rate=-1 + ) # check type of total_steps with self.assertRaises(TypeError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps='test') + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, total_steps='test' + ) # check value of total_steps with self.assertRaises(ValueError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps=-10) + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, total_steps=-10 + ) # check value of anneal_strategy with self.assertRaises(ValueError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps=20, - anneal_strategy='test') + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, total_steps=20, anneal_strategy='test' + ) # check value of phase_pct when three_phase is True with self.assertRaises(ValueError): - paddle.optimizer.lr.OneCycleLR(max_learning_rate=0.1, - total_steps=20, - phase_pct=0.6, - three_phase=True) + paddle.optimizer.lr.OneCycleLR( + max_learning_rate=0.1, + total_steps=20, + phase_pct=0.6, + three_phase=True, + ) # check type of max_learning_rate with self.assertRaises(TypeError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate='test', - step_size_up=10) + paddle.optimizer.lr.CyclicLR( + 
base_learning_rate=0.5, + max_learning_rate='test', + step_size_up=10, + ) # check value of max_learning_rate with self.assertRaises(ValueError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=-1, - step_size_up=10) + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, max_learning_rate=-1, step_size_up=10 + ) # check type of step_size_up with self.assertRaises(TypeError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up='test') + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, + max_learning_rate=1.0, + step_size_up='test', + ) # check value of step_size_up with self.assertRaises(ValueError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up=-1) + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, max_learning_rate=1.0, step_size_up=-1 + ) # check type of step_size_down with self.assertRaises(TypeError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up=500, - step_size_down='test') + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, + max_learning_rate=1.0, + step_size_up=500, + step_size_down='test', + ) # check type of step_size_down with self.assertRaises(ValueError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up=500, - step_size_down=-1) + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, + max_learning_rate=1.0, + step_size_up=500, + step_size_down=-1, + ) # check value of mode with self.assertRaises(ValueError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up=500, - step_size_down=500, - mode='test') + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, + max_learning_rate=1.0, + step_size_up=500, + step_size_down=500, + mode='test', + ) # check type value of scale_mode with self.assertRaises(ValueError): - paddle.optimizer.lr.CyclicLR(base_learning_rate=0.5, - max_learning_rate=1.0, - step_size_up=500, - step_size_down=-1, - scale_mode='test') + paddle.optimizer.lr.CyclicLR( + base_learning_rate=0.5, + max_learning_rate=1.0, + step_size_up=500, + step_size_down=-1, + scale_mode='test', + ) func_api_kwargs = [ - (noam_lr, paddle.optimizer.lr.NoamDecay, { - "d_model": 0.01, - "warmup_steps": 100, - "verbose": False - }), - (piecewise_lr, paddle.optimizer.lr.PiecewiseDecay, { - "boundaries": [3, 6, 9, 15, 20], - "values": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6], - "verbose": False - }), - (natural_exp_lr, paddle.optimizer.lr.NaturalExpDecay, { - "learning_rate": 0.5, - "gamma": 0.1, - "verbose": True - }), - (inverse_time_lr, paddle.optimizer.lr.InverseTimeDecay, { - "learning_rate": 0.5, - "gamma": 0.1, - "verbose": False - }), - (polynomial_lr, paddle.optimizer.lr.PolynomialDecay, { - "learning_rate": 0.5, - "decay_steps": 20, - "end_lr": 0, - "power": 1.0, - "cycle": False - }), - (polynomial_lr, paddle.optimizer.lr.PolynomialDecay, { - "learning_rate": 0.5, - "decay_steps": 20, - "end_lr": 0, - "power": 1.0, - "cycle": True, - "verbose": False - }), - (linear_warmup_lr, paddle.optimizer.lr.LinearWarmup, { - 'learning_rate': 0.5, - 'warmup_steps': 10, - 'start_lr': 0, - 'end_lr': 0.5 - }), - (exponential_lr, paddle.optimizer.lr.ExponentialDecay, { - "learning_rate": 0.5, - "gamma": 0.9, - "verbose": False - }), - (multi_step_lr, paddle.optimizer.lr.MultiStepDecay, { - "learning_rate": 0.5, - "milestones": [3, 6, 9, 15, 20], - "gamma": 0.8 - }), - (step_lr, paddle.optimizer.lr.StepDecay, { - 
"learning_rate": 0.5, - "step_size": 2, - "gamma": 0.8, - "verbose": False - }), - (lambda_lr, paddle.optimizer.lr.LambdaDecay, { - "learning_rate": 0.5, - "lr_lambda": lambda x: 0.95**x, - "verbose": True - }), - (multiplicative_lr, paddle.optimizer.lr.MultiplicativeDecay, { - "learning_rate": 0.5, - "lr_lambda": lambda x: 0.95, - "verbose": True - }), - (cosine_annealing_lr, paddle.optimizer.lr.CosineAnnealingDecay, { - "learning_rate": 0.5, - "T_max": 10, - "verbose": False - }), - (one_cycle_lr, paddle.optimizer.lr.OneCycleLR, { - "max_learning_rate": 0.1, - "total_steps": 20, - "divide_factor": 5, - "end_learning_rate": 0.0001, - "anneal_strategy": 'cos', - "phase_pct": 0.3, - "three_phase": False, - }), - (one_cycle_lr, paddle.optimizer.lr.OneCycleLR, { - "max_learning_rate": 0.5, - "total_steps": 20, - "divide_factor": 10, - "end_learning_rate": 0.001, - "anneal_strategy": 'linear', - "phase_pct": 0.4, - "three_phase": False, - }), - (one_cycle_lr, paddle.optimizer.lr.OneCycleLR, { - "max_learning_rate": 1.0, - "total_steps": 20, - "divide_factor": 9, - "end_learning_rate": 0.0001, - "anneal_strategy": 'cos', - "phase_pct": 0.3, - "three_phase": True, - }), - (one_cycle_lr, paddle.optimizer.lr.OneCycleLR, { - "max_learning_rate": 0.3, - "total_steps": 20, - "divide_factor": 25, - "end_learning_rate": 0.0005, - "anneal_strategy": 'linear', - "phase_pct": 0.2, - "three_phase": True, - }), - (cyclic_lr, paddle.optimizer.lr.CyclicLR, { - "base_learning_rate": 0.5, - "max_learning_rate": 1.0, - "step_size_up": 15, - "step_size_down": 5, - "mode": 'triangular', - "exp_gamma": 1., - "scale_fn": None, - "scale_mode": 'cycle', - "verbose": False - }), - (cyclic_lr, paddle.optimizer.lr.CyclicLR, { - "base_learning_rate": 0.5, - "max_learning_rate": 1.0, - "step_size_up": 15, - "step_size_down": 5, - "mode": 'triangular2', - "exp_gamma": 1., - "scale_fn": None, - "scale_mode": 'cycle', - "verbose": False - }), - (cyclic_lr, paddle.optimizer.lr.CyclicLR, { - "base_learning_rate": 0.5, - "max_learning_rate": 1.0, - "step_size_up": 15, - "step_size_down": 5, - "mode": 'exp_range', - "exp_gamma": 0.8, - "scale_fn": None, - "scale_mode": 'cycle', - "verbose": False - }), - (cyclic_lr, paddle.optimizer.lr.CyclicLR, { - "base_learning_rate": 0.5, - "max_learning_rate": 1.0, - "step_size_up": 15, - "step_size_down": 5, - "mode": 'exp_range', - "exp_gamma": 1., - "scale_fn": lambda x: 0.95**x, - "scale_mode": 'cycle', - "verbose": False - }), - (cyclic_lr, paddle.optimizer.lr.CyclicLR, { - "base_learning_rate": 0.5, - "max_learning_rate": 1.0, - "step_size_up": 15, - "step_size_down": 5, - "mode": 'exp_range', - "exp_gamma": 1., - "scale_fn": lambda x: 0.95, - "scale_mode": 'iterations', - "verbose": False - }) + ( + noam_lr, + paddle.optimizer.lr.NoamDecay, + {"d_model": 0.01, "warmup_steps": 100, "verbose": False}, + ), + ( + piecewise_lr, + paddle.optimizer.lr.PiecewiseDecay, + { + "boundaries": [3, 6, 9, 15, 20], + "values": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6], + "verbose": False, + }, + ), + ( + natural_exp_lr, + paddle.optimizer.lr.NaturalExpDecay, + {"learning_rate": 0.5, "gamma": 0.1, "verbose": True}, + ), + ( + inverse_time_lr, + paddle.optimizer.lr.InverseTimeDecay, + {"learning_rate": 0.5, "gamma": 0.1, "verbose": False}, + ), + ( + polynomial_lr, + paddle.optimizer.lr.PolynomialDecay, + { + "learning_rate": 0.5, + "decay_steps": 20, + "end_lr": 0, + "power": 1.0, + "cycle": False, + }, + ), + ( + polynomial_lr, + paddle.optimizer.lr.PolynomialDecay, + { + "learning_rate": 0.5, + "decay_steps": 
20, + "end_lr": 0, + "power": 1.0, + "cycle": True, + "verbose": False, + }, + ), + ( + linear_warmup_lr, + paddle.optimizer.lr.LinearWarmup, + { + 'learning_rate': 0.5, + 'warmup_steps': 10, + 'start_lr': 0, + 'end_lr': 0.5, + }, + ), + ( + exponential_lr, + paddle.optimizer.lr.ExponentialDecay, + {"learning_rate": 0.5, "gamma": 0.9, "verbose": False}, + ), + ( + multi_step_lr, + paddle.optimizer.lr.MultiStepDecay, + { + "learning_rate": 0.5, + "milestones": [3, 6, 9, 15, 20], + "gamma": 0.8, + }, + ), + ( + step_lr, + paddle.optimizer.lr.StepDecay, + { + "learning_rate": 0.5, + "step_size": 2, + "gamma": 0.8, + "verbose": False, + }, + ), + ( + lambda_lr, + paddle.optimizer.lr.LambdaDecay, + { + "learning_rate": 0.5, + "lr_lambda": lambda x: 0.95**x, + "verbose": True, + }, + ), + ( + multiplicative_lr, + paddle.optimizer.lr.MultiplicativeDecay, + { + "learning_rate": 0.5, + "lr_lambda": lambda x: 0.95, + "verbose": True, + }, + ), + ( + cosine_annealing_lr, + paddle.optimizer.lr.CosineAnnealingDecay, + {"learning_rate": 0.5, "T_max": 10, "verbose": False}, + ), + ( + one_cycle_lr, + paddle.optimizer.lr.OneCycleLR, + { + "max_learning_rate": 0.1, + "total_steps": 20, + "divide_factor": 5, + "end_learning_rate": 0.0001, + "anneal_strategy": 'cos', + "phase_pct": 0.3, + "three_phase": False, + }, + ), + ( + one_cycle_lr, + paddle.optimizer.lr.OneCycleLR, + { + "max_learning_rate": 0.5, + "total_steps": 20, + "divide_factor": 10, + "end_learning_rate": 0.001, + "anneal_strategy": 'linear', + "phase_pct": 0.4, + "three_phase": False, + }, + ), + ( + one_cycle_lr, + paddle.optimizer.lr.OneCycleLR, + { + "max_learning_rate": 1.0, + "total_steps": 20, + "divide_factor": 9, + "end_learning_rate": 0.0001, + "anneal_strategy": 'cos', + "phase_pct": 0.3, + "three_phase": True, + }, + ), + ( + one_cycle_lr, + paddle.optimizer.lr.OneCycleLR, + { + "max_learning_rate": 0.3, + "total_steps": 20, + "divide_factor": 25, + "end_learning_rate": 0.0005, + "anneal_strategy": 'linear', + "phase_pct": 0.2, + "three_phase": True, + }, + ), + ( + cyclic_lr, + paddle.optimizer.lr.CyclicLR, + { + "base_learning_rate": 0.5, + "max_learning_rate": 1.0, + "step_size_up": 15, + "step_size_down": 5, + "mode": 'triangular', + "exp_gamma": 1.0, + "scale_fn": None, + "scale_mode": 'cycle', + "verbose": False, + }, + ), + ( + cyclic_lr, + paddle.optimizer.lr.CyclicLR, + { + "base_learning_rate": 0.5, + "max_learning_rate": 1.0, + "step_size_up": 15, + "step_size_down": 5, + "mode": 'triangular2', + "exp_gamma": 1.0, + "scale_fn": None, + "scale_mode": 'cycle', + "verbose": False, + }, + ), + ( + cyclic_lr, + paddle.optimizer.lr.CyclicLR, + { + "base_learning_rate": 0.5, + "max_learning_rate": 1.0, + "step_size_up": 15, + "step_size_down": 5, + "mode": 'exp_range', + "exp_gamma": 0.8, + "scale_fn": None, + "scale_mode": 'cycle', + "verbose": False, + }, + ), + ( + cyclic_lr, + paddle.optimizer.lr.CyclicLR, + { + "base_learning_rate": 0.5, + "max_learning_rate": 1.0, + "step_size_up": 15, + "step_size_down": 5, + "mode": 'exp_range', + "exp_gamma": 1.0, + "scale_fn": lambda x: 0.95**x, + "scale_mode": 'cycle', + "verbose": False, + }, + ), + ( + cyclic_lr, + paddle.optimizer.lr.CyclicLR, + { + "base_learning_rate": 0.5, + "max_learning_rate": 1.0, + "step_size_up": 15, + "step_size_down": 5, + "mode": 'exp_range', + "exp_gamma": 1.0, + "scale_fn": lambda x: 0.95, + "scale_mode": 'iterations', + "verbose": False, + }, + ), ] for python_func, paddle_api, kwarg in func_api_kwargs: @@ -838,14 +975,17 @@ class 
TestLRScheduler(unittest.TestCase): paddle.enable_static() def test_linear_warmp(self): - natural_lr = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, - gamma=0.1) + natural_lr = paddle.optimizer.lr.NaturalExpDecay( + learning_rate=0.5, gamma=0.1 + ) natural_lr_warmup = paddle.optimizer.lr.LinearWarmup( - learning_rate=natural_lr, warmup_steps=10, start_lr=0.0, end_lr=0.1) + learning_rate=natural_lr, warmup_steps=10, start_lr=0.0, end_lr=0.1 + ) for idx in range(30): if idx >= 10: - self.assertEqual(natural_lr_warmup.get_lr(), - natural_lr.get_lr()) + self.assertEqual( + natural_lr_warmup.get_lr(), natural_lr.get_lr() + ) natural_lr.step() natural_lr_warmup.step() diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py index c87d3067cc4fde6e60725e9a2c317392a5462665..60446284600006cabaedcab6770b0932125a43cc 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py @@ -22,7 +22,6 @@ from paddle.fluid import Program, program_guard class TestLRNOp(OpTest): - def get_input(self): r''' TODO(gongweibao): why it's grad diff is so large? x = np.ndarray( @@ -64,7 +63,7 @@ class TestLRNOp(OpTest): 'k': self.k, 'alpha': self.alpha, 'beta': self.beta, - 'data_format': self.data_format + 'data_format': self.data_format, } return attrs @@ -103,13 +102,11 @@ class TestLRNOp(OpTest): class TestLRNOpAttrDataFormat(TestLRNOp): - def init_test_case(self): self.data_format = 'NHWC' class TestLRNAPI(unittest.TestCase): - def test_case(self): data1 = fluid.data(name='data1', shape=[2, 4, 5, 5], dtype='float32') data2 = fluid.data(name='data2', shape=[2, 5, 5, 4], dtype='float32') @@ -124,23 +121,22 @@ class TestLRNAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "data1": data1_np, - "data2": data2_np - }, - fetch_list=[out1, out2], - return_numpy=True) - - np.testing.assert_allclose(results[0], - np.transpose(results[1], (0, 3, 1, 2)), - rtol=1e-05) + results = exe.run( + fluid.default_main_program(), + feed={"data1": data1_np, "data2": data2_np}, + fetch_list=[out1, out2], + return_numpy=True, + ) + + np.testing.assert_allclose( + results[0], np.transpose(results[1], (0, 3, 1, 2)), rtol=1e-05 + ) def test_exception(self): input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32") - input2 = fluid.data(name="input2", - shape=[2, 4, 5, 5, 5], - dtype="float32") + input2 = fluid.data( + name="input2", shape=[2, 4, 5, 5, 5], dtype="float32" + ) def _attr_data_fromat(): out = fluid.layers.lrn(input1, data_format='NDHW') @@ -153,7 +149,6 @@ class TestLRNAPI(unittest.TestCase): class TestLRNOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input must be float32 @@ -162,7 +157,6 @@ class TestLRNOpError(unittest.TestCase): class TestLocalResponseNormFAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -174,84 +168,81 @@ class TestLocalResponseNormFAPI(unittest.TestCase): in_np1 = np.random.random([3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 1)) - input1 = fluid.data(name="input1", - shape=[3, 40, 40], - dtype="float32") - input2 = fluid.data(name="input2", - shape=[3, 40, 40], - dtype="float32") - res1 = paddle.nn.functional.local_response_norm(x=input1, - size=5, - data_format='NCL') - res2 = 
paddle.nn.functional.local_response_norm(x=input2, - size=5, - data_format='NLC') + input1 = fluid.data( + name="input1", shape=[3, 40, 40], dtype="float32" + ) + input2 = fluid.data( + name="input2", shape=[3, 40, 40], dtype="float32" + ) + res1 = paddle.nn.functional.local_response_norm( + x=input1, size=5, data_format='NCL' + ) + res2 = paddle.nn.functional.local_response_norm( + x=input2, size=5, data_format='NLC' + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input1": in_np1, - "input2": in_np2 - }, - fetch_list=[res1, res2]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input1": in_np1, "input2": in_np2}, + fetch_list=[res1, res2], + ) fetches1_tran = np.transpose(fetches[1], (0, 2, 1)) np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) def check_static_4d_input(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input1 = fluid.data(name="input1", - shape=[3, 3, 40, 40], - dtype="float32") - input2 = fluid.data(name="input2", - shape=[3, 40, 40, 3], - dtype="float32") - - res1 = paddle.nn.functional.local_response_norm(x=input1, - size=5, - data_format='NCHW') - res2 = paddle.nn.functional.local_response_norm(x=input2, - size=5, - data_format='NHWC') + input1 = fluid.data( + name="input1", shape=[3, 3, 40, 40], dtype="float32" + ) + input2 = fluid.data( + name="input2", shape=[3, 40, 40, 3], dtype="float32" + ) + + res1 = paddle.nn.functional.local_response_norm( + x=input1, size=5, data_format='NCHW' + ) + res2 = paddle.nn.functional.local_response_norm( + x=input2, size=5, data_format='NHWC' + ) in_np1 = np.random.random([3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 1)) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input1": in_np1, - "input2": in_np2 - }, - fetch_list=[res1, res2]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input1": in_np1, "input2": in_np2}, + fetch_list=[res1, res2], + ) fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2)) np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) def check_static_5d_input(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input1 = fluid.data(name="input1", - shape=[3, 3, 3, 40, 40], - dtype="float32") - input2 = fluid.data(name="input2", - shape=[3, 3, 40, 40, 3], - dtype="float32") - res1 = paddle.nn.functional.local_response_norm(x=input1, - size=5, - data_format='NCDHW') - res2 = paddle.nn.functional.local_response_norm(x=input2, - size=5, - data_format='NDHWC') + input1 = fluid.data( + name="input1", shape=[3, 3, 3, 40, 40], dtype="float32" + ) + input2 = fluid.data( + name="input2", shape=[3, 3, 40, 40, 3], dtype="float32" + ) + res1 = paddle.nn.functional.local_response_norm( + x=input1, size=5, data_format='NCDHW' + ) + res2 = paddle.nn.functional.local_response_norm( + x=input2, size=5, data_format='NDHWC' + ) in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1)) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input1": in_np1, - "input2": in_np2 - }, - fetch_list=[res1, res2]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input1": in_np1, "input2": in_np2}, + fetch_list=[res1, res2], + ) fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3)) np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) @@ -270,12 +261,12 @@ class TestLocalResponseNormFAPI(unittest.TestCase): in1 
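# The local_response_norm checks above all follow one pattern: run the op on a
# channel-first tensor, run it again on the transposed, channel-last copy,
# transpose the second result back, and require the two to agree.  A compact,
# framework-agnostic restatement of that check; the helper name and the NCL
# example shapes are illustrative.
import numpy as np


def check_layout_equivalence(fn_channel_first, fn_channel_last, x_ncl, rtol=1e-5):
    # x_ncl: (N, C, L); the channel-last call sees the (N, L, C) view of it
    out_ncl = fn_channel_first(x_ncl)
    out_nlc = fn_channel_last(np.transpose(x_ncl, (0, 2, 1)))
    np.testing.assert_allclose(
        out_ncl, np.transpose(out_nlc, (0, 2, 1)), rtol=rtol
    )


# For example, with the API under test (left as a comment, since it needs paddle):
#   check_layout_equivalence(
#       lambda x: F.local_response_norm(paddle.to_tensor(x), 5, data_format='NCL').numpy(),
#       lambda x: F.local_response_norm(paddle.to_tensor(x), 5, data_format='NLC').numpy(),
#       np.random.random((3, 40, 40)).astype('float32'),
#   )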
= paddle.to_tensor(in_np1) in2 = paddle.to_tensor(in_np2) - res1 = paddle.nn.functional.local_response_norm(x=in1, - size=5, - data_format='NCL') - res2 = paddle.nn.functional.local_response_norm(x=in2, - size=5, - data_format='NLC') + res1 = paddle.nn.functional.local_response_norm( + x=in1, size=5, data_format='NCL' + ) + res2 = paddle.nn.functional.local_response_norm( + x=in2, size=5, data_format='NLC' + ) res2_tran = np.transpose(res2.numpy(), (0, 2, 1)) np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) @@ -288,12 +279,12 @@ class TestLocalResponseNormFAPI(unittest.TestCase): in1 = paddle.to_tensor(in_np1) in2 = paddle.to_tensor(in_np2) - res1 = paddle.nn.functional.local_response_norm(x=in1, - size=5, - data_format='NCHW') - res2 = paddle.nn.functional.local_response_norm(x=in2, - size=5, - data_format='NHWC') + res1 = paddle.nn.functional.local_response_norm( + x=in1, size=5, data_format='NCHW' + ) + res2 = paddle.nn.functional.local_response_norm( + x=in2, size=5, data_format='NHWC' + ) res2_tran = np.transpose(res2.numpy(), (0, 3, 1, 2)) np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) @@ -306,12 +297,12 @@ class TestLocalResponseNormFAPI(unittest.TestCase): in1 = paddle.to_tensor(in_np1) in2 = paddle.to_tensor(in_np2) - res1 = paddle.nn.functional.local_response_norm(x=in1, - size=5, - data_format='NCDHW') - res2 = paddle.nn.functional.local_response_norm(x=in2, - size=5, - data_format='NDHWC') + res1 = paddle.nn.functional.local_response_norm( + x=in1, size=5, data_format='NCDHW' + ) + res2 = paddle.nn.functional.local_response_norm( + x=in2, size=5, data_format='NDHWC' + ) res2_tran = np.transpose(res2.numpy(), (0, 4, 1, 2, 3)) np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) @@ -324,14 +315,14 @@ class TestLocalResponseNormFAPI(unittest.TestCase): class TestLocalResponseNormFAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of lrn must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + ) paddle.nn.functional.local_response_norm(x1, size=5) self.assertRaises(TypeError, test_Variable) @@ -344,9 +335,9 @@ class TestLocalResponseNormFAPIError(unittest.TestCase): def test_dataformat(): x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="float32") - paddle.nn.functional.local_response_norm(x, - size=5, - data_format="NCTHW") + paddle.nn.functional.local_response_norm( + x, size=5, data_format="NCTHW" + ) self.assertRaises(ValueError, test_dataformat) @@ -364,7 +355,6 @@ class TestLocalResponseNormFAPIError(unittest.TestCase): class TestLocalResponseNormCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py index 53949bbb738e94a21c5b151baad3d59e3b89daa7..3be8fa8e3c17051bd816c00e8eb5a737d1ef6234 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py @@ -33,7 +33,6 @@ EXP_MAX_INPUT = 40.0 class RandomWeight: - def __init__(self): pass @@ -43,34 +42,29 @@ class RandomWeight: self.input_size = input_size self.dtype = dtype - self.weight_ih = np.random.uniform(low=-std, - high=std, - size=(4 * self.hidden_size, - self.input_size)).astype(dtype) + self.weight_ih = np.random.uniform( + low=-std, high=std, size=(4 * self.hidden_size, self.input_size) + ).astype(dtype) self.weight_hh = np.random.uniform( - low=-std, high=std, - size=(4 * self.hidden_size, self.hidden_size)).astype(dtype) - self.bias_ih = np.random.uniform(low=-std, - high=std, - size=(4 * - self.hidden_size)).astype(dtype) - self.bias_hh = np.random.uniform(low=-std, - high=std, - size=(4 * - self.hidden_size)).astype(dtype) + low=-std, high=std, size=(4 * self.hidden_size, self.hidden_size) + ).astype(dtype) + self.bias_ih = np.random.uniform( + low=-std, high=std, size=(4 * self.hidden_size) + ).astype(dtype) + self.bias_hh = np.random.uniform( + low=-std, high=std, size=(4 * self.hidden_size) + ).astype(dtype) weight = RandomWeight() class LayerMixin(object): - def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) class LayerListMixin(LayerMixin): - def __init__(self, layers=None): self._layers = list(layers) if layers else [] @@ -82,7 +76,6 @@ class LayerListMixin(LayerMixin): class LSTMCell(LayerMixin): - def __init__(self, input_size, hidden_size, bias=True): self.input_size = input_size self.hidden_size = hidden_size @@ -145,12 +138,14 @@ def update_state(mask, new, old): return tuple(map(lambda x, y: np.where(mask, x, y), new, old)) -def rnn(cell, - inputs, - initial_states, - sequence_length=None, - time_major=False, - is_reverse=False): +def rnn( + cell, + inputs, + initial_states, + sequence_length=None, + time_major=False, + is_reverse=False, +): if not time_major: inputs = np.transpose(inputs, [1, 0, 2]) if is_reverse: @@ -172,7 +167,7 @@ def rnn(cell, if mask is not None: m_t = mask[t] y, new_state = cell(x_t, state) - y = np.where(m_t, y, 0.) 
+ y = np.where(m_t, y, 0.0) outputs.append(y) state = update_state(m_t, new_state, state) else: @@ -190,25 +185,27 @@ def rnn(cell, return outputs, final_state -def birnn(cell_fw, - cell_bw, - inputs, - initial_states, - sequence_length=None, - time_major=False): +def birnn( + cell_fw, + cell_bw, + inputs, + initial_states, + sequence_length=None, + time_major=False, +): states_fw, states_bw = initial_states - outputs_fw, states_fw = rnn(cell_fw, - inputs, - states_fw, - sequence_length, - time_major=time_major) - - outputs_bw, states_bw = rnn(cell_bw, - inputs, - states_bw, - sequence_length, - time_major=time_major, - is_reverse=True) + outputs_fw, states_fw = rnn( + cell_fw, inputs, states_fw, sequence_length, time_major=time_major + ) + + outputs_bw, states_bw = rnn( + cell_bw, + inputs, + states_bw, + sequence_length, + time_major=time_major, + is_reverse=True, + ) outputs = np.concatenate((outputs_fw, outputs_bw), -1) final_states = (states_fw, states_bw) @@ -271,7 +268,6 @@ def concat_states(states, bidirectional=False, state_components=1): class RNN(LayerMixin): - def __init__(self, cell, is_reverse=False, time_major=False): super(RNN, self).__init__() self.cell = cell @@ -282,59 +278,69 @@ class RNN(LayerMixin): self.time_major = time_major def forward(self, inputs, initial_states=None, sequence_length=None): - final_outputs, final_states = rnn(self.cell, - inputs, - initial_states=initial_states, - sequence_length=sequence_length, - time_major=self.time_major, - is_reverse=self.is_reverse) + final_outputs, final_states = rnn( + self.cell, + inputs, + initial_states=initial_states, + sequence_length=sequence_length, + time_major=self.time_major, + is_reverse=self.is_reverse, + ) return final_outputs, final_states class BiRNN(LayerMixin): - def __init__(self, cell_fw, cell_bw, time_major=False): super(BiRNN, self).__init__() self.cell_fw = cell_fw self.cell_bw = cell_bw self.time_major = time_major - def forward(self, - inputs, - initial_states=None, - sequence_length=None, - **kwargs): + def forward( + self, inputs, initial_states=None, sequence_length=None, **kwargs + ): if isinstance(initial_states, (list, tuple)): - assert len(initial_states) == 2, \ - "length of initial_states should be 2 when it is a list/tuple" + assert ( + len(initial_states) == 2 + ), "length of initial_states should be 2 when it is a list/tuple" else: initial_states = [initial_states, initial_states] - outputs, final_states = birnn(self.cell_fw, self.cell_bw, inputs, - initial_states, sequence_length, - self.time_major) + outputs, final_states = birnn( + self.cell_fw, + self.cell_bw, + inputs, + initial_states, + sequence_length, + self.time_major, + ) return outputs, final_states class RNNMixin(LayerListMixin): - def forward(self, inputs, initial_states=None, sequence_length=None): batch_index = 1 if self.time_major else 0 batch_size = inputs.shape[batch_index] dtype = inputs.dtype if initial_states is None: - state_shape = (self.num_layers * self.num_directions, batch_size, - self.hidden_size) + state_shape = ( + self.num_layers * self.num_directions, + batch_size, + self.hidden_size, + ) if self.state_components == 1: initial_states = np.zeros(state_shape, dtype) else: - initial_states = tuple([ - np.zeros(state_shape, dtype) - for _ in range(self.state_components) - ]) - - states = split_states(initial_states, self.num_directions == 2, - self.state_components) + initial_states = tuple( + [ + np.zeros(state_shape, dtype) + for _ in range(self.state_components) + ] + ) + + states = split_states( + 
initial_states, self.num_directions == 2, self.state_components + ) final_states = [] for i, rnn_layer in enumerate(self): @@ -344,20 +350,22 @@ class RNNMixin(LayerListMixin): final_states.append(final_state) inputs = outputs - final_states = concat_states(final_states, self.num_directions == 2, - self.state_components) + final_states = concat_states( + final_states, self.num_directions == 2, self.state_components + ) return outputs, final_states class LSTM(RNNMixin): - - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - dropout=0., - time_major=False): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + dropout=0.0, + time_major=False, + ): super(LSTM, self).__init__() if direction in ["forward", "backward"]: @@ -378,7 +386,8 @@ class LSTM(RNNMixin): else: raise ValueError( "direction should be forward, backward or bidirectional, " - "received direction = {}".format(direction)) + "received direction = {}".format(direction) + ) self.input_size = input_size self.hidden_size = hidden_size @@ -389,10 +398,10 @@ class LSTM(RNNMixin): self.state_components = 2 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNLstmOp(OpTest): - def get_weight_names(self): weight_names = [] for i in range(2 * self.num_layers): @@ -404,8 +413,11 @@ class TestCUDNNLstmOp(OpTest): def setUp(self): self.op_type = "cudnn_lstm" self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.sequence_length = None if core.is_compiled_with_rocm( - ) else np.array([12, 11, 10, 9, 8], dtype=np.int32) + self.sequence_length = ( + None + if core.is_compiled_with_rocm() + else np.array([12, 11, 10, 9, 8], dtype=np.int32) + ) self.num_layers = 1 self.set_attrs() @@ -414,24 +426,26 @@ class TestCUDNNLstmOp(OpTest): input_size = 21 hidden_size = 21 - input = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_length, batch_size, - input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, high=0.1, size=(seq_length, batch_size, input_size) + ).astype(self.dtype) input[11][1:][:] = 0 input[10][2:][:] = 0 input[9][3:][:] = 0 input[8][4:][:] = 0 weight.updata_weight(hidden_size, input_size, self.dtype) - rnn1 = LSTM(input_size, - hidden_size, - num_layers=self.num_layers, - time_major=True, - direction="forward") - - output, (last_hidden, - last_cell) = rnn1(input, sequence_length=self.sequence_length) + rnn1 = LSTM( + input_size, + hidden_size, + num_layers=self.num_layers, + time_major=True, + direction="forward", + ) + + output, (last_hidden, last_cell) = rnn1( + input, sequence_length=self.sequence_length + ) flat_w = [] num = 0 @@ -455,10 +469,12 @@ class TestCUDNNLstmOp(OpTest): bias_hh = weight.bias_hh flat_w.append(("bias" + str(num), bias_hh)) num += 1 - init_h = np.zeros( - (self.num_layers, batch_size, hidden_size)).astype(self.dtype) - init_c = np.zeros( - (self.num_layers, batch_size, hidden_size)).astype(self.dtype) + init_h = np.zeros((self.num_layers, batch_size, hidden_size)).astype( + self.dtype + ) + init_c = np.zeros((self.num_layers, batch_size, hidden_size)).astype( + self.dtype + ) state_out = np.ndarray((300)).astype("uint8") if core.is_compiled_with_rocm(): @@ -473,7 +489,7 @@ class TestCUDNNLstmOp(OpTest): 'WeightList': flat_w, 'InitH': init_h, 'InitC': init_c, - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if 
self.sequence_length is None: self.inputs = { @@ -494,7 +510,7 @@ class TestCUDNNLstmOp(OpTest): "LastH": last_hidden, 'LastC': last_cell, 'Reserve': np.ndarray((400)).astype("uint8"), - 'StateOut': state_out + 'StateOut': state_out, } def set_attrs(self): @@ -503,26 +519,29 @@ class TestCUDNNLstmOp(OpTest): def test_output_with_place(self): place = core.CUDAPlace(0) if core.is_compiled_with_rocm(): - self.check_output_with_place(place, - atol=1e-5, - no_check_set=['Reserve', 'StateOut']) + self.check_output_with_place( + place, atol=1e-5, no_check_set=['Reserve', 'StateOut'] + ) else: - self.check_output_with_place(place, - no_check_set=['Reserve', 'StateOut']) + self.check_output_with_place( + place, no_check_set=['Reserve', 'StateOut'] + ) def test_grad_with_place(self): place = core.CUDAPlace(0) var_name_list = self.get_weight_names() for var_name in var_name_list: self.check_grad_with_place( - place, set(['Input', var_name, 'InitH', 'InitC']), - ['Out', 'LastH', 'LastC']) + place, + set(['Input', var_name, 'InitH', 'InitC']), + ['Out', 'LastH', 'LastC'], + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNlstmAPI(unittest.TestCase): - def test_lstm(self): seq_len = 20 batch_size = 5 @@ -530,31 +549,41 @@ class TestCUDNNlstmAPI(unittest.TestCase): dropout_prob = 0.0 num_layers = 1 dtype = 'float32' if core.is_compiled_with_rocm() else 'float64' - input = fluid.data(name='input', - shape=[seq_len, batch_size, hidden_size], - dtype=dtype) - init_h = layers.fill_constant([num_layers, batch_size, hidden_size], - dtype, 0.0) - init_c = layers.fill_constant([num_layers, batch_size, hidden_size], - dtype, 0.0) - rnn_out, last_h, last_c = layers.lstm(input, init_h, init_c, seq_len, - hidden_size, num_layers, - dropout_prob, False) + input = fluid.data( + name='input', shape=[seq_len, batch_size, hidden_size], dtype=dtype + ) + init_h = layers.fill_constant( + [num_layers, batch_size, hidden_size], dtype, 0.0 + ) + init_c = layers.fill_constant( + [num_layers, batch_size, hidden_size], dtype, 0.0 + ) + rnn_out, last_h, last_c = layers.lstm( + input, + init_h, + init_c, + seq_len, + hidden_size, + num_layers, + dropout_prob, + False, + ) exe = fluid.Executor(fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) - input_i = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_len, batch_size, - hidden_size)).astype("float64") - out = exe.run(fluid.default_main_program(), - feed={'input': input_i}, - fetch_list=[rnn_out, last_h, last_c, 'cudnn_lstm_0.w_0']) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + input_i = np.random.uniform( + low=-0.1, high=0.1, size=(seq_len, batch_size, hidden_size) + ).astype("float64") + out = exe.run( + fluid.default_main_program(), + feed={'input': input_i}, + fetch_list=[rnn_out, last_h, last_c, 'cudnn_lstm_0.w_0'], + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNlstmAPI(unittest.TestCase): - def test_lstm(self): seq_len = 20 batch_size = 5 @@ -562,25 +591,36 @@ class TestCUDNNlstmAPI(unittest.TestCase): dropout_prob = 0.0 num_layers = 2 dtype = 'float32' if core.is_compiled_with_rocm() else 'float64' - input = fluid.data(name='input', - shape=[seq_len, batch_size, hidden_size], - dtype=dtype) - init_h = layers.fill_constant([num_layers, batch_size, hidden_size], - dtype, 0.0) - init_c = 
layers.fill_constant([num_layers, batch_size, hidden_size], - dtype, 0.0) - rnn_out, last_h, last_c = layers.lstm(input, init_h, init_c, seq_len, - hidden_size, num_layers, - dropout_prob, False, True) + input = fluid.data( + name='input', shape=[seq_len, batch_size, hidden_size], dtype=dtype + ) + init_h = layers.fill_constant( + [num_layers, batch_size, hidden_size], dtype, 0.0 + ) + init_c = layers.fill_constant( + [num_layers, batch_size, hidden_size], dtype, 0.0 + ) + rnn_out, last_h, last_c = layers.lstm( + input, + init_h, + init_c, + seq_len, + hidden_size, + num_layers, + dropout_prob, + False, + True, + ) exe = fluid.Executor(fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) - input_i = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_len, batch_size, - hidden_size)).astype(dtype) - out = exe.run(fluid.default_main_program(), - feed={'input': input_i}, - fetch_list=[rnn_out, last_h, last_c, 'cudnn_lstm_0.w_0']) + input_i = np.random.uniform( + low=-0.1, high=0.1, size=(seq_len, batch_size, hidden_size) + ).astype(dtype) + out = exe.run( + fluid.default_main_program(), + feed={'input': input_i}, + fetch_list=[rnn_out, last_h, last_c, 'cudnn_lstm_0.w_0'], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lstm_op.py b/python/paddle/fluid/tests/unittests/test_lstm_op.py index db701a7b733ce8b76f9672c84c678c35d1c72c5c..aeb9b28c860c0012a0fe4a4f7fa565fa621952b5 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_op.py @@ -33,13 +33,13 @@ def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX - return 1. / (1. + np.exp(-y)) + return 1.0 / (1.0 + np.exp(-y)) def tanh(x): - y = -2. * x + y = -2.0 * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT - return (2. / (1. + np.exp(y))) - 1. 
+ return (2.0 / (1.0 + np.exp(y))) - 1.0 def relu(x): @@ -50,23 +50,23 @@ ACTIVATION = { 'identity': identity, 'sigmoid': sigmoid, 'tanh': tanh, - 'relu': relu + 'relu': relu, } def lstm( - input, # T x 4D - lod, # 1 x N - h0=None, # N x D - c0=None, # N x D - w_h=None, # D x 4D - w_b=None, # 1 x 4D - w_c=None, # 1 x 3D - is_reverse=False, - act_gate=None, - act_cell=None, - act_cand=None): - + input, # T x 4D + lod, # 1 x N + h0=None, # N x D + c0=None, # N x D + w_h=None, # D x 4D + w_b=None, # 1 x 4D + w_c=None, # 1 x 3D + is_reverse=False, + act_gate=None, + act_cell=None, + act_cand=None, +): def _step(x, w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand): g = np.dot(h_pre, w_h) # 1 x 4D g = g + x @@ -108,13 +108,14 @@ def lstm( for i in range(batch_size): # compute one sequence seq_len = lod[0][i] - x = input[offset[i]:offset[i + 1], :] + x = input[offset[i] : offset[i + 1], :] h_pre = h0[i] # 1 x D c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step - h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate, - act_cell, act_cand) + h_pre, c_pre = _step( + x[j], w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand + ) hidden.append(h_pre.flatten()) cell.append(c_pre.flatten()) @@ -130,7 +131,6 @@ def lstm( class LstmUnitTestError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size = 20 @@ -138,77 +138,123 @@ class LstmUnitTestError(unittest.TestCase): dropout_prob = 0.2 hidden_size = 150 num_layers = 1 - input = fluid.data(name='input', - shape=[batch_size, seq_len, hidden_size], - dtype='float32') - pre_hidden = fill_constant([num_layers, batch_size, hidden_size], - 'float32', 0.0) - pre_cell = fill_constant([num_layers, batch_size, hidden_size], - 'float32', 0.0) + input = fluid.data( + name='input', + shape=[batch_size, seq_len, hidden_size], + dtype='float32', + ) + pre_hidden = fill_constant( + [num_layers, batch_size, hidden_size], 'float32', 0.0 + ) + pre_cell = fill_constant( + [num_layers, batch_size, hidden_size], 'float32', 0.0 + ) np_input = np.random.uniform( - -0.1, 0.1, (batch_size, seq_len, hidden_size)).astype('float64') + -0.1, 0.1, (batch_size, seq_len, hidden_size) + ).astype('float64') np_pre_hidden = np.random.uniform( - -0.1, 0.1, - (num_layers, batch_size, hidden_size)).astype('float64') + -0.1, 0.1, (num_layers, batch_size, hidden_size) + ).astype('float64') np_pre_cell = np.random.uniform( - -0.1, 0.1, - (num_layers, batch_size, hidden_size)).astype('float64') + -0.1, 0.1, (num_layers, batch_size, hidden_size) + ).astype('float64') def test_input_Variable(): - LSTM(np_input, pre_hidden, pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + LSTM( + np_input, + pre_hidden, + pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_input_Variable) def test_pre_hidden_Variable(): - LSTM(np_input, np_pre_hidden, pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + LSTM( + np_input, + np_pre_hidden, + pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_pre_hidden_Variable) def test_pre_cell_Variable(): - LSTM(np_input, pre_hidden, np_pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + LSTM( + np_input, + pre_hidden, + np_pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_pre_cell_Variable) def test_input_type(): - error_input = 
fluid.data(name='error_input', - shape=[None, hidden_size * 3], - dtype='int32') - LSTM(error_input, pre_hidden, pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + error_input = fluid.data( + name='error_input', + shape=[None, hidden_size * 3], + dtype='int32', + ) + LSTM( + error_input, + pre_hidden, + pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_input_type) def test_pre_hidden_type(): - error_pre_hidden = fluid.data(name='error_pre_hidden', - shape=[None, hidden_size], - dtype='int32') - LSTM(input, error_pre_hidden, pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + error_pre_hidden = fluid.data( + name='error_pre_hidden', + shape=[None, hidden_size], + dtype='int32', + ) + LSTM( + input, + error_pre_hidden, + pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_pre_hidden_type) def test_pre_cell_type(): - error_pre_cell = fluid.data(name='error_pre_cell', - shape=[None, hidden_size], - dtype='int32') - LSTM(input, pre_hidden, error_pre_cell, \ - seq_len, hidden_size, num_layers, \ - dropout_prob=dropout_prob) + error_pre_cell = fluid.data( + name='error_pre_cell', + shape=[None, hidden_size], + dtype='int32', + ) + LSTM( + input, + pre_hidden, + error_pre_cell, + seq_len, + hidden_size, + num_layers, + dropout_prob=dropout_prob, + ) self.assertRaises(TypeError, test_pre_cell_type) class TestLstmOp(OpTest): - def set_is_test(self): self.is_test = False @@ -247,11 +293,21 @@ class TestLstmOp(OpTest): else: b = np.random.normal(size=(1, 4 * self.D)).astype('float64') - w_b = b[:, 0:4 * self.D] - w_c = b[:, 4 * self.D:] if self.use_peepholes else None - h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse, - ACTIVATION[self.act_gate], ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand]) + w_b = b[:, 0 : 4 * self.D] + w_c = b[:, 4 * self.D :] if self.use_peepholes else None + h, c = lstm( + x, + self.lod, + h0, + c0, + w, + w_b, + w_c, + self.is_reverse, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + ACTIVATION[self.act_cand], + ) self.inputs = {'Input': (x, self.lod), 'Weight': w} @@ -271,7 +327,7 @@ class TestLstmOp(OpTest): 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, 'candidate_activation': self.act_cand, - 'is_test': self.is_test + 'is_test': self.is_test, } def test_check_output(self): @@ -281,33 +337,33 @@ class TestLstmOp(OpTest): # TODO(qingqing) remove folowing lines after the check_grad is refined. 
N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden'], - max_relative_error=5e-4, - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'Bias'], + ['Hidden'], + max_relative_error=5e-4, + check_dygraph=False, + ) class TestLstmOpCase1(TestLstmOp): - def set_lod(self): self.lod = [[0, 3, 2]] class TestLstmOpCase2(TestLstmOp): - def set_lod(self): self.lod = [[0, 3, 0]] class TestLstmOpCase3(TestLstmOp): - def set_lod(self): self.lod = [[2, 0, 4]] class TestLstmOpInference(TestLstmOp): - def set_is_test(self): self.is_test = True @@ -317,43 +373,42 @@ class TestLstmOpInference(TestLstmOp): class TestLstmOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): input_data = np.random.random((1, 2048)).astype("float32") - fluid.layers.dynamic_lstm(input=input_data, - size=2048, - use_peepholes=False) + fluid.layers.dynamic_lstm( + input=input_data, size=2048, use_peepholes=False + ) self.assertRaises(TypeError, test_Variable) def test_h_0(): - in_data = fluid.data(name="input", - shape=[None, 2048], - dtype="float32") + in_data = fluid.data( + name="input", shape=[None, 2048], dtype="float32" + ) h = fluid.data(name="h", shape=[None, 512], dtype="int32") c = fluid.data(name="c", shape=[None, 512], dtype="float32") - fluid.layers.dynamic_lstm(input=in_data, - size=2048, - use_peepholes=False, - h_0=h, - c_0=c) + fluid.layers.dynamic_lstm( + input=in_data, size=2048, use_peepholes=False, h_0=h, c_0=c + ) self.assertRaises(TypeError, test_h_0) def test_c_0(): - in_data_ = fluid.data(name="input_", - shape=[None, 2048], - dtype="float32") + in_data_ = fluid.data( + name="input_", shape=[None, 2048], dtype="float32" + ) h_ = fluid.data(name="h_", shape=[None, 512], dtype="float32") c_ = fluid.data(name="c_", shape=[None, 512], dtype="int32") - fluid.layers.dynamic_lstm(input=in_data_, - size=2048, - use_peepholes=False, - h_0=h_, - c_0=c_) + fluid.layers.dynamic_lstm( + input=in_data_, + size=2048, + use_peepholes=False, + h_0=h_, + c_0=c_, + ) self.assertRaises(TypeError, test_c_0) diff --git a/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py b/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py index b4e6690117d207997873f6164e9fab3039b68b15..d46e8e4719e818851c0cf6acfaed30dc1041f21b 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_unit_op.py @@ -21,35 +21,39 @@ from paddle.fluid.framework import program_guard, Program def sigmoid_np(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) def tanh_np(x): - return 2 * sigmoid_np(2. * x) - 1. 
+ return 2 * sigmoid_np(2.0 * x) - 1.0 class LstmUnitTestError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size, dict_dim, emb_dim, hidden_dim = 32, 128, 64, 512 - data = fluid.data(name='step_data', - shape=[batch_size], - dtype='int64') + data = fluid.data( + name='step_data', shape=[batch_size], dtype='int64' + ) inputs = fluid.embedding(input=data, size=[dict_dim, emb_dim]) - pre_hidden = fluid.data(name='pre_hidden', - shape=[batch_size, hidden_dim], - dtype='float32') - pre_cell = fluid.data(name='pre_cell', - shape=[batch_size, hidden_dim], - dtype='float32') + pre_hidden = fluid.data( + name='pre_hidden', + shape=[batch_size, hidden_dim], + dtype='float32', + ) + pre_cell = fluid.data( + name='pre_cell', shape=[batch_size, hidden_dim], dtype='float32' + ) np_input = np.random.uniform( - -0.1, 0.1, (batch_size, emb_dim)).astype('float64') + -0.1, 0.1, (batch_size, emb_dim) + ).astype('float64') np_pre_hidden = np.random.uniform( - -0.1, 0.1, (batch_size, hidden_dim)).astype('float64') + -0.1, 0.1, (batch_size, hidden_dim) + ).astype('float64') np_pre_cell = np.random.uniform( - -0.1, 0.1, (batch_size, hidden_dim)).astype('float64') + -0.1, 0.1, (batch_size, hidden_dim) + ).astype('float64') def test_input_Variable(): lstm_unit(np_input, pre_hidden, pre_cell) @@ -67,42 +71,48 @@ class LstmUnitTestError(unittest.TestCase): self.assertRaises(TypeError, test_pre_cell_Variable) def test_input_type(): - error_input = fluid.data(name='error_input', - shape=[batch_size, emb_dim], - dtype='int32') + error_input = fluid.data( + name='error_input', + shape=[batch_size, emb_dim], + dtype='int32', + ) lstm_unit(error_input, pre_hidden, pre_cell) self.assertRaises(TypeError, test_input_type) def test_pre_hidden_type(): - error_pre_hidden = fluid.data(name='error_pre_hidden', - shape=[batch_size, hidden_dim], - dtype='int32') + error_pre_hidden = fluid.data( + name='error_pre_hidden', + shape=[batch_size, hidden_dim], + dtype='int32', + ) lstm_unit(inputs, error_pre_hidden, pre_cell) self.assertRaises(TypeError, test_pre_hidden_type) def test_pre_cell_type(): - error_pre_cell = fluid.data(name='error_pre_cell', - shape=[batch_size, hidden_dim], - dtype='int32') + error_pre_cell = fluid.data( + name='error_pre_cell', + shape=[batch_size, hidden_dim], + dtype='int32', + ) lstm_unit(inputs, pre_hidden, error_pre_cell) self.assertRaises(TypeError, test_pre_cell_type) class LstmUnitTest(OpTest): - def setUp(self): self.op_type = "lstm_unit" x_np = np.random.normal(size=(15, 160)).astype("float64") c_np = np.random.normal(size=(15, 40)).astype("float64") i_np, f_np, o_np, j_np = np.split(x_np, 4, axis=1) - forget_bias_np = 0. - self.attrs = {'forget_bias': 0.} + forget_bias_np = 0.0 + self.attrs = {'forget_bias': 0.0} new_c = c_np * sigmoid_np(f_np + forget_bias_np) + sigmoid_np( - i_np) * tanh_np(j_np) + i_np + ) * tanh_np(j_np) new_h = tanh_np(new_c) * sigmoid_np(o_np) self.inputs = {'X': x_np, 'C_prev': c_np} diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index 721a61a7e7d558e4a359f34f22b992f461bf7d1f..730af353dce6be11a8603455cc25652c763631cc 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. 
-#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import numpy as np @@ -22,30 +22,42 @@ ACTIVATION = { 'identity': LstmTest.identity, 'sigmoid': LstmTest.sigmoid, 'tanh': LstmTest.tanh, - 'relu': LstmTest.relu + 'relu': LstmTest.relu, } # LSTM with recurrent projection Layer def lstmp( - input, # T x 4D - lod, # 1 x N - h0=None, # N x D - c0=None, # N x D - w_r=None, # P x 4D - w_rh=None, # D x P - w_b=None, # 1 x 4D - w_c=None, # 1 x 3D - is_reverse=False, - proj_clip=0.0, - cell_clip=0.0, - act_gate=None, - act_cell=None, - act_cand=None, - act_proj=None): - - def _step(x, w_r, w_rh, w_c, r_pre, c_pre, proj_clip, cell_clip, act_gate, - act_cell, act_cand, act_proj): + input, # T x 4D + lod, # 1 x N + h0=None, # N x D + c0=None, # N x D + w_r=None, # P x 4D + w_rh=None, # D x P + w_b=None, # 1 x 4D + w_c=None, # 1 x 3D + is_reverse=False, + proj_clip=0.0, + cell_clip=0.0, + act_gate=None, + act_cell=None, + act_cand=None, + act_proj=None, +): + def _step( + x, + w_r, + w_rh, + w_c, + r_pre, + c_pre, + proj_clip, + cell_clip, + act_gate, + act_cell, + act_cand, + act_proj, + ): g = np.dot(r_pre, w_r) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) @@ -103,14 +115,25 @@ def lstmp( for i in range(batch_size): # compute one sequence seq_len = lod[0][i] - x = input[offset[i]:offset[i + 1], :] + x = input[offset[i] : offset[i + 1], :] r_pre = h0[i] c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step - r_pre, c_pre = _step(x[j], w_r, w_rh, w_c, r_pre, c_pre, proj_clip, - cell_clip, act_gate, act_cell, act_cand, - act_proj) + r_pre, c_pre = _step( + x[j], + w_r, + w_rh, + w_c, + r_pre, + c_pre, + proj_clip, + cell_clip, + act_gate, + act_cell, + act_cand, + act_proj, + ) projection.append(r_pre.flatten()) cell.append(c_pre.flatten()) @@ -126,7 +149,6 @@ def lstmp( class TestLstmpOp(LstmTest.TestLstmOp): - def reset_argument(self): pass @@ -154,15 +176,28 @@ class TestLstmpOp(LstmTest.TestLstmOp): else: b = np.random.normal(size=(1, 4 * self.D)).astype('float64') - w_b = b[:, 0:4 * self.D] - w_c = b[:, 4 * self.D:] if self.use_peepholes else None + w_b = b[:, 0 : 4 * self.D] + w_c = b[:, 4 * self.D :] if self.use_peepholes else None w_rh = np.random.normal(size=(self.D, self.P)).astype('float64') proj_clip = 0.1 cell_clip = 0.1 - r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, self.is_reverse, - proj_clip, cell_clip, ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], - ACTIVATION[self.act_proj]) + r, c = lstmp( + x, + self.lod, + h0, + c0, + w, + w_rh, + w_b, + w_c, + self.is_reverse, + proj_clip, + cell_clip, + ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], + 
ACTIVATION[self.act_cand], + ACTIVATION[self.act_proj], + ) self.inputs = {'Input': (x, self.lod), 'Weight': w, 'ProjWeight': w_rh} @@ -184,7 +219,7 @@ class TestLstmpOp(LstmTest.TestLstmOp): 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, 'candidate_activation': self.act_cand, - 'proj_activation': self.act_proj + 'proj_activation': self.act_proj, } def test_check_output(self): @@ -195,16 +230,18 @@ class TestLstmpOp(LstmTest.TestLstmOp): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'ProjWeight', 'Bias'], - ['Projection'], - numeric_grad_delta=0.0000005, - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'ProjWeight', 'Bias'], + ['Projection'], + numeric_grad_delta=0.0000005, + check_dygraph=False, + ) class TestLstmpOpHasInitial(TestLstmpOp): - def reset_argument(self): self.has_initial_state = True @@ -213,162 +250,187 @@ class TestLstmpOpHasInitial(TestLstmpOp): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'ProjWeight', 'Bias', 'H0', 'C0'], - ['Projection'], - numeric_grad_delta=0.0000005, - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0', 'C0'], + ['Projection'], + numeric_grad_delta=0.0000005, + check_dygraph=False, + ) def test_check_grad_ingore_bias(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'ProjWeight', 'Weight'], ['Projection'], - numeric_grad_delta=0.0000005, - no_grad_set=set('Bias'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'ProjWeight', 'Weight'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('Bias'), + check_dygraph=False, + ) def test_check_grad_ingore_weight(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'ProjWeight', 'Bias'], ['Projection'], - numeric_grad_delta=0.0000005, - no_grad_set=set('Weight'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'ProjWeight', 'Bias'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('Weight'), + check_dygraph=False, + ) def test_check_grad_ingore_proj_weight(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'Bias'], ['Projection'], - 
numeric_grad_delta=0.0000005, - no_grad_set=set('ProjWeight'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'Bias'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('ProjWeight'), + check_dygraph=False, + ) def test_check_grad_ingore_input(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Weight', 'ProjWeight', 'Bias'], ['Projection'], - numeric_grad_delta=0.0000005, - no_grad_set=set('Input'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Weight', 'ProjWeight', 'Bias'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('Input'), + check_dygraph=False, + ) def test_check_grad_ingore_h0(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'ProjWeight', 'Bias', 'C0'], - ['Projection'], - numeric_grad_delta=0.0000005, - no_grad_set=set('H0'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'ProjWeight', 'Bias', 'C0'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('H0'), + check_dygraph=False, + ) def test_check_grad_ingore_c0(self): N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad(['Input', 'Weight', 'ProjWeight', 'Bias', 'H0'], - ['Projection'], - numeric_grad_delta=0.0000005, - no_grad_set=set('C0'), - check_dygraph=False) + self.outputs['BatchCellPreAct'] = np.zeros((N, self.D)).astype( + 'float64' + ) + self.check_grad( + ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0'], + ['Projection'], + numeric_grad_delta=0.0000005, + no_grad_set=set('C0'), + check_dygraph=False, + ) class TestLstmpOpRerverse(TestLstmpOp): - def reset_argument(self): self.is_reverse = True class TestLstmpOpNotUsePeepholes(TestLstmpOp): - def reset_argument(self): self.use_peepholes = False class TestLstmpOpLinearProjection(TestLstmpOp): - def reset_argument(self): self.act_proj = 'identity' class TestLstmpOpLen0Case1(TestLstmpOp): - def reset_argument(self): self.lod = [[0, 4, 0]] class TestLstmpOpLen0Case2(TestLstmpOp): - def reset_argument(self): self.lod = [[2, 0, 3]] class TestLstmpOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): input_data = np.random.random((1, 2048)).astype("float32") - fluid.layers.dynamic_lstmp(input=input_data, - size=2048, - proj_size=256, - use_peepholes=False, - is_reverse=True, - cell_activation="tanh", - proj_activation="tanh") + fluid.layers.dynamic_lstmp( + input=input_data, + size=2048, + proj_size=256, + use_peepholes=False, + is_reverse=True, + cell_activation="tanh", + proj_activation="tanh", + ) self.assertRaises(TypeError, test_Variable) def test_h_0(): - in_data = fluid.data(name="input", - shape=[None, 2048], - 
dtype="float32") + in_data = fluid.data( + name="input", shape=[None, 2048], dtype="float32" + ) h = fluid.data(name="h", shape=[None, 512], dtype="int32") c = fluid.data(name="c", shape=[None, 512], dtype="float32") - fluid.layers.dynamic_lstmp(input=in_data, - size=2048, - proj_size=256, - use_peepholes=False, - is_reverse=True, - cell_activation="tanh", - proj_activation="tanh", - h_0=h, - c_0=c) + fluid.layers.dynamic_lstmp( + input=in_data, + size=2048, + proj_size=256, + use_peepholes=False, + is_reverse=True, + cell_activation="tanh", + proj_activation="tanh", + h_0=h, + c_0=c, + ) self.assertRaises(TypeError, test_h_0) def test_c_0(): - in_data_ = fluid.data(name="input_", - shape=[None, 2048], - dtype="float32") + in_data_ = fluid.data( + name="input_", shape=[None, 2048], dtype="float32" + ) h_ = fluid.data(name="h_", shape=[None, 512], dtype="float32") c_ = fluid.data(name="c_", shape=[None, 512], dtype="int32") - fluid.layers.dynamic_lstmp(input=in_data_, - size=2048, - proj_size=256, - use_peepholes=False, - is_reverse=True, - cell_activation="tanh", - proj_activation="tanh", - h_0=h_, - c_0=c_) + fluid.layers.dynamic_lstmp( + input=in_data_, + size=2048, + proj_size=256, + use_peepholes=False, + is_reverse=True, + cell_activation="tanh", + proj_activation="tanh", + h_0=h_, + c_0=c_, + ) self.assertRaises(TypeError, test_c_0) diff --git a/python/paddle/fluid/tests/unittests/test_lu_op.py b/python/paddle/fluid/tests/unittests/test_lu_op.py index 0c24bcd68650f6bf095c0c207c5c89486333dcf3..d76fdf16ad9334c13182f2fde06380803fdc8e3f 100644 --- a/python/paddle/fluid/tests/unittests/test_lu_op.py +++ b/python/paddle/fluid/tests/unittests/test_lu_op.py @@ -43,8 +43,11 @@ def scipy_lu(A, pivot): PP.append(P) PL.append(L) PU.append(U) - return np.array(PP).reshape(preshape + pshape), np.array(PL).reshape( - preshape + lshape), np.array(PU).reshape(preshape + ushape) + return ( + np.array(PP).reshape(preshape + pshape), + np.array(PL).reshape(preshape + lshape), + np.array(PU).reshape(preshape + ushape), + ) def Pmat_to_perm(Pmat_org, cut): @@ -66,9 +69,15 @@ def Pmat_to_perm(Pmat_org, cut): sP[idx, :] = tmp permmat.append(permlst) - Pivot = np.array(permmat).reshape(list(shape[:-2]) + [ - rows, - ]) + 1 + Pivot = ( + np.array(permmat).reshape( + list(shape[:-2]) + + [ + rows, + ] + ) + + 1 + ) return Pivot[..., :cut] @@ -112,17 +121,20 @@ class TestLUOp(OpTest): ushape = np.array(sU.shape) lpad = (len(sL.shape) - 2) * [(0, 0)] + list( - ((0, (ashape - lshape)[-2]), (0, (ashape - lshape)[-1]))) + ((0, (ashape - lshape)[-2]), (0, (ashape - lshape)[-1])) + ) upad = (len(sU.shape) - 2) * [(0, 0)] + list( - ((0, (ashape - ushape)[-2]), (0, (ashape - ushape)[-1]))) + ((0, (ashape - ushape)[-2]), (0, (ashape - ushape)[-1])) + ) NsL = np.pad(sL, lpad) NsU = np.pad(sU, upad) NLU = NsL + NsU self.output = NLU self.Pivots = Pmat_to_perm(sP, min(ashape[-2], ashape[-1])) - self.Infos = np.zeros( - self.x_shape[:-2]) if len(X.shape) > 2 else np.array([0]) + self.Infos = ( + np.zeros(self.x_shape[:-2]) if len(X.shape) > 2 else np.array([0]) + ) def setUp(self): self.op_type = "lu" @@ -136,7 +148,7 @@ class TestLUOp(OpTest): self.outputs = { 'Out': self.output, 'Pivots': self.Pivots, - 'Infos': self.Infos + 'Infos': self.Infos, } def test_check_output(self): @@ -173,9 +185,7 @@ class TestLUOp3(TestLUOp): class TestLUAPI(unittest.TestCase): - def test_dygraph(self): - def run_lu_dygraph(shape, dtype): if dtype == "float32": np_dtype = np.float32 @@ -217,7 +227,7 @@ class TestLUAPI(unittest.TestCase): (4, 
5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] dtypes = ["float32", "float64"] for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes): @@ -251,28 +261,29 @@ class TestLUAPI(unittest.TestCase): ushape = np.array(sU.shape) lpad = (len(sL.shape) - 2) * [(0, 0)] + list( - ((0, (ashape - lshape)[-2]), (0, - (ashape - lshape)[-1]))) + ((0, (ashape - lshape)[-2]), (0, (ashape - lshape)[-1])) + ) upad = (len(sU.shape) - 2) * [(0, 0)] + list( - ((0, (ashape - ushape)[-2]), (0, - (ashape - ushape)[-1]))) + ((0, (ashape - ushape)[-2]), (0, (ashape - ushape)[-1])) + ) NsL = np.pad(sL, lpad) NsU = np.pad(sU, upad) NLU = NsL + NsU - x = paddle.fluid.data(name="input", - shape=shape, - dtype=dtype) + x = paddle.fluid.data( + name="input", shape=shape, dtype=dtype + ) lu, p = paddle.linalg.lu(x, pivot=pivot) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": a}, - fetch_list=[lu, p]) - np.testing.assert_allclose(fetches[0], - NLU, - rtol=1e-05, - atol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": a}, + fetch_list=[lu, p], + ) + np.testing.assert_allclose( + fetches[0], NLU, rtol=1e-05, atol=1e-05 + ) tensor_shapes = [ (3, 5), @@ -283,7 +294,7 @@ class TestLUAPI(unittest.TestCase): (4, 5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] dtypes = ["float32", "float64"] for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes): diff --git a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py index 1ec36680fa4c27cdbd5973c07eb9481ede0b1352..0c00fbea8208808fc9cce8f9a1596fd83725a3f3 100644 --- a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py +++ b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py @@ -48,9 +48,11 @@ def scipy_lu_unpack(A): Llst.append(L) Ulst.append(U) - return np.array(Plst).reshape(preshape + pshape), np.array( - Llst).reshape(preshape + lshape), np.array(Ulst).reshape(preshape + - ushape) + return ( + np.array(Plst).reshape(preshape + pshape), + np.array(Llst).reshape(preshape + lshape), + np.array(Ulst).reshape(preshape + ushape), + ) def Pmat_to_perm(Pmat_org, cut): @@ -72,9 +74,15 @@ def Pmat_to_perm(Pmat_org, cut): sP[idx, :] = tmp permmat.append(permlst) - Pivot = np.array(permmat).reshape(list(shape[:-2]) + [ - rows, - ]) + 1 + Pivot = ( + np.array(permmat).reshape( + list(shape[:-2]) + + [ + rows, + ] + ) + + 1 + ) return Pivot[..., :cut] @@ -132,21 +140,23 @@ class TestLU_UnpackOp(OpTest): place = fluid.CPUPlace() if core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) - xv = paddle.fluid.data(name="input", - shape=self.x_shape, - dtype=self.dtype) + xv = paddle.fluid.data( + name="input", shape=self.x_shape, dtype=self.dtype + ) lu, p = paddle.linalg.lu(xv) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": x}, - fetch_list=[lu, p]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": x}, + fetch_list=[lu, p], + ) lu, pivots = fetches[0], fetches[1] self.inputs = {'X': lu, 'Pivots': pivots} self.attrs = { 'unpack_ludata': self.unpack_ludata, - 'unpack_pivots': self.unpack_pivots + 'unpack_pivots': self.unpack_pivots, } self.set_output(x) self.outputs = { @@ -189,12 +199,10 @@ class TestLU_UnpackOp3(TestLU_UnpackOp): class TestLU_UnpackAPI(unittest.TestCase): - def setUp(self): np.random.seed(2022) def 
test_dygraph(self): - def run_lu_unpack_dygraph(shape, dtype): if dtype == "float32": np_dtype = np.float32 @@ -229,7 +237,7 @@ class TestLU_UnpackAPI(unittest.TestCase): (4, 5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] dtypes = ["float32", "float64"] for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes): @@ -255,27 +263,26 @@ class TestLU_UnpackAPI(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): sP, sL, sU = scipy_lu_unpack(a) - x = paddle.fluid.data(name="input", - shape=shape, - dtype=dtype) + x = paddle.fluid.data( + name="input", shape=shape, dtype=dtype + ) lu, p = paddle.linalg.lu(x) pP, pL, pU = paddle.linalg.lu_unpack(lu, p) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": a}, - fetch_list=[pP, pL, pU]) - np.testing.assert_allclose(fetches[0], - sP, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(fetches[1], - sL, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(fetches[2], - sU, - rtol=1e-05, - atol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": a}, + fetch_list=[pP, pL, pU], + ) + np.testing.assert_allclose( + fetches[0], sP, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + fetches[1], sL, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + fetches[2], sU, rtol=1e-05, atol=1e-05 + ) tensor_shapes = [ (3, 5), @@ -286,7 +293,7 @@ class TestLU_UnpackAPI(unittest.TestCase): (4, 5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] dtypes = ["float32", "float64"] for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes): diff --git a/python/paddle/fluid/tests/unittests/test_manual_seed.py b/python/paddle/fluid/tests/unittests/test_manual_seed.py index 7d9762c788abbd0feb6227135c7998566f0035d6..ac697a49dbd6699db37b947a66165457b55b723b 100644 --- a/python/paddle/fluid/tests/unittests/test_manual_seed.py +++ b/python/paddle/fluid/tests/unittests/test_manual_seed.py @@ -20,7 +20,6 @@ import numpy as np class TestManualSeed(unittest.TestCase): - def test_seed(self): fluid.enable_dygraph() diff --git a/python/paddle/fluid/tests/unittests/test_margin_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_margin_cross_entropy_op.py index 6473ab7568f1994b4bd9529df5e035906777b17b..d8a5f653a9ae66987388e55ae79a1d166c78d8f2 100644 --- a/python/paddle/fluid/tests/unittests/test_margin_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_margin_cross_entropy_op.py @@ -21,20 +21,15 @@ from paddle.fluid import Program, program_guard def stable_softmax_comm(x): - shiftx = (x - np.max(x)) + shiftx = x - np.max(x) deno = np.log(np.sum(np.exp(shiftx))) comm = shiftx - deno return comm -def margin_cross_entropy(logits, - label, - axis, - margin1, - margin2, - margin3, - scale, - reduction=None): +def margin_cross_entropy( + logits, label, axis, margin1, margin2, margin3, scale, reduction=None +): one_hot_label = np.zeros_like(logits, dtype=logits.dtype) for i, lb in enumerate(label): one_hot_label[i, lb] = 1.0 @@ -61,16 +56,18 @@ def margin_cross_entropy(logits, return loss, softmax -def python_api(logits, - label, - return_softmax=False, - ring_id=0, - rank=0, - nrank=0, - margin1=1.0, - margin2=0.5, - margin3=0.0, - scale=64.0): +def python_api( + logits, + label, + return_softmax=False, + ring_id=0, + rank=0, + nrank=0, + margin1=1.0, + margin2=0.5, + margin3=0.0, + scale=64.0, +): return 
paddle.nn.functional.margin_cross_entropy( logits, label, @@ -80,13 +77,14 @@ def python_api(logits, margin3=margin3, scale=scale, group=None, - reduction=None) + reduction=None, + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOp(OpTest): - def initParams(self): self.python_api = python_api self.op_type = "margin_cross_entropy" @@ -111,26 +109,35 @@ class TestMarginCrossEntropyOp(OpTest): self.init_dtype() datas = np.random.uniform( - -0.99, 0.99, [self.batch_dim, self.feat_dim]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.feat_dim] + ).astype(self.dtype) datas = datas / np.sqrt(np.sum(np.square(datas), axis=1, keepdims=True)) weights = np.random.uniform( - -0.99, 0.99, [self.feat_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.feat_dim, self.num_class] + ).astype(self.dtype) weights = weights / np.sqrt( - np.sum(np.square(weights), axis=0, keepdims=True)) + np.sum(np.square(weights), axis=0, keepdims=True) + ) logits = np.matmul(datas, weights) - labels = np.random.randint(0, - self.num_class, (self.batch_dim, ), - dtype="int64") + labels = np.random.randint( + 0, self.num_class, (self.batch_dim,), dtype="int64" + ) - loss, softmax = margin_cross_entropy(logits, labels, self.axis, - self.margin1, self.margin2, - self.margin3, self.scale) + loss, softmax = margin_cross_entropy( + logits, + labels, + self.axis, + self.margin1, + self.margin2, + self.margin3, + self.scale, + ) self.inputs = {"Logits": logits, "Label": labels} self.outputs = { "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { 'margin1': self.margin1, @@ -140,55 +147,61 @@ class TestMarginCrossEntropyOp(OpTest): } def test_check_output(self): - self.check_output_with_place(core.CUDAPlace(0), - atol=1e-5, - check_eager=True) + self.check_output_with_place( + core.CUDAPlace(0), atol=1e-5, check_eager=True + ) def test_check_grad(self): - self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], - "Loss", - check_eager=True) + self.check_grad_with_place( + core.CUDAPlace(0), ["Logits"], "Loss", check_eager=True + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp): - def init_dtype(self): self.dtype = np.float32 def test_check_grad(self): - self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], - "Loss", - numeric_grad_delta=5e-2, - max_relative_error=5e-2, - check_eager=True) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + self.check_grad_with_place( + core.CUDAPlace(0), + ["Logits"], + "Loss", + numeric_grad_delta=5e-2, + max_relative_error=5e-2, + check_eager=True, + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp): - def init_dtype(self): self.dtype = np.float16 def test_check_output(self): - self.check_output_with_place(core.CUDAPlace(0), - atol=5e-2, - check_eager=True) + self.check_output_with_place( + core.CUDAPlace(0), atol=5e-2, check_eager=True + ) def test_check_grad(self): - self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], - "Loss", - numeric_grad_delta=6e-1, - max_relative_error=6e-1, - check_eager=True) - 
- -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + self.check_grad_with_place( + core.CUDAPlace(0), + ["Logits"], + "Loss", + numeric_grad_delta=6e-1, + max_relative_error=6e-1, + check_eager=True, + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpCosFace(TestMarginCrossEntropyOp): - def init_loss_params(self): self.margin1 = 1.0 self.margin2 = 0.0 @@ -196,10 +209,10 @@ class TestMarginCrossEntropyOpCosFace(TestMarginCrossEntropyOp): self.scale = 2.0 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp): - def init_loss_params(self): self.margin1 = 1.35 self.margin2 = 0.0 @@ -208,28 +221,27 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp): class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp): - def test_check_output(self): try: - self.check_output_with_place(core.CPUPlace(), - atol=1e-5, - check_eager=True) + self.check_output_with_place( + core.CPUPlace(), atol=1e-5, check_eager=True + ) except RuntimeError: pass def test_check_grad(self): try: - self.check_grad_with_place(core.CPUPlace(), ["Logits"], - "Loss", - check_eager=True) + self.check_grad_with_place( + core.CPUPlace(), ["Logits"], "Loss", check_eager=True + ) except RuntimeError: pass -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpV2(unittest.TestCase): - def setUp(self): self.initParams() np.random.seed(self.seed) @@ -268,31 +280,42 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): def check_static_result(self, place): with program_guard(Program(), Program()): datas = np.random.uniform( - -0.99, 0.99, [self.batch_dim, self.feat_dim]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.feat_dim] + ).astype(self.dtype) datas = datas / np.sqrt( - np.sum(np.square(datas), axis=1, keepdims=True)) + np.sum(np.square(datas), axis=1, keepdims=True) + ) weights = np.random.uniform( - -0.99, 0.99, [self.feat_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.feat_dim, self.num_class] + ).astype(self.dtype) weights = weights / np.sqrt( - np.sum(np.square(weights), axis=0, keepdims=True)) + np.sum(np.square(weights), axis=0, keepdims=True) + ) logits_np = np.matmul(datas, weights) - labels_np = np.random.randint(0, - self.num_class, (self.batch_dim, ), - dtype="int64") - - loss_np, softmax_np = margin_cross_entropy(logits_np, labels_np, - self.axis, self.margin1, - self.margin2, - self.margin3, self.scale, - self.reduction) - - logits = paddle.static.data(name='logits', - shape=[self.batch_dim, self.num_class], - dtype=self.dtype) - label = paddle.static.data(name='label', - shape=[self.batch_dim], - dtype="int64") + labels_np = np.random.randint( + 0, self.num_class, (self.batch_dim,), dtype="int64" + ) + + loss_np, softmax_np = margin_cross_entropy( + logits_np, + labels_np, + self.axis, + self.margin1, + self.margin2, + self.margin3, + self.scale, + self.reduction, + ) + + logits = paddle.static.data( + name='logits', + shape=[self.batch_dim, self.num_class], + dtype=self.dtype, + ) + label = paddle.static.data( + name='label', shape=[self.batch_dim], dtype="int64" + ) loss, softmax = 
paddle.nn.functional.margin_cross_entropy( logits, label, @@ -301,16 +324,15 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): margin3=self.margin3, scale=self.scale, return_softmax=True, - reduction=self.reduction) + reduction=self.reduction, + ) exe = paddle.fluid.Executor(place) - [loss_res, - softmax_res] = exe.run(paddle.fluid.default_main_program(), - feed={ - 'logits': logits_np, - 'label': labels_np - }, - fetch_list=[loss, softmax]) + [loss_res, softmax_res] = exe.run( + paddle.fluid.default_main_program(), + feed={'logits': logits_np, 'label': labels_np}, + fetch_list=[loss, softmax], + ) np.testing.assert_allclose(loss_res, loss_np) np.testing.assert_allclose(softmax_res, softmax_np) @@ -321,24 +343,33 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): def check_dynamic_result(self, place): with paddle.fluid.dygraph.guard(place): datas = np.random.uniform( - -0.99, 0.99, [self.batch_dim, self.feat_dim]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.feat_dim] + ).astype(self.dtype) datas = datas / np.sqrt( - np.sum(np.square(datas), axis=1, keepdims=True)) + np.sum(np.square(datas), axis=1, keepdims=True) + ) weights = np.random.uniform( - -0.99, 0.99, [self.feat_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.feat_dim, self.num_class] + ).astype(self.dtype) weights = weights / np.sqrt( - np.sum(np.square(weights), axis=0, keepdims=True)) + np.sum(np.square(weights), axis=0, keepdims=True) + ) logits_np = np.matmul(datas, weights) - labels_np = np.random.randint(0, - self.num_class, (self.batch_dim, ), - dtype="int64") - - loss_np, softmax_np = margin_cross_entropy(logits_np, labels_np, - self.axis, self.margin1, - self.margin2, - self.margin3, self.scale, - self.reduction) + labels_np = np.random.randint( + 0, self.num_class, (self.batch_dim,), dtype="int64" + ) + + loss_np, softmax_np = margin_cross_entropy( + logits_np, + labels_np, + self.axis, + self.margin1, + self.margin2, + self.margin3, + self.scale, + self.reduction, + ) logits = paddle.to_tensor(logits_np, dtype=self.dtype) labels = paddle.to_tensor(labels_np, dtype="int64") @@ -351,7 +382,8 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): margin3=self.margin3, scale=self.scale, return_softmax=True, - reduction=self.reduction) + reduction=self.reduction, + ) loss_res = loss.numpy() softmax_res = softmax.numpy() @@ -359,26 +391,26 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase): np.testing.assert_allclose(softmax_res, softmax_np) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpV3(TestMarginCrossEntropyOpV2): - def init_reduction(self): self.reduction = 'mean' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpV4(TestMarginCrossEntropyOpV2): - def init_reduction(self): self.reduction = 'sum' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestMarginCrossEntropyOpAPIError(unittest.TestCase): - def setUp(self): self.initParams() np.random.seed(self.seed) @@ -408,17 +440,15 @@ class TestMarginCrossEntropyOpAPIError(unittest.TestCase): self.dtype = np.float64 def test_dynamic_errors(self): - def test_dim(): for place in 
self.places: with paddle.fluid.dygraph.guard(place): - labels_np = np.random.randint(0, - self.num_class, - (self.batch_dim, 2), - dtype="int64") + labels_np = np.random.randint( + 0, self.num_class, (self.batch_dim, 2), dtype="int64" + ) logits_np = np.random.uniform( - -0.99, 0.99, - [self.batch_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.num_class] + ).astype(self.dtype) labels = paddle.to_tensor(labels_np) logits = paddle.to_tensor(logits_np) @@ -430,17 +460,18 @@ class TestMarginCrossEntropyOpAPIError(unittest.TestCase): margin3=self.margin3, scale=self.scale, return_softmax=True, - reduction=None) + reduction=None, + ) def test_label_type(): for place in self.places: with paddle.fluid.dygraph.guard(place): - labels_np = np.random.uniform(0, self.num_class, - (self.batch_dim, 1)).astype( - self.dtype) + labels_np = np.random.uniform( + 0, self.num_class, (self.batch_dim, 1) + ).astype(self.dtype) logits_np = np.random.uniform( - -0.99, 0.99, - [self.batch_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.num_class] + ).astype(self.dtype) labels = paddle.to_tensor(labels_np) logits = paddle.to_tensor(logits_np) @@ -452,18 +483,18 @@ class TestMarginCrossEntropyOpAPIError(unittest.TestCase): margin3=self.margin3, scale=self.scale, return_softmax=True, - reduction=None) + reduction=None, + ) def test_group_value(): for place in self.places: with paddle.fluid.dygraph.guard(place): - labels_np = np.random.randint(0, - self.num_class, - (self.batch_dim, ), - dtype="int64") + labels_np = np.random.randint( + 0, self.num_class, (self.batch_dim,), dtype="int64" + ) logits_np = np.random.uniform( - -0.99, 0.99, - [self.batch_dim, self.num_class]).astype(self.dtype) + -0.99, 0.99, [self.batch_dim, self.num_class] + ).astype(self.dtype) labels = paddle.to_tensor(labels_np) logits = paddle.to_tensor(logits_np) @@ -476,7 +507,8 @@ class TestMarginCrossEntropyOpAPIError(unittest.TestCase): scale=self.scale, return_softmax=True, reduction=None, - group=True) + group=True, + ) self.assertRaises(ValueError, test_dim) self.assertRaises(NotImplementedError, test_label_type) diff --git a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py index 95919ff53292f481f0a171af32be0915bd318dd2..f68b995a1ff7a897090f4c4a0c24e0f4175efa89 100644 --- a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py @@ -19,20 +19,21 @@ from paddle import fluid class TestMarginRankLossOp(OpTest): - def setUp(self): self.op_type = "margin_rank_loss" batch_size = 5 margin = 0.5 # labels_{i} = {-1, 1} - label = 2 * np.random.randint( - 0, 2, size=(batch_size, 1)).astype("float32") - 1 + label = ( + 2 * np.random.randint(0, 2, size=(batch_size, 1)).astype("float32") + - 1 + ) x1 = np.random.random((batch_size, 1)).astype("float32") x2 = np.random.random((batch_size, 1)).astype("float32") # loss = max(0, -label * (x1 - x2) + margin) loss = -label * (x1 - x2) + margin loss = np.where(loss > 0, loss, 0) - act = np.where(loss > 0, 1., 0.) 
+ act = np.where(loss > 0, 1.0, 0.0) self.attrs = {'margin': margin} self.inputs = {'Label': label, 'X1': x1, 'X2': x2} @@ -52,13 +53,17 @@ class TestMarginRankLossOp(OpTest): class TestMarginRankLossLayer(unittest.TestCase): - def setUp(self): self.batch_size = 5 self.margin = 0.5 # labels_{i} = {-1, 1} - self.label = 2 * np.random.randint( - 0, 2, size=(self.batch_size, 1)).astype("float32") - 1 + self.label = ( + 2 + * np.random.randint(0, 2, size=(self.batch_size, 1)).astype( + "float32" + ) + - 1 + ) self.x1 = np.random.random((self.batch_size, 1)).astype("float32") self.x2 = np.random.random((self.batch_size, 1)).astype("float32") # loss = max(0, -label * (x1 - x2) + margin) @@ -86,13 +91,11 @@ class TestMarginRankLossLayer(unittest.TestCase): exe = fluid.Executor(place) exe.run(start) - out_np, = exe.run(main, - feed={ - "label": self.label, - "x1": self.x1, - "x2": self.x2 - }, - fetch_list=[out]) + (out_np,) = exe.run( + main, + feed={"label": self.label, "x1": self.x1, "x2": self.x2}, + fetch_list=[out], + ) np.testing.assert_allclose(out_np, self.loss) diff --git a/python/paddle/fluid/tests/unittests/test_marker_op.py b/python/paddle/fluid/tests/unittests/test_marker_op.py index f0cb6ec1bbbf2b092203e1feb51b1e572d984d8d..ed19915c41e008130b0a0bd73339d64cb84d6e81 100644 --- a/python/paddle/fluid/tests/unittests/test_marker_op.py +++ b/python/paddle/fluid/tests/unittests/test_marker_op.py @@ -17,14 +17,13 @@ from paddle.distributed.fleet.meta_optimizers.common import OpRole class TestMarkerOp(OpTest): - def setUp(self): self.op_type = "marker" self.inputs = {} self.attrs = { 'marker_role': 'forward', 'marker_pos': 'B', - 'op_role': OpRole.Forward + 'op_role': OpRole.Forward, } self.outputs = {} diff --git a/python/paddle/fluid/tests/unittests/test_masked_select_op.py b/python/paddle/fluid/tests/unittests/test_masked_select_op.py index 4de07192dd08db14d91411e86513b91780ad4062..7774eb23dc17128330941c5702e6612791b43cd4 100644 --- a/python/paddle/fluid/tests/unittests/test_masked_select_op.py +++ b/python/paddle/fluid/tests/unittests/test_masked_select_op.py @@ -27,7 +27,6 @@ def np_masked_select(x, mask): class TestMaskedSelectOp(OpTest): - def setUp(self): self.init() self.op_type = "masked_select" @@ -49,19 +48,16 @@ class TestMaskedSelectOp(OpTest): class TestMaskedSelectOp1(TestMaskedSelectOp): - def init(self): self.shape = (6, 8, 9, 18) class TestMaskedSelectOp2(TestMaskedSelectOp): - def init(self): - self.shape = (168, ) + self.shape = (168,) class TestMaskedSelectAPI(unittest.TestCase): - def test_imperative_mode(self): paddle.disable_static() shape = (88, 6, 8) @@ -86,27 +82,26 @@ class TestMaskedSelectAPI(unittest.TestCase): exe = paddle.static.Executor(place=paddle.CPUPlace()) - res, = exe.run(paddle.static.default_main_program(), - feed={ - "x": np_x, - "mask": np_mask - }, - fetch_list=[out]) + (res,) = exe.run( + paddle.static.default_main_program(), + feed={"x": np_x, "mask": np_mask}, + fetch_list=[out], + ) np.testing.assert_allclose(res, np_out, rtol=1e-05) class TestMaskedSelectError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='float32', name='x') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='mask_float') + mask_float = paddle.fluid.data( + 
shape=shape, dtype='float32', name='mask_float' + ) np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) diff --git a/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py b/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py index 56e6ab07a301979120821186718afb551f799ae3..8b46676704e055ed8d4383b17925bb8b17e36b8c 100644 --- a/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestMatchMatrixTensorOp(OpTest): - def setUp(self): self.init_op_type() self.set_data() @@ -56,8 +55,8 @@ class TestMatchMatrixTensorOp(OpTest): for idx in range(len(x_lod[0])): x_len = x_lod[0][idx] y_len = y_lod[0][idx] - x_sub = x_data[x_offset:(x_offset + x_len), :] - y_sub = y_data[y_offset:(y_offset + y_len), :] + x_sub = x_data[x_offset : (x_offset + x_len), :] + y_sub = y_data[y_offset : (y_offset + y_len), :] tmp_sub = np.dot(x_sub, w_data) tmp = np.vstack((tmp, tmp_sub.reshape(tmp_sub.size, 1))) @@ -77,7 +76,6 @@ class TestMatchMatrixTensorOp(OpTest): class TestMatchMatrixTensorOpCase1(TestMatchMatrixTensorOp): - def set_data(self): ix, iy, h, dim_t = [5, 8, 25, 4] x_lod = [[5]] @@ -86,7 +84,6 @@ class TestMatchMatrixTensorOpCase1(TestMatchMatrixTensorOp): class TestMatchMatrixTensorOpCase2(TestMatchMatrixTensorOp): - def set_data(self): ix, iy, h, dim_t = [105, 120, 1, 4] x_lod = [[30, 45, 30]] @@ -95,7 +92,6 @@ class TestMatchMatrixTensorOpCase2(TestMatchMatrixTensorOp): class TestMatchMatrixTensorOpCase3(TestMatchMatrixTensorOp): - def set_data(self): ix, iy, h, dim_t = [5, 9, 32, 1] x_lod = [[1, 2, 2]] @@ -104,7 +100,6 @@ class TestMatchMatrixTensorOpCase3(TestMatchMatrixTensorOp): class TestMatchMatrixTensorOpCase4(TestMatchMatrixTensorOp): - def set_data(self): ix, iy, h, dim_t = [8, 12, 16, 5] x_lod = [[1, 2, 3, 1, 1]] @@ -114,9 +109,9 @@ class TestMatchMatrixTensorOpCase4(TestMatchMatrixTensorOp): def test_api(self): x_lod_tensor = fluid.layers.data(name='x', shape=[10], lod_level=1) y_lod_tensor = fluid.layers.data(name='y', shape=[10], lod_level=1) - out, out_tmp = fluid.contrib.match_matrix_tensor(x=x_lod_tensor, - y=y_lod_tensor, - channel_num=3) + out, out_tmp = fluid.contrib.match_matrix_tensor( + x=x_lod_tensor, y=y_lod_tensor, channel_num=3 + ) place = fluid.CPUPlace() x_data = np.random.rand(7, 10).astype('float32') @@ -126,12 +121,9 @@ class TestMatchMatrixTensorOpCase4(TestMatchMatrixTensorOp): exe = fluid.Executor(place=place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x, - 'y': y - }, - fetch_list=[out], - return_numpy=False) + ret = exe.run( + feed={'x': x, 'y': y}, fetch_list=[out], return_numpy=False + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py index 9e36c3b382f217a18a6a2967ec16a0d13c2a16b2..9efdb268a4b693e22e5bb34ada0df1f58b4b0d8e 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py @@ -21,7 +21,6 @@ import numpy as np class TestMathOpPatches(unittest.TestCase): - def setUp(self): paddle.enable_static() @@ -36,9 +35,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, c_np, d_np = exe.run(fluid.default_main_program(), - 
feed={"a": a_np}, - fetch_list=[b, c, d]) + b_np, c_np, d_np = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b, c, d] + ) np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05) ab_np = np.concatenate([a_np, b_np], axis=1) np.testing.assert_allclose(ab_np + 10, c_np, rtol=1e-05) @@ -52,9 +51,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + b_np = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05) @prog_scope() @@ -64,9 +63,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05) @prog_scope() @@ -76,9 +75,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(10 - a_np, b_np, rtol=1e-05) @prog_scope() @@ -88,9 +87,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(a_np * 10, b_np, rtol=1e-05) @prog_scope() @@ -100,9 +99,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(10 * a_np, b_np, rtol=1e-05) @prog_scope() @@ -112,9 +111,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(a_np / 10, b_np, rtol=1e-05) @prog_scope() @@ -125,9 +124,9 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(10 / a_np, b_np, rtol=1e-05) @prog_scope() @@ -139,12 +138,11 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - 'b': b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + 
fluid.default_main_program(), + feed={"a": a_np, 'b': b_np}, + fetch_list=[c], + ) np.testing.assert_allclose(a_np / b_np, c_np, rtol=1e-05) @prog_scope() @@ -156,12 +154,11 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - 'b': b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + fluid.default_main_program(), + feed={"a": a_np, 'b': b_np}, + fetch_list=[c], + ) np.testing.assert_allclose(a_np * b_np, c_np, rtol=1e-05) @prog_scope() @@ -173,12 +170,11 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - 'b': b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + fluid.default_main_program(), + feed={"a": a_np, 'b': b_np}, + fetch_list=[c], + ) np.testing.assert_allclose(a_np + b_np, c_np, rtol=1e-05) @prog_scope() @@ -190,12 +186,11 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - 'b': b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + fluid.default_main_program(), + feed={"a": a_np, 'b': b_np}, + fetch_list=[c], + ) np.testing.assert_allclose(a_np - b_np, c_np, rtol=1e-05) @prog_scope() @@ -205,9 +200,9 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('int64') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) b_np_actual = (a_np / 7).astype('float32') np.testing.assert_allclose(b_np, b_np_actual, rtol=1e-05) @@ -216,19 +211,18 @@ class TestMathOpPatches(unittest.TestCase): def test_equal(self): a = fluid.layers.data(name="a", shape=[1], dtype='float32') b = fluid.layers.data(name="b", shape=[1], dtype='float32') - c = (a == b) + c = a == b place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32') b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32') - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - "b": b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + fluid.default_main_program(), + feed={"a": a_np, "b": b_np}, + fetch_list=[c], + ) np.testing.assert_array_equal(c_np, a_np == b_np) self.assertEqual(c.dtype, fluid.core.VarDesc.VarType.BOOL) @@ -240,19 +234,18 @@ class TestMathOpPatches(unittest.TestCase): one = fluid.layers.ones(shape=[1], dtype='int32') zero = fluid.layers.zeros(shape=[1], dtype='int32') - cond = (one == zero) + cond = one == zero c = fluid.layers.cond(cond, lambda: a + b, lambda: a - b) place = fluid.CPUPlace() exe = fluid.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float') b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float') - c_np, = exe.run(fluid.default_main_program(), - feed={ - "a": a_np, - "b": b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + fluid.default_main_program(), + feed={"a": a_np, "b": b_np}, + fetch_list=[c], + ) np.testing.assert_array_equal(c_np, a_np - b_np) @@ -264,9 +257,9 @@ class TestMathOpPatches(unittest.TestCase): exe 
= fluid.Executor(place) a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(-a_np, b_np, rtol=1e-05) @prog_scope() @@ -277,9 +270,9 @@ class TestMathOpPatches(unittest.TestCase): exe = fluid.Executor(place) a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64') - b_np, = exe.run(fluid.default_main_program(), - feed={"a": a_np}, - fetch_list=[b]) + (b_np,) = exe.run( + fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + ) np.testing.assert_allclose(a_np.astype('float32'), b_np, rtol=1e-05) def test_bitwise_and(self): @@ -292,12 +285,11 @@ class TestMathOpPatches(unittest.TestCase): z = x & y exe = fluid.Executor() - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[z]) + out = exe.run( + fluid.default_main_program(), + feed={"x": x_np, "y": y_np}, + fetch_list=[z], + ) np.testing.assert_array_equal(out[0], out_np) @prog_scope() @@ -311,12 +303,11 @@ class TestMathOpPatches(unittest.TestCase): z = x | y exe = fluid.Executor() - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[z]) + out = exe.run( + fluid.default_main_program(), + feed={"x": x_np, "y": y_np}, + fetch_list=[z], + ) np.testing.assert_array_equal(out[0], out_np) @prog_scope() @@ -330,12 +321,11 @@ class TestMathOpPatches(unittest.TestCase): z = x ^ y exe = fluid.Executor() - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[z]) + out = exe.run( + fluid.default_main_program(), + feed={"x": x_np, "y": y_np}, + fetch_list=[z], + ) np.testing.assert_array_equal(out[0], out_np) @prog_scope() @@ -347,9 +337,9 @@ class TestMathOpPatches(unittest.TestCase): z = ~x exe = fluid.Executor() - out = exe.run(fluid.default_main_program(), - feed={"x": x_np}, - fetch_list=[z]) + out = exe.run( + fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z] + ) np.testing.assert_array_equal(out[0], out_np) @prog_scope() @@ -361,9 +351,9 @@ class TestMathOpPatches(unittest.TestCase): z = x.T exe = fluid.Executor() - out = exe.run(fluid.default_main_program(), - feed={"x": x_np}, - fetch_list=[z]) + out = exe.run( + fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z] + ) np.testing.assert_array_equal(out[0], out_np) @prog_scope() @@ -382,12 +372,11 @@ class TestMathOpPatches(unittest.TestCase): b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - c_np, = exe.run(paddle.static.default_main_program(), - feed={ - "a": a_np, - "b": b_np - }, - fetch_list=[c]) + (c_np,) = exe.run( + paddle.static.default_main_program(), + feed={"a": a_np, "b": b_np}, + fetch_list=[c], + ) np.testing.assert_allclose(a_np @ b_np, c_np, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py index 5c66d4cd6b5f1a63b74b2f6b8bfdafeb9dc45f6d..5330dc8b4f852f95177e7a7bfb475af4e85e30a6 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestMathOpPatchesVarBase(unittest.TestCase): - def setUp(self): self.shape = [10, 1024] self.dtype = 
np.float32 @@ -75,7 +74,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a / b - #NOTE: Not sure why array_equal fails on windows, allclose is acceptable + # NOTE: Not sure why array_equal fails on windows, allclose is acceptable np.testing.assert_allclose(res.numpy(), a_np / b_np, rtol=1e-05) def test_div(self): @@ -244,8 +243,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) c = fluid.dygraph.to_variable(c_np) - res1 = (a == b) - res2 = (a == c) + res1 = a == b + res2 = a == c np.testing.assert_array_equal(res1.numpy(), a_np == b_np) np.testing.assert_array_equal(res2.numpy(), a_np == c_np) @@ -262,8 +261,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) c = fluid.dygraph.to_variable(c_np) - res1 = (a != b) - res2 = (a != c) + res1 = a != b + res2 = a != c np.testing.assert_array_equal(res1.numpy(), a_np != b_np) np.testing.assert_array_equal(res2.numpy(), a_np != c_np) @@ -278,7 +277,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) - res = (a < b) + res = a < b np.testing.assert_array_equal(res.numpy(), a_np < b_np) def test_less_than(self): @@ -292,7 +291,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) - res = (a <= b) + res = a <= b np.testing.assert_array_equal(res.numpy(), a_np <= b_np) def test_less_equal(self): @@ -306,7 +305,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) - res = (a > b) + res = a > b np.testing.assert_array_equal(res.numpy(), a_np > b_np) def test_greater_than(self): @@ -320,7 +319,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) - res = (a >= b) + res = a >= b np.testing.assert_array_equal(res.numpy(), a_np >= b_np) def test_greater_equal(self): @@ -386,10 +385,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase): x = fluid.layers.ones((2, 2), dtype="float32") y = t * x - np.testing.assert_allclose(y.numpy(), - t * np.ones((2, 2), dtype='float32'), - rtol=1e-05, - atol=0.0) + np.testing.assert_allclose( + y.numpy(), + t * np.ones((2, 2), dtype='float32'), + rtol=1e-05, + atol=0.0, + ) def test_np_left_mul(self): with _test_eager_guard(): @@ -483,92 +484,112 @@ class TestMathOpPatchesVarBase(unittest.TestCase): np.testing.assert_array_equal(x.abs().numpy(), paddle.abs(x).numpy()) m = x.abs() np.testing.assert_array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()) - np.testing.assert_array_equal(m.rsqrt().numpy(), - paddle.rsqrt(m).numpy()) + np.testing.assert_array_equal( + m.rsqrt().numpy(), paddle.rsqrt(m).numpy() + ) np.testing.assert_array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()) - np.testing.assert_array_equal(x.floor().numpy(), - paddle.floor(x).numpy()) + np.testing.assert_array_equal( + x.floor().numpy(), paddle.floor(x).numpy() + ) np.testing.assert_array_equal(x.cos().numpy(), paddle.cos(x).numpy()) np.testing.assert_array_equal(x.acos().numpy(), paddle.acos(x).numpy()) np.testing.assert_array_equal(x.asin().numpy(), paddle.asin(x).numpy()) 
np.testing.assert_array_equal(x.sin().numpy(), paddle.sin(x).numpy()) np.testing.assert_array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()) np.testing.assert_array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()) - np.testing.assert_array_equal(x.round().numpy(), - paddle.round(x).numpy()) - np.testing.assert_array_equal(x.reciprocal().numpy(), - paddle.reciprocal(x).numpy()) - np.testing.assert_array_equal(x.square().numpy(), - paddle.square(x).numpy()) + np.testing.assert_array_equal( + x.round().numpy(), paddle.round(x).numpy() + ) + np.testing.assert_array_equal( + x.reciprocal().numpy(), paddle.reciprocal(x).numpy() + ) + np.testing.assert_array_equal( + x.square().numpy(), paddle.square(x).numpy() + ) np.testing.assert_array_equal(x.rank().numpy(), paddle.rank(x).numpy()) np.testing.assert_array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()) - np.testing.assert_array_equal(x.asinh().numpy(), - paddle.asinh(x).numpy()) + np.testing.assert_array_equal( + x.asinh().numpy(), paddle.asinh(x).numpy() + ) ### acosh(x) = nan, need to change input t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype) t = paddle.to_tensor(t_np) - np.testing.assert_array_equal(t.acosh().numpy(), - paddle.acosh(t).numpy()) - np.testing.assert_array_equal(x.atanh().numpy(), - paddle.atanh(x).numpy()) - d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898], - [1.30058, 1.0688717, 1.4928783], - [1.0958099, 1.3724753, 1.8926544]]) + np.testing.assert_array_equal( + t.acosh().numpy(), paddle.acosh(t).numpy() + ) + np.testing.assert_array_equal( + x.atanh().numpy(), paddle.atanh(x).numpy() + ) + d = paddle.to_tensor( + [ + [1.2285208, 1.3491015, 1.4899898], + [1.30058, 1.0688717, 1.4928783], + [1.0958099, 1.3724753, 1.8926544], + ] + ) d = d.matmul(d.t()) # ROCM not support cholesky if not fluid.core.is_compiled_with_rocm(): - np.testing.assert_array_equal(d.cholesky().numpy(), - paddle.cholesky(d).numpy()) + np.testing.assert_array_equal( + d.cholesky().numpy(), paddle.cholesky(d).numpy() + ) - np.testing.assert_array_equal(x.is_empty().numpy(), - paddle.is_empty(x).numpy()) - np.testing.assert_array_equal(x.isfinite().numpy(), - paddle.isfinite(x).numpy()) np.testing.assert_array_equal( - x.cast('int32').numpy(), - paddle.cast(x, 'int32').numpy()) + x.is_empty().numpy(), paddle.is_empty(x).numpy() + ) + np.testing.assert_array_equal( + x.isfinite().numpy(), paddle.isfinite(x).numpy() + ) np.testing.assert_array_equal( - x.expand([3, 2, 3]).numpy(), - paddle.expand(x, [3, 2, 3]).numpy()) + x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy() + ) np.testing.assert_array_equal( - x.tile([2, 2]).numpy(), - paddle.tile(x, [2, 2]).numpy()) - np.testing.assert_array_equal(x.flatten().numpy(), - paddle.flatten(x).numpy()) + x.expand([3, 2, 3]).numpy(), paddle.expand(x, [3, 2, 3]).numpy() + ) + np.testing.assert_array_equal( + x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy() + ) + np.testing.assert_array_equal( + x.flatten().numpy(), paddle.flatten(x).numpy() + ) index = paddle.to_tensor([0, 1]) np.testing.assert_array_equal( - x.gather(index).numpy(), - paddle.gather(x, index).numpy()) + x.gather(index).numpy(), paddle.gather(x, index).numpy() + ) index = paddle.to_tensor([[0, 1], [1, 2]]) np.testing.assert_array_equal( - x.gather_nd(index).numpy(), - paddle.gather_nd(x, index).numpy()) + x.gather_nd(index).numpy(), paddle.gather_nd(x, index).numpy() + ) np.testing.assert_array_equal( - x.reverse([0, 1]).numpy(), - paddle.reverse(x, [0, 1]).numpy()) + x.reverse([0, 1]).numpy(), paddle.reverse(x, [0, 
1]).numpy() + ) np.testing.assert_array_equal( - a.reshape([3, 2]).numpy(), - paddle.reshape(a, [3, 2]).numpy()) + a.reshape([3, 2]).numpy(), paddle.reshape(a, [3, 2]).numpy() + ) np.testing.assert_array_equal( x.slice([0, 1], [0, 0], [1, 2]).numpy(), - paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()) + paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy(), + ) np.testing.assert_array_equal( - x.split(2)[0].numpy(), - paddle.split(x, 2)[0].numpy()) + x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy() + ) m = paddle.to_tensor( - np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype)) + np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype) + ) np.testing.assert_array_equal( - m.squeeze([]).numpy(), - paddle.squeeze(m, []).numpy()) + m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy() + ) np.testing.assert_array_equal( - m.squeeze([1, 2]).numpy(), - paddle.squeeze(m, [1, 2]).numpy()) + m.squeeze([1, 2]).numpy(), paddle.squeeze(m, [1, 2]).numpy() + ) m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32') - np.testing.assert_array_equal(m.unique()[0].numpy(), - paddle.unique(m)[0].numpy()) + np.testing.assert_array_equal( + m.unique()[0].numpy(), paddle.unique(m)[0].numpy() + ) np.testing.assert_array_equal( m.unique(return_counts=True)[1], - paddle.unique(m, return_counts=True)[1]) + paddle.unique(m, return_counts=True)[1], + ) np.testing.assert_array_equal(x.flip([0]), paddle.flip(x, [0])) np.testing.assert_array_equal(x.unbind(0), paddle.unbind(x, 0)) np.testing.assert_array_equal(x.roll(1), paddle.roll(x, 1)) @@ -582,84 +603,86 @@ class TestMathOpPatchesVarBase(unittest.TestCase): # 2. Binary operation np.testing.assert_array_equal( - x.divide(y).numpy(), - paddle.divide(x, y).numpy()) + x.divide(y).numpy(), paddle.divide(x, y).numpy() + ) np.testing.assert_array_equal( x.matmul(y, True, False).numpy(), - paddle.matmul(x, y, True, False).numpy()) + paddle.matmul(x, y, True, False).numpy(), + ) np.testing.assert_array_equal( x.norm(p='fro', axis=[0, 1]).numpy(), - paddle.norm(x, p='fro', axis=[0, 1]).numpy()) + paddle.norm(x, p='fro', axis=[0, 1]).numpy(), + ) np.testing.assert_array_equal( - x.dist(y).numpy(), - paddle.dist(x, y).numpy()) + x.dist(y).numpy(), paddle.dist(x, y).numpy() + ) np.testing.assert_array_equal( - x.cross(y).numpy(), - paddle.cross(x, y).numpy()) + x.cross(y).numpy(), paddle.cross(x, y).numpy() + ) m = x.expand([2, 2, 3]) n = y.expand([2, 2, 3]).transpose([0, 2, 1]) np.testing.assert_array_equal( - m.bmm(n).numpy(), - paddle.bmm(m, n).numpy()) + m.bmm(n).numpy(), paddle.bmm(m, n).numpy() + ) np.testing.assert_array_equal( - x.histogram(5, -1, 1).numpy(), - paddle.histogram(x, 5, -1, 1).numpy()) + x.histogram(5, -1, 1).numpy(), paddle.histogram(x, 5, -1, 1).numpy() + ) np.testing.assert_array_equal( - x.equal(y).numpy(), - paddle.equal(x, y).numpy()) + x.equal(y).numpy(), paddle.equal(x, y).numpy() + ) np.testing.assert_array_equal( - x.greater_equal(y).numpy(), - paddle.greater_equal(x, y).numpy()) + x.greater_equal(y).numpy(), paddle.greater_equal(x, y).numpy() + ) np.testing.assert_array_equal( - x.greater_than(y).numpy(), - paddle.greater_than(x, y).numpy()) + x.greater_than(y).numpy(), paddle.greater_than(x, y).numpy() + ) np.testing.assert_array_equal( - x.less_equal(y).numpy(), - paddle.less_equal(x, y).numpy()) + x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy() + ) np.testing.assert_array_equal( - x.less_than(y).numpy(), - paddle.less_than(x, y).numpy()) + x.less_than(y).numpy(), paddle.less_than(x, y).numpy() + ) np.testing.assert_array_equal( - 
x.not_equal(y).numpy(), - paddle.not_equal(x, y).numpy()) + x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy() + ) np.testing.assert_array_equal( - x.equal_all(y).numpy(), - paddle.equal_all(x, y).numpy()) + x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy() + ) np.testing.assert_array_equal( - x.allclose(y).numpy(), - paddle.allclose(x, y).numpy()) + x.allclose(y).numpy(), paddle.allclose(x, y).numpy() + ) m = x.expand([2, 2, 3]) np.testing.assert_array_equal( - x.expand_as(m).numpy(), - paddle.expand_as(x, m).numpy()) + x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy() + ) index = paddle.to_tensor([2, 1, 0]) np.testing.assert_array_equal( - a.scatter(index, b).numpy(), - paddle.scatter(a, index, b).numpy()) + a.scatter(index, b).numpy(), paddle.scatter(a, index, b).numpy() + ) # 3. Bool tensor operation x = paddle.to_tensor([[True, False], [True, False]]) y = paddle.to_tensor([[False, False], [False, True]]) np.testing.assert_array_equal( - x.logical_and(y).numpy(), - paddle.logical_and(x, y).numpy()) + x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy() + ) np.testing.assert_array_equal( - x.logical_not(y).numpy(), - paddle.logical_not(x, y).numpy()) + x.logical_not(y).numpy(), paddle.logical_not(x, y).numpy() + ) np.testing.assert_array_equal( - x.logical_or(y).numpy(), - paddle.logical_or(x, y).numpy()) + x.logical_or(y).numpy(), paddle.logical_or(x, y).numpy() + ) np.testing.assert_array_equal( - x.logical_xor(y).numpy(), - paddle.logical_xor(x, y).numpy()) + x.logical_xor(y).numpy(), paddle.logical_xor(x, y).numpy() + ) np.testing.assert_array_equal( - x.logical_and(y).numpy(), - paddle.logical_and(x, y).numpy()) + x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy() + ) a = paddle.to_tensor([[1, 2], [3, 4]]) b = paddle.to_tensor([[4, 3], [2, 1]]) np.testing.assert_array_equal( - x.where(a, b).numpy(), - paddle.where(x, a, b).numpy()) + x.where(a, b).numpy(), paddle.where(x, a, b).numpy() + ) x_np = np.random.randn(3, 6, 9, 7) x = paddle.to_tensor(x_np) @@ -724,7 +747,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a_np = np.random.random(self.shape).astype(self.dtype) with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) - res = 1J * a + res = 1j * a np.testing.assert_array_equal(res.numpy(), 1j * a_np) def test_complex_scalar(self): diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index b8ca9b9dc1a92ae2d2e83a9864bc8483cef1848a..62c7e9b7bb716f00d97d2b12e4767420bca992aa 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -86,7 +86,6 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): class Generator(object): - def setUp(self): self.op_type = "matmul" X = np.random.random(self.shape_X).astype("float32") @@ -95,7 +94,7 @@ class Generator(object): self.inputs = {'X': X, 'Y': Y} self.attrs = { 'transpose_X': self.transpose_X, - 'transpose_Y': self.transpose_Y + 'transpose_Y': self.transpose_Y, } self.outputs = {'Out': Out} @@ -106,39 +105,37 @@ class Generator(object): self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3) def test_check_grad_ignore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=1e-3, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X") + ) def test_check_grad_ignore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=1e-3, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 
'Out', max_relative_error=1e-3, no_grad_set=set('Y') + ) class TestMatmulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The inputs type of matmul_op must be Variable. input1 = 12 self.assertRaises(TypeError, fluid.layers.matmul, input1, input1) # The inputs dtype of matmul_op must be float32, float64. - input2 = fluid.layers.data(name='input2', - shape=[10, 10], - dtype="int32") + input2 = fluid.layers.data( + name='input2', shape=[10, 10], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.matmul, input2, input2) - input3 = fluid.layers.data(name='input3', - shape=[2, 2], - dtype="float16") + input3 = fluid.layers.data( + name='input3', shape=[2, 2], dtype="float16" + ) fluid.layers.matmul(input3, input3) # Negative dimension generation def generate_negative_dims(in_shape): from itertools import combinations + size = len(in_shape) indexs = list() shapes = list() @@ -146,7 +143,8 @@ def generate_negative_dims(in_shape): indexs.extend(list(combinations([j for j in range(size)], i + 1))) for idx in indexs: shapes.append( - [in_shape[i] if i not in idx else -1 for i in range(size)]) + [in_shape[i] if i not in idx else -1 for i in range(size)] + ) return shapes @@ -160,51 +158,61 @@ def test_negative_dims_program(obj): with program_guard(Program(), Program()): x = fluid.data(name='x', shape=shape_x, dtype='float32') y = fluid.data(name='y', shape=shape_y, dtype='float32') - output = fluid.layers.matmul(x, y, obj.transpose_X, - obj.transpose_Y) + output = fluid.layers.matmul( + x, y, obj.transpose_X, obj.transpose_Y + ) obj.assertEqual(len(Ref.shape), len(output.shape)) for idx in range(len(Ref.shape)): if output.shape[idx] != -1: obj.assertEqual(Ref.shape[idx], output.shape[idx]) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(fluid.default_main_program(), - feed={ - 'x': X, - 'y': Y - }, - fetch_list=[output]) + (res,) = exe.run( + fluid.default_main_program(), + feed={'x': X, 'y': Y}, + fetch_list=[output], + ) np.allclose(res, Ref, atol=1e-5) # Generate program api cases for all negative possibilities def api_test(dim_x, dim_y, trans_x, trans_y): - test_name = ('TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( - dim_x, dim_y, trans_x, trans_y)) - shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x, - trans_y) + test_name = 'TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( + dim_x, dim_y, trans_x, trans_y + ) + shape_x, shape_y = generate_compatible_shapes( + dim_x, dim_y, trans_x, trans_y + ) globals()[test_name] = type( - test_name, (unittest.TestCase, ), { + test_name, + (unittest.TestCase,), + { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': trans_x, 'transpose_Y': trans_y, 'test_propram': test_negative_dims_program, - }) + }, + ) # Generate operators cases for all possibilities def inject_test(dim_x, dim_y, trans_x, trans_y): - test_name = ('TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( - dim_x, dim_y, trans_x, trans_y)) - shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x, - trans_y) + test_name = 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( + dim_x, dim_y, trans_x, trans_y + ) + shape_x, shape_y = generate_compatible_shapes( + dim_x, dim_y, trans_x, trans_y + ) globals()[test_name] = type( - test_name, (Generator, OpTest), { + test_name, + (Generator, OpTest), + { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': trans_x, 'transpose_Y': trans_y, - }) + }, + ) for dim_X in (1, 2, 3): @@ -216,8 +224,9 @@ for dim_X in (1, 2, 
3): # Test case more batch_size and N, M, K -def generate_compatible_shapes_batch(dim_X, dim_Y, transpose_X, transpose_Y, - batch_size): +def generate_compatible_shapes_batch( + dim_X, dim_Y, transpose_X, transpose_Y, batch_size +): BATCH_SIZE = 2 M = 3 N = 4 @@ -278,20 +287,25 @@ for dim in [4]: for transpose_Y in [False, True]: test_name = ( 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( - dim, dim, transpose_X, transpose_Y)) + dim, dim, transpose_X, transpose_Y + ) + ) shape_X, shape_Y = generate_compatible_shapes_ndim( - dim, transpose_X, transpose_Y) + dim, transpose_X, transpose_Y + ) globals()[test_name] = type( - test_name, (Generator, OpTest), { + test_name, + (Generator, OpTest), + { 'shape_X': shape_X, 'shape_Y': shape_Y, 'transpose_X': transpose_X, 'transpose_Y': transpose_Y, - }) + }, + ) class API_TestMm(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2], dtype="float64") @@ -302,8 +316,9 @@ class API_TestMm(unittest.TestCase): data1 = np.random.rand(2) data2 = np.random.rand(2) np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result]) - expected_result = np.matmul(data1.reshape(1, 2), - data2.reshape(2, 1)) + expected_result = np.matmul( + data1.reshape(1, 2), data2.reshape(2, 1) + ) np.testing.assert_allclose( np_res, @@ -311,7 +326,9 @@ class API_TestMm(unittest.TestCase): rtol=1e-05, atol=1e-05, err_msg='two value is {}\n{}, check diff!'.format( - np_res, expected_result)) + np_res, expected_result + ), + ) def test_dygraph_without_out(self): device = fluid.CPUPlace() @@ -326,7 +343,6 @@ class API_TestMm(unittest.TestCase): class Test_API_Matmul(unittest.TestCase): - def test_dygraph_without_out(self): device = fluid.CPUPlace() with fluid.dygraph.guard(device): @@ -340,9 +356,7 @@ class Test_API_Matmul(unittest.TestCase): class API_TestMmError(unittest.TestCase): - def test_errors(self): - def test_error1(): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32") @@ -353,24 +367,24 @@ class API_TestMmError(unittest.TestCase): def test_error2(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data(name="data1", - shape=[-1, 10, 2], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[-1, 2, 10], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 10, 2], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[-1, 2, 10], dtype="float32" + ) paddle.mm(data1, data2) test_error2() def test_error3(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data(name="data1", - shape=[10, 10, 2], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[3, 2, 10], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[10, 10, 2], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[3, 2, 10], dtype="float32" + ) paddle.mm(data1, data2) self.assertRaises(ValueError, test_error3) diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py b/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py index 77e3c7f4c1fca753a64aa146333a9228b13af4a5..bd6b1877ecd290e0055017c7e9eb6e7f916c9696 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py @@ -59,16 +59,18 @@ def matmul_head(X, Y, head_number=1): sub_y_height = Y.shape[-2] // head_number if np.ndim(X) == 2: for i in range(0, head_number): - 
x.append(X[:, i * sub_x_width:i * sub_x_width + sub_x_width]) - y.append(Y[i * sub_y_height:i * sub_y_height + sub_y_height, :]) + x.append(X[:, i * sub_x_width : i * sub_x_width + sub_x_width]) + y.append(Y[i * sub_y_height : i * sub_y_height + sub_y_height, :]) for i in range(0, head_number): z.append(np.matmul(x[i], y[i])) Z = np.concatenate((z), axis=1) elif np.ndim(X) == 3: for i in range(0, head_number): - x.append(X[:, :, i * sub_x_width:i * sub_x_width + sub_x_width]) - y.append(Y[:, i * sub_y_height:i * sub_y_height + sub_y_height, :]) + x.append(X[:, :, i * sub_x_width : i * sub_x_width + sub_x_width]) + y.append( + Y[:, i * sub_y_height : i * sub_y_height + sub_y_height, :] + ) for i in range(0, head_number): z.append(np.matmul(x[i], y[i])) Z = np.concatenate((z), axis=2) @@ -87,11 +89,9 @@ def transpose_mat(X): return X -def reference_matmul_mul_head(X, - Y, - head_number=1, - transpose_X=False, - transpose_Y=False): +def reference_matmul_mul_head( + X, Y, head_number=1, transpose_X=False, transpose_Y=False +): """Reference forward implementation using np.matmul.""" # np.matmul does not support the transpose flags, so we manually # transpose X and Y appropriately. @@ -112,19 +112,19 @@ def reference_matmul_mul_head(X, # Generator for multiple head class GeneratorMulHead(object): - def setUp(self): self.op_type = "matmul" X = np.random.random(self.shape_X).astype("float32") Y = np.random.random(self.shape_Y).astype("float32") - Out = reference_matmul_mul_head(X, Y, 4, self.transpose_X, - self.transpose_Y) + Out = reference_matmul_mul_head( + X, Y, 4, self.transpose_X, self.transpose_Y + ) self.inputs = {'X': X, 'Y': Y} self.attrs = { 'transpose_X': self.transpose_X, 'transpose_Y': self.transpose_Y, - 'head_number': self.head_number + 'head_number': self.head_number, } self.outputs = {'Out': Out} @@ -135,17 +135,23 @@ class GeneratorMulHead(object): def inject_test_multiple_head(dim_x, dim_y, trans_x, trans_y, head_number): test_name = ( 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_head_{}'.format( - dim_x, dim_y, trans_x, trans_y, head_number)) + dim_x, dim_y, trans_x, trans_y, head_number + ) + ) shape_x, shape_y = generate_compatible_shapes_mul_head( - dim_x, dim_y, trans_x, trans_y) + dim_x, dim_y, trans_x, trans_y + ) globals()[test_name] = type( - test_name, (GeneratorMulHead, OpTest), { + test_name, + (GeneratorMulHead, OpTest), + { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': trans_x, 'transpose_Y': trans_y, - 'head_number': head_number - }) + 'head_number': head_number, + }, + ) def matmul_head2(X, Y, head_number=1): @@ -154,20 +160,21 @@ def matmul_head2(X, Y, head_number=1): z = [] sub_x_width = X.shape[-1] // head_number sub_y_width = Y.shape[-1] // head_number - assert (sub_x_width == Y.shape[-2] - ), "Error: incompatible head number or matrix size!" + assert ( + sub_x_width == Y.shape[-2] + ), "Error: incompatible head number or matrix size!" 
if np.ndim(X) == 2: for i in range(0, head_number): - x.append(X[:, i * sub_x_width:i * sub_x_width + sub_x_width]) - y.append(Y[:, i * sub_y_width:i * sub_y_width + sub_y_width]) + x.append(X[:, i * sub_x_width : i * sub_x_width + sub_x_width]) + y.append(Y[:, i * sub_y_width : i * sub_y_width + sub_y_width]) for i in range(0, head_number): z.append(np.matmul(x[i], y[i])) Z = np.concatenate((z), axis=1) elif np.ndim(X) == 3: for i in range(0, head_number): - x.append(X[:, :, i * sub_x_width:i * sub_x_width + sub_x_width]) - y.append(Y[:, :, i * sub_y_width:i * sub_y_width + sub_y_width]) + x.append(X[:, :, i * sub_x_width : i * sub_x_width + sub_x_width]) + y.append(Y[:, :, i * sub_y_width : i * sub_y_width + sub_y_width]) for i in range(0, head_number): z.append(np.matmul(x[i], y[i])) Z = np.concatenate((z), axis=2) @@ -176,11 +183,9 @@ def matmul_head2(X, Y, head_number=1): return Z -def reference_matmul_mul_head2(X, - Y, - head_number=1, - transpose_X=False, - transpose_Y=False): +def reference_matmul_mul_head2( + X, Y, head_number=1, transpose_X=False, transpose_Y=False +): """Reference forward implementation using np.matmul.""" # np.matmul does not support the transpose flags, so we manually # transpose X and Y appropriately. @@ -199,8 +204,9 @@ def reference_matmul_mul_head2(X, return Out -def generate_compatible_shapes_mul_head2(dim_X, dim_Y, transpose_X, - transpose_Y): +def generate_compatible_shapes_mul_head2( + dim_X, dim_Y, transpose_X, transpose_Y +): BATCH_SIZE = 2 # Assume head number H is 4. We need make sure K1/H = M2 M1 = 3 @@ -227,38 +233,36 @@ def generate_compatible_shapes_mul_head2(dim_X, dim_Y, transpose_X, # Generator for multiple head, case 2 when width of X is not same as height of Y class GeneratorMulHead2(object): - def setUp(self): self.op_type = "matmul" X = np.zeros(self.shape_X) Y = np.zeros(self.shape_Y) if len(self.shape_X) == 2: - X = np.arange(0, - self.shape_X[-1] * self.shape_X[-2], - dtype=np.float32).reshape(self.shape_X) - Y = np.arange(0, - self.shape_Y[-1] * self.shape_Y[-2], - dtype=np.float32).reshape(self.shape_Y) + X = np.arange( + 0, self.shape_X[-1] * self.shape_X[-2], dtype=np.float32 + ).reshape(self.shape_X) + Y = np.arange( + 0, self.shape_Y[-1] * self.shape_Y[-2], dtype=np.float32 + ).reshape(self.shape_Y) else: for i in range(0, len(self.shape_X) - 1): - X[i, :, :] = np.arange(0, - self.shape_X[-1] * self.shape_X[-2], - dtype=np.float32).reshape( - list(self.shape_X)[-2:]) - Y[i, :, :] = np.arange(0, - self.shape_Y[-1] * self.shape_Y[-2], - dtype=np.float32).reshape( - list(self.shape_Y)[-2:]) - - Out = reference_matmul_mul_head2(X, Y, 4, self.transpose_X, - self.transpose_Y) + X[i, :, :] = np.arange( + 0, self.shape_X[-1] * self.shape_X[-2], dtype=np.float32 + ).reshape(list(self.shape_X)[-2:]) + Y[i, :, :] = np.arange( + 0, self.shape_Y[-1] * self.shape_Y[-2], dtype=np.float32 + ).reshape(list(self.shape_Y)[-2:]) + + Out = reference_matmul_mul_head2( + X, Y, 4, self.transpose_X, self.transpose_Y + ) self.inputs = {'X': X, 'Y': Y} self.attrs = { 'transpose_X': self.transpose_X, 'transpose_Y': self.transpose_Y, - 'head_number': self.head_number + 'head_number': self.head_number, } self.outputs = {'Out': Out} @@ -269,26 +273,32 @@ class GeneratorMulHead2(object): def inject_test_multiple_head2(dim_x, dim_y, trans_x, trans_y, head_number): test_name = ( 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_head2_{}'.format( - dim_x, dim_y, trans_x, trans_y, head_number)) + dim_x, dim_y, trans_x, trans_y, head_number + ) + ) shape_x, 
shape_y = generate_compatible_shapes_mul_head2( - dim_x, dim_y, trans_x, trans_y) + dim_x, dim_y, trans_x, trans_y + ) globals()[test_name] = type( - test_name, (GeneratorMulHead2, OpTest), { + test_name, + (GeneratorMulHead2, OpTest), + { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': trans_x, 'transpose_Y': trans_y, - 'head_number': head_number - }) + 'head_number': head_number, + }, + ) -#test case for multiple head +# test case for multiple head for dim in (2, 3): for transose_x in (False, True): for transose_y in (False, True): inject_test_multiple_head(dim, dim, transose_x, transose_y, 4) -#test case for multiple head when X.width != Y.height +# test case for multiple head when X.width != Y.height for dim in (2, 3): for transose_x in (False, True): for transose_y in (False, True): diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py index 4caffe5019f7ccf5999efb353aa8438295cc7a03..9af6d6598d29a96c10cd5a4bf9cd0dddddd46672 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py @@ -29,7 +29,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): # transpose X and Y appropriately. if transpose_X: if X.ndim == 1: - X = X.reshape((X.size, )) + X = X.reshape((X.size,)) elif X.ndim == 2: X = X.T else: @@ -38,7 +38,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): X = np.transpose(X, tuple(dim)) if transpose_Y: if Y.ndim == 1: - Y = Y.reshape((Y.size, )) + Y = Y.reshape((Y.size,)) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -60,8 +60,8 @@ class TestMatMulV2Op(OpTest): """ def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -106,10 +106,9 @@ class TestMatMulV2Op(OpTest): def test_check_grad(self): if core.is_compiled_with_rocm(): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=1e-2, - check_eager=False) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=1e-2, check_eager=False + ) else: self.check_grad(['X', 'Y'], 'Out', check_eager=False) @@ -120,7 +119,7 @@ class TestMatMulOp2(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 3, 2, 100) self.trans_x = False self.trans_y = True @@ -132,7 +131,7 @@ class TestMatMulOp3(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 1, 100, 2) self.trans_x = False self.trans_y = False @@ -144,7 +143,7 @@ class TestMatMulOp4(TestMatMulV2Op): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -157,7 +156,7 @@ class TestMatMulOp5(TestMatMulV2Op): def config(self): self.x_shape = (1, 1, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = True self.trans_y = False @@ -169,7 +168,7 @@ class TestMatMulOp6(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 102, 1) - self.y_shape = (102, ) + self.y_shape = (102,) self.trans_x = True self.trans_y = False @@ -181,7 +180,7 @@ class TestMatMulOp7(TestMatMulV2Op): def config(self): self.x_shape = (1, 2, 1, 100) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -288,7 +287,7 @@ class TestMatMulOp16(TestMatMulV2Op): """ def config(self): - self.x_shape = (100) + 
self.x_shape = 100 self.y_shape = (1, 2, 2, 100, 2) self.trans_x = False self.trans_y = False @@ -301,7 +300,7 @@ class TestMatMulOp17(TestMatMulV2Op): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.trans_x = False self.trans_y = False @@ -330,15 +329,14 @@ class TestMatMulOpBroadcast2(TestMatMulV2Op): self.trans_y = True -#--------------------test matmul fp16-------------------- +# --------------------test matmul fp16-------------------- def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestMatMulOpFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -346,18 +344,20 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_output_with_place(place, - atol=atol, - check_eager=False) + self.check_output_with_place( + place, atol=atol, check_eager=False + ) def test_check_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_grad_with_place( - place, ['X', 'Y'], + place, + ['X', 'Y'], 'Out', max_relative_error=max_relative_error, - check_eager=False) + check_eager=False, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestMatMulOpFp16Case.__name__ = cls_name @@ -382,24 +382,25 @@ create_test_fp16_class(TestMatMulOp15) create_test_fp16_class(TestMatMulOp16) create_test_fp16_class(TestMatMulOp17) -#--------------------test matmul bf16-------------------- +# --------------------test matmul bf16-------------------- def create_test_bf16_class(parent, atol=0.01): - @unittest.skipIf( not core.is_compiled_with_cuda() or not core.is_bfloat16_supported(core.CUDAPlace(0)), - "core is not compiled with CUDA and not support the bfloat16") + "core is not compiled with CUDA and not support the bfloat16", + ) class TestMatMulOpBf16Case(parent): - def get_numeric_grad(self, place, check_name): scope = core.Scope() self._check_grad_helper() - op = create_op(scope, self.op_type, self.inputs, self.outputs, - self.attrs) - return get_numeric_gradient(place, scope, op, self.inputs_fp32, - check_name, ['Out']) + op = create_op( + scope, self.op_type, self.inputs, self.outputs, self.attrs + ) + return get_numeric_gradient( + place, scope, op, self.inputs_fp32, check_name, ['Out'] + ) def init_kernel_type(self): self.dtype = np.uint16 @@ -411,18 +412,24 @@ def create_test_bf16_class(parent, atol=0.01): def test_check_grad_x(self): place = core.CUDAPlace(0) numeric_grads = self.get_numeric_grad(place, 'X') - self.check_grad_with_place(place, ['X'], - 'Out', - no_grad_set=set(['Y']), - user_defined_grads=[numeric_grads]) + self.check_grad_with_place( + place, + ['X'], + 'Out', + no_grad_set=set(['Y']), + user_defined_grads=[numeric_grads], + ) def test_check_grad_y(self): place = core.CUDAPlace(0) numeric_grads = self.get_numeric_grad(place, 'Y') - self.check_grad_with_place(place, ['Y'], - 'Out', - no_grad_set=set(['X']), - user_defined_grads=[numeric_grads]) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + no_grad_set=set(['X']), + user_defined_grads=[numeric_grads], + ) def test_check_grad(self): pass @@ -452,7 +459,6 @@ create_test_bf16_class(TestMatMulOp17) class TestMatMulV2API(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if 
core.is_compiled_with_cuda(): @@ -469,12 +475,11 @@ class TestMatMulV2API(unittest.TestCase): y_np = np.random.random([3, 4]).astype("float32") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": x_np, - "input_y": y_np - }, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": x_np, "input_y": y_np}, + fetch_list=[result], + ) def test_static(self): for place in self.places: @@ -506,7 +511,8 @@ class TestMatMulV2API(unittest.TestCase): if core.is_float16_supported(place): with fluid.dygraph.guard(place): paddle.set_flags( - {'FLAGS_gemm_use_half_precision_compute_type': False}) + {'FLAGS_gemm_use_half_precision_compute_type': False} + ) input_x = np.random.random([2, 8, 16]).astype("float16") input_y = np.random.random([2, 16, 8]).astype("float16") for i in range(0, 16, 2): @@ -522,7 +528,8 @@ class TestMatMulV2API(unittest.TestCase): self.assertTrue(np.isfinite(result_np)[0, 0, 0]) np.testing.assert_array_equal(result_np, result.numpy()) paddle.set_flags( - {'FLAGS_gemm_use_half_precision_compute_type': True}) + {'FLAGS_gemm_use_half_precision_compute_type': True} + ) def test_compute_type_fp16_nan(self): if core.is_compiled_with_cuda(): @@ -530,7 +537,8 @@ class TestMatMulV2API(unittest.TestCase): if core.is_float16_supported(place): with fluid.dygraph.guard(place): paddle.set_flags( - {'FLAGS_gemm_use_half_precision_compute_type': True}) + {'FLAGS_gemm_use_half_precision_compute_type': True} + ) input_x = np.random.random([2, 8, 16]).astype("float16") input_y = np.random.random([2, 16, 8]).astype("float16") for i in range(0, 16, 2): @@ -543,10 +551,12 @@ class TestMatMulV2API(unittest.TestCase): result = paddle.matmul(x, y) result_np = np.matmul(input_x, input_y) self.assertFalse( - paddle.isfinite(result)[0, 0, 0]) # contains nan/inf + paddle.isfinite(result)[0, 0, 0] + ) # contains nan/inf self.assertTrue(np.isfinite(result_np)[0, 0, 0]) paddle.set_flags( - {'FLAGS_gemm_use_half_precision_compute_type': False}) + {'FLAGS_gemm_use_half_precision_compute_type': False} + ) def test_api_eager_dygraph(self): with _test_eager_guard(): @@ -555,7 +565,6 @@ class TestMatMulV2API(unittest.TestCase): class TestComplexMatMulOp(OpTest): - def setUp(self): self.op_type = "matmul_v2" self.init_base_dtype() @@ -564,7 +573,7 @@ class TestComplexMatMulOp(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -573,17 +582,18 @@ class TestComplexMatMulOp(OpTest): self.dtype = np.float64 def init_input_output(self): - self.x = np.random.random( - (10, 10)).astype(self.dtype) + 1J * np.random.random( - (10, 10)).astype(self.dtype) - self.y = np.random.random( - (10, 10)).astype(self.dtype) + 1J * np.random.random( - (10, 10)).astype(self.dtype) + self.x = np.random.random((10, 10)).astype( + self.dtype + ) + 1j * np.random.random((10, 10)).astype(self.dtype) + self.y = np.random.random((10, 10)).astype( + self.dtype + ) + 1j * np.random.random((10, 10)).astype(self.dtype) self.out = np.dot(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones((10, 10), self.dtype) + 1J * np.ones( - (10, 10), self.dtype) + self.grad_out = np.ones((10, 10), self.dtype) + 1j * np.ones( + (10, 10), self.dtype + ) self.grad_x = np.matmul(self.grad_out, np.conj(self.y).T) self.grad_y = np.matmul(np.conj(self.x).T, self.grad_out) 
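# Illustrative, standalone sketch (not part of the patch): a quick NumPy
# finite-difference check of the conjugate-gradient formulas used above in
# init_grad_input_output(), i.e. grad_x = grad_out @ conj(Y).T and
# grad_y = conj(X).T @ grad_out. It assumes the usual Wirtinger convention
# in which the reference scalar is Re(sum(conj(grad_out) * (X @ Y))).
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((4, 3)) + 1j * rng.standard_normal((4, 3))
Y = rng.standard_normal((3, 5)) + 1j * rng.standard_normal((3, 5))
G = rng.standard_normal((4, 5)) + 1j * rng.standard_normal((4, 5))


def scalar_loss(X_):
    # Real-valued scalar whose complex gradient w.r.t. X is G @ conj(Y).T
    return np.real(np.sum(np.conj(G) * (X_ @ Y)))


eps = 1e-6
numeric = np.zeros_like(X)
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        d_re = np.zeros_like(X)
        d_re[i, j] = eps
        d_im = np.zeros_like(X)
        d_im[i, j] = 1j * eps
        numeric[i, j] = (scalar_loss(X + d_re) - scalar_loss(X - d_re)) / (
            2 * eps
        ) + 1j * (scalar_loss(X + d_im) - scalar_loss(X - d_im)) / (2 * eps)

np.testing.assert_allclose(numeric, G @ np.conj(Y).T, rtol=1e-5, atol=1e-7)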
@@ -591,31 +601,36 @@ class TestComplexMatMulOp(OpTest): self.check_output(check_eager=False) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) class TestComplexMatMulOpBroadcast(OpTest): - def setUp(self): self.op_type = "matmul_v2" self.init_base_dtype() @@ -624,7 +639,7 @@ class TestComplexMatMulOpBroadcast(OpTest): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -633,61 +648,68 @@ class TestComplexMatMulOpBroadcast(OpTest): self.dtype = np.float64 def init_input_output(self): - self.x = np.random.random( - (10, 2, 5)).astype(self.dtype) + 1J * np.random.random( - (10, 2, 5)).astype(self.dtype) - self.y = np.random.random( - (5, 20)).astype(self.dtype) + 1J * np.random.random( - (5, 20)).astype(self.dtype) + self.x = np.random.random((10, 2, 5)).astype( + self.dtype + ) + 1j * np.random.random((10, 2, 5)).astype(self.dtype) + self.y = np.random.random((5, 20)).astype( + self.dtype + ) + 1j * np.random.random((5, 20)).astype(self.dtype) self.out = np.dot(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones((10, 2, 20), self.dtype) + 1J * np.ones( - (10, 2, 20), self.dtype) + self.grad_out = np.ones((10, 2, 20), self.dtype) + 1j * np.ones( + (10, 2, 20), self.dtype + ) self.grad_x = np.matmul(self.grad_out, np.conj(self.y).T) - self.grad_y = np.sum(np.matmul( - np.conj(self.x).transpose(0, 2, 1), self.grad_out), - axis=0) + self.grad_y = np.sum( + np.matmul(np.conj(self.x).transpose(0, 2, 1), self.grad_out), axis=0 + ) def test_check_output(self): self.check_output(check_eager=False) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[self.grad_x, self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - no_grad_set=set("X"), - user_defined_grads=[self.grad_y], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Y'), 
- user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=False) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=False, + ) class TestMatMulTypePromotion(TestComplexMatMulOp): - def init_input_output(self): self.x = np.random.random((10, 10)).astype(self.dtype) - self.y = np.random.random( - (10, 10)).astype(self.dtype) + 1J * np.random.random( - (10, 10)).astype(self.dtype) + self.y = np.random.random((10, 10)).astype( + self.dtype + ) + 1j * np.random.random((10, 10)).astype(self.dtype) self.out = np.dot(self.x, self.y) def init_grad_input_output(self): - self.grad_out = np.ones((10, 10), self.dtype) + 1J * np.ones( - (10, 10), self.dtype) + self.grad_out = np.ones((10, 10), self.dtype) + 1j * np.ones( + (10, 10), self.dtype + ) self.grad_x = np.matmul(self.grad_out, np.conj(self.y).T).real self.grad_y = np.matmul(np.conj(self.x).T, self.grad_out) diff --git a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py index d0b9c3021284ad4b465c46bc188efad84b1d9096..fe43d42cb45bf394e3c4f1f459ef01292425a89f 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py @@ -21,22 +21,34 @@ from paddle.fluid import Program, program_guard import paddle -def python_matrix_nms(bboxes, - scores, - score_threshold, - nms_top_k, - keep_top_k, - post_threshold, - use_gaussian=False, - gaussian_sigma=2., - background_label=0, - normalized=True, - return_index=True, - return_rois_num=True): +def python_matrix_nms( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + post_threshold, + use_gaussian=False, + gaussian_sigma=2.0, + background_label=0, + normalized=True, + return_index=True, + return_rois_num=True, +): out, rois_num, index = paddle.vision.ops.matrix_nms( - bboxes, scores, score_threshold, post_threshold, nms_top_k, keep_top_k, - use_gaussian, gaussian_sigma, background_label, normalized, - return_index, return_rois_num) + bboxes, + scores, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + use_gaussian, + gaussian_sigma, + background_label, + normalized, + return_index, + return_rois_num, + ) if not return_index: index = None if not return_rois_num: @@ -47,7 +59,7 @@ def python_matrix_nms(bboxes, def softmax(x): # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) 
+ shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -61,18 +73,20 @@ def iou_matrix(a, b, norm=True): area_i = np.prod(br_i - tl_i + pad, axis=2) * (tl_i < br_i).all(axis=2) area_a = np.prod(a[:, 2:] - a[:, :2] + pad, axis=1) area_b = np.prod(b[:, 2:] - b[:, :2] + pad, axis=1) - area_o = (area_a[:, np.newaxis] + area_b - area_i) + area_o = area_a[:, np.newaxis] + area_b - area_i return area_i / (area_o + 1e-10) -def matrix_nms(boxes, - scores, - score_threshold, - post_threshold=0., - nms_top_k=400, - normalized=True, - use_gaussian=False, - gaussian_sigma=2.): +def matrix_nms( + boxes, + scores, + score_threshold, + post_threshold=0.0, + nms_top_k=400, + normalized=True, + use_gaussian=False, + gaussian_sigma=2.0, +): all_scores = copy.deepcopy(scores) all_scores = all_scores.flatten() selected_indices = np.where(all_scores > score_threshold)[0] @@ -99,7 +113,7 @@ def matrix_nms(boxes, decay = decay.min(0) decayed_scores = sorted_scores * decay - if post_threshold > 0.: + if post_threshold > 0.0: inds = np.where(decayed_scores > post_threshold)[0] selected_boxes = selected_boxes[inds, :] decayed_scores = decayed_scores[inds] @@ -108,9 +122,18 @@ def matrix_nms(boxes, return decayed_scores, selected_boxes, sorted_indices -def multiclass_nms(boxes, scores, background, score_threshold, post_threshold, - nms_top_k, keep_top_k, normalized, use_gaussian, - gaussian_sigma): +def multiclass_nms( + boxes, + scores, + background, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + normalized, + use_gaussian, + gaussian_sigma, +): all_boxes = [] all_cls = [] all_scores = [] @@ -119,8 +142,15 @@ def multiclass_nms(boxes, scores, background, score_threshold, post_threshold, if c == background: continue decayed_scores, selected_boxes, indices = matrix_nms( - boxes, scores[c], score_threshold, post_threshold, nms_top_k, - normalized, use_gaussian, gaussian_sigma) + boxes, + scores[c], + score_threshold, + post_threshold, + nms_top_k, + normalized, + use_gaussian, + gaussian_sigma, + ) all_cls.append(np.full(len(decayed_scores), c, decayed_scores.dtype)) all_boxes.append(selected_boxes) all_scores.append(decayed_scores) @@ -131,7 +161,8 @@ def multiclass_nms(boxes, scores, background, score_threshold, post_threshold, all_scores = np.concatenate(all_scores) all_indices = np.concatenate(all_indices) all_pred = np.concatenate( - (all_cls[:, np.newaxis], all_scores[:, np.newaxis], all_boxes), axis=1) + (all_cls[:, np.newaxis], all_scores[:, np.newaxis], all_boxes), axis=1 + ) num_det = len(all_pred) if num_det == 0: @@ -149,25 +180,35 @@ def multiclass_nms(boxes, scores, background, score_threshold, post_threshold, return all_pred, all_indices -def batched_multiclass_nms(boxes, - scores, - background, - score_threshold, - post_threshold, - nms_top_k, - keep_top_k, - normalized=True, - use_gaussian=False, - gaussian_sigma=2.): +def batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + normalized=True, + use_gaussian=False, + gaussian_sigma=2.0, +): batch_size = scores.shape[0] det_outs = [] index_outs = [] lod = [] for n in range(batch_size): - nmsed_outs, indices = multiclass_nms(boxes[n], scores[n], background, - score_threshold, post_threshold, - nms_top_k, keep_top_k, normalized, - use_gaussian, gaussian_sigma) + nmsed_outs, indices = multiclass_nms( + boxes[n], + scores[n], + background, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + normalized, + use_gaussian, + 
gaussian_sigma, + ) nmsed_num = len(nmsed_outs) lod.append(nmsed_num) if nmsed_num == 0: @@ -182,9 +223,8 @@ def batched_multiclass_nms(boxes, class TestMatrixNMSOp(OpTest): - def set_argument(self): - self.post_threshold = 0. + self.post_threshold = 0.0 self.use_gaussian = False def setUp(self): @@ -202,7 +242,7 @@ class TestMatrixNMSOp(OpTest): use_gaussian = False if hasattr(self, 'use_gaussian'): use_gaussian = self.use_gaussian - gaussian_sigma = 2. + gaussian_sigma = 2.0 scores = np.random.random((N * M, C)).astype('float32') @@ -215,8 +255,17 @@ class TestMatrixNMSOp(OpTest): boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5 det_outs, index_outs, lod = batched_multiclass_nms( - boxes, scores, background, score_threshold, post_threshold, - nms_top_k, keep_top_k, True, use_gaussian, gaussian_sigma) + boxes, + scores, + background, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + True, + use_gaussian, + gaussian_sigma, + ) empty = len(det_outs) == 0 det_outs = np.array([], dtype=np.float32) if empty else det_outs @@ -228,7 +277,7 @@ class TestMatrixNMSOp(OpTest): self.outputs = { 'Out': nmsed_outs, 'Index': index_outs[:, None], - 'RoisNum': np.array(lod).astype('int32') + 'RoisNum': np.array(lod).astype('int32'), } self.attrs = { 'score_threshold': score_threshold, @@ -246,20 +295,17 @@ class TestMatrixNMSOp(OpTest): class TestMatrixNMSOpNoOutput(TestMatrixNMSOp): - def set_argument(self): self.post_threshold = 2.0 class TestMatrixNMSOpGaussian(TestMatrixNMSOp): - def set_argument(self): - self.post_threshold = 0. + self.post_threshold = 0.0 self.use_gaussian = True class TestMatrixNMSError(unittest.TestCase): - def test_errors(self): M = 1200 N = 7 @@ -268,7 +314,7 @@ class TestMatrixNMSError(unittest.TestCase): nms_top_k = 400 keep_top_k = 200 score_threshold = 0.01 - post_threshold = 0. 
+ post_threshold = 0.0 boxes_np = np.random.random((M, C, BOX_SIZE)).astype('float32') scores = np.random.random((N * M, C)).astype('float32') @@ -277,52 +323,62 @@ class TestMatrixNMSError(unittest.TestCase): scores_np = np.transpose(scores, (0, 2, 1)) with program_guard(Program(), Program()): - boxes_data = fluid.data(name='bboxes', - shape=[M, C, BOX_SIZE], - dtype='float32') - scores_data = fluid.data(name='scores', - shape=[N, C, M], - dtype='float32') + boxes_data = fluid.data( + name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32' + ) + scores_data = fluid.data( + name='scores', shape=[N, C, M], dtype='float32' + ) def test_bboxes_Variable(): # the bboxes type must be Variable - fluid.layers.matrix_nms(bboxes=boxes_np, - scores=scores_data, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) - paddle.vision.ops.matrix_nms(bboxes=boxes_np, - scores=scores_data, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + fluid.layers.matrix_nms( + bboxes=boxes_np, + scores=scores_data, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) + paddle.vision.ops.matrix_nms( + bboxes=boxes_np, + scores=scores_data, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) def test_scores_Variable(): # the scores type must be Variable - fluid.layers.matrix_nms(bboxes=boxes_data, - scores=scores_np, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) - paddle.vision.ops.matrix_nms(bboxes=boxes_data, - scores=scores_np, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + fluid.layers.matrix_nms( + bboxes=boxes_data, + scores=scores_np, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) + paddle.vision.ops.matrix_nms( + bboxes=boxes_data, + scores=scores_np, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) def test_empty(): # when all score are lower than threshold try: - fluid.layers.matrix_nms(bboxes=boxes_data, - scores=scores_data, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + fluid.layers.matrix_nms( + bboxes=boxes_data, + scores=scores_data, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) except Exception as e: self.fail(e) try: @@ -332,19 +388,22 @@ class TestMatrixNMSError(unittest.TestCase): score_threshold=score_threshold, post_threshold=post_threshold, nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + keep_top_k=keep_top_k, + ) except Exception as e: self.fail(e) def test_coverage(): # cover correct workflow try: - fluid.layers.matrix_nms(bboxes=boxes_data, - scores=scores_data, - score_threshold=score_threshold, - post_threshold=post_threshold, - nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + fluid.layers.matrix_nms( + bboxes=boxes_data, + scores=scores_data, + score_threshold=score_threshold, + post_threshold=post_threshold, + nms_top_k=nms_top_k, + keep_top_k=keep_top_k, + ) except Exception as e: self.fail(e) try: @@ -354,7 +413,8 @@ class TestMatrixNMSError(unittest.TestCase): score_threshold=score_threshold, 
post_threshold=post_threshold, nms_top_k=nms_top_k, - keep_top_k=keep_top_k) + keep_top_k=keep_top_k, + ) except Exception as e: self.fail(e) diff --git a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py index af400b14615c08edd691bf5d17ed5d8c7d3b4f71..0632af3a3a3231d3c4c46c7608e3bc6ef318a7d9 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestMatrixPowerOp(OpTest): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -45,14 +44,12 @@ class TestMatrixPowerOp(OpTest): self.check_output() def test_grad(self): - self.check_grad(["X"], - "Out", - numeric_grad_delta=1e-5, - max_relative_error=1e-7) + self.check_grad( + ["X"], "Out", numeric_grad_delta=1e-5, max_relative_error=1e-7 + ) class TestMatrixPowerOpN1(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -60,7 +57,6 @@ class TestMatrixPowerOpN1(TestMatrixPowerOp): class TestMatrixPowerOpN2(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -68,7 +64,6 @@ class TestMatrixPowerOpN2(TestMatrixPowerOp): class TestMatrixPowerOpN3(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -76,7 +71,6 @@ class TestMatrixPowerOpN3(TestMatrixPowerOp): class TestMatrixPowerOpN4(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -84,7 +78,6 @@ class TestMatrixPowerOpN4(TestMatrixPowerOp): class TestMatrixPowerOpN5(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -92,7 +85,6 @@ class TestMatrixPowerOpN5(TestMatrixPowerOp): class TestMatrixPowerOpN6(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -100,7 +92,6 @@ class TestMatrixPowerOpN6(TestMatrixPowerOp): class TestMatrixPowerOpN10(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -108,21 +99,18 @@ class TestMatrixPowerOpN10(TestMatrixPowerOp): class TestMatrixPowerOpNMinus(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" self.n = -1 def test_grad(self): - self.check_grad(["X"], - "Out", - numeric_grad_delta=1e-5, - max_relative_error=1e-6) + self.check_grad( + ["X"], "Out", numeric_grad_delta=1e-5, max_relative_error=1e-6 + ) class TestMatrixPowerOpNMinus2(TestMatrixPowerOpNMinus): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -130,7 +118,6 @@ class TestMatrixPowerOpNMinus2(TestMatrixPowerOpNMinus): class TestMatrixPowerOpNMinus3(TestMatrixPowerOpNMinus): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -138,7 +125,6 @@ class TestMatrixPowerOpNMinus3(TestMatrixPowerOpNMinus): class TestMatrixPowerOpNMinus4(TestMatrixPowerOpNMinus): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -146,7 +132,6 @@ class TestMatrixPowerOpNMinus4(TestMatrixPowerOpNMinus): class TestMatrixPowerOpNMinus5(TestMatrixPowerOpNMinus): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -154,7 +139,6 @@ class TestMatrixPowerOpNMinus5(TestMatrixPowerOpNMinus): class TestMatrixPowerOpNMinus6(TestMatrixPowerOpNMinus): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -162,21 +146,18 @@ class 
TestMatrixPowerOpNMinus6(TestMatrixPowerOpNMinus): class TestMatrixPowerOpNMinus10(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" self.n = -10 def test_grad(self): - self.check_grad(["X"], - "Out", - numeric_grad_delta=1e-5, - max_relative_error=1e-6) + self.check_grad( + ["X"], "Out", numeric_grad_delta=1e-5, max_relative_error=1e-6 + ) class TestMatrixPowerOpBatched1(TestMatrixPowerOp): - def config(self): self.matrix_shape = [8, 4, 4] self.dtype = "float64" @@ -184,7 +165,6 @@ class TestMatrixPowerOpBatched1(TestMatrixPowerOp): class TestMatrixPowerOpBatched2(TestMatrixPowerOp): - def config(self): self.matrix_shape = [2, 6, 4, 4] self.dtype = "float64" @@ -192,7 +172,6 @@ class TestMatrixPowerOpBatched2(TestMatrixPowerOp): class TestMatrixPowerOpBatched3(TestMatrixPowerOp): - def config(self): self.matrix_shape = [2, 6, 4, 4] self.dtype = "float64" @@ -200,7 +179,6 @@ class TestMatrixPowerOpBatched3(TestMatrixPowerOp): class TestMatrixPowerOpBatchedLong(TestMatrixPowerOp): - def config(self): self.matrix_shape = [1, 2, 3, 4, 4, 3, 3] self.dtype = "float64" @@ -208,7 +186,6 @@ class TestMatrixPowerOpBatchedLong(TestMatrixPowerOp): class TestMatrixPowerOpLarge1(TestMatrixPowerOp): - def config(self): self.matrix_shape = [32, 32] self.dtype = "float64" @@ -216,7 +193,6 @@ class TestMatrixPowerOpLarge1(TestMatrixPowerOp): class TestMatrixPowerOpLarge2(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float64" @@ -224,7 +200,6 @@ class TestMatrixPowerOpLarge2(TestMatrixPowerOp): class TestMatrixPowerOpFP32(TestMatrixPowerOp): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float32" @@ -235,7 +210,6 @@ class TestMatrixPowerOpFP32(TestMatrixPowerOp): class TestMatrixPowerOpBatchedFP32(TestMatrixPowerOpFP32): - def config(self): self.matrix_shape = [2, 8, 4, 4] self.dtype = "float32" @@ -243,7 +217,6 @@ class TestMatrixPowerOpBatchedFP32(TestMatrixPowerOpFP32): class TestMatrixPowerOpLarge1FP32(TestMatrixPowerOpFP32): - def config(self): self.matrix_shape = [32, 32] self.dtype = "float32" @@ -251,7 +224,6 @@ class TestMatrixPowerOpLarge1FP32(TestMatrixPowerOpFP32): class TestMatrixPowerOpLarge2FP32(TestMatrixPowerOpFP32): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float32" @@ -259,7 +231,6 @@ class TestMatrixPowerOpLarge2FP32(TestMatrixPowerOpFP32): class TestMatrixPowerOpFP32Minus(TestMatrixPowerOpFP32): - def config(self): self.matrix_shape = [10, 10] self.dtype = "float32" @@ -267,7 +238,6 @@ class TestMatrixPowerOpFP32Minus(TestMatrixPowerOpFP32): class TestMatrixPowerAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -282,12 +252,14 @@ class TestMatrixPowerAPI(unittest.TestCase): result_np = np.linalg.matrix_power(input_np, -2) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input_x": input_np}, - fetch_list=[result]) - np.testing.assert_allclose(fetches[0], - np.linalg.matrix_power(input_np, -2), - rtol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": input_np}, + fetch_list=[result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.matrix_power(input_np, -2), rtol=1e-05 + ) def test_static(self): for place in self.places: @@ -299,13 +271,14 @@ class TestMatrixPowerAPI(unittest.TestCase): input_np = np.random.random([4, 4]).astype("float64") input = paddle.to_tensor(input_np) result = paddle.linalg.matrix_power(input, -2) - 
np.testing.assert_allclose(result.numpy(), - np.linalg.matrix_power(input_np, -2), - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), + np.linalg.matrix_power(input_np, -2), + rtol=1e-05, + ) class TestMatrixPowerAPIError(unittest.TestCase): - def test_errors(self): input_np = np.random.random([4, 4]).astype("float64") @@ -314,9 +287,9 @@ class TestMatrixPowerAPIError(unittest.TestCase): # n must be int for n in [2.0, '2', -2.0]: - input = fluid.data(name="input_float32", - shape=[4, 4], - dtype='float32') + input = fluid.data( + name="input_float32", shape=[4, 4], dtype='float32' + ) self.assertRaises(TypeError, paddle.linalg.matrix_power, input, n) # The data type of input must be float32 or float64. @@ -339,7 +312,6 @@ class TestMatrixPowerAPIError(unittest.TestCase): class TestMatrixPowerSingularAPI(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -354,9 +326,11 @@ class TestMatrixPowerSingularAPI(unittest.TestCase): exe = fluid.Executor(place) try: - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) except RuntimeError as ex: print("The mat is singular") except ValueError as ex: diff --git a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py index 3c3dba880047d07ddbb7c152ea01c0c99013a158..72c104ad85c288c1b66152a9b98a11296f8fabdc 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py @@ -31,7 +31,6 @@ def matrix_rank_wraper(x, tol=None, use_default_tol=True, hermitian=False): class TestMatrixRankOP(OpTest): - def setUp(self): self.python_api = matrix_rank_wraper self.op_type = "matrix_rank" @@ -58,91 +57,90 @@ class TestMatrixRankOP(OpTest): class TestMatrixRankOP1(TestMatrixRankOP): - def init_data(self): self.x = np.eye(3, k=1, dtype=np.float64) self.tol_tensor = None self.tol = None self.use_default_tol = True self.hermitian = False - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP2(TestMatrixRankOP): - def init_data(self): self.x = np.random.rand(3, 4, 5, 6).astype(np.float32) self.tol_tensor = np.random.random([3, 4]).astype(self.x.dtype) self.tol = None self.use_default_tol = False self.hermitian = False - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP3(TestMatrixRankOP): - def init_data(self): self.x = np.eye(200, dtype=np.float64) self.tol_tensor = None self.tol = None self.use_default_tol = True self.hermitian = True - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP4(TestMatrixRankOP): - def init_data(self): self.x = np.random.rand(1, 10).astype(np.float32) self.tol_tensor = None self.tol = None self.use_default_tol = True self.hermitian = False - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP5(TestMatrixRankOP): - def init_data(self): self.x = np.random.rand(5, 
1).astype(np.float64) self.tol_tensor = np.random.random([1, 4]).astype(self.x.dtype) self.tol = None self.use_default_tol = False self.hermitian = False - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP6(TestMatrixRankOP): - def init_data(self): self.x = np.random.rand(3, 4, 5, 6).astype(np.float32) self.tol_tensor = None self.tol = None self.use_default_tol = False self.hermitian = False - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankOP7(TestMatrixRankOP): - def init_data(self): self.x = np.eye(200, dtype=np.float64) self.tol_tensor = np.random.random([200, 200]).astype(self.x.dtype) self.tol = None self.use_default_tol = True self.hermitian = True - self.out = np.linalg.matrix_rank(self.x, self.tol_tensor, - self.hermitian) + self.out = np.linalg.matrix_rank( + self.x, self.tol_tensor, self.hermitian + ) class TestMatrixRankAPI(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() @@ -177,51 +175,54 @@ class TestMatrixRankAPI(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) tol_np = np.random.random([3, 4]).astype(np.float32) - x_pd = paddle.fluid.data(name="X", - shape=[3, 4, 7, 7], - dtype='float64') - tol_pd = paddle.fluid.data(name="TolTensor", - shape=[3, 4], - dtype='float32') + x_pd = paddle.fluid.data( + name="X", shape=[3, 4, 7, 7], dtype='float64' + ) + tol_pd = paddle.fluid.data( + name="TolTensor", shape=[3, 4], dtype='float32' + ) rank_np = np.linalg.matrix_rank(x_np, tol_np, hermitian=False) - rank_pd = paddle.linalg.matrix_rank(x_pd, - tol_pd, - hermitian=False) + rank_pd = paddle.linalg.matrix_rank( + x_pd, tol_pd, hermitian=False + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "X": x_np, - "TolTensor": tol_np - }, - fetch_list=[rank_pd]) + fetches = exe.run( + fluid.default_main_program(), + feed={"X": x_np, "TolTensor": tol_np}, + fetch_list=[rank_pd], + ) np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) - x_pd = paddle.fluid.data(name="X", - shape=[3, 4, 7, 7], - dtype='float64') + x_pd = paddle.fluid.data( + name="X", shape=[3, 4, 7, 7], dtype='float64' + ) rank_np = np.linalg.matrix_rank(x_np, hermitian=True) rank_pd = paddle.linalg.matrix_rank(x_pd, hermitian=True) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"X": x_np}, - fetch_list=[rank_pd]) + fetches = exe.run( + fluid.default_main_program(), + feed={"X": x_np}, + fetch_list=[rank_pd], + ) np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) - x_pd = paddle.fluid.data(name="X", - shape=[3, 4, 7, 7], - dtype='float64') + x_pd = paddle.fluid.data( + name="X", shape=[3, 4, 7, 7], dtype='float64' + ) rank_np = np.linalg.matrix_rank(x_np, 0.1, hermitian=False) rank_pd = paddle.linalg.matrix_rank(x_pd, 0.1, hermitian=False) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"X": x_np}, - fetch_list=[rank_pd]) + fetches = exe.run( + 
fluid.default_main_program(), + feed={"X": x_np}, + fetch_list=[rank_pd], + ) np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py index ee316bb0431e36451662f6549aced2f199e4971a..e323d91ba561150808a0a3bd8e9b01570e18a80a 100644 --- a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py @@ -22,12 +22,14 @@ paddle.enable_static() class TestMaxMinAmaxAminAPI(unittest.TestCase): - def setUp(self): self.init_case() self.cal_np_out_and_gradient() - self.place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def init_case(self): self.x_np = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]]) @@ -40,7 +42,6 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): # its gradient check is not supported by unittest framework, # thus we calculate the gradient by numpy function. def cal_np_out_and_gradient(self): - def _cal_np_out_and_gradient(func): if func == 'amax': out = np.amax(self.x_np, axis=self.axis, keepdims=self.keepdim) @@ -51,8 +52,10 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): elif func == 'min': out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim) else: - print('This unittest only test amax/amin/max/min, but now is', - func) + print( + 'This unittest only test amax/amin/max/min, but now is', + func, + ) self.np_out[func] = out grad = np.zeros(self.shape) out_b = np.broadcast_to(out.view(), self.shape) @@ -86,7 +89,6 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): # We check the output between paddle API and numpy in static graph. def test_static_graph(self): - def _test_static_graph(func): startup_program = fluid.Program() train_program = fluid.Program() @@ -96,9 +98,11 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): out = self._choose_paddle_func(func, x) exe = fluid.Executor(self.place) - res = exe.run(fluid.default_main_program(), - feed={'input': self.x_np}, - fetch_list=[out]) + res = exe.run( + fluid.default_main_program(), + feed={'input': self.x_np}, + fetch_list=[out], + ) self.assertTrue((np.array(res[0]) == self.np_out[func]).all()) _test_static_graph('amax') @@ -109,19 +113,18 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): # As dygraph is easy to compute gradient, we check the gradient between # paddle API and numpy in dygraph. 
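The numpy-side reference in cal_np_out_and_gradient above is built by reducing, broadcasting the reduced result back over the original shape, and marking where the extreme value was attained. A minimal sketch of that pattern follows, reusing the sample input from init_case; how Paddle weights ties differently between amax/amin and max/min is a convention encoded in the real test and is not reproduced here.

import numpy as np

x = np.array([[0.2, 0.3, 0.5, 0.9],
              [0.1, 0.2, 0.6, 0.7]])
axis, keepdim = 0, False

out = np.amax(x, axis=axis, keepdims=keepdim)
# Broadcast the reduced result back to x.shape and mark the arg-max positions;
# with an upstream gradient of ones this mask is (one convention for) d out / d x.
out_b = np.broadcast_to(np.amax(x, axis=axis, keepdims=True), x.shape)
grad = (x == out_b).astype(x.dtype)
print(out)
print(grad)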
def test_dygraph(self): - def _test_dygraph(func): paddle.disable_static() - x = paddle.to_tensor(self.x_np, - dtype=self.dtype, - stop_gradient=False) + x = paddle.to_tensor( + self.x_np, dtype=self.dtype, stop_gradient=False + ) out = self._choose_paddle_func(func, x) grad_tensor = paddle.ones_like(x) paddle.autograd.backward([out], [grad_tensor], True) - np.testing.assert_allclose(self.np_out[func], - out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + self.np_out[func], out.numpy(), rtol=1e-05 + ) np.testing.assert_allclose(self.np_grad[func], x.grad, rtol=1e-05) paddle.enable_static() @@ -130,10 +133,10 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): _test_dygraph('max') _test_dygraph('min') - # test two minimum or maximum elements -class TestMaxMinAmaxAminAPI2(TestMaxMinAmaxAminAPI): + +class TestMaxMinAmaxAminAPI2(TestMaxMinAmaxAminAPI): def init_case(self): self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]]) self.shape = [2, 4] @@ -144,7 +147,6 @@ class TestMaxMinAmaxAminAPI2(TestMaxMinAmaxAminAPI): # test different axis class TestMaxMinAmaxAminAPI3(TestMaxMinAmaxAminAPI): - def init_case(self): self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]]) self.shape = [2, 4] @@ -155,7 +157,6 @@ class TestMaxMinAmaxAminAPI3(TestMaxMinAmaxAminAPI): # test keepdim = True class TestMaxMinAmaxAminAPI4(TestMaxMinAmaxAminAPI): - def init_case(self): self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]]) self.shape = [2, 4] @@ -166,10 +167,10 @@ class TestMaxMinAmaxAminAPI4(TestMaxMinAmaxAminAPI): # test axis is tuple class TestMaxMinAmaxAminAPI5(TestMaxMinAmaxAminAPI): - def init_case(self): - self.x_np = np.array([[[1, 2], [3, 4]], [[5, 6], [7, - 8]]]).astype(np.int32) + self.x_np = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype( + np.int32 + ) self.shape = [2, 2, 2] self.dtype = 'int32' self.axis = (0, 1) @@ -178,7 +179,6 @@ class TestMaxMinAmaxAminAPI5(TestMaxMinAmaxAminAPI): # test multiple minimum or maximum elements class TestMaxMinAmaxAminAPI6(TestMaxMinAmaxAminAPI): - def init_case(self): self.x_np = np.array([[0.2, 0.9, 0.9, 0.9], [0.9, 0.9, 0.2, 0.2]]) self.shape = [2, 4] diff --git a/python/paddle/fluid/tests/unittests/test_max_op.py b/python/paddle/fluid/tests/unittests/test_max_op.py index cf52cdb8d47891b1dc6c0320835a23cb8af6a119..181343bd5f66ca8213ce45b1b71c97d6186b91b9 100644 --- a/python/paddle/fluid/tests/unittests/test_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_op.py @@ -22,7 +22,6 @@ from test_sum_op import TestReduceOPTensorAxisBase class ApiMaxTest(unittest.TestCase): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -31,39 +30,43 @@ class ApiMaxTest(unittest.TestCase): def test_api(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.static.data("data", shape=[10, 10], dtype="float32") result_max = paddle.max(x=data, axis=1) exe = paddle.static.Executor(self.place) input_data = np.random.rand(10, 10).astype(np.float32) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_max]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max]) self.assertEqual((res == np.max(input_data, axis=1)).all(), True) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): 
data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_max = paddle.max(x=data, axis=0) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_max]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max]) self.assertEqual((res == np.max(input_data, axis=0)).all(), True) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_max = paddle.max(x=data, axis=(0, 1)) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_max]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_max]) self.assertEqual((res == np.max(input_data, axis=(0, 1))).all(), True) def test_errors(self): paddle.enable_static() def test_input_type(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = np.random.rand(10, 10) result_max = paddle.max(x=data, axis=0) @@ -105,17 +108,17 @@ class ApiMaxTest(unittest.TestCase): class TestOutDtype(unittest.TestCase): - def test_max(self): api_fn = paddle.max shape = [10, 16] - check_out_dtype(api_fn, - in_specs=[(shape, )], - expect_dtypes=['float32', 'float64', 'int32', 'int64']) + check_out_dtype( + api_fn, + in_specs=[(shape,)], + expect_dtypes=['float32', 'float64', 'int32', 'int64'], + ) class TestMaxWithTensorAxis1(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.max self.np_api = np.max @@ -125,7 +128,6 @@ class TestMaxWithTensorAxis1(TestReduceOPTensorAxisBase): class TestMaxWithTensorAxis2(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.max self.np_api = np.max @@ -134,7 +136,7 @@ class TestMaxWithTensorAxis2(TestReduceOPTensorAxisBase): self.tensor_axis = [ 0, paddle.to_tensor([1], 'int64'), - paddle.to_tensor([2], 'int64') + paddle.to_tensor([2], 'int64'), ] diff --git a/python/paddle/fluid/tests/unittests/test_maximum_op.py b/python/paddle/fluid/tests/unittests/test_maximum_op.py index 98c7aaa262f0e39bbc809ca99332ed6bf14a2d63..0e5ce1c9ff9920d88b25339e003481e89016ca96 100644 --- a/python/paddle/fluid/tests/unittests/test_maximum_op.py +++ b/python/paddle/fluid/tests/unittests/test_maximum_op.py @@ -19,7 +19,6 @@ import paddle.fluid.core as core class ApiMaximumTest(unittest.TestCase): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -40,56 +39,56 @@ class ApiMaximumTest(unittest.TestCase): def test_static_api(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_max = paddle.maximum(data_x, data_y) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": self.input_y - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) - with 
paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_max = paddle.maximum(data_x, data_z) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "z": self.input_z - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "z": self.input_z}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.maximum(data_a, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "a": self.input_a, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"a": self.input_a, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.maximum(data_b, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "b": self.input_b, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"b": self.input_b, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py index 019d49072c41fd939e1c99afb5be9b87d4c8accc..9334c37f94bd40dcb3afbaeed6fd24aa0f5b8dfd 100644 --- a/python/paddle/fluid/tests/unittests/test_maxout_op.py +++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py @@ -28,14 +28,15 @@ np.random.seed(1) def maxout_forward_naive(x, groups, channel_axis): s0, s1, s2, s3 = x.shape if channel_axis == 1: - return np.ndarray([s0, s1 // groups, groups, s2, s3], \ - buffer = x, dtype=x.dtype).max(axis=2) - return np.ndarray([s0, s1, s2, s3 // groups, groups], \ - buffer = x, dtype=x.dtype).max(axis=4) + return np.ndarray( + [s0, s1 // groups, groups, s2, s3], buffer=x, dtype=x.dtype + ).max(axis=2) + return np.ndarray( + [s0, s1, s2, s3 // groups, groups], buffer=x, dtype=x.dtype + ).max(axis=4) class TestMaxOutOp(OpTest): - def setUp(self): self.op_type = "maxout" self.python_api = paddle.nn.functional.maxout @@ -63,25 +64,21 @@ class TestMaxOutOp(OpTest): class TestMaxOutOpAxis0(TestMaxOutOp): - def set_attrs(self): self.axis = -1 class TestMaxOutOpAxis1(TestMaxOutOp): - def set_attrs(self): self.axis = 3 class TestMaxOutOpFP32(TestMaxOutOp): - def set_attrs(self): self.dtype = 'float32' class TestMaxOutOpGroups(TestMaxOutOp): - def set_attrs(self): self.groups = 3 @@ -92,8 +89,11 @@ class TestMaxoutAPI(unittest.TestCase): self.x_np = np.random.uniform(-1, 1, [2, 6, 5, 4]).astype(np.float64) self.groups = 2 self.axis = 1 - self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + 
paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): @@ -142,9 +142,9 @@ class TestMaxoutAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.maxout, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[2, 4, 6, 8], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 4, 6, 8], dtype='int32' + ) self.assertRaises(TypeError, F.maxout, x_int32) x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8]) diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py index d55d1507f963e7d387ed5cf722ed0c0097c44384..3c78395755fb8d290dcb29d370b936042dfd56af 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_iou.py +++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py @@ -18,8 +18,9 @@ from op_test import OpTest import paddle.fluid as fluid -def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects, - in_mean_ious): +def compute_mean_iou( + predictions, labels, num_classes, in_wrongs, in_corrects, in_mean_ious +): assert predictions.shape == labels.shape predictions = predictions.flatten() labels = labels.flatten() @@ -40,8 +41,9 @@ def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects, denominator = out_wrong + out_correct valid_count = (denominator != 0).sum() - denominator = np.where(denominator > 0, denominator, - np.ones(denominator.shape)) + denominator = np.where( + denominator > 0, denominator, np.ones(denominator.shape) + ) mean_iou = (out_correct / denominator).sum() / valid_count for _, in_mean_iou in in_mean_ious: @@ -50,48 +52,67 @@ def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects, class TestMeanIOUOp(OpTest): - def setUp(self): self.config() self.op_type = "mean_iou" - predictions = np.random.randint(0, self.num_classes, - self.image_size).astype("int32") - labels = np.random.randint(0, self.num_classes, - self.image_size).astype("int32") + predictions = np.random.randint( + 0, self.num_classes, self.image_size + ).astype("int32") + labels = np.random.randint(0, self.num_classes, self.image_size).astype( + "int32" + ) in_wrongs = [] for i in range(self.in_wrong_num): in_wrongs.append( - ("in_wrong_%d" % i, - np.random.randint(0, 10, [self.num_classes]).astype("int32"))) + ( + "in_wrong_%d" % i, + np.random.randint(0, 10, [self.num_classes]).astype( + "int32" + ), + ) + ) in_corrects = [] for i in range(self.in_correct_num): in_corrects.append( - ("in_correct_%d" % i, - np.random.randint(0, 10, [self.num_classes]).astype("int32"))) + ( + "in_correct_%d" % i, + np.random.randint(0, 10, [self.num_classes]).astype( + "int32" + ), + ) + ) in_mean_ious = [] for i in range(self.in_mean_iou_num): - in_mean_ious.append(("in_mean_iou_%d" % i, - np.random.uniform(0, 1, - [1]).astype("float32"))) + in_mean_ious.append( + ( + "in_mean_iou_%d" % i, + np.random.uniform(0, 1, [1]).astype("float32"), + ) + ) self.inputs = { 'Predictions': predictions, 'Labels': labels, 'InWrongs': in_wrongs, 'InCorrects': in_corrects, - 'InMeanIou': in_mean_ious + 'InMeanIou': in_mean_ious, } self.attrs = {'num_classes': int(self.num_classes)} mean_iou, out_wrong, out_correct = compute_mean_iou( - predictions, labels, self.num_classes, in_wrongs, in_corrects, - in_mean_ious) + predictions, + labels, + self.num_classes, + in_wrongs, + 
in_corrects, + in_mean_ious, + ) self.outputs = { 'OutMeanIou': mean_iou, 'OutWrong': out_wrong, - 'OutCorrect': out_correct + 'OutCorrect': out_correct, } def config(self): @@ -106,7 +127,6 @@ class TestMeanIOUOp(OpTest): class TestCase1(TestMeanIOUOp): - def config(self): self.num_classes = 5 self.image_size = [100, 128] @@ -121,14 +141,15 @@ class TestCase1(TestMeanIOUOp): class TestMeanIOUOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): # The input type of accuracy_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.mean_iou, x1, y1) # The input dtype of accuracy_op must be float32 or float64. x2 = fluid.layers.data(name='x2', shape=[4], dtype="float32") diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 6ba980879a6c91fd25f7a09495b2d5d319a61dc9..ed9313b054696bf0556251e50c98e8bb8b4a5c54 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -40,7 +40,6 @@ def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False): class TestMeanOp(OpTest): - def setUp(self): self.op_type = "mean" self.python_api = paddle.mean @@ -60,27 +59,26 @@ class TestMeanOp(OpTest): class TestMeanOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of mean_op must be Variable. input1 = 12 self.assertRaises(TypeError, paddle.mean, input1) # The input dtype of mean_op must be float16, float32, float64. 
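For reference, the closed-form gradient that TestFP16MeanOp below compares against, a constant array filled with 1.0 / np.prod(x.shape), is just the derivative of an unweighted mean: every element contributes with weight 1/N. A quick finite-difference check of that fact in plain NumPy; the shape and step size here are arbitrary illustrative choices.

import numpy as np

x = np.random.rand(3, 4).astype(np.float64)
n = x.size
eps = 1e-6
grad_fd = np.empty_like(x)
for idx in np.ndindex(*x.shape):
    xp = x.copy(); xp[idx] += eps
    xm = x.copy(); xm[idx] -= eps
    grad_fd[idx] = (xp.mean() - xm.mean()) / (2 * eps)  # central difference of mean(x)
np.testing.assert_allclose(grad_fd, np.full_like(x, 1.0 / n), rtol=1e-4)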
- input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) self.assertRaises(TypeError, paddle.mean, input2) - input3 = fluid.layers.data(name='input3', - shape=[4], - dtype="float16") + input3 = fluid.layers.data( + name='input3', shape=[4], dtype="float16" + ) fluid.layers.softmax(input3) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16MeanOp(TestMeanOp): - def init_dtype_type(self): self.dtype = np.float16 self.__class__.no_need_check_grad = True @@ -100,13 +98,13 @@ class TestFP16MeanOp(TestMeanOp): y = paddle.mean(x) dx = paddle.grad(y, x)[0].numpy() dx_expected = self.dtype(1.0 / np.prod(x_np.shape)) * np.ones( - x_np.shape).astype(self.dtype) + x_np.shape + ).astype(self.dtype) np.testing.assert_array_equal(dx, dx_expected) @OpTestTool.skip_if_not_cpu_bf16() class TestBF16MeanOp(TestMeanOp): - def init_dtype_type(self): self.dtype = np.uint16 @@ -136,7 +134,6 @@ def ref_reduce_mean_grad(x, axis, dtype): class TestReduceMeanOp(OpTest): - def setUp(self): self.op_type = 'reduce_mean' self.python_api = reduce_mean_wrapper @@ -157,7 +154,7 @@ class TestReduceMeanOp(OpTest): self.attrs = { 'dim': self.axis, 'keep_dim': self.keepdim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } if self.dtype == 'float16': @@ -187,18 +184,17 @@ class TestReduceMeanOp(OpTest): return with fluid.dygraph.guard(place=place): x = paddle.tensor(self.inputs['X']) - y = paddle.mean(x, - axis=self.attrs['dim'], - keepdim=self.attrs['keep_dim']) + y = paddle.mean( + x, axis=self.attrs['dim'], keepdim=self.attrs['keep_dim'] + ) dx = paddle.grad(y, x)[0].numpy() - dx_expected = ref_reduce_mean_grad(self.inputs['X'], - self.attrs['dim'], - self.dtype) + dx_expected = ref_reduce_mean_grad( + self.inputs['X'], self.attrs['dim'], self.dtype + ) np.testing.assert_array_equal(dx, dx_expected) class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp): - def setUp(self): self.op_type = 'reduce_mean' self.python_api = reduce_mean_wrapper @@ -212,104 +208,88 @@ class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp): class TestReduceMeanOpFloat32(TestReduceMeanOp): - def set_attrs(self): self.dtype = 'float32' class TestReduceMeanOpFloat16(TestReduceMeanOp): - def set_attrs(self): self.dtype = 'float16' class TestReduceMeanOpShape1D(TestReduceMeanOp): - def set_attrs(self): self.shape = [100] class TestReduceMeanOpShape1DFP16(TestReduceMeanOp): - def set_attrs(self): self.shape = [100] self.dtype = 'float16' class TestReduceMeanOpShape6D(TestReduceMeanOp): - def set_attrs(self): self.shape = [2, 3, 4, 5, 6, 7] class TestReduceMeanOpShape6DFP16(TestReduceMeanOp): - def set_attrs(self): self.shape = [2, 3, 4, 5, 6, 7] self.dtype = 'float16' class TestReduceMeanOpAxisAll(TestReduceMeanOp): - def set_attrs(self): self.axis = [0, 1, 2, 3] class TestReduceMeanOpAxisAllFP16(TestReduceMeanOp): - def set_attrs(self): self.axis = [0, 1, 2, 3] self.dtype = 'float16' class TestReduceMeanOpAxisTuple(TestReduceMeanOp): - def set_attrs(self): self.axis = (0, 1, 2) class TestReduceMeanOpAxisTupleFP16(TestReduceMeanOp): - def set_attrs(self): self.axis = (0, 1, 2) self.dtype = 'float16' class TestReduceMeanOpAxisNegative(TestReduceMeanOp): - def set_attrs(self): self.axis = [-2, -1] class TestReduceMeanOpAxisNegativeFP16(TestReduceMeanOp): - def set_attrs(self): self.axis = [-2, -1] 
self.dtype = 'float16' class TestReduceMeanOpKeepdimTrue1(TestReduceMeanOp): - def set_attrs(self): self.keepdim = True class TestReduceMeanOpKeepdimTrue1FP16(TestReduceMeanOp): - def set_attrs(self): self.keepdim = True self.dtype = 'float16' class TestReduceMeanOpKeepdimTrue2(TestReduceMeanOp): - def set_attrs(self): self.axis = [0, 1, 2, 3] self.keepdim = True class TestReduceMeanOpKeepdimTrue2FP16(TestReduceMeanOp): - def set_attrs(self): self.axis = [0, 1, 2, 3] self.keepdim = True @@ -317,13 +297,11 @@ class TestReduceMeanOpKeepdimTrue2FP16(TestReduceMeanOp): class TestReduceMeanOpReduceAllTrue(TestReduceMeanOp): - def set_attrs(self): self.reduce_all = True class TestReduceMeanOpReduceAllTrueFP16(TestReduceMeanOp): - def set_attrs(self): self.reduce_all = True self.dtype = 'float16' @@ -335,8 +313,11 @@ class TestMeanAPI(unittest.TestCase): def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_api_static(self): paddle.enable_static() @@ -350,8 +331,9 @@ class TestMeanAPI(unittest.TestCase): out5 = paddle.mean(x, tuple(axis)) exe = paddle.static.Executor(self.place) - res = exe.run(feed={'X': self.x}, - fetch_list=[out1, out2, out3, out4, out5]) + res = exe.run( + feed={'X': self.x}, fetch_list=[out1, out2, out3, out4, out5] + ) out_ref = np.mean(self.x) for out in res: np.testing.assert_allclose(out, out_ref, rtol=0.0001) @@ -393,9 +375,9 @@ class TestMeanAPI(unittest.TestCase): x_np = np.random.rand(10, 10).astype(np.float32) x = fluid.dygraph.to_variable(x_np) out = fluid.layers.reduce_mean(input=x, dim=1) - np.testing.assert_allclose(out.numpy(), - np.mean(x_np, axis=1), - rtol=1e-05) + np.testing.assert_allclose( + out.numpy(), np.mean(x_np, axis=1), rtol=1e-05 + ) def test_errors(self): paddle.disable_static() @@ -410,7 +392,6 @@ class TestMeanAPI(unittest.TestCase): class TestMeanWithTensorAxis1(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.mean self.np_api = np.mean @@ -420,7 +401,6 @@ class TestMeanWithTensorAxis1(TestReduceOPTensorAxisBase): class TestMeanWithTensorAxis2(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.mean self.np_api = np.mean @@ -429,12 +409,11 @@ class TestMeanWithTensorAxis2(TestReduceOPTensorAxisBase): self.tensor_axis = [ 0, paddle.to_tensor([1], 'int64'), - paddle.to_tensor([2], 'int64') + paddle.to_tensor([2], 'int64'), ] class TestMeanDoubleGradCheck(unittest.TestCase): - def mean_wrapper(self, x): return paddle.mean(x[0]) @@ -449,17 +428,13 @@ class TestMeanDoubleGradCheck(unittest.TestCase): out = paddle.mean(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.mean_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.mean_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -471,7 +446,6 @@ class TestMeanDoubleGradCheck(unittest.TestCase): class TestMeanTripleGradCheck(unittest.TestCase): - def mean_wrapper(self, x): return 
paddle.mean(x[0]) @@ -486,17 +460,13 @@ class TestMeanTripleGradCheck(unittest.TestCase): out = paddle.mean(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.mean_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.mean_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_median.py b/python/paddle/fluid/tests/unittests/test_median.py index 8f33e4713d270ddfd2ff32a8dd0bb1b2f2ba7415..c717293b4cd87f577168d4e888bb252ef54ef61b 100644 --- a/python/paddle/fluid/tests/unittests/test_median.py +++ b/python/paddle/fluid/tests/unittests/test_median.py @@ -21,7 +21,6 @@ DELTA = 1e-6 class TestMedian(unittest.TestCase): - def check_numpy_res(self, np1, np2): self.assertEqual(np1.shape, np2.shape) mismatch = np.sum((np1 - np2) * (np1 - np2)) @@ -56,8 +55,11 @@ class TestMedian(unittest.TestCase): w = 4 l = 2 x = np.arange(h * w * l).reshape([h, w, l]) - lis_tests = [[x, axis, keepdims] for axis in [-1, 0, 1, 2, None] - for keepdims in [False, True]] + lis_tests = [ + [x, axis, keepdims] + for axis in [-1, 0, 1, 2, None] + for keepdims in [False, True] + ] for lis_test in lis_tests: self.static_single_test_median(lis_test) @@ -67,8 +69,11 @@ class TestMedian(unittest.TestCase): w = 4 l = 2 x = np.arange(h * w * l).reshape([h, w, l]) - lis_tests = [[x, axis, keepdims] for axis in [-1, 0, 1, 2, None] - for keepdims in [False, True]] + lis_tests = [ + [x, axis, keepdims] + for axis in [-1, 0, 1, 2, None] + for keepdims in [False, True] + ] for lis_test in lis_tests: self.dygraph_single_test_median(lis_test) diff --git a/python/paddle/fluid/tests/unittests/test_memcpy_op.py b/python/paddle/fluid/tests/unittests/test_memcpy_op.py index 8ab07ad9583eff66ddacc9f327acec8e7cdd5df4..65a246d963624df1064039dc5011037fd4143d6f 100755 --- a/python/paddle/fluid/tests/unittests/test_memcpy_op.py +++ b/python/paddle/fluid/tests/unittests/test_memcpy_op.py @@ -21,7 +21,6 @@ from paddle.fluid import Program, program_guard class TestMemcpy_FillConstant(unittest.TestCase): - def get_prog(self): paddle.enable_static() main_program = Program() @@ -33,20 +32,25 @@ class TestMemcpy_FillConstant(unittest.TestCase): shape=[10, 10], dtype='float32', persistable=False, - stop_gradient=True) - gpu_var = main_program.global_block().create_var(name=gpu_var_name, - shape=[10, 10], - dtype='float32', - persistable=False, - stop_gradient=True) - main_program.global_block().append_op(type="fill_constant", - outputs={"Out": gpu_var_name}, - attrs={ - "shape": [10, 10], - "dtype": gpu_var.dtype, - "value": 1.0, - "place_type": 1 - }) + stop_gradient=True, + ) + gpu_var = main_program.global_block().create_var( + name=gpu_var_name, + shape=[10, 10], + dtype='float32', + persistable=False, + stop_gradient=True, + ) + main_program.global_block().append_op( + type="fill_constant", + outputs={"Out": gpu_var_name}, + attrs={ + "shape": [10, 10], + "dtype": gpu_var.dtype, + "value": 1.0, + "place_type": 1, + }, + ) main_program.global_block().append_op( type="fill_constant", outputs={"Out": pinned_var_name}, @@ -54,35 +58,40 @@ class 
TestMemcpy_FillConstant(unittest.TestCase): "shape": [10, 10], "dtype": gpu_var.dtype, "value": 0.0, - "place_type": 2 - }) + "place_type": 2, + }, + ) return main_program, gpu_var, pinned_var def test_gpu_copy_to_pinned(self): main_program, gpu_var, pinned_var = self.get_prog() - main_program.global_block().append_op(type='memcpy', - inputs={'X': gpu_var}, - outputs={'Out': pinned_var}, - attrs={'dst_place_type': 2}) + main_program.global_block().append_op( + type='memcpy', + inputs={'X': gpu_var}, + outputs={'Out': pinned_var}, + attrs={'dst_place_type': 2}, + ) place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - gpu_, pinned_ = exe.run(main_program, - feed={}, - fetch_list=[gpu_var.name, pinned_var.name]) + gpu_, pinned_ = exe.run( + main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name] + ) np.testing.assert_allclose(gpu_, pinned_, rtol=1e-05) np.testing.assert_allclose(pinned_, np.ones((10, 10)), rtol=1e-05) def test_pinned_copy_gpu(self): main_program, gpu_var, pinned_var = self.get_prog() - main_program.global_block().append_op(type='memcpy', - inputs={'X': pinned_var}, - outputs={'Out': gpu_var}, - attrs={'dst_place_type': 1}) + main_program.global_block().append_op( + type='memcpy', + inputs={'X': pinned_var}, + outputs={'Out': gpu_var}, + attrs={'dst_place_type': 1}, + ) place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - gpu_, pinned_ = exe.run(main_program, - feed={}, - fetch_list=[gpu_var.name, pinned_var.name]) + gpu_, pinned_ = exe.run( + main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name] + ) np.testing.assert_allclose(gpu_, pinned_, rtol=1e-05) np.testing.assert_allclose(gpu_, np.zeros((10, 10)), rtol=1e-05) @@ -98,13 +107,15 @@ class TestMemcpy_FillConstant(unittest.TestCase): shape=[1], dtype='bool', persistable=False, - stop_gradient=True) + stop_gradient=True, + ) gpu_var = main_program.global_block().create_var( name=gpu_var_name, shape=[1], dtype='bool', persistable=False, - stop_gradient=True) + stop_gradient=True, + ) main_program.global_block().append_op( type="fill_constant", outputs={"Out": gpu_var_name}, @@ -112,8 +123,9 @@ class TestMemcpy_FillConstant(unittest.TestCase): "shape": [1], "dtype": gpu_var.dtype, "value": False, - "place_type": 1 - }) + "place_type": 1, + }, + ) main_program.global_block().append_op( type="fill_constant", outputs={"Out": pinned_var_name}, @@ -121,18 +133,23 @@ class TestMemcpy_FillConstant(unittest.TestCase): "shape": [1], "dtype": gpu_var.dtype, "value": True, - "place_type": 2 - }) + "place_type": 2, + }, + ) - main_program.global_block().append_op(type='memcpy', - inputs={'X': pinned_var}, - outputs={'Out': gpu_var}, - attrs={'dst_place_type': 1}) + main_program.global_block().append_op( + type='memcpy', + inputs={'X': pinned_var}, + outputs={'Out': gpu_var}, + attrs={'dst_place_type': 1}, + ) place = fluid.CUDAPlace(0) exe = fluid.Executor(place) - gpu_, pinned_ = exe.run(main_program, - feed={}, - fetch_list=[gpu_var.name, pinned_var.name]) + gpu_, pinned_ = exe.run( + main_program, + feed={}, + fetch_list=[gpu_var.name, pinned_var.name], + ) expect_value = np.array([1]).astype('bool') np.testing.assert_array_equal(gpu_, expect_value) else: @@ -140,7 +157,6 @@ class TestMemcpy_FillConstant(unittest.TestCase): class TestMemcpyOPError(unittest.TestCase): - def get_prog(self): paddle.enable_static() main_program = Program() @@ -150,7 +166,8 @@ class TestMemcpyOPError(unittest.TestCase): shape=[10, 10], dtype='float32', persistable=False, - stop_gradient=True) + stop_gradient=True, + ) 
main_program.global_block().append_op( type="fill_constant", outputs={"Out": "tensor@Pinned_0"}, @@ -158,41 +175,47 @@ class TestMemcpyOPError(unittest.TestCase): "shape": [10, 10], "dtype": pinned_var.dtype, "value": 0.0, - "place_type": 2 - }) + "place_type": 2, + }, + ) return main_program, pinned_var def test_SELECTED_ROWS(self): main_program, pinned_var = self.get_prog() - selected_row_var = main_program.global_block().create_var( \ - name="selected_row_0", dtype="float32", persistable=False, \ - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, stop_gradient=True) - main_program.global_block().append_op(type="fill_constant", - outputs={"Out": selected_row_var}, - attrs={ - "shape": - selected_row_var.shape, - "dtype": - selected_row_var.dtype, - "value": 1.0, - "place_type": 1 - }) + selected_row_var = main_program.global_block().create_var( + name="selected_row_0", + dtype="float32", + persistable=False, + type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + stop_gradient=True, + ) + main_program.global_block().append_op( + type="fill_constant", + outputs={"Out": selected_row_var}, + attrs={ + "shape": selected_row_var.shape, + "dtype": selected_row_var.dtype, + "value": 1.0, + "place_type": 1, + }, + ) with self.assertRaises(RuntimeError): main_program.global_block().append_op( type='memcpy', inputs={'X': selected_row_var}, outputs={'Out': pinned_var}, - attrs={'dst_place_type': 2}) + attrs={'dst_place_type': 2}, + ) place = fluid.CUDAPlace(0) exe = fluid.Executor(place) selected_row_var_, pinned_ = exe.run( main_program, feed={}, - fetch_list=[selected_row_var.name, pinned_var.name]) + fetch_list=[selected_row_var.name, pinned_var.name], + ) class TestMemcpyApi(unittest.TestCase): - def test_api(self): a = paddle.ones([1024, 1024]) b = paddle.tensor.creation._memcpy(a, paddle.CUDAPinnedPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_memory_analysis.py b/python/paddle/fluid/tests/unittests/test_memory_analysis.py index 1672e7371cbc69b84e5da74ccacbbb8176c24525..8f38433d20c9b911912abe39efe4029b54752f51 100644 --- a/python/paddle/fluid/tests/unittests/test_memory_analysis.py +++ b/python/paddle/fluid/tests/unittests/test_memory_analysis.py @@ -14,12 +14,14 @@ import unittest import paddle -from paddle.fluid.memory_analysis import pre_allocate_memory, get_max_memory_info +from paddle.fluid.memory_analysis import ( + pre_allocate_memory, + get_max_memory_info, +) from simple_nets import simple_fc_net class TestMemoryAnalysis(unittest.TestCase): - def setUp(self): paddle.enable_static() @@ -28,18 +30,19 @@ class TestMemoryAnalysis(unittest.TestCase): optimizer = paddle.optimizer.Adam(learning_rate=1e-3) optimizer.minimize(loss) main_prog = paddle.static.default_main_program() - max_tmp_mem_1, max_persitable_mem_1 = get_max_memory_info(main_prog, - batch_size=32) + max_tmp_mem_1, max_persitable_mem_1 = get_max_memory_info( + main_prog, batch_size=32 + ) self.assertGreater(max_tmp_mem_1, 0) self.assertGreater(max_persitable_mem_1, 0) - max_tmp_mem_2, max_persitable_mem_2 = get_max_memory_info(main_prog, - batch_size=64) + max_tmp_mem_2, max_persitable_mem_2 = get_max_memory_info( + main_prog, batch_size=64 + ) self.assertEqual(max_persitable_mem_1, max_persitable_mem_2) self.assertLess(max_tmp_mem_1, max_tmp_mem_2) class TestPreAllocateMemory(unittest.TestCase): - def setUp(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py index 
7b335bf83d6ce38ea3871ea1423e77b0f4c24478..839c21b151a266e3e65109ee4f5271af2c5c4062 100644
--- a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py
+++ b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py
@@ -18,15 +18,14 @@ import unittest
 
 
 class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
-
     def setUp(self):
         self.image_shape = [28, 28]
         self.iteration = 10
 
     def main_impl(self, place):
-        image = fluid.layers.data(name='image',
-                                  shape=self.image_shape,
-                                  dtype='float32')
+        image = fluid.layers.data(
+            name='image', shape=self.image_shape, dtype='float32'
+        )
         relu_image = fluid.layers.relu(image)
         loss = fluid.layers.reduce_mean(relu_image)
 
@@ -38,12 +37,13 @@ class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
         exe.run(fluid.default_startup_program())
 
         compiled_prog = fluid.CompiledProgram(
-            fluid.default_main_program()).with_data_parallel(
-                loss_name=loss.name, build_strategy=build_strategy)
+            fluid.default_main_program()
+        ).with_data_parallel(loss_name=loss.name, build_strategy=build_strategy)
 
         image_tensor = fluid.LoDTensor()
-        np_image = np.random.uniform(low=-10, high=10,
-                                     size=self.image_shape).astype('float32')
+        np_image = np.random.uniform(
+            low=-10, high=10, size=self.image_shape
+        ).astype('float32')
         image_tensor.set(np_image, place)
 
         feed_dict = [{image.name: image_tensor}]
diff --git a/python/paddle/fluid/tests/unittests/test_memory_usage.py b/python/paddle/fluid/tests/unittests/test_memory_usage.py
index 2ddfd1ceb0270beacd1870ae4e2849d6df6d4e5a..8c41d0853fc642bf831feee7c9d4ff05d45f446b 100644
--- a/python/paddle/fluid/tests/unittests/test_memory_usage.py
+++ b/python/paddle/fluid/tests/unittests/test_memory_usage.py
@@ -20,8 +20,10 @@ import unittest
 
 def train_simulator(test_batch_size=10):
     if test_batch_size <= 0:
-        raise ValueError("batch_size should be a positive integeral value, "
-                         "but got batch_size={}".format(test_batch_size))
+        raise ValueError(
+            "batch_size should be a positive integral value, "
+            "but got batch_size={}".format(test_batch_size)
+        )
 
     x = fluid.layers.data(name='x', shape=[13], dtype='float32')
     y_predict = fluid.layers.fc(input=x, size=1, act=None)
@@ -35,14 +37,16 @@ def train_simulator(test_batch_size=10):
 
     # Calculate memory usage in current network config
     lower_usage, upper_usage, unit = fluid.contrib.memory_usage(
-        fluid.default_main_program(), batch_size=test_batch_size)
+        fluid.default_main_program(), batch_size=test_batch_size
+    )
 
-    print("memory usage is about %.3f - %.3f %s" %
-          (lower_usage, upper_usage, unit))
+    print(
+        "memory usage is about %.3f - %.3f %s"
+        % (lower_usage, upper_usage, unit)
+    )
 
 
 class TestMemoryUsage(unittest.TestCase):
-
     def test_with_unit_B(self):
         with self.program_scope_guard():
             train_simulator()
diff --git a/python/paddle/fluid/tests/unittests/test_merge_selectedrows_op.py b/python/paddle/fluid/tests/unittests/test_merge_selectedrows_op.py
index 65fd3b9e4051258b8d33faa4578c428f6507caed..a22571289b28eeb9b1de295d993a0df6f10b270a 100644
--- a/python/paddle/fluid/tests/unittests/test_merge_selectedrows_op.py
+++ b/python/paddle/fluid/tests/unittests/test_merge_selectedrows_op.py
@@ -19,7 +19,6 @@ from paddle.fluid.op import Operator
 
 
 class TestMergeSelectedRows(unittest.TestCase):
-
     def get_places(self):
         places = [core.CPUPlace()]
         if core.is_compiled_with_cuda():
diff --git a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py
index
c6aadeda5fc153aab27e7cafb988b066e7ab637a..49d1cf4bd79a0cc9d3a03a64a578a84113ada77e 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py @@ -19,20 +19,22 @@ from paddle import _C_ops, _legacy_C_ops from paddle.fluid.framework import in_dygraph_mode -def run_adam_op(params, - grads, - lrs, - moment1s, - moment2s, - beta1_pows, - beta2_pows, - master_params, - epsilon, - beta1, - beta2, - place, - multi_precision=False, - use_merged=False): +def run_adam_op( + params, + grads, + lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + epsilon, + beta1, + beta2, + place, + multi_precision=False, + use_merged=False, +): assert len(params) == len(grads) assert len(params) == len(lrs) assert len(params) == len(moment1s) @@ -57,25 +59,71 @@ def run_adam_op(params, if not use_merged: for i in range(len(param_vars)): _, _, _, _, _, _ = _legacy_C_ops.adam( - param_vars[i], grad_vars[i], lr_vars[i], moment1_vars[i], - moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i], - master_param_vars[i], param_vars[i], moment1_vars[i], - moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i], - master_param_vars[i], 'epsilon', epsilon, 'beta1', beta1, - 'beta2', beta2, 'multi_precision', multi_precision) + param_vars[i], + grad_vars[i], + lr_vars[i], + moment1_vars[i], + moment2_vars[i], + beta1_pow_vars[i], + beta2_pow_vars[i], + master_param_vars[i], + param_vars[i], + moment1_vars[i], + moment2_vars[i], + beta1_pow_vars[i], + beta2_pow_vars[i], + master_param_vars[i], + 'epsilon', + epsilon, + 'beta1', + beta1, + 'beta2', + beta2, + 'multi_precision', + multi_precision, + ) else: if in_dygraph_mode(): _, _, _, _, _, _ = _C_ops.merged_adam_( - param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars, - beta1_pow_vars, beta2_pow_vars, master_param_vars, beta1, beta2, - epsilon, multi_precision, False) + param_vars, + grad_vars, + lr_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + beta1, + beta2, + epsilon, + multi_precision, + False, + ) else: _, _, _, _, _, _ = _legacy_C_ops.merged_adam( - param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars, - beta1_pow_vars, beta2_pow_vars, master_param_vars, param_vars, - moment1_vars, moment2_vars, beta1_pow_vars, beta2_pow_vars, - master_param_vars, 'epsilon', epsilon, 'beta1', beta1, 'beta2', - beta2, 'multi_precision', multi_precision) + param_vars, + grad_vars, + lr_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + param_vars, + moment1_vars, + moment2_vars, + beta1_pow_vars, + beta2_pow_vars, + master_param_vars, + 'epsilon', + epsilon, + 'beta1', + beta1, + 'beta2', + beta2, + 'multi_precision', + multi_precision, + ) outputs = { 'ParamOut': param_vars, @@ -83,14 +131,13 @@ def run_adam_op(params, 'Moment2Out': moment2_vars, 'Beta1PowOut': beta1_pow_vars, 'Beta2PowOut': beta2_pow_vars, - 'MasterParamOut': master_param_vars + 'MasterParamOut': master_param_vars, } return outputs class TestMergedAdam(unittest.TestCase): - def setUp(self): paddle.disable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -111,27 +158,46 @@ class TestMergedAdam(unittest.TestCase): beta1_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype) beta2_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype) master_params = [p.astype(mp_dtype) for p in params] - return params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params + return ( + params, + grads, + 
lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + ) def check_with_place(self, place, multi_precision): - params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + lrs, + moment1s, + moment2s, + beta1_pows, + beta2_pows, + master_params, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_merged): - return run_adam_op(params=params, - grads=grads, - lrs=lrs, - moment1s=moment1s, - moment2s=moment2s, - beta1_pows=beta1_pows, - beta2_pows=beta2_pows, - master_params=master_params, - epsilon=0.9, - beta1=0.9, - beta2=0.99, - place=place, - multi_precision=multi_precision, - use_merged=use_merged) + return run_adam_op( + params=params, + grads=grads, + lrs=lrs, + moment1s=moment1s, + moment2s=moment2s, + beta1_pows=beta1_pows, + beta2_pows=beta2_pows, + master_params=master_params, + epsilon=0.9, + beta1=0.9, + beta2=0.99, + place=place, + multi_precision=multi_precision, + use_merged=use_merged, + ) outs1 = run_op(True) outs2 = run_op(False) @@ -144,10 +210,9 @@ class TestMergedAdam(unittest.TestCase): if place == 'gpu': np.testing.assert_array_equal(value1[i], value2[i]) else: - np.testing.assert_allclose(value1[i], - value2[i], - rtol=1e-05, - atol=1e-07) + np.testing.assert_allclose( + value1[i], value2[i], rtol=1e-05, atol=1e-07 + ) def get_places(self): places = ['cpu'] diff --git a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py index 466ee3afd58c5e0d25b8bd4864876064c054493c..dcba88284fc10e232fbfc6a22ad5f7d0f27e507c 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py @@ -19,16 +19,18 @@ from paddle.fluid.layer_helper import LayerHelper from collections import OrderedDict -def run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False): +def run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -45,48 +47,70 @@ def run_momentum_op(params, } param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, 
v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -96,8 +120,9 @@ def run_momentum_op(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -108,10 +133,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars[i] outputs['MasterParamOut'] = master_param_vars[i] - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -123,10 +147,9 @@ def run_momentum_op(params, if multi_precision: inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -134,17 +157,19 @@ def run_momentum_op(params, return exe.run(main, feed=feed_dict, fetch_list=fetch_list) -def run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False, - use_nesterov=True): +def run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, + use_nesterov=True, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -156,48 +181,70 @@ def run_momentum_op2(params, helper = LayerHelper(op_type, **locals()) param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + 
(p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -207,8 +254,9 @@ def run_momentum_op2(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -227,10 +275,9 @@ def run_momentum_op2(params, 'regularization_method': 'l2_decay', 'regularization_coeff': 2.0, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -243,22 +290,18 @@ def run_momentum_op2(params, inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars attrs = { - 'mu': - mu, - 'multi_precision': - multi_precision, - 'rescale_grad': - rescale_grad, - 'use_nesterov': - use_nesterov, - 'regularization_method': - ['l2_decay' for i in range(len(param_vars))], + 'mu': mu, + 'multi_precision': multi_precision, + 'rescale_grad': rescale_grad, + 'use_nesterov': use_nesterov, + 'regularization_method': [ + 'l2_decay' for i in range(len(param_vars)) + ], 'regularization_coeff': [2.0 for i in range(len(param_vars))], } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): @@ -267,7 +310,6 @@ def run_momentum_op2(params, class TestMergedMomentum(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -280,8 +322,11 @@ class TestMergedMomentum(unittest.TestCase): def prepare_data(self, shapes, multi_precision, seed, place): np.random.seed(seed) mp_dtype = np.float32 - dtype = np.float16 if multi_precision and isinstance( - place, paddle.CUDAPlace) else np.float32 + dtype = ( + np.float16 + if multi_precision and isinstance(place, paddle.CUDAPlace) + else np.float32 + ) params = self.gen_rand_data(shapes, dtype) grads = self.gen_rand_data(shapes, dtype) velocitys = self.gen_rand_data(shapes, mp_dtype) @@ -293,21 +338,28 @@ class TestMergedMomentum(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, multi_precision): - params, grads, velocitys, 
master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_merged): # FIXME(zengjinle): CPU Momentum Op does not support rescale_grad rescale_grad = 1.0 if isinstance(place, paddle.CPUPlace) else 0.01 - return run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged) + return run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + ) outs1 = run_op(True) outs2 = run_op(False) @@ -331,7 +383,6 @@ class TestMergedMomentum(unittest.TestCase): class TestMergedMomentum2(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -343,8 +394,11 @@ class TestMergedMomentum2(unittest.TestCase): def prepare_data(self, shapes, multi_precision, seed, place): np.random.seed(seed) mp_dtype = np.float32 - dtype = np.float16 if multi_precision and isinstance( - place, paddle.CUDAPlace) else np.float32 + dtype = ( + np.float16 + if multi_precision and isinstance(place, paddle.CUDAPlace) + else np.float32 + ) params = self.gen_rand_data(shapes, dtype) grads = self.gen_rand_data(shapes, dtype) velocitys = self.gen_rand_data(shapes, mp_dtype) @@ -356,22 +410,29 @@ class TestMergedMomentum2(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, multi_precision): - params, grads, velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data(self.shapes, multi_precision, self.seed, place) def run_op(use_nesterov, use_merged): # FIXME(zengjinle): CPU Momentum Op does not support rescale_grad rescale_grad = 1.0 if isinstance(place, paddle.CPUPlace) else 0.01 - return run_momentum_op2(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged, - use_nesterov=use_nesterov) + return run_momentum_op2( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + use_nesterov=use_nesterov, + ) outs1 = run_op(use_nesterov=True, use_merged=True) outs2 = run_op(use_nesterov=True, use_merged=False) diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py index f49ac0d1d5279572d010b02a9dfbc27091a12df5..3c3da16c958c4e0bbfd7ecb5d56fd08c6a058c9d 100644 --- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py +++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestMeshgridOp(OpTest): - def setUp(self): self.op_type = "meshgrid" self.dtype = self.get_dtype() @@ -46,7 +45,7 @@ class TestMeshgridOp(OpTest): ins = [] outs = [] for i in range(len(self.shape)): - ins.append(np.random.random((self.shape[i], )).astype(self.dtype)) + ins.append(np.random.random((self.shape[i],)).astype(self.dtype)) for i in range(len(self.shape)): out_reshape = [1] * len(self.shape) @@ -60,23 +59,29 @@ class TestMeshgridOp(OpTest): class 
TestMeshgridOp2(TestMeshgridOp): - def get_x_shape(self): return [100, 300] class TestMeshgridOp3(unittest.TestCase): - def test_api(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -85,28 +90,34 @@ class TestMeshgridOp3(unittest.TestCase): exe = fluid.Executor(place=fluid.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid(x, y) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp4(unittest.TestCase): - def test_list_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -115,29 +126,35 @@ class TestMeshgridOp4(unittest.TestCase): exe = fluid.Executor(place=fluid.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid([x, y]) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp5(unittest.TestCase): - def test_tuple_input(self): x = fluid.data(shape=[100], dtype='int32', name='x') y = fluid.data(shape=[200], dtype='int32', name='y') - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') out_1 = np.reshape(input_1, [100, 1]) out_1 = np.broadcast_to(out_1, [100, 200]) @@ -146,26 +163,32 @@ class TestMeshgridOp5(unittest.TestCase): exe = fluid.Executor(place=fluid.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid((x, y)) - res_1, res_2 = exe.run(fluid.default_main_program(), - feed={ - 'x': input_1, - 'y': input_2 - }, - fetch_list=[grid_x, grid_y]) + res_1, res_2 = exe.run( + fluid.default_main_program(), + feed={'x': input_1, 'y': input_2}, + fetch_list=[grid_x, grid_y], + ) assert np.array_equal(res_1, out_1) assert np.array_equal(res_2, out_2) class TestMeshgridOp6(unittest.TestCase): - def test_api_with_dygraph(self): - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): 
tensor_3 = fluid.dygraph.to_variable(input_3) @@ -181,14 +204,21 @@ class TestMeshgridOp6(unittest.TestCase): class TestMeshgridOp7(unittest.TestCase): - def test_api_with_dygraph_list_input(self): - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): tensor_3 = fluid.dygraph.to_variable(input_3) @@ -204,14 +234,21 @@ class TestMeshgridOp7(unittest.TestCase): class TestMeshgridOp8(unittest.TestCase): - def test_api_with_dygraph_tuple_input(self): - input_3 = np.random.randint(0, 100, [ + input_3 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_4 = np.random.randint( + 0, 100, - ]).astype('int32') - input_4 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): tensor_3 = fluid.dygraph.to_variable(input_3) @@ -227,14 +264,21 @@ class TestMeshgridOp8(unittest.TestCase): class TestMeshgridEager(unittest.TestCase): - def test_dygraph_api(self): - input_1 = np.random.randint(0, 100, [ + input_1 = np.random.randint( + 0, + 100, + [ + 100, + ], + ).astype('int32') + input_2 = np.random.randint( + 0, 100, - ]).astype('int32') - input_2 = np.random.randint(0, 100, [ - 200, - ]).astype('int32') + [ + 200, + ], + ).astype('int32') with fluid.dygraph.guard(): tensor_1 = fluid.dygraph.to_variable(input_1) @@ -250,15 +294,22 @@ class TestMeshgridEager(unittest.TestCase): tensor_eager_1.stop_gradient = False tensor_eager_2.stop_gradient = False res_eager_1, res_eager_2 = paddle.tensor.meshgrid( - (tensor_eager_1, tensor_eager_2)) + (tensor_eager_1, tensor_eager_2) + ) sum_eager = paddle.add_n([res_eager_1, res_eager_2]) sum_eager.backward() - self.assertEqual(( - tensor_1.grad.numpy() == tensor_eager_1.grad.numpy()).all(), - True) - self.assertEqual(( - tensor_2.grad.numpy() == tensor_eager_2.grad.numpy()).all(), - True) + self.assertEqual( + ( + tensor_1.grad.numpy() == tensor_eager_1.grad.numpy() + ).all(), + True, + ) + self.assertEqual( + ( + tensor_2.grad.numpy() == tensor_eager_2.grad.numpy() + ).all(), + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py index 1f4cd926241381d3d30733ab60093b7252cf4b54..a81b63dbb586d6c3769fc914dd092fac7cf8b87b 100644 --- a/python/paddle/fluid/tests/unittests/test_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_min_op.py @@ -22,7 +22,6 @@ from test_sum_op import TestReduceOPTensorAxisBase class ApiMinTest(unittest.TestCase): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -31,39 +30,43 @@ class ApiMinTest(unittest.TestCase): def test_api(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.static.data("data", shape=[10, 10], dtype="float32") result_min = paddle.min(x=data, axis=1) exe = paddle.static.Executor(self.place) input_data = np.random.rand(10, 10).astype(np.float32) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_min]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_min]) self.assertEqual((res == np.min(input_data, axis=1)).all(), True) - with 
paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_min = paddle.min(x=data, axis=0) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_min]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_min]) self.assertEqual((res == np.min(input_data, axis=0)).all(), True) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_min = paddle.min(x=data, axis=(0, 1)) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) - res, = exe.run(feed={"data": input_data}, fetch_list=[result_min]) + (res,) = exe.run(feed={"data": input_data}, fetch_list=[result_min]) self.assertEqual((res == np.min(input_data, axis=(0, 1))).all(), True) def test_errors(self): paddle.enable_static() def test_input_type(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = np.random.rand(10, 10) result_min = paddle.min(x=data, axis=0) @@ -84,17 +87,17 @@ class ApiMinTest(unittest.TestCase): class TestOutDtype(unittest.TestCase): - def test_min(self): api_fn = paddle.min shape = [10, 16] - check_out_dtype(api_fn, - in_specs=[(shape, )], - expect_dtypes=['float32', 'float64', 'int32', 'int64']) + check_out_dtype( + api_fn, + in_specs=[(shape,)], + expect_dtypes=['float32', 'float64', 'int32', 'int64'], + ) class TestMinWithTensorAxis1(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.min self.np_api = np.min @@ -104,7 +107,6 @@ class TestMinWithTensorAxis1(TestReduceOPTensorAxisBase): class TestMinWithTensorAxis2(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.min self.np_api = np.min @@ -113,7 +115,7 @@ class TestMinWithTensorAxis2(TestReduceOPTensorAxisBase): self.tensor_axis = [ 0, paddle.to_tensor([1], 'int64'), - paddle.to_tensor([2], 'int64') + paddle.to_tensor([2], 'int64'), ] self.keepdim = True diff --git a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py index 67329652726a1f1510d085f80d62f25c835e00d0..7afb4860211be723ac925a4cf66170570f752d82 100644 --- a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py +++ b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py @@ -18,26 +18,25 @@ from op_test import OpTest class TestMineHardExamplesOp(OpTest): - def set_data(self): self.init_test_data() self.inputs = { 'ClsLoss': self.cls_loss, 'LocLoss': self.loc_loss, 'MatchIndices': self.match_indices, - 'MatchDist': self.match_dis + 'MatchDist': self.match_dis, } self.attrs = { 'neg_pos_ratio': self.neg_pos_ratio, 'neg_overlap': self.neg_overlap, 'sample_size': self.sample_size, - 'mining_type': self.mining_type + 'mining_type': self.mining_type, } self.outputs = { 'NegIndices': (self.neg_indices, self.neg_indices_lod), - 'UpdatedMatchIndices': self.updated_match_indices + 'UpdatedMatchIndices': self.updated_match_indices, } def test_check_output(self): @@ -55,17 +54,21 @@ 
class TestMineHardExamplesOp(OpTest): self.neg_overlap = 0.5 self.sample_size = 0 self.mining_type = "max_negative" - self.cls_loss = np.array([[0.1, 0.1, 0.3], [0.3, 0.1, - 0.1]]).astype('float64') + self.cls_loss = np.array([[0.1, 0.1, 0.3], [0.3, 0.1, 0.1]]).astype( + 'float64' + ) - self.loc_loss = np.array([[0.1, 0.2, 0.3], [0.3, 0.4, - 0.1]]).astype('float64') + self.loc_loss = np.array([[0.1, 0.2, 0.3], [0.3, 0.4, 0.1]]).astype( + 'float64' + ) - self.match_dis = np.array([[0.2, 0.4, 0.8], [0.1, 0.9, - 0.3]]).astype('float64') + self.match_dis = np.array([[0.2, 0.4, 0.8], [0.1, 0.9, 0.3]]).astype( + 'float64' + ) - self.match_indices = np.array([[0, -1, -1], [-1, 0, - -1]]).astype('int32') + self.match_indices = np.array([[0, -1, -1], [-1, 0, -1]]).astype( + 'int32' + ) self.updated_match_indices = self.match_indices @@ -74,23 +77,26 @@ class TestMineHardExamplesOp(OpTest): class TestMineHardExamplesOpHardExample(TestMineHardExamplesOp): - def init_test_data(self): super(TestMineHardExamplesOpHardExample, self).init_test_data() self.mining_type = "hard_example" self.sample_size = 2 - self.cls_loss = np.array([[0.5, 0.1, 0.3], [0.3, 0.1, - 0.1]]).astype('float64') + self.cls_loss = np.array([[0.5, 0.1, 0.3], [0.3, 0.1, 0.1]]).astype( + 'float64' + ) - self.loc_loss = np.array([[0.2, 0.2, 0.3], [0.3, 0.1, - 0.2]]).astype('float64') + self.loc_loss = np.array([[0.2, 0.2, 0.3], [0.3, 0.1, 0.2]]).astype( + 'float64' + ) - self.match_indices = np.array([[0, -1, -1], [-1, 0, - -1]]).astype('int32') + self.match_indices = np.array([[0, -1, -1], [-1, 0, -1]]).astype( + 'int32' + ) - self.updated_match_indices = np.array([[0, -1, -1], - [-1, -1, -1]]).astype('int32') + self.updated_match_indices = np.array( + [[0, -1, -1], [-1, -1, -1]] + ).astype('int32') self.neg_indices_lod = [[1, 2]] self.neg_indices = np.array([[2], [0], [2]]).astype('int32') diff --git a/python/paddle/fluid/tests/unittests/test_minimum_op.py b/python/paddle/fluid/tests/unittests/test_minimum_op.py index 4dbdfdcad4cd1cf09ec49be176ce6cac40223c7d..81771950c08233717f1aadc9567d1fb441c9bf51 100644 --- a/python/paddle/fluid/tests/unittests/test_minimum_op.py +++ b/python/paddle/fluid/tests/unittests/test_minimum_op.py @@ -19,7 +19,6 @@ import paddle.fluid.core as core class ApiMinimumTest(unittest.TestCase): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -40,56 +39,56 @@ class ApiMinimumTest(unittest.TestCase): def test_static_api(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_max = paddle.minimum(data_x, data_y) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": self.input_y - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_max = paddle.minimum(data_x, data_z) exe = paddle.static.Executor(self.place) 
- res, = exe.run(feed={ - "x": self.input_x, - "z": self.input_z - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "z": self.input_z}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.minimum(data_a, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "a": self.input_a, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"a": self.input_a, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.minimum(data_b, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "b": self.input_b, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"b": self.input_b, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): diff --git a/python/paddle/fluid/tests/unittests/test_minus_op.py b/python/paddle/fluid/tests/unittests/test_minus_op.py index aaa8744061c96649b66c9f50ea9d30a5a65cc790..40b4ddd3b579b23ab2510346050710844865aeba 100644 --- a/python/paddle/fluid/tests/unittests/test_minus_op.py +++ b/python/paddle/fluid/tests/unittests/test_minus_op.py @@ -19,12 +19,11 @@ import paddle class TestMinusOp(OpTest): - def setUp(self): self.op_type = "minus" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((32, 84)).astype("float32") + 'Y': np.random.random((32, 84)).astype("float32"), } self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])} diff --git a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py index bc57d38b77c5a5e8b9f83024e54571578975ef23..62243890305e5d0ca0fccd871d8093cedcb0e482 100644 --- a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py +++ b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py @@ -36,21 +36,25 @@ def conv_net(use_feed): img = fluid.layers.data(name='image', shape=img_shape, dtype='float16') label = fluid.layers.data(name='label', shape=[1], dtype='int64') - conv_pool_1 = fluid.nets.simple_img_conv_pool(input=img, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + ) conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) conv_pool_1 = fluid.layers.cast(conv_pool_1, np.float32) - conv_pool_2 = fluid.nets.simple_img_conv_pool(input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + ) hidden = 
fluid.layers.cast(conv_pool_2, np.float32) return loss_net(hidden, label) @@ -61,11 +65,10 @@ def _optimizer(learning_rate=1e-6): class TestResnet(TestParallelExecutorBase): - def check_model(self, use_device): - img, label = init_data(batch_size=batch_size, - img_shape=img_shape, - label_range=9) + img, label = init_data( + batch_size=batch_size, img_shape=img_shape, label_range=9 + ) img = np.float16(img) feed_dict = {"image": img, "label": label} @@ -75,7 +78,8 @@ class TestResnet(TestParallelExecutorBase): iter=10, use_device=use_device, fuse_all_reduce_ops=True, - optimizer=_optimizer) + optimizer=_optimizer, + ) def test_model(self): if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_mode_op.py b/python/paddle/fluid/tests/unittests/test_mode_op.py index 8b2dcdf5295257d34de2deec127dbaaf1517739f..bec5707ac956505e89872b0e92f64c73800cb5a3 100644 --- a/python/paddle/fluid/tests/unittests/test_mode_op.py +++ b/python/paddle/fluid/tests/unittests/test_mode_op.py @@ -40,7 +40,7 @@ def cal_mode(a, axis, keepdim=False): if axis < 0: axis = len(a.shape) + axis in_dims = list(range(a.ndim)) - a_view = np.transpose(a, in_dims[:axis] + in_dims[axis + 1:] + [axis]) + a_view = np.transpose(a, in_dims[:axis] + in_dims[axis + 1 :] + [axis]) inds = np.ndindex(a_view.shape[:-1]) modes = np.empty(a_view.shape[:-1], dtype=a.dtype) indexes = np.empty(a_view.shape[:-1], dtype=np.int64) @@ -55,7 +55,6 @@ def cal_mode(a, axis, keepdim=False): class TestModeOp(OpTest): - def init_args(self): self.axis = 1 @@ -81,7 +80,6 @@ class TestModeOp(OpTest): class TestModeOpLastdim(OpTest): - def init_args(self): self.axis = -1 @@ -107,14 +105,12 @@ class TestModeOpLastdim(OpTest): class TestModeOpKernels(unittest.TestCase): - def setUp(self): self.axises = [-1, 1] np.random.seed(666) self.inputs = np.ceil(np.random.rand(2, 10, 10) * 1000) def test_mode_op(self): - def test_cpu_kernel(): paddle.set_device('cpu') tensor = paddle.to_tensor(self.inputs) @@ -123,9 +119,9 @@ class TestModeOpKernels(unittest.TestCase): v, inds = paddle.mode(tensor, axis) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) - value_expect, indice_expect = cal_mode(self.inputs, - axis, - keepdim=True) + value_expect, indice_expect = cal_mode( + self.inputs, axis, keepdim=True + ) v, inds = paddle.mode(tensor, axis, keepdim=True) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) @@ -137,9 +133,9 @@ class TestModeOpKernels(unittest.TestCase): v, inds = paddle.mode(tensor, axis) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) - value_expect, indice_expect = cal_mode(self.inputs, - axis, - keepdim=True) + value_expect, indice_expect = cal_mode( + self.inputs, axis, keepdim=True + ) v, inds = paddle.mode(tensor, axis, keepdim=True) np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) @@ -150,7 +146,6 @@ class TestModeOpKernels(unittest.TestCase): class TestModeOpErrors(unittest.TestCase): - def setUp(self): self.x = paddle.uniform([2, 10, 20, 25], dtype='float32') @@ -161,25 +156,27 @@ class TestModeOpErrors(unittest.TestCase): class TestModeOpInStatic(unittest.TestCase): - def setUp(self): np.random.seed(666) - self.input_data = np.ceil(np.random.random((2, 10, 10)) * 1000, - dtype=np.float64) + self.input_data = np.ceil( + np.random.random((2, 10, 10)) * 1000, dtype=np.float64 + ) def test_run_static(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - input_tensor = paddle.static.data(name="x", 
- shape=[2, 10, 10], - dtype="float64") + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + input_tensor = paddle.static.data( + name="x", shape=[2, 10, 10], dtype="float64" + ) result = paddle.mode(input_tensor, axis=1) expect_value = cal_mode(self.input_data, axis=1)[0] exe = paddle.static.Executor(paddle.CPUPlace()) - paddle_result = exe.run(feed={"x": self.input_data}, - fetch_list=[result])[0] + paddle_result = exe.run( + feed={"x": self.input_data}, fetch_list=[result] + )[0] np.testing.assert_allclose(paddle_result, expect_value, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_modelaverage.py b/python/paddle/fluid/tests/unittests/test_modelaverage.py index 474fa692b476b678f00263ee1281b2fcb5a5d6b6..0ab8aad42719cd0a9903aa8bc98ed6bd32c69cad 100644 --- a/python/paddle/fluid/tests/unittests/test_modelaverage.py +++ b/python/paddle/fluid/tests/unittests/test_modelaverage.py @@ -20,7 +20,6 @@ import paddle.nn as nn class TestModelAverage(unittest.TestCase): - def test_model_average_static(self): paddle.enable_static() place = fluid.CPUPlace() @@ -35,50 +34,75 @@ class TestModelAverage(unittest.TestCase): hidden = fluid.layers.fc(input=data, size=10) loss = paddle.mean(hidden) test_program = train_program.clone() - optimizer = paddle.optimizer.Momentum(learning_rate=0.2, - momentum=0.1) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.2, momentum=0.1 + ) optimizer.minimize(loss) # build ModelAverage optimizer model_average = paddle.incubate.optimizer.ModelAverage( - 0.15, min_average_window=2, max_average_window=10) + 0.15, min_average_window=2, max_average_window=10 + ) exe.run(startup) for i in range(10): x = np.random.random(size=(10, 1)).astype('float32') - latest_b, sum_1, sum_2, sum_3, num_accumulates, old_num_accumulates, num_updates = exe.run( + ( + latest_b, + sum_1, + sum_2, + sum_3, + num_accumulates, + old_num_accumulates, + num_updates, + ) = exe.run( program=train_program, feed={'X': x}, fetch_list=[ - 'fc_0.b_0', 'fc_0.b_0_sum_1_0', 'fc_0.b_0_sum_2_0', - 'fc_0.b_0_sum_3_0', 'fc_0.b_0_num_accumulates_0', - 'fc_0.b_0_old_num_accumulates_0', 'fc_0.b_0_num_updates_0' - ]) + 'fc_0.b_0', + 'fc_0.b_0_sum_1_0', + 'fc_0.b_0_sum_2_0', + 'fc_0.b_0_sum_3_0', + 'fc_0.b_0_num_accumulates_0', + 'fc_0.b_0_old_num_accumulates_0', + 'fc_0.b_0_num_updates_0', + ], + ) self.assertTrue( - np.equal(sum_1, np.zeros(shape=[10], dtype='float32')).all()) + np.equal(sum_1, np.zeros(shape=[10], dtype='float32')).all() + ) self.assertTrue( - np.equal(sum_2, np.zeros(shape=[10], dtype='float32')).all()) + np.equal(sum_2, np.zeros(shape=[10], dtype='float32')).all() + ) self.assertTrue( - np.equal(num_accumulates, np.array([0], dtype='int64')).all()) + np.equal(num_accumulates, np.array([0], dtype='int64')).all() + ) self.assertTrue( - np.equal(old_num_accumulates, np.array([2], dtype='int64')).all()) + np.equal(old_num_accumulates, np.array([2], dtype='int64')).all() + ) self.assertTrue( - np.equal(num_updates, np.array([10], dtype='int64')).all()) + np.equal(num_updates, np.array([10], dtype='int64')).all() + ) - average_b = (sum_1 + sum_2 + sum_3) / (num_accumulates + - old_num_accumulates) + average_b = (sum_1 + sum_2 + sum_3) / ( + num_accumulates + old_num_accumulates + ) # apply ModelAverage with model_average.apply(exe): x = np.random.random(size=(10, 1)).astype('float32') - outs, b = exe.run(program=test_program, - feed={'X': x}, - fetch_list=[loss.name, 'fc_0.b_0']) + outs, b = exe.run( + program=test_program, + feed={'X': x}, 
+ fetch_list=[loss.name, 'fc_0.b_0'], + ) self.assertAlmostEqual(np.mean(average_b), np.mean(b)) x = np.random.random(size=(10, 1)).astype('float32') - outs, b = exe.run(program=test_program, - feed={'X': x}, - fetch_list=[loss.name, 'fc_0.b_0']) + outs, b = exe.run( + program=test_program, + feed={'X': x}, + fetch_list=[loss.name, 'fc_0.b_0'], + ) self.assertAlmostEqual(np.mean(latest_b), np.mean(b)) def test_model_average_dygraph(self): @@ -91,21 +115,20 @@ class TestModelAverage(unittest.TestCase): # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, - (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype( + 'int64' + ) return image, label def __len__(self): return self.num_samples class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -131,23 +154,30 @@ class TestModelAverage(unittest.TestCase): sum_2 = model_average._get_accumulator('sum_2', layer.bias) sum_3 = model_average._get_accumulator('sum_3', layer.bias) num_accumulates = model_average._get_accumulator( - 'num_accumulates', layer.bias) + 'num_accumulates', layer.bias + ) old_num_accumulates = model_average._get_accumulator( - 'old_num_accumulates', layer.bias) + 'old_num_accumulates', layer.bias + ) num_updates = model_average._get_accumulator( - 'num_updates', layer.bias) + 'num_updates', layer.bias + ) - return ((sum_1 + sum_2 + sum_3) / - (num_accumulates + old_num_accumulates)).numpy() + return ( + (sum_1 + sum_2 + sum_3) + / (num_accumulates + old_num_accumulates) + ).numpy() def evaluate(layer, loader, loss_fn, check_param): for batch_id, (image, label) in enumerate(loader()): out = layer(image) loss = loss_fn(out, label) loss.backward() - self.assertAlmostEqual(np.mean(layer.bias.numpy()), - np.mean(check_param), - delta=5e-3) + self.assertAlmostEqual( + np.mean(layer.bias.numpy()), + np.mean(check_param), + delta=5e-3, + ) # print("Evaluate batch {}: loss = {}, bias = {}".format( # batch_id, np.mean(loss.numpy()), layer.bias.numpy())) @@ -155,36 +185,42 @@ class TestModelAverage(unittest.TestCase): layer = LinearNet() loss_fn = nn.CrossEntropyLoss() - optimizer = paddle.optimizer.Momentum(learning_rate=0.2, - momentum=0.1, - parameters=layer.parameters()) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.2, momentum=0.1, parameters=layer.parameters() + ) # build ModelAverage optimizer model_average = paddle.incubate.optimizer.ModelAverage( 0.15, parameters=layer.parameters(), min_average_window=2, - max_average_window=10) + max_average_window=10, + ) # create data loader dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) - loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=2) - eval_loader = paddle.io.DataLoader(dataset, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=1) + loader = paddle.io.DataLoader( + dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=2, + ) + eval_loader = paddle.io.DataLoader( + dataset, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=1, + ) # train check_param = train(layer, loader, loss_fn, optimizer, model_average) # print(check_param) with model_average.apply(need_restore=False): evaluate(layer, eval_loader, loss_fn, check_param) - 
check_param = (model_average._get_accumulator('restore', - layer.bias)).numpy() + check_param = ( + model_average._get_accumulator('restore', layer.bias) + ).numpy() # print(check_param) # print("\nEvaluate With Restored Paramters") model_average.restore() diff --git a/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py b/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py index 1e81a6ae6b33accb5ff04b4401dd62e334c6f90d..99ce37834b5c38a6a65015ee690d10c65834f1cf 100644 --- a/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_modified_huber_loss_op.py @@ -19,27 +19,29 @@ from op_test import OpTest def modified_huber_loss_forward(val): if val < -1: - return -4. * val + return -4.0 * val elif val < 1: - return (1. - val) * (1. - val) + return (1.0 - val) * (1.0 - val) else: - return 0. + return 0.0 class TestModifiedHuberLossOp(OpTest): - def setUp(self): self.op_type = 'modified_huber_loss' samples_num = 100 - x_np = np.random.uniform(-2., 2., (samples_num, 1)).astype('float32') - y_np = np.random.choice([0, 1], samples_num).reshape( - (samples_num, 1)).astype('float32') - product_res = x_np * (2. * y_np - 1.) + x_np = np.random.uniform(-2.0, 2.0, (samples_num, 1)).astype('float32') + y_np = ( + np.random.choice([0, 1], samples_num) + .reshape((samples_num, 1)) + .astype('float32') + ) + product_res = x_np * (2.0 * y_np - 1.0) # keep away from the junction of piecewise function for pos, val in np.ndenumerate(product_res): - while abs(val - 1.) < 0.05: - x_np[pos] = np.random.uniform(-2., 2.) + while abs(val - 1.0) < 0.05: + x_np[pos] = np.random.uniform(-2.0, 2.0) y_np[pos] = np.random.choice([0, 1]) product_res[pos] = x_np[pos] * (2 * y_np[pos] - 1) val = product_res[pos] @@ -49,7 +51,7 @@ class TestModifiedHuberLossOp(OpTest): self.outputs = { 'IntermediateVal': product_res.astype('float32'), - 'Out': loss.reshape((samples_num, 1)).astype('float32') + 'Out': loss.reshape((samples_num, 1)).astype('float32'), } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index e16dfa969f119c77c834d965cce4355f0a73b0e2..991291e36fb7cc19f5c70b226e0135a3c22fbe6d 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -23,14 +23,16 @@ import numpy from paddle.fluid.framework import _test_eager_guard -def calculate_momentum_by_numpy(param, - grad, - mu, - velocity, - use_nesterov, - learning_rate, - regularization_method=None, - regularization_coeff=1.0): +def calculate_momentum_by_numpy( + param, + grad, + mu, + velocity, + use_nesterov, + learning_rate, + regularization_method=None, + regularization_coeff=1.0, +): if regularization_method == "l2_decay": grad = grad + regularization_coeff * param @@ -42,8 +44,9 @@ def calculate_momentum_by_numpy(param, else: velocity_out = mu * velocity + grad if use_nesterov: - param_out = param - grad * learning_rate - \ - velocity_out * mu * learning_rate + param_out = ( + param - grad * learning_rate - velocity_out * mu * learning_rate + ) else: param_out = param - learning_rate * velocity_out @@ -51,7 +54,6 @@ def calculate_momentum_by_numpy(param, class TestMomentumOp1(OpTest): - def setUp(self): self.op_type = "momentum" self.dtype = np.float32 @@ -68,7 +70,7 @@ class TestMomentumOp1(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': 
learning_rate, } self.attrs = {'mu': mu} @@ -79,7 +81,8 @@ class TestMomentumOp1(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -91,7 +94,6 @@ class TestMomentumOp1(OpTest): class TestMomentumOpFp16(TestMomentumOp1): - def init_dtype(self): self.dtype = np.float16 @@ -100,8 +102,7 @@ class TestMomentumOpFp16(TestMomentumOp1): class TestMomentumOp2(OpTest): - '''Test Momentum with default values for attributes - ''' + '''Test Momentum with default values for attributes''' def setUp(self): self.op_type = "momentum" @@ -117,7 +118,7 @@ class TestMomentumOp2(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = {'mu': mu, 'use_nesterov': use_nesterov} @@ -128,7 +129,8 @@ class TestMomentumOp2(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -136,10 +138,10 @@ class TestMomentumOp2(OpTest): self.check_output() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestLarsMomentumOpWithMP(OpTest): - def setUp(self): self.config() self.op_type = "lars_momentum" @@ -166,11 +168,16 @@ class TestLarsMomentumOpWithMP(OpTest): fp32_grad = grad.astype("float32") pnorm = np.sqrt(np.square(master_param).sum()) gnorm = np.sqrt(np.square(fp32_grad).sum()) - local_lr = learning_rate * lars_coeff * pnorm / ( - gnorm + lars_weight_decay * pnorm) + local_lr = ( + learning_rate + * lars_coeff + * pnorm + / (gnorm + lars_weight_decay * pnorm) + ) fp32_grad = fp32_grad * rescale_grad velocity_out = mu * velocity + local_lr * ( - fp32_grad + lars_weight_decay * master_param) + fp32_grad + lars_weight_decay * master_param + ) p_new = master_param - velocity_out param_out = p_new.astype("float16") master_param_out = p_new @@ -183,7 +190,8 @@ class TestLarsMomentumOpWithMP(OpTest): param_outs.append(("SubParam_out_" + str(i), param_out)) master_params.append(("SubMasterParam_" + str(i), master_param)) master_param_outs.append( - ("SubMasterParamOut_" + str(i), master_param_out)) + ("SubMasterParamOut_" + str(i), master_param_out) + ) self.inputs = { 'Param': params, @@ -198,13 +206,13 @@ class TestLarsMomentumOpWithMP(OpTest): 'lars_coeff': lars_coeff, 'lars_weight_decay': [lars_weight_decay], 'multi_precision': True, - 'rescale_grad': rescale_grad + 'rescale_grad': rescale_grad, } self.outputs = { 'ParamOut': param_outs, 'VelocityOut': velocity_outs, - 'MasterParamOut': master_param_outs + 'MasterParamOut': master_param_outs, } def test_check_output(self): @@ -219,7 +227,6 @@ class TestLarsMomentumOpWithMP(OpTest): class TestLarsMomentumOp(OpTest): - def setUp(self): self.config() self.op_type = "lars_momentum" @@ -240,10 +247,15 @@ class TestLarsMomentumOp(OpTest): learning_rate = np.array([0.001]).astype("float32") pnorm = np.sqrt(np.square(param).sum()) gnorm = np.sqrt(np.square(grad).sum()) - local_lr = learning_rate * lars_coeff * pnorm / ( - gnorm + lars_weight_decay * param) + local_lr = ( + learning_rate + * lars_coeff + * pnorm + / (gnorm + lars_weight_decay * param) + ) velocity_out = mu * velocity + local_lr * ( - grad + lars_weight_decay * param) + grad + lars_weight_decay * param + ) param_out 
= param - velocity_out params.append(("SubParam_" + str(i), param)) @@ -257,13 +269,13 @@ class TestLarsMomentumOp(OpTest): 'Param': params, 'Grad': grads, 'Velocity': velocitys, - 'LearningRate': learning_rates + 'LearningRate': learning_rates, } self.attrs = { 'mu': mu, 'lars_coeff': lars_coeff, - 'lars_weight_decay': [lars_weight_decay] + 'lars_weight_decay': [lars_weight_decay], } self.outputs = {'ParamOut': param_outs, 'VelocityOut': velocity_outs} @@ -276,7 +288,6 @@ class TestLarsMomentumOp(OpTest): class TestSparseMomentumOp(unittest.TestCase): - def setUp(self): self.use_nesterov = False self.regularization_method = "" @@ -315,8 +326,9 @@ class TestSparseMomentumOp(unittest.TestCase): velocity_np_array = np.ones((height, row_numel)).astype("float32") velocity.set(velocity_np_array, place) velocity_out = scope.var('VelocityOut').get_tensor() - velocity_out_np_array = np.full((height, row_numel), - 0.0).astype("float32") + velocity_out_np_array = np.full((height, row_numel), 0.0).astype( + "float32" + ) velocity_out.set(velocity_out_np_array, place) # create and initialize LearningRate Variable @@ -325,17 +337,19 @@ class TestSparseMomentumOp(unittest.TestCase): lr.set(lr_array, place) # create and run operator - op = Operator("momentum", - Param='Param', - Grad='Grad', - Velocity='Velocity', - ParamOut='ParamOut', - VelocityOut='VelocityOut', - LearningRate='LearningRate', - mu=mu, - use_nesterov=use_nesterov, - regularization_method=regularization_method, - regularization_coeff=regularization_coeff) + op = Operator( + "momentum", + Param='Param', + Grad='Grad', + Velocity='Velocity', + ParamOut='ParamOut', + VelocityOut='VelocityOut', + LearningRate='LearningRate', + mu=mu, + use_nesterov=use_nesterov, + regularization_method=regularization_method, + regularization_coeff=regularization_coeff, + ) op.run(scope, place) # get and compare result @@ -358,7 +372,8 @@ class TestSparseMomentumOp(unittest.TestCase): use_nesterov=use_nesterov, learning_rate=lr_array, regularization_method=regularization_method, - regularization_coeff=regularization_coeff) + regularization_coeff=regularization_coeff, + ) self.assertTrue((_velocity_out == velocity_out_np_array).all()) self.assertTrue((_param_out == param_out_np_array).all()) @@ -375,13 +390,11 @@ class TestSparseMomentumOp(unittest.TestCase): class TestSparseMomentumOp2(TestSparseMomentumOp): - def init_kernel(self): self.use_nesterov = True class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase): - def setUp(self): self.init_args() self.regularization_method = "" @@ -425,8 +438,9 @@ class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase): velocity_np_array = np.ones((height, row_numel)).astype("float32") velocity.set(velocity_np_array, place) velocity_out = scope.var('VelocityOut').get_tensor() - velocity_out_np_array = np.full((height, row_numel), - 0.0).astype("float32") + velocity_out_np_array = np.full((height, row_numel), 0.0).astype( + "float32" + ) velocity_out.set(velocity_out_np_array, place) # create and initialize LearningRate Variable @@ -435,21 +449,23 @@ class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase): lr.set(lr_array, place) # create and run operator - op = Operator("momentum", - Param='Param', - Grad='Grad', - Velocity='Velocity', - MasterParam='MasterParam', - ParamOut='ParamOut', - VelocityOut='VelocityOut', - MasterParamOut='MasterParamOut', - LearningRate='LearningRate', - mu=mu, - use_nesterov=use_nesterov, - regularization_method=regularization_method, - 
regularization_coeff=regularization_coeff, - multi_precision=True, - rescale_grad=1.0) + op = Operator( + "momentum", + Param='Param', + Grad='Grad', + Velocity='Velocity', + MasterParam='MasterParam', + ParamOut='ParamOut', + VelocityOut='VelocityOut', + MasterParamOut='MasterParamOut', + LearningRate='LearningRate', + mu=mu, + use_nesterov=use_nesterov, + regularization_method=regularization_method, + regularization_coeff=regularization_coeff, + multi_precision=True, + rescale_grad=1.0, + ) op.run(scope, place) # get and compare result @@ -470,7 +486,8 @@ class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase): use_nesterov=use_nesterov, learning_rate=lr_array, regularization_method=regularization_method, - regularization_coeff=regularization_coeff) + regularization_coeff=regularization_coeff, + ) self.assertTrue((_velocity_out == velocity_out_np_array).all()) self.assertTrue((_param_out == param_out_np_array).all()) @@ -484,23 +501,22 @@ class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase): class TestSparseMomentumOpWithMultiPrecision2( - TestSparseMomentumOpWithMultiPrecision): - + TestSparseMomentumOpWithMultiPrecision +): def init_args(self): self.use_nesterov = True class TestMomentumV2(unittest.TestCase): - def test_momentum_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.Momentum(learning_rate=0.01, - momentum=0.9, - parameters=linear.parameters()) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, momentum=0.9, parameters=linear.parameters() + ) out = linear(a) out.backward() adam.step() @@ -517,13 +533,15 @@ class TestMomentumV2(unittest.TestCase): cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) - rms_optimizer = paddle.optimizer.Momentum(learning_rate=0.1, - momentum=0.9) + rms_optimizer = paddle.optimizer.Momentum( + learning_rate=0.1, momentum=0.9 + ) rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -531,9 +549,9 @@ class TestMomentumV2(unittest.TestCase): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) def test_raise_error(self): - self.assertRaises(ValueError, - paddle.optimizer.Momentum, - learning_rate=None) + self.assertRaises( + ValueError, paddle.optimizer.Momentum, learning_rate=None + ) self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None) def test_api_eager_dygraph(self): @@ -543,7 +561,6 @@ class TestMomentumV2(unittest.TestCase): class TestMomentumOpWithDecay(OpTest): - def setUp(self): self.op_type = "momentum" self.dtype = np.float32 @@ -565,14 +582,14 @@ class TestMomentumOpWithDecay(OpTest): 'Param': param, 'Grad': grad, 'Velocity': velocity, - 'LearningRate': learning_rate + 'LearningRate': learning_rate, } self.attrs = { 'mu': mu, 'use_nesterov': use_nesterov, 'regularization_method': regularization_method, - 'regularization_coeff': regularization_coeff + 'regularization_coeff': regularization_coeff, } grad = grad + regularization_coeff * param @@ -583,7 +600,8 @@ class TestMomentumOpWithDecay(OpTest): mu=mu, velocity=velocity, use_nesterov=use_nesterov, - 
learning_rate=learning_rate) + learning_rate=learning_rate, + ) self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -596,7 +614,6 @@ class TestMomentumOpWithDecay(OpTest): class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): - def init_config(self): self.dtype = np.float16 @@ -606,13 +623,11 @@ class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay): class TestMomentumOpWithDecay2(TestMomentumOpWithDecay): - def init_config(self): self.use_nesterov = False class TestSparseMomentumOpWithDecay(TestSparseMomentumOp): - def setUp(self): self.use_nesterov = False self.regularization_method = 'l2_decay' @@ -620,13 +635,11 @@ class TestSparseMomentumOpWithDecay(TestSparseMomentumOp): class TestSparseMomentumOpWithDecay2(TestSparseMomentumOpWithDecay): - def init_kernel(self): self.use_nesterov = True class TestMomentumOpWithDecayAPI(unittest.TestCase): - def _test_momentum_dygraph_common(self, regularization): paddle.disable_static() inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") @@ -639,13 +652,16 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): learning_rate=0.01, momentum=0.9, parameter_list=linear.parameters(), - regularization=regularization) + regularization=regularization, + ) momentum.minimize(loss) def test_momentum_dygraph_1(self): self._test_momentum_dygraph_common( regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ) + ) def test_momentum_static(self): paddle.enable_static() @@ -659,12 +675,14 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): avg_cost = paddle.mean(cost) momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum( - learning_rate=0.1, momentum=0.9) + learning_rate=0.1, momentum=0.9 + ) momentum_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -673,23 +691,23 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): class TestFusedMomentumWithDecayAPI(unittest.TestCase): - def get_program(self, weight_attr, bias_attr=False): main_program = paddle.static.Program() startup_program = paddle.static.Program() - with paddle.static.program_guard(main_program=main_program, - startup_program=startup_program): + with paddle.static.program_guard( + main_program=main_program, startup_program=startup_program + ): x = paddle.static.data(name='x', shape=[10, 10]) - linear = paddle.nn.Linear(10, - 10, - weight_attr=weight_attr, - bias_attr=bias_attr) + linear = paddle.nn.Linear( + 10, 10, weight_attr=weight_attr, bias_attr=bias_attr + ) out = linear(x) loss = paddle.mean(out) optimizer = paddle.optimizer.Momentum( learning_rate=0.01, momentum=0.9, - weight_decay=paddle.regularizer.L2Decay(0.5)) + weight_decay=paddle.regularizer.L2Decay(0.5), + ) optimizer.minimize(loss) return main_program @@ -698,7 +716,8 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): weight_attr = paddle.ParamAttr( name="weight", initializer=paddle.nn.initializer.Constant(value=0.5), - regularizer=paddle.regularizer.L2Decay(0.1)) + regularizer=paddle.regularizer.L2Decay(0.1), + ) program = self.get_program(weight_attr, bias_attr=False) ops = program.global_block().ops @@ -713,11 +732,13 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): weight_attr = paddle.ParamAttr( name="weight", 
initializer=paddle.nn.initializer.Constant(value=0.5), - regularizer=paddle.regularizer.L1Decay(0.1)) + regularizer=paddle.regularizer.L1Decay(0.1), + ) bias_attr = paddle.ParamAttr( name="bias", - initializer=paddle.nn.initializer.Constant(value=0.), - regularizer=None) + initializer=paddle.nn.initializer.Constant(value=0.0), + regularizer=None, + ) program = self.get_program(weight_attr, bias_attr) ops = program.global_block().ops @@ -732,8 +753,9 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): self.assertEqual(ops[-1].attr('regularization_coeff'), 0) if 'bias' in ops[-2].input('Param'): self.assertEqual(ops[-2].attr('regularization_method'), 'l2_decay') - self.assertEqual(ops[-2].attr('regularization_coeff'), - np.float32(0.5)) + self.assertEqual( + ops[-2].attr('regularization_coeff'), np.float32(0.5) + ) def test_param_has_no_regularizer(self): paddle.enable_static() @@ -747,11 +769,11 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): - def __update_params(self, momentum, linear): for i in range(10): - inp = paddle.full(shape=[2, 2], fill_value=i, - dtype='float32').astype("float32") + inp = paddle.full( + shape=[2, 2], fill_value=i, dtype='float32' + ).astype("float32") inp = paddle.to_tensor(inp) out = linear(inp) loss = paddle.mean(out) @@ -766,32 +788,39 @@ class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_old = paddle.fluid.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_old.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_old, linear=linear_old) linear_new = paddle.nn.Linear( 2, 2, weight_attr=paddle.nn.initializer.Constant(value=2.0), - bias_attr=paddle.nn.initializer.Constant(value=2.0)) + bias_attr=paddle.nn.initializer.Constant(value=2.0), + ) momentum_new = paddle.fluid.contrib.optimizer.Momentum( learning_rate=0.01, momentum=0.9, parameter_list=linear_new.parameters(), regularization=paddle.fluid.regularizer.L2Decay( - regularization_coeff=0.1)) + regularization_coeff=0.1 + ), + ) self.__update_params(momentum=momentum_new, linear=linear_new) self.assertEqual( (linear_old.weight.numpy() == linear_new.weight.numpy()).all(), True, - 'the param weight updated by two Momentum optimizers should equal') + 'the param weight updated by two Momentum optimizers should equal', + ) def test_vs(self, place=fluid.CPUPlace()): places = [fluid.CPUPlace()] @@ -803,7 +832,6 @@ class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase): class TestMomentumV2Group(TestMomentumV2): - def test_momentum_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -811,22 +839,20 @@ class TestMomentumV2Group(TestMomentumV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
- adam = paddle.optimizer.Momentum(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001, - 'learning_rate': - 0.1, - 'momentum': - 0.99 - }], - weight_decay=0.1, - momentum=0.9) + adam = paddle.optimizer.Momentum( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + 'momentum': 0.99, + }, + ], + weight_decay=0.1, + momentum=0.9, + ) out = linear_1(a) out = linear_2(out) out.backward() @@ -835,13 +861,14 @@ class TestMomentumV2Group(TestMomentumV2): class TestMultiTensorMomentumDygraph(unittest.TestCase): - - def _momentum_optimize_dygraph(self, - place, - use_param_attr=False, - use_param_group=False, - use_amp=False, - use_multi_tensor=False): + def _momentum_optimize_dygraph( + self, + place, + use_param_attr=False, + use_param_group=False, + use_amp=False, + use_multi_tensor=False, + ): paddle.disable_static() paddle.seed(10) paddle.set_device(place) @@ -849,7 +876,8 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): weight_attr = paddle.ParamAttr( learning_rate=0.5, regularizer=paddle.regularizer.L2Decay(1.0), - trainable=True) + trainable=True, + ) if use_param_attr: model = paddle.nn.Linear(5, 5, weight_attr) else: @@ -858,17 +886,21 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): optimizer = paddle.optimizer.Momentum( parameters=model.parameters(), use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + multi_precision=use_amp, + ) else: optimizer = paddle.optimizer.Momentum( - parameters=[{ - 'params': model.parameters(), - 'weight_decay': 0.001, - 'learning_rate': 0.1, - 'momentum': 0.99 - }], + parameters=[ + { + 'params': model.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + 'momentum': 0.99, + } + ], use_multi_tensor=use_multi_tensor, - multi_precision=use_amp) + multi_precision=use_amp, + ) for idx in range(5): if place == 'gpu' and use_amp == True: model = paddle.amp.decorate(models=model, level='O2') @@ -898,9 +930,11 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): def _check_with_place_amp(self, place, use_amp): output1, params1 = self._momentum_optimize_dygraph( - place=place, use_amp=use_amp, use_multi_tensor=True) + place=place, use_amp=use_amp, use_multi_tensor=True + ) output2, params2 = self._momentum_optimize_dygraph( - place=place, use_amp=use_amp, use_multi_tensor=False) + place=place, use_amp=use_amp, use_multi_tensor=False + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): @@ -911,12 +945,14 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): place=place, use_amp=use_amp, use_param_attr=True, - use_multi_tensor=True) + use_multi_tensor=True, + ) output2, params2 = self._momentum_optimize_dygraph( place=place, use_amp=use_amp, use_param_attr=True, - use_multi_tensor=False) + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) @@ -926,12 +962,14 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): place=place, use_amp=use_amp, use_param_group=True, - use_multi_tensor=True) + use_multi_tensor=True, + ) output2, params2 = self._momentum_optimize_dygraph( place=place, use_amp=use_amp, use_param_group=True, - use_multi_tensor=False) + use_multi_tensor=False, + ) np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in 
range(len(params1)): np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) @@ -950,11 +988,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): class TestMultiTensorMomentumStatic(unittest.TestCase): - - def _momentum_optimize_static(self, - place, - use_amp=False, - use_multi_tensor=False): + def _momentum_optimize_static( + self, place, use_amp=False, use_multi_tensor=False + ): paddle.enable_static() paddle.seed(10) np.random.seed(10) @@ -963,24 +999,26 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): exe = paddle.static.Executor(place=place) train_program = paddle.static.Program() startup_program = paddle.static.Program() - optimizer = paddle.optimizer.Momentum(multi_precision=use_amp, - use_multi_tensor=use_multi_tensor) + optimizer = paddle.optimizer.Momentum( + multi_precision=use_amp, use_multi_tensor=use_multi_tensor + ) if use_amp: optimizer = paddle.static.amp.decorate( optimizer, init_loss_scaling=128.0, use_dynamic_loss_scaling=True, use_pure_fp16=True, - use_fp16_guard=False) + use_fp16_guard=False, + ) with paddle.static.program_guard(train_program, startup_program): if use_amp: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float16') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float16' + ) else: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float32') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer.minimize(loss) @@ -992,9 +1030,9 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): x = numpy.random.random(size=(2, 2)).astype('float32') out = [] for idx in range(5): - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + train_program, feed={"X": x}, fetch_list=[loss.name] + ) out.append(loss_data) return out @@ -1005,12 +1043,12 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): return places def _check_with_place_amp(self, place, use_amp): - output1 = self._momentum_optimize_static(place=place, - use_amp=use_amp, - use_multi_tensor=True) - output2 = self._momentum_optimize_static(place=place, - use_amp=use_amp, - use_multi_tensor=False) + output1 = self._momentum_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=True + ) + output2 = self._momentum_optimize_static( + place=place, use_amp=use_amp, use_multi_tensor=False + ) for idx in range(len(output1)): np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_monitor.py b/python/paddle/fluid/tests/unittests/test_monitor.py index 8649f11821eb663aa13f91a3f7de3761d4510d37..dafb23c7d411e672a5b8e47a298b6d9ffc4a6865 100644 --- a/python/paddle/fluid/tests/unittests/test_monitor.py +++ b/python/paddle/fluid/tests/unittests/test_monitor.py @@ -27,7 +27,7 @@ import tempfile class TestDatasetWithStat(unittest.TestCase): - """ TestCases for Dataset. 
""" + """TestCases for Dataset.""" def setUp(self): self.use_data_loader = False @@ -53,10 +53,9 @@ class TestDatasetWithStat(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, - shape=[1], - dtype="int64", - lod_level=1) + var = fluid.layers.data( + name=slot, shape=[1], dtype="int64", lod_level=1 + ) slots_vars.append(var) embs = [] @@ -78,7 +77,8 @@ class TestDatasetWithStat(unittest.TestCase): exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last) + dataset, fluid.cpu_places(), self.drop_last + ) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) @@ -86,11 +86,13 @@ class TestDatasetWithStat(unittest.TestCase): else: for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), - dataset, - fetch_list=[embs[0], embs[1]], - fetch_info=["emb0", "emb1"], - print_period=1) + exe.train_from_dataset( + fluid.default_main_program(), + dataset, + fetch_list=[embs[0], embs[1]], + fetch_info=["emb0", "emb1"], + print_period=1, + ) except Exception as e: self.assertTrue(False) diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py index ef2a494c219e77c492875327d2d424c2ae0ec920..b0b59694406a7fad7860f03f0d19547f9ce00603 100644 --- a/python/paddle/fluid/tests/unittests/test_mse_loss.py +++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py @@ -22,7 +22,6 @@ from paddle.fluid.executor import Executor class TestMseLoss(unittest.TestCase): - def test_mse_loss(self): input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32") label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32") @@ -34,24 +33,22 @@ class TestMseLoss(unittest.TestCase): label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32") output = layers.mse_loss(input=input_var, label=label_var) - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) - result, = exe.run(fluid.default_main_program(), - feed={ - "input": input_val, - "label": label_val - }, - fetch_list=[output]) + (result,) = exe.run( + fluid.default_main_program(), + feed={"input": input_val, "label": label_val}, + fetch_list=[output], + ) np.testing.assert_allclose(np_result, result, rtol=1e-05) class TestMseInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): input = [256, 3] label = fluid.data(name='label1', shape=[None, 3], dtype='float32') @@ -68,7 +65,6 @@ class TestMseInvalidInput(unittest.TestCase): class TestNNMseLoss(unittest.TestCase): - def test_NNMseLoss_mean(self): for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]: input_np = np.random.uniform(0.1, 0.5, dim).astype("float32") @@ -76,30 +72,34 @@ class TestNNMseLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.layers.data(name='input', - shape=dim, - dtype='float32') - label = fluid.layers.data(name='label', - shape=dim, - 
dtype='float32') + input = fluid.layers.data( + name='input', shape=dim, dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.loss.MSELoss() ret = mse_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[ret]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[ret], + ) with fluid.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss() - dy_ret = mse_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = mse_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_result = dy_ret.numpy() sub = input_np - label_np @@ -116,30 +116,34 @@ class TestNNMseLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.layers.data(name='input', - shape=dim, - dtype='float32') - label = fluid.layers.data(name='label', - shape=dim, - dtype='float32') + input = fluid.layers.data( + name='input', shape=dim, dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.loss.MSELoss(reduction='sum') ret = mse_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[ret]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[ret], + ) with fluid.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss(reduction='sum') - dy_ret = mse_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = mse_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_result = dy_ret.numpy() sub = input_np - label_np @@ -156,34 +160,38 @@ class TestNNMseLoss(unittest.TestCase): paddle.enable_static() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.layers.data(name='input', - shape=dim, - dtype='float32') - label = fluid.layers.data(name='label', - shape=dim, - dtype='float32') + input = fluid.layers.data( + name='input', shape=dim, dtype='float32' + ) + label = fluid.layers.data( + name='label', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.loss.MSELoss(reduction='none') ret = mse_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[ret]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[ret], + ) with fluid.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss(reduction='none') - dy_ret = mse_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = mse_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_result = dy_ret.numpy() sub = input_np - label_np - expected = (sub * sub) + expected = sub * sub np.testing.assert_allclose(static_result, 
expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) @@ -191,7 +199,6 @@ class TestNNMseLoss(unittest.TestCase): class TestNNFunctionalMseLoss(unittest.TestCase): - def test_NNFunctionalMseLoss_mean(self): for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]: input_np = np.random.uniform(0.1, 0.5, dim).astype("float32") @@ -199,30 +206,32 @@ class TestNNFunctionalMseLoss(unittest.TestCase): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=dim, - dtype='float32') - target = paddle.fluid.data(name='target', - shape=dim, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=dim, dtype='float32' + ) + target = paddle.fluid.data( + name='target', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean') exe = paddle.static.Executor(place) exe.run(startup_prog) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "target": target_np - }, - fetch_list=[mse_loss]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "target": target_np}, + fetch_list=[mse_loss], + ) paddle.disable_static() - dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np), - paddle.to_tensor(target_np), - 'mean') + dy_ret = paddle.nn.functional.mse_loss( + paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'mean' + ) dy_result = dy_ret.numpy() sub = input_np - target_np @@ -239,30 +248,32 @@ class TestNNFunctionalMseLoss(unittest.TestCase): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data(name='input', - shape=dim, - dtype='float32') - target = paddle.fluid.data(name='target', - shape=dim, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=dim, dtype='float32' + ) + target = paddle.fluid.data( + name='target', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum') exe = paddle.static.Executor(place) exe.run(startup_prog) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "target": target_np - }, - fetch_list=[mse_loss]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "target": target_np}, + fetch_list=[mse_loss], + ) paddle.disable_static() - dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np), - paddle.to_tensor(target_np), - 'sum') + dy_ret = paddle.nn.functional.mse_loss( + paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'sum' + ) dy_result = dy_ret.numpy() sub = input_np - target_np @@ -279,30 +290,32 @@ class TestNNFunctionalMseLoss(unittest.TestCase): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) with paddle.static.program_guard(prog, startup_prog): - input = 
paddle.fluid.data(name='input', - shape=dim, - dtype='float32') - target = paddle.fluid.data(name='target', - shape=dim, - dtype='float32') + input = paddle.fluid.data( + name='input', shape=dim, dtype='float32' + ) + target = paddle.fluid.data( + name='target', shape=dim, dtype='float32' + ) mse_loss = paddle.nn.functional.mse_loss(input, target, 'none') exe = paddle.static.Executor(place) exe.run(startup_prog) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "target": target_np - }, - fetch_list=[mse_loss]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "target": target_np}, + fetch_list=[mse_loss], + ) paddle.disable_static() - dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np), - paddle.to_tensor(target_np), - 'none') + dy_ret = paddle.nn.functional.mse_loss( + paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'none' + ) dy_result = dy_ret.numpy() sub = input_np - target_np diff --git a/python/paddle/fluid/tests/unittests/test_mul_nn_grad.py b/python/paddle/fluid/tests/unittests/test_mul_nn_grad.py index 22000cb1808ba133e81ff2b9562b7e79bec9a33c..94e12e337ec91afc23beb9b7ecedab0d9742cf76 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_mul_nn_grad.py @@ -26,7 +26,6 @@ paddle.enable_static() class TestMulGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): prog = fluid.Program() @@ -45,7 +44,6 @@ class TestMulGradCheck(unittest.TestCase): class TestMulDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): # the shape of input variable should be clearly specified, not inlcude -1. @@ -62,11 +60,9 @@ class TestMulDoubleGradCheck(unittest.TestCase): x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, y_shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -77,7 +73,6 @@ class TestMulDoubleGradCheck(unittest.TestCase): class TestMatmulDoubleGradCheck(unittest.TestCase): - def setUp(self): self.init_test() @@ -92,25 +87,21 @@ class TestMatmulDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = layers.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = layers.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = layers.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = layers.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = layers.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = layers.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -121,7 +112,6 @@ class TestMatmulDoubleGradCheck(unittest.TestCase): def TestMatmulDoubleGradCheckCase1(TestMatmulDoubleGradCheck): - def init_test(self): self.x_shape = [2, 3] self.y_shape = [3, 2] @@ -130,7 +120,6 @@ def TestMatmulDoubleGradCheckCase1(TestMatmulDoubleGradCheck): def 
TestMatmulDoubleGradCheckCase2(TestMatmulDoubleGradCheck): - def init_test(self): self.x_shape = [2, 4, 3] self.y_shape = [2, 4, 5] @@ -139,7 +128,6 @@ def TestMatmulDoubleGradCheckCase2(TestMatmulDoubleGradCheck): def TestMatmulDoubleGradCheckCase3(TestMatmulDoubleGradCheck): - def init_test(self): self.x_shape = [2, 3, 4, 5] self.y_shape = [2, 3, 3, 5] @@ -148,7 +136,6 @@ def TestMatmulDoubleGradCheckCase3(TestMatmulDoubleGradCheck): def TestMatmulDoubleGradCheckCase4(TestMatmulDoubleGradCheck): - def init_test(self): self.x_shape = [2, 3, 4] self.y_shape = [4, 3] diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index 0236f34ea8bf92f3b3eb092c5b79ddc618eb1ddf..5469caed15753043d1fffbfc6c8723f3dc1a1e3e 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -24,14 +24,13 @@ from paddle.fluid import Program, program_guard class TestMulOp(OpTest): - def setUp(self): self.op_type = "mul" self.dtype = np.float64 self.init_dtype_type() self.inputs = { 'X': np.random.random((20, 5)).astype(self.dtype), - 'Y': np.random.random((5, 21)).astype(self.dtype) + 'Y': np.random.random((5, 21)).astype(self.dtype), } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} @@ -45,27 +44,26 @@ class TestMulOp(OpTest): self.check_grad(['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.5, - no_grad_set=set("X")) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X") + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.5, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y') + ) class TestMulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of mul_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + x2 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.mul, x1, x2) # The input dtype of mul_op must be float32 or float64. 
x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32") @@ -74,21 +72,22 @@ class TestMulOpError(unittest.TestCase): class TestMulOp2(OpTest): - def setUp(self): self.op_type = "mul" self.dtype = np.float64 self.init_dtype_type() self.inputs = { 'X': np.random.random((3, 4, 2, 9)).astype(self.dtype), - 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype) + 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype), } self.attrs = { 'x_num_col_dims': 2, 'y_num_col_dims': 2, } - result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9), - self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3)) + result = np.dot( + self.inputs['X'].reshape(3 * 4, 2 * 9), + self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3), + ) result = result.reshape(3, 4, 1, 2, 3) self.outputs = {'Out': result} @@ -102,22 +101,20 @@ class TestMulOp2(OpTest): self.check_grad(['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.5, - no_grad_set=set('X')) + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X') + ) def test_check_grad_ignore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.5, - no_grad_set=set('Y')) + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y') + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16MulOp1(TestMulOp): - def init_dtype_type(self): self.dtype = np.float16 @@ -129,31 +126,37 @@ class TestFP16MulOp1(TestMulOp): def test_check_grad_normal(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=0.5) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.5 + ) def test_check_grad_ingore_x(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.5, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.5, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.5, - no_grad_set=set('Y')) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.5, + no_grad_set=set('Y'), + ) + + +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16MulOp2(TestMulOp2): - def init_dtype_type(self): self.dtype = np.float16 @@ -165,25 +168,31 @@ class TestFP16MulOp2(TestMulOp2): def test_check_grad_normal(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=0.9) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.9 + ) def test_check_grad_ingore_x(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.5, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.5, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X'], - 'Out', - 
max_relative_error=0.9, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.9, + no_grad_set=set('Y'), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py index ce188cac5c7d2333b74466aa79b54974ae832561..96d8c7572eef070b5434d5e5f0e2662ecc2d886c 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py @@ -23,10 +23,9 @@ from paddle.fluid.framework import _test_eager_guard paddle.enable_static() -#the unittest of multi_dot -#compare the result of paddle multi_dot and numpy multi_dot +# the unittest of multi_dot +# compare the result of paddle multi_dot and numpy multi_dot class TestMultiDotOp(OpTest): - def setUp(self): self.op_type = "multi_dot" self.python_api = paddle.linalg.multi_dot @@ -50,9 +49,8 @@ class TestMultiDotOp(OpTest): self.check_grad(['x1'], 'Out', check_eager=True) -#(A*B)*C +# (A*B)*C class TestMultiDotOp3Mat(TestMultiDotOp): - def get_inputs_and_outputs(self): self.A = np.random.random((2, 10)).astype(self.dtype) self.B = np.random.random((10, 4)).astype(self.dtype) @@ -66,9 +64,8 @@ class TestMultiDotOp3Mat(TestMultiDotOp): self.check_grad(['x2'], 'Out', check_eager=True) -#A*(B*C) +# A*(B*C) class TestMultiDotOp3Mat2(TestMultiDotOp): - def get_inputs_and_outputs(self): self.A = np.random.random((3, 4)).astype(self.dtype) self.B = np.random.random((4, 8)).astype(self.dtype) @@ -83,15 +80,18 @@ class TestMultiDotOp3Mat2(TestMultiDotOp): class TestMultiDotOp4Mat(TestMultiDotOp): - def get_inputs_and_outputs(self): self.A = np.random.random((8, 6)).astype(self.dtype) self.B = np.random.random((6, 3)).astype(self.dtype) self.C = np.random.random((3, 4)).astype(self.dtype) self.D = np.random.random((4, 5)).astype(self.dtype) self.inputs = { - 'X': [('x0', self.A), ('x1', self.B), ('x2', self.C), - ('x3', self.D)] + 'X': [ + ('x0', self.A), + ('x1', self.B), + ('x2', self.C), + ('x3', self.D), + ] } self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} @@ -103,7 +103,6 @@ class TestMultiDotOp4Mat(TestMultiDotOp): class TestMultiDotOpFirst1D(TestMultiDotOp): - def get_inputs_and_outputs(self): self.A = np.random.random((4)).astype(self.dtype) self.B = np.random.random((4, 3)).astype(self.dtype) @@ -112,7 +111,6 @@ class TestMultiDotOpFirst1D(TestMultiDotOp): class TestMultiDotOp3MatFirst1D(TestMultiDotOp3Mat): - def get_inputs_and_outputs(self): self.A = np.random.random((4)).astype(self.dtype) self.B = np.random.random((4, 3)).astype(self.dtype) @@ -122,21 +120,23 @@ class TestMultiDotOp3MatFirst1D(TestMultiDotOp3Mat): class TestMultiDotOp4MatFirst1D(TestMultiDotOp4Mat): - def get_inputs_and_outputs(self): self.A = np.random.random((4)).astype(self.dtype) self.B = np.random.random((4, 3)).astype(self.dtype) self.C = np.random.random((3, 4)).astype(self.dtype) self.D = np.random.random((4, 5)).astype(self.dtype) self.inputs = { - 'X': [('x0', self.A), ('x1', self.B), ('x2', self.C), - ('x3', self.D)] + 'X': [ + ('x0', self.A), + ('x1', self.B), + ('x2', self.C), + ('x3', self.D), + ] } self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} class TestMultiDotOpLast1D(TestMultiDotOp): - def get_inputs_and_outputs(self): self.A = np.random.random((3, 6)).astype(self.dtype) self.B = np.random.random((6)).astype(self.dtype) @@ -145,7 +145,6 @@ class TestMultiDotOpLast1D(TestMultiDotOp): class 
TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat): - def get_inputs_and_outputs(self): self.A = np.random.random((2, 4)).astype(self.dtype) self.B = np.random.random((4, 3)).astype(self.dtype) @@ -160,32 +159,33 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat): class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat): - def get_inputs_and_outputs(self): self.A = np.random.random((2, 3)).astype(self.dtype) self.B = np.random.random((3, 2)).astype(self.dtype) self.C = np.random.random((2, 3)).astype(self.dtype) self.D = np.random.random((3)).astype(self.dtype) self.inputs = { - 'X': [('x0', self.A), ('x1', self.B), ('x2', self.C), - ('x3', self.D)] + 'X': [ + ('x0', self.A), + ('x1', self.B), + ('x2', self.C), + ('x3', self.D), + ] } self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} class TestMultiDotOpFirstAndLast1D(TestMultiDotOp): - def get_inputs_and_outputs(self): - self.A = np.random.random((4, )).astype(self.dtype) + self.A = np.random.random((4,)).astype(self.dtype) self.B = np.random.random((4)).astype(self.dtype) self.inputs = {'X': [('x0', self.A), ('x1', self.B)]} self.outputs = {'Out': multi_dot([self.A, self.B])} class TestMultiDotOp3MatFirstAndLast1D(TestMultiDotOp3Mat): - def get_inputs_and_outputs(self): - self.A = np.random.random((6, )).astype(self.dtype) + self.A = np.random.random((6,)).astype(self.dtype) self.B = np.random.random((6, 4)).astype(self.dtype) self.C = np.random.random((4)).astype(self.dtype) self.inputs = {'X': [('x0', self.A), ('x1', self.B), ('x2', self.C)]} @@ -193,52 +193,57 @@ class TestMultiDotOp3MatFirstAndLast1D(TestMultiDotOp3Mat): class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat): - def get_inputs_and_outputs(self): - self.A = np.random.random((3, )).astype(self.dtype) + self.A = np.random.random((3,)).astype(self.dtype) self.B = np.random.random((3, 4)).astype(self.dtype) self.C = np.random.random((4, 2)).astype(self.dtype) self.D = np.random.random((2)).astype(self.dtype) self.inputs = { - 'X': [('x0', self.A), ('x1', self.B), ('x2', self.C), - ('x3', self.D)] + 'X': [ + ('x0', self.A), + ('x1', self.B), + ('x2', self.C), + ('x3', self.D), + ] } self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])} #####python API test####### class TestMultiDotOpError(unittest.TestCase): - def test_errors(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): # The inputs type of multi_dot must be list matrix. input1 = 12 - self.assertRaises(TypeError, paddle.linalg.multi_dot, - [input1, input1]) + self.assertRaises( + TypeError, paddle.linalg.multi_dot, [input1, input1] + ) # The inputs dtype of multi_dot must be float64, float64 or float16. 
- input2 = paddle.static.data(name='input2', - shape=[10, 10], - dtype="int32") - self.assertRaises(TypeError, paddle.linalg.multi_dot, - [input2, input2]) + input2 = paddle.static.data( + name='input2', shape=[10, 10], dtype="int32" + ) + self.assertRaises( + TypeError, paddle.linalg.multi_dot, [input2, input2] + ) # the number of tensor must be larger than 1 x0 = paddle.static.data(name='x0', shape=[3, 2], dtype="float64") self.assertRaises(ValueError, paddle.linalg.multi_dot, [x0]) - #the first tensor must be 1D or 2D + # the first tensor must be 1D or 2D x1 = paddle.static.data(name='x1', shape=[3, 2, 3], dtype="float64") x2 = paddle.static.data(name='x2', shape=[3, 2], dtype="float64") self.assertRaises(ValueError, paddle.linalg.multi_dot, [x1, x2]) - #the last tensor must be 1D or 2D + # the last tensor must be 1D or 2D x3 = paddle.static.data(name='x3', shape=[3, 2], dtype="float64") x4 = paddle.static.data(name='x4', shape=[3, 2, 2], dtype="float64") self.assertRaises(ValueError, paddle.linalg.multi_dot, [x3, x4]) - #the tensor must be 2D, except first and last tensor + # the tensor must be 2D, except first and last tensor x5 = paddle.static.data(name='x5', shape=[3, 2], dtype="float64") x6 = paddle.static.data(name='x6', shape=[2], dtype="float64") x7 = paddle.static.data(name='x7', shape=[2, 2], dtype="float64") @@ -246,7 +251,6 @@ class TestMultiDotOpError(unittest.TestCase): class APITestMultiDot(unittest.TestCase): - def test_out(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): @@ -256,11 +260,9 @@ class APITestMultiDot(unittest.TestCase): exe = paddle.static.Executor(paddle.CPUPlace()) data1 = np.random.rand(3, 2).astype("float64") data2 = np.random.rand(2, 3).astype("float64") - np_res, = exe.run(feed={ - 'x0': data1, - 'x1': data2 - }, - fetch_list=[result]) + (np_res,) = exe.run( + feed={'x0': data1, 'x1': data2}, fetch_list=[result] + ) expected_result = np.linalg.multi_dot([data1, data2]) np.testing.assert_allclose( @@ -269,7 +271,9 @@ class APITestMultiDot(unittest.TestCase): rtol=1e-05, atol=1e-05, err_msg='two value is {}\n{}, check diff!'.format( - np_res, expected_result)) + np_res, expected_result + ), + ) def test_dygraph_without_out(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py index b26007a83ab860914b6634db811ec08d467da5ac..b5236553ca665ea8e57bfe1f8480beabc1ec8e64 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_multi_label_soft_margin_loss.py @@ -24,7 +24,8 @@ def call_MultiLabelSoftMarginLoss_layer( reduction='mean', ): multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss( - weight=weight, reduction=reduction) + weight=weight, reduction=reduction + ) res = multilabel_margin_loss( input=input, label=label, @@ -47,55 +48,52 @@ def call_MultiLabelSoftMarginLoss_functional( return res -def test_static(place, - input_np, - label_np, - weight_np=None, - reduction='mean', - functional=False): +def test_static( + place, + input_np, + label_np, + weight_np=None, + reduction='mean', + functional=False, +): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype='float64') - label = paddle.static.data(name='label', - 
shape=label_np.shape, - dtype='float64') + input = paddle.static.data( + name='input', shape=input_np.shape, dtype='float64' + ) + label = paddle.static.data( + name='label', shape=label_np.shape, dtype='float64' + ) feed_dict = { "input": input_np, "label": label_np, } weight = None if weight_np is not None: - weight = paddle.static.data(name='weight', - shape=weight_np.shape, - dtype='float64') + weight = paddle.static.data( + name='weight', shape=weight_np.shape, dtype='float64' + ) feed_dict['weight'] = weight_np if functional: - res = call_MultiLabelSoftMarginLoss_functional(input=input, - label=label, - weight=weight, - reduction=reduction) + res = call_MultiLabelSoftMarginLoss_functional( + input=input, label=label, weight=weight, reduction=reduction + ) else: - res = call_MultiLabelSoftMarginLoss_layer(input=input, - label=label, - weight=weight, - reduction=reduction) + res = call_MultiLabelSoftMarginLoss_layer( + input=input, label=label, weight=weight, reduction=reduction + ) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res]) + (static_result,) = exe.run(prog, feed=feed_dict, fetch_list=[res]) return static_result -def test_dygraph(place, - input_np, - label_np, - weight=None, - reduction='mean', - functional=False): +def test_dygraph( + place, input_np, label_np, weight=None, reduction='mean', functional=False +): with paddle.fluid.dygraph.base.guard(): input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) @@ -104,12 +102,12 @@ def test_dygraph(place, if functional: dy_res = call_MultiLabelSoftMarginLoss_functional( - input=input, label=label, weight=weight, reduction=reduction) + input=input, label=label, weight=weight, reduction=reduction + ) else: - dy_res = call_MultiLabelSoftMarginLoss_layer(input=input, - label=label, - weight=weight, - reduction=reduction) + dy_res = call_MultiLabelSoftMarginLoss_layer( + input=input, label=label, weight=weight, reduction=reduction + ) dy_result = dy_res.numpy() return dy_result @@ -120,7 +118,6 @@ def calc_multilabel_margin_loss( weight=None, reduction="mean", ): - def LogSigmoid(x): return np.log(1 / (1 + np.exp(-x))) @@ -140,7 +137,6 @@ def calc_multilabel_margin_loss( class TestMultiLabelMarginLoss(unittest.TestCase): - def test_MultiLabelSoftMarginLoss(self): input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) @@ -151,52 +147,64 @@ class TestMultiLabelMarginLoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - expected = calc_multilabel_margin_loss(input=input, - label=label, - reduction=reduction) - - dy_result = test_dygraph(place=place, - input_np=input, - label_np=label, - reduction=reduction) - - static_result = test_static(place=place, - input_np=input, - label_np=label, - reduction=reduction) + expected = calc_multilabel_margin_loss( + input=input, label=label, reduction=reduction + ) + + dy_result = test_dygraph( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + ) + + static_result = test_static( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static(place=place, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) - 
dy_functional = test_dygraph(place=place, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) def test_MultiLabelSoftMarginLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.MultiLabelSoftMarginLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.MultiLabelSoftMarginLoss, + reduction="unsupport reduction", + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[0.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.multi_label_soft_margin_loss, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() def test_MultiLabelSoftMarginLoss_weights(self): @@ -205,37 +213,44 @@ class TestMultiLabelMarginLoss(unittest.TestCase): weight = np.random.randint(0, 2, size=(5, 5)).astype(np.float64) place = 'cpu' reduction = 'mean' - expected = calc_multilabel_margin_loss(input=input, - label=label, - weight=weight, - reduction=reduction) - - dy_result = test_dygraph(place=place, - input_np=input, - label_np=label, - weight=weight, - reduction=reduction) - - static_result = test_static(place=place, - input_np=input, - label_np=label, - weight_np=weight, - reduction=reduction) + expected = calc_multilabel_margin_loss( + input=input, label=label, weight=weight, reduction=reduction + ) + + dy_result = test_dygraph( + place=place, + input_np=input, + label_np=label, + weight=weight, + reduction=reduction, + ) + + static_result = test_static( + place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction, + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) - static_functional = test_static(place=place, - input_np=input, - label_np=label, - weight_np=weight, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - input_np=input, - label_np=label, - weight=weight, - reduction=reduction, - functional=True) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input_np=input, + label_np=label, + weight=weight, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected, rtol=1e-05) np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05) @@ -245,10 +260,12 @@ class TestMultiLabelMarginLoss(unittest.TestCase): input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32') label = 
paddle.to_tensor([[0.2, 0.1]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.multi_label_soft_margin_loss, - input=input, - label=label) + self.assertRaises( + ValueError, + paddle.nn.functional.multi_label_soft_margin_loss, + input=input, + label=label, + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index bd03b646ddc4ac0725cf53eb125b5aa5f6672bf1..cde6c8daf96be0d3d3bbebe2c64fadc77b0f6b2c 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import unittest import numpy as np @@ -18,42 +18,70 @@ import copy from op_test import OpTest import paddle import paddle.fluid as fluid -from paddle.fluid import Program, program_guard, in_dygraph_mode, _non_static_mode +from paddle.fluid import ( + Program, + program_guard, + in_dygraph_mode, + _non_static_mode, +) from paddle.fluid.layer_helper import LayerHelper from paddle import _C_ops, _legacy_C_ops -def multiclass_nms3(bboxes, - scores, - rois_num=None, - score_threshold=0.3, - nms_top_k=1000, - keep_top_k=100, - nms_threshold=0.3, - normalized=True, - nms_eta=1., - background_label=-1, - return_index=True, - return_rois_num=True, - name=None): +def multiclass_nms3( + bboxes, + scores, + rois_num=None, + score_threshold=0.3, + nms_top_k=1000, + keep_top_k=100, + nms_threshold=0.3, + normalized=True, + nms_eta=1.0, + background_label=-1, + return_index=True, + return_rois_num=True, + name=None, +): helper = LayerHelper('multiclass_nms3', **locals()) if in_dygraph_mode(): - attrs = (score_threshold, nms_top_k, keep_top_k, nms_threshold, - normalized, nms_eta, background_label) + attrs = ( + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold, + normalized, + nms_eta, + background_label, + ) output, index, nms_rois_num = _C_ops.multiclass_nms3( - bboxes, scores, rois_num, *attrs) + bboxes, scores, rois_num, *attrs + ) if not return_index: index = None return output, index, nms_rois_num elif _non_static_mode(): - attrs = ('background_label', background_label, 'score_threshold', - score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold', - nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta, - 'normalized', normalized) + attrs = ( + 'background_label', + background_label, + 'score_threshold', + score_threshold, + 'nms_top_k', + nms_top_k, + 'nms_threshold', + nms_threshold, + 'keep_top_k', + keep_top_k, + 'nms_eta', + nms_eta, + 'normalized', + normalized, + ) output, index, nms_rois_num = _legacy_C_ops.multiclass_nms3( - bboxes, scores, rois_num, *attrs) + bboxes, scores, rois_num, *attrs + ) if not return_index: index = None return output, index, nms_rois_num @@ -70,21 +98,24 @@ def multiclass_nms3(bboxes, if return_rois_num: nms_rois_num = helper.create_variable_for_type_inference( - dtype='int32') + dtype='int32' + ) outputs['NmsRoisNum'] = nms_rois_num - helper.append_op(type="multiclass_nms3", - inputs=inputs, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'nms_top_k': nms_top_k, - 'nms_threshold': nms_threshold, - 'keep_top_k': keep_top_k, - 'nms_eta': nms_eta, - 'normalized': normalized - }, - outputs=outputs) + helper.append_op( + type="multiclass_nms3", + inputs=inputs, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized, + }, + outputs=outputs, + ) output.stop_gradient = True index.stop_gradient = True if not return_index: @@ -98,14 +129,13 @@ def multiclass_nms3(bboxes, def softmax(x): # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) 
+ shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) def iou(box_a, box_b, norm): - """Apply intersection-over-union overlap between box_a and box_b - """ + """Apply intersection-over-union overlap between box_a and box_b""" xmin_a = min(box_a[0], box_a[2]) ymin_a = min(box_a[1], box_a[3]) xmax_a = max(box_a[0], box_a[2]) @@ -116,10 +146,12 @@ def iou(box_a, box_b, norm): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a + (norm == False)) * (xmax_a - xmin_a + - (norm == False)) - area_b = (ymax_b - ymin_b + (norm == False)) * (xmax_b - xmin_b + - (norm == False)) + area_a = (ymax_a - ymin_a + (norm == False)) * ( + xmax_a - xmin_a + (norm == False) + ) + area_b = (ymax_b - ymin_b + (norm == False)) * ( + xmax_b - xmin_b + (norm == False) + ) if area_a <= 0 and area_b <= 0: return 0.0 @@ -128,21 +160,24 @@ def iou(box_a, box_b, norm): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa + - (norm == False), 0.0) * max(yb - ya + (norm == False), 0.0) + inter_area = max(xb - xa + (norm == False), 0.0) * max( + yb - ya + (norm == False), 0.0 + ) iou_ratio = inter_area / (area_a + area_b - inter_area) return iou_ratio -def nms(boxes, - scores, - score_threshold, - nms_threshold, - top_k=200, - normalized=True, - eta=1.0): +def nms( + boxes, + scores, + score_threshold, + nms_threshold, + top_k=200, + normalized=True, + eta=1.0, +): """Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: @@ -189,8 +224,17 @@ def nms(boxes, return selected_indices -def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, - nms_top_k, keep_top_k, normalized, shared): +def multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared, +): if shared: class_num = scores.shape[0] priorbox_num = scores.shape[1] @@ -201,13 +245,26 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, selected_indices = {} num_det = 0 for c in range(class_num): - if c == background: continue + if c == background: + continue if shared: - indices = nms(boxes, scores[c], score_threshold, nms_threshold, - nms_top_k, normalized) + indices = nms( + boxes, + scores[c], + score_threshold, + nms_threshold, + nms_top_k, + normalized, + ) else: - indices = nms(boxes[:, c, :], scores[:, c], score_threshold, - nms_threshold, nms_top_k, normalized) + indices = nms( + boxes[:, c, :], + scores[:, c], + score_threshold, + nms_threshold, + nms_top_k, + normalized, + ) selected_indices[c] = indices num_det += len(indices) @@ -220,9 +277,9 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, else: score_index.append((scores[idx][c], c, idx)) - sorted_score_index = sorted(score_index, - key=lambda tup: tup[0], - reverse=True) + sorted_score_index = sorted( + score_index, key=lambda tup: tup[0], reverse=True + ) sorted_score_index = sorted_score_index[:keep_top_k] selected_indices = {} @@ -238,9 +295,17 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, return selected_indices, num_det -def lod_multiclass_nms(boxes, scores, background, score_threshold, - nms_threshold, nms_top_k, keep_top_k, box_lod, - normalized): +def lod_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + box_lod, + normalized, +): num_class = boxes.shape[1] det_outs = [] lod = [] 
@@ -249,19 +314,21 @@ def lod_multiclass_nms(boxes, scores, background, score_threshold, if box_lod[0][n] == 0: lod.append(0) continue - box = boxes[head:head + box_lod[0][n]] - score = scores[head:head + box_lod[0][n]] + box = boxes[head : head + box_lod[0][n]] + score = scores[head : head + box_lod[0][n]] offset = head head = head + box_lod[0][n] - nmsed_outs, nmsed_num = multiclass_nms(box, - score, - background, - score_threshold, - nms_threshold, - nms_top_k, - keep_top_k, - normalized, - shared=False) + nmsed_outs, nmsed_num = multiclass_nms( + box, + score, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=False, + ) lod.append(nmsed_num) if nmsed_num == 0: @@ -270,41 +337,52 @@ def lod_multiclass_nms(boxes, scores, background, score_threshold, for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = box[idx, c, :] - tmp_det_out.append([ - c, score[idx][c], xmin, ymin, xmax, ymax, - offset * num_class + idx * num_class + c - ]) - sorted_det_out = sorted(tmp_det_out, - key=lambda tup: tup[0], - reverse=False) + tmp_det_out.append( + [ + c, + score[idx][c], + xmin, + ymin, + xmax, + ymax, + offset * num_class + idx * num_class + c, + ] + ) + sorted_det_out = sorted( + tmp_det_out, key=lambda tup: tup[0], reverse=False + ) det_outs.extend(sorted_det_out) return det_outs, lod -def batched_multiclass_nms(boxes, - scores, - background, - score_threshold, - nms_threshold, - nms_top_k, - keep_top_k, - normalized=True): +def batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized=True, +): batch_size = scores.shape[0] num_boxes = scores.shape[2] det_outs = [] index_outs = [] lod = [] for n in range(batch_size): - nmsed_outs, nmsed_num = multiclass_nms(boxes[n], - scores[n], - background, - score_threshold, - nms_threshold, - nms_top_k, - keep_top_k, - normalized, - shared=True) + nmsed_outs, nmsed_num = multiclass_nms( + boxes[n], + scores[n], + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=True, + ) lod.append(nmsed_num) if nmsed_num == 0: @@ -313,19 +391,25 @@ def batched_multiclass_nms(boxes, for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = boxes[n][idx][:] - tmp_det_out.append([ - c, scores[n][c][idx], xmin, ymin, xmax, ymax, - idx + n * num_boxes - ]) - sorted_det_out = sorted(tmp_det_out, - key=lambda tup: tup[0], - reverse=False) + tmp_det_out.append( + [ + c, + scores[n][c][idx], + xmin, + ymin, + xmax, + ymax, + idx + n * num_boxes, + ] + ) + sorted_det_out = sorted( + tmp_det_out, key=lambda tup: tup[0], reverse=False + ) det_outs.extend(sorted_det_out) return det_outs, lod class TestMulticlassNMSOp(OpTest): - def set_argument(self): self.score_threshold = 0.01 @@ -351,9 +435,15 @@ class TestMulticlassNMSOp(OpTest): boxes[:, :, 0:2] = boxes[:, :, 0:2] * 0.5 boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5 - det_outs, lod = batched_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k) + det_outs, lod = batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + ) lod = [1] if not det_outs else lod det_outs = [[-1, 0]] if not det_outs else det_outs det_outs = np.array(det_outs) @@ -377,7 +467,6 @@ class TestMulticlassNMSOp(OpTest): class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp): - def set_argument(self): # Here set 2.0 to test the case 
there is no outputs. # In practical use, 0.0 < score_threshold < 1.0 @@ -385,7 +474,6 @@ class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp): class TestMulticlassNMSLoDInput(OpTest): - def set_argument(self): self.score_threshold = 0.01 @@ -412,13 +500,21 @@ class TestMulticlassNMSLoDInput(OpTest): boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10 boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10 - det_outs, lod = lod_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k, box_lod, - normalized) + det_outs, lod = lod_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + box_lod, + normalized, + ) det_outs = np.array(det_outs).astype('float32') - nmsed_outs = det_outs[:, :-1].astype('float32') if len( - det_outs) else det_outs + nmsed_outs = ( + det_outs[:, :-1].astype('float32') if len(det_outs) else det_outs + ) self.op_type = 'multiclass_nms' self.inputs = { 'BBoxes': (boxes, box_lod), @@ -440,7 +536,6 @@ class TestMulticlassNMSLoDInput(OpTest): class TestMulticlassNMSNoBox(TestMulticlassNMSLoDInput): - def setUp(self): self.set_argument() M = 1200 @@ -464,13 +559,21 @@ class TestMulticlassNMSNoBox(TestMulticlassNMSLoDInput): boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10 boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10 - det_outs, lod = lod_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k, box_lod, - normalized) + det_outs, lod = lod_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + box_lod, + normalized, + ) det_outs = np.array(det_outs).astype('float32') - nmsed_outs = det_outs[:, :-1].astype('float32') if len( - det_outs) else det_outs + nmsed_outs = ( + det_outs[:, :-1].astype('float32') if len(det_outs) else det_outs + ) self.op_type = 'multiclass_nms' self.inputs = { 'BBoxes': (boxes, box_lod), @@ -489,7 +592,6 @@ class TestMulticlassNMSNoBox(TestMulticlassNMSLoDInput): class TestIOU(unittest.TestCase): - def test_iou(self): box1 = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32') box2 = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32') @@ -500,7 +602,6 @@ class TestIOU(unittest.TestCase): class TestMulticlassNMS2Op(TestMulticlassNMSOp): - def setUp(self): self.set_argument() N = 7 @@ -523,20 +624,28 @@ class TestMulticlassNMS2Op(TestMulticlassNMSOp): boxes[:, :, 0:2] = boxes[:, :, 0:2] * 0.5 boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5 - det_outs, lod = batched_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k) + det_outs, lod = batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + ) det_outs = np.array(det_outs) - nmsed_outs = det_outs[:, :-1].astype('float32') if len( - det_outs) else det_outs - index_outs = det_outs[:, - -1:].astype('int') if len(det_outs) else det_outs + nmsed_outs = ( + det_outs[:, :-1].astype('float32') if len(det_outs) else det_outs + ) + index_outs = ( + det_outs[:, -1:].astype('int') if len(det_outs) else det_outs + ) self.op_type = 'multiclass_nms2' self.inputs = {'BBoxes': boxes, 'Scores': scores} self.outputs = { 'Out': (nmsed_outs, [lod]), - 'Index': (index_outs, [lod]) + 'Index': (index_outs, [lod]), } self.attrs = { 'background_label': 0, @@ -553,7 +662,6 @@ class TestMulticlassNMS2Op(TestMulticlassNMSOp): class TestMulticlassNMS2OpNoOutput(TestMulticlassNMS2Op): - def set_argument(self): # Here set 2.0 to test the case there is no outputs. 
# In practical use, 0.0 < score_threshold < 1.0 @@ -561,7 +669,6 @@ class TestMulticlassNMS2OpNoOutput(TestMulticlassNMS2Op): class TestMulticlassNMS2LoDInput(TestMulticlassNMSLoDInput): - def setUp(self): self.set_argument() M = 1200 @@ -585,16 +692,25 @@ class TestMulticlassNMS2LoDInput(TestMulticlassNMSLoDInput): boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10 boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10 - det_outs, lod = lod_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k, box_lod, - normalized) + det_outs, lod = lod_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + box_lod, + normalized, + ) det_outs = np.array(det_outs) - nmsed_outs = det_outs[:, :-1].astype('float32') if len( - det_outs) else det_outs - index_outs = det_outs[:, - -1:].astype('int') if len(det_outs) else det_outs + nmsed_outs = ( + det_outs[:, :-1].astype('float32') if len(det_outs) else det_outs + ) + index_outs = ( + det_outs[:, -1:].astype('int') if len(det_outs) else det_outs + ) self.op_type = 'multiclass_nms2' self.inputs = { 'BBoxes': (boxes, box_lod), @@ -602,7 +718,7 @@ class TestMulticlassNMS2LoDInput(TestMulticlassNMSLoDInput): } self.outputs = { 'Out': (nmsed_outs, [lod]), - 'Index': (index_outs, [lod]) + 'Index': (index_outs, [lod]), } self.attrs = { 'background_label': 0, @@ -620,7 +736,6 @@ def test_check_output(self): class TestMulticlassNMS2LoDNoOutput(TestMulticlassNMS2LoDInput): - def set_argument(self): # Here set 2.0 to test the case there is no outputs. # In practical use, 0.0 < score_threshold < 1.0 @@ -628,7 +743,6 @@ class TestMulticlassNMS2LoDNoOutput(TestMulticlassNMS2LoDInput): class TestMulticlassNMSError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): M = 1200 @@ -642,12 +756,12 @@ class TestMulticlassNMSError(unittest.TestCase): scores = np.reshape(scores, (N, M, C)) scores_np = np.transpose(scores, (0, 2, 1)) - boxes_data = fluid.data(name='bboxes', - shape=[M, C, BOX_SIZE], - dtype='float32') - scores_data = fluid.data(name='scores', - shape=[N, C, M], - dtype='float32') + boxes_data = fluid.data( + name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32' + ) + scores_data = fluid.data( + name='scores', shape=[N, C, M], dtype='float32' + ) def test_bboxes_Variable(): # the bboxes type must be Variable @@ -662,7 +776,6 @@ class TestMulticlassNMSError(unittest.TestCase): class TestMulticlassNMS3Op(TestMulticlassNMS2Op): - def setUp(self): self.python_api = multiclass_nms3 self.set_argument() @@ -686,21 +799,29 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op): boxes[:, :, 0:2] = boxes[:, :, 0:2] * 0.5 boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5 - det_outs, lod = batched_multiclass_nms(boxes, scores, background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k) + det_outs, lod = batched_multiclass_nms( + boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + ) det_outs = np.array(det_outs) - nmsed_outs = det_outs[:, :-1].astype('float32') if len( - det_outs) else det_outs - index_outs = det_outs[:, - -1:].astype('int') if len(det_outs) else det_outs + nmsed_outs = ( + det_outs[:, :-1].astype('float32') if len(det_outs) else det_outs + ) + index_outs = ( + det_outs[:, -1:].astype('int') if len(det_outs) else det_outs + ) self.op_type = 'multiclass_nms3' self.inputs = {'BBoxes': boxes, 'Scores': scores} self.outputs = { 'Out': nmsed_outs, 'Index': index_outs, - 'NmsRoisNum': 
np.array(lod).astype('int32') + 'NmsRoisNum': np.array(lod).astype('int32'), } self.attrs = { 'background_label': 0, @@ -717,7 +838,6 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op): class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op): - def set_argument(self): # Here set 2.0 to test the case there is no outputs. # In practical use, 0.0 < score_threshold < 1.0 diff --git a/python/paddle/fluid/tests/unittests/test_multihead_attention.py b/python/paddle/fluid/tests/unittests/test_multihead_attention.py index 589b62d764192389bbb865d86cde8e1b85e05302..8c6b3f96f31a4ef29819e158809b881de7443503 100644 --- a/python/paddle/fluid/tests/unittests/test_multihead_attention.py +++ b/python/paddle/fluid/tests/unittests/test_multihead_attention.py @@ -19,42 +19,44 @@ import numpy as np class TestMultiheadAttention(unittest.TestCase): - def gen_random_input(self): - """Generate random input data. - """ + """Generate random input data.""" # batch_size, max_sequence_length, hidden dimension self.input_shape = (3, 13, 16) self.queries = np.random.random(size=self.input_shape).astype("float32") self.keys = np.random.random(size=self.input_shape).astype("float32") def set_program(self): - """Build the test program. - """ - queries = fluid.layers.data(name="queries", - shape=self.input_shape, - dtype="float32", - append_batch_size=False) + """Build the test program.""" + queries = fluid.layers.data( + name="queries", + shape=self.input_shape, + dtype="float32", + append_batch_size=False, + ) queries.stop_gradient = False - keys = fluid.layers.data(name="keys", - shape=self.input_shape, - dtype="float32", - append_batch_size=False) + keys = fluid.layers.data( + name="keys", + shape=self.input_shape, + dtype="float32", + append_batch_size=False, + ) keys.stop_gradient = False - contexts = fluid.nets.scaled_dot_product_attention(queries=queries, - keys=keys, - values=keys, - num_heads=8, - dropout_rate=0.) + contexts = fluid.nets.scaled_dot_product_attention( + queries=queries, + keys=keys, + values=keys, + num_heads=8, + dropout_rate=0.0, + ) out = fluid.layers.reduce_sum(contexts, dim=None) fluid.backward.append_backward(loss=out) self.fetch_list = [contexts] def run_program(self): - """Run the test program. - """ + """Run the test program.""" places = [core.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) @@ -64,15 +66,16 @@ class TestMultiheadAttention(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - output = exe.run(fluid.default_main_program(), - feed=self.inputs, - fetch_list=self.fetch_list, - return_numpy=True) + output = exe.run( + fluid.default_main_program(), + feed=self.inputs, + fetch_list=self.fetch_list, + return_numpy=True, + ) self.op_output = output def set_inputs(self, place): - """Set the randomly generated data to the test program. - """ + """Set the randomly generated data to the test program.""" self.inputs = {} queries = fluid.Tensor() queries.set(self.queries, place) @@ -89,7 +92,7 @@ class TestMultiheadAttention(unittest.TestCase): self.set_program() self.run_program() - #fixme(caoying) add more meaningfull unittest. + # fixme(caoying) add more meaningfull unittest. 
if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_multimarginloss.py b/python/paddle/fluid/tests/unittests/test_multimarginloss.py index 1eff1deb69295e37e5e5e5c84b2ebe0c7ab2cf81..3a54f752eb2fb89342a96cd4b5582f659c13b22d 100644 --- a/python/paddle/fluid/tests/unittests/test_multimarginloss.py +++ b/python/paddle/fluid/tests/unittests/test_multimarginloss.py @@ -26,10 +26,9 @@ def call_MultiMarginLoss_layer( weight=None, reduction='mean', ): - triplet_margin_loss = paddle.nn.MultiMarginLoss(p=p, - margin=margin, - weight=weight, - reduction=reduction) + triplet_margin_loss = paddle.nn.MultiMarginLoss( + p=p, margin=margin, weight=weight, reduction=reduction + ) res = triplet_margin_loss( input=input, label=label, @@ -45,82 +44,92 @@ def call_MultiMarginLoss_functional( weight=None, reduction='mean', ): - res = paddle.nn.functional.multi_margin_loss(input=input, - label=label, - p=p, - margin=margin, - weight=weight, - reduction=reduction) + res = paddle.nn.functional.multi_margin_loss( + input=input, + label=label, + p=p, + margin=margin, + weight=weight, + reduction=reduction, + ) return res -def test_static(place, - input_np, - label_np, - p=1, - margin=1.0, - weight_np=None, - reduction='mean', - functional=False): +def test_static( + place, + input_np, + label_np, + p=1, + margin=1.0, + weight_np=None, + reduction='mean', + functional=False, +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype=input_np.dtype) - label = paddle.static.data(name='label', - shape=label_np.shape, - dtype=label_np.dtype) + input = paddle.static.data( + name='input', shape=input_np.shape, dtype=input_np.dtype + ) + label = paddle.static.data( + name='label', shape=label_np.shape, dtype=label_np.dtype + ) feed_dict = { "input": input_np, "label": label_np, } weight = None if weight_np is not None: - weight = paddle.static.data(name='weight', - shape=weight_np.shape, - dtype=weight_np.dtype) + weight = paddle.static.data( + name='weight', shape=weight_np.shape, dtype=weight_np.dtype + ) feed_dict['weight'] = weight_np if functional: - res = call_MultiMarginLoss_functional(input=input, - label=label, - p=p, - margin=margin, - weight=weight, - reduction=reduction) + res = call_MultiMarginLoss_functional( + input=input, + label=label, + p=p, + margin=margin, + weight=weight, + reduction=reduction, + ) else: - res = call_MultiMarginLoss_layer(input=input, - label=label, - p=p, - margin=margin, - weight=weight, - reduction=reduction) + res = call_MultiMarginLoss_layer( + input=input, + label=label, + p=p, + margin=margin, + weight=weight, + reduction=reduction, + ) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) return static_result[0] -def test_static_data_shape(place, - input_np, - label_np, - wrong_label_shape=None, - weight_np=None, - wrong_weight_shape=None, - functional=False): +def test_static_data_shape( + place, + input_np, + label_np, + wrong_label_shape=None, + weight_np=None, + wrong_weight_shape=None, + functional=False, +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype=input_np.dtype) + input = paddle.static.data( + name='input', shape=input_np.shape, dtype=input_np.dtype + ) if wrong_label_shape is None: label_shape = 
label_np.shape else: label_shape = wrong_label_shape - label = paddle.static.data(name='label', - shape=label_shape, - dtype=label_np.dtype) + label = paddle.static.data( + name='label', shape=label_shape, dtype=label_np.dtype + ) feed_dict = { "input": input_np, "label": label_np, @@ -131,9 +140,9 @@ def test_static_data_shape(place, weight_shape = weight_np.shape else: weight_shape = wrong_weight_shape - weight = paddle.static.data(name='weight', - shape=weight_shape, - dtype=weight_np.dtype) + weight = paddle.static.data( + name='weight', shape=weight_shape, dtype=weight_np.dtype + ) feed_dict['weight'] = weight_np if functional: res = call_MultiMarginLoss_functional( @@ -153,14 +162,16 @@ def test_static_data_shape(place, return static_result -def test_dygraph(place, - input, - label, - p=1, - margin=1.0, - weight=None, - reduction='mean', - functional=False): +def test_dygraph( + place, + input, + label, + p=1, + margin=1.0, + weight=None, + reduction='mean', + functional=False, +): paddle.disable_static() input = paddle.to_tensor(input) label = paddle.to_tensor(label) @@ -168,19 +179,23 @@ def test_dygraph(place, if weight is not None: weight = paddle.to_tensor(weight) if functional: - dy_res = call_MultiMarginLoss_functional(input=input, - label=label, - p=p, - margin=margin, - weight=weight, - reduction=reduction) + dy_res = call_MultiMarginLoss_functional( + input=input, + label=label, + p=p, + margin=margin, + weight=weight, + reduction=reduction, + ) else: - dy_res = call_MultiMarginLoss_layer(input=input, - label=label, - p=p, - margin=margin, - weight=weight, - reduction=reduction) + dy_res = call_MultiMarginLoss_layer( + input=input, + label=label, + p=p, + margin=margin, + weight=weight, + reduction=reduction, + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -194,16 +209,22 @@ def calc_multi_margin_loss( weight=None, reduction='mean', ): - index_sample = np.array([input[i, label[i]] - for i in range(label.size)]).reshape(-1, 1) + index_sample = np.array( + [input[i, label[i]] for i in range(label.size)] + ).reshape(-1, 1) if weight is None: - expected = np.mean(np.maximum(margin + input - index_sample, 0.0)**p, - axis=1) - margin**p / input.shape[1] + expected = ( + np.mean(np.maximum(margin + input - index_sample, 0.0) ** p, axis=1) + - margin**p / input.shape[1] + ) else: - weight = np.array([weight[label[i]] - for i in range(label.size)]).reshape(-1, 1) - expected = np.mean(np.maximum(weight * (margin + input - index_sample), 0.0) ** p, axis=1) - weight*(margin ** p / \ - input.shape[1]) + weight = np.array( + [weight[label[i]] for i in range(label.size)] + ).reshape(-1, 1) + expected = np.mean( + np.maximum(weight * (margin + input - index_sample), 0.0) ** p, + axis=1, + ) - weight * (margin**p / input.shape[1]) if reduction == 'mean': expected = np.mean(expected) @@ -216,14 +237,14 @@ def calc_multi_margin_loss( class TestMultiMarginLoss(unittest.TestCase): - def test_MultiMarginLoss(self): batch_size = 5 num_classes = 2 shape = (batch_size, num_classes) input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) - label = np.random.uniform(0, input.shape[1], - size=(batch_size, )).astype(np.int64) + label = np.random.uniform(0, input.shape[1], size=(batch_size,)).astype( + np.int64 + ) places = [paddle.CPUPlace()] if paddle.device.is_compiled_with_cuda(): @@ -231,9 +252,9 @@ class TestMultiMarginLoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - expected = 
calc_multi_margin_loss(input=input, - label=label, - reduction=reduction) + expected = calc_multi_margin_loss( + input=input, label=label, reduction=reduction + ) dy_result = test_dygraph( place=place, @@ -251,32 +272,40 @@ class TestMultiMarginLoss(unittest.TestCase): np.testing.assert_allclose(static_result, expected) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected) - static_functional = test_static(place=place, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - input=input, - label=label, - reduction=reduction, - functional=True) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input=input, + label=label, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected) def test_MultiMarginLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.MultiMarginLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.MultiMarginLoss, + reduction="unsupport reduction", + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([0], dtype='int32') - self.assertRaises(ValueError, - paddle.nn.functional.multi_margin_loss, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.multi_margin_loss, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() def test_MultiMarginLoss_dimension(self): @@ -308,12 +337,12 @@ class TestMultiMarginLoss(unittest.TestCase): reduction = 'mean' place = paddle.CPUPlace() input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) - label = np.random.uniform(0, input.shape[1], - size=(batch_size, )).astype(np.int64) - expected = calc_multi_margin_loss(input=input, - p=p, - label=label, - reduction=reduction) + label = np.random.uniform(0, input.shape[1], size=(batch_size,)).astype( + np.int64 + ) + expected = calc_multi_margin_loss( + input=input, p=p, label=label, reduction=reduction + ) dy_result = test_dygraph( place=place, @@ -333,18 +362,22 @@ class TestMultiMarginLoss(unittest.TestCase): np.testing.assert_allclose(static_result, expected) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected) - static_functional = test_static(place=place, - p=p, - input_np=input, - label_np=label, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - p=p, - input=input, - label=label, - reduction=reduction, - functional=True) + static_functional = test_static( + place=place, + p=p, + input_np=input, + label_np=label, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + p=p, + input=input, + label=label, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected) @@ -356,14 +389,13 @@ class TestMultiMarginLoss(unittest.TestCase): reduction = 'mean' place = paddle.CPUPlace() input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) - label = np.random.uniform(0, input.shape[1], - size=(batch_size, 
)).astype(np.int64) - weight = np.random.uniform(0, 2, - size=(num_classes, )).astype(np.float64) - expected = calc_multi_margin_loss(input=input, - label=label, - weight=weight, - reduction=reduction) + label = np.random.uniform(0, input.shape[1], size=(batch_size,)).astype( + np.int64 + ) + weight = np.random.uniform(0, 2, size=(num_classes,)).astype(np.float64) + expected = calc_multi_margin_loss( + input=input, label=label, weight=weight, reduction=reduction + ) dy_result = test_dygraph( place=place, @@ -383,18 +415,22 @@ class TestMultiMarginLoss(unittest.TestCase): np.testing.assert_allclose(static_result, expected) np.testing.assert_allclose(static_result, dy_result) np.testing.assert_allclose(dy_result, expected) - static_functional = test_static(place=place, - input_np=input, - label_np=label, - weight_np=weight, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - input=input, - label=label, - weight=weight, - reduction=reduction, - functional=True) + static_functional = test_static( + place=place, + input_np=input, + label_np=label, + weight_np=weight, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input=input, + label=label, + weight=weight, + reduction=reduction, + functional=True, + ) np.testing.assert_allclose(static_functional, expected) np.testing.assert_allclose(static_functional, dy_functional) np.testing.assert_allclose(dy_functional, expected) @@ -405,10 +441,10 @@ class TestMultiMarginLoss(unittest.TestCase): shape = (batch_size, num_classes) place = paddle.CPUPlace() input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) - label = np.random.uniform(0, input.shape[1], - size=(batch_size, )).astype(np.int64) - weight = np.random.uniform(0, 2, - size=(num_classes, )).astype(np.float64) + label = np.random.uniform(0, input.shape[1], size=(batch_size,)).astype( + np.int64 + ) + weight = np.random.uniform(0, 2, size=(num_classes,)).astype(np.float64) self.assertRaises( ValueError, @@ -416,7 +452,7 @@ class TestMultiMarginLoss(unittest.TestCase): place=place, input_np=input, label_np=label, - wrong_label_shape=(10, ), + wrong_label_shape=(10,), functional=True, ) self.assertRaises( @@ -425,7 +461,7 @@ class TestMultiMarginLoss(unittest.TestCase): place=place, input_np=input, label_np=label, - wrong_label_shape=(10, ), + wrong_label_shape=(10,), functional=False, ) self.assertRaises( @@ -435,7 +471,7 @@ class TestMultiMarginLoss(unittest.TestCase): input_np=input, label_np=label, weight_np=weight, - wrong_weight_shape=(3, ), + wrong_weight_shape=(3,), functional=True, ) self.assertRaises( @@ -445,7 +481,7 @@ class TestMultiMarginLoss(unittest.TestCase): input_np=input, label_np=label, weight_np=weight, - wrong_weight_shape=(3, ), + wrong_weight_shape=(3,), functional=False, ) diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py index 5df70185dda588a7d95c2687163912d154ce34cb..f79dee65250d557cd6bb2cec9236566b8c6a9427 100644 --- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py +++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py @@ -43,7 +43,6 @@ def sample_output_two_dimension(out, shape): class TestMultinomialOp(OpTest): - def setUp(self): paddle.enable_static() self.op_type = "multinomial" @@ -66,16 +65,16 @@ class TestMultinomialOp(OpTest): # normalize the input to get the probability prob = self.input_np / self.input_np.sum(axis=-1, keepdims=True) sample_prob = 
self.sample_output(np.array(outs[0])) - np.testing.assert_allclose(sample_prob, - prob, - rtol=0, - atol=0.01, - err_msg='sample_prob: ' + str(sample_prob) + - '\nprob: ' + str(prob)) + np.testing.assert_allclose( + sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + '\nprob: ' + str(prob), + ) class TestMultinomialOp2(TestMultinomialOp): - def init_data(self): # input probability is a matrix self.input_np = np.random.rand(3, 4) @@ -87,7 +86,6 @@ class TestMultinomialOp2(TestMultinomialOp): class TestMultinomialOp3(TestMultinomialOp): - def init_data(self): # replacement is False. number of samples must be less than number of categories. self.input_np = np.random.rand(1000) @@ -98,12 +96,13 @@ class TestMultinomialOp3(TestMultinomialOp): out = np.array(outs[0]) unique_out = np.unique(out) self.assertEqual( - len(unique_out), 100, - "replacement is False. categories can't be sampled repeatedly") + len(unique_out), + 100, + "replacement is False. categories can't be sampled repeatedly", + ) class TestMultinomialApi(unittest.TestCase): - def test_dygraph(self): # input probability is a vector, and replacement is True paddle.disable_static() @@ -114,12 +113,13 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_one_dimension(out.numpy(), 4) prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True) - np.testing.assert_allclose(sample_prob, - prob, - rtol=0, - atol=0.01, - err_msg='sample_prob: ' + str(sample_prob) + - '\nprob: ' + str(prob)) + np.testing.assert_allclose( + sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + '\nprob: ' + str(prob), + ) def test_dygraph2(self): # input probability is a matrix, and replacement is True @@ -130,12 +130,13 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_two_dimension(out.numpy(), [3, 4]) prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True) - np.testing.assert_allclose(sample_prob, - prob, - rtol=0, - atol=0.01, - err_msg='sample_prob: ' + str(sample_prob) + - '\nprob: ' + str(prob)) + np.testing.assert_allclose( + sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + '\nprob: ' + str(prob), + ) paddle.enable_static() def test_dygraph3(self): @@ -147,8 +148,10 @@ class TestMultinomialApi(unittest.TestCase): unique_out = np.unique(out.numpy()) self.assertEqual( - len(unique_out), 100, - "replacement is False. categories can't be sampled repeatedly") + len(unique_out), + 100, + "replacement is False. 
categories can't be sampled repeatedly", + ) paddle.enable_static() def test_dygraph4(self): @@ -178,16 +181,16 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_one_dimension(out, 4) prob = x_np / x_np.sum(axis=-1, keepdims=True) - np.testing.assert_allclose(sample_prob, - prob, - rtol=0, - atol=0.01, - err_msg='sample_prob: ' + str(sample_prob) + - '\nprob: ' + str(prob)) + np.testing.assert_allclose( + sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + '\nprob: ' + str(prob), + ) class TestMultinomialAlias(unittest.TestCase): - def test_alias(self): paddle.disable_static() x = paddle.rand([4]) @@ -197,12 +200,10 @@ class TestMultinomialAlias(unittest.TestCase): class TestMultinomialError(unittest.TestCase): - def setUp(self): paddle.disable_static() def test_num_sample(self): - def test_num_sample_less_than_0(): x = paddle.rand([4]) paddle.multinomial(x, num_samples=-2) @@ -210,7 +211,6 @@ class TestMultinomialError(unittest.TestCase): self.assertRaises(ValueError, test_num_sample_less_than_0) def test_replacement_False(self): - def test_samples_larger_than_categories(): x = paddle.rand([4]) paddle.multinomial(x, num_samples=5, replacement=False) @@ -218,7 +218,6 @@ class TestMultinomialError(unittest.TestCase): self.assertRaises(ValueError, test_samples_larger_than_categories) def test_input_probs_dim(self): - def test_dim_larger_than_2(): x = paddle.rand([2, 3, 3]) paddle.multinomial(x) @@ -233,7 +232,7 @@ class TestMultinomialError(unittest.TestCase): self.assertRaises(ValueError, test_dim_less_than_1) with self.assertRaises(ValueError): - y = paddle.multinomial(paddle.to_tensor([1., 2., -3.])) + y = paddle.multinomial(paddle.to_tensor([1.0, 2.0, -3.0])) with self.assertRaises(ValueError): prob = paddle.rand([20, 1000]) @@ -242,7 +241,6 @@ class TestMultinomialError(unittest.TestCase): class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -294,7 +292,6 @@ class TestRandomValue(unittest.TestCase): class TestMultinomialTensorNumSamples(UnittestBase): - def init_info(self): self.shapes = [[3, 4]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -326,8 +323,9 @@ class TestMultinomialTensorNumSamples(UnittestBase): exe = paddle.static.Executor() exe.run(starup_prog) res = exe.run(fetch_list=[feat, out]) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) np.testing.assert_equal(res[1].shape, (3, 3)) # Test for Inference Predictor diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py index 10ad40082f7771f6bcd6849fb1ce6063e5a636ec..75a08f99eccaceb55a6193af1a4468fce77d0a7c 100644 --- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestMultiplexOp(OpTest): - def setUp(self): self.op_type = "multiplex" rows = 4 @@ -34,7 +33,7 @@ class TestMultiplexOp(OpTest): ins4 = np.random.random((rows, 25)).astype("float64") self.inputs = { 'Ids': index, - 'X': [('x1', ins1), ('x2', ins2), ('x3', ins3), ('x4', ins4)] + 'X': [('x1', ins1), ('x2', ins2), ('x3', ins3), ('x4', ins4)], } # multiplex output output = np.zeros_like(ins1) @@ -60,7 
+59,6 @@ class TestMultiplexOp(OpTest): class TestMultiplexOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64') @@ -86,16 +84,15 @@ class TestMultiplexOpError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_type2(): - index2 = fluid.data(name='index2', - shape=[None, 1], - dtype='int16') + index2 = fluid.data( + name='index2', shape=[None, 1], dtype='int16' + ) paddle.multiplex(inputs=[x1, x2], index=index2) self.assertRaises(TypeError, test_type2) class TestMultiplexODygrap(unittest.TestCase): - def test_multiplex_dygraph(self): paddle.disable_static() img1 = np.array([[1, 2], [3, 4]]).astype(np.float32) @@ -118,18 +115,25 @@ class TestMultiplexODygrap(unittest.TestCase): with _test_eager_guard(): inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)] index_eager = paddle.to_tensor( - np.array([[1], [0]]).astype(np.int32)) + np.array([[1], [0]]).astype(np.int32) + ) inputs_eager[0].stop_gradient = False inputs_eager[1].stop_gradient = False res_eager = paddle.multiplex(inputs_eager, index_eager) res_eager.backward() self.assertEqual((res.numpy() == res_eager.numpy()).all(), True) self.assertEqual( - (inputs[0].grad.numpy() == inputs_eager[0].grad.numpy() - ).all(), True) + ( + inputs[0].grad.numpy() == inputs_eager[0].grad.numpy() + ).all(), + True, + ) self.assertEqual( - (inputs[1].grad.numpy() == inputs_eager[1].grad.numpy() - ).all(), True) + ( + inputs[1].grad.numpy() == inputs_eager[1].grad.numpy() + ).all(), + True, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index e32245990a847267a2332b6b31eef370f8182bf9..a2c213ade9c4cd69f0b25ef290ecaed93df88cf8 100755 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -23,27 +23,28 @@ from paddle.fluid.framework import _test_eager_guard class TestMultiplyApi(unittest.TestCase): - def _run_static_graph_case(self, x_data, y_data): with program_guard(Program(), Program()): paddle.enable_static() - x = paddle.static.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = paddle.static.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = paddle.static.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = paddle.static.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) res = tensor.multiply(x, y) - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) - outs = exe.run(paddle.static.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[res]) + outs = exe.run( + paddle.static.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[res], + ) res = outs[0] return res @@ -112,7 +113,6 @@ class TestMultiplyApi(unittest.TestCase): class TestMultiplyError(unittest.TestCase): - def func_test_errors(self): # test static computation graph: dtype can not be int8 paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py index 4cf28bbd30b19e05a61394747636f1810d406899..f3872a388c0616b16ba6f5df32aaf8caf93abcc2 100755 --- 
a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dataset.py @@ -17,15 +17,20 @@ import numpy as np import paddle import paddle.fluid as fluid -from paddle.io import Dataset, IterableDataset, TensorDataset, \ - ComposeDataset, ChainDataset, DataLoader +from paddle.io import ( + Dataset, + IterableDataset, + TensorDataset, + ComposeDataset, + ChainDataset, + DataLoader, +) from paddle.fluid.framework import _test_eager_guard IMAGE_SIZE = 32 class RandomDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -35,12 +40,11 @@ class RandomDataset(Dataset): def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, 9, (1, )).astype('int64') + label = np.random.randint(0, 9, (1,)).astype('int64') return image, label class RandomIterableDataset(IterableDataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -48,12 +52,11 @@ class RandomIterableDataset(IterableDataset): for i in range(self.sample_num): np.random.seed(i) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, 9, (1, )).astype('int64') + label = np.random.randint(0, 9, (1,)).astype('int64') yield image, label class TestTensorDataset(unittest.TestCase): - def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -66,21 +69,25 @@ class TestTensorDataset(unittest.TestCase): dataset = TensorDataset([input, label]) assert len(dataset) == 16 - dataloader = DataLoader(dataset, - places=place, - num_workers=num_workers, - batch_size=1, - drop_last=True) + dataloader = DataLoader( + dataset, + places=place, + num_workers=num_workers, + batch_size=1, + drop_last=True, + ) for i, (input, label) in enumerate(dataloader()): assert len(input) == 1 assert len(label) == 1 assert input.shape == [1, 3, 4] assert label.shape == [1, 1] - assert isinstance(input, - (fluid.core.VarBase, fluid.core.eager.Tensor)) - assert isinstance(label, - (fluid.core.VarBase, fluid.core.eager.Tensor)) + assert isinstance( + input, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) + assert isinstance( + label, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) assert np.allclose(input.numpy(), input_np[i]) assert np.allclose(label.numpy(), label_np[i]) @@ -98,7 +105,6 @@ class TestTensorDataset(unittest.TestCase): class TestComposeDataset(unittest.TestCase): - def func_test_main(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -124,7 +130,6 @@ class TestComposeDataset(unittest.TestCase): class TestRandomSplitApi(unittest.TestCase): - def func_test_main(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -151,7 +156,6 @@ class TestRandomSplitApi(unittest.TestCase): class TestRandomSplitError(unittest.TestCase): - def func_test_errors(self): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -167,7 +171,6 @@ class TestRandomSplitError(unittest.TestCase): class TestSubsetDataset(unittest.TestCase): - def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -184,11 +187,13 @@ class TestSubsetDataset(unittest.TestCase): assert len(dataset) == 
5 def prepare_dataloader(dataset): - return DataLoader(dataset, - places=places, - num_workers=num_workers, - batch_size=1, - drop_last=True) + return DataLoader( + dataset, + places=places, + num_workers=num_workers, + batch_size=1, + drop_last=True, + ) dataloader = prepare_dataloader(dataset) dataloader_even = prepare_dataloader(even_subset) @@ -199,10 +204,12 @@ class TestSubsetDataset(unittest.TestCase): assert len(label) == 1 assert input.shape == [1, 3, 4] assert label.shape == [1, 1] - assert isinstance(input, - (fluid.core.VarBase, fluid.core.eager.Tensor)) - assert isinstance(label, - (fluid.core.VarBase, fluid.core.eager.Tensor)) + assert isinstance( + input, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) + assert isinstance( + label, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) elements_list = list() for _, (input, label) in enumerate(dataloader()): @@ -237,7 +244,6 @@ class TestSubsetDataset(unittest.TestCase): class TestChainDataset(unittest.TestCase): - def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -275,7 +281,6 @@ class TestChainDataset(unittest.TestCase): class NumpyMixTensorDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -285,12 +290,11 @@ class NumpyMixTensorDataset(Dataset): def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, 9, (1, )).astype('int64') + label = np.random.randint(0, 9, (1,)).astype('int64') return paddle.to_tensor(image, place=paddle.CPUPlace()), label class TestNumpyMixTensorDataset(TestTensorDataset): - def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 @@ -298,25 +302,28 @@ class TestNumpyMixTensorDataset(TestTensorDataset): with fluid.dygraph.guard(place): dataset = NumpyMixTensorDataset(16) assert len(dataset) == 16 - dataloader = DataLoader(dataset, - places=place, - num_workers=num_workers, - batch_size=1, - drop_last=True) + dataloader = DataLoader( + dataset, + places=place, + num_workers=num_workers, + batch_size=1, + drop_last=True, + ) for i, (input, label) in enumerate(dataloader()): assert len(input) == 1 assert len(label) == 1 assert input.shape == [1, IMAGE_SIZE] assert label.shape == [1, 1] - assert isinstance(input, - (fluid.core.VarBase, fluid.core.eager.Tensor)) - assert isinstance(label, - (fluid.core.VarBase, fluid.core.eager.Tensor)) + assert isinstance( + input, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) + assert isinstance( + label, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) class ComplextDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -324,18 +331,19 @@ class ComplextDataset(Dataset): return self.sample_num def __getitem__(self, idx): - return (3.1, 'abc', - paddle.to_tensor(np.random.random([IMAGE_SIZE - ]).astype('float32'), - place=paddle.CPUPlace()), - [1, np.random.random([2]).astype('float32')], { - 'a': 2.0, - 'b': np.random.random([2]).astype('float32') - }) + return ( + 3.1, + 'abc', + paddle.to_tensor( + np.random.random([IMAGE_SIZE]).astype('float32'), + place=paddle.CPUPlace(), + ), + [1, np.random.random([2]).astype('float32')], + {'a': 2.0, 'b': np.random.random([2]).astype('float32')}, + ) class TestComplextDataset(unittest.TestCase): - def run_main(self, num_workers): paddle.static.default_startup_program().random_seed = 1 
paddle.static.default_main_program().random_seed = 1 @@ -343,11 +351,13 @@ class TestComplextDataset(unittest.TestCase): with fluid.dygraph.guard(place): dataset = ComplextDataset(16) assert len(dataset) == 16 - dataloader = DataLoader(dataset, - places=place, - num_workers=num_workers, - batch_size=2, - drop_last=True) + dataloader = DataLoader( + dataset, + places=place, + num_workers=num_workers, + batch_size=2, + drop_last=True, + ) for i, data in enumerate(dataloader()): assert len(data) == 5 @@ -380,7 +390,6 @@ class TestComplextDataset(unittest.TestCase): class SingleFieldDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -392,7 +401,6 @@ class SingleFieldDataset(Dataset): class TestSingleFieldDataset(unittest.TestCase): - def init_dataset(self): self.sample_num = 16 self.dataset = SingleFieldDataset(self.sample_num) @@ -403,15 +411,18 @@ class TestSingleFieldDataset(unittest.TestCase): place = paddle.CPUPlace() with fluid.dygraph.guard(place): self.init_dataset() - dataloader = DataLoader(self.dataset, - places=place, - num_workers=num_workers, - batch_size=2, - drop_last=True) + dataloader = DataLoader( + self.dataset, + places=place, + num_workers=num_workers, + batch_size=2, + drop_last=True, + ) for i, data in enumerate(dataloader()): - assert isinstance(data, - (fluid.core.VarBase, fluid.core.eager.Tensor)) + assert isinstance( + data, (fluid.core.VarBase, fluid.core.eager.Tensor) + ) assert data.shape == [2, 2, 3] def func_test_main(self): @@ -425,7 +436,6 @@ class TestSingleFieldDataset(unittest.TestCase): class SingleFieldIterableDataset(IterableDataset): - def __init__(self, sample_num): self.sample_num = sample_num @@ -435,22 +445,23 @@ class SingleFieldIterableDataset(IterableDataset): class TestSingleFieldIterableDataset(TestSingleFieldDataset): - def init_dataset(self): self.sample_num = 16 self.dataset = SingleFieldIterableDataset(self.sample_num) class TestDataLoaderGenerateStates(unittest.TestCase): - def setUp(self): self.inputs = [(0, 1), (0, 2), (1, 3)] - self.outputs = [[1835504127, 1731038949, 1320224556, 2330041505], - [2834126987, 2358157858, 1860244682, 1437227251], - [457190280, 2660306227, 859341110, 354512857]] + self.outputs = [ + [1835504127, 1731038949, 1320224556, 2330041505], + [2834126987, 2358157858, 1860244682, 1437227251], + [457190280, 2660306227, 859341110, 354512857], + ] def func_test_main(self): from paddle.fluid.dataloader.worker import _generate_states + for inp, outp in zip(self.inputs, self.outputs): out = _generate_states(*inp) assert out == outp @@ -462,16 +473,18 @@ class TestDataLoaderGenerateStates(unittest.TestCase): class TestDatasetWithDropLast(unittest.TestCase): - def run_main(self, dataset, num_samples, batch_size): for num_workers in [0, 1]: for drop_last in [True, False]: - steps = (num_samples + (1 - int(drop_last)) * \ - (batch_size - 1)) // batch_size - dataloader = DataLoader(dataset, - batch_size=batch_size, - drop_last=drop_last, - num_workers=num_workers) + steps = ( + num_samples + (1 - int(drop_last)) * (batch_size - 1) + ) // batch_size + dataloader = DataLoader( + dataset, + batch_size=batch_size, + drop_last=drop_last, + num_workers=num_workers, + ) datas = [] for data in dataloader: datas.append(data) diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py index d0e592d488c1e462a04673fcce3a145ccadf1edd..e4d678d8d170772f417389912e6da86194c32e02 100644 --- 
a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py @@ -21,35 +21,52 @@ import paddle.fluid as fluid from paddle.io import DataLoader from paddle.fluid.dygraph.nn import Linear -from test_multiprocess_dataloader_static import RandomDataset, RandomBatchedDataset, prepare_places -from test_multiprocess_dataloader_static import EPOCH_NUM, BATCH_SIZE, IMAGE_SIZE, SAMPLE_NUM, CLASS_NUM +from test_multiprocess_dataloader_static import ( + RandomDataset, + RandomBatchedDataset, + prepare_places, +) +from test_multiprocess_dataloader_static import ( + EPOCH_NUM, + BATCH_SIZE, + IMAGE_SIZE, + SAMPLE_NUM, + CLASS_NUM, +) class SimpleFCNet(fluid.dygraph.Layer): - def __init__(self): super(SimpleFCNet, self).__init__() - param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.8)) - bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)) + param_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.8) + ) + bias_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ) self._fcs = [] in_channel = IMAGE_SIZE for hidden_size in [10, 20, 30]: self._fcs.append( - Linear(in_channel, - hidden_size, - act='tanh', - param_attr=param_attr, - bias_attr=bias_attr)) + Linear( + in_channel, + hidden_size, + act='tanh', + param_attr=param_attr, + bias_attr=bias_attr, + ) + ) in_channel = hidden_size self._fcs.append( - Linear(in_channel, - CLASS_NUM, - act='softmax', - param_attr=param_attr, - bias_attr=bias_attr)) + Linear( + in_channel, + CLASS_NUM, + act='softmax', + param_attr=param_attr, + bias_attr=bias_attr, + ) + ) def forward(self, image): out = image @@ -59,7 +76,6 @@ class SimpleFCNet(fluid.dygraph.Layer): class TestDygraphDataLoader(unittest.TestCase): - def run_main(self, num_workers, places, persistent_workers): fluid.default_startup_program().random_seed = 1 fluid.default_main_program().random_seed = 1 @@ -68,11 +84,13 @@ class TestDygraphDataLoader(unittest.TestCase): optimizer = fluid.optimizer.Adam(parameter_list=fc_net.parameters()) dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - num_workers=num_workers, - batch_size=BATCH_SIZE, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + num_workers=num_workers, + batch_size=BATCH_SIZE, + drop_last=True, + persistent_workers=persistent_workers, + ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) step_list = [] @@ -96,7 +114,7 @@ class TestDygraphDataLoader(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret @@ -107,21 +125,27 @@ class TestDygraphDataLoader(unittest.TestCase): for persistent_workers in [False, True]: results = [] for num_workers in [0, 2]: - print(self.__class__.__name__, p, num_workers, - persistent_workers) + print( + self.__class__.__name__, + p, + num_workers, + persistent_workers, + ) sys.stdout.flush() - ret = self.run_main(num_workers=num_workers, - places=p, - persistent_workers=persistent_workers) + ret = self.run_main( + num_workers=num_workers, + places=p, + persistent_workers=persistent_workers, + ) results.append(ret) diff = np.max( - np.abs(results[0]['loss'] - results[1]['loss']) / - np.abs(results[0]['loss'])) + np.abs(results[0]['loss'] - results[1]['loss']) + / np.abs(results[0]['loss']) + 
) self.assertLess(diff, 1e-2) class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): - def run_main(self, num_workers, places, persistent_workers): fluid.default_startup_program().random_seed = 1 fluid.default_main_program().random_seed = 1 @@ -130,11 +154,13 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): optimizer = fluid.optimizer.Adam(parameter_list=fc_net.parameters()) dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - num_workers=num_workers, - batch_size=None, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + num_workers=num_workers, + batch_size=None, + drop_last=True, + persistent_workers=persistent_workers, + ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) step_list = [] @@ -158,7 +184,7 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_exception.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_exception.py index ea4b9363bfce2271a236a439f6df2d7fb7cd46d8..f60c73f45773ac7b087c6d50099691a53adedb65 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_exception.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_exception.py @@ -23,14 +23,13 @@ from paddle.fluid.dataloader.dataloader_iter import _worker_loop class RandomDataset(Dataset): - def __init__(self, sample_num): self.sample_num = sample_num def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([784]).astype('float32') - label = np.random.randint(0, 9, (1, )).astype('int64') + label = np.random.randint(0, 9, (1,)).astype('int64') return image, label def __len__(self): @@ -38,7 +37,6 @@ class RandomDataset(Dataset): class TestDataLoaderAssert(unittest.TestCase): - def test_main(self): place = fluid.cpu_places()[0] with fluid.dygraph.guard(place): @@ -61,9 +59,9 @@ class TestDataLoaderAssert(unittest.TestCase): # num_workers < 0 try: - loader = DataLoader(dataset=dataset, - places=place, - num_workers=-1) + loader = DataLoader( + dataset=dataset, places=place, num_workers=-1 + ) self.assertTrue(False) except AssertionError: pass @@ -77,27 +75,28 @@ class TestDataLoaderAssert(unittest.TestCase): # set batch_sampler and shuffle/batch_size/drop_last try: - loader = DataLoader(dataset=dataset, - places=place, - batch_sampler=batch_sampler, - shuffle=True, - drop_last=True) + loader = DataLoader( + dataset=dataset, + places=place, + batch_sampler=batch_sampler, + shuffle=True, + drop_last=True, + ) self.assertTrue(False) except AssertionError: pass # set batch_sampler correctly try: - loader = DataLoader(dataset=dataset, - places=place, - batch_sampler=batch_sampler) + loader = DataLoader( + dataset=dataset, places=place, batch_sampler=batch_sampler + ) self.assertTrue(True) except AssertionError: self.assertTrue(False) class TestDatasetRuntimeError(unittest.TestCase): - def test_main(self): dataset = Dataset() @@ -141,10 +140,10 @@ class TestDatasetRuntimeError(unittest.TestCase): # CI Converage cannot record stub in subprocess, # HACK a _worker_loop in main process call here -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not 
compiled with CUDA" +) class TestDataLoaderWorkerLoop(unittest.TestCase): - def run_without_worker_done(self, use_shared_memory=True): try: place = fluid.cpu_places()[0] @@ -161,12 +160,15 @@ class TestDataLoaderWorkerLoop(unittest.TestCase): np.stack(s, axis=0) for s in list(zip(*sample_list)) ] - loader = DataLoader(dataset, - num_workers=1, - places=place, - use_shared_memory=use_shared_memory) - assert loader.num_workers > 0, \ - "go to AssertionError and pass in Mac and Windows" + loader = DataLoader( + dataset, + num_workers=1, + places=place, + use_shared_memory=use_shared_memory, + ) + assert ( + loader.num_workers > 0 + ), "go to AssertionError and pass in Mac and Windows" loader = iter(loader) print("loader length", len(loader)) indices_queue = multiprocessing.Queue() @@ -174,16 +176,28 @@ class TestDataLoaderWorkerLoop(unittest.TestCase): indices_queue.put([i, i + 10]) indices_queue.put(None) base_seed = 1234 - _worker_loop(loader._dataset, 0, indices_queue, - loader._data_queue, loader._workers_done_event, - True, _collate_fn, True, _init_fn, 0, 1, - loader._use_shared_memory, base_seed) + _worker_loop( + loader._dataset, + 0, + indices_queue, + loader._data_queue, + loader._workers_done_event, + True, + _collate_fn, + True, + _init_fn, + 0, + 1, + loader._use_shared_memory, + base_seed, + ) self.assertTrue(False) except AssertionError: pass except Exception as e: print("Exception", e) import sys + sys.stdout.flush() self.assertTrue(False) @@ -203,12 +217,15 @@ class TestDataLoaderWorkerLoop(unittest.TestCase): np.stack(s, axis=0) for s in list(zip(*sample_list)) ] - loader = DataLoader(dataset, - num_workers=1, - places=place, - use_shared_memory=use_shared_memory) - assert loader.num_workers > 0, \ - "go to AssertionError and pass in Mac and Windows" + loader = DataLoader( + dataset, + num_workers=1, + places=place, + use_shared_memory=use_shared_memory, + ) + assert ( + loader.num_workers > 0 + ), "go to AssertionError and pass in Mac and Windows" loader = iter(loader) print("loader length", len(loader)) indices_queue = multiprocessing.Queue() @@ -217,10 +234,21 @@ class TestDataLoaderWorkerLoop(unittest.TestCase): indices_queue.put(None) loader._workers_done_event.set() base_seed = 1234 - _worker_loop(loader._dataset, 0, indices_queue, - loader._data_queue, loader._workers_done_event, - True, _collate_fn, True, _init_fn, 0, 1, - loader._use_shared_memory, base_seed) + _worker_loop( + loader._dataset, + 0, + indices_queue, + loader._data_queue, + loader._workers_done_event, + True, + _collate_fn, + True, + _init_fn, + 0, + 1, + loader._use_shared_memory, + base_seed, + ) self.assertTrue(True) except AssertionError: pass diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py index 98c8a19d520f4920e104c493bbfa94d88db40568..fe5cf6e27a8311f3c80c9675cb1e752d1e91f61c 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py @@ -21,35 +21,52 @@ import paddle.fluid as fluid from paddle.io import DataLoader from paddle.fluid.dygraph.nn import Linear -from test_multiprocess_dataloader_iterable_dataset_static import RandomDataset, RandomBatchedDataset, prepare_places -from test_multiprocess_dataloader_iterable_dataset_static import EPOCH_NUM, BATCH_SIZE, IMAGE_SIZE, SAMPLE_NUM, CLASS_NUM +from 
test_multiprocess_dataloader_iterable_dataset_static import ( + RandomDataset, + RandomBatchedDataset, + prepare_places, +) +from test_multiprocess_dataloader_iterable_dataset_static import ( + EPOCH_NUM, + BATCH_SIZE, + IMAGE_SIZE, + SAMPLE_NUM, + CLASS_NUM, +) class SimpleFCNet(fluid.dygraph.Layer): - def __init__(self): super(SimpleFCNet, self).__init__() - param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.8)) - bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)) + param_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.8) + ) + bias_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ) self._fcs = [] in_channel = IMAGE_SIZE for hidden_size in [10, 20, 30]: self._fcs.append( - Linear(in_channel, - hidden_size, - act='tanh', - param_attr=param_attr, - bias_attr=bias_attr)) + Linear( + in_channel, + hidden_size, + act='tanh', + param_attr=param_attr, + bias_attr=bias_attr, + ) + ) in_channel = hidden_size self._fcs.append( - Linear(in_channel, - CLASS_NUM, - act='softmax', - param_attr=param_attr, - bias_attr=bias_attr)) + Linear( + in_channel, + CLASS_NUM, + act='softmax', + param_attr=param_attr, + bias_attr=bias_attr, + ) + ) def forward(self, image): out = image @@ -59,7 +76,6 @@ class SimpleFCNet(fluid.dygraph.Layer): class TestDygraphDataLoader(unittest.TestCase): - def run_main(self, num_workers, places, persistent_workers): fluid.default_startup_program().random_seed = 1 fluid.default_main_program().random_seed = 1 @@ -68,11 +84,13 @@ class TestDygraphDataLoader(unittest.TestCase): optimizer = fluid.optimizer.Adam(parameter_list=fc_net.parameters()) dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - num_workers=num_workers, - batch_size=BATCH_SIZE, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + num_workers=num_workers, + batch_size=BATCH_SIZE, + drop_last=True, + persistent_workers=persistent_workers, + ) step_list = [] loss_list = [] @@ -95,7 +113,7 @@ class TestDygraphDataLoader(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret @@ -106,19 +124,26 @@ class TestDygraphDataLoader(unittest.TestCase): for persistent_workers in [False, True]: results = [] for num_workers in [0, 2]: - print(self.__class__.__name__, p, num_workers, - persistent_workers) + print( + self.__class__.__name__, + p, + num_workers, + persistent_workers, + ) sys.stdout.flush() - ret = self.run_main(num_workers=num_workers, - places=p, - persistent_workers=persistent_workers) + ret = self.run_main( + num_workers=num_workers, + places=p, + persistent_workers=persistent_workers, + ) results.append(ret) - assert results[0]['loss'].shape[0] * 2 == results[1][ - 'loss'].shape[0] + assert ( + results[0]['loss'].shape[0] * 2 + == results[1]['loss'].shape[0] + ) class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): - def run_main(self, num_workers, places, persistent_workers): fluid.default_startup_program().random_seed = 1 fluid.default_main_program().random_seed = 1 @@ -127,11 +152,13 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): optimizer = fluid.optimizer.Adam(parameter_list=fc_net.parameters()) dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - num_workers=num_workers, - batch_size=None, - 
drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + num_workers=num_workers, + batch_size=None, + drop_last=True, + persistent_workers=persistent_workers, + ) step_list = [] loss_list = [] @@ -154,7 +181,7 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_split.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_split.py index aef0c8c7d0812a9f74be42d8db611aafcd8c880d..57ae8bf46f8634e07b55354ccead29e4a8c2e2c7 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_split.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_split.py @@ -21,7 +21,6 @@ from paddle.io import DataLoader, IterableDataset, get_worker_info class RangeIterableDatasetSplit(IterableDataset): - def __init__(self, start, end): self.start = start self.end = end @@ -34,7 +33,9 @@ class RangeIterableDatasetSplit(IterableDataset): else: per_worker = int( math.ceil( - (self.end - self.start) / float(worker_info.num_workers))) + (self.end - self.start) / float(worker_info.num_workers) + ) + ) worker_id = worker_info.id iter_start = self.start + worker_id * per_worker iter_end = min(iter_start + per_worker, self.end) @@ -44,16 +45,17 @@ class RangeIterableDatasetSplit(IterableDataset): class TestDynamicDataLoaderIterSplit(unittest.TestCase): - def test_main(self): place = fluid.CPUPlace() with fluid.dygraph.guard(place): dataset = RangeIterableDatasetSplit(0, 10) - dataloader = DataLoader(dataset, - places=place, - num_workers=2, - batch_size=1, - drop_last=True) + dataloader = DataLoader( + dataset, + places=place, + num_workers=2, + batch_size=1, + drop_last=True, + ) rets = [] for d in dataloader: @@ -63,7 +65,6 @@ class TestDynamicDataLoaderIterSplit(unittest.TestCase): class RangeIterableDataset(IterableDataset): - def __init__(self, start, end): self.start = start self.end = end @@ -74,7 +75,6 @@ class RangeIterableDataset(IterableDataset): class TestDynamicDataLoaderIterInitFuncSplit(unittest.TestCase): - def test_main(self): place = fluid.CPUPlace() with fluid.dygraph.guard(place): @@ -87,18 +87,21 @@ class TestDynamicDataLoaderIterInitFuncSplit(unittest.TestCase): start = dataset.start end = dataset.end num_per_worker = int( - math.ceil((end - start) / float(worker_info.num_workers))) + math.ceil((end - start) / float(worker_info.num_workers)) + ) worker_id = worker_info.id dataset.start = start + worker_id * num_per_worker dataset.end = min(dataset.start + num_per_worker, end) - dataloader = DataLoader(dataset, - places=place, - num_workers=1, - batch_size=1, - drop_last=True, - worker_init_fn=worker_spliter) + dataloader = DataLoader( + dataset, + places=place, + num_workers=1, + batch_size=1, + drop_last=True, + worker_init_fn=worker_spliter, + ) rets = [] for d in dataloader: diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py index 4c78edf3adec5cb3e6fb6e3ad6ca7c177ae8647c..49f12ea657e9306e7351caf3527caafd3175d056 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py +++ 
b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py @@ -28,7 +28,6 @@ CLASS_NUM = 10 class RandomDataset(IterableDataset): - def __init__(self, sample_num, class_num): self.sample_num = sample_num self.class_num = class_num @@ -37,8 +36,9 @@ class RandomDataset(IterableDataset): for i in range(self.sample_num): np.random.seed(i) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, self.class_num - 1, - (1, )).astype('int64') + label = np.random.randint(0, self.class_num - 1, (1,)).astype( + 'int64' + ) yield image, label @@ -50,29 +50,36 @@ def simple_fc_net_static(): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.data(name='image', - shape=[None, IMAGE_SIZE], - dtype='float32') + image = fluid.data( + name='image', shape=[None, IMAGE_SIZE], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') hidden = image - param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.8)) - bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)) + param_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.8) + ) + bias_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ) for hidden_size in [10, 20, 30]: - hidden = fluid.layers.fc(hidden, - size=hidden_size, - act='tanh', - param_attr=param_attr, - bias_attr=bias_attr) - - predict_label = fluid.layers.fc(hidden, - size=CLASS_NUM, - act='softmax', - param_attr=param_attr, - bias_attr=bias_attr) + hidden = fluid.layers.fc( + hidden, + size=hidden_size, + act='tanh', + param_attr=param_attr, + bias_attr=bias_attr, + ) + + predict_label = fluid.layers.fc( + hidden, + size=CLASS_NUM, + act='softmax', + param_attr=param_attr, + bias_attr=bias_attr, + ) loss = fluid.layers.reduce_mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -96,21 +103,22 @@ def prepare_places(with_data_parallel, with_cpu=False, with_gpu=True): class TestStaticDataLoader(unittest.TestCase): - def run_main(self, num_workers, places, persistent_workers): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - places=places, - num_workers=num_workers, - batch_size=BATCH_SIZE, - return_list=False, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + places=places, + num_workers=num_workers, + batch_size=BATCH_SIZE, + return_list=False, + drop_last=True, + persistent_workers=persistent_workers, + ) # assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) exe = fluid.Executor(place=places[0]) @@ -118,8 +126,9 @@ class TestStaticDataLoader(unittest.TestCase): prog = fluid.CompiledProgram(main_prog) if len(places) > 1: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) step_list = [] loss_list = [] @@ -128,7 +137,8 @@ class TestStaticDataLoader(unittest.TestCase): step = 0 for d in dataloader: assert len(d) == len(places), "{} != {}".format( - len(d), len(places)) + len(d), len(places) + ) for i, item in enumerate(d): image = item['image'] label = item['label'] @@ 
-136,10 +146,12 @@ class TestStaticDataLoader(unittest.TestCase): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(places[i]) assert label._place()._equals(places[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -148,7 +160,7 @@ class TestStaticDataLoader(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret @@ -158,19 +170,26 @@ class TestStaticDataLoader(unittest.TestCase): for persistent_workers in [False, True]: results = [] for num_workers in [0, 2]: - print(self.__class__.__name__, p, num_workers, - persistent_workers) + print( + self.__class__.__name__, + p, + num_workers, + persistent_workers, + ) sys.stdout.flush() - ret = self.run_main(num_workers=num_workers, - places=p, - persistent_workers=persistent_workers) + ret = self.run_main( + num_workers=num_workers, + places=p, + persistent_workers=persistent_workers, + ) results.append(ret) - assert results[0]['loss'].shape[0] * 2 == results[1][ - 'loss'].shape[0] + assert ( + results[0]['loss'].shape[0] * 2 + == results[1]['loss'].shape[0] + ) class RandomBatchedDataset(IterableDataset): - def __init__(self, sample_num, class_num): self.sample_num = sample_num // BATCH_SIZE self.class_num = class_num @@ -182,37 +201,40 @@ class RandomBatchedDataset(IterableDataset): labels = [] for _ in range(BATCH_SIZE): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, self.class_num - 1, - (1, )).astype('int64') + label = np.random.randint(0, self.class_num - 1, (1,)).astype( + 'int64' + ) images.append(image) labels.append(label) yield np.stack(images, axis=0), np.stack(labels, axis=0) class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): - def run_main(self, num_workers, places, persistent_workers): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - places=places, - num_workers=num_workers, - batch_size=None, - return_list=False, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + places=places, + num_workers=num_workers, + batch_size=None, + return_list=False, + drop_last=True, + persistent_workers=persistent_workers, + ) exe = fluid.Executor(place=places[0]) exe.run(startup_prog) prog = fluid.CompiledProgram(main_prog) if len(places) > 1: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) step_list = [] loss_list = [] @@ -221,7 +243,8 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): step = 0 for d in dataloader: assert len(d) == len(places), "{} != {}".format( - len(d), len(places)) + len(d), len(places) + ) for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -229,10 +252,12 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(places[i]) assert label._place()._equals(places[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - 
use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -241,7 +266,7 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py index 6a44c9f48430a8397400b270cc4731637d545c70..853d7ec1924059a5da37c7d7eb3856f0dc3fb5f0 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py @@ -29,7 +29,6 @@ CLASS_NUM = 10 class RandomDataset(Dataset): - def __init__(self, sample_num, class_num): self.sample_num = sample_num self.class_num = class_num @@ -37,7 +36,7 @@ class RandomDataset(Dataset): def __getitem__(self, idx): np.random.seed(idx) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, self.class_num - 1, (1, )).astype('int64') + label = np.random.randint(0, self.class_num - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -52,29 +51,36 @@ def simple_fc_net_static(): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.data(name='image', - shape=[None, IMAGE_SIZE], - dtype='float32') + image = fluid.data( + name='image', shape=[None, IMAGE_SIZE], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') hidden = image - param_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.8)) - bias_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)) + param_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.8) + ) + bias_attr = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ) for hidden_size in [10, 20, 30]: - hidden = fluid.layers.fc(hidden, - size=hidden_size, - act='tanh', - param_attr=param_attr, - bias_attr=bias_attr) - - predict_label = fluid.layers.fc(hidden, - size=CLASS_NUM, - act='softmax', - param_attr=param_attr, - bias_attr=bias_attr) + hidden = fluid.layers.fc( + hidden, + size=hidden_size, + act='tanh', + param_attr=param_attr, + bias_attr=bias_attr, + ) + + predict_label = fluid.layers.fc( + hidden, + size=CLASS_NUM, + act='softmax', + param_attr=param_attr, + bias_attr=bias_attr, + ) loss = fluid.layers.reduce_mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -98,21 +104,22 @@ def prepare_places(with_data_parallel, with_cpu=False, with_gpu=True): class TestStaticDataLoader(unittest.TestCase): - def run_main(self, num_workers, places, persistent_workers, use_pe=True): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - places=places, - num_workers=num_workers, - batch_size=BATCH_SIZE, - return_list=False, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + places=places, + 
num_workers=num_workers, + batch_size=BATCH_SIZE, + return_list=False, + drop_last=True, + persistent_workers=persistent_workers, + ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) exe = fluid.Executor(place=places[0]) @@ -121,8 +128,9 @@ class TestStaticDataLoader(unittest.TestCase): if use_pe: prog = fluid.CompiledProgram(main_prog) if len(places) > 1: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) else: prog = main_prog @@ -133,7 +141,8 @@ class TestStaticDataLoader(unittest.TestCase): step = 0 for d in dataloader: assert len(d) == len(places), "{} != {}".format( - len(d), len(places)) + len(d), len(places) + ) for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -141,10 +150,12 @@ class TestStaticDataLoader(unittest.TestCase): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(places[i]) assert label._place()._equals(places[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -153,7 +164,7 @@ class TestStaticDataLoader(unittest.TestCase): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret @@ -163,35 +174,43 @@ class TestStaticDataLoader(unittest.TestCase): for persistent_workers in [True, False]: results = [] for num_workers in [0, 2]: - print(self.__class__.__name__, p, num_workers, - persistent_workers) + print( + self.__class__.__name__, + p, + num_workers, + persistent_workers, + ) sys.stdout.flush() - ret = self.run_main(num_workers=num_workers, - places=p, - persistent_workers=persistent_workers) + ret = self.run_main( + num_workers=num_workers, + places=p, + persistent_workers=persistent_workers, + ) results.append(ret) diff = np.max( - np.abs(results[0]['loss'] - results[1]['loss']) / - np.abs(results[0]['loss'])) + np.abs(results[0]['loss'] - results[1]['loss']) + / np.abs(results[0]['loss']) + ) self.assertLess(diff, 1e-2) class TestStaticDataLoaderReturnList(unittest.TestCase): - def run_single_place(self, num_workers): scope = fluid.Scope() - image = fluid.data(name='image', - shape=[None, IMAGE_SIZE], - dtype='float32') + image = fluid.data( + name='image', shape=[None, IMAGE_SIZE], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') with fluid.scope_guard(scope): dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - num_workers=num_workers, - batch_size=BATCH_SIZE, - drop_last=True, - return_list=True) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + num_workers=num_workers, + batch_size=BATCH_SIZE, + drop_last=True, + return_list=True, + ) for d in dataloader: assert isinstance(d, list) @@ -201,19 +220,21 @@ class TestStaticDataLoaderReturnList(unittest.TestCase): def run_multi_place(self, num_workers): scope = fluid.Scope() - image = fluid.data(name='image', - shape=[None, IMAGE_SIZE], - dtype='float32') + image = fluid.data( + name='image', shape=[None, IMAGE_SIZE], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') with fluid.scope_guard(scope): dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - 
num_workers=num_workers, - batch_size=BATCH_SIZE, - places=[fluid.CPUPlace()] * 2, - drop_last=True, - return_list=True) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + num_workers=num_workers, + batch_size=BATCH_SIZE, + places=[fluid.CPUPlace()] * 2, + drop_last=True, + return_list=True, + ) for d in dataloader: assert isinstance(d, list) @@ -229,7 +250,6 @@ class TestStaticDataLoaderReturnList(unittest.TestCase): class RandomBatchedDataset(Dataset): - def __init__(self, sample_num, class_num): self.sample_num = int(sample_num / BATCH_SIZE) self.class_num = class_num @@ -240,8 +260,9 @@ class RandomBatchedDataset(Dataset): labels = [] for _ in range(BATCH_SIZE): image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, self.class_num - 1, - (1, )).astype('int64') + label = np.random.randint(0, self.class_num - 1, (1,)).astype( + 'int64' + ) images.append(image) labels.append(label) return np.stack(images, axis=0), np.stack(labels, axis=0) @@ -251,21 +272,22 @@ class RandomBatchedDataset(Dataset): class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): - def run_main(self, num_workers, places, persistent_workers): scope = fluid.Scope() with fluid.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) - dataloader = DataLoader(dataset, - feed_list=[image, label], - places=places, - num_workers=num_workers, - batch_size=None, - return_list=False, - drop_last=True, - persistent_workers=persistent_workers) + dataloader = DataLoader( + dataset, + feed_list=[image, label], + places=places, + num_workers=num_workers, + batch_size=None, + return_list=False, + drop_last=True, + persistent_workers=persistent_workers, + ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) exe = fluid.Executor(place=places[0]) @@ -273,8 +295,9 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): prog = fluid.CompiledProgram(main_prog) if len(places) > 1: - prog = prog.with_data_parallel(loss_name=loss.name, - places=places) + prog = prog.with_data_parallel( + loss_name=loss.name, places=places + ) step_list = [] loss_list = [] @@ -283,7 +306,8 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): step = 0 for d in dataloader: assert len(d) == len(places), "{} != {}".format( - len(d), len(places)) + len(d), len(places) + ) for i, item in enumerate(d): image = item['image'] label = item['label'] @@ -291,10 +315,12 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): assert label.shape() == [BATCH_SIZE, 1] assert image._place()._equals(places[i]) assert label._place()._equals(places[i]) - L, = exe.run(program=prog, - feed=d, - fetch_list=[loss], - use_program_cache=True) + (L,) = exe.run( + program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True, + ) loss_list.append(np.mean(L)) step += 1 step_list.append(step) @@ -303,7 +329,7 @@ class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): ret = { "time": end_t - start_t, "step": step_list, - "loss": np.array(loss_list) + "loss": np.array(loss_list), } print("time cost", ret['time'], 'step_list', ret['step']) return ret diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py index e1033ae881e81cbbb2f7b118f8d5a1707189defc..3eae95ef34b0572473d10bc3d7b94da9dfc9c890 100644 --- 
a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py @@ -24,7 +24,6 @@ class ReaderException(Exception): class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase): - def setUp(self): self.use_pipe = False self.raise_exception = False @@ -40,12 +39,12 @@ class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase): batch_size = 4 def fake_reader(): - def __impl__(): for _ in range(sample_num): if not self.raise_exception: - yield list(np.random.uniform(low=-1, high=1, - size=[10])), + yield list( + np.random.uniform(low=-1, high=1, size=[10]) + ), else: raise ValueError() @@ -53,23 +52,28 @@ class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): image = fluid.data(name='image', dtype='float32', shape=[None, 10]) - reader = fluid.io.DataLoader.from_generator(feed_list=[image], - capacity=2, - iterable=iterable) + reader = fluid.io.DataLoader.from_generator( + feed_list=[image], capacity=2, iterable=iterable + ) image_p_1 = image + 1 decorated_reader = multiprocess_reader( - [fake_reader(), fake_reader()], use_pipe=self.use_pipe) + [fake_reader(), fake_reader()], use_pipe=self.use_pipe + ) if isinstance(place, fluid.CUDAPlace): - reader.set_sample_generator(decorated_reader, - batch_size=batch_size, - places=fluid.cuda_places(0)) + reader.set_sample_generator( + decorated_reader, + batch_size=batch_size, + places=fluid.cuda_places(0), + ) else: - reader.set_sample_generator(decorated_reader, - batch_size=batch_size, - places=fluid.cpu_places(1)) + reader.set_sample_generator( + decorated_reader, + batch_size=batch_size, + places=fluid.cpu_places(1), + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -117,24 +121,24 @@ class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase): class TestMultiprocessReaderExceptionWithQueueFailed( - TestMultiprocessReaderExceptionWithQueueSuccess): - + TestMultiprocessReaderExceptionWithQueueSuccess +): def setUp(self): self.use_pipe = False self.raise_exception = True class TestMultiprocessReaderExceptionWithPipeSuccess( - TestMultiprocessReaderExceptionWithQueueSuccess): - + TestMultiprocessReaderExceptionWithQueueSuccess +): def setUp(self): self.use_pipe = True self.raise_exception = False class TestMultiprocessReaderExceptionWithPipeFailed( - TestMultiprocessReaderExceptionWithQueueSuccess): - + TestMultiprocessReaderExceptionWithQueueSuccess +): def setUp(self): self.use_pipe = True self.raise_exception = True diff --git a/python/paddle/fluid/tests/unittests/test_mv_op.py b/python/paddle/fluid/tests/unittests/test_mv_op.py index fbfd18a4778017f0cf293e7d479898dcc8e850ed..d2381a21e99fd5eb5d3a1c62b70384a4dc703ec2 100644 --- a/python/paddle/fluid/tests/unittests/test_mv_op.py +++ b/python/paddle/fluid/tests/unittests/test_mv_op.py @@ -1,4 +1,4 @@ -#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,7 +20,6 @@ from op_test import OpTest class TestMVOp(OpTest): - def setUp(self): self.op_type = "mv" self.python_api = paddle.mv @@ -40,7 +39,6 @@ class TestMVOp(OpTest): class TestMVAPI(unittest.TestCase): - def test_dygraph_api_out(self): paddle.disable_static() @@ -68,12 +66,12 @@ class TestMVAPI(unittest.TestCase): self.input_vec = np.random.rand(100).astype("float64") with program_guard(train_program, startup_program): - data_x = paddle.static.data("x", - shape=[5, 100], - dtype="float64") - data_vec = paddle.static.data("vec", - shape=[100], - dtype="float64") + data_x = paddle.static.data( + "x", shape=[5, 100], dtype="float64" + ) + data_vec = paddle.static.data( + "vec", shape=[100], dtype="float64" + ) data_x.stop_gradient = x_stop_gradient data_vec.stop_gradient = vec_stop_gradient @@ -82,19 +80,16 @@ class TestMVAPI(unittest.TestCase): self.place = paddle.CPUPlace() exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "vec": self.input_vec - }, - fetch_list=[result_vec]) + (res,) = exe.run( + feed={"x": self.input_x, "vec": self.input_vec}, + fetch_list=[result_vec], + ) z_expected = np.array(np.dot(self.input_x, self.input_vec)) np.testing.assert_allclose(res, z_expected, rtol=1e-05) class TestMVError(unittest.TestCase): - def test_input(self): - def test_shape(): paddle.enable_static() @@ -102,9 +97,9 @@ class TestMVError(unittest.TestCase): self.input_vec = np.random.rand(100).astype("float64") data_x = paddle.static.data("x", shape=[5, 100], dtype="float64") - data_vec = paddle.static.data("vec", - shape=[100, 2], - dtype="float64") + data_vec = paddle.static.data( + "vec", shape=[100, 2], dtype="float64" + ) result_vec = paddle.mv(data_x, data_vec) self.assertRaises(ValueError, test_shape) diff --git a/python/paddle/fluid/tests/unittests/test_naive_best_fit_gpu_memory_limit.py b/python/paddle/fluid/tests/unittests/test_naive_best_fit_gpu_memory_limit.py index 6994bf30523bec519959ce5b9d43ac1ed621cbfb..c6846d7068e0c6a91587043251b58343aa833ecf 100644 --- a/python/paddle/fluid/tests/unittests/test_naive_best_fit_gpu_memory_limit.py +++ b/python/paddle/fluid/tests/unittests/test_naive_best_fit_gpu_memory_limit.py @@ -23,7 +23,6 @@ if fluid.is_compiled_with_cuda(): class TestBase(unittest.TestCase): - def setUp(self): if fluid.is_compiled_with_cuda(): self._limit = fluid.core.globals()['FLAGS_gpu_memory_limit_mb'] @@ -36,8 +35,10 @@ class TestBase(unittest.TestCase): place = fluid.CUDAPlace(0) t = fluid.LoDTensor() - t.set(np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), - place) + t.set( + np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), + place, + ) del t t = fluid.LoDTensor() diff --git a/python/paddle/fluid/tests/unittests/test_name_scope.py b/python/paddle/fluid/tests/unittests/test_name_scope.py index a0e37384eb2f07dc8f7daf1430763a015bfa3f18..475ebec9ecee2b89f866f3c7a73519fa9ba4f43f 100644 --- a/python/paddle/fluid/tests/unittests/test_name_scope.py +++ b/python/paddle/fluid/tests/unittests/test_name_scope.py @@ -17,7 +17,6 @@ import paddle.fluid as fluid class TestNameScope(unittest.TestCase): - def test_name_scope(self): with fluid.name_scope("s1"): a = fluid.layers.data(name='data', shape=[1], dtype='int32') diff --git a/python/paddle/fluid/tests/unittests/test_nan_inf.py b/python/paddle/fluid/tests/unittests/test_nan_inf.py index 65a537981ad2551e7ea99e6ee1dbd8dd3d246247..f062adb6a7a43ea690625e36a99c161feddb14a9 100644 --- a/python/paddle/fluid/tests/unittests/test_nan_inf.py +++ 
b/python/paddle/fluid/tests/unittests/test_nan_inf.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestNanInf(unittest.TestCase): - def setUp(self): self._python_interp = sys.executable if os.getenv('WITH_COVERAGE', 'OFF') == 'ON': @@ -33,10 +32,12 @@ class TestNanInf(unittest.TestCase): def check_nan_inf(self): cmd = self._python_interp - proc = subprocess.Popen(cmd.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=self.env) + proc = subprocess.Popen( + cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=self.env, + ) out, err = proc.communicate() returncode = proc.returncode @@ -49,7 +50,8 @@ class TestNanInf(unittest.TestCase): assert (out + err).find('find nan or inf==='.encode()) != -1 else: assert (out + err).find( - 'There are `nan` or `inf` in tensor'.encode()) != -1 + 'There are `nan` or `inf` in tensor'.encode() + ) != -1 def test_nan_inf_in_static_mode(self): self._python_interp += " check_nan_inf_base.py" @@ -61,7 +63,6 @@ class TestNanInf(unittest.TestCase): class TestNanInfEnv(TestNanInf): - def setUp(self): super(TestNanInfEnv, self).setUp() # windows python have some bug with env, so need use str to pass ci @@ -69,7 +70,8 @@ class TestNanInfEnv(TestNanInf): self.env[str("PADDLE_INF_NAN_SKIP_OP")] = str("mul") self.env[str("PADDLE_INF_NAN_SKIP_ROLE")] = str("loss") self.env[str("PADDLE_INF_NAN_SKIP_VAR")] = str( - "elementwise_add:fc_0.tmp_1") + "elementwise_add:fc_0.tmp_1" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_nanmean_api.py b/python/paddle/fluid/tests/unittests/test_nanmean_api.py index 3155140518de5e62846d3cf02d151e066bd5de87..bcc9ac5c77a2c2003dea608d01f0a01ced47159c 100644 --- a/python/paddle/fluid/tests/unittests/test_nanmean_api.py +++ b/python/paddle/fluid/tests/unittests/test_nanmean_api.py @@ -27,10 +27,14 @@ class TestNanmeanAPI(unittest.TestCase): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) self.x[0, :, :, :] = np.nan - self.x_grad = np.array([[np.nan, np.nan, 3.], [0., np.nan, - 2.]]).astype(np.float32) - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.x_grad = np.array( + [[np.nan, np.nan, 3.0], [0.0, np.nan, 2.0]] + ).astype(np.float32) + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_api_static(self): paddle.enable_static() @@ -43,8 +47,9 @@ class TestNanmeanAPI(unittest.TestCase): out4 = paddle.nanmean(x, axis) out5 = paddle.nanmean(x, tuple(axis)) exe = paddle.static.Executor(self.place) - res = exe.run(feed={'X': self.x}, - fetch_list=[out1, out2, out3, out4, out5]) + res = exe.run( + feed={'X': self.x}, fetch_list=[out1, out2, out3, out4, out5] + ) out_ref = np.nanmean(self.x) for out in res: np.testing.assert_allclose(out, out_ref, rtol=0.0001) @@ -100,9 +105,9 @@ class TestNanmeanAPI(unittest.TestCase): sum_dx_ref = np.prod(y.shape) if np.isnan(y.numpy()).sum(): sum_dx_ref -= np.isnan(y.numpy()).sum() - cnt = paddle.sum(~paddle.isnan(x_tensor), - axis=axis, - keepdim=keepdim) + cnt = paddle.sum( + ~paddle.isnan(x_tensor), axis=axis, keepdim=keepdim + ) if (cnt == 0).sum(): dx[np.isnan(dx)] = 0 sum_dx = dx.sum() diff --git a/python/paddle/fluid/tests/unittests/test_nanmedian.py b/python/paddle/fluid/tests/unittests/test_nanmedian.py index 76bea45881338f92e4dce64242586971b650fea1..952263ae94b8775f74cda9b6966b29b13e357616 100644 --- a/python/paddle/fluid/tests/unittests/test_nanmedian.py +++ 
b/python/paddle/fluid/tests/unittests/test_nanmedian.py @@ -21,20 +21,19 @@ np.random.seed(102) class TestNanmedian(unittest.TestCase): - def setUp(self): - single_axis_shape = (120) + single_axis_shape = 120 multi_axis_shape = (2, 3, 4, 5) self.fake_data = { - "single_axis_normal": - np.random.uniform(-1, 1, single_axis_shape).astype(np.float32), - "multi_axis_normal": - np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32), - "single_axis_all_nan": - np.full(single_axis_shape, np.nan), - "multi_axis_all_nan": - np.full(multi_axis_shape, np.nan), + "single_axis_normal": np.random.uniform( + -1, 1, single_axis_shape + ).astype(np.float32), + "multi_axis_normal": np.random.uniform( + -1, 1, multi_axis_shape + ).astype(np.float32), + "single_axis_all_nan": np.full(single_axis_shape, np.nan), + "multi_axis_all_nan": np.full(multi_axis_shape, np.nan), } single_partial_nan = self.fake_data["single_axis_normal"].copy() @@ -59,11 +58,22 @@ class TestNanmedian(unittest.TestCase): col_data[:, :, 2, 3:] = np.nan self.fake_data["col_nan_odd"] = col_data - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) self.axis_candiate_list = [ - None, 0, 2, -1, -2, (1, 2), [0, -1], [0, 1, 3], (1, 2, 3), - [0, 2, 1, 3] + None, + 0, + 2, + -1, + -2, + (1, 2), + [0, -1], + [0, 1, 3], + (1, 2, 3), + [0, 2, 1, 3], ] def test_api_static(self): @@ -79,8 +89,9 @@ class TestNanmedian(unittest.TestCase): out4 = paddle.nanmedian(x, axis=axis, keepdim=True) out5 = paddle.nanmedian(x, axis=tuple(axis), keepdim=True) exe = paddle.static.Executor(self.place) - res = exe.run(feed={'X': data}, - fetch_list=[out1, out2, out3, out4, out5]) + res = exe.run( + feed={'X': data}, fetch_list=[out1, out2, out3, out4, out5] + ) for out in res: np.testing.assert_allclose(np_res, out, rtol=1e-05, equal_nan=True) @@ -109,23 +120,22 @@ class TestNanmedian(unittest.TestCase): continue np_res = np.nanmedian(data, keepdims=keep_dim) - pd_res = paddle.nanmedian(paddle.to_tensor(data), - keepdim=keep_dim) - np.testing.assert_allclose(np_res, - pd_res.numpy(), - rtol=1e-05, - equal_nan=True) + pd_res = paddle.nanmedian( + paddle.to_tensor(data), keepdim=keep_dim + ) + np.testing.assert_allclose( + np_res, pd_res.numpy(), rtol=1e-05, equal_nan=True + ) def test_axis_case(data, axis): - pd_res = paddle.nanmedian(paddle.to_tensor(data), - axis=axis, - keepdim=False) + pd_res = paddle.nanmedian( + paddle.to_tensor(data), axis=axis, keepdim=False + ) axis = clean_axis_numpy(axis, len(data.shape)) np_res = np.nanmedian(data, axis=axis, keepdims=False) - np.testing.assert_allclose(np_res, - pd_res.numpy(), - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + np_res, pd_res.numpy(), rtol=1e-05, equal_nan=True + ) for name, data in self.fake_data.items(): test_data_case(data) diff --git a/python/paddle/fluid/tests/unittests/test_nansum_api.py b/python/paddle/fluid/tests/unittests/test_nansum_api.py index c0fe5da0cbccb8d3ae6466ebfa8d7209773768ce..9abc0e4e4e7aa396ca087316e6914ed8ef5579a2 100644 --- a/python/paddle/fluid/tests/unittests/test_nansum_api.py +++ b/python/paddle/fluid/tests/unittests/test_nansum_api.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class API_Test_Nansum(unittest.TestCase): - def test_static_graph(self): paddle.enable_static() startup_program = fluid.Program() @@ -36,12 +35,14 @@ class API_Test_Nansum(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup_program) - x = 
np.array([[float('nan'), 3, 5, 9], [1, 2, - float('-nan'), - 7]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': x}, - fetch_list=[out1, out2, out3, out4]) + x = np.array( + [[float('nan'), 3, 5, 9], [1, 2, float('-nan'), 7]] + ).astype(np.float32) + res = exe.run( + train_program, + feed={'input': x}, + fetch_list=[out1, out2, out3, out4], + ) out1_np = np.array(res[0]) out2_np = np.array(res[1]) @@ -52,14 +53,22 @@ class API_Test_Nansum(unittest.TestCase): out3_ref = np.array([17, 10]).astype(np.float32) out4_ref = np.array([[17], [10]]).astype(np.float32) - self.assertTrue((out1_np == out1_ref).all(), - msg='nansum output is wrong, out =' + str(out1_np)) - self.assertTrue((out2_np == out2_ref).all(), - msg='nansum output is wrong, out =' + str(out2_np)) - self.assertTrue((out3_np == out3_ref).all(), - msg='nansum output is wrong, out =' + str(out3_np)) - self.assertTrue((out4_np == out4_ref).all(), - msg='nansum output is wrong, out =' + str(out4_np)) + self.assertTrue( + (out1_np == out1_ref).all(), + msg='nansum output is wrong, out =' + str(out1_np), + ) + self.assertTrue( + (out2_np == out2_ref).all(), + msg='nansum output is wrong, out =' + str(out2_np), + ) + self.assertTrue( + (out3_np == out3_ref).all(), + msg='nansum output is wrong, out =' + str(out3_np), + ) + self.assertTrue( + (out4_np == out4_ref).all(), + msg='nansum output is wrong, out =' + str(out4_np), + ) def test_error_api(self): paddle.enable_static() @@ -79,8 +88,9 @@ class API_Test_Nansum(unittest.TestCase): self.assertRaises(TypeError, run2) def test_dygraph(self): - x = np.array([[float('nan'), 3, 5, 9], [1, 2, float('-nan'), - 7]]).astype(np.float32) + x = np.array( + [[float('nan'), 3, 5, 9], [1, 2, float('-nan'), 7]] + ).astype(np.float32) with fluid.dygraph.guard(): inputs = fluid.dygraph.to_variable(x) out = paddle.nansum(inputs) @@ -88,7 +98,8 @@ class API_Test_Nansum(unittest.TestCase): self.assertTrue( (out.numpy() == out_ref).all(), - msg='nansum output is wrong, out =' + str(out.numpy())) + msg='nansum output is wrong, out =' + str(out.numpy()), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py index 484dbfabd9d46a5563df556c4a062b9806f11d1d..885d12c6fa2046c84c868efeac64f826f075748e 100644 --- a/python/paddle/fluid/tests/unittests/test_nce.py +++ b/python/paddle/fluid/tests/unittests/test_nce.py @@ -22,8 +22,9 @@ from paddle.fluid import Program, program_guard from op_test import OpTest -def nce(input, weight, bias, sample_weight, labels, num_classes, - num_sample_class): +def nce( + input, weight, bias, sample_weight, labels, num_classes, num_sample_class +): samples = [] sample_labels = [] batch_size = input.shape[0] @@ -54,23 +55,34 @@ def nce(input, weight, bias, sample_weight, labels, num_classes, o = sample_out[i] cost = -np.log(o / (o + b)) if samples[i][2] else -np.log(b / (o + b)) out[samples[i][0]] += cost * samples[i][3] - return (out[:, np.newaxis], - np.array(sample_out).reshape(batch_size, - num_sample_class + num_true_class), - np.array(sample_labels).reshape(batch_size, - num_sample_class + num_true_class)) + return ( + out[:, np.newaxis], + np.array(sample_out).reshape( + batch_size, num_sample_class + num_true_class + ), + np.array(sample_labels).reshape( + batch_size, num_sample_class + num_true_class + ), + ) class TestNCE(OpTest): - - def generate_data(self, dim, batch_size, num_classes, num_true_class, - num_neg_samples, is_sparse): + def generate_data( + self, + dim, + 
batch_size, + num_classes, + num_true_class, + num_neg_samples, + is_sparse, + ): input = np.random.randn(batch_size, dim).astype(np.float32) weight = np.random.randn(num_classes, dim).astype(np.float32) bias = np.random.randn(num_classes).astype(np.float32) sample_weight = np.random.randn(batch_size).astype(np.float32) - labels = np.random.randint(0, num_classes, - (batch_size, num_true_class)).astype("int64") + labels = np.random.randint( + 0, num_classes, (batch_size, num_true_class) + ).astype("int64") self.attrs = { 'num_total_classes': num_classes, 'num_neg_samples': num_neg_samples, @@ -78,14 +90,14 @@ class TestNCE(OpTest): 'seed': 0, 'sampler': 0, 'is_sparse': is_sparse, - 'is_test': self.is_test + 'is_test': self.is_test, } self.inputs = { 'Input': input, 'Label': labels, 'Weight': weight, 'Bias': bias, - 'SampleWeight': sample_weight + 'SampleWeight': sample_weight, } def set_is_test(self): @@ -95,17 +107,22 @@ class TestNCE(OpTest): self.generate_data(5, 25, 100, 1, 2, False) def compute(self): - out = nce(self.inputs['Input'], self.inputs['Weight'], - self.inputs['Bias'], self.inputs['SampleWeight'], - self.inputs['Label'], self.attrs['num_total_classes'], - self.attrs['num_neg_samples']) + out = nce( + self.inputs['Input'], + self.inputs['Weight'], + self.inputs['Bias'], + self.inputs['SampleWeight'], + self.inputs['Label'], + self.attrs['num_total_classes'], + self.attrs['num_neg_samples'], + ) if self.is_test: self.outputs = {'Cost': out[0]} else: self.outputs = { 'Cost': out[0], 'SampleLogits': out[1], - 'SampleLabels': out[2] + 'SampleLabels': out[2], } def setUp(self): @@ -118,13 +135,12 @@ class TestNCE(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["Input", "Weight", "Bias"], - "Cost", - max_relative_error=0.02) + self.check_grad( + ["Input", "Weight", "Bias"], "Cost", max_relative_error=0.02 + ) class TestNCECase1Tensor(TestNCE): - def set_data(self): self.generate_data(10, 20, 100, 2, 5, False) @@ -139,7 +155,6 @@ class TestNCETensorIsTest(TestNCE): class TestNCECase1SelectedRows(unittest.TestCase): - def setUp(self): self.base_lr = 0.0001 self.batch_size = 8 @@ -163,33 +178,51 @@ class TestNCECase1SelectedRows(unittest.TestCase): optimizer = fluid.optimizer.SGD(learning_rate=self.base_lr) return optimizer - def train_network(self, num_total_classes, num_neg_samples, sampler, - custom_dist, is_sparse): + def train_network( + self, + num_total_classes, + num_neg_samples, + sampler, + custom_dist, + is_sparse, + ): input = fluid.layers.data(name="input", shape=[10], dtype="float32") label = fluid.layers.data(name="label", shape=[1], dtype="int64") - w_param = fluid.default_main_program().global_block().create_parameter( - shape=[num_total_classes, 10], - dtype='float32', - name='nce_w', - initializer=initializer.ConstantInitializer()) - b_param = fluid.default_main_program().global_block().create_parameter( - shape=[num_total_classes, 1], - dtype='float32', - name='nce_b', - initializer=initializer.ConstantInitializer()) - - cost = fluid.layers.nce(input=input, - label=label, - num_total_classes=num_total_classes, - sampler=sampler, - custom_dist=custom_dist, - sample_weight=None, - param_attr='nce_w', - bias_attr='nce_b', - seed=1, - num_neg_samples=num_neg_samples, - is_sparse=is_sparse) + w_param = ( + fluid.default_main_program() + .global_block() + .create_parameter( + shape=[num_total_classes, 10], + dtype='float32', + name='nce_w', + initializer=initializer.ConstantInitializer(), + ) + ) + b_param = ( + fluid.default_main_program() + 
.global_block() + .create_parameter( + shape=[num_total_classes, 1], + dtype='float32', + name='nce_b', + initializer=initializer.ConstantInitializer(), + ) + ) + + cost = fluid.layers.nce( + input=input, + label=label, + num_total_classes=num_total_classes, + sampler=sampler, + custom_dist=custom_dist, + sample_weight=None, + param_attr='nce_w', + bias_attr='nce_b', + seed=1, + num_neg_samples=num_neg_samples, + is_sparse=is_sparse, + ) avg_cost = paddle.mean(cost) # optimizer optimizer = self.get_optimizer() @@ -210,15 +243,19 @@ class TestNCECase1SelectedRows(unittest.TestCase): dense_startup_program = fluid.framework.Program() dense_train_program = fluid.framework.Program() with fluid.scope_guard(dense_scope): - with fluid.program_guard(dense_train_program, - dense_startup_program): - cost, feeds = self.train_network(20, 5, "custom_dist", - nid_freq_arr.tolist(), False) + with fluid.program_guard( + dense_train_program, dense_startup_program + ): + cost, feeds = self.train_network( + 20, 5, "custom_dist", nid_freq_arr.tolist(), False + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) exe.run(dense_startup_program) - loss_val = exe.run(dense_train_program, - feed=feeder.feed(data), - fetch_list=[cost.name]) + loss_val = exe.run( + dense_train_program, + feed=feeder.feed(data), + fetch_list=[cost.name], + ) rets.append(np.mean(loss_val)) # for sparse @@ -226,106 +263,113 @@ class TestNCECase1SelectedRows(unittest.TestCase): sparse_startup_program = fluid.framework.Program() sparse_train_program = fluid.framework.Program() with fluid.scope_guard(sparse_scope): - with fluid.program_guard(sparse_train_program, - sparse_startup_program): - cost, feeds = self.train_network(20, 5, "custom_dist", - nid_freq_arr.tolist(), True) + with fluid.program_guard( + sparse_train_program, sparse_startup_program + ): + cost, feeds = self.train_network( + 20, 5, "custom_dist", nid_freq_arr.tolist(), True + ) feeder = fluid.DataFeeder(feed_list=feeds, place=place) exe.run(sparse_startup_program) - loss_val = exe.run(sparse_train_program, - feed=feeder.feed(data), - fetch_list=[cost.name]) + loss_val = exe.run( + sparse_train_program, + feed=feeder.feed(data), + fetch_list=[cost.name], + ) rets.append(np.mean(loss_val)) self.assertEqual(rets[0], rets[1]) class TestNCE_OpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - input1 = fluid.create_lod_tensor(np.array([0.0, 3.0, 2.0, 4.0]), - [[1, 1, 2]], fluid.CPUPlace()) - label1 = fluid.layers.data(name='label1', - shape=[-1, 4], - dtype="int64") + input1 = fluid.create_lod_tensor( + np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() + ) + label1 = fluid.layers.data( + name='label1', shape=[-1, 4], dtype="int64" + ) # the input(input) of nce layer must be Variable. self.assertRaises(TypeError, fluid.layers.nce, input1, label1, 5) - input2 = fluid.layers.data(name='input2', - shape=[-1, 4], - dtype="float32") - label2 = fluid.create_lod_tensor(np.array([0.0, 3.0, 2.0, 4.0]), - [[1, 1, 2]], fluid.CPUPlace()) + input2 = fluid.layers.data( + name='input2', shape=[-1, 4], dtype="float32" + ) + label2 = fluid.create_lod_tensor( + np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() + ) # the input(label) of nce layer must be Variable. 
self.assertRaises(TypeError, fluid.layers.nce, input2, label2, 5) - input3 = fluid.layers.data(name='input3', - shape=[-1, 4], - dtype="float16") - label3 = fluid.layers.data(name='label3', - shape=[-1, 1], - dtype="int64") + input3 = fluid.layers.data( + name='input3', shape=[-1, 4], dtype="float16" + ) + label3 = fluid.layers.data( + name='label3', shape=[-1, 1], dtype="int64" + ) # the data type of input(input) must be float32 or float64. self.assertRaises(TypeError, fluid.layers.nce, input3, label3, 5) - input4 = fluid.layers.data(name='input4', - shape=[-1, 4], - dtype="float32") - label4 = fluid.layers.data(name='label4', - shape=[-1, 1], - dtype="int32") + input4 = fluid.layers.data( + name='input4', shape=[-1, 4], dtype="float32" + ) + label4 = fluid.layers.data( + name='label4', shape=[-1, 1], dtype="int32" + ) # the data type of input(label) must be int64. self.assertRaises(TypeError, fluid.layers.nce, input4, label4, 5) class TestDygraphNCE_OpError(unittest.TestCase): - def test_NCE_errors(self): with program_guard(Program(), Program()): nce = fluid.NCE(20, 5) - input1 = fluid.create_lod_tensor(np.array([0.0, 3.0, 2.0, 4.0]), - [[1, 1, 2]], fluid.CPUPlace()) - label1 = fluid.layers.data(name='label1', - shape=[-1, 4], - dtype="int64") + input1 = fluid.create_lod_tensor( + np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() + ) + label1 = fluid.layers.data( + name='label1', shape=[-1, 4], dtype="int64" + ) # the input(input) of NCE layer must be Variable. self.assertRaises(TypeError, nce, input1, label1) - input2 = fluid.layers.data(name='input2', - shape=[-1, 4], - dtype="float32") - label2 = fluid.create_lod_tensor(np.array([0.0, 3.0, 2.0, 4.0]), - [[1, 1, 2]], fluid.CPUPlace()) + input2 = fluid.layers.data( + name='input2', shape=[-1, 4], dtype="float32" + ) + label2 = fluid.create_lod_tensor( + np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() + ) # the input(label) of NCE layer must be Variable. self.assertRaises(TypeError, nce, input2, label2) - input3 = fluid.layers.data(name='input3', - shape=[-1, 4], - dtype="float16") - label3 = fluid.layers.data(name='label3', - shape=[-1, 1], - dtype="int64") + input3 = fluid.layers.data( + name='input3', shape=[-1, 4], dtype="float16" + ) + label3 = fluid.layers.data( + name='label3', shape=[-1, 1], dtype="int64" + ) # the data type of input(input) must be float32 or float64. self.assertRaises(TypeError, nce, input3, label3) - input4 = fluid.layers.data(name='input4', - shape=[-1, 4], - dtype="float32") - label4 = fluid.layers.data(name='label4', - shape=[-1, 1], - dtype="int32") + input4 = fluid.layers.data( + name='input4', shape=[-1, 4], dtype="float32" + ) + label4 = fluid.layers.data( + name='label4', shape=[-1, 1], dtype="int32" + ) # the data type of input(label) must be int64. self.assertRaises(TypeError, nce, input4, label4) - input5 = fluid.layers.data(name='input5', - shape=[-1, 4], - dtype="float32") - label5 = fluid.layers.data(name='label5', - shape=[-1, 1], - dtype="int64") + input5 = fluid.layers.data( + name='input5', shape=[-1, 4], dtype="float32" + ) + label5 = fluid.layers.data( + name='label5', shape=[-1, 1], dtype="int64" + ) sample_weight = fluid.create_lod_tensor( - np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()) + np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace() + ) # the sample_weight of nce must be Variable or None. 
self.assertRaises(TypeError, nce, input5, label5, sample_weight) diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py index 42b59e57653a77acd3f4b60e98fc4f1ee34ed8aa..f8a997e4fbab6b954d2ae070f9e4816162e5934f 100755 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py @@ -19,13 +19,15 @@ import paddle.fluid.core as core import paddle.fluid as fluid -def nearest_neighbor_interp_np(X, - out_h, - out_w, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp_np( + X, + out_h, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -38,13 +40,13 @@ def nearest_neighbor_interp_np(X, n, c, in_h, in_w = X.shape ratio_h = ratio_w = 0.0 - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: ratio_w = 1.0 * in_w / out_w @@ -71,7 +73,6 @@ def nearest_neighbor_interp_np(X, class TestNearestInterpOp(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -95,10 +96,15 @@ class TestNearestInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners, - self.data_layout) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -112,7 +118,7 @@ class TestNearestInterpOp(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } self.outputs = {'Out': output_np} @@ -120,136 +126,126 @@ class TestNearestInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True class TestNearestNeighborInterpCase1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 129]).astype("int32") self.align_corners = True class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestNeighborInterpDataLayout(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 4, 4, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 8]).astype("int32") self.align_corners = True self.data_layout = "NHWC" class TestNearestInterpOpUint8(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None self.init_test_case() self.op_type = "nearest_interp" self.check_eager = True - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale > 0: out_h = int(self.input_shape[2] * self.scale) @@ -258,9 +254,14 @@ class TestNearestInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -270,67 +271,62 @@ class TestNearestInterpOpUint8(OpTest): 'out_w': self.out_w, 'scale': self.scale, 'interp_method': self.interp_method, - 'align_corners': self.align_corners + 'align_corners': self.align_corners, } self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=self.check_eager) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] self.out_h = 80 self.out_w = 40 - self.scale = 0. + self.scale = 0.0 self.align_corners = True class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] self.out_h = 5 self.out_w = 13 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([6, 15]).astype("int32") self.align_corners = True class TestNearestInterpWithoutCorners(TestNearestInterpOp): - def set_align_corners(self): self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. + self.scale = 2.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 5, 7] @@ -342,19 +338,17 @@ class TestNearestNeighborInterpScale2(TestNearestInterpOp): class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 1. + self.scale = 1.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestInterpOp_attr_tensor(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -389,59 +383,62 @@ class TestNearestInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.check_eager = False self.attrs['out_h'] = self.out_h self.attrs['out_w'] = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 5, 4, 4] self.out_h = 3 self.out_w = 3 - self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = True # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = True # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.shape_by_1Dtensor = True @@ -449,7 +446,6 @@ class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -462,7 +458,6 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): class TestNearestAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32") @@ -470,18 +465,18 @@ class TestNearestAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) - out1 = fluid.layers.resize_nearest(y, - out_shape=[12, 12], - data_format='NHWC') + out1 = fluid.layers.resize_nearest( + y, out_shape=[12, 12], data_format='NHWC' + ) out2 = fluid.layers.resize_nearest(x, out_shape=[12, dim]) out3 = fluid.layers.resize_nearest(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_nearest(x, - out_shape=[4, 4], - actual_shape=actual_size) + out4 = fluid.layers.resize_nearest( + x, out_shape=[4, 4], actual_shape=actual_size + ) out5 = fluid.layers.resize_nearest(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -496,39 +491,39 @@ class TestNearestAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = nearest_neighbor_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=True) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 1)), - rtol=1e-05) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = nearest_neighbor_interp_np( + x_data, out_h=12, out_w=12, align_corners=True + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 1)), rtol=1e-05 + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestNearestInterpException(unittest.TestCase): - def test_exception(self): input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32") def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC - out = fluid.layers.resize_nearest(input, - out_shape=[4, 8], - data_format='NDHWC') + out = fluid.layers.resize_nearest( + input, out_shape=[4, 8], data_format='NDHWC' + ) def attr_scale_type(): out = fluid.layers.resize_nearest(input, scale='scale') @@ -543,5 +538,6 @@ class TestNearestInterpException(unittest.TestCase): if __name__ == "__main__": 
import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py index c3157a6e9a155f2f49db8bb4535b7fa30aa61f9e..fb1e1b08ceab99e3c46895561d7b07576500085a 100755 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py @@ -23,18 +23,20 @@ from paddle.nn.functional import interpolate paddle.enable_static() -def nearest_interp_test(x, - OutSize=None, - SizeTensor=None, - Scale=None, - data_layout='NCHW', - out_d=-1, - out_h=-1, - out_w=-1, - scale=[], - interp_method='nearest', - align_corners=True, - align_mode=0): +def nearest_interp_test( + x, + OutSize=None, + SizeTensor=None, + Scale=None, + data_layout='NCHW', + out_d=-1, + out_h=-1, + out_w=-1, + scale=[], + interp_method='nearest', + align_corners=True, + align_mode=0, +): if isinstance(scale, float) or isinstance(scale, int): scale_list = [] for _ in range(len(x.shape) - 2): @@ -44,23 +46,36 @@ def nearest_interp_test(x, scale = list(map(float, scale)) if SizeTensor is not None: if not isinstance(SizeTensor, list) and not isinstance( - SizeTensor, tuple): + SizeTensor, tuple + ): SizeTensor = [SizeTensor] - return paddle._C_ops.nearest_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, scale, - interp_method, align_corners, - align_mode) - - -def nearest_neighbor_interp_np(X, - out_h, - out_w, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): + return paddle._C_ops.nearest_interp( + x, + OutSize, + SizeTensor, + Scale, + data_layout, + out_d, + out_h, + out_w, + scale, + interp_method, + align_corners, + align_mode, + ) + + +def nearest_neighbor_interp_np( + X, + out_h, + out_w, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -73,16 +88,16 @@ def nearest_neighbor_interp_np(X, n, c, in_h, in_w = X.shape ratio_h = ratio_w = 0.0 - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -110,17 +125,19 @@ def nearest_neighbor_interp_np(X, return out.astype(X.dtype) -def nearest_neighbor_interp3d_np(X, - out_d, - out_h, - out_w, - scale_d=0, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp3d_np( + X, + out_d, + out_h, + out_w, + scale_d=0, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 4, 1, 2, 3)) # NDHWC => NCDHW @@ -135,24 +152,24 @@ def nearest_neighbor_interp3d_np(X, n, c, in_d, in_h, in_w = X.shape ratio_d = ratio_h = ratio_w = 0.0 - if (out_d > 1): - if (align_corners): + if out_d > 1: + if align_corners: ratio_d = (in_d - 1.0) / (out_d - 1.0) else: if scale_d > 0: ratio_d = 1.0 / scale_d else: ratio_d = 1.0 * in_d / out_d - if (out_h > 1): - if (align_corners): + if out_h > 1: + if 
align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -184,7 +201,6 @@ def nearest_neighbor_interp3d_np(X, class TestNearestInterpOp(OpTest): - def setUp(self): self.python_api = nearest_interp_test self.out_size = None @@ -240,15 +256,30 @@ class TestNearestInterpOp(OpTest): if len(self.input_shape) == 4: output_np = nearest_neighbor_interp_np( - input_np, out_h, out_w, scale_h, scale_w, self.out_size, - self.actual_shape, self.align_corners, self.data_layout) + input_np, + out_h, + out_w, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) elif len(self.input_shape) == 5: - output_np = nearest_neighbor_interp3d_np(input_np, out_d, out_h, - out_w, scale_d, scale_h, - scale_w, self.out_size, - self.actual_shape, - self.align_corners, - self.data_layout) + output_np = nearest_neighbor_interp3d_np( + input_np, + out_d, + out_h, + out_w, + scale_d, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -261,7 +292,7 @@ class TestNearestInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } else: self.attrs = { @@ -269,7 +300,7 @@ class TestNearestInterpOp(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -297,7 +328,6 @@ class TestNearestInterpOp(OpTest): class TestNearestNeighborInterpCase1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 1, 7, 8] @@ -309,7 +339,6 @@ class TestNearestNeighborInterpCase1(TestNearestInterpOp): class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] @@ -320,7 +349,6 @@ class TestNearestNeighborInterpCase2(TestNearestInterpOp): class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] @@ -331,7 +359,6 @@ class TestNearestNeighborInterpCase3(TestNearestInterpOp): class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] @@ -343,7 +370,6 @@ class TestNearestNeighborInterpCase4(TestNearestInterpOp): class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] @@ -355,7 +381,6 @@ class TestNearestNeighborInterpCase5(TestNearestInterpOp): class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [1, 1, 32, 64] @@ -367,7 +392,6 @@ class TestNearestNeighborInterpCase6(TestNearestInterpOp): class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] @@ -378,7 +402,6 @@ class TestNearestNeighborInterpSame(TestNearestInterpOp): class 
TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -390,7 +413,6 @@ class TestNearestNeighborInterpActualShape(TestNearestInterpOp): class TestNearestNeighborInterpDataLayout(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 4, 4, 5] @@ -403,15 +425,15 @@ class TestNearestNeighborInterpDataLayout(TestNearestInterpOp): class TestNearestInterpOpUint8(OpTest): - def setUp(self): self.python_api = nearest_interp_test self.out_size = None self.actual_shape = None self.init_test_case() self.op_type = "nearest_interp_v2" - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -428,9 +450,16 @@ class TestNearestInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -438,7 +467,7 @@ class TestNearestInterpOpUint8(OpTest): 'out_h': self.out_h, 'out_w': self.out_w, 'interp_method': self.interp_method, - 'align_corners': self.align_corners + 'align_corners': self.align_corners, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -450,9 +479,9 @@ class TestNearestInterpOpUint8(OpTest): self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=True) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=True + ) def init_test_case(self): self.interp_method = 'nearest' @@ -464,7 +493,6 @@ class TestNearestInterpOpUint8(OpTest): class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 3, 32, 64] @@ -475,7 +503,6 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8): class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [4, 1, 7, 8] @@ -487,25 +514,22 @@ class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8): class TestNearestInterpWithoutCorners(TestNearestInterpOp): - def set_align_corners(self): self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. 
+ self.scale = 2.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 5, 7] @@ -517,7 +541,6 @@ class TestNearestNeighborInterpScale2(TestNearestInterpOp): class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 7, 5] @@ -529,7 +552,6 @@ class TestNearestNeighborInterpScale3(TestNearestInterpOp): class TestNearestNeighbor3DInterp(TestNearestInterpOp): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 4, 7, 5] @@ -542,7 +564,6 @@ class TestNearestNeighbor3DInterp(TestNearestInterpOp): class TestNearestInterpOp_attr_tensor(OpTest): - def setUp(self): self.python_api = nearest_interp_test self.out_size = None @@ -581,8 +602,9 @@ class TestNearestInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -594,9 +616,16 @@ class TestNearestInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -617,7 +646,6 @@ class TestNearestInterpOp_attr_tensor(OpTest): # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 3, 9, 6] @@ -630,7 +658,6 @@ class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -644,7 +671,6 @@ class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [3, 2, 32, 16] @@ -657,7 +683,6 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): class TestNearestAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32") @@ -665,18 +690,18 @@ class TestNearestAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) - out1 = fluid.layers.resize_nearest(y, - out_shape=[12, 12], - data_format='NHWC') + out1 = fluid.layers.resize_nearest( + y, out_shape=[12, 12], 
data_format='NHWC' + ) out2 = fluid.layers.resize_nearest(x, out_shape=[12, dim]) out3 = fluid.layers.resize_nearest(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_nearest(x, - out_shape=[4, 4], - actual_shape=actual_size) + out4 = fluid.layers.resize_nearest( + x, out_shape=[4, 4], actual_shape=actual_size + ) out5 = fluid.layers.resize_nearest(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") @@ -691,33 +716,34 @@ class TestNearestAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = nearest_neighbor_interp_np(x_data, - out_h=12, - out_w=12, - align_corners=True) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 1)), - rtol=1e-05) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = nearest_neighbor_interp_np( + x_data, out_h=12, out_w=12, align_corners=True + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 1)), rtol=1e-05 + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestNearestInterpOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -727,21 +753,22 @@ class TestNearestInterpOpAPI_dy(unittest.TestCase): scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) scale = paddle.to_tensor(scale_np) - expect_res = nearest_neighbor_interp_np(input_data, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - scale_factor=scale, - mode="nearest", - align_corners=False) + expect_res = nearest_neighbor_interp_np( + input_data, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + scale_factor=scale, + mode="nearest", + align_corners=False, + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestNearestInterp3DOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle + if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: @@ -751,30 +778,30 @@ class TestNearestInterp3DOpAPI_dy(unittest.TestCase): scale_np = np.array([2, 2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) scale = paddle.to_tensor(scale_np) - expect_res = nearest_neighbor_interp3d_np(input_data, - out_d=12, - out_h=12, - out_w=12, - align_corners=False) - out = interpolate(x=input_x, - scale_factor=scale, - mode="nearest", - align_corners=False, - data_format="NCDHW") + expect_res = nearest_neighbor_interp3d_np( + input_data, out_d=12, out_h=12, out_w=12, align_corners=False + ) + out = interpolate( + x=input_x, + scale_factor=scale, + mode="nearest", + align_corners=False, + data_format="NCDHW", + ) np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestNearestInterpException(unittest.TestCase): - def test_exception(self): import paddle + input = fluid.data(name="input", shape=[1, 3, 6, 6], 
dtype="float32") def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC - out = fluid.layers.resize_nearest(input, - out_shape=[4, 8], - data_format='NDHWC') + out = fluid.layers.resize_nearest( + input, out_shape=[4, 8], data_format='NDHWC' + ) def attr_scale_type(): out = fluid.layers.resize_nearest(input, scale='scale') @@ -788,9 +815,9 @@ class TestNearestInterpException(unittest.TestCase): def mode_error(): x = paddle.randn([1, 3]) - out = paddle.nn.functional.interpolate(x, - scale_factor='scale', - mode="BILINEAR") + out = paddle.nn.functional.interpolate( + x, scale_factor='scale', mode="BILINEAR" + ) self.assertRaises(ValueError, attr_data_format) self.assertRaises(TypeError, attr_scale_type) @@ -799,10 +826,10 @@ class TestNearestInterpException(unittest.TestCase): self.assertRaises(ValueError, mode_error) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestNearestInterp3DOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 2, 6, 6, 6] @@ -815,11 +842,13 @@ class TestNearestInterp3DOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - scale_factor=self.scale, - mode=self.interp_method, - align_corners=self.align_corners, - data_format=self.data_layout) + y = interpolate( + x, + scale_factor=self.scale, + mode=self.interp_method, + align_corners=self.align_corners, + data_format=self.data_layout, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') @@ -838,10 +867,10 @@ class TestNearestInterp3DOpForFloat16(unittest.TestCase): np.testing.assert_allclose(x_g_np_1, x_g_np_2) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestNearestInterpOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 'nearest' self.input_shape = [2, 2, 6, 6] @@ -853,10 +882,12 @@ class TestNearestInterpOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - scale_factor=self.scale, - mode=self.interp_method, - align_corners=self.align_corners) + y = interpolate( + x, + scale_factor=self.scale, + mode=self.interp_method, + align_corners=self.align_corners, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_neg_op.py b/python/paddle/fluid/tests/unittests/test_neg_op.py index 77c961408a5c2b9762a0b809362b0e05a5fc3a6f..260d0fa35dc4b55e8a9aa7591410bbaf73227e8e 100644 --- a/python/paddle/fluid/tests/unittests/test_neg_op.py +++ b/python/paddle/fluid/tests/unittests/test_neg_op.py @@ -18,7 +18,6 @@ import paddle class TestNegOp(unittest.TestCase): - def setUp(self): self.init_dtype_type() self.input = (np.random.random((32, 8)) * 100).astype(self.dtype) @@ -30,9 +29,9 @@ class TestNegOp(unittest.TestCase): input = paddle.to_tensor(self.input) dy_result = paddle.neg(input) expected_result = np.negative(self.input) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) def run_static(self, 
use_gpu=False): input = paddle.fluid.data(name='input', shape=[32, 8], dtype=self.dtype) @@ -66,31 +65,26 @@ class TestNegOp(unittest.TestCase): class TestNegOpFp32(TestNegOp): - def init_dtype_type(self): self.dtype = np.float32 class TestNegOpInt64(TestNegOp): - def init_dtype_type(self): self.dtype = np.int64 class TestNegOpInt32(TestNegOp): - def init_dtype_type(self): self.dtype = np.int32 class TestNegOpInt16(TestNegOp): - def init_dtype_type(self): self.dtype = np.int16 class TestNegOpInt8(TestNegOp): - def init_dtype_type(self): self.dtype = np.int8 diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py index 203dcdc38cb2719f6c8cd376b9396e6d0f63a311..f230cc66c20a1f3749b054fffca0fa05dba1bf63 100644 --- a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py +++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py @@ -22,7 +22,6 @@ BATCH_SIZE = 20 class TestNetWithDtype(unittest.TestCase): - def setUp(self): self.dtype = "float64" self.init_dtype() @@ -40,8 +39,9 @@ class TestNetWithDtype(unittest.TestCase): sgd_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=BATCH_SIZE) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(startup) diff --git a/python/paddle/fluid/tests/unittests/test_newprofiler.py b/python/paddle/fluid/tests/unittests/test_newprofiler.py index afa09462cb67e31f86e891f6b5f66cf7387640b3..0d42de0be7c91a314db365500f5ebb11a046ea9b 100755 --- a/python/paddle/fluid/tests/unittests/test_newprofiler.py +++ b/python/paddle/fluid/tests/unittests/test_newprofiler.py @@ -25,62 +25,67 @@ from paddle.io import Dataset, DataLoader class TestProfiler(unittest.TestCase): - def tearDown(self): self.temp_dir.cleanup() def test_profiler(self): - def my_trace_back(prof): - path = os.path.join(self.temp_dir.name, - './test_profiler_chrometracing') + path = os.path.join( + self.temp_dir.name, './test_profiler_chrometracing' + ) profiler.export_chrome_tracing(path)(prof) path = os.path.join(self.temp_dir.name, './test_profiler_pb') profiler.export_protobuf(path)(prof) self.temp_dir = tempfile.TemporaryDirectory() x_value = np.random.randn(2, 3, 3) - x = paddle.to_tensor(x_value, - stop_gradient=False, - place=paddle.CPUPlace()) + x = paddle.to_tensor( + x_value, stop_gradient=False, place=paddle.CPUPlace() + ) y = x / 2.0 ones_like_y = paddle.ones_like(y) - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], ) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], + ) as prof: y = x / 2.0 prof = None self.assertEqual(utils._is_profiler_used, False) with profiler.RecordEvent(name='test'): y = x / 2.0 - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=(1, 2)) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], scheduler=(1, 2) + ) as prof: self.assertEqual(utils._is_profiler_used, True) with profiler.RecordEvent(name='test'): y = x / 2.0 prof = None - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=profiler.make_scheduler(closed=0, - ready=1, - record=1, - repeat=1), - on_trace_ready=my_trace_back) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], + scheduler=profiler.make_scheduler( + closed=0, ready=1, record=1, repeat=1 + ), + 
on_trace_ready=my_trace_back, + ) as prof: y = x / 2.0 prof = None - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=profiler.make_scheduler(closed=0, - ready=0, - record=2, - repeat=1), - on_trace_ready=my_trace_back) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], + scheduler=profiler.make_scheduler( + closed=0, ready=0, record=2, repeat=1 + ), + on_trace_ready=my_trace_back, + ) as prof: for i in range(3): y = x / 2.0 prof.step() prof = None with profiler.Profiler( - targets=[profiler.ProfilerTarget.CPU], - scheduler=lambda x: profiler.ProfilerState.RECORD_AND_RETURN, - on_trace_ready=my_trace_back) as prof: + targets=[profiler.ProfilerTarget.CPU], + scheduler=lambda x: profiler.ProfilerState.RECORD_AND_RETURN, + on_trace_ready=my_trace_back, + ) as prof: for i in range(2): y = x / 2.0 prof.step() @@ -107,35 +112,39 @@ class TestProfiler(unittest.TestCase): prof = None with profiler.Profiler( - targets=[profiler.ProfilerTarget.CPU], - scheduler=lambda x: profiler.ProfilerState.RECORD_AND_RETURN, - on_trace_ready=my_trace_back) as prof: + targets=[profiler.ProfilerTarget.CPU], + scheduler=lambda x: profiler.ProfilerState.RECORD_AND_RETURN, + on_trace_ready=my_trace_back, + ) as prof: for i in range(2): y = x / 2.0 prof.step() prof = None - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=my_sheduler, - on_trace_ready=my_trace_back) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], + scheduler=my_sheduler, + on_trace_ready=my_trace_back, + ) as prof: for i in range(5): y = x / 2.0 prof.step() prof = None - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=my_sheduler1) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], scheduler=my_sheduler1 + ) as prof: for i in range(5): y = x / 2.0 prof.step() prof = None - with profiler.Profiler(targets=[profiler.ProfilerTarget.CPU], - scheduler=profiler.make_scheduler(closed=1, - ready=1, - record=2, - repeat=1, - skip_first=1), - on_trace_ready=my_trace_back, - profile_memory=True, - record_shapes=True) as prof: + with profiler.Profiler( + targets=[profiler.ProfilerTarget.CPU], + scheduler=profiler.make_scheduler( + closed=1, ready=1, record=2, repeat=1, skip_first=1 + ), + on_trace_ready=my_trace_back, + profile_memory=True, + record_shapes=True, + ) as prof: for i in range(5): y = x / 2.0 paddle.grad(outputs=y, inputs=[x], grad_outputs=ones_like_y) @@ -148,13 +157,12 @@ class TestProfiler(unittest.TestCase): prof = None dataset = RandomDataset(10 * 4) simple_net = SimpleNet() - opt = paddle.optimizer.SGD(learning_rate=1e-3, - parameters=simple_net.parameters()) - loader = DataLoader(dataset, - batch_size=4, - shuffle=True, - drop_last=True, - num_workers=2) + opt = paddle.optimizer.SGD( + learning_rate=1e-3, parameters=simple_net.parameters() + ) + loader = DataLoader( + dataset, batch_size=4, shuffle=True, drop_last=True, num_workers=2 + ) prof = profiler.Profiler(on_trace_ready=lambda prof: None) prof.start() for i, (image, label) in enumerate(loader()): @@ -171,8 +179,9 @@ class TestProfiler(unittest.TestCase): dataset = RandomDataset(10 * 4) simple_net = SimpleNet() loader = DataLoader(dataset, batch_size=4, shuffle=True, drop_last=True) - opt = paddle.optimizer.Adam(learning_rate=1e-3, - parameters=simple_net.parameters()) + opt = paddle.optimizer.Adam( + learning_rate=1e-3, parameters=simple_net.parameters() + ) prof = profiler.Profiler(on_trace_ready=lambda prof: None) prof.start() for i, 
(image, label) in enumerate(loader()): @@ -187,19 +196,17 @@ class TestProfiler(unittest.TestCase): class TestNvprof(unittest.TestCase): - def test_nvprof(self): for i in range(10): paddle.fluid.profiler._nvprof_range(i, 10, 20) x_value = np.random.randn(2, 3, 3) - x = paddle.to_tensor(x_value, - stop_gradient=False, - place=paddle.CPUPlace()) + x = paddle.to_tensor( + x_value, stop_gradient=False, place=paddle.CPUPlace() + ) y = x / 2.0 class TestGetProfiler(unittest.TestCase): - def test_getprofiler(self): config_content = ''' { @@ -222,11 +229,12 @@ class TestGetProfiler(unittest.TestCase): filehandle.write(config_content) filehandle.flush() import paddle.profiler.profiler as profiler + profiler = profiler.get_profiler(filehandle.name) x_value = np.random.randn(2, 3, 3) - x = paddle.to_tensor(x_value, - stop_gradient=False, - place=paddle.CPUPlace()) + x = paddle.to_tensor( + x_value, stop_gradient=False, place=paddle.CPUPlace() + ) with profiler: for i in range(5): y = x / 2.0 @@ -262,6 +270,7 @@ class TestGetProfiler(unittest.TestCase): filehandle.write(config_content) filehandle.flush() import paddle.profiler.profiler as profiler + try: profiler = profiler.get_profiler(filehandle.name) except: @@ -299,6 +308,7 @@ class TestGetProfiler(unittest.TestCase): filehandle.write(config_content) filehandle.flush() import paddle.profiler.profiler as profiler + profiler = profiler.get_profiler(filehandle.name) # test exception @@ -334,20 +344,21 @@ class TestGetProfiler(unittest.TestCase): filehandle.write(config_content) filehandle.flush() import paddle.profiler.profiler as profiler + profiler = profiler.get_profiler(filehandle.name) # test path error import paddle.profiler.profiler as profiler + profiler = profiler.get_profiler('nopath.json') class RandomDataset(Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): image = np.random.random([100]).astype('float32') - label = np.random.randint(0, 10 - 1, (1, )).astype('int64') + label = np.random.randint(0, 10 - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -355,7 +366,6 @@ class RandomDataset(Dataset): class SimpleNet(nn.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.fc = nn.Linear(100, 10) @@ -365,19 +375,20 @@ class SimpleNet(nn.Layer): class TestTimerOnly(unittest.TestCase): - def test_with_dataloader(self): - def train(step_num_samples=None): dataset = RandomDataset(20 * 4) simple_net = SimpleNet() - opt = paddle.optimizer.SGD(learning_rate=1e-3, - parameters=simple_net.parameters()) - loader = DataLoader(dataset, - batch_size=4, - shuffle=True, - drop_last=True, - num_workers=2) + opt = paddle.optimizer.SGD( + learning_rate=1e-3, parameters=simple_net.parameters() + ) + loader = DataLoader( + dataset, + batch_size=4, + shuffle=True, + drop_last=True, + num_workers=2, + ) step_info = '' p = profiler.Profiler(timer_only=True) p.start() diff --git a/python/paddle/fluid/tests/unittests/test_newprofiler_helper.py b/python/paddle/fluid/tests/unittests/test_newprofiler_helper.py index e03ee36cad1c9210d25ddd3648ae1830ad96e28f..706ae74113f1118723fada0a63de6135a8f7b97b 100755 --- a/python/paddle/fluid/tests/unittests/test_newprofiler_helper.py +++ b/python/paddle/fluid/tests/unittests/test_newprofiler_helper.py @@ -18,7 +18,6 @@ import paddle.profiler.statistic_helper as statistic_helper class TestStatisticHelper(unittest.TestCase): - def test_sum_ranges_case1(self): src = [(1, 3), (4, 10), (11, 15)] self.assertEqual(statistic_helper.sum_ranges(src), 12) 
diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py index b8158bc0e6bfcce3c545483b83bc4315d6edfb41..a598c501ec0526ee54bf1e45bdeea52aebf82016 100644 --- a/python/paddle/fluid/tests/unittests/test_nll_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py @@ -20,11 +20,9 @@ from op_test import OpTest from paddle.fluid.framework import _test_eager_guard -def nll_loss_1d(logs, - targets, - weight=None, - reduction='mean', - ignore_index=-100): +def nll_loss_1d( + logs, targets, weight=None, reduction='mean', ignore_index=-100 +): input_shape = logs.shape N = input_shape[0] C = input_shape[1] @@ -41,17 +39,16 @@ def nll_loss_1d(logs, if reduction == 'sum': return np.sum(out), np.array([total_weight]).astype('float64') elif reduction == 'mean': - return out.sum() / total_weight, np.array([total_weight - ]).astype('float64') + return out.sum() / total_weight, np.array([total_weight]).astype( + 'float64' + ) elif reduction == 'none': return out -def nll_loss_2d(logs, - targets, - weight=None, - reduction='mean', - ignore_index=-100): +def nll_loss_2d( + logs, targets, weight=None, reduction='mean', ignore_index=-100 +): input_shape = logs.shape N = input_shape[0] H = input_shape[2] @@ -71,24 +68,27 @@ def nll_loss_2d(logs, if reduction == 'sum': return np.sum(out), np.array([total_weight]).astype('float64') elif reduction == 'mean': - return out.sum() / total_weight, np.array([total_weight - ]).astype('float64') + return out.sum() / total_weight, np.array([total_weight]).astype( + 'float64' + ) elif reduction == 'none': return out class TestNLLLoss(unittest.TestCase): - def test_NLLLoss_1D_mean(self): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[10, 10], dtype='float64') label = fluid.data(name='label', shape=[10], dtype='int64') @@ -96,24 +96,25 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() with fluid.dygraph.guard(): with _test_eager_guard(): nll_loss = paddle.nn.loss.NLLLoss() - eager_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + eager_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) eager_result = eager_res.numpy() expected = nll_loss_1d(input_np, label_np)[0] @@ -126,12 +127,15 @@ class TestNLLLoss(unittest.TestCase): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, 
)).astype(np.int64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[10, 10], dtype='float64') label = fluid.data(name='label', shape=[10], dtype='int64') @@ -139,17 +143,17 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() with _test_eager_guard(): @@ -172,12 +176,15 @@ class TestNLLLoss(unittest.TestCase): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64) - weight_np = np.random.random(size=(10, )).astype(np.float64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) + weight_np = np.random.random(size=(10,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[10, 10], dtype='float64') @@ -187,26 +194,32 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() with _test_eager_guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - eager_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + eager_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) loss = eager_res.sum() loss.backward() eager_result = eager_res.numpy() @@ -222,12 +235,15 @@ class TestNLLLoss(unittest.TestCase): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64) - weight_np = np.random.random(size=(10, )).astype(np.float64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) + weight_np = np.random.random(size=(10,)).astype(np.float64) prog = fluid.Program() startup_prog = 
fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[10, 10], dtype='float64') @@ -237,24 +253,27 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='sum') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='sum' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() - expected = nll_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='sum')[0] + expected = nll_loss_1d( + input_np, label_np, weight=weight_np, reduction='sum' + )[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -264,8 +283,8 @@ class TestNLLLoss(unittest.TestCase): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64) - weight_np = np.random.random(size=(10, )).astype(np.float64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) + weight_np = np.random.random(size=(10,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() place = fluid.CPUPlace() @@ -277,19 +296,23 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] @@ -301,8 +324,8 @@ class TestNLLLoss(unittest.TestCase): np.random.seed(200) input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) - label_np = np.random.randint(0, 10, size=(10, )).astype(np.int64) - weight_np = np.random.random(size=(10, )).astype(np.float64) + label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) + weight_np = np.random.random(size=(10,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() place = fluid.CPUPlace() @@ -314,24 +337,27 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + 
fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='none') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='none' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() - expected = nll_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='none') + expected = nll_loss_1d( + input_np, label_np, weight=weight_np, reduction='none' + ) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -344,29 +370,32 @@ class TestNLLLoss(unittest.TestCase): label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np)[0] @@ -382,29 +411,32 @@ class TestNLLLoss(unittest.TestCase): label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np, reduction='sum')[0] @@ -418,16 +450,19 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 
5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') @@ -435,19 +470,23 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] @@ -461,14 +500,14 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') @@ -476,19 +515,23 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] @@ -502,15 +545,18 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else 
fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') @@ -518,25 +564,28 @@ class TestNLLLoss(unittest.TestCase): res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='sum') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='sum' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() - expected = nll_loss_2d(input_np, - label_np, - weight=weight_np, - reduction='sum')[0] + expected = nll_loss_2d( + input_np, label_np, weight=weight_np, reduction='sum' + )[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -549,35 +598,39 @@ class TestNLLLoss(unittest.TestCase): label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={"input": input_np, "label": label_np}, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() input_shape = input_np.shape label_shape = label_np.shape - input_np_reshape = np.reshape(input_np, - (input_shape[0], input_shape[1], 1, -1)) + input_np_reshape = np.reshape( + input_np, (input_shape[0], input_shape[1], 1, -1) + ) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0] @@ -590,45 +643,53 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = 
fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np)) - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np) + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() input_shape = input_np.shape label_shape = label_np.shape - input_np_reshape = np.reshape(input_np, - (input_shape[0], input_shape[1], 1, -1)) + input_np_reshape = np.reshape( + input_np, (input_shape[0], input_shape[1], 1, -1) + ) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) - expected = nll_loss_2d(input_np_reshape, - label_np_reshape, - weight=weight_np)[0] + expected = nll_loss_2d( + input_np_reshape, label_np_reshape, weight=weight_np + )[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -639,46 +700,56 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='sum') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='sum' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), 
paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() input_shape = input_np.shape label_shape = label_np.shape - input_np_reshape = np.reshape(input_np, - (input_shape[0], input_shape[1], 1, -1)) + input_np_reshape = np.reshape( + input_np, (input_shape[0], input_shape[1], 1, -1) + ) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) - expected = nll_loss_2d(input_np_reshape, - label_np_reshape, - weight=weight_np, - reduction='sum')[0] + expected = nll_loss_2d( + input_np_reshape, + label_np_reshape, + weight=weight_np, + reduction='sum', + )[0] np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -689,47 +760,57 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - #place = fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) + # place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='none') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='none' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() input_shape = input_np.shape label_shape = label_np.shape - out_shape = (input_shape[0], ) + input_shape[2:] - input_np_reshape = np.reshape(input_np, - (input_shape[0], input_shape[1], 1, -1)) + out_shape = (input_shape[0],) + input_shape[2:] + input_np_reshape = np.reshape( + input_np, (input_shape[0], input_shape[1], 1, -1) + ) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) - expected = nll_loss_2d(input_np_reshape, - label_np_reshape, - weight=weight_np, - reduction='none') + expected = nll_loss_2d( + input_np_reshape, + label_np_reshape, + weight=weight_np, + reduction='none', + ) expected = np.reshape(expected, out_shape) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -740,45 +821,52 @@ class TestNLLLoss(unittest.TestCase): input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) - weight_np = np.random.random(size=(3, )).astype(np.float64) + weight_np = 
np.random.random(size=(3,)).astype(np.float64) prog = fluid.Program() startup_prog = fluid.Program() place = fluid.CPUPlace() with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', - shape=[5, 3, 5, 5, 5], - dtype='float64') + input = fluid.data( + name='input', shape=[5, 3, 5, 5, 5], dtype='float64' + ) label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64') weight = fluid.data(name='weight', shape=[3], dtype='float64') nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) exe = fluid.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np, - "weight": weight_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, + feed={ + "input": input_np, + "label": label_np, + "weight": weight_np, + }, + fetch_list=[res], + ) with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=paddle.to_tensor(weight_np), reduction='none') - dy_res = nll_loss(paddle.to_tensor(input_np), - paddle.to_tensor(label_np)) + weight=paddle.to_tensor(weight_np), reduction='none' + ) + dy_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np) + ) dy_result = dy_res.numpy() input_shape = input_np.shape label_shape = label_np.shape - out_shape = (input_shape[0], ) + input_shape[2:] - input_np_reshape = np.reshape(input_np, - (input_shape[0], input_shape[1], 1, -1)) + out_shape = (input_shape[0],) + input_shape[2:] + input_np_reshape = np.reshape( + input_np, (input_shape[0], input_shape[1], 1, -1) + ) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) - expected = nll_loss_2d(input_np_reshape, - label_np_reshape, - weight=weight_np, - reduction='none') + expected = nll_loss_2d( + input_np_reshape, + label_np_reshape, + weight=weight_np, + reduction='none', + ) expected = np.reshape(expected, out_shape) np.testing.assert_allclose(static_result, expected, rtol=1e-05) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05) @@ -786,7 +874,6 @@ class TestNLLLoss(unittest.TestCase): class TestNLLLossOp1DWithReduce(OpTest): - def setUp(self): self.init_test_case() self.op_type = "nll_loss" @@ -796,20 +883,23 @@ class TestNLLLossOp1DWithReduce(OpTest): self.python_api = paddle.nn.functional.nll_loss self.python_out_sig = ["Out"] np.random.seed(200) - input_np = np.random.uniform(0.1, 0.8, - self.input_shape).astype("float64") + input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype( + "float64" + ) np.random.seed(200) - label_np = np.random.randint(0, self.input_shape[1], - self.label_shape).astype("int64") + label_np = np.random.randint( + 0, self.input_shape[1], self.label_shape + ).astype("int64") output_np, total_weight_np = nll_loss_1d(input_np, label_np) self.inputs = {'X': input_np, 'Label': label_np} if self.with_weight: np.random.seed(200) - weight_np = np.random.uniform(0.1, 0.8, - self.input_shape[1]).astype("float64") - output_np, total_weight_np = nll_loss_1d(input_np, - label_np, - weight=weight_np) + weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype( + "float64" + ) + output_np, total_weight_np = nll_loss_1d( + input_np, label_np, weight=weight_np + ) self.inputs['Weight'] = weight_np self.outputs = {'Out': output_np, 'Total_weight': total_weight_np} @@ -836,7 +926,6 @@ class TestNLLLossOp1DWithReduce(OpTest): class TestNLLLossOp1DNoReduce(OpTest): - def setUp(self): self.init_test_case() self.op_type = "nll_loss" @@ -844,22 +933,24 @@ class TestNLLLossOp1DNoReduce(OpTest): self.python_out_sig = ["Out"] 
self.with_weight = False np.random.seed(200) - input_np = np.random.uniform(0.1, 0.8, - self.input_shape).astype("float64") + input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype( + "float64" + ) np.random.seed(200) - label_np = np.random.randint(0, self.input_shape[1], - self.label_shape).astype("int64") + label_np = np.random.randint( + 0, self.input_shape[1], self.label_shape + ).astype("int64") output_np = nll_loss_1d(input_np, label_np, reduction='none') total_weight_np = np.array([0]).astype('float64') self.inputs = {'X': input_np, 'Label': label_np} if self.with_weight: np.random.seed(200) - weight_np = np.random.uniform(0.1, 0.8, - self.input_shape[1]).astype("float64") - output_np, total_weight_np = nll_loss_1d(input_np, - label_np, - weight=weight_np, - reduction='none') + weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype( + "float64" + ) + output_np, total_weight_np = nll_loss_1d( + input_np, label_np, weight=weight_np, reduction='none' + ) self.inputs['Weight'] = weight_np self.outputs = {'Out': output_np, 'Total_weight': total_weight_np} @@ -886,7 +977,6 @@ class TestNLLLossOp1DNoReduce(OpTest): class TestNLLLossOp2DWithReduce(OpTest): - def setUp(self): self.init_test_case() self.op_type = "nll_loss" @@ -894,20 +984,23 @@ class TestNLLLossOp2DWithReduce(OpTest): self.python_out_sig = ["Out"] self.with_weight = False np.random.seed(200) - input_np = np.random.uniform(0.1, 0.8, - self.input_shape).astype("float64") + input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype( + "float64" + ) np.random.seed(200) - label_np = np.random.randint(0, self.input_shape[1], - self.label_shape).astype("int64") + label_np = np.random.randint( + 0, self.input_shape[1], self.label_shape + ).astype("int64") output_np, total_weight_np = nll_loss_2d(input_np, label_np) self.inputs = {'X': input_np, 'Label': label_np} if self.with_weight: np.random.seed(200) - weight_np = np.random.uniform(0.1, 0.8, - self.input_shape[1]).astype("float64") - output_np, total_weight_np = nll_loss_2d(input_np, - label_np, - weight=weight_np) + weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype( + "float64" + ) + output_np, total_weight_np = nll_loss_2d( + input_np, label_np, weight=weight_np + ) self.inputs['Weight'] = weight_np self.outputs = {'Out': output_np, 'Total_weight': total_weight_np} @@ -934,7 +1027,6 @@ class TestNLLLossOp2DWithReduce(OpTest): class TestNLLLossOp2DNoReduce(OpTest): - def setUp(self): self.init_test_case() self.op_type = "nll_loss" @@ -942,22 +1034,24 @@ class TestNLLLossOp2DNoReduce(OpTest): self.python_out_sig = ["Out"] self.with_weight = False np.random.seed(200) - input_np = np.random.uniform(0.1, 0.8, - self.input_shape).astype("float64") + input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype( + "float64" + ) np.random.seed(200) - label_np = np.random.randint(0, self.input_shape[1], - self.label_shape).astype("int64") + label_np = np.random.randint( + 0, self.input_shape[1], self.label_shape + ).astype("int64") output_np = nll_loss_2d(input_np, label_np, reduction='none') total_weight_np = np.array([0]).astype('float64') self.inputs = {'X': input_np, 'Label': label_np} if self.with_weight: np.random.seed(200) - weight_np = np.random.uniform(0.1, 0.8, - self.input_shape[1]).astype("float64") - output_np, total_weight_np = nll_loss_2d(input_np, - label_np, - weight=weight_np, - reduction='none') + weight_np = np.random.uniform(0.1, 0.8, self.input_shape[1]).astype( + "float64" + ) + output_np, total_weight_np = 
nll_loss_2d( + input_np, label_np, weight=weight_np, reduction='none' + ) self.inputs['Weight'] = weight_np self.outputs = {'Out': output_np, 'Total_weight': total_weight_np} @@ -984,7 +1078,6 @@ class TestNLLLossOp2DNoReduce(OpTest): class TestNLLLossName(unittest.TestCase): - def test_name(self): prog = paddle.static.Program() startup_prog = paddle.static.Program() @@ -998,22 +1091,26 @@ class TestNLLLossName(unittest.TestCase): class TestNLLLossInvalidArgs(unittest.TestCase): - def test_x_dim_value_error(self): - def test_x_dim_lt_2(): prog = paddle.static.Program() startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): - x = paddle.fluid.data(name='x', shape=[ - 10, - ], dtype='float64') - label = paddle.fluid.data(name='label', - shape=[ - 10, - ], - dtype='float64') + x = paddle.fluid.data( + name='x', + shape=[ + 10, + ], + dtype='float64', + ) + label = paddle.fluid.data( + name='label', + shape=[ + 10, + ], + dtype='float64', + ) nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(x, label) @@ -1021,8 +1118,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase): def test_x_dim_imperative_lt_2(): with fluid.dygraph.guard(): - x_np = np.random.random(size=(5, )).astype(np.float64) - label_np = np.random.randint(0, 10, size=(5, )).astype(np.int64) + x_np = np.random.random(size=(5,)).astype(np.float64) + label_np = np.random.randint(0, 10, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) label = paddle.to_tensor(label_np) nll_loss = paddle.nn.loss.NLLLoss() @@ -1031,16 +1128,15 @@ class TestNLLLossInvalidArgs(unittest.TestCase): self.assertRaises(ValueError, test_x_dim_imperative_lt_2) def test_reduction_value_error(self): - def test_NLLLoss_reduction_not_sum_mean_none(): prog = paddle.static.Program() startup_prog = paddle.static.Program() place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - label = paddle.fluid.data(name='label', - shape=[10], - dtype='int64') + label = paddle.fluid.data( + name='label', shape=[10], dtype='int64' + ) nll_loss = paddle.nn.loss.NLLLoss(reduction='') res = nll_loss(x, label) @@ -1049,14 +1145,15 @@ class TestNLLLossInvalidArgs(unittest.TestCase): def test_NLLLoss_reduction_imperative_not_sum_mean_none(): with fluid.dygraph.guard(): x_np = np.random.random(size=(5, 3)).astype(np.float64) - label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64) + label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) label = paddle.to_tensor(label_np) nll_loss = paddle.nn.loss.NLLLoss(reduction='') res = nll_loss(x, label) - self.assertRaises(ValueError, - test_NLLLoss_reduction_imperative_not_sum_mean_none) + self.assertRaises( + ValueError, test_NLLLoss_reduction_imperative_not_sum_mean_none + ) def test_nll_loss_function_reduction_not_sum_mean_none(): prog = paddle.static.Program() @@ -1064,25 +1161,27 @@ class TestNLLLossInvalidArgs(unittest.TestCase): place = paddle.CPUPlace() with paddle.static.program_guard(prog, startup_prog): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - label = paddle.fluid.data(name='label', - shape=[10], - dtype='int64') + label = paddle.fluid.data( + name='label', shape=[10], dtype='int64' + ) res = paddle.nn.functional.nll_loss(x, label, reduction='') - self.assertRaises(ValueError, - test_nll_loss_function_reduction_not_sum_mean_none) + self.assertRaises( + ValueError, 
test_nll_loss_function_reduction_not_sum_mean_none + ) def test_nll_loss_function_reduction_imperative_not_sum_mean_none(): with fluid.dygraph.guard(): x_np = np.random.random(size=(5, 3)).astype(np.float64) - label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64) + label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) label = paddle.to_tensor(label_np) res = paddle.nn.functional.nll_loss(x, label, reduction='') self.assertRaises( ValueError, - test_nll_loss_function_reduction_imperative_not_sum_mean_none) + test_nll_loss_function_reduction_imperative_not_sum_mean_none, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_nms_op.py b/python/paddle/fluid/tests/unittests/test_nms_op.py index cbd24d4ddf22e92a62252990bdaca7e3d81792b4..80f2da013251d68f0613ef2873ffd7fac25550da 100755 --- a/python/paddle/fluid/tests/unittests/test_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_nms_op.py @@ -19,8 +19,7 @@ import paddle def iou(box_a, box_b): - """Apply intersection-over-union overlap between box_a and box_b - """ + """Apply intersection-over-union overlap between box_a and box_b""" xmin_a = min(box_a[0], box_a[2]) ymin_a = min(box_a[1], box_a[3]) xmax_a = max(box_a[0], box_a[2]) @@ -69,7 +68,6 @@ def nms(boxes, nms_threshold): class TestNMSOp(OpTest): - def setUp(self): self.op_type = 'nms' self.python_api = paddle.vision.ops.nms diff --git a/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py b/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py index f5a7d2ed4ed8e27879c11aa656395bf9a0f36072..f1e1f6f6e4b2a3cbfb65fb774bf3ece02160eba5 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py @@ -22,7 +22,6 @@ eps = 1e-6 class TestDiceLossValue(unittest.TestCase): - def test_dice_loss(self): input_ = paddle.rand([2, 3, num_classes]) label_ = paddle.randint(0, num_classes, [2, 3, 1], dtype=paddle.int64) @@ -40,23 +39,21 @@ class TestDiceLossValue(unittest.TestCase): class TestDiceLossInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_dtype(): input_ = paddle.rand([2, 3, num_classes], dtype=paddle.float32) - label_ = paddle.randint(0, - num_classes, [2, 3, 1], - dtype=paddle.int64) + label_ = paddle.randint( + 0, num_classes, [2, 3, 1], dtype=paddle.int64 + ) nn.dice_loss(input_, label_.astype(paddle.float32)) self.assertRaises(AssertionError, test_invalid_dtype) def test_zero_shape_input(): input_ = paddle.rand([0, 3, num_classes], dtype=paddle.float32) - label_ = paddle.randint(0, - num_classes, [0, 3, 1], - dtype=paddle.int64) + label_ = paddle.randint( + 0, num_classes, [0, 3, 1], dtype=paddle.int64 + ) nn.dice_loss(input_, label_) self.assertRaises(AssertionError, test_zero_shape_input) diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py index d658fcec836dbf687158e04159a12e2a644c7d3a..a7062e600258e12fdce266f81269da66cfbc15d8 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py +++ b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py @@ -22,7 +22,6 @@ paddle.disable_static() class EmbeddingDygraph(unittest.TestCase): - def func_1(self): x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64) paddle.disable_static(paddle.CPUPlace()) @@ -33,8 +32,9 @@ class EmbeddingDygraph(unittest.TestCase): w0 = np.full(shape=(10, 3), 
fill_value=2).astype(np.float32) embedding.weight.set_value(w0) - adam = paddle.optimizer.Adam(parameters=[embedding.weight], - learning_rate=0.01) + adam = paddle.optimizer.Adam( + parameters=[embedding.weight], learning_rate=0.01 + ) adam.clear_grad() out = embedding(x) diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py index bb1541beb1f44f3db407b3fe4fd260c59d381651..b5c354c7486f675441044b13191c3d6a85c38dab 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py +++ b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py @@ -19,33 +19,36 @@ import paddle.nn.functional as functional class EmbeddingStatic(unittest.TestCase): - def test_1(self): prog = fluid.Program() with fluid.program_guard(prog): def test_bad_x(): initializer = fluid.initializer.NumpyArrayInitializer( - np.random.random(size=(128, 100))) - - param_attr = fluid.ParamAttr(name="emb_weight", - learning_rate=0.5, - initializer=initializer, - trainable=True) - - weight = prog.global_block().create_parameter((128, 100), - attr=param_attr, - dtype="float32") - - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="int64") - - emb = functional.embedding(x=label, - weight=weight, - sparse=True, - name="embedding") + np.random.random(size=(128, 100)) + ) + + param_attr = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=initializer, + trainable=True, + ) + + weight = prog.global_block().create_parameter( + (128, 100), attr=param_attr, dtype="float32" + ) + + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="int64", + ) + + emb = functional.embedding( + x=label, weight=weight, sparse=True, name="embedding" + ) test_bad_x() @@ -55,27 +58,34 @@ class EmbeddingStatic(unittest.TestCase): def test_bad_x(): initializer = fluid.initializer.NumpyArrayInitializer( - np.random.random(size=(128, 100))) - - param_attr = fluid.ParamAttr(name="emb_weight", - learning_rate=0.5, - initializer=initializer, - trainable=True) - - weight = prog.global_block().create_parameter((128, 100), - attr=param_attr, - dtype="float32") - - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="int32") - - emb = functional.embedding(x=label, - weight=weight, - padding_idx=129, - sparse=True, - name="embedding") + np.random.random(size=(128, 100)) + ) + + param_attr = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=initializer, + trainable=True, + ) + + weight = prog.global_block().create_parameter( + (128, 100), attr=param_attr, dtype="float32" + ) + + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="int32", + ) + + emb = functional.embedding( + x=label, + weight=weight, + padding_idx=129, + sparse=True, + name="embedding", + ) with self.assertRaises(ValueError): test_bad_x() diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py index 27a7af94485203d05418d28b9e135605245e118b..f7126a998818f3cde5392f3947b7567e9240185f 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py +++ b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import Program, program_guard class TestOneHotOp(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' 
depth = 10 @@ -47,7 +46,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_attr(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -56,8 +54,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -71,7 +70,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -95,7 +93,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -104,8 +101,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -119,7 +117,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_exception(unittest.TestCase): - def setUp(self): self.op_type = 'one_hot_v2' self.depth = 10 @@ -135,30 +132,34 @@ class TestOneHotOp_exception(unittest.TestCase): def test_check_output(self): program = Program() with program_guard(program): - x = fluid.layers.data(name='x', - shape=[self.dimension], - dtype='float32', - lod_level=1) + x = fluid.layers.data( + name='x', shape=[self.dimension], dtype='float32', lod_level=1 + ) block = program.current_block() - one_hot_out = block.create_var(name="one_hot_out", - type=core.VarDesc.VarType.LOD_TENSOR, - dtype='float32') - block.append_op(type='one_hot', - inputs={'X': x}, - attrs={'depth': self.depth}, - outputs={'Out': one_hot_out}) + one_hot_out = block.create_var( + name="one_hot_out", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32', + ) + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}, + ) exe = fluid.Executor(self.place) def run(): - exe.run(feed={'x': self.x}, - fetch_list=[one_hot_out], - return_numpy=False) + exe.run( + feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False, + ) self.assertRaises(ValueError, run) class TestOneHotOpApi(unittest.TestCase): - def test_api(self): num_classes = 10 self._run(num_classes) @@ -169,40 +170,45 @@ class TestOneHotOpApi(unittest.TestCase): def test_api_with_dygraph(self): num_classes = 10 - label = np.array([ - np.random.randint(0, num_classes - 1) for i in range(6) - ]).reshape([6, 1]) + label = np.array( + [np.random.randint(0, num_classes - 1) for i in range(6)] + ).reshape([6, 1]) with fluid.dygraph.guard(): one_hot_label = functional.one_hot( - x=fluid.dygraph.to_variable(label), num_classes=num_classes) + x=fluid.dygraph.to_variable(label), num_classes=num_classes + ) def _run(self, num_classes): label = fluid.layers.data(name="label", shape=[1], dtype="int64") one_hot_label = functional.one_hot(x=label, num_classes=num_classes) place = fluid.CPUPlace() - label_data = np.array([np.random.randint(0, 10 - 1) - for i in range(6)]).reshape([6, 1]) + label_data = np.array( + [np.random.randint(0, 10 - 1) for i in range(6)] + ).reshape([6, 1]) exe = 
fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'label': label_data, - }, - fetch_list=[one_hot_label], - return_numpy=False) + ret = exe.run( + feed={ + 'label': label_data, + }, + fetch_list=[one_hot_label], + return_numpy=False, + ) class BadInputTestOnehotV2(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="float32") + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32", + ) one_hot_label = functional.one_hot(x=label, num_classes=4) self.assertRaises(TypeError, test_bad_x) diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py index 7b806491bc5608295488cc26dc219c470cec387f..4b3d92c92dcea81cdd1e636d85434c65b36e271a 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py @@ -26,28 +26,25 @@ paddle.enable_static() class TestSliceOpDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): self.config() - out = fluid.layers.slice(self.inputs, - axes=self.axes, - starts=self.starts, - ends=self.ends) - gradient_checker.double_grad_check([self.inputs], - out, - x_init=self.x_arr, - place=place) + out = fluid.layers.slice( + self.inputs, axes=self.axes, starts=self.starts, ends=self.ends + ) + gradient_checker.double_grad_check( + [self.inputs], out, x_init=self.x_arr, place=place + ) def config(self): self.starts = [1, 0, -1] self.ends = [3, 3, 6] self.axes = [0, 1, 2] self.x_arr = np.random.random([3, 4, 5, 2]).astype("float64") - self.inputs = layers.create_parameter(dtype="float64", - shape=[3, 4, 5, 2], - name='x') + self.inputs = layers.create_parameter( + dtype="float64", shape=[3, 4, 5, 2], name='x' + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -58,19 +55,17 @@ class TestSliceOpDoubleGradCheck(unittest.TestCase): class TestSliceOpDoubleGradCheckCase3(TestSliceOpDoubleGradCheck): - def config(self): self.starts = [1, -1, 1] self.ends = [3, 3, 3] self.axes = [0, 1, 2] self.x_arr = np.random.random([3, 3, 3]).astype("float64") - self.inputs = layers.create_parameter(dtype="float64", - shape=[3, 3, 3], - name='x3') + self.inputs = layers.create_parameter( + dtype="float64", shape=[3, 3, 3], name='x3' + ) class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [7, 11] @@ -82,11 +77,9 @@ class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase): y = layers.reduce_mean(x, dim=0) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -97,7 +90,6 @@ class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase): class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): shape = [7, 11] @@ -109,11 +101,9 @@ class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase): y = layers.reduce_sum(x, dim=0) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - y, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], y, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -124,7 +114,6 
@@ class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase): class TestReshapeDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): x_shape = [3, 12] @@ -137,11 +126,9 @@ class TestReshapeDoubleGradCheck(unittest.TestCase): out = layers.expand(x, expand_times) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -152,7 +139,6 @@ class TestReshapeDoubleGradCheck(unittest.TestCase): class TestExpandDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): x_shape = [3, 12] @@ -165,11 +151,9 @@ class TestExpandDoubleGradCheck(unittest.TestCase): out = layers.reshape(x, new_shape) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -180,7 +164,6 @@ class TestExpandDoubleGradCheck(unittest.TestCase): class TestTileDoubleGradCheck(unittest.TestCase): - def tile_wrapper(self, x): return paddle.tile(x[0], [4, 9]) @@ -196,15 +179,12 @@ class TestTileDoubleGradCheck(unittest.TestCase): out = paddle.tile(x, repeat_times) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.tile_wrapper, [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.tile_wrapper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -215,7 +195,6 @@ class TestTileDoubleGradCheck(unittest.TestCase): class TestExpandV2DoubleGradCheck(unittest.TestCase): - def expand_wrapper(self, x): return paddle.expand(x[0], [4, 12]) @@ -231,15 +210,12 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase): out = paddle.expand(x, new_shape) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.expand_wrapper, [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.expand_wrapper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -250,7 +226,6 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase): class TestSqueezeDoubleGradCheck(unittest.TestCase): - def squeeze_warpper(self, x): axes = [0, 2] return paddle.squeeze(x[0], axes) @@ -267,16 +242,12 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): out = paddle.squeeze(x, axes) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.squeeze_warpper, - [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.squeeze_warpper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): 
places = [fluid.CPUPlace()] @@ -287,7 +258,6 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): class TestUnsqueezeDoubleGradCheck(unittest.TestCase): - def unsqueeze_wrapper(self, x): axes = [1, 2] return paddle.unsqueeze(x[0], axes) @@ -304,16 +274,12 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): out = paddle.unsqueeze(x, axes) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper, - [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.unsqueeze_wrapper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -324,9 +290,8 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): class TestClipDoubleGradCheck(unittest.TestCase): - def clip_wrapper(self, x): - return paddle.clip(x[0], min=-1., max=1.) + return paddle.clip(x[0], min=-1.0, max=1.0) @prog_scope() def func(self, place): @@ -335,14 +300,13 @@ class TestClipDoubleGradCheck(unittest.TestCase): x = layers.data('x', x_shape, False, dtype) x.persistable = True - out = paddle.clip(x, min=-1., max=1.) - x_arr = np.random.uniform(-5., 5., x_shape).astype(dtype) + out = paddle.clip(x, min=-1.0, max=1.0) + x_arr = np.random.uniform(-5.0, 5.0, x_shape).astype(dtype) gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place) - gradient_checker.double_grad_check_for_dygraph(self.clip_wrapper, [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.clip_wrapper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -353,7 +317,6 @@ class TestClipDoubleGradCheck(unittest.TestCase): class TestTransposeDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): x_shape = [3, 40] @@ -376,7 +339,6 @@ class TestTransposeDoubleGradCheck(unittest.TestCase): class TestTransposeDoubleGradCheckCase1(unittest.TestCase): - @prog_scope() def func(self, place): x_shape = [2, 3, 4, 5] @@ -399,7 +361,6 @@ class TestTransposeDoubleGradCheckCase1(unittest.TestCase): class TestConstantPadDoubleGradCheck(unittest.TestCase): - def pad_wrapper(self, x): pad = [1, 1, 1, 1] return paddle.nn.functional.pad(x[0], pad) @@ -416,15 +377,12 @@ class TestConstantPadDoubleGradCheck(unittest.TestCase): out = paddle.nn.functional.pad(x, pad) x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x], - out, - x_init=x_arr, - place=place, - eps=eps) - gradient_checker.double_grad_check_for_dygraph(self.pad_wrapper, [x], - out, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [x], out, x_init=x_arr, place=place, eps=eps + ) + gradient_checker.double_grad_check_for_dygraph( + self.pad_wrapper, [x], out, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -435,7 +393,6 @@ class TestConstantPadDoubleGradCheck(unittest.TestCase): class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck): - @prog_scope() def func(self, place): x_shape = [2, 3, 4, 5] @@ -451,7 +408,6 @@ class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck): class TestConcatDoubleGradCheck(unittest.TestCase): - def concat_wrapper(self, x): return paddle.concat(x, axis=0) @@ -469,15 +425,16 @@ class 
TestConcatDoubleGradCheck(unittest.TestCase): x2_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) x1_arr = np.random.uniform(-1, 1, x_shape).astype(dtype) - gradient_checker.double_grad_check([x1, x2], - out, - x_init=[x1_arr, x2_arr], - place=place) - gradient_checker.double_grad_check_for_dygraph(self.concat_wrapper, - [x1, x2], - out, - x_init=[x1_arr, x2_arr], - place=place) + gradient_checker.double_grad_check( + [x1, x2], out, x_init=[x1_arr, x2_arr], place=place + ) + gradient_checker.double_grad_check_for_dygraph( + self.concat_wrapper, + [x1, x2], + out, + x_init=[x1_arr, x2_arr], + place=place, + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -488,23 +445,22 @@ class TestConcatDoubleGradCheck(unittest.TestCase): class TestAvgPool2DDoubleGradCheckCase1(unittest.TestCase): - @prog_scope() def func(self, place): - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) input_NCHW.persistable = True y = layers.pool2d(input_NCHW, pool_size=2, pool_type="avg") x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) - gradient_checker.double_grad_check([input_NCHW], - y, - x_init=x_arr, - place=place, - eps=0.05) + gradient_checker.double_grad_check( + [input_NCHW], y, x_init=x_arr, place=place, eps=0.05 + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -515,36 +471,33 @@ class TestAvgPool2DDoubleGradCheckCase1(unittest.TestCase): class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase): - def pool2d_wrapper(self, x): - return paddle.nn.functional.avg_pool2d(x[0], - kernel_size=2, - data_format="NHWC") + return paddle.nn.functional.avg_pool2d( + x[0], kernel_size=2, data_format="NHWC" + ) @prog_scope() def func(self, place): - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) input_NHWC.persistable = True - y = paddle.nn.functional.avg_pool2d(input_NHWC, - kernel_size=2, - data_format="NHWC") + y = paddle.nn.functional.avg_pool2d( + input_NHWC, kernel_size=2, data_format="NHWC" + ) x_arr = np.random.uniform(-1, 1, [2, 5, 5, 3]).astype(np.float32) - gradient_checker.double_grad_check([input_NHWC], - y, - x_init=x_arr, - place=place, - eps=0.05) + gradient_checker.double_grad_check( + [input_NHWC], y, x_init=x_arr, place=place, eps=0.05 + ) - gradient_checker.double_grad_check_for_dygraph(self.pool2d_wrapper, - [input_NHWC], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.pool2d_wrapper, [input_NHWC], y, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -555,35 +508,32 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase): class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase): - def pool2d_wrapper(self, x): - return paddle.nn.functional.avg_pool2d(x[0], - kernel_size=2, - padding=[1, 1]) + return paddle.nn.functional.avg_pool2d( + x[0], kernel_size=2, padding=[1, 1] + ) @prog_scope() def func(self, place): - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) input_NCHW.persistable = True - y = 
paddle.nn.functional.avg_pool2d(input_NCHW, - kernel_size=2, - padding=[1, 1]) + y = paddle.nn.functional.avg_pool2d( + input_NCHW, kernel_size=2, padding=[1, 1] + ) x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) - gradient_checker.double_grad_check([input_NCHW], - y, - x_init=x_arr, - place=place, - eps=0.05) - gradient_checker.double_grad_check_for_dygraph(self.pool2d_wrapper, - [input_NCHW], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [input_NCHW], y, x_init=x_arr, place=place, eps=0.05 + ) + gradient_checker.double_grad_check_for_dygraph( + self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -594,32 +544,29 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase): class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase): - def pool2d_wrapper(self, x): return paddle.nn.functional.avg_pool2d(x[0], kernel_size=[4, 4]) @prog_scope() def func(self, place): - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) input_NCHW.persistable = True y = layers.pool2d(input_NCHW, pool_size=[4, 4], pool_type="avg") y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=[4, 4]) x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) - gradient_checker.double_grad_check([input_NCHW], - y, - x_init=x_arr, - place=place, - eps=0.05) - gradient_checker.double_grad_check_for_dygraph(self.pool2d_wrapper, - [input_NCHW], - y, - x_init=x_arr, - place=place) + gradient_checker.double_grad_check( + [input_NCHW], y, x_init=x_arr, place=place, eps=0.05 + ) + gradient_checker.double_grad_check_for_dygraph( + self.pool2d_wrapper, [input_NCHW], y, x_init=x_arr, place=place + ) def test_grad(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py index 2f407edc722bce5560a1458d738bcfc42765988a..2ddfd653a40f34a3299dafeac3da2ff2ffb623fb 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py @@ -32,14 +32,13 @@ def calc_margin_rank_loss(x, y, label, margin=0.0, reduction='none'): def create_test_case(margin, reduction): - class MarginRankingLossCls(unittest.TestCase): - def setUp(self): self.x_data = np.random.rand(10, 10).astype("float64") self.y_data = np.random.rand(10, 10).astype("float64") - self.label_data = np.random.choice([-1, 1], - size=[10, 10]).astype("float64") + self.label_data = np.random.choice([-1, 1], size=[10, 10]).astype( + "float64" + ) self.places = [] self.places.append(fluid.CPUPlace()) if core.is_compiled_with_cuda(): @@ -47,59 +46,69 @@ def create_test_case(margin, reduction): def run_static_functional_api(self, place): paddle.enable_static() - expected = calc_margin_rank_loss(self.x_data, - self.y_data, - self.label_data, - margin=margin, - reduction=reduction) + expected = calc_margin_rank_loss( + self.x_data, + self.y_data, + self.label_data, + margin=margin, + reduction=reduction, + ) with program_guard(Program(), Program()): - x = paddle.static.data(name="x", - shape=[10, 10], - dtype="float64") - y = paddle.static.data(name="y", - shape=[10, 10], - dtype="float64") - label = paddle.static.data(name="label", - shape=[10, 10], - dtype="float64") + x = 
paddle.static.data( + name="x", shape=[10, 10], dtype="float64" + ) + y = paddle.static.data( + name="y", shape=[10, 10], dtype="float64" + ) + label = paddle.static.data( + name="label", shape=[10, 10], dtype="float64" + ) result = paddle.nn.functional.margin_ranking_loss( - x, y, label, margin, reduction) + x, y, label, margin, reduction + ) exe = paddle.static.Executor(place) - result_numpy, = exe.run(feed={ - "x": self.x_data, - "y": self.y_data, - "label": self.label_data - }, - fetch_list=[result]) + (result_numpy,) = exe.run( + feed={ + "x": self.x_data, + "y": self.y_data, + "label": self.label_data, + }, + fetch_list=[result], + ) np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def run_static_api(self, place): paddle.enable_static() - expected = calc_margin_rank_loss(self.x_data, - self.y_data, - self.label_data, - margin=margin, - reduction=reduction) + expected = calc_margin_rank_loss( + self.x_data, + self.y_data, + self.label_data, + margin=margin, + reduction=reduction, + ) with program_guard(Program(), Program()): - x = paddle.static.data(name="x", - shape=[10, 10], - dtype="float64") - y = paddle.static.data(name="y", - shape=[10, 10], - dtype="float64") - label = paddle.static.data(name="label", - shape=[10, 10], - dtype="float64") + x = paddle.static.data( + name="x", shape=[10, 10], dtype="float64" + ) + y = paddle.static.data( + name="y", shape=[10, 10], dtype="float64" + ) + label = paddle.static.data( + name="label", shape=[10, 10], dtype="float64" + ) margin_rank_loss = paddle.nn.loss.MarginRankingLoss( - margin=margin, reduction=reduction) + margin=margin, reduction=reduction + ) result = margin_rank_loss(x, y, label) exe = paddle.static.Executor(place) - result_numpy, = exe.run(feed={ - "x": self.x_data, - "y": self.y_data, - "label": self.label_data - }, - fetch_list=[result]) + (result_numpy,) = exe.run( + feed={ + "x": self.x_data, + "y": self.y_data, + "label": self.label_data, + }, + fetch_list=[result], + ) np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) self.assertTrue('loss' in result.name) @@ -110,12 +119,15 @@ def create_test_case(margin, reduction): label = paddle.to_tensor(self.label_data) result = paddle.nn.functional.margin_ranking_loss( - x, y, label, margin, reduction) - expected = calc_margin_rank_loss(self.x_data, - self.y_data, - self.label_data, - margin=margin, - reduction=reduction) + x, y, label, margin, reduction + ) + expected = calc_margin_rank_loss( + self.x_data, + self.y_data, + self.label_data, + margin=margin, + reduction=reduction, + ) np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def run_dynamic_api(self, place): @@ -124,13 +136,16 @@ def create_test_case(margin, reduction): y = paddle.to_tensor(self.y_data) label = paddle.to_tensor(self.label_data) margin_rank_loss = paddle.nn.loss.MarginRankingLoss( - margin=margin, reduction=reduction) + margin=margin, reduction=reduction + ) result = margin_rank_loss(x, y, label) - expected = calc_margin_rank_loss(self.x_data, - self.y_data, - self.label_data, - margin=margin, - reduction=reduction) + expected = calc_margin_rank_loss( + self.x_data, + self.y_data, + self.label_data, + margin=margin, + reduction=reduction, + ) np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def run_dynamic_broadcast_api(self, place): @@ -140,13 +155,16 @@ def create_test_case(margin, reduction): y = paddle.to_tensor(self.y_data) label = paddle.to_tensor(label_data) margin_rank_loss = paddle.nn.loss.MarginRankingLoss( - margin=margin, 
reduction=reduction) + margin=margin, reduction=reduction + ) result = margin_rank_loss(x, y, label) - expected = calc_margin_rank_loss(self.x_data, - self.y_data, - label_data, - margin=margin, - reduction=reduction) + expected = calc_margin_rank_loss( + self.x_data, + self.y_data, + label_data, + margin=margin, + reduction=reduction, + ) np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def test_case(self): @@ -172,21 +190,22 @@ class MarginRakingLossError(unittest.TestCase): paddle.enable_static() def test_errors(self): - def test_margin_value_error(): margin_rank_loss = paddle.nn.loss.MarginRankingLoss( - margin=0.1, reduction="reduce_mean") + margin=0.1, reduction="reduce_mean" + ) self.assertRaises(ValueError, test_margin_value_error) def test_functional_margin_value_error(): x = paddle.static.data(name="x", shape=[10, 10], dtype="float64") y = paddle.static.data(name="y", shape=[10, 10], dtype="float64") - label = paddle.static.data(name="label", - shape=[10, 10], - dtype="float64") + label = paddle.static.data( + name="label", shape=[10, 10], dtype="float64" + ) result = paddle.nn.functional.margin_ranking_loss( - x, y, label, margin=0.1, reduction="reduction_mean") + x, y, label, margin=0.1, reduction="reduction_mean" + ) self.assertRaises(ValueError, test_functional_margin_value_error) diff --git a/python/paddle/fluid/tests/unittests/test_nn_matmul_v2_grad.py b/python/paddle/fluid/tests/unittests/test_nn_matmul_v2_grad.py index 567ac1404c66954b913ee38b56fe0a7024c0efa2..0c3bca6ba0a8863925b16cef08357fdd1fffdfc5 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_matmul_v2_grad.py +++ b/python/paddle/fluid/tests/unittests/test_nn_matmul_v2_grad.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestMatmulDoubleGradCheck(unittest.TestCase): - def setUp(self): self.init_test() @@ -40,25 +39,21 @@ class TestMatmulDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -69,7 +64,6 @@ class TestMatmulDoubleGradCheck(unittest.TestCase): class TestMatmulDoubleGradCheckCase1(TestMatmulDoubleGradCheck): - def init_test(self): self.x_shape = [2, 3] self.y_shape = [3, 2] @@ -85,7 +79,6 @@ class TestMatmulDoubleGradCheckCase1(TestMatmulDoubleGradCheck): class TestMatmulDoubleGradCheck2(unittest.TestCase): - def setUp(self): self.init_test() @@ -100,25 +93,21 @@ class TestMatmulDoubleGradCheck2(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - 
self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -129,7 +118,6 @@ class TestMatmulDoubleGradCheck2(unittest.TestCase): class TestMatmulDoubleGradCheckCase3(unittest.TestCase): - def setUp(self): self.init_test() @@ -144,25 +132,21 @@ class TestMatmulDoubleGradCheckCase3(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.double_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -173,7 +157,6 @@ class TestMatmulDoubleGradCheckCase3(unittest.TestCase): class TestMatmulTripleGradCheckDotCase(unittest.TestCase): - def setUp(self): self.init_test() @@ -190,21 +173,19 @@ def func(self, place): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) out = paddle.matmul(x, y, self.transpose_x, self.transpose_y, name='out') np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): @@ -216,7 +197,6 @@ def test_grad(self): class TestMatmulTripleGradCheckNormalCase1(unittest.TestCase): - def setUp(self): self.init_test() @@ -231,25 +211,21 @@ class TestMatmulTripleGradCheckNormalCase1(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, 
name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -260,7 +236,6 @@ class TestMatmulTripleGradCheckNormalCase1(unittest.TestCase): class TestMatmulTripleGradCheckNormalCase2(unittest.TestCase): - def setUp(self): self.init_test() @@ -275,25 +250,21 @@ class TestMatmulTripleGradCheckNormalCase2(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -304,7 +275,6 @@ class TestMatmulTripleGradCheckNormalCase2(unittest.TestCase): class TestMatmulTripleGradCheckNormalCase3(unittest.TestCase): - def setUp(self): self.init_test() @@ -319,25 +289,21 @@ class TestMatmulTripleGradCheckNormalCase3(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -348,7 +314,6 @@ class TestMatmulTripleGradCheckNormalCase3(unittest.TestCase): class TestMatmulTripleGradCheckNormalCase4(unittest.TestCase): - def setUp(self): self.init_test() @@ -363,25 +328,21 @@ class TestMatmulTripleGradCheckNormalCase4(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = 
paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -392,7 +353,6 @@ class TestMatmulTripleGradCheckNormalCase4(unittest.TestCase): class TestMatmulTripleGradCheckBroadcastCase1(unittest.TestCase): - def setUp(self): self.init_test() @@ -407,25 +367,21 @@ class TestMatmulTripleGradCheckBroadcastCase1(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -436,7 +392,6 @@ class TestMatmulTripleGradCheckBroadcastCase1(unittest.TestCase): class TestMatmulTripleGradCheckBroadcastCase2(unittest.TestCase): - def setUp(self): self.init_test() @@ -451,25 +406,21 @@ class TestMatmulTripleGradCheckBroadcastCase2(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -480,7 +431,6 @@ class TestMatmulTripleGradCheckBroadcastCase2(unittest.TestCase): class TestMatmulTripleGradCheckBroadcastCase3(unittest.TestCase): - def setUp(self): self.init_test() @@ -495,25 +445,21 @@ class TestMatmulTripleGradCheckBroadcastCase3(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = 
paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -524,7 +470,6 @@ class TestMatmulTripleGradCheckBroadcastCase3(unittest.TestCase): class TestMatmulTripleGradCheckBroadcastCase4(unittest.TestCase): - def setUp(self): self.init_test() @@ -539,25 +484,21 @@ class TestMatmulTripleGradCheckBroadcastCase4(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -568,7 +509,6 @@ class TestMatmulTripleGradCheckBroadcastCase4(unittest.TestCase): class TestMatmulTripleGradCheckBroadcastCase5(unittest.TestCase): - def setUp(self): self.init_test() @@ -583,25 +523,21 @@ class TestMatmulTripleGradCheckBroadcastCase5(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -612,7 +548,6 @@ class TestMatmulTripleGradCheckBroadcastCase5(unittest.TestCase): class TestMatmulTripleGradCheckSpecialCase1(unittest.TestCase): - def 
setUp(self): self.init_test() @@ -627,25 +562,21 @@ class TestMatmulTripleGradCheckSpecialCase1(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] @@ -656,7 +587,6 @@ class TestMatmulTripleGradCheckSpecialCase1(unittest.TestCase): class TestMatmulTripleGradCheckSpecialCase2(unittest.TestCase): - def setUp(self): self.init_test() @@ -671,25 +601,21 @@ class TestMatmulTripleGradCheckSpecialCase2(unittest.TestCase): eps = 0.005 dtype = np.float64 typename = "float64" - x = paddle.static.create_parameter(dtype=typename, - shape=self.x_shape, - name='x') - y = paddle.static.create_parameter(dtype=typename, - shape=self.y_shape, - name='y') - out = paddle.matmul(x, - y, - self.transpose_x, - self.transpose_y, - name='out') + x = paddle.static.create_parameter( + dtype=typename, shape=self.x_shape, name='x' + ) + y = paddle.static.create_parameter( + dtype=typename, shape=self.y_shape, name='y' + ) + out = paddle.matmul( + x, y, self.transpose_x, self.transpose_y, name='out' + ) np.random.seed(2021) x_arr = np.random.uniform(-1, 1, self.x_shape).astype(dtype) y_arr = np.random.uniform(-1, 1, self.y_shape).astype(dtype) - gradient_checker.triple_grad_check([x, y], - out, - x_init=[x_arr, y_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps + ) def test_grad(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py b/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py index 9ef7e19fbe0984f179e8bbbf63fd7ff484d2ad6b..6edc691f23a0861a1763dae643937b8178a102b0 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py +++ b/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py @@ -18,8 +18,7 @@ import paddle class TestFunctionalLayers(unittest.TestCase): - """ - """ + """ """ def setUp(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py index 7af7b96c999b6455153d6ca6765a325188ad9ac7..45840d203191551e28586b26c12b9b9abb1d5339 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py @@ -23,7 +23,6 @@ import paddle.nn.functional as functional class TestNNSigmoidAPI(unittest.TestCase): - def setUp(self): self.init_data() @@ -69,7 +68,6 @@ class TestNNSigmoidAPI(unittest.TestCase): class TestNNFunctionalSigmoidAPI(unittest.TestCase): - def setUp(self): self.init_data() diff --git 
a/python/paddle/fluid/tests/unittests/test_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_nonzero_api.py index cd4d227e823b95a3e1d8e8056862efaf6735ef6f..77363d1d3e73b70abb9177110e3dbc79c0ce5c0d 100644 --- a/python/paddle/fluid/tests/unittests/test_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_nonzero_api.py @@ -20,7 +20,6 @@ from paddle.fluid import Program, program_guard class TestNonZeroAPI(unittest.TestCase): - def test_nonzero_api_as_tuple(self): data = np.array([[True, False], [False, True]]) with program_guard(Program(), Program()): @@ -31,9 +30,9 @@ class TestNonZeroAPI(unittest.TestCase): z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0, 0], [1, 1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -45,9 +44,9 @@ class TestNonZeroAPI(unittest.TestCase): self.assertEqual(len(y), 1) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0], [1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -57,9 +56,9 @@ class TestNonZeroAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1, 2]) y = paddle.nonzero(x) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': data}, - fetch_list=[y.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[y.name], return_numpy=False + ) expect_out = np.array([[0, 0], [1, 1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -68,9 +67,9 @@ class TestNonZeroAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1]) y = paddle.nonzero(x) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': data}, - fetch_list=[y.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[y.name], return_numpy=False + ) expect_out = np.array([[0], [1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py index f9a62d576c844ed7fb829fc14809310cc1396125..17047d349cd162283f5dfb34744701877d6cc498 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_all.py +++ b/python/paddle/fluid/tests/unittests/test_norm_all.py @@ -23,18 +23,25 @@ from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph # hack method for test p_norm final state -def p_norm_python_api(x, - p=2.0, - axis=-1, - epsilon=1e-12, - keepdim=False, - as_vector=False): +def p_norm_python_api( + x, p=2.0, axis=-1, epsilon=1e-12, keepdim=False, as_vector=False +): if in_dygraph_mode(): return _C_ops.p_norm(x, p, axis, epsilon, keepdim, as_vector) if _in_legacy_dygraph(): - return _legacy_C_ops.p_norm(x, 'axis', axis, 'porder', float(p), - 'keepdim', keepdim, 'epsilon', epsilon, - 'as_vector', as_vector) + return _legacy_C_ops.p_norm( + x, + 'axis', + axis, + 'porder', + float(p), + 'keepdim', + keepdim, + 'epsilon', + epsilon, + 'as_vector', + as_vector, + ) def p_norm(x, axis, porder, keepdims=False, reduce_all=False): @@ -76,10 +83,13 @@ def p_norm(x, axis, porder, keepdims=False, reduce_all=False): def numpy_frobenius_norm(x, axis=None, keepdims=False): - if 
isinstance(axis, list): axis = tuple(axis) - if axis is None: x = x.reshape(1, x.size) - r = np.linalg.norm(x, ord='fro', axis=axis, - keepdims=keepdims).astype(x.dtype) + if isinstance(axis, list): + axis = tuple(axis) + if axis is None: + x = x.reshape(1, x.size) + r = np.linalg.norm(x, ord='fro', axis=axis, keepdims=keepdims).astype( + x.dtype + ) return r @@ -88,19 +98,18 @@ def frobenius_norm(x, dim, keep_dim, reduce_all): class TestFrobeniusNormOp(OpTest): - def setUp(self): self.python_api = frobenius_norm self.op_type = "frobenius_norm" self.init_test_case() x = (np.random.random(self.shape) + 1.0).astype(self.dtype) norm = numpy_frobenius_norm(x, self.axis, self.keepdim) - self.reduce_all = (len(self.axis) == len(self.shape)) + self.reduce_all = len(self.axis) == len(self.shape) self.inputs = {'X': x} self.attrs = { 'dim': list(self.axis), 'keep_dim': self.keepdim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } self.outputs = {'Out': norm} @@ -118,7 +127,6 @@ class TestFrobeniusNormOp(OpTest): class TestFrobeniusNormOp2(TestFrobeniusNormOp): - def init_test_case(self): self.shape = [5, 5, 5] self.axis = (0, 1) @@ -130,7 +138,6 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp): class TestPnormOp(OpTest): - def setUp(self): self.op_type = "p_norm" self.python_api = p_norm_python_api @@ -143,7 +150,7 @@ class TestPnormOp(OpTest): 'axis': self.axis, 'keepdim': self.keepdim, 'porder': float(self.porder), - 'asvector': self.asvector + 'asvector': self.asvector, } self.outputs = {'Out': norm} self.gradient = self.calc_gradient() @@ -169,7 +176,7 @@ class TestPnormOp(OpTest): 'axis': self.axis, 'keepdim': self.keepdim, 'porder': float(self.porder), - 'asvector': self.asvector + 'asvector': self.asvector, } x = self.inputs["X"] porder = self.attrs["porder"] @@ -180,22 +187,21 @@ class TestPnormOp(OpTest): if porder == 0: grad = np.zeros(x.shape).astype(x.dtype) elif porder in [float("inf"), float("-inf")]: - norm = p_norm(x, - axis=axis, - porder=porder, - keepdims=True, - reduce_all=asvector) + norm = p_norm( + x, axis=axis, porder=porder, keepdims=True, reduce_all=asvector + ) x_abs = np.abs(x) grad = np.sign(x) grad[x_abs != norm] = 0.0 else: - norm = p_norm(x, - axis=axis, - porder=porder, - keepdims=True, - reduce_all=asvector) - grad = np.power(norm, 1 - porder) * np.power( - np.abs(x), porder - 1) * np.sign(x) + norm = p_norm( + x, axis=axis, porder=porder, keepdims=True, reduce_all=asvector + ) + grad = ( + np.power(norm, 1 - porder) + * np.power(np.abs(x), porder - 1) + * np.sign(x) + ) numel = 1 for s in x.shape: @@ -206,7 +212,6 @@ class TestPnormOp(OpTest): class TestPnormOp2(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -221,7 +226,6 @@ class TestPnormOp2(TestPnormOp): class TestPnormOp3(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -236,7 +240,6 @@ class TestPnormOp3(TestPnormOp): class TestPnormOp4(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -251,7 +254,6 @@ class TestPnormOp4(TestPnormOp): class TestPnormOp5(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = 2 @@ -266,7 +268,6 @@ class TestPnormOp5(TestPnormOp): class TestPnormOp6(TestPnormOp): - def init_test_case(self): self.shape = [3, 20, 3] self.axis = -1 @@ -280,10 +281,10 @@ class TestPnormOp6(TestPnormOp): self.check_grad(['X'], 'Out', user_defined_grads=self.gradient) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") 
+@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPnormOpFP16(TestPnormOp): - def init_test_case(self): self.shape = [2, 3, 4, 5] self.axis = 1 @@ -301,15 +302,15 @@ class TestPnormOpFP16(TestPnormOp): def test_check_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['X'], - 'Out', - user_defined_grads=self.gradient) + self.check_grad_with_place( + place, ['X'], 'Out', user_defined_grads=self.gradient + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPnormOpFP161(TestPnormOpFP16): - def init_test_case(self): self.shape = [2, 3, 4, 5] self.axis = -1 @@ -320,17 +321,18 @@ class TestPnormOpFP161(TestPnormOpFP16): self.asvector = True -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPnormBF16Op(OpTest): - def setUp(self): self.op_type = "p_norm" self.python_api = p_norm_python_api self.init_test_case() self.x = (np.random.random(self.shape) + 0.5).astype(np.float32) - self.norm = p_norm(self.x, self.axis, self.porder, self.keepdim, - self.asvector) + self.norm = p_norm( + self.x, self.axis, self.porder, self.keepdim, self.asvector + ) self.gradient = self.calc_gradient() self.inputs = {'X': convert_float_to_uint16(self.x)} self.attrs = { @@ -338,7 +340,7 @@ class TestPnormBF16Op(OpTest): 'axis': self.axis, 'keepdim': self.keepdim, 'porder': float(self.porder), - 'asvector': self.asvector + 'asvector': self.asvector, } self.outputs = {'Out': convert_float_to_uint16(self.norm)} @@ -348,10 +350,13 @@ class TestPnormBF16Op(OpTest): def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - user_defined_grads=self.gradient, - check_eager=True) + self.check_grad_with_place( + place, + ['X'], + 'Out', + user_defined_grads=self.gradient, + check_eager=True, + ) def init_test_case(self): self.shape = [2, 3, 4, 5] @@ -368,7 +373,7 @@ class TestPnormBF16Op(OpTest): 'axis': self.axis, 'keepdim': self.keepdim, 'porder': float(self.porder), - 'asvector': self.asvector + 'asvector': self.asvector, } x = self.x porder = self.attrs["porder"] @@ -379,22 +384,21 @@ class TestPnormBF16Op(OpTest): if porder == 0: grad = np.zeros(x.shape).astype(x.dtype) elif porder in [float("inf"), float("-inf")]: - norm = p_norm(x, - axis=axis, - porder=porder, - keepdims=True, - reduce_all=asvector) + norm = p_norm( + x, axis=axis, porder=porder, keepdims=True, reduce_all=asvector + ) x_abs = np.abs(x) grad = np.sign(x) grad[x_abs != norm] = 0.0 else: - norm = p_norm(x, - axis=axis, - porder=porder, - keepdims=True, - reduce_all=asvector) - grad = np.power(norm, 1 - porder) * np.power( - np.abs(x), porder - 1) * np.sign(x) + norm = p_norm( + x, axis=axis, porder=porder, keepdims=True, reduce_all=asvector + ) + grad = ( + np.power(norm, 1 - porder) + * np.power(np.abs(x), porder - 1) + * np.sign(x) + ) numel = 1 for s in x.shape: @@ -411,15 +415,19 @@ def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): place = fluid.CPUPlace() exe = fluid.Executor(place) np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype) - expected_result = numpy_frobenius_norm(np_input, - axis=axis, - keepdims=keep_dim) - result, = exe.run(feed={"X": np_input}, fetch_list=[out]) + expected_result = 
numpy_frobenius_norm( + np_input, axis=axis, keepdims=keep_dim + ) + (result,) = exe.run(feed={"X": np_input}, fetch_list=[out]) self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True) if keep_dim and check_dim: self.assertEqual( - (np.abs(np.array(result.shape) - np.array(expected_result.shape)) < - 1e-6).all(), True) + ( + np.abs(np.array(result.shape) - np.array(expected_result.shape)) + < 1e-6 + ).all(), + True, + ) def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): @@ -429,16 +437,19 @@ def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): place = fluid.CPUPlace() exe = fluid.Executor(place) np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype) - expected_result = p_norm(np_input, - porder=p, - axis=axis, - keepdims=keep_dim).astype(dtype) - result, = exe.run(feed={"X": np_input}, fetch_list=[out]) + expected_result = p_norm( + np_input, porder=p, axis=axis, keepdims=keep_dim + ).astype(dtype) + (result,) = exe.run(feed={"X": np_input}, fetch_list=[out]) self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True) if keep_dim and check_dim: self.assertEqual( - (np.abs(np.array(result.shape) - np.array(expected_result.shape)) < - 1e-6).all(), True) + ( + np.abs(np.array(result.shape) - np.array(expected_result.shape)) + < 1e-6 + ).all(), + True, + ) def run_graph(self, p, axis, shape_x, dtype): @@ -447,7 +458,7 @@ def run_graph(self, p, axis, shape_x, dtype): np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) - #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] + # [[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] out_pnorm = paddle.norm(x, p=2, axis=-1) @@ -458,135 +469,166 @@ def run_graph(self, p, axis, shape_x, dtype): # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.norm(x, p=2, axis=[0, 1]) out_pnorm = paddle.norm(x, p=2) - #out_pnorm = [17.43559577 16.91153453 16.73320053 16.91153453] + # out_pnorm = [17.43559577 16.91153453 16.73320053 16.91153453] # compute inf-order norm out_pnorm = paddle.norm(x, p=np.inf) - #out_pnorm = [12.] + # out_pnorm = [12.] out_pnorm = paddle.norm(x, p=np.inf, axis=0) - #out_pnorm = [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] + # out_pnorm = [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] # compute -inf-order norm out_pnorm = paddle.norm(x, p=-np.inf) - #out_pnorm = [0.] + # out_pnorm = [0.] 
out_pnorm = paddle.norm(x, p=-np.inf, axis=0) # out_fro = [17.43559577 16.91153453 16.73320053 16.91153453] paddle.enable_static() class API_NormTest(unittest.TestCase): - def test_basic(self): keep_dims = {False, True} for keep in keep_dims: - run_fro(self, - p='fro', - axis=None, - shape_x=[2, 3, 4], - dtype="float32", - keep_dim=keep) - run_fro(self, - p='fro', - axis=[0, 1], - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=2, - axis=None, - shape_x=[3, 4], - dtype="float32", - keep_dim=keep) - run_pnorm(self, - p=2, - axis=1, - shape_x=[3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=np.inf, - axis=0, - shape_x=[2, 3, 4], - dtype="float32", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=np.inf, - axis=None, - shape_x=[2, 3, 4], - dtype="float32", - keep_dim=keep) - run_pnorm(self, - p=-np.inf, - axis=0, - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=-np.inf, - axis=None, - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep) - run_pnorm(self, - p=0, - axis=1, - shape_x=[3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - - run_pnorm(self, - p=1, - axis=1, - shape_x=[3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=0, - axis=None, - shape_x=[3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=2, - axis=[0, 1], - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=2, - axis=-1, - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=1, - axis=[0, 1], - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=np.inf, - axis=[0, 1], - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) - run_pnorm(self, - p=-np.inf, - axis=[0, 1], - shape_x=[2, 3, 4], - dtype="float64", - keep_dim=keep, - check_dim=True) + run_fro( + self, + p='fro', + axis=None, + shape_x=[2, 3, 4], + dtype="float32", + keep_dim=keep, + ) + run_fro( + self, + p='fro', + axis=[0, 1], + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=2, + axis=None, + shape_x=[3, 4], + dtype="float32", + keep_dim=keep, + ) + run_pnorm( + self, + p=2, + axis=1, + shape_x=[3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=np.inf, + axis=0, + shape_x=[2, 3, 4], + dtype="float32", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=np.inf, + axis=None, + shape_x=[2, 3, 4], + dtype="float32", + keep_dim=keep, + ) + run_pnorm( + self, + p=-np.inf, + axis=0, + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=-np.inf, + axis=None, + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + ) + run_pnorm( + self, + p=0, + axis=1, + shape_x=[3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + + run_pnorm( + self, + p=1, + axis=1, + shape_x=[3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=0, + axis=None, + shape_x=[3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=2, + axis=[0, 1], + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=2, + axis=-1, + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=1, + axis=[0, 1], + shape_x=[2, 3, 4], + 
dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=np.inf, + axis=[0, 1], + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) + run_pnorm( + self, + p=-np.inf, + axis=[0, 1], + shape_x=[2, 3, 4], + dtype="float64", + keep_dim=keep, + check_dim=True, + ) def test_dygraph(self): run_graph(self, p='fro', axis=None, shape_x=[2, 3, 4], dtype="float32") @@ -609,8 +651,9 @@ class API_NormTest(unittest.TestCase): self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64") self.assertRaises(ValueError, paddle.norm, "inf", [2], "int64") out = fluid.data(name="out", shape=[1], dtype="int64") - self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "float64", - out) + self.assertRaises( + TypeError, err_dtype, "fro", [2, 2], "float64", out + ) self.assertRaises(TypeError, err_dtype, 2, [10], "int64") self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out) @@ -620,11 +663,9 @@ class API_NormTest(unittest.TestCase): self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1) self.assertRaises(ValueError, paddle.norm, 0, [1, 0], "float64") data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64") - self.assertRaises(ValueError, - paddle.norm, - data, - p='unspport', - axis=[-3, -2, -1]) + self.assertRaises( + ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_norm_nn_grad.py b/python/paddle/fluid/tests/unittests/test_norm_nn_grad.py index 28622d44d18bbcf9797881efa99cea8d81a0b0de..26dea91aecce413db55ff714843f0c4491ea206b 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_norm_nn_grad.py @@ -25,7 +25,6 @@ from decorator_helper import prog_scope class TestInstanceNormDoubleGradCheck(unittest.TestCase): - @prog_scope() def func(self, place): prog = fluid.Program() @@ -38,12 +37,9 @@ class TestInstanceNormDoubleGradCheck(unittest.TestCase): x = layers.create_parameter(dtype=dtype, shape=shape, name='x') z = fluid.layers.instance_norm(input=x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - z, - x_init=x_arr, - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], z, x_init=x_arr, atol=atol, place=place, eps=eps + ) def test_grad(self): paddle.enable_static() @@ -55,8 +51,8 @@ class TestInstanceNormDoubleGradCheck(unittest.TestCase): class TestInstanceNormDoubleGradCheckWithoutParamBias( - TestInstanceNormDoubleGradCheck): - + TestInstanceNormDoubleGradCheck +): @prog_scope() def func(self, place): prog = fluid.Program() @@ -67,20 +63,16 @@ class TestInstanceNormDoubleGradCheckWithoutParamBias( eps = 0.005 atol = 1e-4 x = layers.create_parameter(dtype=dtype, shape=shape, name='x') - z = fluid.layers.instance_norm(input=x, - param_attr=False, - bias_attr=False) + z = fluid.layers.instance_norm( + input=x, param_attr=False, bias_attr=False + ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - gradient_checker.double_grad_check([x], - z, - x_init=x_arr, - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], z, x_init=x_arr, atol=atol, place=place, eps=eps + ) class TestInstanceNormDoubleGradEagerCheck(unittest.TestCase): - def instance_norm_wrapper(self, x): return paddle.nn.functional.instance_norm(x[0]) @@ -97,19 +89,18 @@ class TestInstanceNormDoubleGradEagerCheck(unittest.TestCase): z = paddle.nn.functional.instance_norm(x) x_arr = np.random.uniform(-1, 
1, shape).astype(dtype) # check for static mode - gradient_checker.double_grad_check([x], - z, - x_init=x_arr, - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], z, x_init=x_arr, atol=atol, place=place, eps=eps + ) # check for eager mode gradient_checker.double_grad_check_for_dygraph( - self.instance_norm_wrapper, [x], + self.instance_norm_wrapper, + [x], z, x_init=x_arr, atol=atol, - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() @@ -121,8 +112,8 @@ class TestInstanceNormDoubleGradEagerCheck(unittest.TestCase): class TestInstanceNormDoubleGradEagerCheckWithParams( - TestInstanceNormDoubleGradEagerCheck): - + TestInstanceNormDoubleGradEagerCheck +): def instance_norm_wrapper(self, x): instance_norm = paddle.nn.InstanceNorm2D(3) return instance_norm(x[0]) @@ -140,23 +131,21 @@ class TestInstanceNormDoubleGradEagerCheckWithParams( z = paddle.nn.InstanceNorm2D(3)(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) # check for static mode - gradient_checker.double_grad_check([x], - z, - x_init=x_arr, - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], z, x_init=x_arr, atol=atol, place=place, eps=eps + ) # check for eager mode gradient_checker.double_grad_check_for_dygraph( - self.instance_norm_wrapper, [x], + self.instance_norm_wrapper, + [x], z, x_init=x_arr, atol=atol, - place=place) + place=place, + ) class TestBatchNormDoubleGradCheck(unittest.TestCase): - def setUp(self): self.init_test() @@ -170,7 +159,8 @@ class TestBatchNormDoubleGradCheck(unittest.TestCase): batch_norm = paddle.nn.BatchNorm2D( self.shape[self.channel_index], data_format=self.data_layout, - use_global_stats=self.use_global_stats) + use_global_stats=self.use_global_stats, + ) return batch_norm(x[0]) @prog_scope() @@ -182,22 +172,23 @@ class TestBatchNormDoubleGradCheck(unittest.TestCase): eps = 0.005 atol = 1e-4 x = layers.create_parameter(dtype=dtype, shape=self.shape, name='x') - z = fluid.layers.batch_norm(input=x, - data_layout=self.data_layout, - use_global_stats=self.use_global_stats) + z = fluid.layers.batch_norm( + input=x, + data_layout=self.data_layout, + use_global_stats=self.use_global_stats, + ) x_arr = np.random.uniform(-1, 1, self.shape).astype(dtype) - gradient_checker.double_grad_check([x], - z, - x_init=x_arr, - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x], z, x_init=x_arr, atol=atol, place=place, eps=eps + ) gradient_checker.double_grad_check_for_dygraph( - self.batch_norm_wrapper, [x], + self.batch_norm_wrapper, + [x], z, x_init=x_arr, atol=atol, - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() @@ -209,7 +200,6 @@ class TestBatchNormDoubleGradCheck(unittest.TestCase): class TestBatchNormDoubleGradCheckCase1(TestBatchNormDoubleGradCheck): - def init_test(self): self.data_layout = 'NHWC' self.use_global_stats = False @@ -218,7 +208,6 @@ class TestBatchNormDoubleGradCheckCase1(TestBatchNormDoubleGradCheck): class TestBatchNormDoubleGradCheckCase2(TestBatchNormDoubleGradCheck): - def init_test(self): self.data_layout = 'NCHW' self.use_global_stats = True @@ -227,7 +216,6 @@ class TestBatchNormDoubleGradCheckCase2(TestBatchNormDoubleGradCheck): class TestBatchNormDoubleGradCheckCase3(TestBatchNormDoubleGradCheck): - def init_test(self): self.data_layout = 'NHWC' self.use_global_stats = True @@ -236,7 +224,6 @@ class TestBatchNormDoubleGradCheckCase3(TestBatchNormDoubleGradCheck): class 
TestBatchNormDoubleGradCheckCase4(TestBatchNormDoubleGradCheck): - def init_test(self): self.data_layout = 'NCHW' self.use_global_stats = False @@ -247,12 +234,12 @@ class TestBatchNormDoubleGradCheckCase4(TestBatchNormDoubleGradCheck): batch_norm = paddle.nn.BatchNorm3D( self.shape[self.channel_index], data_format=self.data_layout, - use_global_stats=self.use_global_stats) + use_global_stats=self.use_global_stats, + ) return batch_norm(x[0]) class TestBatchNormDoubleGradCheckCase5(TestBatchNormDoubleGradCheck): - @prog_scope() def func(self, place): prog = fluid.Program() @@ -261,25 +248,30 @@ class TestBatchNormDoubleGradCheckCase5(TestBatchNormDoubleGradCheck): dtype = "float32" eps = 0.005 atol = 2e-4 - chn = self.shape[1] if self.data_layout == 'NCHW' else self.shape[-1] + chn = ( + self.shape[1] if self.data_layout == 'NCHW' else self.shape[-1] + ) x = layers.create_parameter(dtype=dtype, shape=self.shape, name='x') - z = fluid.layers.batch_norm(input=x, - data_layout=self.data_layout, - use_global_stats=self.use_global_stats) + z = fluid.layers.batch_norm( + input=x, + data_layout=self.data_layout, + use_global_stats=self.use_global_stats, + ) x_arr = np.random.uniform(-1, 1, self.shape).astype(dtype) w, b = prog.global_block().all_parameters()[1:3] w_arr = np.ones(chn).astype(dtype) b_arr = np.zeros(chn).astype(dtype) - gradient_checker.double_grad_check([x, w, b], - z, - x_init=[x_arr, w_arr, b_arr], - atol=atol, - place=place, - eps=eps) + gradient_checker.double_grad_check( + [x, w, b], + z, + x_init=[x_arr, w_arr, b_arr], + atol=atol, + place=place, + eps=eps, + ) class TestBatchNormDoubleGradCheckCase6(TestBatchNormDoubleGradCheckCase5): - def init_test(self): self.data_layout = 'NCHW' self.use_global_stats = True diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py index a5c643f4612df5a7e9781d29c3b442610f28cfab..d5b360cb729c9c956f185dc091abb932425dd545 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_norm_op.py @@ -28,7 +28,6 @@ def l2_norm(x, axis, epsilon): class TestNormOp(OpTest): - def setUp(self): self.op_type = "norm" self.python_api = paddle.fluid.layers.l2_normalize @@ -56,7 +55,6 @@ class TestNormOp(OpTest): class TestNormOp2(TestNormOp): - def init_test_case(self): self.shape = [5, 3, 9, 7] self.axis = 0 @@ -64,17 +62,17 @@ class TestNormOp2(TestNormOp): class TestNormOp3(TestNormOp): - def init_test_case(self): self.shape = [5, 3, 2, 7] self.axis = -1 self.epsilon = 1e-8 -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class TestNormOp4(TestNormOp): - def init_test_case(self): self.shape = [128, 1024, 14, 14] self.axis = 2 @@ -84,10 +82,11 @@ class TestNormOp4(TestNormOp): pass -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class TestNormOp5(TestNormOp): - def init_test_case(self): self.shape = [2048, 2048] self.axis = 1 @@ -98,7 +97,6 @@ class TestNormOp5(TestNormOp): class TestNormOp6(TestNormOp): - def init_dtype(self): self.dtype = "float32" @@ -106,10 +104,10 @@ class TestNormOp6(TestNormOp): 
self.check_grad(['X'], 'Out', max_relative_error=0.008) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestNormOp7(TestNormOp): - def init_dtype(self): self.dtype = "float16" @@ -117,14 +115,13 @@ class TestNormOp7(TestNormOp): self.check_output_with_place(fluid.core.CUDAPlace(0), atol=5e-2) def test_check_grad(self): - self.check_grad_with_place(fluid.core.CUDAPlace(0), ['X'], - 'Out', - max_relative_error=0.05) + self.check_grad_with_place( + fluid.core.CUDAPlace(0), ['X'], 'Out', max_relative_error=0.05 + ) @skip_check_grad_ci(reason="skip check grad for test mode.") class TestNormTestOp(OpTest): - def setUp(self): self.op_type = "norm" self.init_test_case() @@ -134,7 +131,7 @@ class TestNormTestOp(OpTest): self.attrs = { 'epsilon': self.epsilon, 'axis': self.axis, - 'is_test': True + 'is_test': True, } self.outputs = {'Out': y} @@ -151,7 +148,6 @@ class TestNormTestOp(OpTest): class API_NormTest(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program()): @@ -164,5 +160,6 @@ class API_NormTest(unittest.TestCase): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_normal.py b/python/paddle/fluid/tests/unittests/test_normal.py index 3458bbdf7fd86aab117f1f9c7e17f45b9c442ee2..d24f5f02d2bc03d7e23f36b82378d2e037a69ce0 100644 --- a/python/paddle/fluid/tests/unittests/test_normal.py +++ b/python/paddle/fluid/tests/unittests/test_normal.py @@ -22,7 +22,6 @@ paddle.seed(10) class TestNormalAPI(unittest.TestCase): - def setUp(self): self.mean = 1.0 self.std = 0.0 @@ -30,9 +29,11 @@ class TestNormalAPI(unittest.TestCase): self.repeat_num = 2000 self.set_attrs() self.dtype = self.get_dtype() - self.place=paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def set_attrs(self): self.shape = [8, 12] @@ -59,27 +60,32 @@ class TestNormalAPI(unittest.TestCase): ret_all_shape = copy.deepcopy(shape) ret_all_shape.insert(0, self.repeat_num) ret_all = np.zeros(ret_all_shape, self.dtype) - if isinstance(self.mean, np.ndarray) \ - and isinstance(self.std, np.ndarray): + if isinstance(self.mean, np.ndarray) and isinstance( + self.std, np.ndarray + ): with paddle.static.program_guard(paddle.static.Program()): - mean = paddle.fluid.data('Mean', self.mean.shape, - self.mean.dtype) + mean = paddle.fluid.data( + 'Mean', self.mean.shape, self.mean.dtype + ) std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) out = paddle.normal(mean, std, self.shape) exe = paddle.static.Executor(self.place) for i in range(self.repeat_num): - ret = exe.run(feed={ - 'Mean': self.mean, - 'Std': self.std.reshape(shape) - }, - fetch_list=[out]) + ret = exe.run( + feed={ + 'Mean': self.mean, + 'Std': self.std.reshape(shape), + }, + fetch_list=[out], + ) ret_all[i] = ret[0] return ret_all elif isinstance(self.mean, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): - mean = paddle.fluid.data('Mean', self.mean.shape, - self.mean.dtype) + mean = paddle.fluid.data( + 'Mean', self.mean.shape, self.mean.dtype + ) out = paddle.normal(mean, self.std, self.shape) exe = paddle.static.Executor(self.place) @@ -114,10 +120,16 @@ class TestNormalAPI(unittest.TestCase): ret_all_shape.insert(0, self.repeat_num) ret_all = 
np.zeros(ret_all_shape, self.dtype) - mean = paddle.to_tensor(self.mean) \ - if isinstance(self.mean, np.ndarray) else self.mean - std = paddle.to_tensor(self.std) \ - if isinstance(self.std, np.ndarray) else self.std + mean = ( + paddle.to_tensor(self.mean) + if isinstance(self.mean, np.ndarray) + else self.mean + ) + std = ( + paddle.to_tensor(self.std) + if isinstance(self.std, np.ndarray) + else self.std + ) for i in range(self.repeat_num): out = paddle.normal(mean, std, self.shape) ret_all[i] = out.numpy() @@ -134,42 +146,43 @@ class TestNormalAPI(unittest.TestCase): ret = ret.flatten().reshape([self.repeat_num, -1]) mean = np.mean(ret, axis=0) std = np.std(ret, axis=0) - mean_ref=self.mean.flatten() \ - if isinstance(self.mean, np.ndarray) else self.mean - std_ref=self.std.flatten() \ - if isinstance(self.std, np.ndarray) else self.std + mean_ref = ( + self.mean.flatten() + if isinstance(self.mean, np.ndarray) + else self.mean + ) + std_ref = ( + self.std.flatten() + if isinstance(self.std, np.ndarray) + else self.std + ) np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2) np.testing.assert_allclose(std_ref, std, rtol=0.2, atol=0.2) class TestNormalAPI_mean_is_tensor(TestNormalAPI): - def set_attrs(self): self.mean = np.random.uniform(-2, -1, [2, 3, 4, 5]).astype('float64') class TestNormalAPI_std_is_tensor(TestNormalAPI): - def set_attrs(self): self.std = np.random.uniform(0.7, 1, [2, 3, 17]).astype('float64') class TestNormalAPI_mean_std_are_tensor(TestNormalAPI): - def set_attrs(self): self.mean = np.random.uniform(1, 2, [1, 100]).astype('float64') self.std = np.random.uniform(0.5, 1, [1, 100]).astype('float64') class TestNormalAPI_mean_std_are_tensor_with_different_dtype(TestNormalAPI): - def set_attrs(self): self.mean = np.random.uniform(1, 2, [100]).astype('float64') self.std = np.random.uniform(1, 2, [100]).astype('float32') class TestNormalAlias(unittest.TestCase): - def test_alias(self): paddle.disable_static() shape = [1, 2, 3] @@ -180,7 +193,6 @@ class TestNormalAlias(unittest.TestCase): class TestNormalErrors(unittest.TestCase): - def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): mean = [1, 2, 3] diff --git a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py index e35e0283332d885fcb61aed0d19ef8a6cf071d37..5a5b4f4eeb0cc65fdc7ffadae01be94cf84a8a71 100644 --- a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py +++ b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py @@ -22,18 +22,19 @@ class TestNormalization(unittest.TestCase): data_desc = {"name": "input", "shape": (2, 3, 7)} def gen_random_input(self): - """Generate random input data. - """ - self.data = np.random.random( - size=self.data_desc["shape"]).astype("float32") + """Generate random input data.""" + self.data = np.random.random(size=self.data_desc["shape"]).astype( + "float32" + ) def set_program(self, axis, epsilon): - """Build the test program. 
- """ - data = fluid.layers.data(name=self.data_desc["name"], - shape=self.data_desc["shape"], - dtype="float32", - append_batch_size=False) + """Build the test program.""" + data = fluid.layers.data( + name=self.data_desc["name"], + shape=self.data_desc["shape"], + dtype="float32", + append_batch_size=False, + ) data.stop_gradient = False l2_norm = fluid.layers.l2_normalize(x=data, axis=axis, epsilon=epsilon) out = fluid.layers.reduce_sum(l2_norm, dim=None) @@ -42,8 +43,7 @@ class TestNormalization(unittest.TestCase): self.fetch_list = [l2_norm] def run_program(self): - """Run the test program. - """ + """Run the test program.""" places = [core.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) @@ -52,33 +52,33 @@ class TestNormalization(unittest.TestCase): self.set_inputs(place) exe = fluid.Executor(place) - output, = exe.run(fluid.default_main_program(), - feed=self.inputs, - fetch_list=self.fetch_list, - return_numpy=True) + (output,) = exe.run( + fluid.default_main_program(), + feed=self.inputs, + fetch_list=self.fetch_list, + return_numpy=True, + ) self.op_output = output def set_inputs(self, place): - """Set the randomly generated data to the test program. - """ + """Set the randomly generated data to the test program.""" self.inputs = {} tensor = fluid.Tensor() tensor.set(self.data, place) self.inputs[self.data_desc["name"]] = tensor def l2_normalize(self, data, axis, epsilon): - """ Compute the groundtruth. - """ + """Compute the groundtruth.""" output = data / np.broadcast_to( np.sqrt(np.sum(np.square(data), axis=axis, keepdims=True)), - data.shape) + data.shape, + ) return output def test_l2_normalize(self): - """ Test the python wrapper for l2_normalize. - """ + """Test the python wrapper for l2_normalize.""" axis = 1 - #TODO(caoying) epsilon is not supported due to lack of a maximum_op. + # TODO(caoying) epsilon is not supported due to lack of a maximum_op. 
epsilon = 1e-6 self.gen_random_input() @@ -89,10 +89,9 @@ class TestNormalization(unittest.TestCase): expect_output = self.l2_normalize(self.data, axis, epsilon) # check output - np.testing.assert_allclose(self.op_output, - expect_output, - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + self.op_output, expect_output, rtol=1e-05, atol=0.001 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py index bb8b4881d3f47bcec52dbf701ea951cefe826cad..e4fed2904e977b5e3d76e18927cb8393e921644a 100644 --- a/python/paddle/fluid/tests/unittests/test_normalize.py +++ b/python/paddle/fluid/tests/unittests/test_normalize.py @@ -28,7 +28,6 @@ def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True): class TestNNFunctionalNormalize(unittest.TestCase): - def setUp(self): self.input_np = np.random.random(size=(10, 10)).astype(np.float32) self.input_np2 = np.array([0.0, 0.0]).astype(np.float32) @@ -66,11 +65,10 @@ class TestNNFunctionalNormalize(unittest.TestCase): place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - static_result = exe.run(feed={ - "input": self.input_np, - "input2": self.input_np2 - }, - fetch_list=[result0, result1, result2, result4]) + static_result = exe.run( + feed={"input": self.input_np, "input2": self.input_np2}, + fetch_list=[result0, result1, result2, result4], + ) np.testing.assert_allclose(static_result[0], self.expected0, rtol=1e-05) np.testing.assert_allclose(static_result[1], self.expected1, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py index 557c5bac866b0bb2d96acb9b6e5822ede7c7b5cb..f90b3f4cdff6020f1c0db72c1971971b0ace8ba3 100644 --- a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py @@ -20,13 +20,13 @@ from paddle.fluid import Program, program_guard def npairloss(anchor, positive, labels, l2_reg=0.002): - def softmax_cross_entropy_with_logits(logits, labels): logits = np.exp(logits) logits = logits / np.sum(logits, axis=1).reshape(-1, 1) - return np.mean(-np.sum(labels * np.log(logits), axis=1), - dtype=np.float32) + return np.mean( + -np.sum(labels * np.log(logits), axis=1), dtype=np.float32 + ) batch_size = labels.shape[0] @@ -35,27 +35,26 @@ def npairloss(anchor, positive, labels, l2_reg=0.002): labels = labels / np.sum(labels, axis=1, keepdims=True) l2loss = np.mean(np.sum(np.power(anchor, 2), 1)) + np.mean( - np.sum(np.power(positive, 2), 1)) + np.sum(np.power(positive, 2), 1) + ) l2loss = (l2loss * 0.25 * l2_reg).astype(np.float32) similarity_matrix = np.matmul(anchor, positive.transpose()) celoss = np.mean( - softmax_cross_entropy_with_logits(similarity_matrix, labels)) + softmax_cross_entropy_with_logits(similarity_matrix, labels) + ) return l2loss + celoss class TestNpairLossOp(unittest.TestCase): - def setUp(self): self.dtype = np.float32 def __assert_close(self, tensor, np_array, msg, atol=1e-4): - np.testing.assert_allclose(np.array(tensor), - np_array, - rtol=1e-05, - atol=atol, - err_msg=msg) + np.testing.assert_allclose( + np.array(tensor), np_array, rtol=1e-05, atol=atol, err_msg=msg + ) def test_npair_loss(self): reg_lambda = 0.002 @@ -65,81 +64,97 @@ class TestNpairLossOp(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - embeddings_anchor = np.random.rand(num_data, 
- feat_dim).astype(np.float32) - embeddings_positive = np.random.rand(num_data, - feat_dim).astype(np.float32) - row_labels = np.random.randint(0, num_classes, - size=(num_data)).astype(np.float32) - out_loss = npairloss(embeddings_anchor, - embeddings_positive, - row_labels, - l2_reg=reg_lambda) - - anc = fluid.layers.data(dtype='float32', - name='anc', - shape=embeddings_anchor.shape, - append_batch_size=False) - pos = fluid.layers.data(dtype='float32', - name='pos', - shape=embeddings_positive.shape, - append_batch_size=False) - lab = fluid.layers.data(dtype='float32', - name='lab', - shape=row_labels.shape, - append_batch_size=False) - - npair_loss_op = fluid.layers.npair_loss(anchor=anc, - positive=pos, - labels=lab, - l2_reg=reg_lambda) - out_tensor = exe.run(feed={ - 'anc': embeddings_anchor, - 'pos': embeddings_positive, - 'lab': row_labels - }, - fetch_list=[npair_loss_op.name]) - - self.__assert_close(out_tensor, - out_loss, - "inference output are different at " + str(place) + - ", " + str(np.dtype('float32')) + - str(np.array(out_tensor)) + str(out_loss), - atol=1e-3) + embeddings_anchor = np.random.rand(num_data, feat_dim).astype( + np.float32 + ) + embeddings_positive = np.random.rand(num_data, feat_dim).astype( + np.float32 + ) + row_labels = np.random.randint(0, num_classes, size=(num_data)).astype( + np.float32 + ) + out_loss = npairloss( + embeddings_anchor, + embeddings_positive, + row_labels, + l2_reg=reg_lambda, + ) + + anc = fluid.layers.data( + dtype='float32', + name='anc', + shape=embeddings_anchor.shape, + append_batch_size=False, + ) + pos = fluid.layers.data( + dtype='float32', + name='pos', + shape=embeddings_positive.shape, + append_batch_size=False, + ) + lab = fluid.layers.data( + dtype='float32', + name='lab', + shape=row_labels.shape, + append_batch_size=False, + ) + + npair_loss_op = fluid.layers.npair_loss( + anchor=anc, positive=pos, labels=lab, l2_reg=reg_lambda + ) + out_tensor = exe.run( + feed={ + 'anc': embeddings_anchor, + 'pos': embeddings_positive, + 'lab': row_labels, + }, + fetch_list=[npair_loss_op.name], + ) + + self.__assert_close( + out_tensor, + out_loss, + "inference output are different at " + + str(place) + + ", " + + str(np.dtype('float32')) + + str(np.array(out_tensor)) + + str(out_loss), + atol=1e-3, + ) class TestNpairLossOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): anchor_np = np.random.random((2, 4)).astype("float32") positive_np = np.random.random((2, 4)).astype("float32") labels_np = np.random.random((2)).astype("float32") - anchor_data = fluid.data(name='anchor', - shape=[2, 4], - dtype='float32') - positive_data = fluid.data(name='positive', - shape=[2, 4], - dtype='float32') + anchor_data = fluid.data( + name='anchor', shape=[2, 4], dtype='float32' + ) + positive_data = fluid.data( + name='positive', shape=[2, 4], dtype='float32' + ) labels_data = fluid.data(name='labels', shape=[2], dtype='float32') def test_anchor_Variable(): # the anchor type must be Variable - fluid.layers.npair_loss(anchor=anchor_np, - positive=positive_data, - labels=labels_data) + fluid.layers.npair_loss( + anchor=anchor_np, positive=positive_data, labels=labels_data + ) def test_positive_Variable(): # the positive type must be Variable - fluid.layers.npair_loss(anchor=anchor_data, - positive=positive_np, - labels=labels_data) + fluid.layers.npair_loss( + anchor=anchor_data, positive=positive_np, labels=labels_data + ) def test_labels_Variable(): # the labels type must be Variable - 
fluid.layers.npair_loss(anchor=anchor_data, - positive=positive_data, - labels=labels_np) + fluid.layers.npair_loss( + anchor=anchor_data, positive=positive_data, labels=labels_np + ) self.assertRaises(TypeError, test_anchor_Variable) self.assertRaises(TypeError, test_positive_Variable) @@ -147,30 +162,34 @@ class TestNpairLossOpError(unittest.TestCase): def test_anchor_type(): # dtype must be float32 or float64 - anchor_data1 = fluid.data(name='anchor1', - shape=[2, 4], - dtype='int32') - fluid.layers.npair_loss(anchor=anchor_data, - positive=positive_data, - labels=labels_np) + anchor_data1 = fluid.data( + name='anchor1', shape=[2, 4], dtype='int32' + ) + fluid.layers.npair_loss( + anchor=anchor_data, positive=positive_data, labels=labels_np + ) def test_positive_type(): # dtype must be float32 or float64 - positive_data1 = fluid.data(name='positive1', - shape=[2, 4], - dtype='int32') - fluid.layers.npair_loss(anchor=anchor_data, - positive=positive_data1, - labels=labels_np) + positive_data1 = fluid.data( + name='positive1', shape=[2, 4], dtype='int32' + ) + fluid.layers.npair_loss( + anchor=anchor_data, + positive=positive_data1, + labels=labels_np, + ) def test_labels_type(): # dtype must be float32 or float64 - labels_data1 = fluid.data(name='labels1', - shape=[2], - dtype='int32') - fluid.layers.npair_loss(anchor=anchor_data, - positive=positive_data, - labels=labels_data1) + labels_data1 = fluid.data( + name='labels1', shape=[2], dtype='int32' + ) + fluid.layers.npair_loss( + anchor=anchor_data, + positive=positive_data, + labels=labels_data1, + ) self.assertRaises(TypeError, test_anchor_type) self.assertRaises(TypeError, test_positive_type) diff --git a/python/paddle/fluid/tests/unittests/test_number_count_op.py b/python/paddle/fluid/tests/unittests/test_number_count_op.py index 58e24e6692ad321a51c1f52cfe623eba5b48170e..a31fb1a5978c12cba3f4133c48c7a9baf8364314 100644 --- a/python/paddle/fluid/tests/unittests/test_number_count_op.py +++ b/python/paddle/fluid/tests/unittests/test_number_count_op.py @@ -22,17 +22,17 @@ from paddle.fluid.framework import _test_eager_guard def count(x, upper_num): - res = np.zeros((upper_num, )).astype(int) + res = np.zeros((upper_num,)).astype(int) for i in x.reshape(-1): if i >= 0 and i < len(res): res[i] += 1 return res -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestNumberCountOpInt64(op_test.OpTest): - def setUp(self): upper_num = 16 self.op_type = "number_count" @@ -45,14 +45,15 @@ class TestNumberCountOpInt64(op_test.OpTest): self.check_output_with_place(paddle.CUDAPlace(0)) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestNumberCountAPI(unittest.TestCase): - def setUp(self): self.upper_num = 320 - self.x = np.random.randint(-1, self.upper_num, - size=(6000, 200)).astype('int64') + self.x = np.random.randint(-1, self.upper_num, size=(6000, 200)).astype( + 'int64' + ) self.out = count(self.x, self.upper_num) self.place = paddle.CUDAPlace(0) diff --git a/python/paddle/fluid/tests/unittests/test_numel_op.py b/python/paddle/fluid/tests/unittests/test_numel_op.py index 7bb76c531a31aa6e0e5b6e86b3368fc6c9a4d470..4bb359a7bd2e0e5f94f4a2ded5b59daf978defb2 100644 --- a/python/paddle/fluid/tests/unittests/test_numel_op.py +++ b/python/paddle/fluid/tests/unittests/test_numel_op.py 
@@ -20,7 +20,6 @@ import paddle class TestNumelOp(OpTest): - def setUp(self): self.op_type = "size" self.init() @@ -38,19 +37,16 @@ class TestNumelOp(OpTest): class TestNumelOp1(TestNumelOp): - def init(self): self.shape = (11, 66) class TestNumelOp2(TestNumelOp): - def init(self): - self.shape = (0, ) + self.shape = (0,) class TestNumelAPI(unittest.TestCase): - def test_numel_static(self): main_program = fluid.Program() startup_program = fluid.Program() @@ -64,17 +60,19 @@ class TestNumelAPI(unittest.TestCase): out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) exe = paddle.static.Executor(place=paddle.CPUPlace()) - res_1, res_2 = exe.run(feed={ - "x_1": input_1, - "x_2": input_2, - }, - fetch_list=[out_1, out_2]) - assert (np.array_equal(res_1, - np.array([np.size(input_1) - ]).astype("int64"))) - assert (np.array_equal(res_2, - np.array([np.size(input_2) - ]).astype("int64"))) + res_1, res_2 = exe.run( + feed={ + "x_1": input_1, + "x_2": input_2, + }, + fetch_list=[out_1, out_2], + ) + assert np.array_equal( + res_1, np.array([np.size(input_1)]).astype("int64") + ) + assert np.array_equal( + res_2, np.array([np.size(input_2)]).astype("int64") + ) def test_numel_imperative(self): paddle.disable_static(paddle.CPUPlace()) @@ -84,8 +82,8 @@ class TestNumelAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.numel(x_1) out_2 = paddle.numel(x_2) - assert (np.array_equal(out_1.numpy().item(0), np.size(input_1))) - assert (np.array_equal(out_2.numpy().item(0), np.size(input_2))) + assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) + assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_op.py index 55abf26ea231e8fd52d10d673ab9246c01db5ae5..4ca0017052eb94636a21cb757de93e28bf0c8cd8 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_op.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import Program, program_guard class TestOneHotOp(OpTest): - def setUp(self): self.op_type = 'one_hot' depth = 10 @@ -32,8 +31,9 @@ class TestOneHotOp(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -47,7 +47,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_attr(OpTest): - def setUp(self): self.op_type = 'one_hot' depth = 10 @@ -56,8 +55,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -71,7 +71,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def setUp(self): self.op_type = 'one_hot' depth = 10 @@ -81,8 +80,9 @@ class TestOneHotOp_default_dtype(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = 
np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -96,7 +96,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def setUp(self): self.op_type = 'one_hot' depth = 10 @@ -105,8 +104,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -120,7 +120,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_out_of_range(OpTest): - def setUp(self): self.op_type = 'one_hot' depth = 10 @@ -128,8 +127,9 @@ class TestOneHotOp_out_of_range(OpTest): x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype( + 'float32' + ) self.inputs = {'X': (x, x_lod)} self.attrs = {'depth': depth, 'allow_out_of_range': True} @@ -140,7 +140,6 @@ class TestOneHotOp_out_of_range(OpTest): class TestOneHotOp_exception(unittest.TestCase): - def setUp(self): self.op_type = 'one_hot' self.depth = 10 @@ -156,46 +155,54 @@ class TestOneHotOp_exception(unittest.TestCase): def test_check_output(self): program = Program() with program_guard(program): - x = fluid.layers.data(name='x', - shape=[self.dimension], - dtype='float32', - lod_level=1) + x = fluid.layers.data( + name='x', shape=[self.dimension], dtype='float32', lod_level=1 + ) block = program.current_block() - one_hot_out = block.create_var(name="one_hot_out", - type=core.VarDesc.VarType.LOD_TENSOR, - dtype='float32') - block.append_op(type='one_hot', - inputs={'X': x}, - attrs={'depth': self.depth}, - outputs={'Out': one_hot_out}) + one_hot_out = block.create_var( + name="one_hot_out", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32', + ) + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}, + ) exe = fluid.Executor(self.place) def run(): - exe.run(feed={'x': self.x}, - fetch_list=[one_hot_out], - return_numpy=False) + exe.run( + feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False, + ) self.assertRaises(ValueError, run) class TestOneHotOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input must be Variable in_w = np.random.random((4, 1)).astype("int32") self.assertRaises(TypeError, fluid.layers.one_hot, in_w) # the input must be int32 or int 64 - in_w2 = fluid.layers.data(name="in_w2", - shape=[4, 1], - append_batch_size=False, - dtype="float32") + in_w2 = fluid.layers.data( + name="in_w2", + shape=[4, 1], + append_batch_size=False, + dtype="float32", + ) self.assertRaises(TypeError, fluid.layers.one_hot, in_w2) # the depth must be int, long or Variable - in_r = fluid.layers.data(name="in_r", - shape=[4, 1], - append_batch_size=False, - dtype="int32") + in_r = fluid.layers.data( + name="in_r", + shape=[4, 1], + append_batch_size=False, + dtype="int32", + ) depth_w = np.array([4]) self.assertRaises(TypeError, fluid.layers.one_hot, in_r, 4.1) self.assertRaises(TypeError, fluid.layers.one_hot, in_r, depth_w) diff --git 
a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py index 284957a5318f14039b0c9c6735c7a8d50fe779b4..23d277c04e59fb267a53f5acd8f312c560e63452 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import Program, program_guard, _test_eager_guard class TestOneHotOp(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -46,7 +45,6 @@ class TestOneHotOp(OpTest): class TestOneHotOp_attr(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -55,8 +53,9 @@ class TestOneHotOp_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -70,7 +69,6 @@ class TestOneHotOp_attr(OpTest): class TestOneHotOp_default_dtype(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -94,7 +92,6 @@ class TestOneHotOp_default_dtype(OpTest): class TestOneHotOp_default_dtype_attr(OpTest): - def setUp(self): self.op_type = 'one_hot_v2' depth = 10 @@ -103,8 +100,9 @@ class TestOneHotOp_default_dtype_attr(OpTest): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype('float32') + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + 'float32' + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -118,7 +116,6 @@ class TestOneHotOp_default_dtype_attr(OpTest): class TestOneHotOp_exception(unittest.TestCase): - def setUp(self): self.op_type = 'one_hot_v2' self.depth = 10 @@ -134,30 +131,34 @@ class TestOneHotOp_exception(unittest.TestCase): def test_check_output(self): program = Program() with program_guard(program): - x = fluid.layers.data(name='x', - shape=[self.dimension], - dtype='float32', - lod_level=1) + x = fluid.layers.data( + name='x', shape=[self.dimension], dtype='float32', lod_level=1 + ) block = program.current_block() - one_hot_out = block.create_var(name="one_hot_out", - type=core.VarDesc.VarType.LOD_TENSOR, - dtype='float32') - block.append_op(type='one_hot', - inputs={'X': x}, - attrs={'depth': self.depth}, - outputs={'Out': one_hot_out}) + one_hot_out = block.create_var( + name="one_hot_out", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32', + ) + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}, + ) exe = fluid.Executor(self.place) def run(): - exe.run(feed={'x': self.x}, - fetch_list=[one_hot_out], - return_numpy=False) + exe.run( + feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False, + ) self.assertRaises(ValueError, run) class TestOneHotOpApi(unittest.TestCase): - def test_api(self): depth = 10 self._run(depth) @@ -168,45 +169,53 @@ class TestOneHotOpApi(unittest.TestCase): def test_api_with_dygraph(self): depth = 10 - label = np.array([np.random.randint(0, depth - 1) - for i in range(6)]).reshape([6, 1]) + label = np.array( + [np.random.randint(0, depth - 1) for i in range(6)] + ).reshape([6, 1]) with fluid.dygraph.guard(): one_hot_label = fluid.one_hot( - input=fluid.dygraph.to_variable(label), 
depth=depth) + input=fluid.dygraph.to_variable(label), depth=depth + ) one_hot_label = paddle.nn.functional.one_hot( - fluid.dygraph.to_variable(label), depth) + fluid.dygraph.to_variable(label), depth + ) with _test_eager_guard(): one_hot_label = paddle.nn.functional.one_hot( - paddle.to_tensor(label), depth) + paddle.to_tensor(label), depth + ) def _run(self, depth): label = fluid.layers.data(name="label", shape=[1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) place = fluid.CPUPlace() - label_data = np.array([np.random.randint(0, 10 - 1) - for i in range(6)]).reshape([6, 1]) + label_data = np.array( + [np.random.randint(0, 10 - 1) for i in range(6)] + ).reshape([6, 1]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'label': label_data, - }, - fetch_list=[one_hot_label], - return_numpy=False) + ret = exe.run( + feed={ + 'label': label_data, + }, + fetch_list=[one_hot_label], + return_numpy=False, + ) class BadInputTestOnehotV2(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="float32") + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32", + ) one_hot_label = fluid.one_hot(input=label, depth=4) self.assertRaises(TypeError, test_bad_x) diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py index 8c0ceadbf581edbb71bd13b5ca7f251fd8dccc69..fe7a66acecdf4b11041f8b3f6dc6298667f5535d 100644 --- a/python/paddle/fluid/tests/unittests/test_ones_like.py +++ b/python/paddle/fluid/tests/unittests/test_ones_like.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestOnesLikeAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = paddle.fluid.data('x', [3, 4]) @@ -31,7 +30,6 @@ class TestOnesLikeAPIError(unittest.TestCase): class TestOnesLikeAPI(unittest.TestCase): - def test_api(self): shape = [3, 4] startup_program = Program() @@ -46,25 +44,33 @@ class TestOnesLikeAPI(unittest.TestCase): out4 = ones_like(x, 'int32') out5 = ones_like(x, 'int64') - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - outs = exe.run(train_program, - feed={'X': np.ones(shape).astype('float32')}, - fetch_list=[out1, out2, out3, out4, out5]) + outs = exe.run( + train_program, + feed={'X': np.ones(shape).astype('float32')}, + fetch_list=[out1, out2, out3, out4, out5], + ) for i, dtype in enumerate( - [np.float32, np.bool_, np.float64, np.int32, np.int64]): + [np.float32, np.bool_, np.float64, np.int32, np.int64] + ): self.assertEqual(outs[i].dtype, dtype) self.assertEqual((outs[i] == np.ones(shape, dtype)).all(), True) class TestOnesLikeImpeartive(unittest.TestCase): - def test_out(self): shape = [3, 4] - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.disable_static(place) x = paddle.to_tensor(np.ones(shape)) for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]: @@ -80,11 +86,13 @@ class TestOnesLikeImpeartive(unittest.TestCase): class TestOnesAPI(unittest.TestCase): - def test_api(self): shape = [3, 4] - place 
= fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.disable_static(place) for dtype in [np.float32, np.float64, np.int32, np.int64]: diff --git a/python/paddle/fluid/tests/unittests/test_ones_op.py b/python/paddle/fluid/tests/unittests/test_ones_op.py index ac02627f9cc1a1bb8176a219d1d0edb99266a36e..a82842b3c72aaaa5b8ac977ea2c2ba6b3d86d679 100644 --- a/python/paddle/fluid/tests/unittests/test_ones_op.py +++ b/python/paddle/fluid/tests/unittests/test_ones_op.py @@ -21,13 +21,12 @@ import numpy as np class ApiOnesTest(unittest.TestCase): - def test_paddle_ones(self): with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=[10]) place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[ones]) + (result,) = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="float32") self.assertEqual((result == expected_result).all(), True) @@ -35,7 +34,7 @@ class ApiOnesTest(unittest.TestCase): ones = paddle.ones(shape=[10], dtype="float64") place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[ones]) + (result,) = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="float64") self.assertEqual((result == expected_result).all(), True) @@ -43,7 +42,7 @@ class ApiOnesTest(unittest.TestCase): ones = paddle.ones(shape=[10], dtype="int64") place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[ones]) + (result,) = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) @@ -52,15 +51,13 @@ class ApiOnesTest(unittest.TestCase): ones = fluid.layers.ones(shape=[10], dtype="int64") place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[ones]) + (result,) = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) class ApiOnesZerosError(unittest.TestCase): - def test_errors(self): - def test_error1(): with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=10, dtype="int64") diff --git a/python/paddle/fluid/tests/unittests/test_onnx_export.py b/python/paddle/fluid/tests/unittests/test_onnx_export.py index 6cb4ed2fd924e5f52e745d6a1260a8956675759b..4a4e26000446225f82c61756c336652df847a479 100644 --- a/python/paddle/fluid/tests/unittests/test_onnx_export.py +++ b/python/paddle/fluid/tests/unittests/test_onnx_export.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class LinearNet(paddle.nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = paddle.nn.Linear(128, 10) @@ -30,7 +29,6 @@ class LinearNet(paddle.nn.Layer): class Logic(paddle.nn.Layer): - def __init__(self): super(Logic, self).__init__() @@ -42,10 +40,10 @@ class Logic(paddle.nn.Layer): class TestExportWithTensor(unittest.TestCase): - def func_with_tensor(self): - self.x_spec = paddle.static.InputSpec(shape=[None, 128], - dtype='float32') + self.x_spec = paddle.static.InputSpec( + shape=[None, 128], dtype='float32' + ) model = LinearNet() paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec]) @@ -56,7 +54,6 @@ class TestExportWithTensor(unittest.TestCase): class TestExportWithTensor1(unittest.TestCase): - def func_with_tensor(self): self.x = paddle.to_tensor(np.random.random((1, 128))) model = LinearNet() 
@@ -69,17 +66,15 @@ class TestExportWithTensor1(unittest.TestCase): class TestExportPrunedGraph(unittest.TestCase): - def func_prune_graph(self): model = Logic() self.x = paddle.to_tensor(np.array([1])) self.y = paddle.to_tensor(np.array([-1])) paddle.jit.to_static(model) out = model(self.x, self.y, z=True) - paddle.onnx.export(model, - 'pruned', - input_spec=[self.x], - output_spec=[out]) + paddle.onnx.export( + model, 'pruned', input_spec=[self.x], output_spec=[out] + ) def test_prune_graph(self): # test eager diff --git a/python/paddle/fluid/tests/unittests/test_op_function_generator.py b/python/paddle/fluid/tests/unittests/test_op_function_generator.py index a8e4e7d3e8211c5c5edfc7da1a53cefa1b611b71..3163a7688880201ea15186fac3320d5c7e5ec273 100644 --- a/python/paddle/fluid/tests/unittests/test_op_function_generator.py +++ b/python/paddle/fluid/tests/unittests/test_op_function_generator.py @@ -22,7 +22,6 @@ from paddle import _legacy_C_ops class TestTracedLayer(fluid.dygraph.Layer): - def __init__(self, name_scope): super(TestTracedLayer, self).__init__(name_scope) @@ -31,7 +30,6 @@ class TestTracedLayer(fluid.dygraph.Layer): class TestVariable(unittest.TestCase): - def setUp(self): self.shape = [512, 768] self.dtype = np.float32 @@ -100,7 +98,8 @@ class TestVariable(unittest.TestCase): a = np.random.uniform(-1, 1, self.shape).astype(self.dtype) x = fluid.dygraph.to_variable(a) res_dygraph, static_layer = TracedLayer.trace( - layer, inputs=x) # dygraph out + layer, inputs=x + ) # dygraph out res_static_graph = static_layer([x])[0] np.testing.assert_array_equal(res_dygraph.numpy(), res_static_graph) diff --git a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py index 86f82f0a9e22e540cf559ef94a5e27bbe20499f3..10e9b4d3028c3c10a0ac70f33d41a368a4a71bea 100644 --- a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py +++ b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py @@ -18,7 +18,6 @@ import unittest class TestOpNameConflict(unittest.TestCase): - def test_conflict(self): main = fluid.Program() startup = fluid.Program() @@ -34,12 +33,14 @@ class TestOpNameConflict(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) - m_v, n_v, p_v = exe.run(feed={ - "x": np.ones((1), "float32") * 2, - "y": np.ones((1), "float32") * 3, - "z": np.ones((1), "float32") * 5 - }, - fetch_list=[m, n, p]) + m_v, n_v, p_v = exe.run( + feed={ + "x": np.ones((1), "float32") * 2, + "y": np.ones((1), "float32") * 3, + "z": np.ones((1), "float32") * 5, + }, + fetch_list=[m, n, p], + ) self.assertEqual(m_v[0], 5.0) self.assertEqual(n_v[0], 8.0) @@ -50,37 +51,42 @@ class TestOpNameConflict(unittest.TestCase): startup = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - data = fluid.data(name='data', - shape=[None, 1, 2, 2], - dtype='float32') - tensor = fluid.data(name='tensor', - shape=[None, 32, 64], - dtype='float32') - x = fluid.data(name='x', - shape=[None, 1], - dtype='float32', - lod_level=1) + data = fluid.data( + name='data', shape=[None, 1, 2, 2], dtype='float32' + ) + tensor = fluid.data( + name='tensor', shape=[None, 32, 64], dtype='float32' + ) + x = fluid.data( + name='x', shape=[None, 1], dtype='float32', lod_level=1 + ) input_scale = 
fluid.layers.create_parameter( shape=[1], dtype="float32", - default_initializer=fluid.initializer.Constant(2.0)) + default_initializer=fluid.initializer.Constant(2.0), + ) input_bias = fluid.layers.create_parameter( shape=[1], dtype="float32", - default_initializer=fluid.initializer.Constant(0.5)) - out_affine = fluid.layers.affine_channel(data, - scale=input_scale, - bias=input_bias) - out_similarity = fluid.layers.similarity_focus(input=data, - axis=1, - indexes=[0]) + default_initializer=fluid.initializer.Constant(0.5), + ) + out_affine = fluid.layers.affine_channel( + data, scale=input_scale, bias=input_bias + ) + out_similarity = fluid.layers.similarity_focus( + input=data, axis=1, indexes=[0] + ) position_tensor = fluid.layers.add_position_encoding( - input=tensor, alpha=1.0, beta=1.0) + input=tensor, alpha=1.0, beta=1.0 + ) x_reversed = fluid.layers.sequence_reverse(x) exe.run(fluid.default_startup_program()) @@ -88,21 +94,24 @@ class TestOpNameConflict(unittest.TestCase): x_d = fluid.create_lod_tensor( np.array([[1.1], [2.2], [3.3], [4.4]]).astype('float32'), - [[1, 3]], place) - outs = exe.run(test_program, - fetch_list=[ - out_affine, out_similarity, position_tensor, - x_reversed - ], - feed={ - data.name: - np.ones([1, 1, 2, 2]).astype('float32'), - tensor.name: - np.ones([1, 32, 64]).astype('float32'), - x.name: - x_d - }, - return_numpy=False) + [[1, 3]], + place, + ) + outs = exe.run( + test_program, + fetch_list=[ + out_affine, + out_similarity, + position_tensor, + x_reversed, + ], + feed={ + data.name: np.ones([1, 1, 2, 2]).astype('float32'), + tensor.name: np.ones([1, 32, 64]).astype('float32'), + x.name: x_d, + }, + return_numpy=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_op_support_gpu.py b/python/paddle/fluid/tests/unittests/test_op_support_gpu.py index b13301517b34c76c28a0b783fa6269d5d3f08511..fe7fd6ab3d5f8f037a22ee21511a520fd0db23a8 100644 --- a/python/paddle/fluid/tests/unittests/test_op_support_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_op_support_gpu.py @@ -17,10 +17,10 @@ import paddle.fluid.core as core class TestOpSupportGPU(unittest.TestCase): - def test_case(self): - self.assertEqual(core.is_compiled_with_cuda(), - core.op_support_gpu("sum")) + self.assertEqual( + core.is_compiled_with_cuda(), core.op_support_gpu("sum") + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_op_version.py b/python/paddle/fluid/tests/unittests/test_op_version.py index 49b3c69b8909c465154db6bebd40996374ce58cb..47f49d21864aabca49c132afa1e1bef285609a87 100644 --- a/python/paddle/fluid/tests/unittests/test_op_version.py +++ b/python/paddle/fluid/tests/unittests/test_op_version.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class OpLastCheckpointCheckerTest(unittest.TestCase): - def __init__(self, methodName='runTest'): super(OpLastCheckpointCheckerTest, self).__init__(methodName) self.checker = utils.OpLastCheckpointChecker() @@ -27,8 +26,9 @@ class OpLastCheckpointCheckerTest(unittest.TestCase): def test_op_attr_info(self): update_type = fluid.core.OpUpdateType.kNewAttr - info_list = self.checker.filter_updates(self.fake_op, update_type, - 'STRINGS') + info_list = self.checker.filter_updates( + self.fake_op, update_type, 'STRINGS' + ) self.assertTrue(info_list) self.assertEqual(info_list[0].name(), 'STRINGS') self.assertEqual(info_list[0].default_value(), ['str1', 'str2']) @@ -36,8 +36,9 @@ class OpLastCheckpointCheckerTest(unittest.TestCase): def test_op_input_output_info(self): 
update_type = fluid.core.OpUpdateType.kNewInput - info_list = self.checker.filter_updates(self.fake_op, update_type, - 'NewInput') + info_list = self.checker.filter_updates( + self.fake_op, update_type, 'NewInput' + ) self.assertTrue(info_list) self.assertEqual(info_list[0].name(), 'NewInput') self.assertEqual(info_list[0].remark(), 'NewInput_') @@ -50,7 +51,6 @@ class OpLastCheckpointCheckerTest(unittest.TestCase): class OpVersionTest(unittest.TestCase): - def __init__(self, methodName='runTest'): super(OpVersionTest, self).__init__(methodName) self.vmap = fluid.core.get_op_version_map() @@ -72,11 +72,13 @@ class OpVersionTest(unittest.TestCase): true_l = [2.56, 1.28] self.assertEqual(len(true_l), len(desc_2[1].info().default_value())) for i in range(len(true_l)): - self.assertAlmostEqual(desc_2[1].info().default_value()[i], - true_l[i], 2) + self.assertAlmostEqual( + desc_2[1].info().default_value()[i], true_l[i], 2 + ) self.assertEqual(desc_2[2].info().default_value(), [10, 100]) - self.assertEqual(desc_2[3].info().default_value(), - [10000001, -10000001]) + self.assertEqual( + desc_2[3].info().default_value(), [10000001, -10000001] + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_operator.py b/python/paddle/fluid/tests/unittests/test_operator.py index 62db058682bd980fe0991f55a29c8050703749d5..0b7d9a4c1bb426e619d7fe2de58cd847eef9439a 100644 --- a/python/paddle/fluid/tests/unittests/test_operator.py +++ b/python/paddle/fluid/tests/unittests/test_operator.py @@ -21,7 +21,6 @@ import paddle.fluid.proto.framework_pb2 as framework_pb2 class TestGetAllProtos(unittest.TestCase): - def test_all(self): all_protos = op.get_all_op_protos() self.assertNotEqual(0, len(all_protos)) @@ -31,7 +30,6 @@ class TestGetAllProtos(unittest.TestCase): class TestOpDescCreationMethod(unittest.TestCase): - def test_plain_input_output(self): op_proto = framework_pb2.OpProto() op_proto.type = "test" @@ -112,10 +110,9 @@ class TestOpDescCreationMethod(unittest.TestCase): expected1.type = 'fc' self.assertEqual(expected1, generated1) - generated2 = method(X=['x1', 'x2', 'x3'], - b='b', - W=['w1', 'w2', 'w3'], - Y='y') + generated2 = method( + X=['x1', 'x2', 'x3'], b='b', W=['w1', 'w2', 'w3'], Y='y' + ) expected2 = framework_pb2.OpDesc() tmp = expected2.inputs.add() @@ -163,14 +160,16 @@ class TestOpDescCreationMethod(unittest.TestCase): method = op.OpDescCreationMethod(op_proto) - generated = method(X="a", - int_attr=10, - float_attr=3.2, - float64_attr=np.finfo("float64").max, - string_attr="test_str", - ints_attr=[0, 1, 2, 3, 4], - floats_attr=[0.2, 3.2, 4.5], - strings_attr=["a", "b", "c"]) + generated = method( + X="a", + int_attr=10, + float_attr=3.2, + float64_attr=np.finfo("float64").max, + string_attr="test_str", + ints_attr=[0, 1, 2, 3, 4], + floats_attr=[0.2, 3.2, 4.5], + strings_attr=["a", "b", "c"], + ) expected = framework_pb2.OpDesc() expected.type = "test" @@ -218,13 +217,13 @@ class TestOpDescCreationMethod(unittest.TestCase): class TestOpCreations(unittest.TestCase): - def test_all(self): add_op = op.Operator("sum", X=["a", "b"], Out="z") self.assertIsNotNone(add_op) # Invoke C++ DebugString() - self.assertEqual('Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.', - str(add_op)) + self.assertEqual( + 'Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.', str(add_op) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py index 
eeb7dfe95db6a6be5b8a31d0fa101c5f89633a1d..af3eecec826c22ca8d2d031aa34d38ee7bacfc68 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -22,7 +22,6 @@ main_program = default_startup_program() class TestOperator(unittest.TestCase): - def test_error_type(self): block = main_program._create_block() try: @@ -30,37 +29,34 @@ class TestOperator(unittest.TestCase): self.assertFail() except ValueError as v_err: self.assertEqual( - str(v_err), - "`type` to initialized an Operator can not be None.") + str(v_err), "`type` to initialized an Operator can not be None." + ) try: block.append_op(type="no_such_op") self.assertFail() except ValueError as a_err: self.assertEqual( - str(a_err), "Operator \"no_such_op\" has not been registered.") + str(a_err), "Operator \"no_such_op\" has not been registered." + ) def test_op_desc_creation(self): program = Program() block = program.current_block() - mul_x = block.create_var(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x") - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - mul_op = block.append_op(type="mul", - inputs={ - "X": [mul_x], - "Y": mul_y - }, - outputs={"Out": [mul_out]}, - attrs={"x_num_col_dims": 1}) + mul_x = block.create_var( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x" + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + mul_op = block.append_op( + type="mul", + inputs={"X": [mul_x], "Y": mul_y}, + outputs={"Out": [mul_out]}, + attrs={"x_num_col_dims": 1}, + ) self.assertNotEqual(str(mul_op), "") self.assertEqual(mul_op.type, "mul") @@ -71,10 +67,19 @@ class TestOperator(unittest.TestCase): self.assertEqual(mul_op.output("Out"), ["mul.out"]) self.assertEqual( set(mul_op.attr_names), - set([ - "x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var", - "op_namescope", "op_callstack", "op_device", "with_quant_attr" - ])) + set( + [ + "x_num_col_dims", + "y_num_col_dims", + "op_role", + "op_role_var", + "op_namescope", + "op_callstack", + "op_device", + "with_quant_attr", + ] + ), + ) self.assertEqual(mul_op.has_attr("x_num_col_dims"), True) self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT) self.assertEqual(mul_op.attr("x_num_col_dims"), 1) @@ -89,25 +94,23 @@ class TestOperator(unittest.TestCase): def test_mult_input(self): program = Program() block = program.current_block() - sum_x1 = block.create_var(dtype="int", - shape=[3, 4], - lod_level=0, - name="sum.x1") - sum_x2 = block.create_var(dtype="int", - shape=[3, 4], - lod_level=0, - name="sum.x2") - sum_x3 = block.create_var(dtype="int", - shape=[3, 4], - lod_level=0, - name="sum.x3") - sum_out = block.create_var(dtype="int", - shape=[3, 4], - lod_level=0, - name="sum.out") - sum_op = block.append_op(type="sum", - inputs={"X": [sum_x1, sum_x2, sum_x3]}, - outputs={"Out": sum_out}) + sum_x1 = block.create_var( + dtype="int", shape=[3, 4], lod_level=0, name="sum.x1" + ) + sum_x2 = block.create_var( + dtype="int", shape=[3, 4], lod_level=0, name="sum.x2" + ) + sum_x3 = block.create_var( + dtype="int", shape=[3, 4], lod_level=0, name="sum.x3" + ) + sum_out = block.create_var( + dtype="int", shape=[3, 4], lod_level=0, name="sum.out" + ) + sum_op = block.append_op( + type="sum", + 
inputs={"X": [sum_x1, sum_x2, sum_x3]}, + outputs={"Out": sum_out}, + ) self.assertEqual(sum_op.type, "sum") self.assertEqual(sum_op.input_names, ["X"]) self.assertEqual(sum_op.input("X"), ["sum.x1", "sum.x2", "sum.x3"]) diff --git a/python/paddle/fluid/tests/unittests/test_ops_nms.py b/python/paddle/fluid/tests/unittests/test_ops_nms.py index 4004f29b446a319258e4850d7e4ea48393b9ea55..573231a8a725a9db4d0a83a16d36b814dbf6c652 100644 --- a/python/paddle/fluid/tests/unittests/test_ops_nms.py +++ b/python/paddle/fluid/tests/unittests/test_ops_nms.py @@ -46,10 +46,12 @@ def multiclass_nms(boxes, scores, category_idxs, iou_threshold, top_k): cur_category_scores = scores[cur_category_boxes_idxs] cur_category_sorted_indices = np.argsort(-cur_category_scores) cur_category_sorted_boxes = cur_category_boxes[ - cur_category_sorted_indices] + cur_category_sorted_indices + ] - cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[nms( - cur_category_sorted_boxes, iou_threshold)] + cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[ + nms(cur_category_sorted_boxes, iou_threshold) + ] mask[cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs]] = True @@ -72,7 +74,6 @@ def gen_args(num_boxes, dtype): class TestOpsNMS(unittest.TestCase): - def setUp(self): self.num_boxes = 64 self.threshold = 0.5 @@ -91,78 +92,100 @@ class TestOpsNMS(unittest.TestCase): for device in self.devices: for dtype in self.dtypes: boxes, scores, category_idxs, categories = gen_args( - self.num_boxes, dtype) + self.num_boxes, dtype + ) paddle.set_device(device) - out = paddle.vision.ops.nms(paddle.to_tensor(boxes), - self.threshold, - paddle.to_tensor(scores)) - out = paddle.vision.ops.nms(paddle.to_tensor(boxes), - self.threshold) + out = paddle.vision.ops.nms( + paddle.to_tensor(boxes), + self.threshold, + paddle.to_tensor(scores), + ) + out = paddle.vision.ops.nms( + paddle.to_tensor(boxes), self.threshold + ) out_py = nms(boxes, self.threshold) np.testing.assert_array_equal( out.numpy(), out_py, - err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py), + ) def test_multiclass_nms_dynamic(self): for device in self.devices: for dtype in self.dtypes: boxes, scores, category_idxs, categories = gen_args( - self.num_boxes, dtype) + self.num_boxes, dtype + ) paddle.set_device(device) - out = paddle.vision.ops.nms(paddle.to_tensor(boxes), - self.threshold, - paddle.to_tensor(scores), - paddle.to_tensor(category_idxs), - categories, self.topk) - out_py = multiclass_nms(boxes, scores, category_idxs, - self.threshold, self.topk) + out = paddle.vision.ops.nms( + paddle.to_tensor(boxes), + self.threshold, + paddle.to_tensor(scores), + paddle.to_tensor(category_idxs), + categories, + self.topk, + ) + out_py = multiclass_nms( + boxes, scores, category_idxs, self.threshold, self.topk + ) np.testing.assert_array_equal( out.numpy(), out_py, - err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py), + ) def test_multiclass_nms_static(self): for device in self.devices: for dtype in self.dtypes: paddle.enable_static() boxes, scores, category_idxs, categories = gen_args( - self.num_boxes, dtype) - boxes_static = paddle.static.data(shape=boxes.shape, - dtype=boxes.dtype, - name="boxes") - scores_static = paddle.static.data(shape=scores.shape, - dtype=scores.dtype, - name="scores") + self.num_boxes, dtype + ) + boxes_static = paddle.static.data( + shape=boxes.shape, dtype=boxes.dtype, 
name="boxes" + ) + scores_static = paddle.static.data( + shape=scores.shape, dtype=scores.dtype, name="scores" + ) category_idxs_static = paddle.static.data( shape=category_idxs.shape, dtype=category_idxs.dtype, - name="category_idxs") - out = paddle.vision.ops.nms(boxes_static, self.threshold, - scores_static, category_idxs_static, - categories, self.topk) + name="category_idxs", + ) + out = paddle.vision.ops.nms( + boxes_static, + self.threshold, + scores_static, + category_idxs_static, + categories, + self.topk, + ) place = paddle.CPUPlace() if device == 'gpu': place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) - out = exe.run(paddle.static.default_main_program(), - feed={ - 'boxes': boxes, - 'scores': scores, - 'category_idxs': category_idxs - }, - fetch_list=[out]) + out = exe.run( + paddle.static.default_main_program(), + feed={ + 'boxes': boxes, + 'scores': scores, + 'category_idxs': category_idxs, + }, + fetch_list=[out], + ) paddle.disable_static() - out_py = multiclass_nms(boxes, scores, category_idxs, - self.threshold, self.topk) + out_py = multiclass_nms( + boxes, scores, category_idxs, self.threshold, self.topk + ) out = np.array(out) out = np.squeeze(out) np.testing.assert_array_equal( out, out_py, - err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py), + ) def test_multiclass_nms_dynamic_to_static(self): for device in self.devices: @@ -173,10 +196,14 @@ class TestOpsNMS(unittest.TestCase): scores = np.arange(0, 64).astype('float32') categories = np.array([0, 1, 2, 3]) category_idxs = categories.repeat(16) - out = paddle.vision.ops.nms(x, 0.1, - paddle.to_tensor(scores), - paddle.to_tensor(category_idxs), - categories, 10) + out = paddle.vision.ops.nms( + x, + 0.1, + paddle.to_tensor(scores), + paddle.to_tensor(category_idxs), + categories, + 10, + ) return out boxes = np.random.rand(64, 4).astype('float32') @@ -188,9 +215,9 @@ class TestOpsNMS(unittest.TestCase): fun, self.path, input_spec=[ - paddle.static.InputSpec(shape=[None, 4], - dtype='float32', - name='x') + paddle.static.InputSpec( + shape=[None, 4], dtype='float32', name='x' + ) ], ) load_func = paddle.jit.load(self.path) @@ -199,20 +226,23 @@ class TestOpsNMS(unittest.TestCase): origin, res, err_msg='origin out: {}\n inference model out: {}\n'.format( - origin, res)) + origin, res + ), + ) def test_matrix_nms_dynamic(self): for device in self.devices: for dtype in self.dtypes: boxes, scores, category_idxs, categories = gen_args( - self.num_boxes, dtype) + self.num_boxes, dtype + ) scores = np.random.rand(1, 4, self.num_boxes).astype(dtype) paddle.set_device(device) out = paddle.vision.ops.matrix_nms( paddle.to_tensor(boxes).unsqueeze(0), paddle.to_tensor(scores), self.threshold, - post_threshold=0., + post_threshold=0.0, nms_top_k=400, keep_top_k=100, ) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 47d42e354cadbfab527ccc93b23b7e8745f5c8ab..17ad33f67ab9980d76327231c6045258e6224b0d 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -22,7 +22,11 @@ import paddle.fluid.optimizer as optimizer import paddle.fluid.core as core import numpy as np from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard, convert_np_dtype_to_dtype_ +from paddle.fluid.framework import ( + Program, + program_guard, + convert_np_dtype_to_dtype_, +) 
from paddle.fluid.framework import _test_eager_guard import paddle from paddle.io import Dataset @@ -30,40 +34,36 @@ import numpy class TestOptimizer(unittest.TestCase): - def test_sgd_optimizer(self): - def check_sgd_optimizer(optimizer_attr): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr=optimizer_attr) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr=optimizer_attr, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) opts, _ = sgd_optimizer.minimize(mean_out, init_program) return opts @@ -78,40 +78,36 @@ class TestOptimizer(unittest.TestCase): class TestOptimizerBackwardApplygrad(unittest.TestCase): - def test_sgd_optimizer(self): - def check_sgd_optimizer(optimizer_attr): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr=optimizer_attr) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr=optimizer_attr, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) with framework.program_guard(program, init_program): p_g = sgd_optimizer.backward(mean_out) @@ -128,9 +124,7 @@ class TestOptimizerBackwardApplygrad(unittest.TestCase): class 
TestMomentumOptimizer(unittest.TestCase): - class MockMomentum(optimizer.MomentumOptimizer): - def get_accumulators(self): return self._accumulators @@ -141,36 +135,35 @@ class TestMomentumOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) learning_rate = 0.01 - momentum_optimizer = self.MockMomentum(learning_rate=learning_rate, - momentum=0.2) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + momentum_optimizer = self.MockMomentum( + learning_rate=learning_rate, momentum=0.2 + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) @@ -201,37 +194,35 @@ class TestMomentumOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 - momentum_optimizer = self.MockMomentum(learning_rate=learning_rate, - momentum=0.2, - use_nesterov=True) + momentum_optimizer = self.MockMomentum( + 
learning_rate=learning_rate, momentum=0.2, use_nesterov=True + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) @@ -260,9 +251,7 @@ class TestMomentumOptimizer(unittest.TestCase): class TestAdagradOptimizer(unittest.TestCase): - class MockAdagrad(optimizer.AdagradOptimizer): - def get_accumulators(self): return self._accumulators @@ -273,36 +262,35 @@ class TestAdagradOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 - adagrad_optimizer = self.MockAdagrad(learning_rate=learning_rate, - epsilon=1.0e-6) + adagrad_optimizer = self.MockAdagrad( + learning_rate=learning_rate, epsilon=1.0e-6 + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0) @@ -329,9 +317,7 @@ class TestAdagradOptimizer(unittest.TestCase): class TestAdamOptimizer(unittest.TestCase): - class MockAdam(optimizer.AdamOptimizer): - def get_accumulators(self): return self._accumulators @@ -345,37 +331,35 @@ class TestAdamOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) 
+ block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 - adam_optimizer = self.MockAdam(learning_rate=learning_rate, - beta1=0.9, - beta2=0.999) + adam_optimizer = self.MockAdam( + learning_rate=learning_rate, beta1=0.9, beta2=0.999 + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adam_optimizer.get_accumulators()), 0) @@ -404,9 +388,7 @@ class TestAdamOptimizer(unittest.TestCase): class TestAdamaxOptimizer(unittest.TestCase): - class MockAdamax(optimizer.AdamaxOptimizer): - def get_accumulators(self): return self._accumulators @@ -420,37 +402,35 @@ class TestAdamaxOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 - adamax_optimizer = self.MockAdamax(learning_rate=learning_rate, - beta1=0.9, - beta2=0.999) + adamax_optimizer = self.MockAdamax( + learning_rate=learning_rate, beta1=0.9, beta2=0.999 + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adamax_optimizer.get_accumulators()), 0) @@ -479,61 +459,56 @@ class TestAdamaxOptimizer(unittest.TestCase): class TestDpsgdOptimizer(unittest.TestCase): - def test_dpsgd_optimizer(self): - def check_dpsgd_optimizer(optimizer_attr): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr=optimizer_attr) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - 
outputs={"Out": mean_out}) - dpsgd_optimizer = optimizer.DpsgdOptimizer(learning_rate=0.01, - clip=100.0, - batch_size=16.0, - sigma=0.0) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr=optimizer_attr, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) + dpsgd_optimizer = optimizer.DpsgdOptimizer( + learning_rate=0.01, clip=100.0, batch_size=16.0, sigma=0.0 + ) opts, _ = dpsgd_optimizer.minimize(mean_out, init_program) return opts - opts = check_dpsgd_optimizer({ - 'learning_rate': 1.1, - 'clip': 100.0, - 'batch_size': 16.0, - 'sigma': 4.0 - }) + opts = check_dpsgd_optimizer( + { + 'learning_rate': 1.1, + 'clip': 100.0, + 'batch_size': 16.0, + 'sigma': 4.0, + } + ) self.assertEqual(len(opts), 2) self.assertEqual([op.type for op in opts], ["scale", "dpsgd"]) class TestDecayedAdagradOptimizer(unittest.TestCase): - class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer): - def get_accumulators(self): return self._accumulators @@ -544,36 +519,35 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 decayed_adagrad_optimizer = self.MockDecayedAdagrad( - learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6) + learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6 + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) @@ -586,7 +560,8 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): accumulators = decayed_adagrad_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) self.assertTrue( - decayed_adagrad_optimizer.get_moment_str() in accumulators) + 
decayed_adagrad_optimizer.get_moment_str() in accumulators + ) moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()] self.assertEqual(len(moment_acc), 1) self.assertTrue(mul_x.name in moment_acc) @@ -601,9 +576,7 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): class TestFtrlOptimizer(unittest.TestCase): - class MockFtrl(optimizer.FtrlOptimizer): - def get_accumulators(self): return self._accumulators @@ -617,38 +590,35 @@ class TestFtrlOptimizer(unittest.TestCase): init_program = framework.Program() program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) learning_rate = 0.01 - ftrl_optimizer = self.MockFtrl(learning_rate=learning_rate, - l1=0.0, - l2=0.0, - lr_power=-0.5) + ftrl_optimizer = self.MockFtrl( + learning_rate=learning_rate, l1=0.0, l2=0.0, lr_power=-0.5 + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0) @@ -677,44 +647,40 @@ class TestFtrlOptimizer(unittest.TestCase): class TestLookaheadOptimizer(unittest.TestCase): - def test_lookahead_optimizer(self): init_program = framework.Program() program = framework.Program() block = program.global_block() init_block = init_program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr={'learning_rate': 1.1}) - init_mul_x = init_block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x") - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}, + ) + init_mul_x = init_block.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x" + ) + mul_y = block.create_var( + 
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) sgd = optimizer.SGD(learning_rate=0.01) lookahead = optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=5) @@ -725,109 +691,98 @@ class TestLookaheadOptimizer(unittest.TestCase): class TestRecomputeOptimizer(unittest.TestCase): - def net(self, return_input=False, with_dropout=False, with_seed=False): program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x") - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") + mul_x = block.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x" + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) if with_dropout is True: - mul_out_drop = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out.dropout") - mul_out_mask = block.create_var(dtype="uint8", - shape=[5, 8], - lod_level=0, - name="mul.out.mask") + mul_out_drop = block.create_var( + dtype="float32", + shape=[5, 8], + lod_level=0, + name="mul.out.dropout", + ) + mul_out_mask = block.create_var( + dtype="uint8", shape=[5, 8], lod_level=0, name="mul.out.mask" + ) if with_seed is True: - seed_out = block.create_var(dtype="int32", - shape=[1], - name="seed.out") - - b1 = block.create_parameter(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b1") - b1_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b1_out") - b2 = block.create_parameter(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b2") - b2_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b2_out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) + seed_out = block.create_var( + dtype="int32", shape=[1], name="seed.out" + ) + + b1 = block.create_parameter( + dtype="float32", shape=[5, 8], lod_level=0, name="b1" + ) + b1_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="b1_out" + ) + b2 = block.create_parameter( + dtype="float32", shape=[5, 8], lod_level=0, name="b2" + ) + b2_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="b2_out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) if with_dropout is True: dropout_inputs = {'X': [mul_out]} if with_seed is True: - block.append_op(type='seed', - outputs={'Out': seed_out}, - attrs={ - 'deterministic': True, - 'rng_name': 'rng0', - 'force_cpu': True - }) + block.append_op( + type='seed', + outputs={'Out': seed_out}, + attrs={ + 'deterministic': True, + 
'rng_name': 'rng0', + 'force_cpu': True, + }, + ) dropout_inputs = {'X': [mul_out], 'Seed': [seed_out]} - block.append_op(type='dropout', - inputs=dropout_inputs, - outputs={ - 'Out': [mul_out_drop], - 'Mask': [mul_out_mask] - }, - attrs={ - 'dropout_prob': 0.5, - }) - block.append_op(type="elementwise_add", - inputs={ - "X": mul_out_drop, - "Y": b1 - }, - outputs={"Out": b1_out}) + block.append_op( + type='dropout', + inputs=dropout_inputs, + outputs={'Out': [mul_out_drop], 'Mask': [mul_out_mask]}, + attrs={ + 'dropout_prob': 0.5, + }, + ) + block.append_op( + type="elementwise_add", + inputs={"X": mul_out_drop, "Y": b1}, + outputs={"Out": b1_out}, + ) else: - block.append_op(type="elementwise_add", - inputs={ - "X": mul_out, - "Y": b1 - }, - outputs={"Out": b1_out}) - - block.append_op(type="elementwise_add", - inputs={ - "X": b1_out, - "Y": b2 - }, - outputs={"Out": b2_out}) - block.append_op(type="mean", - inputs={"X": b2_out}, - outputs={"Out": mean_out}) + block.append_op( + type="elementwise_add", + inputs={"X": mul_out, "Y": b1}, + outputs={"Out": b1_out}, + ) + + block.append_op( + type="elementwise_add", + inputs={"X": b1_out, "Y": b2}, + outputs={"Out": b2_out}, + ) + block.append_op( + type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out} + ) if return_input == True: return mul_x, mul_out, b1_out, b2_out, mean_out @@ -836,124 +791,225 @@ class TestRecomputeOptimizer(unittest.TestCase): def test_no_checkpoint(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 12) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add_grad", - "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_one_checkpoint(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b1_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 13) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add_grad", "mul", - "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + 
"fill_constant", + "mean_grad", + "elementwise_add_grad", + "mul", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_str_checkpoints(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b1_out.name]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 13) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add_grad", "mul", - "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + "mul", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_multi_checkpoint(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([mul_out, b2_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 13) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add", - "elementwise_add_grad", "elementwise_add_grad", "mul_grad", "sgd", - "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add", + "elementwise_add_grad", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_adjacent_checkpoint(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([mul_out, b1_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 12) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add_grad", - "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + 
"elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_out_of_order_checkpoint(self): mul_out, b1_out, b2_out, mean_out = self.net() self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b2_out, mul_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 13) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add", - "elementwise_add_grad", "elementwise_add_grad", "mul_grad", "sgd", - "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add", + "elementwise_add_grad", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_input_as_checkpoints(self): mul_x, mul_out, b1_out, b2_out, mean_out = self.net(return_input=True) self.assertEqual(len(mean_out.block.ops), 4) - self.assertEqual([op.type for op in mean_out.block.ops], - ["mul", "elementwise_add", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + ["mul", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([mul_x, b2_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 14) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "mul", "elementwise_add", - "elementwise_add_grad", "elementwise_add_grad", "mul_grad", "sgd", - "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "mul", + "elementwise_add", + "elementwise_add_grad", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_apply_gradients(self): mul_out, b1_out, b2_out, mean_out = self.net() @@ -961,10 +1017,12 @@ class TestRecomputeOptimizer(unittest.TestCase): recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b1_out]) # apply backward - params_grads = recompute_optimizer.backward(mean_out, - startup_program=None, - parameter_list=None, - no_grad_set=None) + params_grads = recompute_optimizer.backward( + mean_out, + startup_program=None, + parameter_list=None, + no_grad_set=None, + ) # apply gradient program = mean_out.block.program @@ -972,11 +1030,24 @@ class TestRecomputeOptimizer(unittest.TestCase): optimize_ops = recompute_optimizer.apply_gradients(params_grads) self.assertEqual(len(mean_out.block.ops), 13) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "elementwise_add", "elementwise_add", "mean", - "fill_constant", "mean_grad", "elementwise_add_grad", "mul", - "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in 
mean_out.block.ops], + [ + "mul", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + "mul", + "elementwise_add_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_load(self): mul_out, b1_out, b2_out, mean_out = self.net() @@ -989,7 +1060,8 @@ class TestRecomputeOptimizer(unittest.TestCase): except NotImplementedError as e: self.assertEqual( "load function is not supported by Recompute Optimizer for now", - str(e)) + str(e), + ) def test_dropout(self): """ @@ -1000,40 +1072,81 @@ class TestRecomputeOptimizer(unittest.TestCase): self.assertEqual(len(mean_out.block.ops), 5) self.assertEqual( [op.type for op in mean_out.block.ops], - ["mul", "dropout", "elementwise_add", "elementwise_add", "mean"]) + ["mul", "dropout", "elementwise_add", "elementwise_add", "mean"], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b1_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 17) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "seed", "dropout", "elementwise_add", "elementwise_add", - "mean", "fill_constant", "mean_grad", "elementwise_add_grad", "mul", - "dropout", "elementwise_add_grad", "dropout_grad", "mul_grad", - "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "seed", + "dropout", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + "mul", + "dropout", + "elementwise_add_grad", + "dropout_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_dropout_with_determinate_seed(self): - mul_out, b1_out, b2_out, mean_out = self.net(with_dropout=True, - with_seed=True) + mul_out, b1_out, b2_out, mean_out = self.net( + with_dropout=True, with_seed=True + ) self.assertEqual(len(mean_out.block.ops), 6) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "seed", "dropout", "elementwise_add", "elementwise_add", - "mean" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "seed", + "dropout", + "elementwise_add", + "elementwise_add", + "mean", + ], + ) sgd_optimizer = optimizer.SGD(learning_rate=1.0) recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer) recompute_optimizer._set_checkpoints([b1_out]) opts, params_grads = recompute_optimizer.minimize(mean_out) self.assertEqual(len(mean_out.block.ops), 17) - self.assertEqual([op.type for op in mean_out.block.ops], [ - "mul", "seed", "dropout", "elementwise_add", "elementwise_add", - "mean", "fill_constant", "mean_grad", "elementwise_add_grad", "mul", - "dropout", "elementwise_add_grad", "dropout_grad", "mul_grad", - "sgd", "sgd", "sgd" - ]) + self.assertEqual( + [op.type for op in mean_out.block.ops], + [ + "mul", + "seed", + "dropout", + "elementwise_add", + "elementwise_add", + "mean", + "fill_constant", + "mean_grad", + "elementwise_add_grad", + "mul", + "dropout", + "elementwise_add_grad", + "dropout_grad", + "mul_grad", + "sgd", + "sgd", + "sgd", + ], + ) def test_dropout_with_seed(self): """ @@ -1044,16 +1157,16 @@ class TestRecomputeOptimizer(unittest.TestCase): def gen_data(): return { "x": np.random.random(size=(100, 3)).astype('float32'), - "y": np.random.randint(2, size=(100, 1)).astype('int64') + "y": np.random.randint(2, size=(100, 1)).astype('int64'), } def mlp(input_x, input_y): - drop_res = 
fluid.layers.dropout(input_x, - dropout_prob=0.5, - name="dropout_with_seed_cpu") - prediction = fluid.layers.fc(input=[drop_res], - size=2, - act='softmax') + drop_res = fluid.layers.dropout( + input_x, dropout_prob=0.5, name="dropout_with_seed_cpu" + ) + prediction = fluid.layers.fc( + input=[drop_res], size=2, act='softmax' + ) cost = fluid.layers.cross_entropy(input=prediction, label=input_y) sum_cost = fluid.layers.reduce_mean(cost) return drop_res, prediction, sum_cost @@ -1063,9 +1176,9 @@ class TestRecomputeOptimizer(unittest.TestCase): scope = fluid.Scope() with fluid.scope_guard(scope): with program_guard(main_program, startup_program): - input_x = fluid.layers.data(name="x", - shape=[3], - dtype='float32') + input_x = fluid.layers.data( + name="x", shape=[3], dtype='float32' + ) input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') drop_res, prediction, cost = mlp(input_x, input_y) sgd = fluid.optimizer.Adam(learning_rate=0.01) @@ -1077,19 +1190,21 @@ class TestRecomputeOptimizer(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) feed_data = gen_data() - drop_vec = exe.run(feed=feed_data, - program=fluid.default_main_program(), - fetch_list=[ - "dropout_with_seed_cpu.tmp_1", - "dropout_with_seed_cpu.tmp_1.subprog_0" - ]) + drop_vec = exe.run( + feed=feed_data, + program=fluid.default_main_program(), + fetch_list=[ + "dropout_with_seed_cpu.tmp_1", + "dropout_with_seed_cpu.tmp_1.subprog_0", + ], + ) self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist()) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestRecomputeOptimizerCUDA(unittest.TestCase): - def test_dropout_with_seed(self): """ when we recompute a dropout op, make sure that the recomputed one @@ -1099,16 +1214,16 @@ class TestRecomputeOptimizerCUDA(unittest.TestCase): def gen_data(): return { "x": np.random.random(size=(100, 3)).astype('float32'), - "y": np.random.randint(2, size=(100, 1)).astype('int64') + "y": np.random.randint(2, size=(100, 1)).astype('int64'), } def mlp(input_x, input_y): - drop_res = fluid.layers.dropout(input_x, - dropout_prob=0.5, - name="dropout_with_seed_gpu") - prediction = fluid.layers.fc(input=[drop_res], - size=2, - act='softmax') + drop_res = fluid.layers.dropout( + input_x, dropout_prob=0.5, name="dropout_with_seed_gpu" + ) + prediction = fluid.layers.fc( + input=[drop_res], size=2, act='softmax' + ) cost = fluid.layers.cross_entropy(input=prediction, label=input_y) sum_cost = fluid.layers.reduce_mean(cost) return drop_res, prediction, sum_cost @@ -1118,9 +1233,9 @@ class TestRecomputeOptimizerCUDA(unittest.TestCase): scope = fluid.Scope() with fluid.scope_guard(scope): with program_guard(main_program, startup_program): - input_x = fluid.layers.data(name="x", - shape=[3], - dtype='float32') + input_x = fluid.layers.data( + name="x", shape=[3], dtype='float32' + ) input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') drop_res, prediction, cost = mlp(input_x, input_y) sgd = fluid.optimizer.Adam(learning_rate=0.01) @@ -1132,70 +1247,67 @@ class TestRecomputeOptimizerCUDA(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) feed_data = gen_data() - drop_vec = exe.run(feed=feed_data, - program=fluid.default_main_program(), - fetch_list=[ - "dropout_with_seed_gpu.tmp_1", - "dropout_with_seed_gpu.tmp_1.subprog_0" - ]) + drop_vec = exe.run( + feed=feed_data, + 
program=fluid.default_main_program(), + fetch_list=[ + "dropout_with_seed_gpu.tmp_1", + "dropout_with_seed_gpu.tmp_1.subprog_0", + ], + ) self.assertEqual(drop_vec[0].tolist(), drop_vec[1].tolist()) class TestGradientMergeOptimizer(unittest.TestCase): - def net(self): program = framework.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x") - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - b1 = block.create_parameter(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b1") - b1_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="b1_out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - block.append_op(type="elementwise_add", - inputs={ - "X": mul_out, - "Y": b1 - }, - outputs={"Out": b1_out}) - block.append_op(type="mean", - inputs={"X": b1_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x" + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + b1 = block.create_parameter( + dtype="float32", shape=[5, 8], lod_level=0, name="b1" + ) + b1_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="b1_out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + block.append_op( + type="elementwise_add", + inputs={"X": mul_out, "Y": b1}, + outputs={"Out": b1_out}, + ) + block.append_op( + type="mean", inputs={"X": b1_out}, outputs={"Out": mean_out} + ) return mean_out - def test_program_desc(self, ): + def test_program_desc( + self, + ): cost = self.net() main_program = cost.block.program init_program = framework.Program() self.assertEqual(main_program.num_blocks, 1) self.assertEqual(len(cost.block.ops), 3) - self.assertEqual([op.type for op in cost.block.ops], - ["mul", "elementwise_add", "mean"]) + self.assertEqual( + [op.type for op in cost.block.ops], + ["mul", "elementwise_add", "mean"], + ) opt = optimizer.SGD(learning_rate=1.0) opt = optimizer.GradientMergeOptimizer(opt, k_steps=4) @@ -1222,13 +1334,15 @@ class TestGradientMergeOptimizer(unittest.TestCase): 'elementwise_add', 'elementwise_add', 'conditional_block', - ]) + ], + ) # optimize block self.assertEqual(len(main_program.block(1).ops), 6) self.assertEqual( [op.type for op in main_program.block(1).ops], - ['scale', 'scale', 'sgd', 'sgd', 'fill_constant', 'fill_constant']) + ['scale', 'scale', 'sgd', 'sgd', 'fill_constant', 'fill_constant'], + ) class TestOptimizerDtype(unittest.TestCase): @@ -1238,9 +1352,7 @@ class TestOptimizerDtype(unittest.TestCase): ''' def check_with_dtype(self, dtype): - class MyLayer(paddle.nn.Layer): - def __init__(self, dtype): super(MyLayer, self).__init__() self._w = self.create_parameter([2, 3], dtype=dtype) @@ -1287,12 +1399,14 @@ class TestMasterWeightSaveForFP16(unittest.TestCase): numpy.random.seed(100) class SimpleNet(paddle.nn.Layer): - def __init__(self, input_size, 
output_size): super(SimpleNet, self).__init__() - self.linears = paddle.nn.LayerList([ - paddle.nn.Linear(input_size, output_size) for i in range(1) - ]) + self.linears = paddle.nn.LayerList( + [ + paddle.nn.Linear(input_size, output_size) + for i in range(1) + ] + ) def forward(self, x): for i, l in enumerate(self.linears): @@ -1305,7 +1419,6 @@ class TestMasterWeightSaveForFP16(unittest.TestCase): nums_batch = 10 class RandomDataset(Dataset): - def __init__(self, num_samples): self.num_samples = num_samples @@ -1318,17 +1431,21 @@ class TestMasterWeightSaveForFP16(unittest.TestCase): return self.num_samples dataset = RandomDataset(nums_batch * batch_size) - loader = paddle.io.DataLoader(dataset, - batch_size=batch_size, - shuffle=False, - drop_last=True, - num_workers=0) + loader = paddle.io.DataLoader( + dataset, + batch_size=batch_size, + shuffle=False, + drop_last=True, + num_workers=0, + ) mse = paddle.nn.MSELoss() model = SimpleNet(input_size, output_size) # define the model - optimizer = paddle.optimizer.Momentum(learning_rate=0.0001, - parameters=model.parameters(), - multi_precision=True) # define the optimizer + optimizer = paddle.optimizer.Momentum( + learning_rate=0.0001, + parameters=model.parameters(), + multi_precision=True, + ) # define the optimizer scaler = paddle.amp.GradScaler(init_loss_scaling=1024) model = paddle.amp.decorate(models=model, level='O2') @@ -1356,9 +1473,11 @@ class TestMasterWeightSaveForFP16(unittest.TestCase): if core.is_compiled_with_cuda(): with fluid.dygraph.guard(): out_use_state_dict = self.check_with_opt_state_dict( - use_save_load=True) + use_save_load=True + ) out_no_state_dict = self.check_with_opt_state_dict( - use_save_load=False) + use_save_load=False + ) np.testing.assert_array_equal(out_use_state_dict, out_no_state_dict) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py index 06cdc599f6d00d1be60c1bf200e5e346e72d697b..fd73c042cb2da748ef15f11f64fc63b1503ce776 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class TestOptimizerForVarBase(unittest.TestCase): - def setUp(self): self.lr = 0.01 @@ -32,16 +31,16 @@ class TestOptimizerForVarBase(unittest.TestCase): z = x + y - opt = optimizer(learning_rate=self.lr, - parameters=[x], - weight_decay=0.01) + opt = optimizer( + learning_rate=self.lr, parameters=[x], weight_decay=0.01 + ) z.backward() opt.step() - np.testing.assert_allclose(x.numpy(), - np.full([2, 3], -self.lr), - rtol=1e-05) + np.testing.assert_allclose( + x.numpy(), np.full([2, 3], -self.lr), rtol=1e-05 + ) def run_optimizer_minimize_with_varbase_list_input(self, optimizer): x = paddle.zeros([2, 3]) @@ -55,9 +54,9 @@ class TestOptimizerForVarBase(unittest.TestCase): z.backward() opt.minimize(z) - np.testing.assert_allclose(x.numpy(), - np.full([2, 3], -self.lr), - rtol=1e-05) + np.testing.assert_allclose( + x.numpy(), np.full([2, 3], -self.lr), rtol=1e-05 + ) def func_test_adam_with_varbase_list_input(self): self.run_optimizer_step_with_varbase_list_input(optimizer.Adam) @@ -130,17 +129,20 @@ class TestOptimizerForVarBase(unittest.TestCase): shape=[5, 10], lod_level=0, name="x", - optimize_attr={'learning_rate': 1.0}) + optimize_attr={'learning_rate': 1.0}, + ) else: x = paddle.fluid.framework.EagerParamBase( dtype="float32", shape=[5, 10], lod_level=0, name="x", -
optimize_attr={'learning_rate': 1.0}) + optimize_attr={'learning_rate': 1.0}, + ) x.value().get_tensor().set( np.random.random((5, 10)).astype('float32'), - paddle.fluid.framework._current_expected_place()) + paddle.fluid.framework._current_expected_place(), + ) y = paddle.ones([5, 10]) z = x + y @@ -160,17 +162,20 @@ class TestOptimizerForVarBase(unittest.TestCase): shape=[5, 10], lod_level=0, name="x", - optimize_attr={'learning_rate': 0.12}) + optimize_attr={'learning_rate': 0.12}, + ) else: x = paddle.fluid.framework.EagerParamBase( dtype="float32", shape=[5, 10], lod_level=0, name="x", - optimize_attr={'learning_rate': 0.12}) + optimize_attr={'learning_rate': 0.12}, + ) x.value().get_tensor().set( np.random.random((5, 10)).astype('float32'), - paddle.fluid.framework._current_expected_place()) + paddle.fluid.framework._current_expected_place(), + ) y = paddle.ones([5, 10]) z = x + y diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_grad.py b/python/paddle/fluid/tests/unittests/test_optimizer_grad.py index a29fb920d788413446ccb16f17b898a3bf5578eb..87e39a1efb0c5aa658d7d83cab7b9e9975b55155 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_grad.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_grad.py @@ -43,8 +43,8 @@ class SimpleNetWithCond(object): def _init_param(self): self.x = np.ones(self.shape).astype('float32') - self.y = np.ones(self.shape).astype('float32') * 2. - self.z = np.ones(self.shape).astype('float32') * 3. + self.y = np.ones(self.shape).astype('float32') * 2.0 + self.z = np.ones(self.shape).astype('float32') * 3.0 def _calc_gradient(self, cond_i): """ @@ -81,39 +81,42 @@ class SimpleNetWithCond(object): dtype="float32", shape=self.shape, attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_x"), - default_initializer=fluid.initializer.NumpyArrayInitializer(self.x)) + default_initializer=fluid.initializer.NumpyArrayInitializer(self.x), + ) param_y = fluid.layers.create_parameter( dtype="float32", shape=self.shape, attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_y"), - default_initializer=fluid.initializer.NumpyArrayInitializer(self.y)) + default_initializer=fluid.initializer.NumpyArrayInitializer(self.y), + ) param_z = fluid.layers.create_parameter( dtype="float32", shape=self.shape, attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_z"), - default_initializer=fluid.initializer.NumpyArrayInitializer(self.z)) + default_initializer=fluid.initializer.NumpyArrayInitializer(self.z), + ) sum_xy = fluid.layers.elementwise_add(param_x, param_y, name='sum_xy') sub_yz = fluid.layers.elementwise_sub(param_y, param_z, name='sub_yz') useless = fluid.layers.fc(param_x, size=1, name='fc_useless') def cond_true(): - cond_yz = fluid.layers.elementwise_add(param_y, - param_z, - name='sum_cond_yz') + cond_yz = fluid.layers.elementwise_add( + param_y, param_z, name='sum_cond_yz' + ) # param_y will not be updated param_y.stop_gradient = self.y_no_grad - cond_res = fluid.layers.elementwise_add(cond_yz, - param_z, - name='sum_cond_true') + cond_res = fluid.layers.elementwise_add( + cond_yz, param_z, name='sum_cond_true' + ) cond_useless = fluid.layers.elementwise_mul(param_x, param_y) return cond_res def cond_false(): - cond_res = fluid.layers.elementwise_add(param_y, - param_z, - name='sum_cond_false') + cond_res = fluid.layers.elementwise_add( + param_y, param_z, name='sum_cond_false' + ) cond_useless = fluid.layers.elementwise_mul(param_z, param_z) return cond_res @@ -123,18 +126,23 @@ class SimpleNetWithCond(object): 
mean_out = paddle.mean(sum_all) if use_bf16: import paddle.static.amp as amp + self.optimizer = amp.bf16.decorate_bf16( self.optimizer, amp_lists=amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'elementwise_add'}), + custom_fp32_list={'elementwise_add'} + ), use_bf16_guard=False, - use_pure_bf16=True) + use_pure_bf16=True, + ) self.optimizer.minimize(mean_out) - fetch_list = ["param_x", "param_z"] if self.y_no_grad else [ - "param_x", "param_y", "param_z" - ] + fetch_list = ( + ["param_x", "param_z"] + if self.y_no_grad + else ["param_x", "param_y", "param_z"] + ) fetch_list += [_append_grad_suffix_(param) for param in fetch_list] return fetch_list, self.optimizer @@ -200,8 +208,9 @@ class TestOptimizer(unittest.TestCase): for param_lr in self.param_lr: for cond_i in self.cond_i: for y_no_grad in self.y_no_grad: - self.attr[ - 'lr'] = param_lr * self.optimizer._learning_rate + self.attr['lr'] = ( + param_lr * self.optimizer._learning_rate + ) self._init_param_attr() main_program = fluid.Program() @@ -209,11 +218,15 @@ class TestOptimizer(unittest.TestCase): with fluid.program_guard(main_program, init_program): # reset optimizer._accumulators to avoid duplicate name in loop. self.optimizer._accumulators = defaultdict( - lambda: dict()) - test_net = self.NetClass(self.optimizer, param_lr, - y_no_grad) - fetch_list, decorated_optimizer = test_net.build_net( - cond_i, use_bf16) + lambda: dict() + ) + test_net = self.NetClass( + self.optimizer, param_lr, y_no_grad + ) + ( + fetch_list, + decorated_optimizer, + ) = test_net.build_net(cond_i, use_bf16) if use_bf16: self.optimizer = decorated_optimizer @@ -225,24 +238,28 @@ class TestOptimizer(unittest.TestCase): # Train 2 steps to check validity for batch_i in range(2): - res = exe.run(main_program, - fetch_list=fetch_list) + res = exe.run( + main_program, fetch_list=fetch_list + ) gt_grads = test_net._calc_gradient(cond_i) gt_params = self._apply_optimize( - test_net, gt_grads) + test_net, gt_grads + ) param_grads = gt_params + gt_grads for i in range(len(res)): np.testing.assert_allclose( - res[i], param_grads[i]) + res[i], param_grads[i] + ) -@unittest.skipIf(not fluid.core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not fluid.core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestSGDOptimizer(TestOptimizer): - def test_optimizer_multiblock_except(self): - with self.assertRaisesRegexp(ValueError, - "var param_y not in this block"): + with self.assertRaisesRegexp( + ValueError, "var param_y not in this block" + ): self._check_grads(use_bf16=True) @@ -256,10 +273,9 @@ class TestAdamOptimizer(TestOptimizer): def setUp(self): self._init_config() beta1, beta2, epsilon = 0.9, 0.999, 1e-8 - self.optimizer = optimizer.AdamOptimizer(learning_rate=0.01, - beta1=beta1, - beta2=beta2, - epsilon=epsilon) + self.optimizer = optimizer.AdamOptimizer( + learning_rate=0.01, beta1=beta1, beta2=beta2, epsilon=epsilon + ) self.attr = { "beta1": beta1, "beta2": beta2, @@ -267,7 +283,7 @@ class TestAdamOptimizer(TestOptimizer): "beta2_pow": beta2, "moment1": np.zeros(SHAPE).astype("float32"), "moment2": np.zeros(SHAPE).astype("float32"), - "epsilon": epsilon + "epsilon": epsilon, } def _apply_gradient(self, param, grad, name): @@ -280,13 +296,14 @@ class TestAdamOptimizer(TestOptimizer): beta1_pow, beta2_pow = attr['beta1_pow'], attr['beta2_pow'] epsilon = attr['epsilon'] - moment1_out = beta1 * moment1 + (1. - beta1) * grad - moment2_out = beta2 * moment2 + (1. 
- beta2) * np.square(grad) + moment1_out = beta1 * moment1 + (1.0 - beta1) * grad + moment2_out = beta2 * moment2 + (1.0 - beta2) * np.square(grad) - lr = attr['lr'] * np.sqrt(1. - beta2_pow) / (1. - beta1_pow) + lr = attr['lr'] * np.sqrt(1.0 - beta2_pow) / (1.0 - beta1_pow) param_out = param - lr * ( - moment1_out / - (np.sqrt(moment2_out) + epsilon * np.sqrt(1 - beta2_pow))) + moment1_out + / (np.sqrt(moment2_out) + epsilon * np.sqrt(1 - beta2_pow)) + ) # update hyper-parameter of optimizer self.param_attr[name]['beta1_pow'] = beta1_pow * beta1 diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py index 603483302843020b400eea6337ba3aeb7db49443..8bc2b796758823e5099d869b791ffe01b5942a4a 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py @@ -34,10 +34,9 @@ SEED = 2020 paddle.enable_static() -def static(train_data, - loss_in_switch=True, - use_cuda=False, - use_parallel_exe=False): +def static( + train_data, loss_in_switch=True, use_cuda=False, use_parallel_exe=False +): startup_program = Program() main_program = Program() startup_program.random_seed = SEED @@ -51,20 +50,26 @@ def static(train_data, size=FC_SIZE, act='relu', param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.99)), + initializer=fluid.initializer.Constant(value=0.99) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.5)), - name="hidden") + initializer=fluid.initializer.Constant(value=0.5) + ), + name="hidden", + ) prediction = layers.fc( hidden, size=CLASS_NUM, act='softmax', param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.2)), + initializer=fluid.initializer.Constant(value=1.2) + ), bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.8)), - name="prediction") + initializer=fluid.initializer.Constant(value=0.8) + ), + name="prediction", + ) return hidden, prediction def fn_1(opt, avg_loss=None, pred=None, label=None): @@ -76,8 +81,9 @@ def static(train_data, def fn_2(opt, avg_loss=None, pred=None, label=None): if avg_loss is None: - loss = layers.softmax_with_cross_entropy(logits=pred, - label=label) + loss = layers.softmax_with_cross_entropy( + logits=pred, label=label + ) avg_loss = paddle.mean(loss, name='mean_softmax_loss') opt.minimize(avg_loss) return avg_loss @@ -96,15 +102,19 @@ def static(train_data, if loss_in_switch: avg_loss = layers.case( [(mod_two, lambda: fn_1(adam, None, prediction, label))], - lambda: fn_2(sgd, None, prediction, label)) + lambda: fn_2(sgd, None, prediction, label), + ) else: loss_1 = layers.cross_entropy(input=prediction, label=label) avg_loss_1 = paddle.mean(loss_1) - loss_2 = layers.softmax_with_cross_entropy(logits=prediction, - label=label) + loss_2 = layers.softmax_with_cross_entropy( + logits=prediction, label=label + ) avg_loss_2 = paddle.mean(loss_2) - avg_loss = layers.case([(mod_two, lambda: fn_1(adam, avg_loss_1))], - lambda: fn_2(sgd, avg_loss_2)) + avg_loss = layers.case( + [(mod_two, lambda: fn_1(adam, avg_loss_1))], + lambda: fn_2(sgd, avg_loss_2), + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) @@ -116,7 +126,7 @@ def static(train_data, feed = { 'image': feed_image, 'label': feed_label, - 'id': np.array([epoch]).astype('int32') + 'id': np.array([epoch]).astype('int32'), } out = exe.run(main_program, feed=feed, 
fetch_list=fetch_list) out_hidden, out_pred, loss = out @@ -125,27 +135,31 @@ def static(train_data, class DygraphLayer(fluid.dygraph.Layer): - def __init__(self): super(DygraphLayer, self).__init__() self.fc_1 = fluid.dygraph.nn.Linear( INPUT_SIZE, FC_SIZE, act='relu', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.99)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.5)), + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.99) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.5) + ), ) self.fc_2 = fluid.dygraph.nn.Linear( FC_SIZE, CLASS_NUM, act='softmax', - param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.2)), - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.8))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.2) + ), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.8) + ), + ) def forward(self, inputs): hidden = self.fc_1(inputs) @@ -159,10 +173,12 @@ def dynamic(train_data, use_cuda=False, use_parallel_exe=False): fluid.default_startup_program().random_seed = SEED fluid.default_main_program().random_seed = SEED dy_layer = DygraphLayer() - adam = fluid.optimizer.Adam(learning_rate=LR, - parameter_list=dy_layer.parameters()) - sgd = fluid.optimizer.SGD(learning_rate=LR, - parameter_list=dy_layer.parameters()) + adam = fluid.optimizer.Adam( + learning_rate=LR, parameter_list=dy_layer.parameters() + ) + sgd = fluid.optimizer.SGD( + learning_rate=LR, parameter_list=dy_layer.parameters() + ) for epoch in range(EPOCH_NUM): image_data, label = train_data[epoch] @@ -177,7 +193,8 @@ def dynamic(train_data, use_cuda=False, use_parallel_exe=False): adam.minimize(loss) else: softmax_loss = layers.softmax_with_cross_entropy( - prediction, var_label) + prediction, var_label + ) loss = paddle.mean(softmax_loss) loss.backward() sgd.minimize(loss) @@ -192,16 +209,18 @@ class TestMultiTask(unittest.TestCase): Todo(liym27): add parallel GPU train. 
''' - def random_input(self, - seed, - image_shape=[BATCH_SIZE, INPUT_SIZE], - label_shape=[BATCH_SIZE, 1]): + def random_input( + self, + seed, + image_shape=[BATCH_SIZE, INPUT_SIZE], + label_shape=[BATCH_SIZE, 1], + ): np.random.seed(seed) image_np = np.random.random(size=image_shape).astype('float32') np.random.seed(seed) - label_np = np.random.randint(low=0, - high=CLASS_NUM - 1, - size=label_shape).astype('int64') + label_np = np.random.randint( + low=0, high=CLASS_NUM - 1, size=label_shape + ).astype('int64') return image_np, label_np def init_train_data(self): @@ -214,15 +233,15 @@ class TestMultiTask(unittest.TestCase): use_cuda = core.is_compiled_with_cuda() hidden_2, pre_2, loss_2 = dynamic(self.train_data, use_cuda) for loss_in_switch in [True, False]: - hidden_1, pre_1, loss_1 = static(self.train_data, loss_in_switch, - use_cuda) + hidden_1, pre_1, loss_1 = static( + self.train_data, loss_in_switch, use_cuda + ) np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05) np.testing.assert_allclose(pre_1, pre_2, rtol=1e-05) np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05) class TestMultiOptimizersMultiCardsError(unittest.TestCase): - def test_error(self): startup_program = Program() main_program = Program() @@ -244,8 +263,10 @@ class TestMultiOptimizersMultiCardsError(unittest.TestCase): cond = layers.fill_constant([1], 'bool', True) - layers.case([(cond, lambda: fn_1(adam, avg_loss))], - lambda: fn_2(sgd, avg_loss)) + layers.case( + [(cond, lambda: fn_1(adam, avg_loss))], + lambda: fn_2(sgd, avg_loss), + ) cpu_place = fluid.CPUPlace() cuda_place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -263,17 +284,20 @@ class TestMultiOptimizersMultiCardsError(unittest.TestCase): # to use multi cards ** only on CPU ** not GPU to reduce CI time. 
os.environ['CPU_NUM'] = str(2) - pe_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=main_program, - loss_name=avg_loss.name) + pe_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + main_program=main_program, + loss_name=avg_loss.name, + ) num_devices = pe_exe.device_count def not_implemented_error(): - pe_exe.run(feed={ - 'X': - np.random.random(size=[64, 10]).astype('float32'), - }, - fetch_list=[avg_loss.name]) + pe_exe.run( + feed={ + 'X': np.random.random(size=[64, 10]).astype('float32'), + }, + fetch_list=[avg_loss.name], + ) if num_devices > 1: self.assertRaises(NotImplementedError, not_implemented_error) diff --git a/python/paddle/fluid/tests/unittests/test_outer.py b/python/paddle/fluid/tests/unittests/test_outer.py index a887615f0f30658f68f069bcb7b57ee2dcaddfe8..5625618776c29d6301b3688cc6b32e1de280e1bb 100644 --- a/python/paddle/fluid/tests/unittests/test_outer.py +++ b/python/paddle/fluid/tests/unittests/test_outer.py @@ -22,27 +22,28 @@ from paddle.fluid.framework import _test_eager_guard class TestMultiplyApi(unittest.TestCase): - def _run_static_graph_case(self, x_data, y_data): with program_guard(Program(), Program()): paddle.enable_static() - x = paddle.static.data(name='x', - shape=x_data.shape, - dtype=x_data.dtype) - y = paddle.static.data(name='y', - shape=y_data.shape, - dtype=y_data.dtype) + x = paddle.static.data( + name='x', shape=x_data.shape, dtype=x_data.dtype + ) + y = paddle.static.data( + name='y', shape=y_data.shape, dtype=y_data.dtype + ) res = paddle.outer(x, y) - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) - outs = exe.run(paddle.static.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[res]) + outs = exe.run( + paddle.static.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[res], + ) res = outs[0] return res @@ -94,17 +95,21 @@ class TestMultiplyApi(unittest.TestCase): # test dynamic computation graph: 2-d array Complex x_data = np.random.rand(20, 50).astype( - np.float64) + 1J * np.random.rand(20, 50).astype(np.float64) - y_data = np.random.rand(50).astype( - np.float64) + 1J * np.random.rand(50).astype(np.float64) + np.float64 + ) + 1j * np.random.rand(20, 50).astype(np.float64) + y_data = np.random.rand(50).astype(np.float64) + 1j * np.random.rand( + 50 + ).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 3-d array Complex x_data = np.random.rand(5, 10, 10).astype( - np.float64) + 1J * np.random.rand(5, 10, 10).astype(np.float64) - y_data = np.random.rand(2, 10).astype( - np.float64) + 1J * np.random.rand(2, 10).astype(np.float64) + np.float64 + ) + 1j * np.random.rand(5, 10, 10).astype(np.float64) + y_data = np.random.rand(2, 10).astype(np.float64) + 1j * np.random.rand( + 2, 10 + ).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) @@ -115,7 +120,6 @@ class TestMultiplyApi(unittest.TestCase): class TestMultiplyError(unittest.TestCase): - def func_test_errors(self): # test static computation graph: dtype can not be int8 paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_overlap_add_op.py b/python/paddle/fluid/tests/unittests/test_overlap_add_op.py index 
f3815ba496566b85df5b9b5833901dc499911a95..430fde53aceed212039a2f49e6bd64fa8b22d05b 100644 --- a/python/paddle/fluid/tests/unittests/test_overlap_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_overlap_add_op.py @@ -33,8 +33,9 @@ def overlap_add(x, hop_length, axis=-1): frame_length = x.shape[1] if axis == 0 else x.shape[-2] # Assure no gaps between frames. - assert 0 < hop_length <= frame_length, \ - f'hop_length should be in (0, frame_length({frame_length})], but got {hop_length}.' + assert ( + 0 < hop_length <= frame_length + ), f'hop_length should be in (0, frame_length({frame_length})], but got {hop_length}.' seq_length = (n_frames - 1) * hop_length + frame_length @@ -55,7 +56,7 @@ def overlap_add(x, hop_length, axis=-1): for i in range(x.shape[0]): for frame in range(x.shape[-1]): sample = frame * hop_length - y[i, sample:sample + frame_length] += x[i, :, frame] + y[i, sample : sample + frame_length] += x[i, :, frame] if axis == 0: y = y.transpose((1, 0)) @@ -70,7 +71,6 @@ def overlap_add(x, hop_length, axis=-1): class TestOverlapAddOp(OpTest): - def setUp(self): self.op_type = "overlap_add" self.python_api = paddle.signal.overlap_add @@ -101,7 +101,6 @@ class TestOverlapAddOp(OpTest): class TestCase1(TestOverlapAddOp): - def initTestCase(self): input_shape = (3, 50) input_type = 'float64' @@ -113,7 +112,6 @@ class TestCase1(TestOverlapAddOp): class TestCase2(TestOverlapAddOp): - def initTestCase(self): input_shape = (2, 40, 5) input_type = 'float64' @@ -125,7 +123,6 @@ class TestCase2(TestOverlapAddOp): class TestCase3(TestOverlapAddOp): - def initTestCase(self): input_shape = (5, 40, 2) input_type = 'float64' @@ -137,7 +134,6 @@ class TestCase3(TestOverlapAddOp): class TestCase4(TestOverlapAddOp): - def initTestCase(self): input_shape = (3, 5, 12, 8) input_type = 'float64' @@ -149,7 +145,6 @@ class TestCase4(TestOverlapAddOp): class TestCase5(TestOverlapAddOp): - def initTestCase(self): input_shape = (8, 12, 5, 3) input_type = 'float64' diff --git a/python/paddle/fluid/tests/unittests/test_pad2d_op.py b/python/paddle/fluid/tests/unittests/test_pad2d_op.py index 0f43ddbd8fc82f0263cc971af367f5ac1efd9b34..aeca9becf84b1f1b9ecdad9a9eedee9a68f30d3c 100644 --- a/python/paddle/fluid/tests/unittests/test_pad2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad2d_op.py @@ -20,7 +20,6 @@ from paddle.fluid import Program, program_guard class TestPad2dOp(OpTest): - def setUp(self): self.pad_value = 0.0 self.variable_paddings = False @@ -30,25 +29,37 @@ class TestPad2dOp(OpTest): self.attrs = {} if self.variable_paddings: self.attrs['paddings'] = [] - self.inputs['Paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.inputs['Paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) else: - self.attrs['paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.attrs['paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) self.attrs['pad_value'] = self.pad_value self.attrs['mode'] = self.mode self.attrs['data_format'] = self.data_format if self.data_format == "NCHW": - paddings = [(0, 0), (0, 0), (self.paddings[0], self.paddings[1]), - (self.paddings[2], self.paddings[3])] + paddings = [ + (0, 0), + (0, 0), + (self.paddings[0], self.paddings[1]), + (self.paddings[2], self.paddings[3]), + ] else: - paddings = [(0, 0), (self.paddings[0], self.paddings[1]), - (self.paddings[2], self.paddings[3]), (0, 0)] + paddings = [ + (0, 0), + (self.paddings[0], self.paddings[1]), + (self.paddings[2], self.paddings[3]), + (0, 0), + ] if 
self.mode == "constant": - out = np.pad(self.inputs['X'], - paddings, - mode=self.mode, - constant_values=self.pad_value) + out = np.pad( + self.inputs['X'], + paddings, + mode=self.mode, + constant_values=self.pad_value, + ) else: out = np.pad(self.inputs['X'], paddings, mode=self.mode) self.outputs = {'Out': out} @@ -68,7 +79,6 @@ class TestPad2dOp(OpTest): class TestCase1(TestPad2dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [0, 1, 2, 3] @@ -77,7 +87,6 @@ class TestCase1(TestPad2dOp): class TestCase2(TestPad2dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [0, 1, 2, 3] @@ -86,7 +95,6 @@ class TestCase2(TestPad2dOp): class TestCase3(TestPad2dOp): - def initTestCase(self): self.shape = (2, 4, 4, 4) self.paddings = [0, 1, 2, 3] @@ -95,7 +103,6 @@ class TestCase3(TestPad2dOp): class TestCase4(TestPad2dOp): - def initTestCase(self): self.shape = (2, 4, 4, 4) self.paddings = [0, 1, 2, 3] @@ -104,7 +111,6 @@ class TestCase4(TestPad2dOp): class TestCase5(TestPad2dOp): - def initTestCase(self): self.shape = (2, 4, 4, 4) self.paddings = [0, 1, 2, 3] @@ -114,7 +120,6 @@ class TestCase5(TestPad2dOp): class TestCase6(TestPad2dOp): - def initTestCase(self): self.shape = (2, 4, 4, 4) self.paddings = [0, 1, 2, 3] @@ -125,7 +130,6 @@ class TestCase6(TestPad2dOp): class TestCase7(TestPad2dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [0, 1, 2, 3] @@ -135,7 +139,6 @@ class TestCase7(TestPad2dOp): class TestPad2dOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((2, 2, 2, 2)).astype("float32") @@ -145,9 +148,9 @@ class TestPad2dOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) - data = fluid.data(name='data', - shape=[None, 3, 20, 20], - dtype='float16') + data = fluid.data( + name='data', shape=[None, 3, 20, 20], dtype='float16' + ) fluid.layers.pad2d(input=data, paddings=[1, 1, 1, 1]) diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py index c1c0aa7defb46214008633f7c7789a8398413e71..ee4a9aed6707390df4132c5c64ee22cf26f76136 100644 --- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py @@ -24,7 +24,6 @@ from paddle.fluid import Program, program_guard, Executor, default_main_program class TestPad3dOp(OpTest): - def setUp(self): paddle.enable_static() self.value = 0.0 @@ -35,11 +34,13 @@ class TestPad3dOp(OpTest): self.attrs = {} if self.variable_paddings: self.attrs['paddings'] = [] - self.inputs['Paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.inputs['Paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) else: - self.attrs['paddings'] = np.array( - self.paddings).flatten().astype("int32") + self.attrs['paddings'] = ( + np.array(self.paddings).flatten().astype("int32") + ) self.attrs['value'] = self.value self.attrs['mode'] = self.mode self.attrs['data_format'] = self.data_format @@ -60,10 +61,12 @@ class TestPad3dOp(OpTest): (0, 0), ] if self.mode == "constant": - out = np.pad(self.inputs['X'], - paddings, - mode=self.mode, - constant_values=self.value) + out = np.pad( + self.inputs['X'], + paddings, + mode=self.mode, + constant_values=self.value, + ) elif self.mode == "reflect": out = np.pad(self.inputs['X'], paddings, mode=self.mode) elif self.mode == "replicate": @@ -88,7 +91,6 @@ class TestPad3dOp(OpTest): class TestCase1(TestPad3dOp): - def 
initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 2, 3, 4, 5] @@ -99,7 +101,6 @@ class TestCase1(TestPad3dOp): class TestCase2(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [1, 1, 1, 1, 1, 1] @@ -110,7 +111,6 @@ class TestCase2(TestPad3dOp): class TestCase3(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 1, 0, 2, 3] @@ -120,7 +120,6 @@ class TestCase3(TestPad3dOp): class TestCase4(TestPad3dOp): - def initTestCase(self): self.shape = (4, 4, 4, 4, 4) self.paddings = [0, 1, 2, 1, 2, 3] @@ -130,7 +129,6 @@ class TestCase4(TestPad3dOp): class TestCase5(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 2, 3, 2, 1] @@ -140,7 +138,6 @@ class TestCase5(TestPad3dOp): class TestCase6(TestPad3dOp): - def initTestCase(self): self.shape = (4, 4, 4, 4, 4) self.paddings = [5, 4, 2, 1, 2, 3] @@ -150,7 +147,6 @@ class TestCase6(TestPad3dOp): class TestCase7(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 2, 3, 2, 1] @@ -160,7 +156,6 @@ class TestCase7(TestPad3dOp): class TestCase8(TestPad3dOp): - def initTestCase(self): self.shape = (4, 4, 4, 4, 4) self.paddings = [0, 1, 2, 1, 2, 3] @@ -170,7 +165,6 @@ class TestCase8(TestPad3dOp): class TestCase9(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 2, 3, 4, 5] @@ -181,7 +175,6 @@ class TestCase9(TestPad3dOp): class TestCase10(TestPad3dOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.paddings = [0, 1, 2, 3, 4, 5] @@ -192,7 +185,6 @@ class TestCase10(TestPad3dOp): class TestPadAPI(unittest.TestCase): - def setUp(self): self.places = [paddle.CPUPlace()] if core.is_compiled_with_cuda(): @@ -207,15 +199,15 @@ class TestPadAPI(unittest.TestCase): value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) x = paddle.fluid.data(name="x", shape=input_shape) - result = F.pad(x=x, - pad=pad, - value=value, - mode=mode, - data_format="NCDHW") + result = F.pad( + x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" + ) exe = Executor(place) - fetches = exe.run(default_main_program(), - feed={"x": input_data}, - fetch_list=[result]) + fetches = exe.run( + default_main_program(), + feed={"x": input_data}, + fetch_list=[result], + ) np_out = self._get_numpy_out(input_data, pad, mode, value) np.testing.assert_allclose(fetches[0], np_out, rtol=1e-05) @@ -231,18 +223,18 @@ class TestPadAPI(unittest.TestCase): result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) - fetches = exe.run(default_main_program(), - feed={"x": input_data}, - fetch_list=[result1, result2]) - - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NDHWC") + fetches = exe.run( + default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2], + ) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC" + ) np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) @@ -257,18 +249,18 @@ class TestPadAPI(unittest.TestCase): result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) - fetches 
= exe.run(default_main_program(), - feed={"x": input_data}, - fetch_list=[result1, result2]) - - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NDHWC") + fetches = exe.run( + default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2], + ) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC" + ) np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) @@ -283,27 +275,24 @@ class TestPadAPI(unittest.TestCase): result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") exe = Executor(place) - fetches = exe.run(default_main_program(), - feed={"x": input_data}, - fetch_list=[result1, result2]) - - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - data_format="NDHWC") + fetches = exe.run( + default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2], + ) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC" + ) np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0, - data_format="NCDHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0, data_format="NCDHW" + ): if mode == "constant" and len(pad) == len(input_data.shape) * 2: pad = np.reshape(pad, (-1, 2)).tolist() elif data_format == "NCDHW": @@ -375,38 +364,26 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCDHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NDHWC") - np_out3 = self._get_numpy_out(input_data, - pad_3, - mode, - value, - data_format="NCDHW") + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCDHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NDHWC" + ) + np_out3 = self._get_numpy_out( + input_data, pad_3, mode, value, data_format="NCDHW" + ) tensor_data = paddle.to_tensor(input_data) - y1 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NCDHW") - y2 = F.pad(tensor_data, - pad=pad, - mode=mode, - value=value, - data_format="NDHWC") - y3 = F.pad(tensor_data, - pad=pad_3, - mode=mode, - value=value, - data_format="NCDHW") + y1 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NCDHW" + ) + y2 = F.pad( + tensor_data, pad=pad, mode=mode, value=value, data_format="NDHWC" + ) + y3 = F.pad( + tensor_data, pad=pad_3, mode=mode, value=value, data_format="NCDHW" + ) np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) @@ -420,40 +397,36 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCHW") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NHWC") - 
np_out3 = self._get_numpy_out(input_data, - pad_3, - mode, - value, - data_format="NCHW") + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCHW" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NHWC" + ) + np_out3 = self._get_numpy_out( + input_data, pad_3, mode, value, data_format="NCHW" + ) tensor_data = paddle.to_tensor(input_data) tensor_pad = paddle.to_tensor(pad, dtype="int32") - y1 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NCHW") - y2 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NHWC") - y3 = F.pad(tensor_data, - pad=pad_3, - mode=mode, - value=value, - data_format="NCHW") + y1 = F.pad( + tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NCHW", + ) + y2 = F.pad( + tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NHWC", + ) + y3 = F.pad( + tensor_data, pad=pad_3, mode=mode, value=value, data_format="NCHW" + ) np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) @@ -467,39 +440,35 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 100 input_data = np.random.rand(*input_shape).astype(np.float32) - np_out1 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NCL") - np_out2 = self._get_numpy_out(input_data, - pad, - mode, - value, - data_format="NLC") - np_out3 = self._get_numpy_out(input_data, - pad_3, - mode, - value, - data_format="NCL") + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCL" + ) + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NLC" + ) + np_out3 = self._get_numpy_out( + input_data, pad_3, mode, value, data_format="NCL" + ) tensor_data = paddle.to_tensor(input_data) tensor_pad = paddle.to_tensor(pad, dtype="int32") - y1 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NCL") - y2 = F.pad(tensor_data, - pad=tensor_pad, - mode=mode, - value=value, - data_format="NLC") - y3 = F.pad(tensor_data, - pad=pad_3, - mode=mode, - value=value, - data_format="NCL") + y1 = F.pad( + tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NCL", + ) + y2 = F.pad( + tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NLC", + ) + y3 = F.pad( + tensor_data, pad=pad_3, mode=mode, value=value, data_format="NCL" + ) np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) @@ -507,13 +476,9 @@ class TestPadAPI(unittest.TestCase): class TestPad1dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - data_format="NCL"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCL" + ): if data_format == "NCL": pad = [ (0, 0), @@ -555,59 +520,52 @@ class TestPad1dAPI(unittest.TestCase): pad_reflection = nn.Pad1D(padding=pad, mode="reflect") pad_replication = nn.Pad1D(padding=pad, mode="replicate") pad_constant = nn.Pad1D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad1D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad1D( + padding=pad_int, mode="constant", value=value + ) pad_circular = nn.Pad1D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) output = pad_reflection(data) - np_out = self._get_numpy_out(input_data, - pad, - "reflect", - data_format="NCL") + np_out = self._get_numpy_out( + 
input_data, pad, "reflect", data_format="NCL" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) - np_out = self._get_numpy_out(input_data, - pad, - "replicate", - data_format="NCL") + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCL" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCL") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCL" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 2, - "constant", - value=value, - data_format="NCL") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 2, + "constant", + value=value, + data_format="NCL", + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) - np_out = self._get_numpy_out(input_data, - pad, - "circular", - value=value, - data_format="NCL") + np_out = self._get_numpy_out( + input_data, pad, "circular", value=value, data_format="NCL" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad2dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - data_format="NCHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCHW" + ): if data_format == "NCHW": pad = [ (0, 0), @@ -651,58 +609,52 @@ class TestPad2dAPI(unittest.TestCase): pad_reflection = nn.Pad2D(padding=pad, mode="reflect") pad_replication = nn.Pad2D(padding=pad, mode="replicate") pad_constant = nn.Pad2D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad2D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad2D( + padding=pad_int, mode="constant", value=value + ) pad_circular = nn.Pad2D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) output = pad_reflection(data) - np_out = self._get_numpy_out(input_data, - pad, - "reflect", - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NCHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) - np_out = self._get_numpy_out(input_data, - pad, - "replicate", - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 4, - "constant", - value=value, - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 4, + "constant", + value=value, + data_format="NCHW", + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) - np_out = self._get_numpy_out(input_data, - pad, - "circular", - data_format="NCHW") + np_out = self._get_numpy_out( + input_data, pad, "circular", data_format="NCHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad3dAPI(unittest.TestCase): - - def _get_numpy_out(self, - input_data, - pad, - mode, - value=0.0, - 
data_format="NCDHW"): + def _get_numpy_out( + self, input_data, pad, mode, value=0.0, data_format="NCDHW" + ): if data_format == "NCDHW": pad = [ (0, 0), @@ -748,47 +700,45 @@ class TestPad3dAPI(unittest.TestCase): pad_reflection = nn.Pad3D(padding=pad, mode="reflect") pad_replication = nn.Pad3D(padding=pad, mode="replicate") pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value) - pad_constant_int = nn.Pad3D(padding=pad_int, - mode="constant", - value=value) + pad_constant_int = nn.Pad3D( + padding=pad_int, mode="constant", value=value + ) pad_circular = nn.Pad3D(padding=pad, mode="circular") data = paddle.to_tensor(input_data) output = pad_reflection(data) - np_out = self._get_numpy_out(input_data, - pad, - "reflect", - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) - np_out = self._get_numpy_out(input_data, - pad, - "replicate", - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) - np_out = self._get_numpy_out(input_data, - pad, - "constant", - value=value, - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) - np_out = self._get_numpy_out(input_data, [pad_int] * 6, - "constant", - value=value, - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, + [pad_int] * 6, + "constant", + value=value, + data_format="NCDHW", + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) - np_out = self._get_numpy_out(input_data, - pad, - "circular", - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "circular", data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) def test_pad_tensor(self): @@ -799,38 +749,34 @@ class TestPad3dAPI(unittest.TestCase): pad_tensor = paddle.to_tensor(pad) input_data = np.random.rand(*input_shape).astype(np.float32) - pad_reflection_ncdhw = nn.Pad3D(padding=pad_tensor, - mode="reflect", - data_format="NCDHW") - pad_reflection_ndhwc = nn.Pad3D(padding=pad_tensor, - mode="reflect", - data_format="NDHWC") + pad_reflection_ncdhw = nn.Pad3D( + padding=pad_tensor, mode="reflect", data_format="NCDHW" + ) + pad_reflection_ndhwc = nn.Pad3D( + padding=pad_tensor, mode="reflect", data_format="NDHWC" + ) data = paddle.to_tensor(input_data) output = pad_reflection_ncdhw(data) - np_out = self._get_numpy_out(input_data, - pad, - "reflect", - data_format="NCDHW") + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NCDHW" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_reflection_ndhwc(data) - np_out = self._get_numpy_out(input_data, - pad, - "reflect", - data_format="NDHWC") + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NDHWC" + ) np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad3dOpError(unittest.TestCase): - def setUp(self): self.places = [paddle.CPUPlace()] if core.is_compiled_with_cuda(): self.places.append(paddle.CUDAPlace(0)) def test_errors(self): - def test_variable(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) @@ -840,49 +786,53 @@ class 
TestPad3dOpError(unittest.TestCase): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) x = paddle.to_tensor(data) - y = F.pad(x, - pad=[5, 6, 1, 1, 1, 1], - value=1, - mode='reflect', - data_format="NCDHW") + y = F.pad( + x, + pad=[5, 6, 1, 1, 1, 1], + value=1, + mode='reflect', + data_format="NCDHW", + ) def test_reflect_2(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) x = paddle.to_tensor(data) - y = F.pad(x, - pad=[1, 1, 4, 3, 1, 1], - value=1, - mode='reflect', - data_format="NCDHW") + y = F.pad( + x, + pad=[1, 1, 4, 3, 1, 1], + value=1, + mode='reflect', + data_format="NCDHW", + ) def test_reflect_3(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) x = paddle.to_tensor(data) - y = F.pad(x, - pad=[1, 1, 1, 1, 2, 3], - value=1, - mode='reflect', - data_format="NCDHW") + y = F.pad( + x, + pad=[1, 1, 1, 1, 2, 3], + value=1, + mode='reflect', + data_format="NCDHW", + ) def test_circular_1(): input_shape = (1, 2, 0, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) x = paddle.to_tensor(data) - y = F.pad(x, - pad=[1, 1, 1, 1, 2, 3], - mode='circular', - data_format="NCDHW") + y = F.pad( + x, pad=[1, 1, 1, 1, 2, 3], mode='circular', data_format="NCDHW" + ) def test_replicate_1(): input_shape = (1, 2, 0, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) x = paddle.to_tensor(data) - y = F.pad(x, - pad=[1, 1, 1, 1, 2, 3], - mode='replicate', - data_format="NCDHW") + y = F.pad( + x, pad=[1, 1, 1, 1, 2, 3], mode='replicate', data_format="NCDHW" + ) paddle.disable_static() for place in self.places: @@ -896,14 +846,16 @@ class TestPad3dOpError(unittest.TestCase): class TestPadDataformatError(unittest.TestCase): - def test_errors(self): - def test_ncl(): input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCL") data = paddle.to_tensor(data) result = my_pad(data) @@ -911,8 +863,12 @@ class TestPadDataformatError(unittest.TestCase): def test_nchw(): input_shape = (1, 2, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) my_pad = nn.Pad1D(padding=pad, mode="replicate", data_format="NCHW") data = paddle.to_tensor(data) result = my_pad(data) @@ -920,11 +876,15 @@ class TestPadDataformatError(unittest.TestCase): def test_ncdhw(): input_shape = (1, 2, 3, 4) pad = paddle.to_tensor(np.array([2, 1, 2, 1]).astype('int32')) - data = np.arange(np.prod(input_shape), - dtype=np.float64).reshape(input_shape) + 1 - my_pad = nn.Pad1D(padding=pad, - mode="replicate", - data_format="NCDHW") + data = ( + np.arange(np.prod(input_shape), dtype=np.float64).reshape( + input_shape + ) + + 1 + ) + my_pad = nn.Pad1D( + padding=pad, mode="replicate", data_format="NCDHW" + ) data = paddle.to_tensor(data) result = my_pad(data) diff --git a/python/paddle/fluid/tests/unittests/test_pad_constant_like.py b/python/paddle/fluid/tests/unittests/test_pad_constant_like.py index 42fad15c97f89a9054de8e305bc30cfd1895380f..fda844eb09b5d763d641c76c67534210114ac006 100644 --- 
a/python/paddle/fluid/tests/unittests/test_pad_constant_like.py +++ b/python/paddle/fluid/tests/unittests/test_pad_constant_like.py @@ -20,22 +20,22 @@ from paddle.fluid import Program, program_guard class TestPadConstantLikeOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = "pad_constant_like" self.inputs = { 'X': np.random.random(self.x_shape).astype("float64"), - 'Y': np.random.random(self.y_shape).astype("float64") + 'Y': np.random.random(self.y_shape).astype("float64"), } self.attrs = {} self.attrs['pad_value'] = self.pad_value self.outputs = { - 'Out': - np.pad(self.inputs['Y'], - self.paddings, - mode='constant', - constant_values=self.pad_value) + 'Out': np.pad( + self.inputs['Y'], + self.paddings, + mode='constant', + constant_values=self.pad_value, + ) } def test_check_output(self): @@ -52,7 +52,6 @@ class TestPadConstantLikeOp(OpTest): class TestCase1(TestPadConstantLikeOp): - def initTestCase(self): self.x_shape = (4, 3, 4, 5) self.y_shape = (2, 3, 4, 5) @@ -61,7 +60,6 @@ class TestCase1(TestPadConstantLikeOp): class TestCase2(TestPadConstantLikeOp): - def initTestCase(self): self.x_shape = (4, 3, 4, 10) self.y_shape = (2, 3, 2, 10) @@ -70,38 +68,38 @@ class TestCase2(TestPadConstantLikeOp): class TestPadConstantLikeOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x_data = np.random.random((2, 2, 2, 2)).astype("float32") y_data = np.random.random((2, 2, 2, 2)).astype("float32") def test_Variable_x(): - var_y = fluid.data(name="data_y", - shape=[2, 2, 2, 2], - dtype="float32") + var_y = fluid.data( + name="data_y", shape=[2, 2, 2, 2], dtype="float32" + ) fluid.layers.pad_constant_like(x=x_data, y=var_y) self.assertRaises(TypeError, test_Variable_x) def test_Variable_y(): - var_x = fluid.data(name="data_x", - shape=[2, 2, 2, 2], - dtype="float32") + var_x = fluid.data( + name="data_x", shape=[2, 2, 2, 2], dtype="float32" + ) fluid.layers.pad_constant_like(x=var_x, y=y_data) self.assertRaises(TypeError, test_Variable_y) class TestOutDtype(unittest.TestCase): - def test_dtype(self): api_fn = fluid.layers.pad_constant_like - check_out_dtype(api_fn, - in_specs=[([2, 3, 2, 3], 'float64'), ([1, 3, 1, 3], )], - expect_dtypes=['float32', 'float64', 'int32', 'int64'], - target_index=1, - pad_value=0.) 
+ check_out_dtype( + api_fn, + in_specs=[([2, 3, 2, 3], 'float64'), ([1, 3, 1, 3],)], + expect_dtypes=['float32', 'float64', 'int32', 'int64'], + target_index=1, + pad_value=0.0, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py index fa31836ecf3e6e2a04b9975ae686178743ba5e93..916f0399d9f01e784ea9c4d3ca38200b5e20feab 100644 --- a/python/paddle/fluid/tests/unittests/test_pad_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad_op.py @@ -25,7 +25,6 @@ from test_attribute_var import UnittestBase class TestPadOp(OpTest): - def setUp(self): self.initTestCase() self.dtype = self.get_dtype() @@ -37,11 +36,12 @@ class TestPadOp(OpTest): self.attrs['paddings'] = np.array(self.paddings).flatten() self.attrs['pad_value'] = self.pad_value self.outputs = { - 'Out': - np.pad(self.inputs['X'], - self.paddings, - mode='constant', - constant_values=self.pad_value) + 'Out': np.pad( + self.inputs['X'], + self.paddings, + mode='constant', + constant_values=self.pad_value, + ) } def get_dtype(self): @@ -60,7 +60,6 @@ class TestPadOp(OpTest): class TestCase1(TestPadOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] @@ -68,7 +67,6 @@ class TestCase1(TestPadOp): class TestCase2(TestPadOp): - def initTestCase(self): self.shape = (5, 5, 5) self.paddings = [(0, 0), (0, 0), (1, 2)] @@ -76,22 +74,20 @@ class TestCase2(TestPadOp): class TestCase3(TestPadOp): - def initTestCase(self): - self.shape = (100) + self.shape = 100 self.paddings = [(0, 1)] self.pad_value = 0.9 -#----------------Pad Fp16---------------- +# ----------------Pad Fp16---------------- def create_test_fp16(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestPadFp16(parent): - def get_dtype(self): return np.float16 @@ -110,7 +106,6 @@ create_test_fp16(TestCase3) class TestPadOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((2, 2)).astype("float32") @@ -125,7 +120,6 @@ class TestPadOpError(unittest.TestCase): class TestPaddingValueTensor(UnittestBase): - def init_info(self): self.shapes = [[2, 4]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -148,15 +142,16 @@ class TestPaddingValueTensor(UnittestBase): exe = paddle.static.Executor() exe.run(starup_prog) res = exe.run(fetch_list=[feat, out]) - gt = np.pad(res[0], [1, 1], 'constant', constant_values=[1., 1.]) + gt = np.pad(res[0], [1, 1], 'constant', constant_values=[1.0, 1.0]) np.testing.assert_allclose(res[1], gt) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) # Test for Inference Predictor infer_outs = self.infer_prog() - gt = np.pad(infer_outs[0], [1, 1], - 'constant', - constant_values=[1., 1.]) + gt = np.pad( + infer_outs[0], [1, 1], 'constant', constant_values=[1.0, 1.0] + ) np.testing.assert_allclose(infer_outs[1], gt) def path_prefix(self): @@ -167,22 +162,20 @@ class TestPaddingValueTensor(UnittestBase): def call_func(self, x): padding_value = paddle.assign([1.0]) - out = paddle.nn.functional.pad(x, - pad=[1, 1, 1, 1], - value=padding_value, - mode='constant') + out = paddle.nn.functional.pad( + x, pad=[1, 1, 1, 1], value=padding_value, mode='constant' + ) return out class 
TestPaddingValueTensor2(TestPaddingValueTensor): - def call_func(self, x): padding_value = paddle.assign([1.0]) # test for int value tmp = paddle.fluid.layers.pad(x, paddings=[1, 1, 1, 1], pad_value=1) - out = paddle.fluid.layers.pad(x, - paddings=[1, 1, 1, 1], - pad_value=padding_value) + out = paddle.fluid.layers.pad( + x, paddings=[1, 1, 1, 1], pad_value=padding_value + ) return out diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py index 99b797fbf9e4cc923c613bfc913aefd6bd49ffa6..dd958243fade6cf35e735e64dc85d67f8c6d0682 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph def _dygraph_guard_(func): - def __impl__(*args, **kwargs): if paddle.in_dynamic_mode(): return func(*args, **kwargs) @@ -42,26 +41,29 @@ def random_var(size, low=-1, high=1, dtype='float32'): class TestDygraphDoubleGrad(TestCase): - def setUp(self): self.sort_sum_gradient = False self.shape = [5, 10] - def grad(self, - outputs, - inputs, - grad_outputs=None, - no_grad_vars=None, - retain_graph=None, - create_graph=False, - allow_unused=False): - return paddle.grad(outputs=outputs, - inputs=inputs, - grad_outputs=grad_outputs, - no_grad_vars=no_grad_vars, - retain_graph=retain_graph, - create_graph=create_graph, - allow_unused=allow_unused) + def grad( + self, + outputs, + inputs, + grad_outputs=None, + no_grad_vars=None, + retain_graph=None, + create_graph=False, + allow_unused=False, + ): + return paddle.grad( + outputs=outputs, + inputs=inputs, + grad_outputs=grad_outputs, + no_grad_vars=no_grad_vars, + retain_graph=retain_graph, + create_graph=create_graph, + allow_unused=allow_unused, + ) @dygraph_guard def func_exception(self): @@ -83,12 +85,16 @@ class TestDygraphDoubleGrad(TestCase): self.grad([random_var(shape)], [1]) with self.assertRaises(AssertionError): - self.grad([random_var(shape), random_var(shape)], - [random_var(shape)], [random_var(shape)]) + self.grad( + [random_var(shape), random_var(shape)], + [random_var(shape)], + [random_var(shape)], + ) with self.assertRaises(AssertionError): - self.grad([random_var(shape)], [random_var(shape)], - no_grad_vars=[1]) + self.grad( + [random_var(shape)], [random_var(shape)], no_grad_vars=[1] + ) with self.assertRaises(AssertionError): self.grad([random_var(shape)], [random_var(shape)], no_grad_vars=1) @@ -105,31 +111,33 @@ class TestDygraphDoubleGrad(TestCase): y = x + 1 for create_graph in [False, True]: - dx, = self.grad([x], [x], - create_graph=create_graph, - retain_graph=True) + (dx,) = self.grad( + [x], [x], create_graph=create_graph, retain_graph=True + ) self.assertEqual(dx.shape, x.shape) self.assertTrue(np.all(dx.numpy() == 1)) self.assertNotEqual(dx.stop_gradient, create_graph) - dx_mul_2, = self.grad([y, x], [x], - create_graph=create_graph, - retain_graph=True) + (dx_mul_2,) = self.grad( + [y, x], [x], create_graph=create_graph, retain_graph=True + ) self.assertEqual(dx_mul_2.shape, x.shape) self.assertTrue(np.all(dx_mul_2.numpy() == 2)) self.assertNotEqual(dx_mul_2.stop_gradient, create_graph) - none_grad, = self.grad([x], [y], - create_graph=create_graph, - allow_unused=True) + (none_grad,) = self.grad( + [x], [y], create_graph=create_graph, allow_unused=True + ) self.assertTrue(none_grad is None) - grad_with_none_and_not_none, = 
self.grad([x, y], [y], - create_graph=create_graph) + (grad_with_none_and_not_none,) = self.grad( + [x, y], [y], create_graph=create_graph + ) self.assertTrue(grad_with_none_and_not_none.shape, x.shape) self.assertTrue(np.all(grad_with_none_and_not_none.numpy() == 1)) - self.assertNotEqual(grad_with_none_and_not_none.stop_gradient, - create_graph) + self.assertNotEqual( + grad_with_none_and_not_none.stop_gradient, create_graph + ) def test_simple_example(self): with _test_eager_guard(): @@ -144,11 +152,12 @@ class TestDygraphDoubleGrad(TestCase): half_numel = int(numel / 2) half_x_positive = np.random.uniform(low=1, high=2, size=[half_numel]) - half_x_negative = np.random.uniform(low=-2, - high=-1, - size=[numel - half_numel]) - x_np = np.array(list(half_x_positive) + - list(half_x_negative)).astype('float32') + half_x_negative = np.random.uniform( + low=-2, high=-1, size=[numel - half_numel] + ) + x_np = np.array(list(half_x_positive) + list(half_x_negative)).astype( + 'float32' + ) np.random.shuffle(x_np) x = fluid.dygraph.to_variable(x_np) @@ -163,8 +172,9 @@ class TestDygraphDoubleGrad(TestCase): relu_x_np = np.maximum(x_np, alpha * x_np).astype('float32') relu_x_grad_np = ((x_np > 0) + (x_np < 0) * alpha).astype('float32') dy_expected = (relu_x_np * relu_x_grad_np * 2).astype('float32') - dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np * - 4).astype('float32') + dz_expected = (np.power(relu_x_np, 3) * relu_x_grad_np * 4).astype( + 'float32' + ) random_grad_y = random_var(y.shape, low=1, high=2) random_grad_z = random_var(z.shape, low=1, high=2) @@ -177,31 +187,39 @@ class TestDygraphDoubleGrad(TestCase): for grad_y in [random_grad_y]: for grad_z in [random_grad_z]: for create_graph in [False, True]: - dx_actual, = self.grad(outputs=[y, z], - inputs=[x], - grad_outputs=[grad_y, grad_z], - create_graph=create_graph, - retain_graph=True) + (dx_actual,) = self.grad( + outputs=[y, z], + inputs=[x], + grad_outputs=[grad_y, grad_z], + create_graph=create_graph, + retain_graph=True, + ) - grad_y_np = ones_grad_y if grad_y is None else grad_y.numpy( + grad_y_np = ( + ones_grad_y if grad_y is None else grad_y.numpy() ) - grad_z_np = ones_grad_z if grad_z is None else grad_z.numpy( + grad_z_np = ( + ones_grad_z if grad_z is None else grad_z.numpy() ) - dx_expected = dy_expected * grad_y_np + dz_expected * grad_z_np - np.testing.assert_allclose(dx_actual.numpy(), - dx_expected, - rtol=1e-05) + dx_expected = ( + dy_expected * grad_y_np + dz_expected * grad_z_np + ) + np.testing.assert_allclose( + dx_actual.numpy(), dx_expected, rtol=1e-05 + ) if grad_y is not None: self.assertTrue(grad_y.stop_gradient) - np.testing.assert_array_equal(grad_y.numpy(), - original_random_grad_y) + np.testing.assert_array_equal( + grad_y.numpy(), original_random_grad_y + ) if grad_z is not None: self.assertTrue(grad_z.stop_gradient) - np.testing.assert_array_equal(grad_z.numpy(), - original_random_grad_z) + np.testing.assert_array_equal( + grad_z.numpy(), original_random_grad_z + ) def test_none_one_initial_gradient(self): with _test_eager_guard(): @@ -222,14 +240,15 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y, z, w - dx_actual, = self.grad([w_mean], [x], create_graph=True) + (dx_actual,) = self.grad([w_mean], [x], create_graph=True) del w_mean self.assertFalse(dx_actual.stop_gradient) # Theoritical result based on math calculation - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 / float(numel) * 
(np.maximum(x_np, 0) + 1) * (x_np > 0) * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) if not _in_legacy_dygraph(): @@ -240,12 +259,13 @@ class TestDygraphDoubleGrad(TestCase): x_grad_actual = x.gradient() x_grad_expected = ( - 2.0 / float(numel) * - (x_np + dx_expected * - (x_np > 0) * 2 / float(numel))).astype('float32') - np.testing.assert_allclose(x_grad_actual, - x_grad_expected, - rtol=1e-05) + 2.0 + / float(numel) + * (x_np + dx_expected * (x_np > 0) * 2 / float(numel)) + ).astype('float32') + np.testing.assert_allclose( + x_grad_actual, x_grad_expected, rtol=1e-05 + ) def test_example_with_gradient_accumulation_and_create_graph(self): with _test_eager_guard(): @@ -267,15 +287,20 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y1, z, w - dx_actual, = self.grad([w_mean], [x], - create_graph=True, - no_grad_vars=[y2]) + (dx_actual,) = self.grad( + [w_mean], [x], create_graph=True, no_grad_vars=[y2] + ) self.assertFalse(y2.stop_gradient) self.assertFalse(dx_actual.stop_gradient) - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 + / float(numel) + * (np.maximum(x_np, 0) + y2.numpy()) + * (x_np > 0) + * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) if not _in_legacy_dygraph(): @@ -286,12 +311,13 @@ class TestDygraphDoubleGrad(TestCase): x_grad_actual = x.gradient() x_grad_expected = ( - 2.0 / float(numel) * - (x_np + dx_expected * - (x_np > 0) * 4 / float(numel))).astype('float32') - np.testing.assert_allclose(x_grad_actual, - x_grad_expected, - rtol=1e-05) + 2.0 + / float(numel) + * (x_np + dx_expected * (x_np > 0) * 4 / float(numel)) + ).astype('float32') + np.testing.assert_allclose( + x_grad_actual, x_grad_expected, rtol=1e-05 + ) def test_example_with_gradient_accumulation_and_no_grad_vars(self): with _test_eager_guard(): @@ -312,13 +338,14 @@ class TestDygraphDoubleGrad(TestCase): w_mean = fluid.layers.reduce_mean(w) del y, z, w - dx_actual, = self.grad([w_mean], [x], create_graph=False) + (dx_actual,) = self.grad([w_mean], [x], create_graph=False) del w_mean self.assertTrue(dx_actual.stop_gradient) - dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * - (x_np > 0) * 2).astype('float32') + dx_expected = ( + 1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2 + ).astype('float32') np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) @@ -330,9 +357,9 @@ class TestDygraphDoubleGrad(TestCase): x_grad_actual = x.gradient() x_grad_expected = (2.0 * x_np / float(numel)).astype('float32') - np.testing.assert_allclose(x_grad_actual, - x_grad_expected, - rtol=1e-05) + np.testing.assert_allclose( + x_grad_actual, x_grad_expected, rtol=1e-05 + ) def test_example_with_gradient_accumulation_and_not_create_graph(self): with _test_eager_guard(): @@ -341,7 +368,6 @@ class TestDygraphDoubleGrad(TestCase): class TestDygraphDoubleGradSortGradient(TestDygraphDoubleGrad): - def setUp(self): self.sort_sum_gradient = True self.shape = [5, 10] diff --git a/python/paddle/fluid/tests/unittests/test_paddle_multiprocessing.py b/python/paddle/fluid/tests/unittests/test_paddle_multiprocessing.py index 8f4131c8d664001706be6ef290b4210f650d8369..a6e3bf827d3569b64cba468a98ae23d8f3bd3281 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_multiprocessing.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_multiprocessing.py @@ -18,7 +18,11 @@ 
import unittest import time import paddle import paddle.incubate.multiprocessing as mp -from paddle.fluid.framework import _enable_legacy_dygraph, _test_eager_guard, in_dygraph_mode +from paddle.fluid.framework import ( + _enable_legacy_dygraph, + _test_eager_guard, + in_dygraph_mode, +) REPEAT = 20 HAS_SHM_FILES = os.path.isdir('/dev/shm') @@ -47,14 +51,14 @@ def send_parambase(queue, event, device, dtype): tensor = paddle.nn.Layer().create_parameter( [5, 5], dtype=dtype, - default_initializer=paddle.nn.initializer.Constant(value=1.0)) + default_initializer=paddle.nn.initializer.Constant(value=1.0), + ) queue.put(tensor) queue.put(tensor) event.wait() class leak_checker(object): - def __init__(self, test_case): self.checked_pids = [os.getpid()] self.test_case = test_case @@ -98,7 +102,6 @@ class leak_checker(object): class TestMultiprocessingBase(unittest.TestCase): - def get_tensor(self, device="cpu"): self.device = device.lower() place = None @@ -108,7 +111,8 @@ class TestMultiprocessingBase(unittest.TestCase): def get_parameter(self): w = paddle.nn.Layer().create_parameter( [10, 10], - default_initializer=paddle.nn.initializer.Constant(value=0.0)) + default_initializer=paddle.nn.initializer.Constant(value=0.0), + ) return w def _test_empty(self, dtype="float32"): @@ -118,13 +122,9 @@ class TestMultiprocessingBase(unittest.TestCase): out = q.get(timeout=1) self.assertEqual(str(out), str(empty)) - def _test_sharing(self, - ctx=mp, - device='cpu', - dtype="float32", - repeat=1, - param=False): - + def _test_sharing( + self, ctx=mp, device='cpu', dtype="float32", repeat=1, param=False + ): def test_fill(): if param: x = self.get_parameter() @@ -159,7 +159,8 @@ class TestMultiprocessingBase(unittest.TestCase): process = ctx.Process( target=send_parambase if param else send_tensor, - args=(queue, event, device, dtype)) + args=(queue, event, device, dtype), + ) process.daemon = True lc.check_pid(process.pid) process.start() @@ -180,7 +181,6 @@ class TestMultiprocessingBase(unittest.TestCase): class TestMultiprocessingCpu(TestMultiprocessingBase): - def func_test_pass_tensor(self): if in_dygraph_mode(): return @@ -216,9 +216,10 @@ class TestMultiprocessingCpu(TestMultiprocessingBase): class TestMultiprocessingGpu(TestMultiprocessingBase): - - @unittest.skipIf(not paddle.fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not paddle.fluid.core.is_compiled_with_cuda(), + "core is not compiled with CUDA", + ) def func_test_pass_tensor(self): if in_dygraph_mode(): return diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py index f5aea5c303960fe15fc59880f95c8f395cfd6230..b41116973940f3d0b25a07074dab55e66f4fbcb7 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py @@ -40,14 +40,17 @@ LARGE_PARAM = 2**26 def random_batch_reader(): - def _get_random_inputs_and_labels(): np.random.seed(SEED) image = np.random.random([BATCH_SIZE, IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, ( - BATCH_SIZE, - 1, - )).astype('int64') + label = np.random.randint( + 0, + CLASS_NUM - 1, + ( + BATCH_SIZE, + 1, + ), + ).astype('int64') return image, label def __reader__(): @@ -61,7 +64,6 @@ def random_batch_reader(): class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) @@ -71,7 +73,6 @@ class 
LinearNet(nn.Layer): class LayerWithLargeParameters(paddle.nn.Layer): - def __init__(self): super(LayerWithLargeParameters, self).__init__() self._l = paddle.nn.Linear(10, LARGE_PARAM) @@ -92,7 +93,6 @@ def train(layer, loader, loss_fn, opt): class TestSaveLoadLargeParameters(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -107,9 +107,11 @@ class TestSaveLoadLargeParameters(unittest.TestCase): layer = LayerWithLargeParameters() save_dict = layer.state_dict() - path = os.path.join(self.temp_dir.name, - "test_paddle_save_load_large_param_save", - "layer.pdparams") + path = os.path.join( + self.temp_dir.name, + "test_paddle_save_load_large_param_save", + "layer.pdparams", + ) protocol = 4 paddle.save(save_dict, path, protocol=protocol) dict_load = paddle.load(path, return_numpy=True) @@ -119,7 +121,6 @@ class TestSaveLoadLargeParameters(unittest.TestCase): class TestSaveLoadPickle(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -133,9 +134,11 @@ class TestSaveLoadPickle(unittest.TestCase): layer = LinearNet() save_dict = layer.state_dict() - path = os.path.join(self.temp_dir.name, - "test_paddle_save_load_pickle_protocol", - "layer.pdparams") + path = os.path.join( + self.temp_dir.name, + "test_paddle_save_load_pickle_protocol", + "layer.pdparams", + ) with self.assertRaises(ValueError): paddle.save(save_dict, path, 2.0) @@ -156,12 +159,12 @@ class TestSaveLoadPickle(unittest.TestCase): dict_load = paddle.load(path) # compare results before and after saving for key, value in save_dict.items(): - np.testing.assert_array_equal(dict_load[key].numpy(), - value.numpy()) + np.testing.assert_array_equal( + dict_load[key].numpy(), value.numpy() + ) class TestSaveLoadAny(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -209,9 +212,9 @@ class TestSaveLoadAny(unittest.TestCase): def test_replace_static_save_load(self): paddle.enable_static() with new_program_scope(): - x = paddle.static.data(name="static_x", - shape=[None, IMAGE_SIZE], - dtype='float32') + x = paddle.static.data( + name="static_x", shape=[None, IMAGE_SIZE], dtype='float32' + ) z = paddle.static.nn.fc(x, 10) z = paddle.static.nn.fc(z, 10, bias_attr=False) loss = fluid.layers.reduce_mean(z) @@ -226,19 +229,22 @@ class TestSaveLoadAny(unittest.TestCase): base_map = {} for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_map[var.name] = t - path = os.path.join(self.temp_dir.name, - "test_replace_static_save_load", "model") + path = os.path.join( + self.temp_dir.name, "test_replace_static_save_load", "model" + ) # paddle.save, legacy paddle.fluid.load self.replace_static_save(prog, path) self.set_zero(prog, place) paddle.fluid.io.load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, np.array(base_t)) # legacy paddle.fluid.save, paddle.load @@ -247,8 +253,9 @@ class TestSaveLoadAny(unittest.TestCase): self.replace_static_load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( 
- var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) # test for return tensor @@ -258,7 +265,8 @@ class TestSaveLoadAny(unittest.TestCase): tensor = var.get_value(fluid.global_scope()) paddle.save( tensor, - os.path.join(self.temp_dir.name, path_vars, var.name)) + os.path.join(self.temp_dir.name, path_vars, var.name), + ) with self.assertRaises(TypeError): var.get_value('fluid.global_scope()') with self.assertRaises(ValueError): @@ -276,12 +284,14 @@ class TestSaveLoadAny(unittest.TestCase): self.set_zero(prog, place) for var in prog.list_vars(): if var.persistable: - tensor = paddle.load(os.path.join(self.temp_dir.name, - path_vars, var.name), - return_numpy=False) + tensor = paddle.load( + os.path.join(self.temp_dir.name, path_vars, var.name), + return_numpy=False, + ) var.set_value(tensor) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -289,17 +299,19 @@ class TestSaveLoadAny(unittest.TestCase): paddle.disable_static() class StepDecay(LRScheduler): - - def __init__(self, - learning_rate, - step_size, - gamma=0.1, - last_epoch=-1, - verbose=False): + def __init__( + self, + learning_rate, + step_size, + gamma=0.1, + last_epoch=-1, + verbose=False, + ): self.step_size = step_size self.gamma = gamma - super(StepDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(StepDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): i = self.last_epoch // self.step_size @@ -307,14 +319,16 @@ class TestSaveLoadAny(unittest.TestCase): layer = LinearNet() inps = paddle.randn([2, IMAGE_SIZE]) - adam = opt.Adam(learning_rate=StepDecay(0.1, 1), - parameters=layer.parameters()) + adam = opt.Adam( + learning_rate=StepDecay(0.1, 1), parameters=layer.parameters() + ) y = layer(inps) y.mean().backward() adam.step() state_dict = adam.state_dict() - path = os.path.join(self.temp_dir.name, - 'paddle_save_load_v2/model.pdparams') + path = os.path.join( + self.temp_dir.name, 'paddle_save_load_v2/model.pdparams' + ) with self.assertRaises(TypeError): paddle.save(state_dict, path, use_binary_format='False') # legacy paddle.save, paddle.load @@ -327,8 +341,9 @@ class TestSaveLoadAny(unittest.TestCase): if isinstance(v, dict): self.assertTrue(v == load_dict_tensor[k]) else: - np.testing.assert_array_equal(v.numpy(), - load_dict_tensor[k].numpy()) + np.testing.assert_array_equal( + v.numpy(), load_dict_tensor[k].numpy() + ) if not np.array_equal(v.numpy(), load_dict_np[k]): print(v.numpy()) print(load_dict_np[k]) @@ -338,8 +353,9 @@ class TestSaveLoadAny(unittest.TestCase): # enable dygraph mode paddle.disable_static() layer = LinearNet() - path = os.path.join(self.temp_dir.name, - 'paddle_save_load_v2/var_dygraph') + path = os.path.join( + self.temp_dir.name, 'paddle_save_load_v2/var_dygraph' + ) tensor = layer._linear.weight with self.assertRaises(ValueError): paddle.save(tensor, path, pickle_protocol='3') @@ -351,7 +367,9 @@ class TestSaveLoadAny(unittest.TestCase): self.assertTrue( isinstance( t_dygraph, - (paddle.fluid.core.VarBase, paddle.fluid.core.eager.Tensor))) + (paddle.fluid.core.VarBase, paddle.fluid.core.eager.Tensor), + ) + ) np.testing.assert_array_equal(tensor.numpy(), np_dygraph) np.testing.assert_array_equal(tensor.numpy(), t_dygraph.numpy()) 
paddle.enable_static() @@ -366,14 +384,16 @@ class TestSaveLoadAny(unittest.TestCase): paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="x", - shape=[None, IMAGE_SIZE], - dtype='float32') + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32' + ) z = paddle.static.nn.fc(x, 128) loss = fluid.layers.reduce_mean(z) - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) prog = paddle.static.default_main_program() @@ -383,8 +403,9 @@ class TestSaveLoadAny(unittest.TestCase): break scope = fluid.global_scope() origin_tensor = np.array(tensor) - path = os.path.join(self.temp_dir.name, - 'test_single_pickle_var_static/var') + path = os.path.join( + self.temp_dir.name, 'test_single_pickle_var_static/var' + ) paddle.save(tensor, path) self.set_zero(prog, place, scope) # static load @@ -406,8 +427,10 @@ class TestSaveLoadAny(unittest.TestCase): def test_dygraph_save_static_load(self): inps = np.random.randn(1, IMAGE_SIZE).astype('float32') - path = os.path.join(self.temp_dir.name, - 'test_dygraph_save_static_load/dy-static.pdparams') + path = os.path.join( + self.temp_dir.name, + 'test_dygraph_save_static_load/dy-static.pdparams', + ) paddle.disable_static() with paddle.utils.unique_name.guard(): layer = LinearNet() @@ -416,14 +439,16 @@ class TestSaveLoadAny(unittest.TestCase): paddle.enable_static() with new_program_scope(): layer = LinearNet() - data = paddle.static.data(name='x_static_save', - shape=(None, IMAGE_SIZE), - dtype='float32') + data = paddle.static.data( + name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32' + ) y_static = layer(data) program = paddle.static.default_main_program() - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = paddle.static.Executor(paddle.CPUPlace()) exe.run(paddle.static.default_startup_program()) state_dict = paddle.load(path, keep_name_table=True) @@ -431,7 +456,8 @@ class TestSaveLoadAny(unittest.TestCase): state_dict_param = program.state_dict("param") for name, tensor in state_dict_dy.items(): np.testing.assert_array_equal( - tensor.numpy(), np.array(state_dict_param[tensor.name])) + tensor.numpy(), np.array(state_dict_param[tensor.name]) + ) def test_save_load_complex_object_dygraph_save(self): paddle.disable_static() @@ -440,24 +466,28 @@ class TestSaveLoadAny(unittest.TestCase): obj1 = [ paddle.randn([3, 4], dtype='float32'), np.random.randn(5, 6), - ('fake_weight', np.ones([7, 8], dtype='float32')) + ('fake_weight', np.ones([7, 8], dtype='float32')), ] obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} - obj3 = (paddle.randn([5, 4], dtype='float32'), - np.random.randn(3, 4).astype("float32"), { - "state_dict": state_dict, - "opt": state_dict - }) - obj4 = (np.random.randn(5, 6), (123, )) - - path1 = os.path.join(self.temp_dir.name, - "test_save_load_any_complex_object_dygraph/obj1") - path2 = os.path.join(self.temp_dir.name, - "test_save_load_any_complex_object_dygraph/obj2") - path3 = os.path.join(self.temp_dir.name, - "test_save_load_any_complex_object_dygraph/obj3") - path4 = os.path.join(self.temp_dir.name, - 
"test_save_load_any_complex_object_dygraph/obj4") + obj3 = ( + paddle.randn([5, 4], dtype='float32'), + np.random.randn(3, 4).astype("float32"), + {"state_dict": state_dict, "opt": state_dict}, + ) + obj4 = (np.random.randn(5, 6), (123,)) + + path1 = os.path.join( + self.temp_dir.name, "test_save_load_any_complex_object_dygraph/obj1" + ) + path2 = os.path.join( + self.temp_dir.name, "test_save_load_any_complex_object_dygraph/obj2" + ) + path3 = os.path.join( + self.temp_dir.name, "test_save_load_any_complex_object_dygraph/obj3" + ) + path4 = os.path.join( + self.temp_dir.name, "test_save_load_any_complex_object_dygraph/obj4" + ) paddle.save(obj1, path1) paddle.save(obj2, path2) paddle.save(obj3, path3) @@ -473,10 +503,12 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_tensor1[2].numpy(), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( - type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + type(load_tensor1[i]) == type(load_tensor2['k1'][i]) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(v.numpy(), - load_tensor2['k2'][k].numpy()) + np.testing.assert_array_equal( + v.numpy(), load_tensor2['k2'][k].numpy() + ) self.assertTrue(load_tensor2['epoch'] == 123) np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0].numpy()) @@ -484,11 +516,13 @@ class TestSaveLoadAny(unittest.TestCase): for k, v in state_dict.items(): np.testing.assert_array_equal( - load_tensor3[2]['state_dict'][k].numpy(), v.numpy()) + load_tensor3[2]['state_dict'][k].numpy(), v.numpy() + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_tensor3[2]['opt'][k].numpy(), - v.numpy()) + np.testing.assert_array_equal( + load_tensor3[2]['opt'][k].numpy(), v.numpy() + ) np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0]) @@ -510,8 +544,9 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['state_dict'][k], - v.numpy()) + np.testing.assert_array_equal( + load_array3[2]['state_dict'][k], v.numpy() + ) for k, v in state_dict.items(): np.testing.assert_array_equal(load_array3[2]['opt'][k], v.numpy()) @@ -526,38 +561,50 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - np.testing.assert_array_equal(np.array(load_tensor1[0]), - obj1[0].numpy()) + np.testing.assert_array_equal( + np.array(load_tensor1[0]), obj1[0].numpy() + ) np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) np.testing.assert_array_equal(np.array(load_tensor1[2]), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( - type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + type(load_tensor1[i]) == type(load_tensor2['k1'][i]) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(v.numpy(), - np.array(load_tensor2['k2'][k])) + np.testing.assert_array_equal( + v.numpy(), np.array(load_tensor2['k2'][k]) + ) self.assertTrue(load_tensor2['epoch'] == 123) - self.assertTrue(isinstance(load_tensor3[0], - paddle.fluid.core.LoDTensor)) - np.testing.assert_array_equal(np.array(load_tensor3[0]), - obj3[0].numpy()) + self.assertTrue( + isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor) + ) + np.testing.assert_array_equal( + np.array(load_tensor3[0]), obj3[0].numpy() + ) np.testing.assert_array_equal(np.array(load_tensor3[1]), obj3[1]) for k, v in state_dict.items(): self.assertTrue( - 
isinstance(load_tensor3[2]["state_dict"][k], - paddle.fluid.core.LoDTensor)) + isinstance( + load_tensor3[2]["state_dict"][k], + paddle.fluid.core.LoDTensor, + ) + ) np.testing.assert_array_equal( - np.array(load_tensor3[2]['state_dict'][k]), v.numpy()) + np.array(load_tensor3[2]['state_dict'][k]), v.numpy() + ) for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["opt"][k], - paddle.fluid.core.LoDTensor)) - np.testing.assert_array_equal(np.array(load_tensor3[2]['opt'][k]), - v.numpy()) + isinstance( + load_tensor3[2]["opt"][k], paddle.fluid.core.LoDTensor + ) + ) + np.testing.assert_array_equal( + np.array(load_tensor3[2]['opt'][k]), v.numpy() + ) self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor) np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) @@ -581,8 +628,9 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['state_dict'][k], - v.numpy()) + np.testing.assert_array_equal( + load_array3[2]['state_dict'][k], v.numpy() + ) for k, v in state_dict.items(): np.testing.assert_array_equal(load_array3[2]['opt'][k], v.numpy()) @@ -593,15 +641,17 @@ class TestSaveLoadAny(unittest.TestCase): paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="x", - shape=[None, IMAGE_SIZE], - dtype='float32') + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32' + ) z = paddle.static.nn.fc(x, 10, bias_attr=False) z = paddle.static.nn.fc(z, 128, bias_attr=False) loss = fluid.layers.reduce_mean(z) - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) @@ -611,27 +661,32 @@ class TestSaveLoadAny(unittest.TestCase): obj1 = [ state_dict[keys[0]], np.random.randn(5, 6), - ('fake_weight', np.ones([7, 8], dtype='float32')) + ('fake_weight', np.ones([7, 8], dtype='float32')), ] obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} - obj3 = (state_dict[keys[0]], np.ndarray([3, 4], dtype="float32"), { - "state_dict": state_dict, - "opt": state_dict - }) - obj4 = (np.ndarray([3, 4], dtype="float32"), ) + obj3 = ( + state_dict[keys[0]], + np.ndarray([3, 4], dtype="float32"), + {"state_dict": state_dict, "opt": state_dict}, + ) + obj4 = (np.ndarray([3, 4], dtype="float32"),) path1 = os.path.join( self.temp_dir.name, - "test_save_load_any_complex_object_static/obj1") + "test_save_load_any_complex_object_static/obj1", + ) path2 = os.path.join( self.temp_dir.name, - "test_save_load_any_complex_object_static/obj2") + "test_save_load_any_complex_object_static/obj2", + ) path3 = os.path.join( self.temp_dir.name, - "test_save_load_any_complex_object_static/obj3") + "test_save_load_any_complex_object_static/obj3", + ) path4 = os.path.join( self.temp_dir.name, - "test_save_load_any_complex_object_static/obj4") + "test_save_load_any_complex_object_static/obj4", + ) paddle.save(obj1, path1) paddle.save(obj2, path2) paddle.save(obj3, path3) @@ -642,16 +697,19 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - np.testing.assert_array_equal(np.array(load_tensor1[0]), - np.array(obj1[0])) + 
np.testing.assert_array_equal( + np.array(load_tensor1[0]), np.array(obj1[0]) + ) np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) np.testing.assert_array_equal(np.array(load_tensor1[2]), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( - type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + type(load_tensor1[i]) == type(load_tensor2['k1'][i]) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(np.array(v), - np.array(load_tensor2['k2'][k])) + np.testing.assert_array_equal( + np.array(v), np.array(load_tensor2['k2'][k]) + ) self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor)) @@ -661,16 +719,21 @@ class TestSaveLoadAny(unittest.TestCase): for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["state_dict"][k], - fluid.core.LoDTensor)) + isinstance( + load_tensor3[2]["state_dict"][k], fluid.core.LoDTensor + ) + ) np.testing.assert_array_equal( - np.array(load_tensor3[2]['state_dict'][k]), np.array(v)) + np.array(load_tensor3[2]['state_dict'][k]), np.array(v) + ) for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor)) + isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor) + ) np.testing.assert_array_equal( - np.array(load_tensor3[2]['opt'][k]), np.array(v)) + np.array(load_tensor3[2]['opt'][k]), np.array(v) + ) self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor)) np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) @@ -685,7 +748,8 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue( - type(load_array1[i]) == type(load_array2['k1'][i])) + type(load_array1[i]) == type(load_array2['k1'][i]) + ) for k, v in state_dict.items(): np.testing.assert_array_equal(np.array(v), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) @@ -694,12 +758,14 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['state_dict'][k], - np.array(v)) + np.testing.assert_array_equal( + load_array3[2]['state_dict'][k], np.array(v) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['opt'][k], - np.array(v)) + np.testing.assert_array_equal( + load_array3[2]['opt'][k], np.array(v) + ) np.testing.assert_array_equal(load_array4[0], obj4[0]) @@ -711,44 +777,64 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - np.testing.assert_array_equal(np.array(load_tensor1[0]), - np.array(obj1[0])) + np.testing.assert_array_equal( + np.array(load_tensor1[0]), np.array(obj1[0]) + ) np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) np.testing.assert_array_equal(load_tensor1[2].numpy(), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( - type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + type(load_tensor1[i]) == type(load_tensor2['k1'][i]) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(np.array(v), - np.array(load_tensor2['k2'][k])) + np.testing.assert_array_equal( + np.array(v), np.array(load_tensor2['k2'][k]) + ) self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue( - isinstance(load_tensor3[0], - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + load_tensor3[0], + 
(fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0]) self.assertTrue( - isinstance(load_tensor3[1], - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + load_tensor3[1], + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) np.testing.assert_array_equal(load_tensor3[1].numpy(), obj3[1]) for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["state_dict"][k], - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + load_tensor3[2]["state_dict"][k], + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) np.testing.assert_array_equal( - load_tensor3[2]['state_dict'][k].numpy(), np.array(v)) + load_tensor3[2]['state_dict'][k].numpy(), np.array(v) + ) for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["opt"][k], - (fluid.core.VarBase, fluid.core.eager.Tensor))) - np.testing.assert_array_equal(load_tensor3[2]['opt'][k].numpy(), - np.array(v)) + isinstance( + load_tensor3[2]["opt"][k], + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) + np.testing.assert_array_equal( + load_tensor3[2]['opt'][k].numpy(), np.array(v) + ) self.assertTrue( - isinstance(load_tensor4[0], - (fluid.core.VarBase, fluid.core.eager.Tensor))) + isinstance( + load_tensor4[0], + (fluid.core.VarBase, fluid.core.eager.Tensor), + ) + ) np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) @@ -761,7 +847,8 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue( - type(load_array1[i]) == type(load_array2['k1'][i])) + type(load_array1[i]) == type(load_array2['k1'][i]) + ) for k, v in state_dict.items(): np.testing.assert_array_equal(np.array(v), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) @@ -770,12 +857,14 @@ class TestSaveLoadAny(unittest.TestCase): np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['state_dict'][k], - np.array(v)) + np.testing.assert_array_equal( + load_array3[2]['state_dict'][k], np.array(v) + ) for k, v in state_dict.items(): - np.testing.assert_array_equal(load_array3[2]['opt'][k], - np.array(v)) + np.testing.assert_array_equal( + load_array3[2]['opt'][k], np.array(v) + ) self.assertTrue(isinstance(load_array4[0], np.ndarray)) np.testing.assert_array_equal(load_array4[0], obj4[0]) @@ -783,8 +872,10 @@ class TestSaveLoadAny(unittest.TestCase): def test_varbase_binary_var(self): paddle.disable_static() varbase = paddle.randn([3, 2], dtype='float32') - path = os.path.join(self.temp_dir.name, - 'test_paddle_save_load_varbase_binary_var/varbase') + path = os.path.join( + self.temp_dir.name, + 'test_paddle_save_load_varbase_binary_var/varbase', + ) paddle.save(varbase, path, use_binary_format=True) load_array = paddle.load(path, return_numpy=True) load_tensor = paddle.load(path, return_numpy=False) @@ -797,7 +888,6 @@ class TestSaveLoadAny(unittest.TestCase): class TestSaveLoadToMemory(unittest.TestCase): - def test_dygraph_save_to_memory(self): paddle.disable_static() linear = LinearNet() @@ -826,15 +916,17 @@ class TestSaveLoadToMemory(unittest.TestCase): paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="x", - shape=[None, IMAGE_SIZE], - dtype='float32') + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32' + ) z = 
paddle.static.nn.fc(x, 10, bias_attr=False) z = paddle.static.nn.fc(z, 128, bias_attr=False) loss = fluid.layers.reduce_mean(z) - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) @@ -852,8 +944,10 @@ class TestSaveLoadToMemory(unittest.TestCase): byio2.seek(0) prog_load = paddle.load(byio2) - self.assertTrue(prog.desc.serialize_to_string() == - prog_load.desc.serialize_to_string()) + self.assertTrue( + prog.desc.serialize_to_string() + == prog_load.desc.serialize_to_string() + ) tensor_load = paddle.load(byio, return_numpy=True) np.testing.assert_array_equal(tensor_load, np.array(tensor)) @@ -864,7 +958,6 @@ class TestSaveLoadToMemory(unittest.TestCase): class TestSaveLoad(unittest.TestCase): - def setUp(self): # enable dygraph mode paddle.disable_static() @@ -895,18 +988,23 @@ class TestSaveLoad(unittest.TestCase): def check_load_state_dict(self, orig_dict, load_dict): for var_name, value in orig_dict.items(): - load_value = load_dict[var_name].numpy() if hasattr( - load_dict[var_name], 'numpy') else np.array(load_dict[var_name]) + load_value = ( + load_dict[var_name].numpy() + if hasattr(load_dict[var_name], 'numpy') + else np.array(load_dict[var_name]) + ) np.testing.assert_array_equal(value.numpy(), load_value) def test_save_load(self): layer, opt = self.build_and_train_model() # save - layer_save_path = os.path.join(self.temp_dir.name, - "test_paddle_save_load.linear.pdparams") - opt_save_path = os.path.join(self.temp_dir.name, - "test_paddle_save_load.linear.pdopt") + layer_save_path = os.path.join( + self.temp_dir.name, "test_paddle_save_load.linear.pdparams" + ) + opt_save_path = os.path.join( + self.temp_dir.name, "test_paddle_save_load.linear.pdopt" + ) layer_state_dict = layer.state_dict() opt_state_dict = opt.state_dict() @@ -924,7 +1022,8 @@ class TestSaveLoad(unittest.TestCase): paddle.enable_static() static_save_path = os.path.join( self.temp_dir.name, - "static_mode_test/test_paddle_save_load.linear.pdparams") + "static_mode_test/test_paddle_save_load.linear.pdparams", + ) paddle.save(layer_state_dict, static_save_path) load_static_state_dict = paddle.load(static_save_path) self.check_load_state_dict(layer_state_dict, load_static_state_dict) @@ -937,33 +1036,36 @@ class TestSaveLoad(unittest.TestCase): with self.assertRaises(ValueError): paddle.save( layer_state_dict, - os.path.join(self.temp_dir.name, - "test_paddle_save_load.linear.model/")) + os.path.join( + self.temp_dir.name, "test_paddle_save_load.linear.model/" + ), + ) # 3. test load path not exist error with self.assertRaises(ValueError): paddle.load( - os.path.join(self.temp_dir.name, - "test_paddle_save_load.linear.params")) + os.path.join( + self.temp_dir.name, "test_paddle_save_load.linear.params" + ) + ) # 4. 
test load old save path error with self.assertRaises(ValueError): paddle.load( - os.path.join(self.temp_dir.name, - "test_paddle_save_load.linear")) + os.path.join(self.temp_dir.name, "test_paddle_save_load.linear") + ) class TestSaveLoadProgram(unittest.TestCase): - def test_save_load_program(self): paddle.enable_static() temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): layer = LinearNet() - data = paddle.static.data(name='x_static_save', - shape=(None, IMAGE_SIZE), - dtype='float32') + data = paddle.static.data( + name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32' + ) y_static = layer(data) main_program = paddle.static.default_main_program() startup_program = paddle.static.default_startup_program() @@ -971,10 +1073,12 @@ class TestSaveLoadProgram(unittest.TestCase): origin_startup = startup_program.desc.serialize_to_string() path1 = os.path.join( temp_dir.name, - "test_paddle_save_load_program/main_program.pdmodel") + "test_paddle_save_load_program/main_program.pdmodel", + ) path2 = os.path.join( temp_dir.name, - "test_paddle_save_load_program/startup_program.pdmodel") + "test_paddle_save_load_program/startup_program.pdmodel", + ) paddle.save(main_program, path1) paddle.save(startup_program, path2) @@ -987,7 +1091,6 @@ class TestSaveLoadProgram(unittest.TestCase): class TestSaveLoadLayer(unittest.TestCase): - def test_save_load_layer(self): paddle.disable_static() temp_dir = tempfile.TemporaryDirectory() @@ -998,8 +1101,9 @@ class TestSaveLoadLayer(unittest.TestCase): layer2.eval() origin_layer = (layer1, layer2) origin = (layer1(inps), layer2(inps)) - path = os.path.join(temp_dir.name, - "test_save_load_layer_/layer.pdmodel") + path = os.path.join( + temp_dir.name, "test_save_load_layer_/layer.pdmodel" + ) with self.assertRaises(ValueError): paddle.save(origin_layer, path) temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py index 32ee64d3c70f606a6bedfcb29e4b6ab40dee41b3..5eca9492ee39619ddb402b0d24cb16124d469679 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py @@ -28,7 +28,6 @@ IMAGE_SIZE = 784 class TestSaveLoadBinaryFormat(unittest.TestCase): - def setUp(self): # enable static graph mode paddle.enable_static() @@ -49,18 +48,18 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): self.assertTrue(np.sum(np.abs(new_t)) == 0) def replace_save_vars(self, program, dirname): - def predicate(var): return var.persistable vars = filter(predicate, program.list_vars()) for var in vars: - paddle.save(var.get_value(), - os.path.join(dirname, var.name), - use_binary_format=True) + paddle.save( + var.get_value(), + os.path.join(dirname, var.name), + use_binary_format=True, + ) def replace_load_vars(self, program, dirname): - def predicate(var): return var.persistable @@ -74,60 +73,65 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="x", - shape=[None, IMAGE_SIZE], - dtype='float32') + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32' + ) z = paddle.static.nn.fc(x, 10, bias_attr=False) z = paddle.static.nn.fc(z, 128, bias_attr=False) loss = fluid.layers.reduce_mean(z) - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not 
paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) prog = paddle.static.default_main_program() base_map = {} for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t # test for replace_save_vars/io.load_vars path_vars1 = os.path.join( - self.temp_dir.name, 'test_replace_save_load_vars_binary1/model') + self.temp_dir.name, 'test_replace_save_load_vars_binary1/model' + ) self.replace_save_vars(prog, path_vars1) # set var to zero self.set_zero(prog, place) var_list = list( - filter(lambda var: var.persistable, prog.list_vars())) - fluid.io.load_vars(exe, - path_vars1, - main_program=prog, - vars=var_list) + filter(lambda var: var.persistable, prog.list_vars()) + ) + fluid.io.load_vars( + exe, path_vars1, main_program=prog, vars=var_list + ) for var in prog.list_vars(): if var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) # test for io.save_vars/replace_load_vars path_vars2 = os.path.join( - self.temp_dir.name, - 'test_replace_save_load_vars_binary2/model/') - fluid.io.save_vars(exe, - path_vars2, - main_program=prog, - vars=var_list) + self.temp_dir.name, 'test_replace_save_load_vars_binary2/model/' + ) + fluid.io.save_vars( + exe, path_vars2, main_program=prog, vars=var_list + ) self.set_zero(prog, place) self.replace_load_vars(prog, path_vars2) for var in prog.list_vars(): if var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -143,22 +147,27 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): name='fc_vars', ) prog = fluid.default_main_program() - place = fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) exe = fluid.Executor(place) prog = paddle.static.default_main_program() exe.run(fluid.default_startup_program()) - dirname = os.path.join(self.temp_dir.name, - 'test_save_load_lod_tensor1/tensor_') + dirname = os.path.join( + self.temp_dir.name, 'test_save_load_lod_tensor1/tensor_' + ) for var in prog.list_vars(): - if var.persistable and list( - var.shape) == [IMAGE_SIZE, OUTPUT_NUM]: + if var.persistable and list(var.shape) == [ + IMAGE_SIZE, + OUTPUT_NUM, + ]: tensor = var.get_value() - paddle.save(tensor, - dirname + 'fc_vars.w_0', - use_binary_format=True) + paddle.save( + tensor, dirname + 'fc_vars.w_0', use_binary_format=True + ) break origin = np.array(var.get_value()) @@ -168,7 +177,8 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): loaded_tensor = paddle.load(dirname + 'fc_vars.w_0') self.assertTrue(isinstance(loaded_tensor, fluid.core.LoDTensor)) self.assertTrue( - list(loaded_tensor.shape()) == [IMAGE_SIZE, OUTPUT_NUM]) + list(loaded_tensor.shape()) == [IMAGE_SIZE, OUTPUT_NUM] + ) to_array = np.array(loaded_tensor) 
np.testing.assert_array_equal(origin, to_array) @@ -178,8 +188,9 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): # On the Windows platform, when parsing a string that can't be parsed as a `Program`, `desc_.ParseFromString` has a timeout risk. if 'Windows' != platform.system(): with self.assertRaises(ValueError): - path = os.path.join(self.temp_dir.name, - 'test_save_load_error/temp') + path = os.path.join( + self.temp_dir.name, 'test_save_load_error/temp' + ) with open(path, "w") as f: f.write('\0') paddle.load(path) @@ -193,14 +204,18 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): temp_lod, os.path.join( self.temp_dir.name, - 'test_save_load_error_not_exist_file/not_exist_file')) + 'test_save_load_error_not_exist_file/not_exist_file', + ), + ) with self.assertRaises(RuntimeError): fluid.core.load_lod_tensor( temp_lod, os.path.join( self.temp_dir.name, - 'test_save_load_error_not_exist_file/not_exist_file')) + 'test_save_load_error_not_exist_file/not_exist_file', + ), + ) # save to memory byio = BytesIO() @@ -218,14 +233,18 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): def test_save_load_selected_rows(self): paddle.enable_static() - place = fluid.CPUPlace() if not paddle.fluid.core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) height = 10 rows = [0, 4, 7] row_numel = 12 selected_rows = fluid.core.SelectedRows(rows, height) - path = os.path.join(self.temp_dir.name, - 'test_paddle_save_load_selected_rows/sr.pdsr') + path = os.path.join( + self.temp_dir.name, 'test_paddle_save_load_selected_rows/sr.pdsr' + ) with self.assertRaises(ValueError): paddle.save(selected_rows, path, use_binary_format=True) @@ -247,13 +266,17 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): selected_rows, os.path.join( self.temp_dir.name, - 'test_paddle_save_load_selected_rows_not_exist_file/temp')) + 'test_paddle_save_load_selected_rows_not_exist_file/temp', + ), + ) with self.assertRaises(RuntimeError): fluid.core.load_selected_rows( selected_rows, os.path.join( self.temp_dir.name, - 'test_paddle_save_load_selected_rows_not_exist_file/temp')) + 'test_paddle_save_load_selected_rows_not_exist_file/temp', + ), + ) # save to memory byio = BytesIO() @@ -265,8 +288,9 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): self.assertTrue(isinstance(selected_rows_mem, fluid.core.SelectedRows)) self.assertTrue(list(selected_rows_mem.rows()) == rows) self.assertTrue(selected_rows_mem.height() == height) - np.testing.assert_array_equal(np.array(selected_rows_mem.get_tensor()), - np_array) + np.testing.assert_array_equal( + np.array(selected_rows_mem.get_tensor()), np_array + ) with self.assertRaises(NotImplementedError): paddle.framework.io._save_selected_rows(selected_rows, 1) diff --git a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py index 419084cf2715ed115b8ae2c3a3bf0aaf0924f8f2..675196f57ebb26857e54f66760f0cc93f7bfc80b 100644 --- a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py +++ b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py @@ -27,121 +27,92 @@ def np_pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False): return distance -def call_pairwise_distance_layer(x, y, p=2., epsilon=1e-6, keepdim='False'): - pairwise_distance = paddle.nn.PairwiseDistance(p=p, - epsilon=epsilon, - keepdim=keepdim) +def call_pairwise_distance_layer(x, y, p=2.0, epsilon=1e-6, 
keepdim='False'): + pairwise_distance = paddle.nn.PairwiseDistance( + p=p, epsilon=epsilon, keepdim=keepdim + ) distance = pairwise_distance(x=x, y=y) return distance -def call_pairwise_distance_functional(x, - y, - p=2., - epsilon=1e-6, - keepdim='False'): - distance = paddle.nn.functional.pairwise_distance(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) +def call_pairwise_distance_functional( + x, y, p=2.0, epsilon=1e-6, keepdim='False' +): + distance = paddle.nn.functional.pairwise_distance( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) return distance -def test_static(place, - x_np, - y_np, - p=2.0, - epsilon=1e-6, - keepdim=False, - functional=False): +def test_static( + place, x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False, functional=False +): prog = paddle.static.Program() startup_prog = paddle.static.Program() - place = fluid.CUDAPlace( - 0) if paddle.fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.enable_static() with paddle.static.program_guard(prog, startup_prog): x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype) if functional: - distance = call_pairwise_distance_functional(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + distance = call_pairwise_distance_functional( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) else: - distance = call_pairwise_distance_layer(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + distance = call_pairwise_distance_layer( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) exe = paddle.static.Executor(place) - static_ret = exe.run(prog, - feed={ - 'x': x_np, - 'y': y_np - }, - fetch_list=[distance]) + static_ret = exe.run( + prog, feed={'x': x_np, 'y': y_np}, fetch_list=[distance] + ) static_ret = static_ret[0] paddle.disable_static() return static_ret -def test_dygraph(place, - x_np, - y_np, - p=2.0, - epsilon=1e-6, - keepdim=False, - functional=False): +def test_dygraph( + place, x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False, functional=False +): x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) if functional: - dy_distance = call_pairwise_distance_functional(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + dy_distance = call_pairwise_distance_functional( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) else: - dy_distance = call_pairwise_distance_layer(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + dy_distance = call_pairwise_distance_layer( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) dygraph_ret = dy_distance.numpy() return dygraph_ret -def test_legacy_dygraph(place, - x_np, - y_np, - p=2.0, - epsilon=1e-6, - keepdim=False, - functional=False): +def test_legacy_dygraph( + place, x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False, functional=False +): paddle.fluid.framework._enable_legacy_dygraph() x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) if functional: - legacy_distance = call_pairwise_distance_functional(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + legacy_distance = call_pairwise_distance_functional( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) else: - legacy_distance = call_pairwise_distance_layer(x=x, - y=y, - p=p, - epsilon=epsilon, - keepdim=keepdim) + legacy_distance = call_pairwise_distance_layer( + x=x, y=y, p=p, epsilon=epsilon, keepdim=keepdim + ) legacy_ret = legacy_distance.numpy() 
paddle.fluid.framework._disable_legacy_dygraph() return legacy_ret class TestPairwiseDistance(unittest.TestCase): - def test_pairwise_distance(self): epsilon = 1e-6 all_shape = [[5], [100, 100]] @@ -159,81 +130,107 @@ class TestPairwiseDistance(unittest.TestCase): x_np = np.random.random(shape).astype(dtype) y_np = np.random.random(shape).astype(dtype) - static_ret = test_static(place, - x_np, - y_np, - p, - epsilon=epsilon, - keepdim=keepdim) - dygraph_ret = test_dygraph(place, - x_np, - y_np, - p, - epsilon=epsilon, - keepdim=keepdim) - legacy_ret = test_legacy_dygraph(place, - x_np, - y_np, - p, - epsilon=epsilon, - keepdim=keepdim) + static_ret = test_static( + place, + x_np, + y_np, + p, + epsilon=epsilon, + keepdim=keepdim, + ) + dygraph_ret = test_dygraph( + place, + x_np, + y_np, + p, + epsilon=epsilon, + keepdim=keepdim, + ) + legacy_ret = test_legacy_dygraph( + place, + x_np, + y_np, + p, + epsilon=epsilon, + keepdim=keepdim, + ) excepted_value = np_pairwise_distance( - x_np, y_np, p, epsilon=epsilon, keepdim=keepdim) - - self.assertEqual(static_ret.shape, - excepted_value.shape) - self.assertEqual(dygraph_ret.shape, - excepted_value.shape) - self.assertEqual(legacy_ret.shape, - excepted_value.shape) - - np.testing.assert_allclose(static_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(dygraph_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(legacy_ret, - excepted_value, - rtol=1e-05) - - static_functional_ret = test_static(place, - x_np, - y_np, - p, - epsilon=epsilon, - keepdim=keepdim) + x_np, y_np, p, epsilon=epsilon, keepdim=keepdim + ) + + self.assertEqual( + static_ret.shape, excepted_value.shape + ) + self.assertEqual( + dygraph_ret.shape, excepted_value.shape + ) + self.assertEqual( + legacy_ret.shape, excepted_value.shape + ) + + np.testing.assert_allclose( + static_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + dygraph_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + legacy_ret, excepted_value, rtol=1e-05 + ) + + static_functional_ret = test_static( + place, + x_np, + y_np, + p, + epsilon=epsilon, + keepdim=keepdim, + ) dygraph_functional_ret = test_dygraph( place, x_np, y_np, p, epsilon=epsilon, - keepdim=keepdim) + keepdim=keepdim, + ) legacy_functional_ret = test_legacy_dygraph( place, x_np, y_np, p, epsilon=epsilon, - keepdim=keepdim) - - self.assertEqual(static_functional_ret.shape, - excepted_value.shape) - self.assertEqual(dygraph_functional_ret.shape, - excepted_value.shape) - self.assertEqual(legacy_functional_ret.shape, - excepted_value.shape) - - np.testing.assert_allclose(static_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(dygraph_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(legacy_functional_ret, - excepted_value, - rtol=1e-05) + keepdim=keepdim, + ) + + self.assertEqual( + static_functional_ret.shape, + excepted_value.shape, + ) + self.assertEqual( + dygraph_functional_ret.shape, + excepted_value.shape, + ) + self.assertEqual( + legacy_functional_ret.shape, + excepted_value.shape, + ) + + np.testing.assert_allclose( + static_functional_ret, + excepted_value, + rtol=1e-05, + ) + np.testing.assert_allclose( + dygraph_functional_ret, + excepted_value, + rtol=1e-05, + ) + np.testing.assert_allclose( + legacy_functional_ret, + excepted_value, + rtol=1e-05, + ) def test_pairwise_distance_broadcast_1(self): shape_x = [100, 100] @@ -243,25 +240,18 @@ class TestPairwiseDistance(unittest.TestCase): place = paddle.CPUPlace() x_np 
= np.random.random(shape_x).astype('float32') y_np = np.random.random(shape_y).astype('float32') - static_ret = test_static(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - dygraph_ret = test_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - legacy_ret = test_legacy_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - excepted_value = np_pairwise_distance(x_np, - y_np, - epsilon=epsilon, - keepdim=keepdim) + static_ret = test_static( + place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim + ) + dygraph_ret = test_dygraph( + place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim + ) + legacy_ret = test_legacy_dygraph( + place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim + ) + excepted_value = np_pairwise_distance( + x_np, y_np, epsilon=epsilon, keepdim=keepdim + ) self.assertEqual(static_ret.shape, excepted_value.shape) self.assertEqual(dygraph_ret.shape, excepted_value.shape) @@ -271,38 +261,44 @@ class TestPairwiseDistance(unittest.TestCase): np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05) np.testing.assert_allclose(legacy_ret, excepted_value, rtol=1e-05) - static_functional_ret = test_static(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) - dygraph_functional_ret = test_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) - legacy_functional_ret = test_legacy_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) + static_functional_ret = test_static( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) + dygraph_functional_ret = test_dygraph( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) + legacy_functional_ret = test_legacy_dygraph( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) self.assertEqual(static_functional_ret.shape, excepted_value.shape) self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape) self.assertEqual(legacy_functional_ret.shape, excepted_value.shape) - np.testing.assert_allclose(static_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(dygraph_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(legacy_functional_ret, - excepted_value, - rtol=1e-05) + np.testing.assert_allclose( + static_functional_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + dygraph_functional_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + legacy_functional_ret, excepted_value, rtol=1e-05 + ) def test_pairwise_distance_broadcast_2(self): shape_x = [100, 100] @@ -312,25 +308,18 @@ class TestPairwiseDistance(unittest.TestCase): place = paddle.CPUPlace() x_np = np.random.random(shape_x).astype('float32') y_np = np.random.random(shape_y).astype('float32') - static_ret = test_static(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - dygraph_ret = test_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - legacy_ret = test_legacy_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim) - excepted_value = np_pairwise_distance(x_np, - y_np, - epsilon=epsilon, - keepdim=keepdim) + static_ret = test_static( + place=place, x_np=x_np, y_np=y_np, 
epsilon=epsilon, keepdim=keepdim + ) + dygraph_ret = test_dygraph( + place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim + ) + legacy_ret = test_legacy_dygraph( + place=place, x_np=x_np, y_np=y_np, epsilon=epsilon, keepdim=keepdim + ) + excepted_value = np_pairwise_distance( + x_np, y_np, epsilon=epsilon, keepdim=keepdim + ) self.assertEqual(static_ret.shape, excepted_value.shape) self.assertEqual(dygraph_ret.shape, excepted_value.shape) @@ -340,38 +329,44 @@ class TestPairwiseDistance(unittest.TestCase): np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05) np.testing.assert_allclose(legacy_ret, excepted_value, rtol=1e-05) - static_functional_ret = test_static(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) - dygraph_functional_ret = test_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) - legacy_functional_ret = test_legacy_dygraph(place=place, - x_np=x_np, - y_np=y_np, - epsilon=epsilon, - keepdim=keepdim, - functional=True) + static_functional_ret = test_static( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) + dygraph_functional_ret = test_dygraph( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) + legacy_functional_ret = test_legacy_dygraph( + place=place, + x_np=x_np, + y_np=y_np, + epsilon=epsilon, + keepdim=keepdim, + functional=True, + ) self.assertEqual(static_functional_ret.shape, excepted_value.shape) self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape) self.assertEqual(legacy_functional_ret.shape, excepted_value.shape) - np.testing.assert_allclose(static_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(dygraph_functional_ret, - excepted_value, - rtol=1e-05) - np.testing.assert_allclose(legacy_functional_ret, - excepted_value, - rtol=1e-05) + np.testing.assert_allclose( + static_functional_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + dygraph_functional_ret, excepted_value, rtol=1e-05 + ) + np.testing.assert_allclose( + legacy_functional_ret, excepted_value, rtol=1e-05 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py index fb69986a348a789b3203c8ccbf3aba25530b61de..a452f2b2b15d65695b5bd4cb3a6dc3bfd2f77b41 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel.py @@ -19,7 +19,12 @@ import copy import os import subprocess -from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc +from paddle.distributed.utils.launch_utils import ( + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, +) def get_cluster_from_args(selected_gpus): @@ -47,10 +52,9 @@ def get_gpus(selected_gpus): return selected_gpus -def start_local_trainers_cpu(trainer_endpoints, - training_script, - training_script_args, - log_dir=None): +def start_local_trainers_cpu( + trainer_endpoints, training_script, training_script_args, log_dir=None +): current_env = copy.copy(os.environ.copy()) current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -64,15 +68,16 @@ def start_local_trainers_cpu(trainer_endpoints, "PADDLE_TRAINER_ID": "%d" % rank_id, "PADDLE_CURRENT_ENDPOINT": "%s" 
% endpoint, "PADDLE_TRAINERS_NUM": "%d" % n_rank, - "PADDLE_TRAINER_ENDPOINTS": ",".join(trainer_endpoints) + "PADDLE_TRAINER_ENDPOINTS": ",".join(trainer_endpoints), } current_env.update(proc_env) print("trainer proc env:{}".format(current_env)) - assert os.getenv('WITH_COVERAGE', - 'OFF') == 'OFF', "Gloo don't support WITH_COVERAGE." + assert ( + os.getenv('WITH_COVERAGE', 'OFF') == 'OFF' + ), "Gloo don't support WITH_COVERAGE." cmd = "python -u " + training_script print("start trainer proc:{} env:{}".format(cmd, proc_env)) @@ -92,17 +97,19 @@ def start_local_trainers_cpu(trainer_endpoints, return procs -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - eager_mode=True, - log_dir=None): +def start_local_trainers( + cluster, + pod, + training_script, + training_script_args, + eager_mode=True, + log_dir=None, +): current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. - #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -113,7 +120,7 @@ def start_local_trainers(cluster, "PADDLE_TRAINER_ID": "%d" % t.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), } if not eager_mode: @@ -146,10 +153,11 @@ def start_local_trainers(cluster, class TestMultipleGpus(unittest.TestCase): - def run_mnist_2gpu(self, target_file_name, eager_mode=True): - if not fluid.core.is_compiled_with_cuda( - ) or fluid.core.get_cuda_device_count() == 0: + if ( + not fluid.core.is_compiled_with_cuda() + or fluid.core.get_cuda_device_count() == 0 + ): return selected_gpus = get_gpus('0,1') @@ -158,11 +166,13 @@ class TestMultipleGpus(unittest.TestCase): cluster, pod = get_cluster_from_args(selected_gpus) - procs = start_local_trainers(cluster, - pod, - eager_mode=eager_mode, - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers( + cluster, + pod, + eager_mode=eager_mode, + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_endpoints()) @@ -174,15 +184,17 @@ class TestMultipleGpus(unittest.TestCase): class TestMultipleWithGloo(unittest.TestCase): - def run_mnist_2cpu(self, target_file_name): cluster, pod = get_cluster_from_args( - [0, 1]) #tmp use. for getting trainer_nranks() + [0, 1] + ) # tmp use. 
for getting trainer_nranks() - procs = start_local_trainers_cpu(cluster.trainers_endpoints(), - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers_cpu( + cluster.trainers_endpoints(), + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_nranks()) @@ -194,22 +206,21 @@ class TestMultipleWithGloo(unittest.TestCase): class TestDataParallelGradientCheck(TestMultipleGpus): - def test_multiple_gpus_dynamic(self): - self.run_mnist_2gpu('parallel_dygraph_gradient_check.py', - eager_mode=False) + self.run_mnist_2gpu( + 'parallel_dygraph_gradient_check.py', eager_mode=False + ) class TestDataParallelWithPyLayer(TestMultipleGpus): - def test_parallel_dygraph_dataparallel_with_pylayer(self): self.run_mnist_2gpu('parallel_dygraph_dataparallel_with_pylayer.py') - self.run_mnist_2gpu('parallel_dygraph_dataparallel_with_pylayer.py', - eager_mode=False) + self.run_mnist_2gpu( + 'parallel_dygraph_dataparallel_with_pylayer.py', eager_mode=False + ) class TestGradientCheckInEagerMode(TestMultipleGpus): - def test_multiple_gpus_dynamic(self): self.run_mnist_2gpu('parallel_dygraph_gradient_check_in_eager_mode.py') diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel_cpuonly.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel_cpuonly.py index 8862a3310af94111c832cee2a222c9cb3a632770..540c668a6f8edd4cc85d2ff4309aeb7bf1519365 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel_cpuonly.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_dataparallel_cpuonly.py @@ -18,7 +18,12 @@ import copy import os import subprocess -from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc +from paddle.distributed.utils.launch_utils import ( + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, +) def get_cluster_from_args(selected_gpus): @@ -46,16 +51,14 @@ def get_gpus(selected_gpus): return selected_gpus -def start_local_trainers(cluster, - pod, - training_script, - training_script_args, - log_dir=None): +def start_local_trainers( + cluster, pod, training_script, training_script_args, log_dir=None +): current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. - #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -69,8 +72,7 @@ def start_local_trainers(cluster, "MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "6170", "NCCL_DEBUG": "INFO", - "PADDLE_DISTRI_BACKEND": - "gloo", # make init_parallel_env get 'gloo' argument. + "PADDLE_DISTRI_BACKEND": "gloo", # make init_parallel_env get 'gloo' argument. 
} current_env.update(proc_env) @@ -100,10 +102,9 @@ def start_local_trainers(cluster, class TestMultipleGpus(unittest.TestCase): - def run_mnist_2gpu(self, target_file_name): - #if not fluid.core.is_compiled_with_cuda( - #) or fluid.core.get_cuda_device_count() == 0: + # if not fluid.core.is_compiled_with_cuda( + # ) or fluid.core.get_cuda_device_count() == 0: # return selected_gpus = get_gpus('0,1') @@ -111,10 +112,12 @@ class TestMultipleGpus(unittest.TestCase): pod = None cluster, pod = get_cluster_from_args(selected_gpus) - procs = start_local_trainers(cluster, - pod, - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers( + cluster, + pod, + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_nranks()) @@ -126,13 +129,11 @@ class TestMultipleGpus(unittest.TestCase): class TestDataParallelGradientCheck(TestMultipleGpus): - def test_multiple_gpus_dynamic(self): self.run_mnist_2gpu('parallel_dygraph_gradient_check.py') class TestDataParallelGradientCheckInEagerMode(TestMultipleGpus): - def test_multiple_gpus_dynamic(self): self.run_mnist_2gpu('parallel_dygraph_gradient_check_in_eager_mode.py') diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py index 278ea58de37a0bf043075be8f96e6ee9e0833abc..bc1c5bf4cfd9bb597d2f025290c135d3f3f5016d 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py @@ -21,7 +21,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSparseEmdedding_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True @@ -29,10 +28,12 @@ class TestParallelDygraphSparseEmdedding_GLOO(TestDistBase): self._diff_batch = True def test_sparse_embedding(self): - self.check_with_place("parallel_dygraph_sparse_embedding.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_sparse_embedding.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_gloo.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_gloo.py index c9e4d669179c7eabf691521619d680c188676540..ef56153af7feb709f5fb9b6880fc6177309177d6 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_gloo.py @@ -21,31 +21,33 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSparseEmdedding_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_sparse_embedding(self): - self.check_with_place("parallel_dygraph_sparse_embedding.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_sparse_embedding.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphSparseEmdeddingFP64_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_sparse_embedding_fp64(self): - 
self.check_with_place("parallel_dygraph_sparse_embedding_fp64.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_sparse_embedding_fp64.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_over_height_gloo.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_over_height_gloo.py index 76f2d3ecaa88ff58f81899f484bef0c847f41324..c56f63893aaec4da5b0070e036d15adf19cfe475 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_over_height_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_over_height_gloo.py @@ -21,7 +21,6 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphSparseEmdeddingOverHeight_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True @@ -32,7 +31,8 @@ class TestParallelDygraphSparseEmdeddingOverHeight_GLOO(TestDistBase): "parallel_dygraph_sparse_embedding_over_height.py", delta=1e-7, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_transformer_gloo.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_transformer_gloo.py index 6089ef5d8292fae55d673cad279fb984fa9ff0b0..1af28e65cd7edb5ac2041e11761b51fe15442d2e 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_transformer_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_transformer_gloo.py @@ -22,21 +22,21 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphTransformer_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_transformer(self): - self.check_with_place("parallel_dygraph_transformer.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_transformer.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphTransformerAccGrad_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True @@ -46,10 +46,12 @@ class TestParallelDygraphTransformerAccGrad_GLOO(TestDistBase): def test_transformer(self): if fluid.core.is_compiled_with_cuda(): - self.check_with_place("parallel_dygraph_transformer.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_transformer.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_unused_variables_gloo.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_unused_variables_gloo.py index 0cadd9a61b7770698a95233069f47e2ec92f1eff..9278ff7280e2790314947f54a1c9d4d0cb97a0bd 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_unused_variables_gloo.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_unused_variables_gloo.py @@ -21,45 +21,48 @@ flag_name = os.path.splitext(__file__)[0] class TestParallelDygraphUnusedVar_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_net(self): - self.check_with_place("parallel_dygraph_unused_variables.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + 
self.check_with_place( + "parallel_dygraph_unused_variables.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphNoVar_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_net(self): - self.check_with_place("parallel_dygraph_none_var.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_none_var.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) class TestParallelDygraphSharedUnusedVariables_GLOO(TestDistBase): - def _setup_config(self): self._sync_mode = False self._gloo_mode = True self._dygraph = True def test_mnist(self): - self.check_with_place("parallel_dygraph_shared_unused_var.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "parallel_dygraph_shared_unused_var.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py index 7ef548536a33c5248c1da18179df827a6ec55c2f..7f30c15735ffba69d382365a917680685e0e3425 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ -33,27 +33,42 @@ mix_hidden_lr = 1e-3 embedding_name = 'emb' -def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, - is_sparse, **ignored): +def db_lstm( + word, + predicate, + ctx_n2, + ctx_n1, + ctx_0, + ctx_p1, + ctx_p2, + mark, + is_sparse, + **ignored +): # 8 features - predicate_embedding = fluid.layers.embedding(input=predicate, - is_sparse=is_sparse, - size=[pred_dict_len, word_dim], - dtype='float32', - param_attr='vemb') - - mark_embedding = fluid.layers.embedding(input=mark, - is_sparse=is_sparse, - size=[mark_dict_len, mark_dim], - dtype='float32') + predicate_embedding = fluid.layers.embedding( + input=predicate, + is_sparse=is_sparse, + size=[pred_dict_len, word_dim], + dtype='float32', + param_attr='vemb', + ) + + mark_embedding = fluid.layers.embedding( + input=mark, + is_sparse=is_sparse, + size=[mark_dict_len, mark_dim], + dtype='float32', + ) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ - fluid.layers.embedding(size=[word_dict_len, word_dim], - is_sparse=is_sparse, - input=x, - param_attr=fluid.ParamAttr(name=embedding_name, - trainable=False)) + fluid.layers.embedding( + size=[word_dict_len, word_dim], + is_sparse=is_sparse, + input=x, + param_attr=fluid.ParamAttr(name=embedding_name, trainable=False), + ) for x in word_input ] # TODO(zcd): if the parameter is not trainable, the @@ -71,92 +86,98 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, hidden_0 = fluid.layers.sums(input=hidden_0_layers) - lstm_0 = fluid.layers.dynamic_lstm(input=hidden_0, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid') + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + ) # stack L-LSTM and R-LSTM with direct edges input_tmp = [hidden_0, lstm_0] for i in range(1, depth): - mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') - ]) - - lstm = 
fluid.layers.dynamic_lstm(input=mix_hidden, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid', - is_reverse=((i % 2) == 1)) + mix_hidden = fluid.layers.sums( + input=[ + fluid.layers.fc( + input=input_tmp[0], size=hidden_dim, act='tanh' + ), + fluid.layers.fc( + input=input_tmp[1], size=hidden_dim, act='tanh' + ), + ] + ) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1), + ) input_tmp = [mix_hidden, lstm] - feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') - ]) + feature_out = fluid.layers.sums( + input=[ + fluid.layers.fc( + input=input_tmp[0], size=label_dict_len, act='tanh' + ), + fluid.layers.fc( + input=input_tmp[1], size=label_dict_len, act='tanh' + ), + ] + ) return feature_out class TestCRFModel(unittest.TestCase): - - def check_network_convergence(self, - is_sparse, - build_strategy=None, - use_cuda=True): + def check_network_convergence( + self, is_sparse, build_strategy=None, use_cuda=True + ): os.environ['CPU_NUM'] = str(4) main = fluid.Program() startup = fluid.Program() scope = fluid.Scope() with fluid.scope_guard(scope): with fluid.program_guard(main, startup): - word = fluid.layers.data(name='word_data', - shape=[1], - dtype='int64', - lod_level=1) - predicate = fluid.layers.data(name='verb_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_n2 = fluid.layers.data(name='ctx_n2_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_n1 = fluid.layers.data(name='ctx_n1_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_0 = fluid.layers.data(name='ctx_0_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_p1 = fluid.layers.data(name='ctx_p1_data', - shape=[1], - dtype='int64', - lod_level=1) - ctx_p2 = fluid.layers.data(name='ctx_p2_data', - shape=[1], - dtype='int64', - lod_level=1) - mark = fluid.layers.data(name='mark_data', - shape=[1], - dtype='int64', - lod_level=1) + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1 + ) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1 + ) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1 + ) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1 + ) feature_out = db_lstm(**locals()) - target = fluid.layers.data(name='target', - shape=[1], - dtype='int64', - lod_level=1) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1 + ) crf_cost = fluid.layers.linear_chain_crf( input=feature_out, label=target, - param_attr=fluid.ParamAttr(name='crfw', learning_rate=1e-1)) + param_attr=fluid.ParamAttr(name='crfw', learning_rate=1e-1), + ) avg_cost = paddle.mean(crf_cost) sgd_optimizer = fluid.optimizer.SGD( @@ -164,41 +185,63 @@ class TestCRFModel(unittest.TestCase): learning_rate=0.01, decay_steps=100000, decay_rate=0.5, - staircase=True)) + staircase=True, 
+ ) + ) sgd_optimizer.minimize(avg_cost) - train_data = paddle.batch(paddle.reader.shuffle( - paddle.dataset.conll05.test(), buf_size=8192), - batch_size=8) + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192 + ), + batch_size=8, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup) train_cp = compiler.CompiledProgram(main).with_data_parallel( - loss_name=avg_cost.name, build_strategy=build_strategy) - - feeder = fluid.DataFeeder(feed_list=[ - word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, - mark, target - ], - place=fluid.CPUPlace()) + loss_name=avg_cost.name, build_strategy=build_strategy + ) + + feeder = fluid.DataFeeder( + feed_list=[ + word, + ctx_n2, + ctx_n1, + ctx_0, + ctx_p1, + ctx_p2, + predicate, + mark, + target, + ], + place=fluid.CPUPlace(), + ) data = train_data() for i in range(4): cur_batch = next(data) print( - exe.run(train_cp, - feed=feeder.feed(cur_batch), - fetch_list=[avg_cost.name])[0]) + exe.run( + train_cp, + feed=feeder.feed(cur_batch), + fetch_list=[avg_cost.name], + )[0] + ) def _new_build_strategy(self, use_reduce=False): build_strategy = fluid.BuildStrategy() if use_reduce: - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) else: - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.AllReduce + ) return build_strategy @@ -207,46 +250,54 @@ class TestCRFModel(unittest.TestCase): self.check_network_convergence( is_sparse=True, build_strategy=self._new_build_strategy(), - use_cuda=True) + use_cuda=True, + ) self.check_network_convergence( is_sparse=True, build_strategy=self._new_build_strategy(), - use_cuda=False) + use_cuda=False, + ) def test_update_dense_parameter_all_reduce(self): if core.is_compiled_with_cuda(): self.check_network_convergence( is_sparse=False, build_strategy=self._new_build_strategy(), - use_cuda=True) + use_cuda=True, + ) self.check_network_convergence( is_sparse=False, build_strategy=self._new_build_strategy(), - use_cuda=False) + use_cuda=False, + ) def test_update_sparse_parameter_reduce(self): if core.is_compiled_with_cuda(): self.check_network_convergence( is_sparse=True, build_strategy=self._new_build_strategy(use_reduce=True), - use_cuda=True) + use_cuda=True, + ) self.check_network_convergence( is_sparse=True, build_strategy=self._new_build_strategy(use_reduce=True), - use_cuda=False) + use_cuda=False, + ) def test_update_dense_parameter_reduce(self): if core.is_compiled_with_cuda(): self.check_network_convergence( is_sparse=False, build_strategy=self._new_build_strategy(use_reduce=True), - use_cuda=True) + use_cuda=True, + ) self.check_network_convergence( is_sparse=False, build_strategy=self._new_build_strategy(use_reduce=True), - use_cuda=False) + use_cuda=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py index 6ff24c19f46eed3a7a1383eb4eccd9124d5502af..9c8d342993739d4c07ec3bc033aa685f280575de 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py @@ -20,7 +20,6 @@ import os class TestParallelExecutorDropExeScope(unittest.TestCase): - def 
check_drop_scope(self, use_cuda=True): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -42,14 +41,18 @@ class TestParallelExecutorDropExeScope(unittest.TestCase): exec_strateg = fluid.ExecutionStrategy() exec_strateg.num_iteration_per_drop_scope = 10 - train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name, - exec_strategy=exec_strateg) - test_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=test_program, - share_vars_from=train_exe, - exec_strategy=exec_strateg) + train_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + main_program=train_program, + loss_name=loss.name, + exec_strategy=exec_strateg, + ) + test_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + main_program=test_program, + share_vars_from=train_exe, + exec_strategy=exec_strateg, + ) x = numpy.random.random(size=(10, 1)).astype('float32') train_exe.run(feed={"X": x}, fetch_list=[loss.name]) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py index d2feddffc85da5e3a5bd7e7a7e76ab33fe708a28..51b234c3719141ea63b2ad21e92675a35272638a 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py @@ -23,16 +23,18 @@ os.environ['CPU_NUM'] = str(4) class TestBase(unittest.TestCase): - - def main(self, - network_func, - iter=10, - iter_per_pe=10, - use_gpu=True, - use_experimental_executor=False): + def main( + self, + network_func, + iter=10, + iter_per_pe=10, + use_gpu=True, + use_experimental_executor=False, + ): if use_gpu and not fluid.core.is_compiled_with_cuda(): logging.warning( - "Paddle is not compiled with CUDA, skip GPU unittests") + "Paddle is not compiled with CUDA, skip GPU unittests" + ) return main_prog = fluid.Program() @@ -42,28 +44,34 @@ class TestBase(unittest.TestCase): with fluid.scope_guard(scope): loss = network_func() exe = fluid.Executor( - fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()) + fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() + ) exe.run(startup_prog) exe_strategy = fluid.ExecutionStrategy() exe_strategy._dry_run = True - exe_strategy.use_experimental_executor = use_experimental_executor + exe_strategy.use_experimental_executor = ( + use_experimental_executor + ) train_cp = compiler.CompiledProgram( - main_prog).with_data_parallel(loss_name=loss.name, - exec_strategy=exe_strategy) + main_prog + ).with_data_parallel( + loss_name=loss.name, exec_strategy=exe_strategy + ) for _ in range(iter): for _ in range(iter_per_pe): exe.run(train_cp) class TestMNISTDryRun(TestBase): - def test_mnist_dry_run(self): for use_gpu in (False, True): for use_experimental_executor in (False, True): - self.main(network_func=TestMNISTDryRun.network_func, - use_gpu=use_gpu, - use_experimental_executor=use_experimental_executor) + self.main( + network_func=TestMNISTDryRun.network_func, + use_gpu=use_gpu, + use_experimental_executor=use_experimental_executor, + ) @staticmethod def network_func(): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py index 622095b85a7ce3e3882745b71cf2cd4758b8ad63..4b5781921e2961db2e12f0fb4616e1235b60aa90 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py +++ 
b/python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py @@ -23,26 +23,27 @@ import os class TestFeedPersistableVar(unittest.TestCase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) batch_size = 4 - cls.img, cls.label = init_data(batch_size, - img_shape=[784], - label_range=9) + cls.img, cls.label = init_data( + batch_size, img_shape=[784], label_range=9 + ) cls.feed_dict = { 'image': cls.img, 'label': cls.label, - 'learning_rate': numpy.array([1.0]).astype("float32") + 'learning_rate': numpy.array([1.0]).astype("float32"), } def optimizer(self): - learning_rate = fluid.layers.create_global_var(name="learning_rate", - shape=[1], - value=1.0, - dtype='float32', - persistable=True) + learning_rate = fluid.layers.create_global_var( + name="learning_rate", + shape=[1], + value=1.0, + dtype='float32', + persistable=True, + ) optimizer = fluid.optimizer.SGD(learning_rate=learning_rate) return optimizer @@ -62,7 +63,8 @@ class TestFeedPersistableVar(unittest.TestCase): exe.run(program=startup) compiled_prog = fluid.compiler.CompiledProgram( - main).with_data_parallel(loss_name=loss.name) + main + ).with_data_parallel(loss_name=loss.name) exe.run(program=compiled_prog, feed=feed_dict) @@ -70,12 +72,14 @@ class TestFeedPersistableVar(unittest.TestCase): self.check_feed_persistable_var(self.feed_dict) self.check_feed_persistable_var(self.feed_dict, use_cuda=True) - self.feed_dict['learning_rate'] = numpy.array([1.0, - 1.0]).astype("float32") + self.feed_dict['learning_rate'] = numpy.array([1.0, 1.0]).astype( + "float32" + ) self.check_feed_persistable_var(self.feed_dict, use_cuda=True) - self.feed_dict['learning_rate'] = numpy.array([1.0, - 1.0]).astype("float32") + self.feed_dict['learning_rate'] = numpy.array([1.0, 1.0]).astype( + "float32" + ) run = partial(self.check_feed_persistable_var, self.feed_dict) self.assertRaises(RuntimeError, run) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py index 3695de024f4c65e6387d53ec68df31f6bc39486e..950ff45e86f39bacbc85b78dc2c3efa5aecb77ee 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -37,24 +37,25 @@ def Lenet(data, class_dim): class TestFetchAndFeed(unittest.TestCase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - def parallel_exe(self, - use_cuda, - run_parallel_exe, - use_faster_executor=False, - num_threads=4, - seed=1): + def parallel_exe( + self, + use_cuda, + run_parallel_exe, + use_faster_executor=False, + num_threads=4, + seed=1, + ): main_program = fluid.Program() startup = fluid.Program() startup.random_seed = seed with fluid.program_guard(main_program, startup): - data = fluid.layers.data(name='image', - shape=[3, 224, 224], - dtype='float32') + data = fluid.layers.data( + name='image', shape=[3, 224, 224], dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = Lenet(data, class_dim=102) loss = fluid.layers.cross_entropy(input=out, label=label) @@ -62,14 +63,15 @@ class TestFetchAndFeed(unittest.TestCase): opt = fluid.optimizer.Momentum( learning_rate=0.1, momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) opt.minimize(loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup) - #FIXME force 
disable enable_inplace and memory_optimize to pass the unittest + # FIXME force disable enable_inplace and memory_optimize to pass the unittest build_strategy = fluid.BuildStrategy() build_strategy.enable_inplace = False build_strategy.memory_optimize = False @@ -79,56 +81,66 @@ class TestFetchAndFeed(unittest.TestCase): train_cp = compiler.CompiledProgram(main_program).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - exec_strategy=exec_strategy) + exec_strategy=exec_strategy, + ) run_parallel_exe(train_cp, exe, use_cuda, data, label, loss) - def run_parallel_exe_with_fetch(self, compiled_program, exe, use_cuda, data, - label, loss): - + def run_parallel_exe_with_fetch( + self, compiled_program, exe, use_cuda, data, label, loss + ): def get_data(batch_size=8): np.random.seed(5) while True: img = np.random.random(size=[batch_size, 3, 224, 224]).astype( - np.float32) + np.float32 + ) l = (np.random.random(size=[batch_size, 1]) * 10).astype( - np.int64) + np.int64 + ) yield img, l fetch_list = [] all_vars = compiled_program._program.global_block().vars for k, v in all_vars.items(): - if ('tmp' not in k) and ( - k[0] != '_' or v.persistable - ) and v.type == core.VarDesc.VarType.LOD_TENSOR: + if ( + ('tmp' not in k) + and (k[0] != '_' or v.persistable) + and v.type == core.VarDesc.VarType.LOD_TENSOR + ): fetch_list.append(k) for batch_id, img_label in enumerate(get_data()): img, l = img_label train_inputs = {data.name: img, label.name: l} - ret = exe.run(compiled_program, - fetch_list=fetch_list, - feed=train_inputs, - return_numpy=True) + ret = exe.run( + compiled_program, + fetch_list=fetch_list, + feed=train_inputs, + return_numpy=True, + ) for i in range(len(fetch_list)): - assert not math.isnan(np.sum(ret[i])) and \ - not math.isinf(np.sum(ret[i])) + assert not math.isnan(np.sum(ret[i])) and not math.isinf( + np.sum(ret[i]) + ) if batch_id == 2: break - def run_parallel_exe_with_feed(self, compiled_program, exe, use_cuda, data, - label, loss): - + def run_parallel_exe_with_feed( + self, compiled_program, exe, use_cuda, data, label, loss + ): def get_data(batch_size=8): np.random.seed(5) while True: train_data = [] for _ in range(batch_size): img = np.random.random(size=[1, 3, 224, 224]).astype( - np.float32) + np.float32 + ) label = (np.random.random(size=[1, 1]) * 10).astype( - np.int64) + np.int64 + ) train_data.append([img, label]) yield train_data @@ -137,37 +149,45 @@ class TestFetchAndFeed(unittest.TestCase): reader = feeder.decorate_reader(get_data, multi_devices=True) for batch_id, data in enumerate(reader()): - loss_np = exe.run(compiled_program, - feed=data, - fetch_list=[loss.name])[0] + loss_np = exe.run( + compiled_program, feed=data, fetch_list=[loss.name] + )[0] print(batch_id, loss_np) if batch_id == 2: break def check_executor(self, use_faster_executor=False, num_threads=4): if core.is_compiled_with_cuda(): - self.parallel_exe(use_cuda=True, - run_parallel_exe=self.run_parallel_exe_with_fetch, - use_faster_executor=use_faster_executor, - num_threads=num_threads) - self.parallel_exe(use_cuda=False, - run_parallel_exe=self.run_parallel_exe_with_fetch, - use_faster_executor=use_faster_executor, - num_threads=num_threads) + self.parallel_exe( + use_cuda=True, + run_parallel_exe=self.run_parallel_exe_with_fetch, + use_faster_executor=use_faster_executor, + num_threads=num_threads, + ) + self.parallel_exe( + use_cuda=False, + run_parallel_exe=self.run_parallel_exe_with_fetch, + use_faster_executor=use_faster_executor, + num_threads=num_threads, + ) def 
test_fetch(self): for use_faster_executor in {True, False}: - self.check_executor(use_faster_executor=use_faster_executor, - num_threads=4) - self.check_executor(use_faster_executor=use_faster_executor, - num_threads=1) + self.check_executor( + use_faster_executor=use_faster_executor, num_threads=4 + ) + self.check_executor( + use_faster_executor=use_faster_executor, num_threads=1 + ) def test_feed(self): if core.is_compiled_with_cuda(): - self.parallel_exe(use_cuda=True, - run_parallel_exe=self.run_parallel_exe_with_feed) - self.parallel_exe(use_cuda=False, - run_parallel_exe=self.run_parallel_exe_with_feed) + self.parallel_exe( + use_cuda=True, run_parallel_exe=self.run_parallel_exe_with_feed + ) + self.parallel_exe( + use_cuda=False, run_parallel_exe=self.run_parallel_exe_with_feed + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py index 3d2146ab36baac02f67382556bfdc75d7f2eaf8b..9cfd502b3a376fe892d3039e6121d9e5fbd113be 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py @@ -25,7 +25,6 @@ def enable_parallel_ssa_executor(enabled=True): class TestParallelExecutorFetchIsolatedVarBase(unittest.TestCase): - def build_network(self, is_training): x = fluid.data(name='x', shape=[-1, 10], dtype='float32') y = fluid.data(name='y', shape=[-1, 10], dtype='float32') @@ -55,19 +54,30 @@ class TestParallelExecutorFetchIsolatedVarBase(unittest.TestCase): for use_experimental_executor in [False, True]: for use_parallel_ssa_executor in [False, True]: func = lambda: self.run_impl( - use_gpu, dev_cnt, is_training, + use_gpu, + dev_cnt, + is_training, use_experimental_executor, - use_parallel_ssa_executor) + use_parallel_ssa_executor, + ) self.run_func_with_guard(func) - def run_impl(self, use_gpu, dev_cnt, is_training, use_experimental_executor, - use_parallel_ssa_executor): + def run_impl( + self, + use_gpu, + dev_cnt, + is_training, + use_experimental_executor, + use_parallel_ssa_executor, + ): paddle.enable_static() enable_parallel_ssa_executor(use_parallel_ssa_executor) if fluid.is_compiled_with_cuda(): - if fluid.core.globals( - )['FLAGS_enable_parallel_graph'] and not use_gpu: + if ( + fluid.core.globals()['FLAGS_enable_parallel_graph'] + and not use_gpu + ): return # windows has only 1 GPU if use_gpu and dev_cnt > 1 and os.name == "nt": @@ -85,22 +95,23 @@ class TestParallelExecutorFetchIsolatedVarBase(unittest.TestCase): exe.run(fluid.default_startup_program()) prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss_name, - exec_strategy=self.exec_strategy(use_experimental_executor), - places=places) + fluid.default_main_program() + ).with_data_parallel( + loss_name=loss_name, + exec_strategy=self.exec_strategy(use_experimental_executor), + places=places, + ) BATCH_SIZE = 8 * dev_cnt for _ in range(10): x_np = np.random.random(size=[BATCH_SIZE, 10]).astype('float32') y_np = np.random.random(size=[BATCH_SIZE, 10]).astype('float32') - _, y_np_fetch = exe.run(prog, - feed={ - 'x': x_np, - 'y': y_np - }, - fetch_list=[loss, isolated_var]) + _, y_np_fetch = exe.run( + prog, + feed={'x': x_np, 'y': y_np}, + fetch_list=[loss, isolated_var], + ) np.testing.assert_array_equal(y_np, y_np_fetch) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fix_op_run_order.py 
b/python/paddle/fluid/tests/unittests/test_parallel_executor_fix_op_run_order.py index 80da6b5ac61c8e3c50f57b62f8acfab3afc60ee5..4dcf4673a09efd8c68b0a99b6aac4285e5ba138b 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fix_op_run_order.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fix_op_run_order.py @@ -21,7 +21,6 @@ from paddle.nn import CrossEntropyLoss class TestFixOpRunOrder(unittest.TestCase): - def setUp(self): paddle.enable_static() paddle.seed(1) @@ -30,8 +29,11 @@ class TestFixOpRunOrder(unittest.TestCase): fluid.set_flags({'FLAGS_cudnn_deterministic': 1}) def get_place(self): - return paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + return ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) def get_feed(self): batch_size = 4 @@ -44,12 +46,12 @@ class TestFixOpRunOrder(unittest.TestCase): startup_prog = paddle.static.Program() scope = paddle.static.Scope() with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data(name="image", - shape=[None, 3, 224, 224], - dtype="float32") - label = paddle.static.data(name="label", - shape=[None, 1], - dtype="int64") + image = paddle.static.data( + name="image", shape=[None, 3, 224, 224], dtype="float32" + ) + label = paddle.static.data( + name="label", shape=[None, 1], dtype="int64" + ) model = resnet18() pred = model(image) loss_fn = CrossEntropyLoss() @@ -64,7 +66,8 @@ class TestFixOpRunOrder(unittest.TestCase): main_prog = paddle.static.CompiledProgram(main_prog).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - places=[self.get_place()]) + places=[self.get_place()], + ) exe = paddle.static.Executor(self.get_place()) with paddle.static.scope_guard(scope): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py index 926201e6eaaf4587a7baf897a63a36c51c653555..37ccb429b6e9686554201e32472631dcffb774ab 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py @@ -18,7 +18,6 @@ import unittest class TestInferencePartialFeed(unittest.TestCase): - def setUp(self): self.iterations = 10 self.size = 10 @@ -44,12 +43,15 @@ class TestInferencePartialFeed(unittest.TestCase): exe.run(startup_prog) prog = fluid.CompiledProgram(main_prog).with_data_parallel( - places=places) + places=places + ) gen_random = lambda shape: np.random.uniform( - low=-1.0, high=1.0, size=shape).astype('float32') + low=-1.0, high=1.0, size=shape + ).astype('float32') assert_result = lambda feed, result: np.testing.assert_array_equal( - np.maximum(0, feed), result) + np.maximum(0, feed), result + ) def assert_merged_unmerged(merged, unmerged): unmerged = np.concatenate(unmerged, axis=0) @@ -68,10 +70,16 @@ class TestInferencePartialFeed(unittest.TestCase): fetch_list = [relu_x, relu_y, relu_lr] relu_x_np, relu_y_np, relu_lr_np = exe.run( - prog, feed=feed, fetch_list=fetch_list, return_merged=True) + prog, feed=feed, fetch_list=fetch_list, return_merged=True + ) - relu_x_np_unmerged, relu_y_np_unmerged, relu_lr_np_unmerged = exe.run( - prog, feed=feed, fetch_list=fetch_list, return_merged=False) + ( + relu_x_np_unmerged, + relu_y_np_unmerged, + relu_lr_np_unmerged, + ) = exe.run( + prog, feed=feed, fetch_list=fetch_list, return_merged=False 
+ ) assert_merged_unmerged(relu_x_np, relu_x_np_unmerged) assert_merged_unmerged(relu_y_np, relu_y_np_unmerged) @@ -99,24 +107,28 @@ class TestInferencePartialFeed(unittest.TestCase): y_np_list.append(y_np) lr_np_list.append(lr_np) - feed_list.append({ - x.name: x_np, - y.name: y_np, - lr.name: lr_np - }) + feed_list.append( + {x.name: x_np, y.name: y_np, lr.name: lr_np} + ) fetch_list = [relu_x, relu_y, relu_lr] relu_x_np, relu_y_np, relu_lr_np = exe.run( prog, feed=feed_list, fetch_list=fetch_list, - return_merged=True) - - relu_x_np_unmerged, relu_y_np_unmerged, relu_lr_np_unmerged = exe.run( + return_merged=True, + ) + + ( + relu_x_np_unmerged, + relu_y_np_unmerged, + relu_lr_np_unmerged, + ) = exe.run( prog, feed=feed_list, fetch_list=fetch_list, - return_merged=False) + return_merged=False, + ) assert_merged_unmerged(relu_x_np, relu_x_np_unmerged) assert_merged_unmerged(relu_y_np, relu_y_np_unmerged) @@ -144,20 +156,18 @@ class TestInferencePartialFeed(unittest.TestCase): for p in places: for has_persistable in [False, True]: for use_split in [False, True]: - self.run_network(p, - use_split=use_split, - has_persistable=has_persistable) + self.run_network( + p, use_split=use_split, has_persistable=has_persistable + ) class TestInferencePartialFeedUsingDataLoader(unittest.TestCase): - def setUp(self): self.epoch_num = 3 self.batch_num = 101 # a prime number self.batch_size = 32 def create_reader(self): - def __impl__(): for _ in range(self.batch_num): yield np.random.random([self.batch_size, 1]).astype('float32'), @@ -167,10 +177,9 @@ class TestInferencePartialFeedUsingDataLoader(unittest.TestCase): def run_network(self, iterable, use_cuda, drop_last): x = fluid.data(shape=[None, 1], name='x', dtype='float32') places = fluid.cuda_places() if use_cuda else fluid.cpu_places(4) - loader = fluid.io.DataLoader.from_generator(feed_list=[x], - capacity=16, - iterable=iterable, - drop_last=drop_last) + loader = fluid.io.DataLoader.from_generator( + feed_list=[x], capacity=16, iterable=iterable, drop_last=drop_last + ) y = fluid.layers.fc(x, size=10) loss = fluid.layers.reduce_mean(y) @@ -178,17 +187,18 @@ class TestInferencePartialFeedUsingDataLoader(unittest.TestCase): exe.run(fluid.default_startup_program()) prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - places=places, loss_name=loss.name) + fluid.default_main_program() + ).with_data_parallel(places=places, loss_name=loss.name) - loader.set_batch_generator(self.create_reader(), - places=places if iterable else None) + loader.set_batch_generator( + self.create_reader(), places=places if iterable else None + ) for _ in range(self.epoch_num): actual_batch_num = 0 if loader.iterable: for feed_data in loader(): - x_data, = exe.run(prog, feed=feed_data, fetch_list=[x]) + (x_data,) = exe.run(prog, feed=feed_data, fetch_list=[x]) self.assertEqual(x_data.shape[0] % self.batch_size, 0) self.assertTrue(x_data.shape[0] != 0) actual_batch_num += int(x_data.shape[0] / self.batch_size) @@ -196,11 +206,12 @@ class TestInferencePartialFeedUsingDataLoader(unittest.TestCase): loader.start() try: while True: - x_data, = exe.run(prog, fetch_list=[x]) + (x_data,) = exe.run(prog, fetch_list=[x]) self.assertEqual(x_data.shape[0] % self.batch_size, 0) self.assertTrue(x_data.shape[0] != 0) - actual_batch_num += int(x_data.shape[0] / - self.batch_size) + actual_batch_num += int( + x_data.shape[0] / self.batch_size + ) except fluid.core.EOFException: loader.reset() @@ -210,8 +221,9 @@ class 
TestInferencePartialFeedUsingDataLoader(unittest.TestCase): self.assertGreater(self.batch_num, actual_batch_num) def test_main(self): - use_cuda_list = [False, True - ] if fluid.is_compiled_with_cuda() else [False] + use_cuda_list = ( + [False, True] if fluid.is_compiled_with_cuda() else [False] + ) iterable_list = [False, True] drop_last_list = [False, True] for iterable in iterable_list: diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py index b4a69e89e4bc46d2cf738b855efbf0fe8aa5be39..1341ccaad31af972a65a0a6d63afb7b7257f58f4 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -32,8 +32,10 @@ def simple_fc_net(use_feed): hidden, size=200, act='tanh', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) prediction = fluid.layers.fc(hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) loss = paddle.mean(loss) @@ -52,7 +54,9 @@ def fc_with_batchnorm(use_feed): size=200, act='tanh', bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0))) + initializer=fluid.initializer.Constant(value=1.0) + ), + ) hidden = fluid.layers.batch_norm(input=hidden) with fluid.name_scope("fc_layer"): @@ -71,16 +75,13 @@ def init_data(): class TestMNIST(TestParallelExecutorBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - def _compare_reduce_and_allreduce(self, - model, - use_device, - delta1=1e-6, - delta2=1e-4): + def _compare_reduce_and_allreduce( + self, model, use_device, delta1=1e-6, delta2=1e-4 + ): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return @@ -89,23 +90,23 @@ class TestMNIST(TestParallelExecutorBase): img, label = init_data() - all_reduce_first_loss, all_reduce_last_loss, _ = self.check_network_convergence( + ( + all_reduce_first_loss, + all_reduce_last_loss, + _, + ) = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_reduce=False) + use_reduce=False, + ) reduce_first_loss, reduce_last_loss, _ = self.check_network_convergence( model, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_reduce=True) + use_reduce=True, + ) for loss in zip(all_reduce_first_loss, reduce_first_loss): self.assertAlmostEqual(loss[0], loss[1], delta=delta1) @@ -122,13 +123,12 @@ class TestMNIST(TestParallelExecutorBase): img, label = init_data() - self.check_network_convergence(simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, - use_device=use_device, - use_reduce=use_reduce) + self.check_network_convergence( + simple_fc_net, + feed_dict={"image": img, "label": label}, + use_device=use_device, + use_reduce=use_reduce, + ) def test_simple_fc(self): # use_device @@ -140,10 +140,12 @@ class TestMNIST(TestParallelExecutorBase): # use_device, use_reduce # NOTE: the computation result of nccl_reduce is non-deterministic, # related issue: https://github.com/NVIDIA/nccl/issues/157 - self._compare_reduce_and_allreduce(simple_fc_net, DeviceType.CUDA, 1e-5, - 1e-2) - self._compare_reduce_and_allreduce(simple_fc_net, DeviceType.CPU, 1e-5, - 1e-2) + self._compare_reduce_and_allreduce( + 
simple_fc_net, DeviceType.CUDA, 1e-5, 1e-2 + ) + self._compare_reduce_and_allreduce( + simple_fc_net, DeviceType.CPU, 1e-5, 1e-2 + ) def check_simple_fc_parallel_accuracy(self, use_device): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): @@ -153,29 +155,29 @@ class TestMNIST(TestParallelExecutorBase): single_first_loss, single_last_loss, _ = self.check_network_convergence( method=simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_parallel_executor=False) - parallel_first_loss, parallel_last_loss, _ = self.check_network_convergence( + use_parallel_executor=False, + ) + ( + parallel_first_loss, + parallel_last_loss, + _, + ) = self.check_network_convergence( method=simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_parallel_executor=True) + use_parallel_executor=True, + ) self.assertAlmostEquals( np.mean(parallel_first_loss), single_first_loss, delta=1e-6, ) - self.assertAlmostEquals(np.mean(parallel_last_loss), - single_last_loss, - delta=1e-6) + self.assertAlmostEquals( + np.mean(parallel_last_loss), single_last_loss, delta=1e-6 + ) def test_simple_fc_parallel_accuracy(self): self.check_simple_fc_parallel_accuracy(DeviceType.CUDA) @@ -188,31 +190,32 @@ class TestMNIST(TestParallelExecutorBase): return img, label = init_data() - self.check_network_convergence(fc_with_batchnorm, - feed_dict={ - "image": img, - "label": label - }, - use_device=use_device, - use_fast_executor=use_fast_executor) + self.check_network_convergence( + fc_with_batchnorm, + feed_dict={"image": img, "label": label}, + use_device=use_device, + use_fast_executor=use_fast_executor, + ) def test_batchnorm_fc(self): for use_device in (DeviceType.CPU, DeviceType.CUDA): for use_fast_executor in (False, True): - self.check_batchnorm_fc_convergence(use_device, - use_fast_executor) + self.check_batchnorm_fc_convergence( + use_device, use_fast_executor + ) def test_batchnorm_fc_with_new_strategy(self): # NOTE: the computation result of nccl_reduce is non-deterministic, # related issue: https://github.com/NVIDIA/nccl/issues/157 - self._compare_reduce_and_allreduce(fc_with_batchnorm, DeviceType.CUDA, - 1e-5, 1e-2) - self._compare_reduce_and_allreduce(fc_with_batchnorm, DeviceType.CPU, - 1e-5, 1e-2) + self._compare_reduce_and_allreduce( + fc_with_batchnorm, DeviceType.CUDA, 1e-5, 1e-2 + ) + self._compare_reduce_and_allreduce( + fc_with_batchnorm, DeviceType.CPU, 1e-5, 1e-2 + ) class TestMNISTNoReduce(unittest.TestCase): - def run_program(self, device_type): if device_type == DeviceType.CUDA: if not paddle.is_compiled_with_cuda(): @@ -237,16 +240,18 @@ class TestMNISTNoReduce(unittest.TestCase): build_strategy = paddle.static.BuildStrategy() build_strategy.reduce_strategy = no_reduce main_multi_place = paddle.static.CompiledProgram( - main).with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=places) + main + ).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy, places=places + ) build_strategy = paddle.static.BuildStrategy() build_strategy.reduce_strategy = no_reduce main_single_place = paddle.static.CompiledProgram( - main.clone()).with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=places[0]) + main.clone() + ).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy, places=places[0] + ) image, label = init_data() feed = {'image': 
image, 'label': label} @@ -254,9 +259,9 @@ class TestMNISTNoReduce(unittest.TestCase): scope = paddle.static.Scope() with paddle.static.scope_guard(scope): exe.run(startup) - grads_multi_place = exe.run(main_multi_place, - feed=feed, - fetch_list=[grads]) + grads_multi_place = exe.run( + main_multi_place, feed=feed, fetch_list=[grads] + ) feeds = self.split_feed(feed, len(places)) grads_single_place = [list() for _ in range(len(grads))] @@ -266,8 +271,9 @@ class TestMNISTNoReduce(unittest.TestCase): grads_single_place[i].append(g) for i in range(len(grads)): - grads_single_place[i] = np.concatenate(grads_single_place[i], - axis=0) / len(places) + grads_single_place[i] = np.concatenate( + grads_single_place[i], axis=0 + ) / len(places) self.assertEqual(len(grads_multi_place), len(grads_single_place)) for g1, g2 in zip(grads_multi_place, grads_single_place): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py index e312cdd125dcb13b321792c86c4fb8bfe563be94..8813c962f8b6d0d12820b11168cc2a9572213e3d 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py @@ -25,7 +25,6 @@ from simple_nets import simple_fc_net, init_data class TestMNIST(TestParallelExecutorBase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -36,13 +35,12 @@ class TestMNIST(TestParallelExecutorBase): return img, label = init_data() - self.check_network_convergence(simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, - use_device=use_device, - use_reduce=use_reduce) + self.check_network_convergence( + simple_fc_net, + feed_dict={"image": img, "label": label}, + use_device=use_device, + use_reduce=use_reduce, + ) def test_simple_fc(self): # use_device @@ -55,29 +53,29 @@ class TestMNIST(TestParallelExecutorBase): img, label = init_data() single_first_loss, single_last_loss, _ = self.check_network_convergence( method=simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_parallel_executor=False) - parallel_first_loss, parallel_last_loss, _ = self.check_network_convergence( + use_parallel_executor=False, + ) + ( + parallel_first_loss, + parallel_last_loss, + _, + ) = self.check_network_convergence( method=simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, + feed_dict={"image": img, "label": label}, use_device=use_device, - use_parallel_executor=True) + use_parallel_executor=True, + ) self.assertAlmostEquals( np.mean(parallel_first_loss), single_first_loss, delta=1e-6, ) - self.assertAlmostEquals(np.mean(parallel_last_loss), - single_last_loss, - delta=1e-6) + self.assertAlmostEquals( + np.mean(parallel_last_loss), single_last_loss, delta=1e-6 + ) def test_simple_fc_parallel_accuracy(self): self.check_simple_fc_parallel_accuracy(DeviceType.CUDA) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_profiler.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_profiler.py index 6ba451b0e6806d9e91bf5b0c1e105e3a4f88f905..4d8dc470fa9e46820356f891f5a388da82c614b4 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_profiler.py @@ -27,19 +27,20 @@ os.environ['NCCL_SHM_DISABLE'] = str(1) class TestPEProfiler(TestProfiler): - def test_cpu_profiler(self): exe = fluid.Executor(fluid.CPUPlace()) 
self.net_profiler(exe, 'CPU', "Default", use_parallel_executor=True) - @unittest.skipIf(not core.is_compiled_with_cuda(), - "profiler is enabled only with GPU") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "profiler is enabled only with GPU" + ) def test_cuda_profiler(self): exe = fluid.Executor(fluid.CUDAPlace(0)) self.net_profiler(exe, 'GPU', "OpDetail", use_parallel_executor=True) - @unittest.skipIf(not core.is_compiled_with_cuda(), - "profiler is enabled only with GPU") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "profiler is enabled only with GPU" + ) def test_all_profiler(self): exe = fluid.Executor(fluid.CUDAPlace(0)) self.net_profiler(exe, 'All', "AllOpDetail", use_parallel_executor=True) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py index 40a1d087cdc81a7774d2935417d675db1c54501a..3bb546eb917929fd5a70aaedb107a244a4929aeb 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py @@ -22,8 +22,9 @@ import unittest paddle.enable_static() -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', - level=logging.INFO) +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO +) logger = logging.getLogger("paddle_with_cinn") @@ -39,8 +40,9 @@ def set_cinn_flag(val): def reader(limit): for _ in range(limit): - yield np.random.random([1, 28]).astype('float32'), \ - np.random.randint(0, 2, size=[1]).astype('int64') + yield np.random.random([1, 28]).astype('float32'), np.random.randint( + 0, 2, size=[1] + ).astype('int64') def rand_data(img, label, loop_num=10): @@ -59,8 +61,12 @@ def build_program(main_program, startup_program): name="bias", shape=[1, 28], dtype="float32", - attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Assign( - np.random.rand(1, 28).astype(np.float32)))) + attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.Assign( + np.random.rand(1, 28).astype(np.float32) + ) + ), + ) label = paddle.static.data(name="label", shape=[1], dtype='int64') hidden = paddle.add(img, param) @@ -83,15 +89,19 @@ def train(dot_save_dir, prefix, seed=1234): main_program = paddle.static.Program() img, label, loss = build_program(main_program, startup_program) - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) exe.run(startup_program) build_strategy = paddle.static.BuildStrategy() build_strategy.debug_graphviz_path = os.path.join(dot_save_dir, prefix) compiled_program = paddle.static.CompiledProgram( - main_program, build_strategy).with_data_parallel(loss_name=loss.name) + main_program, build_strategy + ).with_data_parallel(loss_name=loss.name) iters = 100 feed = rand_data(img.name, label.name, iters) @@ -104,7 +114,6 @@ def train(dot_save_dir, prefix, seed=1234): @unittest.skipIf(not set_cinn_flag(True), "Paddle is not compiled with CINN.") class TestParallelExecutorRunCinn(unittest.TestCase): - def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="dots_") @@ -115,10 +124,9 @@ class TestParallelExecutorRunCinn(unittest.TestCase): cinn_losses = train(self.tmpdir, "paddle") set_cinn_flag(False) pd_losses = train(self.tmpdir, "cinn") - np.testing.assert_allclose(cinn_losses, - pd_losses, - rtol=1e-05, - atol=1e-05) + 
np.testing.assert_allclose( + cinn_losses, pd_losses, rtol=1e-05, atol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_load_infer_program.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_load_infer_program.py index c0cd03b71fa8f473b412e167a71cf080867ef420..461461f7975291a8e3ff6b1c217202b902dbb00d 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_load_infer_program.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_load_infer_program.py @@ -19,12 +19,15 @@ from simple_nets import simple_fc_net, init_data class TestMNIST(unittest.TestCase): - @classmethod def setUpClass(cls): cls.save_dirname = "./" - cls.model_filename = "test_parallel_executor_run_load_infer_program_model" - cls.params_filename = "test_parallel_executor_run_load_infer_program_parameter" + cls.model_filename = ( + "test_parallel_executor_run_load_infer_program_model" + ) + cls.params_filename = ( + "test_parallel_executor_run_load_infer_program_parameter" + ) cls.place = fluid.CPUPlace() cls.exe = fluid.Executor(cls.place) img, label = init_data() @@ -35,22 +38,30 @@ class TestMNIST(unittest.TestCase): def test_simple_fc(self): exe_loss = self.run_with_executor() - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(self.save_dirname, - self.exe, - self.model_filename, - self.params_filename) - - train_exe = fluid.ParallelExecutor(use_cuda=False, - main_program=inference_program) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = fluid.io.load_inference_model( + self.save_dirname, + self.exe, + self.model_filename, + self.params_filename, + ) + + train_exe = fluid.ParallelExecutor( + use_cuda=False, main_program=inference_program + ) feed_vars = [ inference_program.global_block().var(var_name) for var_name in ["image", "label"] ] feeder = fluid.DataFeeder(place=self.place, feed_list=feed_vars) - pe_loss = train_exe.run(feed=feeder.feed(self.batch_data), - fetch_list=[fetch_targets[0].name]) + pe_loss = train_exe.run( + feed=feeder.feed(self.batch_data), + fetch_list=[fetch_targets[0].name], + ) assert exe_loss == pe_loss def run_with_executor(self): @@ -60,23 +71,25 @@ class TestMNIST(unittest.TestCase): loss = simple_fc_net() feed_vars = [ - main.global_block().var(var_name) - for var_name in ["image", "label"] + main.global_block().var(var_name) for var_name in ["image", "label"] ] feeder = fluid.DataFeeder(place=self.place, feed_list=feed_vars) self.exe.run(startup) - loss_data = self.exe.run(main, - feed=feeder.feed(self.batch_data), - fetch_list=[loss.name]) - - fluid.io.save_inference_model(self.save_dirname, ["image", "label"], - [loss], - self.exe, - model_filename=self.model_filename, - params_filename=self.params_filename, - main_program=main) + loss_data = self.exe.run( + main, feed=feeder.feed(self.batch_data), fetch_list=[loss.name] + ) + + fluid.io.save_inference_model( + self.save_dirname, + ["image", "label"], + [loss], + self.exe, + model_filename=self.model_filename, + params_filename=self.params_filename, + main_program=main, + ) return loss_data diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_cpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_cpu.py index a3394833fd4be3cdd03334fbc44851eb339b1644..79f5fd4c3ca3077978f8fa7b5a5369c99c8da0e5 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_cpu.py +++ 
b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_cpu.py @@ -19,19 +19,22 @@ from functools import partial class TestResnetCPU(TestResnetBase): - def test_seresnext_with_learning_rate_decay(self): # NOTE(zcd): This test is compare the result of use parallel_executor # and executor, and the result of drop_out op and batch_norm op in # this two executor have diff, so the two ops should be removed # from the model. - check_func = partial(self.check_network_convergence, - optimizer=seresnext_net.optimizer, - use_parallel_executor=False) - self._compare_result_with_origin_model(check_func, - use_device=DeviceType.CPU, - compare_separately=False, - delta2=1e-3) + check_func = partial( + self.check_network_convergence, + optimizer=seresnext_net.optimizer, + use_parallel_executor=False, + ) + self._compare_result_with_origin_model( + check_func, + use_device=DeviceType.CPU, + compare_separately=False, + delta2=1e-3, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py index e3f7d0a3462e802c2c5add8b6550affdd0d37f4b..6f7f0f507a4ed39889d9dd2dcb21bc2833842caa 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py @@ -19,19 +19,22 @@ from functools import partial class TestResnetGPU(TestResnetBase): - def test_seresnext_with_learning_rate_decay(self): # NOTE(zcd): This test is compare the result of use parallel_executor # and executor, and the result of drop_out op and batch_norm op in # this two executor have diff, so the two ops should be removed # from the model. - check_func = partial(self.check_network_convergence, - optimizer=seresnext_net.optimizer, - use_parallel_executor=False) - self._compare_result_with_origin_model(check_func, - use_device=DeviceType.CUDA, - delta2=1e-5, - compare_separately=False) + check_func = partial( + self.check_network_convergence, + optimizer=seresnext_net.optimizer, + use_parallel_executor=False, + ) + self._compare_result_with_origin_model( + check_func, + use_device=DeviceType.CUDA, + delta2=1e-5, + compare_separately=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py index 4ef6be1667cd85c65253a874568d31a17dacb2ab..46b7bb83147bdcc724284a8b01a57daf79b656c5 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py @@ -24,15 +24,17 @@ from functools import partial class TestResnetWithFuseAllReduceCPU(TestResnetBase): - def test_seresnext_with_fused_all_reduce(self): # NOTE(zcd): In order to make the program faster, # this unit test remove drop_out and batch_norm. 
- check_func = partial(self.check_network_convergence, - optimizer=seresnext_net.optimizer, - fuse_all_reduce_ops=True) - self._compare_result_with_origin_model(check_func, - use_device=DeviceType.CPU) + check_func = partial( + self.check_network_convergence, + optimizer=seresnext_net.optimizer, + fuse_all_reduce_ops=True, + ) + self._compare_result_with_origin_model( + check_func, use_device=DeviceType.CPU + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py index 016c8f41cbe86fa071fc3235cb9009bba33f46a1..ca349fc1100902f2ef600a595e7552cebe8a09fa 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py @@ -24,16 +24,17 @@ from functools import partial class TestResnetWithFuseAllReduceGPU(TestResnetBase): - def test_seresnext_with_fused_all_reduce(self): # NOTE(zcd): In order to make the program faster, # this unit test remove drop_out and batch_norm. - check_func = partial(self.check_network_convergence, - optimizer=seresnext_net.optimizer, - fuse_all_reduce_ops=True) - self._compare_result_with_origin_model(check_func, - use_device=DeviceType.CUDA, - delta2=1e-2) + check_func = partial( + self.check_network_convergence, + optimizer=seresnext_net.optimizer, + fuse_all_reduce_ops=True, + ) + self._compare_result_with_origin_model( + check_func, use_device=DeviceType.CUDA, delta2=1e-2 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py index d8d854fd315c987830025843f1fb899e704bd9fa..abd650468c53133d3a0d5606c0ec0bc2c18512e7 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_cpu.py @@ -19,19 +19,23 @@ import paddle.fluid.core as core class TestResnetWithReduceBase(TestParallelExecutorBase): - def _compare_reduce_and_allreduce(self, use_device, delta2=1e-5): if use_device == DeviceType.CUDA and not core.is_compiled_with_cuda(): return - all_reduce_first_loss, all_reduce_last_loss, _ = self.check_network_convergence( + ( + all_reduce_first_loss, + all_reduce_last_loss, + _, + ) = self.check_network_convergence( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), iter=seresnext_net.iter(use_device), batch_size=seresnext_net.batch_size(use_device), use_device=use_device, use_reduce=False, - optimizer=seresnext_net.optimizer) + optimizer=seresnext_net.optimizer, + ) reduce_first_loss, reduce_last_loss, _ = self.check_network_convergence( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), @@ -39,7 +43,8 @@ class TestResnetWithReduceBase(TestParallelExecutorBase): batch_size=seresnext_net.batch_size(use_device), use_device=use_device, use_reduce=True, - optimizer=seresnext_net.optimizer) + optimizer=seresnext_net.optimizer, + ) for loss in zip(all_reduce_first_loss, reduce_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-5) @@ -49,7 +54,11 @@ class TestResnetWithReduceBase(TestParallelExecutorBase): if not use_device: return - all_reduce_first_loss_seq, all_reduce_last_loss_seq, _ = self.check_network_convergence( + 
( + all_reduce_first_loss_seq, + all_reduce_last_loss_seq, + _, + ) = self.check_network_convergence( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), iter=seresnext_net.iter(use_device), @@ -57,9 +66,14 @@ class TestResnetWithReduceBase(TestParallelExecutorBase): use_device=use_device, use_reduce=False, optimizer=seresnext_net.optimizer, - enable_sequential_execution=True) - - reduce_first_loss_seq, reduce_last_loss_seq, _ = self.check_network_convergence( + enable_sequential_execution=True, + ) + + ( + reduce_first_loss_seq, + reduce_last_loss_seq, + _, + ) = self.check_network_convergence( seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device), iter=seresnext_net.iter(use_device), @@ -67,7 +81,8 @@ class TestResnetWithReduceBase(TestParallelExecutorBase): use_device=use_device, use_reduce=True, optimizer=seresnext_net.optimizer, - enable_sequential_execution=True) + enable_sequential_execution=True, + ) for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq): self.assertAlmostEquals(loss[0], loss[1], delta=1e-5) @@ -86,10 +101,10 @@ class TestResnetWithReduceBase(TestParallelExecutorBase): class TestResnetWithReduceCPU(TestResnetWithReduceBase): - def test_seresnext_with_reduce(self): - self._compare_reduce_and_allreduce(use_device=DeviceType.CPU, - delta2=1e-3) + self._compare_reduce_and_allreduce( + use_device=DeviceType.CPU, delta2=1e-3 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_gpu.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_gpu.py index 726f42e12a07936664e4bf40b0e00efe19fc5fac..e4cce04e2a413b61919784d15790c5f72c8af2af 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_reduce_gpu.py @@ -13,14 +13,17 @@ # limitations under the License. 
import unittest -from test_parallel_executor_seresnext_with_reduce_cpu import TestResnetWithReduceBase, DeviceType +from test_parallel_executor_seresnext_with_reduce_cpu import ( + TestResnetWithReduceBase, + DeviceType, +) class TestResnetWithReduceGPU(TestResnetWithReduceBase): - def test_seresnext_with_reduce(self): - self._compare_reduce_and_allreduce(use_device=DeviceType.CUDA, - delta2=1e-2) + self._compare_reduce_and_allreduce( + use_device=DeviceType.CUDA, delta2=1e-2 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py index 8366f8f29cd80e2e7893cc66d6c302fbf4a8c00b..9391424692b43b1c4bbdf091b63d4ec70fd7262a 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py @@ -24,7 +24,6 @@ import math class ParallelExecutorTestingDuringTraining(unittest.TestCase): - def check_network_convergence(self, use_cuda, build_strategy=None): os.environ['CPU_NUM'] = str(4) main = fluid.Program() @@ -46,20 +45,22 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): feed_dict = {'image': image, 'label': label} train_cp = compiler.CompiledProgram(main).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) + loss_name=loss.name, build_strategy=build_strategy + ) test_cp = compiler.CompiledProgram(test_program).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, - share_vars_from=train_cp) + share_vars_from=train_cp, + ) for i in range(5): exe.run(train_cp, feed=feed_dict, fetch_list=[loss.name]) - test_loss, = exe.run(test_cp, - feed=feed_dict, - fetch_list=[loss.name]) - train_loss, = exe.run(train_cp, - feed=feed_dict, - fetch_list=[loss.name]) + (test_loss,) = exe.run( + test_cp, feed=feed_dict, fetch_list=[loss.name] + ) + (train_loss,) = exe.run( + train_cp, feed=feed_dict, fetch_list=[loss.name] + ) avg_test_loss_val = np.array(test_loss).mean() if math.isnan(float(avg_test_loss_val)): @@ -69,32 +70,41 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): if math.isnan(float(avg_train_loss_val)): sys.exit("got NaN loss, training failed.") - np.testing.assert_allclose(train_loss, - test_loss, - rtol=1e-05, - atol=0.01) + np.testing.assert_allclose( + train_loss, test_loss, rtol=1e-05, atol=0.01 + ) def test_parallel_testing(self): build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.AllReduce + ) if core.is_compiled_with_cuda(): - self.check_network_convergence(use_cuda=True, - build_strategy=build_strategy) - self.check_network_convergence(use_cuda=False, - build_strategy=build_strategy) + self.check_network_convergence( + use_cuda=True, build_strategy=build_strategy + ) + self.check_network_convergence( + use_cuda=False, build_strategy=build_strategy + ) def test_parallel_testing_with_new_strategy_gpu(self): build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) if core.is_compiled_with_cuda(): - self.check_network_convergence(use_cuda=True, - build_strategy=build_strategy) + self.check_network_convergence( + use_cuda=True, build_strategy=build_strategy + ) def 
test_parallel_testing_with_new_strategy_cpu(self): build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - self.check_network_convergence(use_cuda=False, - build_strategy=build_strategy) + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + ) + self.check_network_convergence( + use_cuda=False, build_strategy=build_strategy + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py index 684be9023b64ec43d7a924176c1177911d35d2a6..d447860862b608ce9f6af0d40534dcb25b4daa8b 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py @@ -77,12 +77,14 @@ def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): data to tensors and return a dict mapping names to tensors. """ - def __pad_batch_data(insts, - pad_idx, - is_target=False, - return_pos=True, - return_attn_bias=True, - return_max_len=True): + def __pad_batch_data( + insts, + pad_idx, + is_target=False, + return_pos=True, + return_attn_bias=True, + return_max_len=True, + ): """ Pad the instances to the max sequence length in batch, and generate the corresponding position data and attention bias. @@ -90,13 +92,19 @@ def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): return_list = [] max_len = max(len(inst) for inst in insts) inst_data = np.array( - [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) + [inst + [pad_idx] * (max_len - len(inst)) for inst in insts] + ) return_list += [inst_data.astype("int64").reshape([-1, 1])] if return_pos: - inst_pos = np.array([[ - pos_i + 1 if w_i != pad_idx else 0 - for pos_i, w_i in enumerate(inst) - ] for inst in inst_data]) + inst_pos = np.array( + [ + [ + pos_i + 1 if w_i != pad_idx else 0 + for pos_i, w_i in enumerate(inst) + ] + for inst in inst_data + ] + ) return_list += [inst_pos.astype("int64").reshape([-1, 1])] if return_attn_bias: @@ -104,37 +112,55 @@ def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): # This is used to avoid attention on paddings and subsequent # words. slf_attn_bias_data = np.ones( - (inst_data.shape[0], max_len, max_len)) + (inst_data.shape[0], max_len, max_len) + ) slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( - [-1, 1, max_len, max_len]) - slf_attn_bias_data = np.tile(slf_attn_bias_data, - [1, n_head, 1, 1]) * [-1e9] + [-1, 1, max_len, max_len] + ) + slf_attn_bias_data = np.tile( + slf_attn_bias_data, [1, n_head, 1, 1] + ) * [-1e9] else: # This is used to avoid attention on paddings. 
- slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * - (max_len - len(inst)) - for inst in insts]) + slf_attn_bias_data = np.array( + [ + [0] * len(inst) + [-1e9] * (max_len - len(inst)) + for inst in insts + ] + ) slf_attn_bias_data = np.tile( slf_attn_bias_data.reshape([-1, 1, 1, max_len]), - [1, n_head, max_len, 1]) + [1, n_head, max_len, 1], + ) return_list += [slf_attn_bias_data.astype("float32")] if return_max_len: return_list += [max_len] return return_list if len(return_list) > 1 else return_list[0] src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data( - [inst[0] for inst in insts], src_pad_idx, is_target=False) + [inst[0] for inst in insts], src_pad_idx, is_target=False + ) trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data( - [inst[1] for inst in insts], trg_pad_idx, is_target=True) - trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], - [1, 1, trg_max_len, 1]).astype("float32") - lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False, - False, False, False) + [inst[1] for inst in insts], trg_pad_idx, is_target=True + ) + trg_src_attn_bias = np.tile( + src_slf_attn_bias[:, :, ::src_max_len, :], [1, 1, trg_max_len, 1] + ).astype("float32") + lbl_word = __pad_batch_data( + [inst[2] for inst in insts], trg_pad_idx, False, False, False, False + ) lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1]) return [ - src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, - trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight + src_word, + src_pos, + trg_word, + trg_pos, + src_slf_attn_bias, + trg_slf_attn_bias, + trg_src_attn_bias, + lbl_word, + lbl_weight, ] @@ -145,12 +171,19 @@ def transformer(use_feed): assert not use_feed, "transfomer doesn't support feed yet" return transformer_model.transformer( ModelHyperParams.src_vocab_size + 1, - ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1, - ModelHyperParams.n_layer, ModelHyperParams.n_head, - ModelHyperParams.d_key, ModelHyperParams.d_value, - ModelHyperParams.d_model, ModelHyperParams.d_inner_hid, - ModelHyperParams.dropout, ModelHyperParams.src_pad_idx, - ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx) + ModelHyperParams.trg_vocab_size + 1, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.dropout, + ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, + ModelHyperParams.pos_pad_idx, + ) def get_feed_data_reader(): @@ -158,15 +191,21 @@ def get_feed_data_reader(): if feed_data_reader is not None: return feed_data_reader - reader = paddle.batch(wmt16.train(ModelHyperParams.src_vocab_size, - ModelHyperParams.trg_vocab_size), - batch_size=transformer_model.batch_size) + reader = paddle.batch( + wmt16.train( + ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size + ), + batch_size=transformer_model.batch_size, + ) all_batch_tensors = [] for batch in reader(): tensors = [] - for tensor in prepare_batch_input(batch, ModelHyperParams.src_pad_idx, - ModelHyperParams.trg_pad_idx, - ModelHyperParams.n_head): + for tensor in prepare_batch_input( + batch, + ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, + ModelHyperParams.n_head, + ): tensors.append(np.array(tensor)) all_batch_tensors.append(tensors) @@ -174,30 +213,36 @@ def get_feed_data_reader(): for t in all_batch_tensors: yield t - feed_data_reader = 
FeedDataReader(feed_list=transformer_model.build_inputs( - ModelHyperParams.max_length + 1, ModelHyperParams.n_head), - reader=__reader__) + feed_data_reader = FeedDataReader( + feed_list=transformer_model.build_inputs( + ModelHyperParams.max_length + 1, ModelHyperParams.n_head + ), + reader=__reader__, + ) return feed_data_reader class TestTransformer(TestParallelExecutorBase): - def test_main(self): if core.is_compiled_with_cuda(): self.check_network_convergence( transformer, use_device=DeviceType.CUDA, - feed_data_reader=get_feed_data_reader()) + feed_data_reader=get_feed_data_reader(), + ) self.check_network_convergence( transformer, use_device=DeviceType.CUDA, enable_sequential_execution=True, - feed_data_reader=get_feed_data_reader()) - self.check_network_convergence(transformer, - use_device=DeviceType.CPU, - iter=2, - feed_data_reader=get_feed_data_reader()) + feed_data_reader=get_feed_data_reader(), + ) + self.check_network_convergence( + transformer, + use_device=DeviceType.CPU, + iter=2, + feed_data_reader=get_feed_data_reader(), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parameter.py b/python/paddle/fluid/tests/unittests/test_parameter.py index e066b96127195cc4829a2e3f591b023e2974bf53..d077fb98362850b14a9bfc2b525e5c83fcd9523c 100644 --- a/python/paddle/fluid/tests/unittests/test_parameter.py +++ b/python/paddle/fluid/tests/unittests/test_parameter.py @@ -16,7 +16,12 @@ import unittest import copy import paddle from paddle.fluid.dygraph import guard -from paddle.fluid.framework import default_main_program, Variable, _test_eager_guard, ParamBase +from paddle.fluid.framework import ( + default_main_program, + Variable, + _test_eager_guard, + ParamBase, +) import paddle.fluid.core as core from paddle.fluid.executor import Executor import paddle.fluid.io as io @@ -28,15 +33,16 @@ main_program = default_main_program() class ParameterChecks(unittest.TestCase): - def test_parameter(self): shape = [784, 100] val = 1.0625 b = main_program.global_block() - param = b.create_parameter(name='fc.w', - shape=shape, - dtype='float32', - initializer=ConstantInitializer(val)) + param = b.create_parameter( + name='fc.w', + shape=shape, + dtype='float32', + initializer=ConstantInitializer(val), + ) self.assertIsNotNone(param) self.assertEqual('fc.w', param.name) self.assertEqual((784, 100), param.shape) @@ -65,8 +71,9 @@ class ParameterChecks(unittest.TestCase): np.testing.assert_array_equal(param_copy.numpy(), param.numpy()) self.assertEqual(param_copy.optimize_attr, param.optimize_attr) self.assertEqual(param_copy.regularizer, param.regularizer) - self.assertEqual(param_copy.do_model_average, - param.do_model_average) + self.assertEqual( + param_copy.do_model_average, param.do_model_average + ) self.assertEqual(param_copy.need_clip, param.need_clip) self.assertEqual(param_copy.is_distributed, param.is_distributed) @@ -84,30 +91,27 @@ class ParameterChecks(unittest.TestCase): def func_exception(self): b = main_program.global_block() with self.assertRaises(ValueError): - b.create_parameter(name='test', - shape=None, - dtype='float32', - initializer=None) + b.create_parameter( + name='test', shape=None, dtype='float32', initializer=None + ) with self.assertRaises(ValueError): - b.create_parameter(name='test', - shape=[1], - dtype=None, - initializer=None) + b.create_parameter( + name='test', shape=[1], dtype=None, initializer=None + ) with self.assertRaises(ValueError): - b.create_parameter(name='test', - shape=[], - dtype='float32', - initializer=None) + 
b.create_parameter( + name='test', shape=[], dtype='float32', initializer=None + ) with self.assertRaises(ValueError): - b.create_parameter(name='test', - shape=[-1], - dtype='float32', - initializer=None) + b.create_parameter( + name='test', shape=[-1], dtype='float32', initializer=None + ) def func_parambase_to_vector(self): with guard(): initializer = paddle.ParamAttr( - initializer=paddle.nn.initializer.Constant(3.)) + initializer=paddle.nn.initializer.Constant(3.0) + ) linear1 = paddle.nn.Linear(10, 15, initializer) vec = paddle.nn.utils.parameters_to_vector(linear1.parameters()) @@ -120,10 +124,12 @@ class ParameterChecks(unittest.TestCase): paddle.nn.utils.vector_to_parameters(vec, linear2.parameters()) self.assertEqual(linear2.weight.shape, [10, 15]) self.assertEqual(linear2.bias.shape, [15]) - np.testing.assert_array_equal(linear1.weight.numpy(), - linear2.weight.numpy()) - np.testing.assert_array_equal(linear1.bias.numpy(), - linear2.bias.numpy()) + np.testing.assert_array_equal( + linear1.weight.numpy(), linear2.weight.numpy() + ) + np.testing.assert_array_equal( + linear1.bias.numpy(), linear2.bias.numpy() + ) self.assertTrue(linear2.weight.is_leaf, True) self.assertTrue(linear2.bias.is_leaf, True) diff --git a/python/paddle/fluid/tests/unittests/test_partial_concat_op.py b/python/paddle/fluid/tests/unittests/test_partial_concat_op.py index 1e4c0076da6e714e4a9dfedd428de699d609432a..8046d8fa1d7e5a43ed386806e2690f10a261d41a 100644 --- a/python/paddle/fluid/tests/unittests/test_partial_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_partial_concat_op.py @@ -19,33 +19,34 @@ import random def np_partial_concat(inputs, start, length): - assert (len(inputs[0].shape) == 2) + assert len(inputs[0].shape) == 2 size = inputs[0].shape[1] - assert (start >= -size and start < size) + assert start >= -size and start < size if start < 0: start += size if length < 0: length = size - start - assert (size >= start + length) + assert size >= start + length elems = [] for elem in inputs: - assert (elem.shape == inputs[0].shape) - elems.append(elem[:, start:start + length]) + assert elem.shape == inputs[0].shape + elems.append(elem[:, start : start + length]) res = np.concatenate(elems, axis=1) return np.concatenate(elems, axis=1) class TestPartialConcatOp(OpTest): - def setUp(self): self.op_type = "partial_concat" self.init_kernel_type() self.init_para() self.var_names = ['x' + str(num) for num in range(self.var_num)] - self.vars = [np.random.random((self.batch_size, self.column)).astype(self.dtype)\ - for num in range(self.var_num) ] + self.vars = [ + np.random.random((self.batch_size, self.column)).astype(self.dtype) + for num in range(self.var_num) + ] self.inputs = {'X': list(zip(self.var_names, self.vars))} self.attrs = {'start_index': self.start_index, 'length': self.length} y = np_partial_concat(self.vars[:], self.start_index, self.length) @@ -70,7 +71,6 @@ class TestPartialConcatOp(OpTest): class TestPartialConcatOp2(TestPartialConcatOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) @@ -80,7 +80,6 @@ class TestPartialConcatOp2(TestPartialConcatOp): class TestPartialConcatOp3(TestPartialConcatOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) @@ -90,7 +89,6 @@ class TestPartialConcatOp3(TestPartialConcatOp): class TestPartialConcatOp4(TestPartialConcatOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) diff --git 
a/python/paddle/fluid/tests/unittests/test_partial_sum_op.py b/python/paddle/fluid/tests/unittests/test_partial_sum_op.py index 5eb416cadc82d65778ff17fb0f919a28f724ddd9..9a5304f7b42aeca6f68dec9f112955a71cab65f7 100644 --- a/python/paddle/fluid/tests/unittests/test_partial_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_partial_sum_op.py @@ -19,7 +19,6 @@ import random class TestPartialSumOp(OpTest): - def setUp(self): self.op_type = "partial_sum" self.init_kernel_type() @@ -29,13 +28,15 @@ class TestPartialSumOp(OpTest): else: end_index = self.start_index + self.length self.var_names = ['x' + str(num) for num in range(self.var_num)] - self.vars = [np.random.random((self.batch_size, self.column)).astype(self.dtype)\ - for num in range(self.var_num) ] + self.vars = [ + np.random.random((self.batch_size, self.column)).astype(self.dtype) + for num in range(self.var_num) + ] self.inputs = {'X': list(zip(self.var_names, self.vars))} self.attrs = {'start_index': self.start_index, 'length': self.length} - y = self.vars[0][:, self.start_index:end_index] + y = self.vars[0][:, self.start_index : end_index] for i in range(1, self.var_num): - y = y + self.vars[i][:, self.start_index:end_index] + y = y + self.vars[i][:, self.start_index : end_index] self.outputs = {'Out': y} @@ -58,7 +59,6 @@ class TestPartialSumOp(OpTest): class TestPartialSumOp2(TestPartialSumOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) @@ -68,7 +68,6 @@ class TestPartialSumOp2(TestPartialSumOp): class TestPartialSumOp3(TestPartialSumOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) @@ -78,7 +77,6 @@ class TestPartialSumOp3(TestPartialSumOp): class TestPartialSumOp4(TestPartialSumOp): - def init_para(self): self.batch_size = random.randint(1, 10) self.column = random.randint(101, 200) diff --git a/python/paddle/fluid/tests/unittests/test_pipeline_parallel.py b/python/paddle/fluid/tests/unittests/test_pipeline_parallel.py index e1abb2c2372b1a32fc8e9bf654862c109ad619fd..11e8aa2b7c3a371d20275f8a07f1c8cdbf89ef37 100644 --- a/python/paddle/fluid/tests/unittests/test_pipeline_parallel.py +++ b/python/paddle/fluid/tests/unittests/test_pipeline_parallel.py @@ -18,13 +18,11 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestPipelineParallel(TestMultipleGpus): - def test_pipeline_parallel(self): self.run_mnist_2gpu('hybrid_parallel_pp_alexnet.py') class TestModelParallelWithRecompute(TestMultipleGpus): - def test_model_parallel_with_recompute(self): self.run_mnist_2gpu("dygraph_recompute_hybrid.py") diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py index 996cd826908ee439721872c8afacb66e81013163..2b20ed0fc88fd276b2c0676ab21fa0f755a25564 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py @@ -25,8 +25,14 @@ import paddle.fluid as fluid def pixel_shuffle_np(x, up_factor, data_format="NCHW"): if data_format == "NCHW": n, c, h, w = x.shape - new_shape = (n, c // (up_factor * up_factor), up_factor, up_factor, h, - w) + new_shape = ( + n, + c // (up_factor * up_factor), + up_factor, + up_factor, + h, + w, + ) # reshape to (num,output_channel,upscale_factor,upscale_factor,h,w) npresult = np.reshape(x, new_shape) # transpose to (num,output_channel,h,upscale_factor,w,upscale_factor) @@ -36,8 +42,14 @@ def pixel_shuffle_np(x, up_factor, 
data_format="NCHW"): return npresult else: n, h, w, c = x.shape - new_shape = (n, h, w, c // (up_factor * up_factor), up_factor, - up_factor) + new_shape = ( + n, + h, + w, + c // (up_factor * up_factor), + up_factor, + up_factor, + ) # reshape to (num,h,w,output_channel,upscale_factor,upscale_factor) npresult = np.reshape(x, new_shape) # transpose to (num,h,upscale_factor,w,upscale_factor,output_channel) @@ -48,7 +60,6 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"): class TestPixelShuffleOp(OpTest): - def setUp(self): self.op_type = "pixel_shuffle" self.python_api = paddle.nn.functional.pixel_shuffle @@ -80,13 +91,11 @@ class TestPixelShuffleOp(OpTest): class TestChannelLast(TestPixelShuffleOp): - def init_data_format(self): self.format = "NHWC" class TestPixelShuffleAPI(unittest.TestCase): - def setUp(self): self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float64") self.x_2_np = np.random.random([2, 4, 4, 9]).astype("float64") @@ -94,47 +103,53 @@ class TestPixelShuffleAPI(unittest.TestCase): self.out_2_np = pixel_shuffle_np(self.x_2_np, 3, "NHWC") def test_static_graph_functional(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 9, 4, 4], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 4, 4, 9], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 9, 4, 4], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 4, 4, 9], dtype="float64" + ) out_1 = F.pixel_shuffle(x_1, 3) out_2 = F.pixel_shuffle(x_2, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, self.out_1_np) assert np.allclose(res_2, self.out_2_np) # same test between layer and functional in this op. 
def test_static_graph_layer(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 9, 4, 4], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 4, 4, 9], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 9, 4, 4], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 4, 4, 9], dtype="float64" + ) # init instance ps_1 = paddle.nn.PixelShuffle(3) ps_2 = paddle.nn.PixelShuffle(3, "NHWC") @@ -144,15 +159,19 @@ class TestPixelShuffleAPI(unittest.TestCase): out_2_np = pixel_shuffle_np(self.x_2_np, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, out_1_np) assert np.allclose(res_2, out_2_np) @@ -170,23 +189,26 @@ class TestPixelShuffleAPI(unittest.TestCase): npresult = pixel_shuffle_np(x, up_factor, data_format) - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) - pixel_shuffle = paddle.nn.PixelShuffle(up_factor, - data_format=data_format) + pixel_shuffle = paddle.nn.PixelShuffle( + up_factor, data_format=data_format + ) result = pixel_shuffle(paddle.to_tensor(x)) np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) - result_functional = F.pixel_shuffle(paddle.to_tensor(x), 3, - data_format) - np.testing.assert_allclose(result_functional.numpy(), - npresult, - rtol=1e-05) + result_functional = F.pixel_shuffle( + paddle.to_tensor(x), 3, data_format + ) + np.testing.assert_allclose( + result_functional.numpy(), npresult, rtol=1e-05 + ) def test_dygraph1(self): self.run_dygraph(3, "NCHW") @@ -196,9 +218,7 @@ class TestPixelShuffleAPI(unittest.TestCase): class TestPixelShuffleError(unittest.TestCase): - def test_error_functional(self): - def error_upscale_factor(): with paddle.fluid.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") @@ -214,7 +234,6 @@ class TestPixelShuffleError(unittest.TestCase): self.assertRaises(ValueError, error_data_format) def test_error_layer(self): - def error_upscale_factor_layer(): with paddle.fluid.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py index 5ab545222d8ee6a4d608da28c2643bdca64127c2..8f4de9314d3c6f3a69209169ea5bea4f523bc986 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py @@ -27,23 +27,41 @@ def pixel_unshuffle_np(x, down_factor, data_format="NCHW"): if data_format == "NCHW": n, c, h, w = x.shape - new_shape = (n, c, h // down_factor, down_factor, w // down_factor, - down_factor) + 
new_shape = ( + n, + c, + h // down_factor, + down_factor, + w // down_factor, + down_factor, + ) npresult = np.reshape(x, new_shape) npresult = npresult.transpose(0, 1, 3, 5, 2, 4) oshape = [ - n, c * down_factor * down_factor, h // down_factor, w // down_factor + n, + c * down_factor * down_factor, + h // down_factor, + w // down_factor, ] npresult = np.reshape(npresult, oshape) return npresult else: n, h, w, c = x.shape - new_shape = (n, h // down_factor, down_factor, w // down_factor, - down_factor, c) + new_shape = ( + n, + h // down_factor, + down_factor, + w // down_factor, + down_factor, + c, + ) npresult = np.reshape(x, new_shape) npresult = npresult.transpose(0, 1, 3, 5, 2, 4) oshape = [ - n, h // down_factor, w // down_factor, c * down_factor * down_factor + n, + h // down_factor, + w // down_factor, + c * down_factor * down_factor, ] npresult = np.reshape(npresult, oshape) return npresult @@ -73,7 +91,7 @@ class TestPixelUnshuffleOp(OpTest): self.outputs = {"Out": npresult} self.attrs = { "downscale_factor": down_factor, - "data_format": self.format + "data_format": self.format, } def init_data_format(self): @@ -115,30 +133,35 @@ class TestPixelUnshuffleAPI(unittest.TestCase): def test_static_graph_functional(self): '''test_static_graph_functional''' - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 1, 12, 12], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 12, 12, 1], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 1, 12, 12], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 12, 12, 1], dtype="float64" + ) out_1 = F.pixel_unshuffle(x_1, 3) out_2 = F.pixel_unshuffle(x_2, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, self.out_1_np) assert np.allclose(res_2, self.out_2_np) @@ -147,17 +170,18 @@ class TestPixelUnshuffleAPI(unittest.TestCase): def test_static_graph_layer(self): '''test_static_graph_layer''' - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=[2, 1, 12, 12], - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=[2, 12, 12, 1], - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=[2, 1, 12, 12], dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=[2, 12, 12, 1], dtype="float64" + ) # init instance ps_1 = paddle.nn.PixelUnshuffle(3) ps_2 = paddle.nn.PixelUnshuffle(3, "NHWC") @@ -167,15 +191,19 @@ class TestPixelUnshuffleAPI(unittest.TestCase): out_2_np = pixel_unshuffle_np(self.x_2_np, 3, "NHWC") exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - 
feed={"x": self.x_1_np}, - fetch_list=out_1, - use_prune=True) - - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_2_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_1_np}, + fetch_list=out_1, + use_prune=True, + ) + + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_2_np}, + fetch_list=out_2, + use_prune=True, + ) assert np.allclose(res_1, out_1_np) assert np.allclose(res_2, out_2_np) @@ -194,23 +222,26 @@ class TestPixelUnshuffleAPI(unittest.TestCase): npresult = pixel_unshuffle_np(x, down_factor, data_format) - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.disable_static(place=place) - pixel_unshuffle = paddle.nn.PixelUnshuffle(down_factor, - data_format=data_format) + pixel_unshuffle = paddle.nn.PixelUnshuffle( + down_factor, data_format=data_format + ) result = pixel_unshuffle(paddle.to_tensor(x)) np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) - result_functional = F.pixel_unshuffle(paddle.to_tensor(x), 3, - data_format) - np.testing.assert_allclose(result_functional.numpy(), - npresult, - rtol=1e-05) + result_functional = F.pixel_unshuffle( + paddle.to_tensor(x), 3, data_format + ) + np.testing.assert_allclose( + result_functional.numpy(), npresult, rtol=1e-05 + ) pixel_unshuffle_str = 'downscale_factor={}'.format(down_factor) if data_format != 'NCHW': @@ -258,8 +289,9 @@ class TestPixelUnshuffleError(unittest.TestCase): def error_data_format(): with paddle.fluid.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") - pixel_unshuffle = F.pixel_unshuffle(paddle.to_tensor(x), 3, - "WOW") + pixel_unshuffle = F.pixel_unshuffle( + paddle.to_tensor(x), 3, "WOW" + ) self.assertRaises(ValueError, error_data_format) diff --git a/python/paddle/fluid/tests/unittests/test_poisson_op.py b/python/paddle/fluid/tests/unittests/test_poisson_op.py index d6f878c94738be4219adae57d534f9fdf148891f..4e4a20c77cb5fc661e88125436ec551890ff7cf6 100644 --- a/python/paddle/fluid/tests/unittests/test_poisson_op.py +++ b/python/paddle/fluid/tests/unittests/test_poisson_op.py @@ -38,7 +38,6 @@ def output_hist(out, lam, a, b): class TestPoissonOp1(OpTest): - def setUp(self): self.op_type = "poisson" self.config() @@ -67,11 +66,11 @@ class TestPoissonOp1(OpTest): user_defined_grads=[np.zeros([2048, 1024], dtype=self.dtype)], user_defined_grad_outputs=[ np.random.rand(2048, 1024).astype(self.dtype) - ]) + ], + ) class TestPoissonOp2(TestPoissonOp1): - def config(self): self.lam = 5 self.a = 1 @@ -80,18 +79,20 @@ class TestPoissonOp2(TestPoissonOp1): class TestPoissonAPI(unittest.TestCase): - def test_static(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x_np = np.random.rand(10, 10) x = paddle.static.data(name="x", shape=[10, 10], dtype='float64') y = paddle.poisson(x) exe = paddle.static.Executor() - y_np = exe.run(paddle.static.default_main_program(), - feed={"x": x_np}, - fetch_list=[y]) + y_np = exe.run( + paddle.static.default_main_program(), + feed={"x": x_np}, + fetch_list=[y], + ) self.assertTrue(np.min(y_np) >= 0) def test_dygraph(self): @@ -117,70 +118,250 @@ class TestPoissonAPI(unittest.TestCase): paddle.disable_static() paddle.set_device('gpu') 
paddle.seed(2021) - x = paddle.full([32, 3, 1024, 768], 10., dtype="float32") + x = paddle.full([32, 3, 1024, 768], 10.0, dtype="float32") y = paddle.poisson(x) y_np = y.numpy() expect = [ - 13., 13., 11., 8., 12., 6., 9., 15., 16., 6., 13., 12., 9., 15., - 17., 8., 11., 16., 11., 10. + 13.0, + 13.0, + 11.0, + 8.0, + 12.0, + 6.0, + 9.0, + 15.0, + 16.0, + 6.0, + 13.0, + 12.0, + 9.0, + 15.0, + 17.0, + 8.0, + 11.0, + 16.0, + 11.0, + 10.0, ] np.testing.assert_array_equal(y_np[0, 0, 0, 0:20], expect) expect = [ - 15., 7., 12., 8., 14., 10., 10., 11., 11., 11., 21., 6., 9., 13., - 13., 11., 6., 9., 12., 12. + 15.0, + 7.0, + 12.0, + 8.0, + 14.0, + 10.0, + 10.0, + 11.0, + 11.0, + 11.0, + 21.0, + 6.0, + 9.0, + 13.0, + 13.0, + 11.0, + 6.0, + 9.0, + 12.0, + 12.0, ] np.testing.assert_array_equal(y_np[8, 1, 300, 200:220], expect) expect = [ - 10., 15., 9., 6., 4., 13., 10., 10., 13., 12., 9., 7., 10., 14., 7., - 10., 8., 5., 10., 14. + 10.0, + 15.0, + 9.0, + 6.0, + 4.0, + 13.0, + 10.0, + 10.0, + 13.0, + 12.0, + 9.0, + 7.0, + 10.0, + 14.0, + 7.0, + 10.0, + 8.0, + 5.0, + 10.0, + 14.0, ] np.testing.assert_array_equal(y_np[16, 1, 600, 400:420], expect) expect = [ - 10., 9., 14., 12., 8., 9., 7., 8., 11., 10., 13., 8., 12., 9., 7., - 8., 11., 11., 12., 5. + 10.0, + 9.0, + 14.0, + 12.0, + 8.0, + 9.0, + 7.0, + 8.0, + 11.0, + 10.0, + 13.0, + 8.0, + 12.0, + 9.0, + 7.0, + 8.0, + 11.0, + 11.0, + 12.0, + 5.0, ] np.testing.assert_array_equal(y_np[24, 2, 900, 600:620], expect) expect = [ - 15., 5., 11., 13., 12., 12., 13., 16., 9., 9., 7., 9., 13., 11., - 15., 6., 11., 9., 10., 10. + 15.0, + 5.0, + 11.0, + 13.0, + 12.0, + 12.0, + 13.0, + 16.0, + 9.0, + 9.0, + 7.0, + 9.0, + 13.0, + 11.0, + 15.0, + 6.0, + 11.0, + 9.0, + 10.0, + 10.0, ] np.testing.assert_array_equal(y_np[31, 2, 1023, 748:768], expect) - x = paddle.full([16, 1024, 1024], 5., dtype="float32") + x = paddle.full([16, 1024, 1024], 5.0, dtype="float32") y = paddle.poisson(x) y_np = y.numpy() expect = [ - 4., 5., 2., 9., 8., 7., 4., 7., 4., 7., 6., 3., 10., 7., 5., 7., 2., - 5., 5., 6. + 4.0, + 5.0, + 2.0, + 9.0, + 8.0, + 7.0, + 4.0, + 7.0, + 4.0, + 7.0, + 6.0, + 3.0, + 10.0, + 7.0, + 5.0, + 7.0, + 2.0, + 5.0, + 5.0, + 6.0, ] np.testing.assert_array_equal(y_np[0, 0, 100:120], expect) expect = [ - 1., 4., 8., 11., 6., 5., 4., 4., 7., 4., 4., 7., 11., 6., 5., 3., - 4., 6., 3., 3. + 1.0, + 4.0, + 8.0, + 11.0, + 6.0, + 5.0, + 4.0, + 4.0, + 7.0, + 4.0, + 4.0, + 7.0, + 11.0, + 6.0, + 5.0, + 3.0, + 4.0, + 6.0, + 3.0, + 3.0, ] np.testing.assert_array_equal(y_np[4, 300, 300:320], expect) expect = [ - 7., 5., 4., 6., 8., 5., 6., 7., 7., 7., 3., 10., 5., 10., 4., 5., - 8., 7., 5., 7. + 7.0, + 5.0, + 4.0, + 6.0, + 8.0, + 5.0, + 6.0, + 7.0, + 7.0, + 7.0, + 3.0, + 10.0, + 5.0, + 10.0, + 4.0, + 5.0, + 8.0, + 7.0, + 5.0, + 7.0, ] np.testing.assert_array_equal(y_np[8, 600, 600:620], expect) expect = [ - 8., 6., 7., 4., 3., 0., 4., 6., 6., 4., 3., 10., 5., 1., 3., 8., 8., - 2., 1., 4. + 8.0, + 6.0, + 7.0, + 4.0, + 3.0, + 0.0, + 4.0, + 6.0, + 6.0, + 4.0, + 3.0, + 10.0, + 5.0, + 1.0, + 3.0, + 8.0, + 8.0, + 2.0, + 1.0, + 4.0, ] np.testing.assert_array_equal(y_np[12, 900, 900:920], expect) expect = [ - 2., 1., 14., 3., 6., 5., 2., 2., 6., 5., 7., 4., 8., 4., 8., 4., 5., - 7., 1., 7. 
+ 2.0, + 1.0, + 14.0, + 3.0, + 6.0, + 5.0, + 2.0, + 2.0, + 6.0, + 5.0, + 7.0, + 4.0, + 8.0, + 4.0, + 8.0, + 4.0, + 5.0, + 7.0, + 1.0, + 7.0, ] np.testing.assert_array_equal(y_np[15, 1023, 1000:1020], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py index 84f61f7bebc4ec19f21766681a98ab7d5fefc52f..b5deecc47007ddc04945f6638bd0e23316811e1b 100644 --- a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py +++ b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py @@ -24,23 +24,27 @@ def PolygonBoxRestore(input): geo_channels = shape[1] h = shape[2] w = shape[3] - h_indexes = np.array(list(range(h)) * w).reshape( - [w, h]).transpose()[np.newaxis, :] # [1, h, w] - w_indexes = np.array(list(range(w)) * h).reshape( - [h, w])[np.newaxis, :] # [1, h, w] - indexes = np.concatenate( - (w_indexes, h_indexes))[np.newaxis, :] # [1, 2, h, w] + h_indexes = ( + np.array(list(range(h)) * w).reshape([w, h]).transpose()[np.newaxis, :] + ) # [1, h, w] + w_indexes = np.array(list(range(w)) * h).reshape([h, w])[ + np.newaxis, : + ] # [1, h, w] + indexes = np.concatenate((w_indexes, h_indexes))[ + np.newaxis, : + ] # [1, 2, h, w] + indexes = indexes.repeat([geo_channels / 2], axis=0)[ + np.newaxis, : + ] # [1, geo_channels/2, 2, h, w] indexes = indexes.repeat( - [geo_channels / 2], - axis=0)[np.newaxis, :] # [1, geo_channels/2, 2, h, w] - indexes = indexes.repeat([batch_size], - axis=0) # [batch_size, geo_channels/2, 2, h, w] - return indexes.reshape( - input.shape) * 4 - input # [batch_size, geo_channels, h, w] + [batch_size], axis=0 + ) # [batch_size, geo_channels/2, 2, h, w] + return ( + indexes.reshape(input.shape) * 4 - input + ) # [batch_size, geo_channels, h, w] class TestPolygonBoxRestoreOp(OpTest): - def config(self): self.input_shape = (1, 8, 2, 2) @@ -57,25 +61,21 @@ class TestPolygonBoxRestoreOp(OpTest): class TestCase1(TestPolygonBoxRestoreOp): - def config(self): self.input_shape = (2, 10, 3, 2) class TestCase2(TestPolygonBoxRestoreOp): - def config(self): self.input_shape = (3, 12, 4, 5) class TestPolygonBoxInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): - input = fluid.data(name='input', - shape=[None, 3, 32, 32], - dtype='int64') + input = fluid.data( + name='input', shape=[None, 3, 32, 32], dtype='int64' + ) out = fluid.layers.polygon_box_transform(input) self.assertRaises(TypeError, test_invalid_input) diff --git a/python/paddle/fluid/tests/unittests/test_pool1d_api.py b/python/paddle/fluid/tests/unittests/test_pool1d_api.py index 19d20817342168b829a4aa3bdb19462ff3cdc34e..a3aa60e0ebd10a1ccea21d53606bb25d027dde60 100644 --- a/python/paddle/fluid/tests/unittests/test_pool1d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool1d_api.py @@ -29,24 +29,28 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def max_pool1D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=False, - adaptive=False, - data_type=np.float64): +def max_pool1D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=False, + adaptive=False, + data_type=np.float64, +): N, C, L = x.shape if global_pool == 1: ksize = [L] if adaptive: L_out = ksize[0] else: - L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - L - ksize[0] + 2 * 
paddings[0]) // strides[0] + 1 + L_out = ( + (L - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) out = np.zeros((N, C, L_out)) for i in range(L_out): @@ -62,24 +66,28 @@ def max_pool1D_forward_naive(x, return out -def avg_pool1D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=False, - adaptive=False, - data_type=np.float64): +def avg_pool1D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=False, + adaptive=False, + data_type=np.float64, +): N, C, L = x.shape if global_pool == 1: ksize = [L] if adaptive: L_out = ksize[0] else: - L_out = (L - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + L_out = ( + (L - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (L - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) out = np.zeros((N, C, L_out)) for i in range(L_out): @@ -91,19 +99,21 @@ def avg_pool1D_forward_naive(x, r_end = np.min((i * strides[0] + ksize[0] - paddings[0], L)) x_masked = x[:, :, r_start:r_end] - field_size = (r_end - r_start) \ - if (exclusive or adaptive) else (ksize[0]) + field_size = ( + (r_end - r_start) if (exclusive or adaptive) else (ksize[0]) + ) if data_type == np.int8 or data_type == np.uint8: - out[:, :, i] = (np.rint(np.sum(x_masked, axis=(2, 3)) / - field_size)).astype(data_type) + out[:, :, i] = ( + np.rint(np.sum(x_masked, axis=(2, 3)) / field_size) + ).astype(data_type) else: - out[:, :, - i] = (np.sum(x_masked, axis=(2)) / field_size).astype(data_type) + out[:, :, i] = (np.sum(x_masked, axis=(2)) / field_size).astype( + data_type + ) return out class TestPool1D_API(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -116,16 +126,16 @@ class TestPool1D_API(unittest.TestCase): result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32]).astype("float32") - result_np = avg_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0], - ceil_mode=False) + result_np = avg_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0], ceil_mode=False + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): @@ -134,16 +144,15 @@ class TestPool1D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=[0]) - result_np = avg_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0]) + result_np = avg_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool1d_dg = paddle.nn.layer.AvgPool1D(kernel_size=2, - stride=None, - padding=0) + avg_pool1d_dg = paddle.nn.layer.AvgPool1D( + kernel_size=2, stride=None, padding=0 + ) result = avg_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -151,24 +160,19 @@ class TestPool1D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") input = 
fluid.dygraph.to_variable(input_np) - result = F.avg_pool1d(input, - kernel_size=2, - stride=2, - padding=[1], - exclusive=True) - - result_np = avg_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[1], - exclusive=False) + result = F.avg_pool1d( + input, kernel_size=2, stride=2, padding=[1], exclusive=True + ) + + result_np = avg_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[1], exclusive=False + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool1d_dg = paddle.nn.AvgPool1D(kernel_size=2, - stride=None, - padding=1, - exclusive=True) + avg_pool1d_dg = paddle.nn.AvgPool1D( + kernel_size=2, stride=None, padding=1, exclusive=True + ) result = avg_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -179,15 +183,16 @@ class TestPool1D_API(unittest.TestCase): result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0]) input_np = np.random.random([2, 3, 32]).astype("float32") - result_np = max_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0]) + result_np = max_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): @@ -196,16 +201,15 @@ class TestPool1D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = F.max_pool1d(input, kernel_size=2, stride=2, padding=0) - result_np = max_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0]) + result_np = max_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool1d_dg = paddle.nn.layer.MaxPool1D(kernel_size=2, - stride=None, - padding=0) + max_pool1d_dg = paddle.nn.layer.MaxPool1D( + kernel_size=2, stride=None, padding=0 + ) result = max_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -213,22 +217,19 @@ class TestPool1D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result, index = F.max_pool1d(input, - kernel_size=2, - stride=2, - padding=0, - return_mask=True) + result, index = F.max_pool1d( + input, kernel_size=2, stride=2, padding=0, return_mask=True + ) - result_np = max_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0]) + result_np = max_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool1d_dg = paddle.nn.layer.MaxPool1D(kernel_size=2, - stride=None, - padding=0) + max_pool1d_dg = paddle.nn.layer.MaxPool1D( + kernel_size=2, stride=None, padding=0 + ) result = max_pool1d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -236,15 +237,13 @@ class TestPool1D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = F.max_pool1d(input, - kernel_size=2, - stride=2, - padding="SAME") + result = F.max_pool1d( + input, kernel_size=2, stride=2, padding="SAME" + ) - result_np = max_pool1D_forward_naive(input_np, - ksize=[2], 
- strides=[2], - paddings=[0]) + result_np = max_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -252,15 +251,13 @@ class TestPool1D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = F.avg_pool1d(input, - kernel_size=2, - stride=2, - padding="SAME") + result = F.avg_pool1d( + input, kernel_size=2, stride=2, padding="SAME" + ) - result_np = avg_pool1D_forward_naive(input_np, - ksize=[2], - strides=[2], - paddings=[0]) + result_np = avg_pool1D_forward_naive( + input_np, ksize=[2], strides=[2], paddings=[0] + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -281,129 +278,145 @@ class TestPool1D_API(unittest.TestCase): class TestPool2DError_API(unittest.TestCase): - def test_error_api(self): - def run1(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[2]] - res_pd = F.max_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding) + res_pd = F.max_pool1d( + input_pd, kernel_size=2, stride=2, padding=padding + ) self.assertRaises(ValueError, run1) def run2(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[2]] - res_pd = F.max_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding) + res_pd = F.max_pool1d( + input_pd, kernel_size=2, stride=2, padding=padding + ) self.assertRaises(ValueError, run2) def run3(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "padding" - res_pd = F.max_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding) + res_pd = F.max_pool1d( + input_pd, kernel_size=2, stride=2, padding=padding + ) self.assertRaises(ValueError, run3) def run4(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = F.max_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True) + res_pd = F.max_pool1d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run4) def run5(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = F.max_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True) + res_pd = F.max_pool1d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run5) def run6(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = 
F.avg_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True) + res_pd = F.avg_pool1d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run6) def run7(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "paddle" - res_pd = F.avg_pool1d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True) + res_pd = F.avg_pool1d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run7) def run_kernel_out_of_range(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = 0 - res_pd = F.avg_pool1d(input_pd, - kernel_size=-1, - stride=2, - padding=padding, - ceil_mode=True) + res_pd = F.avg_pool1d( + input_pd, + kernel_size=-1, + stride=2, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run_kernel_out_of_range) def run_stride_out_of_range(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = 0 - res_pd = F.avg_pool1d(input_pd, - kernel_size=2, - stride=0, - padding=padding, - ceil_mode=True) + res_pd = F.avg_pool1d( + input_pd, + kernel_size=2, + stride=0, + padding=padding, + ceil_mode=True, + ) self.assertRaises(ValueError, run_stride_out_of_range) diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_api.py b/python/paddle/fluid/tests/unittests/test_pool2d_api.py index d20844d255876652045dbc7049ac772191658cb9..7e100d4c90558f153332ec4a04e7bc8ba2b39226 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_api.py @@ -19,11 +19,14 @@ import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.framework import _test_eager_guard from paddle.nn.functional import avg_pool2d, max_pool2d -from test_pool2d_op import avg_pool2D_forward_naive, max_pool2D_forward_naive, pool2D_forward_naive +from test_pool2d_op import ( + avg_pool2D_forward_naive, + max_pool2D_forward_naive, + pool2D_forward_naive, +) class TestPool2D_API(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -32,22 +35,26 @@ class TestPool2D_API(unittest.TestCase): def check_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 32, 32], - dtype="float32") + input = fluid.data( + name="input", shape=[2, 3, 32, 32], dtype="float32" + ) result = avg_pool2d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32, 32]).astype("float32") - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='avg') + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='avg', + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + 
fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): @@ -56,16 +63,18 @@ class TestPool2D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = avg_pool2d(input, kernel_size=2, stride=2, padding=0) - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='avg') + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='avg', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, - stride=2, - padding=0) + avg_pool2d_dg = paddle.nn.layer.AvgPool2D( + kernel_size=2, stride=2, padding=0 + ) result = avg_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -73,24 +82,23 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = avg_pool2d(input, - kernel_size=2, - stride=2, - padding=1, - ceil_mode=False) - - result_np = avg_pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[1, 1], - ceil_mode=False, - exclusive=False) + result = avg_pool2d( + input, kernel_size=2, stride=2, padding=1, ceil_mode=False + ) + + result_np = avg_pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[1, 1], + ceil_mode=False, + exclusive=False, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, - stride=2, - padding=1, - ceil_mode=False) + avg_pool2d_dg = paddle.nn.layer.AvgPool2D( + kernel_size=2, stride=2, padding=1, ceil_mode=False + ) result = avg_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -98,66 +106,69 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = avg_pool2d(input, - kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) - - result_np = avg_pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - ceil_mode=True) + result = avg_pool2d( + input, kernel_size=2, stride=2, padding=0, ceil_mode=True + ) + + result_np = avg_pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + ceil_mode=True, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) + avg_pool2d_dg = paddle.nn.layer.AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True + ) result = avg_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 32, 32], - dtype="float32") + input = fluid.data( + name="input", shape=[2, 3, 32, 32], dtype="float32" + ) result = max_pool2d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32, 32]).astype("float32") - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='max') + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='max', + ) exe = 
fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = max_pool2d(input, - kernel_size=2, - stride=2, - padding=0, - return_mask=False) - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='max') + result = max_pool2d( + input, kernel_size=2, stride=2, padding=0, return_mask=False + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='max', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, - stride=2, - padding=0) + max_pool2d_dg = paddle.nn.layer.MaxPool2D( + kernel_size=2, stride=2, padding=0 + ) result = max_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -165,46 +176,51 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable( - np.transpose(input_np, [0, 2, 3, 1])) - result = max_pool2d(input, - kernel_size=2, - stride=2, - padding=0, - return_mask=False, - data_format="NHWC") - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='max') - np.testing.assert_allclose(np.transpose(result.numpy(), - [0, 3, 1, 2]), - result_np, - rtol=1e-05) + np.transpose(input_np, [0, 2, 3, 1]) + ) + result = max_pool2d( + input, + kernel_size=2, + stride=2, + padding=0, + return_mask=False, + data_format="NHWC", + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='max', + ) + np.testing.assert_allclose( + np.transpose(result.numpy(), [0, 3, 1, 2]), + result_np, + rtol=1e-05, + ) def check_max_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = max_pool2d(input, - kernel_size=2, - stride=2, - padding=1, - ceil_mode=False) - - result_np = max_pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[1, 1], - ceil_mode=False, - exclusive=False) + result = max_pool2d( + input, kernel_size=2, stride=2, padding=1, ceil_mode=False + ) + + result_np = max_pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[1, 1], + ceil_mode=False, + exclusive=False, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, - stride=2, - padding=1, - ceil_mode=False) + max_pool2d_dg = paddle.nn.layer.MaxPool2D( + kernel_size=2, stride=2, padding=1, ceil_mode=False + ) result = max_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -212,23 +228,22 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = max_pool2d(input, - kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) - - result_np = 
max_pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - ceil_mode=True) + result = max_pool2d( + input, kernel_size=2, stride=2, padding=0, ceil_mode=True + ) + + result_np = max_pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + ceil_mode=True, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) + max_pool2d_dg = paddle.nn.layer.MaxPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True + ) result = max_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -236,23 +251,27 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result, indices = max_pool2d(input, - kernel_size=2, - stride=None, - padding="SAME", - return_mask=True) - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='max', - padding_algorithm="SAME") + result, indices = max_pool2d( + input, + kernel_size=2, + stride=None, + padding="SAME", + return_mask=True, + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='max', + padding_algorithm="SAME", + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, - stride=2, - padding=0) + max_pool2d_dg = paddle.nn.layer.MaxPool2D( + kernel_size=2, stride=2, padding=0 + ) result = max_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -260,22 +279,23 @@ class TestPool2D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = avg_pool2d(input, - kernel_size=2, - stride=None, - padding="SAME") - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='avg', - padding_algorithm="SAME") + result = avg_pool2d( + input, kernel_size=2, stride=None, padding="SAME" + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='avg', + padding_algorithm="SAME", + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, - stride=2, - padding=0) + avg_pool2d_dg = paddle.nn.layer.AvgPool2D( + kernel_size=2, stride=2, padding=0 + ) result = avg_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -284,22 +304,26 @@ class TestPool2D_API(unittest.TestCase): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) padding = [[0, 0], [0, 0], [0, 0], [0, 0]] - result = max_pool2d(input, - kernel_size=2, - stride=2, - padding=padding, - return_mask=False) - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='max') + result = max_pool2d( + input, + kernel_size=2, + stride=2, + padding=padding, + return_mask=False, + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='max', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, - stride=2, - padding=0) + 
max_pool2d_dg = paddle.nn.layer.MaxPool2D( + kernel_size=2, stride=2, padding=0 + ) result = max_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -308,22 +332,26 @@ class TestPool2D_API(unittest.TestCase): input_np = np.random.random([2, 3, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) padding = [[0, 0], [0, 0], [0, 0], [0, 0]] - result = avg_pool2d(input, - kernel_size=2, - stride=2, - padding=padding, - divisor_override=4) - - result_np = pool2D_forward_naive(input_np, - ksize=[2, 2], - strides=[2, 2], - paddings=[0, 0], - pool_type='avg') + result = avg_pool2d( + input, + kernel_size=2, + stride=2, + padding=padding, + divisor_override=4, + ) + + result_np = pool2D_forward_naive( + input_np, + ksize=[2, 2], + strides=[2, 2], + paddings=[0, 0], + pool_type='avg', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, - stride=2, - padding=0) + avg_pool2d_dg = paddle.nn.layer.AvgPool2D( + kernel_size=2, stride=2, padding=0 + ) result = avg_pool2d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -348,193 +376,227 @@ class TestPool2D_API(unittest.TestCase): class TestPool2DError_API(unittest.TestCase): - def test_error_api(self): - def run1(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0]] - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding) + res_pd = max_pool2d( + input_pd, kernel_size=2, stride=2, padding=padding + ) self.assertRaises(ValueError, run1) def run2(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0]] - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NHWC') + res_pd = max_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NHWC', + ) self.assertRaises(ValueError, run2) def run3(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "padding" - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NHWC') + res_pd = max_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NHWC', + ) self.assertRaises(ValueError, run3) def run3_avg(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "padding" - res_pd = avg_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NHWC', + ) self.assertRaises(ValueError, run3_avg) def run4(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = 
fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True, - data_format='NHWC') + res_pd = max_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + data_format='NHWC', + ) self.assertRaises(ValueError, run4) def run4_avg(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = avg_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + data_format='NHWC', + ) self.assertRaises(ValueError, run4_avg) def run5(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "padding" - res_pd = avg_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NHWC', + ) self.assertRaises(ValueError, run5) def run6(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = avg_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=True, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=True, + data_format='NHWC', + ) self.assertRaises(ValueError, run6) def run7(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = avg_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=False, - data_format='NNNN') + res_pd = avg_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=False, + data_format='NNNN', + ) self.assertRaises(ValueError, run7) def run8(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = "VALID" - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - ceil_mode=False, - data_format='NNNN') + res_pd = max_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + ceil_mode=False, + data_format='NNNN', + ) self.assertRaises(ValueError, run8) def run9(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = max_pool2d(input_pd, - kernel_size=2, - stride=2, - padding=0, - ceil_mode=False, - data_format='NHWC', - return_mask=True) + res_pd = max_pool2d( + input_pd, + kernel_size=2, + stride=2, + padding=0, + ceil_mode=False, + data_format='NHWC', + return_mask=True, + ) 
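# A minimal standalone sketch (not part of this patch) of the output-size
# arithmetic that the naive references and the error cases above rely on.
# The helper name `pool_out_size` is illustrative, not a Paddle API:
def pool_out_size(in_size, ksize, stride, pad, ceil_mode=False):
    # floor division by default; ceil_mode rounds the last partial window up
    numer = in_size + 2 * pad - ksize
    if ceil_mode:
        numer += stride - 1
    return numer // stride + 1

# e.g. a 32x32 input with kernel 2, stride 2, padding 0 -> 16x16, which is why
# the dygraph checks above build result_np with ksize=[2, 2], strides=[2, 2],
# paddings=[0, 0]; padding 1 gives 17x17, and ceil_mode only changes the result
# when a window would otherwise be cut off (e.g. kernel 3, stride 2).
assert pool_out_size(32, 2, 2, 0) == 16
assert pool_out_size(32, 2, 2, 1) == 17
assert pool_out_size(32, 3, 2, 0) == 15
assert pool_out_size(32, 3, 2, 0, ceil_mode=True) == 16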
self.assertRaises(ValueError, run9) def run_kernel_out_of_range(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool2d(input_pd, - kernel_size=[-1, 2], - stride=2, - padding=0, - ceil_mode=False, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=[-1, 2], + stride=2, + padding=0, + ceil_mode=False, + data_format='NHWC', + ) self.assertRaises(ValueError, run_kernel_out_of_range) def run_stride_out_of_range(): with fluid.dygraph.guard(): - input_np = np.random.uniform(-1, 1, - [2, 3, 32, 32]).astype(np.float32) + input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool2d(input_pd, - kernel_size=3, - stride=[0, 2], - padding=0, - ceil_mode=False, - data_format='NHWC') + res_pd = avg_pool2d( + input_pd, + kernel_size=3, + stride=[0, 2], + padding=0, + ceil_mode=False, + data_format='NHWC', + ) self.assertRaises(ValueError, run_stride_out_of_range) diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 46a822a513893e2d1ad7a45212a38a548f39ea59..7c44827262d6c4c1b4e7fba46f0f9d9b0ee784f0 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -29,15 +29,17 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def max_pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_type=np.float64): +def max_pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_type=np.float64, +): if data_type == np.float64 and core.is_compiled_with_rocm(): data_type = np.float32 N, C, H, W = x.shape @@ -46,12 +48,16 @@ def max_pool2D_forward_naive(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 - W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - - 1) // strides[1] + 1 if ceil_mode else ( - W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + H_out = ( + (H - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + 2 * paddings[1] + strides[1] - 1) // strides[1] + 1 + if ceil_mode + else (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + ) out = np.zeros((N, C, H_out, W_out)) for i in range(H_out): for j in range(W_out): @@ -71,15 +77,17 @@ def max_pool2D_forward_naive(x, return out -def avg_pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_type=np.float64): +def avg_pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_type=np.float64, +): if data_type == np.float64 and core.is_compiled_with_rocm(): data_type = np.float32 N, C, H, W = x.shape @@ -88,12 +96,16 @@ def avg_pool2D_forward_naive(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode 
else ( - H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 - W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - - 1) // strides[1] + 1 if ceil_mode else ( - W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + H_out = ( + (H - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + 2 * paddings[1] + strides[1] - 1) // strides[1] + 1 + if ceil_mode + else (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + ) out = np.zeros((N, C, H_out, W_out)) for i in range(H_out): for j in range(W_out): @@ -115,39 +127,44 @@ def avg_pool2D_forward_naive(x, x_masked = x[:, :, r_start:r_end, c_start:c_end] - if (exclusive or adaptive): + if exclusive or adaptive: field_size = (r_end - r_start) * (c_end - c_start) if data_type == np.int8 or data_type == np.uint8: - out[:, :, i, - j] = (np.rint(np.sum(x_masked, axis=(2, 3)) / - field_size)).astype(data_type) + out[:, :, i, j] = ( + np.rint(np.sum(x_masked, axis=(2, 3)) / field_size) + ).astype(data_type) else: - out[:, :, i, j] = (np.sum(x_masked, axis=(2, 3)) / - field_size).astype(data_type) + out[:, :, i, j] = ( + np.sum(x_masked, axis=(2, 3)) / field_size + ).astype(data_type) return out -def pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_format='NCHW', - pool_type="max", - padding_algorithm="EXPLICIT"): +def pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_format='NCHW', + pool_type="max", + padding_algorithm="EXPLICIT", +): # update paddings def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -157,9 +174,10 @@ def pool2D_forward_naive(x, if isinstance(padding_algorithm, str): padding_algorithm = padding_algorithm.upper() if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] @@ -167,7 +185,8 @@ def pool2D_forward_naive(x, raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+ ) elif padding_algorithm == "SAME": input_data_shape = [] if data_format == "NCHW": @@ -180,8 +199,11 @@ def pool2D_forward_naive(x, is_sys = True if len(paddings) == 2 else False N = x.shape[0] - C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \ + C, H, W = ( + [x.shape[1], x.shape[2], x.shape[3]] + if data_format == 'NCHW' else [x.shape[3], x.shape[1], x.shape[2]] + ) if global_pool == 1: ksize = [H, W] @@ -195,13 +217,26 @@ def pool2D_forward_naive(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) // strides[0] + 1 \ - if ceil_mode else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 - W_out = (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) // strides[1] + 1 \ - if ceil_mode else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 - - out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \ + H_out = ( + (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) + // strides[0] + + 1 + if ceil_mode + else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) + // strides[1] + + 1 + if ceil_mode + else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + ) + + out = ( + np.zeros((N, C, H_out, W_out)) + if data_format == 'NCHW' else np.zeros((N, H_out, W_out, C)) + ) for i in range(H_out): if adaptive: in_h_start = adaptive_start_index(i, H, ksize[0]) @@ -229,21 +264,22 @@ def pool2D_forward_naive(x, if data_format == 'NCHW': x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) - + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) -# if (exclusive or adaptive) else (ksize[0] * ksize[1]) + # if (exclusive or adaptive) else (ksize[0] * ksize[1]) out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size elif pool_type == 'max': out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) elif data_format == 'NHWC': x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size elif pool_type == 'max': out[:, i, j, :] = np.max(x_masked, axis=(1, 2)) @@ -251,7 +287,6 @@ def pool2D_forward_naive(x, class TestPool2D_Op_Mixin(object): - def setUp(self): self.op_type = "pool2d" self.use_cudnn = False @@ -271,12 +306,19 @@ class TestPool2D_Op_Mixin(object): self.init_shape() input = np.random.random(self.shape).astype(self.dtype) - output = pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, self.data_format, - self.pool_type, - self.padding_algorithm).astype(self.dtype) + output = pool2D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.data_format, + self.pool_type, + self.padding_algorithm, + ).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -304,7 +346,8 @@ class TestPool2D_Op_Mixin(object): if self.has_cudnn(): place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False)) 
+ place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + ) else: self.check_output(check_dygraph=(self.use_mkldnn == False)) @@ -314,16 +357,20 @@ class TestPool2D_Op_Mixin(object): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.has_cudnn() and self.pool_type != "max": place = core.CUDAPlace(0) - self.check_grad_with_place(place, - set(['X']), - 'Out', - max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + set(['X']), + 'Out', + max_relative_error=0.07, + check_dygraph=(self.use_mkldnn == False), + ) elif self.pool_type != "max": - self.check_grad(set(['X']), - 'Out', - max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad( + set(['X']), + 'Out', + max_relative_error=0.07, + check_dygraph=(self.use_mkldnn == False), + ) def init_data_format(self): self.data_format = "NCHW" @@ -367,7 +414,6 @@ class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest): class TestCase1(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -387,7 +433,6 @@ class TestCase1(TestPool2D_Op): class TestCase2(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -407,35 +452,31 @@ class TestCase2(TestPool2D_Op): class TestCase3(TestPool2D_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase4(TestCase1): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase5(TestCase2): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive -#--------------------test pool2d cudnn-------------------- +# --------------------test pool2d cudnn-------------------- def create_test_cudnn_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -451,15 +492,14 @@ create_test_cudnn_class(TestCase3) create_test_cudnn_class(TestCase4) create_test_cudnn_class(TestCase5) -#--------------------test pool2d cudnn_fp16-------------------- +# --------------------test pool2d cudnn_fp16-------------------- def create_test_cudnn_fp16_class(parent, check_grad=True): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNFp16Case(parent): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -472,19 +512,24 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) - if core.is_float16_supported( - place) and self.pool_type != "max" and check_grad: + if ( + core.is_float16_supported(place) + and self.pool_type != "max" + and check_grad + ): self.check_grad_with_place( place, set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op") TestCUDNNFp16Case.__name__ = cls_name @@ -492,11 +537,10 @@ def create_test_cudnn_fp16_class(parent, 
check_grad=True): def create_test_fp16_class(parent, check_grad=True): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestFp16Case(parent): - def init_kernel_type(self): self.use_cudnn = False self.dtype = np.float16 @@ -509,19 +553,24 @@ def create_test_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) - if core.is_float16_supported( - place) and self.pool_type != "max" and check_grad: + if ( + core.is_float16_supported(place) + and self.pool_type != "max" + and check_grad + ): self.check_grad_with_place( place, set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") TestFp16Case.__name__ = cls_name @@ -542,15 +591,14 @@ create_test_fp16_class(TestCase3) create_test_fp16_class(TestCase4) create_test_fp16_class(TestCase5) -#--------------------test pool2d use ceil mode-------------------- +# --------------------test pool2d use ceil mode-------------------- def create_test_cudnn_use_ceil_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestPool2DUseCeilCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -567,9 +615,7 @@ create_test_cudnn_use_ceil_class(TestCase1) def create_test_use_ceil_class(parent): - class TestPool2DUseCeilCase(parent): - def init_ceil_mode(self): self.ceil_mode = True @@ -583,13 +629,11 @@ create_test_use_ceil_class(TestCase2) class TestAvgInclude(TestCase2): - def init_exclusive(self): self.exclusive = False class TestCUDNNAvgInclude(TestCase2): - def init_kernel_type(self): self.use_cudnn = True @@ -598,13 +642,11 @@ class TestCUDNNAvgInclude(TestCase2): class TestAvgPoolAdaptive(TestCase1): - def init_adaptive(self): self.adaptive = True class TestAvgPoolAdaptiveAsyOutSize(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -617,11 +659,10 @@ class TestAvgPoolAdaptiveAsyOutSize(TestCase1): self.paddings = [0, 0, 0, 0] -#-------test pool2d with asymmetric padding----- +# -------test pool2d with asymmetric padding----- class TestPool2D_AsyPadding(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -632,7 +673,6 @@ class TestPool2D_AsyPadding(TestPool2D_Op): class TestCase1_AsyPadding(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -643,7 +683,6 @@ class TestCase1_AsyPadding(TestCase1): class TestCase2_AsyPadding(TestCase2): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -654,7 +693,6 @@ class TestCase2_AsyPadding(TestCase2): class TestCase3_AsyPadding(TestCase3): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -665,7 +703,6 @@ class TestCase3_AsyPadding(TestCase3): class TestCase4_AsyPadding(TestCase4): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -676,7 +713,6 @@ class TestCase4_AsyPadding(TestCase4): class TestCase5_AsyPadding((TestCase5)): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -708,7 +744,6 @@ 
create_test_use_ceil_class(TestCase2_AsyPadding) class TestAvgInclude_AsyPadding(TestCase2): - def init_exclusive(self): self.exclusive = False @@ -722,7 +757,6 @@ class TestAvgInclude_AsyPadding(TestCase2): class TestCUDNNAvgInclude_AsyPadding(TestCase2): - def init_kernel_type(self): self.use_cudnn = True @@ -739,7 +773,6 @@ class TestCUDNNAvgInclude_AsyPadding(TestCase2): class TestAvgPoolAdaptive_AsyPadding(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -752,9 +785,8 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1): self.shape = [2, 3, 7, 7] -#----------- test channel_last -------------- +# ----------- test channel_last -------------- class TestPool2D_channel_last(TestPool2D_Op): - def init_data_format(self): self.data_format = "NHWC" @@ -763,7 +795,6 @@ class TestPool2D_channel_last(TestPool2D_Op): class TestCase1_channel_last(TestCase1): - def init_data_format(self): self.data_format = "NHWC" @@ -772,7 +803,6 @@ class TestCase1_channel_last(TestCase1): class TestCase2_channel_last(TestCase2): - def init_data_format(self): self.data_format = "NHWC" @@ -781,7 +811,6 @@ class TestCase2_channel_last(TestCase2): class TestCase3_channel_last(TestCase3): - def init_data_format(self): self.data_format = "NHWC" @@ -790,7 +819,6 @@ class TestCase3_channel_last(TestCase3): class TestCase4_channel_last(TestCase4): - def init_data_format(self): self.data_format = "NHWC" @@ -799,7 +827,6 @@ class TestCase4_channel_last(TestCase4): class TestCase5_channel_last(TestCase5): - def init_data_format(self): self.data_format = "NHWC" @@ -829,7 +856,6 @@ create_test_use_ceil_class(TestCase2_channel_last) class TestCase5_Max(TestCase2): - def init_pool_type(self): self.pool_type = "max" @@ -838,16 +864,14 @@ class TestCase5_Max(TestCase2): return if self.has_cudnn() and self.pool_type == "max": place = core.CUDAPlace(0) - self.check_grad_with_place(place, - set(['X']), - 'Out', - max_relative_error=1.00) + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=1.00 + ) elif self.pool_type == "max": self.check_grad(set(['X']), 'Out', max_relative_error=1.00) class TestCase5_channel_last_Max(TestCase5_Max): - def init_data_format(self): self.data_format = "NHWC" @@ -860,13 +884,11 @@ create_test_cudnn_class(TestCase5_channel_last_Max) class TestAvgInclude_channel_last(TestCase2_channel_last): - def init_exclusive(self): self.exclusive = False class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last): - def init_kernel_type(self): self.use_cudnn = True @@ -875,13 +897,11 @@ class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last): class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last): - def init_adaptive(self): self.adaptive = True class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -890,7 +910,6 @@ class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding): class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -899,7 +918,6 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -908,7 +926,6 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -917,7 +934,6 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): class 
TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -926,7 +942,6 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -942,8 +957,9 @@ create_test_cudnn_class(TestCase4_AsyPadding_channel_last) create_test_cudnn_class(TestCase5_AsyPadding_channel_last) create_test_cudnn_fp16_class(TestPool2D_AsyPadding_channel_last) -create_test_cudnn_fp16_class(TestCase1_AsyPadding_channel_last, - check_grad=False) +create_test_cudnn_fp16_class( + TestCase1_AsyPadding_channel_last, check_grad=False +) create_test_cudnn_fp16_class(TestCase2_AsyPadding_channel_last) create_test_cudnn_fp16_class(TestCase3_AsyPadding_channel_last) create_test_cudnn_fp16_class(TestCase4_AsyPadding_channel_last) @@ -957,7 +973,6 @@ create_test_use_ceil_class(TestCase2_AsyPadding_channel_last) class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): - def init_data_format(self): self.data_format = "NHWC" @@ -965,9 +980,9 @@ class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): self.shape = [2, 7, 7, 3] -class TestCUDNNAvgInclude_AsyPadding_channel_last(TestCUDNNAvgInclude_AsyPadding - ): - +class TestCUDNNAvgInclude_AsyPadding_channel_last( + TestCUDNNAvgInclude_AsyPadding +): def init_data_format(self): self.data_format = "NHWC" @@ -975,9 +990,9 @@ class TestCUDNNAvgInclude_AsyPadding_channel_last(TestCUDNNAvgInclude_AsyPadding self.shape = [2, 7, 7, 3] -class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding - ): - +class TestAvgPoolAdaptive_AsyPadding_channel_last( + TestAvgPoolAdaptive_AsyPadding +): def init_data_format(self): self.data_format = "NHWC" @@ -989,9 +1004,7 @@ class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.paddings = [0, 0] self.padding_algorithm = "SAME" @@ -1017,11 +1030,10 @@ create_test_padding_SAME_class(TestCase5_channel_last) def create_test_cudnn_padding_SAME_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingSMAECase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -1050,9 +1062,7 @@ create_test_cudnn_padding_SAME_class(TestCase5_channel_last) def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.paddings = [1, 1] self.padding_algorithm = "VALID" @@ -1078,11 +1088,10 @@ create_test_padding_VALID_class(TestCase5_channel_last) def create_test_cudnn_padding_VALID_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingVALIDCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -1111,7 +1120,6 @@ create_test_cudnn_padding_VALID_class(TestCase5_channel_last) class TestCase1_strides(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 2] @@ -1127,106 +1135,131 @@ create_test_cudnn_padding_SAME_class(TestCase1_strides) # ----- test API class TestPool2DAPI(unittest.TestCase): - def test_api(self): x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32") x_NCHW = 
np.random.random([2, 3, 5, 5]).astype("float32") - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCHW = fluid.layers.data(name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32") - - input_NHWC_negetive = fluid.layers.data(name="input_NHWC_negetive", - shape=[2, -1, 5, 3], - append_batch_size=False, - dtype="float32") - - input_NCHW_negetive = fluid.layers.data(name="input_NCHW_negetive", - shape=[2, 3, -1, -1], - append_batch_size=False, - dtype="float32") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCHW = fluid.layers.data( + name="input_NCHW", + shape=[2, 3, 5, 5], + append_batch_size=False, + dtype="float32", + ) + + input_NHWC_negetive = fluid.layers.data( + name="input_NHWC_negetive", + shape=[2, -1, 5, 3], + append_batch_size=False, + dtype="float32", + ) + + input_NCHW_negetive = fluid.layers.data( + name="input_NCHW_negetive", + shape=[2, 3, -1, -1], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3] - out_1 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - use_cudnn=False, - data_format="NHWC") - - out_2 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [1, 1], [1, 1], - [0, 0]], - use_cudnn=False, - data_format="NHWC") - - out_3 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [0, 0], [1, 1], - [1, 1]], - use_cudnn=False, - data_format="NCHW") - - out_4 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[1, 2, 1, 0], - use_cudnn=False, - data_format="NCHW") + out_1 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1], + use_cudnn=False, + data_format="NHWC", + ) + + out_2 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + use_cudnn=False, + data_format="NHWC", + ) + + out_3 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]], + use_cudnn=False, + data_format="NCHW", + ) + + out_4 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[1, 2, 1, 0], + use_cudnn=False, + data_format="NCHW", + ) # test VALID - out_5 = fluid.layers.pool2d(input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding="VALID", - use_cudnn=False, - data_format="NCHW") - - out_6 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", - use_cudnn=False, - data_format="NHWC") + out_5 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=ksize, + pool_type="avg", + pool_padding="VALID", + use_cudnn=False, + data_format="NCHW", + ) + + out_6 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALID", + use_cudnn=False, + data_format="NHWC", + ) # test SAME - out_7 = fluid.layers.pool2d(input=input_NCHW, - pool_size=[4, 4], - pool_type="avg", - pool_padding="SAME", - use_cudnn=False, - data_format="NCHW") - - out_8 = fluid.layers.pool2d(input=input_NHWC, - pool_size=[4, 4], - pool_type="max", - pool_padding="SAME", - use_cudnn=False, - data_format="NHWC") + out_7 = fluid.layers.pool2d( + input=input_NCHW, + pool_size=[4, 4], + pool_type="avg", + 
pool_padding="SAME", + use_cudnn=False, + data_format="NCHW", + ) + + out_8 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=[4, 4], + pool_type="max", + pool_padding="SAME", + use_cudnn=False, + data_format="NHWC", + ) # test negetive - out_9 = fluid.layers.pool2d(input=input_NHWC_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], - use_cudnn=False, - data_format="NHWC") + out_9 = fluid.layers.pool2d( + input=input_NHWC_negetive, + pool_size=ksize, + pool_type="avg", + pool_padding=[0, 0], + use_cudnn=False, + data_format="NHWC", + ) assert out_9.shape == (2, -1, 3, 3) - out_10 = fluid.layers.pool2d(input=input_NCHW_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], - use_cudnn=False, - data_format="NCHW") + out_10 = fluid.layers.pool2d( + input=input_NCHW_negetive, + pool_size=ksize, + pool_type="avg", + pool_padding=[0, 0], + use_cudnn=False, + data_format="NCHW", + ) assert out_10.shape == (2, 3, -1, -1) exe = fluid.Executor(place=fluid.CPUPlace()) @@ -1236,46 +1269,61 @@ class TestPool2DAPI(unittest.TestCase): "input_NHWC": x_NHWC, "input_NCHW": x_NCHW, "input_NHWC_negetive": x_NHWC, - "input_NCHW_negetive": x_NCHW + "input_NCHW_negetive": x_NCHW, }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) assert np.allclose( res_1, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[1, 1], - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="max", + strides=[1, 1], + paddings=[1, 1], + data_format="NHWC", + ), + ) assert np.allclose( res_2, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NHWC")) - assert np.allclose(res_3, - pool2D_forward_naive(x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NCHW"), - rtol=0.07, - atol=1e-05) - - assert np.allclose(res_4, - pool2D_forward_naive(x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 2, 1, 0], - data_format="NCHW"), - rtol=0.07, - atol=1e-05) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 1, 1, 1], + data_format="NHWC", + ), + ) + assert np.allclose( + res_3, + pool2D_forward_naive( + x=x_NCHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 1, 1, 1], + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) + + assert np.allclose( + res_4, + pool2D_forward_naive( + x=x_NCHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1], + paddings=[1, 2, 1, 0], + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) # VALID assert np.allclose( @@ -1287,192 +1335,234 @@ class TestPool2DAPI(unittest.TestCase): strides=[1, 1], paddings=[10, 20], # any ele is ok padding_algorithm="VALID", - data_format="NCHW"), + data_format="NCHW", + ), rtol=0.07, - atol=1e-05) + atol=1e-05, + ) assert np.allclose( res_6, - pool2D_forward_naive(x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="VALID", - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=ksize, + pool_type="max", + strides=[1, 1], + paddings=[10, 20], + padding_algorithm="VALID", + data_format="NHWC", + ), + ) # SAME - assert np.allclose(res_7, - pool2D_forward_naive(x=x_NCHW, - ksize=[4, 4], - pool_type="avg", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - 
data_format="NCHW"), - rtol=0.07, - atol=1e-05) + assert np.allclose( + res_7, + pool2D_forward_naive( + x=x_NCHW, + ksize=[4, 4], + pool_type="avg", + strides=[1, 1], + paddings=[10, 20], + padding_algorithm="SAME", + data_format="NCHW", + ), + rtol=0.07, + atol=1e-05, + ) assert np.allclose( res_8, - pool2D_forward_naive(x=x_NHWC, - ksize=[4, 4], - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NHWC")) + pool2D_forward_naive( + x=x_NHWC, + ksize=[4, 4], + pool_type="max", + strides=[1, 1], + paddings=[10, 20], + padding_algorithm="SAME", + data_format="NHWC", + ), + ) class TestPool2DAPI_Error(unittest.TestCase): - def test_api(self): - input_NHWC = fluid.layers.data(name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32") + input_NHWC = fluid.layers.data( + name="input_NHWC", + shape=[2, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3] # cudnn type error def run_1(): - out_1 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - use_cudnn=[0], - data_format="NHWC") + out_1 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1], + use_cudnn=[0], + data_format="NHWC", + ) self.assertRaises(TypeError, run_1) # data_format value error def run_2(): - out_2 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - use_cudnn=False, - data_format="NHWCC") + out_2 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1], + use_cudnn=False, + data_format="NHWCC", + ) self.assertRaises(ValueError, run_2) # padding str value error def run_3(): - out_3 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALIDSAME", - use_cudnn=False, - data_format="NHWC") + out_3 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALIDSAME", + use_cudnn=False, + data_format="NHWC", + ) self.assertRaises(ValueError, run_3) # padding str valid and ceil_mode value error def run_4(): - out_4 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", - use_cudnn=False, - ceil_mode=True, - data_format="NHWC") + out_4 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALID", + use_cudnn=False, + ceil_mode=True, + data_format="NHWC", + ) self.assertRaises(ValueError, run_4) # padding with 8 ele. value error def run_5(): - out_5 = fluid.layers.pool2d(input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[[1, 1], [0, 0], [0, 0], - [1, 1]], - use_cudnn=False, - data_format="NHWC") + out_5 = fluid.layers.pool2d( + input=input_NHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + use_cudnn=False, + data_format="NHWC", + ) self.assertRaises(ValueError, run_5) class TestDygraphPool2DAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of Pool2D must be Variable. 
data1 = np.random.random((3, 32, 32, 5)).astype('float32') - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - global_pooling=False) + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False, + ) self.assertRaises(TypeError, pool2d, data1) # the input dtype of Pool2D must be uint8 or int8 or float16 or float32 or float64 # uint8 and int8 only can be set on mkldnn # float16 only can be set on GPU place - data2 = fluid.layers.data(name='x1', - shape=[3, 32, 32, 5], - dtype="int32") + data2 = fluid.layers.data( + name='x1', shape=[3, 32, 32, 5], dtype="int32" + ) self.assertRaises(TypeError, pool2d, data2) def test_data_format_error(self): with program_guard(Program(), Program()): # the data_format must be 'NCHW' or 'NHWC' data1 = np.random.random((3, 32, 32, 5)).astype('float32') - self.assertRaises(ValueError, - fluid.dygraph.Pool2D, - pool_size=2, - pool_type='max', - pool_stride=1, - global_pooling=False, - data_format='NWHC') + self.assertRaises( + ValueError, + fluid.dygraph.Pool2D, + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False, + data_format='NWHC', + ) class TestDygraphPool2DAPI(unittest.TestCase): - def test_nhwc(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='NHWC') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='NHWC', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) def test_lower_case(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='max', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='nhwc') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='max', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='nhwc', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) def test_upper_case(self): with fluid.dygraph.guard(): data = np.random.random((3, 32, 32, 5)).astype('float32') x = fluid.dygraph.to_variable(data) - pool2d = fluid.dygraph.Pool2D(pool_size=2, - pool_type='MAX', - pool_stride=1, - pool_padding=[0, 0], - global_pooling=False, - data_format='nhwc') + pool2d = fluid.dygraph.Pool2D( + pool_size=2, + pool_type='MAX', + pool_stride=1, + pool_padding=[0, 0], + global_pooling=False, + data_format='nhwc', + ) out1 = pool2d(x) - out2 = pool2D_forward_naive(data, [2, 2], [1, 1], - paddings=[0, 0], - pool_type='max', - data_format='NHWC') + out2 = pool2D_forward_naive( + data, + [2, 2], + [1, 1], + paddings=[0, 0], + pool_type='max', + data_format='NHWC', + ) np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) diff --git 
a/python/paddle/fluid/tests/unittests/test_pool3d_api.py b/python/paddle/fluid/tests/unittests/test_pool3d_api.py index 31426fe541d72a94c9e2907bbb75bacde0e16d97..29882a6c8bce818071aca71a2240ccb748979cbd 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_api.py @@ -20,11 +20,14 @@ import paddle.fluid.core as core from paddle.fluid.framework import _test_eager_guard from paddle.nn.functional import avg_pool3d, max_pool3d from paddle.fluid.framework import _test_eager_guard -from test_pool3d_op import avg_pool3D_forward_naive, max_pool3D_forward_naive, pool3D_forward_naive +from test_pool3d_op import ( + avg_pool3D_forward_naive, + max_pool3D_forward_naive, + pool3D_forward_naive, +) class TestPool3D_API(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -33,22 +36,26 @@ class TestPool3D_API(unittest.TestCase): def check_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 32, 32, 32], - dtype="float32") + input = fluid.data( + name="input", shape=[2, 3, 32, 32, 32], dtype="float32" + ) result = avg_pool3d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='avg') + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='avg', + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): @@ -57,18 +64,20 @@ class TestPool3D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = avg_pool3d(input, kernel_size=2, stride=2, padding="SAME") - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='avg', - padding_algorithm="SAME") + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='avg', + padding_algorithm="SAME", + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, - stride=None, - padding="SAME") + avg_pool3d_dg = paddle.nn.layer.AvgPool3D( + kernel_size=2, stride=None, padding="SAME" + ) result = avg_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -76,27 +85,33 @@ class TestPool3D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = avg_pool3d(input, - kernel_size=2, - stride=2, - padding=1, - ceil_mode=False, - exclusive=True) - - result_np = avg_pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[1, 1, 1], - ceil_mode=False, - exclusive=False) + result = avg_pool3d( + input, + kernel_size=2, + stride=2, + padding=1, + ceil_mode=False, + exclusive=True, + ) + + result_np = avg_pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[1, 1, 1], + ceil_mode=False, + exclusive=False, + ) np.testing.assert_allclose(result.numpy(), 
result_np, rtol=1e-05) - avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, - stride=None, - padding=1, - ceil_mode=False, - exclusive=True) + avg_pool3d_dg = paddle.nn.layer.AvgPool3D( + kernel_size=2, + stride=None, + padding=1, + ceil_mode=False, + exclusive=True, + ) result = avg_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -104,45 +119,48 @@ class TestPool3D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = avg_pool3d(input, - kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) - - result_np = avg_pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - ceil_mode=True) + result = avg_pool3d( + input, kernel_size=2, stride=2, padding=0, ceil_mode=True + ) + + result_np = avg_pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + ceil_mode=True, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, - stride=None, - padding=0, - ceil_mode=True) + avg_pool3d_dg = paddle.nn.layer.AvgPool3D( + kernel_size=2, stride=None, padding=0, ceil_mode=True + ) result = avg_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 32, 32, 32], - dtype="float32") + input = fluid.data( + name="input", shape=[2, 3, 32, 32, 32], dtype="float32" + ) result = max_pool3d(input, kernel_size=2, stride=2, padding=0) input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='max') + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='max', + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): @@ -151,16 +169,18 @@ class TestPool3D_API(unittest.TestCase): input = fluid.dygraph.to_variable(input_np) result = max_pool3d(input, kernel_size=2, stride=2, padding=0) - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='max') + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='max', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, - stride=None, - padding=0) + max_pool3d_dg = paddle.nn.layer.MaxPool3D( + kernel_size=2, stride=None, padding=0 + ) result = max_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -168,47 +188,52 @@ class TestPool3D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable( - np.transpose(input_np, [0, 2, 3, 4, 1])) - result = max_pool3d(input, - kernel_size=2, - stride=2, - padding=0, - data_format="NDHWC", - return_mask=False) - - result_np = 
pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='max') - - np.testing.assert_allclose(np.transpose(result.numpy(), - [0, 4, 1, 2, 3]), - result_np, - rtol=1e-05) + np.transpose(input_np, [0, 2, 3, 4, 1]) + ) + result = max_pool3d( + input, + kernel_size=2, + stride=2, + padding=0, + data_format="NDHWC", + return_mask=False, + ) + + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='max', + ) + + np.testing.assert_allclose( + np.transpose(result.numpy(), [0, 4, 1, 2, 3]), + result_np, + rtol=1e-05, + ) def check_max_dygraph_ceilmode_results(self, place): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = max_pool3d(input, - kernel_size=2, - stride=2, - padding=0, - ceil_mode=True) - - result_np = max_pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - ceil_mode=True) + result = max_pool3d( + input, kernel_size=2, stride=2, padding=0, ceil_mode=True + ) + + result_np = max_pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + ceil_mode=True, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, - stride=None, - padding=0, - ceil_mode=True) + max_pool3d_dg = paddle.nn.layer.MaxPool3D( + kernel_size=2, stride=None, padding=0, ceil_mode=True + ) result = max_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -216,24 +241,23 @@ class TestPool3D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result = max_pool3d(input, - kernel_size=2, - stride=2, - padding=1, - ceil_mode=False) - - result_np = max_pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[1, 1, 1], - ceil_mode=False) + result = max_pool3d( + input, kernel_size=2, stride=2, padding=1, ceil_mode=False + ) + + result_np = max_pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[1, 1, 1], + ceil_mode=False, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, - stride=None, - padding=1, - ceil_mode=False) + max_pool3d_dg = paddle.nn.layer.MaxPool3D( + kernel_size=2, stride=None, padding=1, ceil_mode=False + ) result = max_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -241,23 +265,27 @@ class TestPool3D_API(unittest.TestCase): with fluid.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) - result, indices = max_pool3d(input, - kernel_size=2, - stride=None, - padding="SAME", - return_mask=True) - - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='max', - padding_algorithm="SAME") + result, indices = max_pool3d( + input, + kernel_size=2, + stride=None, + padding="SAME", + return_mask=True, + ) + + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='max', + padding_algorithm="SAME", + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, - 
stride=2, - padding=0) + max_pool3d_dg = paddle.nn.layer.MaxPool3D( + kernel_size=2, stride=2, padding=0 + ) result = max_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -268,16 +296,18 @@ class TestPool3D_API(unittest.TestCase): padding = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]] result = max_pool3d(input, kernel_size=2, stride=2, padding=padding) - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='max') + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='max', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, - stride=2, - padding=0) + max_pool3d_dg = paddle.nn.layer.MaxPool3D( + kernel_size=2, stride=2, padding=0 + ) result = max_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) @@ -290,31 +320,37 @@ class TestPool3D_API(unittest.TestCase): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") input = fluid.dygraph.to_variable(input_np) padding = 0 - result = avg_pool3d(input, - kernel_size=2, - stride=2, - padding=padding, - divisor_override=8) - - result_np = pool3D_forward_naive(input_np, - ksize=[2, 2, 2], - strides=[2, 2, 2], - paddings=[0, 0, 0], - pool_type='avg') + result = avg_pool3d( + input, + kernel_size=2, + stride=2, + padding=padding, + divisor_override=8, + ) + + result_np = pool3D_forward_naive( + input_np, + ksize=[2, 2, 2], + strides=[2, 2, 2], + paddings=[0, 0, 0], + pool_type='avg', + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) - avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, - stride=2, - padding=0) + avg_pool3d_dg = paddle.nn.layer.AvgPool3D( + kernel_size=2, stride=2, padding=0 + ) result = avg_pool3d_dg(input) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) padding = [0, 0, 0, 0, 0, 0] - result = avg_pool3d(input, - kernel_size=2, - stride=2, - padding=padding, - divisor_override=8) + result = avg_pool3d( + input, + kernel_size=2, + stride=2, + padding=padding, + divisor_override=8, + ) np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def test_pool3d(self): @@ -336,167 +372,198 @@ class TestPool3D_API(unittest.TestCase): class TestPool3DError_API(unittest.TestCase): - def test_error_api(self): - def run1(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]] - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=padding) + res_pd = avg_pool3d( + input_pd, kernel_size=2, stride=2, padding=padding + ) self.assertRaises(ValueError, run1) def run2(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]] - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NCDHW') + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NCDHW', + ) self.assertRaises(ValueError, run2) def run3(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 
0], [0, 0], [0, 0]] - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=padding, - data_format='NDHWC') + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding=padding, + data_format='NDHWC', + ) self.assertRaises(ValueError, run3) def run4(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=0, - data_format='NNNN') + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding=0, + data_format='NNNN', + ) self.assertRaises(ValueError, run4) def run5(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = max_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=0, - data_format='NNNN') + res_pd = max_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding=0, + data_format='NNNN', + ) self.assertRaises(ValueError, run5) def run6(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding="padding", - data_format='NNNN') + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding="padding", + data_format='NNNN', + ) self.assertRaises(ValueError, run6) def run7(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = max_pool3d(input_pd, - kernel_size=2, - stride=2, - padding="padding", - data_format='NNNN') + res_pd = max_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding="padding", + data_format='NNNN', + ) self.assertRaises(ValueError, run7) def run8(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=2, - padding="VALID", - ceil_mode=True, - data_format='NNNN') + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding="VALID", + ceil_mode=True, + data_format='NNNN', + ) self.assertRaises(ValueError, run8) def run9(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = max_pool3d(input_pd, - kernel_size=2, - stride=2, - padding="VALID", - ceil_mode=True, - data_format='NNNN') + res_pd = max_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding="VALID", + ceil_mode=True, + data_format='NNNN', + ) self.assertRaises(ValueError, run9) def run10(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = max_pool3d(input_pd, - kernel_size=2, - stride=2, - padding=0, - data_format='NDHWC', - return_mask=True) + res_pd = max_pool3d( + input_pd, + kernel_size=2, + stride=2, + padding=0, + data_format='NDHWC', + return_mask=True, + ) self.assertRaises(ValueError, run10) def run_kernel_out_of_range(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = 
fluid.dygraph.to_variable(input_np) - res_pd = avg_pool3d(input_pd, - kernel_size=-1, - stride=2, - padding="VALID", - ceil_mode=True) + res_pd = avg_pool3d( + input_pd, + kernel_size=-1, + stride=2, + padding="VALID", + ceil_mode=True, + ) self.assertRaises(ValueError, run_kernel_out_of_range) def run_size_out_of_range(): with fluid.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( - np.float32) + np.float32 + ) input_pd = fluid.dygraph.to_variable(input_np) - res_pd = avg_pool3d(input_pd, - kernel_size=2, - stride=0, - padding="VALID", - ceil_mode=True) + res_pd = avg_pool3d( + input_pd, + kernel_size=2, + stride=0, + padding="VALID", + ceil_mode=True, + ) self.assertRaises(ValueError, run_size_out_of_range) diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index 37fde90a457883a0f42d6b30e155696b3cfb2eba..a5bf5066956483ed7c0991de7ed1d7b57ac2938c 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -29,25 +29,29 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def pool3D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_format='NCDHW', - pool_type='max', - padding_algorithm="EXPLICIT"): +def pool3D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_format='NCDHW', + pool_type='max', + padding_algorithm="EXPLICIT", +): # update paddings def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -57,9 +61,10 @@ def pool3D_forward_naive(x, if isinstance(padding_algorithm, str): padding_algorithm = padding_algorithm.upper() if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if padding_algorithm == "VALID": paddings = [0, 0, 0, 0, 0, 0] @@ -67,7 +72,8 @@ def pool3D_forward_naive(x, raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+ ) elif padding_algorithm == "SAME": input_data_shape = [] if data_format == "NCDHW": @@ -80,8 +86,11 @@ def pool3D_forward_naive(x, is_sys = True if len(paddings) == 3 else False N = x.shape[0] - C,D, H, W = [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] \ - if data_format == 'NCDHW' else [x.shape[4], x.shape[1], x.shape[2],x.shape[3]] + C, D, H, W = ( + [x.shape[1], x.shape[2], x.shape[3], x.shape[4]] + if data_format == 'NCDHW' + else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] + ) if global_pool == 1: ksize = [D, H, W] @@ -98,18 +107,35 @@ def pool3D_forward_naive(x, D_out, H_out, W_out = ksize else: - D_out = (D - ksize[0] + pad_d_forth+pad_d_back + strides[0] - 1) // strides[0] + 1 \ - if ceil_mode else (D - ksize[0] + pad_d_forth+pad_d_back) // strides[0] + 1 - - H_out = (H - ksize[1] + pad_h_up + pad_h_down + strides[1] - 1) // strides[1] + 1 \ - if ceil_mode else (H - ksize[1] + pad_h_up + pad_h_down) // strides[1] + 1 - - W_out = (W - ksize[2] + pad_w_left + pad_w_right + strides[2] - 1) // strides[2] + 1 \ - if ceil_mode else (W - ksize[2] + pad_w_left + pad_w_right) // strides[2] + 1 - - - out = np.zeros((N, C, D_out, H_out, W_out)) if data_format=='NCDHW' \ + D_out = ( + (D - ksize[0] + pad_d_forth + pad_d_back + strides[0] - 1) + // strides[0] + + 1 + if ceil_mode + else (D - ksize[0] + pad_d_forth + pad_d_back) // strides[0] + 1 + ) + + H_out = ( + (H - ksize[1] + pad_h_up + pad_h_down + strides[1] - 1) + // strides[1] + + 1 + if ceil_mode + else (H - ksize[1] + pad_h_up + pad_h_down) // strides[1] + 1 + ) + + W_out = ( + (W - ksize[2] + pad_w_left + pad_w_right + strides[2] - 1) + // strides[2] + + 1 + if ceil_mode + else (W - ksize[2] + pad_w_left + pad_w_right) // strides[2] + 1 + ) + + out = ( + np.zeros((N, C, D_out, H_out, W_out)) + if data_format == 'NCDHW' else np.zeros((N, D_out, H_out, W_out, C)) + ) for k in range(D_out): if adaptive: d_start = adaptive_start_index(k, D, ksize[0]) @@ -127,17 +153,29 @@ def pool3D_forward_naive(x, else: d_start = k * strides[0] - pad_d_forth - d_end = np.min((k * strides[0] + ksize[0] - pad_d_forth, - D + pad_d_back)) + d_end = np.min( + ( + k * strides[0] + ksize[0] - pad_d_forth, + D + pad_d_back, + ) + ) h_start = i * strides[1] - pad_h_up h_end = np.min( - (i * strides[1] + ksize[1] - pad_h_up, H + pad_h_down)) + (i * strides[1] + ksize[1] - pad_h_up, H + pad_h_down) + ) w_start = j * strides[2] - pad_w_left - w_end = np.min((j * strides[2] + ksize[2] - pad_w_left, - W + pad_w_right)) - - field_size = (d_end - d_start) * (h_end - h_start) * ( - w_end - w_start) + w_end = np.min( + ( + j * strides[2] + ksize[2] - pad_w_left, + W + pad_w_right, + ) + ) + + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) w_start = np.max((w_start, 0)) d_start = np.max((d_start, 0)) h_start = np.max((h_start, 0)) @@ -145,78 +183,95 @@ def pool3D_forward_naive(x, d_end = np.min((d_end, D)) h_end = np.min((h_end, H)) if data_format == 'NCDHW': - x_masked = x[:, :, d_start:d_end, h_start:h_end, - w_start:w_end] + x_masked = x[ + :, :, d_start:d_end, h_start:h_end, w_start:w_end + ] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (d_end - d_start) * ( - h_end - h_start) * (w_end - w_start) - - out[:, :, k, i, - j] = np.sum(x_masked, axis=(2, 3, 4)) / field_size + if exclusive or adaptive: + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + + out[:, :, k, i, j] = ( + np.sum(x_masked, axis=(2, 3, 4)) / field_size + ) elif pool_type == 'max': out[:, :, k, i, 
j] = np.max(x_masked, axis=(2, 3, 4)) elif data_format == 'NDHWC': - x_masked = x[:, d_start:d_end, h_start:h_end, - w_start:w_end, :] + x_masked = x[ + :, d_start:d_end, h_start:h_end, w_start:w_end, : + ] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (d_end - d_start) * ( - h_end - h_start) * (w_end - w_start) - - out[:, k, i, j, :] = np.sum(x_masked, - axis=(1, 2, 3)) / field_size + if exclusive or adaptive: + field_size = ( + (d_end - d_start) + * (h_end - h_start) + * (w_end - w_start) + ) + + out[:, k, i, j, :] = ( + np.sum(x_masked, axis=(1, 2, 3)) / field_size + ) elif pool_type == 'max': out[:, k, i, j, :] = np.max(x_masked, axis=(1, 2, 3)) return out -def max_pool3D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False): - out = pool3D_forward_naive(x=x, - ksize=ksize, - strides=strides, - paddings=paddings, - global_pool=global_pool, - ceil_mode=ceil_mode, - exclusive=exclusive, - adaptive=adaptive, - data_format='NCDHW', - pool_type="max") +def max_pool3D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, +): + out = pool3D_forward_naive( + x=x, + ksize=ksize, + strides=strides, + paddings=paddings, + global_pool=global_pool, + ceil_mode=ceil_mode, + exclusive=exclusive, + adaptive=adaptive, + data_format='NCDHW', + pool_type="max", + ) return out -def avg_pool3D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False): - out = pool3D_forward_naive(x=x, - ksize=ksize, - strides=strides, - paddings=paddings, - global_pool=global_pool, - ceil_mode=ceil_mode, - exclusive=exclusive, - adaptive=adaptive, - data_format='NCDHW', - pool_type="avg") +def avg_pool3D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, +): + out = pool3D_forward_naive( + x=x, + ksize=ksize, + strides=strides, + paddings=paddings, + global_pool=global_pool, + ceil_mode=ceil_mode, + exclusive=exclusive, + adaptive=adaptive, + data_format='NCDHW', + pool_type="avg", + ) return out class TestPool3D_Op(OpTest): - def setUp(self): self.op_type = "pool3d" self.init_kernel_type() @@ -235,12 +290,19 @@ class TestPool3D_Op(OpTest): paddle.enable_static() input = np.random.random(self.shape).astype(self.dtype) - output = pool3D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode, self.exclusive, - self.adaptive, self.data_format, - self.pool_type, - self.padding_algorithm).astype(self.dtype) + output = pool3D_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.data_format, + self.pool_type, + self.padding_algorithm, + ).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} @@ -276,10 +338,9 @@ class TestPool3D_Op(OpTest): if self.has_cudnn() and self.pool_type != "max": place = core.CUDAPlace(0) if core.is_compiled_with_rocm(): - self.check_grad_with_place(place, - set(['X']), - 'Out', - max_relative_error=1e-2) + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=1e-2 + ) else: self.check_grad_with_place(place, set(['X']), 'Out') elif self.pool_type != "max": @@ -322,7 +383,6 @@ class TestPool3D_Op(OpTest): class TestCase1(TestPool3D_Op): - def init_shape(self): self.shape = [1, 3, 7, 7, 7] @@ -341,7 +401,6 @@ class 
TestCase1(TestPool3D_Op): class TestCase2(TestPool3D_Op): - def init_shape(self): self.shape = [1, 3, 6, 7, 7] @@ -360,32 +419,28 @@ class TestCase2(TestPool3D_Op): class TestCase3(TestPool3D_Op): - def init_pool_type(self): self.pool_type = "max" class TestCase4(TestCase1): - def init_pool_type(self): self.pool_type = "max" class TestCase5(TestCase2): - def init_pool_type(self): self.pool_type = "max" -#--------------------test pool3d cudnn-------------------- +# --------------------test pool3d cudnn-------------------- def create_test_cudnn_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -403,11 +458,10 @@ create_test_cudnn_class(TestCase5) def create_test_cudnn_fp16_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNFp16Case(parent): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -427,11 +481,10 @@ def create_test_cudnn_fp16_class(parent): def create_test_fp16_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestFp16Case(parent): - def init_kernel_type(self): self.use_cudnn = False self.dtype = np.float16 @@ -464,11 +517,10 @@ create_test_fp16_class(TestCase5) # ---- test ceil mode ------ def create_test_cudnn_use_ceil_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestPool3DUseCeilCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -485,9 +537,7 @@ create_test_cudnn_use_ceil_class(TestCase1) def create_test_use_ceil_class(parent): - class TestPool3DUseCeilCase(parent): - def init_ceil_mode(self): self.ceil_mode = True @@ -501,15 +551,14 @@ create_test_use_ceil_class(TestCase2) class TestAvgInclude(TestCase2): - def init_exclusive(self): self.exclusive = False -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNAvgInclude(TestCase2): - def init_kernel_type(self): self.use_cudnn = True @@ -518,13 +567,11 @@ class TestCUDNNAvgInclude(TestCase2): class TestAvgPoolAdaptive(TestCase1): - def init_adaptive(self): self.adaptive = True class TestAvgPoolAdaptiveAsyOutSize(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -536,9 +583,8 @@ class TestAvgPoolAdaptiveAsyOutSize(TestCase1): self.strides = [1, 1, 1] -#-------test pool3d with asymmetric padding------ +# -------test pool3d with asymmetric padding------ class TestPool3D_Op_AsyPadding(TestPool3D_Op): - def init_test_case(self): self.ksize = [3, 4, 3] self.strides = [1, 1, 2] @@ -551,7 +597,6 @@ class TestPool3D_Op_AsyPadding(TestPool3D_Op): class TestCase1_AsyPadding(TestCase1): - def init_test_case(self): self.ksize = [3, 3, 4] self.strides = [1, 1, 2] @@ -564,7 +609,6 @@ class TestCase1_AsyPadding(TestCase1): class TestCase2_AsyPadding(TestCase2): - def init_test_case(self): self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -577,7 +621,6 
@@ class TestCase2_AsyPadding(TestCase2): class TestCase3_AsyPadding(TestCase3): - def init_test_case(self): self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -590,7 +633,6 @@ class TestCase3_AsyPadding(TestCase3): class TestCase4_AsyPadding(TestCase4): - def init_test_case(self): self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -603,7 +645,6 @@ class TestCase4_AsyPadding(TestCase4): class TestCase5_AsyPadding(TestCase5): - def init_test_case(self): self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -637,7 +678,6 @@ create_test_use_ceil_class(TestCase2_AsyPadding) class TestAvgInclude_AsyPadding(TestCase2): - def init_exclusive(self): self.exclusive = False @@ -645,10 +685,10 @@ class TestAvgInclude_AsyPadding(TestCase2): self.paddings = [2, 2, 1, 1, 0, 0] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNAvgInclude_AsyPadding(TestCase2): - def init_kernel_type(self): self.use_cudnn = True @@ -663,7 +703,6 @@ class TestCUDNNAvgInclude_AsyPadding(TestCase2): class TestAvgPoolAdaptive_AsyPadding(TestCase1): - def init_adaptive(self): self.adaptive = True @@ -673,7 +712,6 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1): # ------------ test channel_last -------------- class TestPool3D_channel_last(TestPool3D_Op): - def init_data_format(self): self.data_format = "NDHWC" @@ -682,7 +720,6 @@ class TestPool3D_channel_last(TestPool3D_Op): class TestCase1_channel_last(TestCase1): - def init_data_format(self): self.data_format = "NDHWC" @@ -691,7 +728,6 @@ class TestCase1_channel_last(TestCase1): class TestCase2_channel_last(TestCase2): - def init_data_format(self): self.data_format = "NDHWC" @@ -700,7 +736,6 @@ class TestCase2_channel_last(TestCase2): class TestCase3_channel_last(TestCase3): - def init_data_format(self): self.data_format = "NDHWC" @@ -709,7 +744,6 @@ class TestCase3_channel_last(TestCase3): class TestCase4_channel_last(TestCase4): - def init_data_format(self): self.data_format = "NDHWC" @@ -718,7 +752,6 @@ class TestCase4_channel_last(TestCase4): class TestCase5_channel_last(TestCase5): - def init_data_format(self): self.data_format = "NDHWC" @@ -741,7 +774,6 @@ create_test_use_ceil_class(TestCase2_channel_last) class TestCase5_Max(TestCase2): - def init_pool_type(self): self.pool_type = "max" @@ -750,16 +782,14 @@ class TestCase5_Max(TestCase2): return if self.has_cudnn() and self.pool_type == "max": place = core.CUDAPlace(0) - self.check_grad_with_place(place, - set(['X']), - 'Out', - max_relative_error=1.00) + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=1.00 + ) elif self.pool_type == "max": self.check_grad(set(['X']), 'Out', max_relative_error=1.00) class TestCase5_channel_last_Max(TestCase5_Max): - def init_data_format(self): self.data_format = "NDHWC" @@ -772,15 +802,14 @@ create_test_cudnn_class(TestCase5_channel_last_Max) class TestAvgInclude_channel_last(TestCase2_channel_last): - def init_exclusive(self): self.exclusive = False -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last): - def init_kernel_type(self): self.use_cudnn = True @@ -789,14 +818,12 @@ class TestCUDNNAvgInclude_channel_last(TestCase2_channel_last): class TestAvgPoolAdaptive_channel_last(TestCase1_channel_last): - def init_adaptive(self): 
self.adaptive = True # --- asy padding class TestPool3D_Op_AsyPadding_channel_last(TestPool3D_Op_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -805,7 +832,6 @@ class TestPool3D_Op_AsyPadding_channel_last(TestPool3D_Op_AsyPadding): class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -814,7 +840,6 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding): class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -823,7 +848,6 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding): class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -832,7 +856,6 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding): class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -841,7 +864,6 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding): class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" @@ -864,23 +886,23 @@ create_test_use_ceil_class(TestCase2_AsyPadding_channel_last) class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding): - def init_data_format(self): self.data_format = "NDHWC" -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestCUDNNAvgInclude_AsyPadding_channel_last(TestCUDNNAvgInclude_AsyPadding - ): - +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) +class TestCUDNNAvgInclude_AsyPadding_channel_last( + TestCUDNNAvgInclude_AsyPadding +): def init_data_format(self): self.data_format = "NDHWC" -class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding - ): - +class TestAvgPoolAdaptive_AsyPadding_channel_last( + TestAvgPoolAdaptive_AsyPadding +): def init_data_format(self): self.data_format = "NDHWC" @@ -888,11 +910,9 @@ class TestAvgPoolAdaptive_AsyPadding_channel_last(TestAvgPoolAdaptive_AsyPadding self.shape = [1, 7, 7, 7, 3] -#test padding = SAME VALID +# test padding = SAME VALID def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.paddings = [0, 0, 0] self.padding_algorithm = "SAME" @@ -918,11 +938,10 @@ create_test_padding_SAME_class(TestCase5_channel_last) def create_test_cudnn_padding_SAME_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingSMAECase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -951,9 +970,7 @@ create_test_cudnn_padding_SAME_class(TestCase5_channel_last) def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.paddings = [1, 1, 1] self.padding_algorithm = "VALID" @@ -979,11 +996,10 @@ create_test_padding_VALID_class(TestCase5_channel_last) def create_test_cudnn_padding_VALID_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestCUDNNPaddingVALIDCase(parent): - def init_kernel_type(self): self.use_cudnn = True @@ -1011,236 +1027,289 @@ create_test_cudnn_padding_VALID_class(TestCase4_channel_last) 
create_test_cudnn_padding_VALID_class(TestCase5_channel_last) -#test API +# test API class TestPool3DAPI(unittest.TestCase): - def test_api(self): x_NDHWC = np.random.random([2, 5, 5, 5, 3]).astype("float32") x_NCDHW = np.random.random([2, 3, 5, 5, 5]).astype("float32") - input_NDHWC = fluid.layers.data(name="input_NDHWC", - shape=[2, 5, 5, 5, 3], - append_batch_size=False, - dtype="float32") + input_NDHWC = fluid.layers.data( + name="input_NDHWC", + shape=[2, 5, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) - input_NCDHW = fluid.layers.data(name="input_NCDHW", - shape=[2, 3, 5, 5, 5], - append_batch_size=False, - dtype="float32") + input_NCDHW = fluid.layers.data( + name="input_NCDHW", + shape=[2, 3, 5, 5, 5], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3, 3] - out_1 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1, 1], - use_cudnn=False, - data_format="NDHWC") - - out_2 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [1, 1], [1, 1], - [1, 1], [0, 0]], - use_cudnn=False, - data_format="NDHWC") - - out_3 = fluid.layers.pool3d(input=input_NCDHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [0, 0], [1, 1], - [1, 1], [1, 1]], - use_cudnn=False, - data_format="NCDHW") - - out_4 = fluid.layers.pool3d(input=input_NCDHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[1, 2, 1, 0, 0, 1], - use_cudnn=False, - data_format="NCDHW") + out_1 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1, 1], + use_cudnn=False, + data_format="NDHWC", + ) + + out_2 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], + use_cudnn=False, + data_format="NDHWC", + ) + + out_3 = fluid.layers.pool3d( + input=input_NCDHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]], + use_cudnn=False, + data_format="NCDHW", + ) + + out_4 = fluid.layers.pool3d( + input=input_NCDHW, + pool_size=ksize, + pool_type="avg", + pool_padding=[1, 2, 1, 0, 0, 1], + use_cudnn=False, + data_format="NCDHW", + ) # test VALID - out_5 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="avg", - pool_padding="VALID", - use_cudnn=False, - data_format="NDHWC") - - out_6 = fluid.layers.pool3d(input=input_NCDHW, - pool_size=ksize, - pool_type="avg", - pool_padding="VALID", - use_cudnn=False, - data_format="NCDHW") + out_5 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="avg", + pool_padding="VALID", + use_cudnn=False, + data_format="NDHWC", + ) + + out_6 = fluid.layers.pool3d( + input=input_NCDHW, + pool_size=ksize, + pool_type="avg", + pool_padding="VALID", + use_cudnn=False, + data_format="NCDHW", + ) # test SAME - out_7 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_stride=[1, 1, 2], - pool_type="avg", - pool_padding="SAME", - use_cudnn=False, - data_format="NDHWC") - - out_8 = fluid.layers.pool3d(input=input_NCDHW, - pool_size=[4, 4, 4], - pool_type="avg", - pool_padding="SAME", - use_cudnn=False, - data_format="NCDHW") + out_7 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_stride=[1, 1, 2], + pool_type="avg", + pool_padding="SAME", + use_cudnn=False, + data_format="NDHWC", + ) + + out_8 = fluid.layers.pool3d( + input=input_NCDHW, + pool_size=[4, 4, 4], + pool_type="avg", + pool_padding="SAME", + use_cudnn=False, + 
data_format="NCDHW", + ) exe = fluid.Executor(place=fluid.CPUPlace()) [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run( fluid.default_main_program(), - feed={ - "input_NDHWC": x_NDHWC, - "input_NCDHW": x_NCDHW - }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + feed={"input_NDHWC": x_NDHWC, "input_NCDHW": x_NCDHW}, + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) assert np.allclose( res_1, - pool3D_forward_naive(x=x_NDHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1, 1], - paddings=[1, 1, 1], - data_format="NDHWC")) + pool3D_forward_naive( + x=x_NDHWC, + ksize=ksize, + pool_type="max", + strides=[1, 1, 1], + paddings=[1, 1, 1], + data_format="NDHWC", + ), + ) assert np.allclose( res_2, - pool3D_forward_naive(x=x_NDHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 1], - paddings=[1, 1, 1, 1, 1, 1], - data_format="NDHWC")) - assert np.allclose(res_3, - pool3D_forward_naive(x=x_NCDHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 1], - paddings=[1, 1, 1, 1, 1, 1], - data_format="NCDHW"), - rtol=0.07, - atol=1e-05) - - assert np.allclose(res_4, - pool3D_forward_naive(x=x_NCDHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 1], - paddings=[1, 2, 1, 0, 0, 1], - data_format="NCDHW"), - rtol=0.07, - atol=1e-05) + pool3D_forward_naive( + x=x_NDHWC, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 1], + paddings=[1, 1, 1, 1, 1, 1], + data_format="NDHWC", + ), + ) + assert np.allclose( + res_3, + pool3D_forward_naive( + x=x_NCDHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 1], + paddings=[1, 1, 1, 1, 1, 1], + data_format="NCDHW", + ), + rtol=0.07, + atol=1e-05, + ) + + assert np.allclose( + res_4, + pool3D_forward_naive( + x=x_NCDHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 1], + paddings=[1, 2, 1, 0, 0, 1], + data_format="NCDHW", + ), + rtol=0.07, + atol=1e-05, + ) # VALID assert np.allclose( res_5, - pool3D_forward_naive(x=x_NDHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 1], - paddings=[10, 20], - padding_algorithm="VALID", - data_format="NDHWC")) - - assert np.allclose(res_6, - pool3D_forward_naive(x=x_NCDHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 1], - paddings=[10, 20], - padding_algorithm="VALID", - data_format="NCDHW"), - rtol=0.07, - atol=1e-05) + pool3D_forward_naive( + x=x_NDHWC, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 1], + paddings=[10, 20], + padding_algorithm="VALID", + data_format="NDHWC", + ), + ) + + assert np.allclose( + res_6, + pool3D_forward_naive( + x=x_NCDHW, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 1], + paddings=[10, 20], + padding_algorithm="VALID", + data_format="NCDHW", + ), + rtol=0.07, + atol=1e-05, + ) # SAME assert np.allclose( res_7, - pool3D_forward_naive(x=x_NDHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1, 2], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NDHWC")) - - assert np.allclose(res_8, - pool3D_forward_naive(x=x_NCDHW, - ksize=[4, 4, 4], - pool_type="avg", - strides=[1, 1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NCDHW"), - rtol=0.07, - atol=1e-05) + pool3D_forward_naive( + x=x_NDHWC, + ksize=ksize, + pool_type="avg", + strides=[1, 1, 2], + paddings=[10, 20], + padding_algorithm="SAME", + data_format="NDHWC", + ), + ) + + assert np.allclose( + res_8, + pool3D_forward_naive( + x=x_NCDHW, + ksize=[4, 4, 4], + pool_type="avg", + strides=[1, 1, 1], + paddings=[10, 20], + padding_algorithm="SAME", + data_format="NCDHW", + ), + rtol=0.07, + atol=1e-05, + ) 
class TestPool3DAPI_Error(unittest.TestCase): - def test_api(self): - input_NDHWC = fluid.layers.data(name="input_NDHWC", - shape=[2, 5, 5, 5, 3], - append_batch_size=False, - dtype="float32") + input_NDHWC = fluid.layers.data( + name="input_NDHWC", + shape=[2, 5, 5, 5, 3], + append_batch_size=False, + dtype="float32", + ) ksize = [3, 3, 3] # cudnn type error def run_1(): - out_1 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1, 1], - use_cudnn=[0], - data_format="NDHWC") + out_1 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1, 1], + use_cudnn=[0], + data_format="NDHWC", + ) self.assertRaises(TypeError, run_1) # data_format value error def run_2(): - out_2 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1, 1], - use_cudnn=False, - data_format="NDHWCC") + out_2 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[1, 1, 1], + use_cudnn=False, + data_format="NDHWCC", + ) self.assertRaises(ValueError, run_2) # padding str value error def run_3(): - out_3 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALIDSAME", - use_cudnn=False, - data_format="NDHWC") + out_3 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALIDSAME", + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_3) # padding str valid and ceil_mode value error def run_4(): - out_4 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", - use_cudnn=False, - ceil_mode=True, - data_format="NDHWC") + out_4 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding="VALID", + use_cudnn=False, + ceil_mode=True, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_4) # padding with 8 ele. 
value error def run_5(): - out_5 = fluid.layers.pool3d(input=input_NDHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[[1, 1], [0, 0], [0, 0], - [1, 1], [1, 1]], - use_cudnn=False, - data_format="NDHWC") + out_5 = fluid.layers.pool3d( + input=input_NDHWC, + pool_size=ksize, + pool_type="max", + pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1], [1, 1]], + use_cudnn=False, + data_format="NDHWC", + ) self.assertRaises(ValueError, run_5) diff --git a/python/paddle/fluid/tests/unittests/test_pool_max_op.py b/python/paddle/fluid/tests/unittests/test_pool_max_op.py index 64204aa0ff26f7d75def86a76600788f77ccfe33..f324adb6c8d544acb07cc2a73efe2df30520bbd1 100644 --- a/python/paddle/fluid/tests/unittests/test_pool_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool_max_op.py @@ -25,12 +25,9 @@ def adaptive_end_index(index, input_size, output_size): return int(np.ceil((index + 1) * input_size / output_size)) -def max_pool3D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=False, - adaptive=False): +def max_pool3D_forward_naive( + x, ksize, strides, paddings, global_pool=False, adaptive=False +): N, C, D, H, W = x.shape if global_pool: @@ -77,19 +74,19 @@ def max_pool3D_forward_naive(x, sub_deep = index[0][0] sub_row = index[1][0] sub_col = index[2][0] - index = ((d_start + sub_deep) * H + - (h_start + sub_row)) * W + w_start + sub_col + index = ( + ((d_start + sub_deep) * H + (h_start + sub_row)) * W + + w_start + + sub_col + ) mask[n, c, k, i, j] = index return out, mask -def max_pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=False, - adaptive=False): +def max_pool2D_forward_naive( + x, ksize, strides, paddings, global_pool=False, adaptive=False +): N, C, H, W = x.shape if global_pool: @@ -132,17 +129,21 @@ def max_pool2D_forward_naive(x, class TestMaxPoolWithIndex_Op(OpTest): - def setUp(self): self.init_test_case() self.init_global() self.init_adaptive() input = np.random.random(self.shape).astype("float64") - input = np.round(input * 100., 2) - output, mask = self.pool_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.adaptive) + input = np.round(input * 100.0, 2) + output, mask = self.pool_forward_naive( + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.adaptive, + ) output = output.astype("float64") mask = mask.astype("int32") @@ -179,13 +180,11 @@ class TestMaxPoolWithIndex_Op(OpTest): class TestCase1(TestMaxPoolWithIndex_Op): - def init_global(self): self.global_pool = True class TestCase2(TestMaxPoolWithIndex_Op): - def init_test_case(self): self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive @@ -199,14 +198,12 @@ class TestCase2(TestMaxPoolWithIndex_Op): class TestCase3(TestCase2): - def init_global(self): self.global_pool = False -#----------------max_pool2d_with_index---------------- +# ----------------max_pool2d_with_index---------------- class TestCase4(TestMaxPoolWithIndex_Op): - def init_test_case(self): self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive @@ -220,13 +217,11 @@ class TestCase4(TestMaxPoolWithIndex_Op): class TestCase5(TestCase4): - def init_global(self): self.global_pool = False class TestCase6(TestMaxPoolWithIndex_Op): - def init_test_case(self): self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive @@ -240,19 +235,16 @@ class TestCase6(TestMaxPoolWithIndex_Op): class TestCase7(TestCase6): - def init_global(self): self.global_pool = 
False class TestCastAdaptive2d(TestCase6): - def init_adaptive(self): self.adaptive = True class TestCastAdaptive3d(TestMaxPoolWithIndex_Op): - def init_adaptive(self): self.adaptive = True diff --git a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py index dcbd5928fc4b1c5e5bc23c3df9b0348296c564eb..d666157e7d28774d571e9adacf759bdb54d0ab83 100644 --- a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py +++ b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py @@ -45,12 +45,14 @@ def py_pnpair_op(score, label, query, column=-1, weight=None): else: neg += w - return np.array(pos).astype('float32'), np.array(neg).astype( - 'float32'), np.array(neu).astype('float32') + return ( + np.array(pos).astype('float32'), + np.array(neg).astype('float32'), + np.array(neu).astype('float32'), + ) class TestPositiveNegativePairOp(OpTest): - def setUp(self): self.op_type = 'positive_negative_pair' batch_size = 20 @@ -58,7 +60,8 @@ class TestPositiveNegativePairOp(OpTest): score = np.random.normal(size=(batch_size, 1)).astype('float32') label = np.random.normal(size=(batch_size, 1)).astype('float32') query = np.array( - [np.random.randint(max_query_id) for i in range(batch_size)]) + [np.random.randint(max_query_id) for i in range(batch_size)] + ) query = np.reshape(query, newshape=(batch_size, 1)).astype('int64') pos, neg, neu = py_pnpair_op(score, label, query) @@ -67,7 +70,7 @@ class TestPositiveNegativePairOp(OpTest): self.outputs = { 'PositivePair': pos, 'NegativePair': neg, - 'NeutralPair': neu + 'NeutralPair': neu, } def test_check_output(self): @@ -75,7 +78,6 @@ class TestPositiveNegativePairOp(OpTest): class TestPositiveNegativePairOpAccumulateWeight(OpTest): - def setUp(self): self.op_type = 'positive_negative_pair' batch_size = 20 @@ -86,21 +88,23 @@ class TestPositiveNegativePairOpAccumulateWeight(OpTest): label = np.random.normal(size=(batch_size, 1)).astype('float32') weight = np.random.normal(size=(batch_size, 1)).astype('float32') query = np.array( - [np.random.randint(max_query_id) for i in range(batch_size)]) + [np.random.randint(max_query_id) for i in range(batch_size)] + ) query = np.reshape(query, newshape=(batch_size, 1)).astype('int64') - acc_pos = np.reshape(np.random.randint(max_random_num), - newshape=(1)).astype('float32') - acc_neg = np.reshape(np.random.randint(max_random_num), - newshape=(1)).astype('float32') - acc_neu = np.reshape(np.random.randint(max_random_num), - newshape=(1)).astype('float32') + acc_pos = np.reshape( + np.random.randint(max_random_num), newshape=(1) + ).astype('float32') + acc_neg = np.reshape( + np.random.randint(max_random_num), newshape=(1) + ).astype('float32') + acc_neu = np.reshape( + np.random.randint(max_random_num), newshape=(1) + ).astype('float32') column = np.random.randint(score_dim) - pos, neg, neu = py_pnpair_op(score, - label, - query, - column=column, - weight=weight) + pos, neg, neu = py_pnpair_op( + score, label, query, column=column, weight=weight + ) self.inputs = { 'Score': score, 'Label': label, @@ -108,13 +112,13 @@ class TestPositiveNegativePairOpAccumulateWeight(OpTest): 'AccumulatePositivePair': acc_pos, 'AccumulateNegativePair': acc_neg, 'AccumulateNeutralPair': acc_neu, - 'Weight': weight + 'Weight': weight, } self.attrs = {'column': column} self.outputs = { 'PositivePair': pos + acc_pos, 'NegativePair': neg + acc_neg, - 'NeutralPair': neu + acc_neu + 'NeutralPair': neu + acc_neu, } def 
test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_pow.py b/python/paddle/fluid/tests/unittests/test_pow.py index 9df1af1375c722fa7b5102e5b0fd1be4148d67ad..79282f3460f1aa4b6d330d78e50a9f02efab3da9 100755 --- a/python/paddle/fluid/tests/unittests/test_pow.py +++ b/python/paddle/fluid/tests/unittests/test_pow.py @@ -71,7 +71,7 @@ class TestPowerAPI(unittest.TestCase): """test_power.""" np.random.seed(7) # test 1-d float tensor ** float scalar - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.float64) y = np.random.rand() * 10 res = _run_power(DYNAMIC, x, y) @@ -80,7 +80,7 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d float tensor ** int scalar - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.float64) y = int(np.random.rand() * 10) res = _run_power(DYNAMIC, x, y) @@ -96,7 +96,7 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d float tensor ** 1-d float tensor - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(*dims) * 10).astype(np.float64) res = _run_power(DYNAMIC, x, y) @@ -105,7 +105,7 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.int64) y = (np.random.rand(*dims) * 10).astype(np.int64) res = _run_power(DYNAMIC, x, y) @@ -114,7 +114,7 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.int32) y = (np.random.rand(*dims) * 10).astype(np.int32) res = _run_power(DYNAMIC, x, y) @@ -123,7 +123,7 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.float32) y = (np.random.rand(*dims) * 10).astype(np.float32) res = _run_power(DYNAMIC, x, y) @@ -132,8 +132,11 @@ class TestPowerAPI(unittest.TestCase): np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test broadcast - dims = (np.random.randint(1, 10), np.random.randint(5, 10), - np.random.randint(5, 10)) + dims = ( + np.random.randint(1, 10), + np.random.randint(5, 10), + np.random.randint(5, 10), + ) x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(dims[-1]) * 10).astype(np.float64) res = _run_power(DYNAMIC, x, y) @@ -150,22 +153,28 @@ class TestPowerError(unittest.TestCase): np.random.seed(7) # test dynamic computation graph: inputs must be broadcastable - dims = (np.random.randint(1, 10), np.random.randint(5, 10), - np.random.randint(5, 10)) + dims = ( + np.random.randint(1, 10), + np.random.randint(5, 10), + np.random.randint(5, 10), + ) x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(dims[-1] + 1) * 10).astype(np.float64) self.assertRaises(ValueError, _run_power, DYNAMIC, x, y) self.assertRaises(ValueError, _run_power, STATIC, x, y) # test dynamic 
computation graph: inputs must be broadcastable - dims = (np.random.randint(1, 10), np.random.randint(5, 10), - np.random.randint(5, 10)) + dims = ( + np.random.randint(1, 10), + np.random.randint(5, 10), + np.random.randint(5, 10), + ) x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(dims[-1] + 1) * 10).astype(np.int8) self.assertRaises(TypeError, paddle.pow, x, y) # test 1-d float tensor ** int string - dims = (np.random.randint(200, 300), ) + dims = (np.random.randint(200, 300),) x = (np.random.rand(*dims) * 10).astype(np.float64) y = int(np.random.rand() * 10) self.assertRaises(TypeError, paddle.pow, x, str(y)) diff --git a/python/paddle/fluid/tests/unittests/test_pow2_decay_with_linear_warmup_op.py b/python/paddle/fluid/tests/unittests/test_pow2_decay_with_linear_warmup_op.py index 43f98cada4235e3654d2c16a73d1512b586242ef..69619ed02af84ecd30fd5924e324b05d54baa180 100644 --- a/python/paddle/fluid/tests/unittests/test_pow2_decay_with_linear_warmup_op.py +++ b/python/paddle/fluid/tests/unittests/test_pow2_decay_with_linear_warmup_op.py @@ -23,8 +23,9 @@ def gen_pow2_warmup_op_lr(warmup_steps, total_steps, base_lr, end_lr, place): main = paddle.static.Program() startup = paddle.static.Program() with paddle.static.program_guard(main, startup): - lr = pow2_decay_with_linear_warmup(warmup_steps, total_steps, base_lr, - end_lr) + lr = pow2_decay_with_linear_warmup( + warmup_steps, total_steps, base_lr, end_lr + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): exe.run(startup) @@ -34,18 +35,21 @@ def gen_pow2_warmup_op_lr(warmup_steps, total_steps, base_lr, end_lr, place): class Pow2Warmup(LinearWarmup): - def __init__(self, warmup_steps, total_steps, base_lr, end_lr): assert total_steps > warmup_steps - lr_sch = PolynomialDecay(learning_rate=base_lr, - decay_steps=total_steps - warmup_steps, - end_lr=end_lr, - power=2) + lr_sch = PolynomialDecay( + learning_rate=base_lr, + decay_steps=total_steps - warmup_steps, + end_lr=end_lr, + power=2, + ) - super(Pow2Warmup, self).__init__(learning_rate=lr_sch, - warmup_steps=warmup_steps, - start_lr=0.0, - end_lr=base_lr) + super(Pow2Warmup, self).__init__( + learning_rate=lr_sch, + warmup_steps=warmup_steps, + start_lr=0.0, + end_lr=base_lr, + ) def gen_pow2_warmup_py_lr(warmup_steps, total_steps, base_lr, end_lr, place): @@ -57,7 +61,6 @@ def gen_pow2_warmup_py_lr(warmup_steps, total_steps, base_lr, end_lr, place): class TestPow2WarmupLRScheduler(unittest.TestCase): - def setUp(self): paddle.enable_static() self.params = { diff --git a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py index 26e12b0deb0b42508bcc2e1088f3719b267ba706..8bc80de3429d583a370a92ae9be218bf2dde1b4c 100644 --- a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py +++ b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py @@ -85,16 +85,21 @@ def compute_metrics(states, cls_num): class TestPrecisionRecallOp_0(OpTest): - def setUp(self): self.op_type = "precision_recall" ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') - labels = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') + idxs = ( + np.random.choice(range(cls_num), ins_num) + .reshape((ins_num, 1)) + .astype('int32') + ) + labels = ( + np.random.choice(range(cls_num), ins_num) + 
.reshape((ins_num, 1)) + .astype('int32') + ) states = get_states(idxs, labels, cls_num) metrics = compute_metrics(states, cls_num) @@ -105,7 +110,7 @@ class TestPrecisionRecallOp_0(OpTest): self.outputs = { 'BatchMetrics': metrics, 'AccumMetrics': metrics, - 'AccumStatesInfo': states + 'AccumStatesInfo': states, } def test_check_output(self): @@ -113,17 +118,22 @@ class TestPrecisionRecallOp_0(OpTest): class TestPrecisionRecallOp_1(OpTest): - def setUp(self): self.op_type = "precision_recall" ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') + idxs = ( + np.random.choice(range(cls_num), ins_num) + .reshape((ins_num, 1)) + .astype('int32') + ) weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') + labels = ( + np.random.choice(range(cls_num), ins_num) + .reshape((ins_num, 1)) + .astype('int32') + ) states = get_states(idxs, labels, cls_num, weights) metrics = compute_metrics(states, cls_num) @@ -134,13 +144,13 @@ class TestPrecisionRecallOp_1(OpTest): 'MaxProbs': max_probs, 'Indices': idxs, 'Labels': labels, - 'Weights': weights + 'Weights': weights, } self.outputs = { 'BatchMetrics': metrics, 'AccumMetrics': metrics, - 'AccumStatesInfo': states + 'AccumStatesInfo': states, } def test_check_output(self): @@ -148,17 +158,22 @@ class TestPrecisionRecallOp_1(OpTest): class TestPrecisionRecallOp_2(OpTest): - def setUp(self): self.op_type = "precision_recall" ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') + idxs = ( + np.random.choice(range(cls_num), ins_num) + .reshape((ins_num, 1)) + .astype('int32') + ) weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(range(cls_num), ins_num).reshape( - (ins_num, 1)).astype('int32') + labels = ( + np.random.choice(range(cls_num), ins_num) + .reshape((ins_num, 1)) + .astype('int32') + ) states = np.random.randint(0, 30, (cls_num, 4)).astype('float32') accum_states = get_states(idxs, labels, cls_num, weights) @@ -173,13 +188,13 @@ class TestPrecisionRecallOp_2(OpTest): 'Indices': idxs, 'Labels': labels, 'Weights': weights, - 'StatesInfo': states + 'StatesInfo': states, } self.outputs = { 'BatchMetrics': batch_metrics, 'AccumMetrics': accum_metrics, - 'AccumStatesInfo': accum_states + 'AccumStatesInfo': accum_states, } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py index bf6de7911da860a03ba0dcf5cd4687255bed9ba7..c267286be9dc4fe41edf86142a5467041317f3c8 100644 --- a/python/paddle/fluid/tests/unittests/test_prelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py @@ -38,11 +38,13 @@ def ref_prelu_nn(x, num_parameters, init): class TestFunctionalPReluAPI(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() - self.x_np = np.random.uniform(-1., 1., [1, 2, 3, 4]).astype('float32') + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) + self.x_np = np.random.uniform(-1.0, 1.0, [1, 2, 3, 4]).astype('float32') self.weight_np_0 = np.random.randn(1).astype('float32') self.weight_np_1 = 
np.random.randn(self.x_np.shape[1]).astype('float32') @@ -52,11 +54,9 @@ class TestFunctionalPReluAPI(unittest.TestCase): weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32') out = F.prelu(x, weight) exe = paddle.static.Executor(self.place) - res = exe.run(feed={ - 'X': self.x_np, - 'Alpha': weight_np - }, - fetch_list=[out]) + res = exe.run( + feed={'X': self.x_np, 'Alpha': weight_np}, fetch_list=[out] + ) out_ref = ref_prelu(self.x_np, weight_np) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) @@ -83,44 +83,46 @@ class TestFunctionalPReluAPI(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - weight_fp32 = paddle.fluid.data(name='weight_fp32', - shape=[1], - dtype='float32') + weight_fp32 = paddle.fluid.data( + name='weight_fp32', shape=[1], dtype='float32' + ) # The input type must be Variable. self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[2, 3], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 3], dtype='int32' + ) self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[2, 3], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[2, 3], dtype='float16' + ) F.prelu(x=x_fp16, weight=weight_fp32) class TestNNPReluAPI(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.x_np = np.ones([1, 2, 3, 4]).astype('float32') def test_static_api(self): startup_program = paddle.static.Program() train_program = paddle.static.Program() with paddle.static.program_guard(train_program, startup_program): - x = paddle.fluid.data(name='X', - shape=self.x_np.shape, - dtype='float32') + x = paddle.fluid.data( + name='X', shape=self.x_np.shape, dtype='float32' + ) m = paddle.nn.PReLU() out = m(x) exe = paddle.static.Executor(self.place) exe.run(startup_program) - res = exe.run(train_program, - feed={'X': self.x_np}, - fetch_list=[out]) + res = exe.run( + train_program, feed={'X': self.x_np}, fetch_list=[out] + ) out_ref = ref_prelu_nn(self.x_np, 1, 0.25) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) @@ -152,8 +154,11 @@ class TestNNPReluAPI(unittest.TestCase): np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) x = paddle.to_tensor(self.x_np) - m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(0.5))) + m = paddle.nn.PReLU( + weight_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(0.5) + ) + ) out = m(x) out_ref = ref_prelu_nn(self.x_np, 1, 0.5) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) @@ -167,7 +172,6 @@ def prelu_api_wrapper(x, weight, data_format="NCHW"): class PReluTest(OpTest): - def setUp(self): self.init_dtype() self.init_input_shape() @@ -182,12 +186,9 @@ class PReluTest(OpTest): x_np[np.abs(x_np) < 0.005] = 0.02 if self.attrs == { - 'mode': "all", - "data_format": "NCHW" - } or self.attrs == { - 'mode': "all", - "data_format": "NHWC" - }: + 'mode': "all", + "data_format": "NCHW", + } or self.attrs == {'mode': "all", "data_format": "NHWC"}: alpha_np = np.random.uniform(-1, -0.5, (1)) elif self.attrs == {'mode': "channel", "data_format": "NCHW"}: alpha_np = np.random.uniform(-1, -0.5, [1, 
self.x_shape[1], 1, 1]) @@ -205,15 +206,17 @@ class PReluTest(OpTest): # since np operands could not be broadcast together with shapes (1,100,2,2,2,3) (1,100,1,1) reshaped_alpha = self.inputs['Alpha'] if self.attrs == {'mode': "channel", "data_format": "NCHW"}: - reshaped_alpha = np.reshape(self.inputs['Alpha'], - [1, self.x_shape[1]] + - [1] * len(self.x_shape[2:])) + reshaped_alpha = np.reshape( + self.inputs['Alpha'], + [1, self.x_shape[1]] + [1] * len(self.x_shape[2:]), + ) elif self.attrs == {'mode': "channel", "data_format": "NHWC"}: - reshaped_alpha = np.reshape(self.inputs['Alpha'], - [1] + [1] * len(self.x_shape[1:-1]) + - [self.x_shape[-1]]) - out_np = np.maximum(self.inputs['X'], 0.) - out_np = out_np + np.minimum(self.inputs['X'], 0.) * reshaped_alpha + reshaped_alpha = np.reshape( + self.inputs['Alpha'], + [1] + [1] * len(self.x_shape[1:-1]) + [self.x_shape[-1]], + ) + out_np = np.maximum(self.inputs['X'], 0.0) + out_np = out_np + np.minimum(self.inputs['X'], 0.0) * reshaped_alpha assert out_np is not self.inputs['X'] self.outputs = {'Out': out_np} @@ -234,11 +237,9 @@ class PReluTest(OpTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAll(PReluTest): - def init_input_shape(self): self.x_shape = [2, 3, 4, 5] @@ -247,11 +248,9 @@ class TestModeAll(PReluTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAllNHWC(PReluTest): - def init_input_shape(self): self.x_shape = [2, 3, 4, 50] @@ -260,7 +259,6 @@ class TestModeAllNHWC(PReluTest): class TestModeElt(PReluTest): - def init_input_shape(self): self.x_shape = [3, 2, 5, 10] @@ -269,7 +267,6 @@ class TestModeElt(PReluTest): class TestModeEltNHWC(PReluTest): - def init_input_shape(self): self.x_shape = [3, 2, 5, 10] @@ -278,11 +275,9 @@ class TestModeEltNHWC(PReluTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAllRank3(PReluTest): - def init_input_shape(self): self.x_shape = [1, 200, 3] @@ -291,11 +286,9 @@ class TestModeAllRank3(PReluTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAllRank3NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [1, 200, 3] @@ -304,11 +297,9 @@ class TestModeAllRank3NHWC(PReluTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAllRank6(PReluTest): - def init_input_shape(self): self.x_shape = [1, 2, 3, 4, 5, 6] @@ -317,11 +308,9 @@ class TestModeAllRank6(PReluTest): @skip_check_grad_ci( - reason= - "[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" + reason="[skip shape check] Input(Alpha) must be 1-D and only has one data in 'all' mode" ) class TestModeAllRank6NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [1, 2, 3, 4, 5, 6] @@ -330,7 +319,6 @@ class 
TestModeAllRank6NHWC(PReluTest): class TestModeChannelRank3(PReluTest): - def init_input_shape(self): self.x_shape = [1, 200, 3] @@ -339,7 +327,6 @@ class TestModeChannelRank3(PReluTest): class TestModeChannelRank3NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [1, 3, 100] @@ -348,7 +335,6 @@ class TestModeChannelRank3NHWC(PReluTest): class TestModeChannelRank6(PReluTest): - def init_input_shape(self): self.x_shape = [1, 100, 2, 2, 2, 2] @@ -357,7 +343,6 @@ class TestModeChannelRank6(PReluTest): class TestModeChannelRank6NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [1, 2, 2, 2, 2, 100] @@ -366,7 +351,6 @@ class TestModeChannelRank6NHWC(PReluTest): class TestModeElementRank3(PReluTest): - def init_input_shape(self): self.x_shape = [3, 10, 10] @@ -375,7 +359,6 @@ class TestModeElementRank3(PReluTest): class TestModeElementRank3NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [3, 10, 10] @@ -384,7 +367,6 @@ class TestModeElementRank3NHWC(PReluTest): class TestModeElementRank6(PReluTest): - def init_input_shape(self): self.x_shape = [3, 2, 2, 4, 5, 2] @@ -393,7 +375,6 @@ class TestModeElementRank6(PReluTest): class TestModeElementRank6NHWC(PReluTest): - def init_input_shape(self): self.x_shape = [3, 2, 2, 4, 5, 2] @@ -401,15 +382,13 @@ class TestModeElementRank6NHWC(PReluTest): self.attrs = {'mode': "element", "data_format": "NHWC"} -def create_test_fp16_class(parent, - check_grad=True, - atol=1e-3, - max_relative_error=0.05): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +def create_test_fp16_class( + parent, check_grad=True, atol=1e-3, max_relative_error=0.05 +): + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestPReluFp16Case(parent): - def init_dtype(self): self.dtype = np.float16 @@ -417,18 +396,20 @@ def create_test_fp16_class(parent, if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_output_with_place(place, - atol=atol, - check_eager=self.eager_mode) + self.check_output_with_place( + place, atol=atol, check_eager=self.eager_mode + ) def test_check_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and check_grad: self.check_grad_with_place( - place, ['X', 'Alpha'], + place, + ['X', 'Alpha'], 'Out', max_relative_error=max_relative_error, - check_eager=self.eager_mode) + check_eager=self.eager_mode, + ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") TestPReluFp16Case.__name__ = cls_name @@ -460,27 +441,26 @@ def prelu_t(x, mode, param_attr=None, name=None, data_format='NCHW'): shape=alpha_shape, dtype='float32', is_bias=False, - default_initializer=fluid.initializer.ConstantInitializer(0.25)) + default_initializer=fluid.initializer.ConstantInitializer(0.25), + ) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="prelu", - inputs={ - "X": x, - 'Alpha': alpha - }, - attrs={ - "mode": mode, - 'data_format': data_format - }, - outputs={"Out": out}) + helper.append_op( + type="prelu", + inputs={"X": x, 'Alpha': alpha}, + attrs={"mode": mode, 'data_format': data_format}, + outputs={"Out": out}, + ) return out # error message test if mode is not one of 'all', 'channel', 'element' class TestModeError(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.x_np = 
np.ones([1, 2, 3, 4]).astype('float32') def test_mode_error(self): @@ -490,7 +470,7 @@ class TestModeError(unittest.TestCase): try: y = prelu_t(x, 'any') except Exception as e: - assert (e.args[0].find('InvalidArgument') != -1) + assert e.args[0].find('InvalidArgument') != -1 def test_data_format_error1(self): main_program = Program() @@ -499,7 +479,7 @@ class TestModeError(unittest.TestCase): try: y = prelu_t(x, 'channel', data_format='N') except Exception as e: - assert (e.args[0].find('InvalidArgument') != -1) + assert e.args[0].find('InvalidArgument') != -1 def test_data_format_error2(self): main_program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py index 1dc6cca53cb43546c392d93a717019876fe74264..0ce1230f52e868b3e133e9b58f1e20802b6f5f44 100755 --- a/python/paddle/fluid/tests/unittests/test_print_op.py +++ b/python/paddle/fluid/tests/unittests/test_print_op.py @@ -28,7 +28,6 @@ paddle.enable_static() class TestPrintOpCPU(unittest.TestCase): - def setUp(self): self.place = paddle.CPUPlace() self.x_tensor = fluid.core.LoDTensor() @@ -48,17 +47,17 @@ class TestPrintOpCPU(unittest.TestCase): switch_main_program(Program()) printed = self.build_network(True, print_phase='forward') exe = paddle.static.Executor(self.place) - outs = exe.run(feed={'x': self.x_tensor}, - fetch_list=[printed], - return_numpy=False) + outs = exe.run( + feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False + ) def test_backward(self): switch_main_program(Program()) loss = self.build_network(False, print_phase='backward') exe = paddle.static.Executor(self.place) - outs = exe.run(feed={'x': self.x_tensor}, - fetch_list=[loss], - return_numpy=False) + outs = exe.run( + feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False + ) def test_all_parameters(self): x = layers.data('x', shape=[3], dtype='float32', lod_level=1) @@ -78,36 +77,36 @@ class TestPrintOpCPU(unittest.TestCase): loss = paddle.mean(x) paddle.static.append_backward(loss=loss) exe = paddle.static.Executor(self.place) - outs = exe.run(feed={'x': self.x_tensor}, - fetch_list=[loss], - return_numpy=False) + outs = exe.run( + feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False + ) def test_no_summarize(self): switch_main_program(Program()) printed = self.build_network(True, summarize=-1, print_phase='forward') exe = paddle.static.Executor(self.place) - outs = exe.run(feed={'x': self.x_tensor}, - fetch_list=[printed], - return_numpy=False) + outs = exe.run( + feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False + ) class TestPrintOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of Print_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.CPUPlace() + ) self.assertRaises(TypeError, paddle.static.Print, x1) # The input dtype of Print_op must be float32, float64, int32_t, int64_t or bool. 
x2 = paddle.static.data(name='x2', shape=[4], dtype="float16") self.assertRaises(TypeError, paddle.static.Print, x2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPrintOpGPU(TestPrintOpCPU): - def setUp(self): self.place = paddle.CUDAPlace(0) self.x_tensor = fluid.core.LoDTensor() @@ -117,7 +116,6 @@ class TestPrintOpGPU(TestPrintOpCPU): class TestPrintOpBackward(unittest.TestCase): - def check_backward(self, use_cuda): main = paddle.static.Program() startup = paddle.static.Program() @@ -135,7 +133,8 @@ class TestPrintOpBackward(unittest.TestCase): exe.run(startup) binary = paddle.static.CompiledProgram(main).with_data_parallel( - loss_name=loss.name) + loss_name=loss.name + ) img, label = init_data() feed_dict = {"image": img, "label": label} diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py index 67d468ac626e6c325449d0501e1858fd86eeb406..80e567be34c3210d45215faf2f37feecde2fe2f4 100644 --- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py @@ -19,19 +19,21 @@ from op_test import OpTest import paddle -def python_prior_box(input, - image, - min_sizes, - aspect_ratios=[1.], - variances=[0.1, 0.1, 0.2, 0.2], - max_sizes=None, - flip=False, - clip=False, - step_w=0, - step_h=0, - offset=0.5, - min_max_aspect_ratios_order=False, - name=None): +def python_prior_box( + input, + image, + min_sizes, + aspect_ratios=[1.0], + variances=[0.1, 0.1, 0.2, 0.2], + max_sizes=None, + flip=False, + clip=False, + step_w=0, + step_h=0, + offset=0.5, + min_max_aspect_ratios_order=False, + name=None, +): return paddle.fluid.layers.detection.prior_box( input, image, @@ -44,11 +46,11 @@ def python_prior_box(input, steps=[step_w, step_h], offset=offset, name=name, - min_max_aspect_ratios_order=min_max_aspect_ratios_order) + min_max_aspect_ratios_order=min_max_aspect_ratios_order, + ) class TestPriorBoxOp(OpTest): - def set_data(self): self.init_test_params() self.init_test_input() @@ -107,8 +109,9 @@ class TestPriorBoxOp(OpTest): self.flip = True self.set_min_max_aspect_ratios_order() self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] - self.aspect_ratios = np.array(self.aspect_ratios, - dtype=np.float64).flatten() + self.aspect_ratios = np.array( + self.aspect_ratios, dtype=np.float64 + ).flatten() self.variances = [0.1, 0.1, 0.2, 0.2] self.variances = np.array(self.variances, dtype=np.float64).flatten() @@ -120,12 +123,12 @@ class TestPriorBoxOp(OpTest): def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_w, - self.image_h)).astype('float32') + (self.batch_size, self.image_channels, self.image_w, self.image_h) + ).astype('float32') self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_w, - self.layer_h)).astype('float32') + (self.batch_size, self.input_channels, self.layer_w, self.layer_h) + ).astype('float32') def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -146,73 +149,78 @@ class TestPriorBoxOp(OpTest): ar = self.real_aspect_ratios[r] c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + 
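The hunks above and below are formatting-only rewrites: test behaviour is unchanged, and the new layout appears to follow the black code style (hanging 4-space indents, collapsed keyword-argument calls, explicit parentheses around long conditional expressions). The sketch below is illustrative only and is not part of the diff; it uses a hypothetical stand-in run() so it executes with NumPy alone, with argument values mirroring the tests above.

import numpy as np


def run(feed, fetch_list, return_numpy=False):
    # Hypothetical stand-in for an executor's run(); it simply echoes its inputs.
    return feed, fetch_list, return_numpy


x_tensor = np.zeros([3], dtype='float32')

# Previous layout: arguments aligned under the opening parenthesis, with the
# feed dict exploded across several lines.
outs = run(feed={'x': x_tensor},
           fetch_list=['printed'],
           return_numpy=False)

# New layout: the call either fits on one line or is exploded to one argument
# per line behind a hanging 4-space indent.
outs = run(
    feed={'x': x_tensor}, fetch_list=['printed'], return_numpy=False
)

# Long conditional expressions (such as the CUDAPlace/CPUPlace selection in
# setUp) are wrapped in parentheses with each operand on its own line once
# they no longer fit on a single line; string stand-ins are used here.
use_gpu = False
place = (
    'CUDAPlace(0)'
    if use_gpu
    else 'CPUPlace()'
)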
(c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 else: - c_w = c_h = min_size / 2. - out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + c_w = c_h = min_size / 2.0 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # rest of priors for r in range(len(self.real_aspect_ratios)): ar = self.real_aspect_ratios[r] - if abs(ar - 1.) < 1e-6: + if abs(ar - 1.0) < 1e-6: continue c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) # set the variance. 
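Two further patterns recur in the hunks above: chained NumPy calls are wrapped in parentheses with one method per line instead of being broken after the first call, and bare float literals such as 0. and 1. are normalised to 0.0 and 1.0. A minimal self-contained sketch (NumPy only, not part of the diff, with values mirroring the tests):

import numpy as np

ins_num, cls_num = 64, 10

# Previous layout: the chain is broken at an open parenthesis.
idxs_old = np.random.choice(range(cls_num), ins_num).reshape(
    (ins_num, 1)).astype('int32')

# New layout: the whole chain sits inside parentheses, one call per line.
idxs_new = (
    np.random.choice(range(cls_num), ins_num)
    .reshape((ins_num, 1))
    .astype('int32')
)

# Float literals gain an explicit trailing zero: 0. / 1. become 0.0 / 1.0.
out_boxes = np.clip(np.random.randn(4, 4), 0.0, 1.0)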
- out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype('float32') self.out_var = out_var.astype('float32') class TestPriorBoxOpWithoutMaxSize(TestPriorBoxOp): - def set_max_sizes(self): self.max_sizes = [] class TestPriorBoxOpWithSpecifiedOutOrder(TestPriorBoxOp): - def set_min_max_aspect_ratios_order(self): self.min_max_aspect_ratios_order = True diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py index cd754f7dbaf261753cfd057b69e8176781f3908f..24605da11e8a7c81ee9ffbbfe1746c62ee0d0bcd 100644 --- a/python/paddle/fluid/tests/unittests/test_prod_op.py +++ b/python/paddle/fluid/tests/unittests/test_prod_op.py @@ -19,7 +19,6 @@ from test_sum_op import TestReduceOPTensorAxisBase class TestProdOp(unittest.TestCase): - def setUp(self): self.input = np.random.random(size=(10, 10, 5)).astype(np.float32) @@ -27,54 +26,52 @@ class TestProdOp(unittest.TestCase): input = paddle.to_tensor(self.input) dy_result = paddle.prod(input) expected_result = np.prod(self.input) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) dy_result = paddle.prod(input, axis=1) expected_result = np.prod(self.input, axis=1) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) dy_result = paddle.prod(input, axis=-1) expected_result = np.prod(self.input, axis=-1) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) dy_result = paddle.prod(input, axis=[0, 1]) expected_result = np.prod(self.input, axis=(0, 1)) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05, atol=1e-8 + ) dy_result = paddle.prod(input, axis=1, keepdim=True) expected_result = np.prod(self.input, axis=1, keepdims=True) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) dy_result = paddle.prod(input, axis=1, dtype='int64') expected_result = np.prod(self.input, axis=1, dtype=np.int64) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) dy_result = paddle.prod(input, axis=1, keepdim=True, dtype='int64') - expected_result = np.prod(self.input, - axis=1, - keepdims=True, - dtype=np.int64) - np.testing.assert_allclose(dy_result.numpy(), - expected_result, - rtol=1e-05) + expected_result = np.prod( + self.input, axis=1, keepdims=True, dtype=np.int64 + ) + np.testing.assert_allclose( + dy_result.numpy(), expected_result, rtol=1e-05 + ) def run_static(self, use_gpu=False): - input = paddle.fluid.data(name='input', - shape=[10, 10, 5], - dtype='float32') + input = paddle.fluid.data( + name='input', shape=[10, 10, 5], dtype='float32' + ) result0 = paddle.prod(input) result1 = paddle.prod(input, axis=1) result2 = paddle.prod(input, axis=-1) @@ -86,44 +83,49 @@ class TestProdOp(unittest.TestCase): place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() exe = 
paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) - static_result = exe.run(feed={"input": self.input}, - fetch_list=[ - result0, result1, result2, result3, result4, - result5, result6 - ]) + static_result = exe.run( + feed={"input": self.input}, + fetch_list=[ + result0, + result1, + result2, + result3, + result4, + result5, + result6, + ], + ) expected_result = np.prod(self.input) - np.testing.assert_allclose(static_result[0], - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + static_result[0], expected_result, rtol=1e-05 + ) expected_result = np.prod(self.input, axis=1) - np.testing.assert_allclose(static_result[1], - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + static_result[1], expected_result, rtol=1e-05 + ) expected_result = np.prod(self.input, axis=-1) - np.testing.assert_allclose(static_result[2], - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + static_result[2], expected_result, rtol=1e-05 + ) expected_result = np.prod(self.input, axis=(0, 1)) - np.testing.assert_allclose(static_result[3], - expected_result, - rtol=1e-05, - atol=1e-8) + np.testing.assert_allclose( + static_result[3], expected_result, rtol=1e-05, atol=1e-8 + ) expected_result = np.prod(self.input, axis=1, keepdims=True) - np.testing.assert_allclose(static_result[4], - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + static_result[4], expected_result, rtol=1e-05 + ) expected_result = np.prod(self.input, axis=1, dtype=np.int64) - np.testing.assert_allclose(static_result[5], - expected_result, - rtol=1e-05) - expected_result = np.prod(self.input, - axis=1, - keepdims=True, - dtype=np.int64) - np.testing.assert_allclose(static_result[6], - expected_result, - rtol=1e-05) + np.testing.assert_allclose( + static_result[5], expected_result, rtol=1e-05 + ) + expected_result = np.prod( + self.input, axis=1, keepdims=True, dtype=np.int64 + ) + np.testing.assert_allclose( + static_result[6], expected_result, rtol=1e-05 + ) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) @@ -146,14 +148,14 @@ class TestProdOp(unittest.TestCase): class TestProdOpError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32') - bool_x = paddle.fluid.data(name='bool_x', - shape=[2, 2, 4], - dtype='bool') + bool_x = paddle.fluid.data( + name='bool_x', shape=[2, 2, 4], dtype='bool' + ) # The argument x shoule be a Tensor self.assertRaises(TypeError, paddle.prod, [1]) @@ -168,7 +170,6 @@ class TestProdOpError(unittest.TestCase): class TestProdWithTensorAxis1(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.prod self.np_api = np.prod @@ -178,7 +179,6 @@ class TestProdWithTensorAxis1(TestReduceOPTensorAxisBase): class TestProdWithTensorAxis2(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.prod self.np_api = np.prod @@ -187,7 +187,7 @@ class TestProdWithTensorAxis2(TestReduceOPTensorAxisBase): self.tensor_axis = [ 0, paddle.to_tensor([1], 'int64'), - paddle.to_tensor([2], 'int64') + paddle.to_tensor([2], 'int64'), ] diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 2b630ed384046e7481909435e254d2f1e681fd27..b7dd11f7b8a80364288200474dc3198821391405 100644 --- 
a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -26,7 +26,6 @@ import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2 class TestProfiler(unittest.TestCase): - @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) @@ -38,9 +37,9 @@ class TestProfiler(unittest.TestCase): image = fluid.layers.data(name='x', shape=[784], dtype='float32') hidden1 = fluid.layers.fc(input=image, size=64, act='relu') i = layers.zeros(shape=[1], dtype='int64') - counter = fluid.layers.zeros(shape=[1], - dtype='int64', - force_cpu=True) + counter = fluid.layers.zeros( + shape=[1], dtype='int64', force_cpu=True + ) until = layers.fill_constant([1], dtype='int64', value=10) data_arr = layers.array_write(hidden1, i) cond = fluid.layers.less_than(x=counter, y=until) @@ -58,9 +57,9 @@ class TestProfiler(unittest.TestCase): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(cost) batch_size = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=predict, - label=label, - total=batch_size) + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size + ) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) opts = optimizer.minimize(avg_cost, startup_program=startup_program) @@ -71,8 +70,10 @@ class TestProfiler(unittest.TestCase): exec_strategy = fluid.ExecutionStrategy() exec_strategy.num_threads = 1 train_program = fluid.compiler.CompiledProgram( - main_program).with_data_parallel(loss_name=avg_cost.name, - exec_strategy=exec_strategy) + main_program + ).with_data_parallel( + loss_name=avg_cost.name, exec_strategy=exec_strategy + ) else: train_program = main_program return train_program, startup_program, avg_cost, batch_size, batch_acc @@ -84,41 +85,48 @@ class TestProfiler(unittest.TestCase): def check_profile_result(self, profile_path): data = open(profile_path, 'rb').read() - if (len(data) > 0): + if len(data) > 0: profile_pb = profiler_pb2.Profile() profile_pb.ParseFromString(data) self.assertGreater(len(profile_pb.events), 0) for event in profile_pb.events: if event.type == profiler_pb2.Event.GPUKernel: if not event.detail_info and not event.name.startswith( - "MEM"): + "MEM" + ): raise Exception( "Kernel %s missing event. Has this kernel been recorded by RecordEvent?" 
- % event.name) + % event.name + ) elif event.type == profiler_pb2.Event.CPU and ( - event.name.startswith("Driver API") - or event.name.startswith("Runtime API")): + event.name.startswith("Driver API") + or event.name.startswith("Runtime API") + ): print("Warning: unregister", event.name) def run_iter(self, exe, main_program, fetch_list): x = np.random.random((32, 784)).astype("float32") y = np.random.randint(0, 10, (32, 1)).astype("int64") - outs = exe.run(main_program, - feed={ - 'x': x, - 'y': y - }, - fetch_list=fetch_list) - - def net_profiler(self, - exe, - state, - tracer_option, - batch_range=None, - use_parallel_executor=False, - use_new_api=False): - main_program, startup_program, avg_cost, batch_size, batch_acc = self.build_program( - compile_program=use_parallel_executor) + outs = exe.run( + main_program, feed={'x': x, 'y': y}, fetch_list=fetch_list + ) + + def net_profiler( + self, + exe, + state, + tracer_option, + batch_range=None, + use_parallel_executor=False, + use_new_api=False, + ): + ( + main_program, + startup_program, + avg_cost, + batch_size, + batch_acc, + ) = self.build_program(compile_program=use_parallel_executor) exe.run(startup_program) profile_path = self.get_profile_path() @@ -127,22 +135,26 @@ class TestProfiler(unittest.TestCase): for iter in range(10): if iter == 2: profiler.reset_profiler() - self.run_iter(exe, main_program, - [avg_cost, batch_acc, batch_size]) + self.run_iter( + exe, main_program, [avg_cost, batch_acc, batch_size] + ) else: options = utils.ProfilerOptions( options={ 'state': state, 'sorted_key': 'total', 'tracer_level': tracer_option, - 'batch_range': - [0, 10] if batch_range is None else batch_range, - 'profile_path': profile_path - }) + 'batch_range': [0, 10] + if batch_range is None + else batch_range, + 'profile_path': profile_path, + } + ) with utils.Profiler(enabled=True, options=options) as prof: for iter in range(10): - self.run_iter(exe, main_program, - [avg_cost, batch_acc, batch_size]) + self.run_iter( + exe, main_program, [avg_cost, batch_acc, batch_size] + ) utils.get_profiler().record_step() if batch_range is None and iter == 2: utils.get_profiler().reset() @@ -153,37 +165,44 @@ class TestProfiler(unittest.TestCase): def test_cpu_profiler(self): exe = fluid.Executor(fluid.CPUPlace()) for use_new_api in [False, True]: - self.net_profiler(exe, - 'CPU', - "Default", - batch_range=[5, 10], - use_new_api=use_new_api) - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "profiler is enabled only with GPU") + self.net_profiler( + exe, + 'CPU', + "Default", + batch_range=[5, 10], + use_new_api=use_new_api, + ) + + @unittest.skipIf( + not core.is_compiled_with_cuda(), "profiler is enabled only with GPU" + ) def test_cuda_profiler(self): exe = fluid.Executor(fluid.CUDAPlace(0)) for use_new_api in [False, True]: - self.net_profiler(exe, - 'GPU', - "OpDetail", - batch_range=[0, 10], - use_new_api=use_new_api) - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "profiler is enabled only with GPU") + self.net_profiler( + exe, + 'GPU', + "OpDetail", + batch_range=[0, 10], + use_new_api=use_new_api, + ) + + @unittest.skipIf( + not core.is_compiled_with_cuda(), "profiler is enabled only with GPU" + ) def test_all_profiler(self): exe = fluid.Executor(fluid.CUDAPlace(0)) for use_new_api in [False, True]: - self.net_profiler(exe, - 'All', - "AllOpDetail", - batch_range=None, - use_new_api=use_new_api) + self.net_profiler( + exe, + 'All', + "AllOpDetail", + batch_range=None, + use_new_api=use_new_api, + ) class 
TestProfilerAPIError(unittest.TestCase): - def test_errors(self): options = utils.ProfilerOptions() self.assertTrue(options['profile_path'] is None) diff --git a/python/paddle/fluid/tests/unittests/test_profiler_statistic.py b/python/paddle/fluid/tests/unittests/test_profiler_statistic.py index e6e7a9c8b62092d6bd30068b6fbcc897906ae06c..0483f4995d97a7771b3c83de39e1985bd0d0d78a 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler_statistic.py +++ b/python/paddle/fluid/tests/unittests/test_profiler_statistic.py @@ -19,7 +19,6 @@ import paddle.profiler.profiler_statistic as profiler_statistic class HostPythonNode: - def __init__(self, name, type, start_ns, end_ns, process_id, thread_id): self.name = name self.type = type @@ -34,9 +33,9 @@ class HostPythonNode: class DevicePythonNode: - - def __init__(self, name, type, start_ns, end_ns, device_id, context_id, - stream_id): + def __init__( + self, name, type, start_ns, end_ns, device_id, context_id, stream_id + ): self.name = name self.type = type self.start_ns = start_ns @@ -47,8 +46,20 @@ class DevicePythonNode: class MemPythonNode: - def __init__(self, timestamp_ns, addr, type, process_id, thread_id, increase_bytes, place, current_allocated, \ - current_reserved, peak_allocated, peak_reserved): + def __init__( + self, + timestamp_ns, + addr, + type, + process_id, + thread_id, + increase_bytes, + place, + current_allocated, + current_reserved, + peak_allocated, + peak_reserved, + ): self.timestamp_ns = timestamp_ns self.addr = addr self.type = type @@ -63,155 +74,308 @@ class MemPythonNode: class TestProfilerStatistic(unittest.TestCase): - def test_statistic_case1(self): - root_node = HostPythonNode('Root Node', - profiler.TracerEventType.UserDefined, 0, - float('inf'), 1000, 1001) - profilerstep_node = HostPythonNode('ProfileStep#1', - profiler.TracerEventType.ProfileStep, - 0, 400, 1000, 1001) - dataloader_node = HostPythonNode('Dataloader', - profiler.TracerEventType.Dataloader, 5, - 15, 1000, 1001) - mobilenet_node = HostPythonNode('MobileNet', - profiler.TracerEventType.Forward, 20, - 50, 1000, 1001) - yolonet_node = HostPythonNode('Yolov3Net', - profiler.TracerEventType.Forward, 50, 110, - 1000, 1001) + root_node = HostPythonNode( + 'Root Node', + profiler.TracerEventType.UserDefined, + 0, + float('inf'), + 1000, + 1001, + ) + profilerstep_node = HostPythonNode( + 'ProfileStep#1', + profiler.TracerEventType.ProfileStep, + 0, + 400, + 1000, + 1001, + ) + dataloader_node = HostPythonNode( + 'Dataloader', profiler.TracerEventType.Dataloader, 5, 15, 1000, 1001 + ) + mobilenet_node = HostPythonNode( + 'MobileNet', profiler.TracerEventType.Forward, 20, 50, 1000, 1001 + ) + yolonet_node = HostPythonNode( + 'Yolov3Net', profiler.TracerEventType.Forward, 50, 110, 1000, 1001 + ) userdefined_node = HostPythonNode( - 'Communication Time', profiler.TracerEventType.PythonUserDefined, - 100, 110, 1000, 1001) + 'Communication Time', + profiler.TracerEventType.PythonUserDefined, + 100, + 110, + 1000, + 1001, + ) communication_node = HostPythonNode( - 'Communication', profiler.TracerEventType.Communication, 105, 110, - 1000, 1001) - backward_node = HostPythonNode('Gradient Backward', - profiler.TracerEventType.Backward, 120, - 200, 1000, 1001) + 'Communication', + profiler.TracerEventType.Communication, + 105, + 110, + 1000, + 1001, + ) + backward_node = HostPythonNode( + 'Gradient Backward', + profiler.TracerEventType.Backward, + 120, + 200, + 1000, + 1001, + ) optimization_node = HostPythonNode( - 'Optimization', 
profiler.TracerEventType.Optimization, 220, 300, - 1000, 1001) - conv2d_node = HostPythonNode('conv2d', - profiler.TracerEventType.Operator, 25, 40, - 1000, 1001) - sync_batch_norm_node = HostPythonNode('sync_batch_norm', - profiler.TracerEventType.Operator, - 60, 100, 1000, 1001) + 'Optimization', + profiler.TracerEventType.Optimization, + 220, + 300, + 1000, + 1001, + ) + conv2d_node = HostPythonNode( + 'conv2d', profiler.TracerEventType.Operator, 25, 40, 1000, 1001 + ) + sync_batch_norm_node = HostPythonNode( + 'sync_batch_norm', + profiler.TracerEventType.Operator, + 60, + 100, + 1000, + 1001, + ) conv2d_infer_shape = HostPythonNode( - 'conv2d::infer_shape', profiler.TracerEventType.OperatorInner, 25, - 30, 1000, 1001) - conv2d_compute = HostPythonNode('conv2d::compute', - profiler.TracerEventType.OperatorInner, - 30, 40, 1000, 1001) + 'conv2d::infer_shape', + profiler.TracerEventType.OperatorInner, + 25, + 30, + 1000, + 1001, + ) + conv2d_compute = HostPythonNode( + 'conv2d::compute', + profiler.TracerEventType.OperatorInner, + 30, + 40, + 1000, + 1001, + ) conv2d_compute.mem_node.append( - MemPythonNode(33, 0, profiler_statistic.TracerMemEventType.Allocate, - 1000, 1001, 20, 'place(gpu:0)', 200, 200, 800, 800)) + MemPythonNode( + 33, + 0, + profiler_statistic.TracerMemEventType.Allocate, + 1000, + 1001, + 20, + 'place(gpu:0)', + 200, + 200, + 800, + 800, + ) + ) conv2d_launchkernel = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 30, 35, - 1000, 1001) - conv2d_MemCpy = HostPythonNode('AsyncMemcpy', - profiler.TracerEventType.UserDefined, 35, - 40, 1000, 1001) - conv2d_cudaMemCpy = HostPythonNode('cudaMemcpy', - profiler.TracerEventType.CudaRuntime, - 35, 40, 1000, 1001) - conv2d_kernel = DevicePythonNode('conv2d_kernel', - profiler.TracerEventType.Kernel, 35, - 50, 0, 0, 0) - conv2d_memcpy = DevicePythonNode('conv2d_memcpy', - profiler.TracerEventType.Memcpy, 50, - 60, 0, 0, 0) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 30, + 35, + 1000, + 1001, + ) + conv2d_MemCpy = HostPythonNode( + 'AsyncMemcpy', + profiler.TracerEventType.UserDefined, + 35, + 40, + 1000, + 1001, + ) + conv2d_cudaMemCpy = HostPythonNode( + 'cudaMemcpy', + profiler.TracerEventType.CudaRuntime, + 35, + 40, + 1000, + 1001, + ) + conv2d_kernel = DevicePythonNode( + 'conv2d_kernel', profiler.TracerEventType.Kernel, 35, 50, 0, 0, 0 + ) + conv2d_memcpy = DevicePythonNode( + 'conv2d_memcpy', profiler.TracerEventType.Memcpy, 50, 60, 0, 0, 0 + ) sync_batch_norm_infer_shape = HostPythonNode( 'sync_batch_norm::infer_shape', - profiler.TracerEventType.OperatorInner, 60, 70, 1000, 1001) + profiler.TracerEventType.OperatorInner, + 60, + 70, + 1000, + 1001, + ) sync_batch_norm_compute = HostPythonNode( - 'sync_batch_norm::compute', profiler.TracerEventType.OperatorInner, - 80, 100, 1000, 1001) + 'sync_batch_norm::compute', + profiler.TracerEventType.OperatorInner, + 80, + 100, + 1000, + 1001, + ) sync_batch_norm_launchkernel = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 80, 90, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 80, + 90, + 1000, + 1001, + ) sync_batch_norm_MemCpy = HostPythonNode( - 'AsyncMemcpy', profiler.TracerEventType.UserDefined, 90, 100, 1000, - 1001) + 'AsyncMemcpy', + profiler.TracerEventType.UserDefined, + 90, + 100, + 1000, + 1001, + ) sync_batch_norm_cudaMemCpy = HostPythonNode( - 'cudaMemcpy', profiler.TracerEventType.CudaRuntime, 90, 100, 1000, - 1001) + 'cudaMemcpy', + 
profiler.TracerEventType.CudaRuntime, + 90, + 100, + 1000, + 1001, + ) sync_batch_norm_kernel = DevicePythonNode( - 'sync_batch_norm_kernel', profiler.TracerEventType.Kernel, 95, 155, - 0, 0, 0) + 'sync_batch_norm_kernel', + profiler.TracerEventType.Kernel, + 95, + 155, + 0, + 0, + 0, + ) sync_batch_norm_memcpy = DevicePythonNode( - 'sync_batch_norm_memcpy', profiler.TracerEventType.Memcpy, 150, 200, - 0, 0, 1) + 'sync_batch_norm_memcpy', + profiler.TracerEventType.Memcpy, + 150, + 200, + 0, + 0, + 1, + ) root_node.children_node.append(profilerstep_node) - profilerstep_node.children_node.extend([ - dataloader_node, mobilenet_node, yolonet_node, backward_node, - optimization_node - ]) + profilerstep_node.children_node.extend( + [ + dataloader_node, + mobilenet_node, + yolonet_node, + backward_node, + optimization_node, + ] + ) mobilenet_node.children_node.append(conv2d_node) yolonet_node.children_node.extend( - [sync_batch_norm_node, userdefined_node]) + [sync_batch_norm_node, userdefined_node] + ) userdefined_node.children_node.append(communication_node) conv2d_node.children_node.extend( - [conv2d_infer_shape, conv2d_compute, conv2d_MemCpy]) + [conv2d_infer_shape, conv2d_compute, conv2d_MemCpy] + ) conv2d_compute.runtime_node.append(conv2d_launchkernel) conv2d_MemCpy.runtime_node.append(conv2d_cudaMemCpy) conv2d_launchkernel.device_node.append(conv2d_kernel) conv2d_cudaMemCpy.device_node.append(conv2d_memcpy) - sync_batch_norm_node.children_node.extend([ - sync_batch_norm_infer_shape, sync_batch_norm_compute, - sync_batch_norm_MemCpy - ]) + sync_batch_norm_node.children_node.extend( + [ + sync_batch_norm_infer_shape, + sync_batch_norm_compute, + sync_batch_norm_MemCpy, + ] + ) sync_batch_norm_compute.runtime_node.append( - sync_batch_norm_launchkernel) + sync_batch_norm_launchkernel + ) sync_batch_norm_MemCpy.runtime_node.append(sync_batch_norm_cudaMemCpy) sync_batch_norm_launchkernel.device_node.append(sync_batch_norm_kernel) sync_batch_norm_cudaMemCpy.device_node.append(sync_batch_norm_memcpy) thread_tree = {'thread1001': root_node} extra_info = { 'Process Cpu Utilization': '1.02', - 'System Cpu Utilization': '0.68' + 'System Cpu Utilization': '0.68', } statistic_data = profiler.profiler_statistic.StatisticData( - thread_tree, extra_info) + thread_tree, extra_info + ) time_range_summary = statistic_data.time_range_summary event_summary = statistic_data.event_summary self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.ProfileStep), 400) + profiler.TracerEventType.ProfileStep + ), + 400, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Forward), 90) + profiler.TracerEventType.Forward + ), + 90, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Backward), 80) + profiler.TracerEventType.Backward + ), + 80, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Optimization), 80) + profiler.TracerEventType.Optimization + ), + 80, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Operator), 55) + profiler.TracerEventType.Operator + ), + 55, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.OperatorInner), 45) + profiler.TracerEventType.OperatorInner + ), + 45, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.CudaRuntime), 30) + profiler.TracerEventType.CudaRuntime + ), + 30, + ) self.assertEqual( time_range_summary.get_gpu_range_sum( - 0, 
profiler.TracerEventType.Kernel), 75) + 0, profiler.TracerEventType.Kernel + ), + 75, + ) self.assertEqual( time_range_summary.get_gpu_range_sum( - 0, profiler.TracerEventType.Memcpy), 60) + 0, profiler.TracerEventType.Memcpy + ), + 60, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.UserDefined), 15) + profiler.TracerEventType.UserDefined + ), + 15, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Communication), 5) + profiler.TracerEventType.Communication + ), + 5, + ) self.assertEqual(len(event_summary.items), 2) self.assertEqual(len(event_summary.userdefined_items), 1) self.assertEqual(len(event_summary.model_perspective_items), 5) @@ -219,171 +383,347 @@ class TestProfilerStatistic(unittest.TestCase): self.assertEqual(event_summary.items['conv2d'].cpu_time, 15) self.assertEqual(event_summary.items['conv2d'].general_gpu_time, 25) self.assertEqual( - event_summary.model_perspective_items['Forward'].cpu_time, 90) + event_summary.model_perspective_items['Forward'].cpu_time, 90 + ) self.assertEqual( event_summary.model_perspective_items['Forward'].general_gpu_time, - 135) + 135, + ) self.assertEqual( event_summary.model_perspective_items['Backward'].general_gpu_time, - 0) + 0, + ) self.assertEqual( - event_summary.memory_manipulation_items['AsyncMemcpy'].cpu_time, 15) + event_summary.memory_manipulation_items['AsyncMemcpy'].cpu_time, 15 + ) self.assertEqual( - event_summary.memory_manipulation_items['AsyncMemcpy']. - general_gpu_time, 60) + event_summary.memory_manipulation_items[ + 'AsyncMemcpy' + ].general_gpu_time, + 60, + ) self.assertEqual( - statistic_data.memory_summary.allocated_items['place(gpu:0)'] - ['conv2d'].allocation_count, 1) + statistic_data.memory_summary.allocated_items['place(gpu:0)'][ + 'conv2d' + ].allocation_count, + 1, + ) self.assertEqual( - statistic_data.memory_summary.allocated_items['place(gpu:0)'] - ['conv2d'].allocation_size, 20) + statistic_data.memory_summary.allocated_items['place(gpu:0)'][ + 'conv2d' + ].allocation_size, + 20, + ) self.assertEqual( - statistic_data.memory_summary.allocated_items['place(gpu:0)'] - ['conv2d'].increase_size, 20) + statistic_data.memory_summary.allocated_items['place(gpu:0)'][ + 'conv2d' + ].increase_size, + 20, + ) self.assertEqual( - statistic_data.memory_summary.allocated_items['place(gpu:0)'] - ['conv2d'].increase_size, 20) + statistic_data.memory_summary.allocated_items['place(gpu:0)'][ + 'conv2d' + ].increase_size, + 20, + ) self.assertEqual( - statistic_data.memory_summary. 
- peak_allocation_values['place(gpu:0)'], 800) + statistic_data.memory_summary.peak_allocation_values[ + 'place(gpu:0)' + ], + 800, + ) self.assertEqual( statistic_data.memory_summary.peak_reserved_values['place(gpu:0)'], - 800) + 800, + ) print( profiler.profiler_statistic._build_table( statistic_data, sorted_by=profiler.SortedKeys.CPUTotal, op_detail=True, thread_sep=False, - time_unit='ms')) + time_unit='ms', + ) + ) def test_statistic_case2(self): - root_node = HostPythonNode('Root Node', - profiler.TracerEventType.UserDefined, 0, - float('inf'), 1000, 1001) - profilerstep_node = HostPythonNode('ProfileStep#1', - profiler.TracerEventType.ProfileStep, - 0, 400, 1000, 1001) - - dataloader_node = HostPythonNode('Dataloader', - profiler.TracerEventType.Dataloader, 5, - 15, 1000, 1001) - - mobilenet_node = HostPythonNode('MobileNet', - profiler.TracerEventType.Forward, 20, - 50, 1000, 1001) - yolonet_node = HostPythonNode('Yolov3Net', - profiler.TracerEventType.Forward, 50, 110, - 1000, 1001) + root_node = HostPythonNode( + 'Root Node', + profiler.TracerEventType.UserDefined, + 0, + float('inf'), + 1000, + 1001, + ) + profilerstep_node = HostPythonNode( + 'ProfileStep#1', + profiler.TracerEventType.ProfileStep, + 0, + 400, + 1000, + 1001, + ) + + dataloader_node = HostPythonNode( + 'Dataloader', profiler.TracerEventType.Dataloader, 5, 15, 1000, 1001 + ) + + mobilenet_node = HostPythonNode( + 'MobileNet', profiler.TracerEventType.Forward, 20, 50, 1000, 1001 + ) + yolonet_node = HostPythonNode( + 'Yolov3Net', profiler.TracerEventType.Forward, 50, 110, 1000, 1001 + ) userdefined_node = HostPythonNode( - 'Communication Time', profiler.TracerEventType.PythonUserDefined, - 100, 110, 1000, 1001) + 'Communication Time', + profiler.TracerEventType.PythonUserDefined, + 100, + 110, + 1000, + 1001, + ) allreduce_launchkernel0 = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 102, 104, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 102, + 104, + 1000, + 1001, + ) nccl_allreduce_kernel0 = DevicePythonNode( - 'nccl_allreduce_kernel', profiler.TracerEventType.Kernel, 105, 120, - 0, 0, 2) + 'nccl_allreduce_kernel', + profiler.TracerEventType.Kernel, + 105, + 120, + 0, + 0, + 2, + ) communication_node = HostPythonNode( - 'Communication', profiler.TracerEventType.Communication, 105, 110, - 1000, 1001) + 'Communication', + profiler.TracerEventType.Communication, + 105, + 110, + 1000, + 1001, + ) - allreduce_op1 = HostPythonNode('allreduce_op1', - profiler.TracerEventType.Operator, 105, - 108, 1000, 1001) + allreduce_op1 = HostPythonNode( + 'allreduce_op1', + profiler.TracerEventType.Operator, + 105, + 108, + 1000, + 1001, + ) allreduce_op1_infershape = HostPythonNode( - 'allreduce_op1::infershape', profiler.TracerEventType.OperatorInner, - 105, 106, 1000, 1001) + 'allreduce_op1::infershape', + profiler.TracerEventType.OperatorInner, + 105, + 106, + 1000, + 1001, + ) allreduce_launchkernel1 = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 106, 107, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 106, + 107, + 1000, + 1001, + ) nccl_allreduce_kernel1 = DevicePythonNode( - 'nccl_allreduce_kernel', profiler.TracerEventType.Kernel, 130, 150, - 0, 0, 2) + 'nccl_allreduce_kernel', + profiler.TracerEventType.Kernel, + 130, + 150, + 0, + 0, + 2, + ) - backward_node = HostPythonNode('Gradient Backward', - profiler.TracerEventType.Backward, 120, - 200, 1000, 1001) + backward_node = HostPythonNode( + 
'Gradient Backward', + profiler.TracerEventType.Backward, + 120, + 200, + 1000, + 1001, + ) optimization_node = HostPythonNode( - 'Optimization', profiler.TracerEventType.Optimization, 220, 300, - 1000, 1001) - conv2d_node = HostPythonNode('conv2d', - profiler.TracerEventType.Operator, 25, 40, - 1000, 1001) - sync_batch_norm_node = HostPythonNode('sync_batch_norm', - profiler.TracerEventType.Operator, - 60, 100, 1000, 1001) + 'Optimization', + profiler.TracerEventType.Optimization, + 220, + 300, + 1000, + 1001, + ) + conv2d_node = HostPythonNode( + 'conv2d', profiler.TracerEventType.Operator, 25, 40, 1000, 1001 + ) + sync_batch_norm_node = HostPythonNode( + 'sync_batch_norm', + profiler.TracerEventType.Operator, + 60, + 100, + 1000, + 1001, + ) conv2d_infer_shape = HostPythonNode( - 'conv2d::infer_shape', profiler.TracerEventType.OperatorInner, 25, - 30, 1000, 1001) - conv2d_compute = HostPythonNode('conv2d::compute', - profiler.TracerEventType.OperatorInner, - 30, 40, 1000, 1001) + 'conv2d::infer_shape', + profiler.TracerEventType.OperatorInner, + 25, + 30, + 1000, + 1001, + ) + conv2d_compute = HostPythonNode( + 'conv2d::compute', + profiler.TracerEventType.OperatorInner, + 30, + 40, + 1000, + 1001, + ) conv2d_launchkernel = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 30, 35, - 1000, 1001) - conv2d_MemCpy = HostPythonNode('AsyncMemcpy', - profiler.TracerEventType.UserDefined, 35, - 40, 1000, 1001) - conv2d_cudaMemCpy = HostPythonNode('cudaMemcpy', - profiler.TracerEventType.CudaRuntime, - 35, 40, 1000, 1001) - conv2d_kernel = DevicePythonNode('conv2d_kernel', - profiler.TracerEventType.Kernel, 35, - 50, 0, 0, 0) - conv2d_memcpy = DevicePythonNode('conv2d_memcpy', - profiler.TracerEventType.Memcpy, 50, - 60, 0, 0, 0) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 30, + 35, + 1000, + 1001, + ) + conv2d_MemCpy = HostPythonNode( + 'AsyncMemcpy', + profiler.TracerEventType.UserDefined, + 35, + 40, + 1000, + 1001, + ) + conv2d_cudaMemCpy = HostPythonNode( + 'cudaMemcpy', + profiler.TracerEventType.CudaRuntime, + 35, + 40, + 1000, + 1001, + ) + conv2d_kernel = DevicePythonNode( + 'conv2d_kernel', profiler.TracerEventType.Kernel, 35, 50, 0, 0, 0 + ) + conv2d_memcpy = DevicePythonNode( + 'conv2d_memcpy', profiler.TracerEventType.Memcpy, 50, 60, 0, 0, 0 + ) sync_batch_norm_infer_shape = HostPythonNode( 'sync_batch_norm::infer_shape', - profiler.TracerEventType.OperatorInner, 60, 70, 1000, 1001) + profiler.TracerEventType.OperatorInner, + 60, + 70, + 1000, + 1001, + ) sync_batch_norm_compute = HostPythonNode( - 'sync_batch_norm::compute', profiler.TracerEventType.OperatorInner, - 80, 100, 1000, 1001) + 'sync_batch_norm::compute', + profiler.TracerEventType.OperatorInner, + 80, + 100, + 1000, + 1001, + ) sync_batch_norm_launchkernel = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 80, 90, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 80, + 90, + 1000, + 1001, + ) sync_batch_norm_MemCpy = HostPythonNode( - 'AsyncMemcpy', profiler.TracerEventType.UserDefined, 90, 100, 1000, - 1001) + 'AsyncMemcpy', + profiler.TracerEventType.UserDefined, + 90, + 100, + 1000, + 1001, + ) sync_batch_norm_cudaMemCpy = HostPythonNode( - 'cudaMemcpy', profiler.TracerEventType.CudaRuntime, 90, 100, 1000, - 1001) + 'cudaMemcpy', + profiler.TracerEventType.CudaRuntime, + 90, + 100, + 1000, + 1001, + ) sync_batch_norm_kernel = DevicePythonNode( - 'sync_batch_norm_kernel', profiler.TracerEventType.Kernel, 95, 300, - 0, 
0, 0) + 'sync_batch_norm_kernel', + profiler.TracerEventType.Kernel, + 95, + 300, + 0, + 0, + 0, + ) sync_batch_norm_memcpy = DevicePythonNode( - 'sync_batch_norm_memcpy', profiler.TracerEventType.Memcpy, 150, 200, - 0, 0, 1) + 'sync_batch_norm_memcpy', + profiler.TracerEventType.Memcpy, + 150, + 200, + 0, + 0, + 1, + ) - allreduce_node2 = HostPythonNode('allreduce', - profiler.TracerEventType.Operator, 230, - 250, 1000, 1001) + allreduce_node2 = HostPythonNode( + 'allreduce', profiler.TracerEventType.Operator, 230, 250, 1000, 1001 + ) allreduce_node2_infershape = HostPythonNode( 'allreduce_node2::infershape', - profiler.TracerEventType.OperatorInner, 231, 232, 1000, 1001) + profiler.TracerEventType.OperatorInner, + 231, + 232, + 1000, + 1001, + ) allreduce_launchkernel2 = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 235, 240, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 235, + 240, + 1000, + 1001, + ) nccl_allreduce_kernel2 = DevicePythonNode( - 'nccl_allreduce_kernel', profiler.TracerEventType.Kernel, 250, 280, - 0, 0, 2) + 'nccl_allreduce_kernel', + profiler.TracerEventType.Kernel, + 250, + 280, + 0, + 0, + 2, + ) root_node.children_node.append(profilerstep_node) - profilerstep_node.children_node.extend([ - dataloader_node, mobilenet_node, yolonet_node, backward_node, - optimization_node - ]) + profilerstep_node.children_node.extend( + [ + dataloader_node, + mobilenet_node, + yolonet_node, + backward_node, + optimization_node, + ] + ) mobilenet_node.children_node.append(conv2d_node) yolonet_node.children_node.extend( - [sync_batch_norm_node, userdefined_node]) + [sync_batch_norm_node, userdefined_node] + ) userdefined_node.children_node.append(communication_node) userdefined_node.runtime_node.append(allreduce_launchkernel0) allreduce_launchkernel0.device_node.append(nccl_allreduce_kernel0) @@ -392,17 +732,22 @@ class TestProfilerStatistic(unittest.TestCase): allreduce_op1.runtime_node.append(allreduce_launchkernel1) allreduce_launchkernel1.device_node.append(nccl_allreduce_kernel1) conv2d_node.children_node.extend( - [conv2d_infer_shape, conv2d_compute, conv2d_MemCpy]) + [conv2d_infer_shape, conv2d_compute, conv2d_MemCpy] + ) conv2d_compute.runtime_node.append(conv2d_launchkernel) conv2d_MemCpy.runtime_node.append(conv2d_cudaMemCpy) conv2d_launchkernel.device_node.append(conv2d_kernel) conv2d_cudaMemCpy.device_node.append(conv2d_memcpy) - sync_batch_norm_node.children_node.extend([ - sync_batch_norm_infer_shape, sync_batch_norm_compute, - sync_batch_norm_MemCpy - ]) + sync_batch_norm_node.children_node.extend( + [ + sync_batch_norm_infer_shape, + sync_batch_norm_compute, + sync_batch_norm_MemCpy, + ] + ) sync_batch_norm_compute.runtime_node.append( - sync_batch_norm_launchkernel) + sync_batch_norm_launchkernel + ) sync_batch_norm_MemCpy.runtime_node.append(sync_batch_norm_cudaMemCpy) sync_batch_norm_launchkernel.device_node.append(sync_batch_norm_kernel) sync_batch_norm_cudaMemCpy.device_node.append(sync_batch_norm_memcpy) @@ -413,62 +758,111 @@ class TestProfilerStatistic(unittest.TestCase): thread_tree = {'thread1001': root_node} extra_info = { 'Process Cpu Utilization': '1.02', - 'System Cpu Utilization': '0.68' + 'System Cpu Utilization': '0.68', } statistic_data = profiler.profiler_statistic.StatisticData( - thread_tree, extra_info) + thread_tree, extra_info + ) time_range_summary = statistic_data.time_range_summary event_summary = statistic_data.event_summary distributed_summary = statistic_data.distributed_summary 
self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.ProfileStep), 400) + profiler.TracerEventType.ProfileStep + ), + 400, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Forward), 90) + profiler.TracerEventType.Forward + ), + 90, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Backward), 80) + profiler.TracerEventType.Backward + ), + 80, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Optimization), 80) + profiler.TracerEventType.Optimization + ), + 80, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Operator), 78) + profiler.TracerEventType.Operator + ), + 78, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.OperatorInner), 47) + profiler.TracerEventType.OperatorInner + ), + 47, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.CudaRuntime), 38) + profiler.TracerEventType.CudaRuntime + ), + 38, + ) self.assertEqual( time_range_summary.get_gpu_range_sum( - 0, profiler.TracerEventType.Kernel), 220) + 0, profiler.TracerEventType.Kernel + ), + 220, + ) self.assertEqual( time_range_summary.get_gpu_range_sum( - 0, profiler.TracerEventType.Memcpy), 60) + 0, profiler.TracerEventType.Memcpy + ), + 60, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.UserDefined), 15) + profiler.TracerEventType.UserDefined + ), + 15, + ) self.assertEqual( time_range_summary.get_cpu_range_sum( - profiler.TracerEventType.Communication), 5) + profiler.TracerEventType.Communication + ), + 5, + ) self.assertEqual( profiler.statistic_helper.sum_ranges( - distributed_summary.cpu_communication_range), 25) + distributed_summary.cpu_communication_range + ), + 25, + ) self.assertEqual( profiler.statistic_helper.sum_ranges( - distributed_summary.gpu_communication_range), 65) + distributed_summary.gpu_communication_range + ), + 65, + ) self.assertEqual( profiler.statistic_helper.sum_ranges( - distributed_summary.communication_range), 85) + distributed_summary.communication_range + ), + 85, + ) self.assertEqual( profiler.statistic_helper.sum_ranges( - distributed_summary.computation_range), 220) + distributed_summary.computation_range + ), + 220, + ) self.assertEqual( profiler.statistic_helper.sum_ranges( - distributed_summary.overlap_range), 85) + distributed_summary.overlap_range + ), + 85, + ) self.assertEqual(len(event_summary.items), 4) self.assertEqual(len(event_summary.userdefined_items), 1) self.assertEqual(len(event_summary.model_perspective_items), 5) @@ -476,76 +870,136 @@ class TestProfilerStatistic(unittest.TestCase): self.assertEqual(event_summary.items['conv2d'].cpu_time, 15) self.assertEqual(event_summary.items['conv2d'].general_gpu_time, 25) self.assertEqual( - event_summary.model_perspective_items['Forward'].cpu_time, 90) + event_summary.model_perspective_items['Forward'].cpu_time, 90 + ) self.assertEqual( event_summary.model_perspective_items['Forward'].general_gpu_time, - 315) + 315, + ) self.assertEqual( event_summary.model_perspective_items['Backward'].general_gpu_time, - 0) + 0, + ) self.assertEqual( - event_summary.memory_manipulation_items['AsyncMemcpy'].cpu_time, 15) + event_summary.memory_manipulation_items['AsyncMemcpy'].cpu_time, 15 + ) self.assertEqual( - event_summary.memory_manipulation_items['AsyncMemcpy']. 
- general_gpu_time, 60) + event_summary.memory_manipulation_items[ + 'AsyncMemcpy' + ].general_gpu_time, + 60, + ) print( profiler.profiler_statistic._build_table( statistic_data, sorted_by=profiler.SortedKeys.CPUTotal, op_detail=True, thread_sep=False, - time_unit='ms')) + time_unit='ms', + ) + ) def test_statistic_case3(self): # for coverage, test all time is 0 - root_node = HostPythonNode('Root Node', - profiler.TracerEventType.UserDefined, 0, - float('inf'), 1000, 1001) - profilerstep_node = HostPythonNode('ProfileStep#1', - profiler.TracerEventType.ProfileStep, - 0, 400, 1000, 1001) - dataloader_node = HostPythonNode('Dataloader', - profiler.TracerEventType.Dataloader, 5, - 15, 1000, 1001) - mobilenet_node = HostPythonNode('MobileNet', - profiler.TracerEventType.Forward, 20, - 50, 1000, 1001) - - backward_node = HostPythonNode('Gradient Backward', - profiler.TracerEventType.Backward, 120, - 200, 1000, 1001) + root_node = HostPythonNode( + 'Root Node', + profiler.TracerEventType.UserDefined, + 0, + float('inf'), + 1000, + 1001, + ) + profilerstep_node = HostPythonNode( + 'ProfileStep#1', + profiler.TracerEventType.ProfileStep, + 0, + 400, + 1000, + 1001, + ) + dataloader_node = HostPythonNode( + 'Dataloader', profiler.TracerEventType.Dataloader, 5, 15, 1000, 1001 + ) + mobilenet_node = HostPythonNode( + 'MobileNet', profiler.TracerEventType.Forward, 20, 50, 1000, 1001 + ) + + backward_node = HostPythonNode( + 'Gradient Backward', + profiler.TracerEventType.Backward, + 120, + 200, + 1000, + 1001, + ) optimization_node = HostPythonNode( - 'Optimization', profiler.TracerEventType.Optimization, 220, 300, - 1000, 1001) + 'Optimization', + profiler.TracerEventType.Optimization, + 220, + 300, + 1000, + 1001, + ) userdefined_node = HostPythonNode( - 'Communication Time', profiler.TracerEventType.PythonUserDefined, - 60, 70, 1000, 1001) + 'Communication Time', + profiler.TracerEventType.PythonUserDefined, + 60, + 70, + 1000, + 1001, + ) - conv2d_node = HostPythonNode('conv2d', - profiler.TracerEventType.Operator, 25, 25, - 1000, 1001) + conv2d_node = HostPythonNode( + 'conv2d', profiler.TracerEventType.Operator, 25, 25, 1000, 1001 + ) conv2d_infer_shape = HostPythonNode( - 'conv2d::infer_shape', profiler.TracerEventType.OperatorInner, 25, - 25, 1000, 1001) - conv2d_compute = HostPythonNode('conv2d::compute', - profiler.TracerEventType.OperatorInner, - 25, 25, 1000, 1001) + 'conv2d::infer_shape', + profiler.TracerEventType.OperatorInner, + 25, + 25, + 1000, + 1001, + ) + conv2d_compute = HostPythonNode( + 'conv2d::compute', + profiler.TracerEventType.OperatorInner, + 25, + 25, + 1000, + 1001, + ) conv2d_launchkernel = HostPythonNode( - 'cudalaunchkernel', profiler.TracerEventType.CudaRuntime, 25, 25, - 1000, 1001) + 'cudalaunchkernel', + profiler.TracerEventType.CudaRuntime, + 25, + 25, + 1000, + 1001, + ) - conv2d_kernel = DevicePythonNode('conv2d_kernel', - profiler.TracerEventType.Kernel, 35, - 35, 0, 0, 0) + conv2d_kernel = DevicePythonNode( + 'conv2d_kernel', profiler.TracerEventType.Kernel, 35, 35, 0, 0, 0 + ) another_kernel = DevicePythonNode( 'void phi::funcs::VectorizedBroadcastKernel, phi::funcs::AddFunctor>()', - profiler.TracerEventType.Kernel, 35, 35, 0, 0, 0) + profiler.TracerEventType.Kernel, + 35, + 35, + 0, + 0, + 0, + ) root_node.children_node.append(profilerstep_node) - profilerstep_node.children_node.extend([ - dataloader_node, mobilenet_node, userdefined_node, backward_node, - optimization_node - ]) + profilerstep_node.children_node.extend( + [ + dataloader_node, + 
mobilenet_node, + userdefined_node, + backward_node, + optimization_node, + ] + ) mobilenet_node.children_node.append(conv2d_node) conv2d_node.children_node.extend([conv2d_infer_shape, conv2d_compute]) conv2d_compute.runtime_node.append(conv2d_launchkernel) @@ -554,30 +1008,41 @@ class TestProfilerStatistic(unittest.TestCase): thread_tree = {'thread1001': root_node} extra_info = { 'Process Cpu Utilization': '1.02', - 'System Cpu Utilization': '0.68' + 'System Cpu Utilization': '0.68', } statistic_data = profiler.profiler_statistic.StatisticData( - thread_tree, extra_info) + thread_tree, extra_info + ) time_range_summary = statistic_data.time_range_summary event_summary = statistic_data.event_summary self.assertEqual(event_summary.items['conv2d'].cpu_time, 0) self.assertEqual(event_summary.items['conv2d'].general_gpu_time, 0) self.assertEqual( - event_summary.userdefined_items['Communication Time']. - general_gpu_time, 0) + event_summary.userdefined_items[ + 'Communication Time' + ].general_gpu_time, + 0, + ) for sort_key in [ - profiler.SortedKeys.CPUTotal, profiler.SortedKeys.CPUMax, - profiler.SortedKeys.CPUMin, profiler.SortedKeys.CPUAvg, - profiler.SortedKeys.GPUTotal, profiler.SortedKeys.GPUMax, - profiler.SortedKeys.GPUMin, profiler.SortedKeys.GPUAvg + profiler.SortedKeys.CPUTotal, + profiler.SortedKeys.CPUMax, + profiler.SortedKeys.CPUMin, + profiler.SortedKeys.CPUAvg, + profiler.SortedKeys.GPUTotal, + profiler.SortedKeys.GPUMax, + profiler.SortedKeys.GPUMin, + profiler.SortedKeys.GPUAvg, ]: print( - profiler.profiler_statistic._build_table(statistic_data, - sorted_by=sort_key, - op_detail=True, - thread_sep=False, - time_unit='ms')) + profiler.profiler_statistic._build_table( + statistic_data, + sorted_by=sort_key, + op_detail=True, + thread_sep=False, + time_unit='ms', + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index 71c1e9239c04f816ad10133ba00fff34573990ed..a83574b10013fcf49270cfb86fa7db9778900010 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -23,7 +23,6 @@ main_program = default_main_program() class TestProgram(unittest.TestCase): - def test_program(self): b = main_program.current_block() self.assertEqual(-1, b.parent_idx) @@ -55,20 +54,17 @@ class TestProgram(unittest.TestCase): def test_program_clone(self): prog = Program() - x = prog.global_block().create_var(name='X', - shape=[1000, 784], - dtype='float32') + x = prog.global_block().create_var( + name='X', shape=[1000, 784], dtype='float32' + ) - y = prog.global_block().create_var(name='Y', - shape=[784, 100], - dtype='float32') + y = prog.global_block().create_var( + name='Y', shape=[784, 100], dtype='float32' + ) out = prog.global_block().create_var(name='Out', dtype='float32') - prog.global_block().append_op(type="mul", - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + prog.global_block().append_op( + type="mul", inputs={'X': [x], 'Y': [y]}, outputs={'Out': [out]} + ) # FIXME(yuyang18): We manual compare the output string, since the order # of variable could be changed. 
@@ -78,20 +74,17 @@ class TestProgram(unittest.TestCase): def test_parse_program_from_string(self): prog = Program() - x = prog.global_block().create_var(name='X', - shape=[1000, 784], - dtype='float32') + x = prog.global_block().create_var( + name='X', shape=[1000, 784], dtype='float32' + ) - y = prog.global_block().create_var(name='Y', - shape=[784, 100], - dtype='float32') + y = prog.global_block().create_var( + name='Y', shape=[784, 100], dtype='float32' + ) out = prog.global_block().create_var(name='Out', dtype='float32') - prog.global_block().append_op(type="mul", - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + prog.global_block().append_op( + type="mul", inputs={'X': [x], 'Y': [y]}, outputs={'Out': [out]} + ) binary_str = prog.desc.serialize_to_string() prog_restored = Program.parse_from_string(binary_str) @@ -111,17 +104,19 @@ class TestProgram(unittest.TestCase): self.assertNotEqual(0, len(new_program.blocks[0].all_parameters())) def test_program_inference_optimize(self): - def net(): - reader = fluid.layers.py_reader(capacity=10, - shapes=[[-1, 10], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - use_double_buffer=True) + reader = fluid.layers.py_reader( + capacity=10, + shapes=[[-1, 10], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=True, + ) in_data, label = fluid.layers.read_file(reader) predict_label = fluid.layers.fc(in_data, size=2, act='softmax') loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -132,7 +127,8 @@ class TestProgram(unittest.TestCase): net() no_read_program = main_program._inference_optimize() keep_read_program = main_program._inference_optimize( - prune_read_op=False) + prune_read_op=False + ) no_read_ops = no_read_program.global_block().ops keep_read_ops = keep_read_program.global_block().ops self.assertEqual(len(keep_read_ops) - len(no_read_ops), 2) @@ -158,8 +154,9 @@ class TestProgram(unittest.TestCase): def test_prune_with_input_type_error(self): program = fluid.default_main_program() feed_var_names = [2, 3, 4] - self.assertRaises(ValueError, program._prune_with_input, feed_var_names, - []) + self.assertRaises( + ValueError, program._prune_with_input, feed_var_names, [] + ) def test_random_seed_error(self): program = fluid.default_main_program() @@ -169,21 +166,24 @@ class TestProgram(unittest.TestCase): def test_copy_info_from_error(self): program = fluid.default_main_program() self.assertRaises(TypeError, program._copy_param_info_from, "program") - self.assertRaises(TypeError, program._copy_dist_param_info_from, - "program") + self.assertRaises( + TypeError, program._copy_dist_param_info_from, "program" + ) def test_remove_training_info(self): - def net(): - reader = fluid.layers.py_reader(capacity=10, - shapes=[[-1, 10], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - use_double_buffer=True) + reader = fluid.layers.py_reader( + capacity=10, + shapes=[[-1, 10], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=True, + ) in_data, label = fluid.layers.read_file(reader) predict_label = fluid.layers.fc(in_data, size=2, act='softmax') loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -212,7 +212,6 @@ def 
build_program(): class TestProgramProto(unittest.TestCase): - def test_update_op(self): program = build_program() a = program.desc.serialize_to_string() @@ -241,7 +240,6 @@ class TestProgramProto(unittest.TestCase): class TestProgramHash(unittest.TestCase): - def build_program(self): main_program = paddle.static.Program() startuo_program = paddle.static.Program() @@ -264,8 +262,8 @@ class TestProgramHash(unittest.TestCase): program1, program2 = programs[0], programs[1] # why not write as below? # since the callstack attribute are not equal - #program1 = self.build_program() - #program2 = self.build_program() + # program1 = self.build_program() + # program2 = self.build_program() self.assertTrue(program1.desc.need_update()) self.assertTrue(program2.desc.need_update()) @@ -273,7 +271,8 @@ class TestProgramHash(unittest.TestCase): self.assertFalse(id(program1) == id(program2)) # print(program1, program2) self.assertTrue( - program1.desc.cached_hash_str() == program2.desc.cached_hash_str()) + program1.desc.cached_hash_str() == program2.desc.cached_hash_str() + ) self.assertFalse(program1.desc.need_update()) self.assertFalse(program2.desc.need_update()) @@ -283,8 +282,10 @@ class TestProgramHash(unittest.TestCase): program_clone = program.clone() self.assertFalse(id(program) == id(program_clone)) - self.assertTrue(program.desc.cached_hash_str() == - program_clone.desc.cached_hash_str()) + self.assertTrue( + program.desc.cached_hash_str() + == program_clone.desc.cached_hash_str() + ) def test_program_update(self): program = self.build_program() diff --git a/python/paddle/fluid/tests/unittests/test_program_code.py b/python/paddle/fluid/tests/unittests/test_program_code.py index 8bdb084a5f4cdd9133538d88c6bc5b5a74c0172e..449f97c22f0309e7014ccf627da1073a15ea806b 100644 --- a/python/paddle/fluid/tests/unittests/test_program_code.py +++ b/python/paddle/fluid/tests/unittests/test_program_code.py @@ -19,25 +19,22 @@ import paddle.fluid.layers as layers class TestProgramToReadableCode(unittest.TestCase): - def setUp(self): self.program = fluid.Program() self.block = self.program.current_block() - self.var = self.block.create_var(name="X", - shape=[-1, 23, 48], - dtype='float32') - self.param = self.block.create_parameter(name="W", - shape=[23, 48], - dtype='float32', - trainable=True) - self.op = self.block.append_op(type="abs", - inputs={"X": [self.var]}, - outputs={"Out": [self.var]}) + self.var = self.block.create_var( + name="X", shape=[-1, 23, 48], dtype='float32' + ) + self.param = self.block.create_parameter( + name="W", shape=[23, 48], dtype='float32', trainable=True + ) + self.op = self.block.append_op( + type="abs", inputs={"X": [self.var]}, outputs={"Out": [self.var]} + ) # add control flow op and sub block self.append_cond_op(self.program) def append_cond_op(self, program): - def true_func(): return layers.fill_constant(shape=[2, 3], dtype='int32', value=2) diff --git a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py index 1fb423bc1f1fe3abf6b415d29e63b2d9ec454d57..ad7617654d275e02fc0985b542b7d62e852557d6 100755 --- a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py +++ b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py @@ -20,7 +20,11 @@ import paddle.fluid as fluid import paddle.fluid.core as core from simple_nets import init_data, simple_fc_net, fc_with_batchnorm import seresnext_net -from test_parallel_executor_transformer import transformer, get_feed_data_reader, 
DeviceType +from test_parallel_executor_transformer import ( + transformer, + get_feed_data_reader, + DeviceType, +) from fake_reader import fake_imdb_reader import paddle @@ -32,19 +36,19 @@ def lstm_net(use_feed): hid_dim2 = 96 class_dim = 2 emb_lr = 30.0 - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") emb = fluid.layers.embedding( input=data, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr(learning_rate=emb_lr)) + param_attr=fluid.ParamAttr(learning_rate=emb_lr), + ) fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4) - lstm_h, c = fluid.layers.dynamic_lstm(input=fc0, - size=hid_dim * 4, - is_reverse=False) + lstm_h, c = fluid.layers.dynamic_lstm( + input=fc0, size=hid_dim * 4, is_reverse=False + ) lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') lstm_max_tanh = fluid.layers.tanh(lstm_max) fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') @@ -64,8 +68,10 @@ def simple_fc_net_with_accuracy(use_feed): hidden, size=200, act='relu', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) prediction = fluid.layers.fc(hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) loss = paddle.mean(loss) @@ -90,9 +96,11 @@ def cond_net(use_feed=None): return avg_loss two = fluid.layers.fill_constant([1], 'int32', 2) - pred = (two == 0) - avg_loss = fluid.layers.case([(pred, lambda: loss1(prediction, label))], - lambda: loss2(prediction, label)) + pred = two == 0 + avg_loss = fluid.layers.case( + [(pred, lambda: loss1(prediction, label))], + lambda: loss2(prediction, label), + ) return avg_loss @@ -118,15 +126,15 @@ def optimization_in_cond_net(with_optimize=False): sgd = fluid.optimizer.SGD(learning_rate=0.1) two = fluid.layers.fill_constant([1], 'int32', 2) - pred = (two == 0) + pred = two == 0 avg_loss = fluid.layers.case( [(pred, lambda: loss1(sgd, prediction, label, with_optimize))], - lambda: loss2(sgd, prediction, label, with_optimize)) + lambda: loss2(sgd, prediction, label, with_optimize), + ) return avg_loss class TestProgramPruneBackward(unittest.TestCase): - def program_compare(self, program_a, program_b): assert isinstance( program_a, fluid.framework.Program @@ -142,8 +150,9 @@ class TestProgramPruneBackward(unittest.TestCase): self.assertEqual(len(block_a.ops), len(block_b.ops)) self.assertEqual(len(block_a.vars), len(block_b.vars)) for op_idx in range(len(block_a.ops)): - self.assertEqual(block_a.ops[op_idx].type, - block_b.ops[op_idx].type) + self.assertEqual( + block_a.ops[op_idx].type, block_b.ops[op_idx].type + ) for var_key in list(block_a.vars.keys()): self.assertTrue(block_b.has_var(var_key)) @@ -165,113 +174,111 @@ class TestProgramPruneBackward(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - loss_data_prune, = exe.run(test_prog_prune, - feed=feed_dict, - fetch_list=[loss.name]) - loss_data_orig, = exe.run(test_prog_orig, - feed=feed_dict, - fetch_list=[loss.name]) + (loss_data_prune,) = exe.run( + test_prog_prune, feed=feed_dict, fetch_list=[loss.name] + ) + (loss_data_orig,) = exe.run( + test_prog_orig, feed=feed_dict, fetch_list=[loss.name] + ) self.assertEqual(loss_data_orig, loss_data_prune) def test_simple_fc_net(self): - def optimizer(): 
optimizer = fluid.optimizer.SGD( learning_rate=0.001, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer with self.program_scope_guard(): img, label = init_data() - self.check_prune_correctness(method=simple_fc_net, - feed_dict={ - "image": img, - "label": label - }, - optimizer=optimizer) + self.check_prune_correctness( + method=simple_fc_net, + feed_dict={"image": img, "label": label}, + optimizer=optimizer, + ) def test_simple_fc_net_with_accuracy(self): - def optimizer(): optimizer = fluid.optimizer.SGD( learning_rate=0.001, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer with self.program_scope_guard(): img, label = init_data() - self.check_prune_correctness(method=simple_fc_net_with_accuracy, - feed_dict={ - "image": img, - "label": label - }, - optimizer=optimizer) + self.check_prune_correctness( + method=simple_fc_net_with_accuracy, + feed_dict={"image": img, "label": label}, + optimizer=optimizer, + ) def test_batchnorm_fc(self): - def optimizer(): optimizer = fluid.optimizer.SGD( learning_rate=0.001, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer with self.program_scope_guard(): img, label = init_data() - self.check_prune_correctness(method=fc_with_batchnorm, - feed_dict={ - "image": img, - "label": label - }, - optimizer=optimizer) + self.check_prune_correctness( + method=fc_with_batchnorm, + feed_dict={"image": img, "label": label}, + optimizer=optimizer, + ) def test_seresnet(self): with self.program_scope_guard(): self.check_prune_correctness( method=seresnext_net.model, feed_dict=seresnext_net.feed_dict(use_device=DeviceType.CPU), - optimizer=seresnext_net.optimizer) + optimizer=seresnext_net.optimizer, + ) def test_transformer(self): - def optimizer(): optimizer = fluid.optimizer.Adam( learning_rate=0.001, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer with self.program_scope_guard(): # the program argument is used to distinguish Program and CompiledProgram feed_dict = get_feed_data_reader().get_next( - fluid.Executor(core.CPUPlace()), fluid.default_main_program()) - self.check_prune_correctness(method=transformer, - feed_dict=feed_dict, - optimizer=optimizer) + fluid.Executor(core.CPUPlace()), fluid.default_main_program() + ) + self.check_prune_correctness( + method=transformer, feed_dict=feed_dict, optimizer=optimizer + ) def test_lstm(self): - def optimizer(): optimizer = fluid.optimizer.Adagrad( learning_rate=0.001, - regularization=fluid.regularizer.L2Decay(1e-4)) + regularization=fluid.regularizer.L2Decay(1e-4), + ) return optimizer with self.program_scope_guard(): word_dict_size = 5147 reader = fake_imdb_reader(word_dict_size, 1) - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - feeder = fluid.DataFeeder(feed_list=[data, label], - place=core.CPUPlace()) + feeder = fluid.DataFeeder( + feed_list=[data, label], place=core.CPUPlace() + ) feed_data = feeder.feed(reader()) - self.check_prune_correctness(method=lstm_net, - feed_dict=feed_data, - optimizer=optimizer) + self.check_prune_correctness( + method=lstm_net, feed_dict=feed_data, optimizer=optimizer + ) def test_cond(self): - def optimizer(): 
optimizer = fluid.optimizer.SGD(learning_rate=0.01) return optimizer @@ -280,9 +287,9 @@ class TestProgramPruneBackward(unittest.TestCase): x_in = np.random.random(size=(10, 4)).astype('float32') label_in = np.random.randint(1, size=(10, 1)).astype('int64') feed_dict = {'x': x_in, 'label': label_in} - self.check_prune_correctness(method=cond_net, - feed_dict=feed_dict, - optimizer=optimizer) + self.check_prune_correctness( + method=cond_net, feed_dict=feed_dict, optimizer=optimizer + ) def test_optimization_in_cond(self): x_in = np.random.random(size=(10, 4)).astype('float32') @@ -295,9 +302,9 @@ class TestProgramPruneBackward(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - loss_data_orig, = exe.run(test_prog_orig, - feed=feed_dict, - fetch_list=[loss.name]) + (loss_data_orig,) = exe.run( + test_prog_orig, feed=feed_dict, fetch_list=[loss.name] + ) with self.program_scope_guard(): loss = optimization_in_cond_net(True) @@ -307,9 +314,9 @@ class TestProgramPruneBackward(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - loss_data_prune, = exe.run(test_prog_prune, - feed=feed_dict, - fetch_list=[loss.name]) + (loss_data_prune,) = exe.run( + test_prog_prune, feed=feed_dict, fetch_list=[loss.name] + ) self.program_compare(test_prog_orig, test_prog_prune) self.assertEqual(loss_data_orig, loss_data_prune) diff --git a/python/paddle/fluid/tests/unittests/test_program_to_string.py b/python/paddle/fluid/tests/unittests/test_program_to_string.py index 8c010b90671b18ccbe6dbab19d5dafd4c204657c..fda96d904677013327ddb076e68242b46eb39fc8 100644 --- a/python/paddle/fluid/tests/unittests/test_program_to_string.py +++ b/python/paddle/fluid/tests/unittests/test_program_to_string.py @@ -17,17 +17,16 @@ import unittest class TestProgram(unittest.TestCase): - def test_program_to_string(self): prog = fluid.default_main_program() - a = fluid.layers.data(name="X", - shape=[2, 3], - dtype="float32", - append_batch_size=False) + a = fluid.layers.data( + name="X", shape=[2, 3], dtype="float32", append_batch_size=False + ) c = fluid.layers.fc(a, size=3) prog_string = prog.to_string(throw_on_error=True, with_details=False) - prog_string_with_details = prog.to_string(throw_on_error=False, - with_details=True) + prog_string_with_details = prog.to_string( + throw_on_error=False, with_details=True + ) assert prog_string is not None assert len(prog_string_with_details) > len(prog_string) diff --git a/python/paddle/fluid/tests/unittests/test_protobuf.py b/python/paddle/fluid/tests/unittests/test_protobuf.py index 39f32fb1a2da0b0eeab8600a8588b48a623d41a4..c3f1fa80185bfc4afc3ed715d736bcba092629d8 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf.py @@ -17,7 +17,6 @@ import unittest class TestFrameworkProto(unittest.TestCase): - def test_all(self): op_proto = framework_pb2.OpProto() ipt0 = op_proto.inputs.add() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index 6022bf02602cbd01e35a4f4e04b6811d92821d56..1099855eec7ebe01bc24d7e491203b4c2d66cc76 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -18,7 +18,6 @@ from paddle.fluid.framework import Program class TestOpDesc(unittest.TestCase): - def test_op_desc(self): program_desc = core.ProgramDesc() 
self.assertIsNotNone(program_desc) @@ -79,7 +78,6 @@ class TestOpDesc(unittest.TestCase): class TestProgramDesc(unittest.TestCase): - def test_instance(self): program_desc = core.ProgramDesc() self.assertIsNotNone(program_desc) @@ -107,7 +105,6 @@ class TestProgramDesc(unittest.TestCase): class TestVarDesc(unittest.TestCase): - def test_shape(self): program_desc = core.ProgramDesc() block = program_desc.block(0) @@ -145,8 +142,9 @@ class TestVarDesc(unittest.TestCase): var = block.var(b'my_reader') var.set_type(core.VarDesc.VarType.READER) src_types = [ - core.VarDesc.VarType.INT32, core.VarDesc.VarType.FP64, - core.VarDesc.VarType.FP32 + core.VarDesc.VarType.INT32, + core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32, ] var.set_dtypes(src_types) self.assertEqual(src_types, var.dtypes()) @@ -164,7 +162,6 @@ class TestVarDesc(unittest.TestCase): class TestBlockDesc(unittest.TestCase): - def test_add_var(self): program_desc = core.ProgramDesc() self.assertIsNotNone(program_desc) diff --git a/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py index fb4dbf32630c790e950b802570bd5bc98f79dec9..3c2689585061af5a11a247a01b87b432dcd86e13 100644 --- a/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_proximal_adagrad_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestProximalAdagradOp(OpTest): - def setUp(self): self.op_type = "proximal_adagrad" w = np.random.random((102, 105)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py b/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py index bd78a5e8fd9e599ba88c7f5ef83e7f24fe200454..137594b9a08e13bf6c3f3779356c209596f9ba8e 100644 --- a/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py +++ b/python/paddle/fluid/tests/unittests/test_proximal_gd_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestProximalGDOp(OpTest): - def setUp(self): self.op_type = "proximal_gd" w = np.random.random((102, 105)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py b/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py index a927c2a5e820cc39813335c634892aeb68d0d203..4a953b463bea5141b6555511398f75915825172f 100644 --- a/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_prroi_pool_op.py @@ -22,22 +22,24 @@ from paddle.fluid import Program, program_guard class TestPRROIPoolOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() self.prRoIPool = PyPrRoIPool() - self.outs = self.prRoIPool.compute(self.x, self.rois, - self.output_channels, - self.spatial_scale, - self.pooled_height, - self.pooled_width).astype('float32') + self.outs = self.prRoIPool.compute( + self.x, + self.rois, + self.output_channels, + self.spatial_scale, + self.pooled_height, + self.pooled_width, + ).astype('float32') self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)} self.attrs = { 'output_channels': self.output_channels, 'spatial_scale': self.spatial_scale, 'pooled_height': self.pooled_height, - 'pooled_width': self.pooled_width + 'pooled_width': self.pooled_width, } self.outputs = {'Out': self.outs} @@ -63,14 +65,18 @@ class TestPRROIPoolOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.uniform( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = 
np.random.uniform( - 0, self.height // self.spatial_scale - self.pooled_height) - - x2 = np.random.uniform(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.uniform(y1 + self.pooled_height, - self.height // self.spatial_scale) + 0, self.height // self.spatial_scale - self.pooled_height + ) + + x2 = np.random.uniform( + x1 + self.pooled_width, self.width // self.spatial_scale + ) + y2 = np.random.uniform( + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) self.rois_num = len(rois) @@ -95,24 +101,24 @@ class TestPRROIPoolOp(OpTest): x = fluid.layers.data( name="X", shape=[self.channels, self.height, self.width], - dtype="float32") - rois = fluid.layers.data(name="ROIs", - shape=[4], - dtype="float32", - lod_level=1) + dtype="float32", + ) + rois = fluid.layers.data( + name="ROIs", shape=[4], dtype="float32", lod_level=1 + ) output = fluid.layers.prroi_pool(x, rois, 0.25, 2, 2) loss = paddle.mean(output) optimizer = fluid.optimizer.SGD(learning_rate=1e-3) optimizer.minimize(loss) input_x = fluid.create_lod_tensor(self.x, [], place) - input_rois = fluid.create_lod_tensor(self.rois[:, 1:5], - self.rois_lod, place) + input_rois = fluid.create_lod_tensor( + self.rois[:, 1:5], self.rois_lod, place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - exe.run(fluid.default_main_program(), { - 'X': input_x, - "ROIs": input_rois - }) + exe.run( + fluid.default_main_program(), {'X': input_x, "ROIs": input_rois} + ) def test_net(self): places = [fluid.CPUPlace()] @@ -123,47 +129,51 @@ class TestPRROIPoolOp(OpTest): def test_errors(self): with program_guard(Program(), Program()): - x = fluid.layers.data(name="x", - shape=[245, 30, 30], - dtype="float32") - rois = fluid.layers.data(name="rois", - shape=[4], - dtype="float32", - lod_level=1) + x = fluid.layers.data( + name="x", shape=[245, 30, 30], dtype="float32" + ) + rois = fluid.layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1 + ) # spatial_scale must be float type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 2, 7, - 7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 2, 7, 7 + ) # pooled_height must be int type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, - 0.7, 7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 0.7, 7 + ) # pooled_width must be int type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, - 7, 0.7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 7, 0.7 + ) class TestPRROIPoolOpTensorRoIs(OpTest): - def set_data(self): self.init_test_case() self.make_rois() self.prRoIPool = PyPrRoIPool() - self.outs = self.prRoIPool.compute(self.x, self.rois, - self.output_channels, - self.spatial_scale, - self.pooled_height, - self.pooled_width).astype('float32') + self.outs = self.prRoIPool.compute( + self.x, + self.rois, + self.output_channels, + self.spatial_scale, + self.pooled_height, + self.pooled_width, + ).astype('float32') self.rois_index = np.array(self.rois_lod).reshape([-1]).astype(np.int64) self.inputs = { 'X': self.x, 'ROIs': self.rois[:, 1:5], - 'BatchRoINums': self.rois_index + 'BatchRoINums': self.rois_index, } self.attrs = { 'output_channels': self.output_channels, 'spatial_scale': self.spatial_scale, 'pooled_height': self.pooled_height, - 'pooled_width': self.pooled_width + 'pooled_width': self.pooled_width, } self.outputs = {'Out': self.outs} @@ -189,14 +199,18 @@ class 
TestPRROIPoolOpTensorRoIs(OpTest): self.rois_lod.append(bno + 1) for i in range(bno + 1): x1 = np.random.uniform( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.uniform( - 0, self.height // self.spatial_scale - self.pooled_height) - - x2 = np.random.uniform(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.uniform(y1 + self.pooled_height, - self.height // self.spatial_scale) + 0, self.height // self.spatial_scale - self.pooled_height + ) + + x2 = np.random.uniform( + x1 + self.pooled_width, self.width // self.spatial_scale + ) + y2 = np.random.uniform( + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) self.rois_num = len(rois) @@ -221,27 +235,28 @@ class TestPRROIPoolOpTensorRoIs(OpTest): x = fluid.layers.data( name="X", shape=[self.channels, self.height, self.width], - dtype="float32") + dtype="float32", + ) rois = fluid.layers.data(name="ROIs", shape=[4], dtype="float32") - rois_index = fluid.layers.data(name='rois_idx', - shape=[], - dtype="int64") - output = fluid.layers.prroi_pool(x, - rois, - 0.25, - 2, - 2, - batch_roi_nums=rois_index) + rois_index = fluid.layers.data( + name='rois_idx', shape=[], dtype="int64" + ) + output = fluid.layers.prroi_pool( + x, rois, 0.25, 2, 2, batch_roi_nums=rois_index + ) loss = paddle.mean(output) optimizer = fluid.optimizer.SGD(learning_rate=1e-3) optimizer.minimize(loss) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - exe.run(fluid.default_main_program(), { - 'X': self.x, - "ROIs": self.rois[:, 1:5], - "rois_idx": self.rois_index - }) + exe.run( + fluid.default_main_program(), + { + 'X': self.x, + "ROIs": self.rois[:, 1:5], + "rois_idx": self.rois_index, + }, + ) def test_net(self): places = [fluid.CPUPlace()] @@ -252,42 +267,50 @@ class TestPRROIPoolOpTensorRoIs(OpTest): def test_errors(self): with program_guard(Program(), Program()): - x = fluid.layers.data(name="x", - shape=[245, 30, 30], - dtype="float32") - rois = fluid.layers.data(name="rois", - shape=[4], - dtype="float32", - lod_level=1) + x = fluid.layers.data( + name="x", shape=[245, 30, 30], dtype="float32" + ) + rois = fluid.layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1 + ) # spatial_scale must be float type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 2, 7, - 7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 2, 7, 7 + ) # pooled_height must be int type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, - 0.7, 7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 0.7, 7 + ) # pooled_width must be int type - self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, - 7, 0.7) + self.assertRaises( + TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 7, 0.7 + ) def test_bad_x(): - x = fluid.layers.data(name='data1', - shape=[2, 3, 16, 16], - dtype='int64', - append_batch_size=False) - label = fluid.layers.data(name='label1', - shape=[2, 4], - dtype='float32', - lod_level=1, - append_batch_size=False) + x = fluid.layers.data( + name='data1', + shape=[2, 3, 16, 16], + dtype='int64', + append_batch_size=False, + ) + label = fluid.layers.data( + name='label1', + shape=[2, 4], + dtype='float32', + lod_level=1, + append_batch_size=False, + ) output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2) self.assertRaises(TypeError, test_bad_x) def test_bad_y(): - x = 
fluid.layers.data(name='data2', - shape=[2, 3, 16, 16], - dtype='float32', - append_batch_size=False) + x = fluid.layers.data( + name='data2', + shape=[2, 3, 16, 16], + dtype='float32', + append_batch_size=False, + ) label = [[1, 2, 3, 4], [2, 3, 4, 5]] output = fluid.layers.prroi_pool(x, label, 0.25, 2, 2) @@ -296,5 +319,6 @@ class TestPRROIPoolOpTensorRoIs(OpTest): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py index b6386e696746688c6551abe606664709c1168726..5b7596d813d74318345bf6788921391a973e0245 100644 --- a/python/paddle/fluid/tests/unittests/test_prune.py +++ b/python/paddle/fluid/tests/unittests/test_prune.py @@ -23,7 +23,6 @@ import contextlib class TestPrune(unittest.TestCase): - def net(self): x = fluid.layers.data(name='x', shape=[2], dtype='float32') label = fluid.layers.data(name="label", shape=[1], dtype="int64") @@ -39,14 +38,24 @@ class TestPrune(unittest.TestCase): with fluid.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) - self.assertEqual([op.type for op in block.ops], [ - "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in block.ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) pruned_program = program._prune_with_input( - feeded_var_names=[y.name, label.name], targets=[loss]) + feeded_var_names=[y.name, label.name], targets=[loss] + ) self.assertEqual(len(pruned_program.global_block().ops), 2) - self.assertEqual([op.type for op in pruned_program.global_block().ops], - ["cross_entropy2", "reduce_mean"]) + self.assertEqual( + [op.type for op in pruned_program.global_block().ops], + ["cross_entropy2", "reduce_mean"], + ) def test_prune(self): program = framework.Program() @@ -55,16 +64,28 @@ class TestPrune(unittest.TestCase): with fluid.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) - self.assertEqual([op.type for op in block.ops], [ - "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in block.ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) pruned_program = program._prune(targets=[loss]) self.assertEqual(len(pruned_program.global_block().ops), 5) - self.assertEqual([op.type for op in pruned_program.global_block().ops], - [ - "mul", "elementwise_add", "softmax", - "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in pruned_program.global_block().ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) def test_prune_target_not_list(self): program = framework.Program() @@ -73,16 +94,28 @@ class TestPrune(unittest.TestCase): with fluid.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) - self.assertEqual([op.type for op in block.ops], [ - "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in block.ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) pruned_program = program._prune(targets=loss) self.assertEqual(len(pruned_program.global_block().ops), 5) - self.assertEqual([op.type for op in pruned_program.global_block().ops], - [ - "mul", 
"elementwise_add", "softmax", - "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in pruned_program.global_block().ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) def test_prune_target_none(self): program = framework.Program() @@ -91,15 +124,23 @@ class TestPrune(unittest.TestCase): with fluid.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) - self.assertEqual([op.type for op in block.ops], [ - "mul", "elementwise_add", "softmax", "cross_entropy2", "reduce_mean" - ]) + self.assertEqual( + [op.type for op in block.ops], + [ + "mul", + "elementwise_add", + "softmax", + "cross_entropy2", + "reduce_mean", + ], + ) try: pruned_program = program._prune(targets=None) except ValueError as e: self.assertIn( "All targets of Program._prune_with_input() can only be Variable or Operator", - str(e)) + str(e), + ) def mock(self, program, feed, fetch, optimize_ops): @@ -116,7 +157,6 @@ def _mock_guard(mock): class TestExecutorRunAutoPrune(unittest.TestCase): - def net1(self): x = fluid.layers.data(name='x', shape=[2], dtype='float32') label = fluid.layers.data(name="label", shape=[1], dtype="int64") @@ -124,11 +164,11 @@ class TestExecutorRunAutoPrune(unittest.TestCase): name="fc_weight", learning_rate=0.5, initializer=fluid.initializer.Constant(1.0), - trainable=True) - y = fluid.layers.fc(input=[x], - size=2, - act="softmax", - param_attr=w_param_attrs) + trainable=True, + ) + y = fluid.layers.fc( + input=[x], size=2, act="softmax", param_attr=w_param_attrs + ) loss1 = fluid.layers.cross_entropy(input=y, label=label) loss1 = paddle.mean(x=loss1) loss2 = fluid.layers.cross_entropy(input=y, label=label) @@ -145,25 +185,35 @@ class TestExecutorRunAutoPrune(unittest.TestCase): name="fc_weight1", learning_rate=0.5, initializer=fluid.initializer.Constant(1.0), - trainable=True) + trainable=True, + ) w2_param_attrs = fluid.ParamAttr( name="fc_weight2", learning_rate=0.5, initializer=fluid.initializer.Constant(1.0), - trainable=True) - y1 = fluid.layers.fc(input=[x1], - size=2, - act="softmax", - param_attr=w1_param_attrs) - y2 = fluid.layers.fc(input=[x2], - size=2, - act="softmax", - param_attr=w2_param_attrs) + trainable=True, + ) + y1 = fluid.layers.fc( + input=[x1], size=2, act="softmax", param_attr=w1_param_attrs + ) + y2 = fluid.layers.fc( + input=[x2], size=2, act="softmax", param_attr=w2_param_attrs + ) loss1 = fluid.layers.cross_entropy(input=y1, label=label) loss1 = paddle.mean(x=loss1) loss2 = fluid.layers.cross_entropy(input=y2, label=label) loss2 = paddle.mean(x=loss2) - return x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs, w2_param_attrs + return ( + x1, + x2, + y1, + y2, + label, + loss1, + loss2, + w1_param_attrs, + w2_param_attrs, + ) def test_not_prune(self): """ @@ -179,13 +229,12 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNotNone(scope.find_var(loss2.name)) @@ -202,22 +251,24 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight_init = 
np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) - self.assertIsNone(scope.find_var(loss2.name)) #loss2 is pruned + self.assertIsNone(scope.find_var(loss2.name)) # loss2 is pruned weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - np.testing.assert_array_equal(weight_init, - weight) # weight not changed + scope.find_var(w_param_attrs.name).get_tensor() + ) + np.testing.assert_array_equal( + weight_init, weight + ) # weight not changed def test_prune_fetches_with_optimizer(self): """ @@ -235,22 +286,24 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight_init = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) - self.assertIsNone(scope.find_var(loss2.name)) #loss2 is pruned + self.assertIsNone(scope.find_var(loss2.name)) # loss2 is pruned weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - self.assertFalse(np.array_equal(weight_init, - weight)) # weight changed + scope.find_var(w_param_attrs.name).get_tensor() + ) + self.assertFalse( + np.array_equal(weight_init, weight) + ) # weight changed def test_prune_compiled_program(self): program = framework.Program() @@ -264,25 +317,29 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) compiled_prog = fluid.CompiledProgram( - program).with_data_parallel(loss_name=loss1.name, - places=fluid.CPUPlace()) + program + ).with_data_parallel( + loss_name=loss1.name, places=fluid.CPUPlace() + ) weight_init = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(compiled_prog, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + compiled_prog, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - self.assertFalse(np.array_equal(weight_init, - weight)) # weight changed + scope.find_var(w_param_attrs.name).get_tensor() + ) + self.assertFalse( + np.array_equal(weight_init, weight) + ) # weight changed def test_prune_feed_without_optimizer(self): program = framework.Program() @@ -294,22 +351,24 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight_init = 
np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - y.name: x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + program, + feed={y.name: x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - np.testing.assert_array_equal(weight_init, - weight) # weight unchanged + scope.find_var(w_param_attrs.name).get_tensor() + ) + np.testing.assert_array_equal( + weight_init, weight + ) # weight unchanged def test_prune_feed_with_optimizer(self): program = framework.Program() @@ -324,15 +383,14 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - self.assertRaises(Exception, - exe.run, - program, - feed={ - y.name: x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + self.assertRaises( + Exception, + exe.run, + program, + feed={y.name: x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) @@ -358,16 +416,16 @@ class TestExecutorRunAutoPrune(unittest.TestCase): sgd_optimizer.minimize(loss1) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') - label_np = np.random.randint(1, - size=(10, 1)).astype('int64') + label_np = np.random.randint(1, size=(10, 1)).astype( + 'int64' + ) for i in range(10): - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) if i == 0: self.assertEqual(exe.prune_called_times, 1) else: @@ -387,38 +445,54 @@ class TestExecutorRunAutoPrune(unittest.TestCase): scope = fluid.Scope() with fluid.scope_guard(scope): with fluid.program_guard(program, startup_program): - (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs, - w2_param_attrs) = self.net2() + ( + x1, + x2, + y1, + y2, + label, + loss1, + loss2, + w1_param_attrs, + w2_param_attrs, + ) = self.net2() adam_optimizer1 = fluid.optimizer.AdamOptimizer( - learning_rate=0.5) + learning_rate=0.5 + ) train1 = adam_optimizer1.minimize(loss1) adam_optimizer2 = fluid.optimizer.AdamOptimizer( - learning_rate=0.5) + learning_rate=0.5 + ) train2 = adam_optimizer2.minimize(loss2) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') - label_np = np.random.randint(1, - size=(10, 1)).astype('int64') + label_np = np.random.randint(1, size=(10, 1)).astype( + 'int64' + ) for i in range(10): if i % 2: - res = exe.run(program, - feed={ - 'x1': x_np, - 'x2': x_np, - 'label': label_np - }, - fetch_list=[loss1, loss2, train1], - use_prune=True) + res = exe.run( + program, + feed={ + 'x1': x_np, + 'x2': x_np, + 'label': label_np, + }, + fetch_list=[loss1, loss2, train1], + use_prune=True, + ) else: - res = exe.run(program, - feed={ - 'x1': x_np, - 'x2': x_np, - 'label': label_np - }, - fetch_list=[loss1, loss2, train2], - use_prune=True) + res = exe.run( + program, + feed={ + 'x1': x_np, + 'x2': x_np, + 
'label': label_np, + }, + fetch_list=[loss1, loss2, train2], + use_prune=True, + ) if i == 0: self.assertEqual(exe.prune_called_times, 1) elif i == 1: @@ -448,19 +522,21 @@ class TestExecutorRunAutoPrune(unittest.TestCase): sgd_optimizer.minimize(loss1) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') - label_np = np.random.randint(1, - size=(10, 1)).astype('int64') + label_np = np.random.randint(1, size=(10, 1)).astype( + 'int64' + ) compiled_prog = fluid.CompiledProgram( - program).with_data_parallel(loss_name=loss1.name, - places=fluid.CPUPlace()) + program + ).with_data_parallel( + loss_name=loss1.name, places=fluid.CPUPlace() + ) for i in range(10): - res = exe.run(compiled_prog, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=True) + res = exe.run( + compiled_prog, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=True, + ) if i == 0: self.assertEqual(exe.prune_called_times, 1) else: @@ -486,43 +562,43 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_without_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) scope = fluid.Scope() # use_prune with fluid.scope_guard(scope): exe.run(startup_program) - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name, train1], - use_prune=True) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name, train1], + use_prune=True, + ) weight_with_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) # expected scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) - exe.run(cloned_program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + exe.run( + cloned_program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_expected = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) @@ -540,60 +616,72 @@ class TestExecutorRunAutoPrune(unittest.TestCase): # do not use_prune with fluid.scope_guard(scope): with fluid.program_guard(program, startup_program): - (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs, - w2_param_attrs) = self.net2() + ( + x1, + x2, + y1, + y2, + label, + loss1, + loss2, + w1_param_attrs, + w2_param_attrs, + ) = self.net2() adam_optimizer1 = fluid.optimizer.AdamOptimizer( - learning_rate=0.5) + learning_rate=0.5 + ) train1 = adam_optimizer1.minimize(loss1) cloned_program = program.clone() adam_optimizer2 = fluid.optimizer.AdamOptimizer( - learning_rate=0.5) + learning_rate=0.5 + ) train2 = adam_optimizer2.minimize(loss2) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') compiled_prog1 = fluid.CompiledProgram( - 
program).with_data_parallel(loss_name=loss1.name, - places=[fluid.CPUPlace()] * 2) + program + ).with_data_parallel( + loss_name=loss1.name, places=[fluid.CPUPlace()] * 2 + ) compiled_prog2 = fluid.CompiledProgram( - program).with_data_parallel(loss_name=loss2.name, - places=[fluid.CPUPlace()] * 2) + program + ).with_data_parallel( + loss_name=loss2.name, places=[fluid.CPUPlace()] * 2 + ) for i in range(10): if i % 2 == 1: - res = exe.run(compiled_prog1, - feed=[{ - 'x1': x_np[0:5, :], - 'label': label_np[0:5, :] - }, { - 'x1': x_np[5:, :], - 'label': label_np[5:, :] - }], - fetch_list=[loss1.name, train1], - use_prune=True) + res = exe.run( + compiled_prog1, + feed=[ + {'x1': x_np[0:5, :], 'label': label_np[0:5, :]}, + {'x1': x_np[5:, :], 'label': label_np[5:, :]}, + ], + fetch_list=[loss1.name, train1], + use_prune=True, + ) else: - res = exe.run(compiled_prog2, - feed={ - 'x2': x_np, - 'label': label_np - }, - fetch_list=[loss2.name, train2], - use_prune=True) + res = exe.run( + compiled_prog2, + feed={'x2': x_np, 'label': label_np}, + fetch_list=[loss2.name, train2], + use_prune=True, + ) weight1 = np.array( - scope.find_var(w1_param_attrs.name).get_tensor()) + scope.find_var(w1_param_attrs.name).get_tensor() + ) # expected scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) for i in range(10): if i % 2 == 1: - exe.run(cloned_program, - feed={ - 'x1': x_np, - 'x2': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + exe.run( + cloned_program, + feed={'x1': x_np, 'x2': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight2 = np.array(scope.find_var(w1_param_attrs.name).get_tensor()) np.testing.assert_allclose(weight1, weight2, rtol=1e-05) @@ -619,44 +707,44 @@ class TestExecutorRunAutoPrune(unittest.TestCase): x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_without_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) scope = fluid.Scope() # use_prune with fluid.scope_guard(scope): exe.run(startup_program) - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name, train1], - use_prune=True) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name, train1], + use_prune=True, + ) weight_with_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) # expected scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) - exe.run(cloned_program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + exe.run( + cloned_program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_expected = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) @@ -671,8 +759,17 @@ class TestExecutorRunAutoPrune(unittest.TestCase): scope = fluid.Scope() with fluid.scope_guard(scope): with fluid.program_guard(program, 
startup_program): - (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs, - w2_param_attrs) = self.net2() + ( + x1, + x2, + y1, + y2, + label, + loss1, + loss2, + w1_param_attrs, + w2_param_attrs, + ) = self.net2() loss1.persistable = True loss2.persistable = True sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.5) @@ -682,31 +779,36 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight1_init = np.array( - scope.find_var(w1_param_attrs.name).get_tensor()) + scope.find_var(w1_param_attrs.name).get_tensor() + ) weight2_init = np.array( - scope.find_var(w2_param_attrs.name).get_tensor()) + scope.find_var(w2_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x1': x_np, - 'label': label_np - }, - fetch_list=[loss1.name, train1], - use_prune=True) + res = exe.run( + program, + feed={'x1': x_np, 'label': label_np}, + fetch_list=[loss1.name, train1], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(w1_param_attrs.name)) self.assertIsNotNone(scope.find_var(w2_param_attrs.name)) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) weight1 = np.array( - scope.find_var(w1_param_attrs.name).get_tensor()) + scope.find_var(w1_param_attrs.name).get_tensor() + ) weight2 = np.array( - scope.find_var(w2_param_attrs.name).get_tensor()) - self.assertFalse(np.array_equal(weight1_init, - weight1)) # weight changed - np.testing.assert_array_equal(weight2_init, - weight2) # weight2 unchanged + scope.find_var(w2_param_attrs.name).get_tensor() + ) + self.assertFalse( + np.array_equal(weight1_init, weight1) + ) # weight changed + np.testing.assert_array_equal( + weight2_init, weight2 + ) # weight2 unchanged def test_prune_override_use_prune(self): ''' @@ -727,43 +829,43 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_without_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) scope = fluid.Scope() # use_prune with fluid.scope_guard(scope): exe.run(startup_program) - res = exe.run(program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name, train1]) + res = exe.run( + program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name, train1], + ) weight_with_prune = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) # expected scope = fluid.Scope() with fluid.scope_guard(scope): exe.run(startup_program) - exe.run(cloned_program, - feed={ - 'x': x_np, - 'label': label_np - }, - fetch_list=[loss1.name], - use_prune=False) + exe.run( + cloned_program, + feed={'x': x_np, 'label': label_np}, + fetch_list=[loss1.name], + use_prune=False, + ) weight_expected = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) @@ -779,23 
+881,25 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight_init = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - y.name: x_np, - 'label': label_np - }, - fetch_list=[y.name, loss1.name], - use_prune=True) + res = exe.run( + program, + feed={y.name: x_np, 'label': label_np}, + fetch_list=[y.name, loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) self.assertIsNone(scope.find_var(x.name)) weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - np.testing.assert_array_equal(weight_init, - weight) # weight unchanged + scope.find_var(w_param_attrs.name).get_tensor() + ) + np.testing.assert_array_equal( + weight_init, weight + ) # weight unchanged def test_prune_feed_var_in_fetchlist_2(self): # the variable to be fed is leaf @@ -808,22 +912,24 @@ class TestExecutorRunAutoPrune(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) weight_init = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) + scope.find_var(w_param_attrs.name).get_tensor() + ) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') - res = exe.run(program, - feed={ - x.name: x_np, - 'label': label_np - }, - fetch_list=[x.name, loss1.name], - use_prune=True) + res = exe.run( + program, + feed={x.name: x_np, 'label': label_np}, + fetch_list=[x.name, loss1.name], + use_prune=True, + ) self.assertIsNotNone(scope.find_var(loss1.name)) self.assertIsNone(scope.find_var(loss2.name)) weight = np.array( - scope.find_var(w_param_attrs.name).get_tensor()) - np.testing.assert_array_equal(weight_init, - weight) # weight unchanged + scope.find_var(w_param_attrs.name).get_tensor() + ) + np.testing.assert_array_equal( + weight_init, weight + ) # weight unchanged if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_prune_gate_by_capacity_op.py b/python/paddle/fluid/tests/unittests/test_prune_gate_by_capacity_op.py index 4b73e05f502c66ace4b12d19824b409ef13a5060..0caab5573470ef75f595d86afe1641a98617bb1c 100644 --- a/python/paddle/fluid/tests/unittests/test_prune_gate_by_capacity_op.py +++ b/python/paddle/fluid/tests/unittests/test_prune_gate_by_capacity_op.py @@ -21,7 +21,7 @@ from paddle.fluid.framework import _test_eager_guard def count(x, upper_num): - res = np.zeros((upper_num, )).astype(int) + res = np.zeros((upper_num,)).astype(int) for i in x.reshape(-1): if i >= 0 and i < len(res): res[i] += 1 @@ -64,20 +64,22 @@ def assert_allclose(output, expected, n_expert): assert np.allclose(c1, c2) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPruneGateByCapacityAPI1(unittest.TestCase): - def init_test_case(self): - self.gate_idx = np.random.randint(0, self.n_expert, - size=(200, )).astype(self.dtype) + self.gate_idx = np.random.randint(0, self.n_expert, size=(200,)).astype( + self.dtype + ) expert_count = count(self.gate_idx, self.n_expert * self.n_worker) - capacity = np.random.randint(10, 200, size=(self.n_expert, )) - self.expert_count = limit_by_capacity(expert_count, capacity, - 
self.n_worker).astype(self.dtype) - self.out = prune_gate_by_capacity(self.gate_idx, self.expert_count, - self.n_expert, - self.n_worker).astype(self.dtype) + capacity = np.random.randint(10, 200, size=(self.n_expert,)) + self.expert_count = limit_by_capacity( + expert_count, capacity, self.n_worker + ).astype(self.dtype) + self.out = prune_gate_by_capacity( + self.gate_idx, self.expert_count, self.n_expert, self.n_worker + ).astype(self.dtype) self.place = paddle.CUDAPlace(0) def setUp(self): @@ -89,29 +91,35 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - gate_idx_tensor = paddle.static.data('GateIdx', - shape=self.gate_idx.shape, - dtype="int64") + gate_idx_tensor = paddle.static.data( + 'GateIdx', shape=self.gate_idx.shape, dtype="int64" + ) expert_count_tensor = paddle.static.data( - 'ExpertCount', shape=self.expert_count.shape, dtype="int64") - out = utils._prune_gate_by_capacity(gate_idx_tensor, - expert_count_tensor, - self.n_expert, self.n_worker) + 'ExpertCount', shape=self.expert_count.shape, dtype="int64" + ) + out = utils._prune_gate_by_capacity( + gate_idx_tensor, + expert_count_tensor, + self.n_expert, + self.n_worker, + ) exe = paddle.static.Executor(self.place) - res = exe.run(feed={ - 'GateIdx': self.gate_idx, - 'ExpertCount': self.expert_count, - }, - fetch_list=out) + res = exe.run( + feed={ + 'GateIdx': self.gate_idx, + 'ExpertCount': self.expert_count, + }, + fetch_list=out, + ) assert_allclose(res[0], self.out, self.n_expert) def func_dygraph_api(self): paddle.disable_static(self.place) gate_idx_tensor = paddle.to_tensor(self.gate_idx) expert_count_tensor = paddle.to_tensor(self.expert_count) - out = utils._prune_gate_by_capacity(gate_idx_tensor, - expert_count_tensor, self.n_expert, - self.n_worker) + out = utils._prune_gate_by_capacity( + gate_idx_tensor, expert_count_tensor, self.n_expert, self.n_worker + ) assert_allclose(out.numpy(), self.out, self.n_expert) def test_dygraph_api(self): @@ -120,10 +128,10 @@ class TestPruneGateByCapacityAPI1(unittest.TestCase): self.func_dygraph_api() -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestPruneGateByCapacityAPI2(TestPruneGateByCapacityAPI1): - def setUp(self): self.n_expert = 12 self.n_worker = 1 diff --git a/python/paddle/fluid/tests/unittests/test_ps_dispatcher.py b/python/paddle/fluid/tests/unittests/test_ps_dispatcher.py index f290d164333de231fe74668bfdfbf4b717c38c8b..88d6c6f45ac47f0476a58b37aa8a2c2f5c098427 100644 --- a/python/paddle/fluid/tests/unittests/test_ps_dispatcher.py +++ b/python/paddle/fluid/tests/unittests/test_ps_dispatcher.py @@ -13,15 +13,20 @@ # limitations under the License. 
import unittest -from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, HashName, PSDispatcher +from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import ( + RoundRobin, + HashName, + PSDispatcher, +) class TestPsDispatcher(unittest.TestCase): - def setUp(self): self.points = [ - "127.0.0.1:1001", "127.0.0.1:1002", "127.0.0.1:1003", - "127.0.0.1:1004" + "127.0.0.1:1001", + "127.0.0.1:1002", + "127.0.0.1:1003", + "127.0.0.1:1004", ] def test_base(self): @@ -33,9 +38,7 @@ class TestPsDispatcher(unittest.TestCase): base.dispatch([]) def test_hash(self): - class Var: - def __init__(self, index): self._name = "var_{}".format(index) @@ -53,9 +56,7 @@ class TestPsDispatcher(unittest.TestCase): self.assertEqual(len(eplist), 4) def test_round_rodin(self): - class Var: - def __init__(self, index): self._name = "var_{}".format(index) diff --git a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py index d386c7145ddb4acc6341d3c18aadb22f65b14055..0bb1adb4ad65fd222fd99004d083dfbff880678b 100644 --- a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py @@ -19,8 +19,15 @@ import unittest from op_test import OpTest -def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale, - pooled_height, pooled_width): +def calc_psroi_pool( + x, + rois, + rois_num_per_img, + output_channels, + spatial_scale, + pooled_height, + pooled_width, +): """ Psroi_pool implemented by Numpy. x: 4-D as (N, C, H, W), @@ -43,8 +50,8 @@ def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale, batch_id += 1 roi_start_w = round(roi[0]) * spatial_scale roi_start_h = round(roi[1]) * spatial_scale - roi_end_w = (round(roi[2]) + 1.) * spatial_scale - roi_end_h = (round(roi[3]) + 1.) * spatial_scale + roi_end_w = (round(roi[2]) + 1.0) * spatial_scale + roi_end_h = (round(roi[3]) + 1.0) * spatial_scale roi_height = max(roi_end_h - roi_start_h, 0.1) roi_width = max(roi_end_w - roi_start_w, 0.1) @@ -58,13 +65,17 @@ def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale, for ph in range(pooled_height): for pw in range(pooled_width): hstart = int( - math.floor(float(ph) * bin_size_h + roi_start_h)) + math.floor(float(ph) * bin_size_h + roi_start_h) + ) wstart = int( - math.floor(float(pw) * bin_size_w + roi_start_w)) + math.floor(float(pw) * bin_size_w + roi_start_w) + ) hend = int( - math.ceil(float(ph + 1) * bin_size_h + roi_start_h)) + math.ceil(float(ph + 1) * bin_size_h + roi_start_h) + ) wend = int( - math.ceil(float(pw + 1) * bin_size_w + roi_start_w)) + math.ceil(float(pw + 1) * bin_size_w + roi_start_w) + ) hstart = min(max(hstart, 0), x.shape[2]) hend = min(max(hend, 0), x.shape[2]) wstart = min(max(wstart, 0), x.shape[3]) @@ -72,37 +83,41 @@ def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale, c_in = (c * pooled_height + ph) * pooled_width + pw is_empty = (hend <= hstart) or (wend <= wstart) - out_sum = 0. + out_sum = 0.0 for ih in range(hstart, hend): for iw in range(wstart, wend): out_sum += x_i[c_in, ih, iw] bin_area = (hend - hstart) * (wend - wstart) - out_data[i, c, ph, - pw] = 0. 
if is_empty else (out_sum / - float(bin_area)) + out_data[i, c, ph, pw] = ( + 0.0 if is_empty else (out_sum / float(bin_area)) + ) return out_data class TestPSROIPoolOp(OpTest): - def set_data(self): paddle.enable_static() self.init_test_case() self.make_rois() - self.outs = calc_psroi_pool(self.x, self.boxes, self.boxes_num, - self.output_channels, self.spatial_scale, - self.pooled_height, - self.pooled_width).astype('float64') + self.outs = calc_psroi_pool( + self.x, + self.boxes, + self.boxes_num, + self.output_channels, + self.spatial_scale, + self.pooled_height, + self.pooled_width, + ).astype('float64') self.inputs = { 'X': self.x, 'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod), - 'RoisNum': self.boxes_num + 'RoisNum': self.boxes_num, } self.attrs = { 'output_channels': self.output_channels, 'spatial_scale': self.spatial_scale, 'pooled_height': self.pooled_height, - 'pooled_width': self.pooled_width + 'pooled_width': self.pooled_width, } self.outputs = {'Out': self.outs} @@ -128,26 +143,32 @@ class TestPSROIPoolOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.random_integers( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.random_integers( - 0, self.height // self.spatial_scale - self.pooled_height) + 0, self.height // self.spatial_scale - self.pooled_height + ) - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width // self.spatial_scale) + x2 = np.random.random_integers( + x1 + self.pooled_width, self.width // self.spatial_scale + ) y2 = np.random.random_integers( - y1 + self.pooled_height, self.height // self.spatial_scale) + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) self.rois_num = len(rois) self.rois_with_batch_id = np.array(rois).astype('float64') self.boxes = self.rois_with_batch_id[:, 1:] - self.boxes_num = np.array([bno + 1 for bno in range(self.batch_size) - ]).astype('int32') + self.boxes_num = np.array( + [bno + 1 for bno in range(self.batch_size)] + ).astype('int32') def setUp(self): self.op_type = 'psroi_pool' self.python_api = lambda x, boxes, boxes_num, pooled_height, pooled_width, output_channels, spatial_scale: paddle.vision.ops.psroi_pool( - x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale) + x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale + ) self.set_data() def test_check_output(self): @@ -158,33 +179,38 @@ class TestPSROIPoolOp(OpTest): class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): - def setUp(self): self.x = np.random.random([2, 490, 28, 28]).astype(np.float32) - self.boxes = np.array([[1, 5, 8, 10], [4, 2, 6, 7], - [12, 12, 19, 21]]).astype(np.float32) + self.boxes = np.array( + [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]] + ).astype(np.float32) self.boxes_num = np.array([1, 2]).astype(np.int32) def test_output_size(self): - def test_output_size_is_int(): output_size = 7 - out = paddle.vision.ops.psroi_pool(paddle.to_tensor(self.x), - paddle.to_tensor(self.boxes), - paddle.to_tensor(self.boxes_num), - output_size).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, - 1.0, 7, 7) + out = paddle.vision.ops.psroi_pool( + paddle.to_tensor(self.x), + paddle.to_tensor(self.boxes), + paddle.to_tensor(self.boxes_num), + output_size, + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7 + ) np.testing.assert_allclose(out, expect_out, 
rtol=1e-05) def test_output_size_is_tuple(): output_size = (7, 7) - out = paddle.vision.ops.psroi_pool(paddle.to_tensor(self.x), - paddle.to_tensor(self.boxes), - paddle.to_tensor(self.boxes_num), - output_size).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, - 1.0, 7, 7) + out = paddle.vision.ops.psroi_pool( + paddle.to_tensor(self.x), + paddle.to_tensor(self.boxes), + paddle.to_tensor(self.boxes_num), + output_size, + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7 + ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_dytype_is_float64(): @@ -192,9 +218,12 @@ class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): out = paddle.vision.ops.psroi_pool( paddle.to_tensor(self.x, 'float64'), paddle.to_tensor(self.boxes, 'float64'), - paddle.to_tensor(self.boxes_num, 'int32'), output_size).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, - 1.0, 7, 7) + paddle.to_tensor(self.boxes_num, 'int32'), + output_size, + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7 + ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) places = ['cpu'] @@ -208,41 +237,48 @@ class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): class TestPSROIPoolDynamicClassAPI(unittest.TestCase): - def setUp(self): self.x = np.random.random([2, 128, 32, 32]).astype(np.float32) - self.boxes = np.array([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], - [5, 3, 25, 21]]).astype(np.float32) + self.boxes = np.array( + [[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10], [5, 3, 25, 21]] + ).astype(np.float32) self.boxes_num = np.array([2, 2]).astype(np.int32) def test_output_size(self): - def test_output_size_is_int(): psroi_module = paddle.vision.ops.PSRoIPool(8, 1.1) - out = psroi_module(paddle.to_tensor(self.x), - paddle.to_tensor(self.boxes), - paddle.to_tensor(self.boxes_num)).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, - 1.1, 8, 8) + out = psroi_module( + paddle.to_tensor(self.x), + paddle.to_tensor(self.boxes), + paddle.to_tensor(self.boxes_num), + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8 + ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_output_size_is_tuple(): psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1) - out = psroi_pool_module(paddle.to_tensor(self.x), - paddle.to_tensor(self.boxes), - paddle.to_tensor(self.boxes_num)).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, - 1.1, 8, 8) + out = psroi_pool_module( + paddle.to_tensor(self.x), + paddle.to_tensor(self.boxes), + paddle.to_tensor(self.boxes_num), + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8 + ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_dytype_is_float64(): psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1) - out = psroi_pool_module(paddle.to_tensor(self.x, 'float64'), - paddle.to_tensor(self.boxes, 'float64'), - paddle.to_tensor(self.boxes_num, - 'int32')).numpy() - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, - 1.1, 8, 8) + out = psroi_pool_module( + paddle.to_tensor(self.x, 'float64'), + paddle.to_tensor(self.boxes, 'float64'), + paddle.to_tensor(self.boxes_num, 'int32'), + ).numpy() + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8 + ) np.testing.assert_allclose(out, expect_out, rtol=1e-05) paddle.disable_static() @@ -257,87 
+293,89 @@ class TestPSROIPoolDynamicClassAPI(unittest.TestCase): class TestPSROIPoolBoxesNumError(unittest.TestCase): - def setUp(self): paddle.disable_static() self.x = paddle.uniform([2, 490, 28, 28], dtype='float32') self.boxes = paddle.to_tensor( - [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32') + [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32' + ) def test_errors(self): - def test_boxes_num_nums_error(): boxes_num = paddle.to_tensor([1, 5], 'int32') - out = paddle.vision.ops.psroi_pool(self.x, - self.boxes, - boxes_num, - output_size=7) + out = paddle.vision.ops.psroi_pool( + self.x, self.boxes, boxes_num, output_size=7 + ) self.assertRaises(ValueError, test_boxes_num_nums_error) def test_boxes_num_length_error(): boxes_num = paddle.to_tensor([1, 1, 1], 'int32') - out = paddle.vision.ops.psroi_pool(self.x, - self.boxes, - boxes_num, - output_size=7) + out = paddle.vision.ops.psroi_pool( + self.x, self.boxes, boxes_num, output_size=7 + ) self.assertRaises(ValueError, test_boxes_num_length_error) class TestPSROIPoolChannelError(unittest.TestCase): - def setUp(self): paddle.disable_static() self.x = paddle.uniform([2, 490, 28, 28], dtype='float32') self.boxes = paddle.to_tensor( - [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32') + [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32' + ) self.output_size = 4 def test_errors(self): - def test_channel_error(): boxes_num = paddle.to_tensor([2, 1], 'int32') - out = paddle.vision.ops.psroi_pool(self.x, self.boxes, boxes_num, - self.output_size) + out = paddle.vision.ops.psroi_pool( + self.x, self.boxes, boxes_num, self.output_size + ) self.assertRaises(ValueError, test_channel_error) class TestPSROIPoolStaticAPI(unittest.TestCase): - def setUp(self): paddle.enable_static() - self.x_placeholder = paddle.static.data(name='x', - shape=[2, 490, 28, 28]) + self.x_placeholder = paddle.static.data( + name='x', shape=[2, 490, 28, 28] + ) self.x = np.random.random([2, 490, 28, 28]).astype(np.float32) - self.boxes_placeholder = paddle.static.data(name='boxes', - shape=[3, 4], - lod_level=1) - self.boxes = np.array([[1, 5, 8, 10], [4, 2, 6, 7], - [12, 12, 19, 21]]).astype(np.float32) + self.boxes_placeholder = paddle.static.data( + name='boxes', shape=[3, 4], lod_level=1 + ) + self.boxes = np.array( + [[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]] + ).astype(np.float32) self.boxes_num = np.array([1, 2]).astype(np.int32) def test_function_in_static(self): output_size = 7 - out = paddle.vision.ops.psroi_pool(self.x_placeholder, - self.boxes_placeholder, - self.boxes_num, output_size) - expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, - 1.0, 7, 7) + out = paddle.vision.ops.psroi_pool( + self.x_placeholder, + self.boxes_placeholder, + self.boxes_num, + output_size, + ) + expect_out = calc_psroi_pool( + self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7 + ) places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: exe = paddle.static.Executor(place) boxes_lod_data = paddle.fluid.create_lod_tensor( - self.boxes, [[1, 2]], place) - out_res, = exe.run(paddle.static.default_main_program(), - feed={ - 'x': self.x, - 'boxes': boxes_lod_data - }, - fetch_list=[out.name]) + self.boxes, [[1, 2]], place + ) + (out_res,) = exe.run( + paddle.static.default_main_program(), + feed={'x': self.x, 'boxes': boxes_lod_data}, + fetch_list=[out.name], + ) np.testing.assert_allclose(out_res, expect_out, rtol=1e-05) diff --git 
a/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py b/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py index 8bddcb6d50f5f1e49d97043b918ae72a0096e4b5..3b2eb69a549bf8e01274140cfc15184373d72108 100644 --- a/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py +++ b/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py @@ -30,15 +30,13 @@ class TestPullGpupsSparse(unittest.TestCase): slots = [] with fluid.program_guard(train_program, startup_program): - l = fluid.layers.data(name='input', - shape=[1], - dtype="int64", - lod_level=1) + l = fluid.layers.data( + name='input', shape=[1], dtype="int64", lod_level=1 + ) slots.append(l) - output = _pull_gpups_sparse(slots, - size=[11], - is_distributed=True, - is_sparse=True) + output = _pull_gpups_sparse( + slots, size=[11], is_distributed=True, is_sparse=True + ) cost = paddle.mean(output) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(cost, train_program) @@ -49,9 +47,9 @@ class TestPullGpupsSparse(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup_program) img = np.array([1]).astype(np.int64) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py index 43771731f9b57ee42960f506d85384916d9d43b0..b5a1cb25e4d58e9ebf2e58cf9745eaaed7f61bda 100644 --- a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestPutAlongAxisOp(OpTest): - def setUp(self): self.init_data() self.reduce_op = "assign" @@ -42,7 +41,7 @@ class TestPutAlongAxisOp(OpTest): self.inputs = { 'Input': self.xnp, 'Index': self.index_broadcast, - 'Value': self.value_broadcast + 'Value': self.value_broadcast, } self.attrs = {'Axis': self.axis, 'Reduce': self.reduce_op} self.outputs = {'Result': self.target} @@ -65,7 +64,6 @@ class TestPutAlongAxisOp(OpTest): class TestPutAlongAxisAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) self.shape = [1, 3] @@ -90,15 +88,18 @@ class TestPutAlongAxisAPI(unittest.TestCase): value = paddle.fluid.data('Value', self.value_shape) out = paddle.put_along_axis(x, index, value, self.axis) exe = paddle.static.Executor(self.place[0]) - res = exe.run(feed={ - 'X': self.x_feed, - 'Value': self.value_np, - 'Index': self.index_np - }, - fetch_list=[out]) - - np.put_along_axis(self.x_np, self.index_np, self.value_np, - self.axis) + res = exe.run( + feed={ + 'X': self.x_feed, + 'Value': self.value_np, + 'Index': self.index_np, + }, + fetch_list=[out], + ) + + np.put_along_axis( + self.x_np, self.index_np, self.value_np, self.axis + ) # numpy put_along_axis is an inplace opearion. 
out_ref = self.x_np @@ -109,25 +110,29 @@ class TestPutAlongAxisAPI(unittest.TestCase): run(place) def test_api_dygraph(self): - def run(place): paddle.disable_static(place) x_tensor = paddle.to_tensor(self.x_np) index_tensor = paddle.to_tensor(self.index_np) value_tensor = paddle.to_tensor(self.value_np) - out = paddle.put_along_axis(x_tensor, index_tensor, value_tensor, - self.axis) + out = paddle.put_along_axis( + x_tensor, index_tensor, value_tensor, self.axis + ) np.array( - np.put_along_axis(self.x_np, self.index_np, self.value_np, - self.axis)) + np.put_along_axis( + self.x_np, self.index_np, self.value_np, self.axis + ) + ) out_ref = self.x_np np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) # for ci coverage, numpy put_along_axis did not support argument of 'reduce' - paddle.put_along_axis(x_tensor, index_tensor, value_tensor, - self.axis, 'mul') - paddle.put_along_axis(x_tensor, index_tensor, value_tensor, - self.axis, 'add') + paddle.put_along_axis( + x_tensor, index_tensor, value_tensor, self.axis, 'mul' + ) + paddle.put_along_axis( + x_tensor, index_tensor, value_tensor, self.axis, 'add' + ) paddle.enable_static() @@ -135,7 +140,6 @@ class TestPutAlongAxisAPI(unittest.TestCase): run(place) def test_inplace_dygraph(self): - def run(place): paddle.disable_static(place) x_tensor = paddle.to_tensor(self.x_np) @@ -145,8 +149,10 @@ class TestPutAlongAxisAPI(unittest.TestCase): x_tensor.put_along_axis_(index_tensor, value_tensor, self.axis) np.array( - np.put_along_axis(self.x_np, self.index_np, self.value_np, - self.axis)) + np.put_along_axis( + self.x_np, self.index_np, self.value_np, self.axis + ) + ) out_ref = self.x_np np.testing.assert_allclose(x_tensor.numpy(), out_ref, rtol=0.001) @@ -157,7 +163,6 @@ class TestPutAlongAxisAPI(unittest.TestCase): class TestPutAlongAxisAPICase2(TestPutAlongAxisAPI): - def setUp(self): np.random.seed(0) self.shape = [2, 2] @@ -174,13 +179,13 @@ class TestPutAlongAxisAPICase2(TestPutAlongAxisAPI): class TestPutAlongAxisAPICase3(TestPutAlongAxisAPI): - def setUp(self): np.random.seed(0) self.shape = [2, 2] self.index_shape = [4, 2] - self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, - 0]]).astype('int64') + self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, 0]]).astype( + 'int64' + ) self.x_np = np.random.random(self.shape).astype(np.float32) self.place = [paddle.CPUPlace()] self.axis = 0 diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 91e41a28ee54f72bf2091685a0c026e8a16c7523..961216795671f0816432df74a5519345a1034f51 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -64,8 +64,9 @@ def cross_entropy_grad(logits, labels, bwd_dout): N = logits.shape[1] dlogits = np.zeros([M, N]).astype(logits.dtype) for idx in range(M): - dlogits[idx][labels[idx] - [0]] = -bwd_dout[idx] / logits[idx][labels[idx][0]] + dlogits[idx][labels[idx][0]] = ( + -bwd_dout[idx] / logits[idx][labels[idx][0]] + ) return dlogits, None @@ -75,56 +76,85 @@ def simple_fc_net(img, label, use_py_func_op): hidden = fluid.layers.fc( hidden, size=200, - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) if not use_py_func_op: hidden = fluid.layers.tanh(hidden) else: - new_hidden = fluid.default_main_program().current_block( - ).create_var(name='hidden_{}'.format(idx), - dtype='float32', - 
shape=hidden.shape) - hidden = fluid.layers.py_func(func=tanh, - x=hidden, - out=new_hidden, - backward_func=tanh_grad, - skip_vars_in_backward_input=hidden) + new_hidden = ( + fluid.default_main_program() + .current_block() + .create_var( + name='hidden_{}'.format(idx), + dtype='float32', + shape=hidden.shape, + ) + ) + hidden = fluid.layers.py_func( + func=tanh, + x=hidden, + out=new_hidden, + backward_func=tanh_grad, + skip_vars_in_backward_input=hidden, + ) prediction = fluid.layers.fc(hidden, size=10, act='softmax') if not use_py_func_op: loss = fluid.layers.cross_entropy(input=prediction, label=label) else: - loss = fluid.default_main_program().current_block().create_var( - name='loss', dtype='float32', shape=[-1, 1]) - loss = fluid.layers.py_func(func=cross_entropy, - x=[prediction, label], - out=loss, - backward_func=cross_entropy_grad, - skip_vars_in_backward_input=loss) - - dummy_var = fluid.default_main_program().current_block().create_var( - name='test_tmp_var', dtype='float32', shape=[1]) - fluid.layers.py_func(func=dummy_func_with_no_input, - x=None, - out=dummy_var) + loss = ( + fluid.default_main_program() + .current_block() + .create_var(name='loss', dtype='float32', shape=[-1, 1]) + ) + loss = fluid.layers.py_func( + func=cross_entropy, + x=[prediction, label], + out=loss, + backward_func=cross_entropy_grad, + skip_vars_in_backward_input=loss, + ) + + dummy_var = ( + fluid.default_main_program() + .current_block() + .create_var(name='test_tmp_var', dtype='float32', shape=[1]) + ) + fluid.layers.py_func( + func=dummy_func_with_no_input, x=None, out=dummy_var + ) loss += dummy_var fluid.layers.py_func(func=dummy_func_with_no_output, x=loss, out=None) - loss_out = fluid.default_main_program().current_block().create_var( - dtype='float32', shape=[-1, 1]) - dummy_var_out = fluid.default_main_program().current_block().create_var( - dtype='float32', shape=[1]) - fluid.layers.py_func(func=dummy_func_with_multi_input_output, - x=(loss, dummy_var), - out=(loss_out, dummy_var_out)) - assert loss == loss_out and dummy_var == dummy_var_out, \ - "py_func failed with multi input and output" - - fluid.layers.py_func(func=dummy_func_with_multi_input_output, - x=[loss, dummy_var], - out=[loss_out, dummy_var_out]) - assert loss == loss_out and dummy_var == dummy_var_out, \ - "py_func failed with multi input and output" + loss_out = ( + fluid.default_main_program() + .current_block() + .create_var(dtype='float32', shape=[-1, 1]) + ) + dummy_var_out = ( + fluid.default_main_program() + .current_block() + .create_var(dtype='float32', shape=[1]) + ) + fluid.layers.py_func( + func=dummy_func_with_multi_input_output, + x=(loss, dummy_var), + out=(loss_out, dummy_var_out), + ) + assert ( + loss == loss_out and dummy_var == dummy_var_out + ), "py_func failed with multi input and output" + + fluid.layers.py_func( + func=dummy_func_with_multi_input_output, + x=[loss, dummy_var], + out=[loss_out, dummy_var_out], + ) + assert ( + loss == loss_out and dummy_var == dummy_var_out + ), "py_func failed with multi input and output" loss = paddle.mean(loss) return loss @@ -132,9 +162,9 @@ def simple_fc_net(img, label, use_py_func_op): def reader(): for _ in range(dev_cnt * 100): - yield np.random.random([784]), np.random.random_integers(size=[1], - low=0, - high=9) + yield np.random.random([784]), np.random.random_integers( + size=[1], low=0, high=9 + ) def test_main(use_cuda, use_py_func_op, use_parallel_executor): @@ -162,7 +192,8 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): if 
use_parallel_executor: train_cp = compiler.CompiledProgram( - fluid.default_main_program()) + fluid.default_main_program() + ) train_cp = train_cp.with_data_parallel(loss_name=loss.name) fetch_list = [loss.name] else: @@ -171,15 +202,14 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): ret = [] for epoch_id in range(2): for d in r(): - L, = exe.run(train_cp, - feed=feeder.feed(d), - fetch_list=fetch_list) + (L,) = exe.run( + train_cp, feed=feeder.feed(d), fetch_list=fetch_list + ) ret.append(L) return np.array(ret) class TestPyFuncOpUseExecutor(unittest.TestCase): - def setUp(self): self.use_parallel_executor = False @@ -187,8 +217,9 @@ class TestPyFuncOpUseExecutor(unittest.TestCase): for use_cuda in [True, False]: losses = [] for use_py_func_op in [True, False]: - L = test_main(use_cuda, use_py_func_op, - self.use_parallel_executor) + L = test_main( + use_cuda, use_py_func_op, self.use_parallel_executor + ) if L is not None: losses.append(L) @@ -198,7 +229,6 @@ class TestPyFuncOpUseExecutor(unittest.TestCase): class TestPyFuncOpUseParallelExecutor(TestPyFuncOpUseExecutor): - def setUp(self): self.use_parallel_executor = True diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py index 84ea5f5f7bb7eba2e0a52eb9aadebe63da1a20ac..1c399b89cec57b6fe1b7733544180d98da7369ec 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py @@ -19,23 +19,20 @@ import numpy as np class TestPyReaderCombination(unittest.TestCase): - def setUp(self): self.n1 = 10 self.n2 = 20 self.batch_size = 2 def create_reader(self, batch_num): - def __impl__(): for _ in range(batch_num): - image = np.random.uniform(low=-1, high=1, - size=[batch_num, - 784]).astype('float32') - label = np.random.random_integers(low=0, - high=9, - size=[batch_num, - 1]).astype('int64') + image = np.random.uniform( + low=-1, high=1, size=[batch_num, 784] + ).astype('float32') + label = np.random.random_integers( + low=0, high=9, size=[batch_num, 1] + ).astype('int64') yield image, label return __impl__ @@ -57,17 +54,17 @@ class TestPyReaderCombination(unittest.TestCase): def main_impl(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - image = fluid.layers.data(name='image', - dtype='float32', - shape=[784]) + image = fluid.layers.data( + name='image', dtype='float32', shape=[784] + ) label = fluid.layers.data(name='label', dtype='int64', shape=[1]) - py_reader1 = fluid.io.PyReader(feed_list=[image, label], - capacity=16, - iterable=True) - py_reader2 = fluid.io.PyReader(feed_list=[image, label], - capacity=16, - iterable=True) + py_reader1 = fluid.io.PyReader( + feed_list=[image, label], capacity=16, iterable=True + ) + py_reader2 = fluid.io.PyReader( + feed_list=[image, label], capacity=16, iterable=True + ) reader1 = paddle.reader.cache(self.create_reader(self.n1)) reader2 = paddle.reader.cache(self.create_reader(self.n2)) @@ -77,8 +74,12 @@ class TestPyReaderCombination(unittest.TestCase): for _ in range(10): max_num = min(self.n1, self.n2) batch_num = 0 - for reader_np1, py_reader_dict1, reader_np2, py_reader_dict2 in zip( - reader1(), py_reader1(), reader2(), py_reader2()): + for ( + reader_np1, + py_reader_dict1, + reader_np2, + py_reader_dict2, + ) in zip(reader1(), py_reader1(), reader2(), py_reader2()): self.assertFeedVarEqual(reader_np1, py_reader_dict1) self.assertFeedVarEqual(reader_np2, py_reader_dict2) 
batch_num += 1 @@ -100,7 +101,6 @@ class TestPyReaderCombination(unittest.TestCase): class TestPyReaderCombination2(TestPyReaderCombination): - def setUp(self): self.n1 = 20 self.n2 = 10 @@ -108,7 +108,6 @@ class TestPyReaderCombination2(TestPyReaderCombination): class TestPyReaderCombination3(TestPyReaderCombination): - def setUp(self): self.n1 = 10 self.n2 = 10 diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_error_msg.py b/python/paddle/fluid/tests/unittests/test_py_reader_error_msg.py index 337cafbb1246f82dda47e34a84942fde89dcc502..40cdcdcc19f3a533bc7c2ef9143a0e724dc68346 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_error_msg.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_error_msg.py @@ -19,32 +19,38 @@ import paddle class TestPyReaderErrorMsg(unittest.TestCase): - def test_check_input_array(self): - fluid.reader.GeneratorLoader._check_input_array([ - np.random.randint(100, size=[2]), - np.random.randint(100, size=[2]), - np.random.randint(100, size=[2]) - ]) - self.assertRaises(TypeError, - fluid.reader.GeneratorLoader._check_input_array, [ - np.random.randint(100, size=[2]), - np.random.randint(100, size=[1]), - np.random.randint(100, size=[3]) - ]) + fluid.reader.GeneratorLoader._check_input_array( + [ + np.random.randint(100, size=[2]), + np.random.randint(100, size=[2]), + np.random.randint(100, size=[2]), + ] + ) + self.assertRaises( + TypeError, + fluid.reader.GeneratorLoader._check_input_array, + [ + np.random.randint(100, size=[2]), + np.random.randint(100, size=[1]), + np.random.randint(100, size=[3]), + ], + ) class TestDoubleBufferAPI(unittest.TestCase): - def test_double_buffer(self): paddle.enable_static() if fluid.core.is_compiled_with_cuda(): - reader = fluid.layers.py_reader(capacity=64, - shapes=[(-1, 1, 28, 28), (-1, 1)], - dtypes=['float32', 'int64'], - use_double_buffer=False) - reader = fluid.layers.double_buffer(reader, - place=fluid.core.CUDAPlace(0)) + reader = fluid.layers.py_reader( + capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + use_double_buffer=False, + ) + reader = fluid.layers.double_buffer( + reader, place=fluid.core.CUDAPlace(0) + ) image, label = fluid.layers.read_file(reader) diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py b/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py index 4b5e2b9711ee1dfbc9bc1906b18aa2532703d1ba..ef55a226dea43a1293e6afbe906e0c5862e7a0e2 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py @@ -17,7 +17,6 @@ import unittest class TestLoDLevelShare(unittest.TestCase): - def setUp(self): self.use_double_buffer = False @@ -27,7 +26,8 @@ class TestLoDLevelShare(unittest.TestCase): shapes=([-1, 256], [-1, 512], [-1, 100]), dtypes=('float32', 'int64', 'double'), lod_levels=(1, 2, 0), - use_double_buffer=self.use_double_buffer) + use_double_buffer=self.use_double_buffer, + ) x, y, z = fluid.layers.read_file(reader) self.assertEqual(x.lod_level, 1) @@ -36,7 +36,6 @@ class TestLoDLevelShare(unittest.TestCase): class TestLoDLevelShare2(TestLoDLevelShare): - def setUp(self): self.use_double_buffer = True diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py b/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py index 596bc2ff83942150309838b244880a3ff6efdfd3..4268d6fbcc196e6fbc19152d39bc7c34ca6cfc58 100644 --- 
a/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py @@ -20,7 +20,6 @@ import numpy as np def user_reader(inputs): - def _reader(): for d in inputs: yield d @@ -29,7 +28,6 @@ def user_reader(inputs): def batch_feeder(batch_reader, pin_memory=False, img_dtype="float32"): - def _feeder(): for batch_data in batch_reader(): sample_batch = [] @@ -48,7 +46,6 @@ def batch_feeder(batch_reader, pin_memory=False, img_dtype="float32"): class TestPyReader(unittest.TestCase): - def setUp(self): self.capacity = 10 self.shapes = [(-1, 3, 2, 1), (-1, 1)] @@ -57,29 +54,37 @@ class TestPyReader(unittest.TestCase): def test_pin_memory_pyreader(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) executor = fluid.Executor(place) - data_file = fluid.layers.py_reader(capacity=self.capacity, - dtypes=self.dtypes, - lod_levels=self.lod_levels, - shapes=self.shapes) + data_file = fluid.layers.py_reader( + capacity=self.capacity, + dtypes=self.dtypes, + lod_levels=self.lod_levels, + shapes=self.shapes, + ) # feed_queue = data_file.queue read_out_data = fluid.layers.read_file(data_file) self.inputs = [] for _ in range(10): - sample = np.random.uniform(low=0, high=1, - size=[3, 2, 1]).astype("float32") + sample = np.random.uniform( + low=0, high=1, size=[3, 2, 1] + ).astype("float32") label = np.random.randint(low=0, high=10, dtype="int64") self.inputs.append((sample, label)) self.input_tensors = [] for d, l in batch_feeder( - paddle.batch(user_reader(self.inputs), batch_size=2), - pin_memory=True - if fluid.core.is_compiled_with_cuda() else False)(): + paddle.batch(user_reader(self.inputs), batch_size=2), + pin_memory=True + if fluid.core.is_compiled_with_cuda() + else False, + )(): ta = fluid.LoDTensorArray() ta.append(d) ta.append(l) @@ -95,10 +100,13 @@ class TestPyReader(unittest.TestCase): self.batched_inputs.append([feed_d, feed_l]) data_file.decorate_tensor_provider( - batch_feeder(paddle.batch(user_reader(self.inputs), - batch_size=2), - pin_memory=True - if fluid.core.is_compiled_with_cuda() else False)) + batch_feeder( + paddle.batch(user_reader(self.inputs), batch_size=2), + pin_memory=True + if fluid.core.is_compiled_with_cuda() + else False, + ) + ) executor.run(fluid.default_startup_program()) self.outputs = [] @@ -106,14 +114,16 @@ class TestPyReader(unittest.TestCase): data_file.start() for _ in self.input_tensors: self.outputs.append( - executor.run(fetch_list=list(read_out_data))) + executor.run(fetch_list=list(read_out_data)) + ) data_file.reset() self.validate() def validate(self): self.assertEqual(len(self.batched_inputs), len(self.outputs)) - for in_data_list, out_data_list in zip(self.batched_inputs, - self.outputs): + for in_data_list, out_data_list in zip( + self.batched_inputs, self.outputs + ): self.assertEqual(len(in_data_list), len(out_data_list)) in_data_list_np = [ np.array(in_lod_tensor) for in_lod_tensor in in_data_list diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py b/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py index cdca6695e947bb6941abefffe872ab035550c562..4ccaaf0274669b252ee3e70e72d2d1c1c7c230e6 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py @@ -24,7 
+24,6 @@ def feed_data(feed_queue, inputs): class TestPyReader(unittest.TestCase): - def setUp(self): self.capacity = 10 self.batch_size_min = 10 @@ -42,46 +41,55 @@ class TestPyReader(unittest.TestCase): def main(self, use_thread=False): with fluid.program_guard(fluid.Program(), fluid.Program()): - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) executor = fluid.Executor(place) - data_file = fluid.layers.py_reader(capacity=self.capacity, - dtypes=self.dtypes, - lod_levels=self.lod_levels, - shapes=self.shapes) + data_file = fluid.layers.py_reader( + capacity=self.capacity, + dtypes=self.dtypes, + lod_levels=self.lod_levels, + shapes=self.shapes, + ) feed_queue = data_file.queue read_out_data = fluid.layers.read_file(data_file) self.inputs = [] for i in range(self.iterations): in_data = fluid.LoDTensorArray() - batch_size = np.random.random_integers(self.batch_size_min, - self.batch_size_max) + batch_size = np.random.random_integers( + self.batch_size_min, self.batch_size_max + ) for shape, dtype in zip(self.shapes, self.dtypes): - next_data = np.random.uniform(low=0, - high=1000, - size=(batch_size, ) + - shape[1:]).astype(dtype) + next_data = np.random.uniform( + low=0, high=1000, size=(batch_size,) + shape[1:] + ).astype(dtype) in_data.append( - fluid.executor._as_lodtensor(next_data, place)) + fluid.executor._as_lodtensor(next_data, place) + ) self.inputs.append(in_data) executor.run(fluid.default_startup_program()) self.outputs = [] if use_thread: - thread = Thread(target=feed_data, - args=(feed_queue, self.inputs)) + thread = Thread( + target=feed_data, args=(feed_queue, self.inputs) + ) thread.start() for in_data in self.inputs: self.outputs.append( - executor.run(fetch_list=list(read_out_data))) + executor.run(fetch_list=list(read_out_data)) + ) else: for in_data in self.inputs: feed_queue.push(in_data) self.outputs.append( - executor.run(fetch_list=list(read_out_data))) + executor.run(fetch_list=list(read_out_data)) + ) feed_queue.close() self.validate() diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py b/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py index c6e951997eabfb4a851623a7e997d932f4557463..bfc08245ee33b5a31b018866f3faf8b0974a712d 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py @@ -19,39 +19,40 @@ import numpy as np class TestPyReader(unittest.TestCase): - def setUp(self): self.batch_size = 32 self.epoch_num = 2 self.sample_num = 10 def test_returnlist(self): - def reader_creator_random_image(height, width): - def reader(): for i in range(self.sample_num): - yield np.random.uniform(low=0, - high=255, - size=[height, width]), + yield np.random.uniform( + low=0, high=255, size=[height, width] + ), return reader for return_list in [True, False]: with fluid.program_guard(fluid.Program(), fluid.Program()): - image = fluid.layers.data(name='image', - shape=[784, 784], - dtype='float32') - reader = fluid.io.PyReader(feed_list=[image], - capacity=4, - iterable=True, - return_list=return_list) + image = fluid.layers.data( + name='image', shape=[784, 784], dtype='float32' + ) + reader = fluid.io.PyReader( + feed_list=[image], + capacity=4, + iterable=True, + return_list=return_list, + ) user_defined_reader = reader_creator_random_image(784, 784) reader.decorate_sample_list_generator( - 
paddle.batch(user_defined_reader, - batch_size=self.batch_size), - fluid.core.CPUPlace()) + paddle.batch( + user_defined_reader, batch_size=self.batch_size + ), + fluid.core.CPUPlace(), + ) # definition of network is omitted executor = fluid.Executor(fluid.core.CPUPlace()) executor.run(fluid.default_main_program()) @@ -69,7 +70,8 @@ class TestPyReader(unittest.TestCase): batch_py_reader.decorate_sample_generator( user_defined_reader, batch_size=self.batch_size, - places=fluid.core.CPUPlace()) + places=fluid.core.CPUPlace(), + ) for epoch in range(self.epoch_num): for _, data in enumerate(batch_py_reader()): diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py index 7f0cf633ed25c9e391ca5aba40c9d7f95a7730d3..fa1297d46b6b965348bf3c25e8bc61cd22c8e489 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py @@ -23,18 +23,18 @@ os.environ['CPU_NUM'] = '1' def random_reader(sample_num): - def __impl__(): for _ in range(sample_num): - yield np.random.random( - size=[784]).astype('float32'), np.random.random_integers( - low=0, high=9, size=[1]).astype('int64') + yield np.random.random(size=[784]).astype( + 'float32' + ), np.random.random_integers(low=0, high=9, size=[1]).astype( + 'int64' + ) return paddle.reader.cache(__impl__) class TestCaseBase(unittest.TestCase): - def setUp(self): self.batch_size = 32 self.epoch_num = 2 @@ -54,22 +54,24 @@ class TestCaseBase(unittest.TestCase): def run_main(self, reader, use_sample_generator, iterable, drop_last): image = fluid.layers.data(name='image', dtype='float32', shape=[784]) label = fluid.layers.data(name='label', dtype='int64', shape=[1]) - py_reader = fluid.io.PyReader(feed_list=[image, label], - capacity=16, - iterable=iterable, - use_double_buffer=False) + py_reader = fluid.io.PyReader( + feed_list=[image, label], + capacity=16, + iterable=iterable, + use_double_buffer=False, + ) batch_reader = paddle.batch(reader, self.batch_size, drop_last) all_datas = self.generate_all_data(batch_reader) if not use_sample_generator: - py_reader.decorate_sample_list_generator(batch_reader, - places=fluid.cpu_places()) + py_reader.decorate_sample_list_generator( + batch_reader, places=fluid.cpu_places() + ) else: - py_reader.decorate_sample_generator(reader, - self.batch_size, - drop_last, - places=fluid.cpu_places()) + py_reader.decorate_sample_generator( + reader, self.batch_size, drop_last, places=fluid.cpu_places() + ) if drop_last: batch_num = int(self.sample_num / self.batch_size) @@ -111,12 +113,12 @@ class TestCaseBase(unittest.TestCase): for iterable in [False, True]: for drop_last in [False, True]: with fluid.program_guard(fluid.Program(), fluid.Program()): - self.run_main(reader, use_sample_generator, iterable, - drop_last) + self.run_main( + reader, use_sample_generator, iterable, drop_last + ) class TestCase1(TestCaseBase): - def setUp(self): self.batch_size = 32 self.epoch_num = 10 @@ -124,7 +126,6 @@ class TestCase1(TestCaseBase): class TestCase2(TestCaseBase): - def setUp(self): self.batch_size = 32 self.epoch_num = 2 @@ -132,7 +133,6 @@ class TestCase2(TestCaseBase): class TestCase3(TestCaseBase): - def setUp(self): self.batch_size = 32 self.epoch_num = 2 diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py index 
a0ab41e4fe24fbdf47d44dba3d923f0e4d00c668..dca3b061cbff001d3aa42f2e4e5be1b68774f97f 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py @@ -39,8 +39,11 @@ def as_tensor(np_array_or_tensor, place=None): def as_numpy(tensor_or_numpy): - return tensor_or_numpy if isinstance( - tensor_or_numpy, np.ndarray) else np.array(tensor_or_numpy) + return ( + tensor_or_numpy + if isinstance(tensor_or_numpy, np.ndarray) + else np.array(tensor_or_numpy) + ) def sample_list_to_tensor_array(sample_list): @@ -80,13 +83,15 @@ def feed_data(feed_queue, batch_reader): feed_queue.close() -def simple_fc_net(in_size, - class_num, - hidden_sizes, - batch_size, - queue_capacity, - use_double_buffer=False, - use_feed_list=True): +def simple_fc_net( + in_size, + class_num, + hidden_sizes, + batch_size, + queue_capacity, + use_double_buffer=False, + use_feed_list=True, +): in_data = fluid.layers.data(name="data", dtype='float32', shape=[in_size]) label = fluid.layers.data(name='label', dtype='int64', shape=[1]) if use_feed_list: @@ -94,14 +99,16 @@ def simple_fc_net(in_size, capacity=queue_capacity, use_double_buffer=use_double_buffer, feed_list=[in_data, label], - name=unique_name.generate('py_reader_name')) + name=unique_name.generate('py_reader_name'), + ) else: py_reader = fluid.layers.py_reader( capacity=queue_capacity, shapes=[in_data.shape, label.shape], dtypes=['float32', 'int64'], name=unique_name.generate('py_reader_name'), - use_double_buffer=use_double_buffer) + use_double_buffer=use_double_buffer, + ) in_data, label = fluid.layers.read_file(py_reader) @@ -113,12 +120,15 @@ def simple_fc_net(in_size, hidden, size=hidden_size, act='tanh', - bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0))) + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ), + ) predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax') loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label)) + fluid.layers.cross_entropy(input=predict_label, label=label) + ) optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) @@ -126,7 +136,6 @@ def simple_fc_net(in_size, class TestPyReaderUsingExecutor(unittest.TestCase): - def setUp(self): self.in_size = 1000 self.hidden_sizes = [50, 30, 20] @@ -136,39 +145,42 @@ class TestPyReaderUsingExecutor(unittest.TestCase): self.queue_capacity = 50 def test(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): for use_parallel_executor in [False, True]: for use_double_buffer in [False, True]: for use_feed_list in [False, True]: for use_decorate_paddle_reader in [False, True]: print('Test Parameters:'), - print({ - 'use_cuda': + print( + { + 'use_cuda': use_cuda, + 'use_parallel_executor': use_parallel_executor, + 'use_double_buffer': use_double_buffer, + 'use_feed_list': use_feed_list, + 'use_decorate_paddle_reader': use_decorate_paddle_reader, + } + ) + self.main( use_cuda, - 'use_parallel_executor': use_parallel_executor, - 'use_double_buffer': use_double_buffer, - 'use_feed_list': use_feed_list, - 'use_decorate_paddle_reader': - use_decorate_paddle_reader - }) - self.main(use_cuda, use_parallel_executor, - use_double_buffer, use_feed_list, - use_decorate_paddle_reader) + use_decorate_paddle_reader, + ) def tensor_reader(self, use_decorate_paddle_reader): - def reader(): - for 
sample_id in range(self.batch_size * self.iterations * - self.batch_size_times): + for sample_id in range( + self.batch_size * self.iterations * self.batch_size_times + ): in_data = np.random.uniform( - low=0, high=1, size=(self.in_size, )).astype('float32') - label = np.random.random_integers(low=0, - high=self.class_num - 1, - size=(1, )).astype('int64') + low=0, high=1, size=(self.in_size,) + ).astype('float32') + label = np.random.random_integers( + low=0, high=self.class_num - 1, size=(1,) + ).astype('int64') reshaped_in_data = np.reshape(in_data, [1, -1]) reshaped_label = np.reshape(label, [1, -1]) @@ -176,9 +188,11 @@ class TestPyReaderUsingExecutor(unittest.TestCase): self.inputs.append([reshaped_in_data, reshaped_label]) else: self.inputs[-1][0] = np.concatenate( - (self.inputs[-1][0], reshaped_in_data), axis=0) + (self.inputs[-1][0], reshaped_in_data), axis=0 + ) self.inputs[-1][1] = np.concatenate( - (self.inputs[-1][1], reshaped_label), axis=0) + (self.inputs[-1][1], reshaped_label), axis=0 + ) yield in_data, label @@ -187,12 +201,14 @@ class TestPyReaderUsingExecutor(unittest.TestCase): return reader - def main(self, - use_cuda=True, - use_parallel_executor=False, - use_double_buffer=False, - use_feed_list=False, - use_decorate_paddle_reader=False): + def main( + self, + use_cuda=True, + use_parallel_executor=False, + use_double_buffer=False, + use_feed_list=False, + use_decorate_paddle_reader=False, + ): assert not use_cuda or use_cuda and core.is_compiled_with_cuda() self.use_cuda = use_cuda @@ -205,14 +221,22 @@ class TestPyReaderUsingExecutor(unittest.TestCase): main_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net( + ( + in_data, + label, + loss, + optimizer, + feed_queue, + py_reader, + ) = simple_fc_net( in_size=self.in_size, class_num=self.class_num, hidden_sizes=self.hidden_sizes, batch_size=self.batch_size, queue_capacity=self.queue_capacity, use_double_buffer=self.use_double_buffer, - use_feed_list=self.use_feed_list) + use_feed_list=self.use_feed_list, + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -222,12 +246,14 @@ class TestPyReaderUsingExecutor(unittest.TestCase): train_cp = main_program if use_parallel_executor: train_cp = compiler.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name) + main_program + ).with_data_parallel(loss_name=loss.name) if use_cuda: self.batch_size_times = core.get_cuda_device_count() else: self.batch_size_times = int( - os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + os.environ.get('CPU_NUM', multiprocessing.cpu_count()) + ) else: self.batch_size_times = 1 @@ -244,15 +270,17 @@ class TestPyReaderUsingExecutor(unittest.TestCase): py_reader.decorate_sample_list_generator(batch_reader) py_reader.start() else: - thread = threading.Thread(target=feed_data, - args=(feed_queue, batch_reader)) + thread = threading.Thread( + target=feed_data, args=(feed_queue, batch_reader) + ) thread.daemon = True thread.start() try: while True: - fetches = exe.run(train_cp, - fetch_list=[in_data.name, label.name]) + fetches = exe.run( + train_cp, fetch_list=[in_data.name, label.name] + ) fetches = [as_numpy(fetch) for fetch in fetches] self.outputs.append(fetches) except fluid.core.EOFException: diff --git a/python/paddle/fluid/tests/unittests/test_pylayer_op.py b/python/paddle/fluid/tests/unittests/test_pylayer_op.py index 6a959dab07e8673fc27a050e102307a2998070c7..becab9796dff3cd9978d4ee52e43c4ecc858617e 
100644 --- a/python/paddle/fluid/tests/unittests/test_pylayer_op.py +++ b/python/paddle/fluid/tests/unittests/test_pylayer_op.py @@ -21,17 +21,13 @@ from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode class FakeTensor(paddle.fluid.core.VarBase): - def __init__(self): pass class TestPyLayer(unittest.TestCase): - def func_test_simple_pylayer_multiple_output(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x1, x2, func1, func2=paddle.square): ctx.func = func2 @@ -59,7 +55,8 @@ class TestPyLayer(unittest.TestCase): z2.mean().backward() self.assertTrue( - np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10) + np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 + ) def test_simple_pylayer_multiple_output(self): with _test_eager_guard(): @@ -67,9 +64,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_simple_pylayer_multiple_output() def func_test_simple_pylayer_return_none_with_no_grad(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x1, x2, func1, func2=paddle.square): ctx.func = func2 @@ -101,7 +96,8 @@ class TestPyLayer(unittest.TestCase): z2.mean().backward() self.assertTrue( - np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10) + np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 + ) def test_simple_pylayer_return_none_with_no_grad(self): with _test_eager_guard(): @@ -109,9 +105,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_simple_pylayer_return_none_with_no_grad() def func_test_simple_pylayer_single_output(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x1, func1, func2=paddle.square): ctx.func = func2 @@ -121,7 +115,7 @@ class TestPyLayer(unittest.TestCase): @staticmethod def backward(ctx, dy1): - y1, = ctx.saved_tensor() + (y1,) = ctx.saved_tensor() re1 = dy1 * (1 - ctx.func(y1)) return re1 @@ -135,7 +129,8 @@ class TestPyLayer(unittest.TestCase): z2.mean().backward() self.assertTrue( - np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10) + np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 + ) def test_simple_pylayer_single_output(self): with _test_eager_guard(): @@ -143,9 +138,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_simple_pylayer_single_output() def func_test_pylayer_num_output_match(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward( ctx, @@ -172,9 +165,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_num_output_match() def func_test_pylayer_dtype(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x, dtype): y = paddle.cast(x, dtype) @@ -185,10 +176,16 @@ class TestPyLayer(unittest.TestCase): return dy1 dtypes = [ - 'bool', 'float16', 'float32', 'float64', 'uint8', 'int32', 'int64' + 'bool', + 'float16', + 'float32', + 'float64', + 'uint8', + 'int32', + 'int64', ] for dtype in dtypes: - input1 = (paddle.randn([2, 3])) + input1 = paddle.randn([2, 3]) input1.stop_gradient = False self.assertTrue(input1.grad is None) @@ -203,9 +200,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_dtype() def func_test_pylayer_Exception_forward(self): - class Layer_None1(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, *args): return None @@ -219,7 +214,6 @@ class TestPyLayer(unittest.TestCase): z = 
Layer_None1.apply(input1) class Layer_None2(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, *args): return [None, args[0]] @@ -233,7 +227,6 @@ class TestPyLayer(unittest.TestCase): z = Layer_None2.apply(input1) class Layer_one1(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, *args): return 1 @@ -248,7 +241,6 @@ class TestPyLayer(unittest.TestCase): z = Layer_one1.apply(input1) class Layer_one2(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, *args): return [1, 2, args[0]] @@ -262,7 +254,6 @@ class TestPyLayer(unittest.TestCase): z = Layer_one2.apply(input1) class Layer_no_fw(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def backward(ctx, *args): return args @@ -277,9 +268,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_Exception_forward() def func_test_pylayer_nograd(self): - class tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x1, func1, func2=paddle.square, xx=None): ctx.func = func2 @@ -302,10 +291,9 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_nograd() def func_test_pylayer_Exception_bk(self): - class Layer_bk_none1( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x * 2 @@ -322,8 +310,8 @@ class TestPyLayer(unittest.TestCase): z.sum().backward() class Layer_bk_none2( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x1, x2): return x1 + x2 @@ -339,9 +327,9 @@ class TestPyLayer(unittest.TestCase): with self.assertRaises(ValueError): z.mean().backward() - class Layer_bk_one1(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer - ): - + class Layer_bk_one1( + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x + x @@ -357,9 +345,9 @@ class TestPyLayer(unittest.TestCase): with self.assertRaises(ValueError): z.mean().backward() - class Layer_bk_one2(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer - ): - + class Layer_bk_one2( + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x1, x2): return x1 * 2, x2 * 5 @@ -377,7 +365,6 @@ class TestPyLayer(unittest.TestCase): z.mean().backward() class Layer_no_bk(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x): return x * 2, x * 5 @@ -391,8 +378,8 @@ class TestPyLayer(unittest.TestCase): z.mean().backward() class Layer_bk_match( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x * 2, x * 5 @@ -414,10 +401,9 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_Exception_bk() def func_test_pylayer_bk_return_none(self): - class Layer_bk_none1( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x1, x2): return x1 + x2 @@ -436,8 +422,8 @@ class TestPyLayer(unittest.TestCase): z.mean().backward() class Layer_bk_none2( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x1, x2): return x1 * 2, x2 * 5 @@ -461,9 +447,7 @@ class TestPyLayer(unittest.TestCase): 
self.func_test_pylayer_bk_return_none() def func_test_pylayer_inplace(self): - class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x): return x @@ -473,7 +457,6 @@ class TestPyLayer(unittest.TestCase): return dy class Layer(paddle.nn.Layer): - def __init__(self): super(Layer, self).__init__() @@ -499,9 +482,9 @@ class TestPyLayer(unittest.TestCase): def test_pylayer_inplace_backward_error(self): with _test_eager_guard(): - class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer - ): - + class cus_tanh( + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x @@ -511,7 +494,6 @@ class TestPyLayer(unittest.TestCase): return dy class Layer(paddle.nn.Layer): - def __init__(self): super(Layer, self).__init__() @@ -527,17 +509,19 @@ class TestPyLayer(unittest.TestCase): layer = Layer() z = layer(data) with self.assertRaisesRegexp( - RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}". - format(1, 0)): + RuntimeError, + "received tensor_version:{} != wrapper_version_snapshot:{}".format( + 1, 0 + ), + ): z.backward() def test_pylayer_inplace_backward_success_1(self): with _test_eager_guard(): - class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer - ): - + class cus_tanh( + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x @@ -547,7 +531,6 @@ class TestPyLayer(unittest.TestCase): return dy class Layer(paddle.nn.Layer): - def __init__(self): super(Layer, self).__init__() @@ -569,9 +552,9 @@ class TestPyLayer(unittest.TestCase): def test_pylayer_inplace_backward_success_2(self): with _test_eager_guard(): - class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer - ): - + class cus_tanh( + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x @@ -581,7 +564,6 @@ class TestPyLayer(unittest.TestCase): return dy class Layer(paddle.nn.Layer): - def __init__(self): super(Layer, self).__init__() @@ -601,10 +583,9 @@ class TestPyLayer(unittest.TestCase): self.assertTrue(data.grad is not None) def func_test_pylayer_inplace_and_leaf_exception(self): - class cus_pylayer_op( - EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - + EagerPyLayer if in_dygraph_mode() else LegacyPyLayer + ): @staticmethod def forward(ctx, x): return x @@ -614,7 +595,6 @@ class TestPyLayer(unittest.TestCase): return dy class Layer(paddle.nn.Layer): - def __init__(self): super(Layer, self).__init__() @@ -636,9 +616,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_pylayer_inplace_and_leaf_exception() def func_test_backward_in_backward(self): - class cus_tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x): temp = x.detach() @@ -668,9 +646,7 @@ class TestPyLayer(unittest.TestCase): self.func_test_backward_in_backward() def func_test_return_to_tensor(self): - class Tanh(EagerPyLayer if in_dygraph_mode() else LegacyPyLayer): - @staticmethod def forward(ctx, x1): y1 = paddle.tanh(x1) @@ -680,7 +656,7 @@ class TestPyLayer(unittest.TestCase): @staticmethod def backward(ctx, dy1, dy2): - y1, = ctx.saved_tensor() + (y1,) = ctx.saved_tensor() re1 = dy1 * (1 - paddle.square(y1)) return dy1 @@ -700,7 +676,6 @@ class TestPyLayer(unittest.TestCase): with _test_eager_guard(): class Tanh(EagerPyLayer): - @staticmethod def forward(ctx, x): ctx.mark_not_inplace(x) @@ -719,7 +694,6 @@ class TestPyLayer(unittest.TestCase): with 
_test_eager_guard(): class Tanh(EagerPyLayer): - @staticmethod def forward(ctx, x): ctx.mark_not_inplace(x) @@ -739,7 +713,6 @@ class TestPyLayer(unittest.TestCase): with _test_eager_guard(): class Tanh(EagerPyLayer): - @staticmethod def forward(ctx, x): a = x + x @@ -760,7 +733,6 @@ class TestPyLayer(unittest.TestCase): with _test_eager_guard(): class Tanh(EagerPyLayer): - @staticmethod def forward(ctx, x): a = x + x @@ -782,11 +754,8 @@ class TestPyLayer(unittest.TestCase): class TestPyLayerReturnType(unittest.TestCase): - def test_forward_args_fake_tensor(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1): y1 = FakeTensor() @@ -802,9 +771,7 @@ class TestPyLayerReturnType(unittest.TestCase): y1, y2 = Tanh.apply(input1) def test_forward_kwargs_fake_tensor(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1): @@ -820,9 +787,7 @@ class TestPyLayerReturnType(unittest.TestCase): y = Tanh.apply(x1=input1) def test_forward_return_fake_tensor(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1): @@ -838,9 +803,7 @@ class TestPyLayerReturnType(unittest.TestCase): y = Tanh.apply(x1=input1) def test_forward_return_fake_tensor_tuple(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1): @@ -856,9 +819,7 @@ class TestPyLayerReturnType(unittest.TestCase): y = Tanh.apply(x1=input1) def test_backward_return_fake_tensor_tuple(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1, x2): return x1 + 1, x1 + 2 @@ -876,9 +837,7 @@ class TestPyLayerReturnType(unittest.TestCase): y.mean().backward() def test_backward_return_fake_tensor(self): - class Tanh(LegacyPyLayer): - @staticmethod def forward(ctx, x1): return x1 + 1, x1 + 2 diff --git a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py index 6f3f94253c83d607e2e0cf419e4d773949739393..d84730fa3bd12d4ffbcdfea1f02109a47c498c34 100644 --- a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py +++ b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py @@ -18,7 +18,6 @@ import paddle.fluid as fluid class TestPyramidHashOpApi(unittest.TestCase): - def test_api(self): num_voc = 128 embed_dim = 64 @@ -52,14 +51,14 @@ class TestPyramidHashOpApi(unittest.TestCase): place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.randint(0, num_voc, x_shape).astype('int32'), x_lod, - place) + np.random.randint(0, num_voc, x_shape).astype('int32'), x_lod, place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={'x': x_tensor}, - fetch_list=[hash_embd], - return_numpy=False) + ret = exe.run( + feed={'x': x_tensor}, fetch_list=[hash_embd], return_numpy=False + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py b/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py index 3144e5da55637645bb68c468dd470c0a8cf4fd7c..4a650dc07b973d266959b3cad5d2b2a200ba0fd0 100644 --- a/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py +++ b/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py @@ -18,7 +18,6 @@ import unittest class TestBF16DataType(unittest.TestCase): - def test_matmul(self): a_bf16 = np.random.random((6, 7)).astype(bfloat16) b_bf16 = np.random.random((7, 8)).astype(bfloat16) diff --git a/python/paddle/fluid/tests/unittests/test_qr_op.py b/python/paddle/fluid/tests/unittests/test_qr_op.py index 
4d8bfb220fcac690709384dbd9860f519dbf088a..39360d2e7215880099b5a6dbc576b5207e613a04 100644 --- a/python/paddle/fluid/tests/unittests/test_qr_op.py +++ b/python/paddle/fluid/tests/unittests/test_qr_op.py @@ -22,7 +22,6 @@ from op_test import OpTest class TestQrOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = paddle.linalg.qr @@ -73,38 +72,36 @@ class TestQrOp(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X'], ['Q', 'R'], - check_eager=True, - numeric_grad_delta=1e-5, - max_relative_error=1e-6) + self.check_grad( + ['X'], + ['Q', 'R'], + check_eager=True, + numeric_grad_delta=1e-5, + max_relative_error=1e-6, + ) class TestQrOpCase1(TestQrOp): - def get_shape(self): return (10, 12) class TestQrOpCase2(TestQrOp): - def get_shape(self): return (16, 15) class TestQrOpCase3(TestQrOp): - def get_shape(self): return (2, 12, 16) class TestQrOpCase4(TestQrOp): - def get_shape(self): return (3, 16, 15) class TestQrOpCase5(TestQrOp): - def get_mode(self): return "complete" @@ -113,7 +110,6 @@ class TestQrOpCase5(TestQrOp): class TestQrOpCase6(TestQrOp): - def get_mode(self): return "complete" @@ -122,7 +118,6 @@ class TestQrOpCase6(TestQrOp): class TestQrAPI(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() np.random.seed(7) @@ -180,12 +175,13 @@ class TestQrAPI(unittest.TestCase): (4, 5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] modes = ["reduced", "complete", "r"] dtypes = ["float32", "float64"] for tensor_shape, mode, dtype in itertools.product( - tensor_shapes, modes, dtypes): + tensor_shapes, modes, dtypes + ): run_qr_dygraph(tensor_shape, mode, dtype) def test_static(self): @@ -227,33 +223,34 @@ class TestQrAPI(unittest.TestCase): tmp_q, tmp_r = np.linalg.qr(a[coord], mode=mode) np_q[coord] = tmp_q np_r[coord] = tmp_r - x = paddle.fluid.data(name="input", - shape=shape, - dtype=dtype) + x = paddle.fluid.data( + name="input", shape=shape, dtype=dtype + ) if mode == "r": r = paddle.linalg.qr(x, mode=mode) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": a}, - fetch_list=[r]) - np.testing.assert_allclose(fetches[0], - np_r, - rtol=1e-05, - atol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": a}, + fetch_list=[r], + ) + np.testing.assert_allclose( + fetches[0], np_r, rtol=1e-05, atol=1e-05 + ) else: q, r = paddle.linalg.qr(x, mode=mode) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": a}, - fetch_list=[q, r]) - np.testing.assert_allclose(fetches[0], - np_q, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(fetches[1], - np_r, - rtol=1e-05, - atol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": a}, + fetch_list=[q, r], + ) + np.testing.assert_allclose( + fetches[0], np_q, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + fetches[1], np_r, rtol=1e-05, atol=1e-05 + ) tensor_shapes = [ (3, 5), @@ -264,12 +261,13 @@ class TestQrAPI(unittest.TestCase): (4, 5, 3), # 3-dim Tensors (2, 5, 3, 5), (3, 5, 5, 5), - (4, 5, 5, 3) # 4-dim Tensors + (4, 5, 5, 3), # 4-dim Tensors ] modes = ["reduced", "complete", "r"] dtypes = ["float32", "float64"] for tensor_shape, mode, dtype in itertools.product( - tensor_shapes, modes, dtypes): + tensor_shapes, modes, dtypes + ): run_qr_static(tensor_shape, mode, dtype) diff --git a/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py 
b/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py index 259e5556deefe6f69438c765441d7e4322cc9136..3d7406c457e475be226b4486f866d2c879c2724c 100644 --- a/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py +++ b/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py @@ -16,15 +16,17 @@ import unittest import numpy as np import paddle -API_list = [(paddle.quantile, np.quantile), - (paddle.nanquantile, np.nanquantile)] +API_list = [ + (paddle.quantile, np.quantile), + (paddle.nanquantile, np.nanquantile), +] class TestQuantileAndNanquantile(unittest.TestCase): """ - This class is used for numerical precision testing. If there is - a corresponding numpy API, the precision comparison can be performed directly. - Otherwise, it needs to be verified by numpy implementated function. + This class is used for numerical precision testing. If there is + a corresponding numpy API, the precision comparison can be performed directly. + Otherwise, it needs to be verified by numpy implementated function. """ def setUp(self): @@ -99,10 +101,9 @@ class TestQuantileAndNanquantile(unittest.TestCase): x = paddle.to_tensor(input_data) paddle_res = paddle.quantile(x, q=0.35, axis=0) np_res = np.quantile(input_data, q=0.35, axis=0) - np.testing.assert_allclose(paddle_res.numpy(), - np_res, - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + paddle_res.numpy(), np_res, rtol=1e-05, equal_nan=True + ) # Test correctness when input filled with NaN. def test_nanquantile_all_NaN(self): @@ -111,15 +112,14 @@ class TestQuantileAndNanquantile(unittest.TestCase): x = paddle.to_tensor(input_data) paddle_res = paddle.nanquantile(x, q=0.35, axis=0) np_res = np.nanquantile(input_data, q=0.35, axis=0) - np.testing.assert_allclose(paddle_res.numpy(), - np_res, - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + paddle_res.numpy(), np_res, rtol=1e-05, equal_nan=True + ) class TestMuitlpleQ(unittest.TestCase): """ - This class is used to test multiple input of q. + This class is used to test multiple input of q. """ def setUp(self): @@ -139,21 +139,19 @@ class TestMuitlpleQ(unittest.TestCase): def test_quantile_multiple_axis_keepdim(self): x = paddle.to_tensor(self.input_data) - paddle_res = paddle.quantile(x, - q=[0.1, 0.2, 0.3], - axis=[1, 2], - keepdim=True) - np_res = np.quantile(self.input_data, - q=[0.1, 0.2, 0.3], - axis=[1, 2], - keepdims=True) + paddle_res = paddle.quantile( + x, q=[0.1, 0.2, 0.3], axis=[1, 2], keepdim=True + ) + np_res = np.quantile( + self.input_data, q=[0.1, 0.2, 0.3], axis=[1, 2], keepdims=True + ) np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) class TestError(unittest.TestCase): """ - This class is used to test that exceptions are thrown correctly. - Validity of all parameter values and types should be considered. + This class is used to test that exceptions are thrown correctly. + Validity of all parameter values and types should be considered. """ def setUp(self): @@ -218,8 +216,8 @@ class TestError(unittest.TestCase): class TestQuantileRuntime(unittest.TestCase): """ - This class is used to test the API could run correctly with - different devices, different data types, and dygraph/static mode. + This class is used to test the API could run correctly with + different devices, different data types, and dygraph/static mode. 
""" def setUp(self): @@ -241,20 +239,22 @@ class TestQuantileRuntime(unittest.TestCase): x = paddle.to_tensor(np_input_data, dtype=dtype) paddle_res = func(x, q=0.5, axis=1) np_res = res_func(np_input_data, q=0.5, axis=1) - np.testing.assert_allclose(paddle_res.numpy(), - np_res, - rtol=1e-05) + np.testing.assert_allclose( + paddle_res.numpy(), np_res, rtol=1e-05 + ) def test_static(self): paddle.enable_static() for (func, res_func) in API_list: for device in self.devices: - x = paddle.static.data(name="x", - shape=self.input_data.shape, - dtype=paddle.float32) - x_fp64 = paddle.static.data(name="x_fp64", - shape=self.input_data.shape, - dtype=paddle.float64) + x = paddle.static.data( + name="x", shape=self.input_data.shape, dtype=paddle.float32 + ) + x_fp64 = paddle.static.data( + name="x_fp64", + shape=self.input_data.shape, + dtype=paddle.float64, + ) results = func(x, q=0.5, axis=1) np_input_data = self.input_data.astype('float32') @@ -264,16 +264,15 @@ class TestQuantileRuntime(unittest.TestCase): exe = paddle.static.Executor(device) paddle_res, paddle_res_fp64 = exe.run( paddle.static.default_main_program(), - feed={ - "x": np_input_data, - "x_fp64": np_input_data_fp64 - }, - fetch_list=[results, results_fp64]) + feed={"x": np_input_data, "x_fp64": np_input_data_fp64}, + fetch_list=[results, results_fp64], + ) np_res = res_func(np_input_data, q=0.5, axis=1) np_res_fp64 = res_func(np_input_data_fp64, q=0.5, axis=1) self.assertTrue( np.allclose(paddle_res, np_res) - and np.allclose(paddle_res_fp64, np_res_fp64)) + and np.allclose(paddle_res_fp64, np_res_fp64) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_query_op.py b/python/paddle/fluid/tests/unittests/test_query_op.py index 162c1f440e619a76a109428e2fb8ec80ab0800a9..a97530febfa6195eeb978a1b29f58413ee3a3a39 100644 --- a/python/paddle/fluid/tests/unittests/test_query_op.py +++ b/python/paddle/fluid/tests/unittests/test_query_op.py @@ -18,7 +18,6 @@ from paddle.fluid import core class TestCudnnVersion(unittest.TestCase): - def test_no_cudnn(self): cudnn_version = paddle.get_cudnn_version() if not core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_queue.py b/python/paddle/fluid/tests/unittests/test_queue.py index 703c11e710cabaa9826678e4f7d4403dde99c9fe..bfc2f1e0e6f2212ed9393181afb4713e79482d3b 100644 --- a/python/paddle/fluid/tests/unittests/test_queue.py +++ b/python/paddle/fluid/tests/unittests/test_queue.py @@ -21,7 +21,6 @@ import paddle.fluid.core as core class TestQueue(unittest.TestCase): - def test_eq(self): """ test queue_generator op, enqueue op and dequeue op. 
@@ -31,39 +30,51 @@ class TestQueue(unittest.TestCase):
         startup_program = fluid.Program()
         value = np.random.rand(1)
         with fluid.program_guard(main_program, startup_program):
-            data_in = layers.create_global_var(shape=[2, 3],
-                                               value=value,
-                                               dtype="float32",
-                                               persistable=True,
-                                               name='var_in')
-            data_out = layers.create_global_var(shape=[2, 3],
-                                                value=value - 1.0,
-                                                dtype="float32",
-                                                persistable=True,
-                                                name='var_out')
+            data_in = layers.create_global_var(
+                shape=[2, 3],
+                value=value,
+                dtype="float32",
+                persistable=True,
+                name='var_in',
+            )
+            data_out = layers.create_global_var(
+                shape=[2, 3],
+                value=value - 1.0,
+                dtype="float32",
+                persistable=True,
+                name='var_out',
+            )
             startup_block = startup_program.block(0)
             queue_name = 'blocking_queue'
-            startup_block.create_var(name=queue_name,
-                                     persistable=True,
-                                     type=core.VarDesc.VarType.RAW)
-            startup_block.append_op(type="queue_generator",
-                                    attrs={'names': [queue_name]})
+            startup_block.create_var(
+                name=queue_name, persistable=True, type=core.VarDesc.VarType.RAW
+            )
+            startup_block.append_op(
+                type="queue_generator", attrs={'names': [queue_name]}
+            )
             block = main_program.block(0)
-            block.append_op(type='enqueue',
-                            inputs={'X': data_in},
-                            attrs={'queue_name': queue_name})
-            block.append_op(type='dequeue',
-                            outputs={'Out': [data_out]},
-                            attrs={'queue_name': queue_name})
+            block.append_op(
+                type='enqueue',
+                inputs={'X': data_in},
+                attrs={'queue_name': queue_name},
+            )
+            block.append_op(
+                type='dequeue',
+                outputs={'Out': [data_out]},
+                attrs={'queue_name': queue_name},
+            )
 
-        place = fluid.CUDAPlace(
-            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
+        place = (
+            fluid.CUDAPlace(0)
+            if core.is_compiled_with_cuda()
+            else fluid.CPUPlace()
+        )
         exe = fluid.Executor(place)
         exe.run(startup_program)
-        ret, = exe.run(main_program, fetch_list=[data_out.name])
-        np.testing.assert_allclose(np.asarray(ret),
-                                   np.full((2, 3), value, np.float32),
-                                   rtol=1e-05)
+        (ret,) = exe.run(main_program, fetch_list=[data_out.name])
+        np.testing.assert_allclose(
+            np.asarray(ret), np.full((2, 3), value, np.float32), rtol=1e-05
+        )
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_rad2deg.py b/python/paddle/fluid/tests/unittests/test_rad2deg.py
index 00cf6e9fad575cdca4b67304351cc49ba7be8335..220cf96f04f54527255063ca9d5f9d122096b755 100644
--- a/python/paddle/fluid/tests/unittests/test_rad2deg.py
+++ b/python/paddle/fluid/tests/unittests/test_rad2deg.py
@@ -22,11 +22,11 @@ paddle.enable_static()
 
 
 class TestRad2degAPI(unittest.TestCase):
-
     def setUp(self):
         self.x_dtype = 'float64'
-        self.x_np = np.array([3.142, -3.142, 6.283, -6.283, 1.570,
-                              -1.570]).astype(np.float64)
+        self.x_np = np.array(
+            [3.142, -3.142, 6.283, -6.283, 1.570, -1.570]
+        ).astype(np.float64)
         self.x_shape = [6]
         self.out_np = np.rad2deg(self.x_np)
 
@@ -37,12 +37,17 @@ class TestRad2degAPI(unittest.TestCase):
             x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape)
             out = paddle.rad2deg(x)
 
-            place = fluid.CUDAPlace(
-                0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
+            place = (
+                fluid.CUDAPlace(0)
+                if core.is_compiled_with_cuda()
+                else fluid.CPUPlace()
+            )
             exe = fluid.Executor(place)
-            res = exe.run(fluid.default_main_program(),
-                          feed={'input': self.x_np},
-                          fetch_list=[out])
+            res = exe.run(
+                fluid.default_main_program(),
+                feed={'input': self.x_np},
+                fetch_list=[out],
+            )
             self.assertTrue((np.array(out[0]) == self.out_np).all())
 
     def test_dygraph(self):
@@ -55,7 +60,6 @@ class TestRad2degAPI(unittest.TestCase):
 
 
 class
TestRad2degAPI2(TestRad2degAPI): - def setUp(self): self.x_np = np.pi / 2 self.x_shape = [1] diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py index b889d79b078f207fe0b96b0583ccb0230063d7a2..350380541442446e2c944410ebf71e9822a033eb 100644 --- a/python/paddle/fluid/tests/unittests/test_rand_op.py +++ b/python/paddle/fluid/tests/unittests/test_rand_op.py @@ -33,8 +33,9 @@ class TestRandOpError(unittest.TestCase): with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor(np.zeros((4, 784)), [[1, 1, 1, 1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace() + ) rand(x1) self.assertRaises(TypeError, test_Variable) @@ -69,9 +70,9 @@ class TestRandOp(unittest.TestCase): var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64") result_3 = rand(var_shape) - var_shape_int32 = fluid.data(name='var_shape_int32', - shape=[2], - dtype="int32") + var_shape_int32 = fluid.data( + name='var_shape_int32', shape=[2], dtype="int32" + ) result_4 = rand(var_shape_int32) exe.run(startup_program) @@ -80,11 +81,9 @@ class TestRandOp(unittest.TestCase): x2 = np.array([4, 3]).astype('int32') ret = exe.run( train_program, - feed={ - "var_shape": x1, - "var_shape_int32": x2 - }, - fetch_list=[result_1, result_1, result_2, result_3, result_4]) + feed={"var_shape": x1, "var_shape_int32": x2}, + fetch_list=[result_1, result_1, result_2, result_3, result_4], + ) def test_run(self): self.run_net(False) @@ -118,7 +117,6 @@ class TestRandOpForDygraph(unittest.TestCase): class TestRandDtype(unittest.TestCase): - def test_default_dtype(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_randint_like.py b/python/paddle/fluid/tests/unittests/test_randint_like.py index 972243f24b59c7b71dc80bd1baf147a586e383e7..32a717d3815bff52cc818618e0b7a41d1e942550 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_like.py +++ b/python/paddle/fluid/tests/unittests/test_randint_like.py @@ -20,7 +20,6 @@ from paddle.static import program_guard, Program # Test python API class TestRandintLikeAPI(unittest.TestCase): - def setUp(self): self.x_bool = np.zeros((10, 12)).astype("bool") self.x_int32 = np.zeros((10, 12)).astype("int32") @@ -30,16 +29,19 @@ class TestRandintLikeAPI(unittest.TestCase): self.x_float64 = np.zeros((10, 12)).astype("float64") self.dtype = ["bool", "int32", "int64", "float16", "float32", "float64"] - self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() with program_guard(Program(), Program()): # results are from [-100, 100). 
- x_bool = paddle.fluid.data(name="x_bool", - shape=[10, 12], - dtype="bool") + x_bool = paddle.fluid.data( + name="x_bool", shape=[10, 12], dtype="bool" + ) exe = paddle.static.Executor(self.place) # x dtype is bool output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist1 = [ @@ -51,9 +53,9 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(out.dtype, np.dtype(dtype)) self.assertTrue(((out >= -10) & (out <= 10)).all(), True) with program_guard(Program(), Program()): - x_int32 = paddle.fluid.data(name="x_int32", - shape=[10, 12], - dtype="int32") + x_int32 = paddle.fluid.data( + name="x_int32", shape=[10, 12], dtype="int32" + ) exe = paddle.static.Executor(self.place) # x dtype is int32 output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist2 = [ @@ -66,9 +68,9 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -5) & (out <= 10)).all(), True) with program_guard(Program(), Program()): - x_int64 = paddle.fluid.data(name="x_int64", - shape=[10, 12], - dtype="int64") + x_int64 = paddle.fluid.data( + name="x_int64", shape=[10, 12], dtype="int64" + ) exe = paddle.static.Executor(self.place) # x dtype is int64 output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist3 = [ @@ -81,49 +83,52 @@ class TestRandintLikeAPI(unittest.TestCase): self.assertTrue(((out >= -100) & (out <= 100)).all(), True) if paddle.is_compiled_with_cuda(): with program_guard(Program(), Program()): - x_float16 = paddle.fluid.data(name="x_float16", - shape=[10, 12], - dtype="float16") + x_float16 = paddle.fluid.data( + name="x_float16", shape=[10, 12], dtype="float16" + ) exe = paddle.static.Executor(self.place) # x dtype is float16 output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist4 = [ paddle.randint_like(x_float16, low=-3, high=25, dtype=dtype) for dtype in self.dtype ] - outs4 = exe.run(feed={'x_float16': self.x_float16}, - fetch_list=outlist4) + outs4 = exe.run( + feed={'x_float16': self.x_float16}, fetch_list=outlist4 + ) for out, dtype in zip(outs4, self.dtype): self.assertTrue(out.dtype, np.dtype(dtype)) self.assertTrue(((out >= -3) & (out <= 25)).all(), True) with program_guard(Program(), Program()): - x_float32 = paddle.fluid.data(name="x_float32", - shape=[10, 12], - dtype="float32") + x_float32 = paddle.fluid.data( + name="x_float32", shape=[10, 12], dtype="float32" + ) exe = paddle.static.Executor(self.place) # x dtype is float32 output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist5 = [ paddle.randint_like(x_float32, low=-25, high=25, dtype=dtype) for dtype in self.dtype ] - outs5 = exe.run(feed={'x_float32': self.x_float32}, - fetch_list=outlist5) + outs5 = exe.run( + feed={'x_float32': self.x_float32}, fetch_list=outlist5 + ) for out, dtype in zip(outs5, self.dtype): self.assertTrue(out.dtype, np.dtype(dtype)) self.assertTrue(((out >= -25) & (out <= 25)).all(), True) with program_guard(Program(), Program()): - x_float64 = paddle.fluid.data(name="x_float64", - shape=[10, 12], - dtype="float64") + x_float64 = paddle.fluid.data( + name="x_float64", shape=[10, 12], dtype="float64" + ) exe = paddle.static.Executor(self.place) # x dtype is float64 output dtype in ["bool", "int32", "int64", "float16", "float32", "float64"] outlist6 = [ paddle.randint_like(x_float64, low=-16, high=16, dtype=dtype) for dtype in self.dtype ] - outs6 = exe.run(feed={'x_float64': self.x_float64}, - fetch_list=outlist6) + outs6 = exe.run( + feed={'x_float64': 
self.x_float64}, fetch_list=outlist6 + ) for out, dtype in zip(outs6, self.dtype): self.assertTrue(out.dtype, dtype) self.assertTrue(((out >= -16) & (out <= 16)).all(), True) @@ -132,62 +137,63 @@ class TestRandintLikeAPI(unittest.TestCase): paddle.disable_static(self.place) # x dtype ["bool", "int32", "int64", "float32", "float64"] for x in [ - self.x_bool, self.x_int32, self.x_int64, self.x_float32, - self.x_float64 + self.x_bool, + self.x_int32, + self.x_int64, + self.x_float32, + self.x_float64, ]: x_inputs = paddle.to_tensor(x) # self.dtype ["bool", "int32", "int64", "float16", "float32", "float64"] for dtype in self.dtype: - out = paddle.randint_like(x_inputs, - low=-100, - high=100, - dtype=dtype) + out = paddle.randint_like( + x_inputs, low=-100, high=100, dtype=dtype + ) self.assertTrue(out.numpy().dtype, np.dtype(dtype)) self.assertTrue( - ((out.numpy() >= -100) & (out.numpy() <= 100)).all(), True) + ((out.numpy() >= -100) & (out.numpy() <= 100)).all(), True + ) # x dtype ["float16"] if paddle.is_compiled_with_cuda(): x_inputs = paddle.to_tensor(self.x_float16) # self.dtype ["bool", "int32", "int64", "float16", "float32", "float64"] for dtype in self.dtype: - out = paddle.randint_like(x_inputs, - low=-100, - high=100, - dtype=dtype) + out = paddle.randint_like( + x_inputs, low=-100, high=100, dtype=dtype + ) self.assertTrue(out.numpy().dtype, np.dtype(dtype)) self.assertTrue( - ((out.numpy() >= -100) & (out.numpy() <= 100)).all(), True) + ((out.numpy() >= -100) & (out.numpy() <= 100)).all(), True + ) paddle.enable_static() def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - x_bool = paddle.fluid.data(name="x_bool", - shape=[10, 12], - dtype="bool") - x_int32 = paddle.fluid.data(name="x_int32", - shape=[10, 12], - dtype="int32") - x_int64 = paddle.fluid.data(name="x_int64", - shape=[10, 12], - dtype="int64") - x_float16 = paddle.fluid.data(name="x_float16", - shape=[10, 12], - dtype="float16") - x_float32 = paddle.fluid.data(name="x_float32", - shape=[10, 12], - dtype="float32") - x_float64 = paddle.fluid.data(name="x_float64", - shape=[10, 12], - dtype="float64") + x_bool = paddle.fluid.data( + name="x_bool", shape=[10, 12], dtype="bool" + ) + x_int32 = paddle.fluid.data( + name="x_int32", shape=[10, 12], dtype="int32" + ) + x_int64 = paddle.fluid.data( + name="x_int64", shape=[10, 12], dtype="int64" + ) + x_float16 = paddle.fluid.data( + name="x_float16", shape=[10, 12], dtype="float16" + ) + x_float32 = paddle.fluid.data( + name="x_float32", shape=[10, 12], dtype="float32" + ) + x_float64 = paddle.fluid.data( + name="x_float64", shape=[10, 12], dtype="float64" + ) # x dtype is bool # low is 5 and high is 5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_bool, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_bool, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high self.assertRaises(ValueError, paddle.randint_like, x_bool, high=-5) # if high is None, low must be greater than 0 @@ -195,11 +201,9 @@ class TestRandintLikeAPI(unittest.TestCase): # x dtype is int32 # low is 5 and high is 5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_int32, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_int32, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high self.assertRaises(ValueError, paddle.randint_like, x_int32, high=-5) # if high is None, low must be greater 
than 0 @@ -207,11 +211,9 @@ class TestRandintLikeAPI(unittest.TestCase): # x dtype is int64 # low is 5 and high is 5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_int64, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_int64, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high self.assertRaises(ValueError, paddle.randint_like, x_int64, high=-5) # if high is None, low must be greater than 0 @@ -220,57 +222,45 @@ class TestRandintLikeAPI(unittest.TestCase): # x dtype is float16 # low is 5 and high is 5, low must less then high if paddle.is_compiled_with_cuda(): - self.assertRaises(ValueError, - paddle.randint_like, - x_float16, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_float16, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_float16, - high=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float16, high=-5 + ) # if high is None, low must be greater than 0 - self.assertRaises(ValueError, - paddle.randint_like, - x_float16, - low=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float16, low=-5 + ) # x dtype is float32 # low is 5 and high is 5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_float32, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_float32, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_float32, - high=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float32, high=-5 + ) # if high is None, low must be greater than 0 - self.assertRaises(ValueError, - paddle.randint_like, - x_float32, - low=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float32, low=-5 + ) # x dtype is float64 # low is 5 and high is 5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_float64, - low=5, - high=5) + self.assertRaises( + ValueError, paddle.randint_like, x_float64, low=5, high=5 + ) # low(default value) is 0 and high is -5, low must less then high - self.assertRaises(ValueError, - paddle.randint_like, - x_float64, - high=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float64, high=-5 + ) # if high is None, low must be greater than 0 - self.assertRaises(ValueError, - paddle.randint_like, - x_float64, - low=-5) + self.assertRaises( + ValueError, paddle.randint_like, x_float64, low=-5 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py index 549554733febcca3d7b8a427b6641ad1249fd614..505cd43923a83f0690b13ffc78a3977bfb1a356a 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_op.py +++ b/python/paddle/fluid/tests/unittests/test_randint_op.py @@ -32,7 +32,6 @@ def output_hist(out): class TestRandintOp(OpTest): - def setUp(self): self.op_type = "randint" self.inputs = {} @@ -56,7 +55,6 @@ class TestRandintOp(OpTest): class TestRandintOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): self.assertRaises(TypeError, paddle.randint, 5, shape=np.array([2])) @@ -66,10 +64,9 @@ class TestRandintOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.randint, 5, shape=['2']) shape_tensor = paddle.static.data('X', [1]) self.assertRaises(TypeError, paddle.randint, 5, 
shape=shape_tensor) - self.assertRaises(TypeError, - paddle.randint, - 5, - shape=[shape_tensor]) + self.assertRaises( + TypeError, paddle.randint, 5, shape=[shape_tensor] + ) def test_errors_eager(self): with _test_eager_guard(): @@ -77,14 +74,14 @@ class TestRandintOpError(unittest.TestCase): class TestRandintOp_attr_tensorlist(OpTest): - def setUp(self): self.op_type = "randint" self.new_shape = (10000, 784) shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype("int64") * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype("int64") * ele) + ) self.inputs = {'ShapeTensorList': shape_tensor} self.init_attrs() self.outputs = {"Out": np.zeros((10000, 784)).astype("int32")} @@ -106,7 +103,6 @@ class TestRandintOp_attr_tensorlist(OpTest): class TestRandint_attr_tensor(OpTest): - def setUp(self): self.op_type = "randint" self.inputs = {"ShapeTensor": np.array([10000, 784]).astype("int64")} @@ -131,43 +127,42 @@ class TestRandint_attr_tensor(OpTest): # Test python API class TestRandintAPI(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): # results are from [0, 5). out1 = paddle.randint(5) # shape is a list and dtype is 'int32' - out2 = paddle.randint(low=-100, - high=100, - shape=[64, 64], - dtype='int32') + out2 = paddle.randint( + low=-100, high=100, shape=[64, 64], dtype='int32' + ) # shape is a tuple and dtype is 'int64' - out3 = paddle.randint(low=-100, - high=100, - shape=(32, 32, 3), - dtype='int64') + out3 = paddle.randint( + low=-100, high=100, shape=(32, 32, 3), dtype='int64' + ) # shape is a tensorlist and dtype is 'float32' dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32) dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50) - out4 = paddle.randint(low=-100, - high=100, - shape=[dim_1, 5, dim_2], - dtype='int32') + out4 = paddle.randint( + low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32' + ) # shape is a tensor and dtype is 'float64' - var_shape = paddle.static.data(name='var_shape', - shape=[2], - dtype="int64") - out5 = paddle.randint(low=1, - high=1000, - shape=var_shape, - dtype='int64') - - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + var_shape = paddle.static.data( + name='var_shape', shape=[2], dtype="int64" + ) + out5 = paddle.randint( + low=1, high=1000, shape=var_shape, dtype='int64' + ) + + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) outs = exe.run( feed={'var_shape': np.array([100, 100]).astype('int64')}, - fetch_list=[out1, out2, out3, out4, out5]) + fetch_list=[out1, out2, out3, out4, out5], + ) def test_api_eager(self): with _test_eager_guard(): @@ -175,7 +170,6 @@ class TestRandintAPI(unittest.TestCase): class TestRandintImperative(unittest.TestCase): - def test_api(self): paddle.disable_static() @@ -197,7 +191,6 @@ class TestRandintImperative(unittest.TestCase): class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -221,8 +214,9 @@ class TestRandomValue(unittest.TestCase): paddle.set_device('gpu') paddle.seed(100) - x = paddle.randint(-10000, 10000, [32, 3, 1024, 1024], - dtype='int32').numpy() + x = paddle.randint( + -10000, 10000, [32, 3, 1024, 1024], dtype='int32' + ).numpy() self.assertTrue(x.mean(), -0.7517569760481516) 
self.assertTrue(x.std(), 5773.696619107639) expect = [2535, 2109, 5916, -5011, -261] @@ -232,8 +226,9 @@ class TestRandomValue(unittest.TestCase): expect = [881, 1560, 1100, 9664, 1669] np.testing.assert_array_equal(x[30, 2, 1000, 1000:1005], expect) - x = paddle.randint(-10000, 10000, [32, 3, 1024, 1024], - dtype='int64').numpy() + x = paddle.randint( + -10000, 10000, [32, 3, 1024, 1024], dtype='int64' + ).numpy() self.assertTrue(x.mean(), -1.461287518342336) self.assertTrue(x.std(), 5773.023477548159) expect = [7213, -9597, 754, 8129, -1158] diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py index 4590af0f10f81b9c24a6eb4824b39a6af798731e..d136e3fd57e14abf2f4236bf427537a59393635a 100644 --- a/python/paddle/fluid/tests/unittests/test_randn_op.py +++ b/python/paddle/fluid/tests/unittests/test_randn_op.py @@ -20,7 +20,6 @@ from paddle.static import program_guard, Program class TestRandnOp(unittest.TestCase): - def test_api(self): shape = [1000, 784] train_program = Program() @@ -36,24 +35,31 @@ class TestRandnOp(unittest.TestCase): var_shape = paddle.static.data('X', [2], 'int32') x4 = paddle.randn(var_shape) - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) - res = exe.run(train_program, - feed={'X': np.array(shape, dtype='int32')}, - fetch_list=[x1, x2, x3, x4]) + res = exe.run( + train_program, + feed={'X': np.array(shape, dtype='int32')}, + fetch_list=[x1, x2, x3, x4], + ) for out in res: - self.assertAlmostEqual(np.mean(out), .0, delta=0.1) - self.assertAlmostEqual(np.std(out), 1., delta=0.1) + self.assertAlmostEqual(np.mean(out), 0.0, delta=0.1) + self.assertAlmostEqual(np.std(out), 1.0, delta=0.1) class TestRandnOpForDygraph(unittest.TestCase): - def test_api(self): shape = [1000, 784] - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) paddle.disable_static(place) x1 = paddle.randn(shape, 'float32') x2 = paddle.randn(shape, 'float64') @@ -66,13 +72,12 @@ class TestRandnOpForDygraph(unittest.TestCase): x4 = paddle.randn(var_shape) for out in [x1, x2, x3, x4]: - self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1) - self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1) + self.assertAlmostEqual(np.mean(out.numpy()), 0.0, delta=0.1) + self.assertAlmostEqual(np.std(out.numpy()), 1.0, delta=0.1) paddle.enable_static() class TestRandnOpError(unittest.TestCase): - def test_error(self): with program_guard(Program(), Program()): # The argument shape's type of randn_op should be list or tuple. 
diff --git a/python/paddle/fluid/tests/unittests/test_random_crop_op.py b/python/paddle/fluid/tests/unittests/test_random_crop_op.py
index 580d348156c2ece533f3c578b8af77327e32d0fc..ae474618733cb7ce4695007720482dcfd010d3af 100644
--- a/python/paddle/fluid/tests/unittests/test_random_crop_op.py
+++ b/python/paddle/fluid/tests/unittests/test_random_crop_op.py
@@ -19,15 +19,15 @@ import paddle.fluid as fluid
 
 
 class TestRandomCropOp(OpTest):
-
     def setUp(self):
-        to_crop = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] *
-                           5).astype(np.int32)
+        to_crop = np.array(
+            [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] * 5
+        ).astype(np.int32)
         self.possible_res = [
             np.array([[1, 2, 3], [5, 6, 7]]).astype(np.int32),
             np.array([[2, 3, 4], [6, 7, 8]]).astype(np.int32),
             np.array([[5, 6, 7], [9, 10, 11]]).astype(np.int32),
-            np.array([[6, 7, 8], [10, 11, 12]]).astype(np.int32)
+            np.array([[6, 7, 8], [10, 11, 12]]).astype(np.int32),
         ]
         self.op_type = "random_crop"
         self.inputs = {'X': to_crop, 'Seed': np.array([10]).astype('int64')}
@@ -45,7 +45,6 @@ class TestRandomCropOp(OpTest):
 
 
 class TestRandomCropOpError(unittest.TestCase):
-
     def test_errors(self):
         with fluid.program_guard(fluid.Program()):
 
@@ -56,17 +55,17 @@ class TestRandomCropOpError(unittest.TestCase):
             self.assertRaises(TypeError, test_x_type)
 
             def test_x_dtype():
-                x2 = fluid.layers.data(name='x2',
-                                       shape=[None, 3, 256, 256],
-                                       dtype='float16')
+                x2 = fluid.layers.data(
+                    name='x2', shape=[None, 3, 256, 256], dtype='float16'
+                )
                 fluid.layers.random_crop(x2)
 
             self.assertRaises(TypeError, test_x_dtype)
 
             def test_shape_type():
-                x3 = fluid.layers.data(name='x3',
-                                       shape=[None, 3, 256, 256],
-                                       dtype='float32')
+                x3 = fluid.layers.data(
+                    name='x3', shape=[None, 3, 256, 256], dtype='float32'
+                )
                 fluid.layers.random_crop(x3, shape=1)
 
             self.assertRaises(TypeError, test_shape_type)
diff --git a/python/paddle/fluid/tests/unittests/test_random_routing_op.py b/python/paddle/fluid/tests/unittests/test_random_routing_op.py
index 54313b4c10cff6a358919bf5f00fbf6f9043ebf7..03cce6c9caa8c019372548fc5379f377a204bf21 100644
--- a/python/paddle/fluid/tests/unittests/test_random_routing_op.py
+++ b/python/paddle/fluid/tests/unittests/test_random_routing_op.py
@@ -32,22 +32,24 @@ def random_routing(topk_idx, topk_value, prob, topk=2):
         raise RuntimeError("only topk=2 is supported now")
 
 
-@unittest.skipIf(not core.is_compiled_with_cuda(),
-                 "core is not compiled with CUDA")
+@unittest.skipIf(
+    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+)
 class TestNumberCountAPIFp32(unittest.TestCase):
-
     def setUp(self):
         self.dtype = "float32"
         self.init()
 
     def init(self):
         self.upper_range = 8
-        self.x = np.random.randint(-1, self.upper_range,
-                                   size=(200, 2)).astype('int64')
-        self.prob = np.random.random((self.x.shape[0], )).astype(self.dtype)
+        self.x = np.random.randint(-1, self.upper_range, size=(200, 2)).astype(
+            'int64'
+        )
+        self.prob = np.random.random((self.x.shape[0],)).astype(self.dtype)
         self.topk_value = np.random.random(self.x.shape).astype(self.dtype)
-        self.out = random_routing(self.x, self.topk_value,
-                                  self.prob).astype(self.dtype)
+        self.out = random_routing(self.x, self.topk_value, self.prob).astype(
+            self.dtype
+        )
         self.place = paddle.CUDAPlace(0)
 
     def func_api_dygraph(self):
@@ -64,10 +66,10 @@ class TestNumberCountAPIFp32(unittest.TestCase):
         self.func_api_dygraph()
 
 
-@unittest.skipIf(not core.is_compiled_with_cuda(),
-                 "core is not compiled with CUDA")
+@unittest.skipIf(
+    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
+) class TestNumberCountAPIFp16(TestNumberCountAPIFp32): - def setUp(self): self.dtype = "float16" self.init() diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index c49808f3ae54df48a322db6970ec97cf7d7ba5ba..97f3fa56dc5264ef502c4dd1cf37983d4c77a749 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -36,23 +36,20 @@ class TestGeneratorSeed(unittest.TestCase): x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) st1 = gen.get_state() - x1 = fluid.layers.uniform_random([10], - dtype="float32", - min=0.0, - max=1.0) + x1 = fluid.layers.uniform_random( + [10], dtype="float32", min=0.0, max=1.0 + ) gen.set_state(st1) print(gen.get_state()) - x2 = fluid.layers.uniform_random([10], - dtype="float32", - min=0.0, - max=1.0) + x2 = fluid.layers.uniform_random( + [10], dtype="float32", min=0.0, max=1.0 + ) paddle.seed(12312321111) - x3 = fluid.layers.uniform_random([10], - dtype="float32", - min=0.0, - max=1.0) + x3 = fluid.layers.uniform_random( + [10], dtype="float32", min=0.0, max=1.0 + ) x_np = x.numpy() x1_np = x1.numpy() @@ -78,14 +75,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) + # gen.set_state(cur_state) gen.manual_seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -103,17 +100,15 @@ class TestGeneratorSeed(unittest.TestCase): gen = paddle.seed(111111111) st = gen.get_state() # x = np.arange(1,101).reshape(2,50).astype("float32") - x = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) + x = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) y = fluid.layers.dropout(x, 0.5) gen.manual_seed(111111111) - #gen.set_state(st) - x1 = fluid.layers.uniform_random([2, 10], - dtype="float32", - min=0.0, - max=1.0) + # gen.set_state(st) + x1 = fluid.layers.uniform_random( + [2, 10], dtype="float32", min=0.0, max=1.0 + ) y1 = fluid.layers.dropout(x1, 0.5) y_np = y.numpy() y1_np = y1.numpy() @@ -137,7 +132,7 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) out1 = exe.run(train_program, feed={}, fetch_list=[y_1]) - #gen.set_state(cur_state) + # gen.set_state(cur_state) gen.manual_seed(123123143) out2 = exe.run(train_program, feed={}, fetch_list=[y_1]) out1_np = np.array(out1[0]) @@ -184,14 +179,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) + # gen.set_state(cur_state) gen.manual_seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -243,14 +238,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) 
exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) + # gen.set_state(cur_state) gen.manual_seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -297,14 +292,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) + # gen.set_state(cur_state) gen.manual_seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -356,14 +351,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) paddle.seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -383,31 +378,27 @@ class TestGeneratorSeed(unittest.TestCase): fluid.enable_dygraph() gen.manual_seed(12312321111) - x = fluid.layers.uniform_random([10, 10], - dtype="float32", - min=0.0, - max=1.0) + x = fluid.layers.uniform_random( + [10, 10], dtype="float32", min=0.0, max=1.0 + ) y = fluid.layers.sampling_id(x) st1 = gen.get_state() - x1 = fluid.layers.uniform_random([10, 10], - dtype="float32", - min=0.0, - max=1.0) + x1 = fluid.layers.uniform_random( + [10, 10], dtype="float32", min=0.0, max=1.0 + ) y1 = fluid.layers.sampling_id(x) gen.set_state(st1) - x2 = fluid.layers.uniform_random([10, 10], - dtype="float32", - min=0.0, - max=1.0) + x2 = fluid.layers.uniform_random( + [10, 10], dtype="float32", min=0.0, max=1.0 + ) y2 = fluid.layers.sampling_id(x) gen.manual_seed(12312321111) - x3 = fluid.layers.uniform_random([10, 10], - dtype="float32", - min=0.0, - max=1.0) + x3 = fluid.layers.uniform_random( + [10, 10], dtype="float32", min=0.0, max=1.0 + ) y3 = fluid.layers.sampling_id(x) x_np = y.numpy() @@ -437,14 +428,14 @@ class TestGeneratorSeed(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) paddle.seed(123123143) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) @@ -472,26 +463,30 @@ class TestGeneratorSeed(unittest.TestCase): result_1 = fluid.layers.fc( input=x, size=10, - param_attr=fluid.initializer.TruncatedNormal(loc=0.0, - scale=2.0)) + param_attr=fluid.initializer.TruncatedNormal( + loc=0.0, scale=2.0 + ), + ) result_2 = fluid.layers.fc( input=x, size=10, - param_attr=fluid.initializer.TruncatedNormal(loc=0.0, - scale=2.0)) + 
param_attr=fluid.initializer.TruncatedNormal( + loc=0.0, scale=2.0 + ), + ) exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) - out1 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out1 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) gen.manual_seed(123123143) with fluid.program_guard(train_program, startup_program): exe.run(startup_program) - out2 = exe.run(train_program, - feed={}, - fetch_list=[result_1, result_2]) + out2 = exe.run( + train_program, feed={}, fetch_list=[result_1, result_2] + ) out1_res1 = np.array(out1[0]) out1_res2 = np.array(out1[1]) diff --git a/python/paddle/fluid/tests/unittests/test_randperm_op.py b/python/paddle/fluid/tests/unittests/test_randperm_op.py index bb1e493a65dd9bd04de490a0020e88a48721da6c..12543a727a2117bfc396527634221a0b6abc281c 100644 --- a/python/paddle/fluid/tests/unittests/test_randperm_op.py +++ b/python/paddle/fluid/tests/unittests/test_randperm_op.py @@ -22,31 +22,38 @@ from paddle.fluid.framework import _test_eager_guard def check_randperm_out(n, data_np): - assert isinstance(data_np, np.ndarray), \ - "The input data_np should be np.ndarray." + assert isinstance( + data_np, np.ndarray + ), "The input data_np should be np.ndarray." gt_sorted = np.arange(n) out_sorted = np.sort(data_np) return list(gt_sorted == out_sorted) def error_msg(data_np): - return "The sorted ground truth and sorted out should " + \ - "be equal, out = " + str(data_np) + return ( + "The sorted ground truth and sorted out should " + + "be equal, out = " + + str(data_np) + ) def convert_dtype(dtype_str): dtype_str_list = ["int32", "int64", "float32", "float64"] dtype_num_list = [ - core.VarDesc.VarType.INT32, core.VarDesc.VarType.INT64, - core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64 + core.VarDesc.VarType.INT32, + core.VarDesc.VarType.INT64, + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP64, ] - assert dtype_str in dtype_str_list, dtype_str + \ - " should in " + str(dtype_str_list) + assert dtype_str in dtype_str_list, ( + dtype_str + " should in " + str(dtype_str_list) + ) return dtype_num_list[dtype_str_list.index(dtype_str)] class TestRandpermOp(OpTest): - """ Test randperm op.""" + """Test randperm op.""" def setUp(self): self.op_type = "randperm" @@ -70,8 +77,9 @@ class TestRandpermOp(OpTest): def verify_output(self, outs): out_np = np.array(outs[0]) - self.assertTrue(check_randperm_out(self.n, out_np), - msg=error_msg(out_np)) + self.assertTrue( + check_randperm_out(self.n, out_np), msg=error_msg(out_np) + ) def test_eager(self): with _test_eager_guard(): @@ -79,31 +87,26 @@ class TestRandpermOp(OpTest): class TestRandpermOpN(TestRandpermOp): - def init_attrs(self): self.n = 10000 class TestRandpermOpInt32(TestRandpermOp): - def init_attrs(self): self.dtype = "int32" class TestRandpermOpFloat32(TestRandpermOp): - def init_attrs(self): self.dtype = "float32" class TestRandpermOpFloat64(TestRandpermOp): - def init_attrs(self): self.dtype = "float64" class TestRandpermOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): self.assertRaises(ValueError, paddle.randperm, -3) @@ -111,11 +114,13 @@ class TestRandpermOpError(unittest.TestCase): class TestRandpermAPI(unittest.TestCase): - def test_out(self): n = 10 - place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) with program_guard(Program(), Program()): x1 = paddle.randperm(n) 
x2 = paddle.randperm(n, 'float32') @@ -130,20 +135,19 @@ class TestRandpermAPI(unittest.TestCase): class TestRandpermImperative(unittest.TestCase): - def test_out(self): paddle.disable_static() n = 10 for dtype in ['int32', np.int64, 'float32', 'float64']: data_p = paddle.randperm(n, dtype) data_np = data_p.numpy() - self.assertTrue(check_randperm_out(n, data_np), - msg=error_msg(data_np)) + self.assertTrue( + check_randperm_out(n, data_np), msg=error_msg(data_np) + ) paddle.enable_static() class TestRandpermEager(unittest.TestCase): - def test_out(self): paddle.disable_static() n = 10 @@ -151,13 +155,13 @@ class TestRandpermEager(unittest.TestCase): for dtype in ['int32', np.int64, 'float32', 'float64']: data_p = paddle.randperm(n, dtype) data_np = data_p.numpy() - self.assertTrue(check_randperm_out(n, data_np), - msg=error_msg(data_np)) + self.assertTrue( + check_randperm_out(n, data_np), msg=error_msg(data_np) + ) paddle.enable_static() class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -170,25 +174,70 @@ class TestRandomValue(unittest.TestCase): x = paddle.randperm(30000, dtype='int32').numpy() expect = [ - 24562, 8409, 9379, 10328, 20503, 18059, 9681, 21883, 11783, 27413 + 24562, + 8409, + 9379, + 10328, + 20503, + 18059, + 9681, + 21883, + 11783, + 27413, ] np.testing.assert_array_equal(x[0:10], expect) expect = [ - 29477, 27100, 9643, 16637, 8605, 16892, 27767, 2724, 1612, 13096 + 29477, + 27100, + 9643, + 16637, + 8605, + 16892, + 27767, + 2724, + 1612, + 13096, ] np.testing.assert_array_equal(x[10000:10010], expect) expect = [ - 298, 4104, 16479, 22714, 28684, 7510, 14667, 9950, 15940, 28343 + 298, + 4104, + 16479, + 22714, + 28684, + 7510, + 14667, + 9950, + 15940, + 28343, ] np.testing.assert_array_equal(x[20000:20010], expect) x = paddle.randperm(30000, dtype='int64').numpy() expect = [ - 6587, 1909, 5525, 23001, 6488, 14981, 14355, 3083, 29561, 8171 + 6587, + 1909, + 5525, + 23001, + 6488, + 14981, + 14355, + 3083, + 29561, + 8171, ] np.testing.assert_array_equal(x[0:10], expect) expect = [ - 23460, 12394, 22501, 5427, 20185, 9100, 5127, 1651, 25806, 4818 + 23460, + 12394, + 22501, + 5427, + 20185, + 9100, + 5127, + 1651, + 25806, + 4818, ] np.testing.assert_array_equal(x[10000:10010], expect) expect = [5829, 4508, 16193, 24836, 8526, 242, 9984, 9243, 1977, 11839] @@ -196,35 +245,83 @@ class TestRandomValue(unittest.TestCase): x = paddle.randperm(30000, dtype='float32').numpy() expect = [ - 5154., 10537., 14362., 29843., 27185., 28399., 27561., 4144., - 22906., 10705. + 5154.0, + 10537.0, + 14362.0, + 29843.0, + 27185.0, + 28399.0, + 27561.0, + 4144.0, + 22906.0, + 10705.0, ] np.testing.assert_array_equal(x[0:10], expect) expect = [ - 1958., 18414., 20090., 21910., 22746., 27346., 22347., 3002., 4564., - 26991. + 1958.0, + 18414.0, + 20090.0, + 21910.0, + 22746.0, + 27346.0, + 22347.0, + 3002.0, + 4564.0, + 26991.0, ] np.testing.assert_array_equal(x[10000:10010], expect) expect = [ - 25580., 12606., 553., 16387., 29536., 4241., 20946., 16899., 16339., - 4662. + 25580.0, + 12606.0, + 553.0, + 16387.0, + 29536.0, + 4241.0, + 20946.0, + 16899.0, + 16339.0, + 4662.0, ] np.testing.assert_array_equal(x[20000:20010], expect) x = paddle.randperm(30000, dtype='float64').numpy() expect = [ - 19051., 2449., 21940., 11121., 282., 7330., 13747., 24321., 21147., - 9163. 
+ 19051.0, + 2449.0, + 21940.0, + 11121.0, + 282.0, + 7330.0, + 13747.0, + 24321.0, + 21147.0, + 9163.0, ] np.testing.assert_array_equal(x[0:10], expect) expect = [ - 15483., 1315., 5723., 20954., 13251., 25539., 5074., 1823., 14945., - 17624. + 15483.0, + 1315.0, + 5723.0, + 20954.0, + 13251.0, + 25539.0, + 5074.0, + 1823.0, + 14945.0, + 17624.0, ] np.testing.assert_array_equal(x[10000:10010], expect) expect = [ - 10516., 2552., 29970., 5941., 986., 8007., 24805., 26753., 12202., - 21404. + 10516.0, + 2552.0, + 29970.0, + 5941.0, + 986.0, + 8007.0, + 24805.0, + 26753.0, + 12202.0, + 21404.0, ] np.testing.assert_array_equal(x[20000:20010], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_range.py b/python/paddle/fluid/tests/unittests/test_range.py index cdf4e8cd5e5bc19a9217061e1640b731762d43d5..cd6057e13b5c948c9a887e5ec6e8d2ce66d24820 100644 --- a/python/paddle/fluid/tests/unittests/test_range.py +++ b/python/paddle/fluid/tests/unittests/test_range.py @@ -24,20 +24,19 @@ def arange_wrapper(start, end, step, dtype=None): class TestRangeOp(OpTest): - def setUp(self): self.op_type = "range" self.init_config() self.inputs = { 'Start': np.array([self.case[0]]).astype(self.dtype), 'End': np.array([self.case[1]]).astype(self.dtype), - 'Step': np.array([self.case[2]]).astype(self.dtype) + 'Step': np.array([self.case[2]]).astype(self.dtype), } self.outputs = { - 'Out': - np.arange(self.case[0], self.case[1], - self.case[2]).astype(self.dtype) + 'Out': np.arange(self.case[0], self.case[1], self.case[2]).astype( + self.dtype + ) } def init_config(self): @@ -50,7 +49,6 @@ class TestRangeOp(OpTest): class TestFloatRangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.float32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -58,7 +56,6 @@ class TestFloatRangeOpCase0(TestRangeOp): class TestInt32RangeOpCase0(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -66,7 +63,6 @@ class TestInt32RangeOpCase0(TestRangeOp): class TestInt32RangeOpCase1(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) @@ -74,7 +70,6 @@ class TestInt32RangeOpCase1(TestRangeOp): class TestInt32RangeOpCase2(TestRangeOp): - def init_config(self): self.dtype = np.int32 self.python_api = partial(arange_wrapper, dtype=self.dtype) diff --git a/python/paddle/fluid/tests/unittests/test_rank_attention_op.py b/python/paddle/fluid/tests/unittests/test_rank_attention_op.py index 4974fd26d1c6d6f5dd49b609b277ce05d6657586..679475a58843cc4d785f01c1fd0db912d6793ca6 100644 --- a/python/paddle/fluid/tests/unittests/test_rank_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_rank_attention_op.py @@ -60,7 +60,7 @@ def gen_param_help(input, rank_offset, param, max_rank): output_param_row = block_matrix_row * input_row output_param_col = param_col - output_param = np.zeros((output_param_row * output_param_col, )) + output_param = np.zeros((output_param_row * output_param_col,)) for idx in range(output_param_row * output_param_col): output_col_idx = idx % output_param_col @@ -75,7 +75,11 @@ def gen_param_help(input, rank_offset, param, max_rank): if lower < 0 or faster < 0: continue start = lower * max_rank + faster - ori_idx = start * param_col * input_col + k_offset * param_col + output_col_idx + ori_idx = ( + start * param_col * input_col + + k_offset * param_col + + output_col_idx + ) output_param[idx] = param[int(ori_idx / param_col), 
ori_idx % param_col] output_param = output_param.reshape([output_param_row, output_param_col]) @@ -87,20 +91,25 @@ def np_rank_attention(input, rank_offset, rank_para, max_rank, max_size): rank_offset_row, rank_offset_col = rank_offset.shape rank_para_row, rank_para_col = rank_para.shape - assert (input_row == rank_offset_row) - assert (max_rank == ((rank_offset_col - 1) / 2)) - assert (rank_para_row == max_rank * max_rank * input_col) + assert input_row == rank_offset_row + assert max_rank == ((rank_offset_col - 1) / 2) + assert rank_para_row == max_rank * max_rank * input_col - input_help, ins_rank = gen_input_help(input, rank_offset, max_rank, - max_size) + input_help, ins_rank = gen_input_help( + input, rank_offset, max_rank, max_size + ) param_help = gen_param_help(input, rank_offset, rank_para, max_rank) block_matrix_row = input_col * max_rank res = np.zeros((input_row, rank_para_col)) for ins in range(input_row): - res[ins, :] = \ - np.dot(input_help[ins, :], - param_help[int(block_matrix_row * ins):int(block_matrix_row * (ins+1)),:]) + res[ins, :] = np.dot( + input_help[ins, :], + param_help[ + int(block_matrix_row * ins) : int(block_matrix_row * (ins + 1)), + :, + ], + ) return res, input_help, param_help, ins_rank @@ -142,7 +151,6 @@ def gen_rank_offset(pv_nums, max_rank): class TestRankAttentionOpComplex(OpTest): - def config(self): self.pv_num = 100 self.x_feat = 10 @@ -156,22 +164,27 @@ class TestRankAttentionOpComplex(OpTest): ins_num, rank_offset = gen_rank_offset(self.pv_num, self.max_rank) input = np.random.random((ins_num, self.x_feat)).astype(self.dtype) rank_para_shape = [ - self.max_rank * self.max_rank * self.x_feat, self.y_feat + self.max_rank * self.max_rank * self.x_feat, + self.y_feat, ] rank_para = np.random.random(rank_para_shape).astype(self.dtype) np_out, np_input_help, np_param_help, np_ins_rank = np_rank_attention( - input, np.array(rank_offset), rank_para, self.max_rank, - self.pv_num * 7) + input, + np.array(rank_offset), + rank_para, + self.max_rank, + self.pv_num * 7, + ) self.inputs = { "X": input, "RankOffset": np.array(rank_offset).astype("int32"), - "RankParam": rank_para + "RankParam": rank_para, } self.attrs = {'MaxRank': self.max_rank, 'MaxSize': self.pv_num * 7} self.outputs = { "Out": np_out, "InputHelp": np_input_help, - "InsRank": np_ins_rank + "InsRank": np_ins_rank, } def test_check_output_gpu(self): @@ -184,7 +197,6 @@ class TestRankAttentionOpComplex(OpTest): class TestRankAttentionOpCpu(OpTest): - def config(self): self.pv_num = 100 self.x_feat = 10 @@ -198,22 +210,27 @@ class TestRankAttentionOpCpu(OpTest): ins_num, rank_offset = gen_rank_offset(self.pv_num, self.max_rank) input = np.random.random((ins_num, self.x_feat)).astype(self.dtype) rank_para_shape = [ - self.max_rank * self.max_rank * self.x_feat, self.y_feat + self.max_rank * self.max_rank * self.x_feat, + self.y_feat, ] rank_para = np.random.random(rank_para_shape).astype(self.dtype) np_out, np_input_help, np_param_help, np_ins_rank = np_rank_attention( - input, np.array(rank_offset), rank_para, self.max_rank, - self.pv_num * 7) + input, + np.array(rank_offset), + rank_para, + self.max_rank, + self.pv_num * 7, + ) self.inputs = { "X": input, "RankOffset": np.array(rank_offset).astype("int32"), - "RankParam": rank_para + "RankParam": rank_para, } self.attrs = {'MaxRank': self.max_rank, 'MaxSize': self.pv_num * 7} self.outputs = { "Out": np_out, "InputHelp": np_input_help, - "InsRank": np_ins_rank + "InsRank": np_ins_rank, } def test_check_output_cpu(self): diff --git 
a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py index 5ef05e16c967b54129d7e09ec5251a6bf7c34af2..1ace41d2d24b28865698a4310e4e25750c57c6b3 100644 --- a/python/paddle/fluid/tests/unittests/test_rank_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_rank_loss_op.py @@ -20,7 +20,6 @@ from paddle.fluid import Program, program_guard class TestRankLossOp(OpTest): - def setUp(self): self.op_type = "rank_loss" shape = (100, 1) @@ -34,7 +33,7 @@ class TestRankLossOp(OpTest): self.inputs = { 'Label': label.reshape(label_shape), 'Left': left.reshape(left_shape), - 'Right': right.reshape(right_shape) + 'Right': right.reshape(right_shape), } self.outputs = {'Out': loss.reshape(label_shape)} @@ -56,42 +55,36 @@ class TestRankLossOp(OpTest): class TestRankLossOp1(TestRankLossOp): - def set_shape(self): batch_size = 100 return (batch_size), (batch_size, 1), (batch_size, 1) class TestRankLossOp2(TestRankLossOp): - def set_shape(self): batch_size = 100 return (batch_size, 1), (batch_size), (batch_size, 1) class TestRankLossOp3(TestRankLossOp): - def set_shape(self): batch_size = 100 return (batch_size, 1), (batch_size, 1), (batch_size) class TestRankLossOp4(TestRankLossOp): - def set_shape(self): batch_size = 100 return (batch_size), (batch_size), (batch_size, 1) class TestRankLossOp5(TestRankLossOp): - def set_shape(self): batch_size = 100 return (batch_size), (batch_size), (batch_size) class TestRankLossOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): label = fluid.data(name="label", shape=[16, 1], dtype="float32") diff --git a/python/paddle/fluid/tests/unittests/test_raw_program_optimizer.py b/python/paddle/fluid/tests/unittests/test_raw_program_optimizer.py index eb11bf975dd029c173c26cb8c4256d827cbe0be0..fe86a667d1e8803efd5e88983351dba5d8643fcd 100644 --- a/python/paddle/fluid/tests/unittests/test_raw_program_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_raw_program_optimizer.py @@ -22,7 +22,6 @@ import os class TestRawProgramOptimizer(unittest.TestCase): - def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" @@ -30,18 +29,19 @@ class TestRawProgramOptimizer(unittest.TestCase): def mlp(self, input_x, input_y, hid_dim=128, label_dim=2): fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh') - prediction = paddle.static.nn.fc(x=[fc_2], - size=label_dim, - activation='softmax') - cost = paddle.nn.functional.cross_entropy(input=prediction, - label=input_y) + prediction = paddle.static.nn.fc( + x=[fc_2], size=label_dim, activation='softmax' + ) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y + ) avg_cost = paddle.mean(x=cost) return avg_cost def gen_data(self): return { "x": np.random.random(size=(128, 32)).astype('float32'), - "y": np.random.randint(2, size=(128, 1)).astype('int64') + "y": np.random.randint(2, size=(128, 1)).astype('int64'), } def test_single_gpu(self): @@ -53,16 +53,17 @@ class TestRawProgramOptimizer(unittest.TestCase): strategy.without_graph_optimization = True with fluid.program_guard(sharding_program, sharding_startup_program): with fluid.unique_name.guard(): - input_x = paddle.static.data(name="x", - shape=[None, 32], - dtype='float32') - input_y = paddle.static.data(name="y", - shape=[None, 1], - dtype='int64') + input_x = paddle.static.data( + name="x", shape=[None, 32], 
dtype='float32' + ) + input_y = paddle.static.data( + name="y", shape=[None, 1], dtype='int64' + ) cost = self.mlp(input_x=input_x, input_y=input_y) output_name = cost.name - optimizer = fleet.distributed_optimizer(fluid.optimizer.Adam(), - strategy) + optimizer = fleet.distributed_optimizer( + fluid.optimizer.Adam(), strategy + ) optimizer.minimize(cost) trainer_id = fleet.worker_index() diff --git a/python/paddle/fluid/tests/unittests/test_reader_reset.py b/python/paddle/fluid/tests/unittests/test_reader_reset.py index f3611059b2dbb767023133ab40f315371c30b762..edd3c6c28b1135c835c2c968636cf9b70780371c 100644 --- a/python/paddle/fluid/tests/unittests/test_reader_reset.py +++ b/python/paddle/fluid/tests/unittests/test_reader_reset.py @@ -23,9 +23,7 @@ import unittest class TestReaderReset(unittest.TestCase): - def prepare_data(self): - def fake_data_generator(): for n in range(self.total_ins_num): yield np.ones(self.ins_shape) * n, n @@ -46,15 +44,16 @@ class TestReaderReset(unittest.TestCase): startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data(name='image', - shape=self.ins_shape, - dtype='float32') + image = fluid.layers.data( + name='image', shape=self.ins_shape, dtype='float32' + ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') data_reader_handle = fluid.io.PyReader( feed_list=[image, label], capacity=16, iterable=False, - use_double_buffer=with_double_buffer) + use_double_buffer=with_double_buffer, + ) fetch_list = [image.name, label.name] place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() @@ -62,10 +61,12 @@ class TestReaderReset(unittest.TestCase): exe.run(startup_prog) data_reader_handle.decorate_sample_list_generator( - paddle.batch(self.prepare_data(), batch_size=self.batch_size)) + paddle.batch(self.prepare_data(), batch_size=self.batch_size) + ) train_cp = compiler.CompiledProgram(main_prog).with_data_parallel( - places=[place]) + places=[place] + ) batch_id = 0 pass_count = 0 @@ -73,13 +74,13 @@ class TestReaderReset(unittest.TestCase): data_reader_handle.start() try: while True: - data_val, label_val = exe.run(train_cp, - fetch_list=fetch_list, - return_numpy=True) + data_val, label_val = exe.run( + train_cp, fetch_list=fetch_list, return_numpy=True + ) ins_num = data_val.shape[0] - broadcasted_label = np.ones(( - ins_num, ) + tuple(self.ins_shape)) * label_val.reshape( - (ins_num, 1)) + broadcasted_label = np.ones( + (ins_num,) + tuple(self.ins_shape) + ) * label_val.reshape((ins_num, 1)) self.assertEqual(data_val.all(), broadcasted_label.all()) batch_id += 1 except fluid.core.EOFException: diff --git a/python/paddle/fluid/tests/unittests/test_real_imag_op.py b/python/paddle/fluid/tests/unittests/test_real_imag_op.py index 8b0a0ad635b1add82c4c05f3d6c2824170bd2599..bcd8f3c561edcd42c4912f0f3cba65da798c2b91 100644 --- a/python/paddle/fluid/tests/unittests/test_real_imag_op.py +++ b/python/paddle/fluid/tests/unittests/test_real_imag_op.py @@ -32,7 +32,6 @@ paddle_apis = { class TestRealOp(OpTest): - def setUp(self): # switch to static paddle.enable_static() @@ -46,31 +45,31 @@ class TestRealOp(OpTest): def init_input_output(self): self.inputs = { - 'X': - np.random.random( - (20, 5)).astype(self.dtype) + 1j * np.random.random( - (20, 5)).astype(self.dtype) + 'X': np.random.random((20, 5)).astype(self.dtype) + + 1j * np.random.random((20, 5)).astype(self.dtype) } self.outputs = {'Out': numpy_apis[self.op_type](self.inputs['X'])} def init_grad_input_output(self): self.grad_out = 
np.ones((20, 5), self.dtype) - self.grad_x = np.real( - self.grad_out) + 1j * np.zeros(self.grad_out.shape) + self.grad_x = np.real(self.grad_out) + 1j * np.zeros( + self.grad_out.shape + ) def test_check_output(self): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=[self.grad_x], - user_defined_grad_outputs=[self.grad_out], - check_eager=True) + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out], + check_eager=True, + ) class TestImagOp(TestRealOp): - def setUp(self): # switch to static paddle.enable_static() @@ -84,12 +83,12 @@ class TestImagOp(TestRealOp): def init_grad_input_output(self): self.grad_out = np.ones((20, 5), self.dtype) - self.grad_x = np.zeros( - self.grad_out.shape) + 1j * np.real(self.grad_out) + self.grad_x = np.zeros(self.grad_out.shape) + 1j * np.real( + self.grad_out + ) class TestRealAPI(unittest.TestCase): - def setUp(self): # switch to static paddle.enable_static() @@ -102,10 +101,10 @@ class TestRealAPI(unittest.TestCase): self._shape = [2, 20, 2, 3] def test_in_static_mode(self): - def init_input_output(dtype): input = np.random.random(self._shape).astype( - dtype) + 1j * np.random.random(self._shape).astype(dtype) + dtype + ) + 1j * np.random.random(self._shape).astype(dtype) return {'x': input}, numpy_apis[self.api](input) for dtype in self.dtypes: @@ -122,7 +121,8 @@ class TestRealAPI(unittest.TestCase): def test_in_dynamic_mode(self): for dtype in self.dtypes: input = np.random.random(self._shape).astype( - dtype) + 1j * np.random.random(self._shape).astype(dtype) + dtype + ) + 1j * np.random.random(self._shape).astype(dtype) np_res = numpy_apis[self.api](input) for place in self.places: # it is more convenient to use `guard` than `enable/disable_**` here @@ -130,8 +130,11 @@ class TestRealAPI(unittest.TestCase): input_t = paddle.to_tensor(input) res = paddle_apis[self.api](input_t).numpy() np.testing.assert_array_equal(np_res, res) - res_t = input_t.real().numpy( - ) if self.api == "real" else input_t.imag().numpy() + res_t = ( + input_t.real().numpy() + if self.api == "real" + else input_t.imag().numpy() + ) np.testing.assert_array_equal(np_res, res_t) def test_name_argument(self): @@ -156,7 +159,6 @@ class TestRealAPI(unittest.TestCase): class TestImagAPI(TestRealAPI): - def setUp(self): # switch to static paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index 1153689bb941ac637136ee0807a17fd6e3123107..37ebcb77003c1eb33544524191250067b4719668 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -28,7 +28,6 @@ np.random.seed(123) class PyRNNBase(object): - def __init__(self, input_shape, output_shape): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") @@ -46,13 +45,13 @@ class PyRNNBase(object): class PySimpleRNN1(PyRNNBase): - def __init__(self, input_shape, output_shape): super(PySimpleRNN1, self).__init__(input_shape, output_shape) seq_len, batch_size, input_dim = input_shape - self.h_boot = np.random.normal(size=(batch_size, - input_dim)).astype("float32") + self.h_boot = np.random.normal(size=(batch_size, input_dim)).astype( + "float32" + ) self.scale = 1.0 / 2.0 men_dim = (seq_len, batch_size, input_dim) @@ -68,7 +67,6 @@ class PySimpleRNN1(PyRNNBase): class 
PySimpleRNN2(PyRNNBase): - def __init__(self, input_shape, output_shape): super(PySimpleRNN2, self).__init__(input_shape, output_shape) @@ -89,7 +87,7 @@ class PySimpleRNN2(PyRNNBase): hU = np.matmul(pre_mem, self.U).astype("float32") def py_sigmoid(x): - return 1. / (1. + np.exp(-x)) + return 1.0 / (1.0 + np.exp(-x)) self.mems[step_id] = py_sigmoid(xW + hU) self.y[step_id] = self.mems[step_id] @@ -136,14 +134,16 @@ class RecurrentOpTest1(unittest.TestCase): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype='float32', - name='h_boot') + h_boot = layers.data( + shape=[self.input_dim], dtype='float32', name='h_boot' + ) h_boot.stop_gradient = False rnn = layers.StaticRNN() @@ -151,8 +151,10 @@ class RecurrentOpTest1(unittest.TestCase): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) - h = layers.scale(x=layers.elementwise_add(x=h_pre, y=x_t), - scale=self.py_rnn.scale) + h = layers.scale( + x=layers.elementwise_add(x=h_pre, y=x_t), + scale=self.py_rnn.scale, + ) rnn.update_memory(h_pre, h) rnn.output(h) @@ -165,9 +167,9 @@ class RecurrentOpTest1(unittest.TestCase): for x in self.feed_data_field } exe = Executor(self.place) - out = exe.run(self.main_program, - feed=self.feed_map, - fetch_list=[self.output]) + out = exe.run( + self.main_program, feed=self.feed_map, fetch_list=[self.output] + ) return out[0] @@ -182,10 +184,12 @@ class RecurrentOpTest1(unittest.TestCase): ] exe = Executor(self.place) - return exe.run(self.main_program, - feed=self.feed_map, - fetch_list=fetch_list, - return_numpy=False) + return exe.run( + self.main_program, + feed=self.feed_map, + fetch_list=fetch_list, + return_numpy=False, + ) def test_backward(self, rtol=0.01): self.check_forward() @@ -203,10 +207,18 @@ class RecurrentOpTest1(unittest.TestCase): ana_grad[idx], rtol=rtol, atol=1e-8, - err_msg='num_grad (' + name + ') has diff at ' + - str(self.place) + '\nExpect ' + str(num_grad[idx]) + '\n' + - 'But Got' + str(ana_grad[idx]) + ' in class ' + - self.__class__.__name__) + err_msg='num_grad (' + + name + + ') has diff at ' + + str(self.place) + + '\nExpect ' + + str(num_grad[idx]) + + '\n' + + 'But Got' + + str(ana_grad[idx]) + + ' in class ' + + self.__class__.__name__, + ) def check_forward(self): pd_output = self.forward() @@ -268,14 +280,16 @@ class RecurrentOpTest2(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype='float32', - name='h_boot') + h_boot = layers.data( + shape=[self.input_dim], dtype='float32', name='h_boot' + ) h_boot.stop_gradient = False rnn = layers.StaticRNN() @@ -288,15 +302,19 @@ class RecurrentOpTest2(RecurrentOpTest1): size=self.input_dim, param_attr=ParamAttr( name='W', - initializer=fluid.initializer.ConstantInitializer(1.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(1.0), + ), + 
bias_attr=False, + ) temp_r = layers.fc( input=h_pre, size=self.input_dim, param_attr=ParamAttr( name='U', - initializer=fluid.initializer.ConstantInitializer(0.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(0.0), + ), + bias_attr=False, + ) h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r)) @@ -325,16 +343,18 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): ''' class PySimpleRNN3(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(RecurrentOpMultipleMemoryTest.PySimpleRNN3, - self).__init__(input_shape, output_shape) + super(RecurrentOpMultipleMemoryTest.PySimpleRNN3, self).__init__( + input_shape, output_shape + ) seq_len, batch_size, input_dim = input_shape - self.h_boot1 = np.random.normal(size=(batch_size, - input_dim)).astype("float32") - self.h_boot2 = np.random.normal(size=(batch_size, - input_dim)).astype("float32") + self.h_boot1 = np.random.normal( + size=(batch_size, input_dim) + ).astype("float32") + self.h_boot2 = np.random.normal( + size=(batch_size, input_dim) + ).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems1 = np.zeros(shape=men_dim).astype("float32") @@ -364,26 +384,33 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False - h_boot1 = layers.data(shape=[self.batch_size, self.input_dim], - dtype='float32', - name='h_boot1', - append_batch_size=False) + h_boot1 = layers.data( + shape=[self.batch_size, self.input_dim], + dtype='float32', + name='h_boot1', + append_batch_size=False, + ) h_boot1.stop_gradient = False - h_boot2 = layers.data(shape=[self.batch_size, self.input_dim], - dtype='float32', - name='h_boot2', - append_batch_size=False) + h_boot2 = layers.data( + shape=[self.batch_size, self.input_dim], + dtype='float32', + name='h_boot2', + append_batch_size=False, + ) h_boot2.stop_gradient = False rnn = layers.StaticRNN() @@ -418,10 +445,10 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): ''' class PySimpleRNN4(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(RecurrentOpNoMemBootTest.PySimpleRNN4, - self).__init__(input_shape, output_shape) + super(RecurrentOpNoMemBootTest.PySimpleRNN4, self).__init__( + input_shape, output_shape + ) men_dim = input_shape self.mems = np.zeros(shape=men_dim).astype("float32") @@ -446,16 +473,19 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, 
self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False rnn = layers.StaticRNN() @@ -491,31 +521,28 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1): ''' class PySimpleRNN5(PyRNNBase): - def __init__(self, input_shape, output_shape): - super(RecurrentOpSubBlockTest.PySimpleRNN5, - self).__init__(input_shape, output_shape) + super(RecurrentOpSubBlockTest.PySimpleRNN5, self).__init__( + input_shape, output_shape + ) seq_len, batch_size, input_dim = input_shape - self.w1 = np.random.uniform(-0.1, 0.1, - size=(input_dim, - input_dim)).astype("float32") - self.w2 = np.random.uniform(-0.1, - 0.1, - size=(input_dim * 2, - input_dim)).astype("float32") - - self.emb = np.random.uniform(-0.1, - 0.1, - size=(seq_len, batch_size, - input_dim)).astype("float32") + self.w1 = np.random.uniform( + -0.1, 0.1, size=(input_dim, input_dim) + ).astype("float32") + self.w2 = np.random.uniform( + -0.1, 0.1, size=(input_dim * 2, input_dim) + ).astype("float32") + + self.emb = np.random.uniform( + -0.1, 0.1, size=(seq_len, batch_size, input_dim) + ).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems = np.zeros(shape=men_dim).astype("float32") self.oy = np.matmul(self.emb, self.w1) def step(self, step_id, x): - def dot_attention(query, memory): attn = np.matmul(query, memory.transpose((0, 2, 1))) weight = softmax(attn) @@ -552,35 +579,43 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1): self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpSubBlockTest.PySimpleRNN5( - self.input_shape, self.output_shape) + self.input_shape, self.output_shape + ) with fluid.program_guard(self.main_program, self.startup_program): rnn_out = self.create_rnn_op() self.output = paddle.mean(rnn_out) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype='float32', - name='x', - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype='float32', + name='x', + append_batch_size=False, + ) x.stop_gradient = False emb = layers.data( name='emb', shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', - append_batch_size=False) + append_batch_size=False, + ) emb.stop_gradient = False - w1 = layers.data(shape=[self.input_dim, self.input_dim], - dtype='float32', - name='w1', - append_batch_size=False) + w1 = layers.data( + shape=[self.input_dim, self.input_dim], + dtype='float32', + name='w1', + append_batch_size=False, + ) w1.stop_gradient = False - w2 = layers.data(shape=[self.input_dim * 2, self.input_dim], - dtype='float32', - name='w2', - append_batch_size=False) + w2 = layers.data( + shape=[self.input_dim * 2, self.input_dim], + dtype='float32', + name='w2', + append_batch_size=False, + ) w2.stop_gradient = False rnn = layers.StaticRNN() @@ -594,9 +629,11 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1): y = layers.matmul(emb, w1) with rnn.step(): - pre_h = rnn.memory(shape=(self.sent_len, self.input_dim), - batch_ref=x, - init_value=0.0) + pre_h = rnn.memory( + shape=(self.sent_len, self.input_dim), + batch_ref=x, + init_value=0.0, + ) step_in = rnn.step_input(x) concat_in = layers.concat([step_in, pre_h], 1) new_h = layers.matmul(concat_in, w2) @@ -643,14 +680,16 @@ class 
RecurrentOpStopGradientTest(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data(shape=[self.sent_len, self.batch_size, self.input_dim], - dtype="float32", - name="x", - append_batch_size=False) + x = layers.data( + shape=[self.sent_len, self.batch_size, self.input_dim], + dtype="float32", + name="x", + append_batch_size=False, + ) x.stop_gradient = False - h_boot = layers.data(shape=[self.input_dim], - dtype="float32", - name="h_boot") + h_boot = layers.data( + shape=[self.input_dim], dtype="float32", name="h_boot" + ) h_boot.stop_gradient = True rnn = layers.StaticRNN() @@ -663,15 +702,19 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1): size=self.input_dim, param_attr=ParamAttr( name="W", - initializer=fluid.initializer.ConstantInitializer(1.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(1.0), + ), + bias_attr=False, + ) temp_r = layers.fc( input=h_pre, size=self.input_dim, param_attr=ParamAttr( name="U", - initializer=fluid.initializer.ConstantInitializer(0.0)), - bias_attr=False) + initializer=fluid.initializer.ConstantInitializer(0.0), + ), + bias_attr=False, + ) h = layers.sigmoid(x=layers.elementwise_add(temp_l, temp_r)) diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index 8dba9dd1b56cae38ff08fcd8ac579c90607ffefc..8fa448a6927dd5866504b5dd5bd8bdad832dc02b 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestSumOp(OpTest): - def setUp(self): self.python_api = paddle.sum self.op_type = "reduce_sum" @@ -39,7 +38,6 @@ class TestSumOp(OpTest): class TestSumOp_fp16(OpTest): - def setUp(self): self.python_api = paddle.sum self.op_type = "reduce_sum" @@ -58,19 +56,18 @@ class TestSumOp_fp16(OpTest): def calc_gradient(self): x = self.inputs["X"] grad = np.ones(x.shape, dtype=x.dtype) - return grad, + return (grad,) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=self.gradient, - check_eager=True) + self.check_grad( + ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSumOp_bf16(OpTest): - def setUp(self): np.random.seed(100) self.python_api = paddle.sum @@ -91,10 +88,13 @@ class TestSumOp_bf16(OpTest): def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - user_defined_grads=self.gradient, - check_eager=True) + self.check_grad_with_place( + place, + ['X'], + 'Out', + user_defined_grads=self.gradient, + check_eager=True, + ) def calc_gradient(self): x = self.x @@ -103,7 +103,6 @@ class TestSumOp_bf16(OpTest): class TestSumOp_fp16_withInt(OpTest): - def setUp(self): self.python_api = paddle.sum self.op_type = "reduce_sum" @@ -124,17 +123,15 @@ class TestSumOp_fp16_withInt(OpTest): def calc_gradient(self): x = self.inputs["X"] grad = np.ones(x.shape, dtype=x.dtype) - return grad, + return (grad,) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - user_defined_grads=self.gradient, - check_eager=True) + self.check_grad( + ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True + ) class TestSumOp5D(OpTest): - def setUp(self): self.python_api = paddle.sum 
self.op_type = "reduce_sum" @@ -152,7 +149,6 @@ class TestSumOp5D(OpTest): class TestSumOp6D(OpTest): - def setUp(self): self.python_api = paddle.sum self.op_type = "reduce_sum" @@ -170,7 +166,6 @@ class TestSumOp6D(OpTest): class TestSumOp8D(OpTest): - def setUp(self): self.python_api = paddle.sum self.op_type = "reduce_sum" @@ -189,7 +184,8 @@ class TestSumOp8D(OpTest): @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestMaxOp(OpTest): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -208,7 +204,8 @@ class TestMaxOp(OpTest): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestMinOp(OpTest): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -266,7 +263,6 @@ def raw_reduce_prod(x, dim=[0], keep_dim=False): class TestProdOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.python_api = raw_reduce_prod @@ -275,8 +271,9 @@ class TestProdOp(OpTest): self.outputs = {'Out': self.inputs['X'].prod(axis=0)} def init_data_type(self): - self.data_type = "float32" if core.is_compiled_with_rocm( - ) else "float64" + self.data_type = ( + "float32" if core.is_compiled_with_rocm() else "float64" + ) def test_check_output(self): self.check_output(check_eager=True) @@ -286,7 +283,6 @@ class TestProdOp(OpTest): class TestProd6DOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.python_api = raw_reduce_prod @@ -300,8 +296,9 @@ class TestProd6DOp(OpTest): } def init_data_type(self): - self.data_type = "float32" if core.is_compiled_with_rocm( - ) else "float64" + self.data_type = ( + "float32" if core.is_compiled_with_rocm() else "float64" + ) def test_check_output(self): self.check_output(check_eager=True) @@ -311,14 +308,14 @@ class TestProd6DOp(OpTest): class TestProd8DOp(OpTest): - def setUp(self): self.op_type = "reduce_prod" self.python_api = raw_reduce_prod self.init_data_type() self.inputs = { - 'X': np.random.random( - (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type) + 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype( + self.data_type + ) } self.attrs = {'dim': [2, 3, 4]} self.outputs = { @@ -326,8 +323,9 @@ class TestProd8DOp(OpTest): } def init_data_type(self): - self.data_type = "float32" if core.is_compiled_with_rocm( - ) else "float64" + self.data_type = ( + "float32" if core.is_compiled_with_rocm() else "float64" + ) def test_check_output(self): self.check_output(check_eager=True) @@ -337,7 +335,6 @@ class TestProd8DOp(OpTest): class TestAllOp(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all @@ -350,13 +347,13 @@ class TestAllOp(OpTest): class TestAll8DOp(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} @@ -366,12 +363,11 @@ class TestAll8DOp(OpTest): class TestAllOpWithDim(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all self.inputs = {'X': np.random.randint(0, 
2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': (1, )} + self.attrs = {'dim': (1,)} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} def test_check_output(self): @@ -379,13 +375,13 @@ class TestAllOpWithDim(OpTest): class TestAll8DOpWithDim(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'dim': (1, 3, 4)} self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} @@ -395,7 +391,6 @@ class TestAll8DOpWithDim(OpTest): class TestAllOpWithKeepDim(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all @@ -410,18 +405,19 @@ class TestAllOpWithKeepDim(OpTest): class TestAll8DOpWithKeepDim(OpTest): - def setUp(self): self.op_type = "reduce_all" self.python_api = paddle.all self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } - self.attrs = {'dim': (5, ), 'keep_dim': True} + self.attrs = {'dim': (5,), 'keep_dim': True} self.outputs = { - 'Out': - np.expand_dims(self.inputs['X'].all(axis=self.attrs['dim']), axis=5) + 'Out': np.expand_dims( + self.inputs['X'].all(axis=self.attrs['dim']), axis=5 + ) } def test_check_output(self): @@ -429,21 +425,19 @@ class TestAll8DOpWithKeepDim(OpTest): class TestAllOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of reduce_all_op must be Variable. input1 = 12 self.assertRaises(TypeError, fluid.layers.reduce_all, input1) # The input dtype of reduce_all_op must be bool. 
- input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.reduce_all, input2) class TestAnyOp(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any @@ -456,13 +450,13 @@ class TestAnyOp(OpTest): class TestAny8DOp(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} @@ -472,7 +466,6 @@ class TestAny8DOp(OpTest): class TestAnyOpWithDim(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any @@ -485,13 +478,13 @@ class TestAnyOpWithDim(OpTest): class TestAny8DOpWithDim(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.attrs = {'dim': (3, 6)} self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} @@ -501,15 +494,15 @@ class TestAny8DOpWithDim(OpTest): class TestAnyOpWithKeepDim(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")} - self.attrs = {'dim': (1, ), 'keep_dim': True} + self.attrs = {'dim': (1,), 'keep_dim': True} self.outputs = { - 'Out': - np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1) + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) } def test_check_output(self): @@ -517,18 +510,19 @@ class TestAnyOpWithKeepDim(OpTest): class TestAny8DOpWithKeepDim(OpTest): - def setUp(self): self.op_type = "reduce_any" self.python_api = paddle.any self.inputs = { - 'X': np.random.randint(0, 2, - (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } - self.attrs = {'dim': (1, ), 'keep_dim': True} + self.attrs = {'dim': (1,), 'keep_dim': True} self.outputs = { - 'Out': - np.expand_dims(self.inputs['X'].any(axis=self.attrs['dim']), axis=1) + 'Out': np.expand_dims( + self.inputs['X'].any(axis=self.attrs['dim']), axis=1 + ) } def test_check_output(self): @@ -536,21 +530,19 @@ class TestAny8DOpWithKeepDim(OpTest): class TestAnyOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of reduce_any_op must be Variable. input1 = 12 self.assertRaises(TypeError, fluid.layers.reduce_any, input1) # The input dtype of reduce_any_op must be bool. 
- input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) self.assertRaises(TypeError, fluid.layers.reduce_any, input2) class Test1DReduce(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random(120).astype("float64")} @@ -564,7 +556,6 @@ class Test1DReduce(OpTest): class Test2DReduce0(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [0]} @@ -573,7 +564,6 @@ class Test2DReduce0(Test1DReduce): class Test2DReduce1(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [1]} @@ -584,7 +574,6 @@ class Test2DReduce1(Test1DReduce): class Test3DReduce0(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [1]} @@ -595,7 +584,6 @@ class Test3DReduce0(Test1DReduce): class Test3DReduce1(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [2]} @@ -606,7 +594,6 @@ class Test3DReduce1(Test1DReduce): class Test3DReduce2(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [-2]} @@ -617,7 +604,6 @@ class Test3DReduce2(Test1DReduce): class Test3DReduce3(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': [1, 2]} @@ -628,7 +614,6 @@ class Test3DReduce3(Test1DReduce): class Test8DReduce0(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.attrs = {'dim': (4, 2, 3)} @@ -641,20 +626,18 @@ class Test8DReduce0(Test1DReduce): class TestKeepDimReduce(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.attrs = {'dim': [1], 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } class TestKeepDim8DReduce(Test1DReduce): - def setUp(self): self.op_type = "reduce_sum" self.inputs = { @@ -662,15 +645,16 @@ class TestKeepDim8DReduce(Test1DReduce): } self.attrs = {'dim': (3, 4, 5), 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] + ) } @skip_check_grad_ci( reason="reduce_max is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." +) class TestReduceMaxOpMultiAxises(OpTest): """Remove Max with subgradient from gradient check to confirm the success of CI.""" @@ -689,7 +673,8 @@ class TestReduceMaxOpMultiAxises(OpTest): @skip_check_grad_ci( reason="reduce_min is discontinuous non-derivable function," - " its gradient check is not supported by unittest framework.") + " its gradient check is not supported by unittest framework." 
+) class TestReduceMinOpMultiAxises(OpTest): """Remove Min with subgradient from gradient check to confirm the success of CI.""" @@ -707,14 +692,14 @@ class TestReduceMinOpMultiAxises(OpTest): class TestKeepDimReduceSumMultiAxises(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} self.attrs = {'dim': [-2, -1], 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) } def test_check_output(self): @@ -725,14 +710,14 @@ class TestKeepDimReduceSumMultiAxises(OpTest): class TestReduceSumWithDimOne(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} self.attrs = {'dim': [1, 2], 'keep_dim': True} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=True + ) } def test_check_output(self): @@ -743,14 +728,14 @@ class TestReduceSumWithDimOne(OpTest): class TestReduceSumWithNumelOne(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((100, 1)).astype("float64")} self.attrs = {'dim': [1], 'keep_dim': False} self.outputs = { - 'Out': - self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=False) + 'Out': self.inputs['X'].sum( + axis=tuple(self.attrs['dim']), keepdims=False + ) } def test_check_output(self): @@ -761,7 +746,6 @@ class TestReduceSumWithNumelOne(OpTest): class TestReduceAll(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")} @@ -776,7 +760,6 @@ class TestReduceAll(OpTest): class Test1DReduceWithAxes1(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random(100).astype("float64")} @@ -791,18 +774,17 @@ class Test1DReduceWithAxes1(OpTest): class TestReduceWithDtype(OpTest): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].sum().astype('float64')} self.attrs = {'reduce_all': True} - self.attrs.update({ - 'in_dtype': - int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': - int(convert_np_dtype_to_dtype_(np.float64)) - }) + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) def test_check_output(self): self.check_output() @@ -812,42 +794,40 @@ class TestReduceWithDtype(OpTest): class TestReduceWithDtype1(TestReduceWithDtype): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].sum(axis=1)} self.attrs = {'dim': [1]} - self.attrs.update({ - 'in_dtype': - int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': - int(convert_np_dtype_to_dtype_(np.float64)) - }) + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) class TestReduceWithDtype2(TestReduceWithDtype): - def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)} self.attrs = {'dim': [1], 'keep_dim': True} - self.attrs.update({ - 'in_dtype': - 
int(convert_np_dtype_to_dtype_(np.float32)), - 'out_dtype': - int(convert_np_dtype_to_dtype_(np.float64)) - }) + self.attrs.update( + { + 'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)), + 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)), + } + ) class TestReduceSumOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of reduce_sum_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.reduce_sum, x1) # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -855,13 +835,9 @@ class TestReduceSumOpError(unittest.TestCase): class API_TestSumOp(unittest.TestCase): - - def run_static(self, - shape, - x_dtype, - attr_axis, - attr_dtype=None, - np_axis=None): + def run_static( + self, shape, x_dtype, attr_axis, attr_dtype=None, np_axis=None + ): if np_axis is None: np_axis = attr_axis @@ -871,19 +847,21 @@ class API_TestSumOp(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.data("data", shape=shape, dtype=x_dtype) - result_sum = paddle.sum(x=data, - axis=attr_axis, - dtype=attr_dtype) + result_sum = paddle.sum( + x=data, axis=attr_axis, dtype=attr_dtype + ) exe = fluid.Executor(place) input_data = np.random.rand(*shape).astype(x_dtype) - res, = exe.run(feed={"data": input_data}, - fetch_list=[result_sum]) + (res,) = exe.run( + feed={"data": input_data}, fetch_list=[result_sum] + ) - np.testing.assert_allclose(res, - np.sum(input_data.astype(attr_dtype), - axis=np_axis), - rtol=1e-05) + np.testing.assert_allclose( + res, + np.sum(input_data.astype(attr_dtype), axis=np_axis), + rtol=1e-05, + ) def test_static(self): shape = [10, 10] @@ -914,10 +892,9 @@ class API_TestSumOp(unittest.TestCase): shape = [5, 5, 5] self.run_static(shape, "int32", (0, 1), attr_dtype="int32") - self.run_static(shape, - "int32", (), - attr_dtype="int32", - np_axis=(0, 1, 2)) + self.run_static( + shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2) + ) def test_dygraph(self): np_x = np.random.random([2, 3, 4]).astype('int32') @@ -935,7 +912,6 @@ class API_TestSumOp(unittest.TestCase): class TestAllAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) paddle.enable_static() @@ -950,9 +926,11 @@ class TestAllAPI(unittest.TestCase): input_np = np.random.randint(0, 2, [4, 4]).astype("bool") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05) def test_static(self): @@ -991,7 +969,6 @@ class TestAllAPI(unittest.TestCase): class TestAnyAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) paddle.enable_static() @@ -1006,9 +983,11 @@ class TestAnyAPI(unittest.TestCase): input_np = np.random.randint(0, 2, [4, 4]).astype("bool") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": input_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": input_np}, + fetch_list=[result], + ) np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05) def test_static(self): @@ -1048,5 +1027,6 @@ class 
TestAnyAPI(unittest.TestCase): if __name__ == '__main__': import paddle + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_reducescatter.py b/python/paddle/fluid/tests/unittests/test_reducescatter.py index 8320eb9ad59e85a9bb71c85251a67eac60ecb9cf..ed46c272bca4702fba160158ee18092007baba07 100644 --- a/python/paddle/fluid/tests/unittests/test_reducescatter.py +++ b/python/paddle/fluid/tests/unittests/test_reducescatter.py @@ -21,13 +21,13 @@ paddle.enable_static() class TestReduceScatterOp(TestDistBase): - def _setup_config(self): pass def test_reducescatter(self): - self.check_with_place("collective_reducescatter_op.py", - "reduce_scatter") + self.check_with_place( + "collective_reducescatter_op.py", "reduce_scatter" + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_reducescatter_api.py b/python/paddle/fluid/tests/unittests/test_reducescatter_api.py index decb90ada9672bc05ffc6df167ca964ae3cbfd70..e5d6b261536857ee746eec52ec319b66c58f9f4f 100644 --- a/python/paddle/fluid/tests/unittests/test_reducescatter_api.py +++ b/python/paddle/fluid/tests/unittests/test_reducescatter_api.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestReduceScatterAPI(TestDistBase): - def _setup_config(self): pass diff --git a/python/paddle/fluid/tests/unittests/test_registry.py b/python/paddle/fluid/tests/unittests/test_registry.py index be0ee288178ac080f808e2da9f651000b202956b..3fc6370b6c754f4ada603e305bac1d350e49667f 100644 --- a/python/paddle/fluid/tests/unittests/test_registry.py +++ b/python/paddle/fluid/tests/unittests/test_registry.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope class TestRegistry(unittest.TestCase): - @prog_scope() def test_registry_layer(self): x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 46cad0aaf754bc0106564b18bad93733d18040e4..a3f1697032b558c02ec0bfb4d1e9f68dfcaffc8a 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -26,7 +26,6 @@ from paddle.fluid.backward import append_backward class TestL2DecayRegularizer(unittest.TestCase): - def test_l2decay_regularizer(self): paddle.enable_static() program = framework.Program() @@ -36,32 +35,30 @@ class TestL2DecayRegularizer(unittest.TestCase): shape=[5, 10], lod_level=0, name="mul.x", - regularizer=regularizer.L2DecayRegularizer(0.5)) + regularizer=regularizer.L2DecayRegularizer(0.5), + ) self.assertTrue(mul_x.regularizer is not None) self.assertTrue( - isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer)) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer) + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + 
outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) count_ops = len(block.ops) @@ -74,7 +71,6 @@ class TestL2DecayRegularizer(unittest.TestCase): class TestL1DecayRegularizer(unittest.TestCase): - def test_l2decay_regularizer(self): paddle.enable_static() program = framework.Program() @@ -84,32 +80,30 @@ class TestL1DecayRegularizer(unittest.TestCase): shape=[5, 10], lod_level=0, name="mul.x", - regularizer=regularizer.L1DecayRegularizer(0.5)) + regularizer=regularizer.L1DecayRegularizer(0.5), + ) self.assertTrue(mul_x.regularizer is not None) self.assertTrue( - isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer)) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer) + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) count_ops = len(block.ops) @@ -122,22 +116,24 @@ class TestL1DecayRegularizer(unittest.TestCase): self.assertEqual(block.ops[-3].type, 'sign') -def bow_net(data, - label, - dict_dim, - is_sparse=False, - emb_dim=8, - hid_dim=8, - hid_dim2=6, - class_dim=2): +def bow_net( + data, + label, + dict_dim, + is_sparse=False, + emb_dim=8, + hid_dim=8, + hid_dim2=6, + class_dim=2, +): """ BOW net This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - emb = fluid.layers.embedding(input=data, - is_sparse=is_sparse, - size=[dict_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -149,11 +145,11 @@ def bow_net(data, class TestRegularizer(unittest.TestCase): - def setUp(self): self.word_len = 1500 - self.train_data = [[(random.sample(range(1000), 10), [0])] - for _ in range(2)] + self.train_data = [ + [(random.sample(range(1000), 10), [0])] for _ in range(2) + ] def get_places(self): places = [core.CPUPlace()] @@ -179,9 +175,9 @@ class TestRegularizer(unittest.TestCase): param_sum = [] for data in self.train_data: - out = exe.run(main_prog, - feed=feeder.feed(data), - fetch_list=param_list) + out = exe.run( + main_prog, feed=feeder.feed(data), fetch_list=param_list + ) p_sum = 0 for v in out: p_sum += np.sum(np.abs(v)) @@ 
-193,19 +189,19 @@ class TestRegularizer(unittest.TestCase): paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - with self.scope_prog_guard(main_prog=main_prog, - startup_prog=startup_prog): - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + with self.scope_prog_guard( + main_prog=main_prog, startup_prog=startup_prog + ): + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_len) optimizer = fluid.optimizer.Adagrad( - learning_rate=0.1, - regularization=fluid.regularizer.L2Decay(1.0)) + learning_rate=0.1, regularization=fluid.regularizer.L2Decay(1.0) + ) optimizer.minimize(avg_cost) param_sum = self.run_program(place, [data, label]) return param_sum @@ -216,12 +212,12 @@ class TestRegularizer(unittest.TestCase): main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - with self.scope_prog_guard(main_prog=main_prog, - startup_prog=startup_prog): - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + with self.scope_prog_guard( + main_prog=main_prog, startup_prog=startup_prog + ): + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost_l2 = model(data, label, self.word_len) @@ -231,7 +227,7 @@ class TestRegularizer(unittest.TestCase): for para in param_list: para_mul = fluid.layers.square(x=para) para_sum.append(fluid.layers.reduce_sum(input=para_mul)) - avg_cost_l2 += fluid.layers.sums(para_sum) * .5 + avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5 optimizer = fluid.optimizer.Adagrad(learning_rate=0.1) optimizer.minimize(avg_cost_l2) @@ -252,9 +248,11 @@ class TestRegularizer(unittest.TestCase): assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1]) for i in range(len(dense_sparse_p_sum[0])): - assert np.isclose(a=dense_sparse_p_sum[0][i], - b=dense_sparse_p_sum[1][i], - rtol=5e-5) + assert np.isclose( + a=dense_sparse_p_sum[0][i], + b=dense_sparse_p_sum[1][i], + rtol=5e-5, + ) def test_repeated_regularization(self): l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1) @@ -268,43 +266,45 @@ class TestRegularizer(unittest.TestCase): sgd.minimize(loss) with fluid.dygraph.guard(): input = fluid.dygraph.to_variable( - np.random.randn(3, 2).astype('float32')) + np.random.randn(3, 2).astype('float32') + ) paddle.seed(1) paddle.framework.random._manual_program_seed(1) - linear1 = fluid.dygraph.Linear(2, - 2, - param_attr=fc_param_attr, - bias_attr=fc_param_attr) - linear2 = fluid.dygraph.Linear(2, - 2, - param_attr=fc_param_attr, - bias_attr=fc_param_attr) + linear1 = fluid.dygraph.Linear( + 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr + ) + linear2 = fluid.dygraph.Linear( + 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr + ) loss1 = linear1(input) loss1.backward() # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr - fluid.optimizer.SGD(parameter_list=linear1.parameters(), - learning_rate=1e-2, - regularization=l2).minimize(loss1) + fluid.optimizer.SGD( + parameter_list=linear1.parameters(), + learning_rate=1e-2, + regularization=l2, + ).minimize(loss1) # only set l1 in fluid.ParamAttr loss2 = linear2(input) loss2.backward() - fluid.optimizer.SGD(parameter_list=linear2.parameters(), - learning_rate=1e-2).minimize(loss2) + fluid.optimizer.SGD( + 
parameter_list=linear2.parameters(), learning_rate=1e-2 + ).minimize(loss2) # they should both be applied by l1, and keep the same np.testing.assert_allclose( linear1.weight.numpy(), linear2.weight.numpy(), rtol=1e-05, - err_msg= - 'weight should use the regularization in fluid.ParamAttr!') + err_msg='weight should use the regularization in fluid.ParamAttr!', + ) np.testing.assert_allclose( linear1.bias.numpy(), linear2.bias.numpy(), rtol=1e-05, - err_msg='bias should use the regularization in fluid.ParamAttr!' + err_msg='bias should use the regularization in fluid.ParamAttr!', ) diff --git a/python/paddle/fluid/tests/unittests/test_regularizer_api.py b/python/paddle/fluid/tests/unittests/test_regularizer_api.py index b4200970dcd99c192ea6eb493678ef95b5e89125..930de8429f493a3b4b5a44c0a280681f173024a7 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer_api.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer_api.py @@ -22,22 +22,24 @@ import paddle.fluid.core as core import paddle.fluid as fluid -def bow_net(data, - label, - dict_dim, - is_sparse=False, - emb_dim=8, - hid_dim=8, - hid_dim2=6, - class_dim=2): +def bow_net( + data, + label, + dict_dim, + is_sparse=False, + emb_dim=8, + hid_dim=8, + hid_dim2=6, + class_dim=2, +): """ BOW net This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - emb = fluid.layers.embedding(input=data, - is_sparse=is_sparse, - size=[dict_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -50,11 +52,11 @@ def bow_net(data, class TestRegularizer(unittest.TestCase): - def setUp(self): self.word_len = 1500 - self.train_data = [[(random.sample(range(1000), 10), [0])] - for _ in range(2)] + self.train_data = [ + [(random.sample(range(1000), 10), [0])] for _ in range(2) + ] def get_places(self): places = [core.CPUPlace()] @@ -80,9 +82,9 @@ class TestRegularizer(unittest.TestCase): param_sum = [] for data in self.train_data: - out = exe.run(main_prog, - feed=feeder.feed(data), - fetch_list=param_list) + out = exe.run( + main_prog, feed=feeder.feed(data), fetch_list=param_list + ) p_sum = 0 for v in out: p_sum += np.sum(np.abs(v)) @@ -94,19 +96,20 @@ class TestRegularizer(unittest.TestCase): paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - with self.scope_prog_guard(main_prog=main_prog, - startup_prog=startup_prog): - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + with self.scope_prog_guard( + main_prog=main_prog, startup_prog=startup_prog + ): + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_len) optimizer = fluid.optimizer.Adagrad( learning_rate=0.1, - regularization=paddle.regularizer.L2Decay(1.0)) + regularization=paddle.regularizer.L2Decay(1.0), + ) optimizer.minimize(avg_cost) param_sum = self.run_program(place, [data, label]) return param_sum @@ -117,12 +120,12 @@ class TestRegularizer(unittest.TestCase): main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - with self.scope_prog_guard(main_prog=main_prog, - startup_prog=startup_prog): - data = fluid.layers.data(name="words", - 
shape=[1], - dtype="int64", - lod_level=1) + with self.scope_prog_guard( + main_prog=main_prog, startup_prog=startup_prog + ): + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost_l2 = model(data, label, self.word_len) @@ -132,7 +135,7 @@ class TestRegularizer(unittest.TestCase): for para in param_list: para_mul = fluid.layers.square(x=para) para_sum.append(fluid.layers.reduce_sum(input=para_mul)) - avg_cost_l2 += fluid.layers.sums(para_sum) * .5 + avg_cost_l2 += fluid.layers.sums(para_sum) * 0.5 optimizer = fluid.optimizer.Adagrad(learning_rate=0.1) optimizer.minimize(avg_cost_l2) @@ -154,9 +157,11 @@ class TestRegularizer(unittest.TestCase): assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1]) for i in range(len(dense_sparse_p_sum[0])): - assert np.isclose(a=dense_sparse_p_sum[0][i], - b=dense_sparse_p_sum[1][i], - rtol=5e-5) + assert np.isclose( + a=dense_sparse_p_sum[0][i], + b=dense_sparse_p_sum[1][i], + rtol=5e-5, + ) def test_repeated_regularization(self): paddle.enable_static() @@ -171,43 +176,45 @@ class TestRegularizer(unittest.TestCase): sgd.minimize(loss) with fluid.dygraph.guard(): input = fluid.dygraph.to_variable( - np.random.randn(3, 2).astype('float32')) + np.random.randn(3, 2).astype('float32') + ) paddle.seed(1) paddle.framework.random._manual_program_seed(1) - linear1 = fluid.dygraph.Linear(2, - 2, - param_attr=fc_param_attr, - bias_attr=fc_param_attr) - linear2 = fluid.dygraph.Linear(2, - 2, - param_attr=fc_param_attr, - bias_attr=fc_param_attr) + linear1 = fluid.dygraph.Linear( + 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr + ) + linear2 = fluid.dygraph.Linear( + 2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr + ) loss1 = linear1(input) loss1.backward() # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr - fluid.optimizer.SGD(parameter_list=linear1.parameters(), - learning_rate=1e-2, - regularization=l2).minimize(loss1) + fluid.optimizer.SGD( + parameter_list=linear1.parameters(), + learning_rate=1e-2, + regularization=l2, + ).minimize(loss1) # only set l1 in fluid.ParamAttr loss2 = linear2(input) loss2.backward() - fluid.optimizer.SGD(parameter_list=linear2.parameters(), - learning_rate=1e-2).minimize(loss2) + fluid.optimizer.SGD( + parameter_list=linear2.parameters(), learning_rate=1e-2 + ).minimize(loss2) # they should both be applied by l1, and keep the same np.testing.assert_allclose( linear1.weight.numpy(), linear2.weight.numpy(), rtol=1e-05, - err_msg= - 'weight should use the regularization in fluid.ParamAttr!') + err_msg='weight should use the regularization in fluid.ParamAttr!', + ) np.testing.assert_allclose( linear1.bias.numpy(), linear2.bias.numpy(), rtol=1e-05, - err_msg='bias should use the regularization in fluid.ParamAttr!' 
+ err_msg='bias should use the regularization in fluid.ParamAttr!', ) diff --git a/python/paddle/fluid/tests/unittests/test_renorm_op.py b/python/paddle/fluid/tests/unittests/test_renorm_op.py index f2754eb16bd93ffe94dad3109d8d7c148211a42a..38362f039eaa28715b9d9de8c5e9e5c6285d0025 100644 --- a/python/paddle/fluid/tests/unittests/test_renorm_op.py +++ b/python/paddle/fluid/tests/unittests/test_renorm_op.py @@ -22,10 +22,10 @@ paddle.set_device('cpu') class TestRenormAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[[2.0, 2, -2], [3, 0.3, 3]], - [[2, -8, 2], [3.1, 3.7, 3]]]) + self.data_x = np.array( + [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] + ) self.p = 1.0 self.dim = 2 self.max_norm = 2.05 @@ -36,17 +36,25 @@ class TestRenormAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): - #x = fluid.layers.data(name = 'x',shape=[-1, 2, 3]) + # x = fluid.layers.data(name = 'x',shape=[-1, 2, 3]) x = paddle.static.data(name="x", shape=[-1, 2, 3], dtype='float64') z = paddle.renorm(x, self.p, self.dim, self.max_norm) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={"x": self.data_x}, - fetch_list=[z], - return_numpy=False) - expected = np.array([[[0.40594056, 0.29285714, -0.41000000], - [0.60891086, 0.04392857, 0.61500001]], - [[0.40594056, -1.17142856, 0.41000000], - [0.62920785, 0.54178572, 0.61500001]]]) + (res,) = exe.run( + feed={"x": self.data_x}, fetch_list=[z], return_numpy=False + ) + expected = np.array( + [ + [ + [0.40594056, 0.29285714, -0.41000000], + [0.60891086, 0.04392857, 0.61500001], + ], + [ + [0.40594056, -1.17142856, 0.41000000], + [0.62920785, 0.54178572, 0.61500001], + ], + ] + ) np.testing.assert_allclose(expected, np.array(res), rtol=1e-05) def test_dygraph_api(self): @@ -56,20 +64,30 @@ class TestRenormAPI(unittest.TestCase): input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] x = paddle.to_tensor(input, stop_gradient=False) y = paddle.renorm(x, 1.0, 2, 2.05) - expected = np.array([[[0.40594056, 0.29285714, -0.41000000], - [0.60891086, 0.04392857, 0.61500001]], - [[0.40594056, -1.17142856, 0.41000000], - [0.62920785, 0.54178572, 0.61500001]]]) + expected = np.array( + [ + [ + [0.40594056, 0.29285714, -0.41000000], + [0.60891086, 0.04392857, 0.61500001], + ], + [ + [0.40594056, -1.17142856, 0.41000000], + [0.62920785, 0.54178572, 0.61500001], + ], + ] + ) np.testing.assert_allclose(expected, np.array(y), rtol=1e-05) z = paddle.mean(y) z.backward(retain_graph=True) - expected_grad = np.array([[[0, 0.01394558, 0.02733333], - [0, 0.01394558, 0.00683333]], - [[0, 0.01045918, 0.00683333], - [0, 0.01394558, 0.00683333]]]) - np.testing.assert_allclose(expected_grad, - np.array(x.grad), - rtol=1e-05) + expected_grad = np.array( + [ + [[0, 0.01394558, 0.02733333], [0, 0.01394558, 0.00683333]], + [[0, 0.01045918, 0.00683333], [0, 0.01394558, 0.00683333]], + ] + ) + np.testing.assert_allclose( + expected_grad, np.array(x.grad), rtol=1e-05 + ) # #test exception: with fluid.dygraph.guard(): input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] @@ -87,10 +105,18 @@ class TestRenormAPI(unittest.TestCase): exp = True self.assertTrue(exp) y = paddle.renorm(x, 1.0, -1, 2.05) - expected = np.array([[[0.40594056, 0.29285714, -0.41000000], - [0.60891086, 0.04392857, 0.61500001]], - [[0.40594056, -1.17142856, 0.41000000], - [0.62920785, 0.54178572, 0.61500001]]]) + expected = np.array( + [ + [ + [0.40594056, 0.29285714, -0.41000000], + [0.60891086, 0.04392857, 0.61500001], + ], + [ + 
[0.40594056, -1.17142856, 0.41000000], + [0.62920785, 0.54178572, 0.61500001], + ], + ] + ) np.testing.assert_allclose(expected, np.array(y), rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py index 1b7538f1bcc70b18faefd4cea148d160b23bbe30..dbf14c81948f9ba0b638187f7f16c0cd818c2452 100644 --- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py @@ -40,14 +40,17 @@ class TestReorderLoDTensor(unittest.TestCase): @classmethod def set_program(cls): - dat = fluid.layers.data(name=cls.data_desc[0][0], - shape=cls.data_desc[0][1]) + dat = fluid.layers.data( + name=cls.data_desc[0][0], shape=cls.data_desc[0][1] + ) dat.stop_gradient = False - rank_dat = fluid.layers.data(name=cls.data_desc[1][0], - shape=cls.data_desc[1][1]) + rank_dat = fluid.layers.data( + name=cls.data_desc[1][0], shape=cls.data_desc[1][1] + ) table = lod_rank_table(rank_dat) - new_dat = fluid.layers.reorder_lod_tensor_by_rank(x=dat, - rank_table=table) + new_dat = fluid.layers.reorder_lod_tensor_by_rank( + x=dat, rank_table=table + ) loss = fluid.layers.reduce_sum(new_dat) fluid.backward.append_backward(loss=loss) cls.fetch_list = [new_dat, cls.data_desc[0][0] + '@GRAD'] @@ -61,10 +64,12 @@ class TestReorderLoDTensor(unittest.TestCase): for place in places: self.set_inputs(place) exe = fluid.Executor(place) - output, input_grad = exe.run(fluid.default_main_program(), - feed=self.inputs, - fetch_list=self.fetch_list, - return_numpy=False) + output, input_grad = exe.run( + fluid.default_main_program(), + feed=self.inputs, + fetch_list=self.fetch_list, + return_numpy=False, + ) outputs.append(output) input_grads.append(input_grad) self.actual_outputs = outputs @@ -81,11 +86,13 @@ class TestReorderLoDTensor(unittest.TestCase): lod_level_i = np.random.randint( low=1, high=5, - size=self.num_seq if i == 0 else sum(lod_level_i)).tolist() + size=self.num_seq if i == 0 else sum(lod_level_i), + ).tolist() data_lod.append(lod_level_i) data_value = np.random.random( - size=[sum(data_lod[-1]) if data_lod else self.num_seq] + - data_shape).astype('float32') + size=[sum(data_lod[-1]) if data_lod else self.num_seq] + + data_shape + ).astype('float32') self.data[data_name] = (data_value, data_lod) def set_inputs(self, place): @@ -104,8 +111,9 @@ class TestReorderLoDTensor(unittest.TestCase): rank_table = [] # list of (index, length) for i in range(len(ref_lod)): rank_table.append((i, ref_lod[i])) - rank_table = sorted(rank_table, - key=functools.cmp_to_key(lambda x, y: y[1] - x[1])) + rank_table = sorted( + rank_table, key=functools.cmp_to_key(lambda x, y: y[1] - x[1]) + ) # compute the input sequence info according to input_lod input_value, input_lod = self.data[self.data_desc[0][0]] @@ -120,8 +128,9 @@ class TestReorderLoDTensor(unittest.TestCase): for lod_level_i in offset_lod[level:]: sub_lod_i = [] for idx in range(start_idx, end_idx): - sub_lod_i.append(lod_level_i[idx + 1] - - lod_level_i[idx]) + sub_lod_i.append( + lod_level_i[idx + 1] - lod_level_i[idx] + ) sub_lod.append(sub_lod_i) start_idx = lod_level_i[start_idx] end_idx = lod_level_i[end_idx] @@ -137,8 +146,9 @@ class TestReorderLoDTensor(unittest.TestCase): input_seq_start = input_table[index][0] input_seq_len = input_table[index][1] input_seq_end = input_seq_start + input_seq_len - output_value[offset:offset + input_seq_len] = input_value[ - input_seq_start:input_seq_end] + output_value[offset : 
offset + input_seq_len] = input_value[ + input_seq_start:input_seq_end + ] offset += input_seq_len input_seq_sub_lod = input_table[index][2] @@ -155,22 +165,22 @@ class TestReorderLoDTensor(unittest.TestCase): # check output expect_output, expect_output_lod = self.reorder() for actual_output in self.actual_outputs: - np.testing.assert_allclose(np.array(actual_output), - expect_output, - rtol=1e-05, - atol=0.001) - self.assertEqual(expect_output_lod, - actual_output.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual_output), expect_output, rtol=1e-05, atol=0.001 + ) + self.assertEqual( + expect_output_lod, actual_output.recursive_sequence_lengths() + ) # check gradient expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] for actual_grad in self.actual_grads: - np.testing.assert_allclose(np.array(actual_grad), - expect_grad, - rtol=1e-05, - atol=0.001) - self.assertEqual(expect_grad_lod, - actual_grad.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual_grad), expect_grad, rtol=1e-05, atol=0.001 + ) + self.assertEqual( + expect_grad_lod, actual_grad.recursive_sequence_lengths() + ) def test_reorder_tensor(self): self.data_desc[0][-1] = 0 # input is tensor @@ -179,43 +189,43 @@ class TestReorderLoDTensor(unittest.TestCase): # check output expect_output, expect_output_lod = self.reorder() for actual_output in self.actual_outputs: - np.testing.assert_allclose(np.array(actual_output), - expect_output, - rtol=1e-05, - atol=0.001) - self.assertEqual(expect_output_lod, - actual_output.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual_output), expect_output, rtol=1e-05, atol=0.001 + ) + self.assertEqual( + expect_output_lod, actual_output.recursive_sequence_lengths() + ) # check gradient expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] for actual_grad in self.actual_grads: - np.testing.assert_allclose(np.array(actual_grad), - expect_grad, - rtol=1e-05, - atol=0.001) - self.assertEqual(expect_grad_lod, - actual_grad.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual_grad), expect_grad, rtol=1e-05, atol=0.001 + ) + self.assertEqual( + expect_grad_lod, actual_grad.recursive_sequence_lengths() + ) # compare outputs between LodTensors with explicit and implicit lod # use the same data but set the input lod explicitly input_lod = [[1] * len(self.data[self.data_desc[0][0]][0])] self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths( - input_lod) + input_lod + ) # preserve the output of LodTensor with implicit lod to compare expect_outputs = [ np.array(actual_output) for actual_output in self.actual_outputs ] self.run_program() - for actual_output, expect_output in zip(self.actual_outputs, - expect_outputs): - np.testing.assert_allclose(np.array(actual_output), - expect_output, - rtol=1e-05, - atol=0.001) + for actual_output, expect_output in zip( + self.actual_outputs, expect_outputs + ): + np.testing.assert_allclose( + np.array(actual_output), expect_output, rtol=1e-05, atol=0.001 + ) class TestReorderLoDTensorError(unittest.TestCase): - def test_errors(self): with program_guard(Program()): @@ -224,17 +234,19 @@ class TestReorderLoDTensorError(unittest.TestCase): x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") table1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") new_dat = fluid.layers.reorder_lod_tensor_by_rank( - x=x1, rank_table=table1) + 
x=x1, rank_table=table1 + ) self.assertRaises(TypeError, test_Variable) def test_type(): x2 = fluid.layers.data(name='x1', shape=[4], dtype='float32') - table2 = fluid.layers.data(name='table2', - shape=[4], - dtype='int32') + table2 = fluid.layers.data( + name='table2', shape=[4], dtype='int32' + ) new_dat2 = fluid.layers.reorder_lod_tensor_by_rank( - x=x2, rank_table=table2) + x=x2, rank_table=table2 + ) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py index fcc8647fbcaa4b0f3472a6a674d7009abdf48a5a..7332c36e1d2841326575b03b989057864f558431 100644 --- a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py +++ b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py @@ -21,20 +21,20 @@ from paddle.fluid import Program, program_guard class TestRepeatInterleaveOp(OpTest): - def setUp(self): self.op_type = "repeat_interleave" self.python_api = paddle.repeat_interleave self.init_dtype_type() index_np = np.random.randint( - low=0, high=3, size=self.index_size).astype(self.index_type) + low=0, high=3, size=self.index_size + ).astype(self.index_type) x_np = np.random.random(self.x_shape).astype(self.x_type) self.inputs = {'X': x_np, 'RepeatsTensor': index_np} self.attrs = {'dim': self.dim} - outer_loop = np.prod(self.x_shape[:self.dim]) - x_reshape = [outer_loop] + list(self.x_shape[self.dim:]) + outer_loop = np.prod(self.x_shape[: self.dim]) + x_reshape = [outer_loop] + list(self.x_shape[self.dim :]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) out_list = [] for i in range(outer_loop): @@ -63,18 +63,17 @@ class TestRepeatInterleaveOp(OpTest): class TestRepeatInterleaveOp2(OpTest): - def setUp(self): self.op_type = "repeat_interleave" self.python_api = paddle.repeat_interleave self.init_dtype_type() index_np = 2 x_np = np.random.random(self.x_shape).astype(self.x_type) - self.inputs = {'X': x_np} #, 'RepeatsTensor': None} + self.inputs = {'X': x_np} # , 'RepeatsTensor': None} self.attrs = {'dim': self.dim, 'Repeats': index_np} - outer_loop = np.prod(self.x_shape[:self.dim]) - x_reshape = [outer_loop] + list(self.x_shape[self.dim:]) + outer_loop = np.prod(self.x_shape[: self.dim]) + x_reshape = [outer_loop] + list(self.x_shape[self.dim :]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) out_list = [] for i in range(outer_loop): @@ -102,10 +101,14 @@ class TestRepeatInterleaveOp2(OpTest): class TestIndexSelectAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]) + self.data_x = np.array( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ] + ) self.data_index = np.array([0, 1, 2, 1]).astype('int32') def test_repeat_interleave_api(self): @@ -115,18 +118,19 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='repeats_', - shape=[4], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='repeats_', + shape=[4], + dtype='int32', + append_batch_size=False, + ) z = paddle.repeat_interleave(x, index, axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'repeats_': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': self.data_x, 'repeats_': self.data_index}, + fetch_list=[z.name], + 
return_numpy=False, + ) expect_out = np.repeat(self.data_x, self.data_index, axis=1) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -134,18 +138,22 @@ class TestIndexSelectAPI(unittest.TestCase): repeats = np.array([1, 2, 1]).astype('int32') with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='repeats_', - shape=[3], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='repeats_', + shape=[3], + dtype='int32', + append_batch_size=False, + ) z = paddle.repeat_interleave(x, index, axis=0) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={ - 'x': self.data_x, - 'repeats_': repeats, - }, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={ + 'x': self.data_x, + 'repeats_': repeats, + }, + fetch_list=[z.name], + return_numpy=False, + ) expect_out = np.repeat(self.data_x, repeats, axis=0) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -154,9 +162,9 @@ class TestIndexSelectAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1, 4]) z = paddle.repeat_interleave(x, repeats, axis=0) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': self.data_x}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.repeat(self.data_x, repeats, axis=0) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_require_version.py b/python/paddle/fluid/tests/unittests/test_require_version.py index b5d9a1bfa5423cc49103c94f34798534d787f671..92066e95392c0e26ce9a8cd760c91ab0e4472a06 100644 --- a/python/paddle/fluid/tests/unittests/test_require_version.py +++ b/python/paddle/fluid/tests/unittests/test_require_version.py @@ -20,21 +20,30 @@ import paddle class VersionTest(unittest.TestCase): - def test_check_output(self): warnings.warn( "paddle.__version__: %s, fluid_version.full_version: %s, fluid_version.major: %s, fluid_version.minor: %s, fluid_version.patch: %s, fluid_version.rc: %s." - % (paddle.__version__, fluid_version.full_version, - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc)) + % ( + paddle.__version__, + fluid_version.full_version, + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, + ) + ) ori_full_version = fluid_version.full_version ori_sep_version = [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] = ['1', '4', '1', '0'] fluid.require_version('1') @@ -52,21 +61,24 @@ class VersionTest(unittest.TestCase): # if installed version is 0.0.0.0, throw warning and skip the checking. 
[ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] = ['0', '0', '0', '0'] fluid.require_version('0.0.0') fluid_version.full_version = ori_full_version [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] = ori_sep_version # Test Errors class TestErrors(unittest.TestCase): - def test_errors(self): # The type of params must be str. def test_input_type(): @@ -118,12 +130,16 @@ class TestErrors(unittest.TestCase): ori_full_version = fluid_version.full_version ori_sep_version = [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] = ['1', '4', '1', '0'] self.assertRaises(Exception, test_version) @@ -132,8 +148,10 @@ class TestErrors(unittest.TestCase): fluid_version.full_version = ori_full_version [ - fluid_version.major, fluid_version.minor, fluid_version.patch, - fluid_version.rc + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] = ori_sep_version diff --git a/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py b/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py index c28724b97ae6ed822edbc747aa171df1624a1edb..a13a0d2c9e8807d6747147be1226118d2ad6352b 100644 --- a/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py +++ b/python/paddle/fluid/tests/unittests/test_reset_grad_inplace_version.py @@ -22,7 +22,6 @@ paddle.set_device('cpu') # Test 1 def clear_grad_test_0(w, a): - @paddle.no_grad() def warp(*_): assert w.grad is not None @@ -33,7 +32,6 @@ def clear_grad_test_0(w, a): class TestInplaceAndClearGradient(unittest.TestCase): - def func_test(self): input_data = np.ones([1, 1]) w = paddle.to_tensor(input_data, 'float32', stop_gradient=False) @@ -43,8 +41,9 @@ class TestInplaceAndClearGradient(unittest.TestCase): for i in range(2): print(" Step: ", i) out0 = _legacy_C_ops.scale(w, 'scale', 0.1) - out = _legacy_C_ops.matmul_v2(out0, w, 'trans_x', False, 'trans_y', - False) + out = _legacy_C_ops.matmul_v2( + out0, w, 'trans_x', False, 'trans_y', False + ) out.backward() assert w.grad[0] == 0.15 @@ -56,14 +55,12 @@ class TestInplaceAndClearGradient(unittest.TestCase): # Test 2 class Counter: - def __init__(self): self.num_calls = 0 self.step = 0 def clear_grad_test_1(w, c): - @paddle.no_grad() def warp(*_): assert w.grad is not None @@ -77,7 +74,6 @@ def clear_grad_test_1(w, c): class TestInplaceClearGradAccumulation(unittest.TestCase): - def func_test(self): input_data = np.ones([1, 1]) w = paddle.to_tensor(input_data, 'float32', stop_gradient=False) @@ -88,8 +84,9 @@ class TestInplaceClearGradAccumulation(unittest.TestCase): for c.step in range(5): out0 = _legacy_C_ops.scale(w, 'scale', 0.1) - out = _legacy_C_ops.matmul_v2(out0, w, 'trans_x', False, 'trans_y', - False) + out = _legacy_C_ops.matmul_v2( + out0, w, 'trans_x', False, 'trans_y', False + ) out.backward() @@ -106,7 +103,6 @@ class TestInplaceClearGradAccumulation(unittest.TestCase): class TestInplaceClearGradAccumulationAlt(unittest.TestCase): - def func_test(self): input_data = np.ones([1, 1]) w = 
paddle.to_tensor(input_data, 'float32', stop_gradient=False) diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index b8cb8bfa88ac6fa075f2b6a5d1f2532bd3be97ca..dad6e3fa3284a716d4a33b6d72247278cdcd011c 100755 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -23,7 +23,6 @@ from paddle.static import Program, program_guard # situation 1: have shape( list, no tensor), no actual shape(Tensor) class TestReshapeOp(OpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" @@ -31,7 +30,7 @@ class TestReshapeOp(OpTest): self.attrs = {"shape": self.new_shape} self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -47,31 +46,27 @@ class TestReshapeOp(OpTest): class TestReshapeOp_ZeroDim1(OpTest): - def init_data(self): self.ori_shape = () - self.new_shape = (1) - self.infered_shape = (1) + self.new_shape = 1 + self.infered_shape = 1 class TestReshapeOp_ZeroDim2(OpTest): - def init_data(self): self.ori_shape = () - self.new_shape = (-1) - self.infered_shape = (1) + self.new_shape = -1 + self.infered_shape = 1 class TestReshapeOp_ZeroDim3(OpTest): - def init_data(self): - self.ori_shape = (1) + self.ori_shape = 1 self.new_shape = () self.infered_shape = () class TestReshapeBF16Op(OpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" @@ -81,11 +76,10 @@ class TestReshapeBF16Op(OpTest): self.inputs = {"X": convert_float_to_uint16(x)} self.attrs = {"shape": self.new_shape} self.outputs = { - "Out": - convert_float_to_uint16(out), - 'XShape': - convert_float_to_uint16( - np.random.random(self.ori_shape).astype("float32")) + "Out": convert_float_to_uint16(out), + 'XShape': convert_float_to_uint16( + np.random.random(self.ori_shape).astype("float32") + ), } def init_data(self): @@ -101,7 +95,6 @@ class TestReshapeBF16Op(OpTest): class TestReshapeOpDimInfer1(TestReshapeOp): - def init_data(self): self.ori_shape = (5, 25) self.new_shape = (5, -1, 5) @@ -109,7 +102,6 @@ class TestReshapeOpDimInfer1(TestReshapeOp): class TestReshapeOpDimInfer2(TestReshapeOp): - def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) @@ -118,19 +110,18 @@ class TestReshapeOpDimInfer2(TestReshapeOp): # situation 2: have shape(list, no tensor), have actual shape(Tensor) class TestReshapeOpWithInputShape(OpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" self.inputs = { "X": np.random.random(self.ori_shape).astype("float32"), - "Shape": np.array(self.actual_shape, dtype="int32") + "Shape": np.array(self.actual_shape, dtype="int32"), } self.attrs = {"shape": self.new_shape} self.outputs = { "Out": self.inputs["X"].reshape(self.actual_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -147,24 +138,24 @@ class TestReshapeOpWithInputShape(OpTest): # Situation 3: have shape(list, have tensor), no actual shape(Tensor) class TestReshapeOp_attr_ShapeTensor(OpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) 
self.inputs = { "X": np.random.random(self.ori_shape).astype("float32"), - 'ShapeTensor': shape_tensor + 'ShapeTensor': shape_tensor, } self.attrs = {'shape': self.shape} self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -181,7 +172,6 @@ class TestReshapeOp_attr_ShapeTensor(OpTest): class TestReshapeOpDimInfer1_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor): - def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 20) @@ -190,7 +180,6 @@ class TestReshapeOpDimInfer1_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor): class TestReshapeOpDimInfer2_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor): - def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) @@ -200,19 +189,18 @@ class TestReshapeOpDimInfer2_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor): # Situation 4: have shape(Tensor), no actual shape(Tensor) class TestReshapeOp_attr_OnlyShape(OpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" self.inputs = { "X": np.random.random(self.ori_shape).astype("float32"), - "Shape": np.array(self.new_shape, dtype="int32") + "Shape": np.array(self.new_shape, dtype="int32"), } self.attrs = {} self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def init_data(self): @@ -228,7 +216,6 @@ class TestReshapeOp_attr_OnlyShape(OpTest): class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): - def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 10) @@ -237,7 +224,6 @@ class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): - def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) @@ -247,7 +233,6 @@ class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): # test int8 data type on CPU class TestReshapeInt8Op(OpTest): - def setUp(self): self.init_dtype() self.init_data() @@ -262,7 +247,7 @@ class TestReshapeInt8Op(OpTest): } self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype(np.float32) + 'XShape': np.random.random(self.ori_shape).astype(np.float32), } def init_dtype(self): @@ -274,9 +259,9 @@ class TestReshapeInt8Op(OpTest): self.infered_shape = (10, 2, 3, -1) def test_check_output(self): - self.check_output_with_place(fluid.core.CPUPlace(), - atol=1e-5, - no_check_set=['XShape']) + self.check_output_with_place( + fluid.core.CPUPlace(), atol=1e-5, no_check_set=['XShape'] + ) def test_check_grad(self): pass @@ -284,13 +269,11 @@ class TestReshapeInt8Op(OpTest): # test unt8 data type on CPU class TestReshapeUint8Op(TestReshapeInt8Op): - def init_dtype(self): self.dtype = np.uint8 class TestReshapeOpBool(TestReshapeOp): - def setUp(self): self.init_data() self.op_type = "reshape2" @@ -300,7 +283,7 @@ class TestReshapeOpBool(TestReshapeOp): self.attrs = {"shape": self.new_shape} self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype("float32") + 'XShape': np.random.random(self.ori_shape).astype("float32"), } def test_check_grad(self): @@ -309,7 +292,6 @@ class TestReshapeOpBool(TestReshapeOp): # Test python API class 
TestReshapeAPI(unittest.TestCase): - def _set_paddle_api(self): self.fill_constant = paddle.fluid.layers.fill_constant self.data = paddle.static.data @@ -339,9 +321,9 @@ class TestReshapeAPI(unittest.TestCase): out_1 = self.reshape(x, shape) # situation 2: have shape(list, no tensor), have actual shape(Tensor) - out_2 = fluid.layers.reshape(x, - shape=shape, - actual_shape=actual_shape) + out_2 = fluid.layers.reshape( + x, shape=shape, actual_shape=actual_shape + ) # Situation 3: have shape(list, have tensor), no actual shape(Tensor) out_3 = self.reshape(x, shape=[positive_five, 10]) @@ -352,11 +334,9 @@ class TestReshapeAPI(unittest.TestCase): exe = paddle.static.Executor(place=paddle.CPUPlace()) res_1, res_2, res_3, res_4 = exe.run( main_prog, - feed={ - "x": input, - "shape": np.array([2, 5, 5]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3, out_4]) + feed={"x": input, "shape": np.array([2, 5, 5]).astype("int32")}, + fetch_list=[out_1, out_2, out_3, out_4], + ) assert np.array_equal(res_1, input.reshape(shape)) assert np.array_equal(res_2, input.reshape(shape)) @@ -392,7 +372,6 @@ class TestReshapeAPI(unittest.TestCase): class TestStaticReshape_(TestReshapeAPI): - def _executed_api(self): self.reshape = paddle.reshape_ @@ -418,7 +397,6 @@ class TestStaticReshape_(TestReshapeAPI): # Test Input Error class TestReshapeOpError(unittest.TestCase): - def _set_paddle_api(self): self.data = paddle.static.data self.reshape = paddle.reshape @@ -431,8 +409,9 @@ class TestReshapeOpError(unittest.TestCase): with program_guard(Program(), Program()): # The x type of reshape_op must be Variable. def test_x_type(): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.CPUPlace() + ) self.reshape(x1, shape=[1]) self.assertRaises(TypeError, test_x_type) @@ -445,9 +424,9 @@ class TestReshapeOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_dtype) def test_x_dtype_float16(): - x_float16 = self.data(name="x_float16", - shape=[2, 25], - dtype="float16") + x_float16 = self.data( + name="x_float16", shape=[2, 25], dtype="float16" + ) self.reshape(x_float16, shape=[2, 5, 5]) test_x_dtype_float16() @@ -494,7 +473,6 @@ class TestReshapeOpError(unittest.TestCase): class TestDygraphReshapeAPI(unittest.TestCase): - def setUp(self): self.executed_api() @@ -530,13 +508,11 @@ class TestDygraphReshapeAPI(unittest.TestCase): class TestDygraphReshapeInplaceAPI(TestDygraphReshapeAPI): - def executed_api(self): self.reshape = paddle.reshape_ class TestReshapeZeroTensor(unittest.TestCase): - def test_reshape_zero_tensor_success(self): zero_tensor = paddle.zeros([0, 2, 3]) # since we use "0" as the dimension copy semantically in reshape, @@ -551,7 +527,6 @@ class TestReshapeZeroTensor(unittest.TestCase): class TestReshapeAPI_ZeroDim(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -596,18 +571,18 @@ class TestReshapeAPI_ZeroDim(unittest.TestCase): # Test compile shape self.assertEqual(x.shape, ()) - self.assertEqual(out.shape, (1, )) + self.assertEqual(out.shape, (1,)) self.assertEqual(x_grad.shape, ()) - self.assertEqual(out_grad.shape, (1, )) + self.assertEqual(out_grad.shape, (1,)) exe = fluid.Executor() result = exe.run(main_prog, fetch_list=[x, out, x_grad, out_grad]) # Test runtime shape self.assertEqual(result[0].shape, ()) - self.assertEqual(result[1].shape, (1, )) + self.assertEqual(result[1].shape, (1,)) 
self.assertEqual(result[2].shape, ()) - self.assertEqual(result[3].shape, (1, )) + self.assertEqual(result[3].shape, (1,)) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py b/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py index 2a16242fc60e261a3848d0d1ec3918157fb45ca9..754bb0dcb5918aba84705c3923a8f5f37d4108f6 100644 --- a/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py +++ b/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py @@ -19,8 +19,9 @@ import unittest paddle.enable_static() -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', - level=logging.INFO) +logging.basicConfig( + format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO +) logger = logging.getLogger(__name__) @@ -36,11 +37,14 @@ def set_cinn_flag(val): @unittest.skipIf(not set_cinn_flag(True), "Paddle is not compiled with CINN.") class TestResnet50Accuracy(unittest.TestCase): - def reader(self, limit): for _ in range(limit): - yield {'image': np.random.randint(0, 256, size=[32, 3, 224, 224]).astype('float32'), \ - 'label': np.random.randint(0, 1000, size=[32]).astype('int64')} + yield { + 'image': np.random.randint( + 0, 256, size=[32, 3, 224, 224] + ).astype('float32'), + 'label': np.random.randint(0, 1000, size=[32]).astype('int64'), + } def generate_random_data(self, loop_num=10): feed = [] @@ -51,9 +55,9 @@ class TestResnet50Accuracy(unittest.TestCase): def build_program(self, main_program, startup_program): with paddle.static.program_guard(main_program, startup_program): - image = paddle.static.data(name='image', - shape=[32, 3, 224, 224], - dtype='float32') + image = paddle.static.data( + name='image', shape=[32, 3, 224, 224], dtype='float32' + ) label = paddle.static.data(name='label', shape=[32], dtype='int64') # TODO: stop_gradient slower training speed, need fix @@ -62,8 +66,9 @@ class TestResnet50Accuracy(unittest.TestCase): model = paddle.vision.models.resnet50() prediction = model(image) - loss = paddle.nn.functional.cross_entropy(input=prediction, - label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label + ) loss = paddle.mean(loss) adam = paddle.optimizer.Adam(learning_rate=0.001) adam.minimize(loss) @@ -83,23 +88,29 @@ class TestResnet50Accuracy(unittest.TestCase): exe = paddle.static.Executor(place) compiled_prog = paddle.static.CompiledProgram( - main_program).with_data_parallel(loss_name=loss.name) + main_program + ).with_data_parallel(loss_name=loss.name) loss_vals = [] scope = paddle.static.Scope() with paddle.static.scope_guard(scope): exe.run(startup_program) for step in range(iters): - loss_v = exe.run(compiled_prog, - feed=feed[step], - fetch_list=[loss], - return_numpy=True) + loss_v = exe.run( + compiled_prog, + feed=feed[step], + fetch_list=[loss], + return_numpy=True, + ) loss_vals.append(loss_v[0][0]) return loss_vals def test_check_resnet50_accuracy(self): - place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) loop_num = 10 feed = self.generate_random_data(loop_num) diff --git a/python/paddle/fluid/tests/unittests/test_retain_graph.py b/python/paddle/fluid/tests/unittests/test_retain_graph.py index 71998f57e5f9576744fad44803c2281818443f64..06f0be1b879ed85ee52b06b679ec35b94eaf890d 100644 --- a/python/paddle/fluid/tests/unittests/test_retain_graph.py +++ 
b/python/paddle/fluid/tests/unittests/test_retain_graph.py @@ -24,7 +24,6 @@ paddle.seed(SEED) class Generator(fluid.dygraph.Layer): - def __init__(self): super(Generator, self).__init__() self.conv1 = paddle.nn.Conv2D(3, 3, 3, padding=1) @@ -36,7 +35,6 @@ class Generator(fluid.dygraph.Layer): class Discriminator(fluid.dygraph.Layer): - def __init__(self): super(Discriminator, self).__init__() self.convd = paddle.nn.Conv2D(6, 3, 1) @@ -47,15 +45,16 @@ class Discriminator(fluid.dygraph.Layer): class TestRetainGraph(unittest.TestCase): - - def cal_gradient_penalty(self, - netD, - real_data, - fake_data, - edge_data=None, - type='mixed', - constant=1.0, - lambda_gp=10.0): + def cal_gradient_penalty( + self, + netD, + real_data, + fake_data, + edge_data=None, + type='mixed', + constant=1.0, + lambda_gp=10.0, + ): if lambda_gp > 0.0: if type == 'real': interpolatesv = real_data @@ -63,10 +62,13 @@ class TestRetainGraph(unittest.TestCase): interpolatesv = fake_data elif type == 'mixed': alpha = paddle.rand((real_data.shape[0], 1)) - alpha = paddle.expand(alpha, [ - real_data.shape[0], - np.prod(real_data.shape) // real_data.shape[0] - ]) + alpha = paddle.expand( + alpha, + [ + real_data.shape[0], + np.prod(real_data.shape) // real_data.shape[0], + ], + ) alpha = paddle.reshape(alpha, real_data.shape) interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) else: @@ -76,21 +78,26 @@ class TestRetainGraph(unittest.TestCase): fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1) disc_interpolates = netD(fake_AB) - outs = paddle.fluid.layers.fill_constant(disc_interpolates.shape, - disc_interpolates.dtype, - 1.0) - gradients = paddle.grad(outputs=disc_interpolates, - inputs=fake_AB, - grad_outputs=outs, - create_graph=True, - retain_graph=True, - only_inputs=True) + outs = paddle.fluid.layers.fill_constant( + disc_interpolates.shape, disc_interpolates.dtype, 1.0 + ) + gradients = paddle.grad( + outputs=disc_interpolates, + inputs=fake_AB, + grad_outputs=outs, + create_graph=True, + retain_graph=True, + only_inputs=True, + ) gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1]) - gradient_penalty = paddle.mean( - (paddle.norm(gradients + 1e-16, 2, 1) - constant)** - 2) * lambda_gp # added eps + gradient_penalty = ( + paddle.mean( + (paddle.norm(gradients + 1e-16, 2, 1) - constant) ** 2 + ) + * lambda_gp + ) # added eps return gradient_penalty, gradients else: return 0.0, None @@ -117,12 +124,12 @@ class TestRetainGraph(unittest.TestCase): G_pred_fake = d(fake_AB.detach()) false_target = paddle.fluid.layers.fill_constant( - G_pred_fake.shape, 'float32', 0.0) + G_pred_fake.shape, 'float32', 0.0 + ) - G_gradient_penalty, _ = self.cal_gradient_penalty(d, - realA, - fakeB, - lambda_gp=10.0) + G_gradient_penalty, _ = self.cal_gradient_penalty( + d, realA, fakeB, lambda_gp=10.0 + ) loss_d = gan_criterion(G_pred_fake, false_target) + G_gradient_penalty loss_d.backward(retain_graph=need_retain) @@ -131,10 +138,12 @@ class TestRetainGraph(unittest.TestCase): optim_g.clear_gradients() fake_AB = paddle.concat((realA, fakeB), 1) G_pred_fake = d(fake_AB) - true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, - 'float32', 1.0) + true_target = paddle.fluid.layers.fill_constant( + G_pred_fake.shape, 'float32', 1.0 + ) loss_g = l1_criterion(fakeB, realB) + gan_criterion( - G_pred_fake, true_target) + G_pred_fake, true_target + ) loss_g.backward() optim_g.minimize(loss_g) diff --git a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py 
b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py index 61bcb87521780c71130061eb60231066c705cd34..5d8527477af7a8413baa582bdd6dcb83162db3cf 100644 --- a/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py +++ b/python/paddle/fluid/tests/unittests/test_retinanet_detection_output.py @@ -1,16 +1,16 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # -#Licensed under the Apache License, Version 2.0 (the "License") -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License") +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import numpy as np @@ -42,9 +42,9 @@ def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold): for idx in indices: score_index.append((prediction[c][idx][4], c, idx)) - sorted_score_index = sorted(score_index, - key=lambda tup: tup[0], - reverse=True) + sorted_score_index = sorted( + score_index, key=lambda tup: tup[0], reverse=True + ) if keep_top_k > -1 and num_det > keep_top_k: sorted_score_index = sorted_score_index[:keep_top_k] num_det = keep_top_k @@ -59,9 +59,16 @@ def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold): return nmsed_outs, num_det -def retinanet_detection_out(boxes_list, scores_list, anchors_list, im_info, - score_threshold, nms_threshold, nms_top_k, - keep_top_k): +def retinanet_detection_out( + boxes_list, + scores_list, + anchors_list, + im_info, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, +): class_num = scores_list[0].shape[-1] im_height, im_width, im_scale = im_info @@ -88,23 +95,37 @@ def retinanet_detection_out(boxes_list, scores_list, anchors_list, im_info, a = int(idx / class_num) c = int(idx % class_num) box_offset = a * 4 - anchor_box_width = anchors_per_level[ - box_offset + 2] - anchors_per_level[box_offset] + 1 - anchor_box_height = anchors_per_level[ - box_offset + 3] - anchors_per_level[box_offset + 1] + 1 - anchor_box_center_x = anchors_per_level[ - box_offset] + anchor_box_width / 2 - anchor_box_center_y = anchors_per_level[box_offset + - 1] + anchor_box_height / 2 - - target_box_center_x = bboxes_per_level[ - box_offset] * anchor_box_width + anchor_box_center_x - target_box_center_y = bboxes_per_level[ - box_offset + 1] * anchor_box_height + anchor_box_center_y - target_box_width = math.exp( - bboxes_per_level[box_offset + 2]) * anchor_box_width - target_box_height = math.exp( - bboxes_per_level[box_offset + 3]) * anchor_box_height + anchor_box_width = ( + anchors_per_level[box_offset + 2] + - anchors_per_level[box_offset] + + 1 + ) + anchor_box_height = ( + anchors_per_level[box_offset + 3] + - anchors_per_level[box_offset + 1] + + 1 + ) + anchor_box_center_x = ( + 
anchors_per_level[box_offset] + anchor_box_width / 2 + ) + anchor_box_center_y = ( + anchors_per_level[box_offset + 1] + anchor_box_height / 2 + ) + + target_box_center_x = ( + bboxes_per_level[box_offset] * anchor_box_width + + anchor_box_center_x + ) + target_box_center_y = ( + bboxes_per_level[box_offset + 1] * anchor_box_height + + anchor_box_center_y + ) + target_box_width = ( + math.exp(bboxes_per_level[box_offset + 2]) * anchor_box_width + ) + target_box_height = ( + math.exp(bboxes_per_level[box_offset + 3]) * anchor_box_height + ) pred_box_xmin = target_box_center_x - target_box_width / 2 pred_box_ymin = target_box_center_y - target_box_height / 2 @@ -117,33 +138,46 @@ def retinanet_detection_out(boxes_list, scores_list, anchors_list, im_info, pred_box_ymax = pred_box_ymax / im_scale pred_box_xmin = max( - min(pred_box_xmin, - np.round(im_width / im_scale) - 1), 0.) + min(pred_box_xmin, np.round(im_width / im_scale) - 1), 0.0 + ) pred_box_ymin = max( - min(pred_box_ymin, - np.round(im_height / im_scale) - 1), 0.) + min(pred_box_ymin, np.round(im_height / im_scale) - 1), 0.0 + ) pred_box_xmax = max( - min(pred_box_xmax, - np.round(im_width / im_scale) - 1), 0.) + min(pred_box_xmax, np.round(im_width / im_scale) - 1), 0.0 + ) pred_box_ymax = max( - min(pred_box_ymax, - np.round(im_height / im_scale) - 1), 0.) + min(pred_box_ymax, np.round(im_height / im_scale) - 1), 0.0 + ) if c not in prediction.keys(): prediction[c] = [] - prediction[c].append([ - pred_box_xmin, pred_box_ymin, pred_box_xmax, pred_box_ymax, - scores_per_level[idx] - ]) - - nmsed_outs, nmsed_num = multiclass_nms(prediction, class_num, keep_top_k, - nms_threshold) + prediction[c].append( + [ + pred_box_xmin, + pred_box_ymin, + pred_box_xmax, + pred_box_ymax, + scores_per_level[idx], + ] + ) + + nmsed_outs, nmsed_num = multiclass_nms( + prediction, class_num, keep_top_k, nms_threshold + ) return nmsed_outs, nmsed_num -def batched_retinanet_detection_out(boxes, scores, anchors, im_info, - score_threshold, nms_threshold, nms_top_k, - keep_top_k): +def batched_retinanet_detection_out( + boxes, + scores, + anchors, + im_info, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, +): batch_size = scores[0].shape[0] det_outs = [] lod = [] @@ -158,8 +192,15 @@ def batched_retinanet_detection_out(boxes, scores, anchors, im_info, scores_per_batch.append(scores[lvl][n]) nmsed_outs, nmsed_num = retinanet_detection_out( - boxes_per_batch, scores_per_batch, anchors, im_info[n], - score_threshold, nms_threshold, nms_top_k, keep_top_k) + boxes_per_batch, + scores_per_batch, + anchors, + im_info[n], + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + ) lod.append(nmsed_num) if nmsed_num == 0: continue @@ -169,7 +210,6 @@ def batched_retinanet_detection_out(boxes, scores, anchors, im_info, class TestRetinanetDetectionOutOp1(OpTest): - def set_argument(self): self.score_threshold = 0.05 self.min_level = 3 @@ -192,8 +232,8 @@ class TestRetinanetDetectionOutOp1(OpTest): self.layer_w = [] num_levels = self.max_level - self.min_level + 1 for i in range(num_levels): - self.layer_h.append(2**(num_levels - i)) - self.layer_w.append(2**(num_levels - i)) + self.layer_h.append(2 ** (num_levels - i)) + self.layer_w.append(2 ** (num_levels - i)) def init_test_input(self): anchor_num = len(self.aspect_ratios) * self.scales_per_octave @@ -206,62 +246,89 @@ class TestRetinanetDetectionOutOp1(OpTest): layer_h = self.layer_h[i] layer_w = self.layer_w[i] - input_feat = np.random.random((self.batch_size, self.input_channels, - 
layer_h, layer_w)).astype('float32') + input_feat = np.random.random( + (self.batch_size, self.input_channels, layer_h, layer_w) + ).astype('float32') score = np.random.random( - (self.batch_size, self.class_num * anchor_num, layer_h, - layer_w)).astype('float32') + (self.batch_size, self.class_num * anchor_num, layer_h, layer_w) + ).astype('float32') score = np.transpose(score, [0, 2, 3, 1]) score = score.reshape((self.batch_size, -1, self.class_num)) - box = np.random.random((self.batch_size, self.box_size * anchor_num, - layer_h, layer_w)).astype('float32') + box = np.random.random( + (self.batch_size, self.box_size * anchor_num, layer_h, layer_w) + ).astype('float32') box = np.transpose(box, [0, 2, 3, 1]) box = box.reshape((self.batch_size, -1, self.box_size)) anchor_sizes = [] for octave in range(self.scales_per_octave): anchor_sizes.append( - float(self.anchor_strides[i] * (2**octave)) / - float(self.scales_per_octave) * self.anchor_scale) + float(self.anchor_strides[i] * (2**octave)) + / float(self.scales_per_octave) + * self.anchor_scale + ) anchor, var = anchor_generator_in_python( input_feat=input_feat, anchor_sizes=anchor_sizes, aspect_ratios=self.aspect_ratios, variances=[1.0, 1.0, 1.0, 1.0], stride=[self.anchor_strides[i], self.anchor_strides[i]], - offset=0.5) + offset=0.5, + ) anchor = np.reshape(anchor, [-1, 4]) self.scores_list.append(score.astype('float32')) self.bboxes_list.append(box.astype('float32')) self.anchors_list.append(anchor.astype('float32')) - self.im_info = np.array([[256., 256., 1.5]]).astype( - 'float32') #im_height, im_width, scale + self.im_info = np.array([[256.0, 256.0, 1.5]]).astype( + 'float32' + ) # im_height, im_width, scale def setUp(self): self.set_argument() self.init_test_input() nmsed_outs, lod = batched_retinanet_detection_out( - self.bboxes_list, self.scores_list, self.anchors_list, self.im_info, - self.score_threshold, self.nms_threshold, self.nms_top_k, - self.keep_top_k) + self.bboxes_list, + self.scores_list, + self.anchors_list, + self.im_info, + self.score_threshold, + self.nms_threshold, + self.nms_top_k, + self.keep_top_k, + ) nmsed_outs = np.array(nmsed_outs).astype('float32') self.op_type = 'retinanet_detection_output' self.inputs = { - 'BBoxes': [('b0', self.bboxes_list[0]), ('b1', self.bboxes_list[1]), - ('b2', self.bboxes_list[2]), ('b3', self.bboxes_list[3]), - ('b4', self.bboxes_list[4])], - 'Scores': [('s0', self.scores_list[0]), ('s1', self.scores_list[1]), - ('s2', self.scores_list[2]), ('s3', self.scores_list[3]), - ('s4', self.scores_list[4])], - 'Anchors': [('a0', self.anchors_list[0]), - ('a1', self.anchors_list[1]), - ('a2', self.anchors_list[2]), - ('a3', self.anchors_list[3]), - ('a4', self.anchors_list[4])], - 'ImInfo': (self.im_info, [[ - 1, - ]]) + 'BBoxes': [ + ('b0', self.bboxes_list[0]), + ('b1', self.bboxes_list[1]), + ('b2', self.bboxes_list[2]), + ('b3', self.bboxes_list[3]), + ('b4', self.bboxes_list[4]), + ], + 'Scores': [ + ('s0', self.scores_list[0]), + ('s1', self.scores_list[1]), + ('s2', self.scores_list[2]), + ('s3', self.scores_list[3]), + ('s4', self.scores_list[4]), + ], + 'Anchors': [ + ('a0', self.anchors_list[0]), + ('a1', self.anchors_list[1]), + ('a2', self.anchors_list[2]), + ('a3', self.anchors_list[3]), + ('a4', self.anchors_list[4]), + ], + 'ImInfo': ( + self.im_info, + [ + [ + 1, + ] + ], + ), } self.outputs = {'Out': (nmsed_outs, [lod])} self.attrs = { @@ -269,7 +336,7 @@ class TestRetinanetDetectionOutOp1(OpTest): 'nms_top_k': self.nms_top_k, 'nms_threshold': self.nms_threshold, 
'keep_top_k': self.keep_top_k, - 'nms_eta': 1., + 'nms_eta': 1.0, } def test_check_output(self): @@ -277,7 +344,6 @@ class TestRetinanetDetectionOutOp1(OpTest): class TestRetinanetDetectionOutOp2(OpTest): - def set_argument(self): self.score_threshold = 0.05 self.min_level = 3 @@ -302,7 +368,6 @@ class TestRetinanetDetectionOutOp2(OpTest): class TestRetinanetDetectionOutOpNo3(TestRetinanetDetectionOutOp1): - def set_argument(self): # Here set 2.0 to test the case there is no outputs. # In practical use, 0.0 < score_threshold < 1.0 @@ -327,12 +392,11 @@ class TestRetinanetDetectionOutOpNo3(TestRetinanetDetectionOutOp1): self.layer_w = [] num_levels = self.max_level - self.min_level + 1 for i in range(num_levels): - self.layer_h.append(2**(num_levels - i)) - self.layer_w.append(2**(num_levels - i)) + self.layer_h.append(2 ** (num_levels - i)) + self.layer_w.append(2 ** (num_levels - i)) class TestRetinanetDetectionOutOpNo4(TestRetinanetDetectionOutOp1): - def set_argument(self): self.score_threshold = 0.05 self.min_level = 2 @@ -355,33 +419,52 @@ class TestRetinanetDetectionOutOpNo4(TestRetinanetDetectionOutOp1): self.layer_w = [] num_levels = self.max_level - self.min_level + 1 for i in range(num_levels): - self.layer_h.append(2**(num_levels - i)) - self.layer_w.append(2**(num_levels - i)) + self.layer_h.append(2 ** (num_levels - i)) + self.layer_w.append(2 ** (num_levels - i)) def setUp(self): self.set_argument() self.init_test_input() nmsed_outs, lod = batched_retinanet_detection_out( - self.bboxes_list, self.scores_list, self.anchors_list, self.im_info, - self.score_threshold, self.nms_threshold, self.nms_top_k, - self.keep_top_k) + self.bboxes_list, + self.scores_list, + self.anchors_list, + self.im_info, + self.score_threshold, + self.nms_threshold, + self.nms_top_k, + self.keep_top_k, + ) nmsed_outs = np.array(nmsed_outs).astype('float32') self.op_type = 'retinanet_detection_output' self.inputs = { - 'BBoxes': [('b0', self.bboxes_list[0]), ('b1', self.bboxes_list[1]), - ('b2', self.bboxes_list[2]), - ('b3', self.bboxes_list[3])], - 'Scores': [('s0', self.scores_list[0]), ('s1', self.scores_list[1]), - ('s2', self.scores_list[2]), - ('s3', self.scores_list[3])], - 'Anchors': [('a0', self.anchors_list[0]), - ('a1', self.anchors_list[1]), - ('a2', self.anchors_list[2]), - ('a3', self.anchors_list[3])], - 'ImInfo': (self.im_info, [[ - 1, - ]]) + 'BBoxes': [ + ('b0', self.bboxes_list[0]), + ('b1', self.bboxes_list[1]), + ('b2', self.bboxes_list[2]), + ('b3', self.bboxes_list[3]), + ], + 'Scores': [ + ('s0', self.scores_list[0]), + ('s1', self.scores_list[1]), + ('s2', self.scores_list[2]), + ('s3', self.scores_list[3]), + ], + 'Anchors': [ + ('a0', self.anchors_list[0]), + ('a1', self.anchors_list[1]), + ('a2', self.anchors_list[2]), + ('a3', self.anchors_list[3]), + ], + 'ImInfo': ( + self.im_info, + [ + [ + 1, + ] + ], + ), } self.outputs = {'Out': (nmsed_outs, [lod])} self.attrs = { @@ -389,7 +472,7 @@ class TestRetinanetDetectionOutOpNo4(TestRetinanetDetectionOutOp1): 'nms_top_k': self.nms_top_k, 'nms_threshold': self.nms_threshold, 'keep_top_k': self.keep_top_k, - 'nms_eta': 1., + 'nms_eta': 1.0, } def test_check_output(self): @@ -397,7 +480,6 @@ class TestRetinanetDetectionOutOpNo4(TestRetinanetDetectionOutOp1): class TestRetinanetDetectionOutOpNo5(TestRetinanetDetectionOutOp1): - def set_argument(self): self.score_threshold = 0.05 self.min_level = 3 @@ -420,35 +502,34 @@ class TestRetinanetDetectionOutOpNo5(TestRetinanetDetectionOutOp1): self.layer_w = [] num_levels = self.max_level 
- self.min_level + 1 for i in range(num_levels): - self.layer_h.append(2**(num_levels - i)) - self.layer_w.append(2**(num_levels - i)) + self.layer_h.append(2 ** (num_levels - i)) + self.layer_w.append(2 ** (num_levels - i)) class TestRetinanetDetectionOutOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - bboxes_low1 = fluid.data(name='bboxes_low1', - shape=[1, 44, 4], - dtype='float32') - bboxes_high1 = fluid.data(name='bboxes_high1', - shape=[1, 11, 4], - dtype='float32') - scores_low1 = fluid.data(name='scores_low1', - shape=[1, 44, 10], - dtype='float32') - scores_high1 = fluid.data(name='scores_high1', - shape=[1, 11, 10], - dtype='float32') - anchors_low1 = fluid.data(name='anchors_low1', - shape=[44, 4], - dtype='float32') - anchors_high1 = fluid.data(name='anchors_high1', - shape=[11, 4], - dtype='float32') - im_info1 = fluid.data(name="im_info1", - shape=[1, 3], - dtype='float32') + bboxes_low1 = fluid.data( + name='bboxes_low1', shape=[1, 44, 4], dtype='float32' + ) + bboxes_high1 = fluid.data( + name='bboxes_high1', shape=[1, 11, 4], dtype='float32' + ) + scores_low1 = fluid.data( + name='scores_low1', shape=[1, 44, 10], dtype='float32' + ) + scores_high1 = fluid.data( + name='scores_high1', shape=[1, 11, 10], dtype='float32' + ) + anchors_low1 = fluid.data( + name='anchors_low1', shape=[44, 4], dtype='float32' + ) + anchors_high1 = fluid.data( + name='anchors_high1', shape=[11, 4], dtype='float32' + ) + im_info1 = fluid.data( + name="im_info1", shape=[1, 3], dtype='float32' + ) # The `bboxes` must be list, each element must be Variable and # its Tensor data type must be one of float32 and float64. @@ -457,19 +538,21 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase): bboxes=bboxes_low1, scores=[scores_low1, scores_high1], anchors=[anchors_low1, anchors_high1], - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_bboxes_type) def test_bboxes_tensor_dtype(): - bboxes_high2 = fluid.data(name='bboxes_high2', - shape=[1, 11, 4], - dtype='int32') + bboxes_high2 = fluid.data( + name='bboxes_high2', shape=[1, 11, 4], dtype='int32' + ) fluid.layers.retinanet_detection_output( bboxes=[bboxes_high2, 5], scores=[scores_low1, scores_high1], anchors=[anchors_low1, anchors_high1], - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_bboxes_tensor_dtype) @@ -480,19 +563,21 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase): bboxes=[bboxes_low1, bboxes_high1], scores=scores_low1, anchors=[anchors_low1, anchors_high1], - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_scores_type) def test_scores_tensor_dtype(): - scores_high2 = fluid.data(name='scores_high2', - shape=[1, 11, 10], - dtype='int32') + scores_high2 = fluid.data( + name='scores_high2', shape=[1, 11, 10], dtype='int32' + ) fluid.layers.retinanet_detection_output( bboxes=[bboxes_low1, bboxes_high1], scores=[scores_high2, 5], anchors=[anchors_low1, anchors_high1], - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_scores_tensor_dtype) @@ -503,19 +588,21 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase): bboxes=[bboxes_low1, bboxes_high1], scores=[scores_low1, scores_high1], anchors=anchors_low1, - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_anchors_type) def test_anchors_tensor_dtype(): - anchors_high2 = fluid.data(name='anchors_high2', - shape=[11, 4], - dtype='int32') + anchors_high2 = fluid.data( + name='anchors_high2', 
shape=[11, 4], dtype='int32' + ) fluid.layers.retinanet_detection_output( bboxes=[bboxes_low1, bboxes_high1], scores=[scores_low1, scores_high1], anchors=[anchors_high2, 5], - im_info=im_info1) + im_info=im_info1, + ) self.assertRaises(TypeError, test_anchors_tensor_dtype) @@ -526,19 +613,21 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase): bboxes=[bboxes_low1, bboxes_high1], scores=[scores_low1, scores_high1], anchors=[anchors_low1, anchors_high1], - im_info=[2, 3, 4]) + im_info=[2, 3, 4], + ) self.assertRaises(TypeError, test_iminfo_type) def test_iminfo_tensor_dtype(): - im_info2 = fluid.data(name='im_info2', - shape=[1, 3], - dtype='int32') + im_info2 = fluid.data( + name='im_info2', shape=[1, 3], dtype='int32' + ) fluid.layers.retinanet_detection_output( bboxes=[bboxes_low1, bboxes_high1], scores=[scores_low1, scores_high1], anchors=[anchors_low1, anchors_high1], - im_info=im_info2) + im_info=im_info2, + ) self.assertRaises(TypeError, test_iminfo_tensor_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_reverse_op.py b/python/paddle/fluid/tests/unittests/test_reverse_op.py index fc304d516f50b6f1d0248ee68d557acd6c02626c..829ef2a883b252a84a552fdb3d2cbe9fe1dbec5d 100644 --- a/python/paddle/fluid/tests/unittests/test_reverse_op.py +++ b/python/paddle/fluid/tests/unittests/test_reverse_op.py @@ -28,7 +28,6 @@ from test_attribute_var import UnittestBase class TestReverseOp(OpTest): - def initTestCase(self): self.x = np.random.random((3, 40)).astype('float64') self.axis = [0] @@ -52,63 +51,54 @@ class TestReverseOp(OpTest): class TestCase0(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 40)).astype('float64') self.axis = [1] class TestCase0_neg(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 40)).astype('float64') self.axis = [-1] class TestCase1(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 40)).astype('float64') self.axis = [0, 1] class TestCase1_neg(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 40)).astype('float64') self.axis = [0, -1] class TestCase2(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 4, 10)).astype('float64') self.axis = [0, 2] class TestCase2_neg(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 4, 10)).astype('float64') self.axis = [0, -2] class TestCase3(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 4, 10)).astype('float64') self.axis = [1, 2] class TestCase3_neg(TestReverseOp): - def initTestCase(self): self.x = np.random.random((3, 4, 10)).astype('float64') self.axis = [-1, -2] class TestCase4(unittest.TestCase): - def test_error(self): place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -116,9 +106,9 @@ class TestCase4(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - label = fluid.layers.data(name="label", - shape=[1, 1, 1, 1, 1, 1, 1, 1], - dtype="int64") + label = fluid.layers.data( + name="label", shape=[1, 1, 1, 1, 1, 1, 1, 1], dtype="int64" + ) rev = fluid.layers.reverse(label, axis=[-1, -2]) def _run_program(): @@ -129,11 +119,13 @@ class TestCase4(unittest.TestCase): class TestReverseLoDTensorArray(unittest.TestCase): - def setUp(self): self.shapes = [[5, 25], [5, 20], [5, 5]] - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) 
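The decoding exercised by the RetinaNet tests above shifts each anchor's center by the predicted offsets, scales its width and height through an exponential, maps the corners back to the original image, and clips them to its boundary. A minimal standalone NumPy sketch of that transform follows; the [xmin, ymin, xmax, ymax] anchor layout and [dx, dy, dw, dh] delta layout are assumptions for illustration, only the arithmetic mirrors retinanet_detection_out.

import numpy as np


def decode_and_clip(anchor, delta, im_info):
    # anchor: [xmin, ymin, xmax, ymax]; delta: [dx, dy, dw, dh]  (assumed layout)
    # im_info: [im_height, im_width, im_scale], as in the test's self.im_info
    im_height, im_width, im_scale = im_info
    anchor_w = anchor[2] - anchor[0]
    anchor_h = anchor[3] - anchor[1]
    anchor_cx = anchor[0] + anchor_w / 2
    anchor_cy = anchor[1] + anchor_h / 2

    # Shift the center by the predicted offsets and scale the size
    # through an exponential, as in the reference code above.
    cx = delta[0] * anchor_w + anchor_cx
    cy = delta[1] * anchor_h + anchor_cy
    w = np.exp(delta[2]) * anchor_w
    h = np.exp(delta[3]) * anchor_h

    # Corners in the resized image, mapped back to the original scale.
    box = np.array([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]) / im_scale

    # Clip to the original (unscaled) image boundary.
    max_x = np.round(im_width / im_scale) - 1
    max_y = np.round(im_height / im_scale) - 1
    box[[0, 2]] = np.clip(box[[0, 2]], 0.0, max_x)
    box[[1, 3]] = np.clip(box[[1, 3]], 0.0, max_y)
    return box


print(
    decode_and_clip(
        np.array([10.0, 10.0, 50.0, 50.0]),
        np.array([0.1, -0.2, 0.3, 0.0]),
        np.array([256.0, 256.0, 1.5]),
    )
)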
self.exe = fluid.Executor(self.place) def run_program(self, arr_len, axis=0): @@ -146,7 +138,8 @@ class TestReverseLoDTensorArray(unittest.TestCase): x.stop_gradient = False inputs.append(x) inputs_data.append( - np.random.random(self.shapes[i]).astype('float32')) + np.random.random(self.shapes[i]).astype('float32') + ) tensor_array = fluid.layers.create_array(dtype='float32') for i in range(arr_len): @@ -158,13 +151,18 @@ class TestReverseLoDTensorArray(unittest.TestCase): loss = fluid.layers.reduce_sum(output) fluid.backward.append_backward(loss) input_grads = list( - map(main_program.global_block().var, - [x.name + "@GRAD" for x in inputs])) + map( + main_program.global_block().var, + [x.name + "@GRAD" for x in inputs], + ) + ) feed_dict = dict(zip([x.name for x in inputs], inputs_data)) - res = self.exe.run(main_program, - feed=feed_dict, - fetch_list=input_grads + [output.name]) + res = self.exe.run( + main_program, + feed=feed_dict, + fetch_list=input_grads + [output.name], + ) return np.hstack(inputs_data[::-1]), res @@ -172,7 +170,7 @@ class TestReverseLoDTensorArray(unittest.TestCase): gt, res = self.run_program(arr_len=3) self.check_output(gt, res) # test with tuple type of axis - gt, res = self.run_program(arr_len=3, axis=(0, )) + gt, res = self.run_program(arr_len=3, axis=(0,)) self.check_output(gt, res) def test_case2(self): @@ -201,7 +199,6 @@ class TestReverseLoDTensorArray(unittest.TestCase): class TestReverseAxisTensor(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -227,8 +224,9 @@ class TestReverseAxisTensor(UnittestBase): gt = res[0][::-1, :, ::-1] np.testing.assert_allclose(res[1], gt) - paddle.static.save_inference_model(self.save_path, [x], [feat, out], - exe) + paddle.static.save_inference_model( + self.save_path, [x], [feat, out], exe + ) # Test for Inference Predictor infer_outs = self.infer_prog() gt = infer_outs[0][::-1, :, ::-1] @@ -248,7 +246,6 @@ class TestReverseAxisTensor(UnittestBase): class TestReverseAxisListTensor(TestReverseAxisTensor): - def path_prefix(self): return 'reverse_tensors' @@ -261,15 +258,18 @@ class TestReverseAxisListTensor(TestReverseAxisTensor): out = paddle.fluid.layers.reverse(x, axes) # check attrs - axis_attrs = paddle.static.default_main_program().block( - 0).ops[-1].all_attrs()["axis"] + axis_attrs = ( + paddle.static.default_main_program() + .block(0) + .ops[-1] + .all_attrs()["axis"] + ) self.assertTrue(axis_attrs[0].name, axes[0].name) self.assertTrue(axis_attrs[1].name, axes[1].name) return out class TestReverseDoubleGradCheck(unittest.TestCase): - def reverse_wrapper(self, x): return fluid.layers.reverse(x[0], [0, 1]) @@ -284,17 +284,13 @@ class TestReverseDoubleGradCheck(unittest.TestCase): out = fluid.layers.reverse(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.reverse_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.reverse_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -306,7 +302,6 @@ class TestReverseDoubleGradCheck(unittest.TestCase): class 
TestReverseTripleGradCheck(unittest.TestCase): - def reverse_wrapper(self, x): return fluid.layers.reverse(x[0], [0, 1]) @@ -321,17 +316,13 @@ class TestReverseTripleGradCheck(unittest.TestCase): out = fluid.layers.reverse(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.reverse_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.reverse_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py index 1948026b374d0b89bfd2cf6e10c26e9776376426..5845b421fb511a8fc45d3f6c0923f25cdc87df9c 100644 --- a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py +++ b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py @@ -21,14 +21,19 @@ import paddle.fluid as fluid import paddle -def create_selected_rows_and_tensor(scope, place, height, row_num, - embedding_size): +def create_selected_rows_and_tensor( + scope, place, height, row_num, embedding_size +): sr = scope.var("@selected_rows@").get_selected_rows() tensor = scope.var("grad").get_tensor() - rows = np.random.random_integers(low=0, high=height - 1, size=[ - row_num, - ]).astype('int64') + rows = np.random.random_integers( + low=0, + high=height - 1, + size=[ + row_num, + ], + ).astype('int64') sr_val = np.random.random(size=[row_num, embedding_size]).astype('float32') sr.set_height(height) @@ -45,14 +50,9 @@ def create_selected_rows_and_tensor(scope, place, height, row_num, class TestBase(unittest.TestCase): - - def setup(self, - place, - is_sparse, - centered, - size, - row_num=None, - epsilon=1e-6): + def setup( + self, place, is_sparse, centered, size, row_num=None, epsilon=1e-6 + ): np.random.seed(5) # fix seed self.scope = fluid.global_scope() @@ -62,8 +62,9 @@ class TestBase(unittest.TestCase): self.param = np.random.random(size).astype("float32") self.mean_square_name = "mean_square" - self.mean_square = np.random.uniform(low=1, high=2, - size=size).astype("float32") + self.mean_square = np.random.uniform(low=1, high=2, size=size).astype( + "float32" + ) self.mean_grad_name = "mean_grad" self.mean_grad = np.random.random(size).astype("float32") @@ -77,31 +78,44 @@ class TestBase(unittest.TestCase): if self.is_sparse: self.grad_sr_name = "@selected_rows@" self.grad, self.grad_sr = create_selected_rows_and_tensor( - self.scope, place, size[0], row_num, size[1]) + self.scope, place, size[0], row_num, size[1] + ) else: self.grad = np.random.random(size).astype("float32") grad_tensor = self.scope.var(self.grad_name).get_tensor() grad_tensor.set(self.grad, place) self.moment_name = "moment" - self.moment = np.random.uniform(low=0, high=1, - size=size).astype("float32") + self.moment = np.random.uniform(low=0, high=1, size=size).astype( + "float32" + ) self.epsilon = epsilon self.decay = 0.9 self.momentum = 0.1 self.centered = centered - self.ms_out = self.decay * self.mean_square + ( - 1 - self.decay) * self.grad * self.grad + self.ms_out = ( + self.decay * self.mean_square + + (1 - self.decay) * self.grad * self.grad + ) if centered: - self.mg_out = self.decay * self.mean_grad + (1 
- - self.decay) * self.grad - self.moment_out = self.momentum * self.moment + \ - self.learning_rate * self.grad / np.sqrt(self.ms_out - np.square(self.mg_out) + self.epsilon) + self.mg_out = ( + self.decay * self.mean_grad + (1 - self.decay) * self.grad + ) + self.moment_out = ( + self.momentum * self.moment + + self.learning_rate + * self.grad + / np.sqrt(self.ms_out - np.square(self.mg_out) + self.epsilon) + ) else: - self.moment_out = self.momentum * self.moment + \ - self.learning_rate * self.grad / np.sqrt(self.ms_out + self.epsilon) + self.moment_out = ( + self.momentum * self.moment + + self.learning_rate + * self.grad + / np.sqrt(self.ms_out + self.epsilon) + ) self.param_out = self.param - self.moment_out @@ -110,7 +124,8 @@ class TestBase(unittest.TestCase): self.param_tensor.set(self.param, place) self.mean_square_tensor = self.scope.var( - self.mean_square_name).get_tensor() + self.mean_square_name + ).get_tensor() self.mean_square_tensor.set(self.mean_square, place) lr = self.scope.var(self.lr_name).get_tensor() @@ -121,7 +136,8 @@ class TestBase(unittest.TestCase): if self.centered: self.mean_grad_tensor = self.scope.var( - self.mean_grad_name).get_tensor() + self.mean_grad_name + ).get_tensor() self.mean_grad_tensor.set(self.mean_grad, place) def check(self, actual_t, expect_t, place, out_name, atol=1e-5): @@ -130,19 +146,22 @@ class TestBase(unittest.TestCase): expect_t, rtol=1e-05, atol=atol, - err_msg='Output (' + out_name + ') has diff at ' + str(place) + - '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) + err_msg='Output (' + + out_name + + ') has diff at ' + + str(place) + + '\nExpect ' + + str(expect_t) + + '\n' + + 'But Got' + + str(actual_t), + ) class TestRmspropOp(TestBase): - - def check_with_place(self, - place, - is_sparse, - centered, - size, - row_num=None, - epsilon=1e-6): + def check_with_place( + self, place, is_sparse, centered, size, row_num=None, epsilon=1e-6 + ): self.setup(place, is_sparse, centered, size, row_num, epsilon) self.run_and_check() @@ -161,7 +180,7 @@ class TestRmspropOp(TestBase): 'epsilon': self.epsilon, 'decay': self.decay, 'momentum': self.momentum, - 'centered': self.centered + 'centered': self.centered, } if self.centered: @@ -173,25 +192,35 @@ class TestRmspropOp(TestBase): rmsprop_op.run(self.scope, self.place) - self.check(np.array(self.mean_square_tensor), - self.ms_out, - self.place, - self.mean_square_name, - atol=atol) - self.check(np.array(self.moment_tensor), - self.moment_out, - self.place, - self.moment_name, - atol=atol) - self.check(np.array(self.param_tensor), - self.param_out, - self.place, - self.param_name, - atol=atol) + self.check( + np.array(self.mean_square_tensor), + self.ms_out, + self.place, + self.mean_square_name, + atol=atol, + ) + self.check( + np.array(self.moment_tensor), + self.moment_out, + self.place, + self.moment_name, + atol=atol, + ) + self.check( + np.array(self.param_tensor), + self.param_out, + self.place, + self.param_name, + atol=atol, + ) if self.centered: - self.check(np.array(self.mean_grad_tensor), self.mg_out, self.place, - self.mean_grad_name) + self.check( + np.array(self.mean_grad_tensor), + self.mg_out, + self.place, + self.mean_grad_name, + ) def test_rmsprop(self): places = [core.CPUPlace()] @@ -202,37 +231,41 @@ class TestRmspropOp(TestBase): for place in places: for centered in [False, True]: with fluid.scope_guard(core.Scope()): - self.check_with_place(place, - is_sparse=False, - centered=centered, - size=size) + self.check_with_place( + place, is_sparse=False, 
centered=centered, size=size + ) with fluid.scope_guard(core.Scope()): - self.check_with_place(place, - is_sparse=True, - centered=centered, - row_num=512, - size=size) + self.check_with_place( + place, + is_sparse=True, + centered=centered, + row_num=512, + size=size, + ) with fluid.scope_guard(core.Scope()): - self.check_with_place(place, - is_sparse=True, - centered=centered, - row_num=60, - size=size) + self.check_with_place( + place, + is_sparse=True, + centered=centered, + row_num=60, + size=size, + ) class TestRMSPropV2(unittest.TestCase): - def test_rmsprop_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.RMSProp(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=0.01) + adam = paddle.optimizer.RMSProp( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=0.01, + ) out = linear(a) out.backward() adam.step() @@ -253,8 +286,9 @@ class TestRMSPropV2(unittest.TestCase): rms_optimizer.minimize(avg_cost) fetch_list = [avg_cost] - train_reader = paddle.batch(paddle.dataset.uci_housing.train(), - batch_size=1) + train_reader = paddle.batch( + paddle.dataset.uci_housing.train(), batch_size=1 + ) feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -263,38 +297,40 @@ class TestRMSPropV2(unittest.TestCase): def test_raise_error(self): self.assertRaises(ValueError, paddle.optimizer.RMSProp, None) - self.assertRaises(ValueError, - paddle.optimizer.RMSProp, - learning_rate=0.1, - rho=None) - self.assertRaises(ValueError, - paddle.optimizer.RMSProp, - learning_rate=0.1, - epsilon=None) - self.assertRaises(ValueError, - paddle.optimizer.RMSProp, - learning_rate=0.1, - momentum=None) + self.assertRaises( + ValueError, paddle.optimizer.RMSProp, learning_rate=0.1, rho=None + ) + self.assertRaises( + ValueError, + paddle.optimizer.RMSProp, + learning_rate=0.1, + epsilon=None, + ) + self.assertRaises( + ValueError, + paddle.optimizer.RMSProp, + learning_rate=0.1, + momentum=None, + ) def test_rmsprop_op_invalid_input(self): paddle.disable_static() linear = paddle.nn.Linear(10, 10) with self.assertRaises(ValueError): - adam = paddle.optimizer.RMSProp(0.1, - epsilon=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.RMSProp( + 0.1, epsilon=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.RMSProp(0.1, - momentum=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.RMSProp( + 0.1, momentum=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.RMSProp(0.1, - rho=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.RMSProp( + 0.1, rho=-1, parameters=linear.parameters() + ) class TestRMSPropV2Group(TestRMSPropV2): - def test_rmsprop_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") @@ -302,17 +338,14 @@ class TestRMSPropV2Group(TestRMSPropV2): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
- adam = paddle.optimizer.RMSProp(learning_rate=0.01, - parameters=[{ - 'params': - linear_1.parameters() - }, { - 'params': - linear_2.parameters(), - 'weight_decay': - 0.001 - }], - weight_decay=0.01) + adam = paddle.optimizer.RMSProp( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + {'params': linear_2.parameters(), 'weight_decay': 0.001}, + ], + weight_decay=0.01, + ) out = linear_1(a) out = linear_2(out) out.backward() diff --git a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py index 5cf1a79489e5e45bc9844f22d563086420171495..1d04b4310539ff3e169d9cf060440c67d282317f 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py @@ -33,62 +33,68 @@ import numpy as np class TestLSTMCellError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size, input_size, hidden_size = 4, 16, 16 - inputs = fluid.data(name='inputs', - shape=[None, input_size], - dtype='float32') - pre_hidden = fluid.data(name='pre_hidden', - shape=[None, hidden_size], - dtype='float32') - pre_cell = fluid.data(name='pre_cell', - shape=[None, hidden_size], - dtype='float32') + inputs = fluid.data( + name='inputs', shape=[None, input_size], dtype='float32' + ) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, hidden_size], dtype='float32' + ) + pre_cell = fluid.data( + name='pre_cell', shape=[None, hidden_size], dtype='float32' + ) cell = LSTMCell(hidden_size) def test_input_Variable(): - np_input = np.random.random( - (batch_size, input_size)).astype("float32") + np_input = np.random.random((batch_size, input_size)).astype( + "float32" + ) cell(np_input, [pre_hidden, pre_cell]) self.assertRaises(TypeError, test_input_Variable) def test_pre_hidden_Variable(): np_pre_hidden = np.random.random( - (batch_size, hidden_size)).astype("float32") + (batch_size, hidden_size) + ).astype("float32") cell(inputs, [np_pre_hidden, pre_cell]) self.assertRaises(TypeError, test_pre_hidden_Variable) def test_pre_cell_Variable(): - np_pre_cell = np.random.random( - (batch_size, input_size)).astype("float32") + np_pre_cell = np.random.random((batch_size, input_size)).astype( + "float32" + ) cell(inputs, [pre_hidden, np_pre_cell]) self.assertRaises(TypeError, test_pre_cell_Variable) def test_input_type(): - error_inputs = fluid.data(name='error_inputs', - shape=[None, input_size], - dtype='int32') + error_inputs = fluid.data( + name='error_inputs', shape=[None, input_size], dtype='int32' + ) cell(error_inputs, [pre_hidden, pre_cell]) self.assertRaises(TypeError, test_input_type) def test_pre_hidden_type(): - error_pre_hidden = fluid.data(name='error_pre_hidden', - shape=[None, hidden_size], - dtype='int32') + error_pre_hidden = fluid.data( + name='error_pre_hidden', + shape=[None, hidden_size], + dtype='int32', + ) cell(inputs, [error_pre_hidden, pre_cell]) self.assertRaises(TypeError, test_pre_hidden_type) def test_pre_cell_type(): - error_pre_cell = fluid.data(name='error_pre_cell', - shape=[None, hidden_size], - dtype='int32') + error_pre_cell = fluid.data( + name='error_pre_cell', + shape=[None, hidden_size], + dtype='int32', + ) cell(inputs, [pre_hidden, error_pre_cell]) self.assertRaises(TypeError, test_pre_cell_type) @@ -101,29 +107,35 @@ class TestLSTMCellError(unittest.TestCase): class TestLSTMCell(unittest.TestCase): - def setUp(self): self.batch_size = 4 self.input_size = 16 self.hidden_size = 16 def test_run(self): 
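The NumPy ground truth built in TestBase.setup above reduces to a single RMSProp step. The sketch below restates it as a standalone helper; the default hyperparameters are the ones the test happens to use and are otherwise illustrative.

import numpy as np


def rmsprop_step(
    param,
    grad,
    moment,
    mean_square,
    mean_grad,
    lr=0.01,
    decay=0.9,
    momentum=0.1,
    epsilon=1e-6,
    centered=False,
):
    # Moving average of the squared gradient.
    mean_square = decay * mean_square + (1 - decay) * grad * grad
    if centered:
        # The centered variant also tracks the mean gradient and
        # subtracts its square from the denominator.
        mean_grad = decay * mean_grad + (1 - decay) * grad
        denom = np.sqrt(mean_square - np.square(mean_grad) + epsilon)
    else:
        denom = np.sqrt(mean_square + epsilon)
    moment = momentum * moment + lr * grad / denom
    param = param - moment
    return param, moment, mean_square, mean_grad


p, m, ms, mg = np.zeros(3), np.zeros(3), np.ones(3), np.zeros(3)
for _ in range(3):
    p, m, ms, mg = rmsprop_step(p, np.ones(3), m, ms, mg, centered=True)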
- inputs = fluid.data(name='inputs', - shape=[None, self.input_size], - dtype='float32') - pre_hidden = fluid.data(name='pre_hidden', - shape=[None, self.hidden_size], - dtype='float32') - pre_cell = fluid.data(name='pre_cell', - shape=[None, self.hidden_size], - dtype='float32') + inputs = fluid.data( + name='inputs', shape=[None, self.input_size], dtype='float32' + ) + pre_hidden = fluid.data( + name='pre_hidden', shape=[None, self.hidden_size], dtype='float32' + ) + pre_cell = fluid.data( + name='pre_cell', shape=[None, self.hidden_size], dtype='float32' + ) cell = LSTMCell(self.hidden_size) lstm_hidden_new, lstm_states_new = cell(inputs, [pre_hidden, pre_cell]) lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit( - "basicLSTM", self.hidden_size, None, None, None, None, 1.0, - "float32") + "basicLSTM", + self.hidden_size, + None, + None, + None, + None, + 1.0, + "float32", + ) lstm_hidden, lstm_cell = lstm_unit(inputs, pre_hidden, pre_cell) if core.is_compiled_with_cuda(): @@ -134,76 +146,91 @@ class TestLSTMCell(unittest.TestCase): exe.run(framework.default_startup_program()) inputs_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float32') pre_hidden_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') pre_cell_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') - param_names = [[ - "LSTMCell/BasicLSTMUnit_0.w_0", "basicLSTM/BasicLSTMUnit_0.w_0" - ], ["LSTMCell/BasicLSTMUnit_0.b_0", "basicLSTM/BasicLSTMUnit_0.b_0"]] + param_names = [ + ["LSTMCell/BasicLSTMUnit_0.w_0", "basicLSTM/BasicLSTMUnit_0.w_0"], + ["LSTMCell/BasicLSTMUnit_0.b_0", "basicLSTM/BasicLSTMUnit_0.b_0"], + ] for names in param_names: - param = np.array(fluid.global_scope().find_var( - names[0]).get_tensor()) - param = np.random.uniform(-0.1, 0.1, - size=param.shape).astype('float32') + param = np.array( + fluid.global_scope().find_var(names[0]).get_tensor() + ) + param = np.random.uniform(-0.1, 0.1, size=param.shape).astype( + 'float32' + ) fluid.global_scope().find_var(names[0]).get_tensor().set( - param, place) + param, place + ) fluid.global_scope().find_var(names[1]).get_tensor().set( - param, place) - - out = exe.run(feed={ - 'inputs': inputs_np, - 'pre_hidden': pre_hidden_np, - 'pre_cell': pre_cell_np - }, - fetch_list=[lstm_hidden_new, lstm_hidden]) + param, place + ) + + out = exe.run( + feed={ + 'inputs': inputs_np, + 'pre_hidden': pre_hidden_np, + 'pre_cell': pre_cell_np, + }, + fetch_list=[lstm_hidden_new, lstm_hidden], + ) np.testing.assert_allclose(out[0], out[1], rtol=0.0001, atol=0) class TestGRUCellError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size, input_size, hidden_size = 4, 16, 16 - inputs = fluid.data(name='inputs', - shape=[None, input_size], - dtype='float32') - pre_hidden = layers.data(name='pre_hidden', - shape=[None, hidden_size], - append_batch_size=False, - dtype='float32') + inputs = fluid.data( + name='inputs', shape=[None, input_size], dtype='float32' + ) + pre_hidden = layers.data( + name='pre_hidden', + shape=[None, hidden_size], + append_batch_size=False, + dtype='float32', + ) cell = GRUCell(hidden_size) def test_input_Variable(): - np_input = np.random.random( - (batch_size, input_size)).astype("float32") + np_input = 
np.random.random((batch_size, input_size)).astype( + "float32" + ) cell(np_input, pre_hidden) self.assertRaises(TypeError, test_input_Variable) def test_pre_hidden_Variable(): np_pre_hidden = np.random.random( - (batch_size, hidden_size)).astype("float32") + (batch_size, hidden_size) + ).astype("float32") cell(inputs, np_pre_hidden) self.assertRaises(TypeError, test_pre_hidden_Variable) def test_input_type(): - error_inputs = fluid.data(name='error_inputs', - shape=[None, input_size], - dtype='int32') + error_inputs = fluid.data( + name='error_inputs', shape=[None, input_size], dtype='int32' + ) cell(error_inputs, pre_hidden) self.assertRaises(TypeError, test_input_type) def test_pre_hidden_type(): - error_pre_hidden = fluid.data(name='error_pre_hidden', - shape=[None, hidden_size], - dtype='int32') + error_pre_hidden = fluid.data( + name='error_pre_hidden', + shape=[None, hidden_size], + dtype='int32', + ) cell(inputs, error_pre_hidden) self.assertRaises(TypeError, test_pre_hidden_type) @@ -216,28 +243,28 @@ class TestGRUCellError(unittest.TestCase): class TestGRUCell(unittest.TestCase): - def setUp(self): self.batch_size = 4 self.input_size = 16 self.hidden_size = 16 def test_run(self): - inputs = fluid.data(name='inputs', - shape=[None, self.input_size], - dtype='float32') - pre_hidden = layers.data(name='pre_hidden', - shape=[None, self.hidden_size], - append_batch_size=False, - dtype='float32') + inputs = fluid.data( + name='inputs', shape=[None, self.input_size], dtype='float32' + ) + pre_hidden = layers.data( + name='pre_hidden', + shape=[None, self.hidden_size], + append_batch_size=False, + dtype='float32', + ) cell = GRUCell(self.hidden_size) gru_hidden_new, _ = cell(inputs, pre_hidden) - gru_unit = contrib.layers.rnn_impl.BasicGRUUnit("basicGRU", - self.hidden_size, None, - None, None, None, - "float32") + gru_unit = contrib.layers.rnn_impl.BasicGRUUnit( + "basicGRU", self.hidden_size, None, None, None, None, "float32" + ) gru_hidden = gru_unit(inputs, pre_hidden) if core.is_compiled_with_cuda(): @@ -248,120 +275,143 @@ class TestGRUCell(unittest.TestCase): exe.run(framework.default_startup_program()) inputs_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float32') pre_hidden_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') param_names = [ ["GRUCell/BasicGRUUnit_0.w_0", "basicGRU/BasicGRUUnit_0.w_0"], ["GRUCell/BasicGRUUnit_0.w_1", "basicGRU/BasicGRUUnit_0.w_1"], ["GRUCell/BasicGRUUnit_0.b_0", "basicGRU/BasicGRUUnit_0.b_0"], - ["GRUCell/BasicGRUUnit_0.b_1", "basicGRU/BasicGRUUnit_0.b_1"] + ["GRUCell/BasicGRUUnit_0.b_1", "basicGRU/BasicGRUUnit_0.b_1"], ] for names in param_names: - param = np.array(fluid.global_scope().find_var( - names[0]).get_tensor()) - param = np.random.uniform(-0.1, 0.1, - size=param.shape).astype('float32') + param = np.array( + fluid.global_scope().find_var(names[0]).get_tensor() + ) + param = np.random.uniform(-0.1, 0.1, size=param.shape).astype( + 'float32' + ) fluid.global_scope().find_var(names[0]).get_tensor().set( - param, place) + param, place + ) fluid.global_scope().find_var(names[1]).get_tensor().set( - param, place) + param, place + ) - out = exe.run(feed={ - 'inputs': inputs_np, - 'pre_hidden': pre_hidden_np - }, - fetch_list=[gru_hidden_new, gru_hidden]) + out = exe.run( + feed={'inputs': inputs_np, 'pre_hidden': pre_hidden_np}, + 
fetch_list=[gru_hidden_new, gru_hidden], + ) np.testing.assert_allclose(out[0], out[1], rtol=0.0001, atol=0) class TestRnnError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): batch_size = 4 input_size = 16 hidden_size = 16 seq_len = 4 - inputs = fluid.data(name='inputs', - shape=[None, input_size], - dtype='float32') - pre_hidden = layers.data(name='pre_hidden', - shape=[None, hidden_size], - append_batch_size=False, - dtype='float32') - inputs_basic_lstm = fluid.data(name='inputs_basic_lstm', - shape=[None, None, input_size], - dtype='float32') - sequence_length = fluid.data(name="sequence_length", - shape=[None], - dtype='int64') - - inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, - perm=[1, 0, 2]) + inputs = fluid.data( + name='inputs', shape=[None, input_size], dtype='float32' + ) + pre_hidden = layers.data( + name='pre_hidden', + shape=[None, hidden_size], + append_batch_size=False, + dtype='float32', + ) + inputs_basic_lstm = fluid.data( + name='inputs_basic_lstm', + shape=[None, None, input_size], + dtype='float32', + ) + sequence_length = fluid.data( + name="sequence_length", shape=[None], dtype='int64' + ) + + inputs_dynamic_rnn = layers.transpose( + inputs_basic_lstm, perm=[1, 0, 2] + ) cell = LSTMCell(hidden_size, name="LSTMCell_for_rnn") np_inputs_dynamic_rnn = np.random.random( - (seq_len, batch_size, input_size)).astype("float32") + (seq_len, batch_size, input_size) + ).astype("float32") def test_input_Variable(): - dynamic_rnn(cell=cell, - inputs=np_inputs_dynamic_rnn, - sequence_length=sequence_length, - is_reverse=False) + dynamic_rnn( + cell=cell, + inputs=np_inputs_dynamic_rnn, + sequence_length=sequence_length, + is_reverse=False, + ) self.assertRaises(TypeError, test_input_Variable) def test_input_list(): - dynamic_rnn(cell=cell, - inputs=[np_inputs_dynamic_rnn], - sequence_length=sequence_length, - is_reverse=False) + dynamic_rnn( + cell=cell, + inputs=[np_inputs_dynamic_rnn], + sequence_length=sequence_length, + is_reverse=False, + ) self.assertRaises(TypeError, test_input_list) def test_initial_states_type(): cell = GRUCell(hidden_size, name="GRUCell_for_rnn") error_initial_states = np.random.random( - (batch_size, hidden_size)).astype("float32") - dynamic_rnn(cell=cell, - inputs=inputs_dynamic_rnn, - initial_states=error_initial_states, - sequence_length=sequence_length, - is_reverse=False) + (batch_size, hidden_size) + ).astype("float32") + dynamic_rnn( + cell=cell, + inputs=inputs_dynamic_rnn, + initial_states=error_initial_states, + sequence_length=sequence_length, + is_reverse=False, + ) self.assertRaises(TypeError, test_initial_states_type) def test_initial_states_list(): error_initial_states = [ - np.random.random( - (batch_size, hidden_size)).astype("float32"), - np.random.random( - (batch_size, hidden_size)).astype("float32") + np.random.random((batch_size, hidden_size)).astype( + "float32" + ), + np.random.random((batch_size, hidden_size)).astype( + "float32" + ), ] - dynamic_rnn(cell=cell, - inputs=inputs_dynamic_rnn, - initial_states=error_initial_states, - sequence_length=sequence_length, - is_reverse=False) + dynamic_rnn( + cell=cell, + inputs=inputs_dynamic_rnn, + initial_states=error_initial_states, + sequence_length=sequence_length, + is_reverse=False, + ) self.assertRaises(TypeError, test_initial_states_type) def test_sequence_length_type(): - np_sequence_length = np.random.random( - (batch_size)).astype("float32") - dynamic_rnn(cell=cell, - inputs=inputs_dynamic_rnn, - 
sequence_length=np_sequence_length, - is_reverse=False) + np_sequence_length = np.random.random((batch_size)).astype( + "float32" + ) + dynamic_rnn( + cell=cell, + inputs=inputs_dynamic_rnn, + sequence_length=np_sequence_length, + is_reverse=False, + ) self.assertRaises(TypeError, test_sequence_length_type) class TestRnn(unittest.TestCase): - def setUp(self): self.batch_size = 4 self.input_size = 16 @@ -369,23 +419,36 @@ class TestRnn(unittest.TestCase): self.seq_len = 4 def test_run(self): - inputs_basic_lstm = fluid.data(name='inputs_basic_lstm', - shape=[None, None, self.input_size], - dtype='float32') - sequence_length = fluid.data(name="sequence_length", - shape=[None], - dtype='int64') + inputs_basic_lstm = fluid.data( + name='inputs_basic_lstm', + shape=[None, None, self.input_size], + dtype='float32', + ) + sequence_length = fluid.data( + name="sequence_length", shape=[None], dtype='int64' + ) inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, perm=[1, 0, 2]) cell = LSTMCell(self.hidden_size, name="LSTMCell_for_rnn") - output, final_state = dynamic_rnn(cell=cell, - inputs=inputs_dynamic_rnn, - sequence_length=sequence_length, - is_reverse=False) + output, final_state = dynamic_rnn( + cell=cell, + inputs=inputs_dynamic_rnn, + sequence_length=sequence_length, + is_reverse=False, + ) output_new = layers.transpose(output, perm=[1, 0, 2]) - rnn_out, last_hidden, last_cell = basic_lstm(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1, \ - batch_first = False, bidirectional=False, sequence_length=sequence_length, forget_bias = 1.0) + rnn_out, last_hidden, last_cell = basic_lstm( + inputs_basic_lstm, + None, + None, + self.hidden_size, + num_layers=1, + batch_first=False, + bidirectional=False, + sequence_length=sequence_length, + forget_bias=1.0, + ) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) @@ -395,45 +458,57 @@ class TestRnn(unittest.TestCase): exe.run(framework.default_startup_program()) inputs_basic_lstm_np = np.random.uniform( - -0.1, 0.1, - (self.seq_len, self.batch_size, self.input_size)).astype('float32') - sequence_length_np = np.ones(self.batch_size, - dtype='int64') * self.seq_len + -0.1, 0.1, (self.seq_len, self.batch_size, self.input_size) + ).astype('float32') + sequence_length_np = ( + np.ones(self.batch_size, dtype='int64') * self.seq_len + ) inputs_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.input_size) + ).astype('float32') pre_hidden_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') pre_cell_np = np.random.uniform( - -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32') + -0.1, 0.1, (self.batch_size, self.hidden_size) + ).astype('float32') - param_names = [[ - "LSTMCell_for_rnn/BasicLSTMUnit_0.w_0", - "basic_lstm_layers_0/BasicLSTMUnit_0.w_0" - ], - [ - "LSTMCell_for_rnn/BasicLSTMUnit_0.b_0", - "basic_lstm_layers_0/BasicLSTMUnit_0.b_0" - ]] + param_names = [ + [ + "LSTMCell_for_rnn/BasicLSTMUnit_0.w_0", + "basic_lstm_layers_0/BasicLSTMUnit_0.w_0", + ], + [ + "LSTMCell_for_rnn/BasicLSTMUnit_0.b_0", + "basic_lstm_layers_0/BasicLSTMUnit_0.b_0", + ], + ] for names in param_names: - param = np.array(fluid.global_scope().find_var( - names[0]).get_tensor()) - param = np.random.uniform(-0.1, 0.1, - size=param.shape).astype('float32') + param = np.array( + fluid.global_scope().find_var(names[0]).get_tensor() + ) + param = 
np.random.uniform(-0.1, 0.1, size=param.shape).astype( + 'float32' + ) fluid.global_scope().find_var(names[0]).get_tensor().set( - param, place) + param, place + ) fluid.global_scope().find_var(names[1]).get_tensor().set( - param, place) - - out = exe.run(feed={ - 'inputs_basic_lstm': inputs_basic_lstm_np, - 'sequence_length': sequence_length_np, - 'inputs': inputs_np, - 'pre_hidden': pre_hidden_np, - 'pre_cell': pre_cell_np - }, - fetch_list=[output_new, rnn_out]) + param, place + ) + + out = exe.run( + feed={ + 'inputs_basic_lstm': inputs_basic_lstm_np, + 'sequence_length': sequence_length_np, + 'inputs': inputs_np, + 'pre_hidden': pre_hidden_np, + 'pre_cell': pre_cell_np, + }, + fetch_list=[output_new, rnn_out], + ) np.testing.assert_allclose(out[0], out[1], rtol=0.0001) @@ -462,7 +537,7 @@ class EncoderCell(RNNCell): self, num_layers, hidden_size, - dropout_prob=0., + dropout_prob=0.0, init_scale=0.1, ): self.num_layers = num_layers @@ -477,10 +552,14 @@ class EncoderCell(RNNCell): new_states = [] for i in range(self.num_layers): out, new_state = self.lstm_cells[i](step_input, states[i]) - step_input = layers.dropout( - out, - self.dropout_prob, - ) if self.dropout_prob else out + step_input = ( + layers.dropout( + out, + self.dropout_prob, + ) + if self.dropout_prob + else out + ) new_states.append(new_state) return step_input, new_states @@ -492,7 +571,7 @@ class EncoderCell(RNNCell): class DecoderCell(RNNCell): """Decoder Cell""" - def __init__(self, num_layers, hidden_size, dropout_prob=0.): + def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.num_layers = num_layers self.hidden_size = hidden_size self.dropout_prob = dropout_prob @@ -504,26 +583,31 @@ class DecoderCell(RNNCell): new_lstm_states = [] for i in range(self.num_layers): out, new_lstm_state = self.lstm_cells[i](step_input, states[i]) - step_input = layers.dropout( - out, - self.dropout_prob, - ) if self.dropout_prob else out + step_input = ( + layers.dropout( + out, + self.dropout_prob, + ) + if self.dropout_prob + else out + ) new_lstm_states.append(new_lstm_state) return step_input, new_lstm_states -def def_seq2seq_model(num_layers, hidden_size, dropout_prob, src_vocab_size, - trg_vocab_size): +def def_seq2seq_model( + num_layers, hidden_size, dropout_prob, src_vocab_size, trg_vocab_size +): "vanilla seq2seq model" # data source = fluid.data(name="src", shape=[None, None], dtype="int64") - source_length = fluid.data(name="src_sequence_length", - shape=[None], - dtype="int64") + source_length = fluid.data( + name="src_sequence_length", shape=[None], dtype="int64" + ) target = fluid.data(name="trg", shape=[None, None], dtype="int64") - target_length = fluid.data(name="trg_sequence_length", - shape=[None], - dtype="int64") + target_length = fluid.data( + name="trg_sequence_length", shape=[None], dtype="int64" + ) label = fluid.data(name="label", shape=[None, None, 1], dtype="int64") # embedding @@ -532,29 +616,31 @@ def def_seq2seq_model(num_layers, hidden_size, dropout_prob, src_vocab_size, # encoder enc_cell = EncoderCell(num_layers, hidden_size, dropout_prob) - enc_output, enc_final_state = dynamic_rnn(cell=enc_cell, - inputs=src_emb, - sequence_length=source_length) + enc_output, enc_final_state = dynamic_rnn( + cell=enc_cell, inputs=src_emb, sequence_length=source_length + ) # decoder dec_cell = DecoderCell(num_layers, hidden_size, dropout_prob) - dec_output, dec_final_state = dynamic_rnn(cell=dec_cell, - inputs=tar_emb, - initial_states=enc_final_state) - logits = layers.fc(dec_output, - 
size=trg_vocab_size, - num_flatten_dims=len(dec_output.shape) - 1, - bias_attr=False) + dec_output, dec_final_state = dynamic_rnn( + cell=dec_cell, inputs=tar_emb, initial_states=enc_final_state + ) + logits = layers.fc( + dec_output, + size=trg_vocab_size, + num_flatten_dims=len(dec_output.shape) - 1, + bias_attr=False, + ) # loss - loss = layers.softmax_with_cross_entropy(logits=logits, - label=label, - soft_label=False) + loss = layers.softmax_with_cross_entropy( + logits=logits, label=label, soft_label=False + ) loss = layers.unsqueeze(loss, axes=[2]) max_tar_seq_len = layers.shape(target)[1] - tar_mask = layers.sequence_mask(target_length, - maxlen=max_tar_seq_len, - dtype="float32") + tar_mask = layers.sequence_mask( + target_length, maxlen=max_tar_seq_len, dtype="float32" + ) loss = loss * tar_mask loss = layers.reduce_mean(loss, dim=[0]) loss = layers.reduce_sum(loss) @@ -577,7 +663,7 @@ class TestSeq2SeqModel(unittest.TestCase): "hidden_size": 128, "dropout_prob": 0.1, "src_vocab_size": 100, - "trg_vocab_size": 100 + "trg_vocab_size": 100, } self.iter_num = iter_num = 2 @@ -585,28 +671,34 @@ class TestSeq2SeqModel(unittest.TestCase): src_seq_len = 10 trg_seq_len = 12 self.data = { - "src": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, src_seq_len)).astype("int64"), - "src_sequence_length": - np.random.randint(1, src_seq_len, - (iter_num * batch_size, )).astype("int64"), - "trg": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, trg_seq_len)).astype("int64"), - "trg_sequence_length": - np.random.randint(1, trg_seq_len, - (iter_num * batch_size, )).astype("int64"), - "label": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, trg_seq_len, 1)).astype("int64"), + "src": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, src_seq_len), + ).astype("int64"), + "src_sequence_length": np.random.randint( + 1, src_seq_len, (iter_num * batch_size,) + ).astype("int64"), + "trg": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, trg_seq_len), + ).astype("int64"), + "trg_sequence_length": np.random.randint( + 1, trg_seq_len, (iter_num * batch_size,) + ).astype("int64"), + "label": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, trg_seq_len, 1), + ).astype("int64"), } - place = core.CUDAPlace( - 0) if core.is_compiled_with_cuda() else core.CPUPlace() + place = ( + core.CUDAPlace(0) + if core.is_compiled_with_cuda() + else core.CPUPlace() + ) self.exe = Executor(place) def test_seq2seq_model(self): @@ -616,29 +708,38 @@ class TestSeq2SeqModel(unittest.TestCase): cost = def_seq2seq_model(**self.model_hparams) self.exe.run(startup_program) for iter_idx in range(self.iter_num): - cost_val = self.exe.run(feed={ - "src": - self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "src_sequence_length": - self.data["src_sequence_length"][iter_idx * - self.batch_size:(iter_idx + - 1) * - self.batch_size], - "trg": - self.data["trg"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "trg_sequence_length": - self.data["trg_sequence_length"][iter_idx * - self.batch_size:(iter_idx + - 1) * - self.batch_size], - "label": - self.data["label"][iter_idx * - self.batch_size:(iter_idx + 1) * - self.batch_size] - }, - fetch_list=[cost])[0] + cost_val = self.exe.run( + feed={ + "src": self.data["src"][ + iter_idx + * 
self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "src_sequence_length": self.data["src_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], + "trg": self.data["trg"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "trg_sequence_length": self.data["trg_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], + "label": self.data["label"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], + }, + fetch_list=[cost], + )[0] print("iter_idx: %d, cost: %f" % (iter_idx, cost_val)) diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index a6f7b6ea8c2df8d481367311ebfd21335e020022..00bf9735fa513616e5d7548164a475b7299d3a27 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -34,8 +34,7 @@ paddle.enable_static() class EncoderCell(layers.RNNCell): - - def __init__(self, num_layers, hidden_size, dropout_prob=0.): + def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.num_layers = num_layers self.hidden_size = hidden_size self.dropout_prob = dropout_prob @@ -47,8 +46,11 @@ class EncoderCell(layers.RNNCell): new_states = [] for i in range(self.num_layers): out, new_state = self.lstm_cells[i](step_input, states[i]) - step_input = layers.dropout( - out, self.dropout_prob) if self.dropout_prob > 0 else out + step_input = ( + layers.dropout(out, self.dropout_prob) + if self.dropout_prob > 0 + else out + ) new_states.append(new_state) return step_input, new_states @@ -58,8 +60,7 @@ class EncoderCell(layers.RNNCell): class DecoderCell(layers.RNNCell): - - def __init__(self, num_layers, hidden_size, dropout_prob=0.): + def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.num_layers = num_layers self.hidden_size = hidden_size self.dropout_prob = dropout_prob @@ -68,42 +69,44 @@ class DecoderCell(layers.RNNCell): ] def attention(self, hidden, encoder_output, encoder_padding_mask): - query = layers.fc(hidden, - size=encoder_output.shape[-1], - bias_attr=False) - attn_scores = layers.matmul(layers.unsqueeze(query, [1]), - encoder_output, - transpose_y=True) + query = layers.fc( + hidden, size=encoder_output.shape[-1], bias_attr=False + ) + attn_scores = layers.matmul( + layers.unsqueeze(query, [1]), encoder_output, transpose_y=True + ) if encoder_padding_mask is not None: - attn_scores = layers.elementwise_add(attn_scores, - encoder_padding_mask) + attn_scores = layers.elementwise_add( + attn_scores, encoder_padding_mask + ) attn_scores = layers.softmax(attn_scores) - attn_out = layers.squeeze(layers.matmul(attn_scores, encoder_output), - [1]) + attn_out = layers.squeeze( + layers.matmul(attn_scores, encoder_output), [1] + ) attn_out = layers.concat([attn_out, hidden], 1) attn_out = layers.fc(attn_out, size=self.hidden_size, bias_attr=False) return attn_out - def call(self, - step_input, - states, - encoder_output, - encoder_padding_mask=None): + def call( + self, step_input, states, encoder_output, encoder_padding_mask=None + ): lstm_states, input_feed = states new_lstm_states = [] step_input = layers.concat([step_input, input_feed], 1) for i in range(self.num_layers): out, new_lstm_state = self.lstm_cells[i](step_input, lstm_states[i]) - step_input = layers.dropout( - out, self.dropout_prob) if self.dropout_prob > 0 else out + step_input = ( + layers.dropout(out, 
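DecoderCell.attention above is plain dot-product attention over the encoder states: project the decoder hidden state to the encoder width, score it against every encoder step, add the padding mask (large negative values at padded positions) before the softmax, and mix the encoder states with the resulting weights. A rough NumPy sketch, with hypothetical weight matrices standing in for the fc layers:

import numpy as np


def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)


def dot_product_attention(hidden, encoder_output, padding_mask, w_query, w_out):
    # hidden: [batch, hidden_size]; encoder_output: [batch, src_len, enc_dim]
    # padding_mask: [batch, 1, src_len], large negative at padded steps, 0 elsewhere
    query = hidden @ w_query  # project the query to the encoder width
    scores = np.einsum("bd,bsd->bs", query, encoder_output)[:, None, :]
    weights = softmax(scores + padding_mask, axis=-1)
    context = np.einsum("bos,bsd->bod", weights, encoder_output)[:, 0, :]
    # Concatenate context and query state, then project back to hidden_size.
    return np.concatenate([context, hidden], axis=1) @ w_out


rng = np.random.default_rng(0)
b, s, hid, enc = 2, 5, 8, 8
out = dot_product_attention(
    rng.standard_normal((b, hid)),
    rng.standard_normal((b, s, enc)),
    np.zeros((b, 1, s)),
    rng.standard_normal((hid, enc)),
    rng.standard_normal((enc + hid, hid)),
)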
self.dropout_prob) + if self.dropout_prob > 0 + else out + ) new_lstm_states.append(new_lstm_state) out = self.attention(step_input, encoder_output, encoder_padding_mask) return out, [new_lstm_states, out] class Encoder(object): - - def __init__(self, num_layers, hidden_size, dropout_prob=0.): + def __init__(self, num_layers, hidden_size, dropout_prob=0.0): self.encoder_cell = EncoderCell(num_layers, hidden_size, dropout_prob) def __call__(self, src_emb, src_sequence_length): @@ -111,25 +114,35 @@ class Encoder(object): cell=self.encoder_cell, inputs=src_emb, sequence_length=src_sequence_length, - is_reverse=False) + is_reverse=False, + ) return encoder_output, encoder_final_state class Decoder(object): - - def __init__(self, - num_layers, - hidden_size, - dropout_prob, - decoding_strategy="infer_sample", - max_decoding_length=20): + def __init__( + self, + num_layers, + hidden_size, + dropout_prob, + decoding_strategy="infer_sample", + max_decoding_length=20, + ): self.decoder_cell = DecoderCell(num_layers, hidden_size, dropout_prob) self.decoding_strategy = decoding_strategy - self.max_decoding_length = None if ( - self.decoding_strategy == "train_greedy") else max_decoding_length - - def __call__(self, decoder_initial_states, encoder_output, - encoder_padding_mask, **kwargs): + self.max_decoding_length = ( + None + if (self.decoding_strategy == "train_greedy") + else max_decoding_length + ) + + def __call__( + self, + decoder_initial_states, + encoder_output, + encoder_padding_mask, + **kwargs + ): output_layer = kwargs.pop("output_layer", None) if self.decoding_strategy == "train_greedy": # for teach-forcing MLE pre-training @@ -141,113 +154,150 @@ class Decoder(object): if self.decoding_strategy == "beam_search": beam_size = kwargs.get("beam_size", 4) - encoder_output = layers.BeamSearchDecoder.tile_beam_merge_with_batch( - encoder_output, beam_size) - encoder_padding_mask = layers.BeamSearchDecoder.tile_beam_merge_with_batch( - encoder_padding_mask, beam_size) - decoder = layers.BeamSearchDecoder(cell=self.decoder_cell, - output_fn=output_layer, - **kwargs) + encoder_output = ( + layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_output, beam_size + ) + ) + encoder_padding_mask = ( + layers.BeamSearchDecoder.tile_beam_merge_with_batch( + encoder_padding_mask, beam_size + ) + ) + decoder = layers.BeamSearchDecoder( + cell=self.decoder_cell, output_fn=output_layer, **kwargs + ) else: - decoder = layers.BasicDecoder(self.decoder_cell, - helper, - output_fn=output_layer) - - (decoder_output, decoder_final_state, - dec_seq_lengths) = layers.dynamic_decode( - decoder, - inits=decoder_initial_states, - max_step_num=self.max_decoding_length, - encoder_output=encoder_output, - encoder_padding_mask=encoder_padding_mask, - impute_finished=False # for test coverage - if self.decoding_strategy == "beam_search" else True, - is_test=True if self.decoding_strategy == "beam_search" else False, - return_length=True) + decoder = layers.BasicDecoder( + self.decoder_cell, helper, output_fn=output_layer + ) + + ( + decoder_output, + decoder_final_state, + dec_seq_lengths, + ) = layers.dynamic_decode( + decoder, + inits=decoder_initial_states, + max_step_num=self.max_decoding_length, + encoder_output=encoder_output, + encoder_padding_mask=encoder_padding_mask, + impute_finished=False # for test coverage + if self.decoding_strategy == "beam_search" + else True, + is_test=True if self.decoding_strategy == "beam_search" else False, + return_length=True, + ) return decoder_output, 
decoder_final_state, dec_seq_lengths class Seq2SeqModel(object): """Seq2Seq model: RNN encoder-decoder with attention""" - def __init__(self, - num_layers, - hidden_size, - dropout_prob, - src_vocab_size, - trg_vocab_size, - start_token, - end_token, - decoding_strategy="infer_sample", - max_decoding_length=20, - beam_size=4): + def __init__( + self, + num_layers, + hidden_size, + dropout_prob, + src_vocab_size, + trg_vocab_size, + start_token, + end_token, + decoding_strategy="infer_sample", + max_decoding_length=20, + beam_size=4, + ): self.start_token, self.end_token = start_token, end_token - self.max_decoding_length, self.beam_size = max_decoding_length, beam_size + self.max_decoding_length, self.beam_size = ( + max_decoding_length, + beam_size, + ) self.src_embeder = paddle.nn.Embedding( src_vocab_size, hidden_size, - weight_attr=fluid.ParamAttr(name="source_embedding")) + weight_attr=fluid.ParamAttr(name="source_embedding"), + ) self.trg_embeder = paddle.nn.Embedding( trg_vocab_size, hidden_size, - weight_attr=fluid.ParamAttr(name="target_embedding")) + weight_attr=fluid.ParamAttr(name="target_embedding"), + ) self.encoder = Encoder(num_layers, hidden_size, dropout_prob) - self.decoder = Decoder(num_layers, hidden_size, dropout_prob, - decoding_strategy, max_decoding_length) - self.output_layer = lambda x: layers.fc(x, - size=trg_vocab_size, - num_flatten_dims=len(x.shape) - - 1, - param_attr=fluid.ParamAttr(), - bias_attr=False) + self.decoder = Decoder( + num_layers, + hidden_size, + dropout_prob, + decoding_strategy, + max_decoding_length, + ) + self.output_layer = lambda x: layers.fc( + x, + size=trg_vocab_size, + num_flatten_dims=len(x.shape) - 1, + param_attr=fluid.ParamAttr(), + bias_attr=False, + ) def __call__(self, src, src_length, trg=None, trg_length=None): # encoder encoder_output, encoder_final_state = self.encoder( - self.src_embeder(src), src_length) + self.src_embeder(src), src_length + ) decoder_initial_states = [ encoder_final_state, self.decoder.decoder_cell.get_initial_states( - batch_ref=encoder_output, shape=[encoder_output.shape[-1]]) + batch_ref=encoder_output, shape=[encoder_output.shape[-1]] + ), ] - src_mask = layers.sequence_mask(src_length, - maxlen=layers.shape(src)[1], - dtype="float32") + src_mask = layers.sequence_mask( + src_length, maxlen=layers.shape(src)[1], dtype="float32" + ) encoder_padding_mask = (src_mask - 1.0) * 1e9 encoder_padding_mask = layers.unsqueeze(encoder_padding_mask, [1]) # decoder - decoder_kwargs = { - "inputs": self.trg_embeder(trg), - "sequence_length": trg_length, - } if self.decoder.decoding_strategy == "train_greedy" else ( + decoder_kwargs = ( { - "embedding_fn": self.trg_embeder, - "beam_size": self.beam_size, - "start_token": self.start_token, - "end_token": self.end_token - } if self.decoder.decoding_strategy == "beam_search" else { - "embedding_fn": - self.trg_embeder, - "start_tokens": - layers.fill_constant_batch_size_like(input=encoder_output, - shape=[-1], - dtype=src.dtype, - value=self.start_token), - "end_token": - self.end_token - }) + "inputs": self.trg_embeder(trg), + "sequence_length": trg_length, + } + if self.decoder.decoding_strategy == "train_greedy" + else ( + { + "embedding_fn": self.trg_embeder, + "beam_size": self.beam_size, + "start_token": self.start_token, + "end_token": self.end_token, + } + if self.decoder.decoding_strategy == "beam_search" + else { + "embedding_fn": self.trg_embeder, + "start_tokens": layers.fill_constant_batch_size_like( + input=encoder_output, + shape=[-1], + 
dtype=src.dtype, + value=self.start_token, + ), + "end_token": self.end_token, + } + ) + ) decoder_kwargs["output_layer"] = self.output_layer - (decoder_output, decoder_final_state, - dec_seq_lengths) = self.decoder(decoder_initial_states, encoder_output, - encoder_padding_mask, **decoder_kwargs) + (decoder_output, decoder_final_state, dec_seq_lengths) = self.decoder( + decoder_initial_states, + encoder_output, + encoder_padding_mask, + **decoder_kwargs + ) if self.decoder.decoding_strategy == "beam_search": # for inference return decoder_output - logits, samples, sample_length = (decoder_output.cell_outputs, - decoder_output.sample_ids, - dec_seq_lengths) + logits, samples, sample_length = ( + decoder_output.cell_outputs, + decoder_output.sample_ids, + dec_seq_lengths, + ) probs = layers.softmax(logits) return probs, samples, sample_length @@ -262,13 +312,16 @@ class PolicyGradient(object): """ update policy model self.model with policy gradient algorithm """ - self.reward = fluid.layers.py_func(func=reward_func, - x=[action, length], - out=reward) + self.reward = fluid.layers.py_func( + func=reward_func, x=[action, length], out=reward + ) neg_log_prob = layers.cross_entropy(act_prob, action) cost = neg_log_prob * reward - cost = (layers.reduce_sum(cost) / layers.reduce_sum(length) - ) if length is not None else layers.reduce_mean(cost) + cost = ( + (layers.reduce_sum(cost) / layers.reduce_sum(length)) + if length is not None + else layers.reduce_mean(cost) + ) optimizer = fluid.optimizer.Adam(self.lr) optimizer.minimize(cost) return cost @@ -277,24 +330,26 @@ class PolicyGradient(object): def reward_func(samples, sample_length): """toy reward""" - def discount_reward(reward, sequence_length, discount=1.): + def discount_reward(reward, sequence_length, discount=1.0): return discount_reward_1d(reward, sequence_length, discount) - def discount_reward_1d(reward, sequence_length, discount=1., dtype=None): + def discount_reward_1d(reward, sequence_length, discount=1.0, dtype=None): if sequence_length is None: raise ValueError( - 'sequence_length must not be `None` for 1D reward.') + 'sequence_length must not be `None` for 1D reward.' 
+ ) reward = np.array(reward) sequence_length = np.array(sequence_length) batch_size = reward.shape[0] max_seq_length = np.max(sequence_length) dtype = dtype or reward.dtype - if discount == 1.: + if discount == 1.0: dmat = np.ones([batch_size, max_seq_length], dtype=dtype) else: steps = np.tile(np.arange(max_seq_length), [batch_size, 1]) - mask = np.asarray(steps < (sequence_length - 1)[:, None], - dtype=dtype) + mask = np.asarray( + steps < (sequence_length - 1)[:, None], dtype=dtype + ) # Make each row = [discount, ..., discount, 1, ..., 1] dmat = mask * discount + (1 - mask) dmat = np.cumprod(dmat[:, ::-1], axis=1)[:, ::-1] @@ -329,9 +384,15 @@ def reward_func(samples, sample_length): # repeat punishment to trapped into local minima getting all same words # beam search to get more than one sample may also can avoid this for i in range(reward.shape[0]): - reward[i] += -10 if sample_length[i] > 1 and np.all( - samples[i][:sample_length[i] - 1] == samples[i][0]) else 0 - return discount_reward(reward, sample_length, discount=1.).astype("float32") + reward[i] += ( + -10 + if sample_length[i] > 1 + and np.all(samples[i][: sample_length[i] - 1] == samples[i][0]) + else 0 + ) + return discount_reward(reward, sample_length, discount=1.0).astype( + "float32" + ) class MLE(object): @@ -353,20 +414,23 @@ class MLE(object): class SeqPGAgent(object): - - def __init__(self, - model_cls, - alg_cls=PolicyGradient, - model_hparams={}, - alg_hparams={}, - executor=None, - main_program=None, - startup_program=None, - seed=None): - self.main_program = fluid.Program( - ) if main_program is None else main_program - self.startup_program = fluid.Program( - ) if startup_program is None else startup_program + def __init__( + self, + model_cls, + alg_cls=PolicyGradient, + model_hparams={}, + alg_hparams={}, + executor=None, + main_program=None, + startup_program=None, + seed=None, + ): + self.main_program = ( + fluid.Program() if main_program is None else main_program + ) + self.startup_program = ( + fluid.Program() if startup_program is None else startup_program + ) if seed is not None: self.main_program.random_seed = seed self.startup_program.random_seed = seed @@ -376,51 +440,55 @@ class SeqPGAgent(object): def build_program(self, model_cls, alg_cls, model_hparams, alg_hparams): with fluid.program_guard(self.main_program, self.startup_program): source = fluid.data(name="src", shape=[None, None], dtype="int64") - source_length = fluid.data(name="src_sequence_length", - shape=[None], - dtype="int64") + source_length = fluid.data( + name="src_sequence_length", shape=[None], dtype="int64" + ) # only for teacher-forcing MLE training target = fluid.data(name="trg", shape=[None, None], dtype="int64") - target_length = fluid.data(name="trg_sequence_length", - shape=[None], - dtype="int64") - label = fluid.data(name="label", - shape=[None, None, 1], - dtype="int64") + target_length = fluid.data( + name="trg_sequence_length", shape=[None], dtype="int64" + ) + label = fluid.data( + name="label", shape=[None, None, 1], dtype="int64" + ) self.model = model_cls(**model_hparams) self.alg = alg_cls(**alg_hparams) self.probs, self.samples, self.sample_length = self.model( - source, source_length, target, target_length) + source, source_length, target, target_length + ) self.samples.stop_gradient = True self.reward = fluid.data( name="reward", shape=[None, None], # batch_size, seq_len - dtype=self.probs.dtype) + dtype=self.probs.dtype, + ) self.samples.stop_gradient = False - self.cost = self.alg.learn(self.probs, 
self.samples, self.reward, - self.sample_length) + self.cost = self.alg.learn( + self.probs, self.samples, self.reward, self.sample_length + ) # to define the same parameters between different programs self.pred_program = self.main_program._prune_with_input( [source.name, source_length.name], - [self.probs, self.samples, self.sample_length]) + [self.probs, self.samples, self.sample_length], + ) def predict(self, feed_dict): samples, sample_length = self.executor.run( self.pred_program, feed=feed_dict, - fetch_list=[self.samples, self.sample_length]) + fetch_list=[self.samples, self.sample_length], + ) return samples, sample_length def learn(self, feed_dict, fetch_list): - results = self.executor.run(self.main_program, - feed=feed_dict, - fetch_list=fetch_list) + results = self.executor.run( + self.main_program, feed=feed_dict, fetch_list=fetch_list + ) return results class TestDynamicDecode(unittest.TestCase): - def setUp(self): np.random.seed(123) self.model_hparams = { @@ -432,7 +500,7 @@ class TestDynamicDecode(unittest.TestCase): "start_token": 0, "end_token": 1, "decoding_strategy": "infer_greedy", - "max_decoding_length": 10 + "max_decoding_length": 10, } self.iter_num = iter_num = 2 @@ -440,121 +508,159 @@ class TestDynamicDecode(unittest.TestCase): src_seq_len = 10 trg_seq_len = 12 self.data = { - "src": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, src_seq_len)).astype("int64"), - "src_sequence_length": - np.random.randint(1, src_seq_len, - (iter_num * batch_size, )).astype("int64"), - "trg": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, trg_seq_len)).astype("int64"), - "trg_sequence_length": - np.random.randint(1, trg_seq_len, - (iter_num * batch_size, )).astype("int64"), - "label": - np.random.randint( - 2, self.model_hparams["src_vocab_size"], - (iter_num * batch_size, trg_seq_len, 1)).astype("int64"), + "src": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, src_seq_len), + ).astype("int64"), + "src_sequence_length": np.random.randint( + 1, src_seq_len, (iter_num * batch_size,) + ).astype("int64"), + "trg": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, trg_seq_len), + ).astype("int64"), + "trg_sequence_length": np.random.randint( + 1, trg_seq_len, (iter_num * batch_size,) + ).astype("int64"), + "label": np.random.randint( + 2, + self.model_hparams["src_vocab_size"], + (iter_num * batch_size, trg_seq_len, 1), + ).astype("int64"), } - place = core.CUDAPlace( - 0) if core.is_compiled_with_cuda() else core.CPUPlace() + place = ( + core.CUDAPlace(0) + if core.is_compiled_with_cuda() + else core.CPUPlace() + ) self.exe = Executor(place) def test_mle_train(self): paddle.enable_static() self.model_hparams["decoding_strategy"] = "train_greedy" - agent = SeqPGAgent(model_cls=Seq2SeqModel, - alg_cls=MLE, - model_hparams=self.model_hparams, - alg_hparams={"lr": 0.001}, - executor=self.exe, - main_program=fluid.Program(), - startup_program=fluid.Program(), - seed=123) + agent = SeqPGAgent( + model_cls=Seq2SeqModel, + alg_cls=MLE, + model_hparams=self.model_hparams, + alg_hparams={"lr": 0.001}, + executor=self.exe, + main_program=fluid.Program(), + startup_program=fluid.Program(), + seed=123, + ) self.exe.run(agent.startup_program) for iter_idx in range(self.iter_num): reward, cost = agent.learn( { - "src": - self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "src_sequence_length": - 
self.data["src_sequence_length"][iter_idx * self.batch_size: - (iter_idx + 1) * - self.batch_size], - "trg": - self.data["trg"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "trg_sequence_length": - self.data["trg_sequence_length"][iter_idx * self.batch_size: - (iter_idx + 1) * - self.batch_size], - "label": - self.data["label"][iter_idx * - self.batch_size:(iter_idx + 1) * - self.batch_size] + "src": self.data["src"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "src_sequence_length": self.data["src_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], + "trg": self.data["trg"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "trg_sequence_length": self.data["trg_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], + "label": self.data["label"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], }, - fetch_list=[agent.cost, agent.cost]) - print("iter_idx: %d, reward: %f, cost: %f" % - (iter_idx, reward.mean(), cost)) + fetch_list=[agent.cost, agent.cost], + ) + print( + "iter_idx: %d, reward: %f, cost: %f" + % (iter_idx, reward.mean(), cost) + ) def test_greedy_train(self): paddle.enable_static() self.model_hparams["decoding_strategy"] = "infer_greedy" - agent = SeqPGAgent(model_cls=Seq2SeqModel, - alg_cls=PolicyGradient, - model_hparams=self.model_hparams, - alg_hparams={"lr": 0.001}, - executor=self.exe, - main_program=fluid.Program(), - startup_program=fluid.Program(), - seed=123) + agent = SeqPGAgent( + model_cls=Seq2SeqModel, + alg_cls=PolicyGradient, + model_hparams=self.model_hparams, + alg_hparams={"lr": 0.001}, + executor=self.exe, + main_program=fluid.Program(), + startup_program=fluid.Program(), + seed=123, + ) self.exe.run(agent.startup_program) for iter_idx in range(self.iter_num): reward, cost = agent.learn( { - "src": - self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "src_sequence_length": - self.data["src_sequence_length"][iter_idx * self.batch_size: - (iter_idx + 1) * - self.batch_size] + "src": self.data["src"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "src_sequence_length": self.data["src_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], }, - fetch_list=[agent.reward, agent.cost]) - print("iter_idx: %d, reward: %f, cost: %f" % - (iter_idx, reward.mean(), cost)) + fetch_list=[agent.reward, agent.cost], + ) + print( + "iter_idx: %d, reward: %f, cost: %f" + % (iter_idx, reward.mean(), cost) + ) def test_sample_train(self): paddle.enable_static() self.model_hparams["decoding_strategy"] = "infer_sample" - agent = SeqPGAgent(model_cls=Seq2SeqModel, - alg_cls=PolicyGradient, - model_hparams=self.model_hparams, - alg_hparams={"lr": 0.001}, - executor=self.exe, - main_program=fluid.Program(), - startup_program=fluid.Program(), - seed=123) + agent = SeqPGAgent( + model_cls=Seq2SeqModel, + alg_cls=PolicyGradient, + model_hparams=self.model_hparams, + alg_hparams={"lr": 0.001}, + executor=self.exe, + main_program=fluid.Program(), + startup_program=fluid.Program(), + seed=123, + ) self.exe.run(agent.startup_program) for iter_idx in range(self.iter_num): reward, cost = agent.learn( { - "src": - self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "src_sequence_length": - self.data["src_sequence_length"][iter_idx * self.batch_size: - (iter_idx + 
1) * - self.batch_size] + "src": self.data["src"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "src_sequence_length": self.data["src_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], }, - fetch_list=[agent.reward, agent.cost]) - print("iter_idx: %d, reward: %f, cost: %f" % - (iter_idx, reward.mean(), cost)) + fetch_list=[agent.reward, agent.cost], + ) + print( + "iter_idx: %d, reward: %f, cost: %f" + % (iter_idx, reward.mean(), cost) + ) def test_beam_search_infer(self): paddle.set_default_dtype("float32") @@ -564,9 +670,9 @@ class TestDynamicDecode(unittest.TestCase): startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): source = fluid.data(name="src", shape=[None, None], dtype="int64") - source_length = fluid.data(name="src_sequence_length", - shape=[None], - dtype="int64") + source_length = fluid.data( + name="src_sequence_length", shape=[None], dtype="int64" + ) model = Seq2SeqModel(**self.model_hparams) output = model(source, source_length) @@ -575,16 +681,20 @@ class TestDynamicDecode(unittest.TestCase): trans_ids = self.exe.run( program=main_program, feed={ - "src": - self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) * - self.batch_size, :], - "src_sequence_length": - self.data["src_sequence_length"][iter_idx * - self.batch_size:(iter_idx + - 1) * - self.batch_size] + "src": self.data["src"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size, + :, + ], + "src_sequence_length": self.data["src_sequence_length"][ + iter_idx + * self.batch_size : (iter_idx + 1) + * self.batch_size + ], }, - fetch_list=[output])[0] + fetch_list=[output], + )[0] def func_dynamic_basic_decoder(self): paddle.disable_static() @@ -601,7 +711,6 @@ class TestDynamicDecode(unittest.TestCase): class ModuleApiTest(unittest.TestCase): - @classmethod def setUpClass(cls): cls._np_rand_state = np.random.get_state() @@ -611,10 +720,13 @@ class ModuleApiTest(unittest.TestCase): random.seed(cls._random_seed) cls.model_cls = type( - cls.__name__ + "Model", (Layer, ), { + cls.__name__ + "Model", + (Layer,), + { "__init__": cls.model_init_wrapper(cls.model_init), - "forward": cls.model_forward - }) + "forward": cls.model_forward, + }, + ) @classmethod def tearDownClass(cls): @@ -623,7 +735,6 @@ class ModuleApiTest(unittest.TestCase): @staticmethod def model_init_wrapper(func): - def __impl__(self, *args, **kwargs): Layer.__init__(self) func(self, *args, **kwargs) @@ -633,7 +744,8 @@ class ModuleApiTest(unittest.TestCase): @staticmethod def model_init(model, *args, **kwargs): raise NotImplementedError( - "model_init acts as `Model.__init__`, thus must implement it") + "model_init acts as `Model.__init__`, thus must implement it" + ) @staticmethod def model_forward(model, *args, **kwargs): @@ -642,7 +754,8 @@ class ModuleApiTest(unittest.TestCase): def make_inputs(self): # TODO(guosheng): add default from `self.inputs` raise NotImplementedError( - "model_inputs makes inputs for model, thus must implement it") + "model_inputs makes inputs for model, thus must implement it" + ) def setUp(self): """ @@ -667,8 +780,11 @@ class ModuleApiTest(unittest.TestCase): paddle.framework.random._manual_program_seed(self._random_seed) scope = fluid.core.Scope() with fluid.scope_guard(scope): - layer = self.model_cls(**self.attrs) if isinstance( - self.attrs, dict) else self.model_cls(*self.attrs) + layer = ( + self.model_cls(**self.attrs) + if isinstance(self.attrs, dict) + else 
self.model_cls(*self.attrs) + ) model = Model(layer, inputs=self.make_inputs()) model.prepare() if self.param_states: @@ -683,10 +799,9 @@ class ModuleApiTest(unittest.TestCase): np.testing.assert_allclose(actual_t, expect_t, rtol=1e-05, atol=0) if expect_output: for actual_t, expect_t in zip(dygraph_output, expect_output): - np.testing.assert_allclose(actual_t, - expect_t, - rtol=1e-05, - atol=0) + np.testing.assert_allclose( + actual_t, expect_t, rtol=1e-05, atol=0 + ) def check_output(self): devices = ["CPU", "GPU"] if fluid.is_compiled_with_cuda() else ["CPU"] @@ -696,13 +811,12 @@ class ModuleApiTest(unittest.TestCase): class TestBeamSearch(ModuleApiTest): - def setUp(self): paddle.set_default_dtype("float64") shape = (8, 32) self.inputs = [ np.random.random(shape).astype("float64"), - np.random.random(shape).astype("float64") + np.random.random(shape).astype("float64"), ] self.outputs = None self.attrs = { @@ -713,33 +827,40 @@ class TestBeamSearch(ModuleApiTest): self.param_states = {} @staticmethod - def model_init(self, - vocab_size, - embed_dim, - hidden_size, - bos_id=0, - eos_id=1, - beam_size=4, - max_step_num=20): - embedder = paddle.fluid.dygraph.Embedding(size=[vocab_size, embed_dim], - dtype="float64") + def model_init( + self, + vocab_size, + embed_dim, + hidden_size, + bos_id=0, + eos_id=1, + beam_size=4, + max_step_num=20, + ): + embedder = paddle.fluid.dygraph.Embedding( + size=[vocab_size, embed_dim], dtype="float64" + ) output_layer = nn.Linear(hidden_size, vocab_size) cell = nn.LSTMCell(embed_dim, hidden_size) self.max_step_num = max_step_num - self.beam_search_decoder = BeamSearchDecoder(cell, - start_token=bos_id, - end_token=eos_id, - beam_size=beam_size, - embedding_fn=embedder, - output_fn=output_layer) + self.beam_search_decoder = BeamSearchDecoder( + cell, + start_token=bos_id, + end_token=eos_id, + beam_size=beam_size, + embedding_fn=embedder, + output_fn=output_layer, + ) @staticmethod def model_forward(model, init_hidden, init_cell): - return dynamic_decode(model.beam_search_decoder, - [init_hidden, init_cell], - max_step_num=model.max_step_num, - impute_finished=True, - is_test=True)[0] + return dynamic_decode( + model.beam_search_decoder, + [init_hidden, init_cell], + max_step_num=model.max_step_num, + impute_finished=True, + is_test=True, + )[0] def make_inputs(self): inputs = [ diff --git a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py index aa5b852ff880270fd632b2d2d99ee2463e5917c7..f2014c29f91c763f1c7ce94994437657308afe15 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py @@ -21,59 +21,61 @@ import paddle.fluid.core as core class RNNMemoryHelperOpTest(unittest.TestCase): - def setUp(self): self.program = Program() self.place = core.CPUPlace() - self.X = self.program.global_block().create_var(name='X', - shape=[2, 3], - dtype='float32') - self.Out = self.program.global_block().create_var(name='Out', - shape=[2, 3], - dtype='float32') - self.program.global_block().append_op(type='rnn_memory_helper', - inputs={"X": self.X}, - outputs={"Out": self.Out}, - attrs={}) + self.X = self.program.global_block().create_var( + name='X', shape=[2, 3], dtype='float32' + ) + self.Out = self.program.global_block().create_var( + name='Out', shape=[2, 3], dtype='float32' + ) + self.program.global_block().append_op( + type='rnn_memory_helper', + inputs={"X": self.X}, + outputs={"Out": 
self.Out}, + attrs={}, + ) def test_forward(self): x_np = np.random.normal(size=(2, 3)).astype("float32") self.feed_map = {'X': x_np} self.fetch_list = [self.Out] exe = Executor(self.place) - out = exe.run(self.program, - feed=self.feed_map, - fetch_list=self.fetch_list) + out = exe.run( + self.program, feed=self.feed_map, fetch_list=self.fetch_list + ) np.testing.assert_allclose(out[0], x_np, rtol=1e-05) class RNNMemoryHelperGradOpTest(unittest.TestCase): - def setUp(self): self.program = Program() self.place = core.CPUPlace() self.input_names = ['X', 'Out', 'Out@GRAD'] self.input_vars = { - name: self.program.global_block().create_var(name=name, - shape=[2, 3], - dtype='float32') + name: self.program.global_block().create_var( + name=name, shape=[2, 3], dtype='float32' + ) for name in self.input_names } self.output_names = ['X@GRAD'] self.output_vars = { - name: self.program.global_block().create_var(name=name, - shape=[2, 3], - dtype='float32') + name: self.program.global_block().create_var( + name=name, shape=[2, 3], dtype='float32' + ) for name in self.output_names } - self.program.global_block().append_op(type='rnn_memory_helper_grad', - inputs=self.input_vars, - outputs=self.output_vars, - attrs={}) + self.program.global_block().append_op( + type='rnn_memory_helper_grad', + inputs=self.input_vars, + outputs=self.output_vars, + attrs={}, + ) def test_backward(self): self.feed_map = { @@ -83,14 +85,13 @@ class RNNMemoryHelperGradOpTest(unittest.TestCase): self.fetch_list = [self.output_vars['X@GRAD']] exe = Executor(self.place) - out = exe.run(self.program, - feed=self.feed_map, - fetch_list=self.fetch_list) + out = exe.run( + self.program, feed=self.feed_map, fetch_list=self.fetch_list + ) np.isclose(out[0], self.feed_map['Out@GRAD'], rtol=1e-5) class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase): - def setUp(self): self.program = Program() self.fake_program = Program() @@ -98,27 +99,31 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase): self.input_names = ['X', 'Out'] self.input_vars = { - name: self.program.global_block().create_var(name=name, - shape=[2, 3], - dtype='float32') + name: self.program.global_block().create_var( + name=name, shape=[2, 3], dtype='float32' + ) for name in self.input_names } - self.input_vars["Out@GRAD"] = \ - self.fake_program.global_block().create_var( - name="Out@GRAD", shape=[2, 3], dtype='float32') + self.input_vars[ + "Out@GRAD" + ] = self.fake_program.global_block().create_var( + name="Out@GRAD", shape=[2, 3], dtype='float32' + ) self.output_names = ['X@GRAD'] self.output_vars = { - name: self.program.global_block().create_var(name=name, - shape=[2, 3], - dtype='float32') + name: self.program.global_block().create_var( + name=name, shape=[2, 3], dtype='float32' + ) for name in self.output_names } - self.program.global_block().append_op(type='rnn_memory_helper_grad', - inputs=self.input_vars, - outputs=self.output_vars, - attrs={}) + self.program.global_block().append_op( + type='rnn_memory_helper_grad', + inputs=self.input_vars, + outputs=self.output_vars, + attrs={}, + ) def test_backward(self): self.feed_map = { @@ -128,12 +133,12 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase): self.fetch_list = [self.output_vars['X@GRAD']] exe = Executor(self.place) - out = exe.run(self.program, - feed=self.feed_map, - fetch_list=self.fetch_list) - np.testing.assert_allclose(out[0], - np.zeros(shape=(2, 3)).astype('float32'), - rtol=1e-05) + out = exe.run( + self.program, feed=self.feed_map, fetch_list=self.fetch_list 
+ ) + np.testing.assert_allclose( + out[0], np.zeros(shape=(2, 3)).astype('float32'), rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_rnn_op.py b/python/paddle/fluid/tests/unittests/test_rnn_op.py index ecf79cabab5ed19dd257ebcbcb725b0520d90798..5dd255562e64114997f487bff2b428b10e2431bb 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_op.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestRNNOp(OpTest): - def get_weight_names(self): weight_names = [] for i in range(self.num_layers): @@ -45,8 +44,11 @@ class TestRNNOp(OpTest): def setUp(self): self.op_type = "rnn" self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.sequence_length = None if core.is_compiled_with_rocm( - ) else np.array([12, 11, 10, 9, 8], dtype=np.int32) + self.sequence_length = ( + None + if core.is_compiled_with_rocm() + else np.array([12, 11, 10, 9, 8], dtype=np.int32) + ) self.num_layers = 1 self.is_bidirec = False self.mode = "LSTM" @@ -61,27 +63,29 @@ class TestRNNOp(OpTest): input_size = 3 hidden_size = 2 - input = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_length, batch_size, - input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, high=0.1, size=(seq_length, batch_size, input_size) + ).astype(self.dtype) if self.sequence_length is not None: input[11][1:][:] = 0 input[10][2:][:] = 0 input[9][3:][:] = 0 input[8][4:][:] = 0 - rnn1 = LSTM(input_size, - hidden_size, - num_layers=self.num_layers, - time_major=True, - direction=direction, - dropout=self.dropout, - dtype=self.dtype) + rnn1 = LSTM( + input_size, + hidden_size, + num_layers=self.num_layers, + time_major=True, + direction=direction, + dropout=self.dropout, + dtype=self.dtype, + ) flat_w = get_params_for_net(rnn1) - output, (last_hidden, - last_cell) = rnn1(input, sequence_length=self.sequence_length) + output, (last_hidden, last_cell) = rnn1( + input, sequence_length=self.sequence_length + ) if core.is_compiled_with_rocm(): @@ -91,17 +95,19 @@ class TestRNNOp(OpTest): self._get_places = rocm_rnn_get_place - init_h = np.zeros((self.num_layers * self.direction_num, batch_size, - hidden_size)).astype(self.dtype) - init_c = np.zeros((self.num_layers * self.direction_num, batch_size, - hidden_size)).astype(self.dtype) + init_h = np.zeros( + (self.num_layers * self.direction_num, batch_size, hidden_size) + ).astype(self.dtype) + init_c = np.zeros( + (self.num_layers * self.direction_num, batch_size, hidden_size) + ).astype(self.dtype) state_out = np.ndarray((300)).astype("uint8") self.inputs = { 'Input': input, 'WeightList': flat_w, 'PreState': [('init_h', init_h), ('init_c', init_c)], - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if self.sequence_length is None: self.inputs = { @@ -116,13 +122,13 @@ class TestRNNOp(OpTest): 'hidden_size': hidden_size, 'num_layers': self.num_layers, 'mode': self.mode, - 'is_test': self.is_test + 'is_test': self.is_test, } self.outputs = { 'Out': output, "State": [('last_hidden', last_hidden), ('last_cell', last_cell)], 'Reserve': np.ndarray((400)).astype("uint8"), - 'DropoutState': state_out + 'DropoutState': state_out, } def test_output(self): @@ -136,56 +142,56 @@ class TestRNNOp(OpTest): var_name_list = self.get_weight_names() grad_check_list = ['Input', 'init_h', 'init_c'] grad_check_list.extend(var_name_list) - self.check_grad(set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad( + 
set(grad_check_list), ['Out', 'last_hidden', 'last_cell'] + ) def test_grad_only_input(self): if not self.is_test: var_name_list = self.get_weight_names() grad_check_list = ['Input'] grad_check_list.extend(var_name_list) - self.check_grad(set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad( + set(grad_check_list), ['Out', 'last_hidden', 'last_cell'] + ) def test_grad_only_h(self): if not self.is_test: var_name_list = self.get_weight_names() grad_check_list = ['init_h'] grad_check_list.extend(var_name_list) - self.check_grad(set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad( + set(grad_check_list), ['Out', 'last_hidden', 'last_cell'] + ) def test_grad_only_c(self): if not self.is_test: var_name_list = self.get_weight_names() grad_check_list = ['init_c'] grad_check_list.extend(var_name_list) - self.check_grad(set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad( + set(grad_check_list), ['Out', 'last_hidden', 'last_cell'] + ) class TestRNNOp1(TestRNNOp): - def set_attrs(self): self.sequence_length = None class TestRNNOp2(TestRNNOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True class TestRNNOp3(TestRNNOp): - def set_attrs(self): self.is_test = True self.sequence_length = None class TestRNNOp4(TestRNNOp): - def set_attrs(self): self.is_test = True self.sequence_length = None @@ -193,20 +199,17 @@ class TestRNNOp4(TestRNNOp): class TestRNNOp5(TestRNNOp): - def set_attrs(self): self.num_layers = 2 class TestRNNOp6(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = True class TestRNNOp7(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = True @@ -214,7 +217,6 @@ class TestRNNOp7(TestRNNOp): class TestRNNOp8(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = True @@ -222,7 +224,6 @@ class TestRNNOp8(TestRNNOp): class TestRNNOp9(TestRNNOp): - def set_attrs(self): self.num_layers = 3 diff --git a/python/paddle/fluid/tests/unittests/test_roi_align_op.py b/python/paddle/fluid/tests/unittests/test_roi_align_op.py index d9111adceb66bbabe9706bc0a8587eb4fe7e277b..c26b6691f66f9de4b92863e65088d826dcead114 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_align_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_align_op.py @@ -20,7 +20,6 @@ from op_test import OpTest class TestROIAlignOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -29,7 +28,7 @@ class TestROIAlignOp(OpTest): self.inputs = { 'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': self.boxes_num + 'RoisNum': self.boxes_num, } self.attrs = { 'spatial_scale': self.spatial_scale, @@ -58,25 +57,45 @@ class TestROIAlignOp(OpTest): self.x = np.random.random(self.x_dim).astype('float64') - def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w, - bin_size_h, bin_size_w): + def pre_calc( + self, + x_i, + roi_xmin, + roi_ymin, + roi_bin_grid_h, + roi_bin_grid_w, + bin_size_h, + bin_size_w, + ): count = roi_bin_grid_h * roi_bin_grid_w bilinear_pos = np.zeros( [self.channels, self.pooled_height, self.pooled_width, count, 4], - np.float64) - bilinear_w = np.zeros([self.pooled_height, self.pooled_width, count, 4], - np.float64) + np.float64, + ) + bilinear_w = np.zeros( + [self.pooled_height, self.pooled_width, count, 4], np.float64 + ) for ph in range(self.pooled_width): for pw in range(self.pooled_height): c = 0 for iy in range(roi_bin_grid_h): - y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \ - bin_size_h 
/ roi_bin_grid_h + y = ( + roi_ymin + + ph * bin_size_h + + (iy + 0.5) * bin_size_h / roi_bin_grid_h + ) for ix in range(roi_bin_grid_w): - x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \ - bin_size_w / roi_bin_grid_w - if y < -1.0 or y > self.height or \ - x < -1.0 or x > self.width: + x = ( + roi_xmin + + pw * bin_size_w + + (ix + 0.5) * bin_size_w / roi_bin_grid_w + ) + if ( + y < -1.0 + or y > self.height + or x < -1.0 + or x > self.width + ): continue if y <= 0: y = 0 @@ -97,14 +116,18 @@ class TestROIAlignOp(OpTest): hy = 1 - ly hx = 1 - lx for ch in range(self.channels): - bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low, - x_low] - bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low, - x_high] - bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high, - x_low] - bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high, - x_high] + bilinear_pos[ch, ph, pw, c, 0] = x_i[ + ch, y_low, x_low + ] + bilinear_pos[ch, ph, pw, c, 1] = x_i[ + ch, y_low, x_high + ] + bilinear_pos[ch, ph, pw, c, 2] = x_i[ + ch, y_high, x_low + ] + bilinear_pos[ch, ph, pw, c, 3] = x_i[ + ch, y_high, x_high + ] bilinear_w[ph, pw, c, 0] = hy * hx bilinear_w[ph, pw, c, 1] = hy * lx bilinear_w[ph, pw, c, 2] = ly * hx @@ -114,10 +137,15 @@ class TestROIAlignOp(OpTest): def calc_roi_align(self): self.out_data = np.zeros( - (self.rois_num, self.channels, self.pooled_height, - self.pooled_width)).astype('float64') - - offset = 0.5 if self.aligned else 0. + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ).astype('float64') + + offset = 0.5 if self.aligned else 0.0 for i in range(self.rois_num): roi = self.rois[i] roi_batch_id = int(roi[0]) @@ -135,16 +163,27 @@ class TestROIAlignOp(OpTest): bin_size_h = float(roi_height) / float(self.pooled_height) bin_size_w = float(roi_width) / float(self.pooled_width) - roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_height / self.pooled_height) - roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_width / self.pooled_width) + roi_bin_grid_h = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_height / self.pooled_height) + ) + roi_bin_grid_w = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_width / self.pooled_width) + ) count = max(int(roi_bin_grid_h * roi_bin_grid_w), 1) pre_size = count * self.pooled_width * self.pooled_height - bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin, - int(roi_bin_grid_h), - int(roi_bin_grid_w), - bin_size_h, bin_size_w) + bilinear_pos, bilinear_w = self.pre_calc( + x_i, + roi_xmin, + roi_ymin, + int(roi_bin_grid_h), + int(roi_bin_grid_w), + bin_size_h, + bin_size_w, + ) for ch in range(self.channels): align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1) output_val = align_per_bin.mean(axis=-1) @@ -157,27 +196,38 @@ class TestROIAlignOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.random_integers( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.random_integers( - 0, self.height // self.spatial_scale - self.pooled_height) + 0, self.height // self.spatial_scale - self.pooled_height + ) - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width // self.spatial_scale) + x2 = np.random.random_integers( + x1 + self.pooled_width, self.width // self.spatial_scale + ) y2 = np.random.random_integers( - y1 + self.pooled_height, self.height // self.spatial_scale) + y1 + 
self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) self.rois_num = len(rois) self.rois = np.array(rois).astype("float64") - self.boxes_num = np.array([bno + 1 for bno in range(self.batch_size) - ]).astype('int32') + self.boxes_num = np.array( + [bno + 1 for bno in range(self.batch_size)] + ).astype('int32') def setUp(self): self.op_type = "roi_align" self.python_api = lambda x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale, sampling_ratio, aligned: paddle.vision.ops.roi_align( - x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale, - sampling_ratio, aligned) + x, + boxes, + boxes_num, + (pooled_height, pooled_width), + spatial_scale, + sampling_ratio, + aligned, + ) self.set_data() def test_check_output(self): @@ -188,7 +238,6 @@ class TestROIAlignOp(OpTest): class TestROIAlignInLodOp(TestROIAlignOp): - def set_data(self): self.init_test_case() self.make_rois() @@ -199,7 +248,7 @@ class TestROIAlignInLodOp(TestROIAlignOp): self.inputs = { 'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': np.asarray(seq_len).astype('int32') + 'RoisNum': np.asarray(seq_len).astype('int32'), } self.attrs = { @@ -207,14 +256,13 @@ class TestROIAlignInLodOp(TestROIAlignOp): 'pooled_height': self.pooled_height, 'pooled_width': self.pooled_width, 'sampling_ratio': self.sampling_ratio, - 'aligned': self.aligned + 'aligned': self.aligned, } self.outputs = {'Out': self.out_data} class TestROIAlignOpWithAligned(TestROIAlignOp): - def init_test_case(self): self.batch_size = 3 self.channels = 3 diff --git a/python/paddle/fluid/tests/unittests/test_roi_perspective_transform_op.py b/python/paddle/fluid/tests/unittests/test_roi_perspective_transform_op.py index ff6805590439d6dae22248dfebfdce300bada1bd..d03bdcd59202a41fd2a91eae9ba47a8c59a2c6c8 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_perspective_transform_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_perspective_transform_op.py @@ -40,13 +40,20 @@ def in_quad(x, y, roi_x, roi_y): xe = roi_x[(i + 1) % 4] ye = roi_y[(i + 1) % 4] if abs(ys - ye) < 1e-4: - if abs(y - ys) < 1e-4 and abs(y - ye) < 1e-4 and gt_e( - x, min(xs, xe)) and lt_e(x, max(xs, xe)): + if ( + abs(y - ys) < 1e-4 + and abs(y - ye) < 1e-4 + and gt_e(x, min(xs, xe)) + and lt_e(x, max(xs, xe)) + ): return True else: intersec_x = (y - ys) * (xe - xs) / (ye - ys) + xs - if abs(intersec_x - x) < 1e-4 and gt_e(y, min(ys, ye)) and lt_e( - y, max(ys, ye)): + if ( + abs(intersec_x - x) < 1e-4 + and gt_e(y, min(ys, ye)) + and lt_e(y, max(ys, ye)) + ): return True n_cross = 0 for i in range(4): @@ -63,7 +70,7 @@ def in_quad(x, y, roi_x, roi_y): return True if gt(intersec_x, x): n_cross += 1 - return (n_cross % 2 == 1) + return n_cross % 2 == 1 def get_transform_matrix(transformed_width, transformed_height, roi_x, roi_y): @@ -84,8 +91,9 @@ def get_transform_matrix(transformed_width, transformed_height, roi_x, roi_y): estimated_width = (len1 + len3) / 2.0 normalized_height = max(2, transformed_height) - normalized_width = round(estimated_width * - (normalized_height - 1) / estimated_height) + 1 + normalized_width = ( + round(estimated_width * (normalized_height - 1) / estimated_height) + 1 + ) normalized_width = max(2, min(normalized_width, transformed_width)) dx1 = x1 - x2 @@ -95,22 +103,32 @@ def get_transform_matrix(transformed_width, transformed_height, roi_x, roi_y): dy2 = y3 - y2 dy3 = y0 - y1 + y2 - y3 matrix = np.zeros([9]) - matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1 + - 
1e-5) / (normalized_width - 1) - matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1 + - 1e-5) / (normalized_height - 1) + matrix[6] = ( + (dx3 * dy2 - dx2 * dy3) + / (dx1 * dy2 - dx2 * dy1 + 1e-5) + / (normalized_width - 1) + ) + matrix[7] = ( + (dx1 * dy3 - dx3 * dy1) + / (dx1 * dy2 - dx2 * dy1 + 1e-5) + / (normalized_height - 1) + ) matrix[8] = 1 - matrix[3] = (y1 - y0 + matrix[6] * - (normalized_width - 1) * y1) / (normalized_width - 1) - matrix[4] = (y3 - y0 + matrix[7] * - (normalized_height - 1) * y3) / (normalized_height - 1) + matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) / ( + normalized_width - 1 + ) + matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) / ( + normalized_height - 1 + ) matrix[5] = y0 - matrix[0] = (x1 - x0 + matrix[6] * - (normalized_width - 1) * x1) / (normalized_width - 1) - matrix[1] = (x3 - x0 + matrix[7] * - (normalized_height - 1) * x3) / (normalized_height - 1) + matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) / ( + normalized_width - 1 + ) + matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) / ( + normalized_height - 1 + ) matrix[2] = x0 return matrix @@ -131,8 +149,12 @@ def bilinear_interpolate(in_data, in_n, in_c, in_w, in_h): height = in_data.shape[2] width = in_data.shape[3] - if gt_e(-0.5, in_w) or gt_e(in_w, width - 0.5) or gt_e(-0.5, in_h) or gt_e( - in_h, height - 0.5): + if ( + gt_e(-0.5, in_w) + or gt_e(in_w, width - 0.5) + or gt_e(-0.5, in_h) + or gt_e(in_h, height - 0.5) + ): return 0.0 if gt_e(0, in_w): @@ -180,8 +202,14 @@ def lod_convert(lod): return ret -def roi_transform(in_data, rois, rois_lod, transformed_height, - transformed_width, spatial_scale): +def roi_transform( + in_data, + rois, + rois_lod, + transformed_height, + transformed_width, + spatial_scale, +): channels = in_data.shape[1] in_height = in_data.shape[2] in_width = in_data.shape[3] @@ -194,8 +222,9 @@ def roi_transform(in_data, rois, rois_lod, transformed_height, roi2image[j] = i out = np.zeros([rois_num, channels, transformed_height, transformed_width]) - mask = np.zeros([rois_num, 1, transformed_height, - transformed_width]).astype('int') + mask = np.zeros( + [rois_num, 1, transformed_height, transformed_width] + ).astype('int') matrix = np.zeros([rois_num, 9], dtype=in_data.dtype) for n in range(rois_num): roi_x = [] @@ -204,20 +233,26 @@ def roi_transform(in_data, rois, rois_lod, transformed_height, roi_x.append(rois[n][2 * k] * spatial_scale) roi_y.append(rois[n][2 * k + 1] * spatial_scale) image_id = roi2image[n] - transform_matrix = get_transform_matrix(transformed_width, - transformed_height, roi_x, - roi_y) + transform_matrix = get_transform_matrix( + transformed_width, transformed_height, roi_x, roi_y + ) matrix[n] = transform_matrix for c in range(channels): for out_h in range(transformed_height): for out_w in range(transformed_width): - in_w, in_h = get_source_coords(transform_matrix, out_w, - out_h) - if in_quad(in_w, in_h, roi_x, roi_y) and gt( - in_w, -0.5) and gt(in_width - 0.5, in_w) and gt( - in_h, -0.5) and gt(in_height - 0.5, in_h): + in_w, in_h = get_source_coords( + transform_matrix, out_w, out_h + ) + if ( + in_quad(in_w, in_h, roi_x, roi_y) + and gt(in_w, -0.5) + and gt(in_width - 0.5, in_w) + and gt(in_h, -0.5) + and gt(in_height - 0.5, in_h) + ): out[n][c][out_h][out_w] = bilinear_interpolate( - in_data, image_id, c, in_w, in_h) + in_data, image_id, c, in_w, in_h + ) mask[n][0][out_h][out_w] = 1 else: out[n][c][out_h][out_w] = 0.0 @@ -226,7 +261,6 @@ def roi_transform(in_data, 
rois, rois_lod, transformed_height, class TestROIPoolOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -236,17 +270,20 @@ class TestROIPoolOp(OpTest): self.attrs = { 'spatial_scale': self.spatial_scale, 'transformed_height': self.transformed_height, - 'transformed_width': self.transformed_width + 'transformed_width': self.transformed_width, } - out, mask, transform_matrix = roi_transform(self.x, self.rois, - self.rois_lod, - self.transformed_height, - self.transformed_width, - self.spatial_scale) + out, mask, transform_matrix = roi_transform( + self.x, + self.rois, + self.rois_lod, + self.transformed_height, + self.transformed_width, + self.spatial_scale, + ) self.outputs = { 'Out': out, 'Mask': mask, - 'TransformMatrix': transform_matrix + 'TransformMatrix': transform_matrix, } def init_test_case(self): @@ -271,28 +308,38 @@ class TestROIPoolOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.randint( - 0, - self.width // self.spatial_scale - self.transformed_width) + 0, self.width // self.spatial_scale - self.transformed_width + ) y1 = np.random.randint( 0, - self.height // self.spatial_scale - self.transformed_height) + self.height // self.spatial_scale - self.transformed_height, + ) - x2 = np.random.randint(x1 + self.transformed_width, - self.width // self.spatial_scale) + x2 = np.random.randint( + x1 + self.transformed_width, + self.width // self.spatial_scale, + ) y2 = np.random.randint( 0, - self.height // self.spatial_scale - self.transformed_height) - - x3 = np.random.randint(x1 + self.transformed_width, - self.width // self.spatial_scale) - y3 = np.random.randint(y1 + self.transformed_height, - self.height // self.spatial_scale) + self.height // self.spatial_scale - self.transformed_height, + ) + + x3 = np.random.randint( + x1 + self.transformed_width, + self.width // self.spatial_scale, + ) + y3 = np.random.randint( + y1 + self.transformed_height, + self.height // self.spatial_scale, + ) x4 = np.random.randint( - 0, - self.width // self.spatial_scale - self.transformed_width) - y4 = np.random.randint(y1 + self.transformed_height, - self.height // self.spatial_scale) + 0, self.width // self.spatial_scale - self.transformed_width + ) + y4 = np.random.randint( + y1 + self.transformed_height, + self.height // self.spatial_scale, + ) roi = [x1, y1, x2, y2, x3, y3, x4, y4] rois.append(roi) @@ -308,52 +355,70 @@ class TestROIPoolOp(OpTest): def test_check_grad(self): self.outputs['Out2InIdx'] = np.zeros( - [np.product(self.outputs['Out'].shape), 4]).astype("int32") + [np.product(self.outputs['Out'].shape), 4] + ).astype("int32") self.outputs['Out2InWeights'] = np.zeros( - [np.product(self.outputs['Out'].shape), 4]).astype("float32") + [np.product(self.outputs['Out'].shape), 4] + ).astype("float32") self.check_grad(['X'], 'Out') def test_errors(self): x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32') - rois = fluid.data(name='rois', - shape=[None, 8], - lod_level=1, - dtype='float32') - - x_int = fluid.data(name='x_int', - shape=[100, 256, 28, 28], - dtype='int32') - rois_int = fluid.data(name='rois_int', - shape=[None, 8], - lod_level=1, - dtype='int32') + rois = fluid.data( + name='rois', shape=[None, 8], lod_level=1, dtype='float32' + ) + + x_int = fluid.data( + name='x_int', shape=[100, 256, 28, 28], dtype='int32' + ) + rois_int = fluid.data( + name='rois_int', shape=[None, 8], lod_level=1, dtype='int32' + ) x_tmp = [1, 2] rois_tmp = [1, 2] # type of intput and rois must be variable - 
self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, - x_tmp, rois, 7, 7) - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, x, - rois_tmp, 7, 7) + self.assertRaises( + TypeError, fluid.layers.roi_perspective_transform, x_tmp, rois, 7, 7 + ) + self.assertRaises( + TypeError, fluid.layers.roi_perspective_transform, x, rois_tmp, 7, 7 + ) # dtype of intput and rois must be float32 - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, - x_int, rois, 7, 7) - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, x, - rois_int, 7, 7) + self.assertRaises( + TypeError, fluid.layers.roi_perspective_transform, x_int, rois, 7, 7 + ) + self.assertRaises( + TypeError, fluid.layers.roi_perspective_transform, x, rois_int, 7, 7 + ) height = 7.5 width = 7.5 # type of transformed_height and transformed_width must be int - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, x, - rois, height, 7) - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, x, - rois, 7, width) + self.assertRaises( + TypeError, + fluid.layers.roi_perspective_transform, + x, + rois, + height, + 7, + ) + self.assertRaises( + TypeError, fluid.layers.roi_perspective_transform, x, rois, 7, width + ) scale = int(2) # type of spatial_scale must be float - self.assertRaises(TypeError, fluid.layers.roi_perspective_transform, x, - rois, 7, 7, scale) + self.assertRaises( + TypeError, + fluid.layers.roi_perspective_transform, + x, + rois, + 7, + 7, + scale, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index 16ff2f57fb8f0cca776786022c86102796a6beac..2fcd3eda287f76c5cc2d8954db8c959a47ac6589 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -33,7 +33,6 @@ def _round(x): class TestROIPoolOp(OpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -42,13 +41,13 @@ class TestROIPoolOp(OpTest): self.inputs = { 'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': self.boxes_num + 'RoisNum': self.boxes_num, } self.attrs = { 'spatial_scale': self.spatial_scale, 'pooled_height': self.pooled_height, - 'pooled_width': self.pooled_width + 'pooled_width': self.pooled_width, } self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} @@ -69,10 +68,22 @@ class TestROIPoolOp(OpTest): self.x = np.random.random(self.x_dim).astype('float64') def calc_roi_pool(self): - out_data = np.zeros((self.rois_num, self.channels, self.pooled_height, - self.pooled_width)) - argmax_data = np.zeros((self.rois_num, self.channels, - self.pooled_height, self.pooled_width)) + out_data = np.zeros( + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ) + argmax_data = np.zeros( + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ) for i in range(self.rois_num): roi = self.rois[i] @@ -115,8 +126,9 @@ class TestROIPoolOp(OpTest): for w in range(wstart, wend): if x_i[c, h, w] > out_data[i, c, ph, pw]: out_data[i, c, ph, pw] = x_i[c, h, w] - argmax_data[i, c, ph, - pw] = h * self.width + w + argmax_data[i, c, ph, pw] = ( + h * self.width + w + ) self.outs = out_data.astype('float64') self.argmaxes = argmax_data.astype('int64') @@ -128,26 +140,32 @@ class TestROIPoolOp(OpTest): self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.randint( - 0, self.width // self.spatial_scale - 
self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.randint( - 0, self.height // self.spatial_scale - self.pooled_height) + 0, self.height // self.spatial_scale - self.pooled_height + ) - x2 = np.random.randint(x1 + self.pooled_width, - self.width // self.spatial_scale) - y2 = np.random.randint(y1 + self.pooled_height, - self.height // self.spatial_scale) + x2 = np.random.randint( + x1 + self.pooled_width, self.width // self.spatial_scale + ) + y2 = np.random.randint( + y1 + self.pooled_height, self.height // self.spatial_scale + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) self.rois_num = len(rois) self.rois = np.array(rois).astype("float64") - self.boxes_num = np.array([bno + 1 for bno in range(self.batch_size) - ]).astype('int32') + self.boxes_num = np.array( + [bno + 1 for bno in range(self.batch_size)] + ).astype('int32') def setUp(self): self.op_type = "roi_pool" self.python_api = lambda x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale: paddle.vision.ops.roi_pool( - x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale) + x, boxes, boxes_num, (pooled_height, pooled_width), spatial_scale + ) self.python_out_sig = ["Out"] self.set_data() @@ -159,27 +177,27 @@ class TestROIPoolOp(OpTest): class BadInputTestRoiPool(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): - x = fluid.layers.data(name='data1', - shape=[2, 1, 4, 4], - dtype='int64') - label = fluid.layers.data(name='label', - shape=[2, 4], - dtype='float32', - lod_level=1) + x = fluid.layers.data( + name='data1', shape=[2, 1, 4, 4], dtype='int64' + ) + label = fluid.layers.data( + name='label', shape=[2, 4], dtype='float32', lod_level=1 + ) output = fluid.layers.roi_pool(x, label, 1, 1, 1.0) self.assertRaises(TypeError, test_bad_x) def test_bad_y(): - x = fluid.layers.data(name='data2', - shape=[2, 1, 4, 4], - dtype='float32', - append_batch_size=False) + x = fluid.layers.data( + name='data2', + shape=[2, 1, 4, 4], + dtype='float32', + append_batch_size=False, + ) label = [[1, 2, 3, 4], [2, 3, 4, 5]] output = fluid.layers.roi_pool(x, label, 1, 1, 1.0) @@ -187,7 +205,6 @@ class BadInputTestRoiPool(unittest.TestCase): class TestROIPoolInLodOp(TestROIPoolOp): - def set_data(self): self.init_test_case() self.make_rois() @@ -198,13 +215,13 @@ class TestROIPoolInLodOp(TestROIPoolOp): self.inputs = { 'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': np.asarray(seq_len).astype('int32') + 'RoisNum': np.asarray(seq_len).astype('int32'), } self.attrs = { 'spatial_scale': self.spatial_scale, 'pooled_height': self.pooled_height, - 'pooled_width': self.pooled_width + 'pooled_width': self.pooled_width, } self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} diff --git a/python/paddle/fluid/tests/unittests/test_roll_op.py b/python/paddle/fluid/tests/unittests/test_roll_op.py index 43e56efd6429b56fada8fb2512725aee880c7761..bf11b0d86f6688775777c16f4d2a26fc9eb191ca 100644 --- a/python/paddle/fluid/tests/unittests/test_roll_op.py +++ b/python/paddle/fluid/tests/unittests/test_roll_op.py @@ -21,7 +21,6 @@ from paddle.fluid import Program, program_guard class TestRollOp(OpTest): - def setUp(self): self.python_api = paddle.roll self.op_type = "roll" @@ -29,8 +28,9 @@ class TestRollOp(OpTest): self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)} self.attrs = {'shifts': self.shifts, 'axis': self.axis} self.outputs = { - 'Out': - np.roll(self.inputs['X'], self.attrs['shifts'], 
self.attrs['axis']) + 'Out': np.roll( + self.inputs['X'], self.attrs['shifts'], self.attrs['axis'] + ) } def init_dtype_type(self): @@ -47,7 +47,6 @@ class TestRollOp(OpTest): class TestRollOpCase2(TestRollOp): - def init_dtype_type(self): self.dtype = np.float32 self.x_shape = (100, 10, 5) @@ -56,10 +55,10 @@ class TestRollOpCase2(TestRollOp): class TestRollAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], - [7.0, 8.0, 9.0]]) + self.data_x = np.array( + [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]] + ) def test_roll_op_api(self): self.input_data() @@ -70,11 +69,12 @@ class TestRollAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1, 3]) z = paddle.roll(x, shifts=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': self.data_x}, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], - [6.0, 7.0, 8.0]]) + (res,) = exe.run( + feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False + ) + expect_out = np.array( + [[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: @@ -82,11 +82,12 @@ class TestRollAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1, 3]) z = paddle.roll(x, shifts=1, axis=0) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': self.data_x}, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], - [4.0, 5.0, 6.0]]) + (res,) = exe.run( + feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False + ) + expect_out = np.array( + [[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): @@ -96,8 +97,9 @@ class TestRollAPI(unittest.TestCase): x = fluid.dygraph.to_variable(self.data_x) z = paddle.roll(x, shifts=1) np_z = z.numpy() - expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], - [6.0, 7.0, 8.0]]) + expect_out = np.array( + [[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: @@ -105,8 +107,9 @@ class TestRollAPI(unittest.TestCase): x = fluid.dygraph.to_variable(self.data_x) z = paddle.roll(x, shifts=1, axis=0) np_z = z.numpy() - expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], - [4.0, 5.0, 6.0]]) + expect_out = np.array( + [[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) def test_roll_op_false(self): @@ -117,9 +120,11 @@ class TestRollAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[-1, 3]) z = paddle.roll(x, shifts=1, axis=10) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': self.data_x}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': self.data_x}, + fetch_list=[z.name], + return_numpy=False, + ) self.assertRaises(ValueError, test_axis_out_range) diff --git a/python/paddle/fluid/tests/unittests/test_rot90_op.py b/python/paddle/fluid/tests/unittests/test_rot90_op.py index 6a1cbcaccda734e0a803ba6c59047b2ea9c4a593..1cf3a21e9d0211e56c194b6a8eefd88a115f229a 100644 --- a/python/paddle/fluid/tests/unittests/test_rot90_op.py +++ b/python/paddle/fluid/tests/unittests/test_rot90_op.py @@ -37,15 +37,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - 
fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_k_0(self): paddle.enable_static() @@ -62,15 +64,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_k_2(self): paddle.enable_static() @@ -87,15 +91,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[6, 5, 4], [3, 2, 1]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_k_3(self): paddle.enable_static() @@ -112,15 +118,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_neg_k_1(self): paddle.enable_static() @@ -137,15 +145,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_neg_k_2(self): paddle.enable_static() @@ -162,15 +172,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[6, 5, 4], [3, 2, 1]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + 
msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_neg_k_3(self): paddle.enable_static() @@ -187,15 +199,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[3, 6], [2, 5], [1, 4]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_static_neg_k_4(self): paddle.enable_static() @@ -212,15 +226,17 @@ class TestRot90_API(unittest.TestCase): exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - res = exe.run(train_program, - feed={'input': img}, - fetch_list=[output]) + res = exe.run( + train_program, feed={'input': img}, fetch_list=[output] + ) out_np = np.array(res[0]) out_ref = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - self.assertTrue((out_np == out_ref).all(), - msg='rot90 output is wrong, out =' + str(out_np)) + self.assertTrue( + (out_np == out_ref).all(), + msg='rot90 output is wrong, out =' + str(out_np), + ) def test_error_api(self): paddle.enable_static() @@ -269,7 +285,8 @@ class TestRot90_API(unittest.TestCase): self.assertTrue( (ret.numpy() == out_ref).all(), - msg='rot90 output is wrong, out =' + str(ret.numpy())) + msg='rot90 output is wrong, out =' + str(ret.numpy()), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py index 6350b25724744a0cf1d98869f114e9808dd867cd..6f4df64ce503e91b9ef537d69dd7df108be1d28c 100644 --- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py @@ -43,7 +43,6 @@ def row_conv_forward(x, lod, wt): class TestRowConvOp1(OpTest): - def setUp(self): self.op_type = "row_conv" @@ -66,20 +65,17 @@ class TestRowConvOp1(OpTest): self.check_grad(['X', 'Filter'], 'Out', check_dygraph=False) def test_check_grad_ignore_x(self): - self.check_grad(['Filter'], - 'Out', - no_grad_set=set('X'), - check_dygraph=False) + self.check_grad( + ['Filter'], 'Out', no_grad_set=set('X'), check_dygraph=False + ) def test_check_grad_ignore_wt(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Filter'), - check_dygraph=False) + self.check_grad( + ['X'], 'Out', no_grad_set=set('Filter'), check_dygraph=False + ) class TestRowConvOp2(OpTest): - def setUp(self): self.op_type = "row_conv" @@ -98,28 +94,31 @@ class TestRowConvOp2(OpTest): def test_check_output(self): self.check_output(check_dygraph=False) - #max_relative_error is increased from 0.05 to 0.06 as for higher - #dimensional input, the dX on CPU for some values has max_rel_error - #slightly more than 0.05 + # max_relative_error is increased from 0.05 to 0.06 as for higher + # dimensional input, the dX on CPU for some values has max_rel_error + # slightly more than 0.05 def test_check_grad_normal(self): - self.check_grad(['X', 'Filter'], - 'Out', - max_relative_error=0.06, - check_dygraph=False) + self.check_grad( + ['X', 'Filter'], 'Out', max_relative_error=0.06, check_dygraph=False + ) def test_check_grad_ignore_x(self): - self.check_grad(['Filter'], - 'Out', - max_relative_error=0.06, - no_grad_set=set('X'), - check_dygraph=False) + 
self.check_grad( + ['Filter'], + 'Out', + max_relative_error=0.06, + no_grad_set=set('X'), + check_dygraph=False, + ) def test_check_grad_ignore_wt(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.06, - no_grad_set=set('Filter'), - check_dygraph=False) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.06, + no_grad_set=set('Filter'), + check_dygraph=False, + ) def row_conv_foward_Tensor(x, wt): @@ -128,8 +127,8 @@ def row_conv_foward_Tensor(x, wt): timesteps = x.shape[1] context_length = wt.shape[0] for i in range(num_sequence): - cur_in = x[i:i + 1, :][0] - cur_out = out[i:i + 1, :][0] + cur_in = x[i : i + 1, :][0] + cur_out = out[i : i + 1, :][0] for j in range(timesteps): for k in range(context_length): if j + k >= timesteps: @@ -139,7 +138,6 @@ def row_conv_foward_Tensor(x, wt): class TestRowOpWithTensorInput(OpTest): - def setUp(self): self.op_type = "row_conv" length = [1, 2, 3] @@ -159,23 +157,20 @@ class TestRowOpWithTensorInput(OpTest): self.check_output(check_dygraph=False) def test_check_grad_ignore_x(self): - self.check_grad(['Filter'], - 'Out', - no_grad_set=set('X'), - check_dygraph=False) + self.check_grad( + ['Filter'], 'Out', no_grad_set=set('X'), check_dygraph=False + ) def test_check_grad_normal(self): self.check_grad(['X', 'Filter'], 'Out', check_dygraph=False) def test_check_grad_ignore_wt(self): - self.check_grad(['X'], - 'Out', - no_grad_set=set('Filter'), - check_dygraph=False) + self.check_grad( + ['X'], 'Out', no_grad_set=set('Filter'), check_dygraph=False + ) class TestRowConvLayer(unittest.TestCase): - def setUp(self): self.B = 2 self.T = 6 @@ -183,8 +178,9 @@ class TestRowConvLayer(unittest.TestCase): self.context_length = 6 self.x = np.random.random((self.B, self.T, self.C)).astype("float32") - self.w = np.random.random( - (self.context_length, self.C)).astype("float32") + self.w = np.random.random((self.context_length, self.C)).astype( + "float32" + ) self.out = row_conv_foward_Tensor(self.x, self.w) def check_identity(self): @@ -196,11 +192,12 @@ class TestRowConvLayer(unittest.TestCase): out = fluid.layers.row_conv( x, self.context_length, - param_attr=fluid.initializer.NumpyArrayInitializer(self.w)) + param_attr=fluid.initializer.NumpyArrayInitializer(self.w), + ) place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(start) - out_np, = exe.run(main, feed={'x': self.x}, fetch_list=[out]) + (out_np,) = exe.run(main, feed={'x': self.x}, fetch_list=[out]) np.testing.assert_allclose(out_np, self.out) diff --git a/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py index de3e8e211365e0fccfb13c8aeb662d34bd3a4a03..ce37fedaf59ff5068eeaa13b6fe8ff9486a69ba2 100644 --- a/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py @@ -22,33 +22,37 @@ from test_generate_proposal_labels_op import _generate_groundtruth from test_generate_proposal_labels_op import _bbox_overlaps, _box_to_delta -def rpn_target_assign(anchor_by_gt_overlap, - rpn_batch_size_per_im, - rpn_positive_overlap, - rpn_negative_overlap, - rpn_fg_fraction, - use_random=True): +def rpn_target_assign( + anchor_by_gt_overlap, + rpn_batch_size_per_im, + rpn_positive_overlap, + rpn_negative_overlap, + rpn_fg_fraction, + use_random=True, +): anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1) anchor_to_gt_max = anchor_by_gt_overlap[ - np.arange(anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax] + 
np.arange(anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax + ] gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0) gt_to_anchor_max = anchor_by_gt_overlap[ - gt_to_anchor_argmax, - np.arange(anchor_by_gt_overlap.shape[1])] + gt_to_anchor_argmax, np.arange(anchor_by_gt_overlap.shape[1]) + ] anchors_with_max_overlap = np.where( - anchor_by_gt_overlap == gt_to_anchor_max)[0] + anchor_by_gt_overlap == gt_to_anchor_max + )[0] - labels = np.ones((anchor_by_gt_overlap.shape[0], ), dtype=np.int32) * -1 + labels = np.ones((anchor_by_gt_overlap.shape[0],), dtype=np.int32) * -1 labels[anchors_with_max_overlap] = 1 labels[anchor_to_gt_max >= rpn_positive_overlap] = 1 num_fg = int(rpn_fg_fraction * rpn_batch_size_per_im) fg_inds = np.where(labels == 1)[0] if len(fg_inds) > num_fg and use_random: - disable_inds = np.random.choice(fg_inds, - size=(len(fg_inds) - num_fg), - replace=False) + disable_inds = np.random.choice( + fg_inds, size=(len(fg_inds) - num_fg), replace=False + ) else: disable_inds = fg_inds[num_fg:] @@ -87,26 +91,30 @@ def rpn_target_assign(anchor_by_gt_overlap, def get_anchor(n, c, h, w): input_feat = np.random.random((n, c, h, w)).astype('float32') - anchors, _ = anchor_generator_in_python(input_feat=input_feat, - anchor_sizes=[32., 64.], - aspect_ratios=[0.5, 1.0], - variances=[1.0, 1.0, 1.0, 1.0], - stride=[16.0, 16.0], - offset=0.5) + anchors, _ = anchor_generator_in_python( + input_feat=input_feat, + anchor_sizes=[32.0, 64.0], + aspect_ratios=[0.5, 1.0], + variances=[1.0, 1.0, 1.0, 1.0], + stride=[16.0, 16.0], + offset=0.5, + ) return anchors -def rpn_target_assign_in_python(all_anchors, - gt_boxes, - is_crowd, - im_info, - lod, - rpn_straddle_thresh, - rpn_batch_size_per_im, - rpn_positive_overlap, - rpn_negative_overlap, - rpn_fg_fraction, - use_random=True): +def rpn_target_assign_in_python( + all_anchors, + gt_boxes, + is_crowd, + im_info, + lod, + rpn_straddle_thresh, + rpn_batch_size_per_im, + rpn_positive_overlap, + rpn_negative_overlap, + rpn_fg_fraction, + use_random=True, +): anchor_num = all_anchors.shape[0] batch_size = len(lod) - 1 for i in range(batch_size): @@ -119,7 +127,8 @@ def rpn_target_assign_in_python(all_anchors, (all_anchors[:, 0] >= -rpn_straddle_thresh) & (all_anchors[:, 1] >= -rpn_straddle_thresh) & (all_anchors[:, 2] < im_width + rpn_straddle_thresh) - & (all_anchors[:, 3] < im_height + rpn_straddle_thresh))[0] + & (all_anchors[:, 3] < im_height + rpn_straddle_thresh) + )[0] # keep only inside anchors inside_anchors = all_anchors[inds_inside, :] else: @@ -134,19 +143,29 @@ def rpn_target_assign_in_python(all_anchors, gt_boxes_slice = gt_boxes_slice[not_crowd_inds] iou = _bbox_overlaps(inside_anchors, gt_boxes_slice) - loc_inds, score_inds, labels, gt_inds, bbox_inside_weight = \ - rpn_target_assign(iou, rpn_batch_size_per_im, - rpn_positive_overlap, - rpn_negative_overlap, - rpn_fg_fraction, - use_random) + ( + loc_inds, + score_inds, + labels, + gt_inds, + bbox_inside_weight, + ) = rpn_target_assign( + iou, + rpn_batch_size_per_im, + rpn_positive_overlap, + rpn_negative_overlap, + rpn_fg_fraction, + use_random, + ) # unmap to all anchor loc_inds = inds_inside[loc_inds] score_inds = inds_inside[score_inds] sampled_gt = gt_boxes_slice[gt_inds] sampled_anchor = all_anchors[loc_inds] - box_deltas = _box_to_delta(sampled_anchor, sampled_gt, [1., 1., 1., 1.]) + box_deltas = _box_to_delta( + sampled_anchor, sampled_gt, [1.0, 1.0, 1.0, 1.0] + ) if i == 0: loc_indexes = loc_inds @@ -156,31 +175,43 @@ def rpn_target_assign_in_python(all_anchors, 
bbox_inside_weights = bbox_inside_weight else: loc_indexes = np.concatenate( - [loc_indexes, loc_inds + i * anchor_num]) + [loc_indexes, loc_inds + i * anchor_num] + ) score_indexes = np.concatenate( - [score_indexes, score_inds + i * anchor_num]) + [score_indexes, score_inds + i * anchor_num] + ) tgt_labels = np.concatenate([tgt_labels, labels]) tgt_bboxes = np.vstack([tgt_bboxes, box_deltas]) - bbox_inside_weights = np.vstack([bbox_inside_weights, \ - bbox_inside_weight]) - - return loc_indexes, score_indexes, tgt_bboxes, tgt_labels, bbox_inside_weights - - -def retinanet_target_assign(anchor_by_gt_overlap, gt_labels, positive_overlap, - negative_overlap): + bbox_inside_weights = np.vstack( + [bbox_inside_weights, bbox_inside_weight] + ) + + return ( + loc_indexes, + score_indexes, + tgt_bboxes, + tgt_labels, + bbox_inside_weights, + ) + + +def retinanet_target_assign( + anchor_by_gt_overlap, gt_labels, positive_overlap, negative_overlap +): anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1) anchor_to_gt_max = anchor_by_gt_overlap[ - np.arange(anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax] + np.arange(anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax + ] gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0) gt_to_anchor_max = anchor_by_gt_overlap[ - gt_to_anchor_argmax, - np.arange(anchor_by_gt_overlap.shape[1])] + gt_to_anchor_argmax, np.arange(anchor_by_gt_overlap.shape[1]) + ] anchors_with_max_overlap = np.where( - anchor_by_gt_overlap == gt_to_anchor_max)[0] + anchor_by_gt_overlap == gt_to_anchor_max + )[0] - labels = np.ones((anchor_by_gt_overlap.shape[0], ), dtype=np.int32) * -1 + labels = np.ones((anchor_by_gt_overlap.shape[0],), dtype=np.int32) * -1 labels[anchors_with_max_overlap] = 1 labels[anchor_to_gt_max >= positive_overlap] = 1 @@ -209,15 +240,22 @@ def retinanet_target_assign(anchor_by_gt_overlap, gt_labels, positive_overlap, gt_inds = anchor_to_gt_argmax[loc_index] label_inds = anchor_to_gt_argmax[score_index_tmp] - labels[0:len(fg_inds)] = np.squeeze(gt_labels[label_inds]) + labels[0 : len(fg_inds)] = np.squeeze(gt_labels[label_inds]) fg_num = len(fg_fake_inds) + len(fg_inds) + 1 assert not np.any(labels == -1), "Wrong labels with -1" return loc_index, score_index, labels, gt_inds, bbox_inside_weight, fg_num -def retinanet_target_assign_in_python(all_anchors, gt_boxes, gt_labels, - is_crowd, im_info, lod, positive_overlap, - negative_overlap): +def retinanet_target_assign_in_python( + all_anchors, + gt_boxes, + gt_labels, + is_crowd, + im_info, + lod, + positive_overlap, + negative_overlap, +): anchor_num = all_anchors.shape[0] batch_size = len(lod) - 1 for i in range(batch_size): @@ -235,16 +273,25 @@ def retinanet_target_assign_in_python(all_anchors, gt_boxes, gt_labels, gt_labels_slice = gt_labels_slice[not_crowd_inds] iou = _bbox_overlaps(inside_anchors, gt_boxes_slice) - loc_inds, score_inds, labels, gt_inds, bbox_inside_weight, fg_num = \ - retinanet_target_assign(iou, gt_labels_slice, - positive_overlap, negative_overlap) + ( + loc_inds, + score_inds, + labels, + gt_inds, + bbox_inside_weight, + fg_num, + ) = retinanet_target_assign( + iou, gt_labels_slice, positive_overlap, negative_overlap + ) # unmap to all anchor loc_inds = inds_inside[loc_inds] score_inds = inds_inside[score_inds] sampled_gt = gt_boxes_slice[gt_inds] sampled_anchor = all_anchors[loc_inds] - box_deltas = _box_to_delta(sampled_anchor, sampled_gt, [1., 1., 1., 1.]) + box_deltas = _box_to_delta( + sampled_anchor, sampled_gt, [1.0, 1.0, 1.0, 1.0] + ) if i == 0: loc_indexes = 
loc_inds @@ -255,20 +302,29 @@ def retinanet_target_assign_in_python(all_anchors, gt_boxes, gt_labels, fg_nums = [[fg_num]] else: loc_indexes = np.concatenate( - [loc_indexes, loc_inds + i * anchor_num]) + [loc_indexes, loc_inds + i * anchor_num] + ) score_indexes = np.concatenate( - [score_indexes, score_inds + i * anchor_num]) + [score_indexes, score_inds + i * anchor_num] + ) tgt_labels = np.concatenate([tgt_labels, labels]) tgt_bboxes = np.vstack([tgt_bboxes, box_deltas]) - bbox_inside_weights = np.vstack([bbox_inside_weights, \ - bbox_inside_weight]) + bbox_inside_weights = np.vstack( + [bbox_inside_weights, bbox_inside_weight] + ) fg_nums = np.concatenate([fg_nums, [[fg_num]]]) - return loc_indexes, score_indexes, tgt_bboxes, tgt_labels, bbox_inside_weights, fg_nums + return ( + loc_indexes, + score_indexes, + tgt_bboxes, + tgt_labels, + bbox_inside_weights, + fg_nums, + ) class TestRpnTargetAssignOp(OpTest): - def setUp(self): n, c, h, w = 2, 4, 14, 14 all_anchors = get_anchor(n, c, h, w) @@ -277,16 +333,16 @@ class TestRpnTargetAssignOp(OpTest): anchor_num = all_anchors.shape[0] images_shape = [[64, 64], [64, 64]] - #images_shape = [[64, 64]] + # images_shape = [[64, 64]] groundtruth, lod = _generate_groundtruth(images_shape, 3, 4) lod = [0, 4, 8] - #lod = [0, 4] + # lod = [0, 4] im_info = np.ones((len(images_shape), 3)).astype(np.float32) for i in range(len(images_shape)): im_info[i, 0] = images_shape[i][0] im_info[i, 1] = images_shape[i][1] - im_info[i, 2] = 0.8 #scale + im_info[i, 2] = 0.8 # scale gt_boxes = np.vstack([v['boxes'] for v in groundtruth]) is_crowd = np.hstack([v['is_crowd'] for v in groundtruth]) @@ -300,12 +356,25 @@ class TestRpnTargetAssignOp(OpTest): rpn_fg_fraction = 0.5 use_random = False - loc_index, score_index, tgt_bbox, labels, bbox_inside_weights = \ - rpn_target_assign_in_python(all_anchors, gt_boxes, is_crowd, - im_info, lod, rpn_straddle_thresh, - rpn_batch_size_per_im, rpn_positive_overlap, - rpn_negative_overlap, - rpn_fg_fraction, use_random) + ( + loc_index, + score_index, + tgt_bbox, + labels, + bbox_inside_weights, + ) = rpn_target_assign_in_python( + all_anchors, + gt_boxes, + is_crowd, + im_info, + lod, + rpn_straddle_thresh, + rpn_batch_size_per_im, + rpn_positive_overlap, + rpn_negative_overlap, + rpn_fg_fraction, + use_random, + ) labels = labels[:, np.newaxis] self.op_type = "rpn_target_assign" @@ -313,7 +382,7 @@ class TestRpnTargetAssignOp(OpTest): 'Anchor': all_anchors, 'GtBoxes': (gt_boxes, [[4, 4]]), 'IsCrowd': (is_crowd, [[4, 4]]), - 'ImInfo': (im_info, [[1, 1]]) + 'ImInfo': (im_info, [[1, 1]]), } self.attrs = { 'rpn_batch_size_per_im': rpn_batch_size_per_im, @@ -321,14 +390,14 @@ class TestRpnTargetAssignOp(OpTest): 'rpn_positive_overlap': rpn_positive_overlap, 'rpn_negative_overlap': rpn_negative_overlap, 'rpn_fg_fraction': rpn_fg_fraction, - 'use_random': use_random + 'use_random': use_random, } self.outputs = { 'LocationIndex': loc_index.astype('int32'), 'ScoreIndex': score_index.astype('int32'), 'TargetBBox': tgt_bbox.astype('float32'), 'TargetLabel': labels.astype('int32'), - 'BBoxInsideWeight': bbox_inside_weights.astype('float32') + 'BBoxInsideWeight': bbox_inside_weights.astype('float32'), } def test_check_output(self): @@ -336,7 +405,6 @@ class TestRpnTargetAssignOp(OpTest): class TestRetinanetTargetAssignOp(OpTest): - def setUp(self): n, c, h, w = 2, 4, 14, 14 all_anchors = get_anchor(n, c, h, w) @@ -352,13 +420,15 @@ class TestRetinanetTargetAssignOp(OpTest): for i in range(len(images_shape)): im_info[i, 0] = 
images_shape[i][0] im_info[i, 1] = images_shape[i][1] - im_info[i, 2] = 0.8 #scale + im_info[i, 2] = 0.8 # scale gt_boxes = np.vstack([v['boxes'] for v in groundtruth]) is_crowd = np.hstack([v['is_crowd'] for v in groundtruth]) - gt_labels = np.vstack([ - v['gt_classes'].reshape(len(v['gt_classes']), 1) - for v in groundtruth - ]) + gt_labels = np.vstack( + [ + v['gt_classes'].reshape(len(v['gt_classes']), 1) + for v in groundtruth + ] + ) gt_labels = gt_labels.reshape(len(gt_labels), 1) all_anchors = all_anchors.astype('float32') gt_boxes = gt_boxes.astype('float32') @@ -367,9 +437,23 @@ class TestRetinanetTargetAssignOp(OpTest): positive_overlap = 0.5 negative_overlap = 0.4 - loc_index, score_index, tgt_bbox, labels, bbox_inside_weights, fg_num = \ - retinanet_target_assign_in_python(all_anchors, gt_boxes, gt_labels, is_crowd, - im_info, lod, positive_overlap, negative_overlap) + ( + loc_index, + score_index, + tgt_bbox, + labels, + bbox_inside_weights, + fg_num, + ) = retinanet_target_assign_in_python( + all_anchors, + gt_boxes, + gt_labels, + is_crowd, + im_info, + lod, + positive_overlap, + negative_overlap, + ) labels = labels[:, np.newaxis] self.op_type = "retinanet_target_assign" self.inputs = { @@ -377,11 +461,11 @@ class TestRetinanetTargetAssignOp(OpTest): 'GtBoxes': (gt_boxes, [[4, 4]]), 'GtLabels': (gt_labels, [[4, 4]]), 'IsCrowd': (is_crowd, [[4, 4]]), - 'ImInfo': (im_info, [[1, 1]]) + 'ImInfo': (im_info, [[1, 1]]), } self.attrs = { 'positive_overlap': positive_overlap, - 'negative_overlap': negative_overlap + 'negative_overlap': negative_overlap, } self.outputs = { 'LocationIndex': loc_index.astype('int32'), @@ -389,7 +473,7 @@ class TestRetinanetTargetAssignOp(OpTest): 'TargetBBox': tgt_bbox.astype('float32'), 'TargetLabel': labels.astype('int32'), 'BBoxInsideWeight': bbox_inside_weights.astype('float32'), - 'ForegroundNumber': fg_num.astype('int32') + 'ForegroundNumber': fg_num.astype('int32'), } def test_check_output(self): @@ -397,181 +481,420 @@ class TestRetinanetTargetAssignOp(OpTest): class TestRetinanetTargetAssignOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - bbox_pred1 = fluid.data(name='bbox_pred1', - shape=[1, 100, 4], - dtype='float32') - cls_logits1 = fluid.data(name='cls_logits1', - shape=[1, 100, 10], - dtype='float32') - anchor_box1 = fluid.data(name='anchor_box1', - shape=[100, 4], - dtype='float32') - anchor_var1 = fluid.data(name='anchor_var1', - shape=[100, 4], - dtype='float32') - gt_boxes1 = fluid.data(name='gt_boxes1', - shape=[10, 4], - dtype='float32') - gt_labels1 = fluid.data(name='gt_labels1', - shape=[10, 1], - dtype='int32') + bbox_pred1 = fluid.data( + name='bbox_pred1', shape=[1, 100, 4], dtype='float32' + ) + cls_logits1 = fluid.data( + name='cls_logits1', shape=[1, 100, 10], dtype='float32' + ) + anchor_box1 = fluid.data( + name='anchor_box1', shape=[100, 4], dtype='float32' + ) + anchor_var1 = fluid.data( + name='anchor_var1', shape=[100, 4], dtype='float32' + ) + gt_boxes1 = fluid.data( + name='gt_boxes1', shape=[10, 4], dtype='float32' + ) + gt_labels1 = fluid.data( + name='gt_labels1', shape=[10, 1], dtype='int32' + ) is_crowd1 = fluid.data(name='is_crowd1', shape=[1], dtype='float32') - im_info1 = fluid.data(name='im_info1', - shape=[1, 3], - dtype='float32') + im_info1 = fluid.data( + name='im_info1', shape=[1, 3], dtype='float32' + ) # The `bbox_pred` must be Variable and the data type of `bbox_pred` Tensor # one of float32 and float64. 
def test_bbox_pred_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign([1], cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + [1], + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_bbox_pred_type) def test_bbox_pred_tensor_dtype(): - bbox_pred2 = fluid.data(name='bbox_pred2', - shape=[1, 100, 4], - dtype='intt32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred2, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + bbox_pred2 = fluid.data( + name='bbox_pred2', shape=[1, 100, 4], dtype='intt32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred2, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_bbox_pred_tensor_dtype) # The `cls_logits` must be Variable and the data type of `cls_logits` Tensor # one of float32 and float64. def test_cls_logits_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, 2, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + 2, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_cls_logits_type) def test_cls_logits_tensor_dtype(): - cls_logits2 = fluid.data(name='cls_logits2', - shape=[1, 100, 10], - dtype='int32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits2, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + cls_logits2 = fluid.data( + name='cls_logits2', shape=[1, 100, 10], dtype='int32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits2, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_cls_logits_tensor_dtype) # The `anchor_box` must be Variable and the data type of `anchor_box` Tensor # one of float32 and float64. 
def test_anchor_box_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, [5], - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + [5], + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_anchor_box_type) def test_anchor_box_tensor_dtype(): - anchor_box2 = fluid.data(name='anchor_box2', - shape=[100, 4], - dtype='int32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box2, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + anchor_box2 = fluid.data( + name='anchor_box2', shape=[100, 4], dtype='int32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box2, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_anchor_box_tensor_dtype) # The `anchor_var` must be Variable and the data type of `anchor_var` Tensor # one of float32 and float64. def test_anchor_var_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - 5, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + 5, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_anchor_var_type) def test_anchor_var_tensor_dtype(): - anchor_var2 = fluid.data(name='anchor_var2', - shape=[100, 4], - dtype='int32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var2, gt_boxes1, gt_labels1, is_crowd1, im_info1, 10) + anchor_var2 = fluid.data( + name='anchor_var2', shape=[100, 4], dtype='int32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var2, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_anchor_var_tensor_dtype) # The `gt_boxes` must be Variable and the data type of `gt_boxes` Tensor # one of float32 and float64. 
def test_gt_boxes_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, [4], gt_labels1, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + [4], + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_gt_boxes_type) def test_gt_boxes_tensor_dtype(): - gt_boxes2 = fluid.data(name='gt_boxes2', - shape=[10, 4], - dtype='int32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes2, gt_labels1, is_crowd1, im_info1, 10) + gt_boxes2 = fluid.data( + name='gt_boxes2', shape=[10, 4], dtype='int32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes2, + gt_labels1, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_gt_boxes_tensor_dtype) # The `gt_label` must be Variable and the data type of `gt_label` Tensor # int32. def test_gt_label_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, 9, is_crowd1, im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + 9, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_gt_label_type) def test_gt_label_tensor_dtype(): - gt_labels2 = fluid.data(name='label2', - shape=[10, 1], - dtype='float32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels2, is_crowd1, im_info1, 10) + gt_labels2 = fluid.data( + name='label2', shape=[10, 1], dtype='float32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels2, + is_crowd1, + im_info1, + 10, + ) self.assertRaises(TypeError, test_gt_label_tensor_dtype) # The `is_crowd` must be Variable and the data type of `is_crowd` Tensor # int32. 
def test_is_crowd_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, [10], im_info1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + [10], + im_info1, + 10, + ) self.assertRaises(TypeError, test_is_crowd_type) def test_is_crowd_tensor_dtype(): - is_crowd2 = fluid.data(name='is_crowd2', - shape=[10, 1], - dtype='float32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd2, im_info1, 10) + is_crowd2 = fluid.data( + name='is_crowd2', shape=[10, 1], dtype='float32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd2, + im_info1, + 10, + ) self.assertRaises(TypeError, test_is_crowd_tensor_dtype) # The `im_info` must be Variable and the data type of `im_info` Tensor # must be one of float32 and float64. def test_im_info_type(): - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, 1, 10) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + 1, + 10, + ) self.assertRaises(TypeError, test_im_info_type) def test_im_info_tensor_dtype(): - im_info2 = fluid.data(name='im_info2', - shape=[1, 3], - dtype='int32') - score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \ - fluid.layers.retinanet_target_assign(bbox_pred1, cls_logits1, anchor_box1, - anchor_var1, gt_boxes1, gt_labels1, is_crowd1, im_info2, 10) + im_info2 = fluid.data( + name='im_info2', shape=[1, 3], dtype='int32' + ) + ( + score_pred, + loc_pred, + score_target, + loc_target, + bbox_inside_weight, + fg_num, + ) = fluid.layers.retinanet_target_assign( + bbox_pred1, + cls_logits1, + anchor_box1, + anchor_var1, + gt_boxes1, + gt_labels1, + is_crowd1, + im_info2, + 10, + ) self.assertRaises(TypeError, test_im_info_tensor_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_rrelu_op.py b/python/paddle/fluid/tests/unittests/test_rrelu_op.py index 6701802b44329536c727d2a816179e510e46f696..9da00696fa393cc0f41ec51c231dca3ba865b133 100644 --- a/python/paddle/fluid/tests/unittests/test_rrelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_rrelu_op.py @@ -42,9 +42,8 @@ def check_output(input, output, lower, upper): class TestFunctionalRReluAPI(unittest.TestCase): - def setUp(self): - self.x_np = np.random.uniform(-1., 1., [1, 2, 3, 4]).astype('float64') + self.x_np = np.random.uniform(-1.0, 1.0, [1, 2, 3, 4]).astype('float64') self.lower_0 = 0.05 self.lower_1 = 0.1 self.upper_0 = 0.25 @@ -52,36 +51,39 @@ class TestFunctionalRReluAPI(unittest.TestCase): self.places = [ fluid.CUDAPlace(0) - if core.is_compiled_with_cuda() else fluid.CPUPlace() + if core.is_compiled_with_cuda() + else fluid.CPUPlace() ] def 
check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", - shape=[2, 3, 4, 5], - dtype="float32") - res1 = F.rrelu(x=input, - lower=self.lower_0, - upper=self.upper_0, - training=False) - res2 = F.rrelu(x=input, - lower=self.lower_1, - upper=self.upper_1, - training=False) - in_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype("float32") + input = fluid.data( + name="input", shape=[2, 3, 4, 5], dtype="float32" + ) + res1 = F.rrelu( + x=input, lower=self.lower_0, upper=self.upper_0, training=False + ) + res2 = F.rrelu( + x=input, lower=self.lower_1, upper=self.upper_1, training=False + ) + in_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype("float32") res_np1 = ref_rrelu(in_np, self.lower_0, self.upper_0) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res1]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res1], + ) np.testing.assert_allclose(fetches[0], res_np1, rtol=1e-05) res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1) - fetches = exe.run(fluid.default_main_program(), - feed={"input": in_np}, - fetch_list=[res2]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": in_np}, + fetch_list=[res2], + ) np.testing.assert_allclose(fetches[0], res_np2, rtol=1e-05) def test_static(self): @@ -93,48 +95,55 @@ class TestFunctionalRReluAPI(unittest.TestCase): for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=self.x_np.shape, - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=self.x_np.shape, - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=self.x_np.shape, dtype="float64" + ) + x_2 = paddle.fluid.data( + name="x2", shape=self.x_np.shape, dtype="float64" + ) out_1 = F.rrelu(x_1, self.lower_0, self.upper_0, training=False) out_2 = F.rrelu(x_2, self.lower_1, self.upper_1, training=False) out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True) exe = paddle.static.Executor(place=place) - res_1, = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=out_1, - use_prune=True) - res_2, = exe.run(fluid.default_main_program(), - feed={"x2": self.x_np}, - fetch_list=out_2, - use_prune=True) - res_3, = exe.run(fluid.default_main_program(), - feed={"x2": self.x_np}, - fetch_list=out_3, - use_prune=True) + (res_1,) = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=out_1, + use_prune=True, + ) + (res_2,) = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_np}, + fetch_list=out_2, + use_prune=True, + ) + (res_3,) = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_np}, + fetch_list=out_3, + use_prune=True, + ) out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0) out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1) np.testing.assert_allclose(out_ref_1, res_1, rtol=1e-05) np.testing.assert_allclose(out_ref_2, res_2, rtol=1e-05) self.assertTrue( - check_output(self.x_np, res_3[0], self.lower_1, self.upper_1)) + check_output(self.x_np, res_3[0], self.lower_1, self.upper_1) + ) def test_static_graph_layer(self): '''test_static_graph_layer''' for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data(name="x", - shape=self.x_np.shape, - dtype="float64") - x_2 = paddle.fluid.data(name="x2", - shape=self.x_np.shape, - dtype="float64") + x_1 = paddle.fluid.data( + name="x", shape=self.x_np.shape, dtype="float64" + ) + x_2 = 
paddle.fluid.data( + name="x2", shape=self.x_np.shape, dtype="float64" + ) # init instance rrelu_1 = paddle.nn.RReLU(self.lower_0, self.upper_0) rrelu_2 = paddle.nn.RReLU(self.lower_1, self.upper_1) @@ -142,19 +151,25 @@ class TestFunctionalRReluAPI(unittest.TestCase): out_2 = rrelu_2(x_2) exe = paddle.static.Executor(place=place) - res_1 = exe.run(fluid.default_main_program(), - feed={"x": self.x_np}, - fetch_list=out_1, - use_prune=True) - res_2 = exe.run(fluid.default_main_program(), - feed={"x2": self.x_np}, - fetch_list=out_2, - use_prune=True) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": self.x_np}, + fetch_list=out_1, + use_prune=True, + ) + res_2 = exe.run( + fluid.default_main_program(), + feed={"x2": self.x_np}, + fetch_list=out_2, + use_prune=True, + ) self.assertTrue( - check_output(self.x_np, res_1[0], self.lower_0, self.upper_0)) + check_output(self.x_np, res_1[0], self.lower_0, self.upper_0) + ) self.assertTrue( - check_output(self.x_np, res_2[0], self.lower_1, self.upper_1)) + check_output(self.x_np, res_2[0], self.lower_1, self.upper_1) + ) def dygraph_check(self, lower, upper): for place in self.places: @@ -179,8 +194,10 @@ class TestFunctionalRReluAPI(unittest.TestCase): rrelu = paddle.nn.RReLU(self.lower_0, self.upper_0) result = rrelu(paddle.to_tensor(self.x_np)) self.assertTrue( - check_output(self.x_np, result.numpy(), self.lower_0, - self.upper_0)) + check_output( + self.x_np, result.numpy(), self.lower_0, self.upper_0 + ) + ) paddle.enable_static() def test_dygraph(self): @@ -190,67 +207,64 @@ class TestFunctionalRReluAPI(unittest.TestCase): rrelu = paddle.nn.RReLU(self.lower_0, self.upper_0) out_np = rrelu(paddle.to_tensor(self.x_np)) self.assertTrue( - check_output(self.x_np, out_np.numpy(), self.lower_0, - self.upper_0)) + check_output( + self.x_np, out_np.numpy(), self.lower_0, self.upper_0 + ) + ) paddle.enable_static() def test_error_functional(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. - self.assertRaises(TypeError, - F.rrelu, - x=1, - lower=self.lower_0, - upper=self.upper_0) + self.assertRaises( + TypeError, F.rrelu, x=1, lower=self.lower_0, upper=self.upper_0 + ) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data(name='x_int32', - shape=[2, 3], - dtype='int32') - self.assertRaises(TypeError, - F.rrelu, - x=x_int32, - lower=self.lower_0, - upper=self.upper_0) - x_bool = paddle.fluid.data(name='x_bool', - shape=[2, 3], - dtype='int32') - self.assertRaises(TypeError, - F.rrelu, - x=x_bool, - lower=self.lower_0, - upper=self.upper_0) + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 3], dtype='int32' + ) + self.assertRaises( + TypeError, + F.rrelu, + x=x_int32, + lower=self.lower_0, + upper=self.upper_0, + ) + x_bool = paddle.fluid.data( + name='x_bool', shape=[2, 3], dtype='int32' + ) + self.assertRaises( + TypeError, + F.rrelu, + x=x_bool, + lower=self.lower_0, + upper=self.upper_0, + ) # lower and upper must be float - x_fp32 = paddle.fluid.data(name='x_fp32', - shape=[2, 3], - dtype='float32') + x_fp32 = paddle.fluid.data( + name='x_fp32', shape=[2, 3], dtype='float32' + ) self.assertRaises(TypeError, F.rrelu, x=x_fp32, lower=0, upper=0.5) self.assertRaises(TypeError, F.rrelu, x=x_fp32, lower=0.5, upper=1) # lower and upper must be in (0, 1) - self.assertRaises(ValueError, - F.rrelu, - x=x_fp32, - lower=-1., - upper=0.5) - self.assertRaises(ValueError, - F.rrelu, - x=x_fp32, - lower=0.5, - upper=2.) 
+ self.assertRaises( + ValueError, F.rrelu, x=x_fp32, lower=-1.0, upper=0.5 + ) + self.assertRaises( + ValueError, F.rrelu, x=x_fp32, lower=0.5, upper=2.0 + ) # upper should not be less than lower - self.assertRaises(ValueError, - F.rrelu, - x=x_fp32, - lower=0.5, - upper=0.2) + self.assertRaises( + ValueError, F.rrelu, x=x_fp32, lower=0.5, upper=0.2 + ) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[2, 3], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[2, 3], dtype='float16' + ) F.rrelu(x=x_fp16, lower=self.lower_0, upper=self.upper_0) def test_error_layer(self): - def error_int_dtype(): with paddle.fluid.dygraph.guard(): x = np.random.random([2, 3]).astype("float64") @@ -296,7 +310,6 @@ class TestFunctionalRReluAPI(unittest.TestCase): class RReluTest(OpTest): - def setUp(self): self.op_type = "rrelu" self.lower = 0.1 @@ -318,7 +331,7 @@ class RReluTest(OpTest): self.attrs = { 'lower': self.lower, "upper": self.upper, - "is_test": self.is_test + "is_test": self.is_test, } def test_check_output(self): @@ -329,7 +342,6 @@ class RReluTest(OpTest): class RReluTrainingTest(OpTest): - def setUp(self): self.op_type = "rrelu" self.lower = 0.3 @@ -339,7 +351,6 @@ class RReluTrainingTest(OpTest): class RReluTrainingTest(OpTest): - def setUp(self): self.op_type = "rrelu" self.lower = 0.3 diff --git a/python/paddle/fluid/tests/unittests/test_run.py b/python/paddle/fluid/tests/unittests/test_run.py index 15af313b44ceeeced767839c619f6b12dd6a86b3..514384c0192f72970e385f4837e305708e9295c2 100644 --- a/python/paddle/fluid/tests/unittests/test_run.py +++ b/python/paddle/fluid/tests/unittests/test_run.py @@ -50,13 +50,13 @@ def write_file(name, ct): def get_files(pth, prefix): return [ - f for f in listdir(pth) + f + for f in listdir(pth) if isfile(join(pth, f)) and not f.endswith('gpu.log') ] class Collective_Test(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.path = os.path.join(self.temp_dir.name, pyname) @@ -87,7 +87,8 @@ class Collective_Test(unittest.TestCase): def test_collective_2(self): log_dir = tempfile.TemporaryDirectory() args = "--job_id test2 --devices 0,1,2 --log_dir {}".format( - log_dir.name) + log_dir.name + ) p = self.pdrun(args) p.wait() self.assertTrue(p.poll() == 0) @@ -116,7 +117,6 @@ class Collective_Test(unittest.TestCase): class PS_Test(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.path = os.path.join(self.temp_dir.name, pyname) @@ -143,8 +143,11 @@ class PS_Test(unittest.TestCase): def test_ps_2(self): log_dir = tempfile.TemporaryDirectory() - args = "--job_id ps2 --server_num=2 --trainer_num=2 --log_dir {}".format( - log_dir.name) + args = ( + "--job_id ps2 --server_num=2 --trainer_num=2 --log_dir {}".format( + log_dir.name + ) + ) p = self.pdrun(args) p.wait() self.assertTrue(p.poll() == 0) @@ -174,7 +177,8 @@ class PS_Test(unittest.TestCase): def test_ps_4(self): log_dir = tempfile.TemporaryDirectory() args = "--job_id ps4 --log_dir {} --servers 127.0.0.1:8900,127.0.0.1:8901 --trainers 127.0.0.1:8902,127.0.0.1:8903".format( - log_dir.name) + log_dir.name + ) p1 = self.pdrun(args) p1.wait() self.assertTrue(p1.poll() == 0) diff --git a/python/paddle/fluid/tests/unittests/test_run_fluid_by_module_or_command_line.py b/python/paddle/fluid/tests/unittests/test_run_fluid_by_module_or_command_line.py index d59c9db637f7401d303d9d383bddac303bd5eea1..df626dc6dded7e64c78b4eb809dc8635c19d029e 100644 --- 
a/python/paddle/fluid/tests/unittests/test_run_fluid_by_module_or_command_line.py +++ b/python/paddle/fluid/tests/unittests/test_run_fluid_by_module_or_command_line.py @@ -18,7 +18,6 @@ import sys class TestRunFluidByModule(unittest.TestCase): - def test_module(self): print(sys.executable) res = os.system(sys.executable + ' -m "paddle.fluid.reader"') @@ -26,7 +25,6 @@ class TestRunFluidByModule(unittest.TestCase): class TestRunFluidByCommand(unittest.TestCase): - def test_command(self): res = os.system(sys.executable + ' -c "import paddle.fluid"') self.assertEqual(res, 0) # 0 means status OK diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py index 51538266b5daf9b04528ebd1660de6d8ec1c42e1..c2f871bd5760fa32ad2ee711003db3b02a66c348 100644 --- a/python/paddle/fluid/tests/unittests/test_run_program_op.py +++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py @@ -22,7 +22,10 @@ import paddle.fluid as fluid from paddle.fluid import core, framework from paddle.fluid.layers.utils import _hash_with_id from paddle.fluid.framework import _in_eager_mode_ -from paddle.fluid.executor import _is_enable_standalone_executor, _is_dy2st_enable_standalone_executor +from paddle.fluid.executor import ( + _is_enable_standalone_executor, + _is_dy2st_enable_standalone_executor, +) from paddle.fluid.dygraph.base import switch_to_static_graph paddle.enable_static() @@ -43,9 +46,11 @@ def program_scope_guard(): def _add_build_strategy_for(input_program, start_op_index, end_op_index): compiled_program = paddle.static.CompiledProgram( core.Graph(input_program.desc, start_op_index, end_op_index), - build_strategy=paddle.static.BuildStrategy()) - compiled_program._compile(core.Scope(), - paddle.framework._current_expected_place()) + build_strategy=paddle.static.BuildStrategy(), + ) + compiled_program._compile( + core.Scope(), paddle.framework._current_expected_place() + ) ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph) builded_program = ir_graph.to_program() return builded_program @@ -70,10 +75,10 @@ def _build_program_by_desc(program_desc): # when create Operator, so here compare gradients with static graph # NOTE: Here rewrite a simple unittest framework for RunProgramOp class RunProgramOpTest(unittest.TestCase): - def build_model(self): raise NotImplementedError( - "RunProgramOp test should implement build_model") + "RunProgramOp test should implement build_model" + ) def check_output(self): places = [fluid.CPUPlace()] @@ -108,9 +113,9 @@ class RunProgramOpTest(unittest.TestCase): else: fetch_list = self.get_param_grad_names() - outs = exe.run(main_program, - feed=self.inputs['X'], - fetch_list=fetch_list) + outs = exe.run( + main_program, feed=self.inputs['X'], fetch_list=fetch_list + ) return outs def get_program_desc(self): @@ -118,21 +123,28 @@ class RunProgramOpTest(unittest.TestCase): fwd_op_num = self.build_model() return fluid.default_main_program().desc, fwd_op_num - def get_forward_backward_program_desc(self, whole_program_desc, - forward_op_num, output_num): + def get_forward_backward_program_desc( + self, whole_program_desc, forward_op_num, output_num + ): program = _build_program_by_desc(whole_program_desc) forward_program = _add_build_strategy_for(program, 0, forward_op_num) backward_program = _add_build_strategy_for( - program, forward_op_num + 2 * output_num, - program.desc.block(0).op_size()) + program, + forward_op_num + 2 * output_num, + program.desc.block(0).op_size(), + ) return 
forward_program.desc, backward_program.desc def prepare_attrs(self): return [ 'global_block', - self.program_desc.block(0), 'start_op_index', 0, 'end_op_index', - self.fwd_op_num, 'program_id', - _hash_with_id(self.program_desc, self) + self.program_desc.block(0), + 'start_op_index', + 0, + 'end_op_index', + self.fwd_op_num, + 'program_id', + _hash_with_id(self.program_desc, self), ] def get_param_grad_names(self): @@ -147,10 +159,9 @@ class RunProgramOpTest(unittest.TestCase): # Step 2. compare output for expect_v, actual_v in zip(self.expect_outs, actual_outs): - np.testing.assert_allclose(expect_v, - actual_v.numpy(), - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + expect_v, actual_v.numpy(), rtol=1e-05, atol=1e-05 + ) def check_grad_with_place(self, place): # Step 1. calc grads @@ -159,24 +170,20 @@ class RunProgramOpTest(unittest.TestCase): # Step 2. compare grads for expect_v, actual_v in zip(self.expect_grads, actual_grads): np.testing.assert_array_almost_equal(expect_v, actual_v) - np.testing.assert_allclose(expect_v, - actual_v, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + expect_v, actual_v, rtol=1e-05, atol=1e-05 + ) def prepare_dygraph_input(self, place, return_param_list=False): - def create_var_base(is_input, name, np_value, stop_gradient): if _in_eager_mode_: - var = core.eager.Tensor(value=np_value, - name=name, - place=place, - zero_copy=True) + var = core.eager.Tensor( + value=np_value, name=name, place=place, zero_copy=True + ) else: - var = core.VarBase(value=np_value, - name=name, - place=place, - zero_copy=True) + var = core.VarBase( + value=np_value, name=name, place=place, zero_copy=True + ) var.stop_gradient = stop_gradient return var @@ -199,7 +206,6 @@ class RunProgramOpTest(unittest.TestCase): return inputs def prepare_dygraph_output(self): - def create_var_base(is_input, name): var = framework._varbase_creator(dtype=None, shape=None, name=name) var.stop_gradient = False @@ -217,7 +223,8 @@ class RunProgramOpTest(unittest.TestCase): outputs['OutScope'] = framework._varbase_creator( type=core.VarDesc.VarType.STEP_SCOPES, name="program_out_scope", - persistable=True) + persistable=True, + ) inner_scope = core.Scope() outputs['OutScope'].value().set_scope(inner_scope) @@ -232,20 +239,37 @@ class RunProgramOpTest(unittest.TestCase): inputs = self.prepare_dygraph_input(place) outputs = self.prepare_dygraph_output() - forward_program_desc, backward_program_desc = self.get_forward_backward_program_desc( - self.program_desc, self.fwd_op_num, len(outputs['Out'])) - - use_interpretorcore = _is_enable_standalone_executor( - ) and _is_dy2st_enable_standalone_executor() + ( + forward_program_desc, + backward_program_desc, + ) = self.get_forward_backward_program_desc( + self.program_desc, self.fwd_op_num, len(outputs['Out']) + ) + + use_interpretorcore = ( + _is_enable_standalone_executor() + and _is_dy2st_enable_standalone_executor() + ) self.attrs.extend(('use_interpretorcore', use_interpretorcore)) if use_interpretorcore: self.attrs.extend( - ('forward_global_block', forward_program_desc.block(0), - 'backward_global_block', backward_program_desc.block(0))) - - _legacy_C_ops.run_program(inputs['X'], inputs['Params'], - outputs['Out'], outputs['OutScope'], - outputs['DOut'], None, *self.attrs) + ( + 'forward_global_block', + forward_program_desc.block(0), + 'backward_global_block', + backward_program_desc.block(0), + ) + ) + + _legacy_C_ops.run_program( + inputs['X'], + inputs['Params'], + outputs['Out'], + outputs['OutScope'], + outputs['DOut'], + 
None, + *self.attrs + ) return outputs['Out'] @@ -258,20 +282,37 @@ class RunProgramOpTest(unittest.TestCase): inputs, input_param_list = self.prepare_dygraph_input(place, True) outputs = self.prepare_dygraph_output() - forward_program_desc, backward_program_desc = self.get_forward_backward_program_desc( - self.program_desc, self.fwd_op_num, len(outputs['Out'])) - - use_interpretorcore = _is_enable_standalone_executor( - ) and _is_dy2st_enable_standalone_executor() + ( + forward_program_desc, + backward_program_desc, + ) = self.get_forward_backward_program_desc( + self.program_desc, self.fwd_op_num, len(outputs['Out']) + ) + + use_interpretorcore = ( + _is_enable_standalone_executor() + and _is_dy2st_enable_standalone_executor() + ) self.attrs.extend(('use_interpretorcore', use_interpretorcore)) if use_interpretorcore: self.attrs.extend( - ('forward_global_block', forward_program_desc.block(0), - 'backward_global_block', backward_program_desc.block(0))) - - _legacy_C_ops.run_program(inputs['X'], inputs['Params'], - outputs['Out'], outputs['OutScope'], - outputs['DOut'], None, *self.attrs) + ( + 'forward_global_block', + forward_program_desc.block(0), + 'backward_global_block', + backward_program_desc.block(0), + ) + ) + + _legacy_C_ops.run_program( + inputs['X'], + inputs['Params'], + outputs['Out'], + outputs['OutScope'], + outputs['DOut'], + None, + *self.attrs + ) for param in input_param_list: var_type = self._get_grad_vartype(param.name) @@ -302,27 +343,29 @@ class RunProgramOpTest(unittest.TestCase): class TestRunProgramOpWithFC(RunProgramOpTest): - def setUp(self): self.op_type = "run_program" self.dtype = np.float32 self.input_names = { 'X': ['img'], - 'Params': ['weight_param', 'bias_param'] + 'Params': ['weight_param', 'bias_param'], } self.output_names = {'Out': ['fc_0.tmp_2']} self.inputs = { 'X': { - self.input_names['X'][0]: - np.random.random((32, 1, 28, 28)).astype(self.dtype) + self.input_names['X'][0]: np.random.random( + (32, 1, 28, 28) + ).astype(self.dtype) }, 'Params': { - self.input_names['Params'][0]: - np.random.random((784, 10)).astype(self.dtype), - self.input_names['Params'][1]: - np.random.random((32, 10)).astype(self.dtype) - } + self.input_names['Params'][0]: np.random.random( + (784, 10) + ).astype(self.dtype), + self.input_names['Params'][1]: np.random.random( + (32, 10) + ).astype(self.dtype), + }, } def test_check_output(self): @@ -333,26 +376,34 @@ class TestRunProgramOpWithFC(RunProgramOpTest): def build_model(self): # 1. simple model - img = fluid.data(name=self.input_names['X'][0], - shape=[None, 1, 28, 28], - dtype='float32') + img = fluid.data( + name=self.input_names['X'][0], + shape=[None, 1, 28, 28], + dtype='float32', + ) weight_attr = fluid.ParamAttr( name=self.input_names['Params'][0], learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer( - self.inputs['Params'][self.input_names['Params'][0]]), - trainable=True) + self.inputs['Params'][self.input_names['Params'][0]] + ), + trainable=True, + ) bias_attr = fluid.ParamAttr( name=self.input_names['Params'][1], learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer( - self.inputs['Params'][self.input_names['Params'][1]]), - trainable=True) - pred = fluid.layers.fc(input=img, - size=10, - param_attr=weight_attr, - bias_attr=bias_attr, - act='relu') + self.inputs['Params'][self.input_names['Params'][1]] + ), + trainable=True, + ) + pred = fluid.layers.fc( + input=img, + size=10, + param_attr=weight_attr, + bias_attr=bias_attr, + act='relu', + ) # 2. 
get forward op num fwd_op_num = fluid.default_main_program().global_block().desc.op_size() # 3. append backward @@ -362,7 +413,6 @@ class TestRunProgramOpWithFC(RunProgramOpTest): class TestRunProgramOpWithEmbedding(RunProgramOpTest): - def setUp(self): self.op_type = "run_program" self.dtype = np.float32 @@ -370,12 +420,10 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest): self.output_names = {'Out': ['reduce_sum_0.tmp_0']} self.inputs = { - 'X': { - 'x': np.array([[1, 3, 0, 4, 7]]).astype("int64") - }, + 'X': {'x': np.array([[1, 3, 0, 4, 7]]).astype("int64")}, 'Params': { 'emb_weight': np.random.random(size=(10, 16)).astype("float32") - } + }, } def test_check_output(self): @@ -393,9 +441,9 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest): def build_model(self): # 1. simple model - x = fluid.layers.data(name=self.input_names['X'][0], - shape=[5], - dtype='int64') + x = fluid.layers.data( + name=self.input_names['X'][0], shape=[5], dtype='int64' + ) emb = fluid.input.embedding( input=x, size=[10, 16], @@ -403,8 +451,11 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest): name="emb_weight", learning_rate=10, initializer=fluid.initializer.NumpyArrayInitializer( - self.inputs['Params'][self.input_names['Params'][0]])), - is_sparse=True) + self.inputs['Params'][self.input_names['Params'][0]] + ), + ), + is_sparse=True, + ) y = fluid.layers.reduce_sum(emb, dim=-1) # 2. get forward op num fwd_op_num = fluid.default_main_program().global_block().desc.op_size() @@ -415,7 +466,6 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest): class Net(paddle.nn.Layer): - def __init__(self): super(Net, self).__init__() self.fc1 = paddle.nn.Linear(10, 10) @@ -429,7 +479,6 @@ class Net(paddle.nn.Layer): class TestParametersWithStopGradient(unittest.TestCase): - def setUp(self): self.seed = 2021 self.iter = 5 diff --git a/python/paddle/fluid/tests/unittests/test_runtime_and_compiletime_exception.py b/python/paddle/fluid/tests/unittests/test_runtime_and_compiletime_exception.py index 36ccb8e0322be0fbafbce6b140d68aa001402f57..d498035327ba4ad929258628693de7edcd58e3ac 100644 --- a/python/paddle/fluid/tests/unittests/test_runtime_and_compiletime_exception.py +++ b/python/paddle/fluid/tests/unittests/test_runtime_and_compiletime_exception.py @@ -19,7 +19,6 @@ import paddle.fluid as fluid class TestRunTimeException(unittest.TestCase): - def test_run_time_exception(self): place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -38,7 +37,6 @@ class TestRunTimeException(unittest.TestCase): class TestCompileTimeException(unittest.TestCase): - def test_compile_time_exception(self): self.assertRaises(ValueError, self.build_model) @@ -46,10 +44,9 @@ class TestCompileTimeException(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - label = fluid.layers.data(name="label", - shape=[1], - dtype="int64", - append_batch_size=False) + label = fluid.layers.data( + name="label", shape=[1], dtype="int64", append_batch_size=False + ) fluid.layers.one_hot(input=label, depth=100) diff --git a/python/paddle/fluid/tests/unittests/test_sample_logits_op.py b/python/paddle/fluid/tests/unittests/test_sample_logits_op.py index 749a32978beed9fec54c27c8b75bbd27c2f611e8..8676d8ff6e5fc8342eae4202a0748aaa2b199fa2 100644 --- a/python/paddle/fluid/tests/unittests/test_sample_logits_op.py +++ b/python/paddle/fluid/tests/unittests/test_sample_logits_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestSampleLogitsOp(OpTest): - 
def setUp(self): self.op_type = "sample_logits" self.dtype = np.float64 @@ -46,33 +45,49 @@ class TestSampleLogitsOp(OpTest): self.inputs = {"Logits": self.Logits, "Labels": self.Labels} self.fetch_list = [ - 'Samples', 'Probabilities', 'SampledLogits', 'SampledLabels' + 'Samples', + 'Probabilities', + 'SampledLogits', + 'SampledLabels', ] self.outputs = collections.OrderedDict( - (('Samples', Samples), ('Probabilities', Probabilities), - ('LogitsDim', LogitsDim), ('LabelsDim', LabelsDim), - ('SampledLogits', SampledLogits), ('SampledLabels', - SampledLabels))) + ( + ('Samples', Samples), + ('Probabilities', Probabilities), + ('LogitsDim', LogitsDim), + ('LabelsDim', LabelsDim), + ('SampledLogits', SampledLogits), + ('SampledLabels', SampledLabels), + ) + ) self.attrs = {'num_samples': self.S} def test_check_output(self): places = self._get_places() for p in places: - (Samples, Probabilities, SampledLogits, - SampledLabels) = [np.array(o) for o in self.calc_output(p)] - - assert Samples.dtype == np.int64, \ - "Samples dtype is {}, not int64".format(Samples.dtype) - assert Probabilities.dtype == np.float64, \ - "Probabilities dtype is {}, not float64".format( - Probabilities.dtype) - assert SampledLogits.dtype == np.float64, \ - "SampledLogits dtype is {}, not float64".format( - SampledLogits.dtype) - assert SampledLabels.dtype == np.int64, \ - "SampledLabels dtype is {}, not int64".format( - SampledLabels.dtype) + (Samples, Probabilities, SampledLogits, SampledLabels) = [ + np.array(o) for o in self.calc_output(p) + ] + + assert ( + Samples.dtype == np.int64 + ), "Samples dtype is {}, not int64".format(Samples.dtype) + assert ( + Probabilities.dtype == np.float64 + ), "Probabilities dtype is {}, not float64".format( + Probabilities.dtype + ) + assert ( + SampledLogits.dtype == np.float64 + ), "SampledLogits dtype is {}, not float64".format( + SampledLogits.dtype + ) + assert ( + SampledLabels.dtype == np.int64 + ), "SampledLabels dtype is {}, not int64".format( + SampledLabels.dtype + ) assert Samples.shape == (self.bs, self.NT + self.S) assert Probabilities.shape == (self.bs, self.NT + self.S) @@ -80,27 +95,26 @@ class TestSampleLogitsOp(OpTest): assert SampledLabels.shape == (self.bs, self.NT) assert (SampledLabels == self.Labels).all() - sampled_logits = self.Logits[:, Samples[0][:self.NT]] - sampled_logits -= np.log(Probabilities[:, :self.NT]) - np.testing.assert_almost_equal(sampled_logits, - SampledLogits[:, :self.NT]) + sampled_logits = self.Logits[:, Samples[0][: self.NT]] + sampled_logits -= np.log(Probabilities[:, : self.NT]) + np.testing.assert_almost_equal( + sampled_logits, SampledLogits[:, : self.NT] + ) def test_check_grad(self): self._check_grad_helper() for p in self._get_places(): grads = self._get_gradient(['Logits'], p, ['SampledLogits'], []) - np.testing.assert_almost_equal(grads[0].sum(), np.array([1.])) + np.testing.assert_almost_equal(grads[0].sum(), np.array([1.0])) class TestSampleLogitsOpNoUniq(TestSampleLogitsOp): - def setUp(self): super(TestSampleLogitsOpNoUniq, self).setUp() self.attrs = {'num_samples': self.S, 'uniq': False} class TestSampleLogitsOpWithAccidentalHits(TestSampleLogitsOp): - def setUp(self): super(TestSampleLogitsOpWithAccidentalHits, self).setUp() self.attrs = {'num_samples': self.S, 'remove_accidental_hits': False} diff --git a/python/paddle/fluid/tests/unittests/test_sampling_id_op.py b/python/paddle/fluid/tests/unittests/test_sampling_id_op.py index 3481db22907c1ee06bac91ff87d9a1d36f4fd110..2e73a90abdb5b8540ec5414da40691d8c1baaf81 100644 
--- a/python/paddle/fluid/tests/unittests/test_sampling_id_op.py +++ b/python/paddle/fluid/tests/unittests/test_sampling_id_op.py @@ -20,7 +20,6 @@ import paddle class TestSamplingIdShape(unittest.TestCase): - def test_shape(self): paddle.enable_static() x = fluid.layers.data(name='x', shape=[3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py b/python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py index 240c0c8e129de5541d993a16e4ebe38392c5de14..399be5ee60de944ba5cf14659e8a4e2ebd7dd666 100644 --- a/python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py +++ b/python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py @@ -28,14 +28,13 @@ def getModelOp(model_path): result = set() for i in range(0, size): - #print(main_block.op(i).type()) + # print(main_block.op(i).type()) result.add(main_block.op(i).type()) return result class WhileNet(paddle.nn.Layer): - def __init__(self): super(WhileNet, self).__init__() @@ -53,7 +52,6 @@ class WhileNet(paddle.nn.Layer): class ForNet(paddle.nn.Layer): - def __init__(self): super(ForNet, self).__init__() @@ -67,7 +65,6 @@ class ForNet(paddle.nn.Layer): class IfElseNet(paddle.nn.Layer): - def __init__(self): super(IfElseNet, self).__init__() @@ -81,68 +78,91 @@ class IfElseNet(paddle.nn.Layer): class TestConditionalOp(unittest.TestCase): - def test_while_op(self): paddle.disable_static() net = WhileNet() - net = paddle.jit.to_static(net, - input_spec=[ - paddle.static.InputSpec( - shape=[1, 3, 8, 8], dtype='float32') - ]) + net = paddle.jit.to_static( + net, + input_spec=[ + paddle.static.InputSpec(shape=[1, 3, 8, 8], dtype='float32') + ], + ) root_path = tempfile.TemporaryDirectory() model_file = os.path.join(root_path.name, "while_net") paddle.jit.save(net, model_file) - right_pdmodel = set([ - "uniform_random", "shape", "slice", "not_equal", "while", - "elementwise_add" - ]) + right_pdmodel = set( + [ + "uniform_random", + "shape", + "slice", + "not_equal", + "while", + "elementwise_add", + ] + ) paddle.enable_static() pdmodel = getModelOp(model_file + ".pdmodel") self.assertTrue( len(right_pdmodel.difference(pdmodel)) == 0, - "The while op is pruned by mistake.") + "The while op is pruned by mistake.", + ) root_path.cleanup() def test_for_op(self): paddle.disable_static() net = ForNet() net = paddle.jit.to_static( - net, input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')]) + net, input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')] + ) root_path = tempfile.TemporaryDirectory() model_file = os.path.join(root_path.name, "for_net") paddle.jit.save(net, model_file) - right_pdmodel = set([ - "randint", "fill_constant", "cast", "less_than", "while", - "elementwise_add" - ]) + right_pdmodel = set( + [ + "randint", + "fill_constant", + "cast", + "less_than", + "while", + "elementwise_add", + ] + ) paddle.enable_static() pdmodel = getModelOp(model_file + ".pdmodel") self.assertTrue( len(right_pdmodel.difference(pdmodel)) == 0, - "The for op is pruned by mistake.") + "The for op is pruned by mistake.", + ) root_path.cleanup() def test_if_op(self): paddle.disable_static() net = IfElseNet() net = paddle.jit.to_static( - net, input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')]) + net, input_spec=[paddle.static.InputSpec(shape=[1], dtype='int32')] + ) root_path = tempfile.TemporaryDirectory() model_file = os.path.join(root_path.name, "if_net") paddle.jit.save(net, model_file) - right_pdmodel = set([ - 
"assign_value", "greater_than", "cast", "conditional_block", - "logical_not", "select_input" - ]) + right_pdmodel = set( + [ + "assign_value", + "greater_than", + "cast", + "conditional_block", + "logical_not", + "select_input", + ] + ) paddle.enable_static() pdmodel = getModelOp(model_file + ".pdmodel") self.assertTrue( len(right_pdmodel.difference(pdmodel)) == 0, - "The if op is pruned by mistake.") + "The if op is pruned by mistake.", + ) root_path.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_save_model_without_var.py b/python/paddle/fluid/tests/unittests/test_save_model_without_var.py index e8cad31514398443d79635cf79b66181117ccda7..ac02edc42e7f8dfd2be7d1ff119644b8fe655a7d 100644 --- a/python/paddle/fluid/tests/unittests/test_save_model_without_var.py +++ b/python/paddle/fluid/tests/unittests/test_save_model_without_var.py @@ -18,12 +18,10 @@ import paddle.fluid as fluid class TestSaveModelWithoutVar(unittest.TestCase): - def test_no_var_save(self): - data = fluid.layers.data(name='data', - shape=[-1, 1], - dtype='float32', - append_batch_size=False) + data = fluid.layers.data( + name='data', shape=[-1, 1], dtype='float32', append_batch_size=False + ) data_plus = data + 1 if fluid.core.is_compiled_with_cuda(): @@ -37,12 +35,14 @@ class TestSaveModelWithoutVar(unittest.TestCase): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - fluid.io.save_inference_model(dirname='test', - feeded_var_names=['data'], - target_vars=[data_plus], - executor=exe, - model_filename='model', - params_filename='params') + fluid.io.save_inference_model( + dirname='test', + feeded_var_names=['data'], + target_vars=[data_plus], + executor=exe, + model_filename='model', + params_filename='params', + ) expected_warn = "no variable in your model, please ensure there are any variables in your model to save" self.assertTrue(len(w) > 0) self.assertTrue(expected_warn == str(w[-1].message)) diff --git a/python/paddle/fluid/tests/unittests/test_saved_tensors_hooks.py b/python/paddle/fluid/tests/unittests/test_saved_tensors_hooks.py index 02b9d146fe3c3ba5e8650b7cafe4af86974acce3..c945eee068a5478da428edda00d27b496892ea75 100644 --- a/python/paddle/fluid/tests/unittests/test_saved_tensors_hooks.py +++ b/python/paddle/fluid/tests/unittests/test_saved_tensors_hooks.py @@ -18,9 +18,7 @@ from paddle.autograd import PyLayer class TestSavedTensorsHooks(unittest.TestCase): - def test_save_for_multiply(self): - def pack_hook(x): return x.numpy() @@ -46,9 +44,7 @@ class TestSavedTensorsHooks(unittest.TestCase): self.assertTrue(paddle.equal_all(bb.grad, b.grad)) def test_save_for_pylayer(self): - class cus_multiply(PyLayer): - @staticmethod def forward(ctx, a, b): y = paddle.multiply(a, b) diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py index 6eb4d90d610ac2016037f0a7df20a11d030139c4..4fec89905373f1cc0b6ce0e9c4644e3657cdbe96 100644 --- a/python/paddle/fluid/tests/unittests/test_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_scale_op.py @@ -26,7 +26,6 @@ import paddle.fluid.layers as layers class TestScaleOp(OpTest): - def setUp(self): self.op_type = "scale" self.python_api = paddle.scale @@ -49,7 +48,6 @@ class TestScaleOp(OpTest): class TestScaleOpScaleVariable(OpTest): - def setUp(self): self.op_type = "scale" self.python_api = paddle.scale @@ -58,7 +56,7 @@ class TestScaleOpScaleVariable(OpTest): self.scale = -2.3 self.inputs = { 'X': np.random.random((10, 10)).astype(self.dtype), - 'ScaleTensor': 
np.array([self.scale]).astype('float64') + 'ScaleTensor': np.array([self.scale]).astype('float64'), } self.attrs = {} self.outputs = {'Out': self.inputs['X'] * self.dtype(self.scale)} @@ -74,7 +72,6 @@ class TestScaleOpScaleVariable(OpTest): class TestScaleOpSelectedRows(unittest.TestCase): - def init_dtype_type(self): pass @@ -93,8 +90,9 @@ class TestScaleOpSelectedRows(unittest.TestCase): in_selected_rows = scope.var(in_name).get_selected_rows() in_selected_rows.set_height(in_height) in_selected_rows.set_rows(in_rows) - in_array = np.random.random( - (len(in_rows), in_row_numel)).astype(self.dtype) + in_array = np.random.random((len(in_rows), in_row_numel)).astype( + self.dtype + ) in_tensor = in_selected_rows.get_tensor() in_tensor.set(in_array, place) @@ -133,9 +131,7 @@ class TestScaleOpSelectedRows(unittest.TestCase): class TestScaleRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.scale([10]) @@ -143,10 +139,10 @@ class TestScaleRaiseError(unittest.TestCase): # Add FP16 test -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScaleFp16Op(TestScaleOp): - def init_dtype_type(self): self.dtype = np.float16 @@ -158,14 +154,12 @@ class TestScaleFp16Op(TestScaleOp): def test_check_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ["X"], - "Out", - max_relative_error=0.05, - check_eager=True) + self.check_grad_with_place( + place, ["X"], "Out", max_relative_error=0.05, check_eager=True + ) class TestScaleBF16Op(OpTest): - def setUp(self): self.op_type = "scale" self.python_api = paddle.scale @@ -183,10 +177,10 @@ class TestScaleBF16Op(OpTest): self.check_grad(['X'], 'Out', numeric_grad_delta=0.8, check_eager=True) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows): - def init_dtype_type(self): self.dtype = np.float16 @@ -202,7 +196,6 @@ class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows): class TestScaleApiStatic(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, bias) @@ -220,13 +213,11 @@ class TestScaleApiStatic(unittest.TestCase): class TestScaleInplaceApiStatic(TestScaleApiStatic): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) class TestScaleApiDygraph(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, bias) @@ -240,13 +231,11 @@ class TestScaleApiDygraph(unittest.TestCase): class TestScaleInplaceApiDygraph(TestScaleApiDygraph): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) class TestScaleDoubleGradCheck(unittest.TestCase): - def scale_wrapper(self, x): return paddle.scale(x[0], scale=2.0) @@ -261,17 +250,13 @@ class TestScaleDoubleGradCheck(unittest.TestCase): out = paddle.scale(data, 2.0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.scale_wrapper, - [data], - out, - 
x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.scale_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -283,7 +268,6 @@ class TestScaleDoubleGradCheck(unittest.TestCase): class TestScaleTripleGradCheck(unittest.TestCase): - def scale_wrapper(self, x): return paddle.scale(x[0], scale=2.0) @@ -298,17 +282,13 @@ class TestScaleTripleGradCheck(unittest.TestCase): out = paddle.scale(data, 2.0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.scale_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.scale_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py index 031b45d3dd91344aa6818adfb4f0af38f9a33b5f..7f702482440e51cb1a717c0cdf7f358c6aad6191 100644 --- a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py +++ b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py @@ -19,83 +19,89 @@ from paddle.fluid import Program, program_guard class TestScaledDotProductAttentionError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - queries = fluid.data(name="queries", - shape=[3, 5, 9], - dtype="float32") + queries = fluid.data( + name="queries", shape=[3, 5, 9], dtype="float32" + ) keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") - values = fluid.data(name="values", - shape=[3, 6, 10], - dtype="float32") + values = fluid.data( + name="values", shape=[3, 6, 10], dtype="float32" + ) def test_queries_Variable(): queries_data = np.random.rand(3, 5, 9).astype("float32") - fluid.nets.scaled_dot_product_attention(queries_data, keys, - values) + fluid.nets.scaled_dot_product_attention( + queries_data, keys, values + ) self.assertRaises(TypeError, test_queries_Variable) def test_keys_Variable(): keys_data = np.random.rand(3, 6, 9).astype("float32") - fluid.nets.scaled_dot_product_attention(queries, keys_data, - values) + fluid.nets.scaled_dot_product_attention( + queries, keys_data, values + ) self.assertRaises(TypeError, test_keys_Variable) def test_values_Variable(): values_data = np.random.rand(3, 6, 10).astype("float32") - fluid.nets.scaled_dot_product_attention(queries, keys, - values_data) + fluid.nets.scaled_dot_product_attention( + queries, keys, values_data + ) self.assertRaises(TypeError, test_values_Variable) def test_diff_dtype(): - keys_error = fluid.data(name="keys_error", - shape=[3, 6, 9], - dtype="float64") - values_error = fluid.data(name="values_error", - shape=[3, 6, 10], - dtype="float64") - fluid.nets.scaled_dot_product_attention(queries, keys_error, - values_error) + keys_error = fluid.data( + name="keys_error", shape=[3, 6, 9], dtype="float64" + ) + values_error = fluid.data( + name="values_error", shape=[3, 6, 10], dtype="float64" + ) + fluid.nets.scaled_dot_product_attention( + queries, keys_error, values_error + ) self.assertRaises(TypeError, test_diff_dtype) def test_diff_dim(): - keys_error_dim = 
fluid.data(name="keys_error_dim", - shape=[3, 6], - dtype="float32") - values_error_dim = fluid.data(name="values_error_dim", - shape=[3], - dtype="float32") - fluid.nets.scaled_dot_product_attention(queries, keys_error_dim, - values_error_dim) + keys_error_dim = fluid.data( + name="keys_error_dim", shape=[3, 6], dtype="float32" + ) + values_error_dim = fluid.data( + name="values_error_dim", shape=[3], dtype="float32" + ) + fluid.nets.scaled_dot_product_attention( + queries, keys_error_dim, values_error_dim + ) self.assertRaises(ValueError, test_diff_dim) def test_diff_hidden_size(): - queries_error_hs = fluid.data(name="queries_error_hs", - shape=[3, 5, 9], - dtype="float32") - keys_error_hs = fluid.data(name="keys_error_hs", - shape=[3, 6, 10], - dtype="float32") - fluid.nets.scaled_dot_product_attention(queries_error_hs, - keys_error_hs, values) + queries_error_hs = fluid.data( + name="queries_error_hs", shape=[3, 5, 9], dtype="float32" + ) + keys_error_hs = fluid.data( + name="keys_error_hs", shape=[3, 6, 10], dtype="float32" + ) + fluid.nets.scaled_dot_product_attention( + queries_error_hs, keys_error_hs, values + ) self.assertRaises(ValueError, test_diff_hidden_size) def test_diff_max_len(): - keys_error_len = fluid.data(name="keys_error_len", - shape=[3, 7, 9], - dtype="float32") - values_error_len = fluid.data(name="values_error_len", - shape=[3, 6, 10], - dtype="float32") - fluid.nets.scaled_dot_product_attention(queries, keys_error_len, - values_error_len) + keys_error_len = fluid.data( + name="keys_error_len", shape=[3, 7, 9], dtype="float32" + ) + values_error_len = fluid.data( + name="values_error_len", shape=[3, 6, 10], dtype="float32" + ) + fluid.nets.scaled_dot_product_attention( + queries, keys_error_len, values_error_len + ) self.assertRaises(ValueError, test_diff_max_len) diff --git a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py index b9b0deb3b365c7f9b442853bdc8308c8b75655da..e9edd089f5d3c844f995ecd99447be58dce38038 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py @@ -115,8 +115,9 @@ class TestScatterNdAddWithHighRankSame(OpTest): self.python_api = paddle.scatter_nd_add shape = (3, 2, 2, 1, 10) ref_np = np.random.rand(*shape).astype("float64") - index_np = np.vstack([np.random.randint(0, s, size=100) - for s in shape]).T.astype("int32") + index_np = np.vstack( + [np.random.randint(0, s, size=100) for s in shape] + ).T.astype("int32") update_shape = judge_update_shape(ref_np, index_np) updates_np = np.random.rand(*update_shape).astype("float64") expect_np = numpy_scatter_nd_add(ref_np.copy(), index_np, updates_np) @@ -157,71 +158,89 @@ class TestScatterNdAddWithHighRankDiff(OpTest): self.check_grad(['X', 'Updates'], 'Out', check_eager=True) -#Test Python API +# Test Python API class TestScatterNdOpAPI(unittest.TestCase): """ test scatter_nd_add api and scatter_nd api """ def testcase1(self): - ref1 = fluid.layers.data(name='ref1', - shape=[10, 9, 8, 1, 3], - dtype='float32', - append_batch_size=False) - index1 = fluid.layers.data(name='index1', - shape=[5, 5, 8, 5], - dtype='int32', - append_batch_size=False) - updates1 = fluid.layers.data(name='update1', - shape=[5, 5, 8], - dtype='float32', - append_batch_size=False) + ref1 = fluid.layers.data( + name='ref1', + shape=[10, 9, 8, 1, 3], + dtype='float32', + append_batch_size=False, + ) + index1 = fluid.layers.data( + name='index1', + shape=[5, 5, 8, 5], + 
dtype='int32', + append_batch_size=False, + ) + updates1 = fluid.layers.data( + name='update1', + shape=[5, 5, 8], + dtype='float32', + append_batch_size=False, + ) output1 = fluid.layers.scatter_nd_add(ref1, index1, updates1) def testcase2(self): - ref2 = fluid.layers.data(name='ref2', - shape=[10, 9, 8, 1, 3], - dtype='double', - append_batch_size=False) - index2 = fluid.layers.data(name='index2', - shape=[5, 8, 5], - dtype='int32', - append_batch_size=False) - updates2 = fluid.layers.data(name='update2', - shape=[5, 8], - dtype='double', - append_batch_size=False) - output2 = fluid.layers.scatter_nd_add(ref2, - index2, - updates2, - name="scatter_nd_add") + ref2 = fluid.layers.data( + name='ref2', + shape=[10, 9, 8, 1, 3], + dtype='double', + append_batch_size=False, + ) + index2 = fluid.layers.data( + name='index2', + shape=[5, 8, 5], + dtype='int32', + append_batch_size=False, + ) + updates2 = fluid.layers.data( + name='update2', + shape=[5, 8], + dtype='double', + append_batch_size=False, + ) + output2 = fluid.layers.scatter_nd_add( + ref2, index2, updates2, name="scatter_nd_add" + ) def testcase3(self): shape3 = [10, 9, 8, 1, 3] - index3 = fluid.layers.data(name='index3', - shape=[5, 5, 8, 5], - dtype='int32', - append_batch_size=False) - updates3 = fluid.layers.data(name='update3', - shape=[5, 5, 8], - dtype='float32', - append_batch_size=False) + index3 = fluid.layers.data( + name='index3', + shape=[5, 5, 8, 5], + dtype='int32', + append_batch_size=False, + ) + updates3 = fluid.layers.data( + name='update3', + shape=[5, 5, 8], + dtype='float32', + append_batch_size=False, + ) output3 = fluid.layers.scatter_nd(index3, updates3, shape3) def testcase4(self): shape4 = [10, 9, 8, 1, 3] - index4 = fluid.layers.data(name='index4', - shape=[5, 5, 8, 5], - dtype='int32', - append_batch_size=False) - updates4 = fluid.layers.data(name='update4', - shape=[5, 5, 8], - dtype='double', - append_batch_size=False) - output4 = fluid.layers.scatter_nd(index4, - updates4, - shape4, - name='scatter_nd') + index4 = fluid.layers.data( + name='index4', + shape=[5, 5, 8, 5], + dtype='int32', + append_batch_size=False, + ) + updates4 = fluid.layers.data( + name='update4', + shape=[5, 5, 8], + dtype='double', + append_batch_size=False, + ) + output4 = fluid.layers.scatter_nd( + index4, updates4, shape4, name='scatter_nd' + ) def testcase5(self): if not fluid.core.is_compiled_with_cuda(): @@ -235,27 +254,32 @@ class TestScatterNdOpAPI(unittest.TestCase): with fluid.dygraph.guard(): device = paddle.get_device() paddle.set_device('gpu') - gpu_value = paddle.scatter_nd_add(paddle.to_tensor(x), - paddle.to_tensor(index), - paddle.to_tensor(val)) + gpu_value = paddle.scatter_nd_add( + paddle.to_tensor(x), + paddle.to_tensor(index), + paddle.to_tensor(val), + ) paddle.set_device('cpu') - cpu_value = paddle.scatter_nd_add(paddle.to_tensor(x), - paddle.to_tensor(index), - paddle.to_tensor(val)) + cpu_value = paddle.scatter_nd_add( + paddle.to_tensor(x), + paddle.to_tensor(index), + paddle.to_tensor(val), + ) np.testing.assert_array_equal(gpu_value.numpy(), cpu_value.numpy()) paddle.set_device(device) @switch_to_static_graph def test_static_graph(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x_t = paddle.static.data(name="x", dtype=x.dtype, shape=x.shape) - index_t = paddle.static.data(name="index", - dtype=index.dtype, - shape=index.shape) - val_t = paddle.static.data(name="val", - 
dtype=val.dtype, - shape=val.shape) + index_t = paddle.static.data( + name="index", dtype=index.dtype, shape=index.shape + ) + val_t = paddle.static.data( + name="val", dtype=val.dtype, shape=val.shape + ) out_t = paddle.scatter_nd_add(x_t, index_t, val_t) feed = {x_t.name: x, index_t.name: index, val_t.name: val} fetch = [out_t] @@ -269,26 +293,23 @@ class TestScatterNdOpAPI(unittest.TestCase): test_static_graph() -#Test Raise Error +# Test Raise Error class TestScatterNdOpRaise(unittest.TestCase): - def test_check_raise(self): - def check_raise_is_test(): try: - ref5 = fluid.layers.data(name='ref5', - shape=[3, 4, 5], - dtype='float32') - index5 = fluid.layers.data(name='index5', - shape=[2, 10], - dtype='int32') - updates5 = fluid.layers.data(name='updates5', - shape=[2, 10], - dtype='float32') + ref5 = fluid.layers.data( + name='ref5', shape=[3, 4, 5], dtype='float32' + ) + index5 = fluid.layers.data( + name='index5', shape=[2, 10], dtype='int32' + ) + updates5 = fluid.layers.data( + name='updates5', shape=[2, 10], dtype='float32' + ) output5 = fluid.layers.scatter_nd_add(ref5, index5, updates5) except Exception as e: - t = \ - "The last dimension of Input(Index)'s shape should be no greater " + t = "The last dimension of Input(Index)'s shape should be no greater " if t in str(e): raise IndexError @@ -296,35 +317,39 @@ class TestScatterNdOpRaise(unittest.TestCase): def test_check_raise2(self): with self.assertRaises(ValueError): - ref6 = fluid.layers.data(name='ref6', - shape=[10, 9, 8, 1, 3], - dtype='double', - append_batch_size=False) - index6 = fluid.layers.data(name='index6', - shape=[5, 8, 5], - dtype='int32', - append_batch_size=False) - updates6 = fluid.layers.data(name='update6', - shape=[5, 8], - dtype='float32', - append_batch_size=False) + ref6 = fluid.layers.data( + name='ref6', + shape=[10, 9, 8, 1, 3], + dtype='double', + append_batch_size=False, + ) + index6 = fluid.layers.data( + name='index6', + shape=[5, 8, 5], + dtype='int32', + append_batch_size=False, + ) + updates6 = fluid.layers.data( + name='update6', + shape=[5, 8], + dtype='float32', + append_batch_size=False, + ) output6 = fluid.layers.scatter_nd_add(ref6, index6, updates6) def test_check_raise3(self): - def check_raise_is_test(): try: shape = [3, 4, 5] - index7 = fluid.layers.data(name='index7', - shape=[2, 1], - dtype='int32') - updates7 = fluid.layers.data(name='updates7', - shape=[2, 4, 5, 20], - dtype='float32') + index7 = fluid.layers.data( + name='index7', shape=[2, 1], dtype='int32' + ) + updates7 = fluid.layers.data( + name='updates7', shape=[2, 4, 5, 20], dtype='float32' + ) output7 = fluid.layers.scatter_nd(index7, updates7, shape) except Exception as e: - t = \ - "Updates has wrong shape" + t = "Updates has wrong shape" if t in str(e): raise ValueError @@ -332,7 +357,6 @@ class TestScatterNdOpRaise(unittest.TestCase): class TestDygraph(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(fluid.CPUPlace()): index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64) diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py index 05aba63fbfe831fed731f76c628f0ae9c583be35..479498bfbb45142730e8ee2c05043366bc284d0c 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py @@ -23,7 +23,6 @@ from paddle.fluid.dygraph.base import switch_to_static_graph class TestScatterOp(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = 
paddle.scatter @@ -43,7 +42,6 @@ class TestScatterOp(OpTest): class TestScatterOp0(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -64,7 +62,6 @@ class TestScatterOp0(OpTest): class TestScatterOp1(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -87,10 +84,10 @@ class TestScatterOp1(OpTest): self.check_grad(["X", "Updates"], "Out", check_eager=False) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScatterOp2(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -110,15 +107,15 @@ class TestScatterOp2(OpTest): def test_check_grad(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X', 'Updates'], - 'Out', - check_eager=False) + self.check_grad_with_place( + place, ['X', 'Updates'], 'Out', check_eager=False + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScatterOp3(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -142,13 +139,12 @@ class TestScatterOp3(OpTest): def test_check_grad(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X', 'Updates'], - 'Out', - check_eager=False) + self.check_grad_with_place( + place, ['X', 'Updates'], 'Out', check_eager=False + ) class TestScatterOp4(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -167,10 +163,10 @@ class TestScatterOp4(OpTest): self.check_grad(['X', 'Updates'], 'Out', check_eager=False) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScatterOp5(OpTest): - def setUp(self): self.op_type = "scatter" self.python_api = paddle.scatter @@ -190,13 +186,12 @@ class TestScatterOp5(OpTest): def test_check_grad(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X', 'Updates'], - 'Out', - check_eager=False) + self.check_grad_with_place( + place, ['X', 'Updates'], 'Out', check_eager=False + ) class TestScatterAPI(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -215,19 +210,26 @@ class TestScatterAPI(unittest.TestCase): input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64) index_data = np.array([2, 1, 0, 1]).astype(np.int64) - updates_data = np.array([[1, 1], [2, 2], [3, 3], - [4, 4]]).astype(np.float64) + updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype( + np.float64 + ) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input": input_data, - "index": index_data, - "updates": updates_data - }, - fetch_list=[result]) - self.assertEqual((fetches[0] == \ - np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True) + fetches = exe.run( + fluid.default_main_program(), + feed={ + "input": input_data, + "index": index_data, + "updates": updates_data, + }, + fetch_list=[result], + ) + self.assertEqual( + ( + fetches[0] == np.array([[3.0, 3.0], [6.0, 6.0], [1.0, 1.0]]) + ).all(), + True, + ) def test_static(self): for place in self.places: @@ -238,16 +240,22 @@ class 
TestScatterAPI(unittest.TestCase): with fluid.dygraph.guard(place): x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64) index_data = np.array([2, 1, 0, 1]).astype(np.int64) - updates_data = np.array([[1, 1], [2, 2], [3, 3], - [4, 4]]).astype(np.float64) + updates_data = np.array( + [[1, 1], [2, 2], [3, 3], [4, 4]] + ).astype(np.float64) x = fluid.dygraph.to_variable(x_data) index = fluid.dygraph.to_variable(index_data) updates = fluid.dygraph.to_variable(updates_data) output1 = self.scatter(x, index, updates, overwrite=False) - self.assertEqual((output1.numpy() == \ - np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True) + self.assertEqual( + ( + output1.numpy() + == np.array([[3.0, 3.0], [6.0, 6.0], [1.0, 1.0]]) + ).all(), + True, + ) def test_large_data(self): if os.name == "nt" or not paddle.is_compiled_with_cuda(): @@ -259,27 +267,30 @@ class TestScatterAPI(unittest.TestCase): def test_dygraph(): with fluid.dygraph.guard(): - gpu_out = paddle.scatter(paddle.to_tensor(x), - paddle.to_tensor(index), - paddle.to_tensor(updates)) + gpu_out = paddle.scatter( + paddle.to_tensor(x), + paddle.to_tensor(index), + paddle.to_tensor(updates), + ) return gpu_out.numpy() @switch_to_static_graph def test_static_graph(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x_t = paddle.static.data(name="x", dtype=x.dtype, shape=x.shape) - index_t = paddle.static.data(name="index", - dtype=index.dtype, - shape=index.shape) - updates_t = paddle.static.data(name="updates", - dtype=updates.dtype, - shape=updates.shape) + index_t = paddle.static.data( + name="index", dtype=index.dtype, shape=index.shape + ) + updates_t = paddle.static.data( + name="updates", dtype=updates.dtype, shape=updates.shape + ) out_t = paddle.scatter(x_t, index_t, updates_t) feed = { x_t.name: x, index_t.name: index, - updates_t.name: updates + updates_t.name: updates, } fetch = [out_t] @@ -290,10 +301,10 @@ class TestScatterAPI(unittest.TestCase): np.testing.assert_array_equal(test_dygraph(), test_static_graph()) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestScatterOpFp16(OpTest): - def setUp(self): self.__class__.op_type = "scatter" self.python_api = paddle.scatter @@ -313,8 +324,9 @@ class TestScatterOpFp16(OpTest): self.ref_dx[self.index_np] = zero_np def compute_ref_grad_updates(self): - ref_grad_updates = paddle.gather(paddle.to_tensor(self.dout_np), - paddle.to_tensor(self.index_np)) + ref_grad_updates = paddle.gather( + paddle.to_tensor(self.dout_np), paddle.to_tensor(self.index_np) + ) return ref_grad_updates def test_scatter_fp16(self): @@ -323,21 +335,22 @@ class TestScatterOpFp16(OpTest): index_tensor = paddle.to_tensor(self.index_np) updates_tensor = paddle.to_tensor(self.updates_np, stop_gradient=False) out_tensor = paddle.scatter(x_tensor, index_tensor, updates_tensor) - paddle.autograd.backward([out_tensor], [paddle.to_tensor(self.dout_np)], - retain_graph=True) + paddle.autograd.backward( + [out_tensor], [paddle.to_tensor(self.dout_np)], retain_graph=True + ) ref_grad_updates = self.compute_ref_grad_updates() - np.testing.assert_allclose(ref_grad_updates.numpy(), - updates_tensor.grad.numpy(), - rtol=1e-5, - atol=1e-5) - np.testing.assert_allclose(self.ref_dx, - x_tensor.grad.numpy(), - rtol=1e-5, - atol=1e-5) + np.testing.assert_allclose( 
+ ref_grad_updates.numpy(), + updates_tensor.grad.numpy(), + rtol=1e-5, + atol=1e-5, + ) + np.testing.assert_allclose( + self.ref_dx, x_tensor.grad.numpy(), rtol=1e-5, atol=1e-5 + ) class TestScatterInplaceAPI(TestScatterAPI): - def executed_api(self): self.scatter = paddle.scatter_ diff --git a/python/paddle/fluid/tests/unittests/test_scope.py b/python/paddle/fluid/tests/unittests/test_scope.py index ddd26ad1f3b9c7a6dbdb603ded09c3655180ea49..6eaab539fc3333247ba8beed4f517ae2d96a99ff 100644 --- a/python/paddle/fluid/tests/unittests/test_scope.py +++ b/python/paddle/fluid/tests/unittests/test_scope.py @@ -17,7 +17,6 @@ import unittest class TestScope(unittest.TestCase): - def test_create_destroy(self): paddle_c = paddle.fluid.core scope = paddle_c.Scope() @@ -53,7 +52,8 @@ class TestScope(unittest.TestCase): # Delete the scope. scope._remove_from_pool() with self.assertRaisesRegexp( - Exception, "Deleting a nonexistent scope is not allowed*"): + Exception, "Deleting a nonexistent scope is not allowed*" + ): # It is not allowed to delete a nonexistent scope. scope._remove_from_pool() diff --git a/python/paddle/fluid/tests/unittests/test_searchsorted_op.py b/python/paddle/fluid/tests/unittests/test_searchsorted_op.py index e63107026e680933801aa8fbf9b465b8a347a0fc..d185fbaa110d0faac6e0aac70b65a3e3a6a13e06 100644 --- a/python/paddle/fluid/tests/unittests/test_searchsorted_op.py +++ b/python/paddle/fluid/tests/unittests/test_searchsorted_op.py @@ -23,7 +23,6 @@ from op_test import OpTest class TestSearchSorted(OpTest): - def setUp(self): self.python_api = paddle.searchsorted self.op_type = "searchsorted" @@ -31,13 +30,14 @@ class TestSearchSorted(OpTest): self.inputs = { 'SortedSequence': self.sorted_sequence, - 'Values': self.values + 'Values': self.values, } self.attrs = {"out_int32": False, "right": False} self.attrs["right"] = True if self.side == 'right' else False self.outputs = { - 'Out': - np.searchsorted(self.sorted_sequence, self.values, side=self.side) + 'Out': np.searchsorted( + self.sorted_sequence, self.values, side=self.side + ) } def test_check_output(self): @@ -50,7 +50,6 @@ class TestSearchSorted(OpTest): class TestSearchSortedOp1(TestSearchSorted): - def init_test_case(self): self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("int32") self.values = np.array([[3, 6, 9], [3, 6, 9]]).astype("int32") @@ -58,7 +57,6 @@ class TestSearchSortedOp1(TestSearchSorted): class TestSearchSortedOp2(TestSearchSorted): - def init_test_case(self): self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("int64") self.values = np.array([[3, 6, 9], [3, 6, 9]]).astype("int64") @@ -66,34 +64,33 @@ class TestSearchSortedOp2(TestSearchSorted): class TestSearchSortedOp3(TestSearchSorted): - def init_test_case(self): self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float64") - self.values = np.array([[np.nan, np.nan, np.nan], - [3, 6, 9]]).astype("float64") + self.values = np.array([[np.nan, np.nan, np.nan], [3, 6, 9]]).astype( + "float64" + ) self.side = "left" class TestSearchSortedOp4(TestSearchSorted): - def init_test_case(self): self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float64") - self.values = np.array([[np.inf, np.inf, np.inf], - [3, 6, 9]]).astype("float64") + self.values = np.array([[np.inf, np.inf, np.inf], [3, 6, 9]]).astype( + "float64" + ) self.side = "right" class TestSearchSortedOp5(TestSearchSorted): - def init_test_case(self): self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float64") - self.values = np.array([[np.inf, np.inf, np.inf], - [np.nan, 
np.nan, np.nan]]).astype("float64") + self.values = np.array( + [[np.inf, np.inf, np.inf], [np.nan, np.nan, np.nan]] + ).astype("float64") self.side = "right" class TestSearchSortedAPI(unittest.TestCase): - def init_test_case(self): self.sorted_sequence = np.array([2, 4, 6, 8, 10]).astype("float64") self.values = np.array([[3, 6, 9], [3, 6, 9]]).astype("float64") @@ -112,17 +109,20 @@ class TestSearchSortedAPI(unittest.TestCase): sorted_sequence = paddle.static.data( 'SortedSequence', shape=self.sorted_sequence.shape, - dtype="float64") - values = paddle.static.data('Values', - shape=self.values.shape, - dtype="float64") + dtype="float64", + ) + values = paddle.static.data( + 'Values', shape=self.values.shape, dtype="float64" + ) out = paddle.searchsorted(sorted_sequence, values) exe = paddle.static.Executor(place) - res, = exe.run(feed={ - 'SortedSequence': self.sorted_sequence, - 'Values': self.values - }, - fetch_list=out) + (res,) = exe.run( + feed={ + 'SortedSequence': self.sorted_sequence, + 'Values': self.values, + }, + fetch_list=out, + ) out_ref = np.searchsorted(self.sorted_sequence, self.values) np.testing.assert_allclose(out_ref, res, rtol=1e-05) @@ -130,16 +130,15 @@ class TestSearchSortedAPI(unittest.TestCase): run(place) def test_dygraph_api(self): - def run(place): paddle.disable_static(place) sorted_sequence = paddle.to_tensor(self.sorted_sequence) values = paddle.to_tensor(self.values) out = paddle.searchsorted(sorted_sequence, values, right=True) - out_ref = np.searchsorted(self.sorted_sequence, - self.values, - side='right') + out_ref = np.searchsorted( + self.sorted_sequence, self.values, side='right' + ) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) paddle.enable_static() @@ -155,60 +154,61 @@ class TestSearchSortedAPI(unittest.TestCase): class TestSearchSortedError(unittest.TestCase): - def test_error_api(self): paddle.enable_static() def test_searchsorted_dims_matched_before_lastdim_error1(): with paddle.static.program_guard(paddle.static.Program()): - sorted_sequence = paddle.static.data('SortedSequence', - shape=[2, 2, 3], - dtype="float64") - values = paddle.static.data('Values', - shape=[2, 5], - dtype="float64") + sorted_sequence = paddle.static.data( + 'SortedSequence', shape=[2, 2, 3], dtype="float64" + ) + values = paddle.static.data( + 'Values', shape=[2, 5], dtype="float64" + ) out = paddle.searchsorted(sorted_sequence, values) - self.assertRaises(RuntimeError, - test_searchsorted_dims_matched_before_lastdim_error1) + self.assertRaises( + RuntimeError, test_searchsorted_dims_matched_before_lastdim_error1 + ) def test_searchsorted_dims_matched_before_lastdim_error2(): with paddle.static.program_guard(paddle.static.Program()): - sorted_sequence = paddle.static.data('SortedSequence', - shape=[2, 2, 3], - dtype="float64") - values = paddle.static.data('Values', - shape=[2, 3, 5], - dtype="float64") + sorted_sequence = paddle.static.data( + 'SortedSequence', shape=[2, 2, 3], dtype="float64" + ) + values = paddle.static.data( + 'Values', shape=[2, 3, 5], dtype="float64" + ) out = paddle.searchsorted(sorted_sequence, values) - self.assertRaises(RuntimeError, - test_searchsorted_dims_matched_before_lastdim_error2) + self.assertRaises( + RuntimeError, test_searchsorted_dims_matched_before_lastdim_error2 + ) def test_searchsorted_sortedsequence_size_error(): with paddle.static.program_guard(paddle.static.Program()): - sorted_sequence = paddle.static.data('SortedSequence', - shape=[2, 2, - pow(2, 34)], - dtype="float64") - values = 
paddle.static.data('Values', - shape=[2, 2, 5], - dtype="float64") - out = paddle.searchsorted(sorted_sequence, - values, - out_int32=True) - - self.assertRaises(RuntimeError, - test_searchsorted_sortedsequence_size_error) + sorted_sequence = paddle.static.data( + 'SortedSequence', shape=[2, 2, pow(2, 34)], dtype="float64" + ) + values = paddle.static.data( + 'Values', shape=[2, 2, 5], dtype="float64" + ) + out = paddle.searchsorted( + sorted_sequence, values, out_int32=True + ) + + self.assertRaises( + RuntimeError, test_searchsorted_sortedsequence_size_error + ) def test_sortedsequence_values_type_error(): with paddle.static.program_guard(paddle.static.Program()): - sorted_sequence = paddle.static.data('SortedSequence', - shape=[2, 3], - dtype="int16") - values = paddle.static.data('Values', - shape=[2, 5], - dtype="int16") + sorted_sequence = paddle.static.data( + 'SortedSequence', shape=[2, 3], dtype="int16" + ) + values = paddle.static.data( + 'Values', shape=[2, 5], dtype="int16" + ) out = paddle.searchsorted(sorted_sequence, values) self.assertRaises(TypeError, test_sortedsequence_values_type_error) diff --git a/python/paddle/fluid/tests/unittests/test_seed_op.py b/python/paddle/fluid/tests/unittests/test_seed_op.py index 8a1933fbe6e861f8f9d5b598233ee7e7a2669269..14aa2c4f4dd75d81d6ee36cc20881609427e65da 100644 --- a/python/paddle/fluid/tests/unittests/test_seed_op.py +++ b/python/paddle/fluid/tests/unittests/test_seed_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestSeedOpFixSeed(OpTest): - def setUp(self): self.op_type = "seed" self.inputs = {} @@ -34,7 +33,6 @@ class TestSeedOpFixSeed(OpTest): class TestSeedOpDiffSeed(OpTest): - def setUp(self): self.op_type = "seed" self.inputs = {} @@ -46,7 +44,6 @@ class TestSeedOpDiffSeed(OpTest): class TestDropoutWithRandomSeedGenerator(unittest.TestCase): - def setUp(self): paddle.framework.random.set_random_seed_generator('seed0', 123) paddle.framework.random.set_random_seed_generator('seed1', 123) @@ -58,14 +55,16 @@ class TestDropoutWithRandomSeedGenerator(unittest.TestCase): def check_static_result(self, place): import paddle.distributed.fleet.meta_parallel.parallel_layers.random as random + with static.program_guard(static.Program(), static.Program()): res1 = random.determinate_seed('seed0') exe = static.Executor(place) res_list = [res1] for i in range(2): - out1, = exe.run(static.default_main_program(), - fetch_list=res_list) + (out1,) = exe.run( + static.default_main_program(), fetch_list=res_list + ) self.assertEqual(out1, np.cast['int32'](self.rng1.random())) def test_static(self): diff --git a/python/paddle/fluid/tests/unittests/test_segment_ops.py b/python/paddle/fluid/tests/unittests/test_segment_ops.py index 71672723e82e9c7b146e981450a1465b3442145e..1c6fe88ca77a1707bf96fce87cc20059c6e7ffe8 100644 --- a/python/paddle/fluid/tests/unittests/test_segment_ops.py +++ b/python/paddle/fluid/tests/unittests/test_segment_ops.py @@ -83,7 +83,6 @@ def segment_pool_split(X, SegmentIds, pooltype): class TestSegmentOps(OpTest): - def set_data(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) segment_ids = self.set_segment(len(x), len(x) // 5 + 1) @@ -112,7 +111,7 @@ class TestSegmentOps(OpTest): result = self.compute(x, segment_ids) self.inputs = { 'X': x.astype(self.dtype), - 'SegmentIds': segment_ids.astype(np.int64) + 'SegmentIds': segment_ids.astype(np.int64), } self.outputs = {'Out': result.astype(self.dtype)} @@ -124,7 +123,6 @@ class TestSegmentOps(OpTest): class TestSegmentSum2(TestSegmentOps): - def 
prepare(self): super(TestSegmentSum2, self).prepare() self.shape = [40, 20] @@ -136,13 +134,12 @@ class TestSegmentSum2(TestSegmentOps): result = self.compute(x, segment_ids) self.inputs = { 'X': x.astype(self.dtype), - 'SegmentIds': segment_ids.astype(np.int32) + 'SegmentIds': segment_ids.astype(np.int32), } self.outputs = {'Out': result.astype(self.dtype)} class TestSegmentMax(TestSegmentOps): - def compute(self, x, segment_ids): return compute_segment_min_max(x, segment_ids, pooltype="MAX") @@ -157,7 +154,7 @@ class TestSegmentMax(TestSegmentOps): result, self.gradient = self.compute(x, segment_ids) self.inputs = { 'X': x.astype(self.dtype), - 'SegmentIds': segment_ids.astype(np.int32) + 'SegmentIds': segment_ids.astype(np.int32), } self.outputs = {'Out': result.astype(self.dtype)} @@ -166,14 +163,12 @@ class TestSegmentMax(TestSegmentOps): class TestSegmentMax2(TestSegmentMax): - def prepare(self): super(TestSegmentMax2, self).prepare() self.dtype = np.float32 class TestSegmentMin(TestSegmentMax): - def compute(self, x, segment_ids): return compute_segment_min_max(x, segment_ids, pooltype="MIN") @@ -183,14 +178,12 @@ class TestSegmentMin(TestSegmentMax): class TestSegmentMin2(TestSegmentMin): - def prepare(self): super(TestSegmentMin2, self).prepare() self.dtype = np.float32 class TestSegmentMean(TestSegmentOps): - def compute(self, x, segment_ids): return compute_segment_mean(x, segment_ids) @@ -205,16 +198,14 @@ class TestSegmentMean(TestSegmentOps): result = self.compute(x, segment_ids) self.inputs = {'X': x, 'SegmentIds': segment_ids} self.outputs = { - 'Out': - result, - 'SummedIds': - compute_segment_sum( - np.ones([len(x), 1]).astype(self.dtype), segment_ids) + 'Out': result, + 'SummedIds': compute_segment_sum( + np.ones([len(x), 1]).astype(self.dtype), segment_ids + ), } class TestSegmentMean2(TestSegmentMean): - def prepare(self): super(TestSegmentMean2, self).prepare() self.dtype = np.float32 @@ -223,7 +214,6 @@ class TestSegmentMean2(TestSegmentMean): class API_SegmentOpsTest(unittest.TestCase): - def test_static(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data(name="x", shape=[3, 3], dtype="float32") @@ -243,11 +233,10 @@ class API_SegmentOpsTest(unittest.TestCase): np_max = np.array([[3, 2, 3], [4, 5, 6]], dtype="float32") np_min = np.array([[1, 2, 1], [4, 5, 6]], dtype="float32") - ret = exe.run(feed={ - 'x': data1, - 'y': data2 - }, - fetch_list=[res_sum, res_mean, res_max, res_min]) + ret = exe.run( + feed={'x': data1, 'y': data2}, + fetch_list=[res_sum, res_mean, res_max, res_min], + ) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) @@ -255,8 +244,9 @@ class API_SegmentOpsTest(unittest.TestCase): def test_dygraph(self): device = paddle.CPUPlace() with paddle.fluid.dygraph.guard(device): - x = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], - dtype='float32') + x = paddle.to_tensor( + [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32' + ) y = paddle.to_tensor([0, 0, 1], dtype="int32") res_sum = paddle.incubate.segment_sum(x, y) res_mean = paddle.incubate.segment_mean(x, y) @@ -271,14 +261,12 @@ class API_SegmentOpsTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - np.testing.assert_allclose(np_res, - ret_res.numpy(), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res, ret_res.numpy(), rtol=1e-05, atol=1e-06 + ) class 
API_GeometricSegmentOpsTest(unittest.TestCase): - def test_static(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data(name="x", shape=[3, 3], dtype="float32") @@ -298,11 +286,10 @@ class API_GeometricSegmentOpsTest(unittest.TestCase): np_max = np.array([[3, 2, 3], [4, 5, 6]], dtype="float32") np_min = np.array([[1, 2, 1], [4, 5, 6]], dtype="float32") - ret = exe.run(feed={ - 'x': data1, - 'y': data2 - }, - fetch_list=[res_sum, res_mean, res_max, res_min]) + ret = exe.run( + feed={'x': data1, 'y': data2}, + fetch_list=[res_sum, res_mean, res_max, res_min], + ) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) @@ -310,8 +297,9 @@ class API_GeometricSegmentOpsTest(unittest.TestCase): def test_dygraph(self): device = paddle.CPUPlace() with paddle.fluid.dygraph.guard(device): - x = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], - dtype='float32') + x = paddle.to_tensor( + [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32' + ) y = paddle.to_tensor([0, 0, 1], dtype="int32") res_sum = paddle.geometric.segment_sum(x, y) res_mean = paddle.geometric.segment_mean(x, y) @@ -326,16 +314,16 @@ class API_GeometricSegmentOpsTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - np.testing.assert_allclose(np_res, - ret_res.numpy(), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res, ret_res.numpy(), rtol=1e-05, atol=1e-06 + ) def test_dygraph_cpu_float16(self): device = paddle.CPUPlace() with paddle.fluid.dygraph.guard(device): - x = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], - dtype='float16') + x = paddle.to_tensor( + [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float16' + ) y = paddle.to_tensor([0, 0, 1], dtype="int32") res_sum = paddle.geometric.segment_sum(x, y) res_mean = paddle.geometric.segment_mean(x, y) @@ -349,17 +337,17 @@ class API_GeometricSegmentOpsTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - np.testing.assert_allclose(np_res, - ret_res.numpy(), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res, ret_res.numpy(), rtol=1e-05, atol=1e-06 + ) def test_dygraph_cuda_float16(self): if core.is_compiled_with_cuda(): device = paddle.CUDAPlace(0) with paddle.fluid.dygraph.guard(device): - x = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], - dtype='float16') + x = paddle.to_tensor( + [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float16' + ) y = paddle.to_tensor([0, 0, 1], dtype="int32") res_sum = paddle.geometric.segment_sum(x, y) res_mean = paddle.geometric.segment_mean(x, y) @@ -374,10 +362,9 @@ class API_GeometricSegmentOpsTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - np.testing.assert_allclose(np_res, - ret_res.numpy(), - rtol=1e-05, - atol=1e-06) + np.testing.assert_allclose( + np_res, ret_res.numpy(), rtol=1e-05, atol=1e-06 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py index 119bcc0ee5a3e981d616c4bcd1601606e288dacc..eeab0ca874d9651f9ede33ad754a9ac9bf2732b5 100644 --- a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py +++ b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py @@ -27,7 +27,6 @@ 
paddle.enable_static() class TestSplitMergeSelectedVarOps(unittest.TestCase): - def test_forward_backward_list_output(self): for branch_num in range(2, 10): program = Program() @@ -41,7 +40,8 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase): out = program.current_block().create_var( dtype='float32', shape=[2], - type=core.VarDesc.VarType.LOD_TENSOR) + type=core.VarDesc.VarType.LOD_TENSOR, + ) outputs.append(out) select_output(x, outputs, mask) @@ -49,30 +49,31 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase): mean = paddle.mean(y) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = Executor(place) feed_x = np.asarray([1.3, -1.4]).astype(np.float32) for i in range(branch_num): feed_mask = np.asarray([i]).astype(np.int32) - ret = exe.run(program, - feed={ - 'x': feed_x, - 'mask': feed_mask - }, - fetch_list=[y.name, x.grad_name]) + ret = exe.run( + program, + feed={'x': feed_x, 'mask': feed_mask}, + fetch_list=[y.name, x.grad_name], + ) x_grad = np.asarray([0.5, 0.5]).astype(np.float32) - np.testing.assert_allclose(np.asarray(ret[0]), - feed_x, - rtol=1e-05) - np.testing.assert_allclose(np.asarray(ret[1]), - x_grad, - rtol=1e-05) + np.testing.assert_allclose( + np.asarray(ret[0]), feed_x, rtol=1e-05 + ) + np.testing.assert_allclose( + np.asarray(ret[1]), x_grad, rtol=1e-05 + ) class TestSelectInputOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): mask = layers.data(name='mask', shape=[1], dtype='int32') @@ -99,17 +100,16 @@ class TestSelectInputOpError(unittest.TestCase): class TestSelectOutput_Error(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): in1 = layers.data(name='in1', shape=[1], dtype='int32') - mask_int32 = layers.data(name='mask_int32', - shape=[1], - dtype='int32') - mask_float32 = layers.data(name='mask_float32', - shape=[1], - dtype='float32') + mask_int32 = layers.data( + name='mask_int32', shape=[1], dtype='int32' + ) + mask_float32 = layers.data( + name='mask_float32', shape=[1], dtype='float32' + ) out1 = layers.data(name='out1', shape=[1], dtype='int32') # 1. The type of input in select_output must Variable. 
diff --git a/python/paddle/fluid/tests/unittests/test_selected_rows.py b/python/paddle/fluid/tests/unittests/test_selected_rows.py index 4eac57f834bcf2afceca2d62aa891d705a1bfa6e..a66dc40dfb1f713e9a322ae3f1b3de705fbe8056 100644 --- a/python/paddle/fluid/tests/unittests/test_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_selected_rows.py @@ -18,7 +18,6 @@ import numpy as np class TestSelectedRows(unittest.TestCase): - def test_selected_rows(self): place = core.CPUPlace() height = 10 @@ -40,13 +39,16 @@ class TestSelectedRows(unittest.TestCase): self.assertEqual(10, selected_rows.height()) # compare tensor - self.assertAlmostEqual(2.0, - selected_rows.get_tensor()._get_float_element(0)) - self.assertAlmostEqual(1.0, - selected_rows.get_tensor()._get_float_element(1)) + self.assertAlmostEqual( + 2.0, selected_rows.get_tensor()._get_float_element(0) + ) + self.assertAlmostEqual( + 1.0, selected_rows.get_tensor()._get_float_element(1) + ) self.assertAlmostEqual( 4.0, - selected_rows.get_tensor()._get_float_element(2 * row_numel + 8)) + selected_rows.get_tensor()._get_float_element(2 * row_numel + 8), + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py index 5de31d26b2b08e5682f781afad43697d0111e925..1390ddb5c960645f368ec191cd21108437948617 100644 --- a/python/paddle/fluid/tests/unittests/test_selu_op.py +++ b/python/paddle/fluid/tests/unittests/test_selu_op.py @@ -21,9 +21,11 @@ import paddle.fluid as fluid import paddle.nn.functional as F -def ref_selu(x, - scale=1.0507009873554804934193349852946, - alpha=1.6732632423543772848170429916717): +def ref_selu( + x, + scale=1.0507009873554804934193349852946, + alpha=1.6732632423543772848170429916717, +): out = np.copy(x) out_flat = out.flatten() for i in range(out_flat.size): @@ -35,7 +37,6 @@ def ref_selu(x, class SeluTest(OpTest): - def setUp(self): self.op_type = "selu" self.python_api = paddle.nn.functional.selu @@ -85,8 +86,11 @@ class TestSeluAPI(unittest.TestCase): # Since zero point in selu is not differentiable, avoid randomize # zero. self.x_np[np.abs(self.x_np) < 0.005] = 0.02 - self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): @@ -125,21 +129,21 @@ class TestSeluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.selu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[12, 10], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[12, 10], dtype='int32' + ) self.assertRaises(TypeError, F.selu, x_int32) # The scale must be greater than 1.0 - x_fp32 = paddle.fluid.data(name='x_fp32', - shape=[12, 10], - dtype='float32') + x_fp32 = paddle.fluid.data( + name='x_fp32', shape=[12, 10], dtype='float32' + ) self.assertRaises(ValueError, F.selu, x_fp32, -1.0) # The alpha must be no less than 0 self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[12, 10], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[12, 10], dtype='float16' + ) F.selu(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_set_bool_attr.py b/python/paddle/fluid/tests/unittests/test_set_bool_attr.py index a089563b6ecd04cb3b3c1d054d636438c0848b36..2c233141d095eadafb550a962ec594d1bf2e8c91 100644 --- a/python/paddle/fluid/tests/unittests/test_set_bool_attr.py +++ b/python/paddle/fluid/tests/unittests/test_set_bool_attr.py @@ -17,18 +17,19 @@ import unittest class TestAttrSet(unittest.TestCase): - def test_set_bool_attr(self): x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32') param_attr = fluid.ParamAttr( name='batch_norm_w', - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0), + ) bias_attr = fluid.ParamAttr( name='batch_norm_b', - initializer=fluid.initializer.Constant(value=0.0)) - bn = fluid.layers.batch_norm(input=x, - param_attr=param_attr, - bias_attr=bias_attr) + initializer=fluid.initializer.Constant(value=0.0), + ) + bn = fluid.layers.batch_norm( + input=x, param_attr=param_attr, bias_attr=bias_attr + ) block = fluid.default_main_program().desc.block(0) op = block.op(0) before_type = op.attr_type('is_test') diff --git a/python/paddle/fluid/tests/unittests/test_set_value_op.py b/python/paddle/fluid/tests/unittests/test_set_value_op.py index 7f6b9831aaee6882c0ce7279f8b3cc2bba3c6b6e..92dc8b8fadcb7745f82e74653516a4afaa54cfd3 100644 --- a/python/paddle/fluid/tests/unittests/test_set_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_set_value_op.py @@ -23,8 +23,8 @@ from paddle.fluid.layer_helper import LayerHelper from functools import reduce from paddle.fluid.framework import _test_eager_guard -class TestSetValueBase(unittest.TestCase): +class TestSetValueBase(unittest.TestCase): def setUp(self): paddle.enable_static() self.set_dtype() @@ -50,7 +50,6 @@ class TestSetValueBase(unittest.TestCase): class TestSetValueApi(TestSetValueBase): - def _run_static(self): paddle.enable_static() with paddle.static.program_guard(self.program): @@ -75,11 +74,17 @@ class TestSetValueApi(TestSetValueBase): dynamic_out = self._run_dynamic() self._get_answer() - error_msg = "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" - self.assertTrue((self.data == static_out).all(), - msg=error_msg.format("static", self.data, static_out)) - self.assertTrue((self.data == dynamic_out).all(), - msg=error_msg.format("dynamic", self.data, dynamic_out)) + error_msg = ( + "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}" + ) + self.assertTrue( + (self.data == static_out).all(), + msg=error_msg.format("static", self.data, static_out), + ) + self.assertTrue( + (self.data == dynamic_out).all(), + msg=error_msg.format("dynamic", self.data, dynamic_out), + ) def test_api(self): with _test_eager_guard(): @@ -90,7 +95,6 @@ class 
TestSetValueApi(TestSetValueBase): # 1. Test different type of item: int, Python slice, Paddle Tensor # 1.1 item is int class TestSetValueItemInt(TestSetValueApi): - def _call_setitem(self, x): x[0] = self.value @@ -101,7 +105,6 @@ class TestSetValueItemInt(TestSetValueApi): # 1.2 item is slice # 1.2.1 step is 1 class TestSetValueItemSlice(TestSetValueApi): - def _call_setitem(self, x): x[0:2] = self.value @@ -110,7 +113,6 @@ class TestSetValueItemSlice(TestSetValueApi): class TestSetValueItemSlice2(TestSetValueApi): - def _call_setitem(self, x): x[0:-1] = self.value @@ -119,7 +121,6 @@ class TestSetValueItemSlice2(TestSetValueApi): class TestSetValueItemSlice3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2] = self.value @@ -128,7 +129,6 @@ class TestSetValueItemSlice3(TestSetValueApi): class TestSetValueItemSlice4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2, :] = self.value @@ -137,7 +137,6 @@ class TestSetValueItemSlice4(TestSetValueApi): class TestSetValueItemSlice5(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:1, :] = self.value @@ -146,9 +145,7 @@ class TestSetValueItemSlice5(TestSetValueApi): class TestSetValueItemSliceInWhile(TestSetValueApi): - def _call_setitem(self, x): - def cond(i, x): return i < 1 @@ -157,7 +154,7 @@ class TestSetValueItemSliceInWhile(TestSetValueApi): i = i + 1 return i, x - i = paddle.zeros(shape=(1, ), dtype='int32') + i = paddle.zeros(shape=(1,), dtype='int32') i, x = paddle.fluid.layers.while_loop(cond, body, [i, x]) def _get_answer(self): @@ -166,7 +163,6 @@ class TestSetValueItemSliceInWhile(TestSetValueApi): # 1.2.2 step > 1 class TestSetValueItemSliceStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 5, 5] @@ -178,7 +174,6 @@ class TestSetValueItemSliceStep(TestSetValueApi): class TestSetValueItemSliceStep2(TestSetValueApi): - def set_shape(self): self.shape = [7, 5, 5] @@ -190,7 +185,6 @@ class TestSetValueItemSliceStep2(TestSetValueApi): class TestSetValueItemSliceStep3(TestSetValueApi): - def _call_setitem(self, x): x[0:-1, 0:2, ::2] = self.value @@ -199,7 +193,6 @@ class TestSetValueItemSliceStep3(TestSetValueApi): class TestSetValueItemSliceStep4(TestSetValueApi): - def _call_setitem(self, x): x[0:, 1:2:2, :] = self.value @@ -209,7 +202,6 @@ class TestSetValueItemSliceStep4(TestSetValueApi): # 1.2.3 step < 0 class TestSetValueItemSliceNegetiveStep(TestSetValueApi): - def set_shape(self): self.shape = [5, 2] @@ -224,7 +216,6 @@ class TestSetValueItemSliceNegetiveStep(TestSetValueApi): class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): - def set_shape(self): self.shape = [5] @@ -239,7 +230,6 @@ class TestSetValueItemSliceNegetiveStep2(TestSetValueApi): class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): - def set_shape(self): self.shape = [3] @@ -254,7 +244,6 @@ class TestSetValueItemSliceNegetiveStep3(TestSetValueApi): class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -269,7 +258,6 @@ class TestSetValueItemSliceNegetiveStep4(TestSetValueApi): class TestSetValueItemEllipsis1(TestSetValueApi): - def _call_setitem(self, x): x[0:, ..., 1:] = self.value @@ -278,7 +266,6 @@ class TestSetValueItemEllipsis1(TestSetValueApi): class TestSetValueItemEllipsis2(TestSetValueApi): - def _call_setitem(self, x): x[0:, ...] 
= self.value @@ -287,7 +274,6 @@ class TestSetValueItemEllipsis2(TestSetValueApi): class TestSetValueItemEllipsis3(TestSetValueApi): - def _call_setitem(self, x): x[..., 1:] = self.value @@ -296,7 +282,6 @@ class TestSetValueItemEllipsis3(TestSetValueApi): class TestSetValueItemEllipsis4(TestSetValueApi): - def _call_setitem(self, x): x[...] = self.value @@ -306,7 +291,6 @@ class TestSetValueItemEllipsis4(TestSetValueApi): # 1.4 item is Paddle Tensor class TestSetValueItemTensor(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") x[zero] = self.value @@ -316,7 +300,6 @@ class TestSetValueItemTensor(TestSetValueApi): class TestSetValueItemTensor2(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -327,7 +310,6 @@ class TestSetValueItemTensor2(TestSetValueApi): class TestSetValueItemTensor3(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -338,7 +320,6 @@ class TestSetValueItemTensor3(TestSetValueApi): class TestSetValueItemTensor4(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -349,7 +330,6 @@ class TestSetValueItemTensor4(TestSetValueApi): class TestSetValueItemTensor5(TestSetValueApi): - def _call_setitem(self, x): zero = paddle.full([1], 0, dtype="int32") two = paddle.full([1], 2, dtype="int64") @@ -360,7 +340,6 @@ class TestSetValueItemTensor5(TestSetValueApi): class TestSetValueItemTensor6(TestSetValueApi): - def set_shape(self): self.shape = [3, 4, 5] @@ -375,7 +354,6 @@ class TestSetValueItemTensor6(TestSetValueApi): # 1.5 item is None class TestSetValueItemNone1(TestSetValueApi): - def _call_setitem(self, x): x[None] = self.value @@ -384,7 +362,6 @@ class TestSetValueItemNone1(TestSetValueApi): class TestSetValueItemNone2(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 1] = self.value @@ -393,7 +370,6 @@ class TestSetValueItemNone2(TestSetValueApi): class TestSetValueItemNone3(TestSetValueApi): - def _call_setitem(self, x): x[:, None, None, 1] = self.value @@ -402,7 +378,6 @@ class TestSetValueItemNone3(TestSetValueApi): class TestSetValueItemNone4(TestSetValueApi): - def _call_setitem(self, x): x[0, 0, None, 1] = self.value @@ -411,7 +386,6 @@ class TestSetValueItemNone4(TestSetValueApi): class TestSetValueItemNone5(TestSetValueApi): - def _call_setitem(self, x): x[0, None, 0, None, 1] = self.value @@ -420,7 +394,6 @@ class TestSetValueItemNone5(TestSetValueApi): class TestSetValueItemNone6(TestSetValueApi): - def _call_setitem(self, x): x[None, 0, 0, None, 0] = self.value @@ -429,7 +402,6 @@ class TestSetValueItemNone6(TestSetValueApi): class TestSetValueItemNone7(TestSetValueApi): - def _call_setitem(self, x): x[:, None, 1] = np.zeros(self.shape)[:, None, 0] @@ -438,7 +410,6 @@ class TestSetValueItemNone7(TestSetValueApi): class TestSetValueItemNone8(TestSetValueApi): - def _call_setitem(self, x): x[:, 1, None] = np.zeros(self.shape)[:, 0, None] @@ -447,7 +418,6 @@ class TestSetValueItemNone8(TestSetValueApi): class TestSetValueItemNone9(TestSetValueApi): - def _call_setitem(self, x): x[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None] @@ -456,7 +426,6 @@ class TestSetValueItemNone9(TestSetValueApi): class TestSetValueItemNone10(TestSetValueApi): - def _call_setitem(self, x): x[..., None, :, None] = np.zeros(self.shape)[..., None, :, None] @@ -466,7 +435,6 @@ class 
TestSetValueItemNone10(TestSetValueApi): # 1.5 item is list or Tensor of bol class TestSetValueItemBool1(TestSetValueApi): - def _call_setitem(self, x): x[[True, False]] = self.value @@ -475,7 +443,6 @@ class TestSetValueItemBool1(TestSetValueApi): class TestSetValueItemBool2(TestSetValueApi): - def _call_setitem(self, x): x[[False, False]] = self.value @@ -484,7 +451,6 @@ class TestSetValueItemBool2(TestSetValueApi): class TestSetValueItemBool3(TestSetValueApi): - def _call_setitem(self, x): x[[False, True]] = np.zeros(self.shape[2]) @@ -493,7 +459,6 @@ class TestSetValueItemBool3(TestSetValueApi): class TestSetValueItemBool4(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign(np.array([False, True])) x[idx] = np.zeros(self.shape[2]) @@ -503,19 +468,19 @@ class TestSetValueItemBool4(TestSetValueApi): class TestSetValueItemBool5(TestSetValueApi): - def _call_setitem(self, x): idx = paddle.assign( - np.array([[False, True, False], [True, True, False]])) + np.array([[False, True, False], [True, True, False]]) + ) x[idx] = self.value def _get_answer(self): - self.data[np.array([[False, True, False], [True, True, - False]])] = self.value + self.data[ + np.array([[False, True, False], [True, True, False]]) + ] = self.value class TestSetValueItemBool6(TestSetValueApi): - def _call_setitem(self, x): x[0, ...] = 0 x[x > 0] = self.value @@ -530,9 +495,7 @@ class TestSetValueItemBool6(TestSetValueApi): def create_test_value_int32(parent): - class TestValueInt(parent): - def set_value(self): self.value = 7 @@ -552,9 +515,7 @@ create_test_value_int32(TestSetValueItemSlice4) def create_test_value_int64(parent): - class TestValueInt(parent): - def set_value(self): self.value = 7 @@ -574,9 +535,7 @@ create_test_value_int64(TestSetValueItemSlice4) def create_test_value_fp16(parent): - class TestValueInt(parent): - def set_value(self): self.value = 3.7 @@ -596,9 +555,7 @@ create_test_value_fp16(TestSetValueItemSlice4) def create_test_value_fp32(parent): - class TestValueInt(parent): - def set_value(self): self.value = 3.3 @@ -618,9 +575,7 @@ create_test_value_fp32(TestSetValueItemSlice4) def create_test_value_fp64(parent): - class TestValueInt(parent): - def set_value(self): self.value = 2.0**127 # float32:[-2^128, 2^128) @@ -640,9 +595,7 @@ create_test_value_fp64(TestSetValueItemSlice4) def create_test_value_bool(parent): - class TestValueInt(parent): - def set_value(self): self.value = 0 @@ -663,9 +616,7 @@ create_test_value_bool(TestSetValueItemSlice4) # 2.2 value is numpy.array (int32, int64, float32, float64, bool) def create_test_value_numpy_int32(parent): - class TestValueInt(parent): - def set_value(self): self.value = np.array([5]) @@ -685,9 +636,7 @@ create_test_value_numpy_int32(TestSetValueItemSlice4) def create_test_value_numpy_int64(parent): - class TestValueInt(parent): - def set_value(self): self.value = np.array([1]) @@ -707,9 +656,7 @@ create_test_value_numpy_int64(TestSetValueItemSlice4) def create_test_value_numpy_fp32(parent): - class TestValueInt(parent): - def set_value(self): self.value = np.array([1]) @@ -729,9 +676,7 @@ create_test_value_numpy_fp32(TestSetValueItemSlice4) def create_test_value_numpy_fp64(parent): - class TestValueInt(parent): - def set_value(self): self.value = np.array([2**127]).astype("float64") @@ -751,9 +696,7 @@ create_test_value_numpy_fp64(TestSetValueItemSlice4) def create_test_value_numpy_bool(parent): - class TestValueInt(parent): - def set_value(self): self.value = np.array([0]) @@ -774,9 +717,7 @@ 
create_test_value_numpy_bool(TestSetValueItemSlice4) # 2.3 value is a Paddle Tensor (int32, int64, float32, float64, bool) def create_test_value_tensor_int32(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "int32" @@ -800,9 +741,7 @@ create_test_value_tensor_int32(TestSetValueItemSlice4) def create_test_value_tensor_int64(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "int64" @@ -826,9 +765,7 @@ create_test_value_tensor_int64(TestSetValueItemSlice4) def create_test_value_tensor_fp32(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "float32" @@ -852,9 +789,7 @@ create_test_value_tensor_fp32(TestSetValueItemSlice4) def create_test_value_tensor_fp64(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "float64" @@ -878,9 +813,7 @@ create_test_value_tensor_fp64(TestSetValueItemSlice4) def create_test_value_tensor_bool(parent): - class TestValueInt(parent): - def set_dtype(self): self.dtype = "bool" @@ -905,7 +838,6 @@ create_test_value_tensor_bool(TestSetValueItemSlice4) # 3. Test different shape of value class TestSetValueValueShape1(TestSetValueApi): - def set_value(self): self.value = np.array([3, 4, 5, 6]) # shape is (4,) @@ -917,7 +849,6 @@ class TestSetValueValueShape1(TestSetValueApi): class TestSetValueValueShape2(TestSetValueApi): - def set_value(self): self.value = np.array([[3, 4, 5, 6]]) # shape is (1,4) @@ -929,10 +860,10 @@ class TestSetValueValueShape2(TestSetValueApi): class TestSetValueValueShape3(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, 3]]) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ) # shape is (3,4) def _call_setitem(self, x): x[0] = self.value @@ -942,11 +873,12 @@ class TestSetValueValueShape3(TestSetValueApi): class TestSetValueValueShape4(TestSetValueApi): - def set_value(self): - self.value = np.array([[1, 1, 1, 1], [2, 2, 2, 2], - [3, 3, 3, - 3]]).astype(self.dtype) # shape is (3,4) + self.value = np.array( + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]] + ).astype( + self.dtype + ) # shape is (3,4) def _call_setitem(self, x): x[0] = paddle.assign(self.value) # x is Paddle.Tensor @@ -956,7 +888,6 @@ class TestSetValueValueShape4(TestSetValueApi): class TestSetValueValueShape5(TestSetValueApi): - def set_value(self): self.value = np.array([3, 3, 3]).astype(self.dtype) @@ -972,11 +903,10 @@ class TestSetValueValueShape5(TestSetValueApi): # 4. Test error class TestError(TestSetValueBase): - def _value_type_error(self): with self.assertRaisesRegexp( - TypeError, - "Only support to assign an integer, float, numpy.ndarray or paddle.Tensor" + TypeError, + "Only support to assign an integer, float, numpy.ndarray or paddle.Tensor", ): x = paddle.ones(shape=self.shape, dtype=self.dtype) value = [1] @@ -984,8 +914,8 @@ class TestError(TestSetValueBase): def _dtype_error(self): with self.assertRaisesRegexp( - TypeError, - "When assign a numpy.ndarray, integer or float to a paddle.Tensor, " + TypeError, + "When assign a numpy.ndarray, integer or float to a paddle.Tensor, ", ): y = paddle.ones(shape=self.shape, dtype="float16") y[0] = 1 @@ -997,7 +927,8 @@ class TestError(TestSetValueBase): def _ellipsis_error(self): with self.assertRaisesRegexp( - IndexError, "An index can only have a single ellipsis"): + IndexError, "An index can only have a single ellipsis" + ): x = paddle.ones(shape=self.shape, dtype=self.dtype) x[..., ...] 
= self.value with self.assertRaisesRegexp(ValueError, "the start or end is None"): @@ -1044,7 +975,6 @@ class TestError(TestSetValueBase): class Model(paddle.nn.Layer): - def __init__(self): super(Model, self).__init__() self.conv = paddle.nn.Conv2D(12, 12, 3) @@ -1060,7 +990,6 @@ class Model(paddle.nn.Layer): class TestBackward(unittest.TestCase): - def test_static(self): paddle.enable_static() main_program = paddle.static.Program() @@ -1074,9 +1003,9 @@ class TestBackward(unittest.TestCase): x = paddle.static.data(name="x", shape=[4, 4], dtype='float32') y = paddle.static.data(name="y", shape=[4, 4], dtype='float32') - label = paddle.static.data(name="label", - shape=[4, 1], - dtype='int64') + label = paddle.static.data( + name="label", shape=[4, 1], dtype='int64' + ) z = paddle.add(x, y) var = y[0, :] @@ -1084,8 +1013,9 @@ class TestBackward(unittest.TestCase): prediction = paddle.static.nn.fc(x=z, size=2, activation='softmax') - cost = paddle.nn.functional.cross_entropy(input=prediction, - label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label + ) loss = paddle.mean(cost) sgd = paddle.optimizer.SGD(learning_rate=0.01) sgd.minimize(loss) @@ -1095,12 +1025,9 @@ class TestBackward(unittest.TestCase): var_grad, z_grad = exe.run( main_program, - feed={ - "x": x_np, - "y": y_np, - "label": label_np - }, - fetch_list=[var.name + "@GRAD", z.name + "@GRAD"]) + feed={"x": x_np, "y": y_np, "label": label_np}, + fetch_list=[var.name + "@GRAD", z.name + "@GRAD"], + ) self.assertTrue((var_grad == z_grad[0, :]).all()) paddle.disable_static() @@ -1124,7 +1051,6 @@ class TestBackward(unittest.TestCase): class TestGradientTruncated(unittest.TestCase): - def func_test_consistent_with_competitor(self): paddle.disable_static() @@ -1135,8 +1061,9 @@ class TestGradientTruncated(unittest.TestCase): return y.sum() # case 1 - array = np.arange(1, 1 + 2 * 3 * 4, - dtype="float32").reshape([1, 2, 1, 3, 1, 4]) + array = np.arange(1, 1 + 2 * 3 * 4, dtype="float32").reshape( + [1, 2, 1, 3, 1, 4] + ) value = np.arange(100, 104, dtype="float32").reshape(1, 4) inps = paddle.to_tensor(array, stop_gradient=False) @@ -1145,22 +1072,41 @@ class TestGradientTruncated(unittest.TestCase): loss = set_value(inps, value) loss.backward() - value_grad = np.array([[600., 606., 612., 618.]]) - input_grad = np.array([[[[[[4., 32., 108., 256.]], - [[500., 864., 1372., 2048.]], - [[2916., 4000., 5324., 6912.]]]], - [[[[0., 0., 0., 0.]], [[0., 0., 0., 0.]], - [[0., 0., 0., 0.]]]]]]) + value_grad = np.array([[600.0, 606.0, 612.0, 618.0]]) + input_grad = np.array( + [ + [ + [ + [ + [[4.0, 32.0, 108.0, 256.0]], + [[500.0, 864.0, 1372.0, 2048.0]], + [[2916.0, 4000.0, 5324.0, 6912.0]], + ] + ], + [ + [ + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + ] + ], + ] + ] + ) np.testing.assert_array_equal( inps.grad.numpy(), input_grad, - err_msg='The gradient of value should be \n{},\n but reveived {}'. - format(input_grad, inps.grad.numpy())) + err_msg='The gradient of value should be \n{},\n but reveived {}'.format( + input_grad, inps.grad.numpy() + ), + ) np.testing.assert_array_equal( value.grad.numpy(), value_grad, - err_msg='The gradient of input should be \n{},\n but reveived {}'. 
- format(value_grad, value.grad.numpy())) + err_msg='The gradient of input should be \n{},\n but reveived {}'.format( + value_grad, value.grad.numpy() + ), + ) # case 2 array = np.arange(1, 2 * 3 * 4 + 1, dtype="float32").reshape([4, 2, 3]) @@ -1172,23 +1118,29 @@ class TestGradientTruncated(unittest.TestCase): loss = set_value(inps2, value2) loss.backward() - value_grad2 = np.array([600.]) - input_grad2 = np.array([[[4., 32., 108.], [0., 0., 0.]], - [[1372., 2048., 2916.], [4000., 5324., 6912.]], - [[8788., 10976., 13500.], - [16384., 19652., 23328.]], - [[27436., 32000., 37044.], - [42592., 48668., 55296.]]]) + value_grad2 = np.array([600.0]) + input_grad2 = np.array( + [ + [[4.0, 32.0, 108.0], [0.0, 0.0, 0.0]], + [[1372.0, 2048.0, 2916.0], [4000.0, 5324.0, 6912.0]], + [[8788.0, 10976.0, 13500.0], [16384.0, 19652.0, 23328.0]], + [[27436.0, 32000.0, 37044.0], [42592.0, 48668.0, 55296.0]], + ] + ) np.testing.assert_array_equal( inps2.grad.numpy(), input_grad2, - err_msg='The gradient of value should be \n{},\n but reveived {}'. - format(input_grad, inps2.grad.numpy())) + err_msg='The gradient of value should be \n{},\n but reveived {}'.format( + input_grad, inps2.grad.numpy() + ), + ) np.testing.assert_array_equal( value2.grad.numpy(), value_grad2, - err_msg='The gradient of input should be \n{},\n but reveived {}'. - format(value_grad, value2.grad.numpy())) + err_msg='The gradient of input should be \n{},\n but reveived {}'.format( + value_grad, value2.grad.numpy() + ), + ) # case 3 def set_value3(t, value): @@ -1197,8 +1149,9 @@ class TestGradientTruncated(unittest.TestCase): y = a * a return y.sum() - array = np.arange(1, 1 + 2 * 3 * 4, - dtype="float32").reshape([4, 3, 1, 1, 2, 1]) + array = np.arange(1, 1 + 2 * 3 * 4, dtype="float32").reshape( + [4, 3, 1, 1, 2, 1] + ) value = np.arange(100, 100 + 2, dtype="float32").reshape(1, 2, 1) inps = paddle.to_tensor(array, stop_gradient=False) @@ -1207,37 +1160,52 @@ class TestGradientTruncated(unittest.TestCase): loss = set_value3(inps, value) loss.backward() - value_grad = np.array([[[600.], [606.]]]) - input_grad = np.array([[[[[[0.], [0.]]]], [[[[0.], [0.]]]], - [[[[0.], [0.]]]]], - [[[[[1372.], [2048.]]]], [[[[2916.], [4000.]]]], - [[[[5324.], [6912.]]]]], - [[[[[8788.], [10976.]]]], [[[[13500.], - [16384.]]]], - [[[[19652.], [23328.]]]]], - [[[[[27436.], [32000.]]]], - [[[[37044.], [42592.]]]], - [[[[48668.], [55296.]]]]]]) + value_grad = np.array([[[600.0], [606.0]]]) + input_grad = np.array( + [ + [[[[[0.0], [0.0]]]], [[[[0.0], [0.0]]]], [[[[0.0], [0.0]]]]], + [ + [[[[1372.0], [2048.0]]]], + [[[[2916.0], [4000.0]]]], + [[[[5324.0], [6912.0]]]], + ], + [ + [[[[8788.0], [10976.0]]]], + [[[[13500.0], [16384.0]]]], + [[[[19652.0], [23328.0]]]], + ], + [ + [[[[27436.0], [32000.0]]]], + [[[[37044.0], [42592.0]]]], + [[[[48668.0], [55296.0]]]], + ], + ] + ) np.testing.assert_array_equal( inps.grad.numpy(), input_grad, - err_msg='The gradient of value should be \n{},\n but reveived {}'. - format(input_grad, inps.grad.numpy())) + err_msg='The gradient of value should be \n{},\n but reveived {}'.format( + input_grad, inps.grad.numpy() + ), + ) np.testing.assert_array_equal( value.grad.numpy(), value_grad, - err_msg='The gradient of input should be \n{},\n but reveived {}'. 
- format(value_grad, value.grad.numpy())) + err_msg='The gradient of input should be \n{},\n but reveived {}'.format( + value_grad, value.grad.numpy() + ), + ) - #case 4: step >0 + # case 4: step >0 def set_value4(t, value): a = t * t a[0, :, 0, ::3] = value y = a * a return y.sum() - array = np.arange(1, 1 + 2 * 3 * 4, - dtype="float32").reshape([2, 3, 1, 4, 1]) + array = np.arange(1, 1 + 2 * 3 * 4, dtype="float32").reshape( + [2, 3, 1, 4, 1] + ) value = np.arange(100, 100 + 2, dtype="float32").reshape(1, 2, 1) inps = paddle.to_tensor(array, stop_gradient=False) @@ -1246,23 +1214,35 @@ class TestGradientTruncated(unittest.TestCase): loss = set_value4(inps, value) loss.backward() - value_grad = np.array([[[600.], [606.]]]) - input_grad = np.array([[[[[0.], [32.], [108.], [0.]]], - [[[0.], [864.], [1372.], [0.]]], - [[[0.], [4000.], [5324.], [0.]]]], - [[[[8788.], [10976.], [13500.], [16384.]]], - [[[19652.], [23328.], [27436.], [32000.]]], - [[[37044.], [42592.], [48668.], [55296.]]]]]) + value_grad = np.array([[[600.0], [606.0]]]) + input_grad = np.array( + [ + [ + [[[0.0], [32.0], [108.0], [0.0]]], + [[[0.0], [864.0], [1372.0], [0.0]]], + [[[0.0], [4000.0], [5324.0], [0.0]]], + ], + [ + [[[8788.0], [10976.0], [13500.0], [16384.0]]], + [[[19652.0], [23328.0], [27436.0], [32000.0]]], + [[[37044.0], [42592.0], [48668.0], [55296.0]]], + ], + ] + ) np.testing.assert_array_equal( inps.grad.numpy(), input_grad, - err_msg='The gradient of value should be \n{},\n but reveived {}'. - format(input_grad, inps.grad.numpy())) + err_msg='The gradient of value should be \n{},\n but reveived {}'.format( + input_grad, inps.grad.numpy() + ), + ) np.testing.assert_array_equal( value.grad.numpy(), value_grad, - err_msg='The gradient of input should be \n{},\n but reveived {}'. - format(value_grad, value.grad.numpy())) + err_msg='The gradient of input should be \n{},\n but reveived {}'.format( + value_grad, value.grad.numpy() + ), + ) # case 5:a[0].shape==value.shape def set_value5(t, value): @@ -1280,24 +1260,41 @@ class TestGradientTruncated(unittest.TestCase): loss = set_value5(inps, value) loss.backward() - value_grad = np.array([[200., 202., 204., - 206.], [208., 210., 212., 214.], - [216., 218., 220., 222.]]) - input_grad = np.array([[[0., 0., 0., 0.], [0., 0., 0., 0.], - [0., 0., 0., 0.]], - [[8788., 10976., 13500., 16384.], - [19652., 23328., 27436., 32000.], - [37044., 42592., 48668., 55296.]]]) + value_grad = np.array( + [ + [200.0, 202.0, 204.0, 206.0], + [208.0, 210.0, 212.0, 214.0], + [216.0, 218.0, 220.0, 222.0], + ] + ) + input_grad = np.array( + [ + [ + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + ], + [ + [8788.0, 10976.0, 13500.0, 16384.0], + [19652.0, 23328.0, 27436.0, 32000.0], + [37044.0, 42592.0, 48668.0, 55296.0], + ], + ] + ) np.testing.assert_array_equal( inps.grad.numpy(), input_grad, - err_msg='The gradient of value should be \n{},\n but reveived {}'. - format(input_grad, inps.grad.numpy())) + err_msg='The gradient of value should be \n{},\n but reveived {}'.format( + input_grad, inps.grad.numpy() + ), + ) np.testing.assert_array_equal( value.grad.numpy(), value_grad, - err_msg='The gradient of input should be \n{},\n but reveived {}'. 
- format(value_grad, value.grad.numpy())) + err_msg='The gradient of input should be \n{},\n but reveived {}'.format( + value_grad, value.grad.numpy() + ), + ) # case 6: pass stop_gradient from value to x x = paddle.zeros([8, 8], dtype='float32') @@ -1319,7 +1316,7 @@ class TestGradientTruncated(unittest.TestCase): def test_static_graph(self): paddle.enable_static() - to_string = lambda x, i, : x + '_' + str(i) + to_string = lambda x, i,: x + '_' + str(i) numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape) def op1(x): @@ -1327,18 +1324,15 @@ class TestGradientTruncated(unittest.TestCase): # test stop_gradient value.stop_gradient = True x.stop_gradient = False - start = paddle.fluid.layers.fill_constant([1], - "int32", - 5, - force_cpu=True) - end = paddle.fluid.layers.fill_constant([1], - "int32", - 0, - force_cpu=True) - step = paddle.fluid.layers.fill_constant([1], - "int32", - -2, - force_cpu=True) + start = paddle.fluid.layers.fill_constant( + [1], "int32", 5, force_cpu=True + ) + end = paddle.fluid.layers.fill_constant( + [1], "int32", 0, force_cpu=True + ) + step = paddle.fluid.layers.fill_constant( + [1], "int32", -2, force_cpu=True + ) inputs = { 'Input': x, @@ -1351,16 +1345,18 @@ class TestGradientTruncated(unittest.TestCase): ], 'StepsTensorList': [ step, - ] + ], } helper = LayerHelper("set_value") y = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="set_value", - inputs=inputs, - outputs={'Out': y}, - attrs={'axes': [0]}) + helper.append_op( + type="set_value", + inputs=inputs, + outputs={'Out': y}, + attrs={'axes': [0]}, + ) return y, value @@ -1376,17 +1372,16 @@ class TestGradientTruncated(unittest.TestCase): 'steps': [-4], 'decrease_axes': [], 'none_axes': [], - 'dtype': paddle.float32 + 'dtype': paddle.float32, } inputs = {'Input': x, 'ValueTensor': value} helper = LayerHelper("set_value") y = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="set_value", - inputs=inputs, - outputs={'Out': y}, - attrs=attrs) + helper.append_op( + type="set_value", inputs=inputs, outputs={'Out': y}, attrs=attrs + ) return y, value @@ -1394,18 +1389,15 @@ class TestGradientTruncated(unittest.TestCase): value = paddle.fluid.layers.fill_constant([1], "float32", 1) x.stop_gradient = True value.stop_gradient = False - start = paddle.fluid.layers.fill_constant([1], - "int32", - 0, - force_cpu=True) - end = paddle.fluid.layers.fill_constant([1], - "int32", - 5, - force_cpu=True) - step = paddle.fluid.layers.fill_constant([1], - "int32", - 3, - force_cpu=True) + start = paddle.fluid.layers.fill_constant( + [1], "int32", 0, force_cpu=True + ) + end = paddle.fluid.layers.fill_constant( + [1], "int32", 5, force_cpu=True + ) + step = paddle.fluid.layers.fill_constant( + [1], "int32", 3, force_cpu=True + ) inputs = { 'Input': x, @@ -1418,24 +1410,26 @@ class TestGradientTruncated(unittest.TestCase): ], 'StepsTensorList': [ step, - ] + ], } helper = LayerHelper("set_value") y = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="set_value", - inputs=inputs, - outputs={'Out': y}, - attrs={'axes': [0]}) + helper.append_op( + type="set_value", + inputs=inputs, + outputs={'Out': y}, + attrs={'axes': [0]}, + ) return y, value def set_value(array, i, op): name_x = to_string('x', i) - x = paddle.static.data(name=name_x, - shape=array.shape, - dtype='float32') + x = paddle.static.data( + name=name_x, shape=array.shape, dtype='float32' + ) # set_value_op in __get/setitem__ is an inplace operation. 
# When `input.stop_gradient = True` and `value.stop_gradient = False`, @@ -1445,9 +1439,11 @@ class TestGradientTruncated(unittest.TestCase): loss = paddle.fluid.layers.reduce_sum(y2) sgd = paddle.optimizer.Adam() sgd.minimize(loss) - place = paddle.fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.fluid.CUDAPlace(0) + place = ( + paddle.fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else paddle.fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -1462,8 +1458,9 @@ class TestGradientTruncated(unittest.TestCase): input_shape = [7, 6, 5, 4, 3, 2] - array = np.arange(0, numel(input_shape), - dtype="float32").reshape(input_shape) + array = np.arange(0, numel(input_shape), dtype="float32").reshape( + input_shape + ) for i in range(len(input_shape)): program = paddle.static.Program() @@ -1487,7 +1484,6 @@ class TestGradientTruncated(unittest.TestCase): class TestSetValueInplace(unittest.TestCase): - def test_inplace(self): paddle.disable_static() with paddle.fluid.dygraph.guard(): @@ -1506,7 +1502,6 @@ class TestSetValueInplace(unittest.TestCase): class TestSetValueInplaceLeafVar(unittest.TestCase): - def test_inplace_var_become_leaf_var(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index 2643dfe272cba6975d69bb3647675113a1c62fd3..34420ce5a9c56262fe8f79451625f47249cd81c9 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -25,7 +25,6 @@ paddle.enable_static() class TestSGDOp(OpTest): - def setUp(self): self.op_type = "sgd" self.conf() @@ -45,14 +44,12 @@ class TestSGDOp(OpTest): class TestSGDOpCase8X(TestSGDOp): - def conf(self): self.h = 10 self.w = 64 class TestSparseSGDOp(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() @@ -82,11 +79,13 @@ class TestSparseSGDOp(unittest.TestCase): lr.set(lr_array, place) # create and run sgd operator - sgd_op = Operator("sgd", - Param='Param', - Grad='Grad', - ParamOut='Param', - LearningRate='LearningRate') + sgd_op = Operator( + "sgd", + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate', + ) sgd_op.run(scope, place) # get and compare result @@ -119,13 +118,11 @@ class TestSparseSGDOp(unittest.TestCase): class TestSparseSGDOpCase8X(TestSparseSGDOp): - def conf(self): self.row_numel = 16 class TestSGDOpOptimizeSelectedRows(unittest.TestCase): - def check_with_place(self, place): scope = core.Scope() @@ -170,15 +167,18 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase): # optimize with Python w_after_optimize = np.copy(w_before_optimize) for index, id in enumerate(grad_rows): - w_after_optimize[ - id] = w_before_optimize[id] - lr_value * grad_array[index] + w_after_optimize[id] = ( + w_before_optimize[id] - lr_value * grad_array[index] + ) # create and run sgd operator - sgd_op = Operator("sgd", - Param='Param', - Grad='Grad', - ParamOut='Param', - LearningRate='LearningRate') + sgd_op = Operator( + "sgd", + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate', + ) sgd_op.run(scope, place) # get and compare result @@ -193,13 +193,12 @@ class TestSGDOpOptimizeSelectedRows(unittest.TestCase): class TestSGDOpWithLargeInput(unittest.TestCase): - def runTest(self): paddle.enable_static() data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') - label = 
fluid.layers.fill_constant(shape=[1, 150], - value=0.5, - dtype='float32') + label = fluid.layers.fill_constant( + shape=[1, 150], value=0.5, dtype='float32' + ) emb = fluid.embedding(input=data, size=(10000000, 150), dtype='float32') out = fluid.layers.l2_normalize(x=emb, axis=-1) @@ -212,21 +211,23 @@ class TestSGDOpWithLargeInput(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) compiled_prog = fluid.compiler.CompiledProgram( - fluid.default_main_program()) + fluid.default_main_program() + ) result = exe.run(compiled_prog, fetch_list=[avg_cost]) class TestSGDV2(unittest.TestCase): - def test_sgd_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") a = paddle.to_tensor(value) linear = paddle.nn.Linear(13, 5) # This can be any optimizer supported by dygraph. - adam = paddle.optimizer.SGD(learning_rate=0.01, - parameters=linear.parameters(), - weight_decay=0.01) + adam = paddle.optimizer.SGD( + learning_rate=0.01, + parameters=linear.parameters(), + weight_decay=0.01, + ) out = linear(a) out.backward() adam.step() @@ -239,33 +240,31 @@ class TestSGDV2(unittest.TestCase): init_program = paddle.static.Program() program = paddle.static.Program() block = program.global_block() - mul_x = block.create_parameter(dtype="float32", - shape=[5, 10], - lod_level=0, - name="mul.x", - optimize_attr=optimizer_attr) - mul_y = block.create_var(dtype="float32", - shape=[10, 8], - lod_level=0, - name="mul.y") - mul_out = block.create_var(dtype="float32", - shape=[5, 8], - lod_level=0, - name="mul.out") - mean_out = block.create_var(dtype="float32", - shape=[1], - lod_level=0, - name="mean.out") - block.append_op(type="mul", - inputs={ - "X": mul_x, - "Y": mul_y - }, - outputs={"Out": mul_out}, - attrs={"x_num_col_dims": 1}) - block.append_op(type="mean", - inputs={"X": mul_out}, - outputs={"Out": mean_out}) + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr=optimizer_attr, + ) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y" + ) + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out" + ) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out" + ) + block.append_op( + type="mul", + inputs={"X": mul_x, "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}, + ) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out} + ) sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.01) opts, _ = sgd_optimizer.minimize(mean_out, init_program) return opts @@ -288,15 +287,18 @@ class TestSGDV2(unittest.TestCase): linear_1 = paddle.nn.Linear(13, 5) linear_2 = paddle.nn.Linear(5, 3) # This can be any optimizer supported by dygraph. 
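As shown in the hunk that follows, paddle.optimizer.SGD accepts a list of parameter groups; a compact standalone sketch of the same pattern, with explanatory comments (illustrative, mirroring the test's setup): the second group supplies its own weight_decay and learning_rate, while the first group inherits the top-level defaults.

    import paddle

    linear_1 = paddle.nn.Linear(13, 5)
    linear_2 = paddle.nn.Linear(5, 3)

    # Per-group keys override the optimizer-level defaults for that group only.
    sgd = paddle.optimizer.SGD(
        learning_rate=0.01,
        parameters=[
            {'params': linear_1.parameters()},
            {
                'params': linear_2.parameters(),
                'weight_decay': 0.001,
                'learning_rate': 0.1,
            },
        ],
        weight_decay=0.01,
    )

    loss = linear_2(linear_1(paddle.randn([4, 13]))).sum()
    loss.backward()
    sgd.step()
    sgd.clear_grad()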
- adam = paddle.optimizer.SGD(learning_rate=0.01, - parameters=[{ - 'params': linear_1.parameters() - }, { - 'params': linear_2.parameters(), - 'weight_decay': 0.001, - 'learning_rate': 0.1 - }], - weight_decay=0.01) + adam = paddle.optimizer.SGD( + learning_rate=0.01, + parameters=[ + {'params': linear_1.parameters()}, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + 'learning_rate': 0.1, + }, + ], + weight_decay=0.01, + ) out = linear_1(a) out = linear_2(out) out.backward() @@ -310,15 +312,15 @@ class TestSGDV2(unittest.TestCase): class TestSGDMultiPrecision2_0(unittest.TestCase): - def dygraph_sgd_mp(self, mp): paddle.disable_static() paddle.seed(10) paddle.set_device('gpu') input = paddle.randn((2, 2)) model = paddle.nn.Linear(2, 2) - optimizer = paddle.optimizer.SGD(parameters=model.parameters(), - multi_precision=mp) + optimizer = paddle.optimizer.SGD( + parameters=model.parameters(), multi_precision=mp + ) if mp == True: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) @@ -355,16 +357,17 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): init_loss_scaling=128.0, use_dynamic_loss_scaling=True, use_pure_fp16=True, - use_fp16_guard=False) + use_fp16_guard=False, + ) with paddle.static.program_guard(train_program, startup_program): if mp: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float16') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float16' + ) else: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float32') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer.minimize(loss) @@ -377,9 +380,9 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): x = np.random.random(size=(2, 2)).astype('float32') out = [] for idx in range(5): - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + train_program, feed={"X": x}, fetch_list=[loss.name] + ) out.append(loss_data) return out @@ -389,28 +392,32 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): "Test dygraph mode" output1_dy, params1_dy = self.dygraph_sgd_mp(mp=True) output2_dy, params2_dy = self.dygraph_sgd_mp(mp=False) - np.testing.assert_allclose(output1_dy.astype('float32').numpy(), - output2_dy.astype('float32').numpy(), - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + output1_dy.astype('float32').numpy(), + output2_dy.astype('float32').numpy(), + rtol=1e-05, + atol=0.1, + ) for idx in range(len(params1_dy)): np.testing.assert_allclose( params1_dy[idx].astype('float32').numpy(), params2_dy[idx].astype('float32').numpy(), rtol=1e-05, - atol=0.1) + atol=0.1, + ) "Test static mode" output1_st = self.static_sgd_mp(mp=True) output2_st = self.static_sgd_mp(mp=False) for idx in range(len(output1_st)): - np.testing.assert_allclose(output1_st[idx].astype('float32'), - output2_st[idx].astype('float32'), - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + output1_st[idx].astype('float32'), + output2_st[idx].astype('float32'), + rtol=1e-05, + atol=0.1, + ) class TestSGDMultiPrecision1_0(unittest.TestCase): - def dygraph_sgd_mp(self, mp): paddle.disable_static() paddle.seed(10) @@ -420,7 +427,8 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): optimizer = paddle.fluid.optimizer.SGD( learning_rate=0.001, parameter_list=model.parameters(), - multi_precision=mp) + multi_precision=mp, + ) if mp == True: model = paddle.amp.decorate(models=model, 
level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) @@ -449,8 +457,9 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): exe = paddle.static.Executor('gpu') train_program = paddle.static.Program() startup_program = paddle.static.Program() - optimizer = paddle.fluid.optimizer.SGD(learning_rate=0.001, - multi_precision=mp) + optimizer = paddle.fluid.optimizer.SGD( + learning_rate=0.001, multi_precision=mp + ) if mp: optimizer = paddle.static.amp.decorate( @@ -458,16 +467,17 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): init_loss_scaling=128.0, use_dynamic_loss_scaling=True, use_pure_fp16=True, - use_fp16_guard=False) + use_fp16_guard=False, + ) with paddle.static.program_guard(train_program, startup_program): if mp: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float16') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float16' + ) else: - data = paddle.static.data(shape=[2, 2], - name='X', - dtype='float32') + data = paddle.static.data( + shape=[2, 2], name='X', dtype='float32' + ) hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer.minimize(loss) @@ -480,9 +490,9 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): x = np.random.random(size=(2, 2)).astype('float32') out = [] for idx in range(5): - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) + (loss_data,) = exe.run( + train_program, feed={"X": x}, fetch_list=[loss.name] + ) out.append(loss_data) return out @@ -492,24 +502,29 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): "Test dygraph mode" output1_dy, params1_dy = self.dygraph_sgd_mp(mp=True) output2_dy, params2_dy = self.dygraph_sgd_mp(mp=False) - np.testing.assert_allclose(output1_dy.astype('float32').numpy(), - output2_dy.astype('float32').numpy(), - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + output1_dy.astype('float32').numpy(), + output2_dy.astype('float32').numpy(), + rtol=1e-05, + atol=0.1, + ) for idx in range(len(params1_dy)): np.testing.assert_allclose( params1_dy[idx].astype('float32').numpy(), params2_dy[idx].astype('float32').numpy(), rtol=1e-05, - atol=0.1) + atol=0.1, + ) "Test static mode" output1_st = self.static_sgd_mp(mp=True) output2_st = self.static_sgd_mp(mp=False) for idx in range(len(output1_st)): - np.testing.assert_allclose(output1_st[idx].astype('float32'), - output2_st[idx].astype('float32'), - rtol=1e-05, - atol=0.1) + np.testing.assert_allclose( + output1_st[idx].astype('float32'), + output2_st[idx].astype('float32'), + rtol=1e-05, + atol=0.1, + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py b/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py index 3eb75a9b1405229d7e715b013b0087ac6a5be063..ae7e87d1b3f71581a122c05beeada7f6d1c681bc 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py @@ -17,18 +17,21 @@ import numpy as np import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator -from paddle.fluid.tests.unittests.op_test import (convert_float_to_uint16, - convert_uint16_to_float, - OpTest, OpTestTool) +from paddle.fluid.tests.unittests.op_test import ( + convert_float_to_uint16, + convert_uint16_to_float, + OpTest, + OpTestTool, +) import paddle import paddle.static.amp as amp import struct -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not 
support BF16 evaluation' +) class TestSGDOpBF16(OpTest): - def setUp(self): self.op_type = 'sgd' self.dtype = np.uint16 @@ -53,17 +56,16 @@ class TestSGDOpBF16(OpTest): self.check_output_with_place(core.CPUPlace(), check_dygraph=False) -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not support BF16 evaluation' +) class TestSGDOpBF16Case2(TestSGDOpBF16): - def conf(self): self.h = 10 self.w = 64 class TestSparseSGDOpBF16(unittest.TestCase): - @classmethod def setUpClass(cls): np.random.seed(12345) @@ -121,10 +123,10 @@ class TestSparseSGDOpBF16(unittest.TestCase): return lr_tensor, lr_value -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not support BF16 evaluation' +) class TestSparseGradSGDOpBF16(TestSparseSGDOpBF16): - def setUp(self): self.setup_params() @@ -136,32 +138,35 @@ class TestSparseGradSGDOpBF16(TestSparseSGDOpBF16): def test_sparse_grad_sgd(self): scope = core.Scope() place = core.CPUPlace() - _, grad_array = self.create_sparse_grad_var(scope, place, - self.grad_height, - self.grad_rows, - self.grad_row_numel) + _, grad_array = self.create_sparse_grad_var( + scope, place, self.grad_height, self.grad_rows, self.grad_row_numel + ) param_tensor, param_array = self.create_dense_param_var( - scope, place, self.grad_height, self.grad_row_numel) + scope, place, self.grad_height, self.grad_row_numel + ) _, lr_value = self.create_dense_lr_var(scope, place) - sgd_op = Operator('sgd', - Param='Param', - Grad='Grad', - ParamOut='Param', - LearningRate='LearningRate', - use_mkldnn=True) + sgd_op = Operator( + 'sgd', + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate', + use_mkldnn=True, + ) sgd_op.run(scope, place) - reference = self.ref_optimize(param_array, self.grad_rows, grad_array, - lr_value) + reference = self.ref_optimize( + param_array, self.grad_rows, grad_array, lr_value + ) output = np.array(param_tensor) self.check_output(output, reference, atol=5e-3, rtol=1e-1) -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not support BF16 evaluation' +) class TestSparseGradSGDOpBF16Case2(TestSparseGradSGDOpBF16): - def setup_params(self): self.grad_height = 14 self.grad_rows = [1, 4, 12, 7, 8] @@ -169,17 +174,16 @@ class TestSparseGradSGDOpBF16Case2(TestSparseGradSGDOpBF16): class TestSparseGradSGDOpBF16Case3(TestSparseGradSGDOpBF16): - def setup_params(self): self.grad_height = 10 self.grad_rows = [0, 4, 7] self.grad_row_numel = 120 -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not support BF16 evaluation' +) class TestSparseGradParamSGDOpBF16(TestSparseSGDOpBF16): - def setUp(self): self.setup_params() @@ -192,31 +196,32 @@ class TestSparseGradParamSGDOpBF16(TestSparseSGDOpBF16): def test_sparse_param_grad_sgd(self): scope = core.Scope() place = core.CPUPlace() - _, grad_array = self.create_sparse_grad_var(scope, place, - self.grad_height, - self.grad_rows, - self.grad_row_numel) + _, grad_array = self.create_sparse_grad_var( + scope, place, self.grad_height, self.grad_rows, self.grad_row_numel + ) param_tensor, param_array = self.create_sparse_param_var( - scope, place, self.grad_height, self.param_rows, - self.grad_row_numel) + 
scope, place, self.grad_height, self.param_rows, self.grad_row_numel + ) _, lr_value = self.create_dense_lr_var(scope, place) - sgd_op = Operator('sgd', - Param='Param', - Grad='Grad', - ParamOut='Param', - LearningRate='LearningRate', - use_mkldnn=True) + sgd_op = Operator( + 'sgd', + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate', + use_mkldnn=True, + ) sgd_op.run(scope, place) - reference = self.ref_optimize(param_array, self.grad_rows, grad_array, - lr_value) + reference = self.ref_optimize( + param_array, self.grad_rows, grad_array, lr_value + ) output = np.array(param_tensor) self.check_output(output, reference, atol=5e-3, rtol=1e-1) class TestSparseGradParamSGDOpBF16Case2(TestSparseGradParamSGDOpBF16): - def setup_params(self): self.grad_height = 14 self.grad_rows = [1, 4, 12, 7, 8] @@ -226,7 +231,6 @@ class TestSparseGradParamSGDOpBF16Case2(TestSparseGradParamSGDOpBF16): @OpTestTool.skip_if_not_cpu_bf16() class TestSGDOpBF16API(unittest.TestCase): - @classmethod def setUpClass(cls): np.random.seed(12345) @@ -259,18 +263,20 @@ class TestSGDOpBF16API(unittest.TestCase): return self._fp322bf16(self._bf162fp32(lhs) * self._bf162fp32(rhs)) def _reference(self, data, emb_weight, bf16=False): - emb_out_shape = np.array([self.ids_shape[0], self.w_shape[1]], - dtype=np.int64) - mean_grad_value = np.float32(1.0) / np.prod(emb_out_shape, - dtype=np.float32) + emb_out_shape = np.array( + [self.ids_shape[0], self.w_shape[1]], dtype=np.int64 + ) + mean_grad_value = np.float32(1.0) / np.prod( + emb_out_shape, dtype=np.float32 + ) if bf16: - mean_grad = np.full(emb_out_shape, - self._fp322bf16(mean_grad_value), - dtype=np.uint16) + mean_grad = np.full( + emb_out_shape, self._fp322bf16(mean_grad_value), dtype=np.uint16 + ) else: - mean_grad = np.full(emb_out_shape, - mean_grad_value, - dtype=np.float32) + mean_grad = np.full( + emb_out_shape, mean_grad_value, dtype=np.float32 + ) # add_grad = 1 * mean_grad out_dtype = np.uint16 if bf16 else np.float32 lookup_table_grad = np.zeros(self.w_shape, dtype=out_dtype) @@ -281,7 +287,8 @@ class TestSGDOpBF16API(unittest.TestCase): idxv = idx[0] for j in range(self.w_shape[1]): lookup_table_grad[idxv, j] = self._add_bf16( - lookup_table_grad[idxv, j], mean_grad[i, j]) + lookup_table_grad[idxv, j], mean_grad[i, j] + ) ref_grad = np.ndarray(shape=emb_weight.shape, dtype=np.uint16) lr_bf16 = self._fp322bf16(self.learning_rate) @@ -289,29 +296,26 @@ class TestSGDOpBF16API(unittest.TestCase): for i, row in enumerate(emb_weight): for j, val in enumerate(row): ref_grad[i, j] = self._sub_bf16( - val, self._mul_bf16(lr_bf16, lookup_table_grad[i, j])) + val, self._mul_bf16(lr_bf16, lookup_table_grad[i, j]) + ) else: for i, idx in enumerate(data): lookup_table_grad[idx, :] += mean_grad[i] ref_grad = emb_weight - self.learning_rate * lookup_table_grad return ref_grad - def _check_output(self, - actual, - reference, - bf16=False, - atol=0, - rtol=0.15e-2): + def _check_output( + self, actual, reference, bf16=False, atol=0, rtol=0.15e-2 + ): output = actual if bf16 else convert_uint16_to_float(actual) if bf16: np.testing.assert_allclose(output, reference, atol=atol, rtol=rtol) else: try: print('Compare with FP32 values:') - np.testing.assert_allclose(output, - reference, - atol=atol, - rtol=rtol) + np.testing.assert_allclose( + output, reference, atol=atol, rtol=rtol + ) except AssertionError as e: print(e) @@ -329,56 +333,61 @@ class TestSGDOpBF16API(unittest.TestCase): main = fluid.Program() with fluid.program_guard(main): x = 
fluid.layers.data(name='X', shape=self.ids_shape, dtype='int64') - label = fluid.layers.data(name='Y', - shape=self.y_shape, - dtype='uint16') - emb = fluid.layers.embedding(input=x, - size=self.w_shape, - param_attr=fluid.ParamAttr( - name="emb_weight", - initializer=self.initializer), - is_sparse=False, - dtype="uint16") # bfloat16 + label = fluid.layers.data( + name='Y', shape=self.y_shape, dtype='uint16' + ) + emb = fluid.layers.embedding( + input=x, + size=self.w_shape, + param_attr=fluid.ParamAttr( + name="emb_weight", initializer=self.initializer + ), + is_sparse=False, + dtype="uint16", + ) # bfloat16 cost = fluid.layers.elementwise_add(emb, label) avg_cost = paddle.mean(cost) sgd_optimizer = paddle.optimizer.SGD( - learning_rate=self.learning_rate) + learning_rate=self.learning_rate + ) sgd_optimizer = amp.bf16.decorate_bf16( sgd_optimizer, amp_lists=amp.bf16.AutoMixedPrecisionListsBF16( custom_bf16_list={ 'lookup_table', - }), + } + ), use_bf16_guard=False, - use_pure_bf16=True) + use_pure_bf16=True, + ) sgd_optimizer.minimize( - avg_cost, startup_program=fluid.default_startup_program()) + avg_cost, startup_program=fluid.default_startup_program() + ) train_reader = paddle.batch(self._data_reader, batch_size=1) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) test_prog = main.clone(for_test=True) - sgd_optimizer.amp_init(place, - test_program=test_prog, - use_bf16_test=True) + sgd_optimizer.amp_init( + place, test_program=test_prog, use_bf16_test=True + ) ref_emb = np.full(self.w_shape, self.value, dtype=np.float32) - ref_emb_bf16 = np.full(self.w_shape, - self._fp322bf16(self.value), - dtype=np.uint16) + ref_emb_bf16 = np.full( + self.w_shape, self._fp322bf16(self.value), dtype=np.uint16 + ) emb_weight = [] for sample in train_reader(): data = sample[0][0] label = sample[0][1] y_bf16 = convert_float_to_uint16(label) - emb_weight = exe.run(main, - feed={ - 'X': data, - 'Y': y_bf16 - }, - fetch_list=['emb_weight']) + emb_weight = exe.run( + main, + feed={'X': data, 'Y': y_bf16}, + fetch_list=['emb_weight'], + ) ref_emb = self._reference(data, ref_emb) ref_emb_bf16 = self._reference(data, ref_emb_bf16, True) diff --git a/python/paddle/fluid/tests/unittests/test_sgn.py b/python/paddle/fluid/tests/unittests/test_sgn.py index dacfe7aa1d21d6243d9aab12059d0fe7db50641b..75d5d1b7847c4d4010750e8ebade0ee70f51083a 100644 --- a/python/paddle/fluid/tests/unittests/test_sgn.py +++ b/python/paddle/fluid/tests/unittests/test_sgn.py @@ -29,40 +29,49 @@ def np_sgn(x: np.ndarray): class TestSgnError(unittest.TestCase): - def test_errors(self): # The input dtype of sgn must be float16, float32, float64,complex64,complex128. 
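np_sgn is the NumPy reference these sgn tests check against. Its usual definition is x / |x| for nonzero complex entries (zero maps to zero) and the ordinary sign for real entries; a hedged sketch of such a helper, not necessarily the one defined in this file:

    import numpy as np

    def np_sgn_sketch(x: np.ndarray) -> np.ndarray:
        if np.iscomplexobj(x):
            # Complex sign: x / |x| elementwise, with 0 kept at 0.
            out = np.zeros_like(x)
            nonzero = x != 0
            out[nonzero] = x[nonzero] / np.abs(x[nonzero])
            return out
        # Real sign: -1, 0 or 1 with the input dtype preserved.
        return np.sign(x).astype(x.dtype)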
input2 = paddle.to_tensor( - np.random.randint(-10, 10, size=[12, 20]).astype('int32')) + np.random.randint(-10, 10, size=[12, 20]).astype('int32') + ) input3 = paddle.to_tensor( - np.random.randint(-10, 10, size=[12, 20]).astype('int64')) + np.random.randint(-10, 10, size=[12, 20]).astype('int64') + ) self.assertRaises(TypeError, paddle.sgn, input2) self.assertRaises(TypeError, paddle.sgn, input3) class TestSignAPI(unittest.TestCase): - def setUp(self) -> None: self.support_dtypes = [ - 'float16', 'float32', 'float64', 'complex64', 'complex128' + 'float16', + 'float32', + 'float64', + 'complex64', + 'complex128', ] if paddle.device.get_device() == 'cpu': self.support_dtypes = [ - 'float32', 'float64', 'complex64', 'complex128' + 'float32', + 'float64', + 'complex64', + 'complex128', ] def test_dtype(self): for dtype in self.support_dtypes: x = paddle.to_tensor( - np.random.randint(-10, 10, size=[12, 20, 2]).astype(dtype)) + np.random.randint(-10, 10, size=[12, 20, 2]).astype(dtype) + ) paddle.sgn(x) def test_complex(self): for dtype in ['complex64', 'complex128']: - np_x = np.array([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]], - dtype=dtype) + np_x = np.array( + [[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]], dtype=dtype + ) x = paddle.to_tensor(np_x) z = paddle.sgn(x) np_z = z.numpy() diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py index 8bee609623ea1b02f4f480d02129564d27e46f38..f4de948d229a61995958c110d0b3faf9bebbe605 100644 --- a/python/paddle/fluid/tests/unittests/test_shape_op.py +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -21,7 +21,6 @@ from paddle.fluid.op import Operator class TestShapeOp(OpTest): - def setUp(self): self.op_type = "shape" self.python_api = paddle.shape @@ -39,19 +38,16 @@ class TestShapeOp(OpTest): class case1(TestShapeOp): - def config(self): self.shape = [2] class case2(TestShapeOp): - def config(self): self.shape = [1, 2, 3] class TestShapeWithSelectedRows(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_shard_index_op.py b/python/paddle/fluid/tests/unittests/test_shard_index_op.py index 5c51b3cf58e8b2a536f3aac6d20479f4379717f1..8c13e2b44c3c549d55c1161271fe2d199f59cbd9 100644 --- a/python/paddle/fluid/tests/unittests/test_shard_index_op.py +++ b/python/paddle/fluid/tests/unittests/test_shard_index_op.py @@ -37,13 +37,12 @@ def common_setup(self, index_num, nshards, shard_id, ignore_value): 'index_num': index_num, 'nshards': nshards, 'shard_id': shard_id, - 'ignore_value': ignore_value + 'ignore_value': ignore_value, } self.outputs = {'Out': (out, x_lod)} class TestShardIndexShardId0Op(OpTest): - def setUp(self): common_setup(self, 20, 2, 0, -1) @@ -52,7 +51,6 @@ class TestShardIndexShardId0Op(OpTest): class TestShardIndexShardId1Op(OpTest): - def setUp(self): common_setup(self, 20, 2, 1, -1) @@ -61,7 +59,6 @@ class TestShardIndexShardId1Op(OpTest): class TestShardIndexIgnoreValueOp(OpTest): - def setUp(self): common_setup(self, 20, 2, 0, -2) @@ -70,7 +67,6 @@ class TestShardIndexIgnoreValueOp(OpTest): class TestShardIndexNotEvenlyDividedOp(OpTest): - def setUp(self): common_setup(self, 15, 2, 1, -1) diff --git a/python/paddle/fluid/tests/unittests/test_share_data_op.py b/python/paddle/fluid/tests/unittests/test_share_data_op.py index 56ac3af6d8538453089d6da5c639ea1cfd99005a..b6f2d1ecbec46cb24446e0465ce8999d610a7fe7 100644 --- 
a/python/paddle/fluid/tests/unittests/test_share_data_op.py +++ b/python/paddle/fluid/tests/unittests/test_share_data_op.py @@ -20,7 +20,6 @@ from paddle.fluid.op import Operator class TestShareDataOp(OpTest): - def setUp(self): self.op_type = "share_data" input = np.random.rand(2, 3, 5).astype("float32") @@ -32,7 +31,6 @@ class TestShareDataOp(OpTest): class TestShareDataOpOnDifferentPlaces(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py index 9e765859127c70050a4e82b23c29a85bf0da8585..c30c2095c9bac75019f63c19c7ba3f4bd1bcefeb 100644 --- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -27,16 +27,14 @@ from paddle.fluid.layers.control_flow import lod_rank_table class TestShrinkRNNMemoryBase(unittest.TestCase): - def setUp(self): self.main_program = Program() switch_main_program(self.main_program) x = layers.data('x', shape=[100], dtype='float32') x.stop_gradient = False - rank_table_tensor = layers.data('rank_table_tensor', - shape=[1], - dtype='float32', - lod_level=1) + rank_table_tensor = layers.data( + 'rank_table_tensor', shape=[1], dtype='float32', lod_level=1 + ) table = lod_rank_table(x=rank_table_tensor) i = layers.zeros(dtype='int64', shape=[1]) self.mem1 = shrink_memory(x=x, i=i, table=table) @@ -58,7 +56,6 @@ class TestShrinkRNNMemoryBase(unittest.TestCase): class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase): - def test_refer_lod(self): cpu = core.CPUPlace() x_tensor = core.LoDTensor() @@ -69,16 +66,15 @@ class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase): rank_table_tensor = core.LoDTensor() rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]]) rank_table_tensor.set( - np.random.random(size=(6, 1)).astype('float32'), cpu) + np.random.random(size=(6, 1)).astype('float32'), cpu + ) exe = Executor(cpu) outs = exe.run( - feed={ - 'x': x_tensor, - 'rank_table_tensor': rank_table_tensor - }, + feed={'x': x_tensor, 'rank_table_tensor': rank_table_tensor}, fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad], - return_numpy=False) + return_numpy=False, + ) np.testing.assert_allclose(tensor_np[0:6], outs[0], rtol=1e-05) np.testing.assert_allclose(tensor_np[0:5], outs[1], rtol=1e-05) np.testing.assert_allclose(tensor_np[0:2], outs[2], rtol=1e-05) @@ -86,7 +82,6 @@ class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase): class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase): - def test_no_lod(self): cpu = core.CPUPlace() x_tensor = core.LoDTensor() @@ -96,16 +91,15 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase): rank_table_tensor = core.LoDTensor() rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]]) rank_table_tensor.set( - np.random.random(size=(6, 1)).astype('float32'), cpu) + np.random.random(size=(6, 1)).astype('float32'), cpu + ) exe = Executor(cpu) outs = exe.run( - feed={ - 'x': x_tensor, - 'rank_table_tensor': rank_table_tensor - }, + feed={'x': x_tensor, 'rank_table_tensor': rank_table_tensor}, fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad], - return_numpy=False) + return_numpy=False, + ) np.testing.assert_allclose(tensor_np[0:3], outs[0], rtol=1e-05) np.testing.assert_allclose(tensor_np[0:2], outs[1], rtol=1e-05) np.testing.assert_allclose(tensor_np[0:1], outs[2], rtol=1e-05) @@ -113,7 +107,6 @@ class 
TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase): class TestShrinkRNNMemoryOpError(unittest.TestCase): - def test_erroes(self): with program_guard(Program(), Program()): x = layers.zeros(dtype='int64', shape=[3, 100]) @@ -121,8 +114,8 @@ class TestShrinkRNNMemoryOpError(unittest.TestCase): rank_table_tensor = core.LoDTensor() rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]]) rank_table_tensor.set( - np.random.random(size=(6, 1)).astype('float32'), - core.CPUPlace()) + np.random.random(size=(6, 1)).astype('float32'), core.CPUPlace() + ) rank_table = np.random.random(size=(6, 1)).astype('float32') # The type of x in shrink_rnn_memory must be Variable. diff --git a/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py b/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py index 7752cfa3a65aed693b877023673edc7145add864..95fc233239a113453490b94a79ea52873d665d25 100644 --- a/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py +++ b/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py @@ -21,7 +21,6 @@ import os class TestShuffleBatchOpBase(OpTest): - def gen_random_array(self, shape, low=0, high=1): rnd = (high - low) * np.random.random(shape) + low return rnd.astype(self.dtype) @@ -40,8 +39,9 @@ class TestShuffleBatchOpBase(OpTest): self.dtype = np.float64 self.shape = self.get_shape() x = self.gen_random_array(self.shape) - seed = np.random.random_integers(low=10, high=100, - size=(1, )).astype('int64') + seed = np.random.random_integers(low=10, high=100, size=(1,)).astype( + 'int64' + ) self.inputs = {'X': x, 'Seed': seed} self.outputs = { 'Out': np.array([]).astype(x.dtype), @@ -78,7 +78,6 @@ class TestShuffleBatchOpBase(OpTest): class TestShuffleBatchOp2(TestShuffleBatchOpBase): - def get_shape(self): return (4, 30) diff --git a/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py b/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py index 594df3f6c091d9b46378aa28ecfad16c92c9fde9..01bdd48ff24ee3d5ef1ec313bbcdcad25d73dd1f 100644 --- a/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py +++ b/python/paddle/fluid/tests/unittests/test_shuffle_channel_op.py @@ -18,7 +18,6 @@ from op_test import OpTest class TestShuffleChannelOp(OpTest): - def setUp(self): self.op_type = "shuffle_channel" self.batch_size = 10 @@ -27,13 +26,14 @@ class TestShuffleChannelOp(OpTest): self.layer_w = 4 self.group = 4 self.x = np.random.random( - (self.batch_size, self.input_channels, self.layer_h, - self.layer_w)).astype('float32') + (self.batch_size, self.input_channels, self.layer_h, self.layer_w) + ).astype('float32') self.inputs = {'X': self.x} self.attrs = {'group': self.group} n, c, h, w = self.x.shape - input_reshaped = np.reshape(self.x, - (-1, self.group, c // self.group, h, w)) + input_reshaped = np.reshape( + self.x, (-1, self.group, c // self.group, h, w) + ) input_transposed = np.transpose(input_reshaped, (0, 2, 1, 3, 4)) self.outputs = {'Out': np.reshape(input_transposed, (-1, c, h, w))} diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py index f6ea5018ea986745cda6a7448c05ef5dc1e65f53..1d09aefd2d8a8eaac57d1cfba41b009804841d96 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py @@ -24,12 +24,12 @@ import paddle def test_fluid_sigmoid(x, label, normalize=False, ignore_index=-100): 
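# [Editor's note] The sigmoid_cross_entropy_with_logits tests below exercise
# an op whose per-element loss is binary cross entropy computed on logits.
# The numerically stable rearrangement of that loss (the same identity that
# appears verbatim in calc_sigmoid_focal_loss later in this diff) is sketched
# here in NumPy; the helper name is an editorial assumption, not part of the
# change set.
import numpy as np


def bce_with_logits_sketch(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # -y*log(sigmoid(x)) - (1-y)*log(1-sigmoid(x))
    # == max(x, 0) - x*y + log(1 + exp(-|x|)), which cannot overflow in exp()
    return np.maximum(x, 0) - x * y + np.log1p(np.exp(-np.abs(x)))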
return paddle.fluid.layers.sigmoid_cross_entropy_with_logits( - x, label, int(ignore_index), normalize=normalize) + x, label, int(ignore_index), normalize=normalize + ) class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -37,12 +37,14 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - (batch_size, num_classes)).astype("float64")), - 'Label': - np.random.randint(0, 2, (batch_size, num_classes)).astype("float64") + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ) + ), + 'Label': np.random.randint(0, 2, (batch_size, num_classes)).astype( + "float64" + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -61,8 +63,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -71,13 +72,14 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): num_classes = 20 ignore_index = -1 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - (batch_size, num_classes)).astype("float64")), - 'Label': - np.random.randint(-1, 2, - (batch_size, num_classes)).astype("float64") + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ) + ), + 'Label': np.random.randint(-1, 2, (batch_size, num_classes)).astype( + "float64" + ), } self.attrs = { 'ignore_index': ignore_index, @@ -100,8 +102,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -109,12 +110,14 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - (batch_size, num_classes)).astype("float64")), - 'Label': - np.random.uniform(0, 1, (batch_size, num_classes)).astype("float64") + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ) + ), + 'Label': np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -133,7 +136,6 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): class TestSigmoidCrossEntropyWithNorm(OpTest): - def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" self.python_api = test_fluid_sigmoid @@ -141,13 +143,14 @@ class TestSigmoidCrossEntropyWithNorm(OpTest): num_classes = 20 ignore_index = -1 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - (batch_size, num_classes)).astype("float64")), - 'Label': - np.random.randint(-1, 2, - (batch_size, num_classes)).astype("float64") + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ) + ), + 'Label': np.random.randint(-1, 2, (batch_size, num_classes)).astype( + "float64" + ), } self.attrs = {'ignore_index': ignore_index, 'normalize': True} sigmoid_X = expit(self.inputs['X']) @@ -157,7 +160,8 @@ class 
TestSigmoidCrossEntropyWithNorm(OpTest): out[np.where(self.inputs['Label'] == ignore_index)] = 0 if self.attrs['normalize']: out = out / float( - np.where(self.inputs['Label'] != ignore_index)[0].size) + np.where(self.inputs['Label'] != ignore_index)[0].size + ) self.outputs = {'Out': out} def test_check_output(self): @@ -168,8 +172,7 @@ class TestSigmoidCrossEntropyWithNorm(OpTest): class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -177,13 +180,14 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform( - 0, 1, tuple(batch_size + [num_classes])).astype("float64")), - 'Label': - np.random.uniform(0, 1, tuple(batch_size + - [num_classes])).astype("float64") + 0, 1, tuple(batch_size + [num_classes]) + ).astype("float64") + ), + 'Label': np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype("float64"), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -202,7 +206,6 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): class TestSigmoidCrossEntropyWithNorm2(OpTest): - def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" self.python_api = test_fluid_sigmoid @@ -210,13 +213,14 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): num_classes = 20 ignore_index = -1 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform( - 0, 1, tuple(batch_size + [num_classes])).astype("float64")), - 'Label': - np.random.randint(-1, 2, tuple(batch_size + - [num_classes])).astype("float64") + 0, 1, tuple(batch_size + [num_classes]) + ).astype("float64") + ), + 'Label': np.random.randint( + -1, 2, tuple(batch_size + [num_classes]) + ).astype("float64"), } self.attrs = {'ignore_index': ignore_index, 'normalize': True} sigmoid_X = expit(self.inputs['X']) @@ -226,7 +230,8 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): out[np.where(self.inputs['Label'] == ignore_index)] = 0 if self.attrs['normalize']: out = out / float( - np.where(self.inputs['Label'] != ignore_index)[0].size) + np.where(self.inputs['Label'] != ignore_index)[0].size + ) self.outputs = {'Out': out} def test_check_output(self): @@ -236,8 +241,7 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): self.check_grad(['X'], 'Out', check_eager=True) class TestSigmoidCrossEntropyWithLogitsOp6(OpTest): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -245,14 +249,14 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + - [num_classes])).astype("float64")), - 'Label': - np.random.randint(0, 2, tuple(batch_size + - [num_classes])).astype("float64") + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype("float64") + ), + 'Label': np.random.randint( + 0, 2, tuple(batch_size + [num_classes]) + ).astype("float64"), } # Fw Pass is implemented as elementwise sigmoid followed by @@ -270,18 +274,21 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): self.check_grad(['X'], 'Out', check_eager=True) class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase): - def test_errors(self): with 
program_guard(Program(), Program()): def test_Variable(): # the input of sigmoid_cross_entropy_with_logits must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, - 5]), [[1, 1, 1, 1]], - fluid.CPUPlace()) - lab1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), + [[1, 1, 1, 1]], + fluid.CPUPlace(), + ) + lab1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), + [[1, 1, 1, 1]], + fluid.CPUPlace(), + ) fluid.layers.sigmoid_cross_entropy_with_logits(x1, lab1) self.assertRaises(TypeError, test_Variable) @@ -289,12 +296,12 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): def test_dtype(): # the input dtype of sigmoid_cross_entropy_with_logits must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="int32") - lab2 = fluid.layers.data(name='lab2', - shape=[3, 4, 5, 6], - dtype="int32") + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="int32" + ) + lab2 = fluid.layers.data( + name='lab2', shape=[3, 4, 5, 6], dtype="int32" + ) fluid.layers.sigmoid_cross_entropy_with_logits(x2, lab2) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py index a3b614cb51f3e55a41c240d529578f9da69d37d6..e36ba383c07efc92206b734ea42e8f9cb8582e1f 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py @@ -19,84 +19,83 @@ import unittest from paddle.fluid.framework import _test_eager_guard -def call_sfl_functional(logit, - label, - normalizer, - alpha=0.25, - gamma=2.0, - reduction='sum'): - res = paddle.nn.functional.sigmoid_focal_loss(logit, - label, - normalizer, - alpha=alpha, - gamma=gamma, - reduction=reduction) +def call_sfl_functional( + logit, label, normalizer, alpha=0.25, gamma=2.0, reduction='sum' +): + res = paddle.nn.functional.sigmoid_focal_loss( + logit, label, normalizer, alpha=alpha, gamma=gamma, reduction=reduction + ) return res -def test_static(place, - logit_np, - label_np, - normalizer_np, - alpha=0.25, - gamma=2.0, - reduction='sum'): +def test_static( + place, + logit_np, + label_np, + normalizer_np, + alpha=0.25, + gamma=2.0, + reduction='sum', +): paddle.enable_static() prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data(name='logit', - shape=logit_np.shape, - dtype='float64') - label = paddle.fluid.data(name='label', - shape=label_np.shape, - dtype='float64') + logit = paddle.fluid.data( + name='logit', shape=logit_np.shape, dtype='float64' + ) + label = paddle.fluid.data( + name='label', shape=label_np.shape, dtype='float64' + ) feed_dict = {"logit": logit_np, "label": label_np} normalizer = None if normalizer_np is not None: - normalizer = paddle.fluid.data(name='normalizer', - shape=normalizer_np.shape, - dtype='float64') + normalizer = paddle.fluid.data( + name='normalizer', shape=normalizer_np.shape, dtype='float64' + ) feed_dict["normalizer"] = normalizer_np - res = call_sfl_functional(logit, label, normalizer, alpha, gamma, - reduction) + res = call_sfl_functional( + logit, label, normalizer, alpha, gamma, reduction + ) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) return static_result -def 
test_dygraph(place, - logit_np, - label_np, - normalizer_np, - alpha=0.25, - gamma=2.0, - reduction='sum'): +def test_dygraph( + place, + logit_np, + label_np, + normalizer_np, + alpha=0.25, + gamma=2.0, + reduction='sum', +): paddle.disable_static() logit = paddle.to_tensor(logit_np) label = paddle.to_tensor(label_np) normalizer = None if normalizer_np is not None: normalizer = paddle.to_tensor(normalizer_np) - dy_res = call_sfl_functional(logit, label, normalizer, alpha, gamma, - reduction) + dy_res = call_sfl_functional( + logit, label, normalizer, alpha, gamma, reduction + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result -def calc_sigmoid_focal_loss(logit_np, - label_np, - normalizer_np, - alpha=0.25, - gamma=2.0, - reduction='sum'): +def calc_sigmoid_focal_loss( + logit_np, label_np, normalizer_np, alpha=0.25, gamma=2.0, reduction='sum' +): - loss = np.maximum( - logit_np, - 0) - logit_np * label_np + np.log(1 + np.exp(-np.abs(logit_np))) + loss = ( + np.maximum(logit_np, 0) + - logit_np * label_np + + np.log(1 + np.exp(-np.abs(logit_np))) + ) pred = 1 / (1 + np.exp(-logit_np)) p_t = pred * label_np + (1 - pred) * (1 - label_np) @@ -106,7 +105,7 @@ def calc_sigmoid_focal_loss(logit_np, loss = alpha_t * loss if gamma is not None: - loss = loss * ((1 - p_t)**gamma) + loss = loss * ((1 - p_t) ** gamma) if normalizer_np is not None: loss = loss / normalizer_np @@ -120,62 +119,89 @@ def calc_sigmoid_focal_loss(logit_np, class TestSigmoidFocalLoss(unittest.TestCase): - def test_SigmoidFocalLoss(self): - logit_np = np.random.uniform(0.1, 0.8, - size=(2, 3, 4, 10)).astype(np.float64) - label_np = np.random.randint(0, 2, - size=(2, 3, 4, 10)).astype(np.float64) + logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype( + np.float64 + ) + label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype( + np.float64 + ) normalizer_nps = [ - np.asarray([np.sum(label_np > 0)], dtype=label_np.dtype), None + np.asarray([np.sum(label_np > 0)], dtype=label_np.dtype), + None, ] places = [fluid.CPUPlace()] if fluid.core.is_compiled_with_cuda(): places.append(fluid.CUDAPlace(0)) reductions = ['sum', 'mean', 'none'] alphas = [0.25, 0.5] - gammas = [3, 0.] 
+ gammas = [3, 0.0] for place in places: for reduction in reductions: for alpha in alphas: for gamma in gammas: for normalizer_np in normalizer_nps: - static_result, = test_static( - place, logit_np, label_np, normalizer_np, alpha, - gamma, reduction) - dy_result = test_dygraph(place, logit_np, label_np, - normalizer_np, alpha, - gamma, reduction) + (static_result,) = test_static( + place, + logit_np, + label_np, + normalizer_np, + alpha, + gamma, + reduction, + ) + dy_result = test_dygraph( + place, + logit_np, + label_np, + normalizer_np, + alpha, + gamma, + reduction, + ) with _test_eager_guard(): eager_result = test_dygraph( - place, logit_np, label_np, normalizer_np, - alpha, gamma, reduction) + place, + logit_np, + label_np, + normalizer_np, + alpha, + gamma, + reduction, + ) expected = calc_sigmoid_focal_loss( - logit_np, label_np, normalizer_np, alpha, gamma, - reduction) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-05) - np.testing.assert_allclose(dy_result, - expected, - rtol=1e-05) - np.testing.assert_allclose(eager_result, - expected, - rtol=1e-05) + logit_np, + label_np, + normalizer_np, + alpha, + gamma, + reduction, + ) + np.testing.assert_allclose( + static_result, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-05 + ) + np.testing.assert_allclose( + dy_result, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + eager_result, expected, rtol=1e-05 + ) def test_SigmoidFocalLoss_error(self): paddle.disable_static() logit = paddle.to_tensor([[0.97], [0.91], [0.03]], dtype='float32') label = paddle.to_tensor([[1.0], [1.0], [0.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.sigmoid_focal_loss, - logit=logit, - label=label, - normalizer=None, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.sigmoid_focal_loss, + logit=logit, + label=label, + normalizer=None, + reduction="unsupport reduction", + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss_op.py index 91b2981acd53ac02d1ebabce065a078bcc28ec07..4b9831f8e8d0a528dc70ac5dbd4828ff6e3e2120 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss_op.py @@ -22,8 +22,9 @@ from paddle.fluid import core from paddle.fluid import Program, program_guard -def sigmoid_focal_loss_forward(x_data, label_data, fg_num_data, gamma, alpha, - num_classes): +def sigmoid_focal_loss_forward( + x_data, label_data, fg_num_data, gamma, alpha, num_classes +): x_data_t = copy.deepcopy(x_data) out_data = copy.deepcopy(x_data) x_width = len(x_data) @@ -41,12 +42,13 @@ def sigmoid_focal_loss_forward(x_data, label_data, fg_num_data, gamma, alpha, z_neg = (1.0 - alpha) / fg_num z_pos = alpha / fg_num - p = 1. / (1. + math.exp(-x)) + p = 1.0 / (1.0 + math.exp(-x)) FLT_MIN = 1.175494351e-38 - term_pos = math.pow((1. - p), gamma) * math.log(max(FLT_MIN, p)) - term_neg = math.pow(p, gamma) * (-1. * x * (x >= 0) - - math.log(1. + math.exp(x - 2. 
* x * - (x >= 0)))) + term_pos = math.pow((1.0 - p), gamma) * math.log(max(FLT_MIN, p)) + term_neg = math.pow(p, gamma) * ( + -1.0 * x * (x >= 0) + - math.log(1.0 + math.exp(x - 2.0 * x * (x >= 0))) + ) out_data[idx] = 0.0 out_data[idx] += -c_pos * term_pos * z_pos out_data[idx] += -c_neg * term_neg * z_neg @@ -56,7 +58,6 @@ def sigmoid_focal_loss_forward(x_data, label_data, fg_num_data, gamma, alpha, class TestSigmoidFocalLossOp1(OpTest): - def set_argument(self): self.num_anchors = 10 self.num_classes = 10 @@ -68,8 +69,9 @@ class TestSigmoidFocalLossOp1(OpTest): dims = (self.num_anchors, self.num_classes) X = np.random.standard_normal(dims).astype("float64") - L = np.random.randint(0, self.num_classes + 1, - (dims[0], 1)).astype("int32") + L = np.random.randint(0, self.num_classes + 1, (dims[0], 1)).astype( + "int32" + ) F = np.zeros(1) F[0] = len(np.where(L > 0)[0]) F = F.astype("int32") @@ -84,10 +86,14 @@ class TestSigmoidFocalLossOp1(OpTest): 'gamma': self.gamma, 'alpha': self.alpha, } - loss = sigmoid_focal_loss_forward(self.inputs['X'], - self.inputs['Label'], - self.inputs['FgNum'], self.gamma, - self.alpha, self.num_classes) + loss = sigmoid_focal_loss_forward( + self.inputs['X'], + self.inputs['Label'], + self.inputs['FgNum'], + self.gamma, + self.alpha, + self.num_classes, + ) self.outputs = {'Out': loss.astype('float64')} def test_check_output(self): @@ -97,23 +103,22 @@ class TestSigmoidFocalLossOp1(OpTest): self.check_grad(['X'], 'Out') -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSigmoidFocalLossOp2(TestSigmoidFocalLossOp1): - def test_check_output(self): place = core.CUDAPlace(0) self.check_output_with_place(place, atol=2e-3) def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.002) + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=0.002 + ) class TestSigmoidFocalLossOp3(TestSigmoidFocalLossOp1): - def set_argument(self): self.num_anchors = 200 self.num_classes = 10 @@ -121,50 +126,45 @@ class TestSigmoidFocalLossOp3(TestSigmoidFocalLossOp1): self.alpha = 0.5 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSigmoidFocalLossOp4(TestSigmoidFocalLossOp3): - def test_check_output(self): place = core.CUDAPlace(0) self.check_output_with_place(place, atol=2e-3) def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.002) + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=0.002 + ) class TestSigmoidFocalLossOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - label1 = fluid.layers.fill_constant(shape=[10, 1], - dtype="int32", - value=1) - fg_num1 = fluid.layers.fill_constant(shape=[1], - dtype="int32", - value=5) + label1 = fluid.layers.fill_constant( + shape=[10, 1], dtype="int32", value=1 + ) + fg_num1 = fluid.layers.fill_constant( + shape=[1], dtype="int32", value=5 + ) # The `x` must be Variable and the data type of `x` Tensor must be one of float32 and float64. 
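# [Editor's note] The focal-loss references reformatted in these hunks share
# one core idea: scale the per-element binary cross entropy by
# alpha_t * (1 - p_t)**gamma so that easy, well-classified examples are
# down-weighted. A condensed NumPy sketch matching the functional reference
# (calc_sigmoid_focal_loss) earlier in this diff, before normalization and
# reduction; the names below are illustrative only.
import numpy as np


def focal_loss_sketch(logit, label, alpha=0.25, gamma=2.0):
    bce = np.maximum(logit, 0) - logit * label + np.log1p(np.exp(-np.abs(logit)))
    p = 1.0 / (1.0 + np.exp(-logit))          # sigmoid(logit)
    p_t = p * label + (1 - p) * (1 - label)   # probability of the true class
    alpha_t = alpha * label + (1 - alpha) * (1 - label)
    return alpha_t * (1 - p_t) ** gamma * bce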
def test_x_type(): x1 = [2] - fluid.layers.sigmoid_focal_loss(x=x1, - label=label1, - fg_num=fg_num1, - gamma=2., - alpha=0.25) + fluid.layers.sigmoid_focal_loss( + x=x1, label=label1, fg_num=fg_num1, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_x_type) def test_x_tensor_dtype(): x2 = fluid.layers.data(name='x2', shape=[10, 10], dtype="int16") - fluid.layers.sigmoid_focal_loss(x=x2, - label=label1, - fg_num=fg_num1, - gamma=2., - alpha=0.25) + fluid.layers.sigmoid_focal_loss( + x=x2, label=label1, fg_num=fg_num1, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_x_tensor_dtype) @@ -173,46 +173,38 @@ class TestSigmoidFocalLossOpError(unittest.TestCase): # The `label` must be Variable and the data type of `label` Tensor must be int32. def test_label_type(): label2 = [2] - fluid.layers.sigmoid_focal_loss(x=x3, - label=label2, - fg_num=fg_num1, - gamma=2., - alpha=0.25) + fluid.layers.sigmoid_focal_loss( + x=x3, label=label2, fg_num=fg_num1, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_label_type) def test_label_tensor_dtype(): - label3 = fluid.layers.fill_constant(shape=[10, 1], - dtype="float32", - value=1.) - fluid.layers.sigmoid_focal_loss(x=x3, - label=label3, - fg_num=fg_num1, - gamma=2., - alpha=0.25) + label3 = fluid.layers.fill_constant( + shape=[10, 1], dtype="float32", value=1.0 + ) + fluid.layers.sigmoid_focal_loss( + x=x3, label=label3, fg_num=fg_num1, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_label_tensor_dtype) # The `fg_num` must be Variable and the data type of `fg_num` Tensor must be int32. def test_fgnum_type(): fg_num2 = [2] - fluid.layers.sigmoid_focal_loss(x=x3, - label=label1, - fg_num=fg_num2, - gamma=2., - alpha=0.25) + fluid.layers.sigmoid_focal_loss( + x=x3, label=label1, fg_num=fg_num2, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_fgnum_type) def test_fgnum_tensor_dtype(): - fg_num3 = fluid.layers.fill_constant(shape=[1], - dtype="float32", - value=5.) - fluid.layers.sigmoid_focal_loss(x=x3, - label=label1, - fg_num=fg_num3, - gamma=2., - alpha=0.25) + fg_num3 = fluid.layers.fill_constant( + shape=[1], dtype="float32", value=5.0 + ) + fluid.layers.sigmoid_focal_loss( + x=x3, label=label1, fg_num=fg_num3, gamma=2.0, alpha=0.25 + ) self.assertRaises(TypeError, test_fgnum_tensor_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_sign_op.py b/python/paddle/fluid/tests/unittests/test_sign_op.py index 1a55cfd3fd973c7aa03d811de9d56e8c6df3cda1..2db25d0eaee505454d9d3a443581603787ae28c3 100644 --- a/python/paddle/fluid/tests/unittests/test_sign_op.py +++ b/python/paddle/fluid/tests/unittests/test_sign_op.py @@ -25,7 +25,6 @@ import paddle.fluid.layers as layers class TestSignOp(OpTest): - def setUp(self): self.op_type = "sign" self.inputs = { @@ -41,32 +40,30 @@ class TestSignOp(OpTest): class TestSignOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of sign_op must be Variable or numpy.ndarray. input1 = 12 self.assertRaises(TypeError, fluid.layers.sign, input1) # The input dtype of sign_op must be float16, float32, float64. 
- input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") - input3 = fluid.layers.data(name='input3', - shape=[12, 10], - dtype="int64") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) + input3 = fluid.layers.data( + name='input3', shape=[12, 10], dtype="int64" + ) self.assertRaises(TypeError, fluid.layers.sign, input2) self.assertRaises(TypeError, fluid.layers.sign, input3) - input4 = fluid.layers.data(name='input4', - shape=[4], - dtype="float16") + input4 = fluid.layers.data( + name='input4', shape=[4], dtype="float16" + ) fluid.layers.sign(input4) class TestSignAPI(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(): - np_x = np.array([-1., 0., -0., 1.2, 1.5], dtype='float64') + np_x = np.array([-1.0, 0.0, -0.0, 1.2, 1.5], dtype='float64') x = paddle.to_tensor(np_x) z = paddle.sign(x) np_z = z.numpy() @@ -79,22 +76,21 @@ class TestSignAPI(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.tensor.math.sign, input1) # The input dtype of sign_op must be float16, float32, float64. - input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") - input3 = fluid.layers.data(name='input3', - shape=[12, 10], - dtype="int64") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) + input3 = fluid.layers.data( + name='input3', shape=[12, 10], dtype="int64" + ) self.assertRaises(TypeError, paddle.tensor.math.sign, input2) self.assertRaises(TypeError, paddle.tensor.math.sign, input3) - input4 = fluid.layers.data(name='input4', - shape=[4], - dtype="float16") + input4 = fluid.layers.data( + name='input4', shape=[4], dtype="float16" + ) paddle.sign(input4) class TestSignDoubleGradCheck(unittest.TestCase): - def sign_wrapper(self, x): return paddle.sign(x[0]) @@ -109,17 +105,13 @@ class TestSignDoubleGradCheck(unittest.TestCase): out = paddle.sign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.sign_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.sign_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -131,7 +123,6 @@ class TestSignDoubleGradCheck(unittest.TestCase): class TestSignTripleGradCheck(unittest.TestCase): - def sign_wrapper(self, x): return paddle.sign(x[0]) @@ -146,17 +137,13 @@ class TestSignTripleGradCheck(unittest.TestCase): out = paddle.sign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.sign_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.sign_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_signal.py b/python/paddle/fluid/tests/unittests/test_signal.py index 
d07e054bd4827a589c1ffca78810c725ea8d217d..5e363dd39513bc682503396a48dfee49bebf621e 100644 --- a/python/paddle/fluid/tests/unittests/test_signal.py +++ b/python/paddle/fluid/tests/unittests/test_signal.py @@ -57,7 +57,8 @@ def tiny(x): # Only floating types generate a tiny if np.issubdtype(x.dtype, np.floating) or np.issubdtype( - x.dtype, np.complexfloating): + x.dtype, np.complexfloating + ): dtype = x.dtype else: dtype = np.float32 @@ -71,8 +72,9 @@ def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None): threshold = tiny(S) elif threshold <= 0: - raise Exception("threshold={} must be strictly " - "positive".format(threshold)) + raise Exception( + "threshold={} must be strictly " "positive".format(threshold) + ) if fill not in [None, False, True]: raise Exception("fill={} must be None or boolean".format(fill)) @@ -99,12 +101,12 @@ def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None): length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype) elif np.issubdtype(type(norm), np.number) and norm > 0: - length = np.sum(mag**norm, axis=axis, keepdims=True)**(1.0 / norm) + length = np.sum(mag**norm, axis=axis, keepdims=True) ** (1.0 / norm) if axis is None: - fill_norm = mag.size**(-1.0 / norm) + fill_norm = mag.size ** (-1.0 / norm) else: - fill_norm = mag.shape[axis]**(-1.0 / norm) + fill_norm = mag.shape[axis] ** (-1.0 / norm) elif norm is None: return S @@ -144,8 +146,9 @@ def __window_ss_fill(x, win_sq, n_frames, hop_length): # pragma: no cover n_fft = len(win_sq) for i in range(n_frames): sample = i * hop_length - x[sample:min(n, sample + - n_fft)] += win_sq[:max(0, min(n_fft, n - sample))] + x[sample : min(n, sample + n_fft)] += win_sq[ + : max(0, min(n_fft, n - sample)) + ] def window_sumsquare( @@ -165,7 +168,7 @@ def window_sumsquare( # Compute the squared window at the desired length win_sq = get_window(window, win_length) - win_sq = normalize(win_sq, norm=norm)**2 + win_sq = normalize(win_sq, norm=norm) ** 2 win_sq = pad_center(win_sq, n_fft) # Fill the envelope @@ -208,26 +211,35 @@ def dtype_r2c(d, default=np.complex64): def frame(x, frame_length, hop_length, axis=-1): if not isinstance(x, np.ndarray): - raise Exception("Input must be of type numpy.ndarray, " - "given type(x)={}".format(type(x))) + raise Exception( + "Input must be of type numpy.ndarray, " + "given type(x)={}".format(type(x)) + ) if x.shape[axis] < frame_length: - raise Exception("Input is too short (n={:d})" - " for frame_length={:d}".format(x.shape[axis], - frame_length)) + raise Exception( + "Input is too short (n={:d})" + " for frame_length={:d}".format(x.shape[axis], frame_length) + ) if hop_length < 1: raise Exception("Invalid hop_length: {:d}".format(hop_length)) if axis == -1 and not x.flags["F_CONTIGUOUS"]: - print("librosa.util.frame called with axis={} " - "on a non-contiguous input. This will result in a copy.".format( - axis)) + print( + "librosa.util.frame called with axis={} " + "on a non-contiguous input. This will result in a copy.".format( + axis + ) + ) x = np.asfortranarray(x) elif axis == 0 and not x.flags["C_CONTIGUOUS"]: - print("librosa.util.frame called with axis={} " - "on a non-contiguous input. This will result in a copy.".format( - axis)) + print( + "librosa.util.frame called with axis={} " + "on a non-contiguous input. 
This will result in a copy.".format( + axis + ) + ) x = np.ascontiguousarray(x) n_frames = 1 + (x.shape[axis] - frame_length) // hop_length @@ -260,8 +272,11 @@ def pad_center(data, size, axis=-1, **kwargs): lengths[axis] = (lpad, int(size - n - lpad)) if lpad < 0: - raise Exception(("Target size ({:d}) must be " - "at least input size ({:d})").format(size, n)) + raise Exception( + ("Target size ({:d}) must be " "at least input size ({:d})").format( + size, n + ) + ) return np.pad(data, lengths, **kwargs) @@ -279,8 +294,9 @@ def get_window(window, Nx, fftbins=True): if len(window) == Nx: return np.asarray(window) - raise Exception("Window size mismatch: " - "{:d} != {:d}".format(len(window), Nx)) + raise Exception( + "Window size mismatch: " "{:d} != {:d}".format(len(window), Nx) + ) else: raise Exception("Invalid window specification: {}".format(window)) @@ -294,16 +310,18 @@ def __overlap_add(y, ytmp, hop_length): n_fft = ytmp.shape[0] for frame in range(ytmp.shape[1]): sample = frame * hop_length - y[sample:(sample + n_fft)] += ytmp[:, frame] + y[sample : (sample + n_fft)] += ytmp[:, frame] -def stft(x, - n_fft=2048, - hop_length=None, - win_length=None, - window="hann", - center=True, - pad_mode="reflect"): +def stft( + x, + n_fft=2048, + hop_length=None, + win_length=None, + window="hann", + center=True, + pad_mode="reflect", +): y = x input_rank = len(y.shape) if input_rank == 2: @@ -330,15 +348,20 @@ def stft(x, # Pad the time series so that frames are centered if center: if n_fft > y.shape[-1]: - print("n_fft={} is too small for input signal of length={}".format( - n_fft, y.shape[-1])) + print( + "n_fft={} is too small for input signal of length={}".format( + n_fft, y.shape[-1] + ) + ) y = np.pad(y, int(n_fft // 2), mode=pad_mode) elif n_fft > y.shape[-1]: raise Exception( "n_fft={} is too large for input signal of length={}".format( - n_fft, y.shape[-1])) + n_fft, y.shape[-1] + ) + ) # Window the time series. y_frames = frame(y, frame_length=n_fft, hop_length=hop_length) @@ -347,9 +370,9 @@ def stft(x, dtype = dtype_r2c(y.dtype) # Pre-allocate the STFT matrix - stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]), - dtype=dtype, - order="F") + stft_matrix = np.empty( + (int(1 + n_fft // 2), y_frames.shape[1]), dtype=dtype, order="F" + ) # how many columns can we fit within MAX_MEM_BLOCK? 
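# [Editor's note] The blocked loop just below fills the STFT matrix a few
# columns at a time to bound memory use. As a reading aid, here is a
# condensed, non-blocked NumPy sketch of the same frame -> window -> rfft
# pipeline this reference stft() implements; the helper name and the
# assumption that fft_window is already padded to n_fft are editorial.
import numpy as np


def stft_sketch(y, n_fft, hop_length, fft_window):
    n_frames = 1 + (len(y) - n_fft) // hop_length
    # overlapping frames as columns: shape (n_fft, n_frames)
    frames = np.stack(
        [y[i * hop_length : i * hop_length + n_fft] for i in range(n_frames)],
        axis=1,
    )
    # one-sided FFT of each windowed column: shape (1 + n_fft // 2, n_frames)
    return np.fft.rfft(fft_window[:, None] * frames, axis=0)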
n_columns = MAX_MEM_BLOCK // (stft_matrix.shape[0] * stft_matrix.itemsize) @@ -358,9 +381,9 @@ def stft(x, for bl_s in range(0, stft_matrix.shape[1], n_columns): bl_t = min(bl_s + n_columns, stft_matrix.shape[1]) - stft_matrix[:, - bl_s:bl_t] = fft.rfft(fft_window * y_frames[:, bl_s:bl_t], - axis=0) + stft_matrix[:, bl_s:bl_t] = fft.rfft( + fft_window * y_frames[:, bl_s:bl_t], axis=0 + ) if input_rank == 2: stft_matrix = np.expand_dims(stft_matrix, 0) @@ -405,8 +428,9 @@ def istft( padded_length = length + int(n_fft) else: padded_length = length - n_frames = min(stft_matrix.shape[1], - int(np.ceil(padded_length / hop_length))) + n_frames = min( + stft_matrix.shape[1], int(np.ceil(padded_length / hop_length)) + ) else: n_frames = stft_matrix.shape[1] @@ -428,7 +452,7 @@ def istft( ytmp = ifft_window * fft.irfft(stft_matrix[:, bl_s:bl_t], axis=0) # Overlap-add the istft block starting at the i'th frame - __overlap_add(y[frame * hop_length:], ytmp, hop_length) + __overlap_add(y[frame * hop_length :], ytmp, hop_length) frame += bl_t - bl_s @@ -449,7 +473,7 @@ def istft( # If we don't need to control length, just do the usual center trimming # to eliminate padded data if center: - y = y[int(n_fft // 2):-int(n_fft // 2)] + y = y[int(n_fft // 2) : -int(n_fft // 2)] else: if center: # If we're centering, crop off the first n_fft//2 samples @@ -506,8 +530,9 @@ def overlap_add_for_api_test(x, hop_length, axis=-1): frame_length = x.shape[1] if axis == 0 else x.shape[-2] # Assure no gaps between frames. - assert 0 < hop_length <= frame_length, \ - f'hop_length should be in (0, frame_length({frame_length})], but got {hop_length}.' + assert ( + 0 < hop_length <= frame_length + ), f'hop_length should be in (0, frame_length({frame_length})], but got {hop_length}.' seq_length = (n_frames - 1) * hop_length + frame_length @@ -528,7 +553,7 @@ def overlap_add_for_api_test(x, hop_length, axis=-1): for i in range(x.shape[0]): for frame in range(x.shape[-1]): sample = frame * hop_length - y[i, sample:sample + frame_length] += x[i, :, frame] + y[i, sample : sample + frame_length] += x[i, :, frame] if axis == 0: y = y.transpose((1, 0)) @@ -543,12 +568,10 @@ def overlap_add_for_api_test(x, hop_length, axis=-1): def place(devices, key='place'): - def decorate(cls): module = sys.modules[cls.__module__].__dict__ raw_classes = { - k: v - for k, v in module.items() if k.startswith(cls.__name__) + k: v for k, v in module.items() if k.startswith(cls.__name__) } for raw_name, raw_cls in raw_classes.items(): @@ -556,7 +579,7 @@ def place(devices, key='place'): test_cls = dict(raw_cls.__dict__) test_cls.update({key: d}) new_name = raw_name + '.' 
+ d.__class__.__name__ - module[new_name] = type(new_name, (raw_cls, ), test_cls) + module[new_name] = type(new_name, (raw_cls,), test_cls) del module[raw_name] return cls @@ -586,20 +609,23 @@ def tearDownModule(): pass -def rand_x(dims=1, - dtype='float64', - min_dim_len=1, - max_dim_len=10, - shape=None, - complex=False): +def rand_x( + dims=1, + dtype='float64', + min_dim_len=1, + max_dim_len=10, + shape=None, + complex=False, +): if shape is None: shape = [ np.random.randint(min_dim_len, max_dim_len) for i in range(dims) ] if complex: - return np.random.randn( - *shape).astype(dtype) + 1.j * np.random.randn(*shape).astype(dtype) + return np.random.randn(*shape).astype(dtype) + 1.0j * np.random.randn( + *shape + ).astype(dtype) else: return np.random.randn(*shape).astype(dtype) @@ -608,8 +634,11 @@ def parameterize(attrs, input_values=None): if isinstance(attrs, str): attrs = [attrs] - input_dicts = (attrs if input_values is None else - [dict(zip(attrs, vals)) for vals in input_values]) + input_dicts = ( + attrs + if input_values is None + else [dict(zip(attrs, vals)) for vals in input_values] + ) def decorator(base_class): test_class_module = sys.modules[base_class.__module__].__dict__ @@ -619,8 +648,7 @@ def parameterize(attrs, input_values=None): name = class_name(base_class, idx, input_dict) - test_class_module[name] = type(name, (base_class, ), - test_class_dict) + test_class_module[name] = type(name, (base_class,), test_class_dict) for method_name in list(base_class.__dict__): if method_name.startswith("test"): @@ -632,7 +660,8 @@ def parameterize(attrs, input_values=None): def class_name(cls, num, params_dict): suffix = to_safe_name( - next((v for v in params_dict.values() if isinstance(v, str)), "")) + next((v for v in params_dict.values() if isinstance(v, str)), "") + ) if TEST_CASE_NAME in params_dict: suffix = to_safe_name(params_dict["test_case"]) return "{}_{}{}".format(cls.__name__, num, suffix and "_" + suffix) @@ -652,19 +681,22 @@ def to_safe_name(s): ('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1), ('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0), ('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1), - ]) # yapf: disable + ]) # fmt: skip class TestFrame(unittest.TestCase): - def test_frame(self): - np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, - self.hop_length, - self.axis), - paddle.signal.frame(paddle.to_tensor(self.x), - self.frame_length, - self.hop_length, - self.axis), - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + np.testing.assert_allclose( + frame_for_api_test( + self.x, self.frame_length, self.hop_length, self.axis + ), + paddle.signal.frame( + paddle.to_tensor(self.x), + self.frame_length, + self.hop_length, + self.axis, + ), + rtol=rtol.get(str(self.x.dtype)), + atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -677,29 +709,33 @@ class TestFrame(unittest.TestCase): ('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1), ('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0), ('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1), - ]) # yapf: disable + ]) # fmt: skip class TestFrameStatic(unittest.TestCase): - def test_frame_static(self): paddle.enable_static() mp, sp = paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(mp, sp): - input = paddle.static.data('input', - self.x.shape, - dtype=self.x.dtype) - output = paddle.signal.frame(input, 
self.frame_length, - self.hop_length, self.axis), + input = paddle.static.data( + 'input', self.x.shape, dtype=self.x.dtype + ) + output = ( + paddle.signal.frame( + input, self.frame_length, self.hop_length, self.axis + ), + ) exe = paddle.static.Executor(self.place) exe.run(sp) [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output]) paddle.disable_static() - np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, - self.hop_length, - self.axis), - output, - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + np.testing.assert_allclose( + frame_for_api_test( + self.x, self.frame_length, self.hop_length, self.axis + ), + output, + rtol=rtol.get(str(self.x.dtype)), + atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -710,13 +746,16 @@ class TestFrameStatic(unittest.TestCase): ('test_hop_length', rand_x(1, np.float64, shape=[150]), 50, 0, -1, ValueError), ('test_frame_length1', rand_x(2, np.float64, shape=[150, 8]), 0, 15, 0, ValueError), ('test_frame_length2', rand_x(2, np.float64, shape=[150, 8]), 151, 15, 0, ValueError), - ]) # yapf: disable + ]) # fmt: skip class TestFrameException(unittest.TestCase): - def test_frame(self): with self.assertRaises(self.expect_exception): - paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length, - self.hop_length, self.axis) + paddle.signal.frame( + paddle.to_tensor(self.x), + self.frame_length, + self.hop_length, + self.axis, + ) @place(DEVICES) @@ -729,16 +768,17 @@ class TestFrameException(unittest.TestCase): ('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1), ('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0), ('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1), - ]) # yapf: disable + ]) # fmt: skip class TestOverlapAdd(unittest.TestCase): - def test_overlap_add(self): np.testing.assert_allclose( overlap_add_for_api_test(self.x, self.hop_length, self.axis), - paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length, - self.axis), + paddle.signal.overlap_add( + paddle.to_tensor(self.x), self.hop_length, self.axis + ), rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -751,28 +791,29 @@ class TestOverlapAdd(unittest.TestCase): ('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1), ('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0), ('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1), - ]) # yapf: disable + ]) # fmt: skip class TestOverlapAddStatic(unittest.TestCase): - def test_overlap_add_static(self): paddle.enable_static() mp, sp = paddle.static.Program(), paddle.static.Program() with paddle.static.program_guard(mp, sp): - input = paddle.static.data('input', - self.x.shape, - dtype=self.x.dtype) - output = paddle.signal.overlap_add(input, self.hop_length, - self.axis), + input = paddle.static.data( + 'input', self.x.shape, dtype=self.x.dtype + ) + output = ( + paddle.signal.overlap_add(input, self.hop_length, self.axis), + ) exe = paddle.static.Executor(self.place) exe.run(sp) [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output]) paddle.disable_static() - np.testing.assert_allclose(overlap_add_for_api_test( - self.x, self.hop_length, self.axis), - output, - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + np.testing.assert_allclose( + overlap_add_for_api_test(self.x, self.hop_length, self.axis), + output, + rtol=rtol.get(str(self.x.dtype)), + 
atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -781,13 +822,13 @@ class TestOverlapAddStatic(unittest.TestCase): [ ('test_axis', rand_x(2, np.float64, shape=[3, 50]), 4, 2, ValueError), ('test_hop_length', rand_x(2, np.float64, shape=[50, 3]), -1, -1, ValueError), - ]) # yapf: disable + ]) # fmt: skip class TestOverlapAddException(unittest.TestCase): - def test_overlap_add(self): with self.assertRaises(self.expect_exception): - paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length, - self.axis) + paddle.signal.overlap_add( + paddle.to_tensor(self.x), self.hop_length, self.axis + ) # ================= STFT @@ -836,9 +877,8 @@ class TestOverlapAddException(unittest.TestCase): 512, None, None, None, True, 'reflect', False, True), ('test_center', rand_x(2, np.float64, shape=[1, 160000]), 512, None, None, None, False, 'reflect', False, True), - ])# yapf: disable + ]) # fmt: skip class TestStft(unittest.TestCase): - def test_stft(self): if self.window is None: win_p = None @@ -848,14 +888,29 @@ class TestStft(unittest.TestCase): win_l = self.window np.testing.assert_allclose( - stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, - self.center, self.pad_mode), - paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft, - self.hop_length, self.win_length, win_p, - self.center, self.pad_mode, self.normalized, - self.onesided), + stft( + self.x, + self.n_fft, + self.hop_length, + self.win_length, + win_l, + self.center, + self.pad_mode, + ), + paddle.signal.stft( + paddle.to_tensor(self.x), + self.n_fft, + self.hop_length, + self.win_length, + win_p, + self.center, + self.pad_mode, + self.normalized, + self.onesided, + ), rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -878,9 +933,8 @@ class TestStft(unittest.TestCase): 512, None, None, None, True, 'nonsense', False, True, AssertionError), ('test_complex_onesided', rand_x(1, np.float64, shape=[16000], complex=True), 512, None, None, None, False, 'reflect', False, True, AssertionError), - ]) # yapf: disable + ]) # fmt: skip class TestStftException(unittest.TestCase): - def test_stft(self): if self.window is None: win_p = None @@ -888,10 +942,17 @@ class TestStftException(unittest.TestCase): win_p = paddle.to_tensor(self.window) with self.assertRaises(self.expect_exception): - paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft, - self.hop_length, self.win_length, win_p, - self.center, self.pad_mode, self.normalized, - self.onesided), + paddle.signal.stft( + paddle.to_tensor(self.x), + self.n_fft, + self.hop_length, + self.win_length, + win_p, + self.center, + self.pad_mode, + self.normalized, + self.onesided, + ), @place(DEVICES) @@ -912,9 +973,8 @@ class TestStftException(unittest.TestCase): 512, None, None, None, False, False, True, None, False), ('test_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True), 512, None, None, None, False, False, True, 1888, False), - ]) # yapf: disable + ]) # fmt: skip class TestIstft(unittest.TestCase): - def test_istft(self): if self.window is None: win_p = None @@ -924,14 +984,29 @@ class TestIstft(unittest.TestCase): win_l = self.window np.testing.assert_allclose( - istft(self.x, self.hop_length, self.win_length, win_l, self.center, - self.length), - paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft, - self.hop_length, self.win_length, win_p, - self.center, self.normalized, self.onesided, - self.length, self.return_complex), + istft( + self.x, + self.hop_length, + 
self.win_length, + win_l, + self.center, + self.length, + ), + paddle.signal.istft( + paddle.to_tensor(self.x), + self.n_fft, + self.hop_length, + self.win_length, + win_p, + self.center, + self.normalized, + self.onesided, + self.length, + self.return_complex, + ), rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype))) + atol=atol.get(str(self.x.dtype)), + ) @place(DEVICES) @@ -962,9 +1037,8 @@ class TestIstft(unittest.TestCase): 512, None, None, rand_x(1, np.float64, shape=[512], complex=True), True, False, True, None, False, AssertionError), ('test_NOLA', rand_x(3, np.float64, shape=[1, 257, 471], complex=True), 512, 512, None, get_window('hann', 512), True, False, True, None, False, ValueError), - ]) # yapf: disable + ]) # fmt: skip class TestIstftException(unittest.TestCase): - def test_istft(self): if self.window is None: win_p = None @@ -972,10 +1046,18 @@ class TestIstftException(unittest.TestCase): win_p = paddle.to_tensor(self.window) with self.assertRaises(self.expect_exception): - paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft, - self.hop_length, self.win_length, win_p, - self.center, self.normalized, self.onesided, - self.length, self.return_complex), + paddle.signal.istft( + paddle.to_tensor(self.x), + self.n_fft, + self.hop_length, + self.win_length, + win_p, + self.center, + self.normalized, + self.onesided, + self.length, + self.return_complex, + ), if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py b/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py index d7d13bdd39e8fecc86efa2f7e4351101e11445bc..15a050211a15d364020a436410dc2baf2bc9dc41 100755 --- a/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py +++ b/python/paddle/fluid/tests/unittests/test_similarity_focus_op.py @@ -20,17 +20,25 @@ from paddle.fluid import Program, program_guard class TestSimilarityFocusOp(OpTest): - def setUp(self): self.op_type = "similarity_focus" batch_size = 2 x_dim, y_dim, z_dim = 3, 2, 2 self.inputs = { - 'X': - np.array([[[[0.8, 0.1], [0.4, 0.5]], [[0.9, 0.7], [0.9, 0.9]], - [[0.8, 0.9], [0.1, 0.2]]], - [[[0.2, 0.5], [0.3, 0.4]], [[0.9, 0.7], [0.8, 0.4]], - [[0.0, 0.2], [0.4, 0.7]]]]), + 'X': np.array( + [ + [ + [[0.8, 0.1], [0.4, 0.5]], + [[0.9, 0.7], [0.9, 0.9]], + [[0.8, 0.9], [0.1, 0.2]], + ], + [ + [[0.2, 0.5], [0.3, 0.4]], + [[0.9, 0.7], [0.8, 0.4]], + [[0.0, 0.2], [0.4, 0.7]], + ], + ] + ), } self.attrs = { 'axis': 1, @@ -41,8 +49,9 @@ class TestSimilarityFocusOp(OpTest): for batch in range(batch_size): res = np.zeros((1, y_dim, z_dim)).astype("float32").reshape(-1) for index in self.attrs['indexes']: - channel = self.inputs['X'][batch, - index, :, :].reshape(-1).copy() + channel = ( + self.inputs['X'][batch, index, :, :].reshape(-1).copy() + ) tag1 = [0 for i in range(y_dim)] tag2 = [0 for i in range(z_dim)] cnt = 0 @@ -71,15 +80,14 @@ class TestSimilarityFocusOp(OpTest): class TestSimilarityFocusOp_axis1(OpTest): - def setUp(self): self.op_type = "similarity_focus" batch_size = 3 x_dim, y_dim, z_dim = 4, 5, 6 self.inputs = { - 'X': - np.random.random( - (batch_size, x_dim, y_dim, z_dim)).astype("float32"), + 'X': np.random.random((batch_size, x_dim, y_dim, z_dim)).astype( + "float32" + ), } self.attrs = { 'axis': 1, @@ -90,8 +98,9 @@ class TestSimilarityFocusOp_axis1(OpTest): for batch in range(batch_size): res = np.zeros((1, y_dim, z_dim)).astype("float32").reshape(-1) for index in self.attrs['indexes']: - channel = self.inputs['X'][batch, - index, :, :].reshape(-1).copy() + 
channel = ( + self.inputs['X'][batch, index, :, :].reshape(-1).copy() + ) tag1 = [0 for i in range(y_dim)] tag2 = [0 for i in range(z_dim)] cnt = 0 @@ -121,15 +130,14 @@ class TestSimilarityFocusOp_axis1(OpTest): class TestSimilarityFocusOp_axis2(OpTest): - def setUp(self): self.op_type = "similarity_focus" batch_size = 6 x_dim, y_dim, z_dim = 7, 8, 9 self.inputs = { - 'X': - np.random.random( - (batch_size, x_dim, y_dim, z_dim)).astype("float32"), + 'X': np.random.random((batch_size, x_dim, y_dim, z_dim)).astype( + "float32" + ), } self.attrs = { 'axis': 2, @@ -140,8 +148,9 @@ class TestSimilarityFocusOp_axis2(OpTest): for batch in range(batch_size): res = np.zeros((x_dim, 1, z_dim)).astype("float32").reshape(-1) for index in self.attrs['indexes']: - channel = self.inputs['X'][batch, :, - index, :].reshape(-1).copy() + channel = ( + self.inputs['X'][batch, :, index, :].reshape(-1).copy() + ) tag1 = [0 for i in range(x_dim)] tag2 = [0 for i in range(z_dim)] cnt = 0 @@ -171,15 +180,14 @@ class TestSimilarityFocusOp_axis2(OpTest): class TestSimilarityFocusOp_axis3(OpTest): - def setUp(self): self.op_type = "similarity_focus" batch_size = 64 x_dim, y_dim, z_dim = 48, 48, 13 self.inputs = { - 'X': - np.random.random( - (batch_size, x_dim, y_dim, z_dim)).astype("float32"), + 'X': np.random.random((batch_size, x_dim, y_dim, z_dim)).astype( + "float32" + ), } self.attrs = { 'axis': 3, @@ -190,8 +198,9 @@ class TestSimilarityFocusOp_axis3(OpTest): for batch in range(batch_size): res = np.zeros((x_dim, y_dim, 1)).astype("float32").reshape(-1) for index in self.attrs['indexes']: - channel = self.inputs['X'][batch, :, :, - index].reshape(-1).copy() + channel = ( + self.inputs['X'][batch, :, :, index].reshape(-1).copy() + ) tag1 = [0 for i in range(x_dim)] tag2 = [0 for i in range(y_dim)] cnt = 0 @@ -221,32 +230,31 @@ class TestSimilarityFocusOp_axis3(OpTest): class TestSimilarityFocusOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): data = fluid.data(name='data', shape=[16, 3, 2, 2], dtype='float32') def test_input_Variable(): input = np.random.rand(16, 3, 2, 2).astype("float32") - out = fluid.layers.similarity_focus(input=input, - axis=1, - indexes=[0]) + out = fluid.layers.similarity_focus( + input=input, axis=1, indexes=[0] + ) self.assertRaises(TypeError, test_input_Variable) def test_axis_Int(): axis = 1.0 - out = fluid.layers.similarity_focus(input=data, - axis=axis, - indexes=[0]) + out = fluid.layers.similarity_focus( + input=data, axis=axis, indexes=[0] + ) self.assertRaises(TypeError, test_axis_Int) def test_indexes_List(): indexes = 0 - out = fluid.layers.similarity_focus(input=data, - axis=1, - indexes=indexes) + out = fluid.layers.similarity_focus( + input=data, axis=1, indexes=indexes + ) self.assertRaises(TypeError, test_indexes_List) diff --git a/python/paddle/fluid/tests/unittests/test_simple_rnn_op.py b/python/paddle/fluid/tests/unittests/test_simple_rnn_op.py index 4a0f02204c7de2a0b764dc3a7727be4f332de912..fa129cda06627a2727e5a194fb2b45049ddd45d1 100644 --- a/python/paddle/fluid/tests/unittests/test_simple_rnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_simple_rnn_op.py @@ -31,7 +31,6 @@ paddle.enable_static() class TestSimpleRNNOp(OpTest): - def get_weight_names(self): weight_names = [] for i in range(self.num_layers): @@ -45,13 +44,16 @@ class TestSimpleRNNOp(OpTest): def setUp(self): self.op_type = "rnn" self.dtype = "float32" if core.is_compiled_with_rocm() else "float64" - self.sequence_length = None if 
core.is_compiled_with_rocm( - ) else np.array([12, 11, 10, 9, 8], dtype=np.int32) + self.sequence_length = ( + None + if core.is_compiled_with_rocm() + else np.array([12, 11, 10, 9, 8], dtype=np.int32) + ) self.num_layers = 1 self.is_bidirec = False self.is_test = False self.mode = "RNN_TANH" - self.dropout = 0. + self.dropout = 0.0 self.set_attrs() self.direction_num = 2 if self.is_bidirec else 1 @@ -61,31 +63,33 @@ class TestSimpleRNNOp(OpTest): input_size = 3 hidden_size = 2 - input = np.random.uniform(low=-0.1, - high=0.1, - size=(seq_length, batch_size, - input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, high=0.1, size=(seq_length, batch_size, input_size) + ).astype(self.dtype) if self.sequence_length is not None: input[11][1:][:] = 0 input[10][2:][:] = 0 input[9][3:][:] = 0 input[8][4:][:] = 0 - rnn1 = SimpleRNN(input_size, - hidden_size, - num_layers=self.num_layers, - time_major=True, - direction=direction, - dropout=self.dropout, - nonlinearity=self.mode, - dtype=self.dtype) + rnn1 = SimpleRNN( + input_size, + hidden_size, + num_layers=self.num_layers, + time_major=True, + direction=direction, + dropout=self.dropout, + nonlinearity=self.mode, + dtype=self.dtype, + ) flat_w = get_params_for_net(rnn1) output, last_hidden = rnn1(input, sequence_length=self.sequence_length) - init_h = np.zeros((self.num_layers * self.direction_num, batch_size, - hidden_size)).astype(self.dtype) + init_h = np.zeros( + (self.num_layers * self.direction_num, batch_size, hidden_size) + ).astype(self.dtype) state_out = np.ndarray((300)).astype("uint8") @@ -93,13 +97,13 @@ class TestSimpleRNNOp(OpTest): 'Input': input, 'WeightList': flat_w, 'PreState': [('init_h', init_h)], - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if self.sequence_length is None: self.inputs = { 'Input': input, 'WeightList': flat_w, - 'PreState': [('init_h', init_h)] + 'PreState': [('init_h', init_h)], } self.attrs = { 'dropout_prob': self.dropout, @@ -108,13 +112,13 @@ class TestSimpleRNNOp(OpTest): 'hidden_size': hidden_size, 'num_layers': self.num_layers, 'is_test': self.is_test, - 'mode': self.mode + 'mode': self.mode, } self.outputs = { 'Out': output, 'State': [('last_hidden', last_hidden)], 'Reserve': np.ndarray((400)).astype("uint8"), - 'DropoutState': state_out + 'DropoutState': state_out, } def set_attrs(self): @@ -132,27 +136,23 @@ class TestSimpleRNNOp(OpTest): class TestSimpleRNNOp1(TestSimpleRNNOp): - def set_attrs(self): self.sequence_length = None class TestSimpleRNNOp2(TestSimpleRNNOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True class TestSimpleRNNOp3(TestSimpleRNNOp): - def set_attrs(self): self.sequence_length = None self.is_test = True class TestSimpleRNNOp4(TestSimpleRNNOp): - def set_attrs(self): self.sequence_length = None self.is_bidirec = True @@ -160,7 +160,6 @@ class TestSimpleRNNOp4(TestSimpleRNNOp): class TestSimpleRNNOp5(TestSimpleRNNOp): - def set_attrs(self): self.mode = "RNN_RELU" diff --git a/python/paddle/fluid/tests/unittests/test_size_op.py b/python/paddle/fluid/tests/unittests/test_size_op.py index bb64e3e66b24b41c51c9e64a469387dcd2839293..6f9898ade4fb07b983be70027922183b6c6ca9aa 100644 --- a/python/paddle/fluid/tests/unittests/test_size_op.py +++ b/python/paddle/fluid/tests/unittests/test_size_op.py @@ -20,7 +20,6 @@ from op_test import OpTest class TestSizeOp(OpTest): - def setUp(self): self.op_type = "size" self.shape = [] @@ -37,31 +36,26 @@ class TestSizeOp(OpTest): class TestRank1Tensor(TestSizeOp): - 
def config(self): self.shape = [2] class TestRank2Tensor(TestSizeOp): - def config(self): self.shape = [2, 3] class TestRank3Tensor(TestSizeOp): - def config(self): self.shape = [2, 3, 100] class TestLargeTensor(TestSizeOp): - def config(self): self.shape = [2**10] class TestSizeAPI(unittest.TestCase): - def test_size_static(self): main_program = fluid.Program() startup_program = fluid.Program() @@ -75,17 +69,19 @@ class TestSizeAPI(unittest.TestCase): out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) exe = paddle.static.Executor(place=paddle.CPUPlace()) - res_1, res_2 = exe.run(feed={ - "x_1": input_1, - "x_2": input_2, - }, - fetch_list=[out_1, out_2]) - assert (np.array_equal(res_1, - np.array([np.size(input_1) - ]).astype("int64"))) - assert (np.array_equal(res_2, - np.array([np.size(input_2) - ]).astype("int64"))) + res_1, res_2 = exe.run( + feed={ + "x_1": input_1, + "x_2": input_2, + }, + fetch_list=[out_1, out_2], + ) + assert np.array_equal( + res_1, np.array([np.size(input_1)]).astype("int64") + ) + assert np.array_equal( + res_2, np.array([np.size(input_2)]).astype("int64") + ) def test_size_imperative(self): paddle.disable_static(paddle.CPUPlace()) @@ -95,8 +91,8 @@ class TestSizeAPI(unittest.TestCase): x_2 = paddle.to_tensor(input_2) out_1 = paddle.fluid.layers.size(x_1) out_2 = paddle.fluid.layers.size(x_2) - assert (np.array_equal(out_1.numpy().item(0), np.size(input_1))) - assert (np.array_equal(out_2.numpy().item(0), np.size(input_2))) + assert np.array_equal(out_1.numpy().item(0), np.size(input_1)) + assert np.array_equal(out_2.numpy().item(0), np.size(input_2)) paddle.enable_static() def test_error(self): diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index bf8b9e92209d5dac451ae223449a5dd933011bee..59cd41ae9de21f75dd2090854fb86e4eb3cec668 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -30,7 +30,6 @@ paddle.enable_static() # Situation 1: starts(list, no tensor), ends(list, no tensor) # 1.1 without attr(decrease) class TestSliceOp(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -40,7 +39,7 @@ class TestSliceOp(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -59,7 +58,6 @@ class TestSliceOp(OpTest): class TestCase1(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [-3, 0, 2] @@ -70,7 +68,6 @@ class TestCase1(TestSliceOp): class TestCase2(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [-3, 0, 2] @@ -81,7 +78,6 @@ class TestCase2(TestSliceOp): class TestSliceZerosShapeTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -92,7 +88,7 @@ class TestSliceZerosShapeTensor(OpTest): 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags, - 'use_mkldnn': True + 'use_mkldnn': True, } def config(self): @@ -109,7 +105,6 @@ class TestSliceZerosShapeTensor(OpTest): # 1.2 with attr(decrease) class TestSliceOp_decs_dim(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -140,7 +135,6 @@ class TestSliceOp_decs_dim(OpTest): class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [1, 0, 2] @@ -152,7 +146,6 @@ class 
TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [-1, 0, 2] @@ -164,7 +157,6 @@ class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 7]).astype("float64") self.starts = [0, 1, 2, 3] @@ -176,7 +168,6 @@ class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [-1] @@ -188,7 +179,6 @@ class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [0, 1, 2, 3] @@ -202,15 +192,15 @@ class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): # Situation 2: starts(list, have tensor), ends(list, no tensor) # without attr(decrease) class TestSliceOp_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int64') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int64') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.outputs = {'Out': self.out} @@ -218,7 +208,7 @@ class TestSliceOp_starts_ListTensor(OpTest): 'axes': self.axes, 'starts': self.starts_infer, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -241,15 +231,15 @@ class TestSliceOp_starts_ListTensor(OpTest): # Situation 2: starts(list, have tensor), ends(list, no tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} @@ -281,8 +271,8 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest): class TestSliceOp_decs_dim_5_starts_ListTensor( - TestSliceOp_decs_dim_starts_ListTensor): - + TestSliceOp_decs_dim_starts_ListTensor +): def config(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts = [-1] @@ -298,13 +288,12 @@ class TestSliceOp_decs_dim_5_starts_ListTensor( # Situation 3: starts(tensor), ends(list, no tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype="int32") + "StartsTensor": np.array(self.starts, dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { @@ -334,7 +323,6 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest): # Situation 4: starts(tensor), ends(tensor) # without attr(decrease) class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -342,14 +330,14 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int64"), - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, 
dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, #'starts': self.starts, #'ends': self.ends_infer, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -370,14 +358,13 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): # Situation 5: starts(tensor), ends(tensor) # with attr(decrease) class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - "EndsTensor": np.array(self.ends, dtype="int32") + "EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.out} self.attrs = { @@ -407,27 +394,27 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): # Situation 6: starts(tensor), ends(list, have tensor) # without attr(decrease) class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): - def setUp(self): self.op_type = "slice" self.config() ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("y" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("y" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - 'EndsTensorList': ends_tensor + 'EndsTensorList': ends_tensor, } self.outputs = {'Out': self.out} self.attrs = { 'axes': self.axes, #'starts': self.starts, 'ends': self.ends_infer, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -448,10 +435,10 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): # Test CUDA float16 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -461,7 +448,7 @@ class TestFP16(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -481,15 +468,15 @@ class TestFP16(OpTest): def test_check_grad_normal(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['Input'], - 'Out', - max_relative_error=0.006) + self.check_grad_with_place( + place, ['Input'], 'Out', max_relative_error=0.006 + ) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16_2(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -499,7 +486,7 @@ class TestFP16_2(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -519,14 +506,16 @@ class TestFP16_2(OpTest): def test_check_grad_normal(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - self.check_grad_with_place(place, ['Input'], - 'Out', - max_relative_error=0.006, - numeric_grad_delta=0.5) + self.check_grad_with_place( + place, + ['Input'], + 'Out', + max_relative_error=0.006, + numeric_grad_delta=0.5, + ) class TestBF16(OpTest): - def setUp(self): self.op_type = "slice" self.config() @@ -536,7 +525,7 @@ class TestBF16(OpTest): 'axes': self.axes, 'starts': self.starts, 'ends': self.ends, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def 
config(self): @@ -557,38 +546,36 @@ class TestBF16(OpTest): # Test python API class TestSliceAPI(unittest.TestCase): - def test_1(self): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int64", -3) - starts = fluid.layers.data(name='starts', - shape=[1, 3], - append_batch_size=False) - ends = fluid.layers.data(name='ends', - shape=[3], - append_batch_size=False) - - x = fluid.layers.data(name="x", - shape=[3, 4, 5, 6], - append_batch_size=False, - dtype="float64") + starts = fluid.layers.data( + name='starts', shape=[1, 3], append_batch_size=False + ) + ends = fluid.layers.data( + name='ends', shape=[3], append_batch_size=False + ) + + x = fluid.layers.data( + name="x", + shape=[3, 4, 5, 6], + append_batch_size=False, + dtype="float64", + ) # value_int64 is greater than 2147483647 which is the max of int32 value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648) - out_1 = paddle.slice(x, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[value_int64, 100, -1]) - out_2 = paddle.slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, -1]) - out_3 = paddle.slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, minus_1]) + out_1 = paddle.slice( + x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1] + ) + out_2 = paddle.slice( + x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1] + ) + out_3 = paddle.slice( + x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1] + ) out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) out_5 = x[-3:3, 0:100, 2:-1] @@ -601,9 +588,10 @@ class TestSliceAPI(unittest.TestCase): feed={ "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), - 'ends': np.array([3, 100, -1]).astype("int32") + 'ends': np.array([3, 100, -1]).astype("int32"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], + ) assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) @@ -615,17 +603,18 @@ class TestSliceAPI(unittest.TestCase): class TestSliceApiWithTensor(unittest.TestCase): - def test_starts_ends_is_tensor(self): with paddle.fluid.dygraph.guard(): a = paddle.rand(shape=[4, 5, 6], dtype='float32') axes = [0, 1, 2] starts = [-3, 0, 2] ends = [3, 2, 4] - a_1 = paddle.slice(a, - axes=axes, - starts=paddle.to_tensor(starts, dtype='int32'), - ends=paddle.to_tensor(ends, dtype='int32')) + a_1 = paddle.slice( + a, + axes=axes, + starts=paddle.to_tensor(starts, dtype='int32'), + ends=paddle.to_tensor(ends, dtype='int32'), + ) a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) np.testing.assert_array_equal(a_1.numpy(), a_2.numpy()) @@ -648,7 +637,6 @@ class TestSliceApiWithTensor(unittest.TestCase): class TestSliceApiEager(unittest.TestCase): - def test_slice_api(self): with paddle.fluid.dygraph.guard(): with _test_eager_guard(): @@ -659,23 +647,24 @@ class TestSliceApiEager(unittest.TestCase): ends = [3, 2, 4] a_1 = paddle.slice(a, axes=axes, starts=starts, ends=ends) - a_2 = paddle.slice(a, - axes=axes, - starts=paddle.to_tensor(starts), - ends=paddle.to_tensor(ends)) + a_2 = paddle.slice( + a, + axes=axes, + starts=paddle.to_tensor(starts), + ends=paddle.to_tensor(ends), + ) np.testing.assert_array_equal(a_1.numpy(), a_2.numpy()) a_1.backward() grad_truth = paddle.zeros_like(a) grad_truth[-3:3, 0:2, 2:4] = 1 np.testing.assert_array_equal(grad_truth, a.gradient()) 
- np.testing.assert_allclose(a_1.numpy(), - a[-3:3, 0:2, 2:4], - rtol=1e-05) + np.testing.assert_allclose( + a_1.numpy(), a[-3:3, 0:2, 2:4], rtol=1e-05 + ) class TestSliceApiWithLoDTensorArray(unittest.TestCase): - def setUp(self): self.shape = (3, 4) self.data = np.random.random(size=self.shape).astype('float32') @@ -684,8 +673,11 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.end = 2 self.axis = 1 - self.place = fluid.CUDAPlace( - 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.exe = fluid.Executor(self.place) def set_program_and_run(self, main_program, case_num): @@ -693,7 +685,7 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): x = [ fluid.data(name='x0', shape=self.shape, dtype="float32"), fluid.data(name='x1', shape=self.shape, dtype="float32"), - fluid.data(name='x2', shape=self.shape, dtype="float32") + fluid.data(name='x2', shape=self.shape, dtype="float32"), ] for each_x in x: @@ -708,31 +700,35 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.sliced_arr = output = arr[0] elif case_num == 2: - end = fluid.layers.array_length( - arr) - 1 # dtype of end is int64 - self.sliced_arr = slice_arr = arr[self.start:end] - output, _ = fluid.layers.tensor_array_to_tensor(slice_arr, - axis=self.axis, - use_stack=True) + end = ( + fluid.layers.array_length(arr) - 1 + ) # dtype of end is int64 + self.sliced_arr = slice_arr = arr[self.start : end] + output, _ = fluid.layers.tensor_array_to_tensor( + slice_arr, axis=self.axis, use_stack=True + ) elif case_num == 3: - value_int64 = fluid.layers.fill_constant([1], "int64", - 2147483648) - self.sliced_arr = slice_arr = arr[self.start:value_int64] - output, _ = fluid.layers.tensor_array_to_tensor(slice_arr, - axis=self.axis, - use_stack=True) + value_int64 = fluid.layers.fill_constant( + [1], "int64", 2147483648 + ) + self.sliced_arr = slice_arr = arr[self.start : value_int64] + output, _ = fluid.layers.tensor_array_to_tensor( + slice_arr, axis=self.axis, use_stack=True + ) loss = fluid.layers.reduce_sum(output) fluid.backward.append_backward(loss) g_vars = list( - map(main_program.global_block().var, - [each_x.name + "@GRAD" for each_x in x])) - self.out, self.g_x0, self.g_x1, self.g_x2 = \ - self.exe.run(main_program, - feed = {'x0': self.data, - 'x1': self.data, - 'x2': self.data}, - fetch_list=[output] + g_vars) + map( + main_program.global_block().var, + [each_x.name + "@GRAD" for each_x in x], + ) + ) + self.out, self.g_x0, self.g_x1, self.g_x2 = self.exe.run( + main_program, + feed={'x0': self.data, 'x1': self.data, 'x2': self.data}, + fetch_list=[output] + g_vars, + ) def test_case_1(self): main_program = fluid.Program() @@ -750,10 +746,12 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.set_program_and_run(main_program, 2) self.assertTrue( - self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) + self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ) self.assertEqual(self.sliced_arr.shape, self.shape) np.testing.assert_array_equal( - self.out, np.stack([self.data, self.data], axis=self.axis)) + self.out, np.stack([self.data, self.data], axis=self.axis) + ) np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data)) @@ -763,35 +761,35 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): 
self.set_program_and_run(main_program, 3) self.assertTrue( - self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) + self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ) self.assertEqual(self.sliced_arr.shape, self.shape) np.testing.assert_array_equal( - self.out, np.stack([self.data, self.data, self.data], - axis=self.axis)) + self.out, + np.stack([self.data, self.data, self.data], axis=self.axis), + ) np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data)) class TestImperativeVarBaseGetItem(unittest.TestCase): - def test_getitem_with_long(self): with fluid.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') var = fluid.dygraph.to_variable(data) - sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here + sliced = var[:, 10:, : var.shape[1]] # var.shape[1] is 80L here self.assertEqual(sliced.shape, [2, 70, 80]) - sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] + sliced = var[:, var.shape[0] :, var.shape[0] : var.shape[1]] self.assertEqual(sliced.shape, [2, 78, 78]) def test_getitem_with_float(self): - def test_float_in_slice_item(): with fluid.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') var = fluid.dygraph.to_variable(data) - sliced = var[:, 1.1:, :var.shape[1]] + sliced = var[:, 1.1:, : var.shape[1]] self.assertRaises(Exception, test_float_in_slice_item) @@ -805,7 +803,6 @@ class TestImperativeVarBaseGetItem(unittest.TestCase): class TestInferShape(unittest.TestCase): - def test(self): x = paddle.ones(shape=[3, 4, 5]) x.desc.set_shape([3, -1, 5]) @@ -820,13 +817,18 @@ class TestInferShape(unittest.TestCase): x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) x = paddle.to_tensor(x_arr) - pp_slice = paddle.slice(x, [ - 100, - ], [0], [1]) + pp_slice = paddle.slice( + x, + [ + 100, + ], + [0], + [1], + ) np_slice = x_arr[:, :, 0:1] np.testing.assert_array_equal(pp_slice, np_slice) - pp_slice = paddle.slice(x, (-100, ), [0], [1]) + pp_slice = paddle.slice(x, (-100,), [0], [1]) np_slice = x_arr[0:1] np.testing.assert_array_equal(pp_slice, np_slice) @@ -834,9 +836,11 @@ class TestInferShape(unittest.TestCase): x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) starts = paddle.to_tensor( - np.reshape(np.array([], dtype=np.int32), (0, ))) + np.reshape(np.array([], dtype=np.int32), (0,)) + ) ends = paddle.to_tensor( - np.reshape(np.array([], dtype=np.int32), (0, ))) + np.reshape(np.array([], dtype=np.int32), (0,)) + ) with self.assertRaises(ValueError): paddle.slice(x, [-1000000], starts, ends) @@ -851,30 +855,30 @@ class TestInferShape(unittest.TestCase): paddle.slice(x, 0, starts, ends) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestImperativeCUDAPinnedInput(unittest.TestCase): - def test_input_cuda_pinned_var(self): _enable_legacy_dygraph() with fluid.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') - var = core.VarBase(value=data, - name='', - persistable=False, - place=fluid.CUDAPinnedPlace(), - zero_copy=False) - sliced = var[:, 10:, :var.shape[1]] + var = core.VarBase( + value=data, + name='', + persistable=False, + place=fluid.CUDAPinnedPlace(), + zero_copy=False, + ) + sliced = var[:, 10:, : var.shape[1]] self.assertEqual(sliced.shape, [2, 70, 80]) class 
TestSliceDoubleGradCheck(unittest.TestCase): - def slice_wrapper(self, x): - return paddle.slice(x[0], - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 2, 4]) + return paddle.slice( + x[0], axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] + ) @prog_scope() def func(self, place): @@ -884,23 +888,18 @@ class TestSliceDoubleGradCheck(unittest.TestCase): data = layers.data('data', [4, 5, 6], False, dtype) data.persistable = True - out = paddle.slice(data, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 2, 4]) + out = paddle.slice( + data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] + ) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.slice_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.slice_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -912,12 +911,10 @@ class TestSliceDoubleGradCheck(unittest.TestCase): class TestSliceTripleGradCheck(unittest.TestCase): - def slice_wrapper(self, x): - return paddle.slice(x[0], - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 2, 4]) + return paddle.slice( + x[0], axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] + ) @prog_scope() def func(self, place): @@ -927,23 +924,18 @@ class TestSliceTripleGradCheck(unittest.TestCase): data = layers.data('data', [4, 5, 6], False, dtype) data.persistable = True - out = paddle.slice(data, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 2, 4]) + out = paddle.slice( + data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] + ) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.slice_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.slice_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_slice_var.py b/python/paddle/fluid/tests/unittests/test_slice_var.py index a48b6de55661721bb4d9eae94a5e19c90bfa3936..b97e5243c3299bdf9ec15a3faeb3bc902a9a1d54 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_var.py +++ b/python/paddle/fluid/tests/unittests/test_slice_var.py @@ -19,15 +19,15 @@ import random class TestSliceVar(unittest.TestCase): - def check_slice_output(self, shapes, expected_sizes, min_size): var_list = [] program = fluid.Program() for shape in shapes: - var = program.global_block().create_var(name=str( - random.randint(10000, 99999)), - persistable=True, - shape=shape) + var = program.global_block().create_var( + name=str(random.randint(10000, 99999)), + persistable=True, + shape=shape, + ) var_list.append(var) blocks = slice_variable(var_list, 10, min_size) all_sizes = [] @@ -40,20 +40,33 @@ class TestSliceVar(unittest.TestCase): def test_1k(self): shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] - expected_sizes = [[15], [1024], - [ - 2352, 2352, 2352, 2352, 
2352, 2352, 2352, 2352, - 2352, 784 - ], [2040, 2040, 2040, 2040], - [1150, 1150, 1150, 1150, 1150, 1150, 1100]] + expected_sizes = [ + [15], + [1024], + [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], + [2040, 2040, 2040, 2040], + [1150, 1150, 1150, 1150, 1150, 1150, 1100], + ] self.check_slice_output(shapes, expected_sizes, 1024) def test_check_output_8k(self): - shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10], - [6, 33, 33, 33]] - expected_sizes = [[15], [1024], [10976, 10976], [8160], [8000], - [35937, 35937, 35937, 35937, 35937, 35937]] + shapes = [ + [3, 5], + [1024], + [28, 784], + [8, 1020], + [800, 10], + [6, 33, 33, 33], + ] + expected_sizes = [ + [15], + [1024], + [10976, 10976], + [8160], + [8000], + [35937, 35937, 35937, 35937, 35937, 35937], + ] self.check_slice_output(shapes, expected_sizes, 8192) diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py index f9df4160ff619f35d3d2ab3ee37e8fe6f65036cf..4e078e506c7d68e7e7eb0b858b04e5354671044f 100644 --- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py +++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py @@ -38,7 +38,6 @@ def smooth_l1_loss_np(input, label, reduction='mean', delta=1.0): class SmoothL1Loss(unittest.TestCase): - def setUp(self): np.random.seed(123) @@ -47,8 +46,11 @@ class SmoothL1Loss(unittest.TestCase): label_np = np.random.random([100, 200]).astype(np.float32) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype='float32') label = fluid.data(name='label', shape=[100, 200], dtype='float32') @@ -56,17 +58,21 @@ class SmoothL1Loss(unittest.TestCase): ret = smooth_l1_loss(input, label) exe = fluid.Executor(place) - static_ret, = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + (static_ret,) = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() - dy_ret = smooth_l1_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = smooth_l1_loss_np(input_np, label_np, reduction='mean') @@ -79,8 +85,11 @@ class SmoothL1Loss(unittest.TestCase): label_np = np.random.random([100, 200]).astype(np.float32) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype='float32') label = fluid.data(name='label', shape=[100, 200], dtype='float32') @@ -88,17 +97,21 @@ class SmoothL1Loss(unittest.TestCase): ret = smooth_l1_loss(input, label) exe = fluid.Executor(place) - static_ret, = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + (static_ret,) = exe.run( + prog, + feed={ + 'input': input_np, + 
'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') - dy_ret = smooth_l1_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = smooth_l1_loss_np(input_np, label_np, reduction='sum') @@ -111,8 +124,11 @@ class SmoothL1Loss(unittest.TestCase): label_np = np.random.random([100, 200]).astype(np.float32) prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype='float32') label = fluid.data(name='label', shape=[100, 200], dtype='float32') @@ -120,17 +136,21 @@ class SmoothL1Loss(unittest.TestCase): ret = smooth_l1_loss(input, label) exe = fluid.Executor(place) - static_ret, = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + (static_ret,) = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') - dy_ret = smooth_l1_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = smooth_l1_loss_np(input_np, label_np, reduction='none') @@ -144,8 +164,11 @@ class SmoothL1Loss(unittest.TestCase): delta = np.random.rand() prog = fluid.Program() startup_prog = fluid.Program() - place = fluid.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) with fluid.program_guard(prog, startup_prog): input = fluid.data(name='input', shape=[100, 200], dtype='float32') label = fluid.data(name='label', shape=[100, 200], dtype='float32') @@ -153,17 +176,21 @@ class SmoothL1Loss(unittest.TestCase): ret = smooth_l1_loss(input, label) exe = fluid.Executor(place) - static_ret, = exe.run(prog, - feed={ - 'input': input_np, - 'label': label_np, - }, - fetch_list=[ret]) + (static_ret,) = exe.run( + prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret], + ) self.assertIsNotNone(static_ret) with fluid.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) - dy_ret = smooth_l1_loss(fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np), + ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = smooth_l1_loss_np(input_np, label_np, delta=delta) diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py index ffabe17915a26cdd0386d04a08ce013c8626f044..4b31e7ead6b9434e2eb469b1524d385ee522167f 100644 --- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss_op.py @@ -27,13 +27,12 @@ def smooth_l1_loss_forward(val, sigma2): class TestSmoothL1LossOp1(OpTest): - def setUp(self): self.op_type = "smooth_l1_loss" dims = (5, 20) self.inputs = { 'X': np.random.random(dims).astype("float32"), - 'Y': np.random.random(dims).astype("float32") + 'Y': np.random.random(dims).astype("float32"), } sigma = 3.0 self.attrs = {'sigma': sigma} @@ -43,35 +42,37 @@ class TestSmoothL1LossOp1(OpTest): loss = loss.reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32') + 'Out': loss.astype('float32'), } def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.02, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set=set("X"), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.03, + no_grad_set=set("X"), + check_eager=True, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.03, - no_grad_set=set('Y'), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.03, + no_grad_set=set('Y'), + check_eager=True, + ) class TestSmoothL1LossOp2(OpTest): - def setUp(self): self.op_type = "smooth_l1_loss" dims = (5, 20) @@ -79,7 +80,7 @@ class TestSmoothL1LossOp2(OpTest): 'X': np.random.random(dims).astype("float32"), 'Y': np.random.random(dims).astype("float32"), 'InsideWeight': np.random.random(dims).astype("float32"), - 'OutsideWeight': np.random.random(dims).astype("float32") + 'OutsideWeight': np.random.random(dims).astype("float32"), } sigma = 3.0 self.attrs = {'sigma': sigma} @@ -91,42 +92,46 @@ class TestSmoothL1LossOp2(OpTest): loss = loss.sum(1).reshape((dims[0], 1)) self.outputs = { 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32') + 'Out': loss.astype('float32'), } def test_check_output(self): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.03, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.03, check_eager=True + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']), - check_eager=True) + self.check_grad( + ['Y'], + 'Out', + max_relative_error=0.03, + no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']), + check_eager=True, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.03, - no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']), - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.03, + no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']), + check_eager=True, + ) class TestSmoothL1LossOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): # The input type of accuracy_op must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, fluid.layers.smooth_l1, x1, y1) # The input dtype of accuracy_op must be float32 or float64. x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") diff --git a/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py b/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py index b69a2a5c18740556133c93a933d4b7fedb6b498c..98918fb4b0babf51a19ae5754d8c0417d9db5574 100644 --- a/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_soft_margin_loss.py @@ -27,21 +27,18 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype=input_np.dtype) - label = paddle.static.data(name='label', - shape=label_np.shape, - dtype=label_np.dtype) + input = paddle.static.data( + name='input', shape=input_np.shape, dtype=input_np.dtype + ) + label = paddle.static.data( + name='label', shape=label_np.shape, dtype=label_np.dtype + ) sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction) res = sm_loss(input, label) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, feed={"input": input_np, "label": label_np}, fetch_list=[res] + ) return static_result @@ -55,23 +52,20 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype=input_np.dtype) - label = paddle.static.data(name='label', - shape=label_np.shape, - dtype=label_np.dtype) - - res = paddle.nn.functional.soft_margin_loss(input, - label, - reduction=reduction) + input = paddle.static.data( + name='input', shape=input_np.shape, dtype=input_np.dtype + ) + label = paddle.static.data( + name='label', shape=label_np.shape, dtype=label_np.dtype + ) + + res = paddle.nn.functional.soft_margin_loss( + input, label, reduction=reduction + ) exe = paddle.static.Executor(place) - static_result, = exe.run(prog, - feed={ - "input": input_np, - "label": label_np - }, - fetch_list=[res]) + (static_result,) = exe.run( + prog, feed={"input": input_np, "label": label_np}, fetch_list=[res] + ) return static_result @@ -99,9 +93,9 @@ def test_dygraph_functional( input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) - dy_res = paddle.nn.functional.soft_margin_loss(input, - label, - reduction=reduction) + dy_res = paddle.nn.functional.soft_margin_loss( + input, label, reduction=reduction + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -126,7 +120,6 @@ def calc_softmarginloss( class TestSoftMarginLoss(unittest.TestCase): - def test_SoftMarginLoss(self): input_np = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64) types = [np.int32, np.int64, np.float32, np.float64] @@ -137,48 +130,58 @@ class TestSoftMarginLoss(unittest.TestCase): for place in places: for reduction in reductions: for _type in types: - label_np = np.random.randint(0, 2, - size=(5, 5)).astype(_type) + 
label_np = np.random.randint(0, 2, size=(5, 5)).astype( + _type + ) label_np[label_np == 0] = -1 - static_result = test_static_layer(place, input_np, label_np, - reduction) - dy_result = test_dygraph_layer(place, input_np, label_np, - reduction) - expected = calc_softmarginloss(input_np, label_np, - reduction) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-05) + static_result = test_static_layer( + place, input_np, label_np, reduction + ) + dy_result = test_dygraph_layer( + place, input_np, label_np, reduction + ) + expected = calc_softmarginloss( + input_np, label_np, reduction + ) + np.testing.assert_allclose( + static_result, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-05 + ) np.testing.assert_allclose(dy_result, expected, rtol=1e-05) static_functional = test_static_functional( - place, input_np, label_np, reduction) + place, input_np, label_np, reduction + ) dy_functional = test_dygraph_functional( - place, input_np, label_np, reduction) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-05) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-05) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-05) + place, input_np, label_np, reduction + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-05 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-05 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-05 + ) def test_SoftMarginLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.loss.SoftMarginLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.loss.SoftMarginLoss, + reduction="unsupport reduction", + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') label = paddle.to_tensor([[-1.0, 1.0]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.soft_margin_loss, - input=input, - label=label, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.soft_margin_loss, + input=input, + label=label, + reduction="unsupport reduction", + ) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_softmax2d.py b/python/paddle/fluid/tests/unittests/test_softmax2d.py index c5e45dec2280101b693a8661101da02803bdf3aa..8297ca9c9081466831cbb2156f1ba5c075c2f438 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax2d.py +++ b/python/paddle/fluid/tests/unittests/test_softmax2d.py @@ -20,13 +20,15 @@ from test_softmax_op import ref_softmax class TestSoftmax2DAPI(unittest.TestCase): - def setUp(self): self.shape = [2, 6, 5, 4] self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') self.axis = -3 - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -35,7 +37,7 @@ class TestSoftmax2DAPI(unittest.TestCase): m = paddle.nn.Softmax2D() out = m(x) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={'X': self.x_np}, fetch_list=[out]) + (res,) = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_softmax(self.x_np, self.axis) np.testing.assert_allclose(out_ref, res, rtol=1e-05) @@ -50,27 +52,30 @@ class TestSoftmax2DAPI(unittest.TestCase): class TestSoftmax2DShape(TestSoftmax2DAPI): - def setUp(self): self.shape 
= [2, 6, 4] self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') self.axis = -3 - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSoftmax2DFloat32(TestSoftmax2DAPI): - def setUp(self): self.shape = [2, 3, 4] self.x_np = np.random.uniform(-1, 1, self.shape).astype('float32') self.axis = -3 - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSoftmax2DCPU(TestSoftmax2DAPI): - def setUp(self): self.shape = [2, 6, 4] self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') @@ -79,10 +84,12 @@ class TestSoftmax2DCPU(TestSoftmax2DAPI): class TestSoftmax2DRepr(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_extra_repr(self): paddle.disable_static(self.place) @@ -92,10 +99,12 @@ class TestSoftmax2DRepr(unittest.TestCase): class TestSoftmax2DError(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_error(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py index 2bdede66f4ab5b3a668ca50f73829934865e63a7..c36b8362c84bcf4909bbfcaf50749e4bf01a428a 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py @@ -35,10 +35,10 @@ def _get_softmax(x, mask, fp16=True): return rst -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxMaskFuseOp(OpTest): - def setUp(self): self.op_type = "fused_softmax_mask" x = np.random.random((1, 1, 8, 32)) @@ -61,10 +61,10 @@ class TestSoftmaxMaskFuseOp(OpTest): pass -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxMaskFuseOp0(OpTest): - def setUp(self): self.op_type = "fused_softmax_mask" x = np.random.random((1, 1, 8, 32)).astype("float16") @@ -81,16 +81,16 @@ class TestSoftmaxMaskFuseOp0(OpTest): self.check_grad_with_place(core.CUDAPlace(0), ["X"], "Out") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestDropoutBiasFuseOp3(unittest.TestCase): - def test_static_result(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_x = fluid.data(name="x", shape=[1, 1, 8, 32], dtype="float32") - input_mask = fluid.data(name="mask", - shape=[1, 1, 8, 32], - dtype="float32") + input_mask = fluid.data( + name="mask", shape=[1, 1, 8, 32], dtype="float32" + ) rst = incubate.softmax_mask_fuse(input_x, input_mask) x_in_np = np.random.random((1, 1, 8, 32)).astype("float32") @@ -99,12 +99,11 @@ class TestDropoutBiasFuseOp3(unittest.TestCase): rst_np = _get_softmax(x_in_np, mask_in_np, False) exe = fluid.Executor(fluid.CUDAPlace(0)) - 
fetches = exe.run(fluid.default_main_program(), - feed={ - "x": x_in_np, - "mask": mask_in_np - }, - fetch_list=[rst]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": x_in_np, "mask": mask_in_np}, + fetch_list=[rst], + ) np.testing.assert_allclose(fetches[0], rst_np, rtol=1e-05) def test_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py index 62a2e729051e50d9dcd9425f9efd4223c2b7eaaf..84032336402c4d1894a029c33b7b96843306eda2 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py @@ -36,10 +36,10 @@ def _get_softmax_upper(x, fp16=True): return rst -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxMaskFuseOp(OpTest): - def setUp(self): self.op_type = "fused_softmax_mask_upper_triangle" x = np.random.random((1, 4, 32, 32)).astype("float16") @@ -54,10 +54,10 @@ class TestSoftmaxMaskFuseOp(OpTest): self.check_grad_with_place(core.CUDAPlace(0), ["X"], "Out") -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxMaskFuseOp1(OpTest): - def setUp(self): self.op_type = "fused_softmax_mask_upper_triangle" x = np.random.random((1, 4, 32, 32)) @@ -78,8 +78,9 @@ class TestSoftmaxMaskFuseOp1(OpTest): pass -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestDropoutBiasFuseOp2(unittest.TestCase): # test the python side API for softmax_mask_fuse op def setUp(self): @@ -89,18 +90,20 @@ class TestDropoutBiasFuseOp2(unittest.TestCase): def test_static(self): for dtype in self.dtypes: with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="x", - shape=[1, 4, 32, 32], - dtype=dtype) + input_x = fluid.data( + name="x", shape=[1, 4, 32, 32], dtype=dtype + ) rst = incubate.softmax_mask_fuse_upper_triangle(input_x) x_in_np = np.random.random((1, 4, 32, 32)).astype(dtype) rst_np = _get_softmax_upper(x_in_np, dtype == 'float16') exe = fluid.Executor(fluid.CUDAPlace(0)) - fetches = exe.run(fluid.default_main_program(), - feed={"x": x_in_np}, - fetch_list=[rst]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": x_in_np}, + fetch_list=[rst], + ) np.testing.assert_allclose(fetches[0], rst_np, rtol=1e-05) def test_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 13a43e4d3e900f2bd629d2400952911c64646a31..75324475f46de1b8308287fcc5263bb043ee72e9 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -26,7 +26,7 @@ def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) 
+ shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -41,7 +41,6 @@ def ref_softmax(x, axis=None, dtype=None): class TestSoftmaxOp(OpTest): - def get_x_shape(self): return [10, 10] @@ -67,7 +66,7 @@ class TestSoftmaxOp(OpTest): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } def init_kernel_type(self): @@ -78,7 +77,8 @@ class TestSoftmaxOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False)) + place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + ) else: self.check_output(check_dygraph=(self.use_mkldnn == False)) @@ -88,25 +88,27 @@ class TestSoftmaxOp(OpTest): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_grad_with_place( - place, ["X"], + place, + ["X"], "Out", max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) else: - self.check_grad(["X"], - "Out", - max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad( + ["X"], + "Out", + max_relative_error=0.01, + check_dygraph=(self.use_mkldnn == False), + ) class TestSoftmaxOp2(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] class TestSoftmaxOp3(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -115,7 +117,6 @@ class TestSoftmaxOp3(TestSoftmaxOp): class TestSoftmaxOp4(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -124,7 +125,6 @@ class TestSoftmaxOp4(TestSoftmaxOp): class TestSoftmaxOp5(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -133,7 +133,6 @@ class TestSoftmaxOp5(TestSoftmaxOp): class TestSoftmaxOp6(TestSoftmaxOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -141,26 +140,26 @@ class TestSoftmaxOp6(TestSoftmaxOp): return 3 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp(TestSoftmaxOp): - def init_kernel_type(self): self.use_cudnn = True -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -168,10 +167,10 @@ class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp): return 0 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -179,10 +178,10 @@ class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp): return 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -190,10 +189,10 @@ class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp): return 2 -@unittest.skipIf(not core.is_compiled_with_cuda(), - 
"core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] @@ -201,18 +200,18 @@ class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp): return 3 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp7(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] @@ -220,10 +219,10 @@ class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp): return 0 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] @@ -231,10 +230,10 @@ class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp): return 1 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] @@ -242,10 +241,10 @@ class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp): return 2 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] @@ -253,10 +252,10 @@ class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp): return 3 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5, 6] @@ -264,10 +263,10 @@ class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp): return 4 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxFP16Op(TestSoftmaxOp): - def init_kernel_type(self): self.dtype = np.float16 @@ -282,18 +281,18 @@ class TestSoftmaxFP16Op(TestSoftmaxOp): pass -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxFP16Op2(TestSoftmaxFP16Op): - def get_x_shape(self): return [2, 3, 4, 10] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): - def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 @@ -305,18 +304,18 @@ class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): self.check_output_with_place(place, atol=1e-3) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class 
TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp): - def get_x_shape(self): return [2, 3, 4, 5] -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxBF16Op(OpTest): - def setUp(self): self.op_type = "softmax" self.use_cudnn = self.init_cudnn() @@ -336,7 +335,7 @@ class TestSoftmaxBF16Op(OpTest): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } def init_cudnn(self): @@ -344,34 +343,40 @@ class TestSoftmaxBF16Op(OpTest): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place(place, - check_dygraph=(self.use_mkldnn == False)) + self.check_output_with_place( + place, check_dygraph=(self.use_mkldnn == False) + ) def test_check_grad(self): place = core.CUDAPlace(0) - self.check_grad_with_place(place, ["X"], - "Out", - numeric_grad_delta=0.05, - check_dygraph=(self.use_mkldnn == False)) + self.check_grad_with_place( + place, + ["X"], + "Out", + numeric_grad_delta=0.05, + check_dygraph=(self.use_mkldnn == False), + ) @unittest.skipIf( - not core.is_compiled_with_cuda() or core.cudnn_version() < 8100 + not core.is_compiled_with_cuda() + or core.cudnn_version() < 8100 or paddle.device.cuda.get_device_capability()[0] < 8, - "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0" + "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0", ) class TestSoftmaxBF16CUDNNOp(TestSoftmaxBF16Op): - def init_cudnn(self): return True class TestSoftmaxAPI(unittest.TestCase): - def setUp(self): - self.place = paddle.CUDAPlace( - 0) if core.is_compiled_with_cuda() else paddle.CPUPlace() - self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32') + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) + self.x_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype('float32') self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np) self.executed_api() @@ -426,19 +431,18 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32, float64. 
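For reference only (an illustration added here, not a hunk of this patch): every softmax test in this file validates against the NumPy helper whose body appears at the top of this section, which uses the max-shift trick. A minimal self-contained sketch of that idea follows; the name stable_softmax_sketch is hypothetical, the test file's own helper is stable_softmax.

import numpy as np

def stable_softmax_sketch(x):
    # Shift by the max so np.exp never sees a large positive argument;
    # clipping at -64.0 keeps the smallest exponent finite, mirroring the
    # reference helper these tests compare against.
    shiftx = (x - np.max(x)).clip(-64.0)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)

# Logits this large would overflow a naive np.exp(x) / np.exp(x).sum():
print(stable_softmax_sketch(np.array([1000.0, 1001.0, 1002.0])))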
- x_int32 = paddle.fluid.data(name='x_int32', - shape=[2, 3], - dtype='int32') + x_int32 = paddle.fluid.data( + name='x_int32', shape=[2, 3], dtype='int32' + ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data(name='x_fp16', - shape=[2, 3], - dtype='float16') + x_fp16 = paddle.fluid.data( + name='x_fp16', shape=[2, 3], dtype='float16' + ) self.softmax(x_fp16) class TestSoftmaxInplaceAPI(TestSoftmaxAPI): - def executed_api(self): self.softmax = F.softmax_ diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index 6b4a733853b4c82a36ec96b2b2f268cf30cd2161..fb0e46b6740b8f84e0e3a5b2aa5c80c2a0e84fd0 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -28,7 +28,7 @@ def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): axis %= len(shape) n = int(np.prod(shape[:axis])) axis_dim = shape[axis] - remain = int(np.prod(shape[axis + 1:])) + remain = int(np.prod(shape[axis + 1 :])) softmax_reshape = softmax.reshape((n, axis_dim, remain)) label_reshape = label.reshape((n, 1, remain)) result = np.zeros_like(label_reshape, dtype=softmax.dtype) @@ -40,13 +40,15 @@ def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): return result.reshape(label.shape) -def python_api(logits, - label, - soft_label=False, - use_softmax=True, - numeric_stable_mode=True, - ignore_index=-100, - axis=-1): +def python_api( + logits, + label, + soft_label=False, + use_softmax=True, + numeric_stable_mode=True, + ignore_index=-100, + axis=-1, +): # here only can test paddle.nn.functional.softmax_with_cross_entropy, # the paddle.nn.functional.cross_entropy contains other math ops return paddle.nn.functional.softmax_with_cross_entropy( @@ -56,23 +58,31 @@ def python_api(logits, ignore_index=ignore_index, numeric_stable_mode=numeric_stable_mode, return_softmax=use_softmax, - axis=axis) - - -def python_core_api_without_softmax(logits, - label, - soft_label=False, - use_softmax=False, - numeric_stable_mode=True, - ignore_index=-100, - axis=-1): + axis=axis, + ) + + +def python_core_api_without_softmax( + logits, + label, + soft_label=False, + use_softmax=False, + numeric_stable_mode=True, + ignore_index=-100, + axis=-1, +): # the API paddle.nn.functional.softmax_with_cross_entropy cannot # set use_softmax=False, so add a core api manually assert use_softmax is False - _, loss = paddle._C_ops.cross_entropy_with_softmax(logits, label, - soft_label, use_softmax, - numeric_stable_mode, - ignore_index, axis) + _, loss = paddle._C_ops.cross_entropy_with_softmax( + logits, + label, + soft_label, + use_softmax, + numeric_stable_mode, + ignore_index, + axis, + ) return loss @@ -101,8 +111,10 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.initParams() logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, logits) if self.soft_label: @@ -111,13 +123,13 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): else: axis_dim = self.shape[self.axis] self.shape[self.axis] = 1 - labels = np.random.randint(0, - axis_dim, - self.shape, - dtype=self.hard_label_dtype()) + labels = np.random.randint( + 0, axis_dim, self.shape, dtype=self.hard_label_dtype() + 
) - loss = cross_entropy(softmax, labels, self.soft_label, self.axis, - self.ignore_index) + loss = cross_entropy( + softmax, labels, self.soft_label, self.axis, self.ignore_index + ) if self.use_softmax == False: self.inputs = {"Logits": softmax, "Label": labels} @@ -126,7 +138,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.outputs = { "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { "numeric_stable_mode": self.numeric_stable_mode, @@ -146,48 +158,48 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): def test_check_grad(self): if core.is_compiled_with_rocm(): if self.python_api is not None: - self.check_grad(["Logits"], - "Loss", - max_relative_error=5e-1, - check_eager=True) + self.check_grad( + ["Logits"], + "Loss", + max_relative_error=5e-1, + check_eager=True, + ) # HIP will have accuracy fail when using float32 in CPU place self.check_grad(["Logits"], "Loss", max_relative_error=5e-1) else: if self.python_api is not None: - self.check_grad(["Logits"], - "Loss", - numeric_grad_delta=0.001, - check_eager=True) + self.check_grad( + ["Logits"], + "Loss", + numeric_grad_delta=0.001, + check_eager=True, + ) self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001) class TestSoftmaxWithCrossEntropyOpInt32(TestSoftmaxWithCrossEntropyOp): - def hard_label_dtype(self): return "int32" class TestSoftmaxWithCrossEntropyOpInt16(TestSoftmaxWithCrossEntropyOp): - def hard_label_dtype(self): return "int16" class TestSoftmaxWithCrossEntropyOpInt8(TestSoftmaxWithCrossEntropyOp): - def hard_label_dtype(self): return "int8" class TestSoftmaxWithCrossEntropyOpUInt8(TestSoftmaxWithCrossEntropyOp): - def hard_label_dtype(self): return "uint8" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_1D( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -198,12 +210,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_1D( self.axis = -1 self.ignore_index = -1 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_1D( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -214,15 +226,15 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_1D( self.axis = -1 self.ignore_index = -1 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" ############################################################################## -#NotWithSoftmax_SoftLabel_2D start +# NotWithSoftmax_SoftLabel_2D start ############################################################################## class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -233,12 +245,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D( self.axis = -1 self.ignore_index = -1 self.dtype = np.float32 if 
core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis2( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -249,12 +261,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis2( self.axis = 1 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis3( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -265,12 +277,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis3( self.axis = 2 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis4( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -281,21 +293,21 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_SoftLabel_2D_Axis4( self.axis = 3 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" ############################################################################## -#NotWithSoftmax_SoftLabel_2D end +# NotWithSoftmax_SoftLabel_2D end ############################################################################## ############################################################################## -#NotWithSoftmax_HardLabel_2D start +# NotWithSoftmax_HardLabel_2D start ############################################################################## class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -306,12 +318,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D( self.axis = -1 self.ignore_index = -1 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis2( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -322,12 +334,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis2( self.axis = 1 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis3( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp 
+): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -338,12 +350,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis3( self.axis = 2 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis4( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -354,21 +366,21 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Axis4( self.axis = 3 self.ignore_index = -1 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" ############################################################################## -#NotWithSoftmax_HardLabel_2D end +# NotWithSoftmax_HardLabel_2D end ############################################################################## ############################################################################## -#NotWithSoftmax_HardLabel_2D_Ignore start +# NotWithSoftmax_HardLabel_2D_Ignore start ############################################################################## class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_Ignore( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -379,12 +391,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_Ignore( self.axis = -1 self.ignore_index = 2 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_Ignore_Axis( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -395,12 +407,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_Ignore_Axis( self.axis = 1 self.ignore_index = 2 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Ignore( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -411,12 +423,12 @@ class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Ignore( self.axis = -1 self.ignore_index = 2 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" class TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Ignore_Axis3( - TestSoftmaxWithCrossEntropyOp): - + TestSoftmaxWithCrossEntropyOp +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_core_api_without_softmax @@ -427,16 +439,15 @@ class 
TestSoftmaxWithCrossEntropyOp_NotWithSoftmax_HardLabel_2D_Ignore_Axis3( self.axis = 2 self.ignore_index = 2 self.shape = [3, 5, 7, 11] - self.use_softmax = False #default is true, means "with softmax" + self.use_softmax = False # default is true, means "with softmax" ############################################################################## -#NotWithSoftmax_HardLabel_2D_Ignore end +# NotWithSoftmax_HardLabel_2D_Ignore end ############################################################################## class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp): - def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -450,10 +461,10 @@ class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp): self.use_softmax = True -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): - def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -472,8 +483,10 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): # NOTE: numpy float16 have very low accuracy, use float32 for numpy check. date_type = np.float32 if core.is_compiled_with_rocm() else np.float64 logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(date_type)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(date_type), + ) softmax = np.apply_along_axis(stable_softmax, self.axis, logits) axis_dim = self.shape[self.axis] @@ -485,7 +498,7 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): self.inputs = {"Logits": logits.astype(self.dtype), "Label": labels} self.outputs = { "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { "numeric_stable_mode": self.numeric_stable_mode, @@ -501,16 +514,15 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): def test_check_grad(self): if self.python_api is not None: - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.1, - check_eager=True) + self.check_grad( + ["Logits"], "Loss", max_relative_error=0.1, check_eager=True + ) self.check_grad(["Logits"], "Loss", max_relative_error=0.1) -class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(TestSoftmaxWithCrossEntropyOpFp16 - ): - +class TestSoftmaxWithCrossEntropyOpNoCudnnFp16( + TestSoftmaxWithCrossEntropyOpFp16 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -524,10 +536,9 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(TestSoftmaxWithCrossEntropyOpFp16 def test_check_grad(self): if self.python_api is not None: - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.1, - check_eager=True) + self.check_grad( + ["Logits"], "Loss", max_relative_error=0.1, check_eager=True + ) self.check_grad(["Logits"], "Loss", max_relative_error=0.1) @@ -557,10 +568,9 @@ class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp): if core.is_compiled_with_rocm(): # HIP will have accuracy fail when using float32 in CPU place if self.python_api is not None: - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.1, - check_eager=True) + self.check_grad( + ["Logits"], "Loss", max_relative_error=0.1, check_eager=True + ) self.check_grad(["Logits"], "Loss", max_relative_error=0.1) else: if self.python_api is not 
None: @@ -587,7 +597,6 @@ class TestSoftmaxWithCrossEntropyOp3(TestSoftmaxWithCrossEntropyOp): class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3): - def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -677,8 +686,9 @@ class TestSoftmaxWithCrossEntropyOpAxis4(TestSoftmaxWithCrossEntropyOp): self.use_softmax = True -class TestSoftmaxWithCrossEntropyOpAxisDimEqualOne(TestSoftmaxWithCrossEntropyOp - ): +class TestSoftmaxWithCrossEntropyOpAxisDimEqualOne( + TestSoftmaxWithCrossEntropyOp +): """ Test softmax with cross entropy operator with discreate one-hot labels. Given axis != -1 @@ -698,8 +708,8 @@ class TestSoftmaxWithCrossEntropyOpAxisDimEqualOne(TestSoftmaxWithCrossEntropyOp class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis1( - TestSoftmaxWithCrossEntropyOpNoCudnnFp16): - + TestSoftmaxWithCrossEntropyOpNoCudnnFp16 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -714,8 +724,8 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis1( class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis2( - TestSoftmaxWithCrossEntropyOpNoCudnnFp16): - + TestSoftmaxWithCrossEntropyOpNoCudnnFp16 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -730,8 +740,8 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis2( class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis3( - TestSoftmaxWithCrossEntropyOpNoCudnnFp16): - + TestSoftmaxWithCrossEntropyOpNoCudnnFp16 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -745,9 +755,9 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis3( self.use_softmax = True -class TestSoftmaxWithCrossEntropyOpSoftLabelAxis1(TestSoftmaxWithCrossEntropyOp2 - ): - +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis1( + TestSoftmaxWithCrossEntropyOp2 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -761,9 +771,9 @@ class TestSoftmaxWithCrossEntropyOpSoftLabelAxis1(TestSoftmaxWithCrossEntropyOp2 self.use_softmax = True -class TestSoftmaxWithCrossEntropyOpSoftLabelAxis2(TestSoftmaxWithCrossEntropyOp2 - ): - +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis2( + TestSoftmaxWithCrossEntropyOp2 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -777,9 +787,9 @@ class TestSoftmaxWithCrossEntropyOpSoftLabelAxis2(TestSoftmaxWithCrossEntropyOp2 self.use_softmax = True -class TestSoftmaxWithCrossEntropyOpSoftLabelAxis3(TestSoftmaxWithCrossEntropyOp2 - ): - +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis3( + TestSoftmaxWithCrossEntropyOp2 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -793,9 +803,9 @@ class TestSoftmaxWithCrossEntropyOpSoftLabelAxis3(TestSoftmaxWithCrossEntropyOp2 self.use_softmax = True -class TestSoftmaxWithCrossEntropyOpSoftLabelAxis4(TestSoftmaxWithCrossEntropyOp2 - ): - +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis4( + TestSoftmaxWithCrossEntropyOp2 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -810,8 +820,8 @@ class TestSoftmaxWithCrossEntropyOpSoftLabelAxis4(TestSoftmaxWithCrossEntropyOp2 class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1( - TestSoftmaxWithCrossEntropyOp3): - + TestSoftmaxWithCrossEntropyOp3 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -826,8 +836,8 @@ class 
TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1( class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2( - TestSoftmaxWithCrossEntropyOp3): - + TestSoftmaxWithCrossEntropyOp3 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -842,8 +852,8 @@ class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2( class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3( - TestSoftmaxWithCrossEntropyOp3): - + TestSoftmaxWithCrossEntropyOp3 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api @@ -858,8 +868,8 @@ class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3( class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4( - TestSoftmaxWithCrossEntropyOp3): - + TestSoftmaxWithCrossEntropyOp3 +): def initParams(self): self.op_type = "softmax_with_cross_entropy" self.python_api = python_api diff --git a/python/paddle/fluid/tests/unittests/test_solve_op.py b/python/paddle/fluid/tests/unittests/test_solve_op.py index b07c3d5d080e99eb1ac1db9dec7456a5d13af35d..4fcea5e67d59054056401581f06940bf06d0d940 100644 --- a/python/paddle/fluid/tests/unittests/test_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_solve_op.py @@ -27,7 +27,6 @@ from paddle.fluid.framework import _test_eager_guard # 2D normal case class TestSolveOp(OpTest): - def config(self): self.python_api = paddle.linalg.solve self.input_x_matrix_shape = [15, 15] @@ -42,7 +41,7 @@ class TestSolveOp(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random(self.input_x_matrix_shape).astype(self.dtype), - 'Y': np.random.random(self.input_y_matrix_shape).astype(self.dtype) + 'Y': np.random.random(self.input_y_matrix_shape).astype(self.dtype), } self.outputs = { 'Out': np.linalg.solve(self.inputs['X'], self.inputs['Y']) @@ -57,7 +56,6 @@ class TestSolveOp(OpTest): # x broadcast + 3D batch case class TestSolveOpBatched_case0(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -65,7 +63,7 @@ class TestSolveOpBatched_case0(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((11, 11)).astype(self.dtype), - 'Y': np.random.random((2, 11, 7)).astype(self.dtype) + 'Y': np.random.random((2, 11, 7)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -74,15 +72,13 @@ class TestSolveOpBatched_case0(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=1e-1, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=1e-1, check_eager=True + ) # 3D batch + y vector case class TestSolveOpBatched_case1(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -90,7 +86,7 @@ class TestSolveOpBatched_case1(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((20, 6, 6)).astype(self.dtype), - 'Y': np.random.random((20, 6)).astype(self.dtype) + 'Y': np.random.random((20, 6)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -99,15 +95,13 @@ class TestSolveOpBatched_case1(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.04, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True + ) # 3D batch + y broadcast case class TestSolveOpBatched_case2(OpTest): - def setUp(self): 
self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -115,7 +109,7 @@ class TestSolveOpBatched_case2(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((2, 10, 10)).astype(self.dtype), - 'Y': np.random.random((1, 10, 10)).astype(self.dtype) + 'Y': np.random.random((1, 10, 10)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -124,15 +118,13 @@ class TestSolveOpBatched_case2(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.02, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True + ) # x broadcast + 3D batch case class TestSolveOpBatched_case3(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -140,7 +132,7 @@ class TestSolveOpBatched_case3(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((1, 10, 10)).astype(self.dtype), - 'Y': np.random.random((2, 10, 10)).astype(self.dtype) + 'Y': np.random.random((2, 10, 10)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -149,15 +141,13 @@ class TestSolveOpBatched_case3(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.02, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True + ) # 3D normal batch case class TestSolveOpBatched_case4(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -165,7 +155,7 @@ class TestSolveOpBatched_case4(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((3, 6, 6)).astype(self.dtype), - 'Y': np.random.random((3, 6, 7)).astype(self.dtype) + 'Y': np.random.random((3, 6, 7)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -179,7 +169,6 @@ class TestSolveOpBatched_case4(OpTest): # 4D normal batch case class TestSolveOpBatched_case5(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -187,7 +176,7 @@ class TestSolveOpBatched_case5(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((2, 2, 6, 6)).astype(self.dtype), - 'Y': np.random.random((2, 2, 6, 6)).astype(self.dtype) + 'Y': np.random.random((2, 2, 6, 6)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -201,7 +190,6 @@ class TestSolveOpBatched_case5(OpTest): # 4D batch + y broadcast case class TestSolveOpBatched_case6(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -209,7 +197,7 @@ class TestSolveOpBatched_case6(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((2, 2, 6, 6)).astype(self.dtype), - 'Y': np.random.random((1, 2, 6, 9)).astype(self.dtype) + 'Y': np.random.random((1, 2, 6, 9)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -223,7 +211,6 @@ class TestSolveOpBatched_case6(OpTest): # 5D normal batch case class TestSolveOpBatched_case7(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -231,7 +218,7 @@ class TestSolveOpBatched_case7(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((2, 2, 2, 4, 4)).astype(self.dtype), - 'Y': 
np.random.random((2, 2, 2, 4, 4)).astype(self.dtype) + 'Y': np.random.random((2, 2, 2, 4, 4)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -240,15 +227,13 @@ class TestSolveOpBatched_case7(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.04, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True + ) # 5D batch + y broadcast case class TestSolveOpBatched_case8(OpTest): - def setUp(self): self.python_api = paddle.linalg.solve self.op_type = "solve" @@ -256,7 +241,7 @@ class TestSolveOpBatched_case8(OpTest): np.random.seed(2021) self.inputs = { 'X': np.random.random((2, 2, 2, 4, 4)).astype(self.dtype), - 'Y': np.random.random((1, 2, 2, 4, 7)).astype(self.dtype) + 'Y': np.random.random((1, 2, 2, 4, 7)).astype(self.dtype), } result = np.linalg.solve(self.inputs['X'], self.inputs['Y']) self.outputs = {'Out': result} @@ -265,21 +250,21 @@ class TestSolveOpBatched_case8(OpTest): self.check_output(check_eager=True) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], - 'Out', - max_relative_error=0.04, - check_eager=True) + self.check_grad( + ['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True + ) class TestSolveOpError(unittest.TestCase): - def func_errors(self): with program_guard(Program(), Program()): # The input type of solve_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.linalg.solve, x1, y1) # The data type of input must be float32 or float64. 
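For reference only (an illustration added at this hunk boundary, not part of the patch): the TestSolveOpBatched cases above all check paddle.linalg.solve against np.linalg.solve and rely on NumPy broadcasting the leading batch dimensions. A minimal sketch with the same shapes as TestSolveOpBatched_case2:

import numpy as np

np.random.seed(2021)
x = np.random.random((2, 10, 10)).astype('float64')  # batch of coefficient matrices
y = np.random.random((1, 10, 10)).astype('float64')  # right-hand sides; batch dim of 1 broadcasts
expected = np.linalg.solve(x, y)  # NumPy broadcasts the leading batch dimensions
assert expected.shape == (2, 10, 10)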
@@ -317,7 +302,6 @@ class TestSolveOpError(unittest.TestCase): # 2D + vector case, FP64 class TestSolveOpAPI_1(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -327,12 +311,12 @@ class TestSolveOpAPI_1(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data(name="input_x", - shape=[3, 3], - dtype=self.dtype) - paddle_input_y = fluid.data(name="input_y", - shape=[3], - dtype=self.dtype) + paddle_input_x = fluid.data( + name="input_x", shape=[3, 3], dtype=self.dtype + ) + paddle_input_y = fluid.data( + name="input_y", shape=[3], dtype=self.dtype + ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) np_input_x = np.random.random([3, 3]).astype(self.dtype) @@ -341,22 +325,20 @@ class TestSolveOpAPI_1(unittest.TestCase): np_result = np.linalg.solve(np_input_x, np_input_y) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": np_input_x, - "input_y": np_input_y - }, - fetch_list=[paddle_result]) - np.testing.assert_allclose(fetches[0], - np.linalg.solve(np_input_x, np_input_y), - rtol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": np_input_x, "input_y": np_input_y}, + fetch_list=[paddle_result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.solve(np_input_x, np_input_y), rtol=1e-05 + ) def test_static(self): for place in self.place: self.check_static_result(place=place) def func_dygraph(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -368,9 +350,9 @@ class TestSolveOpAPI_1(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -385,7 +367,6 @@ class TestSolveOpAPI_1(unittest.TestCase): # 2D normal case, FP64 class TestSolveOpAPI_2(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -396,12 +377,12 @@ class TestSolveOpAPI_2(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data(name="input_x", - shape=[10, 10], - dtype=self.dtype) - paddle_input_y = fluid.data(name="input_y", - shape=[10, 4], - dtype=self.dtype) + paddle_input_x = fluid.data( + name="input_x", shape=[10, 10], dtype=self.dtype + ) + paddle_input_y = fluid.data( + name="input_y", shape=[10, 4], dtype=self.dtype + ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) np_input_x = np.random.random([10, 10]).astype(self.dtype) @@ -410,22 +391,20 @@ class TestSolveOpAPI_2(unittest.TestCase): np_result = np.linalg.solve(np_input_x, np_input_y) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": np_input_x, - "input_y": np_input_y - }, - fetch_list=[paddle_result]) - np.testing.assert_allclose(fetches[0], - np.linalg.solve(np_input_x, np_input_y), - rtol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": np_input_x, "input_y": np_input_y}, + fetch_list=[paddle_result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.solve(np_input_x, np_input_y), rtol=1e-05 + ) def 
test_static(self): for place in self.place: self.check_static_result(place=place) def func_dygraph(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -436,9 +415,9 @@ class TestSolveOpAPI_2(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -453,7 +432,6 @@ class TestSolveOpAPI_2(unittest.TestCase): # 2D normal case, FP32 class TestSolveOpAPI_3(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -464,12 +442,12 @@ class TestSolveOpAPI_3(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data(name="input_x", - shape=[10, 10], - dtype=self.dtype) - paddle_input_y = fluid.data(name="input_y", - shape=[10, 4], - dtype=self.dtype) + paddle_input_x = fluid.data( + name="input_x", shape=[10, 10], dtype=self.dtype + ) + paddle_input_y = fluid.data( + name="input_y", shape=[10, 4], dtype=self.dtype + ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) np_input_x = np.random.random([10, 10]).astype(self.dtype) @@ -478,22 +456,20 @@ class TestSolveOpAPI_3(unittest.TestCase): np_result = np.linalg.solve(np_input_x, np_input_y) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": np_input_x, - "input_y": np_input_y - }, - fetch_list=[paddle_result]) - np.testing.assert_allclose(fetches[0], - np.linalg.solve(np_input_x, np_input_y), - rtol=0.0001) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": np_input_x, "input_y": np_input_y}, + fetch_list=[paddle_result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.solve(np_input_x, np_input_y), rtol=0.0001 + ) def test_static(self): for place in self.place: self.check_static_result(place=place) def func_dygraph(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -505,9 +481,9 @@ class TestSolveOpAPI_3(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=0.0001) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=0.0001 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -522,7 +498,6 @@ class TestSolveOpAPI_3(unittest.TestCase): # 3D + y broadcast case, FP64 class TestSolveOpAPI_4(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -532,12 +507,12 @@ class TestSolveOpAPI_4(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data(name="input_x", - shape=[2, 3, 3], - dtype=self.dtype) - paddle_input_y = fluid.data(name="input_y", - shape=[1, 3, 3], - dtype=self.dtype) + paddle_input_x = fluid.data( + name="input_x", shape=[2, 3, 3], dtype=self.dtype + ) + paddle_input_y = fluid.data( + name="input_y", shape=[1, 3, 3], dtype=self.dtype + ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) np_input_x = np.random.random([2, 3, 
3]).astype(self.dtype) @@ -546,22 +521,20 @@ class TestSolveOpAPI_4(unittest.TestCase): np_result = np.linalg.solve(np_input_x, np_input_y) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "input_x": np_input_x, - "input_y": np_input_y - }, - fetch_list=[paddle_result]) - np.testing.assert_allclose(fetches[0], - np.linalg.solve(np_input_x, np_input_y), - rtol=1e-05) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": np_input_x, "input_y": np_input_y}, + fetch_list=[paddle_result], + ) + np.testing.assert_allclose( + fetches[0], np.linalg.solve(np_input_x, np_input_y), rtol=1e-05 + ) def test_static(self): for place in self.place: self.check_static_result(place=place) def func_dygraph(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -573,9 +546,9 @@ class TestSolveOpAPI_4(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -608,12 +581,11 @@ class TestSolveOpSingularAPI(unittest.TestCase): exe = fluid.Executor(place) try: - fetches = exe.run(fluid.default_main_program(), - feed={ - "x": input_x_np, - "y": input_y_np - }, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": input_x_np, "y": input_y_np}, + fetch_list=[result], + ) except RuntimeError as ex: print("The mat is singular") except ValueError as ex: diff --git a/python/paddle/fluid/tests/unittests/test_sort_op.py b/python/paddle/fluid/tests/unittests/test_sort_op.py index d28af8f684beb0b36bf0884bac635d7805852c1f..387f5474dd4b9134c5402ca27808e1eecc0f6dac 100644 --- a/python/paddle/fluid/tests/unittests/test_sort_op.py +++ b/python/paddle/fluid/tests/unittests/test_sort_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestSortOnCPU(unittest.TestCase): - def setUp(self): self.place = core.CPUPlace() @@ -30,10 +29,14 @@ class TestSortOnCPU(unittest.TestCase): input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") output = paddle.sort(x=input) exe = fluid.Executor(self.place) - data = np.array([[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], - [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]], - dtype='float32') - result, = exe.run(feed={'input': data}, fetch_list=[output]) + data = np.array( + [ + [[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], + [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]], + ], + dtype='float32', + ) + (result,) = exe.run(feed={'input': data}, fetch_list=[output]) np_result = np.sort(result) self.assertEqual((result == np_result).all(), True) @@ -42,16 +45,19 @@ class TestSortOnCPU(unittest.TestCase): input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") output = paddle.sort(x=input, axis=1) exe = fluid.Executor(self.place) - data = np.array([[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], - [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]], - dtype='float32') - result, = exe.run(feed={'input': data}, fetch_list=[output]) + data = np.array( + [ + [[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], + [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]], + ], + dtype='float32', + ) + (result,) = exe.run(feed={'input': data}, fetch_list=[output]) np_result = np.sort(result, axis=1) self.assertEqual((result == np_result).all(), True) class 
TestSortOnGPU(TestSortOnCPU): - def init_place(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -60,7 +66,6 @@ class TestSortOnGPU(TestSortOnCPU): class TestSortDygraph(unittest.TestCase): - def setUp(self): self.input_data = np.random.rand(10, 10) if core.is_compiled_with_cuda(): @@ -84,8 +89,9 @@ class TestSortDygraph(unittest.TestCase): paddle.disable_static(self.place) var_x = paddle.to_tensor(self.input_data) out = paddle.sort(var_x, axis=-1) - self.assertEqual((np.sort(self.input_data, - axis=-1) == out.numpy()).all(), True) + self.assertEqual( + (np.sort(self.input_data, axis=-1) == out.numpy()).all(), True + ) paddle.enable_static() def test_api_1(self): diff --git a/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py index b80f511716c95520d7e9f039eb36a093a5cdfe2f..9c43836e2949fcedc183b9a1924c9359a328130c 100644 --- a/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py +++ b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py @@ -19,7 +19,6 @@ from op_test import OpTest class TestSpaceToDepthOp(OpTest): - @staticmethod def helper(in_, width, height, channel, batch, blocksize, forward, out_): channel_out = channel // (blocksize * blocksize) @@ -33,8 +32,9 @@ class TestSpaceToDepthOp(OpTest): width2 = i * blocksize + offset % blocksize height2 = j * blocksize + offset // blocksize out_index = width2 + width * blocksize * ( - height2 + height * blocksize * - (channel2 + channel_out * b)) + height2 + + height * blocksize * (channel2 + channel_out * b) + ) if forward: out_[out_index] = in_[in_index] else: @@ -45,9 +45,16 @@ class TestSpaceToDepthOp(OpTest): self.op_type = "space_to_depth" self.inputs = {"X": self.x} - self.helper(self.x_1d, self.x.shape[3], self.x.shape[2], - self.x.shape[1], self.x.shape[0], self.blocksize, - self.forward, self.out_1d) + self.helper( + self.x_1d, + self.x.shape[3], + self.x.shape[2], + self.x.shape[1], + self.x.shape[0], + self.blocksize, + self.forward, + self.out_1d, + ) self.out = np.reshape(self.out_1d, self.infered_shape) self.attrs = {"blocksize": self.blocksize} self.outputs = {"Out": self.out} @@ -65,18 +72,23 @@ class TestSpaceToDepthOp(OpTest): self.forward = 1 def test_check_output(self): - place = fluid.core.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.core.CPUPlace() + place = ( + fluid.core.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.core.CPUPlace() + ) self.check_output_with_place(place, 1e-5, None, False) def test_check_grad(self): - place = fluid.core.CUDAPlace( - 0) if fluid.core.is_compiled_with_cuda() else fluid.core.CPUPlace() + place = ( + fluid.core.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.core.CPUPlace() + ) self.check_grad_with_place(place, ['X'], 'Out') class TestSpaceToDepthOpBasic(TestSpaceToDepthOp): - def init_data(self): self.ori_shape = (32, 8, 6, 6) self.infered_shape = (32, 32, 3, 3) @@ -91,7 +103,6 @@ class TestSpaceToDepthOpBasic(TestSpaceToDepthOp): class TestSpaceToDepthOpDoubleBasic(TestSpaceToDepthOp): - def init_data(self): self.ori_shape = (32, 8, 6, 6) self.infered_shape = (32, 32, 3, 3) @@ -106,7 +117,6 @@ class TestSpaceToDepthOpDoubleBasic(TestSpaceToDepthOp): class TestSpaceToDepthOpWithStride3(TestSpaceToDepthOp): - def init_data(self): self.ori_shape = (32, 9, 6, 6) self.infered_shape = (32, 81, 2, 2) @@ -121,7 +131,6 @@ class TestSpaceToDepthOpWithStride3(TestSpaceToDepthOp): class 
TestSpaceToDepthOpWithNotSquare(TestSpaceToDepthOp): - def init_data(self): self.ori_shape = (32, 9, 9, 6) self.infered_shape = (32, 81, 3, 2) diff --git a/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py b/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py index d93dba5c770456a4a10641191318d45a9e8eac24..ca3dbe4a19a8a16882eab2eb04ba457f1f3dac6b 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py @@ -51,7 +51,7 @@ class TestAddmm(unittest.TestCase): dense_x.stop_gradient = False dense_y = origin_y.detach() dense_y.stop_gradient = False - dense_out = 2. * paddle.matmul(dense_x, dense_y) + 3. * dense_input + dense_out = 2.0 * paddle.matmul(dense_x, dense_y) + 3.0 * dense_input sp_input = dense_input.detach() sp_input.stop_gradient = False @@ -64,30 +64,36 @@ class TestAddmm(unittest.TestCase): sp_y.stop_gradient = False sp_out = paddle.sparse.addmm(sp_input, sp_x, sp_y, 3.0, 2.0) - np.testing.assert_allclose(sp_out.numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.numpy(), dense_out.numpy(), rtol=1e-05 + ) if get_cuda_version() >= 11030: dense_out.backward() sp_out.backward() - np.testing.assert_allclose(sp_input.grad.numpy(), - dense_input.grad.numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_y.grad.numpy(), - dense_y.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_input.grad.numpy(), dense_input.grad.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) + np.testing.assert_allclose( + sp_y.grad.numpy(), dense_y.grad.numpy(), rtol=1e-05 + ) - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11000, "only support cuda>=11.0") + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, + "only support cuda>=11.0", + ) def test_addmm_2d(self): self.check_result([16, 10], [16, 12], [12, 10], 'coo') self.check_result([16, 10], [16, 12], [12, 10], 'csr') - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11070, "only support cuda>=11.7") + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11070, + "only support cuda>=11.7", + ) def test_addmm_3d(self): self.check_result([8, 16, 10], [8, 16, 12], [8, 12, 10], 'coo') self.check_result([8, 16, 10], [8, 16, 12], [8, 12, 10], 'csr') diff --git a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py index 6e077502c018b335485dd8a022e77f5727653b08..3b29d335da42b794d09c461e02b1889a20ea9f1a 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py @@ -91,14 +91,9 @@ def get_csr_value(mat, layout, nnz): return value -def ref_sparse_attention(q, - k, - v, - offset, - columns, - kp_mask=None, - attn_mask=None, - bsz=None): +def ref_sparse_attention( + q, k, v, offset, columns, kp_mask=None, attn_mask=None, bsz=None +): row, col, nnz = q.shape[0], q.shape[1], columns.shape[0] mat = np.zeros((row, row)) for cur_row in range(row): @@ -109,7 +104,7 @@ def ref_sparse_attention(q, mat[cur_row][cur_col] = 1 a = np.dot(q, k.T) * mat a_value = get_csr_value(a, mat, nnz) - scaling = float(col)**-0.5 + scaling = float(col) ** -0.5 a = scaling * a for i 
in range(row): for j in range(row): @@ -125,13 +120,9 @@ def ref_sparse_attention(q, return result, a_value, b_value -def ref_batch_sparse_attention(q, - k, - v, - offset, - columns, - kp_mask=None, - attn_mask=None): +def ref_batch_sparse_attention( + q, k, v, offset, columns, kp_mask=None, attn_mask=None +): batch_size, num_heads, row, col = q.shape nnz = columns.shape[2] result = np.zeros((batch_size, num_heads, row, col)) @@ -139,11 +130,16 @@ def ref_batch_sparse_attention(q, result_softmax = np.zeros((batch_size, num_heads, nnz)) for i in range(batch_size): for j in range(num_heads): - cur_q, cur_k, cur_v, = q[i][j], k[i][j], v[i][j] + cur_q, cur_k, cur_v, = ( + q[i][j], + k[i][j], + v[i][j], + ) cur_offset, cur_columns = offset[i][j], columns[i][j] if kp_mask is None and attn_mask is None: cur_result, cur_sdd, cur_softmax = ref_sparse_attention( - cur_q, cur_k, cur_v, cur_offset, cur_columns) + cur_q, cur_k, cur_v, cur_offset, cur_columns + ) else: cur_result, cur_sdd, cur_softmax = ref_sparse_attention( cur_q, @@ -153,7 +149,8 @@ def ref_batch_sparse_attention(q, cur_columns, kp_mask=kp_mask, attn_mask=attn_mask, - bsz=i) + bsz=i, + ) result[i][j] = cur_result result_sdd[i][j], result_softmax[i][j] = cur_sdd, cur_softmax return result, result_sdd, result_softmax @@ -191,10 +188,9 @@ def init_csr_format(batch_size, num_heads, rows, blocksize): @unittest.skipIf( not core.is_compiled_with_cuda() or get_cuda_version() < 11030, - "core is not compiled with CUDA and cuda version need larger than or equal to 11.3" + "core is not compiled with CUDA and cuda version need larger than or equal to 11.3", ) class TestSparseAttentionOp(OpTest): - def config(self): self.shape = (1, 1, 16, 16) self.blocksize = 4 @@ -210,8 +206,9 @@ class TestSparseAttentionOp(OpTest): self.k = np.random.random(self.shape).astype(self.dtype) self.v = np.random.random(self.shape).astype(self.dtype) # init CSR tensor - offset, columns = init_csr_format(self.shape[0], self.shape[1], - self.shape[2], self.blocksize) + offset, columns = init_csr_format( + self.shape[0], self.shape[1], self.shape[2], self.blocksize + ) self.offset = offset.astype('int32') self.columns = columns.astype('int32') # init mask tensor @@ -232,10 +229,12 @@ class TestSparseAttentionOp(OpTest): self.offset, self.columns, kp_mask=self.key_padding_mask, - attn_mask=self.attn_mask) + attn_mask=self.attn_mask, + ) else: result, result_sdd, result_softmax = ref_batch_sparse_attention( - self.q, self.k, self.v, self.offset, self.columns) + self.q, self.k, self.v, self.offset, self.columns + ) if self.use_mask == True: self.inputs = { @@ -258,7 +257,7 @@ class TestSparseAttentionOp(OpTest): self.outputs = { 'Out': result.astype(self.dtype), 'SparseDotSdd': result_sdd.astype(self.dtype), - 'Softmax': result_softmax.astype(self.dtype) + 'Softmax': result_softmax.astype(self.dtype), } def test_check_output(self): @@ -271,7 +270,6 @@ class TestSparseAttentionOp(OpTest): class TestSparseAttentionOpFp32Test(TestSparseAttentionOp): - def config(self): self.shape = (1, 1, 8, 16) self.blocksize = 2 @@ -280,7 +278,6 @@ class TestSparseAttentionOpFp32Test(TestSparseAttentionOp): class TestSparseAttentionOpShapeTest(TestSparseAttentionOp): - def config(self): self.shape = (2, 2, 32, 8) self.blocksize = 8 @@ -290,10 +287,9 @@ class TestSparseAttentionOpShapeTest(TestSparseAttentionOp): @unittest.skipIf( not core.is_compiled_with_cuda() or get_cuda_version() < 11030, - "core is not compiled with CUDA and cuda version need larger than or equal to 11.3" + "core is not 
compiled with CUDA and cuda version need larger than or equal to 11.3", ) class TestSparseAttentionAPI(unittest.TestCase): - def setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (1, 1, 8, 4) @@ -308,54 +304,62 @@ class TestSparseAttentionAPI(unittest.TestCase): K = paddle.static.data(name="K", shape=self.shape, dtype=self.dtype) V = paddle.static.data(name="V", shape=self.shape, dtype=self.dtype) - batch_size, num_heads, rows = self.shape[0], self.shape[ - 1], self.shape[2] + batch_size, num_heads, rows = ( + self.shape[0], + self.shape[1], + self.shape[2], + ) block_num = rows / self.blocksize block_last = rows % self.blocksize - sparse_nnz_num = block_num * self.blocksize * self.blocksize + block_last * block_last + sparse_nnz_num = ( + block_num * self.blocksize * self.blocksize + + block_last * block_last + ) offset_shape = (batch_size, num_heads, rows + 1) columns_shape = (batch_size, num_heads, int(sparse_nnz_num)) - offset = paddle.static.data(name="Offset", - shape=offset_shape, - dtype="int32") - columns = paddle.static.data(name="Columns", - shape=columns_shape, - dtype="int32") + offset = paddle.static.data( + name="Offset", shape=offset_shape, dtype="int32" + ) + columns = paddle.static.data( + name="Columns", shape=columns_shape, dtype="int32" + ) key_padding_mask_shape = (self.shape[0], self.shape[2]) attn_mask_shape = (self.shape[2], self.shape[2]) if self.use_mask == True: key_padding_mask = paddle.static.data( name="KeyPaddingMask", shape=key_padding_mask_shape, - dtype=self.dtype) - attn_mask = paddle.static.data(name="AttnMask", - shape=attn_mask_shape, - dtype=self.dtype) - Out = F.sparse_attention(Q, - K, - V, - offset, - columns, - key_padding_mask=key_padding_mask, - attn_mask=attn_mask) + dtype=self.dtype, + ) + attn_mask = paddle.static.data( + name="AttnMask", shape=attn_mask_shape, dtype=self.dtype + ) + Out = F.sparse_attention( + Q, + K, + V, + offset, + columns, + key_padding_mask=key_padding_mask, + attn_mask=attn_mask, + ) else: Out = F.sparse_attention(Q, K, V, offset, columns) Q_np = np.random.random(self.shape).astype(self.dtype) K_np = np.random.random(self.shape).astype(self.dtype) V_np = np.random.random(self.shape).astype(self.dtype) - offset_np, columns_np = init_csr_format(self.shape[0], - self.shape[1], - self.shape[2], - self.blocksize) + offset_np, columns_np = init_csr_format( + self.shape[0], self.shape[1], self.shape[2], self.blocksize + ) offset_np = offset_np.astype('int32') columns_np = columns_np.astype('int32') # init mask tensor - key_padding_mask_np = np.random.randint(0, - 2, - size=key_padding_mask_shape) + key_padding_mask_np = np.random.randint( + 0, 2, size=key_padding_mask_shape + ) attn_mask_np = np.random.randint(0, 2, size=attn_mask_shape) key_padding_mask_np = init_mask(key_padding_mask_np) attn_mask_np = init_mask(attn_mask_np) @@ -364,16 +368,18 @@ class TestSparseAttentionAPI(unittest.TestCase): exe = fluid.Executor(self.place) if self.use_mask == True: - fetches_result = exe.run(feed={ - "Q": Q_np, - "K": K_np, - "V": V_np, - "Offset": offset_np, - "Columns": columns_np, - 'KeyPaddingMask': key_padding_mask_np, - 'AttnMask': attn_mask_np - }, - fetch_list=[Out]) + fetches_result = exe.run( + feed={ + "Q": Q_np, + "K": K_np, + "V": V_np, + "Offset": offset_np, + "Columns": columns_np, + 'KeyPaddingMask': key_padding_mask_np, + 'AttnMask': attn_mask_np, + }, + fetch_list=[Out], + ) expected_result, __, __ = ref_batch_sparse_attention( Q_np, K_np, @@ -381,28 +387,32 @@ class 
TestSparseAttentionAPI(unittest.TestCase): offset_np, columns_np, kp_mask=key_padding_mask_np, - attn_mask=attn_mask_np) + attn_mask=attn_mask_np, + ) else: - fetches_result = exe.run(feed={ - "Q": Q_np, - "K": K_np, - "V": V_np, - "Offset": offset_np, - "Columns": columns_np - }, - fetch_list=[Out]) + fetches_result = exe.run( + feed={ + "Q": Q_np, + "K": K_np, + "V": V_np, + "Offset": offset_np, + "Columns": columns_np, + }, + fetch_list=[Out], + ) expected_result, __, __ = ref_batch_sparse_attention( - Q_np, K_np, V_np, offset_np, columns_np) + Q_np, K_np, V_np, offset_np, columns_np + ) - np.testing.assert_allclose(fetches_result, - expected_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + fetches_result, expected_result, rtol=1e-05, atol=1e-05 + ) def test_dygraph(self): paddle.disable_static() - offset, columns = init_csr_format(self.shape[0], self.shape[1], - self.shape[2], self.blocksize) + offset, columns = init_csr_format( + self.shape[0], self.shape[1], self.shape[2], self.blocksize + ) offset = offset.astype('int32') columns = columns.astype('int32') query = np.random.random(self.shape).astype(self.dtype) @@ -427,13 +437,15 @@ class TestSparseAttentionAPI(unittest.TestCase): paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place) if self.use_mask == True: - paddle_result = F.sparse_attention(paddle_query, - paddle_key, - paddle_value, - paddle_offset, - paddle_colunmns, - key_padding_mask=paddle_kp_mask, - attn_mask=paddle_attn_mask) + paddle_result = F.sparse_attention( + paddle_query, + paddle_key, + paddle_value, + paddle_offset, + paddle_colunmns, + key_padding_mask=paddle_kp_mask, + attn_mask=paddle_attn_mask, + ) numpy_result, __, __ = ref_batch_sparse_attention( query, @@ -442,25 +454,29 @@ class TestSparseAttentionAPI(unittest.TestCase): offset, columns, kp_mask=key_padding_mask, - attn_mask=attn_mask) + attn_mask=attn_mask, + ) numpy_result = numpy_result.astype(self.dtype) else: - paddle_result = F.sparse_attention(paddle_query, paddle_key, - paddle_value, paddle_offset, - paddle_colunmns) + paddle_result = F.sparse_attention( + paddle_query, + paddle_key, + paddle_value, + paddle_offset, + paddle_colunmns, + ) numpy_result, __, __ = ref_batch_sparse_attention( - query, key, value, offset, columns) + query, key, value, offset, columns + ) numpy_result = numpy_result.astype(self.dtype) - np.testing.assert_allclose(paddle_result.numpy(), - numpy_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + paddle_result.numpy(), numpy_result, rtol=1e-05, atol=1e-05 + ) class TestSparseAttentionAPITestFloat(TestSparseAttentionAPI): - def setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (2, 2, 8, 4) @@ -470,7 +486,6 @@ class TestSparseAttentionAPITestFloat(TestSparseAttentionAPI): class TestSparseAttentionAPITestShape1(TestSparseAttentionAPI): - def setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (2, 2, 64, 32) @@ -480,7 +495,6 @@ class TestSparseAttentionAPITestShape1(TestSparseAttentionAPI): class TestSparseAttentionAPITestShape2(TestSparseAttentionAPI): - def setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (2, 1, 64, 32) @@ -490,7 +504,6 @@ class TestSparseAttentionAPITestShape2(TestSparseAttentionAPI): class TestSparseAttentionAPITestShape3(TestSparseAttentionAPI): - def setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (4, 4, 128, 32) @@ -500,7 +513,6 @@ class TestSparseAttentionAPITestShape3(TestSparseAttentionAPI): class TestSparseAttentionAPITestShape4(TestSparseAttentionAPI): - def 
setUp(self): self.place = paddle.CUDAPlace(0) self.shape = (3, 3, 35, 15) diff --git a/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py b/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py index dfd7d23f8a44c4d4f16877fb487b3d2ef7858418..0b9c88aad9de0ab2e2301c7f9c7ae5898c6e9be7 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_conv_op.py @@ -21,13 +21,12 @@ import paddle.sparse as sparse class TestSparseConv(unittest.TestCase): - def test_conv3d(self): with _test_eager_guard(): kernel = [[[[[1], [1], [1]], [[1], [1], [1]], [[1], [1], [1]]]]] - dense_kernel = paddle.to_tensor(kernel, - dtype='float32', - stop_gradient=False) + dense_kernel = paddle.to_tensor( + kernel, dtype='float32', stop_gradient=False + ) dense_kernel = paddle.reshape(dense_kernel, [1, 3, 3, 1, 1]) paddings = [0, 0, 0] strides = [1, 1, 1] @@ -40,17 +39,19 @@ class TestSparseConv(unittest.TestCase): values = paddle.to_tensor(values, dtype='float32') dense_shape = [1, 1, 3, 4, 1] correct_out_values = [[5], [11]] - sparse_input = core.eager.sparse_coo_tensor(indices, values, - dense_shape, False) - out = paddle.sparse.nn.functional.conv3d(sparse_input, - dense_kernel, - bias=paddle.to_tensor( - bias, dtype='float32'), - stride=strides, - padding=paddings, - dilation=dilations, - groups=1, - data_format="NDHWC") + sparse_input = core.eager.sparse_coo_tensor( + indices, values, dense_shape, False + ) + out = paddle.sparse.nn.functional.conv3d( + sparse_input, + dense_kernel, + bias=paddle.to_tensor(bias, dtype='float32'), + stride=strides, + padding=paddings, + dilation=dilations, + groups=1, + data_format="NDHWC", + ) out.backward(out) out = paddle.sparse.coalesce(out) assert np.array_equal(correct_out_values, out.values().numpy()) @@ -62,41 +63,41 @@ class TestSparseConv(unittest.TestCase): indices = paddle.to_tensor(indices, dtype='int32') values = paddle.to_tensor(values, dtype='float32') dense_shape = [1, 1, 3, 4, 1] - sparse_x = paddle.sparse.sparse_coo_tensor(indices, - values, - dense_shape, - stop_gradient=True) + sparse_x = paddle.sparse.sparse_coo_tensor( + indices, values, dense_shape, stop_gradient=True + ) weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32') - y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, - weight, - key='subm_conv') - assert np.array_equal(sparse_x.indices().numpy(), - y.indices().numpy()) + y = paddle.sparse.nn.functional.subm_conv3d( + sparse_x, weight, key='subm_conv' + ) + assert np.array_equal( + sparse_x.indices().numpy(), y.indices().numpy() + ) def test_Conv3D(self): with _test_eager_guard(): - #(4, non_zero_num), 4-D:(N, D, H, W) + # (4, non_zero_num), 4-D:(N, D, H, W) indices = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] - #(non_zero_num, C) + # (non_zero_num, C) values = [[1], [2], [3], [4]] indices = paddle.to_tensor(indices, dtype='int32') values = paddle.to_tensor(values, dtype='float32') dense_shape = [1, 1, 3, 4, 1] correct_out_values = [[4], [10]] sparse_input = paddle.sparse.sparse_coo_tensor( - indices, values, dense_shape, False) + indices, values, dense_shape, False + ) - sparse_conv3d = paddle.sparse.nn.Conv3D(1, - 1, (1, 3, 3), - data_format='NDHWC') + sparse_conv3d = paddle.sparse.nn.Conv3D( + 1, 1, (1, 3, 3), data_format='NDHWC' + ) sparse_out = sparse_conv3d(sparse_input) - #test errors + # test errors with self.assertRaises(ValueError): - #Currently, only support data_format='NDHWC' - conv3d = paddle.sparse.nn.SubmConv3D(1, - 1, (1, 3, 3), - 
data_format='NCDHW', - key='subm_conv') + # Currently, only support data_format='NDHWC' + conv3d = paddle.sparse.nn.SubmConv3D( + 1, 1, (1, 3, 3), data_format='NCDHW', key='subm_conv' + ) def test_SubmConv3D(self): with _test_eager_guard(): @@ -107,12 +108,12 @@ class TestSparseConv(unittest.TestCase): dense_shape = [1, 1, 3, 4, 1] correct_out_values = [[4], [10]] sparse_input = paddle.sparse.sparse_coo_tensor( - indices, values, dense_shape, False) + indices, values, dense_shape, False + ) - subm_conv3d = paddle.sparse.nn.SubmConv3D(1, - 1, (1, 3, 3), - data_format='NDHWC', - key='subm_conv') + subm_conv3d = paddle.sparse.nn.SubmConv3D( + 1, 1, (1, 3, 3), data_format='NDHWC', key='subm_conv' + ) # test extra_repr print(subm_conv3d.extra_repr()) @@ -120,13 +121,12 @@ class TestSparseConv(unittest.TestCase): # the output shape of subm_conv is same as input shape assert np.array_equal(indices, sparse_out.indices().numpy()) - #test errors + # test errors with self.assertRaises(ValueError): - #Currently, only support data_format='NDHWC' - conv3d = paddle.sparse.nn.SubmConv3D(1, - 1, (1, 3, 3), - data_format='NCDHW', - key='subm_conv') + # Currently, only support data_format='NDHWC' + conv3d = paddle.sparse.nn.SubmConv3D( + 1, 1, (1, 3, 3), data_format='NCDHW', key='subm_conv' + ) def test_Conv3D_bias(self): with _test_eager_guard(): @@ -138,8 +138,8 @@ class TestSparseConv(unittest.TestCase): sp_conv3d = paddle.sparse.nn.Conv3D(3, 2, 3, data_format='NDHWC') sp_conv3d.weight.set_value( - paddle.to_tensor(conv3d.weight.numpy().transpose(2, 3, 4, 1, - 0))) + paddle.to_tensor(conv3d.weight.numpy().transpose(2, 3, 4, 1, 0)) + ) sp_conv3d.bias.set_value(paddle.to_tensor(conv3d.bias.numpy())) x.stop_gradient = False @@ -152,50 +152,53 @@ class TestSparseConv(unittest.TestCase): dense_out = sp_out.to_dense() sp_loss = dense_out.mean() sp_loss.backward() - assert np.allclose(out.numpy(), - dense_out.numpy(), - atol=1e-3, - rtol=1e-3) - assert np.allclose(conv3d.weight.grad.numpy().transpose( - 2, 3, 4, 1, 0), - sp_conv3d.weight.grad.numpy(), - atol=1e-3, - rtol=1e-3) - assert np.allclose(conv3d.bias.grad.numpy(), - sp_conv3d.bias.grad.numpy(), - atol=1e-5, - rtol=1e-5) + assert np.allclose( + out.numpy(), dense_out.numpy(), atol=1e-3, rtol=1e-3 + ) + assert np.allclose( + conv3d.weight.grad.numpy().transpose(2, 3, 4, 1, 0), + sp_conv3d.weight.grad.numpy(), + atol=1e-3, + rtol=1e-3, + ) + assert np.allclose( + conv3d.bias.grad.numpy(), + sp_conv3d.bias.grad.numpy(), + atol=1e-5, + rtol=1e-5, + ) class TestStatic(unittest.TestCase): - def test(self): paddle.enable_static() - indices = paddle.static.data(name='indices', - shape=[4, 4], - dtype='int32') - values = paddle.static.data(name='values', - shape=[4, 1], - dtype='float32') + indices = paddle.static.data( + name='indices', shape=[4, 4], dtype='int32' + ) + values = paddle.static.data( + name='values', shape=[4, 1], dtype='float32' + ) dense_shape = [1, 1, 3, 4, 1] sp_x = sparse.sparse_coo_tensor(indices, values, dense_shape) weight_shape = [1, 3, 3, 1, 1] - weight = paddle.static.data(name='weight', - shape=weight_shape, - dtype='float32') + weight = paddle.static.data( + name='weight', shape=weight_shape, dtype='float32' + ) bias_shape = [1] - bias = paddle.static.data(name='bias', - shape=bias_shape, - dtype='float32') - out = sparse.nn.functional.conv3d(sp_x, - weight, - bias, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NDHWC") + bias = paddle.static.data( + name='bias', shape=bias_shape, dtype='float32' + ) + out = 
sparse.nn.functional.conv3d( + sp_x, + weight, + bias, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NDHWC", + ) sp_out = sparse.nn.functional.relu(out) out_indices = sp_out.indices() out_values = sp_out.values() @@ -205,19 +208,22 @@ class TestStatic(unittest.TestCase): indices_data = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]] values_data = [[1.0], [2.0], [3.0], [4.0]] - weight_data = np.array([[[[[1], [1], [1]], [[1], [1], [1]], - [[1], [1], [1]]]]]).astype('float32') + weight_data = np.array( + [[[[[1], [1], [1]], [[1], [1], [1]], [[1], [1], [1]]]]] + ).astype('float32') weight_data = weight_data.reshape(weight_shape) bias_data = np.array([1]).astype('float32') - fetch = exe.run(feed={ - 'indices': indices_data, - 'values': values_data, - 'weight': weight_data, - 'bias': bias_data - }, - fetch_list=[out, out_indices, out_values], - return_numpy=True) + fetch = exe.run( + feed={ + 'indices': indices_data, + 'values': values_data, + 'weight': weight_data, + 'bias': bias_data, + }, + fetch_list=[out, out_indices, out_values], + return_numpy=True, + ) correct_out = np.array([[[[[5.0], [11.0]]]]]).astype('float64') correct_out_values = [[5.0], [11.0]] assert np.array_equal(correct_out, fetch[0]) diff --git a/python/paddle/fluid/tests/unittests/test_sparse_copy_op.py b/python/paddle/fluid/tests/unittests/test_sparse_copy_op.py index 587e4b6d68a7b2428b9d4a5740476e079249df21..c31dc4e159fce8181dc09623a1fa390d41c9fa46 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_copy_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_copy_op.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import _test_eager_guard class TestSparseCopy(unittest.TestCase): - def test_copy_sparse_coo(self): with _test_eager_guard(): np_x = [[0, 1.0, 0], [2.0, 0, 0], [0, 3.0, 0]] diff --git a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py index 172bdaee2abb37c78b6214e05a2c3c5e5ddd1c86..f762584801ea7b7f0ad54017b3bbe7551e1f7b97 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py @@ -68,37 +68,45 @@ class TestSparseElementWiseAPI(unittest.TestCase): expect_res = op(dense_x, dense_y) expect_res.backward(expect_res) - np.testing.assert_allclose(expect_res.numpy(), - actual_res.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + expect_res.numpy(), + actual_res.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) if not (op == __truediv__ and dtype in ['int32', 'int64']): - np.testing.assert_allclose(dense_x.grad.numpy(), - csr_x.grad.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) - np.testing.assert_allclose(dense_y.grad.numpy(), - csr_y.grad.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + dense_x.grad.numpy(), + csr_x.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) + np.testing.assert_allclose( + dense_y.grad.numpy(), + csr_y.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) def func_test_coo(self, op): for sparse_dim in range(len(self.coo_shape) - 1, len(self.coo_shape)): for dtype in self.support_dtypes: - x = np.random.randint(-255, 255, - size=self.coo_shape).astype(dtype) - y = np.random.randint(-255, 255, - size=self.coo_shape).astype(dtype) + x = np.random.randint(-255, 255, size=self.coo_shape).astype( + dtype + ) + y = np.random.randint(-255, 255, size=self.coo_shape).astype( + dtype + ) 
dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False) dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False) - s_dense_x = paddle.to_tensor(x, - dtype=dtype, - stop_gradient=False) - s_dense_y = paddle.to_tensor(y, - dtype=dtype, - stop_gradient=False) + s_dense_x = paddle.to_tensor( + x, dtype=dtype, stop_gradient=False + ) + s_dense_y = paddle.to_tensor( + y, dtype=dtype, stop_gradient=False + ) coo_x = s_dense_x.to_sparse_coo(sparse_dim) coo_y = s_dense_y.to_sparse_coo(sparse_dim) @@ -108,18 +116,24 @@ class TestSparseElementWiseAPI(unittest.TestCase): expect_res = op(dense_x, dense_y) expect_res.backward(expect_res) - np.testing.assert_allclose(expect_res.numpy(), - actual_res.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) - np.testing.assert_allclose(dense_x.grad.numpy(), - coo_x.grad.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) - np.testing.assert_allclose(dense_y.grad.numpy(), - coo_y.grad.to_dense().numpy(), - rtol=1e-05, - equal_nan=True) + np.testing.assert_allclose( + expect_res.numpy(), + actual_res.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) + np.testing.assert_allclose( + dense_x.grad.numpy(), + coo_x.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) + np.testing.assert_allclose( + dense_y.grad.numpy(), + coo_y.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True, + ) def test_support_dtypes_csr(self): paddle.device.set_device('cpu') @@ -139,38 +153,37 @@ class TestSparseElementWiseAPI(unittest.TestCase): values2_data = [[1.0], [2.0]] shape = [2, 4, 2] - sp_a = sparse.sparse_coo_tensor(indices_data, - values1_data, - shape, - stop_gradient=False) - sp_b = sparse.sparse_coo_tensor(indices_data, - values2_data, - shape, - stop_gradient=False) + sp_a = sparse.sparse_coo_tensor( + indices_data, values1_data, shape, stop_gradient=False + ) + sp_b = sparse.sparse_coo_tensor( + indices_data, values2_data, shape, stop_gradient=False + ) values1 = paddle.to_tensor(values1_data, stop_gradient=False) values2 = paddle.to_tensor(values2_data, stop_gradient=False) - #c.values() = a.values() + b.values() + # c.values() = a.values() + b.values() sp_c = sparse.add(sp_a, sp_b) sp_c.backward() ref_c = values1 + values2 ref_c.backward() np.testing.assert_allclose(sp_c.values().numpy(), ref_c.numpy()) - np.testing.assert_allclose(sp_a.grad.values().numpy(), - values1.grad.numpy()) - np.testing.assert_allclose(sp_b.grad.values().numpy(), - values2.grad.numpy()) + np.testing.assert_allclose( + sp_a.grad.values().numpy(), values1.grad.numpy() + ) + np.testing.assert_allclose( + sp_b.grad.values().numpy(), values2.grad.numpy() + ) def test_add_bias(self): indices_data = [[0, 1], [0, 3]] values_data = [[1.0, 1.0], [2.0, 2.0]] shape = [2, 4, 2] - sp_a = sparse.sparse_coo_tensor(indices_data, - values_data, - shape, - stop_gradient=False) + sp_a = sparse.sparse_coo_tensor( + indices_data, values_data, shape, stop_gradient=False + ) bias_values = [1.0, 2.0] @@ -178,14 +191,15 @@ class TestSparseElementWiseAPI(unittest.TestCase): values2 = paddle.to_tensor(bias_values, stop_gradient=False) values3 = paddle.to_tensor(bias_values, stop_gradient=False) - #c.values() = a.values() + b + # c.values() = a.values() + b sp_c = sparse.add(sp_a, values2) sp_c.backward() ref_c = values1 + values3 ref_c.backward() np.testing.assert_allclose(sp_c.values().numpy(), ref_c.numpy()) - np.testing.assert_allclose(sp_a.grad.values().numpy(), - values1.grad.numpy()) + np.testing.assert_allclose( + sp_a.grad.values().numpy(), values1.grad.numpy() + ) 
np.testing.assert_allclose(values2.grad.numpy(), values3.grad.numpy()) diff --git a/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py index 50e9218a27d355a28f50d7f162f1594b0dedf9cb..dfc8806fd02ad471d2b787de90d084b20ad39727 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py @@ -37,10 +37,9 @@ def get_cuda_version(): @unittest.skipIf( not core.is_compiled_with_cuda() or get_cuda_version() < 11070, - "core is not compiled with CUDA and cuda version need larger than or equal to 11.7" + "core is not compiled with CUDA and cuda version need larger than or equal to 11.7", ) class TestSparseAttentionAPI1(unittest.TestCase): - def setUp(self): self.batch_size = 16 self.num_heads = 16 @@ -52,7 +51,10 @@ class TestSparseAttentionAPI1(unittest.TestCase): def test_dygraph(self): with _test_eager_guard(): self.shape = [ - self.batch_size, self.num_heads, self.seq_len, self.head_dim + self.batch_size, + self.num_heads, + self.seq_len, + self.head_dim, ] query = paddle.rand(self.shape, self.dtype) key = paddle.rand(self.shape, self.dtype) @@ -62,13 +64,16 @@ class TestSparseAttentionAPI1(unittest.TestCase): key.stop_gradient = False value.stop_gradient = False - mask = paddle.nn.functional.dropout(paddle.ones( - [self.seq_len, self.seq_len]), - mode='downscale_in_infer') + mask = paddle.nn.functional.dropout( + paddle.ones([self.seq_len, self.seq_len]), + mode='downscale_in_infer', + ) mask = mask.expand( - [self.batch_size, self.num_heads, self.seq_len, self.seq_len]) - sp_mask = mask.reshape([-1, self.seq_len, - self.seq_len]).to_sparse_csr() + [self.batch_size, self.num_heads, self.seq_len, self.seq_len] + ) + sp_mask = mask.reshape( + [-1, self.seq_len, self.seq_len] + ).to_sparse_csr() query_sp = copy.deepcopy(query) key_sp = copy.deepcopy(key) @@ -80,49 +85,57 @@ class TestSparseAttentionAPI1(unittest.TestCase): if self.use_mask: kp_mask = paddle.randint( - 0, 2, [self.batch_size, self.seq_len]).astype(self.dtype) + 0, 2, [self.batch_size, self.seq_len] + ).astype(self.dtype) attn_mask = paddle.randint( - 0, 2, [self.seq_len, self.seq_len]).astype(self.dtype) + 0, 2, [self.seq_len, self.seq_len] + ).astype(self.dtype) sdd = paddle.matmul(query, key, False, True) / math.sqrt( - float(self.head_dim)) - sdd = sdd + ( - (mask * kp_mask.unsqueeze([1, 2]) * attn_mask) - 1.0) * 1e9 + float(self.head_dim) + ) + sdd = ( + sdd + + ((mask * kp_mask.unsqueeze([1, 2]) * attn_mask) - 1.0) + * 1e9 + ) softmax = paddle.nn.functional.softmax(sdd) output = paddle.matmul(softmax, value) output.backward() output_sp = paddle.sparse.nn.functional.attention( - query_sp, key_sp, value_sp, sp_mask, kp_mask, attn_mask) + query_sp, key_sp, value_sp, sp_mask, kp_mask, attn_mask + ) output_sp.backward() else: sdd = paddle.matmul(query, key, False, True) / math.sqrt( - float(self.head_dim)) + float(self.head_dim) + ) sdd = sdd + (mask - 1.0) * 1e9 softmax = paddle.nn.functional.softmax(sdd) output = paddle.matmul(softmax, value) output.backward() output_sp = paddle.sparse.nn.functional.attention( - query_sp, key_sp, value_sp, sp_mask) + query_sp, key_sp, value_sp, sp_mask + ) output_sp.backward() - np.testing.assert_allclose(output_sp.numpy(), - output.numpy(), - rtol=1e-05) - np.testing.assert_allclose(query_sp.grad.numpy(), - query.grad.numpy(), - rtol=1e-05) - np.testing.assert_allclose(key_sp.grad.numpy(), - key.grad.numpy(), - 
rtol=1e-05) - np.testing.assert_allclose(value_sp.grad.numpy(), - value.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + output_sp.numpy(), output.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + query_sp.grad.numpy(), query.grad.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + key_sp.grad.numpy(), key.grad.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + value_sp.grad.numpy(), value.grad.numpy(), rtol=1e-05 + ) class TestSparseAttentionAPI2(TestSparseAttentionAPI1): - def setUp(self): self.batch_size = 16 self.num_heads = 16 @@ -133,7 +146,6 @@ class TestSparseAttentionAPI2(TestSparseAttentionAPI1): class TestSparseAttentionAPI3(TestSparseAttentionAPI1): - def setUp(self): self.batch_size = 16 self.num_heads = 16 @@ -144,7 +156,6 @@ class TestSparseAttentionAPI3(TestSparseAttentionAPI1): class TestSparseAttentionAPI4(TestSparseAttentionAPI1): - def setUp(self): self.batch_size = 16 self.num_heads = 16 @@ -155,7 +166,6 @@ class TestSparseAttentionAPI4(TestSparseAttentionAPI1): class TestSparseAttentionAPI5(TestSparseAttentionAPI1): - def setUp(self): self.batch_size = 16 self.num_heads = 16 diff --git a/python/paddle/fluid/tests/unittests/test_sparse_is_same_shape.py b/python/paddle/fluid/tests/unittests/test_sparse_is_same_shape.py index 8715b55d522ad9e889b18493c2a443368f58b1f9..f1c2ded049d6db306badae05c0c1d95c3c013428 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_is_same_shape.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_is_same_shape.py @@ -28,7 +28,7 @@ class TestSparseIsSameShapeAPI(unittest.TestCase): self.tensors = [ paddle.rand(self.shapes[0]), paddle.rand(self.shapes[0]), - paddle.rand(self.shapes[1]) + paddle.rand(self.shapes[1]), ] self.sparse_dim = 2 @@ -39,85 +39,136 @@ class TestSparseIsSameShapeAPI(unittest.TestCase): def test_dense_csr(self): self.assertTrue( - is_same_shape(self.tensors[0], self.tensors[1].to_sparse_csr())) + is_same_shape(self.tensors[0], self.tensors[1].to_sparse_csr()) + ) self.assertFalse( - is_same_shape(self.tensors[0], self.tensors[2].to_sparse_csr())) + is_same_shape(self.tensors[0], self.tensors[2].to_sparse_csr()) + ) self.assertFalse( - is_same_shape(self.tensors[1], self.tensors[2].to_sparse_csr())) + is_same_shape(self.tensors[1], self.tensors[2].to_sparse_csr()) + ) def test_dense_coo(self): self.assertTrue( - is_same_shape(self.tensors[0], - self.tensors[1].to_sparse_coo(self.sparse_dim))) + is_same_shape( + self.tensors[0], self.tensors[1].to_sparse_coo(self.sparse_dim) + ) + ) self.assertFalse( - is_same_shape(self.tensors[0], - self.tensors[2].to_sparse_coo(self.sparse_dim))) + is_same_shape( + self.tensors[0], self.tensors[2].to_sparse_coo(self.sparse_dim) + ) + ) self.assertFalse( - is_same_shape(self.tensors[1], - self.tensors[2].to_sparse_coo(self.sparse_dim))) + is_same_shape( + self.tensors[1], self.tensors[2].to_sparse_coo(self.sparse_dim) + ) + ) def test_csr_dense(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_csr(), self.tensors[1])) + is_same_shape(self.tensors[0].to_sparse_csr(), self.tensors[1]) + ) self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_csr(), self.tensors[2])) + is_same_shape(self.tensors[0].to_sparse_csr(), self.tensors[2]) + ) self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_csr(), self.tensors[2])) + is_same_shape(self.tensors[1].to_sparse_csr(), self.tensors[2]) + ) def test_csr_csr(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_csr(), - self.tensors[1].to_sparse_csr())) + is_same_shape( + 
self.tensors[0].to_sparse_csr(), self.tensors[1].to_sparse_csr() + ) + ) self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_csr(), - self.tensors[2].to_sparse_csr())) + is_same_shape( + self.tensors[0].to_sparse_csr(), self.tensors[2].to_sparse_csr() + ) + ) self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_csr(), - self.tensors[2].to_sparse_csr())) + is_same_shape( + self.tensors[1].to_sparse_csr(), self.tensors[2].to_sparse_csr() + ) + ) def test_csr_coo(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_csr(), - self.tensors[1].to_sparse_coo(self.sparse_dim))) - self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_csr(), - self.tensors[2].to_sparse_coo(self.sparse_dim))) - self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_csr(), - self.tensors[2].to_sparse_coo(self.sparse_dim))) + is_same_shape( + self.tensors[0].to_sparse_csr(), + self.tensors[1].to_sparse_coo(self.sparse_dim), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[0].to_sparse_csr(), + self.tensors[2].to_sparse_coo(self.sparse_dim), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[1].to_sparse_csr(), + self.tensors[2].to_sparse_coo(self.sparse_dim), + ) + ) def test_coo_dense(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[1])) + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), self.tensors[1] + ) + ) self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[2])) + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), self.tensors[2] + ) + ) self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_coo(self.sparse_dim), - self.tensors[2])) + is_same_shape( + self.tensors[1].to_sparse_coo(self.sparse_dim), self.tensors[2] + ) + ) def test_coo_csr(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[1].to_sparse_csr())) - self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[2].to_sparse_csr())) - self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_coo(self.sparse_dim), - self.tensors[2].to_sparse_csr())) + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), + self.tensors[1].to_sparse_csr(), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), + self.tensors[2].to_sparse_csr(), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[1].to_sparse_coo(self.sparse_dim), + self.tensors[2].to_sparse_csr(), + ) + ) def test_coo_coo(self): self.assertTrue( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[1].to_sparse_coo(self.sparse_dim))) - self.assertFalse( - is_same_shape(self.tensors[0].to_sparse_coo(self.sparse_dim), - self.tensors[2].to_sparse_coo(self.sparse_dim))) - self.assertFalse( - is_same_shape(self.tensors[1].to_sparse_coo(self.sparse_dim), - self.tensors[2].to_sparse_coo(self.sparse_dim))) + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), + self.tensors[1].to_sparse_coo(self.sparse_dim), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[0].to_sparse_coo(self.sparse_dim), + self.tensors[2].to_sparse_coo(self.sparse_dim), + ) + ) + self.assertFalse( + is_same_shape( + self.tensors[1].to_sparse_coo(self.sparse_dim), + self.tensors[2].to_sparse_coo(self.sparse_dim), + ) + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py 
b/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py index 34a270b0fe504509a9c57f92640c691c76ef2f57..368e9cbbd2ff2655b062a87a0de766ca9b060dde 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py @@ -59,27 +59,33 @@ class TestMatmul(unittest.TestCase): sp_y.stop_gradient = False sp_out = paddle.sparse.matmul(sp_x, sp_y) - np.testing.assert_allclose(sp_out.numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.numpy(), dense_out.numpy(), rtol=1e-05 + ) if get_cuda_version() >= 11030: dense_out.backward() sp_out.backward() - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_y.grad.numpy(), - dense_y.grad.numpy(), - rtol=1e-05) - - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11000, "only support cuda>=11.0") + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) + np.testing.assert_allclose( + sp_y.grad.numpy(), dense_y.grad.numpy(), rtol=1e-05 + ) + + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, + "only support cuda>=11.0", + ) def test_matmul_2d(self): self.check_result([16, 12], [12, 10], 'coo') self.check_result([16, 12], [12, 10], 'csr') - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11070, "only support cuda>=11.7") + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11070, + "only support cuda>=11.7", + ) def test_matmul_3d(self): self.check_result([8, 16, 12], [8, 12, 10], 'coo') self.check_result([8, 16, 12], [8, 12, 10], 'csr') @@ -87,9 +93,10 @@ class TestMatmul(unittest.TestCase): class TestMaskedMatmul(unittest.TestCase): # x: dense, y: dense, out: sparse_`csr - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11030, - "only support on cuda>=11.3") + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11030, + "only support on cuda>=11.3", + ) def test_masked_matmul_2d(self): np_mask = np.random.rand(10, 6) < 0.2 @@ -108,24 +115,25 @@ class TestMaskedMatmul(unittest.TestCase): mask = paddle.to_tensor(np.ones([10, 6]) * np_mask).to_sparse_csr() out = paddle.sparse.masked_matmul(x, y, mask) - np.testing.assert_allclose(np_out.indptr, - out.crows().numpy(), - rtol=1e-05) - np.testing.assert_allclose(np_out.indices, - out.cols().numpy(), - rtol=1e-05) - np.testing.assert_allclose(np_out.data, - out.values().numpy(), - rtol=1e-05) + np.testing.assert_allclose( + np_out.indptr, out.crows().numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + np_out.indices, out.cols().numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + np_out.data, out.values().numpy(), rtol=1e-05 + ) out.backward() np.testing.assert_allclose(out.is_sparse_csr(), True, rtol=1e-05) np.testing.assert_allclose(np_x_grad, x.grad.numpy(), rtol=1e-05) np.testing.assert_allclose(np_y_grad, y.grad.numpy(), rtol=1e-05) - @unittest.skipIf(not paddle.is_compiled_with_cuda() - or get_cuda_version() < 11070, - "only support on cuda>=11.7") + @unittest.skipIf( + not paddle.is_compiled_with_cuda() or get_cuda_version() < 11070, + "only support on cuda>=11.7", + ) def test_masked_matmul_3d(self): paddle.set_default_dtype('float32') origin_x = paddle.rand([16, 16, 12]) @@ -147,15 +155,17 @@ class TestMaskedMatmul(unittest.TestCase): sp_out = paddle.sparse.matmul(sp_x, sp_y) 
sp_out.backward() - np.testing.assert_allclose(sp_out.numpy(), - dense_out.numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_y.grad.numpy(), - dense_y.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.numpy(), dense_out.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) + np.testing.assert_allclose( + sp_y.grad.numpy(), dense_y.grad.numpy(), rtol=1e-05 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_model.py b/python/paddle/fluid/tests/unittests/test_sparse_model.py index aa3d97919c4de0b8e40e395279ab8c4b6b0fb6b6..52f37e60dfbc96b826660ad9e7061853748cdae1 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_model.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_model.py @@ -20,15 +20,16 @@ from paddle.fluid.framework import _test_eager_guard class TestGradientAdd(unittest.TestCase): - def sparse(self, sp_x): indentity = sp_x out = nn.functional.relu(sp_x) values = out.values() + indentity.values() - out = paddle.sparse.sparse_coo_tensor(out.indices(), - values, - shape=out.shape, - stop_gradient=out.stop_gradient) + out = paddle.sparse.sparse_coo_tensor( + out.indices(), + values, + shape=out.shape, + stop_gradient=out.stop_gradient, + ) return out def dense(self, x): diff --git a/python/paddle/fluid/tests/unittests/test_sparse_momentum_op.py b/python/paddle/fluid/tests/unittests/test_sparse_momentum_op.py index bcc555b9a3e8fcf6fc71d4538c477dd1749ed0b7..fefb31255247a55c5d2e275e790d93ee48452e0c 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_momentum_op.py @@ -17,16 +17,18 @@ import numpy as np from op_test import OpTest -def calculate_sparse_momentum_by_numpy(param, - grad, - mu, - velocity, - use_nesterov, - learning_rate, - index, - axis, - regularization_method=None, - regularization_coeff=1.0): +def calculate_sparse_momentum_by_numpy( + param, + grad, + mu, + velocity, + use_nesterov, + learning_rate, + index, + axis, + regularization_method=None, + regularization_coeff=1.0, +): sub_grad = grad.copy() grad = np.zeros_like(param) if axis == 0: @@ -48,8 +50,9 @@ def calculate_sparse_momentum_by_numpy(param, else: velocity_out = mu * velocity + grad if use_nesterov: - param_out = param - grad * learning_rate - \ - velocity_out * mu * learning_rate + param_out = ( + param - grad * learning_rate - velocity_out * mu * learning_rate + ) else: param_out = param - learning_rate * velocity_out @@ -57,7 +60,6 @@ def calculate_sparse_momentum_by_numpy(param, class TestSparseMomentumOp(OpTest): - def setUp(self): self.op_type = "sparse_momentum" self.dtype = np.float32 @@ -75,24 +77,31 @@ class TestSparseMomentumOp(OpTest): if self.multi_precision: assert self.dtype == np.float16 - param = np.random.random( - (self.batch_size, self.num_classes)).astype(self.dtype) - grad = np.random.random( - (self.batch_size, self.num_classes)).astype(self.dtype) + param = np.random.random((self.batch_size, self.num_classes)).astype( + self.dtype + ) + grad = np.random.random((self.batch_size, self.num_classes)).astype( + self.dtype + ) if self.axis == 0: - index = np.random.randint(0, - self.batch_size, - size=(self.batch_size // 2, ), - dtype=self.index_dtype) + index = np.random.randint( + 0, + self.batch_size, + size=(self.batch_size // 2,), + 
dtype=self.index_dtype, + ) grad = grad[index] else: - index = np.random.randint(0, - self.num_classes, - size=(self.num_classes // 2, ), - dtype=self.index_dtype) + index = np.random.randint( + 0, + self.num_classes, + size=(self.num_classes // 2,), + dtype=self.index_dtype, + ) grad = grad[:, index] - velocity = np.random.random( - (self.batch_size, self.num_classes)).astype(self.dtype) + velocity = np.random.random((self.batch_size, self.num_classes)).astype( + self.dtype + ) learning_rate = np.array([0.001]).astype(self.dtype) mu = 0.9 @@ -109,7 +118,8 @@ class TestSparseMomentumOp(OpTest): regularization_method=regularization_method, regularization_coeff=regularization_coeff, index=index, - axis=self.axis) + axis=self.axis, + ) self.attrs = { 'mu': mu, @@ -121,33 +131,35 @@ class TestSparseMomentumOp(OpTest): } self.inputs = { - 'Param': - param.astype("float16") if self.multi_precision else param, - 'Velocity': - velocity.astype("float32") if self.multi_precision else velocity, - 'LearningRate': - learning_rate.astype("float32") - if self.multi_precision else learning_rate, - 'Grad': - grad.astype("float16") if self.multi_precision else grad, - 'Index': - index, - 'Axis': - np.array(self.axis).astype(np.int32), + 'Param': param.astype("float16") if self.multi_precision else param, + 'Velocity': velocity.astype("float32") + if self.multi_precision + else velocity, + 'LearningRate': learning_rate.astype("float32") + if self.multi_precision + else learning_rate, + 'Grad': grad.astype("float16") if self.multi_precision else grad, + 'Index': index, + 'Axis': np.array(self.axis).astype(np.int32), } self.outputs = { - 'ParamOut': - param_out.astype("float16") if self.multi_precision else param_out, - 'VelocityOut': - velocity_out.astype("float32") - if self.multi_precision else velocity_out, + 'ParamOut': param_out.astype("float16") + if self.multi_precision + else param_out, + 'VelocityOut': velocity_out.astype("float32") + if self.multi_precision + else velocity_out, } if self.multi_precision: - self.inputs['MasterParam'] = param.astype( - "float32") if self.multi_precision else param - self.outputs['MasterParamOut'] = param_out.astype( - "float32") if self.multi_precision else param_out + self.inputs['MasterParam'] = ( + param.astype("float32") if self.multi_precision else param + ) + self.outputs['MasterParamOut'] = ( + param_out.astype("float32") + if self.multi_precision + else param_out + ) def init_dtype(self): pass @@ -162,45 +174,40 @@ class TestSparseMomentumOp(OpTest): pass def test_check_output(self): - self.check_output(atol=5e-3 if self.multi_precision else 1e-5, - check_eager=True) + self.check_output( + atol=5e-3 if self.multi_precision else 1e-5, check_eager=True + ) class TestSparseMomentumOpDtype1(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float32 self.index_dtype = np.int64 class TestSparseMomentumOpDtype2(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float64 self.index_dtype = np.int32 class TestSparseMomentumOpDtype3(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float64 self.index_dtype = np.int64 class TestSparseMomentumOpAxis(TestSparseMomentumOp): - def init_axis(self): self.axis = 1 class TestSparseMomentumOpNesterov(TestSparseMomentumOp): - def init_use_nesterov(self): self.use_nesterov = True class TestSparseMomentumOpMultiPrecision(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float16 self.index_dtype = np.int32 @@ -213,7 +220,6 @@ class 
TestSparseMomentumOpMultiPrecision(TestSparseMomentumOp): class TestSparseMomentumOpMultiPrecision1(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float16 self.index_dtype = np.int64 @@ -226,7 +232,6 @@ class TestSparseMomentumOpMultiPrecision1(TestSparseMomentumOp): class TestSparseMomentumOpMultiPrecision2(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float16 self.index_dtype = np.int32 @@ -239,7 +244,6 @@ class TestSparseMomentumOpMultiPrecision2(TestSparseMomentumOp): class TestSparseMomentumOpMultiPrecision3(TestSparseMomentumOp): - def init_dtype(self): self.dtype = np.float16 self.index_dtype = np.int64 diff --git a/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py b/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py index e532063ad8faf6f5f84a686bdd266a2801c501ec..ae04ddc7a487ae4342b48829b862e67a9dfa8657 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py @@ -37,7 +37,8 @@ def get_cuda_version(): @unittest.skipIf( not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, - "paddle is not compiled with CUDA and cuda version need to >= 11.0") + "paddle is not compiled with CUDA and cuda version need to >= 11.0", +) class TestCsrMv(unittest.TestCase): # x: csr-matrix, y: dense-vec, out: dense-vec def test_mv(self): @@ -62,20 +63,23 @@ class TestCsrMv(unittest.TestCase): sp_out = paddle.sparse.mv(sp_x, sp_vec) sp_out.backward() - np.testing.assert_allclose(sp_out.numpy(), - dense_out.numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_vec.grad.numpy(), - dense_vec.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.numpy(), dense_out.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) + np.testing.assert_allclose( + sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05 + ) @unittest.skipIf( not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, - "paddle is not compiled with CUDA and cuda version need to >= 11.0") + "paddle is not compiled with CUDA and cuda version need to >= 11.0", +) class TestCooMv(unittest.TestCase): # x: csr-matrix, y: dense-vec, out: dense-vec def test_mv(self): @@ -100,15 +104,17 @@ class TestCooMv(unittest.TestCase): sp_out = paddle.sparse.mv(sp_x, sp_vec) sp_out.backward() - np.testing.assert_allclose(sp_out.numpy(), - dense_out.numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) - np.testing.assert_allclose(sp_vec.grad.numpy(), - dense_vec.grad.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.numpy(), dense_out.numpy(), rtol=1e-05 + ) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) + np.testing.assert_allclose( + sp_vec.grad.numpy(), dense_vec.grad.numpy(), rtol=1e-05 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_norm_op.py b/python/paddle/fluid/tests/unittests/test_sparse_norm_op.py index 503f171bdc3e9886bb9a537e22bcfe6683c9aa88..4f48b08e9f8cba4a56cf320abf960ede1a2abd52 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_norm_op.py @@ -22,13 +22,12 @@ import copy class TestSparseBatchNorm(unittest.TestCase): - def test(self): 
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) paddle.seed(0) channels = 4 shape = [2, 3, 6, 6, channels] - #there is no zero in dense_x + # there is no zero in dense_x dense_x = paddle.randn(shape) dense_x.stop_gradient = False @@ -48,17 +47,21 @@ class TestSparseBatchNorm(unittest.TestCase): sparse_y = sparse_batch_norm(sparse_x) # compare the result with dense batch_norm - assert np.allclose(dense_y.flatten().numpy(), - sparse_y.values().flatten().numpy(), - atol=1e-5, - rtol=1e-5) + assert np.allclose( + dense_y.flatten().numpy(), + sparse_y.values().flatten().numpy(), + atol=1e-5, + rtol=1e-5, + ) # test backward sparse_y.backward(sparse_y) - assert np.allclose(dense_x.grad.flatten().numpy(), - sparse_x.grad.values().flatten().numpy(), - atol=1e-5, - rtol=1e-5) + assert np.allclose( + dense_x.grad.flatten().numpy(), + sparse_x.grad.values().flatten().numpy(), + atol=1e-5, + rtol=1e-5, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_error_layout(self): @@ -66,8 +69,9 @@ class TestSparseBatchNorm(unittest.TestCase): shape = [2, 3, 6, 6, 3] x = paddle.randn(shape) sparse_x = x.to_sparse_coo(4) - sparse_batch_norm = paddle.sparse.nn.BatchNorm(3, - data_format='NCDHW') + sparse_batch_norm = paddle.sparse.nn.BatchNorm( + 3, data_format='NCDHW' + ) sparse_batch_norm(sparse_x) def test2(self): @@ -86,10 +90,10 @@ class TestSparseBatchNorm(unittest.TestCase): class TestSyncBatchNorm(unittest.TestCase): - def test_sync_batch_norm(self): - x = np.array([[[[0.3, 0.4], [0.3, 0.07]], - [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + x = np.array( + [[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]] + ).astype('float32') x = paddle.to_tensor(x) sparse_x = x.to_sparse_coo(len(x.shape) - 1) @@ -100,18 +104,24 @@ class TestSyncBatchNorm(unittest.TestCase): dense_sync_bn = paddle.nn.SyncBatchNorm(2) x = x.reshape((-1, x.shape[-1])) dense_hidden = dense_sync_bn(x) - assert np.allclose(sparse_hidden.values().numpy(), - dense_hidden.numpy()) + assert np.allclose( + sparse_hidden.values().numpy(), dense_hidden.numpy() + ) def test_convert(self): - base_model = paddle.nn.Sequential(nn.Conv3D(3, 5, 3), nn.BatchNorm(5), - nn.BatchNorm(5)) + base_model = paddle.nn.Sequential( + nn.Conv3D(3, 5, 3), nn.BatchNorm(5), nn.BatchNorm(5) + ) model = paddle.nn.Sequential( - nn.Conv3D(3, 5, 3), nn.BatchNorm(5), - nn.BatchNorm(5, - weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'))) + nn.Conv3D(3, 5, 3), + nn.BatchNorm(5), + nn.BatchNorm( + 5, + weight_attr=fluid.ParamAttr(name='bn.scale'), + bias_attr=fluid.ParamAttr(name='bn.bias'), + ), + ) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) for idx, sublayer in enumerate(base_model.sublayers()): if isinstance(sublayer, nn.BatchNorm): @@ -119,15 +129,14 @@ class TestSyncBatchNorm(unittest.TestCase): class TestStatic(unittest.TestCase): - def test(self): paddle.enable_static() - indices = paddle.static.data(name='indices', - shape=[4, 4], - dtype='int32') - values = paddle.static.data(name='values', - shape=[4, 1], - dtype='float32') + indices = paddle.static.data( + name='indices', shape=[4, 4], dtype='int32' + ) + values = paddle.static.data( + name='values', shape=[4, 1], dtype='float32' + ) channels = 1 dense_shape = [1, 1, 3, 4, channels] sp_x = sparse.sparse_coo_tensor(indices, values, dense_shape) @@ -144,20 +153,29 @@ class TestStatic(unittest.TestCase): mean_data = np.array([1.0]).astype('float32') variance_data = np.array([2.0]).astype('float32') - fetch = 
exe.run(feed={ - 'indices': indices_data, - 'values': values_data, - 'batch_norm_0.b_0': bias_data, - 'batch_norm_0.w_0': weight_data, - 'batch_norm_0.w_1': mean_data, - 'batch_norm_0.w_2': variance_data - }, - fetch_list=[out], - return_numpy=True) - correct_out = np.array([[[[[0.0], [-1.6832708], [0.0], [0.1055764]], - [[0.0], [0.0], [1.8944236], [0.0]], - [[0.0], [0.0], [0.0], - [3.683271]]]]]).astype('float32') + fetch = exe.run( + feed={ + 'indices': indices_data, + 'values': values_data, + 'batch_norm_0.b_0': bias_data, + 'batch_norm_0.w_0': weight_data, + 'batch_norm_0.w_1': mean_data, + 'batch_norm_0.w_2': variance_data, + }, + fetch_list=[out], + return_numpy=True, + ) + correct_out = np.array( + [ + [ + [ + [[0.0], [-1.6832708], [0.0], [0.1055764]], + [[0.0], [0.0], [1.8944236], [0.0]], + [[0.0], [0.0], [0.0], [3.683271]], + ] + ] + ] + ).astype('float32') np.testing.assert_allclose(correct_out, fetch[0], rtol=1e-5) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py b/python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py index 998b5be9bb180c6aade3dd5fd056d178cc4d13e3..5425ebb6ea98db63e07ccb235206825087fe3951 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_pooling_op.py @@ -20,7 +20,6 @@ import copy class TestMaxPool3DFunc(unittest.TestCase): - def setInput(self): paddle.seed(0) self.dense_x = paddle.randn((1, 4, 4, 4, 4)) @@ -49,31 +48,32 @@ class TestMaxPool3DFunc(unittest.TestCase): sparse_x, self.kernel_sizes, stride=self.strides, - padding=self.paddings) + padding=self.paddings, + ) out = sparse_out.to_dense() out.backward(out) dense_x = copy.deepcopy(self.dense_x) - dense_out = paddle.nn.functional.max_pool3d(dense_x, - self.kernel_sizes, - stride=self.strides, - padding=self.paddings, - data_format='NDHWC') + dense_out = paddle.nn.functional.max_pool3d( + dense_x, + self.kernel_sizes, + stride=self.strides, + padding=self.paddings, + data_format='NDHWC', + ) dense_out.backward(dense_out) - #compare with dense + # compare with dense assert np.allclose(dense_out.numpy(), out.numpy()) assert np.allclose(dense_x.grad.numpy(), self.dense_x.grad.numpy()) class TestStride(TestMaxPool3DFunc): - def setStride(self): self.strides = 1 class TestPadding(TestMaxPool3DFunc): - def setPadding(self): self.paddings = 1 @@ -82,7 +82,6 @@ class TestPadding(TestMaxPool3DFunc): class TestKernelSize(TestMaxPool3DFunc): - def setKernelSize(self): self.kernel_sizes = [5, 5, 5] @@ -92,7 +91,6 @@ class TestKernelSize(TestMaxPool3DFunc): class TestInput(TestMaxPool3DFunc): - def setInput(self): paddle.seed(0) self.dense_x = paddle.randn((2, 6, 7, 9, 3)) @@ -101,19 +99,19 @@ class TestInput(TestMaxPool3DFunc): class TestMaxPool3DAPI(unittest.TestCase): - def test(self): with _test_eager_guard(): dense_x = paddle.randn((2, 3, 6, 6, 3)) sparse_x = dense_x.to_sparse_coo(4) - max_pool3d = paddle.sparse.nn.MaxPool3D(kernel_size=3, - data_format='NDHWC') + max_pool3d = paddle.sparse.nn.MaxPool3D( + kernel_size=3, data_format='NDHWC' + ) out = max_pool3d(sparse_x) out = out.to_dense() - dense_out = paddle.nn.functional.max_pool3d(dense_x, - 3, - data_format='NDHWC') + dense_out = paddle.nn.functional.max_pool3d( + dense_x, 3, data_format='NDHWC' + ) assert np.allclose(dense_out.numpy(), out.numpy()) diff --git a/python/paddle/fluid/tests/unittests/test_sparse_reshape_op.py b/python/paddle/fluid/tests/unittests/test_sparse_reshape_op.py index 
01619f76dc9481849e82709d4bcb37b533497030..fd748c4dcb2c9abebc1a7874f84c5d2517b78d82 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_reshape_op.py @@ -42,25 +42,27 @@ class TestReshape(unittest.TestCase): dense_out = paddle.reshape(dense_x, new_shape) if format == "coo": - sp_x = paddle.to_tensor(np_x, - place=paddle.CPUPlace()).to_sparse_coo( - len(x_shape)) + sp_x = paddle.to_tensor( + np_x, place=paddle.CPUPlace() + ).to_sparse_coo(len(x_shape)) else: - sp_x = paddle.to_tensor(np_x, - place=paddle.CPUPlace()).to_sparse_csr() + sp_x = paddle.to_tensor( + np_x, place=paddle.CPUPlace() + ).to_sparse_csr() sp_x.stop_gradient = False sp_out = paddle.sparse.reshape(sp_x, new_shape) - np.testing.assert_allclose(sp_out.to_dense().numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05 + ) dense_out.backward() sp_out.backward() - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - dense_x.grad.numpy() * - np_x.astype('bool').astype('int'), - rtol=1e-05) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + dense_x.grad.numpy() * np_x.astype('bool').astype('int'), + rtol=1e-05, + ) # check gpu kernel if paddle.device.is_compiled_with_cuda(): @@ -70,28 +72,35 @@ class TestReshape(unittest.TestCase): if format == "coo": sp_x = paddle.to_tensor( - np_x, place=paddle.CUDAPlace(0)).to_sparse_coo(len(x_shape)) + np_x, place=paddle.CUDAPlace(0) + ).to_sparse_coo(len(x_shape)) else: sp_x = paddle.to_tensor( - np_x, place=paddle.CUDAPlace(0)).to_sparse_csr() + np_x, place=paddle.CUDAPlace(0) + ).to_sparse_csr() sp_x.stop_gradient = False sp_out = paddle.sparse.reshape(sp_x, new_shape) - np.testing.assert_allclose(sp_out.to_dense().numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05 + ) dense_out.backward() sp_out.backward() - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - dense_x.grad.numpy() * - np_x.astype('bool').astype('int'), - rtol=1e-05) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + dense_x.grad.numpy() * np_x.astype('bool').astype('int'), + rtol=1e-05, + ) def test_reshape_2d(self): - self.check_result([2, 5], [ - 10, - ], 'coo') + self.check_result( + [2, 5], + [ + 10, + ], + 'coo', + ) self.check_result([12, 5], [15, 4], 'coo') self.check_result([10, 5], [2, 25], 'csr') diff --git a/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py b/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py index 251f6cce1cec66e734c507996e6e00a9ed672d28..ef1f672047fb36fa39dbad31a99aa3b74cced06f 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py @@ -23,7 +23,6 @@ np.random.seed(2022) class TestCsrSoftmax(unittest.TestCase): - def test_softmax2d(self): with _test_eager_guard(): mask = np.random.rand(16, 128) < 0.5 @@ -46,12 +45,12 @@ class TestCsrSoftmax(unittest.TestCase): csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr() m = paddle.sparse.nn.Softmax() out = m(csr) - np.testing.assert_allclose(out.crows().numpy(), - np_csr.indptr, - rtol=1e-05) - np.testing.assert_allclose(out.cols().numpy(), - np_csr.indices, - rtol=1e-05) + np.testing.assert_allclose( + out.crows().numpy(), np_csr.indptr, rtol=1e-05 + ) + np.testing.assert_allclose( + out.cols().numpy(), np_csr.indices, rtol=1e-05 + ) 
np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05) # dx = (dout - sum(dout * out)) * out, dout=rand_x @@ -67,15 +66,15 @@ class TestCsrSoftmax(unittest.TestCase): sum = np.sum(dout * out, keepdims=True) dx = np.concatenate([dx, (dout - sum) * out]) - np.testing.assert_allclose(csr.grad.crows().numpy(), - np_csr.indptr, - rtol=1e-05) - np.testing.assert_allclose(csr.grad.cols().numpy(), - np_csr.indices, - rtol=1e-05) - np.testing.assert_allclose(csr.grad.values().numpy(), - dx, - rtol=1e-05) + np.testing.assert_allclose( + csr.grad.crows().numpy(), np_csr.indptr, rtol=1e-05 + ) + np.testing.assert_allclose( + csr.grad.cols().numpy(), np_csr.indices, rtol=1e-05 + ) + np.testing.assert_allclose( + csr.grad.values().numpy(), dx, rtol=1e-05 + ) def test_softmax3d(self): with _test_eager_guard(): @@ -88,7 +87,9 @@ class TestCsrSoftmax(unittest.TestCase): for i in range(batchNum): np_csr = sp.csr_matrix(np_x[i, :, :]) row_number = np_csr.shape[0] - for j in range(row_number, ): + for j in range( + row_number, + ): start = np_csr.indptr[j] end = np_csr.indptr[j + 1] if start == end: @@ -118,15 +119,15 @@ class TestCsrSoftmax(unittest.TestCase): if start == end: continue dout = np_csr.data[start:end] - out = np_out[batch_offset + start:batch_offset + end] + out = np_out[batch_offset + start : batch_offset + end] sum = np.sum(dout * out, keepdims=True) dx = np.concatenate([dx, (dout - sum) * out]) batch_offset += np_csr.nnz - np.testing.assert_allclose(csr.grad.values().numpy(), - dx, - rtol=1e-05) + np.testing.assert_allclose( + csr.grad.values().numpy(), dx, rtol=1e-05 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py b/python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py index 12d1fd9a8b770d490e658f1299d125bae9ee937b..06f221bfe6c81a1194ed01a9f8480c75c8e7e20d 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_transpose_op.py @@ -37,14 +37,16 @@ class TestTranspose(unittest.TestCase): sp_x.stop_gradient = False sp_out = paddle.sparse.transpose(sp_x, dims) - np.testing.assert_allclose(sp_out.to_dense().numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05 + ) dense_out.backward() sp_out.backward() - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05, + ) def test_transpose_2d(self): self.check_result([2, 5], [0, 1], 'coo') @@ -69,8 +71,9 @@ class TestTranspose(unittest.TestCase): def test_transpose_nd(self): self.check_result([8, 3, 4, 4, 5, 3], [5, 3, 4, 1, 0, 2], 'coo') # Randint now only supports access to dimension 0 to 9. 
- self.check_result([2, 3, 4, 2, 3, 4, 2, 3, 4], - [2, 3, 4, 5, 6, 7, 8, 0, 1], 'coo') + self.check_result( + [2, 3, 4, 2, 3, 4, 2, 3, 4], [2, 3, 4, 5, 6, 7, 8, 0, 1], 'coo' + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py b/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py index fc39ab973c009d15f5dc85fdbe324ea22d9a1454..dd50608050e7796336ba1d272b786320f86cbd8c 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestSparseUnary(unittest.TestCase): - def to_sparse(self, x, format): if format == 'coo': return x.detach().to_sparse_coo(sparse_dim=x.ndim) @@ -63,18 +62,18 @@ class TestSparseUnary(unittest.TestCase): dense_out.backward() # compare forward - np.testing.assert_allclose(sp_out.to_dense().numpy(), - dense_out.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + sp_out.to_dense().numpy(), dense_out.numpy(), rtol=1e-05 + ) # compare backward if dense_func == paddle.sqrt: - expect_grad = np.nan_to_num(dense_x.grad.numpy(), 0., 0., 0.) + expect_grad = np.nan_to_num(dense_x.grad.numpy(), 0.0, 0.0, 0.0) else: expect_grad = (dense_x.grad * mask).numpy() - np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), - expect_grad, - rtol=1e-05) + np.testing.assert_allclose( + sp_x.grad.to_dense().numpy(), expect_grad, rtol=1e-05 + ) def compare_with_dense(self, dense_func, sparse_func): self.check_result(dense_func, sparse_func, 'coo') @@ -84,8 +83,9 @@ class TestSparseUnary(unittest.TestCase): self.check_result(dense_func, sparse_func, 'coo', attr1) self.check_result(dense_func, sparse_func, 'csr', attr1) - def compare_with_dense_two_attr(self, dense_func, sparse_func, attr1, - attr2): + def compare_with_dense_two_attr( + self, dense_func, sparse_func, attr1, attr2 + ): self.check_result(dense_func, sparse_func, 'coo', attr1, attr2) self.check_result(dense_func, sparse_func, 'csr', attr1, attr2) @@ -129,8 +129,9 @@ class TestSparseUnary(unittest.TestCase): self.compare_with_dense(paddle.nn.ReLU6(), paddle.sparse.nn.ReLU6()) def test_sparse_leaky_relu(self): - self.compare_with_dense(paddle.nn.LeakyReLU(0.1), - paddle.sparse.nn.LeakyReLU(0.1)) + self.compare_with_dense( + paddle.nn.LeakyReLU(0.1), paddle.sparse.nn.LeakyReLU(0.1) + ) def test_sparse_abs(self): self.compare_with_dense(paddle.abs, paddle.sparse.abs) @@ -151,18 +152,22 @@ class TestSparseUnary(unittest.TestCase): self.compare_with_dense_one_attr(paddle.pow, paddle.sparse.pow, 3) def test_sparse_mul_scalar(self): - self.compare_with_dense_one_attr(paddle.Tensor.__mul__, - paddle.sparse.multiply, 3) + self.compare_with_dense_one_attr( + paddle.Tensor.__mul__, paddle.sparse.multiply, 3 + ) def test_sparse_div_scalar(self): - self.compare_with_dense_one_attr(paddle.Tensor.__div__, - paddle.sparse.divide, 2) + self.compare_with_dense_one_attr( + paddle.Tensor.__div__, paddle.sparse.divide, 2 + ) def test_sparse_cast(self): - self.compare_with_dense_two_attr(paddle.cast, paddle.sparse.cast, - 'int32', 'float32') - self.compare_with_dense_two_attr(paddle.cast, paddle.sparse.cast, - 'int32', 'float64') + self.compare_with_dense_two_attr( + paddle.cast, paddle.sparse.cast, 'int32', 'float32' + ) + self.compare_with_dense_two_attr( + paddle.cast, paddle.sparse.cast, 'int32', 'float64' + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py 
b/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py index 700c0795a0d1ff0d27e50fd276cc01f4e8a77944..8659491da2e9ae33fb2b21fd04776c9ddceb6369 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_utils_op.py @@ -23,7 +23,6 @@ devices = ['cpu', 'gpu'] class TestSparseCreate(unittest.TestCase): - def test_create_coo_by_tensor(self): with _test_eager_guard(): indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] @@ -31,10 +30,9 @@ class TestSparseCreate(unittest.TestCase): dense_shape = [3, 4] dense_indices = paddle.to_tensor(indices) dense_elements = paddle.to_tensor(values, dtype='float32') - coo = paddle.sparse.sparse_coo_tensor(dense_indices, - dense_elements, - dense_shape, - stop_gradient=False) + coo = paddle.sparse.sparse_coo_tensor( + dense_indices, dense_elements, dense_shape, stop_gradient=False + ) # test the to_string.py assert np.array_equal(indices, coo.indices().numpy()) assert np.array_equal(values, coo.values().numpy()) @@ -59,11 +57,13 @@ class TestSparseCreate(unittest.TestCase): dense_cols = paddle.to_tensor(cols) dense_elements = paddle.to_tensor(values, dtype='float32') stop_gradient = False - csr = paddle.sparse.sparse_csr_tensor(dense_crows, - dense_cols, - dense_elements, - dense_shape, - stop_gradient=stop_gradient) + csr = paddle.sparse.sparse_csr_tensor( + dense_crows, + dense_cols, + dense_elements, + dense_shape, + stop_gradient=stop_gradient, + ) def test_create_csr_by_np(self): with _test_eager_guard(): @@ -71,8 +71,9 @@ class TestSparseCreate(unittest.TestCase): cols = [1, 3, 2, 0, 1] values = [1, 2, 3, 4, 5] dense_shape = [3, 4] - csr = paddle.sparse.sparse_csr_tensor(crows, cols, values, - dense_shape) + csr = paddle.sparse.sparse_csr_tensor( + crows, cols, values, dense_shape + ) # test the to_string.py assert np.array_equal(5, csr.nnz()) assert np.array_equal(crows, csr.crows().numpy()) @@ -85,10 +86,9 @@ class TestSparseCreate(unittest.TestCase): indices = [[0, 1], [0, 1]] values = [1.0, 2.0] dense_shape = [2, 2] - coo = paddle.sparse.sparse_coo_tensor(indices, - values, - dense_shape, - place=place) + coo = paddle.sparse.sparse_coo_tensor( + indices, values, dense_shape, place=place + ) assert coo.place.is_cpu_place() assert coo.values().place.is_cpu_place() assert coo.indices().place.is_cpu_place() @@ -96,10 +96,9 @@ class TestSparseCreate(unittest.TestCase): crows = [0, 2, 3, 5] cols = [1, 3, 2, 0, 1] values = [1.0, 2.0, 3.0, 4.0, 5.0] - csr = paddle.sparse.sparse_csr_tensor(crows, - cols, - values, [3, 5], - place=place) + csr = paddle.sparse.sparse_csr_tensor( + crows, cols, values, [3, 5], place=place + ) assert csr.place.is_cpu_place() assert csr.crows().place.is_cpu_place() assert csr.cols().place.is_cpu_place() @@ -112,19 +111,17 @@ class TestSparseCreate(unittest.TestCase): dense_shape = [2, 2] indices = paddle.to_tensor(indices, dtype='int32') values = paddle.to_tensor(values, dtype='float32') - coo = paddle.sparse.sparse_coo_tensor(indices, - values, - dense_shape, - dtype='float64') + coo = paddle.sparse.sparse_coo_tensor( + indices, values, dense_shape, dtype='float64' + ) assert coo.dtype == paddle.float64 crows = [0, 2, 3, 5] cols = [1, 3, 2, 0, 1] values = [1.0, 2.0, 3.0, 4.0, 5.0] - csr = paddle.sparse.sparse_csr_tensor(crows, - cols, - values, [3, 5], - dtype='float16') + csr = paddle.sparse.sparse_csr_tensor( + crows, cols, values, [3, 5], dtype='float16' + ) assert csr.dtype == paddle.float16 def test_create_coo_no_shape(self): @@ -138,7 +135,6 @@ class 
TestSparseCreate(unittest.TestCase): class TestSparseConvert(unittest.TestCase): - def test_to_sparse_coo(self): with _test_eager_guard(): x = [[0, 1, 0, 2], [0, 0, 3, 0], [4, 5, 0, 0]] @@ -148,17 +144,19 @@ class TestSparseConvert(unittest.TestCase): out = dense_x.to_sparse_coo(2) assert np.array_equal(out.indices().numpy(), indices) assert np.array_equal(out.values().numpy(), values) - #test to_sparse_coo_grad backward + # test to_sparse_coo_grad backward out_grad_indices = [[0, 1], [0, 1]] out_grad_values = [2.0, 3.0] out_grad = paddle.sparse.sparse_coo_tensor( paddle.to_tensor(out_grad_indices), paddle.to_tensor(out_grad_values), shape=out.shape, - stop_gradient=True) + stop_gradient=True, + ) out.backward(out_grad) - assert np.array_equal(dense_x.grad.numpy(), - out_grad.to_dense().numpy()) + assert np.array_equal( + dense_x.grad.numpy(), out_grad.to_dense().numpy() + ) def test_coo_to_dense(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -171,27 +169,34 @@ class TestSparseConvert(unittest.TestCase): paddle.to_tensor(indices, dtype=indices_dtype), paddle.to_tensor(values), shape=[3, 4], - stop_gradient=False) + stop_gradient=False, + ) dense_tensor = sparse_x.to_dense() - #test to_dense_grad backward - out_grad = [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]] + # test to_dense_grad backward + out_grad = [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ] dense_tensor.backward(paddle.to_tensor(out_grad)) - #mask the out_grad by sparse_x.indices() + # mask the out_grad by sparse_x.indices() correct_x_grad = [2.0, 4.0, 7.0, 9.0, 10.0] - assert np.array_equal(correct_x_grad, - sparse_x.grad.values().numpy()) + assert np.array_equal( + correct_x_grad, sparse_x.grad.values().numpy() + ) paddle.device.set_device("cpu") sparse_x_cpu = paddle.sparse.sparse_coo_tensor( paddle.to_tensor(indices, dtype=indices_dtype), paddle.to_tensor(values), shape=[3, 4], - stop_gradient=False) + stop_gradient=False, + ) dense_tensor_cpu = sparse_x_cpu.to_dense() dense_tensor_cpu.backward(paddle.to_tensor(out_grad)) - assert np.array_equal(correct_x_grad, - sparse_x_cpu.grad.values().numpy()) + assert np.array_equal( + correct_x_grad, sparse_x_cpu.grad.values().numpy() + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_to_sparse_csr(self): @@ -218,23 +223,35 @@ class TestSparseConvert(unittest.TestCase): paddle.to_tensor(indices), paddle.to_tensor(values), shape=[3, 4], - stop_gradient=False) + stop_gradient=False, + ) values_tensor = sparse_x.values() out_grad = [2.0, 3.0, 5.0, 8.0, 9.0] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]] - values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], - [5.0, 5.0]] + values = [ + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ] sparse_x = paddle.sparse.sparse_coo_tensor( paddle.to_tensor(indices), paddle.to_tensor(values), shape=[3, 4, 2], - stop_gradient=False) + stop_gradient=False, + ) values_tensor = sparse_x.values() - out_grad = [[2.0, 2.0], [3.0, 3.0], [5.0, 5.0], [8.0, 8.0], - [9.0, 9.0]] + out_grad = [ + [2.0, 2.0], + [3.0, 3.0], + [5.0, 5.0], + [8.0, 8.0], + [9.0, 9.0], + ] # test coo_values_grad values_tensor.backward(paddle.to_tensor(out_grad)) assert np.array_equal(out_grad, sparse_x.grad.values().numpy()) @@ -243,51 +260,59 @@ class TestSparseConvert(unittest.TestCase): def 
test_sparse_coo_tensor_grad(self): with _test_eager_guard(): for device in devices: - if device == 'cpu' or (device == 'gpu' - and paddle.is_compiled_with_cuda()): + if device == 'cpu' or ( + device == 'gpu' and paddle.is_compiled_with_cuda() + ): paddle.device.set_device(device) indices = [[0, 1], [0, 1]] values = [1, 2] indices = paddle.to_tensor(indices, dtype='int32') - values = paddle.to_tensor(values, - dtype='float32', - stop_gradient=False) + values = paddle.to_tensor( + values, dtype='float32', stop_gradient=False + ) sparse_x = paddle.sparse.sparse_coo_tensor( - indices, values, shape=[2, 2], stop_gradient=False) + indices, values, shape=[2, 2], stop_gradient=False + ) grad_indices = [[0, 1], [1, 1]] grad_values = [2, 3] grad_indices = paddle.to_tensor(grad_indices, dtype='int32') grad_values = paddle.to_tensor(grad_values, dtype='float32') sparse_out_grad = paddle.sparse.sparse_coo_tensor( - grad_indices, grad_values, shape=[2, 2]) + grad_indices, grad_values, shape=[2, 2] + ) sparse_x.backward(sparse_out_grad) correct_values_grad = [0, 3] - assert np.array_equal(correct_values_grad, - values.grad.numpy()) + assert np.array_equal( + correct_values_grad, values.grad.numpy() + ) # test the non-zero values is a vector values = [[1, 1], [2, 2]] - values = paddle.to_tensor(values, - dtype='float32', - stop_gradient=False) + values = paddle.to_tensor( + values, dtype='float32', stop_gradient=False + ) sparse_x = paddle.sparse.sparse_coo_tensor( - indices, values, shape=[2, 2, 2], stop_gradient=False) + indices, values, shape=[2, 2, 2], stop_gradient=False + ) grad_values = [[2, 2], [3, 3]] grad_values = paddle.to_tensor(grad_values, dtype='float32') sparse_out_grad = paddle.sparse.sparse_coo_tensor( - grad_indices, grad_values, shape=[2, 2, 2]) + grad_indices, grad_values, shape=[2, 2, 2] + ) sparse_x.backward(sparse_out_grad) correct_values_grad = [[0, 0], [3, 3]] - assert np.array_equal(correct_values_grad, - values.grad.numpy()) + assert np.array_equal( + correct_values_grad, values.grad.numpy() + ) def test_sparse_coo_tensor_sorted(self): with _test_eager_guard(): for device in devices: - if device == 'cpu' or (device == 'gpu' - and paddle.is_compiled_with_cuda()): + if device == 'cpu' or ( + device == 'gpu' and paddle.is_compiled_with_cuda() + ): paddle.device.set_device(device) - #test unsorted and duplicate indices + # test unsorted and duplicate indices indices = [[1, 0, 0], [0, 1, 1]] values = [1.0, 2.0, 3.0] indices = paddle.to_tensor(indices, dtype='int32') @@ -296,10 +321,12 @@ class TestSparseConvert(unittest.TestCase): sparse_x = paddle.sparse.coalesce(sparse_x) indices_sorted = [[0, 1], [1, 0]] values_sorted = [5.0, 1.0] - assert np.array_equal(indices_sorted, - sparse_x.indices().numpy()) - assert np.array_equal(values_sorted, - sparse_x.values().numpy()) + assert np.array_equal( + indices_sorted, sparse_x.indices().numpy() + ) + assert np.array_equal( + values_sorted, sparse_x.values().numpy() + ) # test the non-zero values is a vector values = [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]] @@ -307,10 +334,12 @@ class TestSparseConvert(unittest.TestCase): sparse_x = paddle.sparse.sparse_coo_tensor(indices, values) sparse_x = paddle.sparse.coalesce(sparse_x) values_sorted = [[5.0, 5.0], [1.0, 1.0]] - assert np.array_equal(indices_sorted, - sparse_x.indices().numpy()) - assert np.array_equal(values_sorted, - sparse_x.values().numpy()) + assert np.array_equal( + indices_sorted, sparse_x.indices().numpy() + ) + assert np.array_equal( + values_sorted, sparse_x.values().numpy() + ) 
def test_batch_csr(self): with _test_eager_guard(): @@ -326,7 +355,7 @@ class TestSparseConvert(unittest.TestCase): dense_x = paddle.nn.functional.dropout(dense_x, p=0.5) verify(dense_x) - #test batchs=1 + # test batchs=1 shape[0] = 1 dense_x = paddle.randn(shape) dense_x = paddle.nn.functional.dropout(dense_x, p=0.5) @@ -335,23 +364,22 @@ class TestSparseConvert(unittest.TestCase): shape = np.random.randint(low=3, high=10, size=3) shape = list(shape) dense_x = paddle.randn(shape) - #set the 0th batch to zero + # set the 0th batch to zero dense_x[0] = 0 verify(dense_x) dense_x = paddle.randn(shape) - #set the 1th batch to zero + # set the 1th batch to zero dense_x[1] = 0 verify(dense_x) dense_x = paddle.randn(shape) - #set the 2th batch to zero + # set the 2th batch to zero dense_x[2] = 0 verify(dense_x) class TestCooError(unittest.TestCase): - def test_small_shape(self): with _test_eager_guard(): with self.assertRaises(ValueError): @@ -359,9 +387,9 @@ class TestCooError(unittest.TestCase): values = [1, 2] # 1. the shape too small dense_shape = [2, 2] - sparse_x = paddle.sparse.sparse_coo_tensor(indices, - values, - shape=dense_shape) + sparse_x = paddle.sparse.sparse_coo_tensor( + indices, values, shape=dense_shape + ) def test_same_nnz(self): with _test_eager_guard(): @@ -377,9 +405,9 @@ class TestCooError(unittest.TestCase): indices = [[1, 2], [1, 0]] values = [1, 2, 3] shape = [2, 3, 4] - sparse_x = paddle.sparse.sparse_coo_tensor(indices, - values, - shape=shape) + sparse_x = paddle.sparse.sparse_coo_tensor( + indices, values, shape=shape + ) def test_indices_dtype(self): with _test_eager_guard(): @@ -390,7 +418,6 @@ class TestCooError(unittest.TestCase): class TestCsrError(unittest.TestCase): - def test_dimension1(self): with _test_eager_guard(): with self.assertRaises(ValueError): @@ -399,7 +426,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3] shape = [3] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_dimension2(self): with _test_eager_guard(): @@ -409,7 +437,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3] shape = [3, 3, 3, 3] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_same_shape1(self): with _test_eager_guard(): @@ -419,7 +448,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3] shape = [3, 4] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_same_shape2(self): with _test_eager_guard(): @@ -429,7 +459,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3, 4] shape = [3, 4] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_same_shape3(self): with _test_eager_guard(): @@ -439,7 +470,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3, 4, 0, 1, 2] shape = [2, 3, 4] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_crows_first_value(self): with _test_eager_guard(): @@ -449,7 +481,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3] shape = [3, 4] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, shape + ) def test_dtype(self): with _test_eager_guard(): @@ -459,7 +492,8 @@ class TestCsrError(unittest.TestCase): values = [1, 2, 3] shape = [3] sparse_x = paddle.sparse.sparse_csr_tensor( - crows, cols, values, shape) + crows, cols, values, 
shape + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_spawn_and_init_parallel_env.py b/python/paddle/fluid/tests/unittests/test_spawn_and_init_parallel_env.py index 1b230df912879726fa2c6358a2da5679158605f7..e05b1cdaaf378f4197e18e6fcf8e090d007bd6d6 100644 --- a/python/paddle/fluid/tests/unittests/test_spawn_and_init_parallel_env.py +++ b/python/paddle/fluid/tests/unittests/test_spawn_and_init_parallel_env.py @@ -17,7 +17,11 @@ import unittest import paddle import paddle.distributed as dist -from paddle.distributed.spawn import _get_subprocess_env_list, _options_valid_check, _get_default_nprocs +from paddle.distributed.spawn import ( + _get_subprocess_env_list, + _options_valid_check, + _get_default_nprocs, +) from paddle.fluid import core from paddle.fluid.dygraph import parallel_helper @@ -28,10 +32,10 @@ import multiprocessing # executed in the python3 sub-process. -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestInitParallelEnv(unittest.TestCase): - def test_check_env_failed(self): os.environ['FLAGS_selected_gpus'] = '0' os.environ['PADDLE_TRAINER_ID'] = '0' @@ -51,10 +55,10 @@ class TestInitParallelEnv(unittest.TestCase): self.assertFalse(parallel_helper._is_parallel_ctx_initialized()) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestSpawnAssistMethod(unittest.TestCase): - def test_nprocs_greater_than_device_num_error(self): with self.assertRaises(RuntimeError): _get_subprocess_env_list(nprocs=100, options=dict()) diff --git a/python/paddle/fluid/tests/unittests/test_spectral_norm_op.py b/python/paddle/fluid/tests/unittests/test_spectral_norm_op.py index c310819bf185163c69c4d51742c023ad968ccbc7..5cff9628d8dd031b48394edc3a3ab1dd69a91c14 100644 --- a/python/paddle/fluid/tests/unittests/test_spectral_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_spectral_norm_op.py @@ -47,15 +47,15 @@ def spectral_norm(weight, u, v, dim, power_iters, eps): @skip_check_grad_ci( reason="Spectral norm do not check grad when power_iters > 0 " "because grad is not calculated in power iterations, " - "which cannot be checked by python grad unittests") + "which cannot be checked by python grad unittests" +) class TestSpectralNormOpNoGrad(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'spectral_norm' weight = np.random.random(self.weight_shape).astype('float64') - u = np.random.normal(0., 1., self.u_shape).astype('float64') - v = np.random.normal(0., 1., self.v_shape).astype('float64') + u = np.random.normal(0.0, 1.0, self.u_shape).astype('float64') + v = np.random.normal(0.0, 1.0, self.v_shape).astype('float64') self.attrs = { "dim": self.dim, @@ -69,8 +69,9 @@ class TestSpectralNormOpNoGrad(OpTest): "V": v, } - output = spectral_norm(weight, u, v, self.dim, self.power_iters, - self.eps) + output = spectral_norm( + weight, u, v, self.dim, self.power_iters, self.eps + ) self.outputs = {"Out": output} def test_check_output(self): @@ -78,8 +79,8 @@ class TestSpectralNormOpNoGrad(OpTest): def initTestCase(self): self.weight_shape = (10, 12) - self.u_shape = (10, ) - self.v_shape = (12, ) + self.u_shape = (10,) + self.v_shape = (12,) self.dim = 0 self.power_iters = 5 self.eps = 1e-12 @@ -88,20 +89,19 @@ class TestSpectralNormOpNoGrad(OpTest): @skip_check_grad_ci( reason="Spectral 
norm do not check grad when power_iters > 0 " "because grad is not calculated in power iterations, " - "which cannot be checked by python grad unittests") + "which cannot be checked by python grad unittests" +) class TestSpectralNormOpNoGrad2(TestSpectralNormOpNoGrad): - def initTestCase(self): self.weight_shape = (2, 3, 3, 3) - self.u_shape = (3, ) - self.v_shape = (18, ) + self.u_shape = (3,) + self.v_shape = (18,) self.dim = 1 self.power_iters = 10 self.eps = 1e-12 class TestSpectralNormOp(TestSpectralNormOpNoGrad): - def test_check_grad_ignore_uv(self): self.check_grad( ['Weight'], @@ -111,26 +111,24 @@ class TestSpectralNormOp(TestSpectralNormOpNoGrad): def initTestCase(self): self.weight_shape = (10, 12) - self.u_shape = (10, ) - self.v_shape = (12, ) + self.u_shape = (10,) + self.v_shape = (12,) self.dim = 0 self.power_iters = 0 self.eps = 1e-12 class TestSpectralNormOp2(TestSpectralNormOp): - def initTestCase(self): self.weight_shape = (2, 6, 3, 3) - self.u_shape = (6, ) - self.v_shape = (18, ) + self.u_shape = (6,) + self.v_shape = (18,) self.dim = 1 self.power_iters = 0 self.eps = 1e-12 class TestSpectralNormOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): @@ -150,13 +148,12 @@ class TestSpectralNormOpError(unittest.TestCase): class TestDygraphSpectralNormOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): shape = (2, 4, 3, 3) - spectralNorm = fluid.dygraph.nn.SpectralNorm(shape, - dim=1, - power_iters=2) + spectralNorm = fluid.dygraph.nn.SpectralNorm( + shape, dim=1, power_iters=2 + ) def test_Variable(): weight_1 = np.random.random((2, 4)).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py index 0afd3ec56bb26c992cf059444035990399521b39..fefa11be9aa586ea8c38e7f47100b671c0faf243 100644 --- a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py @@ -27,7 +27,6 @@ from paddle.fluid.layer_helper import LayerHelper class TestCPULoDTensorArrayOps(unittest.TestCase): - def place(self): return core.CPUPlace() @@ -52,11 +51,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): expect_false = core.LoDTensor() expect_false.set(expect_false_tensor, self.place()) - self.main(tensor=tensor, - mask=mask, - expect_true=expect_true, - expect_false=expect_false, - expect_out=tensor) + self.main( + tensor=tensor, + mask=mask, + expect_true=expect_true, + expect_false=expect_false, + expect_out=tensor, + ) def split_and_merge_lod_tensor_level_0(self, use_merge_lod_infer=False): tensor = core.LoDTensor() @@ -83,12 +84,14 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): expect_false.set(expect_false_tensor, self.place()) expect_false.set_recursive_sequence_lengths(expect_false_lod) - self.main(tensor=tensor, - mask=mask, - expect_true=expect_true, - expect_false=expect_false, - expect_out=tensor, - use_merge_lod_infer=use_merge_lod_infer) + self.main( + tensor=tensor, + mask=mask, + expect_true=expect_true, + expect_false=expect_false, + expect_out=tensor, + use_merge_lod_infer=use_merge_lod_infer, + ) def test_split_and_merge_lod_tensor_1(self): self.split_and_merge_lod_tensor_level_0() @@ -96,14 +99,16 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_split_and_merge_lod_tensor_2(self): self.split_and_merge_lod_tensor_level_0(True) - def main(self, - tensor, - 
mask, - expect_true, - expect_false, - expect_out, - level=0, - use_merge_lod_infer=False): + def main( + self, + tensor, + mask, + expect_true, + expect_false, + expect_out, + level=0, + use_merge_lod_infer=False, + ): place = self.place() program = Program() with program_guard(program): @@ -122,38 +127,42 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): 'Mask': mask, 'InTrue': out_true, 'InFalse': out_false, - 'level': level + 'level': level, } helper = LayerHelper('merge_lod_tensor_infer') out = helper.create_variable_for_type_inference( - dtype=out_true.dtype) - helper.append_op(type='merge_lod_tensor_infer', - inputs={ - 'X': x, - 'Mask': y, - 'InTrue': out_true, - 'InFalse': out_false - }, - outputs={'Out': out}, - attrs={'level': level}) + dtype=out_true.dtype + ) + helper.append_op( + type='merge_lod_tensor_infer', + inputs={ + 'X': x, + 'Mask': y, + 'InTrue': out_true, + 'InFalse': out_false, + }, + outputs={'Out': out}, + attrs={'level': level}, + ) out.persistable = True else: - out = merge_lod_tensor(in_true=out_true, - in_false=out_false, - mask=y, - x=x, - level=level) + out = merge_lod_tensor( + in_true=out_true, + in_false=out_false, + mask=y, + x=x, + level=level, + ) out.persistable = True exe = Executor(place) scope = core.Scope() - exe.run(program, - feed={ - 'x': tensor, - 'y': mask - }, - scope=scope, - return_numpy=False) + exe.run( + program, + feed={'x': tensor, 'y': mask}, + scope=scope, + return_numpy=False, + ) var_true = scope.find_var(out_true.name).get_tensor() @@ -166,36 +175,33 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): self.check_tensor_same(var_out, expect_out) def check_tensor_same(self, actual, expect): - np.testing.assert_allclose(np.array(actual), - np.array(expect), - rtol=1e-05) - self.assertEqual(actual.recursive_sequence_lengths(), - expect.recursive_sequence_lengths()) + np.testing.assert_allclose( + np.array(actual), np.array(expect), rtol=1e-05 + ) + self.assertEqual( + actual.recursive_sequence_lengths(), + expect.recursive_sequence_lengths(), + ) class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): - def test_grad(self): place = core.CPUPlace() program = Program() with program_guard(program): - x = layers.data(name='x', - shape=[1], - dtype='float32', - stop_gradient=False) - y = layers.data(name='y', - shape=[1], - dtype='bool', - stop_gradient=False) + x = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False + ) + y = layers.data( + name='y', shape=[1], dtype='bool', stop_gradient=False + ) level = 0 out_true, out_false = split_lod_tensor(input=x, mask=y, level=level) - out = merge_lod_tensor(in_true=out_true, - in_false=out_false, - mask=y, - x=x, - level=level) + out = merge_lod_tensor( + in_true=out_true, in_false=out_false, mask=y, x=x, level=level + ) mean = paddle.mean(out) append_backward(mean) @@ -215,16 +221,17 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): g_vars = program.global_block().var(x.name + "@GRAD") g_out = [ - item.sum() for item in map( + item.sum() + for item in map( np.array, - exe.run(program, - feed={ - 'x': tensor, - 'y': mask - }, - fetch_list=[g_vars], - scope=scope, - return_numpy=False)) + exe.run( + program, + feed={'x': tensor, 'y': mask}, + fetch_list=[g_vars], + scope=scope, + return_numpy=False, + ), + ) ] g_out_sum = np.array(g_out).sum() @@ -233,78 +240,78 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): class TestMergeLodTensorOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - input_data = 
layers.data(name='x', - shape=[1], - dtype='float32', - stop_gradient=False) - y = layers.data(name='y', - shape=[1], - dtype='bool', - stop_gradient=False) - x_true = layers.data(name='x_true', - shape=[1], - dtype='float32', - stop_gradient=False) - x_false = layers.data(name='x_false', - shape=[1], - dtype='float32', - stop_gradient=False) + input_data = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False + ) + y = layers.data( + name='y', shape=[1], dtype='bool', stop_gradient=False + ) + x_true = layers.data( + name='x_true', shape=[1], dtype='float32', stop_gradient=False + ) + x_false = layers.data( + name='x_false', shape=[1], dtype='float32', stop_gradient=False + ) level = 0 def test_x(): - out = merge_lod_tensor(int_true=x_true, - in_false=x_false, - x=set(), - mask=y, - level=level) + out = merge_lod_tensor( + int_true=x_true, + in_false=x_false, + x=set(), + mask=y, + level=level, + ) self.assertRaises(TypeError, test_x) def test_mask(): - out = merge_lod_tensor(int_true=x_true, - in_false=x_false, - x=input_data, - mask=set(), - level=level) + out = merge_lod_tensor( + int_true=x_true, + in_false=x_false, + x=input_data, + mask=set(), + level=level, + ) self.assertRaises(TypeError, test_mask) def test_xtrue(): - out = merge_lod_tensor(int_true=set(), - in_false=x_false, - x=input_data, - mask=y, - level=level) + out = merge_lod_tensor( + int_true=set(), + in_false=x_false, + x=input_data, + mask=y, + level=level, + ) self.assertRaises(TypeError, test_xtrue) def test_xfalse(): - out = merge_lod_tensor(int_true=x_true, - in_false=set(), - x=input_data, - mask=y, - level=level) + out = merge_lod_tensor( + int_true=x_true, + in_false=set(), + x=input_data, + mask=y, + level=level, + ) self.assertRaises(TypeError, test_xfalse) class TestSplitLodTensorWithError(unittest.TestCase): - def test_error(self): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = layers.data(name='x', - shape=[1], - dtype='float32', - stop_gradient=False) - y = layers.data(name='y', - shape=[1], - dtype='bool', - stop_gradient=False) + x = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False + ) + y = layers.data( + name='y', shape=[1], dtype='bool', stop_gradient=False + ) level = 0 with self.assertRaises(TypeError): diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 14ba9eb45a0656abb633bb6f3e0731e05dd27b3f..e087cc8b1bb84cc87ef872c8c161fdcd2a07a441 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestSplitOp(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() @@ -31,14 +30,19 @@ class TestSplitOp(OpTest): x = np.random.random((4, 5, 6)).astype(np.float32) out = np.split(x, [2, 3], axis) self.inputs = {'X': convert_float_to_uint16(x)} - self.outputs = {'Out': [('out%d' % i, convert_float_to_uint16(out[i])) \ - for i in range(len(out))]} + self.outputs = { + 'Out': [ + ('out%d' % i, convert_float_to_uint16(out[i])) + for i in range(len(out)) + ] + } else: x = np.random.random((4, 5, 6)).astype(self.dtype) out = np.split(x, [2, 3], axis) self.inputs = {'X': x} - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = { + 'Out': [('out%d' % i, out[i]) for i in range(len(out))] + } self.attrs = {'axis': axis, 'sections': [2, 
1, 2]} def get_dtype(self): @@ -56,7 +60,6 @@ class TestSplitOp(OpTest): # test with attr(num) class TestSplitOp_2(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() @@ -65,12 +68,11 @@ class TestSplitOp_2(OpTest): self.attrs = { 'axis': self.axis, 'sections': self.sections, - 'num': self.num + 'num': self.num, } out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} def init_data(self): self.x = np.random.random((4, 5, 6)).astype(self.dtype) @@ -94,20 +96,18 @@ class TestSplitOp_2(OpTest): # attr(axis) is Tensor class TestSplitOp_AxisTensor(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() self.init_data() self.inputs = { 'X': self.x, - 'AxisTensor': np.array([self.axis]).astype("int32") + 'AxisTensor': np.array([self.axis]).astype("int32"), } self.attrs = {'sections': self.sections, 'num': self.num} out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} def init_data(self): self.x = np.random.random((4, 5, 6)).astype(self.dtype) @@ -131,7 +131,6 @@ class TestSplitOp_AxisTensor(OpTest): # attr(sections) is list containing Tensor class TestSplitOp_SectionsTensor(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() @@ -140,20 +139,20 @@ class TestSplitOp_SectionsTensor(OpTest): sections_tensor = [] for index, ele in enumerate(self.sections): - sections_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + sections_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SectionsTensorList'] = sections_tensor self.attrs = { 'axis': self.axis, 'sections': self.sections_infer, - 'num': self.num + 'num': self.num, } out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} def init_data(self): self.x = np.random.random((4, 5, 6)).astype(self.dtype) @@ -177,7 +176,6 @@ class TestSplitOp_SectionsTensor(OpTest): class TestSplitOp_unk_section(OpTest): - def setUp(self): self._set_op_type() self.dtype = self.get_dtype() @@ -186,12 +184,11 @@ class TestSplitOp_unk_section(OpTest): self.attrs = { 'axis': self.axis, 'sections': self.sections, - 'num': self.num + 'num': self.num, } out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} + self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]} def init_data(self): self.x = np.random.random((4, 5, 6)).astype(self.dtype) @@ -214,20 +211,18 @@ class TestSplitOp_unk_section(OpTest): class TestSplitByrefOp(OpTest): - def _set_op_type(self): self.op_type = "split_byref" -#----------------Split Fp16---------------- +# ----------------Split Fp16---------------- def create_test_fp16(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestSplitFp16(parent): - def get_dtype(self): return np.float16 @@ -241,15 +236,14 @@ def create_test_fp16(parent): create_test_fp16(TestSplitOp) -#----------------Split Bf16---------------- +# 
----------------Split Bf16---------------- def create_test_bf16(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestSplitBf16(parent): - def get_dtype(self): return np.uint16 @@ -269,7 +263,6 @@ create_test_bf16(TestSplitOp) class TestSplitAPI(unittest.TestCase): - def test_api(self): input_1 = np.random.random([4, 5, 6]).astype("int32") positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1) @@ -281,21 +274,20 @@ class TestSplitAPI(unittest.TestCase): out_0, out_1, out_2 = fluid.layers.split( input=x_1, num_or_sections=[positive_2_int64, positive_1_int32, -1], - dim=positive_1_int64) + dim=positive_1_int64, + ) - out_3, out_4, out_5 = fluid.layers.split(input=x_1, - num_or_sections=[2, 1, 2], - dim=positive_1_int32) + out_3, out_4, out_5 = fluid.layers.split( + input=x_1, num_or_sections=[2, 1, 2], dim=positive_1_int32 + ) fluid.layers.split(input=x_2, num_or_sections=2, dim=2) exe = fluid.Executor(place=fluid.CPUPlace()) - [res_0, res_1, res_2, res_3, res_4, - res_5] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_1, - "x_2": input_1 - }, - fetch_list=[out_0, out_1, out_2, out_3, out_4, out_5]) + [res_0, res_1, res_2, res_3, res_4, res_5] = exe.run( + fluid.default_main_program(), + feed={"x_1": input_1, "x_2": input_1}, + fetch_list=[out_0, out_1, out_2, out_3, out_4, out_5], + ) out = np.split(input_1, [2, 3], 1) assert np.array_equal(res_0, out[0]) @@ -307,7 +299,6 @@ class TestSplitAPI(unittest.TestCase): class TestSplitOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The type of axis in split_op should be int or Variable. @@ -346,7 +337,6 @@ class TestSplitOpError(unittest.TestCase): class API_TestSplit(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') @@ -356,11 +346,9 @@ class API_TestSplit(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') input2 = np.array([2]).astype('int32') - r0, r1, r2, = exe.run(feed={ - "data1": input1, - "data2": input2 - }, - fetch_list=[x0, x1, x2]) + r0, r1, r2, = exe.run( + feed={"data1": input1, "data2": input2}, fetch_list=[x0, x1, x2] + ) ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) @@ -368,7 +356,6 @@ class API_TestSplit(unittest.TestCase): class API_TestSplit2(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') @@ -376,8 +363,11 @@ class API_TestSplit2(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') - r0, r1, r2, = exe.run(feed={"data1": input1}, - fetch_list=[x0, x1, x2]) + ( + r0, + r1, + r2, + ) = exe.run(feed={"data1": input1}, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) @@ -385,7 +375,6 @@ class API_TestSplit2(unittest.TestCase): class API_TestSplit3(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') @@ -394,13 
+383,12 @@ class API_TestSplit3(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([1, 10]).astype('float64') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) - ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) + ex_x0, ex_x1 = np.split(input1, (3,), axis=1) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) class API_TestSplit4(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') @@ -410,21 +398,19 @@ class API_TestSplit4(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([1, 10]).astype('float64') input2 = np.array([7]).astype('int32') - r0, r1 = exe.run(feed={ - "data": input1, - "index": input2 - }, - fetch_list=[x0, x1]) - ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) + r0, r1 = exe.run( + feed={"data": input1, "index": input2}, fetch_list=[x0, x1] + ) + ex_x0, ex_x1 = np.split(input1, (3,), axis=1) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) class API_TestSplit5(unittest.TestCase): - def test_out(self): - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() with fluid.program_guard(fluid.Program(), fluid.Program()): input_1 = np.random.random([5, 4]).astype("int32") @@ -441,7 +427,6 @@ class API_TestSplit5(unittest.TestCase): class API_TestSplit6(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') @@ -450,13 +435,12 @@ class API_TestSplit6(unittest.TestCase): exe = fluid.Executor(place) input1 = np.random.random([2, 10]).astype('float64') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) - ex_x0, ex_x1 = np.split(input1, (1, ), axis=0) + ex_x0, ex_x1 = np.split(input1, (1,), axis=0) np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) class API_TestDygraphFluidSplit(unittest.TestCase): - def test_out1(self): with fluid.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") @@ -479,9 +463,9 @@ class API_TestDygraphFluidSplit(unittest.TestCase): loss.backward() manul_grad = np.zeros_like(input_1) manul_grad[:, :2, :] = 1 - np.testing.assert_allclose(input.gradient(), - manul_grad, - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), manul_grad, rtol=1e-05 + ) np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05) np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05) np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05) @@ -512,9 +496,9 @@ class API_TestDygraphFluidSplit(unittest.TestCase): loss.backward() manul_grad = np.zeros_like(input_1) manul_grad[:, :2, :] = 1 - np.testing.assert_allclose(input.gradient(), - manul_grad, - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), manul_grad, rtol=1e-05 + ) np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05) np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05) np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05) @@ -525,7 +509,6 @@ class API_TestDygraphFluidSplit(unittest.TestCase): class API_TestDygraphSplit(unittest.TestCase): - def test_out1(self): with fluid.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") @@ -549,9 +532,9 
@@ class API_TestDygraphSplit(unittest.TestCase): loss.backward() manul_grad = np.zeros_like(input_1) manul_grad[:, :2, :] = 1 - np.testing.assert_allclose(input.gradient(), - manul_grad, - rtol=1e-05) + np.testing.assert_allclose( + input.gradient(), manul_grad, rtol=1e-05 + ) np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05) np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05) np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05) @@ -599,9 +582,9 @@ class API_TestDygraphSplit(unittest.TestCase): # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) num1 = paddle.full(shape=[1], fill_value=2, dtype='int32') - x0, x1, x2 = paddle.split(input, - num_or_sections=[num1, 2, 2], - axis=1) + x0, x1, x2 = paddle.split( + input, num_or_sections=[num1, 2, 2], axis=1 + ) x0_out = x0.numpy() x1_out = x1.numpy() x2_out = x2.numpy() @@ -616,9 +599,9 @@ class API_TestDygraphSplit(unittest.TestCase): # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) num1 = paddle.full(shape=[1], fill_value=1, dtype='int32') - x0, x1, x2 = paddle.split(input, - num_or_sections=[2, 2, 2], - axis=num1) + x0, x1, x2 = paddle.split( + input, num_or_sections=[2, 2, 2], axis=num1 + ) x0_out = x0.numpy() x1_out = x1.numpy() x2_out = x2.numpy() @@ -644,7 +627,6 @@ class API_TestDygraphSplit(unittest.TestCase): class API_TestEmptySplit(unittest.TestCase): - def test_axis_input_empty_section(self): with fluid.dygraph.guard(): input_1 = np.random.random([8, 6, 6]).astype("float32") @@ -654,10 +636,13 @@ class API_TestEmptySplit(unittest.TestCase): x0_out = x0.numpy() x1_out = x1.numpy() x2_out = x2.numpy() - ex_x0, ex_x1, ex_x2 = np.split(input_1, [ - 5, - 5, - ]) + ex_x0, ex_x1, ex_x2 = np.split( + input_1, + [ + 5, + 5, + ], + ) np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_split_program.py b/python/paddle/fluid/tests/unittests/test_split_program.py index 761c5ed5587fa7bff6aac2837176c5d848d024a0..bab36fd88ce4abd74bbb900a585d064dadd176e8 100644 --- a/python/paddle/fluid/tests/unittests/test_split_program.py +++ b/python/paddle/fluid/tests/unittests/test_split_program.py @@ -21,7 +21,6 @@ import numpy as np class TestSplitProgram(unittest.TestCase): - def setUp(self): paddle.enable_static() if paddle.is_compiled_with_cuda(): @@ -31,12 +30,12 @@ class TestSplitProgram(unittest.TestCase): main = paddle.static.Program() startup = paddle.static.Program() with paddle.static.program_guard(main, startup): - image = paddle.static.data(shape=[batch_size, 3, 224, 224], - dtype='float32', - name='image') - label = paddle.static.data(shape=[batch_size, 1], - dtype='int64', - name='label') + image = paddle.static.data( + shape=[batch_size, 3, 224, 224], dtype='float32', name='image' + ) + label = paddle.static.data( + shape=[batch_size, 1], dtype='int64', name='label' + ) model = resnet(pretrained=False) loss_fn = nn.loss.CrossEntropyLoss() @@ -65,10 +64,11 @@ class TestSplitProgram(unittest.TestCase): self.assertEqual(len(vars_actual), len(vars_expected)) for actual, expected in zip(vars_actual, vars_expected): self.assertEqual(actual.shape, expected.shape) - np.testing.assert_array_equal(actual, - expected, - err_msg='{}\n{}\n'.format( - actual, expected)) + np.testing.assert_array_equal( + actual, + expected, + err_msg='{}\n{}\n'.format(actual, expected), + ) def get_places(self): places = 
[paddle.CPUPlace()] @@ -93,27 +93,26 @@ class TestSplitProgram(unittest.TestCase): exe = paddle.static.Executor(place) image_np = np.random.random(size=image.shape).astype('float32') - label_np = np.random.randint(low=0, - high=1000, - dtype='int64', - size=label.shape) + label_np = np.random.randint( + low=0, high=1000, dtype='int64', size=label.shape + ) scope = paddle.static.Scope() if not use_split: with paddle.static.scope_guard(scope): exe.run(startup_prog) for _ in range(batch_num): - exe.run(main_prog, - feed={ - image.name: image_np, - label.name: label_np - }) + exe.run( + main_prog, + feed={image.name: image_np, label.name: label_np}, + ) return self.get_var_values(scope, startup_vars) op_num = len(main_prog.global_block().ops) split_op_indices = [int(op_num / 3.0), int(op_num * 3 / 4.0)] programs, input_vars, output_vars = split_program( - main_prog, split_op_indices) + main_prog, split_op_indices + ) op_nums = [0] + split_op_indices + [op_num] op_nums = [op_nums[i + 1] - op_nums[i] for i in range(len(op_nums) - 1)] num_split = len(split_op_indices) + 1 @@ -137,15 +136,19 @@ class TestSplitProgram(unittest.TestCase): if tmp_vars[in_name] is not None: feed_dict[in_name] = tmp_vars[in_name] - output_var_values = exe.run(program, - feed=feed_dict, - fetch_list=output_vars[i], - return_numpy=False) - for out_name, out_value in zip(output_vars[i], - output_var_values): + output_var_values = exe.run( + program, + feed=feed_dict, + fetch_list=output_vars[i], + return_numpy=False, + ) + for out_name, out_value in zip( + output_vars[i], output_var_values + ): if not out_value._is_initialized(): tmp_vars[out_name] = np.ndarray( - out_value._get_dims()).astype('float32') + out_value._get_dims() + ).astype('float32') else: tmp_vars[out_name] = np.array(out_value) diff --git a/python/paddle/fluid/tests/unittests/test_splits_api.py b/python/paddle/fluid/tests/unittests/test_splits_api.py index 3613c7be3b621f0275500d9afb06d21e39605f36..491fed74f775c44c84be7d0b0b72da7964484e9b 100644 --- a/python/paddle/fluid/tests/unittests/test_splits_api.py +++ b/python/paddle/fluid/tests/unittests/test_splits_api.py @@ -35,7 +35,6 @@ test_list = [ class TestSplitsAPI(unittest.TestCase): - def setUp(self): self.rtol = 1e-5 self.atol = 1e-8 @@ -45,8 +44,11 @@ class TestSplitsAPI(unittest.TestCase): self.shape = [4, 5, 2] self.num_or_sections = 2 self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_api(self): paddle.enable_static() @@ -67,68 +69,79 @@ class TestSplitsAPI(unittest.TestCase): out = func(x, self.num_or_sections) out_ref = func_ref(func_type, self.x_np, self.num_or_sections) for n, p in zip(out_ref, out): - np.testing.assert_allclose(n, - p.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + n, p.numpy(), rtol=self.rtol, atol=self.atol + ) paddle.enable_static() class TestSplitsSections(TestSplitsAPI): """ - Test num_or_sections which is a list and date type is float64. + Test num_or_sections which is a list and date type is float64. 
""" def set_input(self): self.shape = [6, 2, 4] self.num_or_sections = [2, 1, 3] self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSplitsFloat32(TestSplitsAPI): """ - Test num_or_sections which is an integer and data type is float32. + Test num_or_sections which is an integer and data type is float32. """ def set_input(self): self.shape = [2, 3, 4] self.num_or_sections = 2 self.x_np = np.random.uniform(-1, 1, self.shape).astype('float32') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSplitsInt32(TestSplitsAPI): """ - Test data type int32. + Test data type int32. """ def set_input(self): self.shape = [5, 1, 2] self.num_or_sections = 5 self.x_np = np.random.uniform(-1, 1, self.shape).astype('int32') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSplitsInt64(TestSplitsAPI): """ - Test data type int64. + Test data type int64. """ def set_input(self): self.shape = [4, 3, 2] self.num_or_sections = 2 self.x_np = np.random.uniform(-1, 1, self.shape).astype('int64') - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) class TestSplitsCPU(TestSplitsAPI): """ - Test cpu place and num_or_sections which is a tuple. + Test cpu place and num_or_sections which is a tuple. """ def set_input(self): @@ -140,13 +153,16 @@ class TestSplitsCPU(TestSplitsAPI): class TestSplitsError(unittest.TestCase): """ - Test the situation that input shape less than 2. + Test the situation that input shape less than 2. 
""" def setUp(self): self.num_or_sections = 1 - self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def test_static_error(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_spp_op.py b/python/paddle/fluid/tests/unittests/test_spp_op.py index 460abd59c421b04c7a7fdb9e79780c998aa25216..776cde9b55398715fb036e225d7bb572d64170e8 100644 --- a/python/paddle/fluid/tests/unittests/test_spp_op.py +++ b/python/paddle/fluid/tests/unittests/test_spp_op.py @@ -20,7 +20,6 @@ from test_pool2d_op import avg_pool2D_forward_naive class TestSppOp(OpTest): - def setUp(self): self.op_type = "spp" self.init_test_case() @@ -34,19 +33,25 @@ class TestSppOp(OpTest): bins = np.power(2, i) kernel_size = [0, 0] padding = [0, 0] - kernel_size[0] = np.ceil(hsize / - bins.astype("double")).astype("int32") - padding[0] = ((kernel_size[0] * bins - hsize + 1) / - 2).astype("int32") + kernel_size[0] = np.ceil(hsize / bins.astype("double")).astype( + "int32" + ) + padding[0] = ((kernel_size[0] * bins - hsize + 1) / 2).astype( + "int32" + ) - kernel_size[1] = np.ceil(wsize / - bins.astype("double")).astype("int32") - padding[1] = ((kernel_size[1] * bins - wsize + 1) / - 2).astype("int32") - out_level = self.pool2D_forward_naive(input, kernel_size, - kernel_size, padding) + kernel_size[1] = np.ceil(wsize / bins.astype("double")).astype( + "int32" + ) + padding[1] = ((kernel_size[1] * bins - wsize + 1) / 2).astype( + "int32" + ) + out_level = self.pool2D_forward_naive( + input, kernel_size, kernel_size, padding + ) out_level_flatten.append( - out_level.reshape(nsize, bins * bins * csize)) + out_level.reshape(nsize, bins * bins * csize) + ) if i == 0: output = out_level_flatten[i] else: @@ -57,7 +62,7 @@ class TestSppOp(OpTest): } self.attrs = { 'pyramid_height': self.pyramid_height, - 'pooling_type': self.pool_type + 'pooling_type': self.pool_type, } self.outputs = {'Out': output.astype('float64')} @@ -75,7 +80,6 @@ class TestSppOp(OpTest): class TestCase2(TestSppOp): - def init_test_case(self): self.shape = [3, 2, 16, 16] self.pyramid_height = 3 diff --git a/python/paddle/fluid/tests/unittests/test_square_error_cost.py b/python/paddle/fluid/tests/unittests/test_square_error_cost.py index 89001fe78b0215f53c6c0bbd0a3c585f2e7f0c43..498eee8051e9c5393743f72147c2be7cf584a4fb 100644 --- a/python/paddle/fluid/tests/unittests/test_square_error_cost.py +++ b/python/paddle/fluid/tests/unittests/test_square_error_cost.py @@ -21,7 +21,6 @@ from paddle.fluid.executor import Executor class TestSquareErrorCost(unittest.TestCase): - def test_square_error_cost(self): input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32") label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32") @@ -33,25 +32,23 @@ class TestSquareErrorCost(unittest.TestCase): label_var = layers.create_tensor(dtype="float32", name="label") output = layers.square_error_cost(input=input_var, label=label_var) - for use_cuda in ([False, True] - if core.is_compiled_with_cuda() else [False]): + for use_cuda in ( + [False, True] if core.is_compiled_with_cuda() else [False] + ): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) - result, = exe.run(fluid.default_main_program(), - feed={ - "input": input_val, - "label": label_val - }, - fetch_list=[output]) + (result,) = exe.run( + fluid.default_main_program(), + feed={"input": input_val, "label": label_val}, + fetch_list=[output], + ) 
np.testing.assert_allclose(np_result, result, rtol=1e-05) class TestSquareErrorInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): input = [256, 3] label = fluid.data(name='label1', shape=[None, 3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py index 6804d8a771e991ed30bdf7c5cf8732d924563814..21fdb592402f5aca6281580e6aafe5e647f67552 100644 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_distance_op.py @@ -18,18 +18,17 @@ from op_test import OpTest class TestSquaredL2DistanceOp_f0(OpTest): - def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (5, 20)).astype("float32"), - 'Y': np.random.uniform(0.1, 0.6, (5, 20)).astype("float32") + 'Y': np.random.uniform(0.1, 0.6, (5, 20)).astype("float32"), } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, - 'Out': np.expand_dims(output.sum(1), 1) + 'Out': np.expand_dims(output.sum(1), 1), } def test_check_output(self): @@ -40,18 +39,17 @@ class TestSquaredL2DistanceOp_f0(OpTest): class TestSquaredL2DistanceOp_f1(OpTest): - def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), - 'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32") + 'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32"), } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, - 'Out': np.expand_dims(output.sum(1), 1) + 'Out': np.expand_dims(output.sum(1), 1), } def test_check_output(self): @@ -62,19 +60,18 @@ class TestSquaredL2DistanceOp_f1(OpTest): class TestSquaredL2DistanceOp_f2(OpTest): - def setUp(self): self.op_type = "squared_l2_distance" self.inputs = { 'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"), - 'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32") + 'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32"), } sub_res = self.inputs['X'] - self.inputs['Y'] sub_res = sub_res.reshape((2, 3 * 4)) output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, - 'Out': np.expand_dims(output.sum(1), 1) + 'Out': np.expand_dims(output.sum(1), 1), } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py index bc4dcfd1e28a0a8bb474a9bd4a435367ae900f7c..8e09d83975a60ed1ee96037572d7f77d8a57c5fd 100644 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py @@ -29,8 +29,7 @@ def test_squared_l2_norm(x): class TestL2LossOp(OpTest): - """Test squared_l2_norm - """ + """Test squared_l2_norm""" def setUp(self): self.python_api = test_squared_l2_norm @@ -46,14 +45,15 @@ class TestL2LossOp(OpTest): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - max_relative_error=self.max_relative_error, - check_eager=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=self.max_relative_error, + check_eager=True, + ) class TestL2LossDeterministic(unittest.TestCase): - def check_place(self, place): with paddle.fluid.dygraph.guard(place): x_np = np.random.rand(5, 11, 13).astype('float32') diff --git 
a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py index 798afe703243b4d0ca2bc26d6b242c2770721afa..c64b58efea141baca5f224996e66c4070a537ea1 100755 --- a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py +++ b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py @@ -27,7 +27,6 @@ paddle.enable_static() # Correct: General. class TestSqueezeOp(OpTest): - def setUp(self): self.op_type = "squeeze2" self.python_api = paddle.squeeze @@ -39,7 +38,7 @@ class TestSqueezeOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float64") + "XShape": np.random.random(self.ori_shape).astype("float64"), } def test_check_output(self): @@ -59,7 +58,6 @@ class TestSqueezeOp(OpTest): # Correct: There is mins axis. class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -68,7 +66,6 @@ class TestSqueezeOp1(TestSqueezeOp): # Correct: No axes input. class TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -77,7 +74,6 @@ class TestSqueezeOp2(TestSqueezeOp): # Correct: Just part of axes be squeezed. class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) @@ -85,7 +81,6 @@ class TestSqueezeOp3(TestSqueezeOp): class TestSqueeze2AxesTensor(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, 'squeeze_tensor') @@ -122,7 +117,6 @@ class TestSqueeze2AxesTensor(UnittestBase): class TestSqueeze2AxesTensorList(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, 'squeeze_tensor') @@ -139,7 +133,7 @@ class TestSqueeze2AxesTensorList(UnittestBase): # axes is a list[Variable] axes = [ paddle.full([1], 0, dtype='int32'), - paddle.full([1], 2, dtype='int32') + paddle.full([1], 2, dtype='int32'), ] out = paddle.squeeze(feat, axes) out2 = paddle.fluid.layers.squeeze(feat, axes) diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py index 53b5cb272534f6a9bdb77e71e36177ad41a64612..e4ea13844b705cde6cd0ee2a1a1f0eb224e81e40 100755 --- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -30,7 +30,6 @@ paddle.enable_static() # Correct: General. class TestSqueezeOp(OpTest): - def setUp(self): self.op_type = "squeeze" self.init_test_case() @@ -56,7 +55,6 @@ class TestSqueezeOp(OpTest): class TestSqueezeBF16Op(OpTest): - def setUp(self): self.op_type = "squeeze" self.dtype = np.uint16 @@ -84,7 +82,6 @@ class TestSqueezeBF16Op(OpTest): # Correct: There is mins axis. class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 3, 1, 40) self.axes = (0, -2) @@ -93,7 +90,6 @@ class TestSqueezeOp1(TestSqueezeOp): # Correct: No axes input. class TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -102,7 +98,6 @@ class TestSqueezeOp2(TestSqueezeOp): # Correct: Just part of axes be squeezed. class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) @@ -111,7 +106,6 @@ class TestSqueezeOp3(TestSqueezeOp): # Correct: The demension of axis is not of size 1 remains unchanged. 
class TestSqueezeOp4(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, 2) @@ -119,13 +113,13 @@ class TestSqueezeOp4(TestSqueezeOp): class TestSqueezeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of softmax_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.CPUPlace() + ) self.assertRaises(TypeError, paddle.squeeze, x1) # The input axes of squeeze must be list. x2 = paddle.static.data(name='x2', shape=[4], dtype="int32") @@ -136,7 +130,6 @@ class TestSqueezeOpError(unittest.TestCase): class API_TestSqueeze(unittest.TestCase): - def setUp(self): self.executed_api() @@ -145,29 +138,29 @@ class API_TestSqueeze(unittest.TestCase): def test_out(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - data1 = paddle.static.data('data1', - shape=[-1, 1, 10], - dtype='float64') + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + data1 = paddle.static.data( + 'data1', shape=[-1, 1, 10], dtype='float64' + ) result_squeeze = self.squeeze(data1, axis=[1]) place = paddle.CPUPlace() exe = paddle.static.Executor(place) input1 = np.random.random([5, 1, 10]).astype('float64') - result, = exe.run(feed={"data1": input1}, - fetch_list=[result_squeeze]) + (result,) = exe.run( + feed={"data1": input1}, fetch_list=[result_squeeze] + ) expected_result = np.squeeze(input1, axis=1) np.testing.assert_allclose(expected_result, result, rtol=1e-05) class API_TestStaticSqueeze_(API_TestSqueeze): - def executed_api(self): self.squeeze = paddle.squeeze_ class API_TestDygraphSqueeze(unittest.TestCase): - def setUp(self): self.executed_api() @@ -221,13 +214,11 @@ class API_TestDygraphSqueeze(unittest.TestCase): class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze): - def executed_api(self): self.squeeze = paddle.squeeze_ class TestSqueezeDoubleGradCheck(unittest.TestCase): - def squeeze_wrapper(self, x): return paddle.squeeze(x[0]) @@ -242,17 +233,13 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): out = paddle.squeeze(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.squeeze_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -264,7 +251,6 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): class TestSqueezeTripleGradCheck(unittest.TestCase): - def squeeze_wrapper(self, x): return paddle.squeeze(x[0]) @@ -279,17 +265,13 @@ class TestSqueezeTripleGradCheck(unittest.TestCase): out = paddle.squeeze(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - 
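API_TestSqueeze.test_out above compares the static-graph result against np.squeeze and unpacks the single fetched value with `(result,) = exe.run(...)`. A stand-alone sketch of both pieces (the one-element unpacking is shown on a plain list, standing in for the sequence exe.run returns for a single fetch):

    import numpy as np

    # Reference for the test above: squeezing axis 1 of a [5, 1, 10] input.
    input1 = np.random.random([5, 1, 10]).astype("float64")
    expected_result = np.squeeze(input1, axis=1)
    assert expected_result.shape == (5, 10)

    # `(result,) = ...` keeps `result` as the bare ndarray rather than a
    # length-1 container; black writes the target as a parenthesized tuple.
    (result,) = [expected_result]
    np.testing.assert_allclose(expected_result, result, rtol=1e-05)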
gradient_checker.triple_grad_check_for_dygraph(self.squeeze_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.squeeze_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py index 0677211aed9488a18f995d27af0c251f07aa3a84..0f71ebeac5b216d5e9950a82c5fe3e9cc807737e 100644 --- a/python/paddle/fluid/tests/unittests/test_stack_op.py +++ b/python/paddle/fluid/tests/unittests/test_stack_op.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestStackOpBase(OpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -47,7 +46,8 @@ class TestStackOpBase(OpTest): self.x = [] for i in range(self.num_inputs): self.x.append( - np.random.random(size=self.input_dim).astype(self.dtype)) + np.random.random(size=self.input_dim).astype(self.dtype) + ) tmp = [] x_names = self.get_x_names() @@ -66,49 +66,41 @@ class TestStackOpBase(OpTest): class TestStackOp1(TestStackOpBase): - def initParameters(self): self.num_inputs = 8 class TestStackOp2(TestStackOpBase): - def initParameters(self): self.num_inputs = 10 class TestStackOp3(TestStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestStackOpBase): - def initParameters(self): self.axis = -4 class TestStackOp5(TestStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestStackOpBase): - def initParameters(self): self.axis = 3 class TestStackOp_ZeroDim(TestStackOpBase): - def initParameters(self): self.input_dim = () class TestStackBF16Op(OpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -132,7 +124,8 @@ class TestStackBF16Op(OpTest): self.x = [] for i in range(self.num_inputs): self.x.append( - np.random.random(size=self.input_dim).astype(np.float32)) + np.random.random(size=self.input_dim).astype(np.float32) + ) out = np.stack(self.x, axis=self.axis) @@ -162,8 +155,11 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): self.iter_num = 3 self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") - self.place = fluid.CUDAPlace(0) \ - if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.set_program() def set_program(self): @@ -183,7 +179,8 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( - res[0], np.stack([self.x] * self.iter_num, axis=self.axis)) + res[0], np.stack([self.x] * self.iter_num, axis=self.axis) + ) class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): @@ -196,8 +193,11 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): self.iter_num = 3 self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") - self.place = fluid.CUDAPlace(0) \ - if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if fluid.is_compiled_with_cuda() + else fluid.CPUPlace() + ) self.set_program() def set_program(self): @@ -217,11 +217,11 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( - res[0], np.stack([self.x] * self.iter_num, 
axis=self.axis)) + res[0], np.stack([self.x] * self.iter_num, axis=self.axis) + ) class API_test(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float64') @@ -233,12 +233,10 @@ class API_test(unittest.TestCase): input1 = np.random.random([1, 2]).astype('float64') input2 = np.random.random([1, 2]).astype('float64') input3 = np.random.random([1, 2]).astype('float64') - result, = exe.run(feed={ - "data1": input1, - "data2": input2, - "data3": input3 - }, - fetch_list=[result_stack]) + (result,) = exe.run( + feed={"data1": input1, "data2": input2, "data3": input3}, + fetch_list=[result_stack], + ) expected_result = np.stack([input1, input2, input3], axis=0) np.testing.assert_allclose(expected_result, result, rtol=1e-05) @@ -249,7 +247,6 @@ class API_test(unittest.TestCase): class API_DygraphTest(unittest.TestCase): - def test_out(self): data1 = np.array([[1.0, 2.0]]) data2 = np.array([[3.0, 4.0]]) @@ -277,7 +274,6 @@ class API_DygraphTest(unittest.TestCase): class TestStackOpWithNegativeShape(unittest.TestCase): - def test_out(self): main_prg, startup_prg = Program(), Program() with program_guard(main_prg, startup_prg): @@ -286,23 +282,28 @@ class TestStackOpWithNegativeShape(unittest.TestCase): k = paddle.stack([b, e], axis=0) exe = paddle.static.Executor() exe.run(startup_prg) - out = exe.run(main_prg, - feed={ - 'b': np.ones([ - 3, - ]).astype("int64"), - 'e': np.zeros([ - 3, - ]).astype("int64") - }, - fetch_list=[k]) - np.testing.assert_allclose(out[0], - np.array([[1, 1, 1], [0, 0, 0]]), - rtol=1e-05) + out = exe.run( + main_prg, + feed={ + 'b': np.ones( + [ + 3, + ] + ).astype("int64"), + 'e': np.zeros( + [ + 3, + ] + ).astype("int64"), + }, + fetch_list=[k], + ) + np.testing.assert_allclose( + out[0], np.array([[1, 1, 1], [0, 0, 0]]), rtol=1e-05 + ) class TestStackAPI_ZeroDim(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_attention.py b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_attention.py index 85b9edd2e3310607d0f55a2fd8eaebffb5b42765..24ee5a367755616852f369f7fdf7178868a052f9 100644 --- a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_attention.py +++ b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_attention.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestStaticModelParallel(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -33,11 +32,14 @@ class TestStaticModelParallel(TestDistBase): def test_dist_static_model_parallel_fused_feedforward(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("static_model_parallel_fused_attention.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "static_model_parallel_fused_attention.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_feedforward.py b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_feedforward.py index d2779eea7492332fca067d988785380c3eb926ab..c6153f2be0b9379f54a966af797da468b39e1588 100644 --- a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_feedforward.py +++ 
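The stack tests above all reduce to np.stack as the reference: API_test stacks three (1, 2) arrays along axis 0, and TestStackOp3/TestStackOp4 exercise negative axes. A minimal sketch of that reference behaviour (values are random placeholders):

    import numpy as np

    input1 = np.random.random([1, 2]).astype("float64")
    input2 = np.random.random([1, 2]).astype("float64")
    input3 = np.random.random([1, 2]).astype("float64")

    # Stacking along axis 0 inserts a new leading dimension of size 3.
    expected = np.stack([input1, input2, input3], axis=0)
    assert expected.shape == (3, 1, 2)

    # A negative axis counts from the end of the *output* rank.
    assert np.stack([input1, input2, input3], axis=-1).shape == (1, 2, 3)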
b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_feedforward.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestStaticModelParallel(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -33,11 +32,14 @@ class TestStaticModelParallel(TestDistBase): def test_dist_static_model_parallel_fused_feedforward(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): - self.check_with_place("static_model_parallel_fused_feedforward.py", - delta=1e-5, - check_error_log=True, - log_name=flag_name) + self.check_with_place( + "static_model_parallel_fused_feedforward.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_multi_transformer.py b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_multi_transformer.py index 211920cfebce17553e323b71b12956fb1be565ef..d34ced7b4f416c121db9103991c8c74bb36ec93e 100644 --- a/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_multi_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_static_model_parallel_fused_multi_transformer.py @@ -23,7 +23,6 @@ flag_name = os.path.splitext(__file__)[0] class TestStaticModelParallel(TestDistBase): - def _setup_config(self): self._sync_mode = True self._use_reduce = False @@ -33,12 +32,14 @@ class TestStaticModelParallel(TestDistBase): def test_dist_static_model_parallel_fused_multi_transformer(self): import paddle.fluid as fluid + if fluid.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_fused_multi_transformer.py", delta=1e-5, check_error_log=True, - log_name=flag_name) + log_name=flag_name, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load.py b/python/paddle/fluid/tests/unittests/test_static_save_load.py index 562477a0552757b6c4d48625ca325a2c724d17d7..a0cd268f4846ace4617675b80edadbe03a338820 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load.py @@ -31,14 +31,15 @@ paddle.enable_static() class SimpleLSTMRNN(fluid.Layer): - - def __init__(self, - name_scope, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): + def __init__( + self, + name_scope, + hidden_size, + num_steps, + num_layers=2, + init_scale=0.1, + dropout=None, + ): super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers @@ -58,19 +59,26 @@ class SimpleLSTMRNN(fluid.Layer): weight_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self._init_scale, high=self._init_scale + ), + ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)), + low=-self._init_scale, high=self._init_scale + ) + ), shape=[self._hidden_size * 4], dtype="float32", - default_initializer=fluid.initializer.Constant(0.0)) + default_initializer=fluid.initializer.Constant(0.0), + ) 
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): @@ -78,29 +86,29 @@ class SimpleLSTMRNN(fluid.Layer): self.hidden_array = [] for i in range(self._num_layers): - pre_hidden = fluid.layers.slice(init_hidden, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_cell = fluid.layers.slice(init_cell, - axes=[0], - starts=[i], - ends=[i + 1]) - pre_hidden = fluid.layers.reshape(pre_hidden, - shape=[-1, self._hidden_size]) - pre_cell = fluid.layers.reshape(pre_cell, - shape=[-1, self._hidden_size]) + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1] + ) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1] + ) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size] + ) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size] + ) self.hidden_array.append(pre_hidden) self.cell_array.append(pre_cell) res = [] for index in range(self._num_steps): - self._input = fluid.layers.slice(input_embedding, - axes=[1], - starts=[index], - ends=[index + 1]) - self._input = fluid.layers.reshape(self._input, - shape=[-1, self._hidden_size]) + self._input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1] + ) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size] + ) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] pre_cell = self.cell_array[k] @@ -111,11 +119,12 @@ class SimpleLSTMRNN(fluid.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split(gate_input, - num_or_sections=4, - dim=-1) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1 + ) c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * fluid.layers.tanh(j) + i + ) * fluid.layers.tanh(j) m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) self.hidden_array[k] = m self.cell_array[k] = c @@ -125,33 +134,39 @@ class SimpleLSTMRNN(fluid.Layer): self._input = fluid.layers.dropout( self._input, dropout_prob=self._dropout, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) res.append( - fluid.layers.reshape(self._input, - shape=[1, -1, self._hidden_size])) + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size] + ) + ) real_res = fluid.layers.concat(res, 0) real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) last_hidden = fluid.layers.concat(self.hidden_array, 1) last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden, shape=[-1, self._num_layers, self._hidden_size] + ) last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) last_cell = fluid.layers.concat(self.cell_array, 1) last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell, shape=[-1, self._num_layers, self._hidden_size] + ) last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) return real_res, last_hidden, last_cell class PtbModel(fluid.Layer): - - def __init__(self, - name_scope, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): + def __init__( + self, + name_scope, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None, + ): super(PtbModel, self).__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size @@ -159,61 
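SimpleLSTMRNN.forward above implements a fused LSTM cell: the step input and previous hidden state are concatenated, multiplied by a single [2*hidden, 4*hidden] weight, and the result is split into the i, j, f, o gates. A plain-NumPy sketch of one such step, with a hypothetical hidden size and batch, to make the gate arithmetic explicit:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    hidden_size, batch = 8, 2          # hypothetical sizes
    rng = np.random.default_rng(0)
    weight_1 = rng.uniform(-0.1, 0.1, (hidden_size * 2, hidden_size * 4))
    bias_1 = np.zeros(hidden_size * 4)
    step_input = rng.standard_normal((batch, hidden_size))
    pre_hidden = np.zeros((batch, hidden_size))
    pre_cell = np.zeros((batch, hidden_size))

    nn = np.concatenate([step_input, pre_hidden], axis=1)
    gate_input = nn @ weight_1 + bias_1
    i, j, f, o = np.split(gate_input, 4, axis=1)
    c = pre_cell * sigmoid(f) + sigmoid(i) * np.tanh(j)   # new cell state
    m = np.tanh(c) * sigmoid(o)                           # new hidden state
    assert c.shape == m.shape == (batch, hidden_size)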
+174,77 @@ class PtbModel(fluid.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN(self.full_name(), - hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) + self.simple_lstm_rnn = SimpleLSTMRNN( + self.full_name(), + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout, + ) self.embedding = paddle.nn.Embedding( num_embeddings=vocab_size, embedding_dim=hidden_size, weight_attr=fluid.ParamAttr( name='embedding_para', initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) + low=-init_scale, high=init_scale + ), + ), + ) self.softmax_weight = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) self.softmax_bias = self.create_parameter( attr=fluid.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) + low=-self.init_scale, high=self.init_scale + ), + ) def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_hidden, shape=[self.num_layers, -1, self.hidden_size] + ) init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) + init_cell, shape=[self.num_layers, -1, self.hidden_size] + ) # NPU 'tok_k' kernel only support `int32` dtype, so cast `input` from `int64` to `int32`. input = fluid.layers.cast(input, "int32") x_emb = self.embedding(input) x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) + x_emb, shape=[-1, self.num_steps, self.hidden_size] + ) if self.dropout is not None and self.dropout > 0.0: x_emb = fluid.layers.dropout( x_emb, dropout_prob=self.drop_out, - dropout_implementation='upscale_in_train') + dropout_implementation='upscale_in_train', + ) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn( - x_emb, init_h, init_c) + x_emb, init_h, init_c + ) rnn_out = fluid.layers.reshape( - rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + rnn_out, shape=[-1, self.num_steps, self.hidden_size] + ) projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape(projection, - shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy(logits=projection, - label=label, - soft_label=False) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size] + ) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False + ) loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) loss = fluid.layers.reduce_mean(loss, dim=[0]) loss = fluid.layers.reduce_sum(loss) @@ -222,10 +253,12 @@ class PtbModel(fluid.Layer): class TestSaveLoadBase(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_ptb_rnn_cpu_float32(self): seed = 90 @@ -241,29 +274,32 @@ class TestSaveLoadBase(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = 
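The loss head of PtbModel.forward above projects the RNN output to vocabulary logits, applies softmax cross-entropy per token, reshapes the per-token losses to [-1, num_steps], averages over the batch dimension, and sums over steps. A NumPy sketch of that reduction with hypothetical sizes (not Paddle's kernel, just the same arithmetic):

    import numpy as np

    batch, num_steps, vocab = 4, 20, 10
    rng = np.random.default_rng(0)
    projection = rng.standard_normal((batch * num_steps, vocab))
    label = rng.integers(0, vocab, size=(batch * num_steps, 1))

    # Numerically stable log-softmax, then pick out the label's log-probability.
    shifted = projection - projection.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    token_loss = -np.take_along_axis(log_probs, label, axis=1)   # [batch*num_steps, 1]

    # reshape to [-1, num_steps], mean over batch, sum over steps -> scalar.
    loss = token_loss.reshape(-1, num_steps).mean(axis=0).sum()
    assert np.ndim(loss) == 0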
seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd.minimize(static_loss) static_param_updated = dict() static_param_init = dict() @@ -279,18 +315,22 @@ class TestSaveLoadBase(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -300,8 +340,9 @@ class TestSaveLoadBase(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t @@ -314,28 +355,35 @@ class TestSaveLoadBase(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - fluid.load(main_program, - os.path.join(temp_dir.name, "test_1.pdparams"), exe) + fluid.load( + main_program, + os.path.join(temp_dir.name, "test_1.pdparams"), + exe, + ) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = 
base_map[var.name] np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() class TestSaveLoadPartial(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_ptb_rnn_cpu_float32(self): seed = 90 @@ -351,36 +399,41 @@ class TestSaveLoadPartial(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) test_program = fluid.default_main_program().clone(for_test=True) - add_1 = fluid.layers.fc(static_last_hidden, - size=hidden_size, - num_flatten_dims=2, - bias_attr=False) + add_1 = fluid.layers.fc( + static_last_hidden, + size=hidden_size, + num_flatten_dims=2, + bias_attr=False, + ) sgd.minimize(static_loss) static_param_updated = dict() @@ -397,18 +450,22 @@ class TestSaveLoadPartial(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -418,8 +475,9 @@ class TestSaveLoadPartial(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t @@ -432,30 +490,38 @@ class TestSaveLoadPartial(unittest.TestCase): ten = 
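Every save/load test above follows the same invariant: snapshot all persistable tensors into base_map (asserting they are non-zero, i.e. actually updated), zero them in the scope, reload from disk, and require bitwise equality with the snapshot. Stripped of the Paddle specifics, the skeleton looks like this (a plain dict of arrays stands in for the scope, and the copy-back stands in for fluid.load):

    import numpy as np

    def snapshot(scope):
        base = {}
        for name, value in scope.items():
            assert np.sum(np.abs(value)) != 0   # parameter has been updated
            base[name] = value.copy()
        return base

    def zero_out(scope):
        for value in scope.values():
            value[...] = 0

    scope = {"w": np.ones((2, 3)), "b": np.full((3,), 0.5)}   # hypothetical scope
    base = snapshot(scope)
    zero_out(scope)
    assert all(np.sum(np.abs(v)) == 0 for v in scope.values())

    for name, value in base.items():            # stand-in for fluid.load
        scope[name][...] = value
    for name, value in scope.items():
        np.testing.assert_array_equal(value, base[name])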
fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - fluid.load(test_program, os.path.join(temp_dir.name, - "test_1.pdopt"), None) + fluid.load( + test_program, os.path.join(temp_dir.name, "test_1.pdopt"), None + ) for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) - fluid.load(test_program, - os.path.join(temp_dir.name, "test_1.pdmodel"), None) + fluid.load( + test_program, + os.path.join(temp_dir.name, "test_1.pdmodel"), + None, + ) temp_dir.cleanup() class TestSaveLoadSetStateDict(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_ptb_rnn_cpu_float32(self): seed = 90 @@ -471,29 +537,32 @@ class TestSaveLoadSetStateDict(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd.minimize(static_loss) static_param_updated = dict() static_param_init = dict() @@ -509,18 +578,22 @@ class TestSaveLoadSetStateDict(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + 
"init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -530,8 +603,9 @@ class TestSaveLoadSetStateDict(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t @@ -544,8 +618,9 @@ class TestSaveLoadSetStateDict(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -553,18 +628,21 @@ class TestSaveLoadSetStateDict(unittest.TestCase): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() class TestProgramStatePartial(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_ptb_rnn_cpu_float32(self): seed = 90 @@ -580,36 +658,41 @@ class TestProgramStatePartial(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) test_program = fluid.default_main_program().clone(for_test=True) - add_1 = fluid.layers.fc(static_last_hidden, - size=hidden_size, - num_flatten_dims=2, - bias_attr=False) + add_1 = fluid.layers.fc( + static_last_hidden, + size=hidden_size, + num_flatten_dims=2, + bias_attr=False, + ) sgd.minimize(static_loss) static_param_updated = dict() @@ -626,18 +709,22 @@ class TestProgramStatePartial(unittest.TestCase): x_data = 
x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -647,8 +734,9 @@ class TestProgramStatePartial(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t @@ -661,30 +749,36 @@ class TestProgramStatePartial(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - #fluid.load(test_program, "./test_1", None ) + # fluid.load(test_program, "./test_1", None ) program_state = fluid.load_program_state( - os.path.join(temp_dir.name, 'test_1')) + os.path.join(temp_dir.name, 'test_1') + ) program_state_1 = fluid.load_program_state( - os.path.join(temp_dir.name, 'test_1.pdparams')) + os.path.join(temp_dir.name, 'test_1.pdparams') + ) program_state_2 = fluid.load_program_state( - os.path.join(temp_dir.name, 'test_1.pdopt')) + os.path.join(temp_dir.name, 'test_1.pdopt') + ) program_state_3 = fluid.load_program_state( - os.path.join(temp_dir.name, 'test_1.pdmodel')) + os.path.join(temp_dir.name, 'test_1.pdmodel') + ) fluid.set_program_state(test_program, program_state) for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -694,8 +788,9 @@ class TestProgramStatePartial(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -703,8 +798,9 @@ class TestProgramStatePartial(unittest.TestCase): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = 
np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -714,8 +810,9 @@ class TestProgramStatePartial(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -723,8 +820,9 @@ class TestProgramStatePartial(unittest.TestCase): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -734,8 +832,9 @@ class TestProgramStatePartial(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -743,18 +842,21 @@ class TestProgramStatePartial(unittest.TestCase): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() class TestVariableInit(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_variable_init(self): @@ -767,8 +869,10 @@ class TestVariableInit(unittest.TestCase): exe.run(fluid.default_startup_program()) temp_dir = tempfile.TemporaryDirectory() - fluid.save(fluid.default_main_program(), - os.path.join(temp_dir.name, "test_path")) + fluid.save( + fluid.default_main_program(), + os.path.join(temp_dir.name, "test_path"), + ) def set_var(var, ndarray): t = var.get_tensor() @@ -789,35 +893,43 @@ class TestVariableInit(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) - parameter_list = list(filter(fluid.io.is_parameter, - program.list_vars())) + parameter_list = list( + filter(fluid.io.is_parameter, program.list_vars()) + ) - fluid.core._create_loaded_parameter(parameter_list, new_scope, - exe._default_executor) + fluid.core._create_loaded_parameter( + parameter_list, new_scope, exe._default_executor + ) parameter_file_name = os.path.join(temp_dir.name, "test_path.pdparams") with open(parameter_file_name, 'rb') as f: load_dict = pickle.load(f) for v in parameter_list: - assert v.name in load_dict, \ - "Can not find [{}] in model file [{}]".format( - v.name, parameter_file_name) + assert ( + v.name in load_dict + ), "Can not find [{}] in model file [{}]".format( + v.name, parameter_file_name + ) new_v = new_scope.find_var(v.name) set_var(new_v, load_dict[v.name]) opt_list = list( - filter(fluid.io.is_belong_to_optimizer, program.list_vars())) + filter(fluid.io.is_belong_to_optimizer, 
program.list_vars()) + ) - fluid.core._create_loaded_parameter(opt_list, new_scope, - exe._default_executor) + fluid.core._create_loaded_parameter( + opt_list, new_scope, exe._default_executor + ) opt_file_name = os.path.join(temp_dir.name, "test_path.pdopt") with open(opt_file_name, 'rb') as f: load_dict = pickle.load(f) for v in opt_list: - assert v.name in load_dict, \ - "Can not find [{}] in model file [{}]".format( - v.name, opt_file_name) + assert ( + v.name in load_dict + ), "Can not find [{}] in model file [{}]".format( + v.name, opt_file_name + ) new_v = new_scope.find_var(v.name) set_var(new_v, load_dict[v.name]) @@ -825,8 +937,9 @@ class TestVariableInit(unittest.TestCase): base_map = {} for var in program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update base_map[var.name] = t @@ -840,7 +953,6 @@ class TestVariableInit(unittest.TestCase): class TestLoadFromOldInterface(unittest.TestCase): - def setUp(self): if os.path.exists("test_path.pdparams"): os.remove("test_path.pdparams") @@ -851,8 +963,11 @@ class TestLoadFromOldInterface(unittest.TestCase): self.temp_dir = tempfile.TemporaryDirectory() def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def tearDown(self): self.temp_dir.cleanup() @@ -870,29 +985,32 @@ class TestLoadFromOldInterface(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) test_clone_program = fluid.default_main_program().clone() sgd.minimize(static_loss) @@ -910,18 +1028,22 @@ class TestLoadFromOldInterface(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = 
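TestVariableInit above restores parameters by hand: it unpickles the .pdparams and .pdopt files written by fluid.save into dicts keyed by variable name and asserts every expected name is present before copying values into a fresh scope. A stand-alone sketch of just that file-level check, with a hypothetical parameter dict (the real on-disk layout is whatever fluid.save pickled; this only mirrors how the test reads it back):

    import os
    import pickle
    import tempfile

    import numpy as np

    params = {"embedding_para": np.zeros((4, 3), dtype="float32")}   # hypothetical

    with tempfile.TemporaryDirectory() as tmp:
        parameter_file_name = os.path.join(tmp, "test_path.pdparams")
        with open(parameter_file_name, "wb") as f:
            pickle.dump(params, f)

        with open(parameter_file_name, "rb") as f:
            load_dict = pickle.load(f)

        for name in params:
            assert name in load_dict, "Can not find [{}] in model file [{}]".format(
                name, parameter_file_name
            )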
exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -931,16 +1053,17 @@ class TestLoadFromOldInterface(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t - #fluid.save(main_program, "./test_1") + # fluid.save(main_program, "./test_1") fluid.io.save_persistables( - exe, os.path.join(self.temp_dir.name, "test_path"), - main_program) + exe, os.path.join(self.temp_dir.name, "test_path"), main_program + ) # set var to zero for var in main_program.list_vars(): @@ -948,18 +1071,21 @@ class TestLoadFromOldInterface(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - fluid.load(main_program, - os.path.join(self.temp_dir.name, "test_path"), exe) + fluid.load( + main_program, os.path.join(self.temp_dir.name, "test_path"), exe + ) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -971,13 +1097,19 @@ class TestLoadFromOldInterface(unittest.TestCase): var.desc.set_shape(new_shape) with self.assertRaises(RuntimeError): - fluid.load(main_program, - os.path.join(self.temp_dir.name, "test_path"), exe) + fluid.load( + main_program, + os.path.join(self.temp_dir.name, "test_path"), + exe, + ) # check unused parameter - fluid.load(test_clone_program, - os.path.join(self.temp_dir.name, "test_path"), exe) + fluid.load( + test_clone_program, + os.path.join(self.temp_dir.name, "test_path"), + exe, + ) def test_load_from_old_interface_var_list(self): seed = 90 @@ -992,29 +1124,32 @@ class TestLoadFromOldInterface(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], 
dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) test_clone_program = fluid.default_main_program().clone() sgd.minimize(static_loss) @@ -1032,18 +1167,22 @@ class TestLoadFromOldInterface(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -1053,17 +1192,19 @@ class TestLoadFromOldInterface(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t - #fluid.save(main_program, "./test_1") + # fluid.save(main_program, "./test_1") fluid.io.save_persistables( exe, os.path.join(self.temp_dir.name, "test_static_load_var_list"), - main_program) + main_program, + ) # set var to zero var_list = [] @@ -1074,34 +1215,40 @@ class TestLoadFromOldInterface(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) fluid.load( main_program, os.path.join(self.temp_dir.name, "test_static_load_var_list"), - exe, var_list) + exe, + var_list, + ) var_list_names = [var.name for var in var_list] for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) if var.name in var_list_names: # loaded vars base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) else: - #not loaded vars + # not loaded vars self.assertTrue(np.sum(np.abs(new_t)) == 0) class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + 
fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_load_from_old_interface(self): seed = 90 @@ -1117,29 +1264,32 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd.minimize(static_loss) static_param_updated = dict() static_param_init = dict() @@ -1155,18 +1305,22 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] static_last_cell_value = out[2] @@ -1176,17 +1330,17 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t save_dir = os.path.join(temp_dir.name, "test_path") - #fluid.save(main_program, "./test_1") - fluid.io.save_persistables(exe, - save_dir, - main_program, - filename="model_single") + # fluid.save(main_program, "./test_1") + fluid.io.save_persistables( + exe, save_dir, main_program, filename="model_single" + ) # set var to zero for var in main_program.list_vars(): @@ -1194,19 +1348,25 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() 
ten.set(np.zeros_like(np.array(ten)), place)
- new_t = np.array(fluid.global_scope().find_var(
- var.name).get_tensor())
+ new_t = np.array(
+ fluid.global_scope().find_var(var.name).get_tensor()
+ )
# make sure all the paramerter or optimizer var have been set to zero
self.assertTrue(np.sum(np.abs(new_t)) == 0)
file_model_path = os.path.join(save_dir, "model_single")
- fluid.load(main_program, file_model_path, exe,
- fluid.io.get_program_persistable_vars(main_program))
+ fluid.load(
+ main_program,
+ file_model_path,
+ exe,
+ fluid.io.get_program_persistable_vars(main_program),
+ )
for var in main_program.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
- new_t = np.array(fluid.global_scope().find_var(
- var.name).get_tensor())
+ new_t = np.array(
+ fluid.global_scope().find_var(var.name).get_tensor()
+ )
base_t = base_map[var.name]
np.testing.assert_array_equal(new_t, base_t)
@@ -1221,21 +1381,32 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase):
var.desc.set_shape(new_shape)
with self.assertRaises(RuntimeError):
- fluid.load(main_program, file_model_path, exe,
- fluid.io.get_program_persistable_vars(main_program))
-
- fluid.io.save_params(exe,
- "test_path",
- main_program,
- filename="model_single")
+ fluid.load(
+ main_program,
+ file_model_path,
+ exe,
+ fluid.io.get_program_persistable_vars(main_program),
+ )
+
+ fluid.io.save_params(
+ exe, "test_path", main_program, filename="model_single"
+ )
with self.assertRaises(RuntimeError):
- fluid.load(main_program, file_model_path, exe,
- fluid.io.get_program_persistable_vars(main_program))
+ fluid.load(
+ main_program,
+ file_model_path,
+ exe,
+ fluid.io.get_program_persistable_vars(main_program),
+ )
# check when executor is None
with self.assertRaises(ValueError):
- fluid.load(main_program, file_model_path, None,
- fluid.io.get_program_persistable_vars(main_program))
+ fluid.load(
+ main_program,
+ file_model_path,
+ None,
+ fluid.io.get_program_persistable_vars(main_program),
+ )
# check when var list is None
with self.assertRaises(ValueError):
@@ -1243,17 +1414,20 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase):
# check save params, load var_list = get_program_persistable_vars
with self.assertRaises(RuntimeError):
- temp_var = framework.Variable(main_program.global_block(),
- shape=[1],
- name="test_temp_var")
+ temp_var = framework.Variable(
+ main_program.global_block(), shape=[1], name="test_temp_var"
+ )
all_var_list = list(main_program.list_vars())
- fluid.load(main_program, file_model_path, exe,
- all_var_list + [temp_var])
+ fluid.load(
+ main_program,
+ file_model_path,
+ exe,
+ all_var_list + [temp_var],
+ )
temp_dir.cleanup()
class TestProgramStateOldSave(unittest.TestCase):
-
def setUp(self):
self.test_dygraph = True
self.temp_dir = tempfile.TemporaryDirectory()
@@ -1262,8 +1436,11 @@ class TestProgramStateOldSave(unittest.TestCase):
self.temp_dir.cleanup()
def set_place(self):
- return fluid.CPUPlace(
- ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)
+ return (
+ fluid.CPUPlace()
+ if not core.is_compiled_with_cuda()
+ else fluid.CUDAPlace(0)
+ )
def test_ptb_rnn_cpu_float32(self):
seed = 90
@@ -1278,36 +1455,41 @@ class TestProgramStateOldSave(unittest.TestCase):
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
- ptb_model = PtbModel("ptb_model",
- hidden_size=hidden_size,
- vocab_size=vocab_size,
- num_layers=num_layers,
- num_steps=num_steps,
- init_scale=init_scale)
+ ptb_model = PtbModel(
+ "ptb_model",
+ hidden_size=hidden_size,
+ vocab_size=vocab_size,
+ num_layers=num_layers,
+ num_steps=num_steps,
+ init_scale=init_scale,
+ )
place = self.set_place()
exe = fluid.Executor(place)
sgd = Adam(learning_rate=1e-3)
- x = fluid.layers.data(name="x",
- shape=[-1, num_steps],
- dtype='int64')
+ x = fluid.layers.data(
+ name="x", shape=[-1, num_steps], dtype='int64'
+ )
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32')
- init_hidden = fluid.layers.data(name="init_hidden",
- shape=[1],
- dtype='float32')
- init_cell = fluid.layers.data(name="init_cell",
- shape=[1],
- dtype='float32')
+ init_hidden = fluid.layers.data(
+ name="init_hidden", shape=[1], dtype='float32'
+ )
+ init_cell = fluid.layers.data(
+ name="init_cell", shape=[1], dtype='float32'
+ )
static_loss, static_last_hidden, static_last_cell = ptb_model(
- x, y, init_hidden, init_cell)
+ x, y, init_hidden, init_cell
+ )
test_program = fluid.default_main_program().clone(for_test=True)
- add_1 = fluid.layers.fc(static_last_hidden,
- size=hidden_size,
- num_flatten_dims=2,
- bias_attr=False)
+ add_1 = fluid.layers.fc(
+ static_last_hidden,
+ size=hidden_size,
+ num_flatten_dims=2,
+ bias_attr=False,
+ )
sgd.minimize(static_loss)
static_param_updated = dict()
@@ -1324,18 +1506,22 @@ class TestProgramStateOldSave(unittest.TestCase):
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
- (num_layers, batch_size, hidden_size), dtype='float32')
- init_cell_data = np.zeros((num_layers, batch_size, hidden_size),
- dtype='float32')
+ (num_layers, batch_size, hidden_size), dtype='float32'
+ )
+ init_cell_data = np.zeros(
+ (num_layers, batch_size, hidden_size), dtype='float32'
+ )
fetch_list = [static_loss, static_last_hidden, static_last_cell]
- out = exe.run(fluid.default_main_program(),
- feed={
- "x": x_data,
- "y": y_data,
- "init_hidden": init_hidden_data,
- "init_cell": init_cell_data
- },
- fetch_list=fetch_list)
+ out = exe.run(
+ fluid.default_main_program(),
+ feed={
+ "x": x_data,
+ "y": y_data,
+ "init_hidden": init_hidden_data,
+ "init_cell": init_cell_data,
+ },
+ fetch_list=fetch_list,
+ )
static_loss_value = out[0]
static_last_hidden_value = out[1]
static_last_cell_value = out[2]
@@ -1345,8 +1531,9 @@ class TestProgramStateOldSave(unittest.TestCase):
base_map = {}
for var in main_program.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
- t = np.array(fluid.global_scope().find_var(
- var.name).get_tensor())
+ t = np.array(
+ fluid.global_scope().find_var(var.name).get_tensor()
+ )
# make sure all the paramerter or optimizer var have been update
self.assertTrue(np.sum(np.abs(t)) != 0)
base_map[var.name] = t
@@ -1359,8 +1546,9 @@ class TestProgramStateOldSave(unittest.TestCase):
ten = fluid.global_scope().find_var(var.name).get_tensor()
ten.set(np.zeros_like(np.array(ten)), place)
- new_t = np.array(fluid.global_scope().find_var(
- var.name).get_tensor())
+ new_t = np.array(
+ fluid.global_scope().find_var(var.name).get_tensor()
+ )
# make sure all the paramerter or optimizer var have been set to zero
self.assertTrue(np.sum(np.abs(new_t)) == 0)
@@ -1386,7 +1574,8 @@ class TestProgramStateOldSave(unittest.TestCase):
# case 3: load with var_list
program_state = fluid.load_program_state(
- save_dir, main_program.all_parameters())
+ save_dir, main_program.all_parameters()
+ )
fluid.set_program_state(main_program, program_state)
self.check_in_static(main_program, base_map)
@@ -1402,23 +1591,27 @@ class
TestProgramStateOldSave(unittest.TestCase): os.symlink(target, link_name) except AttributeError: import ctypes + kernel_dll = ctypes.windll.LoadLibrary("kernel32.dll") kernel_dll.CreateSymbolicLinkA(target, link_name, 0) def check_in_static(self, main_program, base_map): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) class TestProgramStateOldSaveSingleModel(unittest.TestCase): - def set_place(self): - return fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0) + return ( + fluid.CPUPlace() + if not core.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) def test_ptb_rnn_cpu_float32(self): seed = 90 @@ -1434,36 +1627,41 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) test_program = fluid.default_main_program().clone(for_test=True) - add_1 = fluid.layers.fc(static_last_hidden, - size=hidden_size, - num_flatten_dims=2, - bias_attr=False) + add_1 = fluid.layers.fc( + static_last_hidden, + size=hidden_size, + num_flatten_dims=2, + bias_attr=False, + ) sgd.minimize(static_loss) static_param_updated = dict() @@ -1480,18 +1678,22 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='float32') + (num_layers, batch_size, hidden_size), dtype='float32' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) static_loss_value = out[0] static_last_hidden_value = out[1] 
static_last_cell_value = out[2] @@ -1501,17 +1703,17 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t save_dir = os.path.join(temp_dir.name, "test_program_2") - fluid.io.save_persistables(exe, - save_dir, - main_program, - filename="model_1") + fluid.io.save_persistables( + exe, save_dir, main_program, filename="model_1" + ) # set var to zero for var in main_program.list_vars(): @@ -1519,21 +1721,24 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - #fluid.load(test_program, "./test_1", None ) + # fluid.load(test_program, "./test_1", None ) program_state = fluid.load_program_state( os.path.join(save_dir, "model_1"), - var_list=fluid.io.get_program_persistable_vars(main_program)) + var_list=fluid.io.get_program_persistable_vars(main_program), + ) fluid.set_program_state(main_program, program_state) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -1541,30 +1746,34 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): fluid.load_program_state(os.path.join(save_dir, "model_1")) with self.assertRaises(TypeError): - fluid.load_program_state(os.path.join(save_dir, "model_1"), - var_list=["str"]) + fluid.load_program_state( + os.path.join(save_dir, "model_1"), var_list=["str"] + ) with self.assertRaises(RuntimeError): fluid.load_program_state( os.path.join(save_dir, "model_1"), var_list=[ main_program.global_block().create_var( - name="fake_var_name", persistable=True) - ]) + name="fake_var_name", persistable=True + ) + ], + ) temp_dir.cleanup() class TestStaticSaveLoadPickle(unittest.TestCase): - def test_pickle_protocol(self): # enable static mode paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="static_save_load_large_x", - shape=[None, 10], - dtype='float32') + x = paddle.static.data( + name="static_save_load_large_x", + shape=[None, 10], + dtype='float32', + ) z = paddle.static.nn.fc(x, 10, bias_attr=False) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -1574,15 +1783,17 @@ class TestStaticSaveLoadPickle(unittest.TestCase): base_map = {} for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t temp_dir = tempfile.TemporaryDirectory() - path = os.path.join(temp_dir.name, 
"test_static_save_load_pickle", - "pickle_protocol") + path = os.path.join( + temp_dir.name, "test_static_save_load_pickle", "pickle_protocol" + ) with self.assertRaises(ValueError): paddle.fluid.save(prog, path, 2.0) @@ -1603,26 +1814,28 @@ class TestStaticSaveLoadPickle(unittest.TestCase): # set var to zero for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var( - var.name).get_tensor() + ten = ( + fluid.global_scope().find_var(var.name).get_tensor() + ) ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) self.assertTrue(np.sum(np.abs(new_t)) == 0) paddle.fluid.load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) class TestSaveLoadInferenceModel(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() self.model_path = os.path.join(self.temp_dir.name, 'no_params') @@ -1641,8 +1854,11 @@ class TestSaveLoadInferenceModel(unittest.TestCase): paddle.static.save_inference_model(self.model_path, [x], [y], exe) - [inference_program, feed_target_names, fetch_targets - ] = (paddle.static.load_inference_model(self.model_path, exe)) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(self.model_path, exe) self.assertEqual(feed_target_names, ['x']) self.assertEqual(fetch_targets[0].shape, (10, 10)) diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py index 3e3f41e58d2e4afed144a9de8fd539473b41ee1a..fe578907b866e7e3bb0bd627ccbf1a8b7ff6f5e3 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py @@ -25,10 +25,10 @@ import tempfile import os -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") +@unittest.skipIf( + not core.supports_bfloat16(), "place does not support BF16 evaluation" +) class TestSaveLoadBF16(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -51,36 +51,41 @@ class TestSaveLoadBF16(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - ptb_model = PtbModel("ptb_model", - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) + ptb_model = PtbModel( + "ptb_model", + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale, + ) place = self.set_place() exe = fluid.Executor(place) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data(name="x", - shape=[-1, num_steps], - dtype='int64') + x = fluid.layers.data( + name="x", shape=[-1, num_steps], dtype='int64' + ) y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data(name="init_hidden", - shape=[1], - dtype='float32') - init_cell = fluid.layers.data(name="init_cell", - shape=[1], - dtype='float32') + init_hidden = fluid.layers.data( + 
name="init_hidden", shape=[1], dtype='float32' + ) + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32' + ) static_loss, static_last_hidden, static_last_cell = ptb_model( - x, y, init_hidden, init_cell) + x, y, init_hidden, init_cell + ) sgd = paddle.static.amp.bf16.decorate_bf16( sgd, amp_lists=paddle.static.amp.bf16.AutoMixedPrecisionListsBF16( - custom_fp32_list={'transpose2', 'concat'}), + custom_fp32_list={'transpose2', 'concat'} + ), use_bf16_guard=False, - use_pure_bf16=True) + use_pure_bf16=True, + ) sgd.minimize(static_loss, framework.default_startup_program()) out = exe.run(framework.default_startup_program()) @@ -90,30 +95,35 @@ class TestSaveLoadBF16(unittest.TestCase): y_data = np.arange(1, 13).reshape(4, 3).astype('int64') x_data = x_data.reshape((-1, num_steps, 1)) y_data = y_data.reshape((-1, 1)) - #TODO investigate initializing model with "float32" instead of "uint16" as it was before + # TODO investigate initializing model with "float32" instead of "uint16" as it was before # slice_op PR(datatypes in model graph are different than datatypes during runtime because of that) init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='uint16') - init_cell_data = np.zeros((num_layers, batch_size, hidden_size), - dtype='uint16') + (num_layers, batch_size, hidden_size), dtype='uint16' + ) + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='uint16' + ) fetch_list = [static_loss, static_last_hidden, static_last_cell] - out = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": y_data, - "init_hidden": init_hidden_data, - "init_cell": init_cell_data - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data, + }, + fetch_list=fetch_list, + ) # get value before save main_program = framework.default_main_program() base_map = {} for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t @@ -126,18 +136,23 @@ class TestSaveLoadBF16(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - fluid.load(main_program, - os.path.join(self.temp_dir.name, "test_1.pdparams"), exe) + fluid.load( + main_program, + os.path.join(self.temp_dir.name, "test_1.pdparams"), + exe, + ) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load_large.py b/python/paddle/fluid/tests/unittests/test_static_save_load_large.py index 61568b50c1ba1992ce01445bba0bb76892e52fe9..ddae3373b5a8ab195cee1568503a2ab499fb404a 100644 --- 
a/python/paddle/fluid/tests/unittests/test_static_save_load_large.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load_large.py @@ -26,15 +26,16 @@ LARGE_PARAM = 2**26 class TestStaticSaveLoadLargeParameters(unittest.TestCase): - def test_large_parameters_static_save(self): # enable static mode paddle.enable_static() with new_program_scope(): # create network - x = paddle.static.data(name="static_save_load_large_x", - shape=[None, 10], - dtype='float32') + x = paddle.static.data( + name="static_save_load_large_x", + shape=[None, 10], + dtype='float32', + ) z = paddle.static.nn.fc(x, LARGE_PARAM, bias_attr=False) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -44,14 +45,16 @@ class TestStaticSaveLoadLargeParameters(unittest.TestCase): base_map = {} for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t temp_dir = tempfile.TemporaryDirectory() - path = os.path.join(temp_dir.name, - "test_static_save_load_large_param") + path = os.path.join( + temp_dir.name, "test_static_save_load_large_param" + ) path = os.path.join(path, "static_save") protocol = 4 paddle.fluid.save(prog, path, pickle_protocol=protocol) @@ -61,16 +64,18 @@ class TestStaticSaveLoadLargeParameters(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) self.assertTrue(np.sum(np.abs(new_t)) == 0) paddle.fluid.load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -80,16 +85,18 @@ class TestStaticSaveLoadLargeParameters(unittest.TestCase): ten = fluid.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) self.assertTrue(np.sum(np.abs(new_t)) == 0) program_state = fluid.load_program_state(path) fluid.set_program_state(prog, program_state) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - new_t = np.array(fluid.global_scope().find_var( - var.name).get_tensor()) + new_t = np.array( + fluid.global_scope().find_var(var.name).get_tensor() + ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py b/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py index 0e22905e81d69da3d5d24d65b0f3a286be748cef..c6fd490cb60ddcba976eeab641907beace525123 100644 --- a/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py @@ -17,12 +17,11 @@ import unittest class StaticShapeInferrenceTest(unittest.TestCase): - 
def test_static_graph(self): paddle.enable_static() - data = paddle.fluid.layers.data(name="x", - shape=[-1, 2], - dtype='float32') + data = paddle.fluid.layers.data( + name="x", shape=[-1, 2], dtype='float32' + ) shape = paddle.fluid.layers.shape(data) # shape should be [-1, 2] x = paddle.fluid.layers.uniform_random(shape) self.assertEqual(x.shape, data.shape) diff --git a/python/paddle/fluid/tests/unittests/test_std_layer.py b/python/paddle/fluid/tests/unittests/test_std_layer.py index a724e320c95454aac1be415c5fe436b30b76d75c..cc3e7740f45938942d95ca73e6cdc2ce77b2ec5c 100644 --- a/python/paddle/fluid/tests/unittests/test_std_layer.py +++ b/python/paddle/fluid/tests/unittests/test_std_layer.py @@ -20,14 +20,13 @@ import paddle def ref_std(x, axis=None, unbiased=True, keepdim=False): ddof = 1 if unbiased else 0 if isinstance(axis, int): - axis = (axis, ) + axis = (axis,) if axis is not None: axis = tuple(axis) return np.std(x, axis=axis, ddof=ddof, keepdims=keepdim) class TestStdAPI(unittest.TestCase): - def setUp(self): self.dtype = 'float64' self.shape = [1, 3, 4, 10] @@ -36,9 +35,11 @@ class TestStdAPI(unittest.TestCase): self.unbiased = True self.set_attrs() self.x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - self.place=paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def set_attrs(self): pass @@ -68,43 +69,36 @@ class TestStdAPI(unittest.TestCase): class TestStdAPI_dtype(TestStdAPI): - def set_attrs(self): self.dtype = 'float32' class TestStdAPI_axis_int(TestStdAPI): - def set_attrs(self): self.axis = 2 class TestStdAPI_axis_list(TestStdAPI): - def set_attrs(self): self.axis = [1, 2] class TestStdAPI_axis_tuple(TestStdAPI): - def set_attrs(self): self.axis = (1, 3) class TestStdAPI_keepdim(TestStdAPI): - def set_attrs(self): self.keepdim = False class TestStdAPI_unbiased(TestStdAPI): - def set_attrs(self): self.unbiased = False class TestStdAPI_alias(unittest.TestCase): - def test_alias(self): paddle.disable_static() x = paddle.to_tensor(np.array([10, 12], 'float32')) @@ -117,7 +111,6 @@ class TestStdAPI_alias(unittest.TestCase): class TestStdError(unittest.TestCase): - def test_error(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.fluid.data('X', [2, 3, 4], 'int32') diff --git a/python/paddle/fluid/tests/unittests/test_stft_op.py b/python/paddle/fluid/tests/unittests/test_stft_op.py index 8110f1d805fbb1367fe95e387109a698578e497f..91206c3da1261532a02fc15ca633437f49a6dd62 100644 --- a/python/paddle/fluid/tests/unittests/test_stft_op.py +++ b/python/paddle/fluid/tests/unittests/test_stft_op.py @@ -45,14 +45,14 @@ def frame_from_librosa(x, frame_length, hop_length, axis=-1): def stft_np(x, window, n_fft, hop_length, **kwargs): frames = frame_from_librosa(x, n_fft, hop_length) - frames = np.multiply(frames.transpose([0, 2, 1]), - window).transpose([0, 2, 1]) + frames = np.multiply(frames.transpose([0, 2, 1]), window).transpose( + [0, 2, 1] + ) res = np.fft.rfft(frames, axis=1) return res class TestStftOp(OpTest): - def setUp(self): self.op_type = "stft" self.shape, self.type, self.attrs = self.initTestCase() @@ -61,10 +61,9 @@ class TestStftOp(OpTest): 'Window': np.hamming(self.attrs['n_fft']).astype(self.type), } self.outputs = { - 'Out': - stft_np(x=self.inputs['X'], - window=self.inputs['Window'], - **self.attrs) + 'Out': stft_np( + x=self.inputs['X'], window=self.inputs['Window'], **self.attrs + ) } def 
initTestCase(self): diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index 05f4e4a99416805d7fa943a25a70a1b4800d2736..7ce7db1ba78500b2bee05655b25b5c99cb9f2e4a 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -37,32 +37,51 @@ def strided_slice_native_forward(input, axes, starts, ends, strides): stride[axes[i]] = strides[i] result = { - 1: lambda input, start, end, stride: input[start[0]:end[0]:stride[0]], - 2: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1]], - 3: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2]], - 4: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3]], - 5: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3], start[4]:end[4]:stride[4]], - 6: lambda input, start, end, stride: input[start[0]:end[0]:stride[0], \ - start[1]:end[1]:stride[1], start[2]:end[2]:stride[2], start[3]:end[3]:stride[3], \ - start[4]:end[4]:stride[4], start[5]:end[5]:stride[5]] + 1: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0] + ], + 2: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], start[1] : end[1] : stride[1] + ], + 3: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + ], + 4: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + ], + 5: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + start[4] : end[4] : stride[4], + ], + 6: lambda input, start, end, stride: input[ + start[0] : end[0] : stride[0], + start[1] : end[1] : stride[1], + start[2] : end[2] : stride[2], + start[3] : end[3] : stride[3], + start[4] : end[4] : stride[4], + start[5] : end[5] : stride[5], + ], }[dim](input, start, end, stride) return result class TestStrideSliceOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'strided_slice' self.python_api = paddle.strided_slice - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.inputs = {'Input': self.input} self.outputs = {'Out': self.output} @@ -71,7 +90,7 @@ class TestStrideSliceOp(OpTest): 'starts': self.starts, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def test_check_output(self): @@ -90,7 +109,6 @@ class TestStrideSliceOp(OpTest): class TestStrideSliceOp1(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -101,7 +119,6 @@ class TestStrideSliceOp1(TestStrideSliceOp): class TestStrideSliceOp2(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -112,7 +129,6 @@ class TestStrideSliceOp2(TestStrideSliceOp): class TestStrideSliceOp3(TestStrideSliceOp): - def 
initTestCase(self): self.input = np.random.rand(100) self.axes = [0] @@ -123,7 +139,6 @@ class TestStrideSliceOp3(TestStrideSliceOp): class TestStrideSliceOp4(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 4, 10) self.axes = [0, 1, 2] @@ -134,7 +149,6 @@ class TestStrideSliceOp4(TestStrideSliceOp): class TestStrideSliceOp5(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -145,7 +159,6 @@ class TestStrideSliceOp5(TestStrideSliceOp): class TestStrideSliceOp6(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -156,7 +169,6 @@ class TestStrideSliceOp6(TestStrideSliceOp): class TestStrideSliceOp7(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(5, 5, 5) self.axes = [0, 1, 2] @@ -167,7 +179,6 @@ class TestStrideSliceOp7(TestStrideSliceOp): class TestStrideSliceOp8(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1) self.axes = [1] @@ -178,7 +189,6 @@ class TestStrideSliceOp8(TestStrideSliceOp): class TestStrideSliceOp9(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(1, 100, 1) self.axes = [1] @@ -189,7 +199,6 @@ class TestStrideSliceOp9(TestStrideSliceOp): class TestStrideSliceOp10(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(10, 10) self.axes = [0, 1] @@ -200,7 +209,6 @@ class TestStrideSliceOp10(TestStrideSliceOp): class TestStrideSliceOp11(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4) self.axes = [0, 1, 2, 3] @@ -211,7 +219,6 @@ class TestStrideSliceOp11(TestStrideSliceOp): class TestStrideSliceOp12(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5) self.axes = [0, 1, 2, 3, 4] @@ -222,7 +229,6 @@ class TestStrideSliceOp12(TestStrideSliceOp): class TestStrideSliceOp13(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8) self.axes = [0, 1, 2, 3, 4, 5] @@ -233,7 +239,6 @@ class TestStrideSliceOp13(TestStrideSliceOp): class TestStrideSliceOp14(TestStrideSliceOp): - def initTestCase(self): self.input = np.random.rand(4, 4, 4, 4) self.axes = [1, 2, 3] @@ -244,13 +249,11 @@ class TestStrideSliceOp14(TestStrideSliceOp): class TestStrideSliceOpBool(TestStrideSliceOp): - def test_check_grad(self): pass class TestStrideSliceOpBool1D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(100).astype("bool") self.axes = [0] @@ -261,7 +264,6 @@ class TestStrideSliceOpBool1D(TestStrideSliceOpBool): class TestStrideSliceOpBool2D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(10, 10).astype("bool") self.axes = [0, 1] @@ -272,7 +274,6 @@ class TestStrideSliceOpBool2D(TestStrideSliceOpBool): class TestStrideSliceOpBool3D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 4, 10).astype("bool") self.axes = [0, 1, 2] @@ -283,7 +284,6 @@ class TestStrideSliceOpBool3D(TestStrideSliceOpBool): class TestStrideSliceOpBool4D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4).astype("bool") self.axes = [0, 1, 2, 3] @@ -294,7 +294,6 @@ class TestStrideSliceOpBool4D(TestStrideSliceOpBool): class TestStrideSliceOpBool5D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 4, 5).astype("bool") self.axes = [0, 1, 2, 3, 4] @@ -305,7 +304,6 @@ class TestStrideSliceOpBool5D(TestStrideSliceOpBool): class 
TestStrideSliceOpBool6D(TestStrideSliceOpBool): - def initTestCase(self): self.input = np.random.rand(3, 3, 3, 6, 7, 8).astype("bool") self.axes = [0, 1, 2, 3, 4, 5] @@ -316,15 +314,15 @@ class TestStrideSliceOpBool6D(TestStrideSliceOpBool): class TestStridedSliceOp_starts_ListTensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.config() starts_tensor = [] for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + starts_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.outputs = {'Out': self.output} @@ -333,7 +331,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): 'starts': self.starts_infer, 'ends': self.ends, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -343,9 +341,9 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.starts_infer = [1, 10, 2] @@ -357,15 +355,15 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): class TestStridedSliceOp_ends_ListTensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.config() ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {'Input': self.input, 'EndsTensorList': ends_tensor} self.outputs = {'Out': self.output} @@ -374,7 +372,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): 'starts': self.starts, 'ends': self.ends_infer, 'strides': self.strides, - 'infer_flags': self.infer_flags + 'infer_flags': self.infer_flags, } def config(self): @@ -384,9 +382,9 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 2] self.infer_flags = [1, -1, 1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) self.ends_infer = [3, 1, 4] @@ -398,13 +396,12 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): class TestStridedSliceOp_starts_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.config() self.inputs = { 'Input': self.input, - "StartsTensor": np.array(self.starts, dtype="int32") + "StartsTensor": np.array(self.starts, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -422,9 +419,9 @@ class TestStridedSliceOp_starts_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output() @@ -434,13 +431,12 @@ class TestStridedSliceOp_starts_Tensor(OpTest): class TestStridedSliceOp_ends_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.config() self.inputs = { 'Input': self.input, - "EndsTensor": np.array(self.ends, dtype="int32") + 
"EndsTensor": np.array(self.ends, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -458,9 +454,9 @@ class TestStridedSliceOp_ends_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output() @@ -470,19 +466,19 @@ class TestStridedSliceOp_ends_Tensor(OpTest): class TestStridedSliceOp_listTensor_Tensor(OpTest): - def setUp(self): self.config() ends_tensor = [] for index, ele in enumerate(self.ends): - ends_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + ends_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.op_type = "strided_slice" self.inputs = { 'Input': self.input, "StartsTensor": np.array(self.starts, dtype="int32"), - "EndsTensorList": ends_tensor + "EndsTensorList": ends_tensor, } self.outputs = {'Out': self.output} self.attrs = { @@ -500,9 +496,9 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, 1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output() @@ -512,13 +508,12 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): class TestStridedSliceOp_strides_Tensor(OpTest): - def setUp(self): self.op_type = "strided_slice" self.config() self.inputs = { 'Input': self.input, - "StridesTensor": np.array(self.strides, dtype="int32") + "StridesTensor": np.array(self.strides, dtype="int32"), } self.outputs = {'Out': self.output} self.attrs = { @@ -536,9 +531,9 @@ class TestStridedSliceOp_strides_Tensor(OpTest): self.axes = [0, 1, 2] self.strides = [1, -1, 1] self.infer_flags = [-1, -1, -1] - self.output = strided_slice_native_forward(self.input, self.axes, - self.starts, self.ends, - self.strides) + self.output = strided_slice_native_forward( + self.input, self.axes, self.starts, self.ends, self.strides + ) def test_check_output(self): self.check_output() @@ -549,48 +544,50 @@ class TestStridedSliceOp_strides_Tensor(OpTest): # Test python API class TestStridedSliceAPI(unittest.TestCase): - def test_1(self): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data(name='starts', - shape=[3], - dtype='int32', - append_batch_size=False) - ends = fluid.layers.data(name='ends', - shape=[3], - dtype='int32', - append_batch_size=False) - strides = fluid.layers.data(name='strides', - shape=[3], - dtype='int32', - append_batch_size=False) - - x = fluid.layers.data(name="x", - shape=[3, 4, 5, 6], - append_batch_size=False, - dtype="float64") - out_1 = paddle.strided_slice(x, - axes=[0, 1, 2], - starts=[-3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_2 = paddle.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, -1], - strides=[1, 1, 1]) - out_3 = paddle.strided_slice(x, - axes=[0, 1, 3], - starts=[minus_3, 0, 2], - ends=[3, 100, minus_1], - strides=[1, 1, 1]) - out_4 = paddle.strided_slice(x, - axes=[0, 1, 2], - starts=starts, - ends=ends, - strides=strides) 
+ starts = fluid.layers.data( + name='starts', shape=[3], dtype='int32', append_batch_size=False + ) + ends = fluid.layers.data( + name='ends', shape=[3], dtype='int32', append_batch_size=False + ) + strides = fluid.layers.data( + name='strides', shape=[3], dtype='int32', append_batch_size=False + ) + + x = fluid.layers.data( + name="x", + shape=[3, 4, 5, 6], + append_batch_size=False, + dtype="float64", + ) + out_1 = paddle.strided_slice( + x, + axes=[0, 1, 2], + starts=[-3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_2 = paddle.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, -1], + strides=[1, 1, 1], + ) + out_3 = paddle.strided_slice( + x, + axes=[0, 1, 3], + starts=[minus_3, 0, 2], + ends=[3, 100, minus_1], + strides=[1, 1, 1], + ) + out_4 = paddle.strided_slice( + x, axes=[0, 1, 2], starts=starts, ends=ends, strides=strides + ) out_5 = x[-3:3, 0:100:2, -1:2:-1] out_6 = x[minus_3:3:1, 0:100:2, :, minus_1:2:minus_1] @@ -603,9 +600,10 @@ class TestStridedSliceAPI(unittest.TestCase): "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), 'ends': np.array([3, 2147483648, -1]).astype("int64"), - 'strides': np.array([1, 1, 1]).astype("int32") + 'strides': np.array([1, 1, 1]).astype("int32"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7], + ) assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) @@ -620,19 +618,20 @@ class TestStridedSliceAPI(unittest.TestCase): starts = [-3, 0, 2] ends = [3, 2, 4] strides_1 = [1, 1, 1] - sliced_1 = paddle.strided_slice(x, - axes=axes, - starts=starts, - ends=ends, - strides=strides_1) + sliced_1 = paddle.strided_slice( + x, axes=axes, starts=starts, ends=ends, strides=strides_1 + ) assert sliced_1.shape == (3, 2, 2, 2) - @unittest.skipIf(not paddle.is_compiled_with_cuda(), - "Cannot use CUDAPinnedPlace in CPU only version") + @unittest.skipIf( + not paddle.is_compiled_with_cuda(), + "Cannot use CUDAPinnedPlace in CPU only version", + ) def test_cuda_pinned_place(self): with paddle.fluid.dygraph.guard(): - x = paddle.to_tensor(np.random.randn(2, 10), - place=paddle.CUDAPinnedPlace()) + x = paddle.to_tensor( + np.random.randn(2, 10), place=paddle.CUDAPinnedPlace() + ) self.assertTrue(x.place.is_cuda_pinned_place()) y = x[:, ::2] self.assertFalse(x.place.is_cuda_pinned_place()) @@ -640,15 +639,17 @@ class TestStridedSliceAPI(unittest.TestCase): class ArrayLayer(paddle.nn.Layer): - def __init__(self, input_size=224, output_size=10, array_size=1): super(ArrayLayer, self).__init__() self.input_size = input_size self.output_size = output_size self.array_size = array_size for i in range(self.array_size): - setattr(self, self.create_name(i), - paddle.nn.Linear(input_size, output_size)) + setattr( + self, + self.create_name(i), + paddle.nn.Linear(input_size, output_size), + ) def create_name(self, index): return 'linear_' + str(index) @@ -704,7 +705,6 @@ class ArrayLayer(paddle.nn.Layer): class TestStridedSliceTensorArray(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -718,15 +718,17 @@ class TestStridedSliceTensorArray(unittest.TestCase): def is_grads_equal(self, g1, g2): for i, g in enumerate(g1): - self.assertTrue(self.grad_equal(g, g2[i]), - msg="gradient_1:\n{} \ngradient_2:\n{}".format( - g, g2)) + self.assertTrue( + self.grad_equal(g, g2[i]), + msg="gradient_1:\n{} 
\ngradient_2:\n{}".format(g, g2), + ) def is_grads_equal_zeros(self, grads): for g in grads: self.assertTrue( self.grad_equal(np.zeros_like(g), g), - msg="The gradient should be zeros, but received \n{}".format(g)) + msg="The gradient should be zeros, but received \n{}".format(g), + ) def create_case(self, net): inps1 = paddle.randn([1, net.input_size], dtype='float32') @@ -751,31 +753,33 @@ class TestStridedSliceTensorArray(unittest.TestCase): np.testing.assert_array_equal( s1, s2, - err_msg='dygraph graph result:\n{} \nstatic dygraph result:\n{}'. - format(l1.numpy(), l2.numpy())) + err_msg='dygraph graph result:\n{} \nstatic dygraph result:\n{}'.format( + l1.numpy(), l2.numpy() + ), + ) def test_strided_slice_tensor_array_cuda_pinned_place(self): if paddle.device.is_compiled_with_cuda(): with paddle.fluid.dygraph.guard(): class Simple(paddle.nn.Layer): - def __init__(self): super(Simple, self).__init__() def forward(self, inps): tensor_array = None for i, tensor in enumerate(inps): - index = paddle.full(shape=[1], - dtype='int64', - fill_value=i) + index = paddle.full( + shape=[1], dtype='int64', fill_value=i + ) if tensor_array is None: tensor_array = paddle.tensor.array_write( - tensor, i=index) + tensor, i=index + ) else: - paddle.tensor.array_write(tensor, - i=index, - array=tensor_array) + paddle.tensor.array_write( + tensor, i=index, array=tensor_array + ) array1 = paddle.concat(tensor_array) array2 = paddle.concat(tensor_array[::-1]) @@ -784,12 +788,16 @@ class TestStridedSliceTensorArray(unittest.TestCase): net = Simple() func = paddle.jit.to_static(net.forward) - inps1 = paddle.to_tensor(np.random.randn(2, 10), - place=paddle.CUDAPinnedPlace(), - stop_gradient=False) - inps2 = paddle.to_tensor(np.random.randn(2, 10), - place=paddle.CUDAPinnedPlace(), - stop_gradient=False) + inps1 = paddle.to_tensor( + np.random.randn(2, 10), + place=paddle.CUDAPinnedPlace(), + stop_gradient=False, + ) + inps2 = paddle.to_tensor( + np.random.randn(2, 10), + place=paddle.CUDAPinnedPlace(), + stop_gradient=False, + ) self.assertTrue(inps1.place.is_cuda_pinned_place()) self.assertTrue(inps2.place.is_cuda_pinned_place()) @@ -799,201 +807,173 @@ class TestStridedSliceTensorArray(unittest.TestCase): self.assertFalse(result.place.is_cuda_pinned_place()) def test_strided_slice_tensor_array(self): - class Net01(ArrayLayer): - def array_slice(self, tensors): return tensors[::-1] self.create_case(Net01(array_size=10)) class Net02(ArrayLayer): - def array_slice(self, tensors): return tensors[::-2] self.create_case(Net02(input_size=112, array_size=11)) class Net03(ArrayLayer): - def array_slice(self, tensors): return tensors[::-3] self.create_case(Net03(input_size=112, array_size=9)) class Net04(ArrayLayer): - def array_slice(self, tensors): return tensors[1::-4] self.create_case(Net04(input_size=112, array_size=9)) class Net05(ArrayLayer): - def array_slice(self, tensors): return tensors[:7:-4] self.create_case(Net05(input_size=112, array_size=9)) class Net06(ArrayLayer): - def array_slice(self, tensors): return tensors[8:0:-4] self.create_case(Net06(input_size=112, array_size=9)) class Net07(ArrayLayer): - def array_slice(self, tensors): return tensors[8:1:-4] self.create_case(Net07(input_size=112, array_size=9)) class Net08(ArrayLayer): - def array_slice(self, tensors): return tensors[::2] self.create_case(Net08(input_size=112, array_size=11)) class Net09(ArrayLayer): - def array_slice(self, tensors): return tensors[::3] self.create_case(Net09(input_size=112, array_size=9)) class Net10(ArrayLayer): - def 
array_slice(self, tensors): return tensors[1::4] self.create_case(Net10(input_size=112, array_size=9)) class Net11(ArrayLayer): - def array_slice(self, tensors): return tensors[:8:4] self.create_case(Net11(input_size=112, array_size=9)) class Net12(ArrayLayer): - def array_slice(self, tensors): return tensors[1:8:4] self.create_case(Net12(input_size=112, array_size=9)) class Net13(ArrayLayer): - def array_slice(self, tensors): return tensors[8:10:4] self.create_case(Net13(input_size=112, array_size=13)) class Net14(ArrayLayer): - def array_slice(self, tensors): return tensors[3:10:4] self.create_case(Net14(input_size=112, array_size=13)) class Net15(ArrayLayer): - def array_slice(self, tensors): return tensors[2:10:4] self.create_case(Net15(input_size=112, array_size=13)) class Net16(ArrayLayer): - def array_slice(self, tensors): return tensors[3:10:3] self.create_case(Net16(input_size=112, array_size=13)) class Net17(ArrayLayer): - def array_slice(self, tensors): return tensors[3:15:3] self.create_case(Net17(input_size=112, array_size=13)) class Net18(ArrayLayer): - def array_slice(self, tensors): return tensors[0:15:3] self.create_case(Net18(input_size=112, array_size=13)) class Net19(ArrayLayer): - def array_slice(self, tensors): return tensors[-1:-5:-3] self.create_case(Net19(input_size=112, array_size=13)) class Net20(ArrayLayer): - def array_slice(self, tensors): return tensors[-1:-6:-3] self.create_case(Net20(input_size=112, array_size=13)) class Net21(ArrayLayer): - def array_slice(self, tensors): return tensors[-3:-6:-3] self.create_case(Net21(input_size=112, array_size=13)) class Net22(ArrayLayer): - def array_slice(self, tensors): return tensors[-5:-1:3] self.create_case(Net22(input_size=112, array_size=13)) class Net23(ArrayLayer): - def array_slice(self, tensors): return tensors[-6:-1:3] self.create_case(Net23(input_size=112, array_size=13)) class Net24(ArrayLayer): - def array_slice(self, tensors): return tensors[-6:-3:3] self.create_case(Net24(input_size=112, array_size=13)) class Net25(ArrayLayer): - def array_slice(self, tensors): return tensors[0::3] self.create_case(Net25(input_size=112, array_size=13)) class Net26(ArrayLayer): - def array_slice(self, tensors): return tensors[-60:20:3] self.create_case(Net26(input_size=112, array_size=13)) class Net27(ArrayLayer): - def array_slice(self, tensors): return tensors[-3:-60:-3] self.create_case(Net27(input_size=112, array_size=13)) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestStridedSliceFloat16(unittest.TestCase): - def init_test_case(self): self.op_type = 'strided_slice' self.input_shape = [3, 3, 3, 6, 7, 8] @@ -1008,8 +988,9 @@ class TestStridedSliceFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - output = strided_slice_native_forward(x, self.axes, self.starts, - self.ends, self.strides) + output = strided_slice_native_forward( + x, self.axes, self.starts, self.ends, self.strides + ) x_grad = paddle.grad(output, x) output_np = output[0].numpy().astype('float32') x_grad_np = x_grad[0].numpy().astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_subtract_op.py b/python/paddle/fluid/tests/unittests/test_subtract_op.py index 5eda6e367d4e6f2ce8e0ef2f3e95bc11bdf1388f..ce58725d050172fda63f41db2a8abebfc0488370 100644 --- a/python/paddle/fluid/tests/unittests/test_subtract_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_subtract_op.py @@ -19,7 +19,6 @@ import paddle.fluid.core as core class ApiSubtractTest(unittest.TestCase): - def setUp(self): if core.is_compiled_with_cuda(): self.place = core.CUDAPlace(0) @@ -40,56 +39,56 @@ class ApiSubtractTest(unittest.TestCase): def test_static_api(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_max = paddle.subtract(data_x, data_y) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "y": self.input_y - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "y": self.input_y}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") data_z = paddle.static.data("z", shape=[15], dtype="float32") result_max = paddle.subtract(data_x, data_z) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "x": self.input_x, - "z": self.input_z - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"x": self.input_x, "z": self.input_z}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_a = paddle.static.data("a", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.subtract(data_a, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "a": self.input_a, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"a": self.input_a, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data_b = paddle.static.data("b", shape=[3], dtype="int64") data_c = paddle.static.data("c", shape=[3], dtype="int64") result_max = paddle.subtract(data_b, data_c) exe = paddle.static.Executor(self.place) - res, = exe.run(feed={ - "b": self.input_b, - "c": self.input_c - }, - fetch_list=[result_max]) + (res,) = exe.run( + feed={"b": self.input_b, "c": self.input_c}, + fetch_list=[result_max], + ) np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 7d99391e5b1b29b7176ca8b304a3fb875f32a112..6c8cc00ea8fd940911c6c699d97cf9fd61e51c5a 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -22,9 +22,11 @@ from paddle import enable_static import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator -from paddle.fluid.tests.unittests.op_test import (OpTest, - convert_float_to_uint16, - 
convert_uint16_to_float) +from paddle.fluid.tests.unittests.op_test import ( + OpTest, + convert_float_to_uint16, + convert_uint16_to_float, +) from paddle.fluid.framework import _test_eager_guard import paddle.inference as paddle_infer import gradient_checker @@ -33,7 +35,6 @@ import paddle.fluid.layers as layers class TestSumOp(OpTest): - def setUp(self): self.op_type = "sum" self.init_kernel_type() @@ -58,7 +59,6 @@ class TestSumOp(OpTest): class TestSelectedRowsSumOp(unittest.TestCase): - def setUp(self): self.height = 10 self.row_numel = 12 @@ -67,14 +67,18 @@ class TestSelectedRowsSumOp(unittest.TestCase): self.init_kernel_type() def check_with_place(self, place, inplace): - self.check_input_and_optput(core.Scope(), place, inplace, True, True, - True) - self.check_input_and_optput(core.Scope(), place, inplace, False, True, - True) - self.check_input_and_optput(core.Scope(), place, inplace, False, False, - True) - self.check_input_and_optput(core.Scope(), place, inplace, False, False, - False) + self.check_input_and_optput( + core.Scope(), place, inplace, True, True, True + ) + self.check_input_and_optput( + core.Scope(), place, inplace, False, True, True + ) + self.check_input_and_optput( + core.Scope(), place, inplace, False, False, True + ) + self.check_input_and_optput( + core.Scope(), place, inplace, False, False, False + ) def init_kernel_type(self): pass @@ -85,13 +89,15 @@ class TestSelectedRowsSumOp(unittest.TestCase): array[i] *= rows[i] return array - def check_input_and_optput(self, - scope, - place, - inplace, - w1_has_data=False, - w2_has_data=False, - w3_has_data=False): + def check_input_and_optput( + self, + scope, + place, + inplace, + w1_has_data=False, + w2_has_data=False, + w3_has_data=False, + ): self.create_selected_rows(scope, place, "W1", w1_has_data) self.create_selected_rows(scope, place, "W2", w2_has_data) @@ -117,7 +123,8 @@ class TestSelectedRowsSumOp(unittest.TestCase): self.assertEqual(len(out.rows()), 7) np.testing.assert_array_equal( np.array(out.get_tensor()), - self._get_array(self.rows, self.row_numel) * has_data_w_num) + self._get_array(self.rows, self.row_numel) * has_data_w_num, + ) else: self.assertEqual(len(out.rows()), 0) @@ -148,15 +155,14 @@ class TestSelectedRowsSumOp(unittest.TestCase): class TestSelectedRowsSumOpInt(TestSelectedRowsSumOp): - def init_kernel_type(self): self.dtype = np.int32 -@unittest.skipIf(not core.supports_bfloat16(), - 'place does not support BF16 evaluation') +@unittest.skipIf( + not core.supports_bfloat16(), 'place does not support BF16 evaluation' +) class TestSelectedRowsSumBF16Op(TestSelectedRowsSumOp): - def setUp(self): self.height = 10 self.row_numel = 12 @@ -164,8 +170,9 @@ class TestSelectedRowsSumBF16Op(TestSelectedRowsSumOp): self.dtype = np.uint16 self.init_kernel_type() np.random.seed(12345) - self.data = np.random.random( - (len(self.rows), self.row_numel)).astype(np.float32) + self.data = np.random.random((len(self.rows), self.row_numel)).astype( + np.float32 + ) def _get_array(self, rows, row_numel): if len(rows) > 0: @@ -173,13 +180,15 @@ class TestSelectedRowsSumBF16Op(TestSelectedRowsSumOp): else: return np.ndarray((0, row_numel), dtype=self.dtype) - def check_input_and_optput(self, - scope, - place, - inplace, - w1_has_data=False, - w2_has_data=False, - w3_has_data=False): + def check_input_and_optput( + self, + scope, + place, + inplace, + w1_has_data=False, + w2_has_data=False, + w3_has_data=False, + ): self.create_selected_rows(scope, place, "W1", w1_has_data) self.create_selected_rows(scope, 
place, "W2", w2_has_data) @@ -205,8 +214,12 @@ class TestSelectedRowsSumBF16Op(TestSelectedRowsSumOp): self.assertEqual(len(out.rows()), 7) out_bf16 = np.array(out.get_tensor()) out_fp32 = convert_uint16_to_float(out_bf16) - ref_fp32 = convert_uint16_to_float( - self._get_array(self.rows, self.row_numel)) * has_data_w_num + ref_fp32 = ( + convert_uint16_to_float( + self._get_array(self.rows, self.row_numel) + ) + * has_data_w_num + ) np.testing.assert_allclose(out_fp32, ref_fp32, atol=0, rtol=0.95e-2) else: self.assertEqual(len(out.rows()), 0) @@ -217,13 +230,11 @@ class TestSelectedRowsSumBF16Op(TestSelectedRowsSumOp): class TestSelectedRowsSumBF16OpBigRow(TestSelectedRowsSumBF16Op): - def init_kernel_type(self): self.row_numel = 102 class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp): - def setUp(self): self.height = 10 self.row_numel = 12 @@ -255,23 +266,25 @@ class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp): self.assertEqual(out_t.shape[0], self.height) np.testing.assert_array_equal( out_t, - self._get_array([i for i in range(self.height)], self.row_numel) * - np.tile(np.array(result).reshape(self.height, 1), self.row_numel)) + self._get_array([i for i in range(self.height)], self.row_numel) + * np.tile(np.array(result).reshape(self.height, 1), self.row_numel), + ) def create_lod_tensor(self, scope, place, var_name): var = scope.var(var_name) w_tensor = var.get_tensor() - w_array = self._get_array([i for i in range(self.height)], - self.row_numel) + w_array = self._get_array( + [i for i in range(self.height)], self.row_numel + ) w_tensor.set(w_array, place) return var -#----------- test fp16 ----------- -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +# ----------- test fp16 ----------- +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestFP16SumOp(TestSumOp): - def init_kernel_type(self): self.dtype = np.float16 @@ -289,11 +302,10 @@ class TestFP16SumOp(TestSumOp): def create_test_sum_fp16_class(parent): - - @unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") + @unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" + ) class TestSumFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -308,9 +320,8 @@ def create_test_sum_fp16_class(parent): globals()[cls_name] = TestSumFp16Case -#----------- test bf16 ----------- +# ----------- test bf16 ----------- class TestSumBF16Op(OpTest): - def setUp(self): self.op_type = "sum" self.init_kernel_type() @@ -319,9 +330,11 @@ class TestSumBF16Op(OpTest): x2 = np.random.random((3, 40)).astype(np.float32) y = x0 + x1 + x2 self.inputs = { - "X": [("x0", convert_float_to_uint16(x0)), - ("x1", convert_float_to_uint16(x1)), - ("x2", convert_float_to_uint16(x2))] + "X": [ + ("x0", convert_float_to_uint16(x0)), + ("x1", convert_float_to_uint16(x1)), + ("x2", convert_float_to_uint16(x2)), + ] } self.outputs = {'Out': convert_float_to_uint16(y)} @@ -336,15 +349,14 @@ class TestSumBF16Op(OpTest): class API_Test_Add_n(unittest.TestCase): - def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input0 = fluid.layers.fill_constant(shape=[2, 3], - dtype='int64', - value=5) - input1 = fluid.layers.fill_constant(shape=[2, 3], - dtype='int64', - value=3) + input0 = fluid.layers.fill_constant( + shape=[2, 3], dtype='int64', value=5 + ) + input1 = fluid.layers.fill_constant( + shape=[2, 3], dtype='int64', value=3 + ) expected_result = np.empty((2, 3)) 
expected_result.fill(8) sum_value = paddle.add_n([input0, input1]) @@ -371,16 +383,19 @@ class API_Test_Add_n(unittest.TestCase): expected_result = np.empty((2, 3)) expected_result.fill(2) sum_value = paddle.add_n([input0, input1]) - self.assertEqual((sum_value.numpy() == expected_result).all(), - True) + self.assertEqual( + (sum_value.numpy() == expected_result).all(), True + ) expected_grad_result = np.empty((2, 3)) expected_grad_result.fill(1) sum_value.backward() self.assertEqual( - (input0.grad.numpy() == expected_grad_result).all(), True) + (input0.grad.numpy() == expected_grad_result).all(), True + ) self.assertEqual( - (input1.grad.numpy() == expected_grad_result).all(), True) + (input1.grad.numpy() == expected_grad_result).all(), True + ) def test_add_n_and_add_and_grad(self): with fluid.dygraph.guard(): @@ -397,7 +412,7 @@ class API_Test_Add_n(unittest.TestCase): dx, dy = paddle.grad([out], [x, y], create_graph=True) - expected_out = np.array([[10., 12., 14.], [16., 18., 20.]]) + expected_out = np.array([[10.0, 12.0, 14.0], [16.0, 18.0, 20.0]]) expected_dx = np.array([[1, 1, 1], [1, 1, 1]]) expected_dy = np.array([[1, 1, 1], [1, 1, 1]]) @@ -407,9 +422,7 @@ class API_Test_Add_n(unittest.TestCase): class TestRaiseSumError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.sum([11, 22]) @@ -430,9 +443,7 @@ class TestRaiseSumError(unittest.TestCase): class TestRaiseSumsError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.sums([11, 22]) @@ -468,9 +479,7 @@ class TestRaiseSumsError(unittest.TestCase): class TestSumOpError(unittest.TestCase): - def test_errors(self): - def test_empty_list_input(): with fluid.dygraph.guard(): fluid._legacy_C_ops.sum([]) @@ -488,14 +497,16 @@ create_test_sum_fp16_class(TestLoDTensorAndSelectedRowsOp) class TestReduceOPTensorAxisBase(unittest.TestCase): - def setUp(self): paddle.disable_static() paddle.seed(2022) self.temp_dir = tempfile.TemporaryDirectory() self.save_path = os.path.join(self.temp_dir.name, 'reduce_tensor_axis') - self.place = paddle.CUDAPlace( - 0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace() + self.place = ( + paddle.CUDAPlace(0) + if paddle.is_compiled_with_cuda() + else paddle.CPUPlace() + ) self.keepdim = False self.init_data() @@ -514,7 +525,8 @@ class TestReduceOPTensorAxisBase(unittest.TestCase): pd_out = self.pd_api(self.x, self.tensor_axis) np_out = self.np_api(self.x.numpy(), tuple(self.np_axis)) np.testing.assert_allclose( - pd_out.numpy() if pd_out.size > 1 else pd_out.item(), np_out) + pd_out.numpy() if pd_out.size > 1 else pd_out.item(), np_out + ) pd_out.backward() self.assertEqual(self.x.gradient().shape, tuple(self.x.shape)) @@ -524,9 +536,9 @@ class TestReduceOPTensorAxisBase(unittest.TestCase): starup_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, starup_prog): # run static - x = paddle.static.data(shape=self.x.shape, - name='x', - dtype='float32') + x = paddle.static.data( + shape=self.x.shape, name='x', dtype='float32' + ) if isinstance(self.tensor_axis, paddle.Tensor): axis = paddle.assign(self.np_axis) else: @@ -541,17 +553,19 @@ class TestReduceOPTensorAxisBase(unittest.TestCase): linear_out = linear(x) out = self.pd_api(linear_out, axis, keepdim=self.keepdim) - sgd = paddle.optimizer.SGD(learning_rate=0.) 
+ sgd = paddle.optimizer.SGD(learning_rate=0.0) sgd.minimize(paddle.mean(out)) exe = paddle.static.Executor(self.place) exe.run(starup_prog) - static_out = exe.run(feed={'x': self.x.numpy().astype('float32')}, - fetch_list=[out]) + static_out = exe.run( + feed={'x': self.x.numpy().astype('float32')}, fetch_list=[out] + ) # run infer paddle.static.save_inference_model(self.save_path, [x], [out], exe) - config = paddle_infer.Config(self.save_path + '.pdmodel', - self.save_path + '.pdiparams') + config = paddle_infer.Config( + self.save_path + '.pdmodel', self.save_path + '.pdiparams' + ) if paddle.is_compiled_with_cuda(): config.enable_use_gpu(100, 0) else: @@ -570,7 +584,6 @@ class TestReduceOPTensorAxisBase(unittest.TestCase): class TestSumWithTensorAxis1(TestReduceOPTensorAxisBase): - def init_data(self): self.pd_api = paddle.sum self.np_api = np.sum @@ -579,12 +592,11 @@ class TestSumWithTensorAxis1(TestReduceOPTensorAxisBase): self.tensor_axis = [ 0, paddle.to_tensor([1], 'int64'), - paddle.to_tensor([2], 'int64') + paddle.to_tensor([2], 'int64'), ] class TestAddNDoubleGradCheck(unittest.TestCase): - def add_n_wrapper(self, x): return paddle.add_n(x) @@ -602,17 +614,21 @@ class TestAddNDoubleGradCheck(unittest.TestCase): data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) data2_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) - gradient_checker.double_grad_check([data1, data2], - out, - x_init=[data1_arr, data2_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data1, data2], + out, + x_init=[data1_arr, data2_arr], + place=place, + eps=eps, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) gradient_checker.double_grad_check_for_dygraph( - self.add_n_wrapper, [data1, data2], + self.add_n_wrapper, + [data1, data2], out, x_init=[data1_arr, data2_arr], - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() @@ -624,7 +640,6 @@ class TestAddNDoubleGradCheck(unittest.TestCase): class TestAddNTripleGradCheck(unittest.TestCase): - def add_n_wrapper(self, x): return paddle.add_n(x) @@ -642,17 +657,21 @@ class TestAddNTripleGradCheck(unittest.TestCase): data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) data2_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) - gradient_checker.triple_grad_check([data1, data2], - out, - x_init=[data1_arr, data2_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data1, data2], + out, + x_init=[data1_arr, data2_arr], + place=place, + eps=eps, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) gradient_checker.triple_grad_check_for_dygraph( - self.add_n_wrapper, [data1, data2], + self.add_n_wrapper, + [data1, data2], out, x_init=[data1_arr, data2_arr], - place=place) + place=place, + ) def test_grad(self): paddle.enable_static() @@ -664,7 +683,6 @@ class TestAddNTripleGradCheck(unittest.TestCase): class TestSumDoubleGradCheck(unittest.TestCase): - def sum_wrapper(self, x): return paddle.sum(x[0], axis=1, keepdim=True) @@ -679,16 +697,13 @@ class TestSumDoubleGradCheck(unittest.TestCase): out = paddle.sum(data, axis=1, keepdim=True) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.sum_wrapper, [data], - out, - 
x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.sum_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -700,7 +715,6 @@ class TestSumDoubleGradCheck(unittest.TestCase): class TestSumTripleGradCheck(unittest.TestCase): - def sum_wrapper(self, x): return paddle.sum(x[0], axis=1, keepdim=True) @@ -715,16 +729,13 @@ class TestSumTripleGradCheck(unittest.TestCase): out = paddle.sum(data, axis=1, keepdim=True) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.sum_wrapper, [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.sum_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_svd_op.py b/python/paddle/fluid/tests/unittests/test_svd_op.py index aca403dac074c890e9d7a3df17766629d93eb49b..146b18ae20cd8d3a56911c849d0e5cc0b9eac410 100644 --- a/python/paddle/fluid/tests/unittests/test_svd_op.py +++ b/python/paddle/fluid/tests/unittests/test_svd_op.py @@ -21,25 +21,23 @@ from op_test import OpTest, skip_check_grad_ci class TestSvdOp(OpTest): - def setUp(self): paddle.enable_static() self.python_api = paddle.linalg.svd self.generate_input() self.generate_output() self.op_type = "svd" - assert (hasattr(self, "_output_data")) + assert hasattr(self, "_output_data") self.inputs = {"X": self._input_data} self.attrs = {'full_matrices': self.get_full_matrices_option()} self.outputs = { "U": self._output_data[0], "S": self._output_data[1], - "VH": self._output_data[2] + "VH": self._output_data[2], } def generate_input(self): - """ return a input_data and input_shape - """ + """return a input_data and input_shape""" self._input_shape = (100, 1) self._input_data = np.random.random(self._input_shape).astype("float64") @@ -47,17 +45,17 @@ class TestSvdOp(OpTest): return False def generate_output(self): - assert (hasattr(self, "_input_data")) + assert hasattr(self, "_input_data") self._output_data = np.linalg.svd(self._input_data) def test_check_output(self): self.check_output(no_check_set=['U', 'VH'], check_eager=True) def test_svd_forward(self): - """ u matmul diag(s) matmul vt must become X - """ + """u matmul diag(s) matmul vt must become X""" single_input = self._input_data.reshape( - [-1, self._input_shape[-2], self._input_shape[-1]])[0] + [-1, self._input_shape[-2], self._input_shape[-1]] + )[0] paddle.disable_static() dy_x = paddle.to_tensor(single_input) dy_u, dy_s, dy_vt = paddle.linalg.svd(dy_x) @@ -71,19 +69,19 @@ class TestSvdOp(OpTest): paddle.enable_static() def check_S_grad(self): - self.check_grad(['X'], ['S'], - numeric_grad_delta=0.001, - check_eager=True) + self.check_grad( + ['X'], ['S'], numeric_grad_delta=0.001, check_eager=True + ) def check_U_grad(self): - self.check_grad(['X'], ['U'], - numeric_grad_delta=0.001, - check_eager=True) + self.check_grad( + ['X'], ['U'], numeric_grad_delta=0.001, check_eager=True + ) def check_V_grad(self): - self.check_grad(['X'], ['VH'], - numeric_grad_delta=0.001, - check_eager=True) + self.check_grad( + ['X'], ['VH'], numeric_grad_delta=0.001, check_eager=True + ) def test_check_grad(self): 
""" @@ -101,123 +99,146 @@ class TestSvdCheckGrad2(TestSvdOp): no_need_check_grad = True def generate_input(self): - """ return a deterministic matrix, the range matrix; - vander matrix must be a full rank matrix. + """return a deterministic matrix, the range matrix; + vander matrix must be a full rank matrix. """ self._input_shape = (5, 5) - self._input_data = np.vander([2, 3, 4, 5, 6]).astype("float64").reshape( - self._input_shape) + self._input_data = ( + np.vander([2, 3, 4, 5, 6]) + .astype("float64") + .reshape(self._input_shape) + ) class TestSvdNormalMatrixSmall(TestSvdCheckGrad2): - def generate_input(self): - """ small matrix SVD. - """ + """small matrix SVD.""" self._input_shape = (1, 1) self._input_data = np.random.random(self._input_shape).astype("float64") class TestSvdNormalMatrix6x3(TestSvdCheckGrad2): - def generate_input(self): - """ return a deterministic matrix, the range matrix; - vander matrix must be a full rank matrix. + """return a deterministic matrix, the range matrix; + vander matrix must be a full rank matrix. """ self._input_shape = (6, 3) - self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0], - [0.0, 0.0, 6.0], [2.0, 4.0, 9.0], - [3.0, 6.0, 8.0], [3.0, 1.0, - 0.0]]).astype("float64") + self._input_data = np.array( + [ + [1.0, 2.0, 3.0], + [0.0, 1.0, 5.0], + [0.0, 0.0, 6.0], + [2.0, 4.0, 9.0], + [3.0, 6.0, 8.0], + [3.0, 1.0, 0.0], + ] + ).astype("float64") class TestSvdNormalMatrix3x6(TestSvdCheckGrad2): - def generate_input(self): - """ return a deterministic matrix, the range matrix; - vander matrix must be a full rank matrix. + """return a deterministic matrix, the range matrix; + vander matrix must be a full rank matrix. """ self._input_shape = (3, 6) - self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0], - [0.0, 0.0, 6.0], [2.0, 4.0, 9.0], - [3.0, 6.0, 8.0], [3.0, 1.0, - 0.0]]).astype("float64") + self._input_data = np.array( + [ + [1.0, 2.0, 3.0], + [0.0, 1.0, 5.0], + [0.0, 0.0, 6.0], + [2.0, 4.0, 9.0], + [3.0, 6.0, 8.0], + [3.0, 1.0, 0.0], + ] + ).astype("float64") self._input_data = self._input_data.transpose((-1, -2)) class TestSvdNormalMatrix6x3Batched(TestSvdOp): - def generate_input(self): self._input_shape = (10, 6, 3) - self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0], - [0.0, 0.0, 6.0], [2.0, 4.0, 9.0], - [3.0, 6.0, 8.0], [3.0, 1.0, - 0.0]]).astype("float64") + self._input_data = np.array( + [ + [1.0, 2.0, 3.0], + [0.0, 1.0, 5.0], + [0.0, 0.0, 6.0], + [2.0, 4.0, 9.0], + [3.0, 6.0, 8.0], + [3.0, 1.0, 0.0], + ] + ).astype("float64") self._input_data = np.stack([self._input_data] * 10, axis=0) def test_svd_forward(self): - """ test_svd_forward not support batched input, so disable this test. - """ + """test_svd_forward not support batched input, so disable this test.""" pass class TestSvdNormalMatrix3x6Batched(TestSvdOp): - def generate_input(self): - """ return a deterministic matrix, the range matrix; - vander matrix must be a full rank matrix. + """return a deterministic matrix, the range matrix; + vander matrix must be a full rank matrix. 
""" self._input_shape = (10, 3, 6) - self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0], - [0.0, 0.0, 6.0], [2.0, 4.0, 9.0], - [3.0, 6.0, 8.0], [3.0, 1.0, - 0.0]]).astype("float64") + self._input_data = np.array( + [ + [1.0, 2.0, 3.0], + [0.0, 1.0, 5.0], + [0.0, 0.0, 6.0], + [2.0, 4.0, 9.0], + [3.0, 6.0, 8.0], + [3.0, 1.0, 0.0], + ] + ).astype("float64") self._input_data = self._input_data.transpose((-1, -2)) self._input_data = np.stack([self._input_data] * 10, axis=0) def test_svd_forward(self): - """ test_svd_forward not support batched input, so disable this test. - """ + """test_svd_forward not support batched input, so disable this test.""" pass class TestSvdNormalMatrix3x3x3x6Batched(TestSvdOp): - def generate_input(self): - """ return a deterministic matrix, the range matrix; - vander matrix must be a full rank matrix. + """return a deterministic matrix, the range matrix; + vander matrix must be a full rank matrix. """ self._input_shape = (3, 3, 3, 6) - self._input_data = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.0], - [0.0, 0.0, 6.0], [2.0, 4.0, 9.0], - [3.0, 6.0, 8.0], [3.0, 1.0, - 0.0]]).astype("float64") + self._input_data = np.array( + [ + [1.0, 2.0, 3.0], + [0.0, 1.0, 5.0], + [0.0, 0.0, 6.0], + [2.0, 4.0, 9.0], + [3.0, 6.0, 8.0], + [3.0, 1.0, 0.0], + ] + ).astype("float64") self._input_data = self._input_data.transpose((-1, -2)) self._input_data = np.stack( - [self._input_data, self._input_data, self._input_data], axis=0) + [self._input_data, self._input_data, self._input_data], axis=0 + ) self._input_data = np.stack( - [self._input_data, self._input_data, self._input_data], axis=0) + [self._input_data, self._input_data, self._input_data], axis=0 + ) def test_svd_forward(self): - """ test_svd_forward not support batched input, so disable this test. - """ + """test_svd_forward not support batched input, so disable this test.""" pass -@skip_check_grad_ci(reason="'check_grad' on large inputs is too slow, " + - "however it is desirable to cover the forward pass") +@skip_check_grad_ci( + reason="'check_grad' on large inputs is too slow, " + + "however it is desirable to cover the forward pass" +) class TestSvdNormalMatrixBig(TestSvdOp): - def generate_input(self): - """ big matrix SVD. - - """ + """big matrix SVD.""" self._input_shape = (2, 200, 300) self._input_data = np.random.random(self._input_shape).astype("float64") def test_svd_forward(self): - """ test_svd_forward not support batched input, so disable this test. - """ + """test_svd_forward not support batched input, so disable this test.""" pass def test_check_grad(self): @@ -225,16 +246,13 @@ class TestSvdNormalMatrixBig(TestSvdOp): class TestSvdNormalMatrixBig2(TestSvdOp): - def generate_input(self): - """ big matrix SVD. 
- """ + """big matrix SVD.""" self._input_shape = (1, 100) self._input_data = np.random.random(self._input_shape).astype("float64") class TestSvdNormalMatrixFullMatrices(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -246,21 +264,19 @@ class TestSvdNormalMatrixFullMatrices(unittest.TestCase): mat = np.random.random(mat_shape).astype("float64") x = paddle.to_tensor(mat) u, s, vh = paddle.linalg.svd(x, full_matrices=True) - assert (u.shape == [2, 2]) - assert (vh.shape == [3, 3]) + assert u.shape == [2, 2] + assert vh.shape == [3, 3] x_recover = u.matmul(paddle.diag(s)).matmul(vh[0:2]) - if ((paddle.abs(x_recover - x) > 1e-4).any()): + if (paddle.abs(x_recover - x) > 1e-4).any(): raise RuntimeError("mat can't be recovered\n") class TestSvdFullMatriceGrad(TestSvdNormalMatrix6x3): - def get_full_matrices_option(self): return True def test_svd_forward(self): - """ test_svd_forward not support full matrices, so disable this test. - """ + """test_svd_forward not support full matrices, so disable this test.""" pass def test_check_grad(self): @@ -268,12 +284,11 @@ class TestSvdFullMatriceGrad(TestSvdNormalMatrix6x3): remember the input matrix must be the full rank matrix, otherwise the gradient will stochatic because the u / v 's (n-k) freedom vectors """ self.check_S_grad() - #self.check_U_grad() // don't check U grad, because U have freedom vector + # self.check_U_grad() // don't check U grad, because U have freedom vector self.check_V_grad() class TestSvdAPI(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() a = np.random.rand(5, 5) @@ -290,15 +305,17 @@ class TestSvdAPI(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): a = np.random.rand(5, 5) - x = paddle.fluid.data(name="input", - shape=[5, 5], - dtype='float64') + x = paddle.fluid.data( + name="input", shape=[5, 5], dtype='float64' + ) u, s, vh = paddle.linalg.svd(x) exe = fluid.Executor(place) gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False) - fetches = exe.run(fluid.default_main_program(), - feed={"input": a}, - fetch_list=[s]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input": a}, + fetch_list=[s], + ) np.testing.assert_allclose(fetches[0], gt_s, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_switch.py b/python/paddle/fluid/tests/unittests/test_switch.py index cdc2162a59961e458df745e16b94409399b22798..31fe519593fca5744a0065fe008ecec39086bd55 100644 --- a/python/paddle/fluid/tests/unittests/test_switch.py +++ b/python/paddle/fluid/tests/unittests/test_switch.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import default_startup_program class TestSwitch(unittest.TestCase): - def check_switch(self, value): x = layers.fill_constant(shape=[1], dtype='float32', value=value) zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0) @@ -30,10 +29,9 @@ class TestSwitch(unittest.TestCase): two_var = layers.fill_constant(shape=[1], dtype='float32', value=2.0) three_var = layers.fill_constant(shape=[1], dtype='float32', value=3.0) - result = layers.create_global_var(shape=[1], - value=-1.0, - dtype='float32', - persistable=True) + result = layers.create_global_var( + shape=[1], value=-1.0, dtype='float32', persistable=True + ) with layers.Switch() as switch: with switch.case(layers.less_than(x, zero_var)): @@ -63,20 +61,18 @@ class TestSwitch(unittest.TestCase): class TestSwitchCaseError(unittest.TestCase): - def test_error(self): main_program = framework.Program() startup_program = framework.Program() 
with framework.program_guard(main_program, startup_program): cond = layers.fill_constant(shape=[1], dtype='float32', value=0.0) - zero_var = layers.fill_constant(shape=[1], - dtype='float32', - value=0.0) - - result = layers.create_global_var(shape=[1], - value=-1.0, - dtype='float32', - persistable=True) + zero_var = layers.fill_constant( + shape=[1], dtype='float32', value=0.0 + ) + + result = layers.create_global_var( + shape=[1], value=-1.0, dtype='float32', persistable=True + ) # 1. The type of 'condition' in case must be Variable. def test_condition_type(): diff --git a/python/paddle/fluid/tests/unittests/test_switch_autotune.py b/python/paddle/fluid/tests/unittests/test_switch_autotune.py index 93dd06ec2916a578639ccf2e011cfbb92aba08f4..52b39fcf617c5157c20a7cca0caa0d0935374a36 100644 --- a/python/paddle/fluid/tests/unittests/test_switch_autotune.py +++ b/python/paddle/fluid/tests/unittests/test_switch_autotune.py @@ -22,7 +22,6 @@ import os class SimpleNet(paddle.nn.Layer): - def __init__(self): super(SimpleNet, self).__init__() self.conv = paddle.nn.Conv2D(1, 2, (3, 3)) @@ -51,7 +50,6 @@ def static_program(net, data): class TestAutoTune(unittest.TestCase): - def set_flags(self, enable_autotune): if paddle.is_compiled_with_cuda(): if enable_autotune: @@ -67,7 +65,7 @@ class TestAutoTune(unittest.TestCase): expected_res = { "step_id": step_id, "cache_size": 0, - "cache_hit_rate": 0 + "cache_hit_rate": 0, } if paddle.is_compiled_with_cuda(): # Total 3 * num_iters cache accesses, only iter 2 hits the cache. @@ -77,9 +75,8 @@ class TestAutoTune(unittest.TestCase): def test_autotune(self): paddle.incubate.autotune.set_config( - config={"kernel": { - "enable": False - }}) + config={"kernel": {"enable": False}} + ) self.assertEqual(self.get_flags("FLAGS_use_autotune"), False) paddle.incubate.autotune.set_config(config={"kernel": {"enable": True}}) @@ -96,21 +93,17 @@ class TestAutoTune(unittest.TestCase): class TestDygraphAutoTuneStatus(TestAutoTune): - def run_program(self, enable_autotune): self.set_flags(enable_autotune) if enable_autotune: paddle.incubate.autotune.set_config( - config={"kernel": { - "enable": True, - "tuning_range": [1, 2] - }}) + config={"kernel": {"enable": True, "tuning_range": [1, 2]}} + ) else: paddle.incubate.autotune.set_config( - config={"kernel": { - "enable": False - }}) - x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.) 
+ config={"kernel": {"enable": False}} + ) + x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1.0, max=1.0) net = SimpleNet() for i in range(3): train_dygraph(net, x_var) @@ -135,7 +128,6 @@ class TestDygraphAutoTuneStatus(TestAutoTune): class TestStaticAutoTuneStatus(TestAutoTune): - def run_program(self, enable_autotune): paddle.enable_static() @@ -143,13 +135,16 @@ class TestStaticAutoTuneStatus(TestAutoTune): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data = paddle.static.data(name='X', - shape=data_shape, - dtype='float32') + data = paddle.static.data( + name='X', shape=data_shape, dtype='float32' + ) net = SimpleNet() loss = static_program(net, data) - place = paddle.CUDAPlace(0) if paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.CPUPlace() + place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() + else paddle.CPUPlace() + ) exe = paddle.static.Executor(place) exe.run(startup_program) x = np.random.random(size=data_shape).astype('float32') @@ -164,10 +159,8 @@ class TestStaticAutoTuneStatus(TestAutoTune): os.remove(tfile.name) else: paddle.incubate.autotune.set_config( - config={"kernel": { - "enable": False, - "tuning_range": [1, 2] - }}) + config={"kernel": {"enable": False, "tuning_range": [1, 2]}} + ) for i in range(3): exe.run(program=main_program, feed={'X': x}, fetch_list=[loss]) @@ -190,7 +183,6 @@ class TestStaticAutoTuneStatus(TestAutoTune): class TestAutoTuneAPI(unittest.TestCase): - def test_set_config_warnings(self): with warnings.catch_warnings(record=True) as w: config = {"kernel": {"enable": 1, "tuning_range": 1}} @@ -204,7 +196,8 @@ class TestAutoTuneAPI(unittest.TestCase): def test_set_config_attr(self): paddle.incubate.autotune.set_config(config=None) self.assertEqual( - paddle.get_flags("FLAGS_use_autotune")["FLAGS_use_autotune"], True) + paddle.get_flags("FLAGS_use_autotune")["FLAGS_use_autotune"], True + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_switch_case.py b/python/paddle/fluid/tests/unittests/test_switch_case.py index 3c529d789beb90e617c1739079364b366b5ef3e4..aad70fe789bfe903c4ba63a38f80d04aa63c54ee 100644 --- a/python/paddle/fluid/tests/unittests/test_switch_case.py +++ b/python/paddle/fluid/tests/unittests/test_switch_case.py @@ -23,9 +23,7 @@ from functools import partial class TestAPISwitchCase(unittest.TestCase): - def test_return_single_var(self): - def fn_1(): return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) @@ -43,86 +41,90 @@ class TestAPISwitchCase(unittest.TestCase): index_5 = layers.fill_constant(shape=[1], dtype='int32', value=5) # call fn_1 - out_0 = layers.switch_case(branch_index=index_1, - branch_fns={ - 1: fn_1, - 2: fn_2, - 3: fn_3 - }) + out_0 = layers.switch_case( + branch_index=index_1, branch_fns={1: fn_1, 2: fn_2, 3: fn_3} + ) # call fn_2 : branch_fns={0: fn_1, 1:fn_2, 2:fn_3} - out_1 = layers.switch_case(branch_index=index_1, - branch_fns=(fn_1, fn_2, fn_3)) + out_1 = layers.switch_case( + branch_index=index_1, branch_fns=(fn_1, fn_2, fn_3) + ) # call default fn_3 - out_2 = layers.switch_case(branch_index=index_5, - branch_fns=((1, fn_1), (2, fn_2)), - default=fn_3) + out_2 = layers.switch_case( + branch_index=index_5, + branch_fns=((1, fn_1), (2, fn_2)), + default=fn_3, + ) # no default, call fn_2 - out_3 = layers.switch_case(branch_index=index_2, - branch_fns=[(1, fn_1), (2, fn_2)]) + out_3 = layers.switch_case( + 
branch_index=index_2, branch_fns=[(1, fn_1), (2, fn_2)] + ) # no default, call fn_2 but branch_index is 5 - out_4 = layers.switch_case(branch_index=index_5, - branch_fns=[(1, fn_1), (3, fn_2), - (2, fn_3)]) - - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + out_4 = layers.switch_case( + branch_index=index_5, + branch_fns=[(1, fn_1), (3, fn_2), (2, fn_3)], + ) + + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(main_program, - fetch_list=[out_0, out_1, out_2, out_3, out_4]) + res = exe.run( + main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] + ) np.testing.assert_allclose( res[0], 1, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 1)) + err_msg='result is {} but answer is {}'.format(res[0], 1), + ) np.testing.assert_allclose( res[1], 2, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 2)) + err_msg='result is {} but answer is {}'.format(res[0], 2), + ) np.testing.assert_allclose( res[2], 3, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 3)) + err_msg='result is {} but answer is {}'.format(res[0], 3), + ) np.testing.assert_allclose( res[3], 2, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 2)) + err_msg='result is {} but answer is {}'.format(res[0], 2), + ) np.testing.assert_allclose( res[4], 2, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 2)) + err_msg='result is {} but answer is {}'.format(res[0], 2), + ) def test_return_var_tuple(self): - def fn_1(): - return layers.fill_constant(shape=[1, 2], dtype='int32', - value=1), layers.fill_constant( - shape=[2, 3], - dtype='float32', - value=2) + return layers.fill_constant( + shape=[1, 2], dtype='int32', value=1 + ), layers.fill_constant(shape=[2, 3], dtype='float32', value=2) def fn_2(): - return layers.fill_constant(shape=[3, 4], dtype='int32', - value=3), layers.fill_constant( - shape=[4, 5], - dtype='float32', - value=4) + return layers.fill_constant( + shape=[3, 4], dtype='int32', value=3 + ), layers.fill_constant(shape=[4, 5], dtype='float32', value=4) def fn_3(): - return layers.fill_constant(shape=[5], dtype='int32', - value=5), layers.fill_constant( - shape=[5, 6], - dtype='float32', - value=6) + return layers.fill_constant( + shape=[5], dtype='int32', value=5 + ), layers.fill_constant(shape=[5, 6], dtype='float32', value=6) main_program = Program() startup_program = Program() @@ -131,66 +133,72 @@ class TestAPISwitchCase(unittest.TestCase): out = layers.switch_case(index_1, ((1, fn_1), (2, fn_2)), fn_3) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=out) - np.testing.assert_allclose(np.asarray(ret[0]), - np.full((1, 2), 1, np.int32), - rtol=1e-05) - np.testing.assert_allclose(np.asarray(ret[1]), - np.full((2, 3), 2, np.float32), - rtol=1e-05) + np.testing.assert_allclose( + np.asarray(ret[0]), np.full((1, 2), 1, np.int32), rtol=1e-05 + ) + np.testing.assert_allclose( + np.asarray(ret[1]), np.full((2, 3), 2, np.float32), rtol=1e-05 + ) class TestAPISwitchCase_Nested(unittest.TestCase): - def test_nested_switch_case(self): - def fn_1(x=1): - out = layers.switch_case(branch_index=layers.fill_constant( - shape=[1], dtype='int32', value=x), - branch_fns={ - 1: - partial(layers.fill_constant, - 
shape=[1], - dtype='int32', - value=1), - x: - partial(layers.fill_constant, - shape=[2], - dtype='int32', - value=x) - }) + out = layers.switch_case( + branch_index=layers.fill_constant( + shape=[1], dtype='int32', value=x + ), + branch_fns={ + 1: partial( + layers.fill_constant, shape=[1], dtype='int32', value=1 + ), + x: partial( + layers.fill_constant, shape=[2], dtype='int32', value=x + ), + }, + ) return out def fn_2(x=2): - out = layers.switch_case(branch_index=layers.fill_constant( - shape=[1], dtype='int32', value=2), - branch_fns={ - 1: - partial(layers.fill_constant, - shape=[4, 3], - dtype='int32', - value=1), - 2: - partial(fn_1, x=x) - }) + out = layers.switch_case( + branch_index=layers.fill_constant( + shape=[1], dtype='int32', value=2 + ), + branch_fns={ + 1: partial( + layers.fill_constant, + shape=[4, 3], + dtype='int32', + value=1, + ), + 2: partial(fn_1, x=x), + }, + ) return out def fn_3(): - out = layers.switch_case(branch_index=layers.fill_constant( - shape=[1], dtype='int32', value=3), - branch_fns={ - 1: - partial(layers.fill_constant, - shape=[4, 3], - dtype='int32', - value=1), - 3: - partial(fn_2, x=3) - }) + out = layers.switch_case( + branch_index=layers.fill_constant( + shape=[1], dtype='int32', value=3 + ), + branch_fns={ + 1: partial( + layers.fill_constant, + shape=[4, 3], + dtype='int32', + value=1, + ), + 3: partial(fn_2, x=3), + }, + ) return out main_program = Program() @@ -200,56 +208,53 @@ class TestAPISwitchCase_Nested(unittest.TestCase): index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2) index_3 = layers.fill_constant(shape=[1], dtype='int64', value=3) - out_1 = layers.switch_case(branch_index=index_1, - branch_fns={ - 1: fn_1, - 2: fn_2, - 3: fn_3 - }) - out_2 = layers.switch_case(branch_index=index_2, - branch_fns={ - 1: fn_1, - 2: fn_2, - 3: fn_3 - }) - - out_3 = layers.switch_case(branch_index=index_3, - branch_fns={ - 1: fn_1, - 2: fn_2, - 3: fn_3 - }) - - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + out_1 = layers.switch_case( + branch_index=index_1, branch_fns={1: fn_1, 2: fn_2, 3: fn_3} + ) + out_2 = layers.switch_case( + branch_index=index_2, branch_fns={1: fn_1, 2: fn_2, 3: fn_3} + ) + + out_3 = layers.switch_case( + branch_index=index_3, branch_fns={1: fn_1, 2: fn_2, 3: fn_3} + ) + + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(main_program, - feed={"index_1": np.array([1], dtype="uint8")}, - fetch_list=[out_1, out_2, out_3]) + res = exe.run( + main_program, + feed={"index_1": np.array([1], dtype="uint8")}, + fetch_list=[out_1, out_2, out_3], + ) np.testing.assert_allclose( res[0], 1, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[0], 1)) + err_msg='result is {} but answer is {}'.format(res[0], 1), + ) np.testing.assert_allclose( res[1], 2, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[1], 2)) + err_msg='result is {} but answer is {}'.format(res[1], 2), + ) np.testing.assert_allclose( res[2], 3, rtol=1e-05, - err_msg='result is {} but answer is {}'.format(res[2], 3)) + err_msg='result is {} but answer is {}'.format(res[2], 3), + ) # test TypeError and ValueError of api switch_case class TestAPISwitchCase_Error(unittest.TestCase): - def test_error(self): - def fn_1(): return layers.fill_constant(shape=[4, 2], dtype='int32', value=1) @@ -262,82 +267,90 @@ class TestAPISwitchCase_Error(unittest.TestCase): main_program = Program() startup_program = Program() 
with program_guard(main_program, startup_program): - key_float32 = layers.fill_constant(shape=[1], - dtype='float32', - value=0.23) - key_int32 = layers.fill_constant(shape=[1], - dtype='int32', - value=0.23) + key_float32 = layers.fill_constant( + shape=[1], dtype='float32', value=0.23 + ) + key_int32 = layers.fill_constant( + shape=[1], dtype='int32', value=0.23 + ) # The type of 'branch_index' in Op(switch_case) must be Variable def type_error_branch_index(): - layers.switch_case(branch_index=1, - branch_fns=[(1, fn_1)], - default=fn_3) + layers.switch_case( + branch_index=1, branch_fns=[(1, fn_1)], default=fn_3 + ) self.assertRaises(TypeError, type_error_branch_index) # The data type of 'branch_index' in Op(switch_case) must be int32, int64 or uint8 def dtype_error_branch_index(): - layers.switch_case(branch_index=key_float32, - branch_fns=[(1, fn_1)], - default=fn_3) + layers.switch_case( + branch_index=key_float32, + branch_fns=[(1, fn_1)], + default=fn_3, + ) self.assertRaises(TypeError, dtype_error_branch_index) # The type of 'branch_fns' in Op(switch_case) must be list, tuple or dict def type_error_branch_fns(): - layers.switch_case(branch_index=key_int32, - branch_fns=1, - default=fn_3) + layers.switch_case( + branch_index=key_int32, branch_fns=1, default=fn_3 + ) self.assertRaises(TypeError, type_error_branch_fns) # The elements' type of 'branch_fns' in Op(switch_case) must be tuple def type_error_index_fn_pair_1(): - layers.switch_case(branch_index=key_int32, - branch_fns=[1], - default=fn_3) + layers.switch_case( + branch_index=key_int32, branch_fns=[1], default=fn_3 + ) self.assertRaises(TypeError, type_error_index_fn_pair_1) # The tuple's size of 'branch_fns' in Op(switch_case) must be 2 def type_error_index_fn_pair_2(): - layers.switch_case(branch_index=key_int32, - branch_fns=[(1, 2, 3)], - default=fn_3) + layers.switch_case( + branch_index=key_int32, branch_fns=[(1, 2, 3)], default=fn_3 + ) self.assertRaises(TypeError, type_error_index_fn_pair_2) # The key's type of 'branch_fns' in Op(switch_case) must be int def type_error_key(): - layers.switch_case(branch_index=key_int32, - branch_fns=[(2.3, 2)], - default=fn_3) + layers.switch_case( + branch_index=key_int32, branch_fns=[(2.3, 2)], default=fn_3 + ) self.assertRaises(TypeError, type_error_key) # The key in 'branch_fns' must be unique def value_error_key(): - layers.switch_case(branch_index=key_int32, - branch_fns=[(2, fn_1), (2, fn_2)], - default=fn_3) + layers.switch_case( + branch_index=key_int32, + branch_fns=[(2, fn_1), (2, fn_2)], + default=fn_3, + ) self.assertRaises(ValueError, value_error_key) # The type of function in 'branch_fns' must be callable def type_error_fn(): - layers.switch_case(branch_index=key_int32, - branch_fns=[(1, 1), (2, fn_2)], - default=fn_3) + layers.switch_case( + branch_index=key_int32, + branch_fns=[(1, 1), (2, fn_2)], + default=fn_3, + ) self.assertRaises(TypeError, type_error_fn) # The default in Op(case) must be callable def type_error_default(): - layers.switch_case(branch_index=key_int32, - branch_fns=[(1, fn_1), (2, fn_2)], - default=1) + layers.switch_case( + branch_index=key_int32, + branch_fns=[(1, fn_1), (2, fn_2)], + default=1, + ) self.assertRaises(TypeError, type_error_default) diff --git a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py index 055187d8aabc34eae3177d4d6850d841054c3447..e2a53465d44cc62c45c472d6e874a2dd7e6a5210 100644 --- 
a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py @@ -47,7 +47,7 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): def setUp(self): """Setup.""" - #self.dtype = np.float32 + # self.dtype = np.float32 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 self.N = 8 self.C = 16 @@ -56,12 +56,9 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): self.dshape = [self.N, self.C, self.H, self.W] self.atol = 1e-3 - def _build_program(self, - place, - layout, - seed, - sync_bn=False, - only_forward=False): + def _build_program( + self, place, layout, seed, sync_bn=False, only_forward=False + ): """Build program.""" main = fluid.Program() startup = fluid.Program() @@ -70,17 +67,20 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): use_cudnn = self.dtype == np.float16 with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data(name='input', - shape=self.dshape, - dtype=self.dtype, - append_batch_size=False) + data = fluid.layers.data( + name='input', + shape=self.dshape, + dtype=self.dtype, + append_batch_size=False, + ) conv = fluid.layers.conv2d( input=data, num_filters=32, filter_size=1, param_attr=fluid.ParamAttr(name='conv2d_weight'), bias_attr=False, - use_cudnn=use_cudnn) + use_cudnn=use_cudnn, + ) bn = fluid.layers.batch_norm( conv, param_attr=fluid.ParamAttr(name='bn_scale'), @@ -88,7 +88,8 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=layout, - is_test=only_forward) + is_test=only_forward, + ) if core.is_compiled_with_rocm(): bn = fluid.layers.cast(bn, 'float32') else: @@ -108,42 +109,59 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): seed = 10 os.environ['FLAGS_cudnn_deterministic'] = "1" scope = core.Scope() - data = np.random.random(size=self.dshape).astype(self.dtype) * 4. 
- 2 - data = create_or_get_tensor(scope, "input", - OpTest.np_dtype_to_fluid_dtype(data), place) + data = np.random.random(size=self.dshape).astype(self.dtype) * 4.0 - 2 + data = create_or_get_tensor( + scope, "input", OpTest.np_dtype_to_fluid_dtype(data), place + ) # Single-GPU, N = 32 per GPU - main, startup, outs = self._build_program(place, layout, seed, False, - only_forward) + main, startup, outs = self._build_program( + place, layout, seed, False, only_forward + ) exe = fluid.Executor(place) exe.run(startup) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others - bn_fetches = exe.run(program=main, - feed={'input': data}, - fetch_list=fetch_names) + bn_fetches = exe.run( + program=main, feed={'input': data}, fetch_list=fetch_names + ) ##################################################################### # Multi-GPUs, self.N / core.get_cuda_device_count() per GPU assert core.get_cuda_device_count() > 1 - main, startup, outs = self._build_program(place, layout, seed, True, - only_forward) + main, startup, outs = self._build_program( + place, layout, seed, True, only_forward + ) exe = fluid.Executor(place) exe.run(startup) fetch_names = [v.name for v in outs] + [ - 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias' + 'bn_moving_mean', + 'bn_moving_variance', + 'bn_scale', + 'bn_bias', ] if not only_forward: others = [ - 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD', - 'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD' + 'batch_norm_0.tmp_0', + 'batch_norm_0.tmp_1', + 'bn_scale@GRAD', + 'bn_bias@GRAD', + 'batch_norm_0.tmp_3@GRAD', + 'conv2d_0.tmp_0@GRAD', ] fetch_names += others for nm in fetch_names: @@ -155,24 +173,31 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): build_strategy.memory_optimize = False comp_prog = compiler.CompiledProgram(main).with_data_parallel( outs[0].name if not only_forward else None, - build_strategy=build_strategy) - sync_bn_fetches = exe.run(program=comp_prog, - feed={'input': data}, - fetch_list=fetch_names) + build_strategy=build_strategy, + ) + sync_bn_fetches = exe.run( + program=comp_prog, feed={'input': data}, fetch_list=fetch_names + ) for i in range(1, len(sync_bn_fetches)): bn_val = bn_fetches[i] sync_bn_val = sync_bn_fetches[i] if sync_bn_val.shape != bn_val.shape: - sync_bn_val = sync_bn_val[:bn_val.shape[0]] - np.testing.assert_allclose(bn_val, - sync_bn_val, - rtol=1e-05, - atol=self.atol, - err_msg='Output (' + fetch_names[i] + - ') has diff. \n' + '\nBN ' + - str(bn_val) + '\n' + 'Sync BN ' + - str(sync_bn_val)) + sync_bn_val = sync_bn_val[: bn_val.shape[0]] + np.testing.assert_allclose( + bn_val, + sync_bn_val, + rtol=1e-05, + atol=self.atol, + err_msg='Output (' + + fetch_names[i] + + ') has diff. 
\n' + + '\nBN ' + + str(bn_val) + + '\n' + + 'Sync BN ' + + str(sync_bn_val), + ) def test_train(self): """Test training.""" @@ -210,15 +235,15 @@ class TestFP16SyncBatchNormOpTraining(TestSyncBatchNormOpTraining): class TestDygraphSyncBatchNormAPIError(unittest.TestCase): - def test_errors(self): if not core.is_compiled_with_cuda(): return with program_guard(Program(), Program()): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10) - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.CUDAPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0) + ) self.assertRaises(TypeError, my_sync_batch_norm, x1) # the input dtype of SyncBatchNorm must be float16 or float32 or float64 @@ -228,36 +253,39 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): class TestConvertSyncBatchNorm(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_cuda(): return with program_guard(Program(), Program()): - compare_model = paddle.nn.Sequential(paddle.nn.Conv2D(3, 5, 3), - paddle.nn.BatchNorm2D(5), - paddle.nn.BatchNorm2D(5)) + compare_model = paddle.nn.Sequential( + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), + paddle.nn.BatchNorm2D(5), + ) model = paddle.nn.Sequential( - paddle.nn.Conv2D(3, 5, 3), paddle.nn.BatchNorm2D(5), + paddle.nn.Conv2D(3, 5, 3), + paddle.nn.BatchNorm2D(5), paddle.nn.BatchNorm2D( 5, weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'))) + bias_attr=fluid.ParamAttr(name='bn.bias'), + ), + ) model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model) for idx, sublayer in enumerate(compare_model.sublayers()): if isinstance(sublayer, paddle.nn.BatchNorm2D): self.assertEqual( - isinstance(model[idx], paddle.nn.SyncBatchNorm), True) + isinstance(model[idx], paddle.nn.SyncBatchNorm), True + ) class TestConvertSyncBatchNormCast1(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_cuda(): return class Net(nn.Layer): - def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2D(3, 5, 3) @@ -282,7 +310,6 @@ class TestConvertSyncBatchNormCast1(unittest.TestCase): class TestConvertSyncBatchNormCase2(unittest.TestCase): - def test_convert(self): if not core.is_compiled_with_cuda(): return @@ -290,16 +317,19 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): with fluid.dygraph.guard(fluid.CUDAPlace(0)): class SyBNNet(paddle.nn.Layer): - def __init__(self, in_ch=3, out_ch=3, dirate=1): super(SyBNNet, self).__init__() self.bn_s1 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( paddle.nn.BatchNorm3D( out_ch, weight_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.)))) + regularizer=paddle.regularizer.L2Decay(0.0) + ), + ) + ) self.bn_s2 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( - paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC')) + paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC') + ) def forward(self, x): x = self.bn_s1(x) @@ -307,15 +337,17 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): return out class BNNet(paddle.nn.Layer): - def __init__(self, in_ch=3, out_ch=3, dirate=1): super(BNNet, self).__init__() self.bn_s1 = paddle.nn.BatchNorm3D( out_ch, weight_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.))) + regularizer=paddle.regularizer.L2Decay(0.0) + ), + ) self.bn_s2 = paddle.nn.SyncBatchNorm.convert_sync_batchnorm( - paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC')) + paddle.nn.BatchNorm3D(out_ch, data_format='NDHWC') + ) def forward(self, x): x = 
self.bn_s1(x) @@ -333,12 +365,16 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): bn_out.numpy(), sybn_out.numpy(), rtol=1e-05, - err_msg='Output has diff. \n' + '\nBN ' + - str(bn_out.numpy()) + '\n' + 'Sync BN ' + str(sybn_out.numpy())) + err_msg='Output has diff. \n' + + '\nBN ' + + str(bn_out.numpy()) + + '\n' + + 'Sync BN ' + + str(sybn_out.numpy()), + ) class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase): - def test_errors(self): if not core.is_compiled_with_cuda(): return diff --git a/python/paddle/fluid/tests/unittests/test_take.py b/python/paddle/fluid/tests/unittests/test_take.py index 27ccee81d764d3bcb87eca071b23f74434bbb4a7..f713d777c168326edda30d59a07d54ec22043241 100644 --- a/python/paddle/fluid/tests/unittests/test_take.py +++ b/python/paddle/fluid/tests/unittests/test_take.py @@ -21,7 +21,6 @@ from paddle.fluid import Program, program_guard class TestTakeAPI(unittest.TestCase): - def set_mode(self): self.mode = 'raise' @@ -32,41 +31,46 @@ class TestTakeAPI(unittest.TestCase): def set_input(self): self.input_shape = [3, 4] self.index_shape = [2, 3] - self.input_np = np.arange(0, 12).reshape(self.input_shape).astype( - self.input_dtype) - self.index_np = np.arange(-4, 2).reshape(self.index_shape).astype( - self.index_dtype) + self.input_np = ( + np.arange(0, 12).reshape(self.input_shape).astype(self.input_dtype) + ) + self.index_np = ( + np.arange(-4, 2).reshape(self.index_shape).astype(self.index_dtype) + ) def setUp(self): self.set_mode() self.set_dtype() self.set_input() - self.place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def test_static_graph(self): paddle.enable_static() startup_program = Program() train_program = Program() with program_guard(startup_program, train_program): - x = fluid.data(name='input', - dtype=self.input_dtype, - shape=self.input_shape) - index = fluid.data(name='index', - dtype=self.index_dtype, - shape=self.index_shape) + x = fluid.data( + name='input', dtype=self.input_dtype, shape=self.input_shape + ) + index = fluid.data( + name='index', dtype=self.index_dtype, shape=self.index_shape + ) out = paddle.take(x, index, mode=self.mode) exe = fluid.Executor(self.place) - st_result = exe.run(fluid.default_main_program(), - feed={ - 'input': self.input_np, - 'index': self.index_np - }, - fetch_list=out) + st_result = exe.run( + fluid.default_main_program(), + feed={'input': self.input_np, 'index': self.index_np}, + fetch_list=out, + ) np.testing.assert_allclose( st_result[0], - np.take(self.input_np, self.index_np, mode=self.mode)) + np.take(self.input_np, self.index_np, mode=self.mode), + ) def test_dygraph(self): paddle.disable_static(self.place) @@ -75,7 +79,8 @@ class TestTakeAPI(unittest.TestCase): dy_result = paddle.take(x, index, mode=self.mode) np.testing.assert_allclose( np.take(self.input_np, self.index_np, mode=self.mode), - dy_result.numpy()) + dy_result.numpy(), + ) class TestTakeInt32(TestTakeAPI): @@ -109,11 +114,12 @@ class TestTakeTypeError(TestTakeAPI): """Argument 'index' must be Tensor""" paddle.enable_static() with program_guard(Program()): - x = fluid.data(name='input', - dtype=self.input_dtype, - shape=self.input_shape) - self.assertRaises(TypeError, paddle.take, x, self.index_np, - self.mode) + x = fluid.data( + name='input', dtype=self.input_dtype, shape=self.input_shape + ) + self.assertRaises( + TypeError, paddle.take, x, self.index_np, self.mode + ) def 
test_dygraph_type_error(self): paddle.disable_static(self.place) @@ -124,12 +130,12 @@ class TestTakeTypeError(TestTakeAPI): """Data type of argument 'index' must be in [paddle.int32, paddle.int64]""" paddle.enable_static() with program_guard(Program()): - x = fluid.data(name='input', - dtype='float64', - shape=self.input_shape) - index = fluid.data(name='index', - dtype='float32', - shape=self.index_shape) + x = fluid.data( + name='input', dtype='float64', shape=self.input_shape + ) + index = fluid.data( + name='index', dtype='float32', shape=self.index_shape + ) self.assertRaises(TypeError, paddle.take, x, index, self.mode) def test_dygraph_dtype_error(self): @@ -152,29 +158,36 @@ class TestTakeModeRaisePos(unittest.TestCase): def set_input(self): self.input_shape = [3, 4] self.index_shape = [5, 6] - self.input_np = np.arange(0, 12).reshape(self.input_shape).astype( - self.input_dtype) - self.index_np = np.arange(-10, 20).reshape(self.index_shape).astype( - self.index_dtype) # positive indices are out of range + self.input_np = ( + np.arange(0, 12).reshape(self.input_shape).astype(self.input_dtype) + ) + self.index_np = ( + np.arange(-10, 20) + .reshape(self.index_shape) + .astype(self.index_dtype) + ) # positive indices are out of range def setUp(self): self.set_mode() self.set_dtype() self.set_input() - self.place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) def test_static_index_error(self): """When the index is out of range, an error is reported directly through `paddle.index_select`""" paddle.enable_static() with program_guard(Program()): - x = fluid.data(name='input', - dtype=self.input_dtype, - shape=self.input_shape) - index = fluid.data(name='index', - dtype=self.index_dtype, - shape=self.index_shape) + x = fluid.data( + name='input', dtype=self.input_dtype, shape=self.input_shape + ) + index = fluid.data( + name='index', dtype=self.index_dtype, shape=self.index_shape + ) self.assertRaises(ValueError, paddle.index_select, x, index) def test_dygraph_index_error(self): @@ -197,17 +210,24 @@ class TestTakeModeRaiseNeg(TestTakeModeRaisePos): def set_input(self): self.input_shape = [3, 4] self.index_shape = [5, 6] - self.input_np = np.arange(0, 12).reshape(self.input_shape).astype( - self.input_dtype) - self.index_np = np.arange(-20, 10).reshape(self.index_shape).astype( - self.index_dtype) # negative indices are out of range + self.input_np = ( + np.arange(0, 12).reshape(self.input_shape).astype(self.input_dtype) + ) + self.index_np = ( + np.arange(-20, 10) + .reshape(self.index_shape) + .astype(self.index_dtype) + ) # negative indices are out of range def setUp(self): self.set_mode() self.set_dtype() self.set_input() - self.place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + self.place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) class TestTakeModeWrap(TestTakeAPI): @@ -219,10 +239,14 @@ class TestTakeModeWrap(TestTakeAPI): def set_input(self): self.input_shape = [3, 4] self.index_shape = [5, 8] - self.input_np = np.arange(0, 12).reshape(self.input_shape).astype( - self.input_dtype) - self.index_np = np.arange(-20, 20).reshape(self.index_shape).astype( - self.index_dtype) # Both ends of the index are out of bounds + self.input_np = ( + np.arange(0, 12).reshape(self.input_shape).astype(self.input_dtype) + ) + self.index_np = ( + np.arange(-20, 20) + .reshape(self.index_shape) + 
.astype(self.index_dtype) + ) # Both ends of the index are out of bounds class TestTakeModeClip(TestTakeAPI): @@ -234,10 +258,14 @@ class TestTakeModeClip(TestTakeAPI): def set_input(self): self.input_shape = [3, 4] self.index_shape = [5, 8] - self.input_np = np.arange(0, 12).reshape(self.input_shape).astype( - self.input_dtype) - self.index_np = np.arange(-20, 20).reshape(self.index_shape).astype( - self.index_dtype) # Both ends of the index are out of bounds + self.input_np = ( + np.arange(0, 12).reshape(self.input_shape).astype(self.input_dtype) + ) + self.index_np = ( + np.arange(-20, 20) + .reshape(self.index_shape) + .astype(self.index_dtype) + ) # Both ends of the index are out of bounds if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py index 947ea1fb6f2c231faa70ec217878bd0d0148dcf3..a7a65aec326115e45d74cc05a292bd5680e476fb 100644 --- a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestTakeAlongAxisOp(OpTest): - def setUp(self): self.init_data() self.op_type = "take_along_axis" @@ -49,14 +48,14 @@ class TestTakeAlongAxisOp(OpTest): self.x_type = "float64" self.x_shape = (5, 5, 5) self.index_type = "int32" - self.index = np.array([[[1]], [[1]], [[2]], [[4]], - [[3]]]).astype(self.index_type) + self.index = np.array([[[1]], [[1]], [[2]], [[4]], [[3]]]).astype( + self.index_type + ) self.axis = 2 self.axis_type = "int64" class TestCase1(TestTakeAlongAxisOp): - def init_data(self): self.x_type = "float64" self.x_shape = (5, 5, 5) @@ -67,7 +66,6 @@ class TestCase1(TestTakeAlongAxisOp): class TestTakeAlongAxisAPI(unittest.TestCase): - def setUp(self): np.random.seed(0) self.shape = [3, 3] @@ -86,13 +84,12 @@ class TestTakeAlongAxisAPI(unittest.TestCase): index = paddle.fluid.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place[0]) - res = exe.run(feed={ - 'X': self.x_np, - 'Index': self.index_np - }, - fetch_list=[out]) + res = exe.run( + feed={'X': self.x_np, 'Index': self.index_np}, fetch_list=[out] + ) out_ref = np.array( - np.take_along_axis(self.x_np, self.index_np, self.axis)) + np.take_along_axis(self.x_np, self.index_np, self.axis) + ) for out in res: np.testing.assert_allclose(out, out_ref, rtol=0.001) @@ -102,19 +99,20 @@ class TestTakeAlongAxisAPI(unittest.TestCase): self.index = paddle.to_tensor(self.index_np) out = paddle.take_along_axis(x_tensor, self.index, self.axis) out_ref = np.array( - np.take_along_axis(self.x_np, self.index_np, self.axis)) + np.take_along_axis(self.x_np, self.index_np, self.axis) + ) np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) paddle.enable_static() class TestTakeAlongAxisAPICase1(TestTakeAlongAxisAPI): - def setUp(self): np.random.seed(0) self.shape = [2, 2] self.index_shape = [4, 2] - self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, - 0]]).astype('int64') + self.index_np = np.array([[0, 0], [1, 0], [0, 0], [1, 0]]).astype( + 'int64' + ) self.x_np = np.random.random(self.shape).astype(np.float32) self.place = [paddle.CPUPlace()] self.axis = 0 diff --git a/python/paddle/fluid/tests/unittests/test_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_target_assign_op.py index d51444da406a70e11afb606798a13422fa1734bb..103f28a2024151e2bdb4000c4e6fd7570c90909c 100644 --- 
a/python/paddle/fluid/tests/unittests/test_target_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_target_assign_op.py @@ -36,16 +36,23 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): ret_ids = set([i for i in range(num_prior)]) - set(ids) l = neg_lod[n] neg_ids = random.sample(ret_ids, l) - neg_indices[offset:offset + - neg_lod[n], :] = np.array(neg_ids).astype('int32').reshape( - l, 1) + neg_indices[offset : offset + neg_lod[n], :] = ( + np.array(neg_ids).astype('int32').reshape(l, 1) + ) offset += neg_lod[n] return match_indices, neg_indices -def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, - neg_lod, mismatch_value): +def target_assign( + encoded_box, + gt_label, + match_indices, + neg_indices, + gt_lod, + neg_lod, + mismatch_value, +): batch_size, num_prior = match_indices.shape # init target bbox @@ -75,7 +82,7 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, trg_label_wt[i][col_ids] = 1.0 # set target label weight to 1.0 for the negative samples if neg_indices is not None: - neg_ids = neg_indices[neg_offset:neg_offset + neg_lod[i]] + neg_ids = neg_indices[neg_offset : neg_offset + neg_lod[i]] trg_label_wt[i][neg_ids] = 1.0 # update offset gt_offset += gt_lod[i] @@ -85,7 +92,6 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, class TestTargetAssginFloatType(OpTest): - def setUp(self): self.op_type = "target_assign" num_prior = 120 @@ -97,15 +103,23 @@ class TestTargetAssginFloatType(OpTest): num_gt = sum(gt_lod) encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') - gt_label = np.random.randint(num_class, - size=(num_gt, 1)).astype('int32') + gt_label = np.random.randint(num_class, size=(num_gt, 1)).astype( + 'int32' + ) match_indices, neg_indices = gen_match_and_neg_indices( - num_prior, gt_lod, neg_lod) - - out, out_wt, _, _ = target_assign(encoded_box, gt_label, match_indices, - neg_indices, gt_lod, neg_lod, - mismatch_value) + num_prior, gt_lod, neg_lod + ) + + out, out_wt, _, _ = target_assign( + encoded_box, + gt_label, + match_indices, + neg_indices, + gt_lod, + neg_lod, + mismatch_value, + ) # assign regression targets x = encoded_box @@ -124,7 +138,6 @@ class TestTargetAssginFloatType(OpTest): class TestTargetAssginIntType(OpTest): - def setUp(self): self.op_type = "target_assign" num_prior = 120 @@ -136,15 +149,23 @@ class TestTargetAssginIntType(OpTest): num_gt = sum(gt_lod) encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') - gt_label = np.random.randint(num_class, - size=(num_gt, 1)).astype('int32') + gt_label = np.random.randint(num_class, size=(num_gt, 1)).astype( + 'int32' + ) match_indices, neg_indices = gen_match_and_neg_indices( - num_prior, gt_lod, neg_lod) - - _, _, out, out_wt, = target_assign(encoded_box, gt_label, match_indices, - neg_indices, gt_lod, neg_lod, - mismatch_value) + num_prior, gt_lod, neg_lod + ) + + _, _, out, out_wt, = target_assign( + encoded_box, + gt_label, + match_indices, + neg_indices, + gt_lod, + neg_lod, + mismatch_value, + ) # assign cassification argets x = np.reshape(gt_label, (num_gt, 1, 1)) diff --git a/python/paddle/fluid/tests/unittests/test_tdm_child_op.py b/python/paddle/fluid/tests/unittests/test_tdm_child_op.py index 5c17b737eef5d416a273a495c3bf6c66274b226d..95c49c904db5b459d0d0127409b9d459810fa820 100644 --- a/python/paddle/fluid/tests/unittests/test_tdm_child_op.py +++ b/python/paddle/fluid/tests/unittests/test_tdm_child_op.py @@ -52,15 +52,15 @@ def create_tdm_tree(): 
class TestTDMChildOp(OpTest): - def setUp(self): self.__class__.op_type = "tdm_child" self.config() tree_info = create_tdm_tree() tree_info_np = np.array(tree_info).astype(self.info_type) - x_np = np.random.randint(low=0, high=26, - size=self.x_shape).astype(self.x_type) + x_np = np.random.randint(low=0, high=26, size=self.x_shape).astype( + self.x_type + ) children_res = [] leaf_mask_res = [] for batch in x_np: @@ -100,9 +100,8 @@ class TestTDMChildOp(OpTest): class TestCase1(TestTDMChildOp): - def config(self): - """check int int64_t """ + """check int int64_t""" self.x_shape = (10, 20) self.child_shape = (10, 20, 2) self.x_type = 'int32' @@ -110,9 +109,8 @@ class TestCase1(TestTDMChildOp): class TestCase2(TestTDMChildOp): - def config(self): - """check int64_t int64_t """ + """check int64_t int64_t""" self.x_shape = (10, 20) self.child_shape = (10, 20, 2) self.x_type = 'int64' @@ -120,9 +118,8 @@ class TestCase2(TestTDMChildOp): class TestCase3(TestTDMChildOp): - def config(self): - """check int64 int32 """ + """check int64 int32""" self.x_shape = (10, 20) self.child_shape = (10, 20, 2) self.x_type = 'int64' @@ -130,9 +127,8 @@ class TestCase3(TestTDMChildOp): class TestCase4(TestTDMChildOp): - def config(self): - """check large shape """ + """check large shape""" self.x_shape = (100, 20) self.child_shape = (100, 20, 2) self.x_type = 'int32' @@ -140,7 +136,6 @@ class TestCase4(TestTDMChildOp): class TestTDMChildShape(unittest.TestCase): - def test_shape(self): x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) tdm_tree_info = create_tdm_tree() @@ -150,17 +145,21 @@ class TestTDMChildShape(unittest.TestCase): x=x, node_nums=26, child_nums=2, - param_attr=fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(tree_info_np))) + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer( + tree_info_np + ) + ), + ) place = fluid.CPUPlace() exe = fluid.Executor(place=place) exe.run(fluid.default_startup_program()) feed = { - 'x': - np.array([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], - [12]]).astype('int32') + 'x': np.array( + [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12]] + ).astype('int32') } exe.run(feed=feed) diff --git a/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py b/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py index 1d576038e706a0fb5d308db93f6fa25c6fbf9291..9c9ce12078008e4a4966aac95cfbb696a9c38fda 100644 --- a/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py +++ b/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py @@ -21,27 +21,41 @@ import paddle.fluid as fluid def create_tdm_travel(): - tree_travel = [[1, 3, 7, 14], [1, 3, 7, 15], [1, 3, 8, 16], [1, 3, 8, 17], - [1, 4, 9, 18], [1, 4, 9, 19], [1, 4, 10, 20], [1, 4, 10, 21], - [2, 5, 11, 22], [2, 5, 11, 23], [2, 5, 12, 24], - [2, 5, 12, 25], [2, 6, 13, 0]] + tree_travel = [ + [1, 3, 7, 14], + [1, 3, 7, 15], + [1, 3, 8, 16], + [1, 3, 8, 17], + [1, 4, 9, 18], + [1, 4, 9, 19], + [1, 4, 10, 20], + [1, 4, 10, 21], + [2, 5, 11, 22], + [2, 5, 11, 23], + [2, 5, 12, 24], + [2, 5, 12, 25], + [2, 6, 13, 0], + ] return tree_travel def create_tdm_layer(): - tree_layer = [[1, 2], [3, 4, 5, 6], [7, 8, 9, 10, 11, 12, 13], - [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]] + tree_layer = [ + [1, 2], + [3, 4, 5, 6], + [7, 8, 9, 10, 11, 12, 13], + [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25], + ] return tree_layer type_dict = { "int32": int(core.VarDesc.VarType.INT32), - "int64": int(core.VarDesc.VarType.INT64) + 
"int64": int(core.VarDesc.VarType.INT64), } class TestTDMSamplerOp(OpTest): - def setUp(self): self.__class__.op_type = "tdm_sampler" self.config() @@ -50,8 +64,9 @@ class TestTDMSamplerOp(OpTest): self.tree_layer = create_tdm_layer() output_0 = self.x_shape[0] - output_1 = len(self.neg_samples_num_list) + \ - np.sum(self.neg_samples_num_list) + output_1 = len(self.neg_samples_num_list) + np.sum( + self.neg_samples_num_list + ) self.output_shape = (output_0, output_1) self.layer_sample_nums = [1 + i for i in self.neg_samples_num_list] @@ -68,8 +83,9 @@ class TestTDMSamplerOp(OpTest): layer_np = np.array(tree_layer_flat).astype(self.tree_dtype) layer_np = layer_np.reshape([-1, 1]) - self.x_np = np.random.randint(low=0, high=13, - size=self.x_shape).astype(self.x_type) + self.x_np = np.random.randint(low=0, high=13, size=self.x_shape).astype( + self.x_type + ) out = np.random.random(self.output_shape).astype(self.out_dtype) label = np.random.random(self.output_shape).astype(self.out_dtype) @@ -80,7 +96,7 @@ class TestTDMSamplerOp(OpTest): 'output_positive': True, 'layer_offset_lod': tree_layer_offset_lod, 'seed': 0, - 'dtype': type_dict[self.out_dtype] + 'dtype': type_dict[self.out_dtype], } self.inputs = {'X': self.x_np, 'Travel': travel_np, 'Layer': layer_np} self.outputs = {'Out': out, 'Labels': label, 'Mask': mask} @@ -128,7 +144,8 @@ class TestTDMSamplerOp(OpTest): positive_travel.append(sampling_res_list[0]) label_sampling_res = label_res[batch_ids][ - start_offset:end_offset] + start_offset:end_offset + ] mask_sampling_res = mask_res[batch_ids][start_offset:end_offset] # check unique @@ -136,8 +153,12 @@ class TestTDMSamplerOp(OpTest): assert len(set(sampling_res_list)) == len( sampling_res_list ), "len(set(sampling_res_list)): {}, len(sampling_res_list): {} , sample_res: {}, label_res:{}, mask_res: {}".format( - len(set(sampling_res_list)), len(sampling_res_list), - sampling_res, label_sampling_res, mask_sampling_res) + len(set(sampling_res_list)), + len(sampling_res_list), + sampling_res, + label_sampling_res, + mask_sampling_res, + ) # check legal layer_node = self.tree_layer[layer_idx] layer_node.append(0) @@ -145,8 +166,12 @@ class TestTDMSamplerOp(OpTest): assert ( sample in layer_node ), "sample: {}, layer_node: {} , sample_res: {}, label_res: {}, mask_res:{}".format( - sample, layer_node, sampling_res, label_sampling_res, - mask_sampling_res) + sample, + layer_node, + sampling_res, + label_sampling_res, + mask_sampling_res, + ) # check label label_flag = 1 @@ -158,15 +183,16 @@ class TestTDMSamplerOp(OpTest): assert not np.sum( mask_sampling_res[padding_index] ), "np.sum(mask_sampling_res[padding_index]): {} ".format( - np.sum(mask_sampling_res[padding_index])) + np.sum(mask_sampling_res[padding_index]) + ) start_offset = end_offset # check travel legal - assert self.tree_travel[int( - self.x_np[batch_ids])] == positive_travel + assert ( + self.tree_travel[int(self.x_np[batch_ids])] == positive_travel + ) class TestCase1(TestTDMSamplerOp): - def config(self): """test input int64""" self.neg_samples_num_list = [0, 0, 0, 0] @@ -177,7 +203,6 @@ class TestCase1(TestTDMSamplerOp): class TestCase2(TestTDMSamplerOp): - def config(self): """test dtype int64""" self.neg_samples_num_list = [0, 0, 0, 0] @@ -188,7 +213,6 @@ class TestCase2(TestTDMSamplerOp): class TestCase3(TestTDMSamplerOp): - def config(self): """test all dtype int64""" self.neg_samples_num_list = [0, 0, 0, 0] @@ -199,7 +223,6 @@ class TestCase3(TestTDMSamplerOp): class TestCase4(TestTDMSamplerOp): - def config(self): 
"""test one neg""" self.neg_samples_num_list = [1, 1, 1, 1] @@ -210,7 +233,6 @@ class TestCase4(TestTDMSamplerOp): class TestCase5(TestTDMSamplerOp): - def config(self): """test normal neg""" self.neg_samples_num_list = [1, 2, 3, 4] @@ -221,7 +243,6 @@ class TestCase5(TestTDMSamplerOp): class TestCase6(TestTDMSamplerOp): - def config(self): """test huge batchsize""" self.neg_samples_num_list = [1, 2, 3, 4] @@ -232,7 +253,6 @@ class TestCase6(TestTDMSamplerOp): class TestCase7(TestTDMSamplerOp): - def config(self): """test full neg""" self.neg_samples_num_list = [1, 3, 6, 11] @@ -243,7 +263,6 @@ class TestCase7(TestTDMSamplerOp): class TestTDMSamplerShape(unittest.TestCase): - def test_shape(self): x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) tdm_tree_travel = create_tdm_travel() @@ -267,23 +286,41 @@ class TestTDMSamplerShape(unittest.TestCase): leaf_node_num, tree_travel_attr=fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( - travel_array)), - tree_layer_attr=fluid.ParamAttr(initializer=fluid.initializer. - NumpyArrayInitializer(layer_array)), + travel_array + ) + ), + tree_layer_attr=fluid.ParamAttr( + initializer=fluid.initializer.NumpyArrayInitializer(layer_array) + ), output_positive=True, output_list=True, seed=0, tree_dtype='int32', - dtype='int32') + dtype='int32', + ) place = fluid.CPUPlace() exe = fluid.Executor(place=place) exe.run(fluid.default_startup_program()) feed = { - 'x': - np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], - [11], [12]]).astype('int32') + 'x': np.array( + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + ] + ).astype('int32') } exe.run(feed=feed) diff --git a/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py b/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py index 228ecbb06ead3e759cc61e2d93e6b4565beec934..88ea8f647c4bf123e0ca06c0a291b18af4ba3855 100644 --- a/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py @@ -23,7 +23,7 @@ import paddle.fluid as fluid class TestTeacherStudentSigmoidLossOp(OpTest): """ - Test teacher_student_sigmoid_loss with discrete one-hot labels. + Test teacher_student_sigmoid_loss with discrete one-hot labels. 
""" def setUp(self): @@ -31,12 +31,14 @@ class TestTeacherStudentSigmoidLossOp(OpTest): batch_size = 100 num_classes = 1 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - (batch_size, num_classes)).astype("float64")), - 'Label': - np.random.uniform(0, 2, (batch_size, num_classes)).astype("float64") + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)).astype( + "float64" + ) + ), + 'Label': np.random.uniform(0, 2, (batch_size, num_classes)).astype( + "float64" + ), } outs = [] for index, label in enumerate(self.inputs["Label"]): @@ -46,11 +48,22 @@ class TestTeacherStudentSigmoidLossOp(OpTest): elif label < 0.0: outs.append(max(x, 0.0) - x + log(1.0 + exp(-abs(x)))) elif label < 1.0: - outs.append(max(x, 0.0) + log(1.0 + exp(-abs(x))) + \ - max(x, 0.0) - x * label + log(1.0 + exp(-abs(x)))) + outs.append( + max(x, 0.0) + + log(1.0 + exp(-abs(x))) + + max(x, 0.0) + - x * label + + log(1.0 + exp(-abs(x))) + ) else: - outs.append(max(x, 0.0) - x + log(1.0 + exp(-abs(x))) + \ - max(x, 0.0) - x * (label - 1.0) + log(1.0 + exp(-abs(x)))) + outs.append( + max(x, 0.0) + - x + + log(1.0 + exp(-abs(x))) + + max(x, 0.0) + - x * (label - 1.0) + + log(1.0 + exp(-abs(x))) + ) self.outputs = {'Y': np.array(outs)} def test_check_output(self): @@ -61,9 +74,7 @@ class TestTeacherStudentSigmoidLossOp(OpTest): class TestTeacherStudentSigmoidLossInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): input = [512, 1] label = fluid.data(name='label', shape=[None, 1], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py b/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py index ea6d9abc37284daccf44fb7f8c7c422bb8ebd636..13dc115f6bc4d617fd637a25fcfb46253bdbc57a 100644 --- a/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py +++ b/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py @@ -25,13 +25,14 @@ def temporal_shift(x, seg_num, shift_ratio, data_format): x = np.transpose(x, (0, 3, 1, 2)) shape = x.shape reshape_x = x.reshape((-1, seg_num, shape[1], shape[2], shape[3])) - pad_x = np.pad(reshape_x, ((0, 0), (1, 1), (0, 0), (0, 0), (0, 0)), - 'constant') + pad_x = np.pad( + reshape_x, ((0, 0), (1, 1), (0, 0), (0, 0), (0, 0)), 'constant' + ) c1 = int(shape[1] * shift_ratio) c2 = int(shape[1] * 2 * shift_ratio) slice1 = pad_x[:, :seg_num, :c1, :, :] - slice2 = pad_x[:, 2:seg_num + 2, c1:c2, :, :] - slice3 = pad_x[:, 1:seg_num + 1, c2:, :, :] + slice2 = pad_x[:, 2 : seg_num + 2, c1:c2, :, :] + slice3 = pad_x[:, 1 : seg_num + 1, c2:, :, :] concat_x = np.concatenate([slice1, slice2, slice3], axis=2) out = concat_x.reshape(shape) if data_format == "NHWC": @@ -40,7 +41,6 @@ def temporal_shift(x, seg_num, shift_ratio, data_format): class TestTemporalShift(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'temporal_shift' @@ -50,15 +50,16 @@ class TestTemporalShift(OpTest): self.attrs = { "seg_num": self.seg_num, "shift_ratio": self.shift_ratio, - "data_format": self.data_format + "data_format": self.data_format, } self.inputs = { "X": x, } - output = temporal_shift(x, self.seg_num, self.shift_ratio, - self.data_format) + output = temporal_shift( + x, self.seg_num, self.shift_ratio, self.data_format + ) self.outputs = {"Out": output} self.python_out_sig = ["Out"] @@ -77,7 +78,6 @@ class TestTemporalShift(OpTest): class TestTemporalShift2(TestTemporalShift): - def initTestCase(self): self.x_shape = (4, 9, 7, 7) self.seg_num = 2 @@ -86,7 +86,6 @@ class TestTemporalShift2(TestTemporalShift): 
class TestTemporalShift3(TestTemporalShift): - def initTestCase(self): self.x_shape = (3, 10, 5, 5) self.seg_num = 1 @@ -95,7 +94,6 @@ class TestTemporalShift3(TestTemporalShift): class TestTemporalShift4(TestTemporalShift): - def initTestCase(self): self.x_shape = (6, 5, 5, 4) self.seg_num = 3 @@ -103,10 +101,10 @@ class TestTemporalShift4(TestTemporalShift): self.data_format = 'NHWC' -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestTemporalShiftFP16(TestTemporalShift): - def initTestCase(self): self.x_shape = (3, 10, 5, 5) self.seg_num = 1 @@ -126,32 +124,29 @@ class TestTemporalShiftFP16(TestTemporalShift): class TestTemporalShiftAPI(unittest.TestCase): - def test_api(self): input = paddle.randn([6, 4, 2, 2]) - out = paddle.fluid.layers.temporal_shift(x=input, - seg_num=2, - shift_ratio=0.2) + out = paddle.fluid.layers.temporal_shift( + x=input, seg_num=2, shift_ratio=0.2 + ) - out_from_function = paddle.nn.functional.temporal_shift(x=input, - seg_num=2, - shift_ratio=0.2) + out_from_function = paddle.nn.functional.temporal_shift( + x=input, seg_num=2, shift_ratio=0.2 + ) # dygraph with paddle.fluid.dygraph.guard(): input = paddle.randn([6, 4, 2, 2]) - out = paddle.nn.functional.temporal_shift(x=input, - seg_num=2, - shift_ratio=0.2) + out = paddle.nn.functional.temporal_shift( + x=input, seg_num=2, shift_ratio=0.2 + ) def test_error(self): - def attr_data_format(): input = paddle.randn([6, 4, 2, 2]) - out = paddle.nn.functional.temporal_shift(x=input, - seg_num=2, - shift_ratio=0.2, - data_format="HWC") + out = paddle.nn.functional.temporal_shift( + x=input, seg_num=2, shift_ratio=0.2, data_format="HWC" + ) self.assertRaises(ValueError, attr_data_format) diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py index 2226805b1253f6defa36d3abbf99cb755ef2221f..ef8e60df9d160b9bf5487ef84d7c9c39e8bd0019 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor.py @@ -20,7 +20,6 @@ import numbers class TestTensorPtr(unittest.TestCase): - def test_tensor_ptr(self): t = core.Tensor() np_arr = np.zeros([2, 3]) @@ -29,11 +28,17 @@ class TestTensorPtr(unittest.TestCase): class TestTensor(unittest.TestCase): - def setUp(self): self.support_dtypes = [ - 'bool', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16', - 'float32', 'float64' + 'bool', + 'uint8', + 'int8', + 'int16', + 'int32', + 'int64', + 'float16', + 'float32', + 'float64', ] def test_int_tensor(self): @@ -79,10 +84,9 @@ class TestTensor(unittest.TestCase): scope = core.Scope() var = scope.var("int8_tensor") cpu_tensor = var.get_tensor() - tensor_array = np.random.randint(-127, - high=128, - size=[100, 200], - dtype=np.int8) + tensor_array = np.random.randint( + -127, high=128, size=[100, 200], dtype=np.int8 + ) place = core.CPUPlace() cpu_tensor.set(tensor_array, place) cpu_tensor_array_2 = np.array(cpu_tensor) @@ -90,15 +94,15 @@ class TestTensor(unittest.TestCase): if core.is_compiled_with_cuda(): cuda_tensor = var.get_tensor() - tensor_array = np.random.randint(-127, - high=128, - size=[100, 200], - dtype=np.int8) + tensor_array = np.random.randint( + -127, high=128, size=[100, 200], dtype=np.int8 + ) place = core.CUDAPlace(0) cuda_tensor.set(tensor_array, place) cuda_tensor_array_2 = np.array(cuda_tensor) - self.assertAlmostEqual(cuda_tensor_array_2.all(), - 
tensor_array.all()) + self.assertAlmostEqual( + cuda_tensor_array_2.all(), tensor_array.all() + ) def test_int_lod_tensor(self): place = core.CPUPlace() @@ -207,10 +211,13 @@ class TestTensor(unittest.TestCase): shape = [3, 3, 3] tensor._set_dims(shape) - tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], [16, 17, 18]], - [[19, 20, 21], [22, 23, 24], - [25, 26, 27]]]).astype(dtype) + tensor_array = np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]], + ] + ).astype(dtype) tensor.set(tensor_array, place) n1 = tensor[1] @@ -283,21 +290,24 @@ class TestTensor(unittest.TestCase): tensor = var.get_tensor() dtype = core.VarDesc.VarType.FP32 self.assertTrue( - isinstance(tensor._mutable_data(place, dtype), numbers.Integral)) + isinstance(tensor._mutable_data(place, dtype), numbers.Integral) + ) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) self.assertTrue( - isinstance(tensor._mutable_data(place, dtype), - numbers.Integral)) + isinstance(tensor._mutable_data(place, dtype), numbers.Integral) + ) place = core.CUDAPinnedPlace() self.assertTrue( - isinstance(tensor._mutable_data(place, dtype), - numbers.Integral)) + isinstance(tensor._mutable_data(place, dtype), numbers.Integral) + ) places = fluid.cuda_pinned_places() self.assertTrue( - isinstance(tensor._mutable_data(places[0], dtype), - numbers.Integral)) + isinstance( + tensor._mutable_data(places[0], dtype), numbers.Integral + ) + ) def test_tensor_set_fp16(self): array = np.random.random((300, 500)).astype("float16") diff --git a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py index 0291052a7e08afdc3cbfb45fc2caf6b736889a41..21f90556f7a5b9f0919963f199098654750f2e58 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py @@ -50,8 +50,10 @@ class TestLoDTensorArrayConcat(unittest.TestCase): program = fluid.Program() block = program.global_block() - input_arr = block.create_var(name="tmp_lod_tensor_array", - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY) + input_arr = block.create_var( + name="tmp_lod_tensor_array", + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ) input_arr.persistable = True input_arr_var = scope.var('tmp_lod_tensor_array') input_tensor_array = input_arr_var.get_lod_tensor_array() @@ -75,28 +77,30 @@ class TestLoDTensorArrayConcat(unittest.TestCase): y_out_index = block.create_var(name="OutIndex") y_out_index.persistable = True - y_grad_arr = block.create_var(name='Out@GRAD', - dtype='float32', - shape=[11]) + y_grad_arr = block.create_var( + name='Out@GRAD', dtype='float32', shape=[11] + ) y_grad_arr.persistable = True y_grad = scope.var('Out@GRAD') y_grad_tensor = y_grad.get_tensor() y_grad_tensor.set(random_grad, cpu) - op = block.append_op(type=self.op_type, - inputs={"X": input_arr}, - outputs={ - "Out": y_out, - "OutIndex": y_out_index - }, - attrs=self.attrs) - - out_grad = block.create_var(name="tmp_lod_tensor_array@GRAD", - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY) + op = block.append_op( + type=self.op_type, + inputs={"X": input_arr}, + outputs={"Out": y_out, "OutIndex": y_out_index}, + attrs=self.attrs, + ) + + out_grad = block.create_var( + name="tmp_lod_tensor_array@GRAD", + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + ) out_grad.persistable = True grad_op_desc_list, op_grad_to_var = 
core.get_grad_op_desc( - op.desc, set(), []) + op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -115,13 +119,14 @@ class TestLoDTensorArrayConcat(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) out = exe.run(program, fetch_list=fetch_list, scope=scope) - #print ("index: ", np.array(out[1])) + # print ("index: ", np.array(out[1])) # test forward tensor_res = np.array(out[0]) tensor_res_out_idx = np.array(out[1]) - tensor_gt = np.array([0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], - dtype='float32') + tensor_gt = np.array( + [0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float32' + ) self.assertEqual(len(tensor_res), len(tensor_gt)) self.assertEqual(len(tensor_res_out_idx), 10) @@ -144,13 +149,16 @@ class TestLoDTensorArrayConcat(unittest.TestCase): for i in range(len(grad_tensor_array)): if i == 0: self.assertEqual( - np.array(grad_tensor_array[i])[0], np.array(random_grad[i])) + np.array(grad_tensor_array[i])[0], np.array(random_grad[i]) + ) self.assertEqual( np.array(grad_tensor_array[i])[1], - np.array(random_grad[i + 1])) + np.array(random_grad[i + 1]), + ) if i == 1: - self.assertEqual(np.array(grad_tensor_array[i]), - np.array(random_grad[i + 1])) + self.assertEqual( + np.array(grad_tensor_array[i]), np.array(random_grad[i + 1]) + ) class TestLoDTensorArrayStack(unittest.TestCase): @@ -162,12 +170,14 @@ class TestLoDTensorArrayStack(unittest.TestCase): self.inputs = [ np.random.rand(2, 3, 4).astype("float32"), np.random.rand(2, 3, 4).astype("float32"), - np.random.rand(2, 3, 4).astype("float32") + np.random.rand(2, 3, 4).astype("float32"), ] self.outputs = [ np.stack(self.inputs, axis=self.attrs["axis"]), - np.array([x.shape[self.attrs["axis"]] for x in self.inputs], - dtype="int32") + np.array( + [x.shape[self.attrs["axis"]] for x in self.inputs], + dtype="int32", + ), ] self.input_grads = [np.ones_like(x) for x in self.inputs] self.set_program() @@ -184,7 +194,8 @@ class TestLoDTensorArrayStack(unittest.TestCase): x = fluid.layers.assign(x) fluid.layers.array_write(x, idx + i, array) output, output_index = fluid.layers.tensor_array_to_tensor( - input=array, **self.attrs) + input=array, **self.attrs + ) loss = fluid.layers.reduce_sum(output) fluid.backward.append_backward(loss) self.output_vars = [output, output_index] @@ -192,10 +203,11 @@ class TestLoDTensorArrayStack(unittest.TestCase): def run_check(self, executor, scope): executor.run(self.program, scope=scope) for i, output in enumerate(self.outputs): - np.allclose(np.array( - scope.var(self.output_vars[i].name).get_tensor()), - output, - atol=0) + np.allclose( + np.array(scope.var(self.output_vars[i].name).get_tensor()), + output, + atol=0, + ) tensor_array_grad = scope.var(self.array.name).get_lod_tensor_array() for i, input_grad in enumerate(self.input_grads): np.allclose(np.array(tensor_array_grad[i]), input_grad, atol=0) @@ -215,7 +227,6 @@ class TestLoDTensorArrayStack(unittest.TestCase): class TestTensorArrayToTensorAPI(unittest.TestCase): - def _test_case(self, inp1, inp2): x0 = fluid.layers.assign(inp1) x0.stop_gradient = False @@ -226,10 +237,20 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): fluid.layers.array_write(x0, i, array) fluid.layers.array_write(x1, i + 1, array) output_stack, output_index_stack = fluid.layers.tensor_array_to_tensor( - input=array, axis=1, use_stack=True) - output_concat, output_index_concat = fluid.layers.tensor_array_to_tensor( - input=array, axis=1, use_stack=False) - return output_stack, 
output_index_stack, output_concat, output_index_concat + input=array, axis=1, use_stack=True + ) + ( + output_concat, + output_index_concat, + ) = fluid.layers.tensor_array_to_tensor( + input=array, axis=1, use_stack=False + ) + return ( + output_stack, + output_index_stack, + output_concat, + output_index_concat, + ) def test_case(self): inp0 = np.random.rand(2, 3, 4).astype("float32") @@ -269,7 +290,8 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): self.assertTrue(fluid.layers.array_length(array), 10) last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9) np.testing.assert_array_equal( - fluid.layers.array_read(array, last).numpy(), inp0) + fluid.layers.array_read(array, last).numpy(), inp0 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py b/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py index 6c38d2349905b360bce8cdad4078d2b41deadde5..3f2b00e8e73371cac25f41ca9f43ea9e6d8ee3a9 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py @@ -19,7 +19,6 @@ from paddle.fluid.core import LoDTensor as Tensor class TestTensorCopyFrom(unittest.TestCase): - def test_main(self): place = paddle.CPUPlace() np_value = np.random.random(size=[10, 30]).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_tensor_fill_.py b/python/paddle/fluid/tests/unittests/test_tensor_fill_.py index e52c761fd4ef55be676b989706e809f8ed7709a9..7998cbcc19e482d94cd75c26496bd0c84a4a731a 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_fill_.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_fill_.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TensorFill_Test(unittest.TestCase): - def setUp(self): self.shape = [32, 32] @@ -36,15 +35,16 @@ class TensorFill_Test(unittest.TestCase): paddle.set_device('cpu') else: paddle.set_device('gpu') - np_arr = np.reshape(np.array(range(np.prod(self.shape))), - self.shape) + np_arr = np.reshape( + np.array(range(np.prod(self.shape))), self.shape + ) for dtype in typelist: - var = 1. + var = 1.0 tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype) target = tensor.numpy() target[...] 
= var - tensor.fill_(var) #var type is basic type in typelist + tensor.fill_(var) # var type is basic type in typelist self.assertEqual((tensor.numpy() == target).all(), True) def test_tensor_fill_true(self): @@ -64,8 +64,9 @@ class TensorFill_Test(unittest.TestCase): paddle.set_device('cpu') else: paddle.set_device('gpu') - np_arr = np.reshape(np.array(range(np.prod(self.shape))), - self.shape) + np_arr = np.reshape( + np.array(range(np.prod(self.shape))), self.shape + ) for dtype in typelist: var = int(1) tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype) @@ -85,7 +86,6 @@ class TensorFill_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_test_errors(self): - def test_list(): x = paddle.to_tensor([2, 3, 4]) x.fill_([1]) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_.py b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_.py index 3eaf01642a6e5b4e1fbd830b45292fe174ebc57c..b3c68f932043997dc4fbe86a64e1bb610e96aa1a 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_.py @@ -20,12 +20,13 @@ from paddle.fluid.framework import _test_eager_guard class TensorFillDiagonal_Test(unittest.TestCase): - def func_dim2_normal(self): - expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, - 1]]).astype('float32') - expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, - 0]]).astype('float32') + expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1]]).astype( + 'float32' + ) + expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype( + 'float32' + ) typelist = ['float32', 'float64', 'int32', 'int64'] places = [fluid.CPUPlace()] @@ -46,10 +47,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2_normal(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -59,10 +62,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_offset(self): - expected_np = np.array([[2, 2, 1], [2, 2, 2], [2, 2, - 2]]).astype('float32') - expected_grad = np.array([[1, 1, 0], [1, 1, 1], [1, 1, - 1]]).astype('float32') + expected_np = np.array([[2, 2, 1], [2, 2, 2], [2, 2, 2]]).astype( + 'float32' + ) + expected_grad = np.array([[1, 1, 0], [1, 1, 1], [1, 1, 1]]).astype( + 'float32' + ) typelist = ['float32', 'float64', 'int32', 'int64'] places = [fluid.CPUPlace()] @@ -83,10 +88,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_offset(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -96,8 +103,9 @@ class TensorFillDiagonal_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_bool(self): - expected_np = np.array([[False, True, True], [True, False, True], - [True, True, False]]) + expected_np = np.array( + [[False, True, True], [True, False, True], [True, True, False]] + ) typelist = ['bool'] places = [fluid.CPUPlace()] @@ -122,12 +130,28 @@ class 
TensorFillDiagonal_Test(unittest.TestCase): self.func_bool() def func_dim2_unnormal_wrap(self): - expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1], [2, 2, 2], - [1, 2, 2], [2, 1, 2], [2, 2, - 1]]).astype('float32') - expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1], - [0, 1, 1], [1, 0, 1], [1, 1, - 0]]).astype('float32') + expected_np = np.array( + [ + [1, 2, 2], + [2, 1, 2], + [2, 2, 1], + [2, 2, 2], + [1, 2, 2], + [2, 1, 2], + [2, 2, 1], + ] + ).astype('float32') + expected_grad = np.array( + [ + [0, 1, 1], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + [0, 1, 1], + [1, 0, 1], + [1, 1, 0], + ] + ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] places = [fluid.CPUPlace()] @@ -148,10 +172,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2_unnormal_wrap(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -161,12 +187,28 @@ class TensorFillDiagonal_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_dim2_unnormal_unwrap(self): - expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1], [2, 2, 2], - [2, 2, 2], [2, 2, 2], [2, 2, - 2]]).astype('float32') - expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1], - [1, 1, 1], [1, 1, 1], [1, 1, - 1]]).astype('float32') + expected_np = np.array( + [ + [1, 2, 2], + [2, 1, 2], + [2, 2, 1], + [2, 2, 2], + [2, 2, 2], + [2, 2, 2], + [2, 2, 2], + ] + ).astype('float32') + expected_grad = np.array( + [ + [0, 1, 1], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + ] + ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] places = [fluid.CPUPlace()] @@ -187,10 +229,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2_unnormal_unwrap(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -200,14 +244,20 @@ class TensorFillDiagonal_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_dim_larger2_normal(self): - expected_np = np.array([[[1, 2, 2], [2, 2, 2], [2, 2, 2]], - [[2, 2, 2], [2, 1, 2], [2, 2, 2]], - [[2, 2, 2], [2, 2, 2], [2, 2, - 1]]]).astype('float32') - expected_grad = np.array([[[0, 1, 1], [1, 1, 1], [1, 1, 1]], - [[1, 1, 1], [1, 0, 1], [1, 1, 1]], - [[1, 1, 1], [1, 1, 1], - [1, 1, 0]]]).astype('float32') + expected_np = np.array( + [ + [[1, 2, 2], [2, 2, 2], [2, 2, 2]], + [[2, 2, 2], [2, 1, 2], [2, 2, 2]], + [[2, 2, 2], [2, 2, 2], [2, 2, 1]], + ] + ).astype('float32') + expected_grad = np.array( + [ + [[0, 1, 1], [1, 1, 1], [1, 1, 1]], + [[1, 1, 1], [1, 0, 1], [1, 1, 1]], + [[1, 1, 1], [1, 1, 1], [1, 1, 0]], + ] + ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] places = [fluid.CPUPlace()] @@ -228,10 +278,12 @@ class TensorFillDiagonal_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == 
expected_grad).all(), - True) + True, + ) def test_dim_larger2_normal(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor.py index 3aa2ac96123955cd90de8d022d89834590595df2..8dd9b327f784d4d72a9c148cc36b947e56f7717d 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _enable_legacy_dygraph class TensorFillDiagTensor_Test(unittest.TestCase): - def setUp(self): self.typelist = ['float32', 'float64', 'int32', 'int64'] self.places = [fluid.CPUPlace()] @@ -30,10 +29,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): def test_dim2(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1], - [2, 2, 2]]).astype('float32') - expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], - [1, 1, 1]]).astype('float32') + expected_np = np.array( + [[1, 2, 2], [2, 1, 2], [2, 2, 1], [2, 2, 2]] + ).astype('float32') + expected_grad = np.array( + [[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -41,8 +42,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((3, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((3,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -51,18 +52,22 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (ny.numpy().astype('float32') == expected_np).all(), True) + (ny.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_dim2_offset_1(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - expected_np = np.array([[2, 2, 2], [1, 2, 2], [2, 1, 2], - [2, 2, 1]]).astype('float32') - expected_grad = np.array([[1, 1, 1], [0, 1, 1], [1, 0, 1], - [1, 1, 0]]).astype('float32') + expected_np = np.array( + [[2, 2, 2], [1, 2, 2], [2, 1, 2], [2, 2, 1]] + ).astype('float32') + expected_grad = np.array( + [[1, 1, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -70,8 +75,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((3, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((3,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -80,18 +85,22 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (ny.numpy().astype('float32') == expected_np).all(), True) + (ny.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_dim2_offset1(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - expected_np = np.array([[2, 1, 2], [2, 2, 1], [2, 2, 2], - [2, 2, 2]]).astype('float32') - expected_grad = np.array([[1, 0, 
1], [1, 1, 0], [1, 1, 1], - [1, 1, 1]]).astype('float32') + expected_np = np.array( + [[2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 2]] + ).astype('float32') + expected_grad = np.array( + [[1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 1, 1]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -99,8 +108,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((2, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((2,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -109,30 +118,48 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (ny.numpy().astype('float32') == expected_np).all(), True) + (ny.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_dim4(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - expected_np = np.array([[[[0, 3], [2, 2], [2, 2]], - [[2, 2], [1, 4], [2, 2]], - [[2, 2], [2, 2], [2, 5]], - [[2, 2], [2, 2], [2, 2]]], - [[[6, 9], [2, 2], [2, 2]], - [[2, 2], [7, 10], [2, 2]], - [[2, 2], [2, 2], [8, 11]], - [[2, 2], [2, 2], [2, 2]]]]).astype('float32') - expected_grad = np.array([[[[0, 0], [1, 1], [1, 1]], - [[1, 1], [0, 0], [1, 1]], - [[1, 1], [1, 1], [0, 0]], - [[1, 1], [1, 1], [1, 1]]], - [[[0, 0], [1, 1], [1, 1]], - [[1, 1], [0, 0], [1, 1]], - [[1, 1], [1, 1], [0, 0]], - [[1, 1], [1, 1], [1, 1]]]]).astype('float32') + expected_np = np.array( + [ + [ + [[0, 3], [2, 2], [2, 2]], + [[2, 2], [1, 4], [2, 2]], + [[2, 2], [2, 2], [2, 5]], + [[2, 2], [2, 2], [2, 2]], + ], + [ + [[6, 9], [2, 2], [2, 2]], + [[2, 2], [7, 10], [2, 2]], + [[2, 2], [2, 2], [8, 11]], + [[2, 2], [2, 2], [2, 2]], + ], + ] + ).astype('float32') + expected_grad = np.array( + [ + [ + [[0, 0], [1, 1], [1, 1]], + [[1, 1], [0, 0], [1, 1]], + [[1, 1], [1, 1], [0, 0]], + [[1, 1], [1, 1], [1, 1]], + ], + [ + [[0, 0], [1, 1], [1, 1]], + [[1, 1], [0, 0], [1, 1]], + [[1, 1], [1, 1], [0, 0]], + [[1, 1], [1, 1], [1, 1]], + ], + ] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -140,9 +167,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.to_tensor(np.arange(12).reshape(2, 2, 3), - dtype=dtype) - var = (np.random.random() + 1) + v = paddle.to_tensor( + np.arange(12).reshape(2, 2, 3), dtype=dtype + ) + var = np.random.random() + 1 x = paddle.ones((2, 4, 3, 2), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -151,10 +179,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (ny.numpy().astype('float32') == expected_np).all(), True) + (ny.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_largedim(self): @@ -165,7 +195,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): paddle.set_device('gpu') for dtype in self.typelist: v = paddle.arange(bsdim * fsdim, dtype=dtype).reshape( - (bsdim, fsdim)) + (bsdim, fsdim) + ) y = paddle.ones((bsdim, fsdim, fsdim), dtype=dtype) y.stop_gradient = False y = y * 2 diff --git a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor_.py 
b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor_.py index eb223dc6317e026742598dbbd363fbe86bb86b90..17be54720c97ad678837ee2da5c1b796ba6c0d5d 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor_.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_fill_diagonal_tensor_.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TensorFillDiagTensor_Test(unittest.TestCase): - def setUp(self): self.typelist = ['float32', 'float64', 'int32', 'int64'] self.places = [fluid.CPUPlace()] @@ -29,10 +28,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): self.places.append(fluid.CUDAPlace(0)) def func_dim2(self): - expected_np = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1], - [2, 2, 2]]).astype('float32') - expected_grad = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], - [1, 1, 1]]).astype('float32') + expected_np = np.array( + [[1, 2, 2], [2, 1, 2], [2, 2, 1], [2, 2, 2]] + ).astype('float32') + expected_grad = np.array( + [[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -40,8 +41,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((3, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((3,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -50,10 +51,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -63,10 +66,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_dim2_offset_1(self): - expected_np = np.array([[2, 2, 2], [1, 2, 2], [2, 1, 2], - [2, 2, 1]]).astype('float32') - expected_grad = np.array([[1, 1, 1], [0, 1, 1], [1, 0, 1], - [1, 1, 0]]).astype('float32') + expected_np = np.array( + [[2, 2, 2], [1, 2, 2], [2, 1, 2], [2, 2, 1]] + ).astype('float32') + expected_grad = np.array( + [[1, 1, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -74,8 +79,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((3, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((3,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -84,10 +89,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2_offset_1(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -97,10 +104,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_dim2_offset1(self): - expected_np = np.array([[2, 1, 2], [2, 2, 1], [2, 2, 2], - [2, 2, 2]]).astype('float32') - expected_grad = np.array([[1, 0, 1], [1, 1, 0], [1, 1, 1], - [1, 1, 
1]]).astype('float32') + expected_np = np.array( + [[2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 2]] + ).astype('float32') + expected_grad = np.array( + [[1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 1, 1]] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -108,8 +117,8 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.ones((2, ), dtype=dtype) - var = (np.random.random() + 1) + v = paddle.ones((2,), dtype=dtype) + var = np.random.random() + 1 x = paddle.ones((4, 3), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -118,10 +127,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_dim2_offset1(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -131,22 +142,38 @@ class TensorFillDiagTensor_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_dim4(self): - expected_np = np.array([[[[0, 3], [2, 2], [2, 2]], - [[2, 2], [1, 4], [2, 2]], - [[2, 2], [2, 2], [2, 5]], - [[2, 2], [2, 2], [2, 2]]], - [[[6, 9], [2, 2], [2, 2]], - [[2, 2], [7, 10], [2, 2]], - [[2, 2], [2, 2], [8, 11]], - [[2, 2], [2, 2], [2, 2]]]]).astype('float32') - expected_grad = np.array([[[[0, 0], [1, 1], [1, 1]], - [[1, 1], [0, 0], [1, 1]], - [[1, 1], [1, 1], [0, 0]], - [[1, 1], [1, 1], [1, 1]]], - [[[0, 0], [1, 1], [1, 1]], - [[1, 1], [0, 0], [1, 1]], - [[1, 1], [1, 1], [0, 0]], - [[1, 1], [1, 1], [1, 1]]]]).astype('float32') + expected_np = np.array( + [ + [ + [[0, 3], [2, 2], [2, 2]], + [[2, 2], [1, 4], [2, 2]], + [[2, 2], [2, 2], [2, 5]], + [[2, 2], [2, 2], [2, 2]], + ], + [ + [[6, 9], [2, 2], [2, 2]], + [[2, 2], [7, 10], [2, 2]], + [[2, 2], [2, 2], [8, 11]], + [[2, 2], [2, 2], [2, 2]], + ], + ] + ).astype('float32') + expected_grad = np.array( + [ + [ + [[0, 0], [1, 1], [1, 1]], + [[1, 1], [0, 0], [1, 1]], + [[1, 1], [1, 1], [0, 0]], + [[1, 1], [1, 1], [1, 1]], + ], + [ + [[0, 0], [1, 1], [1, 1]], + [[1, 1], [0, 0], [1, 1]], + [[1, 1], [1, 1], [0, 0]], + [[1, 1], [1, 1], [1, 1]], + ], + ] + ).astype('float32') for idx, p in enumerate(self.places): if idx == 0: @@ -154,9 +181,10 @@ class TensorFillDiagTensor_Test(unittest.TestCase): else: paddle.set_device('gpu') for dtype in self.typelist: - v = paddle.to_tensor(np.arange(12).reshape(2, 2, 3), - dtype=dtype) - var = (np.random.random() + 1) + v = paddle.to_tensor( + np.arange(12).reshape(2, 2, 3), dtype=dtype + ) + var = np.random.random() + 1 x = paddle.ones((2, 4, 3, 2), dtype=dtype) x.stop_gradient = False y = x * 2 @@ -165,10 +193,12 @@ class TensorFillDiagTensor_Test(unittest.TestCase): loss.backward() self.assertEqual( - (y.numpy().astype('float32') == expected_np).all(), True) + (y.numpy().astype('float32') == expected_np).all(), True + ) self.assertEqual( (y.grad.numpy().astype('float32') == expected_grad).all(), - True) + True, + ) def test_func_dim4(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -178,14 +208,15 @@ class TensorFillDiagTensor_Test(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_largedim(self): - #large dim only test on gpu because the cpu version is too slow for ci test, and the memory is limited + # large dim only test on gpu because the cpu version is too slow for ci test, 
and the memory is limited if len(self.places) > 1: bsdim = 1024 fsdim = 128 paddle.set_device('gpu') for dtype in self.typelist: v = paddle.arange(bsdim * fsdim, dtype=dtype).reshape( - (bsdim, fsdim)) + (bsdim, fsdim) + ) y = paddle.ones((bsdim, fsdim, fsdim), dtype=dtype) y.stop_gradient = False y = y * 2 diff --git a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py index 0a249d002836de38e68c2664fcb3c0dfa52f2bc2..8e7de658969670759e3cd26c6e13b062cd8b8105 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py @@ -23,7 +23,6 @@ import paddle.fluid.core as core class SimpleNet(nn.Layer): - def __init__(self, in_size, out_size): super(SimpleNet, self).__init__() self.linear1 = nn.Linear(in_size, in_size) @@ -42,7 +41,6 @@ class SimpleNet(nn.Layer): class SimpleNetForStatic(nn.Layer): - def __init__(self, in_size, out_size): super(SimpleNetForStatic, self).__init__() self.linear1 = nn.Linear(in_size, in_size) @@ -58,7 +56,6 @@ class SimpleNetForStatic(nn.Layer): class TestTensorRegisterHook(unittest.TestCase): - def setUp(self): self.seed = 2021 self.in_size = 10 @@ -69,13 +66,12 @@ class TestTensorRegisterHook(unittest.TestCase): self.devices.append("gpu") def func_hook_for_interior_var(self): - def run_double_hook_for_interior_var(double_hook, removed=False): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([0., 1., 2., 3.]) - y = paddle.to_tensor([4., 5., 6., 7.]) + x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) x.stop_gradient = False y.stop_gradient = False @@ -83,7 +79,7 @@ class TestTensorRegisterHook(unittest.TestCase): w.stop_gradient = False helper = w.register_hook(double_hook) - z = paddle.to_tensor([1., 2., 3., 4.]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) z.stop_gradient = False o = z.matmul(w) @@ -100,18 +96,18 @@ class TestTensorRegisterHook(unittest.TestCase): np.testing.assert_array_equal(w.grad.numpy(), z.numpy()) # x.grad and y.grad are changed if run hook np.testing.assert_array_equal( - x.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy()) + x.grad.numpy(), z.numpy() * 2 if not removed else z.numpy() + ) np.testing.assert_array_equal( - y.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy()) + y.grad.numpy(), z.numpy() * 2 if not removed else z.numpy() + ) def run_print_hook_for_interior_var(print_hook, removed=False): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([0., 1., 2., 3.]) - y = paddle.to_tensor([4., 5., 6., 7.]) + x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) x.stop_gradient = False y.stop_gradient = False @@ -119,7 +115,7 @@ class TestTensorRegisterHook(unittest.TestCase): w.stop_gradient = False helper = w.register_hook(print_hook) - z = paddle.to_tensor([1., 2., 3., 4.]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) z.stop_gradient = False o = z.matmul(w) @@ -167,13 +163,12 @@ class TestTensorRegisterHook(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_hook_for_leaf_var(self): - def run_double_hook_for_leaf_var(double_hook, removed=False): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([0., 1., 2., 3.]) - y = paddle.to_tensor([4., 5., 6., 7.]) + x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) x.stop_gradient = False 
y.stop_gradient = False helper = y.register_hook(double_hook) @@ -181,7 +176,7 @@ class TestTensorRegisterHook(unittest.TestCase): w = x + y w.stop_gradient = False - z = paddle.to_tensor([1., 2., 3., 4.]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) z.stop_gradient = False o = z.matmul(w) @@ -198,8 +193,8 @@ class TestTensorRegisterHook(unittest.TestCase): np.testing.assert_array_equal(x.grad.numpy(), z.numpy()) # y.grad are changed if run hook np.testing.assert_array_equal( - y.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy()) + y.grad.numpy(), z.numpy() * 2 if not removed else z.numpy() + ) # register hook run_double_hook_for_leaf_var(lambda grad: grad * 2) @@ -214,14 +209,14 @@ class TestTensorRegisterHook(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_hook_for_accumulated_grad_interior_var(self): - def run_double_hook_for_accumulated_grad_interior_var( - double_hook, removed=False): + double_hook, removed=False + ): for device in self.devices: paddle.set_device(device) - a = paddle.to_tensor([0., 1., 1., 2.]) - b = paddle.to_tensor([0., 0., 1., 2.]) + a = paddle.to_tensor([0.0, 1.0, 1.0, 2.0]) + b = paddle.to_tensor([0.0, 0.0, 1.0, 2.0]) a.stop_gradient = False b.stop_gradient = False @@ -232,8 +227,8 @@ class TestTensorRegisterHook(unittest.TestCase): helper2 = x.register_hook(double_hook) - y = paddle.to_tensor([4., 5., 6., 7.]) - z = paddle.to_tensor([1., 2., 3., 4.]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) y.stop_gradient = False z.stop_gradient = False @@ -251,21 +246,24 @@ class TestTensorRegisterHook(unittest.TestCase): o.backward() - base_grad = np.array([5., 9., 13., 19.]) + base_grad = np.array([5.0, 9.0, 13.0, 19.0]) # x.grad is not changed np.testing.assert_array_equal(x.grad.numpy(), base_grad) # b.grad is changed by x.hook np.testing.assert_array_equal( - b.grad.numpy(), base_grad * 2 if not removed else base_grad) + b.grad.numpy(), base_grad * 2 if not removed else base_grad + ) # a.grad is changed by x.hook and a.hook np.testing.assert_array_equal( - a.grad.numpy(), base_grad * 4 if not removed else base_grad) + a.grad.numpy(), base_grad * 4 if not removed else base_grad + ) # register hook run_double_hook_for_accumulated_grad_interior_var(lambda grad: grad * 2) # register hook and removed - run_double_hook_for_accumulated_grad_interior_var(lambda grad: grad * 2, - removed=True) + run_double_hook_for_accumulated_grad_interior_var( + lambda grad: grad * 2, removed=True + ) def test_hook_for_accumulated_grad_interior_var(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -275,19 +273,19 @@ class TestTensorRegisterHook(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_hook_for_accumulated_grad_leaf_var(self): - def run_double_hook_for_accumulated_grad_leaf_var( - double_hook, removed=False): + double_hook, removed=False + ): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([0., 1., 2., 4.]) + x = paddle.to_tensor([0.0, 1.0, 2.0, 4.0]) x.stop_gradient = False helper = x.register_hook(double_hook) - y = paddle.to_tensor([4., 5., 6., 7.]) - z = paddle.to_tensor([1., 2., 3., 4.]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) y.stop_gradient = False z.stop_gradient = False @@ -304,16 +302,18 @@ class TestTensorRegisterHook(unittest.TestCase): o.backward() - base_grad = np.array([5., 9., 13., 19.]) + base_grad = np.array([5.0, 9.0, 13.0, 19.0]) # 
x.grad is changed by x.hook np.testing.assert_array_equal( - x.grad.numpy(), base_grad * 2 if not removed else base_grad) + x.grad.numpy(), base_grad * 2 if not removed else base_grad + ) # register hook run_double_hook_for_accumulated_grad_leaf_var(lambda grad: grad * 2) # register hook and removed - run_double_hook_for_accumulated_grad_leaf_var(lambda grad: grad * 2, - removed=True) + run_double_hook_for_accumulated_grad_leaf_var( + lambda grad: grad * 2, removed=True + ) def test_hook_for_accumulated_grad_leaf_var(self): with _test_eager_guard(): @@ -321,12 +321,9 @@ class TestTensorRegisterHook(unittest.TestCase): self.func_hook_for_accumulated_grad_leaf_var() def func_hook_in_model(self): - - def run_double_hook_in_model(data, - label, - hook=None, - register=False, - remove=False): + def run_double_hook_in_model( + data, label, hook=None, register=False, remove=False + ): for device in self.devices: paddle.seed(self.seed) paddle.set_device(device) @@ -341,22 +338,35 @@ class TestTensorRegisterHook(unittest.TestCase): loss = loss_fn(out, label) loss.backward() - return (ret1.grad.numpy(), net.linear1.weight.grad.numpy(), - net.linear1.bias.grad.numpy()) + return ( + ret1.grad.numpy(), + net.linear1.weight.grad.numpy(), + net.linear1.bias.grad.numpy(), + ) - data = np.random.uniform( - size=[self.batch_size, self.in_size]).astype('float32') + data = np.random.uniform(size=[self.batch_size, self.in_size]).astype( + 'float32' + ) label = np.random.uniform(size=[self.batch_size, 1]).astype('float32') # get original value ret1_grad, linear1_w_grad, linear1_b_grad = run_double_hook_in_model( - data, label) + data, label + ) # get value changed by hook - ret1_grad_hook, linear1_w_grad_hook, linear1_b_grad_hook = run_double_hook_in_model( - data, label, lambda grad: grad * 2, True) + ( + ret1_grad_hook, + linear1_w_grad_hook, + linear1_b_grad_hook, + ) = run_double_hook_in_model(data, label, lambda grad: grad * 2, True) # get value after removing hook - ret1_grad_rm, linear1_w_grad_rm, linear1_b_grad_rm = run_double_hook_in_model( - data, label, lambda grad: grad * 2, True, True) + ( + ret1_grad_rm, + linear1_w_grad_rm, + linear1_b_grad_rm, + ) = run_double_hook_in_model( + data, label, lambda grad: grad * 2, True, True + ) # compare original value and with hook np.testing.assert_array_equal(ret1_grad, ret1_grad_hook) @@ -376,16 +386,13 @@ class TestTensorRegisterHook(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_multiple_hooks_for_interior_var(self): - - def run_multiple_hooks_for_interior_var(device, - hooks, - remove1=False, - remove2=False, - remove3=False): + def run_multiple_hooks_for_interior_var( + device, hooks, remove1=False, remove2=False, remove3=False + ): paddle.set_device(device) - x = paddle.to_tensor([0., 1., 2., 3.]) - y = paddle.to_tensor([4., 5., 6., 7.]) + x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0]) + y = paddle.to_tensor([4.0, 5.0, 6.0, 7.0]) x.stop_gradient = False y.stop_gradient = False @@ -397,7 +404,7 @@ class TestTensorRegisterHook(unittest.TestCase): helper = w.register_hook(hook) helpers.append(helper) - z = paddle.to_tensor([1., 2., 3., 4.]) + z = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) z.stop_gradient = False o = z.matmul(w) @@ -420,35 +427,40 @@ class TestTensorRegisterHook(unittest.TestCase): for device in self.devices: z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( - device, hooks) + device, hooks + ) np.testing.assert_array_equal(w_grad, z) np.testing.assert_array_equal(x_grad, z * 8) 
np.testing.assert_array_equal(y_grad, z * 8) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( - device, hooks, remove1=True) + device, hooks, remove1=True + ) np.testing.assert_array_equal(w_grad, z) np.testing.assert_array_equal(x_grad, z * 4) np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( - device, hooks, remove2=True) + device, hooks, remove2=True + ) np.testing.assert_array_equal(w_grad, z) np.testing.assert_array_equal(x_grad, z * 4) np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( - device, hooks, remove3=True) + device, hooks, remove3=True + ) np.testing.assert_array_equal(w_grad, z) np.testing.assert_array_equal(x_grad, z * 4) np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( - device, hooks, remove1=True, remove2=True, remove3=True) + device, hooks, remove1=True, remove2=True, remove3=True + ) np.testing.assert_array_equal(w_grad, z) np.testing.assert_array_equal(x_grad, z) @@ -462,7 +474,6 @@ class TestTensorRegisterHook(unittest.TestCase): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def func_hook_in_double_grad(self): - def double_print_hook(grad): grad = grad * 2 print(grad) @@ -478,10 +489,9 @@ class TestTensorRegisterHook(unittest.TestCase): y = x * x # Since y = x * x, dx = 2 * x - dx = paddle.grad(outputs=[y], - inputs=[x], - create_graph=True, - retain_graph=True)[0] + dx = paddle.grad( + outputs=[y], inputs=[x], create_graph=True, retain_graph=True + )[0] z = y + dx self.assertTrue(x.grad is None) @@ -508,7 +518,7 @@ class TestTensorRegisterHook(unittest.TestCase): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([1., 2., 3., 4.]) + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) x.stop_gradient = False h = x.register_hook(lambda grad: grad * 2) @@ -524,7 +534,7 @@ class TestTensorRegisterHook(unittest.TestCase): for device in self.devices: paddle.set_device(device) - x = paddle.to_tensor([1., 2., 3., 4.]) + x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0]) with self.assertRaises(RuntimeError): x.register_hook(lambda grad: grad * 2) @@ -541,9 +551,9 @@ class TestTensorRegisterHook(unittest.TestCase): main_program = paddle.static.Program() with paddle.static.scope_guard(paddle.static.Scope()): with paddle.static.program_guard(main_program, startup_program): - x = paddle.static.data(name='x', - shape=[None, self.in_size], - dtype='float32') + x = paddle.static.data( + name='x', shape=[None, self.in_size], dtype='float32' + ) net = SimpleNetForStatic(self.in_size, self.out_size) with self.assertRaises(AssertionError): @@ -554,10 +564,12 @@ class TestTensorRegisterHook(unittest.TestCase): def func_register_hook_in_dy2static_mode(self): net = SimpleNetForStatic(self.in_size, self.out_size) jit_net = paddle.jit.to_static( - net, input_spec=[paddle.static.InputSpec([None, self.in_size])]) + net, input_spec=[paddle.static.InputSpec([None, self.in_size])] + ) - data = np.random.uniform( - size=[self.batch_size, self.in_size]).astype('float32') + data = np.random.uniform(size=[self.batch_size, self.in_size]).astype( + 'float32' + ) data_t = paddle.to_tensor(data) with self.assertRaises(AssertionError): @@ -581,7 +593,6 @@ def global_void_hook(): class TestTensorRegisterBackwardHook(unittest.TestCase): - def setUp(self): self.devices = ["cpu"] if paddle.is_compiled_with_cuda(): @@ -591,7 +602,7 @@ class TestTensorRegisterBackwardHook(unittest.TestCase): 
global HOOK_INIT_VALUE global HOOK_IS_CALLED for device in self.devices: - x = paddle.to_tensor(5., stop_gradient=False) + x = paddle.to_tensor(5.0, stop_gradient=False) x._register_backward_hook(global_void_hook) for i in range(5): y = paddle.pow(x, 4.0) @@ -610,7 +621,7 @@ class TestTensorRegisterBackwardHook(unittest.TestCase): self.func_register_backward_hook() def func_register_backward_hook_for_interior_var(self): - x = paddle.to_tensor(5., stop_gradient=False) + x = paddle.to_tensor(5.0, stop_gradient=False) y = paddle.pow(x, 4.0) with self.assertRaises(ValueError): @@ -622,7 +633,7 @@ class TestTensorRegisterBackwardHook(unittest.TestCase): self.func_register_backward_hook_for_interior_var() def func_register_backward_hook_for_var_without_gradient(self): - x = paddle.to_tensor(5.) + x = paddle.to_tensor(5.0) y = paddle.pow(x, 4.0) with self.assertRaises(ValueError): @@ -635,7 +646,6 @@ class TestTensorRegisterBackwardHook(unittest.TestCase): class TestRegsiterBackwardFinalHook(unittest.TestCase): - def setUp(self): self.devices = ["cpu"] if paddle.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py index 594731fb08f19985f3ab0dbd3a31b798ec3cbebf..0ab01f415517167c08016bf33619a5631725e980 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py @@ -28,7 +28,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTensorScalarTypePromotionDynamic(unittest.TestCase): - def check_operation(self, a, b, c, op): if op == '+': c_rlt = a + b diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py index 127a36d70703a7bbf0a9e08b4a2ac782639d71fa..7949d2457e9286949be1089ec5c7c6217db6d408 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py @@ -29,7 +29,6 @@ from paddle.static import Program class TestTensorScalarTypePromotionStatic(unittest.TestCase): - def setUp(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_tensor_to_list.py b/python/paddle/fluid/tests/unittests/test_tensor_to_list.py index 899be8c996cf2e4ffeee52b6dba3a180b0145f64..709ff84179f24c7d8a5222936b9cdaf695f26ab7 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_to_list.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_to_list.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TensorToListTest(unittest.TestCase): - def setUp(self): self.shape = [11, 25, 32, 43] @@ -31,8 +30,9 @@ class TensorToListTest(unittest.TestCase): places.append(fluid.CUDAPinnedPlace()) for p in places: - np_arr = np.reshape(np.array(range(np.prod(self.shape))), - self.shape) + np_arr = np.reshape( + np.array(range(np.prod(self.shape))), self.shape + ) expectlist = np_arr.tolist() t = paddle.to_tensor(np_arr, place=p) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_to_numpy.py b/python/paddle/fluid/tests/unittests/test_tensor_to_numpy.py index 1bd2f104854e2da91fd3265dcb7687a8a6ed1de6..30843455ca5e252d5c7f2e30f8132455c4bd9aac 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_to_numpy.py +++ 
b/python/paddle/fluid/tests/unittests/test_tensor_to_numpy.py @@ -18,13 +18,18 @@ import numpy as np class TensorToNumpyTest(unittest.TestCase): - def setUp(self): self.shape = [11, 25, 32, 43] def test_main(self): dtypes = [ - 'float32', 'float64', 'int32', 'int64', 'uint8', 'int8', 'bool' + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', + 'int8', + 'bool', ] places = [fluid.CPUPlace()] @@ -36,7 +41,8 @@ class TensorToNumpyTest(unittest.TestCase): for dtype in dtypes: np_arr = np.reshape( np.array(range(np.prod(self.shape))).astype(dtype), - self.shape) + self.shape, + ) t = fluid.LoDTensor() t.set(np_arr, p) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_type_promotion.py b/python/paddle/fluid/tests/unittests/test_tensor_type_promotion.py index 4e31fec3543e686904b47d1840afbf38dcc74854..f6a1f7e5def413dbea07ef633fea2f18477ffeda 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_type_promotion.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_type_promotion.py @@ -19,7 +19,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTensorTypePromotion(unittest.TestCase): - def setUp(self): self.x = paddle.to_tensor([2, 3]) self.y = paddle.to_tensor([1.0, 2.0]) @@ -29,32 +28,36 @@ class TestTensorTypePromotion(unittest.TestCase): warnings.simplefilter("always") self.x + self.y self.assertTrue( - "The dtype of left and right variables are not the same" in str( - context[-1].message)) + "The dtype of left and right variables are not the same" + in str(context[-1].message) + ) def sub_operator(self): with warnings.catch_warnings(record=True) as context: warnings.simplefilter("always") self.x - self.y self.assertTrue( - "The dtype of left and right variables are not the same" in str( - context[-1].message)) + "The dtype of left and right variables are not the same" + in str(context[-1].message) + ) def mul_operator(self): with warnings.catch_warnings(record=True) as context: warnings.simplefilter("always") self.x * self.y self.assertTrue( - "The dtype of left and right variables are not the same" in str( - context[-1].message)) + "The dtype of left and right variables are not the same" + in str(context[-1].message) + ) def div_operator(self): with warnings.catch_warnings(record=True) as context: warnings.simplefilter("always") self.x / self.y self.assertTrue( - "The dtype of left and right variables are not the same" in str( - context[-1].message)) + "The dtype of left and right variables are not the same" + in str(context[-1].message) + ) def test_operator(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_tensor_uva.py b/python/paddle/fluid/tests/unittests/test_tensor_uva.py index 1261cf41b79ed0ca65b99136eda9c5820e214ae7..8fe03148f49211938c3bb023225b7824bc4a5052 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_uva.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_uva.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class TestTensorCopyFrom(unittest.TestCase): - def func_main(self): if paddle.fluid.core.is_compiled_with_cuda(): place = paddle.CPUPlace() @@ -36,12 +35,17 @@ class TestTensorCopyFrom(unittest.TestCase): class TestUVATensorFromNumpy(unittest.TestCase): - def func_uva_tensor_creation(self): if paddle.fluid.core.is_compiled_with_cuda(): dtype_list = [ - "int32", "int64", "float32", "float64", "float16", "int8", - "int16", "bool" + "int32", + "int64", + "float32", + "float64", + "float16", + "int8", + "int16", + "bool", ] for dtype in 
dtype_list: data = np.random.randint(10, size=[4, 5]).astype(dtype) @@ -64,9 +68,9 @@ class TestUVATensorFromNumpy(unittest.TestCase): slice_a = a[:, 5] tensor1 = paddle.to_tensor(slice_a) tensor2 = core.eager.to_uva_tensor(slice_a) - np.testing.assert_allclose(tensor1.numpy(), - tensor2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + tensor1.numpy(), tensor2.numpy(), rtol=1e-05 + ) def test_uva_tensor_creation(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_tensor_zero_.py b/python/paddle/fluid/tests/unittests/test_tensor_zero_.py index b8d55028ed24cf188f2e018d55f64ba66db4a4fa..2870ad8e75a192c458ea3fe5a9d387ac56630ed6 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_zero_.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_zero_.py @@ -20,7 +20,6 @@ from paddle.fluid.framework import _test_eager_guard class TensorFill_Test(unittest.TestCase): - def setUp(self): self.shape = [32, 32] @@ -32,8 +31,9 @@ class TensorFill_Test(unittest.TestCase): places.append(fluid.CUDAPinnedPlace()) for p in places: - np_arr = np.reshape(np.array(range(np.prod(self.shape))), - self.shape) + np_arr = np.reshape( + np.array(range(np.prod(self.shape))), self.shape + ) for dtype in typelist: tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype) target = tensor.numpy() diff --git a/python/paddle/fluid/tests/unittests/test_tensordot.py b/python/paddle/fluid/tests/unittests/test_tensordot.py index c0fb398d830c40f00a2bc42dc69622ca0d00afe1..8709772c18e14a4bdf1ed66c0e0cd668d97cc31a 100644 --- a/python/paddle/fluid/tests/unittests/test_tensordot.py +++ b/python/paddle/fluid/tests/unittests/test_tensordot.py @@ -26,7 +26,7 @@ def tensordot_np(x, y, axes): # np.tensordot does not support empty axes if not axes: axes = 0 - if (isinstance(axes, (tuple, list))): + if isinstance(axes, (tuple, list)): if all(np.issubdtype(type(i), np.integer) for i in axes): axes = [axes, axes] else: @@ -43,7 +43,7 @@ def tensordot_np(x, y, axes): axes = [axes_x, axes_y] # np.tensordot does not support broadcast - if (isinstance(axes, (tuple, list))): + if isinstance(axes, (tuple, list)): axes_x, axes_y = axes else: axes_x = list(range(x.ndim - axes, x.ndim)) @@ -65,7 +65,6 @@ def tensordot_np(x, y, axes): class TestTensordotAPI(unittest.TestCase): - def setUp(self): self.set_place() self.set_dtype() @@ -90,60 +89,108 @@ class TestTensordotAPI(unittest.TestCase): self.y = np.random.random(self.y_shape).astype(self.dtype) def set_test_axes(self): - self.all_axes = [[[3, 2], [3]], [[2, 1, 0], [2, 1]], - [[1, 2, 0], [1, 3, 2]], [3, 0], [[], [0, 3, 1]], - [[2, 1, 0, 3], [2, 0, 1, 3]], [[3, 1, 2], [1, 3, 2, - 0]], - [[2, 1], [0, 2]], [[2, 0, 1, 3], [2]], - [[1, 2, 0, 3], [0, 2, 1]], [[2, 1, 3, 0], [1, 2, 3]], - [[2, 0, 1, 3], [3, 1, 0, 2]], [[0, 3], [0, 3, 2, 1]], - [[1, 3, 2, 0], [2, 1, 0, 3]], - [[1, 3, 2, 0], [1, 3, 2, 0]], [[1, 0, 2], [0, 1]], - [[2, 3, 0], [3, 1]], [[1, 3, 2, 0], [3, 0, 1, 2]], - [[3, 2, 1], [2, 0, 1]], [[0], []], - [[2, 3, 0], [1, 2, 0]], [[3, 0, 2, 1], [2, 1, 0, 3]], - [[3, 1, 2], [2, 3, 1]], [[1, 0, 2, 3], []], - [[1, 2], [1, 2, 3]], [[2, 0, 1, 3], [2, 0, 1]], - [[3, 1, 2], [1, 3, 2]], [[3, 1, 2, 0], [1, 2, 3, 0]], - [[0, 2, 3], [0, 1, 2]], [[3, 2, 0], [2, 0, 3, 1]], - [[2, 1, 0, 3], [3, 1, 2, 0]], - [[1, 2, 3, 0], [1, 3, 0, 2]], [[3, 0], [2, 1]], - [[0, 1, 3, 2], [0, 2, 1, 3]], [[1, 0], [2, 1, 3]], - [[1, 0, 3, 2], [2, 3, 0, 1]], [[1, 2], [3]], - [[1, 2, 3, 0], [3, 2, 1, 0]], - [[0, 3, 2, 1], [2, 1, 3, 0]], [0], - [[0, 2, 3], [3, 2, 0, 1]], [[1, 2, 3, 0], 
[3, 2, 1, - 0]], - [[3, 1], [3]], [[3, 2, 0, 1], [3, 2, 0]], - [[2, 3, 0, 1], [0, 3, 2]], [[1], [1, 3]], - [[1, 2], [2, 1, 0]], [[3, 1, 2], [3, 1, 0]], - [[1, 3], [3, 1, 2]], [[2, 0, 1, 3], [3, 1, 0, 2]], - [[1, 3, 0], [1, 3]], [[2, 3, 1], [1, 0, 2]], - [[1, 2, 0, 3], [0, 2, 1, 3]], [[2], [0, 1, 3]], - [[1], [1, 2]], [[1, 0, 2, 3], [3, 0, 1, 2]], - [[0, 1, 3, 2], [1, 3, 0, 2]], [[3, 0, 2, 1], [0, 2, - 3]], - [[1, 2, 0], [1, 2, 3]], [[1, 0, 3], [2, 3, 0]], - [[2, 3, 0], [3, 1, 0]], [[1, 3], [1, 0]], - [[2, 1, 0, 3], [2, 0, 3, 1]], [[3, 2, 0], [2, 1, 0]], - [[0, 1, 3], [0, 3, 1]], [[3, 1, 0], [3, 2, 1]], - [[3, 2], [3, 1]], [[3], [2, 1, 0]], [[1, 2, 3, 0], []], - [[1, 3, 2, 0], [3, 1, 2]], [[1], [0, 2]], - [[3, 2, 0], [3, 2, 0]], [[3], []], [[1, 0, 3], [2, 1]], - [[3, 1, 0, 2], [2, 3, 1, 0]], [[0, 1], [0, 3, 2]], - [[0, 2, 3], [0, 2, 1]], [[1, 3, 0], [3, 0, 2]], - [[3, 1, 2], [1, 2, 3]], [[3, 1, 2], [3, 1, 0]], - [[0, 3, 1, 2], [3, 2, 1, 0]], [[0, 3], [3, 2, 1]], - [[2, 3], [1, 3, 0]], [[0, 3, 2], [2, 0, 3, 1]], - [[2, 3], [1, 3]], [[3, 1, 2, 0], [2, 3, 1, 0]], - [[1, 0, 3, 2], [3, 0, 1, 2]], - [[3, 2, 1, 0], [0, 1, 3, 2]], [[3, 1, 2], [3]], - [[0, 1, 3, 2], [2, 3, 0, 1]], - [[1, 2, 3, 0], [1, 3, 0, 2]], [3, 1, 2], - [[3, 1, 2], [0, 3, 2]], [[2, 3, 0], [1, 2, 0]], - [[2, 0, 3], [2, 0]], [[3, 1, 0, 2], [3, 1, 0, 2]], - [[0, 1, 2], [2, 0, 1]], [[1, 0, 3], [2, 3, 0]], - [[2, 0, 1], [0, 1, 3]], [[2, 1], [0, 1, 3]]] + self.all_axes = [ + [[3, 2], [3]], + [[2, 1, 0], [2, 1]], + [[1, 2, 0], [1, 3, 2]], + [3, 0], + [[], [0, 3, 1]], + [[2, 1, 0, 3], [2, 0, 1, 3]], + [[3, 1, 2], [1, 3, 2, 0]], + [[2, 1], [0, 2]], + [[2, 0, 1, 3], [2]], + [[1, 2, 0, 3], [0, 2, 1]], + [[2, 1, 3, 0], [1, 2, 3]], + [[2, 0, 1, 3], [3, 1, 0, 2]], + [[0, 3], [0, 3, 2, 1]], + [[1, 3, 2, 0], [2, 1, 0, 3]], + [[1, 3, 2, 0], [1, 3, 2, 0]], + [[1, 0, 2], [0, 1]], + [[2, 3, 0], [3, 1]], + [[1, 3, 2, 0], [3, 0, 1, 2]], + [[3, 2, 1], [2, 0, 1]], + [[0], []], + [[2, 3, 0], [1, 2, 0]], + [[3, 0, 2, 1], [2, 1, 0, 3]], + [[3, 1, 2], [2, 3, 1]], + [[1, 0, 2, 3], []], + [[1, 2], [1, 2, 3]], + [[2, 0, 1, 3], [2, 0, 1]], + [[3, 1, 2], [1, 3, 2]], + [[3, 1, 2, 0], [1, 2, 3, 0]], + [[0, 2, 3], [0, 1, 2]], + [[3, 2, 0], [2, 0, 3, 1]], + [[2, 1, 0, 3], [3, 1, 2, 0]], + [[1, 2, 3, 0], [1, 3, 0, 2]], + [[3, 0], [2, 1]], + [[0, 1, 3, 2], [0, 2, 1, 3]], + [[1, 0], [2, 1, 3]], + [[1, 0, 3, 2], [2, 3, 0, 1]], + [[1, 2], [3]], + [[1, 2, 3, 0], [3, 2, 1, 0]], + [[0, 3, 2, 1], [2, 1, 3, 0]], + [0], + [[0, 2, 3], [3, 2, 0, 1]], + [[1, 2, 3, 0], [3, 2, 1, 0]], + [[3, 1], [3]], + [[3, 2, 0, 1], [3, 2, 0]], + [[2, 3, 0, 1], [0, 3, 2]], + [[1], [1, 3]], + [[1, 2], [2, 1, 0]], + [[3, 1, 2], [3, 1, 0]], + [[1, 3], [3, 1, 2]], + [[2, 0, 1, 3], [3, 1, 0, 2]], + [[1, 3, 0], [1, 3]], + [[2, 3, 1], [1, 0, 2]], + [[1, 2, 0, 3], [0, 2, 1, 3]], + [[2], [0, 1, 3]], + [[1], [1, 2]], + [[1, 0, 2, 3], [3, 0, 1, 2]], + [[0, 1, 3, 2], [1, 3, 0, 2]], + [[3, 0, 2, 1], [0, 2, 3]], + [[1, 2, 0], [1, 2, 3]], + [[1, 0, 3], [2, 3, 0]], + [[2, 3, 0], [3, 1, 0]], + [[1, 3], [1, 0]], + [[2, 1, 0, 3], [2, 0, 3, 1]], + [[3, 2, 0], [2, 1, 0]], + [[0, 1, 3], [0, 3, 1]], + [[3, 1, 0], [3, 2, 1]], + [[3, 2], [3, 1]], + [[3], [2, 1, 0]], + [[1, 2, 3, 0], []], + [[1, 3, 2, 0], [3, 1, 2]], + [[1], [0, 2]], + [[3, 2, 0], [3, 2, 0]], + [[3], []], + [[1, 0, 3], [2, 1]], + [[3, 1, 0, 2], [2, 3, 1, 0]], + [[0, 1], [0, 3, 2]], + [[0, 2, 3], [0, 2, 1]], + [[1, 3, 0], [3, 0, 2]], + [[3, 1, 2], [1, 2, 3]], + [[3, 1, 2], [3, 1, 0]], + [[0, 3, 1, 2], [3, 2, 1, 0]], + [[0, 3], [3, 2, 1]], + [[2, 3], [1, 
3, 0]], + [[0, 3, 2], [2, 0, 3, 1]], + [[2, 3], [1, 3]], + [[3, 1, 2, 0], [2, 3, 1, 0]], + [[1, 0, 3, 2], [3, 0, 1, 2]], + [[3, 2, 1, 0], [0, 1, 3, 2]], + [[3, 1, 2], [3]], + [[0, 1, 3, 2], [2, 3, 0, 1]], + [[1, 2, 3, 0], [1, 3, 0, 2]], + [3, 1, 2], + [[3, 1, 2], [0, 3, 2]], + [[2, 3, 0], [1, 2, 0]], + [[2, 0, 3], [2, 0]], + [[3, 1, 0, 2], [3, 1, 0, 2]], + [[0, 1, 2], [2, 0, 1]], + [[1, 0, 3], [2, 3, 0]], + [[2, 0, 1], [0, 1, 3]], + [[2, 1], [0, 1, 3]], + ] def test_dygraph(self): paddle.disable_static() @@ -159,88 +206,89 @@ class TestTensordotAPI(unittest.TestCase): paddle.enable_static() for axes in self.all_axes: for place in self.places: - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - x = paddle.static.data(name='x', - shape=self.x_shape, - dtype=self.dtype) - y = paddle.static.data(name='y', - shape=self.y_shape, - dtype=self.dtype) + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + x = paddle.static.data( + name='x', shape=self.x_shape, dtype=self.dtype + ) + y = paddle.static.data( + name='y', shape=self.y_shape, dtype=self.dtype + ) z = paddle.tensordot(x, y, axes) exe = paddle.static.Executor(place) - paddle_res = exe.run(feed={ - 'x': self.x, - 'y': self.y - }, - fetch_list=[z]) + paddle_res = exe.run( + feed={'x': self.x, 'y': self.y}, fetch_list=[z] + ) np_res = tensordot_np(self.x, self.y, axes) np.testing.assert_allclose(paddle_res[0], np_res, rtol=1e-6) class TestTensordotAPIFloat64(TestTensordotAPI): - def set_dtype(self): self.dtype = np.float64 class TestTensordotAPIBroadcastCase1(TestTensordotAPIFloat64): - def set_input_shape(self): self.x_shape = [1, 1, 1, 5] self.y_shape = [1, 5, 1, 1] class TestTensordotAPIBroadcastCase2(TestTensordotAPIFloat64): - def set_input_shape(self): self.x_shape = [1, 5, 5, 5] self.y_shape = [1, 1, 1, 5] class TestTensordotAPIBroadcastCase3(TestTensordotAPIFloat64): - def set_input_shape(self): self.x_shape = [5, 5, 5, 1] self.y_shape = [5, 5, 1, 5] class TestTensordotAPIBroadcastCase4(TestTensordotAPIFloat64): - def set_input_shape(self): self.x_shape = [5, 5, 5, 1] self.y_shape = [1, 1, 1, 1] class TestTensordotAPIBroadcastCase5(TestTensordotAPIFloat64): - def set_input_shape(self): self.x_shape = [1, 1, 5, 5] self.y_shape = [5, 5, 1, 5] class TestTensordotAPIAxesType(TestTensordotAPI): - def set_input_shape(self): self.x_shape = [3, 4, 4] self.y_shape = [4, 4, 5] def set_test_axes(self): self.all_axes = [ - 0, 1, 2, (1, ), [1], ((1, ), ), ([1], ), ((2, 1), (0, )), - ((1, 2), (0, 1)), ([1, 2], [0, 1]), ([1, 2], [0, 1]), - [[1, 2], [0, 1]] + 0, + 1, + 2, + (1,), + [1], + ((1,),), + ([1],), + ((2, 1), (0,)), + ((1, 2), (0, 1)), + ([1, 2], [0, 1]), + ([1, 2], [0, 1]), + [[1, 2], [0, 1]], ] def test_tensor_axes(self): # The 'axes' with type 'Tensor' in tensordot is not available in static mode paddle.disable_static() tensor_axes = [ - paddle.to_tensor([1]), (paddle.to_tensor([1])), + paddle.to_tensor([1]), + (paddle.to_tensor([1])), (paddle.to_tensor([1, 2]), paddle.to_tensor([0, 1])), - [paddle.to_tensor([1, 2]), - paddle.to_tensor([0, 1])], - paddle.to_tensor([[1, 2], [0, 1]]) + [paddle.to_tensor([1, 2]), paddle.to_tensor([0, 1])], + paddle.to_tensor([[1, 2], [0, 1]]), ] for place in self.places: @@ -252,8 +300,15 @@ class TestTensordotAPIAxesType(TestTensordotAPI): np.testing.assert_allclose(paddle_res, np_res, rtol=1e-6) def test_error(self): - self.all_axes = [[[[0], [1]]], 0.1, -1, 100, [[1, 2], [0, 0]], - [[1, 2], [0, -1]], [0, 1, 2, 3]] + 
self.all_axes = [ + [[[0], [1]]], + 0.1, + -1, + 100, + [[1, 2], [0, 0]], + [[1, 2], [0, -1]], + [0, 1, 2, 3], + ] paddle.disable_static() x = paddle.to_tensor(self.x) y = paddle.to_tensor(self.y) @@ -263,7 +318,6 @@ class TestTensordotAPIAxesType(TestTensordotAPI): class TestTensordotAPIAxesTypeFloat64(TestTensordotAPIAxesType): - def set_dtype(self): self.dtype = np.float64 diff --git a/python/paddle/fluid/tests/unittests/test_tf32_cublas.py b/python/paddle/fluid/tests/unittests/test_tf32_cublas.py index 91dbb9938bb5bb4cea701646190c9280051218e9..ce97a7ad173dbddc610e88513b81da3a4a29a519 100644 --- a/python/paddle/fluid/tests/unittests/test_tf32_cublas.py +++ b/python/paddle/fluid/tests/unittests/test_tf32_cublas.py @@ -20,7 +20,6 @@ import paddle.fluid.core as core class TestTF32Switch(unittest.TestCase): - def test_on_off(self): if core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) @@ -36,7 +35,6 @@ class TestTF32Switch(unittest.TestCase): class TestTF32OnMatmul(unittest.TestCase): - def test_dygraph_without_out(self): if core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) diff --git a/python/paddle/fluid/tests/unittests/test_tf32_cudnn.py b/python/paddle/fluid/tests/unittests/test_tf32_cudnn.py index e50bc28133332ba2741a72474c3723af4f68903d..cb57c93b71cb52fb5d92d94e359c626edb0a3019 100644 --- a/python/paddle/fluid/tests/unittests/test_tf32_cudnn.py +++ b/python/paddle/fluid/tests/unittests/test_tf32_cudnn.py @@ -17,7 +17,6 @@ import paddle.fluid.core as core class TestTF32Switch(unittest.TestCase): - def test_on_off(self): if core.is_compiled_with_cuda(): self.assertTrue(core.get_cudnn_switch()) # default diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py index 362464a1ba3673d67d13fb576e0b02ec5d1ed3f0..b6ca4b5711ac50f22773ac957986e32dd6b3ffcf 100644 --- a/python/paddle/fluid/tests/unittests/test_tile_op.py +++ b/python/paddle/fluid/tests/unittests/test_tile_op.py @@ -23,9 +23,8 @@ from decorator_helper import prog_scope import paddle.fluid.layers as layers -#Situation 1: repeat_times is a list (without tensor) +# Situation 1: repeat_times is a list (without tensor) class TestTileOpRank1(OpTest): - def setUp(self): self.op_type = "tile" self.init_data() @@ -47,21 +46,18 @@ class TestTileOpRank1(OpTest): class TestTileOpRank_ZeroDim1(TestTileOpRank1): - def init_data(self): self.ori_shape = [] self.repeat_times = [] class TestTileOpRank_ZeroDim2(TestTileOpRank1): - def init_data(self): self.ori_shape = [] self.repeat_times = [2] class TestTileOpRank_ZeroDim3(TestTileOpRank1): - def init_data(self): self.ori_shape = [] self.repeat_times = [2, 3] @@ -69,42 +65,36 @@ class TestTileOpRank_ZeroDim3(TestTileOpRank1): # with dimension expanding class TestTileOpRank2Expanding(TestTileOpRank1): - def init_data(self): self.ori_shape = [120] self.repeat_times = [2, 2] class TestTileOpRank2(TestTileOpRank1): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] class TestTileOpRank3_Corner(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (1, 1, 1) class TestTileOpRank3_Corner2(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (2, 2) class TestTileOpRank3(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 15) self.repeat_times = (2, 1, 4) class TestTileOpRank4(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.repeat_times = (3, 2, 1, 2) @@ -112,14 +102,14 @@ class 
TestTileOpRank4(TestTileOpRank1): # Situation 2: repeat_times is a list (with tensor) class TestTileOpRank1_tensor_attr(OpTest): - def setUp(self): self.op_type = "tile" self.init_data() repeat_times_tensor = [] for index, ele in enumerate(self.repeat_times): - repeat_times_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + repeat_times_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype("float64"), @@ -142,7 +132,6 @@ class TestTileOpRank1_tensor_attr(OpTest): class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [1, 1] @@ -150,7 +139,6 @@ class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -159,7 +147,6 @@ class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): # Situation 3: repeat_times is a tensor class TestTileOpRank1_tensor(OpTest): - def setUp(self): self.op_type = "tile" self.init_data() @@ -184,7 +171,6 @@ class TestTileOpRank1_tensor(OpTest): class TestTileOpRank2_tensor(TestTileOpRank1_tensor): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -192,7 +178,6 @@ class TestTileOpRank2_tensor(TestTileOpRank1_tensor): # Situation 4: input x is Integer class TestTileOpInteger(OpTest): - def setUp(self): self.op_type = "tile" self.inputs = { @@ -208,7 +193,6 @@ class TestTileOpInteger(OpTest): # Situation 5: input x is Bool class TestTileOpBoolean(OpTest): - def setUp(self): self.op_type = "tile" self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")} @@ -222,7 +206,6 @@ class TestTileOpBoolean(OpTest): # Situation 56: input x is Integer class TestTileOpInt64_t(OpTest): - def setUp(self): self.op_type = "tile" self.inputs = { @@ -237,11 +220,11 @@ class TestTileOpInt64_t(OpTest): class TestTileError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) repeat_times = [2, 2] self.assertRaises(TypeError, paddle.tile, x1, repeat_times) x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") @@ -252,7 +235,6 @@ class TestTileError(unittest.TestCase): class TestTileAPIStatic(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): repeat_times = [2, 2] @@ -264,7 +246,6 @@ class TestTileAPIStatic(unittest.TestCase): # Test python API class TestTileAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): np_x = np.random.random([12, 14]).astype("float32") @@ -286,7 +267,6 @@ class TestTileAPI(unittest.TestCase): class TestTileDoubleGradCheck(unittest.TestCase): - def tile_wrapper(self, x): return paddle.tile(x[0], [2, 1]) @@ -301,17 +281,13 @@ class TestTileDoubleGradCheck(unittest.TestCase): out = paddle.tile(data, [2, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.tile_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + 
gradient_checker.double_grad_check_for_dygraph( + self.tile_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -323,7 +299,6 @@ class TestTileDoubleGradCheck(unittest.TestCase): class TestTileTripleGradCheck(unittest.TestCase): - def tile_wrapper(self, x): return paddle.tile(x[0], [2, 1]) @@ -338,17 +313,13 @@ class TestTileTripleGradCheck(unittest.TestCase): out = paddle.tile(data, [2, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.tile_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.tile_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -360,7 +331,6 @@ class TestTileTripleGradCheck(unittest.TestCase): class TestTileAPI_ZeroDim(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_top_k_op.py b/python/paddle/fluid/tests/unittests/test_top_k_op.py index a4e54af05bd1339e420df9704cd63a37f33e666a..d722d3f622fe5ec47a2cb134e44e6e6c0479c2af 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_op.py @@ -19,7 +19,6 @@ import paddle class TestTopkOp(OpTest): - def setUp(self): self.variable_k = False self.set_args() diff --git a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py index c6254801b6ecad38c9583b3d20317f81bd3c13b6..60d2502a56b2aab01e5b91a0fd7334848c7fe074 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py @@ -36,7 +36,6 @@ def numpy_topk(x, k=1, axis=-1, largest=True): class TestTopkOp(OpTest): - def init_args(self): self.k = 3 self.axis = 1 @@ -50,10 +49,9 @@ class TestTopkOp(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): @@ -64,7 +62,6 @@ class TestTopkOp(OpTest): class TestTopkOp1(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 0 @@ -72,7 +69,6 @@ class TestTopkOp1(TestTopkOp): class TestTopkOp2(TestTopkOp): - def init_args(self): self.k = 4 self.axis = 0 @@ -80,7 +76,6 @@ class TestTopkOp2(TestTopkOp): class TestTopkOp3(OpTest): - def init_args(self): self.k = 6 self.axis = 1 @@ -94,15 +89,13 @@ class TestTopkOp3(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp4(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ 
-116,15 +109,13 @@ class TestTopkOp4(TestTopkOp): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp5(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -138,15 +129,13 @@ class TestTopkOp5(TestTopkOp): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopkOp6(OpTest): - def init_args(self): self.k = 100 self.axis = 1 @@ -160,15 +149,13 @@ class TestTopkOp6(OpTest): self.init_args() self.inputs = {'X': self.input_data} self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} class TestTopKAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.input_data = np.random.rand(6, 7, 8) @@ -181,92 +168,89 @@ class TestTopKAPI(unittest.TestCase): # test case for basic test case 1 paddle_result = paddle.topk(input_tensor, k=2) numpy_result = numpy_topk(self.input_data, k=2) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 2 with axis paddle_result = paddle.topk(input_tensor, k=2, axis=1) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 3 with tensor K k_tensor = paddle.to_tensor(np.array([2])) paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 4 with tensor largest k_tensor = paddle.to_tensor(np.array([2])) - paddle_result = paddle.topk(input_tensor, - k=2, - axis=1, - largest=False) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=1, - largest=False) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - 
np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + paddle_result = paddle.topk( + input_tensor, k=2, axis=1, largest=False + ) + numpy_result = numpy_topk( + self.input_data, k=2, axis=1, largest=False + ) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 5 with axis -1 k_tensor = paddle.to_tensor(np.array([2])) - paddle_result = paddle.topk(input_tensor, - k=2, - axis=-1, - largest=False) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=-1, - largest=False) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + paddle_result = paddle.topk( + input_tensor, k=2, axis=-1, largest=False + ) + numpy_result = numpy_topk( + self.input_data, k=2, axis=-1, largest=False + ) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 6 for the partial sort paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1) numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) - np.testing.assert_allclose(paddle_result[0].numpy(), - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1].numpy(), - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[0].numpy(), numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1].numpy(), numpy_result[1], rtol=1e-05 + ) # test case for basic test case 7 for the unsorted paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) - sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()), - axis=1, - k=2) + sort_paddle = numpy_topk( + np.array(paddle_result[0].numpy()), axis=1, k=2 + ) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - np.testing.assert_allclose(sort_paddle[0], - numpy_result[0], - rtol=1e-05) + np.testing.assert_allclose( + sort_paddle[0], numpy_result[0], rtol=1e-05 + ) def run_static(self, place): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - input_tensor = paddle.static.data(name="x", - shape=[6, 7, 8], - dtype="float64") - large_input_tensor = paddle.static.data(name="large_x", - shape=[2, 1030], - dtype="float64") + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + input_tensor = paddle.static.data( + name="x", shape=[6, 7, 8], dtype="float64" + ) + large_input_tensor = paddle.static.data( + name="large_x", shape=[2, 1030], dtype="float64" + ) k_tensor = paddle.static.data(name="k", shape=[1], dtype="int32") result1 = paddle.topk(input_tensor, k=2) result2 = paddle.topk(input_tensor, k=2, axis=-1) @@ -280,71 +264,80 @@ class TestTopKAPI(unittest.TestCase): exe = paddle.static.Executor(place) input_data = np.random.rand(10, 20).astype("float64") large_input_data = np.random.rand(2, 100).astype("float64") - paddle_result = exe.run(feed={ - "x": self.input_data, - "large_x": self.large_input_data, - "k": np.array([2]).astype("int32") - }, - fetch_list=[ - result1[0], result1[1], result2[0], - result2[1], result3[0], result3[1], - result4[0], result4[1], result5[0], - result5[1], result6[0], result6[1], - result7[0], result7[1] - ]) + paddle_result = exe.run( + feed={ + 
"x": self.input_data, + "large_x": self.large_input_data, + "k": np.array([2]).astype("int32"), + }, + fetch_list=[ + result1[0], + result1[1], + result2[0], + result2[1], + result3[0], + result3[1], + result4[0], + result4[1], + result5[0], + result5[1], + result6[0], + result6[1], + result7[0], + result7[1], + ], + ) numpy_result = numpy_topk(self.input_data, k=2) - np.testing.assert_allclose(paddle_result[0], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[1], - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[0], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[1], numpy_result[1], rtol=1e-05 + ) numpy_result = numpy_topk(self.input_data, k=2, axis=-1) - np.testing.assert_allclose(paddle_result[2], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[3], - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[2], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[3], numpy_result[1], rtol=1e-05 + ) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - np.testing.assert_allclose(paddle_result[4], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[5], - numpy_result[1], - rtol=1e-05) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=1, - largest=False) - np.testing.assert_allclose(paddle_result[6], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[7], - numpy_result[1], - rtol=1e-05) - numpy_result = numpy_topk(self.input_data, - k=2, - axis=-1, - largest=False) - np.testing.assert_allclose(paddle_result[8], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[9], - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[4], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[5], numpy_result[1], rtol=1e-05 + ) + numpy_result = numpy_topk( + self.input_data, k=2, axis=1, largest=False + ) + np.testing.assert_allclose( + paddle_result[6], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[7], numpy_result[1], rtol=1e-05 + ) + numpy_result = numpy_topk( + self.input_data, k=2, axis=-1, largest=False + ) + np.testing.assert_allclose( + paddle_result[8], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[9], numpy_result[1], rtol=1e-05 + ) numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) - np.testing.assert_allclose(paddle_result[10], - numpy_result[0], - rtol=1e-05) - np.testing.assert_allclose(paddle_result[11], - numpy_result[1], - rtol=1e-05) + np.testing.assert_allclose( + paddle_result[10], numpy_result[0], rtol=1e-05 + ) + np.testing.assert_allclose( + paddle_result[11], numpy_result[1], rtol=1e-05 + ) sort_paddle = numpy_topk(paddle_result[12], axis=1, k=2) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - np.testing.assert_allclose(sort_paddle[0], - numpy_result[0], - rtol=1e-05) + np.testing.assert_allclose( + sort_paddle[0], numpy_result[0], rtol=1e-05 + ) def test_cases(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_trace_op.py b/python/paddle/fluid/tests/unittests/test_trace_op.py index 83aca5654c20cdbb318e0087216ca97a62c85616..8bceee2fdfdf66fadb764a3c74c7968db86e45cd 100644 --- a/python/paddle/fluid/tests/unittests/test_trace_op.py +++ b/python/paddle/fluid/tests/unittests/test_trace_op.py @@ -22,7 +22,6 @@ import paddle class TestTraceOp(OpTest): - def setUp(self): 
self.op_type = "trace" self.python_api = paddle.trace @@ -43,31 +42,32 @@ class TestTraceOp(OpTest): class TestTraceOpCase1(TestTraceOp): - def init_config(self): self.case = np.random.randn(2, 20, 2, 3).astype('float32') self.inputs = {'Input': self.case} self.attrs = {'offset': 1, 'axis1': 0, 'axis2': 2} - self.target = np.trace(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.trace( + self.inputs['Input'], + offset=self.attrs['offset'], + axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) class TestTraceOpCase2(TestTraceOp): - def init_config(self): self.case = np.random.randn(2, 20, 2, 3).astype('float32') self.inputs = {'Input': self.case} self.attrs = {'offset': -5, 'axis1': 1, 'axis2': -1} - self.target = np.trace(self.inputs['Input'], - offset=self.attrs['offset'], - axis1=self.attrs['axis1'], - axis2=self.attrs['axis2']) + self.target = np.trace( + self.inputs['Input'], + offset=self.attrs['offset'], + axis1=self.attrs['axis1'], + axis2=self.attrs['axis2'], + ) class TestTraceAPICase(unittest.TestCase): - def test_case1(self): case = np.random.randn(2, 20, 2, 3).astype('float32') data1 = fluid.data(name='data1', shape=[2, 20, 2, 3], dtype='float32') @@ -76,10 +76,12 @@ class TestTraceAPICase(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - results = exe.run(fluid.default_main_program(), - feed={"data1": case}, - fetch_list=[out1, out2], - return_numpy=True) + results = exe.run( + fluid.default_main_program(), + feed={"data1": case}, + fetch_list=[out1, out2], + return_numpy=True, + ) target1 = np.trace(case) target2 = np.trace(case, offset=-5, axis1=1, axis2=-1) np.testing.assert_allclose(results[0], target1, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_traced_layer_err_msg.py b/python/paddle/fluid/tests/unittests/test_traced_layer_err_msg.py index 59b652b1d3d67406b5ffc4c8c456c183e9cf4867..b160debe41c9a9837f67b33fb447fb457de6c076 100644 --- a/python/paddle/fluid/tests/unittests/test_traced_layer_err_msg.py +++ b/python/paddle/fluid/tests/unittests/test_traced_layer_err_msg.py @@ -24,12 +24,12 @@ import paddle.nn as nn class SimpleFCLayer(nn.Layer): - def __init__(self, feature_size, batch_size, fc_size): super(SimpleFCLayer, self).__init__() self._linear = nn.Linear(feature_size, fc_size) self._offset = paddle.to_tensor( - np.random.random((batch_size, fc_size)).astype('float32')) + np.random.random((batch_size, fc_size)).astype('float32') + ) def forward(self, x): fc = self._linear(x) @@ -37,7 +37,6 @@ class SimpleFCLayer(nn.Layer): class LinearNetWithNone(nn.Layer): - def __init__(self, feature_size, fc_size): super(LinearNetWithNone, self).__init__() self._linear = nn.Linear(feature_size, fc_size) @@ -49,7 +48,6 @@ class LinearNetWithNone(nn.Layer): class TestTracedLayerErrMsg(unittest.TestCase): - def setUp(self): self.batch_size = 4 self.feature_size = 3 @@ -66,116 +64,163 @@ class TestTracedLayerErrMsg(unittest.TestCase): return with fluid.dygraph.guard(): in_x = fluid.dygraph.to_variable( - np.random.random( - (self.batch_size, self.feature_size)).astype('float32')) + np.random.random((self.batch_size, self.feature_size)).astype( + 'float32' + ) + ) with self.assertRaises(AssertionError) as e: dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - None, [in_x]) + None, [in_x] + ) self.assertEqual( - "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received <{} 'NoneType'>." 
- .format(self.type_str), str(e.exception)) + "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received <{} 'NoneType'>.".format( + self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - self.layer, 3) + self.layer, 3 + ) self.assertEqual( - "The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'int'>." - .format(self.type_str), str(e.exception)) + "The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'int'>.".format( + self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - self.layer, [True, 1]) + self.layer, [True, 1] + ) self.assertEqual( - "The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'bool'>." - .format(self.type_str), str(e.exception)) + "The type of 'each element of inputs' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'bool'>.".format( + self.type_str + ), + str(e.exception), + ) dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - self.layer, [in_x]) + self.layer, [in_x] + ) def test_set_strategy_err(self): if fluid.framework.in_dygraph_mode(): return with fluid.dygraph.guard(): in_x = fluid.dygraph.to_variable( - np.random.random( - (self.batch_size, self.feature_size)).astype('float32')) + np.random.random((self.batch_size, self.feature_size)).astype( + 'float32' + ) + ) dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - self.layer, [in_x]) + self.layer, [in_x] + ) with self.assertRaises(AssertionError) as e: traced_layer.set_strategy(1, fluid.ExecutionStrategy()) self.assertEqual( - "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received <{} 'int'>." - .format(self.type_str), str(e.exception)) + "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received <{} 'int'>.".format( + self.type_str + ), + str(e.exception), + ) with self.assertRaises(AssertionError) as e: traced_layer.set_strategy(fluid.BuildStrategy(), False) self.assertEqual( - "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received <{} 'bool'>." 
- .format(self.type_str), str(e.exception)) + "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received <{} 'bool'>.".format( + self.type_str + ), + str(e.exception), + ) traced_layer.set_strategy(build_strategy=fluid.BuildStrategy()) traced_layer.set_strategy(exec_strategy=fluid.ExecutionStrategy()) - traced_layer.set_strategy(fluid.BuildStrategy(), - fluid.ExecutionStrategy()) + traced_layer.set_strategy( + fluid.BuildStrategy(), fluid.ExecutionStrategy() + ) def test_save_inference_model_err(self): if fluid.framework.in_dygraph_mode(): return with fluid.dygraph.guard(): in_x = fluid.dygraph.to_variable( - np.random.random( - (self.batch_size, self.feature_size)).astype('float32')) + np.random.random((self.batch_size, self.feature_size)).astype( + 'float32' + ) + ) dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - self.layer, [in_x]) + self.layer, [in_x] + ) path = os.path.join(self.temp_dir.name, './traced_layer_err_msg') with self.assertRaises(TypeError) as e: traced_layer.save_inference_model([0]) self.assertEqual( - "The type of 'path' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'str'>, but received <{} 'list'>. " - .format(self.type_str, self.type_str), str(e.exception)) + "The type of 'path' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'str'>, but received <{} 'list'>. ".format( + self.type_str, self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: traced_layer.save_inference_model(path, [0], [None]) self.assertEqual( - "The type of 'each element of fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. " - .format(self.type_str, self.type_str), str(e.exception)) + "The type of 'each element of fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. ".format( + self.type_str, self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: traced_layer.save_inference_model(path, [0], False) self.assertEqual( - "The type of 'fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. " - .format(self.type_str, self.type_str, self.type_str), - str(e.exception)) + "The type of 'fetch' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. ".format( + self.type_str, self.type_str, self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: traced_layer.save_inference_model(path, [None], [0]) self.assertEqual( - "The type of 'each element of feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. " - .format(self.type_str, self.type_str), str(e.exception)) + "The type of 'each element of feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be <{} 'int'>, but received <{} 'NoneType'>. ".format( + self.type_str, self.type_str + ), + str(e.exception), + ) with self.assertRaises(TypeError) as e: traced_layer.save_inference_model(path, True, [0]) self.assertEqual( - "The type of 'feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. 
" - .format(self.type_str, self.type_str, self.type_str), - str(e.exception)) + "The type of 'feed' in fluid.dygraph.jit.TracedLayer.save_inference_model must be (<{} 'NoneType'>, <{} 'list'>), but received <{} 'bool'>. ".format( + self.type_str, self.type_str, self.type_str + ), + str(e.exception), + ) with self.assertRaises(ValueError) as e: traced_layer.save_inference_model("") self.assertEqual( "The input path MUST be format of dirname/file_prefix [dirname\\file_prefix in Windows system], " - "but received file_prefix is empty string.", str(e.exception)) + "but received file_prefix is empty string.", + str(e.exception), + ) traced_layer.save_inference_model(path) def _train_simple_net(self): layer = None with fluid.dygraph.guard(): - layer = SimpleFCLayer(self.feature_size, self.batch_size, - self.fc_size) - optimizer = fluid.optimizer.SGD(learning_rate=1e-3, - parameter_list=layer.parameters()) + layer = SimpleFCLayer( + self.feature_size, self.batch_size, self.fc_size + ) + optimizer = fluid.optimizer.SGD( + learning_rate=1e-3, parameter_list=layer.parameters() + ) for i in range(5): in_x = fluid.dygraph.to_variable( np.random.random( - (self.batch_size, self.feature_size)).astype('float32')) + (self.batch_size, self.feature_size) + ).astype('float32') + ) dygraph_out = layer(in_x) loss = fluid.layers.reduce_mean(dygraph_out) loss.backward() @@ -184,7 +229,6 @@ class TestTracedLayerErrMsg(unittest.TestCase): class TestOutVarWithNoneErrMsg(unittest.TestCase): - def test_linear_net_with_none(self): if fluid.framework.in_dygraph_mode(): return @@ -192,7 +236,8 @@ class TestOutVarWithNoneErrMsg(unittest.TestCase): in_x = paddle.to_tensor(np.random.random((4, 100)).astype('float32')) with self.assertRaises(TypeError): dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - model, [in_x]) + model, [in_x] + ) class TestTracedLayerSaveInferenceModel(unittest.TestCase): @@ -202,6 +247,7 @@ class TestTracedLayerSaveInferenceModel(unittest.TestCase): self.temp_dir = tempfile.TemporaryDirectory() self.save_path = os.path.join(self.temp_dir.name, "./nonexist_dir/fc") import shutil + if os.path.exists(os.path.dirname(self.save_path)): shutil.rmtree(os.path.dirname(self.save_path)) @@ -215,7 +261,8 @@ class TestTracedLayerSaveInferenceModel(unittest.TestCase): input_var = paddle.to_tensor(np.random.random([4, 3]).astype('float32')) with fluid.dygraph.guard(): dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( - fc_layer, inputs=[input_var]) + fc_layer, inputs=[input_var] + ) self.assertFalse(os.path.exists(os.path.dirname(self.save_path))) traced_layer.save_inference_model(self.save_path) self.assertTrue(os.path.exists(os.path.dirname(self.save_path))) diff --git a/python/paddle/fluid/tests/unittests/test_trainable.py b/python/paddle/fluid/tests/unittests/test_trainable.py index d9d5121ff9b528ef3eebb9e8bb7cf46c67a6044e..bbd7ae55d30df032a4f45753874af68341916df1 100644 --- a/python/paddle/fluid/tests/unittests/test_trainable.py +++ b/python/paddle/fluid/tests/unittests/test_trainable.py @@ -22,21 +22,18 @@ from simple_nets import init_data def test_trainable(): x = fluid.layers.data(name='image', shape=[784], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') - feature = fluid.layers.fc(input=x, - size=10, - param_attr=fluid.ParamAttr(trainable=False)) + feature = fluid.layers.fc( + input=x, size=10, param_attr=fluid.ParamAttr(trainable=False) + ) loss = fluid.layers.cross_entropy(input=feature, label=label) loss = paddle.mean(loss) return loss class 
TestTrainable(unittest.TestCase): - - def check_trainable(self, - model, - feed_dict, - op_count, - optimizer=fluid.optimizer.Adam()): + def check_trainable( + self, model, feed_dict, op_count, optimizer=fluid.optimizer.Adam() + ): place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -64,22 +61,17 @@ class TestTrainable(unittest.TestCase): feed_dict = {'image': img, 'label': label} # Note that, because the Weight of FC is not trainable and the x is stop_gradient, # so the 'mul_grad' should not be appended. - self.check_trainable(test_trainable, - feed_dict, - op_count={ - 'adam': 1, - 'scale': 0, - 'mul_grad': 0 - }) self.check_trainable( test_trainable, feed_dict, - op_count={ - 'adamax': 1, - 'scale': 1, - 'mul_grad': 0 - }, - optimizer=fluid.optimizer.Adamax(learning_rate=0.2)) + op_count={'adam': 1, 'scale': 0, 'mul_grad': 0}, + ) + self.check_trainable( + test_trainable, + feed_dict, + op_count={'adamax': 1, 'scale': 1, 'mul_grad': 0}, + optimizer=fluid.optimizer.Adamax(learning_rate=0.2), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_trainer_desc.py b/python/paddle/fluid/tests/unittests/test_trainer_desc.py index 390a650fd1395e44e544504d95e03ccd06c418d7..bc584439fdfd8862f8f28cf785ca1e8bd8e463bc 100644 --- a/python/paddle/fluid/tests/unittests/test_trainer_desc.py +++ b/python/paddle/fluid/tests/unittests/test_trainer_desc.py @@ -21,7 +21,7 @@ import unittest class TestTrainerDesc(unittest.TestCase): - """ TestCases for TrainerDesc. """ + """TestCases for TrainerDesc.""" def test_config(self): """ diff --git a/python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py b/python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py index 9d3edc0e062143b889c6c595d5747cdaef181309..364e6ff167ca123c83decafa5f98070cd714dd4a 100644 --- a/python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py +++ b/python/paddle/fluid/tests/unittests/test_transfer_dtype_op.py @@ -21,14 +21,13 @@ from op_test import OpTest, convert_uint16_to_float, convert_float_to_uint16 class TestTransferDtypeOpFp32ToFp64(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float64')} self.attrs = { 'out_dtype': int(core.VarDesc.VarType.FP64), - 'in_dtype': int(core.VarDesc.VarType.FP32) + 'in_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'transfer_dtype' @@ -37,14 +36,13 @@ class TestTransferDtypeOpFp32ToFp64(OpTest): class TestTransferDtypeOpFp16ToFp32(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float16')} self.outputs = {'Out': ipt.astype('float32')} self.attrs = { 'out_dtype': int(core.VarDesc.VarType.FP32), - 'in_dtype': int(core.VarDesc.VarType.FP16) + 'in_dtype': int(core.VarDesc.VarType.FP16), } self.op_type = 'transfer_dtype' @@ -53,14 +51,13 @@ class TestTransferDtypeOpFp16ToFp32(OpTest): class TestTransferDtypeOpFp32ToFp16(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float16')} self.attrs = { 'out_dtype': int(core.VarDesc.VarType.FP16), - 'in_dtype': int(core.VarDesc.VarType.FP32) + 'in_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'transfer_dtype' @@ -69,14 +66,13 @@ class TestTransferDtypeOpFp32ToFp16(OpTest): class TestTransferDtypeOpBf16ToFp32(OpTest): - def setUp(self): ipt = np.array(np.random.randint(10, size=[10, 10])).astype('uint16') self.inputs = {'X': ipt} self.outputs = {'Out': 
convert_uint16_to_float(ipt)} self.attrs = { 'out_dtype': int(core.VarDesc.VarType.FP32), - 'in_dtype': int(core.VarDesc.VarType.BF16) + 'in_dtype': int(core.VarDesc.VarType.BF16), } self.op_type = 'transfer_dtype' @@ -85,14 +81,13 @@ class TestTransferDtypeOpBf16ToFp32(OpTest): class TestTransferDtypeFp32ToBf16(OpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]).astype('float32') self.inputs = {'X': ipt} self.outputs = {'Out': convert_float_to_uint16(ipt)} self.attrs = { 'out_dtype': int(core.VarDesc.VarType.BF16), - 'in_dtype': int(core.VarDesc.VarType.FP32) + 'in_dtype': int(core.VarDesc.VarType.FP32), } self.op_type = 'transfer_dtype' diff --git a/python/paddle/fluid/tests/unittests/test_transfer_layout_op.py b/python/paddle/fluid/tests/unittests/test_transfer_layout_op.py index a24fb767d10d314c813e3bb35a6e86693f690765..48c2897b00549031c98a61033df851685204bfb0 100644 --- a/python/paddle/fluid/tests/unittests/test_transfer_layout_op.py +++ b/python/paddle/fluid/tests/unittests/test_transfer_layout_op.py @@ -25,15 +25,11 @@ from op_test import OpTest # default kNCHW class TestTransferLayoutOpkNCHWTokNHWC(OpTest): - def setUp(self): ipt = np.random.random(size=[2, 3, 10, 10]) self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.transpose([0, 2, 3, 1])} - self.attrs = { - 'src_layout': 0, - 'dst_layout': 1 # kNHWC - } + self.attrs = {'src_layout': 0, 'dst_layout': 1} # kNHWC self.op_type = 'transfer_layout' def test_check_output(self): @@ -45,20 +41,17 @@ def softmax_with_data_format(x, data_format, axis=-1, dtype=None, name=None): outs_cast = x outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype) - helper.append_op(type='softmax', - inputs={'X': outs_cast}, - outputs={'Out': outs_softmax}, - attrs={ - 'axis': axis, - 'use_cudnn': True, - 'data_format': data_format - }) + helper.append_op( + type='softmax', + inputs={'X': outs_cast}, + outputs={'Out': outs_softmax}, + attrs={'axis': axis, 'use_cudnn': True, 'data_format': data_format}, + ) return outs_softmax class TestTransferLayoutOpGpu(unittest.TestCase): - def test_layout_transfer(self): if not core.is_compiled_with_cuda(): return @@ -69,19 +62,24 @@ class TestTransferLayoutOpGpu(unittest.TestCase): startup_program = Program() n, c, h, w = 2, 3, 4, 5 with program_guard(main_program, startup_program): - x = paddle.static.data(shape=[n, c, h, w], - dtype='float32', - name='x') + x = paddle.static.data( + shape=[n, c, h, w], dtype='float32', name='x' + ) y = softmax_with_data_format(x, data_format='NCHW') z = softmax_with_data_format(y, data_format='NHWC') - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) exe.run(startup_program) - ret = exe.run(main_program, - feed={'x': np.full((n, c, h, w), 1, np.float32)}, - fetch_list=[z.name]) + ret = exe.run( + main_program, + feed={'x': np.full((n, c, h, w), 1, np.float32)}, + fetch_list=[z.name], + ) assert len(ret) == 1 assert ret[0].shape == (n, h, w, c) diff --git a/python/paddle/fluid/tests/unittests/test_transformer_api.py b/python/paddle/fluid/tests/unittests/test_transformer_api.py index 6b254ac3115d4e5c39941a5e865f207c987a9e35..3f2e2185f9ee26b72ab0f42e2147a46093b42455 100644 --- a/python/paddle/fluid/tests/unittests/test_transformer_api.py +++ b/python/paddle/fluid/tests/unittests/test_transformer_api.py @@ -15,7 +15,14 @@ import numpy as np import paddle import paddle.fluid as 
fluid -from paddle.nn.layer.transformer import MultiHeadAttention, TransformerEncoderLayer, TransformerDecoderLayer, TransformerEncoder, TransformerDecoder, Transformer +from paddle.nn.layer.transformer import ( + MultiHeadAttention, + TransformerEncoderLayer, + TransformerDecoderLayer, + TransformerEncoder, + TransformerDecoder, + Transformer, +) import unittest @@ -33,34 +40,67 @@ def generate_basic_params(mode="attn", self_attention=True): kdim, vdim = [np.random.randint(5, 20) for _ in range(2)] key_length = np.random.randint(2, 10) value_length = key_length - return batch_size, query_length, key_length, value_length, embed_dim, kdim, vdim, num_heads, attn_dropout + return ( + batch_size, + query_length, + key_length, + value_length, + embed_dim, + kdim, + vdim, + num_heads, + attn_dropout, + ) else: dropout, act_dropout = 0.0, 0.0 dim_feedforward = np.random.randint(128, 1024) sequence_length = np.random.randint(2, 10) if mode == "encoder_layer": - return batch_size, embed_dim, num_heads, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length + return ( + batch_size, + embed_dim, + num_heads, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + ) elif mode == "decoder_layer": target_length = np.random.randint(2, 10) - return batch_size, embed_dim, num_heads, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length, target_length - - -def generate_query_key_value_cache(self_attention, - batch_size, - num_heads, - query_length, - embed_dim, - attn_mask_type, - key_length=None, - value_length=None, - kdim=None, - vdim=None, - cache=None): - query = np.random.rand(batch_size, query_length, - embed_dim).astype("float32") - attn_mask = np.ones((batch_size, num_heads, query_length, key_length), - dtype=attn_mask_type) + return ( + batch_size, + embed_dim, + num_heads, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + target_length, + ) + + +def generate_query_key_value_cache( + self_attention, + batch_size, + num_heads, + query_length, + embed_dim, + attn_mask_type, + key_length=None, + value_length=None, + kdim=None, + vdim=None, + cache=None, +): + query = np.random.rand(batch_size, query_length, embed_dim).astype( + "float32" + ) + attn_mask = np.ones( + (batch_size, num_heads, query_length, key_length), dtype=attn_mask_type + ) if attn_mask_type == 'int64': attn_mask = np.tril(attn_mask) elif attn_mask_type == 'float64': @@ -77,18 +117,19 @@ def generate_query_key_value_cache(self_attention, cache_dict = {} if cache: if not self_attention: - cache_dict["static_k"] = np.random.rand(batch_size, num_heads, - key_length, - head_dim).astype("float32") - cache_dict["static_v"] = np.random.rand(batch_size, num_heads, - value_length, - head_dim).astype("float32") + cache_dict["static_k"] = np.random.rand( + batch_size, num_heads, key_length, head_dim + ).astype("float32") + cache_dict["static_v"] = np.random.rand( + batch_size, num_heads, value_length, head_dim + ).astype("float32") else: - cache_dict["k"] = np.random.rand(batch_size, num_heads, key_length, - head_dim).astype("float32") - cache_dict["v"] = np.random.rand(batch_size, num_heads, - value_length, - head_dim).astype("float32") + cache_dict["k"] = np.random.rand( + batch_size, num_heads, key_length, head_dim + ).astype("float32") + cache_dict["v"] = np.random.rand( + batch_size, num_heads, value_length, head_dim + ).astype("float32") else: cache_dict = None return query, key, value, attn_mask, cache_dict @@ -113,8 +154,9 @@ def softmax(x): def 
batch_matmul(x, y): assert x.shape[0] == y.shape[0] assert x.shape[1] == y.shape[1] - retval = np.zeros((x.shape[0], x.shape[1], x.shape[2], y.shape[3]), - dtype=np.float64) + retval = np.zeros( + (x.shape[0], x.shape[1], x.shape[2], y.shape[3]), dtype=np.float64 + ) for i in range(x.shape[0]): for j in range(x.shape[1]): retval[i, j, :, :] = np.matmul(x[i, j, :, :], y[i, j, :, :]) @@ -133,8 +175,13 @@ def scaled_dot_product_attention(q, k, v, d_key, attn_mask, multi_head_attn): weight = softmax(qkt) attn_heads = batch_matmul(weight, v) attn_heads = attn_heads.transpose((0, 2, 1, 3)) - attn_heads = attn_heads.reshape((attn_heads.shape[0], attn_heads.shape[1], - attn_heads.shape[2] * attn_heads.shape[3])) + attn_heads = attn_heads.reshape( + ( + attn_heads.shape[0], + attn_heads.shape[1], + attn_heads.shape[2] * attn_heads.shape[3], + ) + ) return attn_heads @@ -152,8 +199,16 @@ def cal_qkv(key, value, num_heads, embed_dim, multi_head_attn): return k, v -def prepare_qkv(query, key, value, num_heads, embed_dim, self_attention, - multi_head_attn, cache_dict): +def prepare_qkv( + query, + key, + value, + num_heads, + embed_dim, + self_attention, + multi_head_attn, + cache_dict, +): q_weight = multi_head_attn.q_proj.weight.numpy() q = fc(query, q_weight) q = q.reshape((q.shape[0], q.shape[1], num_heads, embed_dim // num_heads)) @@ -196,7 +251,7 @@ def layer_norm(x, normalized_shape, norm, epsilon=1e-05, act=None): x = x.reshape((batch_size * src_len, d_model)) mu = np.mean(x, axis=1, keepdims=True) sigma_squar = np.sum(np.square(x - mu), axis=1) / d_model - x1_up = (x - mu) + x1_up = x - mu x1_down_1 = sigma_squar + epsilon x1_down = np.sqrt(x1_down_1) x1_down = x1_down.reshape((x1_down.shape[0], 1)) @@ -223,9 +278,7 @@ def ffn(src, encoder_layer, ffn_fc1_act="relu"): class TestTransformer(unittest.TestCase): - def test_multi_head_attention(self): - def multihead_attention_test_helper(self_attention, cache): paddle.seed(2020) paddle.framework.random._manual_program_seed(2020) @@ -233,59 +286,111 @@ class TestTransformer(unittest.TestCase): with fluid.dygraph.guard(fluid.CPUPlace()): # generate params for multi_head_attention - batch_size, query_length, key_length, value_length, embed_dim, kdim, vdim, num_heads, attn_dropout = generate_basic_params( - "attn", self_attention) + ( + batch_size, + query_length, + key_length, + value_length, + embed_dim, + kdim, + vdim, + num_heads, + attn_dropout, + ) = generate_basic_params("attn", self_attention) for attn_mask_type in ['int64', 'float64']: - query, key, value, attn_mask, cache_dict = generate_query_key_value_cache( - self_attention, batch_size, num_heads, query_length, - embed_dim, attn_mask_type, key_length, value_length, - kdim, vdim, cache) + ( + query, + key, + value, + attn_mask, + cache_dict, + ) = generate_query_key_value_cache( + self_attention, + batch_size, + num_heads, + query_length, + embed_dim, + attn_mask_type, + key_length, + value_length, + kdim, + vdim, + cache, + ) if cache and self_attention: - attn_mask = np.concatenate((attn_mask, attn_mask), - axis=3) + attn_mask = np.concatenate( + (attn_mask, attn_mask), axis=3 + ) need_weight, param_attr, bias_attr = False, None, None # call paddle's function multi_head_attn = MultiHeadAttention( - embed_dim, num_heads, attn_dropout, kdim, vdim, - need_weight, param_attr, bias_attr) + embed_dim, + num_heads, + attn_dropout, + kdim, + vdim, + need_weight, + param_attr, + bias_attr, + ) # construct cache object cache_obj = None if cache_dict: if 'k' and 'v' in cache_dict: cache_obj = 
multi_head_attn.Cache( paddle.to_tensor(cache_dict['k']), - paddle.to_tensor(cache_dict['v'])) + paddle.to_tensor(cache_dict['v']), + ) elif 'static_k' and 'static_v' in cache_dict: cache_obj = multi_head_attn.StaticCache( paddle.to_tensor(cache_dict['static_k']), - paddle.to_tensor(cache_dict['static_v'])) + paddle.to_tensor(cache_dict['static_v']), + ) if attn_mask is not None: attn_output = multi_head_attn( - paddle.to_tensor(query), paddle.to_tensor(key), + paddle.to_tensor(query), + paddle.to_tensor(key), paddle.to_tensor(value), - paddle.to_tensor(attn_mask), cache_obj) + paddle.to_tensor(attn_mask), + cache_obj, + ) else: - attn_output = multi_head_attn(paddle.to_tensor(query), - paddle.to_tensor(key), - paddle.to_tensor(value), - attn_mask, cache_obj) + attn_output = multi_head_attn( + paddle.to_tensor(query), + paddle.to_tensor(key), + paddle.to_tensor(value), + attn_mask, + cache_obj, + ) attn_output = attn_output[0] if cache_dict else attn_output # implementation by numpy # compute q, k, v - q, k, v, _ = prepare_qkv(query, key, value, num_heads, - embed_dim, self_attention, - multi_head_attn, cache_dict) + q, k, v, _ = prepare_qkv( + query, + key, + value, + num_heads, + embed_dim, + self_attention, + multi_head_attn, + cache_dict, + ) # scale dot product attention attn_heads = scaled_dot_product_attention( - q, k, v, embed_dim // num_heads, attn_mask, - multi_head_attn) + q, + k, + v, + embed_dim // num_heads, + attn_mask, + multi_head_attn, + ) out_proj_weight = multi_head_attn.out_proj.weight.numpy() reference = fc(attn_heads, out_proj_weight) - np.testing.assert_allclose(attn_output.numpy(), - reference, - atol=1e-6) + np.testing.assert_allclose( + attn_output.numpy(), reference, atol=1e-6 + ) multihead_attention_test_helper(True, True) multihead_attention_test_helper(True, False) @@ -300,34 +405,51 @@ class TestTransformer(unittest.TestCase): ffn_fc1_act = "relu" # 1.generate basic params - batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length = generate_basic_params( - mode="encoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + ) = generate_basic_params(mode="encoder_layer") # 2.generate input for encoder - src = np.random.rand(batch_size, sequence_length, - d_model).astype("float32") + src = np.random.rand(batch_size, sequence_length, d_model).astype( + "float32" + ) residual = src - src_mask = np.zeros((batch_size, n_head, sequence_length, - sequence_length)).astype("float32") + src_mask = np.zeros( + (batch_size, n_head, sequence_length, sequence_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf # paddle - encoder_layer = TransformerEncoderLayer(d_model, n_head, - dim_feedforward, dropout, - ffn_fc1_act, attn_dropout, - act_dropout) + encoder_layer = TransformerEncoderLayer( + d_model, + n_head, + dim_feedforward, + dropout, + ffn_fc1_act, + attn_dropout, + act_dropout, + ) encoder_output = encoder_layer( - paddle.to_tensor(src), - paddle.to_tensor(src_mask)) # paddle.to_tensor(src_mask)) + paddle.to_tensor(src), paddle.to_tensor(src_mask) + ) # paddle.to_tensor(src_mask)) # 4.numpy: # paddle self attention - self_attn = MultiHeadAttention(d_model, - n_head, - dropout=attn_dropout) - attn_output = self_attn(paddle.to_tensor(src), - paddle.to_tensor(src), - paddle.to_tensor(src), - paddle.to_tensor(src_mask)).numpy() + self_attn = MultiHeadAttention( + d_model, n_head, dropout=attn_dropout + ) + attn_output = self_attn( + 
paddle.to_tensor(src), + paddle.to_tensor(src), + paddle.to_tensor(src), + paddle.to_tensor(src_mask), + ).numpy() src = attn_output + residual src_norm = layer_norm(src, d_model, encoder_layer.norm1) @@ -337,10 +459,9 @@ class TestTransformer(unittest.TestCase): src = residual + ffn_output src = layer_norm(src, d_model, encoder_layer.norm2) - np.testing.assert_allclose(encoder_output.numpy(), - src, - rtol=1e-5, - atol=1e-6) + np.testing.assert_allclose( + encoder_output.numpy(), src, rtol=1e-5, atol=1e-6 + ) def test_transformer_encoder_layer_attr_1(self): with fluid.dygraph.guard(fluid.CPUPlace()): @@ -349,44 +470,67 @@ class TestTransformer(unittest.TestCase): ffn_fc1_act = "relu" # 1.generate basic params - batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length = generate_basic_params( - mode="encoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + ) = generate_basic_params(mode="encoder_layer") # 2.generate input for encoder - src = np.random.rand(batch_size, sequence_length, - d_model).astype("float32") - src_mask = np.zeros((batch_size, n_head, sequence_length, - sequence_length)).astype("float32") + src = np.random.rand(batch_size, sequence_length, d_model).astype( + "float32" + ) + src_mask = np.zeros( + (batch_size, n_head, sequence_length, sequence_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf for cache in [True, False]: # paddle - encoder_layer = TransformerEncoderLayer(d_model, n_head, - dim_feedforward, - dropout, ffn_fc1_act, - attn_dropout, - act_dropout) + encoder_layer = TransformerEncoderLayer( + d_model, + n_head, + dim_feedforward, + dropout, + ffn_fc1_act, + attn_dropout, + act_dropout, + ) cache_objs = None if cache: cache_objs = encoder_layer.gen_cache(paddle.to_tensor(src)) - encoder_output = encoder_layer(paddle.to_tensor(src), - paddle.to_tensor(src_mask), - cache_objs) - encoder_output = encoder_output[0].numpy( - ) if cache else encoder_output.numpy() + encoder_output = encoder_layer( + paddle.to_tensor(src), + paddle.to_tensor(src_mask), + cache_objs, + ) + encoder_output = ( + encoder_output[0].numpy() + if cache + else encoder_output.numpy() + ) # 4.numpy: residual = src # paddle self attention - self_attn = MultiHeadAttention(d_model, - n_head, - dropout=attn_dropout) - attn_output = self_attn(paddle.to_tensor(src), - paddle.to_tensor(src), - paddle.to_tensor(src), - paddle.to_tensor(src_mask), cache_objs) - attn_output = attn_output[0].numpy( - ) if cache else attn_output.numpy() + self_attn = MultiHeadAttention( + d_model, n_head, dropout=attn_dropout + ) + attn_output = self_attn( + paddle.to_tensor(src), + paddle.to_tensor(src), + paddle.to_tensor(src), + paddle.to_tensor(src_mask), + cache_objs, + ) + attn_output = ( + attn_output[0].numpy() if cache else attn_output.numpy() + ) src = attn_output + residual src_norm = layer_norm(src, d_model, encoder_layer.norm1) @@ -396,62 +540,92 @@ class TestTransformer(unittest.TestCase): src = residual + ffn_output src = layer_norm(src, d_model, encoder_layer.norm2) - np.testing.assert_allclose(encoder_output, - src, - rtol=1e-5, - atol=1e-6) + np.testing.assert_allclose( + encoder_output, src, rtol=1e-5, atol=1e-6 + ) def test_transformer_decoder_layer(self): with fluid.dygraph.guard(fluid.CPUPlace()): paddle.framework.seed(2020) activation = "relu" normalize_before = False - batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, source_length, 
target_length = generate_basic_params( - mode="decoder_layer") - tgt = np.random.rand(batch_size, target_length, - d_model).astype("float32") - memory = np.random.rand(batch_size, source_length, - d_model).astype("float32") - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") + tgt = np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + memory = np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 for cache in [True, False]: - self_attn = MultiHeadAttention(d_model, - n_head, - dropout=attn_dropout) - cross_attn = MultiHeadAttention(d_model, - n_head, - dropout=attn_dropout) + self_attn = MultiHeadAttention( + d_model, n_head, dropout=attn_dropout + ) + cross_attn = MultiHeadAttention( + d_model, n_head, dropout=attn_dropout + ) # paddle decoderlayer: decoder_layer = TransformerDecoderLayer( - d_model, n_head, dim_feedforward, dropout, activation, - attn_dropout, act_dropout, normalize_before) + d_model, + n_head, + dim_feedforward, + dropout, + activation, + attn_dropout, + act_dropout, + normalize_before, + ) cache_objs = None if cache: cache_objs = decoder_layer.gen_cache( - paddle.to_tensor(memory)) - - decoder_output = decoder_layer(paddle.to_tensor(tgt), - paddle.to_tensor(memory), - paddle.to_tensor(tgt_mask), - paddle.to_tensor(memory_mask), - cache_objs) - - decoder_output = decoder_output[0].numpy( - ) if cache else decoder_output.numpy() + paddle.to_tensor(memory) + ) + + decoder_output = decoder_layer( + paddle.to_tensor(tgt), + paddle.to_tensor(memory), + paddle.to_tensor(tgt_mask), + paddle.to_tensor(memory_mask), + cache_objs, + ) + + decoder_output = ( + decoder_output[0].numpy() + if cache + else decoder_output.numpy() + ) # numpy: residual = tgt # self-attn - self_attn_cache = cache_objs[ - 0] if cache_objs is not None else None - tgt = self_attn(paddle.to_tensor(tgt), paddle.to_tensor(tgt), - paddle.to_tensor(tgt), - paddle.to_tensor(tgt_mask), self_attn_cache) + self_attn_cache = ( + cache_objs[0] if cache_objs is not None else None + ) + tgt = self_attn( + paddle.to_tensor(tgt), + paddle.to_tensor(tgt), + paddle.to_tensor(tgt), + paddle.to_tensor(tgt_mask), + self_attn_cache, + ) tgt = tgt[0].numpy() if cache else tgt.numpy() @@ -460,13 +634,16 @@ class TestTransformer(unittest.TestCase): tgt_norm = layer_norm(tgt, d_model, decoder_layer.norm1) residual = tgt_norm # cross-attn - cross_attn_cache = cache_objs[ - 1] if cache_objs is not None else None - tgt = cross_attn(paddle.to_tensor(tgt_norm), - paddle.to_tensor(memory), - paddle.to_tensor(memory), - paddle.to_tensor(memory_mask), - cross_attn_cache) + cross_attn_cache = ( + cache_objs[1] if cache_objs is not None else None + ) + tgt = cross_attn( + paddle.to_tensor(tgt_norm), + paddle.to_tensor(memory), + paddle.to_tensor(memory), + paddle.to_tensor(memory_mask), + cross_attn_cache, + ) tgt = tgt[0].numpy() if cache else tgt.numpy() # postprocess @@ -479,46 +656,67 @@ class 
TestTransformer(unittest.TestCase): tgt = residual + ffn_output tgt_norm = layer_norm(tgt, d_model, decoder_layer.norm3) - np.testing.assert_allclose(decoder_output, - tgt_norm, - rtol=1e-5, - atol=1e-6) + np.testing.assert_allclose( + decoder_output, tgt_norm, rtol=1e-5, atol=1e-6 + ) def test_encoder(self): - batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length = generate_basic_params( - mode="encoder_layer") - - src = np.random.rand(batch_size, sequence_length, - d_model).astype("float32") - - src_mask = np.zeros((batch_size, n_head, sequence_length, - sequence_length)).astype("float32") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + ) = generate_basic_params(mode="encoder_layer") + + src = np.random.rand(batch_size, sequence_length, d_model).astype( + "float32" + ) + + src_mask = np.zeros( + (batch_size, n_head, sequence_length, sequence_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf with fluid.dygraph.guard(fluid.CPUPlace()): - encoder_layer = TransformerEncoderLayer(d_model, n_head, - dim_feedforward, dropout) + encoder_layer = TransformerEncoderLayer( + d_model, n_head, dim_feedforward, dropout + ) num_layers = 6 encoder = TransformerEncoder(encoder_layer, num_layers) # src, src_mask - enc_output = encoder(paddle.to_tensor(src), - paddle.to_tensor(src_mask)) + enc_output = encoder( + paddle.to_tensor(src), paddle.to_tensor(src_mask) + ) def test_encoder_attr_1(self): - batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length = generate_basic_params( - mode="encoder_layer") - - src = np.random.rand(batch_size, sequence_length, - d_model).astype("float32") - - src_mask = np.zeros((batch_size, n_head, sequence_length, - sequence_length)).astype("float32") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + attn_dropout, + act_dropout, + sequence_length, + ) = generate_basic_params(mode="encoder_layer") + + src = np.random.rand(batch_size, sequence_length, d_model).astype( + "float32" + ) + + src_mask = np.zeros( + (batch_size, n_head, sequence_length, sequence_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf with fluid.dygraph.guard(fluid.CPUPlace()): for cache in [True, False]: # paddle - encoder_layer = TransformerEncoderLayer(d_model, n_head, - dim_feedforward, - dropout) + encoder_layer = TransformerEncoderLayer( + d_model, n_head, dim_feedforward, dropout + ) num_layers = 6 encoder = TransformerEncoder(encoder_layer, num_layers) cache_objs = None @@ -526,200 +724,320 @@ class TestTransformer(unittest.TestCase): cache_objs = encoder.gen_cache(paddle.to_tensor(src)) # src, src_mask - enc_output = encoder(paddle.to_tensor(src), - paddle.to_tensor(src_mask), cache_objs) + enc_output = encoder( + paddle.to_tensor(src), + paddle.to_tensor(src_mask), + cache_objs, + ) def test_decoder(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") - tgt = np.random.rand(batch_size, target_length, - d_model).astype("float32") - memory = np.random.rand(batch_size, source_length, - d_model).astype("float32") - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") + tgt = np.random.rand(batch_size, 
target_length, d_model).astype( + "float32" + ) + memory = np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 with fluid.dygraph.guard(fluid.CPUPlace()): - decoder_layer = TransformerDecoderLayer(d_model, n_head, - dim_feedforward, dropout) + decoder_layer = TransformerDecoderLayer( + d_model, n_head, dim_feedforward, dropout + ) num_layers = 6 decoder = TransformerDecoder(decoder_layer, num_layers) - output = decoder(paddle.to_tensor(tgt), paddle.to_tensor(memory), - paddle.to_tensor(tgt_mask), - paddle.to_tensor(memory_mask)) + output = decoder( + paddle.to_tensor(tgt), + paddle.to_tensor(memory), + paddle.to_tensor(tgt_mask), + paddle.to_tensor(memory_mask), + ) def test_transformer(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 with fluid.dygraph.guard(fluid.CPUPlace()): - transformer = Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward, - dropout=dropout) + transformer = Transformer( + d_model, + n_head, + dim_feedforward=dim_feedforward, + dropout=dropout, + ) src = paddle.to_tensor( - np.random.rand(batch_size, source_length, - d_model).astype("float32")) + np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + ) tgt = paddle.to_tensor( - np.random.rand(batch_size, target_length, - d_model).astype("float32")) - src_mask = np.zeros((batch_size, n_head, source_length, - source_length)).astype("float32") + np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + ) + src_mask = np.zeros( + (batch_size, n_head, source_length, source_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf src_mask = paddle.to_tensor(src_mask) - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 tgt_mask, memory_mask = paddle.to_tensor( - tgt_mask), paddle.to_tensor(memory_mask) - trans_output = transformer(src, tgt, src_mask, tgt_mask, - memory_mask) + tgt_mask + ), paddle.to_tensor(memory_mask) + trans_output = transformer( + src, tgt, src_mask, tgt_mask, memory_mask + ) def test_transformer_attr_1(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 with fluid.dygraph.guard(fluid.CPUPlace()): - transformer = 
Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward, - dropout=dropout, - weight_attr=[None], - bias_attr=[False]) + transformer = Transformer( + d_model, + n_head, + dim_feedforward=dim_feedforward, + dropout=dropout, + weight_attr=[None], + bias_attr=[False], + ) src = paddle.to_tensor( - np.random.rand(batch_size, source_length, - d_model).astype("float32")) + np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + ) tgt = paddle.to_tensor( - np.random.rand(batch_size, target_length, - d_model).astype("float32")) - src_mask = np.zeros((batch_size, n_head, source_length, - source_length)).astype("float32") + np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + ) + src_mask = np.zeros( + (batch_size, n_head, source_length, source_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf src_mask = paddle.to_tensor(src_mask) - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 tgt_mask, memory_mask = paddle.to_tensor( - tgt_mask), paddle.to_tensor(memory_mask) - trans_output = transformer(src, tgt, src_mask, tgt_mask, - memory_mask) + tgt_mask + ), paddle.to_tensor(memory_mask) + trans_output = transformer( + src, tgt, src_mask, tgt_mask, memory_mask + ) def test_transformer_attr_2(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 with fluid.dygraph.guard(fluid.CPUPlace()): - transformer = Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward, - dropout=dropout, - weight_attr=[None, None], - bias_attr=[False, False]) + transformer = Transformer( + d_model, + n_head, + dim_feedforward=dim_feedforward, + dropout=dropout, + weight_attr=[None, None], + bias_attr=[False, False], + ) src = paddle.to_tensor( - np.random.rand(batch_size, source_length, - d_model).astype("float32")) + np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + ) tgt = paddle.to_tensor( - np.random.rand(batch_size, target_length, - d_model).astype("float32")) - src_mask = np.zeros((batch_size, n_head, source_length, - source_length)).astype("float32") + np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + ) + src_mask = np.zeros( + (batch_size, n_head, source_length, source_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf src_mask = paddle.to_tensor(src_mask) - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 tgt_mask, memory_mask = paddle.to_tensor( - tgt_mask), 
paddle.to_tensor(memory_mask) - trans_output = transformer(src, tgt, src_mask, tgt_mask, - memory_mask) + tgt_mask + ), paddle.to_tensor(memory_mask) + trans_output = transformer( + src, tgt, src_mask, tgt_mask, memory_mask + ) def test_transformer_attr_3(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 with fluid.dygraph.guard(fluid.CPUPlace()): - transformer = Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward, - dropout=dropout, - weight_attr=[None, None, None], - bias_attr=[False, False, True]) + transformer = Transformer( + d_model, + n_head, + dim_feedforward=dim_feedforward, + dropout=dropout, + weight_attr=[None, None, None], + bias_attr=[False, False, True], + ) src = paddle.to_tensor( - np.random.rand(batch_size, source_length, - d_model).astype("float32")) + np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + ) tgt = paddle.to_tensor( - np.random.rand(batch_size, target_length, - d_model).astype("float32")) - src_mask = np.zeros((batch_size, n_head, source_length, - source_length)).astype("float32") + np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + ) + src_mask = np.zeros( + (batch_size, n_head, source_length, source_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf src_mask = paddle.to_tensor(src_mask) - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 tgt_mask, memory_mask = paddle.to_tensor( - tgt_mask), paddle.to_tensor(memory_mask) - trans_output = transformer(src, tgt, src_mask, tgt_mask, - memory_mask) + tgt_mask + ), paddle.to_tensor(memory_mask) + trans_output = transformer( + src, tgt, src_mask, tgt_mask, memory_mask + ) def test_transformer_attr_boolean(self): - batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params( - mode="decoder_layer") + ( + batch_size, + d_model, + n_head, + dim_feedforward, + dropout, + _, + _, + source_length, + target_length, + ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 with fluid.dygraph.guard(fluid.CPUPlace()): - transformer = Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward, - dropout=dropout, - bias_attr=False) + transformer = Transformer( + d_model, + n_head, + dim_feedforward=dim_feedforward, + dropout=dropout, + bias_attr=False, + ) src = paddle.to_tensor( - np.random.rand(batch_size, source_length, - d_model).astype("float32")) + np.random.rand(batch_size, source_length, d_model).astype( + "float32" + ) + ) tgt = paddle.to_tensor( - np.random.rand(batch_size, target_length, - d_model).astype("float32")) - src_mask = np.zeros((batch_size, n_head, source_length, - source_length)).astype("float32") + np.random.rand(batch_size, target_length, d_model).astype( + "float32" + ) + ) + 
src_mask = np.zeros( + (batch_size, n_head, source_length, source_length) + ).astype("float32") src_mask[0][0][0][0] = -np.inf src_mask = paddle.to_tensor(src_mask) - tgt_mask = np.zeros((batch_size, n_head, target_length, - target_length)).astype("float32") + tgt_mask = np.zeros( + (batch_size, n_head, target_length, target_length) + ).astype("float32") tgt_mask[0][0][0][0] = -1e9 - memory_mask = np.zeros((batch_size, n_head, target_length, - source_length)).astype("float32") + memory_mask = np.zeros( + (batch_size, n_head, target_length, source_length) + ).astype("float32") memory_mask[0][0][0][0] = -1e9 tgt_mask, memory_mask = paddle.to_tensor( - tgt_mask), paddle.to_tensor(memory_mask) - trans_output = transformer(src, tgt, src_mask, tgt_mask, - memory_mask) + tgt_mask + ), paddle.to_tensor(memory_mask) + trans_output = transformer( + src, tgt, src_mask, tgt_mask, memory_mask + ) def test_generate_square_subsequent_mask(self): length = 5 d_model, n_head, dim_feedforward = 8, 4, 64 - transformer = Transformer(d_model, - n_head, - dim_feedforward=dim_feedforward) + transformer = Transformer( + d_model, n_head, dim_feedforward=dim_feedforward + ) mask = transformer.generate_square_subsequent_mask(length) diff --git a/python/paddle/fluid/tests/unittests/test_translated_layer.py b/python/paddle/fluid/tests/unittests/test_translated_layer.py index ef75af2fb6171cefc6632a623e822e12707c16d9..2c249b0e5581ab93d7cf38b92784d6ead975cf6b 100644 --- a/python/paddle/fluid/tests/unittests/test_translated_layer.py +++ b/python/paddle/fluid/tests/unittests/test_translated_layer.py @@ -31,14 +31,13 @@ CLASS_NUM = 10 # define a random dataset class RandomDataset(paddle.io.Dataset): - def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): np.random.seed(SEED) image = np.random.random([IMAGE_SIZE]).astype('float32') - label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64') + label = np.random.randint(0, CLASS_NUM - 1, (1,)).astype('int64') return image, label def __len__(self): @@ -46,17 +45,18 @@ class RandomDataset(paddle.io.Dataset): class LinearNet(nn.Layer): - def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) self._dropout = paddle.nn.Dropout(p=0.5) - @paddle.jit.to_static(input_spec=[ - paddle.static.InputSpec(shape=[None, IMAGE_SIZE], - dtype='float32', - name='x') - ]) + @paddle.jit.to_static( + input_spec=[ + paddle.static.InputSpec( + shape=[None, IMAGE_SIZE], dtype='float32', name='x' + ) + ] + ) def forward(self, x): return self._linear(x) @@ -69,13 +69,15 @@ def train(layer, loader, loss_fn, opt): loss.backward() opt.step() opt.clear_grad() - print("Epoch {} batch {}: loss = {}".format(epoch_id, batch_id, - np.mean(loss.numpy()))) + print( + "Epoch {} batch {}: loss = {}".format( + epoch_id, batch_id, np.mean(loss.numpy()) + ) + ) return loss class TestTranslatedLayer(unittest.TestCase): - def tearDown(self): self.temp_dir.cleanup() @@ -91,17 +93,20 @@ class TestTranslatedLayer(unittest.TestCase): # create network self.layer = LinearNet() self.loss_fn = nn.CrossEntropyLoss() - self.sgd = opt.SGD(learning_rate=0.001, - parameters=self.layer.parameters()) + self.sgd = opt.SGD( + learning_rate=0.001, parameters=self.layer.parameters() + ) # create data loader dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) - self.loader = paddle.io.DataLoader(dataset, - places=place, - batch_size=BATCH_SIZE, - shuffle=True, - drop_last=True, - num_workers=0) + self.loader = paddle.io.DataLoader( + dataset, + 
places=place, + batch_size=BATCH_SIZE, + shuffle=True, + drop_last=True, + num_workers=0, + ) self.temp_dir = tempfile.TemporaryDirectory() @@ -109,8 +114,9 @@ class TestTranslatedLayer(unittest.TestCase): train(self.layer, self.loader, self.loss_fn, self.sgd) # save - self.model_path = os.path.join(self.temp_dir.name, - './linear.example.model') + self.model_path = os.path.join( + self.temp_dir.name, './linear.example.model' + ) paddle.jit.save(self.layer, self.model_path) def test_inference_and_fine_tuning(self): @@ -142,15 +148,18 @@ class TestTranslatedLayer(unittest.TestCase): # fine-tuning translated_layer.train() - sgd = opt.SGD(learning_rate=0.001, - parameters=translated_layer.parameters()) + sgd = opt.SGD( + learning_rate=0.001, parameters=translated_layer.parameters() + ) loss = train(translated_layer, self.loader, self.loss_fn, sgd) np.testing.assert_array_equal( orig_loss.numpy(), loss.numpy(), err_msg='original loss:\n{}\nnew loss:\n{}\n'.format( - orig_loss.numpy(), loss.numpy())) + orig_loss.numpy(), loss.numpy() + ), + ) def test_get_program(self): # load @@ -171,9 +180,9 @@ class TestTranslatedLayer(unittest.TestCase): translated_layer = paddle.jit.load(self.model_path) expect_spec = [ - paddle.static.InputSpec(shape=[None, IMAGE_SIZE], - dtype='float32', - name='x') + paddle.static.InputSpec( + shape=[None, IMAGE_SIZE], dtype='float32', name='x' + ) ] actual_spec = translated_layer._input_spec() @@ -185,9 +194,11 @@ class TestTranslatedLayer(unittest.TestCase): translated_layer = paddle.jit.load(self.model_path) expect_spec = [ - paddle.static.InputSpec(shape=[None, CLASS_NUM], - dtype='float32', - name='translated_layer/scale_0.tmp_1') + paddle.static.InputSpec( + shape=[None, CLASS_NUM], + dtype='float32', + name='translated_layer/scale_0.tmp_1', + ) ] actual_spec = translated_layer._output_spec() diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index 390208c0cddc6e3355b239add181fb090f4e174e..67c2b8772c5ddde71a2d1b4a13d03b717b4bc0bd 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -27,7 +27,6 @@ paddle.enable_static() class TestTransposeOp(OpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -39,7 +38,7 @@ class TestTransposeOp(OpTest): } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def init_op_type(self): @@ -58,84 +57,72 @@ class TestTransposeOp(OpTest): class TestCase0(TestTransposeOp): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) class TestCase1(TestTransposeOp): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) class TestCase2(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) class TestCase3(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) class TestCase4(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) class TestCase5(TestTransposeOp): - def initTestCase(self): self.shape = (2, 16, 96) self.axis = (0, 2, 1) class TestCase6(TestTransposeOp): - def initTestCase(self): self.shape = (2, 10, 12, 16) self.axis = (3, 1, 2, 0) class TestCase7(TestTransposeOp): - def initTestCase(self): self.shape = (2, 10, 2, 16) self.axis = 
(0, 1, 3, 2) class TestCase8(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (0, 1, 3, 2, 4, 5, 6, 7) class TestCase9(TestTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) class TestCase_ZeroDim(TestTransposeOp): - def initTestCase(self): self.shape = () self.axis = () class TestAutoTuneTransposeOp(OpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -147,7 +134,7 @@ class TestAutoTuneTransposeOp(OpTest): } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def initTestCase(self): @@ -170,7 +157,6 @@ class TestAutoTuneTransposeOp(OpTest): class TestTransposeBF16Op(OpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -183,11 +169,10 @@ class TestTransposeBF16Op(OpTest): 'use_mkldnn': self.use_mkldnn, } self.outputs = { - 'XShape': - convert_float_to_uint16( - np.random.random(self.shape).astype("float32")), - 'Out': - self.inputs['X'].transpose(self.axis) + 'XShape': convert_float_to_uint16( + np.random.random(self.shape).astype("float32") + ), + 'Out': self.inputs['X'].transpose(self.axis), } def init_op_type(self): @@ -206,109 +191,99 @@ class TestTransposeBF16Op(OpTest): class TestTransposeOpBool(TestTransposeOp): - def test_check_grad(self): pass class TestTransposeOpBool1D(TestTransposeOpBool): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool2D(TestTransposeOpBool): - def initTestCase(self): self.shape = (3, 40) self.axis = (1, 0) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool3D(TestTransposeOpBool): - def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool4D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool5D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool6D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': 
self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool7D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3) self.axis = (0, 1, 3, 2, 4, 5, 6) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpBool8D(TestTransposeOpBool): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) self.inputs = {'X': np.random.random(self.shape).astype("bool")} self.outputs = { 'XShape': np.random.random(self.shape).astype("bool"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } class TestTransposeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -322,9 +297,9 @@ class TestTransposeOpError(unittest.TestCase): def test_x_dtype_check(): # the Input(x)'s dtype must be one of [bool, float16, float32, float64, int32, int64] - x1 = fluid.layers.data(name='x1', - shape=[10, 5, 3], - dtype='int8') + x1 = fluid.layers.data( + name='x1', shape=[10, 5, 3], dtype='int8' + ) fluid.layers.transpose(x1, perm=[1, 0, 2]) self.assertRaises(TypeError, test_x_dtype_check) @@ -350,7 +325,6 @@ class TestTransposeOpError(unittest.TestCase): class TestTransposeApi(unittest.TestCase): - def test_static_out(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): @@ -360,8 +334,9 @@ class TestTransposeApi(unittest.TestCase): place = paddle.CPUPlace() exe = paddle.static.Executor(place) x_np = np.random.random([2, 3, 4]).astype("float32") - result1, result2 = exe.run(feed={"x": x_np}, - fetch_list=[x_trans1, x_trans2]) + result1, result2 = exe.run( + feed={"x": x_np}, fetch_list=[x_trans1, x_trans2] + ) expected_result1 = np.transpose(x_np, [1, 0, 2]) expected_result2 = np.transpose(x_np, (2, 1, 0)) @@ -387,7 +362,6 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program()): data = fluid.data(shape=[10], dtype="float64", name="data") @@ -395,7 +369,7 @@ class TestTAPI(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) data_np = np.random.random([10]).astype("float64") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -405,7 +379,7 @@ class TestTAPI(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) data_np = np.random.random([10, 5]).astype("float64") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -415,7 +389,7 @@ class TestTAPI(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) data_np = np.random.random([1, 5]).astype("float64") - result, = exe.run(feed={"data": data_np}, fetch_list=[data_t]) + (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) @@ -454,7 +428,6 @@ class TestTAPI(unittest.TestCase): class 
TestMoveAxis(unittest.TestCase): - def test_moveaxis1(self): x_np = np.random.randn(2, 3, 4, 5, 7) expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0]) @@ -497,8 +470,9 @@ class TestMoveAxis(unittest.TestCase): def test_moveaxis3(self): paddle.disable_static() - x = paddle.to_tensor([[1 + 1j, -1 - 1j], [1 + 1j, -1 - 1j], - [1 + 1j, -1 - 1j]]) + x = paddle.to_tensor( + [[1 + 1j, -1 - 1j], [1 + 1j, -1 - 1j], [1 + 1j, -1 - 1j]] + ) out = x.moveaxis(0, 1) self.assertEqual(out.shape, [2, 3]) paddle.enable_static() @@ -535,7 +509,6 @@ class TestMoveAxis(unittest.TestCase): class TestTransposeDoubleGradCheck(unittest.TestCase): - def transpose_wrapper(self, x): return paddle.transpose(x[0], [1, 0, 2]) @@ -550,17 +523,13 @@ class TestTransposeDoubleGradCheck(unittest.TestCase): out = paddle.transpose(data, [1, 0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.transpose_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.transpose_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -572,7 +541,6 @@ class TestTransposeDoubleGradCheck(unittest.TestCase): class TestTransposeTripleGradCheck(unittest.TestCase): - def transpose_wrapper(self, x): return paddle.transpose(x[0], [1, 0, 2]) @@ -587,17 +555,13 @@ class TestTransposeTripleGradCheck(unittest.TestCase): out = paddle.transpose(data, [1, 0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.transpose_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.transpose_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -609,7 +573,6 @@ class TestTransposeTripleGradCheck(unittest.TestCase): class TestTransposeAPI_ZeroDim(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_tree_conv_op.py b/python/paddle/fluid/tests/unittests/test_tree_conv_op.py index 114d713b092ac695d4f6526bee88ef2bdd346e3f..d4582c3ad2e504d14b0d8b27fd9c1cae362a9f03 100644 --- a/python/paddle/fluid/tests/unittests/test_tree_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_tree_conv_op.py @@ -51,7 +51,6 @@ def collect_node_patch(og, max_depth): class TestTreeConvOp(OpTest): - def setUp(self): self.n = 17 self.fea_size = 3 @@ -60,22 +59,51 @@ class TestTreeConvOp(OpTest): self.batch_size = 2 self.num_filters = 1 adj_array = [ - 1, 2, 1, 3, 1, 4, 1, 5, 2, 6, 2, 7, 2, 8, 4, 9, 4, 10, 5, 11, 6, 12, - 6, 13, 9, 14, 9, 15, 9, 16, 9, 17 + 1, + 2, + 1, + 3, + 1, + 4, + 1, + 5, + 2, + 6, + 2, + 7, + 2, + 8, + 4, + 9, + 4, + 10, + 5, + 11, + 6, + 12, + 6, + 13, + 9, + 14, + 9, + 15, + 9, + 16, + 9, + 17, ] adj = np.array(adj_array).reshape((1, self.n - 1, 2)).astype('int32') adj = 
np.tile(adj, (self.batch_size, 1, 1)) self.op_type = 'tree_conv' vectors = np.random.random( - (self.batch_size, self.n, self.fea_size)).astype('float64') + (self.batch_size, self.n, self.fea_size) + ).astype('float64') self.inputs = { - 'EdgeSet': - adj, - 'NodesVector': - vectors, - 'Filter': - np.random.random((self.fea_size, 3, self.output_size, - self.num_filters)).astype('float64') + 'EdgeSet': adj, + 'NodesVector': vectors, + 'Filter': np.random.random( + (self.fea_size, 3, self.output_size, self.num_filters) + ).astype('float64'), } self.attrs = {'max_depth': self.max_depth} vectors = [] @@ -90,9 +118,9 @@ class TestTreeConvOp(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['NodesVector', 'Filter'], - 'Out', - max_relative_error=0.5) + self.check_grad( + ['NodesVector', 'Filter'], 'Out', max_relative_error=0.5 + ) def get_output_naive(self, batch_id): og = [[] for i in range(1, self.n + 2)] @@ -107,71 +135,88 @@ class TestTreeConvOp(OpTest): result = np.zeros((1, W.shape[2], W.shape[3])) for v in patch: eta_t = float(v[4] - v[3]) / float(v[4]) - eta_l = (1.0 - eta_t) * (0.5 if v[2] == 1 else - float(v[1] - 1.0) / float(v[2] - 1.0)) + eta_l = (1.0 - eta_t) * ( + 0.5 if v[2] == 1 else float(v[1] - 1.0) / float(v[2] - 1.0) + ) eta_r = (1.0 - eta_t) * (1.0 - eta_l) x = self.inputs['NodesVector'][batch_id][v[0] - 1] - eta = np.array([eta_l, eta_r, eta_t]).reshape( - (3, 1)).astype('float64') + eta = ( + np.array([eta_l, eta_r, eta_t]) + .reshape((3, 1)) + .astype('float64') + ) Wconvi = np.tensordot(eta, W, axes=([0], [0])) x = np.array(x).reshape((1, 1, self.fea_size)) res = np.tensordot(x, Wconvi, axes=2) result = result + res vec.append(result) vec = np.concatenate(vec, axis=0) - vec = np.concatenate([ - vec, - np.zeros((self.n - vec.shape[0], W.shape[2], W.shape[3]), - dtype='float64') - ], - axis=0) + vec = np.concatenate( + [ + vec, + np.zeros( + (self.n - vec.shape[0], W.shape[2], W.shape[3]), + dtype='float64', + ), + ], + axis=0, + ) return vec class TestTreeConv_OpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): nodes_vector_1 = np.random.random((10, 5)).astype("float32") - edge_set_1 = fluid.layers.data(name='edge_set_1', - shape=[10, 2], - dtype='float32') + edge_set_1 = fluid.layers.data( + name='edge_set_1', shape=[10, 2], dtype='float32' + ) # the nodes_vector of tree_conv must be Variable. - self.assertRaises(TypeError, fluid.contrib.layers.tree_conv, - nodes_vector_1, edge_set_1, 3) - - nodes_vector_2 = fluid.layers.data(name='vectors2', - shape=[10, 5], - dtype='float32') + self.assertRaises( + TypeError, + fluid.contrib.layers.tree_conv, + nodes_vector_1, + edge_set_1, + 3, + ) + + nodes_vector_2 = fluid.layers.data( + name='vectors2', shape=[10, 5], dtype='float32' + ) edge_set_2 = np.random.random((10, 2)).astype("float32") # the edge_set of tree_conv must be Variable. 
- self.assertRaises(TypeError, fluid.contrib.layers.tree_conv, - nodes_vector_2, edge_set_2, 3) + self.assertRaises( + TypeError, + fluid.contrib.layers.tree_conv, + nodes_vector_2, + edge_set_2, + 3, + ) class TestDygraphTreeConv_OpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): - TreeConv = fluid.dygraph.nn.TreeConv(feature_size=5, - output_size=6, - num_filters=1, - max_depth=2) + TreeConv = fluid.dygraph.nn.TreeConv( + feature_size=5, output_size=6, num_filters=1, max_depth=2 + ) nodes_vector_1 = np.random.random((10, 5)).astype("float32") - edge_set_1 = fluid.layers.data(name='edge_set_1', - shape=[10, 2], - dtype='float32') + edge_set_1 = fluid.layers.data( + name='edge_set_1', shape=[10, 2], dtype='float32' + ) # the nodes_vector of TreeConv must be Variable. - self.assertRaises(TypeError, TreeConv, nodes_vector_1, edge_set_1, - 3) + self.assertRaises( + TypeError, TreeConv, nodes_vector_1, edge_set_1, 3 + ) - nodes_vector_2 = fluid.layers.data(name='vectors2', - shape=[10, 5], - dtype='float32') + nodes_vector_2 = fluid.layers.data( + name='vectors2', shape=[10, 5], dtype='float32' + ) edge_set_2 = np.random.random((10, 2)).astype("float32") # the edge_set of TreeConv must be Variable. - self.assertRaises(TypeError, TreeConv, nodes_vector_2, edge_set_2, - 3) + self.assertRaises( + TypeError, TreeConv, nodes_vector_2, edge_set_2, 3 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py index fabef08b1891e71461e371b92ae3cb2c312b2a5f..c71619e31374201ad05d01871fd697a1749872a2 100644 --- a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py @@ -41,8 +41,9 @@ class TestTriangularSolveOp(OpTest): self.dtype = "float64" def set_output(self): - self.output = np.linalg.solve(np.triu(self.inputs['X']), - self.inputs['Y']) + self.output = np.linalg.solve( + np.triu(self.inputs['X']), self.inputs['Y'] + ) def setUp(self): self.op_type = "triangular_solve" @@ -51,7 +52,7 @@ class TestTriangularSolveOp(OpTest): self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype), - 'Y': np.random.random(self.y_shape).astype(self.dtype) + 'Y': np.random.random(self.y_shape).astype(self.dtype), } self.attrs = { 'upper': self.upper, @@ -144,24 +145,26 @@ class TestTriangularSolveOp5(TestTriangularSolveOp): def set_output(self): x = np.triu(self.inputs['X']) - np.fill_diagonal(x, 1.) + np.fill_diagonal(x, 1.0) y = self.inputs['Y'] self.output = np.linalg.solve(x, y) def test_check_grad_normal(self): x = np.triu(self.inputs['X']) - np.fill_diagonal(x, 1.) + np.fill_diagonal(x, 1.0) grad_out = np.ones([10, 10]).astype('float64') grad_y = np.linalg.solve(x.transpose(1, 0), grad_out) grad_x = -np.matmul(grad_y, self.output.transpose(1, 0)) grad_x = np.triu(grad_x) - np.fill_diagonal(grad_x, 0.) 
+ np.fill_diagonal(grad_x, 0.0) - self.check_grad(['X', 'Y'], - 'Out', - user_defined_grads=[grad_x, grad_y], - user_defined_grad_outputs=[grad_out]) + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[grad_x, grad_y], + user_defined_grad_outputs=[grad_out], + ) # 4D(broadcast) + 4D(broadcast) @@ -245,7 +248,6 @@ class TestTriangularSolveOp9(TestTriangularSolveOp): class TestTriangularSolveAPI(unittest.TestCase): - def setUp(self): np.random.seed(2021) self.place = [paddle.CPUPlace()] @@ -264,12 +266,11 @@ class TestTriangularSolveAPI(unittest.TestCase): z_np = np.linalg.solve(np.triu(x_np), y_np) exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={ - "x": x_np, - "y": y_np - }, - fetch_list=[z]) + fetches = exe.run( + fluid.default_main_program(), + feed={"x": x_np, "y": y_np}, + fetch_list=[z], + ) np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05) def test_static(self): @@ -277,7 +278,6 @@ class TestTriangularSolveAPI(unittest.TestCase): self.check_static_result(place=place) def test_dygraph(self): - def run(place): paddle.disable_static(place) x_np = np.random.random([3, 3]).astype(self.dtype) @@ -297,14 +297,15 @@ class TestTriangularSolveAPI(unittest.TestCase): class TestTriangularSolveOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of solve_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) - y1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) + y1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace() + ) self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1) # The data type of input must be float32 or float64. @@ -323,20 +324,23 @@ class TestTriangularSolveOpError(unittest.TestCase): # The number of dimensions of input'X must be >= 2. x5 = fluid.data(name="x5", shape=[30], dtype="float64") y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64") - self.assertRaises(ValueError, paddle.linalg.triangular_solve, x5, - y5) + self.assertRaises( + ValueError, paddle.linalg.triangular_solve, x5, y5 + ) # The number of dimensions of input'Y must be >= 2. 
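A quick numerical cross-check, not part of the patch, of the analytic gradients that test_check_grad_normal above hands to check_grad for the unit-diagonal, upper-triangular case: with Out = solve(X, Y) and a loss that simply sums Out, the Y-adjoint is solve(X^T, grad_out) and the X-adjoint is -(Y-adjoint @ Out^T), masked to the strictly upper triangle because only those entries of X are free. The matrix size, seed, and tolerance below are arbitrary choices for this sketch.

import numpy as np

np.random.seed(0)
n = 4
X = np.triu(np.random.rand(n, n))
np.fill_diagonal(X, 1.0)                 # unit diagonal, as in the test case
Y = np.random.rand(n, n)
out = np.linalg.solve(X, Y)

grad_out = np.ones_like(out)             # d(sum(Out))/dOut
grad_y = np.linalg.solve(X.T, grad_out)  # same formula the test uses
grad_x = np.triu(-grad_y @ out.T)
np.fill_diagonal(grad_x, 0.0)

# finite-difference check on one strictly-upper entry of X
eps = 1e-6
X_pert = X.copy()
X_pert[0, 2] += eps
fd = (np.linalg.solve(X_pert, Y).sum() - out.sum()) / eps
assert abs(fd - grad_x[0, 2]) < 1e-4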
x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64") y6 = fluid.data(name="y6", shape=[30], dtype="float64") - self.assertRaises(ValueError, paddle.linalg.triangular_solve, x6, - y6) + self.assertRaises( + ValueError, paddle.linalg.triangular_solve, x6, y6 + ) # The inner-most 2 dimensions of input'X should be equal to each other x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") - self.assertRaises(ValueError, paddle.linalg.triangular_solve, x7, - y7) + self.assertRaises( + ValueError, paddle.linalg.triangular_solve, x7, y7 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_tril_indices_op.py b/python/paddle/fluid/tests/unittests/test_tril_indices_op.py index fae0221240c199445fcb46017edbc56e68e38c21..0d18b32304ead736edd2dc6587db597c8568b2da 100644 --- a/python/paddle/fluid/tests/unittests/test_tril_indices_op.py +++ b/python/paddle/fluid/tests/unittests/test_tril_indices_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTrilIndicesOp(OpTest): - def setUp(self): self.op_type = "tril_indices" self.inputs = {} @@ -34,13 +33,13 @@ class TestTrilIndicesOp(OpTest): def init_config(self): self.attrs = {'rows': 4, 'cols': 4, 'offset': -1} - self.target = np.tril_indices(self.attrs['rows'], self.attrs['offset'], - self.attrs['cols']) + self.target = np.tril_indices( + self.attrs['rows'], self.attrs['offset'], self.attrs['cols'] + ) self.target = np.array(self.target) class TestTrilIndicesOpCase1(TestTrilIndicesOp): - def init_config(self): self.attrs = {'rows': 0, 'cols': 0, 'offset': 0} self.target = np.tril_indices(0, 0, 0) @@ -48,37 +47,40 @@ class TestTrilIndicesOpCase1(TestTrilIndicesOp): class TestTrilIndicesOpCase2(TestTrilIndicesOp): - def init_config(self): self.attrs = {'rows': 4, 'cols': 4, 'offset': 2} - self.target = np.tril_indices(self.attrs['rows'], self.attrs['offset'], - self.attrs['cols']) + self.target = np.tril_indices( + self.attrs['rows'], self.attrs['offset'], self.attrs['cols'] + ) self.target = np.array(self.target) class TestTrilIndicesAPICaseStatic(unittest.TestCase): - def test_static(self): - places = [ - paddle.CPUPlace(), paddle.fluid.CUDAPlace(0) - ] if fluid.core.is_compiled_with_cuda() else [paddle.CPUPlace()] + places = ( + [paddle.CPUPlace(), paddle.fluid.CUDAPlace(0)] + if fluid.core.is_compiled_with_cuda() + else [paddle.CPUPlace()] + ) paddle.enable_static() for place in places: - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data1 = paddle.tril_indices(4, 4, -1) exe1 = paddle.static.Executor(place) - result1, = exe1.run(feed={}, fetch_list=[data1]) + (result1,) = exe1.run(feed={}, fetch_list=[data1]) expected_result1 = np.tril_indices(4, -1, 4) np.testing.assert_allclose(result1, expected_result1, rtol=1e-05) class TestTrilIndicesAPICaseDygraph(unittest.TestCase): - def test_dygraph(self): - places = [ - paddle.CPUPlace(), paddle.fluid.CUDAPlace(0) - ] if fluid.core.is_compiled_with_cuda() else [paddle.CPUPlace()] + places = ( + [paddle.CPUPlace(), paddle.fluid.CUDAPlace(0)] + if fluid.core.is_compiled_with_cuda() + else [paddle.CPUPlace()] + ) for place in places: with fluid.dygraph.base.guard(place=place): out1 = paddle.tril_indices(4, 4, 2) @@ -91,9 +93,7 @@ class TestTrilIndicesAPICaseDygraph(unittest.TestCase): class TestTrilIndicesAPICaseError(unittest.TestCase): - def 
test_case_error(self): - def test_num_rows_type_check(): out1 = paddle.tril_indices(1.0, 1, 2) @@ -111,14 +111,14 @@ class TestTrilIndicesAPICaseError(unittest.TestCase): class TestTrilIndicesAPICaseDefault(unittest.TestCase): - def test_default_CPU(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.tril_indices(4, None, 2) exe = paddle.static.Executor(paddle.CPUPlace()) - result, = exe.run(feed={}, fetch_list=[data]) + (result,) = exe.run(feed={}, fetch_list=[data]) expected_result = np.tril_indices(4, 2) np.testing.assert_allclose(result, expected_result, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py index 07cb0aca214f0717dc115b5770417d05215c2c49..a17a1f24928ed7f603c5a3fe16ac5f5c82f56242 100644 --- a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py +++ b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py @@ -21,12 +21,13 @@ from paddle.fluid.framework import Program, program_guard class TrilTriuOpDefaultTest(OpTest): - """ the base class of other op testcases - """ + """the base class of other op testcases""" def setUp(self): self.initTestCase() - self.python_api = paddle.tril if self.real_op_type == 'tril' else paddle.triu + self.python_api = ( + paddle.tril if self.real_op_type == 'tril' else paddle.triu + ) self.real_np_op = getattr(np, self.real_op_type) self.op_type = "tril_triu" @@ -36,9 +37,9 @@ class TrilTriuOpDefaultTest(OpTest): 'lower': True if self.real_op_type == 'tril' else False, } self.outputs = { - 'Out': - self.real_np_op(self.X, self.diagonal) - if self.diagonal else self.real_np_op(self.X) + 'Out': self.real_np_op(self.X, self.diagonal) + if self.diagonal + else self.real_np_op(self.X) } def test_check_output(self): @@ -59,27 +60,29 @@ def case_generator(op_type, Xshape, diagonal, expected): If arg`expercted` is 'success', it will register an Optest case and expect to pass. Otherwise, it will register an API case and check the expect failure. 
""" - cls_name = "{0}_{1}_shape_{2}_diag_{3}".format(expected, op_type, Xshape, - diagonal) + cls_name = "{0}_{1}_shape_{2}_diag_{3}".format( + expected, op_type, Xshape, diagonal + ) errmsg = { - "diagonal: TypeError": - "diagonal in {} must be a python Int".format(op_type), - "input: ValueError": - "x shape in {} must be at least 2-D".format(op_type), + "diagonal: TypeError": "diagonal in {} must be a python Int".format( + op_type + ), + "input: ValueError": "x shape in {} must be at least 2-D".format( + op_type + ), } class FailureCase(unittest.TestCase): - def test_failure(self): paddle.enable_static() data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) - with self.assertRaisesRegexp(eval(expected.split(':')[-1]), - errmsg[expected]): + with self.assertRaisesRegexp( + eval(expected.split(':')[-1]), errmsg[expected] + ): getattr(tensor, op_type)(x=data, diagonal=diagonal) class SuccessCase(TrilTriuOpDefaultTest): - def initTestCase(self): paddle.enable_static() @@ -104,15 +107,13 @@ cases = { (20, 20): [ '2020', [20], - { - 20: 20 - }, + {20: 20}, (20, 20), 20.20, ], # str, list, dict, tuple, float }, 'input: ValueError': { - (2020, ): [None], + (2020,): [None], }, } for _op_type in ['tril', 'triu']: @@ -121,12 +122,15 @@ for _op_type in ['tril', 'triu']: list( map( lambda _diagonal: case_generator( - _op_type, _Xshape, _diagonal, _expected), _diaglist)) + _op_type, _Xshape, _diagonal, _expected + ), + _diaglist, + ) + ) class TestTrilTriuOpAPI(unittest.TestCase): - """ test case by using API and has -1 dimension - """ + """test case by using API and has -1 dimension""" def test_api(self): paddle.enable_static() @@ -140,8 +144,11 @@ class TestTrilTriuOpAPI(unittest.TestCase): x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') tril_out, triu_out = tensor.tril(x), tensor.triu(x) - place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) tril_out, triu_out = exe.run( fluid.default_main_program(), @@ -159,8 +166,10 @@ class TestTrilTriuOpAPI(unittest.TestCase): with fluid.dygraph.guard(): data = np.random.random([1, 9, 9, 4]).astype(dtype) x = fluid.dygraph.to_variable(data) - tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu( - x).numpy() + tril_out, triu_out = ( + tensor.tril(x).numpy(), + tensor.triu(x).numpy(), + ) np.testing.assert_allclose(tril_out, np.tril(data), rtol=1e-05) np.testing.assert_allclose(triu_out, np.triu(data), rtol=1e-05) @@ -176,12 +185,17 @@ class TestTrilTriuOpAPI(unittest.TestCase): x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') triu_out = fluid.layers.triu(x) - place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if fluid.core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - triu_out = exe.run(fluid.default_main_program(), - feed={"x": data}, - fetch_list=[triu_out]) + triu_out = exe.run( + fluid.default_main_program(), + feed={"x": data}, + fetch_list=[triu_out], + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py index 2548c0aa2db7d51497e3cd3034abd4f99a848400..901d25846613fcf7e923244c7ad147f821d5816a 100755 --- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py @@ -20,15 +20,17 @@ import paddle.fluid as fluid from paddle.nn.functional import interpolate -def trilinear_interp_np(input, - out_d, - out_h, - out_w, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCDHW'): +def trilinear_interp_np( + input, + out_d, + out_h, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCDHW', +): """trilinear interpolation implement in shape [N, C, D, H, W]""" if data_layout == "NDHWC": input = np.transpose(input, (0, 4, 1, 2, 3)) # NDHWC => NCDHW @@ -44,17 +46,17 @@ def trilinear_interp_np(input, ratio_d = ratio_h = ratio_w = 0.0 if out_d > 1: - if (align_corners): + if align_corners: ratio_d = (in_d - 1.0) / (out_d - 1.0) else: ratio_d = 1.0 * in_d / out_d if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: ratio_w = 1.0 * in_w / out_w @@ -62,14 +64,14 @@ def trilinear_interp_np(input, out = np.zeros((batch_size, channel, out_d, out_h, out_w)) for i in range(out_d): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: d = int(ratio_d * (i + 0.5) - 0.5) else: d = int(ratio_d * i) d = max(0, d) did = 1 if d < in_d - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_d = max(ratio_d * (i + 0.5) - 0.5, 0) d1lambda = idx_src_d - d else: @@ -77,14 +79,14 @@ def trilinear_interp_np(input, d2lambda = 1.0 - d1lambda for j in range(out_h): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (j + 0.5) - 0.5) else: h = int(ratio_h * j) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (j + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: @@ -92,30 +94,42 @@ def trilinear_interp_np(input, h2lambda = 1.0 - h1lambda for k in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (k + 0.5) - 0.5) else: w = int(ratio_w * k) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (k + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * k - w w2lambda = 1.0 - w1lambda - out[:, :, i, j, k] = \ - d2lambda * \ - (h2lambda * (w2lambda * input[:, :, d, h, w] + \ - w1lambda * input[:, :, d, h, w+wid]) + \ - h1lambda * (w2lambda * input[:, :, d, h+hid, w] + \ - w1lambda * input[:, :, d, h+hid, w+wid])) + \ - d1lambda * \ - (h2lambda * (w2lambda * input[:, :, d+did, h, w] + \ - w1lambda * input[:, :, d+did, h, w+wid]) + \ - h1lambda * (w2lambda * input[:, :, d+did, h+hid, w] + \ - w1lambda * input[:, :, d+did, h+hid, w+wid])) + out[:, :, i, j, k] = d2lambda * ( + h2lambda + * ( + w2lambda * input[:, :, d, h, w] + + w1lambda * input[:, :, d, h, w + wid] + ) + + h1lambda + * ( + w2lambda * input[:, :, d, h + hid, w] + + w1lambda * input[:, :, d, h + hid, w + wid] + ) + ) + d1lambda * ( + h2lambda + * ( + w2lambda * input[:, :, d + did, h, w] + + w1lambda * input[:, :, d + did, h, w + wid] + ) + + h1lambda + * ( + w2lambda * input[:, :, d + did, h + hid, w] + + w1lambda * input[:, :, d + did, h + hid, w + wid] + ) + ) if 
data_layout == "NDHWC": out = np.transpose(out, (0, 2, 3, 4, 1)) # NCDHW => NDHWC @@ -123,7 +137,6 @@ def trilinear_interp_np(input, class TestTrilinearInterpOp(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -153,10 +166,17 @@ class TestTrilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode, - self.data_layout) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -177,7 +197,7 @@ class TestTrilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': data_layout + 'data_layout': data_layout, } self.outputs = {'Out': output_np} @@ -185,10 +205,9 @@ class TestTrilinearInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -196,142 +215,132 @@ class TestTrilinearInterpOp(OpTest): self.out_d = 2 self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase1(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 1, 7, 8, 9] self.out_d = 1 self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase2(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 9, 6, 8] self.out_d = 12 self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase3(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 16, 8, 4] self.out_d = 32 self.out_h = 16 self.out_w = 8 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase4(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [4, 1, 7, 8, 9] self.out_d = 1 self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2, 2]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase5(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 3, 9, 6, 8] self.out_d = 12 self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11, 11]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase6(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] self.out_d = 8 self.out_h = 32 self.out_w = 16 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([17, 9, 5]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpSame(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] self.out_d = 16 self.out_h = 8 self.out_w = 4 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpSameHW(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] self.out_d = 8 self.out_h = 8 self.out_w = 4 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpActualShape(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 16, 8, 4] self.out_d = 64 self.out_h = 32 self.out_w = 16 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([33, 19, 7]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpDatalayout(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 4, 4, 4, 3] self.out_d = 2 self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -339,15 +348,15 @@ class TestTrilinearInterpDatalayout(TestTrilinearInterpOp): class TestTrilinearInterpOpUint8(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None self.init_test_case() self.op_type = "trilinear_interp" self.check_eager = True - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( + low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale > 0: out_d = int(self.input_shape[2] * self.scale) @@ -358,9 +367,16 @@ class TestTrilinearInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -373,14 +389,14 @@ class TestTrilinearInterpOpUint8(OpTest): 'scale': self.scale, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=self.check_eager) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -388,87 +404,79 @@ class TestTrilinearInterpOpUint8(OpTest): self.out_d = 13 self.out_h = 10 self.out_w = 9 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 16, 8, 4] self.out_d = 13 self.out_h = 7 self.out_w = 2 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [4, 1, 7, 8, 9] self.out_d = 3 self.out_h = 5 self.out_w = 13 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([6, 15, 21]).astype("int32") self.align_corners = True self.align_mode = 1 class TestTrilinearInterpOtherMethod1(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestTrilinearInterpWithMethod2(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestTrilinearInterpWithMethod3(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestTrilinearInterpScale1(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] self.out_d = 82 self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpScale2(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] self.out_d = 60 self.out_h = 40 self.out_w = 25 - self.scale = 1. + self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpScale3(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] @@ -481,7 +489,6 @@ class TestTrilinearInterpScale3(TestTrilinearInterpOp): class TestTrilinearInterpZero(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 11] @@ -494,7 +501,6 @@ class TestTrilinearInterpZero(TestTrilinearInterpOp): class TestTrilinearInterpOp_attr_tensor(OpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -506,7 +512,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): self.attrs = { 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } input_np = np.random.random(self.input_shape).astype("float32") @@ -530,27 +536,34 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.check_eager = False self.attrs['out_d'] = self.out_d self.attrs['out_h'] = self.out_h self.attrs['out_w'] = self.out_w - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.outputs = {'Out': output_np} def test_check_output(self): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -558,7 +571,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): self.out_d = 2 self.out_h = 3 self.out_w = 3 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = [2, 3, 3] self.align_corners = True self.align_mode = 1 @@ -566,7 +579,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): # out_size is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case1(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 9, 6, 8] @@ -581,14 +593,13 @@ class TestTrilinearInterp_attr_tensor_Case1(TestTrilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case2(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 8, 8, 4] self.out_d = 16 self.out_h = 12 self.out_w = 4 - self.scale = 0. + self.scale = 0.0 self.out_size = [16, 4, 10] self.align_corners = True self.align_mode = 1 @@ -597,7 +608,6 @@ class TestTrilinearInterp_attr_tensor_Case2(TestTrilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case3(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 8, 8, 4] @@ -612,7 +622,6 @@ class TestTrilinearInterp_attr_tensor_Case3(TestTrilinearInterpOp_attr_tensor): class TestTrilinearInterpAPI(unittest.TestCase): - def test_case(self): x = fluid.data(name="x", shape=[2, 3, 6, 9, 4], dtype="float32") y = fluid.data(name="y", shape=[2, 6, 9, 4, 3], dtype="float32") @@ -620,31 +629,28 @@ class TestTrilinearInterpAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32") actual_size = fluid.data(name="actual_size", shape=[3], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) - out1 = fluid.layers.resize_trilinear(y, - out_shape=[12, 18, 8], - data_format='NDHWC') + out1 = fluid.layers.resize_trilinear( + y, out_shape=[12, 18, 8], data_format='NDHWC' + ) out2 = fluid.layers.resize_trilinear(x, out_shape=[12, dim, 8]) out3 = fluid.layers.resize_trilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_trilinear(x, - out_shape=[4, 4, 8], - actual_shape=actual_size) + out4 = fluid.layers.resize_trilinear( + x, out_shape=[4, 4, 8], actual_shape=actual_size + ) out5 = fluid.layers.resize_trilinear(x, scale=scale_tensor) - out6 = interpolate(x, - scale_factor=scale_tensor, - mode='trilinear', - data_format="NCDHW") - out7 = interpolate(x, - size=[4, 4, 8], - mode='trilinear', - data_format="NCDHW") - out8 = interpolate(x, - size=shape_tensor, - mode='trilinear', - data_format="NCDHW") + out6 = interpolate( + x, scale_factor=scale_tensor, mode='trilinear', data_format="NCDHW" + ) + out7 = interpolate( + x, size=[4, 4, 8], mode='trilinear', data_format="NCDHW" + ) + out8 = interpolate( + x, size=shape_tensor, mode='trilinear', data_format="NCDHW" + ) x_data = np.random.random((2, 3, 6, 9, 4)).astype("float32") dim_data = np.array([18]).astype("int32") @@ -658,40 +664,39 @@ class TestTrilinearInterpAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 4, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = 
trilinear_interp_np(x_data, - out_d=12, - out_h=18, - out_w=8, - align_mode=1) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 4, 1)), - rtol=1e-05) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 4, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = trilinear_interp_np( + x_data, out_d=12, out_h=18, out_w=8, align_mode=1 + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 4, 1)), rtol=1e-05 + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestTrilinearInterpOpException(unittest.TestCase): - def test_exception(self): input = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32") def attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - out = fluid.layers.resize_trilinear(input, - out_shape=[4, 8, 4], - data_format='NHWC') + out = fluid.layers.resize_trilinear( + input, out_shape=[4, 8, 4], data_format='NHWC' + ) self.assertRaises(ValueError, attr_data_format) diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py index 6b5391aa195dfa6d6d0d7f4dcf8e6319706bf094..67ab8805dc1902bd838410fba37aaffbaf052940 100755 --- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py @@ -24,18 +24,20 @@ import paddle np.random.seed(123) -def trilinear_interp_test(x, - OutSize=None, - SizeTensor=None, - Scale=None, - data_layout='NCHW', - out_d=-1, - out_h=-1, - out_w=-1, - scale=[], - interp_method='trilinear', - align_corners=True, - align_mode=0): +def trilinear_interp_test( + x, + OutSize=None, + SizeTensor=None, + Scale=None, + data_layout='NCHW', + out_d=-1, + out_h=-1, + out_w=-1, + scale=[], + interp_method='trilinear', + align_corners=True, + align_mode=0, +): if isinstance(scale, float) or isinstance(scale, int): scale_list = [] for _ in range(len(x.shape) - 2): @@ -45,26 +47,39 @@ def trilinear_interp_test(x, scale = list(map(float, scale)) if SizeTensor is not None: if not isinstance(SizeTensor, list) and not isinstance( - SizeTensor, tuple): + SizeTensor, tuple + ): SizeTensor = [SizeTensor] - return paddle._C_ops.trilinear_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, - scale, interp_method, align_corners, - align_mode) - - -def trilinear_interp_np(input, - out_d, - out_h, - out_w, - scale_d=0, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCDHW'): + return paddle._C_ops.trilinear_interp( + x, + OutSize, + SizeTensor, + Scale, + data_layout, + out_d, + out_h, + out_w, + scale, + interp_method, + align_corners, + align_mode, + ) + + +def trilinear_interp_np( + input, + out_d, + out_h, + out_w, + scale_d=0, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCDHW', +): """trilinear interpolation implement in shape [N, C, D, H, W]""" if data_layout == "NDHWC": input = np.transpose(input, (0, 4, 1, 2, 3)) # NDHWC => NCDHW @@ -80,7 +95,7 @@ def trilinear_interp_np(input, ratio_d = ratio_h = ratio_w = 0.0 if out_d > 1: - if (align_corners): + if align_corners: ratio_d = (in_d - 1.0) / 
(out_d - 1.0) else: if scale_d > 0: @@ -88,7 +103,7 @@ def trilinear_interp_np(input, else: ratio_d = 1.0 * in_d / out_d if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: @@ -96,7 +111,7 @@ def trilinear_interp_np(input, else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -107,14 +122,14 @@ def trilinear_interp_np(input, out = np.zeros((batch_size, channel, out_d, out_h, out_w)) for i in range(out_d): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: d = int(ratio_d * (i + 0.5) - 0.5) else: d = int(ratio_d * i) d = max(0, d) did = 1 if d < in_d - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_d = max(ratio_d * (i + 0.5) - 0.5, 0) d1lambda = idx_src_d - d else: @@ -122,14 +137,14 @@ def trilinear_interp_np(input, d2lambda = 1.0 - d1lambda for j in range(out_h): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (j + 0.5) - 0.5) else: h = int(ratio_h * j) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (j + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: @@ -137,30 +152,42 @@ def trilinear_interp_np(input, h2lambda = 1.0 - h1lambda for k in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (k + 0.5) - 0.5) else: w = int(ratio_w * k) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (k + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * k - w w2lambda = 1.0 - w1lambda - out[:, :, i, j, k] = \ - d2lambda * \ - (h2lambda * (w2lambda * input[:, :, d, h, w] + - w1lambda * input[:, :, d, h, w+wid]) + - h1lambda * (w2lambda * input[:, :, d, h+hid, w] + - w1lambda * input[:, :, d, h+hid, w+wid])) + \ - d1lambda * \ - (h2lambda * (w2lambda * input[:, :, d+did, h, w] + - w1lambda * input[:, :, d+did, h, w+wid]) + - h1lambda * (w2lambda * input[:, :, d+did, h+hid, w] + - w1lambda * input[:, :, d+did, h+hid, w+wid])) + out[:, :, i, j, k] = d2lambda * ( + h2lambda + * ( + w2lambda * input[:, :, d, h, w] + + w1lambda * input[:, :, d, h, w + wid] + ) + + h1lambda + * ( + w2lambda * input[:, :, d, h + hid, w] + + w1lambda * input[:, :, d, h + hid, w + wid] + ) + ) + d1lambda * ( + h2lambda + * ( + w2lambda * input[:, :, d + did, h, w] + + w1lambda * input[:, :, d + did, h, w + wid] + ) + + h1lambda + * ( + w2lambda * input[:, :, d + did, h + hid, w] + + w1lambda * input[:, :, d + did, h + hid, w + wid] + ) + ) if data_layout == "NDHWC": out = np.transpose(out, (0, 2, 3, 4, 1)) # NCDHW => NDHWC @@ -168,7 +195,6 @@ def trilinear_interp_np(input, class TestTrilinearInterpOp(OpTest): - def setUp(self): self.python_api = trilinear_interp_test self.out_size = None @@ -211,10 +237,20 @@ class TestTrilinearInterpOp(OpTest): out_h = self.out_h out_w = self.out_w - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, scale_d, - scale_h, scale_w, self.out_size, - self.actual_shape, self.align_corners, - self.align_mode, self.data_layout) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + scale_d, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + 
self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -234,7 +270,7 @@ class TestTrilinearInterpOp(OpTest): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': data_layout + 'data_layout': data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -249,10 +285,9 @@ class TestTrilinearInterpOp(OpTest): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -267,7 +302,6 @@ class TestTrilinearInterpOp(OpTest): class TestTrilinearInterpCase1(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 1, 7, 8, 9] @@ -280,7 +314,6 @@ class TestTrilinearInterpCase1(TestTrilinearInterpOp): class TestTrilinearInterpCase2(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 9, 6, 8] @@ -293,7 +326,6 @@ class TestTrilinearInterpCase2(TestTrilinearInterpOp): class TestTrilinearInterpCase3(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 16, 8, 4] @@ -306,7 +338,6 @@ class TestTrilinearInterpCase3(TestTrilinearInterpOp): class TestTrilinearInterpCase4(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [4, 1, 7, 8, 9] @@ -320,7 +351,6 @@ class TestTrilinearInterpCase4(TestTrilinearInterpOp): class TestTrilinearInterpCase5(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 3, 9, 6, 8] @@ -334,7 +364,6 @@ class TestTrilinearInterpCase5(TestTrilinearInterpOp): class TestTrilinearInterpCase6(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] @@ -348,7 +377,6 @@ class TestTrilinearInterpCase6(TestTrilinearInterpOp): class TestTrilinearInterpSame(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] @@ -361,7 +389,6 @@ class TestTrilinearInterpSame(TestTrilinearInterpOp): class TestTrilinearInterpSameHW(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [1, 1, 16, 8, 4] @@ -374,7 +401,6 @@ class TestTrilinearInterpSameHW(TestTrilinearInterpOp): class TestTrilinearInterpActualShape(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 16, 8, 4] @@ -388,7 +414,6 @@ class TestTrilinearInterpActualShape(TestTrilinearInterpOp): class TestTrilinearInterpDatalayout(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 4, 4, 4, 3] @@ -403,7 +428,6 @@ class TestTrilinearInterpDatalayout(TestTrilinearInterpOp): class TestTrilinearInterpOpUint8(OpTest): - def setUp(self): self.python_api = trilinear_interp_test self.out_size = None @@ -411,8 +435,9 @@ class TestTrilinearInterpOpUint8(OpTest): self.init_test_case() self.op_type = "trilinear_interp_v2" self.check_eager = True - input_np = np.random.randint(low=0, high=256, - size=self.input_shape).astype("uint8") + input_np = np.random.randint( 
+ low=0, high=256, size=self.input_shape + ).astype("uint8") if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -432,9 +457,19 @@ class TestTrilinearInterpOpUint8(OpTest): out_h = self.out_h out_w = self.out_w - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, 0, 0, 0, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + 0, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -446,7 +481,7 @@ class TestTrilinearInterpOpUint8(OpTest): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): @@ -458,9 +493,9 @@ class TestTrilinearInterpOpUint8(OpTest): self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output_with_place(place=core.CPUPlace(), - atol=1, - check_eager=self.check_eager) + self.check_output_with_place( + place=core.CPUPlace(), atol=1, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -474,7 +509,6 @@ class TestTrilinearInterpOpUint8(OpTest): class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 16, 8, 4] @@ -487,7 +521,6 @@ class TestTrilinearInterpCase1Uint8(TestTrilinearInterpOpUint8): class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [4, 1, 7, 8, 9] @@ -501,54 +534,48 @@ class TestTrilinearInterpCase2Uint8(TestTrilinearInterpOpUint8): class TestTrilinearInterpOtherMethod1(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestTrilinearInterpWithMethod2(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestTrilinearInterpWithMethod3(TestTrilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestTrilinearInterpScale1(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] self.out_d = 19 self.out_h = 15 self.out_w = 8 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpScale2(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] self.out_d = 30 self.out_h = 20 self.out_w = 25 - self.scale = 1. 
+ self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestTrilinearInterpScale3(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 9] @@ -561,7 +588,6 @@ class TestTrilinearInterpScale3(TestTrilinearInterpOp): class TestTrilinearInterpZero(TestTrilinearInterpOp): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 5, 7, 11] @@ -574,7 +600,6 @@ class TestTrilinearInterpZero(TestTrilinearInterpOp): class TestTrilinearInterpOp_attr_tensor(OpTest): - def setUp(self): self.python_api = trilinear_interp_test self.out_size = None @@ -587,7 +612,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): self.attrs = { 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'align_mode': self.align_mode + 'align_mode': self.align_mode, } input_np = np.random.random(self.input_shape).astype("float32") @@ -619,8 +644,9 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.check_eager = False @@ -634,19 +660,28 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = trilinear_interp_np(input_np, out_d, out_h, out_w, 0, 0, 0, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode) + output_np = trilinear_interp_np( + input_np, + out_d, + out_h, + out_w, + 0, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + ) self.outputs = {'Out': output_np} def test_check_output(self): self.check_output(check_eager=self.check_eager) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - in_place=True, - check_eager=self.check_eager) + self.check_grad( + ['X'], 'Out', in_place=True, check_eager=self.check_eager + ) def init_test_case(self): self.interp_method = 'trilinear' @@ -662,7 +697,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): # out_size is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case1(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [3, 2, 9, 6, 8] @@ -677,7 +711,6 @@ class TestTrilinearInterp_attr_tensor_Case1(TestTrilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case2(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 8, 8, 4] @@ -693,7 +726,6 @@ class TestTrilinearInterp_attr_tensor_Case2(TestTrilinearInterpOp_attr_tensor): # scale is a 1-D tensor class TestTrilinearInterp_attr_tensor_Case3(TestTrilinearInterpOp_attr_tensor): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 8, 8, 4] @@ -708,7 +740,6 @@ class TestTrilinearInterp_attr_tensor_Case3(TestTrilinearInterpOp_attr_tensor): class TestTrilinearInterpAPI(unittest.TestCase): - def test_imperative_case(self): with _test_eager_guard(): self.func_case() @@ -721,31 +752,28 @@ class TestTrilinearInterpAPI(unittest.TestCase): dim = fluid.data(name="dim", shape=[1], dtype="int32") shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32") actual_size = 
fluid.data(name="actual_size", shape=[3], dtype="int32") - scale_tensor = fluid.data(name="scale_tensor", - shape=[1], - dtype="float32") + scale_tensor = fluid.data( + name="scale_tensor", shape=[1], dtype="float32" + ) - out1 = fluid.layers.resize_trilinear(y, - out_shape=[12, 18, 8], - data_format='NDHWC') + out1 = fluid.layers.resize_trilinear( + y, out_shape=[12, 18, 8], data_format='NDHWC' + ) out2 = fluid.layers.resize_trilinear(x, out_shape=[12, dim, 8]) out3 = fluid.layers.resize_trilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_trilinear(x, - out_shape=[4, 4, 8], - actual_shape=actual_size) + out4 = fluid.layers.resize_trilinear( + x, out_shape=[4, 4, 8], actual_shape=actual_size + ) out5 = fluid.layers.resize_trilinear(x, scale=scale_tensor) - out6 = interpolate(x, - scale_factor=scale_tensor, - mode='trilinear', - data_format="NCDHW") - out7 = interpolate(x, - size=[4, 4, 8], - mode='trilinear', - data_format="NCDHW") - out8 = interpolate(x, - size=shape_tensor, - mode='trilinear', - data_format="NCDHW") + out6 = interpolate( + x, scale_factor=scale_tensor, mode='trilinear', data_format="NCDHW" + ) + out7 = interpolate( + x, size=[4, 4, 8], mode='trilinear', data_format="NCDHW" + ) + out8 = interpolate( + x, size=shape_tensor, mode='trilinear', data_format="NCDHW" + ) x_data = np.random.random((2, 3, 6, 9, 4)).astype("float32") dim_data = np.array([18]).astype("int32") @@ -759,26 +787,26 @@ class TestTrilinearInterpAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), - feed={ - "x": x_data, - "y": np.transpose(x_data, (0, 2, 3, 4, 1)), - "dim": dim_data, - "shape_tensor": shape_data, - "actual_size": actual_size_data, - "scale_tensor": scale_data - }, - fetch_list=[out1, out2, out3, out4, out5], - return_numpy=True) - - expect_res = trilinear_interp_np(x_data, - out_d=12, - out_h=18, - out_w=8, - align_mode=1) - np.testing.assert_allclose(results[0], - np.transpose(expect_res, (0, 2, 3, 4, 1)), - rtol=1e-05) + results = exe.run( + fluid.default_main_program(), + feed={ + "x": x_data, + "y": np.transpose(x_data, (0, 2, 3, 4, 1)), + "dim": dim_data, + "shape_tensor": shape_data, + "actual_size": actual_size_data, + "scale_tensor": scale_data, + }, + fetch_list=[out1, out2, out3, out4, out5], + return_numpy=True, + ) + + expect_res = trilinear_interp_np( + x_data, out_d=12, out_h=18, out_w=8, align_mode=1 + ) + np.testing.assert_allclose( + results[0], np.transpose(expect_res, (0, 2, 3, 4, 1)), rtol=1e-05 + ) for i in range(len(results) - 1): np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) @@ -787,30 +815,28 @@ class TestTrilinearInterpAPI(unittest.TestCase): # Preceding PR link: https://github.com/PaddlePaddle/Paddle/pull/26520/files#diff-ee0c2b73d08659e90a8f3ac48451a6588d35e1613742f864f9aad4394e12c290 with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) - out9 = interpolate(x, - size=[12, 18, 8], - mode='trilinear', - data_format="NCDHW") + out9 = interpolate( + x, size=[12, 18, 8], mode='trilinear', data_format="NCDHW" + ) class TestTrilinearInterpOpException(unittest.TestCase): - def test_exception(self): input = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32") def attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - out = fluid.layers.resize_trilinear(input, - out_shape=[4, 8, 4], - data_format='NHWC') + out = fluid.layers.resize_trilinear( + input, out_shape=[4, 8, 4], 
data_format='NHWC' + ) self.assertRaises(ValueError, attr_data_format) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestTrilinearInterpOpForFloat16(unittest.TestCase): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 3, 4, 4, 4] @@ -824,12 +850,14 @@ class TestTrilinearInterpOpForFloat16(unittest.TestCase): x_np = x_np.astype(dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False - y = interpolate(x, - size=self.out_size.tolist(), - mode=self.interp_method, - align_corners=self.align_corners, - align_mode=self.align_mode, - data_format=self.data_layout) + y = interpolate( + x, + size=self.out_size.tolist(), + mode=self.interp_method, + align_corners=self.align_corners, + align_mode=self.align_mode, + data_format=self.data_layout, + ) x_g = paddle.grad(y, x) y_np = y[0].numpy().astype('float32') x_g_np = x_g[0].numpy().astype('float32') @@ -848,10 +876,10 @@ class TestTrilinearInterpOpForFloat16(unittest.TestCase): np.testing.assert_allclose(x_g_np_1, x_g_np_2, rtol=1e-05) -@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestTrilinearInterpDatalayoutForFloat16(TestTrilinearInterpOpForFloat16): - def init_test_case(self): self.interp_method = 'trilinear' self.input_shape = [2, 4, 4, 4, 3] diff --git a/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py b/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py index bf6faeca556d213e3a5e1ce1cb89969f1bb05480..59f63a699d56963d818fdc0cec125b1563bfa95c 100644 --- a/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py @@ -27,11 +27,9 @@ def call_TripletMarginLoss_layer( eps=1e-6, reduction='mean', ): - triplet_margin_loss = paddle.nn.TripletMarginLoss(p=p, - epsilon=eps, - margin=margin, - swap=swap, - reduction=reduction) + triplet_margin_loss = paddle.nn.TripletMarginLoss( + p=p, epsilon=eps, margin=margin, swap=swap, reduction=reduction + ) res = triplet_margin_loss( input=input, positive=positive, @@ -50,102 +48,116 @@ def call_TripletMarginLoss_functional( eps=1e-6, reduction='mean', ): - res = paddle.nn.functional.triplet_margin_loss(input=input, - positive=positive, - negative=negative, - p=p, - epsilon=eps, - margin=margin, - swap=swap, - reduction=reduction) + res = paddle.nn.functional.triplet_margin_loss( + input=input, + positive=positive, + negative=negative, + p=p, + epsilon=eps, + margin=margin, + swap=swap, + reduction=reduction, + ) return res -def test_static(place, - input_np, - positive_np, - negative_np, - p=2, - margin=0.3, - swap=False, - eps=1e-6, - reduction='mean', - functional=False): +def test_static( + place, + input_np, + positive_np, + negative_np, + p=2, + margin=0.3, + swap=False, + eps=1e-6, + reduction='mean', + functional=False, +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype='float64') - positive = paddle.static.data(name='positive', - shape=positive_np.shape, - dtype='float64') - negative = paddle.static.data(name='negative', - shape=negative_np.shape, - dtype='float64') + input = paddle.static.data( + name='input', 
shape=input_np.shape, dtype='float64' + ) + positive = paddle.static.data( + name='positive', shape=positive_np.shape, dtype='float64' + ) + negative = paddle.static.data( + name='negative', shape=negative_np.shape, dtype='float64' + ) feed_dict = { "input": input_np, "positive": positive_np, - "negative": negative_np + "negative": negative_np, } if functional: - res = call_TripletMarginLoss_functional(input=input, - positive=positive, - negative=negative, - p=p, - eps=eps, - margin=margin, - swap=swap, - reduction=reduction) + res = call_TripletMarginLoss_functional( + input=input, + positive=positive, + negative=negative, + p=p, + eps=eps, + margin=margin, + swap=swap, + reduction=reduction, + ) else: - res = call_TripletMarginLoss_layer(input=input, - positive=positive, - negative=negative, - p=p, - eps=eps, - margin=margin, - swap=swap, - reduction=reduction) + res = call_TripletMarginLoss_layer( + input=input, + positive=positive, + negative=negative, + p=p, + eps=eps, + margin=margin, + swap=swap, + reduction=reduction, + ) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])[0] return static_result -def test_dygraph(place, - input, - positive, - negative, - p=2, - margin=0.3, - swap=False, - eps=1e-6, - reduction='mean', - functional=False): +def test_dygraph( + place, + input, + positive, + negative, + p=2, + margin=0.3, + swap=False, + eps=1e-6, + reduction='mean', + functional=False, +): paddle.disable_static() input = paddle.to_tensor(input) positive = paddle.to_tensor(positive) negative = paddle.to_tensor(negative) if functional: - dy_res = call_TripletMarginLoss_functional(input=input, - positive=positive, - negative=negative, - p=p, - eps=eps, - margin=margin, - swap=swap, - reduction=reduction) + dy_res = call_TripletMarginLoss_functional( + input=input, + positive=positive, + negative=negative, + p=p, + eps=eps, + margin=margin, + swap=swap, + reduction=reduction, + ) else: - dy_res = call_TripletMarginLoss_layer(input=input, - positive=positive, - negative=negative, - p=p, - eps=eps, - margin=margin, - swap=swap, - reduction=reduction) + dy_res = call_TripletMarginLoss_layer( + input=input, + positive=positive, + negative=negative, + p=p, + eps=eps, + margin=margin, + swap=swap, + reduction=reduction, + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -179,7 +191,6 @@ def calc_triplet_margin_loss( class TestTripletMarginLoss(unittest.TestCase): - def test_TripletMarginLoss(self): shape = (2, 2) input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) @@ -192,10 +203,12 @@ class TestTripletMarginLoss(unittest.TestCase): reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: - expected = calc_triplet_margin_loss(input=input, - positive=positive, - negative=negative, - reduction=reduction) + expected = calc_triplet_margin_loss( + input=input, + positive=positive, + negative=negative, + reduction=reduction, + ) dy_result = test_dygraph( place=place, @@ -212,57 +225,59 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_result, - expected, - rtol=1e-5, - atol=1e-8) - static_functional = test_static(place=place, - input_np=input, - positive_np=positive, - negative_np=negative, - reduction=reduction, - functional=True) - 
dy_functional = test_dygraph(place=place, - input=input, - positive=positive, - negative=negative, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_result, expected, rtol=1e-5, atol=1e-8 + ) + static_functional = test_static( + place=place, + input_np=input, + positive_np=positive, + negative_np=negative, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input=input, + positive=positive, + negative=negative, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-5, atol=1e-8 + ) def test_TripletMarginLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.loss.TripletMarginLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.loss.TripletMarginLoss, + reduction="unsupport reduction", + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32') negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32') - self.assertRaises(ValueError, - paddle.nn.functional.triplet_margin_loss, - input=input, - positive=positive, - negative=negative, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.functional.triplet_margin_loss, + input=input, + positive=positive, + negative=negative, + reduction="unsupport reduction", + ) paddle.enable_static() def test_TripletMarginLoss_dimension(self): @@ -295,11 +310,13 @@ class TestTripletMarginLoss(unittest.TestCase): input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) positive = np.random.uniform(0, 2, size=shape).astype(np.float64) negative = np.random.uniform(0, 2, size=shape).astype(np.float64) - expected = calc_triplet_margin_loss(input=input, - swap=True, - positive=positive, - negative=negative, - reduction=reduction) + expected = calc_triplet_margin_loss( + input=input, + swap=True, + positive=positive, + negative=negative, + reduction=reduction, + ) dy_result = test_dygraph( place=place, @@ -318,41 +335,40 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) - static_functional = test_static(place=place, - swap=True, - input_np=input, - positive_np=positive, - negative_np=negative, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - swap=True, - input=input, - positive=positive, - negative=negative, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - 
rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-5, - atol=1e-8) + static_functional = test_static( + place=place, + swap=True, + input_np=input, + positive_np=positive, + negative_np=negative, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + swap=True, + input=input, + positive=positive, + negative=negative, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-5, atol=1e-8 + ) def test_TripletMarginLoss_margin(self): paddle.disable_static() @@ -379,11 +395,13 @@ class TestTripletMarginLoss(unittest.TestCase): input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) positive = np.random.uniform(0, 2, size=shape).astype(np.float64) negative = np.random.uniform(0, 2, size=shape).astype(np.float64) - expected = calc_triplet_margin_loss(input=input, - p=p, - positive=positive, - negative=negative, - reduction=reduction) + expected = calc_triplet_margin_loss( + input=input, + p=p, + positive=positive, + negative=negative, + reduction=reduction, + ) dy_result = test_dygraph( place=place, @@ -402,41 +420,40 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) - static_functional = test_static(place=place, - p=p, - input_np=input, - positive_np=positive, - negative_np=negative, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - p=p, - input=input, - positive=positive, - negative=negative, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-5, - atol=1e-8) + static_functional = test_static( + place=place, + p=p, + input_np=input, + positive_np=positive, + negative_np=negative, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + p=p, + input=input, + positive=positive, + negative=negative, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-5, atol=1e-8 + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py b/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py index 8b7f79b77abd98366bd5c5e83674f3e4b591e1d5..c8d4da36a1cb5fd33f5353bc3a79d73cc16041b1 100644 --- a/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py +++ b/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py 
@@ -30,7 +30,8 @@ def call_TripletMarginDistanceLoss_layer( distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) res = triplet_margin_with_distance_loss( input=input, positive=positive, @@ -55,35 +56,38 @@ def call_TripletMaginDistanceLoss_functional( distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) return res -def test_static(place, - input_np, - positive_np, - negative_np, - distance_function=None, - margin=0.3, - swap=False, - reduction='mean', - functional=False): +def test_static( + place, + input_np, + positive_np, + negative_np, + distance_function=None, + margin=0.3, + swap=False, + reduction='mean', + functional=False, +): prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.static.data(name='input', - shape=input_np.shape, - dtype='float64') - positive = paddle.static.data(name='positive', - shape=positive_np.shape, - dtype='float64') - negative = paddle.static.data(name='negative', - shape=negative_np.shape, - dtype='float64') + input = paddle.static.data( + name='input', shape=input_np.shape, dtype='float64' + ) + positive = paddle.static.data( + name='positive', shape=positive_np.shape, dtype='float64' + ) + negative = paddle.static.data( + name='negative', shape=negative_np.shape, dtype='float64' + ) feed_dict = { "input": input_np, "positive": positive_np, - "negative": negative_np + "negative": negative_np, } if functional: @@ -94,7 +98,8 @@ def test_static(place, distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) else: res = call_TripletMarginDistanceLoss_layer( input=input, @@ -103,7 +108,8 @@ def test_static(place, distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) exe = paddle.static.Executor(place) static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])[0] @@ -111,15 +117,17 @@ def test_static(place, return static_result -def test_dygraph(place, - input, - positive, - negative, - distance_function=None, - margin=0.3, - swap=False, - reduction='mean', - functional=False): +def test_dygraph( + place, + input, + positive, + negative, + distance_function=None, + margin=0.3, + swap=False, + reduction='mean', + functional=False, +): paddle.disable_static() input = paddle.to_tensor(input) positive = paddle.to_tensor(positive) @@ -133,7 +141,8 @@ def test_dygraph(place, distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) else: dy_res = call_TripletMarginDistanceLoss_layer( input=input, @@ -142,7 +151,8 @@ def test_dygraph(place, distance_function=distance_function, margin=margin, swap=swap, - reduction=reduction) + reduction=reduction, + ) dy_result = dy_res.numpy() paddle.enable_static() return dy_result @@ -177,7 +187,6 @@ def calc_triplet_margin_distance_loss( class TestTripletMarginWithDistanceLoss(unittest.TestCase): - def test_TripletMarginDistanceLoss(self): shape = (5, 5) input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) @@ -194,7 +203,8 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): input=input, positive=positive, negative=negative, - reduction=reduction) + reduction=reduction, + ) dy_result = test_dygraph( place=place, @@ -211,48 +221,48 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative_np=negative, 
reduction=reduction, ) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_result, - expected, - rtol=1e-5, - atol=1e-8) - static_functional = test_static(place=place, - input_np=input, - positive_np=positive, - negative_np=negative, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - input=input, - positive=positive, - negative=negative, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_result, expected, rtol=1e-5, atol=1e-8 + ) + static_functional = test_static( + place=place, + input_np=input, + positive_np=positive, + negative_np=negative, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input=input, + positive=positive, + negative=negative, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-5, atol=1e-8 + ) def test_TripletMarginDistanceLoss_error(self): paddle.disable_static() - self.assertRaises(ValueError, - paddle.nn.TripletMarginWithDistanceLoss, - reduction="unsupport reduction") + self.assertRaises( + ValueError, + paddle.nn.TripletMarginWithDistanceLoss, + reduction="unsupport reduction", + ) input = paddle.to_tensor([[0.1, 0.3]], dtype='float32') positive = paddle.to_tensor([[0.0, 1.0]], dtype='float32') negative = paddle.to_tensor([[0.2, 0.1]], dtype='float32') @@ -262,11 +272,11 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): input=input, positive=positive, negative=negative, - reduction="unsupport reduction") + reduction="unsupport reduction", + ) paddle.enable_static() def test_TripletMarginDistanceLoss_distance_function(self): - def distance_function_1(x1, x2): return 1.0 - paddle.nn.functional.cosine_similarity(x1, x2) @@ -299,28 +309,30 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): distance_function=distance_function, reduction=reduction, ) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) - static_functional = test_static(place=place, - input_np=input, - positive_np=positive, - negative_np=negative, - distance_function=distance_function, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - input=input, - positive=positive, - negative=negative, - distance_function=distance_function, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) + static_functional = test_static( + place=place, + input_np=input, + positive_np=positive, + negative_np=negative, + distance_function=distance_function, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + input=input, + positive=positive, 
+ negative=negative, + distance_function=distance_function, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) def test_TripletMarginWithDistanceLoss_distance_funtion_error(self): paddle.disable_static() @@ -357,7 +369,8 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): positive=positive, negative=negative, ) - triplet_margin_with_distance_loss = paddle.nn.loss.TripletMarginWithDistanceLoss( + triplet_margin_with_distance_loss = ( + paddle.nn.loss.TripletMarginWithDistanceLoss() ) self.assertRaises( ValueError, @@ -375,11 +388,13 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): input = np.random.uniform(0.1, 0.8, size=shape).astype(np.float64) positive = np.random.uniform(0, 2, size=shape).astype(np.float64) negative = np.random.uniform(0, 2, size=shape).astype(np.float64) - expected = calc_triplet_margin_distance_loss(input=input, - swap=True, - positive=positive, - negative=negative, - reduction=reduction) + expected = calc_triplet_margin_distance_loss( + input=input, + swap=True, + positive=positive, + negative=negative, + reduction=reduction, + ) dy_result = test_dygraph( place=place, @@ -398,41 +413,40 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - np.testing.assert_allclose(static_result, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_result, - dy_result, - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + static_result, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_result, dy_result, rtol=1e-5, atol=1e-8 + ) np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) - static_functional = test_static(place=place, - swap=True, - input_np=input, - positive_np=positive, - negative_np=negative, - reduction=reduction, - functional=True) - dy_functional = test_dygraph(place=place, - swap=True, - input=input, - positive=positive, - negative=negative, - reduction=reduction, - functional=True) - np.testing.assert_allclose(static_functional, - expected, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(static_functional, - dy_functional, - rtol=1e-5, - atol=1e-8) - np.testing.assert_allclose(dy_functional, - expected, - rtol=1e-5, - atol=1e-8) + static_functional = test_static( + place=place, + swap=True, + input_np=input, + positive_np=positive, + negative_np=negative, + reduction=reduction, + functional=True, + ) + dy_functional = test_dygraph( + place=place, + swap=True, + input=input, + positive=positive, + negative=negative, + reduction=reduction, + functional=True, + ) + np.testing.assert_allclose( + static_functional, expected, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + static_functional, dy_functional, rtol=1e-5, atol=1e-8 + ) + np.testing.assert_allclose( + dy_functional, expected, rtol=1e-5, atol=1e-8 + ) def test_TripletMarginWithDistanceLoss_margin(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_triu_indices_op.py b/python/paddle/fluid/tests/unittests/test_triu_indices_op.py index 6863517874e0e9acd73ec16858f1fe24c03f1f2b..1ad6b7bbd458f8264d9484a97190469c10c6d4f5 100644 --- a/python/paddle/fluid/tests/unittests/test_triu_indices_op.py +++ b/python/paddle/fluid/tests/unittests/test_triu_indices_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestTriuIndicesOp(OpTest): - def setUp(self): self.op_type = "triu_indices" self.inputs = {} @@ -34,13 +33,13 
@@ class TestTriuIndicesOp(OpTest): def init_config(self): self.attrs = {'row': 4, 'col': 4, 'offset': -1} - self.target = np.triu_indices(self.attrs['row'], self.attrs['offset'], - self.attrs['col']) + self.target = np.triu_indices( + self.attrs['row'], self.attrs['offset'], self.attrs['col'] + ) self.target = np.array(self.target) class TestTriuIndicesOpCase1(TestTriuIndicesOp): - def init_config(self): self.attrs = {'row': 0, 'col': 0, 'offset': 0} self.target = np.triu_indices(0, 0, 0) @@ -48,23 +47,23 @@ class TestTriuIndicesOpCase1(TestTriuIndicesOp): class TestTriuIndicesOpCase2(TestTriuIndicesOp): - def init_config(self): self.attrs = {'row': 4, 'col': 4, 'offset': 2} - self.target = np.triu_indices(self.attrs['row'], self.attrs['offset'], - self.attrs['col']) + self.target = np.triu_indices( + self.attrs['row'], self.attrs['offset'], self.attrs['col'] + ) self.target = np.array(self.target) class TestTriuIndicesAPICaseStatic(unittest.TestCase): - def test_static(self): if fluid.core.is_compiled_with_cuda(): place = paddle.fluid.CUDAPlace(0) else: place = paddle.CPUPlace() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.triu_indices(4, 4, -1) exe = paddle.static.Executor(place) result = exe.run(feed={}, fetch_list=[data]) @@ -73,7 +72,6 @@ class TestTriuIndicesAPICaseStatic(unittest.TestCase): class TestTriuIndicesAPICaseDygraph(unittest.TestCase): - def test_dygraph(self): if fluid.core.is_compiled_with_cuda(): place = paddle.fluid.CUDAPlace(0) @@ -90,9 +88,7 @@ class TestTriuIndicesAPICaseDygraph(unittest.TestCase): class TestTriuIndicesAPICaseError(unittest.TestCase): - def test_case_error(self): - def test_num_rows_type_check(): out1 = paddle.triu_indices(1.0, 1, 2) @@ -110,11 +106,11 @@ class TestTriuIndicesAPICaseError(unittest.TestCase): class TestTriuIndicesAPICaseDefault(unittest.TestCase): - def test_default_CPU(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data = paddle.triu_indices(4, None, 2) exe = paddle.static.Executor(paddle.CPUPlace()) result = exe.run(feed={}, fetch_list=[data]) diff --git a/python/paddle/fluid/tests/unittests/test_trunc_op.py b/python/paddle/fluid/tests/unittests/test_trunc_op.py index 0a410e538054114a7d2554bb5e2ad27156ff823a..f8e23bd80748858a950844eacca73f721b0bf133 100644 --- a/python/paddle/fluid/tests/unittests/test_trunc_op.py +++ b/python/paddle/fluid/tests/unittests/test_trunc_op.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestTruncOp(OpTest): - def setUp(self): self.op_type = "trunc" self.python_api = paddle.trunc @@ -42,7 +41,6 @@ class TestTruncOp(OpTest): class TestFloatTruncOp(TestTruncOp): - def init_dtype_type(self): self.dtype = np.float32 self.__class__.exist_fp64_check_grad = True @@ -52,7 +50,6 @@ class TestFloatTruncOp(TestTruncOp): class TestIntTruncOp(TestTruncOp): - def init_dtype_type(self): self.dtype = np.int32 self.__class__.exist_fp64_check_grad = True @@ -62,7 +59,6 @@ class TestIntTruncOp(TestTruncOp): class TestTruncAPI(unittest.TestCase): - def setUp(self): self.shape = [20, 20] self.x = np.random.random((20, 20)).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_truncated_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_truncated_gaussian_random_op.py index 
780caa74333d1e89bdf95e3b3f266c891a29129e..70e79b4898af3f43a8613f1746c39b751ba3abd8 100644 --- a/python/paddle/fluid/tests/unittests/test_truncated_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_truncated_gaussian_random_op.py @@ -23,14 +23,13 @@ from paddle.fluid.framework import _test_eager_guard class TestTrunctedGaussianRandomOp(unittest.TestCase): - def setUp(self): self.op_type = "truncated_gaussian_random" self.inputs = {} self.attrs = { "shape": [10000], - "mean": .0, - "std": 1., + "mean": 0.0, + "std": 1.0, "seed": 10, } self.outputs = ["Out"] @@ -49,9 +48,9 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): program = fluid.Program() block = program.global_block() vout = block.create_var(name="Out") - op = block.append_op(type=self.op_type, - outputs={"Out": vout}, - attrs=self.attrs) + op = block.append_op( + type=self.op_type, outputs={"Out": vout}, attrs=self.attrs + ) op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) @@ -63,7 +62,7 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): exe = Executor(place) outs = exe.run(program, fetch_list=fetch_list) tensor = outs[0] - self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) + self.assertAlmostEqual(numpy.mean(tensor), 0.0, delta=0.1) self.assertAlmostEqual(numpy.var(tensor), 0.773, delta=0.1) # TruncatedNormal.__call__ has no return value, so here call _C_ops api @@ -72,9 +71,14 @@ class TestTrunctedGaussianRandomOp(unittest.TestCase): with fluid.dygraph.guard(place): with _test_eager_guard(): out = paddle._C_ops.truncated_gaussian_random( - self.attrs["shape"], self.attrs["mean"], self.attrs["std"], - self.attrs["seed"], core.VarDesc.VarType.FP32, place) - self.assertAlmostEqual(numpy.mean(out.numpy()), .0, delta=0.1) + self.attrs["shape"], + self.attrs["mean"], + self.attrs["std"], + self.attrs["seed"], + core.VarDesc.VarType.FP32, + place, + ) + self.assertAlmostEqual(numpy.mean(out.numpy()), 0.0, delta=0.1) self.assertAlmostEqual(numpy.var(out.numpy()), 0.773, delta=0.1) diff --git a/python/paddle/fluid/tests/unittests/test_unbind_op.py b/python/paddle/fluid/tests/unittests/test_unbind_op.py index e030792ce6014dacb098dd713d378a55b2a4e051..6f719ae7d4d5cc28fad6811937ff770b42cbd1fe 100644 --- a/python/paddle/fluid/tests/unittests/test_unbind_op.py +++ b/python/paddle/fluid/tests/unittests/test_unbind_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestUnbind(unittest.TestCase): - def test_unbind(self): x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') @@ -32,12 +31,11 @@ class TestUnbind(unittest.TestCase): axis = fluid.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=fluid.CPUPlace()) - [res_1, res_2] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_1, - "axis": 0 - }, - fetch_list=[out_0, out_1]) + [res_1, res_2] = exe.run( + fluid.default_main_program(), + feed={"x_1": input_1, "axis": 0}, + fetch_list=[out_0, out_1], + ) assert np.array_equal(res_1, input_1[0, 0:100]) assert np.array_equal(res_2, input_1[1, 0:100]) @@ -63,7 +61,6 @@ class TestUnbind(unittest.TestCase): class TestLayersUnbind(unittest.TestCase): - def test_layers_unbind(self): x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') @@ -72,19 +69,17 @@ class TestLayersUnbind(unittest.TestCase): axis = fluid.data(shape=[1], dtype='int32', name='axis') exe = fluid.Executor(place=fluid.CPUPlace()) - [res_1, res_2] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_1, - "axis": 0 - }, - fetch_list=[out_0, 
out_1]) + [res_1, res_2] = exe.run( + fluid.default_main_program(), + feed={"x_1": input_1, "axis": 0}, + fetch_list=[out_0, out_1], + ) assert np.array_equal(res_1, input_1[0, 0:100]) assert np.array_equal(res_2, input_1[1, 0:100]) class TestUnbindOp(OpTest): - def initParameters(self): pass @@ -106,8 +101,9 @@ class TestUnbindOp(OpTest): self.inputs = {'X': x} self.attrs = {'axis': self.axis} self.setAxis() - self.outputs = {'Out': [('out%d' % i, self.out[i]) \ - for i in range(len(self.out))]} + self.outputs = { + 'Out': [('out%d' % i, self.out[i]) for i in range(len(self.out))] + } def get_dtype(self): return "float64" @@ -123,7 +119,6 @@ class TestUnbindOp(OpTest): class TestUnbindOp1(TestUnbindOp): - def initParameters(self): self.axis = 1 self.num = 2 @@ -137,7 +132,6 @@ class TestUnbindOp1(TestUnbindOp): class TestUnbindOp2(TestUnbindOp): - def initParameters(self): self.axis = 2 self.num = 2 @@ -151,7 +145,6 @@ class TestUnbindOp2(TestUnbindOp): class TestUnbindOp3(TestUnbindOp): - def initParameters(self): self.axis = 2 self.num = 2 @@ -168,7 +161,6 @@ class TestUnbindOp3(TestUnbindOp): class TestUnbindOp4(TestUnbindOp): - def initParameters(self): self.axis = 1 self.num = 2 @@ -185,7 +177,6 @@ class TestUnbindOp4(TestUnbindOp): class TestUnbindBF16Op(OpTest): - def setUp(self): self._set_op_type() self.python_api = paddle.unbind @@ -196,8 +187,12 @@ class TestUnbindBF16Op(OpTest): self.out = np.split(x, self.num, self.axis) self.inputs = {'X': convert_float_to_uint16(x)} self.attrs = {'axis': self.axis} - self.outputs = {'Out': [('out%d' % i, convert_float_to_uint16(self.out[i])) \ - for i in range(len(self.out))]} + self.outputs = { + 'Out': [ + ('out%d' % i, convert_float_to_uint16(self.out[i])) + for i in range(len(self.out)) + ] + } def get_dtype(self): return np.uint16 @@ -213,7 +208,6 @@ class TestUnbindBF16Op(OpTest): class TestUnbindAxisError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = fluid.data(shape=[2, 3], dtype='float32', name='x') diff --git a/python/paddle/fluid/tests/unittests/test_unfold_op.py b/python/paddle/fluid/tests/unittests/test_unfold_op.py index ea38ae2be1cb205d2742973afe3dd310a88d88d8..0bf5f3d2932e246b8187100b7ca51cff78a63597 100644 --- a/python/paddle/fluid/tests/unittests/test_unfold_op.py +++ b/python/paddle/fluid/tests/unittests/test_unfold_op.py @@ -35,23 +35,45 @@ class TestUnfoldOp(OpTest): self.paddings = [1, 1, 1, 1] self.dilations = [1, 1] input_shape = [ - self.batch_size, self.input_channels, self.input_height, - self.input_width + self.batch_size, + self.input_channels, + self.input_height, + self.input_width, ] self.x = np.random.rand(*input_shape).astype(np.float64) def calc_unfold(self): output_shape = [0] * 3 output_shape[0] = self.batch_size - output_shape[1] = self.input_channels * self.kernel_sizes[ - 0] * self.kernel_sizes[1] + output_shape[1] = ( + self.input_channels * self.kernel_sizes[0] * self.kernel_sizes[1] + ) dkernel_h = self.dilations[0] * (self.kernel_sizes[0] - 1) + 1 dkernel_w = self.dilations[1] * (self.kernel_sizes[1] - 1) + 1 - out_height = int((self.input_height + self.paddings[0] + - self.paddings[2] - dkernel_h) / self.strides[0]) + 1 - out_width = int( - (self.input_width + self.paddings[1] + self.paddings[3] - dkernel_w) - / self.strides[1]) + 1 + out_height = ( + int( + ( + self.input_height + + self.paddings[0] + + self.paddings[2] + - dkernel_h + ) + / self.strides[0] + ) + + 1 + ) + out_width = ( + int( + ( + self.input_width + + self.paddings[1] + + 
self.paddings[3] + - dkernel_w + ) + / self.strides[1] + ) + + 1 + ) output_shape[2] = out_height * out_width output = np.zeros(output_shape).astype(np.float64) ############ calculate output ############## @@ -61,16 +83,25 @@ class TestUnfoldOp(OpTest): h_out = int(k / out_width) w_out = k % out_width w_offset = j % self.kernel_sizes[1] - h_offset = int( - j / self.kernel_sizes[1]) % self.kernel_sizes[0] - c_in = int(j / - (self.kernel_sizes[0] * self.kernel_sizes[1])) - h_in = h_offset * self.dilations[0] + h_out * self.strides[ - 0] - self.paddings[0] - w_in = w_offset * self.dilations[1] + w_out * self.strides[ - 1] - self.paddings[1] - if (h_in>=0 and h_in<self.input_height) and ( - w_in>=0 and w_in<self.input_width): + h_offset = ( + int(j / self.kernel_sizes[1]) % self.kernel_sizes[0] + ) + c_in = int( + j / (self.kernel_sizes[0] * self.kernel_sizes[1]) + ) + h_in = ( + h_offset * self.dilations[0] + + h_out * self.strides[0] + - self.paddings[0] + ) + w_in = ( + w_offset * self.dilations[1] + + w_out * self.strides[1] + - self.paddings[1] + ) + if (h_in >= 0 and h_in < self.input_height) and ( + w_in >= 0 and w_in < self.input_width + ): output[i, j, k] = self.x[i, c_in, h_in, w_in] self.outputs = output @@ -84,7 +115,7 @@ class TestUnfoldOp(OpTest): 'kernel_sizes': self.kernel_sizes, 'paddings': self.paddings, 'dilations': self.dilations, - 'strides': self.strides + 'strides': self.strides, } self.outputs = {'Y': self.outputs} @@ -118,9 +149,9 @@ class TestUnfoldAPI(TestUnfoldOp): m = paddle.nn.Unfold(**self.attrs) m.eval() result = m(input) - np.testing.assert_allclose(result.numpy(), - self.outputs['Y'], - rtol=1e-05) + np.testing.assert_allclose( + result.numpy(), self.outputs['Y'], rtol=1e-05 + ) def test_info(self): str(paddle.nn.Unfold(**self.attrs)) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py index 77d9903cefff2a083b374adbe3781d960804eeeb..0977ec69ac3ae8fe0e37cf562c58db568bcff398 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py @@ -19,11 +19,13 @@ import paddle import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid -from paddle.fluid.tests.unittests.test_uniform_random_op import output_hist, output_hist_diag +from paddle.fluid.tests.unittests.test_uniform_random_op import ( + output_hist, + output_hist_diag, +) class TestUniformRandomOpBF16(OpTest): - def setUp(self): self.op_type = "uniform_random" self.dtype = "uint16" @@ -37,7 +39,7 @@ class TestUniformRandomOpBF16(OpTest): "min": -5.0, "max": 10.0, "seed": 10, - 'dtype': int(core.VarDesc.VarType.BF16) + 'dtype': int(core.VarDesc.VarType.BF16), } self.output_hist = output_hist @@ -58,15 +60,15 @@ class TestUniformRandomOpBF16(OpTest): class TestUniformRandomOpBF16AttrTensorList(TestUniformRandomOpBF16): - def setUp(self): self.op_type = "uniform_random" self.new_shape = (1000, 784) self.dtype = "uint16" shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype("int64") * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype("int64") * ele) + ) self.inputs = {'ShapeTensorList': shape_tensor} self.init_attrs() self.outputs = {"Out": np.zeros((1000, 784)).astype("uint16")} @@ -76,14 +78,14 @@ class TestUniformRandomOpBF16AttrTensorList(TestUniformRandomOpBF16): "min": -5.0, "max": 10.0, "seed": 10, - 'dtype': int(core.VarDesc.VarType.BF16) + 'dtype': int(core.VarDesc.VarType.BF16), } self.output_hist = output_hist class TestUniformRandomOpBF16AttrTensorInt32( - TestUniformRandomOpBF16AttrTensorList): - + TestUniformRandomOpBF16AttrTensorList +): def setUp(self): self.op_type = "uniform_random" self.dtype = "uint16" @@ -93,7 +95,6 @@ class 
TestUniformRandomOpBF16WithDiagInit(TestUniformRandomOpBF16): - def init_attrs(self): self.attrs = { "shape": [1000, 784], @@ -103,13 +104,12 @@ class TestUniformRandomOpBF16WithDiagInit(TestUniformRandomOpBF16): "diag_num": 784, "diag_step": 784, "diag_val": 1.0, - 'dtype': int(core.VarDesc.VarType.BF16) + 'dtype': int(core.VarDesc.VarType.BF16), } self.output_hist = output_hist_diag class TestUniformRandomOpBF16SelectedRows(unittest.TestCase): - def test_check_output(self): self.check_with_place(core.CPUPlace()) @@ -117,13 +117,15 @@ class TestUniformRandomOpBF16SelectedRows(unittest.TestCase): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[1000, 784], - min=-5.0, - max=10.0, - seed=10, - dtype=int(core.VarDesc.VarType.BF16)) + op = Operator( + "uniform_random", + Out="X", + shape=[1000, 784], + min=-5.0, + max=10.0, + seed=10, + dtype=int(core.VarDesc.VarType.BF16), + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) @@ -132,22 +134,24 @@ class TestUniformRandomOpBF16SelectedRows(unittest.TestCase): class TestUniformRandomOpBF16SelectedRowsWithDiagInit( - TestUniformRandomOpBF16SelectedRows): - + TestUniformRandomOpBF16SelectedRows +): def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[500, 784], - min=-5.0, - max=10.0, - seed=10, - diag_num=500, - diag_step=784, - diag_val=1.0, - dtype=int(core.VarDesc.VarType.BF16)) + op = Operator( + "uniform_random", + Out="X", + shape=[500, 784], + min=-5.0, + max=10.0, + seed=10, + diag_num=500, + diag_step=784, + diag_val=1.0, + dtype=int(core.VarDesc.VarType.BF16), + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [500, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) @@ -156,14 +160,14 @@ class TestUniformRandomOpBF16SelectedRowsWithDiagInit( class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase): - def test_attr_tensor_API(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): dim_tensor = fluid.layers.fill_constant([1], "int64", 3) - ret = fluid.layers.nn.uniform_random([1, dim_tensor, 2], - dtype=np.uint16) + ret = fluid.layers.nn.uniform_random( + [1, dim_tensor, 2], dtype=np.uint16 + ) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -173,7 +177,6 @@ class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase): class TestUniformRandomOpAPISeed(unittest.TestCase): - def test_attr_tensor_API(self): _seed = 10 gen = paddle.seed(_seed) @@ -183,14 +186,12 @@ class TestUniformRandomOpAPISeed(unittest.TestCase): _min = 5 _max = 10 - ret = fluid.layers.nn.uniform_random([2, 3, 2], - min=_min, - max=_max, - seed=_seed) - ret_2 = fluid.layers.nn.uniform_random([2, 3, 2], - min=_min, - max=_max, - seed=_seed) + ret = fluid.layers.nn.uniform_random( + [2, 3, 2], min=_min, max=_max, seed=_seed + ) + ret_2 = fluid.layers.nn.uniform_random( + [2, 3, 2], min=_min, max=_max, seed=_seed + ) res = fluid.layers.equal(ret, ret_2) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -204,7 +205,6 @@ class TestUniformRandomOpAPISeed(unittest.TestCase): class TestUniformRandomOpBF16SelectedRowsShapeTensor(unittest.TestCase): - def test_check_output(self): place = core.CPUPlace() scope = core.Scope() @@ -212,13 +212,15 @@ class 
TestUniformRandomOpBF16SelectedRowsShapeTensor(unittest.TestCase): shape_tensor = scope.var("Shape").get_tensor() shape_tensor.set(np.array([1000, 784]).astype("int64"), place) paddle.seed(10) - op = Operator("uniform_random", - ShapeTensor="Shape", - Out="X", - min=-5.0, - max=10.0, - seed=10, - dtype=int(core.VarDesc.VarType.BF16)) + op = Operator( + "uniform_random", + ShapeTensor="Shape", + Out="X", + min=-5.0, + max=10.0, + seed=10, + dtype=int(core.VarDesc.VarType.BF16), + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) @@ -227,8 +229,8 @@ class TestUniformRandomOpBF16SelectedRowsShapeTensor(unittest.TestCase): class TestUniformRandomOpBF16SelectedRowsShapeTensorList( - TestUniformRandomOpBF16SelectedRowsShapeTensor): - + TestUniformRandomOpBF16SelectedRowsShapeTensor +): def test_check_output(self): place = core.CPUPlace() scope = core.Scope() @@ -238,13 +240,15 @@ class TestUniformRandomOpBF16SelectedRowsShapeTensorList( shape_2 = scope.var("shape2").get_tensor() shape_2.set(np.array([784]).astype("int64"), place) paddle.seed(10) - op = Operator("uniform_random", - ShapeTensorList=["shape1", "shape2"], - Out="X", - min=-5.0, - max=10.0, - seed=10, - dtype=int(core.VarDesc.VarType.BF16)) + op = Operator( + "uniform_random", + ShapeTensorList=["shape1", "shape2"], + Out="X", + min=-5.0, + max=10.0, + seed=10, + dtype=int(core.VarDesc.VarType.BF16), + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) @@ -253,14 +257,14 @@ class TestUniformRandomOpBF16SelectedRowsShapeTensorList( class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): - def test_attr_tensorlist_int32_API(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): input = fluid.data(name="input", shape=[1, 3], dtype='uint16') out_1 = fluid.layers.uniform_random_batch_size_like( - input, [2, 4], dtype=np.uint16) # out_1.shape=[1, 4] + input, [2, 4], dtype=np.uint16 + ) # out_1.shape=[1, 4] place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -271,5 +275,6 @@ class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): if __name__ == "__main__": from paddle import enable_static + enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py index 2b3881535c9550c0539035aa4259f10538f25c34..ea30b0da0ab46f78af8059ed47b851d17b382d5e 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py @@ -16,16 +16,17 @@ import unittest import paddle import paddle.fluid as fluid import numpy as np -from paddle.fluid.framework import _enable_legacy_dygraph, _disable_legacy_dygraph +from paddle.fluid.framework import ( + _enable_legacy_dygraph, + _disable_legacy_dygraph, +) class TestUniformRandomInplaceOpDtype(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) def test_uniform_random_inplace_op_dtype(self): - def test_fp32(): tensor_fp32 = paddle.ones(self.shape, dtype=paddle.float32) tensor_fp32.uniform_() @@ -46,7 +47,6 @@ class TestUniformRandomInplaceOpDtype(unittest.TestCase): class TestUniformRandomInplaceOpIsInplace(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) @@ -57,7 +57,6 @@ class 
TestUniformRandomInplaceOpIsInplace(unittest.TestCase): class TestUniformRandomInplaceOpSeedIsZero(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) self.seed = 0 @@ -72,7 +71,6 @@ class TestUniformRandomInplaceOpSeedIsZero(unittest.TestCase): class TestUniformRandomInplaceOpSeedIsNotZero(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) self.seed = 10 @@ -87,7 +85,6 @@ class TestUniformRandomInplaceOpSeedIsNotZero(unittest.TestCase): class TestUniformRandomInplaceOpWithinRange(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) self.min = -2 @@ -98,12 +95,12 @@ class TestUniformRandomInplaceOpWithinRange(unittest.TestCase): tensor = paddle.ones(self.shape) tensor.uniform_(min=self.min, max=self.max, seed=self.seed) tensor_data = tensor.numpy() - self.assertTrue((tensor_data > self.min).all() - and (tensor_data < self.max).all()) + self.assertTrue( + (tensor_data > self.min).all() and (tensor_data < self.max).all() + ) class TestUniformRandomInplaceOpShape(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) @@ -116,7 +113,6 @@ class TestUniformRandomInplaceOpShape(unittest.TestCase): class TestUniformRandomInplaceOpDistribution(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) self.min = -3 @@ -130,17 +126,15 @@ class TestUniformRandomInplaceOpDistribution(unittest.TestCase): hist, _ = np.histogram(tensor.numpy()[0], bins=self.bins) prob = hist / float(self.shape[0]) - prob_expect = np.ones((self.bins, )) / float(self.bins) + prob_expect = np.ones((self.bins,)) / float(self.bins) np.testing.assert_allclose(prob, prob_expect, rtol=0, atol=0.01) class TestUniformRandomInplaceOpError(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) def test_uniform_random_inplace_op_error(self): - def test_attr_error(): tensor = paddle.ones(self.shape) tensor.uniform_(shape=self.shape, min=-2, max=2) @@ -149,7 +143,6 @@ class TestUniformRandomInplaceOpError(unittest.TestCase): class TestUniformRandomInplaceOpEmptyTensor(unittest.TestCase): - def test_uniform_random_inplace_op_empty_tensor(self): places = ['cpu'] if fluid.core.is_compiled_with_cuda(): @@ -166,7 +159,6 @@ class TestUniformRandomInplaceOpEmptyTensor(unittest.TestCase): class TestUniformRandomInplaceGrad(unittest.TestCase): - def setUp(self): self.shape = (1000, 784) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 285e4f4f20b5c8d14952b990c80f5485c6bc7bae..efcbf075bf3fc6843b15d23a6447fc5869fe3109 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -49,15 +49,15 @@ def output_hist_diag(out): class TestUniformRandomOp_attr_tensorlist(OpTest): - def setUp(self): self.op_type = "uniform_random" self.python_api = paddle.uniform self.new_shape = (1000, 784) shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype("int64") * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype("int64") * ele) + ) self.inputs = {'ShapeTensorList': shape_tensor} self.init_attrs() self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")} @@ -75,22 +75,21 @@ class TestUniformRandomOp_attr_tensorlist(OpTest): class TestMaxMinAreInt(TestUniformRandomOp_attr_tensorlist): - def init_attrs(self): self.attrs = {"min": -5, "max": 10, "seed": 10} self.output_hist = output_hist class 
TestUniformRandomOp_attr_tensorlist_int32(OpTest): - def setUp(self): self.op_type = "uniform_random" self.python_api = paddle.uniform self.new_shape = (1000, 784) shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype("int32") * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype("int32") * ele) + ) self.inputs = {'ShapeTensorList': shape_tensor} self.init_attrs() self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")} @@ -108,7 +107,6 @@ class TestUniformRandomOp_attr_tensorlist_int32(OpTest): class TestUniformRandomOp_attr_tensor(OpTest): - def setUp(self): self.op_type = "uniform_random" self.python_api = paddle.uniform @@ -129,7 +127,6 @@ class TestUniformRandomOp_attr_tensor(OpTest): class TestUniformRandomOp_attr_tensor_int32(OpTest): - def setUp(self): self.op_type = "uniform_random" self.python_api = paddle.uniform @@ -150,7 +147,6 @@ class TestUniformRandomOp_attr_tensor_int32(OpTest): class TestUniformRandomOp(OpTest): - def setUp(self): self.op_type = "uniform_random" self.python_api = paddle.uniform @@ -163,7 +159,7 @@ class TestUniformRandomOp(OpTest): "shape": [1000, 784], "min": -5.0, "max": 10.0, - "seed": 10 + "seed": 10, } self.output_hist = output_hist @@ -178,9 +174,13 @@ class TestUniformRandomOp(OpTest): places = self._get_places() for place in places: with fluid.dygraph.base.guard(place=place): - out = self.python_api(self.attrs['shape'], 'float32', - self.attrs['min'], self.attrs['max'], - self.attrs['seed']) + out = self.python_api( + self.attrs['shape'], + 'float32', + self.attrs['min'], + self.attrs['max'], + self.attrs['seed'], + ) def test_check_api_eager(self): with _test_eager_guard(): @@ -189,15 +189,15 @@ class TestUniformRandomOp(OpTest): class TestUniformRandomOpError(unittest.TestCase): - def test_errors(self): main_prog = Program() start_prog = Program() with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor(np.zeros((4, 784)), [[1, 1, 1, 1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace() + ) fluid.layers.uniform_random(x1) self.assertRaises(TypeError, test_Variable) @@ -209,9 +209,9 @@ class TestUniformRandomOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable2) def test_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[4, 784], - dtype='float32') + x2 = fluid.layers.data( + name='x2', shape=[4, 784], dtype='float32' + ) fluid.layers.uniform_random(x2, 'int32') self.assertRaises(TypeError, test_dtype) @@ -224,7 +224,6 @@ class TestUniformRandomOpError(unittest.TestCase): class TestUniformRandomOpWithDiagInit(TestUniformRandomOp): - def init_attrs(self): self.attrs = { "shape": [1000, 784], @@ -233,13 +232,12 @@ class TestUniformRandomOpWithDiagInit(TestUniformRandomOp): "seed": 10, "diag_num": 784, "diag_step": 784, - "diag_val": 1.0 + "diag_val": 1.0, } self.output_hist = output_hist_diag class TestUniformRandomOpSelectedRows(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): @@ -254,12 +252,14 @@ class TestUniformRandomOpSelectedRows(unittest.TestCase): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[1000, 784], - min=-5.0, - max=10.0, - seed=10) + op = Operator( + "uniform_random", + Out="X", + shape=[1000, 784], + min=-5.0, + max=10.0, + seed=10, + ) op.run(scope, place) 
self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) @@ -267,21 +267,23 @@ class TestUniformRandomOpSelectedRows(unittest.TestCase): class TestUniformRandomOpSelectedRowsWithDiagInit( - TestUniformRandomOpSelectedRows): - + TestUniformRandomOpSelectedRows +): def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() paddle.seed(10) - op = Operator("uniform_random", - Out="X", - shape=[500, 784], - min=-5.0, - max=10.0, - seed=10, - diag_num=500, - diag_step=784, - diag_val=1.0) + op = Operator( + "uniform_random", + Out="X", + shape=[500, 784], + min=-5.0, + max=10.0, + seed=10, + diag_num=500, + diag_step=784, + diag_val=1.0, + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [500, 784]) hist, prob = output_hist_diag(np.array(out.get_tensor())) @@ -289,29 +291,32 @@ class TestUniformRandomOpSelectedRowsWithDiagInit( class TestUniformRandomOpApi(unittest.TestCase): - def test_api(self): paddle.seed(10) x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1) - y = fluid.layers.fc(x, - size=16, - param_attr=fluid.initializer.Uniform(low=-0.5, - high=0.5, - seed=10, - diag_num=16, - diag_step=16, - diag_val=1.0)) + y = fluid.layers.fc( + x, + size=16, + param_attr=fluid.initializer.Uniform( + low=-0.5, + high=0.5, + seed=10, + diag_num=16, + diag_step=16, + diag_val=1.0, + ), + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(3, 16).astype("float32"), [[1, 2]], place) + np.random.rand(3, 16).astype("float32"), [[1, 2]], place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) class TestUniformRandomOp_attr_tensor_API(unittest.TestCase): - def test_attr_tensor_API(self): startup_program = fluid.Program() train_program = fluid.Program() @@ -356,13 +361,12 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase): exe = fluid.Executor(place) Shape = np.array([2, 3]).astype('int32') exe.run(startup_program) - outs = exe.run(train_program, - feed={'shape_tensor': Shape}, - fetch_list=[ret]) + outs = exe.run( + train_program, feed={'shape_tensor': Shape}, fetch_list=[ret] + ) class TestUniformRandomOp_API_seed(unittest.TestCase): - def test_attr_tensor_API(self): _seed = 10 gen = paddle.seed(_seed) @@ -372,14 +376,12 @@ class TestUniformRandomOp_API_seed(unittest.TestCase): _min = 5 _max = 10 - ret = fluid.layers.nn.uniform_random([2, 3, 2], - min=_min, - max=_max, - seed=_seed) - ret_2 = fluid.layers.nn.uniform_random([2, 3, 2], - min=_min, - max=_max, - seed=_seed) + ret = fluid.layers.nn.uniform_random( + [2, 3, 2], min=_min, max=_max, seed=_seed + ) + ret_2 = fluid.layers.nn.uniform_random( + [2, 3, 2], min=_min, max=_max, seed=_seed + ) res = fluid.layers.equal(ret, ret_2) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -395,7 +397,6 @@ class TestUniformRandomOp_API_seed(unittest.TestCase): class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): @@ -412,12 +413,14 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase): shape_tensor = scope.var("Shape").get_tensor() shape_tensor.set(np.array([1000, 784]).astype("int64"), place) paddle.seed(10) - op = Operator("uniform_random", - ShapeTensor="Shape", - Out="X", - min=-5.0, - max=10.0, - seed=10) + op = Operator( + "uniform_random", + ShapeTensor="Shape", + Out="X", 
+ min=-5.0, + max=10.0, + seed=10, + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) @@ -425,7 +428,6 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase): class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase): - def get_places(self): places = [core.CPUPlace()] if core.is_compiled_with_cuda(): @@ -444,12 +446,14 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase): shape_2 = scope.var("shape2").get_tensor() shape_2.set(np.array([784]).astype("int64"), place) paddle.seed(10) - op = Operator("uniform_random", - ShapeTensorList=["shape1", "shape2"], - Out="X", - min=-5.0, - max=10.0, - seed=10) + op = Operator( + "uniform_random", + ShapeTensorList=["shape1", "shape2"], + Out="X", + min=-5.0, + max=10.0, + seed=10, + ) op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) @@ -457,51 +461,48 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase): class TestUniformRandomDygraphMode(unittest.TestCase): - def test_check_output(self): with fluid.dygraph.guard(): - x = fluid.layers.uniform_random([10], - dtype="float32", - min=0.0, - max=1.0) + x = fluid.layers.uniform_random( + [10], dtype="float32", min=0.0, max=1.0 + ) x_np = x.numpy() for i in range(10): self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0)) class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): - def test_errors(self): main_prog = Program() start_prog = Program() with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor(np.zeros( - (100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + ) fluid.layers.uniform_random_batch_size_like(x1) self.assertRaises(TypeError, test_Variable) def test_shape(): - x1 = fluid.layers.data(name='x2', - shape=[100, 784], - dtype='float32') + x1 = fluid.layers.data( + name='x2', shape=[100, 784], dtype='float32' + ) fluid.layers.uniform_random_batch_size_like(x1, shape="shape") self.assertRaises(TypeError, test_shape) def test_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[100, 784], - dtype='float32') + x2 = fluid.layers.data( + name='x2', shape=[100, 784], dtype='float32' + ) fluid.layers.uniform_random_batch_size_like(x2, 'int32') self.assertRaises(TypeError, test_dtype) class TestUniformAlias(unittest.TestCase): - def test_alias(self): paddle.uniform([2, 3], min=-5.0, max=5.0) paddle.tensor.uniform([2, 3], min=-5.0, max=5.0) @@ -514,15 +515,15 @@ class TestUniformAlias(unittest.TestCase): class TestUniformOpError(unittest.TestCase): - def test_errors(self): main_prog = Program() start_prog = Program() with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor(np.zeros( - (100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + ) paddle.tensor.random.uniform(x1) self.assertRaises(TypeError, test_Variable) @@ -534,36 +535,34 @@ class TestUniformOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable2) def test_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[100, 784], - dtype='float32') + x2 = fluid.layers.data( + name='x2', shape=[100, 784], dtype='float32' + ) paddle.tensor.random.uniform(x2, 'int32') self.assertRaises(TypeError, test_dtype) def test_out_dtype(): - out = 
paddle.tensor.random.uniform(shape=[3, 4], - dtype='float64') + out = paddle.tensor.random.uniform( + shape=[3, 4], dtype='float64' + ) self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) test_out_dtype() class TestUniformDygraphMode(unittest.TestCase): - def test_check_output(self): with fluid.dygraph.guard(): - x = paddle.tensor.random.uniform([10], - dtype="float32", - min=0.0, - max=1.0) + x = paddle.tensor.random.uniform( + [10], dtype="float32", min=0.0, max=1.0 + ) x_np = x.numpy() for i in range(10): self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0)) class TestUniformDtype(unittest.TestCase): - def test_default_dtype(self): paddle.disable_static() @@ -599,7 +598,6 @@ class TestUniformDtype(unittest.TestCase): class TestRandomValue(unittest.TestCase): - def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' if not paddle.is_compiled_with_cuda(): @@ -618,37 +616,64 @@ class TestRandomValue(unittest.TestCase): expect_mean = 0.50000454338820143895816272561205551028251647949218750 expect_std = 0.28867379167297479991560749112977646291255950927734375 expect = [ - 0.55298901, 0.65184678, 0.49375412, 0.57943639, 0.16459608, - 0.67181056, 0.03021481, 0.0238559, 0.07742096, 0.55972187 + 0.55298901, + 0.65184678, + 0.49375412, + 0.57943639, + 0.16459608, + 0.67181056, + 0.03021481, + 0.0238559, + 0.07742096, + 0.55972187, ] out = paddle.rand([32, 3, 1024, 1024], dtype='float64').numpy() self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) - np.testing.assert_allclose(out[2, 1, 512, 1000:1010], - expect, - rtol=1e-05) + np.testing.assert_allclose( + out[2, 1, 512, 1000:1010], expect, rtol=1e-05 + ) expect_mean = 0.50002604722976684570312500 expect_std = 0.2886914908885955810546875 expect = [ - 0.45320973, 0.17582087, 0.725341, 0.30849215, 0.622257, 0.46352342, - 0.97228295, 0.12771158, 0.286525, 0.9810645 + 0.45320973, + 0.17582087, + 0.725341, + 0.30849215, + 0.622257, + 0.46352342, + 0.97228295, + 0.12771158, + 0.286525, + 0.9810645, ] out = paddle.rand([32, 3, 1024, 1024], dtype='float32').numpy() self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) - np.testing.assert_allclose(out[2, 1, 512, 1000:1010], - expect, - rtol=1e-05) + np.testing.assert_allclose( + out[2, 1, 512, 1000:1010], expect, rtol=1e-05 + ) expect_mean = 25.11843109130859375 expect_std = 43.370647430419921875 expect = [ - 30.089634, 77.05225, 3.1201615, 68.34072, 59.266724, -25.33281, - 12.973292, 27.41127, -17.412298, 27.931019 + 30.089634, + 77.05225, + 3.1201615, + 68.34072, + 59.266724, + -25.33281, + 12.973292, + 27.41127, + -17.412298, + 27.931019, ] - out = paddle.empty([16, 16, 16, 16], - dtype='float32').uniform_(-50, 100).numpy() + out = ( + paddle.empty([16, 16, 16, 16], dtype='float32') + .uniform_(-50, 100) + .numpy() + ) self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) np.testing.assert_allclose(out[10, 10, 10, 0:10], expect, rtol=1e-05) @@ -657,7 +682,6 @@ class TestRandomValue(unittest.TestCase): class TestUniformMinMaxTensor(UnittestBase): - def init_info(self): self.shapes = [[2, 3, 4]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -673,9 +697,9 @@ class TestUniformMinMaxTensor(UnittestBase): min_v = paddle.to_tensor([0.1]) max_v = paddle.to_tensor([0.9]) y = paddle.uniform([2, 3, 10], min=min_v, max=max_v) - z = paddle.fluid.layers.uniform_random([2, 3, 10], - min=min_v, - max=max_v) + z = 
paddle.fluid.layers.uniform_random( + [2, 3, 10], min=min_v, max=max_v + ) out = feat + y + z diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py index a62f5e2808d9f833755de17386602507fd7e76e3..6e739bb385950c949e1bd6534a71e509accd2776 100644 --- a/python/paddle/fluid/tests/unittests/test_unique.py +++ b/python/paddle/fluid/tests/unittests/test_unique.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestUniqueOp(OpTest): - def setUp(self): self.op_type = "unique" self.init_config() @@ -37,12 +36,11 @@ class TestUniqueOp(OpTest): self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)} self.outputs = { 'Out': np.array([2, 3, 1, 5], dtype='int64'), - 'Index': np.array([0, 1, 1, 2, 3, 1], dtype='int32') + 'Index': np.array([0, 1, 1, 2, 3, 1], dtype='int32'), } class TestOne(TestUniqueOp): - def init_config(self): self.inputs = { 'X': np.array([2], dtype='int64'), @@ -50,31 +48,29 @@ class TestOne(TestUniqueOp): self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)} self.outputs = { 'Out': np.array([2], dtype='int64'), - 'Index': np.array([0], dtype='int32') + 'Index': np.array([0], dtype='int32'), } class TestRandom(TestUniqueOp): - def init_config(self): - self.inputs = {'X': np.random.randint(0, 100, (150, ), dtype='int64')} + self.inputs = {'X': np.random.randint(0, 100, (150,), dtype='int64')} self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)} - np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True, - True) + np_unique, np_index, reverse_index = np.unique( + self.inputs['X'], True, True + ) np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))] np_tuple.sort(key=lambda x: x[1]) target_out = np.array([i[0] for i in np_tuple], dtype='int64') target_index = np.array( - [list(target_out).index(i) for i in self.inputs['X']], - dtype='int64') + [list(target_out).index(i) for i in self.inputs['X']], dtype='int64' + ) self.outputs = {'Out': target_out, 'Index': target_index} class TestUniqueRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.unique([10]) @@ -87,10 +83,10 @@ class TestUniqueRaiseError(unittest.TestCase): self.assertRaises(TypeError, test_dtype) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestOneGPU(TestUniqueOp): - def init_config(self): self.inputs = { 'X': np.array([2], dtype='int64'), @@ -98,7 +94,7 @@ class TestOneGPU(TestUniqueOp): self.attrs = {'dtype': int(core.VarDesc.VarType.INT32)} self.outputs = { 'Out': np.array([2], dtype='int64'), - 'Index': np.array([0], dtype='int32') + 'Index': np.array([0], dtype='int32'), } def test_check_output(self): @@ -107,21 +103,22 @@ class TestOneGPU(TestUniqueOp): self.check_output_with_place(place, atol=1e-5) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestRandomGPU(TestUniqueOp): - def init_config(self): - self.inputs = {'X': np.random.randint(0, 100, (150, ), dtype='int64')} + self.inputs = {'X': np.random.randint(0, 100, (150,), dtype='int64')} self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)} - np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True, - True) + np_unique, np_index, reverse_index = np.unique( + self.inputs['X'], True, True + ) np_tuple = [(np_unique[i], 
np_index[i]) for i in range(len(np_unique))] np_tuple.sort(key=lambda x: x[1]) target_out = np.array([i[0] for i in np_tuple], dtype='int64') target_index = np.array( - [list(target_out).index(i) for i in self.inputs['X']], - dtype='int64') + [list(target_out).index(i) for i in self.inputs['X']], dtype='int64' + ) self.outputs = {'Out': target_out, 'Index': target_index} @@ -132,21 +129,22 @@ class TestRandomGPU(TestUniqueOp): class TestSortedUniqueOp(TestUniqueOp): - def init_config(self): self.inputs = {'X': np.array([2, 3, 3, 1, 5, 3], dtype='int64')} - unique, indices, inverse, count = np.unique(self.inputs['X'], - return_index=True, - return_inverse=True, - return_counts=True, - axis=None) + unique, indices, inverse, count = np.unique( + self.inputs['X'], + return_index=True, + return_inverse=True, + return_counts=True, + axis=None, + ) self.attrs = { 'dtype': int(core.VarDesc.VarType.INT32), "return_index": True, "return_inverse": True, "return_counts": True, "axis": None, - "is_sorted": True + "is_sorted": True, } self.outputs = { 'Out': unique, @@ -157,21 +155,22 @@ class TestSortedUniqueOp(TestUniqueOp): class TestUniqueOpAxisNone(TestUniqueOp): - def init_config(self): self.inputs = {'X': np.random.random((4, 7, 10)).astype('float64')} - unique, indices, inverse, counts = np.unique(self.inputs['X'], - return_index=True, - return_inverse=True, - return_counts=True, - axis=None) + unique, indices, inverse, counts = np.unique( + self.inputs['X'], + return_index=True, + return_inverse=True, + return_counts=True, + axis=None, + ) self.attrs = { 'dtype': int(core.VarDesc.VarType.INT32), "return_index": True, "return_inverse": True, "return_counts": True, "axis": None, - "is_sorted": True + "is_sorted": True, } self.outputs = { 'Out': unique, @@ -182,21 +181,22 @@ class TestUniqueOpAxisNone(TestUniqueOp): class TestUniqueOpAxis1(TestUniqueOp): - def init_config(self): self.inputs = {'X': np.random.random((3, 8, 8)).astype('float64')} - unique, indices, inverse, counts = np.unique(self.inputs['X'], - return_index=True, - return_inverse=True, - return_counts=True, - axis=1) + unique, indices, inverse, counts = np.unique( + self.inputs['X'], + return_index=True, + return_inverse=True, + return_counts=True, + axis=1, + ) self.attrs = { 'dtype': int(core.VarDesc.VarType.INT32), "return_index": True, "return_inverse": True, "return_counts": True, "axis": [1], - "is_sorted": True + "is_sorted": True, } self.outputs = { 'Out': unique, @@ -207,7 +207,6 @@ class TestUniqueOpAxis1(TestUniqueOp): class TestUniqueAPI(unittest.TestCase): - def test_dygraph_api_out(self): paddle.disable_static() x_data = x_data = np.random.randint(0, 10, (120)) @@ -221,16 +220,20 @@ class TestUniqueAPI(unittest.TestCase): paddle.disable_static() x_data = np.random.random((3, 5, 5)).astype("float32") x = paddle.to_tensor(x_data) - out, index, inverse, counts = paddle.unique(x, - return_index=True, - return_inverse=True, - return_counts=True, - axis=0) - np_out, np_index, np_inverse, np_counts = np.unique(x_data, - return_index=True, - return_inverse=True, - return_counts=True, - axis=0) + out, index, inverse, counts = paddle.unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + axis=0, + ) + np_out, np_index, np_inverse, np_counts = np.unique( + x_data, + return_index=True, + return_inverse=True, + return_counts=True, + axis=0, + ) self.assertTrue((out.numpy() == np_out).all(), True) self.assertTrue((index.numpy() == np_index).all(), True) self.assertTrue((inverse.numpy() == 
np_inverse).all(), True) @@ -241,13 +244,16 @@ class TestUniqueAPI(unittest.TestCase): paddle.disable_static() x_data = x_data = np.random.randint(0, 10, (120)) x = paddle.to_tensor(x_data) - out, indices, inverse, counts = paddle.unique(x, - return_index=True, - return_inverse=True, - return_counts=True, - dtype="int32") + out, indices, inverse, counts = paddle.unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + dtype="int32", + ) expected_out, np_indices, np_inverse, np_counts = np.unique( - x_data, return_index=True, return_inverse=True, return_counts=True) + x_data, return_index=True, return_inverse=True, return_counts=True + ) self.assertTrue((out.numpy() == expected_out).all(), True) self.assertTrue((indices.numpy() == np_indices).all(), True) self.assertTrue((inverse.numpy() == np_inverse).all(), True) @@ -261,34 +267,33 @@ class TestUniqueAPI(unittest.TestCase): self.test_dygraph_attr_dtype() def test_static_graph(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64') - unique, inverse, counts = paddle.unique(x, - return_inverse=True, - return_counts=True, - axis=0) + unique, inverse, counts = paddle.unique( + x, return_inverse=True, return_counts=True, axis=0 + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64') - result = exe.run(feed={"x": x_np}, - fetch_list=[unique, inverse, counts]) - np_unique, np_inverse, np_counts = np.unique(x_np, - return_inverse=True, - return_counts=True, - axis=0) + result = exe.run( + feed={"x": x_np}, fetch_list=[unique, inverse, counts] + ) + np_unique, np_inverse, np_counts = np.unique( + x_np, return_inverse=True, return_counts=True, axis=0 + ) np.testing.assert_allclose(result[0], np_unique, rtol=1e-05) np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05) np.testing.assert_allclose(result[2], np_counts, rtol=1e-05) class TestUniqueError(unittest.TestCase): - def test_input_dtype(self): - def test_x_dtype(): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16') result = paddle.unique(x) diff --git a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py index 91a8c6d6a4bf3edcdbf816c632713b999364aebc..b8a3096dbd62f8668b829e341c37a885cf30fcd0 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py +++ b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py @@ -50,7 +50,7 @@ def reference_unique_consecutive(X, return_inverse=False, return_counts=False): last += 1 inverse_vec[last] = i if return_counts: - counts_vec = counts_vec[:len(X)] + counts_vec = counts_vec[: len(X)] if return_inverse and return_counts: return X, np.array(inverse_vec), np.array(counts_vec) elif return_counts: @@ -79,8 +79,9 @@ class TestUniqueConsecutiveOp(OpTest): self.config() self.op_type = "unique_consecutive" x = np.random.randint(self.x_range, size=self.x_size).astype(self.dtype) - result = reference_unique_consecutive(x, self.return_inverse, - self.return_counts) + result = reference_unique_consecutive( + x, self.return_inverse, self.return_counts + ) out = 
reference_unique_consecutive(x) out = np.array(out).astype(self.dtype) self.inputs = { @@ -111,8 +112,9 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): self.config() self.op_type = "unique_consecutive" x = np.random.randint(self.x_range, size=self.x_size).astype(self.dtype) - result, inverse = reference_unique_consecutive(x, self.return_inverse, - self.return_counts) + result, inverse = reference_unique_consecutive( + x, self.return_inverse, self.return_counts + ) result = np.array(result).astype(self.dtype) inverse = inverse.astype(self.dtype) self.inputs = { @@ -120,7 +122,7 @@ class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp): } self.attrs = { 'return_inverse': self.return_inverse, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.python_out_sig = ["Out"] self.outputs = {'Out': result, 'Index': inverse} @@ -141,8 +143,9 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp): self.config() self.op_type = "unique_consecutive" x = np.random.randint(self.x_range, size=self.x_size).astype(self.dtype) - result, counts = reference_unique_consecutive(x, self.return_inverse, - self.return_counts) + result, counts = reference_unique_consecutive( + x, self.return_inverse, self.return_counts + ) result = np.array(result).astype(self.dtype) counts = counts.astype(self.dtype) self.inputs = { @@ -150,7 +153,7 @@ class TestUniqueConsecutiveOp3(TestUniqueConsecutiveOp): } self.attrs = { 'return_counts': self.return_counts, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.python_out_sig = ["Out"] self.outputs = {'Out': result, 'Counts': counts} @@ -172,7 +175,8 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp): self.op_type = "unique_consecutive" x = np.random.randint(self.x_range, size=self.x_size).astype(self.dtype) result, inverse, counts = reference_unique_consecutive( - x, self.return_inverse, self.return_counts) + x, self.return_inverse, self.return_counts + ) result = np.array(result).astype(self.dtype) inverse = inverse.astype(self.dtype) counts = counts.astype(self.dtype) @@ -182,14 +186,13 @@ class TestUniqueConsecutiveOp4(TestUniqueConsecutiveOp): self.attrs = { 'return_inverse': self.return_inverse, 'return_counts': self.return_counts, - 'dtype': int(core.VarDesc.VarType.INT32) + 'dtype': int(core.VarDesc.VarType.INT32), } self.python_out_sig = ["Out"] self.outputs = {'Out': result, 'Index': inverse, 'Counts': counts} class TestUniqueConsecutiveAPI(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if core.is_compiled_with_cuda(): @@ -198,15 +201,21 @@ class TestUniqueConsecutiveAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): paddle.enable_static() - input_x = fluid.data(name="input_x", shape=[ - 100, - ], dtype="float32") + input_x = fluid.data( + name="input_x", + shape=[ + 100, + ], + dtype="float32", + ) result = paddle.unique_consecutive(input_x) x_np = np.random.randint(20, size=100).astype("float32") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input_x": x_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": x_np}, + fetch_list=[result], + ) def test_static(self): for place in self.places: @@ -221,7 +230,6 @@ class TestUniqueConsecutiveAPI(unittest.TestCase): class TestUniqueConsecutiveCase2API(unittest.TestCase): - def setUp(self): self.places = [fluid.CPUPlace()] if 
core.is_compiled_with_cuda(): @@ -230,16 +238,23 @@ class TestUniqueConsecutiveCase2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): paddle.enable_static() - input_x = fluid.data(name="input_x", shape=[ - 100, - ], dtype="float32") + input_x = fluid.data( + name="input_x", + shape=[ + 100, + ], + dtype="float32", + ) result, inverse, counts = paddle.unique_consecutive( - input_x, return_inverse=True, return_counts=True) + input_x, return_inverse=True, return_counts=True + ) x_np = np.random.randint(20, size=100).astype("float32") exe = fluid.Executor(place) - fetches = exe.run(fluid.default_main_program(), - feed={"input_x": x_np}, - fetch_list=[result]) + fetches = exe.run( + fluid.default_main_program(), + feed={"input_x": x_np}, + fetch_list=[result], + ) def test_static(self): for place in self.places: @@ -251,7 +266,8 @@ class TestUniqueConsecutiveCase2API(unittest.TestCase): input_x = np.random.randint(20, size=100).astype("float64") x = paddle.to_tensor(input_x) result, inverse, counts = paddle.unique_consecutive( - x, return_inverse=True, return_counts=True) + x, return_inverse=True, return_counts=True + ) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_unique_name.py b/python/paddle/fluid/tests/unittests/test_unique_name.py index 4393089bab8e4910574e13689782130fca59292d..a423247370d14437e65491aaa2363ad3932f1e17 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_name.py +++ b/python/paddle/fluid/tests/unittests/test_unique_name.py @@ -17,7 +17,6 @@ import paddle.fluid as fluid class TestUniqueName(unittest.TestCase): - def test_guard(self): with fluid.unique_name.guard(): name_1 = fluid.unique_name.generate('') @@ -45,7 +44,6 @@ class TestUniqueName(unittest.TestCase): class TestImperativeUniqueName(unittest.TestCase): - def test_name_generator(self): with fluid.dygraph.guard(): tracer = fluid.framework._dygraph_tracer() diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py index 30fc203d933fbc063de1b6ece070a3878eb2c25e..6a5b58454b56c214cbee2d63eca2e8ba05f4a13e 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py +++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py @@ -20,7 +20,6 @@ import paddle.fluid.core as core class TestUniqueWithCountsOp(OpTest): - def setUp(self): self.op_type = "unique_with_counts" self.init_config() @@ -36,12 +35,11 @@ class TestUniqueWithCountsOp(OpTest): self.outputs = { 'Out': np.array([2, 3, 1, 5], dtype='int64'), 'Index': np.array([0, 1, 1, 2, 3, 1], dtype='int32'), - 'Count': np.array([1, 3, 1, 1], dtype='int32') + 'Count': np.array([1, 3, 1, 1], dtype='int32'), } class TestOne(TestUniqueWithCountsOp): - def init_config(self): self.inputs = { 'X': np.array([2], dtype='int64'), @@ -50,24 +48,24 @@ class TestOne(TestUniqueWithCountsOp): self.outputs = { 'Out': np.array([2], dtype='int64'), 'Index': np.array([0], dtype='int32'), - 'Count': np.array([1], dtype='int32') + 'Count': np.array([1], dtype='int32'), } class TestRandom(TestUniqueWithCountsOp): - def init_config(self): - input_data = np.random.randint(0, 100, (2000, ), dtype='int64') + input_data = np.random.randint(0, 100, (2000,), dtype='int64') self.inputs = {'X': input_data} self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)} - np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True, - True) + np_unique, np_index, reverse_index = 
np.unique( + self.inputs['X'], True, True + ) np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))] np_tuple.sort(key=lambda x: x[1]) target_out = np.array([i[0] for i in np_tuple], dtype='int64') target_index = np.array( - [list(target_out).index(i) for i in self.inputs['X']], - dtype='int64') + [list(target_out).index(i) for i in self.inputs['X']], dtype='int64' + ) count = [0 for i in range(len(np_unique))] for i in range(target_index.shape[0]): count[target_index[i]] += 1 @@ -75,14 +73,12 @@ class TestRandom(TestUniqueWithCountsOp): self.outputs = { 'Out': target_out, 'Index': target_index, - 'Count': target_count + 'Count': target_count, } class TestUniqueWithCountsRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.unique_with_counts([10]) @@ -95,10 +91,10 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase): self.assertRaises(TypeError, test_dtype) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestOneGPU(TestUniqueWithCountsOp): - def init_config(self): self.inputs = { 'X': np.array([2], dtype='int64'), @@ -107,7 +103,7 @@ class TestOneGPU(TestUniqueWithCountsOp): self.outputs = { 'Out': np.array([2], dtype='int64'), 'Index': np.array([0], dtype='int32'), - 'Count': np.array([1], dtype='int32') + 'Count': np.array([1], dtype='int32'), } def test_check_output(self): @@ -116,22 +112,23 @@ class TestOneGPU(TestUniqueWithCountsOp): self.check_output_with_place(place, atol=1e-5) -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") +@unittest.skipIf( + not core.is_compiled_with_cuda(), "core is not compiled with CUDA" +) class TestRandomGPU(TestUniqueWithCountsOp): - def init_config(self): - input_data = np.random.randint(0, 100, (2000, ), dtype='int64') + input_data = np.random.randint(0, 100, (2000,), dtype='int64') self.inputs = {'X': input_data} self.attrs = {'dtype': int(core.VarDesc.VarType.INT64)} - np_unique, np_index, reverse_index = np.unique(self.inputs['X'], True, - True) + np_unique, np_index, reverse_index = np.unique( + self.inputs['X'], True, True + ) np_tuple = [(np_unique[i], np_index[i]) for i in range(len(np_unique))] np_tuple.sort(key=lambda x: x[1]) target_out = np.array([i[0] for i in np_tuple], dtype='int64') target_index = np.array( - [list(target_out).index(i) for i in self.inputs['X']], - dtype='int64') + [list(target_out).index(i) for i in self.inputs['X']], dtype='int64' + ) count = [0 for i in range(len(np_unique))] for i in range(target_index.shape[0]): count[target_index[i]] += 1 @@ -139,7 +136,7 @@ class TestRandomGPU(TestUniqueWithCountsOp): self.outputs = { 'Out': target_out, 'Index': target_index, - 'Count': target_count + 'Count': target_count, } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py index f15d295ce9ebc70b26da2bca5042587b4b9bc797..787d9367f5890c5ab5d4c90d98bd7138a3bd36ef 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py @@ -25,8 +25,11 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): input_size = x.shape default_size = [] for d in range(len(kernel_size)): - default_size.append((input_size[-len(kernel_size) + d] - 1) * - stride[d] + kernel_size[d] - 2 * padding[d]) + default_size.append( + 
(input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) if output_size is None: ret = default_size else: @@ -34,11 +37,13 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): return ret -def unpool1dmax_forward_naive(input, indices, ksize, strides, paddings, - output_size): +def unpool1dmax_forward_naive( + input, indices, ksize, strides, paddings, output_size +): s0, s1, s2 = input.shape - output_size = _unpool_output_size(input, ksize, strides, paddings, - output_size) + output_size = _unpool_output_size( + input, ksize, strides, paddings, output_size + ) out_lsize = output_size[0] out = np.zeros((s0, s1, out_lsize)) for nidx in range(s0): @@ -52,7 +57,6 @@ def unpool1dmax_forward_naive(input, indices, ksize, strides, paddings, class TestUnpool1DOpAPI_dygraph(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -61,25 +65,23 @@ class TestUnpool1DOpAPI_dygraph(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 16) input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool1d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool1d(output, - indices, - kernel_size=2, - stride=2) + output, indices = F.max_pool1d( + input_x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool1d( + output, indices, kernel_size=2, stride=2 + ) expected_output_unpool = unpool1dmax_forward_naive( - output.numpy(), indices.numpy(), [2], [2], [0], [16]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), indices.numpy(), [2], [2], [0], [16] + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool1DOpAPI_dygraph2(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -88,25 +90,23 @@ class TestUnpool1DOpAPI_dygraph2(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 16) input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool1d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool1d(output, - indices, - kernel_size=2, - stride=None) + output, indices = F.max_pool1d( + input_x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool1d( + output, indices, kernel_size=2, stride=None + ) expected_output_unpool = unpool1dmax_forward_naive( - output.numpy(), indices.numpy(), [2], [2], [0], [16]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), indices.numpy(), [2], [2], [0], [16] + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool1DOpAPI_dygraph3(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -115,61 +115,66 @@ class TestUnpool1DOpAPI_dygraph3(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 16) input_x = paddle.to_tensor(input_data) - Pool1d = paddle.nn.MaxPool1D(kernel_size=2, - stride=2, - return_mask=True) + Pool1d = paddle.nn.MaxPool1D( + kernel_size=2, stride=2, return_mask=True + ) UnPool1d = paddle.nn.MaxUnPool1D(kernel_size=2, stride=2) output, indices = Pool1d(input_x) output_unpool = UnPool1d(output, indices) expected_output_unpool = 
unpool1dmax_forward_naive( - output.numpy(), indices.numpy(), [2], [2], [0], [16]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), indices.numpy(), [2], [2], [0], [16] + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool1DOpAPI_static(unittest.TestCase): - def test_case(self): paddle.enable_static() places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - - input_data = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], - [9, 10, 11, 12]]]).astype("float32") - x = paddle.fluid.data(name='x', - shape=[1, 3, 4], - dtype='float32') - output, indices = F.max_pool1d(x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool1d(output, - indices, - kernel_size=2, - stride=None) + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + + input_data = np.array( + [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] + ).astype("float32") + x = paddle.fluid.data( + name='x', shape=[1, 3, 4], dtype='float32' + ) + output, indices = F.max_pool1d( + x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool1d( + output, indices, kernel_size=2, stride=None + ) exe = paddle.fluid.Executor(place) - fetches = exe.run(paddle.fluid.default_main_program(), - feed={"x": input_data}, - fetch_list=[output_unpool], - return_numpy=True) - pool1d_out_np = np.array([[[2., 4.], [6., 8.], - [10., 12.]]]).astype("float32") - indices_np = np.array([[[1, 3], [1, 3], [1, - 3]]]).astype("int32") + fetches = exe.run( + paddle.fluid.default_main_program(), + feed={"x": input_data}, + fetch_list=[output_unpool], + return_numpy=True, + ) + pool1d_out_np = np.array( + [[[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]]] + ).astype("float32") + indices_np = np.array([[[1, 3], [1, 3], [1, 3]]]).astype( + "int32" + ) expected_output_unpool = unpool1dmax_forward_naive( - pool1d_out_np, indices_np, [2], [2], [0], [4]) - np.testing.assert_allclose(fetches[0], - expected_output_unpool, - rtol=1e-05) + pool1d_out_np, indices_np, [2], [2], [0], [4] + ) + np.testing.assert_allclose( + fetches[0], expected_output_unpool, rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py index 5c3411d6899aa5b6812b7eced0bc7e716f1df83a..3db2d1514caaf5e47bdd064557ec664da00d58c4 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py @@ -26,8 +26,11 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): input_size = x.shape default_size = [] for d in range(len(kernel_size)): - default_size.append((input_size[-len(kernel_size) + d] - 1) * - stride[d] + kernel_size[d] - 2 * padding[d]) + default_size.append( + (input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) if output_size is None: ret = default_size else: @@ -35,11 +38,13 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): return ret -def unpool3dmax_forward_naive(input, indices, ksize, strides, paddings, - output_size): +def unpool3dmax_forward_naive( + input, indices, ksize, strides, paddings, output_size +): s0, s1, s2, s3, s4 = input.shape - output_size = 
_unpool_output_size(input, ksize, strides, paddings, - output_size) + output_size = _unpool_output_size( + input, ksize, strides, paddings, output_size + ) out_dsize = output_size[0] out_hsize = output_size[1] out_wsize = output_size[2] @@ -51,49 +56,58 @@ def unpool3dmax_forward_naive(input, indices, ksize, strides, paddings, for w in range(s4): index = indices[nidx, cidx, d, h, w] didx = index // (out_wsize * out_hsize) - hidx = (index - - didx * out_hsize * out_wsize) // out_wsize - widx = (index - - didx * out_hsize * out_wsize) % out_wsize - out[nidx, cidx, didx, hidx, widx] = \ - input[nidx, cidx, d, h, w] + hidx = ( + index - didx * out_hsize * out_wsize + ) // out_wsize + widx = ( + index - didx * out_hsize * out_wsize + ) % out_wsize + out[nidx, cidx, didx, hidx, widx] = input[ + nidx, cidx, d, h, w + ] return out -def max_unpool3d_wrapper(x, - indices, - kernel_size, - stride=None, - padding=0, - output_size=None, - data_format="NCDHW", - name=None): - out = paddle.nn.functional.max_unpool3d(x, - indices, - kernel_size, - stride=stride, - padding=padding, - data_format=data_format, - output_size=output_size, - name=name) +def max_unpool3d_wrapper( + x, + indices, + kernel_size, + stride=None, + padding=0, + output_size=None, + data_format="NCDHW", + name=None, +): + out = paddle.nn.functional.max_unpool3d( + x, + indices, + kernel_size, + stride=stride, + padding=padding, + data_format=data_format, + output_size=output_size, + name=name, + ) return out class TestUnpool3DOp(OpTest): - def setUp(self): self.op_type = "unpool3d" self.python_api = max_unpool3d_wrapper self.init_test_case() inputs = np.random.randint(0, 100, self.shape) nsize, csize, dsize, hsize, wsize = inputs.shape - self.output_size = _unpool_output_size(inputs, self.ksize, self.strides, - self.paddings, self.output_size) + self.output_size = _unpool_output_size( + inputs, self.ksize, self.strides, self.paddings, self.output_size + ) indices = np.random.permutation( np.arange( - 0, self.output_size[0] * self.output_size[1] * - self.output_size[2]))[:dsize * hsize * wsize] + 0, + self.output_size[0] * self.output_size[1] * self.output_size[2], + ) + )[: dsize * hsize * wsize] indices = np.reshape(indices, [dsize, hsize, wsize]) idx_list = [] for n in range(nsize): @@ -103,12 +117,18 @@ class TestUnpool3DOp(OpTest): idx_list.append(c_list) indices = np.array(idx_list) - output = self.unpool3d_forward_naive(inputs, indices, self.ksize, \ - self.strides, self.paddings, self.output_size).astype("float64") + output = self.unpool3d_forward_naive( + inputs, + indices, + self.ksize, + self.strides, + self.paddings, + self.output_size, + ).astype("float64") self.inputs = { 'X': inputs.astype('float64'), - 'Indices': indices.astype('int32') + 'Indices': indices.astype('int32'), } self.attrs = { 'strides': self.strides, @@ -136,7 +156,6 @@ class TestUnpool3DOp(OpTest): class TestUnpool3DOpcase1(TestUnpool3DOp): - def init_test_case(self): self.unpool3d_forward_naive = unpool3dmax_forward_naive self.unpooling_type = "max" @@ -148,7 +167,6 @@ class TestUnpool3DOpcase1(TestUnpool3DOp): class TestUnpool3DOpOutput(TestUnpool3DOp): - def init_test_case(self): self.unpool3d_forward_naive = unpool3dmax_forward_naive self.unpooling_type = "max" @@ -160,50 +178,51 @@ class TestUnpool3DOpOutput(TestUnpool3DOp): class TestUnpool3DOpException(unittest.TestCase): - def test_exception(self): - def indices_size_error(): data = paddle.randint(shape=[1, 1, 3, 3, 3]) - indices = paddle.reshape(paddle.arange(0, 36), - shape=[1, 1, 3, 3, 4]) + 
indices = paddle.reshape( + paddle.arange(0, 36), shape=[1, 1, 3, 3, 4] + ) MaxUnPool3D = F.maxunpool3d(data, indices, kernel_size=2, stride=2) def indices_value_error(): data = paddle.randint(shape=[1, 1, 3, 3, 3]) - indices = paddle.reshape(paddle.arange(4, 40), - shape=[1, 1, 3, 3, 3]) + indices = paddle.reshape( + paddle.arange(4, 40), shape=[1, 1, 3, 3, 3] + ) MaxUnPool3D = F.maxunpool3d(data, indices, kernel_size=2, stride=2) def data_format_error(): data = paddle.randint(shape=[1, 1, 3, 3, 3]) - indices = paddle.reshape(paddle.arange(0, 27), - shape=[1, 1, 3, 3, 3]) - MaxUnPool3D = F.maxunpool3d(data, - indices, - kernel_size=2, - stride=2, - data_format="NDHWC") + indices = paddle.reshape( + paddle.arange(0, 27), shape=[1, 1, 3, 3, 3] + ) + MaxUnPool3D = F.maxunpool3d( + data, indices, kernel_size=2, stride=2, data_format="NDHWC" + ) def data_outputsize_error(): data = paddle.randint(shape=[1, 1, 3, 3, 3]) - indices = paddle.reshape(paddle.arange(0, 27), - shape=[1, 1, 3, 3, 3]) - MaxUnPool3D = F.maxunpool3d(data, - indices, - kernel_size=2, - stride=2, - output_size=[2, 2, 3, 4, 5]) + indices = paddle.reshape( + paddle.arange(0, 27), shape=[1, 1, 3, 3, 3] + ) + MaxUnPool3D = F.maxunpool3d( + data, + indices, + kernel_size=2, + stride=2, + output_size=[2, 2, 3, 4, 5], + ) def data_outputsize_error2(): data = paddle.randint(shape=[1, 1, 3, 3, 3]) - indices = paddle.reshape(paddle.arange(0, 27), - shape=[1, 1, 3, 3, 3]) - MaxUnPool3D = F.maxunpool3d(data, - indices, - kernel_size=2, - stride=2, - output_size=[10, 10, 10]) + indices = paddle.reshape( + paddle.arange(0, 27), shape=[1, 1, 3, 3, 3] + ) + MaxUnPool3D = F.maxunpool3d( + data, indices, kernel_size=2, stride=2, output_size=[10, 10, 10] + ) self.assertRaises(ValueError, indices_size_error) self.assertRaises(ValueError, indices_value_error) @@ -213,7 +232,6 @@ class TestUnpool3DOpException(unittest.TestCase): class TestUnpool3DOpAPI_dygraph(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -222,26 +240,28 @@ class TestUnpool3DOpAPI_dygraph(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 4, 4, 6) input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool3d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool3d(output, - indices, - kernel_size=2, - stride=2) + output, indices = F.max_pool3d( + input_x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool3d( + output, indices, kernel_size=2, stride=2 + ) expected_output_unpool = unpool3dmax_forward_naive( - output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], - [0, 0, 0], [4, 4, 6]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), + indices.numpy(), + [2, 2, 2], + [2, 2, 2], + [0, 0, 0], + [4, 4, 6], + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool3DOpAPI_dygraph2(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -250,26 +270,28 @@ class TestUnpool3DOpAPI_dygraph2(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 4, 4, 6) input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool3d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool3d(output, - indices, - kernel_size=2, - stride=None) + output, indices = F.max_pool3d( 
+ input_x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool3d( + output, indices, kernel_size=2, stride=None + ) expected_output_unpool = unpool3dmax_forward_naive( - output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], - [0, 0, 0], [4, 4, 6]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), + indices.numpy(), + [2, 2, 2], + [2, 2, 2], + [0, 0, 0], + [4, 4, 6], + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool3DOpAPI_dygraph3(unittest.TestCase): - def test_case(self): places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): @@ -278,63 +300,91 @@ class TestUnpool3DOpAPI_dygraph3(unittest.TestCase): paddle.disable_static() input_data = np.random.rand(1, 3, 4, 4, 6) input_x = paddle.to_tensor(input_data) - Pool3d = paddle.nn.MaxPool3D(kernel_size=2, - stride=2, - return_mask=True) + Pool3d = paddle.nn.MaxPool3D( + kernel_size=2, stride=2, return_mask=True + ) UnPool3d = paddle.nn.MaxUnPool3D(kernel_size=2, stride=2) output, indices = Pool3d(input_x) output_unpool = UnPool3d(output, indices) expected_output_unpool = unpool3dmax_forward_naive( - output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], - [0, 0, 0], [4, 4, 6]) - np.testing.assert_allclose(output_unpool.numpy(), - expected_output_unpool, - rtol=1e-05) + output.numpy(), + indices.numpy(), + [2, 2, 2], + [2, 2, 2], + [0, 0, 0], + [4, 4, 6], + ) + np.testing.assert_allclose( + output_unpool.numpy(), expected_output_unpool, rtol=1e-05 + ) paddle.enable_static() class TestUnpool3DOpAPI_static(unittest.TestCase): - def test_case(self): paddle.enable_static() places = [paddle.CPUPlace()] if paddle.fluid.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): - - input_data = np.array([[[[[1, 2, 3, 4], [5, 6, 7, 8], \ - [9, 10, 11, 12], [13, 14, 15, 16]], [[1, 2, 3, 4], [5, 6, 7, 8], \ - [9, 10, 11, 12], [13, 14, 15, 16]]]]]).astype("float32") - x = paddle.fluid.data(name='x', - shape=[1, 1, 2, 4, 4], - dtype='float32') - output, indices = F.max_pool3d(x, - kernel_size=2, - stride=2, - return_mask=True) - output_unpool = F.max_unpool3d(output, - indices, - kernel_size=2, - stride=None) + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + + input_data = np.array( + [ + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + ] + ] + ] + ).astype("float32") + x = paddle.fluid.data( + name='x', shape=[1, 1, 2, 4, 4], dtype='float32' + ) + output, indices = F.max_pool3d( + x, kernel_size=2, stride=2, return_mask=True + ) + output_unpool = F.max_unpool3d( + output, indices, kernel_size=2, stride=None + ) exe = paddle.fluid.Executor(place) - fetches = exe.run(paddle.fluid.default_main_program(), - feed={"x": input_data}, - fetch_list=[output_unpool], - return_numpy=True) - pool3d_out_np = np.array([[[[[6., 8.], - [14., 16.]]]]]).astype("float32") + fetches = exe.run( + paddle.fluid.default_main_program(), + feed={"x": input_data}, + fetch_list=[output_unpool], + return_numpy=True, + ) + pool3d_out_np = np.array( + [[[[[6.0, 8.0], [14.0, 16.0]]]]] + ).astype("float32") indices_np = np.array([[[[[5, 7], [13, 15]]]]]).astype("int32") expected_output_unpool = unpool3dmax_forward_naive( - 
pool3d_out_np, indices_np, [2, 2, 2], [2, 2, 2], [0, 0, 0], - [2, 4, 4]) - np.testing.assert_allclose(fetches[0], - expected_output_unpool, - rtol=1e-05) + pool3d_out_np, + indices_np, + [2, 2, 2], + [2, 2, 2], + [0, 0, 0], + [2, 4, 4], + ) + np.testing.assert_allclose( + fetches[0], expected_output_unpool, rtol=1e-05 + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py index 7c59d628e81b2be9be01a0b1fae3c4880b518c3c..00c48e0872df825f145d7cb2ce8ecede8c7f6564 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py @@ -27,8 +27,11 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): input_size = x.shape default_size = [] for d in range(len(kernel_size)): - default_size.append((input_size[-len(kernel_size) + d] - 1) * - stride[d] + kernel_size[d] - 2 * padding[d]) + default_size.append( + (input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) if output_size is None: ret = default_size else: @@ -36,11 +39,13 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): return ret -def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings, - output_size): +def unpool2dmax_forward_naive( + input, indices, ksize, strides, paddings, output_size +): s0, s1, s2, s3 = input.shape - output_size = _unpool_output_size(input, ksize, strides, paddings, - output_size) + output_size = _unpool_output_size( + input, ksize, strides, paddings, output_size + ) out_hsize = output_size[0] out_wsize = output_size[1] out = np.zeros((s0, s1, out_hsize, out_wsize)) @@ -51,44 +56,47 @@ def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings, index = indices[nidx, cidx, h, w] hidx = (index - index % out_wsize) // out_wsize widx = index % out_wsize - out[nidx, cidx, hidx, widx] = \ - input[nidx, cidx, h, w] + out[nidx, cidx, hidx, widx] = input[nidx, cidx, h, w] return out -def max_unpool2d_wrapper(x, - indices, - kernel_size, - stride=None, - padding=0, - output_size=None, - data_format="NCHW", - name=None): - out = paddle.nn.functional.max_unpool2d(x, - indices, - kernel_size, - stride=stride, - padding=padding, - data_format=data_format, - output_size=output_size, - name=name) +def max_unpool2d_wrapper( + x, + indices, + kernel_size, + stride=None, + padding=0, + output_size=None, + data_format="NCHW", + name=None, +): + out = paddle.nn.functional.max_unpool2d( + x, + indices, + kernel_size, + stride=stride, + padding=padding, + data_format=data_format, + output_size=output_size, + name=name, + ) return out class TestUnpoolOp(OpTest): - def setUp(self): self.op_type = "unpool" self.python_api = max_unpool2d_wrapper self.init_test_case() input = np.random.randint(0, 100, self.shape) nsize, csize, hsize, wsize = input.shape - self.output_size = _unpool_output_size(input, self.ksize, self.strides, - self.paddings, self.output_size) + self.output_size = _unpool_output_size( + input, self.ksize, self.strides, self.paddings, self.output_size + ) indices = np.random.permutation( - np.arange(0, self.output_size[0] * self.output_size[1]))[:hsize * - wsize] + np.arange(0, self.output_size[0] * self.output_size[1]) + )[: hsize * wsize] indices = np.reshape(indices, [hsize, wsize]) idx_list = [] for n in range(nsize): @@ -98,12 +106,18 @@ class TestUnpoolOp(OpTest): idx_list.append(c_list) indices = np.array(idx_list) - output = self.unpool2d_forward_naive(input, 
indices, self.ksize, \ - self.strides, self.paddings, self.output_size).astype("float64") + output = self.unpool2d_forward_naive( + input, + indices, + self.ksize, + self.strides, + self.paddings, + self.output_size, + ).astype("float64") self.inputs = { 'X': input.astype('float64'), - 'Indices': indices.astype('int32') + 'Indices': indices.astype('int32'), } self.attrs = { 'strides': self.strides, @@ -131,7 +145,6 @@ class TestUnpoolOp(OpTest): class TestUnpoolOpcase1(TestUnpoolOp): - def init_test_case(self): self.unpool2d_forward_naive = unpool2dmax_forward_naive self.unpooling_type = "max" @@ -143,7 +156,6 @@ class TestUnpoolOpcase1(TestUnpoolOp): class TestUnpoolOpOutputsize(TestUnpoolOp): - def init_test_case(self): self.unpool2d_forward_naive = unpool2dmax_forward_naive self.unpooling_type = "max" @@ -155,7 +167,6 @@ class TestUnpoolOpOutputsize(TestUnpoolOp): class TestUnpoolOpOutput(TestUnpoolOp): - def init_test_case(self): self.unpool2d_forward_naive = unpool2dmax_forward_naive self.unpooling_type = "max" @@ -167,7 +178,6 @@ class TestUnpoolOpOutput(TestUnpoolOp): class TestUnpoolOpException(unittest.TestCase): - def test_exception(self): import paddle.nn.functional as F import paddle @@ -185,29 +195,23 @@ class TestUnpoolOpException(unittest.TestCase): def data_format_error(): data = paddle.randint(shape=[1, 1, 3, 3]) indices = paddle.reshape(paddle.arange(4, 40), shape[1, 1, 3, 4]) - MaxPool2D = F.maxunpool2d(data, - indices, - kernel_size=2, - stride=2, - data_format="NHWC") + MaxPool2D = F.maxunpool2d( + data, indices, kernel_size=2, stride=2, data_format="NHWC" + ) def data_outputsize_error(): data = paddle.randint(shape=[1, 1, 3, 3]) indices = paddle.reshape(paddle.arange(4, 40), shape[1, 1, 3, 4]) - MaxPool2D = F.maxunpool2d(data, - indices, - kernel_size=2, - stride=2, - output_size=[5, 6, 7, 8]) + MaxPool2D = F.maxunpool2d( + data, indices, kernel_size=2, stride=2, output_size=[5, 6, 7, 8] + ) def data_outputsize_error2(): data = paddle.randint(shape=[1, 1, 3, 3]) indices = paddle.reshape(paddle.arange(4, 40), shape[1, 1, 3, 4]) - MaxPool2D = F.maxunpool2d(data, - indices, - kernel_size=2, - stride=2, - output_size=[100, 100]) + MaxPool2D = F.maxunpool2d( + data, indices, kernel_size=2, stride=2, output_size=[100, 100] + ) self.assertRaises(ValueError, indices_size_error) self.assertRaises(ValueError, indices_value_error) @@ -217,7 +221,6 @@ class TestUnpoolOpException(unittest.TestCase): class TestUnpoolOpAPI_dy(unittest.TestCase): - def test_case(self): import paddle import paddle.nn.functional as F @@ -230,28 +233,34 @@ class TestUnpoolOpAPI_dy(unittest.TestCase): else: place = core.CPUPlace() with fluid.dygraph.guard(place): - input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7, - 8], [9, 10, 11, 12], - [13, 14, 15, 16]]]]).astype("float32") + input_data = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype("float32") input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool2d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - out_pp = F.max_unpool2d(output, - indices, - kernel_size=2, - stride=2, - output_size=(5, 5)) + output, indices = F.max_pool2d( + input_x, kernel_size=2, stride=2, return_mask=True + ) + out_pp = F.max_unpool2d( + output, indices, kernel_size=2, stride=2, output_size=(5, 5) + ) output_np = output.numpy() indices_np = indices.numpy() - expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ - [2,2], [0,0], [5,5]).astype("float64") + expect_res = 
unpool2dmax_forward_naive( + output_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5] + ).astype("float64") np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_dy2(unittest.TestCase): - def test_case(self): import paddle import paddle.nn.functional as F @@ -264,28 +273,34 @@ class TestUnpoolOpAPI_dy2(unittest.TestCase): else: place = core.CPUPlace() with fluid.dygraph.guard(place): - input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7, - 8], [9, 10, 11, 12], - [13, 14, 15, 16]]]]).astype("float32") + input_data = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype("float32") input_x = paddle.to_tensor(input_data) - output, indices = F.max_pool2d(input_x, - kernel_size=2, - stride=2, - return_mask=True) - out_pp = F.max_unpool2d(output, - indices, - kernel_size=2, - stride=None, - output_size=(5, 5)) + output, indices = F.max_pool2d( + input_x, kernel_size=2, stride=2, return_mask=True + ) + out_pp = F.max_unpool2d( + output, indices, kernel_size=2, stride=None, output_size=(5, 5) + ) output_np = output.numpy() indices_np = indices.numpy() - expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ - [2,2], [0,0], [5,5]).astype("float64") + expect_res = unpool2dmax_forward_naive( + output_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5] + ).astype("float64") np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_dy3(unittest.TestCase): - def test_case(self): import paddle import paddle.fluid.core as core @@ -297,46 +312,54 @@ class TestUnpoolOpAPI_dy3(unittest.TestCase): else: place = core.CPUPlace() with fluid.dygraph.guard(place): - input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7, - 8], [9, 10, 11, 12], - [13, 14, 15, 16]]]]).astype("float32") + input_data = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype("float32") input_x = paddle.to_tensor(input_data) - Pool2d = paddle.nn.MaxPool2D(kernel_size=2, - stride=2, - return_mask=True) + Pool2d = paddle.nn.MaxPool2D( + kernel_size=2, stride=2, return_mask=True + ) UnPool = paddle.nn.MaxUnPool2D(kernel_size=2, stride=2) output, indices = Pool2d(input_x) out_pp = UnPool(output, indices) output_np = output.numpy() indices_np = indices.numpy() - expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ - [2,2], [0,0], [4,4]).astype("float64") + expect_res = unpool2dmax_forward_naive( + output_np, indices_np, [2, 2], [2, 2], [0, 0], [4, 4] + ).astype("float64") np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_st(unittest.TestCase): - def test_case(self): import paddle import paddle.nn.functional as F import paddle.fluid.core as core import paddle.fluid as fluid + paddle.enable_static() - input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], - [13, 14, 15, 16]]]]).astype("float32") + input_data = np.array( + [[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]] + ).astype("float32") x = fluid.data(name="x", shape=[1, 1, 4, 4], dtype="float32") - output, indices = F.max_pool2d(x, - kernel_size=2, - stride=2, - return_mask=True) - unpool_out = F.max_unpool2d(output, - indices, - kernel_size=2, - stride=None, - output_size=(5, 5)) + output, indices = F.max_pool2d( + x, kernel_size=2, stride=2, return_mask=True + ) + unpool_out = F.max_unpool2d( + output, indices, kernel_size=2, stride=None, output_size=(5, 5) + ) if core.is_compiled_with_cuda(): place = 
core.CUDAPlace(0) else: @@ -344,20 +367,22 @@ class TestUnpoolOpAPI_st(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - results = exe.run(paddle.fluid.default_main_program(),\ - feed={"x":input_data}, - fetch_list=[unpool_out], - return_numpy=True) + results = exe.run( + paddle.fluid.default_main_program(), + feed={"x": input_data}, + fetch_list=[unpool_out], + return_numpy=True, + ) - pool_out_np = np.array([[[[6., 8.], [14., 16.]]]]).astype("float32") + pool_out_np = np.array([[[[6.0, 8.0], [14.0, 16.0]]]]).astype("float32") indices_np = np.array([[[[5, 7], [13, 15]]]]).astype("int32") - expect_res =unpool2dmax_forward_naive(pool_out_np, indices_np, [2,2], \ - [2,2], [0,0], [5,5]).astype("float64") + expect_res = unpool2dmax_forward_naive( + pool_out_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5] + ).astype("float64") np.testing.assert_allclose(results[0], expect_res, rtol=1e-05) class TestOutputSizeTensor(UnittestBase): - def init_info(self): self.shapes = [[1, 3, 6, 6]] self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) @@ -394,23 +419,18 @@ class TestOutputSizeTensor(UnittestBase): def call_func(self, x): output_size = [paddle.assign([7]), paddle.assign([7])] - pool_out, indices = F.max_pool2d(x, - kernel_size=2, - stride=2, - padding=0, - return_mask=True) + pool_out, indices = F.max_pool2d( + x, kernel_size=2, stride=2, padding=0, return_mask=True + ) # pool_out shape: [1, 1, 6, 6], indices shape: [1, 1, 6, 6] - unpool_out = F.max_unpool2d(pool_out, - indices, - kernel_size=2, - padding=0, - output_size=output_size) + unpool_out = F.max_unpool2d( + pool_out, indices, kernel_size=2, padding=0, output_size=output_size + ) # unpool_out shape: [1, 1, 7, 7] return unpool_out class TestZOutputSizeTensor2(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -419,22 +439,17 @@ class TestZOutputSizeTensor2(unittest.TestCase): def test_dygraph(self): x = paddle.randn([1, 3, 6, 6]) - pool_out, indices = F.max_pool2d(x, - kernel_size=2, - stride=2, - padding=0, - return_mask=True) + pool_out, indices = F.max_pool2d( + x, kernel_size=2, stride=2, padding=0, return_mask=True + ) output_size = [paddle.assign([7]), paddle.assign([7])] - unpool_out = F.max_unpool2d(pool_out, - indices, - kernel_size=2, - padding=0, - output_size=output_size) + unpool_out = F.max_unpool2d( + pool_out, indices, kernel_size=2, padding=0, output_size=output_size + ) np.testing.assert_array_equal(unpool_out.shape, [1, 3, 7, 7]) class TestZOutputSizeTensor3(unittest.TestCase): - def setUp(self): paddle.disable_static() @@ -443,22 +458,18 @@ class TestZOutputSizeTensor3(unittest.TestCase): def test_dygraph(self): x = paddle.randn([1, 3, 6, 6]) - pool_out, indices = F.max_pool2d(x, - kernel_size=2, - stride=2, - padding=0, - return_mask=True) + pool_out, indices = F.max_pool2d( + x, kernel_size=2, stride=2, padding=0, return_mask=True + ) output_size = [ paddle.assign([1]), paddle.assign([1]), paddle.assign([7]), - paddle.assign([7]) + paddle.assign([7]), ] - unpool_out = F.max_unpool2d(pool_out, - indices, - kernel_size=2, - padding=0, - output_size=output_size) + unpool_out = F.max_unpool2d( + pool_out, indices, kernel_size=2, padding=0, output_size=output_size + ) np.testing.assert_array_equal(unpool_out.shape, [1, 3, 7, 7]) diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py index d585c8a3d0f788332de13c808383d080fdbca9e3..8de9185162a2bb3409bfc05b3a4890771af9b9dd 100755 
--- a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py @@ -25,7 +25,6 @@ paddle.enable_static() # Correct: General. class TestUnsqueezeOp(OpTest): - def setUp(self): self.init_test_case() self.op_type = "unsqueeze2" @@ -35,7 +34,7 @@ class TestUnsqueezeOp(OpTest): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float64") + "XShape": np.random.random(self.ori_shape).astype("float64"), } def test_check_output(self): @@ -55,16 +54,14 @@ class TestUnsqueezeOp(OpTest): # Correct: Single input index. class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -73,7 +70,6 @@ class TestUnsqueezeOp2(TestUnsqueezeOp): # Correct: There is duplicated axis. class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -82,7 +78,6 @@ class TestUnsqueezeOp3(TestUnsqueezeOp): # Correct: Reversed axes. class TestUnsqueezeOp4(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -90,15 +85,13 @@ class TestUnsqueezeOp4(TestUnsqueezeOp): class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () - self.axes = (-1, ) - self.new_shape = (1) + self.axes = (-1,) + self.new_shape = 1 class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () self.axes = (-1, 1) @@ -106,7 +99,6 @@ class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp): class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () self.axes = (0, 1, 2) @@ -115,7 +107,6 @@ class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp): # axes is a list(with tensor) class TestUnsqueezeOp_AxesTensorList(OpTest): - def setUp(self): self.init_test_case() self.op_type = "unsqueeze2" @@ -124,17 +115,18 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): axes_tensor_list = [] for index, ele in enumerate(self.axes): - axes_tensor_list.append(("axes" + str(index), np.ones( - (1)).astype('int32') * ele)) + axes_tensor_list.append( + ("axes" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { "X": np.random.random(self.ori_shape).astype("float64"), - "AxesTensorList": axes_tensor_list + "AxesTensorList": axes_tensor_list, } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float64") + "XShape": np.random.random(self.ori_shape).astype("float64"), } def test_check_output(self): @@ -153,15 +145,13 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): class TestUnsqueezeOp1_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueezeOp2_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -169,7 +159,6 @@ class TestUnsqueezeOp2_AxesTensorList(TestUnsqueezeOp_AxesTensorList): class TestUnsqueezeOp3_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -177,7 +166,6 @@ class 
TestUnsqueezeOp3_AxesTensorList(TestUnsqueezeOp_AxesTensorList): class TestUnsqueezeOp4_AxesTensorList(TestUnsqueezeOp_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -186,7 +174,6 @@ class TestUnsqueezeOp4_AxesTensorList(TestUnsqueezeOp_AxesTensorList): # axes is a Tensor class TestUnsqueezeOp_AxesTensor(OpTest): - def setUp(self): self.init_test_case() self.op_type = "unsqueeze2" @@ -195,12 +182,12 @@ class TestUnsqueezeOp_AxesTensor(OpTest): self.inputs = { "X": np.random.random(self.ori_shape).astype("float64"), - "AxesTensor": np.array(self.axes).astype("int32") + "AxesTensor": np.array(self.axes).astype("int32"), } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype("float64") + "XShape": np.random.random(self.ori_shape).astype("float64"), } def test_check_output(self): @@ -219,15 +206,13 @@ class TestUnsqueezeOp_AxesTensor(OpTest): class TestUnsqueezeOp1_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueezeOp2_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -235,7 +220,6 @@ class TestUnsqueezeOp2_AxesTensor(TestUnsqueezeOp_AxesTensor): class TestUnsqueezeOp3_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -243,7 +227,6 @@ class TestUnsqueezeOp3_AxesTensor(TestUnsqueezeOp_AxesTensor): class TestUnsqueezeOp4_AxesTensor(TestUnsqueezeOp_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -252,7 +235,6 @@ class TestUnsqueezeOp4_AxesTensor(TestUnsqueezeOp_AxesTensor): # test api class TestUnsqueezeAPI(unittest.TestCase): - def setUp(self): self.executed_api() @@ -264,12 +246,12 @@ class TestUnsqueezeAPI(unittest.TestCase): x = paddle.static.data(name='x', shape=[3, 2, 5], dtype="float64") positive_3_int32 = fluid.layers.fill_constant([1], "int32", 3) positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1) - axes_tensor_int32 = paddle.static.data(name='axes_tensor_int32', - shape=[3], - dtype="int32") - axes_tensor_int64 = paddle.static.data(name='axes_tensor_int64', - shape=[3], - dtype="int64") + axes_tensor_int32 = paddle.static.data( + name='axes_tensor_int32', shape=[3], dtype="int32" + ) + axes_tensor_int64 = paddle.static.data( + name='axes_tensor_int64', shape=[3], dtype="int64" + ) out_1 = self.unsqueeze(x, axis=[3, 1, 1]) out_2 = self.unsqueeze(x, axis=[positive_3_int32, positive_1_int64, 1]) @@ -283,9 +265,10 @@ class TestUnsqueezeAPI(unittest.TestCase): feed={ "x": input, "axes_tensor_int32": np.array([3, 1, 1]).astype("int32"), - "axes_tensor_int64": np.array([3, 1, 1]).astype("int64") + "axes_tensor_int64": np.array([3, 1, 1]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5]) + fetch_list=[out_1, out_2, out_3, out_4, out_5], + ) assert np.array_equal(res_1, input.reshape([3, 1, 1, 2, 5, 1])) assert np.array_equal(res_2, input.reshape([3, 1, 1, 2, 5, 1])) @@ -294,7 +277,6 @@ class TestUnsqueezeAPI(unittest.TestCase): assert np.array_equal(res_5, input.reshape([3, 1, 1, 2, 5, 1])) def test_error(self): - def test_axes_type(): x2 = paddle.static.data(name="x2", shape=[2, 25], dtype="int32") self.unsqueeze(x2, axis=2.1) @@ -303,13 +285,11 @@ class TestUnsqueezeAPI(unittest.TestCase): class 
TestUnsqueezeInplaceAPI(TestUnsqueezeAPI): - def executed_api(self): self.unsqueeze = paddle.unsqueeze_ class TestUnsqueezeAPI_ZeroDim(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index c1261d0485799f0ffabe567e7fde097e10d0d787..b73c2a3906ff48cee4cbbcc99c44ba98d7c752e8 100755 --- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -29,7 +29,6 @@ paddle.enable_static() # Correct: General. class TestUnsqueezeOp(OpTest): - def setUp(self): self.init_test_case() self.op_type = "unsqueeze" @@ -53,7 +52,6 @@ class TestUnsqueezeOp(OpTest): class TestUnsqueezeBF16Op(OpTest): - def setUp(self): self.init_test_case() self.op_type = "unsqueeze" @@ -81,16 +79,14 @@ class TestUnsqueezeBF16Op(OpTest): # Correct: Single input index. class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -99,7 +95,6 @@ class TestUnsqueezeOp2(TestUnsqueezeOp): # Correct: There is duplicated axis. class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -108,7 +103,6 @@ class TestUnsqueezeOp3(TestUnsqueezeOp): # Correct: Reversed axes. class TestUnsqueezeOp4(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -116,15 +110,13 @@ class TestUnsqueezeOp4(TestUnsqueezeOp): class TestUnsqueezeOp_ZeroDim1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () - self.axes = (-1, ) - self.new_shape = (1) + self.axes = (-1,) + self.new_shape = 1 class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () self.axes = (-1, 1) @@ -132,7 +124,6 @@ class TestUnsqueezeOp_ZeroDim2(TestUnsqueezeOp): class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = () self.axes = (0, 1, 2) @@ -140,44 +131,45 @@ class TestUnsqueezeOp_ZeroDim3(TestUnsqueezeOp): class API_TestUnsqueeze(unittest.TestCase): - def test_out(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64') result_squeeze = paddle.unsqueeze(data1, axis=[1]) place = paddle.CPUPlace() exe = paddle.static.Executor(place) input1 = np.random.random([5, 1, 10]).astype('float64') input = np.squeeze(input1, axis=1) - result, = exe.run(feed={"data1": input}, - fetch_list=[result_squeeze]) + (result,) = exe.run( + feed={"data1": input}, fetch_list=[result_squeeze] + ) np.testing.assert_allclose(input1, result, rtol=1e-05) class TestUnsqueezeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): # The type of axis in split_op should be int or Variable. 
def test_axes_type(): - x6 = paddle.static.data(shape=[-1, 10], - dtype='float16', - name='x3') + x6 = paddle.static.data( + shape=[-1, 10], dtype='float16', name='x3' + ) paddle.unsqueeze(x6, axis=3.2) self.assertRaises(TypeError, test_axes_type) class API_TestUnsqueeze2(unittest.TestCase): - def test_out(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64') data2 = paddle.static.data('data2', shape=[1], dtype='int32') result_squeeze = paddle.unsqueeze(data1, axis=data2) @@ -186,20 +178,19 @@ class API_TestUnsqueeze2(unittest.TestCase): input1 = np.random.random([5, 1, 10]).astype('float64') input2 = np.array([1]).astype('int32') input = np.squeeze(input1, axis=1) - result1, = exe.run(feed={ - "data1": input, - "data2": input2 - }, - fetch_list=[result_squeeze]) + (result1,) = exe.run( + feed={"data1": input, "data2": input2}, + fetch_list=[result_squeeze], + ) np.testing.assert_allclose(input1, result1, rtol=1e-05) class API_TestUnsqueeze3(unittest.TestCase): - def test_out(self): paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64') data2 = paddle.static.data('data2', shape=[1], dtype='int32') result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3]) @@ -208,17 +199,15 @@ class API_TestUnsqueeze3(unittest.TestCase): input1 = np.random.random([5, 1, 10, 1]).astype('float64') input2 = np.array([1]).astype('int32') input = np.squeeze(input1) - result1, = exe.run(feed={ - "data1": input, - "data2": input2 - }, - fetch_list=[result_squeeze]) + (result1,) = exe.run( + feed={"data1": input, "data2": input2}, + fetch_list=[result_squeeze], + ) np.testing.assert_array_equal(input1, result1) self.assertEqual(input1.shape, result1.shape) class API_TestDyUnsqueeze(unittest.TestCase): - def test_out(self): paddle.disable_static() input_1 = np.random.random([5, 1, 10]).astype("int32") @@ -231,7 +220,6 @@ class API_TestDyUnsqueeze(unittest.TestCase): class API_TestDyUnsqueeze2(unittest.TestCase): - def test_out(self): paddle.disable_static() input1 = np.random.random([5, 10]).astype("int32") @@ -244,7 +232,6 @@ class API_TestDyUnsqueeze2(unittest.TestCase): class API_TestDyUnsqueezeAxisTensor(unittest.TestCase): - def test_out(self): paddle.disable_static() input1 = np.random.random([5, 10]).astype("int32") @@ -258,7 +245,6 @@ class API_TestDyUnsqueezeAxisTensor(unittest.TestCase): class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase): - def test_out(self): paddle.disable_static() input1 = np.random.random([5, 10]).astype("int32") @@ -268,15 +254,14 @@ class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase): input = paddle.to_tensor(input1) output = paddle.unsqueeze( paddle.to_tensor(input1), - axis=[paddle.to_tensor([1]), - paddle.to_tensor([2])]) + axis=[paddle.to_tensor([1]), paddle.to_tensor([2])], + ) out_np = output.numpy() np.testing.assert_array_equal(out1, out_np) self.assertEqual(out1.shape, out_np.shape) class API_TestDygraphUnSqueeze(unittest.TestCase): - def setUp(self): self.executed_api() @@ -330,13 +315,11 @@ class API_TestDygraphUnSqueeze(unittest.TestCase): class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze): - def 
executed_api(self): self.unsqueeze = paddle.unsqueeze_ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): - def unsqueeze_wrapper(self, x): return paddle.unsqueeze(x[0], [0, 2]) @@ -351,17 +334,13 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): out = paddle.unsqueeze(data, [0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.double_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.double_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.double_grad_check_for_dygraph(self.unsqueeze_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.double_grad_check_for_dygraph( + self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() @@ -373,7 +352,6 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): class TestUnsqueezeTripleGradCheck(unittest.TestCase): - def unsqueeze_wrapper(self, x): return paddle.unsqueeze(x[0], [0, 2]) @@ -388,17 +366,13 @@ class TestUnsqueezeTripleGradCheck(unittest.TestCase): out = paddle.unsqueeze(data, [0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) - gradient_checker.triple_grad_check([data], - out, - x_init=[data_arr], - place=place, - eps=eps) + gradient_checker.triple_grad_check( + [data], out, x_init=[data_arr], place=place, eps=eps + ) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) - gradient_checker.triple_grad_check_for_dygraph(self.unsqueeze_wrapper, - [data], - out, - x_init=[data_arr], - place=place) + gradient_checker.triple_grad_check_for_dygraph( + self.unsqueeze_wrapper, [data], out, x_init=[data_arr], place=place + ) def test_grad(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_unstack_op.py b/python/paddle/fluid/tests/unittests/test_unstack_op.py index bb28bdeba79d3e5df1b3028288e114f0c9feaa9f..87095d8eeac24e676ea7dfb99ab16405be82cf44 100755 --- a/python/paddle/fluid/tests/unittests/test_unstack_op.py +++ b/python/paddle/fluid/tests/unittests/test_unstack_op.py @@ -19,7 +19,6 @@ import paddle class TestUnStackOpBase(OpTest): - def initDefaultParameters(self): self.input_dim = (5, 6, 7) self.axis = 0 @@ -64,25 +63,21 @@ class TestUnStackOpBase(OpTest): class TestStackOp3(TestUnStackOpBase): - def initParameters(self): self.axis = -1 class TestStackOp4(TestUnStackOpBase): - def initParameters(self): self.axis = -3 class TestStackOp5(TestUnStackOpBase): - def initParameters(self): self.axis = 1 class TestStackOp6(TestUnStackOpBase): - def initParameters(self): self.axis = 2 diff --git a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py index 9032824950196bb5cc82e0670059885c57766b94..7f1e6c8614f88caa6612dfa9cbcc99877d68a79c 100644 --- a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py +++ b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py @@ -19,31 +19,43 @@ import paddle.fluid as fluid import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn -def update_loss_scaling_wrapper(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - stop_update=False): - amp_nn.update_loss_scaling([x], found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps, - incr_every_n_steps, decr_every_n_nan_or_inf, - 
incr_ratio, decr_ratio, stop_update) +def update_loss_scaling_wrapper( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + stop_update=False, +): + amp_nn.update_loss_scaling( + [x], + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + stop_update, + ) return x, prev_loss_scaling, num_good_steps, num_bad_steps class TestUpdateLossScalingOp(OpTest): - def setUp(self): self.op_type = "update_loss_scaling" self.init() self.python_api = update_loss_scaling_wrapper self.python_out_sig = [ - "out0", "LossScaling", "OutGoodSteps", "OutBadSteps" + "out0", + "LossScaling", + "OutGoodSteps", + "OutBadSteps", ] found_inf = np.array([False], dtype=np.bool_) x = np.random.random((1024, 1024)).astype(self.dtype) @@ -53,14 +65,14 @@ class TestUpdateLossScalingOp(OpTest): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', x)], 'LossScaling': self.prev_loss_scaling * self.incr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def init(self): @@ -84,13 +96,15 @@ class TestUpdateLossScalingOp(OpTest): class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): - def setUp(self): self.op_type = "update_loss_scaling" self.init() self.python_api = update_loss_scaling_wrapper self.python_out_sig = [ - "out0", "LossScaling", "OutGoodSteps", "OutBadSteps" + "out0", + "LossScaling", + "OutGoodSteps", + "OutBadSteps", ] found_inf = np.array([True], dtype=np.bool_) x = np.random.random((1024, 1024)).astype(self.dtype) @@ -104,14 +118,14 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, 'InBadSteps': self.num_bad_steps, - 'StopUpdate': self.stop_update + 'StopUpdate': self.stop_update, } self.outputs = { 'Out': [('out0', np.zeros_like(x))], 'LossScaling': self.prev_loss_scaling * self.decr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def test_check_output(self): @@ -119,21 +133,20 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): class TestUpdateLossScalingLayer(unittest.TestCase): - def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -147,33 +160,41 @@ class TestUpdateLossScalingLayer(unittest.TestCase): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - 
num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], a_v) assert np.array_equal(result_v[1], b_v) assert np.array_equal(result_v[0], result_v[2]) @@ -188,15 +209,15 @@ class TestUpdateLossScalingLayer(unittest.TestCase): b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -213,33 +234,41 @@ class TestUpdateLossScalingLayer(unittest.TestCase): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], np.zeros_like(a_v)) assert np.array_equal(result_v[1], np.zeros_like(b_v)) assert 
np.array_equal(result_v[2], np.zeros_like(a_v)) diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index 7804aec60c995490268652f74c991ee4512876c0..d1b820bd74c1d9e407eef080d06664ff41bb7a8a 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -23,14 +23,12 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class TestVarBase(unittest.TestCase): - def setUp(self): self.shape = [512, 1234] self.dtype = np.float32 self.array = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) def func_test_to_tensor(self): - def check_with_place(place): with fluid.dygraph.guard(): paddle.set_default_dtype('float32') @@ -43,11 +41,14 @@ class TestVarBase(unittest.TestCase): self.assertEqual(str(x.place), str(y.place)) # set_default_dtype should not take effect on numpy - x = paddle.to_tensor(np.array([1.2]).astype('float16'), - place=place, - stop_gradient=False) - np.testing.assert_array_equal(x.numpy(), - np.array([1.2], 'float16')) + x = paddle.to_tensor( + np.array([1.2]).astype('float16'), + place=place, + stop_gradient=False, + ) + np.testing.assert_array_equal( + x.numpy(), np.array([1.2], 'float16') + ) self.assertEqual(x.dtype, core.VarDesc.VarType.FP16) # set_default_dtype take effect on int @@ -56,17 +57,20 @@ class TestVarBase(unittest.TestCase): # set_default_dtype take effect on float x = paddle.to_tensor(1.2, place=place, stop_gradient=False) - np.testing.assert_array_equal(x.numpy(), - np.array([1.2]).astype('float32')) + np.testing.assert_array_equal( + x.numpy(), np.array([1.2]).astype('float32') + ) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) clone_x = x.clone() - np.testing.assert_array_equal(clone_x.numpy(), - np.array([1.2]).astype('float32')) + np.testing.assert_array_equal( + clone_x.numpy(), np.array([1.2]).astype('float32') + ) self.assertEqual(clone_x.dtype, core.VarDesc.VarType.FP32) y = clone_x**2 y.backward() - np.testing.assert_array_equal(x.grad.numpy(), - np.array([2.4]).astype('float32')) + np.testing.assert_array_equal( + x.grad.numpy(), np.array([2.4]).astype('float32') + ) y = x.cpu() self.assertEqual(y.place.__repr__(), "Place(cpu)") if core.is_compiled_with_cuda(): @@ -104,24 +108,21 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(x.numpy(), [1 + 2j]) self.assertEqual(x.dtype, core.VarDesc.VarType.COMPLEX128) - x = paddle.to_tensor(1, - dtype='float32', - place=place, - stop_gradient=False) + x = paddle.to_tensor( + 1, dtype='float32', place=place, stop_gradient=False + ) np.testing.assert_array_equal(x.numpy(), [1.0]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) self.assertEqual(x.shape, [1]) self.assertEqual(x.stop_gradient, False) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) - x = paddle.to_tensor((1, 2), - dtype='float32', - place=place, - stop_gradient=False) - x = paddle.to_tensor([1, 2], - dtype='float32', - place=place, - stop_gradient=False) + x = paddle.to_tensor( + (1, 2), dtype='float32', place=place, stop_gradient=False + ) + x = paddle.to_tensor( + [1, 2], dtype='float32', place=place, stop_gradient=False + ) np.testing.assert_array_equal(x.numpy(), [1.0, 2.0]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) self.assertEqual(x.grad, None) @@ -129,10 +130,12 @@ class TestVarBase(unittest.TestCase): self.assertEqual(x.stop_gradient, False) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) - x = paddle.to_tensor(self.array, - 
dtype='float32', - place=place, - stop_gradient=False) + x = paddle.to_tensor( + self.array, + dtype='float32', + place=place, + stop_gradient=False, + ) np.testing.assert_array_equal(x.numpy(), self.array) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) self.assertEqual(x.shape, self.shape) @@ -149,9 +152,9 @@ class TestVarBase(unittest.TestCase): z = x + y np.testing.assert_array_equal(z.numpy(), 2 * self.array) - x = paddle.to_tensor([1 + 2j, 1 - 2j], - dtype='complex64', - place=place) + x = paddle.to_tensor( + [1 + 2j, 1 - 2j], dtype='complex64', place=place + ) y = paddle.to_tensor(x) np.testing.assert_array_equal(x.numpy(), [1 + 2j, 1 - 2j]) self.assertEqual(y.dtype, core.VarDesc.VarType.COMPLEX64) @@ -172,8 +175,9 @@ class TestVarBase(unittest.TestCase): self.assertTrue(isinstance(x.item(5), float)) self.assertTrue(isinstance(x.item(1, 0, 1), float)) self.assertEqual(x.item(5), x.item(1, 0, 1)) - np.testing.assert_array_equal(x.item(1, 0, 1), - x.numpy().item(1, 0, 1)) + np.testing.assert_array_equal( + x.item(1, 0, 1), x.numpy().item(1, 0, 1) + ) x = paddle.to_tensor([[1.111111, 2.222222, 3.333333]]) self.assertEqual(x.item(0, 2), x.item(2)) @@ -404,16 +408,18 @@ class TestVarBase(unittest.TestCase): y = x + 1 self.assertTrue(y.is_leaf) - x = paddle.to_tensor(np.random.uniform(-1, 1, size=[10, 10]), - stop_gradient=False) + x = paddle.to_tensor( + np.random.uniform(-1, 1, size=[10, 10]), stop_gradient=False + ) self.assertTrue(x.is_leaf) y = x + 1 self.assertFalse(y.is_leaf) linear = paddle.nn.Linear(10, 10) - input = paddle.to_tensor(np.random.uniform( - -1, 1, size=[10, 10]).astype('float32'), - stop_gradient=False) + input = paddle.to_tensor( + np.random.uniform(-1, 1, size=[10, 10]).astype('float32'), + stop_gradient=False, + ) self.assertTrue(input.is_leaf) out = linear(input) @@ -432,8 +438,9 @@ class TestVarBase(unittest.TestCase): detach_x = x.detach() self.assertTrue(detach_x.stop_gradient, True) - cmp_float = np.allclose if core.is_compiled_with_rocm( - ) else np.array_equal + cmp_float = ( + np.allclose if core.is_compiled_with_rocm() else np.array_equal + ) detach_x[:] = 10.0 self.assertTrue(cmp_float(x.numpy(), [10.0])) @@ -442,7 +449,9 @@ class TestVarBase(unittest.TestCase): self.assertTrue(cmp_float(x.grad.numpy(), [20.0])) self.assertEqual(detach_x.grad, None) - detach_x.stop_gradient = False # Set stop_gradient to be False, supported auto-grad + detach_x.stop_gradient = ( + False # Set stop_gradient to be False, supported auto-grad + ) z = 3 * detach_x**2 z.backward() self.assertTrue(cmp_float(x.grad.numpy(), [20.0])) @@ -492,14 +501,15 @@ class TestVarBase(unittest.TestCase): else: empty_var = core.eager.Tensor() empty_var_copy = copy.deepcopy(empty_var) - self.assertEqual(empty_var.stop_gradient, - empty_var_copy.stop_gradient) + self.assertEqual( + empty_var.stop_gradient, empty_var_copy.stop_gradient + ) self.assertEqual(empty_var.persistable, empty_var_copy.persistable) self.assertEqual(empty_var.type, empty_var_copy.type) self.assertEqual(empty_var.dtype, empty_var_copy.dtype) - x = paddle.to_tensor([2.], stop_gradient=False) - y = paddle.to_tensor([3.], stop_gradient=False) + x = paddle.to_tensor([2.0], stop_gradient=False) + y = paddle.to_tensor([3.0], stop_gradient=False) z = x * y memo = {} x_copy = copy.deepcopy(x, memo) @@ -516,7 +526,7 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(x.numpy(), [2.0]) with self.assertRaises(ValueError): - x_copy[:] = 5. 
+ x_copy[:] = 5.0 with self.assertRaises(RuntimeError): copy.deepcopy(z) @@ -528,17 +538,26 @@ class TestVarBase(unittest.TestCase): # test copy selected rows if _in_legacy_dygraph(): - x = core.VarBase(core.VarDesc.VarType.FP32, [3, 100], - "selected_rows", - core.VarDesc.VarType.SELECTED_ROWS, True) + x = core.VarBase( + core.VarDesc.VarType.FP32, + [3, 100], + "selected_rows", + core.VarDesc.VarType.SELECTED_ROWS, + True, + ) else: - x = core.eager.Tensor(core.VarDesc.VarType.FP32, [3, 100], - "selected_rows", - core.VarDesc.VarType.SELECTED_ROWS, True) + x = core.eager.Tensor( + core.VarDesc.VarType.FP32, + [3, 100], + "selected_rows", + core.VarDesc.VarType.SELECTED_ROWS, + True, + ) selected_rows = x.value().get_selected_rows() - selected_rows.get_tensor().set(np.random.rand(3, 100), - core.CPUPlace()) + selected_rows.get_tensor().set( + np.random.rand(3, 100), core.CPUPlace() + ) selected_rows.set_height(10) selected_rows.set_rows([3, 5, 7]) x_copy = copy.deepcopy(x) @@ -549,12 +568,14 @@ class TestVarBase(unittest.TestCase): self.assertEqual(x_copy.dtype, x.dtype) copy_selected_rows = x_copy.value().get_selected_rows() - self.assertEqual(copy_selected_rows.height(), - selected_rows.height()) + self.assertEqual( + copy_selected_rows.height(), selected_rows.height() + ) self.assertEqual(copy_selected_rows.rows(), selected_rows.rows()) np.testing.assert_array_equal( np.array(copy_selected_rows.get_tensor()), - np.array(selected_rows.get_tensor())) + np.array(selected_rows.get_tensor()), + ) def test_deep_copy(self): with _test_eager_guard(): @@ -658,8 +679,9 @@ class TestVarBase(unittest.TestCase): def func_test_block(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) - self.assertEqual(var.block, - fluid.default_main_program().global_block()) + self.assertEqual( + var.block, fluid.default_main_program().global_block() + ) def test_block(self): with _test_eager_guard(): @@ -668,7 +690,8 @@ class TestVarBase(unittest.TestCase): def _test_slice(self): w = fluid.dygraph.to_variable( - np.random.random((784, 100, 100)).astype('float64')) + np.random.random((784, 100, 100)).astype('float64') + ) for i in range(3): nw = w[i] @@ -691,10 +714,13 @@ class TestVarBase(unittest.TestCase): nw = w[:, :, :-1] self.assertEqual((784, 100, 99), tuple(nw.shape)) - tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], [16, 17, 18]], - [[19, 20, 21], [22, 23, 24], - [25, 26, 27]]]).astype('float32') + tensor_array = np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]], + ] + ).astype('float32') var = fluid.dygraph.to_variable(tensor_array) var1 = var[0, 1, 1] var2 = var[1:] @@ -717,8 +743,25 @@ class TestVarBase(unittest.TestCase): var18 = var[:, 1:1:2] vars = [ - var, var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, - var11, var12, var13, var14, var15, var16, var17, var18 + var, + var1, + var2, + var3, + var4, + var5, + var6, + var7, + var8, + var9, + var10, + var11, + var12, + var13, + var14, + var15, + var16, + var17, + var18, ] local_out = [var.numpy() for var in vars] @@ -728,32 +771,41 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) np.testing.assert_array_equal( - local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1]) + local_out[6], tensor_array.reshape((3, -1, 3))[:, :, -1] + ) 
np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) np.testing.assert_array_equal(local_out[8], tensor_array[:1, :1, :1]) np.testing.assert_array_equal(local_out[9], tensor_array[:-1, :-1, :-1]) - np.testing.assert_array_equal(local_out[10], - tensor_array[::-1, :1, :-1]) - np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, - -1:]) - np.testing.assert_array_equal(local_out[12], tensor_array[1:2, - 2:, ::-1]) - np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, - -2:-1]) - np.testing.assert_array_equal(local_out[14], tensor_array[1:-1, - 0:2, ::-1]) - np.testing.assert_array_equal(local_out[15], - tensor_array[::-1, ::-1, ::-1]) + np.testing.assert_array_equal( + local_out[10], tensor_array[::-1, :1, :-1] + ) + np.testing.assert_array_equal( + local_out[11], tensor_array[:-1, ::-1, -1:] + ) + np.testing.assert_array_equal( + local_out[12], tensor_array[1:2, 2:, ::-1] + ) + np.testing.assert_array_equal( + local_out[13], tensor_array[2:10, 2:, -2:-1] + ) + np.testing.assert_array_equal( + local_out[14], tensor_array[1:-1, 0:2, ::-1] + ) + np.testing.assert_array_equal( + local_out[15], tensor_array[::-1, ::-1, ::-1] + ) np.testing.assert_array_equal(local_out[16], tensor_array[-4:4]) np.testing.assert_array_equal(local_out[17], tensor_array[:, 0, 0:0]) np.testing.assert_array_equal(local_out[18], tensor_array[:, 1:1:2]) def _test_slice_for_tensor_attr(self): - tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], [16, 17, 18]], - [[19, 20, 21], [22, 23, 24], - [25, 26, 27]]]).astype('float32') + tensor_array = np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]], + ] + ).astype('float32') var = paddle.to_tensor(tensor_array) @@ -782,8 +834,23 @@ class TestVarBase(unittest.TestCase): var16 = var[-4:4] vars = [ - var, var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, - var11, var12, var13, var14, var15, var16 + var, + var1, + var2, + var3, + var4, + var5, + var6, + var7, + var8, + var9, + var10, + var11, + var12, + var13, + var14, + var15, + var16, ] local_out = [var.numpy() for var in vars] @@ -793,23 +860,29 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) np.testing.assert_array_equal( - local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1]) + local_out[6], tensor_array.reshape((3, -1, 3))[:, :, -1] + ) np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) np.testing.assert_array_equal(local_out[8], tensor_array[:1, :1, :1]) np.testing.assert_array_equal(local_out[9], tensor_array[:-1, :-1, :-1]) - np.testing.assert_array_equal(local_out[10], - tensor_array[::-1, :1, :-1]) - np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, - -1:]) - np.testing.assert_array_equal(local_out[12], tensor_array[1:2, - 2:, ::-1]) - np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, - -2:-1]) - np.testing.assert_array_equal(local_out[14], tensor_array[1:-1, - 0:2, ::-1]) - np.testing.assert_array_equal(local_out[15], - tensor_array[::-1, ::-1, ::-1]) + np.testing.assert_array_equal( + local_out[10], tensor_array[::-1, :1, :-1] + ) + np.testing.assert_array_equal( + local_out[11], tensor_array[:-1, ::-1, -1:] + ) + np.testing.assert_array_equal( + local_out[12], tensor_array[1:2, 2:, ::-1] + ) + np.testing.assert_array_equal( + local_out[13], 
tensor_array[2:10, 2:, -2:-1] + ) + np.testing.assert_array_equal( + local_out[14], tensor_array[1:-1, 0:2, ::-1] + ) + np.testing.assert_array_equal( + local_out[15], tensor_array[::-1, ::-1, ::-1] + ) np.testing.assert_array_equal(local_out[16], tensor_array[-4:4]) def _test_for_getitem_ellipsis_index(self): @@ -851,8 +924,9 @@ class TestVarBase(unittest.TestCase): # test 1 dim tensor var_one_dim = paddle.to_tensor([1, 2, 3, 4]) - np.testing.assert_array_equal(var_one_dim[..., 0].numpy(), - np.array([1])) + np.testing.assert_array_equal( + var_one_dim[..., 0].numpy(), np.array([1]) + ) def _test_none_index(self): shape = (8, 64, 5, 256) @@ -883,8 +957,9 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(var[6], np_value[None, 2, None, 1]) np.testing.assert_array_equal(var[7], np_value[None]) np.testing.assert_array_equal(var[8], np_value[0, 0, None, 0, 0, None]) - np.testing.assert_array_equal(var[9], np_value[None, None, 0, ..., - None]) + np.testing.assert_array_equal( + var[9], np_value[None, None, 0, ..., None] + ) np.testing.assert_array_equal(var[10], np_value[..., None, :, None]) # TODO(zyfncg) there is a bug of dimensions when slice step > 1 and @@ -896,18 +971,25 @@ class TestVarBase(unittest.TestCase): shape = (4, 2, 5, 64) np_value = np.random.random(shape).astype('float32') var_tensor = paddle.to_tensor(np_value) - index = [[True, True, True, True], [True, False, True, True], - [True, False, False, True], [False, 0, 1, True, True], - [False, False, False, False]] - index2d = np.array([[True, True], [False, False], [True, False], - [True, True]]) + index = [ + [True, True, True, True], + [True, False, True, True], + [True, False, False, True], + [False, 0, 1, True, True], + [False, False, False, False], + ] + index2d = np.array( + [[True, True], [False, False], [True, False], [True, True]] + ) tensor_index = paddle.to_tensor(index2d) var = [ - var_tensor[index[0]].numpy(), var_tensor[index[1]].numpy(), - var_tensor[index[2]].numpy(), var_tensor[index[3]].numpy(), + var_tensor[index[0]].numpy(), + var_tensor[index[1]].numpy(), + var_tensor[index[2]].numpy(), + var_tensor[index[3]].numpy(), var_tensor[paddle.to_tensor(index[0])].numpy(), var_tensor[tensor_index].numpy(), - var_tensor[paddle.to_tensor(index[4])].numpy() + var_tensor[paddle.to_tensor(index[4])].numpy(), ] np.testing.assert_array_equal(var[0], np_value[index[0]]) np.testing.assert_array_equal(var[1], np_value[index[1]]) @@ -916,10 +998,12 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(var[4], np_value[index[0]]) np.testing.assert_array_equal(var[5], np_value[index2d]) np.testing.assert_array_equal(var[6], np_value[index[4]]) - np.testing.assert_array_equal(var_tensor[var_tensor > 0.67], - np_value[np_value > 0.67]) - np.testing.assert_array_equal(var_tensor[var_tensor < 0.55], - np_value[np_value < 0.55]) + np.testing.assert_array_equal( + var_tensor[var_tensor > 0.67], np_value[np_value > 0.67] + ) + np.testing.assert_array_equal( + var_tensor[var_tensor < 0.55], np_value[np_value < 0.55] + ) with self.assertRaises(ValueError): var_tensor[[False, False, False, False]] @@ -953,13 +1037,16 @@ class TestVarBase(unittest.TestCase): t = paddle.to_tensor(array) np.testing.assert_array_equal(t[np.longlong(0)].numpy(), array[0]) np.testing.assert_array_equal( - t[np.longlong(0):np.longlong(4):np.longlong(2)].numpy(), - array[0:4:2]) + t[np.longlong(0) : np.longlong(4) : np.longlong(2)].numpy(), + array[0:4:2], + ) np.testing.assert_array_equal(t[np.int64(0)].numpy(), array[0]) 
np.testing.assert_array_equal( - t[np.int32(1):np.int32(4):np.int32(2)].numpy(), array[1:4:2]) + t[np.int32(1) : np.int32(4) : np.int32(2)].numpy(), array[1:4:2] + ) np.testing.assert_array_equal( - t[np.int16(0):np.int16(4):np.int16(2)].numpy(), array[0:4:2]) + t[np.int16(0) : np.int16(4) : np.int16(2)].numpy(), array[0:4:2] + ) def _test_list_index(self): # case1: @@ -971,13 +1058,18 @@ class TestVarBase(unittest.TestCase): np.testing.assert_array_equal(x[py_idx].numpy(), array[py_idx]) # case2: tensor_x = paddle.to_tensor( - np.zeros(12).reshape(2, 6).astype(np.float32)) + np.zeros(12).reshape(2, 6).astype(np.float32) + ) tensor_y1 = paddle.zeros([1], dtype='int32') + 2 tensor_y2 = paddle.zeros([1], dtype='int32') + 5 tensor_x[:, tensor_y1:tensor_y2] = 42 res = tensor_x.numpy() - exp = np.array([[0., 0., 42., 42., 42., 0.], - [0., 0., 42., 42., 42., 0.]]) + exp = np.array( + [ + [0.0, 0.0, 42.0, 42.0, 42.0, 0.0], + [0.0, 0.0, 42.0, 42.0, 42.0, 0.0], + ] + ) np.testing.assert_array_equal(res, exp) # case3: @@ -1019,8 +1111,9 @@ class TestVarBase(unittest.TestCase): def func_test_var_base_to_np(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) - np.testing.assert_array_equal(var.numpy(), - fluid.framework._var_base_to_np(var)) + np.testing.assert_array_equal( + var.numpy(), fluid.framework._var_base_to_np(var) + ) def test_var_base_to_np(self): with _test_eager_guard(): @@ -1031,8 +1124,9 @@ class TestVarBase(unittest.TestCase): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) np.testing.assert_array_equal(var.numpy(), np.array(var)) - np.testing.assert_array_equal(var.numpy(), - np.array(var, dtype=np.float32)) + np.testing.assert_array_equal( + var.numpy(), np.array(var, dtype=np.float32) + ) def test_var_base_as_np(self): with _test_eager_guard(): @@ -1081,7 +1175,9 @@ class TestVarBase(unittest.TestCase): param_attr=fluid.ParamAttr( learning_rate=0.001, do_model_average=True, - regularizer=fluid.regularizer.L1Decay())) + regularizer=fluid.regularizer.L1Decay(), + ), + ) weight = fc.parameters()[0] static_param = weight._to_static_var() self._assert_to_static(weight, static_param, True) @@ -1097,14 +1193,18 @@ class TestVarBase(unittest.TestCase): self.assertTrue(static_var.persistable, True) if isinstance(var_base, fluid.framework.ParamBase): for attr in ['trainable', 'is_distributed', 'do_model_average']: - self.assertEqual(getattr(var_base, attr), - getattr(static_var, attr)) + self.assertEqual( + getattr(var_base, attr), getattr(static_var, attr) + ) - self.assertEqual(static_var.optimize_attr['learning_rate'], - 0.001) + self.assertEqual( + static_var.optimize_attr['learning_rate'], 0.001 + ) self.assertTrue( - isinstance(static_var.regularizer, - fluid.regularizer.L1Decay)) + isinstance( + static_var.regularizer, fluid.regularizer.L1Decay + ) + ) else: self.assertTrue(isinstance(static_var, fluid.framework.Variable)) @@ -1205,10 +1305,9 @@ class TestVarBase(unittest.TestCase): paddle.disable_static(paddle.CPUPlace()) paddle.seed(2021) x = paddle.rand([128]) - paddle.set_printoptions(precision=4, - threshold=1000, - edgeitems=3, - linewidth=80) + paddle.set_printoptions( + precision=4, threshold=1000, edgeitems=3, linewidth=80 + ) a_str = str(x) expected = '''Tensor(shape=[128], dtype=float32, place=Place(cpu), stop_gradient=True, @@ -1301,7 +1400,6 @@ class TestVarBase(unittest.TestCase): class TestVarBaseSetitem(unittest.TestCase): - def func_setUp(self): self.set_dtype() self.tensor_x = paddle.to_tensor(np.ones((4, 2, 
3)).astype(self.dtype)) @@ -1373,13 +1471,11 @@ class TestVarBaseSetitem(unittest.TestCase): class TestVarBaseSetitemInt64(TestVarBaseSetitem): - def set_dtype(self): self.dtype = "int64" class TestVarBaseSetitemFp32(TestVarBaseSetitem): - def set_dtype(self): self.dtype = "float32" @@ -1396,13 +1492,11 @@ class TestVarBaseSetitemFp32(TestVarBaseSetitem): class TestVarBaseSetitemFp64(TestVarBaseSetitem): - def set_dtype(self): self.dtype = "float64" class TestVarBaseSetitemBoolIndex(unittest.TestCase): - def func_setUp(self): paddle.disable_static() self.set_dtype() @@ -1481,7 +1575,6 @@ class TestVarBaseSetitemBoolIndex(unittest.TestCase): class TestVarBaseSetitemBoolScalarIndex(unittest.TestCase): - def set_input(self): self.tensor_x = paddle.to_tensor(np.ones((1, 2, 3)).astype(self.dtype)) self.np_value = np.random.random((2, 3)).astype(self.dtype) @@ -1507,7 +1600,6 @@ class TestVarBaseSetitemBoolScalarIndex(unittest.TestCase): class TestVarBaseInplaceVersion(unittest.TestCase): - def func_test_setitem(self): paddle.disable_static() @@ -1543,7 +1635,6 @@ class TestVarBaseInplaceVersion(unittest.TestCase): class TestVarBaseSlice(unittest.TestCase): - def func_test_slice(self): paddle.disable_static() np_x = np.random.random((3, 8, 8)) @@ -1559,7 +1650,6 @@ class TestVarBaseSlice(unittest.TestCase): class TestVarBaseClear(unittest.TestCase): - def func_test_clear(self): paddle.disable_static() np_x = np.random.random((3, 8, 8)) @@ -1574,7 +1664,6 @@ class TestVarBaseClear(unittest.TestCase): class TestVarBaseOffset(unittest.TestCase): - def func_offset(self): paddle.disable_static() np_x = np.random.random((3, 8, 8)) @@ -1591,7 +1680,6 @@ class TestVarBaseOffset(unittest.TestCase): class TestVarBaseShareBufferTo(unittest.TestCase): - def func_test_share_buffer_To(self): paddle.disable_static() np_src = np.random.random((3, 8, 8)) @@ -1611,7 +1699,6 @@ class TestVarBaseShareBufferTo(unittest.TestCase): class TestVarBaseTo(unittest.TestCase): - def func_setUp(self): paddle.disable_static() self.np_x = np.random.random((3, 8, 8)) @@ -1638,14 +1725,16 @@ class TestVarBaseTo(unittest.TestCase): x_gpu1 = self.x._to(device='gpu:0', dtype="float64") self.assertTrue(x_gpu1.place.is_gpu_place()) self.assertEqual(x_gpu1.place.gpu_device_id(), 0) - self.assertEqual(x_gpu1.dtype, - paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual( + x_gpu1.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + ) x_gpu2 = self.x._to(device='gpu:0', dtype="float16") self.assertTrue(x_gpu2.place.is_gpu_place()) self.assertEqual(x_gpu2.place.gpu_device_id(), 0) - self.assertEqual(x_gpu2.dtype, - paddle.fluid.core.VarDesc.VarType.FP16) + self.assertEqual( + x_gpu2.dtype, paddle.fluid.core.VarDesc.VarType.FP16 + ) x_cpu = self.x._to(device=paddle.CPUPlace()) self.assertTrue(x_cpu.place.is_cpu_place()) @@ -1673,7 +1762,6 @@ class TestVarBaseTo(unittest.TestCase): class TestVarBaseInitVarBaseFromTensorWithDevice(unittest.TestCase): - def func_test_varbase_init(self): paddle.disable_static() t = fluid.Tensor() @@ -1703,7 +1791,6 @@ class TestVarBaseInitVarBaseFromTensorWithDevice(unittest.TestCase): class TestVarBaseNumel(unittest.TestCase): - def func_test_numel_normal(self): paddle.disable_static() np_x = np.random.random((3, 8, 8)) @@ -1733,7 +1820,6 @@ class TestVarBaseNumel(unittest.TestCase): class TestVarBaseCopyGradientFrom(unittest.TestCase): - def func_test_copy_gradient_from(self): paddle.disable_static() np_x = np.random.random((2, 2)) @@ -1752,7 +1838,6 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase): 
class TestEagerTensorGradNameValue(unittest.TestCase): - def test_eager_tensor_grad_name_value(self): with _test_eager_guard(): a_np = np.array([2, 3]).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_var_conv_2d.py b/python/paddle/fluid/tests/unittests/test_var_conv_2d.py index 9836ea72aab5e7465bccd52100ebd055515db407..ef313a13944afa5fc92b802c04eb274b7b269233 100644 --- a/python/paddle/fluid/tests/unittests/test_var_conv_2d.py +++ b/python/paddle/fluid/tests/unittests/test_var_conv_2d.py @@ -18,7 +18,6 @@ from op_test import OpTest, skip_check_grad_ci class TestVarConv2DOp(OpTest): - def setUp(self): self.init_op_type() self.set_data() @@ -34,11 +33,13 @@ class TestVarConv2DOp(OpTest): stride = [1, 1] row = [2, 4] col = [3, 2] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) - def init_data(self, input_channel, output_channel, filter_size, stride, row, - col): + def init_data( + self, input_channel, output_channel, filter_size, stride, row, col + ): feature = [row[i] * col[i] for i in range(len(row))] numel = sum(feature) * input_channel @@ -46,14 +47,16 @@ class TestVarConv2DOp(OpTest): x_lod = [[x * input_channel for x in feature]] row_data = np.random.random((sum(row), 10)).astype('float32') col_data = np.random.random((sum(col), 10)).astype('float32') - w_shape = (output_channel, - input_channel * filter_size[0] * filter_size[1]) + w_shape = ( + output_channel, + input_channel * filter_size[0] * filter_size[1], + ) w_data = np.random.random(w_shape).astype('float32') self.inputs = { 'X': (x_data, x_lod), 'ROW': (row_data, [row]), 'COLUMN': (col_data, [col]), - 'W': w_data + 'W': w_data, } self.attrs = { 'InputChannel': input_channel, @@ -95,8 +98,9 @@ class TestVarConv2DOp(OpTest): if top_im_size == 0: out_tmp = np.zeros((out_ch * top_im_size, 1)).astype('float32') else: - col_batch_data = col_res_data[col_data_offset:col_data_offset + - col_res_lod[0][idx]] + col_batch_data = col_res_data[ + col_data_offset : col_data_offset + col_res_lod[0][idx] + ] gemm_shape = (in_ch * kernel_h * kernel_w, top_im_size) col_batch_data = col_batch_data.reshape(gemm_shape) out_tmp = np.dot(w_data, col_batch_data).reshape(-1, 1) @@ -106,7 +110,7 @@ class TestVarConv2DOp(OpTest): self.outputs = { 'Out': (out_data.astype('float32'), out_lod), - 'Col': (col_res_data, col_res_lod) + 'Col': (col_res_data, col_res_lod), } def Im2Col(self): @@ -159,11 +163,23 @@ class TestVarConv2DOp(OpTest): for kx in range(kernel_w): im_y = y + ky - half_kernel_h im_x = x + kx - half_kernel_w - if im_x >= 0 and im_x < width and im_y >= 0 and im_y < height: - col_res[t_offset + - (row_offset + ky * kernel_w + kx) * top_x + - col_offset] = \ - x_data[b_offset + im_offset + im_y * width + im_x] + if ( + im_x >= 0 + and im_x < width + and im_y >= 0 + and im_y < height + ): + col_res[ + t_offset + + (row_offset + ky * kernel_w + kx) + * top_x + + col_offset + ] = x_data[ + b_offset + + im_offset + + im_y * width + + im_x + ] t_offset += col_res_lod[0][idx] b_offset += x_lod[0][idx] @@ -174,14 +190,12 @@ class TestVarConv2DOp(OpTest): self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], - 'Out', - max_relative_error=0.005, - check_dygraph=False) + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, check_dygraph=False + ) class TestVarConv2DOpCase1(TestVarConv2DOp): - def set_data(self): # set in_ch 1 input_channel = 1 @@ -190,12 +204,12 @@ class 
TestVarConv2DOpCase1(TestVarConv2DOp): stride = [1, 1] row = [1, 10] col = [40, 6] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DOpCase2(TestVarConv2DOp): - def set_data(self): # set out_ch 1 input_channel = 2 @@ -204,12 +218,12 @@ class TestVarConv2DOpCase2(TestVarConv2DOp): stride = [2, 2] row = [6, 7] col = [8, 2] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DOpCase3(TestVarConv2DOp): - def set_data(self): # set batch 1 input_channel = 2 @@ -218,12 +232,12 @@ class TestVarConv2DOpCase3(TestVarConv2DOp): stride = [2, 2] row = [14] col = [4] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DOpCase4(TestVarConv2DOp): - def set_data(self): # set filter size very large input_channel = 3 @@ -232,12 +246,12 @@ class TestVarConv2DOpCase4(TestVarConv2DOp): stride = [2, 2] row = [4, 7] col = [5, 2] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DOpCase5(TestVarConv2DOp): - def set_data(self): # set input very small input_channel = 50 @@ -246,16 +260,15 @@ class TestVarConv2DOpCase5(TestVarConv2DOp): stride = [1, 1] row = [1, 1] col = [1, 1] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) @skip_check_grad_ci( - reason= - "[skip shape check] Use shape of input_channel, row and col all is 1 to test special LoDTensor." + reason="[skip shape check] Use shape of input_channel, row and col all is 1 to test special LoDTensor." 
) class TestVarConv2DOpCase6(TestVarConv2DOp): - def set_data(self): input_channel = 1 output_channel = 3 @@ -263,12 +276,12 @@ class TestVarConv2DOpCase6(TestVarConv2DOp): stride = [1, 1] row = [1, 1] col = [1, 1] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DOpCase7(TestVarConv2DOp): - def set_data(self): input_channel = 2 output_channel = 3 @@ -276,43 +289,46 @@ class TestVarConv2DOpCase7(TestVarConv2DOp): stride = [1, 1] row = [5, 4] col = [6, 7] - self.init_data(input_channel, output_channel, filter_size, stride, row, - col) + self.init_data( + input_channel, output_channel, filter_size, stride, row, col + ) class TestVarConv2DApi(unittest.TestCase): - def test_api(self): import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[1], lod_level=1) row = fluid.layers.data(name='row', shape=[6], lod_level=1) col = fluid.layers.data(name='col', shape=[6], lod_level=1) - out = fluid.contrib.var_conv_2d(input=x, - row=row, - col=col, - input_channel=3, - output_channel=5, - filter_size=[3, 3], - stride=1) + out = fluid.contrib.var_conv_2d( + input=x, + row=row, + col=col, + input_channel=3, + output_channel=5, + filter_size=[3, 3], + stride=1, + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(116, 1).astype('float32'), [[60, 56]], place) + np.random.rand(116, 1).astype('float32'), [[60, 56]], place + ) row_tensor = fluid.create_lod_tensor( - np.random.rand(9, 6).astype('float32'), [[5, 4]], place) + np.random.rand(9, 6).astype('float32'), [[5, 4]], place + ) col_tensor = fluid.create_lod_tensor( - np.random.rand(13, 6).astype('float32'), [[6, 7]], place) + np.random.rand(13, 6).astype('float32'), [[6, 7]], place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x_tensor, - 'row': row_tensor, - 'col': col_tensor - }, - fetch_list=[out], - return_numpy=False) + ret = exe.run( + feed={'x': x_tensor, 'row': row_tensor, 'col': col_tensor}, + fetch_list=[out], + return_numpy=False, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_var_info.py b/python/paddle/fluid/tests/unittests/test_var_info.py index 7af77ae96b55196650fc8b19c1f40b84df4c56dc..4bb6648488ee672974fcb4d53a630f500d3affb3 100644 --- a/python/paddle/fluid/tests/unittests/test_var_info.py +++ b/python/paddle/fluid/tests/unittests/test_var_info.py @@ -22,10 +22,10 @@ import unittest class TestVarInfo(unittest.TestCase): - """ TestCases for Dataset. """ + """TestCases for Dataset.""" def test_var_info(self): - """ Testcase for get and set info for variable. 
""" + """Testcase for get and set info for variable.""" value = np.random.randn(1) var = fluid.layers.create_global_var([1], value, "float32") var._set_info("name", "test") diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py index 8ab9d14c924dbd0179a79d563a9adae3acb27fd9..bf9ee54afc7677c46f3d75e88dd63c56030e83bd 100644 --- a/python/paddle/fluid/tests/unittests/test_variable.py +++ b/python/paddle/fluid/tests/unittests/test_variable.py @@ -16,7 +16,11 @@ import unittest from functools import reduce import paddle -from paddle.fluid.framework import Program, convert_np_dtype_to_dtype_, default_main_program +from paddle.fluid.framework import ( + Program, + convert_np_dtype_to_dtype_, + default_main_program, +) import paddle import paddle.fluid as fluid import paddle.fluid.core as core @@ -26,7 +30,6 @@ paddle.enable_static() class TestVariable(unittest.TestCase): - def setUp(self): np.random.seed(2022) @@ -45,10 +48,9 @@ class TestVariable(unittest.TestCase): def test_var(self): b = default_main_program().current_block() - w = b.create_var(dtype="float64", - shape=[784, 100], - lod_level=0, - name="fc.w") + w = b.create_var( + dtype="float64", shape=[784, 100], lod_level=0, name="fc.w" + ) self.assertNotEqual(str(w), "") self.assertEqual(core.VarDesc.VarType.FP64, w.dtype) self.assertEqual((784, 100), w.shape) @@ -63,12 +65,15 @@ class TestVariable(unittest.TestCase): self.assertEqual("fc.w@GRAD", w.grad_name) self.assertEqual(0, w.lod_level) - self.assertRaises(ValueError, - lambda: b.create_var(name="fc.w", shape=(24, 100))) + self.assertRaises( + ValueError, lambda: b.create_var(name="fc.w", shape=(24, 100)) + ) - w = b.create_var(dtype=paddle.fluid.core.VarDesc.VarType.STRINGS, - shape=[1], - name="str_var") + w = b.create_var( + dtype=paddle.fluid.core.VarDesc.VarType.STRINGS, + shape=[1], + name="str_var", + ) self.assertEqual(None, w.lod_level) def test_element_size(self): @@ -103,8 +108,9 @@ class TestVariable(unittest.TestCase): def test_step_scopes(self): prog = Program() b = prog.current_block() - var = b.create_var(name='step_scopes', - type=core.VarDesc.VarType.STEP_SCOPES) + var = b.create_var( + name='step_scopes', type=core.VarDesc.VarType.STEP_SCOPES + ) self.assertEqual(core.VarDesc.VarType.STEP_SCOPES, var.type) def _test_slice(self, place): @@ -137,10 +143,13 @@ class TestVariable(unittest.TestCase): main = fluid.Program() with fluid.program_guard(main): exe = fluid.Executor(place) - tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], [16, 17, 18]], - [[19, 20, 21], [22, 23, 24], - [25, 26, 27]]]).astype('float32') + tensor_array = np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]], + ] + ).astype('float32') var = fluid.layers.assign(tensor_array) var1 = var[0, 1, 1] var2 = var[1:] @@ -167,13 +176,28 @@ class TestVariable(unittest.TestCase): data.append((np.random.randint(10, size=[13]).astype('float32'))) exe.run(fluid.default_startup_program()) - local_out = exe.run(main, - feed=feeder.feed([data]), - fetch_list=[ - var, var1, var2, var3, var4, var5, var6, - var7, var8, var9, var10, var11, var12, - var13, var14, var15 - ]) + local_out = exe.run( + main, + feed=feeder.feed([data]), + fetch_list=[ + var, + var1, + var2, + var3, + var4, + var5, + var6, + var7, + var8, + var9, + var10, + var11, + var12, + var13, + var14, + var15, + ], + ) 
np.testing.assert_array_equal(local_out[1], tensor_array[0, 1, 1:2]) np.testing.assert_array_equal(local_out[2], tensor_array[1:]) @@ -181,25 +205,33 @@ class TestVariable(unittest.TestCase): np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) np.testing.assert_array_equal( - local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1]) + local_out[6], tensor_array.reshape((3, -1, 3))[:, :, -1] + ) np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) - np.testing.assert_array_equal(local_out[8], - tensor_array[:1, :1, :1]) - np.testing.assert_array_equal(local_out[9], - tensor_array[:-1, :-1, :-1]) - np.testing.assert_array_equal(local_out[10], - tensor_array[::-1, :1, :-1]) - np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, - -1:]) - np.testing.assert_array_equal(local_out[12], tensor_array[1:2, - 2:, ::-1]) - np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, - -2:-1]) - np.testing.assert_array_equal(local_out[14], - tensor_array[1:-1, 0:2, ::-1]) - np.testing.assert_array_equal(local_out[15], - tensor_array[::-1, ::-1, ::-1]) + np.testing.assert_array_equal( + local_out[8], tensor_array[:1, :1, :1] + ) + np.testing.assert_array_equal( + local_out[9], tensor_array[:-1, :-1, :-1] + ) + np.testing.assert_array_equal( + local_out[10], tensor_array[::-1, :1, :-1] + ) + np.testing.assert_array_equal( + local_out[11], tensor_array[:-1, ::-1, -1:] + ) + np.testing.assert_array_equal( + local_out[12], tensor_array[1:2, 2:, ::-1] + ) + np.testing.assert_array_equal( + local_out[13], tensor_array[2:10, 2:, -2:-1] + ) + np.testing.assert_array_equal( + local_out[14], tensor_array[1:-1, 0:2, ::-1] + ) + np.testing.assert_array_equal( + local_out[15], tensor_array[::-1, ::-1, ::-1] + ) def _test_slice_index_tensor(self, place): data = np.random.rand(2, 3).astype("float32") @@ -270,13 +302,18 @@ class TestVariable(unittest.TestCase): out7 = y[..., 0] exe = paddle.static.Executor(place) - result = exe.run(prog, - fetch_list=[out1, out2, out3, out4, out5, out6, out7]) + result = exe.run( + prog, fetch_list=[out1, out2, out3, out4, out5, out6, out7] + ) expected = [ - data[0:, ..., 1:], data[0:, ...], data[..., 1:], data[...], - data[[1, 0], [0, 0]], data[([1, 0], [0, 0])], - np.array([1]) + data[0:, ..., 1:], + data[0:, ...], + data[..., 1:], + data[...], + data[[1, 0], [0, 0]], + data[([1, 0], [0, 0])], + np.array([1]), ] self.assertTrue((result[0] == expected[0]).all()) @@ -314,11 +351,18 @@ class TestVariable(unittest.TestCase): exe = paddle.static.Executor(place) result = exe.run( - prog, fetch_list=[out0, out1, out2, out3, out4, out5, out6, out7]) + prog, fetch_list=[out0, out1, out2, out3, out4, out5, out6, out7] + ) expected = [ - data[idx0], data[idx1], data[idx2], data[idx3], data[idx4], - data[np_idx], data[data < 0.36], data[data > 0.6] + data[idx0], + data[idx1], + data[idx2], + data[idx3], + data[idx4], + data[np_idx], + data[data < 0.36], + data[data > 0.6], ] self.assertTrue((result[0] == expected[0]).all()) @@ -413,11 +457,13 @@ class TestVariable(unittest.TestCase): def test_create_selected_rows(self): b = default_main_program().current_block() - var = b.create_var(name="var", - shape=[1, 1], - dtype="float32", - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, - persistable=True) + var = b.create_var( + name="var", + shape=[1, 1], + dtype="float32", + type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + persistable=True, + ) def _test(): var.lod_level() @@ 
-452,17 +498,17 @@ class TestVariable(unittest.TestCase): scope = fluid.core.Scope() with paddle.static.scope_guard(scope): with paddle.static.program_guard(main, startup): - x = paddle.static.data(name='x', - shape=[3, 2, 1], - dtype='float32') + x = paddle.static.data( + name='x', shape=[3, 2, 1], dtype='float32' + ) x.persistable = True feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) detach_x = x.detach() exe = paddle.static.Executor(paddle.CPUPlace()) exe.run(startup) - result = exe.run(main, - feed={'x': feed_data}, - fetch_list=[x, detach_x]) + result = exe.run( + main, feed={'x': feed_data}, fetch_list=[x, detach_x] + ) self.assertTrue((result[1] == feed_data).all()) self.assertTrue((result[0] == result[1]).all()) @@ -472,9 +518,9 @@ class TestVariable(unittest.TestCase): self.assertTrue((result[1] == modified_value).all()) self.assertTrue((result[0] == result[1]).all()) - modified_value = np.random.uniform(-1, 1, - size=[3, 2, - 1]).astype('float32') + modified_value = np.random.uniform( + -1, 1, size=[3, 2, 1] + ).astype('float32') x.set_value(modified_value, scope) result = exe.run(main, fetch_list=[x, detach_x]) self.assertTrue((result[1] == modified_value).all()) @@ -482,7 +528,6 @@ class TestVariable(unittest.TestCase): class TestVariableSlice(unittest.TestCase): - def setUp(self): np.random.seed(2022) @@ -502,8 +547,11 @@ class TestVariableSlice(unittest.TestCase): result = exe.run(prog, fetch_list=outs) expected = [ - data[0:, None, 1:], data[0:, None], data[None, 1:], data[None], - data[..., None, :, None] + data[0:, None, 1:], + data[0:, None], + data[None, 1:], + data[None], + data[..., None, :, None], ] for i in range(len(outs)): self.assertEqual(outs[i].shape, expected[i].shape) @@ -525,8 +573,12 @@ class TestVariableSlice(unittest.TestCase): exe = paddle.static.Executor(place) result = exe.run(prog, fetch_list=outs) expected = [ - data[0, 1:, None], data[0, None], data[None, 1], data[None], - data[0, 0, 0, None], data[None, 0, 0, 0, None] + data[0, 1:, None], + data[0, None], + data[None, 1], + data[None], + data[0, 0, 0, None], + data[None, 0, 0, 0, None], ] for i in range(len(outs)): @@ -544,7 +596,6 @@ class TestVariableSlice(unittest.TestCase): class TestListIndex(unittest.TestCase): - def setUp(self): np.random.seed(2022) @@ -555,8 +606,9 @@ class TestListIndex(unittest.TestCase): paddle.enable_static() inps_shape = [3, 4, 5, 2] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [3, 3, 2, 1] index = np.arange(self.numel(index_shape)).reshape(index_shape) @@ -567,15 +619,17 @@ class TestListIndex(unittest.TestCase): index_mod = (index % (array.shape[0])).tolist() with paddle.static.program_guard(program): - x = paddle.static.data(name='x', - shape=array.shape, - dtype='float32') + x = paddle.static.data( + name='x', shape=array.shape, dtype='float32' + ) y = x[index_mod] - place = paddle.fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.fluid.CUDAPlace(0) + place = ( + paddle.fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else paddle.fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -584,9 +638,9 @@ class TestListIndex(unittest.TestCase): fetch_list = [y.name] getitem_np = array[index_mod] - getitem_pp = exe.run(prog, - feed={x.name: array}, - fetch_list=fetch_list) + getitem_pp = exe.run( + prog, feed={x.name: 
array}, fetch_list=fetch_list + ) np.testing.assert_array_equal(getitem_np, getitem_pp[0]) array = array[0] @@ -622,16 +676,21 @@ class TestListIndex(unittest.TestCase): def test_static_graph_list_index_muti_dim(self): paddle.enable_static() inps_shape = [3, 4, 5] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [2, 2] index1 = np.arange(self.numel(index_shape)).reshape(index_shape) index2 = np.arange(self.numel(index_shape)).reshape(index_shape) + 2 value_shape = [3, 2, 2, 3] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) index_mod1 = (index1 % (min(array.shape))).tolist() index_mod2 = (index2 % (min(array.shape))).tolist() @@ -641,21 +700,23 @@ class TestListIndex(unittest.TestCase): x = paddle.static.data(name='x', shape=array.shape, dtype='float32') - value = paddle.static.data(name='value', - shape=value_np.shape, - dtype='float32') - index1 = paddle.static.data(name='index1', - shape=index1.shape, - dtype='int32') - index2 = paddle.static.data(name='index2', - shape=index2.shape, - dtype='int32') + value = paddle.static.data( + name='value', shape=value_np.shape, dtype='float32' + ) + index1 = paddle.static.data( + name='index1', shape=index1.shape, dtype='int32' + ) + index2 = paddle.static.data( + name='index2', shape=index2.shape, dtype='int32' + ) y = x[index1, index2] - place = paddle.fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.fluid.CUDAPlace(0) + place = ( + paddle.fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else paddle.fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -666,32 +727,40 @@ class TestListIndex(unittest.TestCase): y2 = array2[index_mod1, index_mod2] - getitem_pp = exe.run(prog, - feed={ - x.name: array, - index1.name: index_mod1, - index2.name: index_mod2 - }, - fetch_list=fetch_list) + getitem_pp = exe.run( + prog, + feed={ + x.name: array, + index1.name: index_mod1, + index2.name: index_mod2, + }, + fetch_list=fetch_list, + ) np.testing.assert_array_equal( y2, getitem_pp[0], - err_msg='\n numpy:{},\n paddle:{}'.format(y2, getitem_pp[0])) + err_msg='\n numpy:{},\n paddle:{}'.format(y2, getitem_pp[0]), + ) def test_dygraph_list_index_muti_dim(self): paddle.disable_static() inps_shape = [3, 4, 5] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [2, 2] index1 = np.arange(self.numel(index_shape)).reshape(index_shape) index2 = np.arange(self.numel(index_shape)).reshape(index_shape) + 2 value_shape = [3, 2, 2, 3] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) index_mod1 = (index1 % (min(array.shape))).tolist() index_mod2 = (index2 % (min(array.shape))).tolist() @@ -721,16 +790,15 @@ class TestListIndex(unittest.TestCase): value_np = array2[index] except: with self.assertRaises(ValueError): - getitem_pp = exe.run(prog, - feed={x.name: array}, - fetch_list=fetch_list) + getitem_pp = exe.run( + prog, feed={x.name: array}, fetch_list=fetch_list + ) return getitem_pp = exe.run(prog, 
feed={x.name: array}, fetch_list=fetch_list) - np.testing.assert_allclose(value_np, - getitem_pp[0], - rtol=1e-5, - atol=1e-8) + np.testing.assert_allclose( + value_np, getitem_pp[0], rtol=1e-5, atol=1e-8 + ) def test_static_graph_getitem_bool_index(self): paddle.enable_static() @@ -762,9 +830,9 @@ class TestListIndex(unittest.TestCase): def run_setitem_list_index(self, array, index, value_np): x = paddle.static.data(name='x', shape=array.shape, dtype='float32') - value = paddle.static.data(name='value', - shape=value_np.shape, - dtype='float32') + value = paddle.static.data( + name='value', shape=value_np.shape, dtype='float32' + ) x[index] = value y = x @@ -781,19 +849,17 @@ class TestListIndex(unittest.TestCase): array2[index] = value_np except: with self.assertRaises(ValueError): - setitem_pp = exe.run(prog, - feed={ - x.name: array, - value.name: value_np - }, - fetch_list=fetch_list) + setitem_pp = exe.run( + prog, + feed={x.name: array, value.name: value_np}, + fetch_list=fetch_list, + ) return - setitem_pp = exe.run(prog, - feed={ - x.name: array, - value.name: value_np - }, - fetch_list=fetch_list) + setitem_pp = exe.run( + prog, + feed={x.name: array, value.name: value_np}, + fetch_list=fetch_list, + ) np.testing.assert_allclose(array2, setitem_pp[0], rtol=1e-5, atol=1e-8) @@ -801,15 +867,20 @@ class TestListIndex(unittest.TestCase): paddle.enable_static() # case 1: inps_shape = [3, 4, 5, 2, 3] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [3, 3, 1, 2] index = np.arange(self.numel(index_shape)).reshape(index_shape) value_shape = inps_shape[3:] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) for _ in range(3): program = paddle.static.Program() @@ -824,15 +895,20 @@ class TestListIndex(unittest.TestCase): # case 2: inps_shape = [3, 4, 5, 4, 3] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [4, 3, 2, 2] index = np.arange(self.numel(index_shape)).reshape(index_shape) value_shape = [3] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) for _ in range(4): program = paddle.static.Program() @@ -846,15 +922,20 @@ class TestListIndex(unittest.TestCase): # case 3: inps_shape = [3, 4, 5, 3, 3] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [4, 3, 2, 2] index = np.arange(self.numel(index_shape)).reshape(index_shape) value_shape = [3, 2, 2, 3] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) index_mod = (index % (min(array.shape))).tolist() self.run_setitem_list_index(array, index_mod, value_np) @@ -897,18 +978,28 @@ class TestListIndex(unittest.TestCase): def test_static_graph_tensor_index_setitem_muti_dim(self): paddle.enable_static() inps_shape = [3, 4, 5, 4] - array = np.arange(self.numel(inps_shape), - 
dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [2, 3, 4] - index1 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) - index2 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) + 2 + index1 = np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + index2 = ( + np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + + 2 + ) value_shape = [4] - value_np = np.arange(self.numel(value_shape), - dtype='float32').reshape(value_shape) + 100 + value_np = ( + np.arange(self.numel(value_shape), dtype='float32').reshape( + value_shape + ) + + 100 + ) for _ in range(3): index_mod1 = index1 % (min(array.shape)) @@ -922,29 +1013,31 @@ class TestListIndex(unittest.TestCase): program = paddle.static.Program() with paddle.static.program_guard(program): - x1 = paddle.static.data(name='x1', - shape=array.shape, - dtype='float32') - x2 = paddle.static.data(name='x2', - shape=array.shape, - dtype='float32') - - value = paddle.static.data(name='value', - shape=value_np.shape, - dtype='float32') - index_1 = paddle.static.data(name='index_1', - shape=index1.shape, - dtype='int32') - index_2 = paddle.static.data(name='index_2', - shape=index2.shape, - dtype='int32') + x1 = paddle.static.data( + name='x1', shape=array.shape, dtype='float32' + ) + x2 = paddle.static.data( + name='x2', shape=array.shape, dtype='float32' + ) + + value = paddle.static.data( + name='value', shape=value_np.shape, dtype='float32' + ) + index_1 = paddle.static.data( + name='index_1', shape=index1.shape, dtype='int32' + ) + index_2 = paddle.static.data( + name='index_2', shape=index2.shape, dtype='int32' + ) x1[index_1, index_2] = value x2[index_1] = value - place = paddle.fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.fluid.CUDAPlace(0) + place = ( + paddle.fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else paddle.fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -952,25 +1045,31 @@ class TestListIndex(unittest.TestCase): exe.run(paddle.static.default_startup_program()) fetch_list = [x1.name, x2.name] - setitem_pp = exe.run(prog, - feed={ - x1.name: array, - x2.name: array, - value.name: value_np, - index_1.name: index_mod1, - index_2.name: index_mod2 - }, - fetch_list=fetch_list) + setitem_pp = exe.run( + prog, + feed={ + x1.name: array, + x2.name: array, + value.name: value_np, + index_1.name: index_mod1, + index_2.name: index_mod2, + }, + fetch_list=fetch_list, + ) np.testing.assert_array_equal( array2, setitem_pp[0], err_msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0])) + array2, setitem_pp[0] + ), + ) np.testing.assert_array_equal( array3, setitem_pp[1], err_msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1])) + array3, setitem_pp[1] + ), + ) array = array[0] index1 = index1[0] index2 = index2[0] @@ -978,14 +1077,20 @@ class TestListIndex(unittest.TestCase): def test_static_graph_array_index_muti_dim(self): paddle.enable_static() inps_shape = [3, 4, 5, 4] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [2, 3, 4] - index1 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) - index2 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) + 
2 + index1 = np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + index2 = ( + np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + + 2 + ) for _ in range(3): index_mod1 = index1 % (min(array.shape)) @@ -1001,53 +1106,62 @@ class TestListIndex(unittest.TestCase): program = paddle.static.Program() with paddle.static.program_guard(program): - x1 = paddle.static.data(name='x1', - shape=array.shape, - dtype='float32') - x2 = paddle.static.data(name='x2', - shape=array.shape, - dtype='float32') + x1 = paddle.static.data( + name='x1', shape=array.shape, dtype='float32' + ) + x2 = paddle.static.data( + name='x2', shape=array.shape, dtype='float32' + ) x1[index_mod1, index_mod2] = 1 x2[index_mod1] = 2.5 y1 = x1[index_mod2, index_mod1] y2 = x2[index_mod2] - place = paddle.fluid.CPUPlace( - ) if not paddle.fluid.core.is_compiled_with_cuda( - ) else paddle.fluid.CUDAPlace(0) + place = ( + paddle.fluid.CPUPlace() + if not paddle.fluid.core.is_compiled_with_cuda() + else paddle.fluid.CUDAPlace(0) + ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) fetch_list = [x1.name, x2.name, y1.name, y2.name] - setitem_pp = exe.run(prog, - feed={ - x1.name: array, - x2.name: array - }, - fetch_list=fetch_list) + setitem_pp = exe.run( + prog, + feed={x1.name: array, x2.name: array}, + fetch_list=fetch_list, + ) np.testing.assert_array_equal( array2, setitem_pp[0], err_msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0])) + array2, setitem_pp[0] + ), + ) np.testing.assert_array_equal( array3, setitem_pp[1], err_msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1])) + array3, setitem_pp[1] + ), + ) np.testing.assert_array_equal( y_np1, setitem_pp[2], err_msg='\n numpy:{},\n paddle:{}'.format( - y_np1, setitem_pp[2])) + y_np1, setitem_pp[2] + ), + ) np.testing.assert_array_equal( y_np2, setitem_pp[3], err_msg='\n numpy:{},\n paddle:{}'.format( - y_np2, setitem_pp[3])) + y_np2, setitem_pp[3] + ), + ) array = array[0] index1 = index1[0] index2 = index2[0] @@ -1055,13 +1169,19 @@ class TestListIndex(unittest.TestCase): def test_dygraph_array_index_muti_dim(self): paddle.disable_static() inps_shape = [3, 4, 5, 4] - array = np.arange(self.numel(inps_shape), - dtype='float32').reshape(inps_shape) + array = np.arange(self.numel(inps_shape), dtype='float32').reshape( + inps_shape + ) index_shape = [2, 3, 4] - index1 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) - index2 = np.arange(self.numel(index_shape), - dtype='int32').reshape(index_shape) + 2 + index1 = np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + index2 = ( + np.arange(self.numel(index_shape), dtype='int32').reshape( + index_shape + ) + + 2 + ) for _ in range(3): @@ -1079,7 +1199,8 @@ class TestListIndex(unittest.TestCase): np.testing.assert_array_equal( y_t1.numpy(), y_np1, - err_msg='\n numpy:{},\n paddle:{}'.format(y_np1, y_t1.numpy())) + err_msg='\n numpy:{},\n paddle:{}'.format(y_np1, y_t1.numpy()), + ) # 1 dim getitem array2 = array.copy() y_np2 = array2[index_mod2] @@ -1089,7 +1210,8 @@ class TestListIndex(unittest.TestCase): np.testing.assert_array_equal( y_t2.numpy(), y_np2, - err_msg='\n numpy:{},\n paddle:{}'.format(y_np2, y_t2.numpy())) + err_msg='\n numpy:{},\n paddle:{}'.format(y_np2, y_t2.numpy()), + ) # 2 dim setitem array1 = array.copy() @@ -1099,7 +1221,9 @@ class TestListIndex(unittest.TestCase): tensor1.numpy(), array1, err_msg='\n 
numpy:{},\n paddle:{}'.format( - array1, tensor1.numpy())) + array1, tensor1.numpy() + ), + ) # 1 dim setitem array2 = array.copy() @@ -1111,7 +1235,9 @@ class TestListIndex(unittest.TestCase): tensor2.numpy(), array2, err_msg='\n numpy:{},\n paddle:{}'.format( - array2, tensor2.numpy())) + array2, tensor2.numpy() + ), + ) array = array[0] index1 = index1[0] diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py index ca0ad7e9fd983b78835073a835adf127d353934c..fc97a20533bcb0e4590f0b71d9d39bb1b29f3e0c 100644 --- a/python/paddle/fluid/tests/unittests/test_variance_layer.py +++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py @@ -20,14 +20,13 @@ import paddle def ref_var(x, axis=None, unbiased=True, keepdim=False): ddof = 1 if unbiased else 0 if isinstance(axis, int): - axis = (axis, ) + axis = (axis,) if axis is not None: axis = tuple(axis) return np.var(x, axis=axis, ddof=ddof, keepdims=keepdim) class TestVarAPI(unittest.TestCase): - def setUp(self): self.dtype = 'float64' self.shape = [1, 3, 4, 10] @@ -36,9 +35,11 @@ class TestVarAPI(unittest.TestCase): self.unbiased = True self.set_attrs() self.x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - self.place=paddle.CUDAPlace(0) \ - if paddle.fluid.core.is_compiled_with_cuda() \ + self.place = ( + paddle.CUDAPlace(0) + if paddle.fluid.core.is_compiled_with_cuda() else paddle.CPUPlace() + ) def set_attrs(self): pass @@ -68,43 +69,36 @@ class TestVarAPI(unittest.TestCase): class TestVarAPI_dtype(TestVarAPI): - def set_attrs(self): self.dtype = 'float32' class TestVarAPI_axis_int(TestVarAPI): - def set_attrs(self): self.axis = 2 class TestVarAPI_axis_list(TestVarAPI): - def set_attrs(self): self.axis = [1, 2] class TestVarAPI_axis_tuple(TestVarAPI): - def set_attrs(self): self.axis = (1, 3) class TestVarAPI_keepdim(TestVarAPI): - def set_attrs(self): self.keepdim = False class TestVarAPI_unbiased(TestVarAPI): - def set_attrs(self): self.unbiased = False class TestVarAPI_alias(unittest.TestCase): - def test_alias(self): paddle.disable_static() x = paddle.to_tensor(np.array([10, 12], 'float32')) @@ -117,7 +111,6 @@ class TestVarAPI_alias(unittest.TestCase): class TestVarError(unittest.TestCase): - def test_error(self): with paddle.static.program_guard(paddle.static.Program()): x = paddle.fluid.data('X', [2, 3, 4], 'int32') diff --git a/python/paddle/fluid/tests/unittests/test_version.py b/python/paddle/fluid/tests/unittests/test_version.py index 0fda61f7bbeb0377cf73c792c8f0030fd5eee59c..507ec953764dff54cbae77506c9cec038c5d6368 100644 --- a/python/paddle/fluid/tests/unittests/test_version.py +++ b/python/paddle/fluid/tests/unittests/test_version.py @@ -19,7 +19,6 @@ import paddle.version as fluid_version class VersionTest(unittest.TestCase): - def setUp(self): self._major_regex = "[0-9]+" self._minor_regex = "[0-9]+" @@ -46,4 +45,5 @@ class VersionTest(unittest.TestCase): self.assertTrue(re.match(self._patch_regex, fluid_version.patch)) self.assertTrue(re.match(self._rc_regex, fluid_version.rc)) self.assertTrue( - re.match(self._version_regex, fluid_version.full_version)) + re.match(self._version_regex, fluid_version.full_version) + ) diff --git a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py index 1f4725568a810d7b5fe37519a31c55aff58014e9..ab2c091cc0cb4227880cf4dc4b9e2ae3f1484ade 100644 --- 
a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py +++ b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py @@ -26,7 +26,6 @@ from paddle.fluid.framework import _test_eager_guard # reuse the input varbase's allocation. # View APIs include: `squeeze`, `unsqueeze`, `reshape`, `flatten`, `detach` class TestDygraphViewReuseAllocation(unittest.TestCase): - def setUp(self): self.init_shape() @@ -40,7 +39,7 @@ class TestDygraphViewReuseAllocation(unittest.TestCase): def func_test_view_api(self): var = paddle.rand(self.input_shape) view_var = self.view_api_processing(var) - view_var[0] = 2. + view_var[0] = 2.0 self.assertEqual(var.shape, self.input_shape) self.assertEqual(view_var.shape, self.output_shape) @@ -59,14 +58,14 @@ class TestDygraphViewReuseAllocation(unittest.TestCase): view_var = self.view_api_processing(var) self.assertEqual(view_var.inplace_version, 0) - var[0] = 2. + var[0] = 2.0 self.assertEqual(var.inplace_version, 1) self.assertEqual(view_var.inplace_version, 1) view_var_2 = self.view_api_processing(var) self.assertEqual(view_var_2.inplace_version, 1) - var[0] = 3. + var[0] = 3.0 self.assertEqual(view_var.inplace_version, 2) self.assertEqual(view_var_2.inplace_version, 2) @@ -87,13 +86,15 @@ class TestDygraphViewReuseAllocation(unittest.TestCase): # Here, the gradient computation will use the value of var_b var_c = var_b**2 view_var_b = self.view_api_processing(var_b) - view_var_b[0] = 2. # var_b is modified inplace + view_var_b[0] = 2.0 # var_b is modified inplace loss = paddle.nn.functional.relu(var_c) with self.assertRaisesRegexp( - RuntimeError, - "received tensor_version:{} != wrapper_version_snapshot:{}". - format(1, 0)): + RuntimeError, + "received tensor_version:{} != wrapper_version_snapshot:{}".format( + 1, 0 + ), + ): loss.backward() def test_backward_error(self): @@ -103,7 +104,6 @@ class TestDygraphViewReuseAllocation(unittest.TestCase): class TestUnsqueezeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation): - def init_shape(self): self.input_shape = [2, 3] self.output_shape = [2, 3, 1] @@ -113,7 +113,6 @@ class TestUnsqueezeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation): class TestReshapeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation): - def init_shape(self): self.input_shape = [3, 4] self.output_shape = [2, 2, 3] @@ -123,7 +122,6 @@ class TestReshapeDygraphViewReuseAllocation(TestDygraphViewReuseAllocation): class TestFlattenDygraphViewReuseAllocation(TestDygraphViewReuseAllocation): - def init_shape(self): self.input_shape = [3, 4] self.output_shape = [12] diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py index 7b789577771548e0340dd5e8a48453c1cdbfcd39..624f2f5e6165b646d19f83a8ef3bc3b979b34f6d 100644 --- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py @@ -19,7 +19,6 @@ paddle.enable_static() class Decoder(object): - def __init__(self, transitions, use_tag=True): self.transitions = transitions self.use_tag = use_tag @@ -33,8 +32,11 @@ class Decoder(object): left_length = np.array(length) max_seq_len = np.amax(left_length) left_length = np.expand_dims(left_length, 1) - alpha = np.full((bs, n_label), -1e4, dtype='float32') if self.use_tag \ + alpha = ( + np.full((bs, n_label), -1e4, dtype='float32') + if self.use_tag else np.zeros((bs, n_label), dtype='float32') + ) alpha[:, -1] = 0 for i, logit in 
enumerate(inputs_t[:max_seq_len]): if i == 0 and not self.use_tag: @@ -46,7 +48,7 @@ class Decoder(object): max_res = np.amax(alpha_trn_sum, 1), np.argmax(alpha_trn_sum, 1) historys = historys + [max_res[1]] if i >= 1 else [] alpha_nxt = max_res[0] + logit - mask = (left_length > 0) + mask = left_length > 0 alpha = mask * alpha_nxt + (1 - mask) * alpha if self.use_tag: alpha += (left_length == 1) * trans_exp[:, self.stop_idx] @@ -60,7 +62,7 @@ class Decoder(object): left_length = left_length + 1 gather_idx = batch_offset + last_ids last_ids_update = np.take(hist, gather_idx) * (left_length > 0) - mask = (left_length == 0) + mask = left_length == 0 last_ids_update = last_ids_update * (1 - mask) + last_ids * mask batch_path.insert(0, last_ids_update) last_ids = last_ids_update + (left_length < 0) * last_ids @@ -69,7 +71,6 @@ class Decoder(object): class TestViterbiOp(OpTest): - def set_attr(self): self.dtype = "float32" if core.is_compiled_with_rocm() else "float64" self.use_tag = True @@ -88,7 +89,7 @@ class TestViterbiOp(OpTest): self.inputs = { 'Input': self.input, 'Transition': self.trans, - 'Length': self.length + 'Length': self.length, } self.attrs = { 'include_bos_eos_tag': self.use_tag, @@ -100,12 +101,14 @@ class TestViterbiOp(OpTest): class TestViterbiAPI(unittest.TestCase): - def set_attr(self): self.use_tag = True self.bz, self.len, self.ntags = 4, 8, 10 - self.places = [fluid.CPUPlace(), fluid.CUDAPlace(0)] \ - if core.is_compiled_with_cuda() else [fluid.CPUPlace()] + self.places = ( + [fluid.CPUPlace(), fluid.CUDAPlace(0)] + if core.is_compiled_with_cuda() + else [fluid.CPUPlace()] + ) def setUp(self): self.set_attr() @@ -119,12 +122,12 @@ class TestViterbiAPI(unittest.TestCase): def check_static_result(self, place): bz, length, ntags = self.bz, self.len, self.ntags with fluid.program_guard(fluid.Program(), fluid.Program()): - Input = fluid.data(name="Input", - shape=[bz, length, ntags], - dtype="float32") - Transition = fluid.data(name="Transition", - shape=[ntags, ntags], - dtype="float32") + Input = fluid.data( + name="Input", shape=[bz, length, ntags], dtype="float32" + ) + Transition = fluid.data( + name="Transition", shape=[ntags, ntags], dtype="float32" + ) Length = fluid.data(name="Length", shape=[bz], dtype="int64") decoder = paddle.text.ViterbiDecoder(Transition, self.use_tag) score, path = decoder(Input, Length) @@ -132,7 +135,7 @@ class TestViterbiAPI(unittest.TestCase): feed_list = { "Input": self.input, "Transition": self.transitions, - "Length": self.length + "Length": self.length, } fetches = exe.run(feed=feed_list, fetch_list=[score, path]) np.testing.assert_allclose(fetches[0], self.scores, rtol=1e-5) diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index 81dce5b1a8692d04f5ed2bf86c77927bcf71c84c..91bebab2f6c346198c995990e05d26b46ce2106e 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -29,9 +29,17 @@ CUDA_BLOCK_SIZE = 32 class CTCForward(object): - - def __init__(self, softmax, softmax_lod, labels, labels_lod, num_classes, - batch_size, blank, norm_by_times): + def __init__( + self, + softmax, + softmax_lod, + labels, + labels_lod, + num_classes, + batch_size, + blank, + norm_by_times, + ): self.softmax = softmax self.softmax_lod = softmax_lod self.labels = labels @@ -114,15 +122,17 @@ class CTCForward(object): # calculate the forward and backward variables, # reference Chapter 7.3 of "Alex Grave, 
Supervised Sequence # Labelling with Recurrent Neural Networks" - log_acts = np.zeros([total_times, self.num_classes], - dtype=softmax_a_sequence.dtype) + log_acts = np.zeros( + [total_times, self.num_classes], dtype=softmax_a_sequence.dtype + ) for i in range(total_times): for j in range(self.num_classes): log_acts[i, j] = self.safe_log(softmax_a_sequence[i, j]) # calculate the forward variables - forward_vars = np.zeros([total_times, total_segments], - dtype=softmax_a_sequence.dtype) + forward_vars = np.zeros( + [total_times, total_segments], dtype=softmax_a_sequence.dtype + ) for i in range(total_times): for j in range(total_segments): forward_vars[i, j] = self.LOG_ZERO @@ -142,10 +152,13 @@ class CTCForward(object): if j & 1 == 1: label_idx = j // 2 label_val = labels_a_sequence[label_idx, 0] - fv = self.log_add(forward_vars[i - 1, j], - forward_vars[i - 1, j - 1]) - if j > 1 and label_val != labels_a_sequence[label_idx - 1, - 0]: + fv = self.log_add( + forward_vars[i - 1, j], forward_vars[i - 1, j - 1] + ) + if ( + j > 1 + and label_val != labels_a_sequence[label_idx - 1, 0] + ): fv = self.log_add(fv, forward_vars[i - 1, j - 2]) fv = self.log_mul(fv, log_acts[i, label_val]) else: @@ -159,7 +172,8 @@ class CTCForward(object): log_prob = forward_vars[total_times - 1, total_segments - 1] if total_segments > 1: log_prob = self.log_add( - log_prob, forward_vars[total_times - 1, total_segments - 2]) + log_prob, forward_vars[total_times - 1, total_segments - 2] + ) return -log_prob @@ -174,33 +188,38 @@ class CTCForward(object): labels_end_i = labels_offset + self.labels_lod[self.level][i] softmax_a_sequence = self.softmax[ - softmax_start_i:softmax_end_i, :] + softmax_start_i:softmax_end_i, : + ] labels_a_sequence = self.labels[labels_start_i:labels_end_i, :] - self.loss[i] = self.forward_a_sequence(softmax_a_sequence, - labels_a_sequence) + self.loss[i] = self.forward_a_sequence( + softmax_a_sequence, labels_a_sequence + ) softmax_offset += self.softmax_lod[self.level][i] labels_offset += self.labels_lod[self.level][i] else: - softmax_a_sequence = self.softmax[:self.softmax_lod[i], i, :] - labels_a_sequence = self.labels[:self.labels_lod[i], :] - self.loss[i] = self.forward_a_sequence(softmax_a_sequence, - labels_a_sequence) + softmax_a_sequence = self.softmax[: self.softmax_lod[i], i, :] + labels_a_sequence = self.labels[: self.labels_lod[i], :] + self.loss[i] = self.forward_a_sequence( + softmax_a_sequence, labels_a_sequence + ) return self.loss -def python_api(logits, - label, - logits_length=None, - labels_length=None, - blank=0, - norm_by_times=False): - return paddle.fluid.layers.warpctc(logits, label, blank, norm_by_times, - logits_length, labels_length) +def python_api( + logits, + label, + logits_length=None, + labels_length=None, + blank=0, + norm_by_times=False, +): + return paddle.fluid.layers.warpctc( + logits, label, blank, norm_by_times, logits_length, labels_length + ) class TestWarpCTCOp(OpTest): - def config(self): self.batch_size = 4 self.num_classes = 12 @@ -214,31 +233,39 @@ class TestWarpCTCOp(OpTest): self.config() logits = np.random.uniform( - 0.1, 1.0, - [sum(self.logits_lod[0]), self.num_classes]).astype("float32") + 0.1, 1.0, [sum(self.logits_lod[0]), self.num_classes] + ).astype("float32") softmax = np.apply_along_axis(stable_softmax, 1, logits) # labels should not be blank - labels = np.random.randint(0, - self.num_classes - 1, - [sum(self.labels_lod[0]), 1], - dtype="int32") - - ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod, - 
self.num_classes, self.batch_size, self.blank, - self.norm_by_times) + labels = np.random.randint( + 0, self.num_classes - 1, [sum(self.labels_lod[0]), 1], dtype="int32" + ) + + ctc = CTCForward( + softmax, + self.logits_lod, + labels, + self.labels_lod, + self.num_classes, + self.batch_size, + self.blank, + self.norm_by_times, + ) loss = ctc.forward() max_sequence_length = 0 for i in range(self.batch_size): - max_sequence_length = max(max_sequence_length, - self.logits_lod[0][i]) + max_sequence_length = max( + max_sequence_length, self.logits_lod[0][i] + ) self.gradient = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], - dtype=logits.dtype) + dtype=logits.dtype, + ) self.inputs = { "Logits": (logits, self.logits_lod), - "Label": (labels, self.labels_lod) + "Label": (labels, self.labels_lod), } self.outputs = {"Loss": loss} self.attrs = { @@ -252,19 +279,22 @@ class TestWarpCTCOp(OpTest): def test_check_grad(self): self.outputs['WarpCTCGrad'] = self.gradient if core.is_compiled_with_rocm(): - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.009, - check_dygraph=False) + self.check_grad( + ["Logits"], + "Loss", + max_relative_error=0.009, + check_dygraph=False, + ) else: - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.007, - check_dygraph=False) + self.check_grad( + ["Logits"], + "Loss", + max_relative_error=0.007, + check_dygraph=False, + ) class TestWarpCTCOpCase1(TestWarpCTCOp): - def config(self): self.batch_size = 4 self.num_classes = CUDA_BLOCK_SIZE + 2 @@ -275,7 +305,6 @@ class TestWarpCTCOpCase1(TestWarpCTCOp): class TestWarpCTCOpWithPadding(OpTest): - def config(self): self.batch_size = 4 self.num_classes = 8 @@ -293,28 +322,36 @@ class TestWarpCTCOpWithPadding(OpTest): self.config() logits = np.random.uniform( - 0.1, 1.0, - [sum(self.logits_length), self.num_classes]).astype("float32") + 0.1, 1.0, [sum(self.logits_length), self.num_classes] + ).astype("float32") softmax = np.apply_along_axis(stable_softmax, 1, logits) # labels should not be blank - labels = np.random.randint(0, - self.num_classes - 1, - [sum(self.labels_length), 1], - dtype="int32") - - ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod, - self.num_classes, self.batch_size, self.blank, - self.norm_by_times) + labels = np.random.randint( + 0, self.num_classes - 1, [sum(self.labels_length), 1], dtype="int32" + ) + + ctc = CTCForward( + softmax, + self.logits_lod, + labels, + self.labels_lod, + self.num_classes, + self.batch_size, + self.blank, + self.norm_by_times, + ) loss = ctc.forward() max_sequence_length = 0 for i in range(self.batch_size): - max_sequence_length = max(max_sequence_length, - self.logits_length[i]) + max_sequence_length = max( + max_sequence_length, self.logits_length[i] + ) # reshape logits to T*N*S new_logits = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], - dtype=logits.dtype) + dtype=logits.dtype, + ) cur = 0 for batch_id in range(self.batch_size): @@ -326,10 +363,12 @@ class TestWarpCTCOpWithPadding(OpTest): # reshape labels to N*S max_target_seq_length = 0 for i in range(self.batch_size): - max_target_seq_length = max(max_target_seq_length, - self.labels_length[i]) - new_labels = np.zeros([self.batch_size, max_target_seq_length], - dtype="int32") + max_target_seq_length = max( + max_target_seq_length, self.labels_length[i] + ) + new_labels = np.zeros( + [self.batch_size, max_target_seq_length], dtype="int32" + ) cur = 0 for batch_id in range(self.batch_size): @@ -339,13 +378,14 @@ class 
TestWarpCTCOpWithPadding(OpTest): self.gradient = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], - dtype=logits.dtype) + dtype=logits.dtype, + ) self.inputs = { "Logits": new_logits, "Label": new_labels, "LogitsLength": self.logits_length, - "LabelLength": self.labels_length + "LabelLength": self.labels_length, } self.outputs = {"Loss": loss} self.attrs = { @@ -359,19 +399,22 @@ class TestWarpCTCOpWithPadding(OpTest): def test_check_grad(self): self.outputs['WarpCTCGrad'] = self.gradient if core.is_compiled_with_rocm(): - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.009, - check_dygraph=False) + self.check_grad( + ["Logits"], + "Loss", + max_relative_error=0.009, + check_dygraph=False, + ) else: - self.check_grad(["Logits"], - "Loss", - max_relative_error=0.007, - check_dygraph=False) + self.check_grad( + ["Logits"], + "Loss", + max_relative_error=0.007, + check_dygraph=False, + ) class TestWarpCTCOpWithPaddingCase1(TestWarpCTCOpWithPadding): - def config(self): self.batch_size = 4 self.num_classes = CUDA_BLOCK_SIZE + 2 @@ -384,7 +427,6 @@ class TestWarpCTCOpWithPaddingCase1(TestWarpCTCOpWithPadding): class TestWarpCTCOpFp64(OpTest): - def config(self): self.batch_size = 4 self.num_classes = 8 @@ -402,28 +444,36 @@ class TestWarpCTCOpFp64(OpTest): self.config() logits = np.random.uniform( - 0.1, 1.0, - [sum(self.logits_length), self.num_classes]).astype("float64") + 0.1, 1.0, [sum(self.logits_length), self.num_classes] + ).astype("float64") softmax = np.apply_along_axis(stable_softmax, 1, logits) # labels should not be blank - labels = np.random.randint(0, - self.num_classes - 1, - [sum(self.labels_length), 1], - dtype="int32") - - ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod, - self.num_classes, self.batch_size, self.blank, - self.norm_by_times) + labels = np.random.randint( + 0, self.num_classes - 1, [sum(self.labels_length), 1], dtype="int32" + ) + + ctc = CTCForward( + softmax, + self.logits_lod, + labels, + self.labels_lod, + self.num_classes, + self.batch_size, + self.blank, + self.norm_by_times, + ) loss = ctc.forward() max_sequence_length = 0 for i in range(self.batch_size): - max_sequence_length = max(max_sequence_length, - self.logits_length[i]) + max_sequence_length = max( + max_sequence_length, self.logits_length[i] + ) # reshape logits to T*N*S new_logits = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], - dtype=logits.dtype) + dtype=logits.dtype, + ) cur = 0 for batch_id in range(self.batch_size): @@ -435,10 +485,12 @@ class TestWarpCTCOpFp64(OpTest): # reshape labels to N*S max_target_seq_length = 0 for i in range(self.batch_size): - max_target_seq_length = max(max_target_seq_length, - self.labels_length[i]) - new_labels = np.zeros([self.batch_size, max_target_seq_length], - dtype="int32") + max_target_seq_length = max( + max_target_seq_length, self.labels_length[i] + ) + new_labels = np.zeros( + [self.batch_size, max_target_seq_length], dtype="int32" + ) cur = 0 for batch_id in range(self.batch_size): @@ -448,13 +500,14 @@ class TestWarpCTCOpFp64(OpTest): self.gradient = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], - dtype=logits.dtype) + dtype=logits.dtype, + ) self.inputs = { "Logits": new_logits, "Label": new_labels, "LogitsLength": self.logits_length, - "LabelLength": self.labels_length + "LabelLength": self.labels_length, } self.outputs = {"Loss": loss} self.attrs = { @@ -471,58 +524,64 @@ class TestWarpCTCOpFp64(OpTest): class TestWarpCTCOpError(unittest.TestCase): - 
def test_errors(self): with program_guard(Program(), Program()): - logits = fluid.data(name='logits', - shape=[5, 16, 6], - dtype='float32') - logits_length = fluid.data(name='logits_length', - shape=[None], - dtype='int64') + logits = fluid.data( + name='logits', shape=[5, 16, 6], dtype='float32' + ) + logits_length = fluid.data( + name='logits_length', shape=[None], dtype='int64' + ) label = fluid.data(name='label', shape=[16, 3], dtype='int32') - label_length = fluid.data(name='labels_length', - shape=[None], - dtype='int64') + label_length = fluid.data( + name='labels_length', shape=[None], dtype='int64' + ) def test_logits_Variable(): logits_data = np.random.rand(5, 16, 6).astype(logits.dtype) - fluid.layers.warpctc(input=logits_data, - label=label, - input_length=logits_length, - label_length=label_length) + fluid.layers.warpctc( + input=logits_data, + label=label, + input_length=logits_length, + label_length=label_length, + ) self.assertRaises(TypeError, test_logits_Variable) def test_label_Variable(): label_data = np.random.randint(0, 5, [5, 1]).astype("int32") - fluid.layers.warpctc(input=logits, - label=label_data, - input_length=logits_length, - label_length=label_length) + fluid.layers.warpctc( + input=logits, + label=label_data, + input_length=logits_length, + label_length=label_length, + ) self.assertRaises(TypeError, test_label_Variable) def test_logits_len_Variable(): logits_length_data = np.array([5] * 16).astype("int64") - fluid.layers.warpctc(input=logits, - label=label, - input_length=logits_length_data, - label_length=label_length) + fluid.layers.warpctc( + input=logits, + label=label, + input_length=logits_length_data, + label_length=label_length, + ) self.assertRaises(TypeError, test_logits_len_Variable) def test_label_len_Variable(): label_length_data = np.array([3] * 16).astype("int64") - fluid.layers.warpctc(input=logits, - label=label, - input_length=logits_length, - label_length=label_length_data) + fluid.layers.warpctc( + input=logits, + label=label, + input_length=logits_length, + label_length=label_length_data, + ) self.assertRaises(TypeError, test_label_len_Variable) def test_dygraph_errors(self): - def test_dygraph_with_lod(): logits = np.random.uniform(0.1, 1.0, [20, 15]).astype("float32") @@ -539,7 +598,6 @@ class TestWarpCTCOpError(unittest.TestCase): class TestCTCLossAPICase(unittest.TestCase): - def test_functinal_api(self): self.batch_size = 4 self.num_classes = CUDA_BLOCK_SIZE + 2 @@ -549,20 +607,29 @@ class TestCTCLossAPICase(unittest.TestCase): self.norm_by_times = False logits = np.random.uniform( - 0.1, 1.0, - [max(self.logits_length), self.batch_size, self.num_classes - ]).astype("float32") + 0.1, + 1.0, + [max(self.logits_length), self.batch_size, self.num_classes], + ).astype("float32") softmax = np.apply_along_axis(stable_softmax, -1, logits) # labels should not be blank labels = np.random.randint( 0, self.num_classes - 1, [self.batch_size, max(self.labels_length)], - dtype="int32") - - ctc = CTCForward(softmax, self.logits_length, labels, - self.labels_length, self.num_classes, self.batch_size, - self.blank, self.norm_by_times) + dtype="int32", + ) + + ctc = CTCForward( + softmax, + self.logits_length, + labels, + self.labels_length, + self.num_classes, + self.batch_size, + self.blank, + self.norm_by_times, + ) loss_np = ctc.forward() paddle.disable_static() @@ -570,30 +637,33 @@ class TestCTCLossAPICase(unittest.TestCase): labels = paddle.to_tensor(labels) logits_length = paddle.to_tensor(self.logits_length) labels_length = 
paddle.to_tensor(self.labels_length) - loss_pd_mean = F.ctc_loss(softmax, - labels, - logits_length, - labels_length, - blank=self.blank, - reduction='mean') + loss_pd_mean = F.ctc_loss( + softmax, + labels, + logits_length, + labels_length, + blank=self.blank, + reduction='mean', + ) loss_pd_mean = loss_pd_mean.numpy() - loss_pd_sum = F.ctc_loss(softmax, - labels, - logits_length, - labels_length, - blank=self.blank, - reduction='sum') + loss_pd_sum = F.ctc_loss( + softmax, + labels, + logits_length, + labels_length, + blank=self.blank, + reduction='sum', + ) loss_pd_sum = loss_pd_sum.numpy() paddle.enable_static() loss_np = np.squeeze(loss_np, axis=-1) loss_np_mean = (loss_np / labels_length.numpy()).mean() loss_np_sum = loss_np.sum() - np.testing.assert_allclose(loss_pd_mean, - loss_np_mean, - rtol=1e-05, - atol=1) + np.testing.assert_allclose( + loss_pd_mean, loss_np_mean, rtol=1e-05, atol=1 + ) np.testing.assert_allclose(loss_pd_sum, loss_np_sum, rtol=1e-05, atol=1) def test_class_api(self): @@ -605,20 +675,29 @@ class TestCTCLossAPICase(unittest.TestCase): self.norm_by_times = False logits = np.random.uniform( - 0.1, 1.0, - [max(self.logits_length), self.batch_size, self.num_classes - ]).astype("float32") + 0.1, + 1.0, + [max(self.logits_length), self.batch_size, self.num_classes], + ).astype("float32") softmax = np.apply_along_axis(stable_softmax, -1, logits) # labels should not be blank labels = np.random.randint( 1, self.num_classes, [self.batch_size, max(self.labels_length)], - dtype="int32") - - ctc = CTCForward(softmax, self.logits_length, labels, - self.labels_length, self.num_classes, self.batch_size, - self.blank, self.norm_by_times) + dtype="int32", + ) + + ctc = CTCForward( + softmax, + self.logits_length, + labels, + self.labels_length, + self.num_classes, + self.batch_size, + self.blank, + self.norm_by_times, + ) loss_np = ctc.forward() paddle.disable_static() @@ -627,9 +706,9 @@ class TestCTCLossAPICase(unittest.TestCase): logits_length = paddle.to_tensor(self.logits_length) labels_length = paddle.to_tensor(self.labels_length) - loss_pd = paddle.nn.CTCLoss(self.blank, - 'none')(softmax, labels, logits_length, - labels_length) + loss_pd = paddle.nn.CTCLoss(self.blank, 'none')( + softmax, labels, logits_length, labels_length + ) loss_pd = loss_pd.numpy() paddle.enable_static() loss_np = np.squeeze(loss_np, axis=-1) diff --git a/python/paddle/fluid/tests/unittests/test_weight_decay.py b/python/paddle/fluid/tests/unittests/test_weight_decay.py index 1b099ab25a3473e3cee18265ef350b2b71619fad..5133a203755937ebc79104c89caa800dca78f9b0 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_decay.py +++ b/python/paddle/fluid/tests/unittests/test_weight_decay.py @@ -40,22 +40,24 @@ def prog_scope_guard(main_prog, startup_prog): yield -def bow_net(data, - label, - dict_dim, - is_sparse=False, - emb_dim=128, - hid_dim=128, - hid_dim2=96, - class_dim=2): +def bow_net( + data, + label, + dict_dim, + is_sparse=False, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2, +): """ BOW net This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - emb = fluid.layers.embedding(input=data, - is_sparse=is_sparse, - size=[dict_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] + ) bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') bow_tanh = fluid.layers.tanh(bow) fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") @@ -68,13 +70,13 @@ def bow_net(data, 
class TestWeightDecay(unittest.TestCase): - def setUp(self): self.word_dict = paddle.dataset.imdb.word_dict() - reader = paddle.batch(paddle.dataset.imdb.train(self.word_dict), - batch_size=4)() + reader = paddle.batch( + paddle.dataset.imdb.train(self.word_dict), batch_size=4 + )() self.train_data = [next(reader) for _ in range(5)] - self.learning_rate = .5 + self.learning_rate = 0.5 def run_executor(self, place, feed_list, loss): exe = fluid.Executor(place) @@ -83,22 +85,24 @@ class TestWeightDecay(unittest.TestCase): main_prog = fluid.default_main_program() loss_set = [] for data in self.train_data: - out = exe.run(main_prog, - feed=feeder.feed(data), - fetch_list=[loss.name]) + out = exe.run( + main_prog, feed=feeder.feed(data), fetch_list=[loss.name] + ) print("loss %s" % (np.average(out))) loss_set.append(np.average(out)) return loss_set - def run_parallel_exe(self, - place, - feed_list, - loss, - use_reduce=False, - use_fast_executor=False, - use_ir_memory_optimize=False): + def run_parallel_exe( + self, + place, + feed_list, + loss, + use_reduce=False, + use_fast_executor=False, + use_ir_memory_optimize=False, + ): exe = fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=feed_list, place=place) exe.run(fluid.default_startup_program()) @@ -108,57 +112,63 @@ class TestWeightDecay(unittest.TestCase): exec_strategy.use_experimental_executor = True build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \ - if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce + build_strategy.reduce_strategy = ( + fluid.BuildStrategy.ReduceStrategy.Reduce + if use_reduce + else fluid.BuildStrategy.ReduceStrategy.AllReduce + ) build_strategy.memory_optimize = use_ir_memory_optimize train_cp = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, - exec_strategy=exec_strategy, - build_strategy=build_strategy) + fluid.default_main_program() + ).with_data_parallel( + loss_name=loss.name, + exec_strategy=exec_strategy, + build_strategy=build_strategy, + ) loss_set = [] for data in self.train_data: - out = exe.run(train_cp, - feed=feeder.feed(data), - fetch_list=[loss.name]) + out = exe.run( + train_cp, feed=feeder.feed(data), fetch_list=[loss.name] + ) loss_set.append(np.average(out)) return loss_set - def check_weight_decay(self, - place, - model, - use_parallel_exe=False, - use_reduce=False): + def check_weight_decay( + self, place, model, use_parallel_exe=False, use_reduce=False + ): main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() startup_prog.random_seed = 1 with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): - data = fluid.layers.data(name="words", - shape=[1], - dtype="int64", - lod_level=1) + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1 + ) label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, len(self.word_dict)) - param_list = [(var, var * self.learning_rate) - for var in main_prog.block(0).all_parameters()] + param_list = [ + (var, var * self.learning_rate) + for var in main_prog.block(0).all_parameters() + ] optimizer = fluid.optimizer.Adagrad( - learning_rate=self.learning_rate) + learning_rate=self.learning_rate + ) optimizer.minimize(avg_cost) for params in param_list: - updated_p = fluid.layers.elementwise_sub(x=params[0], - y=params[1]) + updated_p = fluid.layers.elementwise_sub( + x=params[0], y=params[1] + ) 
fluid.layers.assign(input=updated_p, output=params[0]) if use_parallel_exe: - loss = self.run_parallel_exe(place, [data, label], - loss=avg_cost, - use_reduce=use_reduce) + loss = self.run_parallel_exe( + place, [data, label], loss=avg_cost, use_reduce=use_reduce + ) else: loss = self.run_executor(place, [data, label], loss=avg_cost) @@ -170,16 +180,21 @@ class TestWeightDecay(unittest.TestCase): loss = self.check_weight_decay(place, model, use_parallel_exe=False) # TODO(zcd): should test use_reduce=True - loss2 = self.check_weight_decay(place, - model, - use_parallel_exe=True, - use_reduce=False) + loss2 = self.check_weight_decay( + place, model, use_parallel_exe=True, use_reduce=False + ) for i in range(len(loss)): self.assertTrue( np.isclose(a=loss[i], b=loss2[i], rtol=5e-5), - "Expect " + str(loss[i]) + "\n" + "But Got" + - str(loss2[i]) + " in class " + self.__class__.__name__) + "Expect " + + str(loss[i]) + + "\n" + + "But Got" + + str(loss2[i]) + + " in class " + + self.__class__.__name__, + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py index d7a0fa3cd185ae839136ce75a8933c6a706dc835..9d77dadf8dc09ea863edf64235f6e1b857c81e8f 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py +++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py @@ -24,7 +24,7 @@ from paddle.fluid.param_attr import WeightNormParamAttr class TestWeightNormalization(unittest.TestCase): batch_size = 3 hidden_size = 5 - data_desc = (['x', [10], 0], ) + data_desc = (['x', [10], 0],) @classmethod def setUpClass(cls): @@ -32,21 +32,26 @@ class TestWeightNormalization(unittest.TestCase): @classmethod def set_program(cls): - data = fluid.layers.data(name=cls.data_desc[0][0], - shape=cls.data_desc[0][1]) - out = fluid.layers.fc(input=data, - size=cls.hidden_size, - param_attr=WeightNormParamAttr( - dim=None, - name='weight_norm_param', - initializer=ConstantInitializer(1.0)), - bias_attr=False, - act=None) + data = fluid.layers.data( + name=cls.data_desc[0][0], shape=cls.data_desc[0][1] + ) + out = fluid.layers.fc( + input=data, + size=cls.hidden_size, + param_attr=WeightNormParamAttr( + dim=None, + name='weight_norm_param', + initializer=ConstantInitializer(1.0), + ), + bias_attr=False, + act=None, + ) loss = fluid.layers.reduce_sum(out) fluid.backward.append_backward(loss=loss) cls.fetch_list = [ - 'weight_norm_param_g', 'weight_norm_param_v', - 'weight_norm_param_g@GRAD' + 'weight_norm_param_g', + 'weight_norm_param_v', + 'weight_norm_param_g@GRAD', ] def run_program(self): @@ -58,10 +63,12 @@ class TestWeightNormalization(unittest.TestCase): self.set_inputs(place) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - output = exe.run(fluid.default_main_program(), - feed=self.inputs, - fetch_list=self.fetch_list, - return_numpy=False) + output = exe.run( + fluid.default_main_program(), + feed=self.inputs, + fetch_list=self.fetch_list, + return_numpy=False, + ) outputs.append(output) self.actual_outputs = outputs @@ -73,14 +80,16 @@ class TestWeightNormalization(unittest.TestCase): data_lod_level = desc[2] data_lod = [] for i in range(data_lod_level): - lod_level_i = np.random.randint(low=1, - high=5, - size=self.batch_size if i == 0 - else sum(lod_level_i)).tolist() + lod_level_i = np.random.randint( + low=1, + high=5, + size=self.batch_size if i == 0 else sum(lod_level_i), + ).tolist() data_lod.append(lod_level_i) data_value = 
np.random.random( - size=[sum(data_lod[-1]) if data_lod else self.batch_size] + - data_shape).astype('float32') + size=[sum(data_lod[-1]) if data_lod else self.batch_size] + + data_shape + ).astype('float32') self.data[data_name] = (data_value, data_lod) def set_inputs(self, place): @@ -94,14 +103,16 @@ class TestWeightNormalization(unittest.TestCase): def weight_normalize(self): v = np.ones( - (self.data[self.data_desc[0][0]][0].shape[-1], self.hidden_size)) + (self.data[self.data_desc[0][0]][0].shape[-1], self.hidden_size) + ) g = np.linalg.norm(v, axis=None, keepdims=True) w = g * v / np.linalg.norm(v, axis=None, keepdims=True) x = self.data[self.data_desc[0][0]][0] out = np.dot(x, w) - g_grad = (np.dot(x.T, np.ones_like(out)) * - (v / np.linalg.norm(v, axis=None, keepdims=True))).sum( - axis=None, keepdims=True) + g_grad = ( + np.dot(x.T, np.ones_like(out)) + * (v / np.linalg.norm(v, axis=None, keepdims=True)) + ).sum(axis=None, keepdims=True) return g, v, g_grad def test_weight_normalization(self): @@ -110,10 +121,9 @@ class TestWeightNormalization(unittest.TestCase): expect_output = self.weight_normalize() for actual_output in self.actual_outputs: [ - np.testing.assert_allclose(np.array(actual), - expect, - rtol=1e-05, - atol=0.001) + np.testing.assert_allclose( + np.array(actual), expect, rtol=1e-05, atol=0.001 + ) for expect, actual in zip(expect_output, actual_output) ] diff --git a/python/paddle/fluid/tests/unittests/test_where_index.py b/python/paddle/fluid/tests/unittests/test_where_index.py index ad561c4a512684d9ca96b4414c466661b393918a..96b26dd6cae881bdcca8a9b9d827fb1e3500f877 100644 --- a/python/paddle/fluid/tests/unittests/test_where_index.py +++ b/python/paddle/fluid/tests/unittests/test_where_index.py @@ -22,7 +22,6 @@ from paddle.fluid import Program, program_guard class TestWhereIndexOp(OpTest): - def setUp(self): self.op_type = "where_index" self.init_config() @@ -39,7 +38,6 @@ class TestWhereIndexOp(OpTest): class TestAllFalse(unittest.TestCase): - def setUp(self): self.op_type = "where_index" self.init_config() @@ -71,7 +69,6 @@ class TestAllFalse(unittest.TestCase): class TestRank2(TestWhereIndexOp): - def init_config(self): self.inputs = { 'Condition': np.array([[True, False], [False, True]]), @@ -81,24 +78,26 @@ class TestRank2(TestWhereIndexOp): class TestRank3(TestWhereIndexOp): - def init_config(self): self.inputs = { - 'Condition': - np.array([[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [False, True]]]), + 'Condition': np.array( + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [False, True]], + ] + ), } self.outputs = { - 'Out': - np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], - dtype='int64') + 'Out': np.array( + [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], + dtype='int64', + ) } class TestWhereOpError(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') @@ -111,9 +110,7 @@ class TestWhereOpError(unittest.TestCase): class TestWhereRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.where([10]) diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index fafa4e527914262f408b7d24539a628ee9853a3a..9ae7d9a48331b2c404361a3b5ac960ad69993425 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -23,7 +23,6 @@ from paddle.fluid.framework import _test_eager_guard class TestWhereOp(OpTest): - def setUp(self): self.op_type = 'where' self.python_api = paddle.where @@ -44,7 +43,6 @@ class TestWhereOp(OpTest): class TestWhereOp2(TestWhereOp): - def init_config(self): self.x = np.random.uniform((-5), 5, (60, 2)).astype('float64') self.y = np.random.uniform((-5), 5, (60, 2)).astype('float64') @@ -52,7 +50,6 @@ class TestWhereOp2(TestWhereOp): class TestWhereOp3(TestWhereOp): - def init_config(self): self.x = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64') self.y = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64') @@ -60,7 +57,6 @@ class TestWhereOp3(TestWhereOp): class TestWhereAPI(unittest.TestCase): - def setUp(self): self.init_data() @@ -81,48 +77,51 @@ class TestWhereAPI(unittest.TestCase): for x_stop_gradient in [False, True]: for y_stop_gradient in [False, True]: with fluid.program_guard(Program(), Program()): - cond = fluid.layers.data(name='cond', - shape=self.shape, - dtype='bool') - x = fluid.layers.data(name='x', - shape=self.shape, - dtype='float32') - y = fluid.layers.data(name='y', - shape=self.shape, - dtype='float32') + cond = fluid.layers.data( + name='cond', shape=self.shape, dtype='bool' + ) + x = fluid.layers.data( + name='x', shape=self.shape, dtype='float32' + ) + y = fluid.layers.data( + name='y', shape=self.shape, dtype='float32' + ) x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient result = paddle.where(cond, x, y) append_backward(paddle.mean(result)) for use_cuda in [False, True]: - if (use_cuda - and (not fluid.core.is_compiled_with_cuda())): + if use_cuda and ( + not fluid.core.is_compiled_with_cuda() + ): break - place = (fluid.CUDAPlace(0) - if use_cuda else fluid.CPUPlace()) + place = ( + fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + ) exe = fluid.Executor(place) fetch_list = [result, result.grad_name] - if (x_stop_gradient is False): + if x_stop_gradient is False: fetch_list.append(x.grad_name) - if (y_stop_gradient is False): + if y_stop_gradient is False: fetch_list.append(y.grad_name) - out = exe.run(fluid.default_main_program(), - feed={ - 'cond': self.cond, - 'x': self.x, - 'y': self.y - }, - fetch_list=fetch_list) + out = exe.run( + fluid.default_main_program(), + feed={'cond': self.cond, 'x': self.x, 'y': self.y}, + fetch_list=fetch_list, + ) assert np.array_equal(out[0], self.out) - if (x_stop_gradient is False): - assert np.array_equal(out[2], - self.ref_x_backward(out[1])) - if (y.stop_gradient is False): + if x_stop_gradient is False: + assert np.array_equal( + out[2], self.ref_x_backward(out[1]) + ) + if y.stop_gradient is False: assert np.array_equal( - out[3], self.ref_y_backward(out[1])) - elif (y.stop_gradient is False): - assert np.array_equal(out[2], - self.ref_y_backward(out[1])) + out[3], self.ref_y_backward(out[1]) + ) + elif y.stop_gradient is False: + assert np.array_equal( + out[2], self.ref_y_backward(out[1]) + ) def test_api_broadcast(self, use_cuda=False): main_program = Program() @@ -130,20 +129,20 @@ class TestWhereAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32') - y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, - 1.0]]).astype('float32') + y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( + 'float32' + ) result = paddle.where((x > 1), x=x, 
y=y) for use_cuda in [False, True]: - if (use_cuda and (not fluid.core.is_compiled_with_cuda())): + if use_cuda and (not fluid.core.is_compiled_with_cuda()): return - place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={ - 'x': x_i, - 'y': y_i - }, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'x': x_i, 'y': y_i}, + fetch_list=[result], + ) assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i)) def test_scalar(self): @@ -151,21 +150,23 @@ class TestWhereAPI(unittest.TestCase): main_program = Program() with fluid.program_guard(main_program): cond_shape = [2, 4] - cond = fluid.layers.data(name='cond', - shape=cond_shape, - dtype='bool') + cond = fluid.layers.data( + name='cond', shape=cond_shape, dtype='bool' + ) x_data = 1.0 y_data = 2.0 cond_data = np.array([False, False, True, True]).astype('bool') result = paddle.where(condition=cond, x=x_data, y=y_data) for use_cuda in [False, True]: - if (use_cuda and (not fluid.core.is_compiled_with_cuda())): + if use_cuda and (not fluid.core.is_compiled_with_cuda()): return - place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={'cond': cond_data}, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'cond': cond_data}, + fetch_list=[result], + ) expect = np.where(cond_data, x_data, y_data) assert np.array_equal(out[0], expect) @@ -173,28 +174,26 @@ class TestWhereAPI(unittest.TestCase): paddle.enable_static() main_program = Program() with fluid.program_guard(main_program): - cond = fluid.layers.data(name='cond', - shape=cond_shape, - dtype='bool') + cond = fluid.layers.data( + name='cond', shape=cond_shape, dtype='bool' + ) x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') cond_data_tmp = np.random.random(size=cond_shape).astype('float32') - cond_data = (cond_data_tmp < 0.3) + cond_data = cond_data_tmp < 0.3 x_data = np.random.random(size=x_shape).astype('float32') y_data = np.random.random(size=y_shape).astype('float32') result = paddle.where(condition=cond, x=x, y=y) for use_cuda in [False, True]: - if (use_cuda and (not fluid.core.is_compiled_with_cuda())): + if use_cuda and (not fluid.core.is_compiled_with_cuda()): return - place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - out = exe.run(fluid.default_main_program(), - feed={ - 'cond': cond_data, - 'x': x_data, - 'y': y_data - }, - fetch_list=[result]) + out = exe.run( + fluid.default_main_program(), + feed={'cond': cond_data, 'x': x_data, 'y': y_data}, + fetch_list=[result], + ) expect = np.where(cond_data, x_data, y_data) assert np.array_equal(out[0], expect) @@ -248,7 +247,6 @@ class TestWhereAPI(unittest.TestCase): class TestWhereDygraphAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64') @@ -272,7 +270,7 @@ class TestWhereDygraphAPI(unittest.TestCase): def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): with fluid.dygraph.guard(): cond_tmp = paddle.rand(cond_shape) - cond = (cond_tmp < 0.3) + cond = cond_tmp < 0.3 a = 
paddle.rand(a_shape) b = paddle.rand(b_shape) result = paddle.where(cond, a, b) @@ -337,9 +335,9 @@ class TestWhereDygraphAPI(unittest.TestCase): self.assertEqual(len(y), 2) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - (res, ) = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0, 0], [1, 1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) data = np.array([True, True, False]) @@ -350,9 +348,9 @@ class TestWhereDygraphAPI(unittest.TestCase): self.assertEqual(len(y), 1) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - (res, ) = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res,) = exe.run( + feed={'x': data}, fetch_list=[z.name], return_numpy=False + ) expect_out = np.array([[0], [1]]) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) @@ -370,7 +368,6 @@ class TestWhereDygraphAPI(unittest.TestCase): class TestWhereOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64') @@ -394,7 +391,7 @@ class TestWhereOpError(unittest.TestCase): with fluid.dygraph.guard(): cond_shape = [2, 2, 4] cond_tmp = paddle.rand(cond_shape) - cond = (cond_tmp < 0.3) + cond = cond_tmp < 0.3 a = paddle.rand(cond_shape) self.assertRaises(ValueError, paddle.where, cond, a) diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index 7d5fc61f28badff2a9275923b884007b8d9fbff4..3c91b8c1e235cca8abcdec28a9fe492d371ae28e 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -26,9 +26,7 @@ paddle.enable_static() class TestApiWhileLoop(unittest.TestCase): - def test_var_tuple(self): - def cond(i): return layers.less_than(i, ten) @@ -41,18 +39,20 @@ class TestApiWhileLoop(unittest.TestCase): i = layers.fill_constant(shape=[1], dtype='int64', value=0) one = layers.fill_constant(shape=[1], dtype='int64', value=1) ten = layers.fill_constant(shape=[1], dtype='int64', value=10) - out = layers.while_loop(cond, body, (i, )) + out = layers.while_loop(cond, body, (i,)) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) res = exe.run(main_program, fetch_list=out) - np.testing.assert_allclose(np.asarray(res[0]), - np.full(1, 10, np.int64), - rtol=1e-05) + np.testing.assert_allclose( + np.asarray(res[0]), np.full(1, 10, np.int64), rtol=1e-05 + ) def test_var_list(self): - def cond(i, mem): return layers.less_than(i, ten) @@ -73,8 +73,11 @@ class TestApiWhileLoop(unittest.TestCase): data = np.random.rand(10).astype('float32') data_one = np.ones(10).astype('float32') - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) res = exe.run(main_program, feed={'mem': data}, fetch_list=out) for i in range(10): @@ -82,7 +85,6 @@ class TestApiWhileLoop(unittest.TestCase): np.testing.assert_allclose(np.asarray(res[1]), data, rtol=1e-05) def test_var_dict(self): - def cond(i, ten, test_dict, test_list, test_list_dict): return 
layers.less_than(i, ten) @@ -94,7 +96,8 @@ class TestApiWhileLoop(unittest.TestCase): test_list_dict[0]["test_key"] += 1 test_list_dict[0]["test_key"] = fluid.layers.relu( - test_list_dict[0]["test_key"]) + test_list_dict[0]["test_key"] + ) i = layers.increment(i) return [i, ten, test_dict, test_list, test_list_dict] @@ -110,47 +113,54 @@ class TestApiWhileLoop(unittest.TestCase): test_list = [ layers.fill_constant(shape=[1, 2], dtype='int64', value=0) ] - test_list_dict = [{ - "test_key": - layers.fill_constant(shape=[1], dtype='float32', value=0) - }] + test_list_dict = [ + { + "test_key": layers.fill_constant( + shape=[1], dtype='float32', value=0 + ) + } + ] i, ten, test_dict, test_list, test_list_dict = layers.while_loop( - cond, body, [i, ten, test_dict, test_list, test_list_dict]) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + cond, body, [i, ten, test_dict, test_list, test_list_dict] + ) + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(main_program, - fetch_list=[ - test_dict["test_key"], test_list[0], - test_list_dict[0]["test_key"] - ]) - np.testing.assert_allclose(np.asarray(res[0]), - np.full(shape=1, - fill_value=10, - dtype=np.int64), - rtol=1e-05) - np.testing.assert_allclose(np.asarray(res[1]), - np.full(shape=(2, 1), - fill_value=10, - dtype=np.int64), - rtol=1e-05) - np.testing.assert_allclose(np.asarray(res[2]), - np.full(shape=1, - fill_value=10, - dtype=np.float32), - rtol=1e-05) + res = exe.run( + main_program, + fetch_list=[ + test_dict["test_key"], + test_list[0], + test_list_dict[0]["test_key"], + ], + ) + np.testing.assert_allclose( + np.asarray(res[0]), + np.full(shape=1, fill_value=10, dtype=np.int64), + rtol=1e-05, + ) + np.testing.assert_allclose( + np.asarray(res[1]), + np.full(shape=(2, 1), fill_value=10, dtype=np.int64), + rtol=1e-05, + ) + np.testing.assert_allclose( + np.asarray(res[2]), + np.full(shape=1, fill_value=10, dtype=np.float32), + rtol=1e-05, + ) class TestApiWhileLoop_Nested(unittest.TestCase): - def test_nested_net(self): - def external_cond(i, j, init, sums): return layers.less_than(i, loop_len1) def external_body(i, j, init, sums): - def internal_cond(j, init, sums): return layers.less_than(j, loop_len2) @@ -160,8 +170,9 @@ class TestApiWhileLoop_Nested(unittest.TestCase): j = layers.increment(j) return [j, init, sums] - result = layers.while_loop(internal_cond, internal_body, - [j, init, sums]) + result = layers.while_loop( + internal_cond, internal_body, [j, init, sums] + ) j = result[0] init = result[1] sums = result[2] @@ -180,21 +191,22 @@ class TestApiWhileLoop_Nested(unittest.TestCase): loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1) - out = layers.while_loop(external_cond, external_body, - [i, j, init, sums]) + out = layers.while_loop( + external_cond, external_body, [i, j, init, sums] + ) data = np.random.rand(3, 3).astype('float32') data_sums = np.zeros([3, 3]).astype('float32') - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) - res = exe.run(main_program, - feed={ - 'init': data, - 'sums': data_sums - }, - fetch_list=out) + res = exe.run( + main_program, feed={'init': data, 'sums': data_sums}, fetch_list=out + ) for i in range(3): data = np.add(data, 1) 
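# Plain-Python sketch of the contract these while_loop tests exercise (a
# reading aid, not Paddle internals): `cond` maps the loop variables to a
# boolean and `body` returns new values with the same structure as loop_vars.
def while_loop_reference(cond, body, loop_vars):
    while cond(*loop_vars):
        loop_vars = body(*loop_vars)
    return loop_vars

# e.g. counting from 0 to 10, mirroring test_var_tuple above
(i_final,) = while_loop_reference(lambda i: i < 10, lambda i: (i + 1,), (0,))
assert i_final == 10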
data_sums = np.add(data, data_sums) @@ -204,9 +216,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): class TestApiWhileLoop_Backward(unittest.TestCase): - def test_while_loop_backward(self): - def cond(i, x): return layers.less_than(i, eleven) @@ -229,8 +239,11 @@ class TestApiWhileLoop_Backward(unittest.TestCase): mean = paddle.mean(out[1]) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feed_i = np.ones(1).astype('float32') @@ -238,17 +251,15 @@ class TestApiWhileLoop_Backward(unittest.TestCase): data = np.asarray([100]).astype('float32') i_grad = np.asarray([110]).astype('float32') - res = exe.run(main_program, - feed={ - 'i': feed_i, - 'x': feed_x - }, - fetch_list=[mean.name, i.grad_name]) + res = exe.run( + main_program, + feed={'i': feed_i, 'x': feed_x}, + fetch_list=[mean.name, i.grad_name], + ) np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05) np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05) def test_while_loop_backward2(self): - def cond(i, x): return i < 3 @@ -269,8 +280,11 @@ class TestApiWhileLoop_Backward(unittest.TestCase): mean = paddle.mean(out[1]) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) feed_i = np.ones(1).astype('float32') @@ -279,26 +293,22 @@ class TestApiWhileLoop_Backward(unittest.TestCase): i_grad = np.asarray([3]).astype('float32') x_grad = np.asarray([2]).astype('float32') - res = exe.run(main_program, - feed={ - 'i': feed_i, - 'x': feed_x - }, - fetch_list=[mean.name, i.grad_name, x.grad_name]) + res = exe.run( + main_program, + feed={'i': feed_i, 'x': feed_x}, + fetch_list=[mean.name, i.grad_name, x.grad_name], + ) np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05) np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05) np.testing.assert_allclose(np.asarray(res[2]), x_grad, rtol=1e-05) class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): - def test_nested_net_with_backward_and_lodtensor(self): - def external_cond(i, j, x, mem_array): return layers.less_than(i, array_len) def external_body(i, j, x, mem_array): - def internal_cond(j, x, mem_array): return layers.less_than(j, array_len2) @@ -317,8 +327,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): outer_sum_1 = layers.elementwise_add(x=x, y=outer_sum_0) i = layers.increment(x=i, in_place=True) layers.array_write(outer_sum_1, i=i, array=mem_array) - j, x, mem_array = layers.while_loop(internal_cond, internal_body, - [j, x, mem_array]) + j, x, mem_array = layers.while_loop( + internal_cond, internal_body, [j, x, mem_array] + ) return [i, j, x, mem_array] main_program = Program() @@ -345,15 +356,19 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): j.stop_gradient = True array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) - out = layers.while_loop(external_cond, external_body, - [i, j, x, mem_array]) + out = layers.while_loop( + external_cond, external_body, [i, j, x, mem_array] + ) sum_result = layers.array_read(array=mem_array, i=j) mean = paddle.mean(sum_result) append_backward(mean) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place 
= ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) d = [] @@ -362,27 +377,21 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): feed_x = np.ones(10).astype('float32') data_sum = d[0] + d[1] + d[2] + 3 * feed_x x_grad = [0.3] * 10 - res = exe.run(main_program, - feed={ - 'd0': d[0], - 'd1': d[1], - 'd2': d[2], - 'x': feed_x - }, - fetch_list=[sum_result.name, x.grad_name]) + res = exe.run( + main_program, + feed={'d0': d[0], 'd1': d[1], 'd2': d[2], 'x': feed_x}, + fetch_list=[sum_result.name, x.grad_name], + ) np.testing.assert_allclose(res[0], data_sum, rtol=1e-05) np.testing.assert_allclose(res[1], x_grad, rtol=1e-05) class TestApiWhileLoopWithSwitchCase(unittest.TestCase): - def test_with_switch_case(self): - def cond(i): return layers.less_than(i, ten) def body(i): - def fn_add_three(): data_add_three = layers.elementwise_add(x=i, y=three) return data_add_three @@ -395,12 +404,11 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase): data_add_one = layers.elementwise_add(x=i, y=one) return data_add_one - return layers.switch_case(branch_index=i, - branch_fns={ - 2: fn_add_three, - 5: fn_square - }, - default=fn_add_one) + return layers.switch_case( + branch_index=i, + branch_fns={2: fn_add_three, 5: fn_square}, + default=fn_add_one, + ) main_program = Program() startup_program = Program() @@ -411,8 +419,11 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase): one = layers.fill_constant(shape=[1], dtype='int64', value=1) out = layers.while_loop(cond, body, [i]) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) res = exe.run(main_program, fetch_list=out) @@ -421,9 +432,7 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase): class TestApiWhileLoop_Error(unittest.TestCase): - def test_error(self): - def cond_returns_constant(i): return 1 @@ -453,9 +462,9 @@ class TestApiWhileLoop_Error(unittest.TestCase): return i > 0 def body_returns_with_mutable_dict(i, test_dict): - test_dict['new_key'] = layers.fill_constant(shape=[1], - dtype='int64', - value=1) + test_dict['new_key'] = layers.fill_constant( + shape=[1], dtype='int64', value=1 + ) return layers.increment(i), test_dict def cond_returns_with_mutable_list(i, test_list): @@ -463,7 +472,8 @@ class TestApiWhileLoop_Error(unittest.TestCase): def body_returns_with_mutable_list(i, test_list): test_list.append( - layers.fill_constant(shape=[1], dtype='int64', value=1)) + layers.fill_constant(shape=[1], dtype='int64', value=1) + ) return layers.increment(i), test_list main_program = Program() @@ -483,8 +493,9 @@ class TestApiWhileLoop_Error(unittest.TestCase): # The type of `body` in Op(while_loop) must be callable def type_error_body(): - out = layers.while_loop(cond_returns_bool_tensor, data, - [data_1d]) + out = layers.while_loop( + cond_returns_bool_tensor, data, [data_1d] + ) self.assertRaises(TypeError, type_error_body) @@ -508,8 +519,9 @@ class TestApiWhileLoop_Error(unittest.TestCase): # The type of `cond` returns in Op(while_loop) must be a bollean variable def type_error_cond_returns_not_boolean(): - out = layers.while_loop(cond_returns_not_bool_tensor, body, - [data_1d]) + out = layers.while_loop( + cond_returns_not_bool_tensor, body, [data_1d] + ) self.assertRaises(TypeError, type_error_cond_returns_not_boolean) @@ -521,47 +533,54 @@ class 
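# Plain-Python sketch of the argument checks the surrounding error tests
# target (assumed semantics, expressed as ordinary exceptions rather than
# Paddle's own checks): cond/body must be callable, cond must yield a single
# boolean, and body must hand back as many loop variables as it received.
def check_while_loop_contract(cond, body, loop_vars):
    if not callable(cond) or not callable(body):
        raise TypeError("cond and body must be callable")
    if not isinstance(cond(*loop_vars), bool):
        raise TypeError("cond must return a single boolean value")
    out = body(*loop_vars)
    out = out if isinstance(out, (list, tuple)) else (out,)
    if len(out) != len(loop_vars):
        raise ValueError("body must return the same number of vars as loop_vars")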
TestApiWhileLoop_Error(unittest.TestCase): # The length of `body` returns in Op(while_loop) must be same as `loop_vars` def value_error_body_returns_error_length(): - out = layers.while_loop(cond_returns_bool_tensor, - body_returns_error_length, [data]) + out = layers.while_loop( + cond_returns_bool_tensor, body_returns_error_length, [data] + ) self.assertRaises(ValueError, value_error_body_returns_error_length) # The type of `body` returns in Op(while_loop) must be same as `loop_vars` def value_error_body_returns_error_type(): - out = layers.while_loop(cond_receives_two_args, - body_returns_error_type, [data, ten]) + out = layers.while_loop( + cond_receives_two_args, body_returns_error_type, [data, ten] + ) self.assertRaises(ValueError, value_error_body_returns_error_type) # The length of `output_vars` with mutable value should keep same with `loop_vars` def value_error_body_returns_with_mutable_dict(): test_dict = { - "int_constant": - layers.fill_constant(shape=[2, 2], dtype='int64', value=1) + "int_constant": layers.fill_constant( + shape=[2, 2], dtype='int64', value=1 + ) } - out = layers.while_loop(cond_returns_with_mutable_dict, - body_returns_with_mutable_dict, - [data, test_dict]) + out = layers.while_loop( + cond_returns_with_mutable_dict, + body_returns_with_mutable_dict, + [data, test_dict], + ) - self.assertRaises(ValueError, - value_error_body_returns_with_mutable_dict) + self.assertRaises( + ValueError, value_error_body_returns_with_mutable_dict + ) def value_error_body_returns_with_mutable_list(): test_list = [ layers.fill_constant(shape=[2, 2], dtype='int64', value=1) ] - out = layers.while_loop(cond_returns_with_mutable_list, - body_returns_with_mutable_list, - [data, test_list]) + out = layers.while_loop( + cond_returns_with_mutable_list, + body_returns_with_mutable_list, + [data, test_list], + ) - self.assertRaises(ValueError, - value_error_body_returns_with_mutable_list) + self.assertRaises( + ValueError, value_error_body_returns_with_mutable_list + ) class TestApiWhileLoopSliceInBody(unittest.TestCase): - def test_var_slice(self): - def cond(z, i): return i + 1 <= x_shape[0] @@ -579,8 +598,11 @@ class TestApiWhileLoopSliceInBody(unittest.TestCase): i = fluid.layers.fill_constant([1], 'int32', 0) z, _ = fluid.layers.while_loop(cond, body, [z, i]) - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) np_x = np.array([1, 2, 3, 4, 5], dtype='int32') diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index 648e93dede8d0f9bb1dcbeed50d5ed43f7578f06..341e6a8ef5644cc6bd9cd2b37df2ff90a5e6a71e 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -25,20 +25,16 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): - def simple_net(self): - d0 = layers.data("d0", - shape=[10], - append_batch_size=False, - dtype='float32') - d1 = layers.data("d1", - shape=[10], - append_batch_size=False, - dtype='float32') - d2 = layers.data("d2", - shape=[10], - append_batch_size=False, - dtype='float32') + d0 = layers.data( + "d0", shape=[10], append_batch_size=False, dtype='float32' + ) + d1 = layers.data( + "d1", shape=[10], append_batch_size=False, dtype='float32' + ) + d2 = layers.data( + "d2", shape=[10], append_batch_size=False, dtype='float32' + ) i = layers.zeros(shape=[1], 
dtype='int64') i.stop_gradient = True init = layers.zeros(shape=[10], dtype='float32') @@ -96,12 +92,10 @@ class TestWhileOp(unittest.TestCase): for i in range(3): d.append(numpy.random.random(size=[10]).astype('float32')) - outs = exe.run(feed={ - 'd0': d[0], - 'd1': d[1], - 'd2': d[2] - }, - fetch_list=[sum_result]) + outs = exe.run( + feed={'d0': d[0], 'd1': d[1], 'd2': d[2]}, + fetch_list=[sum_result], + ) self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) def test_simple_net_forward(self): @@ -133,7 +127,6 @@ class TestWhileOp(unittest.TestCase): class BadInputTest(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): @@ -145,9 +138,7 @@ class BadInputTest(unittest.TestCase): class TestIgnoreVarNameInWhile(unittest.TestCase): - def test_ignore_var(self): - def cond(i, ten, temp, y): return i < ten @@ -164,8 +155,9 @@ class TestIgnoreVarNameInWhile(unittest.TestCase): i = layers.fill_constant(shape=[1], value=0, dtype='int32') num = layers.fill_constant(shape=[1], value=5, dtype='int32') - i, ten, shuffle_temp, y = layers.while_loop(cond, body_func, - [i, num, temp, y]) + i, ten, shuffle_temp, y = layers.while_loop( + cond, body_func, [i, num, temp, y] + ) output = shuffle_temp @@ -177,18 +169,16 @@ class TestIgnoreVarNameInWhile(unittest.TestCase): input_y = numpy.array([[10], [12], [33]]) input_y = input_y.reshape(3, 1, 1) - res, = exe.run(fluid.default_main_program(), - feed={ - 'x': input_x, - 'y': input_y - }, - fetch_list=[output]) + (res,) = exe.run( + fluid.default_main_program(), + feed={'x': input_x, 'y': input_y}, + fetch_list=[output], + ) self.assertListEqual(list(res.shape), [3, 1, 5]) class TestOutputsMustExistsInputs(unittest.TestCase): - def test_outputs_exists_inputs(self): """ We guarantee that the output tensor must be in the input tensor, so that the output and input can correspond to each other, but the input can be greater than the number of outputs. It's required in paddle2onnx. @@ -220,11 +210,14 @@ class TestOutputsMustExistsInputs(unittest.TestCase): for op in main_program.block(0).ops: if op.type == "while": for out_name in op.output("Out"): - if out_name in op.input("Condition"): continue + if out_name in op.input("Condition"): + continue self.assertTrue( out_name in op.input("X"), - "In while op, the variable in output(`Out`) must exists in inputs(`X`), but the variable with name `{}` not meet the precondition." 
- .format(out_name)) + "In while op, the variable in output(`Out`) must exists in inputs(`X`), but the variable with name `{}` not meet the precondition.".format( + out_name + ), + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py index 0a4897b821d6698d2acf995c710fdb7f6b8dd480..8584332f02c07f0cf77838c22b19584930452387 100644 --- a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py @@ -20,7 +20,7 @@ from paddle.fluid.framework import _test_eager_guard def sigmoid(x): - return (1.0 / (1.0 + np.exp(((-1.0) * x)))) + return 1.0 / (1.0 + np.exp(((-1.0) * x))) def YoloBox(x, img_size, attrs): @@ -34,9 +34,9 @@ def YoloBox(x, img_size, attrs): scale_x_y = attrs['scale_x_y'] iou_aware = attrs['iou_aware'] iou_aware_factor = attrs['iou_aware_factor'] - bias_x_y = ((-0.5) * (scale_x_y - 1.0)) - input_h = (downsample * h) - input_w = (downsample * w) + bias_x_y = (-0.5) * (scale_x_y - 1.0) + input_h = downsample * h + input_w = downsample * w if iou_aware: ioup = x[:, :an_num, :, :] ioup = np.expand_dims(ioup, axis=(-1)) @@ -45,49 +45,54 @@ def YoloBox(x, img_size, attrs): pred_box = x[:, :, :, :, :4].copy() grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1)) grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w)) - pred_box[:, :, :, :, 0] = (( - (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y) / - w) - pred_box[:, :, :, :, 1] = (( - (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y) / - h) - anchors = [(anchors[i], anchors[(i + 1)]) - for i in range(0, len(anchors), 2)] - anchors_s = np.array([((an_w / input_w), (an_h / input_h)) - for (an_w, an_h) in anchors]) + pred_box[:, :, :, :, 0] = ( + (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y + ) / w + pred_box[:, :, :, :, 1] = ( + (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y + ) / h + anchors = [ + (anchors[i], anchors[(i + 1)]) for i in range(0, len(anchors), 2) + ] + anchors_s = np.array( + [((an_w / input_w), (an_h / input_h)) for (an_w, an_h) in anchors] + ) anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1)) anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1)) - pred_box[:, :, :, :, 2] = (np.exp(pred_box[:, :, :, :, 2]) * anchor_w) - pred_box[:, :, :, :, 3] = (np.exp(pred_box[:, :, :, :, 3]) * anchor_h) + pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w + pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h if iou_aware: - pred_conf = ((sigmoid(x[:, :, :, :, 4:5])**(1 - iou_aware_factor)) * - (sigmoid(ioup)**iou_aware_factor)) + pred_conf = (sigmoid(x[:, :, :, :, 4:5]) ** (1 - iou_aware_factor)) * ( + sigmoid(ioup) ** iou_aware_factor + ) else: pred_conf = sigmoid(x[:, :, :, :, 4:5]) pred_conf[(pred_conf < conf_thresh)] = 0.0 - pred_score = (sigmoid(x[:, :, :, :, 5:]) * pred_conf) - pred_box = (pred_box * (pred_conf > 0.0).astype('float32')) + pred_score = sigmoid(x[:, :, :, :, 5:]) * pred_conf + pred_box = pred_box * (pred_conf > 0.0).astype('float32') pred_box = pred_box.reshape((n, (-1), 4)) - (pred_box[:, :, :2], - pred_box[:, :, 2:4]) = ((pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)), - (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0))) - pred_box[:, :, 0] = (pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis]) - pred_box[:, :, 1] = (pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis]) - pred_box[:, :, 2] = (pred_box[:, :, 2] * img_size[:, 
1][:, np.newaxis]) - pred_box[:, :, 3] = (pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis]) + (pred_box[:, :, :2], pred_box[:, :, 2:4]) = ( + (pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)), + (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0)), + ) + pred_box[:, :, 0] = pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis] + pred_box[:, :, 1] = pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis] + pred_box[:, :, 2] = pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis] + pred_box[:, :, 3] = pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis] if clip_bbox: for i in range(len(pred_box)): pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf) pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf) - pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], (-np.inf), - (img_size[(i, 1)] - 1)) - pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], (-np.inf), - (img_size[(i, 0)] - 1)) + pred_box[i, :, 2] = np.clip( + pred_box[i, :, 2], (-np.inf), (img_size[(i, 1)] - 1) + ) + pred_box[i, :, 3] = np.clip( + pred_box[i, :, 3], (-np.inf), (img_size[(i, 0)] - 1) + ) return (pred_box, pred_score.reshape((n, (-1), class_num))) class TestYoloBoxOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'yolo_box' @@ -102,7 +107,7 @@ class TestYoloBoxOp(OpTest): 'clip_bbox': self.clip_bbox, 'scale_x_y': self.scale_x_y, 'iou_aware': self.iou_aware, - 'iou_aware_factor': self.iou_aware_factor + 'iou_aware_factor': self.iou_aware_factor, } self.inputs = {'X': x, 'ImgSize': img_size} (boxes, scores) = YoloBox(x, img_size, self.attrs) @@ -119,8 +124,12 @@ class TestYoloBoxOp(OpTest): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = False @@ -128,7 +137,6 @@ class TestYoloBoxOp(OpTest): class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -137,8 +145,12 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = False - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = False @@ -146,7 +158,6 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): class TestYoloBoxOpScaleXY(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -155,8 +166,12 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * (5 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.2 self.iou_aware = False @@ -164,7 +179,6 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): class TestYoloBoxOpIoUAware(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) @@ -173,8 +187,12 @@ class TestYoloBoxOpIoUAware(TestYoloBoxOp): self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, (an_num * (6 + self.class_num)), 13, - 13) + self.x_shape = ( + self.batch_size, + (an_num * 
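# Single-cell sketch of the decode rule the YoloBox reference above applies
# (made-up values, shapes collapsed to scalars for readability).
import numpy as np

def _sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

tx, ty, tw, th = 0.2, -0.1, 0.5, 0.3        # raw network outputs for one cell
gx, gy, w, h = 3, 4, 13, 13                 # grid indices and grid size
anchor_w, anchor_h, downsample = 16, 30, 32
scale_x_y = 1.0
bias_x_y = -0.5 * (scale_x_y - 1.0)         # 0.0 when scale_x_y == 1.0

cx = (gx + _sigmoid(tx) * scale_x_y + bias_x_y) / w    # centre in [0, 1]
cy = (gy + _sigmoid(ty) * scale_x_y + bias_x_y) / h
bw = np.exp(tw) * anchor_w / (downsample * w)          # size in [0, 1]
bh = np.exp(th) * anchor_h / (downsample * h)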
(6 + self.class_num)), + 13, + 13, + ) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.0 self.iou_aware = True @@ -182,34 +200,37 @@ class TestYoloBoxOpIoUAware(TestYoloBoxOp): class TestYoloBoxDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() img_size = np.ones((2, 2)).astype('int32') img_size = paddle.to_tensor(img_size) x1 = np.random.random([2, 14, 8, 8]).astype('float32') x1 = paddle.to_tensor(x1) - (boxes, scores) = paddle.vision.ops.yolo_box(x1, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x1, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + ) + assert (boxes is not None) and (scores is not None) x2 = np.random.random([2, 16, 8, 8]).astype('float32') x2 = paddle.to_tensor(x2) - (boxes, scores) = paddle.vision.ops.yolo_box(x2, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0, - iou_aware=True, - iou_aware_factor=0.5) + (boxes, scores) = paddle.vision.ops.yolo_box( + x2, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + iou_aware=True, + iou_aware_factor=0.5, + ) paddle.enable_static() def test_eager(self): @@ -218,35 +239,37 @@ class TestYoloBoxDygraph(unittest.TestCase): class TestYoloBoxStatic(unittest.TestCase): - def test_static(self): x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32') img_size = paddle.static.data('img_size', [2, 2], 'int32') - (boxes, scores) = paddle.vision.ops.yolo_box(x1, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x1, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + ) + assert (boxes is not None) and (scores is not None) x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32') - (boxes, scores) = paddle.vision.ops.yolo_box(x2, - img_size=img_size, - anchors=[10, 13, 16, 30], - class_num=2, - conf_thresh=0.01, - downsample_ratio=8, - clip_bbox=True, - scale_x_y=1.0, - iou_aware=True, - iou_aware_factor=0.5) - assert ((boxes is not None) and (scores is not None)) + (boxes, scores) = paddle.vision.ops.yolo_box( + x2, + img_size=img_size, + anchors=[10, 13, 16, 30], + class_num=2, + conf_thresh=0.01, + downsample_ratio=8, + clip_bbox=True, + scale_x_y=1.0, + iou_aware=True, + iou_aware_factor=0.5, + ) + assert (boxes is not None) and (scores is not None) class TestYoloBoxOpHW(TestYoloBoxOp): - def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] an_num = int((len(self.anchors) // 2)) diff --git a/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py b/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py index c2c747836eef9cca48a16f92a686ffd6ebed390d..9105f27e96d730349cfda58b4672e83b9ad46568 100644 --- a/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py @@ -51,11 +51,12 @@ def batch_xywh_box_iou(box1, box2): left = np.maximum(b1_left[:, :, np.newaxis], 
b2_left[:, np.newaxis, :]) right = np.minimum(b1_right[:, :, np.newaxis], b2_right[:, np.newaxis, :]) top = np.maximum(b1_top[:, :, np.newaxis], b2_top[:, np.newaxis, :]) - bottom = np.minimum(b1_bottom[:, :, np.newaxis], b2_bottom[:, - np.newaxis, :]) + bottom = np.minimum( + b1_bottom[:, :, np.newaxis], b2_bottom[:, np.newaxis, :] + ) - inter_w = np.clip(right - left, 0., 1.) - inter_h = np.clip(bottom - top, 0., 1.) + inter_w = np.clip(right - left, 0.0, 1.0) + inter_h = np.clip(bottom - top, 0.0, 1.0) inter_area = inter_w * inter_h b1_area = (b1_right - b1_left) * (b1_bottom - b1_top) @@ -77,7 +78,7 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): downsample_ratio = attrs['downsample_ratio'] use_label_smooth = attrs['use_label_smooth'] scale_x_y = attrs['scale_x_y'] - bias_x_y = -0.5 * (scale_x_y - 1.) + bias_x_y = -0.5 * (scale_x_y - 1.0) input_size = downsample_ratio * h x = x.reshape((n, mask_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2)) loss = np.zeros((n)).astype('float64') @@ -89,18 +90,19 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): pred_box = x[:, :, :, :, :4].copy() grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1)) grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w)) - pred_box[:, :, :, :, - 0] = (grid_x + sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y + - bias_x_y) / w - pred_box[:, :, :, :, - 1] = (grid_y + sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y + - bias_x_y) / h + pred_box[:, :, :, :, 0] = ( + grid_x + sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y + bias_x_y + ) / w + pred_box[:, :, :, :, 1] = ( + grid_y + sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y + bias_x_y + ) / h mask_anchors = [] for m in anchor_mask: mask_anchors.append((anchors[2 * m], anchors[2 * m + 1])) - anchors_s = np.array([(an_w / input_size, an_h / input_size) - for an_w, an_h in mask_anchors]) + anchors_s = np.array( + [(an_w / input_size, an_h / input_size) for an_w, an_h in mask_anchors] + ) anchor_w = anchors_s[:, 0:1].reshape((1, mask_num, 1, 1)) anchor_h = anchors_s[:, 1:2].reshape((1, mask_num, 1, 1)) pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w @@ -111,18 +113,21 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): objness = np.zeros(pred_box.shape[:2]).astype('float64') ious = batch_xywh_box_iou(pred_box, gtbox) ious_max = np.max(ious, axis=-1) - objness = np.where(ious_max > ignore_thresh, -np.ones_like(objness), - objness) + objness = np.where( + ious_max > ignore_thresh, -np.ones_like(objness), objness + ) gtbox_shift = gtbox.copy() gtbox_shift[:, :, 0] = 0 gtbox_shift[:, :, 1] = 0 anchors = [(anchors[2 * i], anchors[2 * i + 1]) for i in range(0, an_num)] - anchors_s = np.array([(an_w / input_size, an_h / input_size) - for an_w, an_h in anchors]) - anchor_boxes = np.concatenate([np.zeros_like(anchors_s), anchors_s], - axis=-1) + anchors_s = np.array( + [(an_w / input_size, an_h / input_size) for an_w, an_h in anchors] + ) + anchor_boxes = np.concatenate( + [np.zeros_like(anchors_s), anchors_s], axis=-1 + ) anchor_boxes = np.tile(anchor_boxes[np.newaxis, :, :], (n, 1, 1)) ious = batch_xywh_box_iou(gtbox_shift, anchor_boxes) iou_matches = np.argmax(ious, axis=-1) @@ -153,9 +158,13 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): objness[i, an_idx * h * w + gj * w + gi] = gtscore[i, j] for label_idx in range(class_num): - loss[i] += sce( - x[i, an_idx, gj, gi, 5 + label_idx], label_pos if label_idx - == gtlabel[i, j] else label_neg) * gtscore[i, j] + loss[i] += ( + sce( + x[i, an_idx, gj, gi, 5 + label_idx], + label_pos if label_idx == 
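# Two-box sketch of the IoU computed by batch_xywh_box_iou above: convert
# (cx, cy, w, h) to corners, clip the intersection edges to [0, 1] as the
# reference does, then divide by the union area.
import numpy as np

def xywh_iou(b1, b2):
    (x1, y1, w1, h1), (x2, y2, w2, h2) = b1, b2
    left = max(x1 - w1 / 2, x2 - w2 / 2)
    right = min(x1 + w1 / 2, x2 + w2 / 2)
    top = max(y1 - h1 / 2, y2 - h2 / 2)
    bottom = min(y1 + h1 / 2, y2 + h2 / 2)
    inter = np.clip(right - left, 0.0, 1.0) * np.clip(bottom - top, 0.0, 1.0)
    return inter / (w1 * h1 + w2 * h2 - inter)

assert abs(xywh_iou((0.5, 0.5, 0.2, 0.2), (0.5, 0.5, 0.2, 0.2)) - 1.0) < 1e-9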
gtlabel[i, j] else label_neg, + ) + * gtscore[i, j] + ) for j in range(mask_num * h * w): if objness[i, j] > 0: @@ -163,40 +172,62 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): elif objness[i, j] == 0: loss[i] += sce(pred_obj[i, j], 0.0) - return (loss, objness.reshape((n, mask_num, h, w)).astype('float64'), \ - gt_matches.astype('int32')) - - -def yolo_loss_wrapper(x, - gt_box, - gt_label, - gt_score=None, - anchors=[ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, - 90, 156, 198, 373, 326 - ], - anchor_mask=[0, 1, 2], - class_num=5, - ignore_thresh=0.7, - downsample_ratio=32, - use_label_smooth=True, - scale_x_y=1.): - loss = paddle.vision.ops.yolo_loss(x, - gt_box=gt_box, - gt_label=gt_label, - anchors=anchors, - anchor_mask=anchor_mask, - class_num=class_num, - ignore_thresh=ignore_thresh, - downsample_ratio=downsample_ratio, - gt_score=gt_score, - use_label_smooth=use_label_smooth, - scale_x_y=scale_x_y) + return ( + loss, + objness.reshape((n, mask_num, h, w)).astype('float64'), + gt_matches.astype('int32'), + ) + + +def yolo_loss_wrapper( + x, + gt_box, + gt_label, + gt_score=None, + anchors=[ + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, + ], + anchor_mask=[0, 1, 2], + class_num=5, + ignore_thresh=0.7, + downsample_ratio=32, + use_label_smooth=True, + scale_x_y=1.0, +): + loss = paddle.vision.ops.yolo_loss( + x, + gt_box=gt_box, + gt_label=gt_label, + anchors=anchors, + anchor_mask=anchor_mask, + class_num=class_num, + ignore_thresh=ignore_thresh, + downsample_ratio=downsample_ratio, + gt_score=gt_score, + use_label_smooth=use_label_smooth, + scale_x_y=scale_x_y, + ) return loss class TestYolov3LossOp(OpTest): - def setUp(self): self.initTestCase() self.op_type = 'yolov3_loss' @@ -230,12 +261,13 @@ class TestYolov3LossOp(OpTest): gtscore = np.random.random(self.gtbox_shape[:2]).astype('float64') self.inputs['GTScore'] = gtscore - loss, objness, gt_matches = YOLOv3Loss(x, gtbox, gtlabel, gtscore, - self.attrs) + loss, objness, gt_matches = YOLOv3Loss( + x, gtbox, gtlabel, gtscore, self.attrs + ) self.outputs = { 'Loss': loss, 'ObjectnessMask': objness, - "GTMatchMask": gt_matches + "GTMatchMask": gt_matches, } def test_check_output(self): @@ -244,15 +276,30 @@ class TestYolov3LossOp(OpTest): def test_check_grad_ignore_gtbox(self): place = core.CPUPlace() - self.check_grad_with_place(place, ['X'], - 'Loss', - max_relative_error=0.2, - check_eager=True) + self.check_grad_with_place( + place, ['X'], 'Loss', max_relative_error=0.2, check_eager=True + ) def initTestCase(self): self.anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, - 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] self.anchor_mask = [0, 1, 2] self.class_num = 5 @@ -262,15 +309,30 @@ class TestYolov3LossOp(OpTest): self.gtbox_shape = (3, 5, 4) self.gtscore = True self.use_label_smooth = True - self.scale_x_y = 1. 
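# The reference loss above calls sce(x, label) repeatedly; a common reading of
# that helper (an assumption here, its definition sits earlier in the file) is
# element-wise sigmoid cross entropy: -l*log(s(x)) - (1-l)*log(1-s(x)).
import numpy as np

def sce_sketch(x, label):
    s = 1.0 / (1.0 + np.exp(-x))
    return -label * np.log(s) - (1.0 - label) * np.log(1.0 - s)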
+ self.scale_x_y = 1.0 class TestYolov3LossWithoutLabelSmooth(TestYolov3LossOp): - def initTestCase(self): self.anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, - 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] self.anchor_mask = [0, 1, 2] self.class_num = 5 @@ -280,15 +342,30 @@ class TestYolov3LossWithoutLabelSmooth(TestYolov3LossOp): self.gtbox_shape = (3, 5, 4) self.gtscore = True self.use_label_smooth = False - self.scale_x_y = 1. + self.scale_x_y = 1.0 class TestYolov3LossNoGTScore(TestYolov3LossOp): - def initTestCase(self): self.anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, - 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] self.anchor_mask = [0, 1, 2] self.class_num = 5 @@ -298,15 +375,30 @@ class TestYolov3LossNoGTScore(TestYolov3LossOp): self.gtbox_shape = (3, 5, 4) self.gtscore = False self.use_label_smooth = True - self.scale_x_y = 1. + self.scale_x_y = 1.0 class TestYolov3LossWithScaleXY(TestYolov3LossOp): - def initTestCase(self): self.anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, - 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] self.anchor_mask = [0, 1, 2] self.class_num = 5 @@ -320,7 +412,6 @@ class TestYolov3LossWithScaleXY(TestYolov3LossOp): class TestYolov3LossDygraph(unittest.TestCase): - def test_dygraph(self): paddle.disable_static() x = np.random.random([2, 14, 8, 8]).astype('float32') @@ -331,52 +422,57 @@ class TestYolov3LossDygraph(unittest.TestCase): gt_box = paddle.to_tensor(gt_box) gt_label = paddle.to_tensor(gt_label) - loss = paddle.vision.ops.yolo_loss(x, - gt_box=gt_box, - gt_label=gt_label, - anchors=[10, 13, 16, 30], - anchor_mask=[0, 1], - class_num=2, - ignore_thresh=0.7, - downsample_ratio=8, - use_label_smooth=True, - scale_x_y=1.) + loss = paddle.vision.ops.yolo_loss( + x, + gt_box=gt_box, + gt_label=gt_label, + anchors=[10, 13, 16, 30], + anchor_mask=[0, 1], + class_num=2, + ignore_thresh=0.7, + downsample_ratio=8, + use_label_smooth=True, + scale_x_y=1.0, + ) assert loss is not None assert loss.shape == [2] paddle.enable_static() class TestYolov3LossStatic(unittest.TestCase): - def test_static(self): x = paddle.static.data('x', [2, 14, 8, 8], 'float32') gt_box = paddle.static.data('gt_box', [2, 10, 4], 'float32') gt_label = paddle.static.data('gt_label', [2, 10], 'int32') gt_score = paddle.static.data('gt_score', [2, 10], 'float32') - loss = paddle.vision.ops.yolo_loss(x, - gt_box=gt_box, - gt_label=gt_label, - anchors=[10, 13, 16, 30], - anchor_mask=[0, 1], - class_num=2, - ignore_thresh=0.7, - downsample_ratio=8, - gt_score=gt_score, - use_label_smooth=True, - scale_x_y=1.) + loss = paddle.vision.ops.yolo_loss( + x, + gt_box=gt_box, + gt_label=gt_label, + anchors=[10, 13, 16, 30], + anchor_mask=[0, 1], + class_num=2, + ignore_thresh=0.7, + downsample_ratio=8, + gt_score=gt_score, + use_label_smooth=True, + scale_x_y=1.0, + ) assert loss is not None - loss = paddle.vision.ops.yolo_loss(x, - gt_box=gt_box, - gt_label=gt_label, - anchors=[10, 13, 16, 30], - anchor_mask=[0, 1], - class_num=2, - ignore_thresh=0.7, - downsample_ratio=8, - use_label_smooth=True, - scale_x_y=1.) 
+ loss = paddle.vision.ops.yolo_loss( + x, + gt_box=gt_box, + gt_label=gt_label, + anchors=[10, 13, 16, 30], + anchor_mask=[0, 1], + class_num=2, + ignore_thresh=0.7, + downsample_ratio=8, + use_label_smooth=True, + scale_x_y=1.0, + ) assert loss is not None diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_shape.py b/python/paddle/fluid/tests/unittests/test_zero_dim_shape.py index 089a076ad80dc46f2e5dbf43da2ebcf31fd30b87..df4fa96d4a36cf29e118f69fedf8fd5c87142a07 100644 --- a/python/paddle/fluid/tests/unittests/test_zero_dim_shape.py +++ b/python/paddle/fluid/tests/unittests/test_zero_dim_shape.py @@ -68,7 +68,6 @@ unary_api_list = [ # Use to test zero-dim in the whole API class TestUnaryAPI(unittest.TestCase): - def test_dygraph_unary(self): paddle.disable_static() fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -108,13 +107,14 @@ class TestUnaryAPI(unittest.TestCase): self.assertEqual(out.shape, ()) exe = fluid.Executor() - result = exe.run(main_prog, - fetch_list=[x, out, x_grad, out_grad]) + result = exe.run( + main_prog, fetch_list=[x, out, x_grad, out_grad] + ) # Test runtime shape self.assertEqual(result[0].shape, ()) self.assertEqual(result[1].shape, ()) - self.assertEqual(result[3].shape, (1, )) + self.assertEqual(result[3].shape, (1,)) # 0D will be stacked when 1+ place, due to it cannot be concated # for 1 place: [ x-place1 ] @@ -126,28 +126,34 @@ class TestUnaryAPI(unittest.TestCase): else: places = [paddle.CPUPlace()] * 4 device_num = 4 - expect_shape = (device_num, ) + expect_shape = (device_num,) compiled_program = fluid.CompiledProgram( - main_prog).with_data_parallel(out.name, places=places) - result = exe.run(compiled_program, - fetch_list=[x, out, x_grad, out_grad], - return_merged=True) + main_prog + ).with_data_parallel(out.name, places=places) + result = exe.run( + compiled_program, + fetch_list=[x, out, x_grad, out_grad], + return_merged=True, + ) # Test runtime parallel shape self.assertEqual(result[0].shape, expect_shape) self.assertEqual(result[1].shape, expect_shape) - self.assertEqual(result[3].shape, (device_num, )) + self.assertEqual(result[3].shape, (device_num,)) compiled_program = fluid.CompiledProgram( - main_prog).with_data_parallel(out.name, places=places) - result = exe.run(compiled_program, - fetch_list=[x, out, x_grad, out_grad], - return_merged=False) + main_prog + ).with_data_parallel(out.name, places=places) + result = exe.run( + compiled_program, + fetch_list=[x, out, x_grad, out_grad], + return_merged=False, + ) # [[x-place1, x-place2, ...], [], [], ...] 
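# NumPy sketch of the shape conventions the zero-dim test above asserts: a
# 0-D value has shape (), and stacking one such value per place/device yields
# shape (device_num,).
import numpy as np

x = np.array(3.14)             # 0-D, shape ()
assert x.shape == ()
stacked = np.stack([x] * 4)    # one copy per "device"
assert stacked.shape == (4,)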
- self.assertEqual(np.array(result[0]).shape, (device_num, )) - self.assertEqual(np.array(result[1]).shape, (device_num, )) + self.assertEqual(np.array(result[0]).shape, (device_num,)) + self.assertEqual(np.array(result[1]).shape, (device_num,)) self.assertEqual(np.array(result[3]).shape, (device_num, 1)) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_zeropad2d.py b/python/paddle/fluid/tests/unittests/test_zeropad2d.py index f15f3d277d58f24590a3d185f21eb36062cc0c97..99a6b9143a911b74b0dd1a98d4f883a80d1d93c6 100644 --- a/python/paddle/fluid/tests/unittests/test_zeropad2d.py +++ b/python/paddle/fluid/tests/unittests/test_zeropad2d.py @@ -85,7 +85,8 @@ class TestZeroPad2dAPI(unittest.TestCase): pad = [1, 2, 3, 4] x = np.random.randint(-255, 255, size=self.shape) expect_res = np.pad( - x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]]) + x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]] + ) x_tensor = to_tensor(x) ret_res = zeropad2d(x_tensor, pad).numpy() @@ -103,7 +104,8 @@ class TestZeroPad2dAPI(unittest.TestCase): pad = (1, 2, 3, 4) x = np.random.randint(-255, 255, size=self.shape) expect_res = np.pad( - x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]]) + x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]] + ) x_tensor = to_tensor(x) ret_res = zeropad2d(x_tensor, pad).numpy() @@ -121,7 +123,8 @@ class TestZeroPad2dAPI(unittest.TestCase): pad = [1, 2, 3, 4] x = np.random.randint(-255, 255, size=self.shape) expect_res = np.pad( - x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]]) + x, [[0, 0], [0, 0], [pad[2], pad[3]], [pad[0], pad[1]]] + ) x_tensor = to_tensor(x) pad_tensor = to_tensor(pad, dtype='int32') @@ -144,15 +147,22 @@ class TestZeroPad2DLayer(unittest.TestCase): self.pad = [2, 2, 4, 1] self.padLayer = ZeroPad2D(padding=self.pad) self.x = np.random.randint(-255, 255, size=self.shape) - self.expect_res = np.pad(self.x, - [[0, 0], [0, 0], [self.pad[2], self.pad[3]], - [self.pad[0], self.pad[1]]]) + self.expect_res = np.pad( + self.x, + [ + [0, 0], + [0, 0], + [self.pad[2], self.pad[3]], + [self.pad[0], self.pad[1]], + ], + ) def func_layer(self): - np.testing.assert_allclose(zeropad2d(to_tensor(self.x), - self.pad).numpy(), - self.padLayer(to_tensor(self.x)), - rtol=1e-05) + np.testing.assert_allclose( + zeropad2d(to_tensor(self.x), self.pad).numpy(), + self.padLayer(to_tensor(self.x)), + rtol=1e-05, + ) def test_layer(self): with paddle.fluid.framework._test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py index 0a86123ea2c16c780265fd0b8361a43f53531284..5712fc4df6e5f88156ecb2d8c53a2b4ff2928fc4 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py @@ -24,7 +24,6 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_ class TestZerosLikeAPIError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): x = paddle.fluid.data('x', [3, 4]) @@ -36,7 +35,6 @@ class TestZerosLikeAPIError(unittest.TestCase): class TestZerosLikeAPI(unittest.TestCase): - def test_api(self): shape = [3, 4] startup_program = Program() @@ -48,14 +46,20 @@ class TestZerosLikeAPI(unittest.TestCase): out3 = zeros_like(x, 'float64') out4 = zeros_like(x, 'int32') out5 = zeros_like(x, 'int64') - place = (fluid.CUDAPlace(0) - if core.is_compiled_with_cuda() else fluid.CPUPlace()) + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + 
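# Sketch of the padding layout the ZeroPad2D tests above compare against:
# pad = [left, right, top, bottom] on an NCHW array corresponds to np.pad with
# [[0, 0], [0, 0], [top, bottom], [left, right]].
import numpy as np

x = np.ones((1, 1, 2, 2), dtype='int32')
left, right, top, bottom = 1, 2, 3, 4
padded = np.pad(x, [[0, 0], [0, 0], [top, bottom], [left, right]])
assert padded.shape == (1, 1, 2 + top + bottom, 2 + left + right)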
else fluid.CPUPlace() + ) exe = fluid.Executor(place) - outs = exe.run(train_program, - feed={'X': np.ones(shape).astype('float32')}, - fetch_list=[out1, out2, out3, out4, out5]) + outs = exe.run( + train_program, + feed={'X': np.ones(shape).astype('float32')}, + fetch_list=[out1, out2, out3, out4, out5], + ) for (i, dtype) in enumerate( - [np.float32, np.bool_, np.float64, np.int32, np.int64]): + [np.float32, np.bool_, np.float64, np.int32, np.int64] + ): self.assertEqual(outs[i].dtype, dtype) self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True) @@ -65,17 +69,20 @@ class TestZerosLikeAPI(unittest.TestCase): class TestZerosLikeImpeartive(unittest.TestCase): - def test_out(self): shape = [3, 4] - place = (fluid.CUDAPlace(0) - if core.is_compiled_with_cuda() else fluid.CPUPlace()) + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.disable_static(place) x = paddle.to_tensor(np.ones(shape)) for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]: out = zeros_like(x, dtype) - self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), - True) + self.assertEqual( + (out.numpy() == np.zeros(shape, dtype)).all(), True + ) out = paddle.tensor.zeros_like(x) self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True) out = paddle.tensor.creation.zeros_like(x) @@ -88,20 +95,23 @@ class TestZerosLikeImpeartive(unittest.TestCase): class TestZerosAPI(unittest.TestCase): - def test_api(self): shape = [3, 4] - place = fluid.CUDAPlace( - 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() + place = ( + fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() + else fluid.CPUPlace() + ) paddle.disable_static(place) for dtype in [np.float32, np.float64, np.int32, np.int64]: out = _C_ops.zeros(shape, convert_np_dtype_to_dtype_(dtype), place) - self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), - True) + self.assertEqual( + (out.numpy() == np.zeros(shape, dtype)).all(), True + ) paddle.enable_static() -if (__name__ == '__main__'): +if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_zeros_op.py b/python/paddle/fluid/tests/unittests/test_zeros_op.py index d91230a48f258cc5df26f1a9d46aaea45f7cc4b9..59a87dba56d8615016de537a6c6932e671ad6532 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_op.py @@ -21,7 +21,6 @@ from paddle.fluid.framework import _test_eager_guard class TestZerosOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): shape = [4] @@ -34,27 +33,26 @@ class TestZerosOpError(unittest.TestCase): class ApiZerosTest(unittest.TestCase): - def test_out(self): with program_guard(Program()): zeros = paddle.zeros(shape=[10], dtype='float64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - (result, ) = exe.run(fetch_list=[zeros]) + (result,) = exe.run(fetch_list=[zeros]) expected_result = np.zeros(10, dtype='float64') self.assertEqual((result == expected_result).all(), True) with paddle.static.program_guard(Program()): zeros = paddle.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - (result, ) = exe.run(fetch_list=[zeros]) + (result,) = exe.run(fetch_list=[zeros]) expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) with program_guard(Program()): zeros = paddle.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = 
paddle.static.Executor(place) - (result, ) = exe.run(fetch_list=[zeros]) + (result,) = exe.run(fetch_list=[zeros]) expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) with program_guard(Program()): @@ -70,7 +68,7 @@ class ApiZerosTest(unittest.TestCase): zeros = fluid.layers.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - (result, ) = exe.run(fetch_list=[zeros]) + (result,) = exe.run(fetch_list=[zeros]) expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) @@ -81,9 +79,7 @@ class ApiZerosTest(unittest.TestCase): class ApiZerosError(unittest.TestCase): - def test_errors(self): - def test_error1(): with paddle.static.program_guard(fluid.Program()): ones = fluid.layers.zeros(shape=10, dtype='int64') @@ -111,5 +107,5 @@ class ApiZerosError(unittest.TestCase): self.test_shape_errors() -if (__name__ == '__main__'): +if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py index 619d8232811667c7d24882ac4bfa6583f2fe1494..a13b73f501bd623f29737a9fea1e74f671d378c3 100644 --- a/python/paddle/fluid/tests/unittests/testsuite.py +++ b/python/paddle/fluid/tests/unittests/testsuite.py @@ -70,7 +70,6 @@ def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None): def set_input(scope, op, inputs, place): - def __set_input__(var_name, var): if isinstance(var, tuple) or isinstance(var, np.ndarray): tensor = scope.find_var(var_name).get_tensor() @@ -119,10 +118,9 @@ def append_input_output(block, op_proto, np_list, is_input, dtype): if is_input: shape = list(np_value.shape) lod_level = 0 - return block.create_var(dtype=dtype, - shape=shape, - lod_level=lod_level, - name=name) + return block.create_var( + dtype=dtype, shape=shape, lod_level=lod_level, name=name + ) var_dict = {} for var_proto in proto_list: @@ -130,15 +128,18 @@ def append_input_output(block, op_proto, np_list, is_input, dtype): if (var_name not in np_list) and var_proto.dispensable: continue if is_input: - assert (var_name in np_list) or (var_proto.dispensable), \ - "Missing {} as input".format(var_name) + assert (var_name in np_list) or ( + var_proto.dispensable + ), "Missing {} as input".format(var_name) if var_proto.duplicable: - assert isinstance(np_list[var_name], list), \ - "Duplicable {} should be set as list".format(var_name) + assert isinstance( + np_list[var_name], list + ), "Duplicable {} should be set as list".format(var_name) var_list = [] for (name, np_value) in np_list[var_name]: var_list.append( - create_var(block, name, {name: np_value}, var_proto)) + create_var(block, name, {name: np_value}, var_proto) + ) var_dict[var_name] = var_list else: var_dict[var_name] = create_var(block, var_name, np_list, var_proto) @@ -151,34 +152,38 @@ def append_loss_ops(block, output_names): if len(mean_inputs) == 1: loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1]) - op = block.append_op(inputs={"X": mean_inputs}, - outputs={"Out": loss}, - type='mean') + op = block.append_op( + inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean' + ) op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) else: avg_sum = [] for cur_loss in mean_inputs: cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1]) - op = block.append_op(inputs={"X": [cur_loss]}, - outputs={"Out": [cur_avg_loss]}, - type="mean") + op = block.append_op( + inputs={"X": [cur_loss]}, + 
outputs={"Out": [cur_avg_loss]}, + type="mean", + ) op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) avg_sum.append(cur_avg_loss) loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1]) - op_sum = block.append_op(inputs={"X": avg_sum}, - outputs={"Out": loss_sum}, - type='sum') + op_sum = block.append_op( + inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum' + ) op_sum.desc.infer_var_type(block.desc) op_sum.desc.infer_shape(block.desc) loss = block.create_var(dtype=loss_sum.dtype, shape=[1]) - op_loss = block.append_op(inputs={"X": loss_sum}, - outputs={"Out": loss}, - type='scale', - attrs={'scale': 1.0 / float(len(avg_sum))}) + op_loss = block.append_op( + inputs={"X": loss_sum}, + outputs={"Out": loss}, + type='scale', + attrs={'scale': 1.0 / float(len(avg_sum))}, + ) op_loss.desc.infer_var_type(block.desc) op_loss.desc.infer_shape(block.desc) return loss diff --git a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py index 24f2752854f38751152c1271f57762b3244bc2c7..cd3546ebe3cd07fe6904e10acfc8bef1be7f0e50 100755 --- a/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/bert_tokenizer.py @@ -17,7 +17,13 @@ import os import unicodedata from tokenizer_utils import PretrainedTokenizer -from tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation +from tokenizer_utils import ( + convert_to_unicode, + whitespace_tokenize, + _is_whitespace, + _is_control, + _is_punctuation, +) class BasicTokenizer(object): @@ -129,14 +135,16 @@ class BasicTokenizer(object): # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
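# NumPy sketch of what the append_loss_ops helper in testsuite.py above builds
# out of mean/sum/scale ops: each fetched output is reduced to its mean, the
# means are summed, and the sum is rescaled by 1/k, so the final loss is the
# average of the per-output means.
import numpy as np

outputs = [np.random.rand(3, 4), np.random.rand(5)]
loss = sum(o.mean() for o in outputs) / float(len(outputs))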
- if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # return True return False @@ -148,7 +156,7 @@ class BasicTokenizer(object): output = [] for char in text: cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): + if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") @@ -273,101 +281,71 @@ class BertTokenizer(PretrainedTokenizer): {'input_ids': [101, 2002, 2001, 1037, 13997, 11510, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0]} ''' """ + resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained pretrained_resource_files_map = { "vocab_file": { - "bert-base-uncased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt", - "bert-large-uncased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt", - "bert-base-cased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt", - "bert-large-cased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt", - "bert-base-multilingual-uncased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt", - "bert-base-multilingual-cased": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt", - "bert-base-chinese": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", - "bert-wwm-chinese": - "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt", - "bert-wwm-ext-chinese": - "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt", - "macbert-large-chinese": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", - "macbert-base-chinese": - "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", - "simbert-base-chinese": - "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt", + "bert-base-uncased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt", + "bert-large-uncased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt", + "bert-base-cased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt", + "bert-large-cased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt", + "bert-base-multilingual-uncased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt", + "bert-base-multilingual-cased": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt", + "bert-base-chinese": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", + "bert-wwm-chinese": "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt", + "bert-wwm-ext-chinese": "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt", + "macbert-large-chinese": 
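# Compact sketch of the CJK check reformatted above: a code point counts as a
# Chinese character when it falls in any of the CJK Unified Ideograph blocks
# listed in the original condition.
_CJK_RANGES = [
    (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
    (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
]

def is_cjk_codepoint(cp):
    return any(low <= cp <= high for low, high in _CJK_RANGES)

assert is_cjk_codepoint(ord('中')) and not is_cjk_codepoint(ord('a'))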
"https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", + "macbert-base-chinese": "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt", + "simbert-base-chinese": "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt", } } pretrained_init_configuration = { - "bert-base-uncased": { - "do_lower_case": True - }, - "bert-large-uncased": { - "do_lower_case": True - }, - "bert-base-cased": { - "do_lower_case": False - }, - "bert-large-cased": { - "do_lower_case": False - }, - "bert-base-multilingual-uncased": { - "do_lower_case": True - }, - "bert-base-multilingual-cased": { - "do_lower_case": False - }, - "bert-base-chinese": { - "do_lower_case": False - }, - "bert-wwm-chinese": { - "do_lower_case": False - }, - "bert-wwm-ext-chinese": { - "do_lower_case": False - }, - "macbert-large-chinese": { - "do_lower_case": False - }, - "macbert-base-chinese": { - "do_lower_case": False - }, - "simbert-base-chinese": { - "do_lower_case": True - }, + "bert-base-uncased": {"do_lower_case": True}, + "bert-large-uncased": {"do_lower_case": True}, + "bert-base-cased": {"do_lower_case": False}, + "bert-large-cased": {"do_lower_case": False}, + "bert-base-multilingual-uncased": {"do_lower_case": True}, + "bert-base-multilingual-cased": {"do_lower_case": False}, + "bert-base-chinese": {"do_lower_case": False}, + "bert-wwm-chinese": {"do_lower_case": False}, + "bert-wwm-ext-chinese": {"do_lower_case": False}, + "macbert-large-chinese": {"do_lower_case": False}, + "macbert-base-chinese": {"do_lower_case": False}, + "simbert-base-chinese": {"do_lower_case": True}, } padding_side = 'right' - def __init__(self, - vocab_file, - do_lower_case=True, - unk_token="[UNK]", - sep_token="[SEP]", - pad_token="[PAD]", - cls_token="[CLS]", - mask_token="[MASK]"): + def __init__( + self, + vocab_file, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + ): if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the " "vocabulary from a pretrained model please use " - "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" - .format(vocab_file)) + "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + vocab_file + ) + ) self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token) self.do_lower_case = do_lower_case self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, - unk_token=unk_token) + self.wordpiece_tokenizer = WordpieceTokenizer( + vocab=self.vocab, unk_token=unk_token + ) self.special_tokens_map = { 'unk_token': unk_token, 'sep_token': sep_token, 'pad_token': pad_token, 'cls_token': cls_token, - 'mask_token': mask_token + 'mask_token': mask_token, } @property @@ -430,7 +408,9 @@ class BertTokenizer(PretrainedTokenizer): token_ids_1 = [] return len( self.build_inputs_with_special_tokens( - token_ids_0, token_ids_1 if pair else None)) + token_ids_0, token_ids_1 if pair else None + ) + ) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ @@ -454,9 +434,9 @@ class BertTokenizer(PretrainedTokenizer): _sep = [self.sep_token_id] return _cls + token_ids_0 + _sep + token_ids_1 + _sep - def create_token_type_ids_from_sequences(self, - token_ids_0, - token_ids_1=None): + def create_token_type_ids_from_sequences( + self, token_ids_0, token_ids_1=None + ): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: @@ -476,13 +456,13 @@ class BertTokenizer(PretrainedTokenizer): _cls = [self.cls_token_id] if token_ids_1 is None: return len(_cls + token_ids_0 + _sep) * [0] - return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + - _sep) * [1] + return len(_cls + token_ids_0 + _sep) * [0] + len( + token_ids_1 + _sep + ) * [1] - def get_special_tokens_mask(self, - token_ids_0, - token_ids_1=None, - already_has_special_tokens=False): + def get_special_tokens_mask( + self, token_ids_0, token_ids_1=None, already_has_special_tokens=False + ): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``encode`` methods. @@ -506,10 +486,18 @@ class BertTokenizer(PretrainedTokenizer): return list( map( lambda x: 1 - if x in [self.sep_token_id, self.cls_token_id] else 0, - token_ids_0)) + if x in [self.sep_token_id, self.cls_token_id] + else 0, + token_ids_0, + ) + ) if token_ids_1 is not None: - return [1] + ([0] * len(token_ids_0)) + [1] + ( - [0] * len(token_ids_1)) + [1] + return ( + [1] + + ([0] * len(token_ids_0)) + + [1] + + ([0] * len(token_ids_1)) + + [1] + ) return [1] + ([0] * len(token_ids_0)) + [1] diff --git a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py index 0524854834b5270a42ef35eeb5923fa9bb9553a6..9e60b29ffb7abf34d5a1cd30b5ff5aee75cdfa35 100644 --- a/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py +++ b/python/paddle/fluid/tests/unittests/tokenizer/tokenizer_utils.py @@ -89,8 +89,12 @@ def _is_punctuation(char): # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. 
- if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) - or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + if ( + (cp >= 33 and cp <= 47) + or (cp >= 58 and cp <= 64) + or (cp >= 91 and cp <= 96) + or (cp >= 123 and cp <= 126) + ): return True cat = unicodedata.category(char) if cat.startswith("P"): @@ -108,14 +112,16 @@ def is_chinese_char(cp): # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # return True return False @@ -172,6 +178,7 @@ class PretrainedTokenizer(object): by which subclasses can track arguments for initialization automatically and expose special tokens initialization used as attributes. """ + tokenizer_config_file = "tokenizer_config.json" pretrained_init_configuration = {} resource_files_names = {} # keys are arguments of __init__ @@ -179,20 +186,22 @@ class PretrainedTokenizer(object): padding_side = 'right' pad_token_type_id = 0 - def __call__(self, - text, - text_pair=None, - max_seq_len: Optional[int] = None, - stride=0, - is_split_into_words=False, - pad_to_max_seq_len=False, - truncation_strategy="longest_first", - return_position_ids=False, - return_token_type_ids=True, - return_attention_mask=False, - return_length=False, - return_overflowing_tokens=False, - return_special_tokens_mask=False): + def __call__( + self, + text, + text_pair=None, + max_seq_len: Optional[int] = None, + stride=0, + is_split_into_words=False, + pad_to_max_seq_len=False, + truncation_strategy="longest_first", + return_position_ids=False, + return_token_type_ids=True, + return_attention_mask=False, + return_length=False, + return_overflowing_tokens=False, + return_special_tokens_mask=False, + ): """ Performs tokenization and uses the tokenized tokens to prepare model inputs. It supports sequence or sequence pair as input, and batch input @@ -287,32 +296,60 @@ class PretrainedTokenizer(object): """ # Input type checking for clearer error assert isinstance(text, str) or ( - isinstance(text, (list, tuple)) and - (len(text) == 0 or - (isinstance(text[0], str) or - (isinstance(text[0], (list, tuple)) and - (len(text[0]) == 0 or isinstance(text[0][0], str))))) - ), ("text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " - "or `List[List[str]]` (batch of pretokenized examples).") + isinstance(text, (list, tuple)) + and ( + len(text) == 0 + or ( + isinstance(text[0], str) + or ( + isinstance(text[0], (list, tuple)) + and (len(text[0]) == 0 or isinstance(text[0][0], str)) + ) + ) + ) + ), ( + "text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " + "or `List[List[str]]` (batch of pretokenized examples)." 
+ ) assert ( - text_pair is None or isinstance(text_pair, str) or - (isinstance(text_pair, (list, tuple)) and - (len(text_pair) == 0 or - (isinstance(text_pair[0], str) or - (isinstance(text_pair[0], (list, tuple)) and - (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str)))))) - ), ("text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " - "or `List[List[str]]` (batch of pretokenized examples).") + text_pair is None + or isinstance(text_pair, str) + or ( + isinstance(text_pair, (list, tuple)) + and ( + len(text_pair) == 0 + or ( + isinstance(text_pair[0], str) + or ( + isinstance(text_pair[0], (list, tuple)) + and ( + len(text_pair[0]) == 0 + or isinstance(text_pair[0][0], str) + ) + ) + ) + ) + ) + ), ( + "text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " + "or `List[List[str]]` (batch of pretokenized examples)." + ) is_batched = bool( (not is_split_into_words and isinstance(text, (list, tuple))) - or (is_split_into_words and isinstance(text, (list, tuple)) and text - and isinstance(text[0], (list, tuple)))) + or ( + is_split_into_words + and isinstance(text, (list, tuple)) + and text + and isinstance(text[0], (list, tuple)) + ) + ) if is_batched: - batch_text_or_text_pairs = list(zip( - text, text_pair)) if text_pair is not None else text + batch_text_or_text_pairs = ( + list(zip(text, text_pair)) if text_pair is not None else text + ) return self.batch_encode( batch_text_or_text_pairs=batch_text_or_text_pairs, max_seq_len=max_seq_len, @@ -325,7 +362,8 @@ class PretrainedTokenizer(object): return_attention_mask=return_attention_mask, return_length=return_length, return_overflowing_tokens=return_overflowing_tokens, - return_special_tokens_mask=return_special_tokens_mask) + return_special_tokens_mask=return_special_tokens_mask, + ) else: return self.encode( text=text, @@ -338,7 +376,8 @@ class PretrainedTokenizer(object): return_attention_mask=return_attention_mask, return_length=return_length, return_overflowing_tokens=return_overflowing_tokens, - return_special_tokens_mask=return_special_tokens_mask) + return_special_tokens_mask=return_special_tokens_mask, + ) @property def all_special_tokens(self): @@ -349,8 +388,11 @@ class PretrainedTokenizer(object): all_toks = [] set_attr = self.special_tokens_map for attr_value in set_attr.values(): - all_toks = all_toks + (list(attr_value) if isinstance( - attr_value, (list, tuple)) else [attr_value]) + all_toks = all_toks + ( + list(attr_value) + if isinstance(attr_value, (list, tuple)) + else [attr_value] + ) all_toks = list(set(all_toks)) return all_toks @@ -426,11 +468,13 @@ class PretrainedTokenizer(object): # From local dir path elif os.path.isdir(pretrained_model_name_or_path): for file_id, file_name in cls.resource_files_names.items(): - full_file_name = os.path.join(pretrained_model_name_or_path, - file_name) + full_file_name = os.path.join( + pretrained_model_name_or_path, file_name + ) vocab_files[file_id] = full_file_name vocab_files["tokenizer_config_file"] = os.path.join( - pretrained_model_name_or_path, cls.tokenizer_config_file) + pretrained_model_name_or_path, cls.tokenizer_config_file + ) default_root = os.path.join(DATA_HOME, pretrained_model_name_or_path) resolved_vocab_files = {} @@ -443,11 +487,13 @@ class PretrainedTokenizer(object): print("Already cached %s" % path) resolved_vocab_files[file_id] = path else: - print("Downloading %s and saved to %s" % - (file_path, default_root)) + print( + "Downloading %s and 
saved to %s" % (file_path, default_root) + ) try: resolved_vocab_files[file_id] = get_path_from_url( - file_path, default_root) + file_path, default_root + ) except RuntimeError as err: print(err) raise RuntimeError( @@ -461,7 +507,8 @@ class PretrainedTokenizer(object): # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop( - "tokenizer_config_file", None) + "tokenizer_config_file", None + ) if tokenizer_config_file is not None: with open(tokenizer_config_file, 'r', encoding="utf-8") as f: init_kwargs = json.load(f) @@ -489,8 +536,9 @@ class PretrainedTokenizer(object): # does include a vocab file path in it. However, if the vocab file # path included in json does not exist, such as was deleted, to make # it still work, use the vocab file under this dir. - elif not os.path.isfile( - init_kwargs[args_name]) and os.path.isfile(file_path): + elif not os.path.isfile(init_kwargs[args_name]) and os.path.isfile( + file_path + ): init_kwargs[args_name] = file_path # TODO(guosheng): avoid reduplication of position args and key word args tokenizer = cls(*init_args, **init_kwargs) @@ -519,11 +567,13 @@ class PretrainedTokenizer(object): assert not os.path.isfile( save_directory ), "Saving directory ({}) should be a directory, not a file".format( - save_directory) + save_directory + ) os.makedirs(save_directory, exist_ok=True) - tokenizer_config_file = os.path.join(save_directory, - self.tokenizer_config_file) + tokenizer_config_file = os.path.join( + save_directory, self.tokenizer_config_file + ) # init_config is set in metaclass created `__init__`, tokenizer_config = self.init_config with open(tokenizer_config_file, "w", encoding="utf-8") as f: @@ -545,12 +595,14 @@ class PretrainedTokenizer(object): copyfile(src_path, dst_path) @staticmethod - def load_vocabulary(filepath, - unk_token=None, - pad_token=None, - bos_token=None, - eos_token=None, - **kwargs): + def load_vocabulary( + filepath, + unk_token=None, + pad_token=None, + bos_token=None, + eos_token=None, + **kwargs, + ): """ Instantiate an instance of `Vocab` from a file reserving all tokens by using `Vocab.from_dict`. The file contains a token per line, and the @@ -581,15 +633,20 @@ class PretrainedTokenizer(object): return self.special_tokens_map[name] elif name.endswith('_token_id'): return self.vocab[self.special_tokens_map[name[:-3]]] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, name)) - - def truncate_sequences(self, - ids, - pair_ids=None, - num_tokens_to_remove=0, - truncation_strategy='longest_first', - stride=0): + raise AttributeError( + "'{}' object has no attribute '{}'".format( + type(self).__name__, name + ) + ) + + def truncate_sequences( + self, + ids, + pair_ids=None, + num_tokens_to_remove=0, + truncation_strategy='longest_first', + stride=0, + ): """ Truncates a sequence pair in place to the maximum length. Args: @@ -662,9 +719,9 @@ class PretrainedTokenizer(object): return token_ids_0 + token_ids_1 - def build_offset_mapping_with_special_tokens(self, - offset_mapping_0, - offset_mapping_1=None): + def build_offset_mapping_with_special_tokens( + self, offset_mapping_0, offset_mapping_1=None + ): """ Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. Should be overridden in a subclass if the model has a special way of building those. 
@@ -681,10 +738,9 @@ class PretrainedTokenizer(object): return offset_mapping_0 + offset_mapping_1 - def get_special_tokens_mask(self, - token_ids_0, - token_ids_1=None, - already_has_special_tokens=False): + def get_special_tokens_mask( + self, token_ids_0, token_ids_1=None, already_has_special_tokens=False + ): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``encode`` methods. @@ -698,11 +754,12 @@ class PretrainedTokenizer(object): 1 for a special token, 0 for a sequence token. """ return [0] * ( - (len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) + (len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0) + ) - def create_token_type_ids_from_sequences(self, - token_ids_0, - token_ids_1=None): + def create_token_type_ids_from_sequences( + self, token_ids_0, token_ids_1=None + ): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Should be overridden in a subclass if the model has a special way of building those. @@ -733,20 +790,24 @@ class PretrainedTokenizer(object): token_ids_1 = [] return len( self.build_inputs_with_special_tokens( - token_ids_0, token_ids_1 if pair else None)) - - def encode(self, - text, - text_pair=None, - max_seq_len=512, - pad_to_max_seq_len=False, - truncation_strategy="longest_first", - return_position_ids=False, - return_token_type_ids=True, - return_attention_mask=False, - return_length=False, - return_overflowing_tokens=False, - return_special_tokens_mask=False): + token_ids_0, token_ids_1 if pair else None + ) + ) + + def encode( + self, + text, + text_pair=None, + max_seq_len=512, + pad_to_max_seq_len=False, + truncation_strategy="longest_first", + return_position_ids=False, + return_token_type_ids=True, + return_attention_mask=False, + return_length=False, + return_overflowing_tokens=False, + return_special_tokens_mask=False, + ): """ Performs tokenization and uses the tokenized tokens to prepare model inputs. 
It supports sequence or sequence pair as input, and batch input @@ -834,13 +895,17 @@ class PretrainedTokenizer(object): if isinstance(text, str): tokens = self._tokenize(text) return self.convert_tokens_to_ids(tokens) - elif isinstance(text, - (list, tuple)) and len(text) > 0 and isinstance( - text[0], str): + elif ( + isinstance(text, (list, tuple)) + and len(text) > 0 + and isinstance(text[0], str) + ): return self.convert_tokens_to_ids(text) - elif isinstance(text, - (list, tuple)) and len(text) > 0 and isinstance( - text[0], int): + elif ( + isinstance(text, (list, tuple)) + and len(text) > 0 + and isinstance(text[0], int) + ): return text else: raise ValueError( @@ -857,8 +922,9 @@ class PretrainedTokenizer(object): encoded_inputs = {} # Truncation: Handle max sequence length - total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add( - pair=pair)) + total_len = ( + len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair)) + ) if max_seq_len and total_len > max_seq_len: ids, pair_ids, overflowing_tokens = self.truncate_sequences( @@ -875,7 +941,8 @@ class PretrainedTokenizer(object): sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences( - ids, pair_ids) + ids, pair_ids + ) # Build output dictionnary encoded_inputs["input_ids"] = sequence @@ -883,44 +950,53 @@ class PretrainedTokenizer(object): encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: encoded_inputs[ - "special_tokens_mask"] = self.get_special_tokens_mask( - ids, pair_ids) + "special_tokens_mask" + ] = self.get_special_tokens_mask(ids, pair_ids) if return_length: encoded_inputs["seq_len"] = len(encoded_inputs["input_ids"]) # Check lengths - assert max_seq_len is None or len( - encoded_inputs["input_ids"]) <= max_seq_len + assert ( + max_seq_len is None + or len(encoded_inputs["input_ids"]) <= max_seq_len + ) # Padding - needs_to_be_padded = pad_to_max_seq_len and \ - max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len + needs_to_be_padded = ( + pad_to_max_seq_len + and max_seq_len + and len(encoded_inputs["input_ids"]) < max_seq_len + ) if needs_to_be_padded: difference = max_seq_len - len(encoded_inputs["input_ids"]) if self.padding_side == 'right': if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len( - encoded_inputs["input_ids"]) + [0] * difference + encoded_inputs["input_ids"] + ) + [0] * difference if return_token_type_ids: encoded_inputs["token_type_ids"] = ( - encoded_inputs["token_type_ids"] + - [self.pad_token_type_id] * difference) + encoded_inputs["token_type_ids"] + + [self.pad_token_type_id] * difference + ) if return_special_tokens_mask: - encoded_inputs["special_tokens_mask"] = encoded_inputs[ - "special_tokens_mask"] + [1] * difference - encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [ - self.pad_token_id - ] * difference + encoded_inputs["special_tokens_mask"] = ( + encoded_inputs["special_tokens_mask"] + [1] * difference + ) + encoded_inputs["input_ids"] = ( + encoded_inputs["input_ids"] + + [self.pad_token_id] * difference + ) elif self.padding_side == 'left': if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + [ 1 ] * len(encoded_inputs["input_ids"]) if return_token_type_ids: - encoded_inputs["token_type_ids"] = ( - [self.pad_token_type_id] * difference + - encoded_inputs["token_type_ids"]) + encoded_inputs["token_type_ids"] = [ + self.pad_token_type_id + ] * difference + encoded_inputs["token_type_ids"] if 
return_special_tokens_mask: encoded_inputs["special_tokens_mask"] = [ 1 @@ -931,27 +1007,31 @@ class PretrainedTokenizer(object): else: if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len( - encoded_inputs["input_ids"]) + encoded_inputs["input_ids"] + ) if return_position_ids: encoded_inputs["position_ids"] = list( - range(len(encoded_inputs["input_ids"]))) + range(len(encoded_inputs["input_ids"])) + ) return encoded_inputs - def batch_encode(self, - batch_text_or_text_pairs, - max_seq_len=512, - pad_to_max_seq_len=False, - stride=0, - is_split_into_words=False, - truncation_strategy="longest_first", - return_position_ids=False, - return_token_type_ids=True, - return_attention_mask=False, - return_length=False, - return_overflowing_tokens=False, - return_special_tokens_mask=False): + def batch_encode( + self, + batch_text_or_text_pairs, + max_seq_len=512, + pad_to_max_seq_len=False, + stride=0, + is_split_into_words=False, + truncation_strategy="longest_first", + return_position_ids=False, + return_token_type_ids=True, + return_attention_mask=False, + return_length=False, + return_overflowing_tokens=False, + return_special_tokens_mask=False, + ): """ Performs tokenization and uses the tokenized tokens to prepare model inputs. It supports batch inputs of sequence or sequence pair. @@ -1043,13 +1123,17 @@ class PretrainedTokenizer(object): if isinstance(text, str): tokens = self._tokenize(text) return self.convert_tokens_to_ids(tokens) - elif isinstance(text, - (list, tuple)) and len(text) > 0 and isinstance( - text[0], str): + elif ( + isinstance(text, (list, tuple)) + and len(text) > 0 + and isinstance(text[0], str) + ): return self.convert_tokens_to_ids(text) - elif isinstance(text, - (list, tuple)) and len(text) > 0 and isinstance( - text[0], int): + elif ( + isinstance(text, (list, tuple)) + and len(text) > 0 + and isinstance(text[0], int) + ): return text else: raise ValueError( @@ -1058,23 +1142,29 @@ class PretrainedTokenizer(object): batch_encode_inputs = [] for example_id, tokens_or_pair_tokens in enumerate( - batch_text_or_text_pairs): + batch_text_or_text_pairs + ): if not isinstance(tokens_or_pair_tokens, (list, tuple)): text, text_pair = tokens_or_pair_tokens, None elif is_split_into_words and not isinstance( - tokens_or_pair_tokens[0], (list, tuple)): + tokens_or_pair_tokens[0], (list, tuple) + ): text, text_pair = tokens_or_pair_tokens, None else: text, text_pair = tokens_or_pair_tokens first_ids = get_input_ids(text) - second_ids = get_input_ids( - text_pair) if text_pair is not None else None + second_ids = ( + get_input_ids(text_pair) if text_pair is not None else None + ) if stride > 0 and second_ids is not None: - max_len_for_pair = max_seq_len - len( - first_ids) - self.num_special_tokens_to_add(pair=True) + max_len_for_pair = ( + max_seq_len + - len(first_ids) + - self.num_special_tokens_to_add(pair=True) + ) token_offset_mapping = self.get_offset_mapping(text) token_pair_offset_mapping = self.get_offset_mapping(text_pair) @@ -1087,18 +1177,24 @@ class PretrainedTokenizer(object): length = max_len_for_pair ids = first_ids - pair_ids = second_ids[offset:offset + length] + pair_ids = second_ids[offset : offset + length] mapping = token_offset_mapping - pair_mapping = token_pair_offset_mapping[offset:offset + - length] - - offset_mapping = self.build_offset_mapping_with_special_tokens( - mapping, pair_mapping) + pair_mapping = token_pair_offset_mapping[ + offset : offset + length + ] + + offset_mapping = ( + 
self.build_offset_mapping_with_special_tokens( + mapping, pair_mapping + ) + ) sequence = self.build_inputs_with_special_tokens( - ids, pair_ids) + ids, pair_ids + ) token_type_ids = self.create_token_type_ids_from_sequences( - ids, pair_ids) + ids, pair_ids + ) # Build output dictionnary encoded_inputs["input_ids"] = sequence @@ -1106,25 +1202,32 @@ class PretrainedTokenizer(object): encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: encoded_inputs[ - "special_tokens_mask"] = self.get_special_tokens_mask( - ids, pair_ids) + "special_tokens_mask" + ] = self.get_special_tokens_mask(ids, pair_ids) if return_length: encoded_inputs["seq_len"] = len( - encoded_inputs["input_ids"]) + encoded_inputs["input_ids"] + ) # Check lengths - assert max_seq_len is None or len( - encoded_inputs["input_ids"]) <= max_seq_len + assert ( + max_seq_len is None + or len(encoded_inputs["input_ids"]) <= max_seq_len + ) # Padding - needs_to_be_padded = pad_to_max_seq_len and \ - max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len + needs_to_be_padded = ( + pad_to_max_seq_len + and max_seq_len + and len(encoded_inputs["input_ids"]) < max_seq_len + ) encoded_inputs['offset_mapping'] = offset_mapping if needs_to_be_padded: difference = max_seq_len - len( - encoded_inputs["input_ids"]) + encoded_inputs["input_ids"] + ) if self.padding_side == 'right': if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len( @@ -1133,33 +1236,42 @@ class PretrainedTokenizer(object): if return_token_type_ids: # 0 for padding token mask encoded_inputs["token_type_ids"] = ( - encoded_inputs["token_type_ids"] + - [self.pad_token_type_id] * difference) + encoded_inputs["token_type_ids"] + + [self.pad_token_type_id] * difference + ) if return_special_tokens_mask: - encoded_inputs[ - "special_tokens_mask"] = encoded_inputs[ - "special_tokens_mask"] + [1 - ] * difference - encoded_inputs["input_ids"] = encoded_inputs[ - "input_ids"] + [self.pad_token_id] * difference - encoded_inputs['offset_mapping'] = encoded_inputs[ - 'offset_mapping'] + [(0, 0)] * difference + encoded_inputs["special_tokens_mask"] = ( + encoded_inputs["special_tokens_mask"] + + [1] * difference + ) + encoded_inputs["input_ids"] = ( + encoded_inputs["input_ids"] + + [self.pad_token_id] * difference + ) + encoded_inputs['offset_mapping'] = ( + encoded_inputs['offset_mapping'] + + [(0, 0)] * difference + ) elif self.padding_side == 'left': if return_attention_mask: encoded_inputs["attention_mask"] = [ 0 ] * difference + [1] * len( - encoded_inputs["input_ids"]) + encoded_inputs["input_ids"] + ) if return_token_type_ids: # 0 for padding token mask - encoded_inputs["token_type_ids"] = ( - [self.pad_token_type_id] * difference + - encoded_inputs["token_type_ids"]) + encoded_inputs["token_type_ids"] = [ + self.pad_token_type_id + ] * difference + encoded_inputs[ + "token_type_ids" + ] if return_special_tokens_mask: encoded_inputs["special_tokens_mask"] = [ 1 ] * difference + encoded_inputs[ - "special_tokens_mask"] + "special_tokens_mask" + ] encoded_inputs["input_ids"] = [ self.pad_token_id ] * difference + encoded_inputs["input_ids"] @@ -1169,11 +1281,13 @@ class PretrainedTokenizer(object): else: if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len( - encoded_inputs["input_ids"]) + encoded_inputs["input_ids"] + ) if return_position_ids: encoded_inputs["position_ids"] = list( - range(len(encoded_inputs["input_ids"]))) + range(len(encoded_inputs["input_ids"])) + ) encoded_inputs['overflow_to_sample'] = 
example_id batch_encode_inputs.append(encoded_inputs) @@ -1194,7 +1308,9 @@ class PretrainedTokenizer(object): return_attention_mask=return_attention_mask, return_length=return_length, return_overflowing_tokens=return_overflowing_tokens, - return_special_tokens_mask=return_special_tokens_mask)) + return_special_tokens_mask=return_special_tokens_mask, + ) + ) return batch_encode_inputs @@ -1213,7 +1329,8 @@ class PretrainedTokenizer(object): for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append( - sub_token if sub_token != self.unk_token else token) + sub_token if sub_token != self.unk_token else token + ) normalized_text, char_mapping = '', [] @@ -1223,10 +1340,13 @@ class PretrainedTokenizer(object): ch = unicodedata.normalize('NFD', ch) ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn']) - ch = ''.join([ - c for c in ch - if not (ord(c) == 0 or ord(c) == 0xfffd or _is_control(c)) - ]) + ch = ''.join( + [ + c + for c in ch + if not (ord(c) == 0 or ord(c) == 0xFFFD or _is_control(c)) + ] + ) normalized_text += ch char_mapping.extend([i] * len(ch)) @@ -1241,7 +1361,8 @@ class PretrainedTokenizer(object): end = start + len(token) token_mapping.append( - (char_mapping[start], char_mapping[end - 1] + 1)) + (char_mapping[start], char_mapping[end - 1] + 1) + ) offset = end return token_mapping diff --git a/python/paddle/fluid/tests/unittests/transformer_model.py b/python/paddle/fluid/tests/unittests/transformer_model.py index 7fd02339ec910c19d7b2e73b95a29cdaa975899a..2baf56caaf39131bf39f9c91c7970e79c684cb4e 100644 --- a/python/paddle/fluid/tests/unittests/transformer_model.py +++ b/python/paddle/fluid/tests/unittests/transformer_model.py @@ -30,24 +30,33 @@ def position_encoding_init(n_position, d_pos_vec): """ Generate the initial values for the sinusoid position encoding table. """ - position_enc = np.array([[ - pos / np.power(10000, 2 * (j // 2) / d_pos_vec) - for j in range(d_pos_vec) - ] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)]) + position_enc = np.array( + [ + [ + pos / np.power(10000, 2 * (j // 2) / d_pos_vec) + for j in range(d_pos_vec) + ] + if pos != 0 + else np.zeros(d_pos_vec) + for pos in range(n_position) + ] + ) position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1 return position_enc.astype("float32") -def multi_head_attention(queries, - keys, - values, - attn_bias, - d_key, - d_value, - d_model, - n_head=1, - dropout_rate=0.): +def multi_head_attention( + queries, + keys, + values, + attn_bias, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0.0, +): """ Multi-Head Attention. Note that attn_bias is added to the logit before computing softmax activiation to mask certain selected positions so that @@ -55,36 +64,42 @@ def multi_head_attention(queries, """ if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): raise ValueError( - "Inputs: queries, keys and values should all be 3-D tensors.") + "Inputs: queries, keys and values should all be 3-D tensors." + ) def __compute_qkv(queries, keys, values, n_head, d_key, d_value): """ Add linear projection to queries, keys, and values. 
""" - q = layers.fc(input=queries, - size=d_key * n_head, - param_attr=fluid.initializer.Xavier( - uniform=False, - fan_in=d_model * d_key, - fan_out=n_head * d_key), - bias_attr=False, - num_flatten_dims=2) - k = layers.fc(input=keys, - size=d_key * n_head, - param_attr=fluid.initializer.Xavier( - uniform=False, - fan_in=d_model * d_key, - fan_out=n_head * d_key), - bias_attr=False, - num_flatten_dims=2) - v = layers.fc(input=values, - size=d_value * n_head, - param_attr=fluid.initializer.Xavier( - uniform=False, - fan_in=d_model * d_value, - fan_out=n_head * d_value), - bias_attr=False, - num_flatten_dims=2) + q = layers.fc( + input=queries, + size=d_key * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, fan_in=d_model * d_key, fan_out=n_head * d_key + ), + bias_attr=False, + num_flatten_dims=2, + ) + k = layers.fc( + input=keys, + size=d_key * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, fan_in=d_model * d_key, fan_out=n_head * d_key + ), + bias_attr=False, + num_flatten_dims=2, + ) + v = layers.fc( + input=values, + size=d_value * n_head, + param_attr=fluid.initializer.Xavier( + uniform=False, + fan_in=d_model * d_value, + fan_out=n_head * d_value, + ), + bias_attr=False, + num_flatten_dims=2, + ) return q, k, v def __split_heads(x, n_head): @@ -100,7 +115,8 @@ def multi_head_attention(queries, hidden_size = x.shape[-1] # FIXME(guosheng): Decouple the program desc with batch_size. reshaped = layers.reshape( - x=x, shape=[batch_size, -1, n_head, hidden_size // n_head]) + x=x, shape=[batch_size, -1, n_head, hidden_size // n_head] + ) # permute the dimensions into: # [batch_size, n_head, max_sequence_len, hidden_size_per_head] @@ -111,7 +127,8 @@ def multi_head_attention(queries, Transpose and then reshape the last two dimensions of input tensor x so that it becomes one dimension, which is reverse to __split_heads. """ - if len(x.shape) == 3: return x + if len(x.shape) == 3: + return x if len(x.shape) != 4: raise ValueError("Input(x) should be a 4-D Tensor.") @@ -120,8 +137,9 @@ def multi_head_attention(queries, return layers.reshape( x=trans_x, shape=list( - map(int, - [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]]))) + map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]]) + ), + ) def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate): """ @@ -146,9 +164,9 @@ def multi_head_attention(queries, product = layers.matmul(x=scaled_q, y=k, transpose_y=True) weights = __softmax(layers.elementwise_add(x=product, y=attn_bias)) if dropout_rate: - weights = layers.dropout(weights, - dropout_prob=dropout_rate, - is_test=False) + weights = layers.dropout( + weights, dropout_prob=dropout_rate, is_test=False + ) out = layers.matmul(weights, v) return out @@ -158,17 +176,20 @@ def multi_head_attention(queries, k = __split_heads(k, n_head) v = __split_heads(v, n_head) - ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model, - dropout_rate) + ctx_multiheads = scaled_dot_product_attention( + q, k, v, attn_bias, d_model, dropout_rate + ) out = __combine_heads(ctx_multiheads) # Project back to the model size. 
- proj_out = layers.fc(input=out, - size=d_model, - param_attr=fluid.initializer.Xavier(uniform=False), - bias_attr=False, - num_flatten_dims=2) + proj_out = layers.fc( + input=out, + size=d_model, + param_attr=fluid.initializer.Xavier(uniform=False), + bias_attr=False, + num_flatten_dims=2, + ) return proj_out @@ -178,21 +199,27 @@ def positionwise_feed_forward(x, d_inner_hid, d_hid): This module consists of two linear transformations with a ReLU activation in between, which is applied to each position separately and identically. """ - hidden = layers.fc(input=x, - size=d_inner_hid, - num_flatten_dims=2, - param_attr=fluid.initializer.Uniform(low=-(d_hid**-0.5), - high=(d_hid**-0.5)), - act="relu") - out = layers.fc(input=hidden, - size=d_hid, - num_flatten_dims=2, - param_attr=fluid.initializer.Uniform( - low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5))) + hidden = layers.fc( + input=x, + size=d_inner_hid, + num_flatten_dims=2, + param_attr=fluid.initializer.Uniform( + low=-(d_hid**-0.5), high=(d_hid**-0.5) + ), + act="relu", + ) + out = layers.fc( + input=hidden, + size=d_hid, + num_flatten_dims=2, + param_attr=fluid.initializer.Uniform( + low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5) + ), + ) return out -def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.): +def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.0): """ Add residual connection, layer normalization and droput to the out tensor optionally according to the value of process_cmd. @@ -204,10 +231,12 @@ def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.): if cmd == "a": # add residual connection out = out + prev_out if prev_out else out elif cmd == "n": # add layer normalization - out = layers.layer_norm(out, - begin_norm_axis=len(out.shape) - 1, - param_attr=fluid.initializer.Constant(1.), - bias_attr=fluid.initializer.Constant(0.)) + out = layers.layer_norm( + out, + begin_norm_axis=len(out.shape) - 1, + param_attr=fluid.initializer.Constant(1.0), + bias_attr=fluid.initializer.Constant(0.0), + ) elif cmd == "d": # add dropout if dropout: out = layers.dropout(out, dropout_prob=dropout, is_test=False) @@ -218,53 +247,65 @@ pre_process_layer = partial(pre_post_process_layer, None) post_process_layer = pre_post_process_layer -def prepare_encoder(src_word, - src_pos, - src_vocab_size, - src_emb_dim, - src_pad_idx, - src_max_len, - dropout=0., - pos_pad_idx=0, - pos_enc_param_name=None): +def prepare_encoder( + src_word, + src_pos, + src_vocab_size, + src_emb_dim, + src_pad_idx, + src_max_len, + dropout=0.0, + pos_pad_idx=0, + pos_enc_param_name=None, +): """Add word embeddings and position encodings. The output tensor has a shape of: [batch_size, max_src_length_in_batch, d_model]. This module is used at the bottom of the encoder stacks. """ - src_word_emb = layers.embedding(src_word, - size=[src_vocab_size, src_emb_dim], - padding_idx=src_pad_idx, - param_attr=fluid.initializer.Normal(0., 1.)) + src_word_emb = layers.embedding( + src_word, + size=[src_vocab_size, src_emb_dim], + padding_idx=src_pad_idx, + param_attr=fluid.initializer.Normal(0.0, 1.0), + ) src_pos_enc = layers.embedding( src_pos, size=[src_max_len, src_emb_dim], padding_idx=pos_pad_idx, - param_attr=fluid.ParamAttr(name=pos_enc_param_name, trainable=False)) + param_attr=fluid.ParamAttr(name=pos_enc_param_name, trainable=False), + ) src_pos_enc.stop_gradient = True enc_input = src_word_emb + src_pos_enc # FIXME(guosheng): Decouple the program desc with batch_size. 
enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim]) - return layers.dropout(enc_input, dropout_prob=dropout, - is_test=False) if dropout else enc_input + return ( + layers.dropout(enc_input, dropout_prob=dropout, is_test=False) + if dropout + else enc_input + ) -prepare_encoder = partial(prepare_encoder, - pos_enc_param_name=pos_enc_param_names[0]) -prepare_decoder = partial(prepare_encoder, - pos_enc_param_name=pos_enc_param_names[1]) +prepare_encoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[0] +) +prepare_decoder = partial( + prepare_encoder, pos_enc_param_name=pos_enc_param_names[1] +) -def encoder_layer(enc_input, - attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0.): +def encoder_layer( + enc_input, + attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): """The encoder layers that can be stacked to form a deep encoder. This module consits of a multi-head (self) attention followed by @@ -272,46 +313,67 @@ def encoder_layer(enc_input, with the post_process_layer to add residual connection, layer normalization and droput. """ - attn_output = multi_head_attention(enc_input, enc_input, enc_input, - attn_bias, d_key, d_value, d_model, - n_head, dropout_rate) - attn_output = post_process_layer(enc_input, attn_output, "dan", - dropout_rate) + attn_output = multi_head_attention( + enc_input, + enc_input, + enc_input, + attn_bias, + d_key, + d_value, + d_model, + n_head, + dropout_rate, + ) + attn_output = post_process_layer( + enc_input, attn_output, "dan", dropout_rate + ) ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model) return post_process_layer(attn_output, ffd_output, "dan", dropout_rate) -def encoder(enc_input, +def encoder( + enc_input, + attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): + """ + The encoder is composed of a stack of identical layers returned by calling + encoder_layer. + """ + for i in range(n_layer): + enc_output = encoder_layer( + enc_input, attn_bias, - n_layer, n_head, d_key, d_value, d_model, d_inner_hid, - dropout_rate=0.): - """ - The encoder is composed of a stack of identical layers returned by calling - encoder_layer. - """ - for i in range(n_layer): - enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value, - d_model, d_inner_hid, dropout_rate) + dropout_rate, + ) enc_input = enc_output return enc_output -def decoder_layer(dec_input, - enc_output, - slf_attn_bias, - dec_enc_attn_bias, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0.): - """ The layer to be stacked in decoder part. +def decoder_layer( + dec_input, + enc_output, + slf_attn_bias, + dec_enc_attn_bias, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): + """The layer to be stacked in decoder part. The structure of this module is similar to that in the encoder part except a multi-head attention is added to implement encoder-decoder attention. @@ -364,17 +426,19 @@ def decoder_layer(dec_input, return dec_output -def decoder(dec_input, - enc_output, - dec_slf_attn_bias, - dec_enc_attn_bias, - n_layer, - n_head, - d_key, - d_value, - d_model, - d_inner_hid, - dropout_rate=0.): +def decoder( + dec_input, + enc_output, + dec_slf_attn_bias, + dec_enc_attn_bias, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + dropout_rate=0.0, +): """ The decoder is composed of a stack of identical decoder_layer layers. 
""" @@ -435,10 +499,10 @@ def build_inputs(max_length, n_head): all_inputs = [] for name, shape, dtype in zip(names, shapes, dtypes): all_inputs.append( - fluid.layers.data(name=name, - shape=shape, - dtype=dtype, - append_batch_size=False)) + fluid.layers.data( + name=name, shape=shape, dtype=dtype, append_batch_size=False + ) + ) return all_inputs @@ -458,8 +522,17 @@ def transformer( pos_pad_idx, ): - src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, trg_src_attn_bias, gold, weights = build_inputs( - max_length, n_head) + ( + src_word, + src_pos, + trg_word, + trg_pos, + src_slf_attn_bias, + trg_slf_attn_bias, + trg_src_attn_bias, + gold, + weights, + ) = build_inputs(max_length, n_head) enc_input = prepare_encoder( src_word, @@ -507,14 +580,17 @@ def transformer( # TODO(guosheng): Share the weight matrix between the embedding layers and # the pre-softmax linear transformation. - predict = layers.reshape(x=layers.fc( - input=dec_output, - size=trg_vocab_size, - param_attr=fluid.initializer.Xavier(uniform=False), - bias_attr=False, - num_flatten_dims=2), - shape=[-1, trg_vocab_size], - act="softmax") + predict = layers.reshape( + x=layers.fc( + input=dec_output, + size=trg_vocab_size, + param_attr=fluid.initializer.Xavier(uniform=False), + bias_attr=False, + num_flatten_dims=2, + ), + shape=[-1, trg_vocab_size], + act="softmax", + ) cost = layers.cross_entropy(input=predict, label=gold) weighted_cost = cost * weights diff --git a/python/paddle/fluid/tests/unittests/utils.py b/python/paddle/fluid/tests/unittests/utils.py index 346d40025837b915dc9c0a9f3891eda0806536e7..03993c2355eea3931d8333c14f581f47f03683e4 100644 --- a/python/paddle/fluid/tests/unittests/utils.py +++ b/python/paddle/fluid/tests/unittests/utils.py @@ -83,7 +83,6 @@ def _is_equal_program(prog1, prog2): def load_dygraph_vars_to_scope(model_path, scope, place): - def load_dict_to_scope(scope, dictionary): if scope is None: scope = fluid.global_scope() @@ -103,7 +102,6 @@ def load_dygraph_vars_to_scope(model_path, scope, place): class DyGraphProgramDescTracerTestHelper(object): - def __init__(self, unittest_obj): self.unittest_obj = unittest_obj diff --git a/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py index a3a3eca128cb0a3882e7a0d175456e88ac9efca8..730d77916216c33628688765325bff81dc6e2709 100644 --- a/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py @@ -13,8 +13,21 @@ # limitations under the License. 
NEED_TO_FIX_OP_LIST = [ - 'fused_elemwise_activation', 'bilinear_tensor_product', 'conv2d_transpose', - 'depthwise_conv2d_transpose', 'grid_sampler', 'lstmp', 'margin_rank_loss', - 'matmul', 'scatter', 'soft_relu', 'squared_l2_distance', 'tree_conv', 'cvm', - 'cudnn_lstm', 'rnn', 'multi_dot', 'index_add' + 'fused_elemwise_activation', + 'bilinear_tensor_product', + 'conv2d_transpose', + 'depthwise_conv2d_transpose', + 'grid_sampler', + 'lstmp', + 'margin_rank_loss', + 'matmul', + 'scatter', + 'soft_relu', + 'squared_l2_distance', + 'tree_conv', + 'cvm', + 'cudnn_lstm', + 'rnn', + 'multi_dot', + 'index_add', ] diff --git a/python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py b/python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py index ee8202aa9f33e7718da710850e77baf6f89b8a1e..a00c1a720aa5cb4c68162eaa055584603e66c442 100644 --- a/python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py @@ -19,20 +19,20 @@ # reasons for skipping compile_vs_runtime test or be fixed later. COMPILE_RUN_OP_WHITE_LIST = [ - 'sequence_pool', \ - 'sequence_slice', \ - 'generate_proposals', \ - 'mine_hard_examples', \ - 'retinanet_detection_output', \ - 'ctc_align', \ - 'fusion_seqpool_cvm_concat', \ - 'gru', \ - 'rpn_target_assign', \ - 'retinanet_target_assign', \ - 'filter_by_instag', \ - 'im2sequence', \ - 'generate_proposal_labels', \ - 'detection_map', \ - 'locality_aware_nms', \ - 'var_conv_2d' + 'sequence_pool', + 'sequence_slice', + 'generate_proposals', + 'mine_hard_examples', + 'retinanet_detection_output', + 'ctc_align', + 'fusion_seqpool_cvm_concat', + 'gru', + 'rpn_target_assign', + 'retinanet_target_assign', + 'filter_by_instag', + 'im2sequence', + 'generate_proposal_labels', + 'detection_map', + 'locality_aware_nms', + 'var_conv_2d', ] diff --git a/python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py b/python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py index fb1cd35c45380d1413cc2fe33a0d0bf0f0615581..33960cf4c64d3a6215d4f86f9f3101cbfc56cccb 100644 --- a/python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py @@ -17,7 +17,6 @@ NOT_CHECK_OP_LIST = ['deformable_conv', 'row_conv', 'kron'] # TODO(Shixiaowei02): Check if the items do not need fix. 
# no_grad_set has value in NEED_TO_FIX_OP_LIST -# yapf: disable NEED_TO_FIX_OP_LIST = [ 'affine_channel', 'affine_grid', @@ -72,4 +71,3 @@ NEED_TO_FIX_OP_LIST = [ 'spectral_norm', 'complex', ] -# yapf: enable diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py index 9b57b0d82471ce063dedcab816a7c1770c50c6b3..ba93c5bb874a3aa2a5affd61af32cf032ab35e2a 100644 --- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py @@ -14,76 +14,76 @@ # For op in NO_FP64_CHECK_GRAD_OP_LIST, the op test requires check_grad with fp64 precision NO_FP64_CHECK_GRAD_OP_LIST = [ - 'affine_grid', \ - 'clip', \ - 'conv2d', \ - 'conv2d_transpose', \ - 'conv3d', \ - 'conv3d_transpose', \ - 'conv_shift', \ - 'cos_sim', \ - 'cudnn_lstm', \ - 'cvm', \ - 'data_norm', \ - 'deformable_conv', \ - 'deformable_conv_v1', \ - 'deformable_psroi_pooling', \ - 'depthwise_conv2d', \ - 'depthwise_conv2d_transpose', \ - 'dropout', \ - 'fused_elemwise_activation', \ - 'hinge_loss', \ - 'huber_loss', \ - 'im2sequence', \ - 'increment', \ - 'l1_norm', \ - 'log_loss', \ - 'lrn', \ - 'margin_rank_loss', \ - 'match_matrix_tensor', \ - 'matmul', \ - 'max_pool2d_with_index', \ - 'max_pool3d_with_index', \ - 'minus', \ - 'modified_huber_loss', \ - 'nce', \ - 'pool2d', \ - 'pool3d', \ - 'prroi_pool', \ - 'rank_loss', \ - 'reduce_max', \ - 'reduce_min', \ - 'reshape2', \ - 'roi_perspective_transform', \ - 'row_conv', \ - 'scatter', \ - 'sequence_conv', \ - 'sequence_pool', \ - 'sequence_reverse', \ - 'sequence_slice', \ - 'sequence_topk_avg_pooling', \ - 'shuffle_channel', \ - 'sigmoid', \ - 'smooth_l1_loss', \ - 'softmax', \ - 'spectral_norm', \ - 'squared_l2_distance', \ - 'squared_l2_norm', \ - 'tanh', \ - 'mish', \ - 'transpose2', \ - 'trilinear_interp', \ - 'trilinear_interp_v2', \ - 'var_conv_2d', \ - 'warpctc', \ - 'bilateral_slice', \ - 'cast' + 'affine_grid', + 'clip', + 'conv2d', + 'conv2d_transpose', + 'conv3d', + 'conv3d_transpose', + 'conv_shift', + 'cos_sim', + 'cudnn_lstm', + 'cvm', + 'data_norm', + 'deformable_conv', + 'deformable_conv_v1', + 'deformable_psroi_pooling', + 'depthwise_conv2d', + 'depthwise_conv2d_transpose', + 'dropout', + 'fused_elemwise_activation', + 'hinge_loss', + 'huber_loss', + 'im2sequence', + 'increment', + 'l1_norm', + 'log_loss', + 'lrn', + 'margin_rank_loss', + 'match_matrix_tensor', + 'matmul', + 'max_pool2d_with_index', + 'max_pool3d_with_index', + 'minus', + 'modified_huber_loss', + 'nce', + 'pool2d', + 'pool3d', + 'prroi_pool', + 'rank_loss', + 'reduce_max', + 'reduce_min', + 'reshape2', + 'roi_perspective_transform', + 'row_conv', + 'scatter', + 'sequence_conv', + 'sequence_pool', + 'sequence_reverse', + 'sequence_slice', + 'sequence_topk_avg_pooling', + 'shuffle_channel', + 'sigmoid', + 'smooth_l1_loss', + 'softmax', + 'spectral_norm', + 'squared_l2_distance', + 'squared_l2_norm', + 'tanh', + 'mish', + 'transpose2', + 'trilinear_interp', + 'trilinear_interp_v2', + 'var_conv_2d', + 'warpctc', + 'bilateral_slice', + 'cast', ] NO_FP16_CHECK_GRAD_OP_LIST = [ - 'fused_elemwise_activation', \ - 'pool2d', \ - 'pool3d', \ - 'softmax',\ - 'conv2d_transpose' + 'fused_elemwise_activation', + 'pool2d', + 'pool3d', + 'softmax', + 'conv2d_transpose', ] diff --git a/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py 
b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py index 91731c1dd0b21ac30157ec0fa0f0c45b49bd4d38..22bc42d969468c4b1b1be16c841505eb66987eae 100644 --- a/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py @@ -13,47 +13,48 @@ # limitations under the License. NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST = [ - 'affine_channel', \ - 'bilinear_interp', \ - 'bilinear_interp_v2',\ - 'bilinear_tensor_product', \ - 'conv2d', \ - 'conv3d', \ - 'cross_entropy', \ - 'depthwise_conv2d_transpose', \ - 'grid_sampler', \ - 'group_norm', \ - 'gru', \ - 'gru_unit', \ - 'kldiv_loss', \ - 'lstm', \ - 'lstmp', \ - 'max_pool2d_with_index', \ - 'max_pool3d_with_index', \ - 'norm', \ - 'pool3d', \ - 'reduce_prod', \ - 'selu', \ - 'sigmoid_cross_entropy_with_logits', \ - 'soft_relu', \ - 'softmax_with_cross_entropy', \ - 'spp', \ - 'teacher_student_sigmoid_loss', \ - 'unpool', \ - 'yolov3_loss', \ - 'inverse', \ - 'bilateral_slice',\ - 'cudnn_lstm', \ - 'rnn', \ - 'lgamma', \ - 'sparse_attention', \ - 'svd', \ - 'matrix_power', \ - 'cholesky_solve', \ - 'solve', \ - 'qr', \ + 'affine_channel', + 'bilinear_interp', + 'bilinear_interp_v2', + 'bilinear_tensor_product', + 'conv2d', + 'conv3d', + 'cross_entropy', + 'depthwise_conv2d_transpose', + 'grid_sampler', + 'group_norm', + 'gru', + 'gru_unit', + 'kldiv_loss', + 'lstm', + 'lstmp', + 'max_pool2d_with_index', + 'max_pool3d_with_index', + 'norm', + 'pool3d', + 'reduce_prod', + 'selu', + 'sigmoid_cross_entropy_with_logits', + 'soft_relu', + 'softmax_with_cross_entropy', + 'spp', + 'teacher_student_sigmoid_loss', + 'unpool', + 'yolov3_loss', + 'inverse', + 'bilateral_slice', + 'cudnn_lstm', + 'rnn', + 'lgamma', + 'sparse_attention', + 'svd', + 'matrix_power', + 'cholesky_solve', + 'solve', + 'qr', ] -NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST = ['bilinear_interp',\ - 'bilinear_interp_v2' - ] +NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST = [ + 'bilinear_interp', + 'bilinear_interp_v2', +] diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py index c2c2009a1f21ff78040378fa759a31113e813f16..e42c5144a7b7ca9fa1d3bfb1633174254fe11b13 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveAllGather(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,26 +29,28 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_allgather", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_allgather", + inputs={'X': tindata}, + attrs={'ring_id': 
ring_id, 'nranks': nranks}, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py index 07ea05d0ece8e1d90007a76c614119804f978ce5..889eecf6327de70f2976943f0869c86eb7d802cd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py @@ -23,32 +23,36 @@ paddle.enable_static() class TestCollectiveAllReduce(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_allreduce_sum", - inputs={'X': tindata}, - attrs={ - 'ring_id': ring_id, - }, - outputs={'Out': toutdata}) - main_prog.global_block().append_op(type="c_sync_comm_stream", - inputs={'X': toutdata}, - outputs={'Out': toutdata}, - attrs={'ring_id': ring_id}) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_allreduce_sum", + inputs={'X': tindata}, + attrs={ + 'ring_id': ring_id, + }, + outputs={'Out': toutdata}, + ) + main_prog.global_block().append_op( + type="c_sync_comm_stream", + inputs={'X': toutdata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py index 6737ab772d987460b7c8f630f6f992daba1a19b2..967d4fcc70a8f92fdb339c1c8ebed9e5fd9aeb56 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py @@ -22,7 +22,6 @@ paddle.enable_static() class TestCollectiveIdentity(TestCollectiveRunnerBase): - def __init__(self): self.global_ring_id = 0 @@ -30,22 +29,22 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data(name="tindata", - shape=[10, 1000], - dtype='float32') + tindata = layers.data( + name="tindata", shape=[10, 1000], dtype='float32' + ) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) - main_prog.global_block().append_op(type="c_identity", - inputs={'X': tindata}, - outputs={'Out': toutdata}, - attrs={ - 'ring_id': ring_id, - 'nranks': nranks - }) + stop_gradient=False, + ) + main_prog.global_block().append_op( + type="c_identity", + inputs={'X': tindata}, + outputs={'Out': toutdata}, + attrs={'ring_id': ring_id, 'nranks': nranks}, + ) return toutdata diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py index c0129fabe28b554969eaa199d6c9c4db705cb54f..f1276f765a87e644f029a90c2d473d565f127c97 100644 --- 
a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py +++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py @@ -99,7 +99,6 @@ xpu_test_device_op_type_white_list = [] class XPUOpTestWrapper(object): - def create_classes(self): base_class = None classes = [] @@ -110,7 +109,8 @@ def get_op_white_list(): op_white_list = xpu_test_op_white_list if os.getenv('XPU_TEST_OP_WHITE_LIST') is not None: op_white_list.extend( - os.getenv('XPU_TEST_OP_WHITE_LIST').strip().split(',')) + os.getenv('XPU_TEST_OP_WHITE_LIST').strip().split(',') + ) return list(set(op_white_list)) @@ -126,10 +126,13 @@ def get_type_white_list(): else: xpu2_type_white_list.append(t_type) - type_white_list = xpu1_type_white_list if version_str == "xpu1" else xpu2_type_white_list + type_white_list = ( + xpu1_type_white_list if version_str == "xpu1" else xpu2_type_white_list + ) if os.getenv('XPU_TEST_TYPE_WHITE_LIST') is not None: type_white_list.extend( - os.getenv('XPU_TEST_TYPE_WHITE_LIST').strip().split(',')) + os.getenv('XPU_TEST_TYPE_WHITE_LIST').strip().split(',') + ) return list(set(type_white_list)) @@ -137,7 +140,8 @@ def get_op_type_white_list(): op_type_white_list = xpu_test_op_type_white_list if os.getenv('XPU_TEST_OP_TYPE_WHITE_LIST') is not None: op_type_white_list.extend( - os.getenv('XPU_TEST_OP_TYPE_WHITE_LIST').strip().split(',')) + os.getenv('XPU_TEST_OP_TYPE_WHITE_LIST').strip().split(',') + ) return list(set(op_type_white_list)) @@ -145,7 +149,8 @@ def get_device_op_white_list(): device_op_white_list = xpu_test_device_op_white_list if os.getenv('XPU_TEST_DEVICE_OP_WHITE_LIST') is not None: device_op_white_list.extend( - os.getenv('XPU_TEST_DEVICE_OP_WHITE_LIST').strip().split(',')) + os.getenv('XPU_TEST_DEVICE_OP_WHITE_LIST').strip().split(',') + ) return list(set(device_op_white_list)) @@ -153,7 +158,8 @@ def get_device_op_type_white_list(): device_op_type_white_list = xpu_test_device_op_type_white_list if os.getenv('XPU_TEST_DEVICE_OP_TYPE_WHITE_LIST') is not None: device_op_type_white_list.extend( - os.getenv('XPU_TEST_DEVICE_OP_TYPE_WHITE_LIST').strip().split(',')) + os.getenv('XPU_TEST_DEVICE_OP_TYPE_WHITE_LIST').strip().split(',') + ) return list(set(device_op_type_white_list)) @@ -180,13 +186,15 @@ def make_xpu_op_list(xpu_version): if op_type == paddle.bfloat16: op_type = paddle.bfloat16 - if type_dict_paddle_to_str[ - op_type] in type_white_list or op_type not in type_dict_paddle_to_str.keys( - ): + if ( + type_dict_paddle_to_str[op_type] in type_white_list + or op_type not in type_dict_paddle_to_str.keys() + ): continue - device_op_type_name = device_op_name + '_' + type_dict_paddle_to_str[ - op_type] + device_op_type_name = ( + device_op_name + '_' + type_dict_paddle_to_str[op_type] + ) if device_op_type_name in device_op_type_white_list: continue @@ -201,12 +209,14 @@ def make_xpu_op_list(xpu_version): def get_xpu_op_support_types(op_name, dev_id=0): xpu_version = core.get_xpu_device_version(dev_id) support_type_list = core.get_xpu_device_op_support_types( - op_name, xpu_version) + op_name, xpu_version + ) support_type_str_list = [] for stype in support_type_list: if stype == paddle.bfloat16: support_type_str_list.append( - type_dict_paddle_to_str[paddle.bfloat16]) + type_dict_paddle_to_str[paddle.bfloat16] + ) else: support_type_str_list.append(type_dict_paddle_to_str[stype]) ops = make_xpu_op_list(xpu_version) @@ -243,12 +253,14 @@ def is_empty_grad_op_type(xpu_version, op, test_type): return False -def create_test_class(func_globals, - test_class, - test_type, - 
test_grad=True, - ignore_device_version=[], - test_device_version=[]): +def create_test_class( + func_globals, + test_class, + test_type, + test_grad=True, + ignore_device_version=[], + test_device_version=[], +): xpu_version = core.get_xpu_device_version(0) if xpu_version in ignore_device_version: return @@ -267,14 +279,19 @@ def create_test_class(func_globals, class_obj = test_class[1] cls_name = "{0}_{1}".format(test_class[0], str(test_type)) func_globals[cls_name] = type( - cls_name, (class_obj, ), { + cls_name, + (class_obj,), + { 'in_type': type_dict_str_to_numpy[test_type], 'in_type_str': test_type, - 'op_type_need_check_grad': True - }) - - if hasattr(test_class_obj, 'use_dynamic_create_class' - ) and test_class_obj.use_dynamic_create_class: + 'op_type_need_check_grad': True, + }, + ) + + if ( + hasattr(test_class_obj, 'use_dynamic_create_class') + and test_class_obj.use_dynamic_create_class + ): base_class, dynamic_classes = test_class_obj.dynamic_create_class() for dy_class in dynamic_classes: cls_name = "{0}_{1}".format(dy_class[0], str(test_type)) @@ -282,7 +299,7 @@ def create_test_class(func_globals, attr_dict['in_type'] = type_dict_str_to_numpy[test_type] attr_dict['in_type_str'] = test_type attr_dict['op_type_need_check_grad'] = True - func_globals[cls_name] = type(cls_name, (base_class, ), attr_dict) + func_globals[cls_name] = type(cls_name, (base_class,), attr_dict) record_op_test(op_name, test_type) if not no_grad: @@ -309,9 +326,12 @@ def get_test_cover_info(): total_len = len(set(xpu_op_list)) covered_len = len(set(xpu_op_covered)) print('{} test: {}/{}'.format(version_str, covered_len, total_len)) - if (len(diff_list) != 0): - print("These ops need to be tested on {0}! ops:{1}".format( - version_str, ','.join(diff_list))) + if len(diff_list) != 0: + print( + "These ops need to be tested on {0}! 
ops:{1}".format( + version_str, ','.join(diff_list) + ) + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/xpu/test_accuracy_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_accuracy_op_xpu.py index 495f1d4fbb606d0dedb03200a23d26e8a2ac7e78..760a45055ad38842e43b9201e0c32034e6562ed2 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_accuracy_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_accuracy_op_xpu.py @@ -20,19 +20,21 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestAccuracyOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'accuracy' self.use_dynamic_create_class = False class TestXPUAccuracyOp(XPUOpTest): - def setUp(self): self.op_type = "accuracy" self.init_dtype() @@ -48,10 +50,11 @@ class XPUTestAccuracyOp(XPUOpTestWrapper): num_correct += 1 break self.outputs = { - 'Accuracy': - np.array([num_correct / float(n)]).astype(self.dtype), + 'Accuracy': np.array([num_correct / float(n)]).astype( + self.dtype + ), 'Correct': np.array([num_correct]).astype("int32"), - 'Total': np.array([n]).astype("int32") + 'Total': np.array([n]).astype("int32"), } self.attrs = {'use_xpu': True} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py index aac695e74c911f3303f40424fbb4b0753d61505a..49b673133fcaff7698dc98fc93094944eebddc81 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py @@ -22,13 +22,16 @@ import paddle from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class TestActivationOPBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -57,13 +60,11 @@ class TestActivationOPBase(XPUOpTest): class XPUTestExpOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'exp' self.use_dynamic_create_class = False class XPUTestExp(TestActivationOPBase): - def set_case(self): self.op_type = 'exp' self.dtype = self.in_type @@ -81,13 +82,11 @@ for stype in support_types: class XPUTestSigmoidOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sigmoid' self.use_dynamic_create_class = False class XPUTestSigmoid(TestActivationOPBase): - def set_case(self): self.op_type = "sigmoid" self.dtype = self.in_type @@ -102,25 +101,22 @@ class XPUTestSigmoidOP(XPUOpTestWrapper): self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) class XPUTestSigmoid2(XPUTestSigmoid): - def init_config(self): self.x = np.random.uniform(-2, 2, [100]).astype(self.dtype) class XPUTestSigmoid3(XPUTestSigmoid): - def init_config(self): self.x = np.random.uniform(-2, 2, [10, 12, 15]).astype(self.dtype) class XPUTestSigmoid4(XPUTestSigmoid): - def init_config(self): self.x = np.random.uniform(-2, 2, [19, 19]).astype(self.dtype) class XPUTestSigmoid5(XPUTestSigmoid): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [10, 20, 30, 40]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [10, 20, 30, 40]).astype( + 
self.dtype + ) support_types = get_xpu_op_support_types('sigmoid') @@ -129,13 +125,11 @@ for stype in support_types: class XPUTestTanhOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'tanh' self.use_dynamic_create_class = False class XPUTestTanh(TestActivationOPBase): - def set_case(self): self.op_type = "tanh" self.dtype = self.in_type @@ -153,13 +147,11 @@ for stype in support_types: class XPUTestSqrtOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sqrt' self.use_dynamic_create_class = False class XPUTestSqrt(TestActivationOPBase): - def set_case(self): self.op_type = "sqrt" self.dtype = self.in_type @@ -178,13 +170,11 @@ for stype in support_types: class XPUTestAbsOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'abs' self.use_dynamic_create_class = False class XPUTestAbs(TestActivationOPBase): - def set_case(self): self.op_type = "abs" self.dtype = self.in_type @@ -208,13 +198,11 @@ for stype in support_types: class XPUTestReluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'relu' self.use_dynamic_create_class = False class XPUTestRelu(TestActivationOPBase): - def set_case(self): self.op_type = "relu" self.dtype = self.in_type @@ -235,13 +223,11 @@ for stype in support_types: class XPUTestGeluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'gelu' self.use_dynamic_create_class = False class XPUTestGelu(TestActivationOPBase): - def set_case(self): self.op_type = "gelu" self.dtype = self.in_type @@ -262,22 +248,27 @@ for stype in support_types: def gelu(x, approximate): from scipy.special import erf + if approximate: - y_ref = 0.5 * x * ( - 1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + y_ref = ( + 0.5 + * x + * ( + 1.0 + + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))) + ) + ) else: y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2))) return y_ref.astype(x.dtype) class XPUTestHardSwishOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'hard_swish' self.use_dynamic_create_class = False class XPUTestHardSwish(TestActivationOPBase): - def set_case(self): self.op_type = "hard_swish" self.dtype = self.in_type @@ -304,13 +295,11 @@ def hard_swish(x, offset, threshold, scale): class XPUTestLogOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'log' self.use_dynamic_create_class = False class XPUTestLog(TestActivationOPBase): - def set_case(self): self.op_type = "log" self.dtype = self.in_type @@ -322,22 +311,18 @@ class XPUTestLogOP(XPUOpTestWrapper): self.outputs = {'Out': out} class TestLogCase1(XPUTestLog): - def set_shape(self): self.shape = [1, 11, 17] class TestLogCase2(XPUTestLog): - def set_shape(self): self.shape = [2, 2, 2] class TestLogCase3(XPUTestLog): - def set_shape(self): self.shape = [2] class TestLogCase4(XPUTestLog): - def set_shape(self): self.shape = [1, 2, 3, 4] @@ -348,13 +333,11 @@ for stype in support_types: class XPUTestSquareOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'square' self.use_dynamic_create_class = False class XPUTestSquare(TestActivationOPBase): - def set_case(self): self.op_type = "square" self.dtype = self.in_type @@ -369,22 +352,18 @@ class XPUTestSquareOP(XPUOpTestWrapper): self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) class XPUTestSquare2(XPUTestSquare): - def init_config(self): self.x = np.random.uniform(-2, 2, [100]).astype(self.dtype) class XPUTestSquare3(XPUTestSquare): - def init_config(self): self.x = np.random.uniform(-2, 2, [1, 15, 19]).astype(self.dtype) class XPUTestSquare4(XPUTestSquare): - def init_config(self): 
self.x = np.random.uniform(-2, 2, [100, 10]).astype(self.dtype) class XPUTestSquare5(XPUTestSquare): - def init_config(self): self.x = np.random.uniform(-2, 2, [1, 2, 5, 17]).astype(self.dtype) @@ -395,13 +374,11 @@ for stype in support_types: class XPUTestPowOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'pow' self.use_dynamic_create_class = False class XPUTestPowBase(TestActivationOPBase): - def set_case(self): self.op_type = "pow" self.dtype = self.in_type @@ -418,40 +395,37 @@ class XPUTestPowOP(XPUOpTestWrapper): self.factor = 3.0 class XPUTestPow1(XPUTestPowBase): - def init_config(self): self.x = np.random.uniform(-1, 1, [1024, 8]).astype(self.dtype) self.factor = 1 class XPUTestPow2(XPUTestPowBase): - def init_config(self): self.x = np.random.uniform(-1, 1, [1024, 8]).astype(self.dtype) self.factor = 2 class XPUTestPow3(XPUTestPowBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 512, 15, 15]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 512, 15, 15]).astype( + self.dtype + ) self.factor = 3 class XPUTestPow4(XPUTestPowBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 256, 22, 22]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 256, 22, 22]).astype( + self.dtype + ) self.factor = 4 class XPUTestPow5(XPUTestPowBase): - def init_config(self): - self.x = np.random.uniform(0, 1, - [4, 256, 22, 22]).astype(self.dtype) + self.x = np.random.uniform(0, 1, [4, 256, 22, 22]).astype( + self.dtype + ) self.factor = 1.2 class XPUTestPow6(XPUTestPowBase): - def init_config(self): self.x = np.random.uniform(0, 1, [1024, 8]).astype(self.dtype) self.factor = 3.2 @@ -463,13 +437,11 @@ for stype in support_types: class XPUTestLeakyReluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'leaky_relu' self.use_dynamic_create_class = False class XPUTestLeakyRelu(TestActivationOPBase): - def set_case(self): self.op_type = "leaky_relu" self.dtype = self.in_type @@ -492,7 +464,7 @@ for stype in support_types: def leaky_relu(x, alpha): - if (alpha < 1): + if alpha < 1: y_ref = np.maximum(x, alpha * x) else: y_ref = np.minimum(x, alpha * x) @@ -500,13 +472,11 @@ def leaky_relu(x, alpha): class XPUTestReciprocalOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reciprocal' self.use_dynamic_create_class = False class XPUTestRecipocal(TestActivationOPBase): - def set_case(self): self.op_type = "reciprocal" self.dtype = self.in_type @@ -526,13 +496,11 @@ for stype in support_types: class XPUTestSoftPlusOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'softplus' self.use_dynamic_create_class = False class XPUTestSoftPlusBase(TestActivationOPBase): - def set_case(self): self.op_type = "softplus" self.dtype = self.in_type @@ -550,21 +518,20 @@ class XPUTestSoftPlusOP(XPUOpTestWrapper): self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) class XPUTestSoftPlus2(XPUTestSoftPlusBase): - def init_config(self): self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype) class XPUTestSoftPlus3(XPUTestSoftPlusBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 512, 15, 15]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 512, 15, 15]).astype( + self.dtype + ) class XPUTestSoftPlus4(XPUTestSoftPlusBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 256, 22, 22]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 256, 22, 22]).astype( + self.dtype + ) support_types = get_xpu_op_support_types('softplus') @@ -574,20 +541,20 @@ for stype in 
support_types: def ref_softplus(x, beta=1, threshold=20): x_beta = beta * x - out = np.select([x_beta <= threshold, x_beta > threshold], - [np.log(1 + np.exp(x_beta)) / beta, x]) + out = np.select( + [x_beta <= threshold, x_beta > threshold], + [np.log(1 + np.exp(x_beta)) / beta, x], + ) return out # XPU_KP unittests, these ops can be found from xpu_op_kpfirst_list.h class XPUTestBReluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'brelu' self.use_dynamic_create_class = False class XPUTestBRelu(TestActivationOPBase): - def set_case(self): self.op_type = "brelu" self.dtype = self.in_type @@ -614,13 +581,11 @@ for stype in support_types: class XPUTestCeilOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'ceil' self.use_dynamic_create_class = False class XPUTestCeil(TestActivationOPBase): - def set_case(self): self.op_type = "ceil" self.dtype = self.in_type @@ -640,13 +605,11 @@ for stype in support_types: class XPUTestCeluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'celu' self.use_dynamic_create_class = False class XPUTestCelu(TestActivationOPBase): - def set_case(self): self.op_type = "celu" self.dtype = self.in_type @@ -671,18 +634,16 @@ def ref_celu(x, alpha): class XPUTestEluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elu' self.use_dynamic_create_class = False class XPUTestElu(TestActivationOPBase): - def set_case(self): self.op_type = "elu" self.dtype = self.in_type - alpha = 1. + alpha = 1.0 x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype) out = ref_elu(x, alpha) @@ -702,13 +663,11 @@ def ref_elu(x, alpha): class XPUTestFloorOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'floor' self.use_dynamic_create_class = False class XPUTestFloor(TestActivationOPBase): - def set_case(self): self.op_type = "floor" self.dtype = self.in_type @@ -728,13 +687,11 @@ for stype in support_types: class XPUTestHardShrinkOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'hard_shrink' self.use_dynamic_create_class = False class XPUTestHardShrink(TestActivationOPBase): - def set_case(self): self.op_type = "hard_shrink" self.dtype = self.in_type @@ -762,13 +719,11 @@ def ref_hardshrink(x, threshold): class XPUTestHardSigmoidOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'hard_sigmoid' self.use_dynamic_create_class = False class XPUTestHardSigmoid(TestActivationOPBase): - def set_case(self): self.op_type = "hard_sigmoid" self.dtype = self.in_type @@ -777,7 +732,7 @@ class XPUTestHardSigmoidOP(XPUOpTestWrapper): x = np.random.uniform(-5, 5, [10, 12]).astype(self.dtype) lower_threshold = -self.offset / self.slope - upper_threshold = (1. 
- self.offset) / self.slope + upper_threshold = (1.0 - self.offset) / self.slope # Same reason as TestAbs delta = 0.005 @@ -789,7 +744,7 @@ class XPUTestHardSigmoidOP(XPUOpTestWrapper): self.attrs = { 'use_xpu': True, 'slope': self.slope, - 'offset': self.offset + 'offset': self.offset, } self.inputs = {'X': x} self.outputs = {'Out': out} @@ -801,17 +756,15 @@ for stype in support_types: def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5): - return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype) + return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype) class XPUTestLog1pOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'log1p' self.use_dynamic_create_class = False class XPUTestLog1p(TestActivationOPBase): - def set_case(self): self.op_type = "log1p" self.dtype = self.in_type @@ -831,13 +784,11 @@ for stype in support_types: class XPUTestLogsigmoidOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'logsigmoid' self.use_dynamic_create_class = False class XPUTestLogsigmoid(TestActivationOPBase): - def set_case(self): self.op_type = "logsigmoid" self.dtype = self.in_type @@ -857,13 +808,11 @@ for stype in support_types: class XPUTestRelu6OP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'relu6' self.use_dynamic_create_class = False class XPUTestRelu6(TestActivationOPBase): - def set_case(self): self.op_type = "relu6" self.dtype = self.in_type @@ -891,13 +840,11 @@ def ref_relu6(x, threshold=6.0): class XPUTestSiluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'silu' self.use_dynamic_create_class = False class XPUTestSilu(TestActivationOPBase): - def set_case(self): self.op_type = "silu" self.dtype = self.in_type @@ -917,13 +864,11 @@ for stype in support_types: class XPUTestSoftReluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'soft_relu' self.use_dynamic_create_class = False class XPUTestSoftRelu(TestActivationOPBase): - def set_case(self): self.op_type = "soft_relu" self.dtype = self.in_type @@ -950,13 +895,11 @@ for stype in support_types: class XPUTestSoftSignOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'softsign' self.use_dynamic_create_class = False class XPUTestSoftSign(TestActivationOPBase): - def set_case(self): self.op_type = "softsign" self.dtype = self.in_type @@ -981,13 +924,11 @@ def ref_softsign(x): class XPUTestSoftshrinkOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'softshrink' self.use_dynamic_create_class = False class XPUTestSoftshrink(TestActivationOPBase): - def set_case(self): self.op_type = "softshrink" self.dtype = self.in_type @@ -1010,18 +951,17 @@ for stype in support_types: def ref_softshrink(x, threshold=0.5): out = np.copy(x) out = (out < -threshold) * (out + threshold) + (out > threshold) * ( - out - threshold) + out - threshold + ) return out class XPUTestSwishOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'swish' self.use_dynamic_create_class = False class XPUTestSwishBase(TestActivationOPBase): - def set_case(self): self.op_type = "swish" self.dtype = self.in_type @@ -1037,21 +977,20 @@ class XPUTestSwishOP(XPUOpTestWrapper): self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) class XPUTestSwish2(XPUTestSwishBase): - def init_config(self): self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype) class XPUTestSwish3(XPUTestSwishBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 512, 15, 15]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 512, 15, 15]).astype( + self.dtype + 
) class XPUTestSwish4(XPUTestSwishBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 256, 22, 22]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 256, 22, 22]).astype( + self.dtype + ) support_types = get_xpu_op_support_types('swish') @@ -1061,18 +1000,17 @@ for stype in support_types: def ref_swish(x): from scipy.special import expit + out = x * expit(x) return out class XPUTestThresholdedReluOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'thresholded_relu' self.use_dynamic_create_class = False class XPUTestThresholdedRelu(TestActivationOPBase): - def set_case(self): self.op_type = "thresholded_relu" self.dtype = self.in_type @@ -1099,13 +1037,11 @@ def ref_thresholded_relu(x, threshold=1.0): class XPUTestMishOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'mish' self.use_dynamic_create_class = False class XPUTestMishBase(TestActivationOPBase): - def set_case(self): self.op_type = "mish" self.dtype = self.in_type @@ -1122,21 +1058,20 @@ class XPUTestMishOP(XPUOpTestWrapper): self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) class XPUTestMish2(XPUTestMishBase): - def init_config(self): self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype) class XPUTestMish3(XPUTestMishBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 512, 15, 15]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 512, 15, 15]).astype( + self.dtype + ) class XPUTestMish4(XPUTestMishBase): - def init_config(self): - self.x = np.random.uniform(-2, 2, - [4, 256, 22, 22]).astype(self.dtype) + self.x = np.random.uniform(-2, 2, [4, 256, 22, 22]).astype( + self.dtype + ) support_types = get_xpu_op_support_types('mish') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py index 332fc3fd489c4131b8cf5ed16a5f74432f9cbcd7..fb42d564577467a10f04080145b594d3e4666317 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py @@ -22,18 +22,20 @@ from paddle.fluid.op import Operator import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestAdamOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'adam' self.use_dynamic_create_class = False class TestAdamOp(XPUOpTest): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' def setUp(self): self.init_dtype() @@ -45,20 +47,18 @@ class XPUTestAdamOp(XPUOpTestWrapper): self.set_shape() self.set_inputs() self.set_steps() - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step( + self.inputs, self.attrs + ) self.outputs = { - 'Moment1Out': - moment1_out, - 'Moment2Out': - moment2_out, - 'ParamOut': - param_out, - 'Beta1PowOut': - np.array([self.beta1_pow]).astype("float32") * self.beta1, - 'Beta2PowOut': - np.array([self.beta2_pow]).astype("float32") * self.beta2 + 'Moment1Out': moment1_out, + 'Moment2Out': moment2_out, + 'ParamOut': param_out, + 'Beta1PowOut': np.array([self.beta1_pow]).astype("float32") + * self.beta1, + 'Beta2PowOut': np.array([self.beta2_pow]).astype("float32") + * self.beta2, } def set_xpu(self): @@ -73,7 +73,7 @@ class XPUTestAdamOp(XPUOpTestWrapper): self.attrs = { 'epsilon': 
self.epsilon, 'beta1': self.beta1, - 'beta2': self.beta2 + 'beta2': self.beta2, } def set_data(self): @@ -103,18 +103,18 @@ class XPUTestAdamOp(XPUOpTestWrapper): 'Grad': grad, 'Moment1': moment1, 'Moment2': moment2, - 'LearningRate': - np.array([self.learning_rate]).astype("float32"), + 'LearningRate': np.array([self.learning_rate]).astype( + "float32" + ), 'Beta1Pow': np.array([self.beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([self.beta2_pow]).astype("float32") + 'Beta2Pow': np.array([self.beta2_pow]).astype("float32"), } def test_check_output(self): self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2) class TestAdamOp2(TestAdamOp): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' def set_data(self): self.beta1 = 0.9 @@ -123,29 +123,25 @@ class XPUTestAdamOp(XPUOpTestWrapper): self.epsilon = 1e-8 class TestAdamOp3(TestAdamOp2): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' def set_shape(self): self.shape = (101, 47) class TestAdamOp4(TestAdamOp2): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' def set_shape(self): self.shape = (512, 26) class TestAdamOp5(TestAdamOp2): - '''Test Adam Op with supplied attributes - ''' + '''Test Adam Op with supplied attributes''' def set_shape(self): self.shape = (11, 1) class TestAdamOp6(TestAdamOp2): - '''Test Adam Op with beta as Variable - ''' + '''Test Adam Op with beta as Variable''' def set_shape(self): self.shape = (10, 10) @@ -157,8 +153,7 @@ class XPUTestAdamOp(XPUOpTestWrapper): self.epsilon = 1e-8 class TestAdamOp7(TestAdamOp): - '''Test Adam Op with float16 accuracy - ''' + '''Test Adam Op with float16 accuracy''' def setUp(self): self.init_dtype() @@ -170,20 +165,18 @@ class XPUTestAdamOp(XPUOpTestWrapper): self.set_shape() self.set_inputs() self.set_steps() - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step( + self.inputs, self.attrs + ) self.outputs = { - 'Moment1Out': - moment1_out, - 'Moment2Out': - moment2_out, - 'ParamOut': - param_out, - 'Beta1PowOut': - np.array([self.beta1_pow]).astype("float16") * self.beta1, - 'Beta2PowOut': - np.array([self.beta2_pow]).astype("float16") * self.beta2 + 'Moment1Out': moment1_out, + 'Moment2Out': moment2_out, + 'ParamOut': param_out, + 'Beta1PowOut': np.array([self.beta1_pow]).astype("float16") + * self.beta1, + 'Beta2PowOut': np.array([self.beta2_pow]).astype("float16") + * self.beta2, } def set_inputs(self): @@ -201,23 +194,24 @@ class XPUTestAdamOp(XPUOpTestWrapper): 'Grad': grad, 'Moment1': moment1, 'Moment2': moment2, - 'LearningRate': - np.array([self.learning_rate]).astype("float16"), + 'LearningRate': np.array([self.learning_rate]).astype( + "float16" + ), 'Beta1Pow': np.array([self.beta1_pow]).astype("float16"), - 'Beta2Pow': np.array([self.beta2_pow]).astype("float16") + 'Beta2Pow': np.array([self.beta2_pow]).astype("float16"), } class TestAdamOpMultipleSteps(TestAdamOp2): - '''Test Adam Operator with supplied attributes - ''' + '''Test Adam Operator with supplied attributes''' def set_steps(self): self.num_steps = 10 def test_check_output(self): for _ in range(self.num_steps): - param_out, moment1_out, \ - moment2_out = adam_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adam_step( + self.inputs, self.attrs + ) beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1 beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2 @@ -226,12 
+220,13 @@ class XPUTestAdamOp(XPUOpTestWrapper): 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } # Verify output for this step - self.check_output_with_place(place=paddle.XPUPlace(0), - atol=1e-2) + self.check_output_with_place( + place=paddle.XPUPlace(0), atol=1e-2 + ) # Output of this step becomes input for next step self.inputs['Param'] = param_out @@ -244,7 +239,8 @@ class XPUTestAdamOp(XPUOpTestWrapper): # Randomize gradient for next step self.inputs['Grad'] = np.random.uniform( - -1, 1, (102, 105)).astype("float32") + -1, 1, (102, 105) + ).astype("float32") def adam_step(inputs, attributes): @@ -281,8 +277,9 @@ def adam_step(inputs, attributes): return param_out, moment1_out, moment2_out -def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, - lazy_mode): +def adam_step_sparse( + inputs, attributes, height, rows, row_numel, np_grad, lazy_mode +): ''' Simulate one step of the adam optimizer :param inputs: dict of inputs @@ -307,13 +304,16 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, param_out = np.zeros(shape=[height, row_numel]) def update_row(row_id, update_value): - moment1_out[row_id] = beta1 * moment1[row_id] + (1 - - beta1) * update_value - moment2_out[row_id] = beta2 * moment2[row_id] + ( - 1 - beta2) * np.square(update_value) + moment1_out[row_id] = ( + beta1 * moment1[row_id] + (1 - beta1) * update_value + ) + moment2_out[row_id] = beta2 * moment2[row_id] + (1 - beta2) * np.square( + update_value + ) lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow) param_out[row_id] = param[row_id] - lr_t * ( - moment1_out[row_id] / (np.sqrt(moment2_out[row_id]) + epsilon)) + moment1_out[row_id] / (np.sqrt(moment2_out[row_id]) + epsilon) + ) if lazy_mode: for idx, row_id in enumerate(rows): @@ -329,7 +329,6 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad, class TestSparseAdamOp(unittest.TestCase): - def setup(self, scope, place, lazy_mode): beta1 = 0.78 beta2 = 0.836 @@ -348,14 +347,14 @@ class TestSparseAdamOp(unittest.TestCase): "Moment2": np.full((height, row_numel), 5.0).astype("float32"), 'Beta1Pow': beta1_pow, 'Beta2Pow': beta2_pow, - "LearningRate": np.full((1), 2.0).astype("float32") + "LearningRate": np.full((1), 2.0).astype("float32"), } self.init_output = np.full((height, row_numel), 0.0).astype("float32") self.attrs = { 'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2, - 'min_row_size_to_use_multithread': 2 + 'min_row_size_to_use_multithread': 2, } grad_selected_rows = scope.var('Grad').get_selected_rows() @@ -370,15 +369,21 @@ class TestSparseAdamOp(unittest.TestCase): self.sparse_inputs = ["Grad"] - param_out, mom1, mom2 = adam_step_sparse(self.dense_inputs, self.attrs, - height, rows, row_numel, - np_array, lazy_mode) + param_out, mom1, mom2 = adam_step_sparse( + self.dense_inputs, + self.attrs, + height, + rows, + row_numel, + np_array, + lazy_mode, + ) self.outputs = { "ParamOut": param_out, "Moment1Out": mom1, "Moment2Out": mom2, 'Beta1PowOut': beta1_pow * beta1, - 'Beta2PowOut': beta2_pow * beta2 + 'Beta2PowOut': beta2_pow * beta2, } def check_with_place(self, place, lazy_mode): @@ -421,7 +426,6 @@ class TestSparseAdamOp(unittest.TestCase): class TestSparseAdamOp1(TestSparseAdamOp): - def setup(self, scope, place, lazy_mode): beta1 = 0.78 beta2 = 0.836 @@ -440,14 +444,14 @@ class TestSparseAdamOp1(TestSparseAdamOp): "Moment2": np.full((height, row_numel), 5.0).astype("float16"), 'Beta1Pow': 
beta1_pow, 'Beta2Pow': beta2_pow, - "LearningRate": np.full((1), 2.0).astype("float16") + "LearningRate": np.full((1), 2.0).astype("float16"), } self.init_output = np.full((height, row_numel), 0.0).astype("float16") self.attrs = { 'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2, - 'min_row_size_to_use_multithread': 2 + 'min_row_size_to_use_multithread': 2, } grad_selected_rows = scope.var('Grad').get_selected_rows() @@ -462,15 +466,21 @@ class TestSparseAdamOp1(TestSparseAdamOp): self.sparse_inputs = ["Grad"] - param_out, mom1, mom2 = adam_step_sparse(self.dense_inputs, self.attrs, - height, rows, row_numel, - np_array, lazy_mode) + param_out, mom1, mom2 = adam_step_sparse( + self.dense_inputs, + self.attrs, + height, + rows, + row_numel, + np_array, + lazy_mode, + ) self.outputs = { "ParamOut": param_out, "Moment1Out": mom1, "Moment2Out": mom2, 'Beta1PowOut': beta1_pow * beta1, - 'Beta2PowOut': beta2_pow * beta2 + 'Beta2PowOut': beta2_pow * beta2, } diff --git a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py index bc4c9095e8fa5bd71a3331032b4a636beda7b49a..7280258272b40b815daceae95ba011bb92177e99 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py @@ -23,7 +23,11 @@ import paddle.fluid as fluid from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def adamw_step(inputs, attributes): @@ -70,19 +74,17 @@ def simple_lr_setting(param, decay_rate, n_layers): else: depth = 0 - return decay_rate**(n_layers + 2 - depth) + return decay_rate ** (n_layers + 2 - depth) class XPUTestAdamwOp1(XPUOpTestWrapper): - def __init__(self): self.op_name = 'adamw' self.use_dynamic_create_class = False class TestAdamW(XPUOpTest): - def setUp(self): - #Test AdamW Op with supplied attributes + # Test AdamW Op with supplied attributes self.op_type = "adamw" self.init_shape() self.dtype = self.in_type_str @@ -106,7 +108,7 @@ class XPUTestAdamwOp1(XPUOpTestWrapper): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype(self.dtype), 'Beta1Pow': np.array([beta1_pow]).astype(self.dtype), - 'Beta2Pow': np.array([beta2_pow]).astype(self.dtype) + 'Beta2Pow': np.array([beta2_pow]).astype(self.dtype), } self.attrs = { @@ -114,18 +116,19 @@ class XPUTestAdamwOp1(XPUOpTestWrapper): 'beta1': beta1, 'beta2': beta2, "coeff": 0.5, - "with_decay": True + "with_decay": True, } - param_out, moment1_out, \ - moment2_out = adamw_step(self.inputs, self.attrs) + param_out, moment1_out, moment2_out = adamw_step( + self.inputs, self.attrs + ) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': np.array([beta1_pow]).astype(self.dtype) * beta1, - 'Beta2PowOut': np.array([beta2_pow]).astype(self.dtype) * beta2 + 'Beta2PowOut': np.array([beta2_pow]).astype(self.dtype) * beta2, } def init_shape(self): @@ -136,26 +139,22 @@ class XPUTestAdamwOp1(XPUOpTestWrapper): self.check_output_with_place(place=paddle.XPUPlace(0)) class TestAdamW2(TestAdamW): - def init_shape(self): self.shape = [ 1000, ] class TestAdamW3(TestAdamW): - def init_shape(self): self.shape = [200, 3000] class XPUTestAdamwOp2(XPUOpTestWrapper): - def __init__(self): self.op_name = 'adamw' self.use_dynamic_create_class = False class 
TestAdamWOp(unittest.TestCase): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype(self.in_type_str) @@ -165,7 +164,8 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): learning_rate=0.01, parameters=linear.parameters(), apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + weight_decay=0.01, + ) for _ in range(2): out = linear(a) @@ -182,8 +182,9 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): learning_rate=0.0, parameters=linear.parameters(), apply_decay_param_fun=lambda name: True, - weight_decay=0.01) - assert (adam.__str__() is not None) + weight_decay=0.01, + ) + assert adam.__str__() is not None def test_adamw_op(self): paddle.enable_static() @@ -202,25 +203,29 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): shape=[1], value=0.85, dtype=self.in_type_str, - persistable=True) + persistable=True, + ) beta2 = fluid.layers.create_global_var( shape=[1], value=0.95, dtype=self.in_type_str, - persistable=True) + persistable=True, + ) betas = [beta1, beta2] - opt = paddle.optimizer.AdamW(learning_rate=1e-5, - beta1=beta1, - beta2=beta2, - weight_decay=0.01, - epsilon=1e-8) + opt = paddle.optimizer.AdamW( + learning_rate=1e-5, + beta1=beta1, + beta2=beta2, + weight_decay=0.01, + epsilon=1e-8, + ) opt.minimize(loss) exe.run(startup) data_np = np.random.random(shape).astype(self.in_type_str) - rets = exe.run(train_prog, - feed={"data": data_np}, - fetch_list=[loss]) + rets = exe.run( + train_prog, feed={"data": data_np}, fetch_list=[loss] + ) assert rets[0] is not None paddle.disable_static() @@ -228,20 +233,19 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): paddle.disable_static() linear = paddle.nn.Linear(10, 10) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - beta1=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, beta1=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - beta2=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, beta2=-1, parameters=linear.parameters() + ) with self.assertRaises(ValueError): - adam = paddle.optimizer.AdamW(0.1, - epsilon=-1, - parameters=linear.parameters()) + adam = paddle.optimizer.AdamW( + 0.1, epsilon=-1, parameters=linear.parameters() + ) class TestAdamWOpGroup(TestAdamWOp): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype(self.in_type_str) @@ -250,14 +254,13 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): linear_2 = paddle.nn.Linear(5, 3) adam = paddle.optimizer.AdamW( learning_rate=0.01, - parameters=[{ - 'params': linear_1.parameters() - }, { - 'params': linear_2.parameters(), - 'weight_decay': 0.001 - }], + parameters=[ + {'params': linear_1.parameters()}, + {'params': linear_2.parameters(), 'weight_decay': 0.001}, + ], apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + weight_decay=0.01, + ) for _ in range(2): out = linear_1(a) @@ -267,7 +270,6 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): adam.clear_gradients() class TestAdamWOpGroupWithLR(TestAdamWOp): - def test_adamw_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype(self.in_type_str) @@ -276,16 +278,21 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): linear_2 = paddle.nn.Linear(5, 3) adam = paddle.optimizer.AdamW( learning_rate=paddle.optimizer.lr.PiecewiseDecay( - boundaries=[3, 6], values=[0.1, 0.2, 0.3]), - parameters=[{ - 'params': linear_1.parameters(), - 'learning_rate': 0.1, - }, { - 'params': linear_2.parameters(), 
- 'weight_decay': 0.001, - }], + boundaries=[3, 6], values=[0.1, 0.2, 0.3] + ), + parameters=[ + { + 'params': linear_1.parameters(), + 'learning_rate': 0.1, + }, + { + 'params': linear_2.parameters(), + 'weight_decay': 0.001, + }, + ], apply_decay_param_fun=lambda name: True, - weight_decay=0.01) + weight_decay=0.01, + ) for _ in range(2): out = linear_1(a) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py index 447f18bd3c2af4f3c6b5ae5184c9c07b862317e0..0718c040bcacacbe34e2164efd98c9e0e5087709 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py @@ -39,7 +39,6 @@ def affine_channel(x, scale, bias, layout): class TestAffineChannelOp(XPUOpTest): - def setUp(self): self.op_type = "affine_channel" self.init_test_case() @@ -70,17 +69,17 @@ class TestAffineChannelOp(XPUOpTest): if core.is_compiled_with_xpu(): paddle.enable_static() place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['Scale', 'Bias'], - 'Out', - no_grad_set=set('X')) + self.check_grad_with_place( + place, ['Scale', 'Bias'], 'Out', no_grad_set=set('X') + ) def test_check_grad_stopgrad_dscale_dbias(self): if core.is_compiled_with_xpu(): paddle.enable_static() place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - no_grad_set=set(['Scale', 'Bias'])) + self.check_grad_with_place( + place, ['X'], 'Out', no_grad_set=set(['Scale', 'Bias']) + ) def init_test_case(self): self.shape = [2, 100, 3, 3] @@ -89,7 +88,6 @@ class TestAffineChannelOp(XPUOpTest): class TestAffineChannelOpError(unittest.TestCase): - def test_errors(self): with fluid.program_guard(fluid.Program()): @@ -100,32 +98,31 @@ class TestAffineChannelOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data(name='x2', - shape=[None, 1, 2, 2], - dtype='int32') + x2 = fluid.layers.data( + name='x2', shape=[None, 1, 2, 2], dtype='int32' + ) fluid.layers.affine_channel(x2) self.assertRaises(TypeError, test_x_dtype) def test_scale_type(): - x3 = fluid.layers.data(name='x3', - shape=[None, 1, 2, 2], - dtype='float32') + x3 = fluid.layers.data( + name='x3', shape=[None, 1, 2, 2], dtype='float32' + ) fluid.layers.affine_channel(x3, scale=1) self.assertRaises(TypeError, test_scale_type) def test_bias_type(): - x4 = fluid.layers.data(name='x4', - shape=[None, 1, 2, 2], - dtype='float32') + x4 = fluid.layers.data( + name='x4', shape=[None, 1, 2, 2], dtype='float32' + ) fluid.layers.affine_channel(x4, bias=1) self.assertRaises(TypeError, test_bias_type) class TestAffineChannelNHWC(TestAffineChannelOp): - def init_test_case(self): self.shape = [2, 3, 3, 100] self.C = 100 @@ -139,7 +136,6 @@ class TestAffineChannelNHWC(TestAffineChannelOp): class TestAffineChannel2D(TestAffineChannelOp): - def init_test_case(self): self.shape = [2, 100] self.C = 100 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_amp_check_finite_and_scale_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_amp_check_finite_and_scale_op_xpu.py index e6bc61b895abbe8506352195979db42d9448c4fc..b6e95e256901a92800c0c1fc493a9d3022e70e98 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_amp_check_finite_and_scale_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_amp_check_finite_and_scale_op_xpu.py @@ -19,19 +19,21 @@ import paddle import unittest import numpy as np from op_test_xpu import 
XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'check_finite_and_unscale' self.use_dynamic_create_class = False class TestCheckFiniteAndUnscaleOpNormal(XPUOpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() @@ -52,7 +54,6 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): self.check_output_with_place(place) class TestCheckFiniteAndUnscaleOpWithNan(XPUOpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() @@ -80,7 +81,6 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): self.check_output_with_place(place, no_check_set=['Out']) class TestCheckFiniteAndUnscaleOpWithInf(XPUOpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() @@ -108,7 +108,6 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): self.check_output_with_place(place, no_check_set=['Out']) class TestCheckFiniteAndUnscaleOpWithInfAndNan(XPUOpTest): - def setUp(self): self.op_type = "check_finite_and_unscale" self.init_dtype() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py index cdc16d4507267686af5882021444ce7bc02938fa..d041e859d9862be29f7c66a28f07ff059fe1566c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestArgMax(XPUOpTestWrapper): - def __init__(self): self.op_name = 'arg_max' class XPUBaseTestCase(XPUOpTest): - def initTestCase(self): self.dims = (3, 4) self.axis = 1 @@ -52,63 +54,53 @@ class XPUTestArgMax(XPUOpTestWrapper): self.check_output_with_place(place) class TestArgMaxCase1(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.axis = -1 class TestArgMaxCase2(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.axis = 0 class TestArgMaxCase3(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.axis = 1 class TestArgMaxCase4(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.axis = 2 class TestArgMaxCase5(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4) self.axis = -1 class TestArgMaxCase6(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4) self.axis = 0 class TestArgMaxCase7(XPUBaseTestCase): - def initTestCase(self): self.dims = (3, 4) self.axis = 1 class TestArgMaxCase8(XPUBaseTestCase): - def initTestCase(self): - self.dims = (1, ) + self.dims = (1,) self.axis = 0 class TestArgMaxCase9(XPUBaseTestCase): - def initTestCase(self): - self.dims = (2, ) + self.dims = (2,) self.axis = 0 class TestArgMaxCase10(XPUBaseTestCase): - def initTestCase(self): - self.dims = (3, ) + self.dims = (3,) self.axis = 0 @@ -118,7 +110,6 @@ for stype in support_types: class TestArgMaxAPI(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -130,7 +121,6 @@ class 
TestArgMaxAPI(unittest.TestCase): self.place = [paddle.XPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) @@ -138,9 +128,9 @@ class TestArgMaxAPI(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input, axis=self.axis) paddle_output = paddle.argmax(tensor_input, axis=self.axis) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) paddle.enable_static() for place in self.place: @@ -148,7 +138,6 @@ class TestArgMaxAPI(unittest.TestCase): class TestArgMaxAPI_2(unittest.TestCase): - def initTestCase(self): self.dims = (3, 4, 5) self.dtype = 'float32' @@ -161,20 +150,20 @@ class TestArgMaxAPI_2(unittest.TestCase): self.place = [paddle.XPUPlace(0)] def test_dygraph_api(self): - def run(place): paddle.disable_static(place) np.random.seed(2021) numpy_input = (np.random.random(self.dims)).astype(self.dtype) tensor_input = paddle.to_tensor(numpy_input) - numpy_output = np.argmax(numpy_input, - axis=self.axis).reshape(1, 4, 5) - paddle_output = paddle.argmax(tensor_input, - axis=self.axis, - keepdim=self.keep_dims) - np.testing.assert_allclose(numpy_output, - paddle_output.numpy(), - rtol=1e-05) + numpy_output = np.argmax(numpy_input, axis=self.axis).reshape( + 1, 4, 5 + ) + paddle_output = paddle.argmax( + tensor_input, axis=self.axis, keepdim=self.keep_dims + ) + np.testing.assert_allclose( + numpy_output, paddle_output.numpy(), rtol=1e-05 + ) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py index 039d3c2051e49aabf5bf0a9c8f0febc80ea2da75..8ee7716447f87dcab31fef20b97279205a2cef3c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py @@ -20,13 +20,16 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestArgsortOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'argsort' self.use_dynamic_create_class = True @@ -36,14 +39,14 @@ class XPUTestArgsortOp(XPUOpTestWrapper): classes = [] for descending in [True, False]: for axis in [0, 1, 2, -1, -2]: - class_name = 'XPUTestArgsortOp_axis_' + str(axis) + '_' + str( - descending) + class_name = ( + 'XPUTestArgsortOp_axis_' + str(axis) + '_' + str(descending) + ) attr_dict = {'init_axis': axis, 'init_descending': descending} classes.append([class_name, attr_dict]) return base_class, classes class TestArgsortOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "argsort" @@ -51,16 +54,18 @@ class XPUTestArgsortOp(XPUOpTestWrapper): self.dtype = self.in_type self.input_shape = (2, 2, 2, 3, 3) self.axis = -1 if not hasattr(self, 'init_axis') else self.init_axis - self.descending = False if not hasattr( - self, 'init_descending') else self.init_descending + self.descending = ( + False + if not hasattr(self, 'init_descending') + else self.init_descending + ) if self.dtype == np.float32: self.x = np.random.random(self.input_shape).astype(self.dtype) else: - self.x = np.random.randint(low=-1000, - high=1000, - 
size=self.input_shape).astype( - self.dtype) + self.x = np.random.randint( + low=-1000, high=1000, size=self.input_shape + ).astype(self.dtype) self.inputs = {"X": self.x} self.attrs = {"axis": self.axis, "descending": self.descending} @@ -71,13 +76,15 @@ class XPUTestArgsortOp(XPUOpTestWrapper): if self.descending: self.indices = np.flip( np.argsort(self.x, kind='heapsort', axis=self.axis), - self.axis) + self.axis, + ) self.sorted_x = np.flip( - np.sort(self.x, kind='heapsort', axis=self.axis), self.axis) + np.sort(self.x, kind='heapsort', axis=self.axis), self.axis + ) else: - self.indices = np.argsort(self.x, - kind='heapsort', - axis=self.axis) + self.indices = np.argsort( + self.x, kind='heapsort', axis=self.axis + ) self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis) def set_xpu(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py index 306ebf957c385516f3dae88da2afc1eafdafe52d..74f2a41a9eee290f1ec14954fc230e02a2ad66d2 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py @@ -17,6 +17,7 @@ import sys sys.path.append("..") import unittest import paddle + ''' class TestAssignOp(op_test.OpTest): def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py index c06ef86b4fee9d1fd8874de111903491a685a1bc..abb8dff9ecb76039e3d5754e81b262f9b621afc2 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py @@ -21,20 +21,22 @@ import paddle.fluid as fluid import paddle.fluid.framework as framework import paddle.fluid.layers as layers from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() class XPUTestAssignValueOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'assign_value' self.use_dynamic_create_class = False class TestAssignValueOp(XPUOpTest): - def init(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -47,7 +49,8 @@ class XPUTestAssignValueOp(XPUOpTestWrapper): self.init_data() self.attrs["shape"] = self.value.shape self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_( - self.value.dtype) + self.value.dtype + ) self.outputs = {"Out": self.value} def init_data(self): @@ -58,31 +61,29 @@ class XPUTestAssignValueOp(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestAssignValueOp2(TestAssignValueOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp3(TestAssignValueOp): - def init_data(self): self.value = np.random.random(size=(2, 5)).astype(np.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp4(TestAssignValueOp): - def init_data(self): - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) self.attrs["bool_values"] = [int(v) for v in self.value.flat] class TestAssignApi(unittest.TestCase): - def setUp(self): self.init_dtype() self.value = (-100 + 200 * 
np.random.random(size=(2, 5))).astype( - self.dtype) + self.dtype + ) self.place = fluid.XPUPlace(0) def init_dtype(self): @@ -101,23 +102,21 @@ class TestAssignApi(unittest.TestCase): class TestAssignApi2(TestAssignApi): - def init_dtype(self): self.dtype = "int32" class TestAssignApi3(TestAssignApi): - def init_dtype(self): self.dtype = "int64" class TestAssignApi4(TestAssignApi): - def setUp(self): self.init_dtype() - self.value = np.random.choice(a=[False, True], - size=(2, 5)).astype(np.bool) + self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( + np.bool + ) self.place = fluid.XPUPlace(0) def init_dtype(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py index 09d813d23769dd19beb9f5f3327b53a328954efa..7d818cc02c868aefe71b3203582e8c108ca84ef6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py @@ -21,13 +21,18 @@ import paddle.fluid.core as core import paddle import paddle.fluid as fluid import paddle.nn.functional as F -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() -def ref_batch_norm_infer(x, scale, bias, mean, variance, momentum, epsilon, - data_layout): +def ref_batch_norm_infer( + x, scale, bias, mean, variance, momentum, epsilon, data_layout +): if data_layout == "NCHW": n, c, h, w = x.shape mean_tile = np.reshape(mean, (1, c, 1, 1)) @@ -46,12 +51,14 @@ def ref_batch_norm_infer(x, scale, bias, mean, variance, momentum, epsilon, else: raise ValueError( "Unsupported data layout! Only NCHW and NHWC is supported, but received " - + data_layout) + + data_layout + ) return y -def ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum, - epsilon, data_layout): +def ref_batch_norm_train( + x, y_grad, scale, bias, mean, variance, momentum, epsilon, data_layout +): # Forward if data_layout == "NCHW": n, c, h, w = x.shape @@ -65,8 +72,9 @@ def ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum, saved_mean_tile = np.tile(saved_mean_tile, (n, 1, h, w)) saved_variance_tile = np.reshape(saved_variance, (1, c, 1, 1)) saved_variance_tile = np.tile(saved_variance_tile, (n, 1, h, w)) - normalized_x = (x - saved_mean_tile) / np.sqrt(saved_variance_tile + - epsilon) + normalized_x = (x - saved_mean_tile) / np.sqrt( + saved_variance_tile + epsilon + ) scale_tile = np.reshape(scale, (1, c, 1, 1)) scale_tile = np.tile(scale_tile, (n, 1, h, w)) bias_tile = np.reshape(bias, (1, c, 1, 1)) @@ -84,10 +92,11 @@ def ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum, else: raise ValueError( "Unsupported data layout! Only NCHW and NHWC is supported, but received " - + data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = saved_variance * (1. - momentum) + momentum * variance - saved_inv_std = 1. 
/ np.sqrt(saved_variance + epsilon) + + data_layout + ) + mean_out = saved_mean * (1.0 - momentum) + momentum * mean + variance_out = saved_variance * (1.0 - momentum) + momentum * variance + saved_inv_std = 1.0 / np.sqrt(saved_variance + epsilon) # Backward # Use the following formulas to calculate gradients: # grad_scale = @@ -102,32 +111,48 @@ def ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum, if data_layout == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) y_grad = np.transpose(y_grad, (0, 2, 3, 1)) - x_grad = scale * ( - y_grad - np.mean(y_grad, axis=(0, 1, 2)) - - (x - saved_mean) * np.mean(y_grad * (x - saved_mean), axis=(0, 1, 2)) / - (saved_variance + epsilon)) / np.sqrt(saved_variance + epsilon) - scale_grad = np.sum(y_grad * (x - saved_mean) / - np.sqrt(saved_variance + epsilon), - axis=(0, 1, 2)) + x_grad = ( + scale + * ( + y_grad + - np.mean(y_grad, axis=(0, 1, 2)) + - (x - saved_mean) + * np.mean(y_grad * (x - saved_mean), axis=(0, 1, 2)) + / (saved_variance + epsilon) + ) + / np.sqrt(saved_variance + epsilon) + ) + scale_grad = np.sum( + y_grad * (x - saved_mean) / np.sqrt(saved_variance + epsilon), + axis=(0, 1, 2), + ) bias_grad = np.sum(y_grad, axis=(0, 1, 2)) # Transfer back to N, C, H, W if data_layout == "NCHW": x_grad = np.transpose(x_grad, (0, 3, 1, 2)) x = np.transpose(x, (0, 3, 1, 2)) y_grad = np.transpose(y_grad, (0, 3, 1, 2)) - return y, mean_out, variance_out, saved_mean, saved_inv_std, x_grad, scale_grad, bias_grad + return ( + y, + mean_out, + variance_out, + saved_mean, + saved_inv_std, + x_grad, + scale_grad, + bias_grad, + ) class XPUTestBatchNormOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'batch_norm' self.use_dynamic_create_class = False - @unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") + @unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" + ) class TestBatchNormOp(unittest.TestCase): - def setUp(self): self.op_type = "batch_norm" self.dtype = np.float32 @@ -146,13 +171,16 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): else: raise ValueError( "Unsupported data layout! 
Only NCHW and NHWC is supported, but received " - + self.data_layout) + + self.data_layout + ) np.random.seed(1024) self.x_np = np.random.random_sample(self.shape).astype(self.dtype) - self.scale_np = np.random.random_sample([channel_size - ]).astype(self.dtype) - self.bias_np = np.random.random_sample([channel_size - ]).astype(self.dtype) + self.scale_np = np.random.random_sample([channel_size]).astype( + self.dtype + ) + self.bias_np = np.random.random_sample([channel_size]).astype( + self.dtype + ) self.mean_np = np.zeros([channel_size]).astype(self.dtype) self.variance_np = np.ones([channel_size]).astype(self.dtype) self.saved_mean_np = np.zeros([channel_size]).astype(self.dtype) @@ -173,43 +201,81 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) - scale = paddle.fluid.data('Scale', self.scale_np.shape, - self.scale_np.dtype) - bias = paddle.fluid.data('Bias', self.bias_np.shape, - self.bias_np.dtype) - mean = paddle.fluid.data('Mean', self.mean_np.shape, - self.mean_np.dtype) - variance = paddle.fluid.data('Variance', self.variance_np.shape, - self.variance_np.dtype) - y = F.batch_norm(x, mean, variance, scale, bias, False, - self.momentum, self.epsilon, self.data_layout) + scale = paddle.fluid.data( + 'Scale', self.scale_np.shape, self.scale_np.dtype + ) + bias = paddle.fluid.data( + 'Bias', self.bias_np.shape, self.bias_np.dtype + ) + mean = paddle.fluid.data( + 'Mean', self.mean_np.shape, self.mean_np.dtype + ) + variance = paddle.fluid.data( + 'Variance', self.variance_np.shape, self.variance_np.dtype + ) + y = F.batch_norm( + x, + mean, + variance, + scale, + bias, + False, + self.momentum, + self.epsilon, + self.data_layout, + ) exe = paddle.static.Executor(self.place) - [y_np] = exe.run(feed={ - 'X': self.x_np, - 'Scale': self.scale_np, - 'Bias': self.bias_np, - 'Mean': self.mean_np, - 'Variance': self.variance_np - }, - fetch_list=[y]) - y_np_ref = ref_batch_norm_infer(self.x_np, self.scale_np, - self.bias_np, self.mean_np, - self.variance_np, self.momentum, - self.epsilon, self.data_layout) + [y_np] = exe.run( + feed={ + 'X': self.x_np, + 'Scale': self.scale_np, + 'Bias': self.bias_np, + 'Mean': self.mean_np, + 'Variance': self.variance_np, + }, + fetch_list=[y], + ) + y_np_ref = ref_batch_norm_infer( + self.x_np, + self.scale_np, + self.bias_np, + self.mean_np, + self.variance_np, + self.momentum, + self.epsilon, + self.data_layout, + ) np.testing.assert_allclose(y_np_ref, y_np, rtol=1e-05) def test_train(self): y_grad_np = np.random.random_sample(self.shape).astype(self.dtype) - y_np, mean_out_np, variance_out_np, saved_mean_np, saved_variance_np, x_grad_np, scale_grad_np, bias_grad_np = ref_batch_norm_train( - self.x_np, y_grad_np, self.scale_np, self.bias_np, self.mean_np, - self.variance_np, self.momentum, self.epsilon, self.data_layout) + ( + y_np, + mean_out_np, + variance_out_np, + saved_mean_np, + saved_variance_np, + x_grad_np, + scale_grad_np, + bias_grad_np, + ) = ref_batch_norm_train( + self.x_np, + y_grad_np, + self.scale_np, + self.bias_np, + self.mean_np, + self.variance_np, + self.momentum, + self.epsilon, + self.data_layout, + ) inputs = { 'X': self.x_np, 'Scale': self.scale_np, 'Bias': self.bias_np, 'Mean': self.mean_np, 'Variance': self.variance_np, - 'Y@GRAD': y_grad_np + 'Y@GRAD': y_grad_np, } outputs = { 'Y': y_np, @@ -219,7 +285,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): 'SavedVariance': saved_variance_np, 
'X@GRAD': x_grad_np, 'Scale@GRAD': scale_grad_np, - 'Bias@GRAD': bias_grad_np + 'Bias@GRAD': bias_grad_np, } attrs = { 'momentum': self.momentum, @@ -240,9 +306,11 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): arg_name = var_name np_value = inputs[var_name] if not block.has_var(var_name): - block.create_var(name=var_name, - shape=np_value.shape, - dtype=np_value.dtype) + block.create_var( + name=var_name, + shape=np_value.shape, + dtype=np_value.dtype, + ) input_vars[arg_name] = block.var(var_name) fetch_list = [] output_vars = {} @@ -250,22 +318,27 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): arg_name = var_name np_value = outputs[var_name] if not block.has_var(var_name): - block.create_var(name=var_name, - shape=np_value.shape, - dtype=np_value.dtype) + block.create_var( + name=var_name, + shape=np_value.shape, + dtype=np_value.dtype, + ) if var_name == 'Mean': arg_name = 'MeanOut' # Share memory if var_name == 'Variance': arg_name = 'VarianceOut' # Share memory output_vars[arg_name] = block.var(var_name) fetch_list.append(var_name) - batch_norm_op = block.append_op(type="batch_norm", - inputs=input_vars, - outputs=output_vars, - attrs=attrs) + batch_norm_op = block.append_op( + type="batch_norm", + inputs=input_vars, + outputs=output_vars, + attrs=attrs, + ) # Generate the backward op_desc of batch_norm grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( - batch_norm_op.desc, set(), []) + batch_norm_op.desc, set(), [] + ) grad_op_desc = grad_op_desc_list[0] new_op_desc = block.desc.append_op() new_op_desc.copy_from(grad_op_desc) @@ -273,13 +346,11 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): exe = paddle.static.Executor(self.place) outs = exe.run(program, feed=inputs, fetch_list=fetch_list) for id, name in enumerate(fetch_list): - np.testing.assert_allclose(outputs[name], - outs[id], - rtol=1e-05, - atol=1e-4) + np.testing.assert_allclose( + outputs[name], outs[id], rtol=1e-05, atol=1e-4 + ) class TestBatchNormOpUseGlobalStats(unittest.TestCase): - def setUp(self): self.places = [paddle.XPUPlace(0)] self.init_test() @@ -296,11 +367,14 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): net1 = paddle.fluid.dygraph.BatchNorm( 6, param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(1.0)), + initializer=fluid.initializer.Constant(1.0) + ), use_global_stats=self.use_global_stats, - trainable_statistics=self.trainable_statistics) + trainable_statistics=self.trainable_statistics, + ) net2 = paddle.nn.BatchNorm2D( - 6, use_global_stats=self.use_global_stats) + 6, use_global_stats=self.use_global_stats + ) net2.weight = net1.weight net2.bias = net1.bias if self.trainable_statistics == True: @@ -308,9 +382,9 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): net2.training = False y1 = net1(x) y2 = net2(x) - np.testing.assert_allclose(y1.numpy(), - y2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + y1.numpy(), y2.numpy(), rtol=1e-05 + ) class TestBatchNormOpUseGlobalStats1(TestBatchNormOpUseGlobalStats): ### test mode diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py index e5fa996f16cd88dd687719f1435151820072e646..01256f1d9cc556777d9fe2a20ca34fbf01c04fba 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bce_loss_op_xpu.py @@ -19,30 +19,33 @@ import paddle import numpy as np import unittest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, 
XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() def bce_loss(input, label): - return -1 * (label * np.log(input) + (1. - label) * np.log(1. - input)) + return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input)) class XPUTestBceLossOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bce_loss' self.use_dynamic_create_class = False class TestBceLossOp(XPUOpTest): - def setUp(self): self.op_type = "bce_loss" self.dtype = self.in_type self.place = paddle.XPUPlace(0) self.init_test_case() - input_np = np.random.uniform(0.1, 0.8, - self.shape).astype(self.dtype) + input_np = np.random.uniform(0.1, 0.8, self.shape).astype( + self.dtype + ) label_np = np.random.randint(0, 2, self.shape).astype(self.dtype) output_np = bce_loss(input_np, label_np) @@ -59,12 +62,10 @@ class XPUTestBceLossOp(XPUOpTestWrapper): self.shape = [10, 10] class TestBceLossOpCase1(TestBceLossOp): - def init_test_cast(self): self.shape = [2, 3, 4, 5] class TestBceLossOpCase2(TestBceLossOp): - def init_test_cast(self): self.shape = [2, 3, 20] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py index 811a6f6cb7c803b46fb8c4f66e01409dd89404c9..842018ba8f4e9840c22187a08aeb5899aefe10dd 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_v2_op_xpu.py @@ -20,21 +20,27 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest import unittest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() -def bilinear_interp_np(input, - out_h, - out_w, - scale_w=0, - scale_h=0, - out_size=None, - actual_shape=None, - align_corners=True, - align_mode=0, - data_layout='NCHW'): +def bilinear_interp_np( + input, + out_h, + out_w, + scale_w=0, + scale_h=0, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0, + data_layout='NCHW', +): """bilinear interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW @@ -48,7 +54,7 @@ def bilinear_interp_np(input, ratio_h = ratio_w = 0.0 if out_h > 1: - if (align_corners): + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: @@ -56,7 +62,7 @@ def bilinear_interp_np(input, else: ratio_h = 1.0 * in_h / out_h if out_w > 1: - if (align_corners): + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -67,37 +73,40 @@ def bilinear_interp_np(input, out = np.zeros((batch_size, channel, out_h, out_w)) for i in range(out_h): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: h = int(ratio_h * (i + 0.5) - 0.5) else: h = int(ratio_h * i) h = max(0, h) hid = 1 if h < in_h - 1 else 0 - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0) h1lambda = idx_src_h - h else: h1lambda = ratio_h * i - h h2lambda = 1.0 - h1lambda for j in range(out_w): - if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: w = int(ratio_w * (j + 0.5) - 0.5) else: w = int(ratio_w * j) w = max(0, w) wid = 1 if w < in_w - 1 else 0 - 
if (align_mode == 0 and not align_corners): + if align_mode == 0 and not align_corners: idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0) w1lambda = idx_src_w - w else: w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda - out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + - w1lambda*input[:, :, h, w+wid]) + \ - h1lambda*(w2lambda*input[:, :, h+hid, w] + - w1lambda*input[:, :, h+hid, w+wid]) + out[:, :, i, j] = h2lambda * ( + w2lambda * input[:, :, h, w] + + w1lambda * input[:, :, h, w + wid] + ) + h1lambda * ( + w2lambda * input[:, :, h + hid, w] + + w1lambda * input[:, :, h + hid, w + wid] + ) if data_layout == "NHWC": out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC @@ -106,13 +115,11 @@ def bilinear_interp_np(input, class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bilinear_interp_v2' self.use_dynamic_create_class = False class TestBilinearInterpOp(XPUOpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -133,7 +140,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): scale_w = 0 if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: scale_h = scale_w = float(self.scale) if isinstance(self.scale, list) and len(self.scale) == 1: scale_w = scale_h = self.scale[0] @@ -146,10 +153,18 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): out_h = self.out_h out_w = self.out_w - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners, self.align_mode, - self.data_layout) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + self.align_mode, + self.data_layout, + ) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -162,11 +177,11 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): 'interp_method': self.interp_method, 'align_corners': self.align_corners, 'align_mode': self.align_mode, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: if isinstance(self.scale, float) or isinstance(self.scale, int): - if self.scale > 0.: + if self.scale > 0.0: self.scale = [self.scale] if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] @@ -184,7 +199,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.input_shape = [2, 3, 5, 5] self.out_h = 2 self.out_w = 2 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([3, 3]).astype("int32") self.align_corners = True self.align_mode = 1 @@ -193,76 +208,69 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.place = paddle.XPUPlace(0) class TestBilinearInterpCase1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([2, 2]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase5(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([11, 11]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase6(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([65, 33]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpCase7(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [1, 1, 32, 64] @@ -273,70 +281,62 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.align_mode = 1 class TestBilinearInterpSame(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpActualShape(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. + self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.align_mode = 1 class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 1 class TestBilinearInterpWithMethod2(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = False self.align_mode = 0 class TestBilinearInterpWithMethod3(TestBilinearInterpOp): - def set_align_mode(self): self.align_corners = True self.align_mode = 0 class TestBilinearInterpScale1(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 2. + self.scale = 2.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale2(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] self.out_h = 60 self.out_w = 25 - self.scale = 1. 
+ self.scale = 1.0 self.align_corners = True self.align_mode = 1 class TestBilinearInterpScale3(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -347,7 +347,6 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.align_mode = 1 class TestBilinearInterpScale4(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -358,7 +357,6 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.align_mode = 1 class TestBilinearInterpZero(TestBilinearInterpOp): - def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [2, 3, 5, 7] @@ -369,7 +367,6 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.align_mode = 0 class TestBilinearInterpOp_attr_tensor(XPUOpTest): - def setUp(self): self.out_size = None self.actual_shape = None @@ -409,8 +406,9 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -422,9 +420,16 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = bilinear_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, self.actual_shape, - self.align_corners) + output_np = bilinear_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def test_check_output(self): @@ -438,7 +443,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.input_shape = [2, 3, 5, 5] self.out_h = 3 self.out_w = 3 - self.scale = 0. + self.scale = 0.0 self.out_size = [3, 3] self.align_corners = True @@ -446,36 +451,36 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper): self.place = paddle.XPUPlace(0) # out_size is a 1-D tensor - class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor - ): - + class TestBilinearInterp_attr_tensor_Case1( + TestBilinearInterpOp_attr_tensor + ): def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 - self.scale = 0. + self.scale = 0.0 self.out_size = [8, 12] self.align_corners = True # scale is a 1-D tensor - class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor - ): - + class TestBilinearInterp_attr_tensor_Case2( + TestBilinearInterpOp_attr_tensor + ): def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] self.out_h = 64 self.out_w = 32 - self.scale = 0. 
+ self.scale = 0.0 self.out_size = np.array([66, 40]).astype("int32") self.align_corners = True self.shape_by_1Dtensor = True # scale is a 1-D tensor - class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor - ): - + class TestBilinearInterp_attr_tensor_Case3( + TestBilinearInterpOp_attr_tensor + ): def init_test_case(self): self.interp_method = 'bilinear' self.input_shape = [3, 2, 32, 16] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py index e7e81707376a851f0a62643104dee56d35da29a8..2482709f3452c25f5440300098a91a20f3226407 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bitwise_op_xpu.py @@ -21,19 +21,21 @@ sys.path.append("..") import paddle from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() ################## TEST OP: BitwiseAnd ################## class XPUTestBitwiseAnd(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bitwise_and' class XPUTestBitwiseAndBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -42,20 +44,18 @@ class XPUTestBitwiseAnd(XPUOpTestWrapper): def set_case(self): self.op_type = 'bitwise_and' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_and(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -73,7 +73,6 @@ class XPUTestBitwiseAnd(XPUOpTestWrapper): pass class XPUTestBitwiseAndCase1(XPUTestBitwiseAndBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -82,7 +81,6 @@ class XPUTestBitwiseAnd(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseAndCase2(XPUTestBitwiseAndBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -91,7 +89,6 @@ class XPUTestBitwiseAnd(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseAndCase3(XPUTestBitwiseAndBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -107,12 +104,10 @@ for stype in support_types: ################## TEST OP: BitwiseOr ################## class XPUTestBitwiseOr(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bitwise_or' class XPUTestBitwiseOrBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -121,20 +116,18 @@ class XPUTestBitwiseOr(XPUOpTestWrapper): def set_case(self): self.op_type = 'bitwise_or' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_or(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': 
OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -152,7 +145,6 @@ class XPUTestBitwiseOr(XPUOpTestWrapper): pass class XPUTestBitwiseOrCase1(XPUTestBitwiseOrBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -161,7 +153,6 @@ class XPUTestBitwiseOr(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseOrCase2(XPUTestBitwiseOrBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -170,7 +161,6 @@ class XPUTestBitwiseOr(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseOrCase3(XPUTestBitwiseOrBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -186,12 +176,10 @@ for stype in support_types: ################## TEST OP: BitwiseXor ################## class XPUTestBitwiseXor(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bitwise_xor' class XPUTestBitwiseXorBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -200,20 +188,18 @@ class XPUTestBitwiseXor(XPUOpTestWrapper): def set_case(self): self.op_type = 'bitwise_xor' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.bitwise_xor(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -231,7 +217,6 @@ class XPUTestBitwiseXor(XPUOpTestWrapper): pass class XPUTestBitwiseXorCase1(XPUTestBitwiseXorBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -240,7 +225,6 @@ class XPUTestBitwiseXor(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseXorCase2(XPUTestBitwiseXorBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -249,7 +233,6 @@ class XPUTestBitwiseXor(XPUOpTestWrapper): self.high = 100 class XPUTestBitwiseXorCase3(XPUTestBitwiseXorBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [2, 3, 4, 5] @@ -265,12 +248,10 @@ for stype in support_types: ################## TEST OP: BitwiseNot ################## class XPUTestBitwiseNot(XPUOpTestWrapper): - def __init__(self): self.op_name = 'bitwise_not' class XPUTestBitwiseNotBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -279,10 +260,9 @@ class XPUTestBitwiseNot(XPUOpTestWrapper): def set_case(self): self.op_type = 'bitwise_not' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) out = np.bitwise_not(x) self.attrs = {'use_xpu': True} @@ -302,7 +282,6 @@ class XPUTestBitwiseNot(XPUOpTestWrapper): pass class XPUTestBitwiseNotBool(XPUTestBitwiseNotBase): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bmm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bmm_op_xpu.py index 840f88198cbb5d8ecf19bc43a10e167d60e4feea..fbc0ca66921403ef8c1a199a36b8c3cc02d94cbc 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_bmm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_bmm_op_xpu.py @@ -18,7 +18,11 @@ import paddle import unittest import numpy as np from op_test_xpu 
import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -33,7 +37,6 @@ class XPUTestBmmOp(XPUOpTestWrapper): self.use_dynamic_create_class = False class TestBmmOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -66,31 +69,26 @@ class XPUTestBmmOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') class TestBmmOp1(TestBmmOp): - def set_shape(self): self.Xshape = (3, 3, 3) self.Yshape = (3, 3, 3) class TestBmmOp2(TestBmmOp): - def set_shape(self): self.Xshape = (128, 3, 16) self.Yshape = (128, 16, 3) class TestBmmOp3(TestBmmOp): - def set_shape(self): self.Xshape = (2048, 16, 27) self.Yshape = (2048, 27, 16) class TestBmmOp4(TestBmmOp): - def set_shape(self): self.Xshape = (2, 27, 27) self.Yshape = (2, 27, 27) class TestBmmOp5(TestBmmOp): - def set_shape(self): self.Xshape = (2, 1, 1) self.Yshape = (2, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_c_embedding_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_c_embedding_op_xpu.py index 7324fc100d5b80dace6a47510ea7d5f68813f0e0..854ec71cf7374df31d8117dd793ed6cff13a3be0 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_c_embedding_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_c_embedding_op_xpu.py @@ -17,7 +17,11 @@ import sys sys.path.append("..") import paddle -from paddle.fluid.tests.unittests.c_embedding_op_base import TestCEmbeddingCPU, TestCEmbeddingOpBase, TestCEmbeddingOpFP32 +from paddle.fluid.tests.unittests.c_embedding_op_base import ( + TestCEmbeddingCPU, + TestCEmbeddingOpBase, + TestCEmbeddingOpFP32, +) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py index 908be556fd1025d29fee94545f65fd2e3a76f81c..a2e136dccaab030c79341f36a9886a4257eace6b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py @@ -23,7 +23,11 @@ import paddle.fluid as fluid from paddle.fluid import Program, program_guard from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) typeid_dict = { 'int32': int(core.VarDesc.VarType.INT32), @@ -36,7 +40,6 @@ typeid_dict = { class XPUTestCastOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'cast' self.use_dynamic_create_class = True @@ -51,12 +54,14 @@ class XPUTestCastOp(XPUOpTestWrapper): return base_class, classes class TestCastOp(XPUOpTest): - def setUp(self): ipt = np.random.random(size=[10, 10]) in_typename = self.in_type_str - out_typename = 'float32' if not hasattr( - self, 'out_typename') else self.out_typename + out_typename = ( + 'float32' + if not hasattr(self, 'out_typename') + else self.out_typename + ) self.inputs = {'X': ipt.astype(in_typename)} self.outputs = {'Out': ipt.astype(in_typename).astype(out_typename)} @@ -77,12 +82,12 @@ for stype in support_types: class TestCastOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of cast_op must be Variable. 
- x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.XPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.XPUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_clip_by_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_clip_by_norm_op_xpu.py index 32838a21a5afc80f55109330e6ee92b0aeda697d..1bfa96b5975deba6f12d42adea2533a5f27b8d25 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_clip_by_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_clip_by_norm_op_xpu.py @@ -22,7 +22,6 @@ import paddle class TestXPUClipByNormOp(XPUOpTest): - def setUp(self): self.op_type = "clip_by_norm" self.dtype = np.float32 @@ -50,26 +49,23 @@ class TestXPUClipByNormOp(XPUOpTest): self.check_output_with_place(place) def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1.0 class TestCase1(TestXPUClipByNormOp): - def initTestCase(self): - self.shape = (100, ) + self.shape = (100,) self.max_norm = 1e20 class TestCase2(TestXPUClipByNormOp): - def initTestCase(self): self.shape = (16, 16) self.max_norm = 0.1 class TestCase3(TestXPUClipByNormOp): - def initTestCase(self): self.shape = (4, 8, 16) self.max_norm = 1.0 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py index 5b61bfa8d1be9dc6c57247482f261c94ab28a69e..cd6dd33b6facf6519677350c7f674226996642ba 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py @@ -23,17 +23,19 @@ import paddle from paddle.fluid import Program, program_guard from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestClipOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'clip' self.use_dynamic_create_class = False class TestClipOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -90,28 +92,24 @@ class XPUTestClipOp(XPUOpTestWrapper): paddle.disable_static() class TestClipOp1(TestClipOp): - def init_data(self): self.shape = (8, 16, 8) self.max = 0.7 self.min = 0.0 class TestClipOp2(TestClipOp): - def init_data(self): self.shape = (8, 16) self.max = 1.0 self.min = 0.0 class TestClipOp3(TestClipOp): - def init_data(self): self.shape = (4, 8, 16) self.max = 0.7 self.min = 0.2 class TestClipOp4(TestClipOp): - def init_data(self): self.shape = (4, 8, 8) self.max = 0.7 @@ -120,7 +118,6 @@ class XPUTestClipOp(XPUOpTestWrapper): self.inputs['Min'] = np.array([0.3]).astype('float32') class TestClipOp5(TestClipOp): - def init_data(self): self.shape = (4, 8, 16) self.max = 0.5 @@ -128,7 +125,6 @@ class XPUTestClipOp(XPUOpTestWrapper): class TestClipOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): @@ -148,7 +144,6 @@ class TestClipOpError(unittest.TestCase): class TestClipAPI(unittest.TestCase): - def _executed_api(self, x, min=None, max=None): return paddle.clip(x, min, max) @@ -160,8 +155,11 @@ class TestClipAPI(unittest.TestCase): min = fluid.data(name='min', shape=[1], dtype='float32') max = fluid.data(name='max', shape=[1], dtype='float32') - place = fluid.XPUPlace( - 0) if fluid.core.is_compiled_with_xpu() else fluid.CPUPlace() + place = ( + fluid.XPUPlace(0) + if 
fluid.core.is_compiled_with_xpu() + else fluid.CPUPlace() + ) exe = fluid.Executor(place) out_1 = self._executed_api(images, min=min, max=max) @@ -170,7 +168,7 @@ class TestClipAPI(unittest.TestCase): out_4 = self._executed_api(images, max=0.7) out_5 = self._executed_api(images, min=min) out_6 = self._executed_api(images, max=max) - out_7 = self._executed_api(images, max=-1.) + out_7 = self._executed_api(images, max=-1.0) out_8 = self._executed_api(images) res1, res2, res3, res4, res5, res6, res7, res8 = exe.run( @@ -178,9 +176,10 @@ class TestClipAPI(unittest.TestCase): feed={ "image": data, "min": np.array([0.2]).astype('float32'), - "max": np.array([0.8]).astype('float32') + "max": np.array([0.8]).astype('float32'), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], + ) np.testing.assert_allclose(res1, data.clip(0.2, 0.8)) np.testing.assert_allclose(res2, data.clip(0.2, 0.9)) @@ -194,8 +193,11 @@ class TestClipAPI(unittest.TestCase): def test_clip_dygraph(self): paddle.disable_static() - place = fluid.XPUPlace( - 0) if fluid.core.is_compiled_with_xpu() else fluid.CPUPlace() + place = ( + fluid.XPUPlace(0) + if fluid.core.is_compiled_with_xpu() + else fluid.CPUPlace() + ) paddle.disable_static(place) data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') @@ -223,7 +225,6 @@ class TestClipAPI(unittest.TestCase): class TestInplaceClipAPI(TestClipAPI): - def _executed_api(self, x, min=None, max=None): return x.clip_(min, max) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_coalesce_tensor_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_coalesce_tensor_op_xpu.py index 9b295aa12cbb8d0535b91dbf318608b1ff9fe91f..5370c1947a8deea1e66d1e161e7e7da6fbde3f8d 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_coalesce_tensor_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_coalesce_tensor_op_xpu.py @@ -22,19 +22,21 @@ sys.path.append("..") alignment = 256 import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestCoalesceTensorOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'coalesce_tensor' self.use_dynamic_create_class = False class TestAllocContinuousSpace(XPUOpTest): - def setUp(self): self.op_type = "coalesce_tensor" self.use_xpu = True @@ -45,12 +47,13 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper): self.set_constant = attrs["set_constant"] self.Inputs = self.init_input() self.Outputs, self.FusedOutput = self.init_output( - self.Inputs, self.set_constant, self.constant) + self.Inputs, self.set_constant, self.constant + ) self.inputs = {'Input': self.Inputs} self.attrs = attrs self.outputs = { 'Output': self.Outputs, - 'FusedOutput': self.FusedOutput + 'FusedOutput': self.FusedOutput, } def init_dtype(self): @@ -61,8 +64,9 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper): inputs.append(("x1", np.random.random([20, 3]).astype(self.dtype))) inputs.append(("x2", np.random.random([20]).astype(self.dtype))) inputs.append(("x3", np.random.random([1]).astype(self.dtype))) - inputs.append(("x4", np.random.random([200, - 30]).astype(self.dtype))) + inputs.append( + ("x4", np.random.random([200, 30]).astype(self.dtype)) + ) inputs.append(("x5", np.random.random([30]).astype(self.dtype))) 
inputs.append(("x6", np.random.random([1]).astype(self.dtype))) return inputs @@ -72,7 +76,7 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper): "copy_data": True, "set_constant": False, "constant": 0.0, - "dtype": self.fluid_dtype + "dtype": self.fluid_dtype, } def init_output(self, input_list, set_constant, constant): @@ -88,33 +92,37 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper): coalesce_tensor_var = np.concatenate([input for input in inputs]) if set_constant: - coalesce_tensor_var = np.ones( - (len(coalesce_tensor_var))) * constant - outputs = [(out[0], - np.ones(out[1].shape).astype(self.dtype) * constant) - for out in outputs] + coalesce_tensor_var = ( + np.ones((len(coalesce_tensor_var))) * constant + ) + outputs = [ + ( + out[0], + np.ones(out[1].shape).astype(self.dtype) * constant, + ) + for out in outputs + ] return outputs, coalesce_tensor_var def test_check_output(self): - self.check_output_with_place(place=core.XPUPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.XPUPlace(0), no_check_set=["FusedOutput"], atol=1e-5 + ) class TestAllocContinuousSpace2(TestAllocContinuousSpace): - def init_attr(self): return { "copy_data": False, "set_constant": True, "constant": 0.5, "dtype": self.fluid_dtype, - "user_defined_size_of_dtype": 2 + "user_defined_size_of_dtype": 2, } def test_check_output(self): - self.check_output_with_place(place=core.XPUPlace(0), - no_check_set=["FusedOutput"], - atol=1e-5) + self.check_output_with_place( + place=core.XPUPlace(0), no_check_set=["FusedOutput"], atol=1e-5 + ) support_types = get_xpu_op_support_types('coalesce_tensor') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_allgather_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_allgather_xpu.py index eed540811159f36df2dfed3812aabb3b39680717..568b3039a955e712035e55377fe1d93545167ca0 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_allgather_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_allgather_xpu.py @@ -22,33 +22,38 @@ import sys sys.path.append("..") -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestCAllgatherOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'c_allgather' self.use_dynamic_create_class = False class TestCAllgatherOp(TestDistBase): - def _setup_config(self): pass def test_allgather(self): - self.check_with_place("collective_allgather_op_xpu.py", "allgather", - self.in_type_str) + self.check_with_place( + "collective_allgather_op_xpu.py", "allgather", self.in_type_str + ) support_types = get_xpu_op_support_types('c_allgather') for stype in support_types: - create_test_class(globals(), - XPUTestCAllgatherOP, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestCAllgatherOP, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_allreduce_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_allreduce_xpu.py index 57a96530c55925a5fbeabfc204864f346a345213..3fa7405d5489072a4bc5cc6913115a635d4fc558 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_allreduce_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_allreduce_xpu.py @@ -22,33 +22,38 @@ 
import sys sys.path.append("..") -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestCAllreduceOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'c_allreduce_sum' self.use_dynamic_create_class = False class TestCAllreduceOp(TestDistBase): - def _setup_config(self): pass def test_allreduce(self): - self.check_with_place("collective_allreduce_op_xpu.py", "allreduce", - self.in_type_str) + self.check_with_place( + "collective_allreduce_op_xpu.py", "allreduce", self.in_type_str + ) support_types = get_xpu_op_support_types('c_allreduce_sum') for stype in support_types: - create_test_class(globals(), - XPUTestCAllreduceOP, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestCAllreduceOP, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py index 74e72672369275f3d6bac4c21430687ff522259f..1c00f3b94b8605469cebceed67ad9a2298a75225 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_base_xpu.py @@ -50,10 +50,10 @@ def DataTypeCast(date_type): class TestCollectiveRunnerBase(object): - def get_model(self, train_prog, startup_prog): raise NotImplementedError( - "get model should be implemented by child class.") + "get model should be implemented by child class." + ) def wait_server_ready(self, endpoints): while True: @@ -61,13 +61,15 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, - 1) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1 + ) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -75,44 +77,51 @@ class TestCollectiveRunnerBase(object): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + - str(not_ready_endpoints) + "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: break + # endpoints should be ["ip1:port1","ip2:port2"] -#endpoints should be ["ip1:port1","ip2:port2"] - - def initCommunicator(self, program, rank, nranks, wait_port, - current_endpoint, endpoints): + def initCommunicator( + self, program, rank, nranks, wait_port, current_endpoint, endpoints + ): other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) if rank == 0 and wait_port: self.wait_server_ready(other_endpoints) block = program.global_block() - bkcl_id_var = block.create_var(name=nameGen.generate('bkcl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - - block.append_op(type='c_gen_bkcl_id', - inputs={}, - outputs={'Out': bkcl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - - 
block.append_op(type='c_comm_init', - inputs={'X': bkcl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': self.global_ring_id - }) + bkcl_id_var = block.create_var( + name=nameGen.generate('bkcl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + + block.append_op( + type='c_gen_bkcl_id', + inputs={}, + outputs={'Out': bkcl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + + block.append_op( + type='c_comm_init', + inputs={'X': bkcl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': self.global_ring_id, + }, + ) def run_trainer(self, args): train_prog = fluid.Program() @@ -121,8 +130,9 @@ class TestCollectiveRunnerBase(object): rank = args["trainerid"] current_endpoint = args["currentendpoint"] nranks = 2 - self.initCommunicator(startup_prog, rank, nranks, True, - current_endpoint, endpoints) + self.initCommunicator( + startup_prog, rank, nranks, True, current_endpoint, endpoints + ) self.rank = rank result = self.get_model(train_prog, startup_prog) device_id = int(os.getenv("FLAGS_selected_xpus", "0")) @@ -131,11 +141,12 @@ class TestCollectiveRunnerBase(object): exe.run(startup_prog) np.random.seed(os.getpid()) np_data_type = DataTypeCast(args["data_type"]) - indata = np.random.uniform(low=-10.0, high=10.0, - size=(10, 1000)).astype(np_data_type) - out = exe.run(train_prog, - feed={'tindata': indata}, - fetch_list=[result.name]) + indata = np.random.uniform( + low=-10.0, high=10.0, size=(10, 1000) + ).astype(np_data_type) + out = exe.run( + train_prog, feed={'tindata': indata}, fetch_list=[result.name] + ) sys.stdout.buffer.write(pickle.dumps(out[0])) @@ -157,12 +168,13 @@ from contextlib import closing class TestDistBase(unittest.TestCase): - def setUp(self): self._port_set = set() self._trainers = 2 self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % ( - self._find_free_port(), self._find_free_port()) + self._find_free_port(), + self._find_free_port(), + ) self._python_interp = sys.executable self.temp_dir = tempfile.TemporaryDirectory() @@ -171,10 +183,10 @@ class TestDistBase(unittest.TestCase): self.temp_dir.cleanup() def _find_free_port(self): - def __free_port(): - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as s: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as s: s.bind(('', 0)) return s.getsockname()[1] @@ -192,7 +204,7 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "0", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w0_ep + "PADDLE_CURRENT_ENDPOINT": w0_ep, } env1 = { @@ -200,9 +212,9 @@ class TestDistBase(unittest.TestCase): "PADDLE_TRAINER_ID": "1", "PADDLE_TRAINERS_NUM": "2", "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, - "PADDLE_CURRENT_ENDPOINT": w1_ep + "PADDLE_CURRENT_ENDPOINT": w1_ep, } - #update environment + # update environment env0.update(envs) env1.update(envs) tr_cmd = "%s %s" @@ -215,14 +227,16 @@ class TestDistBase(unittest.TestCase): tr0_proc = subprocess.Popen( tr0_cmd.strip().split(), stdout=subprocess.PIPE, - #stderr=tr0_pipe, - env=env0) + # stderr=tr0_pipe, + env=env0, + ) tr1_proc = subprocess.Popen( tr0_cmd.strip().split(), stdout=subprocess.PIPE, - #stderr=tr1_pipe, - env=env1) + # stderr=tr1_pipe, + env=env1, + ) tr0_out, tr0_err = tr0_proc.communicate() tr1_out, tr1_err = tr1_proc.communicate() @@ -231,15 +245,21 @@ class TestDistBase(unittest.TestCase): # close trainer file 
tr0_pipe.close() tr1_pipe.close() - return pickle.loads(tr0_out), pickle.loads( - tr1_out), tr0_proc.pid, tr1_proc.pid - - def check_with_place(self, - model_file, - col_type, - data_type, - check_error_log=False, - need_envs={}): + return ( + pickle.loads(tr0_out), + pickle.loads(tr1_out), + tr0_proc.pid, + tr1_proc.pid, + ) + + def check_with_place( + self, + model_file, + col_type, + data_type, + check_error_log=False, + need_envs={}, + ): required_envs = { "FLAGS_eager_delete_tensor_gb": "0.0", "PATH": os.getenv("PATH"), @@ -254,14 +274,17 @@ class TestDistBase(unittest.TestCase): required_envs["GLOG_v"] = "3" required_envs["GLOG_logtostderr"] = "1" tr0_out, tr1_out, pid0, pid1 = self._run_cluster( - model_file, required_envs) + model_file, required_envs + ) np_data_type = DataTypeCast(data_type) np.random.seed(pid0) - input1 = np.random.uniform(low=-10.0, high=10.0, - size=(10, 1000)).astype(np_data_type) + input1 = np.random.uniform( + low=-10.0, high=10.0, size=(10, 1000) + ).astype(np_data_type) np.random.seed(pid1) - input2 = np.random.uniform(low=-10.0, high=10.0, - size=(10, 1000)).astype(np_data_type) + input2 = np.random.uniform( + low=-10.0, high=10.0, size=(10, 1000) + ).astype(np_data_type) if col_type == "allgather": need_result = np.vstack((input1, input2)) np.testing.assert_allclose(tr0_out, need_result) @@ -275,38 +298,33 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out, need_result) elif col_type == "scatter": need_result = input2 - need_result1 = need_result[0:need_result.shape[0] // 2] - need_result2 = need_result[need_result.shape[0] // 2:] + need_result1 = need_result[0 : need_result.shape[0] // 2] + need_result2 = need_result[need_result.shape[0] // 2 :] np.testing.assert_allclose(tr0_out, need_result1) np.testing.assert_allclose(tr1_out, need_result2) elif col_type == "allreduce": need_result = input1 + input2 - np.testing.assert_allclose(tr0_out, - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out, - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out, need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out, need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "reduce_scatter": tmp = input1 + input2 - need_result1 = tmp[0:tmp.shape[0] // 2] - need_result2 = tmp[tmp.shape[0] // 2:] - np.testing.assert_allclose(tr0_out, - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out, - need_result2, - rtol=1e-05, - atol=1e-05) + need_result1 = tmp[0 : tmp.shape[0] // 2] + need_result2 = tmp[tmp.shape[0] // 2 :] + np.testing.assert_allclose( + tr0_out, need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out, need_result2, rtol=1e-05, atol=1e-05 + ) elif col_type == "sendrecv": need_result = input1 - np.testing.assert_allclose(tr1_out, - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr1_out, need_result, rtol=1e-05, atol=1e-05 + ) elif col_type == "identity": need_result1 = input1 need_result2 = input2 @@ -324,35 +342,29 @@ class TestDistBase(unittest.TestCase): np.testing.assert_allclose(tr1_out, need_result2) elif col_type == "concat": need_result = np.concatenate((input1, input2), axis=1) - np.testing.assert_allclose(tr0_out, - need_result, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out, - need_result, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out, need_result, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out, need_result, 
rtol=1e-05, atol=1e-05 + ) elif col_type == "split": need_result1 = np.split(input1, 2, axis=1)[0] need_result2 = np.split(input2, 2, axis=1)[1] - np.testing.assert_allclose(tr0_out, - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out, - need_result2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr0_out, need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out, need_result2, rtol=1e-05, atol=1e-05 + ) elif col_type == "sendrecv_array": need_result1 = np.array([[0, 1, 2]]) need_result2 = np.array([[3, 4, 5]]) - np.testing.assert_allclose(tr1_out[0][0], - need_result1, - rtol=1e-05, - atol=1e-05) - np.testing.assert_allclose(tr1_out[0][1], - need_result2, - rtol=1e-05, - atol=1e-05) + np.testing.assert_allclose( + tr1_out[0][0], need_result1, rtol=1e-05, atol=1e-05 + ) + np.testing.assert_allclose( + tr1_out[0][1], need_result2, rtol=1e-05, atol=1e-05 + ) else: pass diff --git a/python/paddle/fluid/tests/unittests/xpu/test_collective_identity_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_collective_identity_xpu.py index 07c523629b600d87144cec9670ef9a53c29f10ad..4d84efebbe2f2971141829114326307d5e29da9a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_collective_identity_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_collective_identity_xpu.py @@ -22,33 +22,38 @@ import sys sys.path.append("..") -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestCIdentityOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'c_identity' self.use_dynamic_create_class = False class TestCIdentityOp(TestDistBase): - def _setup_config(self): pass def test_identity(self): - self.check_with_place("collective_identity_op_xpu.py", "identity", - self.in_type_str) + self.check_with_place( + "collective_identity_op_xpu.py", "identity", self.in_type_str + ) support_types = get_xpu_op_support_types('c_identity') for stype in support_types: - create_test_class(globals(), - XPUTestCIdentityOP, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestCIdentityOP, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py index cdaf767a1de68252d9b5db13c7b9a923df2641ca..8e7fb2eb3421de17be33c30f3285197ec78cff54 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py @@ -24,7 +24,6 @@ from xpu.get_test_cover_info import XPUOpTestWrapper class TestCompareOpBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.config() @@ -33,10 +32,12 @@ class TestCompareOpBase(XPUOpTest): self.outputs = {'Out': self.result} def set_case(self): - self.x = np.random.uniform(self.lbound, self.hbound, - self.x_shape).astype(self.dtype) - self.y = np.random.uniform(self.lbound, self.hbound, - self.y_shape).astype(self.dtype) + self.x = np.random.uniform( + self.lbound, self.hbound, self.x_shape + ).astype(self.dtype) + self.y = np.random.uniform( + self.lbound, self.hbound, self.y_shape + ).astype(self.dtype) self.result = self.compute(self.x, self.y) def config(self): @@ -54,13 +55,11 @@ class TestCompareOpBase(XPUOpTest): class 
XPUTestLessThanOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'less_than' self.use_dynamic_create_class = False class LessThanOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'less_than' @@ -74,7 +73,6 @@ class XPUTestLessThanOP(XPUOpTestWrapper): self.y_shape = [11, 17] class LessThanOpTestCase2(LessThanOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -82,7 +80,6 @@ class XPUTestLessThanOP(XPUOpTestWrapper): self.y_shape = [1] class LessThanOpTestCase3(LessThanOpTestCase1): - def set_data(self): self.lbound = -300 self.hbound = 300 @@ -90,7 +87,6 @@ class XPUTestLessThanOP(XPUOpTestWrapper): self.y_shape = [1] class LessThanOpTestCase4(LessThanOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -98,7 +94,6 @@ class XPUTestLessThanOP(XPUOpTestWrapper): self.y_shape = [1] class LessThanOpTestCase5(LessThanOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -112,13 +107,11 @@ for stype in support_types: class XPUTestLessEqualOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'less_equal' self.use_dynamic_create_class = False class LessEqualOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'less_equal' @@ -132,7 +125,6 @@ class XPUTestLessEqualOp(XPUOpTestWrapper): self.y_shape = [11, 17] class LessEqualOpTestCase2(LessEqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -140,7 +132,6 @@ class XPUTestLessEqualOp(XPUOpTestWrapper): self.y_shape = [11, 17, 255] class LessEqualOpTestCase3(LessEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -148,7 +139,6 @@ class XPUTestLessEqualOp(XPUOpTestWrapper): self.y_shape = [1] class LessEqualOpTestCase4(LessEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -156,7 +146,6 @@ class XPUTestLessEqualOp(XPUOpTestWrapper): self.y_shape = [1] class LessEqualOpTestCase5(LessEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -170,13 +159,11 @@ for stype in support_types: class XPUTestGreaterThanOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'greater_than' self.use_dynamic_create_class = False class GreaterThanOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'greater_than' @@ -190,7 +177,6 @@ class XPUTestGreaterThanOp(XPUOpTestWrapper): self.y_shape = [128, 128, 512] class GreaterThanOpTestCase2(GreaterThanOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -198,7 +184,6 @@ class XPUTestGreaterThanOp(XPUOpTestWrapper): self.y_shape = [1] class GreaterThanOpTestCase3(GreaterThanOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -206,7 +191,6 @@ class XPUTestGreaterThanOp(XPUOpTestWrapper): self.y_shape = [1] class GreaterThanOpTestCase4(GreaterThanOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -214,7 +198,6 @@ class XPUTestGreaterThanOp(XPUOpTestWrapper): self.y_shape = [11, 17] class GreaterThanOpTestCase5(GreaterThanOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -228,13 +211,11 @@ for stype in support_types: class XPUTestGreaterEqualOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'greater_equal' self.use_dynamic_create_class = False class GreaterEqualOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'greater_equal' @@ -248,7 +229,6 @@ class 
XPUTestGreaterEqualOp(XPUOpTestWrapper): self.y_shape = [10, 10, 20, 20] class GreaterEqualOpTestCase2(GreaterEqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -256,7 +236,6 @@ class XPUTestGreaterEqualOp(XPUOpTestWrapper): self.y_shape = [10, 10] class GreaterEqualOpTestCase3(GreaterEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -264,7 +243,6 @@ class XPUTestGreaterEqualOp(XPUOpTestWrapper): self.y_shape = [1] class GreaterEqualOpTestCase4(GreaterEqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -272,7 +250,6 @@ class XPUTestGreaterEqualOp(XPUOpTestWrapper): self.y_shape = [1] class GreaterEqualOpTestCase5(GreaterEqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -286,13 +263,11 @@ for stype in support_types: class XPUTestEqualOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'equal' self.use_dynamic_create_class = False class EqualOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'equal' @@ -306,7 +281,6 @@ class XPUTestEqualOp(XPUOpTestWrapper): self.y_shape = [10, 30, 15] class EqualOpTestCase2(EqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -314,7 +288,6 @@ class XPUTestEqualOp(XPUOpTestWrapper): self.y_shape = [1] class EqualOpTestCase3(EqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -322,7 +295,6 @@ class XPUTestEqualOp(XPUOpTestWrapper): self.y_shape = [10, 30] class EqualOpTestCase4(EqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -330,7 +302,6 @@ class XPUTestEqualOp(XPUOpTestWrapper): self.y_shape = [256, 256, 10] class EqualOpTestCase5(EqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 @@ -344,13 +315,11 @@ for stype in support_types: class XPUTestNotEqualOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'not_equal' self.use_dynamic_create_class = False class NotEqualOpTestCase1(TestCompareOpBase): - def config(self): self.dtype = self.in_type self.op_type = 'not_equal' @@ -364,7 +333,6 @@ class XPUTestNotEqualOp(XPUOpTestWrapper): self.y_shape = [1] class NotEqualOpTestCase2(NotEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -372,7 +340,6 @@ class XPUTestNotEqualOp(XPUOpTestWrapper): self.y_shape = [11, 17] class NotEqualOpTestCase3(NotEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -380,7 +347,6 @@ class XPUTestNotEqualOp(XPUOpTestWrapper): self.y_shape = [1] class NotEqualOpTestCase4(NotEqualOpTestCase1): - def set_data(self): self.lbound = -200 self.hbound = 200 @@ -388,7 +354,6 @@ class XPUTestNotEqualOp(XPUOpTestWrapper): self.y_shape = [256, 256, 10] class NotEqualOpTestCase5(NotEqualOpTestCase1): - def set_data(self): self.lbound = -100 self.hbound = 100 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py index 951eec301c0952f752d653f1e363294db3bf10ff..e7e4ca073c05ebbe4bc3ff65717d09774371c6c3 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py @@ -21,19 +21,21 @@ import numpy as np import paddle from op_test import skip_check_grad_ci from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + 
get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestConcatOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'concat' self.use_dynamic_create_class = False class TestConcatOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "concat" @@ -47,14 +49,16 @@ class XPUTestConcatOp(XPUOpTestWrapper): self.attrs = {'axis': self.axis} if self.axis < 0: self.actual_axis = self.axis + len(self.x0.shape) - self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0 + self.actual_axis = ( + self.actual_axis if self.actual_axis > 0 else 0 + ) else: self.actual_axis = self.axis self.outputs = { - 'Out': - np.concatenate((self.x0, self.x1, self.x2), - axis=self.actual_axis) + 'Out': np.concatenate( + (self.x0, self.x1, self.x2), axis=self.actual_axis + ) } def set_inputs(self): @@ -83,12 +87,10 @@ class XPUTestConcatOp(XPUOpTestWrapper): self.check_grad_with_place(place, ['x2'], 'Out') class TestConcatOpAxis0XPU(TestConcatOp): - def init_axis(self): self.axis = 0 class TestConcatOpAxis1XPU(TestConcatOp): - def set_inputs(self): self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype) self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype) @@ -98,34 +100,29 @@ class XPUTestConcatOp(XPUOpTestWrapper): self.axis = 1 class TestConcatOpAxis2XPU(TestConcatOp): - def init_axis(self): self.axis = 2 class TestConcatOpAxis3XPU(TestConcatOp): - def init_axis(self): self.axis = 3 class TestConcatOpAxisNeg1XPU(TestConcatOp): - def init_axis(self): self.axis = -1 class TestConcatOpAxisNeg2XPU(TestConcatOp): - def init_axis(self): self.axis = -2 class TestConcatOpAxisNeg3XPU(TestConcatOp): - def init_axis(self): self.axis = -3 @skip_check_grad_ci( - reason="The function 'check_grad' for large inputs is too slow.") + reason="The function 'check_grad' for large inputs is too slow." + ) class TestConcatOp3(TestConcatOp): - def set_inputs(self): self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype) self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype) @@ -136,11 +133,9 @@ class XPUTestConcatOp(XPUOpTestWrapper): pass @skip_check_grad_ci( - reason= - "This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." + reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015." 
) class TestConcatOp4(TestConcatOp): - def set_inputs(self): self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype) self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py index b7924818177e75ad0c0d0389b1a3207871662a44..973e2908c4eccdb76ac2d6f51954bff19b519874 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py @@ -21,25 +21,34 @@ import numpy as np import paddle.fluid.core as core from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper - - -def conv2d_forward_naive(input, - filter, - group, - conv_param, - padding_algorithm='EXPLICIT', - data_format='NCHW'): +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) + + +def conv2d_forward_naive( + input, + filter, + group, + conv_param, + padding_algorithm='EXPLICIT', + data_format='NCHW', +): if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Unknown Attr(data_format): '%s' ." - "It can only be 'NCHW' or 'NHWC'." % str(data_format)) + raise ValueError( + "Unknown Attr(data_format): '%s' ." + "It can only be 'NCHW' or 'NHWC'." % str(data_format) + ) - channel_last = (data_format == "NHWC") + channel_last = data_format == "NHWC" if channel_last: input = np.transpose(input, [0, 3, 1, 2]) @@ -52,17 +61,22 @@ def conv2d_forward_naive(input, sub_out_c = out_c // group sub_f_n = f_n // group - stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[ - 'dilation'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilation'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -82,39 +96,52 @@ def conv2d_forward_naive(input, if len(pad) == 4: pad_h_0, pad_h_1 = pad[0], pad[1] pad_w_0, pad_w_1 = pad[2], pad[3] - out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * - (f_h - 1) + 1)) // stride[0] - out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * - (f_w - 1) + 1)) // stride[1] + out_h = ( + 1 + + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * (f_h - 1) + 1)) + // stride[0] + ) + out_w = ( + 1 + + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * (f_w - 1) + 1)) + // stride[1] + ) out = np.zeros((out_n, out_c, out_h, out_w)) - d_bolck_h = (dilation[0] * (f_h - 1) + 1) - d_bolck_w = (dilation[1] * (f_w - 1) + 1) + d_bolck_h = dilation[0] * (f_h - 1) + 1 + d_bolck_w = dilation[1] * (f_w - 1) + 1 - input_pad = np.pad(input, - ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), - 
mode='constant', - constant_values=0) + input_pad = np.pad( + input, + ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), + mode='constant', + constant_values=0, + ) filter_dilation = np.zeros((f_n, f_c, d_bolck_h, d_bolck_w)) - filter_dilation[:, :, 0:d_bolck_h:dilation[0], - 0:d_bolck_w:dilation[1]] = filter + filter_dilation[ + :, :, 0 : d_bolck_h : dilation[0], 0 : d_bolck_w : dilation[1] + ] = filter for i in range(out_h): for j in range(out_w): for g in range(group): - input_pad_masked = \ - input_pad[:, g * f_c:(g + 1) * f_c, - i * stride[0]:i * stride[0] + d_bolck_h, - j * stride[1]:j * stride[1] + d_bolck_w] - - f_sub = filter_dilation[g * sub_f_n:(g + 1) * sub_f_n, :, :, :] + input_pad_masked = input_pad[ + :, + g * f_c : (g + 1) * f_c, + i * stride[0] : i * stride[0] + d_bolck_h, + j * stride[1] : j * stride[1] + d_bolck_w, + ] + + f_sub = filter_dilation[ + g * sub_f_n : (g + 1) * sub_f_n, :, :, : + ] # sub_f_n == sub_out_c for k in range(sub_out_c): # Multiplication of Corresponding Elements, then sum all - out[:, g * sub_out_c + k, i, j] = \ - np.sum(input_pad_masked * f_sub[k, :, :, :], - axis=(1, 2, 3)) + out[:, g * sub_out_c + k, i, j] = np.sum( + input_pad_masked * f_sub[k, :, :, :], axis=(1, 2, 3) + ) if channel_last: out = np.transpose(out, [0, 2, 3, 1]) @@ -123,9 +150,7 @@ def conv2d_forward_naive(input, def create_test_channel_last_class(parent): - class TestChannelLastCase(parent): - def init_data_format(self): self.data_format = "NHWC" @@ -139,9 +164,7 @@ def create_test_channel_last_class(parent): def create_test_padding_SAME_class(parent): - class TestPaddingSMAECase(parent): - def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" @@ -152,9 +175,7 @@ def create_test_padding_SAME_class(parent): def create_test_padding_VALID_class(parent): - class TestPaddingVALIDCase(parent): - def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" @@ -165,13 +186,11 @@ def create_test_padding_VALID_class(parent): class XPUTestConv2DOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'conv2d' self.use_dynamic_create_class = False class TestConv2DOp(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -190,7 +209,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } np.random.seed(100) @@ -205,16 +224,18 @@ class XPUTestConv2DOp(XPUOpTestWrapper): else: input2 = input np.random.seed(1) - filter = np.random.uniform(-1, 1, - self.filter_size).astype(self.dtype) + filter = np.random.uniform(-1, 1, self.filter_size).astype( + self.dtype + ) - output, _, _, _, _ = conv2d_forward_naive(input2, filter, - self.groups, conv2d_param) + output, _, _, _, _ = conv2d_forward_naive( + input2, filter, self.groups, conv2d_param + ) output = output.astype(self.dtype) self.inputs = { 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -224,15 +245,15 @@ class XPUTestConv2DOp(XPUOpTestWrapper): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, - 'fuse_relu_before_depthwise_conv': - self.fuse_relu_before_depthwise_conv, - 'exhaustive_search': self.exhaustive_search + 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, + 'exhaustive_search': self.exhaustive_search, } self.outputs = 
{'Output': output} def has_cuda(self): - return core.is_compiled_with_cuda() and (self.use_cudnn - or self.use_cuda) + return core.is_compiled_with_cuda() and ( + self.use_cudnn or self.use_cuda + ) def test_check_output(self): if core.is_compiled_with_xpu(): @@ -240,33 +261,40 @@ class XPUTestConv2DOp(XPUOpTestWrapper): self.check_output_with_place(self.place) def test_check_grad(self): - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output') + self.check_grad_with_place( + self.place, {'Input', 'Filter'}, 'Output' + ) def test_check_grad_no_filter(self): - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + self.place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad_no_input(self): - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad_with_place( + self.place, ['Filter'], 'Output', no_grad_set=set(['Input']) + ) def init_test_case(self): self.pad = [0, 0] @@ -289,7 +317,6 @@ class XPUTestConv2DOp(XPUOpTestWrapper): pass class TestWithPad(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -299,7 +326,6 @@ class XPUTestConv2DOp(XPUOpTestWrapper): self.filter_size = [6, f_c, 3, 3] class TestWithStride(TestConv2DOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] @@ -309,7 +335,6 @@ class XPUTestConv2DOp(XPUOpTestWrapper): self.filter_size = [6, f_c, 3, 3] class TestWith1x1(TestConv2DOp): - def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] @@ -324,13 +349,11 @@ class XPUTestConv2DOp(XPUOpTestWrapper): # ---- test asymmetric padding ---- class XPUTestConv2DOp_v2(XPUOpTestWrapper): - def __init__(self): self.op_name = 'conv2d' self.use_dynamic_create_class = False class TestConv2DOp_v2(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -351,7 +374,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): conv2d_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } np.random.seed(100) @@ -366,17 +389,22 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): else: input2 = input np.random.seed(8) - filter = np.random.uniform(-1, 1, - self.filter_size).astype(self.dtype) - output, _, _, _, _ = conv2d_forward_naive(input2, filter, - self.groups, conv2d_param, - self.padding_algorithm, - self.data_format) + filter = np.random.uniform(-1, 1, self.filter_size).astype( + self.dtype + ) + output, _, _, _, _ = conv2d_forward_naive( + input2, + filter, + self.groups, + conv2d_param, + self.padding_algorithm, + self.data_format, + ) output = output.astype(self.dtype) self.inputs = { 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': 
XPUOpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -387,15 +415,15 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, - 'fuse_relu_before_depthwise_conv': - self.fuse_relu_before_depthwise_conv, - 'exhaustive_search': self.exhaustive_search + 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, + 'exhaustive_search': self.exhaustive_search, } self.outputs = {'Output': output} def has_cuda(self): - return core.is_compiled_with_cuda() and (self.use_cudnn - or self.use_cuda) + return core.is_compiled_with_cuda() and ( + self.use_cudnn or self.use_cuda + ) def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -405,35 +433,42 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, {'Input', 'Filter'}, - 'Output') + self.check_grad_with_place( + self.place, {'Input', 'Filter'}, 'Output' + ) def test_check_grad_no_filter(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + self.place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad_no_input(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if (hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True): + if ( + hasattr(self, "no_need_check_grad") + and self.no_need_check_grad == True + ): return if core.is_compiled_with_xpu(): paddle.enable_static() - self.check_grad_with_place(self.place, ['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad_with_place( + self.place, ['Filter'], 'Output', no_grad_set=set(['Input']) + ) def init_test_case(self): self.pad = [0, 0] @@ -463,13 +498,11 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): pass class TestConv2DOp_AsyPadding(TestConv2DOp_v2): - def init_paddings(self): self.pad = [0, 0, 0, 0] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW @@ -482,7 +515,6 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): self.padding_algorithm = "EXPLICIT" class TestWithStride_AsyPadding(TestConv2DOp_v2): - def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW @@ -496,14 +528,13 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): class XPUTestConv2DOp_NHWC(XPUOpTestWrapper): - def __init__(self): self.op_name = 'conv2d' self.use_dynamic_create_class = False class TestConv2DOp_AsyPadding_NHWC( - XPUTestConv2DOp_v2.TestConv2DOp_AsyPadding): - + XPUTestConv2DOp_v2.TestConv2DOp_AsyPadding + ): def init_data_format(self): self.data_format = "NHWC" @@ -511,9 +542,9 @@ class XPUTestConv2DOp_NHWC(XPUOpTestWrapper): N, C, H, W = self.input_size self.input_size = [N, H, W, C] - class 
TestWithPad_AsyPadding_NHWC(XPUTestConv2DOp_v2.TestWithPad_AsyPadding - ): - + class TestWithPad_AsyPadding_NHWC( + XPUTestConv2DOp_v2.TestWithPad_AsyPadding + ): def init_data_format(self): self.data_format = "NHWC" @@ -526,19 +557,21 @@ support_types = get_xpu_op_support_types('conv2d') for stype in ['float32']: create_test_class(globals(), XPUTestConv2DOp, stype) create_test_class(globals(), XPUTestConv2DOp_v2, stype) - create_test_class(globals(), - XPUTestConv2DOp_NHWC, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) - -#---------- test SAME VALID ----------- -#create_test_padding_SAME_class(TestConv2DOp_AsyPadding) -#create_test_padding_SAME_class(TestWithPad_AsyPadding) -#create_test_padding_SAME_class(TestWithStride_AsyPadding) - -#create_test_padding_VALID_class(TestConv2DOp_AsyPadding) -#create_test_padding_VALID_class(TestWithPad_AsyPadding) -#create_test_padding_VALID_class(TestWithStride_AsyPadding) + create_test_class( + globals(), + XPUTestConv2DOp_NHWC, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) + +# ---------- test SAME VALID ----------- +# create_test_padding_SAME_class(TestConv2DOp_AsyPadding) +# create_test_padding_SAME_class(TestWithPad_AsyPadding) +# create_test_padding_SAME_class(TestWithStride_AsyPadding) + +# create_test_padding_VALID_class(TestConv2DOp_AsyPadding) +# create_test_padding_VALID_class(TestWithPad_AsyPadding) +# create_test_padding_VALID_class(TestWithStride_AsyPadding) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py index 49011dd36187d9b6dbc00051d6764558333f790a..47d13d04b3a1a1a95ca805b8ffe4b5dacaac374c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py @@ -19,7 +19,11 @@ import unittest import numpy as np from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() @@ -28,9 +32,10 @@ paddle.enable_static() def conv2dtranspose_forward_naive(input_, filter_, attrs): padding_algorithm = attrs['padding_algorithm'] if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." 
% str(padding_algorithm) + ) if attrs['data_format'] == 'NHWC': input_ = np.transpose(input_, [0, 3, 1, 2]) @@ -41,18 +46,22 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): out_c = f_out_c * groups sub_in_c = in_c // groups - stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ - 'dilations'] + stride, pad, dilations = ( + attrs['strides'], + attrs['paddings'], + attrs['dilations'], + ) # update pad and dilation def _get_padding_with_SAME(input_shape, kernel_size, kernel_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, - kernel_size, - kernel_stride): + for input_size, filter_size, stride_size in zip( + input_shape, kernel_size, kernel_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -86,43 +95,54 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): if 'output_padding' in attrs: out_pad_h = attrs['output_padding'][0] out_pad_w = attrs['output_padding'][1] - out = np.zeros((in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), - dtype=input_.dtype) + out = np.zeros( + (in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype + ) for n in range(in_n): for i in range(in_h): for j in range(in_w): for g in range(groups): - input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i, - j] # (c) + input_masked = input_[ + n, g * sub_in_c : (g + 1) * sub_in_c, i, j + ] # (c) input_masked = np.reshape(input_masked, (sub_in_c, 1, 1)) input_masked = np.tile(input_masked, (1, f_h, f_w)) for k in range(f_out_c): tmp_out = np.sum( - input_masked * - filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :], - axis=0) + input_masked + * filter_[ + g * sub_in_c : (g + 1) * sub_in_c, k, :, : + ], + axis=0, + ) i1, i2 = i * stride[0], i * stride[0] + d_bolck_h j1, j2 = j * stride[1], j * stride[1] + d_bolck_w - out[n, g * f_out_c + k, i1:i2:dilations[0], - j1:j2:dilations[1]] += tmp_out - - out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h, - pad_w_0:out_w - pad_w_1 + out_pad_w] + out[ + n, + g * f_out_c + k, + i1 : i2 : dilations[0], + j1 : j2 : dilations[1], + ] += tmp_out + + out = out[ + :, + :, + pad_h_0 : out_h - pad_h_1 + out_pad_h, + pad_w_0 : out_w - pad_w_1 + out_pad_w, + ] if attrs['data_format'] == 'NHWC': out = np.transpose(out, [0, 2, 3, 1]) return out class XPUTestConv2DTransposeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'conv2d_transpose' self.use_dynamic_create_class = False class TestConv2DTransposeOp(XPUOpTest): - def setUp(self): # init as conv transpose self.need_check_grad = True @@ -151,7 +171,7 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, 'use_mkldnn': self.use_mkldnn, - 'data_format': self.data_format + 'data_format': self.data_format, } if self.output_size is not None: self.attrs['output_size'] = self.output_size @@ -160,7 +180,8 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.attrs['output_padding'] = self.output_padding output = conv2dtranspose_forward_naive( - input_, filter_, self.attrs).astype(self.dtype) + input_, filter_, self.attrs + ).astype(self.dtype) self.outputs = {'Output': output} @@ -169,20 +190,21 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): def test_check_grad_no_input(self): if self.need_check_grad: - self.check_grad_with_place(self.place, 
['Filter'], - 'Output', - no_grad_set=set(['Input'])) + self.check_grad_with_place( + self.place, ['Filter'], 'Output', no_grad_set=set(['Input']) + ) def test_check_grad_no_filter(self): if self.need_check_grad: - self.check_grad_with_place(self.place, ['Input'], - 'Output', - no_grad_set=set(['Filter'])) + self.check_grad_with_place( + self.place, ['Input'], 'Output', no_grad_set=set(['Filter']) + ) def test_check_grad(self): if self.need_check_grad: - self.check_grad_with_place(self.place, set(['Input', 'Filter']), - 'Output') + self.check_grad_with_place( + self.place, set(['Input', 'Filter']), 'Output' + ) def init_test_case(self): self.pad = [0, 0] @@ -199,7 +221,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.op_type = "conv2d_transpose" class TestWithSymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -210,7 +231,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.filter_size = [f_c, 6, 3, 3] class TestWithAsymmetricPad(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 0, 1, 2] self.stride = [1, 1] @@ -221,7 +241,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.filter_size = [f_c, 6, 3, 3] class TestWithSAMEPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [2, 1] self.dilations = [1, 2] @@ -232,7 +251,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.padding_algorithm = 'SAME' class TestWithVALIDPad(TestConv2DTransposeOp): - def init_test_case(self): self.stride = [1, 1] self.dilations = [1, 1] @@ -243,7 +261,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.padding_algorithm = 'VALID' class TestWithGroups(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -254,7 +271,6 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper): self.filter_size = [f_c, 3, 3, 3] class TestWithStride(TestConv2DTransposeOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_deformable_conv_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_deformable_conv_op_xpu.py index 85b05e8990ba9a3b49b42bca5b03179919787a19..678afafc08c4b41b0a55a856133f9a53486e77cf 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_deformable_conv_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_deformable_conv_op_xpu.py @@ -22,7 +22,11 @@ import paddle.fluid.core as core import paddle.fluid as fluid from op_test_xpu import OpTest, XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def dmc_bilinear(data_im, height, width, h, w): @@ -64,8 +68,11 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): assert f_c * group == in_c assert np.mod(out_c, group) == 0 - stride, pad, dilation = conv_param['stride'], conv_param['pad'],\ - conv_param['dilation'] + stride, pad, dilation = ( + conv_param['stride'], + conv_param['pad'], + conv_param['dilation'], + ) out_h = 1 + (in_h + 2 * pad[0] - (dilation[0] * (f_h - 1) + 1)) // stride[0] out_w = 1 + (in_w + 2 * pad[1] - (dilation[1] * (f_w - 1) + 1)) // stride[1] assert out_h == in_h @@ -78,31 +85,47 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): for w in range(out_w): for kh in range(f_h): for kw in range(f_w): - offset_h_table = \ - offset[n, ::2, h, w].reshape(f_h, f_w) - offset_w_table = \ 
- offset[n, 1::2, h, w].reshape(f_h, f_w) - mask_table = \ - mask[n, :, h, w].reshape(f_h, f_w) + offset_h_table = offset[n, ::2, h, w].reshape( + f_h, f_w + ) + offset_w_table = offset[n, 1::2, h, w].reshape( + f_h, f_w + ) + mask_table = mask[n, :, h, w].reshape(f_h, f_w) offset_h = offset_h_table[kh, kw] offset_w = offset_w_table[kh, kw] val = 0 - im_h = h * stride[0] + kh * dilation[0] \ - + offset_h - pad[0] - im_w = w * stride[0] + kw * dilation[0] \ - + offset_w - pad[1] - if im_h > -1 and im_w > -1 and \ - im_h < in_h and im_w < in_h: - val = dmc_bilinear(input[n, c], in_h, in_w, - im_h, im_w) + im_h = ( + h * stride[0] + + kh * dilation[0] + + offset_h + - pad[0] + ) + im_w = ( + w * stride[0] + + kw * dilation[0] + + offset_w + - pad[1] + ) + if ( + im_h > -1 + and im_w > -1 + and im_h < in_h + and im_w < in_h + ): + val = dmc_bilinear( + input[n, c], in_h, in_w, im_h, im_w + ) val_out = val * mask_table[kh, kw] - col_buffer[n, c * f_h * f_w + kh * f_w + kw, - h * in_w + w] = val_out + col_buffer[ + n, c * f_h * f_w + kh * f_w + kw, h * in_w + w + ] = val_out out = np.zeros((in_n, group, int(out_c // group), out_h * out_w)) weight = filter.reshape(group, int(out_c // group), f_c * f_h * f_w) col_buffer = col_buffer.reshape( - (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w)) + (in_n, group, int(in_c // group * f_h * f_w), in_h * in_w) + ) for n in range(in_n): for g in range(group): out[n, g] = np.matmul(weight[g], col_buffer[n, g]) @@ -111,13 +134,11 @@ def dconv_im2col_gemm(input, offset, mask, filter, group, conv_param): class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'deformable_conv' self.use_dynamic_create_class = False class TestModulatedDeformableConvOp(XPUOpTest): - def setUp(self): self.op_type = "deformable_conv" self.dtype = self.in_type @@ -129,22 +150,23 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): conv_param = { 'stride': self.stride, 'pad': self.pad, - 'dilation': self.dilations + 'dilation': self.dilations, } input = np.random.random(self.input_size).astype(self.dtype) offset = 10 * np.random.random(self.offset_size).astype(self.dtype) mask = 10 * np.random.random(self.mask_size).astype(self.dtype) filter = np.random.random(self.filter_size).astype(self.dtype) - output = dconv_im2col_gemm(input, offset, mask, filter, self.groups, - conv_param) + output = dconv_im2col_gemm( + input, offset, mask, filter, self.groups, conv_param + ) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), 'Mask': OpTest.np_dtype_to_fluid_dtype(mask), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -165,9 +187,11 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): if core.is_compiled_with_xpu(): paddle.enable_static() self.check_grad_with_place( - self.place, {'Input', 'Offset', 'Mask', 'Filter'}, + self.place, + {'Input', 'Offset', 'Mask', 'Filter'}, 'Output', - max_relative_error=0.06) + max_relative_error=0.06, + ) def init_test_case(self): self.pad = [1, 1] @@ -179,17 +203,28 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): self.filter_size = [8, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * 
self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): @@ -199,7 +234,6 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): self.groups = 1 class TestWithDilation(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [2, 2] self.stride = [1, 1] @@ -209,24 +243,34 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] def init_dilation(self): self.dilations = [2, 2] class TestWith3x3(TestModulatedDeformableConvOp): - def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] @@ -236,56 +280,61 @@ class XPUTestModulatedDeformableConvOp(XPUOpTestWrapper): self.filter_size = [6, f_c, 3, 3] self.im2col_step = 1 self.deformable_groups = 1 - offset_c = 2 * self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] - mask_c = self.deformable_groups * self.filter_size[ - 2] * self.filter_size[3] + offset_c = ( + 2 + * self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) + mask_c = ( + self.deformable_groups + * self.filter_size[2] + * self.filter_size[3] + ) self.offset_size = [ - self.input_size[0], offset_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + offset_c, + self.input_size[2], + self.input_size[3], ] self.mask_size = [ - self.input_size[0], mask_c, self.input_size[2], - self.input_size[3] + self.input_size[0], + mask_c, + self.input_size[2], + self.input_size[3], ] class TestModulatedDeformableConvInvalidInput(unittest.TestCase): - def test_error(self): - def test_invalid_input(): paddle.enable_static() input = [1, 3, 32, 32] - offset = fluid.data(name='offset', - shape=[None, 3, 32, 32], - dtype='float32') - mask = fluid.data(name='mask', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask, - num_filters=4, - filter_size=1) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + mask = fluid.data( + name='mask', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, offset, mask, num_filters=4, filter_size=1 + ) self.assertRaises(TypeError, test_invalid_input) def test_invalid_offset(): paddle.enable_static() - input = fluid.data(name='input', - shape=[None, 3, 32, 32], - dtype='int32') - offset = fluid.data(name='offset', - shape=[None, 3, 32, 
32], - dtype='float32') - mask = fluid.data(name='mask', - shape=[None, 3, 32, 32], - dtype='float32') - loss = fluid.layers.deformable_conv(input, - offset, - mask, - num_filters=4, - filter_size=1) + input = fluid.data( + name='input', shape=[None, 3, 32, 32], dtype='int32' + ) + offset = fluid.data( + name='offset', shape=[None, 3, 32, 32], dtype='float32' + ) + mask = fluid.data( + name='mask', shape=[None, 3, 32, 32], dtype='float32' + ) + loss = fluid.layers.deformable_conv( + input, offset, mask, num_filters=4, filter_size=1 + ) self.assertRaises(TypeError, test_invalid_offset) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_depthwise_conv2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_depthwise_conv2d_op_xpu.py index a8bfd681634201a786435a05f930a85949705192..75c6bd7b524c4e8f7e606ee3b7fa731f15dd3954 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_depthwise_conv2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_depthwise_conv2d_op_xpu.py @@ -22,17 +22,19 @@ import paddle paddle.enable_static() from test_conv2d_op_xpu import XPUTestConv2DOp, XPUTestConv2DOp_v2 -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'depthwise_conv2d' self.use_dynamic_create_class = False class TestDepthwiseConv(XPUTestConv2DOp.TestConv2DOp): - def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -45,7 +47,6 @@ class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): self.op_type = "depthwise_conv2d" class TestDepthwiseConv2(XPUTestConv2DOp.TestConv2DOp): - def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -58,7 +59,6 @@ class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): self.op_type = "depthwise_conv2d" class TestDepthwiseConv3(XPUTestConv2DOp.TestConv2DOp): - def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -71,7 +71,6 @@ class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilation(XPUTestConv2DOp.TestConv2DOp): - def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -85,7 +84,6 @@ class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilation2(XPUTestConv2DOp.TestConv2DOp): - def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -100,13 +98,11 @@ class XPUTestDepthwiseConv2DOp(XPUOpTestWrapper): class XPUTestDepthwiseConv2DOp_v2(XPUOpTestWrapper): - def __init__(self): self.op_name = 'depthwise_conv2d' self.use_dynamic_create_class = False class TestDepthwiseConv_AsyPadding(XPUTestConv2DOp_v2.TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = False self.stride = [2, 2] @@ -122,7 +118,6 @@ class XPUTestDepthwiseConv2DOp_v2(XPUOpTestWrapper): self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv2_AsyPadding(XPUTestConv2DOp_v2.TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = False self.stride = [1, 1] @@ -138,7 +133,6 @@ class XPUTestDepthwiseConv2DOp_v2(XPUOpTestWrapper): self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv3_AsyPadding(XPUTestConv2DOp_v2.TestConv2DOp_v2): - def init_test_case(self): self.use_cuda = False self.stride = [1, 1] @@ -154,8 +148,8 @@ class XPUTestDepthwiseConv2DOp_v2(XPUOpTestWrapper): self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilation_AsyPadding( 
- XPUTestConv2DOp_v2.TestConv2DOp_v2): - + XPUTestConv2DOp_v2.TestConv2DOp_v2 + ): def init_test_case(self): self.use_cuda = False self.pad = [1, 1] @@ -173,8 +167,8 @@ class XPUTestDepthwiseConv2DOp_v2(XPUOpTestWrapper): self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilation2_AsyPadding( - XPUTestConv2DOp_v2.TestConv2DOp_v2): - + XPUTestConv2DOp_v2.TestConv2DOp_v2 + ): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py index 3fbd3c6c2fef35431ffb99892b8cf5bf5798baed..8c41cb8675ccb5875b9735825eb87221e0bc5384 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py @@ -44,17 +44,16 @@ def get_vaild_warning_num(warning, w): class TestDeviceGuard(unittest.TestCase): - def test_device_guard(self): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.full(shape=[1, 3, 8, 8], - fill_value=0.5, - dtype='float32') - data2 = paddle.full(shape=[1, 3, 5, 5], - fill_value=0.5, - dtype='float32') + data1 = paddle.full( + shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32' + ) + data2 = paddle.full( + shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32' + ) shape = paddle.shape(data2) with paddle.static.device_guard("cpu"): shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) @@ -75,12 +74,12 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.full(shape=[1, 3, 8, 8], - fill_value=0.5, - dtype='float32') - data2 = paddle.full(shape=[1, 3, 5, 5], - fill_value=0.5, - dtype='float32') + data1 = paddle.full( + shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32' + ) + data2 = paddle.full( + shape=[1, 3, 5, 5], fill_value=0.5, dtype='float32' + ) shape = paddle.shape(data2) with paddle.static.device_guard("cpu"): shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4]) @@ -101,32 +100,50 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - x = paddle.full(shape=[2, 255, 13, 13], - fill_value=0.3, - dtype='float32') - gt_box = paddle.full(shape=[2, 6, 4], - fill_value=0.5, - dtype='float32') + x = paddle.full( + shape=[2, 255, 13, 13], fill_value=0.3, dtype='float32' + ) + gt_box = paddle.full( + shape=[2, 6, 4], fill_value=0.5, dtype='float32' + ) gt_label = paddle.full(shape=[2, 6], fill_value=1.0, dtype='int32') - gt_score = paddle.full(shape=[2, 6], - fill_value=0.5, - dtype='float32') + gt_score = paddle.full( + shape=[2, 6], fill_value=0.5, dtype='float32' + ) anchors = [ - 10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, - 198, 373, 326 + 10, + 13, + 16, + 30, + 33, + 23, + 30, + 61, + 62, + 45, + 59, + 119, + 116, + 90, + 156, + 198, + 373, + 326, ] anchor_mask = [0, 1, 2] with paddle.static.device_guard("xpu"): # yolov3_loss only has cpu kernel, so its cpu kernel will be executed - loss = fluid.layers.yolov3_loss(x=x, - gt_box=gt_box, - gt_label=gt_label, - gt_score=gt_score, - anchors=anchors, - anchor_mask=anchor_mask, - class_num=80, - ignore_thresh=0.7, - downsample_ratio=32) + loss = fluid.layers.yolov3_loss( + 
x=x, + gt_box=gt_box, + gt_label=gt_label, + gt_score=gt_score, + anchors=anchors, + anchor_mask=anchor_mask, + class_num=80, + ignore_thresh=0.7, + downsample_ratio=32, + ) execute(main_program, startup_program) @@ -159,7 +176,6 @@ class TestDeviceGuard(unittest.TestCase): execute(main_program, startup_program) def test_error(self): - def device_attr(): with paddle.static.device_guard("cpu1"): out = paddle.full(shape=[1], fill_value=0.2, dtype='float32') @@ -176,17 +192,18 @@ class TestDeviceGuard(unittest.TestCase): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard(main_program, startup_program): - data1 = paddle.static.data(name="data_1", - shape=[4, 2], - dtype="float32") - label = paddle.static.data(name="label", - shape=[4, 1], - dtype="int64") + data1 = paddle.static.data( + name="data_1", shape=[4, 2], dtype="float32" + ) + label = paddle.static.data( + name="label", shape=[4, 1], dtype="int64" + ) fc1 = paddle.static.nn.fc(x=data1, size=10) fc2 = paddle.static.nn.fc(x=fc1, size=10) with paddle.static.device_guard("xpu"): out = paddle.nn.functional.softmax_with_cross_entropy( - logits=fc1 + fc2, label=label) + logits=fc1 + fc2, label=label + ) loss = paddle.mean(out) opt = paddle.optimizer.SGD(0.1) opt.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py index 7aff1acfdf7ff991ee321dc546c498da7ce7dd06..794ff490d7ec908206e78335ccdc3ce9f24517e8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py @@ -25,17 +25,19 @@ from op_test_xpu import XPUOpTest paddle.enable_static() -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestDropoutOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'dropout' self.use_dynamic_create_class = False class TestDropoutOp(XPUOpTest): - def setUp(self): self.init_inputs_shape() self.init_attrs() @@ -46,7 +48,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): 'dropout_prob': self.dropout_prob, 'fix_seed': self.fix_seed, 'is_test': self.is_test, - 'dropout_implementation': self.dropout_implementation + 'dropout_implementation': self.dropout_implementation, } out = self.inputs['X'] * (1.0 - self.dropout_prob) @@ -74,19 +76,19 @@ class XPUTestDropoutOp(XPUOpTestWrapper): self.check_output() def test_check_grad_normal(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return self.check_grad(['X'], 'Out') class TestDropoutOpInput1d(TestDropoutOp): - def init_inputs_shape(self): self.shape = [2000] class TestDropoutOp2(TestDropoutOp): - def init_inputs_shape(self): self.shape = [32, 64] @@ -97,12 +99,10 @@ class XPUTestDropoutOp(XPUOpTestWrapper): self.dropout_implementation = "upscale_in_train" class TestDropoutOp3(TestDropoutOp): - def init_inputs_shape(self): self.shape = [32, 64, 2] class TestDropoutOp4(TestDropoutOp): - def init_attrs(self): self.__class__.no_need_check_grad = True self.dropout_prob = 0.35 @@ -111,7 +111,6 @@ class XPUTestDropoutOp(XPUOpTestWrapper): self.dropout_implementation = "downgrade_in_infer" class TestDropoutOp5(TestDropoutOp): - def 
init_inputs_shape(self): self.shape = [32, 64, 3] @@ -123,15 +122,16 @@ class XPUTestDropoutOp(XPUOpTestWrapper): self.dropout_implementation = "downgrade_in_infer" class TestDropoutOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, - 5]), [[1, 1, 1, 1]], - fluid.CPUPlace()) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), + [[1, 1, 1, 1]], + fluid.CPUPlace(), + ) fluid.layers.dropout(x1, dropout_prob=0.5) self.assertRaises(TypeError, test_Variable) @@ -139,15 +139,14 @@ class XPUTestDropoutOp(XPUOpTestWrapper): def test_dtype(): # the input dtype of dropout must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="int32") + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="int32" + ) fluid.layers.dropout(x2, dropout_prob=0.5) self.assertRaises(TypeError, test_dtype) class TestDropoutCAPI(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -159,13 +158,12 @@ class XPUTestDropoutOp(XPUOpTestWrapper): input_np = np.random.random([40, 40]).astype(self.in_type) result_np = input_np input = fluid.dygraph.to_variable(input_np) - m = paddle.nn.Dropout(p=0.) + m = paddle.nn.Dropout(p=0.0) m.eval() result = m(input) np.testing.assert_allclose(result.numpy(), result_np) class TestDropoutBackward(unittest.TestCase): - def setUp(self): np.random.seed(123) self.places = [fluid.CPUPlace()] @@ -183,13 +181,15 @@ class XPUTestDropoutOp(XPUOpTestWrapper): input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False - out, mask = _legacy_C_ops.dropout(input, 'dropout_prob', - 0.5) + out, mask = _legacy_C_ops.dropout( + input, 'dropout_prob', 0.5 + ) out.backward() np.testing.assert_allclose( input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy())) + self.cal_grad_downscale_in_infer(mask.numpy()), + ) def test_backward_upscale_train(self): for place in self.places: @@ -198,15 +198,19 @@ class XPUTestDropoutOp(XPUOpTestWrapper): prob = 0.5 input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False - out, mask = _legacy_C_ops.dropout(input, 'dropout_prob', - prob, - "dropout_implementation", - "upscale_in_train") + out, mask = _legacy_C_ops.dropout( + input, + 'dropout_prob', + prob, + "dropout_implementation", + "upscale_in_train", + ) out.backward() np.testing.assert_allclose( input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob)) + self.cal_grad_upscale_train(mask.numpy(), prob), + ) def test_backward_upscale_train_2(self): for place in self.places: @@ -215,15 +219,19 @@ class XPUTestDropoutOp(XPUOpTestWrapper): prob = 0.3 input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False - out, mask = _legacy_C_ops.dropout(input, 'dropout_prob', - prob, - "dropout_implementation", - "upscale_in_train") + out, mask = _legacy_C_ops.dropout( + input, + 'dropout_prob', + prob, + "dropout_implementation", + "upscale_in_train", + ) out.backward() np.testing.assert_allclose( input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob)) + self.cal_grad_upscale_train(mask.numpy(), prob), + ) support_types = get_xpu_op_support_types('dropout') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py index 
6ab52fcc836b8c7763db3c15532efb4430c672e3..2d04b3d7549e4ccf7440bacfedcd58921331dd45 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py @@ -22,19 +22,21 @@ from op_test_xpu import XPUOpTest import unittest import paddle.fluid as fluid from paddle.fluid import Program, program_guard -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseAddOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_add' self.use_dynamic_create_class = False class TestElementwiseAddOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_add" self.init_dtype() @@ -43,7 +45,7 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.init_max_relative_error() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -57,27 +59,33 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X', 'Y'], + place, + ['X', 'Y'], 'Out', - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['Y'], + place, + ['Y'], 'Out', no_grad_set=set("X"), - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X'], + place, + ['X'], 'Out', no_grad_set=set("Y"), - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -94,32 +102,30 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.max_relative_error = 0.006 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." + ) class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) self.out = self.x + self.y @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." 
+ ) class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) self.out = self.x + self.y class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -129,7 +135,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 0 class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -139,14 +144,12 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 1 class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) self.out = self.x + self.y.reshape(1, 1, 100) class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -156,7 +159,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 1 class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -166,28 +168,24 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 0 class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) self.out = self.x + self.y class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) self.out = self.x + self.y class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) self.out = self.x + self.y class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -197,9 +195,9 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 1 @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+ ) class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -209,7 +207,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 1 class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -219,7 +216,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = -1 class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -229,7 +225,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = -1 class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) @@ -239,7 +234,6 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = -1 class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype) @@ -249,30 +243,32 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): self.axis = 2 class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_add must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) - self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, - y1) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) + self.assertRaises( + TypeError, fluid.layers.elementwise_add, x1, y1 + ) # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="uint8") - y2 = fluid.layers.data(name='y2', - shape=[3, 4, 5, 6], - dtype="uint8") - self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, - y2) + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8" + ) + y2 = fluid.layers.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8" + ) + self.assertRaises( + TypeError, fluid.layers.elementwise_add, x2, y2 + ) class TestAddOp(unittest.TestCase): - def test_name(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2, 3], dtype="float32") @@ -287,7 +283,7 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -297,7 +293,7 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper): place = fluid.XPUPlace(0) exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -308,7 +304,7 @@ class 
XPUTestElementwiseAddOp(XPUOpTestWrapper): y = fluid.dygraph.to_variable(np_y) z = paddle.add(x, y) np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((np_z == z_expected).all(), True) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py index c969332c5ae9d987250a687eb91076bf41ca3cc2..7f201d3487ed22a10ba0c4f90d4e0f14b21b0048 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py @@ -26,10 +26,10 @@ from paddle.fluid import Program, program_guard paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_add" self.init_dtype() @@ -38,7 +38,7 @@ class TestElementwiseAddOp(XPUOpTest): self.init_max_relative_error() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -52,27 +52,33 @@ class TestElementwiseAddOp(XPUOpTest): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X', 'Y'], + place, + ['X', 'Y'], 'Out', - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['Y'], + place, + ['Y'], 'Out', no_grad_set=set("X"), - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X'], + place, + ['X'], 'Out', no_grad_set=set("Y"), - max_relative_error=self.max_relative_error) + max_relative_error=self.max_relative_error, + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -89,44 +95,46 @@ class TestElementwiseAddOp(XPUOpTest): self.max_relative_error = 0.006 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) self.out = self.x + self.y -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + reason="[skip shape check] Use y_shape(1,1) to test broadcast." 
+) class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 4).astype(self.dtype) self.y = np.random.rand(1, 1).astype(self.dtype) self.out = self.x + self.y -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) + self.x = np.random.random((100,)).astype(self.dtype) + self.y = np.random.random((100,)).astype(self.dtype) self.out = np.add(self.x, self.y) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -136,10 +144,10 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): self.axis = 0 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 100, 3).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) @@ -149,20 +157,20 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(100).astype(self.dtype) self.out = self.x + self.y.reshape(1, 1, 100) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -172,10 +180,10 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype) self.y = np.random.rand(100, 1).astype(self.dtype) @@ -185,40 +193,40 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): self.axis = 0 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 12).astype(self.dtype) self.y = np.random.rand(10, 1, 12).astype(self.dtype) self.out = self.x + self.y -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with 
XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) self.out = self.x + self.y -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) self.out = self.x + self.y -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 10, 12).astype(self.dtype) self.y = np.random.rand(10, 12).astype(self.dtype) @@ -228,12 +236,13 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." +) class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 1).astype(self.dtype) self.y = np.random.rand(1).astype(self.dtype) @@ -243,10 +252,10 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): self.axis = 1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(100, 2, 3).astype(self.dtype) self.y = np.random.rand(100, 1, 1).astype(self.dtype) @@ -256,10 +265,10 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): self.axis = -1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(2, 3, 100).astype(self.dtype) self.y = np.random.rand(1, 1, 100).astype(self.dtype) @@ -269,10 +278,10 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): self.axis = -1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) @@ -282,10 +291,10 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): self.axis = -1 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class 
TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): self.x = np.random.rand(10, 12).astype(self.dtype) self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype) @@ -295,17 +304,19 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): self.axis = 2 -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_add must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1) # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 @@ -315,10 +326,10 @@ class TestElementwiseAddOpError(unittest.TestCase): self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") +@unittest.skipIf( + not paddle.is_compiled_with_xpu(), "core is not compiled with XPU" +) class TestAddOp(unittest.TestCase): - def test_name(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2, 3], dtype="float32") @@ -333,7 +344,7 @@ class TestAddOp(unittest.TestCase): def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') + "y": np.array([1, 5, 2]).astype('float32'), } x = fluid.data(name="x", shape=[3], dtype='float32') @@ -343,7 +354,7 @@ class TestAddOp(unittest.TestCase): place = fluid.XPUPlace(0) exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): @@ -354,7 +365,7 @@ class TestAddOp(unittest.TestCase): y = fluid.dygraph.to_variable(np_y) z = paddle.add(x, y) np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) + z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((np_z == z_expected).all(), True) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py index 956e71cb69ea9b348c9b2086a5573770ae40b500..4144a7068e0fa464bc59d253f96b4ba2f6fc196d 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py @@ -20,19 +20,21 @@ import paddle import paddle.fluid as fluid from op_test import skip_check_grad_ci from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseDivOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_div' self.use_dynamic_create_class = False class ElementwiseDivOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_div" self.dtype 
= self.in_type @@ -48,7 +50,7 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper): def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype), } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y']) @@ -62,133 +64,138 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper): def test_check_grad_normal(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=0.05) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.05 + ) def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.05, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.05, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.05, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.05, + no_grad_set=set('Y'), + ) def init_dtype(self): pass @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." + ) class TestElementwiseDivOp_scalar(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} class TestElementwiseDivOp_Vector(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y']) } class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.attrs = {'axis': 0} self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.outputs = { - 'Out': - np.divide(self.inputs['X'], 
self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): - def init_input_output(self): self.inputs = { - 'X': np.random.uniform(0.1, 1, - [2, 10, 12, 5]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) + 'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype( + self.dtype + ), + 'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.divide(self.inputs['X'], - self.inputs['Y'].reshape(1, 10, 12, 1)) + 'Out': np.divide( + self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1) + ) } class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype), } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y']) } class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp): - def init_input_output(self): self.inputs = { - 'X': np.random.uniform(0.1, 1, - [2, 3, 4, 20]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype) + 'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype( + self.dtype + ), + 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype( + self.dtype + ), } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y']) } class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype), @@ -199,25 +206,26 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper): } class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp): - def init_input_output(self): self.inputs = { - 'X': np.random.uniform(0.1, 1, - [30, 3, 1, 5]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, - [30, 1, 4, 1]).astype(self.dtype), + 'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype( + self.dtype + ), + 'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype( + self.dtype + ), } self.outputs = { 'Out': np.divide(self.inputs['X'], self.inputs['Y']) } class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp): - def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, - [2, 3, 10, 12]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype( + self.dtype + ), } self.attrs = {'axis': 2} @@ -227,18 +235,18 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper): } class TestElementwiseDivBroadcast(unittest.TestCase): - def test_shape_with_batch_sizes(self): with fluid.program_guard(fluid.Program()): - x_var = fluid.data(name='x', - dtype='float32', - shape=[None, 3, None, None]) - one = 2. 
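Every *_broadcast_* case above builds its expected output the same way: pad Y with singleton dimensions at the position given by axis and let NumPy broadcast. A minimal sketch of that reference computation (broadcast_ref is a hypothetical helper for illustration, not part of the test suite):

import numpy as np

def broadcast_ref(x, y, axis=-1, op=np.divide):
    # Pad y with singleton dims so it lines up with x starting at `axis`;
    # axis == -1 means "align y with x's trailing dims".
    if axis == -1:
        axis = x.ndim - y.ndim
    shape = (1,) * axis + y.shape + (1,) * (x.ndim - axis - y.ndim)
    return op(x, y.reshape(shape))

x = np.random.uniform(0.1, 1, (2, 10, 12, 5)).astype('float32')
y = np.random.uniform(0.1, 1, (10, 12)).astype('float32')
ref = broadcast_ref(x, y, axis=1)  # y is treated as shape (1, 10, 12, 1)
assert np.allclose(ref, np.divide(x, y.reshape(1, 10, 12, 1)))

This mirrors the reshape(100, 1, 1), reshape(1, 10, 12, 1), and similar patterns written out explicitly in the hunks above.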
+ x_var = fluid.data( + name='x', dtype='float32', shape=[None, 3, None, None] + ) + one = 2.0 out = one / x_var exe = fluid.Executor(fluid.XPUPlace(0)) - x = np.random.uniform(0.1, 0.6, - (1, 3, 32, 32)).astype('float32') - out_result, = exe.run(feed={'x': x}, fetch_list=[out]) + x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype( + 'float32' + ) + (out_result,) = exe.run(feed={'x': x}, fetch_list=[out]) self.assertEqual((out_result == (2 / x)).all(), True) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py index d954e0d6ce642c9dc7d67cf3d5c3b3be4dabe90c..f557221b488df7d2b058ed08a8a0003fce1cc6f5 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py @@ -19,20 +19,22 @@ import numpy as np import paddle from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() import random class XPUTestElementwiseModOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_floordiv' self.use_dynamic_create_class = False class TestElementwiseModOp(XPUOpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -46,7 +48,7 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -65,7 +67,6 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): pass class TestElementwiseModOp_scalar(TestElementwiseModOp): - def init_input_output(self): scale_x = random.randint(0, 100000) scale_y = random.randint(1, 100000) @@ -74,7 +75,6 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): self.out = np.floor_divide(self.x, self.y) class TestElementwiseModOpInverse(TestElementwiseModOp): - def init_input_output(self): self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype) self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py index 582f73d525c9cd92a7de9aec31ce14cab4edeb3d..f2a05670f41b972047fc26a6d37e5be31e8f7704 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py @@ -19,19 +19,21 @@ import numpy as np from op_test import skip_check_grad_ci from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseMaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_max' self.use_dynamic_create_class = False class TestElementwiseOp(XPUOpTest): - def setUp(self): self.use_xpu = True self.op_type = "elementwise_max" @@ -63,23 +65,29 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper): def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) 
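The check_grad_with_place calls reflowed in this hunk compare an analytic gradient against a numeric estimate; max_relative_error bounds the allowed mismatch and no_grad_set names inputs excluded from the check. A rough, self-contained illustration of that idea for elementwise max (not Paddle's OpTest implementation, just a sketch with a hypothetical numeric_grad helper), using the same sign trick these tests use to keep X and Y apart, since max and min are not differentiable where the operands tie:

import numpy as np

def numeric_grad(f, x, eps=1e-3):
    # Central finite differences, one element at a time.
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    for _ in it:
        i = it.multi_index
        orig = x[i]
        x[i] = orig + eps
        hi = f(x).sum()
        x[i] = orig - eps
        lo = f(x).sum()
        x[i] = orig
        g[i] = (hi - lo) / (2 * eps)
    return g

x = np.random.uniform(0.5, 1, (5, 4)).astype('float64')
sgn = np.random.choice([-1, 1], (5, 4))
y = x + sgn * np.random.uniform(0.1, 1, (5, 4))   # keeps |x - y| >= 0.1: no ties
analytic = (x > y).astype('float64')              # d max(x, y) / dx
numeric = numeric_grad(lambda v: np.maximum(v, y), x.copy())
assert np.abs(analytic - numeric).max() < 0.006   # plays the role of max_relative_error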
- self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.006, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.006, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.006, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.006, + no_grad_set=set('Y'), + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." + ) class TestElementwiseMaxOp_scalar(TestElementwiseOp): - def init_input_output(self): x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype) y = np.array([0.5]).astype(self.dtype) @@ -89,86 +97,85 @@ class XPUTestElementwiseMaxOp(XPUOpTestWrapper): } class TestElementwiseMaxOp_Vector(TestElementwiseOp): - def init_input_output(self): - x = np.random.random((100, )).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype) + x = np.random.random((100,)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype(self.dtype) self.inputs = {'X': x, 'Y': y} self.outputs = { 'Out': np.maximum(self.inputs['X'], self.inputs['Y']) } class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 0} self.outputs = { - 'Out': - np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': - np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): - 
def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype) sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (50, 2)).astype(self.dtype) + y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.maximum(self.inputs['X'], - self.inputs['Y'].reshape(1, 50, 2, 1)) + 'Out': np.maximum( + self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1) + ) } class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype) + y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.outputs = { diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py index c7eb91e83be9110832a7e47222f1f07dfc585673..498410d6dbb68a0c2644e9af4f9069eeb33cd5f8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py @@ -19,19 +19,21 @@ import numpy as np from op_test import skip_check_grad_ci import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseMinOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_min' self.use_dynamic_create_class = False class TestElementwiseOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_min" # If x and y have the same value, the min() is not differentiable. @@ -62,23 +64,29 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper): def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+ ) class TestElementwiseMinOp_scalar(TestElementwiseOp): - def init_input_output(self): x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype) y = np.array([0.5]).astype(self.dtype) @@ -88,82 +96,81 @@ class XPUTestElementwiseMinOp(XPUOpTestWrapper): } class TestElementwiseMinOp_Vector(TestElementwiseOp): - def init_input_output(self): - x = np.random.random((100, )).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype) + x = np.random.random((100,)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype(self.dtype) self.inputs = {'X': x, 'Y': y} self.outputs = { 'Out': np.minimum(self.inputs['X'], self.inputs['Y']) } class TestElementwiseMinOp_broadcast_0(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.attrs = {'axis': 0} self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': - np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwiseMinOp_broadcast_1(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.attrs = {'axis': 1} self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': - np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(1, 100, 1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1) + ) } class TestElementwiseMinOp_broadcast_2(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) - sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100,)).astype(self.dtype) + y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': - np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(1, 1, 100)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100) + ) } class TestElementwiseMinOp_broadcast_3(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype) sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (25, 4)).astype(self.dtype) + y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (25, 4)).astype( + self.dtype + ) self.attrs = {'axis': 1} self.inputs = {'X': x, 'Y': y} self.outputs = { - 'Out': - np.minimum(self.inputs['X'], - self.inputs['Y'].reshape(1, 25, 4, 1)) + 'Out': np.minimum( + self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1) + ) } class TestElementwiseMinOp_broadcast_4(TestElementwiseOp): - def init_input_output(self): x = np.random.uniform(0.5, 
1, (2, 10, 2, 5)).astype(self.dtype) sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype) + y = x + sgn * np.random.uniform(1, 2, (2, 10, 1, 5)).astype( + self.dtype + ) self.inputs = {'X': x, 'Y': y} self.outputs = { 'Out': np.minimum(self.inputs['X'], self.inputs['Y']) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py index f446fd1f2ade4cc67117059a8ca397e296080570..334cd0794b48ff81e370c1dab5b906cde7526142 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py @@ -21,19 +21,21 @@ import paddle.fluid as fluid import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseModOp(XPUOpTestWrapper): - def __init__(self) -> None: self.op_name = 'elementwise_mod' self.use_dynamic_create_class = False class ElementwiseModOp(XPUOpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -43,7 +45,7 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): self.out = np.mod(self.x, self.y) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -70,29 +72,26 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): self.check_output_with_place(place) class TestElementwiseModOp_broadcast_1(ElementwiseModOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 100, 3).astype(self.dtype), - 'Y': np.random.rand(2, 100, 3).astype(self.dtype) + 'Y': np.random.rand(2, 100, 3).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']} class TestElementwiseModOp_broadcast_2(ElementwiseModOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(22, 128, 3).astype(self.dtype), - 'Y': np.random.rand(22, 128, 3).astype(self.dtype) + 'Y': np.random.rand(22, 128, 3).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = {'Out': self.inputs['X'] % self.inputs['Y']} class TestRemainderOp(unittest.TestCase): - def test_dygraph(self): with fluid.dygraph.guard(): np_x = np.random.rand(22, 128, 3).astype('int64') @@ -105,7 +104,7 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): self.assertEqual((np_z == z_expected).all(), True) np_x = np.array([-3.3, 11.5, -2, 3.5]) - np_y = np.array([-1.2, 2., 3.3, -2.3]) + np_y = np.array([-1.2, 2.0, 3.3, -2.3]) x = paddle.to_tensor(np_x) y = paddle.to_tensor(np_y) z = x % y diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py index 0e09eafbdb78b897af8abcbccd3dd15f29098cc9..22ee95c07d4cee2cc59e8e9512a7148b9d45e708 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py @@ -21,19 +21,21 @@ import paddle.fluid as fluid from paddle.fluid import Program, program_guard import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import 
create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseMulOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_mul' self.use_dynamic_create_class = False class ElementwiseMulOp(XPUOpTest): - def init_kernel_type(self): self.use_mkldnn = False @@ -56,27 +58,33 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X', 'Y'], + place, + ['X', 'Y'], 'Out', - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['Y'], + place, + ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_grad_with_place( - place, ['X'], + place, + ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False)) + check_dygraph=(self.use_mkldnn == False), + ) def init_input_output(self): self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -84,7 +92,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): self.out = np.multiply(self.x, self.y) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -96,33 +104,31 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): pass @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+ ) class TestElementwiseMulOp_scalar(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 3, 4).astype(self.dtype), - 'Y': np.random.rand(1).astype(self.dtype) + 'Y': np.random.rand(1).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_Vector(ElementwiseMulOp): - def init_input_output(self): self.inputs = { - 'X': np.random.random((100, )).astype(self.dtype), - 'Y': np.random.random((100, )).astype(self.dtype) + 'X': np.random.random((100,)).astype(self.dtype), + 'Y': np.random.random((100,)).astype(self.dtype), } self.outputs = { 'Out': np.multiply(self.inputs['X'], self.inputs['Y']) } class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(100, 2, 3).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.outputs = { 'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1) @@ -130,11 +136,10 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): self.attrs = {'axis': 0} class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 100, 3).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.attrs = {'axis': 1} @@ -143,11 +148,10 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): } class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 3, 100).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.outputs = { @@ -155,61 +159,54 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): } class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(self.dtype), - 'Y': np.random.rand(10, 12).astype(self.dtype) + 'Y': np.random.rand(10, 12).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1) + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1) } class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 2, 11).astype(self.dtype), - 'Y': np.random.rand(10, 1, 11).astype(self.dtype) + 'Y': np.random.rand(10, 1, 11).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 4, 2, 3).astype(self.dtype), - 'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype) + 'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 3, 100).astype(self.dtype), - 'Y': np.random.rand(1, 1, 100).astype(self.dtype) + 'Y': np.random.rand(1, 1, 100).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(30, 3, 1, 5).astype(self.dtype), - 'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype) + 'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} 
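The next case, like its add/div/sub siblings, covers the opposite orientation: X has fewer dimensions than Y, and axis appears to give the position in Y's shape where X's dimensions start. A small NumPy sketch of that alignment, under that reading of the attrs:

import numpy as np

x = np.random.rand(10, 12)
y = np.random.rand(2, 3, 10, 12)
axis = 2
x_aligned = x.reshape((1,) * axis + x.shape)   # -> (1, 1, 10, 12)
out = x_aligned * y                            # broadcasts to (2, 3, 10, 12)
assert out.shape == y.shape

With axis=2 the (10, 12) input behaves exactly like a (1, 1, 10, 12) tensor, which is why the expected outputs in these cases reduce to a plain broadcasted multiply, divide, or subtract.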
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 10).astype(self.dtype), - 'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype) + 'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype), } self.attrs = {'axis': 2} @@ -219,26 +216,29 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): } class TestElementwiseMulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # the input of elementwise_mul must be Variable. - x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) - y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), - [[1, 1, 1, 1]], fluid.XPUPlace(0)) - self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, - y1) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + ) + self.assertRaises( + TypeError, fluid.layers.elementwise_mul, x1, y1 + ) # the input dtype of elementwise_mul must be float32 - x2 = fluid.layers.data(name='x2', - shape=[3, 4, 5, 6], - dtype="uint8") - y2 = fluid.layers.data(name='y2', - shape=[3, 4, 5, 6], - dtype="uint8") - self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, - y2) + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8" + ) + y2 = fluid.layers.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8" + ) + self.assertRaises( + TypeError, fluid.layers.elementwise_mul, x2, y2 + ) support_types = get_xpu_op_support_types('elementwise_mul') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py index 6387346f4ec67136f8cb624251c61441aed2e31f..95315d55878c7fdffb3d16d55b4a62cc026831eb 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py @@ -19,20 +19,22 @@ import numpy as np import paddle from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @skip_check_grad_ci(reason="XPU does not support grad op currently") class XPUTestElementwisePowOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_pow' self.use_dynamic_create_class = False class TestElementwisePowOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_pow" self.dtype = self.in_type @@ -42,7 +44,7 @@ class XPUTestElementwisePowOp(XPUOpTestWrapper): def compute_input_output(self): self.inputs = { 'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype), - 'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype) + 'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} @@ -52,110 +54,107 @@ class XPUTestElementwisePowOp(XPUOpTestWrapper): self.check_output_with_place(place) class TestElementwisePowOp_big_shape_1(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], 
self.inputs['Y'])} class TestElementwisePowOp_big_shape_2(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype), - 'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype) + 'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." + ) class TestElementwisePowOp_scalar(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_tensor(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype), - 'Y': np.random.uniform(1, 3, [100]).astype(self.dtype) + 'Y': np.random.uniform(1, 3, [100]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_broadcast_0(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOp_broadcast_1(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': np.power(self.inputs['X'], - self.inputs['Y'].reshape(100, 1)) + 'Out': np.power( + self.inputs['X'], self.inputs['Y'].reshape(100, 1) + ) } class TestElementwisePowOp_broadcast_2(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype), } self.attrs = {'axis': 0} self.outputs = { - 'Out': - np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) + 'Out': np.power( + self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1) + ) } class TestElementwisePowOp_broadcast_3(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { - 'X': np.random.uniform(0.1, 1, - [2, 20, 5, 1]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype) + 'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype( + self.dtype + ), + 'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - np.power(self.inputs['X'], - self.inputs['Y'].reshape(1, 20, 5, 1)) + 'Out': np.power( + self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, 1) + ) } class TestElementwisePowOp_broadcast_4(TestElementwisePowOp): - def compute_input_output(self): self.inputs = { - 'X': np.random.uniform(0.1, 1, - [2, 10, 3, 5]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype) + 'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype( + self.dtype + ), + 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 
5]).astype( + self.dtype + ), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} class TestElementwisePowOpInt(OpTest): - def setUp(self): self.op_type = "elementwise_pow" self.inputs = { 'X': np.asarray([1, 3, 6]), - 'Y': np.asarray([1, 1, 1]) + 'Y': np.asarray([1, 1, 1]), } self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py index 67f4d561f6e3e8b6ab9782508507291ca665c3e7..927855f461d348543d5cbe1034171c9d2d5deb62 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py @@ -20,19 +20,21 @@ import paddle from op_test import skip_check_grad_ci from op_test_xpu import XPUOpTest import unittest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestElementwiseSubOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'elementwise_sub' self.use_dynamic_create_class = False class TestElementwiseOp(XPUOpTest): - def setUp(self): self.op_type = "elementwise_sub" self.use_xpu = True @@ -42,7 +44,7 @@ class XPUTestElementwiseSubOp(XPUOpTestWrapper): def init_input_output(self): self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) + 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} @@ -59,45 +61,49 @@ class XPUTestElementwiseSubOp(XPUOpTestWrapper): def test_check_grad_ingore_x(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y'), + ) @skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") + reason="[skip shape check] Use y_shape(1) to test broadcast." 
+ ) class TestElementwiseSubOp_scalar(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 3, 4).astype(self.dtype), - 'Y': np.random.rand(1).astype(self.dtype) + 'Y': np.random.rand(1).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_Vector(TestElementwiseOp): - def init_input_output(self): self.inputs = { - 'X': np.random.random((100, )).astype(self.dtype), - 'Y': np.random.random((100, )).astype(self.dtype) + 'X': np.random.random((100,)).astype(self.dtype), + 'Y': np.random.random((100,)).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(100, 3, 2).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.attrs = {'axis': 0} @@ -106,11 +112,10 @@ class XPUTestElementwiseSubOp(XPUOpTestWrapper): } class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 100, 3).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.attrs = {'axis': 1} @@ -119,11 +124,10 @@ class XPUTestElementwiseSubOp(XPUOpTestWrapper): } class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 3, 100).astype(self.dtype), - 'Y': np.random.rand(100).astype(self.dtype) + 'Y': np.random.rand(100).astype(self.dtype), } self.outputs = { @@ -131,52 +135,46 @@ class XPUTestElementwiseSubOp(XPUOpTestWrapper): } class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 10, 12, 3).astype(self.dtype), - 'Y': np.random.rand(10, 12).astype(self.dtype) + 'Y': np.random.rand(10, 12).astype(self.dtype), } self.attrs = {'axis': 1} self.outputs = { - 'Out': - self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1) + 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1) } class TestElementwiseSubOp_broadcast_4(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 5, 3, 12).astype(self.dtype), - 'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype) + 'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_1(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(2, 3, 100).astype(self.dtype), - 'Y': np.random.rand(1, 1, 100).astype(self.dtype) + 'Y': np.random.rand(1, 1, 100).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_commonuse_2(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 3, 1, 4).astype(self.dtype), - 'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype) + 'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype), } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp): - def init_input_output(self): self.inputs = { 'X': np.random.rand(10, 12).astype(self.dtype), - 'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype) + 'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype), } self.attrs = {'axis': 2} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py 
b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py index ec4a7e8b1cf3a5c1e5300dd60eb1d2d77dd12140..cb56e9b51f42dbddad1a8570c81fc4e2e29d073c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py @@ -1,4 +1,4 @@ -#Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,20 +21,22 @@ import numpy as np import paddle from op_test_xpu import XPUOpTest from paddle.fluid.framework import convert_np_dtype_to_dtype_ -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestEmptyOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'empty' self.use_dynamic_create_class = False # Situation 1: Attr(shape) is a list(without tensor) class TestEmptyOp(XPUOpTest): - def setUp(self): self.op_type = "empty" self.init_dtype() @@ -50,24 +52,34 @@ class XPUTestEmptyOp(XPUOpTestWrapper): def verify_output(self, outs): data_type = outs[0].dtype if data_type in [ - 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8', - 'float16', 'int16' + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'uint8', + 'float16', + 'int16', ]: max_value = np.nanmax(outs[0]) min_value = np.nanmin(outs[0]) always_full_zero = max_value == 0.0 and min_value == 0.0 always_non_full_zero = max_value >= min_value - self.assertTrue(always_full_zero or always_non_full_zero, - 'always_full_zero or always_non_full_zero.') + self.assertTrue( + always_full_zero or always_non_full_zero, + 'always_full_zero or always_non_full_zero.', + ) elif data_type in ['bool']: total_num = outs[0].size true_num = np.sum(outs[0] == True) false_num = np.sum(outs[0] == False) - self.assertTrue(total_num == true_num + false_num, - 'The value should always be True or False.') + self.assertTrue( + total_num == true_num + false_num, + 'The value should always be True or False.', + ) else: - #pass + # pass self.assertTrue(False, 'invalid data type') def set_shape(self): @@ -90,34 +102,30 @@ class XPUTestEmptyOp(XPUOpTestWrapper): self.__class__.op_type = self.op_type class TestEmptyOpCase1(TestEmptyOp): - def set_shape(self): self.shape = [50] class TestEmptyOpCase2(TestEmptyOp): - def set_shape(self): self.shape = [1, 50, 3, 4] class TestEmptyOpCase3(TestEmptyOp): - def set_shape(self): self.shape = [5, 5, 5] # Situation 2: shape is a tensor class TestEmptyOp_ShapeTensor(TestEmptyOp): - def set_inputs(self): self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")} # Situation 3: Attr(shape) is a list(with tensor) class TestEmptyOp_ShapeTensorList(TestEmptyOp): - def set_inputs(self): shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py index 36b53de7004a8af963e9576378ebb85b31360c61..246696be64315b71059a9f1fd5495a66f8f913eb 100644 --- 
a/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py @@ -20,20 +20,22 @@ sys.path.append("..") from op_test_xpu import XPUOpTest import paddle import paddle.fluid as fluid -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) class XPUTestExpandAsV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'expand_as_v2' self.use_dynamic_create_class = False class TestExpandAsV2XPUOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -65,7 +67,6 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestExpandAsOpRank2(TestExpandAsV2XPUOp): - def set_inputs(self): x = np.random.rand(10, 12).astype(self.dtype) self.inputs = {'X': x} @@ -78,7 +79,6 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): self.outputs = {'Out': output} class TestExpandAsOpRank3(TestExpandAsV2XPUOp): - def set_inputs(self): x = np.random.rand(2, 3, 20).astype(self.dtype) self.inputs = {'X': x} @@ -91,7 +91,6 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): self.outputs = {'Out': output} class TestExpandAsOpRank4(TestExpandAsV2XPUOp): - def set_inputs(self): x = np.random.rand(1, 1, 7, 16).astype(self.dtype) self.inputs = {'X': x} @@ -104,7 +103,6 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): self.outputs = {'Out': output} class TestExpandAsOpRank5(TestExpandAsV2XPUOp): - def set_inputs(self): x = np.random.rand(1, 1, 7, 16, 1).astype(self.dtype) self.inputs = {'X': x} @@ -117,7 +115,6 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): self.outputs = {'Out': output} class TestExpandAsOpRank6(TestExpandAsV2XPUOp): - def set_inputs(self): x = np.random.rand(1, 1, 7, 16, 1, 1).astype(self.dtype) self.inputs = {'X': x} @@ -132,29 +129,28 @@ class XPUTestExpandAsV2Op(XPUOpTestWrapper): # Test python API class TestExpandAsV2API(unittest.TestCase): - def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + ) - y = fluid.layers.data(name='target_tensor', - shape=[2, 12, 14], - append_batch_size=False, - dtype="float32") + y = fluid.layers.data( + name='target_tensor', + shape=[2, 12, 14], + append_batch_size=False, + dtype="float32", + ) out_1 = paddle.expand_as(x, y=y) exe = fluid.Executor(place=fluid.XPUPlace(0)) - res_1 = exe.run(fluid.default_main_program(), - feed={ - "x": input1, - "target_tensor": input2 - }, - fetch_list=[out_1]) + res_1 = exe.run( + fluid.default_main_program(), + feed={"x": input1, "target_tensor": input2}, + fetch_list=[out_1], + ) assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1))) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py index 545bebcb960d82820cbfda41ff9ca7b989f9e2fe..e5a30ad65bc652fa02ffbdf54877f6246bc8a948 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py @@ -20,7 +20,11 @@ sys.path.append("..") from op_test_xpu import XPUOpTest import paddle.fluid as fluid import paddle -from 
xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) @@ -29,13 +33,11 @@ np.random.seed(10) # CANN Op Support X: float32, int32, int64 # Situation 1: shape is a list(without tensor) class XPUTestExpandV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'expand_v2' self.use_dynamic_create_class = False class TestExpandV2XPUOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -65,42 +67,36 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestExpandV2OpRank2_DimExpanding(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = [120] self.shape = [2, 120] self.expand_times = [2, 1] class TestExpandV2OpRank2(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = [1, 140] self.shape = [12, 140] self.expand_times = [12, 1] class TestExpandV2OpRank3_Corner(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) self.expand_times = (1, 1, 1) class TestExpandV2OpRank4(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) self.expand_times = (1, 1, 1, 1) class TestExpandV2OpRank5(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = (2, 4, 1, 15) self.shape = (2, -1, 4, -1) self.expand_times = (1, 1, 4, 1) class TestExpandV2OpRank6(TestExpandV2XPUOp): - def init_data(self): self.ori_shape = (4, 1, 30) self.shape = (2, -1, 4, 30) @@ -108,7 +104,6 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): # Situation 2: shape is a list(with tensor) class TestExpandV2OpXPURank1_tensor_attr(TestExpandV2XPUOp): - def setUp(self): self.set_xpu() self.place = paddle.XPUPlace(0) @@ -117,8 +112,9 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): self.dtype = np.float32 expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): - expand_shapes_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + expand_shapes_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), @@ -135,8 +131,8 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): self.infer_expand_shape = [-1] class TestExpandV2OpRank2_Corner_tensor_attr( - TestExpandV2OpXPURank1_tensor_attr): - + TestExpandV2OpXPURank1_tensor_attr + ): def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] @@ -145,7 +141,6 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): # Situation 3: shape is a tensor class TestExpandV2XPUOp_tensor(TestExpandV2XPUOp): - def setUp(self): self.set_xpu() self.place = paddle.XPUPlace(0) @@ -170,7 +165,6 @@ class XPUTestExpandV2Op(XPUOpTestWrapper): # Situation 5: input x is int32 # skip grad check for int32 class TestExpandV2OpInteger(XPUOpTest): - def init_type(self): self.dtype = 'int32' @@ -198,20 +192,23 @@ class TestExpandV2OpInteger(XPUOpTest): # Test python API class TestExpandV2API(unittest.TestCase): - def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data(name='x', - shape=[12, 14], - append_batch_size=False, - dtype="float32") + x = fluid.layers.data( + name='x', + shape=[12, 14], + append_batch_size=False, + dtype="float32", + ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data(name="expand_shape", - 
shape=[2], - append_batch_size=False, - dtype="int32") + expand_shape = fluid.layers.data( + name="expand_shape", + shape=[2], + append_batch_size=False, + dtype="int32", + ) out_1 = paddle.expand(x, shape=[12, 14]) out_2 = paddle.expand(x, shape=[positive_2, 14]) @@ -220,14 +217,14 @@ class TestExpandV2API(unittest.TestCase): g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=paddle.XPUPlace(0)) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": - input, - "expand_shape": - np.array([12, 14]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) + res_1, res_2, res_3 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + "expand_shape": np.array([12, 14]).astype("int32"), + }, + fetch_list=[out_1, out_2, out_3], + ) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fill_any_like_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fill_any_like_op_xpu.py index 614891d174dbf7686aa1ed6b7ad536247dde8d5a..56b099b1e86bc070825ccf74285c07e9a6ef1aff 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fill_any_like_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fill_any_like_op_xpu.py @@ -20,19 +20,21 @@ import paddle import unittest import numpy as np from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestFillAnyLikeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'fill_any_like' self.use_dynamic_create_class = False class TestFillAnyLikeOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -60,22 +62,18 @@ class XPUTestFillAnyLikeOp(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestFillAnyLikeOp2(TestFillAnyLikeOp): - def set_value(self): self.value = -0.0 class TestFillAnyLikeOp3(TestFillAnyLikeOp): - def set_value(self): self.value = 1.0 class TestFillAnyLikeOp4(TestFillAnyLikeOp): - def init(self): self.value = 1e-9 class TestFillAnyLikeOp5(TestFillAnyLikeOp): - def set_value(self): if self.dtype == "float16": self.value = 0.05 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py index 2494323d885cb5b458e7be948d5f565f15898b75..e09834075a7b9511e35d28ba007884227e093cc4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fill_constant_op_xpu.py @@ -20,21 +20,22 @@ import paddle import numpy as np from op_test import convert_float_to_uint16 from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestFillConstantOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'fill_constant' self.use_dynamic_create_class = False # Situation 1: Attr(shape) is a list(without tensor) class TestFillConstantOp(XPUOpTest): - def setUp(self): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' self.init_dtype() self.set_xpu() self.op_type = "fill_constant" @@ -105,7 +106,7 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): 
self.attrs = { 'shape': self.shape, 'dtype': self.index, - 'value': self.value + 'value': self.value, } self.outputs = {'Out': np.full(self.shape, self.value)} @@ -113,58 +114,55 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestFillConstantOp2(TestFillConstantOp): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' def set_shape(self): self.shape = [10, 10] class TestFillConstantOp3(TestFillConstantOp): - '''Test fill_constant op with specified int64 value - ''' + '''Test fill_constant op with specified int64 value''' def set_shape(self): self.shape = [123, 2, 1] class TestFillConstantOp4(TestFillConstantOp): - '''Test fill_constant op with specified int value - ''' + '''Test fill_constant op with specified int value''' def set_shape(self): self.shape = [123, 3, 2, 1] class TestFillConstantOp5(TestFillConstantOp): - '''Test fill_constant op with specified float value - ''' + '''Test fill_constant op with specified float value''' def set_shape(self): self.shape = [123] # Situation 2: Attr(shape) is a list(with tensor) class TestFillConstantOp1_ShapeTensorList(TestFillConstantOp): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' def set_data(self): shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = { 'shape': self.infer_shape, 'dtype': self.index, - 'value': self.value + 'value': self.value, } self.outputs = {'Out': np.full(self.shape, self.value)} if self.index == 22: self.outputs = { - 'Out': - np.full( + 'Out': np.full( self.shape, convert_float_to_uint16( - np.array([self.value]).astype("float32"))) + np.array([self.value]).astype("float32") + ), + ) } def set_shape(self): @@ -172,14 +170,14 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): self.infer_shape = [123, 1] class TestFillConstantOp2_ShapeTensorList(TestFillConstantOp): - '''Test fill_constant op with default value - ''' + '''Test fill_constant op with default value''' def set_data(self): shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = {"ShapeTensorList": shape_tensor_list} self.attrs = {'shape': self.infer_shape, 'dtype': self.index} @@ -190,23 +188,22 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): self.infer_shape = [1, 1, 1] class TestFillConstantOp3_ShapeTensorList( - TestFillConstantOp1_ShapeTensorList): - + TestFillConstantOp1_ShapeTensorList + ): def set_shape(self): self.shape = [123, 3, 2, 1] self.infer_shape = [123, 111, 11, 1] class TestFillConstantOp4_ShapeTensorList( - TestFillConstantOp1_ShapeTensorList): - + TestFillConstantOp1_ShapeTensorList + ): def set_shape(self): self.shape = [123] self.infer_shape = [1] # Situation 3: shape is a tensor class TestFillConstantOp1_ShapeTensor(TestFillConstantOp): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' def set_data(self): self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")} @@ -214,11 +211,12 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): self.outputs = {'Out': np.full(self.shape, 
self.value)} if self.index == 22: self.outputs = { - 'Out': - np.full( + 'Out': np.full( self.shape, convert_float_to_uint16( - np.array([self.value]).astype("float32"))) + np.array([self.value]).astype("float32") + ), + ) } def set_shape(self): @@ -226,19 +224,18 @@ class XPUTestFillConstantOp(XPUOpTestWrapper): # Situation 4: value is a tensor class TestFillConstantOp1_ValueTensor(TestFillConstantOp): - '''Test fill_constant op with specified value - ''' + '''Test fill_constant op with specified value''' def set_data(self): self.inputs = { "ShapeTensor": np.array(self.shape).astype("int32"), - 'ValueTensor': np.array([self.value]).astype(self.dtype) + 'ValueTensor': np.array([self.value]).astype(self.dtype), } if self.index == 22: self.inputs = { - 'ValueTensor': - convert_float_to_uint16( - np.array([self.value]).astype("float32")) + 'ValueTensor': convert_float_to_uint16( + np.array([self.value]).astype("float32") + ) } self.attrs = {'value': self.value, 'dtype': self.index} self.outputs = {'Out': np.full(self.shape, self.value)} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py index 29d1cb3c25e49ecfdbd938e63021b5cf6ca27203..307889b4f26b0cd76d51d9a371b481fe718ebd7e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py @@ -19,19 +19,21 @@ sys.path.append("..") import numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestFlatten2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'flatten2' self.use_dynamic_create_class = False class TestFlatten2Op(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "flatten2" @@ -44,7 +46,7 @@ class XPUTestFlatten2Op(XPUOpTestWrapper): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype(self.dtype) + "XShape": np.random.random(self.in_shape).astype(self.dtype), } def set_xpu(self): @@ -65,14 +67,12 @@ class XPUTestFlatten2Op(XPUOpTestWrapper): self.attrs = {"axis": self.axis} class TestFlatten2OpWithCornerAxis(TestFlatten2Op): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.axis = 0 self.new_shape = (1, 120) class TestFlatten2OpWithDefaultAxis(TestFlatten2Op): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -81,7 +81,6 @@ class XPUTestFlatten2Op(XPUOpTestWrapper): self.attrs = {} class TestFlatten2OpSixDims(TestFlatten2Op): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py index 1296f5ce72496e9747b0ca01dd6e5f175ea7f84b..aa029301003be880ec8d6312d1ee399007ddb5c3 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py @@ -22,19 +22,21 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + 
create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestFlattenOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'flatten_contiguous_range' self.use_dynamic_create_class = False class TestFlattenOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "flatten_contiguous_range" @@ -52,7 +54,7 @@ class XPUTestFlattenOp(XPUOpTestWrapper): self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.in_shape).astype(self.dtype) + "XShape": np.random.random(self.in_shape).astype(self.dtype), } def set_xpu(self): @@ -68,7 +70,7 @@ class XPUTestFlattenOp(XPUOpTestWrapper): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 self.stop_axis = -1 - self.new_shape = (120) + self.new_shape = 120 def init_attrs(self): self.attrs = { @@ -78,7 +80,6 @@ class XPUTestFlattenOp(XPUOpTestWrapper): } class TestFlattenOp_1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 1 @@ -88,11 +89,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_2(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -102,11 +102,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_3(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -116,11 +115,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_4(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = -2 @@ -130,11 +128,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_5(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 2 @@ -144,11 +141,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.start_axis = 3 @@ -158,11 +154,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_Float32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -173,11 +168,10 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } class TestFlattenOp_int32(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -189,14 +183,13 @@ class XPUTestFlattenOp(XPUOpTestWrapper): self.attrs = { "start_axis": self.start_axis, "stop_axis": self.stop_axis, - 'use_xpu': True + 'use_xpu': True, } def test_check_grad(self): pass class TestFlattenOp_int8(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ 
-207,14 +200,13 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): pass class TestFlattenOp_int64(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 5, 4) self.start_axis = 0 @@ -225,7 +217,7 @@ class XPUTestFlattenOp(XPUOpTestWrapper): def init_attrs(self): self.attrs = { "start_axis": self.start_axis, - "stop_axis": self.stop_axis + "stop_axis": self.stop_axis, } def test_check_grad(self): @@ -233,45 +225,58 @@ class XPUTestFlattenOp(XPUOpTestWrapper): class TestFlatten2OpError(unittest.TestCase): - def test_errors(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_ValueError1(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) out = paddle.flatten(x_var, start_axis=2, stop_axis=1) self.assertRaises(ValueError, test_ValueError1) def test_ValueError2(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=10, stop_axis=1) self.assertRaises(ValueError, test_ValueError2) def test_ValueError3(): - x_var = paddle.static.data(name="x", - shape=image_shape, - dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32' + ) paddle.flatten(x_var, start_axis=2, stop_axis=10) self.assertRaises(ValueError, test_ValueError3) def test_type(): # dtype must be float32, float64, int8, int32, int64 - x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. + x2 = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x2 = x2.astype('float16') - x2_var = paddle.fluid.data(name='x2', - shape=[3, 2, 4, 5], - dtype='float16') + x2_var = paddle.fluid.data( + name='x2', shape=[3, 2, 4, 5], dtype='float16' + ) paddle.flatten(x2_var) self.assertRaises(TypeError, test_type) @@ -283,7 +288,6 @@ class TestFlatten2OpError(unittest.TestCase): class TestStaticFlattenPythonAPI(unittest.TestCase): - def execute_api(self, x, start_axis=0, stop_axis=-1): return paddle.flatten(x, start_axis, stop_axis) @@ -293,9 +297,9 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): main_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, paddle.static.Program()): - x = paddle.static.data(name="x", - shape=[2, 3, 4, 4], - dtype='float32') + x = paddle.static.data( + name="x", shape=[2, 3, 4, 4], dtype='float32' + ) out = self.execute_api(x, start_axis=-2, stop_axis=-1) exe = paddle.static.Executor(place=paddle.XPUPlace(0)) @@ -304,17 +308,22 @@ class TestStaticFlattenPythonAPI(unittest.TestCase): class TestStaticInplaceFlattenPythonAPI(TestStaticFlattenPythonAPI): - def execute_api(self, x, start_axis=0, stop_axis=-1): return x.flatten_(start_axis, stop_axis) class TestFlattenPython(unittest.TestCase): - def test_python_api(self): image_shape = (2, 3, 4, 4) - x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * - image_shape[3]).reshape(image_shape) / 100. 
+ x = ( + np.arange( + image_shape[0] + * image_shape[1] + * image_shape[2] + * image_shape[3] + ).reshape(image_shape) + / 100.0 + ) x = x.astype('float32') def test_InputError(): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py index 5d93a5a5ac0b87a53555567a6fe63d3fa197a342..13efc51fa261023f7ebdbd09f1906057f9fd3517 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py @@ -19,19 +19,21 @@ sys.path.append("..") import numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestFlattenOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'flatten' self.use_dynamic_create_class = False class TestFlattenOp(XPUOpTest): - def setUp(self): self.op_type = "flatten" self.use_xpu = True @@ -59,14 +61,12 @@ class XPUTestFlattenOp(XPUOpTestWrapper): self.attrs = {"axis": self.axis} class TestFlattenOp1(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 2, 10) self.axis = 0 self.new_shape = (1, 120) class TestFlattenOpWithDefaultAxis(TestFlattenOp): - def init_test_case(self): self.in_shape = (10, 2, 2, 3) self.new_shape = (10, 12) @@ -75,7 +75,6 @@ class XPUTestFlattenOp(XPUOpTestWrapper): self.attrs = {} class TestFlattenOpSixDims(TestFlattenOp): - def init_test_case(self): self.in_shape = (3, 2, 3, 2, 4, 4) self.axis = 4 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py index 867379bf81ef5c67234659c30cd786b0834ba135..8d5091d32354ad7bfe8a7669d75fcae6260c1abd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fleet_exe_dist_model_run_xpu.py @@ -23,7 +23,6 @@ paddle.enable_static() class TestDistModelRun(unittest.TestCase): - def setUp(self): self.temp_dir = tempfile.TemporaryDirectory() @@ -34,8 +33,9 @@ class TestDistModelRun(unittest.TestCase): def test_dist_model_run(self): # step 0: declare folder to save the model and params - path_prefix = os.path.join(self.temp_dir.name, - "dist_model_run_test/inf") + path_prefix = os.path.join( + self.temp_dir.name, "dist_model_run_test/inf" + ) # step 1: saving the inference model and params x = paddle.static.data(name='x', shape=[28, 28], dtype='float32') @@ -47,12 +47,11 @@ class TestDistModelRun(unittest.TestCase): exe.run(paddle.static.default_startup_program()) x_data = np.random.randn(28, 28).astype('float32') y_data = np.random.randint(0, 9, size=[28, 1]).astype('int64') - exe.run(paddle.static.default_main_program(), - feed={ - 'x': x_data, - 'y': y_data - }, - fetch_list=[avg_loss]) + exe.run( + paddle.static.default_main_program(), + feed={'x': x_data, 'y': y_data}, + fetch_list=[avg_loss], + ) paddle.static.save_inference_model(path_prefix, [x, y], [avg_loss], exe) print('save model to', path_prefix) @@ -74,14 +73,16 @@ class TestDistModelRun(unittest.TestCase): print("dist model rst:", dist_model_rst) # step 4: use framework's api to inference with fake data - [inference_program, feed_target_names, - fetch_targets] = (paddle.static.load_inference_model(path_prefix, exe)) - results = 
exe.run(inference_program, - feed={ - 'x': x_tensor, - 'y': y_tensor - }, - fetch_list=fetch_targets) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.load_inference_model(path_prefix, exe) + results = exe.run( + inference_program, + feed={'x': x_tensor, 'y': y_tensor}, + fetch_list=fetch_targets, + ) load_inference_model_rst = results[0] print("load inference model api rst:", load_inference_model_rst) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py index 0c68982d14065ef55207daae350c7fab7ec52625..435f44e47361d72c4b7ea99a574df858113192ad 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py @@ -22,7 +22,11 @@ import numpy as np import paddle import paddle.fluid.core as core from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def get_outputs(DOut, X, Y): @@ -34,13 +38,11 @@ def get_outputs(DOut, X, Y): class XPUTestFuseGemmGradOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'fused_gemm_epilogue_grad' self.use_dynamic_create_class = False class TestFuseGemmEpilogueGradOpDXYBias1(XPUOpTest): - def setUp(self): paddle.enable_static() self.op_type = "fused_gemm_epilogue_grad" @@ -53,13 +55,14 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - DX, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + DX, DY, DBias = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DX': DX, 'DY': DY, 'DBias': DBias} def test_check_output(self): @@ -69,48 +72,48 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper): self.check_output_with_place(core.XPUPlace(0), atol=self.atol) class TestFuseGemmEpilogueGradOpDXYBias2(XPUOpTest): - def init_data(self): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - _, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + _, DY, DBias = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DY': DY, 'DBias': DBias} class TestFuseGemmEpilogueGradOpDXYBias3(XPUOpTest): - def init_data(self): self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - _, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + _, DY, _ = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DY': DY} class TestFuseGemmEpilogueGradOpDXYBias4(XPUOpTest): - def init_data(self): 
self.inputs = { 'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5, 'X': np.random.random((8, 4)).astype(self.dtype) - 0.5, - 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5 + 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5, } self.attrs = {"activation": 'none'} - DX, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'], - self.inputs['Y']) + DX, DY, _ = get_outputs( + self.inputs['DOut'], self.inputs['X'], self.inputs['Y'] + ) self.outputs = {'DX': DX, 'DY': DY} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py index bb6cc6a03cbe5371c93f996d4bbf0c521d08cbe4..c37d1bff5dd96edb929cc327640e3093ff6c8e48 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py @@ -22,12 +22,19 @@ import paddle import paddle.fluid.core as core from paddle import _legacy_C_ops from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def gelu(x): - y_ref = 0.5 * x * ( - 1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + y_ref = ( + 0.5 + * x + * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))) + ) return y_ref.astype(x.dtype) @@ -82,13 +89,11 @@ def matmul_grad(x, y, bias, dz, trans_x, trans_y): class XPUTestFuseGemmOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'fused_gemm_epilogue' self.use_dynamic_create_class = False class TestFuseGemmBase(XPUOpTest): - def setUp(self): self.__class__.no_need_check_grad = True self.op_type = "fused_gemm_epilogue" @@ -97,13 +102,14 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.inputs = { 'X': np.random.random(self.x_shape).astype(self.dtype) - 0.5, 'Y': np.random.random(self.y_shape).astype(self.dtype) - 0.5, - 'Bias': - np.random.random(self.bias_shape).astype(self.dtype) - 0.5 + 'Bias': np.random.random(self.bias_shape).astype(self.dtype) + - 0.5, } if self.trans_x == True: - numpy_input_x = self.inputs['X'].reshape( - (self.x_shape[0], -1)).T + numpy_input_x = ( + self.inputs['X'].reshape((self.x_shape[0], -1)).T + ) else: numpy_input_x = self.inputs['X'].reshape((-1, self.x_shape[-1])) @@ -113,14 +119,17 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): numpy_input_y = self.inputs['Y'] self.outputs = { - 'Out': - get_output(numpy_input_x, numpy_input_y, self.inputs['Bias'], - self.activation).reshape(self.out_shape) + 'Out': get_output( + numpy_input_x, + numpy_input_y, + self.inputs['Bias'], + self.activation, + ).reshape(self.out_shape) } self.attrs = { "activation": self.activation, "trans_y": self.trans_y, - "trans_x": self.trans_x + "trans_x": self.trans_x, } def init_dtype_type(self): @@ -144,7 +153,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.check_output_with_place(core.XPUPlace(0), atol=self.atol) class TestFuseGemmEpilogueOp1(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [4, 8] self.y_shape = [4, 128] @@ -157,7 +165,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = True class TestFuseGemmEpilogueOp2(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [8, 4] self.y_shape = [128, 4] @@ -170,7 +177,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = False class 
TestFuseGemmEpilogueOp3(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [4, 8] self.y_shape = [128, 4] @@ -183,7 +189,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = True class TestFuseGemmEpilogueOp4(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [2, 2, 8, 4] self.y_shape = [4, 128] @@ -196,7 +201,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = False class TestFuseGemmEpilogueOp5(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [4, 2, 2, 8] self.y_shape = [4, 128] @@ -209,7 +213,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = True class TestFuseGemmEpilogueOp6(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [8, 4] self.y_shape = [4, 128] @@ -222,7 +225,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): self.trans_x = False class TestFuseGemmEpilogueOp7(TestFuseGemmBase): - def init_datas_shape_and_attrs(self): self.x_shape = [8, 4] self.y_shape = [4, 128] @@ -236,7 +238,6 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): class TestEagerFusedGemmEpilogue(unittest.TestCase): - def setUp(self): paddle.set_device('xpu') @@ -244,22 +245,22 @@ class TestEagerFusedGemmEpilogue(unittest.TestCase): paddle.disable_static() x_np = np.random.random((8, 4)).astype(np.float32) - 0.5 y_np = np.random.random((4, 128)).astype(np.float32) - 0.5 - bias_np = np.random.random((128, )).astype(np.float32) - 0.5 + bias_np = np.random.random((128,)).astype(np.float32) - 0.5 x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) bias = paddle.to_tensor(bias_np) x.stop_gradient = False y.stop_gradient = False - out1 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, 'activation', - 'none') - out2 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, 'activation', - 'relu') - out3 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False, - 'trans_y', False, 'activation', - 'gelu') + out1 = _legacy_C_ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'none' + ) + out2 = _legacy_C_ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'relu' + ) + out3 = _legacy_C_ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', False, 'trans_y', False, 'activation', 'gelu' + ) out_np1 = get_output(x_np, y_np, bias_np, 'none') out_np2 = get_output(x_np, y_np, bias_np, 'relu') @@ -269,13 +270,16 @@ class TestEagerFusedGemmEpilogue(unittest.TestCase): np.testing.assert_allclose(out2, out_np2, atol=1e-04) np.testing.assert_allclose(out3, out_np3, atol=1e-03) - out_grad_np1 = np.random.randint(low=-20, high=20, - size=out_np1.shape).astype(np.float32) - paddle.autograd.backward(out1, - grad_tensors=[paddle.to_tensor(out_grad_np1)]) + out_grad_np1 = np.random.randint( + low=-20, high=20, size=out_np1.shape + ).astype(np.float32) + paddle.autograd.backward( + out1, grad_tensors=[paddle.to_tensor(out_grad_np1)] + ) x_grad_np, y_grad_np, bias_grad_np = matmul_grad( - x_np, y_np, bias_np, out_grad_np1, False, False) + x_np, y_np, bias_np, out_grad_np1, False, False + ) np.testing.assert_allclose(x.grad.numpy(), x_grad_np, atol=1e-02) self.assertEqual(y_grad_np.shape, y_np.shape) np.testing.assert_allclose(y.grad.numpy(), y_grad_np, atol=1e-03) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py index 
7913e7e014857fc94a63c99ee6847534da29d60e..f45a4a135e3e961b5c3f025386d94bcd7c3c8fc3 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_resnet_basic_block_op_xpu.py @@ -24,19 +24,21 @@ import paddle.nn as nn from paddle.fluid import core from paddle.incubate.xpu.resnet_block import ResNetBasicBlock from paddle.fluid.framework import default_main_program -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "resnet_basic_block" self.use_dynamic_create_class = False class TestResNetBasicBlockOp(OpTest): - def setUp(self): paddle.disable_static() self.dtype = self.in_type @@ -70,65 +72,86 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): conv1_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) conv2_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) conv3_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) - bn1_weight = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0)) - bn1_bias = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.0)) - bn2_weight = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0)) - bn2_bias = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.0)) - bn3_weight = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=1.0)) - bn3_bias = fluid.ParamAttr(initializer=fluid.initializer.Constant( - value=0.0)) + learning_rate=0.001, + ) + bn1_weight = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ) + bn1_bias = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.0) + ) + bn2_weight = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ) + bn2_bias = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.0) + ) + bn3_weight = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0) + ) + bn3_bias = fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.0) + ) - self.conv1 = nn.Conv2D(in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=3, - stride=self.stride, - padding=1, - weight_attr=conv1_weight, - bias_attr=None, - data_format='NCHW') - self.bn1 = nn.BatchNorm(self.out_channels, - act='relu', - param_attr=bn1_weight, - bias_attr=bn1_bias, - data_layout='NCHW') - self.conv2 = nn.Conv2D(in_channels=self.out_channels, - out_channels=self.out_channels, - kernel_size=3, - stride=1, - padding=1, - weight_attr=conv2_weight, - bias_attr=None, - data_format='NCHW') - self.bn2 = nn.BatchNorm(self.out_channels, - act=None, - param_attr=bn2_weight, - bias_attr=bn2_bias, - data_layout='NCHW') - self.conv3 = nn.Conv2D(in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=1, - stride=self.stride, - padding=0, - weight_attr=conv3_weight, - bias_attr=None, - data_format='NCHW') - self.bn3 = nn.BatchNorm(self.out_channels, - act=None, - param_attr=bn3_weight, - bias_attr=bn3_bias, - data_layout='NCHW') + self.conv1 = nn.Conv2D( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=3, + 
stride=self.stride, + padding=1, + weight_attr=conv1_weight, + bias_attr=None, + data_format='NCHW', + ) + self.bn1 = nn.BatchNorm( + self.out_channels, + act='relu', + param_attr=bn1_weight, + bias_attr=bn1_bias, + data_layout='NCHW', + ) + self.conv2 = nn.Conv2D( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + weight_attr=conv2_weight, + bias_attr=None, + data_format='NCHW', + ) + self.bn2 = nn.BatchNorm( + self.out_channels, + act=None, + param_attr=bn2_weight, + bias_attr=bn2_bias, + data_layout='NCHW', + ) + self.conv3 = nn.Conv2D( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=1, + stride=self.stride, + padding=0, + weight_attr=conv3_weight, + bias_attr=None, + data_format='NCHW', + ) + self.bn3 = nn.BatchNorm( + self.out_channels, + act=None, + param_attr=bn3_weight, + bias_attr=bn3_bias, + data_layout='NCHW', + ) self.relu = nn.ReLU() tensor_src = paddle.to_tensor(self.src, stop_gradient=False) @@ -139,8 +162,9 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): bn1_out = self.bn1(self.conv1(tensor_src)) bn2_out = self.bn2(self.conv2(bn1_out)) result = self.relu(bn2_out + z_out) - paddle.autograd.backward([result], [paddle.to_tensor(self.dout)], - True) + paddle.autograd.backward( + [result], [paddle.to_tensor(self.dout)], True + ) return result, tensor_src.grad def FusedResNetBasicBlock(self): @@ -148,25 +172,34 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): fused_conv1_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) fused_conv2_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) fused_conv3_weight = fluid.ParamAttr( initializer=fluid.initializer.Xavier(uniform=False), - learning_rate=0.001) + learning_rate=0.001, + ) fused_bn1_weight = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0) + ) fused_bn1_bias = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0) + ) fused_bn2_weight = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0) + ) fused_bn2_bias = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0) + ) fused_bn3_weight = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0)) + initializer=fluid.initializer.Constant(value=1.0) + ) fused_bn3_bias = fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.0)) + initializer=fluid.initializer.Constant(value=0.0) + ) if self.has_shortcut: self.resnet_basic_block = ResNetBasicBlock( @@ -195,7 +228,8 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): padding1=1, padding2=1, padding3=0, - has_shortcut=True) + has_shortcut=True, + ) else: self.resnet_basic_block = ResNetBasicBlock( num_channels1=self.in_channels, @@ -223,7 +257,8 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): padding1=1, padding2=1, padding3=1, - has_shortcut=False) + has_shortcut=False, + ) x = paddle.to_tensor(self.src, stop_gradient=False) out = self.resnet_basic_block.forward(x) @@ -235,36 +270,46 @@ class XPUTestResNetBasicBlockOp(XPUOpTestWrapper): default_main_program().random_seed = 1 base_out, base_grad = self.Base() fused_out, fused_grad = self.FusedResNetBasicBlock() - 
np.testing.assert_allclose(base_out.numpy(), - fused_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(base_grad.numpy(), - fused_grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + base_out.numpy(), + fused_out.numpy(), + rtol=self.rtol, + atol=self.atol, + ) + np.testing.assert_allclose( + base_grad.numpy(), + fused_grad.numpy(), + rtol=self.rtol, + atol=self.atol, + ) def test_out_and_grad(self): self.has_shortcut = False default_main_program().random_seed = 1 base_out, base_grad = self.Base() fused_out, fused_grad = self.FusedResNetBasicBlock() - np.testing.assert_allclose(base_out.numpy(), - fused_out.numpy(), - rtol=self.rtol, - atol=self.atol) - np.testing.assert_allclose(base_grad.numpy(), - fused_grad.numpy(), - rtol=self.rtol, - atol=self.atol) + np.testing.assert_allclose( + base_out.numpy(), + fused_out.numpy(), + rtol=self.rtol, + atol=self.atol, + ) + np.testing.assert_allclose( + base_grad.numpy(), + fused_grad.numpy(), + rtol=self.rtol, + atol=self.atol, + ) support_types = get_xpu_op_support_types('resnet_basic_block') for stype in support_types: - create_test_class(globals(), - XPUTestResNetBasicBlockOp, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestResNetBasicBlockOp, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py index 96e1ded83ae7cb48e8cdec1b4e25a48bd1930c1e..ec4db1fd741bdd77456c56ce513a62f5b4282ffc 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gather_nd_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestGatherNd(XPUOpTestWrapper): - def __init__(self): self.op_name = 'gather_nd' class XPUTestGatherNdBase(XPUOpTest): - def setUp(self): self.op_type = "gather_nd" self.dtype = self.in_type @@ -51,115 +53,104 @@ class XPUTestGatherNd(XPUOpTestWrapper): self.xnp = np.random.random((5, 20)).astype(self.in_type) self.inp = np.array([[], []]).astype("int32") self.output = np.vstack( - (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :])) + (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :]) + ) class XPUTestGatherNdOpWithEmptyIndex1(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.random((5, 20)).astype(self.in_type) self.inp = np.array([[], []]).astype("int32") self.output = np.vstack( - (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :])) + (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :]) + ) class XPUTestGatherNdOpWithEmptyIndex2(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.random((5, 20)).astype(self.in_type) self.inp = np.array([[], []]).astype("int64") self.output = np.vstack( - (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :])) + (self.xnp[np.newaxis, :], self.xnp[np.newaxis, :]) + ) class XPUTestGatherNdOpWithIndex1(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.random((5, 20)).astype(self.in_type) self.inp = np.array([1]).astype("int32") self.output = self.xnp[self.inp] class XPUTestGatherNdOpWithIndex2(XPUTestGatherNdBase): - def 
init_data(self): self.xnp = np.random.random((5, 20)).astype(self.in_type) self.inp = np.array([1]).astype("int64") self.output = self.xnp[self.inp] class XPUTestGatherNdOpWithLowIndex1(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([[1], [2]]).astype("int32") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithLowIndex2(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([1, 2]).astype("int64") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithHighRankSame1(XPUTestGatherNdBase): - def init_data(self): shape = (5, 2, 3, 1, 10) self.xnp = np.random.rand(*shape).astype(self.in_type) - self.inp = np.vstack([ - np.random.randint(0, s, size=2) for s in shape - ]).T.astype("int32") + self.inp = np.vstack( + [np.random.randint(0, s, size=2) for s in shape] + ).T.astype("int32") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithHighRankSame2(XPUTestGatherNdBase): - def init_data(self): shape = (5, 2, 3, 1, 10) self.xnp = np.random.rand(*shape).astype(self.in_type) - self.inp = np.vstack([ - np.random.randint(0, s, size=2) for s in shape - ]).T.astype("int64") + self.inp = np.vstack( + [np.random.randint(0, s, size=2) for s in shape] + ).T.astype("int64") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithHighRankDiff1(XPUTestGatherNdBase): - def init_data(self): shape = (2, 3, 4, 1, 10) self.xnp = np.random.rand(*shape).astype(self.in_type) - self.inp = np.vstack([ - np.random.randint(0, s, size=200) for s in shape - ]).T.astype("int32") + self.inp = np.vstack( + [np.random.randint(0, s, size=200) for s in shape] + ).T.astype("int32") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithHighRankDiff2(XPUTestGatherNdBase): - def init_data(self): shape = (2, 3, 4, 1, 10) self.xnp = np.random.rand(*shape).astype(self.in_type) - self.inp = np.vstack([ - np.random.randint(0, s, size=200) for s in shape - ]).T.astype("int64") + self.inp = np.vstack( + [np.random.randint(0, s, size=200) for s in shape] + ).T.astype("int64") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithSameIndexAsX1(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([[1, 1], [2, 1]]).astype("int32") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpWithSameIndexAsX2(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([[1, 1], [2, 1]]).astype("int64") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpIndex1(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([1, 2]).astype("int32") self.output = self.xnp[tuple(self.inp.T)] class XPUTestGatherNdOpIndex2(XPUTestGatherNdBase): - def init_data(self): self.xnp = np.random.uniform(0, 100, (10, 10)).astype(self.in_type) self.inp = np.array([1, 2]).astype("int64") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py index 3a1cbdee77a9e2c3f81668d796d64f3db4c6abff..b1a776bd49bf645acf30c8d905855d9d142a2fe6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gather_op_xpu.py @@ -21,7 +21,11 @@ import 
numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -34,12 +38,10 @@ def gather_numpy(x, index, axis): class XPUTestGather(XPUOpTestWrapper): - def __init__(self): self.op_name = 'gather' class TestXPUGatherOp(XPUOpTest): - def setUp(self): self.op_type = "gather" self.place = paddle.XPUPlace(0) @@ -49,7 +51,7 @@ class XPUTestGather(XPUOpTestWrapper): xnp = np.random.random(self.x_shape).astype(self.dtype) self.inputs = { 'X': xnp, - 'Index': np.array(self.index).astype(self.index_type) + 'Index': np.array(self.index).astype(self.index_type), } self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} @@ -67,28 +69,24 @@ class XPUTestGather(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestCase1(TestXPUGatherOp): - def init_config(self): - self.x_shape = (100) + self.x_shape = 100 self.index = [1, 3, 5] self.index_type = np.int32 class TestCase2(TestXPUGatherOp): - def init_config(self): - self.x_shape = (100) + self.x_shape = 100 self.index = [1, 3, 5] self.index_type = np.int64 class TestCase3(TestXPUGatherOp): - def init_config(self): self.x_shape = (10, 20) self.index = [1, 3, 5] self.index_type = np.int32 class TestCase4(TestXPUGatherOp): - def init_config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': False} @@ -96,7 +94,6 @@ class XPUTestGather(XPUOpTestWrapper): self.index_type = np.int32 class TestCase5(TestXPUGatherOp): - def init_config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': False} @@ -104,7 +101,6 @@ class XPUTestGather(XPUOpTestWrapper): self.index_type = np.int32 class TestCase6(TestXPUGatherOp): - def init_config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': True} @@ -112,7 +108,6 @@ class XPUTestGather(XPUOpTestWrapper): self.index_type = np.int32 class TestCase7(TestXPUGatherOp): - def init_config(self): self.x_shape = (10, 20) self.attrs = {'overwrite': True} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py index d95f7b967d542fd887d70f1f06163ade962f5806..89725bb8b6d4a04286e0d0eda19098628bca47b8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py @@ -20,20 +20,22 @@ import numpy as np import paddle import paddle.fluid as fluid from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() class XPUTestGaussianRandomOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'gaussian_random' self.use_dynamic_create_class = False class TestGaussianRandomOp(XPUOpTest): - def init(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -50,7 +52,7 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -58,11 +60,12 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): def set_attrs(self): self.mean = 1.0 - self.std = 2. 
+ self.std = 2.0 def test_check_output(self): - self.check_output_with_place_customized(self.verify_output, - self.place) + self.check_output_with_place_customized( + self.verify_output, self.place + ) def verify_output(self, outs): self.assertEqual(outs[0].shape, (123, 92)) @@ -76,30 +79,28 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) class TestMeanStdAreInt(TestGaussianRandomOp): - def set_attrs(self): self.mean = 1 self.std = 2 # Situation 2: Attr(shape) is a list(with tensor) class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp): - def setUp(self): - '''Test gaussian_random op with specified value - ''' + '''Test gaussian_random op with specified value''' self.init() self.init_data() shape_tensor_list = [] for index, ele in enumerate(self.shape): - shape_tensor_list.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor_list.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.attrs = { 'shape': self.infer_shape, 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.inputs = {"ShapeTensorList": shape_tensor_list} @@ -114,12 +115,13 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): self.seed = 10 def test_check_output(self): - self.check_output_with_place_customized(self.verify_output, - self.place) + self.check_output_with_place_customized( + self.verify_output, self.place + ) class TestGaussianRandomOp2_ShapeTensorList( - TestGaussianRandomOp_ShapeTensorList): - + TestGaussianRandomOp_ShapeTensorList + ): def init_data(self): self.shape = [123, 92] self.infer_shape = [-1, -1] @@ -129,8 +131,8 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): self.seed = 10 class TestGaussianRandomOp3_ShapeTensorList( - TestGaussianRandomOp_ShapeTensorList): - + TestGaussianRandomOp_ShapeTensorList + ): def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -140,8 +142,8 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): self.seed = 10 class TestGaussianRandomOp4_ShapeTensorList( - TestGaussianRandomOp_ShapeTensorList): - + TestGaussianRandomOp_ShapeTensorList + ): def init_data(self): self.shape = [123, 92] self.infer_shape = [123, -1] @@ -152,10 +154,8 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): # Situation 3: shape is a tensor class TestGaussianRandomOp1_ShapeTensor(TestGaussianRandomOp): - def setUp(self): - '''Test gaussian_random op with specified value - ''' + '''Test gaussian_random op with specified value''' self.init() self.init_data() self.use_mkldnn = False @@ -165,7 +165,7 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, } self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)} @@ -179,54 +179,61 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): # Test python API class TestGaussianRandomAPI(unittest.TestCase): - def test_api(self): positive_2_int32 = fluid.layers.fill_constant([1], "int32", 2000) positive_2_int64 = fluid.layers.fill_constant([1], "int64", 500) - shape_tensor_int32 = fluid.data(name="shape_tensor_int32", - shape=[2], - dtype="int32") - - shape_tensor_int64 = fluid.data(name="shape_tensor_int64", - shape=[2], - dtype="int64") - - out_1 = fluid.layers.gaussian_random(shape=[2000, 500], - dtype="float32", - mean=0.0, - std=1.0, - seed=10) - - out_2 = fluid.layers.gaussian_random(shape=[2000, positive_2_int32], - 
dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_3 = fluid.layers.gaussian_random(shape=[2000, positive_2_int64], - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_4 = fluid.layers.gaussian_random(shape=shape_tensor_int32, - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_5 = fluid.layers.gaussian_random(shape=shape_tensor_int64, - dtype="float32", - mean=0., - std=1.0, - seed=10) - - out_6 = fluid.layers.gaussian_random(shape=shape_tensor_int64, - dtype=np.float32, - mean=0., - std=1.0, - seed=10) + shape_tensor_int32 = fluid.data( + name="shape_tensor_int32", shape=[2], dtype="int32" + ) + + shape_tensor_int64 = fluid.data( + name="shape_tensor_int64", shape=[2], dtype="int64" + ) + + out_1 = fluid.layers.gaussian_random( + shape=[2000, 500], dtype="float32", mean=0.0, std=1.0, seed=10 + ) + + out_2 = fluid.layers.gaussian_random( + shape=[2000, positive_2_int32], + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_3 = fluid.layers.gaussian_random( + shape=[2000, positive_2_int64], + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_4 = fluid.layers.gaussian_random( + shape=shape_tensor_int32, + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_5 = fluid.layers.gaussian_random( + shape=shape_tensor_int64, + dtype="float32", + mean=0.0, + std=1.0, + seed=10, + ) + + out_6 = fluid.layers.gaussian_random( + shape=shape_tensor_int64, + dtype=np.float32, + mean=0.0, + std=1.0, + seed=10, + ) exe = fluid.Executor(place=fluid.XPUPlace(0)) res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( @@ -235,20 +242,21 @@ class TestGaussianRandomAPI(unittest.TestCase): "shape_tensor_int32": np.array([2000, 500]).astype("int32"), "shape_tensor_int64": np.array([2000, 500]).astype("int64"), }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6]) + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6], + ) self.assertAlmostEqual(np.mean(res_1), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_1), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_1), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_2), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_2), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_2), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_3), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_3), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_3), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_4), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_5), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_5), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_5), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_5), 1.0, delta=0.1) self.assertAlmostEqual(np.mean(res_6), 0.0, delta=0.1) - self.assertAlmostEqual(np.std(res_6), 1., delta=0.1) + self.assertAlmostEqual(np.std(res_6), 1.0, delta=0.1) def test_default_dtype(self): paddle.disable_static() @@ -276,7 +284,6 @@ class TestGaussianRandomAPI(unittest.TestCase): class TestStandardNormalDtype(unittest.TestCase): - def test_default_dtype(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py b/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py index e5ac263fb7d381bd6437e7b14408c220aa1ea061..ebecc1b8b71f55350d7568e01ffe208b3cb0354f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gen_bkcl_id_op.py @@ -37,30 +37,35 @@ def run_gen_bkc_id(attr): with 
paddle.static.program_guard(main_program, startup_program): bkcl_id_var = startup_program.global_block().create_var( - name="BKCLID", persistable=True, type=core.VarDesc.VarType.RAW) + name="BKCLID", persistable=True, type=core.VarDesc.VarType.RAW + ) for i in range(1, bkcl_comm_num): startup_program.global_block().create_var( name="BKCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) if use_hallreduce: for i in range(0, bkcl_comm_num): startup_program.global_block().create_var( name="Hierarchical_inter_BKCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().create_var( name="Hierarchical_exter_BKCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().append_op( type="gen_bkcl_id", inputs={}, outputs={"BKCLID": bkcl_id_var}, - attrs=attr) + attrs=attr, + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -68,7 +73,6 @@ def run_gen_bkc_id(attr): class TestGenBKCLIdOp(unittest.TestCase): - def setUp(self): try: self._dist_ut_port_0 = int(os.environ["PADDLE_DIST_UT_PORT"]) @@ -101,7 +105,7 @@ class TestGenBKCLIdOp(unittest.TestCase): for i in range(nranks): attr['trainer_id'] = i # NOTE: multiprocessing cannot be covered by coverage - p = Process(target=run_gen_bkc_id, args=(attr, )) + p = Process(target=run_gen_bkc_id, args=(attr,)) p.start() procs.append(p) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_generate_proposals_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_generate_proposals_v2_op_xpu.py index d9c5f90e9eefe48cd60090fb2ebf93271f30e3ef..d087e8bf7968b3c073f30cf0bc7934f37cc40398 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_generate_proposals_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_generate_proposals_v2_op_xpu.py @@ -24,7 +24,11 @@ import paddle.fluid.core as core import copy from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -49,30 +53,48 @@ def box_coder(all_anchors, bbox_deltas, variances, pixel_offset=True): pred_bbox = np.zeros_like(bbox_deltas, dtype=np.float32) if variances is not None: for i in range(bbox_deltas.shape[0]): - pred_bbox[i, 0] = variances[i, 0] * bbox_deltas[i, 0] * anchor_loc[ - i, 0] + anchor_loc[i, 2] - pred_bbox[i, 1] = variances[i, 1] * bbox_deltas[i, 1] * anchor_loc[ - i, 1] + anchor_loc[i, 3] - pred_bbox[i, 2] = math.exp( - min(variances[i, 2] * bbox_deltas[i, 2], math.log( - 1000 / 16.0))) * anchor_loc[i, 0] - pred_bbox[i, 3] = math.exp( - min(variances[i, 3] * bbox_deltas[i, 3], math.log( - 1000 / 16.0))) * anchor_loc[i, 1] + pred_bbox[i, 0] = ( + variances[i, 0] * bbox_deltas[i, 0] * anchor_loc[i, 0] + + anchor_loc[i, 2] + ) + pred_bbox[i, 1] = ( + variances[i, 1] * bbox_deltas[i, 1] * anchor_loc[i, 1] + + anchor_loc[i, 3] + ) + pred_bbox[i, 2] = ( + math.exp( + min( + variances[i, 2] * bbox_deltas[i, 2], + math.log(1000 / 16.0), + ) + ) + * anchor_loc[i, 0] + ) + pred_bbox[i, 3] = ( + math.exp( + min( + variances[i, 3] * bbox_deltas[i, 3], + math.log(1000 / 16.0), + ) + ) + * anchor_loc[i, 1] + ) else: for i in range(bbox_deltas.shape[0]): - pred_bbox[i, - 0] = bbox_deltas[i, 0] * anchor_loc[i, 0] + anchor_loc[i, - 2] - pred_bbox[i, - 1] = 
bbox_deltas[i, 1] * anchor_loc[i, 1] + anchor_loc[i, - 3] - pred_bbox[i, 2] = math.exp( - min(bbox_deltas[i, 2], math.log(1000 / 16.0))) * anchor_loc[i, - 0] - pred_bbox[i, 3] = math.exp( - min(bbox_deltas[i, 3], math.log(1000 / 16.0))) * anchor_loc[i, - 1] + pred_bbox[i, 0] = ( + bbox_deltas[i, 0] * anchor_loc[i, 0] + anchor_loc[i, 2] + ) + pred_bbox[i, 1] = ( + bbox_deltas[i, 1] * anchor_loc[i, 1] + anchor_loc[i, 3] + ) + pred_bbox[i, 2] = ( + math.exp(min(bbox_deltas[i, 2], math.log(1000 / 16.0))) + * anchor_loc[i, 0] + ) + pred_bbox[i, 3] = ( + math.exp(min(bbox_deltas[i, 3], math.log(1000 / 16.0))) + * anchor_loc[i, 1] + ) proposals[:, 0] = pred_bbox[:, 0] - pred_bbox[:, 2] / 2 proposals[:, 1] = pred_bbox[:, 1] - pred_bbox[:, 3] / 2 proposals[:, 2] = pred_bbox[:, 0] + pred_bbox[:, 2] / 2 - offset @@ -84,43 +106,47 @@ def box_coder(all_anchors, bbox_deltas, variances, pixel_offset=True): def clip_tiled_boxes(boxes, im_shape, pixel_offset=True): """Clip boxes to image boundaries. im_shape is [height, width] and boxes has shape (N, 4 * num_tiled_boxes).""" - assert boxes.shape[1] % 4 == 0, \ - 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( + assert ( + boxes.shape[1] % 4 == 0 + ), 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format( boxes.shape[1] ) offset = 1 if pixel_offset else 0 # x1 >= 0 - boxes[:, - 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - offset), - 0) + boxes[:, 0::4] = np.maximum( + np.minimum(boxes[:, 0::4], im_shape[1] - offset), 0 + ) # y1 >= 0 - boxes[:, - 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - offset), - 0) + boxes[:, 1::4] = np.maximum( + np.minimum(boxes[:, 1::4], im_shape[0] - offset), 0 + ) # x2 < im_shape[1] - boxes[:, - 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - offset), - 0) + boxes[:, 2::4] = np.maximum( + np.minimum(boxes[:, 2::4], im_shape[1] - offset), 0 + ) # y2 < im_shape[0] - boxes[:, - 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - offset), - 0) + boxes[:, 3::4] = np.maximum( + np.minimum(boxes[:, 3::4], im_shape[0] - offset), 0 + ) return boxes def filter_boxes(boxes, min_size, im_shape, pixel_offset=True): - """Only keep boxes with both sides >= min_size and center within the image. - """ + """Only keep boxes with both sides >= min_size and center within the image.""" # Scale min_size to match image scale min_size = max(min_size, 1.0) offset = 1 if pixel_offset else 0 ws = boxes[:, 2] - boxes[:, 0] + offset hs = boxes[:, 3] - boxes[:, 1] + offset if pixel_offset: - x_ctr = boxes[:, 0] + ws / 2. - y_ctr = boxes[:, 1] + hs / 2. 
- keep = np.where((ws >= min_size) & (hs >= min_size) - & (x_ctr < im_shape[1]) & (y_ctr < im_shape[0]))[0] + x_ctr = boxes[:, 0] + ws / 2.0 + y_ctr = boxes[:, 1] + hs / 2.0 + keep = np.where( + (ws >= min_size) + & (hs >= min_size) + & (x_ctr < im_shape[1]) + & (y_ctr < im_shape[0]) + )[0] else: keep = np.where((ws >= min_size) & (hs >= min_size))[0] return keep @@ -182,9 +208,9 @@ def nms(boxes, scores, nms_threshold, eta=1.0, pixel_offset=True): for k in range(len(selected_indices)): if keep: kept_idx = selected_indices[k] - overlap = iou(boxes[idx], - boxes[kept_idx], - pixel_offset=pixel_offset) + overlap = iou( + boxes[idx], boxes[kept_idx], pixel_offset=pixel_offset + ) keep = True if overlap <= adaptive_threshold else False else: break @@ -195,9 +221,19 @@ def nms(boxes, scores, nms_threshold, eta=1.0, pixel_offset=True): return selected_indices -def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, - scores, pre_nms_topN, post_nms_topN, nms_thresh, - min_size, eta, pixel_offset): +def proposal_for_one_image( + im_shape, + all_anchors, + variances, + bbox_deltas, + scores, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, +): # Transpose and reshape predicted bbox transformations to get them # into the same order as the anchors: # - bbox deltas will be (4 * A, H, W) format from conv output @@ -250,11 +286,13 @@ def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, # take post_nms_topN (e.g. 1000) # return the top proposals if nms_thresh > 0: - keep = nms(boxes=proposals, - scores=scores, - nms_threshold=nms_thresh, - eta=eta, - pixel_offset=pixel_offset) + keep = nms( + boxes=proposals, + scores=scores, + nms_threshold=nms_thresh, + eta=eta, + pixel_offset=pixel_offset, + ) if post_nms_topN > 0 and post_nms_topN < len(keep): keep = keep[:post_nms_topN] proposals = proposals[keep, :] @@ -263,9 +301,19 @@ def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas, return proposals, scores -def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, - variances, pre_nms_topN, post_nms_topN, - nms_thresh, min_size, eta, pixel_offset): +def generate_proposals_v2_in_python( + scores, + bbox_deltas, + im_shape, + anchors, + variances, + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, +): all_anchors = anchors.reshape(-1, 4) rois = np.empty((0, 5), dtype=np.float32) roi_probs = np.empty((0, 1), dtype=np.float32) @@ -276,10 +324,18 @@ def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, num_images = scores.shape[0] for img_idx in range(num_images): img_i_boxes, img_i_probs = proposal_for_one_image( - im_shape[img_idx, :], all_anchors, variances, - bbox_deltas[img_idx, :, :, :], scores[img_idx, :, :, :], - pre_nms_topN, post_nms_topN, nms_thresh, min_size, eta, - pixel_offset) + im_shape[img_idx, :], + all_anchors, + variances, + bbox_deltas[img_idx, :, :, :], + scores[img_idx, :, :, :], + pre_nms_topN, + post_nms_topN, + nms_thresh, + min_size, + eta, + pixel_offset, + ) rois_num.append(img_i_probs.shape[0]) rpn_rois.append(img_i_boxes) rpn_roi_probs.append(img_i_probs) @@ -287,8 +343,9 @@ def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors, return rpn_rois, rpn_roi_probs, rois_num -def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, - variances, stride, offset): +def anchor_generator_in_python( + input_feat, anchor_sizes, aspect_ratios, variances, stride, offset +): num_anchors = 
len(aspect_ratios) * len(anchor_sizes) layer_h = input_feat.shape[2] layer_w = input_feat.shape[3] @@ -312,11 +369,12 @@ def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, scale_h = anchor_size / stride[1] w = scale_w * base_w h = scale_h * base_h - out_anchors[h_idx, w_idx, - idx, :] = [(x_ctr - 0.5 * (w - 1)), - (y_ctr - 0.5 * (h - 1)), - (x_ctr + 0.5 * (w - 1)), - (y_ctr + 0.5 * (h - 1))] + out_anchors[h_idx, w_idx, idx, :] = [ + (x_ctr - 0.5 * (w - 1)), + (y_ctr - 0.5 * (h - 1)), + (x_ctr + 0.5 * (w - 1)), + (y_ctr + 0.5 * (h - 1)), + ] idx += 1 # set the variance. @@ -327,13 +385,11 @@ def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, class XPUGenerateProposalsV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'generate_proposals_v2' self.use_dynamic_create_class = False class TestGenerateProposalsV2Op(XPUOpTest): - def set_data(self): self.init_input_shape() self.init_test_params() @@ -344,7 +400,7 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): 'BboxDeltas': self.bbox_deltas, 'ImShape': self.im_shape.astype(self.dtype), 'Anchors': self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -388,7 +444,7 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 3.0 - self.eta = 1. + self.eta = 1.0 self.pixel_offset = True def init_test_input(self): @@ -396,30 +452,45 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): input_channels = self.input_feat_shape[1] layer_h = self.input_feat_shape[2] layer_w = self.input_feat_shape[3] - input_feat = np.random.random((batch_size, input_channels, layer_h, - layer_w)).astype(self.dtype) + input_feat = np.random.random( + (batch_size, input_channels, layer_h, layer_w) + ).astype(self.dtype) self.anchors, self.variances = anchor_generator_in_python( input_feat=input_feat, - anchor_sizes=[16., 32.], + anchor_sizes=[16.0, 32.0], aspect_ratios=[0.5, 1.0], variances=[1.0, 1.0, 1.0, 1.0], stride=[16.0, 16.0], - offset=0.5) + offset=0.5, + ) num_anchors = self.anchors.shape[2] self.scores = np.random.random( - (batch_size, num_anchors, layer_h, layer_w)).astype(self.dtype) + (batch_size, num_anchors, layer_h, layer_w) + ).astype(self.dtype) self.bbox_deltas = np.random.random( - (batch_size, num_anchors * 4, layer_h, - layer_w)).astype(self.dtype) + (batch_size, num_anchors * 4, layer_h, layer_w) + ).astype(self.dtype) def init_test_output(self): - self.rpn_rois, self.rpn_roi_probs, self.rois_num = generate_proposals_v2_in_python( - self.scores, self.bbox_deltas, self.im_shape, self.anchors, - self.variances, self.pre_nms_topN, self.post_nms_topN, - self.nms_thresh, self.min_size, self.eta, self.pixel_offset) + ( + self.rpn_rois, + self.rpn_roi_probs, + self.rois_num, + ) = generate_proposals_v2_in_python( + self.scores, + self.bbox_deltas, + self.im_shape, + self.anchors, + self.variances, + self.pre_nms_topN, + self.post_nms_topN, + self.nms_thresh, + self.min_size, + self.eta, + self.pixel_offset, + ) class TestGenerateProposalsV2OutLodOp(TestGenerateProposalsV2Op): - def set_data(self): self.init_input_shape() self.init_test_params() @@ -430,7 +501,7 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): 'BboxDeltas': self.bbox_deltas, 'ImShape': self.im_shape.astype(np.float32), 'Anchors': self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -440,37 +511,34 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): 'min_size': 
self.min_size, 'eta': self.eta, 'pixel_offset': self.pixel_offset, - 'return_rois_num': True + 'return_rois_num': True, } self.outputs = { 'RpnRois': (self.rpn_rois[0], [self.rois_num]), 'RpnRoiProbs': (self.rpn_roi_probs[0], [self.rois_num]), - 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)) + 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)), } class TestGenerateProposalsV2OpNoBoxLeft(TestGenerateProposalsV2Op): - def init_test_params(self): self.pre_nms_topN = 12000 # train 12000, test 2000 self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 1000.0 - self.eta = 1. + self.eta = 1.0 self.pixel_offset = True class TestGenerateProposalsV2OpNoOffset(TestGenerateProposalsV2Op): - def init_test_params(self): self.pre_nms_topN = 12000 # train 12000, test 2000 self.post_nms_topN = 5000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 3.0 - self.eta = 1. + self.eta = 1.0 self.pixel_offset = False class TestGenerateProposalsV2OpMaskRcnn1XPU(TestGenerateProposalsV2Op): - def init_input_shape(self): self.input_feat_shape = (1, 20, 48, 64) # Another case is [768, 1024] @@ -481,7 +549,7 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): self.post_nms_topN = 2000 # train 6000, test 1000 self.nms_thresh = 0.7 self.min_size = 0.0 - self.eta = 1. + self.eta = 1.0 self.pixel_offset = False def init_test_input(self): @@ -489,21 +557,24 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): input_channels = self.input_feat_shape[1] layer_h = self.input_feat_shape[2] layer_w = self.input_feat_shape[3] - input_feat = np.random.random((batch_size, input_channels, layer_h, - layer_w)).astype(self.dtype) + input_feat = np.random.random( + (batch_size, input_channels, layer_h, layer_w) + ).astype(self.dtype) self.anchors, self.variances = anchor_generator_in_python( input_feat=input_feat, anchor_sizes=[32, 64, 128, 256, 512], aspect_ratios=[0.5, 1.0, 2.0], variances=[1.0, 1.0, 1.0, 1.0], stride=[16.0, 16.0], - offset=0.5) + offset=0.5, + ) num_anchors = self.anchors.shape[2] self.scores = np.random.random( - (batch_size, num_anchors, layer_h, layer_w)).astype(self.dtype) + (batch_size, num_anchors, layer_h, layer_w) + ).astype(self.dtype) self.bbox_deltas = np.random.random( - (batch_size, num_anchors * 4, layer_h, - layer_w)).astype(self.dtype) + (batch_size, num_anchors * 4, layer_h, layer_w) + ).astype(self.dtype) self.anchors = self.anchors.reshape(-1, 4) self.variances = self.variances.reshape(-1, 4) @@ -519,7 +590,7 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): 'BboxDeltas': self.bbox_deltas, 'ImShape': self.im_shape.astype(np.float32), 'Anchors': self.anchors, - 'Variances': self.variances + 'Variances': self.variances, } self.attrs = { @@ -529,23 +600,25 @@ class XPUGenerateProposalsV2Op(XPUOpTestWrapper): 'min_size': self.min_size, 'eta': self.eta, 'pixel_offset': self.pixel_offset, - 'return_rois_num': True + 'return_rois_num': True, } self.outputs = { 'RpnRois': (self.rpn_rois[0], [self.rois_num]), 'RpnRoiProbs': (self.rpn_roi_probs[0], [self.rois_num]), - 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)) + 'RpnRoisNum': (np.asarray(self.rois_num, dtype=np.int32)), } support_types = get_xpu_op_support_types('generate_proposals_v2') for stype in support_types: - create_test_class(globals(), - XPUGenerateProposalsV2Op, - stype, - test_grad=False, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUGenerateProposalsV2Op, + stype, + test_grad=False, + 
ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_grid_sampler_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_grid_sampler_op_xpu.py index fbd5c68ddd4f3dd480a9f7965f949c42ab1f2d58..eb992cd42132c2e424838caa2813e45e1ba589a5 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_grid_sampler_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_grid_sampler_op_xpu.py @@ -21,7 +21,11 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -30,12 +34,15 @@ def AffineGrid(theta, grid_shape): n = grid_shape[0] h = grid_shape[1] w = grid_shape[2] - h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, - axis=0).T[:, :, np.newaxis] - w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, - axis=0)[:, :, np.newaxis] - grid = np.concatenate([w_idx, h_idx, np.ones([h, w, 1])], - axis=2) # h * w * 3 + h_idx = np.repeat(np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[ + :, :, np.newaxis + ] + w_idx = np.repeat(np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[ + :, :, np.newaxis + ] + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2 + ) # h * w * 3 grid = np.repeat(grid[np.newaxis, :], n, axis=0) # n * h * w *3 ret = np.zeros([n, h * w, 2]) @@ -55,13 +62,17 @@ def getGridPointValue(data, x, y): out_H = x.shape[1] out_W = x.shape[2] - #out = np.zeros(data_shape, dtype='float64') + # out = np.zeros(data_shape, dtype='float64') out = np.zeros([N, C, out_H, out_W], dtype='float64') for i in range(N): for j in range(out_H): for k in range(out_W): - if y[i, j, k] < 0 or y[i, j, k] > in_H - 1 or x[ - i, j, k] < 0 or x[i, j, k] > in_W - 1: + if ( + y[i, j, k] < 0 + or y[i, j, k] > in_H - 1 + or x[i, j, k] < 0 + or x[i, j, k] > in_W - 1 + ): out[i, :, j, k] = 0 else: out[i, :, j, k] = data[i, :, y[i, j, k], x[i, j, k]] @@ -77,27 +88,28 @@ def unnormalizeAndClip(grid_slice, max_val, align_corners, padding_mode): if align_corners: grid_slice = 0.5 * ((grid_slice.astype('float64') + 1.0) * max_val) else: - grid_slice = 0.5 * ((grid_slice.astype('float64') + 1.0) * - (max_val + 1)) - 0.5 + grid_slice = ( + 0.5 * ((grid_slice.astype('float64') + 1.0) * (max_val + 1)) - 0.5 + ) if padding_mode == "border": grid_slice = clip(grid_slice, 0, max_val) elif padding_mode == "reflection": double_range = 2 * max_val if align_corners else (max_val + 1) * 2 - grid_abs = np.abs(grid_slice) if align_corners else np.abs(grid_slice + - 0.5) + grid_abs = ( + np.abs(grid_slice) if align_corners else np.abs(grid_slice + 0.5) + ) extra = grid_abs - np.floor(grid_abs / double_range) * double_range grid_slice = np.minimum(extra, double_range - extra) - grid_slice = grid_slice if align_corners else clip( - grid_slice - 0.5, 0, max_val) + grid_slice = ( + grid_slice if align_corners else clip(grid_slice - 0.5, 0, max_val) + ) return grid_slice -def GridSampler(data, - grid, - align_corners=True, - mode="bilinear", - padding_mode="zeros"): +def GridSampler( + data, grid, align_corners=True, mode="bilinear", padding_mode="zeros" +): dims = data.shape N = dims[0] in_C = dims[1] @@ -121,14 +133,18 @@ def GridSampler(data, y0 = np.floor(y).astype('int32') y1 = y0 + 1 - wa = np.tile(((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 
1)) - wb = np.tile(((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wc = np.tile(((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) - wd = np.tile(((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), - (1, in_C, 1, 1)) + wa = np.tile( + ((x1 - x) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wb = np.tile( + ((x1 - x) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wc = np.tile( + ((x - x0) * (y1 - y)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) + wd = np.tile( + ((x - x0) * (y - y0)).reshape((N, 1, out_H, out_W)), (1, in_C, 1, 1) + ) va = getGridPointValue(data, x0, y0) vb = getGridPointValue(data, x0, y1) @@ -144,13 +160,11 @@ def GridSampler(data, class XPUTestGridSamplerOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'grid_sampler' self.use_dynamic_create_class = False class TestXPUGridSamplerOp(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -180,9 +194,9 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): "mode": self.mode, } self.outputs = { - 'Output': - GridSampler(x, grid, self.align_corners, self.mode, - self.padding_mode) + 'Output': GridSampler( + x, grid, self.align_corners, self.mode, self.padding_mode + ) } def initTestCase(self): @@ -203,7 +217,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X', 'Grid'], 'Output') class TestGridSample1(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -213,7 +226,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "bilinear" class TestGridSample2(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -223,7 +235,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "bilinear" class TestGridSample3(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -233,7 +244,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "bilinear" class TestGridSample4(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -243,7 +253,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "bilinear" class TestGridSample5(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 5, 6) self.grid_shape = (2, 8, 9, 2) @@ -253,7 +262,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "nearest" class TestGridSample6(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 128, 128) self.grid_shape = (2, 130, 130, 2) @@ -263,7 +271,6 @@ class XPUTestGridSamplerOP(XPUOpTestWrapper): self.mode = "bilinear" class TestGridSample7(TestXPUGridSamplerOp): - def initTestCase(self): self.x_shape = (2, 3, 128, 128) self.grid_shape = (2, 130, 130, 2) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py index 1e7361035dfc02370d1a9bcb29ea0eaa62114a37..50d77fc1a3d5f0315bf2eedc13bc2f783bd64554 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py @@ -22,7 +22,11 @@ import paddle from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, 
+ XPUOpTestWrapper, +) paddle.enable_static() @@ -36,13 +40,11 @@ def huber_loss_forward(val, delta): class XPUTestHuberLossOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'huber_loss' self.use_dynamic_create_class = False class TestHuberLossOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = 'huber_loss' @@ -55,11 +57,11 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): def set_inputs(self): shape = self.set_shape() - x = np.random.uniform(0, 1., shape).astype(self.dtype) - y = np.random.uniform(0, 1., shape).astype(self.dtype) + x = np.random.uniform(0, 1.0, shape).astype(self.dtype) + y = np.random.uniform(0, 1.0, shape).astype(self.dtype) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } def set_attrs(self): @@ -69,8 +71,9 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): delta = self.attrs['delta'] shape = self.set_shape() residual = self.inputs['Y'] - self.inputs['X'] - loss = np.vectorize(huber_loss_forward)(residual, - delta).astype(self.dtype) + loss = np.vectorize(huber_loss_forward)(residual, delta).astype( + self.dtype + ) self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)} def set_shape(self): @@ -89,27 +92,24 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("residual")) + self.check_grad_with_place( + self.place, ['Y'], 'Out', no_grad_set=set("residual") + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set('residual')) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set('residual') + ) class TestHuberLossOp1(TestHuberLossOp): - def set_shape(self): - return (640) + return 640 class TestHuberLossOp2(TestHuberLossOp): - def set_shape(self): return (10, 10) class TestHuberLossOp3(TestHuberLossOp): - def set_shape(self): return (10, 10, 1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py index 3eb8ff49f7fd56572092dde5aefb942f03dfc73a..816e7ac7967de755bbe0c351d93730732edc90e4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py @@ -23,33 +23,34 @@ import numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestIndexSelect(XPUOpTestWrapper): - def __init__(self): self.op_name = 'index_select' class TestXPUIndexSelectOp(XPUOpTest): - def setUp(self): self.op_type = "index_select" self.place = paddle.XPUPlace(0) self.dtype = self.in_type self.init_dtype_type() - index_np = np.random.randint(low=0, - high=self.x_shape[self.dim], - size=self.index_size).astype( - self.index_type) + index_np = np.random.randint( + low=0, high=self.x_shape[self.dim], size=self.index_size + ).astype(self.index_type) x_np = np.random.random(self.x_shape).astype(self.dtype) self.inputs = {'X': x_np, 'Index': index_np} self.attrs = {'dim': self.dim} - outer_loop = np.prod(self.x_shape[:self.dim]) - x_reshape = [outer_loop] + list(self.x_shape[self.dim:]) + outer_loop = np.prod(self.x_shape[: 
self.dim]) + x_reshape = [outer_loop] + list(self.x_shape[self.dim :]) x_np_reshape = np.reshape(x_np, tuple(x_reshape)) out_list = [] for i in range(outer_loop): @@ -77,7 +78,6 @@ class XPUTestIndexSelect(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestXPUIndexSelectOpCase2(TestXPUIndexSelectOp): - def init_dtype_type(self): self.index_type = np.int32 self.dim = -2 @@ -86,10 +86,14 @@ class XPUTestIndexSelect(XPUOpTestWrapper): class TestIndexSelectAPI(unittest.TestCase): - def input_data(self): - self.data_x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [9.0, 10.0, 11.0, 12.0]]) + self.data_x = np.array( + [ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + ] + ) self.data_index = np.array([0, 1, 1]).astype('int32') def test_index_select_api(self): @@ -98,39 +102,37 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='index', - shape=[3], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='index', shape=[3], dtype='int32', append_batch_size=False + ) z = paddle.index_select(x, index, axis=1) exe = fluid.Executor(fluid.XPUPlace(0)) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 10.0]]) + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data(name='index', - shape=[3], - dtype='int32', - append_batch_size=False) + index = fluid.layers.data( + name='index', shape=[3], dtype='int32', append_batch_size=False + ) z = paddle.index_select(x, index) exe = fluid.Executor(fluid.XPUPlace(0)) - res, = exe.run(feed={ - 'x': self.data_x, - 'index': self.data_index - }, - fetch_list=[z.name], - return_numpy=False) - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]) + (res,) = exe.run( + feed={'x': self.data_x, 'index': self.data_index}, + fetch_list=[z.name], + return_numpy=False, + ) + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): @@ -141,8 +143,9 @@ class TestIndexSelectAPI(unittest.TestCase): index = fluid.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], - [5.0, 6.0, 7.0, 8.0]]) + expect_out = np.array( + [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: @@ -151,8 +154,9 @@ class TestIndexSelectAPI(unittest.TestCase): index = fluid.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index, axis=1) np_z = z.numpy() - expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], - [9.0, 10.0, 10.0]]) + expect_out = np.array( + [[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]] + ) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) diff --git 
a/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py index 55d942e0d72cea68a2e528ac97c9e29d021b4cbd..8fc1ca75240dbceaf38796dbc754574bd1226f06 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py @@ -19,7 +19,11 @@ import unittest sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -53,13 +57,11 @@ def _cal_mean_variance(x, epsilon, mean_shape): class XPUTestInstanceNormOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'instance_norm' self.use_dynamic_create_class = False class XPUTestInstanceNormOp(XPUOpTest): - def setUp(self): self.op_type = "instance_norm" self.dtype = self.in_type @@ -79,8 +81,13 @@ class XPUTestInstanceNormOp(XPUOpTestWrapper): bias_np = np.random.random_sample(scale_shape).astype(np.float32) mean, variance = self.set_global_mean_var(mean_shape, x_np) - ref_y_np, ref_saved_mean, variance_tmp = _reference_instance_norm_naive( - x_np, scale_np, bias_np, epsilon, mean, variance) + ( + ref_y_np, + ref_saved_mean, + variance_tmp, + ) = _reference_instance_norm_naive( + x_np, scale_np, bias_np, epsilon, mean, variance + ) ref_saved_variance = 1 / np.sqrt(variance_tmp + epsilon) @@ -88,7 +95,7 @@ class XPUTestInstanceNormOp(XPUOpTestWrapper): self.outputs = { 'Y': ref_y_np, 'SavedMean': ref_saved_mean, - 'SavedVariance': ref_saved_variance + 'SavedVariance': ref_saved_variance, } self.attrs = {'epsilon': epsilon, 'use_xpu': True} @@ -106,27 +113,22 @@ class XPUTestInstanceNormOp(XPUOpTestWrapper): self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Y') class TestXPUInstanceNormOp1(XPUTestInstanceNormOp): - def set_attrs(self): self.shape = [10, 12, 32, 32] class TestXPUInstanceNormOp2(XPUTestInstanceNormOp): - def set_attrs(self): self.shape = [4, 5, 6, 7] class TestXPUInstanceNormOp3(XPUTestInstanceNormOp): - def set_attrs(self): self.shape = [1, 8, 16, 16] class TestXPUInstanceNormOp4(XPUTestInstanceNormOp): - def set_attrs(self): self.shape = [4, 16, 256, 128] class TestXPUInstanceNormOp5(XPUTestInstanceNormOp): - def set_attrs(self): self.shape = [10, 3, 512, 1] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_iou_similarity_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_iou_similarity_op_xpu.py index 2aac4dc83fe29673cd8f6293a6b9a29ddd5b32f1..548976ae62e91fdf59ae74186be04c2a969d8ccd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_iou_similarity_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_iou_similarity_op_xpu.py @@ -18,20 +18,22 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() class XPUTestIOUSimilarityOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'iou_similarity' self.use_dynamic_create_class = False class TestXPUIOUSimilarityOp(XPUOpTest): - def init(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -51,11 +53,13 @@ class XPUTestIOUSimilarityOp(XPUOpTestWrapper): self.inputs = {'X': 
self.boxes1, 'Y': self.boxes2} self.attrs = { "box_normalized": self.box_normalized, - 'use_xpu': True + 'use_xpu': True, } self.outputs = {'Out': self.output} - def _compute_iou(self, ): + def _compute_iou( + self, + ): for row in range(self.boxes1.shape[0]): for col in range(self.boxes2.shape[0]): xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] @@ -84,7 +88,6 @@ class XPUTestIOUSimilarityOp(XPUOpTestWrapper): self.output[row, col] = sim_score class TestXPUIOUSimilarityOpWithLoD(TestXPUIOUSimilarityOp): - def test_check_output(self): self.check_output_with_place(self.place, check_dygraph=False) @@ -97,13 +100,12 @@ class XPUTestIOUSimilarityOp(XPUOpTestWrapper): self._compute_iou() self.inputs = { 'X': (self.boxes1, self.boxes1_lod), - 'Y': self.boxes2 + 'Y': self.boxes2, } self.attrs = {"box_normalized": self.box_normalized} self.outputs = {'Out': (self.output, self.output_lod)} class TestXPUIOUSimilarityOpWithBoxNormalized(TestXPUIOUSimilarityOp): - def test_check_output(self): self.check_output_with_place(self.place, check_dygraph=False) @@ -116,7 +118,7 @@ class XPUTestIOUSimilarityOp(XPUOpTestWrapper): self._compute_iou() self.inputs = { 'X': (self.boxes1, self.boxes1_lod), - 'Y': self.boxes2 + 'Y': self.boxes2, } self.attrs = {"box_normalized": self.box_normalized} self.outputs = {'Out': (self.output, self.output_lod)} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py index 3384a0b093808deb0bf47325ffcec91bec491f3d..0a2c7b6b77331a47a67e6521a05997adc417e5f5 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py @@ -19,13 +19,16 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestLabelSmoothOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'label_smooth' self.use_dynamic_create_class = True @@ -37,15 +40,15 @@ class XPUTestLabelSmoothOp(XPUOpTestWrapper): label_dims = [1, 7, 12] for bs in batch_sizes: for label_dim in label_dims: - class_name = 'XPUTestLabelSmooth_' + \ - str(bs) + "_" + str(label_dim) + class_name = ( + 'XPUTestLabelSmooth_' + str(bs) + "_" + str(label_dim) + ) attr_dict = {'batch_size': bs, 'label_dim': label_dim} classes.append([class_name, attr_dict]) classes.append(['XPUTestLabelSmooth_3d', {'is_3d': True}]) return base_class, classes class TestLabelSmoothOp(XPUOpTest): - def setUp(self): self.op_type = "label_smooth" self.epsilon = 0.1 @@ -53,21 +56,26 @@ class XPUTestLabelSmoothOp(XPUOpTestWrapper): if not hasattr(self, 'batch_size'): self.batch_size = 10 self.label_dim = 12 - self.label = np.zeros( - (self.batch_size, self.label_dim)).astype("float32") - nonzero_index = np.random.randint(self.label_dim, - size=(self.batch_size)) + self.label = np.zeros((self.batch_size, self.label_dim)).astype( + "float32" + ) + nonzero_index = np.random.randint( + self.label_dim, size=(self.batch_size) + ) self.label[np.arange(self.batch_size), nonzero_index] = 1 smoothed_label = ( - 1 - self.epsilon) * self.label + self.epsilon / self.label_dim + 1 - self.epsilon + ) * self.label + self.epsilon / self.label_dim self.inputs = {'X': self.label} self.attrs = {'epsilon': self.epsilon} self.outputs = {'Out': 
smoothed_label} if hasattr(self, 'is_3d') and self.is_3d: self.inputs['X'] = self.inputs['X'].reshape( - [2, -1, self.inputs['X'].shape[-1]]) + [2, -1, self.inputs['X'].shape[-1]] + ) self.outputs['Out'] = self.outputs['Out'].reshape( - self.inputs['X'].shape) + self.inputs['X'].shape + ) def test_check_output(self): if not paddle.is_compiled_with_xpu(): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_lamb_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_lamb_op_xpu.py index b86dae39426a7cdd294aa1a08185e153f8ab85a0..0c5354c957597f1842633f29db3d949183f9d9af 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_lamb_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_lamb_op_xpu.py @@ -20,7 +20,11 @@ import numpy as np from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def lamb_step(inputs, attributes): @@ -51,14 +55,16 @@ def lamb_step(inputs, attributes): moment2_unbiased = moment2_out / (1 - beta2_pow) r_1 = np.linalg.norm(param) - r_2 = np.linalg.norm(moment1_unbiased / - (np.sqrt(moment2_unbiased) + epsilon) + - weight_decay * param) + r_2 = np.linalg.norm( + moment1_unbiased / (np.sqrt(moment2_unbiased) + epsilon) + + weight_decay * param + ) lr_t = lr * r_1 / r_2 - param_out = param - lr_t * (moment1_unbiased / - (np.sqrt(moment2_unbiased) + epsilon) + - weight_decay * param) + param_out = param - lr_t * ( + moment1_unbiased / (np.sqrt(moment2_unbiased) + epsilon) + + weight_decay * param + ) beta1_pow_out = beta1_pow * beta1 beta2_pow_out = beta2_pow * beta2 @@ -67,24 +73,21 @@ def lamb_step(inputs, attributes): class XPUTestLambOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'lamb' self.use_dynamic_create_class = False class TestLambOp1(XPUOpTest): - def set_attrs(self): self.attrs = { 'epsilon': 1e-4, 'beta1': 0.78, 'beta2': 0.836, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } def setUp(self): - '''Test Lamb Op with supplied attributes - ''' + '''Test Lamb Op with supplied attributes''' # self.op_type = self.op_name self.__class__.op_type = 'lamb' self.dtype = self.in_type @@ -105,56 +108,63 @@ class XPUTestLambOp(XPUOpTestWrapper): 'Moment2': moment2, 'LearningRate': np.array([learning_rate]).astype("float32"), 'Beta1Pow': np.array([beta1_pow]).astype("float32"), - 'Beta2Pow': np.array([beta2_pow]).astype("float32") + 'Beta2Pow': np.array([beta2_pow]).astype("float32"), } - - param_out, moment1_out, moment2_out, \ - beta1_pow_out, beta2_pow_out = lamb_step(self.inputs, self.attrs) + ( + param_out, + moment1_out, + moment2_out, + beta1_pow_out, + beta2_pow_out, + ) = lamb_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } def test_check_output(self): self.check_output_with_place(paddle.XPUPlace(0)) class TestLambOp2(TestLambOp1): - def set_attrs(self): self.attrs = { 'epsilon': 1e-8, 'beta1': 0.9, 'beta2': 0.999, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } class TestLambOpMultipleSteps(TestLambOp1): - def set_attrs(self): self.attrs = { 'epsilon': 1e-8, 'beta1': 0.9, 'beta2': 0.999, - 'weight_decay': 0.01 + 'weight_decay': 0.01, } self.num_steps = 10 def test_check_output(self): for i in range(self.num_steps): - param_out, moment1_out, moment2_out, \ - 
beta1_pow_out, beta2_pow_out = lamb_step(self.inputs, self.attrs) + ( + param_out, + moment1_out, + moment2_out, + beta1_pow_out, + beta2_pow_out, + ) = lamb_step(self.inputs, self.attrs) self.outputs = { 'Moment1Out': moment1_out, 'Moment2Out': moment2_out, 'ParamOut': param_out, 'Beta1PowOut': beta1_pow_out, - 'Beta2PowOut': beta2_pow_out + 'Beta2PowOut': beta2_pow_out, } # Verify output for this step @@ -171,7 +181,8 @@ class XPUTestLambOp(XPUOpTestWrapper): # Randomize gradient for next step self.inputs['Grad'] = np.random.uniform( - -1, 1, (102, 105)).astype("float32") + -1, 1, (102, 105) + ).astype("float32") support_types = get_xpu_op_support_types('lamb') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py index db0ac5189322fd57ec421247ebe4ac0fdf407732..3c377db4c0b558535c77186c88d74cedce621995 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py @@ -21,7 +21,11 @@ from functools import reduce sys.path.append("..") from op_test_xpu import XPUOpTest from operator import mul -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -29,12 +33,13 @@ paddle.enable_static() def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1): x_shape = x.shape left = reduce(mul, x_shape[0:begin_norm_axis], 1) - right = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) + right = reduce(mul, x_shape[begin_norm_axis : len(x_shape)], 1) x.shape = [left, right] mean = np.mean(x, axis=1) variance = np.var(x, axis=1) + epsilon - y = np.divide((x - mean.reshape([left, 1])), - (np.sqrt(variance)).reshape([left, 1])) + y = np.divide( + (x - mean.reshape([left, 1])), (np.sqrt(variance)).reshape([left, 1]) + ) if scale is not None: y = scale.reshape([1, right]) * y if bias is not None: @@ -44,13 +49,11 @@ def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1): class XPUTestLayerNormOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'layer_norm' self.use_dynamic_create_class = False class TestXPULayerNormOp(XPUOpTest): - def setUp(self): self.op_type = "layer_norm" self.dtype = self.in_type @@ -63,25 +66,27 @@ class XPUTestLayerNormOp(XPUOpTestWrapper): if self.dtype == np.float16: self.atol = 1e-2 - right = reduce(mul, - self.shape[self.begin_norm_axis:len(self.shape)], 1) + right = reduce( + mul, self.shape[self.begin_norm_axis : len(self.shape)], 1 + ) np.random.seed(10) x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) scale_np = np.random.uniform(0.1, 1, [right]).astype('float32') bias_np = np.random.uniform(0.1, 1, [right]).astype('float32') ref_y_np, ref_mean_np, ref_variance_np = ref_layer_norm( - x_np, scale_np, bias_np, self.epsilon, self.begin_norm_axis) + x_np, scale_np, bias_np, self.epsilon, self.begin_norm_axis + ) ref_y_np = ref_y_np.astype(self.dtype) self.inputs = {'X': x_np, 'Scale': scale_np, 'Bias': bias_np} self.outputs = { 'Y': ref_y_np, 'Mean': ref_mean_np, - 'Variance': ref_variance_np + 'Variance': ref_variance_np, } self.attrs = { 'begin_norm_axis': self.begin_norm_axis, - 'use_xpu': True + 'use_xpu': True, } def set_attrs(self): @@ -91,27 +96,23 @@ class XPUTestLayerNormOp(XPUOpTestWrapper): self.check_output_with_place(paddle.XPUPlace(0), atol=self.atol) def test_check_grad(self): 
- self.check_grad_with_place(paddle.XPUPlace(0), ['X'], - 'Y', - max_relative_error=self.atol) + self.check_grad_with_place( + paddle.XPUPlace(0), ['X'], 'Y', max_relative_error=self.atol + ) class TestXPULayerNormOpAxis2(TestXPULayerNormOp): - def set_attrs(self): self.begin_norm_axis = 2 class TestXPULayerNormOpAxis3(TestXPULayerNormOp): - def set_attrs(self): self.begin_norm_axis = 3 class TestXPULayerNormOp2D(TestXPULayerNormOp): - def set_attrs(self): self.shape = [10, 12] class TestXPULayerNormOp3D(TestXPULayerNormOp): - def set_attrs(self): self.shape = [4, 5, 6] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py index 94297432e30692dc48e58b8df17f17f76e9d8fd8..4d50fc2da7d521602fd37a206da5d613cd9d837b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_log_loss_op_xpu.py @@ -26,7 +26,6 @@ def sigmoid_array(x): class TestXPULogLossOp(OpTest): - def setUp(self): self.op_type = 'log_loss' samples_num = 100 @@ -41,8 +40,9 @@ class TestXPULogLossOp(OpTest): } self.attrs = {'epsilon': epsilon} - loss = -labels * np.log(predicted + epsilon) - ( - 1 - labels) * np.log(1 - predicted + epsilon) + loss = -labels * np.log(predicted + epsilon) - (1 - labels) * np.log( + 1 - predicted + epsilon + ) self.outputs = {'Loss': loss} def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py index c13161b17cb439351b3b3b9c6da01730300eff4a..d961e46bd62bfd877066937328a396700d5b11f9 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_log_softmax_op_xpu.py @@ -22,14 +22,18 @@ import paddle import paddle.nn.functional as F from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) def ref_log_softmax(x): - shiftx = (x - np.max(x)) + shiftx = x - np.max(x) out = shiftx - np.log(np.exp(shiftx).sum()) return out @@ -39,14 +43,14 @@ def ref_log_softmax_grad(x, axis): axis += len(x.shape) out = np.apply_along_axis(ref_log_softmax, axis, x) axis_dim = x.shape[axis] - dout = np.full_like(x, fill_value=1. 
/ x.size) + dout = np.full_like(x, fill_value=1.0 / x.size) dx = dout - np.exp(out) * dout.copy().sum(axis=axis, keepdims=True).repeat( - axis_dim, axis=axis) + axis_dim, axis=axis + ) return dx class XPUTestLogSoftmaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'log_softmax' self.use_dynamic_create_class = True @@ -58,14 +62,12 @@ class XPUTestLogSoftmaxOp(XPUOpTestWrapper): shape_arr = [[2, 3, 4, 5], [12, 10], [2, 5], [7, 7], [3, 5, 7]] for axis in axis_arr: for shape in shape_arr: - class_name = 'XPUTestLogSoftmax_' + \ - str(axis) + "_" + str(shape) + class_name = 'XPUTestLogSoftmax_' + str(axis) + "_" + str(shape) attr_dict = {'axis': axis, 'shape': shape} classes.append([class_name, attr_dict]) return base_class, classes class TestXPULogSoftmaxOp(XPUOpTest): - def setUp(self): self.op_type = 'log_softmax' self.python_api = F.log_softmax @@ -76,7 +78,7 @@ class XPUTestLogSoftmaxOp(XPUOpTestWrapper): self.shape = [2, 3, 4, 5] self.axis = -1 - x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype) + x = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) out = np.apply_along_axis(ref_log_softmax, self.axis, x) self.x_grad = ref_log_softmax_grad(x, self.axis) @@ -91,9 +93,12 @@ class XPUTestLogSoftmaxOp(XPUOpTestWrapper): self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], ['Out'], - user_defined_grads=[self.x_grad], - check_eager=True) + self.check_grad( + ['X'], + ['Out'], + user_defined_grads=[self.x_grad], + check_eager=True, + ) support_types = get_xpu_op_support_types('log_softmax') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py index ad0868d29bdf34e51ac43ab00e3342876c5b1e1f..9b0d68ca08449f28a6111fb5e04d06e3dd5c9b86 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_logical_op_xpu.py @@ -21,19 +21,21 @@ sys.path.append("..") import paddle from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() ################## TEST OP: logical_and ################## class XPUTestLogicalAnd(XPUOpTestWrapper): - def __init__(self): self.op_name = 'logical_and' class XPUTestLogicalAndBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -42,20 +44,18 @@ class XPUTestLogicalAnd(XPUOpTestWrapper): def set_case(self): self.op_type = 'logical_and' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.logical_and(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -73,7 +73,6 @@ class XPUTestLogicalAnd(XPUOpTestWrapper): pass class XPUTestLogicalAndCase1(XPUTestLogicalAndBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -89,12 +88,10 @@ for stype in support_types: ################## TEST OP: logical_or ################## class 
XPUTestLogicalOr(XPUOpTestWrapper): - def __init__(self): self.op_name = 'logical_or' class XPUTestLogicalOrBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -103,20 +100,18 @@ class XPUTestLogicalOr(XPUOpTestWrapper): def set_case(self): self.op_type = 'logical_or' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.logical_or(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -134,7 +129,6 @@ class XPUTestLogicalOr(XPUOpTestWrapper): pass class XPUTestLogicalOrCase1(XPUTestLogicalOrBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -150,12 +144,10 @@ for stype in support_types: ################## TEST OP: logical_xor ################## class XPUTestLogicalXor(XPUOpTestWrapper): - def __init__(self): self.op_name = 'logical_xor' class XPUTestLogicalXorBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -164,20 +156,18 @@ class XPUTestLogicalXor(XPUOpTestWrapper): def set_case(self): self.op_type = 'logical_xor' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) - y = np.random.randint(self.low, - self.high, - self.y_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) + y = np.random.randint( + self.low, self.high, self.y_shape, dtype=self.dtype + ) out = np.logical_xor(x, y) self.attrs = {'use_xpu': True} self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } self.outputs = {'Out': out} @@ -195,7 +185,6 @@ class XPUTestLogicalXor(XPUOpTestWrapper): pass class XPUTestLogicalXorCase1(XPUTestLogicalXorBase): - def init_case(self): self.dtype = np.int32 self.x_shape = [4, 5] @@ -211,12 +200,10 @@ for stype in support_types: ################## TEST OP: LogicalNot ################## class XPUTestLogicalNot(XPUOpTestWrapper): - def __init__(self): self.op_name = 'logical_not' class XPUTestLogicalNotBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -225,10 +212,9 @@ class XPUTestLogicalNot(XPUOpTestWrapper): def set_case(self): self.op_type = 'logical_not' - x = np.random.randint(self.low, - self.high, - self.x_shape, - dtype=self.dtype) + x = np.random.randint( + self.low, self.high, self.x_shape, dtype=self.dtype + ) out = np.logical_not(x) self.attrs = {'use_xpu': True} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_logsumexp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_logsumexp_op_xpu.py index db55aff3817d7a6cdb616e294216abb8fa27046c..932507066b9a858035acebd8222fb308d54f8884 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_logsumexp_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_logsumexp_op_xpu.py @@ -25,7 +25,7 @@ paddle.enable_static() def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False): if isinstance(axis, int): - axis = (axis, ) + axis = (axis,) elif isinstance(axis, list): axis = tuple(axis) if reduce_all: @@ -35,7 +35,6 @@ def ref_logsumexp(x, axis=None, keepdim=False, 
reduce_all=False): class XPUTestLogsumexp(XPUOpTest): - def setUp(self): self.op_type = 'logsumexp' self.shape = [2, 3, 4, 5] @@ -54,7 +53,7 @@ class XPUTestLogsumexp(XPUOpTest): self.attrs = { 'axis': self.axis, 'keepdim': self.keepdim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } def set_attrs(self): @@ -70,31 +69,26 @@ class XPUTestLogsumexp(XPUOpTest): class TestLogsumexp_shape(XPUTestLogsumexp): - def set_attrs(self): self.shape = [4, 5, 6] class TestLogsumexp_axis(XPUTestLogsumexp): - def set_attrs(self): self.axis = [0, -1] class TestLogsumexp_axis_all(XPUTestLogsumexp): - def set_attrs(self): self.axis = [0, 1, 2, 3] class TestLogsumexp_keepdim(XPUTestLogsumexp): - def set_attrs(self): self.keepdim = True class TestLogsumexp_reduce_all(XPUTestLogsumexp): - def set_attrs(self): self.reduce_all = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py index 35df3c244bd0d35ee3e47bc07bcbb000e65e012a..039effe0275a7100bab40f4d78d9bb3439ef49ec 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py @@ -29,7 +29,6 @@ paddle.enable_static() class TestLookupTableOp(OpTest): - def setUp(self): self.op_type = "lookup_table_v2" table = np.random.random((17, 31)).astype("float64") @@ -42,15 +41,16 @@ class TestLookupTableOp(OpTest): def test_check_grad(self): - self.check_grad_with_place(inputs_to_check=['W'], - output_names='Out', - no_grad_set=set('Ids'), - place=paddle.XPUPlace(0), - in_place=True) + self.check_grad_with_place( + inputs_to_check=['W'], + output_names='Out', + no_grad_set=set('Ids'), + place=paddle.XPUPlace(0), + in_place=True, + ) class TestLookupTableOpWithTensorIds(OpTest): - def setUp(self): self.op_type = "lookup_table_v2" table = np.random.random((17, 31)).astype("float64") @@ -62,19 +62,21 @@ class TestLookupTableOpWithTensorIds(OpTest): self.check_output_with_place(place=paddle.XPUPlace(0)) def test_check_grad(self): - self.check_grad_with_place(inputs_to_check=['W'], - output_names='Out', - no_grad_set=set('Ids'), - place=paddle.XPUPlace(0), - in_place=True) + self.check_grad_with_place( + inputs_to_check=['W'], + output_names='Out', + no_grad_set=set('Ids'), + place=paddle.XPUPlace(0), + in_place=True, + ) @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." +) class TestLookupTableOpWithPadding(TestLookupTableOp): - def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] @@ -86,9 +88,9 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): @skip_check_grad_ci( reason="Since paddings are not trainable and fixed in forward," "the gradient of paddings makes no sense and we don't " - "test the gradient here.") + "test the gradient here." 
+) class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): - def test_check_output(self): ids = self.inputs['Ids'] flatten_idx = ids.flatten() @@ -99,7 +101,6 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): class TestLookupTableWIsSelectedRows(unittest.TestCase): - def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([0, 4, 3, 5]).astype("int64") @@ -147,13 +148,14 @@ class TestLookupTableWIsSelectedRows(unittest.TestCase): self.check_with_place(place) -class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows - ): - +class TestLookupTableWithTensorIdsWIsSelectedRows( + TestLookupTableWIsSelectedRows +): def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() - ids_array = np.random.randint(low=0, high=6, - size=(2, 4, 3)).astype("int64") + ids_array = np.random.randint(low=0, high=6, size=(2, 4, 3)).astype( + "int64" + ) ids_tensor.set(ids_array, place) return ids_array @@ -163,7 +165,6 @@ class TestLookupTableWithTensorIdsWIsSelectedRows(TestLookupTableWIsSelectedRows class TestLookupTableApi(unittest.TestCase): - def test_api(self): x = fluid.layers.data(name='x', shape=[20], dtype='int64') emb = fluid.embedding(input=x, size=[128, 64]) @@ -173,15 +174,16 @@ class TestLookupTableApi(unittest.TestCase): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'x': x_data, - }, - fetch_list=[emb], - return_numpy=False) + ret = exe.run( + feed={ + 'x': x_data, + }, + fetch_list=[emb], + return_numpy=False, + ) class TestEmbedOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.randint(0, 10, (4, 6)).astype("int64") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py index d4ccbc18c341109e6ffbd6060c41c606da50d354..6a2976ccbb528a4b60abfc4c6690ad6b061ae086 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py @@ -20,7 +20,11 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -34,12 +38,10 @@ def np_masked_select(x, mask): class XPUTestMaskedSelectOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'masked_select' class TestMaskedSelectOp(XPUOpTest): - def setUp(self): self.init() self.dtype = self.in_type @@ -60,14 +62,12 @@ class XPUTestMaskedSelectOp(XPUOpTestWrapper): self.shape = (50, 3) class TestMaskedSelectOp1(TestMaskedSelectOp): - def init(self): self.shape = (6, 8, 9, 18) class TestMaskedSelectOp2(TestMaskedSelectOp): - def init(self): - self.shape = (168, ) + self.shape = (168,) support_types = get_xpu_op_support_types('masked_select') @@ -76,7 +76,6 @@ for stype in support_types: class TestMaskedSelectAPI(unittest.TestCase): - def test_imperative_mode(self): paddle.disable_static(paddle.XPUPlace(0)) shape = (88, 6, 8) @@ -101,27 +100,26 @@ class TestMaskedSelectAPI(unittest.TestCase): exe = paddle.static.Executor(place=paddle.XPUPlace(0)) - res = exe.run(paddle.static.default_main_program(), - feed={ - "x": np_x, - "mask": np_mask - }, - fetch_list=[out]) + res = 
exe.run( + paddle.static.default_main_program(), + feed={"x": np_x, "mask": np_mask}, + fetch_list=[out], + ) self.assertEqual(np.allclose(res, np_out), True) class TestMaskedSelectError(unittest.TestCase): - def test_error(self): - with paddle.static.program_guard(paddle.static.Program(), - paddle.static.Program()): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): shape = [8, 9, 6] x = paddle.fluid.data(shape=shape, dtype='float32', name='x') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data(shape=shape, - dtype='float32', - name='mask_float') + mask_float = paddle.fluid.data( + shape=shape, dtype='float32', name='mask_float' + ) np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index 0afcd1c57343fd683a447f8dbadfb7bb4b34d0cd..c4aab23a95201c4b7266fcc170505ad15e2b277f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -22,7 +22,11 @@ import paddle import paddle.fluid as fluid from paddle.fluid import Program, program_guard -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): @@ -64,8 +68,9 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): return Out -def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y, - batch_size): +def generate_compatible_shapes( + dim_X, dim_Y, transpose_X, transpose_Y, batch_size +): BATCH_SIZE = 2 if batch_size != None: BATCH_SIZE = batch_size @@ -131,6 +136,7 @@ def generate_compatible_shapes_2(dim, transpose_X, transpose_Y): def generate_negative_dims(in_shape): from itertools import combinations + size = len(in_shape) indexs = list() shapes = list() @@ -138,7 +144,8 @@ def generate_negative_dims(in_shape): indexs.extend(list(combinations([j for j in range(size)], i + 1))) for idx in indexs: shapes.append( - [in_shape[i] if i not in idx else -1 for i in range(size)]) + [in_shape[i] if i not in idx else -1 for i in range(size)] + ) return shapes @@ -151,49 +158,48 @@ def test_negative_dims_program(obj): with program_guard(Program(), Program()): x = fluid.data(name='x', shape=shape_x, dtype=obj.in_type_str) y = fluid.data(name='y', shape=shape_y, dtype=obj.in_type_str) - output = fluid.layers.matmul(x, y, obj.transpose_X, - obj.transpose_Y) + output = fluid.layers.matmul( + x, y, obj.transpose_X, obj.transpose_Y + ) obj.assertEqual(len(Ref.shape), len(output.shape)) for idx in range(len(Ref.shape)): if output.shape[idx] != -1: obj.assertEqual(Ref.shape[idx], output.shape[idx]) exe = fluid.Executor(fluid.XPUPlace(0)) - res, = exe.run(fluid.default_main_program(), - feed={ - 'x': X, - 'y': Y - }, - fetch_list=[output]) + (res,) = exe.run( + fluid.default_main_program(), + feed={'x': X, 'y': Y}, + fetch_list=[output], + ) np.allclose(res, Ref, atol=1e-3) class XPUTestMatmulOpErr(XPUOpTestWrapper): - def __init__(self): self.op_name = "matmul" self.use_dynamic_create_class = False class TestMatmulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The inputs type of matmul_op must be 
Variable. input1 = 12 - self.assertRaises(TypeError, fluid.layers.matmul, input1, - input1) + self.assertRaises( + TypeError, fluid.layers.matmul, input1, input1 + ) # The inputs dtype of matmul_op must be float32, float16 - input2 = fluid.layers.data(name='input2', - shape=[10, 10], - dtype="int32") - self.assertRaises(TypeError, fluid.layers.matmul, input2, - input2) - input3 = fluid.layers.data(name='input3', - shape=[2, 2], - dtype="float16") + input2 = fluid.layers.data( + name='input2', shape=[10, 10], dtype="int32" + ) + self.assertRaises( + TypeError, fluid.layers.matmul, input2, input2 + ) + input3 = fluid.layers.data( + name='input3', shape=[2, 2], dtype="float16" + ) fluid.layers.matmul(input3, input3) class API_TestMm(unittest.TestCase): - def test_out(self): with fluid.program_guard(fluid.Program()): x = fluid.data(name="x", shape=[2], dtype=self.in_type) @@ -203,13 +209,12 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): exe = fluid.Executor(fluid.XPUPlace(0)) data1 = np.random.rand(2).astype(self.in_type) data2 = np.random.rand(2).astype(self.in_type) - np_res = exe.run(feed={ - 'x': data1, - 'y': data2 - }, - fetch_list=[result]) - expected_result = np.matmul(data1.reshape(1, 2), - data2.reshape(2, 1)) + np_res = exe.run( + feed={'x': data1, 'y': data2}, fetch_list=[result] + ) + expected_result = np.matmul( + data1.reshape(1, 2), data2.reshape(2, 1) + ) np.testing.assert_allclose(np_res, expected_result, atol=1e-3) @@ -222,89 +227,94 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): data2 = fluid.dygraph.to_variable(input_array2) out = paddle.mm(data1, data2) expected_result = np.matmul(input_array1, input_array2) - np.testing.assert_allclose(expected_result, - out.numpy(), - atol=1e-3) + np.testing.assert_allclose( + expected_result, out.numpy(), atol=1e-3 + ) class Test_API_Matmul(unittest.TestCase): - def test_dygraph_without_out(self): device = fluid.XPUPlace(0) with fluid.dygraph.guard(device): input_array1 = np.random.rand(3, 4).astype(self.in_type) input_array2 = np.random.rand(4, 3).astype(self.in_type) data1 = fluid.dygraph.to_variable(input_array1).astype( - self.in_type) + self.in_type + ) data2 = fluid.dygraph.to_variable(input_array2).astype( - self.in_type) + self.in_type + ) out = paddle.matmul(data1, data2) expected_result = np.matmul(input_array1, input_array2) - np.testing.assert_allclose(expected_result, - out.numpy(), - atol=1e-3) + np.testing.assert_allclose( + expected_result, out.numpy(), atol=1e-3 + ) class API_TestMmError(unittest.TestCase): - def test_errors(self): - def test_error1(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data(name="data1", - shape=[10, 2], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[3, 10], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[10, 2], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[3, 10], dtype="float32" + ) paddle.mm(data1, data2) self.assertRaises(ValueError, test_error1) def test_error2(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data(name="data1", - shape=[-1, 10, 2], - dtype="float32") - data2 = fluid.data(name="data2", - shape=[-1, 2, 10], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[-1, 10, 2], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[-1, 2, 10], dtype="float32" + ) paddle.mm(data1, data2) test_error2() def test_error3(): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.data(name="data1", - shape=[10, 10, 2], - 
dtype="float32") - data2 = fluid.data(name="data2", - shape=[3, 2, 10], - dtype="float32") + data1 = fluid.data( + name="data1", shape=[10, 10, 2], dtype="float32" + ) + data2 = fluid.data( + name="data2", shape=[3, 2, 10], dtype="float32" + ) paddle.mm(data1, data2) self.assertRaises(ValueError, test_error3) class TestMatmulBaseGenerator(XPUOpTest): - def setUp(self): self.op_type = "matmul" - self.dtype = np.float32 if not hasattr(self, - 'in_type') else self.in_type + self.dtype = ( + np.float32 if not hasattr(self, 'in_type') else self.in_type + ) - self.__class__.no_need_check_grad = False if not hasattr( - self, 'no_need_check_grad') else self.no_need_check_grad + self.__class__.no_need_check_grad = ( + False + if not hasattr(self, 'no_need_check_grad') + else self.no_need_check_grad + ) shape_X = [4, 5] if not hasattr(self, 'shape_X') else self.shape_X shape_Y = [5, 6] if not hasattr(self, 'shape_Y') else self.shape_Y - transpose_X = False if not hasattr(self, - 'transpose_X') else self.transpose_X - transpose_Y = False if not hasattr(self, - 'transpose_Y') else self.transpose_Y + transpose_X = ( + False if not hasattr(self, 'transpose_X') else self.transpose_X + ) + transpose_Y = ( + False if not hasattr(self, 'transpose_Y') else self.transpose_Y + ) X = np.random.random(shape_X).astype(self.dtype) Y = np.random.random(shape_Y).astype(self.dtype) - Out = reference_matmul(X, Y, transpose_X, - transpose_Y).astype(self.dtype) + Out = reference_matmul(X, Y, transpose_X, transpose_Y).astype( + self.dtype + ) self.inputs = {'X': X, 'Y': Y} self.attrs = {'transpose_X': transpose_X, 'transpose_Y': transpose_Y} self.outputs = {'Out': Out} @@ -314,40 +324,43 @@ class TestMatmulBaseGenerator(XPUOpTest): self.check_output_with_place(place, atol=1e-3) def test_check_grad_normal(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=5e-2) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=5e-2 + ) def test_check_grad_ignore_x(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=5e-2, - no_grad_set=set("X")) + self.check_grad_with_place( + place, ['Y'], 'Out', max_relative_error=5e-2, no_grad_set=set("X") + ) def test_check_grad_ignore_y(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=5e-2, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=5e-2, no_grad_set=set('Y') + ) class XPUTestMatmulOp1(XPUOpTestWrapper): - def __init__(self): self.op_name = "matmul" self.use_dynamic_create_class = True @@ -366,19 +379,19 @@ class XPUTestMatmulOp1(XPUOpTestWrapper): no_need_check_grad = False if batch >= 5: no_need_check_grad = True - class_name = ( - 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}' - .format(dim_X, dim_Y, 
transose_x, transose_y, - batch)) + class_name = 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'.format( + dim_X, dim_Y, transose_x, transose_y, batch + ) shape_x, shape_y = generate_compatible_shapes( - dim_X, dim_Y, transose_x, transose_y, batch) + dim_X, dim_Y, transose_x, transose_y, batch + ) attr_dict = { 'shape_X': shape_x, 'shape_Y': shape_y, 'transpose_X': transose_x, 'transpose_Y': transose_y, 'no_need_check_grad': no_need_check_grad, - 'op_type': "matmul" + 'op_type': "matmul", } classes.append([class_name, attr_dict]) @@ -386,7 +399,6 @@ class XPUTestMatmulOp1(XPUOpTestWrapper): class XPUTestMatmulOp2(XPUOpTestWrapper): - def __init__(self): self.op_name = "matmul" self.use_dynamic_create_class = True @@ -402,12 +414,12 @@ class XPUTestMatmulOp2(XPUOpTestWrapper): for transose_x in [True, False]: for transose_y in [True, False]: for batch in batch_size: - class_name = ( - 'TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}' - .format(dim_X, dim_Y, transose_x, transose_y, - batch)) + class_name = 'TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'.format( + dim_X, dim_Y, transose_x, transose_y, batch + ) shape_x, shape_y = generate_compatible_shapes( - dim_X, dim_Y, transose_x, transose_y, batch) + dim_X, dim_Y, transose_x, transose_y, batch + ) attr_dict = { 'shape_X': shape_x, 'shape_Y': shape_y, @@ -420,7 +432,6 @@ class XPUTestMatmulOp2(XPUOpTestWrapper): class XPUTestMatmulOp3(XPUOpTestWrapper): - def __init__(self): self.op_name = "matmul" self.use_dynamic_create_class = True @@ -431,17 +442,18 @@ class XPUTestMatmulOp3(XPUOpTestWrapper): for dim in [4]: for transpose_X in [False, True]: for transpose_Y in [False, True]: - class_name = ( - 'TestMatMulOp2_dimX_{}_dim_Y_{}_transX_{}_transY_{}'. - format(dim, dim, transpose_X, transpose_Y)) + class_name = 'TestMatMulOp2_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( + dim, dim, transpose_X, transpose_Y + ) shape_X, shape_Y = generate_compatible_shapes_2( - dim, transpose_X, transpose_Y) + dim, transpose_X, transpose_Y + ) attr_dict = { 'shape_X': shape_X, 'shape_Y': shape_Y, 'transpose_X': transpose_X, 'transpose_Y': transpose_Y, - 'op_type': "matmul" + 'op_type': "matmul", } classes.append([class_name, attr_dict]) return base_class, classes diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py index 8473f9c506dfc2e2eb5a7d5d5d42df8ecd48d047..63354ac7607ee8f118ae877943c5d1af66d03e58 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py @@ -21,7 +21,11 @@ from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): @@ -30,7 +34,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): # transpose X and Y appropriately. 
if transpose_X: if X.ndim == 1: - X = X.reshape((X.size, )) + X = X.reshape((X.size,)) elif X.ndim == 2: X = X.T else: @@ -39,7 +43,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): X = np.transpose(X, tuple(dim)) if transpose_Y: if Y.ndim == 1: - Y = Y.reshape((Y.size, )) + Y = Y.reshape((Y.size,)) else: dim = [i for i in range(len(Y.shape))] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] @@ -55,7 +59,6 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): class XPUTestMatmulV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = "matmul_v2" self.use_dynamic_create_class = False @@ -66,8 +69,8 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): """ def config(self): - self.x_shape = (100, ) - self.y_shape = (100, ) + self.x_shape = (100,) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -96,8 +99,10 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): self.check_output_with_place(place) def test_check_grad(self): - if hasattr(self.__class__, "no_need_check_grad" - ) and self.__class__.no_need_check_grad == True: + if ( + hasattr(self.__class__, "no_need_check_grad") + and self.__class__.no_need_check_grad == True + ): return place = paddle.XPUPlace(0) self.check_grad_with_place(place, ['X', 'Y'], 'Out') @@ -108,7 +113,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): """ def config(self): - self.x_shape = (100) + self.x_shape = 100 self.y_shape = (100, 3) self.trans_x = False self.trans_y = False @@ -119,7 +124,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): """ def config(self): - self.x_shape = (100, ) + self.x_shape = (100,) self.y_shape = (1, 1, 100, 2) self.trans_x = False self.trans_y = False @@ -142,7 +147,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def config(self): self.x_shape = (1, 1, 100, 1) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = True self.trans_y = False @@ -219,7 +224,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def config(self): self.x_shape = (1, 20, 100) - self.y_shape = (100, ) + self.y_shape = (100,) self.trans_x = False self.trans_y = False @@ -274,7 +279,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def config(self): self.x_shape = (2, 1, 100) - self.y_shape = (100) + self.y_shape = 100 self.trans_x = False self.trans_y = False @@ -285,7 +290,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def config(self): self.x_shape = (8, 111, 4, 17) - self.y_shape = (17) + self.y_shape = 17 self.trans_x = False self.trans_y = False diff --git a/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py index 2ec886796f0b6abd3b607607507fc54b9a93b472..c86aaeea6cfb3207a2ee7fac784dc7a1c8e93fee 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py @@ -25,19 +25,21 @@ from paddle.fluid import Program, program_guard np.random.seed(10) from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestMeanOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'mean' self.use_dynamic_create_class = False class TestMeanOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -65,41 +67,36 @@ class XPUTestMeanOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestMeanOp1(TestMeanOp): - def 
set_shape(self): - self.shape = (5) + self.shape = 5 class TestMeanOp2(TestMeanOp): - def set_shape(self): self.shape = (5, 7, 8) class TestMeanOp3(TestMeanOp): - def set_shape(self): self.shape = (10, 5, 7, 8) class TestMeanOp4(TestMeanOp): - def set_shape(self): self.shape = (2, 2, 3, 3, 3) class TestMeanOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of mean_op must be Variable. input1 = 12 self.assertRaises(TypeError, paddle.mean, input1) # The input dtype of mean_op must be float16, float32, float64. - input2 = fluid.layers.data(name='input2', - shape=[12, 10], - dtype="int32") + input2 = fluid.layers.data( + name='input2', shape=[12, 10], dtype="int32" + ) self.assertRaises(TypeError, paddle.mean, input2) - input3 = fluid.layers.data(name='input3', - shape=[4], - dtype="float16") + input3 = fluid.layers.data( + name='input3', shape=[4], dtype="float16" + ) fluid.layers.softmax(input3) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu.py index 554508b877f0c79e2b38ce19fb852fdfe3742057..591cb32541723dd611faea4c1154f48f9f8701a9 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu.py @@ -20,19 +20,21 @@ sys.path.append("..") import paddle from test_merged_momentum_op_xpu_base import TestMergedMomentumBase -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestMergedMomentumOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'merged_momentum' self.use_dynamic_create_class = False class TestMergedMomentumOp(TestMergedMomentumBase): - def setUp(self): super().setUp() self.set_case() @@ -46,22 +48,18 @@ class XPUTestMergedMomentumOP(XPUOpTestWrapper): self.check_with_place(self.place, self.in_type) class TestMergedMomentum1(TestMergedMomentumOp): - def set_case(self): self.shapes = [[3, 4], [2, 7], [5, 6, 8]] class TestMergedMomentum2(TestMergedMomentumOp): - def set_case(self): self.shapes = [[3, 4], [2, 7]] class TestMergedMomentum3(TestMergedMomentumOp): - def set_case(self): self.shapes = [[3, 4]] class TestMergedMomentum4(TestMergedMomentumOp): - def set_case(self): self.shapes = [[3, 4], [2, 7], [5, 6, 7], [9, 9], [10, 12]] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu_base.py b/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu_base.py index 64d1e5aa9b26cdcba6c6145131ee22ca4e375a24..05044f11ee41b122a634215fccc9f9350f3da9a4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu_base.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_merged_momentum_op_xpu_base.py @@ -22,17 +22,19 @@ from paddle.fluid.layer_helper import LayerHelper from collections import OrderedDict -def run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - mu=0.9, - rescale_grad=0.01, - use_merged=False, - use_nesterov=True): +def run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + mu=0.9, + rescale_grad=0.01, + use_merged=False, + use_nesterov=True, +): assert len(params) == len(grads) assert len(params) == len(velocitys) if multi_precision: @@ -44,48 +46,70 @@ 
def run_momentum_op(params, helper = LayerHelper(op_type, **locals()) param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in params ] grad_vars = [ helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads ] velocity_vars = [ - helper.create_variable(persistable=True, - shape=v.shape, - dtype=v.dtype) for v in velocitys + helper.create_variable( + persistable=True, shape=v.shape, dtype=v.dtype + ) + for v in velocitys ] - lr_var = helper.create_variable(persistable=True, - shape=learning_rate.shape, - dtype=learning_rate.dtype) + lr_var = helper.create_variable( + persistable=True, + shape=learning_rate.shape, + dtype=learning_rate.dtype, + ) feed_dict = OrderedDict() feed_dict.update( - OrderedDict([(p_var.name, p_val) - for p_var, p_val in zip(param_vars, params)])) + OrderedDict( + [ + (p_var.name, p_val) + for p_var, p_val in zip(param_vars, params) + ] + ) + ) feed_dict.update( - OrderedDict([(v_var.name, v_val) - for v_var, v_val in zip(velocity_vars, velocitys)])) + OrderedDict( + [ + (v_var.name, v_val) + for v_var, v_val in zip(velocity_vars, velocitys) + ] + ) + ) fetch_list = list(feed_dict.keys()) feed_dict.update( - OrderedDict([(g_var.name, g_val) - for g_var, g_val in zip(grad_vars, grads)])) + OrderedDict( + [(g_var.name, g_val) for g_var, g_val in zip(grad_vars, grads)] + ) + ) feed_dict.update({lr_var.name: learning_rate}) if multi_precision: master_param_vars = [ - helper.create_variable(persistable=True, - shape=p.shape, - dtype=p.dtype) for p in master_params + helper.create_variable( + persistable=True, shape=p.shape, dtype=p.dtype + ) + for p in master_params ] feed_dict.update( - OrderedDict([ - (mp_var.name, mp_val) - for mp_var, mp_val in zip(master_param_vars, master_params) - ])) + OrderedDict( + [ + (mp_var.name, mp_val) + for mp_var, mp_val in zip( + master_param_vars, master_params + ) + ] + ) + ) # CPUPlace does not use MasterParam if isinstance(place, paddle.CUDAPlace): fetch_list = fetch_list + [ @@ -95,8 +119,9 @@ def run_momentum_op(params, master_param_vars = None if not use_merged: - for i, (p, g, - v) in enumerate(zip(param_vars, grad_vars, velocity_vars)): + for i, (p, g, v) in enumerate( + zip(param_vars, grad_vars, velocity_vars) + ): inputs = { 'Param': p, 'Grad': g, @@ -115,10 +140,9 @@ def run_momentum_op(params, 'regularization_method': 'l2_decay', 'regularization_coeff': 2.0, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) else: inputs = { 'Param': param_vars, @@ -131,22 +155,18 @@ def run_momentum_op(params, inputs['MasterParam'] = master_param_vars outputs['MasterParamOut'] = master_param_vars attrs = { - 'mu': - mu, - 'multi_precision': - multi_precision, - 'rescale_grad': - rescale_grad, - 'use_nesterov': - use_nesterov, - 'regularization_method': - ['l2_decay' for i in range(len(param_vars))], + 'mu': mu, + 'multi_precision': multi_precision, + 'rescale_grad': rescale_grad, + 'use_nesterov': use_nesterov, + 'regularization_method': [ + 'l2_decay' for i in range(len(param_vars)) + ], 'regularization_coeff': [2.0 for i in range(len(param_vars))], } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) exe = paddle.static.Executor(place) with 
paddle.static.scope_guard(paddle.static.Scope()): @@ -155,7 +175,6 @@ def run_momentum_op(params, class TestMergedMomentumBase(unittest.TestCase): - def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] @@ -179,22 +198,31 @@ class TestMergedMomentumBase(unittest.TestCase): return params, grads, velocitys, master_params, learning_rate def check_with_place(self, place, dtype, multi_precision=False): - params, grads, velocitys, master_params, learning_rate = self.prepare_data( - self.shapes, multi_precision, self.seed, dtype, place) + ( + params, + grads, + velocitys, + master_params, + learning_rate, + ) = self.prepare_data( + self.shapes, multi_precision, self.seed, dtype, place + ) def run_op(use_nesterov, use_merged): # NPU Momentum Op does not support rescale_grad rescale_grad = 1.0 - return run_momentum_op(params, - grads, - velocitys, - master_params, - learning_rate, - place, - multi_precision, - rescale_grad=rescale_grad, - use_merged=use_merged, - use_nesterov=use_nesterov) + return run_momentum_op( + params, + grads, + velocitys, + master_params, + learning_rate, + place, + multi_precision, + rescale_grad=rescale_grad, + use_merged=use_merged, + use_nesterov=use_nesterov, + ) outs1 = run_op(use_nesterov=True, use_merged=True) outs2 = run_op(use_nesterov=True, use_merged=False) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_momentum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_momentum_op_xpu.py index 5cc9cb49e27cbdf06baa1773576d9f326242358f..0ee2af0e2023c3edbea9fb3566c767c1bd9ac492 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_momentum_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_momentum_op_xpu.py @@ -22,14 +22,25 @@ import paddle import paddle.fluid.core as core from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() -def calculate_momentum_by_numpy(param, grad, mu, velocity, use_nesterov, - learning_rate, regularization_method, - regularization_coeff): +def calculate_momentum_by_numpy( + param, + grad, + mu, + velocity, + use_nesterov, + learning_rate, + regularization_method, + regularization_coeff, +): if regularization_method == "l2_decay": grad = grad + regularization_coeff * param velocity_out = mu * velocity + grad @@ -40,21 +51,20 @@ def calculate_momentum_by_numpy(param, grad, mu, velocity, use_nesterov, else: velocity_out = mu * velocity + grad if use_nesterov: - param_out = param - grad * learning_rate - \ - velocity_out * mu * learning_rate + param_out = ( + param - grad * learning_rate - velocity_out * mu * learning_rate + ) else: param_out = param - learning_rate * velocity_out return param_out, velocity_out class XPUTestMomentumOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'momentum' self.use_dynamic_create_class = False class TestMomentumOPBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.xpu_version = core.get_xpu_device_version(0) @@ -65,12 +75,15 @@ class XPUTestMomentumOP(XPUOpTestWrapper): self.op_type = 'momentum' self.init_config() - self.param = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) - self.grad = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.param = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) + self.grad = np.random.uniform(-1, 1, self.input_shape).astype( + 
self.dtype + ) self.velocity = np.random.uniform(-1, 1, self.input_shape).astype( - self.dtype) + self.dtype + ) param_out, velocity_out = calculate_momentum_by_numpy( param=self.param, grad=self.grad, @@ -79,7 +92,8 @@ class XPUTestMomentumOP(XPUOpTestWrapper): use_nesterov=self.use_nesterov, learning_rate=self.learning_rate, regularization_method=self.regularization_method, - regularization_coeff=self.regularization_coeff) + regularization_coeff=self.regularization_coeff, + ) param_out = param_out.astype(self.dtype) velocity_out = velocity_out.astype(self.dtype) self.inputs = { @@ -93,7 +107,7 @@ class XPUTestMomentumOP(XPUOpTestWrapper): 'mu': self.mu, 'use_nesterov': self.use_nesterov, 'regularization_method': self.regularization_method, - 'regularization_coeff': self.regularization_coeff + 'regularization_coeff': self.regularization_coeff, } self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out} @@ -112,7 +126,6 @@ class XPUTestMomentumOP(XPUOpTestWrapper): self.regularization_coeff = 0 class XPUTestMomentum1(TestMomentumOPBase): - def init_config(self): self.input_shape = [2, 768] self.learning_rate = np.array([0.002]).astype(float) @@ -122,7 +135,6 @@ class XPUTestMomentumOP(XPUOpTestWrapper): self.regularization_coeff = 0 class XPUTestMomentum2(TestMomentumOPBase): - def init_config(self): self.input_shape = [3, 8, 4096] self.learning_rate = np.array([0.005]).astype(float) @@ -132,7 +144,6 @@ class XPUTestMomentumOP(XPUOpTestWrapper): self.regularization_coeff = 0 class XPUTestMomentum3(TestMomentumOPBase): - def init_config(self): self.input_shape = [1024] self.learning_rate = np.array([0.01]).astype(float) @@ -147,7 +158,6 @@ class XPUTestMomentumOP(XPUOpTestWrapper): self.regularization_coeff = 0 class XPUTestMomentum4(TestMomentumOPBase): - def init_config(self): self.input_shape = [2, 2, 255] self.learning_rate = np.array([0.0005]).astype(float) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_mul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_mul_op_xpu.py index b8d4666761a745caac32891555455d9103b41d9d..08b3b3e89a3c69ebefaf271a4ba1a82901e9d272 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_mul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_mul_op_xpu.py @@ -24,18 +24,23 @@ from paddle.fluid import Program, program_guard paddle.enable_static() -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class TestMulOpError(unittest.TestCase): - def test_errors(self): with program_guard(Program(), Program()): # The input type of mul_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.XPUPlace(0)) - x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - fluid.XPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.XPUPlace(0) + ) + x2 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.XPUPlace(0) + ) self.assertRaises(TypeError, fluid.layers.mul, x1, x2) # The input dtype of mul_op must be float32. 
x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32") @@ -44,26 +49,26 @@ class TestMulOpError(unittest.TestCase): class XPUTestMulOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'mul' self.use_dynamic_create_class = False class TestXPUMulOp1(XPUOpTest): - def setUp(self): self.op_type = "mul" self.dtype = self.in_type self.inputs = { 'X': np.random.random((3, 4, 2, 9)).astype(self.in_type_str), - 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.in_type_str) + 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.in_type_str), } self.attrs = { 'x_num_col_dims': 2, 'y_num_col_dims': 2, } - result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9), - self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3)) + result = np.dot( + self.inputs['X'].reshape(3 * 4, 2 * 9), + self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3), + ) result = result.reshape(3, 4, 1, 2, 3) self.outputs = {'Out': result} @@ -75,35 +80,40 @@ class XPUTestMulOp(XPUOpTestWrapper): def test_check_grad_normal(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=0.1) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.1 + ) def test_check_grad_ingore_x(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.1, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.1, + no_grad_set=set("X"), + ) def test_check_grad_ignore_y(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.1, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.1, + no_grad_set=set('Y'), + ) class TestXPUMulOp2(XPUOpTest): - def setUp(self): self.op_type = "mul" self.use_xpu = True self.dtype = self.in_type self.inputs = { 'X': np.random.random((20, 5)).astype(self.in_type_str), - 'Y': np.random.random((5, 21)).astype(self.in_type_str) + 'Y': np.random.random((5, 21)).astype(self.in_type_str), } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} @@ -115,25 +125,31 @@ class XPUTestMulOp(XPUOpTestWrapper): def test_check_grad_normal(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['X', 'Y'], - 'Out', - max_relative_error=0.1) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.1 + ) def test_check_grad_ingore_x(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['Y'], - 'Out', - max_relative_error=0.1, - no_grad_set=set("X")) + self.check_grad_with_place( + place, + ['Y'], + 'Out', + max_relative_error=0.1, + no_grad_set=set("X"), + ) def test_check_grad_ingore_y(self): place = paddle.XPUPlace(0) paddle.enable_static() - self.check_grad_with_place(place, ['X'], - 'Out', - max_relative_error=0.1, - no_grad_set=set('Y')) + self.check_grad_with_place( + place, + ['X'], + 'Out', + max_relative_error=0.1, + no_grad_set=set('Y'), + ) support_types = get_xpu_op_support_types('mul') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py index 3ded60a3d804c586594866286f1b2f7838f9cfc3..0f2d4fd27ecc910ff849f7dadc083a91e39a4ee2 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_v2_op_xpu.py 
@@ -21,20 +21,26 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() -def nearest_neighbor_interp_np(X, - out_h, - out_w, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp_np( + X, + out_h, + out_w, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW @@ -47,16 +53,16 @@ def nearest_neighbor_interp_np(X, n, c, in_h, in_w = X.shape ratio_h = ratio_w = 0.0 - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -84,17 +90,19 @@ def nearest_neighbor_interp_np(X, return out.astype(X.dtype) -def nearest_neighbor_interp3d_np(X, - out_d, - out_h, - out_w, - scale_d=0, - scale_h=0, - scale_w=0, - out_size=None, - actual_shape=None, - align_corners=True, - data_layout='NCHW'): +def nearest_neighbor_interp3d_np( + X, + out_d, + out_h, + out_w, + scale_d=0, + scale_h=0, + scale_w=0, + out_size=None, + actual_shape=None, + align_corners=True, + data_layout='NCHW', +): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if data_layout == "NHWC": X = np.transpose(X, (0, 4, 1, 2, 3)) # NDHWC => NCDHW @@ -109,24 +117,24 @@ def nearest_neighbor_interp3d_np(X, n, c, in_d, in_h, in_w = X.shape ratio_d = ratio_h = ratio_w = 0.0 - if (out_d > 1): - if (align_corners): + if out_d > 1: + if align_corners: ratio_d = (in_d - 1.0) / (out_d - 1.0) else: if scale_d > 0: ratio_d = 1.0 / scale_d else: ratio_d = 1.0 * in_d / out_d - if (out_h > 1): - if (align_corners): + if out_h > 1: + if align_corners: ratio_h = (in_h - 1.0) / (out_h - 1.0) else: if scale_h > 0: ratio_h = 1.0 / scale_h else: ratio_h = 1.0 * in_h / out_h - if (out_w > 1): - if (align_corners): + if out_w > 1: + if align_corners: ratio_w = (in_w - 1.0) / (out_w - 1.0) else: if scale_w > 0: @@ -158,13 +166,11 @@ def nearest_neighbor_interp3d_np(X, class XPUNearestInterpOpWrapper(XPUOpTestWrapper): - def __init__(self): self.op_name = 'nearest_interp_v2' self.use_dynamic_create_class = False class TestNearestInterpOp(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -174,7 +180,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.data_layout = 'NCHW' self.interp_method = 'nearest' - self.scale = 0. 
+ self.scale = 0.0 self.align_corners = True self.init_test_case() @@ -233,13 +239,30 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): # output_np if len(self.input_shape) == 4: output_np = nearest_neighbor_interp_np( - input_np, out_h, out_w, scale_h, scale_w, self.out_size, - self.actual_shape, self.align_corners, self.data_layout) + input_np, + out_h, + out_w, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) elif len(self.input_shape) == 5: output_np = nearest_neighbor_interp3d_np( - input_np, out_d, out_h, out_w, scale_d, scale_h, scale_w, - self.out_size, self.actual_shape, self.align_corners, - self.data_layout) + input_np, + out_d, + out_h, + out_w, + scale_d, + scale_h, + scale_w, + self.out_size, + self.actual_shape, + self.align_corners, + self.data_layout, + ) self.outputs = {'Out': output_np} self.inputs = {'X': input_np} @@ -255,7 +278,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } else: self.attrs = { @@ -263,7 +286,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): 'out_w': self.out_w, 'interp_method': self.interp_method, 'align_corners': self.align_corners, - 'data_layout': self.data_layout + 'data_layout': self.data_layout, } if self.scale: @@ -298,21 +321,18 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): """ class TestNearestNeighborInterpCase2(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 class TestNearestNeighborInterpCase3(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 self.out_w = 32 class TestNearestNeighborInterpCase4(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [4, 1, 7, 8] self.out_h = 1 @@ -320,7 +340,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.out_size = np.array([2, 2]).astype("int32") class TestNearestNeighborInterpCase5(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 @@ -328,7 +347,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.out_size = np.array([11, 11]).astype("int32") class TestNearestNeighborInterpCase6(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [1, 1, 32, 64] self.out_h = 64 @@ -336,14 +354,12 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.out_size = np.array([65, 129]).astype("int32") class TestNearestNeighborInterpSame(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [2, 3, 32, 64] self.out_h = 32 self.out_w = 64 class TestNearestNeighborInterpActualShape(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 64 @@ -365,21 +381,18 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): """ class TestNearestInterpWithoutCorners(TestNearestInterpOp): - def set_align_corners(self): self.align_corners = False class TestNearestNeighborInterpScale1(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 2, 7, 5] self.out_h = 64 self.out_w = 32 - self.scale = 2. 
+ self.scale = 2.0 self.out_size = np.array([66, 40]).astype("int32") class TestNearestNeighborInterpScale2(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 2, 5, 7] self.out_h = 64 @@ -388,7 +401,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.out_size = np.array([66, 40]).astype("int32") class TestNearestNeighborInterpScale3(TestNearestInterpOp): - def init_test_case(self): self.input_shape = [3, 2, 7, 5] self.out_h = 64 @@ -411,7 +423,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): """ class TestNearestInterpOp_attr_tensor(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -420,7 +431,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): self.actual_shape = None self.interp_method = 'nearest' - self.scale = 0. + self.scale = 0.0 self.align_corners = True self.init_test_case() @@ -457,8 +468,9 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): elif self.out_size is not None: size_tensor = [] for index, ele in enumerate(self.out_size): - size_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + size_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs['SizeTensor'] = size_tensor self.attrs['out_h'] = self.out_h @@ -470,10 +482,16 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): if isinstance(self.scale, list) and len(self.scale) == 1: self.scale = [self.scale[0], self.scale[0]] self.attrs['scale'] = self.scale - output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0, - self.out_size, - self.actual_shape, - self.align_corners) + output_np = nearest_neighbor_interp_np( + input_np, + out_h, + out_w, + 0, + 0, + self.out_size, + self.actual_shape, + self.align_corners, + ) self.outputs = {'Out': output_np} def init_dtype(self): @@ -493,7 +511,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): # out_size is a tensor list class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.input_shape = [3, 3, 9, 6] self.out_h = 12 @@ -502,7 +519,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): # out_size is a 1-D tensor class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 64 @@ -512,7 +528,6 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper): # scale is a 1-D tensor class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor): - def init_test_case(self): self.input_shape = [3, 2, 32, 16] self.out_h = 64 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_op_xpu.py index 579f8c1faf0fec7ed830b8d18527080e9889a055..d3f1a30066411e7395809f2c3df1746d03ae7578 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_op_xpu.py @@ -20,19 +20,21 @@ sys.path.append("..") import paddle import paddle.fluid.core as core from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestOneHotOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'one_hot' self.use_dynamic_create_class = False class TestXPUOneHotOP(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -49,11 +51,15 @@ class 
XPUTestOneHotOP(XPUOpTestWrapper): np.random.randint(0, self.depth - 1) for i in range(sum(self.x_lod[0])) ] - self.x = np.array(self.x).astype(self.dtype).reshape( - [sum(self.x_lod[0]), 1]) - - self.out = np.zeros(shape=(np.product(self.x.shape[:-1]), - self.depth)).astype('float32') + self.x = ( + np.array(self.x) + .astype(self.dtype) + .reshape([sum(self.x_lod[0]), 1]) + ) + + self.out = np.zeros( + shape=(np.product(self.x.shape[:-1]), self.depth) + ).astype('float32') for i in range(np.product(self.x.shape)): self.out[i, self.x[i]] = 1.0 @@ -62,7 +68,7 @@ class XPUTestOneHotOP(XPUOpTestWrapper): def set_input(self): self.inputs = { 'X': (self.x, self.x_lod), - 'depth_tensor': self.depth_np + 'depth_tensor': self.depth_np, } self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)} @@ -73,31 +79,27 @@ class XPUTestOneHotOP(XPUOpTestWrapper): self.dtype = self.in_type class TestXPUOneHotOP_attr(TestXPUOneHotOP): - def set_input(self): self.inputs = {'X': (self.x, self.x_lod)} self.attrs = { 'dtype': int(core.VarDesc.VarType.FP32), - 'depth': self.depth + 'depth': self.depth, } class TestXPUOneHotOP_default_dtype(TestXPUOneHotOP): - def set_input(self): self.inputs = { 'X': (self.x, self.x_lod), - 'depth_tensor': self.depth_np + 'depth_tensor': self.depth_np, } self.attrs = {} class TestXPUOneHotOP_default_dtype_attr(TestXPUOneHotOP): - def set_input(self): self.inputs = {'X': (self.x, self.x_lod)} self.attrs = {'depth': self.depth} class TestXPUOneHotOP_out_of_range(TestXPUOneHotOP): - def set_data(self): self.depth = 10 self.x_lod = [[4, 1, 3, 3]] @@ -105,11 +107,15 @@ class XPUTestOneHotOP(XPUOpTestWrapper): np.random.choice([-1, self.depth]) for i in range(sum(self.x_lod[0])) ] - self.x = np.array(self.x).astype(self.dtype).reshape( - [sum(self.x_lod[0]), 1]) - - self.out = np.zeros(shape=(np.product(self.x.shape[:-1]), - self.depth)).astype('float32') + self.x = ( + np.array(self.x) + .astype(self.dtype) + .reshape([sum(self.x_lod[0]), 1]) + ) + + self.out = np.zeros( + shape=(np.product(self.x.shape[:-1]), self.depth) + ).astype('float32') self.outputs = {'Out': (self.out, self.x_lod)} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py index 6e7241734494432d4e5e6892d3d8f3b25390d173..19228435ea4abebb79e59ed73e08a2dc5e94f58e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py @@ -21,19 +21,21 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestOneHotOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'one_hot_v2' self.use_dynamic_create_class = False class TestOneHotOp(XPUOpTest): - def init(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -48,8 +50,9 @@ class XPUTestOneHotOp(XPUOpTestWrapper): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) - out = np.zeros(shape=(np.product(x.shape), - depth)).astype(self.dtype) + out = np.zeros(shape=(np.product(x.shape), depth)).astype( + self.dtype + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -62,7 +65,6 @@ class XPUTestOneHotOp(XPUOpTestWrapper): 
self.check_output_with_place(self.place) class TestOneHotOp_attr(TestOneHotOp): - def setUp(self): self.init() depth = 10 @@ -71,8 +73,9 @@ class XPUTestOneHotOp(XPUOpTestWrapper): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype(self.dtype) + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + self.dtype + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -80,12 +83,11 @@ class XPUTestOneHotOp(XPUOpTestWrapper): self.inputs = {'X': (x, x_lod)} self.attrs = { 'dtype': int(core.VarDesc.VarType.FP32), - 'depth': depth + 'depth': depth, } self.outputs = {'Out': (out, x_lod)} class TestOneHotOp_default_dtype(TestOneHotOp): - def setUp(self): self.init() depth = 10 @@ -95,8 +97,9 @@ class XPUTestOneHotOp(XPUOpTestWrapper): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) - out = np.zeros(shape=(np.product(x.shape), - depth)).astype(self.dtype) + out = np.zeros(shape=(np.product(x.shape), depth)).astype( + self.dtype + ) for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 @@ -106,7 +109,6 @@ class XPUTestOneHotOp(XPUOpTestWrapper): self.outputs = {'Out': (out, x_lod)} class TestOneHotOp_default_dtype_attr(TestOneHotOp): - def setUp(self): self.init() depth = 10 @@ -115,8 +117,9 @@ class XPUTestOneHotOp(XPUOpTestWrapper): x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) - out = np.zeros(shape=(np.product(x.shape[:-1]), 1, - depth)).astype(self.dtype) + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, depth)).astype( + self.dtype + ) for i in range(np.product(x.shape)): out[i, 0, x[i]] = 1.0 @@ -126,7 +129,6 @@ class XPUTestOneHotOp(XPUOpTestWrapper): self.outputs = {'Out': (out, x_lod)} class TestOneHotOp_out_of_range(TestOneHotOp): - def setUp(self): self.init() depth = 10 @@ -134,8 +136,9 @@ class XPUTestOneHotOp(XPUOpTestWrapper): x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))] x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) - out = np.zeros(shape=(np.product(x.shape), - depth)).astype(self.dtype) + out = np.zeros(shape=(np.product(x.shape), depth)).astype( + self.dtype + ) self.inputs = {'X': (x, x_lod)} self.attrs = {'depth': depth, 'allow_out_of_range': True} @@ -143,7 +146,6 @@ class XPUTestOneHotOp(XPUOpTestWrapper): class TestOneHotOpApi(unittest.TestCase): - def test_api(self): depth = 10 self._run(depth) @@ -154,39 +156,45 @@ class TestOneHotOpApi(unittest.TestCase): def test_api_with_dygraph(self): depth = 10 - label = np.array([np.random.randint(0, depth - 1) - for i in range(6)]).reshape([6, 1]) + label = np.array( + [np.random.randint(0, depth - 1) for i in range(6)] + ).reshape([6, 1]) with fluid.dygraph.guard(): one_hot_label = fluid.one_hot( - input=fluid.dygraph.to_variable(label), depth=depth) + input=fluid.dygraph.to_variable(label), depth=depth + ) def _run(self, depth): label = fluid.layers.data(name="label", shape=[1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) place = fluid.XPUPlace(0) - label_data = np.array([np.random.randint(0, 10 - 1) - for i in range(6)]).reshape([6, 1]) + label_data = np.array( + [np.random.randint(0, 10 - 1) for i in range(6)] + ).reshape([6, 1]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - ret = exe.run(feed={ - 'label': label_data, 
- }, - fetch_list=[one_hot_label], - return_numpy=False) + ret = exe.run( + feed={ + 'label': label_data, + }, + fetch_list=[one_hot_label], + return_numpy=False, + ) class BadInputTestOnehotV2(unittest.TestCase): - def test_error(self): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data(name="label", - shape=[4], - append_batch_size=False, - dtype="float32") + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32", + ) one_hot_label = fluid.one_hot(input=label, depth=4) self.assertRaises(TypeError, test_bad_x) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_p_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_p_norm_op_xpu.py index c99c8968018106e5934c3a8c106c7a993c2443a2..a03a6e9939ab4cd86c49119f6c26bb21fee6f859 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_p_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_p_norm_op_xpu.py @@ -19,7 +19,11 @@ import unittest sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -63,13 +67,11 @@ def ref_p_norm(x, axis, porder, keepdims=False, reduce_all=False): class XPUTestPNormOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'p_norm' self.use_dynamic_create_class = False class TestXPUPNormOp(XPUOpTest): - def setUp(self): self.op_type = "p_norm" self.dtype = self.in_type @@ -84,15 +86,16 @@ class XPUTestPNormOp(XPUOpTestWrapper): x_np = np.random.uniform(-10, 10, self.shape).astype(self.dtype) - ref_y_np = ref_p_norm(x_np, self.axis, self.porder, self.keepdims, - self.asvector) + ref_y_np = ref_p_norm( + x_np, self.axis, self.porder, self.keepdims, self.asvector + ) self.inputs = {'X': x_np} self.outputs = {'Out': ref_y_np} self.attrs = { 'epsilon': self.epsilon, 'axis': self.axis, 'porder': float(self.porder), - 'asvector': self.asvector + 'asvector': self.asvector, } def set_attrs(self): @@ -105,70 +108,60 @@ class XPUTestPNormOp(XPUOpTestWrapper): self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out') class TestPnormOp2(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = 2 self.porder = 2.0 class TestPnormOp3(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = 2 self.porder = np.inf class TestPnormOp4(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = 2 self.porder = -np.inf class TestPnormOp5(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = 2 self.porder = 0 class TestPnormOp6(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = -1 self.porder = 2 class TestPnormOp7(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3, 10] self.axis = 2 self.porder = 2.0 class TestPnormOp8(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3] self.axis = 2 self.porder = np.inf class TestPnormOp9(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3, 10] self.axis = 1 self.porder = -np.inf class TestPnormOp10(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3, 10] self.axis = 2 self.porder = 0 class TestPnormOp11(TestXPUPNormOp): - def set_attrs(self): self.shape = [3, 20, 3, 10] self.axis = -1 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py 
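The p_norm tests above compare against a NumPy reference, ref_p_norm, whose body falls outside these hunks. A minimal sketch of such a reference, assuming the usual conventions for porder = 0 (count of non-zeros) and porder = ±inf (max/min of absolute values); it mirrors the intent, not the file's exact code:

import numpy as np

def pnorm_reference(x, axis, porder, keepdims=False):
    x = np.asarray(x)
    if porder == 0:
        return np.sum(x != 0, axis=axis, keepdims=keepdims).astype(x.dtype)
    if np.isinf(porder):
        reduce_fn = np.max if porder > 0 else np.min
        return reduce_fn(np.abs(x), axis=axis, keepdims=keepdims)
    return np.power(
        np.sum(np.power(np.abs(x), porder), axis=axis, keepdims=keepdims),
        1.0 / porder,
    )

# e.g. pnorm_reference(np.random.rand(3, 20, 3), axis=2, porder=2.0)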
b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py index f630c21c94aba839c25bcf796d18d8525b29afa0..45c9f518cbdddd9d30b1c360339d4bcfaf00b6c4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py @@ -20,33 +20,43 @@ import numpy as np from op_test_xpu import XPUOpTest from test_pool2d_op import adaptive_start_index, adaptive_end_index -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() -def max_pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_type=np.float64): +def max_pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_type=np.float64, +): N, C, H, W = x.shape if global_pool == 1: ksize = [H, W] if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 - W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - - 1) // strides[1] + 1 if ceil_mode else ( - W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + H_out = ( + (H - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + 2 * paddings[1] + strides[1] - 1) // strides[1] + 1 + if ceil_mode + else (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + ) out = np.zeros((N, C, H_out, W_out)) for i in range(H_out): for j in range(W_out): @@ -66,27 +76,33 @@ def max_pool2D_forward_naive(x, return out -def avg_pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_type=np.float64): +def avg_pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_type=np.float64, +): N, C, H, W = x.shape if global_pool == 1: ksize = [H, W] if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + 2 * paddings[0] + strides[0] - - 1) // strides[0] + 1 if ceil_mode else ( - H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 - W_out = (W - ksize[1] + 2 * paddings[1] + strides[1] - - 1) // strides[1] + 1 if ceil_mode else ( - W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + H_out = ( + (H - ksize[0] + 2 * paddings[0] + strides[0] - 1) // strides[0] + 1 + if ceil_mode + else (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + 2 * paddings[1] + strides[1] - 1) // strides[1] + 1 + if ceil_mode + else (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1 + ) out = np.zeros((N, C, H_out, W_out)) for i in range(H_out): for j in range(W_out): @@ -108,39 +124,44 @@ def avg_pool2D_forward_naive(x, x_masked = x[:, :, r_start:r_end, c_start:c_end] - if (exclusive or adaptive): + if exclusive or adaptive: field_size = (r_end - r_start) * (c_end - c_start) if data_type == np.int8 or data_type == np.uint8: - out[:, :, i, - j] = (np.rint(np.sum(x_masked, axis=(2, 3)) / - field_size)).astype(data_type) + out[:, :, i, j] = ( + np.rint(np.sum(x_masked, axis=(2, 3)) / field_size) + ).astype(data_type) else: - out[:, :, i, j] = (np.sum(x_masked, axis=(2, 3)) / - 
field_size).astype(data_type) + out[:, :, i, j] = ( + np.sum(x_masked, axis=(2, 3)) / field_size + ).astype(data_type) return out -def pool2D_forward_naive(x, - ksize, - strides, - paddings, - global_pool=0, - ceil_mode=False, - exclusive=True, - adaptive=False, - data_format='NCHW', - pool_type="max", - padding_algorithm="EXPLICIT"): +def pool2D_forward_naive( + x, + ksize, + strides, + paddings, + global_pool=0, + ceil_mode=False, + exclusive=True, + adaptive=False, + data_format='NCHW', + pool_type="max", + padding_algorithm="EXPLICIT", +): # update paddings def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] - for input_size, filter_size, stride_size in zip(input_shape, pool_size, - pool_stride): + for input_size, filter_size, stride_size in zip( + input_shape, pool_size, pool_stride + ): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max( - ((out_size - 1) * stride_size + filter_size - input_size, 0)) + ((out_size - 1) * stride_size + filter_size - input_size, 0) + ) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) @@ -150,9 +171,10 @@ def pool2D_forward_naive(x, if isinstance(padding_algorithm, str): padding_algorithm = padding_algorithm.upper() if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: - raise ValueError("Unknown Attr(padding_algorithm): '%s'. " - "It can only be 'SAME' or 'VALID'." % - str(padding_algorithm)) + raise ValueError( + "Unknown Attr(padding_algorithm): '%s'. " + "It can only be 'SAME' or 'VALID'." % str(padding_algorithm) + ) if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] @@ -160,7 +182,8 @@ def pool2D_forward_naive(x, raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." 
+ ) elif padding_algorithm == "SAME": input_data_shape = [] if data_format == "NCHW": @@ -173,8 +196,11 @@ def pool2D_forward_naive(x, is_sys = True if len(paddings) == 2 else False N = x.shape[0] - C, H, W = [x.shape[1], x.shape[2], x.shape[3]] if data_format == 'NCHW' \ + C, H, W = ( + [x.shape[1], x.shape[2], x.shape[3]] + if data_format == 'NCHW' else [x.shape[3], x.shape[1], x.shape[2]] + ) if global_pool == 1: ksize = [H, W] @@ -188,13 +214,26 @@ def pool2D_forward_naive(x, if adaptive: H_out, W_out = ksize else: - H_out = (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) // strides[0] + 1 \ - if ceil_mode else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 - W_out = (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) // strides[1] + 1 \ - if ceil_mode else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 - - out = np.zeros((N, C, H_out, W_out)) if data_format=='NCHW' \ + H_out = ( + (H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1) + // strides[0] + + 1 + if ceil_mode + else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1 + ) + W_out = ( + (W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1) + // strides[1] + + 1 + if ceil_mode + else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1 + ) + + out = ( + np.zeros((N, C, H_out, W_out)) + if data_format == 'NCHW' else np.zeros((N, H_out, W_out, C)) + ) for i in range(H_out): if adaptive: in_h_start = adaptive_start_index(i, H, ksize[0]) @@ -222,21 +261,22 @@ def pool2D_forward_naive(x, if data_format == 'NCHW': x_masked = x[:, :, in_h_start:in_h_end, in_w_start:in_w_end] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) - + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) -# if (exclusive or adaptive) else (ksize[0] * ksize[1]) + # if (exclusive or adaptive) else (ksize[0] * ksize[1]) out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size elif pool_type == 'max': out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) elif data_format == 'NHWC': x_masked = x[:, in_h_start:in_h_end, in_w_start:in_w_end, :] if pool_type == 'avg': - if (exclusive or adaptive): - field_size = (in_h_end - in_h_start) * (in_w_end - - in_w_start) + if exclusive or adaptive: + field_size = (in_h_end - in_h_start) * ( + in_w_end - in_w_start + ) out[:, i, j, :] = np.sum(x_masked, axis=(1, 2)) / field_size elif pool_type == 'max': out[:, i, j, :] = np.max(x_masked, axis=(1, 2)) @@ -244,13 +284,11 @@ def pool2D_forward_naive(x, class XPUTestPool2D_Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'pool2d' self.use_dynamic_create_class = False class TestPool2D_Op(XPUOpTest): - def setUp(self): self.op_type = "pool2d" self.dtype = self.in_type @@ -272,10 +310,18 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): input = np.random.random(self.shape).astype(self.dtype) output = pool2D_forward_naive( - input, self.ksize, self.strides, self.paddings, - self.global_pool, self.ceil_mode, self.exclusive, self.adaptive, - self.data_format, self.pool_type, - self.padding_algorithm).astype(self.dtype) + input, + self.ksize, + self.strides, + self.paddings, + self.global_pool, + self.ceil_mode, + self.exclusive, + self.adaptive, + self.data_format, + self.pool_type, + self.padding_algorithm, + ).astype(self.dtype) self.inputs = {'X': XPUOpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -290,7 +336,7 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): 'exclusive': self.exclusive, 'adaptive': 
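The H_out/W_out expressions being reformatted above all encode the same pooling output-size rule; only the floor/ceil rounding differs with ceil_mode. A small helper that makes the rule explicit (illustrative names, not from the test file; pad_total is 2 * padding in the symmetric case, or pad_up + pad_down in the asymmetric one):

def pool_out_size(in_size, kernel, pad_total, stride, ceil_mode=False):
    # floor mode: floor((in + pad_total - kernel) / stride) + 1
    # ceil mode:  ceil((in + pad_total - kernel) / stride) + 1
    numer = in_size + pad_total - kernel
    if ceil_mode:
        return (numer + stride - 1) // stride + 1
    return numer // stride + 1

# e.g. H = 7, ksize = [3, 3], paddings = [1, 1], strides = [2, 2]:
# pool_out_size(7, 3, 2, 2) == 4, matching (H - 3 + 2 * 1) // 2 + 1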
self.adaptive, "padding_algorithm": self.padding_algorithm, - 'ceil_mode': self.ceil_mode + 'ceil_mode': self.ceil_mode, } self.outputs = {'Out': output} @@ -335,12 +381,10 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.adaptive = False class TestAvgPoolAdaptive(TestPool2D_Op): - def init_adaptive(self): self.adaptive = True class TestAvgPoolAdaptiveAsyOutSize(TestPool2D_Op): - def init_adaptive(self): self.adaptive = True @@ -353,7 +397,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.paddings = [0, 0, 0, 0] class TestCase1(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -372,7 +415,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCase2(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -391,25 +433,21 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCase3(TestPool2D_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase4(TestCase1): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestCase5(TestCase2): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive class TestPool2D_AsyPadding(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -419,7 +457,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 5, 5] class TestCase1_AsyPadding(TestCase1): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -429,7 +466,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCase2_AsyPadding(TestCase2): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -439,7 +475,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCase3_AsyPadding(TestCase3): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -449,7 +484,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 5, 5] class TestCase4_AsyPadding(TestCase4): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -459,7 +493,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCase5_AsyPadding(TestCase5): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -469,7 +502,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestAvgInclude_AsyPadding(TestCase2): - def init_exclusive(self): self.exclusive = False @@ -482,7 +514,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.shape = [2, 3, 7, 7] class TestCaseCeil1(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -504,7 +535,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.ceil_mode = True class TestCaseCeil2(TestPool2D_Op): - def init_test_case(self): self.ksize = [3, 3] self.strides = [1, 1] @@ -526,7 +556,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.ceil_mode = True class TestCaseCeil3(TestPool2D_Op): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive @@ -535,7 +564,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.ceil_mode = True class TestCaseCeil4(TestCaseCeil1): - def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive @@ -544,7 +572,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.ceil_mode = True class TestCaseCeil5(TestCaseCeil2): - def 
init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive @@ -553,7 +580,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.ceil_mode = True class TestCaseAdaptiveAvg(TestPool2D_Op): - def init_test_case(self): self.ksize = [2, 2] self.strides = [2, 2] @@ -575,7 +601,6 @@ class XPUTestPool2D_Op(XPUOpTestWrapper): self.adaptive = True class TestCaseAdaptiveMax(TestPool2D_Op): - def init_test_case(self): self.ksize = [2, 2] self.strides = [2, 2] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pow2_decay_with_linear_warmup_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_pow2_decay_with_linear_warmup_op_xpu.py index 620cdac1811e984ac08d489b9e523f789e6fb808..246511c9f6c2c182c8ca2c153e758d333030904f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_pow2_decay_with_linear_warmup_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_pow2_decay_with_linear_warmup_op_xpu.py @@ -28,8 +28,9 @@ def gen_pow2_warmup_op_lr(warmup_steps, total_steps, base_lr, end_lr, place): main = paddle.static.Program() startup = paddle.static.Program() with paddle.static.program_guard(main, startup): - lr = pow2_decay_with_linear_warmup(warmup_steps, total_steps, base_lr, - end_lr) + lr = pow2_decay_with_linear_warmup( + warmup_steps, total_steps, base_lr, end_lr + ) exe = paddle.static.Executor(place) with paddle.static.scope_guard(paddle.static.Scope()): exe.run(startup) @@ -39,18 +40,21 @@ def gen_pow2_warmup_op_lr(warmup_steps, total_steps, base_lr, end_lr, place): class Pow2Warmup(LinearWarmup): - def __init__(self, warmup_steps, total_steps, base_lr, end_lr): assert total_steps > warmup_steps - lr_sch = PolynomialDecay(learning_rate=base_lr, - decay_steps=total_steps - warmup_steps, - end_lr=end_lr, - power=2) + lr_sch = PolynomialDecay( + learning_rate=base_lr, + decay_steps=total_steps - warmup_steps, + end_lr=end_lr, + power=2, + ) - super(Pow2Warmup, self).__init__(learning_rate=lr_sch, - warmup_steps=warmup_steps, - start_lr=0.0, - end_lr=base_lr) + super(Pow2Warmup, self).__init__( + learning_rate=lr_sch, + warmup_steps=warmup_steps, + start_lr=0.0, + end_lr=base_lr, + ) def gen_pow2_warmup_py_lr(warmup_steps, total_steps, base_lr, end_lr, place): @@ -62,7 +66,6 @@ def gen_pow2_warmup_py_lr(warmup_steps, total_steps, base_lr, end_lr, place): class TestPowWarmup(unittest.TestCase): - def setUp(self): paddle.enable_static() self.op_type = 'pow2_decay_with_linear_warmup' diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py index 4561a54d65e79b90815eae421c738977c74239bb..acc05963ee4355c1a3d5807c7794718b449b603a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py @@ -22,19 +22,21 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestPriorBoxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'prior_box' self.use_dynamic_create_class = False class TestPriorBoxOp(XPUOpTest): - def setUp(self): self.op_type = "prior_box" self.use_xpu = True @@ -56,7 +58,7 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper): 'min_max_aspect_ratios_order': self.min_max_aspect_ratios_order, 'step_w': self.step_w, 
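Pow2Warmup above composes a linear warmup (0 to base_lr over warmup_steps) with a power-2 polynomial decay over the remaining total_steps - warmup_steps steps. A dependency-free sketch of the resulting curve, assuming the usual polynomial-decay convention lr = (base_lr - end_lr) * (1 - t / decay_steps)**2 + end_lr; it mirrors the schedule's intent, not Paddle's exact internals:

def pow2_warmup_lr(step, warmup_steps, total_steps, base_lr, end_lr):
    if step < warmup_steps:
        return base_lr * step / warmup_steps       # linear warmup from 0
    decay_steps = total_steps - warmup_steps
    t = min(step - warmup_steps, decay_steps)      # hold at end_lr after total_steps
    return (base_lr - end_lr) * (1.0 - t / decay_steps) ** 2 + end_lr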
'step_h': self.step_h, - 'offset': self.offset + 'offset': self.offset, } if len(self.max_sizes) > 0: self.attrs['max_sizes'] = self.max_sizes @@ -95,11 +97,13 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper): self.flip = True self.set_min_max_aspect_ratios_order() self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] - self.aspect_ratios = np.array(self.aspect_ratios, - dtype=np.float64).flatten() + self.aspect_ratios = np.array( + self.aspect_ratios, dtype=np.float64 + ).flatten() self.variances = [0.1, 0.1, 0.2, 0.2] - self.variances = np.array(self.variances, - dtype=np.float64).flatten() + self.variances = np.array( + self.variances, dtype=np.float64 + ).flatten() self.clip = True self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes) @@ -109,12 +113,22 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper): def init_test_input(self): self.image = np.random.random( - (self.batch_size, self.image_channels, self.image_w, - self.image_h)).astype(self.dtype) + ( + self.batch_size, + self.image_channels, + self.image_w, + self.image_h, + ) + ).astype(self.dtype) self.input = np.random.random( - (self.batch_size, self.input_channels, self.layer_w, - self.layer_h)).astype(self.dtype) + ( + self.batch_size, + self.input_channels, + self.layer_w, + self.layer_h, + ) + ).astype(self.dtype) def init_test_output(self): out_dim = (self.layer_h, self.layer_w, self.num_priors, 4) @@ -135,72 +149,76 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper): ar = self.real_aspect_ratios[r] c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 else: - c_w = c_h = min_size / 2. - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + c_w = c_h = min_size / 2.0 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 if len(self.max_sizes) > 0: max_size = self.max_sizes[s] # second prior: aspect_ratio = 1, c_w = c_h = math.sqrt(min_size * max_size) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # rest of priors for r in range(len(self.real_aspect_ratios)): ar = self.real_aspect_ratios[r] - if abs(ar - 1.) 
< 1e-6: + if abs(ar - 1.0) < 1e-6: continue c_w = min_size * math.sqrt(ar) / 2 c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, - idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, + (c_y - c_h) / self.image_h, + (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h, + ] idx += 1 # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) # set the variance. - out_var = np.tile(self.variances, - (self.layer_h, self.layer_w, self.num_priors, 1)) + out_var = np.tile( + self.variances, (self.layer_h, self.layer_w, self.num_priors, 1) + ) self.out_boxes = out_boxes.astype(self.dtype) self.out_var = out_var.astype(self.dtype) class TestPriorBoxOpWithoutMaxSize(TestPriorBoxOp): - def set_max_sizes(self): self.max_sizes = [] class TestPriorBoxOpWithSpecifiedOutOrder(TestPriorBoxOp): - def set_min_max_aspect_ratios_order(self): self.min_max_aspect_ratios_order = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_range_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_range_xpu.py index 3135255bea2859c73a4da003cfd4f7e7e795d448..02893acc0e2d1f2862f0adf5a76ea4b01d82d4cb 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_range_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_range_xpu.py @@ -19,19 +19,21 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestRangeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "range" self.use_dynamic_create_class = False class TestRangeOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "range" @@ -40,13 +42,13 @@ class XPUTestRangeOp(XPUOpTestWrapper): self.inputs = { 'Start': np.array([self.case[0]]).astype(self.dtype), 'End': np.array([self.case[1]]).astype(self.dtype), - 'Step': np.array([self.case[2]]).astype(self.dtype) + 'Step': np.array([self.case[2]]).astype(self.dtype), } self.outputs = { - 'Out': - np.arange(self.case[0], self.case[1], - self.case[2]).astype(self.dtype) + 'Out': np.arange( + self.case[0], self.case[1], self.case[2] + ).astype(self.dtype) } def set_xpu(self): @@ -63,27 +65,22 @@ class XPUTestRangeOp(XPUOpTestWrapper): self.check_output_with_place(place, check_dygraph=False) class TestRangeOpCase0(TestRangeOp): - def init_config(self): self.case = (0, 5, 1) class TestRangeOpCase1(TestRangeOp): - def init_config(self): self.case = (0, 5, 2) class TestRangeOpCase2(TestRangeOp): - def init_config(self): self.case = (10, 1, -2) class TestRangeOpCase3(TestRangeOp): - def init_config(self): self.case = (-1, -10, -2) class TestRangeOpCase4(TestRangeOp): - def init_config(self): self.case = (10, -10, -11) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_all_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_all_op_xpu.py index 1d70b5fb3cafe3c9a42292b0fa16ecda83f428c0..df485b49ac1ea1da5638ada935de4a7dc541022e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_all_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_all_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, 
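Each prior box in the reference above is built from a cell center (c_x, c_y), a min_size scaled by the square root of the aspect ratio, and a normalization by the image size. The per-box step in isolation (hypothetical helper mirroring the expressions in the hunks above):

import math

def prior_box_corners(c_x, c_y, min_size, aspect_ratio, image_w, image_h):
    c_w = min_size * math.sqrt(aspect_ratio) / 2.0    # half width
    c_h = (min_size / math.sqrt(aspect_ratio)) / 2.0  # half height
    return [
        (c_x - c_w) / image_w,  # xmin
        (c_y - c_h) / image_h,  # ymin
        (c_x + c_w) / image_w,  # xmax
        (c_y + c_h) / image_h,  # ymax
    ]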
get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceAllOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_all' class XPUTestReduceAllBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.set_case() @@ -42,11 +44,12 @@ class XPUTestReduceAllOp(XPUOpTestWrapper): 'use_xpu': True, 'reduce_all': True, 'keep_dim': True, - 'dim': (3, 5, 4) + 'dim': (3, 5, 4), } self.inputs = { - 'X': - np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} @@ -57,14 +60,13 @@ class XPUTestReduceAllOp(XPUOpTestWrapper): pass class XPUTestReduceAllCase1(XPUTestReduceAllBase): - def set_case(self): self.op_type = 'reduce_all' self.attrs = { 'use_xpu': True, 'reduce_all': True, 'keep_dim': True, - 'dim': [1] + 'dim': [1], } self.inputs = { 'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool") @@ -72,23 +74,22 @@ class XPUTestReduceAllOp(XPUOpTestWrapper): self.outputs = {'Out': self.inputs['X'].all()} class XPUTestReduceAllCase2(XPUTestReduceAllBase): - def set_case(self): self.op_type = 'reduce_all' self.attrs = { 'use_xpu': True, 'reduce_all': True, 'keep_dim': False, - 'dim': (3, 6) + 'dim': (3, 6), } self.inputs = { - 'X': - np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])} class XPUTestReduceAllCase3(XPUTestReduceAllBase): - def set_case(self): self.op_type = 'reduce_all' self.attrs = { diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_amax_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_amax_op_xpu.py index 05b9edf7d8ae327b381dd3b39e697d1474345fc1..4394340aa1c803836763f344fdca965726287140 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_amax_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_amax_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceAmaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_amax' class XPUTestReduceAmaxBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.set_case() @@ -39,7 +41,7 @@ class XPUTestReduceAmaxOp(XPUOpTestWrapper): def set_case(self): self.op_type = 'reduce_amax' self.shape = (20, 10) - self.attrs = {'use_xpu': True, 'keep_dim': False, 'dim': (1, )} + self.attrs = {'use_xpu': True, 'keep_dim': False, 'dim': (1,)} self.inputs = { 'X': np.random.randint(0, 100, self.shape).astype("float32") @@ -47,10 +49,11 @@ class XPUTestReduceAmaxOp(XPUOpTestWrapper): expect_intput = self.inputs['X'] self.outputs = { - 'Out': - np.amax(expect_intput, - axis=self.attrs['dim'], - keepdims=self.attrs['keep_dim']) + 'Out': np.amax( + expect_intput, + axis=self.attrs['dim'], + keepdims=self.attrs['keep_dim'], + ) } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_amin_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_amin_op_xpu.py index 
4e164840126c2c6c6a8c7c8bc5bf2b9d4dfc6ae7..77c45ce06424f513a26fbbdb4c3eb81263c87f5e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_amin_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_amin_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceAmaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_amin' class XPUTestReduceAmaxBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.set_case() @@ -39,7 +41,7 @@ class XPUTestReduceAmaxOp(XPUOpTestWrapper): def set_case(self): self.op_type = 'reduce_amin' self.shape = (20, 10) - self.attrs = {'use_xpu': True, 'keep_dim': False, 'dim': (1, )} + self.attrs = {'use_xpu': True, 'keep_dim': False, 'dim': (1,)} self.inputs = { 'X': np.random.randint(0, 100, self.shape).astype("float32") @@ -47,10 +49,11 @@ class XPUTestReduceAmaxOp(XPUOpTestWrapper): expect_intput = self.inputs['X'] self.outputs = { - 'Out': - np.amin(expect_intput, - axis=self.attrs['dim'], - keepdims=self.attrs['keep_dim']) + 'Out': np.amin( + expect_intput, + axis=self.attrs['dim'], + keepdims=self.attrs['keep_dim'], + ) } def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_any_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_any_op_xpu.py index 116eea5bb105aafe2643ad4cd40db1afac323f22..032d138558d91b8736bdd7b798d3ba905e0c3f16 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_any_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_any_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceAnyOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_any' class XPUTestReduceAnyBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.set_case() @@ -42,11 +44,12 @@ class XPUTestReduceAnyOp(XPUOpTestWrapper): 'use_xpu': True, 'reduce_all': True, 'keep_dim': True, - 'dim': (3, 5, 4) + 'dim': (3, 5, 4), } self.inputs = { - 'X': - np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} @@ -57,7 +60,6 @@ class XPUTestReduceAnyOp(XPUOpTestWrapper): pass class XPUTestReduceAnyCase1(XPUTestReduceAnyBase): - def set_case(self): self.op_type = 'reduce_any' self.attrs = { @@ -72,18 +74,18 @@ class XPUTestReduceAnyOp(XPUOpTestWrapper): self.outputs = {'Out': self.inputs['X'].any(axis=1)} class XPUTestReduceAnyCase2(XPUTestReduceAnyBase): - def set_case(self): self.op_type = 'reduce_any' self.attrs = { 'use_xpu': True, 'reduce_all': True, 'keep_dim': False, - 'dim': (3, 6) + 'dim': (3, 6), } self.inputs = { - 'X': - np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype("bool") + 'X': np.random.randint(0, 2, (2, 5, 3, 2, 2, 3, 4, 2)).astype( + "bool" + ) } self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])} diff --git 
a/python/paddle/fluid/tests/unittests/xpu/test_reduce_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_max_op_xpu.py index fd6b96d7ed7e1348febf4feb2716f29fe2bd3496..ee0922110e5bc4cb8f1de1527a3ec07ead6c5b58 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_max_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_max_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceMaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_max' class XPUTestReduceMaxBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -42,21 +44,21 @@ class XPUTestReduceMaxOp(XPUOpTestWrapper): self.attrs = { 'use_xpu': True, 'reduce_all': self.reduce_all, - 'keep_dim': self.keep_dim + 'keep_dim': self.keep_dim, } self.inputs = {'X': np.random.random(self.shape).astype("float32")} if self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].max()} else: self.outputs = { - 'Out': - self.inputs['X'].max(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].max( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = False @@ -67,10 +69,9 @@ class XPUTestReduceMaxOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class XPUTestReduceMaxCase1(XPUTestReduceMaxBase): - def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_mean_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_mean_op_xpu.py index 0b1b9070025b3642539fefc38842b710d045bac6..41efec0c29b0e7685bd7a8a5fb082a5bc4807126 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_mean_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_mean_op_xpu.py @@ -18,20 +18,22 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() class XPUTestMeanOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_mean' self.use_dynamic_create_class = False class TestMeanOp(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -47,7 +49,6 @@ class XPUTestMeanOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestMeanOp5D(TestMeanOp): - def setUp(self): super().setUp() self.inputs = { @@ -57,7 +58,6 @@ class XPUTestMeanOp(XPUOpTestWrapper): self.outputs = {'Out': self.inputs['X'].mean(axis=0)} class TestMeanOp6D(TestMeanOp): - def setUp(self): super().setUp() self.inputs = { @@ -67,25 +67,23 @@ class XPUTestMeanOp(XPUOpTestWrapper): self.outputs = {'Out': self.inputs['X'].mean(axis=0)} class TestMeanOp8D(TestMeanOp): - def setUp(self): super().setUp() self.inputs = { - 'X': np.random.random( - (1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype) + 'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype( + self.dtype + ) } self.attrs 
= {'dim': (0, 3), 'use_xpu': True} self.outputs = {'Out': self.inputs['X'].mean(axis=(0, 3))} class XPUTestReduce(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_mean' self.use_dynamic_create_class = False class Test1DReduce(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -105,7 +103,6 @@ class XPUTestReduce(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class Test2DReduce0(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [0], 'use_xpu': True} @@ -113,7 +110,6 @@ class XPUTestReduce(XPUOpTestWrapper): self.outputs = {'Out': self.inputs['X'].mean(axis=0)} class Test2DReduce1(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [1], 'use_xpu': True} @@ -123,7 +119,6 @@ class XPUTestReduce(XPUOpTestWrapper): } class Test3DReduce0(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [1], 'use_xpu': True} @@ -133,7 +128,6 @@ class XPUTestReduce(XPUOpTestWrapper): } class Test3DReduce1(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [2], 'use_xpu': True} @@ -143,7 +137,6 @@ class XPUTestReduce(XPUOpTestWrapper): } class Test3DReduce2(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [-2], 'use_xpu': True} @@ -153,7 +146,6 @@ class XPUTestReduce(XPUOpTestWrapper): } class Test3DReduce3(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [1, 2], 'use_xpu': True} @@ -163,7 +155,6 @@ class XPUTestReduce(XPUOpTestWrapper): } class Test6DReduce(Test1DReduce): - def setUp(self): super().setUp() self.attrs = {'dim': [1, -1], 'use_xpu': True} @@ -175,30 +166,31 @@ class XPUTestReduce(XPUOpTestWrapper): } class TestKeepDimReduce(Test1DReduce): - def setUp(self): super().setUp() self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)} self.attrs = {'dim': [1], 'keep_dim': True, 'use_xpu': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + axis=tuple(self.attrs['dim']), + keepdims=self.attrs['keep_dim'], + ) } class TestKeepDim8DReduce(Test1DReduce): - def setUp(self): super().setUp() self.inputs = { - 'X': np.random.random( - (2, 5, 3, 2, 2, 3, 4, 2)).astype(self.dtype) + 'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype( + self.dtype + ) } self.attrs = {'dim': (3, 4, 5), 'keep_dim': True, 'use_xpu': True} self.outputs = { - 'Out': - self.inputs['X'].mean(axis=tuple(self.attrs['dim']), - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].mean( + axis=tuple(self.attrs['dim']), + keepdims=self.attrs['keep_dim'], + ) } diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_min_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_min_op_xpu.py index 576f550f2397a776244687e17c78b7d2f3e871d9..6639071b3b0d1901639b6b1b4c90b17779aefa28 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_min_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_min_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceMinOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_min' class XPUTestReduceMinBase(XPUOpTest): - def setUp(self): self.place = 
paddle.XPUPlace(0) self.init_case() @@ -42,21 +44,21 @@ class XPUTestReduceMinOp(XPUOpTestWrapper): self.attrs = { 'use_xpu': True, 'reduce_all': self.reduce_all, - 'keep_dim': self.keep_dim + 'keep_dim': self.keep_dim, } self.inputs = {'X': np.random.random(self.shape).astype("float32")} if self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].min()} else: self.outputs = { - 'Out': - self.inputs['X'].min(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].min( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = False @@ -67,10 +69,9 @@ class XPUTestReduceMinOp(XPUOpTestWrapper): pass class XPUTestReduceMinCase1(XPUTestReduceMinBase): - def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_prod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_prod_op_xpu.py index 17bd2356eff10090296f36bc293038159a335dcb..ad2cb5143b949463d8c51a7c7fd3137423fbccce 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_prod_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_prod_op_xpu.py @@ -21,19 +21,21 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceProdOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_prod' self.use_dynamic_create_class = False class TestXPUReduceProdOp(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -45,21 +47,21 @@ class XPUTestReduceProdOP(XPUOpTestWrapper): self.attrs = { 'dim': self.axis, 'keep_dim': self.keep_dim, - 'reduce_all': self.reduce_all + 'reduce_all': self.reduce_all, } self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)} if self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].prod()} else: self.outputs = { - 'Out': - self.inputs['X'].prod(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].prod( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def initTestCase(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) def init_dtype(self): self.dtype = self.in_type @@ -71,84 +73,71 @@ class XPUTestReduceProdOP(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestProdOp5D(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (1, 2, 5, 6, 10) - self.axis = (0, ) + self.axis = (0,) class TestProdOp6D(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (1, 1, 2, 5, 6, 10) - self.axis = (0, ) + self.axis = (0,) class TestProdOp8D(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (1, 3, 1, 2, 1, 4, 3, 10) self.axis = (0, 3) class Test1DReduce(TestXPUReduceProdOp): - def initTestCase(self): self.shape = 120 - self.axis = (0, ) + self.axis = (0,) class Test2DReduce0(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (20, 10) - self.axis = (0, ) + self.axis = (0,) class Test2DReduce1(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (20, 10) - self.axis = (1, ) + self.axis = (1,) class Test3DReduce0(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 7) - 
self.axis = (1, ) + self.axis = (1,) class Test3DReduce1(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 7) - self.axis = (2, ) + self.axis = (2,) class Test3DReduce2(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 7) - self.axis = (-2, ) + self.axis = (-2,) class Test3DReduce3(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 7) self.axis = (1, 2) class TestKeepDimReduce(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 10) - self.axis = (1, ) + self.axis = (1,) self.keep_dim = True class TestKeepDim8DReduce(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (2, 5, 3, 2, 2, 3, 4, 2) self.axis = (3, 4, 5) self.keep_dim = True class TestReduceAll(TestXPUReduceProdOp): - def initTestCase(self): self.shape = (5, 6, 2, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reduce_sum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reduce_sum_op_xpu.py index f3e0287d47a58fe78e027c669b8cd4f36b97b57f..dc377df7ab1414ea620472fa360273e726f44695 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reduce_sum_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reduce_sum_op_xpu.py @@ -20,18 +20,20 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReduceSumOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'reduce_sum' class XPUTestReduceSumBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_case() @@ -42,21 +44,21 @@ class XPUTestReduceSumOp(XPUOpTestWrapper): self.attrs = { 'use_xpu': True, 'reduce_all': self.reduce_all, - 'keep_dim': self.keep_dim + 'keep_dim': self.keep_dim, } self.inputs = {'X': np.random.random(self.shape).astype("float32")} if self.attrs['reduce_all']: self.outputs = {'Out': self.inputs['X'].sum()} else: self.outputs = { - 'Out': - self.inputs['X'].sum(axis=self.axis, - keepdims=self.attrs['keep_dim']) + 'Out': self.inputs['X'].sum( + axis=self.axis, keepdims=self.attrs['keep_dim'] + ) } def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = False @@ -67,10 +69,9 @@ class XPUTestReduceSumOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class XPUTestReduceSumCase1(XPUTestReduceSumBase): - def init_case(self): self.shape = (5, 6, 10) - self.axis = (0, ) + self.axis = (0,) self.reduce_all = False self.keep_dim = True diff --git a/python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py index a338f04091d17d45da3db82284a76e65202888c2..aa87579755064fb6d5dca62c69aeb02de985094d 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_refactor_op_xpu.py @@ -23,7 +23,11 @@ from paddle.fluid import core from op_test import OpTest from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -39,7 +43,6 @@ def huber_loss_forward(val, delta): # 
1.动态生成不同参数的测试case,wrapper类中必须实现dynamic_create_class方法 # self.use_dynamic_create_class置为True class XPUTestArgsortOp1(XPUOpTestWrapper): - def __init__(self): self.op_name = 'argsort' self.use_dynamic_create_class = True @@ -49,14 +52,14 @@ class XPUTestArgsortOp1(XPUOpTestWrapper): classes = [] for descending in [True, False]: for axis in [0, 1, 2, -1, -2]: - class_name = 'XPUTestArgsortOp_axis_' + str(axis) + '_' + str( - descending) + class_name = ( + 'XPUTestArgsortOp_axis_' + str(axis) + '_' + str(descending) + ) attr_dict = {'init_axis': axis, 'init_descending': descending} classes.append([class_name, attr_dict]) return base_class, classes class TestArgsortOp(XPUOpTest): - def setUp(self): self.op_type = "argsort" self.place = paddle.XPUPlace(0) @@ -65,16 +68,18 @@ class XPUTestArgsortOp1(XPUOpTestWrapper): self.input_shape = (2, 2, 2, 3, 3) self.axis = -1 if not hasattr(self, 'init_axis') else self.init_axis - self.descending = False if not hasattr( - self, 'init_descending') else self.init_descending + self.descending = ( + False + if not hasattr(self, 'init_descending') + else self.init_descending + ) if self.in_type == np.float32: self.x = np.random.random(self.input_shape).astype(self.dtype) else: - self.x = np.random.randint(low=-1000, - high=1000, - size=self.input_shape).astype( - self.dtype) + self.x = np.random.randint( + low=-1000, high=1000, size=self.input_shape + ).astype(self.dtype) self.inputs = {"X": self.x} self.attrs = {"axis": self.axis, "descending": self.descending} self.get_output() @@ -84,13 +89,15 @@ class XPUTestArgsortOp1(XPUOpTestWrapper): if self.descending: self.indices = np.flip( np.argsort(self.x, kind='heapsort', axis=self.axis), - self.axis) + self.axis, + ) self.sorted_x = np.flip( - np.sort(self.x, kind='heapsort', axis=self.axis), self.axis) + np.sort(self.x, kind='heapsort', axis=self.axis), self.axis + ) else: - self.indices = np.argsort(self.x, - kind='heapsort', - axis=self.axis) + self.indices = np.argsort( + self.x, kind='heapsort', axis=self.axis + ) self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis) def test_check_output(self): @@ -99,13 +106,11 @@ class XPUTestArgsortOp1(XPUOpTestWrapper): # 2. 
为不同参数的测试case定义一个测试类,self.use_dynamic_create_class需要置为False class XPUTestArgsortOp2(XPUOpTestWrapper): - def __init__(self): self.op_name = 'argsort' self.use_dynamic_create_class = False class TestArgsortOp(XPUOpTest): - def setUp(self): self.op_type = "argsort" self.place = paddle.XPUPlace(0) @@ -119,10 +124,9 @@ class XPUTestArgsortOp2(XPUOpTestWrapper): if self.in_type == np.float32: self.x = np.random.random(self.input_shape).astype(self.dtype) else: - self.x = np.random.randint(low=-1000, - high=1000, - size=self.input_shape).astype( - self.dtype) + self.x = np.random.randint( + low=-1000, high=1000, size=self.input_shape + ).astype(self.dtype) self.inputs = {"X": self.x} self.attrs = {"axis": self.axis, "descending": self.descending} self.get_output() @@ -132,13 +136,15 @@ class XPUTestArgsortOp2(XPUOpTestWrapper): if self.descending: self.indices = np.flip( np.argsort(self.x, kind='heapsort', axis=self.axis), - self.axis) + self.axis, + ) self.sorted_x = np.flip( - np.sort(self.x, kind='heapsort', axis=self.axis), self.axis) + np.sort(self.x, kind='heapsort', axis=self.axis), self.axis + ) else: - self.indices = np.argsort(self.x, - kind='heapsort', - axis=self.axis) + self.indices = np.argsort( + self.x, kind='heapsort', axis=self.axis + ) self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis) def init_inputshape(self): @@ -157,57 +163,46 @@ class XPUTestArgsortOp2(XPUOpTestWrapper): self.descending = False class TestArgsortOpAxis0XPU(TestArgsortOp): - def init_axis(self): self.axis = 0 class TestArgsortOpAxis1XPU(TestArgsortOp): - def init_axis(self): self.axis = 1 class TestArgsortOpAxis2XPU(TestArgsortOp): - def init_axis(self): self.axis = 2 class TestArgsortOpAxisNeg1XPU(TestArgsortOp): - def init_axis(self): self.axis = -1 class TestArgsortOpAxisNeg2XPU(TestArgsortOp): - def init_axis(self): self.axis = -2 class TestArgsortOpDescendingAxisXPU(TestArgsortOp): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis0XPU(TestArgsortOpAxis0XPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis1XPU(TestArgsortOpAxis1XPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxis2XPU(TestArgsortOpAxis2XPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg1XPU(TestArgsortOpAxisNeg1XPU): - def init_direction(self): self.descending = True class TestArgsortOpDescendingAxisNeg2XPU(TestArgsortOpAxisNeg2XPU): - def init_direction(self): self.descending = True @@ -219,13 +214,11 @@ for stype in support_types: class XPUTestHuberLossOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'huber_loss' self.use_dynamic_create_class = False class TestHuberLossOp(XPUOpTest): - def setUp(self): self.op_type = 'huber_loss' self.place = paddle.XPUPlace(0) @@ -237,11 +230,11 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): def set_inputs(self): shape = self.set_shape() - x = np.random.uniform(0, 1., shape).astype(self.dtype) - y = np.random.uniform(0, 1., shape).astype(self.dtype) + x = np.random.uniform(0, 1.0, shape).astype(self.dtype) + y = np.random.uniform(0, 1.0, shape).astype(self.dtype) self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y) + 'Y': OpTest.np_dtype_to_fluid_dtype(y), } def set_attrs(self): @@ -251,8 +244,9 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): delta = self.attrs['delta'] shape = self.set_shape() residual = self.inputs['Y'] - self.inputs['X'] - loss = 
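The argsort reference in the hunks above obtains the descending order by flipping the ascending heapsort result along the sorted axis. A tiny standalone check of that pattern:

import numpy as np

x = np.array([[3, 1, 2], [0, 5, 4]])
axis = -1
desc_idx = np.flip(np.argsort(x, kind='heapsort', axis=axis), axis)
desc_val = np.flip(np.sort(x, kind='heapsort', axis=axis), axis)
assert (np.take_along_axis(x, desc_idx, axis) == desc_val).all()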
np.vectorize(huber_loss_forward)(residual, - delta).astype(self.dtype) + loss = np.vectorize(huber_loss_forward)(residual, delta).astype( + self.dtype + ) self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)} def set_shape(self): @@ -265,27 +259,24 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad_with_place(self.place, ['Y'], - 'Out', - no_grad_set=set("residual")) + self.check_grad_with_place( + self.place, ['Y'], 'Out', no_grad_set=set("residual") + ) def test_check_grad_ingore_y(self): - self.check_grad_with_place(self.place, ['X'], - 'Out', - no_grad_set=set('residual')) + self.check_grad_with_place( + self.place, ['X'], 'Out', no_grad_set=set('residual') + ) class TestHuberLossOp1(TestHuberLossOp): - def set_shape(self): - return (640) + return 640 class TestHuberLossOp2(TestHuberLossOp): - def set_shape(self): return (10, 10) class TestHuberLossOp3(TestHuberLossOp): - def set_shape(self): return (10, 10, 1) @@ -293,10 +284,12 @@ class XPUTestHuberLossOp(XPUOpTestWrapper): support_types = get_xpu_op_support_types('huber_loss') for stype in support_types: create_test_class(globals(), XPUTestHuberLossOp, stype) - create_test_class(globals(), - XPUTestHuberLossOp, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestHuberLossOp, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py index 534fd17bf8c315d35a1fd187bbdfbaf3605842a3..987aaf54cfa8692de025b6e0734686585c18df61 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_reshape2_op_xpu.py @@ -21,20 +21,22 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestReshapeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "reshape2" self.use_dynamic_create_class = False # situation 1: have shape( list, no tensor), no actual shape(Tensor) class TestReshapeOp(XPUOpTest): - def setUp(self): self.init_data() self.op_type = "reshape2" @@ -55,7 +57,7 @@ class XPUTestReshapeOp(XPUOpTestWrapper): def init_test_output(self): self.outputs = { "Out": self.inputs["X"].reshape(self.infered_shape), - 'XShape': np.random.random(self.ori_shape).astype(self.dtype) + 'XShape': np.random.random(self.ori_shape).astype(self.dtype), } def init_attrs(self): @@ -72,14 +74,12 @@ class XPUTestReshapeOp(XPUOpTestWrapper): self.check_grad_with_place(place, ["X"], "Out") class TestReshapeOpDimInfer1(TestReshapeOp): - def init_data(self): self.ori_shape = (5, 25) self.new_shape = (5, -1, 5) self.infered_shape = (5, -1, 5) class TestReshapeOpDimInfer2(TestReshapeOp): - def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) @@ -87,7 +87,6 @@ class XPUTestReshapeOp(XPUOpTestWrapper): # situation 2: have shape(list, no tensor), have actual shape(Tensor) class TestReshapeOpWithInputShape(TestReshapeOp): - def init_data(self): self.ori_shape = (6, 20) self.new_shape = (0, -1, 20) @@ -96,18 +95,17 @@ class XPUTestReshapeOp(XPUOpTestWrapper): def 
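huber_loss_forward(val, delta) is applied elementwise through np.vectorize above; its body sits outside these hunks. The textbook Huber loss it is expected to compute (a sketch under that assumption, not the file's exact code):

def huber_loss_reference(val, delta):
    abs_val = abs(val)
    if abs_val <= delta:
        return 0.5 * val * val              # quadratic region
    return delta * (abs_val - 0.5 * delta)  # linear region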
init_test_input(self): self.inputs = { "X": np.random.random(self.ori_shape).astype(self.dtype), - "Shape": np.array(self.actual_shape, dtype="int32") + "Shape": np.array(self.actual_shape, dtype="int32"), } def init_test_output(self): self.outputs = { "Out": self.inputs["X"].reshape(self.actual_shape), - 'XShape': np.random.random(self.ori_shape).astype(self.dtype) + 'XShape': np.random.random(self.ori_shape).astype(self.dtype), } # Situation 3: have shape(list, have tensor), no actual shape(Tensor) class TestReshapeOp_attr_ShapeTensor(TestReshapeOp): - def init_data(self): self.ori_shape = (4, 25) self.new_shape = (10, 10) @@ -117,29 +115,30 @@ class XPUTestReshapeOp(XPUOpTestWrapper): def init_test_input(self): shape_tensor = [] for index, ele in enumerate(self.new_shape): - shape_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + shape_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { "X": np.random.random(self.ori_shape).astype(self.dtype), - 'ShapeTensor': shape_tensor + 'ShapeTensor': shape_tensor, } def init_attrs(self): self.attrs = {'shape': self.shape, "use_xpu": True} - class TestReshapeOpDimInfer1_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor - ): - + class TestReshapeOpDimInfer1_attr_ShapeTensor( + TestReshapeOp_attr_ShapeTensor + ): def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 20) self.infered_shape = (5, -1, 20) self.shape = (5, -1, -1) - class TestReshapeOpDimInfer2_attr_ShapeTensor(TestReshapeOp_attr_ShapeTensor - ): - + class TestReshapeOpDimInfer2_attr_ShapeTensor( + TestReshapeOp_attr_ShapeTensor + ): def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) @@ -148,7 +147,6 @@ class XPUTestReshapeOp(XPUOpTestWrapper): # Situation 4: have shape(Tensor), no actual shape(Tensor) class TestReshapeOp_attr_OnlyShape(TestReshapeOp): - def init_data(self): self.ori_shape = (4, 25) self.new_shape = (10, 10) @@ -157,14 +155,13 @@ class XPUTestReshapeOp(XPUOpTestWrapper): def init_test_input(self): self.inputs = { "X": np.random.random(self.ori_shape).astype(self.dtype), - "Shape": np.array(self.new_shape, dtype="int32") + "Shape": np.array(self.new_shape, dtype="int32"), } def init_attrs(self): self.attrs = {"use_xpu": True} class TestReshapeOpDimInfer1_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): - def init_data(self): self.ori_shape = (5, 20) self.new_shape = (5, -1, 10) @@ -172,7 +169,6 @@ class XPUTestReshapeOp(XPUOpTestWrapper): self.shape = (5, -1, -1) class TestReshapeOpDimInfer2_attr_OnlyShape(TestReshapeOp_attr_OnlyShape): - def init_data(self): self.ori_shape = (10, 2, 6) self.new_shape = (10, 0, 3, -1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_rmsprop_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_rmsprop_op_xpu.py index 0fb0a855bb0095b0acf1ba5cc09d647c18391b3c..29b980fa12804bb80decad5d9ca92be43767bc24 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_rmsprop_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_rmsprop_op_xpu.py @@ -22,28 +22,32 @@ import paddle import paddle.fluid.core as core from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() -def calculate_rmsprop_by_numpy(param, grad, mean_square, moment, learning_rate, - epsilon, decay, momentum): +def calculate_rmsprop_by_numpy( + param, 
grad, mean_square, moment, learning_rate, epsilon, decay, momentum +): mean_square_out = decay * mean_square + (1 - decay) * grad * grad moment_out = momentum * moment + learning_rate * grad / np.sqrt( - mean_square_out + epsilon) + mean_square_out + epsilon + ) param_out = param - moment_out return param_out, mean_square_out, moment_out class XPUTestRMSPropOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'rmsprop' self.use_dynamic_create_class = False class TestRMSPropOPBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.xpu_version = core.get_xpu_device_version(0) @@ -55,19 +59,25 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): self.dtype = self.in_type self.init_config() - self.param = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) - self.grad = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.param = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) + self.grad = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) self.mean_square = np.random.uniform(0, 1, self.input_shape).astype( - self.dtype) - self.moment = np.random.uniform(-1, 1, - self.input_shape).astype(self.dtype) + self.dtype + ) + self.moment = np.random.uniform(-1, 1, self.input_shape).astype( + self.dtype + ) self.mean_grad = np.random.uniform(-1, 1, self.input_shape).astype( - self.dtype) + self.dtype + ) self.mean_grad_out = np.random.uniform( - -1, 1, self.input_shape).astype(self.dtype) + -1, 1, self.input_shape + ).astype(self.dtype) param_out, mean_square_out, moment_out = calculate_rmsprop_by_numpy( param=self.param, @@ -77,7 +87,8 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): learning_rate=self.learning_rate, epsilon=self.epsilon, decay=self.decay, - momentum=self.momentum) + momentum=self.momentum, + ) self.inputs = { 'Param': self.param, 'Grad': self.grad, @@ -92,22 +103,22 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): 'epsilon': self.epsilon, 'decay': self.decay, 'momentum': self.momentum, - 'centered': - False, # TODO(houj04): when XDNN api supports 'center = True', add more test cases + 'centered': False, # TODO(houj04): when XDNN api supports 'center = True', add more test cases } self.outputs = { 'ParamOut': param_out, 'MomentOut': moment_out, 'MeanSquareOut': mean_square_out, - 'MeanGradOut': self.mean_grad_out + 'MeanGradOut': self.mean_grad_out, } def init_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output_with_place(self.place, - no_check_set=['MeanGradOut']) + self.check_output_with_place( + self.place, no_check_set=['MeanGradOut'] + ) def init_config(self): self.input_shape = [864] @@ -117,7 +128,6 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): self.momentum = 0.1 class XPUTestRMSProp1(TestRMSPropOPBase): - def init_config(self): self.input_shape = [2, 768] self.learning_rate = np.array([0.002]).astype(self.dtype) @@ -126,7 +136,6 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): self.momentum = 0.1 class XPUTestRMSProp2(TestRMSPropOPBase): - def init_config(self): self.input_shape = [3, 8, 4096] self.learning_rate = np.array([0.005]).astype(self.dtype) @@ -135,7 +144,6 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): self.momentum = 0 class XPUTestRMSProp3(TestRMSPropOPBase): - def init_config(self): self.input_shape = [1024] self.learning_rate = np.array([0.01]).astype(self.dtype) @@ -144,7 +152,6 @@ class XPUTestRMSPropOP(XPUOpTestWrapper): self.momentum = 0.02 class XPUTestRMSProp4(TestRMSPropOPBase): - def init_config(self): self.input_shape = [2, 2, 255] self.learning_rate = 
np.array([0.0005]).astype(self.dtype) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_rnn_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_rnn_op_xpu.py index 6012ca710a9d81b0dc4046a879cc9ebb5146cad5..342123d0c84dbdb4bfd190b26c7a7cd67919903b 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_rnn_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_rnn_op_xpu.py @@ -24,7 +24,11 @@ from op_test_xpu import XPUOpTest sys.path.append("../rnn") from rnn_numpy import LSTM from convert import get_params_for_net -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) random.seed(2) np.set_printoptions(threshold=np.inf) @@ -32,13 +36,11 @@ paddle.enable_static() class XPUTestRNNOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'rnn' self.use_dynamic_create_class = False class TestRNNOp(XPUOpTest): - def setUp(self): self.init_size() self.init_dtype() @@ -55,41 +57,52 @@ class XPUTestRNNOp(XPUOpTestWrapper): self.direction_num = 2 if self.is_bidirec else 1 direction = "bidirectional" if self.is_bidirec else "forward" - input = np.random.uniform(low=-0.1, - high=0.1, - size=(self.seq_length, self.batch_size, - self.input_size)).astype(self.dtype) + input = np.random.uniform( + low=-0.1, + high=0.1, + size=(self.seq_length, self.batch_size, self.input_size), + ).astype(self.dtype) input[11][1:][:] = 0 input[10][2:][:] = 0 input[9][3:][:] = 0 input[8][4:][:] = 0 - rnn1 = LSTM(self.input_size, - self.hidden_size, - num_layers=self.num_layers, - time_major=True, - direction=direction, - dropout=self.dropout, - dtype=self.dtype) + rnn1 = LSTM( + self.input_size, + self.hidden_size, + num_layers=self.num_layers, + time_major=True, + direction=direction, + dropout=self.dropout, + dtype=self.dtype, + ) flat_w = get_params_for_net(rnn1) - output, (last_hidden, - last_cell) = rnn1(input, - sequence_length=self.sequence_length) + output, (last_hidden, last_cell) = rnn1( + input, sequence_length=self.sequence_length + ) init_h = np.zeros( - (self.num_layers * self.direction_num, self.batch_size, - self.hidden_size)).astype(self.dtype) + ( + self.num_layers * self.direction_num, + self.batch_size, + self.hidden_size, + ) + ).astype(self.dtype) init_c = np.zeros( - (self.num_layers * self.direction_num, self.batch_size, - self.hidden_size)).astype(self.dtype) + ( + self.num_layers * self.direction_num, + self.batch_size, + self.hidden_size, + ) + ).astype(self.dtype) state_out = np.ndarray((300)).astype("uint8") self.inputs = { 'Input': input, 'WeightList': flat_w, 'PreState': [('init_h', init_h), ('init_c', init_c)], - 'SequenceLength': self.sequence_length + 'SequenceLength': self.sequence_length, } if self.sequence_length is None: self.inputs = { @@ -104,14 +117,16 @@ class XPUTestRNNOp(XPUOpTestWrapper): 'hidden_size': self.hidden_size, 'num_layers': self.num_layers, 'mode': self.mode, - 'is_test': self.is_test + 'is_test': self.is_test, } self.outputs = { 'Out': output, - "State": [('last_hidden', last_hidden), - ('last_cell', last_cell)], + "State": [ + ('last_hidden', last_hidden), + ('last_cell', last_cell), + ], 'Reserve': np.ndarray((400)).astype("uint8"), - 'DropoutState': state_out + 'DropoutState': state_out, } def init_dtype(self): @@ -124,15 +139,19 @@ class XPUTestRNNOp(XPUOpTestWrapper): def test_check_output(self): self.check_output_with_place( - self.place, atol=0.01, no_check_set=['Reserve', 
'DropoutState']) + self.place, atol=0.01, no_check_set=['Reserve', 'DropoutState'] + ) def test_grad(self): if not self.is_test: var_name_list = self.get_weight_names() grad_check_list = ['Input', 'init_h', 'init_c'] grad_check_list.extend(var_name_list) - self.check_grad_with_place(self.place, set(grad_check_list), - ['Out', 'last_hidden', 'last_cell']) + self.check_grad_with_place( + self.place, + set(grad_check_list), + ['Out', 'last_hidden', 'last_cell'], + ) def init_size(self): self.seq_length = 12 @@ -154,43 +173,36 @@ class XPUTestRNNOp(XPUOpTestWrapper): pass class TestRNNOp1(TestRNNOp): - def set_attrs(self): self.sequence_length = None class TestRNNOp2(TestRNNOp): - def set_attrs(self): self.num_layers = 1 self.is_bidirec = True class TestRNNOp3(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = False class TestRNNOp4(TestRNNOp): - def set_attrs(self): self.num_layers = 3 self.is_bidirec = False class TestRNNOp5(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = True class TestRNNOp6(TestRNNOp): - def set_attrs(self): self.num_layers = 2 self.is_bidirec = True self.sequence_length = None class TestRNNOp7(TestRNNOp): - def set_attrs(self): self.num_layers = 3 self.is_bidirec = True @@ -198,10 +210,12 @@ class XPUTestRNNOp(XPUOpTestWrapper): support_types = get_xpu_op_support_types('rnn') for stype in support_types: - create_test_class(globals(), - XPUTestRNNOp, - stype, - ignore_device_version=[core.XPUVersion.XPU1]) + create_test_class( + globals(), + XPUTestRNNOp, + stype, + ignore_device_version=[core.XPUVersion.XPU1], + ) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_roi_align_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_roi_align_op_xpu.py index ad63cbb5c0a25ba66f3be446448d0aa7b0197b8e..f44596324327f2a8f8cea3f6a5eb4339ad90cd55 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_roi_align_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_roi_align_op_xpu.py @@ -21,19 +21,21 @@ import numpy as np import paddle.fluid.core as core from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestROIAlignOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'roi_align' self.use_dynamic_create_class = False class TestROIAlignOp(XPUOpTest): - def set_data(self): self.init_test_case() self.make_rois() @@ -48,7 +50,7 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): 'pooled_height': self.pooled_height, 'pooled_width': self.pooled_width, 'sampling_ratio': self.sampling_ratio, - 'aligned': self.continuous_coordinate + 'aligned': self.continuous_coordinate, } self.outputs = {'Out': self.out_data} @@ -62,8 +64,12 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): self.xpu_version = core.get_xpu_device_version(0) # n, c, h, w - self.x_dim = (self.batch_size, self.channels, self.height, - self.width) + self.x_dim = ( + self.batch_size, + self.channels, + self.height, + self.width, + ) self.spatial_scale = 1.0 / 2.0 self.pooled_height = 2 @@ -75,25 +81,51 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): self.continuous_coordinate = bool(np.random.randint(2)) self.x = np.random.random(self.x_dim).astype(self.dtype) - def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, - roi_bin_grid_w, bin_size_h, bin_size_w): + def pre_calc( + self, + x_i, 
+ roi_xmin, + roi_ymin, + roi_bin_grid_h, + roi_bin_grid_w, + bin_size_h, + bin_size_w, + ): count = roi_bin_grid_h * roi_bin_grid_w - bilinear_pos = np.zeros([ - self.channels, self.pooled_height, self.pooled_width, count, 4 - ], np.float32) + bilinear_pos = np.zeros( + [ + self.channels, + self.pooled_height, + self.pooled_width, + count, + 4, + ], + np.float32, + ) bilinear_w = np.zeros( - [self.pooled_height, self.pooled_width, count, 4], np.float32) + [self.pooled_height, self.pooled_width, count, 4], np.float32 + ) for ph in range(self.pooled_width): for pw in range(self.pooled_height): c = 0 for iy in range(roi_bin_grid_h): - y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \ - bin_size_h / roi_bin_grid_h + y = ( + roi_ymin + + ph * bin_size_h + + (iy + 0.5) * bin_size_h / roi_bin_grid_h + ) for ix in range(roi_bin_grid_w): - x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \ - bin_size_w / roi_bin_grid_w - if y < -1.0 or y > self.height or \ - x < -1.0 or x > self.width: + x = ( + roi_xmin + + pw * bin_size_w + + (ix + 0.5) * bin_size_w / roi_bin_grid_w + ) + if ( + y < -1.0 + or y > self.height + or x < -1.0 + or x > self.width + ): continue if y <= 0: y = 0 @@ -114,14 +146,18 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): hy = 1 - ly hx = 1 - lx for ch in range(self.channels): - bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low, - x_low] - bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low, - x_high] - bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high, - x_low] - bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high, - x_high] + bilinear_pos[ch, ph, pw, c, 0] = x_i[ + ch, y_low, x_low + ] + bilinear_pos[ch, ph, pw, c, 1] = x_i[ + ch, y_low, x_high + ] + bilinear_pos[ch, ph, pw, c, 2] = x_i[ + ch, y_high, x_low + ] + bilinear_pos[ch, ph, pw, c, 3] = x_i[ + ch, y_high, x_high + ] bilinear_w[ph, pw, c, 0] = hy * hx bilinear_w[ph, pw, c, 1] = hy * lx bilinear_w[ph, pw, c, 2] = ly * hx @@ -131,8 +167,13 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): def calc_roi_align(self): self.out_data = np.zeros( - (self.rois_num, self.channels, self.pooled_height, - self.pooled_width)).astype(self.dtype) + ( + self.rois_num, + self.channels, + self.pooled_height, + self.pooled_width, + ) + ).astype(self.dtype) for i in range(self.rois_num): roi = self.rois[i] @@ -150,15 +191,27 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): roi_height = max(roi_height, 1) bin_size_h = float(roi_height) / float(self.pooled_height) bin_size_w = float(roi_width) / float(self.pooled_width) - roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_height / self.pooled_height) - roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \ - math.ceil(roi_width / self.pooled_width) + roi_bin_grid_h = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_height / self.pooled_height) + ) + roi_bin_grid_w = ( + self.sampling_ratio + if self.sampling_ratio > 0 + else math.ceil(roi_width / self.pooled_width) + ) count = int(roi_bin_grid_h * roi_bin_grid_w) pre_size = count * self.pooled_width * self.pooled_height bilinear_pos, bilinear_w = self.pre_calc( - x_i, roi_xmin, roi_ymin, int(roi_bin_grid_h), - int(roi_bin_grid_w), bin_size_h, bin_size_w) + x_i, + roi_xmin, + roi_ymin, + int(roi_bin_grid_h), + int(roi_bin_grid_w), + bin_size_h, + bin_size_w, + ) for ch in range(self.channels): align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1) output_val = align_per_bin.mean(axis=-1) @@ -171,17 +224,20 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): self.rois_lod[0].append(bno + 1) for i 
in range(bno + 1): x1 = np.random.random_integers( - 0, self.width // self.spatial_scale - self.pooled_width) + 0, self.width // self.spatial_scale - self.pooled_width + ) y1 = np.random.random_integers( 0, - self.height // self.spatial_scale - self.pooled_height) + self.height // self.spatial_scale - self.pooled_height, + ) x2 = np.random.random_integers( - x1 + self.pooled_width, - self.width // self.spatial_scale) + x1 + self.pooled_width, self.width // self.spatial_scale + ) y2 = np.random.random_integers( y1 + self.pooled_height, - self.height // self.spatial_scale) + self.height // self.spatial_scale, + ) roi = [bno, x1, y1, x2, y2] rois.append(roi) @@ -205,7 +261,6 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, {'X'}, 'Out') class TestROIAlignInLodOp(TestROIAlignOp): - def set_data(self): self.init_test_case() self.make_rois() @@ -216,7 +271,7 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): self.inputs = { 'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod), - 'RoisNum': np.asarray(seq_len).astype('int32') + 'RoisNum': np.asarray(seq_len).astype('int32'), } self.attrs = { @@ -224,7 +279,7 @@ class XPUTestROIAlignOp(XPUOpTestWrapper): 'pooled_height': self.pooled_height, 'pooled_width': self.pooled_width, 'sampling_ratio': self.sampling_ratio, - 'aligned': self.continuous_coordinate + 'aligned': self.continuous_coordinate, } self.outputs = {'Out': self.out_data} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sampling_id_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sampling_id_op_xpu.py index 75bdbff364da860df8743b384d12311b4fdadf22..d2e9aa481749fe7cea2cd2ea990b23139772fe98 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sampling_id_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sampling_id_op_xpu.py @@ -23,7 +23,6 @@ import paddle class TestSamplingIdShape(unittest.TestCase): - def test_shape(self): paddle.enable_static() x = fluid.layers.data(name='x', shape=[3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py index d41ba5e59d414f5c4627271dad56256ced294aa8..47c95c2158b3399eda217e2dd2aad4c823005f7b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py @@ -22,17 +22,19 @@ import paddle from paddle.fluid import Program, program_guard from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestScaleOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'scale' self.use_dynamic_create_class = False class TestScaleOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -77,33 +79,27 @@ class XPUTestScaleOp(XPUOpTestWrapper): self.check_output_with_place(place) class TestScaleOp1(TestScaleOp): - def set_attrs(self): self.attrs = {'scale': 3.5} class TestScaleOp2(TestScaleOp): - def set_attrs(self): self.attrs = {'scale': 6.77} class TestScaleOp3(TestScaleOp): - def set_attrs(self): self.attrs = {'scale': -9.19} class TestScaleOp4(TestScaleOp): - def set_attrs(self): self.attrs = {'scale': 0.0} class TestScaleOp5(TestScaleOp): - def set_attrs(self): self.attrs = {'scale': -0.003} class TestScaleApiStatic(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, 
bias) @@ -121,13 +117,11 @@ class TestScaleApiStatic(unittest.TestCase): class TestScaleInplaceApiStatic(TestScaleApiStatic): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) class TestScaleApiDygraph(unittest.TestCase): - def _executed_api(self, x, scale=1.0, bias=0.0): return paddle.scale(x, scale, bias) @@ -141,7 +135,6 @@ class TestScaleApiDygraph(unittest.TestCase): class TestScaleInplaceApiDygraph(TestScaleApiDygraph): - def _executed_api(self, x, scale=1.0, bias=0.0): return x.scale_(scale, bias) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_scatter_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_scatter_op_xpu.py index a7047014f75c0b7d92d16a3e4983194dcab81404..4cd2de7e6a08aef70a8feda522b09eb5e4481141 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_scatter_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_scatter_op_xpu.py @@ -20,13 +20,17 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper, type_dict_str_to_numpy +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, + type_dict_str_to_numpy, +) paddle.enable_static() class XPUTestScatterOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'scatter' self.use_dynamic_create_class = True @@ -47,7 +51,7 @@ class XPUTestScatterOp(XPUOpTestWrapper): 'init_index_np': index_np, 'init_updates_np': updates_np, 'init_output_np': output_np, - 'test_name': 'case1' + 'test_name': 'case1', } test_data_case.append(data_dict) @@ -62,7 +66,7 @@ class XPUTestScatterOp(XPUOpTestWrapper): 'init_index_np': index_np, 'init_updates_np': updates_np, 'init_output_np': output_np, - 'test_name': 'case2' + 'test_name': 'case2', } test_data_case.append(data_dict) @@ -80,16 +84,21 @@ class XPUTestScatterOp(XPUOpTestWrapper): 'init_index_np': index_np, 'init_updates_np': updates_np, 'init_output_np': output_np, - 'test_name': 'case3' + 'test_name': 'case3', } test_data_case.append(data_dict) for data_dict in test_data_case: for index_type in ['int32', 'int64']: for overwrite in [True, False]: - class_name = 'XPUTestScatterOp_index_type_' + data_dict[ - 'test_name'] + '_' + str(index_type) + '_' + str( - overwrite) + class_name = ( + 'XPUTestScatterOp_index_type_' + + data_dict['test_name'] + + '_' + + str(index_type) + + '_' + + str(overwrite) + ) attr_dict = data_dict attr_dict['index_type'] = type_dict_str_to_numpy[index_type] attr_dict['init_overwrite'] = overwrite @@ -97,13 +106,16 @@ class XPUTestScatterOp(XPUOpTestWrapper): return base_class, classes class TestScatterOp(XPUOpTest): - def setUp(self): self.init_config() - self.index_type = np.int32 if not hasattr( - self, 'index_type') else self.index_type - self.overwrite = True if not hasattr( - self, 'init_overwrite') else self.init_overwrite + self.index_type = ( + np.int32 if not hasattr(self, 'index_type') else self.index_type + ) + self.overwrite = ( + True + if not hasattr(self, 'init_overwrite') + else self.init_overwrite + ) if not hasattr(self, 'init_ref_np'): self.ref_np = np.ones((3, 50)).astype(self.dtype) @@ -120,7 +132,7 @@ class XPUTestScatterOp(XPUOpTestWrapper): self.inputs = { 'X': self.ref_np, 'Ids': self.index_np, - 'Updates': self.updates_np + 'Updates': self.updates_np, } self.attrs = {'overwrite': self.overwrite} self.outputs = {'Out': self.output_np} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py 
b/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py index 77327c529cc4da4192635f81abc4e5560041b527..e15bfdbbe5849a99991b04ef26d773c9ca98004a 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py @@ -27,13 +27,15 @@ paddle.enable_static() np.set_printoptions(threshold=np.inf) -def seqconv(x, - lod, - filter, - context_length, - context_start, - padding_trainable=False, - padding_data=None): +def seqconv( + x, + lod, + filter, + context_length, + context_start, + padding_trainable=False, + padding_data=None, +): [T, M] = x.shape col = np.zeros((T, context_length * M)).astype('float32') offset = [0] @@ -48,67 +50,87 @@ def seqconv(x, out_end = offset[i + 1] if in_begin < offset[i]: pad_size = np.min( - [offset[i] - in_begin, offset[i + 1] - offset[i]]) + [offset[i] - in_begin, offset[i + 1] - offset[i]] + ) if padding_trainable: - sub_w = padding_data[j:j + pad_size, :] - col[offset[i]:offset[i] + pad_size, - j * M:(j + 1) * M] = sub_w + sub_w = padding_data[j : j + pad_size, :] + col[ + offset[i] : offset[i] + pad_size, j * M : (j + 1) * M + ] = sub_w out_begin = offset[i] + pad_size in_begin = offset[i] if in_end > offset[i + 1]: pad_size = np.min( - [in_end - offset[i + 1], offset[i + 1] - offset[i]]) + [in_end - offset[i + 1], offset[i + 1] - offset[i]] + ) if padding_trainable: - sub_w = padding_data[begin_pad + context_start + j - - pad_size:begin_pad + context_start + - j, :] - col[offset[i + 1] - pad_size:offset[i + 1], - j * M:(j + 1) * M] = sub_w + sub_w = padding_data[ + begin_pad + + context_start + + j + - pad_size : begin_pad + + context_start + + j, + :, + ] + col[ + offset[i + 1] - pad_size : offset[i + 1], + j * M : (j + 1) * M, + ] = sub_w in_end = offset[i + 1] out_end = offset[i + 1] - pad_size if in_end <= in_begin: continue in_sub = x[in_begin:in_end, :] - col[out_begin:out_end, j * M:(j + 1) * M] += in_sub + col[out_begin:out_end, j * M : (j + 1) * M] += in_sub return np.dot(col, filter) class XPUTestSequenceConv(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sequence_conv' class TestSeqProject(XPUOpTest): - def setUp(self): self.init_test_case() self.op_type = 'sequence_conv' self.dtype = self.in_type self.use_xpu = True - if self.context_length == 1 \ - and self.context_start == 0 \ - and self.padding_trainable: - print("If context_start is 0 " \ - "and context_length is 1," \ - " padding_trainable should be false.") + if ( + self.context_length == 1 + and self.context_start == 0 + and self.padding_trainable + ): + print( + "If context_start is 0 " + "and context_length is 1," + " padding_trainable should be false." 
+ ) return # one level, batch size x = np.random.uniform( - -6.10907e-05, 0.000104218, - [self.input_size[0], self.input_size[1]]).astype(self.dtype) - w = np.random.uniform(-3.17068e-05, 0.000159822, [ - self.context_length * self.input_size[1], - self.output_represention - ]).astype(self.dtype) + -6.10907e-05, + 0.000104218, + [self.input_size[0], self.input_size[1]], + ).astype(self.dtype) + w = np.random.uniform( + -3.17068e-05, + 0.000159822, + [ + self.context_length * self.input_size[1], + self.output_represention, + ], + ).astype(self.dtype) begin_pad = np.max([0, -self.context_start]) end_pad = np.max([0, self.context_start + self.context_length - 1]) total_pad = begin_pad + end_pad padding_data = np.random.uniform( - 0, 0, [total_pad, self.input_size[1]]).astype(self.dtype) + 0, 0, [total_pad, self.input_size[1]] + ).astype(self.dtype) self.pad_data = padding_data self.inputs = { 'X': (x, self.lod), @@ -128,11 +150,17 @@ class XPUTestSequenceConv(XPUOpTestWrapper): 'contextStart': self.context_start, 'contextLength': self.context_length, 'paddingTrainable': self.padding_trainable, - 'contextStride': self.context_stride + 'contextStride': self.context_stride, } - out = seqconv(x, self.lod, w, self.context_length, - self.context_start, self.padding_trainable, - self.pad_data) + out = seqconv( + x, + self.lod, + w, + self.context_length, + self.context_start, + self.padding_trainable, + self.pad_data, + ) self.outputs = {'Out': out} def test_check_output(self): @@ -144,32 +172,32 @@ class XPUTestSequenceConv(XPUOpTestWrapper): def test_check_grad_padding_data(self): if self.padding_trainable: - self.check_grad(['PaddingData'], - 'Out', - no_grad_set=set(['X', 'Filter'])) + self.check_grad( + ['PaddingData'], 'Out', no_grad_set=set(['X', 'Filter']) + ) def test_check_grad_Filter(self): - self.check_grad(['Filter'], - 'Out', - no_grad_set=set(self.inputs_val_no_f)) + self.check_grad( + ['Filter'], 'Out', no_grad_set=set(self.inputs_val_no_f) + ) def test_check_grad_input_filter(self): if self.padding_trainable: - self.check_grad(['X', 'Filter'], - 'Out', - no_grad_set=set(['PaddingData'])) + self.check_grad( + ['X', 'Filter'], 'Out', no_grad_set=set(['PaddingData']) + ) def test_check_grad_padding_input(self): if self.padding_trainable: - self.check_grad(self.inputs_val_no_f, - 'Out', - no_grad_set=set(['Filter'])) + self.check_grad( + self.inputs_val_no_f, 'Out', no_grad_set=set(['Filter']) + ) def test_check_grad_padding_filter(self): if self.padding_trainable: - self.check_grad(self.inputs_val_no_x, - 'Out', - no_grad_set=set(['X'])) + self.check_grad( + self.inputs_val_no_x, 'Out', no_grad_set=set(['X']) + ) def init_test_case(self): self.input_row = 7 @@ -188,7 +216,6 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.output_represention = 8 # output feature size class TestSeqProjectCase1(TestSeqProject): - def init_test_case(self): self.input_row = 11 self.context_start = -2 @@ -205,7 +232,6 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.output_represention = 8 # output feature size class TestSeqProjectCase2Len0(TestSeqProject): - def init_test_case(self): self.input_row = 11 self.context_start = -2 @@ -222,7 +248,6 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.output_represention = 8 # output feature size class TestSeqProjectCase3(TestSeqProject): - def init_test_case(self): self.input_row = 25 self.context_start = -2 @@ -233,8 +258,11 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.input_size = [self.input_row, 25] idx = list(range(self.input_size[0])) del 
idx[0] - offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() + - [self.input_size[0]]] + offset_lod = [ + [0] + + np.sort(random.sample(idx, 8)).tolist() + + [self.input_size[0]] + ] self.lod = [[]] # convert from offset-based lod to length-based lod for i in range(len(offset_lod[0]) - 1): @@ -242,7 +270,6 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.output_represention = 8 # output feature size class TestSeqProjectCase4(TestSeqProject): - def init_test_case(self): self.input_row = 7835 self.input_col = 128 @@ -252,21 +279,139 @@ class XPUTestSequenceConv(XPUOpTestWrapper): self.context_stride = 1 self.input_size = [self.input_row, self.input_col] - offset_lod = [[ - 0, 1, 2, 3, 131, 241, 242, 263, 264, 265, 266, 267, 268, 387, - 515, 516, 644, 645, 772, 794, 922, 923, 924, 944, 945, 1073, - 1074, 1202, 1330, 1458, 1556, 1557, 1558, 1686, 1748, 1876, - 1912, 1913, 1914, 2032, 2066, 2194, 2308, 2309, 2347, 2475, - 2476, 2477, 2478, 2606, 2607, 2735, 2736, 2737, 2738, 2838, - 2966, 2967, 2968, 2969, 3097, 3225, 3353, 3481, 3482, 3520, - 3642, 3643, 3754, 3882, 3883, 4010, 4011, 4012, 4140, 4219, - 4228, 4356, 4357, 4415, 4475, 4476, 4604, 4605, 4606, 4694, - 4695, 4808, 4936, 4961, 4962, 5004, 5132, 5260, 5312, 5440, - 5441, 5569, 5570, 5675, 5676, 5750, 5810, 5811, 5939, 6021, - 6149, 6277, 6278, 6364, 6425, 6519, 6647, 6648, 6739, 6867, - 6995, 6996, 7120, 7223, 7244, 7367, 7407, 7408, 7467, 7595, - 7699, 7827, 7835 - ]] + offset_lod = [ + [ + 0, + 1, + 2, + 3, + 131, + 241, + 242, + 263, + 264, + 265, + 266, + 267, + 268, + 387, + 515, + 516, + 644, + 645, + 772, + 794, + 922, + 923, + 924, + 944, + 945, + 1073, + 1074, + 1202, + 1330, + 1458, + 1556, + 1557, + 1558, + 1686, + 1748, + 1876, + 1912, + 1913, + 1914, + 2032, + 2066, + 2194, + 2308, + 2309, + 2347, + 2475, + 2476, + 2477, + 2478, + 2606, + 2607, + 2735, + 2736, + 2737, + 2738, + 2838, + 2966, + 2967, + 2968, + 2969, + 3097, + 3225, + 3353, + 3481, + 3482, + 3520, + 3642, + 3643, + 3754, + 3882, + 3883, + 4010, + 4011, + 4012, + 4140, + 4219, + 4228, + 4356, + 4357, + 4415, + 4475, + 4476, + 4604, + 4605, + 4606, + 4694, + 4695, + 4808, + 4936, + 4961, + 4962, + 5004, + 5132, + 5260, + 5312, + 5440, + 5441, + 5569, + 5570, + 5675, + 5676, + 5750, + 5810, + 5811, + 5939, + 6021, + 6149, + 6277, + 6278, + 6364, + 6425, + 6519, + 6647, + 6648, + 6739, + 6867, + 6995, + 6996, + 7120, + 7223, + 7244, + 7367, + 7407, + 7408, + 7467, + 7595, + 7699, + 7827, + 7835, + ] + ] self.lod = [[]] # convert from offset-based lod to length-based lod for i in range(len(offset_lod[0]) - 1): @@ -280,19 +425,18 @@ for stype in support_types: class TestSeqConvApi(unittest.TestCase): - def test_api(self): import paddle.fluid as fluid x = fluid.layers.data('x', shape=[32], lod_level=1) - y = fluid.layers.sequence_conv(input=x, - num_filters=2, - filter_size=3, - padding_start=None) + y = fluid.layers.sequence_conv( + input=x, num_filters=2, filter_size=3, padding_start=None + ) place = fluid.CPUPlace() x_tensor = fluid.create_lod_tensor( - np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place) + np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place + ) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sequence_unpad_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sequence_unpad_op_xpu.py index 
10979e21c62f73a98d98d6d2366dddbb6bbaa90b..d24c98ad6c956fba5d12ad09d7d84272057bc4f6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sequence_unpad_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sequence_unpad_op_xpu.py @@ -23,19 +23,21 @@ import paddle.fluid as fluid import unittest import numpy as np from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestSequenceUnpadOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sequence_unpad' self.use_dynamic_create_class = False class TestSequenceUnpadOp(XPUOpTest): - def setUp(self): self.init_dtype() self.initTestCase() @@ -63,49 +65,44 @@ class XPUTestSequenceUnpadOp(XPUOpTestWrapper): x = np.random.random(self.x_shape).astype(self.dtype) out_lod = [self.length] - out = x[0, 0:self.length[0]] + out = x[0, 0 : self.length[0]] for i in range(1, x.shape[0]): - out = np.append(out, x[i, 0:self.length[i]], axis=0) + out = np.append(out, x[i, 0 : self.length[i]], axis=0) - out_shape = (sum(self.length), ) + out_shape = (sum(self.length),) if len(self.x_shape) == 2: - out_shape = out_shape + (1, ) + out_shape = out_shape + (1,) else: out_shape = out_shape + self.x_shape[2:] self.inputs = { 'X': x, - 'Length': np.array(self.length).astype('int64') + 'Length': np.array(self.length).astype('int64'), } self.outputs = {'Out': (out.reshape(out_shape), out_lod)} class TestSequenceUnpadOp2(TestSequenceUnpadOp): - def initTestCase(self): self.length = [2, 3, 4] self.x_shape = (3, 5, 4, 3) class TestSequenceUnpadOp3(TestSequenceUnpadOp): - def initTestCase(self): self.length = [5, 2, 3, 4] self.x_shape = (4, 5, 3, 3, 6) class TestSequenceUnpadOp4(TestSequenceUnpadOp): - def initTestCase(self): self.length = [5, 5, 5, 5] self.x_shape = (4, 5, 3, 3, 6) class TestSequenceUnpadOp5(TestSequenceUnpadOp): - def initTestCase(self): self.length = [1, 4, 3, 1] self.x_shape = (4, 5, 3, 3, 6) class TestSequenceUnpadOpError(unittest.TestCase): - def test_error(self): """ The type of 'x' in fluid.layers.sequence_unpad must be , but received . 
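For reference, a minimal standalone sketch (outside the diff itself) of the unpad computation that the test_sequence_unpad_op_xpu.py hunk above builds with np.append: the first length[i] rows of each padded sequence are concatenated, and the expected output shape is (sum(length),) plus any trailing feature dims. The shapes follow TestSequenceUnpadOp2; the data is illustrative.

import numpy as np

length = [2, 3, 4]                                    # valid steps per sequence
x = np.random.random((3, 5, 4, 3)).astype("float32")  # padded to max_len = 5

# Concatenating the un-padded slices reproduces the reference 'Out' tensor.
out = np.concatenate([x[i, : length[i]] for i in range(x.shape[0])], axis=0)
assert out.shape == (sum(length),) + x.shape[2:]      # (9, 4, 3)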
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py index 07f5e524ccf1ffe968a215621e0a6dc9b89d002e..2c7bb9414104bc827b0297e6b4ed05e50a656bb9 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sgd_op_xpu.py @@ -21,17 +21,19 @@ import paddle import paddle.fluid as fluid from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) class XPUTestSgdOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sgd' self.use_dynamic_create_class = False class TestSGDOp(XPUOpTest): - def setUp(self): self.op_type = "sgd" self.dtype = self.in_type @@ -51,7 +53,6 @@ class XPUTestSgdOp(XPUOpTestWrapper): self.check_output_with_place(paddle.XPUPlace(0)) class TestSGDOpCase8X(TestSGDOp): - def conf(self): self.h = 10 self.w = 64 @@ -63,12 +64,11 @@ for stype in support_types: class TestSGDOpWithLargeInput(unittest.TestCase): - def runTest(self): data = fluid.layers.fill_constant(shape=[1], value=128, dtype='int64') - label = fluid.layers.fill_constant(shape=[1, 150], - value=0.5, - dtype='float32') + label = fluid.layers.fill_constant( + shape=[1, 150], value=0.5, dtype='float32' + ) emb = fluid.embedding(input=data, size=(10000, 150), dtype='float32') out = fluid.layers.l2_normalize(x=emb, axis=-1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py index a4128064a7e8f153343f7124cba4776129d2e989..1a7a51b83149a4969afa307bd61daa478abca808 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_shape_op_xpu.py @@ -18,7 +18,11 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle from paddle.fluid import core from paddle.fluid.op import Operator @@ -27,13 +31,11 @@ paddle.enable_static() class XPUTestShapeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "shape" self.use_dynamic_create_class = False class TestShapeOp(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.op_type = "shape" @@ -51,32 +53,26 @@ class XPUTestShapeOp(XPUOpTestWrapper): self.check_output_with_place(place) class TestShapeOp1(TestShapeOp): - def config(self): self.shape = [2] class TestShapeOp2(TestShapeOp): - def config(self): self.shape = [1, 2, 3] class TestShapeOp3(TestShapeOp): - def config(self): self.shape = [1, 2, 3, 4] class TestShapeOp4(TestShapeOp): - def config(self): self.shape = [1, 2, 3, 4, 1024] class TestShapeOp5(TestShapeOp): - def config(self): self.shape = [1, 2, 3, 4, 1, 201] class TestShapeWithSelectedRows(unittest.TestCase): - def setUp(self): self.dtype = self.in_type diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sigmoid_cross_entropy_with_logits_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sigmoid_cross_entropy_with_logits_op_xpu.py index e9c68dd19997b623fdbced85e51f984ba0bc6820..6221d4f608fb77b6fde6531d04f7e5c6df2e6cc7 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sigmoid_cross_entropy_with_logits_op_xpu.py +++ 
b/python/paddle/fluid/tests/unittests/xpu/test_sigmoid_cross_entropy_with_logits_op_xpu.py @@ -20,7 +20,11 @@ sys.path.append("..") from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) from scipy.special import logit from scipy.special import expit @@ -29,15 +33,13 @@ paddle.enable_static() class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def __init__(self): self.op_name = "sigmoid_cross_entropy_with_logits" self.use_dynamic_create_class = False class TestSigmoidCrossEntropyWithLogitsOp(XPUOpTest): - def setUp(self): self.set_xpu() self.op_type = "sigmoid_cross_entropy_with_logits" @@ -60,13 +62,14 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.randint( + 0, 2, (batch_size, num_classes) + ).astype(self.dtype), } self.attrs = {'num_classes': num_classes, 'batch_size': batch_size} @@ -85,9 +88,9 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): self.dtype = self.in_type class TestSigmoidCrossEntropyWithLogitsOp2( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def set_inputs(self): batch_size = 64 @@ -95,13 +98,14 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): ignore_index = -1 self.ignore_index = ignore_index self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.randint(-1, 2, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.randint( + -1, 2, (batch_size, num_classes) + ).astype(self.dtype), } self.attrs = {'ignore_index': ignore_index} @@ -117,21 +121,22 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): self.outputs = {'Out': out} class TestSigmoidCrossEntropyWithLogitsOp3( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def set_inputs(self): batch_size = 64 num_classes = 20 self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.uniform( + 0, 1, (batch_size, num_classes) + ).astype(self.dtype), } self.attrs = {'num_classes': num_classes, 'batch_size': batch_size} @@ -145,9 +150,9 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): self.outputs = {'Out': -term1 - term2} class TestSigmoidCrossEntropyWithLogitsOp4( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test 
sigmoid_cross_entropy_with_logit_op with probabalistic label""" def set_inputs(self): batch_size = 64 @@ -155,13 +160,14 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): ignore_index = -1 self.ignore_index = ignore_index self.inputs = { - 'X': - logit( + 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)).astype( - self.dtype)), - 'Label': - np.random.randint(-1, 2, - (batch_size, num_classes)).astype(self.dtype) + self.dtype + ) + ), + 'Label': np.random.randint( + -1, 2, (batch_size, num_classes) + ).astype(self.dtype), } self.attrs = {'ignore_index': ignore_index, 'normalize': True} @@ -176,26 +182,27 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): out[np.where(self.inputs['Label'] == self.ignore_index)] = 0 if self.attrs['normalize']: out = out / float( - np.where(self.inputs['Label'] != self.ignore_index)[0].size) + np.where(self.inputs['Label'] != self.ignore_index)[0].size + ) self.outputs = {'Out': out} class TestSigmoidCrossEntropyWithLogitsOp5( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def set_inputs(self): batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.uniform(0, 1, tuple(batch_size + - [num_classes])).astype(self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } self.attrs = {'num_classes': num_classes, 'batch_size': batch_size} @@ -209,22 +216,22 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): self.outputs = {'Out': -term1 - term2} class TestSigmoidCrossEntropyWithLogitsOp6( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with binary label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test sigmoid_cross_entropy_with_logit_op with binary label""" def set_inputs(self): batch_size = [10, 10] num_classes = 20 self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.randint(0, 2, tuple(batch_size + - [num_classes])).astype(self.dtype) + 'X': logit( + np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.randint( + 0, 2, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } self.attrs = {'num_classes': num_classes, 'batch_size': batch_size} @@ -238,9 +245,9 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): self.outputs = {'Out': -term1 - term2} class TestSigmoidCrossEntropyWithLogitsNorm( - TestSigmoidCrossEntropyWithLogitsOp): - """Test sigmoid_cross_entropy_with_logit_op with probabalistic label - """ + TestSigmoidCrossEntropyWithLogitsOp + ): + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label""" def set_inputs(self): batch_size = [10, 10] @@ -248,14 +255,14 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): ignore_index = -1 self.ignore_index = ignore_index self.inputs = { - 'X': - logit( - np.random.uniform(0, 1, - tuple(batch_size + [num_classes])).astype( - self.dtype)), - 'Label': - np.random.randint( - -1, 2, tuple(batch_size + [num_classes])).astype(self.dtype) + 'X': logit( + 
np.random.uniform( + 0, 1, tuple(batch_size + [num_classes]) + ).astype(self.dtype) + ), + 'Label': np.random.randint( + -1, 2, tuple(batch_size + [num_classes]) + ).astype(self.dtype), } self.attrs = {'ignore_index': ignore_index, 'normalize': True} @@ -270,7 +277,8 @@ class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper): out[np.where(self.inputs['Label'] == self.ignore_index)] = 0 if self.attrs['normalize']: out = out / float( - np.where(self.inputs['Label'] != self.ignore_index)[0].size) + np.where(self.inputs['Label'] != self.ignore_index)[0].size + ) self.outputs = {'Out': out} diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sign_op_xpu.py index 976252928317782195668ee00b1719fc6178201a..b498c5fc3a17e59819715939bd7ae19a7918c9ec 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sign_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sign_op_xpu.py @@ -21,19 +21,21 @@ sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestSignOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sign' self.use_dynamic_create_class = False class TestSignOPBase(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -43,8 +45,9 @@ class XPUTestSignOP(XPUOpTestWrapper): self.op_type = 'sign' self.dtype = self.in_type self.init_config() - self.x = np.random.uniform(-10, 10, - self.input_shape).astype(self.dtype) + self.x = np.random.uniform(-10, 10, self.input_shape).astype( + self.dtype + ) self.inputs = {'X': self.x} self.outputs = {'Out': np.sign(self.x)} self.attrs = {'use_xpu': True} @@ -62,22 +65,18 @@ class XPUTestSignOP(XPUOpTestWrapper): self.input_shape = [864] class XPUTestSign1(TestSignOPBase): - def init_config(self): self.input_shape = [2, 768] class XPUTestSign2(TestSignOPBase): - def init_config(self): self.input_shape = [3, 8, 4096] class XPUTestSign3(TestSignOPBase): - def init_config(self): self.input_shape = [1024] class XPUTestSign4(TestSignOPBase): - def init_config(self): self.input_shape = [2, 2, 255] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_slice_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_slice_op_xpu.py index c22bb99b4a9c84f9fe01748f687c271fcb35761f..5a77d9cb51ab8ce116adc90de41a06df199d1b77 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_slice_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_slice_op_xpu.py @@ -19,7 +19,11 @@ import unittest sys.path.append("..") from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -27,13 +31,11 @@ paddle.enable_static() # Situation 1: starts(list, no tensor), ends(list, no tensor) # 1.1 without attr(decrease) class XPUTestSliceOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'slice' self.use_dynamic_create_class = False class TestSliceOp(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -46,7 +48,7 @@ class XPUTestSliceOp(XPUOpTestWrapper): 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags, - "use_xpu": True + "use_xpu": 
True, } def config(self): @@ -62,14 +64,16 @@ class XPUTestSliceOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['Input'], 'Out') else: user_defined_grad_outputs = np.random.random( - self.out.shape).astype(self.dtype) + self.out.shape + ).astype(self.dtype) self.check_grad_with_place( - self.place, ['Input'], + self.place, + ['Input'], 'Out', - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) class TestCase1(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-3, 0, 2] @@ -79,7 +83,6 @@ class XPUTestSliceOp(XPUOpTestWrapper): self.out = self.input[-3:3, 0:100, 2:-1, :] class TestCase2(TestSliceOp): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-3, 0, 2] @@ -91,13 +94,11 @@ class XPUTestSliceOp(XPUOpTestWrapper): # 1.2 with attr(decrease) class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): - def __init__(self): self.op_name = 'slice' self.use_dynamic_create_class = False class TestSliceOp_decs_dim(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -111,7 +112,7 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): 'ends': self.ends, 'infer_flags': self.infer_flags, 'decrease_axis': self.decrease_axis, - "use_xpu": True + "use_xpu": True, } def config(self): @@ -131,14 +132,16 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['Input'], 'Out') else: user_defined_grad_outputs = np.random.random( - self.out.shape).astype(self.dtype) + self.out.shape + ).astype(self.dtype) self.check_grad_with_place( - self.place, ['Input'], + self.place, + ['Input'], 'Out', - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [1, 0, 2] @@ -149,7 +152,6 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): self.out = self.input[1, 0, 2:4, :] class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-1, 0, 2] @@ -160,7 +162,6 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): self.out = self.input[-1, 0, 2:4, :] class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 7]).astype(self.dtype) self.starts = [0, 1, 2, 3] @@ -171,7 +172,6 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): self.out = self.input[0, 1, 2, 3:4] class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [-1] @@ -182,7 +182,6 @@ class XPUTestSliceOp_decs_dim(XPUOpTestWrapper): self.out = self.input[:, :, :, -1] class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): - def config(self): self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) self.starts = [0, 1, 2, 3] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py index a4997c91ffbf5ea358ff47bc914ecdedafe962b1..736b3b7fbe68af99ec0cbcf2f5db9bae7dceffe6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_softmax_op_xpu.py @@ -19,7 +19,11 @@ import unittest sys.path.append("..") from op_test_xpu import XPUOpTest -from 
xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) @@ -29,7 +33,7 @@ def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF - shiftx = (x - np.max(x)).clip(-64.) + shiftx = (x - np.max(x)).clip(-64.0) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -44,7 +48,6 @@ def ref_softmax(x, axis=None, dtype=None): class XPUTestSoftmaxOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'softmax' self.use_dynamic_create_class = True @@ -56,14 +59,12 @@ class XPUTestSoftmaxOp(XPUOpTestWrapper): axis = [-1, 0, 1] for shape in shapes: for axi in axis: - class_name = 'XPUTestSoftmax_' + \ - str(shape) + "_" + str(axi) + class_name = 'XPUTestSoftmax_' + str(shape) + "_" + str(axi) attr_dict = {'shape': shape, 'axis': axi} classes.append([class_name, attr_dict]) return base_class, classes class TestSoftmaxOp(XPUOpTest): - def setUp(self): self.op_type = "softmax" if not hasattr(self, 'shape'): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py index cb604af4f89a4a12150f91e4c797587b8d8d5b2c..f4482e5edd716f5e3e639867590985a6c139b7ec 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py @@ -22,7 +22,11 @@ import paddle import unittest import numpy as np -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): @@ -33,7 +37,7 @@ def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): axis %= len(shape) n = int(np.prod(shape[:axis])) axis_dim = shape[axis] - remain = int(np.prod(shape[axis + 1:])) + remain = int(np.prod(shape[axis + 1 :])) softmax_reshape = softmax.reshape((n, axis_dim, remain)) label_reshape = label.reshape((n, 1, remain)) result = np.zeros_like(label_reshape, dtype=softmax.dtype) @@ -46,7 +50,6 @@ def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): class XPUTestSoftmaxWithCrossEntropyOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'softmax_with_cross_entropy' self.use_dynamic_create_class = True @@ -54,26 +57,39 @@ class XPUTestSoftmaxWithCrossEntropyOp(XPUOpTestWrapper): def dynamic_create_class(self): base_class = self.TestSoftmaxWithCrossEntropyOp classes = [] - shapes = [[41, 37], [3, 5, 7, 11], [3, 5, 7, 1], [1023, 38512], - [1, 511]] + shapes = [ + [41, 37], + [3, 5, 7, 11], + [3, 5, 7, 1], + [1023, 38512], + [1, 511], + ] for soft_label in [True, False]: for numeric_stable_mode in [True, False]: for shape in shapes: for logits_type in [0, 1, 2]: for axis in range(len(shape)): - if (not numeric_stable_mode): + if not numeric_stable_mode: axis = -1 - class_name = 'XPUTestSoftmaxWithCrossEntropy_' + \ - str(soft_label) + "_" + \ - str(numeric_stable_mode) + "_" + \ - str(shape) + "_" + \ - str(logits_type) + "_" + \ - str(axis) - attr_dict = {'soft_label': soft_label, \ - 'numeric_stable_mode': numeric_stable_mode, \ - 'shape': shape, \ - 'logits_type': 
logits_type, - 'axis': axis} + class_name = ( + 'XPUTestSoftmaxWithCrossEntropy_' + + str(soft_label) + + "_" + + str(numeric_stable_mode) + + "_" + + str(shape) + + "_" + + str(logits_type) + + "_" + + str(axis) + ) + attr_dict = { + 'soft_label': soft_label, + 'numeric_stable_mode': numeric_stable_mode, + 'shape': shape, + 'logits_type': logits_type, + 'axis': axis, + } classes.append([class_name, attr_dict]) return base_class, classes @@ -95,8 +111,10 @@ class XPUTestSoftmaxWithCrossEntropyOp(XPUOpTestWrapper): self.soft_label = True self.axis = -1 logits = getattr( - self, "logits", - np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)) + self, + "logits", + np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype), + ) if self.logits_type == 1: self.logits = np.full(self.shape, -500.0).astype(self.dtype) elif self.logits_type == 2 and len(self.shape) == 4: @@ -105,24 +123,25 @@ class XPUTestSoftmaxWithCrossEntropyOp(XPUOpTestWrapper): softmax = np.apply_along_axis(stable_softmax, self.axis, logits) if self.soft_label: - labels = np.random.uniform(0.1, 1.0, - self.shape).astype(self.dtype) + labels = np.random.uniform(0.1, 1.0, self.shape).astype( + self.dtype + ) labels /= np.sum(labels, axis=self.axis, keepdims=True) else: axis_dim = self.shape[self.axis] self.shape[self.axis] = 1 - labels = np.random.randint(0, - axis_dim, - self.shape, - dtype="int64") + labels = np.random.randint( + 0, axis_dim, self.shape, dtype="int64" + ) - loss = cross_entropy(softmax, labels, self.soft_label, self.axis, - self.ignore_index) + loss = cross_entropy( + softmax, labels, self.soft_label, self.axis, self.ignore_index + ) self.inputs = {"Logits": logits, "Label": labels} self.outputs = { "Softmax": softmax.astype(self.dtype), - "Loss": loss.astype(self.dtype) + "Loss": loss.astype(self.dtype), } self.attrs = { "numeric_stable_mode": self.numeric_stable_mode, @@ -143,9 +162,9 @@ class XPUTestSoftmaxWithCrossEntropyOp(XPUOpTestWrapper): if paddle.is_compiled_with_xpu(): paddle.enable_static() place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ["Logits"], - "Loss", - max_relative_error=0.2) + self.check_grad_with_place( + place, ["Logits"], "Loss", max_relative_error=0.2 + ) support_types = get_xpu_op_support_types('softmax_with_cross_entropy') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py index 3d6ab9e04811e7d900ed8e75dea975a566d54e4e..c7dff56cb621a0b238de84ee0a89dd9d8d04bc01 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py @@ -20,20 +20,22 @@ import numpy as np from op_test_xpu import XPUOpTest import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestSplitOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'split' self.use_dynamic_create_class = False # test with attr(num) class TestSplitOp(XPUOpTest): - def setUp(self): self.init_dtype() self.__class__.use_xpu = True @@ -44,12 +46,13 @@ class XPUTestSplitOp(XPUOpTestWrapper): self.attrs = { 'axis': self.axis, 'sections': self.sections, - 'num': self.num + 'num': self.num, } out = np.split(self.x, self.indices_or_sections, self.axis) - self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in range(len(out))]} 
+ self.outputs = { + 'Out': [('out%d' % i, out[i]) for i in range(len(out))] + } def init_dtype(self): self.dtype = self.in_type @@ -66,7 +69,6 @@ class XPUTestSplitOp(XPUOpTestWrapper): # unknown sections class TestSplitOp1(TestSplitOp): - def initParameters(self): self.x = np.random.random((4, 5, 6)).astype(self.dtype) self.axis = 2 @@ -76,7 +78,6 @@ class XPUTestSplitOp(XPUOpTestWrapper): # test with int32 class TestSplitOp2(TestSplitOp): - def initParameters(self): self.x = np.random.random((4, 5, 6)).astype(np.int32) self.axis = 2 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_squeeze2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_squeeze2_op_xpu.py index fdd81c0c1384d78af951cfd6a4ed32befba71d23..1b2a52a6962f19cc0364125268e9417f1e873063 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_squeeze2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_squeeze2_op_xpu.py @@ -20,20 +20,22 @@ sys.path.append("..") import numpy as np from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) import paddle paddle.enable_static() class XPUTestSqueeze2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = "squeeze2" self.use_dynamic_create_class = False class TestSqueeze2Op(XPUOpTest): - def setUp(self): self.op_type = "squeeze2" self.__class__.op_type = "squeeze2" @@ -45,7 +47,7 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper): } self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype(self.dtype) + "XShape": np.random.random(self.ori_shape).astype(self.dtype), } self.init_attrs() @@ -73,7 +75,6 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper): # Correct: There is mins axis. class TestSqueeze2Op1(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = (0, -2) @@ -81,7 +82,6 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper): # Correct: No axes input. class TestSqueeze2Op2(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -89,7 +89,6 @@ class XPUTestSqueeze2Op(XPUOpTestWrapper): # Correct: Just part of axes be squeezed. class TestSqueeze2Op3(TestSqueeze2Op): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_squeeze_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_squeeze_op_xpu.py index f1c6873823f1b583cb3d1135bcf01ed720b7cb83..a87fe32d62e0de0a9d489e00124104a1624fc00b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_squeeze_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_squeeze_op_xpu.py @@ -23,20 +23,22 @@ import paddle import paddle.fluid as fluid from paddle.fluid import Program, program_guard from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestSqueezeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "squeeze" self.use_dynamic_create_class = False # Correct: General. class TestSqueezeOp(XPUOpTest): - def setUp(self): self.op_type = "squeeze" self.__class__.op_type = "squeeze" @@ -75,7 +77,6 @@ class XPUTestSqueezeOp(XPUOpTestWrapper): # Correct: There is mins axis. 
class TestSqueezeOp1(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 3, 1, 40) self.axes = (0, -2) @@ -83,7 +84,6 @@ class XPUTestSqueezeOp(XPUOpTestWrapper): # Correct: No axes input. class TestSqueezeOp2(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (1, 20, 1, 5) self.axes = () @@ -91,7 +91,6 @@ class XPUTestSqueezeOp(XPUOpTestWrapper): # Correct: Just part of axes be squeezed. class TestSqueezeOp3(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, -1) @@ -99,7 +98,6 @@ class XPUTestSqueezeOp(XPUOpTestWrapper): # Correct: The demension of axis is not of size 1 remains unchanged. class TestSqueezeOp4(TestSqueezeOp): - def init_test_case(self): self.ori_shape = (6, 1, 5, 1, 4, 1) self.axes = (1, 2) @@ -107,13 +105,13 @@ class XPUTestSqueezeOp(XPUOpTestWrapper): class TestSqueezeOpError(unittest.TestCase): - def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of softmax_op must be Variable. - x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]], - paddle.XPUPlace(0)) + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], paddle.XPUPlace(0) + ) self.assertRaises(TypeError, paddle.squeeze, x1) # The input axes of squeeze must be list. x2 = paddle.static.data(name='x2', shape=[4], dtype="int32") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py index 2102348de36e4b719feba26c6b0e4f4321f57b31..1f215acdb7437a5156b355904bb704c3783e2438 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py @@ -21,20 +21,22 @@ from op_test import skip_check_grad_ci from op_test_xpu import XPUOpTest import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestStackOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'stack' self.use_dynamic_create_class = False @skip_check_grad_ci(reason="There is no grad kernel for stack_xpu op.") class TestStackOp(XPUOpTest): - def initDefaultParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -49,7 +51,8 @@ class XPUTestStackOp(XPUOpTestWrapper): self.x = [] for i in range(self.num_inputs): self.x.append( - np.random.random(size=self.input_dim).astype(self.dtype)) + np.random.random(size=self.input_dim).astype(self.dtype) + ) tmp = [] x_names = self.get_x_names() @@ -79,21 +82,19 @@ class XPUTestStackOp(XPUOpTestWrapper): if self.dtype == np.int32 or self.dtype == np.int64: pass else: - self.check_grad_with_place(paddle.XPUPlace(0), - self.get_x_names(), 'Y') + self.check_grad_with_place( + paddle.XPUPlace(0), self.get_x_names(), 'Y' + ) class TestStackOp1(TestStackOp): - def initParameters(self): self.num_inputs = 16 class TestStackOp2(TestStackOp): - def initParameters(self): self.num_inputs = 30 class TestStackOp3(TestStackOp): - def initParameters(self): self.axis = -1 @@ -101,7 +102,6 @@ class XPUTestStackOp(XPUOpTestWrapper): pass class TestStackOp4(TestStackOp): - def initParameters(self): self.axis = -4 @@ -109,17 +109,14 @@ class XPUTestStackOp(XPUOpTestWrapper): pass class TestStackOp5(TestStackOp): - def initParameters(self): self.axis = 1 class TestStackOp6(TestStackOp): - def initParameters(self): self.axis = 3 
class TestStackOp7(TestStackOp): - def initParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) @@ -130,7 +127,6 @@ class XPUTestStackOp(XPUOpTestWrapper): pass class TestStackOp8(TestStackOp): - def initParameters(self): self.num_inputs = 4 self.input_dim = (5, 6, 7) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py index 3d74b224cbbeabff9b7a864747c684da709ae402..778cb66b9a84b1ac54c31a9fe8bc78e2caf6180b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py @@ -22,19 +22,21 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestSumOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'sum' self.use_dynamic_create_class = False class TestSumOp(XPUOpTest): - def setUp(self): self.init_dtype() self.set_xpu() @@ -66,30 +68,24 @@ class XPUTestSumOp(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['x0'], 'Out') class TestSumOp1(TestSumOp): - def set_shape(self): - self.shape = (5) + self.shape = 5 class TestSumOp2(TestSumOp): - def set_shape(self): self.shape = (1, 1, 1, 1, 1) class TestSumOp3(TestSumOp): - def set_shape(self): self.shape = (10, 5, 7) class TestSumOp4(TestSumOp): - def set_shape(self): self.shape = (2, 2, 3, 3) def create_test_sum_fp16_class(parent): - class TestSumFp16Case(parent): - def init_kernel_type(self): self.dtype = np.float16 @@ -105,15 +101,14 @@ def create_test_sum_fp16_class(parent): class API_Test_Add_n(unittest.TestCase): - def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input0 = fluid.layers.fill_constant(shape=[2, 3], - dtype='int64', - value=5) - input1 = fluid.layers.fill_constant(shape=[2, 3], - dtype='int64', - value=3) + input0 = fluid.layers.fill_constant( + shape=[2, 3], dtype='int64', value=5 + ) + input1 = fluid.layers.fill_constant( + shape=[2, 3], dtype='int64', value=3 + ) expected_result = np.empty((2, 3)) expected_result.fill(8) sum_value = paddle.add_n([input0, input1]) @@ -132,9 +127,7 @@ class API_Test_Add_n(unittest.TestCase): class TestRaiseSumError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.sum([11, 22]) @@ -155,9 +148,7 @@ class TestRaiseSumError(unittest.TestCase): class TestRaiseSumsError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.sums([11, 22]) @@ -193,9 +184,7 @@ class TestRaiseSumsError(unittest.TestCase): class TestSumOpError(unittest.TestCase): - def test_errors(self): - def test_empty_list_input(): with fluid.dygraph.guard(): fluid._legacy_C_ops.sum([]) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py index 0158284c01975e1e0d31c8e1622cb09e71cd9960..5fce6052502b1b1a47a2b4c21b04eed83382ee24 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_temporal_shift_op_xpu.py @@ -22,7 +22,11 @@ import paddle import paddle.nn.functional as F from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper 
+from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) @@ -33,13 +37,14 @@ def temporal_shift(x, seg_num, shift_ratio, data_format): x = np.transpose(x, (0, 3, 1, 2)) shape = x.shape reshape_x = x.reshape((-1, seg_num, shape[1], shape[2], shape[3])) - pad_x = np.pad(reshape_x, ((0, 0), (1, 1), (0, 0), (0, 0), (0, 0)), - 'constant') + pad_x = np.pad( + reshape_x, ((0, 0), (1, 1), (0, 0), (0, 0), (0, 0)), 'constant' + ) c1 = int(shape[1] * shift_ratio) c2 = int(shape[1] * 2 * shift_ratio) slice1 = pad_x[:, :seg_num, :c1, :, :] - slice2 = pad_x[:, 2:seg_num + 2, c1:c2, :, :] - slice3 = pad_x[:, 1:seg_num + 1, c2:, :, :] + slice2 = pad_x[:, 2 : seg_num + 2, c1:c2, :, :] + slice3 = pad_x[:, 1 : seg_num + 1, c2:, :, :] concat_x = np.concatenate([slice1, slice2, slice3], axis=2) out = concat_x.reshape(shape) if data_format == "NHWC": @@ -48,13 +53,11 @@ def temporal_shift(x, seg_num, shift_ratio, data_format): class XPUTestTemporalShiftOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "temporal_shift" self.use_dynamic_create_class = False class TestXPUTemporalShift(XPUOpTest): - def setUp(self): self.initTestCase() self.op_type = 'temporal_shift' @@ -65,15 +68,16 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.attrs = { "seg_num": self.seg_num, "shift_ratio": self.shift_ratio, - "data_format": self.data_format + "data_format": self.data_format, } self.inputs = { "X": x, } - output = temporal_shift(x, self.seg_num, self.shift_ratio, - self.data_format) + output = temporal_shift( + x, self.seg_num, self.shift_ratio, self.data_format + ) self.outputs = {"Out": output} self.python_out_sig = ["Out"] @@ -91,7 +95,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NCHW' class TestXPUTemporalShift2(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (1, 1, 1, 1) self.seg_num = 1 @@ -100,7 +103,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NCHW' class TestXPUTemporalShift3(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (4, 9, 1, 1) self.seg_num = 2 @@ -109,7 +111,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NCHW' class TestXPUTemporalShift4(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (4, 1, 10, 10) self.seg_num = 2 @@ -118,7 +119,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NCHW' class TestXPUTemporalShift5(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (1, 1, 1, 1) self.seg_num = 1 @@ -127,7 +127,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NHWC' class TestXPUTemporalShift6(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (6, 5, 5, 1) self.seg_num = 3 @@ -136,7 +135,6 @@ class XPUTestTemporalShiftOp(XPUOpTestWrapper): self.data_format = 'NHWC' class TestXPUTemporalShift7(TestXPUTemporalShift): - def initTestCase(self): self.x_shape = (9, 1, 1, 4) self.seg_num = 3 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_tile_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_tile_op_xpu.py index 1854e164386c0987699822afc292d16ed90c875a..ef6c65f71dd841cb049d09f8466c1f897988bdfa 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_tile_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_tile_op_xpu.py @@ -20,21 +20,23 @@ sys.path.append("..") from op_test_xpu import XPUOpTest import paddle import paddle.fluid as fluid -from xpu.get_test_cover_info import 
create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() np.random.seed(10) -#Situation 1: repeat_times is a list (without tensor) +# Situation 1: repeat_times is a list (without tensor) class XPUTestTileOpRank1(XPUOpTestWrapper): - def __init__(self): self.op_name = 'tile' self.use_dynamic_create_class = False class TestTileOpRank1(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.__class__.no_need_check_grad = True @@ -55,39 +57,33 @@ class XPUTestTileOpRank1(XPUOpTestWrapper): def test_check_output(self): self.check_output_with_place(self.place) - #with dimension expanding + # with dimension expanding class TestTileOpRank2Expanding(TestTileOpRank1): - def init_data(self): self.ori_shape = [120] self.repeat_times = [2, 2] class TestTileOpRank2(TestTileOpRank1): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] class TestTileOpRank3_Corner(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (1, 1, 1) class TestTileOpRank3_Corner2(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 10, 5) self.repeat_times = (2, 2) class TestTileOpRank3(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 15) self.repeat_times = (2, 1, 4) class TestTileOpRank4(TestTileOpRank1): - def init_data(self): self.ori_shape = (2, 4, 5, 7) self.repeat_times = (3, 2, 1, 2) @@ -95,13 +91,11 @@ class XPUTestTileOpRank1(XPUOpTestWrapper): # Situation 2: repeat_times is a list (with tensor) class XPUTestTileOpRank1_tensor_attr(XPUOpTestWrapper): - def __init__(self): self.op_name = 'tile' self.use_dynamic_create_class = False class TestTileOpRank1_tensor_attr(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.__class__.no_need_check_grad = True @@ -110,8 +104,9 @@ class XPUTestTileOpRank1_tensor_attr(XPUOpTestWrapper): self.init_data() repeat_times_tensor = [] for index, ele in enumerate(self.repeat_times): - repeat_times_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) + repeat_times_tensor.append( + ("x" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), @@ -130,14 +125,12 @@ class XPUTestTileOpRank1_tensor_attr(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [1, 1] self.infer_repeat_times = [1, -1] class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -146,13 +139,11 @@ class XPUTestTileOpRank1_tensor_attr(XPUOpTestWrapper): # Situation 3: repeat_times is a tensor class XPUTestTileOpRank1_tensor(XPUOpTestWrapper): - def __init__(self): self.op_name = 'tile' self.use_dynamic_create_class = False class TestTileOpRank1_tensor(XPUOpTest): - def setUp(self): self.dtype = self.in_type self.__class__.no_need_check_grad = True @@ -176,7 +167,6 @@ class XPUTestTileOpRank1_tensor(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestTileOpRank2_tensor(TestTileOpRank1_tensor): - def init_data(self): self.ori_shape = [12, 14] self.repeat_times = [2, 3] @@ -191,7 +181,6 @@ for stype in support_types: # Test python API class TestTileAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(paddle.XPUPlace(0)): np_x = 
np.random.random([12, 14]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_top_k_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_top_k_op_xpu.py index 5cd42c912677ab044f9ae919ac80b9b59ad525b4..fe251b320a652c5d224a9a2170c80689f40eed8c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_top_k_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_top_k_op_xpu.py @@ -19,7 +19,11 @@ import sys sys.path.append("..") import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -28,23 +32,24 @@ def random_unique_float(row, k, dtype): # create a random float array with 10x length arr = np.random.uniform(-10.0, 10.0, int(row * k * 10)).astype(dtype) arr = np.unique(arr) - assert arr.shape[ - 0] >= row * k, "failed to create enough unique values: %d vs %d" % ( - arr.shape[0], row * k) - arr = arr[:row * k] + assert ( + arr.shape[0] >= row * k + ), "failed to create enough unique values: %d vs %d" % ( + arr.shape[0], + row * k, + ) + arr = arr[: row * k] np.random.shuffle(arr) arr = arr.reshape(row, k) return arr class XPUTestTopkOP(XPUOpTestWrapper): - def __init__(self): self.op_name = 'top_k' self.use_dynamic_create_class = False class TestXPUTopkOP(XPUOpTest): - def setUp(self): self.place = paddle.XPUPlace(0) self.init_dtype() @@ -85,28 +90,24 @@ class XPUTestTopkOP(XPUOpTestWrapper): self.check_grad_with_place(self.place, ['X'], 'Out') class TestTopk1(TestXPUTopkOP): - def set_case(self): self.variable_k = True self.row = 100 self.top_k = 1 class TestTopk2(TestXPUTopkOP): - def set_case(self): self.variable_k = False self.row = 16 self.top_k = 256 class TestTopk3(TestXPUTopkOP): - def set_case(self): self.variable_k = True self.row = 10 self.top_k = 512 class TestTopk4(TestXPUTopkOP): - def set_case(self): self.variable_k = False self.row = 5 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py index 4ed7619bf306fd30ca5a51afc492f38f2e059580..5393834c370f16e0500f88ef2d186f3532cd2369 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py @@ -19,7 +19,11 @@ import sys sys.path.append("..") from op_test_xpu import XPUOpTest import paddle -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() @@ -41,13 +45,11 @@ def numpy_topk(x, k=1, axis=-1, largest=True): class XPUTestTopKV2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = 'top_k_v2' self.use_dynamic_create_class = False class TestTopkOp(XPUOpTest): - def init_args(self): self.k = 3 self.axis = 1 @@ -62,12 +64,11 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.attrs = { 'k': self.k, 'axis': self.axis, - 'largest': self.largest + 'largest': self.largest, } - output, indices = numpy_topk(self.input_data, - axis=self.axis, - k=self.k, - largest=self.largest) + output, indices = numpy_topk( + self.input_data, axis=self.axis, k=self.k, largest=self.largest + ) self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): @@ -81,7 +82,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): 
self.check_grad(set(['X']), 'Out') class TestTopkOp1(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -89,7 +89,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(100, 155).astype(self.dtype) class TestTopkOp2(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -97,7 +96,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp3(TestTopkOp): - def init_args(self): self.k = 5 self.axis = 1 @@ -105,7 +103,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp4(TestTopkOp): - def init_args(self): self.k = 1 self.axis = 1 @@ -113,7 +110,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp5(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 2 @@ -121,7 +117,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp6(TestTopkOp): - def init_args(self): self.k = 5 self.axis = 1 @@ -129,7 +124,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(8, 32, 64).astype(self.dtype) class TestTopkOp7(TestTopkOp): - def init_args(self): self.k = 10 self.axis = 2 @@ -137,7 +131,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype) class TestTopkOp8(TestTopkOp): - def init_args(self): self.k = 1 self.axis = 1 @@ -145,7 +138,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(8, 32, 64).astype(self.dtype) class TestTopkOp9(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -153,7 +145,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp10(TestTopkOp): - def init_args(self): self.k = 3 self.axis = 1 @@ -161,7 +152,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp11(TestTopkOp): - def init_args(self): self.k = 5 self.axis = 1 @@ -169,7 +159,6 @@ class XPUTestTopKV2Op(XPUOpTestWrapper): self.input_data = np.random.rand(10, 10, 5).astype(self.dtype) class TestTopkOp12(TestTopkOp): - def init_args(self): self.k = 1 self.axis = 1 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_transpose_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_transpose_op_xpu.py index 3dd7531f350483bb4d2385e14f806ded7ef4a0f5..1261487d3937354691952e819790147bb0518090 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_transpose_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_transpose_op_xpu.py @@ -22,7 +22,6 @@ import paddle class TestXPUTransposeOp(XPUOpTest): - def setUp(self): self.init_op_type() self.initTestCase() @@ -32,11 +31,11 @@ class TestXPUTransposeOp(XPUOpTest): self.attrs = { 'axis': list(self.axis), 'use_mkldnn': False, - 'use_xpu': True + 'use_xpu': True, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float32"), - 'Out': self.inputs['X'].transpose(self.axis) + 'Out': self.inputs['X'].transpose(self.axis), } def init_op_type(self): @@ -61,77 +60,66 @@ class TestXPUTransposeOp(XPUOpTest): class TestCase_ZeroDim(TestXPUTransposeOp): - def initTestCase(self): self.shape = () self.axis = () class TestCase0(TestXPUTransposeOp): - def initTestCase(self): - self.shape = (100, ) - self.axis = (0, ) + self.shape = (100,) + self.axis = (0,) class TestCase1(TestXPUTransposeOp): 
- def initTestCase(self): self.shape = (3, 4, 10) self.axis = (0, 2, 1) class TestCase2(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5) self.axis = (0, 2, 3, 1) class TestCase3(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6) self.axis = (4, 2, 3, 1, 0) class TestCase4(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 3, 4, 5, 6, 1) self.axis = (4, 2, 3, 1, 0, 5) class TestCase5(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 16, 96) self.axis = (0, 2, 1) class TestCase6(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 10, 12, 16) self.axis = (3, 1, 2, 0) class TestCase7(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 10, 2, 16) self.axis = (0, 1, 3, 2) class TestCase8(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (0, 1, 3, 2, 4, 5, 6, 7) class TestCase9(TestXPUTransposeOp): - def initTestCase(self): self.shape = (2, 3, 2, 3, 2, 4, 3, 3) self.axis = (6, 1, 3, 5, 0, 2, 4, 7) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py index 5acd5f22b6050a0d38028ee8a29534b5f44aab35..a80d6adfb94e2b480ba7cfe2496dd3da2b1c7343 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py @@ -20,19 +20,21 @@ import paddle.tensor as tensor import unittest import numpy as np from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestTrilTriuOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'tril_triu' self.use_dynamic_create_class = False class TestTrilTriuOp(XPUOpTest): - def setUp(self): self.init_dtype() self.initTestCase() @@ -42,9 +44,9 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper): self.op_type = "tril_triu" self.place = paddle.XPUPlace(0) if self.dtype == np.int32: - self.X = np.arange(1, - self.get_Xshape_prod() + 1, - dtype=self.dtype).reshape(self.Xshape) + self.X = np.arange( + 1, self.get_Xshape_prod() + 1, dtype=self.dtype + ).reshape(self.Xshape) else: self.X = np.random.random(self.Xshape).astype(dtype=self.dtype) self.inputs = {'X': self.X} @@ -53,9 +55,9 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper): 'lower': True if self.real_op_type == 'tril' else False, } self.outputs = { - 'Out': - self.real_np_op(self.X, self.diagonal) - if self.diagonal else self.real_np_op(self.X) + 'Out': self.real_np_op(self.X, self.diagonal) + if self.diagonal + else self.real_np_op(self.X) } def init_dtype(self): @@ -78,11 +80,14 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper): def test_check_grad_normal(self): if self.dtype == np.int32: user_defined_grad_outputs = np.random.random( - self.Xshape).astype('float32') + self.Xshape + ).astype('float32') self.check_grad_with_place( - self.place, ['X'], + self.place, + ['X'], 'Out', - user_defined_grad_outputs=user_defined_grad_outputs) + user_defined_grad_outputs=user_defined_grad_outputs, + ) else: self.check_grad_with_place(self.place, ['X'], 'Out') @@ -91,74 +96,70 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper): self.Xshape = (10, 10) class TestTrilTriuOp1(TestTrilTriuOp): - def initTestCase(self): self.diagonal = -3 self.Xshape = (5, 5) class TestTrilTriuOp2(TestTrilTriuOp): - def initTestCase(self): 
self.diagonal = 4 self.Xshape = (11, 17) class TestTrilTriuOp3(TestTrilTriuOp): - def initTestCase(self): self.diagonal = 10 self.Xshape = (2, 25, 25) class TestTrilTriuOp4(TestTrilTriuOp): - def initTestCase(self): self.diagonal = -10 self.Xshape = (1, 2, 33, 11) class TestTrilTriuOp5(TestTrilTriuOp): - def initTestCase(self): self.diagonal = 11 self.Xshape = (1, 1, 99) class TestTrilTriuOp6(TestTrilTriuOp): - def initTestCase(self): self.diagonal = 5 self.Xshape = (1, 2, 3, 5, 99) class TestTrilTriuOp7(TestTrilTriuOp): - def initTestCase(self): self.diagonal = -100 self.Xshape = (2, 2, 3, 4, 5) class TestTrilTriuOpError(unittest.TestCase): - def test_errors1(self): paddle.enable_static() data = fluid.data(shape=(20, 22), dtype='float32', name="data1") op_type = np.random.choice(['triu', 'tril']) errmsg = { - "diagonal: TypeError": - "diagonal in {} must be a python Int".format(op_type), + "diagonal: TypeError": "diagonal in {} must be a python Int".format( + op_type + ), } expected = list(errmsg.keys())[0] - with self.assertRaisesRegex(eval(expected.split(':')[-1]), - errmsg[expected]): + with self.assertRaisesRegex( + eval(expected.split(':')[-1]), errmsg[expected] + ): getattr(tensor, op_type)(x=data, diagonal='2022') def test_errors2(self): paddle.enable_static() - data = fluid.data(shape=(200, ), dtype='float32', name="data2") + data = fluid.data(shape=(200,), dtype='float32', name="data2") op_type = np.random.choice(['triu', 'tril']) errmsg = { - "input: ValueError": - "x shape in {} must be at least 2-D".format(op_type), + "input: ValueError": "x shape in {} must be at least 2-D".format( + op_type + ), } expected = list(errmsg.keys())[0] - with self.assertRaisesRegex(eval(expected.split(':')[-1]), - errmsg[expected]): + with self.assertRaisesRegex( + eval(expected.split(':')[-1]), errmsg[expected] + ): getattr(tensor, op_type)(x=data, diagonal=[None]) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_truncated_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_truncated_gaussian_random_op_xpu.py index 649538aeab2f7e3d589c1ec05e96d925cbc67b7a..ea79e9078dbaeb93b08e1816e160bc34f491248f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_truncated_gaussian_random_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_truncated_gaussian_random_op_xpu.py @@ -21,19 +21,21 @@ import paddle import paddle.fluid as fluid from paddle.fluid.executor import Executor from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestTruncatedGaussianRandomOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'truncated_gaussian_random' self.use_dynamic_create_class = False class TestTruncatedGaussianRandomOp(XPUOpTest): - def init(self): self.dtype = self.in_type self.place = paddle.XPUPlace(0) @@ -64,9 +66,9 @@ class XPUTestTruncatedGaussianRandomOp(XPUOpTestWrapper): program = fluid.Program() block = program.global_block() vout = block.create_var(name="Out") - op = block.append_op(type=self.op_type, - outputs={"Out": vout}, - attrs=self.attrs) + op = block.append_op( + type=self.op_type, outputs={"Out": vout}, attrs=self.attrs + ) op.desc.infer_var_type(block.desc) op.desc.infer_shape(block.desc) @@ -82,28 +84,24 @@ class XPUTestTruncatedGaussianRandomOp(XPUOpTestWrapper): np.testing.assert_allclose(np.var(tensor), 0.773, atol=0.05) class 
TestTruncatedGaussianRandomOp_1(TestTruncatedGaussianRandomOp): - def set_attrs(self): self.shape = [4096, 2] self.mean = 5.0 self.std = 1.0 class TestTruncatedGaussianRandomOp_2(TestTruncatedGaussianRandomOp): - def set_attrs(self): self.shape = [1024] self.mean = -2.0 self.std = 1.0 class TestTruncatedGaussianRandomOp_3(TestTruncatedGaussianRandomOp): - def set_attrs(self): self.shape = [11 * 13 * 17] self.mean = -1.0 self.std = 1.0 class TestTruncatedGaussianRandomOp_4(TestTruncatedGaussianRandomOp): - def set_attrs(self): self.shape = [2049] self.mean = 5.1234 diff --git a/python/paddle/fluid/tests/unittests/xpu/test_uniform_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_uniform_random_op_xpu.py index 84f949a74bf7fd3784b95235e8a2746fdca51eb2..4847e9db7810a0cd9b8a9ade8ca86163b22aa2fe 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_uniform_random_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_uniform_random_op_xpu.py @@ -18,13 +18,15 @@ sys.path.append("..") import unittest import numpy as np import paddle -from test_uniform_random_op import TestUniformRandomOp, TestUniformRandomOpSelectedRows +from test_uniform_random_op import ( + TestUniformRandomOp, + TestUniformRandomOpSelectedRows, +) paddle.enable_static() class TestXPUUniformRandomOp(TestUniformRandomOp): - def test_check_output(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) @@ -35,7 +37,6 @@ class TestXPUUniformRandomOp(TestUniformRandomOp): class TestXPUUniformRandomOpSelectedRows(TestUniformRandomOpSelectedRows): - def test_check_output(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze2_op_xpu.py index 743a818952866927e58f64aa4796ea29ca0ed18c..c745466fb24bfbbe18902f1779a4a58815e2d83e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze2_op_xpu.py @@ -21,19 +21,21 @@ import numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestUnsqueeze2Op(XPUOpTestWrapper): - def __init__(self): self.op_name = "unsqueeze2" self.use_dynamic_create_class = False class TestUnsqueeze2Op(XPUOpTest): - def setUp(self): self.op_type = "unsqueeze2" self.__class__.op_type = "unsqueeze2" @@ -45,7 +47,7 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): } self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype(self.dtype) + "XShape": np.random.random(self.ori_shape).astype(self.dtype), } self.init_attrs() @@ -73,15 +75,13 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): # Correct: Single input index. class TestUnsqueeze2Op1(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueeze2Op2(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -89,7 +89,6 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): # Correct: There is duplicated axis. 
class TestUnsqueeze2Op3(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -97,7 +96,6 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): # Correct: Reversed axes. class TestUnsqueeze2Op4(TestUnsqueeze2Op): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -105,7 +103,6 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): # axes is a list(with tensor) class TestUnsqueeze2Op_AxesTensorList(XPUOpTest): - def setUp(self): self.op_type = "unsqueeze2" self.__class__.op_type = "unsqueeze2" @@ -115,17 +112,18 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): axes_tensor_list = [] for index, ele in enumerate(self.axes): - axes_tensor_list.append(("axes" + str(index), np.ones( - (1)).astype('int32') * ele)) + axes_tensor_list.append( + ("axes" + str(index), np.ones((1)).astype('int32') * ele) + ) self.inputs = { "X": np.random.random(self.ori_shape).astype(self.dtype), - "AxesTensorList": axes_tensor_list + "AxesTensorList": axes_tensor_list, } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype(self.dtype) + "XShape": np.random.random(self.ori_shape).astype(self.dtype), } def init_dtype(self): @@ -151,28 +149,24 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): self.attrs = {} class TestUnsqueeze2Op1_AxesTensorList(TestUnsqueeze2Op_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueeze2Op2_AxesTensorList(TestUnsqueeze2Op_AxesTensorList): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) self.new_shape = (1, 20, 5, 1) class TestUnsqueeze2Op3_AxesTensorList(TestUnsqueeze2Op_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) self.new_shape = (1, 10, 2, 1, 1, 5) class TestUnsqueeze2Op4_AxesTensorList(TestUnsqueeze2Op_AxesTensorList): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) @@ -180,7 +174,6 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): # axes is a Tensor class TestUnsqueeze2Op_AxesTensor(XPUOpTest): - def setUp(self): self.op_type = "unsqueeze2" self.__class__.op_type = "unsqueeze2" @@ -190,12 +183,12 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): self.inputs = { "X": np.random.random(self.ori_shape).astype(self.dtype), - "AxesTensor": np.array(self.axes).astype("int32") + "AxesTensor": np.array(self.axes).astype("int32"), } self.init_attrs() self.outputs = { "Out": self.inputs["X"].reshape(self.new_shape), - "XShape": np.random.random(self.ori_shape).astype(self.dtype) + "XShape": np.random.random(self.ori_shape).astype(self.dtype), } def init_dtype(self): @@ -221,28 +214,24 @@ class XPUTestUnsqueeze2Op(XPUOpTestWrapper): self.attrs = {} class TestUnsqueeze2Op1_AxesTensor(TestUnsqueeze2Op_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) class TestUnsqueeze2Op2_AxesTensor(TestUnsqueeze2Op_AxesTensor): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) self.new_shape = (1, 20, 5, 1) class TestUnsqueeze2Op3_AxesTensor(TestUnsqueeze2Op_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) self.new_shape = (1, 10, 2, 1, 1, 5) class TestUnsqueeze2Op4_AxesTensor(TestUnsqueeze2Op_AxesTensor): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) diff --git 
a/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze_op_xpu.py index 6340610a67d6ae441ba205ad6408d1b208ca5a8f..1f0cb53500f4a042617047547ec6742f620419f5 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_unsqueeze_op_xpu.py @@ -21,20 +21,22 @@ import numpy as np import paddle from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() # Correct: General. class XPUTestUnsqueezeOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "unsqueeze" self.use_dynamic_create_class = False class TestUnsqueezeOp(XPUOpTest): - def setUp(self): self.op_type = "unsqueeze" self.__class__.op_type = "unsqueeze" @@ -70,15 +72,13 @@ class XPUTestUnsqueezeOp(XPUOpTestWrapper): # Correct: Single input index. class TestUnsqueezeOp1(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) - self.axes = (-1, ) + self.axes = (-1,) self.new_shape = (20, 5, 1) # Correct: Mixed input axis. class TestUnsqueezeOp2(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (20, 5) self.axes = (0, -1) @@ -86,7 +86,6 @@ class XPUTestUnsqueezeOp(XPUOpTestWrapper): # Correct: There is duplicated axis. class TestUnsqueezeOp3(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (0, 3, 3) @@ -94,7 +93,6 @@ class XPUTestUnsqueezeOp(XPUOpTestWrapper): # Correct: Reversed axes. class TestUnsqueezeOp4(TestUnsqueezeOp): - def init_test_case(self): self.ori_shape = (10, 2, 5) self.axes = (3, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py index 3b6136cd34646dcac8771644e60c5ca294f3c4eb..1c8715b93cddb370893be331b56f13494e9e1768 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py @@ -23,19 +23,21 @@ import paddle.fluid as fluid import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): - def __init__(self): self.op_name = "update_loss_scaling" self.use_dynamic_create_class = False class TestUpdateLossScalingOp(XPUOpTest): - def setUp(self): self.op_type = "update_loss_scaling" self.init() @@ -47,14 +49,14 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', x)], 'LossScaling': self.prev_loss_scaling * self.incr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def init(self): @@ -78,7 +80,6 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): self.check_output_with_place(place, no_check_set=['Out']) class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): - def setUp(self): self.op_type = "update_loss_scaling" 
self.init() @@ -93,38 +94,37 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): 'FoundInfinite': found_inf, 'PrevLossScaling': self.prev_loss_scaling, 'InGoodSteps': self.num_good_steps, - 'InBadSteps': self.num_bad_steps + 'InBadSteps': self.num_bad_steps, } self.outputs = { 'Out': [('out0', np.zeros_like(x))], 'LossScaling': self.prev_loss_scaling * self.decr_ratio, 'OutGoodSteps': self.zero_steps, - 'OutBadSteps': self.zero_steps + 'OutBadSteps': self.zero_steps, } def test_check_output(self): if paddle.is_compiled_with_xpu(): place = paddle.XPUPlace(0) self.check_output_with_place(place) - #self.check_output() + # self.check_output() class TestUpdateLossScalingLayer(unittest.TestCase): - def loss_scaling_check(self, scope=fluid.Scope()): a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -138,33 +138,41 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = fluid.XPUPlace(0) exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], a_v) assert np.array_equal(result_v[1], b_v) assert np.array_equal(result_v[0], result_v[2]) @@ -179,15 +187,15 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): b = fluid.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data(name="prev_loss_scaling", - shape=[1], - dtype='float32') - num_good_steps = fluid.data(name="num_good_steps", - shape=[1], - dtype='int32') - num_bad_steps = fluid.data(name="num_bad_steps", - shape=[1], - dtype='int32') + prev_loss_scaling = fluid.data( + name="prev_loss_scaling", shape=[1], dtype='float32' + ) + 
num_good_steps = fluid.data( + name="num_good_steps", shape=[1], dtype='int32' + ) + num_bad_steps = fluid.data( + name="num_bad_steps", shape=[1], dtype='int32' + ) a_v = np.random.random([1024, 1024]).astype('float32') b_v = np.random.random([512, 128]).astype('float32') @@ -204,33 +212,41 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper): incr_ratio = 2 decr_ratio = 0.8 - result = amp_nn.update_loss_scaling(x, - found_inf, - prev_loss_scaling, - num_good_steps, - num_bad_steps, - incr_every_n_steps, - decr_every_n_nan_or_inf, - incr_ratio, - decr_ratio, - name="update_loss_scaling") + result = amp_nn.update_loss_scaling( + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + incr_every_n_steps, + decr_every_n_nan_or_inf, + incr_ratio, + decr_ratio, + name="update_loss_scaling", + ) place = fluid.XPUPlace(0) exe = fluid.Executor(place) with fluid.scope_guard(scope): exe.run(fluid.default_startup_program()) - result_v = exe.run(feed={ - 'a': a_v, - 'b': b_v, - 'found_inf': found_inf_v, - 'prev_loss_scaling': prev_loss_scaling_v, - 'num_good_steps': num_good_steps_v, - 'num_bad_steps': num_bad_steps_v - }, - fetch_list=[ - result, x, found_inf, prev_loss_scaling, - num_good_steps, num_bad_steps - ]) + result_v = exe.run( + feed={ + 'a': a_v, + 'b': b_v, + 'found_inf': found_inf_v, + 'prev_loss_scaling': prev_loss_scaling_v, + 'num_good_steps': num_good_steps_v, + 'num_bad_steps': num_bad_steps_v, + }, + fetch_list=[ + result, + x, + found_inf, + prev_loss_scaling, + num_good_steps, + num_bad_steps, + ], + ) assert np.array_equal(result_v[0], np.zeros_like(a_v)) assert np.array_equal(result_v[1], np.zeros_like(b_v)) assert np.array_equal(result_v[2], np.zeros_like(a_v)) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py index 6c38d74ccea2f1285c76d31c60283a1cf5df0d84..8123bcd73f96c87e9a7db93168043e15e1901038 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py @@ -23,18 +23,20 @@ import paddle.fluid as fluid from paddle.fluid import Program, program_guard from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestWhereIndexOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'where_index' class TestWhereIndexOp(XPUOpTest): - def setUp(self): self.init_config() self.init_data() @@ -55,7 +57,6 @@ class XPUTestWhereIndexOp(XPUOpTestWrapper): self.__class__.no_need_check_grad = True class TestAllFalse(TestWhereIndexOp): - def init_data(self): self.inputs = { 'Condition': np.array([False, False, False]).astype(self.dtype), @@ -63,29 +64,31 @@ class XPUTestWhereIndexOp(XPUOpTestWrapper): self.outputs = {'Out': np.array([], dtype='int64')} class TestRank2(TestWhereIndexOp): - def init_data(self): self.inputs = { - 'Condition': - np.array([[True, False], [False, True]]).astype(self.dtype), + 'Condition': np.array([[True, False], [False, True]]).astype( + self.dtype + ), } self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')} class TestRank3(TestWhereIndexOp): - def init_data(self): self.inputs = { - 'Condition': - np.array([[[True, False], [False, True]], - [[False, True], [True, False]], - [[False, False], [False, True]]]).astype(self.dtype), + 'Condition': 
np.array( + [ + [[True, False], [False, True]], + [[False, True], [True, False]], + [[False, False], [False, True]], + ] + ).astype(self.dtype), } self.outputs = { - 'Out': - np.array( + 'Out': np.array( [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]], - dtype='int64') + dtype='int64', + ) } @@ -95,7 +98,6 @@ for stype in support_types: class TestWhereOpError(unittest.TestCase): - def test_api(self): with program_guard(Program(), Program()): cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') @@ -108,9 +110,7 @@ class TestWhereOpError(unittest.TestCase): class TestWhereRaiseError(unittest.TestCase): - def test_errors(self): - def test_type(): fluid.layers.where([10]) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py index 018079109c9c1d40cdca00c262e3db25db1d72df..18af22f3c6465606c74cf882e93f83fcbdb07e6a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py @@ -23,18 +23,20 @@ import paddle.fluid as fluid from paddle.fluid.backward import append_backward from op_test_xpu import XPUOpTest -from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +from xpu.get_test_cover_info import ( + create_test_class, + get_xpu_op_support_types, + XPUOpTestWrapper, +) paddle.enable_static() class XPUTestWhereOp(XPUOpTestWrapper): - def __init__(self): self.op_name = 'where' class TestXPUWhereOp(XPUOpTest): - def setUp(self): self.init_config() self.init_data() @@ -56,19 +58,18 @@ class XPUTestWhereOp(XPUOpTestWrapper): self.check_output_with_place(self.place) class TestXPUWhereOp2(TestXPUWhereOp): - def init_data(self): self.x = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype) self.y = np.random.uniform(-5, 5, (60, 2)).astype(self.dtype) self.cond = np.ones((60, 2)).astype("bool") class TestXPUWhereOp3(TestXPUWhereOp): - def init_data(self): self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype) self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype(self.dtype) - self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), - dtype=bool) + self.cond = np.array( + np.random.randint(2, size=(20, 2, 4)), dtype=bool + ) support_types = get_xpu_op_support_types('where') @@ -77,7 +78,6 @@ for stype in support_types: class TestXPUWhereAPI(unittest.TestCase): - def setUp(self): self.__class__.use_xpu = True self.place = paddle.XPUPlace(0) @@ -102,9 +102,9 @@ class TestXPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - cond = fluid.data(name='cond', - shape=self.shape, - dtype='bool') + cond = fluid.data( + name='cond', shape=self.shape, dtype='bool' + ) x = fluid.data(name='x', shape=self.shape, dtype='float32') y = fluid.data(name='y', shape=self.shape, dtype='float32') @@ -122,24 +122,25 @@ class TestXPUWhereAPI(unittest.TestCase): fetch_list.append(x.grad_name) if y_stop_gradient is False: fetch_list.append(y.grad_name) - out = exe.run(train_prog, - feed={ - 'cond': self.cond, - 'x': self.x, - 'y': self.y - }, - fetch_list=fetch_list) + out = exe.run( + train_prog, + feed={'cond': self.cond, 'x': self.x, 'y': self.y}, + fetch_list=fetch_list, + ) assert np.array_equal(out[0], self.out) if x_stop_gradient is False: - assert np.array_equal(out[2], - self.ref_x_backward(out[1])) + assert np.array_equal( + out[2], self.ref_x_backward(out[1]) + ) if y.stop_gradient is False: - assert 
np.array_equal(out[3], - self.ref_y_backward(out[1])) + assert np.array_equal( + out[3], self.ref_y_backward(out[1]) + ) elif y.stop_gradient is False: - assert np.array_equal(out[2], - self.ref_y_backward(out[1])) + assert np.array_equal( + out[2], self.ref_y_backward(out[1]) + ) def test_api_broadcast(self, use_cuda=False): train_prog = fluid.Program() @@ -148,24 +149,21 @@ class TestXPUWhereAPI(unittest.TestCase): x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") - y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, - 1.0]]).astype("float32") + y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( + "float32" + ) result = paddle.where(x > 1, x=x, y=y) exe = fluid.Executor(self.place) exe.run(startup) - out = exe.run(train_prog, - feed={ - 'x': x_i, - 'y': y_i - }, - fetch_list=[result]) + out = exe.run( + train_prog, feed={'x': x_i, 'y': y_i}, fetch_list=[result] + ) assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i)) class TestWhereDygraphAPI(unittest.TestCase): - def test_api(self): with fluid.dygraph.guard(paddle.XPUPlace(0)): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py index 80fe5f8c1444016e06aba6d086e4200f4a2b6db3..c1fa366cc88b2f7d8fb8ba2cdbed7790b34c6525 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py @@ -24,20 +24,16 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): - def simple_net(self): - d0 = layers.data("d0", - shape=[10], - append_batch_size=False, - dtype='float32') - d1 = layers.data("d1", - shape=[10], - append_batch_size=False, - dtype='float32') - d2 = layers.data("d2", - shape=[10], - append_batch_size=False, - dtype='float32') + d0 = layers.data( + "d0", shape=[10], append_batch_size=False, dtype='float32' + ) + d1 = layers.data( + "d1", shape=[10], append_batch_size=False, dtype='float32' + ) + d2 = layers.data( + "d2", shape=[10], append_batch_size=False, dtype='float32' + ) i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True init = layers.zeros(shape=[10], dtype='float32') @@ -95,12 +91,10 @@ class TestWhileOp(unittest.TestCase): for i in range(3): d.append(numpy.random.random(size=[10]).astype('float32')) - outs = exe.run(feed={ - 'd0': d[0], - 'd1': d[1], - 'd2': d[2] - }, - fetch_list=[sum_result]) + outs = exe.run( + feed={'d0': d[0], 'd1': d[1], 'd2': d[2]}, + fetch_list=[sum_result], + ) self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) def test_simple_net_forward(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_xpu_place.py b/python/paddle/fluid/tests/unittests/xpu/test_xpu_place.py index 3576bbdaed3b996d7b681a9510fbbd28a9a36a17..75679047301df4757a67097d9cf2f940d9bd1d1c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_xpu_place.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_xpu_place.py @@ -21,7 +21,6 @@ import paddle.static as static class Test_XPU_Places(unittest.TestCase): - def assert_places_equal(self, places0, places1): self.assertEqual(len(places0), len(places1)) for place0, place1 in zip(places0, places1): diff --git a/python/paddle/fluid/trainer_desc.py b/python/paddle/fluid/trainer_desc.py index 
c4c17c7095aa093276086b375d742e8e8b1f33ac..f8151297c2d108cab1b3f030e453c8ddb6c24f5e 100644 --- a/python/paddle/fluid/trainer_desc.py +++ b/python/paddle/fluid/trainer_desc.py @@ -17,8 +17,12 @@ import sys import os __all__ = [ - 'TrainerDesc', 'MultiTrainer', 'DistMultiTrainer', 'PipelineTrainer', - 'HeterXpuTrainer', 'HeterPipelineTrainer' + 'TrainerDesc', + 'MultiTrainer', + 'DistMultiTrainer', + 'PipelineTrainer', + 'HeterXpuTrainer', + 'HeterPipelineTrainer', ] @@ -43,8 +47,10 @@ class TrainerDesc(object): sys.path.append(cur_path + "/proto") from proto import trainer_desc_pb2 + self.proto_desc = trainer_desc_pb2.TrainerDesc() import multiprocessing as mp + # set default thread num == cpu count self.proto_desc.thread_num = mp.cpu_count() self._fleet_desc = None @@ -53,26 +59,26 @@ class TrainerDesc(object): self._infer = False def _set_heter_info(self, ret): - #ret = = fu.split_program_by_device(program) - #start_list, end_list, send_list, recv_list, program_list = fu.split_program_by_device(program) - #if len(start_list) != 3: + # ret = = fu.split_program_by_device(program) + # start_list, end_list, send_list, recv_list, program_list = fu.split_program_by_device(program) + # if len(start_list) != 3: # print("start_list len=", len(start_list), " will not set heter info") # return - #for i in start_list[0]: + # for i in start_list[0]: # self.proto_desc.op_run_start_idx.append(i) - #for i in end_list[0]: + # for i in end_list[0]: # self.proto_desc.op_run_end_idx.append(i) - #for i in send_list[0]: + # for i in send_list[0]: # self.proto_desc.op_run_send_list.append(i) - #for i in recv_list[0]: + # for i in recv_list[0]: # self.proto_desc.op_run_recv_list.append(i) if ret is None: return - #for i in ret[0]: # start_list[1]: + # for i in ret[0]: # start_list[1]: # self.proto_desc.xpu_start_idx.append(i) self.proto_desc.xpu_start_idx = ret[0] - #for i in ret[1]: #end_list[1]: + # for i in ret[1]: #end_list[1]: # self.proto_desc.o_end_idx.append(i) self.proto_desc.xpu_end_idx = ret[1] for i in ret[2]: # send_list[1]: @@ -80,13 +86,13 @@ class TrainerDesc(object): for i in ret[3]: # recv_list[1]: self.proto_desc.xpu_recv_list.append(i) - #for i in start_list[2]: + # for i in start_list[2]: # self.proto_desc.op_run_end_start_idx.append(i) - #for i in end_list[2]: + # for i in end_list[2]: # self.proto_desc.op_run_end_idx.append(i) - #for i in send_list[2]: + # for i in send_list[2]: # self.proto_desc.op_run_end_send_list.append(i) - #for i in recv_list[2]: + # for i in recv_list[2]: # self.proto_desc.op_run_end_recv_list.append(i) def _set_fetch_var_and_info(self, fetch_vars, fetch_info, print_period): @@ -95,7 +101,8 @@ class TrainerDesc(object): for i, v in enumerate(fetch_vars): self.proto_desc.fetch_config.fetch_var_names.extend([v.name]) self.proto_desc.fetch_config.fetch_var_str_format.extend( - [fetch_info[i]]) + [fetch_info[i]] + ) self.proto_desc.fetch_config.print_period = print_period def _set_debug(self, debug): @@ -114,6 +121,7 @@ class TrainerDesc(object): self._fleet_desc = fleet_desc ## serialize fleet_desc from google.protobuf import text_format + fleet_desc_str = text_format.MessageToString(fleet_desc) self.proto_desc.fleet_desc = fleet_desc_str @@ -137,8 +145,11 @@ class TrainerDesc(object): self.proto_desc.no_cvm = no_cvm def _set_scale_sparse_grad_with_batch_size( - self, scale_sparse_gradient_with_batch_size=True): - self.proto_desc.scale_sparse_gradient_with_batch_size = scale_sparse_gradient_with_batch_size + self, scale_sparse_gradient_with_batch_size=True + ): + 
self.proto_desc.scale_sparse_gradient_with_batch_size = ( + scale_sparse_gradient_with_batch_size + ) def _set_scale_datanorm(self, scale_datanorm=-1): self.proto_desc.scale_datanorm = scale_datanorm @@ -203,16 +214,21 @@ class TrainerDesc(object): self.proto_desc.loss_names.append(loss) def _set_adjust_ins_weight(self, config_dict): - self.proto_desc.adjust_ins_weight_config.need_adjust = \ - config_dict.get("need_adjust", False) - self.proto_desc.adjust_ins_weight_config.nid_slot = \ - config_dict.get("nid_slot", "") - self.proto_desc.adjust_ins_weight_config.nid_adjw_threshold = \ - config_dict.get("nid_adjw_threshold", 0.0) - self.proto_desc.adjust_ins_weight_config.nid_adjw_ratio = \ - config_dict.get("nid_adjw_ratio", 0.0) - self.proto_desc.adjust_ins_weight_config.ins_weight_slot = \ - config_dict.get("ins_weight_slot", "") + self.proto_desc.adjust_ins_weight_config.need_adjust = config_dict.get( + "need_adjust", False + ) + self.proto_desc.adjust_ins_weight_config.nid_slot = config_dict.get( + "nid_slot", "" + ) + self.proto_desc.adjust_ins_weight_config.nid_adjw_threshold = ( + config_dict.get("nid_adjw_threshold", 0.0) + ) + self.proto_desc.adjust_ins_weight_config.nid_adjw_ratio = ( + config_dict.get("nid_adjw_ratio", 0.0) + ) + self.proto_desc.adjust_ins_weight_config.ins_weight_slot = ( + config_dict.get("ins_weight_slot", "") + ) def _set_copy_table_config(self, config_dict): config = self.proto_desc.copy_table_config @@ -227,9 +243,9 @@ class TrainerDesc(object): dest_sparse_tables = [dest_sparse_tables] if len(src_sparse_tables) != len(dest_sparse_tables): raise ValueError( - "len(src_sparse_tables) != len(dest_sparse_tables)," \ - " %s vs %s" % (len(src_sparse_tables), \ - len(dest_sparse_tables))) + "len(src_sparse_tables) != len(dest_sparse_tables)," + " %s vs %s" % (len(src_sparse_tables), len(dest_sparse_tables)) + ) for i in src_sparse_tables: config.src_sparse_tables.append(i) for i in dest_sparse_tables: @@ -243,9 +259,9 @@ class TrainerDesc(object): dest_dense_tables = [dest_dense_tables] if len(src_dense_tables) != len(dest_dense_tables): raise ValueError( - "len(src_dense_tables) != len(dest_dense_tables)," \ - " %s vs %s" % (len(src_dense_tables), \ - len(dest_dense_tables))) + "len(src_dense_tables) != len(dest_dense_tables)," + " %s vs %s" % (len(src_dense_tables), len(dest_dense_tables)) + ) for i in src_dense_tables: config.src_dense_tables.append(i) for i in dest_dense_tables: @@ -261,8 +277,9 @@ class TrainerDesc(object): dest_var_list = [dest_var_list] if len(src_var_list) != len(dest_var_list): raise ValueError( - "len(src_var_list) != len(dest_var_list), %s vs" \ - " %s" % (len(src_var_list), len(dest_var_list))) + "len(src_var_list) != len(dest_var_list), %s vs" + " %s" % (len(src_var_list), len(dest_var_list)) + ) for i in src_var_list: config.src_var_list.append(i) for i in dest_var_list: @@ -279,19 +296,22 @@ class TrainerDesc(object): raise ValueError("dependency len %s != 1" % len(values)) for value in values: m.values.append(value) - config.dense_pull_after_copy = \ - config_dict.get("dense_pull_after_copy", True) - config.enable_dependency = \ - config_dict.get("enable_dependency", False) - config.sparse_copy_by_feasign = \ - config_dict.get("sparse_copy_by_feasign", True) + config.dense_pull_after_copy = config_dict.get( + "dense_pull_after_copy", True + ) + config.enable_dependency = config_dict.get("enable_dependency", False) + config.sparse_copy_by_feasign = config_dict.get( + "sparse_copy_by_feasign", True + ) def _desc(self): from 
google.protobuf import text_format + return self.proto_desc.SerializeToString() def __str__(self): from google.protobuf import text_format + return text_format.MessageToString(self.proto_desc) diff --git a/python/paddle/fluid/trainer_factory.py b/python/paddle/fluid/trainer_factory.py index 3ba9f9eea46d1bbf759cc5414a595d00aa148460..7ac367b38fd2cefa0968fca835def1e3a4c9eaab 100644 --- a/python/paddle/fluid/trainer_factory.py +++ b/python/paddle/fluid/trainer_factory.py @@ -19,12 +19,26 @@ import logging import numpy as np from paddle.fluid.log_helper import get_logger -local_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') - -from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer, PSGPUTrainer, HeterPipelineTrainer -from .device_worker import Hogwild, DownpourSGD, DownpourLite, Section, DownpourSGDOPT, HeterSection +local_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) + +from .trainer_desc import ( + MultiTrainer, + DistMultiTrainer, + PipelineTrainer, + HeterXpuTrainer, + PSGPUTrainer, + HeterPipelineTrainer, +) +from .device_worker import ( + Hogwild, + DownpourSGD, + DownpourLite, + Section, + DownpourSGDOPT, + HeterSection, +) from .framework import Variable from multiprocessing import Process, Manager @@ -67,18 +81,24 @@ class TrainerFactory(object): trainer._set_mpi_rank(opt_info["mpi_rank"]) if opt_info.get("mpi_size") is not None: trainer._set_mpi_size(opt_info["mpi_size"]) - if opt_info.get("dump_fields") is not None and len( - opt_info.get("dump_fields")) != 0: + if ( + opt_info.get("dump_fields") is not None + and len(opt_info.get("dump_fields")) != 0 + ): trainer._set_dump_fields(opt_info["dump_fields"]) - if opt_info.get("dump_fields_path") is not None and len( - opt_info.get("dump_fields_path")) != 0: + if ( + opt_info.get("dump_fields_path") is not None + and len(opt_info.get("dump_fields_path")) != 0 + ): trainer._set_dump_fields_path(opt_info["dump_fields_path"]) if opt_info.get("dump_file_num") is not None: trainer._set_dump_file_num(opt_info["dump_file_num"]) if opt_info.get("dump_converter") is not None: trainer._set_dump_converter(opt_info["dump_converter"]) - if opt_info.get("dump_param") is not None and len( - opt_info.get("dump_param")) != 0: + if ( + opt_info.get("dump_param") is not None + and len(opt_info.get("dump_param")) != 0 + ): trainer._set_dump_param(opt_info["dump_param"]) if opt_info.get("worker_places") is not None: trainer._set_worker_places(opt_info["worker_places"]) @@ -86,15 +106,18 @@ class TrainerFactory(object): trainer._set_use_ps_gpu(opt_info["use_ps_gpu"]) if opt_info.get("is_dump_in_simple_mode") is not None: trainer._set_is_dump_in_simple_mode( - opt_info["is_dump_in_simple_mode"]) + opt_info["is_dump_in_simple_mode"] + ) if opt_info.get("enable_random_dump") is not None: trainer._set_enable_random_dump( - opt_info["enable_random_dump"]) + opt_info["enable_random_dump"] + ) if opt_info.get("dump_interval") is not None: trainer._set_dump_interval(opt_info["dump_interval"]) if opt_info.get("random_with_lineid") is not None: trainer._set_random_with_lineid( - opt_info["random_with_lineid"]) + opt_info["random_with_lineid"] + ) if "fleet_desc" in opt_info: device_worker._set_fleet_desc(opt_info["fleet_desc"]) @@ -103,20 +126,25 @@ class TrainerFactory(object): trainer._set_use_cvm(opt_info["use_cvm"]) if opt_info.get("no_cvm") is not None: trainer._set_no_cvm(opt_info["no_cvm"]) - if opt_info.get( - 
"scale_sparse_gradient_with_batch_size") is not None: + if ( + opt_info.get("scale_sparse_gradient_with_batch_size") + is not None + ): trainer._set_scale_sparse_grad_with_batch_size( - opt_info["scale_sparse_gradient_with_batch_size"]) + opt_info["scale_sparse_gradient_with_batch_size"] + ) if opt_info.get("scale_datanorm") is not None: trainer._set_scale_datanorm(opt_info["scale_datanorm"]) if opt_info.get("adjust_ins_weight") is not None: trainer._set_adjust_ins_weight( - opt_info["adjust_ins_weight"]) + opt_info["adjust_ins_weight"] + ) if opt_info.get("copy_table") is not None: trainer._set_copy_table_config(opt_info["copy_table"]) if opt_info.get("check_nan_var_names") is not None: trainer._set_check_nan_var_names( - opt_info["check_nan_var_names"]) + opt_info["check_nan_var_names"] + ) if opt_info.get("loss_names") is not None: trainer._set_loss_names(opt_info["loss_names"]) trainer._set_device_worker(device_worker) @@ -131,8 +159,9 @@ class FetchHandlerMonitor(object): def __init__(self, scope, handler): self.fetch_instance = handler - self.fetch_thread = threading.Thread(target=self.handler_launch_func, - args=(scope, self.fetch_instance)) + self.fetch_thread = threading.Thread( + target=self.handler_launch_func, args=(scope, self.fetch_instance) + ) self.running_lock = threading.Lock() self.running = False @@ -145,7 +174,8 @@ class FetchHandlerMonitor(object): var_name_to_key[fetch_instance.var_dict[key].name] = key else: local_logger.warning( - "the value of {} is not a Variable".format(key)) + "the value of {} is not a Variable".format(key) + ) var_name_to_key["None.var"] = key elapsed_secs = 0 while True: @@ -165,7 +195,9 @@ class FetchHandlerMonitor(object): if var == None: local_logger.warning( "{} value currently not available".format( - var_name_to_key[key])) + var_name_to_key[key] + ) + ) res_dict = {} for key in fetch_dict: user_name = var_name_to_key[key] @@ -177,12 +209,14 @@ class FetchHandlerMonitor(object): lod = res_dict[user_name].lod() if len(lod) > 0: - raise RuntimeError("Some of your fetched tensors \ + raise RuntimeError( + "Some of your fetched tensors \ hold LoD information. \ They can not be completely cast \ to Python ndarray. We can \ not return LoDTensor itself directly, \ - please choose another targets") + please choose another targets" + ) if res_dict[user_name]._is_initialized(): res_dict[user_name] = np.array(res_dict[user_name]) else: diff --git a/python/paddle/fluid/transpiler/__init__.py b/python/paddle/fluid/transpiler/__init__.py index 166b04c21ac761309f4e542342a2e55e4f83eea8..8da4210dba2e760883b3bb122bb7aa687645e25c 100644 --- a/python/paddle/fluid/transpiler/__init__.py +++ b/python/paddle/fluid/transpiler/__init__.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig +from .distribute_transpiler import ( + DistributeTranspiler, + DistributeTranspilerConfig, +) from .memory_optimization_transpiler import memory_optimize, release_memory from .ps_dispatcher import HashName, RoundRobin diff --git a/python/paddle/fluid/transpiler/ascend_transpiler.py b/python/paddle/fluid/transpiler/ascend_transpiler.py index 69fb2b1833655382d0b64202388ccae745638451..60ab474e9044c376fd4e5861cd000011b23b7ec3 100644 --- a/python/paddle/fluid/transpiler/ascend_transpiler.py +++ b/python/paddle/fluid/transpiler/ascend_transpiler.py @@ -20,7 +20,6 @@ from paddle.distributed import fleet class AscendTranspiler(collective.Collective): - def __init__(self, startup_program, main_program): self.nrings = 1 super(AscendTranspiler, self).__init__(self.nrings) @@ -32,8 +31,10 @@ class AscendTranspiler(collective.Collective): ring_id = -1 grad = None for idx, op in reversed(list(enumerate(block.ops))): - if self._is_backward_op(op) and \ - self.op_role_var_key in op.attr_names: + if ( + self._is_backward_op(op) + and self.op_role_var_key in op.attr_names + ): op_role_var = op.all_attrs()[self.op_role_var_key] if len(op_role_var) == 0: @@ -50,22 +51,26 @@ class AscendTranspiler(collective.Collective): # As we search ops reversedly, we should insert c_allreduce_sum # op in the same way to keep the ring_id alternate ring_id = (ring_id + 1) % self.nrings - block._insert_op(offset + 1, - type='c_allreduce_sum', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) - block._insert_op(offset + 2, - type='scale', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'scale': 1.0 / fleet.worker_num(), - self.op_role_key: OpRole.Backward - }) + block._insert_op( + offset + 1, + type='c_allreduce_sum', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) + block._insert_op( + offset + 2, + type='scale', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'scale': 1.0 / fleet.worker_num(), + self.op_role_key: OpRole.Backward, + }, + ) if grad is None: return diff --git a/python/paddle/fluid/transpiler/collective.py b/python/paddle/fluid/transpiler/collective.py index 84912529cfbee0d6661966a6fd4eaeeab9fa8f20..aedbaeb43cde7274a1adf9f1bdb292a5d7564a89 100644 --- a/python/paddle/fluid/transpiler/collective.py +++ b/python/paddle/fluid/transpiler/collective.py @@ -32,8 +32,7 @@ OpRole = core.op_proto_and_checker_maker.OpRole class Collective(object): - ''' - ''' + ''' ''' def __init__(self, nrings): self.nrings = nrings @@ -48,8 +47,15 @@ class Collective(object): self.op_role_key = op_maker.kOpRoleAttrName() self.op_role_var_key = op_maker.kOpRoleVarAttrName() - def transpile(self, startup_program, main_program, rank, endpoints, - current_endpoint, wait_port): + def transpile( + self, + startup_program, + main_program, + rank, + endpoints, + current_endpoint, + wait_port, + ): # in case of '127.0.0.1:6700,127.0.0.1:6701,...' 
if isinstance(endpoints, str): endpoints = endpoints.split(',') @@ -63,7 +69,11 @@ class Collective(object): self.main_program = default_main_program() self.nranks = len(endpoints) - if self.nranks == 1 and self.mode != "single_process_multi_thread" and self.mode != "box": + if ( + self.nranks == 1 + and self.mode != "single_process_multi_thread" + and self.mode != "box" + ): raise ValueError('the number of endpoints must > 1') if rank < 0: @@ -71,8 +81,11 @@ class Collective(object): self.rank = rank if current_endpoint not in endpoints: - raise ValueError('current endpoint %s is not in %s', - current_endpoint, str(endpoints)) + raise ValueError( + 'current endpoint %s is not in %s', + current_endpoint, + str(endpoints), + ) self.endpoints = endpoints self.current_endpoint = current_endpoint @@ -96,19 +109,26 @@ class Collective(object): def _transpile_startup_program(self): for ring_id in range(self.nrings): - self._init_communicator(self.startup_program, self.current_endpoint, - self.endpoints, self.rank, ring_id, - self.wait_port) + self._init_communicator( + self.startup_program, + self.current_endpoint, + self.endpoints, + self.rank, + ring_id, + self.wait_port, + ) self._broadcast_params() - def _init_communicator(self, - program, - current_endpoint, - endpoints, - rank, - ring_id, - wait_port, - has_multitrainer=False): + def _init_communicator( + self, + program, + current_endpoint, + endpoints, + rank, + ring_id, + wait_port, + has_multitrainer=False, + ): nranks = len(endpoints) other_endpoints = endpoints[:] other_endpoints.remove(current_endpoint) @@ -119,63 +139,76 @@ class Collective(object): block = program.global_block() if core.is_compiled_with_npu(): - hccl_id_var = block.create_var(name=unique_name.generate('hccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) + hccl_id_var = block.create_var( + name=unique_name.generate('hccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) endpoint_to_index_map = {e: idx for idx, e in enumerate(endpoints)} - block.append_op(type='c_gen_hccl_id', - inputs={}, - outputs={'Out': hccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - self.op_role_key: OpRole.Forward - }) - block.append_op(type='c_comm_init_hccl', - inputs={'X': hccl_id_var}, - outputs={}, - attrs={ - 'rank': rank, - 'ring_id': ring_id, - 'device_id': - int(os.getenv("FLAGS_selected_npus")), - 'rank_ids': nranks, - self.op_role_key: OpRole.Forward - }) + block.append_op( + type='c_gen_hccl_id', + inputs={}, + outputs={'Out': hccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + self.op_role_key: OpRole.Forward, + }, + ) + block.append_op( + type='c_comm_init_hccl', + inputs={'X': hccl_id_var}, + outputs={}, + attrs={ + 'rank': rank, + 'ring_id': ring_id, + 'device_id': int(os.getenv("FLAGS_selected_npus")), + 'rank_ids': nranks, + self.op_role_key: OpRole.Forward, + }, + ) else: - nccl_id_var = block.create_var(name=unique_name.generate('nccl_id'), - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': nccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints, - self.op_role_key: OpRole.Forward - }) + nccl_id_var = block.create_var( + name=unique_name.generate('nccl_id'), + persistable=True, + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': nccl_id_var}, + 
attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + self.op_role_key: OpRole.Forward, + }, + ) if not has_multitrainer: - block.append_op(type='c_comm_init', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': ring_id, - self.op_role_key: OpRole.Forward - }) + block.append_op( + type='c_comm_init', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': ring_id, + self.op_role_key: OpRole.Forward, + }, + ) else: - block.append_op(type='c_comm_init_multitrainer', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'ntrainers': nranks, - 'trainer_id': rank, - 'ring_id': ring_id, - self.op_role_key: OpRole.Forward - }) + block.append_op( + type='c_comm_init_multitrainer', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'ntrainers': nranks, + 'trainer_id': rank, + 'ring_id': ring_id, + self.op_role_key: OpRole.Forward, + }, + ) def _broadcast_params(self): block = self.startup_program.global_block() @@ -185,23 +218,24 @@ class Collective(object): continue ring_id = (ring_id + 1) % self.nrings - block.append_op(type='c_broadcast', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - 'root': 0, - self.op_role_key: OpRole.Forward - }) + block.append_op( + type='c_broadcast', + inputs={'X': param}, + outputs={'Out': param}, + attrs={ + 'ring_id': ring_id, + 'root': 0, + self.op_role_key: OpRole.Forward, + }, + ) for ring_id in range(self.nrings): - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Forward - }) + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, self.op_role_key: OpRole.Forward}, + ) def _is_loss_grad_op(self, op): if self.op_role_key not in op.attr_names: @@ -210,21 +244,25 @@ class Collective(object): return op_role & int(OpRole.Backward) and op_role & int(OpRole.Loss) def _is_backward_op(self, op): - return self.op_role_key in op.attr_names and \ - int(op.all_attrs()[self.op_role_key]) & int(OpRole.Backward) + return self.op_role_key in op.attr_names and int( + op.all_attrs()[self.op_role_key] + ) & int(OpRole.Backward) def _is_update_op(self, op): - return 'Param' in op.input_names and 'Grad' in op.input_names and \ - "LearningRate" in op.input_names + return ( + 'Param' in op.input_names + and 'Grad' in op.input_names + and "LearningRate" in op.input_names + ) def _is_optimizer_op(self, op): - return self.op_role_key in op.attr_names and \ - int(op.all_attrs()[self.op_role_key]) & int(OpRole.Optimize) + return self.op_role_key in op.attr_names and int( + op.all_attrs()[self.op_role_key] + ) & int(OpRole.Optimize) class GradAllReduce(Collective): - ''' - ''' + ''' ''' def __init__(self, nrings=2): Collective.__init__(self, nrings) @@ -243,22 +281,26 @@ class GradAllReduce(Collective): for idx, op in reversed(list(enumerate(block.ops))): if self._is_loss_grad_op(op): loss_grad_var = block.vars[op.output_arg_names[0]] - block._insert_op(idx + 1, - type='scale', - inputs={'X': loss_grad_var}, - outputs={'Out': loss_grad_var}, - attrs={ - 'scale': 1.0 / self.nranks, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + idx + 1, + type='scale', + inputs={'X': loss_grad_var}, + outputs={'Out': loss_grad_var}, + attrs={ + 'scale': 1.0 / self.nranks, + self.op_role_key: OpRole.Backward, + }, + ) def _insert_allreduce_ops(self): 
block = self.main_program.global_block() ring_id = -1 grad = None for idx, op in reversed(list(enumerate(block.ops))): - if self._is_backward_op(op) and \ - self.op_role_var_key in op.attr_names: + if ( + self._is_backward_op(op) + and self.op_role_var_key in op.attr_names + ): op_role_var = op.all_attrs()[self.op_role_var_key] if len(op_role_var) == 0: @@ -279,20 +321,23 @@ class GradAllReduce(Collective): type='c_sync_calc_stream', inputs={'X': grad}, outputs={'Out': grad}, - attrs={self.op_role_key: OpRole.Backward}) + attrs={self.op_role_key: OpRole.Backward}, + ) offset += 1 # As we search ops reversedly, we should insert c_allreduce_sum # op in the same way to keep the ring_id alternate ring_id = (ring_id + 1) % self.nrings - block._insert_op(offset, - type='c_allreduce_sum', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + offset, + type='c_allreduce_sum', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) if grad is None: return @@ -300,20 +345,21 @@ class GradAllReduce(Collective): for idx, op in enumerate(block.ops): if self._is_optimizer_op(op): for ring_id in range(self.nrings): - block._insert_op(idx + ring_id, - type='c_sync_comm_stream', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + idx + ring_id, + type='c_sync_comm_stream', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) break class LocalSGD(Collective): - ''' - ''' + ''' ''' def __init__(self, nrings=2): Collective.__init__(self, nrings) @@ -330,14 +376,18 @@ class LocalSGD(Collective): non_dist_params.append(param) for param in non_dist_params: - snapshot = block.create_var(name=self.snapshot_name(param.name), - shape=param.shape, - persistable=True, - stop_gradient=True) - block.append_op(type='assign', - inputs={'X': [param]}, - outputs={'Out': [snapshot]}, - attrs={self.op_role_key: OpRole.Forward}) + snapshot = block.create_var( + name=self.snapshot_name(param.name), + shape=param.shape, + persistable=True, + stop_gradient=True, + ) + block.append_op( + type='assign', + inputs={'X': [param]}, + outputs={'Out': [snapshot]}, + attrs={self.op_role_key: OpRole.Forward}, + ) def snapshot_name(self, param_name): return param_name + self.snapshot_key @@ -352,72 +402,78 @@ class LocalSGD(Collective): if param.is_distributed: continue - snapshot = block.create_var(name=self.snapshot_name(param.name), - shape=param.shape, - persistable=True, - stop_gradient=True, - dtype=param.dtype) - - block._insert_op(idx + 1, - type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] - }, - outputs={'Out': [param]}, - attrs={self.op_role_key: OpRole.Optimize}) - block._insert_op(idx + 2, - type='c_sync_calc_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={self.op_role_key: OpRole.Optimize}) + snapshot = block.create_var( + name=self.snapshot_name(param.name), + shape=param.shape, + persistable=True, + stop_gradient=True, + dtype=param.dtype, + ) + + block._insert_op( + idx + 1, + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={self.op_role_key: OpRole.Optimize}, + ) + block._insert_op( + idx + 2, + type='c_sync_calc_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={self.op_role_key: OpRole.Optimize}, + ) 
ring_id = (ring_id + 1) % self.nrings - block._insert_op(idx + 3, - type='c_allreduce_sum', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Optimize - }) + block._insert_op( + idx + 3, + type='c_allreduce_sum', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Optimize, + }, + ) ordered_param_snapshot.append((param, snapshot)) for ring_id in range(self.nrings): - block.append_op(type='c_sync_comm_stream', - inputs={'X': param}, - outputs={'Out': param}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Optimize - }) + block.append_op( + type='c_sync_comm_stream', + inputs={'X': param}, + outputs={'Out': param}, + attrs={'ring_id': ring_id, self.op_role_key: OpRole.Optimize}, + ) for param_snapshot in reversed(ordered_param_snapshot): param = param_snapshot[0] snapshot = param_snapshot[1] - block.append_op(type='scale', - inputs={'X': [param]}, - outputs={'Out': [param]}, - attrs={ - 'scale': 1.0 / self.nranks, - self.op_role_key: OpRole.Optimize - }) - block.append_op(type='elementwise_sub', - inputs={ - 'X': [snapshot], - 'Y': [param] - }, - outputs={'Out': [param]}, - attrs={self.op_role_key: OpRole.Optimize}) - block.append_op(type='assign', - inputs={'X': [param]}, - outputs={'Out': [snapshot]}, - attrs={self.op_role_key: OpRole.Optimize}) + block.append_op( + type='scale', + inputs={'X': [param]}, + outputs={'Out': [param]}, + attrs={ + 'scale': 1.0 / self.nranks, + self.op_role_key: OpRole.Optimize, + }, + ) + block.append_op( + type='elementwise_sub', + inputs={'X': [snapshot], 'Y': [param]}, + outputs={'Out': [param]}, + attrs={self.op_role_key: OpRole.Optimize}, + ) + block.append_op( + type='assign', + inputs={'X': [param]}, + outputs={'Out': [snapshot]}, + attrs={self.op_role_key: OpRole.Optimize}, + ) class SingleProcessMultiThread(GradAllReduce): - ''' - ''' + ''' ''' def __init__(self): GradAllReduce.__init__(self, 1) @@ -429,16 +485,16 @@ class SingleProcessMultiThread(GradAllReduce): class MultiThread(GradAllReduce): - ''' - ''' + ''' ''' def __init__(self, nrings=1, trans_mode="all_reduce"): GradAllReduce.__init__(self, nrings) self.mode = "box" self.trans_mode = trans_mode self.fuse_grad_size_in_num = 128 - gpu_nums = os.getenv("FLAGS_selected_gpus", - "0,1,2,3,4,5,6,7,8").split(",") + gpu_nums = os.getenv("FLAGS_selected_gpus", "0,1,2,3,4,5,6,7,8").split( + "," + ) self.gpu_num = len(gpu_nums) def _transpile_startup_program(self): @@ -448,10 +504,15 @@ class MultiThread(GradAllReduce): print("total endpoints: ", self.endpoints) print("rank: %d, ring_id: %d" % (self.rank, self.nrings)) for ring_id in range(self.nrings): - self._init_communicator(self.startup_program, - self.current_endpoint, self.endpoints, - self.rank, ring_id, self.wait_port, - True) + self._init_communicator( + self.startup_program, + self.current_endpoint, + self.endpoints, + self.rank, + ring_id, + self.wait_port, + True, + ) else: if "xpu" in self.trans_mode: @@ -462,13 +523,14 @@ class MultiThread(GradAllReduce): block.append_op( type='c_comm_init_all', attrs={ - 'devices': - list( - map(int, - os.getenv("FLAGS_selected_gpus").split(","))), - 'ring_id': - 0 - }) + 'devices': list( + map( + int, os.getenv("FLAGS_selected_gpus").split(",") + ) + ), + 'ring_id': 0, + }, + ) else: print("begin to _transpile_startup_program for single-node") block = self.startup_program.global_block() @@ -484,8 +546,10 @@ class MultiThread(GradAllReduce): elif self.trans_mode == 
"fuse_all_reduce": print("begin to transpile in fuse all-reduce mode") self._insert_fuse_allreduce_ops() - elif self.trans_mode == "all_reduce_xpu" and len( - os.getenv("FLAGS_selected_gpus").split(",")) == 1: + elif ( + self.trans_mode == "all_reduce_xpu" + and len(os.getenv("FLAGS_selected_gpus").split(",")) == 1 + ): print( "skip transpile in all-reduce-xpu mode when number of devices is only one" ) @@ -501,8 +565,10 @@ class MultiThread(GradAllReduce): ring_id = -1 grad = None for idx, op in reversed(list(enumerate(block.ops))): - if self._is_backward_op(op) and \ - self.op_role_var_key in op.attr_names: + if ( + self._is_backward_op(op) + and self.op_role_var_key in op.attr_names + ): op_role_var = op.all_attrs()[self.op_role_var_key] if len(op_role_var) == 0: continue @@ -516,7 +582,8 @@ class MultiThread(GradAllReduce): shape=[self.allgather_ranks] + list(param.shape), persistable=False, dtype=core.VarDesc.VarType.FP32, - stop_gradient=True) + stop_gradient=True, + ) grad = block.vars[op_role_var[i + 1]] if param.is_distributed: # no need to care: used in PLSC continue @@ -528,21 +595,24 @@ class MultiThread(GradAllReduce): type='c_sync_calc_stream', inputs={'X': grad}, outputs={'Out': grad}, - attrs={self.op_role_key: OpRole.Backward}) + attrs={self.op_role_key: OpRole.Backward}, + ) offset += 1 # As we search ops reversedly, we should insert c_allgather # op in the same way to keep the ring_id alternate ring_id = (ring_id + 1) % self.nrings - block._insert_op(offset, - type='c_allgather', - inputs={'X': grad}, - outputs={'Out': new_grad_var}, - attrs={ - 'nranks': self.allgather_ranks, - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + offset, + type='c_allgather', + inputs={'X': grad}, + outputs={'Out': new_grad_var}, + attrs={ + 'nranks': self.allgather_ranks, + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) if grad is None: return @@ -550,14 +620,16 @@ class MultiThread(GradAllReduce): for idx, op in enumerate(block.ops): if self._is_optimizer_op(op): for ring_id in range(self.nrings): - block._insert_op(idx + ring_id, - type='c_sync_comm_stream', - inputs={'X': grad}, - outputs={'Out': grad}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + idx + ring_id, + type='c_sync_comm_stream', + inputs={'X': grad}, + outputs={'Out': grad}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) break def _update_adam_ops(self): @@ -569,7 +641,9 @@ class MultiThread(GradAllReduce): for idx, op in reversed(list(enumerate(block.ops))): if self._is_optimizer_op(op): offset = idx - if op.type != 'adam' and op.type != 'lamb': # filter out scale op + if ( + op.type != 'adam' and op.type != 'lamb' + ): # filter out scale op continue param_name = op.input("Param")[0] inputs = { @@ -578,26 +652,23 @@ class MultiThread(GradAllReduce): "Moment1": block.vars[op.input("Moment1")[0]], "Moment2": block.vars[op.input("Moment2")[0]], "Beta1Pow": block.vars[op.input("Beta1Pow")[0]], - "Beta2Pow": block.vars[op.input("Beta2Pow")[0]] + "Beta2Pow": block.vars[op.input("Beta2Pow")[0]], } outputs = { "ParamOut": block.vars[op.output("ParamOut")[0]], "Moment1Out": block.vars[op.output("Moment1Out")[0]], "Moment2Out": block.vars[op.output("Moment2Out")[0]], "Beta1PowOut": block.vars[op.output("Beta1PowOut")[0]], - "Beta2PowOut": block.vars[op.output("Beta2PowOut")[0]] + "Beta2PowOut": block.vars[op.output("Beta2PowOut")[0]], } attrs = { - "epsilon": - op.attr('epsilon'), - "beta1": - 
op.attr('beta1'), - "beta2": - op.attr('beta2'), - "lazy_mode": - op.attr('lazy_mode'), - "min_row_size_to_use_multithread": - op.attr('min_row_size_to_use_multithread') + "epsilon": op.attr('epsilon'), + "beta1": op.attr('beta1'), + "beta2": op.attr('beta2'), + "lazy_mode": op.attr('lazy_mode'), + "min_row_size_to_use_multithread": op.attr( + 'min_row_size_to_use_multithread' + ), } split_vars = [ block.create_var( @@ -605,29 +676,30 @@ class MultiThread(GradAllReduce): shape=block.vars[op.input("Param")[0]].shape, persistable=False, dtype=core.VarDesc.VarType.FP32, - stop_gradient=True) for i in range(self.allgather_ranks) + stop_gradient=True, + ) + for i in range(self.allgather_ranks) ] - block._insert_op(offset, - type="split", - inputs={ - 'X': - block.vars[op.input("Param")[0] + - "_allgather"] - }, - outputs={'Out': split_vars}, - attrs={ - 'num': self.allgather_ranks, - 'axis': 0 - }) + block._insert_op( + offset, + type="split", + inputs={ + 'X': block.vars[op.input("Param")[0] + "_allgather"] + }, + outputs={'Out': split_vars}, + attrs={'num': self.allgather_ranks, 'axis': 0}, + ) offset += 1 for i in range(self.allgather_ranks): inputs["Grad"] = split_vars[i] - block._insert_op(offset, - type=op.type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + block._insert_op( + offset, + type=op.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) offset += 1 # remove the original adam op block._remove_op(offset) @@ -642,13 +714,17 @@ class MultiThread(GradAllReduce): param_grads = [] # find all grad params for op in reversed(block.ops): - if self._is_backward_op(op) and \ - self.op_role_var_key in op.attr_names: + if ( + self._is_backward_op(op) + and self.op_role_var_key in op.attr_names + ): op_role_var = op.all_attrs()[self.op_role_var_key] if len(op_role_var) == 0: continue - assert len(op_role_var) % 2 == 0, "vars need to be one param var followed by one grad var, " \ - "but got odd number of vars" + assert len(op_role_var) % 2 == 0, ( + "vars need to be one param var followed by one grad var, " + "but got odd number of vars" + ) for i in range(0, len(op_role_var), 2): param_name = op_role_var[i] param = block.var(param_name) @@ -664,9 +740,11 @@ class MultiThread(GradAllReduce): last_dtype = None # split the grad based on dtype and fused size for var in param_grads: - if len(segments) == 0 \ - or len(segments[-1]) == self.fuse_grad_size_in_num \ - or var.dtype != last_dtype: + if ( + len(segments) == 0 + or len(segments[-1]) == self.fuse_grad_size_in_num + or var.dtype != last_dtype + ): segments.append([var]) last_dtype = var.dtype else: @@ -677,45 +755,51 @@ class MultiThread(GradAllReduce): if self._is_optimizer_op(op): for segment in segments: # insert coalesce tensor - tmp_var = block.create_var(name=unique_name.generate( - 'FusedOutput_{}'.format(segment[0].name)), - dtype=segment[0].dtype, - persistable=False, - stop_gradient=True) + tmp_var = block.create_var( + name=unique_name.generate( + 'FusedOutput_{}'.format(segment[0].name) + ), + dtype=segment[0].dtype, + persistable=False, + stop_gradient=True, + ) fused_vars.append(tmp_var) - block._insert_op(idx, - type="coalesce_tensor", - inputs={"Input": segment}, - outputs={ - "Output": segment, - "FusedOutput": tmp_var - }, - attrs={ - "copy_data": True, - "use_align": True, - "dtype": segment[0].dtype, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + idx, + type="coalesce_tensor", + inputs={"Input": segment}, + outputs={"Output": segment, "FusedOutput": tmp_var}, + attrs={ + "copy_data": True, + 
"use_align": True, + "dtype": segment[0].dtype, + self.op_role_key: OpRole.Backward, + }, + ) break # insert the allreduce_sum op for idx, op in enumerate(block.ops): if self._is_optimizer_op(op): for fused_var in fused_vars: - block._insert_op(idx, - type='c_allreduce_sum', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': False, - self.op_role_key: OpRole.Backward - }) - block._insert_op(idx, - type='c_sync_calc_stream', - inputs={'X': fused_var}, - outputs={'Out': fused_var}, - attrs={self.op_role_key: OpRole.Backward}) + block._insert_op( + idx, + type='c_allreduce_sum', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={ + 'ring_id': ring_id, + 'use_calc_stream': False, + self.op_role_key: OpRole.Backward, + }, + ) + block._insert_op( + idx, + type='c_sync_calc_stream', + inputs={'X': fused_var}, + outputs={'Out': fused_var}, + attrs={self.op_role_key: OpRole.Backward}, + ) break if len(fused_vars) == 0: @@ -725,13 +809,15 @@ class MultiThread(GradAllReduce): # insert the sync comm op for idx, op in enumerate(block.ops): if self._is_optimizer_op(op): - block._insert_op(idx, - type='c_sync_comm_stream', - inputs={'X': fused_vars[0]}, - outputs={'Out': fused_vars[0]}, - attrs={ - 'ring_id': ring_id, - self.op_role_key: OpRole.Backward - }) + block._insert_op( + idx, + type='c_sync_comm_stream', + inputs={'X': fused_vars[0]}, + outputs={'Out': fused_vars[0]}, + attrs={ + 'ring_id': ring_id, + self.op_role_key: OpRole.Backward, + }, + ) break block._sync_with_cpp() diff --git a/python/paddle/fluid/transpiler/details/checkport.py b/python/paddle/fluid/transpiler/details/checkport.py index 71c9565a362c64038b82d98dd75eee5110fa98dc..c0d8cdc914502e82b341feab62882ffdee7c3c6f 100644 --- a/python/paddle/fluid/transpiler/details/checkport.py +++ b/python/paddle/fluid/transpiler/details/checkport.py @@ -38,8 +38,9 @@ def wait_server_ready(endpoints): not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") - with closing(socket.socket(socket.AF_INET, - socket.SOCK_STREAM)) as sock: + with closing( + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if hasattr(socket, 'SO_REUSEPORT'): @@ -51,8 +52,9 @@ def wait_server_ready(endpoints): not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("server not ready, wait 3 sec to retry...\n") - sys.stderr.write("not ready endpoints:" + str(not_ready_endpoints) + - "\n") + sys.stderr.write( + "not ready endpoints:" + str(not_ready_endpoints) + "\n" + ) sys.stderr.flush() time.sleep(3) else: diff --git a/python/paddle/fluid/transpiler/details/ufind.py b/python/paddle/fluid/transpiler/details/ufind.py index 0e30d0e3f9c5712c494daf17b2b4bcec86f69c23..fef6f24570c17bfc28dc87d699891d84c292a59e 100644 --- a/python/paddle/fluid/transpiler/details/ufind.py +++ b/python/paddle/fluid/transpiler/details/ufind.py @@ -14,7 +14,7 @@ class UnionFind(object): - """ Union-find data structure. + """Union-find data structure. Union-find is a data structure that keeps track of a set of elements partitioned into a number of disjoint (non-overlapping) subsets. 
diff --git a/python/paddle/fluid/transpiler/details/vars_distributed.py b/python/paddle/fluid/transpiler/details/vars_distributed.py index f78559e6559c03c8ca1d1a0b29ee124f3a765c8e..32a27ed33b0b013166c8810aced35f8779a4e9be 100644 --- a/python/paddle/fluid/transpiler/details/vars_distributed.py +++ b/python/paddle/fluid/transpiler/details/vars_distributed.py @@ -35,14 +35,16 @@ class VarDistributed(object): the slice var's properties, such as type/shape/offset/endpoint. """ - def __init__(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): + def __init__( + self, + origin_var, + slice_var, + is_slice=None, + block_id=None, + offset=None, + vtype=None, + endpoint=None, + ): """ Args: origin_var(Variable|VarStruct): origin var properties @@ -85,8 +87,14 @@ class VarDistributed(object): @staticmethod def __create_var_struct(var): - return VarStruct(var.name, var.shape, var.dtype, var.type, - var.lod_level, var.persistable) + return VarStruct( + var.name, + var.shape, + var.dtype, + var.type, + var.lod_level, + var.persistable, + ) @staticmethod def equal(var1, var2): @@ -97,26 +105,45 @@ class VarDistributed(object): """ assert isinstance(var1, VarStruct) and isinstance(var2, VarStruct) - return var1.name == var2.name and \ - var1.type == var2.type and \ - var1.shape == var2.shape and \ - var1.dtype == var2.dtype and \ - var1.lod_level == var2.lod_level and \ - var1.persistable == var2.persistable + return ( + var1.name == var2.name + and var1.type == var2.type + and var1.shape == var2.shape + and var1.dtype == var2.dtype + and var1.lod_level == var2.lod_level + and var1.persistable == var2.persistable + ) def __str__(self): - origin_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})". \ - format(i="{", e="}", name=self.origin.name, type=self.origin.type, - shape=self.origin.shape, dtype=self.origin.dtype) - - slice_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})" \ - ".slice({is_slice}).block({block_id}).offset({offset})". \ - format(i="{", e="}", name=self.slice.name, type=self.slice.type, - shape=self.slice.shape, dtype=self.slice.dtype, - is_slice=self.is_slice, block_id=self.block_id, offset=self.offset) + origin_var_str = ( + "{name} : fluid.{type}.shape{shape}.astype({dtype})".format( + i="{", + e="}", + name=self.origin.name, + type=self.origin.type, + shape=self.origin.shape, + dtype=self.origin.dtype, + ) + ) + + slice_var_str = ( + "{name} : fluid.{type}.shape{shape}.astype({dtype})" + ".slice({is_slice}).block({block_id}).offset({offset})".format( + i="{", + e="}", + name=self.slice.name, + type=self.slice.type, + shape=self.slice.shape, + dtype=self.slice.dtype, + is_slice=self.is_slice, + block_id=self.block_id, + offset=self.offset, + ) + ) return "var owned: {}, origin var: ( {} ), slice var: ( {} ), endpoint: {} ".format( - self.vtype, origin_var_str, slice_var_str, self.endpoint) + self.vtype, origin_var_str, slice_var_str, self.endpoint + ) class VarsDistributed(object): @@ -130,14 +157,16 @@ class VarsDistributed(object): def __init__(self): self.distributed_vars = [] - def add_distributed_var(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): + def add_distributed_var( + self, + origin_var, + slice_var, + is_slice=None, + block_id=None, + offset=None, + vtype=None, + endpoint=None, + ): """ add distributed var in this. 
@@ -153,8 +182,16 @@ class VarsDistributed(object): None """ self.distributed_vars.append( - VarDistributed(origin_var, slice_var, is_slice, block_id, offset, - vtype, endpoint)) + VarDistributed( + origin_var, + slice_var, + is_slice, + block_id, + offset, + vtype, + endpoint, + ) + ) def get_distributed_var_by_slice(self, var_name): """ @@ -177,12 +214,14 @@ class VarsDistributed(object): Returns: bool: equal will return True else False """ - return var1.name == var2.name and \ - var1.type == var2.type and \ - var1.shape == var2.shape and \ - var1.dtype == var2.dtype and \ - var1.lod_level == var2.lod_level and \ - var1.persistable == var2.persistable + return ( + var1.name == var2.name + and var1.type == var2.type + and var1.shape == var2.shape + and var1.dtype == var2.dtype + and var1.lod_level == var2.lod_level + and var1.persistable == var2.persistable + ) def get_distributed_var_by_origin_and_ep(self, origin_var_name, endpoint): """ @@ -195,7 +234,10 @@ class VarsDistributed(object): VarDistributed: distributed var. """ for dist_var in self.distributed_vars: - if dist_var.origin.name == origin_var_name and dist_var.endpoint == endpoint: + if ( + dist_var.origin.name == origin_var_name + and dist_var.endpoint == endpoint + ): return dist_var return None diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 670f54311225fe1e991b8443c7d84d6857ff9dba..28dbf22153f7759257928275b50353468023d444 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -40,8 +40,14 @@ import numpy as np from .ps_dispatcher import RoundRobin, PSDispatcher from .. import core, framework, unique_name, initializer -from ..framework import Program, default_main_program, \ - default_startup_program, Block, Parameter, grad_var_name +from ..framework import ( + Program, + default_main_program, + default_startup_program, + Block, + Parameter, + grad_var_name, +) from .details import wait_server_ready, UnionFind, VarStruct, VarsDistributed from .details import delete_ops, find_op_by_output_arg from ..distribute_lookup_table import find_distributed_lookup_table @@ -52,8 +58,9 @@ LOOKUP_TABLE_GRAD_TYPE = ["lookup_table_grad", "lookup_table_v2_grad"] OP_NAME_SCOPE = "op_namescope" CLIP_OP_NAME_SCOPE = "@CLIP" OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName() -RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( -) +RPC_OP_ROLE_ATTR_NAME = ( + op_role_attr_name +) = core.op_proto_and_checker_maker.kOpRoleAttrName() OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC DIST_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Dist @@ -75,7 +82,6 @@ def log(*args): class VarBlock: - def __init__(self, varname, offset, size): self.varname = varname # NOTE: real offset is offset * size @@ -129,8 +135,9 @@ def slice_variable(var_list, slice_count, min_block_size): # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) for block_id in range(split_count): - curr_block_size = min(block_size, - var_numel - ((block_id) * block_size)) + curr_block_size = min( + block_size, var_numel - ((block_id) * block_size) + ) block = VarBlock(var.name, block_id, curr_block_size) blocks.append(str(block)) return blocks @@ -242,14 +249,16 @@ class DistributeTranspilerConfig(object): class 
ServerRuntimeConfig(object): - def __init__(self): self._rpc_send_thread_num = int( - os.getenv("FLAGS_rpc_send_thread_num", "12")) + os.getenv("FLAGS_rpc_send_thread_num", "12") + ) self._rpc_get_thread_num = int( - os.getenv("FLAGS_rpc_get_thread_num", "12")) + os.getenv("FLAGS_rpc_get_thread_num", "12") + ) self._rpc_prefetch_thread_num = int( - os.getenv("FLAGS_rpc_prefetch_thread_num", "12")) + os.getenv("FLAGS_rpc_prefetch_thread_num", "12") + ) class DistributeTranspiler(object): @@ -337,8 +346,8 @@ class DistributeTranspiler(object): global PRINT_LOG if self.config.print_log: PRINT_LOG = True - assert (self.config.min_block_size >= 8192) - assert (self.config.split_method.__bases__[0] == PSDispatcher) + assert self.config.min_block_size >= 8192 + assert self.config.split_method.__bases__[0] == PSDispatcher self.counter_var = None def _set_server_config(self, server_config=None): @@ -351,12 +360,14 @@ class DistributeTranspiler(object): "In DistributeTranspiler, server_config must be an instance of ServerRuntimeConfig" ) - def _transpile_nccl2(self, - trainer_id, - trainers, - current_endpoint, - startup_program=None, - wait_port=True): + def _transpile_nccl2( + self, + trainer_id, + trainers, + current_endpoint, + startup_program=None, + wait_port=True, + ): if not startup_program: startup_program = default_startup_program() if trainer_id >= 0: @@ -367,53 +378,55 @@ class DistributeTranspiler(object): wait_server_ready(worker_endpoints) nccl_id_var = startup_program.global_block().create_var( - name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) + name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW + ) for i in range(1, self.config.nccl_comm_num): startup_program.global_block().create_var( name="NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) if self.config.use_hierarchical_allreduce: for i in range(0, self.config.nccl_comm_num): startup_program.global_block().create_var( name="Hierarchical_inter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().create_var( name="Hierarchical_exter_NCCLID_{}".format(i), persistable=True, - type=core.VarDesc.VarType.RAW) + type=core.VarDesc.VarType.RAW, + ) startup_program.global_block().append_op( type="gen_nccl_id", inputs={}, outputs={"NCCLID": nccl_id_var}, attrs={ - "trainers": - trainers.split(","), - "trainer_id": - trainer_id, - "nccl_comm_num": - self.config.nccl_comm_num, - "use_hierarchical_allreduce": - self.config.use_hierarchical_allreduce, - "hierarchical_allreduce_inter_nranks": - self.config.hierarchical_allreduce_inter_nranks - }) + "trainers": trainers.split(","), + "trainer_id": trainer_id, + "nccl_comm_num": self.config.nccl_comm_num, + "use_hierarchical_allreduce": self.config.use_hierarchical_allreduce, + "hierarchical_allreduce_inter_nranks": self.config.hierarchical_allreduce_inter_nranks, + }, + ) return nccl_id_var else: raise ValueError("must set trainer_id > 0") - def _transpile_collective(self, - collective_mode, - trainer_id, - trainers, - current_endpoint, - startup_program=None, - main_program=None, - wait_port=True): + def _transpile_collective( + self, + collective_mode, + trainer_id, + trainers, + current_endpoint, + startup_program=None, + main_program=None, + wait_port=True, + ): if isinstance(trainers, str): endpoints = trainers.split(",") elif isinstance(trainers, list): @@ -421,8 +434,10 @@ class DistributeTranspiler(object): elif 
collective_mode != "single_process_multi_thread": raise ValueError('invalid trainers config: ' + str(trainers)) - if len(endpoints - ) == 1 and collective_mode != "single_process_multi_thread": + if ( + len(endpoints) == 1 + and collective_mode != "single_process_multi_thread" + ): raise ValueError('invalid trainer number in distributed: 1') if startup_program is None: @@ -441,28 +456,34 @@ class DistributeTranspiler(object): else: raise ValueError('invalid collective_mode: %s' % collective_mode) - transpiler.transpile(startup_program=startup_program, - main_program=main_program, - rank=trainer_id, - endpoints=endpoints, - current_endpoint=current_endpoint, - wait_port=wait_port) + transpiler.transpile( + startup_program=startup_program, + main_program=main_program, + rank=trainer_id, + endpoints=endpoints, + current_endpoint=current_endpoint, + wait_port=wait_port, + ) def _get_all_remote_sparse_update_op(self, main_program): sparse_update_ops = [] sparse_update_op_types = ["lookup_table", "nce", "lookup_table_v2"] for op in main_program.global_block().ops: - if op.type in sparse_update_op_types and op.attr( - 'remote_prefetch') is True: + if ( + op.type in sparse_update_op_types + and op.attr('remote_prefetch') is True + ): sparse_update_ops.append(op) return sparse_update_ops - def _update_remote_sparse_update_op(self, program, - need_sparse_update_params): + def _update_remote_sparse_update_op( + self, program, need_sparse_update_params + ): for param_varname, attrs in need_sparse_update_params.items(): height_sections = self.sparse_param_to_height_sections[ - param_varname] + param_varname + ] endpoints = attrs[0] table_names = attrs[1] @@ -518,10 +539,7 @@ class DistributeTranspiler(object): program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", - inputs={ - "Ids": inputs, - 'W': w - }, + inputs={"Ids": inputs, 'W': w}, outputs={"Outputs": outputs}, attrs={ "table_names": table_names, @@ -529,8 +547,9 @@ class DistributeTranspiler(object): "endpoints": endpoints, "padding_idx": padding_idx, "trainer_id": self.trainer_id, - "lookup_table_version": op_type - }) + "lookup_table_version": op_type, + }, + ) else: raise ValueError( "something wrong with distribute_transpiler, submit a issue is recommended" @@ -545,14 +564,16 @@ class DistributeTranspiler(object): return True return False - def transpile(self, - trainer_id, - program=None, - pservers="127.0.0.1:6174", - trainers=1, - sync_mode=True, - startup_program=None, - current_endpoint="127.0.0.1:6174"): + def transpile( + self, + trainer_id, + program=None, + pservers="127.0.0.1:6174", + trainers=1, + sync_mode=True, + startup_program=None, + current_endpoint="127.0.0.1:6174", + ): """ Transpile the input program to distributed programs with config and arguments. 
@@ -604,34 +625,49 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler self.origin_startup_program = self.startup_program.clone() if self.config.mode == "nccl2": - assert (isinstance(trainers, str)) + assert isinstance(trainers, str) self.origin_program._trainers_endpoints = trainers.split(",") self.origin_program._nccl_comm_num = self.config.nccl_comm_num - self.origin_program._use_hierarchical_allreduce = self.config.use_hierarchical_allreduce + self.origin_program._use_hierarchical_allreduce = ( + self.config.use_hierarchical_allreduce + ) # check use_hierarchical_allreduce options if self.config.use_hierarchical_allreduce: trainers_num = len(self.origin_program._trainers_endpoints) # selected automaticly if self.config.hierarchical_allreduce_inter_nranks <= 1: - self.config.hierarchical_allreduce_inter_nranks = core.get_cuda_device_count( + self.config.hierarchical_allreduce_inter_nranks = ( + core.get_cuda_device_count() ) - assert trainers_num > self.config.hierarchical_allreduce_inter_nranks, \ - "trainers_num:{} < hierarchical_allreduce_inter_nranks:{}".format( - trainers_num, self.config.hierarchical_allreduce_inter_nranks) - - assert trainers_num % self.config.hierarchical_allreduce_inter_nranks == 0, \ - "trainers_num:{} mod hierarchical_allreduce_inter_nranks:{} != 0".format( - trainers_num, self.config.hierarchical_allreduce_inter_nranks) - - self.origin_program._hierarchical_allreduce_inter_nranks = \ - int(self.config.hierarchical_allreduce_inter_nranks) + assert ( + trainers_num + > self.config.hierarchical_allreduce_inter_nranks + ), "trainers_num:{} < hierarchical_allreduce_inter_nranks:{}".format( + trainers_num, + self.config.hierarchical_allreduce_inter_nranks, + ) + + assert ( + trainers_num + % self.config.hierarchical_allreduce_inter_nranks + == 0 + ), "trainers_num:{} mod hierarchical_allreduce_inter_nranks:{} != 0".format( + trainers_num, + self.config.hierarchical_allreduce_inter_nranks, + ) + + self.origin_program._hierarchical_allreduce_inter_nranks = int( + self.config.hierarchical_allreduce_inter_nranks + ) - self._transpile_nccl2(trainer_id, - trainers, - current_endpoint, - startup_program=startup_program, - wait_port=self.config.wait_port) + self._transpile_nccl2( + trainer_id, + trainers, + current_endpoint, + startup_program=startup_program, + wait_port=self.config.wait_port, + ) return if self.config.mode == "collective": @@ -642,7 +678,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler current_endpoint=current_endpoint, startup_program=startup_program, main_program=program, - wait_port=self.config.wait_port) + wait_port=self.config.wait_port, + ) return self.trainer_num = trainers @@ -664,7 +701,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # get all sparse update ops self.sparse_update_ops = self._get_all_remote_sparse_update_op( - self.origin_program) + self.origin_program + ) # use_sparse_update_param_name -> split_height_section self.sparse_param_to_height_sections = dict() self.need_delete_optimize_vars = [] @@ -674,7 +712,9 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler self.origin_program._endpoints = self.pserver_endpoints self.origin_program._ps_endpoint = current_endpoint self.origin_program._is_chief = self.trainer_id == 0 - self.origin_program._distributed_lookup_table = self.table_name if self.table_name else None + self.origin_program._distributed_lookup_table = ( + self.table_name if 
self.table_name else None + ) # split and create vars, then put split vars in dicts for later use. # step 1: split and create vars, then put split vars in dicts for later use. @@ -701,29 +741,31 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler eplist = ps_dispatcher.dispatch(splited_vars) if not self.config.slice_var_up: - assert (len(splited_vars) == 1) + assert len(splited_vars) == 1 splited_grad_varname = grad_varname if len(splited_vars) == 1: splited_grad_varname = splited_vars[0].name - index = find_op_by_output_arg(program.global_block(), - splited_grad_varname, - reverse=True) + index = find_op_by_output_arg( + program.global_block(), splited_grad_varname, reverse=True + ) elif len(splited_vars) > 1: orig_var = program.global_block().vars[splited_grad_varname] - index = find_op_by_output_arg(program.global_block(), - splited_grad_varname, - reverse=True) + index = find_op_by_output_arg( + program.global_block(), splited_grad_varname, reverse=True + ) if not self.config.runtime_split_send_recv: - self._insert_split_op(program, orig_var, index, - splited_vars) + self._insert_split_op( + program, orig_var, index, splited_vars + ) index += 1 else: AssertionError( - "Can not insert the send op by original " - "variable name :", splited_grad_varname) + "Can not insert the send op by original " "variable name :", + splited_grad_varname, + ) if splited_vars[0].type == core.VarDesc.VarType.SELECTED_ROWS: sparse_param_name = self.grad_name_to_param_name[grad_varname] @@ -733,7 +775,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler ] dummy_output = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) self.grad_name_to_send_dummy_out[grad_varname] = dummy_output if self.config.runtime_split_send_recv: @@ -764,35 +807,36 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler inputs={"X": send_input_vars}, outputs={"Out": dummy_output}, attrs={ - "epmap": - eplist, - "sections": - sections, - "send_varnames": - send_varnames, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE, + "epmap": eplist, + "sections": sections, + "send_varnames": send_varnames, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, OP_ROLE_VAR_ATTR_NAME: [ self.grad_name_to_param_name[grad_varname], - splited_grad_varname - ] - }) + splited_grad_varname, + ], + }, + ) for _, var in enumerate(splited_vars): send_vars.append(var) send_barrier_out = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) if self.has_distributed_lookup_table: self.grad_name_to_send_dummy_out[ - self.table_name] = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + self.table_name + ] = program.global_block().create_var( + name=framework.generate_control_dev_var_name() + ) input_deps = list(self.grad_name_to_send_dummy_out.values()) if not self.sync_mode: lr_ops = self._get_lr_ops() if len(lr_ops) > 0 and self.counter_var: decay_dummy_output = program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) if self.config.runtime_split_send_recv: # async mode, using communicator to merge and send send_varnames = [self.counter_var.name] @@ -804,39 +848,34 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler inputs={"X": self.counter_var}, 
outputs={"Out": decay_dummy_output}, attrs={ - "epmap": - pserver_endpoints, - "sections": - sections, - "send_varnames": - send_varnames, - "merge_add": - True, - "use_send_handler": - False, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE, - OP_ROLE_VAR_ATTR_NAME: - [self.counter_var.name, self.counter_var.name] - }) + "epmap": pserver_endpoints, + "sections": sections, + "send_varnames": send_varnames, + "merge_add": True, + "use_send_handler": False, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + OP_ROLE_VAR_ATTR_NAME: [ + self.counter_var.name, + self.counter_var.name, + ], + }, + ) input_deps.append(decay_dummy_output) if self.sync_mode: fetch_barrier_input = [] - program.global_block().append_op(type="send_barrier", - inputs={"X": list(input_deps)}, - outputs={"Out": send_barrier_out}, - attrs={ - "endpoints": - pserver_endpoints, - "trainer_id": - self.trainer_id, - "half_async": - False, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + program.global_block().append_op( + type="send_barrier", + inputs={"X": list(input_deps)}, + outputs={"Out": send_barrier_out}, + attrs={ + "endpoints": pserver_endpoints, + "trainer_id": self.trainer_id, + "half_async": False, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) fetch_barrier_input.append(send_barrier_out) else: @@ -849,8 +888,9 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler "endpoints": pserver_endpoints, "trainer_id": self.trainer_id, "half_async": True, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) # step 3: insert recv op to receive parameters from parameter server recv_vars = [] @@ -864,7 +904,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) distributed_var = self.vars_overview.get_distributed_var_by_slice( - recv_vars[i].name) + recv_vars[i].name + ) distributed_var.endpoint = ep need_sparse_update_params = {} @@ -883,7 +924,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler else: # connect deps to send op in async mode recv_dep_in = self.grad_name_to_send_dummy_out[ - self.param_name_to_grad_name[param_varname]] + self.param_name_to_grad_name[param_varname] + ] # get recv op_role_var, if not split, the grad should have .trainer suffix # if split, grad should be the original grad var name. 
ParallelExecutor @@ -896,8 +938,11 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if param_varname in self.sparse_param_to_height_sections: for table_name in table_names: - distributed_var = self.vars_overview.get_distributed_var_by_slice( - table_name) + distributed_var = ( + self.vars_overview.get_distributed_var_by_slice( + table_name + ) + ) distributed_var.vtype = "RemotePrefetch" need_sparse_update_params[param_varname] = (eps, table_names) @@ -914,33 +959,31 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler inputs={"X": [recv_dep_in]}, outputs={"Out": splited_var}, attrs={ - "epmap": - eps, - "recv_varnames": - recv_varnames, - "trainer_id": - self.trainer_id, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE, - OP_ROLE_VAR_ATTR_NAME: - [param_varname, recv_op_role_var_name] - }) + "epmap": eps, + "recv_varnames": recv_varnames, + "trainer_id": self.trainer_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + OP_ROLE_VAR_ATTR_NAME: [ + param_varname, + recv_op_role_var_name, + ], + }, + ) self._update_remote_sparse_update_op(program, need_sparse_update_params) if self.sync_mode: # form a WAW dependency - program.global_block().append_op(type="fetch_barrier", - inputs={"X": fetch_barrier_input}, - outputs={"Out": all_recv_outputs}, - attrs={ - "endpoints": - pserver_endpoints, - "trainer_id": - self.trainer_id, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE - }) + program.global_block().append_op( + type="fetch_barrier", + inputs={"X": fetch_barrier_input}, + outputs={"Out": all_recv_outputs}, + attrs={ + "endpoints": pserver_endpoints, + "trainer_id": self.trainer_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) for param_varname, splited_var in self.param_var_mapping.items(): if len(splited_var) <= 1: @@ -954,14 +997,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler outputs={"Out": [orig_param]}, attrs={ "axis": 0, - RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE, + }, + ) self._get_trainer_startup_program(recv_vars=recv_vars, eplist=eplist) if self.has_distributed_lookup_table: - self._replace_lookup_table_op_with_prefetch(program, - pserver_endpoints) + self._replace_lookup_table_op_with_prefetch( + program, pserver_endpoints + ) self._split_table_grad_and_add_send_vars(program, pserver_endpoints) self._get_distributed_optimizer_vars() @@ -972,8 +1017,10 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler sparse_table_names = [] for op in self.origin_program.global_block().ops: - if op.type in sparse_update_op_types and op.attr( - 'is_sparse') is True: + if ( + op.type in sparse_update_op_types + and op.attr('is_sparse') is True + ): sparse_table_names.append(op.input("W")[0]) if op.type == "distributed_lookup_table": sparse_table_names.append(op.input("W")[0]) @@ -993,14 +1040,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler table_param_init_op.append(op) init_op_num = len(table_param_init_op) if init_op_num != 1: - raise ValueError("table init op num should be 1, now is " + - str(init_op_num)) + raise ValueError( + "table init op num should be 1, now is " + str(init_op_num) + ) table_init_op = table_param_init_op[0] self.startup_program.global_block().append_op( type="fake_init", inputs={}, outputs={"Out": table_var}, - attrs={"shape": table_init_op.attr('shape')}) + attrs={"shape": table_init_op.attr('shape')}, + ) 
delete_ops(self.startup_program.global_block(), table_param_init_op) def _delete_trainer_optimizer(self, is_startup): @@ -1124,7 +1173,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler type=var.type, dtype=var.dtype, shape=var.shape, - lod_level=var.lod_level) + lod_level=var.lod_level, + ) op = startup_program.global_block().append_op( type="recv", @@ -1133,11 +1183,13 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler attrs={ "epmap": eps, "trainer_id": self.trainer_id, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) fetch_barrier_out = startup_program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) startup_program.global_block().append_op( type="fetch_barrier", inputs={}, @@ -1145,8 +1197,9 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler attrs={ "endpoints": self.pserver_endpoints, "trainer_id": self.trainer_id, - RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, + }, + ) for varname, splited_var in self.param_var_mapping.items(): if varname in sparse_table_names: @@ -1158,19 +1211,22 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if varname in startup_program.global_block().vars: orig_param = startup_program.global_block().vars[varname] else: - origin_param_var = self.origin_program.global_block( - ).vars[varname] + origin_param_var = self.origin_program.global_block().vars[ + varname + ] orig_param = startup_program.global_block().create_var( name=varname, persistable=origin_param_var.persistable, type=origin_param_var.type, dtype=origin_param_var.dtype, - shape=origin_param_var.shape) + shape=origin_param_var.shape, + ) startup_program.global_block().append_op( type="concat", inputs={"X": splited_var}, outputs={"Out": [orig_param]}, - attrs={"axis": 0}) + attrs={"axis": 0}, + ) return startup_program @@ -1231,21 +1287,26 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler orig_var_name = v.name # NOTE: single_trainer_var must be created for multi-trainer # case to merge grads from multiple trainers - single_trainer_var = \ - pserver_program.global_block().create_var( - name=orig_var_name, - persistable=True, - type=v.type, - dtype=v.dtype, - shape=v.shape) - if self.sync_mode or self.config.completely_not_async and self.trainer_num > 1: + single_trainer_var = pserver_program.global_block().create_var( + name=orig_var_name, + persistable=True, + type=v.type, + dtype=v.dtype, + shape=v.shape, + ) + if ( + self.sync_mode + or self.config.completely_not_async + and self.trainer_num > 1 + ): for trainer_id in range(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), persistable=False, type=v.type, dtype=v.dtype, - shape=v.shape) + shape=v.shape, + ) recv_inputs.append(var) else: recv_inputs.append(single_trainer_var) @@ -1261,12 +1322,13 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler opt_op_on_pserver = [] for _, op in enumerate(self.optimize_ops): if self._is_optimizer_op(op) and self._is_opt_op_on_pserver( - endpoint, op): + endpoint, op + ): opt_op_on_pserver.append(op) # step 3.3 # prepare if dc asgd is enabled if self.config.enable_dc_asgd == True: - assert (self.sync_mode == False) + assert self.sync_mode == False 
self.param_bak_list = [] # add param_bak for each trainer for p in self.param_grad_ep_mapping[endpoint]["params"]: @@ -1278,7 +1340,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler name=param_bak_name, type=p.type, shape=p.shape, - dtype=p.dtype) + dtype=p.dtype, + ) self.param_bak_list.append((p, tmpvar)) # step 3.4 @@ -1291,12 +1354,19 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # sparse grad name to param name sparse_grad_to_param = [] - def __append_optimize_op__(op, block, grad_to_block_id, merged_var, - lr_ops): + def __append_optimize_op__( + op, block, grad_to_block_id, merged_var, lr_ops + ): if self._is_optimizer_op(op): - self._append_pserver_ops(block, op, endpoint, grad_to_block_id, - self.origin_program, merged_var, - sparse_grad_to_param) + self._append_pserver_ops( + block, + op, + endpoint, + grad_to_block_id, + self.origin_program, + merged_var, + sparse_grad_to_param, + ) elif op not in lr_ops: self._append_pserver_non_opt_ops(block, op) @@ -1332,13 +1402,15 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler lr_decay_block_id = -1 if len(lr_ops) > 0: lr_decay_block = pserver_program._create_block( - pserver_program.num_blocks - 1) + pserver_program.num_blocks - 1 + ) optimize_blocks.append(lr_decay_block) for _, op in enumerate(lr_ops): cloned_op = self._append_pserver_non_opt_ops(lr_decay_block, op) # append sub blocks to pserver_program in lr_decay_op - __clone_lr_op_sub_block__(cloned_op, pserver_program, - lr_decay_block) + __clone_lr_op_sub_block__( + cloned_op, pserver_program, lr_decay_block + ) lr_decay_block_id = lr_decay_block.idx # append op to the current block @@ -1355,54 +1427,78 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # find the origin grad var before clipping/L2Decay, # merged_var should be the input var name of L2Decay grad_varname_for_block = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] - if op.attr( - OP_ROLE_VAR_ATTR_NAME)[0] == optimize_target_param_name: + if ( + op.attr(OP_ROLE_VAR_ATTR_NAME)[0] + == optimize_target_param_name + ): merged_var = self._append_pserver_grad_merge_ops( - per_opt_block, grad_varname_for_block, endpoint, - grad_to_block_id, self.origin_program) + per_opt_block, + grad_varname_for_block, + endpoint, + grad_to_block_id, + self.origin_program, + ) if merged_var: break # append optimize op once then append other ops. 
if merged_var: for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself - if op.attr(OP_ROLE_VAR_ATTR_NAME)[0] == optimize_target_param_name and \ - op not in global_ops: - log("append opt op: ", op.type, op.input_arg_names, - merged_var) - __append_optimize_op__(op, per_opt_block, - grad_to_block_id, merged_var, - lr_ops) + if ( + op.attr(OP_ROLE_VAR_ATTR_NAME)[0] + == optimize_target_param_name + and op not in global_ops + ): + log( + "append opt op: ", + op.type, + op.input_arg_names, + merged_var, + ) + __append_optimize_op__( + op, + per_opt_block, + grad_to_block_id, + merged_var, + lr_ops, + ) # dedup grad to ids list grad_to_block_id = list(set(grad_to_block_id)) # append global ops if global_ops: opt_state_block = pserver_program._create_block( - pserver_program.num_blocks - 1) + pserver_program.num_blocks - 1 + ) optimize_blocks.append(opt_state_block) for glb_op in global_ops: - __append_optimize_op__(glb_op, opt_state_block, - grad_to_block_id, None, lr_ops) + __append_optimize_op__( + glb_op, opt_state_block, grad_to_block_id, None, lr_ops + ) # process distributed lookup_table prefetch_var_name_to_block_id = [] if self.has_distributed_lookup_table: pserver_index = self.pserver_endpoints.index(endpoint) table_opt_block = self._create_table_optimize_block( - pserver_index, pserver_program, pre_block_idx, grad_to_block_id) + pserver_index, pserver_program, pre_block_idx, grad_to_block_id + ) optimize_blocks.append(table_opt_block) lookup_table_var_name_to_block_id = self._create_prefetch_block( - pserver_index, pserver_program, table_opt_block) + pserver_index, pserver_program, table_opt_block + ) checkpoint_block_id = self._create_checkpoint_save_block( - pserver_program, table_opt_block.idx) + pserver_program, table_opt_block.idx + ) pserver_program._distributed_lookup_table = self.table_name prefetch_var_name_to_block_id.extend( - lookup_table_var_name_to_block_id) + lookup_table_var_name_to_block_id + ) if len(optimize_blocks) == 0: - logging.warn("pserver [" + str(endpoint) + - "] has no optimize block!!") + logging.warn( + "pserver [" + str(endpoint) + "] has no optimize block!!" + ) pre_block_idx = pserver_program.num_blocks - 1 empty_block = pserver_program._create_block(pre_block_idx) optimize_blocks.append(empty_block) @@ -1420,8 +1516,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler "lr_decay_block_id": lr_decay_block_id, "rpc_get_thread_num": self.server_config._rpc_get_thread_num, "rpc_send_thread_num": self.server_config._rpc_send_thread_num, - "rpc_prefetch_thread_num": - self.server_config._rpc_prefetch_thread_num + "rpc_prefetch_thread_num": self.server_config._rpc_prefetch_thread_num, } if self.has_distributed_lookup_table: @@ -1431,13 +1526,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if len(prefetch_var_name_to_block_id) > 0: attrs[ - 'prefetch_var_name_to_block_id'] = prefetch_var_name_to_block_id + 'prefetch_var_name_to_block_id' + ] = prefetch_var_name_to_block_id # step5 append the listen_and_serv op - pserver_program.global_block().append_op(type="listen_and_serv", - inputs={'X': recv_inputs}, - outputs={}, - attrs=attrs) + pserver_program.global_block().append_op( + type="listen_and_serv", + inputs={'X': recv_inputs}, + outputs={}, + attrs=attrs, + ) pserver_program._sync_with_cpp() # save pserver program to generate pserver side startup relatively. 
@@ -1471,14 +1569,14 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler pserver_program, pserver_startup_program = t.get_pserver_programs(current_endpoint) """ pserver_prog = self.get_pserver_program(endpoint) - pserver_startup = self.get_startup_program(endpoint, - pserver_program=pserver_prog) + pserver_startup = self.get_startup_program( + endpoint, pserver_program=pserver_prog + ) return pserver_prog, pserver_startup - def get_startup_program(self, - endpoint, - pserver_program=None, - startup_program=None): + def get_startup_program( + self, endpoint, pserver_program=None, startup_program=None + ): """ **Deprecated** @@ -1550,22 +1648,28 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler new_inputs = self._get_input_map_from_op(pserver_vars, op) if op.type in [ - "gaussian_random", "fill_constant", "uniform_random", - "truncated_gaussian_random" + "gaussian_random", + "fill_constant", + "uniform_random", + "truncated_gaussian_random", ]: op._set_attr("shape", list(new_outputs["Out"].shape)) - s_prog.global_block().append_op(type=op.type, - inputs=new_inputs, - outputs=new_outputs, - attrs=op.all_attrs()) + s_prog.global_block().append_op( + type=op.type, + inputs=new_inputs, + outputs=new_outputs, + attrs=op.all_attrs(), + ) if self.config.enable_dc_asgd: for p, p_bak in self.param_bak_list: startup_param_var = s_prog.global_block().vars[p.name] startup_tmpvar = s_prog.global_block().vars[p_bak.name] # copy init random value to param_bak - s_prog.global_block().append_op(type="assign", - inputs={"X": startup_param_var}, - outputs={"Out": startup_tmpvar}) + s_prog.global_block().append_op( + type="assign", + inputs={"X": startup_param_var}, + outputs={"Out": startup_tmpvar}, + ) return s_prog @@ -1588,8 +1692,9 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler orig_dim1_flatten = 1 if len(slice_vars[0].shape) >= 2: - orig_dim1_flatten = reduce(lambda x, y: x * y, - slice_vars[0].shape[1:]) + orig_dim1_flatten = reduce( + lambda x, y: x * y, slice_vars[0].shape[1:] + ) for slice_var in slice_vars[:block_idx]: skip_dim0 += slice_var.shape[0] @@ -1599,12 +1704,12 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler return is_slice, block_idx, offset def _get_distributed_optimizer_vars(self): - def _get_distributed_optimizer_var(endpoint): opt_op_on_pserver = [] for _, op in enumerate(self.optimize_ops): if self._is_optimizer_op(op) and self._is_opt_op_on_pserver( - endpoint, op): + endpoint, op + ): opt_op_on_pserver.append(op) for opt_op in opt_op_on_pserver: @@ -1613,20 +1718,25 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if key == "Param": param_name = opt_op.input(key)[0] dist_var = self.vars_overview.get_distributed_var_by_origin_and_ep( - param_name, endpoint) + param_name, endpoint + ) break for key in opt_op.input_names: if key in [ - "Param", "Grad", "LearningRate", "Beta1Tensor", - "Beta2Tensor" + "Param", + "Grad", + "LearningRate", + "Beta1Tensor", + "Beta2Tensor", ]: continue origin_var = self.origin_program.global_block().vars[ - opt_op.input(key)[0]] + opt_op.input(key)[0] + ] # update accumulator variable shape new_shape = self._get_optimizer_input_shape( - opt_op.type, key, origin_var.shape, - dist_var.slice.shape) + opt_op.type, key, origin_var.shape, dist_var.slice.shape + ) if new_shape == dist_var.slice.shape: splited_var = VarStruct( @@ -1635,7 +1745,8 @@ WIKI: 
https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler dtype=origin_var.dtype, type=origin_var.type, lod_level=origin_var.lod_level, - persistable=origin_var.persistable) + persistable=origin_var.persistable, + ) self.vars_overview.add_distributed_var( origin_var=origin_var, @@ -1644,7 +1755,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler block_id=dist_var.block_id, offset=dist_var.offset, vtype="Optimizer", - endpoint=endpoint) + endpoint=endpoint, + ) else: self.vars_overview.add_distributed_var( origin_var=origin_var, @@ -1653,13 +1765,15 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler block_id=0, offset=0, vtype="Optimizer", - endpoint=endpoint) + endpoint=endpoint, + ) for ep in self.pserver_endpoints: _get_distributed_optimizer_var(ep) - def _update_dist_lookup_table_vars(self, param_list, grad_list, - params_grads): + def _update_dist_lookup_table_vars( + self, param_list, grad_list, params_grads + ): # TODO(wuyi): put find a way to put dist lookup table stuff all together. # update self.table_param_grad and self.trainer_side_table_grad_list program = self.origin_program @@ -1668,22 +1782,25 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler param for param in param_list if param.name != self.table_name ] grad_list = [ - grad for grad in grad_list + grad + for grad in grad_list if grad.name != grad_var_name(self.table_name) ] self.table_param_grad = [ - param_grad for param_grad in params_grads + param_grad + for param_grad in params_grads if param_grad[0].name == self.table_name ][0] table_grad_var = self.table_param_grad[1] if self.sync_mode: self.trainer_side_table_grad_list = [ program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, self.trainer_id, index), + name="%s.trainer_%d.pserver_%d" + % (table_grad_var.name, self.trainer_id, index), type=table_grad_var.type, shape=table_grad_var.shape, - dtype=table_grad_var.dtype) + dtype=table_grad_var.dtype, + ) for index in range(len(self.pserver_endpoints)) ] else: @@ -1692,7 +1809,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler name="%s.pserver_%d" % (table_grad_var.name, index), type=table_grad_var.type, shape=table_grad_var.shape, - dtype=table_grad_var.dtype) + dtype=table_grad_var.dtype, + ) for index in range(len(self.pserver_endpoints)) ] return param_list, grad_list @@ -1719,68 +1837,81 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler param_grad_set.add(g.name) param_list, grad_list = self._update_dist_lookup_table_vars( - param_list, grad_list, self.params_grads) + param_list, grad_list, self.params_grads + ) if self.config.slice_var_up: # when we slice var up into blocks, we will slice the var according to # pserver services' count. A pserver may have two or more listening ports. - grad_blocks = slice_variable(grad_list, len(self.pserver_endpoints), - self.config.min_block_size) - param_blocks = slice_variable(param_list, - len(self.pserver_endpoints), - self.config.min_block_size) + grad_blocks = slice_variable( + grad_list, + len(self.pserver_endpoints), + self.config.min_block_size, + ) + param_blocks = slice_variable( + param_list, + len(self.pserver_endpoints), + self.config.min_block_size, + ) else: # when we do NOT slice var up into blocks, we will always slice params # grads into one block. 
- grad_blocks = slice_variable(grad_list, 1, - self.config.min_block_size) - param_blocks = slice_variable(param_list, 1, - self.config.min_block_size) - assert (len(grad_blocks) == len(param_blocks)) + grad_blocks = slice_variable( + grad_list, 1, self.config.min_block_size + ) + param_blocks = slice_variable( + param_list, 1, self.config.min_block_size + ) + assert len(grad_blocks) == len(param_blocks) # origin_param_name -> [splited_param_vars] self.param_var_mapping = self._create_vars_from_blocklist( - self.origin_program, param_blocks) + self.origin_program, param_blocks + ) for orig_name, splited_vars in self.param_var_mapping.items(): orig_var = self.origin_program.global_block().var(orig_name) for splited_var in splited_vars: is_slice, block_id, offset = self._get_slice_var_info( - splited_var) - - self.vars_overview.add_distributed_var(origin_var=orig_var, - slice_var=splited_var, - block_id=block_id, - offset=offset, - is_slice=is_slice, - vtype="Param") + splited_var + ) + + self.vars_overview.add_distributed_var( + origin_var=orig_var, + slice_var=splited_var, + block_id=block_id, + offset=offset, + is_slice=is_slice, + vtype="Param", + ) # origin_grad_name -> [splited_grad_vars] self.grad_var_mapping = self._create_vars_from_blocklist( self.origin_program, grad_blocks, - add_trainer_suffix=self.trainer_num > 1) + add_trainer_suffix=self.trainer_num > 1, + ) # dict(grad_splited_var -> param_splited_var) self.grad_param_mapping = collections.OrderedDict() for g, p in zip(grad_blocks, param_blocks): g_name, g_bid, _ = g.split(":") p_name, p_bid, _ = p.split(":") - self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ - self.param_var_mapping[p_name][int(p_bid)] + self.grad_param_mapping[ + self.grad_var_mapping[g_name][int(g_bid)] + ] = self.param_var_mapping[p_name][int(p_bid)] # create mapping of endpoint -> split var to create pserver side program self.param_grad_ep_mapping = collections.OrderedDict() [ - self.param_grad_ep_mapping.update({ep: { - "params": [], - "grads": [] - }}) for ep in self.pserver_endpoints + self.param_grad_ep_mapping.update({ep: {"params": [], "grads": []}}) + for ep in self.pserver_endpoints ] # transpiler function for dis lookup_table - def _replace_lookup_table_op_with_prefetch(self, program, - pserver_endpoints): + def _replace_lookup_table_op_with_prefetch( + self, program, pserver_endpoints + ): # 1. 
replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op self.all_in_ids_vars = [] self.all_prefetch_input_vars = [] @@ -1793,16 +1924,22 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler continue_search_lookup_table_op = False all_ops = program.global_block().ops for op in all_ops: - if op.type == LOOKUP_TABLE_TYPE and self.table_name == op.input( - "W")[0]: + if ( + op.type == LOOKUP_TABLE_TYPE + and self.table_name == op.input("W")[0] + ): if not op.attr('is_distributed'): raise RuntimeError( "lookup_table_op that lookup an distributed embedding table" - "should set is_distributed to true") + "should set is_distributed to true" + ) continue_search_lookup_table_op = True - lookup_table_op_index = lookup_table_op_index if lookup_table_op_index != -1 else list( - all_ops).index(op) + lookup_table_op_index = ( + lookup_table_op_index + if lookup_table_op_index != -1 + else list(all_ops).index(op) + ) ids_name = op.input("Ids") out_name = op.output("Out") @@ -1822,14 +1959,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler name=str("prefetch_compress_in_tmp_" + str(index)), type=self.all_in_ids_vars[0].type, shape=self.all_in_ids_vars[0].shape, - dtype=self.all_in_ids_vars[0].dtype) + dtype=self.all_in_ids_vars[0].dtype, + ) self.all_prefetch_input_vars.append(in_var) out_var = program.global_block().create_var( name=str("prefetch_compress_out_tmp_" + str(index)), type=self.all_out_emb_vars[0].type, shape=self.all_out_emb_vars[0].shape, - dtype=self.all_out_emb_vars[0].dtype) + dtype=self.all_out_emb_vars[0].dtype, + ) self.all_prefetch_output_vars.append(out_var) # insert split_ids_op @@ -1837,7 +1976,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler index=lookup_table_op_index, type="split_ids", inputs={'Ids': self.all_in_ids_vars}, - outputs={"Out": self.all_prefetch_input_vars}) + outputs={"Out": self.all_prefetch_input_vars}, + ) # insert prefetch_op program.global_block()._insert_op( @@ -1850,7 +1990,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # FIXME(qiao) temporarily disable this config because prefetch # is not act as other rpc op, it's more like a forward op # RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE - }) + }, + ) # insert concat_op program.global_block()._insert_op( @@ -1859,9 +2000,10 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler inputs={ 'Ids': self.all_in_ids_vars, 'Rows': self.all_prefetch_input_vars, - 'X': self.all_prefetch_output_vars + 'X': self.all_prefetch_output_vars, }, - outputs={"Out": self.all_out_emb_vars}) + outputs={"Out": self.all_out_emb_vars}, + ) def _split_table_grad_and_add_send_vars(self, program, pserver_endpoints): # 2. 
add split_ids_op and send_op to send gradient to pservers @@ -1880,32 +2022,34 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler 'Ids': [program.global_block().vars[table_grad_name]] }, outputs={"Out": self.trainer_side_table_grad_list}, - attrs={RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE}) + attrs={RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE}, + ) program.global_block()._insert_op( index=op_index + 2, type="send", inputs={'X': self.trainer_side_table_grad_list}, outputs={ - 'Out': - [self.grad_name_to_send_dummy_out[self.table_name]] - if self.sync_mode else [] + 'Out': [ + self.grad_name_to_send_dummy_out[self.table_name] + ] + if self.sync_mode + else [] }, attrs={ - "epmap": - pserver_endpoints, - "trainer_id": - self.trainer_id, - RPC_OP_ROLE_ATTR_NAME: - RPC_OP_ROLE_ATTR_VALUE, + "epmap": pserver_endpoints, + "trainer_id": self.trainer_id, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, OP_ROLE_VAR_ATTR_NAME: [ self.grad_name_to_param_name[table_grad_name], - table_grad_name - ] - }) + table_grad_name, + ], + }, + ) break - def _create_prefetch_block(self, pserver_index, pserver_program, - optimize_block): + def _create_prefetch_block( + self, pserver_index, pserver_program, optimize_block + ): # STEP: create prefetch block table_var = pserver_program.global_block().vars[self.table_name] prefetch_var_name_to_block_id = [] @@ -1915,46 +2059,53 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler name=trainer_ids.name, type=trainer_ids.type, shape=trainer_ids.shape, - dtype=trainer_ids.dtype) + dtype=trainer_ids.dtype, + ) trainer_out = self.all_prefetch_output_vars[pserver_index] pserver_out = pserver_program.global_block().create_var( name=trainer_out.name, type=trainer_out.type, shape=trainer_out.shape, - dtype=trainer_out.dtype) + dtype=trainer_out.dtype, + ) prefetch_block.append_op( type="lookup_sparse_table", - inputs={ - 'Ids': pserver_ids, - "W": table_var - }, + inputs={'Ids': pserver_ids, "W": table_var}, outputs={"Out": pserver_out}, attrs={ "is_sparse": True, # has no effect on lookup_table op "is_distributed": True, - "padding_idx": -1 - }) - prefetch_var_name_to_block_id.append(trainer_ids.name + ":" + - str(prefetch_block.idx)) + "padding_idx": -1, + }, + ) + prefetch_var_name_to_block_id.append( + trainer_ids.name + ":" + str(prefetch_block.idx) + ) return prefetch_var_name_to_block_id - def _create_table_optimize_block(self, pserver_index, pserver_program, - pre_block_idx, grad_to_block_id): + def _create_table_optimize_block( + self, pserver_index, pserver_program, pre_block_idx, grad_to_block_id + ): # STEP: create table optimize block table_opt_block = pserver_program._create_block(pre_block_idx) # create table param and grad var in pserver program # create table optimize block in pserver program table_opt_op = [ - op for op in self.optimize_ops if 'Param' in op.input_names + op + for op in self.optimize_ops + if 'Param' in op.input_names and op.input("Param")[0] == self.table_name ][0] origin_param_var = self.origin_program.global_block().vars[ - self.table_name] + self.table_name + ] zero_dim = int( - math.ceil(origin_param_var.shape[0] / - float(len(self.pserver_endpoints)))) + math.ceil( + origin_param_var.shape[0] / float(len(self.pserver_endpoints)) + ) + ) table_shape = list(origin_param_var.shape) table_shape[0] = zero_dim @@ -1963,28 +2114,34 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler shape=table_shape, dtype=origin_param_var.dtype, 
type=core.VarDesc.VarType.SELECTED_ROWS, - persistable=True) + persistable=True, + ) # parameter must be selected rows param_var.desc.set_type(core.VarDesc.VarType.SELECTED_ROWS) grad_var = pserver_program.global_block()._clone_variable( - self.origin_program.global_block().vars[grad_var_name( - self.table_name)]) + self.origin_program.global_block().vars[ + grad_var_name(self.table_name) + ] + ) lr_var = pserver_program.global_block()._clone_variable( - self.origin_program.global_block().vars[table_opt_op.input( - "LearningRate")[0]]) + self.origin_program.global_block().vars[ + table_opt_op.input("LearningRate")[0] + ] + ) if self.sync_mode: # create grad vars in pserver program table_grad_var = self.table_param_grad[1] pserver_side_table_grad_list = [ pserver_program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, index, pserver_index), + name="%s.trainer_%d.pserver_%d" + % (table_grad_var.name, index, pserver_index), type=table_grad_var.type, shape=table_grad_var.shape, - dtype=table_grad_var.dtype) + dtype=table_grad_var.dtype, + ) for index in range(self.trainer_num) ] @@ -1993,28 +2150,36 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler type="sum", inputs={"X": pserver_side_table_grad_list}, outputs={"Out": [grad_var]}, - attrs={"use_mkldnn": False}) + attrs={"use_mkldnn": False}, + ) else: # in async_mode, for table gradient, it also need to be split to each parameter server origin_grad_name = grad_var.name splited_grad_name = self.trainer_side_table_grad_list[ - pserver_index].name + pserver_index + ].name if not splited_grad_name.startswith(origin_grad_name): - raise ValueError("origin_grad_var: " + splited_grad_name + - " grad_var:" + grad_var.name) + raise ValueError( + "origin_grad_var: " + + splited_grad_name + + " grad_var:" + + grad_var.name + ) grad_var = pserver_program.global_block()._rename_var( - origin_grad_name, splited_grad_name) + origin_grad_name, splited_grad_name + ) inputs = { "Param": [param_var], "Grad": [grad_var], - "LearningRate": [lr_var] + "LearningRate": [lr_var], } outputs = {"ParamOut": [param_var]} # only support sgd now logging.warn( "distribute lookup table only support sgd optimizer, change it's optimizer to sgd instead of " - + table_opt_op.type) + + table_opt_op.type + ) table_opt_block.append_op(type="sgd", inputs=inputs, outputs=outputs) # add table parameter gradient and it's block id to grad_to_block_id @@ -2027,23 +2192,26 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler create a new block to handle save checkpoint. """ - pserver_program.global_block().create_var(name="kLookupTablePath", - persistable=True, - type=core.VarDesc.VarType.RAW) + pserver_program.global_block().create_var( + name="kLookupTablePath", + persistable=True, + type=core.VarDesc.VarType.RAW, + ) checkpoint_save_block = pserver_program._create_block(pre_block_idx) # this 'file_path' do not be used in save lookup table variable - checkpoint_save_block.append_op(type='save', - inputs={'X': [self.table_name]}, - outputs={}, - attrs={'file_path': "none"}) + checkpoint_save_block.append_op( + type='save', + inputs={'X': [self.table_name]}, + outputs={}, + attrs={'file_path': "none"}, + ) return checkpoint_save_block.idx - def _create_vars_from_blocklist(self, - program, - block_list, - add_trainer_suffix=False): + def _create_vars_from_blocklist( + self, program, block_list, add_trainer_suffix=False + ): """ Create vars for each split. 
NOTE: only grads need to be named for different trainers, use @@ -2071,14 +2239,18 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler orig_var = program.global_block().var(varname) if len(split) == 1: if self.sync_mode and add_trainer_suffix: - new_var_name = "%s.trainer_%d" % \ - (orig_var.name, self.trainer_id) + new_var_name = "%s.trainer_%d" % ( + orig_var.name, + self.trainer_id, + ) program.global_block()._rename_var(varname, new_var_name) - var_mapping[varname] = \ - [program.global_block().var(new_var_name)] + var_mapping[varname] = [ + program.global_block().var(new_var_name) + ] else: - var_mapping[varname] = \ - [program.global_block().var(orig_var.name)] + var_mapping[varname] = [ + program.global_block().var(orig_var.name) + ] continue var_mapping[varname] = [] orig_shape = orig_var.shape @@ -2094,28 +2266,33 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler splited_shape.extend(orig_shape[1:]) new_var_name = "" if self.sync_mode and add_trainer_suffix: - new_var_name = "%s.block%d.trainer_%d" % \ - (varname, i, self.trainer_id) + new_var_name = "%s.block%d.trainer_%d" % ( + varname, + i, + self.trainer_id, + ) else: - new_var_name = "%s.block%d" % \ - (varname, i) + new_var_name = "%s.block%d" % (varname, i) var = program.global_block().create_var( name=new_var_name, persistable=False, dtype=orig_var.dtype, type=orig_var.type, - shape=splited_shape) # flattend split var + shape=splited_shape, + ) # flattend split var var_mapping[varname].append(var) program.global_block()._sync_with_cpp() return var_mapping def _clone_var(self, block, var, persistable=True): - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=persistable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=persistable, + ) @staticmethod def _get_splited_var_sections(splited_vars): @@ -2131,34 +2308,37 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler sparse_param_name = self.grad_name_to_param_name[orig_var.name] if self._is_input_of_remote_sparse_update_op(sparse_param_name): self.sparse_param_to_height_sections[ - sparse_param_name] = height_sections - program.global_block()._insert_op(index=index + 1, - type="split_selected_rows", - inputs={"X": orig_var}, - outputs={"Out": splited_vars}, - attrs={ - "height_sections": - height_sections, - RPC_OP_ROLE_ATTR_NAME: - DIST_OP_ROLE_ATTR_VALUE - }) + sparse_param_name + ] = height_sections + program.global_block()._insert_op( + index=index + 1, + type="split_selected_rows", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={ + "height_sections": height_sections, + RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE, + }, + ) elif orig_var.type == core.VarDesc.VarType.LOD_TENSOR: - program.global_block()._insert_op(index=index + 1, - type="split_byref", - inputs={"X": orig_var}, - outputs={"Out": splited_vars}, - attrs={ - "sections": - height_sections, - RPC_OP_ROLE_ATTR_NAME: - DIST_OP_ROLE_ATTR_VALUE - }) + program.global_block()._insert_op( + index=index + 1, + type="split_byref", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={ + "sections": height_sections, + RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE, + }, + ) else: - AssertionError("Variable type should be in set " - "[LOD_TENSOR, SELECTED_ROWS]") + AssertionError( + "Variable type should be in set " 
"[LOD_TENSOR, SELECTED_ROWS]" + ) - def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, - param_shape): + def _get_optimizer_input_shape( + self, op_type, varkey, orig_shape, param_shape + ): """ Returns the shape for optimizer inputs that need to be reshaped when Param and Grad is split to multiple servers. @@ -2190,8 +2370,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler pass else: raise ValueError( - "Not supported optimizer for distributed training: %s" % - op_type) + "Not supported optimizer for distributed training: %s" % op_type + ) return orig_shape def _get_varname_parts(self, varname): @@ -2201,30 +2381,36 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler block_part = "" trainer_idx = varname.find(".trainer_") if trainer_idx >= 0: - trainer_part = varname[trainer_idx + 1:] + trainer_part = varname[trainer_idx + 1 :] else: trainer_idx = len(varname) block_index = varname.find(".block") if block_index >= 0: - block_part = varname[block_index + 1:trainer_idx] + block_part = varname[block_index + 1 : trainer_idx] else: block_index = len(varname) - orig_var_name = varname[0:min(block_index, trainer_idx)] + orig_var_name = varname[0 : min(block_index, trainer_idx)] return orig_var_name, block_part, trainer_part def _orig_varname(self, varname): orig, _, _ = self._get_varname_parts(varname) return orig - def _append_pserver_grad_merge_ops(self, optimize_block, - grad_varname_for_block, endpoint, - grad_to_block_id, origin_program): + def _append_pserver_grad_merge_ops( + self, + optimize_block, + grad_varname_for_block, + endpoint, + grad_to_block_id, + origin_program, + ): program = optimize_block.program pserver_block = program.global_block() grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: - if self._orig_varname(g.name) == \ - self._orig_varname(grad_varname_for_block): + if self._orig_varname(g.name) == self._orig_varname( + grad_varname_for_block + ): grad_block = g break if not grad_block: @@ -2232,7 +2418,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # is not dealing with this grad block return None orig_varname, block_name, trainer_name = self._get_varname_parts( - grad_block.name) + grad_block.name + ) if block_name: merged_var_name = '.'.join([orig_varname, block_name]) else: @@ -2240,90 +2427,104 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler merged_var = pserver_block.vars[merged_var_name] grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) - if self.sync_mode or self.config.completely_not_async and self.trainer_num > 1: + if ( + self.sync_mode + or self.config.completely_not_async + and self.trainer_num > 1 + ): vars2merge = [] for i in range(self.trainer_num): - per_trainer_name = "%s.trainer_%d" % \ - (merged_var_name, i) + per_trainer_name = "%s.trainer_%d" % (merged_var_name, i) vars2merge.append(pserver_block.vars[per_trainer_name]) - optimize_block.append_op(type="sum", - inputs={"X": vars2merge}, - outputs={"Out": merged_var}, - attrs={"use_mkldnn": False}) + optimize_block.append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}, + attrs={"use_mkldnn": False}, + ) optimize_block.append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainer_num)}) + attrs={"scale": 1.0 / float(self.trainer_num)}, + ) return merged_var def _append_dc_asgd_ops(self, block, param_var, grad_var): 
# NOTE: can not use grammar candy here, should put ops in specific block - local_param_bak = block.create_var(name="%s.local_bak" % param_var.name, - shape=param_var.shape, - type=param_var.type, - dtype=param_var.dtype, - persistable=False) + local_param_bak = block.create_var( + name="%s.local_bak" % param_var.name, + shape=param_var.shape, + type=param_var.type, + dtype=param_var.dtype, + persistable=False, + ) # trainer_id_var is block local - trainer_id_var = block.create_var(name="@TRAINER_ID@", - type=core.VarDesc.VarType.LOD_TENSOR, - dtype=core.VarDesc.VarType.INT64, - shape=[1], - persistable=False) + trainer_id_var = block.create_var( + name="@TRAINER_ID@", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype=core.VarDesc.VarType.INT64, + shape=[1], + persistable=False, + ) # ref_inputs = [x[1] for x in self.param_bak_list] ref_inputs = [] for p, p_bak in self.param_bak_list: if p.name == param_var.name: ref_inputs.append(p_bak) - block.append_op(type="ref_by_trainer_id", - inputs={ - "X": ref_inputs, - "TrainerId": trainer_id_var - }, - outputs={"Out": local_param_bak}) + block.append_op( + type="ref_by_trainer_id", + inputs={"X": ref_inputs, "TrainerId": trainer_id_var}, + outputs={"Out": local_param_bak}, + ) def __create_temp_var__(): - return block.create_var(name=unique_name.generate("tmp_dc_output"), - shape=param_var.shape, - type=param_var.type, - dtype=param_var.dtype, - persistable=False) + return block.create_var( + name=unique_name.generate("tmp_dc_output"), + shape=param_var.shape, + type=param_var.type, + dtype=param_var.dtype, + persistable=False, + ) o1 = __create_temp_var__() - block.append_op(type="elementwise_sub", - inputs={ - "X": param_var, - "Y": local_param_bak - }, - outputs={"Out": o1}) + block.append_op( + type="elementwise_sub", + inputs={"X": param_var, "Y": local_param_bak}, + outputs={"Out": o1}, + ) o2 = __create_temp_var__() - block.append_op(type="elementwise_mul", - inputs={ - "X": o1, - "Y": grad_var - }, - outputs={"Out": o2}) + block.append_op( + type="elementwise_mul", + inputs={"X": o1, "Y": grad_var}, + outputs={"Out": o2}, + ) o3 = __create_temp_var__() - block.append_op(type="elementwise_mul", - inputs={ - "X": o2, - "Y": grad_var - }, - outputs={"Out": o3}) + block.append_op( + type="elementwise_mul", + inputs={"X": o2, "Y": grad_var}, + outputs={"Out": o3}, + ) # TODO(typhoonzero): append scale o4 = __create_temp_var__() - block.append_op(type="elementwise_add", - inputs={ - "X": grad_var, - "Y": o3 - }, - outputs={"Out": o4}) + block.append_op( + type="elementwise_add", + inputs={"X": grad_var, "Y": o3}, + outputs={"Out": o4}, + ) return o4 - def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - grad_to_block_id, origin_program, merged_var, - sparse_grad_to_param): + def _append_pserver_ops( + self, + optimize_block, + opt_op, + endpoint, + grad_to_block_id, + origin_program, + merged_var, + sparse_grad_to_param, + ): program = optimize_block.program pserver_block = program.global_block() new_inputs = collections.OrderedDict() @@ -2349,9 +2550,10 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # Note!! 
This is for l2decay on sparse gradient, because it will create a new tensor for # decayed gradient but not inplace modify the origin one origin_grad_name = opt_op.input(key)[0] - if core.kNewGradSuffix( - ) in origin_grad_name and pserver_block.has_var( - origin_grad_name): + if ( + core.kNewGradSuffix() in origin_grad_name + and pserver_block.has_var(origin_grad_name) + ): new_grad = pserver_block.var(origin_grad_name) new_inputs[key] = new_grad else: @@ -2360,10 +2562,12 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler param_block = _get_param_block(opt_op) if not param_block: return - tmpvar = pserver_block.create_var(name=param_block.name, - persistable=True, - dtype=param_block.dtype, - shape=param_block.shape) + tmpvar = pserver_block.create_var( + name=param_block.name, + persistable=True, + dtype=param_block.dtype, + shape=param_block.shape, + ) new_inputs[key] = tmpvar elif key == "LearningRate": # learning rate variable has already be created by non-optimize op, @@ -2377,42 +2581,53 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler name=origin_var.name, persistable=origin_var.persistable, dtype=origin_var.dtype, - shape=origin_var.shape) + shape=origin_var.shape, + ) new_inputs[key] = tmpvar for key in opt_op.input_names: new_shape = None if key in [ - "Param", "Grad", "LearningRate", "Beta1Tensor", - "Beta2Tensor" + "Param", + "Grad", + "LearningRate", + "Beta1Tensor", + "Beta2Tensor", ]: continue var = self.origin_program.global_block().vars[opt_op.input(key)[0]] param_var = new_inputs["Param"] # update accumulator variable shape - new_shape = self._get_optimizer_input_shape(opt_op.type, key, - var.shape, - param_var.shape) - tmpvar = pserver_block.create_var(name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=new_shape) + new_shape = self._get_optimizer_input_shape( + opt_op.type, key, var.shape, param_var.shape + ) + tmpvar = pserver_block.create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=new_shape, + ) new_inputs[key] = tmpvar # change output's ParamOut variable outputs = self._get_output_map_from_op( - self.origin_program.global_block().vars, opt_op) + self.origin_program.global_block().vars, opt_op + ) outputs["ParamOut"] = new_inputs["Param"] - optimize_block.append_op(type=opt_op.type, - inputs=new_inputs, - outputs=outputs, - attrs=opt_op.all_attrs()) + optimize_block.append_op( + type=opt_op.type, + inputs=new_inputs, + outputs=outputs, + attrs=opt_op.all_attrs(), + ) # record sparse grad to param name if new_inputs["Grad"].type == core.VarDesc.VarType.SELECTED_ROWS: sparse_grad_to_param.append( - str(new_inputs["Grad"].name) + ":" + - str(new_inputs["Param"].name)) + str(new_inputs["Grad"].name) + + ":" + + str(new_inputs["Param"].name) + ) def _get_pserver_grad_param_var(self, var, var_dict): """ @@ -2431,15 +2646,20 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # skip per trainer vars if g.name.find(".trainer_") == -1: # only param or grads have split blocks - if self._orig_varname(g.name) in self.grad_name_to_param_name or \ - self._orig_varname(g.name) in self.param_name_to_grad_name: + if ( + self._orig_varname(g.name) + in self.grad_name_to_param_name + or self._orig_varname(g.name) + in self.param_name_to_grad_name + ): grad_block = g break return grad_block def _clone_lr_op(self, program, block, op): inputs = self._get_input_map_from_op( - self.origin_program.global_block().vars, op) + 
self.origin_program.global_block().vars, op + ) for key, varlist in inputs.items(): if not isinstance(varlist, list): varlist = [varlist] @@ -2448,7 +2668,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler block._clone_variable(var) outputs = self._get_output_map_from_op( - self.origin_program.global_block().vars, op) + self.origin_program.global_block().vars, op + ) for key, varlist in outputs.items(): if not isinstance(varlist, list): varlist = [varlist] @@ -2456,16 +2677,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if var not in program.global_block().vars: block._clone_variable(var) - return block.append_op(type=op.type, - inputs=inputs, - outputs=outputs, - attrs=op.all_attrs()) + return block.append_op( + type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs() + ) def _append_pserver_non_opt_ops(self, optimize_block, opt_op): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op( - self.origin_program.global_block().vars, opt_op) + self.origin_program.global_block().vars, opt_op + ) for key, varlist in inputs.items(): if not isinstance(varlist, list): varlist = [varlist] @@ -2474,8 +2695,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # for ops like clipping and weight decay, get the split var (xxx.block0) # for inputs/outputs grad_block = self._get_pserver_grad_param_var( - var, - program.global_block().vars) + var, program.global_block().vars + ) if grad_block: varlist[i] = grad_block elif var.name not in program.global_block().vars: @@ -2486,15 +2707,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler inputs[key] = varlist outputs = self._get_output_map_from_op( - self.origin_program.global_block().vars, opt_op) + self.origin_program.global_block().vars, opt_op + ) for key, varlist in outputs.items(): if not isinstance(varlist, list): varlist = [varlist] for i in range(len(varlist)): var = varlist[i] grad_block = self._get_pserver_grad_param_var( - var, - program.global_block().vars) + var, program.global_block().vars + ) if grad_block: varlist[i] = grad_block elif var.name not in program.global_block().vars: @@ -2504,17 +2726,20 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler varlist[i] = program.global_block().vars[var.name] outputs[key] = varlist - return optimize_block.append_op(type=opt_op.type, - inputs=inputs, - outputs=outputs, - attrs=opt_op.all_attrs()) + return optimize_block.append_op( + type=opt_op.type, + inputs=inputs, + outputs=outputs, + attrs=opt_op.all_attrs(), + ) def _is_op_connected(self, op1, op2): # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. 
- if set(op1.desc.output_arg_names()) & set(op2.desc.input_arg_names()) or \ - set(op1.desc.input_arg_names()) & set(op2.desc.output_arg_names()): + if set(op1.desc.output_arg_names()) & set( + op2.desc.input_arg_names() + ) or set(op1.desc.input_arg_names()) & set(op2.desc.output_arg_names()): return True return False @@ -2530,8 +2755,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler return ufind def _is_optimizer_op(self, op): - if "Param" in op.input_names and \ - "LearningRate" in op.input_names: + if "Param" in op.input_names and "LearningRate" in op.input_names: return True return False @@ -2579,14 +2803,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler block = self.origin_program.global_block() for index, op in enumerate(block.ops): role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME)) - if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \ - role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \ - int(OPT_OP_ROLE_ATTR_VALUE): + if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or role_id == int( + LR_SCHED_OP_ROLE_ATTR_VALUE + ) | int(OPT_OP_ROLE_ATTR_VALUE): if self.sync_mode == False and op.type == 'increment': inputs = self._get_input_map_from_op( - self.origin_program.global_block().vars, op) + self.origin_program.global_block().vars, op + ) outputs = self._get_output_map_from_op( - self.origin_program.global_block().vars, op) + self.origin_program.global_block().vars, op + ) for key in outputs: counter_var = outputs[key] all_trainer_counter_inputs = [ @@ -2595,21 +2821,29 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler type=counter_var.type, shape=counter_var.shape, dtype=counter_var.dtype, - persistable=counter_var.persistable) + persistable=counter_var.persistable, + ) for id_ in range(self.trainer_num) ] for i, op in enumerate( - self.startup_program.global_block().ops): + self.startup_program.global_block().ops + ): if op.type == 'fill_constant': for key in op.output_names: - if len(op.output(key)) == 1 and op.output( - key)[0] == counter_var.name: - self.startup_program.global_block( - ).ops[i]._set_attr( - 'value', float(0.0 - self.trainer_num)) + if ( + len(op.output(key)) == 1 + and op.output(key)[0] == counter_var.name + ): + self.startup_program.global_block().ops[ + i + ]._set_attr( + 'value', float(0.0 - self.trainer_num) + ) for var in all_trainer_counter_inputs: - if var.name == "%s.trainer_%d" % (counter_var.name, - self.trainer_id): + if var.name == "%s.trainer_%d" % ( + counter_var.name, + self.trainer_id, + ): self.counter_var = var self.startup_program.global_block().create_var( name=var.name, @@ -2617,8 +2851,10 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler dtype=var.dtype, shape=var.shape, persistable=var.persistable, - initializer=initializer.Constant(1)) - op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( + initializer=initializer.Constant(1), + ) + op_role_attr_name = ( + core.op_proto_and_checker_maker.kOpRoleAttrName() ) block._remove_op(index) op = block._insert_op( @@ -2626,7 +2862,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler type='sum', inputs={'X': all_trainer_counter_inputs}, outputs=outputs, - attrs={op_role_attr_name: LR_SCHED_OP_ROLE_ATTR_VALUE}) + attrs={op_role_attr_name: LR_SCHED_OP_ROLE_ATTR_VALUE}, + ) lr_ops.append(op) log("append lr op: ", op.type) return lr_ops @@ -2652,8 +2889,12 @@ WIKI: 
https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler for op2 in block.ops: # NOTE: we need to skip all optimize ops, since it is connected # with forward/backward ops and lr ops, we only need the lr ops. - if op1 != op2 and self._is_op_connected(op1, op2) and \ - not self._is_optimizer_op(op1) and not self._is_optimizer_op(op2): + if ( + op1 != op2 + and self._is_op_connected(op1, op2) + and not self._is_optimizer_op(op1) + and not self._is_optimizer_op(op2) + ): ufind.union(op1, op2) # find all ops which is related with lr var for op1 in block.ops: @@ -2669,8 +2910,9 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler # optimize op_maker = core.op_proto_and_checker_maker optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize - if op_maker.kOpRoleAttrName() in op.attr_names and \ - int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role): + if op_maker.kOpRoleAttrName() in op.attr_names and int( + op.all_attrs()[op_maker.kOpRoleAttrName()] + ) == int(optimize_role): return True return False @@ -2691,13 +2933,16 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if self._is_opt_role_op(op): # Todo(chengmo): Whether clip related op belongs to Optimize guard should be discussed # delete clip op from opt_ops when run in Parameter Server mode - if OP_NAME_SCOPE in op.all_attrs( - ) and CLIP_OP_NAME_SCOPE in op.attr( - OP_NAME_SCOPE - ) and self.config.mode != "nccl2" and self.config.mode != "collective": + if ( + OP_NAME_SCOPE in op.all_attrs() + and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE) + and self.config.mode != "nccl2" + and self.config.mode != "collective" + ): op._set_attr( "op_role", - int(core.op_proto_and_checker_maker.OpRole.Backward)) + int(core.op_proto_and_checker_maker.OpRole.Backward), + ) continue opt_ops.append(op) if op.attr(OP_ROLE_VAR_ATTR_NAME): @@ -2706,10 +2951,12 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler if not param_name in optimize_params: optimize_params.add(param_name) log("adding param_grad pair: ", param_name, grad_name) - params_grads.append([ - origin_var_dict[param_name], - origin_var_dict[grad_name] - ]) + params_grads.append( + [ + origin_var_dict[param_name], + origin_var_dict[grad_name], + ] + ) else: pass diff --git a/python/paddle/fluid/transpiler/geo_sgd_transpiler.py b/python/paddle/fluid/transpiler/geo_sgd_transpiler.py index fe018f611a376f1d06fea49a4c0f944fc9bace00..0163d591cb434b9b720b5da769ed8f5e495f1284 100644 --- a/python/paddle/fluid/transpiler/geo_sgd_transpiler.py +++ b/python/paddle/fluid/transpiler/geo_sgd_transpiler.py @@ -30,21 +30,32 @@ import numpy as np from .ps_dispatcher import RoundRobin, PSDispatcher from .. 
import core, framework -from ..framework import Program, default_main_program, \ - default_startup_program, Block, Parameter +from ..framework import ( + Program, + default_main_program, + default_startup_program, + Block, + Parameter, +) from .details import wait_server_ready, VarsDistributed from .details import delete_ops from ..distribute_lookup_table import find_distributed_lookup_table -from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig, slice_variable, same_or_split_var, ServerRuntimeConfig +from .distribute_transpiler import ( + DistributeTranspiler, + DistributeTranspilerConfig, + slice_variable, + same_or_split_var, + ServerRuntimeConfig, +) from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode -RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( -) +RPC_OP_ROLE_ATTR_NAME = ( + op_role_attr_name +) = core.op_proto_and_checker_maker.kOpRoleAttrName() RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC class GeoSgdTranspiler(DistributeTranspiler): - def __init__(self, config=None): if config is not None: self.config = config @@ -55,17 +66,19 @@ class GeoSgdTranspiler(DistributeTranspiler): if self.config.split_method is None: self.config.split_method = RoundRobin - assert (self.config.min_block_size >= 8192) - assert (self.config.split_method.__bases__[0] == PSDispatcher) - - def transpile(self, - trainer_id, - program=None, - pservers="127.0.0.1:6174", - trainers=1, - sync_mode=False, - startup_program=None, - current_endpoint="127.0.0.1:6174"): + assert self.config.min_block_size >= 8192 + assert self.config.split_method.__bases__[0] == PSDispatcher + + def transpile( + self, + trainer_id, + program=None, + pservers="127.0.0.1:6174", + trainers=1, + sync_mode=False, + startup_program=None, + current_endpoint="127.0.0.1:6174", + ): if program is None: program = default_main_program() if startup_program is None: @@ -92,7 +105,9 @@ class GeoSgdTranspiler(DistributeTranspiler): # distribute lookup table self.table_name = find_distributed_lookup_table(self.origin_program) self.has_distributed_lookup_table = self.table_name != None - self.origin_program._distributed_lookup_table = self.table_name if self.table_name else None + self.origin_program._distributed_lookup_table = ( + self.table_name if self.table_name else None + ) # add distributed attrs to program self.origin_program._is_distributed = True @@ -127,7 +142,8 @@ class GeoSgdTranspiler(DistributeTranspiler): for i, ep in enumerate(eplist): self.param_opt_ep_mapping[ep]["params"].append(recv_vars[i]) distributed_var = self.vars_overview.get_distributed_var_by_slice( - recv_vars[i].name) + recv_vars[i].name + ) distributed_var.endpoint = ep origin_name = self.split_to_origin_mapping[recv_vars[i].name] self.vars_info[origin_name]["epmap"].append(ep) @@ -142,11 +158,14 @@ class GeoSgdTranspiler(DistributeTranspiler): if op.type == "lookup_table": op._set_attr('remote_prefetch', False) for input_var_name, sparse_var_name in zip( - op.input("Ids"), op.input("W")): + op.input("Ids"), op.input("W") + ): if sparse_var_name in self.sparse_var_list: if input_var_name in unique_sparse_var: - if unique_sparse_var[ - input_var_name] == sparse_var_name: + if ( + unique_sparse_var[input_var_name] + == sparse_var_name + ): continue input_var = program.global_block().var(input_var_name) self.sparse_var.append(input_var) @@ -155,32 +174,39 @@ class GeoSgdTranspiler(DistributeTranspiler): # batch training loop end flag dummy_output = 
program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) program.global_block().append_op( type="send", inputs={"X": self.sparse_var}, outputs={"Out": dummy_output}, - attrs={"send_varnames": self.sparse_tables}) + attrs={"send_varnames": self.sparse_tables}, + ) # add param_init flag in trainer startup program self.trainer_startup_program = self._get_trainer_startup_program( - recv_vars=recv_vars, eplist=eplist) + recv_vars=recv_vars, eplist=eplist + ) for delta_var in self.delta_vars_list: self.trainer_startup_program.global_block().create_var( name=delta_var.name, persistable=delta_var.persistable, dtype=delta_var.dtype, type=delta_var.type, - shape=delta_var.shape) + shape=delta_var.shape, + ) dummy_output = self.trainer_startup_program.global_block().create_var( - name=framework.generate_control_dev_var_name()) + name=framework.generate_control_dev_var_name() + ) param_init = self.trainer_startup_program.global_block().create_var( - name="param_init") + name="param_init" + ) self.trainer_startup_program.global_block().append_op( type="send", inputs={"X": [param_init]}, outputs={"Out": dummy_output}, - attrs={"send_varnames": [param_init.name]}) + attrs={"send_varnames": [param_init.name]}, + ) def _get_vars_info(self): return self.vars_info @@ -193,8 +219,9 @@ class GeoSgdTranspiler(DistributeTranspiler): def get_pserver_programs(self, endpoint): pserver_prog = self.get_pserver_program(endpoint) self.param_grad_ep_mapping = self.param_opt_ep_mapping - pserver_startup = self.get_startup_program(endpoint, - pserver_program=pserver_prog) + pserver_startup = self.get_startup_program( + endpoint, pserver_program=pserver_prog + ) return pserver_prog, pserver_startup def get_pserver_program(self, endpoint): @@ -224,21 +251,27 @@ class GeoSgdTranspiler(DistributeTranspiler): delta_var_name = "%s.delta" % (param.name) if var.name in self.sparse_var_splited_list: delta_type = core.VarDesc.VarType.SELECTED_ROWS - sparse_grad_to_param.append(":".join( - [delta_var_name, param.name])) + sparse_grad_to_param.append( + ":".join([delta_var_name, param.name]) + ) else: delta_type = param.type - delta_var = pserver_block.create_var(name=delta_var_name, - persistable=False, - type=delta_type, - dtype=param.dtype, - shape=param.shape) - - per_opt_block.append_op(type="sum", - inputs={"X": [param, delta_var]}, - outputs={"Out": param}) - param_to_block_id.append(delta_var_name + ":" + - str(per_opt_block.idx)) + delta_var = pserver_block.create_var( + name=delta_var_name, + persistable=False, + type=delta_type, + dtype=param.dtype, + shape=param.shape, + ) + + per_opt_block.append_op( + type="sum", + inputs={"X": [param, delta_var]}, + outputs={"Out": param}, + ) + param_to_block_id.append( + delta_var_name + ":" + str(per_opt_block.idx) + ) attrs = { "optimize_blocks": optimize_block, @@ -249,15 +282,16 @@ class GeoSgdTranspiler(DistributeTranspiler): "sparse_grad_to_param": sparse_grad_to_param, "rpc_get_thread_num": self.server_config._rpc_get_thread_num, "rpc_send_thread_num": self.server_config._rpc_send_thread_num, - "rpc_prefetch_thread_num": - self.server_config._rpc_prefetch_thread_num + "rpc_prefetch_thread_num": self.server_config._rpc_prefetch_thread_num, } # step5 append the listen_and_serv op - pserver_program.global_block().append_op(type="listen_and_serv", - inputs={'X': recv_inputs}, - outputs={}, - attrs=attrs) + pserver_program.global_block().append_op( + type="listen_and_serv", + inputs={'X': recv_inputs}, 
+ outputs={}, + attrs=attrs, + ) pserver_program._sync_with_cpp() # save pserver program to generate pserver side startup relatively. @@ -284,21 +318,28 @@ class GeoSgdTranspiler(DistributeTranspiler): # step 2. Slice vars into numbers of piece with block_size # when we slice var up into blocks, we will slice the var according to # pserver services' count. A pserver may have two or more listening ports. - param_blocks = slice_variable(param_list, len(self.pserver_endpoints), - self.config.min_block_size) + param_blocks = slice_variable( + param_list, len(self.pserver_endpoints), self.config.min_block_size + ) # step 3. Create split param from split blocks # origin_param_name -> [splited_param_vars] # Todo: update _create_vars_from_blocklist self.param_var_mapping = self._create_vars_from_blocklist( - self.origin_program, param_blocks) + self.origin_program, param_blocks + ) # step 4. Create mapping of endpoint -> split var to create pserver side program self.param_opt_ep_mapping = collections.OrderedDict() [ - self.param_opt_ep_mapping.update({ep: { - "params": [], - }}) for ep in self.pserver_endpoints + self.param_opt_ep_mapping.update( + { + ep: { + "params": [], + } + } + ) + for ep in self.pserver_endpoints ] # step 5. Create delta var of Geo-Sgd & record vars information @@ -325,28 +366,34 @@ class GeoSgdTranspiler(DistributeTranspiler): persistable=False, dtype=origin_var.dtype, type=delta_type, - shape=origin_var.shape) + shape=origin_var.shape, + ) self.delta_vars_list.append(delta_var) for splited_var in splited_vars: is_slice, block_id, offset = self._get_slice_var_info( - splited_var) - self.vars_overview.add_distributed_var(origin_var=origin_var, - slice_var=splited_var, - block_id=block_id, - offset=offset, - is_slice=is_slice, - vtype="Param") + splited_var + ) + self.vars_overview.add_distributed_var( + origin_var=origin_var, + slice_var=splited_var, + block_id=block_id, + offset=offset, + is_slice=is_slice, + vtype="Param", + ) self.split_to_origin_mapping[splited_var.name] = origin_name if origin_name in self.sparse_var_list: self.sparse_var_splited_list.append(splited_var.name) self.vars_info[origin_name]["var_names"].append( - splited_var.name) + splited_var.name + ) if len(splited_vars) != 1: self.origin_program.global_block().create_var( name=".".join([splited_var.name, "delta"]), persistable=False, dtype=splited_var.dtype, type=delta_type, - shape=splited_var.shape) + shape=splited_var.shape, + ) diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py index 0aa4a9d8f5420ee59aa2f1c51bf2804ddd3963db..c71197fa0ad8160f60f7dfaffa6d15c75546ce45 100755 --- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py +++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py @@ -15,13 +15,11 @@ import logging -def memory_optimize(input_program, - skip_opt_set=None, - print_log=False, - level=0, - skip_grads=True): +def memory_optimize( + input_program, skip_opt_set=None, print_log=False, level=0, skip_grads=True +): """ - :api_attr: Static Graph + :api_attr: Static Graph This API is deprecated since 1.6. Please do not use it. The better memory optimization strategies are enabled by default. @@ -38,15 +36,18 @@ def memory_optimize(input_program, 'default value).\n' ' 2. 
Inplace strategy, which is enabled by setting ' 'build_strategy.enable_inplace=True (True is the default value) ' - 'when using CompiledProgram or ParallelExecutor.\n') + 'when using CompiledProgram or ParallelExecutor.\n' + ) def release_memory(input_program, skip_opt_set=None): """ - :api_attr: Static Graph + :api_attr: Static Graph This API is deprecated since 1.6. Please do not use it. The better memory optimization strategies are enabled by default. """ - logging.warn('paddle.fluid.release_memory() is deprecated, it would not' - ' take any memory release on your program') + logging.warn( + 'paddle.fluid.release_memory() is deprecated, it would not' + ' take any memory release on your program' + ) diff --git a/python/paddle/fluid/transpiler/ps_dispatcher.py b/python/paddle/fluid/transpiler/ps_dispatcher.py index 0bf7442fdea0677707fef8e416d661bf87f469a9..6271f8152c19713539b26f629842c1d97834bd88 100644 --- a/python/paddle/fluid/transpiler/ps_dispatcher.py +++ b/python/paddle/fluid/transpiler/ps_dispatcher.py @@ -46,7 +46,7 @@ class PSDispatcher(object): class HashName(PSDispatcher): """ - :api_attr: Static Graph + :api_attr: Static Graph Hash variable names to several endpoints using python "hash()" function. @@ -88,7 +88,7 @@ class HashName(PSDispatcher): class RoundRobin(PSDispatcher): """ - :api_attr: Static Graph + :api_attr: Static Graph Distribute variables to several endpoints using RondRobin method. diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index 39270ec2dfecab51bccbc0a57413f29efacf9c3b..1c0c28dae8bda2e6e1c8d0d5336c7f3c820f7e45 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -121,6 +121,7 @@ def generate(key): # in order to keep name consistency. def generate_with_ignorable_key(key): from .framework import _non_static_mode, _dygraph_tracer + if _non_static_mode(): return _dygraph_tracer()._generate_unique_name() diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/fluid/variable_index.py index 3d67cd7e2302aebea4789ee5483a7fe3cc2ef35a..f3c7fff38fa6dc98ec302a95ba1b84b193f82c7c 100644 --- a/python/paddle/fluid/variable_index.py +++ b/python/paddle/fluid/variable_index.py @@ -22,7 +22,6 @@ MAX_INTEGER = 2**31 - 1 def is_list_tuple(index, contain_type): - def _is_list_tuple(item): if not (isinstance(item, (list, tuple)) or type(item) == contain_type): return False @@ -58,14 +57,13 @@ def get_list_index_shape(var_dims, index_dims): out_dims_shape = [1] * out_dims_size - out_dims_shape[:index_dims_size - 1] = index_dims[1:] + out_dims_shape[: index_dims_size - 1] = index_dims[1:] - out_dims_shape[index_dims_size - 1:] = var_dims[index_dims[0]:] + out_dims_shape[index_dims_size - 1 :] = var_dims[index_dims[0] :] return out_dims_shape class SliceInfo: - def __init__(self): self.pre_shape = None self.indexes = [] @@ -73,7 +71,8 @@ class SliceInfo: def update(self, index): if is_list_tuple(index, int) or isinstance( - index, (paddle.fluid.Variable, np.ndarray)): + index, (paddle.fluid.Variable, np.ndarray) + ): # convert index to Tensor if not isinstance(index, paddle.fluid.Variable): index = paddle.assign(index) @@ -83,8 +82,10 @@ class SliceInfo: else: if index.dtype != self.dtype: raise IndexError( - "Data type of Tensor/List index should be same. The current data type is {}, but the previous data type is {}." - .format(index.dtype, self.dtype)) + "Data type of Tensor/List index should be same. 
The current data type is {}, but the previous data type is {}.".format( + index.dtype, self.dtype + ) + ) self.indexes.append(index) @@ -93,16 +94,20 @@ class SliceInfo: else: if self.pre_shape != index.shape: # broadcast - cur_shape = paddle.broadcast_shape(self.pre_shape, - index.shape) + cur_shape = paddle.broadcast_shape( + self.pre_shape, index.shape + ) for i in range(len(self.indexes)): self.indexes[i] = paddle.broadcast_to( - self.indexes[i], cur_shape) + self.indexes[i], cur_shape + ) self.pre_shape = self.indexes[-1].shape else: raise ValueError( - "Index should be list/tuple of int or Tensor, but received {}.". - format(index)) + "Index should be list/tuple of int or Tensor, but received {}.".format( + index + ) + ) def shape_stride(self, shape): s = [1] * len(shape) @@ -119,19 +124,22 @@ class SliceInfo: if not isinstance(index, paddle.fluid.Variable): raise ValueError( "only support list/tensor index, but received {}.".format( - type(index))) + type(index) + ) + ) if len(self.indexes) <= len(tensor_shape) or len(self.indexes) == 1: shape = paddle.stack(self.indexes) - axes = list(range(1, - len(self.pre_shape) + 1)) + [ - 0, - ] + axes = list(range(1, len(self.pre_shape) + 1)) + [ + 0, + ] else: raise ValueError( - "too many indices for tensor: tensor is {}-dimensional, but {} were indexed" - .format(len(tensor_shape), self.pre_shape[0])) + "too many indices for tensor: tensor is {}-dimensional, but {} were indexed".format( + len(tensor_shape), self.pre_shape[0] + ) + ) shape_transpose = paddle.transpose(shape, axes) return shape_transpose @@ -148,7 +156,8 @@ class SliceInfo: tensor_type = None if tensor_origin.dtype in [ - core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64 + core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP64, ]: tensor = tensor_origin else: @@ -161,36 +170,47 @@ class SliceInfo: shape_transpose = self.get_offset_stride(tensor_origin.shape) index = paddle.assign(shape_transpose) - gather_tensor_shape = get_list_index_shape(tensor.shape, [ - len(self.indexes), - ] + list(self.indexes[-1].shape)) + gather_tensor_shape = get_list_index_shape( + tensor.shape, + [ + len(self.indexes), + ] + + list(self.indexes[-1].shape), + ) value_dims_bd = [ 1, ] * len(gather_tensor_shape) - value_dims_bd[-len(value.shape):] = list(value.shape) + value_dims_bd[-len(value.shape) :] = list(value.shape) for i in range(len(gather_tensor_shape)): - if not (value_dims_bd[i] == gather_tensor_shape[i] - or value_dims_bd[i] == 1): - raise ValueError("{} can not broadcast into {}".format( - value.shape, gather_tensor_shape)) + if not ( + value_dims_bd[i] == gather_tensor_shape[i] + or value_dims_bd[i] == 1 + ): + raise ValueError( + "{} can not broadcast into {}".format( + value.shape, gather_tensor_shape + ) + ) value_broadcast = paddle.broadcast_to(value, gather_tensor_shape) value_1d = value_broadcast.reshape( - [-1] + gather_tensor_shape[len(index.shape) - 1:]) + [-1] + gather_tensor_shape[len(index.shape) - 1 :] + ) index_1d = index.reshape([-1, index.shape[-1]]) tensor_stride = paddle.assign( - self.shape_stride(tensor.shape[:index.shape[-1]])) + self.shape_stride(tensor.shape[: index.shape[-1]]) + ) inds = [] for i in range(index_1d.shape[0]): temp = (index_1d[i] * tensor_stride).sum() inds.append(temp) index_1d = paddle.stack(inds).reshape([-1]) - t_reshape = tensor.reshape([-1] + list(tensor.shape[index.shape[-1]:])) + t_reshape = tensor.reshape([-1] + list(tensor.shape[index.shape[-1] :])) out = paddle.scatter(t_reshape, index_1d, value_1d) if tensor_type is not None: out = 
out.astype(tensor_type) @@ -201,6 +221,7 @@ class SliceInfo: def replace_ellipsis(var, item): from .framework import Variable + # Use slice(None) to replace Ellipsis. # For var, var.shape = [3,4,5,6] # @@ -212,7 +233,8 @@ def replace_ellipsis(var, item): # Remove Variable to skip bug when counting Ellipsis item_remove_var = [ - ele for ele in item + ele + for ele in item if not isinstance(ele, (Variable, np.ndarray)) and ele is not None ] ell_count = item_remove_var.count(Ellipsis) @@ -226,9 +248,9 @@ def replace_ellipsis(var, item): if ell_idx == len(item) - 1: return item[:-1] else: - item[ell_idx:ell_idx + - 1] = [slice(None) - ] * (len(var.shape) - len(item) + item.count(None) + 1) + item[ell_idx : ell_idx + 1] = [slice(None)] * ( + len(var.shape) - len(item) + item.count(None) + 1 + ) return item @@ -256,6 +278,7 @@ def replace_none(item): def is_integer_or_scalar_tensor(ele): from .framework import Variable + if isinstance(ele, int): return True elif isinstance(ele, Variable): @@ -266,6 +289,7 @@ def is_integer_or_scalar_tensor(ele): def is_bool_tensor(ele): from .framework import Variable + if isinstance(ele, Variable) and ele.dtype == paddle.bool: return True return False @@ -276,8 +300,9 @@ def deal_attrs(attrs, attr, attr_name, tensor_attr_name, inputs, infer_flags): from .layers import utils if utils._contain_var(attr): - inputs[tensor_attr_name] = utils._convert_to_tensor_list(attr, - dtype="int64") + inputs[tensor_attr_name] = utils._convert_to_tensor_list( + attr, dtype="int64" + ) for i, dim in enumerate(attr): if isinstance(dim, Variable): attrs[attr_name].append(-1) @@ -291,16 +316,19 @@ def deal_attrs(attrs, attr, attr_name, tensor_attr_name, inputs, infer_flags): # the item is a tensor of bool def get_value_for_bool_tensor(var, item): if len(item.shape) > len(var.shape): - raise IndexError("The dims of bool index doesn't match indexed array, " - "the dims of bool index except to be equal or less " - "than {}, but received {}.".format( - len(var.shape), len(item.shape))) + raise IndexError( + "The dims of bool index doesn't match indexed array, " + "the dims of bool index except to be equal or less " + "than {}, but received {}.".format(len(var.shape), len(item.shape)) + ) for i, dim_len in enumerate(item.shape): if dim_len != var.shape[i]: raise IndexError( - "The dimension of bool index doesn't match indexed array along "\ - "dimension {}, the target dimension is {}, but received {}.". 
- format(i, var.shape[i], dim_len)) + "The dimension of bool index doesn't match indexed array along " + "dimension {}, the target dimension is {}, but received {}.".format( + i, var.shape[i], dim_len + ) + ) def idx_not_empty(var, item): from .layers.nn import where @@ -315,8 +343,10 @@ def get_value_for_bool_tensor(var, item): return paddle.empty(var_shape, dtype=var.dtype) from .layers.control_flow import cond - return cond(item.any(), lambda: idx_not_empty(var, item), - lambda: idx_empty(var)) + + return cond( + item.any(), lambda: idx_not_empty(var, item), lambda: idx_empty(var) + ) def _getitem_impl_(var, item): @@ -330,12 +360,13 @@ def _getitem_impl_(var, item): Sliced variable """ from .framework import default_main_program, Variable + if isinstance(item, list): if not is_one_dim_list(item, int): item = tuple(item) if not isinstance(item, tuple): - item = (item, ) + item = (item,) decrease_axes = [] axes = [] @@ -351,11 +382,15 @@ def _getitem_impl_(var, item): slice_info = SliceInfo() for dim, slice_item in enumerate(item): - if is_integer_or_scalar_tensor( - slice_item) and not is_bool_tensor(slice_item): - if isinstance(slice_item, - int) and var.shape[dim] is not None and var.shape[ - dim] >= 0 and slice_item >= var.shape[dim]: + if is_integer_or_scalar_tensor(slice_item) and not is_bool_tensor( + slice_item + ): + if ( + isinstance(slice_item, int) + and var.shape[dim] is not None + and var.shape[dim] >= 0 + and slice_item >= var.shape[dim] + ): # For python, if users write a, b = var, the __getitem__ # method will iterate through 0, 1, 2 ... until __getitem__ # throws an IndexError, then stop. The var[0], var[1] will @@ -365,7 +400,8 @@ def _getitem_impl_(var, item): # We raises IndexError here to support grammar like `a, b = var` raise IndexError( "slice_item %d at dim %d should be >= 0 and < var.shape[%d]: %d" - % (slice_item, dim, dim, var.shape[dim])) + % (slice_item, dim, dim, var.shape[dim]) + ) decrease_axes.append(dim) start = slice_item step = 1 @@ -385,9 +421,9 @@ def _getitem_impl_(var, item): start = 0 if step > 0 else MAX_INTEGER if end is None: if var.shape[dim] != -1 and ( - paddle.fluid.framework._non_static_mode() - or var.desc.type() != - core.VarDesc.VarType.LOD_TENSOR_ARRAY): + paddle.fluid.framework._non_static_mode() + or var.desc.type() != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): end = var.shape[dim] if step > 0 else -1 else: end = MAX_INTEGER if step > 0 else -1 @@ -407,15 +443,19 @@ def _getitem_impl_(var, item): if len(item) != 1: raise IndexError( - "When index contains a list, its length must be 1, but received {}." - .format(len(item))) + "When index contains a list, its length must be 1, but received {}.".format( + len(item) + ) + ) new_slice_item = [] if all_bool: if len(slice_item) != var.shape[0]: raise IndexError( - "The dimension of bool index doesn't match indexed array along "\ - "dimension 0, the target dimension is {}, but received {}.". - format(var.shape[0], len(slice_item))) + "The dimension of bool index doesn't match indexed array along " + "dimension 0, the target dimension is {}, but received {}.".format( + var.shape[0], len(slice_item) + ) + ) for idx, ele in enumerate(slice_item): if ele is True: new_slice_item.append(idx) @@ -455,8 +495,10 @@ def _getitem_impl_(var, item): else: raise IndexError( - "Valid index accept int or slice or ellipsis or list, but received {}." 
- .format(slice_item)) + "Valid index accept int or slice or ellipsis or list, but received {}.".format( + slice_item + ) + ) axes.append(dim) starts.append(start) @@ -467,8 +509,10 @@ def _getitem_impl_(var, item): if slice_info.indexes: if len(slice_info.indexes) != len(item): raise IndexError( - "Valid index accept int or slice or ellipsis or list, but received {}." - .format(item)) + "Valid index accept int or slice or ellipsis or list, but received {}.".format( + item + ) + ) return slice_info.get_item(var) inputs = {'Input': [var]} @@ -476,7 +520,7 @@ def _getitem_impl_(var, item): 'axes': axes, 'starts': [], 'ends': [], - 'decrease_axis': decrease_axes + 'decrease_axis': decrease_axes, } if use_strided_slice: attrs['strides'] = [] @@ -484,8 +528,9 @@ def _getitem_impl_(var, item): infer_flags = [1] * len(axes) deal_attrs(attrs, starts, "starts", "StartsTensorList", inputs, infer_flags) deal_attrs(attrs, ends, "ends", "EndsTensorList", inputs, infer_flags) - deal_attrs(attrs, steps, "strides", "StridesTensorList", inputs, - infer_flags) + deal_attrs( + attrs, steps, "strides", "StridesTensorList", inputs, infer_flags + ) attrs['infer_flags'] = infer_flags out = var @@ -500,23 +545,29 @@ def _getitem_impl_(var, item): end = inputs['EndsTensorList'] else: end = attrs['ends'] - out = paddle._C_ops.slice(var, axes, st, end, attrs['infer_flags'], - attrs['decrease_axis']) + out = paddle._C_ops.slice( + var, axes, st, end, attrs['infer_flags'], attrs['decrease_axis'] + ) else: target_block = default_main_program().current_block() slice_out_var = target_block.create_var( - name=unique_name.generate_with_ignorable_key(var.name + "_" + - op_type), - dtype=var.dtype) - target_block.append_op(type=op_type, - inputs=inputs, - outputs={'Out': [slice_out_var]}, - attrs=attrs) + name=unique_name.generate_with_ignorable_key( + var.name + "_" + op_type + ), + dtype=var.dtype, + ) + target_block.append_op( + type=op_type, + inputs=inputs, + outputs={'Out': [slice_out_var]}, + attrs=attrs, + ) out = slice_out_var if len(reverse_axes) > 0: from .layers.tensor import reverse + out = reverse(out, axis=reverse_axes) # Deal with cases when all axes are decreased. @@ -546,40 +597,49 @@ def _getitem_impl_(var, item): # out = x[0, 1, 1, None] # out.shape : (1) from ..tensor import unsqueeze + out = unsqueeze(out, axis=none_axes) return out def _setitem_for_tensor_array(var, item, value): - """ branches for tensor array setitem operation. - A item can be a: - (1) int/Variable, which is a simple number/variable such as [1], [-2] - (2) Slice, which is represented by bounds such as [2:-1] - (3) Tuple, which includes the above two cases such as [2:-1, 1] - If item is case (1), we perform paddle.tensor.array_write, - in other cases, we raise a NotImplementedError. + """branches for tensor array setitem operation. + A item can be a: + (1) int/Variable, which is a simple number/variable such as [1], [-2] + (2) Slice, which is represented by bounds such as [2:-1] + (3) Tuple, which includes the above two cases such as [2:-1, 1] + If item is case (1), we perform paddle.tensor.array_write, + in other cases, we raise a NotImplementedError. """ from ..framework import LayerHelper, core, _non_static_mode from .framework import Variable - assert not _non_static_mode( + + assert ( + not _non_static_mode() ), "setitem for tensor_array must be called in static graph mode." 
if isinstance(item, (Variable, int)): - from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable + from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import ( + to_static_variable, + ) from paddle import cast from paddle.tensor import array_write + item = paddle.cast(to_static_variable(item), dtype='int64') value = to_static_variable(value) array_write(x=value, i=item, array=var) else: raise NotImplementedError( - "Only support __setitem__ by Int/Variable in tensor_array, but gets {}" - .format(type(item))) + "Only support __setitem__ by Int/Variable in tensor_array, but gets {}".format( + type(item) + ) + ) def _setitem_impl_(var, item, value): from .framework import default_main_program, Variable from paddle.fluid import core + if var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: return _setitem_for_tensor_array(var, item, value) @@ -589,7 +649,7 @@ def _setitem_impl_(var, item, value): item = tuple(item) # 1. Parse item if not isinstance(item, tuple): - item = (item, ) + item = (item,) decrease_axes = [] axes = [] @@ -603,8 +663,9 @@ def _setitem_impl_(var, item, value): slice_info = SliceInfo() dim = 0 for _, slice_item in enumerate(item): - if is_integer_or_scalar_tensor( - slice_item) and not is_bool_tensor(slice_item): + if is_integer_or_scalar_tensor(slice_item) and not is_bool_tensor( + slice_item + ): decrease_axes.append(dim) start = slice_item end = slice_item + 1 if slice_item != -1 else MAX_INTEGER @@ -624,7 +685,8 @@ def _setitem_impl_(var, item, value): if not isinstance(step, Variable) and step == 0: raise ValueError( "When assign a value to a paddle.Tensor, step can not be 0, " - "but received step is {}.".format(step)) + "but received step is {}.".format(step) + ) if isinstance(step, Variable) and (start is None or end is None): raise ValueError( @@ -644,15 +706,19 @@ def _setitem_impl_(var, item, value): for i in slice_item: if not isinstance(i, bool): - raise TypeError("Doesn't support {} in index list.".format( - type(i))) + raise TypeError( + "Doesn't support {} in index list.".format(type(i)) + ) if len(item) != 1: raise IndexError( - "When index contains a bool list, its length must be 1, but received {}." - .format(len(item))) + "When index contains a bool list, its length must be 1, but received {}.".format( + len(item) + ) + ) from .layers import assign + idx_tensor = assign(slice_item) return set_value_for_bool_tensor(var, idx_tensor, value) @@ -660,8 +726,10 @@ def _setitem_impl_(var, item, value): if slice_item.dtype == core.VarDesc.VarType.BOOL: if len(item) != 1: raise IndexError( - "When index contains a bool tensor, its length must be 1, but received {}." - .format(len(item))) + "When index contains a bool tensor, its length must be 1, but received {}.".format( + len(item) + ) + ) return set_value_for_bool_tensor(var, slice_item, value) else: slice_info.update(slice_item) @@ -669,7 +737,8 @@ def _setitem_impl_(var, item, value): else: raise IndexError( "Valid index accept int, slice, ellipsis, None, list of bool, Variable, " - "but received {}.".format(slice_item)) + "but received {}.".format(slice_item) + ) axes.append(dim) starts.append(start) @@ -680,8 +749,10 @@ def _setitem_impl_(var, item, value): if slice_info.indexes: if len(slice_info.indexes) != len(item): raise IndexError( - "Valid index accept int or slice or ellipsis or list, but received {}." 
- .format(item)) + "Valid index accept int or slice or ellipsis or list, but received {}.".format( + item + ) + ) return slice_info.set_item(var, value) attrs = { 'axes': axes, @@ -689,10 +760,11 @@ def _setitem_impl_(var, item, value): 'ends': ends, 'steps': steps, 'decrease_axes': decrease_axes, - 'none_axes': none_axes + 'none_axes': none_axes, } from .layers import utils + if utils._contain_var(starts): inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts) del attrs['starts'] @@ -708,6 +780,7 @@ def _setitem_impl_(var, item, value): attrs['dtype'] = dtype from .data_feeder import convert_dtype + # 2.1 value is an integer of float if isinstance(value, (int, float)): value = np.array([value]).astype(convert_dtype(dtype)) @@ -737,7 +810,8 @@ def _setitem_impl_(var, item, value): raise TypeError( "When assign a numpy.ndarray, integer or float to a paddle.Tensor, " "the data type of the paddle.Tensor must be bool, float32, int32, int64 or float16, but " - "received %s." % convert_dtype(dtype)) + "received %s." % convert_dtype(dtype) + ) attrs[value_name] = values attrs["shape"] = shape @@ -747,17 +821,21 @@ def _setitem_impl_(var, item, value): raise TypeError( "Only support to assign an integer, float, numpy.ndarray or " "paddle.Tensor to a paddle.Tensor, but received {}".format( - type(value))) + type(value) + ) + ) if paddle.fluid.framework._non_static_mode(): var._bump_inplace_version() cur_block = default_main_program().current_block() - cur_block.append_op(type="set_value", - inputs=inputs, - outputs={'Out': var}, - attrs=attrs, - inplace_map={"Input": "Out"}) + cur_block.append_op( + type="set_value", + inputs=inputs, + outputs={'Out': var}, + attrs=attrs, + inplace_map={"Input": "Out"}, + ) return var @@ -765,16 +843,19 @@ def _setitem_impl_(var, item, value): # the item is a tensor of bool def set_value_for_bool_tensor(var, item, value): if len(item.shape) > len(var.shape): - raise IndexError("The dims of bool index doesn't match indexed array, " - "the dims of bool index except to be equal or less " - "than {}, but received {}.".format( - len(var.shape), len(item.shape))) + raise IndexError( + "The dims of bool index doesn't match indexed array, " + "the dims of bool index except to be equal or less " + "than {}, but received {}.".format(len(var.shape), len(item.shape)) + ) for i, dim_len in enumerate(item.shape): if dim_len != var.shape[i]: raise IndexError( "The dimension of bool index doesn't match indexed array along " - "dimension {}, the target dimension is {}, but received {}.". 
- format(i, var.shape[i], dim_len)) + "dimension {}, the target dimension is {}, but received {}.".format( + i, var.shape[i], dim_len + ) + ) def idx_not_empty(var, item, value): from .framework import Variable @@ -792,6 +873,7 @@ def set_value_for_bool_tensor(var, item, value): var[:] = out from .layers.control_flow import cond + # If all the bool index is False, just do nothing cond(item.any(), lambda: idx_not_empty(var, item, value)) diff --git a/python/paddle/fluid/wrapped_decorator.py b/python/paddle/fluid/wrapped_decorator.py index 5f837b575637c561defc664f740bfc8e673c0aad..7e7dbff65611e947d1a11a0c33c6ecc27e6df636 100644 --- a/python/paddle/fluid/wrapped_decorator.py +++ b/python/paddle/fluid/wrapped_decorator.py @@ -19,7 +19,6 @@ __all__ = ['wrap_decorator', 'signature_safe_contextmanager'] def wrap_decorator(decorator_func): - @decorator.decorator def __impl__(func, *args, **kwargs): wrapped_func = decorator_func(func) diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index 34423e3f3ed6b5914dafc92232846b3580558545..a6b79dd29e285f4402d40a7380c421b554f759de 100644 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -47,10 +47,19 @@ from ..fluid.framework import set_flags # noqa: F401 from ..fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401 from ..fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401 from ..fluid.framework import _non_static_mode as in_dynamic_mode # noqa: F401 -from ..fluid.framework import _non_static_mode # noqa: F401; temporary used for hackson -from ..fluid.framework import _current_expected_place, _get_paddle_place # noqa: F401 +from ..fluid.framework import ( + _non_static_mode, +) # noqa: F401; temporary used for hackson +from ..fluid.framework import ( + _current_expected_place, + _get_paddle_place, +) # noqa: F401 from ..fluid.framework import dygraph_only # noqa: F401 -from ..fluid.framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder # noqa: F401 +from ..fluid.framework import ( + convert_np_dtype_to_dtype_, + _varbase_creator, + OpProtoHolder, +) # noqa: F401 from ..fluid.framework import _dygraph_tracer # noqa: F401 from ..fluid.layer_helper import LayerHelper # noqa: F401 diff --git a/python/paddle/framework/framework.py b/python/paddle/framework/framework.py index 8da6a10a48527ae622b6cc0e377ca7181fccf4d9..e366b13077d2a35050ed2c1350e8fd8d1b8d2632 100644 --- a/python/paddle/framework/framework.py +++ b/python/paddle/framework/framework.py @@ -45,11 +45,16 @@ def set_default_dtype(d): else: raise TypeError( "set_default_dtype only supports [float16, float32, float64] " - ", but received %s" % d.__name__) + ", but received %s" % d.__name__ + ) else: if d in [ - 'float16', 'float32', 'float64', u'float16', u'float32', - u'float64' + 'float16', + 'float32', + 'float64', + u'float16', + u'float32', + u'float64', ]: # this code is a little bit dangerous, since error could happen # when casting no-ascii code to str in python2. 
@@ -60,7 +65,8 @@ def set_default_dtype(d): else: raise TypeError( "set_default_dtype only supports [float16, float32, float64] " - ", but received %s" % str(d)) + ", but received %s" % str(d) + ) LayerHelperBase.set_default_dtype(d) diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index 77e1fb47f882de374ef332d369cd07b4ce667216..0052c1fe6555891249bce6b9e635c26de2a45f1a 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -24,14 +24,31 @@ import paddle # deprecated module import from paddle import fluid from paddle.fluid import core -from paddle.fluid.io import _unpack_saved_dict, _pack_loaded_dict, _pickle_loads_mac +from paddle.fluid.io import ( + _unpack_saved_dict, + _pack_loaded_dict, + _pickle_loads_mac, +) from paddle.fluid.io import _legacy_save as _legacy_static_save from paddle.fluid.io import _open_file_buffer, _is_file_path, _is_memory_buffer -from paddle.fluid.framework import Variable, _varbase_creator, _dygraph_tracer, _non_static_mode, ParamBase, EagerParamBase, _current_expected_place, Program +from paddle.fluid.framework import ( + Variable, + _varbase_creator, + _dygraph_tracer, + _non_static_mode, + ParamBase, + EagerParamBase, + _current_expected_place, + Program, +) from paddle.fluid.dygraph.jit import _SaveLoadConfig -from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers +from paddle.fluid.dygraph.io import ( + _construct_program_holders, + _construct_params_and_buffers, +) from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX + try: from collections.abc import Iterable except: @@ -68,7 +85,8 @@ def _load_state_dict_from_save_inference_model(model_path, config): # 2. load layer parameters & buffers with fluid.dygraph.guard(): persistable_var_dict = _construct_params_and_buffers( - model_path, programs, config.params_filename, append_suffix=False) + model_path, programs, config.params_filename, append_suffix=False + ) # 3. construct state_dict load_param_dict = dict() @@ -84,10 +102,15 @@ def _load_state_dict_from_save_inference_model(model_path, config): structured_para_dict = dict() for var_name in load_param_dict: structured_name = extra_var_info[var_name].get( - 'structured_name', None) - assert structured_name is not None, "Cannot find saved variable (%s)'s structured name in saved model." % var_name + 'structured_name', None + ) + assert structured_name is not None, ( + "Cannot find saved variable (%s)'s structured name in saved model." + % var_name + ) structured_para_dict[structured_name] = load_param_dict[ - var_name] + var_name + ] load_param_dict = structured_para_dict return load_param_dict @@ -115,7 +138,8 @@ def _load_state_dict_from_save_params(model_path): type='load', inputs={}, outputs={'Out': new_var}, - attrs={'file_path': os.path.join(model_path, name)}) + attrs={'file_path': os.path.join(model_path, name)}, + ) load_var_list.append(new_var) # 3. construct state_dict @@ -151,7 +175,8 @@ def _build_load_path_and_config(path, config): raise ValueError( "The %s.pdmodel and %s directory exist at the same time, " "don't know which one to load, please make sure that the specified target " - "of ``path`` is unique." % (path, path)) + "of ``path`` is unique." % (path, path) + ) elif not prefix_format_exist and not directory_format_exist: error_msg = "The ``path`` (%s) to load model not exists." 
# if current path is a prefix, and the path.pdparams or path.pdopt @@ -160,10 +185,12 @@ def _build_load_path_and_config(path, config): params_file_path = path + ".pdparams" opti_file_path = path + ".pdopt" if os.path.exists(params_file_path) or os.path.exists(opti_file_path): - error_msg += " If you want to load the results saved by `fluid.save_dygraph`, " \ - "please specify the full file name, not just the file name prefix. For " \ - "example, it should be written as `paddle.load('model.pdparams')` instead of " \ + error_msg += ( + " If you want to load the results saved by `fluid.save_dygraph`, " + "please specify the full file name, not just the file name prefix. For " + "example, it should be written as `paddle.load('model.pdparams')` instead of " "`paddle.load('model')`." + ) raise ValueError(error_msg % path) else: if prefix_format_exist: @@ -173,13 +200,15 @@ def _build_load_path_and_config(path, config): warnings.warn( "When loading the result saved with the " "specified file prefix, the ``model_filename`` config does " - "not take effect.") + "not take effect." + ) config.model_filename = file_prefix + INFER_MODEL_SUFFIX if config.params_filename is not None: warnings.warn( "When loading the result saved with the " "specified file prefix, the ``params_filename`` config does " - "not take effect.") + "not take effect." + ) config.params_filename = file_prefix + INFER_PARAMS_SUFFIX else: # Compatible with the old save_inference_model format @@ -190,7 +219,10 @@ def _build_load_path_and_config(path, config): def _parse_load_config(configs): supported_configs = [ - 'model_filename', 'params_filename', 'keep_name_table', 'return_numpy' + 'model_filename', + 'params_filename', + 'keep_name_table', + 'return_numpy', ] # input check @@ -198,7 +230,8 @@ def _parse_load_config(configs): if key not in supported_configs: raise ValueError( "The additional config (%s) of `paddle.load` is not supported." - % key) + % key + ) # construct inner config inner_config = _SaveLoadConfig() @@ -218,7 +251,8 @@ def _parse_save_config(configs): if key not in supported_configs: raise ValueError( "The additional config (%s) of `paddle.save` is not supported." - % key) + % key + ) # construct inner config inner_config = _SaveLoadConfig() @@ -231,19 +265,22 @@ def _parse_save_config(configs): def _pickle_save(obj, f, protocol): # TODO(weixin):add support for BytesIO. if not isinstance(protocol, int): - raise ValueError("The 'protocol' MUST be `int`, but received {}".format( - type(protocol))) + raise ValueError( + "The 'protocol' MUST be `int`, but received {}".format( + type(protocol) + ) + ) if protocol < 2 or protocol > 4: raise ValueError( - "Expected 1<'protocol'<5, but received protocol={}".format( - protocol)) + "Expected 1<'protocol'<5, but received protocol={}".format(protocol) + ) def reduce_varbase(self): data = self.numpy() name = self.name - return (tuple, ((name, data), )) + return (tuple, ((name, data),)) def reduce_LoDTensor(self): data = np.array(self) @@ -252,7 +289,8 @@ def _pickle_save(obj, f, protocol): def reduce_Layer(self): raise ValueError( - "paddle do not support saving `paddle.nn.Layer` object.") + "paddle do not support saving `paddle.nn.Layer` object." 
+ ) dispatch_table_layer = dict() @@ -260,8 +298,9 @@ def _pickle_save(obj, f, protocol): dispatch_table_layer[layer.__class__] = reduce_Layer return layer - _parse_every_object(obj, lambda v: isinstance(v, fluid.Layer), - create_layer_dispatch_table) + _parse_every_object( + obj, lambda v: isinstance(v, fluid.Layer), create_layer_dispatch_table + ) def add_dispatch_table(): # This is not a good method, because the pickle module has been modified. @@ -289,7 +328,7 @@ def _pickle_save(obj, f, protocol): max_bytes = 2**30 for i in range(0, len(pickle_bytes), max_bytes): - f.write(pickle_bytes[i:i + max_bytes]) + f.write(pickle_bytes[i : i + max_bytes]) else: pickler = pickle.Pickler(f, protocol) pickler.dispatch_table = copyreg.dispatch_table.copy() @@ -306,7 +345,8 @@ def _pickle_save(obj, f, protocol): def _contain_x(obj, condition_func): if isinstance(obj, core.SelectedRows): raise NotImplementedError( - "`paddle.save` do not support saving 'SelectedRows'.") + "`paddle.save` do not support saving 'SelectedRows'." + ) if condition_func(obj): return True @@ -330,8 +370,16 @@ def _is_state_dict(obj): def condition(obj): return isinstance( - obj, (fluid.Layer, Program, core.VarBase, core.eager.Tensor, - core.LoDTensor, core.SelectedRows)) + obj, + ( + fluid.Layer, + Program, + core.VarBase, + core.eager.Tensor, + core.LoDTensor, + core.SelectedRows, + ), + ) # If the value of a dict is a core.VarBase/LoDTensor or a dict # that does not contain a paddle type(Layer, Program, VarBase, LoDTensor, SelectedRows), @@ -342,7 +390,8 @@ def _is_state_dict(obj): if _contain_x(v, condition): return False elif not isinstance( - value, (core.VarBase, core.eager.Tensor, core.LoDTensor)): + value, (core.VarBase, core.eager.Tensor, core.LoDTensor) + ): return False return True @@ -370,8 +419,10 @@ def _transformed_from_lodtensor(obj): def _to_LodTensor(ndarray): if not isinstance(ndarray, np.ndarray): raise TypeError( - 'Type of `ndarray` should be numpy.ndarray, but received {}.'. - format(type(ndarray))) + 'Type of `ndarray` should be numpy.ndarray, but received {}.'.format( + type(ndarray) + ) + ) t = core.LoDTensor() place = _current_expected_place() t.set(ndarray, place) @@ -418,26 +469,30 @@ def _parse_every_object(obj, condition_func, convert_func): if condition_func(obj[key]): obj[key] = convert_func(obj[key]) else: - obj[key] = _parse_every_object(obj[key], condition_func, - convert_func) + obj[key] = _parse_every_object( + obj[key], condition_func, convert_func + ) return obj elif type(obj) == tuple: return tuple( - _parse_every_object(list(obj), condition_func, convert_func)) + _parse_every_object(list(obj), condition_func, convert_func) + ) elif type(obj) == set: return set(_parse_every_object(list(obj), condition_func, convert_func)) else: if isinstance(obj, Iterable) and not isinstance( - obj, - (str, np.ndarray, core.VarBase, core.eager.Tensor, core.LoDTensor)): + obj, + (str, np.ndarray, core.VarBase, core.eager.Tensor, core.LoDTensor), + ): raise NotImplementedError( - "The iteratable objects supported are tuple, list, dict, OrderedDict, string. But received {}." - .format(type(obj))) + "The iteratable objects supported are tuple, list, dict, OrderedDict, string. 
But received {}.".format( + type(obj) + ) + ) return obj def _parse_load_result(obj, return_numpy): - def is_layer(obj): return isinstance(obj, fluid.Layer) @@ -463,13 +518,15 @@ def _parse_load_result(obj, return_numpy): # tuple(name, ndarry) was converted from varbase of paddle2.1, # and all tuple(name, ndarry) are converted to tensor. if _contain_x(obj, _transformed_from_varbase): - return _parse_every_object(obj, _transformed_from_varbase, - tuple_to_tensor) + return _parse_every_object( + obj, _transformed_from_varbase, tuple_to_tensor + ) # If there is no tuple(name, ndary), it is considered to be saved by paddle2.0 # or converted from LoDTensor, and all ndarrays are converted to tensor. else: - return _parse_every_object(obj, _transformed_from_lodtensor, - ndarray_to_tensor) + return _parse_every_object( + obj, _transformed_from_lodtensor, ndarray_to_tensor + ) def _save_lod_tensor(tensor, file_name): @@ -490,8 +547,10 @@ def _save_lod_tensor(tensor, file_name): else: raise NotImplementedError( - 'Only supports saving objects to file or BytesIO, but received {}'. - format(type(file_name))) + 'Only supports saving objects to file or BytesIO, but received {}'.format( + type(file_name) + ) + ) return _seek @@ -509,8 +568,10 @@ def _load_lod_tensor(file_name): else: raise NotImplementedError( - 'Only supports load objects from file or BytesIO, but received {}'. - format(type(file_name))) + 'Only supports load objects from file or BytesIO, but received {}'.format( + type(file_name) + ) + ) return temp_t, _seek @@ -529,8 +590,10 @@ def _save_selected_rows(selected_rows, file_name): _seek = f.tell() else: raise NotImplementedError( - 'Only supports saving objects to file or BytesIO, but received {}'. - format(type(file_name))) + 'Only supports saving objects to file or BytesIO, but received {}'.format( + type(file_name) + ) + ) return _seek @@ -544,13 +607,16 @@ def _load_selected_rows(file_name): with _open_file_buffer(file_name, 'rb') as f: selected_rows_bytes = f.read() paddle.fluid.core.load_selected_rows_from_memory( - temp_sr, selected_rows_bytes) + temp_sr, selected_rows_bytes + ) _seek = f.tell() else: raise NotImplementedError( - 'Only supports load objects from file or BytesIO, but received {}'. - format(type(file_name))) + 'Only supports load objects from file or BytesIO, but received {}'.format( + type(file_name) + ) + ) return temp_sr, _seek @@ -565,8 +631,10 @@ def _save_binary_var(obj, path): else: # Since the concept of 'Tensor' is only exposed to users, the error message can only contain tensor instead of 'LoDTensor' or 'SelectedRows' raise NotImplementedError( - "When use_binary_format = True, `paddle.save` expected Tensor, but received {}." - .format(type(obj))) + "When use_binary_format = True, `paddle.save` expected Tensor, but received {}.".format( + type(obj) + ) + ) def save(obj, path, protocol=4, **configs): @@ -694,7 +762,8 @@ def save(obj, path, protocol=4, **configs): raise ValueError( "The input path MUST be format of dirname/filename " "[dirname\\filename in Windows system], but received " - "filename is empty string.") + "filename is empty string." + ) # 2. save object dirname = os.path.dirname(path) @@ -702,15 +771,19 @@ def save(obj, path, protocol=4, **configs): os.makedirs(dirname) elif not _is_memory_buffer(path): raise ValueError( - "only supports saving objects to file and `BytesIO`, but got {}". 
- format(type(path))) + "only supports saving objects to file and `BytesIO`, but got {}".format( + type(path) + ) + ) config = _parse_save_config(configs) if not isinstance(config.use_binary_format, bool): raise TypeError( - "Type of `use_binary_format` should be bool, but received {}.". - format(type(config.use_binary_format))) + "Type of `use_binary_format` should be bool, but received {}.".format( + type(config.use_binary_format) + ) + ) if config.use_binary_format: _save_binary_var(obj, path) @@ -742,19 +815,23 @@ def _legacy_save(obj, path, protocol=2): if not isinstance(obj, dict): raise NotImplementedError( "Now only supports save state_dict of Layer or Optimizer, " - "expect dict, but received %s." % type(obj)) + "expect dict, but received %s." % type(obj) + ) if len(obj) == 0: warnings.warn("The input state dict is empty, no need to save.") if not isinstance(protocol, int): - raise ValueError("The 'protocol' MUST be `int`, but received {}".format( - type(protocol))) + raise ValueError( + "The 'protocol' MUST be `int`, but received {}".format( + type(protocol) + ) + ) if protocol < 2 or protocol > 4: raise ValueError( - "Expected 1<'protocol'<5, but received protocol={}".format( - protocol)) + "Expected 1<'protocol'<5, but received protocol={}".format(protocol) + ) if _is_file_path(path): filename = os.path.basename(path) @@ -762,7 +839,8 @@ def _legacy_save(obj, path, protocol=2): raise ValueError( "The input path MUST be format of dirname/filename " "[dirname\\filename in Windows system], but received " - "filename is empty string.") + "filename is empty string." + ) # 2. save object dirname = os.path.dirname(path) if dirname and not os.path.exists(dirname): @@ -774,13 +852,16 @@ def _legacy_save(obj, path, protocol=2): saved_obj = _unpack_saved_dict(saved_obj, protocol) # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' - if _is_file_path( - path) and sys.platform == 'darwin' and sys.version_info.major == 3: + if ( + _is_file_path(path) + and sys.platform == 'darwin' + and sys.version_info.major == 3 + ): pickle_bytes = pickle.dumps(saved_obj, protocol=protocol) with open(path, 'wb') as f: max_bytes = 2**30 for i in range(0, len(pickle_bytes), max_bytes): - f.write(pickle_bytes[i:i + max_bytes]) + f.write(pickle_bytes[i : i + max_bytes]) else: with _open_file_buffer(path, 'wb') as f: pickle.dump(saved_obj, f, protocol=protocol) @@ -947,9 +1028,11 @@ def load(path, **configs): try: with _open_file_buffer(path, 'rb') as f: # When value of dict is lager than 4GB ,there is a Bug on 'MAC python3' - if _is_file_path( - path - ) and sys.platform == 'darwin' and sys.version_info.major == 3: + if ( + _is_file_path(path) + and sys.platform == 'darwin' + and sys.version_info.major == 3 + ): load_result = _pickle_loads_mac(path, f) else: load_result = pickle.load(f, encoding='latin1') @@ -963,18 +1046,24 @@ def load(path, **configs): for key in load_result["StructuredToParameterName@@"]: if isinstance(load_result[key], np.ndarray): load_result[key] = _ndarray_to_tensor( - load_result[key], config.return_numpy) + load_result[key], config.return_numpy + ) - if not config.keep_name_table and "StructuredToParameterName@@" in load_result: + if ( + not config.keep_name_table + and "StructuredToParameterName@@" in load_result + ): del load_result["StructuredToParameterName@@"] else: # paddle2.1 static.save/load load_result = _parse_load_result( - load_result, config.return_numpy) + load_result, config.return_numpy + ) else: - load_result = _parse_load_result(load_result, - 
config.return_numpy) + load_result = _parse_load_result( + load_result, config.return_numpy + ) except exception_type as msg_pickle: try: @@ -994,12 +1083,15 @@ def load(path, **configs): with _open_file_buffer(path, "rb") as f: program_desc_str = f.read() program = Program.parse_from_string( - program_desc_str) + program_desc_str + ) return program except: raise ValueError( "`paddle.load` can not parse the file:{}.".format( - path)) + path + ) + ) else: load_result = _legacy_load(path, **configs) @@ -1016,7 +1108,10 @@ def _legacy_load(path, **configs): with _open_file_buffer(path, 'rb') as f: load_result = pickle.load(f, encoding='latin1') load_result = _pack_loaded_dict(load_result) - if not config.keep_name_table and "StructuredToParameterName@@" in load_result: + if ( + not config.keep_name_table + and "StructuredToParameterName@@" in load_result + ): del load_result["StructuredToParameterName@@"] else: # file prefix and directory are compatible cases @@ -1037,7 +1132,8 @@ def _legacy_load(path, **configs): # the user to configure the `use_structured_name` argument when `set_state_dict` # NOTE(chenweihang): `jit.save` doesn't save optimizer state load_result = _load_state_dict_from_save_inference_model( - model_path, config) + model_path, config + ) else: # load state dict by `io.save_params/persistables` save format # TODO(chenweihang): [ Now only supports loading parameters separately ] diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 50235591cf2f1fa1141b239b755f2621a7943a41..6d7d704808d7e5fd884d6c42e1f0eb5dbd15e0db 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -37,7 +37,7 @@ def seed(seed): gen = paddle.seed(102) """ - #TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade + # TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade # 2. 
support gpu generator by global device seed = int(seed) diff --git a/python/paddle/geometric/math.py b/python/paddle/geometric/math.py index 186fe5c2d7ddd23b5f0a89b50346e1cd0c30a580..a4a27093484e15646fa28febb664bd304d7cc80d 100644 --- a/python/paddle/geometric/math.py +++ b/python/paddle/geometric/math.py @@ -54,29 +54,30 @@ def segment_sum(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "SUM")[0] if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "SUM") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "SUM" + ) return out check_variable_and_dtype( - data, "X", ("float32", "float64", "int32", "int64", "float16"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_sum", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "SUM"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "SUM"}, + ) return out @@ -116,29 +117,30 @@ def segment_mean(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MEAN")[0] if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MEAN") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MEAN" + ) return out check_variable_and_dtype( - data, "X", ("float32", "float64", "int32", "int64", "float16"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_mean", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MEAN"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MEAN"}, + ) return out @@ -177,29 +179,30 @@ def segment_min(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MIN")[0] if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MIN") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MIN" + ) return out check_variable_and_dtype( - data, "X", ("float32", "float64", "int32", "int64", "float16"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + 
check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_min", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MIN"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MIN"}, + ) return out @@ -238,27 +241,28 @@ def segment_max(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MAX")[0] if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MAX") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MAX" + ) return out check_variable_and_dtype( - data, "X", ("float32", "float64", "int32", "int64", "float16"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_max", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MAX"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MAX"}, + ) return out diff --git a/python/paddle/geometric/message_passing/send_recv.py b/python/paddle/geometric/message_passing/send_recv.py index cf290b29bee89be597c10b8eaec258bb402d6873..8a05ef1051626e120b18b79917149ece43e9a866 100644 --- a/python/paddle/geometric/message_passing/send_recv.py +++ b/python/paddle/geometric/message_passing/send_recv.py @@ -16,20 +16,25 @@ import numpy as np from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode from paddle.fluid.framework import Variable -from paddle.fluid.data_feeder import check_dtype, check_type, check_variable_and_dtype +from paddle.fluid.data_feeder import ( + check_dtype, + check_type, + check_variable_and_dtype, +) from paddle import _C_ops, _legacy_C_ops -from .utils import convert_out_size_to_list, get_out_size_tensor_inputs, reshape_lhs_rhs +from .utils import ( + convert_out_size_to_list, + get_out_size_tensor_inputs, + reshape_lhs_rhs, +) __all__ = [] -def send_u_recv(x, - src_index, - dst_index, - reduce_op="sum", - out_size=None, - name=None): +def send_u_recv( + x, src_index, dst_index, reduce_op="sum", out_size=None, name=None +): """ Graph Learning message passing api. @@ -110,66 +115,85 @@ def send_u_recv(x, if reduce_op not in ["sum", "mean", "max", "min"]: raise ValueError( "reduce_op should be `sum`, `mean`, `max` or `min`, but received %s" - % reduce_op) + % reduce_op + ) # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
if _in_legacy_dygraph(): out_size = convert_out_size_to_list(out_size) - out, tmp = _legacy_C_ops.graph_send_recv(x, src_index, dst_index, - None, 'reduce_op', - reduce_op.upper(), 'out_size', - out_size) + out, tmp = _legacy_C_ops.graph_send_recv( + x, + src_index, + dst_index, + None, + 'reduce_op', + reduce_op.upper(), + 'out_size', + out_size, + ) return out if in_dygraph_mode(): out_size = convert_out_size_to_list(out_size) - return _C_ops.graph_send_recv(x, src_index, dst_index, - reduce_op.upper(), out_size) + return _C_ops.graph_send_recv( + x, src_index, dst_index, reduce_op.upper(), out_size + ) check_variable_and_dtype( - x, "X", ("float32", "float64", "int32", "int64", "float16"), - "graph_send_recv") - check_variable_and_dtype(src_index, "Src_index", ("int32", "int64"), - "graph_send_recv") - check_variable_and_dtype(dst_index, "Dst_index", ("int32", "int64"), - "graph_send_recv") + x, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_recv", + ) + check_variable_and_dtype( + src_index, "Src_index", ("int32", "int64"), "graph_send_recv" + ) + check_variable_and_dtype( + dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv" + ) if out_size: - check_type(out_size, 'out_size', (int, np.int32, np.int64, Variable), - 'graph_send_recv') + check_type( + out_size, + 'out_size', + (int, np.int32, np.int64, Variable), + 'graph_send_recv', + ) if isinstance(out_size, Variable): - check_dtype(out_size.dtype, 'out_size', ['int32', 'int64'], - 'graph_send_recv') + check_dtype( + out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_recv' + ) helper = LayerHelper("send_u_recv", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - dst_count = helper.create_variable_for_type_inference(dtype="int32", - stop_gradient=True) + dst_count = helper.create_variable_for_type_inference( + dtype="int32", stop_gradient=True + ) inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index} attrs = {"reduce_op": reduce_op.upper()} - get_out_size_tensor_inputs(inputs=inputs, - attrs=attrs, - out_size=out_size, - op_type='graph_send_recv') - - helper.append_op(type="graph_send_recv", - inputs=inputs, - outputs={ - "Out": out, - "Dst_count": dst_count - }, - attrs=attrs) + get_out_size_tensor_inputs( + inputs=inputs, attrs=attrs, out_size=out_size, op_type='graph_send_recv' + ) + + helper.append_op( + type="graph_send_recv", + inputs=inputs, + outputs={"Out": out, "Dst_count": dst_count}, + attrs=attrs, + ) return out -def send_ue_recv(x, - y, - src_index, - dst_index, - message_op="add", - reduce_op="sum", - out_size=None, - name=None): +def send_ue_recv( + x, + y, + src_index, + dst_index, + message_op="add", + reduce_op="sum", + out_size=None, + name=None, +): """ Graph Learning message passing api. @@ -259,13 +283,15 @@ def send_ue_recv(x, if message_op not in ["add", "sub", "mul", "div"]: raise ValueError( - "message_op should be `add`, `sub`, `mul`, `div`, but received %s" % - message_op) + "message_op should be `add`, `sub`, `mul`, `div`, but received %s" + % message_op + ) if reduce_op not in ["sum", "mean", "max", "min"]: raise ValueError( "reduce_op should be `sum`, `mean`, `max` or `min`, but received %s" - % reduce_op) + % reduce_op + ) x, y = reshape_lhs_rhs(x, y) @@ -274,61 +300,89 @@ def send_ue_recv(x, y = -y if message_op == "div": message_op = 'mul' - y = 1. / (y + 1e-12) + y = 1.0 / (y + 1e-12) # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
if _in_legacy_dygraph(): out_size = convert_out_size_to_list(out_size) - out, tmp = _legacy_C_ops.graph_send_ue_recv(x, y, src_index, dst_index, - None, 'message_op', - message_op.upper(), - 'reduce_op', - reduce_op.upper(), - 'out_size', out_size) + out, tmp = _legacy_C_ops.graph_send_ue_recv( + x, + y, + src_index, + dst_index, + None, + 'message_op', + message_op.upper(), + 'reduce_op', + reduce_op.upper(), + 'out_size', + out_size, + ) return out if in_dygraph_mode(): out_size = convert_out_size_to_list(out_size) - return _C_ops.graph_send_ue_recv(x, y, src_index, dst_index, - message_op.upper(), reduce_op.upper(), - out_size) + return _C_ops.graph_send_ue_recv( + x, + y, + src_index, + dst_index, + message_op.upper(), + reduce_op.upper(), + out_size, + ) check_variable_and_dtype( - x, "X", ("float32", "float64", "int32", "int64", "float16"), - "graph_send_ue_recv") + x, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_ue_recv", + ) + check_variable_and_dtype( + y, + "Y", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_ue_recv", + ) check_variable_and_dtype( - y, "Y", ("float32", "float64", "int32", "int64", "float16"), - "graph_send_ue_recv") - check_variable_and_dtype(src_index, "Src_index", ("int32", "int64"), - "graph_send_ue_recv") - check_variable_and_dtype(dst_index, "Dst_index", ("int32", "int64"), - "graph_send_ue_recv") + src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv" + ) + check_variable_and_dtype( + dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv" + ) if out_size: - check_type(out_size, 'out_size', (int, np.int32, np.int64, Variable), - 'graph_send_ue_recv') + check_type( + out_size, + 'out_size', + (int, np.int32, np.int64, Variable), + 'graph_send_ue_recv', + ) if isinstance(out_size, Variable): - check_dtype(out_size.dtype, 'out_size', ['int32', 'int64'], - 'graph_send_ue_recv') + check_dtype( + out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_ue_recv' + ) helper = LayerHelper("send_ue_recv", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - dst_count = helper.create_variable_for_type_inference(dtype="int32", - stop_gradient=True) + dst_count = helper.create_variable_for_type_inference( + dtype="int32", stop_gradient=True + ) inputs = {"X": x, "Y": y, "Src_index": src_index, "Dst_index": dst_index} attrs = {"message_op": message_op.upper(), "reduce_op": reduce_op.upper()} - get_out_size_tensor_inputs(inputs=inputs, - attrs=attrs, - out_size=out_size, - op_type='graph_send_ue_recv') - - helper.append_op(type="graph_send_ue_recv", - inputs=inputs, - outputs={ - "Out": out, - "Dst_count": dst_count - }, - attrs=attrs) + get_out_size_tensor_inputs( + inputs=inputs, + attrs=attrs, + out_size=out_size, + op_type='graph_send_ue_recv', + ) + + helper.append_op( + type="graph_send_ue_recv", + inputs=inputs, + outputs={"Out": out, "Dst_count": dst_count}, + attrs=attrs, + ) return out @@ -397,8 +451,9 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None): if message_op not in ['add', 'sub', 'mul', 'div']: raise ValueError( - "message_op should be `add`, `sub`, `mul`, `div`, but received %s" % - message_op) + "message_op should be `add`, `sub`, `mul`, `div`, but received %s" + % message_op + ) x, y = reshape_lhs_rhs(x, y) @@ -407,38 +462,50 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None): y = -y if message_op == 'div': message_op = 'mul' - y = 1. 
/ (y + 1e-12) + y = 1.0 / (y + 1e-12) if in_dygraph_mode(): - return _C_ops.graph_send_uv(x, y, src_index, dst_index, - message_op.upper()) + return _C_ops.graph_send_uv( + x, y, src_index, dst_index, message_op.upper() + ) else: if _in_legacy_dygraph(): - return _legacy_C_ops.graph_send_uv(x, y, src_index, dst_index, - "message_op", message_op.upper()) + return _legacy_C_ops.graph_send_uv( + x, y, src_index, dst_index, "message_op", message_op.upper() + ) else: helper = LayerHelper("send_uv", **locals()) check_variable_and_dtype( - x, 'x', ['int32', 'int64', 'float32', 'float64', 'float16'], - 'graph_send_uv') + x, + 'x', + ['int32', 'int64', 'float32', 'float64', 'float16'], + 'graph_send_uv', + ) + check_variable_and_dtype( + y, + 'y', + ['int32', 'int64', 'float32', 'float64', 'float16'], + 'graph_send_uv', + ) + check_variable_and_dtype( + src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv' + ) check_variable_and_dtype( - y, 'y', ['int32', 'int64', 'float32', 'float64', 'float16'], - 'graph_send_uv') - check_variable_and_dtype(src_index, 'src_index', ['int32', 'int64'], - 'graph_send_uv') - check_variable_and_dtype(dst_index, 'dst_index', ['int32', 'int64'], - 'graph_send_uv') + dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) inputs = { 'x': x, 'y': y, 'src_index': src_index, - 'dst_index': dst_index + 'dst_index': dst_index, } attrs = {'message_op': message_op.upper()} - helper.append_op(type="graph_send_uv", - inputs=inputs, - attrs=attrs, - outputs={"out": out}) + helper.append_op( + type="graph_send_uv", + inputs=inputs, + attrs=attrs, + outputs={"out": out}, + ) return out diff --git a/python/paddle/geometric/message_passing/utils.py b/python/paddle/geometric/message_passing/utils.py index 51c088522983b79dd9edbe4b90c866ea25eeaca1..12e2e52d6b454d8ece6fd3f1e2969b5207429458 100644 --- a/python/paddle/geometric/message_passing/utils.py +++ b/python/paddle/geometric/message_passing/utils.py @@ -44,9 +44,14 @@ def get_out_size_tensor_inputs(inputs, attrs, out_size, op_type): attrs['out_size'] = [out_size] elif isinstance(out_size, Variable): out_size.stop_gradient = True - check_dtype(out_size.dtype, 'out_size', ['int32', 'int64'], 'op_type', - '(When type of out_size in' + op_type + ' is Variable.)') - if (convert_dtype(out_size.dtype) == 'int64'): + check_dtype( + out_size.dtype, + 'out_size', + ['int32', 'int64'], + 'op_type', + '(When type of out_size in' + op_type + ' is Variable.)', + ) + if convert_dtype(out_size.dtype) == 'int64': out_size = cast(out_size, 'int32') inputs["Out_size"] = out_size else: @@ -69,16 +74,26 @@ def reshape_lhs_rhs(x, y): max_ndims = max(len(x.shape), len(y.shape)) x_pad_ndims = max_ndims - len(x.shape) y_pad_ndims = max_ndims - len(y.shape) - new_x_shape = [ - x_shape[0], - ] + [ - 1, - ] * x_pad_ndims + list(x_shape[1:]) - new_y_shape = [ - y_shape[0], - ] + [ - 1, - ] * y_pad_ndims + list(y_shape[1:]) + new_x_shape = ( + [ + x_shape[0], + ] + + [ + 1, + ] + * x_pad_ndims + + list(x_shape[1:]) + ) + new_y_shape = ( + [ + y_shape[0], + ] + + [ + 1, + ] + * y_pad_ndims + + list(y_shape[1:]) + ) x = paddle.reshape(x, new_x_shape) y = paddle.reshape(y, new_y_shape) diff --git a/python/paddle/geometric/reindex.py b/python/paddle/geometric/reindex.py index 3c4cfb964ed174f2fdf83b7b94636996616e855c..ade88222bbe5545100209c624ae97a3936567304 100644 --- a/python/paddle/geometric/reindex.py +++ b/python/paddle/geometric/reindex.py @@ -21,12 +21,9 @@ from paddle import 
_legacy_C_ops __all__ = [] -def reindex_graph(x, - neighbors, - count, - value_buffer=None, - index_buffer=None, - name=None): +def reindex_graph( + x, neighbors, count, value_buffer=None, index_buffer=None, name=None +): """ Reindex Graph API. @@ -93,58 +90,62 @@ def reindex_graph(x, # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6] """ - use_buffer_hashtable = True if value_buffer is not None \ - and index_buffer is not None else False + use_buffer_hashtable = ( + True if value_buffer is not None and index_buffer is not None else False + ) if _non_static_mode(): - reindex_src, reindex_dst, out_nodes = \ - _legacy_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer, - "flag_buffer_hashtable", use_buffer_hashtable) + reindex_src, reindex_dst, out_nodes = _legacy_C_ops.graph_reindex( + x, + neighbors, + count, + value_buffer, + index_buffer, + "flag_buffer_hashtable", + use_buffer_hashtable, + ) return reindex_src, reindex_dst, out_nodes check_variable_and_dtype(x, "X", ("int32", "int64"), "graph_reindex") - check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"), - "graph_reindex") + check_variable_and_dtype( + neighbors, "Neighbors", ("int32", "int64"), "graph_reindex" + ) check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex") if use_buffer_hashtable: - check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"), - "graph_reindex") - check_variable_and_dtype(index_buffer, "HashTable_Index", ("int32"), - "graph_reindex") + check_variable_and_dtype( + value_buffer, "HashTable_Value", ("int32"), "graph_reindex" + ) + check_variable_and_dtype( + index_buffer, "HashTable_Index", ("int32"), "graph_reindex" + ) helper = LayerHelper("reindex_graph", **locals()) reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype) reindex_dst = helper.create_variable_for_type_inference(dtype=x.dtype) out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="graph_reindex", - inputs={ - "X": - x, - "Neighbors": - neighbors, - "Count": - count, - "HashTable_Value": - value_buffer if use_buffer_hashtable else None, - "HashTable_Index": - index_buffer if use_buffer_hashtable else None, - }, - outputs={ - "Reindex_Src": reindex_src, - "Reindex_Dst": reindex_dst, - "Out_Nodes": out_nodes - }, - attrs={"flag_buffer_hashtable": use_buffer_hashtable}) + helper.append_op( + type="graph_reindex", + inputs={ + "X": x, + "Neighbors": neighbors, + "Count": count, + "HashTable_Value": value_buffer if use_buffer_hashtable else None, + "HashTable_Index": index_buffer if use_buffer_hashtable else None, + }, + outputs={ + "Reindex_Src": reindex_src, + "Reindex_Dst": reindex_dst, + "Out_Nodes": out_nodes, + }, + attrs={"flag_buffer_hashtable": use_buffer_hashtable}, + ) return reindex_src, reindex_dst, out_nodes -def reindex_heter_graph(x, - neighbors, - count, - value_buffer=None, - index_buffer=None, - name=None): +def reindex_heter_graph( + x, neighbors, count, value_buffer=None, index_buffer=None, name=None +): """ Reindex HeterGraph API. 
@@ -220,15 +221,22 @@ def reindex_heter_graph(x, # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5] """ - use_buffer_hashtable = True if value_buffer is not None \ - and index_buffer is not None else False + use_buffer_hashtable = ( + True if value_buffer is not None and index_buffer is not None else False + ) if _non_static_mode(): neighbors = paddle.concat(neighbors, axis=0) count = paddle.concat(count, axis=0) - reindex_src, reindex_dst, out_nodes = \ - _legacy_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer, - "flag_buffer_hashtable", use_buffer_hashtable) + reindex_src, reindex_dst, out_nodes = _legacy_C_ops.graph_reindex( + x, + neighbors, + count, + value_buffer, + index_buffer, + "flag_buffer_hashtable", + use_buffer_hashtable, + ) return reindex_src, reindex_dst, out_nodes if isinstance(neighbors, Variable): @@ -240,15 +248,18 @@ def reindex_heter_graph(x, count = paddle.concat(count, axis=0) check_variable_and_dtype(x, "X", ("int32", "int64"), "heter_graph_reindex") - check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"), - "graph_reindex") + check_variable_and_dtype( + neighbors, "Neighbors", ("int32", "int64"), "graph_reindex" + ) check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex") if use_buffer_hashtable: - check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"), - "graph_reindex") - check_variable_and_dtype(index_buffer, "HashTable_Index", ("int32"), - "graph_reindex") + check_variable_and_dtype( + value_buffer, "HashTable_Value", ("int32"), "graph_reindex" + ) + check_variable_and_dtype( + index_buffer, "HashTable_Index", ("int32"), "graph_reindex" + ) helper = LayerHelper("reindex_heter_graph", **locals()) reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -256,23 +267,20 @@ def reindex_heter_graph(x, out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype) neighbors = paddle.concat(neighbors, axis=0) count = paddle.concat(count, axis=0) - helper.append_op(type="graph_reindex", - inputs={ - "X": - x, - "Neighbors": - neighbors, - "Count": - count, - "HashTable_Value": - value_buffer if use_buffer_hashtable else None, - "HashTable_Index": - index_buffer if use_buffer_hashtable else None, - }, - outputs={ - "Reindex_Src": reindex_src, - "Reindex_Dst": reindex_dst, - "Out_Nodes": out_nodes - }, - attrs={"flag_buffer_hashtable": use_buffer_hashtable}) + helper.append_op( + type="graph_reindex", + inputs={ + "X": x, + "Neighbors": neighbors, + "Count": count, + "HashTable_Value": value_buffer if use_buffer_hashtable else None, + "HashTable_Index": index_buffer if use_buffer_hashtable else None, + }, + outputs={ + "Reindex_Src": reindex_src, + "Reindex_Dst": reindex_dst, + "Out_Nodes": out_nodes, + }, + attrs={"flag_buffer_hashtable": use_buffer_hashtable}, + ) return reindex_src, reindex_dst, out_nodes diff --git a/python/paddle/geometric/sampling/neighbors.py b/python/paddle/geometric/sampling/neighbors.py index 63ec44b4f39d35e405b80f91eff13def55601d98..1353bd5814fa8ff4643810400f18cfe5fb302ef4 100644 --- a/python/paddle/geometric/sampling/neighbors.py +++ b/python/paddle/geometric/sampling/neighbors.py @@ -20,14 +20,16 @@ from paddle import _legacy_C_ops __all__ = [] -def sample_neighbors(row, - colptr, - input_nodes, - sample_size=-1, - eids=None, - return_eids=False, - perm_buffer=None, - name=None): +def sample_neighbors( + row, + colptr, + input_nodes, + sample_size=-1, + eids=None, + return_eids=False, + perm_buffer=None, + name=None, +): """ Graph Sample Neighbors API. 
@@ -90,54 +92,78 @@ def sample_neighbors(row, if return_eids: if eids is None: raise ValueError( - "`eids` should not be None if `return_eids` is True.") + "`eids` should not be None if `return_eids` is True." + ) use_perm_buffer = True if perm_buffer is not None else False if _non_static_mode(): - out_neighbors, out_count, out_eids = _legacy_C_ops.graph_sample_neighbors( - row, colptr, input_nodes, eids, perm_buffer, "sample_size", - sample_size, "return_eids", return_eids, "flag_perm_buffer", - use_perm_buffer) + ( + out_neighbors, + out_count, + out_eids, + ) = _legacy_C_ops.graph_sample_neighbors( + row, + colptr, + input_nodes, + eids, + perm_buffer, + "sample_size", + sample_size, + "return_eids", + return_eids, + "flag_perm_buffer", + use_perm_buffer, + ) if return_eids: return out_neighbors, out_count, out_eids return out_neighbors, out_count - check_variable_and_dtype(row, "Row", ("int32", "int64"), - "graph_sample_neighbors") - check_variable_and_dtype(colptr, "Col_Ptr", ("int32", "int64"), - "graph_sample_neighbors") - check_variable_and_dtype(input_nodes, "X", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + row, "Row", ("int32", "int64"), "graph_sample_neighbors" + ) + check_variable_and_dtype( + colptr, "Col_Ptr", ("int32", "int64"), "graph_sample_neighbors" + ) + check_variable_and_dtype( + input_nodes, "X", ("int32", "int64"), "graph_sample_neighbors" + ) if return_eids: - check_variable_and_dtype(eids, "Eids", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + eids, "Eids", ("int32", "int64"), "graph_sample_neighbors" + ) if use_perm_buffer: - check_variable_and_dtype(perm_buffer, "Perm_Buffer", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + perm_buffer, + "Perm_Buffer", + ("int32", "int64"), + "graph_sample_neighbors", + ) helper = LayerHelper("sample_neighbors", **locals()) out_neighbors = helper.create_variable_for_type_inference(dtype=row.dtype) out_count = helper.create_variable_for_type_inference(dtype=row.dtype) out_eids = helper.create_variable_for_type_inference(dtype=row.dtype) - helper.append_op(type="graph_sample_neighbors", - inputs={ - "Row": row, - "Col_Ptr": colptr, - "X": input_nodes, - "Eids": eids if return_eids else None, - "Perm_Buffer": perm_buffer if use_perm_buffer else None - }, - outputs={ - "Out": out_neighbors, - "Out_Count": out_count, - "Out_Eids": out_eids - }, - attrs={ - "sample_size": sample_size, - "return_eids": return_eids, - "flag_perm_buffer": use_perm_buffer - }) + helper.append_op( + type="graph_sample_neighbors", + inputs={ + "Row": row, + "Col_Ptr": colptr, + "X": input_nodes, + "Eids": eids if return_eids else None, + "Perm_Buffer": perm_buffer if use_perm_buffer else None, + }, + outputs={ + "Out": out_neighbors, + "Out_Count": out_count, + "Out_Eids": out_eids, + }, + attrs={ + "sample_size": sample_size, + "return_eids": return_eids, + "flag_perm_buffer": use_perm_buffer, + }, + ) if return_eids: return out_neighbors, out_count, out_eids return out_neighbors, out_count diff --git a/python/paddle/hapi/callbacks.py b/python/paddle/hapi/callbacks.py index 886a81801595806a03d8940517378050c18e3717..af53b181436ae573c00b5e86143ddb01bb8014db 100644 --- a/python/paddle/hapi/callbacks.py +++ b/python/paddle/hapi/callbacks.py @@ -28,17 +28,19 @@ from .progressbar import ProgressBar __all__ = [] -def config_callbacks(callbacks=None, - model=None, - batch_size=None, - epochs=None, - steps=None, - log_freq=2, - verbose=2, - save_freq=1, - save_dir=None, - 
metrics=None, - mode='train'): +def config_callbacks( + callbacks=None, + model=None, + batch_size=None, + epochs=None, + steps=None, + log_freq=2, + verbose=2, + save_freq=1, + save_dir=None, + metrics=None, + mode='train', +): cbks = callbacks or [] cbks = cbks if isinstance(cbks, (list, tuple)) else [cbks] if not any(isinstance(k, ProgBarLogger) for k in cbks) and verbose: @@ -68,7 +70,6 @@ def config_callbacks(callbacks=None, class CallbackList(object): - def __init__(self, callbacks=None): # copy self.callbacks = [c for c in callbacks] @@ -95,8 +96,11 @@ class CallbackList(object): func(*args) def _check_mode(self, mode): - assert mode in ['train', 'eval', 'predict'], \ - 'mode should be train, eval or predict' + assert mode in [ + 'train', + 'eval', + 'predict', + ], 'mode should be train, eval or predict' def on_begin(self, mode, logs=None): self._check_mode(mode) @@ -167,8 +171,7 @@ class Callback(object): self.params = params def set_model(self, model): - """model is instance of paddle.Model. - """ + """model is instance of paddle.Model.""" self.model = model def on_train_begin(self, logs=None): @@ -391,31 +394,39 @@ class ProgBarLogger(Callback): cnt = timer['count'] if timer['count'] > 0 else 1.0 samples = timer['samples'] if timer['samples'] > 0 else 1.0 values.append( - ('avg_reader_cost', "%.5f sec" % (timer['data_time'] / cnt))) + ('avg_reader_cost', "%.5f sec" % (timer['data_time'] / cnt)) + ) values.append( - ('avg_batch_cost', "%.5f sec" % (timer['batch_time'] / cnt))) + ('avg_batch_cost', "%.5f sec" % (timer['batch_time'] / cnt)) + ) values.append( - ('ips', "%.5f samples/sec" % - (samples / (timer['data_time'] + timer['batch_time'])))) + ( + 'ips', + "%.5f samples/sec" + % (samples / (timer['data_time'] + timer['batch_time'])), + ) + ) timer['count'] = 0 timer['samples'] = 0 - timer['data_time'] = 0. - timer['batch_time'] = 0. 
+ timer['data_time'] = 0.0 + timer['batch_time'] = 0.0 progbar.update(steps, values) def on_train_batch_begin(self, step, logs=None): self._train_timer['batch_data_end_time'] = time.time() self._train_timer['data_time'] += ( - self._train_timer['batch_data_end_time'] - - self._train_timer['batch_start_time']) + self._train_timer['batch_data_end_time'] + - self._train_timer['batch_start_time'] + ) def on_train_batch_end(self, step, logs=None): logs = logs or {} self.train_step += 1 self._train_timer['batch_time'] += ( - time.time() - self._train_timer['batch_data_end_time']) + time.time() - self._train_timer['batch_data_end_time'] + ) self._train_timer['count'] += 1 samples = logs.get('batch_size', 1) self._train_timer['samples'] += samples @@ -442,8 +453,9 @@ class ProgBarLogger(Callback): 'samples': 0, } - self.eval_progbar = ProgressBar(num=self.eval_steps, - verbose=self.verbose) + self.eval_progbar = ProgressBar( + num=self.eval_steps, verbose=self.verbose + ) if self._is_print(): print('Eval begin...') @@ -452,8 +464,9 @@ class ProgBarLogger(Callback): def on_eval_batch_begin(self, step, logs=None): self._eval_timer['batch_data_end_time'] = time.time() self._eval_timer['data_time'] += ( - self._eval_timer['batch_data_end_time'] - - self._eval_timer['batch_start_time']) + self._eval_timer['batch_data_end_time'] + - self._eval_timer['batch_start_time'] + ) def on_eval_batch_end(self, step, logs=None): logs = logs or {} @@ -462,7 +475,8 @@ class ProgBarLogger(Callback): self.evaled_samples += samples self._eval_timer['batch_time'] += ( - time.time() - self._eval_timer['batch_data_end_time']) + time.time() - self._eval_timer['batch_data_end_time'] + ) self._eval_timer['count'] += 1 samples = logs.get('batch_size', 1) self._eval_timer['samples'] += samples @@ -486,8 +500,9 @@ class ProgBarLogger(Callback): 'samples': 0, } - self.test_progbar = ProgressBar(num=self.test_steps, - verbose=self.verbose) + self.test_progbar = ProgressBar( + num=self.test_steps, verbose=self.verbose + ) if self._is_print(): print('Predict begin...') @@ -496,8 +511,9 @@ class ProgBarLogger(Callback): def on_predict_batch_begin(self, step, logs=None): self._test_timer['batch_data_end_time'] = time.time() self._test_timer['data_time'] += ( - self._test_timer['batch_data_end_time'] - - self._test_timer['batch_start_time']) + self._test_timer['batch_data_end_time'] + - self._test_timer['batch_start_time'] + ) def on_predict_batch_end(self, step, logs=None): logs = logs or {} @@ -506,7 +522,8 @@ class ProgBarLogger(Callback): self.tested_samples += samples self._test_timer['batch_time'] += ( - time.time() - self._test_timer['batch_data_end_time']) + time.time() - self._test_timer['batch_data_end_time'] + ) self._test_timer['count'] += 1 samples = logs.get('batch_size', 1) self._test_timer['samples'] += samples @@ -665,25 +682,34 @@ class LRScheduler(Callback): def __init__(self, by_step=True, by_epoch=False): if by_step and by_epoch: raise ValueError( - "by_step option is mutually exclusive with by_epoch") + "by_step option is mutually exclusive with by_epoch" + ) self.by_step = by_step self.by_epoch = by_epoch def on_epoch_end(self, epoch, logs=None): if self.by_epoch: - if self.model._optimizer and \ - hasattr(self.model._optimizer, '_learning_rate') and \ - isinstance(self.model._optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): + if ( + self.model._optimizer + and hasattr(self.model._optimizer, '_learning_rate') + and isinstance( + self.model._optimizer._learning_rate, + 
paddle.optimizer.lr.LRScheduler, + ) + ): self.model._optimizer._learning_rate.step() def on_train_batch_end(self, step, logs=None): if self.by_step: - if self.model._optimizer and \ - hasattr(self.model._optimizer, '_learning_rate') and \ - isinstance(self.model._optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): + if ( + self.model._optimizer + and hasattr(self.model._optimizer, '_learning_rate') + and isinstance( + self.model._optimizer._learning_rate, + paddle.optimizer.lr.LRScheduler, + ) + ): self.model._optimizer._learning_rate.step() @@ -761,14 +787,16 @@ class EarlyStopping(Callback): callbacks=[callbacks]) """ - def __init__(self, - monitor='loss', - mode='auto', - patience=0, - verbose=1, - min_delta=0, - baseline=None, - save_best_model=True): + def __init__( + self, + monitor='loss', + mode='auto', + patience=0, + verbose=1, + min_delta=0, + baseline=None, + save_best_model=True, + ): super(EarlyStopping, self).__init__() self.monitor = monitor self.patience = patience @@ -782,8 +810,10 @@ class EarlyStopping(Callback): # The value of `save_dir` is set in function `config_callbacks` self.save_dir = None if mode not in ['auto', 'min', 'max']: - warnings.warn('EarlyStopping mode %s is unknown, ' - 'fallback to auto mode.' % mode) + warnings.warn( + 'EarlyStopping mode %s is unknown, ' + 'fallback to auto mode.' % mode + ) mode = 'auto' if mode == 'min': self.monitor_op = np.less @@ -812,7 +842,8 @@ class EarlyStopping(Callback): def on_eval_end(self, logs=None): if logs is None or self.monitor not in logs: warnings.warn( - 'Monitor of EarlyStopping should be loss or metric name.') + 'Monitor of EarlyStopping should be loss or metric name.' + ) return current = logs[self.monitor] if isinstance(current, (list, tuple)): @@ -835,9 +866,14 @@ class EarlyStopping(Callback): if self.verbose > 0: print('Epoch %d: Early stopping.' % (self.stopped_epoch + 1)) if self.save_best_model and self.save_dir is not None: - print('Best checkpoint has been saved at %s' % - (os.path.abspath( - os.path.join(self.save_dir, 'best_model')))) + print( + 'Best checkpoint has been saved at %s' + % ( + os.path.abspath( + os.path.join(self.save_dir, 'best_model') + ) + ) + ) self.stopped_epoch += 1 @@ -926,9 +962,9 @@ class VisualDL(Callback): else: continue - self.writer.add_scalar(tag=temp_tag, - step=total_step, - value=temp_value) + self.writer.add_scalar( + tag=temp_tag, step=total_step, value=temp_value + ) def on_train_batch_end(self, step, logs=None): logs = logs or {} @@ -1022,21 +1058,24 @@ class ReduceLROnPlateau(Callback): """ - def __init__(self, - monitor='loss', - factor=0.1, - patience=10, - verbose=1, - mode='auto', - min_delta=1e-4, - cooldown=0, - min_lr=0): + def __init__( + self, + monitor='loss', + factor=0.1, + patience=10, + verbose=1, + mode='auto', + min_delta=1e-4, + cooldown=0, + min_lr=0, + ): super(ReduceLROnPlateau, self).__init__() self.monitor = monitor if factor >= 1.0: - raise ValueError('ReduceLROnPlateau ' - 'does not support a factor >= 1.0.') + raise ValueError( + 'ReduceLROnPlateau ' 'does not support a factor >= 1.0.' + ) self.factor = factor self.min_lr = min_lr @@ -1053,14 +1092,16 @@ class ReduceLROnPlateau(Callback): self._reset() def _reset(self): - """Resets wait counter and cooldown counter. - """ + """Resets wait counter and cooldown counter.""" if self.mode not in ['auto', 'min', 'max']: - warnings.warn('Learning rate reduction mode %s is unknown, ' - 'fallback to auto mode.' 
% self.mode) + warnings.warn( + 'Learning rate reduction mode %s is unknown, ' + 'fallback to auto mode.' % self.mode + ) self.mode = 'auto' - if (self.mode == 'min' - or (self.mode == 'auto' and 'acc' not in self.monitor)): + if self.mode == 'min' or ( + self.mode == 'auto' and 'acc' not in self.monitor + ): self.monitor_op = lambda a, b: np.less(a, b - self.min_delta) self.best = np.Inf else: @@ -1075,7 +1116,8 @@ class ReduceLROnPlateau(Callback): def on_eval_end(self, logs=None): if logs is None or self.monitor not in logs: warnings.warn( - 'Monitor of ReduceLROnPlateau should be loss or metric name.') + 'Monitor of ReduceLROnPlateau should be loss or metric name.' + ) return else: try: @@ -1083,12 +1125,16 @@ class ReduceLROnPlateau(Callback): if not isinstance(lr, float): warnings.warn( 'Expected learning_rate be float, bug got {}.'.format( - type(lr))) + type(lr) + ) + ) return except Exception as e: warnings.warn( - 'There are something wrong when get learning_rate from optimizer: {}.' - .format(e)) + 'There are something wrong when get learning_rate from optimizer: {}.'.format( + e + ) + ) return current = logs[self.monitor] @@ -1115,8 +1161,10 @@ class ReduceLROnPlateau(Callback): new_lr = max(new_lr, self.min_lr) self.model._optimizer._learning_rate = new_lr if self.verbose > 0 and ParallelEnv().local_rank == 0: - print('\nEpoch %d: ReduceLROnPlateau reducing learning ' - 'rate to %s.' % (self.epoch + 1, new_lr)) + print( + '\nEpoch %d: ReduceLROnPlateau reducing learning ' + 'rate to %s.' % (self.epoch + 1, new_lr) + ) self.cooldown_counter = self.cooldown self.wait = 0 self.epoch += 1 diff --git a/python/paddle/hapi/dynamic_flops.py b/python/paddle/hapi/dynamic_flops.py index 8e2bed5bc747f2fd73015327fb0a05b73b71461b..50284e93e3b90b38a7275ac5e39b8cb5afab376f 100644 --- a/python/paddle/hapi/dynamic_flops.py +++ b/python/paddle/hapi/dynamic_flops.py @@ -17,7 +17,9 @@ import warnings import paddle.nn as nn import numpy as np from .static_flops import static_flops, Table -from paddle.fluid.dygraph.dygraph_to_static.program_translator import unwrap_decorators +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + unwrap_decorators, +) __all__ = [] @@ -106,10 +108,9 @@ def flops(net, input_size, custom_ops=None, print_detail=False): _, net.forward = unwrap_decorators(net.forward) inputs = paddle.randn(input_size) - return dynamic_flops(net, - inputs=inputs, - custom_ops=custom_ops, - print_detail=print_detail) + return dynamic_flops( + net, inputs=inputs, custom_ops=custom_ops, print_detail=print_detail + ) elif isinstance(net, paddle.static.Program): return static_flops(net, print_detail=print_detail) else: @@ -123,8 +124,9 @@ def count_convNd(m, x, y): x = x[0] kernel_ops = np.product(m.weight.shape[2:]) bias_ops = 1 if m.bias is not None else 0 - total_ops = int( - y.numel()) * (x.shape[1] / m._groups * kernel_ops + bias_ops) + total_ops = int(y.numel()) * ( + x.shape[1] / m._groups * kernel_ops + bias_ops + ) m.total_ops += abs(int(total_ops)) @@ -205,7 +207,7 @@ register_hooks = { nn.AvgPool3D: count_avgpool, nn.AdaptiveAvgPool1D: count_adap_avgpool, nn.AdaptiveAvgPool2D: count_adap_avgpool, - nn.AdaptiveAvgPool3D: count_adap_avgpool + nn.AdaptiveAvgPool3D: count_adap_avgpool, } @@ -227,7 +229,8 @@ def dynamic_flops(model, inputs, custom_ops=None, print_detail=False): flops_fn = custom_ops[m_type] if m_type not in types_collection: print( - "Customize Function has been applied to {}".format(m_type)) + "Customize Function has been applied to {}".format(m_type) 
+ ) elif m_type in register_hooks: flops_fn = register_hooks[m_type] if m_type not in types_collection: @@ -235,8 +238,10 @@ def dynamic_flops(model, inputs, custom_ops=None, print_detail=False): else: if m_type not in types_collection: print( - "Cannot find suitable count function for {}. Treat it as zero FLOPs." - .format(m_type)) + "Cannot find suitable count function for {}. Treat it as zero FLOPs.".format( + m_type + ) + ) if flops_fn is not None: flops_handler = m.register_forward_post_hook(flops_fn) @@ -260,8 +265,12 @@ def dynamic_flops(model, inputs, custom_ops=None, print_detail=False): for m in model.sublayers(): if len(list(m.children())) > 0: continue - if {'total_ops', 'total_params', 'input_shape', - 'output_shape'}.issubset(set(list(m._buffers.keys()))): + if { + 'total_ops', + 'total_params', + 'input_shape', + 'output_shape', + }.issubset(set(list(m._buffers.keys()))): total_ops += m.total_ops total_params += m.total_params @@ -271,26 +280,36 @@ def dynamic_flops(model, inputs, custom_ops=None, print_detail=False): handler.remove() table = Table( - ["Layer Name", "Input Shape", "Output Shape", "Params", "Flops"]) + ["Layer Name", "Input Shape", "Output Shape", "Params", "Flops"] + ) for n, m in model.named_sublayers(): if len(list(m.children())) > 0: continue - if {'total_ops', 'total_params', 'input_shape', - 'output_shape'}.issubset(set(list(m._buffers.keys()))): - table.add_row([ - m.full_name(), - list(m.input_shape.numpy()), - list(m.output_shape.numpy()), - int(m.total_params), - int(m.total_ops) - ]) + if { + 'total_ops', + 'total_params', + 'input_shape', + 'output_shape', + }.issubset(set(list(m._buffers.keys()))): + table.add_row( + [ + m.full_name(), + list(m.input_shape.numpy()), + list(m.output_shape.numpy()), + int(m.total_params), + int(m.total_ops), + ] + ) m._buffers.pop("total_ops") m._buffers.pop("total_params") m._buffers.pop('input_shape') m._buffers.pop('output_shape') if print_detail: table.print_table() - print('Total Flops: {} Total Params: {}'.format(int(total_ops), - int(total_params))) + print( + 'Total Flops: {} Total Params: {}'.format( + int(total_ops), int(total_params) + ) + ) return int(total_ops) diff --git a/python/paddle/hapi/hub.py b/python/paddle/hapi/hub.py index ccf03aa2db097defa0ec6328a990842b3b677aa0..3d5cb3027c49d35896fa946a1192997d16d3dd1a 100644 --- a/python/paddle/hapi/hub.py +++ b/python/paddle/hapi/hub.py @@ -53,10 +53,12 @@ def _import_module(name, repo_dir): def _git_archive_link(repo_owner, repo_name, branch, source): if source == 'github': return 'https://github.com/{}/{}/archive/{}.zip'.format( - repo_owner, repo_name, branch) + repo_owner, repo_name, branch + ) elif source == 'gitee': return 'https://gitee.com/{}/{}/repository/archive/{}.zip'.format( - repo_owner, repo_name, branch) + repo_owner, repo_name, branch + ) def _parse_repo_info(repo, source): @@ -94,8 +96,9 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'): # We don't know the repo name before downloading the zip file # and inspect name from it. # To check if cached repo exists, we need to normalize folder names. 
- repo_dir = os.path.join(hub_dir, - '_'.join([repo_owner, repo_name, normalized_br])) + repo_dir = os.path.join( + hub_dir, '_'.join([repo_owner, repo_name, normalized_br]) + ) use_cache = (not force_reload) and os.path.exists(repo_dir) @@ -113,7 +116,8 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'): hub_dir, check_exist=not force_reload, decompress=False, - method=('wget' if source == 'gitee' else 'get')) + method=('wget' if source == 'gitee' else 'get'), + ) shutil.move(fpath, cached_file) with zipfile.ZipFile(cached_file) as cached_zipfile: @@ -132,11 +136,11 @@ def _get_cache_or_reload(repo, force_reload, verbose=True, source='github'): def _load_entry_from_hubconf(m, name): - '''load entry from hubconf - ''' + '''load entry from hubconf''' if not isinstance(name, str): raise ValueError( - 'Invalid input: model should be a str of function name') + 'Invalid input: model should be a str of function name' + ) func = getattr(m, name, None) @@ -162,8 +166,9 @@ def _check_dependencies(m): pkg for pkg in dependencies if not _check_module_exists(pkg) ] if len(missing_deps): - raise RuntimeError('Missing dependencies: {}'.format( - ', '.join(missing_deps))) + raise RuntimeError( + 'Missing dependencies: {}'.format(', '.join(missing_deps)) + ) def list(repo_dir, source='github', force_reload=False): @@ -193,19 +198,21 @@ def list(repo_dir, source='github', force_reload=False): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".' - .format(source)) + 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".'.format( + source + ) + ) if source in ('github', 'gitee'): - repo_dir = _get_cache_or_reload(repo_dir, - force_reload, - True, - source=source) + repo_dir = _get_cache_or_reload( + repo_dir, force_reload, True, source=source + ) hub_module = _import_module(MODULE_HUBCONF.split('.')[0], repo_dir) entrypoints = [ - f for f in dir(hub_module) + f + for f in dir(hub_module) if callable(getattr(hub_module, f)) and not f.startswith('_') ] @@ -240,14 +247,15 @@ def help(repo_dir, model, source='github', force_reload=False): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".' - .format(source)) + 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".'.format( + source + ) + ) if source in ('github', 'gitee'): - repo_dir = _get_cache_or_reload(repo_dir, - force_reload, - True, - source=source) + repo_dir = _get_cache_or_reload( + repo_dir, force_reload, True, source=source + ) hub_module = _import_module(MODULE_HUBCONF.split('.')[0], repo_dir) @@ -283,14 +291,15 @@ def load(repo_dir, model, source='github', force_reload=False, **kwargs): """ if source not in ('github', 'gitee', 'local'): raise ValueError( - 'Unknown source: "{}". Allowed values: "github" | "gitee" | "local".' - .format(source)) + 'Unknown source: "{}". 
Allowed values: "github" | "gitee" | "local".'.format( + source + ) + ) if source in ('github', 'gitee'): - repo_dir = _get_cache_or_reload(repo_dir, - force_reload, - True, - source=source) + repo_dir = _get_cache_or_reload( + repo_dir, force_reload, True, source=source + ) hub_module = _import_module(MODULE_HUBCONF.split('.')[0], repo_dir) diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 788043477a2598ed46587ef87b04df90d4a99e77..a790375a742dfc4724be4ccb0e2fe9ae1fea9dfb 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -64,8 +64,9 @@ def to_list(value): def to_numpy(var): - assert isinstance(var, (Variable, fluid.core.VarBase, - fluid.core.eager.Tensor)), "not a variable" + assert isinstance( + var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor) + ), "not a variable" if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)): return var.numpy() t = global_scope().find_var(var.name).get_tensor() @@ -100,10 +101,9 @@ def extract_args(func): def _all_gather(x, nranks, ring_id=0, use_calc_stream=True): - return collective._c_allgather(x, - nranks, - ring_id=ring_id, - use_calc_stream=use_calc_stream) + return collective._c_allgather( + x, nranks, ring_id=ring_id, use_calc_stream=use_calc_stream + ) def wait_server_ready(endpoints): @@ -114,7 +114,8 @@ def wait_server_ready(endpoints): for ep in endpoints: ip_port = ep.split(":") with contextlib.closing( - socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ) as sock: sock.settimeout(2) result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: @@ -126,8 +127,9 @@ def wait_server_ready(endpoints): break -def init_communicator(program, rank, nranks, wait_port, current_endpoint, - endpoints): +def init_communicator( + program, rank, nranks, wait_port, current_endpoint, endpoints +): if nranks < 2: return other_endpoints = endpoints[:] @@ -139,53 +141,66 @@ def init_communicator(program, rank, nranks, wait_port, current_endpoint, nccl_id_var = block.create_var( name=fluid.unique_name.generate('nccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW) - - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': nccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - - block.append_op(type='c_comm_init', - inputs={'X': nccl_id_var}, - outputs={}, - attrs={ - 'nranks': nranks, - 'rank': rank, - 'ring_id': 0, - }) + type=fluid.core.VarDesc.VarType.RAW, + ) + + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': nccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + + block.append_op( + type='c_comm_init', + inputs={'X': nccl_id_var}, + outputs={}, + attrs={ + 'nranks': nranks, + 'rank': rank, + 'ring_id': 0, + }, + ) elif core.is_compiled_with_npu(): hccl_id_var = block.create_var( name=fluid.unique_name.generate('hccl_id'), persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_hccl_id', - inputs={}, - outputs={'Out': hccl_id_var}, - attrs={ - 'rank': rank, - 'endpoint': current_endpoint, - 'other_endpoints': other_endpoints - }) - block.append_op(type='c_comm_init_hccl', - inputs={'X': hccl_id_var}, - outputs={}, - attrs={ - 'rank': rank, - 'ring_id': 0, - 'device_id': int(os.getenv("FLAGS_selected_npus")), - 'rank_ids': nranks - }) + type=core.VarDesc.VarType.RAW, + ) + block.append_op( + type='c_gen_hccl_id', + 
inputs={}, + outputs={'Out': hccl_id_var}, + attrs={ + 'rank': rank, + 'endpoint': current_endpoint, + 'other_endpoints': other_endpoints, + }, + ) + block.append_op( + type='c_comm_init_hccl', + inputs={'X': hccl_id_var}, + outputs={}, + attrs={ + 'rank': rank, + 'ring_id': 0, + 'device_id': int(os.getenv("FLAGS_selected_npus")), + 'rank_ids': nranks, + }, + ) def prepare_distributed_context(place=None): if place is None: - place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \ + place = ( + fluid.CUDAPlace(ParallelEnv().dev_id) + if ParallelEnv().nranks > 1 else fluid.CUDAPlace(0) + ) place = _get_paddle_place(place) strategy = fluid.dygraph.parallel.ParallelStrategy() @@ -203,9 +218,14 @@ def prepare_distributed_context(place=None): def _init_context(): communicator_prog = fluid.Program() - init_communicator(communicator_prog, strategy.local_rank, - strategy.nranks, True, strategy.current_endpoint, - strategy.trainer_endpoints) + init_communicator( + communicator_prog, + strategy.local_rank, + strategy.nranks, + True, + strategy.current_endpoint, + strategy.trainer_endpoints, + ) exe = fluid.Executor(place) exe.run(communicator_prog) @@ -215,7 +235,7 @@ def prepare_distributed_context(place=None): fluid.enable_dygraph(place) else: - assert ("Only support CUDAPlace for now.") + assert "Only support CUDAPlace for now." _parallel_context_initialized = True return strategy @@ -264,7 +284,7 @@ class StaticGraphAdapter(object): 'eval_total': 0, 'test_total': 0, 'eval_batch': 0, - 'test_batch': 0 + 'test_batch': 0, } self._nranks = ParallelEnv().nranks @@ -284,10 +304,13 @@ class StaticGraphAdapter(object): self.model.mode = value def train_batch(self, inputs, labels=None, update=True): - assert self.model._optimizer, \ - "model not ready, please call `model.prepare()` first" + assert ( + self.model._optimizer + ), "model not ready, please call `model.prepare()` first" self.mode = 'train' - assert update is True, "Does not support `update == False` in static mode by now." + assert ( + update is True + ), "Does not support `update == False` in static mode by now." return self._run(inputs, labels) def eval_batch(self, inputs, labels=None): @@ -302,7 +325,6 @@ class StaticGraphAdapter(object): return self.model.network.parameters(*args, **kwargs) def save(self, path): - def _save(state, path): if not state: return @@ -326,8 +348,7 @@ class StaticGraphAdapter(object): # XXX `optimizer.state_dict()` only work in dygraph mode optim_path = path + ".pdopt" optim = { - p.name: p - for p in filter(is_belong_to_optimizer, prog.list_vars()) + p.name: p for p in filter(is_belong_to_optimizer, prog.list_vars()) } if not optim: return @@ -343,8 +364,10 @@ class StaticGraphAdapter(object): # restore parameter states fluid.core._create_loaded_parameter( - [param for param, state in param_state_pairs], global_scope(), - executor) + [param for param, state in param_state_pairs], + global_scope(), + executor, + ) for param, state in param_state_pairs: self._set_var(param, state) @@ -372,9 +395,10 @@ class StaticGraphAdapter(object): # static-graph, since the time of global_step to increase is # different. 
state_val = ( - np.array(converted_state.pop("global_step")) - 1 - ) if "global_step" in converted_state else converted_state.pop( - "@LR_DECAY_COUNTER@", None) + (np.array(converted_state.pop("global_step")) - 1) + if "global_step" in converted_state + else converted_state.pop("@LR_DECAY_COUNTER@", None) + ) if state_val is not None: converted_state[var.name] = state_val elif var.name.startswith("learning_rate_"): @@ -391,36 +415,61 @@ class StaticGraphAdapter(object): opt_cls_name = self.model._optimizer.__class__.__name__ opt_unq_name = None for name in self.model._optimizer._accumulators.keys(): - accum_name = name if opt_name is None else name[ - len(opt_name) + 1:] - for param_name, state_var in self.model._optimizer._accumulators[ - name].items(): + accum_name = ( + name + if opt_name is None + else name[len(opt_name) + 1 :] + ) + for ( + param_name, + state_var, + ) in self.model._optimizer._accumulators[name].items(): if opt_unq_name is None: # can not infer out the exact unique(opt_name), # thus try to extract rather than generate - for state_key in sorted(state.keys(), - key=lambda x: len(x), - reverse=True): - prefix = param_name + "_" + ( - opt_cls_name - if opt_name is None else opt_name) + "_" + for state_key in sorted( + state.keys(), + key=lambda x: len(x), + reverse=True, + ): + prefix = ( + param_name + + "_" + + ( + opt_cls_name + if opt_name is None + else opt_name + ) + + "_" + ) if state_key.startswith(prefix): - prefix_offset = state_key[len( - prefix):].find("_") + len(prefix) + prefix_offset = state_key[ + len(prefix) : + ].find("_") + len(prefix) opt_unq_name = state_key[ - len(param_name + "_"):prefix_offset] + len( + param_name + "_" + ) : prefix_offset + ] # TODO: assert # assert opt_unq_name is None # gen(param.name + "_" + gen(opt_name) + "_" + accum_name) # always end with "_0" since the unique optimizer._name - dy_state_name = (param_name + "_" + opt_unq_name + - "_" + accum_name + "_0") + dy_state_name = ( + param_name + + "_" + + opt_unq_name + + "_" + + accum_name + + "_0" + ) converted_state[ - state_var.name] = converted_state.pop( - dy_state_name) + state_var.name + ] = converted_state.pop(dy_state_name) - assert var.name in converted_state, \ - "variable [{}] is not in optimizer state file".format(var.name) + assert ( + var.name in converted_state + ), "variable [{}] is not in optimizer state file".format(var.name) self._set_var(var, converted_state[var.name]) def _set_var(self, var, ndarray): @@ -439,15 +488,17 @@ class StaticGraphAdapter(object): def _run(self, inputs, labels=None): compiled_prog = self._compiled_progs.get(self.mode, None) - assert compiled_prog, \ - "Model is not ready, please call `model.prepare()` first" + assert ( + compiled_prog + ), "Model is not ready, please call `model.prepare()` first" inputs = to_list(inputs) if labels is not None: labels = to_list(labels) - assert len(inputs) == len(self._input_vars[self.mode]), \ - "number of inputs" \ + assert len(inputs) == len(self._input_vars[self.mode]), ( + "number of inputs" + " does not match number of arguments of `forward` method" + ) feed = {} input_names = [v.name for v in self._input_vars[self.mode]] @@ -457,8 +508,10 @@ class StaticGraphAdapter(object): # train and test may take different arguments if inputs[idx] is not None: feed[n] = inputs[idx] - if self._amp_level == 'O2' and input_dtypes[ - idx] == core.VarDesc.VarType.FP16: + if ( + self._amp_level == 'O2' + and input_dtypes[idx] == core.VarDesc.VarType.FP16 + ): if isinstance(feed[n], core.LoDTensor): feed[n] = 
feed[n]._as_type(core.VarDesc.VarType.FP16) elif isinstance(feed[n], np.array): @@ -486,10 +539,12 @@ class StaticGraphAdapter(object): else: pruned_fetch_list.append(fetch_var) - rets = self._executor.run(compiled_prog, - feed=feed, - fetch_list=pruned_fetch_list, - return_numpy=False) + rets = self._executor.run( + compiled_prog, + feed=feed, + fetch_list=pruned_fetch_list, + return_numpy=False, + ) # restore pruned fetch_list Variable from feeds for i, name in enumerate(pruned_fetch_idx_name_map): @@ -505,20 +560,24 @@ class StaticGraphAdapter(object): metrics = [] for metric, state in zip(self.model._metrics, metric_states): # cut off padding size - if self.mode != 'train' and self.model._test_dataloader is not None \ - and isinstance(self.model._test_dataloader, DataLoader) \ - and self._nranks > 1: + if ( + self.mode != 'train' + and self.model._test_dataloader is not None + and isinstance(self.model._test_dataloader, DataLoader) + and self._nranks > 1 + ): total_size = len(self.model._test_dataloader.dataset) # TODO: fixme if have better way to get batch size samples = state[0].shape[0] current_count = self._merge_count.get(self.mode + '_total', 0) if current_count + samples >= total_size: state = [ - s[:int(total_size - current_count), ...] for s in state + s[: int(total_size - current_count), ...] for s in state ] self._merge_count[self.mode + '_total'] = 0 - self._merge_count[self.mode + '_batch'] = int(total_size - - current_count) + self._merge_count[self.mode + '_batch'] = int( + total_size - current_count + ) else: self._merge_count[self.mode + '_total'] += samples self._merge_count[self.mode + '_batch'] = samples @@ -550,8 +609,11 @@ class StaticGraphAdapter(object): if mode != 'train': for op in list(prog.global_block().ops): prog.global_block()._remove_op(0) - if mode == 'train' and self.model._optimizer \ - and self.model._optimizer._learning_rate_map: + if ( + mode == 'train' + and self.model._optimizer + and self.model._optimizer._learning_rate_map + ): # HACK workaround learning rate map issue lr_var = self.model._optimizer._learning_rate_map[self._orig_prog] new_lr_var = prog.global_block().vars[lr_var.name] @@ -589,20 +651,27 @@ class StaticGraphAdapter(object): dist_strategy.amp = True dist_strategy.amp_configs = self._amp_configs.copy() dist_strategy.amp_configs.update(self._amp_custom_lists) - dist_strategy.amp_configs[ - 'use_pure_fp16'] = self._amp_level == 'O2' + dist_strategy.amp_configs['use_pure_fp16'] = ( + self._amp_level == 'O2' + ) self.model._optimizer = fleet.distributed_optimizer( - self.model._optimizer, strategy=dist_strategy) + self.model._optimizer, strategy=dist_strategy + ) elif self._amp_level != "O0" and core.is_compiled_with_cuda: - amp_lists = paddle.static.amp.AutoMixedPrecisionLists( - **self._amp_custom_lists - ) if self._amp_custom_lists else None + amp_lists = ( + paddle.static.amp.AutoMixedPrecisionLists( + **self._amp_custom_lists + ) + if self._amp_custom_lists + else None + ) self.model._optimizer = paddle.static.amp.decorate( self.model._optimizer, amp_lists=amp_lists, use_pure_fp16=self._amp_level == "O2", use_fp16_guard=self._use_fp16_guard, - **self._amp_configs) + **self._amp_configs + ) self.model._optimizer.minimize(self._loss_endpoint) @@ -615,7 +684,7 @@ class StaticGraphAdapter(object): self._endpoints[mode] = { "output": outputs, "loss": to_list(losses), - "metric": metrics + "metric": metrics, } def _compile_and_initialize(self, prog, mode): @@ -623,8 +692,9 @@ class StaticGraphAdapter(object): if compiled_prog is not 
None: return compiled_prog - assert self.model._place is not None, \ - "device is not set, please call `model.prepare()` first" + assert ( + self.model._place is not None + ), "device is not set, please call `model.prepare()` first" place = self.model._place @@ -637,8 +707,11 @@ class StaticGraphAdapter(object): uninitialized = [] for var_py in self._startup_prog.list_vars(): var = fluid.global_scope().find_var(var_py.name) - if not var_py.name.startswith('nccl_id') and var and \ - var.get_tensor()._is_initialized(): + if ( + not var_py.name.startswith('nccl_id') + and var + and var.get_tensor()._is_initialized() + ): continue uninitialized.append(var_py) @@ -646,7 +719,10 @@ class StaticGraphAdapter(object): startup_prog = self._startup_prog._prune(uninitialized) self._executor.run(startup_prog) - if self._amp_level == "O2" and mode == 'train' and core.is_compiled_with_cuda( + if ( + self._amp_level == "O2" + and mode == 'train' + and core.is_compiled_with_cuda() ): self.model._optimizer.amp_init(place) @@ -659,7 +735,6 @@ class StaticGraphAdapter(object): class DynamicGraphAdapter(object): - def __init__(self, model): super(DynamicGraphAdapter, self).__init__() self.model = model @@ -669,7 +744,7 @@ class DynamicGraphAdapter(object): 'eval_total': 0, 'test_total': 0, 'eval_batch': 0, - 'test_batch': 0 + 'test_batch': 0, } self._input_info = None @@ -686,7 +761,8 @@ class DynamicGraphAdapter(object): stradegy.trainer_endpoints = ParallelEnv().trainer_endpoints stradegy.current_endpoint = ParallelEnv().current_endpoint self.ddp_model = fluid.dygraph.parallel.DataParallel( - self.model.network, stradegy) + self.model.network, stradegy + ) @property def mode(self): @@ -698,8 +774,9 @@ class DynamicGraphAdapter(object): # TODO multi device in dygraph mode not implemented at present time def train_batch(self, inputs, labels=None, update=True): - assert self.model._optimizer, \ - "model not ready, please call `model.prepare()` first" + assert ( + self.model._optimizer + ), "model not ready, please call `model.prepare()` first" self.model.network.train() self.mode = 'train' inputs = to_list(inputs) @@ -711,9 +788,11 @@ class DynamicGraphAdapter(object): if self._amp_level != "O0" and self.model._scaler is None: self.model._scaler = paddle.amp.GradScaler(**self._amp_configs) - with paddle.amp.auto_cast(enable=self._amp_level != 'O0', - **self._amp_custom_lists, - level=self._amp_level): + with paddle.amp.auto_cast( + enable=self._amp_level != 'O0', + **self._amp_custom_lists, + level=self._amp_level + ): if self._nranks > 1: outputs = self.ddp_model(*[to_variable(x) for x in inputs]) else: @@ -741,8 +820,11 @@ class DynamicGraphAdapter(object): m = metric.update(*[to_numpy(m) for m in to_list(metric_outs)]) metrics.append(m) - return ([to_numpy(l) for l in losses], metrics) \ - if len(metrics) > 0 else [to_numpy(l) for l in losses] + return ( + ([to_numpy(l) for l in losses], metrics) + if len(metrics) > 0 + else [to_numpy(l) for l in losses] + ) def eval_batch(self, inputs, labels=None): self.model.network.eval() @@ -772,21 +854,25 @@ class DynamicGraphAdapter(object): metrics = [] for metric in self.model._metrics: # cut off padding value. 
- if self.model._test_dataloader is not None and self._nranks > 1 \ - and isinstance(self.model._test_dataloader, DataLoader): + if ( + self.model._test_dataloader is not None + and self._nranks > 1 + and isinstance(self.model._test_dataloader, DataLoader) + ): total_size = len(self.model._test_dataloader.dataset) samples = outputs[0].shape[0] current_count = self._merge_count.get(self.mode + '_total', 0) if current_count + samples >= total_size: outputs = [ - o[:int(total_size - current_count)] for o in outputs + o[: int(total_size - current_count)] for o in outputs ] labels = [ - l[:int(total_size - current_count)] for l in labels + l[: int(total_size - current_count)] for l in labels ] self._merge_count[self.mode + '_total'] = 0 - self._merge_count[self.mode + '_batch'] = int(total_size - - current_count) + self._merge_count[self.mode + '_batch'] = int( + total_size - current_count + ) else: self._merge_count[self.mode + '_total'] += samples self._merge_count[self.mode + '_batch'] = samples @@ -853,38 +939,48 @@ class DynamicGraphAdapter(object): opt_unq_name = '' opt_cls_name = self.model._optimizer.__class__.__name__ - opt_name = opt_unq_name[:opt_unq_name.rfind("_")] # remove suffix idx + opt_name = opt_unq_name[: opt_unq_name.rfind("_")] # remove suffix idx param_names = [param.name for param in self.model.network.parameters()] - for var_name, state_var in sorted(optim_state.items(), - key=lambda x: len(x[0]), - reverse=True): + for var_name, state_var in sorted( + optim_state.items(), key=lambda x: len(x[0]), reverse=True + ): if var_name in ["@LR_DECAY_COUNTER@", "global_step"]: # NOTE: dygraph saved global_step is 1 larger than that in # static-graph, since the time of global_step to increase is # different. if var_name == "@LR_DECAY_COUNTER@": - converted_state["global_step"] = np.array( - converted_state.pop("@LR_DECAY_COUNTER@")) + 1 + converted_state["global_step"] = ( + np.array(converted_state.pop("@LR_DECAY_COUNTER@")) + 1 + ) else: # moment and other accumulators # extend state dict to include promising dygraph names for param_name in param_names: if var_name.startswith(param_name + "_" + opt_name): # when init optimizer with name - accum_name = var_name[len(param_name + "_" + opt_name + - "_"):] - elif var_name.startswith(param_name + - "_") and opt_name == opt_cls_name: + accum_name = var_name[ + len(param_name + "_" + opt_name + "_") : + ] + elif ( + var_name.startswith(param_name + "_") + and opt_name == opt_cls_name + ): # when init optimizer without name - accum_name = var_name[len(param_name + "_"):] + accum_name = var_name[len(param_name + "_") :] else: continue # remove suffix idx - accum_name = accum_name[:accum_name.rfind("_")] + accum_name = accum_name[: accum_name.rfind("_")] # state names always end with "_0" in dygraph because of the # unique optimizer._name - dy_state_name = (param_name + "_" + opt_unq_name + "_" + - accum_name + "_0") + dy_state_name = ( + param_name + + "_" + + opt_unq_name + + "_" + + accum_name + + "_0" + ) converted_state[dy_state_name] = state_var if not hasattr(self.model._optimizer, 'set_state_dict'): @@ -896,12 +992,16 @@ class DynamicGraphAdapter(object): self.model._optimizer.set_state_dict(converted_state) def prepare(self): - if self._amp_level == "O2" and self.model.mode == 'train' and core.is_compiled_with_cuda( + if ( + self._amp_level == "O2" + and self.model.mode == 'train' + and core.is_compiled_with_cuda() ): self.model.network, self.model._optimizer = paddle.amp.decorate( models=self.model.network, 
optimizers=self.model._optimizer, - level='O2') + level='O2', + ) if self._amp_level != "O0": self.model._scaler = None @@ -1336,17 +1436,24 @@ class Model(object): state = param_state.get(key, None) if state is None: raise ValueError( - "{} is not found in the providing file.".format(key)) + "{} is not found in the providing file.".format(key) + ) if list(state.shape) != list(param.shape): raise ValueError( - "{} receives a shape {}, but the expected shape is {}.". - format(key, list(state.shape), list(param.shape))) + "{} receives a shape {}, but the expected shape is {}.".format( + key, list(state.shape), list(param.shape) + ) + ) return param, state def _strip_postfix(path): path, ext = os.path.splitext(path) - assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \ - "Unknown postfix {} from weights".format(ext) + assert ext in [ + '', + '.pdparams', + '.pdopt', + '.pdmodel', + ], "Unknown postfix {} from weights".format(ext) return path path = _strip_postfix(path) @@ -1360,15 +1467,17 @@ class Model(object): except ValueError as err: if skip_mismatch: warnings.warn( - ("Skip loading for {}. ".format(key) + str(err))) + ("Skip loading for {}. ".format(key) + str(err)) + ) # reset optimizer when mismatch happens reset_optimizer = True else: raise err matched_param_state.append(match_res) - optim_state = None if reset_optimizer else _load_state_from_path( - path + ".pdopt") + optim_state = ( + None if reset_optimizer else _load_state_from_path(path + ".pdopt") + ) # TODO: support save/load scaler state in static graph if _non_static_mode(): @@ -1377,8 +1486,9 @@ class Model(object): if os.path.exists(path + '.pdscaler'): scaler_state = paddle.load(path + '.pdscaler') - return self._adapter.load(matched_param_state, optim_state, - scaler_state) + return self._adapter.load( + matched_param_state, optim_state, scaler_state + ) else: return self._adapter.load(matched_param_state, optim_state) @@ -1410,13 +1520,14 @@ class Model(object): return self._adapter.parameters() def _prepare_amp(self, amp_configs): - def _check_pure_fp16_configs(): # pure float16 training has some restricts now if self._adapter._amp_level == "O2" and self._optimizer._grad_clip: # clip by value is not supported - assert isinstance(self._optimizer._grad_clip, (paddle.nn.ClipGradByGlobalNorm, paddle.nn.ClipGradByNorm)), \ - "Only GradientClipByNorm and GradientClipByGlobalNorm are supported in amp training with level=O2 currently." + assert isinstance( + self._optimizer._grad_clip, + (paddle.nn.ClipGradByGlobalNorm, paddle.nn.ClipGradByNorm), + ), "Only GradientClipByNorm and GradientClipByGlobalNorm are supported in amp training with level=O2 currently." self._adapter._amp_custom_lists = {} self._adapter._amp_configs = {} @@ -1428,7 +1539,8 @@ class Model(object): elif isinstance(amp_configs, str): if amp_configs not in ('O0', 'O1', 'O2'): raise ValueError( - "The level of amp_configs should be 'O0', 'O1' or 'O2'.") + "The level of amp_configs should be 'O0', 'O1' or 'O2'." + ) self._adapter._amp_level = amp_configs _check_pure_fp16_configs() return @@ -1437,7 +1549,8 @@ class Model(object): self._adapter._amp_level = 'O1' elif amp_configs['level'] not in ('O0', 'O1', 'O2'): raise ValueError( - "amp_configs['level'] should be 'O0', 'O1' or 'O2'.") + "amp_configs['level'] should be 'O0', 'O1' or 'O2'." 
+ ) else: self._adapter._amp_level = amp_configs['level'] amp_config_key_set = set(amp_configs.keys()) - {'level'} @@ -1454,12 +1567,14 @@ class Model(object): # construct amp_custom_lists if self._adapter._amp_level != 'O0' and amp_config_key_set: for param_name in [ - 'custom_white_list', 'custom_black_list', - 'custom_black_varnames' + 'custom_white_list', + 'custom_black_list', + 'custom_black_varnames', ]: if param_name in amp_config_key_set: self._adapter._amp_custom_lists[param_name] = amp_configs[ - param_name] + param_name + ] amp_config_key_set -= {param_name} def _check_amp_configs(amp_config_key_set): @@ -1474,13 +1589,16 @@ class Model(object): } if amp_config_key_set - accepted_param_set: raise ValueError( - "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized." - .format(tuple(amp_config_key_set - accepted_param_set))) + "Except for 'level', the keys of 'amp_configs' must be accepted by mixed precision APIs, but {} could not be recognized.".format( + tuple(amp_config_key_set - accepted_param_set) + ) + ) if 'use_fp16_guard' in amp_config_key_set: if _non_static_mode(): raise ValueError( - "'use_fp16_guard' is supported in static mode only.") + "'use_fp16_guard' is supported in static mode only." + ) self._adapter._use_fp16_guard = amp_configs['use_fp16_guard'] amp_config_key_set.remove('use_fp16_guard') @@ -1490,11 +1608,9 @@ class Model(object): for key in amp_configs_set: self._adapter._amp_configs[key] = amp_configs[key] - def prepare(self, - optimizer=None, - loss=None, - metrics=None, - amp_configs=None): + def prepare( + self, optimizer=None, loss=None, metrics=None, amp_configs=None + ): """ Configures the model before runing. @@ -1534,15 +1650,17 @@ class Model(object): if ParallelEnv().nranks > 1 and not _parallel_context_initialized: if fluid._non_static_mode(): main_prog_seed = fluid.default_main_program().random_seed - startup_prog_seed = fluid.default_startup_program( - ).random_seed + startup_prog_seed = ( + fluid.default_startup_program().random_seed + ) fluid.disable_dygraph() paddle.disable_static(self._place) # enable_dygraph would create and switch to a new program, # thus also copy seed to the new program fluid.default_main_program().random_seed = main_prog_seed - fluid.default_startup_program( - ).random_seed = startup_prog_seed + fluid.default_startup_program().random_seed = ( + startup_prog_seed + ) else: prepare_distributed_context(self._place) _parallel_context_initialized = True @@ -1557,30 +1675,32 @@ class Model(object): metrics = metrics or [] for metric in to_list(metrics): - assert isinstance(metric, Metric), \ - "{} is not sub class of Metric".format( - metric.__class__.__name__) + assert isinstance( + metric, Metric + ), "{} is not sub class of Metric".format(metric.__class__.__name__) self._metrics = to_list(metrics) self._prepare_amp(amp_configs) self._adapter.prepare() - def fit(self, - train_data=None, - eval_data=None, - batch_size=1, - epochs=1, - eval_freq=1, - log_freq=10, - save_dir=None, - save_freq=1, - verbose=2, - drop_last=False, - shuffle=True, - num_workers=0, - callbacks=None, - accumulate_grad_batches=1, - num_iters=None): + def fit( + self, + train_data=None, + eval_data=None, + batch_size=1, + epochs=1, + eval_freq=1, + log_freq=10, + save_dir=None, + save_freq=1, + verbose=2, + drop_last=False, + shuffle=True, + num_workers=0, + callbacks=None, + accumulate_grad_batches=1, + num_iters=None, + ): """ Trains the model for a fixed number of epochs. 
If `eval_data` is set, evaluation will be done at the end of each epoch. @@ -1714,30 +1834,36 @@ class Model(object): epochs=2, save_dir='mnist_checkpoint') """ - assert train_data is not None, \ - "train_data must be given!" + assert train_data is not None, "train_data must be given!" if isinstance(train_data, Dataset): - train_sampler = DistributedBatchSampler(train_data, - batch_size=batch_size, - shuffle=shuffle, - drop_last=drop_last) - train_loader = DataLoader(train_data, - batch_sampler=train_sampler, - places=self._place, - num_workers=num_workers, - return_list=True) + train_sampler = DistributedBatchSampler( + train_data, + batch_size=batch_size, + shuffle=shuffle, + drop_last=drop_last, + ) + train_loader = DataLoader( + train_data, + batch_sampler=train_sampler, + places=self._place, + num_workers=num_workers, + return_list=True, + ) else: train_loader = train_data if eval_data is not None and isinstance(eval_data, Dataset): - eval_sampler = DistributedBatchSampler(eval_data, - batch_size=batch_size) - eval_loader = DataLoader(eval_data, - batch_sampler=eval_sampler, - places=self._place, - num_workers=num_workers, - return_list=True) + eval_sampler = DistributedBatchSampler( + eval_data, batch_size=batch_size + ) + eval_loader = DataLoader( + eval_data, + batch_sampler=eval_sampler, + places=self._place, + num_workers=num_workers, + return_list=True, + ) elif eval_data is not None: eval_loader = eval_data else: @@ -1750,8 +1876,11 @@ class Model(object): steps = self._len_data_loader(train_loader) self.num_iters = num_iters - if num_iters is not None and isinstance(num_iters, int) and isinstance( - steps, int): + if ( + num_iters is not None + and isinstance(num_iters, int) + and isinstance(steps, int) + ): assert num_iters > 0, "num_iters must be greater than 0!" epochs = (num_iters // steps) + 1 steps = min(num_iters, steps) @@ -1779,10 +1908,10 @@ class Model(object): if do_eval and epoch % eval_freq == 0: eval_steps = self._len_data_loader(eval_loader) - cbks.on_begin('eval', { - 'steps': eval_steps, - 'metrics': self._metrics_name() - }) + cbks.on_begin( + 'eval', + {'steps': eval_steps, 'metrics': self._metrics_name()}, + ) eval_logs = self._run_one_epoch(eval_loader, cbks, 'eval') @@ -1793,14 +1922,16 @@ class Model(object): cbks.on_end('train', logs) self._test_dataloader = None - def evaluate(self, - eval_data, - batch_size=1, - log_freq=10, - verbose=2, - num_workers=0, - callbacks=None, - num_iters=None): + def evaluate( + self, + eval_data, + batch_size=1, + log_freq=10, + verbose=2, + num_workers=0, + callbacks=None, + num_iters=None, + ): """ Evaluate the loss and metrics of the model on input dataset. 
@@ -1854,13 +1985,16 @@ class Model(object): """ if eval_data is not None and isinstance(eval_data, Dataset): - eval_sampler = DistributedBatchSampler(eval_data, - batch_size=batch_size) - eval_loader = DataLoader(eval_data, - batch_sampler=eval_sampler, - places=self._place, - num_workers=num_workers, - return_list=True) + eval_sampler = DistributedBatchSampler( + eval_data, batch_size=batch_size + ) + eval_loader = DataLoader( + eval_data, + batch_sampler=eval_sampler, + places=self._place, + num_workers=num_workers, + return_list=True, + ) else: eval_loader = eval_data @@ -1876,15 +2010,17 @@ class Model(object): eval_steps = self._len_data_loader(eval_loader) self.num_iters = num_iters - if num_iters is not None and isinstance(num_iters, int) and isinstance( - eval_steps, int): + if ( + num_iters is not None + and isinstance(num_iters, int) + and isinstance(eval_steps, int) + ): assert num_iters > 0, "num_iters must be greater than 0!" eval_steps = min(num_iters, eval_steps) self.num_iters = eval_steps - cbks.on_begin('eval', { - 'steps': eval_steps, - 'metrics': self._metrics_name() - }) + cbks.on_begin( + 'eval', {'steps': eval_steps, 'metrics': self._metrics_name()} + ) logs = self._run_one_epoch(eval_loader, cbks, 'eval') @@ -1898,13 +2034,15 @@ class Model(object): return eval_result - def predict(self, - test_data, - batch_size=1, - num_workers=0, - stack_outputs=False, - verbose=1, - callbacks=None): + def predict( + self, + test_data, + batch_size=1, + num_workers=0, + stack_outputs=False, + verbose=1, + callbacks=None, + ): """ Compute the output predictions on testing data. @@ -1975,13 +2113,16 @@ class Model(object): """ if test_data is not None and isinstance(test_data, Dataset): - test_sampler = DistributedBatchSampler(test_data, - batch_size=batch_size) - test_loader = DataLoader(test_data, - batch_sampler=test_sampler, - places=self._place, - num_workers=num_workers, - return_list=True) + test_sampler = DistributedBatchSampler( + test_data, batch_size=batch_size + ) + test_loader = DataLoader( + test_data, + batch_sampler=test_sampler, + places=self._place, + num_workers=num_workers, + return_list=True, + ) else: test_loader = test_data @@ -2031,7 +2172,8 @@ class Model(object): if self._is_shape_inferred: warnings.warn( "'inputs' was not specified when Model initialization, so the input shape to be saved will be the shape derived from the user's actual inputs. The input shape to be saved is %s. For saving correct input shapes, please provide 'inputs' for Model initialization." - % self._input_info[0]) + % self._input_info[0] + ) paddle.jit.save(layer, path, input_spec=self._inputs) @@ -2042,7 +2184,8 @@ class Model(object): raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but received " - "file_prefix is empty string.") + "file_prefix is empty string." 
+ ) dirname = os.path.dirname(path) if dirname and not os.path.exists(dirname): @@ -2053,21 +2196,24 @@ class Model(object): params_filename = file_prefix + INFER_PARAMS_SUFFIX prog = self._adapter._progs.get('test', None) - assert prog, \ - "Model is not ready, please call `model.prepare()` first" + assert ( + prog + ), "Model is not ready, please call `model.prepare()` first" infer_prog = prog.clone(for_test=True) input_names = [v.name for v in self._adapter._input_vars['test']] endpoints = self._adapter._endpoints['test']['output'] - fluid.io.save_inference_model(model_path, - input_names, - endpoints, - self._adapter._executor, - main_program=infer_prog, - model_filename=model_filename, - params_filename=params_filename) + fluid.io.save_inference_model( + model_path, + input_names, + endpoints, + self._adapter._executor, + main_program=infer_prog, + model_filename=model_filename, + params_filename=params_filename, + ) def _run_one_epoch( self, @@ -2093,16 +2239,21 @@ class Model(object): # LoDTensor.shape is callable, where LoDTensor comes from # DataLoader in static graph - batch_size = data[0].shape()[0] if callable( - data[0].shape) else data[0].shape[0] + batch_size = ( + data[0].shape()[0] + if callable(data[0].shape) + else data[0].shape[0] + ) callbacks.on_batch_begin(mode, step, logs) if mode != 'predict': - _inputs = [data[:len(self._inputs)], data[len(self._inputs):]] + _inputs = [data[: len(self._inputs)], data[len(self._inputs) :]] if mode == 'train': - _inputs.append((step + 1) % self._accumulate == 0 - or step + 1 == len(data_loader)) + _inputs.append( + (step + 1) % self._accumulate == 0 + or step + 1 == len(data_loader) + ) outs = getattr(self, mode + '_batch')(*_inputs) @@ -2123,15 +2274,17 @@ class Model(object): logs[k] = v else: if self._inputs is not None: - outs = self.predict_batch(data[:len(self._inputs)]) + outs = self.predict_batch(data[: len(self._inputs)]) else: outs = self.predict_batch(data) outputs.append(outs) logs['step'] = step - if mode == 'train' or self._adapter._merge_count.get( - mode + '_batch', 0) <= 0: + if ( + mode == 'train' + or self._adapter._merge_count.get(mode + '_batch', 0) <= 0 + ): logs['batch_size'] = batch_size * ParallelEnv().nranks else: logs['batch_size'] = self._adapter._merge_count[mode + '_batch'] @@ -2185,8 +2338,9 @@ class Model(object): # {'total_params': 61610, 'trainable_params': 61610} """ - assert (input_size is not None or self._inputs - is not None), "'input_size' or 'self._input' must be set" + assert ( + input_size is not None or self._inputs is not None + ), "'input_size' or 'self._input' must be set" if input_size is not None: _input_size = input_size else: @@ -2203,7 +2357,10 @@ class Model(object): if is_input: arg_names = extract_args(self.network.forward)[1:] # While Saving inference model in dygraph, and providing inputs only in running. - if shapes is not None and dtypes is not None and fluid._non_static_mode( + if ( + shapes is not None + and dtypes is not None + and fluid._non_static_mode() ): out_specs = [ Input(name=n, dtype=dtypes[i], shape=shapes[i]) @@ -2216,7 +2373,8 @@ class Model(object): elif isinstance(specs, dict): assert is_input is False out_specs = [ - specs[n] for n in extract_args(self.network.forward) + specs[n] + for n in extract_args(self.network.forward) if n != 'self' ] else: @@ -2227,8 +2385,10 @@ class Model(object): assert isinstance(spec, Input) if spec.name is None: raise ValueError( - "Requires Input[{}].name != None, but receive `None` with {}." 
- .format(i, spec)) + "Requires Input[{}].name != None, but receive `None` with {}.".format( + i, spec + ) + ) return out_specs @@ -2253,6 +2413,7 @@ class Model(object): "Update self._inputs according to given inputs." self._input_info = self._adapter._input_info if self._input_info is not None and len(self._input_info) == 2: - self._inputs = self._verify_spec(None, self._input_info[0], - self._input_info[1], True) + self._inputs = self._verify_spec( + None, self._input_info[0], self._input_info[1], True + ) self._is_shape_inferred = True diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index de4ec449a06b68b05cc71969f176cf2d5277659b..d90c64b76217b2d61bfd90dbd2382d6a1eea9ccf 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -160,18 +160,20 @@ def summary(net, input_size=None, dtypes=None, input=None): _input_size = [] for item in input_size: if isinstance(item, int): - item = (item, ) - assert isinstance(item, - (tuple, InputSpec)), 'When input_size is list, \ + item = (item,) + assert isinstance( + item, (tuple, InputSpec) + ), 'When input_size is list, \ expect item in input_size is a tuple or InputSpec, but got {}'.format( - type(item)) + type(item) + ) if isinstance(item, InputSpec): _input_size.append(tuple(item.shape)) else: _input_size.append(item) elif isinstance(input_size, int): - _input_size = (input_size, ) + _input_size = (input_size,) else: _input_size = input_size @@ -207,8 +209,10 @@ def summary(net, input_size=None, dtypes=None, input=None): elif isinstance(item, numbers.Number): if item <= 0: raise ValueError( - "Expected element in input size greater than zero, but got {}" - .format(item)) + "Expected element in input size greater than zero, but got {}".format( + item + ) + ) new_shape.append(item) return tuple(new_shape) @@ -231,7 +235,6 @@ def summary(net, input_size=None, dtypes=None, input=None): @no_grad() def summary_string(model, input_size=None, dtypes=None, input=None): - def _all_is_numper(items): for item in items: if not isinstance(item, numbers.Number): @@ -272,7 +275,6 @@ def summary_string(model, input_size=None, dtypes=None, input=None): return output_shape def register_hook(layer): - def hook(layer, input, output): class_name = str(layer.__class__).split(".")[-1].split("'")[0] @@ -310,7 +312,8 @@ def summary_string(model, input_size=None, dtypes=None, input=None): try: if (getattr(getattr(layer, k), 'trainable')) and ( - not getattr(getattr(layer, k), 'stop_gradient')): + not getattr(getattr(layer, k), 'stop_gradient') + ): summary[m_key]["trainable_params"] += np.prod(v.shape) summary[m_key]["trainable"] = True trainable_flag = True @@ -321,9 +324,11 @@ def summary_string(model, input_size=None, dtypes=None, input=None): summary[m_key]["nb_params"] = params - if (not isinstance(layer, nn.Sequential) - and not isinstance(layer, nn.LayerList) - and (not (layer == model) or depth < 1)): + if ( + not isinstance(layer, nn.Sequential) + and not isinstance(layer, nn.LayerList) + and (not (layer == model) or depth < 1) + ): hooks.append(layer.register_forward_post_hook(hook)) # For rnn, gru and lstm layer @@ -368,24 +373,30 @@ def summary_string(model, input_size=None, dtypes=None, input=None): 'input_shape_width': 20, 'output_shape_width': 20, 'params_width': 15, - 'table_width': 75 + 'table_width': 75, } for layer in summary: if head_length['output_shape_width'] < len( - str(summary[layer]["output_shape"])): + str(summary[layer]["output_shape"]) + ): 
head_length['output_shape_width'] = len( - str(summary[layer]["output_shape"])) + str(summary[layer]["output_shape"]) + ) if head_length['input_shape_width'] < len( - str(summary[layer]["input_shape"])): + str(summary[layer]["input_shape"]) + ): head_length['input_shape_width'] = len( - str(summary[layer]["input_shape"])) + str(summary[layer]["input_shape"]) + ) if head_length['layer_width'] < len(str(layer)): head_length['layer_width'] = len(str(layer)) if head_length['params_width'] < len( - str(summary[layer]["nb_params"])): + str(summary[layer]["nb_params"]) + ): head_length['params_width'] = len( - str(summary[layer]["nb_params"])) + str(summary[layer]["nb_params"]) + ) _temp_width = 0 for k, v in head_length.items(): @@ -401,10 +412,15 @@ def summary_string(model, input_size=None, dtypes=None, input=None): summary_str += "-" * table_width['table_width'] + "\n" line_new = "{:^{}} {:^{}} {:^{}} {:^{}}".format( - "Layer (type)", table_width['layer_width'], "Input Shape", - table_width['input_shape_width'], "Output Shape", - table_width['output_shape_width'], "Param #", - table_width['params_width']) + "Layer (type)", + table_width['layer_width'], + "Input Shape", + table_width['input_shape_width'], + "Output Shape", + table_width['output_shape_width'], + "Param #", + table_width['params_width'], + ) summary_str += line_new + "\n" summary_str += "=" * table_width['table_width'] + "\n" total_params = 0 @@ -414,17 +430,21 @@ def summary_string(model, input_size=None, dtypes=None, input=None): for layer in summary: # input_shape, output_shape, trainable, nb_params line_new = "{:^{}} {:^{}} {:^{}} {:^{}}".format( - layer, table_width['layer_width'], + layer, + table_width['layer_width'], str(summary[layer]["input_shape"]), table_width['input_shape_width'], str(summary[layer]["output_shape"]), - table_width['output_shape_width'], "{0:,}".format( - summary[layer]["nb_params"]), table_width['params_width']) + table_width['output_shape_width'], + "{0:,}".format(summary[layer]["nb_params"]), + table_width['params_width'], + ) total_params += summary[layer]["nb_params"] try: total_output += np.sum( - np.prod(summary[layer]["output_shape"], axis=-1)) + np.prod(summary[layer]["output_shape"], axis=-1) + ) except: for output_shape in summary[layer]["output_shape"]: total_output += np.sum(np.prod(output_shape, axis=-1)) @@ -436,26 +456,31 @@ def summary_string(model, input_size=None, dtypes=None, input=None): def _get_input_size(input_size, size): if isinstance(input_size, (list, tuple)) and _all_is_numper(input_size): - size = abs(np.prod(input_size) * 4. / (1024**2.)) + size = abs(np.prod(input_size) * 4.0 / (1024**2.0)) else: size = sum([_get_input_size(i, size) for i in input_size]) return size total_input_size = _get_input_size(input_size, 0) - total_output_size = abs(2. * total_output * 4. / - (1024**2.)) # x2 for gradients - total_params_size = abs(total_params * 4. 
/ (1024**2.)) + total_output_size = abs( + 2.0 * total_output * 4.0 / (1024**2.0) + ) # x2 for gradients + total_params_size = abs(total_params * 4.0 / (1024**2.0)) total_size = total_params_size + total_output_size + total_input_size summary_str += "=" * table_width['table_width'] + "\n" summary_str += "Total params: {0:,}".format(total_params) + "\n" summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n" - summary_str += "Non-trainable params: {0:,}".format(total_params - - trainable_params) + "\n" + summary_str += ( + "Non-trainable params: {0:,}".format(total_params - trainable_params) + + "\n" + ) summary_str += "-" * table_width['table_width'] + "\n" summary_str += "Input size (MB): %0.2f" % total_input_size + "\n" - summary_str += "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n" + summary_str += ( + "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n" + ) summary_str += "Params size (MB): %0.2f" % total_params_size + "\n" summary_str += "Estimated Total Size (MB): %0.2f" % total_size + "\n" summary_str += "-" * table_width['table_width'] + "\n" @@ -463,5 +488,5 @@ def summary_string(model, input_size=None, dtypes=None, input=None): # return summary return summary_str, { 'total_params': total_params, - 'trainable_params': trainable_params + 'trainable_params': trainable_params, } diff --git a/python/paddle/hapi/progressbar.py b/python/paddle/hapi/progressbar.py index 41e2b5422af6459cedf62517c10ea974657f7bca..43b913a6fc120da524288d44562b8423c2c85ac0 100644 --- a/python/paddle/hapi/progressbar.py +++ b/python/paddle/hapi/progressbar.py @@ -23,15 +23,17 @@ __all__ = [] class ProgressBar(object): - """progress bar """ - - def __init__(self, - num=None, - width=30, - verbose=1, - start=True, - file=sys.stdout, - name='step'): + """progress bar""" + + def __init__( + self, + num=None, + width=30, + verbose=1, + start=True, + file=sys.stdout, + name='step', + ): self._num = num if isinstance(num, int) and num <= 0: raise TypeError('num should be None or integer (> 0)') @@ -47,11 +49,12 @@ class ProgressBar(object): self._last_update = 0 self.name = name - self._dynamic_display = ((hasattr(self.file, 'isatty') - and self.file.isatty()) - or 'ipykernel' in sys.modules - or 'posix' in sys.modules - or 'PYCHARM_HOSTED' in os.environ) + self._dynamic_display = ( + (hasattr(self.file, 'isatty') and self.file.isatty()) + or 'ipykernel' in sys.modules + or 'posix' in sys.modules + or 'PYCHARM_HOSTED' in os.environ + ) def _get_max_width(self): if sys.version_info > (3, 3): @@ -81,13 +84,17 @@ class ProgressBar(object): in_list = np.asarray(in_list) out = np.vectorize( lambda x: struct.unpack(' 0: - bar_chars += ('=' * (prog_width - 1)) + bar_chars += '=' * (prog_width - 1) if current_num < self._num: bar_chars += '>' else: bar_chars += '=' - bar_chars += ('.' * (self._width - prog_width)) + bar_chars += '.' 
* (self._width - prog_width) bar_chars += ']' else: bar_chars = self.name + ' %3d' % current_num @@ -150,8 +159,11 @@ class ProgressBar(object): if self._num is not None and current_num < self._num: eta = time_per_unit * (self._num - current_num) if eta > 3600: - eta_format = '%d:%02d:%02d' % (eta // 3600, - (eta % 3600) // 60, eta % 60) + eta_format = '%d:%02d:%02d' % ( + eta // 3600, + (eta % 3600) // 60, + eta % 60, + ) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: @@ -162,7 +174,7 @@ class ProgressBar(object): info += fps self._total_width += len(info) if prev_total_width > self._total_width: - info += (' ' * (prev_total_width - self._total_width)) + info += ' ' * (prev_total_width - self._total_width) # newline for another epoch if self._num is not None and current_num >= self._num: @@ -176,8 +188,10 @@ class ProgressBar(object): elif self._verbose == 2 or self._verbose == 3: if self._num: numdigits = int(np.log10(self._num)) + 1 - count = (self.name + ' %' + str(numdigits) + - 'd/%d') % (current_num, self._num) + count = (self.name + ' %' + str(numdigits) + 'd/%d') % ( + current_num, + self._num, + ) else: count = self.name + ' %3d' % current_num info = count + info @@ -191,9 +205,11 @@ class ProgressBar(object): info += ' %.4f' % v else: info += ' %.4e' % v - elif isinstance(v, np.ndarray) and \ - v.size == 1 and \ - v.dtype in [np.float32, np.float64]: + elif ( + isinstance(v, np.ndarray) + and v.size == 1 + and v.dtype in [np.float32, np.float64] + ): if abs(v[0]) > 1e-3: info += ' %.4f' % v[0] else: diff --git a/python/paddle/hapi/static_flops.py b/python/paddle/hapi/static_flops.py index 8926bcb35529d9788df0039ba6ef02abd2317f0e..d1effd27b463f52dea6dfbfc79e725e98a06bd19 100644 --- a/python/paddle/hapi/static_flops.py +++ b/python/paddle/hapi/static_flops.py @@ -20,7 +20,6 @@ __all__ = [] class VarWrapper(object): - def __init__(self, var, graph): assert isinstance(var, Variable) assert isinstance(graph, GraphWrapper) @@ -41,7 +40,6 @@ class VarWrapper(object): class OpWrapper(object): - def __init__(self, op, graph): assert isinstance(graph, GraphWrapper) self._op = op @@ -87,8 +85,7 @@ class GraphWrapper(object): """ def __init__(self, program=None, in_nodes=[], out_nodes=[]): - """ - """ + """ """ super(GraphWrapper, self).__init__() self.program = Program() if program is None else program self.persistables = {} @@ -212,7 +209,6 @@ def static_flops(program, print_detail=False): class Table(object): - def __init__(self, table_heads): self.table_heads = table_heads self.table_len = [] @@ -226,8 +222,10 @@ class Table(object): print('The row_str should be a list') if len(row_str) != self.col_num: print( - 'The length of row data should be equal the length of table heads, but the data: {} is not equal table heads {}' - .format(len(row_str), self.col_num)) + 'The length of row data should be equal the length of table heads, but the data: {} is not equal table heads {}'.format( + len(row_str), self.col_num + ) + ) for i in range(self.col_num): if len(str(row_str[i])) > self.table_len[i]: self.table_len[i] = len(str(row_str[i])) diff --git a/python/paddle/hub.py b/python/paddle/hub.py index acdb28cb6f08dfd51e9770c40283eb3f8d98a010..e6f38d6ee11f7019112d8c9cc1dfe7b21aee7c4a 100644 --- a/python/paddle/hub.py +++ b/python/paddle/hub.py @@ -16,6 +16,4 @@ from .hapi.hub import list # noqa: F401 from .hapi.hub import help # noqa: F401 from .hapi.hub import load # noqa: F401 -__all__ = [ #noqa - 'list', 'help', 'load' -] +__all__ = ['list', 'help', 'load'] # noqa diff 
--git a/python/paddle/incubate/__init__.py b/python/paddle/incubate/__init__.py index 68c7db054991a02b7c0a098b07680b9f6a193436..2730db97f0ed290e93a24b75c6b9c6dabe2df5ea 100644 --- a/python/paddle/incubate/__init__.py +++ b/python/paddle/incubate/__init__.py @@ -29,10 +29,10 @@ from .tensor import segment_max from .tensor import segment_min from .passes import fuse_resnet_unit_pass -from . import autograd #noqa: F401 -from . import autotune #noqa: F401 -from . import nn #noqa: F401 -from . import asp #noqa: F401 +from . import autograd # noqa: F401 +from . import autotune # noqa: F401 +from . import nn # noqa: F401 +from . import asp # noqa: F401 from ..fluid.layers.loss import identity_loss diff --git a/python/paddle/incubate/asp/__init__.py b/python/paddle/incubate/asp/__init__.py index d2a56fd117c41de59a8d5162fbc5f7533691c6e1..662e24900fce821ab67d2a206aa8a326975273db 100644 --- a/python/paddle/incubate/asp/__init__.py +++ b/python/paddle/incubate/asp/__init__.py @@ -13,13 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.contrib.sparsity import calculate_density #noqa: F401 -from ...fluid.contrib.sparsity import decorate #noqa: F401 -from ...fluid.contrib.sparsity import prune_model #noqa: F401 -from ...fluid.contrib.sparsity import set_excluded_layers #noqa: F401 -from ...fluid.contrib.sparsity import reset_excluded_layers #noqa: F401 +from ...fluid.contrib.sparsity import calculate_density # noqa: F401 +from ...fluid.contrib.sparsity import decorate # noqa: F401 +from ...fluid.contrib.sparsity import prune_model # noqa: F401 +from ...fluid.contrib.sparsity import set_excluded_layers # noqa: F401 +from ...fluid.contrib.sparsity import reset_excluded_layers # noqa: F401 -__all__ = [ #noqa - 'calculate_density', 'decorate', 'prune_model', 'set_excluded_layers', - 'reset_excluded_layers' +__all__ = [ # noqa + 'calculate_density', + 'decorate', + 'prune_model', + 'set_excluded_layers', + 'reset_excluded_layers', ] diff --git a/python/paddle/incubate/autograd/__init__.py b/python/paddle/incubate/autograd/__init__.py index c5ff3b18d4d4945af4dfb8a243f5ad32378b014a..d9b9e4178191753a6204db8f26ec5a7b5c08dc12 100644 --- a/python/paddle/incubate/autograd/__init__.py +++ b/python/paddle/incubate/autograd/__init__.py @@ -17,6 +17,12 @@ from .primx import prim2orig from .utils import disable_prim, enable_prim, prim_enabled __all__ = [ # noqa - 'vjp', 'jvp', 'Jacobian', 'Hessian', 'enable_prim', 'disable_prim', - 'forward_grad', 'grad' + 'vjp', + 'jvp', + 'Jacobian', + 'Hessian', + 'enable_prim', + 'disable_prim', + 'forward_grad', + 'grad', ] diff --git a/python/paddle/incubate/autograd/functional.py b/python/paddle/incubate/autograd/functional.py index dce83b7505fb208a22ca5595319929910f55ec65..eafaaaea329ad7c190312a5e103c97e12190de40 100644 --- a/python/paddle/incubate/autograd/functional.py +++ b/python/paddle/incubate/autograd/functional.py @@ -253,8 +253,7 @@ class Jacobian(object): @property def shape(self): - """The shape of flattened Jacobian matrix. 
- """ + """The shape of flattened Jacobian matrix.""" return self._jacobian.shape @@ -308,11 +307,11 @@ class Hessian(object): """ def __init__(self, func, xs, is_batched=False): - def _jac_func(*xs): jac = Jacobian(func, xs, is_batched=is_batched) - if (is_batched and jac.shape[1] != 1) or (not is_batched - and jac.shape[0] != 1): + if (is_batched and jac.shape[1] != 1) or ( + not is_batched and jac.shape[0] != 1 + ): raise RuntimeError( "The function given to Hessian shoud return as single element Tensor or batched single element Tensor." ) @@ -325,8 +324,7 @@ class Hessian(object): @property def shape(self): - """The shape of flattened Hessian matrix. - """ + """The shape of flattened Hessian matrix.""" return self.symbolic.shape @@ -369,32 +367,41 @@ class _Jacobian(object): @property def _lazy_axis(self): - """"The axis of lazily evaluated.""" + """ "The axis of lazily evaluated.""" raise NotImplementedError def _lazy_indexes(self, indexes): idx = indexes[self._lazy_axis] - return (idx, ) if isinstance(idx, int) else tuple( - range(idx.start, idx.stop, idx.step)) + return ( + (idx,) + if isinstance(idx, int) + else tuple(range(idx.start, idx.stop, idx.step)) + ) def _flatten(self, xs): raise NotImplementedError def _shifted_indexes(self, indexes, lazy_axis_size=0): idx = indexes[self._lazy_axis] - shifted_lazy_axis_idx = 0 if isinstance(idx, int) else slice( - 0, lazy_axis_size, 1) - return indexes[:self._lazy_axis] + ( - shifted_lazy_axis_idx, ) + indexes[self._lazy_axis + 1:] + shifted_lazy_axis_idx = ( + 0 if isinstance(idx, int) else slice(0, lazy_axis_size, 1) + ) + return ( + indexes[: self._lazy_axis] + + (shifted_lazy_axis_idx,) + + indexes[self._lazy_axis + 1 :] + ) def __getitem__(self, indexes): indexes = _multi_index(indexes, self.shape) if isinstance(indexes[self._lazy_axis], int): - other_indexes = indexes[:self._lazy_axis] + \ - indexes[self._lazy_axis+1:] - return self._cached_evaluate( - indexes[self._lazy_axis])[other_indexes] + other_indexes = ( + indexes[: self._lazy_axis] + indexes[self._lazy_axis + 1 :] + ) + return self._cached_evaluate(indexes[self._lazy_axis])[ + other_indexes + ] lazy_indexes = self._lazy_indexes(indexes) # Using concat and reshape to replace stack operator temporarily, as # it is not a primitive operator. 
@@ -402,7 +409,8 @@ class _Jacobian(object): shape[self._lazy_axis] = len(lazy_indexes) part_jac = paddle.concat( [self._cached_evaluate(i) for i in lazy_indexes], - axis=self._lazy_axis).reshape(shape) + axis=self._lazy_axis, + ).reshape(shape) return part_jac[self._shifted_indexes(indexes, len(lazy_indexes))] def _cached_evaluate(self, k): @@ -435,13 +443,15 @@ class _JacobianNoBatch(_Jacobian): return 0 def _flatten(self, xs): - return paddle.concat(tuple(x.reshape((-1, )) for x in xs)) + return paddle.concat(tuple(x.reshape((-1,)) for x in xs)) def _evaluate(self, row_index): - return self._flatten(_grad( - self._flatten_ys[row_index], - self._xs, - )) + return self._flatten( + _grad( + self._flatten_ys[row_index], + self._xs, + ) + ) class _JacobianBatchFirst(_Jacobian): @@ -455,8 +465,11 @@ class _JacobianBatchFirst(_Jacobian): @property def shape(self): - return (self._flatten_xs.shape[0], self._flatten_ys.shape[1], - self._flatten_xs.shape[1]) + return ( + self._flatten_xs.shape[0], + self._flatten_ys.shape[1], + self._flatten_xs.shape[1], + ) @property def _lazy_axis(self): @@ -464,7 +477,8 @@ class _JacobianBatchFirst(_Jacobian): def _flatten(self, xs): return paddle.concat( - tuple(x.reshape((x.shape[0], -1)) for x in utils.as_tensors(xs)), 1) + tuple(x.reshape((x.shape[0], -1)) for x in utils.as_tensors(xs)), 1 + ) def _evaluate(self, row_index): return self._flatten(_grad(self._flatten_ys[:, row_index], self._xs)) @@ -490,24 +504,27 @@ def _multi_index(indexes, shape): Returns: tuple: The standard format index as the above description. """ - indexes = indexes if isinstance(indexes, typing.Sequence) else (indexes, ) + indexes = indexes if isinstance(indexes, typing.Sequence) else (indexes,) if any(isinstance(i, type(Ellipsis)) for i in indexes): raise IndexError('Ellipsis index currently is not supported.') # Fill the right-most elements. - indexes = indexes + (slice(0, None, None), ) * (len(shape) - len(indexes)) + indexes = indexes + (slice(0, None, None),) * (len(shape) - len(indexes)) # Convert to positive index. positive_indexes = [] for i, index in enumerate(indexes): if isinstance(index, slice): - index = slice(index.start or 0, index.stop or shape[i], index.step - or 1) + index = slice( + index.start or 0, index.stop or shape[i], index.step or 1 + ) positive_indexes.append( slice( index.start + shape[i] if index.start < 0 else index.start, index.stop + shape[i] if index.stop < 0 else index.stop, # Negative step means index backward, no need to convert to # positive interger. - index.step)) + index.step, + ) + ) elif isinstance(index, int): positive_indexes.append(index + shape[i] if index < 0 else index) else: @@ -522,8 +539,8 @@ def _replace_none_with_zero_tensor(xs, refs): return xs elif isinstance(xs, typing.Sequence): return tuple( - _replace_none_with_zero_tensor(x, refs[i]) - for i, x in enumerate(xs)) + _replace_none_with_zero_tensor(x, refs[i]) for i, x in enumerate(xs) + ) else: return xs @@ -568,8 +585,11 @@ def _grad(ys, xs, v=None): # follow code snippet fixes the problem by return the first element of # xs_grad when the xs is a signle Tensor. 
xs_grad = paddle.grad(ys, xs, v, create_graph=True, allow_unused=True) - if isinstance(xs, paddle.fluid.framework.Variable) and isinstance( - xs_grad, typing.Sequence) and len(xs_grad) > 0: + if ( + isinstance(xs, paddle.fluid.framework.Variable) + and isinstance(xs_grad, typing.Sequence) + and len(xs_grad) > 0 + ): xs_grad = xs_grad[0] else: xs_grad = paddle.incubate.autograd.grad(ys, xs, v) @@ -645,18 +665,23 @@ def _check_inputs(func, xs, v=None): raise TypeError(f"Expected 'fun' is Callable, but got {type(func)}.") if not isinstance(xs, (framework.Variable, typing.Sequence)): - raise TypeError(f"Expected 'xs' is a Tensor|Sequence[Tensor]," - f"but got {type(xs)}.") + raise TypeError( + f"Expected 'xs' is a Tensor|Sequence[Tensor]," + f"but got {type(xs)}." + ) if isinstance(xs, typing.Sequence) and not all( - isinstance(x, framework.Variable) for x in xs): + isinstance(x, framework.Variable) for x in xs + ): raise TypeError("All elements of 'xs' shoule be Tensor.") if not isinstance(v, (framework.Variable, typing.Sequence, type(None))): raise TypeError( - f"Expected 'v' is Tensor|Sequence[Tensor]|None, but got {type(v)}.") + f"Expected 'v' is Tensor|Sequence[Tensor]|None, but got {type(v)}." + ) if isinstance(v, typing.Sequence) and not all( - isinstance(e, framework.Variable) for e in v): + isinstance(e, framework.Variable) for e in v + ): raise TypeError("All elements of 'xs' shoule be Tensor.") @@ -666,11 +691,14 @@ def _check_v_shape(v, refs): v, refs = utils.as_tensors(v), utils.as_tensors(refs) if len(refs) != len(v): - raise RuntimeError(f"The argument v is a tuple of invalid length:" - f"should be {len(refs)} but got {len(v)}.") + raise RuntimeError( + f"The argument v is a tuple of invalid length:" + f"should be {len(refs)} but got {len(v)}." + ) for index, (element_v, element_ref) in enumerate(zip(v, refs)): if element_v.shape != element_ref.shape: raise RuntimeError( f"The v[{index}] has invalid shape: should " - f"be {element_ref.shape} but got {element_v.shape}.") + f"be {element_ref.shape} but got {element_v.shape}." + ) diff --git a/python/paddle/incubate/autograd/primapi.py b/python/paddle/incubate/autograd/primapi.py index fa289e259cbc31395835b4000cecc8952f1eaf46..18a06af5dca7fccc7cc6645e92a6c8397c851182 100644 --- a/python/paddle/incubate/autograd/primapi.py +++ b/python/paddle/incubate/autograd/primapi.py @@ -64,25 +64,35 @@ def forward_grad(outputs, inputs, grad_inputs=None): paddle.disable_static() """ if not utils.prim_enabled(): - raise RuntimeError('forward_grad must be running on primitive' - 'operators, use enable_prim to turn it on.') + raise RuntimeError( + 'forward_grad must be running on primitive' + 'operators, use enable_prim to turn it on.' + ) if not isinstance(outputs, (framework.Variable, typing.Sequence)): - raise TypeError(f'Expected outputs is Tensor|Sequence[Tesnor], ' - f'but got {type(outputs)}.') + raise TypeError( + f'Expected outputs is Tensor|Sequence[Tesnor], ' + f'but got {type(outputs)}.' + ) if not isinstance(inputs, (framework.Variable, typing.Sequence)): - raise TypeError(f'Expected inputs is Tensor|Sequence[Tesnor], ' - f'but got {type(inputs)}.') + raise TypeError( + f'Expected inputs is Tensor|Sequence[Tesnor], ' + f'but got {type(inputs)}.' 
+ ) - ys, xs, xs_dot = utils.as_tensors(outputs), utils.as_tensors( - inputs), utils.as_tensors(grad_inputs) + ys, xs, xs_dot = ( + utils.as_tensors(outputs), + utils.as_tensors(inputs), + utils.as_tensors(grad_inputs), + ) block = framework.default_main_program().current_block() if any(x.block != block for x in xs + ys): raise RuntimeError( 'Variable in inputs and targets should exist in current block of ' - 'main program.') + 'main program.' + ) primx.orig2prim(block) ad = primx.Transform(ys[0].block) @@ -141,22 +151,32 @@ def grad(outputs, inputs, grad_outputs=None): # backward.gradients returns a list though the inputs is a signle Tensor. # The follow code snippet fixes the problem by return the first element # of grad_inputs when the inputs is a signle Tensor. - if isinstance(inputs, framework.Variable) and isinstance( - grad_inputs, typing.Sequence) and len(grad_inputs) > 0: + if ( + isinstance(inputs, framework.Variable) + and isinstance(grad_inputs, typing.Sequence) + and len(grad_inputs) > 0 + ): return grad_inputs[0] else: return grad_inputs if not isinstance(outputs, (framework.Variable, typing.Sequence)): - raise TypeError(f'Expected outputs is Tensor|Sequence[Tesnor], ' - f'but got {type(outputs)}.') + raise TypeError( + f'Expected outputs is Tensor|Sequence[Tesnor], ' + f'but got {type(outputs)}.' + ) if not isinstance(inputs, (framework.Variable, typing.Sequence)): - raise TypeError(f'Expected inputs is Tensor|Sequence[Tesnor], ' - f'but got {type(inputs)}.') + raise TypeError( + f'Expected inputs is Tensor|Sequence[Tesnor], ' + f'but got {type(inputs)}.' + ) - ys, xs, ys_bar = utils.as_tensors(outputs), utils.as_tensors( - inputs), utils.as_tensors(grad_outputs) + ys, xs, ys_bar = ( + utils.as_tensors(outputs), + utils.as_tensors(inputs), + utils.as_tensors(grad_outputs), + ) block = framework.default_main_program().current_block() if any((x is not None and x.block != block) for x in xs + ys): raise RuntimeError( diff --git a/python/paddle/incubate/autograd/primops.py b/python/paddle/incubate/autograd/primops.py index 502c10783e887d12115cdb9d2b295a5c0d431cce..29a17dda1d45887ecd777262deaf2e57c0335bdd 100644 --- a/python/paddle/incubate/autograd/primops.py +++ b/python/paddle/incubate/autograd/primops.py @@ -37,13 +37,9 @@ def _simple_binop(helper): if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=optype, - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Z': out}, - attrs={}) + helper.append_op( + type=optype, inputs={'X': x, 'Y': y}, outputs={'Z': out}, attrs={} + ) return out @@ -53,16 +49,16 @@ def _manipulation_unop(helper): attrs = { k: helper.kwargs[k] - for k in ('shape', 'axis', 'index') if k in helper.kwargs + for k in ('shape', 'axis', 'index') + if k in helper.kwargs } if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=optype, - inputs={'X': x}, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type=optype, inputs={'X': x}, outputs={'Y': out}, attrs=attrs + ) return out @@ -94,23 +90,23 @@ def set_value(x, y, axis, starts, ends, strides, out): assert x is out, "x and out should be the same Tensor in set_value" attrs = {'axes': axis, 'starts': starts, 'ends': ends, 'steps': strides} helper = LayerHelper('set_value', **locals()) - helper.append_op(type=helper.layer_type, - inputs={ - 'Input': x, - 'ValueTensor': y - }, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'Input': x, 'ValueTensor': y}, + 
outputs={'Out': out}, + attrs=attrs, + ) return out def mean(x, axis=None, keepdim=False): axes = axis or tuple(range(0, len(x.shape))) sum = reduce_sum(x, axis=axes, keepdim=keepdim) - norm = fill_const(shape=sum.shape, - value=functools.reduce(operator.mul, - [x.shape[axis] for axis in axes]), - dtype=sum.dtype) + norm = fill_const( + shape=sum.shape, + value=functools.reduce(operator.mul, [x.shape[axis] for axis in axes]), + dtype=sum.dtype, + ) return div(sum, norm) @@ -122,16 +118,18 @@ def zeros(shape, dtype): return fill_const(0, shape, dtype) -def batch_norm(x, - axis, - gamma, - beta, - run_mean, - run_var, - eps=1e-5, - momentum=0.9, - use_run_stat=False, - reserve_space=None): +def batch_norm( + x, + axis, + gamma, + beta, + run_mean, + run_var, + eps=1e-5, + momentum=0.9, + use_run_stat=False, + reserve_space=None, +): """batch normalizer. Args: @@ -150,40 +148,59 @@ def batch_norm(x, Defaults to False. """ reduce_axes = tuple(i for i in range(len(x.shape)) if i != axis) - stats_shape = tuple(1 if i in reduce_axes else s - for i, s in enumerate(x.shape)) + stats_shape = tuple( + 1 if i in reduce_axes else s for i, s in enumerate(x.shape) + ) batch_mean = zeros(run_mean.shape, run_mean.dtype) batch_var = zeros(run_var.shape, run_var.dtype) if not use_run_stat: batch_mean = mean(x, reduce_axes, keepdim=True) - batch_var = mean(square(sub(x, broadcast(batch_mean, x.shape))), - reduce_axes, - keepdim=True) + batch_var = mean( + square(sub(x, broadcast(batch_mean, x.shape))), + reduce_axes, + keepdim=True, + ) x_hat = div( sub(x, broadcast(batch_mean, x.shape)), sqrt( - add(broadcast(batch_var, x.shape), - fill_const(eps, x.shape, batch_var.dtype)))) + add( + broadcast(batch_var, x.shape), + fill_const(eps, x.shape, batch_var.dtype), + ) + ), + ) momentum = fill_const(momentum, run_mean.shape, run_mean.dtype) run_mean = add( mul(momentum, run_mean), - mul(sub(ones(run_mean.shape, run_mean.dtype), momentum), - reshape(batch_mean, run_mean.shape))) + mul( + sub(ones(run_mean.shape, run_mean.dtype), momentum), + reshape(batch_mean, run_mean.shape), + ), + ) run_var = add( mul(momentum, run_var), - mul(sub(ones(run_var.shape, run_var.dtype), momentum), - reshape(batch_var, run_var.shape))) + mul( + sub(ones(run_var.shape, run_var.dtype), momentum), + reshape(batch_var, run_var.shape), + ), + ) else: x_hat = div( sub(x, broadcast(reshape(run_mean, stats_shape), x.shape)), sqrt( - add(broadcast(reshape(run_var, stats_shape), x.shape), - fill_const(eps, x.shape, x.dtype)))) - y = add(mul(broadcast(reshape(gamma, stats_shape), x_hat.shape), x_hat), - broadcast(reshape(beta, stats_shape), x_hat.shape)) + add( + broadcast(reshape(run_var, stats_shape), x.shape), + fill_const(eps, x.shape, x.dtype), + ) + ), + ) + y = add( + mul(broadcast(reshape(gamma, stats_shape), x_hat.shape), x_hat), + broadcast(reshape(beta, stats_shape), x_hat.shape), + ) if reserve_space: return run_mean, reserve_space, batch_mean, batch_var, run_var, y @@ -192,7 +209,7 @@ def batch_norm(x, def square(x): - return pow(x, fill_const(2., x.shape, x.dtype)) + return pow(x, fill_const(2.0, x.shape, x.dtype)) @REGISTER_FN('add_p', 'X', 'Y', 'Z') @@ -279,10 +296,12 @@ def split(x, num_or_sections, axis=0, outs=None): helper.create_variable_for_type_inference(dtype=x.dtype) for i in range(n) ] - helper.append_op(type=helper.layer_type, - inputs={'X': x}, - outputs={'YS': outs}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'X': x}, + outputs={'YS': outs}, + attrs=attrs, + ) return outs @@ -294,17 
+313,19 @@ def concat(xs, axis=0, out=None): helper = LayerHelper('concat_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=xs[0].dtype) - helper.append_op(type=helper.layer_type, - inputs={'XS': xs}, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'XS': xs}, + outputs={'Y': out}, + attrs=attrs, + ) return out @REGISTER_FN('reduce_sum_p', 'X', 'Y') def reduce_sum(x, axis=None, keepdim=False, out=None): axes = axis or tuple(range(0, len(x.shape))) - axes = (axes, ) if isinstance(axes, int) else axes + axes = (axes,) if isinstance(axes, int) else axes if not isinstance(axis, (tuple, list)): raise TypeError(f'axis must be tuple or list, but got {type(axis)}') if not isinstance(keepdim, bool): @@ -315,10 +336,9 @@ def reduce_sum(x, axis=None, keepdim=False, out=None): if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={'X': x}, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, inputs={'X': x}, outputs={'Y': out}, attrs=attrs + ) return out @@ -330,28 +350,33 @@ def matmul(x, y, out=None): @REGISTER_FN('slice_select_p', 'X', 'Y') def slice_select(x, axis, starts, ends, strides, out=None): if not isinstance(axis, (list, tuple)): - raise TypeError(f'Argument type error. `axis` is supposed to be list or' - f' tuple but found {type(axis)}.') + raise TypeError( + f'Argument type error. `axis` is supposed to be list or' + f' tuple but found {type(axis)}.' + ) if not isinstance(starts, (list, tuple)): raise TypeError( f'Argument type error. `starts` is supposed to be list or' - f' tuple but found {type(starts)}.') + f' tuple but found {type(starts)}.' + ) if not isinstance(ends, (list, tuple)): - raise TypeError(f'Argument type error. `ends` is supposed to be list or' - f' tuple but found {type(ends)}.') + raise TypeError( + f'Argument type error. `ends` is supposed to be list or' + f' tuple but found {type(ends)}.' + ) assert len(axis) == len(starts) == len(ends) == len(strides), ( f'len(axis), len(starts), len(ends) and len(strides) should be equal, ' f'but len(axis)={len(axis)}, len(starts)={len(starts)}, ' - f'len(ends)={len(ends)} and len(strides)={len(strides)}') + f'len(ends)={len(ends)} and len(strides)={len(strides)}' + ) attrs = {'axis': axis, 'starts': starts, 'ends': ends, 'strides': strides} helper = LayerHelper('slice_select_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={'X': x}, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, inputs={'X': x}, outputs={'Y': out}, attrs=attrs + ) return out @@ -360,22 +385,23 @@ def slice_assign(x, y, axis, starts, ends, strides, out=None): assert len(starts) == len(ends) == len(strides) == len(axis), ( f'len(starts), len(ends), len(strides) and len(axis) should be equal, ' f'but len(starts)={len(starts)}, len(ends)={len(ends)}, ' - f'len(strides)={len(strides)} and len(axis)={len(axis)}') + f'len(strides)={len(strides)} and len(axis)={len(axis)}' + ) assert len(y.shape) == len(x.shape), ( f'len(y.shape) should be equal to len(x.shape), ' - f'but len(y.shape)={len(y.shape)} and len(x.shape)={len(x.shape)}.') + f'but len(y.shape)={len(y.shape)} and len(x.shape)={len(x.shape)}.' 
+ ) attrs = {'axis': axis, 'starts': starts, 'ends': ends, 'strides': strides} helper = LayerHelper('slice_assign_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Z': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'X': x, 'Y': y}, + outputs={'Z': out}, + attrs=attrs, + ) return out @@ -385,13 +411,12 @@ def gather(x, indextensor, axis, out=None): helper = LayerHelper('gather_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={ - 'X': x, - 'IndexTensor': indextensor - }, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'X': x, 'IndexTensor': indextensor}, + outputs={'Y': out}, + attrs=attrs, + ) return out @@ -399,26 +424,26 @@ def gather(x, indextensor, axis, out=None): def scatter_add(x, y, indextensor, axis, out=None): assert len(x.shape) == len(y.shape), ( f'len(x.shape) should be equal to len(y.shape), ' - f'but len(x.shape)={len(x.shape)} and len(y.shape)={len(y.shape)}.') - assert len( - indextensor.shape - ) == 1, f'len(indextensor.shape) must be equal to 1, but got {len(indextensor.shape)}.' + f'but len(x.shape)={len(x.shape)} and len(y.shape)={len(y.shape)}.' + ) + assert ( + len(indextensor.shape) == 1 + ), f'len(indextensor.shape) must be equal to 1, but got {len(indextensor.shape)}.' assert y.shape[axis] == indextensor.shape[0], ( f'y.shape[axis] should be equal to indextensor.shape[0], ' f'but y.shape[axis]={y.shape[axis]} and ' - f'indextensor.shape[0]={indextensor.shape[0]}.') + f'indextensor.shape[0]={indextensor.shape[0]}.' + ) attrs = {'axis': axis} helper = LayerHelper('scatter_add_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={ - 'X': x, - 'Y': y, - 'IndexTensor': indextensor - }, - outputs={'Z': out}, - attrs=attrs) + helper.append_op( + type=helper.layer_type, + inputs={'X': x, 'Y': y, 'IndexTensor': indextensor}, + outputs={'Z': out}, + attrs=attrs, + ) return out @@ -431,24 +456,26 @@ def log(x, out=None): def select(cond, x, y, out=None): if len(cond.shape) != len(x.shape): raise ValueError( - "len(cond.shape) should be equal to len(x.shape), but len(cond.shape)={} and len(x.shape)={}." - .format(len(cond.shape), len(x.shape))) + "len(cond.shape) should be equal to len(x.shape), but len(cond.shape)={} and len(x.shape)={}.".format( + len(cond.shape), len(x.shape) + ) + ) if len(x.shape) != len(y.shape): raise ValueError( - "len(x.shape) should be equal to len(y.shape), but len(x.shape)={} and len(y.shape)={}." 
- .format(len(x.shape), len(y.shape))) + "len(x.shape) should be equal to len(y.shape), but len(x.shape)={} and len(y.shape)={}.".format( + len(x.shape), len(y.shape) + ) + ) helper = LayerHelper('select_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type=helper.layer_type, - inputs={ - 'Condition': cond, - 'X': x, - 'Y': y - }, - outputs={'Z': out}) + helper.append_op( + type=helper.layer_type, + inputs={'Condition': cond, 'X': x, 'Y': y}, + outputs={'Z': out}, + ) return out @@ -492,10 +519,12 @@ def cast(x, dtype, out=None): helper = LayerHelper('cast_p', **locals()) if out is None: out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=helper.layer_type, - inputs={'X': x}, - outputs={'Y': out}, - attrs={'dtype': dtype}) + helper.append_op( + type=helper.layer_type, + inputs={'X': x}, + outputs={'Y': out}, + attrs={'dtype': dtype}, + ) return out @@ -511,7 +540,7 @@ def uniform_random(dtype, min_value, max_value, seed, shape=None, out=None): 'dtype': dtype, 'min': min_value, 'max': max_value, - 'seed': seed + 'seed': seed, } helper = LayerHelper('uniform_random_p', **locals()) if out is None: diff --git a/python/paddle/incubate/autograd/primreg.py b/python/paddle/incubate/autograd/primreg.py index 31f47f8196315ca971dd025ec7b547c47038fabf..7d81847f9052c640379dff08f31464dda165a446 100644 --- a/python/paddle/incubate/autograd/primreg.py +++ b/python/paddle/incubate/autograd/primreg.py @@ -14,7 +14,8 @@ class Registry(object): - """ A general registry object. """ + """A general registry object.""" + __slots__ = ['name', 'tab'] def __init__(self, name): @@ -22,7 +23,9 @@ class Registry(object): self.tab = {} def register(self, name, value): - assert name not in self.tab, f'name "{name}" should not be registered before.' + assert ( + name not in self.tab + ), f'name "{name}" should not be registered before.' self.tab[name] = value def lookup(self, name): @@ -78,15 +81,17 @@ def op_position_inputs(op): """ args = _primop_position_argnames.lookup(op.type) - assert args is not None, f'args of {op.type} should not be None in op_position_inputs().' + assert ( + args is not None + ), f'args of {op.type} should not be None in op_position_inputs().' *input_names, _ = args inputs = [] for name in input_names: vars = list(map(op.block.var, op.input(name))) - assert len( - vars - ) >= 0, f'len(vars) should be greater than or equal to 0, but len(vars)={len(vars)}.' + assert ( + len(vars) >= 0 + ), f'len(vars) should be greater than or equal to 0, but len(vars)={len(vars)}.' if len(vars) > 1: inputs.append(vars) else: @@ -120,9 +125,9 @@ def op_position_output(op): *_, output_name = args outvars = list(map(op.block.var, op.output(output_name))) - assert len( - outvars - ) >= 0, f'len(outvars) should be greater than or equal to 0, but len(outvars)={len(outvars)}.' + assert ( + len(outvars) >= 0 + ), f'len(outvars) should be greater than or equal to 0, but len(outvars)={len(outvars)}.' 
if len(outvars) > 1: output = outvars else: @@ -184,9 +189,10 @@ def REGISTER_ORIG2PRIM(op_type): raise TypeError(f'op_type must be str, but got {type(op_type)}.') def wrapper(f): - def _lower(op, *args, **kwargs): - assert op.type == op_type, f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' + assert ( + op.type == op_type + ), f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' return f(op, *args, **kwargs) _orig2prim.register(op_type, _lower) @@ -216,9 +222,10 @@ def REGISTER_PRIM2ORIG(op_type): raise TypeError(f'op_type must be str, but got {type(op_type)}.') def wrapper(f): - def _lower(op, *args, **kwargs): - assert op.type == op_type, f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' + assert ( + op.type == op_type + ), f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' return f(op, *args, **kwargs) _prim2orig.register(op_type, _lower) @@ -247,9 +254,10 @@ def REGISTER_JVP(op_type): raise TypeError(f'op_type must be str, but got {type(op_type)}.') def wrapper(f): - def _jvp(op, *args, **kwargs): - assert op.type == op_type, f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' + assert ( + op.type == op_type + ), f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' return f(op, *args, **kwargs) _primop_jvp.register(op_type, _jvp) @@ -280,9 +288,10 @@ def REGISTER_TRANSPOSE(op_type): raise TypeError(f'op_type must be str, but got {type(op_type)}.') def wrapper(f): - def _transpose(op, dot_checker, *args, **kwargs): - assert op.type == op_type, f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' + assert ( + op.type == op_type + ), f'op.type should be equal to op_type, but op.type is {op.type} and op_type is {op_type}' return f(op, dot_checker, *args, **kwargs) _primop_transpose.register(op_type, _transpose) diff --git a/python/paddle/incubate/autograd/primrules.py b/python/paddle/incubate/autograd/primrules.py index 8a2f94145cd06f55083e1b52cf476db71e698325..1d98d62cef3e0f4eba950a3493434caa45352a58 100644 --- a/python/paddle/incubate/autograd/primrules.py +++ b/python/paddle/incubate/autograd/primrules.py @@ -19,15 +19,54 @@ import typing import paddle from . 
import primops -from .primops import (add, broadcast, concat, cos, div, eq, erf, exp, - fill_const, gather, ge, gt, log, matmul, mul, ne, neg, - reduce_sum, reshape, scatter_add, select, set_value, sin, - slice_assign, slice_select, split, sqrt, sub, tanh, - transpose, bernoulli, rsqrt, uniform_random) -from .primreg import (REGISTER_JVP, REGISTER_ORIG2PRIM, REGISTER_PRIM2ORIG, - REGISTER_TRANSPOSE, lookup_fn, lookup_jvp, - lookup_orig2prim, lookup_prim2orig, lookup_transpose, - op_position_inputs, op_position_output) +from .primops import ( + add, + broadcast, + concat, + cos, + div, + eq, + erf, + exp, + fill_const, + gather, + ge, + gt, + log, + matmul, + mul, + ne, + neg, + reduce_sum, + reshape, + scatter_add, + select, + set_value, + sin, + slice_assign, + slice_select, + split, + sqrt, + sub, + tanh, + transpose, + bernoulli, + rsqrt, + uniform_random, +) +from .primreg import ( + REGISTER_JVP, + REGISTER_ORIG2PRIM, + REGISTER_PRIM2ORIG, + REGISTER_TRANSPOSE, + lookup_fn, + lookup_jvp, + lookup_orig2prim, + lookup_prim2orig, + lookup_transpose, + op_position_inputs, + op_position_output, +) from .utils import INT_DTYPE_2_STRING, get_output_var_list @@ -94,20 +133,20 @@ def elementwise_add_orig2prim(op, x, y): if x.shape != y.shape: y = broadcast(y, shape=x.shape) if op.attr('Scale_x') - 1.0 > 1e-5: - scale_x = fill_const(shape=x.shape, - dtype=x.dtype, - value=op.attr('Scale_x')) + scale_x = fill_const( + shape=x.shape, dtype=x.dtype, value=op.attr('Scale_x') + ) x = mul(x, scale_x) if op.attr('Scale_y') - 1.0 > 1e-5: - scale_y = fill_const(shape=y.shape, - dtype=y.dtype, - value=op.attr('Scale_y')) + scale_y = fill_const( + shape=y.shape, dtype=y.dtype, value=op.attr('Scale_y') + ) y = mul(y, scale_y) z = add(x, y) if op.attr('Scale_out') - 1.0 > 1e-5: - scale_out = fill_const(shape=z.shape, - dtype=z.dtype, - value=op.attr('Scale_out')) + scale_out = fill_const( + shape=z.shape, dtype=z.dtype, value=op.attr('Scale_out') + ) z = mul(z, scale_out) return z @@ -117,20 +156,20 @@ def elementwise_sub_orig2prim(op, x, y): if x.shape != y.shape: y = broadcast(y, shape=x.shape) if op.attr('Scale_x') - 1.0 > 1e-5: - scale_x = fill_const(shape=x.shape, - dtype=x.dtype, - value=op.attr('Scale_x')) + scale_x = fill_const( + shape=x.shape, dtype=x.dtype, value=op.attr('Scale_x') + ) x = mul(x, scale_x) if op.attr('Scale_y') - 1.0 > 1e-5: - scale_y = fill_const(shape=y.shape, - dtype=y.dtype, - value=op.attr('Scale_y')) + scale_y = fill_const( + shape=y.shape, dtype=y.dtype, value=op.attr('Scale_y') + ) y = mul(y, scale_y) z = sub(x, y) if op.attr('Scale_out') - 1.0 > 1e-5: - scale_out = fill_const(shape=z.shape, - dtype=z.dtype, - value=op.attr('Scale_out')) + scale_out = fill_const( + shape=z.shape, dtype=z.dtype, value=op.attr('Scale_out') + ) z = mul(z, scale_out) return z @@ -140,20 +179,20 @@ def elementwise_mul_orig2prim(op, x, y): if x.shape != y.shape: y = broadcast(y, shape=x.shape) if op.attr('Scale_x') - 1.0 > 1e-5: - scale_x = fill_const(shape=x.shape, - dtype=x.dtype, - value=op.attr('Scale_x')) + scale_x = fill_const( + shape=x.shape, dtype=x.dtype, value=op.attr('Scale_x') + ) x = mul(x, scale_x) if op.attr('Scale_y') - 1.0 > 1e-5: - scale_y = fill_const(shape=y.shape, - dtype=y.dtype, - value=op.attr('Scale_y')) + scale_y = fill_const( + shape=y.shape, dtype=y.dtype, value=op.attr('Scale_y') + ) y = mul(y, scale_y) z = mul(x, y) if op.attr('Scale_out') - 1.0 > 1e-5: - scale_out = fill_const(shape=z.shape, - dtype=z.dtype, - value=op.attr('Scale_out')) + scale_out = fill_const( 
+ shape=z.shape, dtype=z.dtype, value=op.attr('Scale_out') + ) z = mul(z, scale_out) return z @@ -209,23 +248,26 @@ def fill_zeros_like_orig2prim(op, x): def fill_any_like_orig2prim(op, x): if op.attr('dtype') == -1: return fill_const(value=op.attr('value'), shape=x.shape, dtype=x.dtype) - return fill_const(value=op.attr('value'), - shape=x.shape, - dtype=paddle.dtype(op.attr('dtype'))) + return fill_const( + value=op.attr('value'), + shape=x.shape, + dtype=paddle.dtype(op.attr('dtype')), + ) @REGISTER_ORIG2PRIM('fill_constant') -def fill_const_orig2prim(op, - shape_tensor=None, - shape_tensor_list=None, - value_tensor=None): +def fill_const_orig2prim( + op, shape_tensor=None, shape_tensor_list=None, value_tensor=None +): if shape_tensor or shape_tensor_list or value_tensor: raise TypeError( 'fill_const_orig2prim currently not support Tensor input of shape and value.' ) - return fill_const(value=op.attr('value'), - shape=op.attr('shape'), - dtype=paddle.dtype(op.attr('dtype'))) + return fill_const( + value=op.attr('value'), + shape=op.attr('shape'), + dtype=paddle.dtype(op.attr('dtype')), + ) @REGISTER_ORIG2PRIM('sum') @@ -244,9 +286,9 @@ def index_select_orig2prim(op, index_t, x): @REGISTER_ORIG2PRIM('scale') def scale_orig2prim(op, scale_t, x): if scale_t is None: - scale_t = fill_const(shape=x.shape, - dtype=x.dtype, - value=op.attr('scale')) + scale_t = fill_const( + shape=x.shape, dtype=x.dtype, value=op.attr('scale') + ) bias_t = fill_const(shape=x.shape, dtype=x.dtype, value=op.attr('bias')) if op.attr('bias_after_scale'): return add(mul(x, scale_t), bias_t) @@ -272,14 +314,14 @@ def rsqrt_orig2prim(op, x): @REGISTER_ORIG2PRIM('matmul_v2') def matmul_v2_orig2prim(op, x, y): - def trans(shape): ret = [i for i in range(len(shape))] ret[-1], ret[-2] = ret[-2], ret[-1] return ret - assert len(x.shape) < 4 and len( - y.shape) < 4, 'Do not support multi batchsize dimensions currently.' + assert ( + len(x.shape) < 4 and len(y.shape) < 4 + ), 'Do not support multi batchsize dimensions currently.' if len(x.shape) == 1: x = broadcast(x, shape=[1, x.shape[0]]) @@ -295,12 +337,16 @@ def matmul_v2_orig2prim(op, x, y): ## NOTE(lml): The second output of reshape2 Xshape, which is only used in reshape2_grad, is meanlingless in new autograd mechanism, thus we use a zero tensor instead. @REGISTER_ORIG2PRIM('reshape2') def reshape2_orig2prim(op, shape_t, shape_tl, x): - assert shape_t is None, 'Can not lower reshape2 into prim ops with shapetensor.' - assert shape_tl is None, 'Can not lower reshape2 into prim ops with shapetensorlist.' + assert ( + shape_t is None + ), 'Can not lower reshape2 into prim ops with shapetensor.' + assert ( + shape_tl is None + ), 'Can not lower reshape2 into prim ops with shapetensorlist.' y, xshape = get_output_var_list(op) - return reshape(x, shape=y.shape), fill_const(shape=xshape.shape, - dtype=xshape.dtype, - value=0.0) + return reshape(x, shape=y.shape), fill_const( + shape=xshape.shape, dtype=xshape.dtype, value=0.0 + ) @REGISTER_ORIG2PRIM('concat') @@ -311,10 +357,16 @@ def concat_orig2prim(op, axis_t, xs): @REGISTER_ORIG2PRIM('slice') def slice_orig2prim(op, ends_t, ends_tl, x, starts_t, starts_tl): - assert starts_t is None, 'Can not lower concat into prim ops with startstensor.' + assert ( + starts_t is None + ), 'Can not lower concat into prim ops with startstensor.' assert ends_t is None, 'Can not lower concat into prim ops with endstensor.' - assert starts_tl is None, 'Can not lower concat into prim ops with startstensorlist.' 
- assert ends_tl is None, 'Can not lower concat into prim ops with endstensorlist.' + assert ( + starts_tl is None + ), 'Can not lower concat into prim ops with startstensorlist.' + assert ( + ends_tl is None + ), 'Can not lower concat into prim ops with endstensorlist.' starts = op.attr('starts') ends = op.attr('ends') strides = [1 for _ in starts] @@ -329,12 +381,12 @@ def slice_orig2prim(op, ends_t, ends_tl, x, starts_t, starts_tl): def sigmoid_orig2prim(op, x): return div( fill_const(value=1.0, shape=x.shape, dtype=x.dtype), - (add(fill_const(value=1.0, shape=x.shape, dtype=x.dtype), exp(neg(x))))) + (add(fill_const(value=1.0, shape=x.shape, dtype=x.dtype), exp(neg(x)))), + ) @REGISTER_ORIG2PRIM('p_norm') def p_norm_orig2prim(op, x): - def num_el(shape): n = 1 for s in shape: @@ -342,7 +394,8 @@ def p_norm_orig2prim(op, x): return n assert op.attr( - 'asvector'), 'Only support lower pnorm when asvector=True currently' + 'asvector' + ), 'Only support lower pnorm when asvector=True currently' if len(x.shape) > 1: x = reshape(x, shape=[num_el(x.shape)]) @@ -435,25 +488,38 @@ def gelu_orig2prim(op, x): x, mul( fill_const(0.044715, x.shape, x.dtype), - primops.pow(x, fill_const(3., x.shape, - x.dtype)))))))) + primops.pow( + x, fill_const(3.0, x.shape, x.dtype) + ), + ), + ), + ) + ), + ), + ) return mul(x, cdf) else: return mul( mul(fill_const(0.5, x.shape, x.dtype), x), - add(fill_const(1.0, x.shape, x.dtype), - erf(mul(x, fill_const(1 / math.sqrt(2.), x.shape, x.dtype))))) + add( + fill_const(1.0, x.shape, x.dtype), + erf(mul(x, fill_const(1 / math.sqrt(2.0), x.shape, x.dtype))), + ), + ) @REGISTER_ORIG2PRIM('dropout') def dropout_orig2prim(op, seed_t, x): - assert seed_t is None, 'Can not lower dropout into prim ops with seedtensor.' + assert ( + seed_t is None + ), 'Can not lower dropout into prim ops with seedtensor.' 
mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob')) if op.attr('dropout_implementation') == 'upscale_in_train': if op.attr('is_test') == False: out = div( mul(x, mask), - fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype)) + fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype), + ) return primops.cast(mask, dtype=paddle.uint8), out else: return primops.cast(mask, dtype=paddle.uint8), x @@ -462,7 +528,8 @@ def dropout_orig2prim(op, seed_t, x): return primops.cast(mask, dtype=paddle.uint8), mul(x, mask) else: return primops.cast(mask, dtype=paddle.uint8), mul( - x, fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype)) + x, fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype) + ) else: raise RuntimeError( 'Unsupported dropout_implementation, only support upscale_in_train and downgrade_in_infer' @@ -485,50 +552,62 @@ def uniform_random_orig2prim(op, shape_t, shape_tl): @REGISTER_ORIG2PRIM('reduce_sum') def reduce_sum_orig2prim(op, x): - axes = tuple(range(0, len( - x.shape))) if op.attr('reduce_all') else op.attr('dim') + axes = ( + tuple(range(0, len(x.shape))) + if op.attr('reduce_all') + else op.attr('dim') + ) return reduce_sum(x, axis=axes, keepdim=op.attr('keep_dim')) @REGISTER_ORIG2PRIM('reduce_mean') def reduce_mean_orig2prim(op, x): - axes = tuple(range(0, len( - x.shape))) if op.attr('reduce_all') else op.attr('dim') + axes = ( + tuple(range(0, len(x.shape))) + if op.attr('reduce_all') + else op.attr('dim') + ) return primops.mean(x, axes, op.attr('keep_dim')) @REGISTER_ORIG2PRIM('batch_norm') -def batch_norm_orig2prim(op, bias, run_mean, momentum_tensor, scale, run_var, - x): +def batch_norm_orig2prim( + op, bias, run_mean, momentum_tensor, scale, run_var, x +): momentum = op.attr('momentum') eps = op.attr('epsilon') is_test = op.attr('is_test') data_layout = op.attr('data_layout') use_global_stats = op.attr('use_global_stats') trainable_statistics = op.attr('trainable_statistics') - reserve_space = None if len( - op.output_names) == 5 else get_output_var_list(op)[1] + reserve_space = ( + None if len(op.output_names) == 5 else get_output_var_list(op)[1] + ) - feature_axis = 1 if data_layout in ('NC', 'NCL', 'NCHW', - 'NCHWD') else len(x.shape) - 1 + feature_axis = ( + 1 if data_layout in ('NC', 'NCL', 'NCHW', 'NCHWD') else len(x.shape) - 1 + ) use_run_stat = (is_test and (not trainable_statistics)) or use_global_stats - return primops.batch_norm(x, - feature_axis, - scale, - bias, - run_mean, - run_var, - eps=eps, - momentum=momentum, - use_run_stat=use_run_stat, - reserve_space=reserve_space) + return primops.batch_norm( + x, + feature_axis, + scale, + bias, + run_mean, + run_var, + eps=eps, + momentum=momentum, + use_run_stat=use_run_stat, + reserve_space=reserve_space, + ) @REGISTER_ORIG2PRIM('size') def size_orig2prim(op, x): - return fill_const(functools.reduce(operator.mul, x.shape), (1, ), - paddle.int64) + return fill_const( + functools.reduce(operator.mul, x.shape), (1,), paddle.int64 + ) ## Register prim2orig lower rules @@ -617,9 +696,9 @@ def split_prim2orig(op, x): num_or_sections = op.attr('num_or_sections') if len(num_or_sections) == 1: num_or_sections = num_or_sections[0] - return paddle.split(x, - num_or_sections=num_or_sections, - axis=op.attr('axis')) + return paddle.split( + x, num_or_sections=num_or_sections, axis=op.attr('axis') + ) @REGISTER_PRIM2ORIG('concat_p') @@ -639,23 +718,27 @@ def matmul_prim2orig(op, x, y): @REGISTER_PRIM2ORIG('slice_select_p') def slice_select_prim2orig(op, x): - return 
paddle.strided_slice(x, - axes=op.attr('axis'), - starts=op.attr('starts'), - ends=op.attr('ends'), - strides=op.attr('strides')) + return paddle.strided_slice( + x, + axes=op.attr('axis'), + starts=op.attr('starts'), + ends=op.attr('ends'), + strides=op.attr('strides'), + ) @REGISTER_PRIM2ORIG('slice_assign_p') def slice_assign_prim2orig(op, x, y): x_copy = paddle.assign(x) - return set_value(x_copy, - y, - axis=op.attr('axis'), - starts=op.attr('starts'), - ends=op.attr('ends'), - strides=op.attr('strides'), - out=x_copy) + return set_value( + x_copy, + y, + axis=op.attr('axis'), + starts=op.attr('starts'), + ends=op.attr('ends'), + strides=op.attr('strides'), + out=x_copy, + ) @REGISTER_PRIM2ORIG('gather_p') @@ -673,26 +756,32 @@ def scatter_add_prim2orig(op, index_t, x, y): @REGISTER_PRIM2ORIG('fill_constant_p') def fill_constant_prim2orig(op): - return paddle.full(shape=op.attr('shape'), - fill_value=op.attr('value'), - dtype=INT_DTYPE_2_STRING[op.attr('dtype')]) + return paddle.full( + shape=op.attr('shape'), + fill_value=op.attr('value'), + dtype=INT_DTYPE_2_STRING[op.attr('dtype')], + ) @REGISTER_PRIM2ORIG('bernoulli_p') def bernoulli_prim2orig(op): - t = paddle.full(shape=op.attr('shape'), - fill_value=op.attr('p'), - dtype=INT_DTYPE_2_STRING[op.attr('dtype')]) + t = paddle.full( + shape=op.attr('shape'), + fill_value=op.attr('p'), + dtype=INT_DTYPE_2_STRING[op.attr('dtype')], + ) return paddle.bernoulli(t) @REGISTER_PRIM2ORIG('uniform_random_p') def uniform_random_prim2orig(op): - return paddle.uniform(shape=op.attr('shape'), - dtype=INT_DTYPE_2_STRING[op.attr('dtype')], - min=op.attr('min'), - max=op.attr('max'), - seed=op.attr('seed')) + return paddle.uniform( + shape=op.attr('shape'), + dtype=INT_DTYPE_2_STRING[op.attr('dtype')], + min=op.attr('min'), + max=op.attr('max'), + seed=op.attr('seed'), + ) @REGISTER_PRIM2ORIG('select_p') @@ -810,7 +899,7 @@ def tanh_jvp(op, x_dot): def sin_jvp(op, x_dot): if x_dot is None: return None - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) return mul(x_dot, cos(x)) @@ -818,7 +907,7 @@ def sin_jvp(op, x_dot): def cos_jvp(op, x_dot): if x_dot is None: return None - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) return mul(x_dot, neg(sin(x))) @@ -834,25 +923,26 @@ def exp_jvp(op, x_dot): def erf_jvp(op, x_dot): if x_dot is None: return None - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) return mul( - fill_const(2. 
/ math.sqrt(math.pi), x.shape, x.dtype), - mul(x_dot, exp(neg(primops.pow(x, fill_const(2., x.shape, x.dtype)))))) + fill_const(2.0 / math.sqrt(math.pi), x.shape, x.dtype), + mul(x_dot, exp(neg(primops.pow(x, fill_const(2.0, x.shape, x.dtype))))), + ) @REGISTER_JVP('abs_p') def abs_jvp(op, x_dot): if x_dot is None: return None - x, = op_position_inputs(op) - return select(ge(x, fill_const(0., x.shape, x.dtype)), x_dot, neg(x_dot)) + (x,) = op_position_inputs(op) + return select(ge(x, fill_const(0.0, x.shape, x.dtype)), x_dot, neg(x_dot)) @REGISTER_JVP('log_p') def log_jvp(op, x_dot): if x_dot is None: return None - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) return div(x_dot, x) @@ -929,12 +1019,9 @@ def slice_select_jvp(op, x_dot): starts = op.attr('starts') ends = op.attr('ends') strides = op.attr('strides') - return linear_jvp(op, - x_dot, - axis=axis, - starts=starts, - ends=ends, - strides=strides) + return linear_jvp( + op, x_dot, axis=axis, starts=starts, ends=ends, strides=strides + ) @REGISTER_JVP('slice_assign_p') @@ -948,13 +1035,9 @@ def slice_assign_jvp(op, x_dot, y_dot): starts = op.attr('starts') ends = op.attr('ends') strides = op.attr('strides') - return linear_jvp(op, - x_dot, - y_dot, - axis=axis, - starts=starts, - ends=ends, - strides=strides) + return linear_jvp( + op, x_dot, y_dot, axis=axis, starts=starts, ends=ends, strides=strides + ) @REGISTER_JVP('gather_p') @@ -993,7 +1076,7 @@ def eq_jvp(op, x_dot, y_dot): if x_dot is None and y_dot is None: return None x, _ = op_position_inputs(op) - z_dot = fill_const(value=0., shape=x.shape, dtype=x.dtype) + z_dot = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) return z_dot @@ -1002,7 +1085,7 @@ def gt_jvp(op, x_dot, y_dot): if x_dot is None and y_dot is None: return None x, _ = op_position_inputs(op) - z_dot = fill_const(value=0., shape=x.shape, dtype=x.dtype) + z_dot = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) return z_dot @@ -1011,7 +1094,7 @@ def ge_jvp(op, x_dot, y_dot): if x_dot is None and y_dot is None: return None x, _ = op_position_inputs(op) - z_dot = fill_const(value=0., shape=x.shape, dtype=x.dtype) + z_dot = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) return z_dot @@ -1020,13 +1103,12 @@ def ne_jvp(op, x_dot, y_dot): if x_dot is None and y_dot is None: return None x, _ = op_position_inputs(op) - z_dot = fill_const(value=0., shape=x.shape, dtype=x.dtype) + z_dot = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) return z_dot @REGISTER_JVP('pow_p') def pow_jvp(op, x_dot, y_dot): - def _compute_t1(x, y): zero_y = fill_const(value=0.0, shape=y.shape, dtype=y.dtype) one_y = fill_const(value=1.0, shape=y.shape, dtype=y.dtype) @@ -1096,7 +1178,8 @@ def add_transpose(op, check_dot, z_bar): x, y = op_position_inputs(op) assert check_dot(x) or check_dot(y), ( f'(check_dot(x) or check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' + ) x_bar = z_bar if check_dot(x) else None y_bar = z_bar if check_dot(y) else None return x_bar, y_bar @@ -1107,7 +1190,8 @@ def sub_transpose(op, check_dot, z_bar): x, y = op_position_inputs(op) assert check_dot(x) or check_dot(y), ( f'(check_dot(x) or check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' 
+ ) x_bar = z_bar if check_dot(x) else None y_bar = neg(z_bar) if check_dot(y) else None return x_bar, y_bar @@ -1118,7 +1202,8 @@ def mul_transpose(op, check_dot, z_bar): x, y = op_position_inputs(op) assert check_dot(x) ^ check_dot(y), ( f'(check_dot(x) ^ check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' + ) if check_dot(x): return mul(z_bar, y), None else: @@ -1135,14 +1220,14 @@ def div_transpose(op, check_dot, z_bar): @REGISTER_TRANSPOSE('reshape_p') def reshape_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' return reshape(y_bar, shape=x.shape) @REGISTER_TRANSPOSE('broadcast_p') def broadcast_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' bat = len(y_bar.shape) - len(x.shape) axis = list(range(bat)) @@ -1155,7 +1240,7 @@ def broadcast_transpose(op, check_dot, y_bar): @REGISTER_TRANSPOSE('transpose_p') def transpose_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' axis = op.attr('axis') reordered = sorted((k, i) for i, k in enumerate(axis)) @@ -1165,14 +1250,14 @@ def transpose_transpose(op, check_dot, y_bar): @REGISTER_TRANSPOSE('split_p') def split_transpose(op, check_dot, ys_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' return concat(ys_bar, axis=op.attr('axis')) @REGISTER_TRANSPOSE('concat_p') def concat_transpose(op, check_dot, y_bar): - xs, = op_position_inputs(op) + (xs,) = op_position_inputs(op) if not isinstance(xs, typing.Sequence): xs = [xs] for x in xs: @@ -1186,7 +1271,7 @@ def concat_transpose(op, check_dot, y_bar): @REGISTER_TRANSPOSE('reduce_sum_p') def reduce_sum_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' axes = op.attr('axis') shape = tuple(1 if i in axes else size for i, size in enumerate(x.shape)) @@ -1199,7 +1284,8 @@ def matmul_transpose(op, check_dot, z_bar): x, y = op_position_inputs(op) assert check_dot(x) ^ check_dot(y), ( f'(check_dot(x) ^ check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' + ) # TODO: replace it. 
this is hacky axis = [1, 0] if len(x.shape) == 2 else [0, 2, 1] if check_dot(x): @@ -1210,19 +1296,16 @@ def matmul_transpose(op, check_dot, z_bar): @REGISTER_TRANSPOSE('slice_select_p') def slice_select_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) assert check_dot(x), 'check_dot(x) must be True' zeros = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) axis = op.attr('axis') starts = op.attr('starts') ends = op.attr('ends') strides = op.attr('strides') - return slice_assign(zeros, - y_bar, - axis=axis, - starts=starts, - ends=ends, - strides=strides) + return slice_assign( + zeros, y_bar, axis=axis, starts=starts, ends=ends, strides=strides + ) @REGISTER_TRANSPOSE('slice_assign_p') @@ -1230,23 +1313,19 @@ def slice_assign_transpose(op, check_dot, z_bar): x, y = op_position_inputs(op) assert check_dot(x) and check_dot(y), ( f'(check_dot(x) and check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' + ) zeros = fill_const(value=0.0, shape=y.shape, dtype=y.dtype) axis = op.attr('axis') starts = op.attr('starts') ends = op.attr('ends') strides = op.attr('strides') - x_bar = slice_assign(z_bar, - zeros, - axis=axis, - starts=starts, - ends=ends, - strides=strides) - y_bar = slice_select(z_bar, - axis=axis, - starts=starts, - ends=ends, - strides=strides) + x_bar = slice_assign( + z_bar, zeros, axis=axis, starts=starts, ends=ends, strides=strides + ) + y_bar = slice_select( + z_bar, axis=axis, starts=starts, ends=ends, strides=strides + ) return x_bar, y_bar @@ -1266,7 +1345,8 @@ def scatter_add_transpose(op, check_dot, z_bar): x, y, indextensor = op_position_inputs(op) assert check_dot(x) and check_dot(y), ( f'(check_dot(x) and check_dot(y)) must be True, ' - f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.') + f'but check_dot(x)={check_dot(x)} and check_dot(y)={check_dot(y)}.' 
+ ) axis = op.attr('axis') zeros = fill_const(value=0.0, shape=y.shape, dtype=y.dtype) x_bar = scatter_add(z_bar, zeros, indextensor, axis=axis) @@ -1286,8 +1366,11 @@ def select_transpose(op, check_dot, z_bar): zeros_x = fill_const(value=0.0, shape=x.shape, dtype=x.dtype) zeros_y = fill_const(value=0.0, shape=y.shape, dtype=y.dtype) - cond_bar = fill_const(value=0.0, shape=y.shape, - dtype=cond.dtype) if check_dot(cond) else None + cond_bar = ( + fill_const(value=0.0, shape=y.shape, dtype=cond.dtype) + if check_dot(cond) + else None + ) x_bar = select(cond, z_bar, zeros_x) if check_dot(x) else None y_bar = select(cond, zeros_y, z_bar) if check_dot(y) else None @@ -1296,5 +1379,5 @@ def select_transpose(op, check_dot, z_bar): @REGISTER_TRANSPOSE('cast_p') def cast_transpose(op, check_dot, y_bar): - x, = op_position_inputs(op) + (x,) = op_position_inputs(op) return primops.cast(y_bar, x.dtype) diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py index c57f651d344c34b156e2d37fab39d1049db33adc..cdf16e77b1fbc2b60e95a75d94b16cdfae2eb6af 100644 --- a/python/paddle/incubate/autograd/primx.py +++ b/python/paddle/incubate/autograd/primx.py @@ -20,15 +20,23 @@ from paddle.fluid.framework import Operator, default_main_program from paddle.incubate.autograd.utils import as_tensors from .primops import add, fill_const -from .primreg import (lookup_orig2prim, lookup_prim2orig, op_position_inputs, - op_position_output) +from .primreg import ( + lookup_orig2prim, + lookup_prim2orig, + op_position_inputs, + op_position_output, +) from .primrules import _jvp, _orig2prim, _prim2orig, _transpose -from .utils import (flatten, flatten_and_remove_none, get_input_var_list, - get_output_var_list) +from .utils import ( + flatten, + flatten_and_remove_none, + get_input_var_list, + get_output_var_list, +) def topo_path(xs, ys, block=None): - """ Returns the list of ops on the path from `xs` to `ys` in topological + """Returns the list of ops on the path from `xs` to `ys` in topological order. TODO(Tongxin): supporting control flow and nested blocks. @@ -50,13 +58,16 @@ def topo_path(xs, ys, block=None): # Initialize reached vars for x in xs: - assert x is None or x.block == block, 'x is not None and x.block != block' + assert ( + x is None or x.block == block + ), 'x is not None and x.block != block' reached_vars[id(x)] = x # Reaching test, returning whether an op is reached from the given input reaching = lambda op: any( id(v) in reached_vars - for v in flatten_and_remove_none(get_input_var_list(op))) + for v in flatten_and_remove_none(get_input_var_list(op)) + ) # block.ops are supposedly in the order that preserves correct data # dependence. @@ -70,7 +81,8 @@ def topo_path(xs, ys, block=None): used_vars = OrderedDict((id(y), y) for y in ys if id(y) in reached_vars) back_reaching = lambda op: any( id(out) in used_vars - for out in flatten_and_remove_none(get_output_var_list(op))) + for out in flatten_and_remove_none(get_output_var_list(op)) + ) # Backward pass to find all used variables for op in reversed(path): @@ -86,7 +98,7 @@ def topo_path(xs, ys, block=None): def output_vars_on_path(path): - """ Returns the output variables of all the ops on the path from `xs` + """Returns the output variables of all the ops on the path from `xs` to `ys`. Args: @@ -104,7 +116,7 @@ def output_vars_on_path(path): class VarMap(object): - """ A general map data structure for linking variables to variables. + """A general map data structure for linking variables to variables. 
An example is linking variables to their gradients. """ @@ -125,7 +137,8 @@ class VarMap(object): if isinstance(key_vars, paddle.fluid.framework.Variable): if not isinstance(value_vars, paddle.fluid.framework.Variable): raise TypeError( - f'value_vars must be Variable, but got {type(value_vars)}') + f'value_vars must be Variable, but got {type(value_vars)}' + ) self.tab[id(key_vars)] = id(value_vars) else: assert len(key_vars) == len(value_vars), ( @@ -168,11 +181,12 @@ class VarMap(object): # TODO(lml): supporting control flow, nested blocks, and block other than current block of main program. class Transform(object): - """ An object that maintains the state of transformations applied to a - primitve program. """ + """An object that maintains the state of transformations applied to a + primitve program.""" def __init__(self, block): - assert block == default_main_program().current_block( + assert ( + block == default_main_program().current_block() ), 'only support transform on current block of main program.' self.block = block self.vars = self.init_vars(block) @@ -224,7 +238,7 @@ class Transform(object): block._sync_with_cpp() def var2dot_rec(self, vars): - """ Lookup var2dot recursively.""" + """Lookup var2dot recursively.""" if isinstance(vars, paddle.fluid.framework.Variable): dot = self.var2dot.lookup(vars) return dot @@ -243,7 +257,7 @@ class Transform(object): return bars def linearize(self, xs, ys, xs_dot=None): - """ Performs the linearization transform, a.k.a, forward mode AD + """Performs the linearization transform, a.k.a, forward mode AD transform, on a primitive lowered program. Args: @@ -265,15 +279,18 @@ class Transform(object): else: assert len(xs) == len(xs_dot), ( f'len(xs) should be equal to len(xs_dot), ' - f'but len(xs)={len(xs)} and len(xs_dot)={len(xs_dot)}') + f'but len(xs)={len(xs)} and len(xs_dot)={len(xs_dot)}' + ) for x, dot in zip(xs, xs_dot): assert x.dtype == dot.dtype, ( f'x.dtype should be equal to dot.dtype, ' - f'but x.dtype={x.dtype} and dot.dtype={dot.dtype}') + f'but x.dtype={x.dtype} and dot.dtype={dot.dtype}' + ) assert x.shape == dot.shape, ( f'x.shape should be equal to dot.shape, ' - f'but x.shape={x.shape} and dot.shape={dot.shape}') + f'but x.shape={x.shape} and dot.shape={dot.shape}' + ) self.var2dot.add(x, dot) path, unused_xs, _ = topo_path(xs, ys, self.block) @@ -299,7 +316,7 @@ class Transform(object): return xs_dot, ys_dot def transpose(self, ys_dot, xs_dot, ys_bar=None, retain_fwd=False): - """ Performs the transpose transform, a.k.a, reverse mode AD + """Performs the transpose transform, a.k.a, reverse mode AD transform, on a linearized primitive program. Note, `transpose` is supposed to be used in couple with `linearize`. 
@@ -328,7 +345,8 @@ class Transform(object): else: assert len(ys_dot) == len(ys_bar), ( f'len(ys_dot) should be equal to len(ys_bar), ' - f'but len(ys_dot)={len(ys_dot)} and len(ys_bar)={len(ys_bar)}') + f'but len(ys_dot)={len(ys_dot)} and len(ys_bar)={len(ys_bar)}' + ) for y_dot, y_bar in zip(ys_dot, ys_bar): assert y_dot.shape == y_bar.shape, ( f'y_dot.shape should be equal to y_bar.shape, ' @@ -372,7 +390,8 @@ class Transform(object): ins = flatten(op_position_inputs(op)) assert len(ins) == len(ins_bar), ( f'len(ins) should be equal to len(ins_bar), ' - f'but len(ins)={len(ins)} and len(ins_bar)={len(ins_bar)}') + f'but len(ins)={len(ins)} and len(ins_bar)={len(ins_bar)}' + ) for dot, bar in zip(ins, ins_bar): if bar is not None: @@ -391,7 +410,8 @@ class Transform(object): vars_to_remove = set() for op in path: vars_to_remove.update( - flatten_and_remove_none(get_output_var_list(op))) + flatten_and_remove_none(get_output_var_list(op)) + ) op_indexes = [] @@ -459,10 +479,12 @@ def _lower(block, reverse, blacklist): bind(input_args, to_bind, value_table) for orig_out, new_out in zip( - expand_nested_list(get_output_var_list(op)), - expand_nested_list(as_tensors(lower_fn(op, *input_args)))): + expand_nested_list(get_output_var_list(op)), + expand_nested_list(as_tensors(lower_fn(op, *input_args))), + ): assert not (orig_out is None) ^ ( - new_out is None), "orig_out and new_out should match." + new_out is None + ), "orig_out and new_out should match." vars_to_remove.add(new_out.name) value_table[new_out.name] = new_out to_bind[orig_out.name] = new_out.name @@ -471,7 +493,8 @@ def _lower(block, reverse, blacklist): inputs = {} for i in range(len(op.input_names)): inputs[op.input_names[i]] = bind_name( - op.input(op.input_names[i]), to_bind) + op.input(op.input_names[i]), to_bind + ) outputs = {} for i in range(len(op.output_names)): @@ -481,14 +504,17 @@ def _lower(block, reverse, blacklist): for name in sorted(op.attr_names): attrs[name] = op.attr(name) from paddle.fluid.dygraph.base import param_guard + new_op_desc = block.desc.append_op() with param_guard(inputs), param_guard(outputs): - op = Operator(block=block, - desc=new_op_desc, - type=op.type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + op = Operator( + block=block, + desc=new_op_desc, + type=op.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) block.ops.append(op) # Step3: Do some post-processing work @@ -508,8 +534,9 @@ def _lower(block, reverse, blacklist): op._rename_output(out_name, to_bind_rev[out_name]) for var_name in sorted(vars_to_remove): - assert var_name in to_bind_rev, 'var_name "{}" is not in to_bind_rev.'.format( - var_name) + assert ( + var_name in to_bind_rev + ), 'var_name "{}" is not in to_bind_rev.'.format(var_name) if var_name != to_bind_rev[var_name]: block.desc._remove_var(var_name.encode()) del block.vars[var_name] @@ -535,7 +562,8 @@ def orig2prim(block=None): """ block = default_main_program().current_block() if block is None else block - assert block == default_main_program().current_block( + assert ( + block == default_main_program().current_block() ), 'block is neither None nor current block of main program' _lower(block, reverse=False, blacklist=[]) @@ -580,7 +608,8 @@ def prim2orig(block=None, blacklist=None): """ block = default_main_program().current_block() if block is None else block - assert block == default_main_program().current_block( + assert ( + block == default_main_program().current_block() ), 'block is neither None nor current block of main program' blacklist = [] if 
blacklist is None else blacklist _lower(block, reverse=True, blacklist=blacklist) diff --git a/python/paddle/incubate/autograd/utils.py b/python/paddle/incubate/autograd/utils.py index effb8c1abc646b1e7e4497a6197006420ab097a8..2b8082bf48de7a83b585e38418ca8951fab548ae 100644 --- a/python/paddle/incubate/autograd/utils.py +++ b/python/paddle/incubate/autograd/utils.py @@ -18,7 +18,6 @@ from paddle.fluid import framework as framework class PrimOption(object): - def __init__(self): self.enable_prim = False @@ -175,7 +174,7 @@ def flatten_and_remove_none(inp): def as_tensors(xs): if isinstance(xs, framework.Variable): - return (xs, ) + return (xs,) elif isinstance(xs, typing.Sequence): return tuple(xs) else: diff --git a/python/paddle/incubate/autotune.py b/python/paddle/incubate/autotune.py index 4c577cba3e70c3348dd47dd1d9b987d5029be2a3..daca4aee14e0b8b97f1897e4cf48b9612bb9c68d 100644 --- a/python/paddle/incubate/autotune.py +++ b/python/paddle/incubate/autotune.py @@ -147,7 +147,8 @@ def set_config(config=None): if "tuning_steps" in dataloader_config: if isinstance(dataloader_config['tuning_steps'], int): paddle.fluid.reader.set_autotune_config( - use_autoune, dataloader_config['tuning_steps']) + use_autoune, dataloader_config['tuning_steps'] + ) else: warnings.warn( "The auto-tuning configuration of the dataloader is incorrect." diff --git a/python/paddle/incubate/distributed/fleet/__init__.py b/python/paddle/incubate/distributed/fleet/__init__.py index 94e1a7c8bbe77bbf763f462d48fc5812243817f9..f098e4c951c0e7099b6e859ccc9c2c67974215f5 100644 --- a/python/paddle/incubate/distributed/fleet/__init__.py +++ b/python/paddle/incubate/distributed/fleet/__init__.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.distributed.fleet.recompute import recompute_sequential, recompute_hybrid +from paddle.distributed.fleet.recompute import ( + recompute_sequential, + recompute_hybrid, +) __all__ = ["recompute_sequential", "recompute_hybrid"] diff --git a/python/paddle/incubate/distributed/models/moe/gate/base_gate.py b/python/paddle/incubate/distributed/models/moe/gate/base_gate.py index 9715f4b2a25a60ee7459294d25284e6503b8e293..f1b4e74c2411f1ebdee912bb37a13255e3742d4b 100644 --- a/python/paddle/incubate/distributed/models/moe/gate/base_gate.py +++ b/python/paddle/incubate/distributed/models/moe/gate/base_gate.py @@ -23,7 +23,6 @@ import paddle.nn as nn class BaseGate(nn.Layer): - def __init__(self, num_expert, world_size): super().__init__() self.world_size = world_size diff --git a/python/paddle/incubate/distributed/models/moe/gate/gshard_gate.py b/python/paddle/incubate/distributed/models/moe/gate/gshard_gate.py index 4152dee3a37067ef81bccc58e9716fc642b36fc6..9587dfb346adda8a82af7c52ea392be9d7b7d25e 100644 --- a/python/paddle/incubate/distributed/models/moe/gate/gshard_gate.py +++ b/python/paddle/incubate/distributed/models/moe/gate/gshard_gate.py @@ -27,15 +27,16 @@ from ..utils import limit_by_capacity class GShardGate(NaiveGate): - - def __init__(self, - d_model, - num_expert, - world_size, - topk=2, - capacity=(1.2, 2.4), - random_routing=True, - group=None): + def __init__( + self, + d_model, + num_expert, + world_size, + topk=2, + capacity=(1.2, 2.4), + random_routing=True, + group=None, + ): assert topk == 2, "topk should be 2 in gshard" super().__init__(d_model, num_expert, world_size) self.capacity = capacity @@ -43,29 +44,39 @@ class GShardGate(NaiveGate): self.group = group def forward(self, x): - topk_val, topk_idx, gate_score = super().forward(x, - return_all_scores=True) + topk_val, topk_idx, gate_score = super().forward( + x, return_all_scores=True + ) s = gate_score.shape[0] top1_idx = topk_idx.flatten() - c_e = paddle.scatter(paddle.zeros(shape=[self.tot_expert]), - top1_idx, - paddle.ones_like(top1_idx, dtype="float32"), - overwrite=False) / s + c_e = ( + paddle.scatter( + paddle.zeros(shape=[self.tot_expert]), + top1_idx, + paddle.ones_like(top1_idx, dtype="float32"), + overwrite=False, + ) + / s + ) m_e = paddle.mean(F.softmax(gate_score, axis=1), axis=0) loss = paddle.mean(c_e * m_e) * (self.num_expert**2) self.set_loss(loss) cap_rate = self.capacity[0 if self.training else 1] capacity = math.ceil(cap_rate * x.shape[0]) - _new_lec, _new_gec, topk_idx = limit_by_capacity(topk_idx, - self.num_expert, - self.world_size, - capacity, - group=self.group) + _new_lec, _new_gec, topk_idx = limit_by_capacity( + topk_idx, + self.num_expert, + self.world_size, + capacity, + group=self.group, + ) if self.random_routing: - rand_routing_prob = paddle.rand(shape=[gate_score.shape[0]], - dtype="float32") + rand_routing_prob = paddle.rand( + shape=[gate_score.shape[0]], dtype="float32" + ) topk_idx = paddle.distributed.models.moe.utils._random_routing( - topk_idx, topk_val, rand_routing_prob) + topk_idx, topk_val, rand_routing_prob + ) return topk_val, topk_idx diff --git a/python/paddle/incubate/distributed/models/moe/gate/naive_gate.py b/python/paddle/incubate/distributed/models/moe/gate/naive_gate.py index 20001377fd1e1a5f7f13339f2f860d488f506af6..4781f6bba898c46d5286e502c46fc7ccfbb7cd10 100644 --- a/python/paddle/incubate/distributed/models/moe/gate/naive_gate.py +++ b/python/paddle/incubate/distributed/models/moe/gate/naive_gate.py @@ -26,7 +26,6 @@ import paddle.nn as nn class 
NaiveGate(BaseGate): - def __init__(self, d_model, num_expert, world_size, topk=2): super().__init__(num_expert, world_size) self.gate = nn.Linear(d_model, self.tot_expert) @@ -36,11 +35,9 @@ class NaiveGate(BaseGate): def forward(self, inp, return_all_scores=False): gate = self.gate(inp) - gate_top_k_val, gate_top_k_idx = paddle.topk(gate, - k=self.top_k, - axis=-1, - largest=True, - sorted=False) + gate_top_k_val, gate_top_k_idx = paddle.topk( + gate, k=self.top_k, axis=-1, largest=True, sorted=False + ) if return_all_scores: return gate_top_k_val, gate_top_k_idx, gate diff --git a/python/paddle/incubate/distributed/models/moe/gate/switch_gate.py b/python/paddle/incubate/distributed/models/moe/gate/switch_gate.py index 62e32e4db70d9a0d5bcc0c525aebdce4499b963a..c9e2f1fca498aaa731bfd744b9fe70d0746ba8f8 100644 --- a/python/paddle/incubate/distributed/models/moe/gate/switch_gate.py +++ b/python/paddle/incubate/distributed/models/moe/gate/switch_gate.py @@ -27,15 +27,16 @@ from ..utils import limit_by_capacity class SwitchGate(NaiveGate): - - def __init__(self, - d_model, - num_expert, - world_size, - topk=1, - switch_eps=.1, - capacity=(1.2, 2.4), - group=None): + def __init__( + self, + d_model, + num_expert, + world_size, + topk=1, + switch_eps=0.1, + capacity=(1.2, 2.4), + group=None, + ): assert topk == 1, "topk should be 1 in switch" super().__init__(d_model, num_expert, world_size, topk=1) self.switch_eps = switch_eps @@ -55,19 +56,25 @@ class SwitchGate(NaiveGate): cap_rate = self.capacity[0 if self.training else 1] capacity = math.ceil(cap_rate * inp.shape[0]) - _new_lec, _new_gec, top1_idx = limit_by_capacity(top1_idx, - self.num_expert, - self.world_size, - capacity, - group=self.group) + _new_lec, _new_gec, top1_idx = limit_by_capacity( + top1_idx, + self.num_expert, + self.world_size, + capacity, + group=self.group, + ) valid_idx = top1_idx[top1_idx > -1] valid_idx_tmp = paddle.reshape(valid_idx, shape=[len(valid_idx), 1]) - fraction_expert = paddle.scatter_nd_add( - x=paddle.zeros(shape=[self.tot_expert]), - index=valid_idx_tmp, - updates=paddle.ones_like(valid_idx, dtype=paddle.float32).reshape( - shape=[len(valid_idx)]), - ) / valid_idx.numel() + fraction_expert = ( + paddle.scatter_nd_add( + x=paddle.zeros(shape=[self.tot_expert]), + index=valid_idx_tmp, + updates=paddle.ones_like( + valid_idx, dtype=paddle.float32 + ).reshape(shape=[len(valid_idx)]), + ) + / valid_idx.numel() + ) prob_expert = score.sum(axis=0) / valid_idx.numel() loss = (fraction_expert * prob_expert).sum() * self.tot_expert self.set_loss(loss) diff --git a/python/paddle/incubate/distributed/models/moe/grad_clip.py b/python/paddle/incubate/distributed/models/moe/grad_clip.py index 91de68e4bfa0f14989864bad716b631e9e25257f..aaeef8780782e6dacff111789ee1ec3e16831f30 100644 --- a/python/paddle/incubate/distributed/models/moe/grad_clip.py +++ b/python/paddle/incubate/distributed/models/moe/grad_clip.py @@ -80,18 +80,21 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): sdg.step() """ - def __init__(self, - clip_norm, - is_expert_param_func=None, - moe_group=None, - group_name="default_moe_group"): + def __init__( + self, + clip_norm, + is_expert_param_func=None, + moe_group=None, + group_name="default_moe_group", + ): super(ClipGradForMOEByGlobalNorm, self).__init__() self.clip_norm = float(clip_norm) self.group_name = group_name self.moe_group = moe_group if moe_group is not None and moe_group.nranks > 1: - assert is_expert_param_func is not None, \ - "When moe group size > 1, a function for selecting expert 
params must be specified." + assert ( + is_expert_param_func is not None + ), "When moe group size > 1, a function for selecting expert params must be specified." self.is_expert_param_func = is_expert_param_func def __str__(self): @@ -120,11 +123,18 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): sum_square_list.append(sum_square) # all parameters have been filterd out - if len(sum_square_list) + len(sum_square_list_fp16) + len( - sum_square_list_fp32) == 0: + if ( + len(sum_square_list) + + len(sum_square_list_fp16) + + len(sum_square_list_fp32) + == 0 + ): return None, None - assert sum_dtype in ["float64", "float32", None], \ - "sum's type must be float64/ float32 / None" + assert sum_dtype in [ + "float64", + "float32", + None, + ], "sum's type must be float64/ float32 / None" if sum_dtype != "float64": sum_dtype = 'float64' if len(sum_square_list) > 0 else "float32" @@ -166,16 +176,20 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): # why to return sum_dtype? # we will call `get_l2_norm_pow` twice and the precisions may be different. # For convenience and simplification, we use sum_dtype directly instead of global_norm_var_normal.dtype - global_norm_var_normal, sum_dtype \ - = self.get_l2_norm_pow(normal_params_grads) + global_norm_var_normal, sum_dtype = self.get_l2_norm_pow( + normal_params_grads + ) global_norm_var_moe = None if len(moe_params_grads) > 0: - global_norm_var_moe, _ \ - = self.get_l2_norm_pow(moe_params_grads, sum_dtype) + global_norm_var_moe, _ = self.get_l2_norm_pow( + moe_params_grads, sum_dtype + ) if global_norm_var_moe is not None: - collective.all_reduce(global_norm_var_moe, - op=collective.ReduceOp.SUM, - group=self.moe_group) + collective.all_reduce( + global_norm_var_moe, + op=collective.ReduceOp.SUM, + group=self.moe_group, + ) if global_norm_var_normal is None and global_norm_var_moe is None: return params_grads @@ -187,19 +201,20 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): if global_norm_var_normal.dtype != global_norm_var_moe.dtype: # compared with normal norm, moe norm is the later one, # so its precision is no lower than normal norm - global_norm_var_normal = \ - global_norm_var_normal.astype(global_norm_var_moe.dtype) + global_norm_var_normal = global_norm_var_normal.astype( + global_norm_var_moe.dtype + ) global_norm_var = global_norm_var_normal + global_norm_var_moe params_and_grads = [] global_norm_var = layers.sqrt(global_norm_var) - max_global_norm = layers.fill_constant(shape=[1], - dtype=global_norm_var.dtype, - value=self.clip_norm) - clip_var = layers.elementwise_div(x=max_global_norm, - y=layers.elementwise_max( - x=global_norm_var, - y=max_global_norm)) + max_global_norm = layers.fill_constant( + shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm + ) + clip_var = layers.elementwise_div( + x=max_global_norm, + y=layers.elementwise_max(x=global_norm_var, y=max_global_norm), + ) for p, g in params_grads: if g is None: continue @@ -207,8 +222,11 @@ class ClipGradForMOEByGlobalNorm(ClipGradBase): params_and_grads.append((p, g)) continue # TODO(wangxi): use inplace elementwise_mul - clip_input = (clip_var.astype('float16') - if g.dtype == core.VarDesc.VarType.FP16 else clip_var) + clip_input = ( + clip_var.astype('float16') + if g.dtype == core.VarDesc.VarType.FP16 + else clip_var + ) new_grad = layers.elementwise_mul(x=g, y=clip_input) params_and_grads.append((p, new_grad)) return params_and_grads diff --git a/python/paddle/incubate/distributed/models/moe/moe_layer.py 
b/python/paddle/incubate/distributed/models/moe/moe_layer.py index b2d11e7a317b99966df92d5efb8207cbafb20729..345a10eed9cee0ba472c3b48b0534e75fd6cde5f 100644 --- a/python/paddle/incubate/distributed/models/moe/moe_layer.py +++ b/python/paddle/incubate/distributed/models/moe/moe_layer.py @@ -43,11 +43,14 @@ def _local_gather(inp, pos, out_batch_size, maybe_overlap=True): if pos.shape != [0]: origin_dtype = inp.dtype inp = paddle.cast(inp, dtype="float32") - inp_buf = paddle.scatter(paddle.zeros( - shape=[out_batch_size, inp.shape[-1]], dtype="float32"), - pos, - inp, - overwrite=True) + inp_buf = paddle.scatter( + paddle.zeros( + shape=[out_batch_size, inp.shape[-1]], dtype="float32" + ), + pos, + inp, + overwrite=True, + ) inp_buf = paddle.cast(inp_buf, dtype=origin_dtype) else: inp_buf = paddle.zeros([out_batch_size, inp.shape[-1]], dtype=inp.dtype) @@ -59,8 +62,11 @@ def _all_gather(tensor, group=None, use_calc_stream=True): return if in_dygraph_mode(): - group = paddle.distributed.collective._get_default_group( - ) if group is None else group + group = ( + paddle.distributed.collective._get_default_group() + if group is None + else group + ) tensor_shape = list(tensor.shape) tensor_shape[0] *= group.nranks out = paddle.empty(tensor_shape, tensor.dtype) @@ -70,11 +76,20 @@ def _all_gather(tensor, group=None, use_calc_stream=True): return out else: ring_id = 0 if group is None else group.id - nranks = paddle.distributed.collective._get_global_group( - ).nranks if group is None else group.nranks - return paddle._legacy_C_ops.c_allgather(tensor, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id, 'nranks', nranks) + nranks = ( + paddle.distributed.collective._get_global_group().nranks + if group is None + else group.nranks + ) + return paddle._legacy_C_ops.c_allgather( + tensor, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + 'nranks', + nranks, + ) class MoEScatter(PyLayer): @@ -85,20 +100,24 @@ class MoEScatter(PyLayer): """ @staticmethod - def forward(ctx, - inp, - pos, - local_expert_count, - global_expert_count, - fwd_batch_size, - world_size, - group=None): + def forward( + ctx, + inp, + pos, + local_expert_count, + global_expert_count, + fwd_batch_size, + world_size, + group=None, + ): local_input_buf = _local_scatter(inp, pos) if world_size > 1: - global_input_buf = global_scatter(local_input_buf, - local_expert_count, - global_expert_count, - group=group) + global_input_buf = global_scatter( + local_input_buf, + local_expert_count, + global_expert_count, + group=group, + ) else: global_input_buf = local_input_buf @@ -114,10 +133,9 @@ class MoEScatter(PyLayer): (inp_batch_size, world_size, group) = ctx.moe_args if world_size > 1: - local_grad_in = global_gather(grad, - local_expert_count, - global_expert_count, - group=group) + local_grad_in = global_gather( + grad, local_expert_count, global_expert_count, group=group + ) else: local_grad_in = grad grad_in = _local_gather(local_grad_in, pos, inp_batch_size) @@ -131,25 +149,28 @@ class MoEGather(PyLayer): """ @staticmethod - def forward(ctx, + def forward( + ctx, + global_output_buf, + pos, + local_expert_count, + global_expert_count, + local_batch_size, + world_size, + group=None, + ): + if world_size > 1: + local_output_buf = global_gather( global_output_buf, - pos, local_expert_count, global_expert_count, - local_batch_size, - world_size, - group=None): - if world_size > 1: - local_output_buf = global_gather(global_output_buf, - local_expert_count, - global_expert_count, - group=group) + group=group, + ) 
else: local_output_buf = global_output_buf - output = _local_gather(local_output_buf, - pos, - local_batch_size, - maybe_overlap=False) + output = _local_gather( + local_output_buf, pos, local_batch_size, maybe_overlap=False + ) ctx.moe_args = (global_output_buf.shape[0], world_size, group) variables = (pos, local_expert_count, global_expert_count) @@ -162,10 +183,12 @@ class MoEGather(PyLayer): fwd_batch_size, world_size, group = ctx.moe_args grad_out_buf = _local_scatter(grad_out, pos) if world_size > 1: - global_grad_out_buf = global_scatter(grad_out_buf, - local_expert_count, - global_expert_count, - group=group) + global_grad_out_buf = global_scatter( + grad_out_buf, + local_expert_count, + global_expert_count, + group=group, + ) else: global_grad_out_buf = grad_out_buf return global_grad_out_buf, None, None, None @@ -187,10 +210,9 @@ class AllGather(PyLayer): @staticmethod def backward(ctx, grad_out): rank, dim0 = ctx.args - return paddle.slice(grad_out, - axes=[0], - starts=[rank * dim0], - ends=[(rank + 1) * dim0]) + return paddle.slice( + grad_out, axes=[0], starts=[rank * dim0], ends=[(rank + 1) * dim0] + ) class Slice(PyLayer): @@ -204,10 +226,9 @@ class Slice(PyLayer): local_batch_size = B // world_size batch_start = local_batch_size * rank batch_end = min(batch_start + local_batch_size, B) - inp = paddle.slice(inp, - axes=[0], - starts=[batch_start], - ends=[batch_end]) + inp = paddle.slice( + inp, axes=[0], starts=[batch_start], ends=[batch_end] + ) ctx.args = world_size, group return inp @@ -219,10 +240,12 @@ class Slice(PyLayer): def prepare_forward(gate, num_expert, world_size, moe_group): pos, local_expert_count, global_expert_count = count_by_gate( - gate, num_expert, world_size, group=moe_group) + gate, num_expert, world_size, group=moe_group + ) with paddle.no_grad(): fwd_expert_count = global_expert_count.reshape_( - [world_size, num_expert]).sum(axis=0) + [world_size, num_expert] + ).sum(axis=0) fwd_batch_size = int(fwd_expert_count.sum().item()) return ( pos, @@ -296,14 +319,16 @@ class MoELayer(nn.Layer): """ - def __init__(self, - d_model, - experts, - gate=None, - moe_group=None, - mp_group=None, - recompute_interval=0, - recompute_ctx=None): + def __init__( + self, + d_model, + experts, + gate=None, + moe_group=None, + mp_group=None, + recompute_interval=0, + recompute_ctx=None, + ): super(MoELayer, self).__init__() self.recompute_ctx = recompute_ctx @@ -311,8 +336,9 @@ class MoELayer(nn.Layer): if gate is None: gate = dict() - assert isinstance(gate, (dict, BaseGate)), \ - "gate config' type must be dict or an instance of BaseGate" + assert isinstance( + gate, (dict, BaseGate) + ), "gate config' type must be dict or an instance of BaseGate" # only support mp/dp self.group = moe_group @@ -330,26 +356,36 @@ class MoELayer(nn.Layer): self.top_k = gate.get("top_k", 2) gate = gate.get("type", "gshard") if gate == "naive" or gate is None: - gate = NaiveGate(self.d_model, - num_expert=len(experts), - world_size=self.world_size, - topk=self.top_k) + gate = NaiveGate( + self.d_model, + num_expert=len(experts), + world_size=self.world_size, + topk=self.top_k, + ) elif gate == "gshard": - gate = GShardGate(self.d_model, - num_expert=len(experts), - world_size=self.world_size, - topk=self.top_k, - group=self.group) + gate = GShardGate( + self.d_model, + num_expert=len(experts), + world_size=self.world_size, + topk=self.top_k, + group=self.group, + ) elif gate == "switch": - gate = SwitchGate(self.d_model, - num_expert=len(experts), - world_size=self.world_size, - 
topk=self.top_k, - group=self.group) + gate = SwitchGate( + self.d_model, + num_expert=len(experts), + world_size=self.world_size, + topk=self.top_k, + group=self.group, + ) else: - assert False, "We only support naive gate, \ + assert ( + False + ), "We only support naive gate, \ gshard gate and switch gate, \ - but you choose {} gate.".format(str(gate)) + but you choose {} gate.".format( + str(gate) + ) elif isinstance(gate, NaiveGate): self.top_k = gate.top_k elif isinstance(gate, BaseGate): @@ -391,9 +427,15 @@ class MoELayer(nn.Layer): temp_pos = pos assert topk == self.top_k - x = MoEScatter.apply(inp, temp_pos, local_expert_count, - global_expert_count, fwd_batch_size, - self.world_size, self.group) + x = MoEScatter.apply( + inp, + temp_pos, + local_expert_count, + global_expert_count, + fwd_batch_size, + self.world_size, + self.group, + ) d_model = self.d_model @@ -408,22 +450,36 @@ class MoELayer(nn.Layer): for idx, expert_count in enumerate(fwd_expert_count): if expert_count <= 0: continue - y.append(experts[idx](x[last_index:expert_count + last_index])) + y.append( + experts[idx](x[last_index : expert_count + last_index]) + ) last_index = expert_count + last_index return paddle.concat(y, axis=0) if self.recompute_interval <= 0 or x.shape[0] == 0: x = experts_fwd(x, fwd_expert_count.numpy(), self.experts) else: - x = recompute_hybrid(self.recompute_ctx, experts_fwd, x, - fwd_expert_count.numpy(), self.experts) + x = recompute_hybrid( + self.recompute_ctx, + experts_fwd, + x, + fwd_expert_count.numpy(), + self.experts, + ) out_batch_size = inp.shape[0] if len(gate.shape) == 2: out_batch_size *= gate.shape[1] - x = MoEGather.apply(x, pos, local_expert_count, global_expert_count, - out_batch_size, self.world_size, self.group) + x = MoEGather.apply( + x, + pos, + local_expert_count, + global_expert_count, + out_batch_size, + self.world_size, + self.group, + ) x = x.reshape([-1, self.top_k, d_model]) value = value.reshape([x.shape[0], 1, self.top_k]) diff --git a/python/paddle/incubate/distributed/models/moe/utils.py b/python/paddle/incubate/distributed/models/moe/utils.py index 10203a0cd18aae913aab9363d4665d13c406dd8a..65ab86ded6aa929dea2000d100588a4fd912cbd5 100644 --- a/python/paddle/incubate/distributed/models/moe/utils.py +++ b/python/paddle/incubate/distributed/models/moe/utils.py @@ -19,7 +19,12 @@ # Copyright 2021, Jiaao He. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"). 
-from paddle.distributed.models.moe.utils import _number_count, _limit_by_capacity, _prune_gate_by_capacity, _assign_pos +from paddle.distributed.models.moe.utils import ( + _number_count, + _limit_by_capacity, + _prune_gate_by_capacity, + _assign_pos, +) import paddle from paddle.fluid.framework import in_dygraph_mode @@ -29,17 +34,24 @@ def _alltoall(in_tensor_list, group=None, use_calc_stream=True): return if in_dygraph_mode(): - group = paddle.distributed.collective._get_default_group( - ) if group is None else group + group = ( + paddle.distributed.collective._get_default_group() + if group is None + else group + ) out = paddle.empty(in_tensor_list.shape, in_tensor_list.dtype) task = group.process_group.alltoall(in_tensor_list, out) task.wait() return out else: ring_id = 0 if group is None else group.id - return paddle._legacy_C_ops.alltoall(in_tensor_list, 'use_calc_stream', - use_calc_stream, 'ring_id', - ring_id) + return paddle._legacy_C_ops.alltoall( + in_tensor_list, + 'use_calc_stream', + use_calc_stream, + 'ring_id', + ring_id, + ) def count_by_gate(gate, num_expert, world_size, require_pos=True, group=None): @@ -61,13 +73,12 @@ def count_by_gate(gate, num_expert, world_size, require_pos=True, group=None): def limit_by_capacity(topk_idx, num_expert, world_size, capacity, group=None): with paddle.no_grad(): - capacity = paddle.ones(shape=[num_expert], - dtype=paddle.int64) * capacity - pos, lec, gec = count_by_gate(topk_idx, - num_expert, - world_size, - require_pos=False, - group=group) + capacity = ( + paddle.ones(shape=[num_expert], dtype=paddle.int64) * capacity + ) + pos, lec, gec = count_by_gate( + topk_idx, num_expert, world_size, require_pos=False, group=group + ) new_gec = _limit_by_capacity(gec, capacity, world_size) if world_size > 1: assert group.nranks == world_size @@ -75,7 +86,8 @@ def limit_by_capacity(topk_idx, num_expert, world_size, capacity, group=None): else: new_lec = new_gec - topk_idx = _prune_gate_by_capacity(topk_idx, new_lec, num_expert, - world_size) + topk_idx = _prune_gate_by_capacity( + topk_idx, new_lec, num_expert, world_size + ) return new_lec, new_gec, topk_idx diff --git a/python/paddle/incubate/multiprocessing/reductions.py b/python/paddle/incubate/multiprocessing/reductions.py index c54626175bc7db047738f8199ee64e41c57fd7c6..b16361971edfcf26453b93b2d5941ccbc3f2175e 100644 --- a/python/paddle/incubate/multiprocessing/reductions.py +++ b/python/paddle/incubate/multiprocessing/reductions.py @@ -35,16 +35,17 @@ def _supported_check(): return False if not sys.version_info >= (3, 4): - warnings.warn("Use `paddle.multiprocessing` to share paddle tensor " - "requires python version greater than 3.4 ." - " `paddle.multiprocessing` will not take any effect !!!") + warnings.warn( + "Use `paddle.multiprocessing` to share paddle tensor " + "requires python version greater than 3.4 ." + " `paddle.multiprocessing` will not take any effect !!!" 
+ ) return False return True class LRUSharedCache(OrderedDict): - def __init__(self): self.limit = 128 self._after_fork() @@ -84,9 +85,9 @@ def cuda_from_cache(key): def rebuild_tensor(cls, lodtensor, metadata): if cls == paddle.fluid.framework.ParamBase: - tensor = paddle.fluid.framework.ParamBase(lodtensor.shape(), - lodtensor._dtype(), - **metadata) + tensor = paddle.fluid.framework.ParamBase( + lodtensor.shape(), lodtensor._dtype(), **metadata + ) tensor.value().get_tensor()._share_data_with(lodtensor) else: size, stop_gradient = metadata @@ -107,8 +108,11 @@ def reduce_tensor(tensor): "Refusing to serialize non-leaf tensor which not stop_gradient, you can detach it!" ) # TODO: add serializing name and hooks check - if tensor.place.is_cpu_place() or tensor.place.is_gpu_place( - ) or tensor.place.is_cuda_pinned_place(): + if ( + tensor.place.is_cpu_place() + or tensor.place.is_gpu_place() + or tensor.place.is_cuda_pinned_place() + ): if type(tensor) == paddle.fluid.framework.ParamBase: metadata = copy.deepcopy(tensor.__dict__) else: @@ -118,7 +122,8 @@ def reduce_tensor(tensor): else: raise ValueError( "Only support tensors of CPU/CUDA/CUDAPinned Place, Not support %s for now!" - % tensor.place) + % tensor.place + ) def rebuild_lodtensor_filename(cls, ipc_name, size, type_idx, dims, lod): @@ -127,12 +132,14 @@ def rebuild_lodtensor_filename(cls, ipc_name, size, type_idx, dims, lod): return lodtensor -def rebuild_cuda_tensor(cls, handle, offset_bytes, size, type_idx, dims, lod, - device_idx): +def rebuild_cuda_tensor( + cls, handle, offset_bytes, size, type_idx, dims, lod, device_idx +): cache_tensor = cuda_from_cache((handle, offset_bytes)) if cache_tensor is None: lodtensor = cls._new_shared_cuda( - (handle, offset_bytes, size, type_idx, dims, lod, device_idx)) + (handle, offset_bytes, size, type_idx, dims, lod, device_idx) + ) # We only cache cuda shared tensor here. # The opening cost of cudaIpcMemoryHandle is very high. # Since we cache the recived tensor directly, @@ -141,28 +148,32 @@ def rebuild_cuda_tensor(cls, handle, offset_bytes, size, type_idx, dims, lod, shared_cache[(handle, offset_bytes)] = lodtensor else: lodtensor = paddle.fluid.core.LoDTensor() - lodtensor._share_buffer_with(cache_tensor, - (size, type_idx, dims, lod, device_idx)) + lodtensor._share_buffer_with( + cache_tensor, (size, type_idx, dims, lod, device_idx) + ) return lodtensor def rebuild_lodtensor_empty(cls): - #TODO: check if tensor initialized - #TODO: handle the dtype of empty tensor + # TODO: check if tensor initialized + # TODO: handle the dtype of empty tensor return cls() def reduce_lodtensor(lodtensor): - if lodtensor._place().is_cpu_place() or lodtensor._place( - ).is_cuda_pinned_place(): + if ( + lodtensor._place().is_cpu_place() + or lodtensor._place().is_cuda_pinned_place() + ): for dim in lodtensor.shape(): if dim == 0: # Empty tensors have nothing be mmapped. 
- return (rebuild_lodtensor_empty, (type(lodtensor), )) + return (rebuild_lodtensor_empty, (type(lodtensor),)) # Default use share filename stratege - metadata = lodtensor._share_filename( + metadata = ( + lodtensor._share_filename() ) # ipc_name, size, type_idx, dims, lod rebuild = rebuild_lodtensor_filename lodtensor._shared_incref() @@ -174,7 +185,7 @@ def reduce_lodtensor(lodtensor): else: raise RuntimeError("We only support pass cpu/gpu lodtensor for now!") - return (rebuild, (type(lodtensor), ) + metadata) + return (rebuild, (type(lodtensor),) + metadata) def init_reductions(): diff --git a/python/paddle/incubate/nn/__init__.py b/python/paddle/incubate/nn/__init__.py index cf15ee7d8ffaa321b2700c38b2dbea8682ad0a3f..62a48b783df9be47f189eb9fd36f871348468b28 100644 --- a/python/paddle/incubate/nn/__init__.py +++ b/python/paddle/incubate/nn/__init__.py @@ -17,9 +17,11 @@ from .layer.fused_transformer import FusedFeedForward # noqa: F401 from .layer.fused_transformer import FusedTransformerEncoderLayer # noqa: F401 from .layer.fused_transformer import FusedMultiTransformer # noqa: F401 from .layer.fused_linear import FusedLinear # noqa: F401 -from .layer.fused_transformer import FusedBiasDropoutResidualLayerNorm # noqa: F401 +from .layer.fused_transformer import ( + FusedBiasDropoutResidualLayerNorm, +) # noqa: F401 -__all__ = [ #noqa +__all__ = [ # noqa 'FusedMultiHeadAttention', 'FusedFeedForward', 'FusedTransformerEncoderLayer', diff --git a/python/paddle/incubate/nn/functional/fused_matmul_bias.py b/python/paddle/incubate/nn/functional/fused_matmul_bias.py index 6c0722fc7061002aab9986f585f64fbb01b37a50..e7180abfe9b14d95f9cb5c61b78149da26e34150 100644 --- a/python/paddle/incubate/nn/functional/fused_matmul_bias.py +++ b/python/paddle/incubate/nn/functional/fused_matmul_bias.py @@ -18,12 +18,9 @@ from paddle.tensor.linalg import matmul from paddle import _legacy_C_ops -def fused_matmul_bias(x, - y, - bias=None, - transpose_x=False, - transpose_y=False, - name=None): +def fused_matmul_bias( + x, y, bias=None, transpose_x=False, transpose_y=False, name=None +): """ Applies matrix multiplication of two tensors and then bias addition if provided. This method requires CUDA version >= 11.6. 
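A minimal usage sketch for the fused_matmul_bias API whose signature is reformatted above, assuming a Paddle build with GPU support and CUDA >= 11.6 as its docstring requires; the package-level import path, tensor shapes, and random values below are illustrative assumptions and are not part of this patch:

    # Sketch only: exercises fused_matmul_bias on small random inputs.
    # Assumes paddle.incubate.nn.functional exposes fused_matmul_bias and
    # that this runs on a CUDA 11.6+ GPU, per the docstring above.
    import paddle
    from paddle.incubate.nn.functional import fused_matmul_bias

    x = paddle.randn([3, 4])       # [batch, in_features]
    y = paddle.randn([4, 5])       # [in_features, out_features]
    bias = paddle.randn([5])       # broadcast-added to every row of x @ y

    out = fused_matmul_bias(x, y, bias)   # same value as paddle.matmul(x, y) + bias
    print(out.shape)                      # [3, 5]

With bias=None the function simply falls back to paddle.matmul, so the fused kernel is only exercised when a bias tensor is supplied.
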
@@ -57,23 +54,18 @@ def fused_matmul_bias(x, if bias is None: return matmul(x, y, transpose_x, transpose_y, name) if _non_static_mode(): - return _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', - transpose_x, 'trans_y', - transpose_y) + return _legacy_C_ops.fused_gemm_epilogue( + x, y, bias, 'trans_x', transpose_x, 'trans_y', transpose_y + ) helper = LayerHelper('fused_matmul_bias', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='fused_gemm_epilogue', - inputs={ - 'X': x, - 'Y': y, - 'Bias': bias - }, - outputs={'Out': out}, - attrs={ - 'trans_x': transpose_x, - 'trans_y': transpose_y - }) + helper.append_op( + type='fused_gemm_epilogue', + inputs={'X': x, 'Y': y, 'Bias': bias}, + outputs={'Out': out}, + attrs={'trans_x': transpose_x, 'trans_y': transpose_y}, + ) return out diff --git a/python/paddle/incubate/nn/functional/fused_transformer.py b/python/paddle/incubate/nn/functional/fused_transformer.py index 2eaa7ceadc71370411b0990d9c4c277618393cbc..dffddb8b9eca21f13403a5db660271fa0159e792 100644 --- a/python/paddle/incubate/nn/functional/fused_transformer.py +++ b/python/paddle/incubate/nn/functional/fused_transformer.py @@ -28,26 +28,28 @@ def _verify_dropout_rate(dropout_rate): raise ValueError("dropout_rate argument should between 0 and 1") -def fused_feedforward(x, - linear1_weight, - linear2_weight, - linear1_bias=None, - linear2_bias=None, - ln1_scale=None, - ln1_bias=None, - ln2_scale=None, - ln2_bias=None, - dropout1_rate=0.5, - dropout2_rate=0.5, - activation="relu", - ln1_epsilon=1e-5, - ln2_epsilon=1e-5, - pre_layer_norm=False, - training=True, - mode='upscale_in_train', - ring_id=-1, - add_residual=True, - name=None): +def fused_feedforward( + x, + linear1_weight, + linear2_weight, + linear1_bias=None, + linear2_bias=None, + ln1_scale=None, + ln1_bias=None, + ln2_scale=None, + ln2_bias=None, + dropout1_rate=0.5, + dropout2_rate=0.5, + activation="relu", + ln1_epsilon=1e-5, + ln2_epsilon=1e-5, + pre_layer_norm=False, + training=True, + mode='upscale_in_train', + ring_id=-1, + add_residual=True, + name=None, +): r""" This is a fusion operator to compute feed forward layer in transformer model architecture. This operator only supports running on GPU. 
The function of the operator is consistent with @@ -126,112 +128,161 @@ def fused_feedforward(x, raise ValueError( "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" ) - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if _non_static_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed out, _, _, _, _, _, _, _, _, _, _ = _legacy_C_ops.fused_feedforward( - x, None, None, linear1_weight, linear1_bias, linear2_weight, - linear2_bias, ln1_scale, ln1_bias, ln2_scale, ln2_bias, - 'pre_layer_norm', pre_layer_norm, 'ln1_epsilon', ln1_epsilon, - 'ln2_epsilon', ln2_epsilon, 'act_method', activation, - 'dropout1_rate', dropout1_rate, 'dropout2_rate', dropout2_rate, - "is_test", not training, "dropout1_fix_seed", seed is not None, - "dropout2_fix_seed", seed is not None, "dropout1_seed", - seed if seed is not None else 0, "dropout2_seed", - seed if seed is not None else 0, 'dropout1_implementation', mode, - 'dropout2_implementation', mode, 'add_residual', add_residual, - 'ring_id', ring_id) + x, + None, + None, + linear1_weight, + linear1_bias, + linear2_weight, + linear2_bias, + ln1_scale, + ln1_bias, + ln2_scale, + ln2_bias, + 'pre_layer_norm', + pre_layer_norm, + 'ln1_epsilon', + ln1_epsilon, + 'ln2_epsilon', + ln2_epsilon, + 'act_method', + activation, + 'dropout1_rate', + dropout1_rate, + 'dropout2_rate', + dropout2_rate, + "is_test", + not training, + "dropout1_fix_seed", + seed is not None, + "dropout2_fix_seed", + seed is not None, + "dropout1_seed", + seed if seed is not None else 0, + "dropout2_seed", + seed if seed is not None else 0, + 'dropout1_implementation', + mode, + 'dropout2_implementation', + mode, + 'add_residual', + add_residual, + 'ring_id', + ring_id, + ) return out helper = LayerHelper("fused_feedforward") dtype = x.dtype - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'fused_feedforward') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], - 'fused_feedforward') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'fused_feedforward' + ) + check_dtype( + dtype, 'dtype', ['float16', 'float32', 'float64'], 'fused_feedforward' + ) out = helper.create_variable_for_type_inference(x.dtype) dropout1_mask = helper.create_variable_for_type_inference( - 'uint8', stop_gradient=True) + 'uint8', stop_gradient=True + ) dropout2_mask = helper.create_variable_for_type_inference( - 'uint8', stop_gradient=True) - ln1_mean = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - ln1_variance = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - ln2_mean = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - ln2_variance = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - linear1_out = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - ln1_out = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - dropout1_out = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) - dropout2_out = helper.create_variable_for_type_inference(x.dtype, - stop_gradient=True) + 'uint8', stop_gradient=True + ) + ln1_mean = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + ln1_variance = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + ln2_mean = 
helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + ln2_variance = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + linear1_out = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + ln1_out = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + dropout1_out = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) + dropout2_out = helper.create_variable_for_type_inference( + x.dtype, stop_gradient=True + ) if (seed is None or seed == 0) and helper.main_program.random_seed != 0: seed = helper.main_program.random_seed - helper.append_op(type='fused_feedforward', - inputs={ - 'X': x, - 'Linear1Weight': linear1_weight, - 'Linear1Bias': linear1_bias, - 'Linear2Weight': linear2_weight, - 'Linear2Bias': linear2_bias, - 'Ln1Scale': ln1_scale, - 'Ln1Bias': ln1_bias, - 'Ln2Scale': ln2_scale, - 'Ln2Bias': ln2_bias, - }, - outputs={ - 'Out': out, - 'Dropout1Mask': dropout1_mask, - 'Dropout2Mask': dropout2_mask, - 'Ln1Mean': ln1_mean, - 'Ln1Variance': ln1_variance, - 'Ln2Mean': ln2_mean, - 'Ln2Variance': ln2_variance, - 'Linear1Out': linear1_out, - 'Ln1Out': ln1_out, - 'Dropout1Out': dropout1_out, - 'Dropout2Out': dropout2_out, - }, - attrs={ - 'dropout1_rate': dropout1_rate, - 'dropout2_rate': dropout2_rate, - 'act_method': activation, - 'pre_layer_norm': pre_layer_norm, - 'ln1_epsilon': ln1_epsilon, - 'ln2_epsilon': ln2_epsilon, - 'is_test': not training, - 'dropout1_fix_seed': seed is not None, - 'dropout2_fix_seed': seed is not None, - 'dropout1_seed': seed if seed is not None else 0, - 'dropout2_seed': seed if seed is not None else 0, - 'dropout1_implementation': mode, - 'dropout2_implementation': mode, - 'add_residual': add_residual, - 'ring_id': ring_id, - }) + helper.append_op( + type='fused_feedforward', + inputs={ + 'X': x, + 'Linear1Weight': linear1_weight, + 'Linear1Bias': linear1_bias, + 'Linear2Weight': linear2_weight, + 'Linear2Bias': linear2_bias, + 'Ln1Scale': ln1_scale, + 'Ln1Bias': ln1_bias, + 'Ln2Scale': ln2_scale, + 'Ln2Bias': ln2_bias, + }, + outputs={ + 'Out': out, + 'Dropout1Mask': dropout1_mask, + 'Dropout2Mask': dropout2_mask, + 'Ln1Mean': ln1_mean, + 'Ln1Variance': ln1_variance, + 'Ln2Mean': ln2_mean, + 'Ln2Variance': ln2_variance, + 'Linear1Out': linear1_out, + 'Ln1Out': ln1_out, + 'Dropout1Out': dropout1_out, + 'Dropout2Out': dropout2_out, + }, + attrs={ + 'dropout1_rate': dropout1_rate, + 'dropout2_rate': dropout2_rate, + 'act_method': activation, + 'pre_layer_norm': pre_layer_norm, + 'ln1_epsilon': ln1_epsilon, + 'ln2_epsilon': ln2_epsilon, + 'is_test': not training, + 'dropout1_fix_seed': seed is not None, + 'dropout2_fix_seed': seed is not None, + 'dropout1_seed': seed if seed is not None else 0, + 'dropout2_seed': seed if seed is not None else 0, + 'dropout1_implementation': mode, + 'dropout2_implementation': mode, + 'add_residual': add_residual, + 'ring_id': ring_id, + }, + ) return out -def fused_bias_dropout_residual_layer_norm(x, - residual, - bias=None, - ln_scale=None, - ln_bias=None, - dropout_rate=0.5, - ln_epsilon=1e-5, - training=True, - mode='upscale_in_train', - name=None): +def fused_bias_dropout_residual_layer_norm( + x, + residual, + bias=None, + ln_scale=None, + ln_bias=None, + dropout_rate=0.5, + ln_epsilon=1e-5, + training=True, + mode='upscale_in_train', + name=None, +): r""" The fused_bias_dropout_residual_layer_norm operator. 
The pseudo code is as follows: @@ -291,37 +342,72 @@ def fused_bias_dropout_residual_layer_norm(x, raise ValueError( "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" ) - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if ln_scale is not None: - assert len(ln_scale.shape - ) == 1, "The dims of the shape of ln_scale should be 1." - assert x.shape[len(x.shape) - 1] == ln_scale.shape[ - 0], "The dim of ln_scale must equal to the last dim of x." + assert ( + len(ln_scale.shape) == 1 + ), "The dims of the shape of ln_scale should be 1." + assert ( + x.shape[len(x.shape) - 1] == ln_scale.shape[0] + ), "The dim of ln_scale must equal to the last dim of x." if ln_bias is not None: - assert len( - ln_bias.shape) == 1, "The dims of the shape of ln_bias should be 1." - assert x.shape[len(x.shape) - 1] == ln_bias.shape[ - 0], "The dim of ln_bias must equal to the last dim of x." + assert ( + len(ln_bias.shape) == 1 + ), "The dims of the shape of ln_bias should be 1." + assert ( + x.shape[len(x.shape) - 1] == ln_bias.shape[0] + ), "The dim of ln_bias must equal to the last dim of x." if _non_static_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed - _, _, _, _, final_out = _legacy_C_ops.fused_bias_dropout_residual_layer_norm( - x, residual, bias, ln_scale, ln_bias, 'dropout_rate', dropout_rate, - 'ln_epsilon', ln_epsilon, 'is_test', not training, - 'dropout_fix_seed', seed is not None, 'dropout_seed', - seed if seed is not None else 0, 'dropout_implementation', mode) + ( + _, + _, + _, + _, + final_out, + ) = _legacy_C_ops.fused_bias_dropout_residual_layer_norm( + x, + residual, + bias, + ln_scale, + ln_bias, + 'dropout_rate', + dropout_rate, + 'ln_epsilon', + ln_epsilon, + 'is_test', + not training, + 'dropout_fix_seed', + seed is not None, + 'dropout_seed', + seed if seed is not None else 0, + 'dropout_implementation', + mode, + ) return final_out else: - helper = LayerHelper('fused_bias_dropout_residual_layer_norm', - **locals()) + helper = LayerHelper( + 'fused_bias_dropout_residual_layer_norm', **locals() + ) dtype = x.dtype # check dtypes - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'fused_bias_dropout_residual_layer_norm') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], - 'fused_bias_dropout_residual_layer_norm') + check_variable_and_dtype( + x, + 'x', + ['float16', 'float32', 'float64'], + 'fused_bias_dropout_residual_layer_norm', + ) + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'fused_bias_dropout_residual_layer_norm', + ) # set inputs inputs = dict() inputs['X'] = [x] @@ -345,50 +431,57 @@ def fused_bias_dropout_residual_layer_norm(x, } # set outputs dropout_mask_out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) ln_mean_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) ln_variance_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) bias_dropout_residual_out = helper.create_variable_for_type_inference( - dtype=dtype) + dtype=dtype + ) final_out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='fused_bias_dropout_residual_layer_norm', - 
inputs=inputs, - outputs={ - "BiasDropoutResidualOut": - bias_dropout_residual_out, - "DropoutMaskOut": dropout_mask_out, - "LnMean": ln_mean_out, - "LnVariance": ln_variance_out, - 'Y': final_out, - }, - attrs=attrs) + helper.append_op( + type='fused_bias_dropout_residual_layer_norm', + inputs=inputs, + outputs={ + "BiasDropoutResidualOut": bias_dropout_residual_out, + "DropoutMaskOut": dropout_mask_out, + "LnMean": ln_mean_out, + "LnVariance": ln_variance_out, + 'Y': final_out, + }, + attrs=attrs, + ) return final_out -def fused_multi_head_attention(x, - qkv_weight, - linear_weight, - pre_layer_norm=False, - pre_ln_scale=None, - pre_ln_bias=None, - ln_scale=None, - ln_bias=None, - pre_ln_epsilon=1e-05, - qkv_bias=None, - linear_bias=None, - cache_kv=None, - attn_mask=None, - dropout_rate=0.5, - attn_dropout_rate=0.5, - ln_epsilon=1e-05, - training=True, - mode='upscale_in_train', - ring_id=-1, - add_residual=True, - name=None): +def fused_multi_head_attention( + x, + qkv_weight, + linear_weight, + pre_layer_norm=False, + pre_ln_scale=None, + pre_ln_bias=None, + ln_scale=None, + ln_bias=None, + pre_ln_epsilon=1e-05, + qkv_bias=None, + linear_bias=None, + cache_kv=None, + attn_mask=None, + dropout_rate=0.5, + attn_dropout_rate=0.5, + ln_epsilon=1e-05, + training=True, + mode='upscale_in_train', + ring_id=-1, + add_residual=True, + name=None, +): r""" Attention mapps queries and a set of key-value pairs to outputs, and Multi-Head Attention performs multiple parallel attention to jointly attending @@ -512,7 +605,9 @@ def fused_multi_head_attention(x, raise ValueError( "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" ) - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if _non_static_mode(): if default_main_program().random_seed != 0: @@ -520,29 +615,83 @@ def fused_multi_head_attention(x, # pre_ln_mean, pre_ln_variance, pre_ln_out, qkv_out, qkv_bias_out, transpose_out, qk_out, # qktv_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, attn_mask_out, fmha_out, # linear_out, dropout_mask_out, ln_mean_out, ln_var_out, bias_dropout_residual_out, final_out - assert len(qkv_weight.shape - ) == 4, "The dims of the shape of qkv_weight should be 4." - assert qkv_weight.shape[ - 0] == 3, "The shape of qkv_weight should be [3, num_head, head_dim, embed_dim]." - assert qkv_weight.shape[3] == x.shape[ - 2], "The 3rd dim of qkv_weight and 2nd dim of x should be the same, i.e., embed_dim." + assert ( + len(qkv_weight.shape) == 4 + ), "The dims of the shape of qkv_weight should be 4." + assert ( + qkv_weight.shape[0] == 3 + ), "The shape of qkv_weight should be [3, num_head, head_dim, embed_dim]." + assert ( + qkv_weight.shape[3] == x.shape[2] + ), "The 3rd dim of qkv_weight and 2nd dim of x should be the same, i.e., embed_dim." if ring_id == -1: # under mp, the num head will be split, this equation will not hold - assert qkv_weight.shape[1] * qkv_weight.shape[2] == qkv_weight.shape[ - 3], "embed_dim must be divisible by num_heads." 
- - _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, cache_kv_out, final_out = _legacy_C_ops.fused_attention( - x, pre_ln_scale, pre_ln_bias, qkv_weight, qkv_bias, cache_kv, - attn_mask, linear_weight, linear_bias, ln_scale, ln_bias, - 'pre_layer_norm', pre_layer_norm, 'epsilon', pre_ln_epsilon, - 'dropout_rate', dropout_rate, 'attn_dropout_rate', - attn_dropout_rate, 'ln_epsilon', ln_epsilon, 'is_test', - not training, 'attn_dropout_fix_seed', seed is not None, - 'dropout_fix_seed', seed is not None, 'attn_dropout_seed', - seed if seed is not None else 0, 'dropout_seed', - seed if seed is not None else 0, 'attn_dropout_implementation', - mode, 'dropout_implementation', mode, 'add_residual', add_residual, - 'ring_id', ring_id) + assert ( + qkv_weight.shape[1] * qkv_weight.shape[2] == qkv_weight.shape[3] + ), "embed_dim must be divisible by num_heads." + + ( + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + cache_kv_out, + final_out, + ) = _legacy_C_ops.fused_attention( + x, + pre_ln_scale, + pre_ln_bias, + qkv_weight, + qkv_bias, + cache_kv, + attn_mask, + linear_weight, + linear_bias, + ln_scale, + ln_bias, + 'pre_layer_norm', + pre_layer_norm, + 'epsilon', + pre_ln_epsilon, + 'dropout_rate', + dropout_rate, + 'attn_dropout_rate', + attn_dropout_rate, + 'ln_epsilon', + ln_epsilon, + 'is_test', + not training, + 'attn_dropout_fix_seed', + seed is not None, + 'dropout_fix_seed', + seed is not None, + 'attn_dropout_seed', + seed if seed is not None else 0, + 'dropout_seed', + seed if seed is not None else 0, + 'attn_dropout_implementation', + mode, + 'dropout_implementation', + mode, + 'add_residual', + add_residual, + 'ring_id', + ring_id, + ) if cache_kv is not None: return final_out, cache_kv_out return final_out @@ -550,10 +699,18 @@ def fused_multi_head_attention(x, helper = LayerHelper('fused_multi_head_attention', **locals()) dtype = x.dtype # check dtypes - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'fused_multihead_attention') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], - 'fused_multi_head_attention') + check_variable_and_dtype( + x, + 'x', + ['float16', 'float32', 'float64'], + 'fused_multihead_attention', + ) + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'float64'], + 'fused_multi_head_attention', + ) # set inputs inputs = dict() @@ -573,7 +730,8 @@ def fused_multi_head_attention(x, inputs['Ln2Scale'] = [ln_scale] if ln_bias: inputs['Ln2Bias'] = [ln_bias] - if cache_kv: inputs['CacheKV'] = [cache_kv] + if cache_kv: + inputs['CacheKV'] = [cache_kv] if (seed is None or seed == 0) and helper.main_program.random_seed != 0: seed = helper.main_program.random_seed @@ -593,14 +751,16 @@ def fused_multi_head_attention(x, 'attn_dropout_implementation': mode, 'dropout_implementation': mode, 'add_residual': add_residual, - 'ring_id': ring_id + 'ring_id': ring_id, } # set outputs pre_ln_mean_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) pre_ln_variance_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) pre_ln_out = helper.create_variable_for_type_inference(dtype=dtype) qkv_out = helper.create_variable_for_type_inference(dtype=dtype) @@ -611,79 +771,88 @@ def fused_multi_head_attention(x, qktv_out = helper.create_variable_for_type_inference(dtype=dtype) softmax_out = helper.create_variable_for_type_inference(dtype=dtype) attn_dropout_mask_out 
= helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) attn_dropout_out = helper.create_variable_for_type_inference( - dtype=dtype) + dtype=dtype + ) attn_mask_out = helper.create_variable_for_type_inference(dtype=dtype) fmha_out = helper.create_variable_for_type_inference(dtype=dtype) out_linear_out = helper.create_variable_for_type_inference(dtype=dtype) dropout_mask_out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) ln_mean_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) ln_variance_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) bias_dropout_residual_out = helper.create_variable_for_type_inference( - dtype=dtype) + dtype=dtype + ) final_out = helper.create_variable_for_type_inference(dtype=dtype) cache_kv_out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='fused_attention', - inputs=inputs, - outputs={ - "LnMean": pre_ln_mean_out, - "LnVariance": pre_ln_variance_out, - "LnOut": pre_ln_out, - "QKVOut": qkv_out, - "QKVBiasOut": qkv_bias_out, - "TransposeOut2": transpose_out, - "QKOut": qk_out, - "QKTVOut": qktv_out, - "SoftmaxOut": softmax_out, - "AttnDropoutMaskOut": attn_dropout_mask_out, - "AttnDropoutOut": attn_dropout_out, - "SrcMaskOut": attn_mask_out, - "FMHAOut": fmha_out, - "OutLinearOut": out_linear_out, - "DropoutMaskOut": dropout_mask_out, - "Ln2Mean": ln_mean_out, - "Ln2Variance": ln_variance_out, - "BiasDropoutResidualOut": - bias_dropout_residual_out, - 'Y': final_out, - 'CacheKVOut': cache_kv_out - }, - attrs=attrs) + helper.append_op( + type='fused_attention', + inputs=inputs, + outputs={ + "LnMean": pre_ln_mean_out, + "LnVariance": pre_ln_variance_out, + "LnOut": pre_ln_out, + "QKVOut": qkv_out, + "QKVBiasOut": qkv_bias_out, + "TransposeOut2": transpose_out, + "QKOut": qk_out, + "QKTVOut": qktv_out, + "SoftmaxOut": softmax_out, + "AttnDropoutMaskOut": attn_dropout_mask_out, + "AttnDropoutOut": attn_dropout_out, + "SrcMaskOut": attn_mask_out, + "FMHAOut": fmha_out, + "OutLinearOut": out_linear_out, + "DropoutMaskOut": dropout_mask_out, + "Ln2Mean": ln_mean_out, + "Ln2Variance": ln_variance_out, + "BiasDropoutResidualOut": bias_dropout_residual_out, + 'Y': final_out, + 'CacheKVOut': cache_kv_out, + }, + attrs=attrs, + ) return (final_out, cache_kv_out) if cache_kv else final_out -def fused_multi_transformer(x, - ln_scales, - ln_biases, - qkv_weights, - qkv_biases, - linear_weights, - linear_biases, - ffn_ln_scales, - ffn_ln_biases, - ffn1_weights, - ffn1_biases, - ffn2_weights, - ffn2_biases, - pre_layer_norm=True, - epsilon=1e-05, - cache_kvs=None, - pre_caches=None, - time_step=None, - attn_mask=None, - dropout_rate=0.0, - activation="gelu", - training=False, - mode='upscale_in_train', - trans_qkvw=True, - ring_id=-1, - name=None): +def fused_multi_transformer( + x, + ln_scales, + ln_biases, + qkv_weights, + qkv_biases, + linear_weights, + linear_biases, + ffn_ln_scales, + ffn_ln_biases, + ffn1_weights, + ffn1_biases, + ffn2_weights, + ffn2_biases, + pre_layer_norm=True, + epsilon=1e-05, + cache_kvs=None, + pre_caches=None, + time_step=None, + attn_mask=None, + dropout_rate=0.0, + activation="gelu", + training=False, + mode='upscale_in_train', + trans_qkvw=True, + ring_id=-1, + 
name=None, +): r""" This is a fusion operator to compute multi transformer layers in transformer model architecture. This operator only supports running on GPU. The function of the transformer layer is consistent @@ -823,18 +992,47 @@ def fused_multi_transformer(x, raise ValueError( "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" ) - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if _non_static_mode(): cache_kv_out, final_out = _legacy_C_ops.fused_multi_transformer( - x, ln_scales, ln_biases, qkv_weights, qkv_biases, cache_kvs, - pre_caches, time_step, attn_mask, linear_weights, linear_biases, - ffn_ln_scales, ffn_ln_biases, ffn1_weights, ffn1_biases, - ffn2_weights, ffn2_biases, cache_kvs, 'pre_layer_norm', - pre_layer_norm, 'epsilon', epsilon, 'dropout_rate', dropout_rate, - 'is_test', not training, 'dropout_implementation', mode, - 'act_method', activation, 'trans_qkvw', trans_qkvw, 'ring_id', - ring_id) + x, + ln_scales, + ln_biases, + qkv_weights, + qkv_biases, + cache_kvs, + pre_caches, + time_step, + attn_mask, + linear_weights, + linear_biases, + ffn_ln_scales, + ffn_ln_biases, + ffn1_weights, + ffn1_biases, + ffn2_weights, + ffn2_biases, + cache_kvs, + 'pre_layer_norm', + pre_layer_norm, + 'epsilon', + epsilon, + 'dropout_rate', + dropout_rate, + 'is_test', + not training, + 'dropout_implementation', + mode, + 'act_method', + activation, + 'trans_qkvw', + trans_qkvw, + 'ring_id', + ring_id, + ) if cache_kvs is not None: return final_out, cache_kv_out return final_out @@ -842,10 +1040,12 @@ def fused_multi_transformer(x, helper = LayerHelper('fused_multi_transformer', **locals()) dtype = x.dtype # check dtypes - check_variable_and_dtype(x, 'x', ['float16', 'float32'], - 'fused_multi_transformer') - check_dtype(dtype, 'dtype', ['float16', 'float32'], - 'fused_multi_transformer') + check_variable_and_dtype( + x, 'x', ['float16', 'float32'], 'fused_multi_transformer' + ) + check_dtype( + dtype, 'dtype', ['float16', 'float32'], 'fused_multi_transformer' + ) # set inputs inputs = dict() @@ -885,7 +1085,7 @@ def fused_multi_transformer(x, 'dropout_implementation': mode, 'act_method': activation, 'trans_qkvw': trans_qkvw, - 'ring_id': ring_id + 'ring_id': ring_id, } outputs = dict() @@ -895,9 +1095,11 @@ def fused_multi_transformer(x, # NOTE: inplace outputs['CacheKVOut'] = cache_kvs - helper.append_op(type='fused_multi_transformer', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='fused_multi_transformer', + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) return (final_out, cache_kvs) if cache_kvs else final_out diff --git a/python/paddle/incubate/nn/layer/fused_linear.py b/python/paddle/incubate/nn/layer/fused_linear.py index 4a5f2c12105b2bd1e1f34c8cc287bd7c633e1ad5..70a18233f78a600ae63923733692e60f972555cd 100644 --- a/python/paddle/incubate/nn/layer/fused_linear.py +++ b/python/paddle/incubate/nn/layer/fused_linear.py @@ -66,30 +66,31 @@ class FusedLinear(Layer): print(y.shape) # [3, 5] """ - def __init__(self, - in_features, - out_features, - weight_attr=None, - bias_attr=None, - transpose_weight=False, - name=None): + def __init__( + self, + in_features, + out_features, + weight_attr=None, + bias_attr=None, + transpose_weight=False, + name=None, + ): super(FusedLinear, self).__init__() if transpose_weight: weight_shape = [out_features, in_features] else: weight_shape = [in_features, 
out_features] dtype = self._helper.get_default_dtype() - self.weight = self.create_parameter(shape=weight_shape, - attr=weight_attr, - dtype=dtype, - is_bias=False) - self.bias = self.create_parameter(shape=[out_features], - attr=bias_attr, - dtype=dtype, - is_bias=True) + self.weight = self.create_parameter( + shape=weight_shape, attr=weight_attr, dtype=dtype, is_bias=False + ) + self.bias = self.create_parameter( + shape=[out_features], attr=bias_attr, dtype=dtype, is_bias=True + ) self.transpose_weight = transpose_weight self.name = name def forward(self, input): - return F.fused_linear(input, self.weight, self.bias, - self.transpose_weight, self.name) + return F.fused_linear( + input, self.weight, self.bias, self.transpose_weight, self.name + ) diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py index ca58f81e19dd58e69ac0099212450e7671779f6e..0f9b225eb6deedc7547284dc9d17ba92154d071a 100644 --- a/python/paddle/incubate/nn/layer/fused_transformer.py +++ b/python/paddle/incubate/nn/layer/fused_transformer.py @@ -14,7 +14,10 @@ from paddle.incubate.nn import functional as incubate_f from paddle.nn import Layer import paddle -from paddle.nn.layer.transformer import _convert_attention_mask, _convert_param_attr_to_list +from paddle.nn.layer.transformer import ( + _convert_attention_mask, + _convert_param_attr_to_list, +) from paddle.nn.initializer import Constant from paddle.fluid.dygraph import no_grad from paddle.fluid.framework import convert_np_dtype_to_dtype_, _non_static_mode @@ -49,7 +52,8 @@ def _to_dtype(t, dtype): if t.place.is_gpu_place(): size_dtype = core.size_of_dtype(dtype) waiting_alloc_memory = ( - (np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2 + ((np.prod(t.shape) * size_dtype) / 256 + 1) * 256 * 1.2 + ) gpu_memory_available = core.gpu_memory_available() if gpu_memory_available < waiting_alloc_memory: t_used = t._copy_to(paddle.CPUPlace(), False) @@ -104,31 +108,38 @@ class FusedBiasDropoutResidualLayerNorm(Layer): output = fused_bias_dropout_residual_ln(x, residual) # [2, 4, 128] """ - def __init__(self, - embed_dim, - dropout_rate=0.5, - weight_attr=None, - bias_attr=None, - epsilon=1e-5, - name=None): + def __init__( + self, + embed_dim, + dropout_rate=0.5, + weight_attr=None, + bias_attr=None, + epsilon=1e-5, + name=None, + ): super(FusedBiasDropoutResidualLayerNorm, self).__init__() - assert embed_dim > 0, ("Expected embed_dim to be greater than 0, " - "but recieved {}".format(embed_dim)) + assert embed_dim > 0, ( + "Expected embed_dim to be greater than 0, " + "but recieved {}".format(embed_dim) + ) self._dtype = self._helper.get_default_dtype() self._bias_attr = bias_attr self._weight_attr = weight_attr self.embed_dim = embed_dim - self.linear_bias = self.create_parameter(shape=[embed_dim], - attr=self._bias_attr, - dtype=self._dtype, - is_bias=True) + self.linear_bias = self.create_parameter( + shape=[embed_dim], + attr=self._bias_attr, + dtype=self._dtype, + is_bias=True, + ) self.ln_scale = self.create_parameter( attr=self._weight_attr, shape=[embed_dim], - default_initializer=Constant(value=1.0)) - self.ln_bias = self.create_parameter(attr=self._bias_attr, - shape=[embed_dim], - is_bias=True) + default_initializer=Constant(value=1.0), + ) + self.ln_bias = self.create_parameter( + attr=self._bias_attr, shape=[embed_dim], is_bias=True + ) self.dropout_rate = dropout_rate self._epsilon = epsilon @@ -161,14 +172,20 @@ class FusedBiasDropoutResidualLayerNorm(Layer): ln_epsilon=self._epsilon, 
training=self.training, mode='upscale_in_train', - name=self.name) + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' return 'embed_dim={}, seq_len={}, dropout_rate={}, epsilon={}, dtype={}{}'.format( - self.embed_dim, self.seq_len, self.dropout_rate, self._epsilon, - self._dtype, name_str) + self.embed_dim, + self.seq_len, + self.dropout_rate, + self._epsilon, + self._dtype, + name_str, + ) class FusedMultiHeadAttention(Layer): @@ -244,33 +261,40 @@ class FusedMultiHeadAttention(Layer): output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128] """ - def __init__(self, - embed_dim, - num_heads, - dropout_rate=0.5, - attn_dropout_rate=0.5, - kdim=None, - vdim=None, - normalize_before=False, - need_weights=False, - qkv_weight_attr=None, - qkv_bias_attr=None, - linear_weight_attr=None, - linear_bias_attr=None, - pre_ln_scale_attr=None, - pre_ln_bias_attr=None, - ln_scale_attr=None, - ln_bias_attr=None, - epsilon=1e-5, - nranks=1, - ring_id=-1, - name=None): + def __init__( + self, + embed_dim, + num_heads, + dropout_rate=0.5, + attn_dropout_rate=0.5, + kdim=None, + vdim=None, + normalize_before=False, + need_weights=False, + qkv_weight_attr=None, + qkv_bias_attr=None, + linear_weight_attr=None, + linear_bias_attr=None, + pre_ln_scale_attr=None, + pre_ln_bias_attr=None, + ln_scale_attr=None, + ln_bias_attr=None, + epsilon=1e-5, + nranks=1, + ring_id=-1, + name=None, + ): super(FusedMultiHeadAttention, self).__init__() - assert embed_dim > 0, ("Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim)) - assert num_heads > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(num_heads)) + assert embed_dim > 0, ( + "Expected embed_dim to be greater than 0, " + "but received {}".format(embed_dim) + ) + assert ( + num_heads > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + num_heads + ) self.normalize_before = normalize_before self._dtype = self._helper.get_default_dtype() @@ -283,7 +307,9 @@ class FusedMultiHeadAttention(Layer): self.kdim = kdim self.vdim = vdim self.need_weights = need_weights - assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" assert need_weights is False, "Only support need_weight is False now." 
# tensor model parallel @@ -294,21 +320,26 @@ class FusedMultiHeadAttention(Layer): shape=[3, num_heads, self.head_dim, embed_dim], attr=qkv_weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) self.qkv_bias = self.create_parameter( shape=[3, num_heads, self.head_dim], attr=qkv_bias_attr, dtype=self._dtype, - is_bias=True) + is_bias=True, + ) self.linear_weight = self.create_parameter( shape=[num_heads * self.head_dim, embed_dim], attr=linear_weight_attr, dtype=self._dtype, - is_bias=False) - self.linear_bias = self.create_parameter(shape=[embed_dim], - attr=linear_bias_attr, - dtype=self._dtype, - is_bias=True) + is_bias=False, + ) + self.linear_bias = self.create_parameter( + shape=[embed_dim], + attr=linear_bias_attr, + dtype=self._dtype, + is_bias=True, + ) # tensor model parallel if nranks > 1: @@ -323,10 +354,11 @@ class FusedMultiHeadAttention(Layer): self.pre_ln_scale = self.create_parameter( attr=pre_ln_scale_attr, shape=[embed_dim], - default_initializer=Constant(value=1.0)) - self.pre_ln_bias = self.create_parameter(attr=pre_ln_bias_attr, - shape=[embed_dim], - is_bias=True) + default_initializer=Constant(value=1.0), + ) + self.pre_ln_bias = self.create_parameter( + attr=pre_ln_bias_attr, shape=[embed_dim], is_bias=True + ) self.ln_scale = None self.ln_bias = None else: @@ -335,10 +367,11 @@ class FusedMultiHeadAttention(Layer): self.ln_scale = self.create_parameter( attr=ln_scale_attr, shape=[embed_dim], - default_initializer=Constant(value=1.0)) - self.ln_bias = self.create_parameter(attr=ln_bias_attr, - shape=[embed_dim], - is_bias=True) + default_initializer=Constant(value=1.0), + ) + self.ln_bias = self.create_parameter( + attr=ln_bias_attr, shape=[embed_dim], is_bias=True + ) self.dropout_rate = dropout_rate self.attn_dropout_rate = attn_dropout_rate @@ -402,15 +435,25 @@ class FusedMultiHeadAttention(Layer): ln_epsilon=self._epsilon, training=self.training, ring_id=self._ring_id, - name=self.name) + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' return 'embed_dim={}, num_heads={}, dropout_rate={}, attn_dropout_rate={}, epsilon={}, kdim={}, vdim={}, normalize_before={}, need_weights={}, dtype={}{}'.format( - self.embed_dim, self.num_heads, self.dropout_rate, - self.attn_dropout_rate, self._epsilon, self.kdim, self.vdim, - self.normalize_before, self.need_weights, self._dtype, name_str) + self.embed_dim, + self.num_heads, + self.dropout_rate, + self.attn_dropout_rate, + self._epsilon, + self.kdim, + self.vdim, + self.normalize_before, + self.need_weights, + self._dtype, + name_str, + ) def _amp_decorate(self, dtype): # tmp fix for amp.decorator(O2) @@ -493,33 +536,39 @@ class FusedFeedForward(Layer): # (1, 8, 8) """ - def __init__(self, - d_model, - dim_feedforward, - dropout_rate=0.1, - epsilon=1e-05, - activation="relu", - act_dropout_rate=None, - normalize_before=False, - linear1_weight_attr=None, - linear1_bias_attr=None, - linear2_weight_attr=None, - linear2_bias_attr=None, - ln1_scale_attr=None, - ln1_bias_attr=None, - ln2_scale_attr=None, - ln2_bias_attr=None, - nranks=1, - ring_id=-1, - name=None): + def __init__( + self, + d_model, + dim_feedforward, + dropout_rate=0.1, + epsilon=1e-05, + activation="relu", + act_dropout_rate=None, + normalize_before=False, + linear1_weight_attr=None, + linear1_bias_attr=None, + linear2_weight_attr=None, + linear2_bias_attr=None, + ln1_scale_attr=None, + ln1_bias_attr=None, + ln2_scale_attr=None, + ln2_bias_attr=None, + nranks=1, + ring_id=-1, + 
name=None, + ): super(FusedFeedForward, self).__init__() - assert d_model > 0, ( - "Expected d_model to be greater than 0, but received {}".format( - d_model)) - assert dim_feedforward > 0, ( - "Expected dim_feedforward to be greater than 0, but received {}". - format(dim_feedforward)) + assert ( + d_model > 0 + ), "Expected d_model to be greater than 0, but received {}".format( + d_model + ) + assert ( + dim_feedforward > 0 + ), "Expected dim_feedforward to be greater than 0, but received {}".format( + dim_feedforward + ) self._dtype = self._helper.get_default_dtype() self._d_model = d_model @@ -528,7 +577,9 @@ class FusedFeedForward(Layer): dim_feedforward = dim_feedforward // nranks self._dim_feedforward = dim_feedforward self._dropout_rate = dropout_rate - self._act_dropout_rate = dropout_rate if act_dropout_rate is None else act_dropout_rate + self._act_dropout_rate = ( + dropout_rate if act_dropout_rate is None else act_dropout_rate + ) self._act_method = activation self._normalize_before = normalize_before self._epsilon = epsilon @@ -538,22 +589,28 @@ class FusedFeedForward(Layer): shape=[d_model, dim_feedforward], attr=linear1_weight_attr, dtype=self._dtype, - is_bias=False) - self._linear1_bias = self.create_parameter(shape=[dim_feedforward], - attr=linear1_bias_attr, - dtype=self._dtype, - is_bias=True) + is_bias=False, + ) + self._linear1_bias = self.create_parameter( + shape=[dim_feedforward], + attr=linear1_bias_attr, + dtype=self._dtype, + is_bias=True, + ) self._linear2_weight = self.create_parameter( shape=[dim_feedforward, d_model], attr=linear2_weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) - self._linear2_bias = self.create_parameter(shape=[d_model], - attr=linear2_bias_attr, - dtype=self._dtype, - is_bias=True) + self._linear2_bias = self.create_parameter( + shape=[d_model], + attr=linear2_bias_attr, + dtype=self._dtype, + is_bias=True, + ) if nranks > 1: assert ring_id != -1 @@ -567,10 +624,11 @@ class FusedFeedForward(Layer): shape=[d_model], attr=ln1_scale_attr, is_bias=False, - default_initializer=Constant(1.0)) - self._ln1_bias = self.create_parameter(shape=[d_model], - attr=ln1_bias_attr, - is_bias=True) + default_initializer=Constant(1.0), + ) + self._ln1_bias = self.create_parameter( + shape=[d_model], attr=ln1_bias_attr, is_bias=True + ) self._ln2_scale = None self._ln2_bias = None else: @@ -580,10 +638,11 @@ class FusedFeedForward(Layer): shape=[d_model], attr=ln2_scale_attr, is_bias=False, - default_initializer=Constant(1.0)) - self._ln2_bias = self.create_parameter(shape=[d_model], - attr=ln2_bias_attr, - is_bias=True) + default_initializer=Constant(1.0), + ) + self._ln2_bias = self.create_parameter( + shape=[d_model], attr=ln2_bias_attr, is_bias=True + ) self.name = name @@ -606,15 +665,23 @@ class FusedFeedForward(Layer): pre_layer_norm=self._normalize_before, training=self.training, ring_id=self._ring_id, - name=self.name) + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' return 'd_model={}, dim_feedforward={}, dropout_rate={}, epsilon={}, activation={}, act_dropout_rate={}, normalize_before={}, dtype={}{}'.format( - self._d_model, self._dim_feedforward, self._dropout_rate, - self._epsilon, self._act_method, self._act_dropout_rate, - self._normalize_before, self._dtype, name_str) + self._d_model, + self._dim_feedforward, + self._dropout_rate, + self._epsilon, + self._act_method, + self._act_dropout_rate, + self._normalize_before, + self._dtype, + name_str, + ) def 
_amp_decorate(self, dtype): # tmp fix for amp.decorator(O2) @@ -694,31 +761,44 @@ class FusedTransformerEncoderLayer(Layer): enc_output = encoder_layer(enc_input, attn_mask) # [2, 4, 128] """ - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout_rate=0.1, - activation="relu", - attn_dropout_rate=None, - act_dropout_rate=None, - normalize_before=False, - weight_attr=None, - bias_attr=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout_rate=0.1, + activation="relu", + attn_dropout_rate=None, + act_dropout_rate=None, + normalize_before=False, + weight_attr=None, + bias_attr=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 super(FusedTransformerEncoderLayer, self).__init__() - assert d_model > 0, ("Expected d_model to be greater than 0, " - "but received {}".format(d_model)) - assert nhead > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(nhead)) + assert ( + d_model > 0 + ), "Expected d_model to be greater than 0, " "but received {}".format( + d_model + ) + assert ( + nhead > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + nhead + ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward)) - attn_dropout_rate = dropout_rate if attn_dropout_rate is None else attn_dropout_rate - act_dropout_rate = dropout_rate if act_dropout_rate is None else act_dropout_rate + "but received {}".format(dim_feedforward) + ) + attn_dropout_rate = ( + dropout_rate if attn_dropout_rate is None else attn_dropout_rate + ) + act_dropout_rate = ( + dropout_rate if act_dropout_rate is None else act_dropout_rate + ) self.normalize_before = normalize_before weight_attrs = _convert_param_attr_to_list(weight_attr, 2) @@ -737,18 +817,21 @@ class FusedTransformerEncoderLayer(Layer): pre_ln_scale_attr=weight_attrs[0], pre_ln_bias_attr=bias_attrs[0], ln_scale_attr=weight_attrs[0], - ln_bias_attr=bias_attrs[0]) - - self.ffn = FusedFeedForward(d_model, - dim_feedforward, - dropout_rate=dropout_rate, - activation=activation, - act_dropout_rate=act_dropout_rate, - normalize_before=self.normalize_before, - linear1_weight_attr=weight_attrs[1], - linear1_bias_attr=bias_attrs[1], - linear2_weight_attr=weight_attrs[1], - linear2_bias_attr=bias_attrs[1]) + ln_bias_attr=bias_attrs[0], + ) + + self.ffn = FusedFeedForward( + d_model, + dim_feedforward, + dropout_rate=dropout_rate, + activation=activation, + act_dropout_rate=act_dropout_rate, + normalize_before=self.normalize_before, + linear1_weight_attr=weight_attrs[1], + linear1_bias_attr=bias_attrs[1], + linear2_weight_attr=weight_attrs[1], + linear2_bias_attr=bias_attrs[1], + ) def forward(self, src, src_mask=None, cache=None): """ @@ -784,9 +867,9 @@ class FusedTransformerEncoderLayer(Layer): if cache is None: attn_out = self.fused_attn(src, attn_mask=src_mask) else: - attn_out, incremental_cache = self.fused_attn(src, - attn_mask=src_mask, - cache=cache) + attn_out, incremental_cache = self.fused_attn( + src, attn_mask=src_mask, cache=cache + ) ffn_out = self.ffn(attn_out) @@ -887,21 +970,23 @@ class FusedTransformer(Layer): cross_attn_mask) # [2, 6, 128] """ - def __init__(self, - d_model=512, - nhead=8, - num_encoder_layers=6, - num_decoder_layers=6, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - attn_dropout=None, - act_dropout=None, - normalize_before=False, - weight_attr=None, - bias_attr=None, - custom_encoder=None, - custom_decoder=None): + def 
__init__( + self, + d_model=512, + nhead=8, + num_encoder_layers=6, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + attn_dropout=None, + act_dropout=None, + normalize_before=False, + weight_attr=None, + bias_attr=None, + custom_encoder=None, + custom_decoder=None, + ): super(fusedTransformer, self).__init__() raise NotImplementedError() @@ -1069,40 +1154,49 @@ class FusedMultiTransformer(Layer): enc_output = encoder_layers(enc_input, attn_mask) # [2, 4, 128] """ - def __init__(self, - embed_dim, - num_heads, - dim_feedforward, - dropout_rate=0.0, - activation="gelu", - normalize_before=True, - ln_scale_attrs=None, - ln_bias_attrs=None, - qkv_weight_attrs=None, - qkv_bias_attrs=None, - linear_weight_attrs=None, - linear_bias_attrs=None, - ffn_ln_scale_attrs=None, - ffn_ln_bias_attrs=None, - ffn1_weight_attrs=None, - ffn1_bias_attrs=None, - ffn2_weight_attrs=None, - ffn2_bias_attrs=None, - epsilon=1e-5, - num_layers=-1, - nranks=1, - trans_qkvw=True, - ring_id=-1, - name=None): + def __init__( + self, + embed_dim, + num_heads, + dim_feedforward, + dropout_rate=0.0, + activation="gelu", + normalize_before=True, + ln_scale_attrs=None, + ln_bias_attrs=None, + qkv_weight_attrs=None, + qkv_bias_attrs=None, + linear_weight_attrs=None, + linear_bias_attrs=None, + ffn_ln_scale_attrs=None, + ffn_ln_bias_attrs=None, + ffn1_weight_attrs=None, + ffn1_bias_attrs=None, + ffn2_weight_attrs=None, + ffn2_bias_attrs=None, + epsilon=1e-5, + num_layers=-1, + nranks=1, + trans_qkvw=True, + ring_id=-1, + name=None, + ): super(FusedMultiTransformer, self).__init__() - assert embed_dim > 0, ("Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim)) - assert num_heads > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(num_heads)) - assert dim_feedforward > 0, ( - "Expected dim_feedforward to be greater than 0, but received {}". 
- format(dim_feedforward)) + assert embed_dim > 0, ( + "Expected embed_dim to be greater than 0, " + "but received {}".format(embed_dim) + ) + assert ( + num_heads > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + num_heads + ) + assert ( + dim_feedforward > 0 + ), "Expected dim_feedforward to be greater than 0, but received {}".format( + dim_feedforward + ) self.normalize_before = normalize_before self._dtype = self._helper.get_default_dtype() @@ -1113,7 +1207,9 @@ class FusedMultiTransformer(Layer): self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + assert ( + self.head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" # tensor model parallel if nranks > 1: @@ -1159,57 +1255,71 @@ class FusedMultiTransformer(Layer): ln_scale = self.create_parameter( attr=ln_scale_attr, shape=[embed_dim], - default_initializer=Constant(value=1.0)) - ln_bias = self.create_parameter(attr=ln_bias_attr, - shape=[embed_dim], - is_bias=True) + default_initializer=Constant(value=1.0), + ) + ln_bias = self.create_parameter( + attr=ln_bias_attr, shape=[embed_dim], is_bias=True + ) qkv_weight = self.create_parameter( shape=[3, num_heads, self.head_dim, embed_dim] - if trans_qkvw else [embed_dim, 3, num_heads, self.head_dim], + if trans_qkvw + else [embed_dim, 3, num_heads, self.head_dim], attr=qkv_weight_attr, dtype=self._dtype, - is_bias=False) + is_bias=False, + ) qkv_bias = self.create_parameter( shape=[3, num_heads, self.head_dim], attr=qkv_bias_attr, dtype=self._dtype, - is_bias=True) + is_bias=True, + ) linear_weight = self.create_parameter( shape=[num_heads * self.head_dim, embed_dim], attr=linear_weight_attr, dtype=self._dtype, - is_bias=False) - linear_bias = self.create_parameter(shape=[embed_dim], - attr=linear_bias_attr, - dtype=self._dtype, - is_bias=True) + is_bias=False, + ) + linear_bias = self.create_parameter( + shape=[embed_dim], + attr=linear_bias_attr, + dtype=self._dtype, + is_bias=True, + ) ffn_ln_scale = self.create_parameter( shape=[embed_dim], attr=ffn_ln_scale_attr, is_bias=False, - default_initializer=Constant(1.0)) - ffn_ln_bias = self.create_parameter(shape=[embed_dim], - attr=ffn_ln_bias_attr, - is_bias=True) + default_initializer=Constant(1.0), + ) + ffn_ln_bias = self.create_parameter( + shape=[embed_dim], attr=ffn_ln_bias_attr, is_bias=True + ) ffn1_weight = self.create_parameter( shape=[embed_dim, dim_feedforward], attr=ffn1_weight_attr, dtype=self._dtype, - is_bias=False) - ffn1_bias = self.create_parameter(shape=[dim_feedforward], - attr=ffn1_bias_attr, - dtype=self._dtype, - is_bias=True) + is_bias=False, + ) + ffn1_bias = self.create_parameter( + shape=[dim_feedforward], + attr=ffn1_bias_attr, + dtype=self._dtype, + is_bias=True, + ) ffn2_weight = self.create_parameter( shape=[dim_feedforward, embed_dim], attr=ffn2_weight_attr, dtype=self._dtype, - is_bias=False) - ffn2_bias = self.create_parameter(shape=[embed_dim], - attr=ffn2_bias_attr, - dtype=self._dtype, - is_bias=True) + is_bias=False, + ) + ffn2_bias = self.create_parameter( + shape=[embed_dim], + attr=ffn2_bias_attr, + dtype=self._dtype, + is_bias=True, + ) # tensor model parallel if nranks > 1: @@ -1240,12 +1350,9 @@ class FusedMultiTransformer(Layer): self.activation = activation self.name = name - def forward(self, - src, - attn_mask=None, - caches=None, - pre_caches=None, - time_step=None): + def forward( + self, src, 
attn_mask=None, caches=None, pre_caches=None, time_step=None + ): r""" Applies multi transformer layers on the input. @@ -1306,5 +1413,6 @@ class FusedMultiTransformer(Layer): mode='upscale_in_train', trans_qkvw=self._trans_qkvw, ring_id=self._ring_id, - name=self.name) + name=self.name, + ) return out diff --git a/python/paddle/incubate/operators/__init__.py b/python/paddle/incubate/operators/__init__.py index bc4ba8c3890fda80cc8001a925cb24cdff890ca0..eb105a12e1ab6429a46596387ee80c28e99166ad 100644 --- a/python/paddle/incubate/operators/__init__.py +++ b/python/paddle/incubate/operators/__init__.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .softmax_mask_fuse_upper_triangle import softmax_mask_fuse_upper_triangle # noqa: F401 +from .softmax_mask_fuse_upper_triangle import ( + softmax_mask_fuse_upper_triangle, +) # noqa: F401 from .softmax_mask_fuse import softmax_mask_fuse # noqa: F401 -from .resnet_unit import ResNetUnit #noqa: F401 -from .graph_send_recv import graph_send_recv #noqa: F401 -from .graph_khop_sampler import graph_khop_sampler #noqa: F401 -from .graph_sample_neighbors import graph_sample_neighbors #noqa: F401 -from .graph_reindex import graph_reindex #noqa: F401 +from .resnet_unit import ResNetUnit # noqa: F401 +from .graph_send_recv import graph_send_recv # noqa: F401 +from .graph_khop_sampler import graph_khop_sampler # noqa: F401 +from .graph_sample_neighbors import graph_sample_neighbors # noqa: F401 +from .graph_reindex import graph_reindex # noqa: F401 diff --git a/python/paddle/incubate/operators/graph_khop_sampler.py b/python/paddle/incubate/operators/graph_khop_sampler.py index 2122383381358e7f8fed0fa354a136a10da208e0..821c4b418ed7ea3d55cd024b062392f7a53106f7 100644 --- a/python/paddle/incubate/operators/graph_khop_sampler.py +++ b/python/paddle/incubate/operators/graph_khop_sampler.py @@ -18,13 +18,15 @@ from paddle.fluid.data_feeder import check_variable_and_dtype from paddle import _legacy_C_ops -def graph_khop_sampler(row, - colptr, - input_nodes, - sample_sizes, - sorted_eids=None, - return_eids=False, - name=None): +def graph_khop_sampler( + row, + colptr, + input_nodes, + sample_sizes, + sorted_eids=None, + return_eids=False, + name=None, +): """ Graph Khop Sampler API. @@ -86,36 +88,64 @@ def graph_khop_sampler(row, if _non_static_mode(): if return_eids: if sorted_eids is None: - raise ValueError("`sorted_eid` should not be None " - "if return_eids is True.") - edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \ - _legacy_C_ops.graph_khop_sampler(row, sorted_eids, - colptr, input_nodes, - "sample_sizes", sample_sizes, - "return_eids", True) + raise ValueError( + "`sorted_eid` should not be None " "if return_eids is True." 
+ ) + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + edge_eids, + ) = _legacy_C_ops.graph_khop_sampler( + row, + sorted_eids, + colptr, + input_nodes, + "sample_sizes", + sample_sizes, + "return_eids", + True, + ) return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids else: - edge_src, edge_dst, sample_index, reindex_nodes, _ = \ - _legacy_C_ops.graph_khop_sampler(row, None, - colptr, input_nodes, - "sample_sizes", sample_sizes, - "return_eids", False) + ( + edge_src, + edge_dst, + sample_index, + reindex_nodes, + _, + ) = _legacy_C_ops.graph_khop_sampler( + row, + None, + colptr, + input_nodes, + "sample_sizes", + sample_sizes, + "return_eids", + False, + ) return edge_src, edge_dst, sample_index, reindex_nodes - check_variable_and_dtype(row, "Row", ("int32", "int64"), - "graph_khop_sampler") + check_variable_and_dtype( + row, "Row", ("int32", "int64"), "graph_khop_sampler" + ) if return_eids: if sorted_eids is None: - raise ValueError("`sorted_eid` should not be None " - "if return_eids is True.") - check_variable_and_dtype(sorted_eids, "Eids", ("int32", "int64"), - "graph_khop_sampler") - - check_variable_and_dtype(colptr, "Col_Ptr", ("int32", "int64"), - "graph_khop_sampler") - check_variable_and_dtype(input_nodes, "X", ("int32", "int64"), - "graph_khop_sampler") + raise ValueError( + "`sorted_eid` should not be None " "if return_eids is True." + ) + check_variable_and_dtype( + sorted_eids, "Eids", ("int32", "int64"), "graph_khop_sampler" + ) + + check_variable_and_dtype( + colptr, "Col_Ptr", ("int32", "int64"), "graph_khop_sampler" + ) + check_variable_and_dtype( + input_nodes, "X", ("int32", "int64"), "graph_khop_sampler" + ) helper = LayerHelper("graph_khop_sampler", **locals()) edge_src = helper.create_variable_for_type_inference(dtype=row.dtype) @@ -123,24 +153,23 @@ def graph_khop_sampler(row, sample_index = helper.create_variable_for_type_inference(dtype=row.dtype) reindex_nodes = helper.create_variable_for_type_inference(dtype=row.dtype) edge_eids = helper.create_variable_for_type_inference(dtype=row.dtype) - helper.append_op(type="graph_khop_sampler", - inputs={ - "Row": row, - "Eids": sorted_eids, - "Col_Ptr": colptr, - "X": input_nodes - }, - outputs={ - "Out_Src": edge_src, - "Out_Dst": edge_dst, - "Sample_Index": sample_index, - "Reindex_X": reindex_nodes, - "Out_Eids": edge_eids - }, - attrs={ - "sample_sizes": sample_sizes, - "return_eids": return_eids - }) + helper.append_op( + type="graph_khop_sampler", + inputs={ + "Row": row, + "Eids": sorted_eids, + "Col_Ptr": colptr, + "X": input_nodes, + }, + outputs={ + "Out_Src": edge_src, + "Out_Dst": edge_dst, + "Sample_Index": sample_index, + "Reindex_X": reindex_nodes, + "Out_Eids": edge_eids, + }, + attrs={"sample_sizes": sample_sizes, "return_eids": return_eids}, + ) if return_eids: return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids else: diff --git a/python/paddle/incubate/operators/graph_reindex.py b/python/paddle/incubate/operators/graph_reindex.py index d8f67741908019e38326cc1f82ea29b7244ff0f0..d721c9a002e1895b7650b2e7bc343dab2a6c2a69 100644 --- a/python/paddle/incubate/operators/graph_reindex.py +++ b/python/paddle/incubate/operators/graph_reindex.py @@ -19,17 +19,21 @@ from paddle import _legacy_C_ops import paddle.utils.deprecated as deprecated -@deprecated(since="2.4.0", - update_to="paddle.geometric.reindex_graph", - level=1, - reason="paddle.incubate.graph_reindex will be removed in future") -def graph_reindex(x, - neighbors, - count, - value_buffer=None, - 
index_buffer=None, - flag_buffer_hashtable=False, - name=None): +@deprecated( + since="2.4.0", + update_to="paddle.geometric.reindex_graph", + level=1, + reason="paddle.incubate.graph_reindex will be removed in future", +) +def graph_reindex( + x, + neighbors, + count, + value_buffer=None, + index_buffer=None, + flag_buffer_hashtable=False, + name=None, +): """ Graph Reindex API. @@ -107,47 +111,55 @@ def graph_reindex(x, """ if flag_buffer_hashtable: if value_buffer is None or index_buffer is None: - raise ValueError("`value_buffer` and `index_buffer` should not" - "be None if `flag_buffer_hashtable` is True.") + raise ValueError( + "`value_buffer` and `index_buffer` should not" + "be None if `flag_buffer_hashtable` is True." + ) if _non_static_mode(): - reindex_src, reindex_dst, out_nodes = \ - _legacy_C_ops.graph_reindex(x, neighbors, count, value_buffer, index_buffer, - "flag_buffer_hashtable", flag_buffer_hashtable) + reindex_src, reindex_dst, out_nodes = _legacy_C_ops.graph_reindex( + x, + neighbors, + count, + value_buffer, + index_buffer, + "flag_buffer_hashtable", + flag_buffer_hashtable, + ) return reindex_src, reindex_dst, out_nodes check_variable_and_dtype(x, "X", ("int32", "int64"), "graph_reindex") - check_variable_and_dtype(neighbors, "Neighbors", ("int32", "int64"), - "graph_reindex") + check_variable_and_dtype( + neighbors, "Neighbors", ("int32", "int64"), "graph_reindex" + ) check_variable_and_dtype(count, "Count", ("int32"), "graph_reindex") if flag_buffer_hashtable: - check_variable_and_dtype(value_buffer, "HashTable_Value", ("int32"), - "graph_reindex") - check_variable_and_dtype(index_buffer, "HashTable_Index", ("int32"), - "graph_reindex") + check_variable_and_dtype( + value_buffer, "HashTable_Value", ("int32"), "graph_reindex" + ) + check_variable_and_dtype( + index_buffer, "HashTable_Index", ("int32"), "graph_reindex" + ) helper = LayerHelper("graph_reindex", **locals()) reindex_src = helper.create_variable_for_type_inference(dtype=x.dtype) reindex_dst = helper.create_variable_for_type_inference(dtype=x.dtype) out_nodes = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="graph_reindex", - inputs={ - "X": - x, - "Neighbors": - neighbors, - "Count": - count, - "HashTable_Value": - value_buffer if flag_buffer_hashtable else None, - "HashTable_Index": - index_buffer if flag_buffer_hashtable else None, - }, - outputs={ - "Reindex_Src": reindex_src, - "Reindex_Dst": reindex_dst, - "Out_Nodes": out_nodes - }, - attrs={"flag_buffer_hashtable": flag_buffer_hashtable}) + helper.append_op( + type="graph_reindex", + inputs={ + "X": x, + "Neighbors": neighbors, + "Count": count, + "HashTable_Value": value_buffer if flag_buffer_hashtable else None, + "HashTable_Index": index_buffer if flag_buffer_hashtable else None, + }, + outputs={ + "Reindex_Src": reindex_src, + "Reindex_Dst": reindex_dst, + "Out_Nodes": out_nodes, + }, + attrs={"flag_buffer_hashtable": flag_buffer_hashtable}, + ) return reindex_src, reindex_dst, out_nodes diff --git a/python/paddle/incubate/operators/graph_sample_neighbors.py b/python/paddle/incubate/operators/graph_sample_neighbors.py index b39910f9e1126b5035c90123f73ce4ef1331d82e..48e52bc691408387aa6e7ebd3c5eadd0f7b4eb9a 100644 --- a/python/paddle/incubate/operators/graph_sample_neighbors.py +++ b/python/paddle/incubate/operators/graph_sample_neighbors.py @@ -23,16 +23,19 @@ import paddle.utils.deprecated as deprecated since="2.4.0", update_to="paddle.geometric.sample_neighbors", level=1, - 
reason="paddle.incubate.graph_sample_neighbors will be removed in future") -def graph_sample_neighbors(row, - colptr, - input_nodes, - eids=None, - perm_buffer=None, - sample_size=-1, - return_eids=False, - flag_perm_buffer=False, - name=None): + reason="paddle.incubate.graph_sample_neighbors will be removed in future", +) +def graph_sample_neighbors( + row, + colptr, + input_nodes, + eids=None, + perm_buffer=None, + sample_size=-1, + return_eids=False, + flag_perm_buffer=False, + name=None, +): """ Graph Sample Neighbors API. @@ -96,59 +99,83 @@ def graph_sample_neighbors(row, if return_eids: if eids is None: raise ValueError( - "`eids` should not be None if `return_eids` is True.") + "`eids` should not be None if `return_eids` is True." + ) if flag_perm_buffer: if perm_buffer is None: raise ValueError( "`perm_buffer` should not be None if `flag_perm_buffer`" - "is True.") + "is True." + ) if _non_static_mode(): - out_neighbors, out_count, out_eids = _legacy_C_ops.graph_sample_neighbors( - row, colptr, input_nodes, eids, perm_buffer, "sample_size", - sample_size, "return_eids", return_eids, "flag_perm_buffer", - flag_perm_buffer) + ( + out_neighbors, + out_count, + out_eids, + ) = _legacy_C_ops.graph_sample_neighbors( + row, + colptr, + input_nodes, + eids, + perm_buffer, + "sample_size", + sample_size, + "return_eids", + return_eids, + "flag_perm_buffer", + flag_perm_buffer, + ) if return_eids: return out_neighbors, out_count, out_eids return out_neighbors, out_count - check_variable_and_dtype(row, "Row", ("int32", "int64"), - "graph_sample_neighbors") - check_variable_and_dtype(colptr, "Col_Ptr", ("int32", "int64"), - "graph_sample_neighbors") - check_variable_and_dtype(input_nodes, "X", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + row, "Row", ("int32", "int64"), "graph_sample_neighbors" + ) + check_variable_and_dtype( + colptr, "Col_Ptr", ("int32", "int64"), "graph_sample_neighbors" + ) + check_variable_and_dtype( + input_nodes, "X", ("int32", "int64"), "graph_sample_neighbors" + ) if return_eids: - check_variable_and_dtype(eids, "Eids", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + eids, "Eids", ("int32", "int64"), "graph_sample_neighbors" + ) if flag_perm_buffer: - check_variable_and_dtype(perm_buffer, "Perm_Buffer", ("int32", "int64"), - "graph_sample_neighbors") + check_variable_and_dtype( + perm_buffer, + "Perm_Buffer", + ("int32", "int64"), + "graph_sample_neighbors", + ) helper = LayerHelper("graph_sample_neighbors", **locals()) out_neighbors = helper.create_variable_for_type_inference(dtype=row.dtype) out_count = helper.create_variable_for_type_inference(dtype=row.dtype) out_eids = helper.create_variable_for_type_inference(dtype=row.dtype) - helper.append_op(type="graph_sample_neighbors", - inputs={ - "Row": row, - "Col_Ptr": colptr, - "X": input_nodes, - "Eids": eids if return_eids else None, - "Perm_Buffer": - perm_buffer if flag_perm_buffer else None - }, - outputs={ - "Out": out_neighbors, - "Out_Count": out_count, - "Out_Eids": out_eids - }, - attrs={ - "sample_size": sample_size, - "return_eids": return_eids, - "flag_perm_buffer": flag_perm_buffer - }) + helper.append_op( + type="graph_sample_neighbors", + inputs={ + "Row": row, + "Col_Ptr": colptr, + "X": input_nodes, + "Eids": eids if return_eids else None, + "Perm_Buffer": perm_buffer if flag_perm_buffer else None, + }, + outputs={ + "Out": out_neighbors, + "Out_Count": out_count, + "Out_Eids": out_eids, + }, + attrs={ + "sample_size": sample_size, + 
"return_eids": return_eids, + "flag_perm_buffer": flag_perm_buffer, + }, + ) if return_eids: return out_neighbors, out_count, out_eids return out_neighbors, out_count diff --git a/python/paddle/incubate/operators/graph_send_recv.py b/python/paddle/incubate/operators/graph_send_recv.py index 939d8c343fd5c25a61727e59a8b91eb31dc97176..88fc421f4a09a94c1dc5a61627424c497750f6cb 100644 --- a/python/paddle/incubate/operators/graph_send_recv.py +++ b/python/paddle/incubate/operators/graph_send_recv.py @@ -16,7 +16,12 @@ import numpy as np from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode from paddle.fluid.framework import Variable -from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from paddle.fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + convert_dtype, +) from paddle.fluid.layers.tensor import cast from paddle import _C_ops, _legacy_C_ops import paddle.utils.deprecated as deprecated @@ -26,13 +31,11 @@ import paddle.utils.deprecated as deprecated since="2.4.0", update_to="paddle.geometric.send_u_recv", level=1, - reason="graph_send_recv in paddle.incubate will be removed in future") -def graph_send_recv(x, - src_index, - dst_index, - pool_type="sum", - out_size=None, - name=None): + reason="graph_send_recv in paddle.incubate will be removed in future", +) +def graph_send_recv( + x, src_index, dst_index, pool_type="sum", out_size=None, name=None +): r""" Graph Learning Send_Recv combine operator. @@ -116,54 +119,69 @@ def graph_send_recv(x, if pool_type not in ["sum", "mean", "max", "min"]: raise ValueError( "pool_type should be `sum`, `mean`, `max` or `min`, but received %s" - % pool_type) + % pool_type + ) # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
if _in_legacy_dygraph(): out_size = convert_out_size_to_list(out_size) - out, tmp = _legacy_C_ops.graph_send_recv(x, src_index, dst_index, - None, 'reduce_op', - pool_type.upper(), 'out_size', - out_size) + out, tmp = _legacy_C_ops.graph_send_recv( + x, + src_index, + dst_index, + None, + 'reduce_op', + pool_type.upper(), + 'out_size', + out_size, + ) return out if in_dygraph_mode(): out_size = convert_out_size_to_list(out_size) - return _C_ops.graph_send_recv(x, src_index, dst_index, - pool_type.upper(), out_size) - - check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"), - "graph_send_recv") - check_variable_and_dtype(src_index, "Src_index", ("int32", "int64"), - "graph_send_recv") - check_variable_and_dtype(dst_index, "Dst_index", ("int32", "int64"), - "graph_send_recv") + return _C_ops.graph_send_recv( + x, src_index, dst_index, pool_type.upper(), out_size + ) + + check_variable_and_dtype( + x, "X", ("float32", "float64", "int32", "int64"), "graph_send_recv" + ) + check_variable_and_dtype( + src_index, "Src_index", ("int32", "int64"), "graph_send_recv" + ) + check_variable_and_dtype( + dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv" + ) if out_size: - check_type(out_size, 'out_size', (int, np.int32, np.int64, Variable), - 'graph_send_recv') + check_type( + out_size, + 'out_size', + (int, np.int32, np.int64, Variable), + 'graph_send_recv', + ) if isinstance(out_size, Variable): - check_dtype(out_size.dtype, 'out_size', ['int32', 'int64'], - 'graph_send_recv') + check_dtype( + out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_recv' + ) helper = LayerHelper("graph_send_recv", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - dst_count = helper.create_variable_for_type_inference(dtype="int32", - stop_gradient=True) + dst_count = helper.create_variable_for_type_inference( + dtype="int32", stop_gradient=True + ) inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index} attrs = {"reduce_op": pool_type.upper()} - get_out_size_tensor_inputs(inputs=inputs, - attrs=attrs, - out_size=out_size, - op_type='graph_send_recv') - - helper.append_op(type="graph_send_recv", - inputs=inputs, - outputs={ - "Out": out, - "Dst_count": dst_count - }, - attrs=attrs) + get_out_size_tensor_inputs( + inputs=inputs, attrs=attrs, out_size=out_size, op_type='graph_send_recv' + ) + + helper.append_op( + type="graph_send_recv", + inputs=inputs, + outputs={"Out": out, "Dst_count": dst_count}, + attrs=attrs, + ) return out @@ -192,9 +210,14 @@ def get_out_size_tensor_inputs(inputs, attrs, out_size, op_type): attrs['out_size'] = [out_size] elif isinstance(out_size, Variable): out_size.stop_gradient = True - check_dtype(out_size.dtype, 'out_size', ['int32', 'int64'], op_type, - '(When type of out_size in' + op_type + ' is Variable.)') - if (convert_dtype(out_size.dtype) == 'int64'): + check_dtype( + out_size.dtype, + 'out_size', + ['int32', 'int64'], + op_type, + '(When type of out_size in' + op_type + ' is Variable.)', + ) + if convert_dtype(out_size.dtype) == 'int64': out_size = cast(out_size, 'int32') inputs["Out_size"] = out_size else: diff --git a/python/paddle/incubate/operators/resnet_unit.py b/python/paddle/incubate/operators/resnet_unit.py index c8fb583bff4b6dfb91738b5d7aa41e791fb67dbe..601bbfd11de475a0b73a0eb9b01cbfb71142de6e 100644 --- a/python/paddle/incubate/operators/resnet_unit.py +++ b/python/paddle/incubate/operators/resnet_unit.py @@ -21,37 +21,77 @@ from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.param_attr 
import ParamAttr -def resnet_unit(x, filter_x, scale_x, bias_x, mean_x, var_x, z, filter_z, - scale_z, bias_z, mean_z, var_z, stride, stride_z, padding, - dilation, groups, momentum, eps, data_format, fuse_add, - has_shortcut, use_global_stats, is_test, act): +def resnet_unit( + x, + filter_x, + scale_x, + bias_x, + mean_x, + var_x, + z, + filter_z, + scale_z, + bias_z, + mean_z, + var_z, + stride, + stride_z, + padding, + dilation, + groups, + momentum, + eps, + data_format, + fuse_add, + has_shortcut, + use_global_stats, + is_test, + act, +): helper = LayerHelper('resnet_unit', **locals()) bn_param_dtype = fluid.core.VarDesc.VarType.FP32 bit_mask_dtype = fluid.core.VarDesc.VarType.INT32 out = helper.create_variable_for_type_inference(x.dtype) - bit_mask = helper.create_variable_for_type_inference(dtype=bit_mask_dtype, - stop_gradient=True) + bit_mask = helper.create_variable_for_type_inference( + dtype=bit_mask_dtype, stop_gradient=True + ) # intermediate_out for x - conv_x = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) + conv_x = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_mean_x = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) saved_invstd_x = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) running_mean_x = mean_x running_var_x = var_x # intermediate_out for z - conv_z = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) + conv_z = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_mean_z = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) saved_invstd_z = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) - running_mean_z = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if mean_z is None else mean_z - running_var_z = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if var_z is None else var_z + dtype=bn_param_dtype, stop_gradient=True + ) + running_mean_z = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if mean_z is None + else mean_z + ) + running_var_z = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if var_z is None + else var_z + ) inputs = { 'X': x, @@ -65,7 +105,7 @@ def resnet_unit(x, filter_x, scale_x, bias_x, mean_x, var_x, z, filter_z, 'ScaleZ': scale_z, 'BiasZ': bias_z, 'MeanZ': mean_z, - 'VarZ': var_z + 'VarZ': var_z, } attrs = { @@ -81,7 +121,7 @@ def resnet_unit(x, filter_x, scale_x, bias_x, mean_x, var_x, z, filter_z, 'has_shortcut': has_shortcut, 'use_global_stats': use_global_stats, 'is_test': is_test, - 'act_type': act + 'act_type': act, } outputs = { @@ -99,10 +139,9 @@ def resnet_unit(x, filter_x, scale_x, bias_x, mean_x, var_x, z, filter_z, 'RunningVarZ': running_var_z, } - helper.append_op(type='resnet_unit', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='resnet_unit', inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -113,31 +152,33 @@ class ResNetUnit(Layer): ResNetUnit is designed for optimize the performence by using cudnnv8 API. 
""" - def __init__(self, - num_channels_x, - num_filters, - filter_size, - stride=1, - momentum=0.9, - eps=1e-5, - data_format='NHWC', - act='relu', - fuse_add=False, - has_shortcut=False, - use_global_stats=False, - is_test=False, - filter_x_attr=None, - scale_x_attr=None, - bias_x_attr=None, - moving_mean_x_name=None, - moving_var_x_name=None, - num_channels_z=1, - stride_z=1, - filter_z_attr=None, - scale_z_attr=None, - bias_z_attr=None, - moving_mean_z_name=None, - moving_var_z_name=None): + def __init__( + self, + num_channels_x, + num_filters, + filter_size, + stride=1, + momentum=0.9, + eps=1e-5, + data_format='NHWC', + act='relu', + fuse_add=False, + has_shortcut=False, + use_global_stats=False, + is_test=False, + filter_x_attr=None, + scale_x_attr=None, + bias_x_attr=None, + moving_mean_x_name=None, + moving_var_x_name=None, + num_channels_z=1, + stride_z=1, + filter_z_attr=None, + scale_z_attr=None, + bias_z_attr=None, + moving_mean_z_name=None, + moving_var_z_name=None, + ): super(ResNetUnit, self).__init__() self._stride = stride self._stride_z = stride_z @@ -158,89 +199,124 @@ class ResNetUnit(Layer): valid_format = {'NHWC', 'NCHW'} if data_format not in valid_format: raise ValueError( - "conv_format must be one of {}, but got conv_format='{}'". - format(valid_format, data_format)) + "conv_format must be one of {}, but got conv_format='{}'".format( + valid_format, data_format + ) + ) def _get_default_param_initializer(channels): filter_elem_num = np.prod(self._kernel_size) * channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return I.Normal(0.0, std) - is_nchw = (data_format == 'NCHW') + is_nchw = data_format == 'NCHW' # initial filter bn_param_dtype = fluid.core.VarDesc.VarType.FP32 if not is_nchw: bn_param_shape = [1, 1, 1, num_filters] filter_x_shape = [ - num_filters, filter_size, filter_size, num_channels_x + num_filters, + filter_size, + filter_size, + num_channels_x, ] filter_z_shape = [ - num_filters, filter_size, filter_size, num_channels_z + num_filters, + filter_size, + filter_size, + num_channels_z, ] else: bn_param_shape = [1, num_filters, 1, 1] filter_x_shape = [ - num_filters, num_channels_x, filter_size, filter_size + num_filters, + num_channels_x, + filter_size, + filter_size, ] filter_z_shape = [ - num_filters, num_channels_z, filter_size, filter_size + num_filters, + num_channels_z, + filter_size, + filter_size, ] self.filter_x = self.create_parameter( shape=filter_x_shape, attr=filter_x_attr, - default_initializer=_get_default_param_initializer(num_channels_x)) + default_initializer=_get_default_param_initializer(num_channels_x), + ) self.scale_x = self.create_parameter( shape=bn_param_shape, attr=scale_x_attr, dtype=bn_param_dtype, - default_initializer=I.Constant(1.0)) - self.bias_x = self.create_parameter(shape=bn_param_shape, - attr=bias_x_attr, - dtype=bn_param_dtype, - is_bias=True) - self.mean_x = self.create_parameter(attr=ParamAttr( - name=moving_mean_x_name, - initializer=I.Constant(0.0), - trainable=False), - shape=bn_param_shape, - dtype=bn_param_dtype) + default_initializer=I.Constant(1.0), + ) + self.bias_x = self.create_parameter( + shape=bn_param_shape, + attr=bias_x_attr, + dtype=bn_param_dtype, + is_bias=True, + ) + self.mean_x = self.create_parameter( + attr=ParamAttr( + name=moving_mean_x_name, + initializer=I.Constant(0.0), + trainable=False, + ), + shape=bn_param_shape, + dtype=bn_param_dtype, + ) self.mean_x.stop_gradient = True - self.var_x = self.create_parameter(attr=ParamAttr( - 
name=moving_var_x_name, - initializer=I.Constant(1.0), - trainable=False), - shape=bn_param_shape, - dtype=bn_param_dtype) + self.var_x = self.create_parameter( + attr=ParamAttr( + name=moving_var_x_name, + initializer=I.Constant(1.0), + trainable=False, + ), + shape=bn_param_shape, + dtype=bn_param_dtype, + ) self.var_x.stop_gradient = True if has_shortcut: self.filter_z = self.create_parameter( shape=filter_z_shape, attr=filter_z_attr, default_initializer=_get_default_param_initializer( - num_channels_z)) + num_channels_z + ), + ) self.scale_z = self.create_parameter( shape=bn_param_shape, attr=scale_z_attr, dtype=bn_param_dtype, - default_initializer=I.Constant(1.0)) - self.bias_z = self.create_parameter(shape=bn_param_shape, - attr=bias_z_attr, - dtype=bn_param_dtype, - is_bias=True) - self.mean_z = self.create_parameter(attr=ParamAttr( - name=moving_mean_z_name, - initializer=I.Constant(0.0), - trainable=False), - shape=bn_param_shape, - dtype=bn_param_dtype) + default_initializer=I.Constant(1.0), + ) + self.bias_z = self.create_parameter( + shape=bn_param_shape, + attr=bias_z_attr, + dtype=bn_param_dtype, + is_bias=True, + ) + self.mean_z = self.create_parameter( + attr=ParamAttr( + name=moving_mean_z_name, + initializer=I.Constant(0.0), + trainable=False, + ), + shape=bn_param_shape, + dtype=bn_param_dtype, + ) self.mean_z.stop_gradient = True - self.var_z = self.create_parameter(attr=ParamAttr( - name=moving_var_z_name, - initializer=I.Constant(1.0), - trainable=False), - shape=bn_param_shape, - dtype=bn_param_dtype) + self.var_z = self.create_parameter( + attr=ParamAttr( + name=moving_var_z_name, + initializer=I.Constant(1.0), + trainable=False, + ), + shape=bn_param_shape, + dtype=bn_param_dtype, + ) self.var_z.stop_gradient = True else: self.filter_z = None @@ -253,12 +329,31 @@ class ResNetUnit(Layer): if self._fuse_add and z is None: raise ValueError("z can not be None") - out = resnet_unit(x, self.filter_x, self.scale_x, self.bias_x, - self.mean_x, self.var_x, z, self.filter_z, - self.scale_z, self.bias_z, self.mean_z, self.var_z, - self._stride, self._stride_z, self._padding, - self._dilation, self._groups, self._momentum, - self._eps, self._data_format, self._fuse_add, - self._has_shortcut, self._use_global_stats, - self._is_test, self._act) + out = resnet_unit( + x, + self.filter_x, + self.scale_x, + self.bias_x, + self.mean_x, + self.var_x, + z, + self.filter_z, + self.scale_z, + self.bias_z, + self.mean_z, + self.var_z, + self._stride, + self._stride_z, + self._padding, + self._dilation, + self._groups, + self._momentum, + self._eps, + self._data_format, + self._fuse_add, + self._has_shortcut, + self._use_global_stats, + self._is_test, + self._act, + ) return out diff --git a/python/paddle/incubate/operators/softmax_mask_fuse.py b/python/paddle/incubate/operators/softmax_mask_fuse.py index 27afabaa0db6bbad578509b0e98dce1da83584bd..b8081af826da3039c6517609dd85567fc7c58fb6 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse.py @@ -60,10 +60,9 @@ def softmax_mask_fuse(x, mask, name=None): return out helper = LayerHelper('fused_softmax_mask', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='fused_softmax_mask', - inputs={ - 'X': [x], - 'Mask': [mask] - }, - outputs={'Out': [out]}) + helper.append_op( + type='fused_softmax_mask', + inputs={'X': [x], 'Mask': [mask]}, + outputs={'Out': [out]}, + ) return out diff --git 
a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py index d73b8ba6d652e172e988884544a3e50cbaeb294a..aab40986befd0c88d1e1ea59c3ed2c612b961f2b 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py @@ -63,7 +63,9 @@ def softmax_mask_fuse_upper_triangle(x): out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='fused_softmax_mask_upper_triangle', - inputs={'X': [x]}, - outputs={'Out': [out]}) + helper.append_op( + type='fused_softmax_mask_upper_triangle', + inputs={'X': [x]}, + outputs={'Out': [out]}, + ) return out diff --git a/python/paddle/incubate/optimizer/distributed_fused_lamb.py b/python/paddle/incubate/optimizer/distributed_fused_lamb.py index 56a9a99134775375439a129d71e046446302a14d..ef1bacd075268ee60aea848ca7c370dd0d483c3f 100644 --- a/python/paddle/incubate/optimizer/distributed_fused_lamb.py +++ b/python/paddle/incubate/optimizer/distributed_fused_lamb.py @@ -32,84 +32,88 @@ def init_communicator(block, rank, ranks, ring_id): local_rank = ranks.index(rank) comm_var_name = unique_name.generate('comm_id') - comm_id_var = block.create_var(name=comm_var_name, - persistable=True, - type=core.VarDesc.VarType.RAW) - block.append_op(type='c_gen_nccl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id - }) - block.append_op(type='c_comm_init', - inputs={'X': comm_id_var}, - outputs={}, - attrs={ - 'nranks': len(ranks), - 'rank': local_rank, - 'ring_id': ring_id - }) + comm_id_var = block.create_var( + name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW + ) + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': comm_id_var}, + outputs={}, + attrs={'nranks': len(ranks), 'rank': local_rank, 'ring_id': ring_id}, + ) tmp_var = block.create_var(name=unique_name.generate('tmp')) - block.append_op(type='fill_constant', - outputs={'Out': tmp_var}, - attrs={'value': 1}) - block.append_op(type='c_allreduce_sum', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True - }) - block.append_op(type='c_sync_calc_stream', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}) + block.append_op( + type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1} + ) + block.append_op( + type='c_allreduce_sum', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + attrs={'ring_id': ring_id, 'use_calc_stream': True}, + ) + block.append_op( + type='c_sync_calc_stream', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + ) return ring_id def broadcast_parameters(block, parameters, ring_id): for p in parameters: - block.append_op(type='c_broadcast', - inputs={'X': p}, - outputs={'Out': p}, - attrs={ - 'ring_id': ring_id, - 'use_calc_stream': True - }) + block.append_op( + type='c_broadcast', + inputs={'X': p}, + outputs={'Out': p}, + attrs={'ring_id': ring_id, 'use_calc_stream': True}, + ) class DistributedFusedLamb(Optimizer): - - def __init__(self, - learning_rate=0.001, - lamb_weight_decay=0.01, - beta1=0.9, - beta2=0.999, - epsilon=1e-6, - parameters=None, - grad_clip=None, - 
exclude_from_weight_decay_fn=None, - clip_after_allreduce=True, - is_grad_scaled_by_nranks=True, - alignment=128, - use_master_param_norm=True, - gradient_accumulation_steps=1, - use_master_acc_grad=True, - nproc_per_node=None, - use_hierarchical_allreduce=False, - name=None): - assert not framework._non_static_mode( + def __init__( + self, + learning_rate=0.001, + lamb_weight_decay=0.01, + beta1=0.9, + beta2=0.999, + epsilon=1e-6, + parameters=None, + grad_clip=None, + exclude_from_weight_decay_fn=None, + clip_after_allreduce=True, + is_grad_scaled_by_nranks=True, + alignment=128, + use_master_param_norm=True, + gradient_accumulation_steps=1, + use_master_acc_grad=True, + nproc_per_node=None, + use_hierarchical_allreduce=False, + name=None, + ): + assert ( + not framework._non_static_mode() ), "DistributedFusedLamb does not support dygraph mode" - super(DistributedFusedLamb, self).__init__(learning_rate=learning_rate, - grad_clip=None, - name=name) + super(DistributedFusedLamb, self).__init__( + learning_rate=learning_rate, grad_clip=None, name=name + ) self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon - self._weight_decay = lamb_weight_decay if lamb_weight_decay is not None else 0.0 + self._weight_decay = ( + lamb_weight_decay if lamb_weight_decay is not None else 0.0 + ) if grad_clip is not None: assert isinstance( grad_clip, ClipGradByGlobalNorm @@ -137,14 +141,16 @@ class DistributedFusedLamb(Optimizer): self._found_inf = main_block.create_var( name=unique_name.generate('found_inf'), shape=[1], - dtype=core.VarDesc.VarType.BOOL) + dtype=core.VarDesc.VarType.BOOL, + ) self._step = None if self._gradient_accumulation_steps > 1: self._stop_update = main_block.create_var( name=unique_name.generate('stop_update'), shape=[1], - dtype=core.VarDesc.VarType.BOOL) + dtype=core.VarDesc.VarType.BOOL, + ) else: self._stop_update = None @@ -169,11 +175,13 @@ class DistributedFusedLamb(Optimizer): def _create_scale_from_constant(self, value): name = unique_name.generate('global_scale') - return layers.create_global_var(name=name, - shape=[1], - dtype='float32', - value=float(value), - persistable=True) + return layers.create_global_var( + name=name, + shape=[1], + dtype='float32', + value=float(value), + persistable=True, + ) def _get_or_create_scale(self): if self._scale is None: @@ -184,17 +192,21 @@ class DistributedFusedLamb(Optimizer): startup_block = self.helper.startup_program.global_block() if name is not None: name = unique_name.generate(name) - startup_var = startup_block.create_var(name=name, - shape=shape, - dtype=dtype, - persistable=True, - stop_gradient=True) + startup_var = startup_block.create_var( + name=name, + shape=shape, + dtype=dtype, + persistable=True, + stop_gradient=True, + ) main_block = self.helper.main_program.global_block() - main_var = main_block.create_var(name=startup_var.name, - shape=startup_var.shape, - dtype=startup_var.dtype, - persistable=True, - stop_gradient=True) + main_var = main_block.create_var( + name=startup_var.name, + shape=startup_var.shape, + dtype=startup_var.dtype, + persistable=True, + stop_gradient=True, + ) return main_var def _get_parameter(self, name, scope=None): @@ -224,20 +236,25 @@ class DistributedFusedLamb(Optimizer): for p, g in params_grads: flattened.extend([p, g]) with flattened[0].block.program._optimized_guard(flattened), name_scope( - "optimizer"): + "optimizer" + ): self._apply_gradients_impl(params_grads) def _apply_gradients_impl(self, params_grads): for p, g in params_grads: - assert g.type == 
core.VarDesc.VarType.LOD_TENSOR, "Only support dense gradient" + assert ( + g.type == core.VarDesc.VarType.LOD_TENSOR + ), "Only support dense gradient" g.persistable = True # the gradient must be persistable for fusion fp32_fused_param = self._create_persistable_var('fp32_fused_param') fp32_fused_grad = self._create_persistable_var('fp32_fused_grad') - fp16_fused_param = self._create_persistable_var('fp16_fused_param', - dtype='float16') - fp16_fused_grad = self._create_persistable_var('fp16_fused_grad', - dtype='float16') + fp16_fused_param = self._create_persistable_var( + 'fp16_fused_param', dtype='float16' + ) + fp16_fused_grad = self._create_persistable_var( + 'fp16_fused_grad', dtype='float16' + ) master_params = [] for p, g in params_grads: @@ -255,15 +272,18 @@ class DistributedFusedLamb(Optimizer): param_info = self._create_persistable_var('param_info', dtype='int32') param_info.is_distributed = True - fused_offsets = self._create_persistable_var('fused_offsets', - dtype='int32') + fused_offsets = self._create_persistable_var( + 'fused_offsets', dtype='int32' + ) fp32_partial_fused_offsets = self._create_persistable_var( - 'fp32_partial_fused_offsets', dtype='int32') + 'fp32_partial_fused_offsets', dtype='int32' + ) fp32_partial_fused_offsets.is_distributed = True fp16_partial_fused_offsets = self._create_persistable_var( - 'fp16_partial_fused_offsets', dtype='int32') + 'fp16_partial_fused_offsets', dtype='int32' + ) fp16_partial_fused_offsets.is_distributed = True param_order = self._create_persistable_var('param_order', dtype='int32') @@ -274,8 +294,9 @@ class DistributedFusedLamb(Optimizer): self._create_persistable_var('fp32_acc_fused_grad') ] fp16_acc_fused_grad = [ - self._create_persistable_var('fp16_acc_fused_grad', - dtype='float16') + self._create_persistable_var( + 'fp16_acc_fused_grad', dtype='float16' + ) ] acc_step = [self._create_persistable_var('acc_step', dtype='int64')] else: @@ -291,33 +312,40 @@ class DistributedFusedLamb(Optimizer): nproc_per_node = nranks else: nproc_per_node = self._nproc_per_node - assert nranks % nproc_per_node == 0, "nranks should be exactly divided by nproc_per_node" + assert ( + nranks % nproc_per_node == 0 + ), "nranks should be exactly divided by nproc_per_node" - shard_inside_node = (nranks > nproc_per_node) + shard_inside_node = nranks > nproc_per_node local_rank = rank % nproc_per_node node_id = int(rank / nproc_per_node) node_num = int(nranks / nproc_per_node) ring_ids = [] startup_block = self.helper.startup_program.global_block() if nranks > 1: - ring_id = init_communicator(startup_block, rank, - list(range(nranks)), 0) + ring_id = init_communicator( + startup_block, rank, list(range(nranks)), 0 + ) ring_ids.append(ring_id) use_hierarchical_allreduce = False if node_num > 1 and len(ring_ids) <= 1 and shard_inside_node: local_group_ranks = list( - range(node_id * nproc_per_node, (node_id + 1) * nproc_per_node)) - ring_id = init_communicator(startup_block, rank, local_group_ranks, - 1) + range(node_id * nproc_per_node, (node_id + 1) * nproc_per_node) + ) + ring_id = init_communicator( + startup_block, rank, local_group_ranks, 1 + ) ring_ids.append(ring_id) if self._use_hierarchical_allreduce and nranks > nproc_per_node: use_hierarchical_allreduce = True outer_group_ranks = list( - range(rank % nproc_per_node, nranks, nproc_per_node)) - ring_id = init_communicator(startup_block, rank, - outer_group_ranks, ring_ids[-1] + 1) + range(rank % nproc_per_node, nranks, nproc_per_node) + ) + ring_id = init_communicator( + startup_block, 
rank, outer_group_ranks, ring_ids[-1] + 1 + ) ring_ids.append(ring_id) scale = self._get_or_create_scale() @@ -331,11 +359,13 @@ class DistributedFusedLamb(Optimizer): apply_weight_decay[i] = 0 for g in grads: - startup_block.create_var(name=g.name, - type=g.type, - dtype=g.dtype, - persistable=g.persistable, - shape=g.shape) + startup_block.create_var( + name=g.name, + type=g.type, + dtype=g.dtype, + persistable=g.persistable, + shape=g.shape, + ) if nranks > 1: broadcast_parameters(startup_block, params, ring_ids[0]) @@ -375,7 +405,8 @@ class DistributedFusedLamb(Optimizer): 'moment2': 0.0, 'beta1': self._beta1, 'beta2': self._beta2, - }) + }, + ) main_block = self.helper.main_program.global_block() self._create_global_learning_rate() @@ -418,19 +449,15 @@ class DistributedFusedLamb(Optimizer): 'Moment2Out': [moment2], 'Beta1PowOut': [beta1pow], 'Beta2PowOut': [beta2pow], - 'ParamOut': - params, - 'GradOut': - grads, + 'ParamOut': params, + 'GradOut': grads, 'FoundInf': [self._found_inf], - 'FP32AccFusedGrad': - fp32_acc_fused_grad, - 'FP16AccFusedGrad': - fp16_acc_fused_grad, - 'AccStep': - acc_step, - 'StopUpdate': - self._stop_update if self._stop_update is not None else [], + 'FP32AccFusedGrad': fp32_acc_fused_grad, + 'FP16AccFusedGrad': fp16_acc_fused_grad, + 'AccStep': acc_step, + 'StopUpdate': self._stop_update + if self._stop_update is not None + else [], 'Step': [step], }, attrs={ @@ -448,5 +475,6 @@ class DistributedFusedLamb(Optimizer): 'acc_steps': self._gradient_accumulation_steps, 'use_master_acc_grad': self._use_master_acc_grad, 'use_hierarchical_allreduce': use_hierarchical_allreduce, - }) + }, + ) return [lamb_op] diff --git a/python/paddle/incubate/optimizer/functional/bfgs.py b/python/paddle/incubate/optimizer/functional/bfgs.py index 58d647f4dd4f955a75601e28e4cea9b57d994779..3349cd533e2d7902d006a5009196b4e14d405a70 100644 --- a/python/paddle/incubate/optimizer/functional/bfgs.py +++ b/python/paddle/incubate/optimizer/functional/bfgs.py @@ -15,22 +15,28 @@ import numpy as np from .line_search import strong_wolfe -from .utils import _value_and_gradient, check_input_type, check_initial_inverse_hessian_estimate +from .utils import ( + _value_and_gradient, + check_input_type, + check_initial_inverse_hessian_estimate, +) import paddle -def minimize_bfgs(objective_func, - initial_position, - max_iters=50, - tolerance_grad=1e-7, - tolerance_change=1e-9, - initial_inverse_hessian_estimate=None, - line_search_fn='strong_wolfe', - max_line_search_iters=50, - initial_step_length=1.0, - dtype='float32', - name=None): +def minimize_bfgs( + objective_func, + initial_position, + max_iters=50, + tolerance_grad=1e-7, + tolerance_change=1e-9, + initial_inverse_hessian_estimate=None, + line_search_fn='strong_wolfe', + max_line_search_iters=50, + initial_step_length=1.0, + dtype='float32', + name=None, +): r""" Minimizes a differentiable function `func` using the BFGS method. The BFGS is a quasi-Newton method for solving an unconstrained optimization problem over a differentiable function. @@ -91,8 +97,10 @@ def minimize_bfgs(objective_func, if dtype not in ['float32', 'float64']: raise ValueError( - "The dtype must be 'float32' or 'float64', but the specified is {}." 
- .format(dtype)) + "The dtype must be 'float32' or 'float64', but the specified is {}.".format( + dtype + ) + ) op_name = 'minimize_bfgs' check_input_type(initial_position, 'initial_position', op_name) @@ -101,8 +109,11 @@ def minimize_bfgs(objective_func, if initial_inverse_hessian_estimate is None: initial_inverse_hessian_estimate = I else: - check_input_type(initial_inverse_hessian_estimate, - 'initial_inverse_hessian_estimate', op_name) + check_input_type( + initial_inverse_hessian_estimate, + 'initial_inverse_hessian_estimate', + op_name, + ) check_initial_inverse_hessian_estimate(initial_inverse_hessian_estimate) Hk = paddle.assign(initial_inverse_hessian_estimate) @@ -131,11 +142,14 @@ def minimize_bfgs(objective_func, xk=xk, pk=pk, initial_step_length=initial_step_length, - dtype=dtype) + dtype=dtype, + ) else: raise NotImplementedError( - "Currently only support line_search_fn = 'strong_wolfe', but the specified is '{}'" - .format(line_search_fn)) + "Currently only support line_search_fn = 'strong_wolfe', but the specified is '{}'".format( + line_search_fn + ) + ) num_func_calls += ls_func_calls ############# update Hk ############# @@ -150,14 +164,17 @@ def minimize_bfgs(objective_func, rhok_inv = paddle.dot(yk, sk) rhok = paddle.static.nn.cond( - rhok_inv == 0., + rhok_inv == 0.0, lambda: paddle.full(shape=[1], fill_value=1000.0, dtype=dtype), - lambda: 1. / rhok_inv) + lambda: 1.0 / rhok_inv, + ) Vk_transpose = I - rhok * sk * yk.t() Vk = I - rhok * yk * sk.t() - Hk = paddle.matmul(paddle.matmul(Vk_transpose, Hk), - Vk) + rhok * sk * sk.t() + Hk = ( + paddle.matmul(paddle.matmul(Vk_transpose, Hk), Vk) + + rhok * sk * sk.t() + ) k += 1 @@ -165,15 +182,16 @@ def minimize_bfgs(objective_func, gnorm = paddle.linalg.norm(g1, p=np.inf) pk_norm = paddle.linalg.norm(pk, p=np.inf) paddle.assign( - done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), - done) + done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), done + ) paddle.assign(done, is_converge) # when alpha=0, there is no chance to get xk change. 
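As a usage sketch for the minimize_bfgs signature reformatted above (the objective and starting point are illustrative, and the public entry point is assumed to be paddle.incubate.optimizer.functional): the function takes a differentiable scalar objective plus an initial position, and returns the convergence flag, the number of objective evaluations, the final position, the final value, its gradient, and the final inverse Hessian estimate.

import paddle
from paddle.incubate.optimizer.functional import minimize_bfgs

def objective(x):
    # a simple convex quadratic whose minimizer is the origin
    return paddle.dot(x, x)

x0 = paddle.to_tensor([1.3, 2.7], dtype="float32")
is_converge, num_calls, xk, value, grad, inv_hessian = minimize_bfgs(
    objective, x0, max_iters=50, tolerance_grad=1e-7
)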
- paddle.assign(done | (alpha == 0.), done) + paddle.assign(done | (alpha == 0.0), done) return [k, done, is_converge, num_func_calls, xk, value, g1, Hk] paddle.static.nn.while_loop( cond=cond, body=body, - loop_vars=[k, done, is_converge, num_func_calls, xk, value, g1, Hk]) + loop_vars=[k, done, is_converge, num_func_calls, xk, value, g1, Hk], + ) return is_converge, num_func_calls, xk, value, g1, Hk diff --git a/python/paddle/incubate/optimizer/functional/lbfgs.py b/python/paddle/incubate/optimizer/functional/lbfgs.py index 5925d47decd8655db1dcc537bdb3b00716abc6ed..a0cf07f8d4ab4f6235796cd36b59d7d8cae4cc38 100644 --- a/python/paddle/incubate/optimizer/functional/lbfgs.py +++ b/python/paddle/incubate/optimizer/functional/lbfgs.py @@ -15,23 +15,29 @@ import numpy as np from .line_search import strong_wolfe -from .utils import _value_and_gradient, check_input_type, check_initial_inverse_hessian_estimate +from .utils import ( + _value_and_gradient, + check_input_type, + check_initial_inverse_hessian_estimate, +) import paddle -def minimize_lbfgs(objective_func, - initial_position, - history_size=100, - max_iters=50, - tolerance_grad=1e-8, - tolerance_change=1e-8, - initial_inverse_hessian_estimate=None, - line_search_fn='strong_wolfe', - max_line_search_iters=50, - initial_step_length=1.0, - dtype='float32', - name=None): +def minimize_lbfgs( + objective_func, + initial_position, + history_size=100, + max_iters=50, + tolerance_grad=1e-8, + tolerance_change=1e-8, + initial_inverse_hessian_estimate=None, + line_search_fn='strong_wolfe', + max_line_search_iters=50, + initial_step_length=1.0, + dtype='float32', + name=None, +): r""" Minimizes a differentiable function `func` using the L-BFGS method. The L-BFGS is a quasi-Newton method for solving an unconstrained optimization problem over a differentiable function. @@ -91,8 +97,10 @@ def minimize_lbfgs(objective_func, """ if dtype not in ['float32', 'float64']: raise ValueError( - "The dtype must be 'float32' or 'float64', but the specified is {}." 
- .format(dtype)) + "The dtype must be 'float32' or 'float64', but the specified is {}.".format( + dtype + ) + ) op_name = 'minimize_lbfgs' check_input_type(initial_position, 'initial_position', op_name) @@ -100,8 +108,11 @@ def minimize_lbfgs(objective_func, if initial_inverse_hessian_estimate is None: H0 = paddle.eye(initial_position.shape[0], dtype=dtype) else: - check_input_type(initial_inverse_hessian_estimate, - 'initial_inverse_hessian_estimate', op_name) + check_input_type( + initial_inverse_hessian_estimate, + 'initial_inverse_hessian_estimate', + op_name, + ) check_initial_inverse_hessian_estimate(initial_inverse_hessian_estimate) H0 = initial_inverse_hessian_estimate @@ -114,9 +125,9 @@ def minimize_lbfgs(objective_func, is_converge = paddle.full(shape=[1], fill_value=False, dtype='bool') num_func_calls = paddle.full(shape=[1], fill_value=1, dtype='int64') - history_size = paddle.full(shape=[1], - fill_value=history_size, - dtype='int64') + history_size = paddle.full( + shape=[1], fill_value=history_size, dtype='int64' + ) head = paddle.full(shape=[1], fill_value=1, dtype='int64') tail = paddle.full(shape=[1], fill_value=0, dtype='int64') @@ -130,20 +141,44 @@ def minimize_lbfgs(objective_func, rhok_vec = paddle.zeros((history_size + 1, 1), dtype=dtype) ai_vec = paddle.zeros((history_size + 1, 1), dtype=dtype) - def cond(k, done, is_converge, num_func_calls, value, xk, g1, sk_vec, - yk_vec, rhok_vec, head, tail): + def cond( + k, + done, + is_converge, + num_func_calls, + value, + xk, + g1, + sk_vec, + yk_vec, + rhok_vec, + head, + tail, + ): return (k < max_iters) & ~done - def body(k, done, is_converge, num_func_calls, value, xk, g1, sk_vec, - yk_vec, rhok_vec, head, tail): + def body( + k, + done, + is_converge, + num_func_calls, + value, + xk, + g1, + sk_vec, + yk_vec, + rhok_vec, + head, + tail, + ): # use assign to cut off the relevance between g1 and q, or they will change together. ############# compute p_k by two-loop recursion ############# q = paddle.assign(g1) # In a array circle, the index may out of range, so must use mod. - i = paddle.full(shape=[1], - fill_value=(head - 1).mod(history_size), - dtype='int64') + i = paddle.full( + shape=[1], fill_value=(head - 1).mod(history_size), dtype='int64' + ) def cond(i, q): return i != tail @@ -180,11 +215,14 @@ def minimize_lbfgs(objective_func, xk=xk, pk=pk, initial_step_length=initial_step_length, - dtype=dtype) + dtype=dtype, + ) else: raise NotImplementedError( - "Currently only support line_search_fn = 'strong_wolfe', but the specified is '{}'" - .format(line_search_fn)) + "Currently only support line_search_fn = 'strong_wolfe', but the specified is '{}'".format( + line_search_fn + ) + ) paddle.assign(num_func_calls + ls_func_calls, num_func_calls) ############# update sk_vec, yk_vec, rhok_vec ############# @@ -193,9 +231,10 @@ def minimize_lbfgs(objective_func, rhok_inv = paddle.dot(yk, sk) rhok = paddle.static.nn.cond( - rhok_inv == 0., + rhok_inv == 0.0, lambda: paddle.full(shape=[1], fill_value=1000.0, dtype=dtype), - lambda: 1. / rhok_inv) + lambda: 1.0 / rhok_inv, + ) sk_vec[head] = sk yk_vec[head] = yk @@ -216,21 +255,43 @@ def minimize_lbfgs(objective_func, gnorm = paddle.linalg.norm(g1, p=np.inf) pk_norm = paddle.linalg.norm(pk, p=np.inf) paddle.assign( - done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), - done) + done | (gnorm < tolerance_grad) | (pk_norm < tolerance_change), done + ) paddle.assign(done, is_converge) # when alpha=0, there is no chance to get xk change. 
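For comparison with the BFGS variant, a short sketch of minimize_lbfgs under the same assumptions (illustrative objective, paddle.incubate.optimizer.functional entry point): L-BFGS keeps only history_size recent (s, y) pairs instead of a dense inverse Hessian estimate, so it returns one fewer output.

import paddle
from paddle.incubate.optimizer.functional import minimize_lbfgs

def objective(x):
    # shifted quadratic whose minimizer is the all-ones vector
    return paddle.sum((x - 1.0) ** 2)

x0 = paddle.to_tensor([0.0, 0.0, 0.0, 0.0], dtype="float32")
is_converge, num_calls, xk, value, grad = minimize_lbfgs(
    objective, x0, history_size=10, max_iters=50
)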
- paddle.assign(done | (alpha == 0.), done) + paddle.assign(done | (alpha == 0.0), done) return [ - k, done, is_converge, num_func_calls, value, xk, g1, sk_vec, yk_vec, - rhok_vec, head, tail + k, + done, + is_converge, + num_func_calls, + value, + xk, + g1, + sk_vec, + yk_vec, + rhok_vec, + head, + tail, ] - paddle.static.nn.while_loop(cond=cond, - body=body, - loop_vars=[ - k, done, is_converge, num_func_calls, value, - xk, g1, sk_vec, yk_vec, rhok_vec, head, tail - ]) + paddle.static.nn.while_loop( + cond=cond, + body=body, + loop_vars=[ + k, + done, + is_converge, + num_func_calls, + value, + xk, + g1, + sk_vec, + yk_vec, + rhok_vec, + head, + tail, + ], + ) return is_converge, num_func_calls, xk, value, g1 diff --git a/python/paddle/incubate/optimizer/functional/line_search.py b/python/paddle/incubate/optimizer/functional/line_search.py index 375f86ddbe53330e52dc33e140b51bd3b82b0f4c..94400200f2535d2584134353843cc43b3a579ef0 100644 --- a/python/paddle/incubate/optimizer/functional/line_search.py +++ b/python/paddle/incubate/optimizer/functional/line_search.py @@ -31,8 +31,9 @@ def cubic_interpolation_(x1, f1, g1, x2, f2, g2): Returns: min_pos: the minimun point between the specified points in the cubic curve. """ - xmin, xmax = paddle.static.nn.cond(x1 <= x2, lambda: (x1, x2), lambda: - (x2, x1)) + xmin, xmax = paddle.static.nn.cond( + x1 <= x2, lambda: (x1, x2), lambda: (x2, x1) + ) d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) d2_square = d1**2 - g1 * g2 @@ -51,22 +52,24 @@ def cubic_interpolation_(x1, f1, g1, x2, f2, g2): return paddle.minimum(paddle.maximum(min_pos, xmin), xmax) def false_func1(): - return (xmin + xmax) / 2. + return (xmin + xmax) / 2.0 - min_pos = paddle.static.nn.cond(d2_square >= 0., true_func1, false_func1) + min_pos = paddle.static.nn.cond(d2_square >= 0.0, true_func1, false_func1) return min_pos -def strong_wolfe(f, - xk, - pk, - max_iters=20, - tolerance_change=1e-8, - initial_step_length=1.0, - c1=1e-4, - c2=0.9, - alpha_max=10, - dtype='float32'): +def strong_wolfe( + f, + xk, + pk, + max_iters=20, + tolerance_change=1e-8, + initial_step_length=1.0, + c1=1e-4, + c2=0.9, + alpha_max=10, + dtype='float32', +): r"""Implements of line search algorithm that satisfies the strong Wolfe conditions using double zoom. Reference: @@ -143,36 +146,67 @@ def strong_wolfe(f, def phi_and_derphi(a): r"""Compute function value and derivative of phi at a. - phi = f(xk + a * pk) - phi'(a) = f'(xk + a * pk) * pk + phi = f(xk + a * pk) + phi'(a) = f'(xk + a * pk) * pk """ phi_value, f_grad = _value_and_gradient(f, xk + a * pk) phi_grad = paddle.dot(f_grad, pk) # return f_grad to be used in bfgs/l-bfgs to compute yk to avoid computint repeatly. 
return phi_value, f_grad, phi_grad - def zoom(a_lo, phi_lo, derphi_lo, derf_lo, a_hi, phi_hi, derphi_hi, phi_0, - derphi_0): + def zoom( + a_lo, + phi_lo, + derphi_lo, + derf_lo, + a_hi, + phi_hi, + derphi_hi, + phi_0, + derphi_0, + ): # find the exact a from the bracket [a_lo, a_hi] max_zoom_iters = max_iters j = paddle.full(shape=[1], fill_value=0, dtype='int64') done_zoom = paddle.full(shape=[1], fill_value=False, dtype='bool') - def cond_zoom(j, done_zoom, a_lo, phi_lo, derphi_lo, derf_lo, a_hi, - phi_hi, derphi_hi): + def cond_zoom( + j, + done_zoom, + a_lo, + phi_lo, + derphi_lo, + derf_lo, + a_hi, + phi_hi, + derphi_hi, + ): pred = paddle.abs(a_hi - a_lo) < tolerance_change paddle.assign(done_zoom | pred, done_zoom) return (j < max_zoom_iters) & ~done_zoom - def body_zoom(j, done_zoom, a_lo, phi_lo, derphi_lo, derf_lo, a_hi, - phi_hi, derphi_hi): - aj = cubic_interpolation_(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, - derphi_hi) # 21 + def body_zoom( + j, + done_zoom, + a_lo, + phi_lo, + derphi_lo, + derf_lo, + a_hi, + phi_hi, + derphi_hi, + ): + aj = cubic_interpolation_( + a_lo, phi_lo, derphi_lo, a_hi, phi_hi, derphi_hi + ) # 21 min_change = 0.1 * paddle.abs(a_hi - a_lo) - pred = paddle.minimum(paddle.abs(aj - a_lo), - paddle.abs(aj - a_hi)) < min_change - aj = paddle.static.nn.cond(pred, lambda: 0.5 * (a_lo + a_hi), - lambda: aj) + pred = ( + paddle.minimum(paddle.abs(aj - a_lo), paddle.abs(aj - a_hi)) + < min_change + ) + aj = paddle.static.nn.cond( + pred, lambda: 0.5 * (a_lo + a_hi), lambda: aj + ) phi_j, derf_j, derphi_j = phi_and_derphi(aj) @@ -183,7 +217,7 @@ def strong_wolfe(f, paddle.assign(derphi_j, derphi_hi) def false_fn(a_lo, done_zoom): - pred3 = (paddle.abs(derphi_j) <= -c2 * derphi_0) + pred3 = paddle.abs(derphi_j) <= -c2 * derphi_0 paddle.assign(pred3, done_zoom) def true_fn(): @@ -200,26 +234,43 @@ def strong_wolfe(f, paddle.assign(derf_j, derf_lo) pred2 = (phi_j > phi_0 + c1 * aj * derphi_0) | (phi_j >= phi_lo) - paddle.static.nn.cond(pred2, true_fn, - lambda: false_fn(a_lo, done_zoom)) + paddle.static.nn.cond( + pred2, true_fn, lambda: false_fn(a_lo, done_zoom) + ) j = paddle.static.nn.cond(done_zoom, lambda: j, lambda: j + 1) return [ - j, done_zoom, a_lo, phi_lo, derphi_lo, derf_lo, a_hi, phi_hi, - derphi_hi + j, + done_zoom, + a_lo, + phi_lo, + derphi_lo, + derf_lo, + a_hi, + phi_hi, + derphi_hi, ] - paddle.static.nn.while_loop(cond=cond_zoom, - body=body_zoom, - loop_vars=[ - j, done_zoom, a_lo, phi_lo, derphi_lo, - derf_lo, a_hi, phi_hi, derphi_hi - ]) + paddle.static.nn.while_loop( + cond=cond_zoom, + body=body_zoom, + loop_vars=[ + j, + done_zoom, + a_lo, + phi_lo, + derphi_lo, + derf_lo, + a_hi, + phi_hi, + derphi_hi, + ], + ) # j is the number of object function called in zoom. 
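The zoom logic above is driven by the two strong Wolfe conditions on phi(a) = f(xk + a * pk), with c1 and c2 as in the strong_wolfe signature. A hypothetical helper (not part of this patch) spelling out the acceptance test, assuming pk is a descent direction so that derphi_0 < 0:

def satisfies_strong_wolfe(a, phi_a, derphi_a, phi_0, derphi_0, c1=1e-4, c2=0.9):
    # sufficient-decrease (Armijo) condition
    sufficient_decrease = phi_a <= phi_0 + c1 * a * derphi_0
    # curvature condition; with derphi_0 < 0 this is the same test as
    # paddle.abs(derphi_j) <= -c2 * derphi_0 used in the code above
    curvature = abs(derphi_a) <= c2 * abs(derphi_0)
    return sufficient_decrease and curvature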
return j alpha_max = paddle.full(shape=[1], fill_value=alpha_max, dtype=dtype) - a1 = paddle.full(shape=[1], fill_value=0., dtype=dtype) + a1 = paddle.full(shape=[1], fill_value=0.0, dtype=dtype) a2 = paddle.full(shape=[1], fill_value=initial_step_length, dtype=dtype) phi_1, derf_1, derphi_1 = phi_and_derphi(a1) @@ -245,15 +296,25 @@ def strong_wolfe(f, paddle.assign(done | paddle.any(paddle.isinf(phi_2)), done) def true_fn1(): - j = zoom(a1, phi_1, derphi_1, derf_1, a2, phi_2, derphi_2, phi_0, - derphi_0) + j = zoom( + a1, + phi_1, + derphi_1, + derf_1, + a2, + phi_2, + derphi_2, + phi_0, + derphi_0, + ) paddle.assign(a1, a_star) paddle.assign(phi_1, phi_star) paddle.assign(derf_1, derf_star) paddle.assign(ls_func_calls + j, ls_func_calls) - pred1 = ~done & ((phi_2 > phi_0 + c1 * a2 * derphi_0) | - ((phi_2 >= phi_0) & (i > 1))) + pred1 = ~done & ( + (phi_2 > phi_0 + c1 * a2 * derphi_0) | ((phi_2 >= phi_0) & (i > 1)) + ) paddle.assign(done | pred1, done) paddle.static.nn.cond(pred1, true_fn1, None) @@ -267,8 +328,17 @@ def strong_wolfe(f, paddle.static.nn.cond(pred2, true_fn2, None) def true_fn3(): - j = zoom(a2, phi_2, derphi_2, derf_2, a1, phi_1, derphi_1, phi_0, - derphi_0) + j = zoom( + a2, + phi_2, + derphi_2, + derf_2, + a1, + phi_1, + derphi_1, + phi_0, + derphi_0, + ) paddle.assign(a2, a_star) paddle.assign(phi_2, phi_star) paddle.assign(derf_2, derf_star) @@ -291,6 +361,7 @@ def strong_wolfe(f, paddle.static.nn.while_loop( cond=cond, body=body, - loop_vars=[i, ls_func_calls, a1, a2, phi_1, derf_1, done]) + loop_vars=[i, ls_func_calls, a1, a2, phi_1, derf_1, done], + ) return a_star, phi_star, derf_star, ls_func_calls diff --git a/python/paddle/incubate/optimizer/functional/utils.py b/python/paddle/incubate/optimizer/functional/utils.py index e6612878e710c92ecaf4220f613688b3d3522a65..f9d0dddb117df0c48a05a2f198dc26c62dd8e964 100644 --- a/python/paddle/incubate/optimizer/functional/utils.py +++ b/python/paddle/incubate/optimizer/functional/utils.py @@ -53,26 +53,29 @@ def check_initial_inverse_hessian_estimate(H0): else: def create_tmp_var(program, name, dtype, shape): - return program.current_block().create_var(name=name, - dtype=dtype, - shape=shape) + return program.current_block().create_var( + name=name, dtype=dtype, shape=shape + ) - out_var = create_tmp_var(paddle.static.default_main_program(), - name='output', - dtype='float32', - shape=[-1]) + out_var = create_tmp_var( + paddle.static.default_main_program(), + name='output', + dtype='float32', + shape=[-1], + ) def false_fn(): - paddle.static.nn.py_func(func=raise_func, - x=is_symmetric, - out=out_var) + paddle.static.nn.py_func( + func=raise_func, x=is_symmetric, out=out_var + ) paddle.static.nn.cond(is_symmetric, None, false_fn) # eigvals only support cpu paddle.set_device("cpu") eigvals = paddle.paddle.linalg.eigvals(H0) - is_positive = paddle.all(eigvals.real() > 0.) and paddle.all( - eigvals.imag() == 0.) 
+ is_positive = paddle.all(eigvals.real() > 0.0) and paddle.all( + eigvals.imag() == 0.0 + ) paddle.static.nn.cond(is_positive, None, false_fn) diff --git a/python/paddle/incubate/optimizer/lookahead.py b/python/paddle/incubate/optimizer/lookahead.py index dcf50cb5224eded23b21e5fc516793c30fdc108d..99a3be4f8e4d2517a41db34fd0b09e0c7908cd2e 100644 --- a/python/paddle/incubate/optimizer/lookahead.py +++ b/python/paddle/incubate/optimizer/lookahead.py @@ -115,24 +115,27 @@ class LookAhead(Optimizer): _slow_str = "slow" def __init__(self, inner_optimizer, alpha=0.5, k=5, name=None): - assert (inner_optimizer is not None), "inner optimizer can not be None" + assert inner_optimizer is not None, "inner optimizer can not be None" assert ( 0.0 <= alpha <= 1.0 ), "alpha should be larger or equal to 0.0, and less or equal than 1.0" - assert (isinstance(k, int) and k > 0), "k should be a positive integer" + assert isinstance(k, int) and k > 0, "k should be a positive integer" self.inner_optimizer = inner_optimizer if self.inner_optimizer._parameter_list is None: - parameters = framework.default_main_program().global_block( - ).all_parameters() + parameters = ( + framework.default_main_program().global_block().all_parameters() + ) else: parameters = self.inner_optimizer._parameter_list - super(LookAhead, self).__init__(learning_rate=alpha, - parameters=parameters, - weight_decay=None, - grad_clip=None, - name=name) + super(LookAhead, self).__init__( + learning_rate=alpha, + parameters=parameters, + weight_decay=None, + grad_clip=None, + name=name, + ) self.alpha = alpha self.k = k @@ -178,9 +181,9 @@ class LookAhead(Optimizer): grad_var = param._grad_ivar() params_grads.append((param, grad_var)) - self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) @@ -195,24 +198,28 @@ class LookAhead(Optimizer): shape=[1], value=0, dtype='int32', - persistable=True) + persistable=True, + ) - self.helper.append_op(type='increment', - inputs={'X': [self._global_step_var]}, - outputs={'Out': [self._global_step_var]}, - attrs={'step': 1.0}) + self.helper.append_op( + type='increment', + inputs={'X': [self._global_step_var]}, + outputs={'Out': [self._global_step_var]}, + attrs={'step': 1.0}, + ) def _append_optimize_op(self, block, param_and_grad): one_var = paddle.ones(shape=[1], dtype='int32', name='lookahead_ones') - zero_var = paddle.zeros(shape=[1], - dtype='int32', - name='lookahead_zeros') + zero_var = paddle.zeros( + shape=[1], dtype='int32', name='lookahead_zeros' + ) k_var = layers.create_global_var( name=unique_name.generate("lookahead_k"), shape=[1], value=self.k, dtype='int32', - persistable=True) + persistable=True, + ) mod = paddle.remainder(self._global_step_var, k_var) @@ -235,11 +242,9 @@ class LookAhead(Optimizer): paddle.assign(tmp_var_1, slow_var) @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): """ Add operations to minimize ``loss`` by updating ``parameters``. 
@@ -286,12 +291,13 @@ class LookAhead(Optimizer): loss, startup_program=startup_program, parameters=parameters, - no_grad_set=no_grad_set) + no_grad_set=no_grad_set, + ) self._increment_global_var() - _ = self._apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + _ = self._apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads diff --git a/python/paddle/incubate/optimizer/modelaverage.py b/python/paddle/incubate/optimizer/modelaverage.py index 5fa9dfb67bcb43e517ebbe0f4e5a34b8cc763f83..fc00f28cb4467e30968d1b570a13b059cc27258d 100644 --- a/python/paddle/incubate/optimizer/modelaverage.py +++ b/python/paddle/incubate/optimizer/modelaverage.py @@ -162,17 +162,21 @@ class ModelAverage(Optimizer): """ - def __init__(self, - average_window_rate, - parameters=None, - min_average_window=10000, - max_average_window=10000, - name=None): - super(ModelAverage, self).__init__(learning_rate=0.0, - parameters=parameters, - weight_decay=None, - grad_clip=None, - name=name) + def __init__( + self, + average_window_rate, + parameters=None, + min_average_window=10000, + max_average_window=10000, + name=None, + ): + super(ModelAverage, self).__init__( + learning_rate=0.0, + parameters=parameters, + weight_decay=None, + grad_clip=None, + name=name, + ) self.helper = LayerHelper(self.__class__.__name__) self.average_window = average_window_rate @@ -182,7 +186,8 @@ class ModelAverage(Optimizer): if not framework._non_static_mode(): global_block = framework.default_main_program().global_block() - all_parameters = parameters if parameters else global_block.all_parameters( + all_parameters = ( + parameters if parameters else global_block.all_parameters() ) self._create_accumulators(global_block, all_parameters) @@ -207,18 +212,15 @@ class ModelAverage(Optimizer): self._add_accumulator('sum_2', param) self._add_accumulator('sum_3', param) self._add_accumulator('restore', param) - self._add_accumulator('num_accumulates', - param, - dtype='int64', - shape=[1]) - self._add_accumulator('old_num_accumulates', - param, - dtype='int64', - shape=[1]) - self._add_accumulator('num_updates', - param, - dtype='int64', - shape=[1]) + self._add_accumulator( + 'num_accumulates', param, dtype='int64', shape=[1] + ) + self._add_accumulator( + 'old_num_accumulates', param, dtype='int64', shape=[1] + ) + self._add_accumulator( + 'num_updates', param, dtype='int64', shape=[1] + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -226,26 +228,50 @@ class ModelAverage(Optimizer): sum_1 = self._get_accumulator('sum_1', param_and_grad[0]) sum_2 = self._get_accumulator('sum_2', param_and_grad[0]) sum_3 = self._get_accumulator('sum_3', param_and_grad[0]) - num_accumulates = self._get_accumulator('num_accumulates', - param_and_grad[0]) - old_num_accumulates = self._get_accumulator('old_num_accumulates', - param_and_grad[0]) + num_accumulates = self._get_accumulator( + 'num_accumulates', param_and_grad[0] + ) + old_num_accumulates = self._get_accumulator( + 'old_num_accumulates', param_and_grad[0] + ) num_updates = self._get_accumulator('num_updates', param_and_grad[0]) if in_dygraph_mode(): _, _, _, _, _, _ = _C_ops.average_accumulates_( - param_and_grad[0], sum_1, sum_2, sum_3, num_accumulates, - old_num_accumulates, num_updates, self.average_window, - self.max_average_window, self.min_average_window) + param_and_grad[0], + sum_1, + sum_2, + sum_3, + num_accumulates, + old_num_accumulates, + 
num_updates, + self.average_window, + self.max_average_window, + self.min_average_window, + ) return None elif framework._non_static_mode(): _, _, _, _, _, _ = _legacy_C_ops.average_accumulates( - param_and_grad[0], sum_1, sum_2, sum_3, num_accumulates, - old_num_accumulates, num_updates, sum_1, sum_2, sum_3, - num_accumulates, old_num_accumulates, num_updates, - 'average_window', self.average_window, 'min_average_window', - self.min_average_window, 'max_average_window', - self.max_average_window) + param_and_grad[0], + sum_1, + sum_2, + sum_3, + num_accumulates, + old_num_accumulates, + num_updates, + sum_1, + sum_2, + sum_3, + num_accumulates, + old_num_accumulates, + num_updates, + 'average_window', + self.average_window, + 'min_average_window', + self.min_average_window, + 'max_average_window', + self.max_average_window, + ) return None block = framework.default_main_program().global_block() @@ -262,7 +288,7 @@ class ModelAverage(Optimizer): "in_sum_3": sum_3, "in_num_accumulates": num_accumulates, "in_old_num_accumulates": old_num_accumulates, - "in_num_updates": num_updates + "in_num_updates": num_updates, } outputs = { @@ -274,20 +300,20 @@ class ModelAverage(Optimizer): "out_num_updates": num_updates, } - average_accumulates_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + average_accumulates_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return average_accumulates_op @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): """ Add operations to minimize ``loss`` by updating ``parameters``. @@ -423,10 +449,12 @@ class ModelAverage(Optimizer): """ if framework._non_static_mode(): for param in self._parameter_list: - num_accumulates = self._get_accumulator('num_accumulates', - param) + num_accumulates = self._get_accumulator( + 'num_accumulates', param + ) old_num_accumulates = self._get_accumulator( - 'old_num_accumulates', param) + 'old_num_accumulates', param + ) sum_1 = self._get_accumulator('sum_1', param) sum_2 = self._get_accumulator('sum_2', param) sum_3 = self._get_accumulator('sum_3', param) @@ -436,8 +464,9 @@ class ModelAverage(Optimizer): total_param = sum_1 + sum_2 + sum_3 total_accumulates = num_accumulates + old_num_accumulates total_param = paddle.cast(total_param, dtype='float32') - total_accumulates = paddle.cast(total_accumulates, - dtype='float32') + total_accumulates = paddle.cast( + total_accumulates, dtype='float32' + ) average_param = total_param / total_accumulates paddle.assign(average_param, param) try: @@ -448,7 +477,8 @@ class ModelAverage(Optimizer): return if executor is None: raise RuntimeError( - "Executor should not be None in static graph mode.") + "Executor should not be None in static graph mode." + ) executor.run(self.apply_program) try: yield @@ -504,7 +534,8 @@ class ModelAverage(Optimizer): return if executor is None: raise RuntimeError( - "Executor should not be None in static graph mode.") + "Executor should not be None in static graph mode." 
+ ) executor.run(self.restore_program) def _add_average_apply_op(self, block, param): @@ -514,18 +545,22 @@ class ModelAverage(Optimizer): sum_2 = block._clone_variable(self._get_accumulator('sum_2', param)) sum_3 = block._clone_variable(self._get_accumulator('sum_3', param)) num_accumulates = block._clone_variable( - self._get_accumulator('num_accumulates', param)) + self._get_accumulator('num_accumulates', param) + ) old_num_accumulates = block._clone_variable( - self._get_accumulator('old_num_accumulates', param)) + self._get_accumulator('old_num_accumulates', param) + ) # backup param value to grad layers.assign(input=param, output=grad) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) tmp = layers.cast( - x=tmp, dtype='float32' if self._dtype is None else self._dtype) + x=tmp, dtype='float32' if self._dtype is None else self._dtype + ) sum = layers.cast( - x=sum, dtype='float32' if self._dtype is None else self._dtype) + x=sum, dtype='float32' if self._dtype is None else self._dtype + ) layers.ops._elementwise_div(x=sum, y=tmp, out=param) def _add_average_restore_op(self, block, param): diff --git a/python/paddle/incubate/passes/fuse_resnet_unit_pass.py b/python/paddle/incubate/passes/fuse_resnet_unit_pass.py index fce745ffaf8c247c87b4c8b217444b224d7874bf..6441427f469d660f499b9c6242add3c7b2b90137 100644 --- a/python/paddle/incubate/passes/fuse_resnet_unit_pass.py +++ b/python/paddle/incubate/passes/fuse_resnet_unit_pass.py @@ -21,32 +21,35 @@ def set_resnet_unit_attrs(resnet_unit, has_shortcut): resnet_unit.SetAttr("has_shortcut", has_shortcut) resnet_unit.SetAttr("data_format", 'NHWC') resnet_unit.SetAttr("dilation", 1) - resnet_unit.Attr("stride").MappedPattern(op="conv2d", - name="strides", - element_index=0) - resnet_unit.Attr("padding").MappedPattern(op="conv2d", - name="paddings", - element_index=0) + resnet_unit.Attr("stride").MappedPattern( + op="conv2d", name="strides", element_index=0 + ) + resnet_unit.Attr("padding").MappedPattern( + op="conv2d", name="paddings", element_index=0 + ) resnet_unit.Attr("group").MappedPattern(op="conv2d", name="groups") resnet_unit.Attr("op_device").MappedPattern(op="conv2d", name="op_device") - resnet_unit.Attr("op_namescope").MappedPattern(op="conv2d", - name="op_namescope") + resnet_unit.Attr("op_namescope").MappedPattern( + op="conv2d", name="op_namescope" + ) resnet_unit.Attr("momentum").MappedPattern(op="batch_norm", name="momentum") resnet_unit.Attr("epsilon").MappedPattern(op="batch_norm", name="epsilon") - resnet_unit.Attr("use_global_stats").MappedPattern(op="batch_norm", - name="use_global_stats") + resnet_unit.Attr("use_global_stats").MappedPattern( + op="batch_norm", name="use_global_stats" + ) def set_resnet_unit_outputs(resnet_unit, meanX, varX, meanZ=None, varZ=None): - resnet_unit.SetOutputs(RunningMeanX=meanX, - RunningVarX=varX, - RunningMeanZ=meanZ, - RunningVarZ=varZ) + resnet_unit.SetOutputs( + RunningMeanX=meanX, + RunningVarX=varX, + RunningMeanZ=meanZ, + RunningVarZ=varZ, + ) @ir.RegisterPass def fuse_resnet_unit(): - def pattern_conv_bn(x, filter, scale, bias, mean, var): filter.Attr("shape")[0].Mod(32).EQ(0) filter.Attr("shape")[1].Mod(8).EQ(0) @@ -54,11 +57,9 @@ def fuse_resnet_unit(): filter.Attr("shape")[3].EQ(1) conv2d = ir.PassDesc.OP.conv2d(Input=x, Filter=filter) conv2d.SetAttr("data_format", 'NHWC') - bn = ir.PassDesc.OP.batch_norm(X=conv2d, - Bias=bias, - Mean=mean, - Scale=scale, - 
Variance=var) + bn = ir.PassDesc.OP.batch_norm( + X=conv2d, Bias=bias, Mean=mean, Scale=scale, Variance=var + ) return bn def pattern_one_input(x, filter, scale, bias, mean, var): @@ -67,42 +68,68 @@ def fuse_resnet_unit(): return relu def replace_one_input(x, filter, scale, bias, mean, var): - resnet_unit = ir.PassDesc.OP.resnet_unit(X=x, - FilterX=filter, - ScaleX=scale, - BiasX=bias, - MeanX=mean, - VarX=var) + resnet_unit = ir.PassDesc.OP.resnet_unit( + X=x, FilterX=filter, ScaleX=scale, BiasX=bias, MeanX=mean, VarX=var + ) set_resnet_unit_attrs(resnet_unit, False) set_resnet_unit_outputs(resnet_unit, mean, var) return resnet_unit.Output("Y") - def pattern_two_input(x, filterX, scaleX, biasX, meanX, varX, z, filterZ, - scaleZ, biasZ, meanZ, varZ): + def pattern_two_input( + x, + filterX, + scaleX, + biasX, + meanX, + varX, + z, + filterZ, + scaleZ, + biasZ, + meanZ, + varZ, + ): bnX = pattern_conv_bn(x, filterX, scaleX, biasX, meanX, varX) bnZ = pattern_conv_bn(x, filterZ, scaleZ, biasZ, meanZ, varZ) - ewadd = ir.PassDesc.OP.elementwise_add(X=bnX.Output("Y"), - Y=bnZ.Output("Y")) + ewadd = ir.PassDesc.OP.elementwise_add( + X=bnX.Output("Y"), Y=bnZ.Output("Y") + ) relu = ir.PassDesc.OP.relu(X=ewadd) return relu - def replace_two_input(x, filterX, scaleX, biasX, meanX, varX, z, filterZ, - scaleZ, biasZ, meanZ, varZ): - resnet_unit = ir.PassDesc.OP.resnet_unit(X=x, - FilterX=filterX, - ScaleX=scaleX, - BiasX=biasX, - MeanX=meanX, - VarX=varX, - Z=z, - FilterZ=filterZ, - ScaleZ=scaleZ, - BiasZ=biasZ, - MeanZ=meanZ, - VarZ=varZ) + def replace_two_input( + x, + filterX, + scaleX, + biasX, + meanX, + varX, + z, + filterZ, + scaleZ, + biasZ, + meanZ, + varZ, + ): + resnet_unit = ir.PassDesc.OP.resnet_unit( + X=x, + FilterX=filterX, + ScaleX=scaleX, + BiasX=biasX, + MeanX=meanX, + VarX=varX, + Z=z, + FilterZ=filterZ, + ScaleZ=scaleZ, + BiasZ=biasZ, + MeanZ=meanZ, + VarZ=varZ, + ) set_resnet_unit_attrs(resnet_unit, True) set_resnet_unit_outputs(resnet_unit, meanX, varX, meanZ, varZ) return resnet_unit.Output("Y") - return (pattern_one_input, replace_one_input), (pattern_two_input, - replace_two_input) + return (pattern_one_input, replace_one_input), ( + pattern_two_input, + replace_two_input, + ) diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py index 005d2cee2ddfcfc7bde9b42499ffa38c24a13a19..36f4944427f37229a3cdb6758c53c185f10654e1 100644 --- a/python/paddle/incubate/tensor/math.py +++ b/python/paddle/incubate/tensor/math.py @@ -21,10 +21,12 @@ import paddle.utils.deprecated as deprecated __all__ = [] -@deprecated(since="2.4.0", - update_to="paddle.geometric.segment_sum", - level=1, - reason="paddle.incubate.segment_sum will be removed in future") +@deprecated( + since="2.4.0", + update_to="paddle.geometric.segment_sum", + level=1, + reason="paddle.incubate.segment_sum will be removed in future", +) def segment_sum(data, segment_ids, name=None): r""" Segment Sum Operator. 
@@ -59,36 +61,36 @@ def segment_sum(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "SUM")[0] if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "SUM") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "SUM" + ) return out - check_variable_and_dtype(data, "X", - ("float32", "float64", "int32", "int64"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + check_variable_and_dtype( + data, "X", ("float32", "float64", "int32", "int64"), "segment_pool" + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_sum", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "SUM"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "SUM"}, + ) return out -@deprecated(since="2.4.0", - update_to="paddle.geometric.segment_mean", - level=1, - reason="paddle.incubate.segment_mean will be removed in future") +@deprecated( + since="2.4.0", + update_to="paddle.geometric.segment_mean", + level=1, + reason="paddle.incubate.segment_mean will be removed in future", +) def segment_mean(data, segment_ids, name=None): r""" Segment mean Operator. @@ -125,36 +127,36 @@ def segment_mean(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MEAN")[0] if _non_static_mode(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MEAN") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MEAN" + ) return out - check_variable_and_dtype(data, "X", - ("float32", "float64", "int32", "int64"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + check_variable_and_dtype( + data, "X", ("float32", "float64", "int32", "int64"), "segment_pool" + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_mean", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MEAN"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MEAN"}, + ) return out -@deprecated(since="2.4.0", - update_to="paddle.geometric.segment_min", - level=1, - reason="paddle.incubate.segment_min will be removed in future") +@deprecated( + since="2.4.0", + update_to="paddle.geometric.segment_min", + level=1, + reason="paddle.incubate.segment_min will be removed in future", +) def segment_min(data, segment_ids, name=None): r""" Segment min operator. 
@@ -191,36 +193,36 @@ def segment_min(data, segment_ids, name=None): return _C_ops.segment_pool(data, segment_ids, "MIN")[0] if _non_static_mode(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MIN") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MIN" + ) return out - check_variable_and_dtype(data, "X", - ("float32", "float64", "int32", "int64"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + check_variable_and_dtype( + data, "X", ("float32", "float64", "int32", "int64"), "segment_pool" + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_min", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MIN"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MIN"}, + ) return out -@deprecated(since="2.4.0", - update_to="paddle.geometric.segment_max", - level=1, - reason="paddle.incubate.segment_max will be removed in future") +@deprecated( + since="2.4.0", + update_to="paddle.geometric.segment_max", + level=1, + reason="paddle.incubate.segment_max will be removed in future", +) def segment_max(data, segment_ids, name=None): r""" Segment max operator. @@ -258,27 +260,25 @@ def segment_max(data, segment_ids, name=None): return out if _non_static_mode(): - out, tmp = _legacy_C_ops.segment_pool(data, segment_ids, 'pooltype', - "MAX") + out, tmp = _legacy_C_ops.segment_pool( + data, segment_ids, 'pooltype', "MAX" + ) return out - check_variable_and_dtype(data, "X", - ("float32", "float64", "int32", "int64"), - "segment_pool") - check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"), - "segment_pool") + check_variable_and_dtype( + data, "X", ("float32", "float64", "int32", "int64"), "segment_pool" + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" + ) helper = LayerHelper("segment_max", **locals()) out = helper.create_variable_for_type_inference(dtype=data.dtype) summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op(type="segment_pool", - inputs={ - "X": data, - "SegmentIds": segment_ids - }, - outputs={ - "Out": out, - "SummedIds": summed_ids - }, - attrs={"pooltype": "MAX"}) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MAX"}, + ) return out diff --git a/python/paddle/incubate/xpu/resnet_block.py b/python/paddle/incubate/xpu/resnet_block.py index 1eb3637fa585197f5a839c5ebc1b70228848455c..148ea5a5e919f6e47ea10d6a3cca8c0ddd0dfa08 100644 --- a/python/paddle/incubate/xpu/resnet_block.py +++ b/python/paddle/incubate/xpu/resnet_block.py @@ -24,106 +24,232 @@ from paddle import _legacy_C_ops __all__ = ['resnet_basic_block', 'ResNetBasicBlock'] -def resnet_basic_block(x, - filter1, - scale1, - bias1, - mean1, - var1, - filter2, - scale2, - bias2, - mean2, - var2, - filter3, - scale3, - bias3, - mean3, - var3, - stride1, - stride2, - stride3, - padding1, - padding2, - padding3, - dilation1, - dilation2, - dilation3, - 
groups, - momentum, - eps, - data_format, - has_shortcut, - use_global_stats=None, - training=False, - trainable_statistics=False, - find_conv_max=True): +def resnet_basic_block( + x, + filter1, + scale1, + bias1, + mean1, + var1, + filter2, + scale2, + bias2, + mean2, + var2, + filter3, + scale3, + bias3, + mean3, + var3, + stride1, + stride2, + stride3, + padding1, + padding2, + padding3, + dilation1, + dilation2, + dilation3, + groups, + momentum, + eps, + data_format, + has_shortcut, + use_global_stats=None, + training=False, + trainable_statistics=False, + find_conv_max=True, +): if fluid.framework._non_static_mode(): - attrs = ('stride1', stride1, 'stride2', stride2, 'stride3', stride3, - 'padding1', padding1, 'padding2', padding2, 'padding3', - padding3, 'dilation1', dilation1, 'dilation2', dilation2, - 'dilation3', dilation3, 'group', groups, 'momentum', momentum, - 'epsilon', eps, 'data_format', data_format, 'has_shortcut', - has_shortcut, 'use_global_stats', use_global_stats, - "trainable_statistics", trainable_statistics, 'is_test', - not training, 'act_type', "relu", 'find_conv_input_max', - find_conv_max) + attrs = ( + 'stride1', + stride1, + 'stride2', + stride2, + 'stride3', + stride3, + 'padding1', + padding1, + 'padding2', + padding2, + 'padding3', + padding3, + 'dilation1', + dilation1, + 'dilation2', + dilation2, + 'dilation3', + dilation3, + 'group', + groups, + 'momentum', + momentum, + 'epsilon', + eps, + 'data_format', + data_format, + 'has_shortcut', + has_shortcut, + 'use_global_stats', + use_global_stats, + "trainable_statistics", + trainable_statistics, + 'is_test', + not training, + 'act_type', + "relu", + 'find_conv_input_max', + find_conv_max, + ) - out, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = \ - _legacy_C_ops.resnet_basic_block(x, filter1, scale1, bias1, mean1, var1, filter2, scale2, bias2, mean2, var2, \ - filter3, scale3, bias3, mean3, var3, mean1, var1, mean2, var2, mean3, var3, *attrs) + ( + out, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + _, + ) = _legacy_C_ops.resnet_basic_block( + x, + filter1, + scale1, + bias1, + mean1, + var1, + filter2, + scale2, + bias2, + mean2, + var2, + filter3, + scale3, + bias3, + mean3, + var3, + mean1, + var1, + mean2, + var2, + mean3, + var3, + *attrs + ) return out helper = LayerHelper('resnet_basic_block', **locals()) bn_param_dtype = fluid.core.VarDesc.VarType.FP32 max_dtype = fluid.core.VarDesc.VarType.FP32 - out = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) - conv1 = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) + out = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) + conv1 = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_mean1 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) saved_invstd1 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) - running_mean1 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if mean1 is None else mean1 - running_var1 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if var1 is None else var1 - conv2 = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) - conv2_input = helper.create_variable_for_type_inference(dtype=x.dtype, - 
stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) + running_mean1 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if mean1 is None + else mean1 + ) + running_var1 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if var1 is None + else var1 + ) + conv2 = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) + conv2_input = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_mean2 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) saved_invstd2 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) - running_mean2 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if mean2 is None else mean2 - running_var2 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if var2 is None else var2 - conv3 = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) + running_mean2 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if mean2 is None + else mean2 + ) + running_var2 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if var2 is None + else var2 + ) + conv3 = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_mean3 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) + dtype=bn_param_dtype, stop_gradient=True + ) saved_invstd3 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) - running_mean3 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if mean3 is None else mean3 - running_var3 = helper.create_variable_for_type_inference( - dtype=bn_param_dtype, stop_gradient=True) if var3 is None else var3 + dtype=bn_param_dtype, stop_gradient=True + ) + running_mean3 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if mean3 is None + else mean3 + ) + running_var3 = ( + helper.create_variable_for_type_inference( + dtype=bn_param_dtype, stop_gradient=True + ) + if var3 is None + else var3 + ) conv1_input_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) conv1_filter_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) conv2_input_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) conv2_filter_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) conv3_input_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) conv3_filter_max = helper.create_variable_for_type_inference( - dtype=max_dtype, stop_gradient=True) + dtype=max_dtype, stop_gradient=True + ) inputs = { 'X': x, @@ -163,7 +289,7 @@ def resnet_basic_block(x, "trainable_statistics": trainable_statistics, 'is_test': not training, 'act_type': "relu", - 'find_conv_input_max': find_conv_max + 'find_conv_input_max': find_conv_max, } outputs = { @@ 
-191,10 +317,9 @@ def resnet_basic_block(x, 'MaxInput3': conv3_input_max, 'MaxFilter3': conv3_filter_max, } - helper.append_op(type='resnet_basic_block', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='resnet_basic_block', inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -303,56 +428,60 @@ class ResNetBasicBlock(Layer): print(out.shape) # [2, 8, 16, 16] """ - def __init__(self, - num_channels1, - num_filter1, - filter1_size, - num_channels2, - num_filter2, - filter2_size, - num_channels3, - num_filter3, - filter3_size, - stride1=1, - stride2=1, - stride3=1, - act='relu', - momentum=0.9, - eps=1e-5, - data_format='NCHW', - has_shortcut=False, - use_global_stats=False, - is_test=False, - filter1_attr=None, - scale1_attr=None, - bias1_attr=None, - moving_mean1_name=None, - moving_var1_name=None, - filter2_attr=None, - scale2_attr=None, - bias2_attr=None, - moving_mean2_name=None, - moving_var2_name=None, - filter3_attr=None, - scale3_attr=None, - bias3_attr=None, - moving_mean3_name=None, - moving_var3_name=None, - padding1=0, - padding2=0, - padding3=0, - dilation1=1, - dilation2=1, - dilation3=1, - trainable_statistics=False, - find_conv_max=True): + def __init__( + self, + num_channels1, + num_filter1, + filter1_size, + num_channels2, + num_filter2, + filter2_size, + num_channels3, + num_filter3, + filter3_size, + stride1=1, + stride2=1, + stride3=1, + act='relu', + momentum=0.9, + eps=1e-5, + data_format='NCHW', + has_shortcut=False, + use_global_stats=False, + is_test=False, + filter1_attr=None, + scale1_attr=None, + bias1_attr=None, + moving_mean1_name=None, + moving_var1_name=None, + filter2_attr=None, + scale2_attr=None, + bias2_attr=None, + moving_mean2_name=None, + moving_var2_name=None, + filter3_attr=None, + scale3_attr=None, + bias3_attr=None, + moving_mean3_name=None, + moving_var3_name=None, + padding1=0, + padding2=0, + padding3=0, + dilation1=1, + dilation2=1, + dilation3=1, + trainable_statistics=False, + find_conv_max=True, + ): super(ResNetBasicBlock, self).__init__() self._stride1 = stride1 self._stride2 = stride2 - self._kernel1_size = utils.convert_to_list(filter1_size, 2, - 'filter1_size') - self._kernel2_size = utils.convert_to_list(filter2_size, 2, - 'filter2_size') + self._kernel1_size = utils.convert_to_list( + filter1_size, 2, 'filter1_size' + ) + self._kernel2_size = utils.convert_to_list( + filter2_size, 2, 'filter2_size' + ) self._dilation1 = dilation1 self._dilation2 = dilation2 self._padding1 = padding1 @@ -369,8 +498,9 @@ class ResNetBasicBlock(Layer): self._find_conv_max = find_conv_max if has_shortcut: - self._kernel3_size = utils.convert_to_list(filter3_size, 2, - 'filter3_size') + self._kernel3_size = utils.convert_to_list( + filter3_size, 2, 'filter3_size' + ) self._padding3 = padding3 self._stride3 = stride3 self._dilation3 = dilation3 @@ -385,11 +515,13 @@ class ResNetBasicBlock(Layer): if data_format not in valid_format: raise ValueError( "conv_format must be one of {}, but got conv_format={}".format( - valid_format, data_format)) + valid_format, data_format + ) + ) def _get_default_param_initializer(channels, kernel_size): filter_elem_num = np.prod(kernel_size) * channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return I.Normal(0.0, std) # init filter @@ -403,92 +535,128 @@ class ResNetBasicBlock(Layer): shape=filter1_shape, attr=filter1_attr, default_initializer=_get_default_param_initializer( - num_channels1, self._kernel1_size)) + num_channels1, self._kernel1_size + ), 
+ ) self.scale_1 = self.create_parameter( shape=bn1_param_shape, attr=scale1_attr, dtype=bn_param_dtype, - default_initializer=I.Constant(1.0)) - self.bias_1 = self.create_parameter(shape=bn1_param_shape, - attr=bias1_attr, - dtype=bn_param_dtype, - is_bias=True) - self.mean_1 = self.create_parameter(attr=ParamAttr( - name=moving_mean1_name, - initializer=I.Constant(0.0), - trainable=False), - shape=bn1_param_shape, - dtype=bn_param_dtype) + default_initializer=I.Constant(1.0), + ) + self.bias_1 = self.create_parameter( + shape=bn1_param_shape, + attr=bias1_attr, + dtype=bn_param_dtype, + is_bias=True, + ) + self.mean_1 = self.create_parameter( + attr=ParamAttr( + name=moving_mean1_name, + initializer=I.Constant(0.0), + trainable=False, + ), + shape=bn1_param_shape, + dtype=bn_param_dtype, + ) self.mean_1.stop_gradient = True self.var_1 = self.create_parameter( - attr=ParamAttr(name=moving_var1_name, - initializer=I.Constant(1.0), - trainable=False), + attr=ParamAttr( + name=moving_var1_name, + initializer=I.Constant(1.0), + trainable=False, + ), shape=bn1_param_shape, - dtype=bn_param_dtype) + dtype=bn_param_dtype, + ) self.var_1.stop_gradient = True self.filter_2 = self.create_parameter( shape=filter2_shape, attr=filter2_attr, default_initializer=_get_default_param_initializer( - num_channels2, self._kernel2_size)) + num_channels2, self._kernel2_size + ), + ) self.scale_2 = self.create_parameter( shape=bn2_param_shape, attr=scale2_attr, dtype=bn_param_dtype, - default_initializer=I.Constant(1.0)) - self.bias_2 = self.create_parameter(shape=bn2_param_shape, - attr=bias2_attr, - dtype=bn_param_dtype, - is_bias=True) - self.mean_2 = self.create_parameter(attr=ParamAttr( - name=moving_mean2_name, - initializer=I.Constant(0.0), - trainable=False), - shape=bn2_param_shape, - dtype=bn_param_dtype) + default_initializer=I.Constant(1.0), + ) + self.bias_2 = self.create_parameter( + shape=bn2_param_shape, + attr=bias2_attr, + dtype=bn_param_dtype, + is_bias=True, + ) + self.mean_2 = self.create_parameter( + attr=ParamAttr( + name=moving_mean2_name, + initializer=I.Constant(0.0), + trainable=False, + ), + shape=bn2_param_shape, + dtype=bn_param_dtype, + ) self.mean_2.stop_gradient = True self.var_2 = self.create_parameter( - attr=ParamAttr(name=moving_var2_name, - initializer=I.Constant(1.0), - trainable=False), + attr=ParamAttr( + name=moving_var2_name, + initializer=I.Constant(1.0), + trainable=False, + ), shape=bn2_param_shape, - dtype=bn_param_dtype) + dtype=bn_param_dtype, + ) self.var_2.stop_gradient = True if has_shortcut: bn3_param_shape = [1, 1, num_filter3] filter3_shape = [ - num_filter3, num_channels3, filter3_size, filter3_size + num_filter3, + num_channels3, + filter3_size, + filter3_size, ] self.filter_3 = self.create_parameter( shape=filter3_shape, attr=filter3_attr, default_initializer=_get_default_param_initializer( - num_channels3, self._kernel3_size)) + num_channels3, self._kernel3_size + ), + ) self.scale_3 = self.create_parameter( shape=bn3_param_shape, attr=scale3_attr, dtype=bn_param_dtype, - default_initializer=I.Constant(1.0)) - self.bias_3 = self.create_parameter(shape=bn3_param_shape, - attr=bias3_attr, - dtype=bn_param_dtype, - is_bias=True) - self.mean_3 = self.create_parameter(attr=ParamAttr( - name=moving_mean3_name, - initializer=I.Constant(0.0), - trainable=False), - shape=bn3_param_shape, - dtype=bn_param_dtype) + default_initializer=I.Constant(1.0), + ) + self.bias_3 = self.create_parameter( + shape=bn3_param_shape, + attr=bias3_attr, + dtype=bn_param_dtype, + 
is_bias=True, + ) + self.mean_3 = self.create_parameter( + attr=ParamAttr( + name=moving_mean3_name, + initializer=I.Constant(0.0), + trainable=False, + ), + shape=bn3_param_shape, + dtype=bn_param_dtype, + ) self.mean_3.stop_gradient = True - self.var_3 = self.create_parameter(attr=ParamAttr( - name=moving_var3_name, - initializer=I.Constant(1.0), - trainable=False), - shape=bn3_param_shape, - dtype=bn_param_dtype) + self.var_3 = self.create_parameter( + attr=ParamAttr( + name=moving_var3_name, + initializer=I.Constant(1.0), + trainable=False, + ), + shape=bn3_param_shape, + dtype=bn_param_dtype, + ) self.var_3.stop_gradient = True else: self.filter_3 = None @@ -532,5 +700,6 @@ class ResNetBasicBlock(Layer): use_global_stats=self._use_global_stats, training=self.training, trainable_statistics=self._trainable_statistics, - find_conv_max=self._find_conv_max) + find_conv_max=self._find_conv_max, + ) return out diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py index 1b9a96f437bbd2f9fae49fe28c3c4d40bd48cff8..359f5caccb0fa0021b3980f1400ed9c3dc1c439b 100644 --- a/python/paddle/inference/__init__.py +++ b/python/paddle/inference/__init__.py @@ -28,8 +28,18 @@ from ..fluid.inference import get_num_bytes_of_data_type # noqa: F401 from ..fluid.inference import PredictorPool # noqa: F401 __all__ = [ # noqa - 'Config', 'DataType', 'PlaceType', 'PrecisionType', 'Tensor', 'Predictor', - 'create_predictor', 'get_version', '_get_phi_kernel_name', - 'get_trt_compile_version', 'convert_to_mixed_precision', - 'get_trt_runtime_version', 'get_num_bytes_of_data_type', 'PredictorPool' + 'Config', + 'DataType', + 'PlaceType', + 'PrecisionType', + 'Tensor', + 'Predictor', + 'create_predictor', + 'get_version', + '_get_phi_kernel_name', + 'get_trt_compile_version', + 'convert_to_mixed_precision', + 'get_trt_runtime_version', + 'get_num_bytes_of_data_type', + 'PredictorPool', ] diff --git a/python/paddle/io/__init__.py b/python/paddle/io/__init__.py index 87acda904b5dae6251d72cd0f371286c0b94d1df..a9c0e9a2f2d2f89041648322b4512b90651d50e1 100755 --- a/python/paddle/io/__init__.py +++ b/python/paddle/io/__init__.py @@ -30,9 +30,20 @@ from ..fluid.dataloader import WeightedRandomSampler # noqa: F401 from ..fluid.dataloader import Subset # noqa: F401 from ..fluid.dataloader import random_split # noqa: F401 -__all__ = [ #noqa - 'Dataset', 'IterableDataset', 'TensorDataset', 'ComposeDataset', - 'ChainDataset', 'BatchSampler', 'DistributedBatchSampler', 'DataLoader', - 'get_worker_info', 'Sampler', 'SequenceSampler', 'RandomSampler', - 'WeightedRandomSampler', 'random_split', 'Subset' +__all__ = [ # noqa + 'Dataset', + 'IterableDataset', + 'TensorDataset', + 'ComposeDataset', + 'ChainDataset', + 'BatchSampler', + 'DistributedBatchSampler', + 'DataLoader', + 'get_worker_info', + 'Sampler', + 'SequenceSampler', + 'RandomSampler', + 'WeightedRandomSampler', + 'random_split', + 'Subset', ] diff --git a/python/paddle/jit/__init__.py b/python/paddle/jit/__init__.py index 857a5d8695a7569234ab3e950f5bce3500bed043..a706d4d1211029a60f34dbb24bc03a0418e9030a 100644 --- a/python/paddle/jit/__init__.py +++ b/python/paddle/jit/__init__.py @@ -26,6 +26,13 @@ from ..fluid.dygraph.io import TranslatedLayer # noqa: F401 from . 
import dy2static # noqa: F401 __all__ = [ # noqa - 'save', 'load', 'TracedLayer', 'to_static', 'ProgramTranslator', - 'TranslatedLayer', 'set_code_level', 'set_verbosity', 'not_to_static' + 'save', + 'load', + 'TracedLayer', + 'to_static', + 'ProgramTranslator', + 'TranslatedLayer', + 'set_code_level', + 'set_verbosity', + 'not_to_static', ] diff --git a/python/paddle/jit/dy2static/convert_call_func.py b/python/paddle/jit/dy2static/convert_call_func.py index 247efa8f4f15bc8bb66bde918f452aa7d9acd021..17e33f53bca727b5e9120450f4d91546cc93cff8 100644 --- a/python/paddle/jit/dy2static/convert_call_func.py +++ b/python/paddle/jit/dy2static/convert_call_func.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call # noqa: F401 +from ...fluid.dygraph.dygraph_to_static.convert_call_func import ( # noqa: F401 + convert_call, +) __all__ = [] diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index 3a941a1dff31904680f404d627797dd3b419575f..54210302c41787b5f47fb5c569ba0ca63104d3c7 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -12,19 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.convert_operators import unpack_by_structure, indexable, convert_attr # noqa: F401 +from ...fluid.dygraph.dygraph_to_static.convert_operators import ( # noqa: F401 + cast_bool_if_necessary, + convert_assert, + convert_ifelse, + convert_len, + convert_logical_and, + convert_logical_not, + convert_logical_or, + convert_pop, + convert_print, + convert_shape_compare, + convert_var_dtype, + convert_shape, + convert_while_loop, + unpack_by_structure, + indexable, + convert_attr, +) __all__ = [] diff --git a/python/paddle/jit/dy2static/variable_trans_func.py b/python/paddle/jit/dy2static/variable_trans_func.py index 582e677a503262031ce4cb28fa5c4e80b99f4e27..88f8cd3c2c464f7356f6f50c9ab554a49b810ba5 100644 --- a/python/paddle/jit/dy2static/variable_trans_func.py +++ b/python/paddle/jit/dy2static/variable_trans_func.py @@ -12,7 
+12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type # noqa: F401 -from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable # noqa: F401 +from ...fluid.dygraph.dygraph_to_static.variable_trans_func import ( # noqa: F401 + create_bool_as_type, + to_static_variable, +) __all__ = [] diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py index 97b598948500b1772656bfc4bd9154caa1e6078e..25d9ca5dbccebe7486e1759905aa48021a646a80 100644 --- a/python/paddle/jit/layer.py +++ b/python/paddle/jit/layer.py @@ -18,7 +18,6 @@ from paddle.fluid.core import Load class Layer(object): - def __init__(self): self.cpp_layer = None # {name: Function} @@ -34,8 +33,7 @@ class Layer(object): setattr(self, name, self.functions[name]) -class Function(): - +class Function: def __init__(self, function, info): self.function = function self.info = FunctionInfo(info) @@ -44,8 +42,7 @@ class Function(): return core.eager.jit_function_call(self.function, args) -class FunctionInfo(): - +class FunctionInfo: def __init__(self, info): self.info = info diff --git a/python/paddle/linalg.py b/python/paddle/linalg.py index 834b631e5c51981cbf20cec977a135ba78b9d426..9ea43479569b404037534c14e9d66766998ec5b3 100644 --- a/python/paddle/linalg.py +++ b/python/paddle/linalg.py @@ -38,7 +38,7 @@ from .tensor.linalg import triangular_solve # noqa: F401 from .tensor.linalg import lstsq __all__ = [ - 'cholesky', #noqa + 'cholesky', # noqa 'norm', 'cond', 'cov', @@ -61,5 +61,5 @@ __all__ = [ 'solve', 'cholesky_solve', 'triangular_solve', - 'lstsq' + 'lstsq', ] diff --git a/python/paddle/metric/__init__.py b/python/paddle/metric/__init__.py index 70fe075e57744bff725c8ff24b1ff6f4ee6f89fe..60dff58ec48bfe3877caf3e731a11a65abcf4893 100644 --- a/python/paddle/metric/__init__.py +++ b/python/paddle/metric/__init__.py @@ -19,6 +19,11 @@ from .metrics import Recall # noqa: F401 from .metrics import Auc # noqa: F401 from .metrics import accuracy # noqa: F401 -__all__ = [ #noqa - 'Metric', 'Accuracy', 'Precision', 'Recall', 'Auc', 'accuracy' +__all__ = [ # noqa + 'Metric', + 'Accuracy', + 'Precision', + 'Recall', + 'Auc', + 'accuracy', ] diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 1404c7531b4ee7ef38a379fe386fc0e0a12780fa..e8493b17b9256e9aad404cbe12b223e94a510386 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -116,7 +116,9 @@ class Metric(metaclass=abc.ABCMeta): """ raise NotImplementedError( "function 'reset' not implemented in {}.".format( - self.__class__.__name__)) + self.__class__.__name__ + ) + ) @abc.abstractmethod def update(self, *args): @@ -132,7 +134,9 @@ class Metric(metaclass=abc.ABCMeta): """ raise NotImplementedError( "function 'update' not implemented in {}.".format( - self.__class__.__name__)) + self.__class__.__name__ + ) + ) @abc.abstractmethod def accumulate(self): @@ -141,7 +145,9 @@ class Metric(metaclass=abc.ABCMeta): """ raise NotImplementedError( "function 'accumulate' not implemented in {}.".format( - self.__class__.__name__)) + self.__class__.__name__ + ) + ) @abc.abstractmethod def name(self): @@ -150,7 +156,9 @@ class Metric(metaclass=abc.ABCMeta): """ raise NotImplementedError( "function 'name' not implemented in {}.".format( - self.__class__.__name__)) + self.__class__.__name__ + ) + ) def compute(self, *args): """ @@ -231,7 +239,7 @@ class Accuracy(Metric): 
""" - def __init__(self, topk=(1, ), name=None, *args, **kwargs): + def __init__(self, topk=(1,), name=None, *args, **kwargs): super(Accuracy, self).__init__(*args, **kwargs) self.topk = topk self.maxk = max(topk) @@ -253,12 +261,12 @@ class Accuracy(Metric): Tensor: Correct mask, a tensor with shape [batch_size, d0, ..., topk]. """ pred = paddle.argsort(pred, descending=True) - pred = paddle.slice(pred, - axes=[len(pred.shape) - 1], - starts=[0], - ends=[self.maxk]) - if (len(label.shape) == 1) or \ - (len(label.shape) == 2 and label.shape[-1] == 1): + pred = paddle.slice( + pred, axes=[len(pred.shape) - 1], starts=[0], ends=[self.maxk] + ) + if (len(label.shape) == 1) or ( + len(label.shape) == 2 and label.shape[-1] == 1 + ): # In static mode, the real label data shape may be different # from shape defined by paddle.static.InputSpec in model # building, reshape to the right shape. @@ -297,7 +305,7 @@ class Accuracy(Metric): """ Resets all of the metric state. """ - self.total = [0.] * len(self.topk) + self.total = [0.0] * len(self.topk) self.count = [0] * len(self.topk) def accumulate(self): @@ -306,7 +314,7 @@ class Accuracy(Metric): """ res = [] for t, c in zip(self.total, self.count): - r = float(t) / c if c > 0 else 0. + r = float(t) / c if c > 0 else 0.0 res.append(r) res = res[0] if len(self.topk) == 1 else res return res @@ -446,7 +454,7 @@ class Precision(Metric): A scaler float: results of the calculated precision. """ ap = self.tp + self.fp - return float(self.tp) / ap if ap != 0 else .0 + return float(self.tp) / ap if ap != 0 else 0.0 def name(self): """ @@ -572,7 +580,7 @@ class Recall(Metric): A scaler float: results of the calculated Recall. """ recall = self.tp + self.fn - return float(self.tp) / recall if recall != 0 else .0 + return float(self.tp) / recall if recall != 0 else 0.0 def reset(self): """ @@ -670,12 +678,9 @@ class Auc(Metric): model.fit(data, batch_size=16) """ - def __init__(self, - curve='ROC', - num_thresholds=4095, - name='auc', - *args, - **kwargs): + def __init__( + self, curve='ROC', num_thresholds=4095, name='auc', *args, **kwargs + ): super(Auc, self).__init__(*args, **kwargs) self._curve = curve self._num_thresholds = num_thresholds @@ -737,11 +742,14 @@ class Auc(Metric): tot_neg_prev = tot_neg tot_pos += self._stat_pos[idx] tot_neg += self._stat_neg[idx] - auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos, - tot_pos_prev) + auc += self.trapezoid_area( + tot_neg, tot_neg_prev, tot_pos, tot_pos_prev + ) idx -= 1 - return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0 + return ( + auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0 + ) def reset(self): """ @@ -799,29 +807,29 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None): total = _varbase_creator(dtype="int32") topk_out, topk_indices = paddle.topk(input, k=k) - _acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label, - correct, total) + _acc, _, _ = _legacy_C_ops.accuracy( + topk_out, topk_indices, label, correct, total + ) return _acc helper = LayerHelper("accuracy", **locals()) - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'accuracy') + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64'], 'accuracy' + ) topk_out, topk_indices = paddle.topk(input, k=k) acc_out = helper.create_variable_for_type_inference(dtype="float32") if correct is None: correct = helper.create_variable_for_type_inference(dtype="int32") if total is None: total = 
helper.create_variable_for_type_inference(dtype="int32") - helper.append_op(type="accuracy", - inputs={ - "Out": [topk_out], - "Indices": [topk_indices], - "Label": [label] - }, - outputs={ - "Accuracy": [acc_out], - "Correct": [correct], - "Total": [total], - }) + helper.append_op( + type="accuracy", + inputs={"Out": [topk_out], "Indices": [topk_indices], "Label": [label]}, + outputs={ + "Accuracy": [acc_out], + "Correct": [correct], + "Total": [total], + }, + ) return acc_out diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 331131d6e23198eda86b7a882432f5ac17f4e400..8e02cdffd5e05ae1c344dd950ebd16c589b32d36 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -163,35 +163,41 @@ from . import quant # noqa: F401 import paddle.utils.deprecated as deprecated -@deprecated(since="2.0.0", - update_to="paddle.nn.funcitional.diag_embed", - level=1, - reason="diag_embed in paddle.nn will be removed in future") +@deprecated( + since="2.0.0", + update_to="paddle.nn.funcitional.diag_embed", + level=1, + reason="diag_embed in paddle.nn will be removed in future", +) def diag_embed(*args): ''' - alias name of paddle.nn.functional.diag_embed + alias name of paddle.nn.functional.diag_embed ''' return functional.diag_embed(*args) -@deprecated(since="2.0.0", - update_to="paddle.nn.utils.remove_weight_norm", - level=1, - reason="remove_weight_norm in paddle.nn will be removed in future") +@deprecated( + since="2.0.0", + update_to="paddle.nn.utils.remove_weight_norm", + level=1, + reason="remove_weight_norm in paddle.nn will be removed in future", +) def remove_weight_norm(*args): ''' - alias name of paddle.nn.utils.remove_weight_norm + alias name of paddle.nn.utils.remove_weight_norm ''' return utils.remove_weight_norm(*args) -@deprecated(since="2.0.0", - update_to="paddle.nn.utils.weight_norm", - level=1, - reason="weight_norm in paddle.nn will be removed in future") +@deprecated( + since="2.0.0", + update_to="paddle.nn.utils.weight_norm", + level=1, + reason="weight_norm in paddle.nn will be removed in future", +) def weight_norm(*args): ''' - alias name of paddle.nn.utils.weight_norm + alias name of paddle.nn.utils.weight_norm ''' return utils.weight_norm(*args) diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index 929fd2437156158f9f1348772f5f9127691365d6..254ab7ff8a005e87541066937e8469ff79b56c7a 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -68,10 +68,12 @@ def celu(x, alpha=1.0, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu') helper = LayerHelper("celu", **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='celu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'alpha': alpha}) + helper.append_op( + type='celu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'alpha': alpha}, + ) return out @@ -118,10 +120,12 @@ def elu(x, alpha=1.0, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu') helper = LayerHelper("elu", **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='elu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'alpha': alpha}) + helper.append_op( + type='elu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'alpha': alpha}, + ) return out @@ -131,7 +135,7 @@ def elu_(x, alpha=1.0, name=None): Inplace version of ``elu`` API, the output Tensor will be inplaced with 
input ``x``. Please refer to :ref:`api_nn_cn_elu`. """ - assert alpha >= 0., "elu_ only support alpha >= 0, please use elu instead." + assert alpha >= 0.0, "elu_ only support alpha >= 0, please use elu instead." if in_dygraph_mode(): return _C_ops.elu_(x, alpha) return _legacy_C_ops.elu_(x, 'alpha', alpha) @@ -187,10 +191,12 @@ def gelu(x, approximate=False, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu') helper = LayerHelper("gelu", **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='gelu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'approximate': approximate}) + helper.append_op( + type='gelu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'approximate': approximate}, + ) return out @@ -233,14 +239,17 @@ def hardshrink(x, threshold=0.5, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.hard_shrink(x, 'threshold', threshold) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hardshrink') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hardshrink' + ) helper = LayerHelper('hardshrink', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='hard_shrink', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'threshold': threshold}) + helper.append_op( + type='hard_shrink', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold}, + ) return out @@ -284,18 +293,18 @@ def hardtanh(x, min=-1.0, max=1.0, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.brelu(x, 't_min', min, 't_max', max) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hardtanh') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hardtanh' + ) helper = LayerHelper('hardtanh', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='brelu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 't_min': min, - 't_max': max - }) + helper.append_op( + type='brelu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'t_min': min, 't_max': max}, + ) return out @@ -341,18 +350,18 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hardsigmoid') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid' + ) helper = LayerHelper('hardsigmoid', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='hard_sigmoid', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'slope': slope, - 'offset': offset - }) + helper.append_op( + type='hard_sigmoid', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'slope': slope, 'offset': offset}, + ) return out @@ -395,8 +404,9 @@ def hardswish(x, name=None): if in_dygraph_mode(): return _C_ops.hardswish(x, 6, 6, 3) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'hardswish') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'hardswish' + ) helper = LayerHelper('hardswish', **locals()) out = helper.create_variable_for_type_inference(x.dtype) @@ -444,14 +454,17 @@ def leaky_relu(x, negative_slope=0.01, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.leaky_relu(x, 'alpha', negative_slope) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'leaky_relu') + 
check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu' + ) helper = LayerHelper('leaky_relu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='leaky_relu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'alpha': negative_slope}) + helper.append_op( + type='leaky_relu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'alpha': negative_slope}, + ) return out @@ -498,60 +511,68 @@ def prelu(x, weight, data_format="NCHW", name=None): # [ 6. , 7. , 8. , 9. ]]]] """ check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu') - check_variable_and_dtype(weight, 'weight', - ['float16', 'float32', 'float64'], 'prelu') + check_variable_and_dtype( + weight, 'weight', ['float16', 'float32', 'float64'], 'prelu' + ) - assert len(weight.shape - ) == 1, "The dim count of weight shape should be 1 in prelu()." + assert ( + len(weight.shape) == 1 + ), "The dim count of weight shape should be 1 in prelu()." mode = 'all' if weight.shape[0] > 1: true_data_format = [ - 'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC' + 'NC', + 'NCL', + 'NCHW', + 'NCDHW', + 'NLC', + 'NHWC', + 'NDHWC', ] if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)) + "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' - assert len( - x.shape - ) > 1, "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]." + assert ( + len(x.shape) > 1 + ), "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]." - #NOTE(GuoxiaWang): support NHWC data format + # NOTE(GuoxiaWang): support NHWC data format if data_format == 'NHWC': - assert weight.shape[0] == x.shape[ - -1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." + assert ( + weight.shape[0] == x.shape[-1] + ), "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." else: - assert weight.shape[0] == x.shape[ - 1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." + assert ( + weight.shape[0] == x.shape[1] + ), "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." mode = 'channel' if in_dygraph_mode(): return _C_ops.prelu(x, weight, data_format, mode) if _in_legacy_dygraph(): - return _legacy_C_ops.prelu(x, weight, 'mode', mode, 'data_format', - data_format) + return _legacy_C_ops.prelu( + x, weight, 'mode', mode, 'data_format', data_format + ) helper = LayerHelper('prelu', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type="prelu", - inputs={ - "X": x, - "Alpha": weight - }, - outputs={"Out": out}, - attrs={ - "mode": mode, - "data_format": data_format - }) + helper.append_op( + type="prelu", + inputs={"X": x, "Alpha": weight}, + outputs={"Out": out}, + attrs={"mode": mode, "data_format": data_format}, + ) return out -def rrelu(x, lower=1. / 8., upper=1. / 3., training=True, name=None): +def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None): r""" rrelu activation. @@ -623,47 +644,56 @@ def rrelu(x, lower=1. / 8., upper=1. 
/ 3., training=True, name=None): """ if not in_dynamic_mode(): - check_variable_and_dtype(x, 'X', ['float16', 'float32', 'float64'], - 'rrelu') + check_variable_and_dtype( + x, 'X', ['float16', 'float32', 'float64'], 'rrelu' + ) if not isinstance(lower, float) or not isinstance(upper, float): raise TypeError( - "The lower and upper values must be float type. Received: lower {}, upper {}." - .format(lower, upper)) + "The lower and upper values must be float type. Received: lower {}, upper {}.".format( + lower, upper + ) + ) if lower < 0 or lower > 1: raise ValueError( - "The lower value must be no less than zero or greater than one. Received: {}." - .format(lower)) + "The lower value must be no less than zero or greater than one. Received: {}.".format( + lower + ) + ) if upper < lower: raise ValueError( - "The upper value must be greater than lower value. Received: lower {}, upper {}." - .format(lower, upper)) + "The upper value must be greater than lower value. Received: lower {}, upper {}.".format( + lower, upper + ) + ) if upper > 1: raise ValueError( "The upper value must be no greater than one. Received: {}.".format( - upper)) + upper + ) + ) is_test = not training if _in_legacy_dygraph(): - out, noise = _legacy_C_ops.rrelu(x, 'lower', lower, 'upper', upper, - 'is_test', is_test) + out, noise = _legacy_C_ops.rrelu( + x, 'lower', lower, 'upper', upper, 'is_test', is_test + ) return out helper = LayerHelper('rrelu', **locals()) out = helper.create_variable_for_type_inference(x.dtype) noise = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = {'lower': lower, 'upper': upper, 'is_test': is_test} - helper.append_op(type='rrelu', - inputs={"X": x}, - outputs={ - "Out": out, - "Noise": noise - }, - attrs=attrs) + helper.append_op( + type='rrelu', + inputs={"X": x}, + outputs={"Out": out, "Noise": noise}, + attrs=attrs, + ) return out @@ -748,8 +778,9 @@ def log_sigmoid(x, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.logsigmoid(x) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'log_sigmoid') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'log_sigmoid' + ) helper = LayerHelper("log_sigmoid", **locals()) out = helper.create_variable_for_type_inference(x.dtype) helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out}) @@ -818,19 +849,19 @@ def maxout(x, groups, axis=1, name=None): if axis not in [1, -1, 3]: raise ValueError( "Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received " - "Attr(axis): %s." % str(axis)) + "Attr(axis): %s." 
% str(axis) + ) if axis == -1: axis = 3 helper = LayerHelper('maxout', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='maxout', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'groups': groups, - 'axis': axis - }) + helper.append_op( + type='maxout', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'groups': groups, 'axis': axis}, + ) return out @@ -869,17 +900,21 @@ def relu6(x, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6') helper = LayerHelper('relu6', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='relu6', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'threshold': threshold}) + helper.append_op( + type='relu6', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold}, + ) return out -def selu(x, - scale=1.0507009873554804934193349852946, - alpha=1.6732632423543772848170429916717, - name=None): +def selu( + x, + scale=1.0507009873554804934193349852946, + alpha=1.6732632423543772848170429916717, + name=None, +): r""" selu activation @@ -915,11 +950,13 @@ def selu(x, """ if scale <= 1.0: raise ValueError( - "The scale must be greater than 1.0. Received: {}.".format(scale)) + "The scale must be greater than 1.0. Received: {}.".format(scale) + ) if alpha < 0: raise ValueError( - "The alpha must be no less than zero. Received: {}.".format(alpha)) + "The alpha must be no less than zero. Received: {}.".format(alpha) + ) if in_dygraph_mode(): return _C_ops.selu(x, scale, alpha) @@ -929,13 +966,12 @@ def selu(x, check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu') helper = LayerHelper('selu', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='selu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'scale': scale, - 'alpha': alpha - }) + helper.append_op( + type='selu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'scale': scale, 'alpha': alpha}, + ) return out @@ -1096,44 +1132,50 @@ def softmax(x, axis=-1, dtype=None, name=None): use_cudnn = True if in_dygraph_mode(): - outs_cast = x if dtype is None \ - else _C_ops.cast(x, dtype) + outs_cast = x if dtype is None else _C_ops.cast(x, dtype) return _C_ops.softmax(outs_cast, axis) if _in_legacy_dygraph(): - outs_cast = x if dtype is None \ + outs_cast = ( + x + if dtype is None else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) - return _legacy_C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', - use_cudnn) + ) + return _legacy_C_ops.softmax( + outs_cast, 'axis', axis, 'use_cudnn', use_cudnn + ) if dtype is None: - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'softmax') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'softmax' + ) else: check_dtype( - dtype, 'dtype', ['float32', 'float64'], 'softmax', - 'If dtype is not None, it only support float32 or float64.') + dtype, + 'dtype', + ['float32', 'float64'], + 'softmax', + 'If dtype is not None, it only support float32 or float64.', + ) helper = LayerHelper("softmax", **locals()) outs_cast = x if dtype is not None: outs_cast = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='cast', - inputs={'X': x}, - outputs={'Out': outs_cast}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': dtype - }) + helper.append_op( + type='cast', + inputs={'X': x}, + outputs={'Out': outs_cast}, + attrs={'in_dtype': x.dtype, 'out_dtype': dtype}, + ) outs_softmax = 
helper.create_variable_for_type_inference(outs_cast.dtype) - helper.append_op(type='softmax', - inputs={'X': outs_cast}, - outputs={'Out': outs_softmax}, - attrs={ - 'axis': axis, - 'use_cudnn': use_cudnn - }) + helper.append_op( + type='softmax', + inputs={'X': outs_cast}, + outputs={'Out': outs_softmax}, + attrs={'axis': axis, 'use_cudnn': use_cudnn}, + ) return outs_softmax @@ -1149,15 +1191,22 @@ def softmax_(x, axis=-1, dtype=None, name=None): use_cudnn = True if in_dygraph_mode(): - outs_cast = x if dtype is None \ + outs_cast = ( + x + if dtype is None else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) + ) return _C_ops.softmax_(outs_cast, axis) if _in_legacy_dygraph(): - outs_cast = x if dtype is None \ + outs_cast = ( + x + if dtype is None else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) - return _legacy_C_ops.softmax_(outs_cast, 'axis', axis, 'use_cudnn', - use_cudnn) + ) + return _legacy_C_ops.softmax_( + outs_cast, 'axis', axis, 'use_cudnn', use_cudnn + ) def softplus(x, beta=1, threshold=20, name=None): @@ -1195,17 +1244,17 @@ def softplus(x, beta=1, threshold=20, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.softplus(x, 'beta', beta, 'threshold', threshold) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'softplus') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'softplus' + ) helper = LayerHelper('softplus', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='softplus', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'beta': beta, - 'threshold': threshold - }) + helper.append_op( + type='softplus', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'beta': beta, 'threshold': threshold}, + ) return out @@ -1245,21 +1294,26 @@ def softshrink(x, threshold=0.5, name=None): if threshold < 0: raise ValueError( "The threshold must be no less than zero. 
Received: {}.".format( - threshold)) + threshold + ) + ) if in_dygraph_mode(): return _C_ops.softshrink(x, threshold) if _in_legacy_dygraph(): return _legacy_C_ops.softshrink(x, 'lambda', threshold) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'softshrink') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'softshrink' + ) helper = LayerHelper('softshrink', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='softshrink', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'lambda': threshold}) + helper.append_op( + type='softshrink', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'lambda': threshold}, + ) return out @@ -1293,8 +1347,9 @@ def softsign(x, name=None): if in_dynamic_mode(): return _legacy_C_ops.softsign(x) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'softsign') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'softsign' + ) helper = LayerHelper('softsign', **locals()) out = helper.create_variable_for_type_inference(x.dtype) helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out}) @@ -1334,10 +1389,9 @@ def swish(x, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish') helper = LayerHelper('swish', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='swish', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'beta': 1.0}) + helper.append_op( + type='swish', inputs={'X': x}, outputs={'Out': out}, attrs={'beta': 1.0} + ) return out @@ -1413,8 +1467,9 @@ def tanhshrink(x, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.tanh_shrink(x) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'tanhshrink') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink' + ) helper = LayerHelper('tanh_shrink', **locals()) out = helper.create_variable_for_type_inference(x.dtype) helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out}) @@ -1461,14 +1516,17 @@ def thresholded_relu(x, threshold=1.0, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.thresholded_relu(x, 'threshold', threshold) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'thresholded_relu') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu' + ) helper = LayerHelper('thresholded_relu', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='thresholded_relu', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'threshold': threshold}) + helper.append_op( + type='thresholded_relu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'threshold': threshold}, + ) return out @@ -1541,30 +1599,36 @@ def log_softmax(x, axis=-1, dtype=None, name=None): return _legacy_C_ops.log_softmax(x, 'axis', axis) if dtype is None: - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'log_softmax') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'log_softmax' + ) else: check_dtype( - dtype, 'dtype', ['float32', 'float64'], 'log_softmax', - 'If dtype is not None, it only support float32 or float64.') + dtype, + 'dtype', + ['float32', 'float64'], + 'log_softmax', + 'If dtype is not None, it only support float32 or float64.', + ) helper = LayerHelper("log_softmax", **locals()) out_cast = x if dtype is not None: out_cast = helper.create_variable_for_type_inference(dtype) - 
helper.append_op(type='cast', - inputs={'X': x}, - outputs={'Out': out_cast}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': dtype - }) + helper.append_op( + type='cast', + inputs={'X': x}, + outputs={'Out': out_cast}, + attrs={'in_dtype': x.dtype, 'out_dtype': dtype}, + ) out = helper.create_variable_for_type_inference(out_cast.dtype) - helper.append_op(type='log_softmax', - inputs={'X': out_cast}, - outputs={'Out': out}, - attrs={'axis': axis}) + helper.append_op( + type='log_softmax', + inputs={'X': out_cast}, + outputs={'Out': out}, + attrs={'axis': axis}, + ) return out @@ -1607,8 +1671,9 @@ def glu(x, axis=-1, name=None): # [-1.0577879 , -0.46985325]], dtype=float32) """ - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - "glu") + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], "glu" + ) a, b = chunk(x, 2, axis=axis, name=name) gate = sigmoid(b, name=name) out = paddle.multiply(a, gate, name=name) @@ -1676,18 +1741,17 @@ def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None): return _C_ops.gumbel_softmax(x, temperature, hard, axis) if in_dynamic_mode(): - return _legacy_C_ops.gumbel_softmax(x, 'temperature', temperature, - 'hard', hard, 'axis', axis) + return _legacy_C_ops.gumbel_softmax( + x, 'temperature', temperature, 'hard', hard, 'axis', axis + ) helper = LayerHelper("gumbel_softmax", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'gumbel_softmax') out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='gumbel_softmax', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'temperature': temperature, - 'hard': hard, - 'axis': axis - }) + helper.append_op( + type='gumbel_softmax', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'temperature': temperature, 'hard': hard, 'axis': axis}, + ) return out diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 2dc4db3026557e096e5eebd85ac71a5c19000179..b6936c5a90c9b1ea6009f7d3996df620818ef76a 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -18,14 +18,23 @@ from paddle.fluid.layers.tensor import fill_constant from ...tensor import concat from ...tensor.creation import zeros from paddle.static import Variable + # TODO: define the common functions to build a neural network from ...tensor.manipulation import squeeze from ...tensor.manipulation import unsqueeze from ...tensor import clip from ...tensor import sum from ...tensor import sqrt -from ...fluid.data_feeder import check_variable_and_dtype, check_dtype, check_type -from ...fluid.framework import _in_legacy_dygraph, _non_static_mode, in_dygraph_mode +from ...fluid.data_feeder import ( + check_variable_and_dtype, + check_dtype, + check_type, +) +from ...fluid.framework import ( + _in_legacy_dygraph, + _non_static_mode, + in_dygraph_mode, +) from paddle import _C_ops, _legacy_C_ops from paddle.framework import in_dynamic_mode @@ -108,26 +117,28 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold') - assert len(x.shape) == 4, \ - "input should be the format of [N, C, H, W]" + assert len(x.shape) == 4, "input should be the format of [N, C, H, W]" if isinstance(kernel_sizes, int): kernel_sizes = [kernel_sizes, kernel_sizes] else: - assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \ - "kernel_sizes should either be an integer or a list of two integers" + assert 
isinstance(kernel_sizes, list) and ( + len(kernel_sizes) == 2 + ), "kernel_sizes should either be an integer or a list of two integers" if isinstance(strides, int): strides = [strides, strides] else: - assert isinstance(strides, list) and (len(strides) == 2), \ - "strides should either be an integer or a list of two integers" + assert isinstance(strides, list) and ( + len(strides) == 2 + ), "strides should either be an integer or a list of two integers" if isinstance(dilations, int): dilations = [dilations, dilations] else: - assert isinstance(dilations, list) and (len(dilations) == 2), \ - "dilations should either be an integer or a list of two integers" + assert isinstance(dilations, list) and ( + len(dilations) == 2 + ), "dilations should either be an integer or a list of two integers" if isinstance(paddings, int): paddings = [paddings] * 4 @@ -143,32 +154,37 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): else: raise ValueError( "Unexpected type of paddings, it should be either an integer or a list" - "of 2 or 4 integers") + "of 2 or 4 integers" + ) if in_dygraph_mode(): return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="unfold", - inputs={"X": x}, - outputs={"Y": out}, - attrs={ - "kernel_sizes": kernel_sizes, - "strides": strides, - "paddings": paddings, - "dilations": dilations - }) + helper.append_op( + type="unfold", + inputs={"X": x}, + outputs={"Y": out}, + attrs={ + "kernel_sizes": kernel_sizes, + "strides": strides, + "paddings": paddings, + "dilations": dilations, + }, + ) return out -def interpolate(x, - size=None, - scale_factor=None, - mode='nearest', - align_corners=False, - align_mode=0, - data_format='NCHW', - name=None): +def interpolate( + x, + size=None, + scale_factor=None, + mode='nearest', + align_corners=False, + align_mode=0, + data_format='NCHW', + name=None, +): """ This API resizes a batch of images. @@ -376,7 +392,8 @@ def interpolate(x, if resample not in resample_methods: raise ValueError( "The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', " - " 'bicubic' or 'nearest' currently.") + " 'bicubic' or 'nearest' currently." + ) if resample in ['LINEAR'] and len(x.shape) != 3: raise ValueError("'linear' only support 3-D tensor.") @@ -403,8 +420,11 @@ def interpolate(x, ) if resample == 'AREA': - if isinstance(size, list) or isinstance(size, tuple) or isinstance( - size, Variable): + if ( + isinstance(size, list) + or isinstance(size, tuple) + or isinstance(size, Variable) + ): if len(size) == 0: raise ValueError("output size can not be empty") if len(x.shape) == 3: @@ -418,19 +438,25 @@ def interpolate(x, dtype = helper.input_dtype(input_param_name='x') if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCW` or `NWC` supported for 3-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCW` or `NWC` supported for 3-D input." + ) elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCHW` or `NHWC` supported for 4-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCHW` or `NHWC` supported for 4-D input." 
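For reference, a minimal usage sketch of a few of the activation functionals reformatted above (celu, hardtanh, softmax, log_softmax), built only from the signatures visible in this diff; the sample tensor values are illustrative assumptions:

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[-1.0, 0.5], [2.0, -0.3]])
y_celu = F.celu(x, alpha=1.0)               # x for x >= 0, alpha * (exp(x / alpha) - 1) otherwise
y_htanh = F.hardtanh(x, min=-1.0, max=1.0)  # clips values into [min, max]
y_soft = F.softmax(x, axis=-1)              # each row sums to 1 along the last axis
y_lsoft = F.log_softmax(x, axis=-1)         # log of softmax, computed in a numerically stable way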
+ ) elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']: raise ValueError( - "Got wrong value for param `data_format`: " + data_format + - " received but only `NCDHW` or `NDHWC` supported for 5-D input.") + "Got wrong value for param `data_format`: " + + data_format + + " received but only `NCDHW` or `NDHWC` supported for 5-D input." + ) def _is_list_or_turple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW': data_layout = 'NCHW' @@ -448,7 +474,7 @@ def interpolate(x, "interp_method": resample_type, "align_corners": align_corners, "align_mode": align_mode, - "data_layout": data_layout + "data_layout": data_layout, } out_shape = size @@ -476,9 +502,9 @@ def interpolate(x, if isinstance(dim_size, Variable): contain_var = True continue - assert dim_size > 0, ( - "Each dimension size given in out_shape must be greater than 0." - ) + assert ( + dim_size > 0 + ), "Each dimension size given in out_shape must be greater than 0." if contain_var: new_size_tensor = [] @@ -489,14 +515,13 @@ def interpolate(x, new_size_tensor.append(dim) size_list.append(-1) else: - assert (isinstance(dim, int)) + assert isinstance(dim, int) temp_out = helper.create_variable_for_type_inference( - 'int32') - fill_constant([1], - 'int32', - dim, - force_cpu=True, - out=temp_out) + 'int32' + ) + fill_constant( + [1], 'int32', dim, force_cpu=True, out=temp_out + ) new_size_tensor.append(temp_out) size_list.append(dim) inputs['SizeTensor'] = new_size_tensor @@ -504,7 +529,8 @@ def interpolate(x, if len(x.shape) == 3: if len(out_shape) != 1: raise ValueError( - "size length should be 2 for input 3-D tensor") + "size length should be 2 for input 3-D tensor" + ) if contain_var: attrs['out_w'] = size_list[0] else: @@ -512,8 +538,9 @@ def interpolate(x, attrs['out_w'] = out_shape[0] if len(x.shape) == 4: if len(out_shape) != 2: - raise ValueError("size length should be 2 for " - "input 4-D tensor.") + raise ValueError( + "size length should be 2 for " "input 4-D tensor." + ) if contain_var: attrs['out_h'] = size_list[0] attrs['out_w'] = size_list[1] @@ -523,8 +550,9 @@ def interpolate(x, attrs['out_w'] = out_shape[1] if len(x.shape) == 5: if len(out_shape) != 3: - raise ValueError("size length should be 3 for " - "input 5-D tensor.") + raise ValueError( + "size length should be 3 for " "input 5-D tensor." 
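A short sketch of the unfold functional defined earlier in this file, following its requirement that the input be a 4-D [N, C, H, W] tensor; the shape noted in the comments is the expected im2col layout for these particular arguments and is given for illustration only:

import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 8, 8])                        # [N, C, H, W], as the assert above requires
patches = F.unfold(x, kernel_sizes=3, strides=1, paddings=1)
# Each column holds one 3x3 block per channel: the second dim is C * 3 * 3 = 27,
# and with stride 1 and padding 1 there are 8 * 8 = 64 blocks per sample.
print(patches.shape)                                  # expected: [2, 27, 64]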
+ ) if contain_var: attrs['out_d'] = size_list[0] attrs['out_h'] = size_list[1] @@ -550,9 +578,10 @@ def interpolate(x, attrs['scale'] = list(map(float, scale_list)) elif isinstance(scale, list) or isinstance(scale, tuple): if len(scale) != len(x.shape) - 2: - raise ValueError("scale_shape length should be {} for " - "input {}-D tensor.".format( - len(x.shape) - 2, len(x.shape))) + raise ValueError( + "scale_shape length should be {} for " + "input {}-D tensor.".format(len(x.shape) - 2, len(x.shape)) + ) for value in scale: if value <= 0: raise ValueError("Attr(scale) should be greater than zero.") @@ -572,80 +601,114 @@ def interpolate(x, if resample_type == "linear": if in_dygraph_mode(): out = _C_ops.linear_interp( - x, inputs['OutSize'] if 'OutSize' in inputs else None, + x, + inputs['OutSize'] if 'OutSize' in inputs else None, inputs['SizeTensor'] if 'SizeTensor' in inputs else None, inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], attrs['out_d'], attrs['out_h'], - attrs['out_w'], attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], attrs['align_corners'], - attrs['align_mode']) + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) else: out = _legacy_C_ops.linear_interp_v2(x, *dy_attr) elif resample_type == "bilinear": if in_dygraph_mode(): out = _C_ops.bilinear_interp( - x, inputs['OutSize'] if 'OutSize' in inputs else None, + x, + inputs['OutSize'] if 'OutSize' in inputs else None, inputs['SizeTensor'] if 'SizeTensor' in inputs else None, inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], attrs['out_d'], attrs['out_h'], - attrs['out_w'], attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], attrs['align_corners'], - attrs['align_mode']) + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) else: out = _legacy_C_ops.bilinear_interp_v2(x, *dy_attr) elif resample_type == "trilinear": if in_dygraph_mode(): out = _C_ops.trilinear_interp( - x, inputs['OutSize'] if 'OutSize' in inputs else None, + x, + inputs['OutSize'] if 'OutSize' in inputs else None, inputs['SizeTensor'] if 'SizeTensor' in inputs else None, inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], attrs['out_d'], attrs['out_h'], - attrs['out_w'], attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], attrs['align_corners'], - attrs['align_mode']) + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) else: out = _legacy_C_ops.trilinear_interp_v2(x, *dy_attr) elif resample_type == "nearest": if in_dygraph_mode(): out = _C_ops.nearest_interp( - x, inputs['OutSize'] if 'OutSize' in inputs else None, + x, + inputs['OutSize'] if 'OutSize' in inputs else None, inputs['SizeTensor'] if 'SizeTensor' in inputs else None, inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], attrs['out_d'], attrs['out_h'], - attrs['out_w'], attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], attrs['align_corners'], - attrs['align_mode']) + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + 
attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) else: out = _legacy_C_ops.nearest_interp_v2(x, *dy_attr) elif resample_type == "bicubic": if in_dygraph_mode(): out = _C_ops.bicubic_interp( - x, inputs['OutSize'] if 'OutSize' in inputs else None, + x, + inputs['OutSize'] if 'OutSize' in inputs else None, inputs['SizeTensor'] if 'SizeTensor' in inputs else None, inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], attrs['out_d'], attrs['out_h'], - attrs['out_w'], attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], attrs['align_corners'], - attrs['align_mode']) + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) else: out = _legacy_C_ops.bicubic_interp_v2(x, *dy_attr) return out out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='{}_interp_v2'.format(resample_type), - inputs=inputs, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type='{}_interp_v2'.format(resample_type), + inputs=inputs, + outputs={"Out": out}, + attrs=attrs, + ) return out -def upsample(x, - size=None, - scale_factor=None, - mode='nearest', - align_corners=False, - align_mode=0, - data_format='NCHW', - name=None): +def upsample( + x, + size=None, + scale_factor=None, + mode='nearest', + align_corners=False, + align_mode=0, + data_format='NCHW', + name=None, +): """ This API resizes a batch of images. @@ -827,8 +890,9 @@ def upsample(x, # [2L, 3L, 12L, 12L] """ - return interpolate(x, size, scale_factor, mode, align_corners, align_mode, - data_format) + return interpolate( + x, size, scale_factor, mode, align_corners, align_mode, data_format + ) def bilinear(x1, x2, weight, bias=None, name=None): @@ -879,19 +943,16 @@ def bilinear(x1, x2, weight, bias=None, name=None): helper = LayerHelper("bilinear", **locals()) out = helper.create_variable_for_type_inference(dtype=x1.dtype) - helper.append_op(type="bilinear_tensor_product", - inputs=inputs, - outputs={"Out": out}) + helper.append_op( + type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out} + ) return out -def dropout(x, - p=0.5, - axis=None, - training=True, - mode="upscale_in_train", - name=None): +def dropout( + x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None +): """ Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaption during training. 
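A minimal sketch of interpolate and its upsample wrapper as defined above; the output shapes in the comments are expectations for these particular arguments, not outputs verified against this branch:

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 6, 10])                             # NCHW input
y_near = F.interpolate(x, size=[12, 12], mode='nearest')   # expected shape [2, 3, 12, 12]
y_bili = F.upsample(x, scale_factor=2, mode='bilinear',
                    align_corners=False)                   # expected shape [2, 3, 12, 20]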
The dropout operator randomly sets the @@ -1043,7 +1104,8 @@ def dropout(x, if isinstance(p, (int, float)): # fast return for p == 0 - if p == 0: return x + if p == 0: + return x elif p < 0 or p > 1: raise ValueError("p argument should between 0 and 1") if mode not in ('downscale_in_infer', 'upscale_in_train'): @@ -1055,41 +1117,63 @@ def dropout(x, if axis == None: # commonly used dropout seed = None - mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer + mode = ( + 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode + ) # semantic transfer if _non_static_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed if in_dygraph_mode(): - out, mask = _C_ops.dropout( x, None, p, not training, mode, \ - seed if seed is not None else 0, seed is not None) + out, mask = _C_ops.dropout( + x, + None, + p, + not training, + mode, + seed if seed is not None else 0, + seed is not None, + ) return out - out, mask = _legacy_C_ops.dropout(x, 'dropout_prob', p, 'is_test', - not training, 'fix_seed', seed - is not None, 'seed', - seed if seed is not None else 0, - 'dropout_implementation', mode) + out, mask = _legacy_C_ops.dropout( + x, + 'dropout_prob', + p, + 'is_test', + not training, + 'fix_seed', + seed is not None, + 'seed', + seed if seed is not None else 0, + 'dropout_implementation', + mode, + ) return out helper = LayerHelper('dropout', **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'dropout') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'dropout' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) mask = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) def get_attrs(prog, dropout_prob, is_test, seed): if (seed is None or seed == 0) and prog.random_seed != 0: seed = prog.random_seed - if isinstance(dropout_prob, - Variable) and not dropout_prob.shape != [1]: + if isinstance( + dropout_prob, Variable + ) and not dropout_prob.shape != [1]: raise TypeError( - "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}" - .format(p.shape)) + "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}".format( + p.shape + ) + ) attrs = { 'dropout_prob': dropout_prob, 'is_test': is_test, @@ -1101,38 +1185,45 @@ def dropout(x, attrs = get_attrs(helper.main_program, p, not training, seed) - helper.append_op(type='dropout', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'Mask': [mask] - }, - attrs=attrs) + helper.append_op( + type='dropout', + inputs={'X': [x]}, + outputs={'Out': [out], 'Mask': [mask]}, + attrs=attrs, + ) return out - else: #sometimes called dropout_nd #TODO: optimize with c++ + else: # sometimes called dropout_nd #TODO: optimize with c++ if not in_dynamic_mode(): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout') dtype = x.dtype keep_prob = 1 - p if training: - if in_dynamic_mode() and p == 1.: - return paddle.scale(x, scale=0.) 
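A usage sketch for the dropout functional whose signature and branches appear above, assuming the default upscale_in_train semantics; the input is random, so only the scaling behaviour in the comments is meaningful:

import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 8])
y_train = F.dropout(x, p=0.5, training=True, mode="upscale_in_train")  # kept entries scaled by 1 / (1 - p)
y_eval = F.dropout(x, p=0.5, training=False)                           # identity at inference in this mode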
+ if in_dynamic_mode() and p == 1.0: + return paddle.scale(x, scale=0.0) - scale_input = paddle.scale( - x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x + scale_input = ( + paddle.scale(x, scale=1 / keep_prob) + if mode == 'upscale_in_train' + else x + ) - #get mask shape + # get mask shape input_shape = x.shape if not in_dynamic_mode(): input_shape_tensor = paddle.shape(x) drop_axes = [axis] if isinstance(axis, int) else list(axis) if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1: - raise ValueError("axis value should be greater than or equal to 0 and less than dimensions of x:{}, but get axis value:{} " \ - .format(len(input_shape), max(drop_axes))) + raise ValueError( + "axis value should be greater than or equal to 0 and less than dimensions of x:{}, but get axis value:{} ".format( + len(input_shape), max(drop_axes) + ) + ) if len(drop_axes) > len(input_shape): raise ValueError( - "length of axis should not be greater than dimensions of x:{}, but get length of axis: {}" - .format(len(input_shape), len(drop_axes))) + "length of axis should not be greater than dimensions of x:{}, but get length of axis: {}".format( + len(input_shape), len(drop_axes) + ) + ) mask_shape = [1] * len(input_shape) if not in_dynamic_mode(): for i in drop_axes: @@ -1141,11 +1232,10 @@ def dropout(x, for i in drop_axes: mask_shape[i] = input_shape[i] - #get mask - random_tensor = paddle.uniform(mask_shape, - dtype='float32', - min=0., - max=1.0) + # get mask + random_tensor = paddle.uniform( + mask_shape, dtype='float32', min=0.0, max=1.0 + ) p = full(shape=[1], fill_value=p, dtype='float32') keep_mask = paddle.greater_equal(random_tensor, p) @@ -1154,8 +1244,11 @@ def dropout(x, ret = paddle.multiply(scale_input, keep_mask, name=name) return ret else: # test - ret = paddle.scale( - x, scale=keep_prob) if mode == 'downscale_in_infer' else x + ret = ( + paddle.scale(x, scale=keep_prob) + if mode == 'downscale_in_infer' + else x + ) return ret @@ -1196,20 +1289,26 @@ def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None): """ input_shape = x.shape if len(input_shape) != 4: - raise ValueError("dimensions of x should be 4, but received {} != 4"\ - .format(len(input_shape))) + raise ValueError( + "dimensions of x should be 4, but received {} != 4".format( + len(input_shape) + ) + ) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." % str(data_format) + ) - return dropout(x, - p=p, - axis=[0, 1] if data_format == 'NCHW' else [0, 3], - training=training, - mode="upscale_in_train", - name=name) + return dropout( + x, + p=p, + axis=[0, 1] if data_format == 'NCHW' else [0, 3], + training=training, + mode="upscale_in_train", + name=name, + ) def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None): @@ -1248,20 +1347,26 @@ def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None): input_shape = x.shape if len(input_shape) != 5: - raise ValueError("dimensions of x should be 5, but received {} != 5" \ - .format(len(input_shape))) + raise ValueError( + "dimensions of x should be 5, but received {} != 5".format( + len(input_shape) + ) + ) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." 
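For dropout2d, which simply forwards to dropout with axis=[0, 1] (or [0, 3] for NHWC), a minimal sketch with an assumed feature-map shape:

import paddle
import paddle.nn.functional as F

feat = paddle.rand([2, 16, 7, 7])                     # dropout2d requires a 4-D tensor
out = F.dropout2d(feat, p=0.25, data_format='NCHW')   # zeroes whole H x W feature maps per (sample, channel)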
% str(data_format) + ) - return dropout(x, - p=p, - axis=[0, 1] if data_format == 'NCDHW' else [0, 4], - training=training, - mode="upscale_in_train", - name=name) + return dropout( + x, + p=p, + axis=[0, 1] if data_format == 'NCDHW' else [0, 4], + training=training, + mode="upscale_in_train", + name=name, + ) def alpha_dropout(x, p=0.5, training=True, name=None): @@ -1303,37 +1408,40 @@ def alpha_dropout(x, p=0.5, training=True, name=None): raise ValueError("p argument should between 0 and 1") if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'alpha_dropout') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'alpha_dropout' + ) if training: if p == 1: - return paddle.scale(x, scale=0.) - #get transformation params + return paddle.scale(x, scale=0.0) + # get transformation params alpha = 1.6732632423543772848170429916717 scale = 1.0507009873554804934193349852946 alpha_p = -alpha * scale - a = ((1 - p) * (1 + p * alpha_p**2))**-0.5 + a = ((1 - p) * (1 + p * alpha_p**2)) ** -0.5 b = -a * alpha_p * p dtype = x.dtype input_shape = x.shape - #get mask - random_tensor = paddle.uniform(input_shape, - dtype='float32', - min=0., - max=1.0) + # get mask + random_tensor = paddle.uniform( + input_shape, dtype='float32', min=0.0, max=1.0 + ) p = full(shape=[1], fill_value=p, dtype='float32') keep_mask = paddle.greater_equal(random_tensor, p) keep_mask = paddle.cast(keep_mask, dtype) drop_mask = paddle.subtract( - full(shape=input_shape, fill_value=1., dtype=dtype), keep_mask) + full(shape=input_shape, fill_value=1.0, dtype=dtype), keep_mask + ) - #apply mask + # apply mask b = full(shape=[1], fill_value=b, dtype=dtype) - y = paddle.add(paddle.multiply(x, keep_mask), - paddle.scale(drop_mask, scale=alpha_p)) + y = paddle.add( + paddle.multiply(x, keep_mask), + paddle.scale(drop_mask, scale=alpha_p), + ) res = paddle.add(paddle.scale(y, scale=a), b, name=name) return res else: # test @@ -1452,18 +1560,28 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): # [6. 4. 5. 6. 4. 5.] # [3. 1. 2. 3. 1. 
2.]]]] """ - assert mode in ['reflect', 'replicate', 'constant', 'circular'], \ - "mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode) + assert mode in [ + 'reflect', + 'replicate', + 'constant', + 'circular', + ], "mode should be one of constant, reflect, replicate, circular, but got {}.".format( + mode + ) data_format = data_format.upper() - assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \ - "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \ + assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], ( + "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " "but got {}".format(data_format) + ) x_dim = len(x.shape) - if mode == "constant" and isinstance( - pad, (list, tuple)) and len(pad) == x_dim * 2: + if ( + mode == "constant" + and isinstance(pad, (list, tuple)) + and len(pad) == x_dim * 2 + ): paddings = pad pad_value = value @@ -1471,10 +1589,20 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): out = _C_ops.pad(x, paddings, float(pad_value)) return out - check_variable_and_dtype(x, 'x', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], "pad") + check_variable_and_dtype( + x, + 'x', + [ + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + "pad", + ) check_type(pad_value, 'pad_value', (float, int, Variable), 'pad') if isinstance(pad_value, int): @@ -1483,17 +1611,18 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): helper = LayerHelper('pad', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pad', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'paddings': paddings, - 'pad_value': pad_value - }) + helper.append_op( + type='pad', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'paddings': paddings, 'pad_value': pad_value}, + ) return out assert x_dim in [ - 3, 4, 5 + 3, + 4, + 5, ], "input tesor dimension must be in [3, 4, 5] but got {}".format(x_dim) supported_format_map = { @@ -1501,9 +1630,11 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): 4: ["NCHW", "NHWC"], 5: ["NCDHW", "NDHWC"], } - assert data_format in supported_format_map[x_dim], \ - "input tensor dimension is {}, it's data format should be in {} but got {}".format( - x_dim, supported_format_map[x_dim], data_format) + assert ( + data_format in supported_format_map[x_dim] + ), "input tensor dimension is {}, it's data format should be in {} but got {}".format( + x_dim, supported_format_map[x_dim], data_format + ) unsqueezed_dim = [] @@ -1511,21 +1642,21 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): if data_format in ["NCL", "NCHW", "NCDHW"]: data_format = "NCDHW" if x_dim == 3: - pad = concat([zeros((4, ), dtype="int32"), pad], axis=0) + pad = concat([zeros((4,), dtype="int32"), pad], axis=0) unsqueezed_dim = [3, 4] x = unsqueeze(x, axis=unsqueezed_dim) elif x_dim == 4: - pad = concat([pad, zeros((2, ), dtype="int32")], axis=0) + pad = concat([pad, zeros((2,), dtype="int32")], axis=0) unsqueezed_dim = [2] x = unsqueeze(x, axis=unsqueezed_dim) elif data_format in ["NLC", "NHWC", "NDHWC"]: data_format = "NDHWC" if x_dim == 3: - pad = concat([zeros((4, ), dtype="int32"), pad], axis=0) + pad = concat([zeros((4,), dtype="int32"), pad], axis=0) unsqueezed_dim = [2, 3] x = unsqueeze(x, axis=unsqueezed_dim) elif x_dim == 4: - 
pad = concat([pad, zeros((2, ), dtype="int32")], axis=0) + pad = concat([pad, zeros((2,), dtype="int32")], axis=0) unsqueezed_dim = [1] x = unsqueeze(x, axis=unsqueezed_dim) else: @@ -1559,9 +1690,19 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): if _in_legacy_dygraph(): if isinstance(pad, Variable): pad = pad.numpy().tolist() - out = _legacy_C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", - value, "data_format", data_format, "name", - name) + out = _legacy_C_ops.pad3d( + x, + "paddings", + pad, + "mode", + mode, + "value", + value, + "data_format", + data_format, + "name", + name, + ) else: attrs = {'mode': mode, 'value': value, 'data_format': data_format} inputs = {'X': [x]} @@ -1575,10 +1716,9 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): dtype = helper.input_dtype(input_param_name='input') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='pad3d', - inputs=inputs, - outputs={"Out": out}, - attrs=attrs) + helper.append_op( + type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs + ) if len(unsqueezed_dim) != 0: out = squeeze(out, axis=unsqueezed_dim) @@ -1619,12 +1759,14 @@ def zeropad2d(x, padding, data_format="NCHW", name=None): # [0. 0. 0. 0. 0. 0.]]]] """ - return pad(x, - pad=padding, - mode='constant', - value=0, - data_format=data_format, - name=name) + return pad( + x, + pad=padding, + mode='constant', + value=0, + data_format=data_format, + name=name, + ) def cosine_similarity(x1, x2, axis=1, eps=1e-8): @@ -1731,12 +1873,13 @@ def linear(x, weight, bias=None, name=None): # [2.1077576 2.1077576 2.1077576 2.1077576 ]] """ if in_dygraph_mode(): - #TODO(jiabin): using addmm for fast forward route + # TODO(jiabin): using addmm for fast forward route return _C_ops.linear(x, weight, bias) else: if _in_legacy_dygraph(): - pre_bias = _legacy_C_ops.matmul_v2(x, weight, 'trans_x', False, - 'trans_y', False) + pre_bias = _legacy_C_ops.matmul_v2( + x, weight, 'trans_x', False, 'trans_y', False + ) if bias is None: return pre_bias @@ -1746,27 +1889,30 @@ def linear(x, weight, bias=None, name=None): helper = LayerHelper('linear', **locals()) dtype = x.dtype - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'linear') - check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], - 'linear') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'linear' + ) + check_dtype( + dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear' + ) inputs = {'X': [x], 'Y': [weight]} attrs = {'trans_x': False, 'trans_y': False} tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='matmul_v2', - inputs=inputs, - outputs={'Out': tmp}, - attrs=attrs) + helper.append_op( + type='matmul_v2', + inputs=inputs, + outputs={'Out': tmp}, + attrs=attrs, + ) if bias is not None: res = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_add', - inputs={ - 'X': [tmp], - 'Y': [bias] - }, - outputs={'Out': [res]}, - attrs={'axis': len(x.shape) - 1}) + helper.append_op( + type='elementwise_add', + inputs={'X': [tmp], 'Y': [bias]}, + outputs={'Out': [res]}, + attrs={'axis': len(x.shape) - 1}, + ) else: res = tmp return res @@ -1830,29 +1976,32 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None): #[[[0.03333334 0.93333334 0.03333334] # [0.93333334 0.03333334 0.93333334]]] """ - if epsilon > 1. 
or epsilon < 0.: + if epsilon > 1.0 or epsilon < 0.0: raise ValueError("The value of epsilon must be between 0 and 1.") if in_dygraph_mode(): return _C_ops.label_smooth(label, prior_dist, float(epsilon)) elif paddle.in_dynamic_mode(): - return _legacy_C_ops.label_smooth(label, prior_dist, 'epsilon', - float(epsilon)) + return _legacy_C_ops.label_smooth( + label, prior_dist, 'epsilon', float(epsilon) + ) - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'label_smooth') + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'label_smooth' + ) helper = LayerHelper("label_smooth", **locals()) label.stop_gradient = True smooth_label = helper.create_variable_for_type_inference(label.dtype) - helper.append_op(type="label_smooth", - inputs={ - "X": label, - "PriorDist": prior_dist - } if prior_dist else {"X": label}, - outputs={"Out": smooth_label}, - attrs={"epsilon": float(epsilon)}) + helper.append_op( + type="label_smooth", + inputs={"X": label, "PriorDist": prior_dist} + if prior_dist + else {"X": label}, + outputs={"Out": smooth_label}, + attrs={"epsilon": float(epsilon)}, + ) return smooth_label @@ -1963,7 +2112,10 @@ def class_center_sample(label, num_classes, num_samples, group=None): if not (group == False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ - (got group: {})'.format(group)) + (got group: {})'.format( + group + ) + ) return if hasattr(group, 'is_member') and not group.is_member(): @@ -1976,76 +2128,112 @@ def class_center_sample(label, num_classes, num_samples, group=None): if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() global_rank = parallel_env.rank - rank = global_rank if group is None else group.get_group_rank( - global_rank) + rank = ( + global_rank + if group is None + else group.get_group_rank(global_rank) + ) nranks = parallel_env.world_size if group is None else group.nranks if num_samples > num_classes: raise ValueError( - 'Expected num_samples less than or equal to {}, got num_samples {}'. 
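A short sketch of the linear and label_smooth functionals reformatted above; the weight layout ([in_features, out_features]) follows the matmul_v2 call in this file, and the smoothing arithmetic matches the docstring example quoted in the diff:

import paddle
import paddle.nn.functional as F

x = paddle.rand([3, 4])
w = paddle.rand([4, 5])
b = paddle.zeros([5])
y = F.linear(x, w, b)                                 # x @ w + b, expected shape [3, 5]

one_hot = paddle.to_tensor([[[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]]])
smoothed = F.label_smooth(one_hot, epsilon=0.1)       # (1 - 0.1) * label + 0.1 / num_classes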
- format(num_classes, num_samples)) + 'Expected num_samples less than or equal to {}, got num_samples {}'.format( + num_classes, num_samples + ) + ) label_size = 1 for dim in list(label.shape): label_size *= dim if label_size != -1 and label_size < 1: - raise ValueError('Expected label_size > 0 \ - (got label_size: {})'.format(label_size)) + raise ValueError( + 'Expected label_size > 0 \ + (got label_size: {})'.format( + label_size + ) + ) label_dims = len(list(label.shape)) if label_dims != 1: - raise ValueError('Expected label_dims == 1 \ - (got label_dims: {})'.format(label_dims)) + raise ValueError( + 'Expected label_dims == 1 \ + (got label_dims: {})'.format( + label_dims + ) + ) seed = None if (seed is None or seed == 0) and default_main_program().random_seed != 0: seed = default_main_program().random_seed if in_dygraph_mode(): - return _C_ops.class_center_sample(label, num_classes, num_samples, - ring_id, rank, nranks, seed - is not None, - seed if seed is not None else 0) + return _C_ops.class_center_sample( + label, + num_classes, + num_samples, + ring_id, + rank, + nranks, + seed is not None, + seed if seed is not None else 0, + ) elif paddle.in_dynamic_mode(): - remapped_label, sampled_class_center = _legacy_C_ops.class_center_sample( - label, 'num_classes', num_classes, 'num_samples', num_samples, - 'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed', seed - is not None, 'seed', seed if seed is not None else 0) + ( + remapped_label, + sampled_class_center, + ) = _legacy_C_ops.class_center_sample( + label, + 'num_classes', + num_classes, + 'num_samples', + num_samples, + 'ring_id', + ring_id, + 'nranks', + nranks, + 'rank', + rank, + 'fix_seed', + seed is not None, + 'seed', + seed if seed is not None else 0, + ) return remapped_label, sampled_class_center - check_variable_and_dtype(label, 'label', ['int64', 'int32'], - 'class_center_sample') + check_variable_and_dtype( + label, 'label', ['int64', 'int32'], 'class_center_sample' + ) op_type = 'class_center_sample' helper = LayerHelper(op_type, **locals()) remapped_label = helper.create_variable_for_type_inference( - dtype=label.dtype) + dtype=label.dtype + ) sampled_class_center = helper.create_variable_for_type_inference( - dtype=label.dtype) - helper.append_op(type=op_type, - inputs={'Label': label}, - outputs={ - 'RemappedLabel': remapped_label, - 'SampledLocalClassCenter': sampled_class_center - }, - attrs={ - 'num_classes': num_classes, - 'num_samples': num_samples, - 'ring_id': ring_id, - 'nranks': nranks, - 'rank': rank, - 'fix_seed': seed is not None, - 'seed': seed if seed is not None else 0 - }) + dtype=label.dtype + ) + helper.append_op( + type=op_type, + inputs={'Label': label}, + outputs={ + 'RemappedLabel': remapped_label, + 'SampledLocalClassCenter': sampled_class_center, + }, + attrs={ + 'num_classes': num_classes, + 'num_samples': num_samples, + 'ring_id': ring_id, + 'nranks': nranks, + 'rank': rank, + 'fix_seed': seed is not None, + 'seed': seed if seed is not None else 0, + }, + ) return remapped_label, sampled_class_center -def fold(x, - output_sizes, - kernel_sizes, - strides=1, - paddings=0, - dilations=1, - name=None): +def fold( + x, output_sizes, kernel_sizes, strides=1, paddings=0, dilations=1, name=None +): r""" Combines an array of sliding local blocks into a large containing @@ -2108,35 +2296,38 @@ def fold(x, check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold') - assert len(x.shape) == 3, \ - "input should be the format of [N, C, L]" + assert len(x.shape) == 3, "input should 
be the format of [N, C, L]" def _is_list_or_turple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) + return isinstance(data, list) or isinstance(data, tuple) if isinstance(output_sizes, int): output_sizes = [output_sizes, output_sizes] else: - assert _is_list_or_turple_(output_sizes) and (len(output_sizes) == 2), \ - "output_sizes should either be an integer or a list/tuple of two integers" + assert _is_list_or_turple_(output_sizes) and ( + len(output_sizes) == 2 + ), "output_sizes should either be an integer or a list/tuple of two integers" if isinstance(kernel_sizes, int): kernel_sizes = [kernel_sizes, kernel_sizes] else: - assert _is_list_or_turple_(kernel_sizes) and (len(kernel_sizes) == 2), \ - "kernel_sizes should either be an integer or a list/tuple of two integers" + assert _is_list_or_turple_(kernel_sizes) and ( + len(kernel_sizes) == 2 + ), "kernel_sizes should either be an integer or a list/tuple of two integers" if isinstance(strides, int): strides = [strides, strides] else: - assert _is_list_or_turple_(strides) and (len(strides) == 2), \ - "strides should either be an integer or a list/tuple of two integers" + assert _is_list_or_turple_(strides) and ( + len(strides) == 2 + ), "strides should either be an integer or a list/tuple of two integers" if isinstance(dilations, int): dilations = [dilations, dilations] else: - assert _is_list_or_turple_(dilations) and (len(dilations) == 2), \ - "dilations should either be an integer or a list/tuple of two integers" + assert _is_list_or_turple_(dilations) and ( + len(dilations) == 2 + ), "dilations should either be an integer or a list/tuple of two integers" if isinstance(paddings, int): paddings = [paddings] * 4 @@ -2152,26 +2343,39 @@ def fold(x, else: raise ValueError( "Unexpected type of paddings, it should be either an integer or a list" - "of 2 or 4 integers") + "of 2 or 4 integers" + ) if in_dygraph_mode(): - out = _C_ops.fold(x, output_sizes, kernel_sizes, strides, paddings, - dilations) + out = _C_ops.fold( + x, output_sizes, kernel_sizes, strides, paddings, dilations + ) elif in_dynamic_mode(): - out = _legacy_C_ops.fold(x, "output_sizes", output_sizes, - "kernel_sizes", kernel_sizes, "strides", - strides, "paddings", paddings, "dilations", - dilations) + out = _legacy_C_ops.fold( + x, + "output_sizes", + output_sizes, + "kernel_sizes", + kernel_sizes, + "strides", + strides, + "paddings", + paddings, + "dilations", + dilations, + ) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="fold", - inputs={"X": x}, - outputs={"Y": out}, - attrs={ - "output_sizes": output_sizes, - "kernel_sizes": kernel_sizes, - "strides": strides, - "paddings": paddings, - "dilations": dilations - }) + helper.append_op( + type="fold", + inputs={"X": x}, + outputs={"Y": out}, + attrs={ + "output_sizes": output_sizes, + "kernel_sizes": kernel_sizes, + "strides": strides, + "paddings": paddings, + "dilations": dilations, + }, + ) return out diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 17eded8ac06a8465c88cb4d6d60d9eecea628acb..573594216177ebf45bc0e95725508c454e3daa54 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -14,7 +14,12 @@ from ...device import get_cudnn_version from ...static import Variable -from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding, _contain_var, _convert_to_tensor_list +from ...fluid.layers.utils import ( + convert_to_list, + _is_symmetric_padding, + 
_contain_var, + _convert_to_tensor_list, +) from ...fluid.data_feeder import check_variable_and_dtype, check_dtype from ...fluid.layer_helper import LayerHelper from ...tensor.manipulation import unsqueeze, squeeze @@ -56,8 +61,10 @@ def _update_padding_nd(padding, channel_last, num_dims): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.". - format(padding)) + "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".format( + padding + ) + ) if padding == "VALID": padding_algorithm = "VALID" padding = [0] * num_dims @@ -72,10 +79,12 @@ def _update_padding_nd(padding, channel_last, num_dims): if not _zero_padding_in_batch_and_channel(padding, channel_last): raise ValueError( "Non-zero padding({}) in the batch or channel dimensions " - "is not supported.".format(padding)) + "is not supported.".format(padding) + ) padding_algorithm = "EXPLICIT" padding = _exclude_padding_in_batch_and_channel( - padding, channel_last) + padding, channel_last + ) if _is_symmetric_padding(padding, num_dims): padding = padding[0::2] # for padding like [pad_before, pad_after, pad_before, pad_after, ...] @@ -96,42 +105,60 @@ def _update_padding_nd(padding, channel_last, num_dims): padding = convert_to_list(padding, num_dims, 'padding') if not all([p >= 0 for p in padding]): raise ValueError( - "Invalid padding, all value should be larger than or equal to 0, but received: {}" - .format(padding)) + "Invalid padding, all value should be larger than or equal to 0, but received: {}".format( + padding + ) + ) return padding, padding_algorithm -def _conv_nd(x, - weight, - bias=None, - stride=1, - padding=0, - padding_algorithm=None, - dilation=1, - groups=1, - data_format="NCHW", - channel_dim=1, - op_type="conv2d", - use_cudnn=True, - use_mkldnn=False, - name=None): +def _conv_nd( + x, + weight, + bias=None, + stride=1, + padding=0, + padding_algorithm=None, + dilation=1, + groups=1, + data_format="NCHW", + channel_dim=1, + op_type="conv2d", + use_cudnn=True, + use_mkldnn=False, + name=None, +): # Due to the poor performance of NHWC, we transpose the input to NCHW. 
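Finally, a sketch pairing fold with the earlier unfold call, since fold expects the 3-D [N, C, L] column layout that unfold produces; the round-trip shape is an expectation for these arguments, and values at overlapping positions are summed rather than averaged:

import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 8, 8])
patches = F.unfold(x, kernel_sizes=3, paddings=1)                     # expected [2, 27, 64]
y = F.fold(patches, output_sizes=[8, 8], kernel_sizes=3, paddings=1)  # expected [2, 3, 8, 8]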
if in_dygraph_mode() and op_type == "conv2d": - pre_bias = _C_ops.conv2d(x, weight, stride, padding, padding_algorithm, - groups, dilation, data_format, False, -1, - False) + pre_bias = _C_ops.conv2d( + x, + weight, + stride, + padding, + padding_algorithm, + groups, + dilation, + data_format, + False, + -1, + False, + ) if bias is not None: - channel_dim = channel_dim + len( - x.shape) if channel_dim < 0 else channel_dim + channel_dim = ( + channel_dim + len(x.shape) if channel_dim < 0 else channel_dim + ) if isinstance(x, tuple): x = x[0] if isinstance(bias, tuple): bias = bias[0] if len(bias.shape) < len(x.shape): tmp_bias = _C_ops.reshape( - bias, [1 for i in range(channel_dim)] + bias.shape + - [1 for i in range(len(x.shape) - channel_dim - 1)]) + bias, + [1 for i in range(channel_dim)] + + bias.shape + + [1 for i in range(len(x.shape) - channel_dim - 1)], + ) return _C_ops.add(pre_bias, tmp_bias) else: return _C_ops.add(pre_bias, bias) @@ -139,40 +166,82 @@ def _conv_nd(x, return pre_bias if in_dygraph_mode() and op_type == "depthwise_conv2d": - pre_bias = _C_ops.depthwise_conv2d(x, weight, stride, padding, - padding_algorithm, groups, dilation, - data_format, False, -1, False, False, - use_cudnn) + pre_bias = _C_ops.depthwise_conv2d( + x, + weight, + stride, + padding, + padding_algorithm, + groups, + dilation, + data_format, + False, + -1, + False, + False, + use_cudnn, + ) if bias is not None: - channel_dim = channel_dim + len( - x.shape) if channel_dim < 0 else channel_dim + channel_dim = ( + channel_dim + len(x.shape) if channel_dim < 0 else channel_dim + ) tmp_bias = _C_ops.reshape( - bias, [1 for i in range(channel_dim)] + bias.shape + - [1 for i in range(len(x.shape) - channel_dim - 1)]) + bias, + [1 for i in range(channel_dim)] + + bias.shape + + [1 for i in range(len(x.shape) - channel_dim - 1)], + ) return _C_ops.add(pre_bias, tmp_bias) else: return pre_bias if in_dygraph_mode() and op_type == "conv3d": - pre_bias = _C_ops.conv3d(x, weight, stride, padding, padding_algorithm, - groups, dilation, data_format, False, -1, - False) + pre_bias = _C_ops.conv3d( + x, + weight, + stride, + padding, + padding_algorithm, + groups, + dilation, + data_format, + False, + -1, + False, + ) if bias is not None: - channel_dim = channel_dim + len( - x.shape) if channel_dim < 0 else channel_dim + channel_dim = ( + channel_dim + len(x.shape) if channel_dim < 0 else channel_dim + ) tmp_bias = _C_ops.reshape( bias, - bias.shape + [1 for i in range(len(x.shape) - channel_dim - 1)]) + bias.shape + [1 for i in range(len(x.shape) - channel_dim - 1)], + ) return _C_ops.add(pre_bias, tmp_bias) else: return pre_bias if in_dynamic_mode(): - attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation, - 'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', - use_mkldnn, 'fuse_relu_before_depthwise_conv', False, - "padding_algorithm", padding_algorithm, "data_format", - data_format) + attrs = ( + 'strides', + stride, + 'paddings', + padding, + 'dilations', + dilation, + 'groups', + groups, + 'use_cudnn', + use_cudnn, + 'use_mkldnn', + use_mkldnn, + 'fuse_relu_before_depthwise_conv', + False, + "padding_algorithm", + padding_algorithm, + "data_format", + data_format, + ) pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) @@ -189,44 +258,42 @@ def _conv_nd(x, 'use_mkldnn': use_mkldnn, 'fuse_relu_before_depthwise_conv': False, "padding_algorithm": padding_algorithm, - "data_format": data_format + 
"data_format": data_format, } - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - op_type) + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], op_type + ) helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') pre_bias = helper.create_variable_for_type_inference(dtype) outputs = {"Output": [pre_bias]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [bias] - }, - outputs={'Out': [out]}, - attrs={ - 'axis': channel_dim, - 'use_mkldnn': use_mkldnn - }) + helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [bias]}, + outputs={'Out': [out]}, + attrs={'axis': channel_dim, 'use_mkldnn': use_mkldnn}, + ) else: out = pre_bias return out -def conv1d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format='NCL', - name=None): +def conv1d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format='NCL', + name=None, +): r""" The convolution1D layer calculates the output based on the input, filter and strides, paddings, dilations, groups parameters. Input and @@ -339,36 +406,45 @@ def conv1d(x, use_cudnn = False if data_format not in ["NCL", "NLC"]: - raise ValueError("Attr(data_format) should be 'NCL' or 'NLC'. " - "Received Attr(data_format): {}.".format(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCL' or 'NLC'. " + "Received Attr(data_format): {}.".format(data_format) + ) - channel_last = (data_format == "NLC") + channel_last = data_format == "NLC" channel_dim = -1 if channel_last else 1 conv2d_data_format = "NHWC" if channel_last else "NCHW" if len(x.shape) != 3: raise ValueError( - "Input x should be 3D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 3D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: - raise ValueError("The channel dimension of the input({}) " - "should be defined. Received: {}.".format( - x.shape, num_channels)) + raise ValueError( + "The channel dimension of the input({}) " + "should be defined. Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv1d should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv1d should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, x.shape, groups)) + ", the groups is {}".format(num_channels, x.shape, groups) + ) if num_filters % groups != 0: raise ValueError( "the number of filters must be divisible by groups," "received: the number of filters is {}, the shape of weight is {}" - ", the groups is {}".format(num_filters, weight.shape, groups)) + ", the groups is {}".format(num_filters, weight.shape, groups) + ) # update attrs padding, padding_algorithm = _update_padding_nd(padding, channel_last, 1) @@ -379,8 +455,10 @@ def conv1d(x, padding = [0] + padding else: raise ValueError( - "The size of padding's dimension should be 1 or 2. 
But got padding={}" - .format(padding)) + "The size of padding's dimension should be 1 or 2. But got padding={}".format( + padding + ) + ) stride = [1] + convert_to_list(stride, 1, 'stride') dilation = [1] + convert_to_list(dilation, 1, 'dilation') weight = unsqueeze(weight, axis=[-2]) @@ -388,14 +466,18 @@ def conv1d(x, l_type = "conv2d" # When "groups==num_channels and num_filters% num_channels == 0" using depthwise_conv2d has better performance - if (is_compiled_with_cuda() and num_channels == groups and num_channels != 1 - and num_filters % num_channels == 0): + if ( + is_compiled_with_cuda() + and num_channels == groups + and num_channels != 1 + and num_filters % num_channels == 0 + ): l_type = 'depthwise_conv2d' use_cudnn = False # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups" if is_compiled_with_npu(): - if (num_channels == groups and num_channels == num_filters): + if num_channels == groups and num_channels == num_filters: l_type = 'depthwise_conv2d' else: l_type = 'conv2d' @@ -404,17 +486,44 @@ def conv1d(x, x = unsqueeze(x, axis=[squeeze_aixs]) if in_dygraph_mode(): - out = getattr(_C_ops, - l_type)(x, weight, stride, padding, padding_algorithm, - groups, dilation, conv2d_data_format, False, -1, - False, False, use_cudnn) + out = getattr(_C_ops, l_type)( + x, + weight, + stride, + padding, + padding_algorithm, + groups, + dilation, + conv2d_data_format, + False, + -1, + False, + False, + use_cudnn, + ) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) elif _in_legacy_dygraph(): - attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation, - 'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', False, - 'fuse_relu_before_depthwise_conv', False, "padding_algorithm", - padding_algorithm, "data_format", conv2d_data_format) + attrs = ( + 'strides', + stride, + 'paddings', + padding, + 'dilations', + dilation, + 'groups', + groups, + 'use_cudnn', + use_cudnn, + 'use_mkldnn', + False, + 'fuse_relu_before_depthwise_conv', + False, + "padding_algorithm", + padding_algorithm, + "data_format", + conv2d_data_format, + ) out = getattr(_legacy_C_ops, l_type)(x, weight, *attrs) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) @@ -429,33 +538,35 @@ def conv1d(x, 'use_mkldnn': False, 'fuse_relu_before_depthwise_conv': False, "padding_algorithm": padding_algorithm, - "data_format": conv2d_data_format + "data_format": conv2d_data_format, } - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'conv2d') + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'conv2d' + ) helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) outputs = {"Output": [out]} - helper.append_op(type=l_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=l_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) out = squeeze(out, axis=[squeeze_aixs]) return out -def conv2d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NCHW", - name=None): +def conv2d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NCHW", + name=None, +): r""" The convolution2D layer calculates the output based on the input, filter @@ -564,40 +675,52 @@ def conv2d(x, """ # entry checks if data_format not in ["NCHW", "NHWC"]: - raise 
ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. " - "Received Attr(data_format): {}.".format(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCHW' or 'NHWC'. " + "Received Attr(data_format): {}.".format(data_format) + ) - channel_last = (data_format == "NHWC") + channel_last = data_format == "NHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 4D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: - raise ValueError("The channel dimension of the input({}) " - "should be defined. Received: {}.".format( - x.shape, num_channels)) + raise ValueError( + "The channel dimension of the input({}) " + "should be defined. Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv2d should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv2d should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, x.shape, groups)) + ", the groups is {}".format(num_channels, x.shape, groups) + ) if num_filters % groups != 0: raise ValueError( "the number of filters must be divisible by groups," "received: the number of filters is {}, the shape of weight is {}" - ", the groups is {}".format(num_filters, weight.shape, groups)) + ", the groups is {}".format(num_filters, weight.shape, groups) + ) cudnn_version = get_cudnn_version() - use_cudnn = True if (is_compiled_with_cuda() - and cudnn_version is not None) else False + use_cudnn = ( + True + if (is_compiled_with_cuda() and cudnn_version is not None) + else False + ) # update attrs padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2) @@ -605,8 +728,11 @@ def conv2d(x, dilation = convert_to_list(dilation, 2, 'dilation') l_type = "conv2d" - if (num_channels == groups and num_channels != 1 - and num_filters % num_channels == 0): + if ( + num_channels == groups + and num_channels != 1 + and num_filters % num_channels == 0 + ): l_type = 'depthwise_conv2d' if is_compiled_with_rocm(): use_cudnn = True @@ -614,9 +740,19 @@ def conv2d(x, use_cudnn = False else: if in_dygraph_mode(): - pre_bias = _C_ops.conv2d(x, weight, stride, padding, - padding_algorithm, groups, dilation, - data_format, False, -1, False) + pre_bias = _C_ops.conv2d( + x, + weight, + stride, + padding, + padding_algorithm, + groups, + dilation, + data_format, + False, + -1, + False, + ) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) return out @@ -627,31 +763,50 @@ def conv2d(x, # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups" if is_compiled_with_npu(): - if (num_channels == groups and num_channels == num_filters): + if num_channels == groups and num_channels == num_filters: l_type = 'depthwise_conv2d' else: l_type = 'conv2d' - if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn") - ["FLAGS_conv2d_disable_cudnn"]): + if ( + is_compiled_with_cuda() + and get_flags("FLAGS_conv2d_disable_cudnn")[ + "FLAGS_conv2d_disable_cudnn" + ] + ): use_cudnn = False - return _conv_nd(x, weight, bias, stride, padding, padding_algorithm, - dilation, 
groups, data_format, channel_dim, l_type, - use_cudnn, use_mkldnn, name) - - -def conv1d_transpose(x, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, - output_size=None, - data_format="NCL", - name=None): + return _conv_nd( + x, + weight, + bias, + stride, + padding, + padding_algorithm, + dilation, + groups, + data_format, + channel_dim, + l_type, + use_cudnn, + use_mkldnn, + name, + ) + + +def conv1d_transpose( + x, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, + output_size=None, + data_format="NCL", + name=None, +): r""" The 1-D convolution transpose layer calculates the output based on the input, filter, and dilation, stride, padding. Input(Input) and output(Output) @@ -780,28 +935,36 @@ def conv1d_transpose(x, raise ValueError( "Attr(data_format) of conv2d_transpose got wrong value: " "received {}, but only 'NCL' or 'NLC' are supported.".format( - data_format)) - channel_last = (data_format == "NLC") + data_format + ) + ) + channel_last = data_format == "NLC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 3: raise ValueError( - "Input x should be 3D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 3D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] if num_channels < 0: - raise ValueError("The channel dimension of the input({}) " - "should be defined. Received: {}.".format( - x.shape, num_channels)) + raise ValueError( + "The channel dimension of the input({}) " + "should be defined. Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv1d_transpose should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv1d_transpose should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, x.shape, groups)) + ", the groups is {}".format(num_channels, x.shape, groups) + ) # update attrs padding, padding_algorithm = _update_padding_nd(padding, channel_last, 1) @@ -812,8 +975,10 @@ def conv1d_transpose(x, padding = padding + [0] else: raise ValueError( - "The size of padding's dimension should 1 or 2. But got padding={}". - format(padding)) + "The size of padding's dimension should 1 or 2. 
But got padding={}".format( + padding + ) + ) stride = convert_to_list(stride, 1, 'stride') + [1] dilation = convert_to_list(dilation, 1, 'dilation') + [1] @@ -822,30 +987,40 @@ def conv1d_transpose(x, output_size = [] else: if output_padding != 0: - raise ValueError('output_padding option is mutually exclusive with ' - 'output_size') + raise ValueError( + 'output_padding option is mutually exclusive with ' + 'output_size' + ) if isinstance(output_size, (list, tuple, int)): output_size = convert_to_list(output_size, 1, 'output_size') + [1] else: raise ValueError( - "output_size should be int, or list, tuple of ints") + "output_size should be int, or list, tuple of ints" + ) if output_padding == 0: output_padding = [] else: - output_padding = convert_to_list(output_padding, 1, - 'output_padding') + [0] + output_padding = convert_to_list( + output_padding, 1, 'output_padding' + ) + [0] if len(output_padding) > 0 and output_padding[0] > stride[0]: raise ValueError( "The size of output_padding should not be greater than stride." "But got output_padding={} and stride={}".format( - output_padding[0], stride[0])) + output_padding[0], stride[0] + ) + ) op_type = 'conv2d_transpose' num_filters = weight.shape[1] - if (num_channels == groups and num_channels != 1 and num_filters == 1 - and not use_cudnn): + if ( + num_channels == groups + and num_channels != 1 + and num_filters == 1 + and not use_cudnn + ): op_type = 'depthwise_conv2d_transpose' use_cudnn = False @@ -856,17 +1031,41 @@ def conv1d_transpose(x, weight = unsqueeze(weight, axis=[-1]) if in_dygraph_mode(): - out = getattr(_C_ops, - op_type)(x, weight, stride, padding, output_padding, - output_size, padding_algorithm, groups, dilation, - conv2d_data_format) + out = getattr(_C_ops, op_type)( + x, + weight, + stride, + padding, + output_padding, + output_size, + padding_algorithm, + groups, + dilation, + conv2d_data_format, + ) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) elif _in_legacy_dygraph(): - attrs = ('output_padding', output_padding, 'output_size', output_size, - 'strides', stride, 'paddings', padding, 'padding_algorithm', - padding_algorithm, 'dilations', dilation, 'groups', groups, - 'use_cudnn', use_cudnn, 'data_format', conv2d_data_format) + attrs = ( + 'output_padding', + output_padding, + 'output_size', + output_size, + 'strides', + stride, + 'paddings', + padding, + 'padding_algorithm', + padding_algorithm, + 'dilations', + dilation, + 'groups', + groups, + 'use_cudnn', + use_cudnn, + 'data_format', + conv2d_data_format, + ) out = getattr(_legacy_C_ops, op_type)(x, weight, *attrs) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) @@ -881,18 +1080,18 @@ def conv1d_transpose(x, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, - 'data_format': conv2d_data_format + 'data_format': conv2d_data_format, } - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'conv2d_transpose') + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'conv2d_transpose' + ) helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) outputs = {"Output": [out]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = nn.elementwise_add(out, bias, axis=channel_dim) @@ -900,17 +1099,19 @@ def conv1d_transpose(x, return out 
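# A hedged usage sketch of conv1d_transpose, whose reformatting ends above. The tensor
# shapes and hyper-parameters are made up for illustration; as in the code above, the
# weight layout is assumed to be [in_channels, out_channels // groups, kernel_size].
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 4, 8])     # NCL: batch=2, in_channels=4, length=8
w = paddle.randn([4, 6, 3])     # [in_channels, out_channels, kernel_size]
y = F.conv1d_transpose(x, w, stride=2, padding=1)
# With these settings the output length should be
# (8 - 1) * 2 - 2 * 1 + (3 - 1) + 1 = 15, i.e. y.shape == [2, 6, 15].
print(y.shape)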
-def conv2d_transpose(x, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - dilation=1, - groups=1, - output_size=None, - data_format='NCHW', - name=None): +def conv2d_transpose( + x, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + output_size=None, + data_format='NCHW', + name=None, +): r""" The convolution2D transpose layer calculates the output based on the input, @@ -1039,32 +1240,43 @@ def conv2d_transpose(x, raise ValueError( "Attr(data_format) of conv2d_transpose got wrong value: " "received {}, but only 'NCHW' or 'NHWC' are supported.".format( - data_format)) - channel_last = (data_format == "NHWC") + data_format + ) + ) + channel_last = data_format == "NHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 4D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] if num_channels < 0: - raise ValueError("The channel dimension of the input({}) " - "should be defined. Received: {}.".format( - x.shape, num_channels)) + raise ValueError( + "The channel dimension of the input({}) " + "should be defined. Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv2d_transpose should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv2d_transpose should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, x.shape, groups)) + ", the groups is {}".format(num_channels, x.shape, groups) + ) cudnn_version = get_cudnn_version() - use_cudnn = True if (is_compiled_with_cuda() - and cudnn_version is not None) else False + use_cudnn = ( + True + if (is_compiled_with_cuda() and cudnn_version is not None) + else False + ) # update attrs padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2) @@ -1075,8 +1287,10 @@ def conv2d_transpose(x, output_size = [] else: if output_padding != 0: - raise ValueError('output_padding option is mutually exclusive with ' - 'output_size') + raise ValueError( + 'output_padding option is mutually exclusive with ' + 'output_size' + ) if isinstance(output_size, (list, tuple)): if _contain_var(output_size): output_size = _convert_to_tensor_list(output_size) @@ -1085,15 +1299,21 @@ def conv2d_transpose(x, elif isinstance(output_size, int): output_size = convert_to_list(output_size, 2, 'output_size') elif isinstance(output_size, Variable): - check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'], - 'conv2d_transpose') - if len(output_size.shape) == 1 and (output_size.shape[0] == 1 - or output_size.shape[0] == 2): + check_dtype( + output_size.dtype, + 'output_size', + ['int32', 'int64'], + 'conv2d_transpose', + ) + if len(output_size.shape) == 1 and ( + output_size.shape[0] == 1 or output_size.shape[0] == 2 + ): if output_size.shape[0] == 1: output_size = [output_size, output_size] else: raise ValueError( - "output_size must contain one or two integers.") + "output_size must contain one or two integers." 
+ ) else: raise ValueError( "output_size should be int or Tensor or list, tuple of ints or Tensor" @@ -1106,24 +1326,54 @@ def conv2d_transpose(x, op_type = 'conv2d_transpose' num_filters = weight.shape[1] - if (num_channels == groups and num_channels != 1 and num_filters == 1): + if num_channels == groups and num_channels != 1 and num_filters == 1: op_type = 'depthwise_conv2d_transpose' use_cudnn = False if in_dygraph_mode(): - op = _C_ops.conv2d_transpose if op_type == 'conv2d_transpose' else _C_ops.depthwise_conv2d_transpose - pre_bias = op(x, weight, stride, padding, output_padding, output_size, - padding_algorithm, groups, dilation, data_format) + op = ( + _C_ops.conv2d_transpose + if op_type == 'conv2d_transpose' + else _C_ops.depthwise_conv2d_transpose + ) + pre_bias = op( + x, + weight, + stride, + padding, + output_padding, + output_size, + padding_algorithm, + groups, + dilation, + data_format, + ) if bias is not None: return nn.elementwise_add(pre_bias, bias, axis=channel_dim) else: return pre_bias if _in_legacy_dygraph(): - attrs = ('output_padding', output_padding, 'output_size', output_size, - 'strides', stride, 'paddings', padding, 'padding_algorithm', - padding_algorithm, 'dilations', dilation, 'groups', groups, - 'use_cudnn', use_cudnn, 'data_format', data_format) + attrs = ( + 'output_padding', + output_padding, + 'output_size', + output_size, + 'strides', + stride, + 'paddings', + padding, + 'padding_algorithm', + padding_algorithm, + 'dilations', + dilation, + 'groups', + groups, + 'use_cudnn', + use_cudnn, + 'data_format', + data_format, + ) pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) @@ -1140,17 +1390,17 @@ def conv2d_transpose(x, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, - 'data_format': data_format + 'data_format': data_format, } - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'conv2d_transpose') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'conv2d_transpose' + ) helper = LayerHelper(op_type, **locals()) pre_bias = helper.create_variable_for_type_inference(x.dtype) outputs = {"Output": [pre_bias]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) @@ -1160,15 +1410,17 @@ def conv2d_transpose(x, return out -def conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NCDHW", - name=None): +def conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NCDHW", + name=None, +): r""" The convolution3D layer calculates the output based on the input, filter @@ -1276,60 +1528,88 @@ def conv3d(x, if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format)) + "Attr(data_format): {}.".format(data_format) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". 
- format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] num_filters = weight.shape[0] if num_channels < 0: raise ValueError( "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels)) + "Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv3d should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv3d should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " "Received: number of channels({}), groups({}).".format( - num_channels, groups)) + num_channels, groups + ) + ) if num_filters % groups != 0: raise ValueError( "The number of filters must be divisible by Attr(groups). " "Received: number of filters({}), groups({}).".format( - num_filters, groups)) + num_filters, groups + ) + ) cudnn_version = get_cudnn_version() - use_cudnn = True if (is_compiled_with_cuda() - and cudnn_version is not None) else False + use_cudnn = ( + True + if (is_compiled_with_cuda() and cudnn_version is not None) + else False + ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3) stride = convert_to_list(stride, 3, 'stride') dilation = convert_to_list(dilation, 3, 'dilation') op_type = "conv3d" - return _conv_nd(x, weight, bias, stride, padding, padding_algorithm, - dilation, groups, data_format, channel_dim, op_type, - use_cudnn, False, name) - - -def conv3d_transpose(x, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, - output_size=None, - data_format='NCDHW', - name=None): + return _conv_nd( + x, + weight, + bias, + stride, + padding, + padding_algorithm, + dilation, + groups, + data_format, + channel_dim, + op_type, + use_cudnn, + False, + name, + ) + + +def conv3d_transpose( + x, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, + output_size=None, + data_format='NCDHW', + name=None, +): r""" The convolution3d transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) @@ -1463,29 +1743,37 @@ def conv3d_transpose(x, if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format)) + "Attr(data_format): {}.".format(data_format) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] num_filters = weight.shape[1] if num_channels < 0: raise ValueError( "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels)) + "Received: {}.".format(x.shape, num_channels) + ) if groups <= 0: raise ValueError( - "The groups of conv3d_transpose should be greater than 0. Received groups: {}" - .format(groups)) + "The groups of conv3d_transpose should be greater than 0. Received groups: {}".format( + groups + ) + ) if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). 
" "Received: number of channels({}), groups({}).".format( - num_channels, groups)) + num_channels, groups + ) + ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3) stride = convert_to_list(stride, 3, 'stride') @@ -1494,13 +1782,16 @@ def conv3d_transpose(x, output_size = [] else: if output_padding != 0: - raise ValueError('output_padding option is mutually exclusive with ' - 'output_size') + raise ValueError( + 'output_padding option is mutually exclusive with ' + 'output_size' + ) if isinstance(output_size, (list, tuple, int)): output_size = convert_to_list(output_size, 3, 'output_size') else: raise ValueError( - "output_size should be int, or list, tuple of ints") + "output_size should be int, or list, tuple of ints" + ) if output_padding == 0: output_padding = [] @@ -1509,28 +1800,55 @@ def conv3d_transpose(x, cudnn_version = get_cudnn_version() - #TODO(LielinJiang): whether to use cudnn according to the version of cudnn - use_cudnn = True if (is_compiled_with_cuda() - and cudnn_version is not None) else False + # TODO(LielinJiang): whether to use cudnn according to the version of cudnn + use_cudnn = ( + True + if (is_compiled_with_cuda() and cudnn_version is not None) + else False + ) op_type = 'conv3d_transpose' data_format_ = "NHWC" if channel_last else "NCHW" if in_dygraph_mode(): - pre_bias = _C_ops.conv3d_transpose(x, weight, stride, padding, - output_padding, output_size, - padding_algorithm, groups, dilation, - data_format_) + pre_bias = _C_ops.conv3d_transpose( + x, + weight, + stride, + padding, + output_padding, + output_size, + padding_algorithm, + groups, + dilation, + data_format_, + ) if bias is not None: return nn.elementwise_add(pre_bias, bias, axis=channel_dim) else: return pre_bias if _in_legacy_dygraph(): - attrs = ('output_padding', output_padding, 'output_size', output_size, - 'paddings', padding, "padding_algorithm", padding_algorithm, - 'strides', stride, 'dilations', dilation, 'groups', groups, - 'use_cudnn', use_cudnn, "data_format", data_format_) + attrs = ( + 'output_padding', + output_padding, + 'output_size', + output_size, + 'paddings', + padding, + "padding_algorithm", + padding_algorithm, + 'strides', + stride, + 'dilations', + dilation, + 'groups', + groups, + 'use_cudnn', + use_cudnn, + "data_format", + data_format_, + ) pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) @@ -1547,19 +1865,19 @@ def conv3d_transpose(x, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, - "data_format": data_format_ + "data_format": data_format_, } helper = LayerHelper(op_type, **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'conv3d') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'conv3d' + ) pre_bias = helper.create_variable_for_type_inference(x.dtype) outputs = {"Output": [pre_bias]} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=channel_dim) else: diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py index 1c29d509741018623ea3436c8116fc984053fbb1..c1d40a83cfb0d9b9ca7d744a19bc3deec557ded4 100644 --- a/python/paddle/nn/functional/distance.py +++ b/python/paddle/nn/functional/distance.py @@ -21,7 +21,7 @@ from paddle.fluid.framework import in_dygraph_mode, 
_in_legacy_dygraph __all__ = [] -def pairwise_distance(x, y, p=2., epsilon=1e-6, keepdim=False, name=None): +def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None): r""" It computes the pairwise distance between two vectors. The distance is calculated by p-oreder norm: @@ -70,40 +70,42 @@ def pairwise_distance(x, y, p=2., epsilon=1e-6, keepdim=False, name=None): sub = _C_ops.subtract(x, y) # p_norm op has not uesd epsilon, so change it to the following. if epsilon != 0.0: - epsilon = paddle.fluid.dygraph.base.to_variable([epsilon], - dtype=sub.dtype) + epsilon = paddle.fluid.dygraph.base.to_variable( + [epsilon], dtype=sub.dtype + ) sub = _C_ops.add(sub, epsilon) - return _C_ops.p_norm(sub, p, -1, 0., keepdim, False) + return _C_ops.p_norm(sub, p, -1, 0.0, keepdim, False) if _in_legacy_dygraph(): sub = _legacy_C_ops.elementwise_sub(x, y) if epsilon != 0.0: - epsilon = paddle.fluid.dygraph.base.to_variable([epsilon], - dtype=sub.dtype) + epsilon = paddle.fluid.dygraph.base.to_variable( + [epsilon], dtype=sub.dtype + ) sub = _legacy_C_ops.elementwise_add(sub, epsilon) - return _legacy_C_ops.p_norm(sub, 'axis', -1, 'porder', p, 'keepdim', - keepdim, 'epsilon', 0.) + return _legacy_C_ops.p_norm( + sub, 'axis', -1, 'porder', p, 'keepdim', keepdim, 'epsilon', 0.0 + ) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'PairwiseDistance') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'PairwiseDistance') sub = paddle.subtract(x, y) if epsilon != 0.0: epsilon_var = sub.block.create_var(dtype=sub.dtype) - epsilon_var = paddle.full(shape=[1], - fill_value=epsilon, - dtype=sub.dtype) + epsilon_var = paddle.full( + shape=[1], fill_value=epsilon, dtype=sub.dtype + ) sub = paddle.add(sub, epsilon_var) helper = LayerHelper("PairwiseDistance", name=name) attrs = { 'axis': -1, 'porder': p, 'keepdim': keepdim, - 'epsilon': 0., + 'epsilon': 0.0, } out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='p_norm', - inputs={'X': sub}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='p_norm', inputs={'X': sub}, outputs={'Out': out}, attrs=attrs + ) return out diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index cc11472643c21b7015fade4839d4682dad2b5729..1cc9ad6caf4364defaa72a86ec1783e1575ca516 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -22,7 +22,11 @@ from ...tensor.creation import assign from ...tensor.layer_function_generator import templatedoc from paddle import in_dynamic_mode from paddle import _C_ops, _legacy_C_ops -from ...fluid.framework import _non_static_mode, _in_legacy_dygraph, in_dygraph_mode +from ...fluid.framework import ( + _non_static_mode, + _in_legacy_dygraph, + in_dygraph_mode, +) from ...fluid.data_feeder import check_variable_and_dtype, check_type from ...framework import core, convert_np_dtype_to_dtype_ @@ -102,50 +106,55 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1): if in_dygraph_mode(): return _C_ops.diag_embed(input, offset, dim1, dim2) elif in_dynamic_mode(): - return _legacy_C_ops.diag_embed(input, "offset", offset, "dim1", dim1, - "dim2", dim2) + return _legacy_C_ops.diag_embed( + input, "offset", offset, "dim1", dim1, "dim2", dim2 + ) inputs = {'Input': [input]} attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2} def __check_input(input, offset, dim1, dim2): - check_dtype(input.dtype, 'Input', - ['int32', 'int64', 'float16', 'float32', 'float64'], - 'diag_embed') + check_dtype( + 
input.dtype, + 'Input', + ['int32', 'int64', 'float16', 'float32', 'float64'], + 'diag_embed', + ) input_shape = list(input.shape) - assert len(input_shape) >= 1, \ - "Input must be at least 1-dimensional, " \ - "But received Input's dimensional: %s.\n" % \ - len(input_shape) + assert len(input_shape) >= 1, ( + "Input must be at least 1-dimensional, " + "But received Input's dimensional: %s.\n" % len(input_shape) + ) - assert np.abs(dim1) <= len(input_shape), \ - "Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert np.abs(dim1) <= len(input_shape), ( + "Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape) + 1), len(input_shape), dim1) + ) - assert np.abs(dim2) <= len(input_shape), \ - "Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert np.abs(dim2) <= len(input_shape), ( + "Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape) + 1), len(input_shape), dim2) + ) dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1 dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1 - assert dim1_ != dim2_, \ - "dim1 and dim2 cannot be the same dimension." \ - "But received dim1 = %d, dim2 = %d\n"%(dim1, dim2) + assert dim1_ != dim2_, ( + "dim1 and dim2 cannot be the same dimension." + "But received dim1 = %d, dim2 = %d\n" % (dim1, dim2) + ) __check_input(input, offset, dim1, dim2) helper = LayerHelper("diag_embed", **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='diag_embed', - inputs={'Input': [input]}, - attrs={ - 'offset': offset, - 'dim1': dim1, - 'dim2': dim2 - }, - outputs={'Out': [out]}) + helper.append_op( + type='diag_embed', + inputs={'Input': [input]}, + attrs={'offset': offset, 'dim1': dim1, 'dim2': dim2}, + outputs={'Out': [out]}, + ) out.stop_gradient = True return out @@ -233,10 +242,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None): else: attrs['maxlen'] = maxlen - helper.append_op(type='sequence_mask', - inputs=inputs, - outputs={'Y': out}, - attrs=attrs) + helper.append_op( + type='sequence_mask', inputs=inputs, outputs={'Y': out}, attrs=attrs + ) out.stop_gradient = True return out @@ -317,18 +325,19 @@ def gather_tree(ids, parents): return _legacy_C_ops.gather_tree(ids, parents) else: helper = LayerHelper('gather_tree', **locals()) - check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], - 'gather_tree') - check_variable_and_dtype(parents, 'parents', ['int32', 'int64'], - 'gather_tree') + check_variable_and_dtype( + ids, 'ids', ['int32', 'int64'], 'gather_tree' + ) + check_variable_and_dtype( + parents, 'parents', ['int32', 'int64'], 'gather_tree' + ) out = helper.create_variable_for_type_inference(dtype=ids.dtype) - helper.append_op(type="gather_tree", - inputs={ - "Ids": ids, - "Parents": parents - }, - outputs={"Out": out}) + helper.append_op( + type="gather_tree", + inputs={"Ids": ids, "Parents": parents}, + outputs={"Out": out}, + ) return out @@ -365,14 +374,22 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) """ if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. " - "Received Attr(data_format): {}.".format(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCHW' or 'NHWC'. 
" + "Received Attr(data_format): {}.".format(data_format) + ) if in_dygraph_mode(): return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format) if _non_static_mode(): - return _legacy_C_ops.temporal_shift(x, 'seg_num', seg_num, - 'shift_ratio', shift_ratio, - 'data_format', data_format) + return _legacy_C_ops.temporal_shift( + x, + 'seg_num', + seg_num, + 'shift_ratio', + shift_ratio, + 'data_format', + data_format, + ) helper = LayerHelper("temporal_shift", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift') @@ -384,12 +401,14 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): if not isinstance(seg_num, int): raise TypeError("seg_num must be int type.") - helper.append_op(type="temporal_shift", - inputs={"X": x}, - outputs={"Out": out}, - attrs={ - "seg_num": seg_num, - "shift_ratio": shift_ratio, - "data_format": data_format - }) + helper.append_op( + type="temporal_shift", + inputs={"X": x}, + outputs={"Out": out}, + attrs={ + "seg_num": seg_num, + "shift_ratio": shift_ratio, + "data_format": data_format, + }, + ) return out diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index 8df602e5c776d16d7dcde09be6f7bd0c12428888..674483208204cfb93b073fea94d81d27c945590e 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -88,15 +88,18 @@ def one_hot(x, num_classes, name=None): return _C_ops.one_hot(x, num_classes) else: if _in_legacy_dygraph(): - return _legacy_C_ops.one_hot_v2(x, 'depth', num_classes, - 'allow_out_of_range', False) + return _legacy_C_ops.one_hot_v2( + x, 'depth', num_classes, 'allow_out_of_range', False + ) else: - check_variable_and_dtype(x, 'input', ['int32', 'int64'], - 'one_hot_v2') + check_variable_and_dtype( + x, 'input', ['int32', 'int64'], 'one_hot_v2' + ) helper = LayerHelper("one_hot_v2", **locals()) one_hot_out = helper.create_variable_for_type_inference( - dtype='float32') + dtype='float32' + ) if not isinstance(num_classes, Variable): # user attribute inputs = {'X': x} @@ -105,11 +108,13 @@ def one_hot(x, num_classes, name=None): num_classes.stop_gradient = True inputs = {'X': x, 'depth_tensor': num_classes} attrs = {'allow_out_of_range': False} - helper.append_op(type="one_hot_v2", - inputs=inputs, - attrs=attrs, - outputs={'Out': one_hot_out}, - stop_gradient=True) + helper.append_op( + type="one_hot_v2", + inputs=inputs, + attrs=attrs, + outputs={'Out': one_hot_out}, + stop_gradient=True, + ) return one_hot_out @@ -189,43 +194,61 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None): x=x, weight=w, sparse=True, name="embedding") """ - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - weight.shape[0] + padding_idx) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (weight.shape[0] + padding_idx) + ) if padding_idx >= weight.shape[0] or padding_idx < -weight.shape[0]: - raise ValueError("padding_idx must be within [-{}, {})".format( - weight.shape[0], weight.shape[0])) + raise ValueError( + "padding_idx must be within [-{}, {})".format( + weight.shape[0], weight.shape[0] + ) + ) if in_dygraph_mode(): return _C_ops.embedding(x, weight, padding_idx, sparse) elif _in_legacy_dygraph(): - return _legacy_C_ops.lookup_table_v2(weight, x, 'is_sparse', sparse, - 'is_distributed', False, - 'remote_prefetch', False, - 'padding_idx', padding_idx) + return _legacy_C_ops.lookup_table_v2( + weight, + x, + 'is_sparse', + 
sparse, + 'is_distributed', + False, + 'remote_prefetch', + False, + 'padding_idx', + padding_idx, + ) else: helper = LayerHelper('embedding', **locals()) dtype = helper.input_dtype(input_param_name='weight') - check_variable_and_dtype(x, 'input', - ['uint8', 'int8', 'int16', 'int32', 'int64'], - 'embedding') + check_variable_and_dtype( + x, + 'input', + ['uint8', 'int8', 'int16', 'int32', 'int64'], + 'embedding', + ) is_distributed = False remote_prefetch = sparse and (not is_distributed) tmp = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='lookup_table_v2', - inputs={ - 'Ids': x, - 'W': weight - }, - outputs={'Out': tmp}, - attrs={ - 'is_sparse': sparse, - 'is_distributed': is_distributed, - 'remote_prefetch': remote_prefetch, - 'padding_idx': padding_idx - }) + helper.append_op( + type='lookup_table_v2', + inputs={'Ids': x, 'W': weight}, + outputs={'Out': tmp}, + attrs={ + 'is_sparse': sparse, + 'is_distributed': is_distributed, + 'remote_prefetch': remote_prefetch, + 'padding_idx': padding_idx, + }, + ) return tmp diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index ed28bc2190261027e3650f528041a99b78dac7b4..48cda9d0b4f95347c0165953365743be89695d30 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -28,7 +28,12 @@ from paddle.utils import deprecated from paddle import _C_ops, _legacy_C_ops from paddle import in_dynamic_mode from paddle.framework import core, _non_static_mode -from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode, _current_expected_place +from ...fluid.framework import ( + _in_legacy_dygraph, + in_dygraph_mode, + _non_static_mode, + _current_expected_place, +) __all__ = [] @@ -77,25 +82,32 @@ def dice_loss(input, label, epsilon=0.00001, name=None): """ assert input.dtype in (paddle.float32, paddle.float64) assert label.dtype in (paddle.int32, paddle.int64) - assert len(input.shape) >= 2, \ - "The rank of input should be greater than or equal to 2." - assert len(input.shape) == len( - label.shape), ("The rank of input and label should be equal, " - "but received input: %d, label: %d." % - (len(input.shape), len(label.shape))) - assert label.shape[-1] == 1, ("The last dimension of label should be 1, " - "but received %d." % label.shape[-1]) - assert input.shape[:-1] == label.shape[:-1], ( - "All dimensions should be equal except the last one.") - assert input.numel() > 0 and label.numel() > 0, \ - "Any dimension of input and label cannot be equal to 0." + assert ( + len(input.shape) >= 2 + ), "The rank of input should be greater than or equal to 2." + assert len(input.shape) == len(label.shape), ( + "The rank of input and label should be equal, " + "but received input: %d, label: %d." + % (len(input.shape), len(label.shape)) + ) + assert label.shape[-1] == 1, ( + "The last dimension of label should be 1, " + "but received %d." % label.shape[-1] + ) + assert ( + input.shape[:-1] == label.shape[:-1] + ), "All dimensions should be equal except the last one." + assert ( + input.numel() > 0 and label.numel() > 0 + ), "Any dimension of input and label cannot be equal to 0." 
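# A small numeric sketch (not part of the patch) of the dice computation that follows,
# using hypothetical 2-class probabilities so the effect of the per-sample reductions
# is visible; the epsilon value mirrors the function's default.
import paddle
import paddle.nn.functional as F

probs = paddle.to_tensor([[0.7, 0.3], [0.2, 0.8]])   # [N=2, num_classes=2]
label = paddle.to_tensor([[0], [1]], dtype='int64')  # [N, 1], class indices
one_hot = F.one_hot(paddle.squeeze(label, [-1]), num_classes=2)
inse = paddle.sum(probs * one_hot, axis=1)           # per-sample intersection
denom = paddle.sum(probs, axis=1) + paddle.sum(one_hot, axis=1)
loss = paddle.mean(1 - 2 * inse / (denom + 1e-5))    # roughly 0.25 here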
label = paddle.squeeze(label, [-1]) label = paddle.nn.functional.one_hot(label, input.shape[-1]) reduce_dim = list(range(1, len(input.shape))) inse = paddle.sum(input * label, axis=reduce_dim) dice_denominator = paddle.sum(input, axis=reduce_dim) + paddle.sum( - label, axis=reduce_dim) + label, axis=reduce_dim + ) dice_score = 1 - inse * 2 / (dice_denominator + epsilon) return paddle.mean(dice_score) @@ -146,23 +158,24 @@ def log_loss(input, label, epsilon=1e-4, name=None): loss = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='log_loss', - inputs={ - 'Predicted': [input], - 'Labels': [label] - }, - outputs={'Loss': [loss]}, - attrs={'epsilon': epsilon}) + helper.append_op( + type='log_loss', + inputs={'Predicted': [input], 'Labels': [label]}, + outputs={'Loss': [loss]}, + attrs={'epsilon': epsilon}, + ) return loss -def fluid_softmax_with_cross_entropy(logits, - label, - soft_label=False, - ignore_index=-100, - numeric_stable_mode=True, - return_softmax=False, - axis=-1): +def fluid_softmax_with_cross_entropy( + logits, + label, + soft_label=False, + ignore_index=-100, + numeric_stable_mode=True, + return_softmax=False, + axis=-1, +): r""" This operator implements the cross entropy loss function with softmax. This function @@ -252,19 +265,41 @@ def fluid_softmax_with_cross_entropy(logits, if _non_static_mode(): if core.is_compiled_with_npu(): softmax, backprop, loss = _legacy_C_ops.softmax_with_cross_entropy( - logits, label, 'soft_label', soft_label, 'ignore_index', - ignore_index, 'numeric_stable_mode', numeric_stable_mode, - 'axis', axis) + logits, + label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + numeric_stable_mode, + 'axis', + axis, + ) else: if in_dygraph_mode(): softmax, loss = _C_ops.cross_entropy_with_softmax( - logits, label, soft_label, True, numeric_stable_mode, - ignore_index, axis) + logits, + label, + soft_label, + True, + numeric_stable_mode, + ignore_index, + axis, + ) if _in_legacy_dygraph(): softmax, loss = _legacy_C_ops.softmax_with_cross_entropy( - logits, label, 'soft_label', soft_label, 'ignore_index', - ignore_index, 'numeric_stable_mode', numeric_stable_mode, - 'axis', axis) + logits, + label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + numeric_stable_mode, + 'axis', + axis, + ) if not return_softmax: return loss else: @@ -274,7 +309,7 @@ def fluid_softmax_with_cross_entropy(logits, 'soft_label': soft_label, 'ignore_index': ignore_index, 'numeric_stable_mode': numeric_stable_mode, - 'axis': axis + 'axis': axis, } helper = LayerHelper('softmax_with_cross_entropy', **locals()) softmax = helper.create_variable_for_type_inference(dtype=logits.dtype) @@ -284,13 +319,12 @@ def fluid_softmax_with_cross_entropy(logits, if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): backprop = helper.create_variable_for_type_inference(dtype=logits.dtype) outputs['Backprop'] = backprop - helper.append_op(type='softmax_with_cross_entropy', - inputs={ - 'Logits': logits, - 'Label': label - }, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='softmax_with_cross_entropy', + inputs={'Logits': logits, 'Label': label}, + outputs=outputs, + attrs=attrs, + ) if return_softmax: return loss, softmax @@ -336,34 +370,37 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002): print(npair_loss) """ - check_variable_and_dtype(anchor, 'anchor', ['float32', 'float64'], - 'npair_loss') - check_variable_and_dtype(positive, 'positive', ['float32', 
'float64'], - 'positive') - check_variable_and_dtype(labels, 'labels', ['float32', 'float64', 'int64'], - 'labels') + check_variable_and_dtype( + anchor, 'anchor', ['float32', 'float64'], 'npair_loss' + ) + check_variable_and_dtype( + positive, 'positive', ['float32', 'float64'], 'positive' + ) + check_variable_and_dtype( + labels, 'labels', ['float32', 'float64', 'int64'], 'labels' + ) Beta = 0.25 batch_size = labels.shape[0] labels = paddle.reshape(labels, shape=[batch_size, 1]) labels = paddle.tile(labels, repeat_times=[1, batch_size]) - labels = paddle.equal(labels, paddle.transpose(labels, - perm=[1, - 0])).astype('float32') + labels = paddle.equal(labels, paddle.transpose(labels, perm=[1, 0])).astype( + 'float32' + ) labels = labels / paddle.sum(labels, axis=1, keepdim=True) - l2loss = paddle.mean(paddle.sum(paddle.square(anchor), 1)) \ - + paddle.mean(paddle.sum(paddle.square(positive), 1)) + l2loss = paddle.mean(paddle.sum(paddle.square(anchor), 1)) + paddle.mean( + paddle.sum(paddle.square(positive), 1) + ) l2loss = l2loss * Beta * l2_reg - similarity_matrix = paddle.matmul(anchor, - positive, - transpose_x=False, - transpose_y=True) - softmax_ce = fluid_softmax_with_cross_entropy(logits=similarity_matrix, - label=labels, - soft_label=True) + similarity_matrix = paddle.matmul( + anchor, positive, transpose_x=False, transpose_y=True + ) + softmax_ce = fluid_softmax_with_cross_entropy( + logits=similarity_matrix, label=labels, soft_label=True + ) cross_entropy = paddle.sum(labels * softmax_ce, 0) celoss = paddle.mean(cross_entropy) @@ -411,32 +448,35 @@ def square_error_cost(input, label): square_out = _legacy_C_ops.square(minus_out) return square_out - check_variable_and_dtype(input, "input", ['float32', 'float64'], - 'square_error_cost') - check_variable_and_dtype(label, "label", ['float32', 'float64'], - 'square_error_cost') + check_variable_and_dtype( + input, "input", ['float32', 'float64'], 'square_error_cost' + ) + check_variable_and_dtype( + label, "label", ['float32', 'float64'], 'square_error_cost' + ) helper = LayerHelper('square_error_cost', **locals()) minus_out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='elementwise_sub', - inputs={ - 'X': [input], - 'Y': [label] - }, - outputs={'Out': [minus_out]}) + helper.append_op( + type='elementwise_sub', + inputs={'X': [input], 'Y': [label]}, + outputs={'Out': [minus_out]}, + ) square_out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='square', - inputs={'X': [minus_out]}, - outputs={'Out': [square_out]}) + helper.append_op( + type='square', inputs={'X': [minus_out]}, outputs={'Out': [square_out]} + ) return square_out -def edit_distance(input, - label, - normalized=True, - ignored_tokens=None, - input_length=None, - label_length=None): +def edit_distance( + input, + label, + normalized=True, + ignored_tokens=None, + input_length=None, + label_length=None, +): """ This op computes the edit distances, also called Levenshtein distance, between a batch of hypothesis strings and their references. 
It measures how dissimilar two strings are by counting @@ -512,21 +552,26 @@ def edit_distance(input, erased_input = helper.create_variable_for_type_inference(dtype="int64") erased_label = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="sequence_erase", - inputs={"X": [input]}, - outputs={"Out": [erased_input]}, - attrs={"tokens": ignored_tokens}) + helper.append_op( + type="sequence_erase", + inputs={"X": [input]}, + outputs={"Out": [erased_input]}, + attrs={"tokens": ignored_tokens}, + ) input = erased_input - helper.append_op(type="sequence_erase", - inputs={"X": [label]}, - outputs={"Out": [erased_label]}, - attrs={"tokens": ignored_tokens}) + helper.append_op( + type="sequence_erase", + inputs={"X": [label]}, + outputs={"Out": [erased_label]}, + attrs={"tokens": ignored_tokens}, + ) label = erased_label if in_dygraph_mode(): - return _C_ops.edit_distance(input, label, input_length, label_length, - normalized) + return _C_ops.edit_distance( + input, label, input_length, label_length, normalized + ) this_inputs = {"Hyps": [input], "Refs": [label]} if input_length is not None and label_length is not None: @@ -536,22 +581,19 @@ def edit_distance(input, # edit distance op edit_distance_out = helper.create_variable_for_type_inference(dtype="int64") sequence_num = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="edit_distance", - inputs=this_inputs, - outputs={ - "Out": [edit_distance_out], - "SequenceNum": [sequence_num] - }, - attrs={"normalized": normalized}) + helper.append_op( + type="edit_distance", + inputs=this_inputs, + outputs={"Out": [edit_distance_out], "SequenceNum": [sequence_num]}, + attrs={"normalized": normalized}, + ) return edit_distance_out, sequence_num -def binary_cross_entropy(input, - label, - weight=None, - reduction='mean', - name=None): +def binary_cross_entropy( + input, label, weight=None, reduction='mean', name=None +): """ This op measures the binary_cross_entropy loss between input predictions ``input`` and target labels ``label`` . The binary_cross_entropy loss can be described as: @@ -619,8 +661,9 @@ def binary_cross_entropy(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in binary_cross_entropy should be 'sum', " - "'mean' or 'none', but received %s, which is not allowed." % - reduction) + "'mean' or 'none', but received %s, which is not allowed." 
+ % reduction + ) if in_dygraph_mode(): out = _C_ops.bce_loss(input, label) @@ -640,27 +683,32 @@ def binary_cross_entropy(input, if weight is not None: out = _legacy_C_ops.elementwise_mul(out, weight, 'axis', -1) if reduction == 'sum': - return _legacy_C_ops.reduce_sum(out, 'dim', [0], 'keep_dim', - False, "reduce_all", True) + return _legacy_C_ops.reduce_sum( + out, 'dim', [0], 'keep_dim', False, "reduce_all", True + ) elif reduction == 'mean': return _legacy_C_ops.mean(out) else: return out else: - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'binary_cross_entropy') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'binary_cross_entropy') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'binary_cross_entropy' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'binary_cross_entropy' + ) sub_name = name if weight is None and reduction == 'none' else None helper = LayerHelper("binary_cross_entropy", name=sub_name) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='bce_loss', - inputs={ - 'X': [input], - 'Label': [label], - }, - outputs={'Out': [out]}) + helper.append_op( + type='bce_loss', + inputs={ + 'X': [input], + 'Label': [label], + }, + outputs={'Out': [out]}, + ) if weight is not None: if isinstance(weight, paddle.static.Variable): @@ -668,7 +716,8 @@ def binary_cross_entropy(input, out = paddle.multiply(out, weight, name=weight_name) else: raise ValueError( - "The weight is not a Tensor, please convert to Tensor.") + "The weight is not a Tensor, please convert to Tensor." + ) if reduction == 'sum': return paddle.sum(out, name=name) @@ -678,12 +727,9 @@ def binary_cross_entropy(input, return out -def binary_cross_entropy_with_logits(logit, - label, - weight=None, - reduction='mean', - pos_weight=None, - name=None): +def binary_cross_entropy_with_logits( + logit, label, weight=None, reduction='mean', pos_weight=None, name=None +): r""" This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer. Also, we can see it as the combine of ``sigmoid_cross_entropy_with_logits`` @@ -765,16 +811,23 @@ def binary_cross_entropy_with_logits(logit, raise ValueError( "The value of 'reduction' in binary_cross_entropy_with_logits " "should be 'sum', 'mean' or 'none', but received %s, which is not allowed." 
- % reduction) + % reduction + ) if in_dygraph_mode(): - one = _C_ops.full([1], float(1.0), core.VarDesc.VarType.FP32, - _current_expected_place()) - out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label, False, - -100) + one = _C_ops.full( + [1], + float(1.0), + core.VarDesc.VarType.FP32, + _current_expected_place(), + ) + out = _C_ops.sigmoid_cross_entropy_with_logits( + logit, label, False, -100 + ) if pos_weight is not None: log_weight = _C_ops.add( - _C_ops.multiply(label, _C_ops.subtract(pos_weight, one)), one) + _C_ops.multiply(label, _C_ops.subtract(pos_weight, one)), one + ) out = _C_ops.multiply(out, log_weight) if weight is not None: out = _C_ops.multiply(out, weight) @@ -787,14 +840,27 @@ def binary_cross_entropy_with_logits(logit, return out elif _in_legacy_dygraph(): one = _varbase_creator(dtype=logit.dtype) - _legacy_C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu', - False, 'dtype', one.dtype, 'str_value', - '1.0', 'shape', [1]) + _legacy_C_ops.fill_constant( + one, + 'value', + float(1.0), + 'force_cpu', + False, + 'dtype', + one.dtype, + 'str_value', + '1.0', + 'shape', + [1], + ) out = _legacy_C_ops.sigmoid_cross_entropy_with_logits(logit, label) if pos_weight is not None: log_weight = _legacy_C_ops.elementwise_add( _legacy_C_ops.elementwise_mul( - label, _legacy_C_ops.elementwise_sub(pos_weight, one)), one) + label, _legacy_C_ops.elementwise_sub(pos_weight, one) + ), + one, + ) out = _legacy_C_ops.elementwise_mul(out, log_weight) if weight is not None: out = _legacy_C_ops.elementwise_mul(out, weight) @@ -806,30 +872,49 @@ def binary_cross_entropy_with_logits(logit, else: return out - check_variable_and_dtype(logit, 'logit', ['float32', 'float64'], - 'binary_cross_entropy_with_logits') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'binary_cross_entropy_with_logits') + check_variable_and_dtype( + logit, + 'logit', + ['float32', 'float64'], + 'binary_cross_entropy_with_logits', + ) + check_variable_and_dtype( + label, + 'label', + ['float32', 'float64'], + 'binary_cross_entropy_with_logits', + ) sigmoid_name = None if reduction == 'none' and pos_weight is None and weight is None: sigmoid_name = name out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits( - logit, label, name=sigmoid_name) + logit, label, name=sigmoid_name + ) one = paddle.full(shape=[1], fill_value=1.0, dtype=logit.dtype) if pos_weight is not None: - check_variable_and_dtype(pos_weight, 'pos_weight', - ['float32', 'float64'], - 'binary_cross_entropy_with_logits') + check_variable_and_dtype( + pos_weight, + 'pos_weight', + ['float32', 'float64'], + 'binary_cross_entropy_with_logits', + ) log_weight = paddle.add( - paddle.multiply(label, paddle.subtract(pos_weight, one)), one) - pos_weight_name = name if reduction == 'none' and weight is None else None + paddle.multiply(label, paddle.subtract(pos_weight, one)), one + ) + pos_weight_name = ( + name if reduction == 'none' and weight is None else None + ) out = paddle.multiply(out, log_weight, name=pos_weight_name) if weight is not None: - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'binary_cross_entropy_with_logits') + check_variable_and_dtype( + weight, + 'weight', + ['float32', 'float64'], + 'binary_cross_entropy_with_logits', + ) weight_name = name if reduction == 'none' else None out = paddle.multiply(out, weight, name=weight_name) @@ -840,15 +925,17 @@ def binary_cross_entropy_with_logits(logit, return out -def hsigmoid_loss(input, - label, - num_classes, - weight, - bias=None, - 
path_table=None, - path_code=None, - is_sparse=False, - name=None): +def hsigmoid_loss( + input, + label, + num_classes, + weight, + bias=None, + path_table=None, + path_code=None, + is_sparse=False, + name=None, +): """ The hierarchical sigmoid organizes the classes into a complete binary tree to reduce the computational complexity and speed up the model training, especially the training of language model. @@ -930,36 +1017,63 @@ def hsigmoid_loss(input, # [1.92374969]] """ if in_dygraph_mode(): - out, _, _ = _C_ops.hierarchical_sigmoid(input, weight, label, - path_table, path_code, bias, - num_classes, is_sparse, 0, [], - [], [], is_sparse) + out, _, _ = _C_ops.hierarchical_sigmoid( + input, + weight, + label, + path_table, + path_code, + bias, + num_classes, + is_sparse, + 0, + [], + [], + [], + is_sparse, + ) return out elif _in_legacy_dygraph(): out, _, _ = _legacy_C_ops.hierarchical_sigmoid( - input, weight, label, path_table, path_code, bias, 'num_classes', - num_classes, 'is_sparse', is_sparse, 'remote_prefetch', is_sparse) + input, + weight, + label, + path_table, + path_code, + bias, + 'num_classes', + num_classes, + 'is_sparse', + is_sparse, + 'remote_prefetch', + is_sparse, + ) return out - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'hsigmoid_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'hsigmoid_loss' + ) check_variable_and_dtype(label, 'label', ['int64'], 'hsigmoid_loss') - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'hsigmoid_loss') + check_variable_and_dtype( + weight, 'weight', ['float32', 'float64'], 'hsigmoid_loss' + ) if bias is not None: - check_variable_and_dtype(bias, 'bias', ['float32', 'float64'], - 'hsigmoid_loss') + check_variable_and_dtype( + bias, 'bias', ['float32', 'float64'], 'hsigmoid_loss' + ) if path_table is not None: - check_variable_and_dtype(path_table, 'path_table', ['int64'], - 'hsigmoid_loss') + check_variable_and_dtype( + path_table, 'path_table', ['int64'], 'hsigmoid_loss' + ) if path_code is not None: - check_variable_and_dtype(path_code, 'path_code', ['int64'], - 'hsigmoid_loss') + check_variable_and_dtype( + path_code, 'path_code', ['int64'], 'hsigmoid_loss' + ) attrs = { "num_classes": num_classes, "is_sparse": is_sparse, - "remote_prefetch": is_sparse + "remote_prefetch": is_sparse, } inputs = { @@ -968,7 +1082,7 @@ def hsigmoid_loss(input, "Bias": bias, "PathTable": path_table, "PathCode": path_code, - "Label": label + "Label": label, } helper = LayerHelper('hsigmoid_loss', **locals()) @@ -976,10 +1090,9 @@ def hsigmoid_loss(input, pre_out = helper.create_variable_for_type_inference(input.dtype) outputs = {"Out": out, "PreOut": pre_out, "W_Out": weight} - helper.append_op(type="hierarchical_sigmoid", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="hierarchical_sigmoid", inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -1036,34 +1149,35 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None): print(output) # [0.068004] """ - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'smooth_l1_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'smooth_l1_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'smooth_l1_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'smooth_l1_loss' + ) if in_dygraph_mode(): out, residual = _C_ops.huber_loss(input, label, delta) else: helper = LayerHelper('huber_loss', **locals()) 
residual = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) - helper.append_op(type='huber_loss', - inputs={ - 'X': input, - 'Y': label - }, - outputs={ - 'Out': out, - 'Residual': residual - }, - attrs={'delta': delta}) + dtype=helper.input_dtype() + ) + helper.append_op( + type='huber_loss', + inputs={'X': input, 'Y': label}, + outputs={'Out': out, 'Residual': residual}, + attrs={'delta': delta}, + ) if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in smooth_l1_loss should be 'sum', 'mean' or" - " 'none', but received %s, which is not allowed." % reduction) + " 'none', but received %s, which is not allowed." % reduction + ) if reduction == 'none': return out elif reduction == 'mean': @@ -1072,12 +1186,9 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None): return paddle.sum(out) -def margin_ranking_loss(input, - other, - label, - margin=0.0, - reduction='mean', - name=None): +def margin_ranking_loss( + input, other, label, margin=0.0, reduction='mean', name=None +): r""" Calcluate the margin rank loss between the input, other and label, use the math function as follows. @@ -1123,7 +1234,8 @@ def margin_ranking_loss(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) if in_dygraph_mode(): out = _C_ops.subtract(other, input) out = _C_ops.multiply(out, label) @@ -1150,12 +1262,15 @@ def margin_ranking_loss(input, return out helper = LayerHelper("margin_ranking_loss", **locals()) - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'margin_rank_loss') - check_variable_and_dtype(other, 'other', ['float32', 'float64'], - 'margin_rank_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'margin_rank_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'margin_rank_loss' + ) + check_variable_and_dtype( + other, 'other', ['float32', 'float64'], 'margin_rank_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'margin_rank_loss' + ) out = paddle.subtract(input, other) neg_label = paddle.neg(label) @@ -1169,24 +1284,28 @@ def margin_ranking_loss(input, result_out = helper.create_variable_for_type_inference(input.dtype) if reduction == 'none': - helper.append_op(type="relu", - inputs={"X": out}, - outputs={"Out": result_out}) + helper.append_op( + type="relu", inputs={"X": out}, outputs={"Out": result_out} + ) return result_out elif reduction == 'sum': out = paddle.nn.functional.relu(out) attrs = {"dim": [0], "keep_dim": False, "reduce_all": True} - helper.append_op(type="reduce_sum", - inputs={"X": out}, - outputs={"Out": result_out}, - attrs=attrs) + helper.append_op( + type="reduce_sum", + inputs={"X": out}, + outputs={"Out": result_out}, + attrs=attrs, + ) return result_out elif reduction == 'mean': out = paddle.nn.functional.relu(out) - helper.append_op(type="mean", - inputs={"X": out}, - outputs={"Out": result_out}, - attrs={}) + helper.append_op( + type="mean", + inputs={"X": out}, + outputs={"Out": result_out}, + attrs={}, + ) return result_out @@ -1250,7 +1369,8 @@ def l1_loss(input, label, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 
'reduction' in L1Loss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) if in_dygraph_mode(): unreduced = _C_ops.abs(_C_ops.subtract(input, label)) @@ -1262,25 +1382,24 @@ def l1_loss(input, label, reduction='mean', name=None): else: return unreduced elif _in_legacy_dygraph(): - unreduced = _elementwise_op_in_dygraph(input, - label, - axis=-1, - act='abs', - op_name='elementwise_sub') + unreduced = _elementwise_op_in_dygraph( + input, label, axis=-1, act='abs', op_name='elementwise_sub' + ) if reduction == 'mean': return _legacy_C_ops.mean(unreduced) elif reduction == 'sum': - return _legacy_C_ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim', - False, 'reduce_all', True) + return _legacy_C_ops.reduce_sum( + unreduced, 'dim', [0], 'keep_dim', False, 'reduce_all', True + ) else: return unreduced - check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], - 'l1_loss') - check_variable_and_dtype(label, 'label', - ['float32', 'float64', 'int32', 'int64'], - 'l1_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64', 'int32', 'int64'], 'l1_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64', 'int32', 'int64'], 'l1_loss' + ) if reduction == 'sum': unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs') @@ -1289,18 +1408,14 @@ def l1_loss(input, label, reduction='mean', name=None): unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs') return paddle.mean(unreduced, name=name) else: - return paddle.fluid.layers.elementwise_sub(input, - label, - act='abs', - name=name) - - -def nll_loss(input, - label, - weight=None, - ignore_index=-100, - reduction='mean', - name=None): + return paddle.fluid.layers.elementwise_sub( + input, label, act='abs', name=name + ) + + +def nll_loss( + input, label, weight=None, ignore_index=-100, reduction='mean', name=None +): """ This api returns negative log likelihood. See more detail in :ref:`api_nn_loss_NLLLoss` . @@ -1349,13 +1464,15 @@ def nll_loss(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in nll_loss should be 'sum', 'mean' or " - "'none', but received %s, which is not allowed." % reduction) + "'none', but received %s, which is not allowed." 
% reduction + ) input_shape = list(input.shape) input_dims = len(input_shape) if input_dims < 2: raise ValueError( - 'Expected 2 or more dimensions (got {})'.format(input_dims)) + 'Expected 2 or more dimensions (got {})'.format(input_dims) + ) n = input_shape[0] c = input_shape[1] if in_dygraph_mode(): @@ -1363,21 +1480,29 @@ def nll_loss(input, input = _C_ops.reshape(input, [n, c, 1, -1]) label = _C_ops.reshape(label, [n, 1, -1]) out_shape = [n] + input_shape[2:] - out, total_weight = _C_ops.nll_loss(input, label, weight, ignore_index, - reduction) + out, total_weight = _C_ops.nll_loss( + input, label, weight, ignore_index, reduction + ) if input_dims != 2 and input_dims != 4 and reduction == 'none': out = _C_ops.reshape(out, out_shape) return out elif _in_legacy_dygraph(): if input_dims != 2 and input_dims != 4: - input, _ = _legacy_C_ops.reshape2(input, None, 'shape', - [n, c, 1, -1]) + input, _ = _legacy_C_ops.reshape2( + input, None, 'shape', [n, c, 1, -1] + ) label, _ = _legacy_C_ops.reshape2(label, None, 'shape', [n, 1, -1]) out_shape = [n] + input_shape[2:] - out, total_weight = _legacy_C_ops.nll_loss(input, label, weight, - 'ignore_index', ignore_index, - 'reduction', reduction) + out, total_weight = _legacy_C_ops.nll_loss( + input, + label, + weight, + 'ignore_index', + ignore_index, + 'reduction', + reduction, + ) if input_dims != 2 and input_dims != 4 and reduction == 'none': out, _ = _legacy_C_ops.reshape2(out, None, 'shape', out_shape) return out @@ -1401,10 +1526,9 @@ def nll_loss(input, total_weight = helper.create_variable_for_type_inference(dtype=input.dtype) outputs = {'Out': out, 'Total_weight': total_weight} - helper.append_op(type='nll_loss', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='nll_loss', inputs=inputs, outputs=outputs, attrs=attrs + ) if input_dims != 2 and input_dims != 4 and reduction == 'none': out = reshape(out, shape=out_shape) @@ -1482,13 +1606,15 @@ def kl_div(input, label, reduction='mean', name=None): """ # ugly type promotion - if fluid.data_feeder.convert_dtype( - input.dtype) == 'float32' and fluid.data_feeder.convert_dtype( - label.dtype) == 'float64': + if ( + fluid.data_feeder.convert_dtype(input.dtype) == 'float32' + and fluid.data_feeder.convert_dtype(label.dtype) == 'float64' + ): input = paddle.cast(input, 'float64') - elif fluid.data_feeder.convert_dtype( - input.dtype) == 'float64' and fluid.data_feeder.convert_dtype( - label.dtype) == 'float32': + elif ( + fluid.data_feeder.convert_dtype(input.dtype) == 'float64' + and fluid.data_feeder.convert_dtype(label.dtype) == 'float32' + ): label = paddle.cast(label, 'float64') if in_dygraph_mode(): @@ -1521,13 +1647,12 @@ def kl_div(input, label, reduction='mean', name=None): fluid.data_feeder.check_type(reduction, 'reduction', str, 'kl_div') loss = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='kldiv_loss', - inputs={ - 'X': input, - 'Target': label - }, - outputs={'Loss': loss}, - attrs={'reduction': 'none'}) + helper.append_op( + type='kldiv_loss', + inputs={'X': input, 'Target': label}, + outputs={'Loss': loss}, + attrs={'reduction': 'none'}, + ) if reduction == 'mean': loss = paddle.mean(loss) @@ -1590,31 +1715,38 @@ def mse_loss(input, label, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'mse_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) if not in_dynamic_mode(): - 
check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'mse_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'mse_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'mse_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'mse_loss' + ) if reduction == 'none': return paddle.square(paddle.subtract(input, label), name=name) elif reduction == 'mean': - return paddle.mean(paddle.square(paddle.subtract(input, label)), - name=name) + return paddle.mean( + paddle.square(paddle.subtract(input, label)), name=name + ) else: - return paddle.sum(paddle.square(paddle.subtract(input, label)), - name=name) + return paddle.sum( + paddle.square(paddle.subtract(input, label)), name=name + ) -def ctc_loss(log_probs, - labels, - input_lengths, - label_lengths, - blank=0, - reduction='mean', - norm_by_times=False): +def ctc_loss( + log_probs, + labels, + input_lengths, + label_lengths, + blank=0, + reduction='mean', + norm_by_times=False, +): """ An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc) @@ -1693,8 +1825,9 @@ def ctc_loss(log_probs, """ - loss_out = fluid.layers.warpctc(log_probs, labels, blank, norm_by_times, - input_lengths, label_lengths) + loss_out = fluid.layers.warpctc( + log_probs, labels, blank, norm_by_times, input_lengths, label_lengths + ) loss_out = paddle.squeeze(loss_out, [-1]) assert reduction in ['mean', 'sum', 'none'] @@ -1705,15 +1838,17 @@ def ctc_loss(log_probs, return loss_out -def margin_cross_entropy(logits, - label, - margin1=1.0, - margin2=0.5, - margin3=0.0, - scale=64.0, - group=None, - return_softmax=False, - reduction='mean'): +def margin_cross_entropy( + logits, + label, + margin1=1.0, + margin2=0.5, + margin3=0.0, + scale=64.0, + group=None, + return_softmax=False, + reduction='mean', +): r""" .. 
math:: @@ -1901,7 +2036,10 @@ def margin_cross_entropy(logits, if not (group == False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ - (got group: {})'.format(group)) + (got group: {})'.format( + group + ) + ) return if hasattr(group, 'is_member') and not group.is_member(): @@ -1915,8 +2053,11 @@ def margin_cross_entropy(logits, if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() global_rank = parallel_env.rank - rank = global_rank if group is None else group.get_group_rank( - global_rank) + rank = ( + global_rank + if group is None + else group.get_group_rank(global_rank) + ) nranks = parallel_env.world_size if group is None else group.nranks input_dims = len(list(logits.shape)) @@ -1924,15 +2065,26 @@ def margin_cross_entropy(logits, if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( 'Expected input_dims - 1 = label_dims or input_dims == label_dims\ - (got nput_dims{}, label_dims{})'.format(input_dims, label_dims)) + (got nput_dims{}, label_dims{})'.format( + input_dims, label_dims + ) + ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) if in_dygraph_mode(): - softmax, loss = _C_ops.margin_cross_entropy(logits, label, - return_softmax, ring_id, - rank, nranks, margin1, - margin2, margin3, scale) + softmax, loss = _C_ops.margin_cross_entropy( + logits, + label, + return_softmax, + ring_id, + rank, + nranks, + margin1, + margin2, + margin3, + scale, + ) if reduction == 'mean': loss = paddle.mean(loss) elif reduction == 'sum': @@ -1943,9 +2095,25 @@ def margin_cross_entropy(logits, return loss, softmax elif _in_legacy_dygraph(): softmax, loss = _legacy_C_ops.margin_cross_entropy( - logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks, - 'margin1', margin1, 'margin2', margin2, 'margin3', margin3, 'scale', - scale, 'return_softmax', return_softmax) + logits, + label, + 'ring_id', + ring_id, + 'rank', + rank, + 'nranks', + nranks, + 'margin1', + margin1, + 'margin2', + margin2, + 'margin3', + margin3, + 'scale', + scale, + 'return_softmax', + return_softmax, + ) if reduction == 'mean': loss = paddle.mean(loss) elif reduction == 'sum': @@ -1960,31 +2128,31 @@ def margin_cross_entropy(logits, softmax = helper.create_variable_for_type_inference(dtype=logits.dtype) loss = helper.create_variable_for_type_inference(dtype=logits.dtype) - check_variable_and_dtype(logits, 'logits', - ['float16', 'float32', 'float64'], - 'margin_cross_entropy') - check_variable_and_dtype(label, 'label', ['int32', 'int64'], - 'margin_cross_entropy') - - helper.append_op(type=op_type, - inputs={ - 'Logits': logits, - 'Label': label - }, - outputs={ - 'Softmax': softmax, - 'Loss': loss - }, - attrs={ - 'return_softmax': return_softmax, - 'ring_id': ring_id, - 'rank': rank, - 'nranks': nranks, - 'margin1': margin1, - 'margin2': margin2, - 'margin3': margin3, - 'scale': scale, - }) + check_variable_and_dtype( + logits, + 'logits', + ['float16', 'float32', 'float64'], + 'margin_cross_entropy', + ) + check_variable_and_dtype( + label, 'label', ['int32', 'int64'], 'margin_cross_entropy' + ) + + helper.append_op( + type=op_type, + inputs={'Logits': logits, 'Label': label}, + outputs={'Softmax': softmax, 'Loss': loss}, + attrs={ + 'return_softmax': return_softmax, + 'ring_id': ring_id, + 'rank': rank, + 'nranks': nranks, + 'margin1': margin1, + 'margin2': margin2, + 'margin3': margin3, + 'scale': scale, + }, + ) if reduction == 
'mean': loss = paddle.mean(loss) @@ -2001,16 +2169,20 @@ def margin_cross_entropy(logits, since="2.0.0", update_to="paddle.nn.functional.cross_entropy", level=1, - reason= - ('Please notice that behavior of "paddle.nn.functional.softmax_with_cross_entropy" ' - 'and "paddle.nn.functional.cross_entropy" is different.')) -def softmax_with_cross_entropy(logits, - label, - soft_label=False, - ignore_index=-100, - numeric_stable_mode=True, - return_softmax=False, - axis=-1): + reason=( + 'Please notice that behavior of "paddle.nn.functional.softmax_with_cross_entropy" ' + 'and "paddle.nn.functional.cross_entropy" is different.' + ), +) +def softmax_with_cross_entropy( + logits, + label, + soft_label=False, + ignore_index=-100, + numeric_stable_mode=True, + return_softmax=False, + axis=-1, +): r""" This operator implements the cross entropy loss function with softmax. This function combines the calculation of the softmax operation and the cross entropy loss function @@ -2096,20 +2268,28 @@ def softmax_with_cross_entropy(logits, out = paddle.nn.functional.softmax_with_cross_entropy(logits=x, label=label) print(out) """ - return fluid_softmax_with_cross_entropy(logits, label, soft_label, - ignore_index, numeric_stable_mode, - return_softmax, axis) - - -def cross_entropy(input, - label, - weight=None, - ignore_index=-100, - reduction='mean', - soft_label=False, - axis=-1, - use_softmax=True, - name=None): + return fluid_softmax_with_cross_entropy( + logits, + label, + soft_label, + ignore_index, + numeric_stable_mode, + return_softmax, + axis, + ) + + +def cross_entropy( + input, + label, + weight=None, + ignore_index=-100, + reduction='mean', + soft_label=False, + axis=-1, + use_softmax=True, + name=None, +): r""" By default, this operator implements the cross entropy loss function with softmax. This function combines the calculation of the softmax operation and the cross entropy loss function @@ -2355,12 +2535,14 @@ def cross_entropy(input, raise ValueError( "The value of 'reduction' in softmax_cross_entropy" "should be 'sum', 'mean' or 'none', but received %s, which is not allowed." - % reduction) + % reduction + ) if ignore_index > 0 and soft_label == True: raise ValueError( "When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy" - "should be '-100', but received %s, which is not allowed." % - ignore_index) + "should be '-100', but received %s, which is not allowed." 
+ % ignore_index + ) input_dims = len(list(input.shape)) if input_dims == 0: @@ -2370,29 +2552,53 @@ def cross_entropy(input, if input_dims - 1 != label_dims and input_dims != label_dims: raise ValueError( 'Expected nput_dims - 1 = label_dims or input_dims == label_dims\ - (got nput_dims{}, label_dims{})'.format(input_dims, label_dims)) + (got nput_dims{}, label_dims{})'.format( + input_dims, label_dims + ) + ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=axis) if in_dygraph_mode(): if soft_label == False: - valid_label = paddle.cast(label != ignore_index, - dtype=label.dtype) * label + valid_label = ( + paddle.cast(label != ignore_index, dtype=label.dtype) * label + ) if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): if soft_label == False: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( - input, valid_label, 'soft_label', soft_label, - 'ignore_index', ignore_index, 'numeric_stable_mode', True, - 'axis', axis, 'use_softmax', use_softmax) + input, + valid_label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + True, + 'axis', + axis, + 'use_softmax', + use_softmax, + ) else: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( - input, label, 'soft_label', soft_label, 'ignore_index', - ignore_index, 'numeric_stable_mode', True, 'axis', axis, - 'use_softmax', use_softmax) + input, + label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + True, + 'axis', + axis, + 'use_softmax', + use_softmax, + ) else: - _, out = _C_ops.cross_entropy_with_softmax(input, label, soft_label, - use_softmax, True, - ignore_index, axis) + _, out = _C_ops.cross_entropy_with_softmax( + input, label, soft_label, use_softmax, True, ignore_index, axis + ) if weight is not None: @@ -2402,11 +2608,12 @@ def cross_entropy(input, # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W]. 
- weight_gather = paddle.matmul(x=paddle.cast( - label, weight.dtype), - y=weight, - transpose_x=False, - transpose_y=True) + weight_gather = paddle.matmul( + x=paddle.cast(label, weight.dtype), + y=weight, + transpose_x=False, + transpose_y=True, + ) out_shape = list(out.shape) weight_gather_reshape = reshape(weight_gather, shape=out_shape) out = paddle.cast(out, weight_gather_reshape.dtype) @@ -2417,29 +2624,44 @@ def cross_entropy(input, raise ValueError( "input's class_dimension({}) must equal to " "weight's class_dimension({}) " - "when weight is provided" \ - .format(input.shape[axis], weight.shape[-1])) - - ignore_weight_mask = paddle.cast((label != ignore_index), - out.dtype) - if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[ - axis] == 1: + "when weight is provided".format( + input.shape[axis], weight.shape[-1] + ) + ) + + ignore_weight_mask = paddle.cast( + (label != ignore_index), out.dtype + ) + if ( + ignore_weight_mask.ndim > 1 + and ignore_weight_mask.shape[axis] == 1 + ): # TODO: Temporarily use squeeze instead of squeeze_ - ignore_weight_mask = paddle.squeeze(ignore_weight_mask, - axis) + ignore_weight_mask = paddle.squeeze( + ignore_weight_mask, axis + ) if axis != -1 and axis != valid_label.ndim - 1: - temp_perm = list(range(axis % valid_label.ndim)) \ - + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \ - + [axis % valid_label.ndim] + temp_perm = ( + list(range(axis % valid_label.ndim)) + + list( + range( + (axis % valid_label.ndim + 1), valid_label.ndim + ) + ) + + [axis % valid_label.ndim] + ) weight_gather = _C_ops.gather_nd( - weight, valid_label.transpose(temp_perm)) + weight, valid_label.transpose(temp_perm) + ) else: weight_gather = _C_ops.gather_nd(weight, valid_label) - weight_gather = _C_ops.multiply(weight_gather, - ignore_weight_mask) + weight_gather = _C_ops.multiply( + weight_gather, ignore_weight_mask + ) input_shape = list(label.shape) - weight_gather_reshape = reshape(weight_gather, - shape=input_shape) + weight_gather_reshape = reshape( + weight_gather, shape=input_shape + ) out = paddle.cast(out, weight_gather_reshape.dtype) out = _C_ops.multiply(out, weight_gather_reshape) @@ -2460,22 +2682,24 @@ def cross_entropy(input, # for each label[i],set 1 or 0, according to ignore_index # mask[i]=0, if label[i]==ignore_index # mask[i]=1, otherwise - mask = (label != ignore_index) + mask = label != ignore_index if weight is None: mask = paddle.cast(mask, dtype=out_sum.dtype) count = _C_ops.sum(mask, [], None, False) ret = out_sum / (count + (count == 0.0)) else: mask = paddle.cast(mask, weight_gather_reshape.dtype) - weight_ignored = _C_ops.multiply(mask, - weight_gather_reshape) + weight_ignored = _C_ops.multiply( + mask, weight_gather_reshape + ) weight_sum = _C_ops.sum(weight_ignored, [], None, False) ret = out_sum / (weight_sum + (weight_sum == 0.0)) return ret elif weight is not None: out_sum = _C_ops.sum(out, [], None, False) - total_weight = _C_ops.sum(weight_gather_reshape, [], None, - False) + total_weight = _C_ops.sum( + weight_gather_reshape, [], None, False + ) return out_sum / (total_weight + (total_weight == 0.0)) else: return _C_ops.mean_all(out) @@ -2487,32 +2711,65 @@ def cross_entropy(input, elif _in_legacy_dygraph(): if soft_label == False: - valid_label = paddle.cast(label != ignore_index, - dtype=label.dtype) * label + valid_label = ( + paddle.cast(label != ignore_index, dtype=label.dtype) * label + ) label_min = paddle.min(valid_label) label_max = paddle.max(valid_label) if label_min < 0: - raise 
ValueError("Target {} is out of lower bound.".format( - label_min.item())) + raise ValueError( + "Target {} is out of lower bound.".format(label_min.item()) + ) if label_max >= input.shape[axis]: - raise ValueError("Target {} is out of upper bound.".format( - label_max.item())) + raise ValueError( + "Target {} is out of upper bound.".format(label_max.item()) + ) if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): if soft_label == False: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( - input, valid_label, 'soft_label', soft_label, - 'ignore_index', ignore_index, 'numeric_stable_mode', True, - 'axis', axis, 'use_softmax', use_softmax) + input, + valid_label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + True, + 'axis', + axis, + 'use_softmax', + use_softmax, + ) else: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( - input, label, 'soft_label', soft_label, 'ignore_index', - ignore_index, 'numeric_stable_mode', True, 'axis', axis, - 'use_softmax', use_softmax) + input, + label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + True, + 'axis', + axis, + 'use_softmax', + use_softmax, + ) else: _, out = _legacy_C_ops.softmax_with_cross_entropy( - input, label, 'soft_label', soft_label, 'ignore_index', - ignore_index, 'numeric_stable_mode', True, 'axis', axis, - 'use_softmax', use_softmax) + input, + label, + 'soft_label', + soft_label, + 'ignore_index', + ignore_index, + 'numeric_stable_mode', + True, + 'axis', + axis, + 'use_softmax', + use_softmax, + ) if weight is not None: @@ -2522,11 +2779,12 @@ def cross_entropy(input, # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W]. 
- weight_gather = paddle.matmul(x=paddle.cast( - label, weight.dtype), - y=weight, - transpose_x=False, - transpose_y=True) + weight_gather = paddle.matmul( + x=paddle.cast(label, weight.dtype), + y=weight, + transpose_x=False, + transpose_y=True, + ) out_shape = list(out.shape) weight_gather_reshape = reshape(weight_gather, shape=out_shape) out = paddle.cast(out, weight_gather_reshape.dtype) @@ -2538,29 +2796,44 @@ def cross_entropy(input, raise ValueError( "input's class_dimension({}) must equal to " "weight's class_dimension({}) " - "when weight is provided" \ - .format(input.shape[axis], weight.shape[-1])) - - ignore_weight_mask = paddle.cast((label != ignore_index), - out.dtype) - if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[ - axis] == 1: + "when weight is provided".format( + input.shape[axis], weight.shape[-1] + ) + ) + + ignore_weight_mask = paddle.cast( + (label != ignore_index), out.dtype + ) + if ( + ignore_weight_mask.ndim > 1 + and ignore_weight_mask.shape[axis] == 1 + ): # TODO: Temporarily use squeeze instead of squeeze_ - ignore_weight_mask = paddle.squeeze(ignore_weight_mask, - axis) + ignore_weight_mask = paddle.squeeze( + ignore_weight_mask, axis + ) if axis != -1 and axis != valid_label.ndim - 1: - temp_perm = list(range(axis % valid_label.ndim)) \ - + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \ - + [axis % valid_label.ndim] + temp_perm = ( + list(range(axis % valid_label.ndim)) + + list( + range( + (axis % valid_label.ndim + 1), valid_label.ndim + ) + ) + + [axis % valid_label.ndim] + ) weight_gather = _legacy_C_ops.gather_nd( - weight, valid_label.transpose(temp_perm)) + weight, valid_label.transpose(temp_perm) + ) else: weight_gather = _legacy_C_ops.gather_nd(weight, valid_label) weight_gather = _legacy_C_ops.elementwise_mul( - weight_gather, ignore_weight_mask) + weight_gather, ignore_weight_mask + ) input_shape = list(label.shape) - weight_gather_reshape = reshape(weight_gather, - shape=input_shape) + weight_gather_reshape = reshape( + weight_gather, shape=input_shape + ) out = paddle.cast(out, weight_gather_reshape.dtype) out = _legacy_C_ops.elementwise_mul(out, weight_gather_reshape) @@ -2581,7 +2854,7 @@ def cross_entropy(input, # for each label[i],set 1 or 0, according to ignore_index # mask[i]=0, if label[i]==ignore_index # mask[i]=1, otherwise - mask = (label != ignore_index) + mask = label != ignore_index if weight is None: mask = paddle.cast(mask, dtype=out_sum.dtype) count = _legacy_C_ops.reduce_sum(mask, 'reduce_all', True) @@ -2589,15 +2862,18 @@ def cross_entropy(input, else: mask = paddle.cast(mask, weight_gather_reshape.dtype) weight_ignored = _legacy_C_ops.elementwise_mul( - mask, weight_gather_reshape) + mask, weight_gather_reshape + ) weight_sum = _legacy_C_ops.reduce_sum( - weight_ignored, 'reduce_all', True) + weight_ignored, 'reduce_all', True + ) ret = out_sum / (weight_sum + (weight_sum == 0.0)) return ret elif weight is not None: out_sum = _legacy_C_ops.reduce_sum(out, 'reduce_all', True) - total_weight = _legacy_C_ops.reduce_sum(weight_gather_reshape, - 'reduce_all', True) + total_weight = _legacy_C_ops.reduce_sum( + weight_gather_reshape, 'reduce_all', True + ) return out_sum / (total_weight + (total_weight == 0.0)) else: return _legacy_C_ops.mean(out) @@ -2606,18 +2882,24 @@ def cross_entropy(input, out = paddle.squeeze(out, axis=axis) return out - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'softmax_cross_entropy') check_variable_and_dtype( - label, 'label', + input, + 
'input', + ['float16', 'float32', 'float64'], + 'softmax_cross_entropy', + ) + check_variable_and_dtype( + label, + 'label', ['uint8', 'int8', 'int16', 'int32', 'int64', 'float32', 'float64'], - 'softmax_cross_entropy') + 'softmax_cross_entropy', + ) attrs = { 'soft_label': soft_label, 'ignore_index': ignore_index, 'numeric_stable_mode': True, 'axis': axis, - 'use_softmax': use_softmax + 'use_softmax': use_softmax, } helper = LayerHelper('softmax_with_cross_entropy', **locals()) softmax = helper.create_variable_for_type_inference(dtype=input.dtype) @@ -2627,17 +2909,17 @@ def cross_entropy(input, if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): backprop = helper.create_variable_for_type_inference(dtype=input.dtype) outputs['Backprop'] = backprop - helper.append_op(type='softmax_with_cross_entropy', - inputs={ - 'Logits': input, - 'Label': label - }, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='softmax_with_cross_entropy', + inputs={'Logits': input, 'Label': label}, + outputs=outputs, + attrs=attrs, + ) if weight is not None: - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'softmax_cross_entropy') + check_variable_and_dtype( + weight, 'weight', ['float32', 'float64'], 'softmax_cross_entropy' + ) weight_name = name if reduction == 'none' else None if soft_label == True: # chajchaj: @@ -2645,34 +2927,48 @@ def cross_entropy(input, # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. # for 2d case: label's shape is [N,H,W,C], weight_gather's shape is [N,H,W]. - weight_gather = paddle.matmul(x=paddle.cast(label, weight.dtype), - y=weight, - transpose_x=False, - transpose_y=True) + weight_gather = paddle.matmul( + x=paddle.cast(label, weight.dtype), + y=weight, + transpose_x=False, + transpose_y=True, + ) out_shape = list(out.shape) weight_gather_reshape = reshape(weight_gather, shape=out_shape) out = paddle.cast(out, weight_gather_reshape.dtype) else: if input.shape[axis] != weight.shape[-1]: - raise ValueError("input's class_dimension({}) must equal to " - "weight's class_dimension({}) " - "when weight is provided" \ - .format(input.shape[axis], weight.shape[-1])) + raise ValueError( + "input's class_dimension({}) must equal to " + "weight's class_dimension({}) " + "when weight is provided".format( + input.shape[axis], weight.shape[-1] + ) + ) valid_label = paddle.multiply( - paddle.cast(label != ignore_index, dtype=label.dtype), label) - ignore_weight_mask = paddle.cast((label != ignore_index), - input.dtype) - if ignore_weight_mask.ndim > 1 and ignore_weight_mask.shape[ - axis] == 1: + paddle.cast(label != ignore_index, dtype=label.dtype), label + ) + ignore_weight_mask = paddle.cast( + (label != ignore_index), input.dtype + ) + if ( + ignore_weight_mask.ndim > 1 + and ignore_weight_mask.shape[axis] == 1 + ): ignore_weight_mask = paddle.squeeze(ignore_weight_mask, axis) if axis != -1 and axis != valid_label.ndim - 1: - temp_perm = list(range(axis % valid_label.ndim)) \ - + list(range((axis % valid_label.ndim + 1), valid_label.ndim)) \ - + [axis % valid_label.ndim] + temp_perm = ( + list(range(axis % valid_label.ndim)) + + list( + range((axis % valid_label.ndim + 1), valid_label.ndim) + ) + + [axis % valid_label.ndim] + ) weight_gather = paddle.gather_nd( - weight, paddle.transpose(valid_label, temp_perm)) + weight, paddle.transpose(valid_label, temp_perm) + ) else: weight_gather = paddle.gather_nd(weight, valid_label) weight_gather = paddle.multiply(weight_gather, 
ignore_weight_mask) @@ -2689,8 +2985,8 @@ def cross_entropy(input, # for each label[i],set 1 or 0, according to ignore_index # mask[i]=0, if label[i]==ignore_index # mask[i]=1, otherwise - mask = (label != ignore_index) - if (weight is None): + mask = label != ignore_index + if weight is None: mask = paddle.cast(mask, dtype=out_sum.dtype) count = paddle.sum(mask, name=name) ret = out_sum / (count + (count == 0.0)) @@ -2714,13 +3010,15 @@ def cross_entropy(input, return out -def sigmoid_focal_loss(logit, - label, - normalizer=None, - alpha=0.25, - gamma=2.0, - reduction='sum', - name=None): +def sigmoid_focal_loss( + logit, + label, + normalizer=None, + alpha=0.25, + gamma=2.0, + reduction='sum', + name=None, +): r""" `Focal Loss `_ is proposed to address the foreground-background class imbalance for classification tasks. It down-weights @@ -2794,37 +3092,49 @@ def sigmoid_focal_loss(logit, raise ValueError( "The value of 'reduction' in sigmoid_focal_loss " "should be 'sum', 'mean' or 'none', but received %s, which is not allowed." - % reduction) + % reduction + ) if normalizer is not None: - check_variable_and_dtype(normalizer, 'normalizer', - ['float32', 'float64'], 'sigmoid_focal_loss') + check_variable_and_dtype( + normalizer, + 'normalizer', + ['float32', 'float64'], + 'sigmoid_focal_loss', + ) normalizer_shape = list(normalizer.shape) normalizer_dims = len(normalizer_shape) if normalizer_dims > 1: raise ValueError( - "Expected one dimension of normalizer in sigmoid_focal_loss but got {}." - .format(normalizer_dims)) + "Expected one dimension of normalizer in sigmoid_focal_loss but got {}.".format( + normalizer_dims + ) + ) if in_dygraph_mode(): place = _current_expected_place() one = _C_ops.full(logit.shape, float(1.0), logit.dtype, place) - loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label, False, - -100) + loss = _C_ops.sigmoid_cross_entropy_with_logits( + logit, label, False, -100 + ) pred = _C_ops.sigmoid(logit) p_t = _C_ops.add( _C_ops.multiply(pred, label), - _C_ops.multiply(_C_ops.subtract(one, pred), - _C_ops.subtract(one, label))) + _C_ops.multiply( + _C_ops.subtract(one, pred), _C_ops.subtract(one, label) + ), + ) alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype) alpha_t = _C_ops.add( _C_ops.multiply(alpha, label), - _C_ops.multiply(_C_ops.subtract(one, alpha), - _C_ops.subtract(one, label))) + _C_ops.multiply( + _C_ops.subtract(one, alpha), _C_ops.subtract(one, label) + ), + ) loss = _C_ops.multiply(alpha_t, loss) gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype) @@ -2843,9 +3153,19 @@ def sigmoid_focal_loss(logit, elif _in_legacy_dygraph(): one = _varbase_creator(dtype=logit.dtype) - _legacy_C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu', - False, 'dtype', one.dtype, 'str_value', - '1.0', 'shape', logit.shape) + _legacy_C_ops.fill_constant( + one, + 'value', + float(1.0), + 'force_cpu', + False, + 'dtype', + one.dtype, + 'str_value', + '1.0', + 'shape', + logit.shape, + ) loss = _legacy_C_ops.sigmoid_cross_entropy_with_logits(logit, label) pred = _legacy_C_ops.sigmoid(logit) @@ -2854,19 +3174,24 @@ def sigmoid_focal_loss(logit, _legacy_C_ops.elementwise_mul(pred, label), _legacy_C_ops.elementwise_mul( _legacy_C_ops.elementwise_sub(one, pred), - _legacy_C_ops.elementwise_sub(one, label))) + _legacy_C_ops.elementwise_sub(one, label), + ), + ) alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype) alpha_t = _legacy_C_ops.elementwise_add( _legacy_C_ops.elementwise_mul(alpha, label), _legacy_C_ops.elementwise_mul( 
_legacy_C_ops.elementwise_sub(one, alpha), - _legacy_C_ops.elementwise_sub(one, label))) + _legacy_C_ops.elementwise_sub(one, label), + ), + ) loss = _legacy_C_ops.elementwise_mul(alpha_t, loss) gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype) gamma_t = _legacy_C_ops.elementwise_pow( - _legacy_C_ops.elementwise_sub(one, p_t), gamma) + _legacy_C_ops.elementwise_sub(one, p_t), gamma + ) loss = _legacy_C_ops.elementwise_mul(gamma_t, loss) if normalizer is not None: @@ -2879,16 +3204,19 @@ def sigmoid_focal_loss(logit, return loss - check_variable_and_dtype(logit, 'logit', ['float32', 'float64'], - 'sigmoid_focal_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'sigmoid_focal_loss') + check_variable_and_dtype( + logit, 'logit', ['float32', 'float64'], 'sigmoid_focal_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'sigmoid_focal_loss' + ) bce_name = None if reduction == 'none' and normalizer is None: bce_name = name loss = paddle.nn.functional.binary_cross_entropy_with_logits( - logit, label, reduction='none', name=bce_name) + logit, label, reduction='none', name=bce_name + ) pred = paddle.nn.functional.sigmoid(logit) p_t = pred * label + (1 - pred) * (1 - label) @@ -2911,11 +3239,9 @@ def sigmoid_focal_loss(logit, return loss -def multi_label_soft_margin_loss(input, - label, - weight=None, - reduction="mean", - name=None): +def multi_label_soft_margin_loss( + input, label, weight=None, reduction="mean", name=None +): r""" Calculate a multi-class multi-classification hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) @@ -2973,25 +3299,42 @@ def multi_label_soft_margin_loss(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) if not (input.shape == label.shape): - raise ValueError("The input and label should have same dimension," - "but received {}!={}".format(input.shape, label.shape)) + raise ValueError( + "The input and label should have same dimension," + "but received {}!={}".format(input.shape, label.shape) + ) if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'multilabel_soft_margin_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'multilabel_soft_margin_loss') + check_variable_and_dtype( + input, + 'input', + ['float32', 'float64'], + 'multilabel_soft_margin_loss', + ) + check_variable_and_dtype( + label, + 'label', + ['float32', 'float64'], + 'multilabel_soft_margin_loss', + ) - loss = -(label * paddle.nn.functional.log_sigmoid(input) + - (1 - label) * paddle.nn.functional.log_sigmoid(-input)) + loss = -( + label * paddle.nn.functional.log_sigmoid(input) + + (1 - label) * paddle.nn.functional.log_sigmoid(-input) + ) if weight is not None: if not _non_static_mode(): - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'multilabel_soft_margin_loss') + check_variable_and_dtype( + weight, + 'weight', + ['float32', 'float64'], + 'multilabel_soft_margin_loss', + ) loss = loss * weight loss = loss.mean(axis=-1) # only return N loss values @@ -3081,17 +3424,21 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'hinge_embedding_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + 
"but received {}.".format(reduction) + ) if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'hinge_embedding_loss') - check_variable_and_dtype(label, 'label', ['float32', 'float64'], - 'hinge_embedding_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'hinge_embedding_loss' + ) + check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'hinge_embedding_loss' + ) zero_ = paddle.zeros([1], dtype=input.dtype) - loss = paddle.where(label == 1., input, zero_) + \ - paddle.where(label == -1., paddle.nn.functional.relu(margin - input), zero_) + loss = paddle.where(label == 1.0, input, zero_) + paddle.where( + label == -1.0, paddle.nn.functional.relu(margin - input), zero_ + ) if reduction == 'mean': return paddle.mean(loss, name=name) @@ -3101,12 +3448,9 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): return loss -def cosine_embedding_loss(input1, - input2, - label, - margin=0, - reduction='mean', - name=None): +def cosine_embedding_loss( + input1, input2, label, margin=0, reduction='mean', name=None +): r""" This operator computes the cosine embedding loss of Tensor ``input1``, ``input2`` and ``label`` as follows. @@ -3167,12 +3511,14 @@ def cosine_embedding_loss(input1, """ if len(label.shape) != 1: raise ValueError( - "1D target tensor expected, multi-target not supported") + "1D target tensor expected, multi-target not supported" + ) if input1.shape != input2.shape: raise ValueError( "the shape of input tensor 1 should be equal to input tensor 2, but found inputs with " - "different sizes") + "different sizes" + ) if len(input1.shape) > 2: raise ValueError( @@ -3181,9 +3527,13 @@ def cosine_embedding_loss(input1, if input1.dtype not in [paddle.float32, paddle.float64]: raise ValueError( - "The data type of input Variable must be 'float32' or 'float64'") + "The data type of input Variable must be 'float32' or 'float64'" + ) if label.dtype not in [ - paddle.int32, paddle.int64, paddle.float32, paddle.float64 + paddle.int32, + paddle.int64, + paddle.float32, + paddle.float64, ]: raise ValueError( "The data type of label Variable must be 'int32', 'int64', 'float32', 'float64'" @@ -3209,14 +3559,16 @@ def cosine_embedding_loss(input1, return paddle.sum(out, name=name) -def triplet_margin_with_distance_loss(input, - positive, - negative, - distance_function=None, - margin=1.0, - swap=False, - reduction='mean', - name=None): +def triplet_margin_with_distance_loss( + input, + positive, + negative, + distance_function=None, + margin=1.0, + swap=False, + reduction='mean', + name=None, +): r""" Measures the triplet loss given an input tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`. @@ -3291,28 +3643,47 @@ def triplet_margin_with_distance_loss(input, """ if reduction not in ['sum', 'mean', 'none']: - raise ValueError("'reduction' in 'triplet_margin_with_distance_loss' " - "should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + raise ValueError( + "'reduction' in 'triplet_margin_with_distance_loss' " + "should be 'sum', 'mean' or 'none', " + "but received {}.".format(reduction) + ) if margin < 0: raise ValueError( "The margin between positive samples and negative samples should be greater than 0." 
) if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'triplet_margin_with_distance_loss') - check_variable_and_dtype(positive, 'positive', ['float32', 'float64'], - 'triplet_margin_with_distance_loss') - check_variable_and_dtype(negative, 'negative', ['float32', 'float64'], - 'triplet_margin_with_distance_loss') + check_variable_and_dtype( + input, + 'input', + ['float32', 'float64'], + 'triplet_margin_with_distance_loss', + ) + check_variable_and_dtype( + positive, + 'positive', + ['float32', 'float64'], + 'triplet_margin_with_distance_loss', + ) + check_variable_and_dtype( + negative, + 'negative', + ['float32', 'float64'], + 'triplet_margin_with_distance_loss', + ) if not (input.shape == positive.shape == negative.shape): - raise ValueError("input's shape must equal to " - "positive's shape and " - "negative's shape") + raise ValueError( + "input's shape must equal to " + "positive's shape and " + "negative's shape" + ) - distance_function = distance_function if distance_function is not None \ + distance_function = ( + distance_function + if distance_function is not None else paddle.nn.PairwiseDistance(2) + ) positive_dist = distance_function(input, positive) negative_dist = distance_function(input, negative) @@ -3324,7 +3695,8 @@ def triplet_margin_with_distance_loss(input, if not paddle.all(positive_dist > 0) or not paddle.all(negative_dist > 0): raise ValueError( "The positive distance or negative distance should be greater than 0, " - "The distance functions should be checked.") + "The distance functions should be checked." + ) loss = paddle.clip(positive_dist - negative_dist + margin, min=0.0) @@ -3336,15 +3708,17 @@ def triplet_margin_with_distance_loss(input, return loss -def triplet_margin_loss(input, - positive, - negative, - margin=1.0, - p=2, - epsilon=1e-6, - swap=False, - reduction='mean', - name=None): +def triplet_margin_loss( + input, + positive, + negative, + margin=1.0, + p=2, + epsilon=1e-6, + swap=False, + reduction='mean', + name=None, +): r""" Measures the triplet loss given an input tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`. @@ -3421,23 +3795,29 @@ def triplet_margin_loss(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'triplet_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) if margin < 0: raise ValueError( "The margin between positive samples and negative samples should be greater than 0." 
) if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'triplet_margin_loss') - check_variable_and_dtype(positive, 'positive', ['float32', 'float64'], - 'triplet_margin_loss') - check_variable_and_dtype(negative, 'negative', ['float32', 'float64'], - 'triplet_margin_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'triplet_margin_loss' + ) + check_variable_and_dtype( + positive, 'positive', ['float32', 'float64'], 'triplet_margin_loss' + ) + check_variable_and_dtype( + negative, 'negative', ['float32', 'float64'], 'triplet_margin_loss' + ) if not (input.shape == positive.shape == negative.shape): - raise ValueError("input's shape must equal to " - "positive's shape and " - "negative's shape") + raise ValueError( + "input's shape must equal to " + "positive's shape and " + "negative's shape" + ) distance_function = paddle.nn.PairwiseDistance(p, epsilon=epsilon) positive_dist = distance_function(input, positive) @@ -3457,13 +3837,15 @@ def triplet_margin_loss(input, return loss -def multi_margin_loss(input, - label, - p: int = 1, - margin: float = 1.0, - weight=None, - reduction='mean', - name=None): +def multi_margin_loss( + input, + label, + p: int = 1, + margin: float = 1.0, + weight=None, + reduction='mean', + name=None, +): r""" Measures a multi-class classification hinge loss between input :math:`input` and label :math:`label`: @@ -3526,39 +3908,55 @@ def multi_margin_loss(input, if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'multi_margin_loss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) if not _non_static_mode(): - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'multi_margin_loss') - check_variable_and_dtype(label, 'label', ['int32', 'int64'], - 'multi_margin_loss') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'multi_margin_loss' + ) + check_variable_and_dtype( + label, 'label', ['int32', 'int64'], 'multi_margin_loss' + ) if not (input.shape[0] == label.shape[0]): raise ValueError( "The label's shape[0] should be equal to input's shape[0], " "but received input's shape[0] {} and label's shape[0]:{}. ".format( - input.shape[0], label.shape[0])) + input.shape[0], label.shape[0] + ) + ) label = label.reshape((-1, 1)) index_sample = paddle.index_sample(input, label) if weight is not None: if not _non_static_mode(): - check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], - 'multi_margin_loss') + check_variable_and_dtype( + weight, 'weight', ['float32', 'float64'], 'multi_margin_loss' + ) if not (input.shape[1] == weight.shape[0]): raise ValueError( "The weight's shape[0] should be equal to input's shape[1]" - "but received weight's shape[0]: {} and input's shape[1]: {}". 
- format(weight.shape[0], input.shape[1])) + "but received weight's shape[0]: {} and input's shape[1]: {}".format( + weight.shape[0], input.shape[1] + ) + ) weight = paddle.gather(weight, label, axis=0).reshape((-1, 1)) loss = paddle.mean( paddle.pow( - paddle.clip(weight * - (margin - index_sample + input), min=0.0), p), - axis=1) - weight * (margin**p / paddle.shape(input)[1]) + paddle.clip(weight * (margin - index_sample + input), min=0.0), + p, + ), + axis=1, + ) - weight * (margin**p / paddle.shape(input)[1]) else: - loss = paddle.mean(paddle.pow( - paddle.clip(margin - index_sample + input, min=0.0), p), - axis=1) - margin**p / paddle.shape(input)[1] + loss = ( + paddle.mean( + paddle.pow( + paddle.clip(margin - index_sample + input, min=0.0), p + ), + axis=1, + ) + - margin**p / paddle.shape(input)[1] + ) if reduction == 'mean': return paddle.mean(loss, name=name) @@ -3621,20 +4019,23 @@ def soft_margin_loss(input, label, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in soft_margin_loss should be 'sum', " - "'mean' or 'none', but received %s, which is not allowed." % - reduction) + "'mean' or 'none', but received %s, which is not allowed." + % reduction + ) if not _non_static_mode(): - fluid.data_feeder.check_variable_and_dtype(input, 'input', - ['float32', 'float64'], - 'soft_margin_loss') fluid.data_feeder.check_variable_and_dtype( - label, 'label', ['int32', 'int64', 'float32', 'float64'], - 'soft_margin_loss') + input, 'input', ['float32', 'float64'], 'soft_margin_loss' + ) + fluid.data_feeder.check_variable_and_dtype( + label, + 'label', + ['int32', 'int64', 'float32', 'float64'], + 'soft_margin_loss', + ) if not (input.shape == label.shape): - raise ValueError("input's shape must equal to " - "label's shape") + raise ValueError("input's shape must equal to " "label's shape") label = fluid.layers.cast(label, input.dtype) out = paddle.log(1 + paddle.exp(-label * input)) diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index d62dcdcfbf4f4059059019dc9e6d68afd5c8ee69..87d61e91809134a910857de4beaf6c31fe9d0db2 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -83,18 +83,30 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): if _in_legacy_dygraph(): eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype) - out = _legacy_C_ops.p_norm(x, 'axis', axis, 'porder', float(p), - 'keepdim', True, 'epsilon', epsilon) + out = _legacy_C_ops.p_norm( + x, + 'axis', + axis, + 'porder', + float(p), + 'keepdim', + True, + 'epsilon', + epsilon, + ) return x / _legacy_C_ops.elementwise_max(out, eps) check_type(p, 'p', (float, int), 'normalize') check_type(axis, 'axis', (int), 'normalize') - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'normalize') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'normalize' + ) if len(x.shape) == 1 and axis != 0 and axis != -1: raise ValueError( - "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}" - .format(axis)) + "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format( + axis + ) + ) attrs = { 'axis': axis, @@ -104,26 +116,27 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='p_norm', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + 
type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) eps = out.block.create_var(dtype=out.dtype) eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype) return paddle.divide(x, paddle.maximum(out, eps), name=name) -def batch_norm(x, - running_mean, - running_var, - weight, - bias, - training=False, - momentum=0.9, - epsilon=1e-05, - data_format="NCHW", - use_global_stats=None, - name=None): +def batch_norm( + x, + running_mean, + running_var, + weight, + bias, + training=False, + momentum=0.9, + epsilon=1e-05, + data_format="NCHW", + use_global_stats=None, + name=None, +): """ Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift . @@ -175,7 +188,8 @@ def batch_norm(x, if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " - "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)) + "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format) + ) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' @@ -187,29 +201,64 @@ def batch_norm(x, if in_dygraph_mode(): batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm( - x, weight, bias, running_mean, running_var, momentum, epsilon, - data_format, not training, use_global_stats, trainable_statistics, - False) - - return dygraph_utils._append_activation_in_dygraph(batch_norm_out, - act=None) + x, + weight, + bias, + running_mean, + running_var, + momentum, + epsilon, + data_format, + not training, + use_global_stats, + trainable_statistics, + False, + ) + + return dygraph_utils._append_activation_in_dygraph( + batch_norm_out, act=None + ) elif _in_legacy_dygraph(): # for dygraph need tuple - attrs = ("momentum", momentum, "epsilon", epsilon, "is_test", - not training, "data_layout", data_format, "use_mkldnn", False, - "fuse_with_relu", False, "use_global_stats", use_global_stats, - "trainable_statistics", trainable_statistics) + attrs = ( + "momentum", + momentum, + "epsilon", + epsilon, + "is_test", + not training, + "data_layout", + data_format, + "use_mkldnn", + False, + "fuse_with_relu", + False, + "use_global_stats", + use_global_stats, + "trainable_statistics", + trainable_statistics, + ) batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm( - x, weight, bias, running_mean, running_var, None, mean_out, - variance_out, *attrs) - - return dygraph_utils._append_activation_in_dygraph(batch_norm_out, - act=None) - - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'BatchNorm') + x, + weight, + bias, + running_mean, + running_var, + None, + mean_out, + variance_out, + *attrs + ) + + return dygraph_utils._append_activation_in_dygraph( + batch_norm_out, act=None + ) + + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm' + ) # for static need dict attrs = { @@ -228,16 +277,18 @@ def batch_norm(x, "Scale": [weight], "Bias": [bias], "Mean": [running_mean], - "Variance": [running_var] + "Variance": [running_var], } helper = LayerHelper('batch_norm', **locals()) param_dtype = x.dtype if x.dtype != 'float16' else 'float32' - saved_mean = helper.create_variable_for_type_inference(dtype=param_dtype, - stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=param_dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=param_dtype, stop_gradient=True) + dtype=param_dtype, stop_gradient=True + ) batch_norm_out = 
helper.create_variable_for_type_inference(x.dtype) outputs = { @@ -245,29 +296,26 @@ def batch_norm(x, "MeanOut": [running_mean], "VarianceOut": [running_var], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } if training or trainable_statistics: # reserve_space is only used for training. reserve_space = helper.create_variable_for_type_inference( - dtype=x.dtype, stop_gradient=True) + dtype=x.dtype, stop_gradient=True + ) outputs["ReserveSpace"] = [reserve_space] - helper.append_op(type="batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) return helper.append_activation(batch_norm_out) -def layer_norm(x, - normalized_shape, - weight=None, - bias=None, - epsilon=1e-05, - name=None): +def layer_norm( + x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None +): """ see more detail in paddle.nn.LayerNorm @@ -304,32 +352,49 @@ def layer_norm(x, normalized_shape = list(normalized_shape) elif not isinstance(normalized_shape, list): raise ValueError( - "`normalized_shape` should be int, list of ints or tuple of ints.") + "`normalized_shape` should be int, list of ints or tuple of ints." + ) normalized_ndim = len(normalized_shape) begin_norm_axis = input_ndim - normalized_ndim - if input_ndim < normalized_ndim or input_shape[ - begin_norm_axis:] != normalized_shape: + if ( + input_ndim < normalized_ndim + or input_shape[begin_norm_axis:] != normalized_shape + ): str_normalized_shape = str(normalized_shape) - raise ValueError('Given normalized_shape is ' + str_normalized_shape + - ', expected input with shape [*, ' + - str_normalized_shape[1:] + ', but got input shape ' + - str(input_shape)) + raise ValueError( + 'Given normalized_shape is ' + + str_normalized_shape + + ', expected input with shape [*, ' + + str_normalized_shape[1:] + + ', but got input shape ' + + str(input_shape) + ) if in_dygraph_mode(): - pre_act, _, _, = _C_ops.layer_norm(x, weight, bias, epsilon, - begin_norm_axis, False) + ( + pre_act, + _, + _, + ) = _C_ops.layer_norm(x, weight, bias, epsilon, begin_norm_axis, False) return dygraph_utils._append_activation_in_dygraph(pre_act, act=None) if _in_legacy_dygraph(): - pre_act, _, _ = _legacy_C_ops.layer_norm(x, weight, bias, 'epsilon', - epsilon, 'begin_norm_axis', - begin_norm_axis) + pre_act, _, _ = _legacy_C_ops.layer_norm( + x, + weight, + bias, + 'epsilon', + epsilon, + 'begin_norm_axis', + begin_norm_axis, + ) return dygraph_utils._append_activation_in_dygraph(pre_act, act=None) - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'LayerNorm') + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'LayerNorm' + ) inputs = dict() inputs['X'] = [x] @@ -343,37 +408,40 @@ def layer_norm(x, helper = LayerHelper('layer_norm', **locals()) dtype = x.dtype - mean_out = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) - variance_out = helper.create_variable_for_type_inference(dtype=dtype, - stop_gradient=True) + mean_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) + variance_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True + ) layer_norm_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="layer_norm", - inputs=inputs, - outputs={ - "Y": layer_norm_out, - "Mean": mean_out, - "Variance": variance_out, - }, - attrs={ - "epsilon": epsilon, - 
"begin_norm_axis": begin_norm_axis - }) + helper.append_op( + type="layer_norm", + inputs=inputs, + outputs={ + "Y": layer_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}, + ) return helper.append_activation(layer_norm_out) -def instance_norm(x, - running_mean=None, - running_var=None, - weight=None, - bias=None, - use_input_stats=True, - momentum=0.9, - eps=1e-05, - data_format="NCHW", - name=None): +def instance_norm( + x, + running_mean=None, + running_var=None, + weight=None, + bias=None, + use_input_stats=True, + momentum=0.9, + eps=1e-05, + data_format="NCHW", + name=None, +): """ See more detail in nn.layer.InstanceNorm2D. @@ -410,9 +478,17 @@ def instance_norm(x, out = _C_ops.instance_norm(x, weight, bias, eps) return out if _in_legacy_dygraph(): - out, _, _ = _legacy_C_ops.instance_norm(x, weight, bias, "epsilon", eps, - "momentum", momentum, - "data_format", data_format) + out, _, _ = _legacy_C_ops.instance_norm( + x, + weight, + bias, + "epsilon", + eps, + "momentum", + momentum, + "data_format", + data_format, + ) return out check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm") @@ -425,104 +501,106 @@ def instance_norm(x, inputs = {"X": [x]} helper = LayerHelper('instance_norm', **locals()) - saved_mean = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=x.dtype, stop_gradient=True) + dtype=x.dtype, stop_gradient=True + ) instance_norm_out = helper.create_variable_for_type_inference(x.dtype) outputs = { "Y": [instance_norm_out], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } - helper.append_op(type="instance_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) return instance_norm_out -def local_response_norm(x, - size, - alpha=1e-4, - beta=0.75, - k=1., - data_format="NCHW", - name=None): +def local_response_norm( + x, size, alpha=1e-4, beta=0.75, k=1.0, data_format="NCHW", name=None +): r""" - Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions. - For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_ - - The formula is as follows: - - .. math:: - - Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + size/2)}_{j = \max(0, i - size/2)}(Input(j, x, y))^2\right)^{\beta} - - In the above equation: + Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions. + For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_ - - :math:`size` : The number of channels to sum over. - - :math:`k` : The offset (avoid being divided by 0). - - :math:`\\alpha` : The scaling parameter. - - :math:`\\beta` : The exponent parameter. + The formula is as follows: + .. math:: - Args: - x (Tensor): The input 3-D/4-D/5-D tensor. The data type is float32. - size (int): The number of channels to sum over. - alpha (float, optional): The scaling parameter, positive. Default:1e-4 - beta (float, optional): The exponent, positive. Default:0.75 - k (float, optional): An offset, positive. 
Default: 1.0 - data_format (str, optional): Specify the data format of the input, and the data format of the output - will be consistent with that of the input. An optional string from: - If x is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`, - the data is stored in the order of: `[batch_size, input_channels, feature_length]`. - If x is 4-D Tensor, the string could be `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, - the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. - If x is 5-D Tensor, the string could be `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`, - the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. - name (str, optional): Name for the operation (optional, default is None). For more information, - please refer to :ref:`api_guide_Name`. + Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + size/2)}_{j = \max(0, i - size/2)}(Input(j, x, y))^2\right)^{\beta} + + In the above equation: + + - :math:`size` : The number of channels to sum over. + - :math:`k` : The offset (avoid being divided by 0). + - :math:`\\alpha` : The scaling parameter. + - :math:`\\beta` : The exponent parameter. + + + Args: + x (Tensor): The input 3-D/4-D/5-D tensor. The data type is float32. + size (int): The number of channels to sum over. + alpha (float, optional): The scaling parameter, positive. Default:1e-4 + beta (float, optional): The exponent, positive. Default:0.75 + k (float, optional): An offset, positive. Default: 1.0 + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: + If x is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`, + the data is stored in the order of: `[batch_size, input_channels, feature_length]`. + If x is 4-D Tensor, the string could be `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, + the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. + If x is 5-D Tensor, the string could be `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`, + the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. + name (str, optional): Name for the operation (optional, default is None). For more information, + please refer to :ref:`api_guide_Name`. - Returns: - A tensor storing the transformation result with the same shape and data type as input. + Returns: + A tensor storing the transformation result with the same shape and data type as input. - Examples: + Examples: - .. code-block:: python + .. 
code-block:: python - import paddle + import paddle - x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32") - y = paddle.nn.functional.local_response_norm(x, size=5) - print(y.shape) # [3, 3, 112, 112] - """ + x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32") + y = paddle.nn.functional.local_response_norm(x, size=5) + print(y.shape) # [3, 3, 112, 112] + """ if not in_dynamic_mode(): check_variable_and_dtype(x, 'x', ['float32'], 'local_response_norm') if data_format not in ['NCL', 'NLC', 'NCHW', 'NHWC', 'NCDHW', 'NDHWC']: raise ValueError( - "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \ - "but got {}".format(data_format)) + "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " + "but got {}".format(data_format) + ) sizes = x.shape dim = len(sizes) if dim < 3: raise ValueError( - 'Expected 3D or higher dimensionality input, but got {} dimensions'. - format(dim)) + 'Expected 3D or higher dimensionality input, but got {} dimensions'.format( + dim + ) + ) for i, sz in enumerate(sizes): if not sz > 0 and i > 0: - raise ValueError("Expected every dim's size to be larger than 0, " - "but the size of the {}-th dim is {}".format( - i, sz)) + raise ValueError( + "Expected every dim's size to be larger than 0, " + "but the size of the {}-th dim is {}".format(i, sz) + ) channel_last = True if data_format[-1] == "C" else False from functools import reduce + sum_sizes = reduce(lambda x, y: x * y, sizes[1:]) div = paddle.unsqueeze(paddle.multiply(x, x), axis=1) @@ -530,8 +608,11 @@ def local_response_norm(x, pad4d_shape = [0, 0, size // 2, (size - 1) // 2] pool2d_shape = (size, 1) reshape_shape = [ - sizes[0], 1, sizes[1], sizes[2], - int(sum_sizes / (sizes[1] * sizes[2])) + sizes[0], + 1, + sizes[1], + sizes[2], + int(sum_sizes / (sizes[1] * sizes[2])), ] pad5d_shape = [0, 0, 0, 0, size // 2, (size - 1) // 2] pool3d_shape = (size, 1, 1) @@ -539,26 +620,29 @@ def local_response_norm(x, pad4d_shape = [size // 2, (size - 1) // 2, 0, 0] pool2d_shape = (1, size) reshape_shape = [ - sizes[0], 1, sizes[1], - int(sum_sizes / (sizes[1] * sizes[-1])), sizes[-1] + sizes[0], + 1, + sizes[1], + int(sum_sizes / (sizes[1] * sizes[-1])), + sizes[-1], ] pad5d_shape = [size // 2, (size - 1) // 2, 0, 0, 0, 0] pool3d_shape = (1, 1, size) if dim == 3: div = paddle.nn.functional.pad(div, pad=pad4d_shape) - div = paddle.nn.functional.avg_pool2d(div, - kernel_size=pool2d_shape, - stride=1) + div = paddle.nn.functional.avg_pool2d( + div, kernel_size=pool2d_shape, stride=1 + ) div = paddle.squeeze(div, axis=1) else: div = paddle.reshape(div, shape=reshape_shape) - div = paddle.nn.functional.pad(div, - pad=pad5d_shape, - data_format='NCDHW') - div = paddle.nn.functional.avg_pool3d(div, - kernel_size=pool3d_shape, - stride=1) + div = paddle.nn.functional.pad( + div, pad=pad5d_shape, data_format='NCDHW' + ) + div = paddle.nn.functional.avg_pool3d( + div, kernel_size=pool3d_shape, stride=1 + ) div = paddle.reshape(paddle.squeeze(div, axis=1), sizes) div = paddle.scale(div, scale=alpha, bias=k) diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 7e465b5974888d6c80888423c345c79294bf4f8a..f9ece56dc7ef51647436dde25dbfcc18c7c0a688 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -32,7 +32,9 @@ def _check_input(x, dimension): if len(x.shape) != dimension: raise ValueError( "Excepted Input X is {}-D tensor, but received {}-D {}".format( - dimension, len(x.shape), type(x))) + dimension, 
len(x.shape), type(x) + ) + ) def _check_instance(x, x_name, types=(int, float)): @@ -40,16 +42,19 @@ def _check_instance(x, x_name, types=(int, float)): if not isinstance(x, types): raise ValueError( "Excepted {} type for {} but received type: {}. ".format( - types, x_name, type(x))) + types, x_name, type(x) + ) + ) def _check_value_limitation(x, x_name, min_limit=1e-3): - def _check_value(x, x_name, min_limit=1e-3): if isinstance(x, int) and min_limit is not None and x < min_limit: raise ValueError( - "Excepted the input {} to be greater than {} but received x: {}. " - .format(x_name, min_limit, x)) + "Excepted the input {} to be greater than {} but received x: {}. ".format( + x_name, min_limit, x + ) + ) for ele in x: _check_value(ele, x_name) @@ -73,21 +78,24 @@ def _channel_last(data_format, num_dims): if data_format not in ['NCL', 'NLC']: raise ValueError( "Attr(data_format) should be 'NCL' or 'NLC'. Received " - "Attr(data_format): %s" % str(data_format)) + "Attr(data_format): %s" % str(data_format) + ) else: return True if data_format == "NLC" else False if num_dims == 2: if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s" % str(data_format)) + "Attr(data_format): %s" % str(data_format) + ) else: return True if data_format == "NHWC" else False if num_dims == 3: if data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s" % str(data_format)) + "Attr(data_format): %s" % str(data_format) + ) else: return True if data_format == "NDHWC" else False @@ -97,13 +105,16 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( - "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.". - format(padding)) + "Unknown padding: '{}'. It can only be 'SAME' or 'VALID'.".format( + padding + ) + ) if padding == "VALID": if ceil_mode != False: raise ValueError( "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. " - "Received ceil_mode: True.") + "Received ceil_mode: True." + ) padding_algorithm = "VALID" padding = [0] * num_dims @@ -118,10 +129,12 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): if not _zero_padding_in_batch_and_channel(padding, channel_last): raise ValueError( "Non-zero padding({}) in the batch or channel dimensions " - "is not supported.".format(padding)) + "is not supported.".format(padding) + ) padding_algorithm = "EXPLICIT" padding = _exclude_padding_in_batch_and_channel( - padding, channel_last) + padding, channel_last + ) if utils._is_symmetric_padding(padding, num_dims): padding = padding[0::2] # for padding like [pad_before, pad_after, pad_before, pad_after, ...] @@ -144,25 +157,29 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): def _expand_low_nd_padding(padding): - #1d to 2d fake input + # 1d to 2d fake input if len(padding) == 2: padding = [0] * 2 + padding elif len(padding) == 1: padding = [0] + padding else: raise ValueError( - "The size of padding's dimmention should be 1 or 2. But got padding={}" - .format(padding)) + "The size of padding's dimmention should be 1 or 2. 
But got padding={}".format( + padding + ) + ) return padding -def avg_pool1d(x, - kernel_size, - stride=None, - padding=0, - exclusive=True, - ceil_mode=False, - name=None): +def avg_pool1d( + x, + kernel_size, + stride=None, + padding=0, + exclusive=True, + ceil_mode=False, + name=None, +): """ This API implements average pooling 1d operation, See more details in :ref:`api_nn_pooling_AvgPool1d` . @@ -221,28 +238,56 @@ def avg_pool1d(x, _check_value_limitation(stride, "stride", min_limit=1e-3) channel_last = _channel_last("NCL", 1) - padding, padding_algorithm = _update_padding_nd(padding, - 1, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 1, channel_last=channel_last, ceil_mode=ceil_mode + ) # use 2d to implenment 1d should expand padding in advance. padding = _expand_low_nd_padding(padding) if in_dygraph_mode(): - output = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode, - exclusive, data_format, 'avg', False, False, - padding_algorithm, True) + output = _C_ops.pool2d( + x, + kernel_size, + stride, + padding, + ceil_mode, + exclusive, + data_format, + 'avg', + False, + False, + padding_algorithm, + True, + ) return squeeze(output, [2]) if _in_legacy_dygraph(): - output = _legacy_C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', - kernel_size, 'global_pooling', False, - 'strides', stride, 'paddings', padding, - 'padding_algorithm', padding_algorithm, - 'use_cudnn', True, 'ceil_mode', ceil_mode, - 'use_mkldnn', False, 'exclusive', - exclusive, 'data_format', data_format) + output = _legacy_C_ops.pool2d( + x, + 'pooling_type', + 'avg', + 'ksize', + kernel_size, + 'global_pooling', + False, + 'strides', + stride, + 'paddings', + padding, + 'padding_algorithm', + padding_algorithm, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + exclusive, + 'data_format', + data_format, + ) return squeeze(output, [2]) op_type = 'pool2d' @@ -250,35 +295,39 @@ def avg_pool1d(x, dtype = helper.input_dtype(input_param_name='x') pool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={"X": x}, - outputs={"Out": pool_out}, - attrs={ - "pooling_type": 'avg', - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": exclusive, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs={"Out": pool_out}, + attrs={ + "pooling_type": 'avg', + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": exclusive, + "data_format": data_format, + }, + ) return squeeze(pool_out, [2]) -def avg_pool2d(x, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - exclusive=True, - divisor_override=None, - data_format="NCHW", - name=None): +def avg_pool2d( + x, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + exclusive=True, + divisor_override=None, + data_format="NCHW", + name=None, +): """ This API implements average pooling 2d operation. See more details in :ref:`api_nn_pooling_AvgPool2d` . 
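Not part of the patch, and purely illustrative: a minimal usage sketch of the avg_pool1d path whose black-reformatted body appears above (the avg_pool2d signature that follows is analogous). The shapes in the comments are the expected results under the NCL layout described in the docstrings; nothing here is asserted beyond what the reformatted code already documents.

    import paddle
    import paddle.nn.functional as F

    # 3-D input in NCL layout: batch=1, channels=3, length=32
    x = paddle.rand([1, 3, 32], dtype="float32")

    # kernel 2, stride 2, zero padding -> the length dimension halves
    out = F.avg_pool1d(x, kernel_size=2, stride=2, padding=0)
    print(out.shape)  # expected: [1, 3, 16]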
@@ -340,23 +389,52 @@ def avg_pool2d(x, _check_value_limitation(stride, "stride", min_limit=1e-3) channel_last = _channel_last(data_format, 2) - padding, padding_algorithm = _update_padding_nd(padding, - 2, - channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 2, channel_last, ceil_mode=ceil_mode + ) if _non_static_mode(): if in_dygraph_mode(): - output = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode, - exclusive, data_format, 'avg', False, False, - padding_algorithm, True) + output = _C_ops.pool2d( + x, + kernel_size, + stride, + padding, + ceil_mode, + exclusive, + data_format, + 'avg', + False, + False, + padding_algorithm, + True, + ) else: output = _legacy_C_ops.pool2d( - x, 'pooling_type', 'avg', 'ksize', kernel_size, - 'global_pooling', False, 'padding_algorithm', padding_algorithm, - 'strides', stride, 'paddings', padding, 'use_cudnn', True, - 'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', - exclusive, 'data_format', data_format) + x, + 'pooling_type', + 'avg', + 'ksize', + kernel_size, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'strides', + stride, + 'paddings', + padding, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + exclusive, + 'data_format', + data_format, + ) if divisor_override is None: return output else: @@ -369,22 +447,24 @@ def avg_pool2d(x, dtype = helper.input_dtype(input_param_name='x') pool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={"X": x}, - outputs={"Out": pool_out}, - attrs={ - "pooling_type": "avg", - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": exclusive, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs={"Out": pool_out}, + attrs={ + "pooling_type": "avg", + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": exclusive, + "data_format": data_format, + }, + ) if divisor_override is None: return pool_out @@ -393,15 +473,17 @@ def avg_pool2d(x, return pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override -def avg_pool3d(x, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - exclusive=True, - divisor_override=None, - data_format="NCDHW", - name=None): +def avg_pool3d( + x, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + exclusive=True, + divisor_override=None, + data_format="NCDHW", + name=None, +): """ This API implements average pooling 3d operation. See more details in :ref:`api_nn_pooling_AvgPool3d` . 
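Also not part of the patch: a short, hedged sketch exercising the avg_pool2d call reformatted above, including the divisor_override branch that the reformat rewraps. Expected shapes assume the NCHW layout; the divisor semantics are read directly from the `pool_out * kernel_area / divisor_override` expression in this file.

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 32, 32], dtype="float32")  # NCHW

    # plain average pooling: each spatial dimension halves
    out = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0)
    print(out.shape)  # expected: [1, 3, 16, 16]

    # divisor_override divides each window sum by 2 instead of the true
    # window size (kernel area 4 here), so values double vs. the plain mean
    out2 = F.avg_pool2d(x, kernel_size=2, stride=2, divisor_override=2)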
@@ -459,25 +541,54 @@ def avg_pool3d(x, stride = utils.convert_to_list(stride, 3, 'pool_stride') channel_last = _channel_last(data_format, 3) - padding, padding_algorithm = _update_padding_nd(padding, - 3, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 3, channel_last=channel_last, ceil_mode=ceil_mode + ) _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3) _check_value_limitation(stride, "stride", min_limit=1e-3) if in_dygraph_mode(): - pool_out = _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode, - exclusive, data_format, 'avg', False, False, - padding_algorithm, True) + pool_out = _C_ops.pool3d( + x, + kernel_size, + stride, + padding, + ceil_mode, + exclusive, + data_format, + 'avg', + False, + False, + padding_algorithm, + True, + ) elif _in_legacy_dygraph(): pool_out = _legacy_C_ops.pool3d( - x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride, - 'paddings', padding, 'global_pooling', False, 'padding_algorithm', - padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode, - 'use_mkldnn', False, 'exclusive', exclusive, 'data_format', - data_format) + x, + 'pooling_type', + 'avg', + 'ksize', + kernel_size, + 'strides', + stride, + 'paddings', + padding, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + exclusive, + 'data_format', + data_format, + ) else: op_type = "pool3d" helper = LayerHelper(op_type, **locals()) @@ -486,38 +597,45 @@ def avg_pool3d(x, pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} - helper.append_op(type=op_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'avg', - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": exclusive, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'avg', + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": exclusive, + "data_format": data_format, + }, + ) if divisor_override is None: return pool_out else: _check_instance(divisor_override, "divisor_override") - return pool_out * (kernel_size[0] * kernel_size[1] * - kernel_size[2]) / divisor_override + return ( + pool_out + * (kernel_size[0] * kernel_size[1] * kernel_size[2]) + / divisor_override + ) -def max_pool1d(x, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - name=None): +def max_pool1d( + x, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + name=None, +): """ This API implements max pooling 1d opereation. See more details in :ref:`api_nn_pooling_MaxPool1d` . @@ -570,44 +688,96 @@ def max_pool1d(x, else: stride = [1] + utils.convert_to_list(stride, 1, 'pool_stride') - padding, padding_algorithm = _update_padding_nd(padding, - 1, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 1, ceil_mode=ceil_mode + ) # use 2d to implenment 1d should expand padding in advance. 
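    # [editor annotation, not part of the patch] The 1-D pooling entry points
    # in this file are thin wrappers over the 2-D kernels: the input is
    # unsqueezed from [N, C, L] to [N, C, 1, L], the single padding value is
    # turned into a fake 2-D padding [0, pad] by _expand_low_nd_padding, the
    # 2-D op runs with a (1, ksize)-shaped kernel, and the result is squeezed
    # back along axis 2 -- which is what the squeeze(..., [2]) calls below do.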
padding = _expand_low_nd_padding(padding) if in_dygraph_mode(): if return_mask: - pool_out = _C_ops.max_pool2d_with_index(x, kernel_size, stride, - padding, False, False) - return (squeeze(pool_out[0], [2]), - squeeze(pool_out[1], [2])) if return_mask else squeeze( - pool_out[0], [2]) + pool_out = _C_ops.max_pool2d_with_index( + x, kernel_size, stride, padding, False, False + ) + return ( + (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2])) + if return_mask + else squeeze(pool_out[0], [2]) + ) else: - pool_out = _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode, - True, data_format, 'max', False, False, - padding_algorithm, True) + pool_out = _C_ops.pool2d( + x, + kernel_size, + stride, + padding, + ceil_mode, + True, + data_format, + 'max', + False, + False, + padding_algorithm, + True, + ) return squeeze(pool_out, [2]) if _in_legacy_dygraph(): if return_mask: pool_out = _legacy_C_ops.max_pool2d_with_index( - x, 'ksize', kernel_size, 'global_pooling', False, 'strides', - stride, 'paddings', padding, 'padding_algorithm', - padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode, - 'use_mkldnn', False, 'exclusive', True, 'data_format', - data_format) - return (squeeze(pool_out[0], [2]), - squeeze(pool_out[1], [2])) if return_mask else squeeze( - pool_out[0], [2]) + x, + 'ksize', + kernel_size, + 'global_pooling', + False, + 'strides', + stride, + 'paddings', + padding, + 'padding_algorithm', + padding_algorithm, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) + return ( + (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2])) + if return_mask + else squeeze(pool_out[0], [2]) + ) else: pool_out = _legacy_C_ops.pool2d( - x, 'pooling_type', 'max', 'ksize', kernel_size, - 'global_pooling', False, 'padding_algorithm', padding_algorithm, - 'strides', stride, 'paddings', padding, 'use_cudnn', True, - 'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True, - 'data_format', data_format) + x, + 'pooling_type', + 'max', + 'ksize', + kernel_size, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'strides', + stride, + 'paddings', + padding, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) return squeeze(pool_out, [2]) op_type = 'max_pool2d_with_index' if return_mask else "pool2d" @@ -617,36 +787,44 @@ def max_pool1d(x, mask = helper.create_variable_for_type_inference('int32') outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=op_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'max', - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": True, - "data_format": data_format, - }) - - return (squeeze(pool_out, [2]), - squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2]) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'max', + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": True, + "data_format": data_format, + }, + ) + + return ( + (squeeze(pool_out, [2]), squeeze(mask, [2])) + if return_mask + else squeeze(pool_out, [2]) + ) def _unpool_output_size(x, 
kernel_size, stride, padding, output_size): - assert output_size is None or isinstance( - output_size, (list, tuple) - ), "Required output_size is None|list|tuple, but received %s" % output_size + assert output_size is None or isinstance(output_size, (list, tuple)), ( + "Required output_size is None|list|tuple, but received %s" % output_size + ) input_size = x.shape default_size = [] for d in range(len(kernel_size)): - default_size.append((input_size[-len(kernel_size) + d] - 1) * - stride[d] + kernel_size[d] - 2 * padding[d]) + default_size.append( + (input_size[-len(kernel_size) + d] - 1) * stride[d] + + kernel_size[d] + - 2 * padding[d] + ) has_static_var = False if output_size is None: @@ -666,28 +844,33 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size): raise ValueError( "output_size should be a sequence containing " "{} or {} elements, but it has a length of '{}'".format( - len(kernel_size), - len(kernel_size) + 2, len(output_size))) + len(kernel_size), len(kernel_size) + 2, len(output_size) + ) + ) if not has_static_var: for d in range(len(kernel_size)): min_size = default_size[d] - stride[d] max_size = default_size[d] + stride[d] if not (min_size < output_size[d] < max_size): raise ValueError( - 'invalid output_size "{}" (dim {} must be between {} and {})' - .format(output_size, d, min_size, max_size)) + 'invalid output_size "{}" (dim {} must be between {} and {})'.format( + output_size, d, min_size, max_size + ) + ) return output_size -def max_unpool1d(x, - indices, - kernel_size, - stride=None, - padding=0, - data_format="NCL", - output_size=None, - name=None): +def max_unpool1d( + x, + indices, + kernel_size, + stride=None, + padding=0, + data_format="NCL", + output_size=None, + name=None, +): r""" This API implements max unpooling 1d opereation. `max_unpool1d` accepts the output of `max_pool1d` as input, @@ -745,8 +928,10 @@ def max_unpool1d(x, """ """NCL to NCHW""" if data_format not in ["NCL"]: - raise ValueError("Attr(data_format) should be 'NCL'. Received " - "Attr(data_format): %s." % str(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCL'. Received " + "Attr(data_format): %s." % str(data_format) + ) data_format = "NCHW" x = unsqueeze(x, [2]) indices = unsqueeze(indices, [2]) @@ -759,18 +944,32 @@ def max_unpool1d(x, # use 2d to implenment 1d should expand padding in advance. 
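    # [editor annotation, not part of the patch] max_unpool1d re-inserts the
    # pooled maxima at the positions recorded in `indices` by max_pool1d.
    # When output_size is None, _unpool_output_size (reformatted above)
    # derives the default length as
    #     L_out = (L_in - 1) * stride + kernel_size - 2 * padding
    # so, for example, pooling L=16 with kernel 2 / stride 2 yields L=8, and
    # unpooling that result with the same arguments restores L=16.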
padding = _expand_low_nd_padding(padding) - output_size = _unpool_output_size(x, kernel_size, stride, padding, - output_size) + output_size = _unpool_output_size( + x, kernel_size, stride, padding, output_size + ) if in_dygraph_mode(): - output = _C_ops.unpool(x, indices, kernel_size, stride, padding, - output_size, data_format) + output = _C_ops.unpool( + x, indices, kernel_size, stride, padding, output_size, data_format + ) return squeeze(output, [2]) elif in_dynamic_mode(): - output = _legacy_C_ops.unpool(x, indices, 'unpooling_type', 'max', - 'ksize', kernel_size, 'strides', stride, - 'paddings', padding, "output_size", - output_size, "data_format", data_format) + output = _legacy_C_ops.unpool( + x, + indices, + 'unpooling_type', + 'max', + 'ksize', + kernel_size, + 'strides', + stride, + 'paddings', + padding, + "output_size", + output_size, + "data_format", + data_format, + ) return squeeze(output, [2]) op_type = "unpool" @@ -778,30 +977,31 @@ def max_unpool1d(x, dtype = helper.input_dtype(input_param_name="x") unpool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={ - "X": x, - "Indices": indices - }, - outputs={"Out": unpool_out}, - attrs={ - "unpooling_type": "max", - "ksize": kernel_size, - "strides": stride, - "paddings": padding, - "output_size": output_size - }) + helper.append_op( + type=op_type, + inputs={"X": x, "Indices": indices}, + outputs={"Out": unpool_out}, + attrs={ + "unpooling_type": "max", + "ksize": kernel_size, + "strides": stride, + "paddings": padding, + "output_size": output_size, + }, + ) return squeeze(unpool_out, [2]) -def max_unpool2d(x, - indices, - kernel_size, - stride=None, - padding=0, - data_format="NCHW", - output_size=None, - name=None): +def max_unpool2d( + x, + indices, + kernel_size, + stride=None, + padding=0, + data_format="NCHW", + output_size=None, + name=None, +): r""" This API implements max unpooling 2d opereation. See more details in :ref:`api_nn_pooling_MaxUnPool2D` . @@ -875,21 +1075,37 @@ def max_unpool2d(x, padding = utils.convert_to_list(padding, 2, 'padding') if data_format not in ["NCHW"]: - raise ValueError("Attr(data_format) should be 'NCHW'. Received " - "Attr(data_format): %s." % str(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCHW'. Received " + "Attr(data_format): %s." 
% str(data_format) + ) - output_size = _unpool_output_size(x, kernel_size, stride, padding, - output_size) + output_size = _unpool_output_size( + x, kernel_size, stride, padding, output_size + ) if in_dygraph_mode(): - output = _C_ops.unpool(x, indices, kernel_size, stride, padding, - output_size, data_format) + output = _C_ops.unpool( + x, indices, kernel_size, stride, padding, output_size, data_format + ) return output elif in_dynamic_mode(): - output = _legacy_C_ops.unpool(x, indices, 'unpooling_type', 'max', - 'ksize', kernel_size, 'strides', stride, - 'paddings', padding, "output_size", - output_size, "data_format", data_format) + output = _legacy_C_ops.unpool( + x, + indices, + 'unpooling_type', + 'max', + 'ksize', + kernel_size, + 'strides', + stride, + 'paddings', + padding, + "output_size", + output_size, + "data_format", + data_format, + ) return output op_type = "unpool" @@ -897,30 +1113,31 @@ def max_unpool2d(x, dtype = helper.input_dtype(input_param_name="x") unpool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={ - "X": x, - "Indices": indices - }, - outputs={"Out": unpool_out}, - attrs={ - "unpooling_type": "max", - "ksize": kernel_size, - "strides": stride, - "paddings": padding, - "output_size": output_size - }) + helper.append_op( + type=op_type, + inputs={"X": x, "Indices": indices}, + outputs={"Out": unpool_out}, + attrs={ + "unpooling_type": "max", + "ksize": kernel_size, + "strides": stride, + "paddings": padding, + "output_size": output_size, + }, + ) return unpool_out -def max_unpool3d(x, - indices, - kernel_size, - stride=None, - padding=0, - data_format="NCDHW", - output_size=None, - name=None): +def max_unpool3d( + x, + indices, + kernel_size, + stride=None, + padding=0, + data_format="NCDHW", + output_size=None, + name=None, +): r""" This API implements max unpooling 3d opereation. `max_unpool3d` accepts the output of `max_pool3d` as input, @@ -992,21 +1209,37 @@ def max_unpool3d(x, padding = utils.convert_to_list(padding, 3, 'padding') if data_format not in ["NCDHW"]: - raise ValueError("Attr(data_format) should be 'NCDHW'. Received " - "Attr(data_format): %s." % str(data_format)) + raise ValueError( + "Attr(data_format) should be 'NCDHW'. Received " + "Attr(data_format): %s." 
% str(data_format) + ) - output_size = _unpool_output_size(x, kernel_size, stride, padding, - output_size) + output_size = _unpool_output_size( + x, kernel_size, stride, padding, output_size + ) if in_dygraph_mode(): - output = _C_ops.unpool3d(x, indices, kernel_size, stride, padding, - output_size, data_format) + output = _C_ops.unpool3d( + x, indices, kernel_size, stride, padding, output_size, data_format + ) return output elif in_dynamic_mode(): - output = _legacy_C_ops.unpool3d(x, indices, 'unpooling_type', 'max', - 'ksize', kernel_size, 'strides', stride, - 'paddings', padding, "output_size", - output_size, "data_format", data_format) + output = _legacy_C_ops.unpool3d( + x, + indices, + 'unpooling_type', + 'max', + 'ksize', + kernel_size, + 'strides', + stride, + 'paddings', + padding, + "output_size", + output_size, + "data_format", + data_format, + ) return output op_type = "unpool3d" @@ -1014,30 +1247,31 @@ def max_unpool3d(x, dtype = helper.input_dtype(input_param_name="x") unpool_out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs={ - "X": x, - "Indices": indices - }, - outputs={"Out": unpool_out}, - attrs={ - "unpooling_type": "max", - "ksize": kernel_size, - "strides": stride, - "paddings": padding, - "output_size": output_size - }) + helper.append_op( + type=op_type, + inputs={"X": x, "Indices": indices}, + outputs={"Out": unpool_out}, + attrs={ + "unpooling_type": "max", + "ksize": kernel_size, + "strides": stride, + "paddings": padding, + "output_size": output_size, + }, + ) return unpool_out -def max_pool2d(x, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - data_format="NCHW", - name=None): +def max_pool2d( + x, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + data_format="NCHW", + name=None, +): """ This API implements max pooling 2d operation. See more details in :ref:`api_nn_pooling_MaxPool2d` . @@ -1096,14 +1330,14 @@ def max_pool2d(x, if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." 
% str(data_format) + ) channel_last = True if data_format == "NHWC" else False - padding, padding_algorithm = _update_padding_nd(padding, - num_dims=2, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, num_dims=2, channel_last=channel_last, ceil_mode=ceil_mode + ) if data_format == "NHWC" and return_mask: raise ValueError( @@ -1112,69 +1346,122 @@ def max_pool2d(x, if in_dygraph_mode(): if return_mask: - output = _C_ops.max_pool2d_with_index(x, kernel_size, stride, - padding, False, False) + output = _C_ops.max_pool2d_with_index( + x, kernel_size, stride, padding, False, False + ) return output if return_mask else output[0] else: - return _C_ops.pool2d(x, kernel_size, stride, padding, ceil_mode, - True, data_format, 'max', False, False, - padding_algorithm, True) + return _C_ops.pool2d( + x, + kernel_size, + stride, + padding, + ceil_mode, + True, + data_format, + 'max', + False, + False, + padding_algorithm, + True, + ) if _in_legacy_dygraph(): if return_mask: output = _legacy_C_ops.max_pool2d_with_index( - x, 'ksize', kernel_size, 'global_pooling', False, 'strides', - stride, 'paddings', padding, 'padding_algorithm', - padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode, - 'use_mkldnn', False, 'exclusive', True, 'data_format', - data_format) + x, + 'ksize', + kernel_size, + 'global_pooling', + False, + 'strides', + stride, + 'paddings', + padding, + 'padding_algorithm', + padding_algorithm, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) return output if return_mask else output[0] else: output = _legacy_C_ops.pool2d( - x, 'pooling_type', 'max', 'ksize', kernel_size, - 'global_pooling', False, 'padding_algorithm', padding_algorithm, - 'strides', stride, 'paddings', padding, 'use_cudnn', True, - 'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True, - 'data_format', data_format) + x, + 'pooling_type', + 'max', + 'ksize', + kernel_size, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'strides', + stride, + 'paddings', + padding, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) return output op_type = 'max_pool2d_with_index' if return_mask else "pool2d" helper = LayerHelper(op_type, **locals()) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'max_pool2d') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'max_pool2d' + ) dtype = helper.input_dtype(input_param_name='x') pool_out = helper.create_variable_for_type_inference(dtype) mask = helper.create_variable_for_type_inference("int32") outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=op_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'max', - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": True, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'max', + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": True, + "data_format": data_format, + }, + ) return 
(pool_out, mask) if return_mask else pool_out -def max_pool3d(x, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - data_format="NCDHW", - name=None): +def max_pool3d( + x, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + data_format="NCDHW", + name=None, +): """ This API implements max pooling 2d operation. See more details in :ref:`api_nn_pooling_MaxPool3d` . @@ -1239,10 +1526,9 @@ def max_pool3d(x, channel_last = _channel_last(data_format, 3) - padding, padding_algorithm = _update_padding_nd(padding, - 3, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 3, channel_last=channel_last, ceil_mode=ceil_mode + ) if data_format == "NDHWC" and return_mask: raise ValueError( @@ -1251,30 +1537,80 @@ def max_pool3d(x, if in_dygraph_mode(): if return_mask: - output = _C_ops.max_pool3d_with_index(x, kernel_size, stride, - padding, False, False) + output = _C_ops.max_pool3d_with_index( + x, kernel_size, stride, padding, False, False + ) return output if return_mask else output[0] else: - return _C_ops.pool3d(x, kernel_size, stride, padding, ceil_mode, - True, data_format, 'max', False, False, - padding_algorithm, True) + return _C_ops.pool3d( + x, + kernel_size, + stride, + padding, + ceil_mode, + True, + data_format, + 'max', + False, + False, + padding_algorithm, + True, + ) if _in_legacy_dygraph(): if return_mask: output = _legacy_C_ops.max_pool3d_with_index( - x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides', - stride, 'paddings', padding, 'global_pooling', False, - 'padding_algorithm', padding_algorithm, 'use_cudnn', True, - 'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True, - 'data_format', data_format) + x, + 'pooling_type', + 'max', + 'ksize', + kernel_size, + 'strides', + stride, + 'paddings', + padding, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) return output if return_mask else output[0] else: output = _legacy_C_ops.pool3d( - x, 'pooling_type', 'max', 'ksize', kernel_size, - 'global_pooling', False, 'padding_algorithm', padding_algorithm, - 'strides', stride, 'paddings', padding, 'use_cudnn', True, - 'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive', True, - 'data_format', data_format) + x, + 'pooling_type', + 'max', + 'ksize', + kernel_size, + 'global_pooling', + False, + 'padding_algorithm', + padding_algorithm, + 'strides', + stride, + 'paddings', + padding, + 'use_cudnn', + True, + 'ceil_mode', + ceil_mode, + 'use_mkldnn', + False, + 'exclusive', + True, + 'data_format', + data_format, + ) return output op_type = "max_pool3d_with_index" if return_mask else "pool3d" @@ -1285,22 +1621,24 @@ def max_pool3d(x, mask = helper.create_variable_for_type_inference('int32') outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=op_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'max', - "ksize": kernel_size, - "global_pooling": False, - "strides": stride, - "paddings": padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": True, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": False, - "data_format": data_format, - }) + helper.append_op( + type=op_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'max', + "ksize": kernel_size, + "global_pooling": False, + "strides": stride, + "paddings": 
padding, + "padding_algorithm": padding_algorithm, + "use_cudnn": True, + "ceil_mode": ceil_mode, + "use_mkldnn": False, + "exclusive": False, + "data_format": data_format, + }, + ) return (pool_out, mask) if return_mask else pool_out @@ -1344,21 +1682,34 @@ def adaptive_avg_pool1d(x, output_size, name=None): """ pool_type = 'avg' if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'adaptive_pool2d') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'adaptive_pool2d' + ) check_type(output_size, 'pool_size', (int), 'adaptive_pool1d') _check_input(x, 3) pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size') x = unsqueeze(x, [2]) if in_dygraph_mode(): - pool_out = _C_ops.pool2d(x, pool_size, [1, 1], [0, 0], False, True, - "NCHW", pool_type, False, True, "EXPLICIT", - False) + pool_out = _C_ops.pool2d( + x, + pool_size, + [1, 1], + [0, 0], + False, + True, + "NCHW", + pool_type, + False, + True, + "EXPLICIT", + False, + ) return squeeze(pool_out, [2]) if _in_legacy_dygraph(): - pool_out = _legacy_C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize', - pool_size, 'adaptive', True) + pool_out = _legacy_C_ops.pool2d( + x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True + ) return squeeze(pool_out, [2]) l_type = "pool2d" @@ -1368,14 +1719,16 @@ def adaptive_avg_pool1d(x, output_size, name=None): pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": pool_type, - "ksize": pool_size, - "adaptive": True, - }) + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": pool_type, + "ksize": pool_size, + "adaptive": True, + }, + ) return squeeze(pool_out, [2]) @@ -1438,14 +1791,16 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): # out.shape is [2, 3, 3, 3] """ if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - 'adaptive_avg_pool2d') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'adaptive_avg_pool2d' + ) check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d') if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." 
% str(data_format) + ) if data_format == "NCHW": in_h, in_w = x.shape[2:4] @@ -1471,14 +1826,35 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): output_size = utils._convert_to_tensor_list(output_size) if in_dygraph_mode(): - return _C_ops.pool2d(x, output_size, [1, 1], [0, 0], False, True, - data_format, 'avg', False, True, "EXPLICIT", False) + return _C_ops.pool2d( + x, + output_size, + [1, 1], + [0, 0], + False, + True, + data_format, + 'avg', + False, + True, + "EXPLICIT", + False, + ) if _in_legacy_dygraph(): - return _legacy_C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', - output_size, 'global_pooling', False, - 'adaptive', True, 'data_format', - data_format) + return _legacy_C_ops.pool2d( + x, + 'pooling_type', + 'avg', + 'ksize', + output_size, + 'global_pooling', + False, + 'adaptive', + True, + 'data_format', + data_format, + ) l_type = 'pool2d' @@ -1488,15 +1864,17 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): outputs = {"Out": pool_out} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": "avg", - "ksize": output_size, - "adaptive": True, - "data_format": data_format, - }) + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": "avg", + "ksize": output_size, + "adaptive": True, + "data_format": data_format, + }, + ) return pool_out @@ -1562,14 +1940,16 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None): # out.shape is [2, 3, 3, 3, 3] """ if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'adaptive_avg_pool3d') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'adaptive_avg_pool3d' + ) check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d') if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s." % str(data_format)) + "Attr(data_format): %s." 
% str(data_format) + ) if data_format == "NCDHW": in_l, in_h, in_w = x.shape[2:5] @@ -1588,13 +1968,34 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None): output_size[2] = in_w if in_dygraph_mode(): - return _C_ops.pool3d(x, output_size, [1, 1, 1], [0, 0, 0], False, True, - data_format, 'avg', False, True, "EXPLICIT", False) + return _C_ops.pool3d( + x, + output_size, + [1, 1, 1], + [0, 0, 0], + False, + True, + data_format, + 'avg', + False, + True, + "EXPLICIT", + False, + ) elif _in_legacy_dygraph(): - return _legacy_C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', - output_size, 'global_pooling', False, - 'adaptive', True, 'data_format', - data_format) + return _legacy_C_ops.pool3d( + x, + 'pooling_type', + 'avg', + 'ksize', + output_size, + 'global_pooling', + False, + 'adaptive', + True, + 'data_format', + data_format, + ) l_type = 'pool3d' @@ -1603,15 +2004,17 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None): pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": "avg", - "ksize": output_size, - "adaptive": True, - "data_format": data_format, - }) + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": "avg", + "ksize": output_size, + "adaptive": True, + "data_format": data_format, + }, + ) return pool_out @@ -1662,8 +2065,9 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None): """ pool_type = 'max' if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'adaptive_max_pool1d') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'adaptive_max_pool1d' + ) check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d') check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d') _check_input(x, 3) @@ -1672,17 +2076,23 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None): x = unsqueeze(x, [2]) if in_dygraph_mode(): - pool_out = _C_ops.max_pool2d_with_index(x, pool_size, [1, 1], [0, 0], - False, True) - return (squeeze(pool_out[0], [2]), squeeze( - pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2]) + pool_out = _C_ops.max_pool2d_with_index( + x, pool_size, [1, 1], [0, 0], False, True + ) + return ( + (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2])) + if return_mask + else squeeze(pool_out[0], [2]) + ) if _in_legacy_dygraph(): - pool_out = _legacy_C_ops.max_pool2d_with_index(x, 'pooling_type', - pool_type, 'ksize', - pool_size, 'adaptive', - True) - return (squeeze(pool_out[0], [2]), squeeze( - pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2]) + pool_out = _legacy_C_ops.max_pool2d_with_index( + x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True + ) + return ( + (squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2])) + if return_mask + else squeeze(pool_out[0], [2]) + ) l_type = 'max_pool2d_with_index' @@ -1693,64 +2103,70 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None): mask = helper.create_variable_for_type_inference('int32') outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": pool_type, - "ksize": pool_size, - "adaptive": True, - }) + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": pool_type, + "ksize": pool_size, + "adaptive": True, + }, + ) - return 
(squeeze(pool_out, [2]), - squeeze(mask, [2])) if return_mask else squeeze(pool_out, [2]) + return ( + (squeeze(pool_out, [2]), squeeze(mask, [2])) + if return_mask + else squeeze(pool_out, [2]) + ) def adaptive_max_pool2d(x, output_size, return_mask=False, name=None): """ - This operation applies a 2D adaptive max pooling on input tensor. - See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` . + This operation applies a 2D adaptive max pooling on input tensor. + See more details in :ref:`api_nn_pooling_AdaptiveMaxPool2d` . - Args: - x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64. - output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either a int, or None which means the size will be the same as that of the input. - return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False. - name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. + Args: + x (Tensor): The input tensor of adaptive max pool2d operator, which is a 4-D tensor. The data type can be float16, float32, float64, int32 or int64. + output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two elements, (H, W). H and W can be either a int, or None which means the size will be the same as that of the input. + return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False. + name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - Returns: - Tensor: The output tensor of adaptive max pool2d result. The data type is same as input tensor. + Returns: + Tensor: The output tensor of adaptive max pool2d result. The data type is same as input tensor. - Examples: - .. code-block:: python + Examples: + .. code-block:: python - # max adaptive pool2d - # suppose input data in the shape of [N, C, H, W], `output_size` is [m, n] - # output shape is [N, C, m, n], adaptive pool divide H and W dimensions - # of input data into m*n grids averagely and performs poolings in each - # grid to get output. - # adaptive max pool performs calculations as follow: - # - # for i in range(m): - # for j in range(n): - # hstart = floor(i * H / m) - # hend = ceil((i + 1) * H / m) - # wstart = floor(i * W / n) - # wend = ceil((i + 1) * W / n) - # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend]) - # - import paddle + # max adaptive pool2d + # suppose input data in the shape of [N, C, H, W], `output_size` is [m, n] + # output shape is [N, C, m, n], adaptive pool divide H and W dimensions + # of input data into m*n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive max pool performs calculations as follow: + # + # for i in range(m): + # for j in range(n): + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend]) + # + import paddle - input_data = paddle.randn(shape=(2, 3, 32, 32)) - out = paddle.nn.functional.adaptive_max_pool2d( - x = input_data, - output_size=[3, 3]) - # out.shape is [2, 3, 3, 3] + input_data = paddle.randn(shape=(2, 3, 32, 32)) + out = paddle.nn.functional.adaptive_max_pool2d( + x = input_data, + output_size=[3, 3]) + # out.shape is [2, 3, 3, 3] """ if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'adaptive_max_pool2d') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'adaptive_max_pool2d' + ) check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d') - #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d') + # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d') _check_input(x, 4) in_h, in_w = x.shape[2:4] @@ -1763,13 +2179,14 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None): if output_size[1] == None: output_size[1] = in_w if in_dygraph_mode(): - pool_out = _C_ops.max_pool2d_with_index(x, output_size, [1, 1], [0, 0], - False, True) + pool_out = _C_ops.max_pool2d_with_index( + x, output_size, [1, 1], [0, 0], False, True + ) return pool_out if return_mask else pool_out[0] if _in_legacy_dygraph(): - pool_out = _legacy_C_ops.max_pool2d_with_index(x, 'pooling_type', 'max', - 'ksize', output_size, - 'adaptive', True) + pool_out = _legacy_C_ops.max_pool2d_with_index( + x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True + ) return pool_out if return_mask else pool_out[0] l_type = 'max_pool2d_with_index' @@ -1781,67 +2198,70 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None): mask = helper.create_variable_for_type_inference('int32') outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'max', - "ksize": output_size, - "adaptive": True, - }) - #return (pool_out, mask) if return_mask else pool_out + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'max', + "ksize": output_size, + "adaptive": True, + }, + ) + # return (pool_out, mask) if return_mask else pool_out return pool_out def adaptive_max_pool3d(x, output_size, return_mask=False, name=None): """ - This operation applies a 3D adaptive max pooling on input tensor. - See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` . + This operation applies a 3D adaptive max pooling on input tensor. + See more details in :ref:`api_nn_pooling_AdaptiveMaxPool3d` . - Args: - x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64. - output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either a int, or None which means the size will be the same as that of the input. - return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False. - name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. 
+ Args: + x (Tensor): The input tensor of adaptive max pool3d operator, which is a 5-D tensor. The data type can be float32, float64. + output_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three elements, (D, H, W). D, H and W can be either a int, or None which means the size will be the same as that of the input. + return_mask (bool): If true, the index of max pooling point will be returned along with outputs. Default False. + name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. - Returns: - Tensor: The output tensor of adaptive max pool3d result. The data type is same as input tensor. + Returns: + Tensor: The output tensor of adaptive max pool3d result. The data type is same as input tensor. - Examples: - .. code-block:: python + Examples: + .. code-block:: python - # adaptive max pool3d - # suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n] - # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions - # of input data into m*n grids averagely and performs poolings in each - # grid to get output. - # adaptive max pool performs calculations as follow: - # - # for i in range(l): - # for j in range(m): - # for k in range(n): - # dstart = floor(i * D / l) - # dend = ceil((i + 1) * D / l) - # hstart = floor(i * H / m) - # hend = ceil((i + 1) * H / m) - # wstart = floor(i * W / n) - # wend = ceil((i + 1) * W / n) - # output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend]) - # - import paddle + # adaptive max pool3d + # suppose input data in the shape of [N, C, D, H, W], `output_size` is [l, m, n] + # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions + # of input data into m*n grids averagely and performs poolings in each + # grid to get output. 
+ # adaptive max pool performs calculations as follow: + # + # for i in range(l): + # for j in range(m): + # for k in range(n): + # dstart = floor(i * D / l) + # dend = ceil((i + 1) * D / l) + # hstart = floor(i * H / m) + # hend = ceil((i + 1) * H / m) + # wstart = floor(i * W / n) + # wend = ceil((i + 1) * W / n) + # output[:, :, i, j, k] = max(input[:, :, dstart: dend, hstart: hend, wstart: wend]) + # + import paddle - input_data = paddle.randn(shape=(2, 3, 8, 32, 32)) - out = paddle.nn.functional.adaptive_max_pool3d( - x = input_data, - output_size=[3, 3, 3]) - # out.shape is [2, 3, 3, 3, 3] + input_data = paddle.randn(shape=(2, 3, 8, 32, 32)) + out = paddle.nn.functional.adaptive_max_pool3d( + x = input_data, + output_size=[3, 3, 3]) + # out.shape is [2, 3, 3, 3, 3] """ if not in_dynamic_mode(): - check_variable_and_dtype(x, 'x', ['float32', 'float64'], - 'adaptive_max_pool3d') + check_variable_and_dtype( + x, 'x', ['float32', 'float64'], 'adaptive_max_pool3d' + ) check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d') - #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d') + # check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d') _check_input(x, 5) in_l, in_h, in_w = x.shape[2:5] @@ -1859,12 +2279,13 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None): if in_dynamic_mode(): if in_dygraph_mode(): # By default, strides is [1,1,1] and paddings is [0, 0, 0] - pool_out = _C_ops.max_pool3d_with_index(x, output_size, [1, 1, 1], - [0, 0, 0], False, True) + pool_out = _C_ops.max_pool3d_with_index( + x, output_size, [1, 1, 1], [0, 0, 0], False, True + ) elif _in_legacy_dygraph(): pool_out = _legacy_C_ops.max_pool3d_with_index( - x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', - True) + x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True + ) return pool_out if return_mask else pool_out[0] l_type = 'max_pool3d_with_index' @@ -1876,13 +2297,15 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None): mask = helper.create_variable_for_type_inference('int32') outputs = {"Out": pool_out, "Mask": mask} - helper.append_op(type=l_type, - inputs={"X": x}, - outputs=outputs, - attrs={ - "pooling_type": 'max', - "ksize": output_size, - "adaptive": True, - }) + helper.append_op( + type=l_type, + inputs={"X": x}, + outputs=outputs, + attrs={ + "pooling_type": 'max', + "ksize": output_size, + "adaptive": True, + }, + ) return (pool_out, mask) if return_mask else pool_out diff --git a/python/paddle/nn/functional/sparse_attention.py b/python/paddle/nn/functional/sparse_attention.py index 87df72fe0ef2f3edd080cda9e8f89ae96c5c6d48..0dfb545aae85b4f7e1eb4570d35c221e6973dd42 100644 --- a/python/paddle/nn/functional/sparse_attention.py +++ b/python/paddle/nn/functional/sparse_attention.py @@ -17,14 +17,16 @@ from paddle import _legacy_C_ops from paddle import in_dynamic_mode -def sparse_attention(query, - key, - value, - sparse_csr_offset, - sparse_csr_columns, - key_padding_mask=None, - attn_mask=None, - name=None): +def sparse_attention( + query, + key, + value, + sparse_csr_offset, + sparse_csr_columns, + key_padding_mask=None, + attn_mask=None, + name=None, +): r""" This operator sparsify the Attention matrix in Transformer module to achieve the effect of reducing memory consumption and computation. 
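For orientation while reading the sparse_attention hunks below, a minimal usage sketch built only from the signature shown above; the 4-D [batch_size, num_heads, seq_len, head_dim] layout of query/key/value, the int32 CSR offset shape [batch_size, num_heads, seq_len + 1], the column shape [batch_size, num_heads, nnz], and the CUDA-only availability of the kernel are assumptions about the op, not facts taken from this diff:

    import paddle
    import paddle.nn.functional as F

    # Illustrative shapes (assumed): Q/K/V as [batch, heads, seq_len, head_dim].
    q = paddle.rand([1, 1, 4, 2], dtype='float32')
    k = paddle.rand([1, 1, 4, 2], dtype='float32')
    v = paddle.rand([1, 1, 4, 2], dtype='float32')
    # A dense mask written in CSR form: each of the 4 query rows attends to all 4 keys.
    offset = paddle.to_tensor([[[0, 4, 8, 12, 16]]], dtype='int32')
    columns = paddle.to_tensor([[[0, 1, 2, 3] * 4]], dtype='int32')
    # Assumes a CUDA build of Paddle; the sparse_attention kernel is GPU-only.
    out = F.sparse_attention(q, k, v, offset, columns)
    print(out.shape)  # [1, 1, 4, 2]

A sparser offset/columns pair restricts which key positions each query row may attend to, which is where the memory and compute savings described above come from.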
@@ -141,9 +143,19 @@ def sparse_attention(query, # [1.99830270, 2.99830270]]]] """ if in_dynamic_mode(): - result_attention, result_sdd, result_softmax = _legacy_C_ops.sparse_attention( - query, key, value, sparse_csr_offset, sparse_csr_columns, - key_padding_mask, attn_mask) + ( + result_attention, + result_sdd, + result_softmax, + ) = _legacy_C_ops.sparse_attention( + query, + key, + value, + sparse_csr_offset, + sparse_csr_columns, + key_padding_mask, + attn_mask, + ) return result_attention helper = LayerHelper('sparse_attention', **locals()) @@ -163,7 +175,7 @@ def sparse_attention(query, outputs = { 'Out': out, 'SparseDotSdd': result_sdd, - 'Softmax': result_softmax + 'Softmax': result_softmax, } helper.append_op(type='sparse_attention', inputs=inputs, outputs=outputs) return out diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index af1656d641740b223f82a3e0ed393133adcdffe8..bc77dcefa45f931ccd497581201a3ffcdcb6e25b 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -82,45 +82,65 @@ def affine_grid(theta, out_shape, align_corners=True, name=None): if theta.shape[1] == 3: use_cudnn = False if is_compiled_with_rocm(): - use_cudnn = False # ROCM platform do not have MIOPEN kernel for affine_grid + use_cudnn = ( + False # ROCM platform do not have MIOPEN kernel for affine_grid + ) if in_dygraph_mode(): - _out_shape = out_shape.numpy().tolist() if isinstance( - out_shape, Variable) else out_shape + _out_shape = ( + out_shape.numpy().tolist() + if isinstance(out_shape, Variable) + else out_shape + ) return _C_ops.affine_grid(theta, _out_shape, use_cudnn, align_corners) elif in_dynamic_mode(): - _out_shape = out_shape.numpy().tolist() if isinstance( - out_shape, Variable) else out_shape - return _legacy_C_ops.affine_grid(theta, "output_shape", _out_shape, - "align_corners", align_corners, - "use_cudnn", use_cudnn) + _out_shape = ( + out_shape.numpy().tolist() + if isinstance(out_shape, Variable) + else out_shape + ) + return _legacy_C_ops.affine_grid( + theta, + "output_shape", + _out_shape, + "align_corners", + align_corners, + "use_cudnn", + use_cudnn, + ) helper = LayerHelper('affine_grid') - check_variable_and_dtype(theta, 'theta', ['float32', 'float64'], - 'affine_grid') + check_variable_and_dtype( + theta, 'theta', ['float32', 'float64'], 'affine_grid' + ) out = helper.create_variable_for_type_inference(theta.dtype) ipts = {'Theta': theta} attrs = {"align_corners": align_corners, "use_cudnn": use_cudnn} if isinstance(out_shape, Variable): ipts['OutputShape'] = out_shape - check_variable_and_dtype(out_shape, 'out_shape', ['int32'], - 'affine_grid') + check_variable_and_dtype( + out_shape, 'out_shape', ['int32'], 'affine_grid' + ) else: attrs['output_shape'] = out_shape - helper.append_op(type='affine_grid', - inputs=ipts, - outputs={'Output': out}, - attrs=None if len(attrs) == 0 else attrs) + helper.append_op( + type='affine_grid', + inputs=ipts, + outputs={'Output': out}, + attrs=None if len(attrs) == 0 else attrs, + ) return out -def grid_sample(x, - grid, - mode='bilinear', - padding_mode='zeros', - align_corners=True, - name=None): +def grid_sample( + x, + grid, + mode='bilinear', + padding_mode='zeros', + align_corners=True, + name=None, +): """ Sample input X by using bilinear interpolation or nearest interpolation based on flow field grid, which is usually @@ -253,22 +273,33 @@ def grid_sample(x, _padding_modes = ['zeros', 'reflection', 'border'] if mode not in _modes: raise ValueError( - 
"The mode of grid sample function should be in {}, but got: {}". - format(_modes, mode)) + "The mode of grid sample function should be in {}, but got: {}".format( + _modes, mode + ) + ) if padding_mode not in _padding_modes: raise ValueError( - "The padding mode of grid sample function should be in {}, but got: {}" - .format(_padding_modes, padding_mode)) + "The padding mode of grid sample function should be in {}, but got: {}".format( + _padding_modes, padding_mode + ) + ) if not isinstance(align_corners, bool): - raise ValueError("The align corners should be bool, but got: {}".format( - align_corners)) + raise ValueError( + "The align corners should be bool, but got: {}".format( + align_corners + ) + ) cudnn_version = get_cudnn_version() use_cudnn = False - if not is_compiled_with_rocm() and ( - cudnn_version is not None - ) and align_corners and mode == 'bilinear' and padding_mode == 'zeros': + if ( + not is_compiled_with_rocm() + and (cudnn_version is not None) + and align_corners + and mode == 'bilinear' + and padding_mode == 'zeros' + ): use_cudnn = True # CUDNN always computes gradients for all inputs x.stop_gradient = False @@ -280,26 +311,37 @@ def grid_sample(x, if in_dygraph_mode(): return _C_ops.grid_sample(x, grid, mode, padding_mode, align_corners) elif in_dynamic_mode(): - attrs = ('mode', mode, 'padding_mode', padding_mode, 'align_corners', - align_corners, 'use_cudnn', use_cudnn) + attrs = ( + 'mode', + mode, + 'padding_mode', + padding_mode, + 'align_corners', + align_corners, + 'use_cudnn', + use_cudnn, + ) out = getattr(_legacy_C_ops, 'grid_sampler')(x, grid, *attrs) else: helper = LayerHelper("grid_sample", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample') - check_variable_and_dtype(grid, 'grid', ['float32', 'float64'], - 'grid_sample') + check_variable_and_dtype( + grid, 'grid', ['float32', 'float64'], 'grid_sample' + ) ipts = {'X': x, 'Grid': grid} attrs = { 'mode': mode, 'padding_mode': padding_mode, 'align_corners': align_corners, - 'use_cudnn': use_cudnn + 'use_cudnn': use_cudnn, } out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='grid_sampler', - inputs=ipts, - attrs=attrs, - outputs={'Output': out}) + helper.append_op( + type='grid_sampler', + inputs=ipts, + attrs=attrs, + outputs={'Output': out}, + ) return out @@ -336,24 +378,25 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." 
- "But recevie Attr(data_format): {} ".format(data_format)) + "But recevie Attr(data_format): {} ".format(data_format) + ) if in_dygraph_mode(): return _C_ops.pixel_shuffle(x, upscale_factor, data_format) if _in_legacy_dygraph(): - return _legacy_C_ops.pixel_shuffle(x, "upscale_factor", upscale_factor, - "data_format", data_format) + return _legacy_C_ops.pixel_shuffle( + x, "upscale_factor", upscale_factor, "data_format", data_format + ) helper = LayerHelper("pixel_shuffle", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="pixel_shuffle", - inputs={"X": x}, - outputs={"Out": out}, - attrs={ - "upscale_factor": upscale_factor, - "data_format": data_format - }) + helper.append_op( + type="pixel_shuffle", + inputs={"X": x}, + outputs={"Out": out}, + attrs={"upscale_factor": upscale_factor, "data_format": data_format}, + ) return out @@ -383,8 +426,10 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): """ if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 4D tensor, but received x with the shape of {}".format( + x.shape + ) + ) if not isinstance(downscale_factor, int): raise TypeError("Downscale factor must be int type") @@ -395,23 +440,26 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - "But recevie Attr(data_format): {} ".format(data_format)) + "But recevie Attr(data_format): {} ".format(data_format) + ) if _non_static_mode(): - return _legacy_C_ops.pixel_unshuffle(x, "downscale_factor", - downscale_factor, "data_format", - data_format) + return _legacy_C_ops.pixel_unshuffle( + x, "downscale_factor", downscale_factor, "data_format", data_format + ) helper = LayerHelper("pixel_unshuffle", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_unshuffle') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="pixel_unshuffle", - inputs={"X": x}, - outputs={"Out": out}, - attrs={ - "downscale_factor": downscale_factor, - "data_format": data_format - }) + helper.append_op( + type="pixel_unshuffle", + inputs={"X": x}, + outputs={"Out": out}, + attrs={ + "downscale_factor": downscale_factor, + "data_format": data_format, + }, + ) return out @@ -452,8 +500,10 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None): """ if len(x.shape) != 4: raise ValueError( - "Input x should be 4D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 4D tensor, but received x with the shape of {}".format( + x.shape + ) + ) if not isinstance(groups, int): raise TypeError("groups must be int type") @@ -464,20 +514,21 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." 
- "But recevie Attr(data_format): {} ".format(data_format)) + "But recevie Attr(data_format): {} ".format(data_format) + ) if _non_static_mode(): - return _legacy_C_ops.channel_shuffle(x, "groups", groups, "data_format", - data_format) + return _legacy_C_ops.channel_shuffle( + x, "groups", groups, "data_format", data_format + ) helper = LayerHelper("channel_shuffle", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'channel_shuffle') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="channel_shuffle", - inputs={"X": x}, - outputs={"Out": out}, - attrs={ - "groups": groups, - "data_format": data_format - }) + helper.append_op( + type="channel_shuffle", + inputs={"X": x}, + outputs={"Out": out}, + attrs={"groups": groups, "data_format": data_format}, + ) return out diff --git a/python/paddle/nn/initializer/__init__.py b/python/paddle/nn/initializer/__init__.py index 530c52bf5f26d269046ea734fd14544cdeb8e29f..e078e19ed2b4db8b40c629bafab89451e61433b5 100644 --- a/python/paddle/nn/initializer/__init__.py +++ b/python/paddle/nn/initializer/__init__.py @@ -36,8 +36,19 @@ from .orthogonal import Orthogonal # noqa: F401 from .dirac import Dirac # noqa: F401 -__all__ = [ #noqa - 'Bilinear', 'Constant', 'KaimingUniform', 'KaimingNormal', 'XavierNormal', - 'XavierUniform', 'Assign', 'Normal', 'TruncatedNormal', 'Uniform', - 'Orthogonal', 'Dirac', 'set_global_initializer', 'calculate_gain' +__all__ = [ # noqa + 'Bilinear', + 'Constant', + 'KaimingUniform', + 'KaimingNormal', + 'XavierNormal', + 'XavierUniform', + 'Assign', + 'Normal', + 'TruncatedNormal', + 'Uniform', + 'Orthogonal', + 'Dirac', + 'set_global_initializer', + 'calculate_gain', ] diff --git a/python/paddle/nn/initializer/assign.py b/python/paddle/nn/initializer/assign.py index 2cdd5fdf1aa3dbee95bca419c28061f7647aa5fa..22560890fe90b8019ab365131a10b1b03d24b28b 100644 --- a/python/paddle/nn/initializer/assign.py +++ b/python/paddle/nn/initializer/assign.py @@ -83,15 +83,19 @@ class Assign(NumpyArrayInitializer): def __init__(self, value, name=None): import numpy - check_type(value, 'value', - (numpy.ndarray, list, tuple, paddle.static.Variable), - 'Assign') - if (isinstance(value, (list, tuple))): + check_type( + value, + 'value', + (numpy.ndarray, list, tuple, paddle.static.Variable), + 'Assign', + ) + + if isinstance(value, (list, tuple)): value = numpy.array(value) # TODO: value is already is a tensor, accounting efficiency maybe it does not need to convert tensor to numpy data and then initialized. - if (isinstance(value, paddle.static.Variable)): + if isinstance(value, paddle.static.Variable): value = value.numpy() super(Assign, self).__init__(value) diff --git a/python/paddle/nn/initializer/dirac.py b/python/paddle/nn/initializer/dirac.py index 74fc285c6bbf3d46d9017503de6039cf1d67742c..f95cd2d8274d91cfcd87039efb399a1ac0f21862 100644 --- a/python/paddle/nn/initializer/dirac.py +++ b/python/paddle/nn/initializer/dirac.py @@ -90,7 +90,8 @@ class Dirac(Initializer): def __init__(self, groups=1, name=None): assert groups > 0 and isinstance( - groups, int), " 'groups' must be a positive integer. " + groups, int + ), " 'groups' must be a positive integer. 
" super(Dirac, self).__init__() self._groups = groups @@ -108,43 +109,49 @@ class Dirac(Initializer): block = self._check_block(block) assert isinstance(var, framework.Parameter) assert isinstance(block, framework.Block) - check_variable_and_dtype(var, "Out", - ['float16', 'bfloat16', 'float32', 'float64'], - 'Dirac') + check_variable_and_dtype( + var, "Out", ['float16', 'bfloat16', 'float32', 'float64'], 'Dirac' + ) assert len(var.shape) in [ - 3, 4, 5 + 3, + 4, + 5, ], "Only Tensor with 3/4/5 dimensions can be initialized by Dirac" assert ( - var.shape[0] % - self._groups) == 0, "Tensor 0-dimension must be divisible by groups" + var.shape[0] % self._groups + ) == 0, "Tensor 0-dimension must be divisible by groups" if var.dtype != VarDesc.VarType.FP32: - out_var = block.create_var(name=unique_name.generate(".".join( - ['dirac', var.name, 'tmp'])), - shape=var.shape, - dtype=VarDesc.VarType.FP32, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False) + out_var = block.create_var( + name=unique_name.generate(".".join(['dirac', var.name, 'tmp'])), + shape=var.shape, + dtype=VarDesc.VarType.FP32, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + ) else: out_var = var op = None if framework.in_dygraph_mode(): with fluid.dygraph.no_grad(): place = _current_expected_place() - _C_ops.full_(out_var, out_var.shape, str(float(0)), - out_var.dtype, place) + _C_ops.full_( + out_var, out_var.shape, str(float(0)), out_var.dtype, place + ) else: - block.append_op(type='fill_constant', - inputs={}, - outputs={'Out': out_var}, - attrs={ - 'value': float(0), - 'dtype': out_var.dtype, - 'shape': out_var.shape, - }, - stop_gradient=True) + block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': out_var}, + attrs={ + 'value': float(0), + 'dtype': out_var.dtype, + 'shape': out_var.shape, + }, + stop_gradient=True, + ) origin_shape = var.shape num_per_group = origin_shape[0] // self._groups @@ -162,9 +169,9 @@ class Dirac(Initializer): value_list.append(1.0) offset = 0 for (k, stride) in enumerate(strides): - if (k == 0): + if k == 0: offset += (j + i * num_per_group) * stride - elif (k == 1): + elif k == 1: offset += j * stride else: offset += origin_shape[k] // 2 * stride @@ -174,71 +181,86 @@ class Dirac(Initializer): tmp_out = _C_ops.reshape(out_var, [-1]) tmp_out._share_underline_tensor_to(out_var) else: - x_shape = block.create_var(name=unique_name.generate(".".join( - [out_var.name, "XShape"])), - dtype=out_var.dtype, - shape=out_var.shape, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=True) - block.append_op(type="reshape2", - inputs={"X": out_var}, - attrs={'shape': [-1]}, - outputs={ - "Out": out_var, - "XShape": x_shape - }, - stop_gradient=True) + x_shape = block.create_var( + name=unique_name.generate(".".join([out_var.name, "XShape"])), + dtype=out_var.dtype, + shape=out_var.shape, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type="reshape2", + inputs={"X": out_var}, + attrs={'shape': [-1]}, + outputs={"Out": out_var, "XShape": x_shape}, + stop_gradient=True, + ) index_tensor = block.create_var( name=unique_name.generate('scatter_index'), persistable=False, - stop_gradient=True) + stop_gradient=True, + ) if framework.in_dygraph_mode(): with fluid.dygraph.no_grad(): tmp_tensor = framework._varbase_creator() - _C_ops.assign_value_(tmp_tensor, [len(idx_list)], - VarDesc.VarType.INT64, idx_list, - _current_expected_place()) + _C_ops.assign_value_( + tmp_tensor, + [len(idx_list)], + 
VarDesc.VarType.INT64, + idx_list, + _current_expected_place(), + ) tmp_tensor._share_underline_tensor_to(index_tensor) else: - block.append_op(type='assign_value', - outputs={'Out': index_tensor}, - attrs={ - 'dtype': VarDesc.VarType.INT64, - 'shape': [len(idx_list)], - 'int64_values': idx_list - }, - stop_gradient=True) + block.append_op( + type='assign_value', + outputs={'Out': index_tensor}, + attrs={ + 'dtype': VarDesc.VarType.INT64, + 'shape': [len(idx_list)], + 'int64_values': idx_list, + }, + stop_gradient=True, + ) value_tensor = block.create_var( name=unique_name.generate('scatter_value'), persistable=False, - stop_gradient=True) + stop_gradient=True, + ) if framework.in_dygraph_mode(): with fluid.dygraph.no_grad(): tmp_tensor = framework._varbase_creator() - _C_ops.assign_value_(tmp_tensor, [len(value_list)], - VarDesc.VarType.FP32, value_list, - _current_expected_place()) + _C_ops.assign_value_( + tmp_tensor, + [len(value_list)], + VarDesc.VarType.FP32, + value_list, + _current_expected_place(), + ) tmp_tensor._share_underline_tensor_to(value_tensor) else: - block.append_op(type='assign_value', - outputs={'Out': value_tensor}, - attrs={ - 'dtype': VarDesc.VarType.FP32, - 'shape': [len(value_list)], - 'fp32_values': value_list - }, - stop_gradient=True) + block.append_op( + type='assign_value', + outputs={'Out': value_tensor}, + attrs={ + 'dtype': VarDesc.VarType.FP32, + 'shape': [len(value_list)], + 'fp32_values': value_list, + }, + stop_gradient=True, + ) if framework.in_dygraph_mode(): with fluid.dygraph.no_grad(): - tmp_out = _C_ops.scatter(out_var, index_tensor, value_tensor, - True) + tmp_out = _C_ops.scatter( + out_var, index_tensor, value_tensor, True + ) tmp_out._share_underline_tensor_to(out_var) tmp_reshape_out = _C_ops.reshape(out_var, origin_shape) tmp_reshape_out._share_underline_tensor_to(out_var) @@ -246,39 +268,40 @@ class Dirac(Initializer): tmp_cast_out = _C_ops.cast(out_var, var.dtype) tmp_cast_out._share_underline_tensor_to(var) else: - op = block.append_op(type="scatter", - inputs={ - "X": out_var, - "Ids": index_tensor, - "Updates": value_tensor - }, - attrs={'overwrite': True}, - outputs={"Out": out_var}, - stop_gradient=True) - x_shape = block.create_var(name=unique_name.generate(".".join( - [out_var.name, "XShape"])), - dtype=out_var.dtype, - shape=out_var.shape, - type=VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=True) - block.append_op(type="reshape2", - inputs={"X": out_var}, - attrs={'shape': origin_shape}, - outputs={ - "Out": out_var, - "XShape": x_shape - }, - stop_gradient=True) + op = block.append_op( + type="scatter", + inputs={ + "X": out_var, + "Ids": index_tensor, + "Updates": value_tensor, + }, + attrs={'overwrite': True}, + outputs={"Out": out_var}, + stop_gradient=True, + ) + x_shape = block.create_var( + name=unique_name.generate(".".join([out_var.name, "XShape"])), + dtype=out_var.dtype, + shape=out_var.shape, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type="reshape2", + inputs={"X": out_var}, + attrs={'shape': origin_shape}, + outputs={"Out": out_var, "XShape": x_shape}, + stop_gradient=True, + ) if var.dtype != VarDesc.VarType.FP32: - block.append_op(type="cast", - inputs={"X": out_var}, - outputs={"Out": var}, - attrs={ - "in_dtype": out_var.dtype, - "out_dtype": var.dtype - }, - stop_gradient=True) + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, "out_dtype": var.dtype}, + 
stop_gradient=True, + ) if not in_dynamic_mode(): var.op = op return op diff --git a/python/paddle/nn/initializer/kaiming.py b/python/paddle/nn/initializer/kaiming.py index ab4c90343fec5eb1f5c08c6c97e5bc1de9f987f1..8449ce925fb369c409d1e045cc5455958647c81d 100644 --- a/python/paddle/nn/initializer/kaiming.py +++ b/python/paddle/nn/initializer/kaiming.py @@ -58,11 +58,13 @@ class KaimingNormal(MSRAInitializer): """ def __init__(self, fan_in=None, negative_slope=0.0, nonlinearity='relu'): - super(KaimingNormal, self).__init__(uniform=False, - fan_in=fan_in, - seed=0, - negative_slope=negative_slope, - nonlinearity=nonlinearity) + super(KaimingNormal, self).__init__( + uniform=False, + fan_in=fan_in, + seed=0, + negative_slope=negative_slope, + nonlinearity=nonlinearity, + ) class KaimingUniform(MSRAInitializer): @@ -104,8 +106,10 @@ class KaimingUniform(MSRAInitializer): """ def __init__(self, fan_in=None, negative_slope=0.0, nonlinearity='relu'): - super(KaimingUniform, self).__init__(uniform=True, - fan_in=fan_in, - seed=0, - negative_slope=negative_slope, - nonlinearity=nonlinearity) + super(KaimingUniform, self).__init__( + uniform=True, + fan_in=fan_in, + seed=0, + negative_slope=negative_slope, + nonlinearity=nonlinearity, + ) diff --git a/python/paddle/nn/initializer/orthogonal.py b/python/paddle/nn/initializer/orthogonal.py index 65871ebad0b503613c0963bfeb0ae8762ead583d..84fabba483deb1ca1af3d2ca2fb49d851a00baf8 100644 --- a/python/paddle/nn/initializer/orthogonal.py +++ b/python/paddle/nn/initializer/orthogonal.py @@ -85,15 +85,16 @@ class Orthogonal(Initializer): assert isinstance(var, framework.Parameter) assert isinstance(block, framework.Block) # 'qr' op only support float32/float64 now - check_variable_and_dtype(var, "Out", ["float32", "float64"], - "Orthogonal") + check_variable_and_dtype( + var, "Out", ["float32", "float64"], "Orthogonal" + ) self._seed = block.program.random_seed shape = var.shape - assert len( - shape - ) >= 2, "Only Tensor with 2 or more dimensions can be initialized by Orthogonal" + assert ( + len(shape) >= 2 + ), "Only Tensor with 2 or more dimensions can be initialized by Orthogonal" row = shape[0] col = 1 @@ -105,9 +106,9 @@ class Orthogonal(Initializer): if framework.in_dygraph_mode(): with no_grad(): place = framework._current_expected_place() - normal_var = _C_ops.gaussian_random(flatten_shape, 0.0, 1.0, - self._seed, var.dtype, - place) + normal_var = _C_ops.gaussian_random( + flatten_shape, 0.0, 1.0, self._seed, var.dtype, place + ) q, r = _C_ops.qr(normal_var, 'reduced') r_diag = _C_ops.diag(r, 0, 0) @@ -127,107 +128,114 @@ class Orthogonal(Initializer): return None - normal_var = block.create_var(name=unique_name.generate('.'.join( - ['gaussian_random', 'tmp'])), - dtype=var.dtype, - persistable=False, - stop_gradient=True) - block.append_op(type='gaussian_random', - inputs={}, - outputs={'Out': normal_var}, - attrs={ - 'mean': 0.0, - 'std': 1.0, - 'shape': flatten_shape, - 'seed': self._seed, - 'dtype': var.dtype - }, - stop_gradient=True) - - q = block.create_var(name=unique_name.generate('.'.join( - ['qr', 'q', 'tmp'])), - dtype=normal_var.dtype, - persistable=False, - stop_gradient=True) - r = block.create_var(name=unique_name.generate('.'.join( - ['qr', 'r', 'tmp'])), - dtype=normal_var.dtype, - persistable=False, - stop_gradient=True) - block.append_op(type='qr', - inputs={'X': [normal_var]}, - outputs={ - 'Q': q, - 'R': r, - }, - attrs={'mode': 'reduced'}, - stop_gradient=True) - - r_diag = 
block.create_var(name=unique_name.generate('.'.join( - ['diag', 'tmp'])), - dtype=r.dtype, - persistable=False, - stop_gradient=True) - block.append_op(type='diag_v2', - inputs={'X': r}, - outputs={'Out': r_diag}, - attrs={ - 'offset': 0, - 'padding_value': 0 - }, - stop_gradient=True) + normal_var = block.create_var( + name=unique_name.generate('.'.join(['gaussian_random', 'tmp'])), + dtype=var.dtype, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type='gaussian_random', + inputs={}, + outputs={'Out': normal_var}, + attrs={ + 'mean': 0.0, + 'std': 1.0, + 'shape': flatten_shape, + 'seed': self._seed, + 'dtype': var.dtype, + }, + stop_gradient=True, + ) + + q = block.create_var( + name=unique_name.generate('.'.join(['qr', 'q', 'tmp'])), + dtype=normal_var.dtype, + persistable=False, + stop_gradient=True, + ) + r = block.create_var( + name=unique_name.generate('.'.join(['qr', 'r', 'tmp'])), + dtype=normal_var.dtype, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type='qr', + inputs={'X': [normal_var]}, + outputs={ + 'Q': q, + 'R': r, + }, + attrs={'mode': 'reduced'}, + stop_gradient=True, + ) + + r_diag = block.create_var( + name=unique_name.generate('.'.join(['diag', 'tmp'])), + dtype=r.dtype, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type='diag_v2', + inputs={'X': r}, + outputs={'Out': r_diag}, + attrs={'offset': 0, 'padding_value': 0}, + stop_gradient=True, + ) r_sign = r_diag - block.append_op(type='sign', - inputs={'X': [r_diag]}, - outputs={'Out': r_sign}, - stop_gradient=True) - - block.append_op(type='elementwise_mul', - inputs={ - 'X': q, - 'Y': r_sign - }, - outputs={'Out': q}, - attrs={}, - stop_gradient=True) - - x_shape = block.create_var(name=unique_name.generate('.'.join( - ['transpose', 'shape', 'tmp'])), - dtype=q.dtype, - persistable=False, - stop_gradient=True) + block.append_op( + type='sign', + inputs={'X': [r_diag]}, + outputs={'Out': r_sign}, + stop_gradient=True, + ) + + block.append_op( + type='elementwise_mul', + inputs={'X': q, 'Y': r_sign}, + outputs={'Out': q}, + attrs={}, + stop_gradient=True, + ) + + x_shape = block.create_var( + name=unique_name.generate('.'.join(['transpose', 'shape', 'tmp'])), + dtype=q.dtype, + persistable=False, + stop_gradient=True, + ) if row < col: - q_transpose = block.create_var(name=unique_name.generate('.'.join( - ['transpose', 'tmp'])), - dtype=q.dtype, - persistable=False, - stop_gradient=True) - block.append_op(type='transpose2', - inputs={'X': q}, - outputs={ - 'Out': q_transpose, - 'XShape': x_shape - }, - attrs={'axis': [1, 0]}, - stop_gradient=True) + q_transpose = block.create_var( + name=unique_name.generate('.'.join(['transpose', 'tmp'])), + dtype=q.dtype, + persistable=False, + stop_gradient=True, + ) + block.append_op( + type='transpose2', + inputs={'X': q}, + outputs={'Out': q_transpose, 'XShape': x_shape}, + attrs={'axis': [1, 0]}, + stop_gradient=True, + ) q = q_transpose - block.append_op(type='reshape2', - inputs={'X': q}, - outputs={ - 'Out': q, - "XShape": x_shape - }, - attrs={'shape': var.shape}, - stop_gradient=True) - - op = block.append_op(type='scale', - inputs={'X': q}, - outputs={'Out': var}, - attrs={ - 'scale': self._gain, - 'bias': 0.0 - }) + block.append_op( + type='reshape2', + inputs={'X': q}, + outputs={'Out': q, "XShape": x_shape}, + attrs={'shape': var.shape}, + stop_gradient=True, + ) + + op = block.append_op( + type='scale', + inputs={'X': q}, + outputs={'Out': var}, + attrs={'scale': self._gain, 'bias': 0.0}, + ) return op diff 
--git a/python/paddle/nn/initializer/uniform.py b/python/paddle/nn/initializer/uniform.py index 2c7a57b1195cdca9ab8898be13682b6d8404a5b8..578a477247874901e8906044f576ed7b8ad50052 100644 --- a/python/paddle/nn/initializer/uniform.py +++ b/python/paddle/nn/initializer/uniform.py @@ -55,9 +55,6 @@ class Uniform(UniformInitializer): assert low is not None, 'low should not be None' assert high is not None, 'high should not be None' assert high >= low, 'high should greater or equal than low' - super(Uniform, self).__init__(low=low, - high=high, - seed=0, - diag_num=0, - diag_step=0, - diag_val=1.0) + super(Uniform, self).__init__( + low=low, high=high, seed=0, diag_num=0, diag_step=0, diag_val=1.0 + ) diff --git a/python/paddle/nn/initializer/xavier.py b/python/paddle/nn/initializer/xavier.py index 2c6d60a6bb86be6a5d08d3807800d6f8182dc889..149b13df389e46ffbfa95ebd582b9842b6194b16 100644 --- a/python/paddle/nn/initializer/xavier.py +++ b/python/paddle/nn/initializer/xavier.py @@ -63,10 +63,9 @@ class XavierNormal(XavierInitializer): """ def __init__(self, fan_in=None, fan_out=None, name=None): - super(XavierNormal, self).__init__(uniform=False, - fan_in=fan_in, - fan_out=fan_out, - seed=0) + super(XavierNormal, self).__init__( + uniform=False, fan_in=fan_in, fan_out=fan_out, seed=0 + ) class XavierUniform(XavierInitializer): @@ -118,7 +117,6 @@ class XavierUniform(XavierInitializer): """ def __init__(self, fan_in=None, fan_out=None, name=None): - super(XavierUniform, self).__init__(uniform=True, - fan_in=fan_in, - fan_out=fan_out, - seed=0) + super(XavierUniform, self).__init__( + uniform=True, fan_in=fan_in, fan_out=fan_out, seed=0 + ) diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py index 2382acbd0cab0a831b83c0b1b635a7bace2b8084..e3fa71c0332856ba694d410c046f70919a290afa 100644 --- a/python/paddle/nn/layer/activation.py +++ b/python/paddle/nn/layer/activation.py @@ -402,12 +402,14 @@ class PReLU(Layer): # [ 6. , 7. , 8. , 9. ]]]] """ - def __init__(self, - num_parameters=1, - init=0.25, - weight_attr=None, - data_format="NCHW", - name=None): + def __init__( + self, + num_parameters=1, + init=0.25, + weight_attr=None, + data_format="NCHW", + name=None, + ): super(PReLU, self).__init__() self._num_parameters = num_parameters self._init = init @@ -415,12 +417,13 @@ class PReLU(Layer): self._name = name self._data_format = data_format - self._weight = self.create_parameter(attr=self._weight_attr, - shape=[self._num_parameters], - dtype=get_default_dtype(), - is_bias=False, - default_initializer=Constant( - self._init)) + self._weight = self.create_parameter( + attr=self._weight_attr, + shape=[self._num_parameters], + dtype=get_default_dtype(), + is_bias=False, + default_initializer=Constant(self._init), + ) def forward(self, x): return F.prelu(x, self._weight, data_format=self._data_format) @@ -428,8 +431,12 @@ class PReLU(Layer): def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'num_parameters={}, data_format={}, init={}, dtype={}{}'.format( - self._num_parameters, self._data_format, self._init, self._dtype, - name_str) + self._num_parameters, + self._data_format, + self._init, + self._dtype, + name_str, + ) class RReLU(Layer): @@ -503,22 +510,22 @@ class RReLU(Layer): # [ 6. 7. 8. 9. ]]]] """ - def __init__(self, lower=1. / 8., upper=1. 
/ 3., name=None): + def __init__(self, lower=1.0 / 8.0, upper=1.0 / 3.0, name=None): super(RReLU, self).__init__() self._lower = lower self._upper = upper self._name = name def forward(self, x): - return F.rrelu(x, - lower=self._lower, - upper=self._upper, - training=self.training) + return F.rrelu( + x, lower=self._lower, upper=self._upper, training=self.training + ) def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'lower={}, upper={}, training={}, dtype={}{}'.format( - self._lower, self._upper, self.training, self._dtype, name_str) + self._lower, self._upper, self.training, self._dtype, name_str + ) class ReLU(Layer): @@ -637,10 +644,12 @@ class SELU(Layer): # [[0, 1.050701],[2.101402, 3.152103]] """ - def __init__(self, - scale=1.0507009873554804934193349852946, - alpha=1.6732632423543772848170429916717, - name=None): + def __init__( + self, + scale=1.0507009873554804934193349852946, + alpha=1.6732632423543772848170429916717, + name=None, + ): super(SELU, self).__init__() self._scale = scale self._alpha = alpha @@ -651,8 +660,9 @@ class SELU(Layer): def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' - return 'scale={:.16f}, alpha={:.16f}{}'.format(self._scale, self._alpha, - name_str) + return 'scale={:.16f}, alpha={:.16f}{}'.format( + self._scale, self._alpha, name_str + ) class LeakyReLU(Layer): @@ -835,8 +845,9 @@ class Softplus(Layer): def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' - return 'beta={}, threshold={}{}'.format(self._beta, self._threshold, - name_str) + return 'beta={}, threshold={}{}'.format( + self._beta, self._threshold, name_str + ) class Softshrink(Layer): @@ -1469,8 +1480,11 @@ class Softmax2D(Layer): self._name = name def forward(self, x): - assert x.ndim == 3 or x.ndim == 4, "Softmax2D requires a 3D or 4D tensor as input. Received: {}D.".format( - x.ndim) + assert ( + x.ndim == 3 or x.ndim == 4 + ), "Softmax2D requires a 3D or 4D tensor as input. 
Received: {}D.".format( + x.ndim + ) return F.softmax(x, axis=-3, dtype=self._dtype, name=self._name) def extra_repr(self): diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index 6cc2056459f81a8cbb1876b7134f2f5d3507e9ac..215f395e1fbccc03dbf7dec8b45b77ffd9f8a77c 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -144,37 +144,43 @@ class Linear(Layer): # [2.1077576 2.1077576 2.1077576 2.1077576 ]] """ - def __init__(self, - in_features, - out_features, - weight_attr=None, - bias_attr=None, - name=None): + def __init__( + self, + in_features, + out_features, + weight_attr=None, + bias_attr=None, + name=None, + ): super(Linear, self).__init__() self._dtype = self._helper.get_default_dtype() self._weight_attr = weight_attr self._bias_attr = bias_attr - self.weight = self.create_parameter(shape=[in_features, out_features], - attr=self._weight_attr, - dtype=self._dtype, - is_bias=False) - self.bias = self.create_parameter(shape=[out_features], - attr=self._bias_attr, - dtype=self._dtype, - is_bias=True) + self.weight = self.create_parameter( + shape=[in_features, out_features], + attr=self._weight_attr, + dtype=self._dtype, + is_bias=False, + ) + self.bias = self.create_parameter( + shape=[out_features], + attr=self._bias_attr, + dtype=self._dtype, + is_bias=True, + ) self.name = name def forward(self, input): - out = F.linear(x=input, - weight=self.weight, - bias=self.bias, - name=self.name) + out = F.linear( + x=input, weight=self.weight, bias=self.bias, name=self.name + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' return 'in_features={}, out_features={}, dtype={}{}'.format( - self.weight.shape[0], self.weight.shape[1], self._dtype, name_str) + self.weight.shape[0], self.weight.shape[1], self._dtype, name_str + ) class Upsample(Layer): @@ -371,14 +377,16 @@ class Upsample(Layer): """ - def __init__(self, - size=None, - scale_factor=None, - mode='nearest', - align_corners=False, - align_mode=0, - data_format='NCHW', - name=None): + def __init__( + self, + size=None, + scale_factor=None, + mode='nearest', + align_corners=False, + align_mode=0, + data_format='NCHW', + name=None, + ): super(Upsample, self).__init__() self.size = size self.scale_factor = scale_factor @@ -389,14 +397,16 @@ class Upsample(Layer): self.name = name def forward(self, x): - out = F.interpolate(x, - size=self.size, - scale_factor=self.scale_factor, - mode=self.mode, - align_corners=self.align_corners, - align_mode=self.align_mode, - data_format=self.data_format, - name=self.name) + out = F.interpolate( + x, + size=self.size, + scale_factor=self.scale_factor, + mode=self.mode, + align_corners=self.align_corners, + align_mode=self.align_mode, + data_format=self.data_format, + name=self.name, + ) return out @@ -407,8 +417,13 @@ class Upsample(Layer): main_str = 'size={}'.format(self.size) name_str = ', name={}'.format(self.name) if self.name else '' return '{}, mode={}, align_corners={}, align_mode={}, data_format={}{}'.format( - main_str, self.mode, self.align_corners, self.align_mode, - self.data_format, name_str) + main_str, + self.mode, + self.align_corners, + self.align_mode, + self.data_format, + name_str, + ) class UpsamplingNearest2D(Layer): @@ -462,11 +477,9 @@ class UpsamplingNearest2D(Layer): # [2L, 3L, 12L, 12L] """ - def __init__(self, - size=None, - scale_factor=None, - data_format='NCHW', - name=None): + def __init__( + self, size=None, scale_factor=None, data_format='NCHW', name=None + ): 
super(UpsamplingNearest2D, self).__init__() self.size = size self.scale_factor = scale_factor @@ -474,14 +487,16 @@ class UpsamplingNearest2D(Layer): self.name = name def forward(self, x): - out = F.interpolate(x, - size=self.size, - scale_factor=self.scale_factor, - mode='nearest', - align_corners=False, - align_mode=0, - data_format=self.data_format, - name=self.name) + out = F.interpolate( + x, + size=self.size, + scale_factor=self.scale_factor, + mode='nearest', + align_corners=False, + align_mode=0, + data_format=self.data_format, + name=self.name, + ) return out @@ -491,8 +506,9 @@ class UpsamplingNearest2D(Layer): else: main_str = 'size={}'.format(self.size) name_str = ', name={}'.format(self.name) if self.name else '' - return '{}, data_format={}{}'.format(main_str, self.data_format, - name_str) + return '{}, data_format={}{}'.format( + main_str, self.data_format, name_str + ) class UpsamplingBilinear2D(Layer): @@ -547,11 +563,9 @@ class UpsamplingBilinear2D(Layer): # [2L, 3L, 12L, 12L] """ - def __init__(self, - size=None, - scale_factor=None, - data_format='NCHW', - name=None): + def __init__( + self, size=None, scale_factor=None, data_format='NCHW', name=None + ): super(UpsamplingBilinear2D, self).__init__() self.size = size self.scale_factor = scale_factor @@ -559,14 +573,16 @@ class UpsamplingBilinear2D(Layer): self.name = name def forward(self, x): - out = F.interpolate(x, - size=self.size, - scale_factor=self.scale_factor, - mode='bilinear', - align_corners=True, - align_mode=0, - data_format=self.data_format, - name=self.name) + out = F.interpolate( + x, + size=self.size, + scale_factor=self.scale_factor, + mode='bilinear', + align_corners=True, + align_mode=0, + data_format=self.data_format, + name=self.name, + ) return out @@ -576,8 +592,9 @@ class UpsamplingBilinear2D(Layer): else: main_str = 'size={}'.format(self.size) name_str = ', name={}'.format(self.name) if self.name else '' - return '{}, data_format={}{}'.format(main_str, self.data_format, - name_str) + return '{}, data_format={}{}'.format( + main_str, self.data_format, name_str + ) class Bilinear(Layer): @@ -634,13 +651,15 @@ class Bilinear(Layer): """ - def __init__(self, - in1_features, - in2_features, - out_features, - weight_attr=None, - bias_attr=None, - name=None): + def __init__( + self, + in1_features, + in2_features, + out_features, + weight_attr=None, + bias_attr=None, + name=None, + ): super(Bilinear, self).__init__() self._weight_attr = weight_attr self._bias_attr = bias_attr @@ -651,17 +670,23 @@ class Bilinear(Layer): self._dtype = self._helper.get_default_dtype() weight_shape = [ - self._out_features, self._in1_features, self._in2_features + self._out_features, + self._in1_features, + self._in2_features, ] - self.weight = self.create_parameter(attr=self._weight_attr, - shape=weight_shape, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._weight_attr, + shape=weight_shape, + dtype=self._dtype, + is_bias=False, + ) bias_shape = [1, self._out_features] - self.bias = self.create_parameter(attr=self._bias_attr, - shape=bias_shape, - dtype=self._dtype, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=bias_shape, + dtype=self._dtype, + is_bias=True, + ) def forward(self, x1, x2): return F.bilinear(x1, x2, self.weight, self.bias, self._name) @@ -669,8 +694,12 @@ class Bilinear(Layer): def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'in1_features={}, in2_features={}, out_features={}, 
dtype={}{}'.format( - self._in1_features, self._in2_features, self._out_features, - self._dtype, name_str) + self._in1_features, + self._in2_features, + self._out_features, + self._dtype, + name_str, + ) class Dropout(Layer): @@ -721,7 +750,7 @@ class Dropout(Layer): print(x) print(y_train) print(y_test) - """ + """ def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None): super(Dropout, self).__init__() @@ -732,18 +761,21 @@ class Dropout(Layer): self.name = name def forward(self, input): - out = F.dropout(input, - p=self.p, - axis=self.axis, - training=self.training, - mode=self.mode, - name=self.name) + out = F.dropout( + input, + p=self.p, + axis=self.axis, + training=self.training, + mode=self.mode, + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' - return 'p={}, axis={}, mode={}{}'.format(self.p, self.axis, self.mode, - name_str) + return 'p={}, axis={}, mode={}{}'.format( + self.p, self.axis, self.mode, name_str + ) class Dropout2D(Layer): @@ -783,7 +815,7 @@ class Dropout2D(Layer): print(x) print(y_train) print(y_test) - """ + """ def __init__(self, p=0.5, data_format='NCHW', name=None): super(Dropout2D, self).__init__() @@ -793,17 +825,20 @@ class Dropout2D(Layer): self.name = name def forward(self, input): - out = F.dropout2d(input, - p=self.p, - training=self.training, - data_format=self.data_format, - name=self.name) + out = F.dropout2d( + input, + p=self.p, + training=self.training, + data_format=self.data_format, + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' - return 'p={}, data_format={}{}'.format(self.p, self.data_format, - name_str) + return 'p={}, data_format={}{}'.format( + self.p, self.data_format, name_str + ) class Dropout3D(Layer): @@ -843,7 +878,7 @@ class Dropout3D(Layer): print(x) print(y_train) print(y_test) - """ + """ def __init__(self, p=0.5, data_format='NCDHW', name=None): super(Dropout3D, self).__init__() @@ -853,17 +888,20 @@ class Dropout3D(Layer): self.name = name def forward(self, input): - out = F.dropout3d(input, - p=self.p, - training=self.training, - data_format=self.data_format, - name=self.name) + out = F.dropout3d( + input, + p=self.p, + training=self.training, + data_format=self.data_format, + name=self.name, + ) return out def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' - return 'p={}, data_format={}{}'.format(self.p, self.data_format, - name_str) + return 'p={}, data_format={}{}'.format( + self.p, self.data_format, name_str + ) class AlphaDropout(Layer): @@ -902,7 +940,7 @@ class AlphaDropout(Layer): print(y_train) # [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly) print(y_test) - """ + """ def __init__(self, p=0.5, name=None): super(AlphaDropout, self).__init__() @@ -910,10 +948,9 @@ class AlphaDropout(Layer): self.name = name def forward(self, input): - out = F.alpha_dropout(input, - p=self.p, - training=self.training, - name=self.name) + out = F.alpha_dropout( + input, p=self.p, training=self.training, name=self.name + ) return out def extra_repr(self): @@ -963,12 +1000,9 @@ class Pad1D(Layer): # [0. 4. 5. 6. 0. 
0.]]] """ - def __init__(self, - padding, - mode='constant', - value=0.0, - data_format="NCL", - name=None): + def __init__( + self, padding, mode='constant', value=0.0, data_format="NCL", name=None + ): super(Pad1D, self).__init__() self._pad = _npairs(padding, 1) self._mode = mode @@ -977,17 +1011,20 @@ class Pad1D(Layer): self._name = name def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) + return F.pad( + x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'padding={}, mode={}, value={}, data_format={}{}'.format( - self._pad, self._mode, self._value, self._data_format, name_str) + self._pad, self._mode, self._value, self._data_format, name_str + ) class Pad2D(Layer): @@ -1036,12 +1073,9 @@ class Pad2D(Layer): # [0. 0. 0. 0.]]]] """ - def __init__(self, - padding, - mode='constant', - value=0.0, - data_format="NCHW", - name=None): + def __init__( + self, padding, mode='constant', value=0.0, data_format="NCHW", name=None + ): super(Pad2D, self).__init__() self._pad = _npairs(padding, 2) self._mode = mode @@ -1050,17 +1084,20 @@ class Pad2D(Layer): self._name = name def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) + return F.pad( + x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'padding={}, mode={}, value={}, data_format={}{}'.format( - self._pad, self._mode, self._value, self._data_format, name_str) + self._pad, self._mode, self._value, self._data_format, name_str + ) class ZeroPad2D(Layer): @@ -1111,23 +1148,25 @@ class ZeroPad2D(Layer): super(ZeroPad2D, self).__init__() self._pad = _npairs(padding, 2) self._mode = 'constant' - self._value = 0. + self._value = 0.0 self._data_format = data_format self._name = name def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) + return F.pad( + x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' - return 'padding={}, data_format={}{}'.format(self._pad, - self._data_format, - name_str) + return 'padding={}, data_format={}{}'.format( + self._pad, self._data_format, name_str + ) class Pad3D(Layer): @@ -1176,12 +1215,14 @@ class Pad3D(Layer): # [0. 0. 0. 
0.]]]]] """ - def __init__(self, - padding, - mode='constant', - value=0.0, - data_format="NCDHW", - name=None): + def __init__( + self, + padding, + mode='constant', + value=0.0, + data_format="NCDHW", + name=None, + ): super(Pad3D, self).__init__() self._pad = _npairs(padding, 3) self._mode = mode @@ -1190,17 +1231,20 @@ class Pad3D(Layer): self._name = name def forward(self, x): - return F.pad(x, - pad=self._pad, - mode=self._mode, - value=self._value, - data_format=self._data_format, - name=self._name) + return F.pad( + x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): name_str = ', name={}'.format(self._name) if self._name else '' return 'padding={}, mode={}, value={}, data_format={}{}'.format( - self._pad, self._mode, self._value, self._data_format, name_str) + self._pad, self._mode, self._value, self._data_format, name_str + ) class CosineSimilarity(Layer): @@ -1361,13 +1405,15 @@ class Embedding(Layer): """ - def __init__(self, - num_embeddings, - embedding_dim, - padding_idx=None, - sparse=False, - weight_attr=None, - name=None): + def __init__( + self, + num_embeddings, + embedding_dim, + padding_idx=None, + sparse=False, + weight_attr=None, + name=None, + ): super(Embedding, self).__init__() self._num_embeddings = num_embeddings self._embedding_dim = embedding_dim @@ -1381,12 +1427,20 @@ class Embedding(Layer): if self._embedding_dim <= 0: raise ValueError("embedding_dim must be gather than 0") - padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - num_embeddings + padding_idx) + padding_idx = ( + -1 + if padding_idx is None + else padding_idx + if padding_idx >= 0 + else (num_embeddings + padding_idx) + ) if padding_idx >= num_embeddings or padding_idx < -num_embeddings: - raise ValueError("padding_idx must be within [-{}, {})".format( - num_embeddings, num_embeddings)) + raise ValueError( + "padding_idx must be within [-{}, {})".format( + num_embeddings, num_embeddings + ) + ) self._dtype = self._helper.get_default_dtype() self._size = [self._num_embeddings, self._embedding_dim] @@ -1394,21 +1448,25 @@ class Embedding(Layer): self._weight_attr = weight_attr self._remote_prefetch = False self._name = name - self.weight = self.create_parameter(attr=self._weight_attr, - shape=self._size, - dtype=self._dtype, - is_bias=False) + self.weight = self.create_parameter( + attr=self._weight_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False, + ) if in_dynamic_mode() and padding_idx != -1: with paddle.no_grad(): self.weight[padding_idx] = 0.0 def forward(self, x): - return F.embedding(x, - weight=self.weight, - padding_idx=self._padding_idx, - sparse=self._sparse, - name=self._name) + return F.embedding( + x, + weight=self.weight, + padding_idx=self._padding_idx, + sparse=self._sparse, + name=self._name, + ) def extra_repr(self): main_str = '{_num_embeddings}, {_embedding_dim}' @@ -1466,12 +1524,9 @@ class Unfold(Layer): print(result) """ - def __init__(self, - kernel_sizes, - dilations=1, - paddings=0, - strides=1, - name=None): + def __init__( + self, kernel_sizes, dilations=1, paddings=0, strides=1, name=None + ): super(Unfold, self).__init__() self.kernel_sizes = kernel_sizes @@ -1481,17 +1536,24 @@ class Unfold(Layer): self.name = name def forward(self, input): - return F.unfold(input, - kernel_sizes=self.kernel_sizes, - strides=self.strides, - paddings=self.paddings, - dilations=self.dilations, - name=self.name) + return F.unfold( + input, + 
kernel_sizes=self.kernel_sizes, + strides=self.strides, + paddings=self.paddings, + dilations=self.dilations, + name=self.name, + ) def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' - return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.\ - format(self.kernel_sizes, self.dilations, self.paddings, self.strides, name_str) + return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.format( + self.kernel_sizes, + self.dilations, + self.paddings, + self.strides, + name_str, + ) class Fold(Layer): @@ -1551,13 +1613,15 @@ class Fold(Layer): # y.shape = [2,3,4,5] """ - def __init__(self, - output_sizes, - kernel_sizes, - dilations=1, - paddings=0, - strides=1, - name=None): + def __init__( + self, + output_sizes, + kernel_sizes, + dilations=1, + paddings=0, + strides=1, + name=None, + ): super(Fold, self).__init__() self.output_sizes = output_sizes @@ -1568,15 +1632,22 @@ class Fold(Layer): self.name = name def forward(self, input): - return F.fold(input, - output_sizes=self.output_sizes, - kernel_sizes=self.kernel_sizes, - strides=self.strides, - paddings=self.paddings, - dilations=self.dilations, - name=self.name) + return F.fold( + input, + output_sizes=self.output_sizes, + kernel_sizes=self.kernel_sizes, + strides=self.strides, + paddings=self.paddings, + dilations=self.dilations, + name=self.name, + ) def extra_repr(self): name_str = ', name={}'.format(self.name) if self.name else '' - return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.\ - format(self.kernel_sizes, self.dilations, self.paddings, self.strides, name_str) + return 'kernel_size={}, dilation={}, padding={}, stride={}{}'.format( + self.kernel_sizes, + self.dilations, + self.paddings, + self.strides, + name_str, + ) diff --git a/python/paddle/nn/layer/container.py b/python/paddle/nn/layer/container.py index fe2753a61415917eded2ac83b517ee4dab9894e3..a3ca6294e381740ef211e9e9ba3a99f9c8cc1de0 100644 --- a/python/paddle/nn/layer/container.py +++ b/python/paddle/nn/layer/container.py @@ -275,10 +275,10 @@ class LayerDict(Layer): """ - assert isinstance( - sublayers, Iterable - ), "The type of sublayers is not iterable of key/value pairs, the type of sublayers is " + type( - sublayers).__name__ + assert isinstance(sublayers, Iterable), ( + "The type of sublayers is not iterable of key/value pairs, the type of sublayers is " + + type(sublayers).__name__ + ) if isinstance(sublayers, (OrderedDict, LayerDict, Mapping)): for key, layer in sublayers.items(): @@ -287,7 +287,11 @@ class LayerDict(Layer): # handle this format [(key1, layer1), (key2, layer2)...] for i, kv in enumerate(sublayers): if len(kv) != 2: - raise ValueError("The length of the " + str(i) + - "'s element in sublayers is " + - str(len(kv)) + ", which must be 2.") + raise ValueError( + "The length of the " + + str(i) + + "'s element in sublayers is " + + str(len(kv)) + + ", which must be 2." 
+ ) self.add_sublayer(kv[0], kv[1]) diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py index fd5583d50baae87ac46023bc800a3f684ef1381b..61f9044d03a93caaac88ed3681cb4ced343d1b5d 100644 --- a/python/paddle/nn/layer/conv.py +++ b/python/paddle/nn/layer/conv.py @@ -31,7 +31,7 @@ __all__ = [] def _get_default_param_initializer(num_channels, filter_size): filter_elem_num = num_channels * np.prod(filter_size) - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std) @@ -44,24 +44,27 @@ def _reverse_repeat_list(t, n): class _ConvNd(Layer): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - transposed, - dims, - stride=1, - padding=0, - padding_mode='zeros', - output_padding=0, - dilation=1, - groups=1, - weight_attr=None, - bias_attr=None, - data_format="NCHW"): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + transposed, + dims, + stride=1, + padding=0, + padding_mode='zeros', + output_padding=0, + dilation=1, + groups=1, + weight_attr=None, + bias_attr=None, + data_format="NCHW", + ): super(_ConvNd, self).__init__() - assert weight_attr is not False, "weight_attr should not be False in Conv." + assert ( + weight_attr is not False + ), "weight_attr should not be False in Conv." self._param_attr = weight_attr self._bias_attr = bias_attr self._groups = groups @@ -72,11 +75,16 @@ class _ConvNd(Layer): valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'} if padding_mode not in valid_padding_modes: raise ValueError( - "padding_mode must be one of {}, but got padding_mode='{}'". - format(valid_padding_modes, padding_mode)) + "padding_mode must be one of {}, but got padding_mode='{}'".format( + valid_padding_modes, padding_mode + ) + ) - if padding_mode in {'reflect', 'replicate', 'circular' - } and not isinstance(padding, int): + if padding_mode in { + 'reflect', + 'replicate', + 'circular', + } and not isinstance(padding, int): raise TypeError( "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int" ) @@ -84,12 +92,16 @@ class _ConvNd(Layer): valid_format = {'NHWC', 'NCHW', 'NDHWC', 'NCDHW', 'NLC', 'NCL'} if data_format not in valid_format: raise ValueError( - "data_format must be one of {}, but got data_format='{}'". 
- format(valid_format, data_format)) + "data_format must be one of {}, but got data_format='{}'".format( + valid_format, data_format + ) + ) - channel_last = (data_format == "NHWC") or (data_format - == "NDHWC") or (data_format - == "NLC") + channel_last = ( + (data_format == "NHWC") + or (data_format == "NDHWC") + or (data_format == "NLC") + ) if channel_last: self._channel_dim = len(data_format) - 1 else: @@ -97,66 +109,86 @@ class _ConvNd(Layer): self._stride = utils.convert_to_list(stride, dims, 'stride') self._dilation = utils.convert_to_list(dilation, dims, 'dilation') - self._kernel_size = utils.convert_to_list(kernel_size, dims, - 'kernel_size') + self._kernel_size = utils.convert_to_list( + kernel_size, dims, 'kernel_size' + ) self._padding = padding self._padding_mode = padding_mode self.output_padding = output_padding if dims != 1: self._updated_padding, self._padding_algorithm = _update_padding_nd( - padding, channel_last, dims) + padding, channel_last, dims + ) if transposed: - filter_shape = [self._in_channels, out_channels // groups - ] + self._kernel_size + filter_shape = [ + self._in_channels, + out_channels // groups, + ] + self._kernel_size else: if in_channels % groups != 0: raise ValueError("in_channels must be divisible by groups.") if padding_mode in {'reflect', 'replicate', 'circular'}: - _paired_padding = utils.convert_to_list(padding, dims, - 'padding') + _paired_padding = utils.convert_to_list( + padding, dims, 'padding' + ) self._reversed_padding_repeated_twice = _reverse_repeat_list( - _paired_padding, 2) + _paired_padding, 2 + ) - self._updated_padding, self._padding_algorithm = _update_padding_nd( - 0, channel_last, dims) + ( + self._updated_padding, + self._padding_algorithm, + ) = _update_padding_nd(0, channel_last, dims) - filter_shape = [out_channels, in_channels // groups - ] + self._kernel_size + filter_shape = [ + out_channels, + in_channels // groups, + ] + self._kernel_size def _get_default_param_initializer(): if transposed: return None filter_elem_num = np.prod(self._kernel_size) * self._in_channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std) self.weight = self.create_parameter( shape=filter_shape, attr=self._param_attr, - default_initializer=_get_default_param_initializer()) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._out_channels], - is_bias=True) + default_initializer=_get_default_param_initializer(), + ) + self.bias = self.create_parameter( + attr=self._bias_attr, shape=[self._out_channels], is_bias=True + ) cudnn_version = get_cudnn_version() - self._use_cudnn = True if (is_compiled_with_cuda() - and cudnn_version is not None) else False + self._use_cudnn = ( + True + if (is_compiled_with_cuda() and cudnn_version is not None) + else False + ) self._op_type = "conv" + str(dims) + 'd' - if self._op_type == 'conv2d' and (in_channels == groups - and in_channels != 1 - and out_channels % in_channels == 0): + if self._op_type == 'conv2d' and ( + in_channels == groups + and in_channels != 1 + and out_channels % in_channels == 0 + ): self._op_type = 'depthwise_conv2d' if is_compiled_with_rocm(): self._use_cudnn = True else: self._use_cudnn = False - if (is_compiled_with_cuda() and get_flags("FLAGS_conv2d_disable_cudnn") - ["FLAGS_conv2d_disable_cudnn"]): + if ( + is_compiled_with_cuda() + and get_flags("FLAGS_conv2d_disable_cudnn")[ + "FLAGS_conv2d_disable_cudnn" + ] + ): self._use_cudnn = False def extra_repr(self): @@ -299,50 +331,58 @@ class Conv1D(_ConvNd): # 
[160. 211.]]] """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - weight_attr=None, - bias_attr=None, - data_format="NCL"): - super(Conv1D, self).__init__(in_channels, - out_channels, - kernel_size, - False, - 1, - stride=stride, - padding=padding, - padding_mode=padding_mode, - dilation=dilation, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NCL", + ): + super(Conv1D, self).__init__( + in_channels, + out_channels, + kernel_size, + False, + 1, + stride=stride, + padding=padding, + padding_mode=padding_mode, + dilation=dilation, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x): padding = 0 if self._padding_mode != "zeros": - x = F.pad(x, - self._reversed_padding_repeated_twice, - mode=self._padding_mode, - data_format=self._data_format) + x = F.pad( + x, + self._reversed_padding_repeated_twice, + mode=self._padding_mode, + data_format=self._data_format, + ) else: padding = self._padding - out = F.conv1d(x, - self.weight, - bias=self.bias, - padding=padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - data_format=self._data_format) + out = F.conv1d( + x, + self.weight, + bias=self.bias, + padding=padding, + stride=self._stride, + dilation=self._dilation, + groups=self._groups, + data_format=self._data_format, + ) return out @@ -474,43 +514,49 @@ class Conv1DTranspose(_ConvNd): # [[[60. 16. 99. 75. 4.]]] """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, - weight_attr=None, - bias_attr=None, - data_format="NCL"): - super(Conv1DTranspose, self).__init__(in_channels, - out_channels, - kernel_size, - True, - 1, - stride=stride, - padding=padding, - dilation=dilation, - output_padding=output_padding, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, + weight_attr=None, + bias_attr=None, + data_format="NCL", + ): + super(Conv1DTranspose, self).__init__( + in_channels, + out_channels, + kernel_size, + True, + 1, + stride=stride, + padding=padding, + dilation=dilation, + output_padding=output_padding, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x, output_size=None): - out = F.conv1d_transpose(x, - self.weight, - bias=self.bias, - output_size=output_size, - output_padding=self.output_padding, - padding=self._padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - data_format=self._data_format) + out = F.conv1d_transpose( + x, + self.weight, + bias=self.bias, + output_size=output_size, + output_padding=self.output_padding, + padding=self._padding, + stride=self._stride, + dilation=self._dilation, + groups=self._groups, + data_format=self._data_format, + ) return out @@ -625,51 +671,59 @@ class Conv2D(_ConvNd): # (2, 6, 6, 6) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - weight_attr=None, - 
bias_attr=None, - data_format="NCHW"): - super(Conv2D, self).__init__(in_channels, - out_channels, - kernel_size, - False, - 2, - stride=stride, - padding=padding, - padding_mode=padding_mode, - dilation=dilation, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NCHW", + ): + super(Conv2D, self).__init__( + in_channels, + out_channels, + kernel_size, + False, + 2, + stride=stride, + padding=padding, + padding_mode=padding_mode, + dilation=dilation, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x): if self._padding_mode != 'zeros': - x = F.pad(x, - self._reversed_padding_repeated_twice, - mode=self._padding_mode, - data_format=self._data_format) - - out = F.conv._conv_nd(x, - self.weight, - bias=self.bias, - stride=self._stride, - padding=self._updated_padding, - padding_algorithm=self._padding_algorithm, - dilation=self._dilation, - groups=self._groups, - data_format=self._data_format, - channel_dim=self._channel_dim, - op_type=self._op_type, - use_cudnn=self._use_cudnn) + x = F.pad( + x, + self._reversed_padding_repeated_twice, + mode=self._padding_mode, + data_format=self._data_format, + ) + + out = F.conv._conv_nd( + x, + self.weight, + bias=self.bias, + stride=self._stride, + padding=self._updated_padding, + padding_algorithm=self._padding_algorithm, + dilation=self._dilation, + groups=self._groups, + data_format=self._data_format, + channel_dim=self._channel_dim, + op_type=self._op_type, + use_cudnn=self._use_cudnn, + ) return out @@ -790,31 +844,35 @@ class Conv2DTranspose(_ConvNd): # (2, 6, 10, 10) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - output_padding=0, - dilation=1, - groups=1, - weight_attr=None, - bias_attr=None, - data_format="NCHW"): - super(Conv2DTranspose, self).__init__(in_channels, - out_channels, - kernel_size, - True, - 2, - stride=stride, - padding=padding, - dilation=dilation, - output_padding=output_padding, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + weight_attr=None, + bias_attr=None, + data_format="NCHW", + ): + super(Conv2DTranspose, self).__init__( + in_channels, + out_channels, + kernel_size, + True, + 2, + stride=stride, + padding=padding, + dilation=dilation, + output_padding=output_padding, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x, output_size=None): if output_size is None: @@ -822,16 +880,18 @@ class Conv2DTranspose(_ConvNd): else: output_padding = 0 - out = F.conv2d_transpose(x, - self.weight, - bias=self.bias, - padding=self._padding, - output_padding=output_padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - output_size=output_size, - data_format=self._data_format) + out = F.conv2d_transpose( + x, + self.weight, + bias=self.bias, + padding=self._padding, + output_padding=output_padding, + stride=self._stride, + dilation=self._dilation, + groups=self._groups, + output_size=output_size, + data_format=self._data_format, + ) return out @@ -942,51 +1002,59 @@ class Conv3D(_ConvNd): # (2, 6, 6, 6, 6) 
""" - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - weight_attr=None, - bias_attr=None, - data_format="NCDHW"): - super(Conv3D, self).__init__(in_channels, - out_channels, - kernel_size, - False, - 3, - stride=stride, - padding=padding, - padding_mode=padding_mode, - dilation=dilation, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NCDHW", + ): + super(Conv3D, self).__init__( + in_channels, + out_channels, + kernel_size, + False, + 3, + stride=stride, + padding=padding, + padding_mode=padding_mode, + dilation=dilation, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x): if self._padding_mode != 'zeros': - x = F.pad(x, - self._reversed_padding_repeated_twice, - mode=self._padding_mode, - data_format=self._data_format) - - out = F.conv._conv_nd(x, - self.weight, - bias=self.bias, - stride=self._stride, - padding=self._updated_padding, - padding_algorithm=self._padding_algorithm, - dilation=self._dilation, - groups=self._groups, - data_format=self._data_format, - channel_dim=self._channel_dim, - op_type=self._op_type, - use_cudnn=self._use_cudnn) + x = F.pad( + x, + self._reversed_padding_repeated_twice, + mode=self._padding_mode, + data_format=self._data_format, + ) + + out = F.conv._conv_nd( + x, + self.weight, + bias=self.bias, + stride=self._stride, + padding=self._updated_padding, + padding_algorithm=self._padding_algorithm, + dilation=self._dilation, + groups=self._groups, + data_format=self._data_format, + channel_dim=self._channel_dim, + op_type=self._op_type, + use_cudnn=self._use_cudnn, + ) return out @@ -1116,31 +1184,35 @@ class Conv3DTranspose(_ConvNd): # (2, 6, 10, 10, 10) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - output_padding=0, - dilation=1, - groups=1, - weight_attr=None, - bias_attr=None, - data_format="NCDHW"): - super(Conv3DTranspose, self).__init__(in_channels, - out_channels, - kernel_size, - True, - 3, - stride=stride, - padding=padding, - dilation=dilation, - output_padding=output_padding, - groups=groups, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + output_padding=0, + dilation=1, + groups=1, + weight_attr=None, + bias_attr=None, + data_format="NCDHW", + ): + super(Conv3DTranspose, self).__init__( + in_channels, + out_channels, + kernel_size, + True, + 3, + stride=stride, + padding=padding, + dilation=dilation, + output_padding=output_padding, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) def forward(self, x, output_size=None): if output_size is None: @@ -1148,14 +1220,16 @@ class Conv3DTranspose(_ConvNd): else: output_padding = 0 - out = F.conv3d_transpose(x, - self.weight, - bias=self.bias, - padding=self._padding, - output_padding=output_padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - output_size=output_size, - data_format=self._data_format) + out = F.conv3d_transpose( + x, + self.weight, + bias=self.bias, + padding=self._padding, + output_padding=output_padding, + stride=self._stride, + 
dilation=self._dilation, + groups=self._groups, + output_size=output_size, + data_format=self._data_format, + ) return out diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py index a7a488c833d7ffaa88634b729938c57ef1c55452..344c1d482d4217003d9a47c2311609213abbd0f5 100644 --- a/python/paddle/nn/layer/distance.py +++ b/python/paddle/nn/layer/distance.py @@ -59,7 +59,7 @@ class PairwiseDistance(Layer): """ - def __init__(self, p=2., epsilon=1e-6, keepdim=False, name=None): + def __init__(self, p=2.0, epsilon=1e-6, keepdim=False, name=None): super(PairwiseDistance, self).__init__() self.p = p self.epsilon = epsilon @@ -68,8 +68,9 @@ class PairwiseDistance(Layer): def forward(self, x, y): - return F.pairwise_distance(x, y, self.p, self.epsilon, self.keepdim, - self.name) + return F.pairwise_distance( + x, y, self.p, self.epsilon, self.keepdim, self.name + ) def extra_repr(self): main_str = 'p={p}' diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 6de2717a0616532d074fdca1b75cf57990b93df7..cd1c8bcb470d39f532752003cac42f95827ed6f6 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -105,15 +105,14 @@ class BCEWithLogitsLoss(Layer): """ - def __init__(self, - weight=None, - reduction='mean', - pos_weight=None, - name=None): + def __init__( + self, weight=None, reduction='mean', pos_weight=None, name=None + ): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in BCEWithLogitsLoss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) super(BCEWithLogitsLoss, self).__init__() self.weight = weight @@ -123,8 +122,13 @@ class BCEWithLogitsLoss(Layer): def forward(self, logit, label): out = paddle.nn.functional.binary_cross_entropy_with_logits( - logit, label, self.weight, self.reduction, self.pos_weight, - self.name) + logit, + label, + self.weight, + self.reduction, + self.pos_weight, + self.name, + ) return out @@ -374,14 +378,16 @@ class CrossEntropyLoss(Layer): """ - def __init__(self, - weight=None, - ignore_index=-100, - reduction='mean', - soft_label=False, - axis=-1, - use_softmax=True, - name=None): + def __init__( + self, + weight=None, + ignore_index=-100, + reduction='mean', + soft_label=False, + axis=-1, + use_softmax=True, + name=None, + ): super(CrossEntropyLoss, self).__init__() self.weight = weight self.reduction = reduction @@ -392,15 +398,17 @@ class CrossEntropyLoss(Layer): self.name = name def forward(self, input, label): - ret = paddle.nn.functional.cross_entropy(input, - label, - weight=self.weight, - ignore_index=self.ignore_index, - reduction=self.reduction, - soft_label=self.soft_label, - axis=self.axis, - use_softmax=self.use_softmax, - name=self.name) + ret = paddle.nn.functional.cross_entropy( + input, + label, + weight=self.weight, + ignore_index=self.ignore_index, + reduction=self.reduction, + soft_label=self.soft_label, + axis=self.axis, + use_softmax=self.use_softmax, + name=self.name, + ) return ret @@ -476,18 +484,21 @@ class HSigmoidLoss(Layer): # [2.34564662]] """ - def __init__(self, - feature_size, - num_classes, - weight_attr=None, - bias_attr=None, - is_custom=False, - is_sparse=False, - name=None): + def __init__( + self, + feature_size, + num_classes, + weight_attr=None, + bias_attr=None, + is_custom=False, + is_sparse=False, + name=None, + ): super(HSigmoidLoss, self).__init__() if (num_classes < 2) and (not is_custom): raise 
ValueError( - "num_classes must not be less than 2 with default tree") + "num_classes must not be less than 2 with default tree" + ) if (not is_custom) and (is_sparse): print("Sparse mode should not be used without custom tree") @@ -505,29 +516,34 @@ class HSigmoidLoss(Layer): self._dtype = paddle.get_default_dtype() remote_prefetch = is_sparse - print("With sparse mode, if your models has only" - " small parameter prefetch may cause speed down") + print( + "With sparse mode, if your models has only" + " small parameter prefetch may cause speed down" + ) C = self._num_classes if is_custom else self._num_classes - 1 - self.weight = self.create_parameter([C, self._feature_size], - attr=self._weight_attr, - is_bias=False, - dtype=self._dtype) - self.bias = self.create_parameter([C, 1], - attr=self._bias_attr, - is_bias=True, - dtype=self._dtype) + self.weight = self.create_parameter( + [C, self._feature_size], + attr=self._weight_attr, + is_bias=False, + dtype=self._dtype, + ) + self.bias = self.create_parameter( + [C, 1], attr=self._bias_attr, is_bias=True, dtype=self._dtype + ) def forward(self, input, label, path_table=None, path_code=None): - out = F.hsigmoid_loss(input, - label, - self._num_classes, - self.weight, - self.bias, - path_table=path_table, - path_code=path_code, - is_sparse=self._is_sparse, - name=self._name) + out = F.hsigmoid_loss( + input, + label, + self._num_classes, + self.weight, + self.bias, + path_table=path_table, + path_code=path_code, + is_sparse=self._is_sparse, + name=self._name, + ) return out @@ -584,17 +600,18 @@ class MSELoss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MSELoss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) self.reduction = reduction def forward(self, input, label): if not in_dynamic_mode(): - fluid.data_feeder.check_variable_and_dtype(input, 'input', - ['float32', 'float64'], - 'MSELoss') - fluid.data_feeder.check_variable_and_dtype(label, 'label', - ['float32', 'float64'], - 'MSELoss') + fluid.data_feeder.check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'MSELoss' + ) + fluid.data_feeder.check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'MSELoss' + ) if in_dygraph_mode(): square_out = paddle._C_ops.square(paddle.subtract(input, label)) @@ -676,16 +693,16 @@ class L1Loss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in L1Loss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) super(L1Loss, self).__init__() self.reduction = reduction self.name = name def forward(self, input, label): - return paddle.nn.functional.l1_loss(input, - label, - self.reduction, - name=self.name) + return paddle.nn.functional.l1_loss( + input, label, self.reduction, name=self.name + ) class BCELoss(Layer): @@ -765,7 +782,8 @@ class BCELoss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in bce_loss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." 
% reduction + ) super(BCELoss, self).__init__() self.weight = weight @@ -773,10 +791,9 @@ class BCELoss(Layer): self.name = name def forward(self, input, label): - out = paddle.nn.functional.binary_cross_entropy(input, label, - self.weight, - self.reduction, - self.name) + out = paddle.nn.functional.binary_cross_entropy( + input, label, self.weight, self.reduction, self.name + ) return out @@ -866,15 +883,14 @@ class NLLLoss(Layer): """ - def __init__(self, - weight=None, - ignore_index=-100, - reduction='mean', - name=None): + def __init__( + self, weight=None, ignore_index=-100, reduction='mean', name=None + ): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in nll_loss should be 'sum', 'mean' or " - "'none', but received %s, which is not allowed." % reduction) + "'none', but received %s, which is not allowed." % reduction + ) super(NLLLoss, self).__init__() self._weight = weight self._ignore_index = ignore_index @@ -882,12 +898,14 @@ class NLLLoss(Layer): self._name = name def forward(self, input, label): - return F.nll_loss(input, - label, - weight=self._weight, - ignore_index=self._ignore_index, - reduction=self._reduction, - name=self._name) + return F.nll_loss( + input, + label, + weight=self._weight, + ignore_index=self._ignore_index, + reduction=self._reduction, + name=self._name, + ) class KLDivLoss(Layer): @@ -1019,17 +1037,17 @@ class MarginRankingLoss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) super(MarginRankingLoss, self).__init__() self.margin = margin self.reduction = reduction self.name = name def forward(self, input, other, label): - out = paddle.nn.functional.margin_ranking_loss(input, other, label, - self.margin, - self.reduction, - self.name) + out = paddle.nn.functional.margin_ranking_loss( + input, other, label, self.margin, self.reduction, self.name + ) return out @@ -1113,19 +1131,23 @@ class CTCLoss(Layer): self.blank = blank self.reduction = reduction - def forward(self, - log_probs, - labels, - input_lengths, - label_lengths, - norm_by_times=False): - return paddle.nn.functional.ctc_loss(log_probs, - labels, - input_lengths, - label_lengths, - self.blank, - self.reduction, - norm_by_times=norm_by_times) + def forward( + self, + log_probs, + labels, + input_lengths, + label_lengths, + norm_by_times=False, + ): + return paddle.nn.functional.ctc_loss( + log_probs, + labels, + input_lengths, + label_lengths, + self.blank, + self.reduction, + norm_by_times=norm_by_times, + ) class SmoothL1Loss(Layer): @@ -1192,11 +1214,13 @@ class SmoothL1Loss(Layer): self.name = name def forward(self, input, label): - return F.smooth_l1_loss(input, - label, - reduction=self.reduction, - delta=self.delta, - name=self.name) + return F.smooth_l1_loss( + input, + label, + reduction=self.reduction, + delta=self.delta, + name=self.name, + ) class MultiLabelSoftMarginLoss(Layer): @@ -1264,17 +1288,20 @@ class MultiLabelSoftMarginLoss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MultiLabelSoftMarginloss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) self.weight = weight self.reduction = reduction self.name = name def forward(self, input, label): - return F.multi_label_soft_margin_loss(input, - label, - 
weight=self.weight, - reduction=self.reduction, - name=self.name) + return F.multi_label_soft_margin_loss( + input, + label, + weight=self.weight, + reduction=self.reduction, + name=self.name, + ) class HingeEmbeddingLoss(Layer): @@ -1364,11 +1391,13 @@ class HingeEmbeddingLoss(Layer): self.name = name def forward(self, input, label): - return F.hinge_embedding_loss(input, - label, - reduction=self.reduction, - margin=self.margin, - name=self.name) + return F.hinge_embedding_loss( + input, + label, + reduction=self.reduction, + margin=self.margin, + name=self.name, + ) class CosineEmbeddingLoss(Layer): @@ -1442,23 +1471,27 @@ class CosineEmbeddingLoss(Layer): if margin > 1 or margin < -1: raise ValueError( "The value of 'margin' should be in the interval of [-1, 1], but received %f, which is not allowed." - % margin) + % margin + ) if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' should be 'sum', 'mean' or " - "'none', but received %s, which is not allowed." % reduction) + "'none', but received %s, which is not allowed." % reduction + ) super(CosineEmbeddingLoss, self).__init__() self.margin = margin self.reduction = reduction self.name = name def forward(self, input1, input2, label): - return F.cosine_embedding_loss(input1, - input2, - label, - margin=self.margin, - reduction=self.reduction, - name=self.name) + return F.cosine_embedding_loss( + input1, + input2, + label, + margin=self.margin, + reduction=self.reduction, + name=self.name, + ) class TripletMarginWithDistanceLoss(Layer): @@ -1540,18 +1573,21 @@ class TripletMarginWithDistanceLoss(Layer): """ - def __init__(self, - distance_function=None, - margin=1.0, - swap=False, - reduction: str = 'mean', - name=None): + def __init__( + self, + distance_function=None, + margin=1.0, + swap=False, + reduction: str = 'mean', + name=None, + ): super(TripletMarginWithDistanceLoss, self).__init__() if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in TripletMarginWithDistanceLoss " "should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." % reduction + ) self.margin = margin self.swap = swap self.reduction = reduction @@ -1559,13 +1595,15 @@ class TripletMarginWithDistanceLoss(Layer): self.name = name def forward(self, input, positive, negative): - return F.triplet_margin_with_distance_loss(input, - positive, - negative, - margin=self.margin, - swap=self.swap, - reduction=self.reduction, - name=self.name) + return F.triplet_margin_with_distance_loss( + input, + positive, + negative, + margin=self.margin, + swap=self.swap, + reduction=self.reduction, + name=self.name, + ) class TripletMarginLoss(Layer): @@ -1643,18 +1681,21 @@ class TripletMarginLoss(Layer): """ - def __init__(self, - margin=1.0, - p=2., - epsilon=1e-6, - swap=False, - reduction='mean', - name=None): + def __init__( + self, + margin=1.0, + p=2.0, + epsilon=1e-6, + swap=False, + reduction='mean', + name=None, + ): super(TripletMarginLoss, self).__init__() if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in TripletMarginLoss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." 
% reduction + ) self.margin = margin self.p = p self.epsilon = epsilon @@ -1663,98 +1704,103 @@ class TripletMarginLoss(Layer): self.name = name def forward(self, input, positive, negative): - return F.triplet_margin_loss(input, - positive, - negative, - margin=self.margin, - p=self.p, - epsilon=self.epsilon, - swap=self.swap, - reduction=self.reduction, - name=self.name) + return F.triplet_margin_loss( + input, + positive, + negative, + margin=self.margin, + p=self.p, + epsilon=self.epsilon, + swap=self.swap, + reduction=self.reduction, + name=self.name, + ) class MultiMarginLoss(Layer): r"""Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss) between - input :math:`input` and label :math:`label`: + input :math:`input` and label :math:`label`: - For i-th mini-batch sample, the loss in terms of the 1D input :math:`input_i` and scalar - output :math:`label_i` is: + For i-th mini-batch sample, the loss in terms of the 1D input :math:`input_i` and scalar + output :math:`label_i` is: - .. math:: - \text{loss}(input_i, label_i) = \frac{\sum_{j} \max(0, \text{margin} - input_i[label_i] + input_i[j])^p}{\text{C}} + .. math:: + \text{loss}(input_i, label_i) = \frac{\sum_{j} \max(0, \text{margin} - input_i[label_i] + input_i[j])^p}{\text{C}} - where :math:`0 \leq j \leq \text{C}-1`, :math:`0 \leq i \leq \text{N}-1` and :math:`j \neq label_i`. + where :math:`0 \leq j \leq \text{C}-1`, :math:`0 \leq i \leq \text{N}-1` and :math:`j \neq label_i`. - Optionally, you can give non-equal weighting on the classes by passing - a 1D :attr:`weight` tensor into the constructor. + Optionally, you can give non-equal weighting on the classes by passing + a 1D :attr:`weight` tensor into the constructor. - The loss function for i-th sample then becomes: + The loss function for i-th sample then becomes: - .. math:: - \text{loss}(input_i, label_i) = \frac{\sum_{j} \max(0, weight[label_i] * (\text{margin} - input_i[label_i] + input_i[j]))^p}{\text{C}} + .. math:: + \text{loss}(input_i, label_i) = \frac{\sum_{j} \max(0, weight[label_i] * (\text{margin} - input_i[label_i] + input_i[j]))^p}{\text{C}} - Parameters: + Parameters: - p (int, Optional):The norm degree for pairwise distance. Default: :math:`1`. + p (int, Optional):The norm degree for pairwise distance. Default: :math:`1`. - margin (float, Optional):Default: :math:`1`. + margin (float, Optional):Default: :math:`1`. - weight (Tensor,optional): a manual rescaling weight given to each class. - If given, has to be a Tensor of shape (C,) and the data type is float32, float64. - Default is ``'None'`` . + weight (Tensor,optional): a manual rescaling weight given to each class. + If given, has to be a Tensor of shape (C,) and the data type is float32, float64. + Default is ``'None'`` . - reduction (str, optional): Indicate how to calculate the loss by batch_size, - the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. - If :attr:`reduction` is ``'none'``, the unreduced loss is returned; - If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; - If :attr:`reduction` is ``'sum'``, the summed loss is returned. - Default: ``'mean'`` + reduction (str, optional): Indicate how to calculate the loss by batch_size, + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned; + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the summed loss is returned. 
+ Default: ``'mean'`` - name (str, optional): Name for the operation (optional, default is None). - For more information, please refer to :ref:`api_guide_Name`. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. - Call parameters: - input (Tensor): Input tensor, the data type is float32 or float64. + Call parameters: + input (Tensor): Input tensor, the data type is float32 or float64. - label (Tensor): Label tensor, 0<= label < input.shape[1], the data type is int32 or int64. + label (Tensor): Label tensor, 0<= label < input.shape[1], the data type is int32 or int64. - Shape: - input: 2-D Tensor, the shape is [N, C], N is batch size and `C` means number of classes. + Shape: + input: 2-D Tensor, the shape is [N, C], N is batch size and `C` means number of classes. - label: 1-D Tensor, the shape is [N,]. + label: 1-D Tensor, the shape is [N,]. - output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the label. + output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the label. - Returns: - A callable object of MultiMarginLoss. + Returns: + A callable object of MultiMarginLoss. - Examples: - .. code-block:: python + Examples: + .. code-block:: python - import paddle - import paddle.nn as nn + import paddle + import paddle.nn as nn - input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) - label = paddle.to_tensor([0, 1, 2], dtype=paddle.int32) + input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32) + label = paddle.to_tensor([0, 1, 2], dtype=paddle.int32) - multi_margin_loss = nn.MultiMarginLoss(reduction='mean') - loss = multi_margin_loss(input, label) - print(loss) - """ + multi_margin_loss = nn.MultiMarginLoss(reduction='mean') + loss = multi_margin_loss(input, label) + print(loss) + """ - def __init__(self, - p: int = 1, - margin: float = 1.0, - weight=None, - reduction="mean", - name=None): + def __init__( + self, + p: int = 1, + margin: float = 1.0, + weight=None, + reduction="mean", + name=None, + ): super(MultiMarginLoss, self).__init__() if reduction not in ['sum', 'mean', 'none']: raise ValueError( "'reduction' in 'MultiMarginLoss' should be 'sum', 'mean' or 'none', " - "but received {}.".format(reduction)) + "but received {}.".format(reduction) + ) self.p = p self.margin = margin self.weight = weight @@ -1762,13 +1808,15 @@ class MultiMarginLoss(Layer): self.name = name def forward(self, input, label): - return F.multi_margin_loss(input, - label, - p=self.p, - margin=self.margin, - weight=self.weight, - reduction=self.reduction, - name=self.name) + return F.multi_margin_loss( + input, + label, + p=self.p, + margin=self.margin, + weight=self.weight, + reduction=self.reduction, + name=self.name, + ) class SoftMarginLoss(Layer): @@ -1830,13 +1878,15 @@ class SoftMarginLoss(Layer): if reduction not in ['sum', 'mean', 'none']: raise ValueError( "The value of 'reduction' in SoftMarginLoss should be 'sum', 'mean' or 'none', but " - "received %s, which is not allowed." % reduction) + "received %s, which is not allowed." 
% reduction + ) super(SoftMarginLoss, self).__init__() self.reduction = reduction self.name = name def forward(self, input, label): - out = paddle.nn.functional.soft_margin_loss(input, label, - self.reduction, self.name) + out = paddle.nn.functional.soft_margin_loss( + input, label, self.reduction, self.name + ) return out diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index d011bfd34aa8a95ead0cb25c916314b2606e4b39..1fca251c57007a410a25d12b996c7bdcd37b27c9 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -59,18 +59,22 @@ class _InstanceNormBase(Layer): See InstaceNorm1D, InstanceNorm2D or InstanceNorm3D for more details. """ - def __init__(self, - num_features, - epsilon=1e-5, - momentum=0.9, - weight_attr=None, - bias_attr=None, - data_format="NCHW", - name=None): + def __init__( + self, + num_features, + epsilon=1e-5, + momentum=0.9, + weight_attr=None, + bias_attr=None, + data_format="NCHW", + name=None, + ): super(_InstanceNormBase, self).__init__() if weight_attr == False or bias_attr == False: - assert weight_attr == bias_attr, "weight_attr and bias_attr must be set to Fasle at the same time in InstanceNorm" + assert ( + weight_attr == bias_attr + ), "weight_attr and bias_attr must be set to Fasle at the same time in InstanceNorm" self._epsilon = epsilon self._weight_attr = weight_attr self._bias_attr = bias_attr @@ -81,11 +85,14 @@ class _InstanceNormBase(Layer): attr=self._weight_attr, shape=[num_features], default_initializer=Constant(1.0), - is_bias=False) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[num_features], - default_initializer=Constant(0.0), - is_bias=True) + is_bias=False, + ) + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=[num_features], + default_initializer=Constant(0.0), + is_bias=True, + ) else: self.scale = None self.bias = None @@ -96,14 +103,14 @@ class _InstanceNormBase(Layer): def forward(self, input): self._check_input_dim(input) - return instance_norm(input, - weight=self.scale, - bias=self.bias, - eps=self._epsilon) + return instance_norm( + input, weight=self.scale, bias=self.bias, eps=self._epsilon + ) def extra_repr(self): - return 'num_features={}, epsilon={}'.format(self._num_features, - self._epsilon) + return 'num_features={}, epsilon={}'.format( + self._num_features, self._epsilon + ) class InstanceNorm1D(_InstanceNormBase): @@ -169,8 +176,11 @@ Where `H` means height of feature map, `W` means width of feature map. def _check_input_dim(self, input): if len(input.shape) != 2 and len(input.shape) != 3: - raise ValueError('expected 2D or 3D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 2D or 3D input (got {}D input)'.format( + len(input.shape) + ) + ) class InstanceNorm2D(_InstanceNormBase): @@ -235,8 +245,9 @@ Where `H` means height of feature map, `W` means width of feature map. def _check_input_dim(self, input): if len(input.shape) != 4: - raise ValueError('expected 4D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 4D input (got {}D input)'.format(len(input.shape)) + ) class InstanceNorm3D(_InstanceNormBase): @@ -301,8 +312,9 @@ Where `H` means height of feature map, `W` means width of feature map. 
def _check_input_dim(self, input): if len(input.shape) != 5: - raise ValueError('expected 5D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 5D input (got {}D input)'.format(len(input.shape)) + ) class GroupNorm(Layer): @@ -349,14 +361,16 @@ class GroupNorm(Layer): print(group_norm_out.numpy()) """ - def __init__(self, - num_groups, - num_channels, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCHW', - name=None): + def __init__( + self, + num_groups, + num_channels, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCHW', + name=None, + ): super(GroupNorm, self).__init__() self._weight_attr = weight_attr self._bias_attr = bias_attr @@ -370,39 +384,57 @@ class GroupNorm(Layer): if weight_attr == False: self.weight = self.create_parameter( - attr=None, shape=param_shape, default_initializer=Constant(1.0)) + attr=None, shape=param_shape, default_initializer=Constant(1.0) + ) self.weight.stop_gradient = True else: self.weight = self.create_parameter( attr=self._weight_attr, shape=param_shape, - default_initializer=Constant(1.0)) - self.weight.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0. + default_initializer=Constant(1.0), + ) + self.weight.stop_gradient = ( + self._weight_attr != None + and self._weight_attr.learning_rate == 0.0 + ) if bias_attr == False: - self.bias = self.create_parameter(attr=None, - shape=param_shape, - default_initializer=Constant(0.0), - is_bias=True) + self.bias = self.create_parameter( + attr=None, + shape=param_shape, + default_initializer=Constant(0.0), + is_bias=True, + ) self.bias.stop_gradient = True else: - self.bias = self.create_parameter(attr=self._bias_attr, - shape=param_shape, - is_bias=True) - self.bias.stop_gradient = self._bias_attr != None and self._bias_attr.learning_rate == 0. 
+ self.bias = self.create_parameter( + attr=self._bias_attr, shape=param_shape, is_bias=True + ) + self.bias.stop_gradient = ( + self._bias_attr != None and self._bias_attr.learning_rate == 0.0 + ) def forward(self, input): mean_out = self._helper.create_variable_for_type_inference( - dtype=input.dtype, stop_gradient=True) + dtype=input.dtype, stop_gradient=True + ) variance_out = self._helper.create_variable_for_type_inference( - dtype=input.dtype, stop_gradient=True) + dtype=input.dtype, stop_gradient=True + ) if in_dygraph_mode(): - pre_act = _C_ops.group_norm(input, self.weight, self.bias, - self._epsilon, self._num_groups, "NCHW") + pre_act = _C_ops.group_norm( + input, + self.weight, + self.bias, + self._epsilon, + self._num_groups, + "NCHW", + ) - return dygraph_utils._append_activation_in_dygraph(pre_act, - act=None) + return dygraph_utils._append_activation_in_dygraph( + pre_act, act=None + ) elif _in_legacy_dygraph(): pre_act, _, _ = _legacy_C_ops.group_norm( @@ -416,8 +448,9 @@ class GroupNorm(Layer): 'groups', self._num_groups, ) - return dygraph_utils._append_activation_in_dygraph(pre_act, - act=None) + return dygraph_utils._append_activation_in_dygraph( + pre_act, act=None + ) inputs = {'X': input} if self.bias is not None: @@ -427,25 +460,26 @@ class GroupNorm(Layer): # create output group_norm_out = self._helper.create_variable_for_type_inference( - dtype=input.dtype) - - self._helper.append_op(type="group_norm", - inputs=inputs, - outputs={ - "Y": group_norm_out, - "Mean": mean_out, - "Variance": variance_out, - }, - attrs={ - "epsilon": self._epsilon, - "groups": self._num_groups - }) + dtype=input.dtype + ) + + self._helper.append_op( + type="group_norm", + inputs=inputs, + outputs={ + "Y": group_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": self._epsilon, "groups": self._num_groups}, + ) return self._helper.append_activation(group_norm_out, None) def extra_repr(self): return 'num_groups={}, num_channels={}, epsilon={}'.format( - self._num_groups, self._num_channels, self._epsilon) + self._num_groups, self._num_channels, self._epsilon + ) class LayerNorm(Layer): @@ -506,12 +540,14 @@ class LayerNorm(Layer): print(layer_norm_out) """ - def __init__(self, - normalized_shape, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - name=None): + def __init__( + self, + normalized_shape, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + name=None, + ): super(LayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = [normalized_shape] @@ -528,25 +564,29 @@ class LayerNorm(Layer): self.weight = self.create_parameter( attr=self._weight_attr, shape=param_shape, - default_initializer=Constant(1.0)) + default_initializer=Constant(1.0), + ) if bias_attr is False: self.bias = None else: - self.bias = self.create_parameter(attr=self._bias_attr, - shape=param_shape, - is_bias=True) + self.bias = self.create_parameter( + attr=self._bias_attr, shape=param_shape, is_bias=True + ) def forward(self, input): - return layer_norm(input, - normalized_shape=self._normalized_shape, - weight=self.weight, - bias=self.bias, - epsilon=self._epsilon) + return layer_norm( + input, + normalized_shape=self._normalized_shape, + weight=self.weight, + bias=self.bias, + epsilon=self._epsilon, + ) def extra_repr(self): - return 'normalized_shape={}, epsilon={}'.format(self._normalized_shape, - self._epsilon) + return 'normalized_shape={}, epsilon={}'.format( + self._normalized_shape, self._epsilon + ) class 
_BatchNormBase(Layer): @@ -554,15 +594,17 @@ class _BatchNormBase(Layer): BatchNorm base . """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCHW', - use_global_stats=None, - name=None): + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCHW', + use_global_stats=None, + name=None, + ): super(_BatchNormBase, self).__init__() self._num_features = num_features self._weight_attr = weight_attr @@ -582,29 +624,40 @@ class _BatchNormBase(Layer): attr=None, shape=param_shape, dtype=self._dtype, - default_initializer=Constant(1.0)) + default_initializer=Constant(1.0), + ) self.weight.stop_gradient = True else: self.weight = self.create_parameter( attr=self._weight_attr, shape=param_shape, dtype=self._dtype, - default_initializer=Constant(1.0)) - self.weight.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0. + default_initializer=Constant(1.0), + ) + self.weight.stop_gradient = ( + self._weight_attr != None + and self._weight_attr.learning_rate == 0.0 + ) if bias_attr == False: - self.bias = self.create_parameter(attr=None, - shape=param_shape, - dtype=self._dtype, - default_initializer=Constant(0.0), - is_bias=True) + self.bias = self.create_parameter( + attr=None, + shape=param_shape, + dtype=self._dtype, + default_initializer=Constant(0.0), + is_bias=True, + ) self.bias.stop_gradient = True else: - self.bias = self.create_parameter(attr=self._bias_attr, - shape=param_shape, - dtype=self._dtype, - is_bias=True) - self.bias.stop_gradient = self._bias_attr != None and self._bias_attr.learning_rate == 0. + self.bias = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True, + ) + self.bias.stop_gradient = ( + self._bias_attr != None and self._bias_attr.learning_rate == 0.0 + ) moving_mean_name = None moving_variance_name = None @@ -613,22 +666,28 @@ class _BatchNormBase(Layer): moving_mean_name = name + "_mean" moving_variance_name = name + "_variance" - self._mean = self.create_parameter(dtype=self._dtype, - attr=ParamAttr( - name=moving_mean_name, - initializer=Constant(0.0), - trainable=False, - do_model_average=True), - shape=param_shape) + self._mean = self.create_parameter( + dtype=self._dtype, + attr=ParamAttr( + name=moving_mean_name, + initializer=Constant(0.0), + trainable=False, + do_model_average=True, + ), + shape=param_shape, + ) self._mean.stop_gradient = True - self._variance = self.create_parameter(dtype=self._dtype, - attr=ParamAttr( - name=moving_variance_name, - initializer=Constant(1.0), - trainable=False, - do_model_average=True), - shape=param_shape) + self._variance = self.create_parameter( + dtype=self._dtype, + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False, + do_model_average=True, + ), + shape=param_shape, + ) self._variance.stop_gradient = True self._data_format = data_format @@ -652,22 +711,26 @@ class _BatchNormBase(Layer): if self.training: warnings.warn( - "When training, we now always track global mean and variance.") - - return batch_norm(input, - self._mean, - self._variance, - weight=self.weight, - bias=self.bias, - training=self.training, - momentum=self._momentum, - epsilon=self._epsilon, - data_format=self._data_format, - use_global_stats=self._use_global_stats) + "When training, we now always track global mean and variance." 
+ ) + + return batch_norm( + input, + self._mean, + self._variance, + weight=self.weight, + bias=self.bias, + training=self.training, + momentum=self._momentum, + epsilon=self._epsilon, + data_format=self._data_format, + use_global_stats=self._use_global_stats, + ) def extra_repr(self): main_str = 'num_features={}, momentum={}, epsilon={}'.format( - self._num_features, self._momentum, self._epsilon) + self._num_features, self._momentum, self._epsilon + ) if self._data_format != 'NCHW': main_str += ', data_format={}'.format(self._data_format) if self._name is not None: @@ -747,18 +810,27 @@ class BatchNorm1D(_BatchNormBase): print(batch_norm_out) """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCL', - use_global_stats=None, - name=None): - super(BatchNorm1D, - self).__init__(num_features, momentum, epsilon, weight_attr, - bias_attr, data_format, use_global_stats, name) + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCL', + use_global_stats=None, + name=None, + ): + super(BatchNorm1D, self).__init__( + num_features, + momentum, + epsilon, + weight_attr, + bias_attr, + data_format, + use_global_stats, + name, + ) def _check_data_format(self, input): if input == 'NCHW' or input == 'NC' or input == 'NCL': @@ -767,12 +839,16 @@ class BatchNorm1D(_BatchNormBase): self._data_format = "NHWC" else: raise ValueError( - 'expected NC , NCL, NLC or None for data_format input') + 'expected NC , NCL, NLC or None for data_format input' + ) def _check_input_dim(self, input): if len(input.shape) != 2 and len(input.shape) != 3: - raise ValueError('expected 2D or 3D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 2D or 3D input (got {}D input)'.format( + len(input.shape) + ) + ) class BatchNorm2D(_BatchNormBase): @@ -856,8 +932,9 @@ class BatchNorm2D(_BatchNormBase): def _check_input_dim(self, input): if len(input.shape) != 4: - raise ValueError('expected 4D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 4D input (got {}D input)'.format(len(input.shape)) + ) class BatchNorm3D(_BatchNormBase): @@ -931,18 +1008,27 @@ class BatchNorm3D(_BatchNormBase): print(batch_norm_out) """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCDHW', - use_global_stats=None, - name=None): - super(BatchNorm3D, - self).__init__(num_features, momentum, epsilon, weight_attr, - bias_attr, data_format, use_global_stats, name) + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCDHW', + use_global_stats=None, + name=None, + ): + super(BatchNorm3D, self).__init__( + num_features, + momentum, + epsilon, + weight_attr, + bias_attr, + data_format, + use_global_stats, + name, + ) def _check_data_format(self, input): if input == 'NCHW' or input == 'NCDHW': @@ -951,12 +1037,14 @@ class BatchNorm3D(_BatchNormBase): self._data_format = 'NHWC' else: raise ValueError( - 'expected NCDHW, NDHWC or None for data_format input') + 'expected NCDHW, NDHWC or None for data_format input' + ) def _check_input_dim(self, input): if len(input.shape) != 5: - raise ValueError('expected 5D input (got {}D input)'.format( - len(input.shape))) + raise ValueError( + 'expected 5D input (got {}D input)'.format(len(input.shape)) + ) class SyncBatchNorm(_BatchNormBase): @@ -1046,17 +1134,26 @@ 
class SyncBatchNorm(_BatchNormBase): # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]] """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCHW', - name=None): - super(SyncBatchNorm, - self).__init__(num_features, momentum, epsilon, weight_attr, - bias_attr, data_format, None, name) + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCHW', + name=None, + ): + super(SyncBatchNorm, self).__init__( + num_features, + momentum, + epsilon, + weight_attr, + bias_attr, + data_format, + None, + name, + ) def _check_data_format(self): if self._data_format in ['NCHW', 'NCDHW', 'NC', 'NCL']: @@ -1080,24 +1177,55 @@ class SyncBatchNorm(_BatchNormBase): ### use_global_stats only support False in sync_batch_norm if in_dygraph_mode(): sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_( - x, self.weight, self.bias, self._mean, self._variance, - self._momentum, self._epsilon, self._data_format, - not self.training, False, False, False) + x, + self.weight, + self.bias, + self._mean, + self._variance, + self._momentum, + self._epsilon, + self._data_format, + not self.training, + False, + False, + False, + ) return sync_batch_norm_out elif in_dynamic_mode(): - attrs = ("momentum", self._momentum, "epsilon", self._epsilon, - "is_test", not self.training, "data_layout", - self._data_format, "use_mkldnn", False, "fuse_with_relu", - False, "use_global_stats", False, 'trainable_statistics', - False) + attrs = ( + "momentum", + self._momentum, + "epsilon", + self._epsilon, + "is_test", + not self.training, + "data_layout", + self._data_format, + "use_mkldnn", + False, + "fuse_with_relu", + False, + "use_global_stats", + False, + 'trainable_statistics', + False, + ) sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm( - x, self.weight, self.bias, self._mean, self._variance, mean_out, - variance_out, *attrs) + x, + self.weight, + self.bias, + self._mean, + self._variance, + mean_out, + variance_out, + *attrs + ) return sync_batch_norm_out - check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], - 'SyncBatchNorm') + check_variable_and_dtype( + x, 'input', ['float16', 'float32', 'float64'], 'SyncBatchNorm' + ) attrs = { "momentum": self._momentum, @@ -1115,28 +1243,30 @@ class SyncBatchNorm(_BatchNormBase): "Scale": [self.weight], "Bias": [self.bias], "Mean": [self._mean], - "Variance": [self._variance] + "Variance": [self._variance], } saved_mean = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) saved_variance = self._helper.create_variable_for_type_inference( - dtype=self._dtype, stop_gradient=True) + dtype=self._dtype, stop_gradient=True + ) sync_batch_norm_out = self._helper.create_variable_for_type_inference( - self._dtype) + self._dtype + ) outputs = { "Y": [sync_batch_norm_out], "MeanOut": [mean_out], "VarianceOut": [variance_out], "SavedMean": [saved_mean], - "SavedVariance": [saved_variance] + "SavedVariance": [saved_variance], } - self._helper.append_op(type="sync_batch_norm", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="sync_batch_norm", inputs=inputs, outputs=outputs, attrs=attrs + ) return sync_batch_norm_out @classmethod @@ -1162,18 +1292,28 @@ class SyncBatchNorm(_BatchNormBase): """ layer_output = layer if isinstance(layer, 
_BatchNormBase): - if layer._weight_attr != None and not isinstance( - layer._weight_attr, - bool) and layer._weight_attr.name != None: + if ( + layer._weight_attr != None + and not isinstance(layer._weight_attr, bool) + and layer._weight_attr.name != None + ): layer._weight_attr.name = layer._weight_attr.name + '_sync' - if layer._bias_attr != None and not isinstance( - layer._bias_attr, bool) and layer._bias_attr.name != None: + if ( + layer._bias_attr != None + and not isinstance(layer._bias_attr, bool) + and layer._bias_attr.name != None + ): layer._bias_attr.name = layer._bias_attr.name + '_sync' - layer_output = SyncBatchNorm(layer._num_features, layer._momentum, - layer._epsilon, layer._weight_attr, - layer._bias_attr, layer._data_format, - layer._name) + layer_output = SyncBatchNorm( + layer._num_features, + layer._momentum, + layer._epsilon, + layer._weight_attr, + layer._bias_attr, + layer._data_format, + layer._name, + ) if layer._weight_attr != False and layer._bias_attr != False: with no_grad(): @@ -1183,58 +1323,61 @@ class SyncBatchNorm(_BatchNormBase): layer_output._variance = layer._variance for name, sublayer in layer.named_children(): - layer_output.add_sublayer(name, - cls.convert_sync_batchnorm(sublayer)) + layer_output.add_sublayer( + name, cls.convert_sync_batchnorm(sublayer) + ) del layer return layer_output class LocalResponseNorm(Layer): """ - Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions. - For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_ + Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions. + For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks `_ - See more details in :ref:`api_paddle_nn_functional_local_response_norm` . + See more details in :ref:`api_paddle_nn_functional_local_response_norm` . - Parameters: - size (int): The number of channels to sum over. - alpha (float, optional): The scaling parameter, positive. Default:1e-4 - beta (float, optional): The exponent, positive. Default:0.75 - k (float, optional): An offset, positive. Default: 1.0 - data_format (str, optional): Specify the data format of the input, and the data format of the output - will be consistent with that of the input. An optional string from: - If input is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`, - the data is stored in the order of: `[batch_size, input_channels, feature_length]`. - If input is 4-D Tensor, the string could be `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, - the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. - If input is 5-D Tensor, the string could be `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`, - the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. - name (str, optional): Name for the operation (optional, default is None). For more information, - please refer to :ref:`api_guide_Name`. - - Shape: - - input: 3-D/4-D/5-D tensor. - - output: 3-D/4-D/5-D tensor, the same shape as input. + Parameters: + size (int): The number of channels to sum over. + alpha (float, optional): The scaling parameter, positive. Default:1e-4 + beta (float, optional): The exponent, positive. Default:0.75 + k (float, optional): An offset, positive. 
Default: 1.0 + data_format (str, optional): Specify the data format of the input, and the data format of the output + will be consistent with that of the input. An optional string from: + If input is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`, + the data is stored in the order of: `[batch_size, input_channels, feature_length]`. + If input is 4-D Tensor, the string could be `"NCHW"`, `"NHWC"`. When it is `"NCHW"`, + the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. + If input is 5-D Tensor, the string could be `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`, + the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. + name (str, optional): Name for the operation (optional, default is None). For more information, + please refer to :ref:`api_guide_Name`. - Examples: + Shape: + - input: 3-D/4-D/5-D tensor. + - output: 3-D/4-D/5-D tensor, the same shape as input. - .. code-block:: python + Examples: - import paddle + .. code-block:: python - x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32") - m = paddle.nn.LocalResponseNorm(size=5) - y = m(x) - print(y.shape) # [3, 3, 112, 112] - """ + import paddle + + x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32") + m = paddle.nn.LocalResponseNorm(size=5) + y = m(x) + print(y.shape) # [3, 3, 112, 112] + """ - def __init__(self, - size, - alpha=0.0001, - beta=0.75, - k=1.0, - data_format="NCHW", - name=None): + def __init__( + self, + size, + alpha=0.0001, + beta=0.75, + k=1.0, + data_format="NCHW", + name=None, + ): super(LocalResponseNorm, self).__init__() self.size = size self.alpha = alpha @@ -1244,13 +1387,21 @@ class LocalResponseNorm(Layer): self.name = name def forward(self, input): - out = F.local_response_norm(input, self.size, self.alpha, self.beta, - self.k, self.data_format, self.name) + out = F.local_response_norm( + input, + self.size, + self.alpha, + self.beta, + self.k, + self.data_format, + self.name, + ) return out def extra_repr(self): main_str = 'size={}, alpha={}, beta={}, k={}'.format( - self.size, self.alpha, self.beta, self.k) + self.size, self.alpha, self.beta, self.k + ) if self.data_format != 'NCHW': main_str += ', data_format={}'.format(self.data_format) if self.name is not None: diff --git a/python/paddle/nn/layer/pooling.py b/python/paddle/nn/layer/pooling.py index 3e47275410d09be16903d05c0be70b95edea32b1..a9b5af5199faf0d9af79ca5bceb6d5a8554bd861 100755 --- a/python/paddle/nn/layer/pooling.py +++ b/python/paddle/nn/layer/pooling.py @@ -76,13 +76,15 @@ class AvgPool1D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - exclusive=True, - ceil_mode=False, - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + exclusive=True, + ceil_mode=False, + name=None, + ): super(AvgPool1D, self).__init__() self.kernel_size = kernel_size self.stride = stride @@ -92,13 +94,21 @@ class AvgPool1D(Layer): self.name = name def forward(self, x): - out = F.avg_pool1d(x, self.kernel_size, self.stride, self.padding, - self.exclusive, self.ceil_mode, self.name) + out = F.avg_pool1d( + x, + self.kernel_size, + self.stride, + self.padding, + self.exclusive, + self.ceil_mode, + self.name, + ) return out def extra_repr(self): return 'kernel_size={kernel_size}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class AvgPool2D(Layer): @@ -173,15 +183,17 @@ class AvgPool2D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - 
padding=0, - ceil_mode=False, - exclusive=True, - divisor_override=None, - data_format="NCHW", - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + exclusive=True, + divisor_override=None, + data_format="NCHW", + name=None, + ): super(AvgPool2D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -193,19 +205,22 @@ class AvgPool2D(Layer): self.name = name def forward(self, x): - return F.avg_pool2d(x, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - ceil_mode=self.ceil_mode, - exclusive=self.exclusive, - divisor_override=self.divisor, - data_format=self.data_format, - name=self.name) + return F.avg_pool2d( + x, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + ceil_mode=self.ceil_mode, + exclusive=self.exclusive, + divisor_override=self.divisor, + data_format=self.data_format, + name=self.name, + ) def extra_repr(self): return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class AvgPool3D(Layer): @@ -266,15 +281,17 @@ class AvgPool3D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - exclusive=True, - divisor_override=None, - data_format="NCDHW", - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + exclusive=True, + divisor_override=None, + data_format="NCDHW", + name=None, + ): super(AvgPool3D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -286,19 +303,22 @@ class AvgPool3D(Layer): self.name = name def forward(self, x): - return F.avg_pool3d(x, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - ceil_mode=self.ceil_mode, - exclusive=self.exclusive, - divisor_override=self.divisor, - data_format=self.data_format, - name=self.name) + return F.avg_pool3d( + x, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + ceil_mode=self.ceil_mode, + exclusive=self.exclusive, + divisor_override=self.divisor, + data_format=self.data_format, + name=self.name, + ) def extra_repr(self): return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class MaxPool1D(Layer): @@ -362,13 +382,15 @@ class MaxPool1D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + name=None, + ): super(MaxPool1D, self).__init__() self.kernel_size = kernel_size self.stride = stride @@ -378,13 +400,21 @@ class MaxPool1D(Layer): self.name = name def forward(self, input): - out = F.max_pool1d(input, self.kernel_size, self.stride, self.padding, - self.return_mask, self.ceil_mode, self.name) + out = F.max_pool1d( + input, + self.kernel_size, + self.stride, + self.padding, + self.return_mask, + self.ceil_mode, + self.name, + ) return out def extra_repr(self): return 'kernel_size={kernel_size}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class MaxPool2D(Layer): @@ -460,14 +490,16 @@ class MaxPool2D(Layer): # output.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16], """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - data_format="NCHW", - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + data_format="NCHW", 
+ name=None, + ): super(MaxPool2D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -478,18 +510,21 @@ class MaxPool2D(Layer): self.name = name def forward(self, x): - return F.max_pool2d(x, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - return_mask=self.return_mask, - ceil_mode=self.ceil_mode, - data_format=self.data_format, - name=self.name) + return F.max_pool2d( + x, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + return_mask=self.return_mask, + ceil_mode=self.ceil_mode, + data_format=self.data_format, + name=self.name, + ) def extra_repr(self): return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class MaxPool3D(Layer): @@ -553,14 +588,16 @@ class MaxPool3D(Layer): # output.shape [1, 2, 3, 16, 16], max_indices.shape [1, 2, 3, 16, 16], """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - data_format="NCDHW", - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + data_format="NCDHW", + name=None, + ): super(MaxPool3D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -571,18 +608,21 @@ class MaxPool3D(Layer): self.name = name def forward(self, x): - return F.max_pool3d(x, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - return_mask=self.return_mask, - ceil_mode=self.ceil_mode, - data_format=self.data_format, - name=self.name) + return F.max_pool3d( + x, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + return_mask=self.return_mask, + ceil_mode=self.ceil_mode, + data_format=self.data_format, + name=self.name, + ) def extra_repr(self): return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) class AdaptiveAvgPool1D(Layer): @@ -724,10 +764,12 @@ class AdaptiveAvgPool2D(Layer): self._name = name def forward(self, x): - return F.adaptive_avg_pool2d(x, - output_size=self._output_size, - data_format=self._data_format, - name=self._name) + return F.adaptive_avg_pool2d( + x, + output_size=self._output_size, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): return 'output_size={}'.format(self._output_size) @@ -816,10 +858,12 @@ class AdaptiveAvgPool3D(Layer): self._name = name def forward(self, x): - return F.adaptive_avg_pool3d(x, - output_size=self._output_size, - data_format=self._data_format, - name=self._name) + return F.adaptive_avg_pool3d( + x, + output_size=self._output_size, + data_format=self._data_format, + name=self._name, + ) def extra_repr(self): return 'output_size={}'.format(self._output_size) @@ -898,12 +942,14 @@ class AdaptiveMaxPool1D(Layer): self.name = name def forward(self, input): - return F.adaptive_max_pool1d(input, self.output_size, self.return_mask, - self.name) + return F.adaptive_max_pool1d( + input, self.output_size, self.return_mask, self.name + ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format(self.output_size, - self.return_mask) + return 'output_size={}, return_mask={}'.format( + self.output_size, self.return_mask + ) class AdaptiveMaxPool2D(Layer): @@ -976,14 +1022,17 @@ class AdaptiveMaxPool2D(Layer): self._name = name def forward(self, x): - return F.adaptive_max_pool2d(x, - output_size=self._output_size, - return_mask=self._return_mask, - name=self._name) + return F.adaptive_max_pool2d( + x, + output_size=self._output_size, + 
return_mask=self._return_mask, + name=self._name, + ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format(self._output_size, - self._return_mask) + return 'output_size={}, return_mask={}'.format( + self._output_size, self._return_mask + ) class AdaptiveMaxPool3D(Layer): @@ -1068,14 +1117,17 @@ class AdaptiveMaxPool3D(Layer): self._name = name def forward(self, x): - return F.adaptive_max_pool3d(x, - output_size=self._output_size, - return_mask=self._return_mask, - name=self._name) + return F.adaptive_max_pool3d( + x, + output_size=self._output_size, + return_mask=self._return_mask, + name=self._name, + ) def extra_repr(self): - return 'output_size={}, return_mask={}'.format(self._output_size, - self._return_mask) + return 'output_size={}, return_mask={}'.format( + self._output_size, self._return_mask + ) class MaxUnPool1D(Layer): @@ -1130,13 +1182,15 @@ class MaxUnPool1D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - data_format="NCL", - output_size=None, - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + data_format="NCL", + output_size=None, + name=None, + ): super(MaxUnPool1D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -1146,14 +1200,16 @@ class MaxUnPool1D(Layer): self.name = name def forward(self, x, indices): - return F.max_unpool1d(x, - indices, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - data_format=self.data_format, - output_size=self.output_size, - name=self.name) + return F.max_unpool1d( + x, + indices, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + data_format=self.data_format, + output_size=self.output_size, + name=self.name, + ) def extra_repr(self): return 'output_size={}'.format(self.output_size) @@ -1214,13 +1270,15 @@ class MaxUnPool2D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - data_format="NCHW", - output_size=None, - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + data_format="NCHW", + output_size=None, + name=None, + ): super(MaxUnPool2D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -1230,14 +1288,16 @@ class MaxUnPool2D(Layer): self.name = name def forward(self, x, indices): - return F.max_unpool2d(x, - indices, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - data_format=self.data_format, - output_size=self.output_size, - name=self.name) + return F.max_unpool2d( + x, + indices, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + data_format=self.data_format, + output_size=self.output_size, + name=self.name, + ) def extra_repr(self): return 'output_size={}'.format(self.output_size) @@ -1302,13 +1362,15 @@ class MaxUnPool3D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - data_format="NCDHW", - output_size=None, - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + data_format="NCDHW", + output_size=None, + name=None, + ): super(MaxUnPool3D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -1318,14 +1380,16 @@ class MaxUnPool3D(Layer): self.name = name def forward(self, x, indices): - return F.max_unpool3d(x, - indices, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - data_format=self.data_format, - output_size=self.output_size, - name=self.name) + return F.max_unpool3d( + x, + indices, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + 
data_format=self.data_format, + output_size=self.output_size, + name=self.name, + ) def extra_repr(self): return 'output_size={}'.format(self.output_size) diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index 2821277c8f84a433bf2954e1e7d71430c9b220d5..7e0fa6d7d703d356c08850c27bc5d129b89506f5 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -29,6 +29,7 @@ from paddle.fluid.framework import in_dygraph_mode from paddle.framework import core from paddle.static import default_startup_program from paddle.static import program_guard + try: from collections.abc import Sequence except: @@ -139,12 +140,9 @@ class RNNCellBase(Layer): and mostly used in RNN. """ - def get_initial_states(self, - batch_ref, - shape=None, - dtype=None, - init_value=0., - batch_dim_idx=0): + def get_initial_states( + self, batch_ref, shape=None, dtype=None, init_value=0.0, batch_dim_idx=0 + ): r""" Generate initialized states according to provided shape, data type and value. @@ -180,17 +178,17 @@ class RNNCellBase(Layer): def _is_shape_sequence(seq): """For shape, list/tuple of integer is the finest-grained objection""" - if (isinstance(seq, list) or isinstance(seq, tuple)): - if reduce(lambda flag, x: isinstance(x, int) and flag, seq, - True): + if isinstance(seq, list) or isinstance(seq, tuple): + if reduce( + lambda flag, x: isinstance(x, int) and flag, seq, True + ): return False # TODO: Add check for the illegal if isinstance(seq, dict): return True - return (isinstance(seq, Sequence) and not isinstance(seq, str)) + return isinstance(seq, Sequence) and not isinstance(seq, str) class Shape(object): - def __init__(self, shape): self.shape = shape if shape[0] == -1 else ([-1] + list(shape)) @@ -211,13 +209,16 @@ class RNNCellBase(Layer): states_dtypes = map_structure(lambda shape: dtype, states_shapes) init_states = map_structure( - lambda shape, dtype: paddle.fluid.layers. - fill_constant_batch_size_like(input=batch_ref, - shape=shape.shape, - dtype=dtype, - value=init_value, - input_dim_idx=batch_dim_idx), - states_shapes, states_dtypes) + lambda shape, dtype: paddle.fluid.layers.fill_constant_batch_size_like( + input=batch_ref, + shape=shape.shape, + dtype=dtype, + value=init_value, + input_dim_idx=batch_dim_idx, + ), + states_shapes, + states_dtypes, + ) return init_states @property @@ -233,7 +234,8 @@ class RNNCellBase(Layer): `get_initial_states`. """ raise NotImplementedError( - "Please add implementaion for `state_shape` in the used cell.") + "Please add implementaion for `state_shape` in the used cell." + ) @property def state_dtype(self): @@ -248,7 +250,8 @@ class RNNCellBase(Layer): `get_initial_states`. """ raise NotImplementedError( - "Please add implementaion for `state_dtype` in the used cell.") + "Please add implementaion for `state_dtype` in the used cell." 
+ ) class SimpleRNNCell(RNNCellBase): @@ -318,50 +321,57 @@ class SimpleRNNCell(RNNCellBase): """ - def __init__(self, - input_size, - hidden_size, - activation="tanh", - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): + def __init__( + self, + input_size, + hidden_size, + activation="tanh", + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): super(SimpleRNNCell, self).__init__() if hidden_size <= 0: raise ValueError( - "hidden_size of {} must be greater than 0, but now equals to {}" - .format(self.__class__.__name__, hidden_size)) + "hidden_size of {} must be greater than 0, but now equals to {}".format( + self.__class__.__name__, hidden_size + ) + ) std = 1.0 / math.sqrt(hidden_size) self.weight_ih = self.create_parameter( (hidden_size, input_size), weight_ih_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.weight_hh = self.create_parameter( (hidden_size, hidden_size), weight_hh_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_ih = self.create_parameter( - (hidden_size, ), + (hidden_size,), bias_ih_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_hh = self.create_parameter( - (hidden_size, ), + (hidden_size,), bias_hh_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.input_size = input_size self.hidden_size = hidden_size if activation not in ["tanh", "relu"]: raise ValueError( "activation for SimpleRNNCell should be tanh or relu, " - "but get {}".format(activation)) + "but get {}".format(activation) + ) self.activation = activation - self._activation_fn = paddle.tanh \ - if activation == "tanh" \ - else F.relu + self._activation_fn = paddle.tanh if activation == "tanh" else F.relu def forward(self, inputs, states=None): if states is None: @@ -378,7 +388,7 @@ class SimpleRNNCell(RNNCellBase): @property def state_shape(self): - return (self.hidden_size, ) + return (self.hidden_size,) def extra_repr(self): s = '{input_size}, {hidden_size}' @@ -471,38 +481,46 @@ class LSTMCell(RNNCellBase): """ - def __init__(self, - input_size, - hidden_size, - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): + def __init__( + self, + input_size, + hidden_size, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): super(LSTMCell, self).__init__() if hidden_size <= 0: raise ValueError( - "hidden_size of {} must be greater than 0, but now equals to {}" - .format(self.__class__.__name__, hidden_size)) + "hidden_size of {} must be greater than 0, but now equals to {}".format( + self.__class__.__name__, hidden_size + ) + ) std = 1.0 / math.sqrt(hidden_size) self.weight_ih = self.create_parameter( (4 * hidden_size, input_size), weight_ih_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.weight_hh = self.create_parameter( (4 * hidden_size, hidden_size), weight_hh_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_ih = self.create_parameter( - (4 * hidden_size, ), + (4 * hidden_size,), bias_ih_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_hh = self.create_parameter( - (4 * 
hidden_size, ), + (4 * hidden_size,), bias_hh_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.hidden_size = hidden_size self.input_size = input_size @@ -538,7 +556,7 @@ class LSTMCell(RNNCellBase): automatically inserted into shape). These two shapes correspond to :math:`h_{t-1}` and :math:`c_{t-1}` separately. """ - return ((self.hidden_size, ), (self.hidden_size, )) + return ((self.hidden_size,), (self.hidden_size,)) def extra_repr(self): return '{input_size}, {hidden_size}'.format(**self.__dict__) @@ -622,38 +640,46 @@ class GRUCell(RNNCellBase): """ - def __init__(self, - input_size, - hidden_size, - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): + def __init__( + self, + input_size, + hidden_size, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): super(GRUCell, self).__init__() if hidden_size <= 0: raise ValueError( - "hidden_size of {} must be greater than 0, but now equals to {}" - .format(self.__class__.__name__, hidden_size)) + "hidden_size of {} must be greater than 0, but now equals to {}".format( + self.__class__.__name__, hidden_size + ) + ) std = 1.0 / math.sqrt(hidden_size) self.weight_ih = self.create_parameter( (3 * hidden_size, input_size), weight_ih_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.weight_hh = self.create_parameter( (3 * hidden_size, hidden_size), weight_hh_attr, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_ih = self.create_parameter( - (3 * hidden_size, ), + (3 * hidden_size,), bias_ih_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.bias_hh = self.create_parameter( - (3 * hidden_size, ), + (3 * hidden_size,), bias_hh_attr, is_bias=True, - default_initializer=I.Uniform(-std, std)) + default_initializer=I.Uniform(-std, std), + ) self.hidden_size = hidden_size self.input_size = input_size @@ -689,7 +715,7 @@ class GRUCell(RNNCellBase): size would be automatically inserted into shape). The shape corresponds to the shape of :math:`h_{t-1}`. 
""" - return (self.hidden_size, ) + return (self.hidden_size,) def extra_repr(self): return '{input_size}, {hidden_size}'.format(**self.__dict__) @@ -754,11 +780,9 @@ class RNN(Layer): self.is_reverse = is_reverse self.time_major = time_major - def forward(self, - inputs, - initial_states=None, - sequence_length=None, - **kwargs): + def forward( + self, inputs, initial_states=None, sequence_length=None, **kwargs + ): final_outputs, final_states = paddle.fluid.layers.rnn( self.cell, inputs, @@ -766,7 +790,8 @@ class RNN(Layer): sequence_length=sequence_length, time_major=self.time_major, is_reverse=self.is_reverse, - **kwargs) + **kwargs + ) return final_outputs, final_states @@ -825,27 +850,35 @@ class BiRNN(Layer): self.cell_fw = cell_fw self.cell_bw = cell_bw if cell_fw.input_size != cell_bw.input_size: - raise ValueError("input size of forward cell({}) does not equals" - "that of backward cell({})".format( - cell_fw.input_size, cell_bw.input_size)) + raise ValueError( + "input size of forward cell({}) does not equals" + "that of backward cell({})".format( + cell_fw.input_size, cell_bw.input_size + ) + ) for cell in [self.cell_fw, self.cell_bw]: if not hasattr(cell, "call"): # for non-dygraph mode, `rnn` api uses cell.call cell.call = cell.forward self.time_major = time_major - def forward(self, - inputs, - initial_states=None, - sequence_length=None, - **kwargs): + def forward( + self, inputs, initial_states=None, sequence_length=None, **kwargs + ): if isinstance(initial_states, (list, tuple)): - assert len(initial_states) == 2, \ - "length of initial_states should be 2 when it is a list/tuple" + assert ( + len(initial_states) == 2 + ), "length of initial_states should be 2 when it is a list/tuple" outputs, final_states = paddle.fluid.layers.birnn( - self.cell_fw, self.cell_bw, inputs, initial_states, sequence_length, - self.time_major, **kwargs) + self.cell_fw, + self.cell_bw, + inputs, + initial_states, + sequence_length, + self.time_major, + **kwargs + ) return outputs, final_states @@ -855,18 +888,20 @@ class RNNBase(LayerList): and other common methods for SimpleRNN, LSTM and GRU. """ - def __init__(self, - mode, - input_size, - hidden_size, - num_layers=1, - direction="forward", - time_major=False, - dropout=0., - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None): + def __init__( + self, + mode, + input_size, + hidden_size, + num_layers=1, + direction="forward", + time_major=False, + dropout=0.0, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + ): super(RNNBase, self).__init__() bidirectional_list = ["bidirectional", "bidirect"] self.mode = mode @@ -882,7 +917,7 @@ class RNNBase(LayerList): "weight_ih_attr": weight_ih_attr, "weight_hh_attr": weight_hh_attr, "bias_ih_attr": bias_ih_attr, - "bias_hh_attr": bias_hh_attr + "bias_hh_attr": bias_hh_attr, } if mode == "LSTM": @@ -911,11 +946,13 @@ class RNNBase(LayerList): else: raise ValueError( "direction should be forward or bidirect (or bidirectional), " - "received direction = {}".format(direction)) + "received direction = {}".format(direction) + ) self.could_use_cudnn = True self.could_use_cudnn &= len(self.parameters()) == num_layers * 4 * ( - 2 if direction in bidirectional_list else 1) + 2 if direction in bidirectional_list else 1 + ) # Expose params as RNN's attribute, which can make it compatible when # replacing small ops composed rnn with cpp rnn kernel. 
@@ -927,8 +964,10 @@ class RNNBase(LayerList): for direction in range(self.num_directions): suffix = '_reverse' if direction == 1 else '' param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}']) - if bias_ih_attr != False: param_names.append('bias_ih_l{}{}') - if bias_hh_attr != False: param_names.append('bias_hh_l{}{}') + if bias_ih_attr != False: + param_names.append('bias_ih_l{}{}') + if bias_hh_attr != False: + param_names.append('bias_hh_l{}{}') param_names = [x.format(layer, suffix) for x in param_names] for name, param in zip(param_names, self.parameters()): setattr(self, name, param) @@ -949,8 +988,11 @@ class RNNBase(LayerList): shape = [np.prod(param.shape) for param in params] self._all_weights = [None] * len(params) for i, param in enumerate(params): - offset = 0 if i % 4 < 2 else (2 * self.num_layers * - self.num_directions) + offset = ( + 0 + if i % 4 < 2 + else (2 * self.num_layers * self.num_directions) + ) layer_idx = i // 4 self._all_weights[offset + layer_idx * 2 + i % 2] = param # Wrap using a list to avoid registed into params and saving, maybe @@ -958,38 +1000,49 @@ class RNNBase(LayerList): # add both to main_program and startup_program for static-graph. # Use Constant initializer to avoid make effect on random generator. self._flat_weight = [ - self.create_parameter(shape=[np.sum(shape)], - dtype=params[0].dtype, - default_initializer=I.Constant(0.0)) + self.create_parameter( + shape=[np.sum(shape)], + dtype=params[0].dtype, + default_initializer=I.Constant(0.0), + ) ] # dropout state may also can be hided and avoid saving # should dropout state be persistable for static-graph self._dropout_state = self.create_variable( - dtype=core.VarDesc.VarType.UINT8) + dtype=core.VarDesc.VarType.UINT8 + ) if in_dynamic_mode(): with paddle.no_grad(): - _legacy_C_ops.coalesce_tensor(self._all_weights, - self._all_weights, - self._flat_weight[0], - "copy_data", True, - "use_align", False, "dtype", - params[0].dtype) + _legacy_C_ops.coalesce_tensor( + self._all_weights, + self._all_weights, + self._flat_weight[0], + "copy_data", + True, + "use_align", + False, + "dtype", + params[0].dtype, + ) return # for static-graph, append coalesce_tensor into startup program - with program_guard(default_startup_program(), - default_startup_program()): + with program_guard( + default_startup_program(), default_startup_program() + ): with paddle.no_grad(): - self._helper.append_op(type="coalesce_tensor", - inputs={"Input": self._all_weights}, - outputs={ - "Output": self._all_weights, - "FusedOutput": self._flat_weight - }, - attrs={ - "copy_data": True, - "use_align": False, - "dtype": params[0].dtype - }) + self._helper.append_op( + type="coalesce_tensor", + inputs={"Input": self._all_weights}, + outputs={ + "Output": self._all_weights, + "FusedOutput": self._flat_weight, + }, + attrs={ + "copy_data": True, + "use_align": False, + "dtype": params[0].dtype, + }, + ) def _cudnn_impl(self, inputs, initial_states, sequence_length): if not self.time_major: @@ -997,18 +1050,43 @@ class RNNBase(LayerList): if in_dygraph_mode(): out, _, state = _C_ops.rnn( - inputs, initial_states, self._all_weights, sequence_length, - self._dropout_state, self.dropout, self.num_directions == 2, - self.input_size, self.hidden_size, self.num_layers, self.mode, - 0, not self.training) + inputs, + initial_states, + self._all_weights, + sequence_length, + self._dropout_state, + self.dropout, + self.num_directions == 2, + self.input_size, + self.hidden_size, + self.num_layers, + self.mode, + 0, + not self.training, + ) 
elif in_dynamic_mode(): _, _, out, state = _legacy_C_ops.rnn( - inputs, initial_states, self._all_weights, sequence_length, - self._dropout_state, self.state_components, 'dropout_prob', - self.dropout, 'is_bidirec', self.num_directions == 2, - 'input_size', self.input_size, 'hidden_size', self.hidden_size, - 'num_layers', self.num_layers, 'mode', self.mode, 'is_test', - not self.training) + inputs, + initial_states, + self._all_weights, + sequence_length, + self._dropout_state, + self.state_components, + 'dropout_prob', + self.dropout, + 'is_bidirec', + self.num_directions == 2, + 'input_size', + self.input_size, + 'hidden_size', + self.hidden_size, + 'num_layers', + self.num_layers, + 'mode', + self.mode, + 'is_test', + not self.training, + ) else: out = self._helper.create_variable_for_type_inference(inputs.dtype) state = [ @@ -1016,13 +1094,14 @@ class RNNBase(LayerList): for i in range(self.state_components) ] reserve = self._helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True + ) inputs = { 'Input': inputs, 'WeightList': self._all_weights, 'PreState': initial_states, - 'SequenceLength': sequence_length + 'SequenceLength': sequence_length, } attrs = { 'dropout_prob': self.dropout, @@ -1031,7 +1110,7 @@ class RNNBase(LayerList): 'hidden_size': self.hidden_size, 'num_layers': self.num_layers, 'mode': self.mode, - 'is_test': not self.training + 'is_test': not self.training, } outputs = { @@ -1041,51 +1120,67 @@ class RNNBase(LayerList): 'DropoutState': self._dropout_state, } - self._helper.append_op(type="rnn", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="rnn", inputs=inputs, outputs=outputs, attrs=attrs + ) - out = paddle.tensor.transpose(out, - [1, 0, 2]) if not self.time_major else out + out = ( + paddle.tensor.transpose(out, [1, 0, 2]) + if not self.time_major + else out + ) return out, tuple(state) if len(state) > 1 else state[0] def forward(self, inputs, initial_states=None, sequence_length=None): batch_index = 1 if self.time_major else 0 dtype = inputs.dtype if initial_states is None: - state_shape = (self.num_layers * self.num_directions, -1, - self.hidden_size) - initial_states = tuple([ - paddle.fluid.layers.fill_constant_batch_size_like( - inputs, state_shape, dtype, 0, batch_index, 1) - for _ in range(self.state_components) - ]) + state_shape = ( + self.num_layers * self.num_directions, + -1, + self.hidden_size, + ) + initial_states = tuple( + [ + paddle.fluid.layers.fill_constant_batch_size_like( + inputs, state_shape, dtype, 0, batch_index, 1 + ) + for _ in range(self.state_components) + ] + ) else: - initial_states = [initial_states] if isinstance( - initial_states, paddle.static.Variable) else initial_states - - if self.could_use_cudnn and (not paddle.device.is_compiled_with_rocm() - or sequence_length is None): + initial_states = ( + [initial_states] + if isinstance(initial_states, paddle.static.Variable) + else initial_states + ) + + if self.could_use_cudnn and ( + not paddle.device.is_compiled_with_rocm() or sequence_length is None + ): # Add CPU kernel and dispatch in backend later return self._cudnn_impl(inputs, initial_states, sequence_length) - states = split_states(initial_states, self.num_directions == 2, - self.state_components) + states = split_states( + initial_states, self.num_directions == 2, self.state_components + ) final_states = [] for i, rnn_layer in enumerate(self): if i > 0: - inputs = F.dropout(inputs, - 
self.dropout, - training=self.training, - mode="upscale_in_train") + inputs = F.dropout( + inputs, + self.dropout, + training=self.training, + mode="upscale_in_train", + ) outputs, final_state = rnn_layer(inputs, states[i], sequence_length) final_states.append(final_state) inputs = outputs - final_states = concat_states(final_states, self.num_directions == 2, - self.state_components) + final_states = concat_states( + final_states, self.num_directions == 2, self.state_components + ) return outputs, final_states def extra_repr(self): @@ -1184,19 +1279,21 @@ class SimpleRNN(RNNBase): """ - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - time_major=False, - dropout=0., - activation="tanh", - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + time_major=False, + dropout=0.0, + activation="tanh", + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): if activation == "tanh": mode = "RNN_TANH" elif activation == "relu": @@ -1204,10 +1301,19 @@ class SimpleRNN(RNNBase): else: raise ValueError("Unknown activation '{}'".format(activation)) self.activation = activation - super(SimpleRNN, - self).__init__(mode, input_size, hidden_size, num_layers, - direction, time_major, dropout, weight_ih_attr, - weight_hh_attr, bias_ih_attr, bias_hh_attr) + super(SimpleRNN, self).__init__( + mode, + input_size, + hidden_size, + num_layers, + direction, + time_major, + dropout, + weight_ih_attr, + weight_hh_attr, + bias_ih_attr, + bias_hh_attr, + ) class LSTM(RNNBase): @@ -1307,22 +1413,33 @@ class LSTM(RNNBase): """ - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - time_major=False, - dropout=0., - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): - super(LSTM, - self).__init__("LSTM", input_size, hidden_size, num_layers, - direction, time_major, dropout, weight_ih_attr, - weight_hh_attr, bias_ih_attr, bias_hh_attr) + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + time_major=False, + dropout=0.0, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): + super(LSTM, self).__init__( + "LSTM", + input_size, + hidden_size, + num_layers, + direction, + time_major, + dropout, + weight_ih_attr, + weight_hh_attr, + bias_ih_attr, + bias_hh_attr, + ) class GRU(RNNBase): @@ -1415,19 +1532,30 @@ class GRU(RNNBase): """ - def __init__(self, - input_size, - hidden_size, - num_layers=1, - direction="forward", - time_major=False, - dropout=0., - weight_ih_attr=None, - weight_hh_attr=None, - bias_ih_attr=None, - bias_hh_attr=None, - name=None): - super(GRU, - self).__init__("GRU", input_size, hidden_size, num_layers, - direction, time_major, dropout, weight_ih_attr, - weight_hh_attr, bias_ih_attr, bias_hh_attr) + def __init__( + self, + input_size, + hidden_size, + num_layers=1, + direction="forward", + time_major=False, + dropout=0.0, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None, + ): + super(GRU, self).__init__( + "GRU", + input_size, + hidden_size, + num_layers, + direction, + time_major, + dropout, + weight_ih_attr, + weight_hh_attr, + bias_ih_attr, + bias_hh_attr, + ) diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py 
index 0ea83b3d84ce401abb61a13a91a828451cfeb17d..4115cbc7a80716bda64ff170b19f7fb35f0143fe 100644 --- a/python/paddle/nn/layer/transformer.py +++ b/python/paddle/nn/layer/transformer.py @@ -49,7 +49,8 @@ def _convert_param_attr_to_list(param_attr, n): """ if isinstance(param_attr, (list, tuple)): assert len(param_attr) == n, ( - "length of param_attr should be %d when it is a list/tuple" % n) + "length of param_attr should be %d when it is a list/tuple" % n + ) param_attrs = [] for attr in param_attr: if isinstance(attr, bool): @@ -151,21 +152,27 @@ class MultiHeadAttention(Layer): Cache = collections.namedtuple("Cache", ["k", "v"]) StaticCache = collections.namedtuple("StaticCache", ["k", "v"]) - def __init__(self, - embed_dim, - num_heads, - dropout=0., - kdim=None, - vdim=None, - need_weights=False, - weight_attr=None, - bias_attr=None): + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + kdim=None, + vdim=None, + need_weights=False, + weight_attr=None, + bias_attr=None, + ): super(MultiHeadAttention, self).__init__() - assert embed_dim > 0, ("Expected embed_dim to be greater than 0, " - "but received {}".format(embed_dim)) - assert num_heads > 0, ("Expected num_heads to be greater than 0, " - "but received {}".format(num_heads)) + assert embed_dim > 0, ( + "Expected embed_dim to be greater than 0, " + "but received {}".format(embed_dim) + ) + assert num_heads > 0, ( + "Expected num_heads to be greater than 0, " + "but received {}".format(num_heads) + ) self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim @@ -175,24 +182,22 @@ class MultiHeadAttention(Layer): self.need_weights = need_weights self.head_dim = embed_dim // num_heads - assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" - - self.q_proj = Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.k_proj = Linear(self.kdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.v_proj = Linear(self.vdim, - embed_dim, - weight_attr, - bias_attr=bias_attr) - self.out_proj = Linear(embed_dim, - embed_dim, - weight_attr, - bias_attr=bias_attr) + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + self.q_proj = Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.k_proj = Linear( + self.kdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.v_proj = Linear( + self.vdim, embed_dim, weight_attr, bias_attr=bias_attr + ) + self.out_proj = Linear( + embed_dim, embed_dim, weight_attr, bias_attr=bias_attr + ) def _prepare_qkv(self, query, key, value, cache=None): r""" @@ -338,12 +343,14 @@ class MultiHeadAttention(Layer): input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) v = layers.fill_constant_batch_size_like( input=key, shape=[-1, self.num_heads, 0, self.head_dim], dtype=key.dtype, - value=0) + value=0, + ) return self.Cache(k, v) else: # incremental_state with initial value, mainly for usage like UniLM @@ -410,19 +417,21 @@ class MultiHeadAttention(Layer): q, k, v, cache = self._prepare_qkv(query, key, value, cache) # scale dot product attention - product = paddle.matmul(x=q * (self.head_dim**-0.5), - y=k, - transpose_y=True) + product = paddle.matmul( + x=q * (self.head_dim**-0.5), y=k, transpose_y=True + ) if attn_mask is not None: # Support bool or int mask attn_mask = _convert_attention_mask(attn_mask, product.dtype) product = product + attn_mask weights = F.softmax(product) if self.dropout: - 
weights = F.dropout(weights, - self.dropout, - training=self.training, - mode="upscale_in_train") + weights = F.dropout( + weights, + self.dropout, + training=self.training, + mode="upscale_in_train", + ) out = tensor.matmul(weights, v) @@ -498,30 +507,39 @@ class TransformerEncoderLayer(Layer): enc_output = encoder_layer(enc_input, attn_mask) # [2, 4, 128] """ - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout=0.1, - activation="relu", - attn_dropout=None, - act_dropout=None, - normalize_before=False, - weight_attr=None, - bias_attr=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout=0.1, + activation="relu", + attn_dropout=None, + act_dropout=None, + normalize_before=False, + weight_attr=None, + bias_attr=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 super(TransformerEncoderLayer, self).__init__() - assert d_model > 0, ("Expected d_model to be greater than 0, " - "but received {}".format(d_model)) - assert nhead > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(nhead)) + assert ( + d_model > 0 + ), "Expected d_model to be greater than 0, " "but received {}".format( + d_model + ) + assert ( + nhead > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + nhead + ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward)) + "but received {}".format(dim_feedforward) + ) attn_dropout = dropout if attn_dropout is None else attn_dropout act_dropout = dropout if act_dropout is None else act_dropout @@ -530,20 +548,20 @@ class TransformerEncoderLayer(Layer): weight_attrs = _convert_param_attr_to_list(weight_attr, 2) bias_attrs = _convert_param_attr_to_list(bias_attr, 2) - self.self_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[0], - bias_attr=bias_attrs[0]) - self.linear1 = Linear(d_model, - dim_feedforward, - weight_attrs[1], - bias_attr=bias_attrs[1]) + self.self_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[0], + bias_attr=bias_attrs[0], + ) + self.linear1 = Linear( + d_model, dim_feedforward, weight_attrs[1], bias_attr=bias_attrs[1] + ) self.dropout = Dropout(act_dropout, mode="upscale_in_train") - self.linear2 = Linear(dim_feedforward, - d_model, - weight_attrs[1], - bias_attr=bias_attrs[1]) + self.linear2 = Linear( + dim_feedforward, d_model, weight_attrs[1], bias_attr=bias_attrs[1] + ) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.dropout1 = Dropout(dropout, mode="upscale_in_train") @@ -591,8 +609,9 @@ class TransformerEncoderLayer(Layer): if cache is None: src = self.self_attn(src, src, src, src_mask) else: - src, incremental_cache = self.self_attn(src, src, src, src_mask, - cache) + src, incremental_cache = self.self_attn( + src, src, src, src_mask, cache + ) src = residual + self.dropout1(src) if not self.normalize_before: @@ -624,8 +643,9 @@ class TransformerEncoderLayer(Layer): `MultiHeadAttention.gen_cache` and `MultiHeadAttention.forward` \ for more details. 
""" - incremental_cache = self.self_attn.gen_cache(src, - type=self.self_attn.Cache) + incremental_cache = self.self_attn.gen_cache( + src, type=self.self_attn.Cache + ) return incremental_cache @@ -659,10 +679,16 @@ class TransformerEncoder(Layer): def __init__(self, encoder_layer, num_layers, norm=None): super(TransformerEncoder, self).__init__() - self.layers = LayerList([ - (encoder_layer if i == 0 else type(encoder_layer)( - **encoder_layer._config)) for i in range(num_layers) - ]) + self.layers = LayerList( + [ + ( + encoder_layer + if i == 0 + else type(encoder_layer)(**encoder_layer._config) + ) + for i in range(num_layers) + ] + ) self.num_layers = num_layers self.norm = norm @@ -708,9 +734,9 @@ class TransformerEncoder(Layer): if cache is None: output = mod(output, src_mask=src_mask) else: - output, new_cache = mod(output, - src_mask=src_mask, - cache=cache[i]) + output, new_cache = mod( + output, src_mask=src_mask, cache=cache[i] + ) new_caches.append(new_cache) if self.norm is not None: @@ -807,30 +833,39 @@ class TransformerDecoderLayer(Layer): cross_attn_mask) # [2, 4, 128] """ - def __init__(self, - d_model, - nhead, - dim_feedforward, - dropout=0.1, - activation="relu", - attn_dropout=None, - act_dropout=None, - normalize_before=False, - weight_attr=None, - bias_attr=None): + def __init__( + self, + d_model, + nhead, + dim_feedforward, + dropout=0.1, + activation="relu", + attn_dropout=None, + act_dropout=None, + normalize_before=False, + weight_attr=None, + bias_attr=None, + ): self._config = locals() self._config.pop("self") self._config.pop("__class__", None) # py3 super(TransformerDecoderLayer, self).__init__() - assert d_model > 0, ("Expected d_model to be greater than 0, " - "but received {}".format(d_model)) - assert nhead > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(nhead)) + assert ( + d_model > 0 + ), "Expected d_model to be greater than 0, " "but received {}".format( + d_model + ) + assert ( + nhead > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + nhead + ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward)) + "but received {}".format(dim_feedforward) + ) attn_dropout = dropout if attn_dropout is None else attn_dropout act_dropout = dropout if act_dropout is None else act_dropout @@ -839,25 +874,27 @@ class TransformerDecoderLayer(Layer): weight_attrs = _convert_param_attr_to_list(weight_attr, 3) bias_attrs = _convert_param_attr_to_list(bias_attr, 3) - self.self_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[0], - bias_attr=bias_attrs[0]) - self.cross_attn = MultiHeadAttention(d_model, - nhead, - dropout=attn_dropout, - weight_attr=weight_attrs[1], - bias_attr=bias_attrs[1]) - self.linear1 = Linear(d_model, - dim_feedforward, - weight_attrs[2], - bias_attr=bias_attrs[2]) + self.self_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[0], + bias_attr=bias_attrs[0], + ) + self.cross_attn = MultiHeadAttention( + d_model, + nhead, + dropout=attn_dropout, + weight_attr=weight_attrs[1], + bias_attr=bias_attrs[1], + ) + self.linear1 = Linear( + d_model, dim_feedforward, weight_attrs[2], bias_attr=bias_attrs[2] + ) self.dropout = Dropout(act_dropout, mode="upscale_in_train") - self.linear2 = Linear(dim_feedforward, - d_model, - weight_attrs[2], - bias_attr=bias_attrs[2]) + self.linear2 = Linear( + dim_feedforward, d_model, weight_attrs[2], 
bias_attr=bias_attrs[2] + ) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.norm3 = LayerNorm(d_model) @@ -922,8 +959,9 @@ class TransformerDecoderLayer(Layer): if cache is None: tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, None) else: - tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask, - cache[0]) + tgt, incremental_cache = self.self_attn( + tgt, tgt, tgt, tgt_mask, cache[0] + ) tgt = residual + self.dropout1(tgt) if not self.normalize_before: tgt = self.norm1(tgt) @@ -934,8 +972,9 @@ class TransformerDecoderLayer(Layer): if cache is None: tgt = self.cross_attn(tgt, memory, memory, memory_mask, None) else: - tgt, static_cache = self.cross_attn(tgt, memory, memory, - memory_mask, cache[1]) + tgt, static_cache = self.cross_attn( + tgt, memory, memory, memory_mask, cache[1] + ) tgt = residual + self.dropout2(tgt) if not self.normalize_before: tgt = self.norm2(tgt) @@ -947,8 +986,9 @@ class TransformerDecoderLayer(Layer): tgt = residual + self.dropout3(tgt) if not self.normalize_before: tgt = self.norm3(tgt) - return tgt if cache is None else (tgt, (incremental_cache, - static_cache)) + return ( + tgt if cache is None else (tgt, (incremental_cache, static_cache)) + ) def gen_cache(self, memory): r""" @@ -972,10 +1012,12 @@ class TransformerDecoderLayer(Layer): See `MultiHeadAttention.gen_cache` and `MultiHeadAttention.forward` \ for more details. """ - incremental_cache = self.self_attn.gen_cache(memory, - type=self.self_attn.Cache) + incremental_cache = self.self_attn.gen_cache( + memory, type=self.self_attn.Cache + ) static_cache = self.cross_attn.gen_cache( - memory, memory, type=self.cross_attn.StaticCache) + memory, memory, type=self.cross_attn.StaticCache + ) return incremental_cache, static_cache @@ -1016,10 +1058,16 @@ class TransformerDecoder(Layer): def __init__(self, decoder_layer, num_layers, norm=None): super(TransformerDecoder, self).__init__() - self.layers = LayerList([ - (decoder_layer if i == 0 else type(decoder_layer)( - **decoder_layer._config)) for i in range(num_layers) - ]) + self.layers = LayerList( + [ + ( + decoder_layer + if i == 0 + else type(decoder_layer)(**decoder_layer._config) + ) + for i in range(num_layers) + ] + ) self.num_layers = num_layers self.norm = norm @@ -1077,17 +1125,21 @@ class TransformerDecoder(Layer): new_caches = [] for i, mod in enumerate(self.layers): if cache is None: - output = mod(output, - memory, - tgt_mask=tgt_mask, - memory_mask=memory_mask, - cache=None) + output = mod( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + cache=None, + ) else: - output, new_cache = mod(output, - memory, - tgt_mask=tgt_mask, - memory_mask=memory_mask, - cache=cache[i]) + output, new_cache = mod( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + cache=cache[i], + ) new_caches.append(new_cache) if self.norm is not None: @@ -1217,30 +1269,39 @@ class Transformer(Layer): cross_attn_mask) # [2, 6, 128] """ - def __init__(self, - d_model=512, - nhead=8, - num_encoder_layers=6, - num_decoder_layers=6, - dim_feedforward=2048, - dropout=0.1, - activation="relu", - attn_dropout=None, - act_dropout=None, - normalize_before=False, - weight_attr=None, - bias_attr=None, - custom_encoder=None, - custom_decoder=None): + def __init__( + self, + d_model=512, + nhead=8, + num_encoder_layers=6, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + attn_dropout=None, + act_dropout=None, + normalize_before=False, + weight_attr=None, + bias_attr=None, + 
custom_encoder=None, + custom_decoder=None, + ): super(Transformer, self).__init__() - assert d_model > 0, ("Expected d_model to be greater than 0, " - "but received {}".format(d_model)) - assert nhead > 0, ("Expected nhead to be greater than 0, " - "but received {}".format(nhead)) + assert ( + d_model > 0 + ), "Expected d_model to be greater than 0, " "but received {}".format( + d_model + ) + assert ( + nhead > 0 + ), "Expected nhead to be greater than 0, " "but received {}".format( + nhead + ) assert dim_feedforward > 0, ( "Expected dim_feedforward to be greater than 0, " - "but received {}".format(dim_feedforward)) + "but received {}".format(dim_feedforward) + ) if isinstance(bias_attr, (list, tuple)): if len(bias_attr) == 1: @@ -1253,9 +1314,9 @@ class Transformer(Layer): encoder_bias_attr = [bias_attr[0], bias_attr[-1]] decoder_bias_attr = bias_attr else: - assert False, ( - "length of bias_attr should be 1 or 2 or 3 when it is a list/tuple" - ) + assert ( + False + ), "length of bias_attr should be 1 or 2 or 3 when it is a list/tuple" else: encoder_bias_attr = bias_attr decoder_bias_attr = bias_attr @@ -1267,15 +1328,17 @@ class Transformer(Layer): elif len(weight_attr) == 2: encoder_weight_attr = weight_attr decoder_weight_attr = [ - weight_attr[0], weight_attr[0], weight_attr[-1] + weight_attr[0], + weight_attr[0], + weight_attr[-1], ] elif len(weight_attr) == 3: encoder_weight_attr = [weight_attr[0], weight_attr[-1]] decoder_weight_attr = weight_attr else: - assert False, ( - "length of weight_attr should be 1 or 2 or 3 when it is a list/tuple" - ) + assert ( + False + ), "length of weight_attr should be 1 or 2 or 3 when it is a list/tuple" else: encoder_weight_attr = weight_attr decoder_weight_attr = weight_attr @@ -1284,23 +1347,41 @@ class Transformer(Layer): self.encoder = custom_encoder else: encoder_layer = TransformerEncoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, - attn_dropout, act_dropout, normalize_before, - encoder_weight_attr, encoder_bias_attr) + d_model, + nhead, + dim_feedforward, + dropout, + activation, + attn_dropout, + act_dropout, + normalize_before, + encoder_weight_attr, + encoder_bias_attr, + ) encoder_norm = LayerNorm(d_model) - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, - encoder_norm) + self.encoder = TransformerEncoder( + encoder_layer, num_encoder_layers, encoder_norm + ) if custom_decoder is not None: self.decoder = custom_decoder else: decoder_layer = TransformerDecoderLayer( - d_model, nhead, dim_feedforward, dropout, activation, - attn_dropout, act_dropout, normalize_before, - decoder_weight_attr, decoder_bias_attr) + d_model, + nhead, + dim_feedforward, + dropout, + activation, + attn_dropout, + act_dropout, + normalize_before, + decoder_weight_attr, + decoder_bias_attr, + ) decoder_norm = LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, - decoder_norm) + self.decoder = TransformerDecoder( + decoder_layer, num_decoder_layers, decoder_norm + ) self.d_model = d_model self.nhead = nhead @@ -1359,10 +1440,9 @@ class Transformer(Layer): tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype) memory_mask = _convert_attention_mask(memory_mask, memory.dtype) - output = self.decoder(tgt, - memory, - tgt_mask=tgt_mask, - memory_mask=memory_mask) + output = self.decoder( + tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask + ) return output def generate_square_subsequent_mask(self, length): @@ -1397,6 +1477,10 @@ class Transformer(Layer): """ return 
paddle.tensor.triu( - paddle.full(shape=[length, length], - fill_value=-np.inf, - dtype=paddle.get_default_dtype()), 1) + paddle.full( + shape=[length, length], + fill_value=-np.inf, + dtype=paddle.get_default_dtype(), + ), + 1, + ) diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py index fe367828815c5f6e29c8d9e341991bb9ded410f3..704baeb98f9cf031014f4c717682a869953d895f 100644 --- a/python/paddle/nn/layer/vision.py +++ b/python/paddle/nn/layer/vision.py @@ -67,16 +67,19 @@ class PixelShuffle(Layer): raise TypeError("upscale factor must be int type") if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Data format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format)) + raise ValueError( + "Data format should be 'NCHW' or 'NHWC'." + "But recevie data format: {}".format(data_format) + ) self._upscale_factor = upscale_factor self._data_format = data_format self._name = name def forward(self, x): - return functional.pixel_shuffle(x, self._upscale_factor, - self._data_format, self._name) + return functional.pixel_shuffle( + x, self._upscale_factor, self._data_format, self._name + ) def extra_repr(self): main_str = 'upscale_factor={}'.format(self._upscale_factor) @@ -130,16 +133,19 @@ class PixelUnshuffle(Layer): raise ValueError("Downscale factor must be positive") if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Data format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format)) + raise ValueError( + "Data format should be 'NCHW' or 'NHWC'." + "But recevie data format: {}".format(data_format) + ) self._downscale_factor = downscale_factor self._data_format = data_format self._name = name def forward(self, x): - return functional.pixel_unshuffle(x, self._downscale_factor, - self._data_format, self._name) + return functional.pixel_unshuffle( + x, self._downscale_factor, self._data_format, self._name + ) def extra_repr(self): main_str = 'downscale_factor={}'.format(self._downscale_factor) @@ -202,16 +208,19 @@ class ChannelShuffle(Layer): raise ValueError("groups must be positive") if data_format not in ["NCHW", "NHWC"]: - raise ValueError("Data format should be 'NCHW' or 'NHWC'." - "But recevie data format: {}".format(data_format)) + raise ValueError( + "Data format should be 'NCHW' or 'NHWC'." 
+ "But recevie data format: {}".format(data_format) + ) self._groups = groups self._data_format = data_format self._name = name def forward(self, x): - return functional.channel_shuffle(x, self._groups, self._data_format, - self._name) + return functional.channel_shuffle( + x, self._groups, self._data_format, self._name + ) def extra_repr(self): main_str = 'groups={}'.format(self._groups) diff --git a/python/paddle/nn/quant/functional_layers.py b/python/paddle/nn/quant/functional_layers.py index ca1eb5f4fb3c190cb71992312d02f19599d92a1f..2c0eb88e0875c95b7ff832ef947a04d752fa068a 100644 --- a/python/paddle/nn/quant/functional_layers.py +++ b/python/paddle/nn/quant/functional_layers.py @@ -19,13 +19,11 @@ __all__ = [] class FloatFunctionalLayer(Layer): - def __init__(self): super(FloatFunctionalLayer, self).__init__() class add(FloatFunctionalLayer): - def __init__(self): super(add, self).__init__() @@ -34,7 +32,6 @@ class add(FloatFunctionalLayer): class subtract(FloatFunctionalLayer): - def __init__(self): super(subtract, self).__init__() @@ -43,7 +40,6 @@ class subtract(FloatFunctionalLayer): class multiply(FloatFunctionalLayer): - def __init__(self): super(multiply, self).__init__() @@ -52,7 +48,6 @@ class multiply(FloatFunctionalLayer): class divide(FloatFunctionalLayer): - def __init__(self): super(divide, self).__init__() @@ -61,7 +56,6 @@ class divide(FloatFunctionalLayer): class reshape(FloatFunctionalLayer): - def __init__(self): super(reshape, self).__init__() @@ -70,7 +64,6 @@ class reshape(FloatFunctionalLayer): class transpose(FloatFunctionalLayer): - def __init__(self): super(transpose, self).__init__() @@ -79,7 +72,6 @@ class transpose(FloatFunctionalLayer): class concat(FloatFunctionalLayer): - def __init__(self): super(concat, self).__init__() @@ -88,7 +80,6 @@ class concat(FloatFunctionalLayer): class flatten(FloatFunctionalLayer): - def __init__(self): super(flatten, self).__init__() diff --git a/python/paddle/nn/quant/lsq.py b/python/paddle/nn/quant/lsq.py index 6ed6e78a6d98eee1413f449efae7dfc2d2007516..ff2c3d12807ed43f3e464bb52b8ac0f83d547a8e 100644 --- a/python/paddle/nn/quant/lsq.py +++ b/python/paddle/nn/quant/lsq.py @@ -28,7 +28,6 @@ def round(x): class LsqFunc(PyLayer): - @staticmethod def forward(ctx, weight, alpha, g, Qn, Qp, per_channel=False, quant_axis=0): ctx.save_for_backward(weight, alpha) @@ -65,21 +64,39 @@ class LsqFunc(PyLayer): upper_flag = paddle.cast((q_w > Qp), 'float32') middle_flag = 1.0 - lower_flag - upper_flag if per_channel: - grad_alpha = ((lower_flag * Qn + upper_flag * Qp + - middle_flag * round(q_w) - middle_flag * q_w) * - grad_weight * g) + grad_alpha = ( + ( + lower_flag * Qn + + upper_flag * Qp + + middle_flag * round(q_w) + - middle_flag * q_w + ) + * grad_weight + * g + ) grad_alpha = grad_alpha.reshape( - (grad_alpha.shape[quant_axis], -1)).sum(axis=1) + (grad_alpha.shape[quant_axis], -1) + ).sum(axis=1) else: - grad_alpha = ((lower_flag * Qn + upper_flag * Qp + - middle_flag * round(q_w) - middle_flag * q_w) * - grad_weight * g).sum().unsqueeze(axis=0)[0] + grad_alpha = ( + ( + ( + lower_flag * Qn + + upper_flag * Qp + + middle_flag * round(q_w) + - middle_flag * q_w + ) + * grad_weight + * g + ) + .sum() + .unsqueeze(axis=0)[0] + ) grad_weight = middle_flag * grad_weight return grad_weight, grad_alpha class LsqPlusActFunc(PyLayer): - @staticmethod def forward(ctx, x, alpha, beta, g, Qn, Qp): ctx.save_for_backward(x, alpha, beta) @@ -95,25 +112,38 @@ class LsqPlusActFunc(PyLayer): lower_flag = paddle.cast((q_x < Qn), 'float32') 
upper_flag = paddle.cast((q_x > Qp), 'float32') middle_flag = 1.0 - lower_flag - upper_flag - grad_alpha = ((lower_flag * Qn + upper_flag * Qp + - middle_flag * round(q_x) - middle_flag * q_x) * grad_x * - g).sum().unsqueeze(axis=0)[0] - grad_beta = ((lower_flag + upper_flag) * grad_x * - g).sum().unsqueeze(axis=0)[0] + grad_alpha = ( + ( + ( + lower_flag * Qn + + upper_flag * Qp + + middle_flag * round(q_x) + - middle_flag * q_x + ) + * grad_x + * g + ) + .sum() + .unsqueeze(axis=0)[0] + ) + grad_beta = ( + ((lower_flag + upper_flag) * grad_x * g).sum().unsqueeze(axis=0)[0] + ) grad_x = middle_flag * grad_x return grad_x, grad_alpha, grad_beta class FakeQuantActLSQPlus(Layer): - - def __init__(self, - quant_bits, - all_postive=False, - symmetric=False, - batch_init=20, - dtype='float32', - name=None, - reduce_type=None): + def __init__( + self, + quant_bits, + all_postive=False, + symmetric=False, + batch_init=20, + dtype='float32', + name=None, + reduce_type=None, + ): super(FakeQuantActLSQPlus, self).__init__() ''' Args: @@ -138,46 +168,51 @@ class FakeQuantActLSQPlus(Layer): self.Qp = 2**self.bits - 1 else: # signed activation - self.Qn = -2**(self.bits - 1) - self.Qp = 2**(self.bits - 1) - 1 + self.Qn = -(2 ** (self.bits - 1)) + self.Qp = 2 ** (self.bits - 1) - 1 - scale_prefix = "{}.scale".format( - name) if name else 'quant_dequant.scale' + scale_prefix = ( + "{}.scale".format(name) if name else 'quant_dequant.scale' + ) self._scale_name = unique_name.generate(scale_prefix) - s_attr = ParamAttr(name=self._scale_name, - initializer=Constant(1.0), - trainable=True) + s_attr = ParamAttr( + name=self._scale_name, initializer=Constant(1.0), trainable=True + ) self.s = self.create_parameter(shape=[1], attr=s_attr, dtype='float32') self.s.stop_gradient = False if not self.symmetric: - beta_prefix = "{}.beta".format( - name) if name else 'quant_dequant.beta' + beta_prefix = ( + "{}.beta".format(name) if name else 'quant_dequant.beta' + ) self._beta_name = unique_name.generate(beta_prefix) - beta_attr = ParamAttr(name=self._beta_name, - initializer=Constant(0.0), - trainable=True) - self.beta = self.create_parameter(shape=[1], - attr=beta_attr, - dtype='float32') + beta_attr = ParamAttr( + name=self._beta_name, initializer=Constant(0.0), trainable=True + ) + self.beta = self.create_parameter( + shape=[1], attr=beta_attr, dtype='float32' + ) self.beta.stop_gradient = False self.init_state = 0 def forward(self, activation): if self.reduce_type == "max": - paddle.distributed.all_reduce(self.s, - op=paddle.distributed.ReduceOp.MAX) + paddle.distributed.all_reduce( + self.s, op=paddle.distributed.ReduceOp.MAX + ) if not self.symmetric and self.reduce_type == "max": - paddle.distributed.all_reduce(self.beta, - op=paddle.distributed.ReduceOp.MAX) + paddle.distributed.all_reduce( + self.beta, op=paddle.distributed.ReduceOp.MAX + ) if self.init_state == 0: - self.g = paddle.to_tensor(1.0 / - math.sqrt(activation.numel() * self.Qp)) + self.g = paddle.to_tensor( + 1.0 / math.sqrt(activation.numel() * self.Qp) + ) min_a = paddle.min(activation.detach()) max_a = paddle.max(activation.detach()) self.s.set_value((max_a - min_a) / (self.Qp - self.Qn)) @@ -187,40 +222,41 @@ class FakeQuantActLSQPlus(Layer): elif self.init_state < self.batch_init: min_a = paddle.min(activation.detach()) max_a = paddle.max(activation.detach()) - self.s.set_value(self.s * 0.9 + 0.1 * (max_a - min_a) / - (self.Qp - self.Qn)) + self.s.set_value( + self.s * 0.9 + 0.1 * (max_a - min_a) / (self.Qp - self.Qn) + ) if not 
self.symmetric: - self.beta.set_value(self.s * 0.9 + 0.1 * - (min_a - self.s * self.Qn)) + self.beta.set_value( + self.s * 0.9 + 0.1 * (min_a - self.s * self.Qn) + ) self.init_state += 1 else: self.init_state += 1 activation.stop_gradient = False if not self.symmetric: - q_a = LsqPlusActFunc.apply(activation, self.s, self.beta, self.g, - self.Qn, self.Qp) + q_a = LsqPlusActFunc.apply( + activation, self.s, self.beta, self.g, self.Qn, self.Qp + ) else: - q_a = LsqFunc.apply(activation, - self.s, - self.g, - self.Qn, - self.Qp, - per_channel=False) + q_a = LsqFunc.apply( + activation, self.s, self.g, self.Qn, self.Qp, per_channel=False + ) return q_a class FakeQuantWeightLSQPlus(Layer): - - def __init__(self, - quant_bits, - all_postive=False, - per_channel=False, - batch_init=20, - channel_num=None, - quant_linear=False, - dtype='float32', - name=None, - reduce_type=None): + def __init__( + self, + quant_bits, + all_postive=False, + per_channel=False, + batch_init=20, + channel_num=None, + quant_linear=False, + dtype='float32', + name=None, + reduce_type=None, + ): super(FakeQuantWeightLSQPlus, self).__init__() ''' Args: @@ -251,25 +287,27 @@ class FakeQuantWeightLSQPlus(Layer): self.Qp = 2**self.bits - 1 else: # signed weight - self.Qn = -2**(self.bits - 1) - self.Qp = 2**(self.bits - 1) - 1 + self.Qn = -(2 ** (self.bits - 1)) + self.Qp = 2 ** (self.bits - 1) - 1 self.init_state = 0 - scale_prefix = "{}.scale".format( - name) if name else 'quant_dequant.scale' + scale_prefix = ( + "{}.scale".format(name) if name else 'quant_dequant.scale' + ) self._scale_name = unique_name.generate(scale_prefix) - s_attr = ParamAttr(name=self._scale_name, - initializer=Constant(1.0), - trainable=True) - self.s = self.create_parameter(shape=[channel_num], - attr=s_attr, - dtype=dtype) + s_attr = ParamAttr( + name=self._scale_name, initializer=Constant(1.0), trainable=True + ) + self.s = self.create_parameter( + shape=[channel_num], attr=s_attr, dtype=dtype + ) self.s.stop_gradient = False def forward(self, weight): if self.reduce_type == "max": - paddle.distributed.all_reduce(self.s, - op=paddle.distributed.ReduceOp.MAX) + paddle.distributed.all_reduce( + self.s, op=paddle.distributed.ReduceOp.MAX + ) if self.init_state == 0: self.g = paddle.to_tensor(1.0 / math.sqrt(weight.numel() * self.Qp)) @@ -278,19 +316,22 @@ class FakeQuantWeightLSQPlus(Layer): weight_tmp = weight.detach().reshape((weight.shape[0], -1)) mean = paddle.mean(weight_tmp, axis=self.collect_axis) std = paddle.std(weight_tmp, axis=self.collect_axis) - s = paddle.max(paddle.stack( - [paddle.abs(mean - 3 * std), - paddle.abs(mean + 3 * std)]), - axis=0) + s = paddle.max( + paddle.stack( + [paddle.abs(mean - 3 * std), paddle.abs(mean + 3 * std)] + ), + axis=0, + ) self.s.set_value(s / self.div) else: mean = paddle.mean(weight.detach()) std = paddle.std(weight.detach()) self.s.set_value( - max([ - paddle.abs(mean - 3 * std), - paddle.abs(mean + 3 * std) - ]) / self.div) + max( + [paddle.abs(mean - 3 * std), paddle.abs(mean + 3 * std)] + ) + / self.div + ) self.init_state += 1 elif self.init_state < self.batch_init: self.div = 2**self.bits - 1 @@ -298,22 +339,36 @@ class FakeQuantWeightLSQPlus(Layer): weight_tmp = weight.detach().reshape((weight.shape[0], -1)) mean = paddle.mean(weight_tmp, axis=self.collect_axis) std = paddle.std(weight_tmp, axis=self.collect_axis) - s = paddle.max(paddle.stack( - [paddle.abs(mean - 3 * std), - paddle.abs(mean + 3 * std)]), - axis=0) + s = paddle.max( + paddle.stack( + [paddle.abs(mean - 3 * std), 
paddle.abs(mean + 3 * std)] + ), + axis=0, + ) self.s.set_value(s * 0.9 + 0.1 * s / self.div) else: mean = paddle.mean(weight.detach()) std = paddle.std(weight.detach()) - self.s.set_value(self.s * 0.9 + 0.1 * max( - [paddle.abs(mean - 3 * std), - paddle.abs(mean + 3 * std)]) / self.div) + self.s.set_value( + self.s * 0.9 + + 0.1 + * max( + [paddle.abs(mean - 3 * std), paddle.abs(mean + 3 * std)] + ) + / self.div + ) self.init_state += 1 elif self.init_state == self.batch_init: self.init_state += 1 weight.stop_gradient = False - w_q = LsqFunc.apply(weight, self.s, self.g, self.Qn, self.Qp, - self.per_channel, self.quant_axis) + w_q = LsqFunc.apply( + weight, + self.s, + self.g, + self.Qn, + self.Qp, + self.per_channel, + self.quant_axis, + ) return w_q diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py index affa180bd303a8fac804d8c845d7c7e798d0788f..6eeaed7f86fecf91f77c586637a7da9ed1599765 100644 --- a/python/paddle/nn/quant/quant_layers.py +++ b/python/paddle/nn/quant/quant_layers.py @@ -42,9 +42,9 @@ __all__ = [ 'QuantizedColumnParallelLinear', ] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) class FakeQuantAbsMax(Layer): @@ -57,26 +57,31 @@ class FakeQuantAbsMax(Layer): :math:`Out = round(X / scale * range) * scale / range` """ - def __init__(self, - name=None, - quant_bits=8, - dtype='float32', - quant_on_weight=False, - reduce_type=None): + def __init__( + self, + name=None, + quant_bits=8, + dtype='float32', + quant_on_weight=False, + reduce_type=None, + ): super(FakeQuantAbsMax, self).__init__() self._quant_bits = quant_bits self._name = name self._reduce_type = reduce_type - scale_prefix = "{}.scale".format( - name) if name else 'quant_dequant.scale' + scale_prefix = ( + "{}.scale".format(name) if name else 'quant_dequant.scale' + ) self._scale_name = unique_name.generate(scale_prefix) if quant_on_weight: - scale_attr = ParamAttr(name=self._scale_name, - initializer=Constant(0.001), - trainable=False) - self._scale = self.create_parameter(shape=[1], - attr=scale_attr, - dtype=self._dtype) + scale_attr = ParamAttr( + name=self._scale_name, + initializer=Constant(0.001), + trainable=False, + ) + self._scale = self.create_parameter( + shape=[1], attr=scale_attr, dtype=self._dtype + ) self._scale.stop_gradient = True else: self._scale = None @@ -84,16 +89,18 @@ class FakeQuantAbsMax(Layer): def forward(self, input): if in_dynamic_mode(): attrs = ('bit_length', self._quant_bits) - quant_out = _varbase_creator(type=input.type, - name="{}.quantized.dequantized".format( - input.name), - shape=input.shape, - dtype=input.dtype, - persistable=False) + quant_out = _varbase_creator( + type=input.type, + name="{}.quantized.dequantized".format(input.name), + shape=input.shape, + dtype=input.dtype, + persistable=False, + ) out_scale = self._scale if self._reduce_type == "max": paddle.distributed.all_reduce( - out_scale, op=paddle.distributed.ReduceOp.MAX) + out_scale, op=paddle.distributed.ReduceOp.MAX + ) if not out_scale: out_scale = _varbase_creator( @@ -101,10 +108,12 @@ class FakeQuantAbsMax(Layer): name=self._scale_name, shape=[1], dtype=self._dtype, - persistable=False) + persistable=False, + ) out_scale.stop_gradient = True out, _, = _legacy_C_ops.fake_quantize_dequantize_abs_max( - input, quant_out, out_scale, *attrs) + input, quant_out, out_scale, *attrs + ) return out check_variable_and_dtype(input, 
'input', ['float32'], "FakeQuantAbsMax") @@ -115,7 +124,8 @@ class FakeQuantAbsMax(Layer): dtype=input.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) out_scale = self._scale if not out_scale: out_scale = self._helper.create_variable( @@ -123,13 +133,16 @@ class FakeQuantAbsMax(Layer): dtype=self._dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) outputs = {"Out": [quant_out], "OutScale": [out_scale]} - self._helper.append_op(type="fake_quantize_dequantize_abs_max", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="fake_quantize_dequantize_abs_max", + inputs=inputs, + outputs=outputs, + attrs=attrs, + ) return quant_out @@ -144,75 +157,108 @@ class FakeQuantMovingAverageAbsMax(Layer): :math:`Out = round(X / scale * range) * scale / range` """ - def __init__(self, - name=None, - moving_rate=0.9, - quant_bits=8, - dtype='float32', - reduce_type=None): + def __init__( + self, + name=None, + moving_rate=0.9, + quant_bits=8, + dtype='float32', + reduce_type=None, + ): super(FakeQuantMovingAverageAbsMax, self).__init__() self._moving_rate = moving_rate self._quant_bits = quant_bits self._reduce_type = reduce_type - scale_prefix = "{}.scale".format( - name) if name else 'quant_dequant.scale' - scale_attr = ParamAttr(name=unique_name.generate(scale_prefix), - initializer=Constant(0.001), - trainable=False) - self._scale = self.create_parameter(shape=[1], - attr=scale_attr, - dtype=dtype) + scale_prefix = ( + "{}.scale".format(name) if name else 'quant_dequant.scale' + ) + scale_attr = ParamAttr( + name=unique_name.generate(scale_prefix), + initializer=Constant(0.001), + trainable=False, + ) + self._scale = self.create_parameter( + shape=[1], attr=scale_attr, dtype=dtype + ) self._scale.stop_gradient = True - state_prefix = "{}.state".format( - name) if name else 'quant_dequant.state' - state_attr = ParamAttr(name=unique_name.generate(state_prefix), - initializer=Constant(1), - trainable=False) - self._state = self.create_parameter(shape=[1], - attr=state_attr, - dtype=dtype) + state_prefix = ( + "{}.state".format(name) if name else 'quant_dequant.state' + ) + state_attr = ParamAttr( + name=unique_name.generate(state_prefix), + initializer=Constant(1), + trainable=False, + ) + self._state = self.create_parameter( + shape=[1], attr=state_attr, dtype=dtype + ) self._state.stop_gradient = True - accum_prefix = "{}.accum".format( - name) if name else 'quant_dequant.accum' - accum_attr = ParamAttr(name=unique_name.generate(accum_prefix), - initializer=Constant(1), - trainable=False) - self._accum = self.create_parameter(shape=[1], - attr=accum_attr, - dtype=dtype) + accum_prefix = ( + "{}.accum".format(name) if name else 'quant_dequant.accum' + ) + accum_attr = ParamAttr( + name=unique_name.generate(accum_prefix), + initializer=Constant(1), + trainable=False, + ) + self._accum = self.create_parameter( + shape=[1], attr=accum_attr, dtype=dtype + ) self._accum.stop_gradient = True def forward(self, input): if in_dynamic_mode(): - attrs = ('moving_rate', self._moving_rate, 'bit_length', - self._quant_bits, 'is_test', not self.training) - quant_out = _varbase_creator(type=input.type, - name="{}.quantized.dequantized".format( - input.name), - shape=input.shape, - dtype=input.dtype, - persistable=False) + attrs = ( + 'moving_rate', + self._moving_rate, + 'bit_length', + self._quant_bits, + 'is_test', + not self.training, + ) + quant_out = 
_varbase_creator( + type=input.type, + name="{}.quantized.dequantized".format(input.name), + shape=input.shape, + dtype=input.dtype, + persistable=False, + ) if self._reduce_type == "max": paddle.distributed.all_reduce( - self._scale, op=paddle.distributed.ReduceOp.MAX) + self._scale, op=paddle.distributed.ReduceOp.MAX + ) state = self._state if self.training else None accum = self._accum if self.training else None - out, _, _, _ = _legacy_C_ops.fake_quantize_dequantize_moving_average_abs_max( - input, self._scale, accum, state, quant_out, self._scale, state, - accum, *attrs) + ( + out, + _, + _, + _, + ) = _legacy_C_ops.fake_quantize_dequantize_moving_average_abs_max( + input, + self._scale, + accum, + state, + quant_out, + self._scale, + state, + accum, + *attrs + ) return out - check_variable_and_dtype(input, 'input', ['float32'], - "FakeQuantMovingAverageAbsMax") + check_variable_and_dtype( + input, 'input', ['float32'], "FakeQuantMovingAverageAbsMax" + ) attrs = { 'moving_rate': self._moving_rate, 'bit_length': self._quant_bits, - 'is_test': not self.training + 'is_test': not self.training, } inputs = {"X": [input], "InScale": [self._scale]} quant_out = self._helper.create_variable( @@ -220,7 +266,8 @@ class FakeQuantMovingAverageAbsMax(Layer): dtype=input.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) outputs = {"Out": [quant_out], "OutScale": [self._scale]} if self.training: @@ -233,22 +280,26 @@ class FakeQuantMovingAverageAbsMax(Layer): type="fake_quantize_dequantize_moving_average_abs_max", inputs=inputs, outputs=outputs, - attrs=attrs) + attrs=attrs, + ) return quant_out class FakeQuantChannelWiseAbsMax(Layer): - - def __init__(self, - name=None, - channel_num=None, - quant_bits=8, - quant_axis=0, - dtype='float32', - quant_on_weight=False, - reduce_type=None): - assert quant_on_weight == True, "Channel_wise only can be used on weight quantization." + def __init__( + self, + name=None, + channel_num=None, + quant_bits=8, + quant_axis=0, + dtype='float32', + quant_on_weight=False, + reduce_type=None, + ): + assert ( + quant_on_weight == True + ), "Channel_wise only can be used on weight quantization." 
super(FakeQuantChannelWiseAbsMax, self).__init__() self._quant_bits = quant_bits self._quant_axis = quant_axis @@ -256,50 +307,65 @@ class FakeQuantChannelWiseAbsMax(Layer): self._name = name self._channel_num = channel_num self._reduce_type = reduce_type - scale_prefix = "{}.scale".format( - name) if name else 'quant_dequant.scale' + scale_prefix = ( + "{}.scale".format(name) if name else 'quant_dequant.scale' + ) self._scale_name = unique_name.generate(scale_prefix) if quant_on_weight: - scale_attr = ParamAttr(name=self._scale_name, - initializer=Constant(0.0), - trainable=False) - self._scale = self.create_parameter(shape=[self._channel_num], - attr=scale_attr, - dtype=self._dtype) + scale_attr = ParamAttr( + name=self._scale_name, + initializer=Constant(0.0), + trainable=False, + ) + self._scale = self.create_parameter( + shape=[self._channel_num], attr=scale_attr, dtype=self._dtype + ) self._scale.stop_gradient = True else: self._scale = None def forward(self, input): if in_dynamic_mode(): - attrs = ('bit_length', self._quant_bits, 'quant_axis', - self._quant_axis) - quant_out = _varbase_creator(type=input.type, - name="{}.quantized.dequantized".format( - input.name), - shape=input.shape, - dtype=input.dtype, - persistable=False) + attrs = ( + 'bit_length', + self._quant_bits, + 'quant_axis', + self._quant_axis, + ) + quant_out = _varbase_creator( + type=input.type, + name="{}.quantized.dequantized".format(input.name), + shape=input.shape, + dtype=input.dtype, + persistable=False, + ) out_scale = self._scale if self._reduce_type == "max": paddle.distributed.all_reduce( - out_scale, op=paddle.distributed.ReduceOp.MAX) + out_scale, op=paddle.distributed.ReduceOp.MAX + ) if out_scale is None: out_scale = _varbase_creator( type=core.VarDesc.VarType.LOD_TENSOR, name=self._scale_name, shape=[self._channel_num], dtype=self._dtype, - persistable=False) + persistable=False, + ) out_scale.stop_gradient = True - out, _, = _legacy_C_ops.fake_channel_wise_quantize_dequantize_abs_max( - input, quant_out, out_scale, *attrs) + ( + out, + _, + ) = _legacy_C_ops.fake_channel_wise_quantize_dequantize_abs_max( + input, quant_out, out_scale, *attrs + ) return out - check_variable_and_dtype(input, 'input', ['float32'], - "FakeQuantChannelWiseAbsMax") + check_variable_and_dtype( + input, 'input', ['float32'], "FakeQuantChannelWiseAbsMax" + ) attrs = {'bit_length': self._quant_bits, 'quant_axis': self._quant_axis} inputs = {"X": [input]} quant_out = self._helper.create_variable( @@ -307,7 +373,8 @@ class FakeQuantChannelWiseAbsMax(Layer): dtype=input.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) out_scale = self._scale if not out_scale: out_scale = self._helper.create_variable( @@ -315,25 +382,24 @@ class FakeQuantChannelWiseAbsMax(Layer): dtype=self._dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=True) + stop_gradient=True, + ) outputs = {"Out": [quant_out], "OutScale": [out_scale]} self._helper.append_op( type="fake_channel_wise_quantize_dequantize_abs_max", inputs=inputs, outputs=outputs, - attrs=attrs) + attrs=attrs, + ) return quant_out class MovingAverageAbsMaxScale(Layer): - - def __init__(self, - name=None, - moving_rate=0.9, - dtype='float32', - reduce_type=None): + def __init__( + self, name=None, moving_rate=0.9, dtype='float32', reduce_type=None + ): r""" MovingAverageMaxScale layer is used to calculating the output quantization scale of Layer. 
Its computational formula is described as below: @@ -346,56 +412,75 @@ class MovingAverageAbsMaxScale(Layer): self._reduce_type = reduce_type scale_prefix = '{}.scale'.format(name) if name else 'outscale.scale' scale_name = unique_name.generate(scale_prefix) - scale_attr = ParamAttr(name=scale_name, - initializer=Constant(0), - trainable=False) - self._scale = self.create_parameter(shape=[1], - attr=scale_attr, - dtype=dtype) + scale_attr = ParamAttr( + name=scale_name, initializer=Constant(0), trainable=False + ) + self._scale = self.create_parameter( + shape=[1], attr=scale_attr, dtype=dtype + ) self._scale.stop_gradient = True state_prefix = "{}.state".format(name) if name else 'outscale.state' - state_attr = ParamAttr(name=unique_name.generate(state_prefix), - initializer=Constant(0), - trainable=False) - self._state = self.create_parameter(shape=[1], - attr=state_attr, - dtype=dtype) + state_attr = ParamAttr( + name=unique_name.generate(state_prefix), + initializer=Constant(0), + trainable=False, + ) + self._state = self.create_parameter( + shape=[1], attr=state_attr, dtype=dtype + ) self._state.stop_gradient = True accum_prefix = "{}.accum".format(name) if name else 'outscale.accum' - accum_attr = ParamAttr(name=unique_name.generate(accum_prefix), - initializer=Constant(0), - trainable=False) - self._accum = self.create_parameter(shape=[1], - attr=accum_attr, - dtype=dtype) + accum_attr = ParamAttr( + name=unique_name.generate(accum_prefix), + initializer=Constant(0), + trainable=False, + ) + self._accum = self.create_parameter( + shape=[1], attr=accum_attr, dtype=dtype + ) self._accum.stop_gradient = True def forward(self, input): if in_dynamic_mode(): - attrs = ('moving_rate', self._moving_rate, 'is_test', - not self.training) - - quant_out = _varbase_creator(type=input.type, - name="{}.tmp".format(input.name), - shape=input.shape, - dtype=input.dtype, - persistable=False) + attrs = ( + 'moving_rate', + self._moving_rate, + 'is_test', + not self.training, + ) + + quant_out = _varbase_creator( + type=input.type, + name="{}.tmp".format(input.name), + shape=input.shape, + dtype=input.dtype, + persistable=False, + ) if self._reduce_type == "max": paddle.distributed.all_reduce( - self._scale, op=paddle.distributed.ReduceOp.MAX) + self._scale, op=paddle.distributed.ReduceOp.MAX + ) state = self._state if self.training else None accum = self._accum if self.training else None out, _, _, _ = _legacy_C_ops.moving_average_abs_max_scale( - input, accum, state, quant_out, self._scale, state, accum, - *attrs) + input, + accum, + state, + quant_out, + self._scale, + state, + accum, + *attrs + ) return out - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'MovingAverageAbsMaxScale') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'MovingAverageAbsMaxScale' + ) attrs = {'moving_rate': self._moving_rate, 'is_test': not self.training} inputs = {"X": [input]} @@ -404,7 +489,8 @@ class MovingAverageAbsMaxScale(Layer): dtype=input.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + ) outputs = {"Out": [quant_out], "OutScale": [self._scale]} if self.training: @@ -413,10 +499,12 @@ class MovingAverageAbsMaxScale(Layer): outputs['OutState'] = [self._state] outputs['OutAccum'] = [self._accum] - self._helper.append_op(type="moving_average_abs_max_scale", - inputs=inputs, - outputs=outputs, - attrs=attrs) + self._helper.append_op( + type="moving_average_abs_max_scale", + inputs=inputs, + outputs=outputs, + 
attrs=attrs, + ) return quant_out @@ -430,17 +518,19 @@ class QuantizedConv2D(Layer): The only difference is that its inputs are all fake quantized. """ - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_quantize_type='abs_max', - activation_quantize_type='abs_max', - weight_pre_layer=None, - act_pre_layer=None, - weight_quant_layer=None, - act_quant_layer=None): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_quantize_type='abs_max', + activation_quantize_type='abs_max', + weight_pre_layer=None, + act_pre_layer=None, + weight_quant_layer=None, + act_quant_layer=None, + ): super(QuantizedConv2D, self).__init__() # For Conv2D self._groups = getattr(layer, '_groups') @@ -449,7 +539,8 @@ class QuantizedConv2D(Layer): self._padding_mode = getattr(layer, '_padding_mode') if self._padding_mode != 'zeros': self._reversed_padding_repeated_twice = getattr( - layer, '_reversed_padding_repeated_twice') + layer, '_reversed_padding_repeated_twice' + ) self._dilation = getattr(layer, '_dilation') self._data_format = getattr(layer, '_data_format') self.weight = getattr(layer, 'weight') @@ -468,7 +559,8 @@ class QuantizedConv2D(Layer): dtype=self._dtype, quant_on_weight=True, channel_num=self.weight.shape[self._conv2d_quant_axis], - quant_axis=self._conv2d_quant_axis) + quant_axis=self._conv2d_quant_axis, + ) if act_quant_layer is not None: self._fake_quant_input = act_quant_layer() else: @@ -478,12 +570,15 @@ class QuantizedConv2D(Layer): moving_rate=moving_rate, quant_bits=activation_bits, dtype=self._dtype, - quant_on_weight=False) + quant_on_weight=False, + ) - self._act_preprocess = act_pre_layer( - ) if act_pre_layer is not None else None - self._weight_preprocess = weight_pre_layer( - ) if weight_pre_layer is not None else None + self._act_preprocess = ( + act_pre_layer() if act_pre_layer is not None else None + ) + self._weight_preprocess = ( + weight_pre_layer() if weight_pre_layer is not None else None + ) def forward(self, input): if self._act_preprocess is not None: @@ -496,20 +591,24 @@ class QuantizedConv2D(Layer): quant_weight = self._fake_quant_weight(weight) if self._padding_mode != 'zeros': - quant_input = F.pad(quant_input, - self._reversed_padding_repeated_twice, - mode=self._padding_mode, - data_format=self._data_format) + quant_input = F.pad( + quant_input, + self._reversed_padding_repeated_twice, + mode=self._padding_mode, + data_format=self._data_format, + ) self._padding = 0 - return F.conv2d(quant_input, - quant_weight, - bias=self.bias, - padding=self._padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - data_format=self._data_format) + return F.conv2d( + quant_input, + quant_weight, + bias=self.bias, + padding=self._padding, + stride=self._stride, + dilation=self._dilation, + groups=self._groups, + data_format=self._data_format, + ) class QuantizedConv2DTranspose(Layer): @@ -533,17 +632,19 @@ class QuantizedConv2DTranspose(Layer): # (2, 6, 10, 10), (2, 6, 10, 10) """ - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_quantize_type='abs_max', - activation_quantize_type='abs_max', - weight_pre_layer=None, - act_pre_layer=None, - weight_quant_layer=None, - act_quant_layer=None): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_quantize_type='abs_max', + activation_quantize_type='abs_max', + weight_pre_layer=None, + act_pre_layer=None, + weight_quant_layer=None, + 
act_quant_layer=None, + ): r""" Constructor. @@ -572,8 +673,10 @@ class QuantizedConv2DTranspose(Layer): dtype=self._dtype, quant_on_weight=True, channel_num=self.weight.shape[ - self._conv2d_transpose_quant_axis], - quant_axis=self._conv2d_transpose_quant_axis) + self._conv2d_transpose_quant_axis + ], + quant_axis=self._conv2d_transpose_quant_axis, + ) if act_quant_layer is not None: self._fake_quant_input = act_quant_layer() else: @@ -583,12 +686,15 @@ class QuantizedConv2DTranspose(Layer): moving_rate=moving_rate, quant_bits=activation_bits, dtype=self._dtype, - quant_on_weight=False) + quant_on_weight=False, + ) - self._act_preprocess = act_pre_layer( - ) if act_pre_layer is not None else None - self._weight_preprocess = weight_pre_layer( - ) if weight_pre_layer is not None else None + self._act_preprocess = ( + act_pre_layer() if act_pre_layer is not None else None + ) + self._weight_preprocess = ( + weight_pre_layer() if weight_pre_layer is not None else None + ) def forward(self, input, output_size=None): if self._act_preprocess is not None: @@ -605,16 +711,18 @@ class QuantizedConv2DTranspose(Layer): else: output_padding = 0 - return F.conv2d_transpose(quant_input, - quant_weight, - bias=self.bias, - padding=self._padding, - output_padding=output_padding, - stride=self._stride, - dilation=self._dilation, - groups=self._groups, - output_size=output_size, - data_format=self._data_format) + return F.conv2d_transpose( + quant_input, + quant_weight, + bias=self.bias, + padding=self._padding, + output_padding=output_padding, + stride=self._stride, + dilation=self._dilation, + groups=self._groups, + output_size=output_size, + data_format=self._data_format, + ) class QuantizedLinear(Layer): @@ -623,17 +731,19 @@ class QuantizedLinear(Layer): The only difference is that its inputs are all fake quantized. 
""" - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_quantize_type='abs_max', - activation_quantize_type='abs_max', - weight_pre_layer=None, - act_pre_layer=None, - weight_quant_layer=None, - act_quant_layer=None): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_quantize_type='abs_max', + activation_quantize_type='abs_max', + weight_pre_layer=None, + act_pre_layer=None, + weight_quant_layer=None, + act_quant_layer=None, + ): super(QuantizedLinear, self).__init__() # For Linear self.weight = getattr(layer, 'weight') @@ -654,7 +764,8 @@ class QuantizedLinear(Layer): quant_on_weight=True, channel_num=self.weight.shape[self._linear_quant_axis], quant_axis=self._linear_quant_axis, - quant_linear=True) + quant_linear=True, + ) if act_quant_layer is not None: self._fake_quant_input = act_quant_layer() @@ -665,12 +776,15 @@ class QuantizedLinear(Layer): moving_rate=moving_rate, quant_bits=activation_bits, dtype=self._dtype, - quant_on_weight=False) + quant_on_weight=False, + ) - self._act_preprocess = act_pre_layer( - ) if act_pre_layer is not None else None - self._weight_preprocess = weight_pre_layer( - ) if weight_pre_layer is not None else None + self._act_preprocess = ( + act_pre_layer() if act_pre_layer is not None else None + ) + self._weight_preprocess = ( + weight_pre_layer() if weight_pre_layer is not None else None + ) def forward(self, input): if self._act_preprocess is not None: @@ -682,32 +796,36 @@ class QuantizedLinear(Layer): weight = self._weight_preprocess(self.weight) quant_weight = self._fake_quant_weight(weight) - out = F.linear(x=quant_input, - weight=quant_weight, - bias=self.bias, - name=self.name) + out = F.linear( + x=quant_input, weight=quant_weight, bias=self.bias, name=self.name + ) return out class QuantizedColumnParallelLinear(Layer): - - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_quantize_type='abs_max', - activation_quantize_type='abs_max', - weight_pre_layer=None, - act_pre_layer=None, - weight_quant_layer=None, - act_quant_layer=None): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_quantize_type='abs_max', + activation_quantize_type='abs_max', + weight_pre_layer=None, + act_pre_layer=None, + weight_quant_layer=None, + act_quant_layer=None, + ): super(QuantizedColumnParallelLinear, self).__init__() ''' ''' - assert weight_quant_layer is None, "When quantizing ColumnParallelLinear, weight_quant_layer should be None." - assert act_quant_layer is None, "When quantizing ColumnParallelLinear, act_quant_layer should be None." + assert ( + weight_quant_layer is None + ), "When quantizing ColumnParallelLinear, weight_quant_layer should be None." + assert ( + act_quant_layer is None + ), "When quantizing ColumnParallelLinear, act_quant_layer should be None." 
self.weight = getattr(layer, 'weight') self.bias = getattr(layer, 'bias') @@ -729,7 +847,9 @@ class QuantizedColumnParallelLinear(Layer): channel_num=self.weight.shape[self._linear_quant_axis], quant_axis=self._linear_quant_axis, reduce_type='max' - if paddle.distributed.get_world_size() > 1 else None) + if paddle.distributed.get_world_size() > 1 + else None, + ) self._fake_quant_input = _get_fake_quant_type( activation_quantize_type, @@ -738,17 +858,21 @@ class QuantizedColumnParallelLinear(Layer): quant_bits=activation_bits, dtype=self._dtype, quant_on_weight=False, - reduce_type=None) + reduce_type=None, + ) - self._act_preprocess = act_pre_layer( - ) if act_pre_layer is not None else None - self._weight_preprocess = weight_pre_layer( - ) if weight_pre_layer is not None else None + self._act_preprocess = ( + act_pre_layer() if act_pre_layer is not None else None + ) + self._weight_preprocess = ( + weight_pre_layer() if weight_pre_layer is not None else None + ) def forward(self, input): if self.is_mp: input_parallel = paddle.distributed.collective._c_identity( - input, group=self.model_parallel_group) + input, group=self.model_parallel_group + ) else: input_parallel = input @@ -761,35 +885,40 @@ class QuantizedColumnParallelLinear(Layer): weight = self._weight_preprocess(self.weight) quant_weight = self._fake_quant_weight(weight) - output_parallel = F.linear(x=quant_input, - weight=quant_weight, - bias=self.bias, - name=self.name) + output_parallel = F.linear( + x=quant_input, weight=quant_weight, bias=self.bias, name=self.name + ) if self.gather_output and self.is_mp: output = paddle.distributed.collective._c_concat( - output_parallel, group=self.model_parallel_group) + output_parallel, group=self.model_parallel_group + ) else: output = output_parallel return output class QuantizedRowParallelLinear(Layer): - - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - weight_quantize_type='abs_max', - activation_quantize_type='abs_max', - weight_pre_layer=None, - act_pre_layer=None, - weight_quant_layer=None, - act_quant_layer=None): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + weight_quantize_type='abs_max', + activation_quantize_type='abs_max', + weight_pre_layer=None, + act_pre_layer=None, + weight_quant_layer=None, + act_quant_layer=None, + ): super(QuantizedRowParallelLinear, self).__init__() - assert weight_quant_layer is None, "When quantizing RowParallelLinear, weight_quant_layer cannot defined by yourself." - assert act_quant_layer is None, "When quantizing RowParallelLinear, act_quant_layer cannot defined by yourself." + assert ( + weight_quant_layer is None + ), "When quantizing RowParallelLinear, weight_quant_layer cannot defined by yourself." + assert ( + act_quant_layer is None + ), "When quantizing RowParallelLinear, act_quant_layer cannot defined by yourself." 
# For Linear self.weight = getattr(layer, 'weight') @@ -812,7 +941,9 @@ class QuantizedRowParallelLinear(Layer): channel_num=self.weight.shape[self._linear_quant_axis], quant_axis=self._linear_quant_axis, reduce_type='max' - if paddle.distributed.get_world_size() > 1 else None) + if paddle.distributed.get_world_size() > 1 + else None, + ) self._fake_quant_input = _get_fake_quant_type( activation_quantize_type, @@ -822,12 +953,16 @@ class QuantizedRowParallelLinear(Layer): dtype=self._dtype, quant_on_weight=False, reduce_type='max' - if paddle.distributed.get_world_size() > 1 else None) + if paddle.distributed.get_world_size() > 1 + else None, + ) - self._act_preprocess = act_pre_layer( - ) if act_pre_layer is not None else None - self._weight_preprocess = weight_pre_layer( - ) if weight_pre_layer is not None else None + self._act_preprocess = ( + act_pre_layer() if act_pre_layer is not None else None + ) + self._weight_preprocess = ( + weight_pre_layer() if weight_pre_layer is not None else None + ) def forward(self, input): if self.input_is_parallel or (not self.is_mp): @@ -835,7 +970,8 @@ class QuantizedRowParallelLinear(Layer): else: # split last dim input_parallel = paddle.distributed.collective._c_split( - input, group=self.model_parallel_group) + input, group=self.model_parallel_group + ) if self._act_preprocess is not None: input_parallel = self._act_preprocess(input_parallel) @@ -846,15 +982,16 @@ class QuantizedRowParallelLinear(Layer): weight = self._weight_preprocess(self.weight) quant_weight = self._fake_quant_weight(weight) - output_parallel = F.linear(x=quant_input, - weight=quant_weight, - name=self.name) + output_parallel = F.linear( + x=quant_input, weight=quant_weight, name=self.name + ) if self.is_mp: output_ = paddle.distributed.collective._mp_allreduce( output_parallel, group=self.model_parallel_group, use_calc_stream=True, - use_model_parallel=True) + use_model_parallel=True, + ) else: output_ = output_parallel output = output_ + self.bias if self.bias is not None else output_ @@ -867,12 +1004,14 @@ class MAOutputScaleLayer(Layer): Calculate the scale (moving average abs max) for the output of the input layer. """ - def __init__(self, - layer=None, - moving_rate=0.9, - name=None, - dtype='float32', - reduce_type=None): + def __init__( + self, + layer=None, + moving_rate=0.9, + name=None, + dtype='float32', + reduce_type=None, + ): r""" Construct """ @@ -880,14 +1019,18 @@ class MAOutputScaleLayer(Layer): self._layer = layer if name is None: name = layer.full_name() - self._ma_output_scale = \ - MovingAverageAbsMaxScale(name, moving_rate, dtype, reduce_type) + self._ma_output_scale = MovingAverageAbsMaxScale( + name, moving_rate, dtype, reduce_type + ) def forward(self, *inputs, **kwargs): out = self._layer(*inputs, **kwargs) # TODO (jc): support the ops of several outputs - if (isinstance(out, list) or isinstance(out, tuple) - or isinstance(out, dict)): + if ( + isinstance(out, list) + or isinstance(out, tuple) + or isinstance(out, dict) + ): return out else: return self._ma_output_scale(out) @@ -898,15 +1041,17 @@ class FakeQuantMAOutputScaleLayer(Layer): Add FakeQuantMovingAverageAbsMax layer to the behind of the input layer. 
""" - def __init__(self, - layer, - weight_bits=8, - activation_bits=8, - moving_rate=0.9, - name=None, - reduce_type=None, - *args, - **kwargs): + def __init__( + self, + layer, + weight_bits=8, + activation_bits=8, + moving_rate=0.9, + name=None, + reduce_type=None, + *args, + **kwargs + ): super(FakeQuantMAOutputScaleLayer, self).__init__() self._layer = layer @@ -917,7 +1062,8 @@ class FakeQuantMAOutputScaleLayer(Layer): quant_bits=activation_bits, dtype=self._dtype, quant_on_weight=False, - reduce_type=reduce_type) + reduce_type=reduce_type, + ) def forward(self, *inputs, **kwargs): out = self._layer(*inputs, **kwargs) @@ -933,7 +1079,7 @@ def _get_fake_quant_type(quant_type, **kwargs): "name": kwargs.get("name", None), "quant_bits": kwargs.get("quant_bits", 8), "dtype": kwargs.get("dtype", "float32"), - "reduce_type": kwargs.get("reduce_type", None) + "reduce_type": kwargs.get("reduce_type", None), } if quant_type == 'abs_max': @@ -946,7 +1092,8 @@ def _get_fake_quant_type(quant_type, **kwargs): call_args["quant_axis"] = kwargs.get("quant_axis", 0) assert call_args["channel_num"] is not None, ( "You need to input channel_num" - "when you use channel_wise_abs_max strategy.") + "when you use channel_wise_abs_max strategy." + ) elif quant_type == 'lsq_weight': call_args["all_postive"] = kwargs.get("all_postive", False) call_args["per_channel"] = False @@ -960,7 +1107,8 @@ def _get_fake_quant_type(quant_type, **kwargs): call_args["quant_linear"] = kwargs.get("quant_linear", False) assert call_args["channel_num"] is not None, ( "You need to input channel_num" - "when you use channel_wise_abs_max strategy.") + "when you use channel_wise_abs_max strategy." + ) elif quant_type == 'lsq_act': call_args["all_postive"] = kwargs.get("all_postive", False) call_args["symmetric"] = kwargs.get("symmetric", True) @@ -969,7 +1117,7 @@ def _get_fake_quant_type(quant_type, **kwargs): 'moving_average_abs_max': FakeQuantMovingAverageAbsMax, 'channel_wise_abs_max': FakeQuantChannelWiseAbsMax, 'lsq_weight': FakeQuantWeightLSQPlus, - 'lsq_act': FakeQuantActLSQPlus + 'lsq_act': FakeQuantActLSQPlus, } return fake_quant_map[quant_type](**call_args) diff --git a/python/paddle/nn/utils/__init__.py b/python/paddle/nn/utils/__init__.py index 5afdaa8d8489662e25e1eeb16bfe18f3ef8c1ac9..23e1e233cc0dcd9d064ac1f6fa0211c2d6961648 100644 --- a/python/paddle/nn/utils/__init__.py +++ b/python/paddle/nn/utils/__init__.py @@ -14,9 +14,16 @@ from .spectral_norm_hook import spectral_norm from .weight_norm_hook import weight_norm, remove_weight_norm # noqa: F401 -from .transform_parameters import parameters_to_vector, vector_to_parameters, _stride_column # noqa: F401 +from .transform_parameters import ( + parameters_to_vector, + vector_to_parameters, + _stride_column, +) # noqa: F401 -__all__ = [ #noqa - 'weight_norm', 'remove_weight_norm', 'spectral_norm', - 'parameters_to_vector', 'vector_to_parameters' +__all__ = [ # noqa + 'weight_norm', + 'remove_weight_norm', + 'spectral_norm', + 'parameters_to_vector', + 'vector_to_parameters', ] diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py index a2a93d3241e3de4709dfcd91776d1188f93d6a16..288e5ea59c91a8f841d39f24e2f10600365202cb 100644 --- a/python/paddle/nn/utils/spectral_norm_hook.py +++ b/python/paddle/nn/utils/spectral_norm_hook.py @@ -20,21 +20,21 @@ from .. 
import functional as F __all__ = [] -def normal_(x, mean=0., std=1.): +def normal_(x, mean=0.0, std=1.0): temp_value = paddle.normal(mean, std, shape=x.shape) x.set_value(temp_value) return x class SpectralNorm(object): - def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12): self.name = name self.dim = dim if n_power_iterations <= 0: raise ValueError( 'Expected n_power_iterations to be positive, but ' - 'got n_power_iterations={}'.format(n_power_iterations)) + 'got n_power_iterations={}'.format(n_power_iterations) + ) self.n_power_iterations = n_power_iterations self.eps = eps @@ -43,8 +43,9 @@ class SpectralNorm(object): if self.dim != 0: # transpose dim to front weight_mat = weight_mat.transpose( - [self.dim] + - [d for d in range(weight_mat.dim()) if d != self.dim]) + [self.dim] + + [d for d in range(weight_mat.dim()) if d != self.dim] + ) height = weight_mat.shape[0] @@ -61,20 +62,24 @@ class SpectralNorm(object): for _ in range(self.n_power_iterations): v.set_value( F.normalize( - paddle.matmul(weight_mat, - u, - transpose_x=True, - transpose_y=False), + paddle.matmul( + weight_mat, + u, + transpose_x=True, + transpose_y=False, + ), axis=0, epsilon=self.eps, - )) + ) + ) u.set_value( F.normalize( paddle.matmul(weight_mat, v), axis=0, epsilon=self.eps, - )) + ) + ) if self.n_power_iterations > 0: u = u.clone() v = v.clone() @@ -84,15 +89,20 @@ class SpectralNorm(object): return weight def __call__(self, layer, inputs): - setattr(layer, self.name, - self.compute_weight(layer, do_power_iteration=layer.training)) + setattr( + layer, + self.name, + self.compute_weight(layer, do_power_iteration=layer.training), + ) @staticmethod def apply(layer, name, n_power_iterations, dim, eps): for k, hook in layer._forward_pre_hooks.items(): if isinstance(hook, SpectralNorm) and hook.name == name: - raise RuntimeError("Cannot register two spectral_norm hooks on " - "the same parameter {}".format(name)) + raise RuntimeError( + "Cannot register two spectral_norm hooks on " + "the same parameter {}".format(name) + ) fn = SpectralNorm(name, n_power_iterations, dim, eps) weight = layer._parameters[name] @@ -103,9 +113,9 @@ class SpectralNorm(object): # randomly initialize u and v u = layer.create_parameter([h]) - u = normal_(u, 0., 1.) + u = normal_(u, 0.0, 1.0) v = layer.create_parameter([w]) - v = normal_(v, 0., 1.) 
+ v = normal_(v, 0.0, 1.0) u = F.normalize(u, axis=0, epsilon=fn.eps) v = F.normalize(v, axis=0, epsilon=fn.eps) @@ -124,11 +134,9 @@ class SpectralNorm(object): return fn -def spectral_norm(layer, - name='weight', - n_power_iterations=1, - eps=1e-12, - dim=None): +def spectral_norm( + layer, name='weight', n_power_iterations=1, eps=1e-12, dim=None +): r""" Applies spectral normalization to a parameter according to the following Calculation: @@ -198,8 +206,8 @@ def spectral_norm(layer, if dim is None: if isinstance( - layer, - (Conv1DTranspose, Conv2DTranspose, Conv3DTranspose, Linear)): + layer, (Conv1DTranspose, Conv2DTranspose, Conv3DTranspose, Linear) + ): dim = 1 else: dim = 0 diff --git a/python/paddle/nn/utils/transform_parameters.py b/python/paddle/nn/utils/transform_parameters.py index 63ddd0974446abc2a01f860699d6eb6f576b0f8e..4076b808dda7449286bbbf9082d086a17605bbf9 100644 --- a/python/paddle/nn/utils/transform_parameters.py +++ b/python/paddle/nn/utils/transform_parameters.py @@ -15,11 +15,16 @@ from functools import reduce import paddle -from paddle.fluid.framework import dygraph_only, _dygraph_tracer, _varbase_creator, in_dygraph_mode +from paddle.fluid.framework import ( + dygraph_only, + _dygraph_tracer, + _varbase_creator, + in_dygraph_mode, +) from paddle import _C_ops -#input==output, inplace strategy of reshape has no cost almostly +# input==output, inplace strategy of reshape has no cost almostly def _inplace_reshape_dygraph(x, shape): x_shape = _varbase_creator(dtype='int64') if in_dygraph_mode(): @@ -27,14 +32,13 @@ def _inplace_reshape_dygraph(x, shape): tmp_out = _C_ops.reshape(x, shape) tmp_out._share_underline_tensor_to(x) else: - _dygraph_tracer().trace_op(type="reshape2", - inputs={'X': x}, - outputs={ - 'Out': x, - 'XShape': x_shape - }, - attrs={'shape': shape}, - stop_gradient=True) + _dygraph_tracer().trace_op( + type="reshape2", + inputs={'X': x}, + outputs={'Out': x, 'XShape': x_shape}, + attrs={'shape': shape}, + stop_gradient=True, + ) @dygraph_only @@ -106,11 +110,13 @@ def parameters_to_vector(parameters, name=None): tmp = _C_ops.concat(parameters, 0) tmp._share_underline_tensor_to(out) else: - _dygraph_tracer().trace_op(type='concat', - inputs={'X': parameters}, - outputs={'Out': [out]}, - attrs={'axis': 0}, - stop_gradient=True) + _dygraph_tracer().trace_op( + type='concat', + inputs={'X': parameters}, + outputs={'Out': [out]}, + attrs={'axis': 0}, + stop_gradient=True, + ) for i, param in enumerate(parameters): _inplace_reshape_dygraph(param, origin_shapes[i]) return out @@ -161,14 +167,13 @@ def vector_to_parameters(vec, parameters, name=None): for i in range(0, len(parameters)): res[i]._share_underline_tensor_to(parameters[i]) else: - _dygraph_tracer().trace_op(type='split', - inputs={'X': [vec]}, - outputs={'Out': parameters}, - attrs={ - 'axis': 0, - 'sections': sections - }, - stop_gradient=True) + _dygraph_tracer().trace_op( + type='split', + inputs={'X': [vec]}, + outputs={'Out': parameters}, + attrs={'axis': 0, 'sections': sections}, + stop_gradient=True, + ) for i, param in enumerate(parameters): _inplace_reshape_dygraph(param, origin_shapes[i]) diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py index b836ad2843907003ddca5f33297f7a765b3bb4b8..cdcb97aa9a0dc25b6b15e9c46463ffccf63e6e4d 100755 --- a/python/paddle/nn/utils/weight_norm_hook.py +++ b/python/paddle/nn/utils/weight_norm_hook.py @@ -34,16 +34,15 @@ def l2_norm(x, axis, epsilon=1e-12, name=None): helper = LayerHelper("l2_normalize", 
**locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) norm = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="norm", - inputs={"X": x}, - outputs={ - "Out": out, - "Norm": norm - }, - attrs={ - "axis": 1 if axis is None else axis, - "epsilon": epsilon, - }) + helper.append_op( + type="norm", + inputs={"X": x}, + outputs={"Out": out, "Norm": norm}, + attrs={ + "axis": 1 if axis is None else axis, + "epsilon": epsilon, + }, + ) return paddle.squeeze(norm, axis=[axis]) @@ -90,14 +89,13 @@ def _weight_norm(v, g, dim): v_normalized = F.l2_normalize(p_matrix, axis=1) v_normalized = paddle.reshape(v_normalized, transposed_shape) v_normalized = paddle.transpose(v_normalized, perm) - weight = F.elementwise_mul(v_normalized, - g, - axis=dim if dim is not None else -1) + weight = F.elementwise_mul( + v_normalized, g, axis=dim if dim is not None else -1 + ) return weight class WeightNorm(object): - def __init__(self, name, dim): if dim is None: dim = -1 @@ -113,8 +111,10 @@ class WeightNorm(object): def apply(layer, name, dim): for k, hook in layer._forward_pre_hooks.items(): if isinstance(hook, WeightNorm) and hook.name == name: - raise RuntimeError("Cannot register two weight_norm hooks on " - "the same parameter {}".format(name)) + raise RuntimeError( + "Cannot register two weight_norm hooks on " + "the same parameter {}".format(name) + ) if dim is None: dim = -1 diff --git a/python/paddle/onnx/export.py b/python/paddle/onnx/export.py index 5cdf0b06eece8bd8dce17a80f32d22a859dda7ac..7123f485bf8889781e287331d0ac1ef073a5ebee 100644 --- a/python/paddle/onnx/export.py +++ b/python/paddle/onnx/export.py @@ -94,11 +94,14 @@ def export(layer, path, input_spec=None, opset_version=9, **configs): raise ValueError( "The input path MUST be format of dirname/file_prefix " "[dirname\\file_prefix in Windows system], but " - "the file_prefix is empty in received path: {}".format(path)) + "the file_prefix is empty in received path: {}".format(path) + ) save_file = path + '.onnx' - p2o.dygraph2onnx(layer, - save_file, - input_spec=input_spec, - opset_version=opset_version, - **configs) + p2o.dygraph2onnx( + layer, + save_file, + input_spec=input_spec, + opset_version=opset_version, + **configs + ) diff --git a/python/paddle/optimizer/__init__.py b/python/paddle/optimizer/__init__.py index cd75fd4906ea51cbdf18d534b750b7eddad49425..cef51897b20ab87211a89d0b86635d7e185ad4b7 100644 --- a/python/paddle/optimizer/__init__.py +++ b/python/paddle/optimizer/__init__.py @@ -24,7 +24,15 @@ from .momentum import Momentum # noqa: F401 from .lamb import Lamb # noqa: F401 from . 
import lr # noqa: F401 -__all__ = [ #noqa - 'Optimizer', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'RMSProp', 'Adadelta', - 'SGD', 'Momentum', 'Lamb' +__all__ = [ # noqa + 'Optimizer', + 'Adagrad', + 'Adam', + 'AdamW', + 'Adamax', + 'RMSProp', + 'Adadelta', + 'SGD', + 'Momentum', + 'Lamb', ] diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py index 203ad8f617c3dfd193dbbdfa7e95baa67e528e97..9587f4dd8ee430fc37e60e4ff354ace094e3654c 100644 --- a/python/paddle/optimizer/adadelta.py +++ b/python/paddle/optimizer/adadelta.py @@ -107,25 +107,29 @@ class Adadelta(Optimizer): _avg_squared_grad_acc_str = "_avg_squared_grad" _avg_squared_update_acc_str = "_avg_squared_update" - def __init__(self, - learning_rate=0.001, - epsilon=1.0e-6, - rho=0.95, - parameters=None, - weight_decay=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate=0.001, + epsilon=1.0e-6, + rho=0.95, + parameters=None, + weight_decay=None, + grad_clip=None, + name=None, + ): if learning_rate is None: raise ValueError("learning_rate is not set.") if epsilon is None: raise ValueError("epsilon is not set.") if rho is None: raise ValueError("rho is not set.") - super(Adadelta, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + super(Adadelta, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "adadelta" self._epsilon = epsilon self._rho = rho @@ -149,43 +153,44 @@ class Adadelta(Optimizer): param_and_grad = self._update_param_group(param_and_grad) avg_squared_grad_acc = self._get_accumulator( - self._avg_squared_grad_acc_str, param_and_grad[0]) + self._avg_squared_grad_acc_str, param_and_grad[0] + ) avg_squared_update_acc = self._get_accumulator( - self._avg_squared_update_acc_str, param_and_grad[0]) + self._avg_squared_update_acc_str, param_and_grad[0] + ) if in_dygraph_mode(): with no_grad(): - _C_ops.adadelta_(param_and_grad[0], param_and_grad[1], - avg_squared_grad_acc, avg_squared_update_acc, - self._rho, self._epsilon) + _C_ops.adadelta_( + param_and_grad[0], + param_and_grad[1], + avg_squared_grad_acc, + avg_squared_update_acc, + self._rho, + self._epsilon, + ) return None if not isinstance(block, framework.Block): raise TypeError("block is not instance of framework.Block.") # Create the adadelta optimizer op - adadelta_op = block.append_op(type=self.type, - inputs={ - "Param": param_and_grad[0], - "Grad": param_and_grad[1], - "AvgSquaredGrad": - avg_squared_grad_acc, - "AvgSquaredUpdate": - avg_squared_update_acc - }, - outputs={ - "ParamOut": - param_and_grad[0], - "AvgSquaredGradOut": - avg_squared_grad_acc, - "AvgSquaredUpdateOut": - avg_squared_update_acc - }, - attrs={ - "epsilon": self._epsilon, - "rho": self._rho - }, - stop_gradient=True) + adadelta_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "AvgSquaredGrad": avg_squared_grad_acc, + "AvgSquaredUpdate": avg_squared_update_acc, + }, + outputs={ + "ParamOut": param_and_grad[0], + "AvgSquaredGradOut": avg_squared_grad_acc, + "AvgSquaredUpdateOut": avg_squared_update_acc, + }, + attrs={"epsilon": self._epsilon, "rho": self._rho}, + stop_gradient=True, + ) return adadelta_op diff --git a/python/paddle/optimizer/adagrad.py b/python/paddle/optimizer/adagrad.py index e0296a1cb0a6c9422644b4f650b366dd2f54b721..a4d9416e93bcc2ff10a444a1d7b4af37582da350 100644 --- 
a/python/paddle/optimizer/adagrad.py +++ b/python/paddle/optimizer/adagrad.py @@ -106,21 +106,25 @@ class Adagrad(Optimizer): """ _moment_acc_str = "moment" - def __init__(self, - learning_rate, - epsilon=1.0e-6, - parameters=None, - weight_decay=None, - grad_clip=None, - name=None, - initial_accumulator_value=0.0): + def __init__( + self, + learning_rate, + epsilon=1.0e-6, + parameters=None, + weight_decay=None, + grad_clip=None, + name=None, + initial_accumulator_value=0.0, + ): assert learning_rate is not None assert epsilon is not None - super(Adagrad, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + super(Adagrad, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "adagrad" self._epsilon = epsilon self.initial_accumulator_value = initial_accumulator_value @@ -136,9 +140,11 @@ class Adagrad(Optimizer): parameters = self._update_param_group(parameters) for p in parameters: - self._add_accumulator(self._moment_acc_str, - p, - fill_value=self.initial_accumulator_value) + self._add_accumulator( + self._moment_acc_str, + p, + fill_value=self.initial_accumulator_value, + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -146,26 +152,22 @@ class Adagrad(Optimizer): if isinstance(param_and_grad, dict): param_and_grad = self._update_param_group(param_and_grad) - moment_acc = self._get_accumulator(self._moment_acc_str, - param_and_grad[0]) + moment_acc = self._get_accumulator( + self._moment_acc_str, param_and_grad[0] + ) # Create the adagrad optimizer op - adagrad_op = block.append_op(type=self.type, - inputs={ - "Param": - param_and_grad[0], - "Grad": - param_and_grad[1], - "Moment": - moment_acc, - "LearningRate": - self._create_param_lr(param_and_grad) - }, - outputs={ - "ParamOut": param_and_grad[0], - "MomentOut": moment_acc - }, - attrs={"epsilon": self._epsilon}, - stop_gradient=True) + adagrad_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "Moment": moment_acc, + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={"ParamOut": param_and_grad[0], "MomentOut": moment_acc}, + attrs={"epsilon": self._epsilon}, + stop_gradient=True, + ) return adagrad_op @@ -173,6 +175,7 @@ class Adagrad(Optimizer): self._epsilon = parameters.get('epsilon', self._default_dict['epsilon']) self.initial_accumulator_value = parameters.get( 'initial_accumulator_value', - self._default_dict['initial_accumulator_value']) + self._default_dict['initial_accumulator_value'], + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py index a49af1a458988ebc50c9166ce0e7d9204475f35e..c6e03ccf64cc5ab5edc65045a1ac889bd221f405 100644 --- a/python/paddle/optimizer/adam.py +++ b/python/paddle/optimizer/adam.py @@ -161,18 +161,20 @@ class Adam(Optimizer): _beta1_pow_acc_str = "beta1_pow_acc" _beta2_pow_acc_str = "beta2_pow_acc" - def __init__(self, - learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - parameters=None, - weight_decay=None, - grad_clip=None, - lazy_mode=False, - multi_precision=False, - use_multi_tensor=False, - name=None): + def __init__( + self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameters=None, + weight_decay=None, + grad_clip=None, + lazy_mode=False, + 
multi_precision=False, + use_multi_tensor=False, + name=None, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None @@ -180,20 +182,25 @@ class Adam(Optimizer): if not isinstance(beta1, Variable): if not 0 <= beta1 < 1: raise ValueError( - "Invaild value of beta1, expect beta1 in [0,1).") + "Invaild value of beta1, expect beta1 in [0,1)." + ) if not isinstance(beta2, Variable): if not 0 <= beta2 < 1: raise ValueError( - "Invaild value of beta2, expect beta2 in [0,1).") + "Invaild value of beta2, expect beta2 in [0,1)." + ) if not isinstance(epsilon, Variable): if not 0 <= epsilon: raise ValueError( - "Invaild value of epsilon, expect epsilon >= 0.") - super(Adam, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + "Invaild value of epsilon, expect epsilon >= 0." + ) + super(Adam, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "adam" self._beta1 = beta1 self._beta2 = beta2 @@ -215,15 +222,15 @@ class Adam(Optimizer): self._moment2_dict = {'FP32_LODTensor': [], 'FP16_LODTensor': []} self._beta1_pow_acc_dict = { 'FP32_LODTensor': [], - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } self._beta2_pow_acc_dict = { 'FP32_LODTensor': [], - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } self._master_weight_dict = { 'FP32_LODTensor': None, - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } def _create_master_weight(self, param): @@ -234,19 +241,23 @@ class Adam(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -260,20 +271,30 @@ class Adam(Optimizer): """ if self._name is not None: name = self._name + "_" + name - find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _add_moments_pows(self, p): acc_dtype = p.dtype - if acc_dtype == core.VarDesc.VarType.FP16 or acc_dtype == core.VarDesc.VarType.BF16: + if ( + acc_dtype == core.VarDesc.VarType.FP16 + or acc_dtype == core.VarDesc.VarType.BF16 + ): acc_dtype = core.VarDesc.VarType.FP32 self._add_accumulator(self._moment1_acc_str, p, dtype=acc_dtype) 
self._add_accumulator(self._moment2_acc_str, p, dtype=acc_dtype) @@ -281,18 +302,24 @@ class Adam(Optimizer): name=self._beta1_pow_acc_str, param=p, dtype=acc_dtype, - fill_value=0.9 if isinstance(self._beta1, Variable) \ - else self._beta1, + fill_value=0.9 + if isinstance(self._beta1, Variable) + else self._beta1, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) self._add_accumulator( name=self._beta2_pow_acc_str, param=p, dtype=acc_dtype, - fill_value=0.999 if isinstance(self._beta2, Variable) \ - else self._beta2, + fill_value=0.999 + if isinstance(self._beta2, Variable) + else self._beta2, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) @@ -305,7 +332,10 @@ class Adam(Optimizer): master_p = self._create_master_weight(p) self._add_moments_pows(master_p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Adam optimizer." @@ -317,50 +347,105 @@ class Adam(Optimizer): if isinstance(param_and_grad, dict): param_and_grad = self._update_param_group(param_and_grad) - moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) - moment2 = self._get_accumulator(self._moment2_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) + moment2 = self._get_accumulator( + self._moment2_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) lr = self._create_param_lr(param_and_grad) # create the adam optimize op if framework.in_dygraph_mode(): found_inf = self._get_auxiliary_var('found_inf') - _beta1 = self._beta1 if not isinstance( - self._beta1, Variable) else self._beta1.numpy().item(0) - _beta2 = self._beta2 if not isinstance( - self._beta2, Variable) else self._beta2.numpy().item(0) + _beta1 = ( + self._beta1 + if not isinstance(self._beta1, Variable) + else self._beta1.numpy().item(0) + ) + _beta2 = ( + self._beta2 + if not isinstance(self._beta2, Variable) + else self._beta2.numpy().item(0) + ) _, _, _, _, _, _ = _C_ops.adam_( - param_and_grad[0], param_and_grad[1], lr, moment1, moment2, - beta1_pow_acc, beta2_pow_acc, master_weight, found_inf, _beta1, - _beta2, self._epsilon, self._lazy_mode, 1000, find_master, - False) + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + found_inf, + _beta1, + _beta2, + self._epsilon, + self._lazy_mode, + 
1000, + find_master, + False, + ) return None if framework._in_legacy_dygraph(): - _beta1 = self._beta1 if not isinstance( - self._beta1, Variable) else self._beta1.numpy().item(0) - _beta2 = self._beta2 if not isinstance( - self._beta2, Variable) else self._beta2.numpy().item(0) + _beta1 = ( + self._beta1 + if not isinstance(self._beta1, Variable) + else self._beta1.numpy().item(0) + ) + _beta2 = ( + self._beta2 + if not isinstance(self._beta2, Variable) + else self._beta2.numpy().item(0) + ) _, _, _, _, _, _ = _legacy_C_ops.adam( - param_and_grad[0], param_and_grad[1], lr, moment1, moment2, - beta1_pow_acc, beta2_pow_acc, master_weight, param_and_grad[0], - moment1, moment2, beta1_pow_acc, beta2_pow_acc, master_weight, - 'epsilon', self._epsilon, 'lazy_mode', self._lazy_mode, - 'min_row_size_to_use_multithread', 1000, 'beta1', _beta1, - 'beta2', _beta2, 'multi_precision', find_master) + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + param_and_grad[0], + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + 'epsilon', + self._epsilon, + 'lazy_mode', + self._lazy_mode, + 'min_row_size_to_use_multithread', + 1000, + 'beta1', + _beta1, + 'beta2', + _beta2, + 'multi_precision', + find_master, + ) return None @@ -371,7 +456,7 @@ class Adam(Optimizer): "Moment1": [moment1], "Moment2": [moment2], "Beta1Pow": [beta1_pow_acc], - "Beta2Pow": [beta2_pow_acc] + "Beta2Pow": [beta2_pow_acc], } outputs = { "ParamOut": [param_and_grad[0]], @@ -383,7 +468,7 @@ class Adam(Optimizer): attrs = { "lazy_mode": self._lazy_mode, "min_row_size_to_use_multithread": 1000, - "multi_precision": find_master + "multi_precision": find_master, } if isinstance(self._beta1, Variable): @@ -403,11 +488,13 @@ class Adam(Optimizer): inputs["MasterParam"] = master_weight outputs["MasterParamOut"] = master_weight - adam_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + adam_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return adam_op @@ -443,24 +530,28 @@ class Adam(Optimizer): if param._grad_ivar() is not None: grad_var = param._grad_ivar() if in_dygraph_mode(): - if hasattr(grad_var, "is_selected_rows" - ) and grad_var.is_selected_rows( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "is_selected_rows") + and grad_var.is_selected_rows() + and self.regularization is not None + ): raise RuntimeError( "Adam don't support weight_decay with sparse parameters, please set it to None." ) else: - if hasattr( - grad_var, "_is_sparse") and grad_var._is_sparse( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "_is_sparse") + and grad_var._is_sparse() + and self.regularization is not None + ): raise RuntimeError( "Adam don't support weight_decay with sparse parameters, please set it to None." 
) params_grads.append((param, grad_var)) - optimize_ops = self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + optimize_ops = self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) else: # optimize parameters in groups for param_group in self._param_groups: @@ -472,11 +563,11 @@ class Adam(Optimizer): grad_var = param._grad_ivar() params_grads['params'].append((param, grad_var)) params_grads.update( - {k: v - for k, v in param_group.items() if k != 'params'}) - self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + {k: v for k, v in param_group.items() if k != 'params'} + ) + self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) def _multi_tensor_init(self, target_block, parameters): """ @@ -490,10 +581,12 @@ class Adam(Optimizer): for param in parameters: moment1 = self._get_accumulator(self._moment1_acc_str, param) moment2 = self._get_accumulator(self._moment2_acc_str, param) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param + ) if param.dtype == paddle.float32: self._param_dict['FP32_LODTensor'].append(param) @@ -509,7 +602,8 @@ class Adam(Optimizer): self._beta2_pow_acc_dict['FP16_LODTensor'].append(beta2_pow_acc) if self._multi_precision: self._master_weight_dict['FP16_LODTensor'].append( - self._master_weights[param.name]) + self._master_weights[param.name] + ) else: self._master_weight_dict['FP16_LODTensor'] = None else: @@ -517,8 +611,9 @@ class Adam(Optimizer): "Now multi_tensor_momentum only support fp32 and fp16 parameters and grad is LOD_TENSOR." ) - def _append_optimize_multi_tensor_op(self, target_block, - parameters_and_grads): + def _append_optimize_multi_tensor_op( + self, target_block, parameters_and_grads + ): """ For Multi Tensor, append optimize merged_operator to block. 
""" @@ -532,15 +627,19 @@ class Adam(Optimizer): if param_and_grad[1] is None: continue if param_and_grad[0].stop_gradient is False: - if param_and_grad[ - 0].dtype == paddle.float32 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + if ( + param_and_grad[0].dtype == paddle.float32 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP32_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP32_LODTensor'].append(lr) - elif param_and_grad[ - 0].dtype == paddle.float16 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + elif ( + param_and_grad[0].dtype == paddle.float16 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP16_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP16_LODTensor'].append(lr) @@ -551,21 +650,27 @@ class Adam(Optimizer): if param_and_grad[0].stop_gradient is False: param_grad_dict = dict() param_grad_dict['params'] = param_and_grad - param_grad_dict.update({ - k: v - for k, v in parameters_and_grads.items() - if k != 'params' - }) + param_grad_dict.update( + { + k: v + for k, v in parameters_and_grads.items() + if k != 'params' + } + ) param_and_grad = self._update_param_group(param_grad_dict) - if param_and_grad[ - 0].dtype == paddle.float32 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + if ( + param_and_grad[0].dtype == paddle.float32 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP32_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP32_LODTensor'].append(lr) - elif param_and_grad[ - 0].dtype == paddle.float16 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + elif ( + param_and_grad[0].dtype == paddle.float16 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP16_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP16_LODTensor'].append(lr) @@ -575,34 +680,59 @@ class Adam(Optimizer): if len(self._param_dict[key]) > 0: find_master = self._multi_precision and key == 'FP16_LODTensor' - _beta1 = self._beta1 if not isinstance( - self._beta1, Variable) else self._beta1.numpy().item(0) - _beta2 = self._beta2 if not isinstance( - self._beta2, Variable) else self._beta2.numpy().item(0) + _beta1 = ( + self._beta1 + if not isinstance(self._beta1, Variable) + else self._beta1.numpy().item(0) + ) + _beta2 = ( + self._beta2 + if not isinstance(self._beta2, Variable) + else self._beta2.numpy().item(0) + ) if framework._non_static_mode(): if in_dygraph_mode(): _, _, _, _, _, _ = _C_ops.merged_adam_( - self._param_dict[key], grad_dict[key], lr_dict[key], - self._moment1_dict[key], self._moment2_dict[key], + self._param_dict[key], + grad_dict[key], + lr_dict[key], + self._moment1_dict[key], + self._moment2_dict[key], self._beta1_pow_acc_dict[key], self._beta2_pow_acc_dict[key], - self._master_weight_dict[key], _beta1, _beta2, - self._epsilon, find_master, False) + self._master_weight_dict[key], + _beta1, + _beta2, + self._epsilon, + find_master, + False, + ) else: _, _, _, _, _, _ = _legacy_C_ops.merged_adam( - self._param_dict[key], grad_dict[key], lr_dict[key], - self._moment1_dict[key], self._moment2_dict[key], + self._param_dict[key], + grad_dict[key], + lr_dict[key], + self._moment1_dict[key], + self._moment2_dict[key], self._beta1_pow_acc_dict[key], self._beta2_pow_acc_dict[key], self._master_weight_dict[key], - 
self._param_dict[key], self._moment1_dict[key], + self._param_dict[key], + self._moment1_dict[key], self._moment2_dict[key], self._beta1_pow_acc_dict[key], self._beta2_pow_acc_dict[key], - self._master_weight_dict[key], 'epsilon', - self._epsilon, 'beta1', _beta1, 'beta2', _beta2, - 'multi_precision', find_master) + self._master_weight_dict[key], + 'epsilon', + self._epsilon, + 'beta1', + _beta1, + 'beta2', + _beta2, + 'multi_precision', + find_master, + ) else: inputs = { "Param": self._param_dict[key], @@ -611,37 +741,41 @@ class Adam(Optimizer): "Moment1": self._moment1_dict[key], "Moment2": self._moment2_dict[key], "Beta1Pow": self._beta1_pow_acc_dict[key], - "Beta2Pow": self._beta2_pow_acc_dict[key] + "Beta2Pow": self._beta2_pow_acc_dict[key], } outputs = { "ParamOut": self._param_dict[key], "Moment1Out": self._moment1_dict[key], "Moment2Out": self._moment2_dict[key], "Beta1PowOut": self._beta1_pow_acc_dict[key], - "Beta2PowOut": self._beta2_pow_acc_dict[key] + "Beta2PowOut": self._beta2_pow_acc_dict[key], } attrs = { "epsilon": self._epsilon, "beta1": _beta1, - "beta2": _beta2 + "beta2": _beta2, } if find_master: inputs["MasterParam"] = self._master_weight_dict[key] outputs["MasterParamOut"] = self._master_weight_dict[ - key] + key + ] attrs["multi_precision"] = find_master - target_block.append_op(type="merged_adam", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + target_block.append_op( + type="merged_adam", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return None def _update_param_group(self, parameters): self._beta1 = parameters.get('beta1', self._default_dict['beta1']) self._beta2 = parameters.get('beta2', self._default_dict['beta2']) self._epsilon = parameters.get('epsilon', self._default_dict['epsilon']) - self._lazy_mode = parameters.get('lazy_mode', - self._default_dict['lazy_mode']) + self._lazy_mode = parameters.get( + 'lazy_mode', self._default_dict['lazy_mode'] + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py index 81ae8531f0e13239e972586af390d4541fd221c6..0a809e6acf8db6c6900fb6be57c05bbaed6eb566 100644 --- a/python/paddle/optimizer/adamax.py +++ b/python/paddle/optimizer/adamax.py @@ -132,15 +132,17 @@ class Adamax(Optimizer): _inf_norm_acc_str = "inf_norm" _beta1_pow_acc_str = "beta1_pow_acc" - def __init__(self, - learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - parameters=None, - weight_decay=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameters=None, + weight_decay=None, + grad_clip=None, + name=None, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None @@ -151,11 +153,13 @@ class Adamax(Optimizer): raise ValueError("Invaild value of beta2, expect beta2 in [0,1).") if not 0 <= epsilon: raise ValueError("Invaild value of epsilon, expect epsilon >= 0.") - super(Adamax, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + super(Adamax, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "adamax" self._beta1 = beta1 self._beta2 = beta2 @@ -163,7 +167,7 @@ class Adamax(Optimizer): self._default_dict = { 'beta1': beta1, 'beta2': beta2, - 'epsilon': epsilon + 'epsilon': epsilon, } def 
_create_accumulators(self, block, parameters): @@ -174,10 +178,12 @@ class Adamax(Optimizer): for p in parameters: self._add_accumulator(self._moment_acc_str, p) self._add_accumulator(self._inf_norm_acc_str, p) - self._add_accumulator(name=self._beta1_pow_acc_str, - param=p, - fill_value=self._beta1, - shape=[1]) + self._add_accumulator( + name=self._beta1_pow_acc_str, + param=p, + fill_value=self._beta1, + shape=[1], + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -185,22 +191,43 @@ class Adamax(Optimizer): param_and_grad = self._update_param_group(param_and_grad) moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) - inf_norm = self._get_accumulator(self._inf_norm_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) + inf_norm = self._get_accumulator( + self._inf_norm_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) if framework.in_dygraph_mode(): - _C_ops.adamax_(param_and_grad[0], param_and_grad[1], - self._create_param_lr(param_and_grad), moment, - inf_norm, beta1_pow_acc, self._beta1, self._beta2, - self._epsilon) + _C_ops.adamax_( + param_and_grad[0], + param_and_grad[1], + self._create_param_lr(param_and_grad), + moment, + inf_norm, + beta1_pow_acc, + self._beta1, + self._beta2, + self._epsilon, + ) elif framework._in_legacy_dygraph(): - _legacy_C_ops.adamax(param_and_grad[0], param_and_grad[1], - self._create_param_lr(param_and_grad), moment, - inf_norm, beta1_pow_acc, param_and_grad[0], - moment, inf_norm, "beta1", self._beta1, - "beta2", self._beta2, "epsilon", self._epsilon) + _legacy_C_ops.adamax( + param_and_grad[0], + param_and_grad[1], + self._create_param_lr(param_and_grad), + moment, + inf_norm, + beta1_pow_acc, + param_and_grad[0], + moment, + inf_norm, + "beta1", + self._beta1, + "beta2", + self._beta2, + "epsilon", + self._epsilon, + ) else: # create the adamax optimize op adamax_op = block.append_op( @@ -211,25 +238,25 @@ class Adamax(Optimizer): "LearningRate": self._create_param_lr(param_and_grad), "Moment": moment, "InfNorm": inf_norm, - "Beta1Pow": beta1_pow_acc + "Beta1Pow": beta1_pow_acc, }, outputs={ "ParamOut": param_and_grad[0], "MomentOut": moment, - "InfNormOut": inf_norm + "InfNormOut": inf_norm, }, attrs={ "beta1": self._beta1, "beta2": self._beta2, - "epsilon": self._epsilon + "epsilon": self._epsilon, }, - stop_gradient=True) + stop_gradient=True, + ) return adamax_op def _finish_update(self, block, parameters_and_grads): - """Update Beta1 Power accumulator - """ + """Update Beta1 Power accumulator""" assert isinstance(block, framework.Block) if isinstance(parameters_and_grads, list): for param, grad in parameters_and_grads: @@ -237,47 +264,61 @@ class Adamax(Optimizer): continue if framework.in_dygraph_mode(): beta1_pow_acc = self._get_accumulator( - self._beta1_pow_acc_str, param) + self._beta1_pow_acc_str, param + ) with no_grad(): - tmp = _C_ops.scale(beta1_pow_acc, self._beta1, 0.0, - True) + tmp = _C_ops.scale( + beta1_pow_acc, self._beta1, 0.0, True + ) beta1_pow_acc.copy_(tmp, False) continue with param.block.program._optimized_guard( - [param, grad]), name_scope('adamax'): + [param, grad] + ), name_scope('adamax'): beta1_pow_acc = self._get_accumulator( - self._beta1_pow_acc_str, param) - block.append_op(type="scale", - inputs={"X": beta1_pow_acc}, - outputs={"Out": beta1_pow_acc}, - attrs={"scale": self._beta1}, - stop_gradient=True) + 
self._beta1_pow_acc_str, param + ) + block.append_op( + type="scale", + inputs={"X": beta1_pow_acc}, + outputs={"Out": beta1_pow_acc}, + attrs={"scale": self._beta1}, + stop_gradient=True, + ) else: for param, grad in parameters_and_grads['params']: if grad is None or param.stop_gradient is True: continue if framework.in_dygraph_mode(): beta1_pow_acc = self._get_accumulator( - self._beta1_pow_acc_str, param) + self._beta1_pow_acc_str, param + ) self._beta1 = parameters_and_grads.get( - 'beta1', self._default_dict['beta1']) + 'beta1', self._default_dict['beta1'] + ) with no_grad(): - tmp = _C_ops.scale(beta1_pow_acc, self._beta1, 0.0, - True) + tmp = _C_ops.scale( + beta1_pow_acc, self._beta1, 0.0, True + ) beta1_pow_acc.copy_(tmp, False) continue with param.block.program._optimized_guard( - [param, grad]), name_scope('adamax'): + [param, grad] + ), name_scope('adamax'): beta1_pow_acc = self._get_accumulator( - self._beta1_pow_acc_str, param) + self._beta1_pow_acc_str, param + ) self._beta1 = parameters_and_grads.get( - 'beta1', self._default_dict['beta1']) - block.append_op(type="scale", - inputs={"X": beta1_pow_acc}, - outputs={"Out": beta1_pow_acc}, - attrs={"scale": self._beta1}, - stop_gradient=True) + 'beta1', self._default_dict['beta1'] + ) + block.append_op( + type="scale", + inputs={"X": beta1_pow_acc}, + outputs={"Out": beta1_pow_acc}, + attrs={"scale": self._beta1}, + stop_gradient=True, + ) def _update_param_group(self, parameters): self._beta1 = parameters.get('beta1', self._default_dict['beta1']) diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py index 1c0dbb3134c809b299824906f265e48b34018f49..dca844b6682759394d618e3220a2dadfe6be8460 100644 --- a/python/paddle/optimizer/adamw.py +++ b/python/paddle/optimizer/adamw.py @@ -148,19 +148,21 @@ class AdamW(Optimizer): _beta1_pow_acc_str = "beta1_pow_acc" _beta2_pow_acc_str = "beta2_pow_acc" - def __init__(self, - learning_rate=0.001, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - parameters=None, - weight_decay=0.01, - lr_ratio=None, - apply_decay_param_fun=None, - grad_clip=None, - lazy_mode=False, - multi_precision=False, - name=None): + def __init__( + self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + parameters=None, + weight_decay=0.01, + lr_ratio=None, + apply_decay_param_fun=None, + grad_clip=None, + lazy_mode=False, + multi_precision=False, + name=None, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None @@ -171,14 +173,16 @@ class AdamW(Optimizer): raise ValueError("Invaild value of beta2, expect beta2 in [0,1).") if not 0 <= epsilon: raise ValueError("Invaild value of epsilon, expect epsilon >= 0.") - if not isinstance(weight_decay, float) and \ - not isinstance(weight_decay, framework.Variable): + if not isinstance(weight_decay, float) and not isinstance( + weight_decay, framework.Variable + ): raise TypeError("weight_decay should be float or Tensor.") if lr_ratio is not None: assert isinstance(lr_ratio, Callable) if not core.is_compiled_with_cuda(): raise NotImplementedError( - "'lr_ratio' is unimplemented in CPU, XPU and NPU") + "'lr_ratio' is unimplemented in CPU, XPU and NPU" + ) if parameters is not None: # paddle.Tensor is also iterable, so here we don't check whether @@ -187,13 +191,16 @@ class AdamW(Optimizer): if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)): raise TypeError( "`parameters` argument given to the optimizer should be " - "an iterable of paddle Tensors, but got argument type is `{}`." 
- .format(type(parameters))) + "an iterable of paddle Tensors, but got argument type is `{}`.".format( + type(parameters) + ) + ) if isinstance(parameters, dict): raise TypeError( "`parameters` argument should not get dict type, " "if parameter groups is needed, please set `parameters`" - " as list of dict") + " as list of dict" + ) self._parameter_list = list(parameters) else: self._parameter_list = None @@ -207,8 +214,9 @@ class AdamW(Optimizer): if not isinstance(learning_rate, (float, LRScheduler)): raise TypeError( - "learning rate should be float or LRScheduler, got %s here" % - type(learning_rate)) + "learning rate should be float or LRScheduler, got %s here" + % type(learning_rate) + ) if grad_clip is not None: if not isinstance(grad_clip, GradientClipBase): raise TypeError( @@ -220,8 +228,9 @@ class AdamW(Optimizer): if self._parameter_list: if isinstance(self._parameter_list[0], dict): for param_group in self._parameter_list: - assert 'params' in param_group, \ - 'params should be set in parameters if parameter groups are optimized in different options' + assert ( + 'params' in param_group + ), 'params should be set in parameters if parameter groups are optimized in different options' self._dtype = self._parameter_list[0]['params'][0].dtype else: self._dtype = self._parameter_list[0].dtype @@ -260,7 +269,7 @@ class AdamW(Optimizer): 'beta2': beta2, 'epsilon': epsilon, 'lazy_mode': lazy_mode, - 'grad_clip': grad_clip + 'grad_clip': grad_clip, } self._param_groups = [] @@ -297,7 +306,8 @@ class AdamW(Optimizer): elif isinstance(params, set): raise TypeError( "optimizer parameters should be in ordered collections," - "but received set, please use list instead.") + "but received set, please use list instead." + ) else: param_group['params'] = list(params) @@ -311,11 +321,13 @@ class AdamW(Optimizer): if not param_set.isdisjoint(set(param_group['params'])): raise ValueError( - "some parameters appear in more than one parameter group") + "some parameters appear in more than one parameter group" + ) for param in param_group['params']: param.optimize_attr['learning_rate'] = param_group.get( - 'learning_rate', 1.) 
+ 'learning_rate', 1.0 + ) self._param_groups.append(param_group) @@ -327,19 +339,23 @@ class AdamW(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -353,15 +369,22 @@ class AdamW(Optimizer): """ if self._name is not None: name = self._name + "_" + name - find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _add_moments_pows(self, p): @@ -374,18 +397,24 @@ class AdamW(Optimizer): name=self._beta1_pow_acc_str, param=p, dtype=acc_dtype, - fill_value=0.9 if isinstance(self._beta1, Variable) \ - else self._beta1, + fill_value=0.9 + if isinstance(self._beta1, Variable) + else self._beta1, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) self._add_accumulator( name=self._beta2_pow_acc_str, param=p, dtype=acc_dtype, - fill_value=0.999 if isinstance(self._beta2, Variable) \ - else self._beta2, + fill_value=0.999 + if isinstance(self._beta2, Variable) + else self._beta2, shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) @@ -398,7 +427,10 @@ class AdamW(Optimizer): master_p = self._create_master_weight(p) self._add_moments_pows(master_p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Adam optimizer." @@ -413,53 +445,112 @@ class AdamW(Optimizer): # Whether we should do weight decay for the parameter. 
with_decay = True - if self._apply_decay_param_fun is not None \ - and not self._apply_decay_param_fun(param.name): + if ( + self._apply_decay_param_fun is not None + and not self._apply_decay_param_fun(param.name) + ): with_decay = False - moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) - moment2 = self._get_accumulator(self._moment2_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) + moment2 = self._get_accumulator( + self._moment2_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) lr = self._create_param_lr(param_and_grad) # create the adamw optimize op if framework._non_static_mode(): - lr_ratio_ = 1. if self._lr_ratio is None else self._lr_ratio( - param_and_grad[0]) - - _beta1 = self._beta1 if not isinstance( - self._beta1, Variable) else self._beta1.numpy().item(0) - _beta2 = self._beta2 if not isinstance( - self._beta2, Variable) else self._beta2.numpy().item(0) + lr_ratio_ = ( + 1.0 + if self._lr_ratio is None + else self._lr_ratio(param_and_grad[0]) + ) + + _beta1 = ( + self._beta1 + if not isinstance(self._beta1, Variable) + else self._beta1.numpy().item(0) + ) + _beta2 = ( + self._beta2 + if not isinstance(self._beta2, Variable) + else self._beta2.numpy().item(0) + ) if framework.in_dygraph_mode(): found_inf = self._get_auxiliary_var('found_inf') _, _, _, _, _, _ = _C_ops.adamw_( - param_and_grad[0], param_and_grad[1], lr, moment1, moment2, - beta1_pow_acc, beta2_pow_acc, master_weight, found_inf, - _beta1, _beta2, self._epsilon, lr_ratio_, - self._weight_decay, with_decay, self._lazy_mode, 1000, - find_master, False) + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + found_inf, + _beta1, + _beta2, + self._epsilon, + lr_ratio_, + self._weight_decay, + with_decay, + self._lazy_mode, + 1000, + find_master, + False, + ) else: _, _, _, _, _, _ = _legacy_C_ops.adamw( - param_and_grad[0], param_and_grad[1], lr, moment1, moment2, - beta1_pow_acc, beta2_pow_acc, master_weight, - param_and_grad[0], moment1, moment2, beta1_pow_acc, - beta2_pow_acc, master_weight, 'epsilon', self._epsilon, - 'lazy_mode', self._lazy_mode, - 'min_row_size_to_use_multithread', 1000, 'beta1', _beta1, - 'beta2', _beta2, "with_decay", with_decay, 'coeff', - self._weight_decay, 'multi_precision', find_master, - 'lr_ratio', lr_ratio_) + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + param_and_grad[0], + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + 'epsilon', + self._epsilon, + 'lazy_mode', + self._lazy_mode, + 'min_row_size_to_use_multithread', + 1000, + 'beta1', + _beta1, + 'beta2', + _beta2, + "with_decay", + with_decay, + 'coeff', + 
self._weight_decay, + 'multi_precision', + find_master, + 'lr_ratio', + lr_ratio_, + ) return None inputs = { @@ -486,18 +577,14 @@ class AdamW(Optimizer): "Beta2PowOut": [beta2_pow_acc], } attrs = { - "lazy_mode": - self._lazy_mode, - "min_row_size_to_use_multithread": - 1000, - "multi_precision": - find_master, - "with_decay": - with_decay, - "coeff": - self._weight_decay, - "lr_ratio": - 1. if self._lr_ratio is None else self._lr_ratio(param_and_grad[0]) + "lazy_mode": self._lazy_mode, + "min_row_size_to_use_multithread": 1000, + "multi_precision": find_master, + "with_decay": with_decay, + "coeff": self._weight_decay, + "lr_ratio": 1.0 + if self._lr_ratio is None + else self._lr_ratio(param_and_grad[0]), } if isinstance(self._beta1, Variable): @@ -517,11 +604,13 @@ class AdamW(Optimizer): inputs["MasterParam"] = master_weight outputs["MasterParamOut"] = master_weight - adamw_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + adamw_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return adamw_op @@ -560,24 +649,28 @@ class AdamW(Optimizer): if param._grad_ivar() is not None: grad_var = param._grad_ivar() if framework.in_dygraph_mode(): - if hasattr(grad_var, "is_selected_rows" - ) and grad_var.is_selected_rows( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "is_selected_rows") + and grad_var.is_selected_rows() + and self.regularization is not None + ): raise RuntimeError( "AdamW don't support weight_decay with sparse parameters, please set it to None." ) else: - if hasattr( - grad_var, "_is_sparse") and grad_var._is_sparse( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "_is_sparse") + and grad_var._is_sparse() + and self.regularization is not None + ): raise RuntimeError( "AdamW don't support weight_decay with sparse parameters, please set it to None." ) params_grads.append((param, grad_var)) - optimize_ops = self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + optimize_ops = self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) else: # optimize parameters in groups for param_group in self._param_groups: @@ -588,35 +681,41 @@ class AdamW(Optimizer): if param._grad_ivar() is not None: grad_var = param._grad_ivar() if framework.in_dygraph_mode(): - if hasattr(grad_var, "is_selected_rows" - ) and grad_var.is_selected_rows( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "is_selected_rows") + and grad_var.is_selected_rows() + and self.regularization is not None + ): raise RuntimeError( "AdamW don't support weight_decay with sparse parameters, please set it to None." ) else: - if hasattr(grad_var, - "_is_sparse") and grad_var._is_sparse( - ) and self.regularization is not None: + if ( + hasattr(grad_var, "_is_sparse") + and grad_var._is_sparse() + and self.regularization is not None + ): raise RuntimeError( "AdamW don't support weight_decay with sparse parameters, please set it to None." 
) params_grads['params'].append((param, grad_var)) params_grads.update( - {k: v - for k, v in param_group.items() if k != 'params'}) - self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + {k: v for k, v in param_group.items() if k != 'params'} + ) + self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) def _update_param_group(self, parameters): self._beta1 = parameters.get('beta1', self._default_dict['beta1']) self._beta2 = parameters.get('beta2', self._default_dict['beta2']) self._epsilon = parameters.get('epsilon', self._default_dict['epsilon']) - self._lazy_mode = parameters.get('lazy_mode', - self._default_dict['lazy_mode']) - self._weight_decay = parameters.get('weight_decay', - self._default_dict['weight_decay']) + self._lazy_mode = parameters.get( + 'lazy_mode', self._default_dict['lazy_mode'] + ) + self._weight_decay = parameters.get( + 'weight_decay', self._default_dict['weight_decay'] + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/lamb.py b/python/paddle/optimizer/lamb.py index 6a683c6ef91716540b64b0817b2616409711a3e1..4fc46ad1052b46de61969deff4dc5926070dc9c2 100644 --- a/python/paddle/optimizer/lamb.py +++ b/python/paddle/optimizer/lamb.py @@ -98,26 +98,30 @@ class Lamb(Optimizer): _beta1_pow_acc_str = "beta1_pow_acc" _beta2_pow_acc_str = "beta2_pow_acc" - def __init__(self, - learning_rate=0.001, - lamb_weight_decay=0.01, - beta1=0.9, - beta2=0.999, - epsilon=1e-6, - parameters=None, - grad_clip=None, - exclude_from_weight_decay_fn=None, - multi_precision=False, - name=None): + def __init__( + self, + learning_rate=0.001, + lamb_weight_decay=0.01, + beta1=0.9, + beta2=0.999, + epsilon=1e-6, + parameters=None, + grad_clip=None, + exclude_from_weight_decay_fn=None, + multi_precision=False, + name=None, + ): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(Lamb, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=None, - grad_clip=grad_clip, - name=name) + super(Lamb, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=None, + grad_clip=grad_clip, + name=name, + ) self.type = "lamb" self._beta1 = beta1 self._beta2 = beta2 @@ -160,19 +164,23 @@ class Lamb(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -199,15 +207,22 @@ class Lamb(Optimizer): """ if self._name is not None: name = self._name + "_" + name - find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if 
find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _add_moments_pows(self, p): @@ -218,21 +233,27 @@ class Lamb(Optimizer): self._add_accumulator(self._moment1_acc_str, p, dtype=acc_dtype) self._add_accumulator(self._moment2_acc_str, p, dtype=acc_dtype) self._add_accumulator( - name=self._beta1_pow_acc_str, - param=p, - dtype=acc_dtype, - fill_value=0.9 if isinstance(self._beta1, Variable) \ - else self._beta1, - shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + name=self._beta1_pow_acc_str, + param=p, + dtype=acc_dtype, + fill_value=0.9 + if isinstance(self._beta1, Variable) + else self._beta1, + shape=[1], + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) self._add_accumulator( - name=self._beta2_pow_acc_str, - param=p, - dtype=acc_dtype, - fill_value=0.999 if isinstance(self._beta2, Variable) \ - else self._beta2, - shape=[1], - type=core.VarDesc.VarType.LOD_TENSOR, device='cpu') + name=self._beta2_pow_acc_str, + param=p, + dtype=acc_dtype, + fill_value=0.999 + if isinstance(self._beta2, Variable) + else self._beta2, + shape=[1], + type=core.VarDesc.VarType.LOD_TENSOR, + device='cpu', + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -241,24 +262,32 @@ class Lamb(Optimizer): block.program._use_lamb = True - moment1 = self._get_accumulator(self._moment1_acc_str, - param_and_grad[0]) - moment2 = self._get_accumulator(self._moment2_acc_str, - param_and_grad[0]) - beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, - param_and_grad[0]) - beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, - param_and_grad[0]) - - if self._exclude_from_weight_decay_fn is not None \ - and self._exclude_from_weight_decay_fn(param_and_grad[0]): + moment1 = self._get_accumulator( + self._moment1_acc_str, param_and_grad[0] + ) + moment2 = self._get_accumulator( + self._moment2_acc_str, param_and_grad[0] + ) + beta1_pow_acc = self._get_accumulator( + self._beta1_pow_acc_str, param_and_grad[0] + ) + beta2_pow_acc = self._get_accumulator( + self._beta2_pow_acc_str, param_and_grad[0] + ) + + if ( + self._exclude_from_weight_decay_fn is not None + and self._exclude_from_weight_decay_fn(param_and_grad[0]) + ): weight_decay = 0.0 else: weight_decay = self._lamb_weight_decay lr = self._create_param_lr(param_and_grad) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) p_name = param_and_grad[0].name if find_master: master_weight = self._master_weights[p_name] @@ -268,20 +297,50 @@ class Lamb(Optimizer): found_inf = self._get_auxiliary_var('found_inf') if framework.in_dygraph_mode(): - _C_ops.lamb_(param_and_grad[0], param_and_grad[1], lr, moment1, - moment2, beta1_pow_acc, beta2_pow_acc, master_weight, - found_inf, weight_decay, self._beta1, self._beta2, - self._epsilon, find_master) + _C_ops.lamb_( + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + found_inf, + weight_decay, + self._beta1, + self._beta2, + self._epsilon, + find_master, + ) 
return None if framework._non_static_mode(): - _legacy_C_ops.lamb(param_and_grad[0], param_and_grad[1], lr, - moment1, moment2, beta1_pow_acc, beta2_pow_acc, - master_weight, param_and_grad[0], moment1, - moment2, beta1_pow_acc, beta2_pow_acc, - master_weight, 'beta1', self._beta1, 'beta2', - self._beta2, 'epsilon', self._epsilon, - 'weight_decay', weight_decay, 'multi_precision', - find_master) + _legacy_C_ops.lamb( + param_and_grad[0], + param_and_grad[1], + lr, + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + param_and_grad[0], + moment1, + moment2, + beta1_pow_acc, + beta2_pow_acc, + master_weight, + 'beta1', + self._beta1, + 'beta2', + self._beta2, + 'epsilon', + self._epsilon, + 'weight_decay', + weight_decay, + 'multi_precision', + find_master, + ) return None # create the lamb optimize op @@ -292,14 +351,14 @@ class Lamb(Optimizer): "Moment1": moment1, "Moment2": moment2, "Beta1Pow": beta1_pow_acc, - "Beta2Pow": beta2_pow_acc + "Beta2Pow": beta2_pow_acc, } outputs = { "ParamOut": param_and_grad[0], "Moment1Out": moment1, "Moment2Out": moment2, "Beta1PowOut": beta1_pow_acc, - "Beta2PowOut": beta2_pow_acc + "Beta2PowOut": beta2_pow_acc, } attrs = { "beta1": self._beta1, @@ -316,11 +375,13 @@ class Lamb(Optimizer): if found_inf: inputs["SkipUpdate"] = found_inf - lamb_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + lamb_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return lamb_op @@ -329,9 +390,11 @@ class Lamb(Optimizer): self._beta2 = parameters.get('beta2', self._default_dict['beta2']) self._epsilon = parameters.get('epsilon', self._default_dict['epsilon']) self._lamb_weight_decay = parameters.get( - 'lamb_weight_decay', self._default_dict['lamb_weight_decay']) + 'lamb_weight_decay', self._default_dict['lamb_weight_decay'] + ) self._exclude_from_weight_decay_fn = parameters.get( 'exclude_from_weight_decay_fn', - self._default_dict['exclude_from_weight_decay_fn']) + self._default_dict['exclude_from_weight_decay_fn'], + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 4dc451d6334eabf48d48ce2d415d665cc25e8637..4e2dbafcabbc0b474c6a4b8fd83f7593032fb239 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -93,8 +93,10 @@ class LRScheduler(object): def __init__(self, learning_rate=0.1, last_epoch=-1, verbose=False): if not isinstance(learning_rate, (float, int)): raise TypeError( - "The type of learning rate must be float, but received {}". 
- format(type(learning_rate))) + "The type of learning rate must be float, but received {}".format( + type(learning_rate) + ) + ) self.base_lr = float(learning_rate) self.last_lr = float(learning_rate) self.last_epoch = last_epoch @@ -133,8 +135,11 @@ class LRScheduler(object): self.last_lr = self.get_lr() if self.verbose: - print('Epoch {}: {} set learning rate to {}.'.format( - self.last_epoch, self.__class__.__name__, self.last_lr)) + print( + 'Epoch {}: {} set learning rate to {}.'.format( + self.last_epoch, self.__class__.__name__, self.last_lr + ) + ) def state_dict(self): """ @@ -153,7 +158,8 @@ class LRScheduler(object): assert value.shape == [ 1 ], "shape of Tensor in state_dict must be [1] {}".format( - value.shape) + value.shape + ) value = value.numpy()[0] state_dict[key] = value @@ -184,8 +190,10 @@ class LRScheduler(object): self.__dict__[key] = state_dict[key] else: raise RuntimeError( - "Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict" - .format(key)) + "Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict".format( + key + ) + ) if len(state_dict) > len(self.keys): warnings.warn( "There are some unused values in state_dict. Maybe the optimizer have different 'LearningRateDecay' when invoking state_dict and set_dict" @@ -279,12 +287,14 @@ class NoamDecay(LRScheduler): """ - def __init__(self, - d_model, - warmup_steps, - learning_rate=1.0, - last_epoch=-1, - verbose=False): + def __init__( + self, + d_model, + warmup_steps, + learning_rate=1.0, + last_epoch=-1, + verbose=False, + ): self.d_model = d_model self.warmup_steps = warmup_steps super(NoamDecay, self).__init__(learning_rate, last_epoch, verbose) @@ -379,8 +389,9 @@ class PiecewiseDecay(LRScheduler): def __init__(self, boundaries, values, last_epoch=-1, verbose=False): self.boundaries = boundaries self.values = values - super(PiecewiseDecay, self).__init__(last_epoch=last_epoch, - verbose=verbose) + super(PiecewiseDecay, self).__init__( + last_epoch=last_epoch, verbose=verbose + ) def get_lr(self): for i in range(len(self.boundaries)): @@ -460,10 +471,13 @@ class NaturalExpDecay(LRScheduler): """ def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False): - assert gamma > 0.0, " 'gamma' must be a positive number so that the learning rate will decay." + assert ( + gamma > 0.0 + ), " 'gamma' must be a positive number so that the learning rate will decay." 
self.gamma = gamma - super(NaturalExpDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(NaturalExpDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): return self.base_lr * math.exp(-1 * self.gamma * self.last_epoch) @@ -543,8 +557,9 @@ class InverseTimeDecay(LRScheduler): def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False): self.gamma = gamma - super(InverseTimeDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(InverseTimeDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): return self.base_lr / (1 + self.gamma * self.last_epoch) @@ -637,30 +652,37 @@ class PolynomialDecay(LRScheduler): # scheduler.step() # If you update learning rate each epoch """ - def __init__(self, - learning_rate, - decay_steps, - end_lr=0.0001, - power=1.0, - cycle=False, - last_epoch=-1, - verbose=False): + def __init__( + self, + learning_rate, + decay_steps, + end_lr=0.0001, + power=1.0, + cycle=False, + last_epoch=-1, + verbose=False, + ): assert decay_steps > 0 and isinstance( - decay_steps, int), " 'decay_steps' must be a positive integer." + decay_steps, int + ), " 'decay_steps' must be a positive integer." self.decay_steps = decay_steps self.end_lr = end_lr - assert power > 0.0, " 'power' must be greater than 0.0 so that the learning rate will decay." + assert ( + power > 0.0 + ), " 'power' must be greater than 0.0 so that the learning rate will decay." self.power = power self.cycle = cycle - super(PolynomialDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(PolynomialDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): tmp_epoch_num = self.last_epoch tmp_decay_steps = self.decay_steps if self.cycle: div_res = math.ceil( - float(self.last_epoch) / float(self.decay_steps)) + float(self.last_epoch) / float(self.decay_steps) + ) if self.last_epoch == 0: div_res = 1 @@ -669,8 +691,8 @@ class PolynomialDecay(LRScheduler): tmp_epoch_num = min(self.last_epoch, self.decay_steps) return (self.base_lr - self.end_lr) * ( - (1 - float(tmp_epoch_num) / float(tmp_decay_steps))** - self.power) + self.end_lr + (1 - float(tmp_epoch_num) / float(tmp_decay_steps)) ** self.power + ) + self.end_lr class LinearWarmup(LRScheduler): @@ -758,27 +780,36 @@ class LinearWarmup(LRScheduler): # scheduler.step() # If you update learning rate each epoch """ - def __init__(self, - learning_rate, - warmup_steps, - start_lr, - end_lr, - last_epoch=-1, - verbose=False): - type_check = isinstance(learning_rate, float) or isinstance( - learning_rate, int) or isinstance(learning_rate, LRScheduler) + def __init__( + self, + learning_rate, + warmup_steps, + start_lr, + end_lr, + last_epoch=-1, + verbose=False, + ): + type_check = ( + isinstance(learning_rate, float) + or isinstance(learning_rate, int) + or isinstance(learning_rate, LRScheduler) + ) if not type_check: raise TypeError( - "the type of learning_rate should be [int, float or LRScheduler], the current type is {}" - .format(learning_rate)) + "the type of learning_rate should be [int, float or LRScheduler], the current type is {}".format( + learning_rate + ) + ) self.learning_rate = learning_rate assert warmup_steps > 0 and isinstance( - warmup_steps, int), " 'warmup_steps' must be a positive integer." + warmup_steps, int + ), " 'warmup_steps' must be a positive integer." 
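# Quick numeric check of the polynomial-decay rule reformatted above, with
# illustrative values only: halfway through the decay with power=1.0 the rate
# should sit halfway between base_lr and end_lr.
base_lr, end_lr, decay_steps, power = 0.5, 0.0, 20, 1.0
epoch = 10
lr = (base_lr - end_lr) * (1 - float(epoch) / float(decay_steps)) ** power + end_lr
assert abs(lr - 0.25) < 1e-12  # 0.5 * (1 - 10/20) ** 1.0 + 0.0 == 0.25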
self.warmup_steps = warmup_steps self.start_lr = start_lr self.end_lr = end_lr - assert end_lr > start_lr, "end_lr {} must be greater than start_lr {}".format( - end_lr, start_lr) + assert ( + end_lr > start_lr + ), "end_lr {} must be greater than start_lr {}".format(end_lr, start_lr) super(LinearWarmup, self).__init__(start_lr, last_epoch, verbose) def state_dict(self): @@ -803,7 +834,8 @@ class LinearWarmup(LRScheduler): def get_lr(self): if self.last_epoch < self.warmup_steps: return (self.end_lr - self.start_lr) * float( - self.last_epoch) / float(self.warmup_steps) + self.start_lr + self.last_epoch + ) / float(self.warmup_steps) + self.start_lr else: if isinstance(self.learning_rate, LRScheduler): self.learning_rate.step(self.last_epoch - self.warmup_steps) @@ -884,10 +916,13 @@ class ExponentialDecay(LRScheduler): """ def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False): - assert gamma > 0.0 and gamma < 1.0, " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay." + assert ( + gamma > 0.0 and gamma < 1.0 + ), " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay." self.gamma = gamma - super(ExponentialDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(ExponentialDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): return self.base_lr * (self.gamma**self.last_epoch) @@ -973,21 +1008,21 @@ class MultiStepDecay(LRScheduler): # scheduler.step() # If you update learning rate each epoch """ - def __init__(self, - learning_rate, - milestones, - gamma=0.1, - last_epoch=-1, - verbose=False): + def __init__( + self, learning_rate, milestones, gamma=0.1, last_epoch=-1, verbose=False + ): if not isinstance(milestones, (tuple, list)): raise TypeError( "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." - % type(milestones)) + % type(milestones) + ) - if not all([ + if not all( + [ milestones[i] < milestones[i + 1] for i in range(len(milestones) - 1) - ]): + ] + ): raise ValueError('The elements of milestones must be incremented') if gamma >= 1.0: raise ValueError('gamma should be < 1.0.') @@ -1000,7 +1035,7 @@ class MultiStepDecay(LRScheduler): for i in range(len(self.milestones)): if self.last_epoch < self.milestones[i]: return self.base_lr * (self.gamma**i) - return self.base_lr * (self.gamma**len(self.milestones)) + return self.base_lr * (self.gamma ** len(self.milestones)) class StepDecay(LRScheduler): @@ -1082,21 +1117,20 @@ class StepDecay(LRScheduler): # scheduler.step() # If you update learning rate each epoch """ - def __init__(self, - learning_rate, - step_size, - gamma=0.1, - last_epoch=-1, - verbose=False): + def __init__( + self, learning_rate, step_size, gamma=0.1, last_epoch=-1, verbose=False + ): if not isinstance(step_size, int): raise TypeError( - "The type of 'step_size' must be 'int', but received %s." % - type(step_size)) + "The type of 'step_size' must be 'int', but received %s." + % type(step_size) + ) if gamma >= 1.0: raise ValueError('gamma should be < 1.0.') assert step_size > 0 and isinstance( - step_size, int), " 'step_size' must be a positive integer." + step_size, int + ), " 'step_size' must be a positive integer." self.step_size = step_size self.gamma = gamma super(StepDecay, self).__init__(learning_rate, last_epoch, verbose) @@ -1185,7 +1219,8 @@ class LambdaDecay(LRScheduler): if not callable(lr_lambda): raise TypeError( "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s." 
- % type(lr_lambda)) + % type(lr_lambda) + ) self.lr_lambda = lr_lambda super(LambdaDecay, self).__init__(learning_rate, last_epoch, verbose) @@ -1281,17 +1316,19 @@ class ReduceOnPlateau(LRScheduler): """ - def __init__(self, - learning_rate, - mode='min', - factor=0.1, - patience=10, - threshold=1e-4, - threshold_mode='rel', - cooldown=0, - min_lr=0, - epsilon=1e-8, - verbose=False): + def __init__( + self, + learning_rate, + mode='min', + factor=0.1, + patience=10, + threshold=1e-4, + threshold_mode='rel', + cooldown=0, + min_lr=0, + epsilon=1e-8, + verbose=False, + ): mode = mode.lower() if mode not in ['min', 'max']: raise ValueError('mode: ' + mode + ' is unknown!') @@ -1299,18 +1336,21 @@ class ReduceOnPlateau(LRScheduler): if factor >= 1.0: raise ValueError( - 'new_lr = origin_lr * gamma and gamma should be < 1.0.') + 'new_lr = origin_lr * gamma and gamma should be < 1.0.' + ) self.factor = factor threshold_mode = threshold_mode.lower() if threshold_mode not in ['rel', 'abs']: - raise ValueError('threshold mode: ' + threshold_mode + - ' is unknown!') + raise ValueError( + 'threshold mode: ' + threshold_mode + ' is unknown!' + ) self.threshold_mode = threshold_mode if not isinstance(learning_rate, (float, int)): raise TypeError( "The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received %s." - % type(learning_rate)) + % type(learning_rate) + ) self.patience = patience self.threshold = threshold @@ -1333,8 +1373,11 @@ class ReduceOnPlateau(LRScheduler): # "cooldown_counter / best / num_bad_epochs / last_epoch / last_lr" will be stored. def state_keys(self): self.keys = [ - 'cooldown_counter', 'best', 'num_bad_epochs', 'last_epoch', - 'last_lr' + 'cooldown_counter', + 'best', + 'num_bad_epochs', + 'last_epoch', + 'last_lr', ] def step(self, metrics, epoch=None): @@ -1364,18 +1407,25 @@ class ReduceOnPlateau(LRScheduler): else: # need to declarate explicitly from paddle.framework import VarBase as Tensor + tmp = Tensor # loss must be float, numpy.ndarray or 1-D Tensor with shape [1] if isinstance(metrics, (tmp, numpy.ndarray)): - assert len(metrics.shape) == 1 and metrics.shape[0] == 1, "the metrics.shape " \ - "should be (1L,), but the current metrics.shape is {}. Maybe that " \ - "you should call paddle.mean to process it first.".format( - metrics.shape) - elif not isinstance(metrics, - (int, float, numpy.float32, numpy.float64)): + assert len(metrics.shape) == 1 and metrics.shape[0] == 1, ( + "the metrics.shape " + "should be (1L,), but the current metrics.shape is {}. 
Maybe that " + "you should call paddle.mean to process it first.".format( + metrics.shape + ) + ) + elif not isinstance( + metrics, (int, float, numpy.float32, numpy.float64) + ): raise TypeError( - "metrics must be 'int', 'float', 'np.float', 'numpy.ndarray' or 'paddle.Tensor', but receive {}" - .format(type(metrics))) + "metrics must be 'int', 'float', 'np.float', 'numpy.ndarray' or 'paddle.Tensor', but receive {}".format( + type(metrics) + ) + ) if self.cooldown_counter > 0: self.cooldown_counter -= 1 @@ -1393,9 +1443,13 @@ class ReduceOnPlateau(LRScheduler): if self.last_lr - new_lr > self.epsilon: self.last_lr = new_lr if self.verbose: - print('Epoch {}: {} set learning rate to {}.'.format( - self.last_epoch, self.__class__.__name__, - self.last_lr)) + print( + 'Epoch {}: {} set learning rate to {}.'.format( + self.last_epoch, + self.__class__.__name__, + self.last_lr, + ) + ) def _is_better(self, current, best): if self.mode == 'min' and self.threshold_mode == 'rel': @@ -1493,41 +1547,50 @@ class CosineAnnealingDecay(LRScheduler): # scheduler.step() # If you update learning rate each epoch """ - def __init__(self, - learning_rate, - T_max, - eta_min=0, - last_epoch=-1, - verbose=False): + def __init__( + self, learning_rate, T_max, eta_min=0, last_epoch=-1, verbose=False + ): if not isinstance(T_max, int): raise TypeError( "The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received %s." - % type(T_max)) + % type(T_max) + ) if not isinstance(eta_min, (float, int)): raise TypeError( "The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received %s." - % type(eta_min)) + % type(eta_min) + ) assert T_max > 0 and isinstance( - T_max, int), " 'T_max' must be a positive integer." + T_max, int + ), " 'T_max' must be a positive integer." self.T_max = T_max self.eta_min = float(eta_min) - super(CosineAnnealingDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(CosineAnnealingDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): if self.last_epoch == 0: return self.base_lr elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: - return self.last_lr + (self.base_lr - self.eta_min) * ( - 1 - math.cos(math.pi / self.T_max)) / 2 + return ( + self.last_lr + + (self.base_lr - self.eta_min) + * (1 - math.cos(math.pi / self.T_max)) + / 2 + ) return (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / ( - 1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * ( - self.last_lr - self.eta_min) + self.eta_min + 1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max) + ) * (self.last_lr - self.eta_min) + self.eta_min def _get_closed_form_lr(self): - return self.eta_min + (self.base_lr - self.eta_min) * ( - 1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 + return ( + self.eta_min + + (self.base_lr - self.eta_min) + * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) + / 2 + ) class MultiplicativeDecay(LRScheduler): @@ -1581,11 +1644,13 @@ class MultiplicativeDecay(LRScheduler): if not callable(lr_lambda): raise TypeError( "The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received %s." 
- % type(lr_lambda)) + % type(lr_lambda) + ) self.lr_lambda = lr_lambda - super(MultiplicativeDecay, self).__init__(learning_rate, last_epoch, - verbose) + super(MultiplicativeDecay, self).__init__( + learning_rate, last_epoch, verbose + ) def get_lr(self): cur_lr = self.base_lr @@ -1678,29 +1743,35 @@ class OneCycleLR(LRScheduler): scheduler.step() # You should update learning rate each step """ - def __init__(self, - max_learning_rate, - total_steps, - divide_factor=25., - end_learning_rate=0.0001, - phase_pct=0.3, - anneal_strategy='cos', - three_phase=False, - last_epoch=-1, - verbose=False): + def __init__( + self, + max_learning_rate, + total_steps, + divide_factor=25.0, + end_learning_rate=0.0001, + phase_pct=0.3, + anneal_strategy='cos', + three_phase=False, + last_epoch=-1, + verbose=False, + ): # Check type and value of max_learning_rate if not isinstance(max_learning_rate, (float, int)): raise TypeError( - "'max_learning_rate' must be 'float' or 'int', but received {}". - format(type(max_learning_rate))) + "'max_learning_rate' must be 'float' or 'int', but received {}".format( + type(max_learning_rate) + ) + ) if max_learning_rate < 0: raise ValueError("'max_learning_rate' must be a positive integer.") # Check type and value of end_learning_rate if not isinstance(end_learning_rate, (float, int)): raise TypeError( - "'end_learning_rate' must be 'float' or 'int', but received {}". - format(type(end_learning_rate))) + "'end_learning_rate' must be 'float' or 'int', but received {}".format( + type(end_learning_rate) + ) + ) if end_learning_rate < 0: raise ValueError("'end_learning_rate' must be a positive integer.") @@ -1708,7 +1779,9 @@ class OneCycleLR(LRScheduler): if not isinstance(total_steps, int): raise TypeError( "'total_step' must be 'int', but received {}".format( - type(total_steps))) + type(total_steps) + ) + ) if total_steps <= 0: raise ValueError("'total_step' must be a positive integer.") self.total_steps = total_steps @@ -1717,17 +1790,23 @@ class OneCycleLR(LRScheduler): if not isinstance(phase_pct, float): raise TypeError( "'phase_pct' must be 'float', but received {}".format( - type(phase_pct))) + type(phase_pct) + ) + ) if phase_pct < 0 or phase_pct > 1: raise ValueError( "'phase_pct' must be between 0 and 1, but received {}".format( - phase_pct)) + phase_pct + ) + ) # Check type and value of divide_factor if not isinstance(divide_factor, (float, int)): raise TypeError( - "'divide_factor' must be 'float' or 'int', but received {}". - format(type(divide_factor))) + "'divide_factor' must be 'float' or 'int', but received {}".format( + type(divide_factor) + ) + ) initial_lr = max_learning_rate / float(divide_factor) min_lr = float(end_learning_rate) @@ -1750,17 +1829,22 @@ class OneCycleLR(LRScheduler): self._step_config[1] - self._step_config[0], self._step_config[2] - self._step_config[1], self._step_config[3] - self._step_config[2], - self._step_config[3] - - self._step_config[2], # for the last step. + self._step_config[3] + - self._step_config[2], # for the last step. ] # start lr and end lr of each phase. 
self._lr_config = [ - initial_lr, max_learning_rate, initial_lr, min_lr + initial_lr, + max_learning_rate, + initial_lr, + min_lr, ] else: self._step_config = [ - 0, phase_pct * self.total_steps - 1, self.total_steps - 1, - self.total_steps - 1 + 0, + phase_pct * self.total_steps - 1, + self.total_steps - 1, + self.total_steps - 1, ] self._steps_size = [ self._step_config[1] - self._step_config[0], @@ -1776,8 +1860,10 @@ class OneCycleLR(LRScheduler): self.anneal_func = self._linear_annealing else: raise ValueError( - "'anneal_strategy' must by one of 'cos' or 'linear', but received {}" - .format(anneal_strategy)) + "'anneal_strategy' must by one of 'cos' or 'linear', but received {}".format( + anneal_strategy + ) + ) super(OneCycleLR, self).__init__(initial_lr, last_epoch, verbose) def _cos_annealing(self, start_lr, end_lr, pct): @@ -1792,17 +1878,21 @@ class OneCycleLR(LRScheduler): if current_step > self.total_steps: raise ValueError( - "Tried to step {} times. However the number of total steps is {}" - .format(current_step, self.total_steps)) + "Tried to step {} times. However the number of total steps is {}".format( + current_step, self.total_steps + ) + ) for (i, (end_step, step_size)) in enumerate( - zip(self._step_config[1:], self._steps_size)): + zip(self._step_config[1:], self._steps_size) + ): # i == len(self._lr_config) - 2 catch the last step, otherwise it will return None. if current_step <= end_step or i == len(self._lr_config) - 2: # self._step_config[i] means start step of a phase. percentage = (current_step - self._step_config[i]) / step_size - return self.anneal_func(self._lr_config[i], - self._lr_config[i + 1], percentage) + return self.anneal_func( + self._lr_config[i], self._lr_config[i + 1], percentage + ) class CyclicLR(LRScheduler): @@ -1896,71 +1986,93 @@ class CyclicLR(LRScheduler): scheduler.step() # You should update learning rate each step """ - def __init__(self, - base_learning_rate, - max_learning_rate, - step_size_up, - step_size_down=None, - mode='triangular', - exp_gamma=1., - scale_fn=None, - scale_mode='cycle', - last_epoch=-1, - verbose=False): + def __init__( + self, + base_learning_rate, + max_learning_rate, + step_size_up, + step_size_down=None, + mode='triangular', + exp_gamma=1.0, + scale_fn=None, + scale_mode='cycle', + last_epoch=-1, + verbose=False, + ): # check type and value of max_learning_rate if not isinstance(max_learning_rate, (float, int)): raise TypeError( - "'max_learning_rate' must be 'float' or 'int', but received {}". - format(type(max_learning_rate))) + "'max_learning_rate' must be 'float' or 'int', but received {}".format( + type(max_learning_rate) + ) + ) if max_learning_rate < 0: raise ValueError( - "'max_learning_rate' must be a positive integer, but received {}" - .format(max_learning_rate)) + "'max_learning_rate' must be a positive integer, but received {}".format( + max_learning_rate + ) + ) # check type and value of step_size_up if not isinstance(step_size_up, int): raise TypeError( - "The type of 'step_size_up' must be int, but received {}". - format(type(step_size_up))) + "The type of 'step_size_up' must be int, but received {}".format( + type(step_size_up) + ) + ) if step_size_up <= 0: raise ValueError( - "'step_size_up' must be a positive integer, but received {}". 
- format(step_size_up)) + "'step_size_up' must be a positive integer, but received {}".format( + step_size_up + ) + ) # check type and value of step_size_down if step_size_down is not None: if not isinstance(step_size_down, int): raise TypeError( - "The type of 'step_size_down' must be int, but received {}". - format(type(step_size_down))) + "The type of 'step_size_down' must be int, but received {}".format( + type(step_size_down) + ) + ) if step_size_down <= 0: raise ValueError( - "'step_size_down' must be a positive integer, but received {}" - .format(step_size_down)) + "'step_size_down' must be a positive integer, but received {}".format( + step_size_down + ) + ) # check type of exp_gamma if not isinstance(exp_gamma, float): raise TypeError( "The type of 'exp_gamma' must be float, but received {}".format( - type(exp_gamma))) + type(exp_gamma) + ) + ) step_size_up = float(step_size_up) - step_size_down = float( - step_size_down) if step_size_down is not None else step_size_up + step_size_down = ( + float(step_size_down) + if step_size_down is not None + else step_size_up + ) self.cycle_size = step_size_up + step_size_down self.step_up_pct = step_size_up / self.cycle_size self.max_lr = float(max_learning_rate) self.amplitude = self.max_lr - base_learning_rate - if mode not in ['triangular', 'triangular2', 'exp_range' - ] and scale_fn is None: + if ( + mode not in ['triangular', 'triangular2', 'exp_range'] + and scale_fn is None + ): raise ValueError( "'mode' is invalid and 'scale_fn' is not specified, make sure one of 'mode' or 'scale_fn' is valid" ) if scale_mode not in ['cycle', 'iterations']: raise ValueError( - "'scale_mode' must be one of 'cycle' or 'iterations") + "'scale_mode' must be one of 'cycle' or 'iterations" + ) self.mode = mode self.gamma = exp_gamma # only for exp_range mode @@ -1981,10 +2093,10 @@ class CyclicLR(LRScheduler): super().__init__(base_learning_rate, last_epoch, verbose) def _triangular_scale_fn(self, x): - return 1. + return 1.0 def _triangular2_scale_fn(self, x): - return 1 / (2.**(x - 1)) + return 1 / (2.0 ** (x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**x @@ -1993,7 +2105,7 @@ class CyclicLR(LRScheduler): iterations = self.last_epoch cycle = 1 + iterations // self.cycle_size - pct_per_cycle = 1. 
+ iterations / self.cycle_size - cycle + pct_per_cycle = 1.0 + iterations / self.cycle_size - cycle if pct_per_cycle <= self.step_up_pct: scale_factor = pct_per_cycle / self.step_up_pct diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py index 5ec696f691ce475084c5521c0f5d08133eb0b1bb..8cca050625a425c54c24336bc3336f626e17aae9 100644 --- a/python/paddle/optimizer/momentum.py +++ b/python/paddle/optimizer/momentum.py @@ -121,29 +121,35 @@ class Momentum(Optimizer): """ _velocity_acc_str = "velocity" - def __init__(self, - learning_rate=0.001, - momentum=0.9, - parameters=None, - use_nesterov=False, - weight_decay=None, - grad_clip=None, - multi_precision=False, - rescale_grad=1.0, - use_multi_tensor=False, - name=None): + def __init__( + self, + learning_rate=0.001, + momentum=0.9, + parameters=None, + use_nesterov=False, + weight_decay=None, + grad_clip=None, + multi_precision=False, + rescale_grad=1.0, + use_multi_tensor=False, + name=None, + ): if learning_rate is None: raise ValueError("learning_rate is not set") if momentum is None: raise ValueError("momentum is not set") - predicate = lambda regular: isinstance(regular, - (L2DecayRegularizer, float)) + predicate = lambda regular: isinstance( + regular, (L2DecayRegularizer, float) + ) if isinstance(parameters, list): if isinstance(parameters[0], dict): for param_group in parameters: - decay = param_group[ - 'weight_decay'] if 'weight_decay' in param_group else weight_decay + decay = ( + param_group['weight_decay'] + if 'weight_decay' in param_group + else weight_decay + ) reg_method, reg_coeff = self._update_regularization(decay) param_group['regularization_method'] = reg_method param_group['regularization_coeff'] = reg_coeff @@ -151,16 +157,20 @@ class Momentum(Optimizer): param_group['weight_decay'] = py_regular py_regular = None if predicate(weight_decay) else weight_decay - super(Momentum, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=py_regular, - grad_clip=grad_clip, - name=name) + super(Momentum, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=py_regular, + grad_clip=grad_clip, + name=name, + ) self.type = "momentum" self._momentum = momentum self._use_nesterov = bool(use_nesterov) - self._regularization_method, self._regularization_coeff = self._update_regularization( - weight_decay) + ( + self._regularization_method, + self._regularization_coeff, + ) = self._update_regularization(weight_decay) self._multi_precision = multi_precision self._rescale_grad = rescale_grad self._master_weights = {} @@ -178,25 +188,25 @@ class Momentum(Optimizer): self._velocity_dict = {'FP32_LODTensor': [], 'FP16_LODTensor': []} self._master_weight_dict = { 'FP32_LODTensor': None, - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } self._regularization_method_dict = { 'FP32_LODTensor': [], - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } self._regularization_coeff_dict = { 'FP32_LODTensor': [], - 'FP16_LODTensor': [] + 'FP16_LODTensor': [], } def _update_regularization(self, weight_decay): reg_method = "" reg_coeff = 0.0 - if (isinstance(weight_decay, L2DecayRegularizer)): + if isinstance(weight_decay, L2DecayRegularizer): reg_method = "l2_decay" reg_coeff = weight_decay._regularization_coeff - if (isinstance(weight_decay, float)): + if isinstance(weight_decay, float): reg_method = "l2_decay" reg_coeff = weight_decay return reg_method, reg_coeff @@ -209,19 +219,23 @@ class Momentum(Optimizer): var_name = param.name + "_fp32_master" var_name = 
unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -237,15 +251,22 @@ class Momentum(Optimizer): """ if self._name is not None: name = self._name + "_" + name - find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 - target_param = self._master_weights[ - param.name] if find_master else param + find_master = ( + self._multi_precision and param.dtype == core.VarDesc.VarType.FP16 + ) + target_param = ( + self._master_weights[param.name] if find_master else param + ) target_name = target_param.name - if (name not in self._accumulators - or target_name not in self._accumulators[name]): + if ( + name not in self._accumulators + or target_name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, target_name)) + name, target_name + ) + ) return self._accumulators[name][target_name] def _create_accumulators(self, block, parameters): @@ -263,7 +284,10 @@ class Momentum(Optimizer): master_p = self._create_master_weight(p) self._add_accumulator(self._velocity_acc_str, master_p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Momentum optimizer." @@ -271,25 +295,28 @@ class Momentum(Optimizer): self._add_accumulator(self._velocity_acc_str, p) def _create_regularization_of_grad(self, param, grad, regularization=None): - """ Create and add backward regularization Operators + """Create and add backward regularization Operators Function helper of append_regularization_ops. """ # If ParamAttr is set to L2Decay, we skip doing regularization here. And then we fused # L2Decay with momentum which can refer to _append_optimize_op below. 
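# Plain-Python sketch of the fusion described in the comment above; this is a
# conceptual illustration, not the fused momentum kernel itself, and the
# scalars stand in for parameter / gradient tensors.
mu, lr, coeff = 0.9, 0.01, 1e-4
param, grad, velocity = 1.0, 0.2, 0.0

g_eff = grad + coeff * param      # L2 decay folded into the gradient by the op
velocity = mu * velocity + g_eff  # velocity update
param = param - lr * velocity     # vanilla (non-Nesterov) parameter update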
- if hasattr(param, 'regularizer') and isinstance(param.regularizer, - L2DecayRegularizer): + if hasattr(param, 'regularizer') and isinstance( + param.regularizer, L2DecayRegularizer + ): return grad return super(Momentum, self)._create_regularization_of_grad( - param, grad, regularization) + param, grad, regularization + ) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) if isinstance(param_and_grad, dict): param_and_grad = self._update_param_group(param_and_grad) - velocity_acc = self._get_accumulator(self._velocity_acc_str, - param_and_grad[0]) + velocity_acc = self._get_accumulator( + self._velocity_acc_str, param_and_grad[0] + ) lr = self._create_param_lr(param_and_grad) # For fusion of momentum and l2decay @@ -306,30 +333,56 @@ class Momentum(Optimizer): regularization_method = "" regularization_coeff = 0.0 - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) if _in_legacy_dygraph(): if isinstance(param_and_grad, dict): self._update_regularization(param_and_grad['weight_decay']) _, _, _ = _legacy_C_ops.momentum( - param_and_grad[0], param_and_grad[1], velocity_acc, lr, - master_weight, param_and_grad[0], velocity_acc, master_weight, - 'mu', self._momentum, 'use_nesterov', self._use_nesterov, - 'regularization_method', regularization_method, - 'regularization_coeff', regularization_coeff, 'multi_precision', - find_master) + param_and_grad[0], + param_and_grad[1], + velocity_acc, + lr, + master_weight, + param_and_grad[0], + velocity_acc, + master_weight, + 'mu', + self._momentum, + 'use_nesterov', + self._use_nesterov, + 'regularization_method', + regularization_method, + 'regularization_coeff', + regularization_coeff, + 'multi_precision', + find_master, + ) return None if in_dygraph_mode(): if isinstance(param_and_grad, dict): self._update_regularization(param_and_grad['weight_decay']) - return _C_ops.momentum_(param_and_grad[0], param_and_grad[1], - velocity_acc, lr, master_weight, - self._momentum, self._use_nesterov, - regularization_method, regularization_coeff, - find_master, self._rescale_grad) + return _C_ops.momentum_( + param_and_grad[0], + param_and_grad[1], + velocity_acc, + lr, + master_weight, + self._momentum, + self._use_nesterov, + regularization_method, + regularization_coeff, + find_master, + self._rescale_grad, + ) attrs = { "mu": self._momentum, @@ -337,19 +390,19 @@ class Momentum(Optimizer): "regularization_method": regularization_method, "regularization_coeff": regularization_coeff, "multi_precision": find_master, - "rescale_grad": self._rescale_grad + "rescale_grad": self._rescale_grad, } inputs = { "Param": [param_and_grad[0]], "Grad": [param_and_grad[1]], "Velocity": [velocity_acc], - "LearningRate": [lr] + "LearningRate": [lr], } outputs = { "ParamOut": [param_and_grad[0]], - "VelocityOut": [velocity_acc] + "VelocityOut": [velocity_acc], } if find_master: @@ -357,11 +410,13 @@ class Momentum(Optimizer): outputs["MasterParamOut"] = master_weight # create the momentum optimize op - momentum_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + momentum_op = block.append_op( + type=self.type, + inputs=inputs, + 
outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return momentum_op @@ -383,7 +438,9 @@ class Momentum(Optimizer): # we skip param's l2decay before, so fuse it with momentum here. if isinstance(param.regularizer, L2DecayRegularizer): regularization_method = "l2_decay" - regularization_coeff = param.regularizer._regularization_coeff + regularization_coeff = ( + param.regularizer._regularization_coeff + ) elif param.regularizer is not None: regularization_method = "" regularization_coeff = 0.0 @@ -392,28 +449,34 @@ class Momentum(Optimizer): self._velocity_dict['FP32_LODTensor'].append(velocity_acc) # fp32 no master weight self._regularization_method_dict['FP32_LODTensor'].append( - regularization_method) + regularization_method + ) self._regularization_coeff_dict['FP32_LODTensor'].append( - regularization_coeff) + regularization_coeff + ) elif param.dtype == paddle.float16: self._param_dict['FP16_LODTensor'].append(param) self._velocity_dict['FP16_LODTensor'].append(velocity_acc) if self._multi_precision: self._master_weight_dict['FP16_LODTensor'].append( - self._master_weights[param.name]) + self._master_weights[param.name] + ) else: self._master_weight_dict['FP16_LODTensor'] = None self._regularization_method_dict['FP16_LODTensor'].append( - regularization_method) + regularization_method + ) self._regularization_coeff_dict['FP16_LODTensor'].append( - regularization_coeff) + regularization_coeff + ) else: raise ValueError( "Now multi_tensor_momentum only support fp32 and fp16 parameters and grad is LOD_TENSOR." ) - def _append_optimize_multi_tensor_op(self, target_block, - parameters_and_grads): + def _append_optimize_multi_tensor_op( + self, target_block, parameters_and_grads + ): """ For Multi Tensor, append optimize merged_operator to block. 
""" @@ -427,15 +490,19 @@ class Momentum(Optimizer): if param_and_grad[1] is None: continue if param_and_grad[0].stop_gradient is False: - if param_and_grad[ - 0].dtype == paddle.float32 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + if ( + param_and_grad[0].dtype == paddle.float32 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP32_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP32_LODTensor'].append(lr) - elif param_and_grad[ - 0].dtype == paddle.float16 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + elif ( + param_and_grad[0].dtype == paddle.float16 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP16_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP16_LODTensor'].append(lr) @@ -446,21 +513,27 @@ class Momentum(Optimizer): if param_and_grad[0].stop_gradient is False: param_grad_dict = dict() param_grad_dict['params'] = param_and_grad - param_grad_dict.update({ - k: v - for k, v in parameters_and_grads.items() - if k != 'params' - }) + param_grad_dict.update( + { + k: v + for k, v in parameters_and_grads.items() + if k != 'params' + } + ) param_and_grad = self._update_param_group(param_grad_dict) - if param_and_grad[ - 0].dtype == paddle.float32 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + if ( + param_and_grad[0].dtype == paddle.float32 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP32_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP32_LODTensor'].append(lr) - elif param_and_grad[ - 0].dtype == paddle.float16 and param_and_grad[ - 1].type == core.VarDesc.VarType.LOD_TENSOR: + elif ( + param_and_grad[0].dtype == paddle.float16 + and param_and_grad[1].type + == core.VarDesc.VarType.LOD_TENSOR + ): grad_dict['FP16_LODTensor'].append(param_and_grad[1]) lr = self._create_param_lr(param_and_grad) lr_dict['FP16_LODTensor'].append(lr) @@ -473,26 +546,39 @@ class Momentum(Optimizer): if framework._non_static_mode(): if in_dygraph_mode(): _, _, _ = _C_ops.merged_momentum_( - self._param_dict[key], grad_dict[key], - self._velocity_dict[key], lr_dict[key], - self._master_weight_dict[key], self._momentum, + self._param_dict[key], + grad_dict[key], + self._velocity_dict[key], + lr_dict[key], + self._master_weight_dict[key], + self._momentum, self._use_nesterov, self._regularization_method_dict[key], - self._regularization_coeff_dict[key], find_master, - self._rescale_grad) + self._regularization_coeff_dict[key], + find_master, + self._rescale_grad, + ) else: _, _, _ = _legacy_C_ops.merged_momentum( - self._param_dict[key], grad_dict[key], - self._velocity_dict[key], lr_dict[key], + self._param_dict[key], + grad_dict[key], + self._velocity_dict[key], + lr_dict[key], self._master_weight_dict[key], - self._param_dict[key], self._velocity_dict[key], - self._master_weight_dict[key], 'mu', self._momentum, - 'use_nesterov', self._use_nesterov, + self._param_dict[key], + self._velocity_dict[key], + self._master_weight_dict[key], + 'mu', + self._momentum, + 'use_nesterov', + self._use_nesterov, 'regularization_method', self._regularization_method_dict[key], 'regularization_coeff', self._regularization_coeff_dict[key], - 'multi_precision', find_master) + 'multi_precision', + find_master, + ) else: inputs = { "Param": self._param_dict[key], @@ -505,38 +591,45 @@ class Momentum(Optimizer): "VelocityOut": 
self._velocity_dict[key], } attrs = { - "mu": - self._momentum, - "use_nesterov": - self._use_nesterov, - "regularization_method": - self._regularization_method_dict[key], - "regularization_coeff": - self._regularization_coeff_dict[key], + "mu": self._momentum, + "use_nesterov": self._use_nesterov, + "regularization_method": self._regularization_method_dict[ + key + ], + "regularization_coeff": self._regularization_coeff_dict[ + key + ], } if find_master: inputs["MasterParam"] = self._master_weight_dict[key] outputs["MasterParamOut"] = self._master_weight_dict[ - key] + key + ] attrs["multi_precision"] = find_master - target_block.append_op(type="merged_momentum", - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + target_block.append_op( + type="merged_momentum", + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return None def _update_param_group(self, parameters): - self._momentum = parameters.get('momentum', - self._default_dict['momentum']) - self._use_nesterov = parameters.get('use_nesterov', - self._default_dict['use_nesterov']) - self._rescale_grad = parameters.get('rescale_grad', - self._default_dict['rescale_grad']) + self._momentum = parameters.get( + 'momentum', self._default_dict['momentum'] + ) + self._use_nesterov = parameters.get( + 'use_nesterov', self._default_dict['use_nesterov'] + ) + self._rescale_grad = parameters.get( + 'rescale_grad', self._default_dict['rescale_grad'] + ) self._regularization_method = parameters.get( - 'regularization_method', - self._default_dict['regularization_method']) + 'regularization_method', self._default_dict['regularization_method'] + ) self._regularization_coeff = parameters.get( - 'regularization_coeff', self._default_dict['regularization_coeff']) + 'regularization_coeff', self._default_dict['regularization_coeff'] + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index b90bd5ae3ff1e97f8e0709adc61926963d59e823..54ac0db5536cda4e466d4a8f3ae7b3c54ccc5b13 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -17,13 +17,22 @@ import logging from collections import defaultdict import paddle -from paddle.fluid.framework import Variable, default_main_program, device_guard, name_scope +from paddle.fluid.framework import ( + Variable, + default_main_program, + device_guard, + name_scope, +) from ..fluid import framework from ..fluid import layers from ..fluid import unique_name from ..fluid.backward import _get_no_grad_set_name, append_backward -from ..fluid.clip import GradientClipBase, append_gradient_clip_ops, error_clip_callback +from ..fluid.clip import ( + GradientClipBase, + append_gradient_clip_ops, + error_clip_callback, +) from ..fluid.framework import program_guard, Parameter from ..fluid.initializer import Constant from ..fluid.layer_helper import LayerHelper @@ -31,24 +40,36 @@ from ..fluid.dygraph import base as imperative_base from paddle.fluid import core from .lr import LRScheduler from paddle import _C_ops, _legacy_C_ops -from paddle.fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check, _current_expected_place, in_dygraph_mode +from paddle.fluid.framework import ( + _in_legacy_dygraph, + _in_eager_without_dygraph_check, + _current_expected_place, + in_dygraph_mode, +) __all__ = [] @framework.static_only -def append_backward_new(loss_list, - parameter_list=None, - no_grad_set=None, - callbacks=None, - checkpoints=None, - 
distop_context=None): +def append_backward_new( + loss_list, + parameter_list=None, + no_grad_set=None, + callbacks=None, + checkpoints=None, + distop_context=None, +): from paddle.incubate.autograd.primx import orig2prim, Transform + program = default_main_program() - assert program.num_blocks == 1, "The append_backward_new interface is designed to process only one block." + assert ( + program.num_blocks == 1 + ), "The append_backward_new interface is designed to process only one block." block = program.current_block() for el in loss_list: - assert el.block == block, 'variable in loss_list should be in current block of main program' + assert ( + el.block == block + ), 'variable in loss_list should be in current block of main program' orig2prim(block) ad = Transform(block) @@ -152,12 +173,14 @@ class Optimizer(object): """ @imperative_base.no_grad - def __init__(self, - learning_rate, - parameters=None, - weight_decay=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + parameters=None, + weight_decay=None, + grad_clip=None, + name=None, + ): if parameters is not None: # paddle.Tensor is also iterable, so here we don't check whether @@ -166,13 +189,16 @@ class Optimizer(object): if isinstance(parameters, (paddle.Tensor, core.eager.Tensor)): raise TypeError( "`parameters` argument given to the optimizer should be " - "an iterable of paddle Tensors, but got argument type is `{}`." - .format(type(parameters))) + "an iterable of paddle Tensors, but got argument type is `{}`.".format( + type(parameters) + ) + ) if isinstance(parameters, dict): raise TypeError( "`parameters` argument should not get dict type, " "if parameter groups is needed, please set `parameters`" - " as list of dict") + " as list of dict" + ) self._parameter_list = list(parameters) else: self._parameter_list = None @@ -186,18 +212,22 @@ class Optimizer(object): if weight_decay is not None: if not isinstance(self._parameter_list[0], dict): for param in self._parameter_list: - if hasattr(param, 'regularizer' - ) and param.regularizer is not None: + if ( + hasattr(param, 'regularizer') + and param.regularizer is not None + ): logging.info( "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. " "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" 
- % weight_decay.__str__()) + % weight_decay.__str__() + ) break if not isinstance(learning_rate, (float, LRScheduler)): raise TypeError( - "learning rate should be float or LRScheduler, got %s here" % - type(learning_rate)) + "learning rate should be float or LRScheduler, got %s here" + % type(learning_rate) + ) if grad_clip is not None: if not isinstance(grad_clip, GradientClipBase): raise TypeError( @@ -205,6 +235,7 @@ class Optimizer(object): ) if isinstance(weight_decay, float): from ..fluid.regularizer import L2Decay + self.regularization = L2Decay(weight_decay) else: self.regularization = weight_decay @@ -216,8 +247,9 @@ class Optimizer(object): if self._parameter_list: if isinstance(self._parameter_list[0], dict): for param_group in self._parameter_list: - assert 'params' in param_group, \ - 'params should be set in parameters if parameter groups are optimized in different options' + assert ( + 'params' in param_group + ), 'params should be set in parameters if parameter groups are optimized in different options' self._dtype = self._parameter_list[0]['params'][0].dtype else: self._dtype = self._parameter_list[0].dtype @@ -237,7 +269,7 @@ class Optimizer(object): self.clear_gradients = self.clear_grad self._default_dict = { 'weight_decay': self.regularization, - 'grad_clip': self._grad_clip + 'grad_clip': self._grad_clip, } self._param_groups = [] @@ -342,8 +374,9 @@ class Optimizer(object): self._accumulators_holder = state_dict for k, v in self._accumulators.items(): for para_name, var_tmp in v.items(): - assert var_tmp.name in state_dict, \ - "optimizer Tensor {} not found".format( var_tmp.name ) + assert ( + var_tmp.name in state_dict + ), "optimizer Tensor {} not found".format(var_tmp.name) var = var_tmp.value() tensor = var.get_tensor() model_np = np.array(tensor) @@ -357,16 +390,23 @@ class Optimizer(object): elif isinstance(load_para, np.ndarray): load_para_np = load_para else: - raise RuntimeError("State dict type {} not supprt".format( - str(type(load_para)))) - - assert model_np.shape == load_para_np.shape, \ - "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format( - model_np.name, model_np.shape, load_para_np.shape) + raise RuntimeError( + "State dict type {} not supprt".format( + str(type(load_para)) + ) + ) + + assert ( + model_np.shape == load_para_np.shape + ), "Parameter shape not match, Dygraph Parameter [ {} ] need tensor with shape {} but load tensor with shape {}".format( + model_np.name, model_np.shape, load_para_np.shape + ) - assert model_np.dtype == load_para_np.dtype, \ - "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( - model_np.name, model_np.dtype, load_para_np.dtype) + assert ( + model_np.dtype == load_para_np.dtype + ), "Parameter dtype not match, Dygraph Parameter [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( + model_np.name, model_np.dtype, load_para_np.dtype + ) tensor.set(load_para_np, framework._current_expected_place()) @@ -375,45 +415,57 @@ class Optimizer(object): def _create_global_learning_rate(self): # lr var can't be float16, for pure fp16 training, should extra handle the dtype for lr - _lr_dtype = paddle.get_default_dtype( - ) if self._dtype is None else self._dtype - _lr_dtype = paddle.float32 if ( - paddle.get_default_dtype() != "float16" - and _lr_dtype == paddle.float16) else _lr_dtype + _lr_dtype = ( + paddle.get_default_dtype() if self._dtype is None else self._dtype + ) + 
_lr_dtype = ( + paddle.float32 + if ( + paddle.get_default_dtype() != "float16" + and _lr_dtype == paddle.float16 + ) + else _lr_dtype + ) if isinstance(self._learning_rate, LRScheduler): lr_var = self._global_learning_rate() # only create global lr_var once if not isinstance(lr_var, framework.Variable): lr_name = unique_name.generate('learning_rate') self._learning_rate._var_name = lr_name - lr_var = self.helper.create_global_variable(name=lr_name, - shape=[1], - persistable=True, - stop_gradient=True, - dtype=_lr_dtype) + lr_var = self.helper.create_global_variable( + name=lr_name, + shape=[1], + persistable=True, + stop_gradient=True, + dtype=_lr_dtype, + ) main_prog = framework.default_main_program() main_prog.lr_sheduler = self._learning_rate main_prog.lr_var = lr_var self._learning_rate_map[ - framework.default_main_program()] = lr_var + framework.default_main_program() + ] = lr_var lr_value = float(self._learning_rate()) self.helper.set_variable_initializer( - lr_var, initializer=Constant(value=lr_value)) + lr_var, initializer=Constant(value=lr_value) + ) elif isinstance(self._learning_rate, float): # only create global lr_var once lr = self._global_learning_rate() if isinstance(lr, framework.Variable): return else: - self._learning_rate_map[framework.default_main_program( - )] = layers.create_global_var( + self._learning_rate_map[ + framework.default_main_program() + ] = layers.create_global_var( name=unique_name.generate("learning_rate"), shape=[1], value=float(self._learning_rate), dtype=_lr_dtype, - persistable=True) + persistable=True, + ) @framework.dygraph_only def set_lr(self, value): @@ -454,7 +506,8 @@ class Optimizer(object): if not isinstance(value, (int, float)): raise TypeError( "The type of 'value' in optimizer.set_lr must be float, but received %s." - % (type(value))) + % (type(value)) + ) if isinstance(self._learning_rate, LRScheduler): raise RuntimeError( "optimizer's learning rate can't be LRScheduler when invoke this API, because this will lead to conflict." 
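# Illustrative sketch of the two learning-rate paths handled above (plain float
# vs. LRScheduler); the Linear layer is a placeholder and not part of this file.
import paddle

net = paddle.nn.Linear(4, 2)

# Float path: set_lr() / get_lr() read and write a single global value.
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())
opt.set_lr(0.05)
print(opt.get_lr())  # 0.05

# Scheduler path: the scheduler owns the value, so set_lr() would raise the
# RuntimeError above; the rate is advanced with scheduler.step() instead.
sched = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=2, gamma=0.5)
opt_s = paddle.optimizer.SGD(learning_rate=sched, parameters=net.parameters())
sched.step()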
@@ -464,23 +517,36 @@ class Optimizer(object): if current_lr is not None: if in_dygraph_mode(): place = _current_expected_place() - _C_ops.full_(current_lr, list(current_lr.shape), float(value), - current_lr.dtype, place) + _C_ops.full_( + current_lr, + list(current_lr.shape), + float(value), + current_lr.dtype, + place, + ) elif _in_legacy_dygraph(): - _legacy_C_ops.fill_constant(current_lr, 'value', float(value), - 'dtype', current_lr.dtype, 'shape', - list(current_lr.shape)) + _legacy_C_ops.fill_constant( + current_lr, + 'value', + float(value), + 'dtype', + current_lr.dtype, + 'shape', + list(current_lr.shape), + ) else: global_block = framework.default_main_program().global_block() - global_block.append_op(type='fill_constant', - outputs={'Out': [current_lr]}, - attrs={ - 'dtype': current_lr.dtype, - 'shape': list(current_lr.shape), - 'value': float(value) - }, - stop_gradient=True) + global_block.append_op( + type='fill_constant', + outputs={'Out': [current_lr]}, + attrs={ + 'dtype': current_lr.dtype, + 'shape': list(current_lr.shape), + 'value': float(value), + }, + stop_gradient=True, + ) def get_lr(self): """ @@ -554,8 +620,7 @@ class Optimizer(object): return self._learning_rate_map.get(program, None) def _append_optimize_op(self, block, param_and_grad): - """ append optimize operator to block and return all the added optimize_op - """ + """append optimize operator to block and return all the added optimize_op""" raise NotImplementedError( "Class \"Optimizer\" connot be used directly as an optimizer, please use its subclasses such as \"Adam\"" ) @@ -572,8 +637,8 @@ class Optimizer(object): return self._global_learning_rate() else: with default_main_program()._lr_schedule_guard( - is_with_opt=True), framework.name_scope( - 'scale_with_param_lr'): + is_with_opt=True + ), framework.name_scope('scale_with_param_lr'): return self._global_learning_rate() * param_lr else: return self._global_learning_rate() @@ -600,14 +665,16 @@ class Optimizer(object): """ pass - def _add_accumulator(self, - name, - param, - dtype=None, - fill_value=0.0, - shape=None, - type=None, - device=None): + def _add_accumulator( + self, + name, + param, + dtype=None, + fill_value=0.0, + shape=None, + type=None, + device=None, + ): """Utility function to add an accumulator for a parameter Args: @@ -619,13 +686,17 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name in self._accumulators - and param.name in self._accumulators[name]): + if ( + name in self._accumulators + and param.name in self._accumulators[name] + ): if framework._non_static_mode(): return self._accumulators[name][param.name] raise Exception( "Accumulator {} already exists for parameter {}".format( - name, param.name)) + name, param.name + ) + ) if shape == None: shape = param.shape assert isinstance(self.helper, LayerHelper) @@ -639,20 +710,25 @@ class Optimizer(object): persistable=True, dtype=dtype or param.dtype, type=core.VarDesc.VarType.LOD_TENSOR - if framework._in_eager_without_dygraph_check() else - (param.type if type is None else type), + if framework._in_eager_without_dygraph_check() + else (param.type if type is None else type), shape=shape, - belong_to_optimizer=True) + belong_to_optimizer=True, + ) if device is None: device = self._get_device_for_param(param.name) with device_guard(device): self.helper.set_variable_initializer( - var, initializer=Constant(value=float(fill_value))) + var, initializer=Constant(value=float(fill_value)) + ) if framework._non_static_mode(): if 
len(self._accumulators_holder) > 0: - assert var_name in self._accumulators_holder, \ - "Optimizer set error, {} should in state dict".format( var_name ) + assert ( + var_name in self._accumulators_holder + ), "Optimizer set error, {} should in state dict".format( + var_name + ) var.set_value(self._accumulators_holder[var_name]) self._accumulators[name][param.name] = var @@ -670,11 +746,15 @@ class Optimizer(object): """ if self._name is not None: name = self._name + "_" + name - if (name not in self._accumulators - or param.name not in self._accumulators[name]): + if ( + name not in self._accumulators + or param.name not in self._accumulators[name] + ): raise Exception( "Accumulator {} does not exist for parameter {}".format( - name, param.name)) + name, param.name + ) + ) return self._accumulators[name][param.name] def _update_param_device_map(self, parameters_and_grads, target_block): @@ -682,13 +762,15 @@ class Optimizer(object): if param_and_grad[0].stop_gradient is False: param_name = param_and_grad[0].name ops = target_block.ops - device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName( + device_attr_name = ( + core.op_proto_and_checker_maker.kOpDeviceAttrName() ) for op in ops: input_arg_names = op.input_arg_names if param_name in input_arg_names: self._param_device_map[param_name] = op.attr( - device_attr_name) + device_attr_name + ) break def _get_device_for_param(self, param_name): @@ -725,10 +807,12 @@ class Optimizer(object): target_block = global_block current_block = framework.default_main_program().current_block() if current_block.idx != global_block.idx: - assert current_block.backward_block_idx != -1, \ - "current block is not global_block, but it doesn't have backward block." + assert ( + current_block.backward_block_idx != -1 + ), "current block is not global_block, but it doesn't have backward block." target_block = framework.default_main_program().blocks[ - current_block.backward_block_idx] + current_block.backward_block_idx + ] start = len(target_block.ops) self.helper = LayerHelper(self.__class__.__name__) @@ -737,57 +821,83 @@ class Optimizer(object): # NOTE: Multi Tensor support [ Momentum, Adam ] for dygraph mode if self._use_multi_tensor and self.__class__.__name__ in [ - 'Momentum', 'Adam' + 'Momentum', + 'Adam', ]: - if len(self._param_dict['FP32_LODTensor']) == 0 and len( - self._param_dict['FP16_LODTensor']) == 0: + if ( + len(self._param_dict['FP32_LODTensor']) == 0 + and len(self._param_dict['FP16_LODTensor']) == 0 + ): if isinstance(parameters_and_grads, list): - self._multi_tensor_init(target_block, [ - p[0] - for p in parameters_and_grads if not p[0].stop_gradient - ]) + self._multi_tensor_init( + target_block, + [ + p[0] + for p in parameters_and_grads + if not p[0].stop_gradient + ], + ) else: self._update_param_group(parameters_and_grads) - self._multi_tensor_init(target_block, [ - p[0] for p in parameters_and_grads['params'] - if not p[0].stop_gradient - ]) + self._multi_tensor_init( + target_block, + [ + p[0] + for p in parameters_and_grads['params'] + if not p[0].stop_gradient + ], + ) if framework._non_static_mode(): - self._append_optimize_multi_tensor_op(target_block, - parameters_and_grads) + self._append_optimize_multi_tensor_op( + target_block, parameters_and_grads + ) else: - self._update_param_device_map(parameters_and_grads, - target_block) + self._update_param_device_map( + parameters_and_grads, target_block + ) # NOTE: Multi Tensor requires all parameters to be in the same device and program. 
# param_grad_list = [p_0,g_0,p_1,g_1,....] param_grad_list = [] for param_and_grad in parameters_and_grads: - if not param_and_grad[0].stop_gradient and param_and_grad[ - 1] is not None: + if ( + not param_and_grad[0].stop_gradient + and param_and_grad[1] is not None + ): param_grad_list.append(param_and_grad[0]) param_grad_list.append(param_and_grad[1]) with param_grad_list[0].block.program._optimized_guard( - param_grad_list), name_scope("optimizer"): + param_grad_list + ), name_scope("optimizer"): device = self._get_device_for_param(param_grad_list[0].name) with device_guard(device): self._append_optimize_multi_tensor_op( - target_block, parameters_and_grads) + target_block, parameters_and_grads + ) else: if not framework._non_static_mode(): - params_grads_device_map = parameters_and_grads[ - 'params'] if isinstance(parameters_and_grads, - dict) else parameters_and_grads - self._update_param_device_map(params_grads_device_map, - target_block) + params_grads_device_map = ( + parameters_and_grads['params'] + if isinstance(parameters_and_grads, dict) + else parameters_and_grads + ) + self._update_param_device_map( + params_grads_device_map, target_block + ) if isinstance(parameters_and_grads, list): - self._create_accumulators(target_block, [ - p[0] for p in parameters_and_grads if not p[0].stop_gradient - ]) + self._create_accumulators( + target_block, + [ + p[0] + for p in parameters_and_grads + if not p[0].stop_gradient + ], + ) else: params_acc_dict = parameters_and_grads.copy() params_acc_dict['params'] = [ - p[0] for p in params_acc_dict['params'] + p[0] + for p in params_acc_dict['params'] if not p[0].stop_gradient ] self._create_accumulators(target_block, params_acc_dict) @@ -798,8 +908,9 @@ class Optimizer(object): if param_and_grad[1] is None: continue if param_and_grad[0].stop_gradient is False: - self._append_optimize_op(target_block, - param_and_grad) + self._append_optimize_op( + target_block, param_and_grad + ) else: for param_and_grad in parameters_and_grads['params']: if param_and_grad[1] is None: @@ -807,25 +918,31 @@ class Optimizer(object): if param_and_grad[0].stop_gradient is False: param_grad_dict = dict() param_grad_dict['params'] = param_and_grad - param_grad_dict.update({ - k: v - for k, v in parameters_and_grads.items() - if k != 'params' - }) - self._append_optimize_op(target_block, - param_grad_dict) + param_grad_dict.update( + { + k: v + for k, v in parameters_and_grads.items() + if k != 'params' + } + ) + self._append_optimize_op( + target_block, param_grad_dict + ) else: for param_and_grad in parameters_and_grads: if param_and_grad[1] is None: continue with param_and_grad[0].block.program._optimized_guard( - param_and_grad), name_scope("optimizer"): + param_and_grad + ), name_scope("optimizer"): if param_and_grad[0].stop_gradient is False: device = self._get_device_for_param( - param_and_grad[0].name) + param_and_grad[0].name + ) with device_guard(device): optimize_op = self._append_optimize_op( - target_block, param_and_grad) + target_block, param_and_grad + ) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies @@ -837,12 +954,14 @@ class Optimizer(object): def _append_dgc_ops(self, param_and_grad): pass - def backward(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None, - callbacks=None): + def backward( + self, + loss, + startup_program=None, + parameters=None, + no_grad_set=None, + callbacks=None, + ): """ The first part of ``minimize``, do auto-diff to append backward 
operations for the current program. @@ -891,8 +1010,7 @@ class Optimizer(object): self._dtype = loss.dtype if framework._non_static_mode(): - parameter_list = parameters if parameters \ - else self._parameter_list + parameter_list = parameters if parameters else self._parameter_list params_grads = [] for param in parameter_list: @@ -906,23 +1024,26 @@ class Optimizer(object): if callbacks is None: callbacks = [error_clip_callback] else: - assert (isinstance(callbacks, list)) + assert isinstance(callbacks, list) program = loss.block.program - assert len(loss.shape) == 1 and loss.shape[0] == 1, \ - "The loss.shape should be (1L,), but the current loss.shape is {}. " \ + assert len(loss.shape) == 1 and loss.shape[0] == 1, ( + "The loss.shape should be (1L,), but the current loss.shape is {}. " "Maybe that you should call paddle.mean to process the current loss.".format( - loss.shape) - parameter_list = parameters if parameters \ - else self._parameter_list + loss.shape + ) + ) + parameter_list = parameters if parameters else self._parameter_list with program_guard(program, startup_program): from paddle.incubate.autograd.utils import prim_enabled + if prim_enabled(): - params_grads = append_backward_new([loss], parameter_list, - act_no_grad_set, - callbacks) + params_grads = append_backward_new( + [loss], parameter_list, act_no_grad_set, callbacks + ) else: - params_grads = append_backward(loss, parameter_list, - act_no_grad_set, callbacks) + params_grads = append_backward( + loss, parameter_list, act_no_grad_set, callbacks + ) # Note: since we can't use all_reduce_op now, # dgc_op should be the last op of one grad. self._append_dgc_ops(params_grads) @@ -967,8 +1088,9 @@ class Optimizer(object): params_grads = append_gradient_clip_ops(params_grads) # Add regularization if any - params_grads = self.append_regularization_ops(params_grads, - self.regularization) + params_grads = self.append_regularization_ops( + params_grads, self.regularization + ) optimize_ops = self._create_optimization_pass(params_grads) return optimize_ops @@ -986,21 +1108,26 @@ class Optimizer(object): list: A list of operators appended to the current program. """ if framework._non_static_mode(): - with program_guard(framework.default_main_program(), - framework.default_startup_program()): + with program_guard( + framework.default_main_program(), + framework.default_startup_program(), + ): if isinstance(params_grads, list): if self._grad_clip is not None: params_grads = self._grad_clip(params_grads) params_grads = self.append_regularization_ops( - params_grads, self.regularization) + params_grads, self.regularization + ) else: grad_clip = params_grads['grad_clip'] if grad_clip is not None: params_grads['params'] = grad_clip( - params_grads['params']) + params_grads['params'] + ) params_grads['params'] = self.append_regularization_ops( - params_grads['params'], self.regularization) + params_grads['params'], self.regularization + ) optimize_ops = self._create_optimization_pass(params_grads) else: program = loss.block.program @@ -1009,15 +1136,18 @@ class Optimizer(object): return optimize_ops def _create_regularization_of_grad(self, param, grad, regularization=None): - """ Create and add backward regularization Operators + """Create and add backward regularization Operators Function helper of append_regularization_ops. 
""" # If no gradient or no regularization is specified, then we don't need to do anything if grad is None or ( - (not hasattr(param, 'regularizer') or - (hasattr(param, 'regularizer') and param.regularizer is None)) - and regularization is None): + ( + not hasattr(param, 'regularizer') + or (hasattr(param, 'regularizer') and param.regularizer is None) + ) + and regularization is None + ): return grad regularization_term = None if hasattr(param, 'regularizer') and param.regularizer is not None: @@ -1044,7 +1174,8 @@ class Optimizer(object): dtype=param.dtype, shape=param.shape, lod_level=param.lod_level, - type=core.VarDesc.VarType.LOD_TENSOR) + type=core.VarDesc.VarType.LOD_TENSOR, + ) inputs = {"X": [grad, regularization_term]} outputs = {"Out": [new_grad]} @@ -1052,9 +1183,9 @@ class Optimizer(object): return new_grad - def append_regularization_ops(self, - parameters_and_grads, - regularization=None): + def append_regularization_ops( + self, parameters_and_grads, regularization=None + ): r"""Create and add backward regularization Operators Creates and adds backward regularization operators in the BlockDesc. @@ -1079,21 +1210,28 @@ class Optimizer(object): if framework._non_static_mode(): for param, grad in parameters_and_grads: new_grad = self._create_regularization_of_grad( - param, grad, regularization) + param, grad, regularization + ) params_and_grads.append((param, new_grad)) else: repeate_regularizer = False with framework.name_scope('regularization'): for param, grad in parameters_and_grads: - if not repeate_regularizer and param.regularizer is not None and regularization is not None: + if ( + not repeate_regularizer + and param.regularizer is not None + and regularization is not None + ): repeate_regularizer = True logging.info( "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. " "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" - % regularization.__str__()) + % regularization.__str__() + ) with param.block.program._optimized_guard([param, grad]): new_grad = self._create_regularization_of_grad( - param, grad, regularization) + param, grad, regularization + ) params_and_grads.append((param, new_grad)) return params_and_grads @@ -1101,7 +1239,8 @@ class Optimizer(object): no_grad_set = _get_no_grad_set_name(no_grad_set) parameters = loss.block.program.global_block().all_parameters() param_no_trainable = set( - [param.name for param in parameters if param.stop_gradient is True]) + [param.name for param in parameters if param.stop_gradient is True] + ) # If the parameter is no trainable, it should not have a gradient. no_grad_set.update(param_no_trainable) @@ -1142,7 +1281,8 @@ class Optimizer(object): """ param_list = [] if self._parameter_list is None or not isinstance( - self._parameter_list[0], dict): + self._parameter_list[0], dict + ): for p in self._parameter_list: if not p.stop_gradient: param_list.append(p) @@ -1159,11 +1299,9 @@ class Optimizer(object): core.clear_gradients(param_list, set_to_zero) @imperative_base.no_grad - def minimize(self, - loss, - startup_program=None, - parameters=None, - no_grad_set=None): + def minimize( + self, loss, startup_program=None, parameters=None, no_grad_set=None + ): """ Add operations to minimize ``loss`` by updating ``parameters``. @@ -1208,17 +1346,18 @@ class Optimizer(object): """ assert isinstance(loss, Variable), "The loss should be an Tensor." 
- parameter_list = parameters if parameters \ - else self._parameter_list + parameter_list = parameters if parameters else self._parameter_list - params_grads = self.backward(loss, - startup_program=startup_program, - parameters=parameter_list, - no_grad_set=no_grad_set) + params_grads = self.backward( + loss, + startup_program=startup_program, + parameters=parameter_list, + no_grad_set=no_grad_set, + ) - optimize_ops = self._apply_optimize(loss, - startup_program=startup_program, - params_grads=params_grads) + optimize_ops = self._apply_optimize( + loss, startup_program=startup_program, params_grads=params_grads + ) return optimize_ops, params_grads @@ -1258,9 +1397,9 @@ class Optimizer(object): grad_var = param._grad_ivar() params_grads.append((param, grad_var)) - self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) else: # optimize parameters in groups @@ -1273,11 +1412,11 @@ class Optimizer(object): grad_var = param._grad_ivar() params_grads['params'].append((param, grad_var)) params_grads.update( - {k: v - for k, v in param_group.items() if k != 'params'}) - self._apply_optimize(loss=None, - startup_program=None, - params_grads=params_grads) + {k: v for k, v in param_group.items() if k != 'params'} + ) + self._apply_optimize( + loss=None, startup_program=None, params_grads=params_grads + ) def _add_param_group(self, param_group): """ @@ -1293,7 +1432,8 @@ class Optimizer(object): elif isinstance(params, set): raise TypeError( "optimizer parameters should be in ordered collections," - "but received set, please use list instead.") + "but received set, please use list instead." + ) else: param_group['params'] = list(params) @@ -1307,18 +1447,21 @@ class Optimizer(object): if not param_set.isdisjoint(set(param_group['params'])): raise ValueError( - "some parameters appear in more than one parameter group") + "some parameters appear in more than one parameter group" + ) for param in param_group['params']: weight_decay = param_group['weight_decay'] if isinstance(weight_decay, float): from ..fluid.regularizer import L2Decay + regularization = L2Decay(weight_decay) else: regularization = weight_decay param.regularizer = regularization param.optimize_attr['learning_rate'] = param_group.get( - 'learning_rate', 1.) + 'learning_rate', 1.0 + ) self._param_groups.append(param_group) @@ -1344,8 +1487,9 @@ class Optimizer(object): pass @framework.dygraph_only - def _append_optimize_multi_tensor_op(self, target_block, - parameters_and_grads): + def _append_optimize_multi_tensor_op( + self, target_block, parameters_and_grads + ): """ For Multi Tensor, append optimize merged_operator to block. 
""" diff --git a/python/paddle/optimizer/rmsprop.py b/python/paddle/optimizer/rmsprop.py index 5ae5e1468beedd6a834db93c38cd5b80af142c59..06d7c58fb39f17581a72543886d7b087e0343ca2 100644 --- a/python/paddle/optimizer/rmsprop.py +++ b/python/paddle/optimizer/rmsprop.py @@ -141,16 +141,18 @@ class RMSProp(Optimizer): _mean_square_acc_str = "mean_square" _mean_grad_acc_str = "mean_grad" - def __init__(self, - learning_rate, - rho=0.95, - epsilon=1.0e-6, - momentum=0.0, - centered=False, - parameters=None, - weight_decay=None, - grad_clip=None, - name=None): + def __init__( + self, + learning_rate, + rho=0.95, + epsilon=1.0e-6, + momentum=0.0, + centered=False, + parameters=None, + weight_decay=None, + grad_clip=None, + name=None, + ): if learning_rate is None: raise ValueError("learning_rate is not set.") if rho is None: @@ -166,11 +168,13 @@ class RMSProp(Optimizer): if not 0.0 <= rho: raise ValueError("Invalid value of rho, expect rho >= 0.") - super(RMSProp, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + super(RMSProp, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "rmsprop" self._rho = rho @@ -203,49 +207,50 @@ class RMSProp(Optimizer): if isinstance(param_and_grad, dict): param_and_grad = self._update_param_group(param_and_grad) - momentum_acc = self._get_accumulator(self._momentum_acc_str, - param_and_grad[0]) - mean_square_acc = self._get_accumulator(self._mean_square_acc_str, - param_and_grad[0]) - mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str, - param_and_grad[0]) - rmsprop_op = block.append_op(type=self.type, - inputs={ - "Param": - param_and_grad[0], - "Grad": - param_and_grad[1], - "Moment": - momentum_acc, - "MeanSquare": - mean_square_acc, - "MeanGrad": - mean_grad_acc, - "LearningRate": - self._create_param_lr(param_and_grad), - }, - outputs={ - "ParamOut": param_and_grad[0], - "MomentOut": momentum_acc, - "MeanSquareOut": mean_square_acc, - "MeanGradOut": mean_grad_acc - }, - attrs={ - "epsilon": self._epsilon, - "decay": self._rho, - "momentum": self._momentum, - "centered": self._centered - }, - stop_gradient=True) + momentum_acc = self._get_accumulator( + self._momentum_acc_str, param_and_grad[0] + ) + mean_square_acc = self._get_accumulator( + self._mean_square_acc_str, param_and_grad[0] + ) + mean_grad_acc = self._get_accumulator( + self._mean_grad_acc_str, param_and_grad[0] + ) + rmsprop_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "Moment": momentum_acc, + "MeanSquare": mean_square_acc, + "MeanGrad": mean_grad_acc, + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={ + "ParamOut": param_and_grad[0], + "MomentOut": momentum_acc, + "MeanSquareOut": mean_square_acc, + "MeanGradOut": mean_grad_acc, + }, + attrs={ + "epsilon": self._epsilon, + "decay": self._rho, + "momentum": self._momentum, + "centered": self._centered, + }, + stop_gradient=True, + ) return rmsprop_op def _update_param_group(self, parameters): self._epsilon = parameters.get('epsilon', self._default_dict['epsilon']) self._rho = parameters.get('rho', self._default_dict['rho']) - self._momentum = parameters.get('momentum', - self._default_dict['momentum']) - self._centered = parameters.get('centered', - self._default_dict['centered']) + self._momentum = parameters.get( + 'momentum', self._default_dict['momentum'] + ) 
+ self._centered = parameters.get( + 'centered', self._default_dict['centered'] + ) parameters = parameters.get('params') return parameters diff --git a/python/paddle/optimizer/sgd.py b/python/paddle/optimizer/sgd.py index 40db1d361eba5d4354674c6a47e28a89fbf610a4..b8a9331b422edad313000caf296ffc28545bfe02 100644 --- a/python/paddle/optimizer/sgd.py +++ b/python/paddle/optimizer/sgd.py @@ -72,20 +72,24 @@ class SGD(Optimizer): """ - def __init__(self, - learning_rate=0.001, - parameters=None, - weight_decay=None, - grad_clip=None, - multi_precision=False, - name=None): + def __init__( + self, + learning_rate=0.001, + parameters=None, + weight_decay=None, + grad_clip=None, + multi_precision=False, + name=None, + ): if learning_rate is None: raise ValueError("learning_rate is not set") - super(SGD, self).__init__(learning_rate=learning_rate, - parameters=parameters, - weight_decay=weight_decay, - grad_clip=grad_clip, - name=name) + super(SGD, self).__init__( + learning_rate=learning_rate, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + name=name, + ) self.type = "sgd" self._multi_precision = multi_precision self._master_weights = {} @@ -98,19 +102,23 @@ class SGD(Optimizer): var_name = param.name + "_fp32_master" var_name = unique_name.generate(var_name) - var = layers.create_global_var(name=var_name, - shape=param.shape, - value=0, - dtype='float32', - persistable=True) + var = layers.create_global_var( + name=var_name, + shape=param.shape, + value=0, + dtype='float32', + persistable=True, + ) block = self.helper.startup_program.global_block() - block.append_op(type="cast", - inputs={"X": [param]}, - outputs={"Out": [var]}, - attrs={ - "in_dtype": param.dtype, - "out_dtype": core.VarDesc.VarType.FP32 - }) + block.append_op( + type="cast", + inputs={"X": [param]}, + outputs={"Out": [var]}, + attrs={ + "in_dtype": param.dtype, + "out_dtype": core.VarDesc.VarType.FP32, + }, + ) self._master_weights[param.name] = var return var @@ -124,7 +132,10 @@ class SGD(Optimizer): if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16: master_p = self._create_master_weight(p) continue - if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision: + if ( + p.dtype == core.VarDesc.VarType.FP16 + and not self._multi_precision + ): warnings.warn( "Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence." "Consider using multi_precision=True option of the Adam optimizer." 
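For the multi_precision path reformatted above, the public-facing usage is just the constructor flag; a brief sketch (with FP32 parameters the flag is effectively a no-op, so this also runs on CPU):

import paddle

linear = paddle.nn.Linear(10, 10)
loss = paddle.mean(linear(paddle.rand([10, 10])))

sgd = paddle.optimizer.SGD(
    learning_rate=0.001,
    parameters=linear.parameters(),
    weight_decay=0.01,
    # keep an FP32 master copy for any FP16 parameters (see _create_master_weight)
    multi_precision=True,
)
loss.backward()
sgd.step()
sgd.clear_grad()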
@@ -135,19 +146,35 @@ class SGD(Optimizer): if isinstance(param_and_grad, dict): param_and_grad = self._update_param_group(param_and_grad) - find_master = self._multi_precision and param_and_grad[ - 0].dtype == core.VarDesc.VarType.FP16 - master_weight = (self._master_weights[param_and_grad[0].name] - if find_master else None) + find_master = ( + self._multi_precision + and param_and_grad[0].dtype == core.VarDesc.VarType.FP16 + ) + master_weight = ( + self._master_weights[param_and_grad[0].name] + if find_master + else None + ) lr = self._create_param_lr(param_and_grad) if in_dygraph_mode(): - _C_ops.sgd_(param_and_grad[0], lr, param_and_grad[1], master_weight, - find_master) + _C_ops.sgd_( + param_and_grad[0], + lr, + param_and_grad[1], + master_weight, + find_master, + ) return None if _in_legacy_dygraph(): - _legacy_C_ops.sgd(param_and_grad[0], lr, param_and_grad[1], - master_weight, param_and_grad[0], master_weight) + _legacy_C_ops.sgd( + param_and_grad[0], + lr, + param_and_grad[1], + master_weight, + param_and_grad[0], + master_weight, + ) return None assert isinstance(block, framework.Block) @@ -155,7 +182,7 @@ class SGD(Optimizer): inputs = { "Param": param_and_grad[0], "Grad": param_and_grad[1], - "LearningRate": lr + "LearningRate": lr, } outputs = {"ParamOut": param_and_grad[0]} @@ -166,11 +193,13 @@ class SGD(Optimizer): inputs["MasterParam"] = master_weight outputs["MasterParamOut"] = master_weight - sgd_op = block.append_op(type=self.type, - inputs=inputs, - outputs=outputs, - attrs=attrs, - stop_gradient=True) + sgd_op = block.append_op( + type=self.type, + inputs=inputs, + outputs=outputs, + attrs=attrs, + stop_gradient=True, + ) return sgd_op diff --git a/python/paddle/profiler/__init__.py b/python/paddle/profiler/__init__.py index 76108cf2953f8313adfbec3e0958876ab52e8372..a59f8f14a54a2572a7a4e00c44ec9e8258082e4b 100644 --- a/python/paddle/profiler/__init__.py +++ b/python/paddle/profiler/__init__.py @@ -21,7 +21,14 @@ from .utils import RecordEvent, load_profiler_result from .profiler_statistic import SortedKeys __all__ = [ - 'ProfilerState', 'ProfilerTarget', 'make_scheduler', - 'export_chrome_tracing', 'export_protobuf', 'Profiler', 'RecordEvent', - 'load_profiler_result', 'SortedKeys', 'SummaryView' + 'ProfilerState', + 'ProfilerTarget', + 'make_scheduler', + 'export_chrome_tracing', + 'export_protobuf', + 'Profiler', + 'RecordEvent', + 'load_profiler_result', + 'SortedKeys', + 'SummaryView', ] diff --git a/python/paddle/profiler/profiler.py b/python/paddle/profiler/profiler.py index e4c32db2e9f836bf3be214c1be34b8b4df66f183..bcd87d9c826fce1cff21f2188b708b75ef99f485 100644 --- a/python/paddle/profiler/profiler.py +++ b/python/paddle/profiler/profiler.py @@ -22,11 +22,15 @@ import importlib import json import paddle -from paddle.fluid.core import (_Profiler, ProfilerOptions, TracerEventType, - enable_memory_recorder, - enable_input_shape_recorder, - disable_memory_recorder, - disable_input_shape_recorder) +from paddle.fluid.core import ( + _Profiler, + ProfilerOptions, + TracerEventType, + enable_memory_recorder, + enable_input_shape_recorder, + disable_memory_recorder, + disable_input_shape_recorder, +) from .utils import RecordEvent, wrap_optimizers from .profiler_statistic import StatisticData, _build_table, SortedKeys @@ -105,12 +109,14 @@ class ProfilerTarget(Enum): CUSTOM_DEVICE = 3 -def make_scheduler(*, - closed: int, - ready: int, - record: int, - repeat: int = 0, - skip_first: int = 0) -> Callable: +def make_scheduler( + *, + closed: int, + ready: int, + 
record: int, + repeat: int = 0, + skip_first: int = 0 +) -> Callable: r""" Return a scheduler function, which scheduler the :ref:`state ` according to the setting. The state transform confirms to: @@ -164,7 +170,9 @@ def make_scheduler(*, step = step - skip_first period_steps = closed + ready + record has_repeated = step // period_steps - if repeat > 0 and has_repeated >= repeat: # the period has repeated repeat times, return CLOSED state + if ( + repeat > 0 and has_repeated >= repeat + ): # the period has repeated repeat times, return CLOSED state return ProfilerState.CLOSED mod_step = step % period_steps if mod_step < closed: @@ -176,12 +184,19 @@ def make_scheduler(*, return ProfilerState.RECORD else: return ProfilerState.RECORD_AND_RETURN - assert closed >= 0 and ready >= 0 and record > 0 and \ - repeat >= 0 and skip_first >= 0, "Invalid profiler scheduler arguments" + + assert ( + closed >= 0 + and ready >= 0 + and record > 0 + and repeat >= 0 + and skip_first >= 0 + ), "Invalid profiler scheduler arguments" if ready == 0: - warn("Profiler will record data after enabling profiler immediately, \ + warn( + "Profiler will record data after enabling profiler immediately, \ some data collected at the beginning of profiling may be 'noisy' because of overhead." - ) + ) return getScheduleState @@ -192,8 +207,9 @@ def _default_state_scheduler(step: int): return ProfilerState.RECORD -def export_chrome_tracing(dir_name: str, - worker_name: Optional[str] = None) -> Callable: +def export_chrome_tracing( + dir_name: str, worker_name: Optional[str] = None +) -> Callable: r""" Return a callable, used for outputing tracing data to chrome tracing format file. The output file will be saved in directory ``dir_name``, and file name will be set as `worker_name`. @@ -226,24 +242,29 @@ def export_chrome_tracing(dir_name: str, os.makedirs(dir_name, exist_ok=True) except Exception: raise RuntimeError( - "Can not create directory '{}' for saving profiling results.". - format(dir_name)) + "Can not create directory '{}' for saving profiling results.".format( + dir_name + ) + ) def handle_fn(prof): nonlocal worker_name if not worker_name: - worker_name = "host_{}pid_{}".format(socket.gethostname(), - str(os.getpid())) + worker_name = "host_{}pid_{}".format( + socket.gethostname(), str(os.getpid()) + ) now = datetime.datetime.now() filename = '{}_time_{}.paddle_trace.json'.format( - worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f')) + worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f') + ) prof.export(os.path.join(dir_name, filename), "json") return handle_fn -def export_protobuf(dir_name: str, - worker_name: Optional[str] = None) -> Callable: +def export_protobuf( + dir_name: str, worker_name: Optional[str] = None +) -> Callable: r""" Return a callable, used for outputing tracing data to protobuf file. The output file will be saved in directory ``dir_name``, and file name will be set as ``worker_name``. @@ -276,17 +297,21 @@ def export_protobuf(dir_name: str, os.makedirs(dir_name, exist_ok=True) except Exception: raise RuntimeError( - "Can not create directory '{}' for saving profiling results.". 
- format(dir_name)) + "Can not create directory '{}' for saving profiling results.".format( + dir_name + ) + ) def handle_fn(prof): nonlocal worker_name if not worker_name: - worker_name = "host_{}pid_{}".format(socket.gethostname(), - str(os.getpid())) + worker_name = "host_{}pid_{}".format( + socket.gethostname(), str(os.getpid()) + ) now = datetime.datetime.now() filename = '{}_time_{}.paddle_trace.pb'.format( - worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f')) + worker_name, now.strftime('%Y_%m_%d_%H_%M_%S_%f') + ) prof.export(os.path.join(dir_name, filename), "pb") return handle_fn @@ -298,11 +323,15 @@ def _get_supported_targets() -> Iterable[ProfilerTarget]: """ if _Profiler.is_cupti_supported(): return [ - ProfilerTarget.CPU, ProfilerTarget.GPU, ProfilerTarget.CUSTOM_DEVICE + ProfilerTarget.CPU, + ProfilerTarget.GPU, + ProfilerTarget.CUSTOM_DEVICE, ] if _Profiler.is_cnpapi_supported(): return [ - ProfilerTarget.CPU, ProfilerTarget.MLU, ProfilerTarget.CUSTOM_DEVICE + ProfilerTarget.CPU, + ProfilerTarget.MLU, + ProfilerTarget.CUSTOM_DEVICE, ] return [ProfilerTarget.CPU, ProfilerTarget.CUSTOM_DEVICE] @@ -432,36 +461,40 @@ class Profiler: # | ips | 1086.42904 | 1227.30604 | 959.92796 | """ - def __init__(self, - *, - targets: Optional[Iterable[ProfilerTarget]] = None, - scheduler: Union[Callable[[int], ProfilerState], tuple, - None] = None, - on_trace_ready: Optional[Callable[..., Any]] = None, - record_shapes: Optional[bool] = False, - profile_memory=False, - timer_only: Optional[bool] = False, - emit_nvtx: Optional[bool] = False, - custom_device_types: Optional[list] = []): + def __init__( + self, + *, + targets: Optional[Iterable[ProfilerTarget]] = None, + scheduler: Union[Callable[[int], ProfilerState], tuple, None] = None, + on_trace_ready: Optional[Callable[..., Any]] = None, + record_shapes: Optional[bool] = False, + profile_memory=False, + timer_only: Optional[bool] = False, + emit_nvtx: Optional[bool] = False, + custom_device_types: Optional[list] = [] + ): supported_targets = _get_supported_targets() if targets: self.targets = set(targets) for target in targets: if target not in supported_targets: self.targets.remove(target) - warn("Profiling {} is not supported in current context.". 
- format(target)) + warn( + "Profiling {} is not supported in current context.".format( + target + ) + ) else: self.targets = supported_targets profileoption = ProfilerOptions() if ProfilerTarget.CPU in self.targets: profileoption.trace_switch |= 1 if ProfilerTarget.GPU in self.targets: - profileoption.trace_switch |= (1 << 1) + profileoption.trace_switch |= 1 << 1 if ProfilerTarget.MLU in self.targets: - profileoption.trace_switch |= (1 << 2) + profileoption.trace_switch |= 1 << 2 if ProfilerTarget.CUSTOM_DEVICE in self.targets: - profileoption.trace_switch |= (1 << 3) + profileoption.trace_switch |= 1 << 3 if not custom_device_types: custom_device_types = paddle.device.get_all_custom_device_type() wrap_optimizers() @@ -473,17 +506,19 @@ class Profiler: start_batch, end_batch = scheduler start_batch = max(start_batch, 0) if start_batch >= 1: - self.scheduler = make_scheduler(closed=max(start_batch - 1, 0), - ready=1, - record=(end_batch - - start_batch), - repeat=1) + self.scheduler = make_scheduler( + closed=max(start_batch - 1, 0), + ready=1, + record=(end_batch - start_batch), + repeat=1, + ) else: - self.scheduler = make_scheduler(closed=0, - ready=0, - record=(end_batch - - start_batch), - repeat=1) + self.scheduler = make_scheduler( + closed=0, + ready=0, + record=(end_batch - start_batch), + repeat=1, + ) else: self.scheduler = _default_state_scheduler @@ -549,9 +584,10 @@ class Profiler: elif self.current_state == ProfilerState.RECORD_AND_RETURN: self.profiler.prepare() self.profiler.start() - self.record_event = RecordEvent(name="ProfileStep#{}".format( - self.step_num), - event_type=TracerEventType.ProfileStep) + self.record_event = RecordEvent( + name="ProfileStep#{}".format(self.step_num), + event_type=TracerEventType.ProfileStep, + ) self.record_event.begin() def stop(self): @@ -593,7 +629,10 @@ class Profiler: ) self.profiler.start() self.profiler.stop() - if self.current_state == ProfilerState.RECORD or self.current_state == ProfilerState.RECORD_AND_RETURN: + if ( + self.current_state == ProfilerState.RECORD + or self.current_state == ProfilerState.RECORD_AND_RETURN + ): self.profiler_result = self.profiler.stop() if self.on_trace_ready: self.on_trace_ready(self) @@ -635,9 +674,10 @@ class Profiler: self.step_num += 1 self.current_state = self.scheduler(self.step_num) self._trigger_action() - self.record_event = RecordEvent(name="ProfileStep#{}".format( - self.step_num), - event_type=TracerEventType.ProfileStep) + self.record_event = RecordEvent( + name="ProfileStep#{}".format(self.step_num), + event_type=TracerEventType.ProfileStep, + ) self.record_event.begin() def step_info(self, unit=None): @@ -694,7 +734,9 @@ class Profiler: if self.current_state == ProfilerState.RECORD: # CLOSED -> RECORD self.profiler.prepare() self.profiler.start() - if self.current_state == ProfilerState.RECORD_AND_RETURN: # CLOSED -> RECORD_AND_RETURN + if ( + self.current_state == ProfilerState.RECORD_AND_RETURN + ): # CLOSED -> RECORD_AND_RETURN self.profiler.prepare() self.profiler.start() @@ -707,7 +749,9 @@ class Profiler: self.profiler.stop() if self.current_state == ProfilerState.RECORD: # READY -> RECORD self.profiler.start() - if self.current_state == ProfilerState.RECORD_AND_RETURN: # READY -> RECORD_AND_RETURN + if ( + self.current_state == ProfilerState.RECORD_AND_RETURN + ): # READY -> RECORD_AND_RETURN self.profiler.start() elif self.previous_state == ProfilerState.RECORD: @@ -723,21 +767,31 @@ class Profiler: ) self.profiler.stop() self.profiler.prepare() - if self.current_state == 
ProfilerState.RECORD_AND_RETURN: # RECORD -> RECORD_AND_RETURN + if ( + self.current_state == ProfilerState.RECORD_AND_RETURN + ): # RECORD -> RECORD_AND_RETURN pass else: assert self.previous_state == ProfilerState.RECORD_AND_RETURN - if self.current_state == ProfilerState.CLOSED: # RECORD_AND_RETURN -> CLOSED + if ( + self.current_state == ProfilerState.CLOSED + ): # RECORD_AND_RETURN -> CLOSED self.profiler_result = self.profiler.stop() - if self.current_state == ProfilerState.READY: # RECORD_AND_RETURN -> READY + if ( + self.current_state == ProfilerState.READY + ): # RECORD_AND_RETURN -> READY self.profiler_result = self.profiler.stop() self.profiler.prepare() - if self.current_state == ProfilerState.RECORD: # RECORD_AND_RETURN -> RECORD + if ( + self.current_state == ProfilerState.RECORD + ): # RECORD_AND_RETURN -> RECORD self.profiler_result = self.profiler.stop() self.profiler.prepare() self.profiler.start() - if self.current_state == ProfilerState.RECORD_AND_RETURN: # RECORD_AND_RETURN -> RECORD_AND_RETURN + if ( + self.current_state == ProfilerState.RECORD_AND_RETURN + ): # RECORD_AND_RETURN -> RECORD_AND_RETURN self.profiler_result = self.profiler.stop() self.profiler.prepare() self.profiler.start() @@ -772,12 +826,14 @@ class Profiler: if self.profiler_result: self.profiler_result.save(path, format) - def summary(self, - sorted_by=SortedKeys.CPUTotal, - op_detail=True, - thread_sep=False, - time_unit='ms', - views=None): + def summary( + self, + sorted_by=SortedKeys.CPUTotal, + op_detail=True, + thread_sep=False, + time_unit='ms', + views=None, + ): r""" Print the Summary table. Currently support overview, model, distributed, operator, memory manipulation and userdefined summary. @@ -811,14 +867,18 @@ class Profiler: if self.profiler_result: statistic_data = StatisticData( self.profiler_result.get_data(), - self.profiler_result.get_extra_info()) + self.profiler_result.get_extra_info(), + ) print( - _build_table(statistic_data, - sorted_by=sorted_by, - op_detail=op_detail, - thread_sep=thread_sep, - time_unit=time_unit, - views=views)) + _build_table( + statistic_data, + sorted_by=sorted_by, + op_detail=op_detail, + thread_sep=thread_sep, + time_unit=time_unit, + views=views, + ) + ) def get_profiler(config_path): @@ -851,17 +911,20 @@ def get_profiler(config_path): method = getattr(module, key) if not use_direct: translated_config_dict['scheduler'] = method( - *value['args'], **value['kwargs']) + *value['args'], **value['kwargs'] + ) else: translated_config_dict['scheduler'] = method else: translated_config_dict['scheduler'] = [ - config_dict['scheduler'][0], config_dict['scheduler'][1] + config_dict['scheduler'][0], + config_dict['scheduler'][1], ] except: print( - 'Set scheduler parameter error, use default parameter instead.') + 'Set scheduler parameter error, use default parameter instead.' 
+ ) translated_config_dict['scheduler'] = None if "on_trace_ready" in config_dict: try: @@ -873,7 +936,8 @@ def get_profiler(config_path): method = getattr(module, key) if not use_direct: translated_config_dict['on_trace_ready'] = method( - *value['args'], **value['kwargs']) + *value['args'], **value['kwargs'] + ) else: translated_config_dict['on_trace_ready'] = method except: diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py index 20465f1ae85ce93ee58403fcf1af6bd9ea12eb21..c383323d51f98a9442f5739333ffea71f7384c62 100755 --- a/python/paddle/profiler/profiler_statistic.py +++ b/python/paddle/profiler/profiler_statistic.py @@ -17,16 +17,29 @@ import re from paddle.fluid.core import TracerEventType, TracerMemEventType -from .statistic_helper import intersection_ranges, merge_ranges, merge_self_ranges, sum_ranges +from .statistic_helper import ( + intersection_ranges, + merge_ranges, + merge_self_ranges, + sum_ranges, +) _AllTracerEventType = [ - TracerEventType.Operator, TracerEventType.Dataloader, - TracerEventType.ProfileStep, TracerEventType.CudaRuntime, - TracerEventType.Kernel, TracerEventType.Memcpy, TracerEventType.Memset, - TracerEventType.UserDefined, TracerEventType.OperatorInner, - TracerEventType.Forward, TracerEventType.Backward, - TracerEventType.Optimization, TracerEventType.Communication, - TracerEventType.PythonOp, TracerEventType.PythonUserDefined + TracerEventType.Operator, + TracerEventType.Dataloader, + TracerEventType.ProfileStep, + TracerEventType.CudaRuntime, + TracerEventType.Kernel, + TracerEventType.Memcpy, + TracerEventType.Memset, + TracerEventType.UserDefined, + TracerEventType.OperatorInner, + TracerEventType.Forward, + TracerEventType.Backward, + TracerEventType.Optimization, + TracerEventType.Communication, + TracerEventType.PythonOp, + TracerEventType.PythonUserDefined, ] _CommunicationOpName = ['allreduce', 'broadcast', 'rpc'] @@ -90,19 +103,19 @@ class HostStatisticNode: for child in self.children_node: self.gpu_time += child.gpu_time self.general_gpu_time += child.general_gpu_time - self.self_cpu_time -= (child.end_ns - child.start_ns) + self.self_cpu_time -= child.end_ns - child.start_ns for rt in self.runtime_node: - self.self_cpu_time -= (rt.end_ns - rt.start_ns) + self.self_cpu_time -= rt.end_ns - rt.start_ns self.gpu_time += rt.gpu_time self.self_gpu_time += rt.gpu_time self.general_gpu_time += rt.general_gpu_time self.self_general_gpu_time += rt.general_gpu_time for device in self.hostnode.device_node: if device.type == TracerEventType.Kernel: - self.gpu_time += (device.end_ns - device.start_ns) - self.self_gpu_time += (device.end_ns - device.start_ns) - self.general_gpu_time += (device.end_ns - device.start_ns) - self.self_general_gpu_time += (device.end_ns - device.start_ns) + self.gpu_time += device.end_ns - device.start_ns + self.self_gpu_time += device.end_ns - device.start_ns + self.general_gpu_time += device.end_ns - device.start_ns + self.self_general_gpu_time += device.end_ns - device.start_ns @property def end_ns(self): @@ -172,12 +185,14 @@ def wrap_tree(nodetrees): stack.append(childnode) child_statistic_node = HostStatisticNode(childnode) current_statistic_node.children_node.append( - child_statistic_node) + child_statistic_node + ) newstack.append(child_statistic_node) for runtimenode in current_node.runtime_node: runtime_statistic_node = HostStatisticNode(runtimenode) current_statistic_node.runtime_node.append( - runtime_statistic_node) + runtime_statistic_node + ) # recursive 
calculate node statistic values for thread_id, root_statistic_node in node_statistic_tree.items(): root_statistic_node.cal_statistic() @@ -193,11 +208,12 @@ class TimeRangeSummary: def __init__(self): self.CPUTimeRange = collections.defaultdict(list) self.GPUTimeRange = collections.defaultdict( - lambda: collections.defaultdict( - list)) # GPU events should be divided into different devices + lambda: collections.defaultdict(list) + ) # GPU events should be divided into different devices self.CPUTimeRangeSum = collections.defaultdict(int) self.GPUTimeRangeSum = collections.defaultdict( - lambda: collections.defaultdict(int)) + lambda: collections.defaultdict(int) + ) self.call_times = collections.defaultdict(int) def parse(self, nodetrees): @@ -208,42 +224,50 @@ class TimeRangeSummary: for threadid, hostnodes in thread2hostnodes.items(): CPUTimeRange = collections.defaultdict(list) GPUTimeRange = collections.defaultdict( - lambda: collections.defaultdict(lambda: collections.defaultdict( - list))) # device_id/type/stream_id - for hostnode in hostnodes[1:]: #skip root node + lambda: collections.defaultdict( + lambda: collections.defaultdict(list) + ) + ) # device_id/type/stream_id + for hostnode in hostnodes[1:]: # skip root node CPUTimeRange[hostnode.type].append( - (hostnode.start_ns, hostnode.end_ns)) + (hostnode.start_ns, hostnode.end_ns) + ) self.call_times[hostnode.type] += 1 for runtimenode in hostnode.runtime_node: CPUTimeRange[runtimenode.type].append( - (runtimenode.start_ns, runtimenode.end_ns)) + (runtimenode.start_ns, runtimenode.end_ns) + ) self.call_times[runtimenode.type] += 1 for devicenode in runtimenode.device_node: GPUTimeRange[devicenode.device_id][devicenode.type][ - devicenode.stream_id].append( - (devicenode.start_ns, devicenode.end_ns)) + devicenode.stream_id + ].append((devicenode.start_ns, devicenode.end_ns)) self.call_times[devicenode.type] += 1 for event_type, time_ranges in CPUTimeRange.items(): time_ranges = merge_self_ranges(time_ranges, is_sorted=False) self.CPUTimeRange[event_type] = merge_ranges( - self.CPUTimeRange[event_type], time_ranges, is_sorted=True) + self.CPUTimeRange[event_type], time_ranges, is_sorted=True + ) for device_id, device_time_ranges in GPUTimeRange.items(): for event_type, event_time_ranges in device_time_ranges.items(): for stream_id, time_ranges in event_time_ranges.items(): - time_ranges = merge_self_ranges(time_ranges, - is_sorted=False) + time_ranges = merge_self_ranges( + time_ranges, is_sorted=False + ) self.GPUTimeRange[device_id][event_type] = merge_ranges( self.GPUTimeRange[device_id][event_type], time_ranges, - is_sorted=True) + is_sorted=True, + ) for event_type, time_ranges in self.CPUTimeRange.items(): self.CPUTimeRangeSum[event_type] = sum_ranges(time_ranges) for device_id, device_time_ranges in self.GPUTimeRange.items(): for event_type, time_ranges in device_time_ranges.items(): self.GPUTimeRangeSum[device_id][event_type] = sum_ranges( - time_ranges) + time_ranges + ) def get_gpu_devices(self): return self.GPUTimeRange.keys() @@ -276,57 +300,68 @@ class DistributedSummary: ''' thread2hostnodes = traverse_tree(nodetrees) for threadid, hostnodes in thread2hostnodes.items(): - for hostnode in hostnodes[1:]: #skip root node + for hostnode in hostnodes[1:]: # skip root node # case 1: TracerEventType is Communication if hostnode.type == TracerEventType.Communication: self.cpu_communication_range.append( - (hostnode.start_ns, hostnode.end_ns)) + (hostnode.start_ns, hostnode.end_ns) + ) device_nodes = get_device_nodes(hostnode) 
for device_node in device_nodes: if device_node.type == TracerEventType.Kernel: self.gpu_communication_range.append( - (device_node.start_ns, device_node.end_ns)) + (device_node.start_ns, device_node.end_ns) + ) - #case 2: TracerEventType is Operator but is communication op - elif hostnode.type == TracerEventType.Operator and any([ + # case 2: TracerEventType is Operator but is communication op + elif hostnode.type == TracerEventType.Operator and any( + [ name in hostnode.name.lower() for name in _CommunicationOpName - ]): + ] + ): self.cpu_communication_range.append( - (hostnode.start_ns, hostnode.end_ns)) + (hostnode.start_ns, hostnode.end_ns) + ) device_nodes = get_device_nodes(hostnode) for device_node in device_nodes: if device_node.type == TracerEventType.Kernel: self.gpu_communication_range.append( - (device_node.start_ns, device_node.end_ns)) + (device_node.start_ns, device_node.end_ns) + ) - #case 3: Others, filter kernels named with nccl + # case 3: Others, filter kernels named with nccl else: for runtimenode in hostnode.runtime_node: for devicenode in runtimenode.device_node: if devicenode.type == TracerEventType.Kernel: if 'nccl' in devicenode.name.lower(): self.gpu_communication_range.append( - (devicenode.start_ns, - devicenode.end_ns)) + (devicenode.start_ns, devicenode.end_ns) + ) else: self.computation_range.append( - (devicenode.start_ns, - devicenode.end_ns)) + (devicenode.start_ns, devicenode.end_ns) + ) self.cpu_calls = len(set(self.cpu_communication_range)) self.gpu_calls = len(set(self.gpu_communication_range)) self.cpu_communication_range = merge_self_ranges( - self.cpu_communication_range, is_sorted=False) + self.cpu_communication_range, is_sorted=False + ) self.gpu_communication_range = merge_self_ranges( - self.gpu_communication_range, is_sorted=False) - self.communication_range = merge_ranges(self.cpu_communication_range, - self.gpu_communication_range, - is_sorted=True) - self.computation_range = merge_self_ranges(self.computation_range, - is_sorted=False) - self.overlap_range = intersection_ranges(self.communication_range, - self.computation_range, - is_sorted=True) + self.gpu_communication_range, is_sorted=False + ) + self.communication_range = merge_ranges( + self.cpu_communication_range, + self.gpu_communication_range, + is_sorted=True, + ) + self.computation_range = merge_self_ranges( + self.computation_range, is_sorted=False + ) + self.overlap_range = intersection_ranges( + self.communication_range, self.computation_range, is_sorted=True + ) class EventSummary: @@ -335,7 +370,6 @@ class EventSummary: """ class DeviceItem: - def __init__(self, name): self.name = name self.call = 0 @@ -359,7 +393,6 @@ class EventSummary: self.add_gpu_time(node.end_ns - node.start_ns) class OperatorItem: - def __init__(self, name): self.name = name self.call = 0 @@ -420,7 +453,8 @@ class EventSummary: if child.type != TracerEventType.Operator: if child.name not in self.operator_inners: self.operator_inners[ - child.name] = EventSummary.OperatorItem(child.name) + child.name + ] = EventSummary.OperatorItem(child.name) self.operator_inners[child.name].add_item(child) for runtimenode in node.runtime_node: @@ -431,7 +465,6 @@ class EventSummary: self.devices[name].add_item(devicenode) class GeneralItem: - def __init__(self, name): self.name = name self.call = 0 @@ -490,10 +523,12 @@ class EventSummary: def __init__(self): self.items = {} # for operator summary self.thread_items = collections.defaultdict( - dict) # for operator summary + dict + ) # for operator summary 
self.userdefined_items = {} # for userdefined summary self.userdefined_thread_items = collections.defaultdict( - dict) # for userdefined summary + dict + ) # for userdefined summary self.model_perspective_items = {} # for model summary self.memory_manipulation_items = {} # for memory manipulation summary self.kernel_items = {} # for kernel summary @@ -503,19 +538,31 @@ class EventSummary: Analysis operator event in the nodetress. """ node_statistic_trees, thread2host_statistic_nodes = wrap_tree(nodetrees) - for threadid, host_statistic_nodes in thread2host_statistic_nodes.items( - ): + for ( + threadid, + host_statistic_nodes, + ) in thread2host_statistic_nodes.items(): for host_statistic_node in host_statistic_nodes[ - 1:]: #skip root node + 1: + ]: # skip root node if host_statistic_node.type == TracerEventType.Operator: self.add_operator_item(host_statistic_node) - if host_statistic_node.type == TracerEventType.UserDefined\ - or host_statistic_node.type == TracerEventType.PythonUserDefined: - if 'memcpy' in host_statistic_node.name.lower() or 'memorycopy' in host_statistic_node.name.lower()\ - or 'memset' in host_statistic_node.name.lower(): + if ( + host_statistic_node.type == TracerEventType.UserDefined + or host_statistic_node.type + == TracerEventType.PythonUserDefined + ): + if ( + 'memcpy' in host_statistic_node.name.lower() + or 'memorycopy' in host_statistic_node.name.lower() + or 'memset' in host_statistic_node.name.lower() + ): self.add_memory_manipulation_item(host_statistic_node) else: - if host_statistic_node.type == TracerEventType.PythonUserDefined: + if ( + host_statistic_node.type + == TracerEventType.PythonUserDefined + ): self.add_userdefined_item(host_statistic_node) self.add_kernel_item(host_statistic_nodes[0]) @@ -525,10 +572,15 @@ class EventSummary: while deque: current_node = deque.popleft() for child in current_node.children_node: - if child.type == TracerEventType.Forward or child.type == TracerEventType.Dataloader\ - or child.type == TracerEventType.Backward or child.type == TracerEventType.Optimization: + if ( + child.type == TracerEventType.Forward + or child.type == TracerEventType.Dataloader + or child.type == TracerEventType.Backward + or child.type == TracerEventType.Optimization + ): self.add_model_perspective_item( - child) #find first model perspective node + child + ) # find first model perspective node else: if child.type == TracerEventType.ProfileStep: self.add_model_perspective_item(child) @@ -537,40 +589,46 @@ class EventSummary: def add_operator_item(self, operator_node): if operator_node.name not in self.items: self.items[operator_node.name] = EventSummary.OperatorItem( - operator_node.name) + operator_node.name + ) self.items[operator_node.name].add_item(operator_node) if operator_node.name not in self.thread_items[operator_node.thread_id]: self.thread_items[operator_node.thread_id][ - operator_node.name] = EventSummary.OperatorItem( - operator_node.name) + operator_node.name + ] = EventSummary.OperatorItem(operator_node.name) self.thread_items[operator_node.thread_id][operator_node.name].add_item( - operator_node) + operator_node + ) def add_userdefined_item(self, userdefined_node): if userdefined_node.name not in self.userdefined_items: self.userdefined_items[ - userdefined_node.name] = EventSummary.GeneralItem( - userdefined_node.name) + userdefined_node.name + ] = EventSummary.GeneralItem(userdefined_node.name) self.userdefined_items[userdefined_node.name].add_item(userdefined_node) - if userdefined_node.name not in 
self.userdefined_thread_items[ - userdefined_node.thread_id]: + if ( + userdefined_node.name + not in self.userdefined_thread_items[userdefined_node.thread_id] + ): self.userdefined_thread_items[userdefined_node.thread_id][ - userdefined_node.name] = EventSummary.GeneralItem( - userdefined_node.name) + userdefined_node.name + ] = EventSummary.GeneralItem(userdefined_node.name) self.userdefined_thread_items[userdefined_node.thread_id][ - userdefined_node.name].add_item(userdefined_node) + userdefined_node.name + ].add_item(userdefined_node) def add_memory_manipulation_item(self, memory_manipulation_node): if memory_manipulation_node.name not in self.memory_manipulation_items: self.memory_manipulation_items[ - memory_manipulation_node.name] = EventSummary.GeneralItem( - memory_manipulation_node.name) + memory_manipulation_node.name + ] = EventSummary.GeneralItem(memory_manipulation_node.name) self.memory_manipulation_items[memory_manipulation_node.name].add_item( - memory_manipulation_node) + memory_manipulation_node + ) def add_model_perspective_item(self, model_perspective_node): if model_perspective_node.type == TracerEventType.Forward: @@ -605,7 +663,6 @@ class MemorySummary: """ class MemoryItem: - def __init__(self, event_name, place, memory_type='Allocated'): self.event_name = event_name self.place = place @@ -617,11 +674,17 @@ class MemorySummary: self.memory_type = memory_type def add_memory_record(self, size, allocation_type): - if allocation_type == TracerMemEventType.Allocate or allocation_type == TracerMemEventType.ReservedAllocate: + if ( + allocation_type == TracerMemEventType.Allocate + or allocation_type == TracerMemEventType.ReservedAllocate + ): self.allocation_count += 1 self.allocation_size += size - elif allocation_type == TracerMemEventType.Free or allocation_type == TracerMemEventType.ReservedFree: + elif ( + allocation_type == TracerMemEventType.Free + or allocation_type == TracerMemEventType.ReservedFree + ): self.free_count += 1 self.free_size -= size # size is sign(-) when free. 
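The summaries assembled in this file feed Profiler.summary(); for context, a typical end-to-end use of the profiler API touched in this diff looks roughly like the sketch below (the './profiler_log' directory name and step counts are arbitrary):

import paddle
import paddle.profiler as profiler

linear = paddle.nn.Linear(10, 10)

prof = profiler.Profiler(
    targets=[profiler.ProfilerTarget.CPU],
    scheduler=profiler.make_scheduler(closed=1, ready=1, record=2, repeat=1),
    on_trace_ready=profiler.export_chrome_tracing("./profiler_log"),
)
prof.start()
for _ in range(8):
    out = linear(paddle.rand([10, 10]))
    prof.step()  # advances the scheduler state machine shown in profiler.py
prof.stop()
prof.summary(sorted_by=profiler.SortedKeys.CPUTotal, time_unit="ms")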
@@ -631,35 +694,49 @@ class MemorySummary: def __init__(self): self.allocated_items = collections.defaultdict( - dict) # for memory summary, device type: event + dict + ) # for memory summary, device type: event self.reserved_items = collections.defaultdict( - dict) # for memory summary, device type: event + dict + ) # for memory summary, device type: event self.peak_allocation_values = collections.defaultdict(int) self.peak_reserved_values = collections.defaultdict(int) def _analyse_node_memory(self, event_name, node): for memnode in node.mem_node: # self mem node - if memnode.type == TracerMemEventType.Allocate or memnode.type == TracerMemEventType.Free: + if ( + memnode.type == TracerMemEventType.Allocate + or memnode.type == TracerMemEventType.Free + ): if event_name not in self.allocated_items[memnode.place]: - self.allocated_items[ - memnode.place][event_name] = MemorySummary.MemoryItem( - event_name, memnode.place, 'Allocated') - self.allocated_items[ - memnode.place][event_name].add_memory_record( - memnode.increase_bytes, memnode.type) - elif memnode.type == TracerMemEventType.ReservedAllocate or memnode.type == TracerMemEventType.ReservedFree: + self.allocated_items[memnode.place][ + event_name + ] = MemorySummary.MemoryItem( + event_name, memnode.place, 'Allocated' + ) + self.allocated_items[memnode.place][ + event_name + ].add_memory_record(memnode.increase_bytes, memnode.type) + elif ( + memnode.type == TracerMemEventType.ReservedAllocate + or memnode.type == TracerMemEventType.ReservedFree + ): if event_name not in self.reserved_items[memnode.place]: - self.reserved_items[ - memnode.place][event_name] = MemorySummary.MemoryItem( - event_name, memnode.place, 'Reserved') - self.reserved_items[ - memnode.place][event_name].add_memory_record( - memnode.increase_bytes, memnode.type) + self.reserved_items[memnode.place][ + event_name + ] = MemorySummary.MemoryItem( + event_name, memnode.place, 'Reserved' + ) + self.reserved_items[memnode.place][ + event_name + ].add_memory_record(memnode.increase_bytes, memnode.type) self.peak_allocation_values[memnode.place] = max( self.peak_allocation_values[memnode.place], - memnode.peak_allocated) + memnode.peak_allocated, + ) self.peak_reserved_values[memnode.place] = max( - self.peak_reserved_values[memnode.place], memnode.peak_reserved) + self.peak_reserved_values[memnode.place], memnode.peak_reserved + ) def parse(self, nodetrees): r""" @@ -667,7 +744,7 @@ class MemorySummary: """ thread2hostnodes = traverse_tree(nodetrees) for threadid, host_nodes in thread2hostnodes.items(): - for host_node in host_nodes[1:]: #skip root node + for host_node in host_nodes[1:]: # skip root node if host_node.type == TracerEventType.OperatorInner: continue if host_node.type == TracerEventType.Operator: @@ -694,16 +771,19 @@ class StatisticData: self.memory_summary.parse(node_trees) -def _build_table(statistic_data, - sorted_by=SortedKeys.CPUTotal, - op_detail=True, - thread_sep=False, - time_unit='ms', - row_limit=100, - max_src_column_width=75, - views=None): +def _build_table( + statistic_data, + sorted_by=SortedKeys.CPUTotal, + op_detail=True, + thread_sep=False, + time_unit='ms', + row_limit=100, + max_src_column_width=75, + views=None, +): from .profiler import SummaryView + """Prints a summary of events.""" # format table row SPACING_SIZE = 2 @@ -712,8 +792,9 @@ def _build_table(statistic_data, line_length_list = [-SPACING_SIZE] def add_column(padding, text_dir='<'): - row_format_list[0] += '{: ' + text_dir + str(padding) + '}' + ( - ' ' * SPACING_SIZE) 
+ row_format_list[0] += ( + '{: ' + text_dir + str(padding) + '}' + (' ' * SPACING_SIZE) + ) header_sep_list[0] += '-' * padding + (' ' * SPACING_SIZE) line_length_list[0] += padding + SPACING_SIZE @@ -751,7 +832,8 @@ def _build_table(statistic_data, return '{}{:.2f}'.format(' ' * indent, ratio * 100) total_time = statistic_data.time_range_summary.get_cpu_range_sum( - TracerEventType.ProfileStep) + TracerEventType.ProfileStep + ) if views is None or SummaryView.DeviceView in views: @@ -776,19 +858,23 @@ def _build_table(statistic_data, row_values = [ 'CPU(Process)', format_ratio( - float(statistic_data.extra_info['Process Cpu Utilization'])) + float(statistic_data.extra_info['Process Cpu Utilization']) + ), ] append(row_format.format(*row_values)) row_values = [ 'CPU(System)', format_ratio( - float(statistic_data.extra_info['System Cpu Utilization'])) + float(statistic_data.extra_info['System Cpu Utilization']) + ), ] append(row_format.format(*row_values)) for gpu_name in statistic_data.time_range_summary.get_gpu_devices(): gpu_time = float( statistic_data.time_range_summary.get_gpu_range_sum( - gpu_name, TracerEventType.Kernel)) + gpu_name, TracerEventType.Kernel + ) + ) utilization = gpu_time / total_time row_values = ['GPU{}'.format(gpu_name), format_ratio(utilization)] append(row_format.format(*row_values)) @@ -797,7 +883,8 @@ def _build_table(statistic_data, append( "Note:\nCPU(Process) Utilization = Current process CPU time over all cpu cores / elapsed time, so max utilization can be reached 100% * number of cpu cores.\n" "CPU(System) Utilization = All processes CPU time over all cpu cores(busy time) / (busy time + idle time).\n" - "GPU Utilization = Current process GPU time / elapsed time.") + "GPU Utilization = Current process GPU time / elapsed time." + ) append('-' * line_length) append('') append('') @@ -833,54 +920,71 @@ def _build_table(statistic_data, cpu_call_times.update(statistic_data.time_range_summary.call_times) gpu_call_times.update(statistic_data.time_range_summary.call_times) - for event_type, value in statistic_data.time_range_summary.CPUTimeRangeSum.items( - ): + for ( + event_type, + value, + ) in statistic_data.time_range_summary.CPUTimeRangeSum.items(): if event_type != TracerEventType.Communication: cpu_type_time[event_type] = value if statistic_data.distributed_summary.cpu_communication_range: cpu_type_time[TracerEventType.Communication] = sum_ranges( - statistic_data.distributed_summary.cpu_communication_range) + statistic_data.distributed_summary.cpu_communication_range + ) cpu_call_times[ - TracerEventType. 
- Communication] = statistic_data.distributed_summary.cpu_calls + TracerEventType.Communication + ] = statistic_data.distributed_summary.cpu_calls for event_type in [ - TracerEventType.Dataloader, TracerEventType.Forward, - TracerEventType.Backward, TracerEventType.Optimization + TracerEventType.Dataloader, + TracerEventType.Forward, + TracerEventType.Backward, + TracerEventType.Optimization, ]: event_type_name = str(event_type).split('.')[1] - if event_type in cpu_call_times and event_type_name in statistic_data.event_summary.model_perspective_items: + if ( + event_type in cpu_call_times + and event_type_name + in statistic_data.event_summary.model_perspective_items + ): cpu_call_times[ - event_type] = statistic_data.event_summary.model_perspective_items[ - event_type_name].call + event_type + ] = statistic_data.event_summary.model_perspective_items[ + event_type_name + ].call cpu_type_time[ - event_type] = statistic_data.event_summary.model_perspective_items[ - event_type_name].cpu_time + event_type + ] = statistic_data.event_summary.model_perspective_items[ + event_type_name + ].cpu_time gpu_time_range = collections.defaultdict(list) - for device_id, device_time_ranges in statistic_data.time_range_summary.GPUTimeRange.items( - ): + for ( + device_id, + device_time_ranges, + ) in statistic_data.time_range_summary.GPUTimeRange.items(): for event_type, time_range in device_time_ranges.items(): gpu_time_range[event_type] = merge_ranges( - gpu_time_range[event_type], time_range, is_sorted=True) + gpu_time_range[event_type], time_range, is_sorted=True + ) for event_type, time_range in gpu_time_range.items(): gpu_type_time[event_type] = sum_ranges(time_range) if statistic_data.distributed_summary.gpu_communication_range: gpu_type_time[TracerEventType.Communication] = sum_ranges( - statistic_data.distributed_summary.gpu_communication_range) + statistic_data.distributed_summary.gpu_communication_range + ) gpu_call_times[ - TracerEventType. 
- Communication] = statistic_data.distributed_summary.gpu_calls + TracerEventType.Communication + ] = statistic_data.distributed_summary.gpu_calls - sorted_items = sorted(cpu_type_time.items(), - key=lambda x: x[1], - reverse=True) + sorted_items = sorted( + cpu_type_time.items(), key=lambda x: x[1], reverse=True + ) event_type, time = sorted_items[0] row_values = [ '{}'.format(str(event_type).split('.')[1]), cpu_call_times[event_type], format_time(time, unit=time_unit), - format_ratio(float(time) / total_time) + format_ratio(float(time) / total_time), ] append(row_format.format(*row_values)) for event_type, time in sorted_items[1:]: @@ -888,7 +992,7 @@ def _build_table(statistic_data, ' {}'.format(str(event_type).split('.')[1]), cpu_call_times[event_type], format_time(time, unit=time_unit), - format_ratio(float(time) / total_time) + format_ratio(float(time) / total_time), ] append(row_format.format(*row_values)) append(header_sep) @@ -900,7 +1004,7 @@ def _build_table(statistic_data, ' {}'.format(str(event_type).split('.')[1]), gpu_call_times[event_type], format_time(time, unit=time_unit), - format_ratio(float(time) / total_time) + format_ratio(float(time) / total_time), ] append(row_format.format(*row_values)) @@ -916,7 +1020,8 @@ def _build_table(statistic_data, "Thread 2:\n" " Operator: |____________| |___|\n" "After merged:\n" - " Result: |______________| |__________|\n") + " Result: |______________| |__________|\n" + ) append('-' * line_length) append('') append('') @@ -924,16 +1029,24 @@ def _build_table(statistic_data, if views is None or SummaryView.ModelView in views: ###### Print Model Summary Report ###### - model_perspective_items = statistic_data.event_summary.model_perspective_items + model_perspective_items = ( + statistic_data.event_summary.model_perspective_items + ) if len(model_perspective_items) > 1: all_row_values = [] accmulation_time = 0 gpu_accmulation_time = 0 - gpu_total_time = statistic_data.event_summary.model_perspective_items[ - 'ProfileStep'].gpu_time + gpu_total_time = ( + statistic_data.event_summary.model_perspective_items[ + 'ProfileStep' + ].gpu_time + ) for name in [ - 'ProfileStep', 'Dataloader', 'Forward', 'Backward', - 'Optimization' + 'ProfileStep', + 'Dataloader', + 'Forward', + 'Backward', + 'Optimization', ]: if name in model_perspective_items: item = model_perspective_items[name] @@ -941,22 +1054,28 @@ def _build_table(statistic_data, gpu_ratio = 0 else: gpu_ratio = float(item.gpu_time) / gpu_total_time - name = '{}'.format( - name) if 'ProfileStep' in name else ' {}'.format(name) + name = ( + '{}'.format(name) + if 'ProfileStep' in name + else ' {}'.format(name) + ) row_values = [ - '{}'.format(name), item.call, + '{}'.format(name), + item.call, '{} / {} / {} / {} / {}'.format( format_time(item.cpu_time, unit=time_unit), format_time(item.avg_cpu_time, unit=time_unit), format_time(item.max_cpu_time, unit=time_unit), format_time(item.min_cpu_time, unit=time_unit), - format_ratio(float(item.cpu_time) / total_time)), + format_ratio(float(item.cpu_time) / total_time), + ), '{} / {} / {} / {} / {}'.format( format_time(item.gpu_time, unit=time_unit), format_time(item.avg_gpu_time, unit=time_unit), format_time(item.max_gpu_time, unit=time_unit), format_time(item.min_gpu_time, unit=time_unit), - format_ratio(gpu_ratio)) + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) if 'ProfileStep' not in name: @@ -970,12 +1089,16 @@ def _build_table(statistic_data, else: gpu_ratio = float(other_gpu_time) / gpu_total_time row_values = [ - ' 
Others', '-', '{} / - / - / - / {}'.format( + ' Others', + '-', + '{} / - / - / - / {}'.format( format_time(other_time, unit=time_unit), - format_ratio(float(other_time) / total_time)), + format_ratio(float(other_time) / total_time), + ), '{} / - / - / - / {}'.format( format_time(other_gpu_time, unit=time_unit), - format_ratio(gpu_ratio)) + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) # Calculate the column width @@ -983,16 +1106,20 @@ def _build_table(statistic_data, cpu_data_description_width = 40 gpu_data_description_width = 40 for row_values in all_row_values: - if isinstance(row_values[1], - int) and len(str(row_values[1])) > calltime_width: + if ( + isinstance(row_values[1], int) + and len(str(row_values[1])) > calltime_width + ): calltime_width = len(str(row_values[1])) if len(row_values[2]) > cpu_data_description_width: cpu_data_description_width = len(row_values[2]) if len(row_values[3]) > gpu_data_description_width: gpu_data_description_width = len(row_values[3]) headers = [ - 'Name', 'Calls', 'CPU Total / Avg / Max / Min / Ratio(%)', - 'GPU Total / Avg / Max / Min / Ratio(%)' + 'Name', + 'Calls', + 'CPU Total / Avg / Max / Min / Ratio(%)', + 'GPU Total / Avg / Max / Min / Ratio(%)', ] row_format_list = [""] header_sep_list = [""] @@ -1052,35 +1179,38 @@ def _build_table(statistic_data, append(row_format.format(*headers)) append(header_sep) communication_time = sum_ranges( - statistic_data.distributed_summary.communication_range) + statistic_data.distributed_summary.communication_range + ) computation_time = sum_ranges( - statistic_data.distributed_summary.computation_range) + statistic_data.distributed_summary.computation_range + ) overlap_time = sum_ranges( - statistic_data.distributed_summary.overlap_range) + statistic_data.distributed_summary.overlap_range + ) row_values = [ 'ProfileStep', format_time(total_time, unit=time_unit), - format_ratio(float(total_time) / total_time) + format_ratio(float(total_time) / total_time), ] append(row_format.format(*row_values)) row_values = [ ' Communication', format_time(communication_time, unit=time_unit), - format_ratio(float(communication_time) / total_time) + format_ratio(float(communication_time) / total_time), ] append(row_format.format(*row_values)) row_values = [ ' Computation', format_time(computation_time, unit=time_unit), - format_ratio(float(computation_time) / total_time) + format_ratio(float(computation_time) / total_time), ] append(row_format.format(*row_values)) row_values = [ ' Overlap', format_time(overlap_time, unit=time_unit), - format_ratio(float(overlap_time) / total_time) + format_ratio(float(overlap_time) / total_time), ] append(row_format.format(*row_values)) append(header_sep) @@ -1095,7 +1225,8 @@ def _build_table(statistic_data, " Total: |_________________| |______________|\n" "Computation time(Kernel):\n" " GPU: |________________|\n" - "Overlap time: |___________|\n") + "Overlap time: |___________|\n" + ) append('-' * line_length) append('') append('') @@ -1115,37 +1246,47 @@ def _build_table(statistic_data, for thread_id, items in thread_items.items(): all_row_values.append("Thread: {}".format(thread_id)) if sorted_by == SortedKeys.CPUTotal: - sorted_items = sorted(items.items(), - key=lambda x: x[1].cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), key=lambda x: x[1].cpu_time, reverse=True + ) elif sorted_by == SortedKeys.CPUAvg: - sorted_items = sorted(items.items(), - key=lambda x: x[1].avg_cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + 
key=lambda x: x[1].avg_cpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.CPUMax: - sorted_items = sorted(items.items(), - key=lambda x: x[1].max_cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + key=lambda x: x[1].max_cpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.CPUMin: - sorted_items = sorted(items.items(), - key=lambda x: x[1].min_cpu_time) + sorted_items = sorted( + items.items(), key=lambda x: x[1].min_cpu_time + ) elif sorted_by == SortedKeys.GPUTotal: - sorted_items = sorted(items.items(), - key=lambda x: x[1].general_gpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + key=lambda x: x[1].general_gpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.GPUAvg: sorted_items = sorted( items.items(), key=lambda x: x[1].avg_general_gpu_time, - reverse=True) + reverse=True, + ) elif sorted_by == SortedKeys.GPUMax: sorted_items = sorted( items.items(), key=lambda x: x[1].max_general_gpu_time, - reverse=True) + reverse=True, + ) elif sorted_by == SortedKeys.GPUMin: sorted_items = sorted( - items.items(), key=lambda x: x[1].min_general_gpu_time) + items.items(), key=lambda x: x[1].min_general_gpu_time + ) total_op_cpu_time = 0 total_op_gpu_time = 0 @@ -1161,124 +1302,177 @@ def _build_table(statistic_data, if total_op_gpu_time == 0: gpu_ratio = 0 else: - gpu_ratio = float( - item.general_gpu_time) / total_op_gpu_time + gpu_ratio = ( + float(item.general_gpu_time) / total_op_gpu_time + ) row_values = [ - name, item.call, '{} / {} / {} / {} / {}'.format( + name, + item.call, + '{} / {} / {} / {} / {}'.format( format_time(item.cpu_time, unit=time_unit), format_time(item.avg_cpu_time, unit=time_unit), format_time(item.max_cpu_time, unit=time_unit), format_time(item.min_cpu_time, unit=time_unit), - format_ratio(cpu_ratio)), + format_ratio(cpu_ratio), + ), '{} / {} / {} / {} / {}'.format( format_time(item.general_gpu_time, unit=time_unit), - format_time(item.avg_general_gpu_time, - unit=time_unit), - format_time(item.max_general_gpu_time, - unit=time_unit), - format_time(item.min_general_gpu_time, - unit=time_unit), - format_ratio(gpu_ratio)) + format_time( + item.avg_general_gpu_time, unit=time_unit + ), + format_time( + item.max_general_gpu_time, unit=time_unit + ), + format_time( + item.min_general_gpu_time, unit=time_unit + ), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) if op_detail: - for innerop_name, innerop_node in item.operator_inners.items( - ): + for ( + innerop_name, + innerop_node, + ) in item.operator_inners.items(): if item.cpu_time == 0: cpu_ratio = 0 else: - cpu_ratio = float( - innerop_node.cpu_time) / item.cpu_time + cpu_ratio = ( + float(innerop_node.cpu_time) / item.cpu_time + ) if item.general_gpu_time == 0: gpu_ratio = 0 else: - gpu_ratio = float(innerop_node.general_gpu_time - ) / item.general_gpu_time + gpu_ratio = ( + float(innerop_node.general_gpu_time) + / item.general_gpu_time + ) if len(innerop_name) + 2 > name_column_width: - innerop_name = innerop_name[:name_column_width - - 5] + innerop_name = innerop_name[ + : name_column_width - 5 + ] innerop_name += "..." 
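# (Editor's note: descriptive aside, not part of the patch.) This block emits the
# operator-level rows of the summary table: each operator gets a
# "Calls, CPU Total/Avg/Max/Min/Ratio(%), GPU Total/Avg/Max/Min/Ratio(%)" row, and with
# op_detail enabled its inner ops and the device kernels they launch appear as indented
# sub-rows. Over-long names are clipped to the name column (as just above) and suffixed
# with "...", and the Ratio(%) of a sub-row is computed against its parent operator's
# time rather than against the whole profiling step.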
row_values = [ - ' {}'.format(innerop_name), innerop_node.call, + ' {}'.format(innerop_name), + innerop_node.call, '{} / {} / {} / {} / {}'.format( - format_time(innerop_node.cpu_time, - unit=time_unit), - format_time(innerop_node.avg_cpu_time, - unit=time_unit), - format_time(innerop_node.max_cpu_time, - unit=time_unit), - format_time(innerop_node.min_cpu_time, - unit=time_unit), - format_ratio(cpu_ratio)), + format_time( + innerop_node.cpu_time, unit=time_unit + ), + format_time( + innerop_node.avg_cpu_time, + unit=time_unit, + ), + format_time( + innerop_node.max_cpu_time, + unit=time_unit, + ), + format_time( + innerop_node.min_cpu_time, + unit=time_unit, + ), + format_ratio(cpu_ratio), + ), '{} / {} / {} / {} / {}'.format( - format_time(innerop_node.general_gpu_time, - unit=time_unit), + format_time( + innerop_node.general_gpu_time, + unit=time_unit, + ), format_time( innerop_node.avg_general_gpu_time, - unit=time_unit), + unit=time_unit, + ), format_time( innerop_node.max_general_gpu_time, - unit=time_unit), + unit=time_unit, + ), format_time( innerop_node.min_general_gpu_time, - unit=time_unit), - format_ratio(gpu_ratio)) + unit=time_unit, + ), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) - for device_node_name, device_node in innerop_node.devices.items( - ): + for ( + device_node_name, + device_node, + ) in innerop_node.devices.items(): if innerop_node.general_gpu_time == 0: gpu_ratio = 0 else: - gpu_ratio = float( - device_node.gpu_time - ) / innerop_node.general_gpu_time - if len(device_node_name - ) + 4 > name_column_width: - device_node_name = device_node_name[: - name_column_width - - 7] + gpu_ratio = ( + float(device_node.gpu_time) + / innerop_node.general_gpu_time + ) + if ( + len(device_node_name) + 4 + > name_column_width + ): + device_node_name = device_node_name[ + : name_column_width - 7 + ] device_node_name += "..." row_values = [ ' {}'.format(device_node_name), - device_node.call, '- / - / - / - / -', + device_node.call, + '- / - / - / - / -', '{} / {} / {} / {} / {}'.format( - format_time(device_node.gpu_time, - unit=time_unit), - format_time(device_node.avg_gpu_time, - unit=time_unit), - format_time(device_node.max_gpu_time, - unit=time_unit), - format_time(device_node.min_gpu_time, - unit=time_unit), - format_ratio(gpu_ratio)) + format_time( + device_node.gpu_time, unit=time_unit + ), + format_time( + device_node.avg_gpu_time, + unit=time_unit, + ), + format_time( + device_node.max_gpu_time, + unit=time_unit, + ), + format_time( + device_node.min_gpu_time, + unit=time_unit, + ), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) - for device_node_name, device_node in item.devices.items( - ): + for ( + device_node_name, + device_node, + ) in item.devices.items(): if item.general_gpu_time == 0: gpu_ratio = 0 else: - gpu_ratio = float(device_node.gpu_time - ) / item.general_gpu_time + gpu_ratio = ( + float(device_node.gpu_time) + / item.general_gpu_time + ) if len(device_node_name) + 2 > name_column_width: - device_node_name = device_node_name[: - name_column_width - - 5] + device_node_name = device_node_name[ + : name_column_width - 5 + ] device_node_name += "..." 
row_values = [ ' {}'.format(device_node_name), - device_node.call, '- / - / - / - / -', + device_node.call, + '- / - / - / - / -', '{} / {} / {} / {} / {}'.format( - format_time(device_node.gpu_time, - unit=time_unit), - format_time(device_node.avg_gpu_time, - unit=time_unit), - format_time(device_node.max_gpu_time, - unit=time_unit), - format_time(device_node.min_gpu_time, - unit=time_unit), - format_ratio(gpu_ratio)) + format_time( + device_node.gpu_time, unit=time_unit + ), + format_time( + device_node.avg_gpu_time, unit=time_unit + ), + format_time( + device_node.max_gpu_time, unit=time_unit + ), + format_time( + device_node.min_gpu_time, unit=time_unit + ), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) # Calculate the column width @@ -1288,16 +1482,20 @@ def _build_table(statistic_data, for row_values in all_row_values: if isinstance(row_values, str): continue - if isinstance(row_values[1], - int) and len(str(row_values[1])) > calltime_width: + if ( + isinstance(row_values[1], int) + and len(str(row_values[1])) > calltime_width + ): calltime_width = len(str(row_values[1])) if len(row_values[2]) > cpu_data_description_width: cpu_data_description_width = len(row_values[2]) if len(row_values[3]) > gpu_data_description_width: gpu_data_description_width = len(row_values[3]) headers = [ - 'Name', 'Calls', 'CPU Total / Avg / Max / Min / Ratio(%)', - 'GPU Total / Avg / Max / Min / Ratio(%)' + 'Name', + 'Calls', + 'CPU Total / Avg / Max / Min / Ratio(%)', + 'GPU Total / Avg / Max / Min / Ratio(%)', ] row_format_list = [""] header_sep_list = [""] @@ -1333,20 +1531,27 @@ def _build_table(statistic_data, all_row_values = [] kernel_items = statistic_data.event_summary.kernel_items if sorted_by == SortedKeys.GPUAvg: - sorted_items = sorted(kernel_items.items(), - key=lambda x: x[1].avg_gpu_time, - reverse=True) + sorted_items = sorted( + kernel_items.items(), + key=lambda x: x[1].avg_gpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.GPUMax: - sorted_items = sorted(kernel_items.items(), - key=lambda x: x[1].max_gpu_time, - reverse=True) + sorted_items = sorted( + kernel_items.items(), + key=lambda x: x[1].max_gpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.GPUMin: - sorted_items = sorted(kernel_items.items(), - key=lambda x: x[1].min_gpu_time) + sorted_items = sorted( + kernel_items.items(), key=lambda x: x[1].min_gpu_time + ) else: - sorted_items = sorted(kernel_items.items(), - key=lambda x: x[1].gpu_time, - reverse=True) + sorted_items = sorted( + kernel_items.items(), + key=lambda x: x[1].gpu_time, + reverse=True, + ) total_kernel_gpu_time = 0 for name, item in sorted_items: @@ -1364,20 +1569,25 @@ def _build_table(statistic_data, format_time(item.avg_gpu_time, unit=time_unit), format_time(item.max_gpu_time, unit=time_unit), format_time(item.min_gpu_time, unit=time_unit), - format_ratio(gpu_ratio)), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) headers = [ - 'Name', 'Calls', 'GPU Total / Avg / Max / Min / Ratio(%)' + 'Name', + 'Calls', + 'GPU Total / Avg / Max / Min / Ratio(%)', ] # Calculate the column width name_column_width = 90 calltime_width = 6 gpu_data_description_width = 40 for row_values in all_row_values: - if isinstance(row_values[1], - int) and len(str(row_values[1])) > calltime_width: + if ( + isinstance(row_values[1], int) + and len(str(row_values[1])) > calltime_width + ): calltime_width = len(str(row_values[1])) if len(row_values[2]) > gpu_data_description_width: gpu_data_description_width = len(row_values[2]) @@ 
-1407,7 +1617,7 @@ def _build_table(statistic_data, else: name = row_values[0] if len(name) > name_column_width: - row_values[0] = name[:name_column_width - 3] + '...' + row_values[0] = name[: name_column_width - 3] + '...' else: row_values[0] = name append(row_format.format(*row_values)) @@ -1420,9 +1630,14 @@ def _build_table(statistic_data, ###### Print Memory Manipulation Summary Report ###### if statistic_data.event_summary.memory_manipulation_items: all_row_values = [] - memory_manipulation_items = statistic_data.event_summary.memory_manipulation_items - gpu_total_time = statistic_data.event_summary.model_perspective_items[ - 'ProfileStep'].general_gpu_time + memory_manipulation_items = ( + statistic_data.event_summary.memory_manipulation_items + ) + gpu_total_time = ( + statistic_data.event_summary.model_perspective_items[ + 'ProfileStep' + ].general_gpu_time + ) for name, item in memory_manipulation_items.items(): if gpu_total_time == 0: gpu_ratio = 0 @@ -1436,19 +1651,23 @@ def _build_table(statistic_data, format_time(item.avg_cpu_time, unit=time_unit), format_time(item.max_cpu_time, unit=time_unit), format_time(item.min_cpu_time, unit=time_unit), - format_ratio(float(item.cpu_time) / total_time)), + format_ratio(float(item.cpu_time) / total_time), + ), '{} / {} / {} / {} / {}'.format( format_time(item.general_gpu_time, unit=time_unit), format_time(item.avg_general_gpu_time, unit=time_unit), format_time(item.max_general_gpu_time, unit=time_unit), format_time(item.min_general_gpu_time, unit=time_unit), - format_ratio(gpu_ratio)), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) headers = [ - 'Name', 'Calls', 'CPU Total / Avg / Max / Min / Ratio(%)', - 'GPU Total / Avg / Max / Min / Ratio(%)' + 'Name', + 'Calls', + 'CPU Total / Avg / Max / Min / Ratio(%)', + 'GPU Total / Avg / Max / Min / Ratio(%)', ] # Calculate the column width name_column_width = 0 @@ -1458,8 +1677,10 @@ def _build_table(statistic_data, for row_values in all_row_values: if len(row_values[0]) > name_column_width: name_column_width = len(row_values[0]) - if isinstance(row_values[1], - int) and len(str(row_values[1])) > calltime_width: + if ( + isinstance(row_values[1], int) + and len(str(row_values[1])) > calltime_width + ): calltime_width = len(str(row_values[1])) if len(row_values[2]) > cpu_data_description_width: cpu_data_description_width = len(row_values[2]) @@ -1495,56 +1716,71 @@ def _build_table(statistic_data, ###### Print UserDefined Summary Report ###### if statistic_data.event_summary.userdefined_items: all_row_values = [] - gpu_total_time = statistic_data.event_summary.model_perspective_items[ - 'ProfileStep'].general_gpu_time + gpu_total_time = ( + statistic_data.event_summary.model_perspective_items[ + 'ProfileStep' + ].general_gpu_time + ) if thread_sep == True: - userdefined_thread_items = statistic_data.event_summary.userdefined_thread_items + userdefined_thread_items = ( + statistic_data.event_summary.userdefined_thread_items + ) else: userdefined_thread_items = { - 'All threads merged': - statistic_data.event_summary.userdefined_items + 'All threads merged': statistic_data.event_summary.userdefined_items } for thread_id, items in userdefined_thread_items.items(): all_row_values.append("Thread: {}".format(thread_id)) if sorted_by == SortedKeys.CPUTotal: - sorted_items = sorted(items.items(), - key=lambda x: x[1].cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), key=lambda x: x[1].cpu_time, reverse=True + ) elif sorted_by == SortedKeys.CPUAvg: - sorted_items = 
sorted(items.items(), - key=lambda x: x[1].avg_cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + key=lambda x: x[1].avg_cpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.CPUMax: - sorted_items = sorted(items.items(), - key=lambda x: x[1].max_cpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + key=lambda x: x[1].max_cpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.CPUMin: - sorted_items = sorted(items.items(), - key=lambda x: x[1].min_cpu_time) + sorted_items = sorted( + items.items(), key=lambda x: x[1].min_cpu_time + ) elif sorted_by == SortedKeys.GPUTotal: - sorted_items = sorted(items.items(), - key=lambda x: x[1].general_gpu_time, - reverse=True) + sorted_items = sorted( + items.items(), + key=lambda x: x[1].general_gpu_time, + reverse=True, + ) elif sorted_by == SortedKeys.GPUAvg: sorted_items = sorted( items.items(), key=lambda x: x[1].avg_general_gpu_time, - reverse=True) + reverse=True, + ) elif sorted_by == SortedKeys.GPUMax: sorted_items = sorted( items.items(), key=lambda x: x[1].max_general_gpu_time, - reverse=True) + reverse=True, + ) elif sorted_by == SortedKeys.GPUMin: sorted_items = sorted( - items.items(), key=lambda x: x[1].min_general_gpu_time) + items.items(), key=lambda x: x[1].min_general_gpu_time + ) for name, item in sorted_items: if gpu_total_time == 0: gpu_ratio = 0 else: - gpu_ratio = float( - item.general_gpu_time) / gpu_total_time + gpu_ratio = ( + float(item.general_gpu_time) / gpu_total_time + ) row_values = [ name, item.call, @@ -1553,16 +1789,21 @@ def _build_table(statistic_data, format_time(item.avg_cpu_time, unit=time_unit), format_time(item.max_cpu_time, unit=time_unit), format_time(item.min_cpu_time, unit=time_unit), - format_ratio(float(item.cpu_time) / total_time)), + format_ratio(float(item.cpu_time) / total_time), + ), '{} / {} / {} / {} / {}'.format( format_time(item.general_gpu_time, unit=time_unit), - format_time(item.avg_general_gpu_time, - unit=time_unit), - format_time(item.max_general_gpu_time, - unit=time_unit), - format_time(item.min_general_gpu_time, - unit=time_unit), - format_ratio(gpu_ratio)), + format_time( + item.avg_general_gpu_time, unit=time_unit + ), + format_time( + item.max_general_gpu_time, unit=time_unit + ), + format_time( + item.min_general_gpu_time, unit=time_unit + ), + format_ratio(gpu_ratio), + ), ] all_row_values.append(row_values) @@ -1576,8 +1817,10 @@ def _build_table(statistic_data, continue if len(row_values[0]) > name_column_width: name_column_width = len(row_values[0]) - if isinstance(row_values[1], - int) and len(str(row_values[1])) > calltime_width: + if ( + isinstance(row_values[1], int) + and len(str(row_values[1])) > calltime_width + ): calltime_width = len(str(row_values[1])) if len(row_values[2]) > cpu_data_description_width: cpu_data_description_width = len(row_values[2]) @@ -1585,8 +1828,10 @@ def _build_table(statistic_data, gpu_data_description_width = len(row_values[3]) headers = [ - 'Name', 'Calls', 'CPU Total / Avg / Max / Min / Ratio(%)', - 'GPU Total / Avg / Max / Min / Ratio(%)' + 'Name', + 'Calls', + 'CPU Total / Avg / Max / Min / Ratio(%)', + 'GPU Total / Avg / Max / Min / Ratio(%)', ] row_format_list = [""] header_sep_list = [""] @@ -1618,39 +1863,61 @@ def _build_table(statistic_data, if views is None or SummaryView.MemoryView in views: ###### Print Memory Summary Report ###### - if statistic_data.memory_summary.allocated_items or statistic_data.memory_summary.reserved_items: - for device_type, memory_events in 
statistic_data.memory_summary.allocated_items.items( - ): + if ( + statistic_data.memory_summary.allocated_items + or statistic_data.memory_summary.reserved_items + ): + for ( + device_type, + memory_events, + ) in statistic_data.memory_summary.allocated_items.items(): all_row_values = [] - sorted_items = sorted(memory_events.items(), - key=lambda x: x[1].increase_size, - reverse=True) + sorted_items = sorted( + memory_events.items(), + key=lambda x: x[1].increase_size, + reverse=True, + ) for event_name, item in sorted_items: row_values = [ - event_name, item.memory_type, item.allocation_count, - item.free_count, item.allocation_size, item.free_size, - item.increase_size + event_name, + item.memory_type, + item.allocation_count, + item.free_count, + item.allocation_size, + item.free_size, + item.increase_size, ] all_row_values.append(row_values) sorted_reserved_items = sorted( - statistic_data.memory_summary.reserved_items[device_type]. - items(), + statistic_data.memory_summary.reserved_items[ + device_type + ].items(), key=lambda x: x[1].increase_size, - reverse=True) + reverse=True, + ) for event_name, item in sorted_reserved_items: row_values = [ - event_name, item.memory_type, item.allocation_count, - item.free_count, item.allocation_size, item.free_size, - item.increase_size + event_name, + item.memory_type, + item.allocation_count, + item.free_count, + item.allocation_size, + item.free_size, + item.increase_size, ] all_row_values.append(row_values) # Calculate the column width headers = [ - 'Name', 'Type', 'Allocation Count', 'Free Count', - 'Allocation Size', 'Free Size', 'Increased Size' + 'Name', + 'Type', + 'Allocation Count', + 'Free Count', + 'Allocation Size', + 'Free Size', + 'Increased Size', ] row_format_list = [""] header_sep_list = [""] @@ -1671,14 +1938,24 @@ def _build_table(statistic_data, # construct table string append( - add_title(line_length, - "Memory Summary - {}".format(device_type))) - append('Peak Allocated Memory: {}'.format( - statistic_data.memory_summary. - peak_allocation_values[device_type])) - append('Peak Reserved Memory: {}'.format( - statistic_data.memory_summary. 
- peak_reserved_values[device_type])) + add_title( + line_length, "Memory Summary - {}".format(device_type) + ) + ) + append( + 'Peak Allocated Memory: {}'.format( + statistic_data.memory_summary.peak_allocation_values[ + device_type + ] + ) + ) + append( + 'Peak Reserved Memory: {}'.format( + statistic_data.memory_summary.peak_reserved_values[ + device_type + ] + ) + ) append(header_sep) append(row_format.format(*headers)) append(header_sep) diff --git a/python/paddle/profiler/statistic_helper.py b/python/paddle/profiler/statistic_helper.py index b6925a2b774820d863456b316c6dfdf5c2f1cb73..13ce42d7d73d6d1a44123afc3c3ed3ccd67c7aeb 100644 --- a/python/paddle/profiler/statistic_helper.py +++ b/python/paddle/profiler/statistic_helper.py @@ -16,7 +16,7 @@ def sum_ranges(ranges): result = 0 for time_range in ranges: - result += (time_range[1] - time_range[0]) + result += time_range[1] - time_range[0] return result @@ -30,11 +30,14 @@ def merge_self_ranges(src_ranges, is_sorted=False): for cur_indx in range(1, len(src_ranges)): if src_ranges[cur_indx][1] > merged_ranges[-1][1]: if src_ranges[cur_indx][0] <= merged_ranges[-1][1]: - merged_ranges[-1] = (merged_ranges[-1][0], - src_ranges[cur_indx][1]) + merged_ranges[-1] = ( + merged_ranges[-1][0], + src_ranges[cur_indx][1], + ) else: merged_ranges.append( - (src_ranges[cur_indx][0], src_ranges[cur_indx][1])) + (src_ranges[cur_indx][0], src_ranges[cur_indx][1]) + ) return merged_ranges @@ -129,7 +132,7 @@ def intersection_ranges(range_list1, range_list2, is_sorted=False): range2 = range_list2[indx2] elif range2[0] <= range1[0] and range2[1] < range1[1]: - assert (range2[1] > range1[0]) + assert range2[1] > range1[0] result_range.append((range1[0], range2[1])) range1 = (range2[1], range1[1]) indx2 += 1 @@ -138,7 +141,7 @@ def intersection_ranges(range_list1, range_list2, is_sorted=False): range2 = range_list2[indx2] elif range2[0] <= range1[0]: - assert (range2[1] >= range1[1]) + assert range2[1] >= range1[1] result_range.append(range1) range2 = (range1[1], range2[1]) indx1 += 1 @@ -147,7 +150,7 @@ def intersection_ranges(range_list1, range_list2, is_sorted=False): range1 = range_list1[indx1] elif range2[1] < range1[1]: - assert (range2[0] > range1[0]) + assert range2[0] > range1[0] result_range.append(range2) range1 = (range2[1], range1[1]) indx2 += 1 @@ -156,7 +159,7 @@ def intersection_ranges(range_list1, range_list2, is_sorted=False): range2 = range_list2[indx2] elif range2[0] < range1[1]: - assert (range2[1] >= range1[1]) + assert range2[1] >= range1[1] result_range.append((range2[0], range1[1])) range2 = (range1[1], range2[1]) indx1 += 1 @@ -165,7 +168,7 @@ def intersection_ranges(range_list1, range_list2, is_sorted=False): range1 = range_list1[indx1] else: - assert (range2[0] >= range1[1]) + assert range2[0] >= range1[1] indx1 += 1 if indx1 == len1: break @@ -207,17 +210,17 @@ def subtract_ranges(range_list1, range_list2, is_sorted=False): if indx2 != len2: range2 = range_list2[indx2] elif range2[0] <= range1[0]: - assert (range2[1] >= range1[1]) + assert range2[1] >= range1[1] range2 = (range1[1], range2[1]) indx1 += 1 if indx1 != len1: range1 = range_list1[indx1] elif range2[0] < range1[1]: - assert (range2[0] > range1[0]) + assert range2[0] > range1[0] result_range.append((range1[0], range2[0])) range1 = (range2[0], range1[1]) else: - assert (range2[0] >= range1[1]) + assert range2[0] >= range1[1] result_range.append(range1) indx1 += 1 if indx1 != len1: diff --git a/python/paddle/profiler/timer.py b/python/paddle/profiler/timer.py index 
1f0fb6acde63cb871e56aa614153379ef98e83e3..311fc373c5f7ded55d188343ed181ce94adf127d 100644 --- a/python/paddle/profiler/timer.py +++ b/python/paddle/profiler/timer.py @@ -126,20 +126,28 @@ class Event(object): else: speed_avg = float(self.total_iters) / self.batch_records['total'] - reader_summary = dict(max=self.reader_records['max'], - min=self.reader_records['min'], - avg=reader_avg) - batch_summary = dict(max=self.batch_records['max'], - min=self.batch_records['min'], - avg=batch_avg) - ips_summary = dict(max=self.speed_records['max'], - min=self.speed_records['min'], - avg=speed_avg) + reader_summary = dict( + max=self.reader_records['max'], + min=self.reader_records['min'], + avg=reader_avg, + ) + batch_summary = dict( + max=self.batch_records['max'], + min=self.batch_records['min'], + avg=batch_avg, + ) + ips_summary = dict( + max=self.speed_records['max'], + min=self.speed_records['min'], + avg=speed_avg, + ) reader_ratio = (reader_avg / batch_avg) * 100 - summary = dict(reader_summary=reader_summary, - batch_summary=batch_summary, - ips_summary=ips_summary, - reader_ratio=reader_ratio) + summary = dict( + reader_summary=reader_summary, + batch_summary=batch_summary, + ips_summary=ips_summary, + reader_ratio=reader_ratio, + ) return summary @@ -205,8 +213,11 @@ class TimerHook(Hook): """ reader_cost = timeit.default_timer() - self.start_reader - if (benchmark.current_event is None) or ( - not benchmark.current_event.need_record) or (reader_cost == 0): + if ( + (benchmark.current_event is None) + or (not benchmark.current_event.need_record) + or (reader_cost == 0) + ): return benchmark.current_event.record_reader(reader_cost) @@ -220,8 +231,9 @@ class TimerHook(Hook): """ - if (benchmark.current_event is - None) or (not benchmark.current_event.need_record): + if (benchmark.current_event is None) or ( + not benchmark.current_event.need_record + ): return batch_cost = timeit.default_timer() - self.start_time benchmark.current_event.record_batch(batch_cost, benchmark.num_samples) @@ -250,10 +262,20 @@ class TimerHook(Hook): print('Perf Summary'.center(100, '=')) if summary['reader_ratio'] != 0: print('Reader Ratio: ' + '%.3f' % (summary['reader_ratio']) + '%') - print('Time Unit: s, IPS Unit: %s' % - (benchmark.current_event.speed_unit)) - print('|', ''.center(15), '|', 'avg'.center(15), '|', 'max'.center(15), - '|', 'min'.center(15), '|') + print( + 'Time Unit: s, IPS Unit: %s' % (benchmark.current_event.speed_unit) + ) + print( + '|', + ''.center(15), + '|', + 'avg'.center(15), + '|', + 'max'.center(15), + '|', + 'min'.center(15), + '|', + ) # if DataLoader is not called, reader_summary is unnecessary. 
if summary['reader_summary']['avg'] != 0: self._print_stats('reader_cost', summary['reader_summary']) @@ -264,8 +286,17 @@ class TimerHook(Hook): avg_str = '%.5f' % (message_dict['avg']) max_str = '%.5f' % (message_dict['max']) min_str = '%.5f' % (message_dict['min']) - print('|', item.center(15), '|', avg_str.center(15), '|', - max_str.center(15), '|', min_str.center(15), '|') + print( + '|', + item.center(15), + '|', + avg_str.center(15), + '|', + max_str.center(15), + '|', + min_str.center(15), + '|', + ) class TimeAverager(object): @@ -357,8 +388,10 @@ class Benchmark(object): message += ' %s: %.5f s' % ('batch_cost', batch_average) speed_average = self.current_event.speed_average() if speed_average: - message += ' ips: %.3f %s' % (speed_average, - self.current_event.speed_unit) + message += ' ips: %.3f %s' % ( + speed_average, + self.current_event.speed_unit, + ) self.current_event.reset() return message @@ -389,8 +422,10 @@ class Benchmark(object): # set reader for the current event at the first iter if self.current_event.reader is None: self.current_event.reader = reader - elif self.current_event.reader.__dict__[ - '_dataset'] != reader.__dict__['_dataset']: + elif ( + self.current_event.reader.__dict__['_dataset'] + != reader.__dict__['_dataset'] + ): # enter a new task but not calling beign() to record it. # we pause the timer until the end of new task, so that # the cost of new task is not added to the current event. @@ -398,8 +433,10 @@ class Benchmark(object): self.current_event.need_record = False else: # when the new task exits, continue timing for the current event. - if self.current_event.reader.__dict__[ - '_dataset'] == reader.__dict__['_dataset']: + if ( + self.current_event.reader.__dict__['_dataset'] + == reader.__dict__['_dataset'] + ): self.current_event.need_record = True self.hooks['timer_hook'].start_time = timeit.default_timer() diff --git a/python/paddle/profiler/utils.py b/python/paddle/profiler/utils.py index fe05aaeb81f9e4e84bdc3a5792c8354bdb4e3be2..efe3975f1445246678aeed42c2cf7414e4dc188a 100644 --- a/python/paddle/profiler/utils.py +++ b/python/paddle/profiler/utils.py @@ -18,16 +18,19 @@ import functools from contextlib import ContextDecorator from paddle.fluid import core -from paddle.fluid.core import (_RecordEvent, TracerEventType) +from paddle.fluid.core import _RecordEvent, TracerEventType _is_profiler_used = False _has_optimizer_wrapped = False _AllowedEventTypeList = [ - TracerEventType.Dataloader, TracerEventType.ProfileStep, - TracerEventType.Forward, TracerEventType.Backward, - TracerEventType.Optimization, TracerEventType.PythonOp, - TracerEventType.PythonUserDefined + TracerEventType.Dataloader, + TracerEventType.ProfileStep, + TracerEventType.Forward, + TracerEventType.Backward, + TracerEventType.Optimization, + TracerEventType.PythonOp, + TracerEventType.PythonUserDefined, ] @@ -65,9 +68,10 @@ class RecordEvent(ContextDecorator): """ def __init__( - self, - name: str, - event_type: TracerEventType = TracerEventType.PythonUserDefined): + self, + name: str, + event_type: TracerEventType = TracerEventType.PythonUserDefined, + ): self.name = name self.event_type = event_type self.event = None @@ -100,8 +104,12 @@ class RecordEvent(ContextDecorator): if not _is_profiler_used: return if self.event_type not in _AllowedEventTypeList: - warn("Only TracerEvent Type in [{}, {}, {}, {}, {}, {},{}]\ - can be recorded.".format(*_AllowedEventTypeList)) + warn( + "Only TracerEvent Type in [{}, {}, {}, {}, {}, {},{}]\ + can be recorded.".format( + 
*_AllowedEventTypeList + ) + ) self.event = None else: self.event = _RecordEvent(self.name, self.event_type) @@ -160,14 +168,13 @@ def in_profiler_mode(): def wrap_optimizers(): - def optimizer_warpper(func): - @functools.wraps(func) def warpper(*args, **kwargs): if in_profiler_mode(): - with RecordEvent('Optimization Step', - event_type=TracerEventType.Optimization): + with RecordEvent( + 'Optimization Step', event_type=TracerEventType.Optimization + ): return func(*args, **kwargs) else: return func(*args, **kwargs) @@ -178,6 +185,7 @@ def wrap_optimizers(): if _has_optimizer_wrapped == True: return import paddle.optimizer as optimizer + for classname in optimizer.__all__: if classname != 'Optimizer': classobject = getattr(optimizer, classname) diff --git a/python/paddle/quantization/__init__.py b/python/paddle/quantization/__init__.py index 77da408d8e001d4fa1dd62bf51f715d6b714084a..66d2354d48052414b87a419d4185f88d2a10c512 100644 --- a/python/paddle/quantization/__init__.py +++ b/python/paddle/quantization/__init__.py @@ -12,14 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.contrib.slim.quantization.imperative.ptq_config import PTQConfig, default_ptq_config -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import BaseQuantizer -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import AbsmaxQuantizer -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import PerChannelAbsmaxQuantizer -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import KLQuantizer -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import HistQuantizer -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import SUPPORT_ACT_QUANTIZERS -from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import SUPPORT_WT_QUANTIZERS -from ...fluid.contrib.slim.quantization.imperative.ptq_registry import PTQRegistry +from ...fluid.contrib.slim.quantization.imperative.ptq_config import ( + PTQConfig, + default_ptq_config, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + BaseQuantizer, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + AbsmaxQuantizer, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + PerChannelAbsmaxQuantizer, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + KLQuantizer, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + HistQuantizer, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + SUPPORT_ACT_QUANTIZERS, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_quantizer import ( + SUPPORT_WT_QUANTIZERS, +) +from ...fluid.contrib.slim.quantization.imperative.ptq_registry import ( + PTQRegistry, +) from ...fluid.contrib.slim.quantization.imperative.ptq import ImperativePTQ -from ...fluid.contrib.slim.quantization.imperative.qat import ImperativeQuantAware +from ...fluid.contrib.slim.quantization.imperative.qat import ( + ImperativeQuantAware, +) diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index f7ba974fc0fd9722c5b53dc16caa13e3b5c0e7ff..b2aa88cc810b4d46ea005aebc35323997d06b78b 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -277,7 +277,7 @@ def compose(*readers, **kwargs): if isinstance(x, tuple): return x else: - return (x, ) + return (x,) def reader(): rs = [] @@ -292,7 +292,8 @@ def 
compose(*readers, **kwargs): if o is None: # None will be not be present if compose is aligned raise ComposeNotAligned( - "outputs of readers are not aligned.") + "outputs of readers are not aligned." + ) yield sum(list(map(make_tuple, outputs)), ()) return reader @@ -330,7 +331,7 @@ def buffered(reader, size): print(i) """ - class EndSignal(): + class EndSignal: pass end = EndSignal() @@ -343,10 +344,13 @@ def buffered(reader, size): def data_reader(): r = reader() q = Queue(maxsize=size) - t = Thread(target=read_worker, args=( - r, - q, - )) + t = Thread( + target=read_worker, + args=( + r, + q, + ), + ) t.daemon = True t.start() e = q.get() @@ -398,7 +402,7 @@ def firstn(reader, n): return firstn_reader -class XmapEndSignal(): +class XmapEndSignal: pass @@ -470,8 +474,11 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): t.start() # start several handle_workers target = order_handle_worker if order else handle_worker - args = (in_queue, out_queue, mapper, - out_order) if order else (in_queue, out_queue, mapper) + args = ( + (in_queue, out_queue, mapper, out_order) + if order + else (in_queue, out_queue, mapper) + ) workers = [] for i in range(process_num): worker = Thread(target=target, args=args) @@ -579,7 +586,8 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): if sys.platform == 'win32': raise NotImplementedError( - "The multiprocess_reader method is not supported on windows.") + "The multiprocess_reader method is not supported on windows." + ) # ujson is ultra fast json encoder and decoder written in pure C with bindings for Python 3.6+. try: @@ -587,11 +595,13 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): except Exception as e: warnings.warn( "The `ujson` module is not found, use the `json` module, `ujson` encodes and decodes faster, " - "you can install `ujson` through `pip install ujson`.") + "you can install `ujson` through `pip install ujson`." + ) import json - assert isinstance(readers, (list, tuple)) and len(readers) > 0, ( - "`readers` must be list or tuple.") + assert ( + isinstance(readers, (list, tuple)) and len(readers) > 0 + ), "`readers` must be list or tuple." 
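# (Editor's note: illustrative aside, not part of the patch.) multiprocess_reader forks
# one worker process per reader creator and merges their samples through a
# multiprocessing Pipe by default, or a Queue when use_pipe=False; the assert above
# rejects anything that is not a non-empty list or tuple of reader creators, and the
# whole facility is unavailable on Windows. A minimal usage sketch, assuming a working
# paddle install on a non-Windows host (make_reader and its arguments are hypothetical):

import paddle

def make_reader(start):
    # reader creator: returns a generator function yielding ten integers
    def reader():
        for i in range(start, start + 10):
            yield i
    return reader

# Worker processes are only spawned once the merged reader is iterated.
merged = paddle.reader.multiprocess_reader(
    [make_reader(0), make_reader(100)], use_pipe=False
)
# for sample in merged():   # samples from both workers; ordering is not guaranteed
#     print(sample)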
def _read_into_queue(reader, queue): try: @@ -607,8 +617,9 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): def queue_reader(): queue = fork_context.Queue(queue_size) for reader in readers: - p = fork_context.Process(target=_read_into_queue, - args=(reader, queue)) + p = fork_context.Process( + target=_read_into_queue, args=(reader, queue) + ) p.start() reader_num = len(readers) @@ -649,8 +660,9 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): for reader in readers: parent_conn, child_conn = fork_context.Pipe() conns.append(parent_conn) - p = fork_context.Process(target=_read_into_pipe, - args=(reader, child_conn)) + p = fork_context.Process( + target=_read_into_pipe, args=(reader, child_conn) + ) p.start() reader_num = len(readers) diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/reader/tests/decorator_test.py index 902a8cbe06c2fc522c6ce6c6afd75843f191b67d..92d257e065a01ef6307f677f5e3cc45b37aa9864 100644 --- a/python/paddle/reader/tests/decorator_test.py +++ b/python/paddle/reader/tests/decorator_test.py @@ -23,7 +23,6 @@ __all__ = [] def reader_creator_10(dur): - def reader(): for i in range(10): # this invocation helps testing paddle.reader.buffer @@ -34,7 +33,6 @@ def reader_creator_10(dur): class TestMap(unittest.TestCase): - def test_map(self): d = {"h": 0, "i": 1} @@ -51,7 +49,6 @@ class TestMap(unittest.TestCase): class TestBuffered(unittest.TestCase): - def test_read(self): for size in range(20): b = paddle.reader.buffered(reader_creator_10(0), size) @@ -76,10 +73,10 @@ class TestBuffered(unittest.TestCase): class TestCompose(unittest.TestCase): - def test_compse(self): - reader = paddle.reader.compose(reader_creator_10(0), - reader_creator_10(0)) + reader = paddle.reader.compose( + reader_creator_10(0), reader_creator_10(0) + ) for idx, e in enumerate(reader()): self.assertEqual(e, (idx, idx)) @@ -87,7 +84,8 @@ class TestCompose(unittest.TestCase): total = 0 reader = paddle.reader.compose( paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)), - reader_creator_10(0)) + reader_creator_10(0), + ) with self.assertRaises(paddle.reader.ComposeNotAligned): for e in reader(): total += 1 @@ -96,10 +94,11 @@ class TestCompose(unittest.TestCase): def test_compose_not_aligned_no_check(self): total = 0 - reader = paddle.reader.compose(paddle.reader.chain( - reader_creator_10(0), reader_creator_10(0)), - reader_creator_10(0), - check_alignment=False) + reader = paddle.reader.compose( + paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)), + reader_creator_10(0), + check_alignment=False, + ) for e in reader(): total += 1 # expecting 10, not 20 @@ -107,7 +106,6 @@ class TestCompose(unittest.TestCase): class TestChain(unittest.TestCase): - def test_chain(self): c = paddle.reader.chain(reader_creator_10(0), reader_creator_10(0)) idx = 0 @@ -118,7 +116,6 @@ class TestChain(unittest.TestCase): class TestShuffle(unittest.TestCase): - def test_shuffle(self): case = [(0, True), (1, True), (10, False), (100, False)] a = reader_creator_10(0) @@ -133,11 +130,9 @@ class TestShuffle(unittest.TestCase): class TestXmap(unittest.TestCase): - def test_xmap(self): - def mapper(x): - return (x + 1) + return x + 1 orders = (True, False) thread_nums = (1, 2, 4, 8, 16) @@ -145,9 +140,9 @@ class TestXmap(unittest.TestCase): for order in orders: for tNum in thread_nums: for size in buffered_size: - reader = paddle.reader.xmap_readers(mapper, - reader_creator_10(0), - tNum, size, order) + reader = paddle.reader.xmap_readers( + 
mapper, reader_creator_10(0), tNum, size, order + ) for n in range(3): result = [] for i in reader(): @@ -159,7 +154,6 @@ class TestXmap(unittest.TestCase): class TestMultiProcessReader(unittest.TestCase): - def setup(self): self.samples = [] for i in range(1000): @@ -178,7 +172,8 @@ class TestMultiProcessReader(unittest.TestCase): self.setup() results = [] for data in paddle.reader.multiprocess_reader( - [self.reader0, self.reader1, self.reader2], 100, use_pipe)(): + [self.reader0, self.reader1, self.reader2], 100, use_pipe + )(): results.append(data) self.assertEqual(sorted(self.samples), sorted(results)) diff --git a/python/paddle/signal.py b/python/paddle/signal.py index 535a388d0e0df2f88e364c5bf39900cf1ff72afc..82d46b81967634fe4e30ac4f71b6316ecb4c227b 100644 --- a/python/paddle/signal.py +++ b/python/paddle/signal.py @@ -122,7 +122,8 @@ def frame(x, frame_length, hop_length, axis=-1, name=None): if frame_length > x.shape[axis]: raise ValueError( f'Attribute frame_length should be less equal than sequence length, ' - f'but got ({frame_length}) > ({x.shape[axis]}).') + f'but got ({frame_length}) > ({x.shape[axis]}).' + ) op_type = 'frame' @@ -130,25 +131,33 @@ def frame(x, frame_length, hop_length, axis=-1, name=None): return _C_ops.frame(x, frame_length, hop_length, axis) if _in_legacy_dygraph(): - attrs = ('frame_length', frame_length, 'hop_length', hop_length, 'axis', - axis) + attrs = ( + 'frame_length', + frame_length, + 'hop_length', + hop_length, + 'axis', + axis, + ) op = getattr(_legacy_C_ops, op_type) out = op(x, *attrs) else: check_variable_and_dtype( - x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], - op_type) + x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], op_type + ) helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type=op_type, - inputs={'X': x}, - attrs={ - 'frame_length': frame_length, - 'hop_length': hop_length, - 'axis': axis - }, - outputs={'Out': out}) + helper.append_op( + type=op_type, + inputs={'X': x}, + attrs={ + 'frame_length': frame_length, + 'hop_length': hop_length, + 'axis': axis, + }, + outputs={'Out': out}, + ) return out @@ -223,31 +232,32 @@ def overlap_add(x, hop_length, axis=-1, name=None): out = op(x, *attrs) else: check_variable_and_dtype( - x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], - op_type) + x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], op_type + ) helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type=op_type, - inputs={'X': x}, - attrs={ - 'hop_length': hop_length, - 'axis': axis - }, - outputs={'Out': out}) + helper.append_op( + type=op_type, + inputs={'X': x}, + attrs={'hop_length': hop_length, 'axis': axis}, + outputs={'Out': out}, + ) return out -def stft(x, - n_fft, - hop_length=None, - win_length=None, - window=None, - center=True, - pad_mode='reflect', - normalized=False, - onesided=True, - name=None): +def stft( + x, + n_fft, + hop_length=None, + win_length=None, + window=None, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + name=None, +): r""" Short-time Fourier transform (STFT). 
@@ -310,13 +320,15 @@ def stft(x, paddle.randn([8, 48000], dtype=paddle.float64)*1j # [8, 48000] complex128 y1 = stft(x, n_fft=512, center=False, onesided=False) # [8, 512, 372] """ - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'complex64', 'complex128'], - 'stft') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'stft' + ) x_rank = len(x.shape) - assert x_rank in [1, 2], \ - f'x should be a 1D or 2D real tensor, but got rank of x is {x_rank}' + assert x_rank in [ + 1, + 2, + ], f'x should be a 1D or 2D real tensor, but got rank of x is {x_rank}' if x_rank == 1: # (batch, seq_length) x = x.unsqueeze(0) @@ -324,69 +336,77 @@ def stft(x, if hop_length is None: hop_length = int(n_fft // 4) - assert hop_length > 0, \ - f'hop_length should be > 0, but got {hop_length}.' + assert hop_length > 0, f'hop_length should be > 0, but got {hop_length}.' if win_length is None: win_length = n_fft if _non_static_mode(): - assert 0 < n_fft <= x.shape[-1], \ - f'n_fft should be in (0, seq_length({x.shape[-1]})], but got {n_fft}.' + assert ( + 0 < n_fft <= x.shape[-1] + ), f'n_fft should be in (0, seq_length({x.shape[-1]})], but got {n_fft}.' - assert 0 < win_length <= n_fft, \ - f'win_length should be in (0, n_fft({n_fft})], but got {win_length}.' + assert ( + 0 < win_length <= n_fft + ), f'win_length should be in (0, n_fft({n_fft})], but got {win_length}.' if window is not None: - assert len(window.shape) == 1 and len(window) == win_length, \ - f'expected a 1D window tensor of size equal to win_length({win_length}), but got window with shape {window.shape}.' + assert ( + len(window.shape) == 1 and len(window) == win_length + ), f'expected a 1D window tensor of size equal to win_length({win_length}), but got window with shape {window.shape}.' else: - window = paddle.ones(shape=(win_length, ), dtype=x.dtype) + window = paddle.ones(shape=(win_length,), dtype=x.dtype) if win_length < n_fft: pad_left = (n_fft - win_length) // 2 pad_right = n_fft - win_length - pad_left - window = paddle.nn.functional.pad(window, - pad=[pad_left, pad_right], - mode='constant') + window = paddle.nn.functional.pad( + window, pad=[pad_left, pad_right], mode='constant' + ) if center: - assert pad_mode in ['constant', 'reflect'], \ - 'pad_mode should be "reflect" or "constant", but got "{}".'.format(pad_mode) + assert pad_mode in [ + 'constant', + 'reflect', + ], 'pad_mode should be "reflect" or "constant", but got "{}".'.format( + pad_mode + ) pad_length = n_fft // 2 # FIXME: Input `x` can be a complex tensor but pad does not supprt complex input. - x = paddle.nn.functional.pad(x.unsqueeze(-1), - pad=[pad_length, pad_length], - mode=pad_mode, - data_format="NLC").squeeze(-1) + x = paddle.nn.functional.pad( + x.unsqueeze(-1), + pad=[pad_length, pad_length], + mode=pad_mode, + data_format="NLC", + ).squeeze(-1) x_frames = frame(x=x, frame_length=n_fft, hop_length=hop_length, axis=-1) x_frames = x_frames.transpose( - perm=[0, 2, - 1]) # switch n_fft to last dim, egs: (batch, num_frames, n_fft) + perm=[0, 2, 1] + ) # switch n_fft to last dim, egs: (batch, num_frames, n_fft) x_frames = paddle.multiply(x_frames, window) norm = 'ortho' if normalized else 'backward' if is_complex(x_frames): - assert not onesided, \ - 'onesided should be False when input or window is a complex Tensor.' + assert ( + not onesided + ), 'onesided should be False when input or window is a complex Tensor.' 
if not is_complex(x): - out = fft_r2c(x=x_frames, - n=None, - axis=-1, - norm=norm, - forward=True, - onesided=onesided, - name=name) + out = fft_r2c( + x=x_frames, + n=None, + axis=-1, + norm=norm, + forward=True, + onesided=onesided, + name=name, + ) else: - out = fft_c2c(x=x_frames, - n=None, - axis=-1, - norm=norm, - forward=True, - name=name) + out = fft_c2c( + x=x_frames, n=None, axis=-1, norm=norm, forward=True, name=name + ) out = out.transpose(perm=[0, 2, 1]) # (batch, n_fft, num_frames) @@ -396,17 +416,19 @@ def stft(x, return out -def istft(x, - n_fft, - hop_length=None, - win_length=None, - window=None, - center=True, - normalized=False, - onesided=True, - length=None, - return_complex=False, - name=None): +def istft( + x, + n_fft, + hop_length=None, + win_length=None, + window=None, + center=True, + normalized=False, + onesided=True, + length=None, + return_complex=False, + name=None, +): r""" Inverse short-time Fourier transform (ISTFT). @@ -482,8 +504,12 @@ def istft(x, check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'istft') x_rank = len(x.shape) - assert x_rank in [2, 3], \ - 'x should be a 2D or 3D complex tensor, but got rank of x is {}'.format(x_rank) + assert x_rank in [ + 2, + 3, + ], 'x should be a 2D or 3D complex tensor, but got rank of x is {}'.format( + x_rank + ) if x_rank == 2: # (batch, n_fft, n_frames) x = x.unsqueeze(0) @@ -495,83 +521,107 @@ def istft(x, win_length = n_fft # Assure no gaps between frames. - assert 0 < hop_length <= win_length, \ - 'hop_length should be in (0, win_length({})], but got {}.'.format(win_length, hop_length) - - assert 0 < win_length <= n_fft, \ - 'win_length should be in (0, n_fft({})], but got {}.'.format(n_fft, win_length) + assert ( + 0 < hop_length <= win_length + ), 'hop_length should be in (0, win_length({})], but got {}.'.format( + win_length, hop_length + ) + + assert ( + 0 < win_length <= n_fft + ), 'win_length should be in (0, n_fft({})], but got {}.'.format( + n_fft, win_length + ) n_frames = x.shape[-1] fft_size = x.shape[-2] if _non_static_mode(): if onesided: - assert (fft_size == n_fft // 2 + 1), \ - 'fft_size should be equal to n_fft // 2 + 1({}) when onesided is True, but got {}.'.format(n_fft // 2 + 1, fft_size) + assert ( + fft_size == n_fft // 2 + 1 + ), 'fft_size should be equal to n_fft // 2 + 1({}) when onesided is True, but got {}.'.format( + n_fft // 2 + 1, fft_size + ) else: - assert (fft_size == n_fft), \ - 'fft_size should be equal to n_fft({}) when onesided is False, but got {}.'.format(n_fft, fft_size) + assert ( + fft_size == n_fft + ), 'fft_size should be equal to n_fft({}) when onesided is False, but got {}.'.format( + n_fft, fft_size + ) if window is not None: - assert len(window.shape) == 1 and len(window) == win_length, \ - 'expected a 1D window tensor of size equal to win_length({}), but got window with shape {}.'.format(win_length, window.shape) + assert ( + len(window.shape) == 1 and len(window) == win_length + ), 'expected a 1D window tensor of size equal to win_length({}), but got window with shape {}.'.format( + win_length, window.shape + ) else: - window_dtype = paddle.float32 if x.dtype in [ - paddle.float32, paddle.complex64 - ] else paddle.float64 - window = paddle.ones(shape=(win_length, ), dtype=window_dtype) + window_dtype = ( + paddle.float32 + if x.dtype in [paddle.float32, paddle.complex64] + else paddle.float64 + ) + window = paddle.ones(shape=(win_length,), dtype=window_dtype) if win_length < n_fft: pad_left = (n_fft - win_length) // 2 pad_right = n_fft - 
win_length - pad_left # FIXME: Input `window` can be a complex tensor but pad does not supprt complex input. - window = paddle.nn.functional.pad(window, - pad=[pad_left, pad_right], - mode='constant') + window = paddle.nn.functional.pad( + window, pad=[pad_left, pad_right], mode='constant' + ) x = x.transpose( - perm=[0, 2, - 1]) # switch n_fft to last dim, egs: (batch, num_frames, n_fft) + perm=[0, 2, 1] + ) # switch n_fft to last dim, egs: (batch, num_frames, n_fft) norm = 'ortho' if normalized else 'backward' if return_complex: - assert not onesided, \ - 'onesided should be False when input(output of istft) or window is a complex Tensor.' + assert ( + not onesided + ), 'onesided should be False when input(output of istft) or window is a complex Tensor.' out = fft_c2c(x=x, n=None, axis=-1, norm=norm, forward=False, name=None) else: - assert not is_complex(window), \ - 'Data type of window should not be complex when return_complex is False.' + assert not is_complex( + window + ), 'Data type of window should not be complex when return_complex is False.' if onesided is False: - x = x[:, :, :n_fft // 2 + 1] + x = x[:, :, : n_fft // 2 + 1] out = fft_c2r(x=x, n=None, axis=-1, norm=norm, forward=False, name=None) out = paddle.multiply(out, window).transpose( - perm=[0, 2, 1]) # (batch, n_fft, num_frames) - out = overlap_add(x=out, hop_length=hop_length, - axis=-1) # (batch, seq_length) + perm=[0, 2, 1] + ) # (batch, n_fft, num_frames) + out = overlap_add( + x=out, hop_length=hop_length, axis=-1 + ) # (batch, seq_length) window_envelop = overlap_add( x=paddle.tile( x=paddle.multiply(window, window).unsqueeze(0), - repeat_times=[n_frames, - 1]).transpose(perm=[1, 0]), # (n_fft, num_frames) + repeat_times=[n_frames, 1], + ).transpose( + perm=[1, 0] + ), # (n_fft, num_frames) hop_length=hop_length, - axis=-1) # (seq_length, ) + axis=-1, + ) # (seq_length, ) if length is None: if center: - out = out[:, (n_fft // 2):-(n_fft // 2)] - window_envelop = window_envelop[(n_fft // 2):-(n_fft // 2)] + out = out[:, (n_fft // 2) : -(n_fft // 2)] + window_envelop = window_envelop[(n_fft // 2) : -(n_fft // 2)] else: if center: start = n_fft // 2 else: start = 0 - out = out[:, start:start + length] - window_envelop = window_envelop[start:start + length] + out = out[:, start : start + length] + window_envelop = window_envelop[start : start + length] # Check whether the Nonzero Overlap Add (NOLA) constraint is met. 
if _non_static_mode() and window_envelop.abs().min().item() < 1e-11: diff --git a/python/paddle/sparse/binary.py b/python/paddle/sparse/binary.py index 1e7fcfe76345ae61a887bb8309afb088759daa60..45c39e874a54c213caf01562f3037046ea89c0b2 100644 --- a/python/paddle/sparse/binary.py +++ b/python/paddle/sparse/binary.py @@ -266,10 +266,9 @@ def add(x, y, name=None): inputs = {'x': x, 'y': y} helper = LayerHelper(op_type) out = helper.create_sparse_variable_for_type_inference(x.dtype) - helper.append_op(type=op_type, - inputs=inputs, - outputs={'out': out}, - attrs={}) + helper.append_op( + type=op_type, inputs=inputs, outputs={'out': out}, attrs={} + ) return out diff --git a/python/paddle/sparse/creation.py b/python/paddle/sparse/creation.py index 5ca050be08a8b0ef3e71cb4a0fc12b8fff2ca3aa..684d449af7126b1e13bdbaa48da99cdba440832a 100644 --- a/python/paddle/sparse/creation.py +++ b/python/paddle/sparse/creation.py @@ -51,8 +51,8 @@ def _get_place(place): if place is None: place = _current_expected_place() elif not isinstance( - place, - (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)): + place, (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace) + ): raise ValueError( "'place' must be any of paddle.Place, paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace" ) @@ -66,12 +66,9 @@ def _check_indices_dtype(dtype): ) -def sparse_coo_tensor(indices, - values, - shape=None, - dtype=None, - place=None, - stop_gradient=True): +def sparse_coo_tensor( + indices, values, shape=None, dtype=None, place=None, stop_gradient=True +): r""" Constructs a sparse ``paddle.Tensor`` in coordinate format according to the indices and values of the specified non-zero elements. @@ -119,10 +116,9 @@ def sparse_coo_tensor(indices, place = _get_place(place) if not isinstance(indices, core.eager.Tensor): - indices = to_tensor(indices, - dtype=None, - place=place, - stop_gradient=True) + indices = to_tensor( + indices, dtype=None, place=place, stop_gradient=True + ) if not isinstance(values, core.eager.Tensor): values = to_tensor(values, dtype, place, stop_gradient) if len(indices.shape) != 2: @@ -135,8 +131,10 @@ def sparse_coo_tensor(indices, if nnz != values.shape[0]: raise ValueError( - "the indices and values must have same number of non-zero, but get {} and {}" - .format(nnz, values.shape[0])) + "the indices and values must have same number of non-zero, but get {} and {}".format( + nnz, values.shape[0] + ) + ) dense_dim = len(values.shape) - 1 @@ -156,11 +154,15 @@ def sparse_coo_tensor(indices, if shape < min_shape: raise ValueError( "the minimun shape required is {}, but get {}".format( - min_shape, shape)) + min_shape, shape + ) + ) if len(shape) != sparse_dim + dense_dim: raise ValueError( - "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}" - .format(sparse_dim, dense_dim, len(shape))) + "the number of dimensions(len(shape) must be sparse_dim({}) + dense_dim({}), but get {}".format( + sparse_dim, dense_dim, len(shape) + ) + ) return _C_ops.sparse_sparse_coo_tensor(values, indices, shape) @@ -172,22 +174,17 @@ def sparse_coo_tensor(indices, attrs = {'dense_shape': shape} helper = LayerHelper(op_type) out = helper.create_sparse_variable_for_type_inference(dtype) - helper.append_op(type=op_type, - inputs=inputs, - outputs={'out': out}, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs={'out': out}, attrs=attrs + ) return out -#TODO: need to support shape is None +# TODO: need to support shape is None @dygraph_only -def 
sparse_csr_tensor(crows, - cols, - values, - shape, - dtype=None, - place=None, - stop_gradient=True): +def sparse_csr_tensor( + crows, cols, values, shape, dtype=None, place=None, stop_gradient=True +): r""" Constructs a sparse ``paddle.Tensor`` in CSR(Compressed Sparse Row) format according to the ``crows``, ``cols`` and ``values``. @@ -250,8 +247,10 @@ def sparse_csr_tensor(crows, if len(shape) != 2 and len(shape) != 3: raise ValueError( - "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}". - format(shape)) + "SparseCsrTensor only support 2-D or 3-D matrix. but get shape {}".format( + shape + ) + ) rows = shape[len(shape) - 2] if not crows.place._equals(place): @@ -268,26 +267,32 @@ def sparse_csr_tensor(crows, if len(crows.shape) != 1 or len(cols.shape) != 1 or len(values.shape) != 1: raise ValueError("The 'crows', 'cols' and 'values' must be 1-D.") - if (len(cols) != len(values)): + if len(cols) != len(values): raise ValueError("the length of cols must be same as length of values") if len(shape) == 2: if crows.shape[0] != rows + 1: raise ValueError( - "The length({}) of crows must be equal to the rows({})+1 of matrix." - .format(crows.shape[0], rows)) + "The length({}) of crows must be equal to the rows({})+1 of matrix.".format( + crows.shape[0], rows + ) + ) if crows[0] != 0: raise ValueError("the 0th value of crows must be 0") if crows[-1] != values.shape[0]: raise ValueError( - "the last value of crows must be equal the number of non-zero") + "the last value of crows must be equal the number of non-zero" + ) else: if crows.shape[0] % (rows + 1) != 0: raise ValueError( - "The length({}) of crows must be divisible the rows({})+1 of matrix." - .format(crows.shape[0], rows)) + "The length({}) of crows must be divisible the rows({})+1 of matrix.".format( + crows.shape[0], rows + ) + ) # TODO(zkh2016): check whether the value in crows and cols is legal - return core.eager.sparse_csr_tensor(crows, cols, values, shape, - stop_gradient) + return core.eager.sparse_csr_tensor( + crows, cols, values, shape, stop_gradient + ) diff --git a/python/paddle/sparse/nn/functional/activation.py b/python/paddle/sparse/nn/functional/activation.py index 516cfc5cd583df78c89260a60b05ea622fb7f8e8..a50a64f5164fe09c17796b0612c1984983214c36 100644 --- a/python/paddle/sparse/nn/functional/activation.py +++ b/python/paddle/sparse/nn/functional/activation.py @@ -52,10 +52,9 @@ def relu(x, name=None): op_type = 'sparse_relu' helper = LayerHelper(op_type) out = helper.create_sparse_variable_for_type_inference(x.dtype) - helper.append_op(type=op_type, - inputs={'x': x}, - outputs={'out': out}, - attrs={}) + helper.append_op( + type=op_type, inputs={'x': x}, outputs={'out': out}, attrs={} + ) return out diff --git a/python/paddle/sparse/nn/functional/conv.py b/python/paddle/sparse/nn/functional/conv.py index 03b0f2e9be628e12eef48b3157bea005ef05162e..ec1907a7770c8be33f900c18d440ac7ac66258fb 100644 --- a/python/paddle/sparse/nn/functional/conv.py +++ b/python/paddle/sparse/nn/functional/conv.py @@ -21,50 +21,66 @@ from paddle.nn.functional.conv import _update_padding_nd from paddle.fluid.layer_helper import LayerHelper -def _conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - subm=False, - key=None, - data_format="NDHWC", - name=None): +def _conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + subm=False, + key=None, + data_format="NDHWC", + name=None, +): assert groups == 1, "Currently, only support groups=1" dims = 3 # Currently, 
only support 'NDHWC' if data_format not in ["NDHWC"]: - raise ValueError("Attr(data_format) should be 'NDHWC'. Received " - "Attr(data_format): {}.".format(data_format)) + raise ValueError( + "Attr(data_format) should be 'NDHWC'. Received " + "Attr(data_format): {}.".format(data_format) + ) if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) - channel_last = (data_format == "NDHWC") + channel_last = data_format == "NDHWC" channel_dim = -1 if channel_last else 1 if len(x.shape) != 5: raise ValueError( - "Input x should be 5D tensor, but received x with the shape of {}". - format(x.shape)) + "Input x should be 5D tensor, but received x with the shape of {}".format( + x.shape + ) + ) num_channels = x.shape[channel_dim] if num_channels < 0: raise ValueError( "The channel dimension of the input({}) should be defined. " - "Received: {}.".format(x.shape, num_channels)) + "Received: {}.".format(x.shape, num_channels) + ) padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims) stride = convert_to_list(stride, dims, 'stride') dilation = convert_to_list(dilation, dims, 'dilation') if in_dynamic_mode(): - pre_bias = _C_ops.sparse_conv3d(x, weight, padding, dilation, stride, - groups, subm, - key if key is not None else "") + pre_bias = _C_ops.sparse_conv3d( + x, + weight, + padding, + dilation, + stride, + groups, + subm, + key if key is not None else "", + ) if bias is not None: return add(pre_bias, bias) else: @@ -77,35 +93,38 @@ def _conv3d(x, 'strides': stride, 'groups': groups, 'subm': subm, - 'key': key + 'key': key, } op_type = 'sparse_conv3d' helper = LayerHelper(op_type, **locals()) - rulebook = helper.create_variable_for_type_inference(dtype='int32', - stop_gradient=True) - counter = helper.create_variable_for_type_inference(dtype='int32', - stop_gradient=True) + rulebook = helper.create_variable_for_type_inference( + dtype='int32', stop_gradient=True + ) + counter = helper.create_variable_for_type_inference( + dtype='int32', stop_gradient=True + ) pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype) outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter} - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: return add(pre_bias, bias) else: return pre_bias -def conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NDHWC", - name=None): +def conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NDHWC", + name=None, +): r""" The sparse convolution3d functional calculates the output based on the input, filter @@ -207,20 +226,33 @@ def conv3d(x, print(y.shape) # (1, 1, 1, 2, 1) """ - return _conv3d(x, weight, bias, stride, padding, dilation, groups, False, - None, data_format, name) - - -def subm_conv3d(x, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - data_format="NDHWC", - key=None, - name=None): + return _conv3d( + x, + weight, + bias, + stride, + padding, + dilation, + groups, + False, + None, + data_format, + name, + ) + + +def subm_conv3d( + x, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + data_format="NDHWC", + key=None, + name=None, +): r""" The sparse submanifold convolution3d 
functional calculates the output based on the input, filter @@ -327,5 +359,16 @@ def subm_conv3d(x, print(y.shape) #(1, 1, 3, 4, 1) """ - return _conv3d(x, weight, bias, stride, padding, dilation, groups, True, - key, data_format, name) + return _conv3d( + x, + weight, + bias, + stride, + padding, + dilation, + groups, + True, + key, + data_format, + name, + ) diff --git a/python/paddle/sparse/nn/functional/pooling.py b/python/paddle/sparse/nn/functional/pooling.py index d86ac58e9229d56e264d758ff4da0eef3ff133a1..740324ea3979d49fb456349248cd6792e30fab2e 100644 --- a/python/paddle/sparse/nn/functional/pooling.py +++ b/python/paddle/sparse/nn/functional/pooling.py @@ -19,13 +19,15 @@ from paddle.nn.functional.pooling import _update_padding_nd __all__ = [] -def max_pool3d(x, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - data_format="NDHWC", - name=None): +def max_pool3d( + x, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + data_format="NDHWC", + name=None, +): """ Implements sparse max pooling 3d operation. See more details in :ref:`api_sparse_pooling_MaxPool3d` . @@ -75,9 +77,12 @@ def max_pool3d(x, """ assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode" - assert x.is_sparse_coo( + assert ( + x.is_sparse_coo() ), "Currently, sparse.relu only support the input of SparseCooTensor" - assert data_format == 'NDHWC', "Currently, sparse.max_pool3d only support data format of 'NDHWC'" + assert ( + data_format == 'NDHWC' + ), "Currently, sparse.max_pool3d only support data format of 'NDHWC'" kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size') if stride is None: @@ -87,12 +92,11 @@ def max_pool3d(x, channel_last = True - padding, padding_algorithm = _update_padding_nd(padding, - 3, - channel_last=channel_last, - ceil_mode=ceil_mode) + padding, padding_algorithm = _update_padding_nd( + padding, 3, channel_last=channel_last, ceil_mode=ceil_mode + ) - #TODO(zkh2016): remove the dependency on dilation from the backend + # TODO(zkh2016): remove the dependency on dilation from the backend dilation = [1, 1, 1] return _C_ops.sparse_maxpool(x, kernel_size, padding, dilation, stride) diff --git a/python/paddle/sparse/nn/functional/transformer.py b/python/paddle/sparse/nn/functional/transformer.py index f17d728078a79b9a77bb0fc1afbe712499552b1f..38118ba359b5730331be7a3cf07bba8ed1c01885 100644 --- a/python/paddle/sparse/nn/functional/transformer.py +++ b/python/paddle/sparse/nn/functional/transformer.py @@ -19,14 +19,16 @@ from paddle.fluid.framework import dygraph_only @dygraph_only -def attention(query, - key, - value, - sparse_mask, - key_padding_mask=None, - attn_mask=None, - name=None): - """ +def attention( + query, + key, + value, + sparse_mask, + key_padding_mask=None, + attn_mask=None, + name=None, +): + r""" Note: This API is only used from ``CUDA 11.7`` . @@ -37,7 +39,7 @@ def attention(query, .. math:: - result = softmax(\\frac{ Q * K^T }{\\sqrt{d}}) * V + result = softmax(\frac{ Q * K^T }{\sqrt{d}}) * V where : ``Q``, ``K``, and ``V`` represent the three input parameters of the attention module. 
The shape of the three parameters are: `[batch_size, num_heads, seq_len, head_dim]`, and @@ -89,5 +91,6 @@ def attention(query, output = paddle.sparse.nn.functional.attention(query, key, value, sp_mask, kp_mask, attn_mask) output.backward() """ - return _C_ops.sparse_fused_attention(query, key, value, sparse_mask, - key_padding_mask, attn_mask) + return _C_ops.sparse_fused_attention( + query, key, value, sparse_mask, key_padding_mask, attn_mask + ) diff --git a/python/paddle/sparse/nn/layer/conv.py b/python/paddle/sparse/nn/layer/conv.py index 8368eeeb03caee5d300d16544cb587d1570e4c88..599fb2d7b2e5cb59bda5171e989816d6fd789b03 100644 --- a/python/paddle/sparse/nn/layer/conv.py +++ b/python/paddle/sparse/nn/layer/conv.py @@ -23,23 +23,26 @@ __all__ = [] class _Conv3D(Layer): - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - subm=False, - key=None, - padding_mode='zeros', - weight_attr=None, - bias_attr=None, - data_format="NDHWC"): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + subm=False, + key=None, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NDHWC", + ): super(_Conv3D, self).__init__() - assert weight_attr is not False, "weight_attr should not be False in Conv." + assert ( + weight_attr is not False + ), "weight_attr should not be False in Conv." self._param_attr = weight_attr self._bias_attr = bias_attr self._groups = groups @@ -49,56 +52,66 @@ class _Conv3D(Layer): self._subm = subm self._key = key - assert padding_mode == 'zeros', "Currently, only support padding_mode='zeros'" + assert ( + padding_mode == 'zeros' + ), "Currently, only support padding_mode='zeros'" assert groups == 1, "Currently, only support groups=1" valid_format = {'NDHWC'} if data_format not in valid_format: raise ValueError( - "data_format must be one of {}, but got data_format='{}'". 
- format(valid_format, data_format)) + "data_format must be one of {}, but got data_format='{}'".format( + valid_format, data_format + ) + ) channel_last = data_format == "NDHWC" dims = 3 self._stride = utils.convert_to_list(stride, dims, 'stride') self._dilation = utils.convert_to_list(dilation, dims, 'dilation') - self._kernel_size = utils.convert_to_list(kernel_size, dims, - 'kernel_size') + self._kernel_size = utils.convert_to_list( + kernel_size, dims, 'kernel_size' + ) self._padding = padding self._padding_mode = padding_mode self._updated_padding, self._padding_algorithm = _update_padding_nd( - padding, channel_last, dims) + padding, channel_last, dims + ) # the sparse conv restricts the shape is [D, H, W, in_channels, out_channels] filter_shape = self._kernel_size + [ - self._in_channels, self._out_channels + self._in_channels, + self._out_channels, ] def _get_default_param_initializer(): filter_elem_num = np.prod(self._kernel_size) * self._in_channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std) self.weight = self.create_parameter( shape=filter_shape, attr=self._param_attr, - default_initializer=_get_default_param_initializer()) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._out_channels], - is_bias=True) + default_initializer=_get_default_param_initializer(), + ) + self.bias = self.create_parameter( + attr=self._bias_attr, shape=[self._out_channels], is_bias=True + ) def forward(self, x): - out = F.conv._conv3d(x, - self.weight, - bias=self.bias, - stride=self._stride, - padding=self._updated_padding, - dilation=self._dilation, - groups=self._groups, - subm=self._subm, - key=self._key, - data_format=self._data_format) + out = F.conv._conv3d( + x, + self.weight, + bias=self.bias, + stride=self._stride, + padding=self._updated_padding, + dilation=self._dilation, + groups=self._groups, + subm=self._subm, + key=self._key, + data_format=self._data_format, + ) return out def extra_repr(self): @@ -223,31 +236,35 @@ class Conv3D(_Conv3D): # (1, 1, 1, 2, 1) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - weight_attr=None, - bias_attr=None, - data_format="NDHWC"): - super(Conv3D, self).__init__(in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - subm=False, - key=None, - padding_mode=padding_mode, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + weight_attr=None, + bias_attr=None, + data_format="NDHWC", + ): + super(Conv3D, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + subm=False, + key=None, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) class SubmConv3D(_Conv3D): @@ -360,29 +377,33 @@ class SubmConv3D(_Conv3D): # (1, 1, 3, 4, 1) """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - padding_mode='zeros', - key=None, - weight_attr=None, - bias_attr=None, - data_format="NDHWC"): - super(SubmConv3D, self).__init__(in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - subm=True, - key=key, - 
padding_mode=padding_mode, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format) + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode='zeros', + key=None, + weight_attr=None, + bias_attr=None, + data_format="NDHWC", + ): + super(SubmConv3D, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + subm=True, + key=key, + padding_mode=padding_mode, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + ) diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py index b36a85f6b789b3872afc07377b82d4e41026e849..99b96fb530aef5f19351e95510356c4677540e77 100644 --- a/python/paddle/sparse/nn/layer/norm.py +++ b/python/paddle/sparse/nn/layer/norm.py @@ -97,23 +97,27 @@ class BatchNorm(paddle.nn.BatchNorm1D): # [1, 6, 6, 6, 3] """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NDHWC', - use_global_stats=None, - name=None): - super(BatchNorm, self).__init__(num_features, - momentum=momentum, - epsilon=epsilon, - weight_attr=weight_attr, - bias_attr=bias_attr, - data_format=data_format, - use_global_stats=use_global_stats, - name=name) + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NDHWC', + use_global_stats=None, + name=None, + ): + super(BatchNorm, self).__init__( + num_features, + momentum=momentum, + epsilon=epsilon, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format, + use_global_stats=use_global_stats, + name=name, + ) def _check_data_format(self, input): if input != "NDHWC": @@ -124,7 +128,8 @@ class BatchNorm(paddle.nn.BatchNorm1D): if self.training: warnings.warn( - "When training, we now always track global mean and variance.") + "When training, we now always track global mean and variance." 
+ ) if self._use_global_stats == None: self._use_global_stats = not self.training @@ -136,9 +141,19 @@ class BatchNorm(paddle.nn.BatchNorm1D): if in_dynamic_mode(): batch_norm_out, _, _, _, _, _ = _C_ops.sparse_batch_norm( - input, self.weight, self.bias, self._mean, self._variance, - self._momentum, self._epsilon, data_format, not self.training, - self._use_global_stats, trainable_statistics, False) + input, + self.weight, + self.bias, + self._mean, + self._variance, + self._momentum, + self._epsilon, + data_format, + not self.training, + self._use_global_stats, + trainable_statistics, + False, + ) return batch_norm_out else: inputs = { @@ -146,7 +161,7 @@ class BatchNorm(paddle.nn.BatchNorm1D): 'scale': self.weight, 'bias': self.bias, 'mean': self._mean, - 'variance': self._variance + 'variance': self._variance, } attrs = { 'momentum': self._momentum, @@ -155,21 +170,26 @@ class BatchNorm(paddle.nn.BatchNorm1D): 'is_test': not self.training, 'use_global_stats': self._use_global_stats, 'trainable_statistics': trainable_statistics, - 'fuse_with_relu': False + 'fuse_with_relu': False, } op_type = 'sparse_batch_norm' helper = LayerHelper(op_type) dtype = input.dtype mean_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) variance_out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) saved_mean = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) saved_variance = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) reserve_space = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=True) + dtype=dtype, stop_gradient=True + ) out = helper.create_sparse_variable_for_type_inference(dtype) outputs = { "out": out, @@ -177,12 +197,11 @@ class BatchNorm(paddle.nn.BatchNorm1D): "variance_out": variance_out, "saved_mean": saved_mean, "saved_variance": saved_variance, - "reserve_space": reserve_space + "reserve_space": reserve_space, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -283,24 +302,42 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): # [-0.88415730, 1.57439375]]) """ - def __init__(self, - num_features, - momentum=0.9, - epsilon=1e-05, - weight_attr=None, - bias_attr=None, - data_format='NCHW', - name=None): - super(SyncBatchNorm, - self).__init__(num_features, momentum, epsilon, weight_attr, - bias_attr, data_format, name) + def __init__( + self, + num_features, + momentum=0.9, + epsilon=1e-05, + weight_attr=None, + bias_attr=None, + data_format='NCHW', + name=None, + ): + super(SyncBatchNorm, self).__init__( + num_features, + momentum, + epsilon, + weight_attr, + bias_attr, + data_format, + name, + ) def forward(self, x): self._check_data_format() sync_batch_norm_out, _, _, _, _, _ = _C_ops.sparse_sync_batch_norm_( - x, self.weight, self.bias, self._mean, self._variance, - self._momentum, self._epsilon, self._data_format, not self.training, - False, False, False) + x, + self.weight, + self.bias, + self._mean, + self._variance, + self._momentum, + self._epsilon, + self._data_format, + not self.training, + False, + False, + False, + ) return sync_batch_norm_out @classmethod @@ -327,27 +364,41 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): layer_output = layer if 
isinstance(layer, _BatchNormBase): - if layer._weight_attr != None and not isinstance( - layer._weight_attr, - bool) and layer._weight_attr.name != None: + if ( + layer._weight_attr != None + and not isinstance(layer._weight_attr, bool) + and layer._weight_attr.name != None + ): layer._weight_attr.name = layer._weight_attr.name + '_sync' - if layer._bias_attr != None and not isinstance( - layer._bias_attr, bool) and layer._bias_attr.name != None: + if ( + layer._bias_attr != None + and not isinstance(layer._bias_attr, bool) + and layer._bias_attr.name != None + ): layer._bias_attr.name = layer._bias_attr.name + '_sync' - #convert sparse BatchNorm + # convert sparse BatchNorm if isinstance(layer, BatchNorm): - layer_output = SyncBatchNorm(layer._num_features, - layer._momentum, layer._epsilon, - layer._weight_attr, - layer._bias_attr, - layer._data_format, layer._name) - #convert dense BatchNorm + layer_output = SyncBatchNorm( + layer._num_features, + layer._momentum, + layer._epsilon, + layer._weight_attr, + layer._bias_attr, + layer._data_format, + layer._name, + ) + # convert dense BatchNorm else: layer_output = paddle.nn.SyncBatchNorm( - layer._num_features, layer._momentum, layer._epsilon, - layer._weight_attr, layer._bias_attr, layer._data_format, - layer._name) + layer._num_features, + layer._momentum, + layer._epsilon, + layer._weight_attr, + layer._bias_attr, + layer._data_format, + layer._name, + ) if layer._weight_attr != False and layer._bias_attr != False: with no_grad(): @@ -357,7 +408,8 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): layer_output._variance = layer._variance for name, sublayer in layer.named_children(): - layer_output.add_sublayer(name, - cls.convert_sync_batchnorm(sublayer)) + layer_output.add_sublayer( + name, cls.convert_sync_batchnorm(sublayer) + ) del layer return layer_output diff --git a/python/paddle/sparse/nn/layer/pooling.py b/python/paddle/sparse/nn/layer/pooling.py index ee15bf6f7936936093d796212469458f737a371b..7d6141bab0fd53f170f5637a87243ad7309e35f7 100644 --- a/python/paddle/sparse/nn/layer/pooling.py +++ b/python/paddle/sparse/nn/layer/pooling.py @@ -73,14 +73,16 @@ class MaxPool3D(Layer): """ - def __init__(self, - kernel_size, - stride=None, - padding=0, - return_mask=False, - ceil_mode=False, - data_format="NDHWC", - name=None): + def __init__( + self, + kernel_size, + stride=None, + padding=0, + return_mask=False, + ceil_mode=False, + data_format="NDHWC", + name=None, + ): super(MaxPool3D, self).__init__() self.ksize = kernel_size self.stride = stride @@ -91,14 +93,17 @@ class MaxPool3D(Layer): self.name = name def forward(self, x): - return F.max_pool3d(x, - kernel_size=self.ksize, - stride=self.stride, - padding=self.padding, - ceil_mode=self.ceil_mode, - data_format=self.data_format, - name=self.name) + return F.max_pool3d( + x, + kernel_size=self.ksize, + stride=self.stride, + padding=self.padding, + ceil_mode=self.ceil_mode, + data_format=self.data_format, + name=self.name, + ) def extra_repr(self): return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format( - **self.__dict__) + **self.__dict__ + ) diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py index c26ec35d3b7ed03838666df5533a1d483dc8e980..14a97d6cb03288c2c1f1715f90c0e910358e4301 100644 --- a/python/paddle/sparse/unary.py +++ b/python/paddle/sparse/unary.py @@ -15,7 +15,11 @@ import numpy as np from paddle import _C_ops -from paddle.fluid.framework import dygraph_only, core, convert_np_dtype_to_dtype_ +from paddle.fluid.framework import ( + 
dygraph_only, + core, + convert_np_dtype_to_dtype_, +) __all__ = [] diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index 44ab13360618a9d7d6139eddc0ff3ec6f5314993..78ad1cfabc31101c52962a4fafdfd99e9fe47668 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -73,18 +73,55 @@ from ..fluid.layers.metric_op import accuracy # noqa: F401 from ..fluid.contrib.layers import ctr_metric_bundle # noqa: F401 from ..fluid.layers import exponential_decay # noqa: F401 -__all__ = [ #noqa - 'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard', - 'BuildStrategy', 'CompiledProgram', 'ipu_shard_guard', 'IpuCompiledProgram', - 'IpuStrategy', 'Print', 'py_func', 'ExecutionStrategy', 'name_scope', - 'ParallelExecutor', 'program_guard', 'WeightNormParamAttr', - 'ExponentialMovingAverage', 'default_main_program', - 'default_startup_program', 'Program', 'data', 'InputSpec', 'save', 'load', - 'save_inference_model', 'load_inference_model', 'serialize_program', - 'serialize_persistables', 'save_to_file', 'deserialize_program', - 'deserialize_persistables', 'load_from_file', 'normalize_program', - 'load_program_state', 'set_program_state', 'cpu_places', 'cuda_places', - 'xpu_places', 'npu_places', 'mlu_places', 'Variable', 'create_global_var', - 'accuracy', 'auc', 'device_guard', 'create_parameter', 'set_ipu_shard', - 'ctr_metric_bundle', 'exponential_decay' +__all__ = [ # noqa + 'append_backward', + 'gradients', + 'Executor', + 'global_scope', + 'scope_guard', + 'BuildStrategy', + 'CompiledProgram', + 'ipu_shard_guard', + 'IpuCompiledProgram', + 'IpuStrategy', + 'Print', + 'py_func', + 'ExecutionStrategy', + 'name_scope', + 'ParallelExecutor', + 'program_guard', + 'WeightNormParamAttr', + 'ExponentialMovingAverage', + 'default_main_program', + 'default_startup_program', + 'Program', + 'data', + 'InputSpec', + 'save', + 'load', + 'save_inference_model', + 'load_inference_model', + 'serialize_program', + 'serialize_persistables', + 'save_to_file', + 'deserialize_program', + 'deserialize_persistables', + 'load_from_file', + 'normalize_program', + 'load_program_state', + 'set_program_state', + 'cpu_places', + 'cuda_places', + 'xpu_places', + 'npu_places', + 'mlu_places', + 'Variable', + 'create_global_var', + 'accuracy', + 'auc', + 'device_guard', + 'create_parameter', + 'set_ipu_shard', + 'ctr_metric_bundle', + 'exponential_decay', ] diff --git a/python/paddle/static/amp/__init__.py b/python/paddle/static/amp/__init__.py index 8ee3225057d0a581b7c0c2c98953d059c7f99e0b..ffcfa404cd3567e1f6784f274f90f0f436f0f9da 100644 --- a/python/paddle/static/amp/__init__.py +++ b/python/paddle/static/amp/__init__.py @@ -14,8 +14,12 @@ from ...fluid.contrib.mixed_precision import decorate # noqa: F401 from ...fluid.contrib.mixed_precision import CustomOpLists # noqa: F401 -from ...fluid.contrib.mixed_precision import AutoMixedPrecisionLists # noqa: F401 +from ...fluid.contrib.mixed_precision import ( + AutoMixedPrecisionLists, +) # noqa: F401 from ...fluid.contrib.mixed_precision import fp16_guard # noqa: F401 from ...fluid.contrib.mixed_precision import cast_model_to_fp16 # noqa: F401 -from ...fluid.contrib.mixed_precision import cast_parameters_to_fp16 # noqa: F401 +from ...fluid.contrib.mixed_precision import ( + cast_parameters_to_fp16, +) # noqa: F401 from ...fluid.contrib.mixed_precision import bf16 # noqa: F401 diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py index 
6cd5700764118047db35070894f7d75bacdd5275..8a398b42ded8d5876460d976bd18a0015a61d355 100644 --- a/python/paddle/static/input.py +++ b/python/paddle/static/input.py @@ -105,7 +105,8 @@ def data(name, shape, dtype=None, lod_level=0): stop_gradient=True, lod_level=lod_level, is_data=True, - need_check_feed=True) + need_check_feed=True, + ) else: return helper.create_global_variable( name=name, @@ -115,7 +116,8 @@ def data(name, shape, dtype=None, lod_level=0): stop_gradient=True, lod_level=lod_level, is_data=True, - need_check_feed=True) + need_check_feed=True, + ) class InputSpec(object): @@ -164,7 +166,8 @@ class InputSpec(object): def __repr__(self): return '{}(shape={}, dtype={}, name={})'.format( - type(self).__name__, self.shape, self.dtype, self.name) + type(self).__name__, self.shape, self.dtype, self.name + ) @classmethod def from_tensor(cls, tensor, name=None): @@ -196,7 +199,9 @@ class InputSpec(object): else: raise ValueError( "Input `tensor` should be a Tensor, but received {}.".format( - type(tensor).__name__)) + type(tensor).__name__ + ) + ) @classmethod def from_numpy(cls, ndarray, name=None): @@ -245,13 +250,17 @@ class InputSpec(object): if isinstance(batch_size, (list, tuple)): if len(batch_size) != 1: raise ValueError( - "Length of batch_size: {} shall be 1, but received {}.". - format(batch_size, len(batch_size))) + "Length of batch_size: {} shall be 1, but received {}.".format( + batch_size, len(batch_size) + ) + ) batch_size = batch_size[1] elif not isinstance(batch_size, int): raise TypeError( "type(batch_size) shall be `int`, but received {}.".format( - type(batch_size).__name__)) + type(batch_size).__name__ + ) + ) new_shape = [batch_size] + list(self.shape) self.shape = tuple(new_shape) @@ -277,7 +286,8 @@ class InputSpec(object): """ if len(self.shape) == 0: raise ValueError( - "Not support to unbatch a InputSpec when len(shape) == 0.") + "Not support to unbatch a InputSpec when len(shape) == 0." + ) self.shape = self._verify(self.shape[1:]) return self @@ -288,20 +298,25 @@ class InputSpec(object): """ if not isinstance(shape, (list, tuple)): raise TypeError( - "Type of `shape` in InputSpec should be one of (tuple, list), but received {}." - .format(type(shape).__name__)) + "Type of `shape` in InputSpec should be one of (tuple, list), but received {}.".format( + type(shape).__name__ + ) + ) if len(shape) == 0: raise ValueError( - "`shape` in InputSpec should contain at least 1 element, but received {}." - .format(shape)) + "`shape` in InputSpec should contain at least 1 element, but received {}.".format( + shape + ) + ) for i, ele in enumerate(shape): if ele is not None: if not isinstance(ele, int): raise ValueError( - "shape[{}] should be an `int`, but received `{}`:{}.". 
- format(i, - type(ele).__name__, ele)) + "shape[{}] should be an `int`, but received `{}`:{}.".format( + i, type(ele).__name__, ele + ) + ) if ele is None or ele < -1: shape[i] = -1 @@ -326,8 +341,9 @@ class InputSpec(object): def __eq__(self, other): slots = ['shape', 'dtype', 'name'] - return (type(self) is type(other) and all( - getattr(self, attr) == getattr(other, attr) for attr in slots)) + return type(self) is type(other) and all( + getattr(self, attr) == getattr(other, attr) for attr in slots + ) def __ne__(self, other): return not self == other diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 279fc60a669fe3f70cf861d6eff9d1cee9635d78..ac598328352a9531e73975424655a94939d8d8e0 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -37,9 +37,9 @@ from paddle.fluid.log_helper import get_logger __all__ = [] -_logger = get_logger(__name__, - logging.INFO, - fmt='%(asctime)s-%(levelname)s: %(message)s') +_logger = get_logger( + __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' +) def _check_args(caller, args, supported_args=None, deprecated_args=None): @@ -48,12 +48,16 @@ def _check_args(caller, args, supported_args=None, deprecated_args=None): for arg in args: if arg in deprecated_args: raise ValueError( - "argument '{}' in function '{}' is deprecated, only {} are supported." - .format(arg, caller, supported_args)) + "argument '{}' in function '{}' is deprecated, only {} are supported.".format( + arg, caller, supported_args + ) + ) elif arg not in supported_args: raise ValueError( - "function '{}' doesn't support argument '{}',\n only {} are supported." - .format(caller, arg, supported_args)) + "function '{}' doesn't support argument '{}',\n only {} are supported.".format( + caller, arg, supported_args + ) + ) def _check_vars(name, var_list): @@ -61,7 +65,8 @@ def _check_vars(name, var_list): var_list = [var_list] if not var_list or not all([isinstance(var, Variable) for var in var_list]): raise ValueError( - "'{}' should be a Variable or a list of Variable.".format(name)) + "'{}' should be a Variable or a list of Variable.".format(name) + ) def _normalize_path_prefix(path_prefix): @@ -90,29 +95,35 @@ def _get_valid_program(program=None): "The type of input program is invalid, expected tyep is Program, but received None" ) warnings.warn( - "The input is a CompiledProgram, this is not recommended.") + "The input is a CompiledProgram, this is not recommended." 
+ ) if not isinstance(program, Program): raise TypeError( "The type of input program is invalid, expected type is fluid.Program, but received %s" - % type(program)) + % type(program) + ) return program def _clone_var_in_block(block, var): assert isinstance(var, Variable) if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR: - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=True) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True, + ) else: - return block.create_var(name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - persistable=True) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + persistable=True, + ) def normalize_program(program, feed_vars, fetch_vars): @@ -155,18 +166,21 @@ def normalize_program(program, feed_vars, fetch_vars): """ if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" % - type(program)) + "program type must be `fluid.Program`, but received `%s`" + % type(program) + ) if not isinstance(feed_vars, list): feed_vars = [feed_vars] if not all(isinstance(v, Variable) for v in feed_vars): raise TypeError( - "feed_vars type must be a Variable or a list of Variable.") + "feed_vars type must be a Variable or a list of Variable." + ) if not isinstance(fetch_vars, list): fetch_vars = [fetch_vars] if not all(isinstance(v, Variable) for v in fetch_vars): raise TypeError( - "fetch_vars type must be a Variable or a list of Variable.") + "fetch_vars type must be a Variable or a list of Variable." + ) # remind users to set auc_states to 0 if auc op were found. for op in program.global_block().ops: @@ -174,8 +188,10 @@ def normalize_program(program, feed_vars, fetch_vars): device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName() op._set_attr(device_attr_name, "") if op.type == 'auc': - warnings.warn("Be sure that you have set auc states to 0 " - "before saving inference model.") + warnings.warn( + "Be sure that you have set auc states to 0 " + "before saving inference model." + ) break # fix the bug that the activation op's output as target will be pruned. 
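The io.py hunks in this region all apply the same mechanical rewrites: long signatures and calls are exploded to one argument per line with a trailing comma, error messages keep `.format(...)` attached to the string literal, and binary operators such as `%` lead the continuation line. A minimal sketch of the target style on a hypothetical helper (the function name and message are illustrative only, not taken from this patch):

    def save_report(
        path_prefix,
        feed_names,
        fetch_names,
        executor=None,
        clip_extra=True,
    ):
        # Exploded signature with a trailing comma; the error message keeps
        # .format() attached to its string literal, and closing parentheses
        # get their own lines.
        if executor is None:
            raise ValueError(
                "expected an executor for path_prefix '{}', but got {}".format(
                    path_prefix, executor
                )
            )
        return "{}.pdmodel".format(path_prefix)
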
@@ -185,9 +201,9 @@ def normalize_program(program, feed_vars, fetch_vars): uniq_fetch_vars = [] for i, var in enumerate(fetch_vars): if var.dtype != paddle.bool: - var = layers.scale(var, - 1., - name="save_infer_model/scale_{}".format(i)) + var = layers.scale( + var, 1.0, name="save_infer_model/scale_{}".format(i) + ) uniq_fetch_vars.append(var) fetch_vars = uniq_fetch_vars @@ -205,7 +221,8 @@ def normalize_program(program, feed_vars, fetch_vars): feed_var_names = [var.name for var in feed_vars] copy_program = copy_program._prune_with_input( - feeded_var_names=feed_var_names, targets=fetch_vars) + feeded_var_names=feed_var_names, targets=fetch_vars + ) copy_program = copy_program._inference_optimize(prune_read_op=True) fetch_var_names = [var.name for var in fetch_vars] prepend_feed_ops(copy_program, feed_var_names) @@ -235,9 +252,11 @@ def is_persistable(var): param = fluid.default_main_program().global_block().var('fc.b') res = fluid.io.is_persistable(param) """ - if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ - var.desc.type() == core.VarDesc.VarType.READER: + if ( + var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH + or var.desc.type() == core.VarDesc.VarType.FETCH_LIST + or var.desc.type() == core.VarDesc.VarType.READER + ): return False return var.persistable @@ -361,8 +380,10 @@ def _serialize_persistables(program, executor): vars_ = list(filter(is_persistable, program.list_vars())) # warn if no variable found in model if len(vars_) == 0: - warnings.warn("no variable in your model, please ensure there are any " - "variables in your model to save") + warnings.warn( + "no variable in your model, please ensure there are any " + "variables in your model to save" + ) return None # create a new program and clone persitable vars to it save_program = Program() @@ -379,16 +400,16 @@ def _serialize_persistables(program, executor): in_vars.append(save_var_map[name]) out_var_name = unique_name.generate("out_var") - out_var = save_block.create_var(type=core.VarDesc.VarType.RAW, - name=out_var_name) + out_var = save_block.create_var( + type=core.VarDesc.VarType.RAW, name=out_var_name + ) out_var.desc.set_persistable(True) - save_block.append_op(type='save_combine', - inputs={'X': in_vars}, - outputs={'Y': out_var}, - attrs={ - 'file_path': '', - 'save_to_memory': True - }) + save_block.append_op( + type='save_combine', + inputs={'X': in_vars}, + outputs={'Y': out_var}, + attrs={'file_path': '', 'save_to_memory': True}, + ) # run save_program to save vars # NOTE(zhiqiu): save op will add variable kLookupTablePath to save_program.desc, # which leads to diff between save_program and its desc. Call _sync_with_cpp @@ -435,8 +456,9 @@ def save_to_file(path, content): @static_only -def save_inference_model(path_prefix, feed_vars, fetch_vars, executor, - **kwargs): +def save_inference_model( + path_prefix, feed_vars, fetch_vars, executor, **kwargs +): """ Save current model and its parameters to given path. i.e. 
Given path_prefix = "/path/to/modelname", after invoking @@ -516,7 +538,8 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor, program = normalize_program(program, feed_vars, fetch_vars) # serialize and save program program_bytes = _serialize_program( - program._remove_training_info(clip_extra=clip_extra)) + program._remove_training_info(clip_extra=clip_extra) + ) save_to_file(model_path, program_bytes) # serialize and save params params_bytes = _serialize_persistables(program, executor) @@ -566,8 +589,9 @@ def deserialize_program(data): """ program = Program.parse_from_string(data) if not core._is_program_version_supported(program._version()): - raise ValueError("Unsupported program version: %d\n" % - program._version()) + raise ValueError( + "Unsupported program version: %d\n" % program._version() + ) return program @@ -616,8 +640,9 @@ def deserialize_persistables(program, data, executor): """ if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" % - type(program)) + "program type must be `fluid.Program`, but received `%s`" + % type(program) + ) # load params to a tmp program load_program = Program() load_block = load_program.global_block() @@ -641,9 +666,9 @@ def deserialize_persistables(program, data, executor): load_var_map[var_copy.name] = var_copy if data is None: - assert len( - origin_shape_map - ) == 0, "Required 'data' shall be not None if program contains parameter, but received 'data' is None." + assert ( + len(origin_shape_map) == 0 + ), "Required 'data' shall be not None if program contains parameter, but received 'data' is None." return # append load_combine op to load parameters, @@ -655,10 +680,8 @@ def deserialize_persistables(program, data, executor): inputs={}, outputs={"Out": load_var_list}, # if load from memory, file_path is data - attrs={ - 'file_path': data, - 'model_from_memory': True - }) + attrs={'file_path': data, 'model_from_memory': True}, + ) executor.run(load_program) # check var shape for var in check_vars: @@ -673,7 +696,9 @@ def deserialize_persistables(program, data, executor): raise RuntimeError( "Shape mismatch, program needs a parameter with shape ({}), " "but the loaded parameter ('{}') has a shape of ({}).".format( - origin_shape, var.name, new_shape)) + origin_shape, var.name, new_shape + ) + ) def load_from_file(path): @@ -778,7 +803,7 @@ def load_inference_model(path_prefix, executor, **kwargs): """ # check kwargs supported_args = ('model_filename', 'params_filename') - deprecated_args = ('pserver_endpoints', ) + deprecated_args = ('pserver_endpoints',) caller = inspect.currentframe().f_code.co_name _check_args(caller, kwargs, supported_args, deprecated_args) @@ -789,7 +814,8 @@ def load_inference_model(path_prefix, executor, **kwargs): params_filename = kwargs.get('params_filename', None) if params_filename is None: raise ValueError( - "params_filename cannot be None when path_prefix is None.") + "params_filename cannot be None when path_prefix is None." 
+ ) load_dirname = '' program_bytes = model_filename params_bytes = params_filename @@ -812,21 +838,26 @@ def load_inference_model(path_prefix, executor, **kwargs): if model_filename is None: model_path = os.path.join(path_prefix, "__model__") else: - model_path = os.path.join(path_prefix, - model_filename + ".pdmodel") + model_path = os.path.join( + path_prefix, model_filename + ".pdmodel" + ) if not os.path.exists(model_path): model_path = os.path.join(path_prefix, model_filename) # set params_path if params_filename is None: params_path = os.path.join(path_prefix, "") else: - params_path = os.path.join(path_prefix, - params_filename + ".pdiparams") + params_path = os.path.join( + path_prefix, params_filename + ".pdiparams" + ) if not os.path.exists(params_path): params_path = os.path.join(path_prefix, params_filename) - _logger.warning("The old way to load inference model is deprecated." - " model path: {}, params path: {}".format( - model_path, params_path)) + _logger.warning( + "The old way to load inference model is deprecated." + " model path: {}, params path: {}".format( + model_path, params_path + ) + ) program_bytes = load_from_file(model_path) load_dirname = os.path.dirname(params_path) params_filename = os.path.basename(params_path) diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py index 41f8c99b52644896846e960eff7f550b45863e61..64e079ae375e3dab833c9ffc0ac089061c553600 100755 --- a/python/paddle/static/nn/__init__.py +++ b/python/paddle/static/nn/__init__.py @@ -59,7 +59,7 @@ from ...fluid.layers.sequence_lod import sequence_scatter # noqa: F401 from ...fluid.layers.sequence_lod import sequence_enumerate # noqa: F401 from ...fluid.layers.sequence_lod import sequence_reverse # noqa: F401 -__all__ = [ #noqa +__all__ = [ # noqa 'fc', 'batch_norm', 'embedding', diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index baeb39e8cf650637fade9c23cb0f290d5e1b9bb7..98c5e81a2e8e063243cc994cefcdbff1da09fc1c 100755 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -19,13 +19,15 @@ __all__ = [] @static_only -def fc(x, - size, - num_flatten_dims=1, - weight_attr=None, - bias_attr=None, - activation=None, - name=None): +def fc( + x, + size, + num_flatten_dims=1, + weight_attr=None, + bias_attr=None, + activation=None, + name=None, +): r""" Fully-Connected layer can take a tensor or a list of tensor as its inputs. @@ -154,30 +156,34 @@ def fc(x, bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0))) # out: [[1.8 1.8]] """ - return paddle.fluid.layers.fc(input=x, - size=size, - num_flatten_dims=num_flatten_dims, - param_attr=weight_attr, - bias_attr=bias_attr, - act=activation, - name=name) + return paddle.fluid.layers.fc( + input=x, + size=size, + num_flatten_dims=num_flatten_dims, + param_attr=weight_attr, + bias_attr=bias_attr, + act=activation, + name=name, + ) @static_only -def deform_conv2d(x, - offset, - mask, - num_filters, - filter_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - im2col_step=1, - weight_attr=None, - bias_attr=None, - name=None): +def deform_conv2d( + x, + offset, + mask, + num_filters, + filter_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=1, + weight_attr=None, + bias_attr=None, + name=None, +): r""" Compute 2-D deformable convolution on 4-D input. 
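For reference, the reformatted `fc` wrapper above does not change behaviour; it only renames keyword arguments (`weight_attr` becomes `param_attr`, `activation` becomes `act`) before delegating to the underlying fluid implementation. A small static-graph usage sketch, assuming a CPU build of Paddle (shapes and tensor names are illustrative):

    import numpy as np
    import paddle

    paddle.enable_static()

    # Build a tiny static program that applies the fully-connected layer.
    x = paddle.static.data(name="x", shape=[None, 4], dtype="float32")
    y = paddle.static.nn.fc(x=x, size=2, activation="relu")

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    (out,) = exe.run(
        feed={"x": np.ones((1, 4), dtype="float32")},
        fetch_list=[y],
    )
    print(out.shape)  # (1, 2): one sample mapped to `size` output features
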
@@ -318,7 +324,8 @@ def deform_conv2d(x, param_attr=weight_attr, bias_attr=bias_attr, modulated=False, - name=name) + name=name, + ) else: return paddle.fluid.layers.deformable_conv( input=x, @@ -335,4 +342,5 @@ def deform_conv2d(x, param_attr=weight_attr, bias_attr=bias_attr, modulated=True, - name=name) + name=name, + ) diff --git a/python/paddle/static/quantization/__init__.py b/python/paddle/static/quantization/__init__.py index 325fee5624bff614b5289303154cab7ead7d0a6b..3a1a7549f83027ab8db4ee27918e9867809cce5e 100644 --- a/python/paddle/static/quantization/__init__.py +++ b/python/paddle/static/quantization/__init__.py @@ -12,20 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.contrib.slim.quantization.quantization_pass import QuantizationTransformPass -from ...fluid.contrib.slim.quantization.quantization_pass import QuantizationFreezePass -from ...fluid.contrib.slim.quantization.quantization_pass import ConvertToInt8Pass -from ...fluid.contrib.slim.quantization.quantization_pass import TransformForMobilePass -from ...fluid.contrib.slim.quantization.quantization_pass import OutScaleForTrainingPass -from ...fluid.contrib.slim.quantization.quantization_pass import OutScaleForInferencePass -from ...fluid.contrib.slim.quantization.quantization_pass import AddQuantDequantPass -from ...fluid.contrib.slim.quantization.quantization_pass import ReplaceFakeQuantDequantPass +from ...fluid.contrib.slim.quantization.quantization_pass import ( + QuantizationTransformPass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + QuantizationFreezePass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + ConvertToInt8Pass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + TransformForMobilePass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + OutScaleForTrainingPass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + OutScaleForInferencePass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + AddQuantDequantPass, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + ReplaceFakeQuantDequantPass, +) from ...fluid.contrib.slim.quantization.quantization_pass import QuantWeightPass -from ...fluid.contrib.slim.quantization.quantization_pass import QuantizationTransformPassV2 -from ...fluid.contrib.slim.quantization.quantization_pass import AddQuantDequantPassV2 -from ...fluid.contrib.slim.quantization.quant_int8_mkldnn_pass import QuantInt8MkldnnPass -from ...fluid.contrib.slim.quantization.quant2_int8_mkldnn_pass import Quant2Int8MkldnnPass +from ...fluid.contrib.slim.quantization.quantization_pass import ( + QuantizationTransformPassV2, +) +from ...fluid.contrib.slim.quantization.quantization_pass import ( + AddQuantDequantPassV2, +) +from ...fluid.contrib.slim.quantization.quant_int8_mkldnn_pass import ( + QuantInt8MkldnnPass, +) +from ...fluid.contrib.slim.quantization.quant2_int8_mkldnn_pass import ( + Quant2Int8MkldnnPass, +) -from ...fluid.contrib.slim.quantization.post_training_quantization import PostTrainingQuantization -from ...fluid.contrib.slim.quantization.post_training_quantization import PostTrainingQuantizationProgram -from ...fluid.contrib.slim.quantization.post_training_quantization import WeightQuantization +from ...fluid.contrib.slim.quantization.post_training_quantization import ( + PostTrainingQuantization, +) +from 
...fluid.contrib.slim.quantization.post_training_quantization import ( + PostTrainingQuantizationProgram, +) +from ...fluid.contrib.slim.quantization.post_training_quantization import ( + WeightQuantization, +) diff --git a/python/paddle/static/sparsity/__init__.py b/python/paddle/static/sparsity/__init__.py index 8d3166b19a099defb54499a63d510758bd6bf95c..37b48132bda900bea7385c0d9ce12597344f9775 100644 --- a/python/paddle/static/sparsity/__init__.py +++ b/python/paddle/static/sparsity/__init__.py @@ -13,20 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ...fluid.contrib.sparsity import calculate_density #noqa: F401 -from ...fluid.contrib.sparsity import decorate #noqa: F401 -from ...fluid.contrib.sparsity import prune_model #noqa: F401 -from ...fluid.contrib.sparsity import reset_excluded_layers #noqa: F401 -from ...fluid.contrib.sparsity import add_supported_layer #noqa: F401 -from ...fluid.contrib import sparsity #noqa: F401 +from ...fluid.contrib.sparsity import calculate_density # noqa: F401 +from ...fluid.contrib.sparsity import decorate # noqa: F401 +from ...fluid.contrib.sparsity import prune_model # noqa: F401 +from ...fluid.contrib.sparsity import reset_excluded_layers # noqa: F401 +from ...fluid.contrib.sparsity import add_supported_layer # noqa: F401 +from ...fluid.contrib import sparsity # noqa: F401 def set_excluded_layers(main_program, param_names): - sparsity.set_excluded_layers(param_names=param_names, - main_program=main_program) + sparsity.set_excluded_layers( + param_names=param_names, main_program=main_program + ) -__all__ = [ #noqa - 'calculate_density', 'decorate', 'prune_model', 'set_excluded_layers', - 'reset_excluded_layers', 'add_supported_layer' +__all__ = [ # noqa + 'calculate_density', + 'decorate', + 'prune_model', + 'set_excluded_layers', + 'reset_excluded_layers', + 'add_supported_layer', ] diff --git a/python/paddle/sysconfig.py b/python/paddle/sysconfig.py index 2ce327c76961ad2febc020ed1a2595b7aad459a0..9a839f199a71dab9ebd68e8f3b1c65c9183be291 100644 --- a/python/paddle/sysconfig.py +++ b/python/paddle/sysconfig.py @@ -31,6 +31,7 @@ def get_include(): """ import paddle + return os.path.join(os.path.dirname(paddle.__file__), 'include') @@ -48,4 +49,5 @@ def get_lib(): """ import paddle + return os.path.join(os.path.dirname(paddle.__file__), 'libs') diff --git a/python/paddle/tensor/array.py b/python/paddle/tensor/array.py index 527cf692525ac0553af708c4aff145d92d4a12e4..382c6f524b4c57acfa27960189c3479cf449aca5 100644 --- a/python/paddle/tensor/array.py +++ b/python/paddle/tensor/array.py @@ -48,22 +48,24 @@ def array_length(array): """ if _non_static_mode(): assert isinstance( - array, - list), "The 'array' in array_write must be a list in dygraph mode" + array, list + ), "The 'array' in array_write must be a list in dygraph mode" return len(array) - if not isinstance( - array, - Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError( - "array should be tensor array vairable in array_length Op") + "array should be tensor array vairable in array_length Op" + ) helper = LayerHelper('array_length', **locals()) tmp = helper.create_variable_for_type_inference(dtype='int64') tmp.stop_gradient = True - helper.append_op(type='lod_array_length', - inputs={'X': [array]}, - outputs={'Out': [tmp]}) + helper.append_op( + type='lod_array_length', inputs={'X': [array]}, 
outputs={'Out': [tmp]} + ) return tmp @@ -108,8 +110,8 @@ def array_read(array, i): """ if _non_static_mode(): assert isinstance( - array, - list), "The 'array' in array_read must be list in dygraph mode" + array, list + ), "The 'array' in array_read must be list in dygraph mode" assert isinstance( i, Variable ), "The index 'i' in array_read must be Variable in dygraph mode" @@ -121,17 +123,17 @@ def array_read(array, i): check_variable_and_dtype(i, 'i', ['int64'], 'array_read') helper = LayerHelper('array_read', **locals()) - if not isinstance( - array, - Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError("array should be tensor array vairable") out = helper.create_variable_for_type_inference(dtype=array.dtype) - helper.append_op(type='read_from_array', - inputs={ - 'X': [array], - 'I': [i] - }, - outputs={'Out': [out]}) + helper.append_op( + type='read_from_array', + inputs={'X': [array], 'I': [i]}, + outputs={'Out': [out]}, + ) return out @@ -180,8 +182,8 @@ def array_write(x, i, array=None): if array is None: array = create_array(x.dtype) assert isinstance( - array, - list), "The 'array' in array_write must be a list in dygraph mode" + array, list + ), "The 'array' in array_write must be a list in dygraph mode" assert i <= len( array ), "The index 'i' should not be greater than the length of 'array' in dygraph mode" @@ -195,22 +197,24 @@ def array_write(x, i, array=None): check_type(x, 'x', (Variable), 'array_write') helper = LayerHelper('array_write', **locals()) if array is not None: - if not isinstance( - array, Variable - ) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + not isinstance(array, Variable) + or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): raise TypeError( - "array should be tensor array vairable in array_write Op") + "array should be tensor array vairable in array_write Op" + ) if array is None: array = helper.create_variable( name="{0}.out".format(helper.name), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - helper.append_op(type='write_to_array', - inputs={ - 'X': [x], - 'I': [i] - }, - outputs={'Out': [array]}) + dtype=x.dtype, + ) + helper.append_op( + type='write_to_array', + inputs={'X': [x], 'I': [i]}, + outputs={'Out': [array]}, + ) return array @@ -247,16 +251,20 @@ def create_array(dtype, initialized_list=None): if initialized_list is not None: if not isinstance(initialized_list, (list, tuple)): raise TypeError( - "Require type(initialized_list) should be list/tuple, but received {}" - .format(type(initialized_list))) + "Require type(initialized_list) should be list/tuple, but received {}".format( + type(initialized_list) + ) + ) array = list(initialized_list) # NOTE: Only support plain list like [x, y,...], not support nested list in static mode. for val in array: if not isinstance(val, Variable): raise TypeError( - "All values in `initialized_list` should be Variable, but recevied {}." 
- .format(type(val))) + "All values in `initialized_list` should be Variable, but recevied {}.".format( + type(val) + ) + ) if _non_static_mode(): return array @@ -265,7 +273,8 @@ def create_array(dtype, initialized_list=None): tensor_array = helper.create_variable( name="{0}.out".format(helper.name), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=dtype) + dtype=dtype, + ) for val in array: array_write(x=val, i=array_length(tensor_array), array=tensor_array) diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py index d68eebf1364b61ca0c49d48f31e79092348a09cd..e0db6a6e8b413f4173cc37c992e8078a2fc6b165 100644 --- a/python/paddle/tensor/attribute.py +++ b/python/paddle/tensor/attribute.py @@ -115,16 +115,29 @@ def shape(input): out.stop_gradient = True return out - check_variable_and_dtype(input, 'input', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], 'shape') + check_variable_and_dtype( + input, + 'input', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'shape', + ) helper = LayerHelper('shape', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type='shape', - inputs={'Input': input}, - outputs={'Out': out}, - stop_gradient=True) + helper.append_op( + type='shape', + inputs={'Input': input}, + outputs={'Out': out}, + stop_gradient=True, + ) return out @@ -156,11 +169,14 @@ def is_complex(x): # False """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): - raise TypeError("Expected Tensor, but received type of x: {}".format( - type(x))) + raise TypeError( + "Expected Tensor, but received type of x: {}".format(type(x)) + ) dtype = x.dtype - is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 - or dtype == core.VarDesc.VarType.COMPLEX128) + is_complex_dtype = ( + dtype == core.VarDesc.VarType.COMPLEX64 + or dtype == core.VarDesc.VarType.COMPLEX128 + ) return is_complex_dtype @@ -187,13 +203,16 @@ def is_floating_point(x): # False """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): - raise TypeError("Expected Tensor, but received type of x: {}".format( - type(x))) + raise TypeError( + "Expected Tensor, but received type of x: {}".format(type(x)) + ) dtype = x.dtype - is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 - or dtype == core.VarDesc.VarType.FP64 - or dtype == core.VarDesc.VarType.FP16 - or dtype == core.VarDesc.VarType.BF16) + is_fp_dtype = ( + dtype == core.VarDesc.VarType.FP32 + or dtype == core.VarDesc.VarType.FP64 + or dtype == core.VarDesc.VarType.FP16 + or dtype == core.VarDesc.VarType.BF16 + ) return is_fp_dtype @@ -224,14 +243,17 @@ def is_integer(x): # True """ if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): - raise TypeError("Expected Tensor, but received type of x: {}".format( - type(x))) + raise TypeError( + "Expected Tensor, but received type of x: {}".format(type(x)) + ) dtype = x.dtype - is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 - or dtype == core.VarDesc.VarType.INT8 - or dtype == core.VarDesc.VarType.INT16 - or dtype == core.VarDesc.VarType.INT32 - or dtype == core.VarDesc.VarType.INT64) + is_int_dtype = ( + dtype == core.VarDesc.VarType.UINT8 + or dtype == core.VarDesc.VarType.INT8 + or dtype == core.VarDesc.VarType.INT16 + or dtype == core.VarDesc.VarType.INT32 + or dtype == core.VarDesc.VarType.INT64 + ) return is_int_dtype @@ -276,7 +298,8 @@ def real(x, name=None): check_variable_and_dtype(x, 'x', ['complex64', 
'complex128'], 'real') helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( - dtype=_complex_to_real_dtype(helper.input_dtype())) + dtype=_complex_to_real_dtype(helper.input_dtype()) + ) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out @@ -322,6 +345,7 @@ def imag(x, name=None): check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( - dtype=_complex_to_real_dtype(helper.input_dtype())) + dtype=_complex_to_real_dtype(helper.input_dtype()) + ) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out}) return out diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index 675c186d47fc30c5118ef1301047d560802fc6c7..8f44f3ffe162eaa5bad80ea41733b5b00d9536e4 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -22,12 +22,21 @@ from ..framework import _current_expected_place, _get_paddle_place from ..framework import core from ..framework import in_dygraph_mode, _non_static_mode from ..framework import LayerHelper -from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from ..fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + convert_dtype, +) from ..framework import convert_np_dtype_to_dtype_ + # TODO: define functions to get create a tensor import paddle from paddle import _C_ops, _legacy_C_ops -from ..fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check +from ..fluid.framework import ( + _in_legacy_dygraph, + _in_eager_without_dygraph_check, +) import warnings __all__ = [] @@ -98,11 +107,17 @@ def linspace(start, stop, num, dtype=None, name=None): with device_guard("cpu"): tensor_num = fill_constant([1], 'int32', num, force_cpu=True) if in_dygraph_mode(): - return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, dtype, - _current_expected_place()) + return _C_ops.linspace( + tensor_start, + tensor_stop, + tensor_num, + dtype, + _current_expected_place(), + ) if _in_legacy_dygraph(): - return _legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num, - 'dtype', dtype) + return _legacy_C_ops.linspace( + tensor_start, tensor_stop, tensor_num, 'dtype', dtype + ) helper = LayerHelper("linspace", **locals()) @@ -110,41 +125,53 @@ def linspace(start, stop, num, dtype=None, name=None): stop_dtype = convert_dtype(tensor_stop.dtype) out_dtype = convert_dtype(dtype) if isinstance(start, Variable): - check_dtype(start.dtype, 'start', - ['float32', 'float64', 'int32', 'int64'], 'linspace') + check_dtype( + start.dtype, + 'start', + ['float32', 'float64', 'int32', 'int64'], + 'linspace', + ) else: check_type(start, 'start', (int, float), 'linspace') if isinstance(stop, Variable): - check_dtype(stop.dtype, 'stop', - ['float32', 'float64', 'int32', 'int64'], 'linspace') + check_dtype( + stop.dtype, + 'stop', + ['float32', 'float64', 'int32', 'int64'], + 'linspace', + ) else: check_type(stop, 'stop', (int, float), 'linspace') if isinstance(num, Variable): check_dtype(num.dtype, 'num', ['int32'], 'linspace') - check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], - 'linspace') - if ((stop_dtype == "float64" or start_dtype == "float64") - and out_dtype in ["float32", "int32"]) or ( - (stop_dtype == "int64" or start_dtype == "int64") - and out_dtype == "int32"): + check_dtype( + dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'linspace' + ) + 
if ( + (stop_dtype == "float64" or start_dtype == "float64") + and out_dtype in ["float32", "int32"] + ) or ( + (stop_dtype == "int64" or start_dtype == "int64") + and out_dtype == "int32" + ): raise ValueError( "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, " - "which may cause data type overflows. Please reset attr(dtype) of linspace." - .format(start_dtype, stop_dtype, dtype)) + "which may cause data type overflows. Please reset attr(dtype) of linspace.".format( + start_dtype, stop_dtype, dtype + ) + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='linspace', - inputs={ - 'Start': tensor_start, - 'Stop': tensor_stop, - 'Num': tensor_num - }, - attrs={'dtype': dtype}, - outputs={'Out': [out]}) + helper.append_op( + type='linspace', + inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num}, + attrs={'dtype': dtype}, + outputs={'Out': [out]}, + ) if isinstance(num, int): - out.desc.set_shape((num, )) + out.desc.set_shape((num,)) return out @@ -210,8 +237,9 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None): with device_guard("cpu"): tensor_base = fill_constant([1], dtype, base) if _non_static_mode(): - return _legacy_C_ops.logspace(tensor_start, tensor_stop, tensor_num, - tensor_base, 'dtype', dtype) + return _legacy_C_ops.logspace( + tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype + ) helper = LayerHelper("logspace", **locals()) @@ -220,14 +248,22 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None): base_dtype = convert_dtype(tensor_base.dtype) out_dtype = convert_dtype(dtype) if isinstance(start, Variable): - check_dtype(start.dtype, 'start', - ['float32', 'float64', 'int32', 'int64'], 'logspace') + check_dtype( + start.dtype, + 'start', + ['float32', 'float64', 'int32', 'int64'], + 'logspace', + ) else: check_type(start, 'start', (int, float), 'logspace') if isinstance(stop, Variable): - check_dtype(stop.dtype, 'stop', - ['float32', 'float64', 'int32', 'int64'], 'logspace') + check_dtype( + stop.dtype, + 'stop', + ['float32', 'float64', 'int32', 'int64'], + 'logspace', + ) else: check_type(stop, 'stop', (int, float), 'logspace') @@ -235,37 +271,55 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None): check_dtype(num.dtype, 'num', ['int32'], 'logspace') if isinstance(base, Variable): - check_dtype(base.dtype, 'base', - ['float32', 'float64', 'int32', 'int64'], 'logspace') + check_dtype( + base.dtype, + 'base', + ['float32', 'float64', 'int32', 'int64'], + 'logspace', + ) else: check_type(base, 'base', (int, float), 'logspace') - check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], - 'logspace') - if ((stop_dtype == "float64" or start_dtype == "float64" - or base_dtype == "float64") - and out_dtype in ["float32", "int32"]) or \ - ((stop_dtype == "int64" or start_dtype == "int64" - or base_dtype == "int64") - and out_dtype == "int32"): + check_dtype( + dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'logspace' + ) + if ( + ( + stop_dtype == "float64" + or start_dtype == "float64" + or base_dtype == "float64" + ) + and out_dtype in ["float32", "int32"] + ) or ( + ( + stop_dtype == "int64" + or start_dtype == "int64" + or base_dtype == "int64" + ) + and out_dtype == "int32" + ): raise ValueError( "The dtype of start/stop/base is {}/{}/{} but the attr(dtype) of logspace is {}, " - "which may cause data type overflows. Please reset attr(dtype) of logspace." 
- .format(start_dtype, stop_dtype, base_dtype, dtype)) + "which may cause data type overflows. Please reset attr(dtype) of logspace.".format( + start_dtype, stop_dtype, base_dtype, dtype + ) + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='logspace', - inputs={ - 'Start': tensor_start, - 'Stop': tensor_stop, - 'Num': tensor_num, - 'Base': tensor_base - }, - attrs={'dtype': dtype}, - outputs={'Out': [out]}) + helper.append_op( + type='logspace', + inputs={ + 'Start': tensor_start, + 'Stop': tensor_stop, + 'Num': tensor_num, + 'Base': tensor_base, + }, + attrs={'dtype': dtype}, + outputs={'Out': [out]}, + ) if isinstance(num, int): - out.desc.set_shape((num, )) + out.desc.set_shape((num,)) return out @@ -313,17 +367,25 @@ def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True): return data else: raise TypeError( - "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|np.ndarray|paddle.Tensor" - .format(type(data))) + "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|np.ndarray|paddle.Tensor".format( + type(data) + ) + ) if not dtype: if data.dtype in [ - 'float16', 'float32', 'float64', 'complex64', 'complex128' + 'float16', + 'float32', + 'float64', + 'complex64', + 'complex128', ]: default_type = paddle.get_default_dtype() if np.iscomplexobj(data): - default_type = 'complex64' if default_type in [ - 'float16', 'float32' - ] else 'complex128' + default_type = ( + 'complex64' + if default_type in ['float16', 'float32'] + else 'complex128' + ) data = data.astype(default_type) # Windows default type is 'int32', while Linux/Mac is 'int64'. Unify they. if data.dtype in ['int32']: @@ -334,18 +396,22 @@ def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True): data = data.astype(convert_dtype(dtype)) if _in_eager_without_dygraph_check() and isinstance(data, np.ndarray): - return core.eager.Tensor(value=data, - place=place, - persistable=False, - zero_copy=False, - name=None, - stop_gradient=stop_gradient) + return core.eager.Tensor( + value=data, + place=place, + persistable=False, + zero_copy=False, + name=None, + stop_gradient=stop_gradient, + ) else: - return paddle.Tensor(value=data, - place=place, - persistable=False, - zero_copy=False, - stop_gradient=stop_gradient) + return paddle.Tensor( + value=data, + place=place, + persistable=False, + zero_copy=False, + stop_gradient=stop_gradient, + ) def _to_tensor_static(data, dtype=None, stop_gradient=None): @@ -360,8 +426,11 @@ def _to_tensor_static(data, dtype=None, stop_gradient=None): elif isinstance(data, (list, tuple)): data = np.array(data) - if isinstance(data, - np.ndarray) and not dtype and data.dtype != 'object': + if ( + isinstance(data, np.ndarray) + and not dtype + and data.dtype != 'object' + ): if data.dtype in ['float16', 'float32', 'float64']: data = data.astype(paddle.get_default_dtype()) elif data.dtype in ['int32']: @@ -376,10 +445,14 @@ def _to_tensor_static(data, dtype=None, stop_gradient=None): target_dtype = convert_dtype(target_dtype) - if isinstance(data, np.ndarray) and len(data.shape) > 0 and any( - isinstance(x, Variable) for x in data): + if ( + isinstance(data, np.ndarray) + and len(data.shape) > 0 + and any(isinstance(x, Variable) for x in data) + ): if not all( - [x.shape == (1, ) for x in data if isinstance(x, Variable)]): + [x.shape == (1,) for x in data if isinstance(x, Variable)] + ): raise TypeError( "Unsupport paddle.to_tensor([Variable, Variable...]) 
with non-scalar variable." ) @@ -513,27 +586,31 @@ def full_like(x, fill_value, dtype=None, name=None): return _C_ops.full_like(x, fill_value, dtype, x.place) if _in_legacy_dygraph(): - return _legacy_C_ops.fill_any_like(x, 'value', fill_value, 'dtype', - dtype) + return _legacy_C_ops.fill_any_like( + x, 'value', fill_value, 'dtype', dtype + ) helper = LayerHelper("full_like", **locals()) check_variable_and_dtype( - x, 'x', + x, + 'x', ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'], - 'full_like') + 'full_like', + ) check_dtype( - dtype, 'dtype', + dtype, + 'dtype', ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'], - 'full_like/zeros_like/ones_like') + 'full_like/zeros_like/ones_like', + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='fill_any_like', - inputs={'X': [x]}, - attrs={ - 'value': fill_value, - "dtype": dtype - }, - outputs={'Out': [out]}) + helper.append_op( + type='fill_any_like', + inputs={'X': [x]}, + attrs={'value': fill_value, "dtype": dtype}, + outputs={'Out': [out]}, + ) out.stop_gradient = True return out @@ -728,26 +805,34 @@ def eye(num_rows, num_columns=None, dtype=None, name=None): if _non_static_mode(): if in_dygraph_mode(): - out = _C_ops.eye(num_rows, num_columns, dtype, - _current_expected_place()) + out = _C_ops.eye( + num_rows, num_columns, dtype, _current_expected_place() + ) elif _in_legacy_dygraph(): - out = _legacy_C_ops.eye('dtype', dtype, 'num_rows', num_rows, - 'num_columns', num_columns) + out = _legacy_C_ops.eye( + 'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns + ) else: helper = LayerHelper("eye", **locals()) - check_dtype(dtype, 'dtype', - ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye') + check_dtype( + dtype, + 'dtype', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'eye', + ) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='eye', - inputs={}, - outputs={'Out': [out]}, - attrs={ - 'num_rows': num_rows, - 'num_columns': num_columns, - 'dtype': dtype - }, - stop_gradient=True) + helper.append_op( + type='eye', + inputs={}, + outputs={'Out': [out]}, + attrs={ + 'num_rows': num_rows, + 'num_columns': num_columns, + 'dtype': dtype, + }, + stop_gradient=True, + ) out.stop_gradient = True return out @@ -866,8 +951,11 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): start = 0 out_shape = None - if not isinstance(start, Variable) and not isinstance( - end, Variable) and not isinstance(step, Variable): + if ( + not isinstance(start, Variable) + and not isinstance(end, Variable) + and not isinstance(step, Variable) + ): out_shape = [int(math.ceil((end - start) / step))] if not isinstance(dtype, core.VarDesc.VarType): @@ -899,17 +987,16 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): out.stop_gradient = True return out - check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], - 'range/arange') + check_dtype( + dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'range/arange' + ) helper = LayerHelper('range', **locals()) out = helper.create_variable_for_type_inference(dtype, shape=out_shape) - helper.append_op(type='range', - inputs={ - 'Start': start, - 'End': end, - 'Step': step - }, - outputs={'Out': out}) + helper.append_op( + type='range', + inputs={'Start': start, 'End': end, 'Step': step}, + outputs={'Out': out}, + ) out.stop_gradient = True if out_shape is not None: out.desc.set_shape(out_shape) @@ -917,28 +1004,30 @@ def arange(start=0, 
end=None, step=1, dtype=None, name=None): def _tril_triu_op(helper): - """Base op of tril_op and triu_op - """ + """Base op of tril_op and triu_op""" op_type = helper.layer_type x = helper.kwargs.get('x', None) assert x is not None, 'x cannot be None in {}'.format(op_type) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], - op_type) + x, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], + op_type, + ) if len(x.shape) < 2: raise ValueError("x shape in {} must be at least 2-D".format(op_type)) diagonal = helper.kwargs.get('diagonal', 0) - if not isinstance(diagonal, (int, )): + if not isinstance(diagonal, (int,)): raise TypeError("diagonal in {} must be a python Int".format(op_type)) name = helper.kwargs.get('name', None) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: - out = helper.create_variable(name=name, - dtype=x.dtype, - persistable=False) + out = helper.create_variable( + name=name, dtype=x.dtype, persistable=False + ) helper.append_op( type="tril_triu", @@ -1131,18 +1220,21 @@ def meshgrid(*args, **kwargs): raise TypeError("The type of input args in meshgrid should be list.") for id, input_ in enumerate(args): - check_dtype(input_.dtype, 'create data type', - ['float16', 'float32', 'float64', 'int32', 'int64'], - 'meshgrid') + check_dtype( + input_.dtype, + 'create data type', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'meshgrid', + ) num = len(args) out = [ helper.create_variable_for_type_inference(dtype=args[i].dtype) for i in range(num) ] - helper.append_op(type='meshgrid', - inputs={'X': list(args)}, - outputs={'Out': out}) + helper.append_op( + type='meshgrid', inputs={'X': list(args)}, outputs={'Out': out} + ) return out @@ -1242,17 +1334,21 @@ def diagflat(x, offset=0, name=None): if _in_legacy_dygraph(): if len(x.shape) == 1: - return _legacy_C_ops.diag_v2(x, "offset", offset, "padding_value", - padding_value) + return _legacy_C_ops.diag_v2( + x, "offset", offset, "padding_value", padding_value + ) else: y, _ = _legacy_C_ops.flatten_contiguous_range( - x, "start_axis", 0, "stop_axis", -1) - return _legacy_C_ops.diag_v2(y, "offset", offset, "padding_value", - padding_value) + x, "start_axis", 0, "stop_axis", -1 + ) + return _legacy_C_ops.diag_v2( + y, "offset", offset, "padding_value", padding_value + ) check_type(x, 'x', (Variable), 'diagflat') - check_dtype(x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], - 'diagflat') + check_dtype( + x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat' + ) check_type(offset, 'offset', (int), 'diagflat') helper = LayerHelper("diagflat", **locals()) @@ -1261,33 +1357,27 @@ def diagflat(x, offset=0, name=None): out2 = helper.create_variable_for_type_inference(dtype=x.dtype) if len(x.shape) == 1: - helper.append_op(type='diag_v2', - inputs={'X': x}, - outputs={'Out': out2}, - attrs={ - 'offset': offset, - 'padding_value': padding_value - }) + helper.append_op( + type='diag_v2', + inputs={'X': x}, + outputs={'Out': out2}, + attrs={'offset': offset, 'padding_value': padding_value}, + ) else: - helper.append_op(type='flatten_contiguous_range', - inputs={'X': x}, - outputs={ - 'Out': out1, - 'XShape': out1_shape - }, - attrs={ - 'start_axis': 0, - 'stop_axis': -1 - }) + helper.append_op( + type='flatten_contiguous_range', + inputs={'X': x}, + outputs={'Out': out1, 'XShape': out1_shape}, + attrs={'start_axis': 0, 'stop_axis': -1}, + ) out1.stop_gradient = True - helper.append_op(type='diag_v2', - inputs={'X': 
out1}, - outputs={'Out': out2}, - attrs={ - 'offset': offset, - 'padding_value': padding_value - }) + helper.append_op( + type='diag_v2', + inputs={'X': out1}, + outputs={'Out': out2}, + attrs={'offset': offset, 'padding_value': padding_value}, + ) out2.stop_gradient = True return out2 @@ -1371,30 +1461,36 @@ def diag(x, offset=0, padding_value=0, name=None): return _C_ops.diag(x, offset, padding_value) else: if _in_legacy_dygraph(): - return _legacy_C_ops.diag_v2(x, "offset", offset, "padding_value", - padding_value) + return _legacy_C_ops.diag_v2( + x, "offset", offset, "padding_value", padding_value + ) else: check_type(x, 'x', (Variable), 'diag_v2') - check_dtype(x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], - 'diag_v2') + check_dtype( + x.dtype, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'diag_v2', + ) check_type(offset, 'offset', (int), 'diag_v2') check_type(padding_value, 'padding_value', (int, float), 'diag_v2') if len(x.shape) != 1 and len(x.shape) != 2: raise ValueError( - "The dimension of input x must be either 1 or 2, but received {}" - .format(len(x.shape))) + "The dimension of input x must be either 1 or 2, but received {}".format( + len(x.shape) + ) + ) helper = LayerHelper("diag_v2", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='diag_v2', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'offset': offset, - 'padding_value': padding_value - }) + helper.append_op( + type='diag_v2', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'offset': offset, 'padding_value': padding_value}, + ) out.stop_gradient = True return out @@ -1456,42 +1552,48 @@ def empty(shape, dtype=None, name=None): if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) - out = _C_ops.empty(shape, convert_np_dtype_to_dtype_(dtype), - _current_expected_place()) + out = _C_ops.empty( + shape, convert_np_dtype_to_dtype_(dtype), _current_expected_place() + ) out.stop_gradient = True return out if _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - out = _legacy_C_ops.empty('shape', shape, 'dtype', - convert_np_dtype_to_dtype_(dtype)) + out = _legacy_C_ops.empty( + 'shape', shape, 'dtype', convert_np_dtype_to_dtype_(dtype) + ) out.stop_gradient = True return out helper = LayerHelper("empty", **locals()) inputs = {} - check_dtype(dtype, 'dtype', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'empty') + check_dtype( + dtype, + 'dtype', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'empty', + ) check_type(shape, 'shape', (Variable, list, tuple), 'empty') if isinstance(shape, Variable): check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty') attrs = {} - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='empty') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='empty' + ) out = helper.create_variable_for_type_inference(dtype=dtype) attrs['dtype'] = convert_np_dtype_to_dtype_(dtype) - helper.append_op(type='empty', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs, - stop_gradient=True) + helper.append_op( + type='empty', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + stop_gradient=True, + ) out.stop_gradient = True return out @@ -1529,40 +1631,51 @@ def empty_like(x, dtype=None, name=None): dtype = convert_dtype(dtype) if in_dygraph_mode(): - out = _C_ops.empty(x.shape, convert_np_dtype_to_dtype_(dtype), - _current_expected_place()) + out = _C_ops.empty( + x.shape, + 
convert_np_dtype_to_dtype_(dtype), + _current_expected_place(), + ) out.stop_gradient = True return out if _in_legacy_dygraph(): - out = _legacy_C_ops.empty('shape', x.shape, 'dtype', - convert_np_dtype_to_dtype_(dtype)) + out = _legacy_C_ops.empty( + 'shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype) + ) out.stop_gradient = True return out helper = LayerHelper("empty_like", **locals()) check_variable_and_dtype( - x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'empty_like') - check_dtype(dtype, 'dtype', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'empty_like') + x, + 'x', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'empty_like', + ) + check_dtype( + dtype, + 'dtype', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'empty_like', + ) out = helper.create_variable_for_type_inference(dtype=dtype) inputs = {} attrs = {} attrs['dtype'] = convert_np_dtype_to_dtype_(dtype) shape = paddle.shape(x) - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='empty_like') - - helper.append_op(type='empty', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs, - stop_gradient=True) + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='empty_like' + ) + + helper.append_op( + type='empty', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + stop_gradient=True, + ) out.stop_gradient = True return out @@ -1597,8 +1710,12 @@ def assign(x, output=None): """ input = x helper = LayerHelper('assign', **locals()) - check_type(input, 'input', - (Variable, np.ndarray, list, tuple, float, int, bool), 'assign') + check_type( + input, + 'input', + (Variable, np.ndarray, list, tuple, float, int, bool), + 'assign', + ) is_inplace = True if output is not None else False if np.isscalar(input) and not isinstance(input, str): @@ -1621,24 +1738,40 @@ def assign(x, output=None): output = core.VarBase() _legacy_C_ops.assign(input, output) else: - check_dtype(input.dtype, 'input', [ - 'float16', 'uint16', 'float32', 'float64', 'int32', 'int64', - 'uint8', 'bool' - ], 'assign', '(When the type of input in assign is Variable.)') + check_dtype( + input.dtype, + 'input', + [ + 'float16', + 'uint16', + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', + 'bool', + ], + 'assign', + '(When the type of input in assign is Variable.)', + ) if output is None: output = helper.create_variable_for_type_inference( - dtype=input.dtype) - helper.append_op(type='assign', - inputs={'X': [input]}, - outputs={'Out': [output]}) + dtype=input.dtype + ) + helper.append_op( + type='assign', inputs={'X': [input]}, outputs={'Out': [output]} + ) elif isinstance(input, np.ndarray): # We now support the form of [var, VAR...] if the Var.shape=[1,] if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input): # We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types. - if not all([ - x.shape == (1, ) for x in input + if not all( + [ + x.shape == (1,) + for x in input if isinstance(x, (Variable, core.eager.Tensor)) - ]): + ] + ): raise TypeError( "Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable." ) @@ -1654,8 +1787,7 @@ def assign(x, output=None): return ret if input.dtype == 'object': - """ may be this form [[Var], [Var], [3], [4]], we reject them. 
- """ + """may be this form [[Var], [Var], [3], [4]], we reject them.""" raise TypeError( "The type of received input == `object`, it is not supported to convert to tensor, such as [[Var], [Var], [3], [4]]" ) @@ -1667,7 +1799,8 @@ def assign(x, output=None): warnings.warn( "paddle.assign doesn't support float64 input now due " "to current platform protobuf data limitation, we convert " - "it to float32") + "it to float32" + ) dtype = core.VarDesc.VarType.FP32 if dtype == core.VarDesc.VarType.BOOL: value_name = "bool_values" @@ -1685,31 +1818,49 @@ def assign(x, output=None): raise TypeError( "When the type of 'input' in assign is numpy.ndarray, " "the data type of 'input' must be bool, float32, int32 or int64, but " - "received %s." % convert_dtype(dtype)) + "received %s." % convert_dtype(dtype) + ) if input.size > 1024 * 1024: - raise ValueError("The size of input is too big. Please consider " - "saving it to file and 'load_op' to load it") + raise ValueError( + "The size of input is too big. Please consider " + "saving it to file and 'load_op' to load it" + ) if in_dygraph_mode(): if output is None: output = zeros(list(input.shape), dtype) - _C_ops.assign_value_(output, list(input.shape), dtype, values, - _current_expected_place()) + _C_ops.assign_value_( + output, + list(input.shape), + dtype, + values, + _current_expected_place(), + ) elif _in_legacy_dygraph(): if output is None: output = core.VarBase() - _legacy_C_ops.assign_value(output, 'shape', list(input.shape), - 'dtype', dtype, value_name, values) + _legacy_C_ops.assign_value( + output, + 'shape', + list(input.shape), + 'dtype', + dtype, + value_name, + values, + ) else: if output is None: output = helper.create_variable_for_type_inference( - dtype=input.dtype) - helper.append_op(type='assign_value', - outputs={'Out': [output]}, - attrs={ - 'dtype': dtype, - 'shape': list(input.shape), - value_name: values - }) + dtype=input.dtype + ) + helper.append_op( + type='assign_value', + outputs={'Out': [output]}, + attrs={ + 'dtype': dtype, + 'shape': list(input.shape), + value_name: values, + }, + ) if is_inplace and _in_legacy_dygraph(): output._bump_inplace_version() @@ -1747,7 +1898,7 @@ def clone(x, name=None): return x.clone() -#NOTE(zhiqiu): not public +# NOTE(zhiqiu): not public def _memcpy(input, place=None, output=None): """ @@ -1775,10 +1926,22 @@ def _memcpy(input, place=None, output=None): check_type(input, 'input', (Variable), 'memcpy') if isinstance(input, (Variable, core.VarBase)): - check_dtype(input.dtype, 'input', [ - 'float16', 'uint16', 'float32', 'float64', 'int32', 'int64', - 'uint8', 'bool' - ], 'memcpy', '(When the type of input in memcpy is Variable.)') + check_dtype( + input.dtype, + 'input', + [ + 'float16', + 'uint16', + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', + 'bool', + ], + 'memcpy', + '(When the type of input in memcpy is Variable.)', + ) if output is None: output = helper.create_variable_for_type_inference(dtype=input.dtype) @@ -1800,10 +1963,12 @@ def _memcpy(input, place=None, output=None): dst_place_type = 4 attrs = {'dst_place_type': dst_place_type} - helper.append_op(type='memcpy', - inputs={'X': [input]}, - outputs={'Out': [output]}, - attrs=attrs) + helper.append_op( + type='memcpy', + inputs={'X': [input]}, + outputs={'Out': [output]}, + attrs=attrs, + ) return output @@ -1846,7 +2011,8 @@ def complex(real, imag, name=None): helper = LayerHelper(op_type, **locals()) inputs = {"X": real, "Y": imag} out = helper.create_variable_for_type_inference( - 
dtype=_real_to_complex_dtype(real.dtype)) + dtype=_real_to_complex_dtype(real.dtype) + ) outputs = {"Out": out} attrs = {} helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs) @@ -1914,13 +2080,15 @@ def tril_indices(row, col, offset=0, dtype='int64'): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): - out = _C_ops.tril_indices(row, col, offset, dtype, - _current_expected_place()) + out = _C_ops.tril_indices( + row, col, offset, dtype, _current_expected_place() + ) return out if _in_legacy_dygraph(): - out = _legacy_C_ops.tril_indices('rows', row, 'cols', col, 'offset', - offset, "dtype", dtype) + out = _legacy_C_ops.tril_indices( + 'rows', row, 'cols', col, 'offset', offset, "dtype", dtype + ) return out else: @@ -1928,15 +2096,12 @@ def tril_indices(row, col, offset=0, dtype='int64'): out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='tril_indices', - inputs={}, - outputs={'out': [out]}, - attrs={ - 'rows': row, - 'cols': col, - 'offset': offset, - 'dtype': dtype - }) + helper.append_op( + type='tril_indices', + inputs={}, + outputs={'out': [out]}, + attrs={'rows': row, 'cols': col, 'offset': offset, 'dtype': dtype}, + ) return out @@ -1999,13 +2164,15 @@ def triu_indices(row, col=None, offset=0, dtype='int64'): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): - out = _C_ops.triu_indices(row, col, offset, dtype, - _current_expected_place()) + out = _C_ops.triu_indices( + row, col, offset, dtype, _current_expected_place() + ) return out if _in_legacy_dygraph(): - out = _legacy_C_ops.triu_indices('row', row, 'col', col, 'offset', - offset, "dtype", dtype) + out = _legacy_C_ops.triu_indices( + 'row', row, 'col', col, 'offset', offset, "dtype", dtype + ) return out else: @@ -2013,13 +2180,10 @@ def triu_indices(row, col=None, offset=0, dtype='int64'): out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='triu_indices', - inputs={}, - outputs={'out': [out]}, - attrs={ - 'row': row, - 'col': col, - 'offset': offset, - 'dtype': dtype - }) + helper.append_op( + type='triu_indices', + inputs={}, + outputs={'out': [out]}, + attrs={'row': row, 'col': col, 'offset': offset, 'dtype': dtype}, + ) return out diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py index f8b014afb617af781629e33e7ae1bdbc1ae96775..5c792f8fe0df80032b201dd597e6c48d8c2c0621 100644 --- a/python/paddle/tensor/einsum.py +++ b/python/paddle/tensor/einsum.py @@ -50,12 +50,13 @@ def parse_op_labels(labelstr, operand): ''' # Sanity checks for c in labelstr.replace('.', ''): - assert c.isalpha(), ( - f"Invalid equation: {c} is not a valid label, which should be letters." - ) + assert ( + c.isalpha() + ), f"Invalid equation: {c} is not a valid label, which should be letters." - assert labelstr.replace('...', '', 1).find('.') == -1, ( - "Invalid equation: `.` is found outside of an ellipsis.") + assert ( + labelstr.replace('...', '', 1).find('.') == -1 + ), "Invalid equation: `.` is found outside of an ellipsis." # Check shape. Note, in Paddle a tensor rank is always nonzero ndims = len(operand.shape) @@ -63,8 +64,9 @@ def parse_op_labels(labelstr, operand): full_labelstr = labelstr.replace('...', '.' * (ndims - len(labelstr) + 3)) - assert len(full_labelstr) == ndims, ( - f"Invalid equation: the label string '{labelstr}' misses dimensions.") + assert ( + len(full_labelstr) == ndims + ), f"Invalid equation: the label string '{labelstr}' misses dimensions." 
return full_labelstr @@ -88,7 +90,8 @@ def parse_labels(labelstr, operands): nop_labels = labelstr.split(',') assert len(nop_labels) == len(operands), ( f"Invalid equation: the number of operands is {len(operands)}, " - f"but found {len(nop_labels)} segments in the label equation.") + f"but found {len(nop_labels)} segments in the label equation." + ) return list(map(parse_op_labels, nop_labels, operands)) @@ -99,8 +102,9 @@ def validate_rhs(rhs, input_labels, n_bcast_dims): ''' # Sanity check. if n_bcast_dims > 0: - assert '...' in rhs, ( - "Invalid equation: missing ellipsis in output labels.") + assert ( + '...' in rhs + ), "Invalid equation: missing ellipsis in output labels." rhs = rhs.replace('...', '') rhs_set = set(rhs) @@ -112,10 +116,12 @@ def validate_rhs(rhs, input_labels, n_bcast_dims): non_input_labels = rhs_set.difference(input_labels) assert not non_input_labels, ( f"Invalid equation: " - f"output label {sorted(non_input_labels)} not used by any input.") + f"output label {sorted(non_input_labels)} not used by any input." + ) # Verify that output labels are not duplicate - assert len(rhs) == len(rhs_set), ( - "Invalid equation: duplicate output labels are found.") + assert len(rhs) == len( + rhs_set + ), "Invalid equation: duplicate output labels are found." def build_view(in_labels, out_labels): @@ -157,8 +163,8 @@ def build_view(in_labels, out_labels): # fill the broadcast dimension indices from right to left. if s: for ax, dim in zip( - range(start, end)[::-1], - range(s.start(), s.end())[::-1]): + range(start, end)[::-1], range(s.start(), s.end())[::-1] + ): inv_map[ax] = dim # Now work on non-broadcast dimensions @@ -217,7 +223,8 @@ def build_global_view(nop_labels, rhs, n_bcast_dims): g_labels_out = rhs.replace('...', '.' * n_bcast_dims) else: g_labels_out = '.' * n_bcast_dims + ''.join( - l for l, c in zip(labels, count) if c == 1) + l for l, c in zip(labels, count) if c == 1 + ) for i in range(len(count))[::-1]: if labels[i] in g_labels_out: @@ -265,12 +272,14 @@ def build_global_shape(g_view, g_labels, op_shapes): assert not non_bcastable, ( f"Invalid operands: label {g_labels[non_bcastable[0]]} " - f"corresponds to non-broadcastable dimensions.") + f"corresponds to non-broadcastable dimensions." + ) g_shape = [sizes.pop() if len(sizes) > 0 else 1 for sizes in g_shape] - g_masks = [[s > 1 or s == -1 for s in view_shape] - for view_shape in view_shapes] + g_masks = [ + [s > 1 or s == -1 for s in view_shape] for view_shape in view_shapes + ] return g_shape, g_masks @@ -295,8 +304,9 @@ def diagonalize(labels, operand): -------- 'ijj...i' would be merged into 'ij...' ''' - assert not has_duplicated_labels(labels), ( - 'Duplicate labels are not supported.') + assert not has_duplicated_labels( + labels + ), 'Duplicate labels are not supported.' 
return labels, operand @@ -356,12 +366,21 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K): plan.add_step(step) # Check if conditions hold for turnning the operation into a matmul - if j1 + j2 > 0 and k > 0 and -1 not in np.concatenate( - (op1_vshape, op2_vshape)): - op1_shape = list(op1_vshape[I]) + [np.prod(op1_vshape[J1]) - ] + [np.prod(op1_vshape[K])] - op2_shape = list(op2_vshape[I]) + [np.prod(op2_vshape[J2]) - ] + [np.prod(op2_vshape[K])] + if ( + j1 + j2 > 0 + and k > 0 + and -1 not in np.concatenate((op1_vshape, op2_vshape)) + ): + op1_shape = ( + list(op1_vshape[I]) + + [np.prod(op1_vshape[J1])] + + [np.prod(op1_vshape[K])] + ) + op2_shape = ( + list(op2_vshape[I]) + + [np.prod(op2_vshape[J2])] + + [np.prod(op2_vshape[K])] + ) # Merge J dims and K dims by reshaping step = reshape, [var1], var1, op1_shape @@ -410,15 +429,22 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K): step = squeeze, [var2], var2, [-1, -2] plan.add_step(step) elif j1 + j2 == 0 and -1 not in np.concatenate( - (op1_vshape[K], op2_vshape[K])): + (op1_vshape[K], op2_vshape[K]) + ): assert all(op1_vshape[K] == op2_vshape[K]) - step = reshape, [ - var1 - ], var1, list(op1_vshape[I]) + [1] + [np.prod(op1_vshape[K])] + step = ( + reshape, + [var1], + var1, + list(op1_vshape[I]) + [1] + [np.prod(op1_vshape[K])], + ) plan.add_step(step) - step = reshape, [ - var2 - ], var2, list(op2_vshape[I]) + [1] + [np.prod(op2_vshape[K])] + step = ( + reshape, + [var2], + var2, + list(op2_vshape[I]) + [1] + [np.prod(op2_vshape[K])], + ) plan.add_step(step) step = matmul, [var1, var2], var2, False, True plan.add_step(step) @@ -447,8 +473,9 @@ def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K): g_view[op2] = list(op2_view) -def plan_summation(plan, g_view, op1, op2, g_supports, g_shape, g_count, - n_bcast): +def plan_summation( + plan, g_view, op1, op2, g_supports, g_shape, g_count, n_bcast +): ''' Plan various kinds of summation ''' @@ -462,8 +489,9 @@ def plan_summation(plan, g_view, op1, op2, g_supports, g_shape, g_count, I, K, J1, J2 = list(range(n_bcast)), [], [], [] - for ax, dim1, dim2 in zip(range(n_bcast, ndim), op1_view[n_bcast:], - op2_view[n_bcast:]): + for ax, dim1, dim2 in zip( + range(n_bcast, ndim), op1_view[n_bcast:], op2_view[n_bcast:] + ): if (dim1 != -1) != (dim2 != -1): if dim1 != -1: @@ -529,7 +557,6 @@ def plan_broadcast(plan, operands, nop_axes): class Plan: - def __init__(self): self.env = {} self.steps = [] @@ -633,8 +660,9 @@ def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast): # op1 is a one element tensor. 
plan_scalar_prod(plan, i - 1, i) else: - plan_summation(plan, g_view, i - 1, i, g_supports, g_shape, g_count, - n_bcast) + plan_summation( + plan, g_view, i - 1, i, g_supports, g_shape, g_count, n_bcast + ) # for ax, dim in enumerate(g_view[nop-1][:nout]): # assert dim == ax @@ -676,7 +704,9 @@ def preprocess(equation, *operands): """ equation = equation.replace(" ", "") nop = len(operands) - assert nop > 0, "Required at least one operand in Einsum API, but received %s " % nop + assert nop > 0, ( + "Required at least one operand in Einsum API, but received %s " % nop + ) # Part the equation to left hand side and right hand side lhs, *rhs = equation.lower().split('->') @@ -690,16 +720,20 @@ def preprocess(equation, *operands): assert len(lhs.split(',')) == len(operands), ( f"Invalid equation: the number of operands is {len(operands)}, " - f"but found {len(lhs.split(','))} segments in the label equation.") + f"but found {len(lhs.split(','))} segments in the label equation." + ) - assert not ('...' in lhs and '...' not in rhs - ), 'Invalid equation: missing ellipsis in output labels.' + assert not ( + '...' in lhs and '...' not in rhs + ), 'Invalid equation: missing ellipsis in output labels.' - assert not (len(list(filter(has_duplicated_labels, lhs.split(',')))) > - 0), 'Duplicate labels are not supported.' + assert not ( + len(list(filter(has_duplicated_labels, lhs.split(',')))) > 0 + ), 'Duplicate labels are not supported.' assert not has_duplicated_labels( - rhs), 'Invalid equation: duplicate output labels are found.' + rhs + ), 'Invalid equation: duplicate output labels are found.' return lhs, rhs, labels @@ -717,10 +751,10 @@ def parse_fake_shape(equation, operands, labels): shaped = collections.namedtuple('shaped', ['shape']) def fake_shape(label, op): - assert len(op.shape) == len( - label - ), "length of shape and length of label must be the same, but received %d != %d" % ( - len(op.shape), len(label)) + assert len(op.shape) == len(label), ( + "length of shape and length of label must be the same, but received %d != %d" + % (len(op.shape), len(label)) + ) fakes = [s for i, (l, s) in enumerate(zip(label, op.shape)) if l != '.'] fakes = list(map(abs, fakes)) # make -1 -> 1 if '.' in label: @@ -732,7 +766,6 @@ def parse_fake_shape(equation, operands, labels): def rhs_inference(lhs): - def is_free(key): return cnt.get(key) == 1 and key not in ['.', ','] @@ -751,7 +784,8 @@ def gen_equation_for_opteinsum(lhs, rhs): def get_used_label(counter): used = set(counter.elements()) for c in string.ascii_lowercase: - if c not in used: return c + if c not in used: + return c raise ValueError( "You have used all `a` - `z`, there can't find a unused for einsum optimization" ) @@ -784,14 +818,15 @@ def einsum_v2(equation, *operands): var_list = list(operands) for path in cons: (a, b), _, eq, *__ = path - assert a > b, "Assume the first var_idx is smaller than the second_idx. opt_einsum can guarantee it." + assert ( + a > b + ), "Assume the first var_idx is smaller than the second_idx. opt_einsum can guarantee it." var_s = [var_list.pop(a), var_list.pop(b)] eq = eq.replace(broadcast_label, "...") var_list.append(gen_einsum_op(eq, *var_s)) - assert len( - var_list - ) == 1, "There must be one elements in list, but received %d." % len( - var_list) + assert ( + len(var_list) == 1 + ), "There must be one elements in list, but received %d." 
% len(var_list) return var_list[0] @@ -805,8 +840,9 @@ def gen_einsum_op(equation, *operands): if _in_legacy_dygraph(): # dygraph - return _legacy_C_ops.einsum(operands, len(operands), len(operands), - 'equation', equation)[0] + return _legacy_C_ops.einsum( + operands, len(operands), len(operands), 'equation', equation + )[0] for inp in operands: check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum') @@ -823,14 +859,12 @@ def gen_einsum_op(equation, *operands): helper.create_variable_for_type_inference(dtype=operands[0].dtype) for i in range(len(operands)) ] - helper.append_op(type='einsum', - inputs={'Operands': operands}, - outputs={ - 'Out': out, - "InnerCache": caches, - "XShape": xshape - }, - attrs=attrs) + helper.append_op( + type='einsum', + inputs={'Operands': operands}, + outputs={'Out': out, "InnerCache": caches, "XShape": xshape}, + attrs=attrs, + ) return out @@ -992,6 +1026,7 @@ def einsum(equation, *operands): # [0.51476848, 0.23367381, 0.39229113]]]) """ import os + if int(os.environ.get('FLAGS_new_einsum', "1")): return einsum_v2(equation, *operands) @@ -1037,9 +1072,11 @@ def einsum(equation, *operands): # Counting how many non-trivial dimensions remain for each ax g_labels, g_view, g_nout, g_count = build_global_view( - nop_labels, rhs, n_bcast_dims) - g_shape, g_supports = build_global_shape(g_view, g_labels, - [op.shape for op in operands]) + nop_labels, rhs, n_bcast_dims + ) + g_shape, g_supports = build_global_shape( + g_view, g_labels, [op.shape for op in operands] + ) # Now we're ready to build up an execution plan args = operands, g_view, g_shape, g_supports, g_count, n_bcast_dims diff --git a/python/paddle/tensor/layer_function_generator.py b/python/paddle/tensor/layer_function_generator.py index e4b36ba5e1418312aaeeb0c14c6ca71edabef37a..fb2a611257e13113bd8fedcabacf82405e4b12be 100644 --- a/python/paddle/tensor/layer_function_generator.py +++ b/python/paddle/tensor/layer_function_generator.py @@ -19,7 +19,13 @@ import string from io import StringIO from ..static import Variable from ..fluid.proto import framework_pb2 -from ..framework import OpProtoHolder, _non_static_mode, convert_np_dtype_to_dtype_, core, in_dygraph_mode +from ..framework import ( + OpProtoHolder, + _non_static_mode, + convert_np_dtype_to_dtype_, + core, + in_dygraph_mode, +) from ..framework import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype from paddle import _C_ops, _legacy_C_ops @@ -53,16 +59,16 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!") def escape_math(text): - #return _two_bang_pattern_.sub( + # return _two_bang_pattern_.sub( # r'$$\1$$', # _single_dollar_pattern_.sub(r':math:\n`\1`', # _two_dollar_pattern_.sub(r"!!\1!!", text))) return _two_dollar_pattern_.sub(r':math:`\1`', text) -def _generate_doc_string_(op_proto, - additional_args_lines=None, - skip_attrs_set=None): +def _generate_doc_string_( + op_proto, additional_args_lines=None, skip_attrs_set=None +): """ Generate docstring by OpProto @@ -142,23 +148,30 @@ def generate_layer_fn(op_type): """ op_proto = OpProtoHolder.instance().get_op_proto(op_type) - not_intermediate_outputs = \ - [output for output in op_proto.outputs if not output.intermediate] - intermediate_outputs = \ - [output for output in op_proto.outputs if output.intermediate] + not_intermediate_outputs = [ + output for output in op_proto.outputs if not output.intermediate + ] + intermediate_outputs = [ + output for output in op_proto.outputs if output.intermediate + ] if len(not_intermediate_outputs) != 1: - raise 
ValueError("Only one non intermediate output operator can be", - "automatically generated. {0}".format(op_type)) + raise ValueError( + "Only one non intermediate output operator can be", + "automatically generated. {0}".format(op_type), + ) if not_intermediate_outputs[0].duplicable: raise ValueError( - "Only non duplicable op can be automatically generated.") + "Only non duplicable op can be automatically generated." + ) for output in intermediate_outputs: if output.duplicable: - raise ValueError("The op can be automatically generated only when ", - "all intermediate ops are not duplicable.") + raise ValueError( + "The op can be automatically generated only when ", + "all intermediate ops are not duplicable.", + ) o_name = not_intermediate_outputs[0].name intermediate_output_names = [output.name for output in intermediate_outputs] @@ -183,14 +196,17 @@ def generate_layer_fn(op_type): for each in val: if not isinstance(each, Variable): raise ValueError( - "input of {0} must be variable".format(op_type)) + "input of {0} must be variable".format(op_type) + ) if dtype is None: dtype = each.dtype elif dtype != each.dtype: raise ValueError( "operator {0} must input same dtype. {1} vs {2}".format( - op_type, dtype, each.dtype)) + op_type, dtype, each.dtype + ) + ) if dtype is None: arg_dtype = kwargs.get("dtype") @@ -222,8 +238,11 @@ def generate_layer_fn(op_type): outputs = dict() out = kwargs.pop(_convert_(o_name), []) if out: - out_var = out[0] if (isinstance(out, list) - or isinstance(out, tuple)) else out + out_var = ( + out[0] + if (isinstance(out, list) or isinstance(out, tuple)) + else out + ) else: out_var = helper.create_variable_for_type_inference(dtype=dtype) outputs[o_name] = [out_var] @@ -231,10 +250,9 @@ def generate_layer_fn(op_type): outputs[name] = [ helper.create_variable_for_type_inference(dtype=dtype) ] - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=kwargs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs + ) return helper.append_activation(out_var) func.__name__ = op_type @@ -265,14 +283,25 @@ def generate_activation_fn(op_type): return op(x) if op_type not in ["abs", "exp", "square"]: - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - op_type) + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], op_type + ) else: # abs exp square ops support dtype(int32, int64, float16, float32, float64) - check_variable_and_dtype(x, 'x', [ - 'int32', 'int64', 'float16', 'float32', 'float64', 'complex64', - 'complex128' - ], op_type) + check_variable_and_dtype( + x, + 'x', + [ + 'int32', + 'int64', + 'float16', + 'float32', + 'float64', + 'complex64', + 'complex128', + ], + op_type, + ) helper = LayerHelper(op_type, **locals()) @@ -285,7 +314,8 @@ def generate_activation_fn(op_type): op_proto, additional_args_lines=[ "name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`." - ]) + ], + ) return func @@ -308,15 +338,19 @@ def generate_inplace_fn(inplace_op_type): op = getattr(_legacy_C_ops, inplace_op_type) return op(x) warnings.warn( - "In static mode, {}() is the same as {}() and does not perform inplace operation." 
- .format(inplace_op_type, origin_op_type)) + "In static mode, {}() is the same as {}() and does not perform inplace operation.".format( + inplace_op_type, origin_op_type + ) + ) return generate_activation_fn(origin_op_type)(x, name) func.__name__ = inplace_op_type func.__doc__ = """ Inplace version of ``{0}`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_fluid_layers_{1}`. -""".format(origin_op_type, origin_op_type) +""".format( + origin_op_type, origin_op_type + ) return func @@ -360,18 +394,21 @@ def templatedoc(op_type=None): for each_input in op_proto.inputs: input_name = _convert_(each_input.name) args["{0}_comment".format(input_name)] = trim_ending_dot( - each_input.comment) + each_input.comment + ) args["{0}_type".format(input_name)] = "Variable" for each_attr in op_proto.attrs: input_name = _convert_(each_attr.name) args["{0}_comment".format(input_name)] = trim_ending_dot( - each_attr.comment) + each_attr.comment + ) args["{0}_type".format(input_name)] = _type_to_str_(each_attr.type) for each_opt in op_proto.outputs: output_name = _convert_(each_opt.name) args["{0}_comment".format(output_name)] = trim_ending_dot( - each_opt.comment) + each_opt.comment + ) args["{0}_type".format(output_name)] = "Variable" func.__doc__ = tmpl.substitute(args) return func diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index d341deabbd2b042277dde09853f41c6a36b6f391..3ed56a35dfa9cdc720872a8fd489907d8285d5fb 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -15,7 +15,11 @@ import numpy as np from ..framework import LayerHelper from ..framework import _non_static_mode, in_dygraph_mode -from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype +from ..fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, +) from ..static import Variable from ..fluid.framework import _in_legacy_dygraph from .manipulation import cast @@ -90,10 +94,21 @@ def transpose(x, perm, name=None): out, _ = _legacy_C_ops.transpose2(x, 'axis', perm) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], 'transpose') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'transpose', + ) check_type(perm, 'perm', (list, tuple), 'transpose') if isinstance(perm, tuple): perm = list(perm) @@ -102,24 +117,25 @@ def transpose(x, perm, name=None): "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " "but received dimension of Input(x) is %s, " - "the length of Input(perm) is %s." % (len(x.shape), len(perm))) + "the length of Input(perm) is %s." % (len(x.shape), len(perm)) + ) for idx, dim in enumerate(perm): if dim >= len(x.shape): raise ValueError( "Each element in Input(perm) should be less than Input(x)'s dimension, " "but %d-th element in Input(perm) is %d which exceeds Input(x)'s " - "dimension %d." % (idx, perm[idx], len(x.shape))) + "dimension %d." 
% (idx, perm[idx], len(x.shape)) + ) helper = LayerHelper('transpose', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='transpose2', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'XShape': [x_shape] - }, - attrs={'axis': perm}) + helper.append_op( + type='transpose2', + inputs={'X': [x]}, + outputs={'Out': [out], 'XShape': [x_shape]}, + attrs={'axis': perm}, + ) return out @@ -234,21 +250,22 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( - val, name, + val, + name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], - 'matmul') + 'matmul', + ) __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='matmul_v2', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='matmul_v2', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs=attrs, + ) return out @@ -358,32 +375,35 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): return _C_ops.frobenius_norm(input, dim, keepdim, False) if _in_legacy_dygraph(): if dim is None: - return _legacy_C_ops.frobenius_norm(input, 'keep_dim', keepdim, - 'reduce_all', True) - return _legacy_C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', - keepdim, 'reduce_all', False) + return _legacy_C_ops.frobenius_norm( + input, 'keep_dim', keepdim, 'reduce_all', True + ) + return _legacy_C_ops.frobenius_norm( + input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False + ) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'frobenius_norm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'frobenius_norm' + ) helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) - helper.append_op(type='frobenius_norm', - inputs={'X': input}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='frobenius_norm', + inputs={'X': input}, + outputs={'Out': out}, + attrs=attrs, + ) return out - def vector_norm(input, - porder=None, - axis=None, - keepdim=False, - asvector=False, - name=None): + def vector_norm( + input, porder=None, axis=None, keepdim=False, asvector=False, name=None + ): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: @@ -393,21 +413,32 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if in_dygraph_mode(): - if axis is None: axis = -1 + if axis is None: + axis = -1 return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector) if _in_legacy_dygraph(): - if axis is None: axis = -1 - return _legacy_C_ops.p_norm(input, 'porder', porder, 'axis', axis, - 'keepdim', keepdim, 'asvector', - asvector) + if axis is None: + axis = -1 + return _legacy_C_ops.p_norm( + input, + 'porder', + porder, + 'axis', + axis, + 'keepdim', + keepdim, + 'asvector', + asvector, + ) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') - check_variable_and_dtype(input, 'input', ['float32', 'float64'], - 'p_norm') + check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'p_norm' + ) attrs = { 'axis': axis if axis is not None else -1, @@ -418,23 +449,27 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) - helper.append_op(type='p_norm', - inputs={'X': input}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='p_norm', + inputs={'X': input}, + outputs={'Out': out}, + attrs=attrs, + ) return out - def inf_norm(input, - porder=None, - axis=axis, - keepdim=False, - asvector=False, - name=None): + def inf_norm( + input, porder=None, axis=axis, keepdim=False, asvector=False, name=None + ): if in_dygraph_mode(): out = _C_ops.abs(input) - reduce_all = True if axis == None or axis == [] or asvector == True else False + reduce_all = ( + True + if axis == None or axis == [] or asvector == True + else False + ) axis = axis if axis != None and axis != [] else [0] if reduce_all: assert (axis == []) or (axis is None) @@ -445,28 +480,31 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): helper = LayerHelper('inf_norm', **locals()) out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype()) + dtype=helper.input_dtype() + ) - reduce_all = True if axis == None or axis == [] or asvector == True else False + reduce_all = ( + True if axis == None or axis == [] or asvector == True else False + ) axis = axis if axis != None and axis != [] else [0] - reduce_type = 'reduce_max' if porder == np.float64( - 'inf') else 'reduce_min' - helper.append_op(type=reduce_type, - inputs={'X': out}, - outputs={'Out': reduce_out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + reduce_type = ( + 'reduce_max' if porder == np.float64('inf') else 'reduce_min' + ) + helper.append_op( + type=reduce_type, + inputs={'X': out}, + outputs={'Out': reduce_out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return reduce_out - def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): + def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. @@ -475,38 +513,48 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): abs_out = _C_ops.abs(input) pow_out = _C_ops.pow(abs_out, porder) sum_out = _C_ops.sum(pow_out, axis, None, keepdim) - out = _C_ops.pow(sum_out, float(1. 
/ porder)) + out = _C_ops.pow(sum_out, float(1.0 / porder)) return out block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) abs_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) - block.append_op(type='abs', - inputs={'X': input}, - outputs={'Out': abs_out}) + dtype=block.input_dtype() + ) + block.append_op( + type='abs', inputs={'X': input}, outputs={'Out': abs_out} + ) pow_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) - block.append_op(type='pow', - inputs={'X': abs_out}, - outputs={'Out': pow_out}, - attrs={'factor': porder}) + block.append_op( + type='pow', + inputs={'X': abs_out}, + outputs={'Out': pow_out}, + attrs={'factor': porder}, + ) sum_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) - block.append_op(type='reduce_sum', - inputs={'X': pow_out}, - outputs={'Out': sum_out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': True if axis is None else False - }) - block.append_op(type='pow', - inputs={'X': sum_out}, - outputs={'Out': out}, - attrs={'factor': float(1. / porder)}) + dtype=block.input_dtype() + ) + block.append_op( + type='reduce_sum', + inputs={'X': pow_out}, + outputs={'Out': sum_out}, + attrs={ + 'dim': axis, + 'keep_dim': keepdim, + 'reduce_all': True if axis is None else False, + }, + ) + block.append_op( + type='pow', + inputs={'X': sum_out}, + outputs={'Out': out}, + attrs={'factor': float(1.0 / porder)}, + ) return out if axis is None and p is not None: @@ -515,50 +563,60 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( - "only valid string values are 'fro', found {}".format(p)) + "only valid string values are 'fro', found {}".format(p) + ) elif isinstance(p, (int, float)): - return vector_norm(x, - porder=p, - axis=axis, - keepdim=keepdim, - asvector=True, - name=name) + return vector_norm( + x, + porder=p, + axis=axis, + keepdim=keepdim, + asvector=True, + name=name, + ) else: raise ValueError( - "only valid p type is string or float, found {}".format( - type(p))) + "only valid p type is string or float, found {}".format(type(p)) + ) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] - #calculate vector norm, where axis is int or list with only one integer + # calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": - return vector_norm(x, - porder=2, - axis=axis, - keepdim=keepdim, - asvector=False, - name=name) + return vector_norm( + x, + porder=2, + axis=axis, + keepdim=keepdim, + asvector=False, + name=name, + ) else: raise ValueError( - "only valid string values are 'fro', found {}".format(p)) + "only valid string values are 'fro', found {}".format(p) + ) elif isinstance(p, (int, float)): - return vector_norm(x, - axis=axis, - porder=p, - keepdim=keepdim, - asvector=False, - name=name) + return vector_norm( + x, + axis=axis, + porder=p, + keepdim=keepdim, + asvector=False, + name=name, + ) else: raise ValueError( - "unspport p for p-order vector norm. except float, found {}". - format(p)) - #calculate matrix norm, where axis is list with two integers + "unspport p for p-order vector norm. 
except float, found {}".format( + p + ) + ) + # calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) @@ -566,18 +624,20 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( - "just suport axis type int or list (length of list <=1) if p = 0, found {}" - .format(axis)) + "just suport axis type int or list (length of list <=1) if p = 0, found {}".format( + axis + ) + ) else: - return p_matrix_norm(x, - porder=p, - axis=axis, - keepdim=keepdim, - name=name) + return p_matrix_norm( + x, porder=p, axis=axis, keepdim=keepdim, name=name + ) else: raise ValueError( - "except axis type int or list (length of list <=2), found {}". - format(axis)) + "except axis type int or list (length of list <=2), found {}".format( + axis + ) + ) def dist(x, y, p=2, name=None): @@ -674,10 +734,9 @@ def dist(x, y, p=2, name=None): inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} - helper.append_op(type='dist', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -768,7 +827,7 @@ def cond(x, p=None, name=None): """ - def mat_norm(input, porder=1., axis=None): + def mat_norm(input, porder=1.0, axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, @@ -789,54 +848,81 @@ def cond(x, p=None, name=None): elif _in_legacy_dygraph(): abs_out = _legacy_C_ops.abs(input) - sum_out = _legacy_C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', - keepdim, 'reduce_all', - reduce_all) + sum_out = _legacy_C_ops.reduce_sum( + abs_out, + 'dim', + axis, + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) if porder == 1 or porder == np.inf: - return _legacy_C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', - keepdim, 'reduce_all', - reduce_all) + return _legacy_C_ops.reduce_max( + sum_out, + 'dim', + [-1], + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) if porder == -1 or porder == -np.inf: - return _legacy_C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', - keepdim, 'reduce_all', - reduce_all) + return _legacy_C_ops.reduce_min( + sum_out, + 'dim', + [-1], + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) else: block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) sum_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) - block.append_op(type='abs', - inputs={'X': input}, - outputs={'Out': abs_out}) - block.append_op(type='reduce_sum', - inputs={'X': abs_out}, - outputs={'Out': sum_out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + dtype=block.input_dtype() + ) + block.append_op( + type='abs', inputs={'X': input}, outputs={'Out': abs_out} + ) + block.append_op( + type='reduce_sum', + inputs={'X': abs_out}, + outputs={'Out': sum_out}, + attrs={ + 'dim': axis, + 'keep_dim': keepdim, + 'reduce_all': reduce_all, + }, + ) if porder == 1 or porder == np.inf: - block.append_op(type='reduce_max', - inputs={'X': sum_out}, - outputs={'Out': out}, - attrs={ - 'dim': [-1], - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + block.append_op( + 
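To make the dispatch in the reformatted norm() easier to follow, here is a short usage sketch; it assumes a working Paddle install and the public paddle.linalg.norm alias, and only exercises branches visible in the hunk above:

import paddle

x = paddle.arange(12, dtype='float32').reshape([3, 4])

fro = paddle.linalg.norm(x, p='fro')              # axis is None -> frobenius_norm
rows = paddle.linalg.norm(x, p=2, axis=1)         # int axis, numeric p -> vector_norm
m_inf = paddle.linalg.norm(x, p=float('inf'), axis=[0, 1])  # two axes + inf -> inf_norm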
type='reduce_max', + inputs={'X': sum_out}, + outputs={'Out': out}, + attrs={ + 'dim': [-1], + 'keep_dim': keepdim, + 'reduce_all': reduce_all, + }, + ) if porder == -1 or porder == -np.inf: - block.append_op(type='reduce_min', - inputs={'X': sum_out}, - outputs={'Out': out}, - attrs={ - 'dim': [-1], - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + block.append_op( + type='reduce_min', + inputs={'X': sum_out}, + outputs={'Out': out}, + attrs={ + 'dim': [-1], + 'keep_dim': keepdim, + 'reduce_all': reduce_all, + }, + ) return out def fro_norm(input, porder=2, axis=[-1]): @@ -851,50 +937,66 @@ def cond(x, p=None, name=None): pow_out = _C_ops.pow(input, porder) sum_out_1 = _C_ops.sum(pow_out, axis, None, keepdim) sum_out_2 = _C_ops.sum(sum_out_1, axis, None, keepdim) - return _C_ops.pow(sum_out_2, float(1. / porder)) + return _C_ops.pow(sum_out_2, float(1.0 / porder)) elif paddle.in_dynamic_mode(): pow_out = _legacy_C_ops.pow(input, 'factor', porder) - sum_out_1 = _legacy_C_ops.reduce_sum(pow_out, 'dim', axis, - 'keepdim', keepdim, - 'reduce_all', reduce_all) - sum_out_2 = _legacy_C_ops.reduce_sum(sum_out_1, 'dim', axis, - 'keepdim', keepdim, - 'reduce_all', reduce_all) - return _legacy_C_ops.pow(sum_out_2, 'factor', float(1. / porder)) + sum_out_1 = _legacy_C_ops.reduce_sum( + pow_out, + 'dim', + axis, + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) + sum_out_2 = _legacy_C_ops.reduce_sum( + sum_out_1, + 'dim', + axis, + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) + return _legacy_C_ops.pow(sum_out_2, 'factor', float(1.0 / porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) sum_out_1 = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) sum_out_2 = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) - block.append_op(type='pow', - inputs={'X': input}, - outputs={'Out': pow_out}, - attrs={'factor': porder}) - block.append_op(type='reduce_sum', - inputs={'X': pow_out}, - outputs={'Out': sum_out_1}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) - block.append_op(type='reduce_sum', - inputs={'X': sum_out_1}, - outputs={'Out': sum_out_2}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) - block.append_op(type='pow', - inputs={'X': sum_out_2}, - outputs={'Out': out}, - attrs={'factor': float(1. 
/ porder)}) + dtype=block.input_dtype() + ) + block.append_op( + type='pow', + inputs={'X': input}, + outputs={'Out': pow_out}, + attrs={'factor': porder}, + ) + block.append_op( + type='reduce_sum', + inputs={'X': pow_out}, + outputs={'Out': sum_out_1}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) + block.append_op( + type='reduce_sum', + inputs={'X': sum_out_1}, + outputs={'Out': sum_out_2}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) + block.append_op( + type='pow', + inputs={'X': sum_out_2}, + outputs={'Out': out}, + attrs={'factor': float(1.0 / porder)}, + ) return out def svd_norm(input, porder, axis=[-1]): @@ -913,9 +1015,15 @@ def cond(x, p=None, name=None): if in_dygraph_mode(): return _C_ops.sum(s, axis, None, keepdim) else: - return _legacy_C_ops.reduce_sum(s, 'dim', axis, 'keepdim', - keepdim, 'reduce_all', - reduce_all) + return _legacy_C_ops.reduce_sum( + s, + 'dim', + axis, + 'keepdim', + keepdim, + 'reduce_all', + reduce_all, + ) if in_dygraph_mode(): max_out = _C_ops.max(s, axis, keepdim) min_out = _C_ops.min(s, axis, keepdim) @@ -925,75 +1033,70 @@ def cond(x, p=None, name=None): return _C_ops.divide(min_out, max_out) else: - max_out = _legacy_C_ops.reduce_max(s, 'dim', axis, 'keepdim', - keepdim, 'reduce_all', - reduce_all) - min_out = _legacy_C_ops.reduce_min(s, 'dim', axis, 'keepdim', - keepdim, 'reduce_all', - reduce_all) + max_out = _legacy_C_ops.reduce_max( + s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all + ) + min_out = _legacy_C_ops.reduce_min( + s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all + ) if porder == 2: return _legacy_C_ops.elementwise_div( - max_out, min_out, 'aixs', axis, 'use_mkldnn', False) + max_out, min_out, 'aixs', axis, 'use_mkldnn', False + ) if porder == -2: return _legacy_C_ops.elementwise_div( - min_out, max_out, 'aixs', axis, 'use_mkldnn', False) + min_out, max_out, 'aixs', axis, 'use_mkldnn', False + ) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) if porder == "nuc": - block.append_op(type='reduce_sum', - inputs={'X': s}, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + block.append_op( + type='reduce_sum', + inputs={'X': s}, + outputs={'Out': out}, + attrs={ + 'dim': axis, + 'keep_dim': keepdim, + 'reduce_all': reduce_all, + }, + ) return out max_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) + dtype=block.input_dtype() + ) min_out = block.create_variable_for_type_inference( - dtype=block.input_dtype()) - block.append_op(type='reduce_max', - inputs={'X': s}, - outputs={'Out': max_out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) - block.append_op(type='reduce_min', - inputs={'X': s}, - outputs={'Out': min_out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + dtype=block.input_dtype() + ) + block.append_op( + type='reduce_max', + inputs={'X': s}, + outputs={'Out': max_out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) + block.append_op( + type='reduce_min', + inputs={'X': s}, + outputs={'Out': min_out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) if porder == 2: - block.append_op(type='elementwise_div', - inputs={ - 'X': max_out, - 'Y': min_out - }, - outputs={'Out': out}, - attrs={ - 'aixs': axis, - 'use_mkldnn': False - }) + block.append_op( 
+ type='elementwise_div', + inputs={'X': max_out, 'Y': min_out}, + outputs={'Out': out}, + attrs={'aixs': axis, 'use_mkldnn': False}, + ) return out if porder == -2: - block.append_op(type='elementwise_div', - inputs={ - 'X': min_out, - 'Y': max_out - }, - outputs={'Out': out}, - attrs={ - 'aixs': axis, - 'use_mkldnn': False - }) + block.append_op( + type='elementwise_div', + inputs={'X': min_out, 'Y': max_out}, + outputs={'Out': out}, + attrs={'aixs': axis, 'use_mkldnn': False}, + ) return out def empty_tensor(input, shape): @@ -1004,8 +1107,9 @@ def cond(x, p=None, name=None): x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError( - "input should be a matrix or batches of matrices, " + - "but the dimention of received input is {}".format(len(x_shape))) + "input should be a matrix or batches of matrices, " + + "but the dimention of received input is {}".format(len(x_shape)) + ) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 @@ -1020,21 +1124,26 @@ def cond(x, p=None, name=None): return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm(x, porder=p, axis=[-2]) * mat_norm( - x_inv, porder=p, axis=[-2]) + x_inv, porder=p, axis=[-2] + ) if p in (np.inf, -np.inf): return mat_norm(x, porder=p, axis=[-1]) * mat_norm( - x_inv, porder=p, axis=[-1]) + x_inv, porder=p, axis=[-1] + ) else: - raise ValueError("only support p is {} when input is a ".format(p) + - "square matrix or batches of square matrices") + raise ValueError( + "only support p is {} when input is a ".format(p) + + "square matrix or batches of square matrices" + ) elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( - "unsupported {} for p, only supporting ('fro', 'nuc', ".format(p) + - "1, -1, 2, -2, inf, -inf) or none") + "unsupported {} for p, only supporting ('fro', 'nuc', ".format(p) + + "1, -1, 2, -2, inf, -inf) or none" + ) def dot(x, y, name=None): @@ -1082,25 +1191,23 @@ def dot(x, y, name=None): assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - op_type) - check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], - op_type) + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type + ) + check_variable_and_dtype( + y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type + ) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: - out = helper.create_variable(name=name, - dtype=x.dtype, - persistable=False) - helper.append_op(type="dot", - inputs={ - 'X': x, - 'Y': y - }, - attrs={}, - outputs={"Out": out}) + out = helper.create_variable( + name=name, dtype=x.dtype, persistable=False + ) + helper.append_op( + type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out} + ) return out @@ -1143,7 +1250,8 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " - "length of Input(input) is %s." % len(x.shape)) + "length of Input(input) is %s." 
% len(x.shape) + ) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: @@ -1157,16 +1265,20 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " - "shape of Input(input) is %s." % len(fweights.shape)) + "shape of Input(input) is %s." % len(fweights.shape) + ) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " - "size of Input(fweights) is {}.".format(observation_num, - fweights.shape[0])) + "size of Input(fweights) is {}.".format( + observation_num, fweights.shape[0] + ) + ) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " - "min of Input(fweights) is {}.".format(fweights.min())) + "min of Input(fweights) is {}.".format(fweights.min()) + ) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") @@ -1175,18 +1287,23 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " - "length of Input(input) is %s." % len(aweights.shape)) - check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], - 'cov') + "length of Input(input) is %s." % len(aweights.shape) + ) + check_variable_and_dtype( + aweights, 'dtype', ['float32', 'float64'], 'cov' + ) if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " - "size of Input(aweights) is {}.".format(observation_num, - aweights.shape[0])) + "size of Input(aweights) is {}.".format( + observation_num, aweights.shape[0] + ) + ) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " - "min of Input(aweights) is {}.".format(aweights.min())) + "min of Input(aweights) is {}.".format(aweights.min()) + ) if w is not None: w = w * aw else: @@ -1260,7 +1377,8 @@ def t(input, name=None): raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." - "tensor.transpose() instead." % len(input.shape)) + "tensor.transpose() instead." 
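As a quick reference for the cov() validation rules reflowed above, a usage sketch (assuming the paddle.linalg.cov alias; rows are variables and columns are observations by default):

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0]])
c = paddle.linalg.cov(x)  # 2x2 covariance matrix (rowvar=True, ddof=True by default)

Passing an fweights or aweights tensor whose length does not match the number of observations, or one with negative entries, raises the ValueError messages shown in the hunk.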
% len(input.shape) + ) if in_dygraph_mode(): if len(input.shape) == 1: return input @@ -1278,8 +1396,11 @@ def t(input, name=None): return out check_variable_and_dtype( - input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'transpose') + input, + 'input', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'transpose', + ) helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) @@ -1287,13 +1408,12 @@ def t(input, name=None): if len(input.shape) == 1: out = input else: - helper.append_op(type='transpose2', - inputs={'X': [input]}, - outputs={ - 'Out': [out], - 'XShape': [input_shape] - }, - attrs={'axis': [1, 0]}) + helper.append_op( + type='transpose2', + inputs={'X': [input]}, + outputs={'Out': [out], 'XShape': [input_shape]}, + attrs={'axis': [1, 0]}, + ) return out @@ -1350,13 +1470,12 @@ def cross(x, y, axis=9, name=None): attrs = dict() attrs['dim'] = axis - helper.append_op(type='cross', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='cross', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs=attrs, + ) return out @@ -1411,10 +1530,12 @@ def cholesky(x, upper=False, name=None): check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='cholesky', - inputs={'X': [x]}, - outputs={'Out': out}, - attrs={'upper': upper}) + helper.append_op( + type='cholesky', + inputs={'X': [x]}, + outputs={'Out': out}, + attrs={'upper': upper}, + ) return out @@ -1464,8 +1585,9 @@ def matrix_rank(x, tol=None, hermitian=False, name=None): else: tol_tensor = tol use_default_tol = False - return _C_ops.matrix_rank_tol(x, tol_tensor, use_default_tol, - hermitian) + return _C_ops.matrix_rank_tol( + x, tol_tensor, use_default_tol, hermitian + ) if tol is None: tol_attr = 0.0 @@ -1491,9 +1613,16 @@ def matrix_rank(x, tol=None, hermitian=False, name=None): tol_tensor = None tol_attr = float(tol) use_default_tol = False - return _legacy_C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, - 'hermitian', hermitian, - 'use_default_tol', use_default_tol) + return _legacy_C_ops.matrix_rank( + x, + tol_tensor, + "tol", + tol_attr, + 'hermitian', + hermitian, + 'use_default_tol', + use_default_tol, + ) inputs = {} attrs = {} @@ -1516,10 +1645,9 @@ def matrix_rank(x, tol=None, hermitian=False, name=None): helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') - helper.append_op(type='matrix_rank', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -1566,16 +1694,22 @@ def bmm(x, y, name=None): y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( - "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}" - .format(x_shape, y_shape)) + "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".format( + x_shape, y_shape + ) + ) if x_shape[2] != y_shape[1]: raise ValueError( - "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}" - .format(x_shape, y_shape)) + "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".format( + x_shape, y_shape + ) + ) if x_shape[0] != y_shape[0]: raise ValueError( - "x's batch (shape[0]) must be equal with y's batch (shape[0]). 
But received x's shape: {}, y's shape: {}" - .format(x_shape, y_shape)) + "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".format( + x_shape, y_shape + ) + ) if in_dygraph_mode(): return _C_ops.bmm(x, y) @@ -1618,22 +1752,21 @@ def histogram(input, bins=100, min=0, max=0, name=None): return _C_ops.histogram(input, bins, min, max) if _in_legacy_dygraph(): - return _legacy_C_ops.histogram(input, "bins", bins, "min", min, "max", - max) + return _legacy_C_ops.histogram( + input, "bins", bins, "min", min, "max", max + ) helper = LayerHelper('histogram', **locals()) - check_variable_and_dtype(input, 'X', - ['int32', 'int64', 'float32', 'float64'], - 'histogram') + check_variable_and_dtype( + input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram' + ) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) - helper.append_op(type='histogram', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'bins': bins, - 'min': min, - 'max': max - }) + helper.append_op( + type='histogram', + inputs={'X': input}, + outputs={'Out': out}, + attrs={'bins': bins, 'min': min, 'max': max}, + ) return out @@ -1677,19 +1810,21 @@ def bincount(x, weights=None, minlength=0, name=None): check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: - check_variable_and_dtype(weights, 'Weights', - ['int32', 'int64', 'float32', 'float64'], - 'bincount') + check_variable_and_dtype( + weights, + 'Weights', + ['int32', 'int64', 'float32', 'float64'], + 'bincount', + ) out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='bincount', - inputs={ - 'X': x, - 'Weights': weights - }, - outputs={'Out': out}, - attrs={'minlength': minlength}) + helper.append_op( + type='bincount', + inputs={'X': x, 'Weights': weights}, + outputs={'Out': out}, + attrs={'minlength': minlength}, + ) return out @@ -1734,29 +1869,31 @@ def mv(x, vec, name=None): def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): - check_variable_and_dtype(val, name, ['float32', 'float64'], - 'mv') + check_variable_and_dtype( + val, name, ['float32', 'float64'], 'mv' + ) x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( - "x should be 2-dimensional. But received x's dimention: {}" - .format(x_shape)) + "x should be 2-dimensional. But received x's dimention: {}".format( + x_shape + ) + ) if len(vec_shape) != 1: raise ValueError( - "vec should be 1-dimensional. But received vec's dimention: {}" - .format(vec_shape)) + "vec should be 1-dimensional. 
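The bmm() shape checks above are easiest to read next to a concrete call (a sketch assuming the top-level paddle.bmm alias):

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.rand([2, 4, 5])
out = paddle.bmm(x, y)  # batch matmul; both inputs 3-D, result shape [2, 3, 5]

Mismatched batch sizes or an inner dimension that does not line up triggers the ValueError branches in the hunk.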
But received vec's dimention: {}".format( + vec_shape + ) + ) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='mv', - inputs={ - 'X': x, - 'Vec': vec - }, - outputs={'Out': out}) + helper.append_op( + type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out} + ) return out @@ -1796,22 +1933,23 @@ def det(x, name=None): check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) - assert len(input_shape) >= 2, \ - "The x must be at least 2-dimensional, " \ - "but received Input x's dimensional: %s.\n" % \ - len(input_shape) - - assert (input_shape[-1] == input_shape[-2]), \ - "Expect squared input," \ - "but received %s by %s matrix.\n" \ - %(input_shape[-2], input_shape[-1]) \ + assert len(input_shape) >= 2, ( + "The x must be at least 2-dimensional, " + "but received Input x's dimensional: %s.\n" % len(input_shape) + ) + assert ( + input_shape[-1] == input_shape[-2] + ), "Expect squared input," "but received %s by %s matrix.\n" % ( + input_shape[-2], + input_shape[-1], + ) helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='determinant', - inputs={'Input': [x]}, - outputs={'Out': [out]}) + helper.append_op( + type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]} + ) return out @@ -1855,22 +1993,23 @@ def slogdet(x, name=None): check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) - assert len(input_shape) >= 2, \ - "The x must be at least 2-dimensional, " \ - "but received Input x's dimensional: %s.\n" % \ - len(input_shape) - - assert (input_shape[-1] == input_shape[-2]), \ - "Expect squared input," \ - "but received %s by %s matrix.\n" \ - %(input_shape[-2], input_shape[-1]) \ + assert len(input_shape) >= 2, ( + "The x must be at least 2-dimensional, " + "but received Input x's dimensional: %s.\n" % len(input_shape) + ) + assert ( + input_shape[-1] == input_shape[-2] + ), "Expect squared input," "but received %s by %s matrix.\n" % ( + input_shape[-2], + input_shape[-1], + ) helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='slogdeterminant', - inputs={'Input': [x]}, - outputs={'Out': [out]}) + helper.append_op( + type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]} + ) return out @@ -1937,11 +2076,7 @@ def svd(x, full_matrices=False, name=None): helper.append_op( type='svd', inputs={'X': [x]}, - outputs={ - 'U': u, - 'VH': vh, - 'S': s - }, + outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh @@ -2010,10 +2145,12 @@ def matrix_power(x, n, name=None): check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='matrix_power', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'n': n}) + helper.append_op( + type='matrix_power', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'n': n}, + ) return out @@ -2078,13 +2215,9 @@ def qr(x, mode="reduced", name=None): r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode - helper.append_op(type='qr', - inputs={'X': [x]}, - outputs={ - 'Q': q, - 'R': r - }, - attrs=attrs) + helper.append_op( + type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs + ) if mode == "r": return r else: @@ -2183,14 
+2316,12 @@ def lu(x, pivot=True, get_infos=False, name=None): info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivot'] = pivot - helper.append_op(type='lu', - inputs={'X': x}, - outputs={ - 'Out': lu, - 'Pivots': p, - 'Infos': info - }, - attrs=attrs) + helper.append_op( + type='lu', + inputs={'X': x}, + outputs={'Out': lu, 'Pivots': p, 'Infos': info}, + attrs=attrs, + ) if get_infos: return lu, p, info else: @@ -2275,8 +2406,9 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): return P, L, U if paddle.in_dynamic_mode(): - P, L, U = _legacy_C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, - 'unpack_pivots', unpack_pivots) + P, L, U = _legacy_C_ops.lu_unpack( + x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots + ) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') @@ -2288,17 +2420,12 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots - helper.append_op(type='lu_unpack', - inputs={ - 'X': x, - 'Pivots': y - }, - outputs={ - 'Pmat': p, - 'L': l, - 'U': u - }, - attrs=attrs) + helper.append_op( + type='lu_unpack', + inputs={'X': x, 'Pivots': y}, + outputs={'Pmat': p, 'L': l, 'U': u}, + attrs=attrs, + ) return p, l, u @@ -2354,9 +2481,9 @@ def eig(x, name=None): w, v = _legacy_C_ops.eig(x) return w, v - check_variable_and_dtype(x, 'X', - ['float32', 'float64', 'complex64', 'complex128'], - 'eig') + check_variable_and_dtype( + x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig' + ) helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) @@ -2405,20 +2532,24 @@ def eigvals(x, name=None): # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ - check_variable_and_dtype(x, 'dtype', - ['float32', 'float64', 'complex64', 'complex128'], - 'eigvals') + check_variable_and_dtype( + x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals' + ) x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( - "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}" - .format(len(x_shape), x_shape)) + "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".format( + len(x_shape), x_shape + ) + ) if x_shape[-1] != x_shape[-2]: raise ValueError( - "The last two dimensions of Input(x) should be equal, but received x's shape = {}" - .format(x_shape)) + "The last two dimensions of Input(x) should be equal, but received x's shape = {}".format( + x_shape + ) + ) if in_dygraph_mode(): return _C_ops.eigvals(x) @@ -2494,11 +2625,16 @@ def multi_dot(x, name=None): check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): - check_variable_and_dtype(item, 'x[' + str(id) + ']', - ['float16', 'float32', 'float64'], 'multi_dot') + check_variable_and_dtype( + item, + 'x[' + str(id) + ']', + ['float16', 'float32', 'float64'], + 'multi_dot', + ) if item.dtype != x[0].dtype: raise TypeError( - "All the Tensors in the input must have the same data type.") + "All the Tensors in the input must have the same data type." 
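For the lu() and lu_unpack() hunks above, a usage sketch (assuming the paddle.linalg.lu and paddle.linalg.lu_unpack aliases):

import paddle

x = paddle.to_tensor([[4.0, 3.0], [6.0, 3.0]])
lu, pivots = paddle.linalg.lu(x)               # packed LU factors plus pivot indices
p, l, u = paddle.linalg.lu_unpack(lu, pivots)  # unpack into permutation P, lower L, upper U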
+ ) helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') @@ -2551,32 +2687,35 @@ def eigh(x, UPLO='L', name=None): if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " - "length of Input(input) is %s." % len(x.shape)) + "length of Input(input) is %s." % len(x.shape) + ) if x_shape[-1] != x_shape[-2]: raise ValueError( - "The input matrix must be batches of square matrices. But received x's dimention: {}" - .format(x_shape)) + "The input matrix must be batches of square matrices. But received x's dimention: {}".format( + x_shape + ) + ) if UPLO != 'L' and UPLO != 'U': raise ValueError( - "UPLO must be L or U. But received UPLO is: {}".format(UPLO)) + "UPLO must be L or U. But received UPLO is: {}".format(UPLO) + ) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) - check_variable_and_dtype(x, 'dtype', - ['float32', 'float64', 'complex64', 'complex128'], - 'eigh') + check_variable_and_dtype( + x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh' + ) out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='eigh', - inputs={'X': x}, - outputs={ - 'Eigenvalues': out_value, - 'Eigenvectors': out_vector - }, - attrs={'UPLO': UPLO}) + helper.append_op( + type='eigh', + inputs={'X': x}, + outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, + attrs={'UPLO': UPLO}, + ) return out_value, out_vector @@ -2695,8 +2834,9 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): if not hermitian: # combine svd and matmul op u, s, vt = _legacy_C_ops.svd(x, 'full_matrices', False) - max_singular_val = _legacy_C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ - 'reduce_all', False) + max_singular_val = _legacy_C_ops.reduce_max( + s, 'dim', [-1], 'keep_dim', True, 'reduce_all', False + ) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') @@ -2718,15 +2858,17 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): if in_dygraph_mode(): out_2 = _C_ops.matmul(out_1, u, False, True) else: - out_2 = _legacy_C_ops.matmul_v2(out_1, u, 'trans_x', False, - 'trans_y', True) + out_2 = _legacy_C_ops.matmul_v2( + out_1, u, 'trans_x', False, 'trans_y', True + ) return out_2 else: # combine eigh and matmul op s, u = _legacy_C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) - max_singular_val = _legacy_C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ - 'reduce_all', False) + max_singular_val = _legacy_C_ops.reduce_max( + s_abs, 'dim', [-1], 'keep_dim', True, 'reduce_all', False + ) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') @@ -2745,8 +2887,9 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): if in_dygraph_mode(): out_2 = _C_ops.matmul(out_1, u_conj, False, True) else: - out_2 = _legacy_C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, - 'trans_y', True) + out_2 = _legacy_C_ops.matmul_v2( + out_1, u_conj, 'trans_x', False, 'trans_y', True + ) return out_2 else: if not hermitian: @@ -2760,23 +2903,17 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): helper.append_op( type='svd', inputs={'X': [x]}, - outputs={ - 'U': u, - 'VH': vt, - 'S': s - }, + outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='reduce_max', - inputs={'X': s}, - outputs={'Out': max_singular_val}, - 
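multi_dot() above only changes its dtype-check layout; in use it chains a whole list of matrix products in one call (a sketch assuming the paddle.linalg.multi_dot alias):

import paddle

a = paddle.rand([3, 4])
b = paddle.rand([4, 5])
c = paddle.rand([5, 2])
out = paddle.linalg.multi_dot([a, b, c])  # result shape [3, 2]

All tensors in the list must share one dtype, otherwise the TypeError in the hunk is raised.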
attrs={ - 'dim': [-1], - 'keep_dim': True, - 'reduce_all': False - }) + helper.append_op( + type='reduce_max', + inputs={'X': s}, + outputs={'Out': max_singular_val}, + attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}, + ) rcond = full(shape=[1], fill_value=rcond, dtype=dtype) cutoff = rcond * max_singular_val @@ -2792,59 +2929,50 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='unsqueeze2', - inputs={'X': singular}, - attrs={'axes': [-2]}, - outputs={ - 'Out': st, - 'XShape': st_shape - }) + helper.append_op( + type='unsqueeze2', + inputs={'X': singular}, + attrs={'axes': [-2]}, + outputs={'Out': st, 'XShape': st_shape}, + ) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='transpose2', - inputs={'X': [vt]}, - outputs={ - 'Out': [v], - 'XShape': [v_shape] - }, - attrs={'axis': perm}) + helper.append_op( + type='transpose2', + inputs={'X': [vt]}, + outputs={'Out': [v], 'XShape': [v_shape]}, + attrs={'axis': perm}, + ) out_1 = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_mul', - inputs={ - 'X': v, - 'Y': st - }, - outputs={'Out': out_1}, - attrs={ - 'axis': -1, - 'use_mkldnn': False - }) + helper.append_op( + type='elementwise_mul', + inputs={'X': v, 'Y': st}, + outputs={'Out': out_1}, + attrs={'axis': -1, 'use_mkldnn': False}, + ) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', - inputs={ - 'X': out_1, - 'Y': u - }, + inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, - attrs={ - 'trans_x': False, - 'trans_y': True - }, + attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( - x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], - 'pinv') + x, + 'dtype', + ['float32', 'float64', 'complex64', 'complex128'], + 'pinv', + ) if dtype == paddle.complex128: s_type = 'float64' @@ -2855,26 +2983,23 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) - helper.append_op(type='eigh', - inputs={'X': x}, - outputs={ - 'Eigenvalues': s, - 'Eigenvectors': u - }, - attrs={'UPLO': 'L'}) + helper.append_op( + type='eigh', + inputs={'X': x}, + outputs={'Eigenvalues': s, 'Eigenvectors': u}, + attrs={'UPLO': 'L'}, + ) s_abs = helper.create_variable_for_type_inference(s_type) - helper.append_op(type='abs', - inputs={'X': s}, - outputs={'Out': s_abs}) + helper.append_op( + type='abs', inputs={'X': s}, outputs={'Out': s_abs} + ) max_singular_val = helper.create_variable_for_type_inference(s_type) - helper.append_op(type='reduce_max', - inputs={'X': s_abs}, - outputs={'Out': max_singular_val}, - attrs={ - 'dim': [-1], - 'keep_dim': True, - 'reduce_all': False - }) + helper.append_op( + type='reduce_max', + inputs={'X': s_abs}, + outputs={'Out': max_singular_val}, + attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}, + ) rcond = full(shape=[1], fill_value=rcond, dtype=s_type) cutoff = rcond * max_singular_val @@ -2890,44 +3015,33 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): st = helper.create_variable_for_type_inference(dtype=s_type) 
st_shape = helper.create_variable_for_type_inference(dtype=s_type) - helper.append_op(type='unsqueeze2', - inputs={'X': singular}, - attrs={'axes': [-2]}, - outputs={ - 'Out': st, - 'XShape': st_shape - }) + helper.append_op( + type='unsqueeze2', + inputs={'X': singular}, + attrs={'axes': [-2]}, + outputs={'Out': st, 'XShape': st_shape}, + ) out_1 = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_mul', - inputs={ - 'X': u, - 'Y': st - }, - outputs={'Out': out_1}, - attrs={ - 'axis': -1, - 'use_mkldnn': False - }) + helper.append_op( + type='elementwise_mul', + inputs={'X': u, 'Y': st}, + outputs={'Out': out_1}, + attrs={'axis': -1, 'use_mkldnn': False}, + ) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='conj', - inputs={'X': u}, - outputs={'Out': [u_conj]}) + helper.append_op( + type='conj', inputs={'X': u}, outputs={'Out': [u_conj]} + ) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', - inputs={ - 'X': out_1, - 'Y': u_conj - }, + inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, - attrs={ - 'trans_x': False, - 'trans_y': True - }, + attrs={'trans_x': False, 'trans_y': True}, ) return out_2 @@ -2984,21 +3098,15 @@ def solve(x, y, name=None): check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="solve", - inputs={ - "X": x, - "Y": y - }, - outputs={"Out": out}) + helper.append_op( + type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out} + ) return out -def triangular_solve(x, - y, - upper=True, - transpose=False, - unitriangular=False, - name=None): +def triangular_solve( + x, y, upper=True, transpose=False, unitriangular=False, name=None +): r""" Computes the solution of a system of equations with a triangular coefficient. `x` is coefficient matrix `y` is multiple right-hand sides of equations. 
@@ -3054,9 +3162,16 @@ def triangular_solve(x, return _C_ops.triangular_solve(x, y, upper, transpose, unitriangular) if paddle.in_dynamic_mode(): - return _legacy_C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', - transpose, 'unitriangular', - unitriangular) + return _legacy_C_ops.triangular_solve( + x, + y, + 'upper', + upper, + 'transpose', + transpose, + 'unitriangular', + unitriangular, + ) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) @@ -3064,17 +3179,16 @@ def triangular_solve(x, check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='triangular_solve', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs={ - 'upper': upper, - 'transpose': transpose, - 'unitriangular': unitriangular - }) + helper.append_op( + type='triangular_solve', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs={ + 'upper': upper, + 'transpose': transpose, + 'unitriangular': unitriangular, + }, + ) return out @@ -3122,13 +3236,12 @@ def cholesky_solve(x, y, upper=False, name=None): check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='cholesky_solve', - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs={'upper': upper}) + helper.append_op( + type='cholesky_solve', + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs={'upper': upper}, + ) return out @@ -3172,36 +3285,39 @@ def eigvalsh(x, UPLO='L', name=None): if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " - "length of Input(input) is %s." % len(x.shape)) + "length of Input(input) is %s." % len(x.shape) + ) if x_shape[-1] != x_shape[-2]: raise ValueError( - "The input matrix must be batches of square matrices. But received x's dimention: {}" - .format(x_shape)) + "The input matrix must be batches of square matrices. But received x's dimention: {}".format( + x_shape + ) + ) if UPLO != 'L' and UPLO != 'U': raise ValueError( - "UPLO must be L or U. But received UPLO is: {}".format(UPLO)) + "UPLO must be L or U. But received UPLO is: {}".format(UPLO) + ) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) - check_variable_and_dtype(x, 'dtype', - ['float32', 'float64', 'complex64', 'complex128'], - 'eigvalsh') + check_variable_and_dtype( + x, + 'dtype', + ['float32', 'float64', 'complex64', 'complex128'], + 'eigvalsh', + ) out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient - helper.append_op(type='eigvalsh', - inputs={'X': x}, - outputs={ - 'Eigenvalues': out_value, - 'Eigenvectors': out_vector - }, - attrs={ - 'UPLO': UPLO, - 'is_test': is_test - }) + helper.append_op( + type='eigvalsh', + inputs={'X': x}, + outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, + attrs={'UPLO': UPLO, 'is_test': is_test}, + ) return out_value @@ -3268,14 +3384,18 @@ def lstsq(x, y, rcond=None, driver=None, name=None): if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( - "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}" - .format(driver)) + "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. 
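The triangular_solve() wrapper above is only re-wrapped; as a usage sketch (assuming the paddle.linalg.triangular_solve alias), solving A @ X = B with a lower-triangular coefficient matrix:

import paddle

a = paddle.to_tensor([[2.0, 0.0], [1.0, 3.0]])         # lower-triangular coefficients
b = paddle.to_tensor([[4.0], [7.0]])                   # right-hand side
x = paddle.linalg.triangular_solve(a, b, upper=False)  # [[2.0], [5.0 / 3.0]]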
But got {}".format( + driver + ) + ) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( - "Only support valid driver is 'gels' or None for CUDA inputs. But got {}" - .format(driver)) + "Only support valid driver is 'gels' or None for CUDA inputs. But got {}".format( + driver + ) + ) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") @@ -3296,10 +3416,12 @@ def lstsq(x, y, rcond=None, driver=None, name=None): if _non_static_mode(): if in_dygraph_mode(): solution, residuals, rank, singular_values = _C_ops.lstsq( - x, y, rcond, driver) + x, y, rcond, driver + ) else: solution, residuals, rank, singular_values = _legacy_C_ops.lstsq( - x, y, 'rcond', rcond, 'driver', driver) + x, y, 'rcond', rcond, 'driver', driver + ) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) @@ -3310,33 +3432,29 @@ def lstsq(x, y, rcond=None, driver=None, name=None): return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) - check_variable_and_dtype(x, 'dtype', - ['float32', 'float64', 'complex64', 'complex128'], - 'lstsq') - check_variable_and_dtype(y, 'dtype', - ['float32', 'float64', 'complex64', 'complex128'], - 'lstsq') + check_variable_and_dtype( + x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq' + ) + check_variable_and_dtype( + y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq' + ) solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='lstsq', - inputs={ - 'X': x, - 'Y': y - }, - outputs={ - 'Solution': solution, - 'Residuals': residuals, - 'Rank': rank, - 'SingularValues': singular_values - }, - attrs={ - 'rcond': rcond, - 'driver': driver - }) + helper.append_op( + type='lstsq', + inputs={'X': x, 'Y': y}, + outputs={ + 'Solution': solution, + 'Residuals': residuals, + 'Rank': rank, + 'SingularValues': singular_values, + }, + attrs={'rcond': rcond, 'driver': driver}, + ) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) @@ -3388,11 +3506,12 @@ def corrcoef(x, rowvar=True, name=None): if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in corrcoef, but received " - "length of Input(input) is %s." % len(x.shape)) + "length of Input(input) is %s." % len(x.shape) + ) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'corrcoef') c = cov(x, rowvar) - if (c.ndim == 0): + if c.ndim == 0: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c @@ -3407,8 +3526,9 @@ def corrcoef(x, rowvar=True, name=None): # Clip to [-1, 1]. 
This does not guarantee if paddle.is_complex(c): - return paddle.complex(paddle.clip(c.real(), -1, 1), - paddle.clip(c.imag(), -1, 1)) + return paddle.complex( + paddle.clip(c.real(), -1, 1), paddle.clip(c.imag(), -1, 1) + ) else: c = paddle.clip(c, -1, 1) diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 6945ab3fd3f11d7ab5632f385544cc063d8f8334..bd7052153f6258c45ade2c0b10751562a37aa5ba 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -16,8 +16,10 @@ import paddle from ..fluid.data_feeder import check_type, check_variable_and_dtype from .layer_function_generator import templatedoc from ..static import Variable + # TODO: define logic functions of a tensor from ..fluid.framework import _in_eager_mode_ + if _in_eager_mode_: Tensor = paddle.fluid.framework.core.eager.Tensor else: @@ -26,6 +28,7 @@ else: from ..framework import in_dygraph_mode from ..framework import LayerHelper from ..fluid.framework import _in_legacy_dygraph + # TODO: define logic functions of a tensor from paddle import _C_ops, _legacy_C_ops from paddle.tensor.creation import full @@ -47,14 +50,18 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): else: return op(x) check_variable_and_dtype( - x, "x", + x, + "x", ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], - op_name) + op_name, + ) if y is not None: check_variable_and_dtype( - y, "y", + y, + "y", ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], - op_name) + op_name, + ) if out is not None: check_type(out, "out", Variable, op_name) @@ -63,18 +70,16 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if binary_op and x.dtype != y.dtype: raise ValueError( "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." 
- % (op_name, x.dtype, y.dtype)) + % (op_name, x.dtype, y.dtype) + ) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) if binary_op: - helper.append_op(type=op_name, - inputs={ - "X": x, - "Y": y - }, - outputs={"Out": out}) + helper.append_op( + type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out} + ) else: helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out}) @@ -116,12 +121,9 @@ def logical_and(x, y, out=None, name=None): if in_dygraph_mode(): return _C_ops.logical_and(x, y) - return _logical_op(op_name="logical_and", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True + ) def logical_or(x, y, out=None, name=None): @@ -161,12 +163,9 @@ def logical_or(x, y, out=None, name=None): """ if in_dygraph_mode(): return _C_ops.logical_or(x, y) - return _logical_op(op_name="logical_or", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True + ) def logical_xor(x, y, out=None, name=None): @@ -207,12 +206,9 @@ def logical_xor(x, y, out=None, name=None): if in_dygraph_mode(): return _C_ops.logical_xor(x, y) - return _logical_op(op_name="logical_xor", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _logical_op( + op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True + ) @templatedoc() @@ -245,12 +241,9 @@ def logical_not(x, out=None, name=None): """ if in_dygraph_mode(): return _C_ops.logical_not(x) - return _logical_op(op_name="logical_not", - x=x, - y=None, - name=name, - out=out, - binary_op=False) + return _logical_op( + op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False + ) def is_empty(x, name=None): @@ -288,16 +281,17 @@ def is_empty(x, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.is_empty(x) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'is_empty') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty' + ) check_type(name, "name", (str, type(None)), "is_empty") helper = LayerHelper("is_empty", **locals()) cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True - helper.append_op(type='is_empty', - inputs={'X': [x]}, - outputs={'Out': [cond]}) + helper.append_op( + type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]} + ) return cond @@ -338,12 +332,9 @@ def equal_all(x, y, name=None): helper = LayerHelper("equal_all", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') - helper.append_op(type='equal_all', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='equal_all', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [out]} + ) return out @@ -393,8 +384,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): if in_dygraph_mode(): return _C_ops.allclose(x, y, rtol, atol, equal_nan) if _in_legacy_dygraph(): - return _legacy_C_ops.allclose(x, y, 'rtol', str(rtol), 'atol', - str(atol), 'equal_nan', equal_nan) + return _legacy_C_ops.allclose( + x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan + ) check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose') check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose') check_type(rtol, 'rtol', float, 'allclose') @@ -407,10 +399,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): inputs = {'Input': x, 
'Other': y} outputs = {'Out': out} attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan} - helper.append_op(type='allclose', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='allclose', inputs=inputs, outputs=outputs, attrs=attrs + ) return out @@ -446,8 +437,10 @@ def equal(x, y, name=None): """ if not isinstance(y, (int, bool, float, Variable)): raise TypeError( - "Type of input args must be float, bool, int or Tensor, but received type {}" - .format(type(y))) + "Type of input args must be float, bool, int or Tensor, but received type {}".format( + type(y) + ) + ) if not isinstance(y, Variable): y = full(shape=[1], dtype=x.dtype, fill_value=y) @@ -459,21 +452,26 @@ def equal(x, y, name=None): return _legacy_C_ops.equal(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "equal") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "equal", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "equal") + y, + "y", + ["bool", "float32", "float64", "int32", "int64"], + "equal", + ) helper = LayerHelper("equal", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -511,21 +509,26 @@ def greater_equal(x, y, name=None): return _legacy_C_ops.greater_equal(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "greater_equal") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "greater_equal", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "greater_equal") + y, + "y", + ["bool", "float32", "float64", "int32", "int64"], + "greater_equal", + ) helper = LayerHelper("greater_equal", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='greater_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='greater_equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -562,21 +565,26 @@ def greater_than(x, y, name=None): return _legacy_C_ops.greater_than(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "greater_than") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "greater_than", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "greater_than") + y, + "y", + ["bool", "float32", "float64", "int32", "int64"], + "greater_than", + ) helper = LayerHelper("greater_than", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='greater_than', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='greater_than', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -615,21 +623,26 @@ def less_equal(x, y, name=None): return _legacy_C_ops.less_equal(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "less_equal") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "less_equal", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "less_equal") + y, 
+ "y", + ["bool", "float32", "float64", "int32", "int64"], + "less_equal", + ) helper = LayerHelper("less_equal", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='less_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='less_equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -668,21 +681,26 @@ def less_than(x, y, name=None): return _legacy_C_ops.less_than(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "less_than") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "less_than", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "less_than") + y, + "y", + ["bool", "float32", "float64", "int32", "int64"], + "less_than", + ) helper = LayerHelper("less_than", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='less_than', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='less_than', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -721,21 +739,26 @@ def not_equal(x, y, name=None): return _legacy_C_ops.not_equal(x, y) else: check_variable_and_dtype( - x, "x", ["bool", "float32", "float64", "int32", "int64"], - "not_equal") + x, + "x", + ["bool", "float32", "float64", "int32", "int64"], + "not_equal", + ) check_variable_and_dtype( - y, "y", ["bool", "float32", "float64", "int32", "int64"], - "not_equal") + y, + "y", + ["bool", "float32", "float64", "int32", "int64"], + "not_equal", + ) helper = LayerHelper("not_equal", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') out.stop_gradient = True - helper.append_op(type='not_equal', - inputs={ - 'X': [x], - 'Y': [y] - }, - outputs={'Out': [out]}) + helper.append_op( + type='not_equal', + inputs={'X': [x], 'Y': [y]}, + outputs={'Out': [out]}, + ) return out @@ -782,11 +805,15 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): return op(x) check_variable_and_dtype( - x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name) + x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name + ) if y is not None: check_variable_and_dtype( - y, "y", ["bool", "uint8", "int8", "int16", "int32", "int64"], - op_name) + y, + "y", + ["bool", "uint8", "int8", "int16", "int32", "int64"], + op_name, + ) if out is not None: check_type(out, "out", Variable, op_name) @@ -798,12 +825,9 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): out = helper.create_variable_for_type_inference(dtype=x.dtype) if binary_op: - helper.append_op(type=op_name, - inputs={ - "X": x, - "Y": y - }, - outputs={"Out": out}) + helper.append_op( + type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out} + ) else: helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out}) @@ -834,12 +858,9 @@ def bitwise_and(x, y, out=None, name=None): """ if in_dygraph_mode() and out is None: return _C_ops.bitwise_and(x, y) - return _bitwise_op(op_name="bitwise_and", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _bitwise_op( + op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True + ) @templatedoc() @@ -867,12 +888,9 @@ def bitwise_or(x, y, out=None, name=None): if in_dygraph_mode() and out is None: return _C_ops.bitwise_or(x, y) - return _bitwise_op(op_name="bitwise_or", - x=x, - 
y=y, - name=name, - out=out, - binary_op=True) + return _bitwise_op( + op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True + ) @templatedoc() @@ -899,12 +917,9 @@ def bitwise_xor(x, y, out=None, name=None): """ if in_dygraph_mode() and out is None: return _C_ops.bitwise_xor(x, y) - return _bitwise_op(op_name="bitwise_xor", - x=x, - y=y, - name=name, - out=out, - binary_op=True) + return _bitwise_op( + op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True + ) @templatedoc() @@ -930,12 +945,9 @@ def bitwise_not(x, out=None, name=None): if in_dygraph_mode() and out is None: return _C_ops.bitwise_not(x) - return _bitwise_op(op_name="bitwise_not", - x=x, - y=None, - name=name, - out=out, - binary_op=False) + return _bitwise_op( + op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False + ) @templatedoc() @@ -982,8 +994,9 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): if in_dygraph_mode(): return _C_ops.isclose(x, y, rtol, atol, equal_nan) if _in_legacy_dygraph(): - return _legacy_C_ops.isclose(x, y, 'rtol', str(rtol), 'atol', str(atol), - 'equal_nan', equal_nan) + return _legacy_C_ops.isclose( + x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan + ) check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose') check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose') @@ -997,8 +1010,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): inputs = {'Input': x, 'Other': y} outputs = {'Out': out} attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan} - helper.append_op(type='isclose', - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type='isclose', inputs=inputs, outputs=outputs, attrs=attrs + ) return out diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 485e18f5064aeab52b8cd2ad600b640b3cd6e82c..27e40992334204513b395a17132dba85dcc139b4 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -19,9 +19,15 @@ from ..framework import core, in_dygraph_mode from ..fluid.framework import _in_legacy_dygraph, _non_static_mode from ..framework import LayerHelper from ..framework import convert_np_dtype_to_dtype_, dygraph_only -from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype +from ..fluid.data_feeder import ( + convert_dtype, + check_variable_and_dtype, + check_type, + check_dtype, +) from ..fluid.layers import utils import numpy as np + # TODO: define functions to manipulate a tensor from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only import paddle @@ -70,25 +76,50 @@ def cast(x, dtype): out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', - 'uint8', 'uint16' - ], 'cast') - check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', - 'int64', 'uint8', 'uint16' - ], 'cast') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) helper = LayerHelper('cast', **locals()) out = helper.create_variable_for_type_inference( - dtype=dtype, 
stop_gradient=x.stop_gradient) - helper.append_op(type='cast', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': out.dtype - }) + dtype=dtype, stop_gradient=x.stop_gradient + ) + helper.append_op( + type='cast', + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype}, + ) return out @@ -169,7 +200,8 @@ def slice(input, axes, starts, ends): axes = list(axes) if len(axes) == 0: raise ValueError( - "Input axes should not be an empty list/tuple.") + "Input axes should not be an empty list/tuple." + ) for i in range(len(axes)): if axes[i] < 0: axes[i] = max(0, axes[i] + len(input.shape)) @@ -178,8 +210,10 @@ def slice(input, axes, starts, ends): else: raise ValueError( - "Input axes must be a python list or tuple, but reveived {}". - format(type(axes))) + "Input axes must be a python list or tuple, but reveived {}".format( + type(axes) + ) + ) infer_flags = list(1 for i in range(len(axes))) @@ -188,7 +222,8 @@ def slice(input, axes, starts, ends): if isinstance(starts, (list, tuple)): starts = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in starts ] elif isinstance(starts, tmp_tensor_type): @@ -199,7 +234,9 @@ def slice(input, axes, starts, ends): if isinstance(ends, (list, tuple)): ends = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item for item in ends + if isinstance(item, tmp_tensor_type) + else item + for item in ends ] elif isinstance(ends, tmp_tensor_type): tensor_t = ends.numpy() @@ -217,7 +254,8 @@ def slice(input, axes, starts, ends): axes = list(axes) if len(axes) == 0: raise ValueError( - "Input axes should not be an empty list/tuple.") + "Input axes should not be an empty list/tuple." + ) for i in range(len(axes)): if axes[i] < 0: axes[i] = max(0, axes[i] + len(input.shape)) @@ -226,8 +264,10 @@ def slice(input, axes, starts, ends): else: raise ValueError( - "Input axes must be a python list or tuple, but reveived {}" - .format(type(axes))) + "Input axes must be a python list or tuple, but reveived {}".format( + type(axes) + ) + ) infer_flags = list(1 for i in range(len(axes))) @@ -236,7 +276,8 @@ def slice(input, axes, starts, ends): if isinstance(starts, (list, tuple)): starts = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in starts ] attrs += ('starts', starts) @@ -248,7 +289,8 @@ def slice(input, axes, starts, ends): if isinstance(ends, (list, tuple)): ends = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item + if isinstance(item, tmp_tensor_type) + else item for item in ends ] attrs += ('ends', ends) @@ -257,16 +299,27 @@ def slice(input, axes, starts, ends): ends_tensor.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) - return _legacy_C_ops.slice(input, starts_tensor, ends_tensor, None, - None, 'axes', axes, 'infer_flags', - infer_flags, *attrs) + return _legacy_C_ops.slice( + input, + starts_tensor, + ends_tensor, + None, + None, + 'axes', + axes, + 'infer_flags', + infer_flags, + *attrs, + ) if not isinstance(starts, (list, tuple, Variable)): raise ValueError( - "Input starts must be an Variable, python list or tuple.") + "Input starts must be an Variable, python list or tuple." 
+ ) if not isinstance(ends, (list, tuple, Variable)): raise ValueError( - "Input ends must be an Variable, python list or tuple.") + "Input ends must be an Variable, python list or tuple." + ) helper = LayerHelper('slice', **locals()) @@ -313,11 +366,11 @@ def slice(input, axes, starts, ends): # infer_flags attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('input')) - helper.append_op(type='slice', - inputs=inputs, - attrs=attrs, - outputs={'Out': out}) + dtype=helper.input_dtype('input') + ) + helper.append_op( + type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out} + ) return out @@ -379,10 +432,21 @@ def transpose(x, perm, name=None): out, _ = _legacy_C_ops.transpose2(x, 'axis', perm) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], 'transpose') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'transpose', + ) check_type(perm, 'perm', (list, tuple), 'transpose') if isinstance(perm, tuple): perm = list(perm) @@ -391,24 +455,25 @@ def transpose(x, perm, name=None): "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " "but received dimension of Input(x) is %s, " - "the length of Input(perm) is %s." % (len(x.shape), len(perm))) + "the length of Input(perm) is %s." % (len(x.shape), len(perm)) + ) for idx, dim in enumerate(perm): if dim >= len(x.shape): raise ValueError( "Each element in Input(perm) should be less than Input(x)'s dimension, " "but %d-th element in Input(perm) is %d which exceeds Input(x)'s " - "dimension %d." % (idx, perm[idx], len(x.shape))) + "dimension %d." 
% (idx, perm[idx], len(x.shape)) + ) helper = LayerHelper('transpose', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='transpose2', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'XShape': [x_shape] - }, - attrs={'axis': perm}) + helper.append_op( + type='transpose2', + inputs={'X': [x]}, + outputs={'Out': [out], 'XShape': [x_shape]}, + attrs={'axis': perm}, + ) return out @@ -462,13 +527,12 @@ def unstack(x, axis=0, num=None): for _ in range(num): outs.append(helper.create_variable_for_type_inference(x.dtype)) - helper.append_op(type='unstack', - inputs={'X': [x]}, - outputs={'Y': outs}, - attrs={ - 'axis': axis, - 'num': num - }) + helper.append_op( + type='unstack', + inputs={'X': [x]}, + outputs={'Y': outs}, + attrs={'axis': axis, 'num': num}, + ) return outs @@ -519,27 +583,31 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): # [[-1], [1]] """ if in_dygraph_mode(): - return _C_ops.shard_index(input, index_num, nshards, shard_id, - ignore_value) + return _C_ops.shard_index( + input, index_num, nshards, shard_id, ignore_value + ) check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index') op_type = 'shard_index' helper = LayerHelper(op_type, **locals()) if shard_id < 0 or shard_id >= nshards: - raise ValueError('The shard_id(%d) should be in [0, %d)' % - (shard_id, nshards)) + raise ValueError( + 'The shard_id(%d) should be in [0, %d)' % (shard_id, nshards) + ) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type=op_type, - inputs={'X': [input]}, - outputs={'Out': out}, - attrs={ - 'index_num': index_num, - 'nshards': nshards, - 'shard_id': shard_id, - 'ignore_value': ignore_value - }, - stop_gradient=True) + helper.append_op( + type=op_type, + inputs={'X': [input]}, + outputs={'Out': out}, + attrs={ + 'index_num': index_num, + 'nshards': nshards, + 'shard_id': shard_id, + 'ignore_value': ignore_value, + }, + stop_gradient=True, + ) return out @@ -631,12 +699,15 @@ def crop(x, shape=None, offsets=None, name=None): """ helper = LayerHelper('crop_tensor', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'crop_tensor') - check_type(shape, 'shape', (list, tuple, Variable, type(None)), - 'crop_tensor') - check_type(offsets, 'offsets', (list, tuple, Variable, type(None)), - 'crop_tensor') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor' + ) + check_type( + shape, 'shape', (list, tuple, Variable, type(None)), 'crop_tensor' + ) + check_type( + offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor' + ) if offsets is None: offsets = [0] * len(x.shape) @@ -655,25 +726,30 @@ def crop(x, shape=None, offsets=None, name=None): if not isinstance(shape_val, int): raise TypeError( "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s." - % type(shape_val)) + % type(shape_val) + ) if shape_val == 0: raise ValueError( "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s." - % str(shape_val)) + % str(shape_val) + ) if shape_val < -1: raise ValueError( "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s." - % str(shape_val)) + % str(shape_val) + ) def _attr_offsets_check(offset_val): if not isinstance(offset_val, int): raise TypeError( "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s." 
- % type(offset_val)) + % type(offset_val) + ) if offset_val < 0: raise ValueError( "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s." - % str(offset_val)) + % str(offset_val) + ) if isinstance(offsets, Variable): offsets.stop_gradient = True @@ -714,11 +790,9 @@ def crop(x, shape=None, offsets=None, name=None): else: _attr_shape_check(dim_size) temp_out = helper.create_variable_for_type_inference('int32') - fill_constant([1], - 'int32', - dim_size, - force_cpu=True, - out=temp_out) + fill_constant( + [1], 'int32', dim_size, force_cpu=True, out=temp_out + ) new_shape_tensor.append(temp_out) shape_attr.append(dim_size) ipts['ShapeTensor'] = new_shape_tensor @@ -728,10 +802,12 @@ def crop(x, shape=None, offsets=None, name=None): _attr_shape_check(dim_size) attrs['shape'] = shape - helper.append_op(type='crop_tensor', - inputs=ipts, - outputs={'Out': out}, - attrs=None if len(attrs) == 0 else attrs) + helper.append_op( + type='crop_tensor', + inputs=ipts, + outputs={'Out': out}, + attrs=None if len(attrs) == 0 else attrs, + ) return out @@ -763,13 +839,15 @@ def fill_(x, value): """ if not isinstance(value, (float, int)): raise TypeError( - "The type of 'value' must be int or float, but received %s." % - (type(value))) + "The type of 'value' must be int or float, but received %s." + % (type(value)) + ) if in_dygraph_mode(): return _C_ops.fill_(x, value) else: - return _legacy_C_ops.fill_any_(x, "value_float", float(value), - "value_int", int(value)) + return _legacy_C_ops.fill_any_( + x, "value_float", float(value), "value_int", int(value) + ) @dygraph_only @@ -798,10 +876,11 @@ def zero_(x): """ if in_dygraph_mode(): - return _C_ops.fill_(x, 0.) + return _C_ops.fill_(x, 0.0) else: - return _legacy_C_ops.fill_any_(x, "value_float", 0., "value_int", - int(0)) + return _legacy_C_ops.fill_any_( + x, "value_float", 0.0, "value_int", int(0) + ) @dygraph_only @@ -833,39 +912,45 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None): helper = LayerHelper("fill_diagonal_", **locals()) check_type(x, 'X', (Variable), 'fill_diagonal_') dtype = helper.input_dtype('x') - check_dtype(dtype, 'X', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'fill_diagonal_') + check_dtype( + dtype, + 'X', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'fill_diagonal_', + ) check_type(value, 'value', (bool, int, float), 'fill_diagonal_') check_type(wrap, 'wrap', (bool), 'fill_diagonal_') inshape = x.shape inshapeset = set(inshape) - assert len(inshape) >= 2, ('Tensor dims should >= 2 in fill_diagonal_ API') + assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_ API' if len(inshape) > 2: - assert len(inshapeset) == 1, ( - 'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API' - ) + assert ( + len(inshapeset) == 1 + ), 'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API' if in_dygraph_mode(): if len(inshape) == 2: return _C_ops.fill_diagonal_(x, value, offset, wrap) return _C_ops.fill_diagonal_(x, value, offset, True) if len(inshape) == 2: - return _legacy_C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, - 'wrap', wrap) - return _legacy_C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, - 'wrap', True) + return _legacy_C_ops.fill_diagonal_( + x, 'value', value, 'offset', offset, 'wrap', wrap + ) + return _legacy_C_ops.fill_diagonal_( + x, 'value', value, 'offset', offset, 'wrap', True + ) def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False): inshape 
= x.shape - assert dim1 < len(inshape) and dim1 >= -len(inshape), ( - 'dim1 should between [-rank,rank) in fill_diagonal_tensor_') - assert dim2 < len(inshape) and dim2 >= -len(inshape), ( - 'dim2 should between [-rank,rank) in fill_diagonal_tensor_') - assert len(inshape) >= 2, ( - 'Tensor dims should >= 2 in fill_diagonal_tensor_') + assert dim1 < len(inshape) and dim1 >= -len( + inshape + ), 'dim1 should between [-rank,rank) in fill_diagonal_tensor_' + assert dim2 < len(inshape) and dim2 >= -len( + inshape + ), 'dim2 should between [-rank,rank) in fill_diagonal_tensor_' + assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_tensor_' dim1 %= len(inshape) dim2 %= len(inshape) @@ -873,11 +958,14 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False): for i in range(len(inshape)): if i != dim1 and i != dim2: predshape.append(inshape[i]) - diaglen = min(min(inshape[dim1], inshape[dim1] + offset), - min(inshape[dim2], inshape[dim2] - offset)) + diaglen = min( + min(inshape[dim1], inshape[dim1] + offset), + min(inshape[dim2], inshape[dim2] - offset), + ) predshape.append(diaglen) assert tuple(predshape) == tuple( - y.shape), ("the y shape should be {}".format(predshape)) + y.shape + ), "the y shape should be {}".format(predshape) if len(y.shape) == 1: y = y.reshape([1, -1]) @@ -885,14 +973,15 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False): if in_dygraph_mode(): return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2) else: - return _legacy_C_ops.fill_diagonal_tensor_(x, y, 'offset', offset, - 'dim1', dim1, 'dim2', - dim2) + return _legacy_C_ops.fill_diagonal_tensor_( + x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2 + ) if in_dygraph_mode(): return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2) else: - return _legacy_C_ops.fill_diagonal_tensor(x, y, 'offset', offset, - 'dim1', dim1, 'dim2', dim2) + return _legacy_C_ops.fill_diagonal_tensor( + x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2 + ) def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None): @@ -924,12 +1013,9 @@ def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None): print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]] """ - return _fill_diagonal_tensor_impl(x, - y, - offset=offset, - dim1=dim1, - dim2=dim2, - inplace=True) + return _fill_diagonal_tensor_impl( + x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=True + ) def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None): @@ -958,12 +1044,9 @@ def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None): print(nx.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]] """ - return _fill_diagonal_tensor_impl(x, - y, - offset=offset, - dim1=dim1, - dim2=dim2, - inplace=False) + return _fill_diagonal_tensor_impl( + x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=False + ) @dygraph_only @@ -1062,10 +1145,21 @@ def concat(x, axis=0, name=None): check_type(input, 'input', (list, tuple, Variable), 'concat') if not isinstance(input, Variable): for id, x in enumerate(input): - check_variable_and_dtype(x, 'input[' + str(id) + ']', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', - 'int8', 'unit8' - ], 'concat') + check_variable_and_dtype( + x, + 'input[' + str(id) + ']', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'int8', + 'unit8', + ], + 'concat', + ) if x.dtype != input[0].dtype: raise TypeError( "All the Tensors in the input must have the 
same data type." @@ -1076,8 +1170,11 @@ def concat(x, axis=0, name=None): if isinstance(axis, Variable): check_dtype( - axis.dtype, 'axis', ['int32', 'int64'], 'concat', - "The data type of axis must be int32 or int64 when axis is a Tensor" + axis.dtype, + 'axis', + ['int32', 'int64'], + 'concat', + "The data type of axis must be int32 or int64 when axis is a Tensor", ) helper = LayerHelper('concat', **locals()) @@ -1088,19 +1185,17 @@ def concat(x, axis=0, name=None): # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0] # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode. - assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \ - "number of the elements must be 1, but received %s." % len(input) + assert len(input) == 1, ( + "If the elements of 'input' in concat are Variable(LoDTensorArray), " + "number of the elements must be 1, but received %s." % len(input) + ) out_index = helper.create_variable_for_type_inference(dtype="int32") - helper.append_op(type='tensor_array_to_tensor', - inputs={'X': input[0]}, - outputs={ - 'Out': [out], - 'OutIndex': [out_index] - }, - attrs={ - 'axis': axis, - 'use_stack': False - }) + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': input[0]}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': False}, + ) else: inputs = {'X': input} attrs = {} @@ -1110,10 +1205,9 @@ def concat(x, axis=0, name=None): else: attrs['axis'] = axis - helper.append_op(type='concat', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs + ) return out @@ -1155,17 +1249,21 @@ def broadcast_tensors(input, name=None): check_type(input, 'input', (list, tuple), 'broadcast_tensors') if num_inputs < 1: raise TypeError( - "At least 1 tensor is needed to perform broadcast_tensors") + "At least 1 tensor is needed to perform broadcast_tensors" + ) # Check input types for id, x in enumerate(input): check_variable_and_dtype( - x, 'input[' + str(id) + ']', + x, + 'input[' + str(id) + ']', ['bool', 'float32', 'float64', 'int32', 'int64'], - 'broadcast_tensors') + 'broadcast_tensors', + ) if x.dtype != input[0].dtype: raise TypeError( - "All the Tensors in the input must have the same data type.") + "All the Tensors in the input must have the same data type." 
+ ) # Check bcast semantics output_shape_r_last_tensor_index = [] @@ -1183,8 +1281,11 @@ def broadcast_tensors(input, name=None): output_shape_r.append(shape[i]) output_shape_r_last_tensor_index.append(j) else: - invalid = (output_shape_r[i] != shape[i] - and output_shape_r[i] != 1 and shape[i] != 1) + invalid = ( + output_shape_r[i] != shape[i] + and output_shape_r[i] != 1 + and shape[i] != 1 + ) if invalid: last_index = output_shape_r_last_tensor_index[i] raise TypeError( @@ -1203,14 +1304,15 @@ def broadcast_tensors(input, name=None): while i < num_inputs: out.append( helper.create_variable_for_type_inference( - dtype=helper.input_dtype())) + dtype=helper.input_dtype() + ) + ) i += 1 inputs = {'X': input} - helper.append_op(type='broadcast_tensors', - inputs=inputs, - outputs={'Out': out}, - attrs={}) + helper.append_op( + type='broadcast_tensors', inputs=inputs, outputs={'Out': out}, attrs={} + ) return out @@ -1253,19 +1355,21 @@ def flip(x, axis, name=None): helper = LayerHelper("flip", **locals()) check_type(x, 'X', (Variable), 'flip') dtype = helper.input_dtype('x') - check_dtype(dtype, 'X', - ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], - 'flip') + check_dtype( + dtype, + 'X', + ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], + 'flip', + ) check_type(axis, 'axis', (list, tuple), 'flip') if name is None: out = helper.create_variable_for_type_inference(dtype) else: out = helper.create_variable(name=name, dtype=dtype, persistable=False) - helper.append_op(type="flip", - inputs={"X": x}, - outputs={"Out": out}, - attrs={"axis": axis}) + helper.append_op( + type="flip", inputs={"X": x}, outputs={"Out": out}, attrs={"axis": axis} + ) return out @@ -1324,9 +1428,12 @@ def rot90(x, k=1, axes=[0, 1], name=None): helper = LayerHelper("rot90", **locals()) check_type(x, 'X', (Variable), 'rot90') dtype = helper.input_dtype('x') - check_dtype(dtype, 'X', - ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], - 'rot90') + check_dtype( + dtype, + 'X', + ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], + 'rot90', + ) check_type(axes, 'axes', (list, tuple), 'rot90') input_total_dims = len(x.shape) @@ -1334,23 +1441,31 @@ def rot90(x, k=1, axes=[0, 1], name=None): if total_rot_dims != 2: raise ValueError( "expected total rotation axes == 2, but got axes = {}".format( - total_rot_dims)) + total_rot_dims + ) + ) if input_total_dims < 2: raise ValueError( "expected total dims >= 2, but got total dims = {}".format( - input_total_dims)) + input_total_dims + ) + ) if not (axes[0] != axes[1] and abs(axes[0] - axes[1]) != input_total_dims): raise ValueError( - "expected rotation axes to be different, but got axis0 = {}, and axis1 = {}" - .format(axes[0], axes[1])) + "expected rotation axes to be different, but got axis0 = {}, and axis1 = {}".format( + axes[0], axes[1] + ) + ) if not (axes[0] < input_total_dims and axes[0] >= -input_total_dims): - raise ValueError("Rotation axis0 out of range, axis0 = {}".format( - axes[0])) + raise ValueError( + "Rotation axis0 out of range, axis0 = {}".format(axes[0]) + ) if not (axes[1] < input_total_dims and axes[1] >= -input_total_dims): - raise ValueError("Rotation axis1 out of range, axis1 = {}".format( - axes[1])) + raise ValueError( + "Rotation axis1 out of range, axis1 = {}".format(axes[1]) + ) k %= 4 if k == 0: @@ -1359,8 +1474,10 @@ def rot90(x, k=1, axes=[0, 1], name=None): return flip(flip(x, axes[0]), axes[1]) axes_list = list(range(0, input_total_dims)) - (axes_list[axes[0]], axes_list[axes[1]]) = 
(axes_list[axes[1]], - axes_list[axes[0]]) + (axes_list[axes[0]], axes_list[axes[1]]) = ( + axes_list[axes[1]], + axes_list[axes[0]], + ) if k == 1: return transpose(flip(x, axes[1]), axes_list) else: @@ -1439,19 +1556,29 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): if not paddle.in_dynamic_mode(): check_variable_and_dtype( - x, 'x', + x, + 'x', ['float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'], - 'flatten') + 'flatten', + ) x_dim = len(x.shape) - if not (isinstance(start_axis, - int)) or (start_axis > x_dim - 1) or start_axis < -x_dim: + if ( + not (isinstance(start_axis, int)) + or (start_axis > x_dim - 1) + or start_axis < -x_dim + ): raise ValueError( - "The start_axis should be a int, and in range [-rank(x), rank(x))") - if not (isinstance(stop_axis, - int)) or (stop_axis > x_dim - 1) or stop_axis < -x_dim: + "The start_axis should be a int, and in range [-rank(x), rank(x))" + ) + if ( + not (isinstance(stop_axis, int)) + or (stop_axis > x_dim - 1) + or stop_axis < -x_dim + ): raise ValueError( - "The stop_axis should be a int, and in range [-rank(x), rank(x))") + "The stop_axis should be a int, and in range [-rank(x), rank(x))" + ) if start_axis < 0: start_axis = start_axis + x_dim if stop_axis < 0: @@ -1464,22 +1591,19 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): if _in_legacy_dygraph(): dy_out, _ = _legacy_C_ops.flatten_contiguous_range( - x, 'start_axis', start_axis, 'stop_axis', stop_axis) + x, 'start_axis', start_axis, 'stop_axis', stop_axis + ) return dy_out helper = LayerHelper('flatten', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='flatten_contiguous_range', - inputs={"X": x}, - outputs={ - 'Out': out, - 'XShape': x_shape - }, - attrs={ - "start_axis": start_axis, - "stop_axis": stop_axis - }) + helper.append_op( + type='flatten_contiguous_range', + inputs={"X": x}, + outputs={'Out': out, 'XShape': x_shape}, + attrs={"start_axis": start_axis, "stop_axis": stop_axis}, + ) return out @@ -1493,14 +1617,22 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None): raise ValueError("The input x should be a Tensor") x_dim = len(x.shape) - if not (isinstance(start_axis, - int)) or (start_axis > x_dim - 1) or start_axis < -x_dim: + if ( + not (isinstance(start_axis, int)) + or (start_axis > x_dim - 1) + or start_axis < -x_dim + ): raise ValueError( - "The start_axis should be a int, and in range [-rank(x), rank(x))") - if not (isinstance(stop_axis, - int)) or (stop_axis > x_dim - 1) or stop_axis < -x_dim: + "The start_axis should be a int, and in range [-rank(x), rank(x))" + ) + if ( + not (isinstance(stop_axis, int)) + or (stop_axis > x_dim - 1) + or stop_axis < -x_dim + ): raise ValueError( - "The stop_axis should be a int, and in range [-rank(x), rank(x))") + "The stop_axis should be a int, and in range [-rank(x), rank(x))" + ) if start_axis < 0: start_axis = start_axis + x_dim if stop_axis < 0: @@ -1513,7 +1645,8 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None): if _in_legacy_dygraph(): dy_out, _ = _legacy_C_ops.flatten_contiguous_range_( - x, 'start_axis', start_axis, 'stop_axis', stop_axis) + x, 'start_axis', start_axis, 'stop_axis', stop_axis + ) return dy_out @@ -1571,8 +1704,10 @@ def roll(x, shifts, axis=None, name=None): for i in range(len(axis)): if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape: raise ValueError( - "axis is out of range, it should be in range [{}, {}), but received {}" - 
.format(-len_origin_shape, len_origin_shape, axis)) + "axis is out of range, it should be in range [{}, {}), but received {}".format( + -len_origin_shape, len_origin_shape, axis + ) + ) else: axis = [] @@ -1588,22 +1723,20 @@ def roll(x, shifts, axis=None, name=None): out = helper.create_variable_for_type_inference(x.dtype) if isinstance(shifts, Variable): - helper.append_op(type='roll', - inputs={ - 'X': x, - "ShiftsTensor": shifts - }, - outputs={'Out': out}, - attrs={'axis': axis}) + helper.append_op( + type='roll', + inputs={'X': x, "ShiftsTensor": shifts}, + outputs={'Out': out}, + attrs={'axis': axis}, + ) else: check_type(shifts, 'shifts', (list, tuple), 'roll') - helper.append_op(type='roll', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'axis': axis, - 'shifts': shifts - }) + helper.append_op( + type='roll', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'axis': axis, 'shifts': shifts}, + ) return out @@ -1704,42 +1837,53 @@ def stack(x, axis=0, name=None): if not isinstance(x, list) and not isinstance(x, tuple): # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc. # In that case, Variable is array of tensors indeed. - if isinstance(x, Variable) and x.desc.type( - ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if ( + isinstance(x, Variable) + and x.desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY + ): x = [x] else: raise TypeError( - "The type of '%s' in %s must be %s, but received %s" % - ('x', 'stack', 'list[Tensor], tuple[Tensor] or TensorArray', - type(x))) + "The type of '%s' in %s must be %s, but received %s" + % ( + 'x', + 'stack', + 'list[Tensor], tuple[Tensor] or TensorArray', + type(x), + ) + ) helper = LayerHelper('stack', **locals()) out = helper.create_variable_for_type_inference(x[0].dtype) if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: - assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \ - "number of the elements must be 1, but received %s." % len(x) + assert len(x) == 1, ( + "If the elements of 'x' in stack are Variable(LoDTensorArray), " + "number of the elements must be 1, but received %s." 
% len(x) + ) out_index = helper.create_variable_for_type_inference(dtype="int32") for i in x: - check_variable_and_dtype(i, 'x', \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack') - - helper.append_op(type='tensor_array_to_tensor', - inputs={'X': x[0]}, - outputs={ - 'Out': [out], - 'OutIndex': [out_index] - }, - attrs={ - 'axis': axis, - 'use_stack': True - }) + check_variable_and_dtype( + i, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'stack', + ) + + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': x[0]}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': True}, + ) else: - helper.append_op(type='stack', - inputs={'X': x}, - outputs={'Y': out}, - attrs={'axis': axis}) + helper.append_op( + type='stack', + inputs={'X': x}, + outputs={'Y': out}, + attrs={'axis': axis}, + ) return out @@ -1813,15 +1957,17 @@ def split(x, num_or_sections, axis=0, name=None): if utils._contain_var(num_or_sections): for index, item in enumerate(num_or_sections): if isinstance(item, Variable): - num_or_sections[index] = num_or_sections[index].numpy( - )[0] + num_or_sections[index] = num_or_sections[index].numpy()[ + 0 + ] attrs += ('sections', list(num_or_sections)) else: attrs += ('sections', list(num_or_sections)) else: raise TypeError( "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but " - "received %s." % (type(num_or_sections))) + "received %s." % (type(num_or_sections)) + ) if in_dygraph_mode(): if isinstance(num_or_sections, int): return _C_ops.split_with_num(input, num_or_sections, dim) @@ -1832,10 +1978,21 @@ def split(x, num_or_sections, axis=0, name=None): _legacy_C_ops.split(input, out, *attrs) return out - check_variable_and_dtype(input, 'input', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8', - 'int8' - ], 'split') + check_variable_and_dtype( + input, + 'input', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'uint8', + 'int8', + ], + 'split', + ) check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split') check_type(dim, 'dim', (int, Variable), 'split') if isinstance(dim, Variable): @@ -1855,19 +2012,18 @@ def split(x, num_or_sections, axis=0, name=None): dim_size.stop_gradient = True tensor_list.append(dim_size) else: - assert (isinstance(dim_size, int)) + assert isinstance(dim_size, int) if dim_size == -1: assert unk_dim_idx == -1, ( "Only one value of 'num_or_section' in split can " - "be -1. But received num_or_section[%d] is also -1." % - idx) + "be -1. But received num_or_section[%d] is also -1." + % idx + ) unk_dim_idx = idx temp_out = helper.create_variable_for_type_inference('int32') - fill_constant([1], - 'int32', - dim_size, - force_cpu=True, - out=temp_out) + fill_constant( + [1], 'int32', dim_size, force_cpu=True, out=temp_out + ) tensor_list.append(temp_out) return tensor_list @@ -1882,31 +2038,37 @@ def split(x, num_or_sections, axis=0, name=None): if isinstance(num_or_sections, int): assert num_or_sections > 1, 'num_or_sections must be more than 1.' if isinstance(dim, int) and input_shape[dim] > 0: - assert input_shape[dim] % num_or_sections ==0, \ - "The input's size along the split dimension " \ - "must be evenly divisible by Attr(num_or_sections). " \ - "But %d is not evenly divisible by %d. " % (num_or_sections,input_shape[dim]) + assert input_shape[dim] % num_or_sections == 0, ( + "The input's size along the split dimension " + "must be evenly divisible by Attr(num_or_sections). 
" + "But %d is not evenly divisible by %d. " + % (num_or_sections, input_shape[dim]) + ) num = num_or_sections else: if isinstance(dim, int) and input_shape[dim] > 0: - assert len(num_or_sections) <= input_shape[ - dim], 'len(num_or_sections) must not be more than input.shape[dim].' + assert ( + len(num_or_sections) <= input_shape[dim] + ), 'len(num_or_sections) must not be more than input.shape[dim].' num = len(num_or_sections) attrs['sections'] = list( - map(lambda ele: -1 - if isinstance(ele, Variable) else ele, num_or_sections)) + map( + lambda ele: -1 if isinstance(ele, Variable) else ele, + num_or_sections, + ) + ) if utils._contain_var(num_or_sections): inputs['SectionsTensorList'] = _get_SectionsTensorList( - num_or_sections) + num_or_sections + ) outs = [ helper.create_variable_for_type_inference(dtype=helper.input_dtype()) for i in range(num) ] - helper.append_op(type='split', - inputs=inputs, - outputs={'Out': outs}, - attrs=attrs) + helper.append_op( + type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs + ) return outs @@ -1947,8 +2109,10 @@ def vsplit(x, num_or_sections, name=None): """ if x.ndim < 2: raise ValueError( - "The input tensor's dimension must be greater than 1, but got {}". - format(x.ndim)) + "The input tensor's dimension must be greater than 1, but got {}".format( + x.ndim + ) + ) return split(x, num_or_sections, axis=0, name=name) @@ -2041,10 +2205,22 @@ def squeeze(x, axis=None, name=None): return out helper = LayerHelper("squeeze", **locals()) - check_variable_and_dtype(input, 'input', [ - 'float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64', - 'complex64', 'complex128' - ], 'squeeze') + check_variable_and_dtype( + input, + 'input', + [ + 'float16', + 'float32', + 'float64', + 'bool', + 'int8', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'squeeze', + ) check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'squeeze') attrs = {} @@ -2059,13 +2235,12 @@ def squeeze(x, axis=None, name=None): out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="squeeze2", - inputs={"X": input}, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="squeeze2", + inputs={"X": input}, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) return out @@ -2092,12 +2267,14 @@ def squeeze_(x, axis=None, name=None): return out -def unique_consecutive(x, - return_inverse=False, - return_counts=False, - axis=None, - dtype="int64", - name=None): +def unique_consecutive( + x, + return_inverse=False, + return_counts=False, + axis=None, + dtype="int64", + name=None, +): r""" Eliminates all but the first element from every consecutive group of equivalent elements. 
@@ -2164,7 +2341,8 @@ def unique_consecutive(x, attr_dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): out, inverse, counts = _C_ops.unique_consecutive( - x, return_inverse, return_counts, axis, attr_dtype) + x, return_inverse, return_counts, axis, attr_dtype + ) outs = [out] if return_inverse: outs.append(inverse) @@ -2175,8 +2353,16 @@ def unique_consecutive(x, return tuple(outs) elif paddle.in_dynamic_mode(): out, inverse, counts = _legacy_C_ops.unique_consecutive( - x, 'dtype', attr_dtype, 'return_inverse', return_inverse, - 'return_counts', return_counts, 'axis', axis) + x, + 'dtype', + attr_dtype, + 'return_inverse', + return_inverse, + 'return_counts', + return_counts, + 'axis', + axis, + ) outs = [out] if return_inverse: outs.append(inverse) @@ -2185,9 +2371,12 @@ def unique_consecutive(x, if len(outs) == 1: return outs[0] return tuple(outs) - check_variable_and_dtype(x, "input", - ['float32', 'float64', 'int32', 'int64'], - 'unique_consecutive') + check_variable_and_dtype( + x, + "input", + ['float32', 'float64', 'int32', 'int64'], + 'unique_consecutive', + ) check_type(return_inverse, 'return_inverse', bool, 'unique_consecutive') check_type(return_counts, 'return_counts', bool, 'unique_consecutive') check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive') @@ -2200,34 +2389,38 @@ def unique_consecutive(x, "return_counts": return_counts, "axis": axis, } - out = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) - inverse = helper.create_variable_for_type_inference(dtype=attr_dtype, - stop_gradient=True) - counts = helper.create_variable_for_type_inference(dtype=attr_dtype, - stop_gradient=True) + out = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) + inverse = helper.create_variable_for_type_inference( + dtype=attr_dtype, stop_gradient=True + ) + counts = helper.create_variable_for_type_inference( + dtype=attr_dtype, stop_gradient=True + ) outputs = {"Out": out, "Index": inverse, "Counts": counts} outs = [out] if return_inverse: outs.append(inverse) if return_counts: outs.append(counts) - helper.append_op(type="unique_consecutive", - inputs={"X": x}, - attrs=attrs, - outputs=outputs) + helper.append_op( + type="unique_consecutive", inputs={"X": x}, attrs=attrs, outputs=outputs + ) if len(outs) == 1: return outs[0] return tuple(outs) -def unique(x, - return_index=False, - return_inverse=False, - return_counts=False, - axis=None, - dtype="int64", - name=None): +def unique( + x, + return_index=False, + return_inverse=False, + return_counts=False, + axis=None, + dtype="int64", + name=None, +): r""" Returns the unique elements of `x` in ascending order. 
@@ -2289,13 +2482,24 @@ def unique(x, if _non_static_mode(): if in_dygraph_mode(): out, indices, inverse, counts = _C_ops.unique( - x, return_index, return_inverse, return_counts, axis, - attr_dtype) + x, return_index, return_inverse, return_counts, axis, attr_dtype + ) if _in_legacy_dygraph(): out, inverse, indices, counts = _legacy_C_ops.unique( - x, 'dtype', attr_dtype, 'return_index', return_index, - 'return_inverse', return_inverse, 'return_counts', - return_counts, 'axis', axis, "is_sorted", True) + x, + 'dtype', + attr_dtype, + 'return_index', + return_index, + 'return_inverse', + return_inverse, + 'return_counts', + return_counts, + 'axis', + axis, + "is_sorted", + True, + ) outs = [out] if return_index: outs.append(indices) @@ -2309,8 +2513,9 @@ def unique(x, return tuple(outs) - check_variable_and_dtype(x, "input", - ['float32', 'float64', 'int32', 'int64'], 'unique') + check_variable_and_dtype( + x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique' + ) check_type(return_index, 'return_index', bool, 'unique') check_type(return_inverse, 'return_inverse', bool, 'unique') check_type(return_counts, 'return_counts', bool, 'unique') @@ -2325,21 +2530,25 @@ def unique(x, "return_inverse": return_inverse, "return_counts": return_counts, "axis": axis, - "is_sorted": True + "is_sorted": True, } - out = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) - indices = helper.create_variable_for_type_inference(dtype=attr_dtype, - stop_gradient=True) - inverse = helper.create_variable_for_type_inference(dtype=attr_dtype, - stop_gradient=True) - counts = helper.create_variable_for_type_inference(dtype=attr_dtype, - stop_gradient=True) + out = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) + indices = helper.create_variable_for_type_inference( + dtype=attr_dtype, stop_gradient=True + ) + inverse = helper.create_variable_for_type_inference( + dtype=attr_dtype, stop_gradient=True + ) + counts = helper.create_variable_for_type_inference( + dtype=attr_dtype, stop_gradient=True + ) outputs = { "Out": out, "Indices": indices, "Index": inverse, - "Counts": counts + "Counts": counts, } outs = [out] if return_index: @@ -2349,10 +2558,9 @@ def unique(x, if return_counts: outs.append(counts) - helper.append_op(type="unique", - inputs={"X": x}, - attrs=attrs, - outputs=outputs) + helper.append_op( + type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs + ) if len(outs) == 1: return outs[0] @@ -2424,18 +2632,23 @@ def unsqueeze(x, axis, name=None): return _C_ops.unsqueeze(input, axes) check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') - check_variable_and_dtype(input, 'input', [ - 'float16', - 'float32', - 'float64', - 'bool', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], 'unsqueeze') + check_variable_and_dtype( + input, + 'input', + [ + 'float16', + 'float32', + 'float64', + 'bool', + 'int8', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'unsqueeze', + ) helper = LayerHelper("unsqueeze2", **locals()) inputs = {"X": input} attrs = {} @@ -2453,13 +2666,12 @@ def unsqueeze(x, axis, name=None): out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="unsqueeze2", - inputs=inputs, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="unsqueeze2", + inputs=inputs, + attrs=attrs, + outputs={"Out": out, "XShape": 
x_shape}, + ) return out @@ -2539,13 +2751,16 @@ def gather(x, index, axis=None, name=None): return _C_ops.gather(x, index, axis) if _in_legacy_dygraph(): axis = axis.item() if isinstance(axis, paddle.Tensor) else axis - return _legacy_C_ops.gather(x, index, None, "axis", axis, "overwrite", - False) + return _legacy_C_ops.gather( + x, index, None, "axis", axis, "overwrite", False + ) check_variable_and_dtype( - x, 'x', + x, + 'x', ['float16', 'float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], - 'gather') + 'gather', + ) check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather') if isinstance(axis, Variable): @@ -2555,25 +2770,19 @@ def gather(x, index, axis=None, name=None): dtype = helper.input_dtype('x') out = helper.create_variable_for_type_inference(dtype) if not isinstance(axis, Variable): - helper.append_op(type="gather", - inputs={ - "X": x, - "Index": index - }, - attrs={ - 'axis': axis, - 'overwrite': False - }, - outputs={"Out": out}) + helper.append_op( + type="gather", + inputs={"X": x, "Index": index}, + attrs={'axis': axis, 'overwrite': False}, + outputs={"Out": out}, + ) else: - helper.append_op(type="gather", - inputs={ - "X": x, - "Index": index, - "Axis": axis - }, - attrs={"overwrite": False}, - outputs={"Out": out}) + helper.append_op( + type="gather", + inputs={"X": x, "Index": index, "Axis": axis}, + attrs={"overwrite": False}, + outputs={"Out": out}, + ) return out @@ -2613,8 +2822,9 @@ def unbind(input, axis=0): return _C_ops.unbind(input, axis) if not isinstance(axis, (int)): - raise TypeError("The type of 'axis' must be int, but received %s." % - (type(axis))) + raise TypeError( + "The type of 'axis' must be int, but received %s." % (type(axis)) + ) if isinstance(axis, np.generic): axis = np.asscalar(axis) input_shape = input.shape @@ -2626,16 +2836,19 @@ def unbind(input, axis=0): helper = LayerHelper("unbind", **locals()) check_type(input, 'input', (Variable), 'unbind') dtype = helper.input_dtype() - check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], - 'unbind') + check_dtype( + dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind' + ) outs = [ helper.create_variable_for_type_inference(dtype=helper.input_dtype()) for i in range(num) ] - helper.append_op(type="unbind", - inputs={"X": input}, - outputs={"Out": outs}, - attrs={"axis": axis}) + helper.append_op( + type="unbind", + inputs={"X": input}, + outputs={"Out": outs}, + attrs={"axis": axis}, + ) return outs @@ -2717,23 +2930,25 @@ def scatter(x, index, updates, overwrite=True, name=None): return _C_ops.scatter(x, index, updates, overwrite) else: if _in_legacy_dygraph(): - return _legacy_C_ops.scatter(x, index, updates, 'overwrite', - overwrite) + return _legacy_C_ops.scatter( + x, index, updates, 'overwrite', overwrite + ) else: check_variable_and_dtype( - x, 'dtype', ['float32', 'float64', 'float16', 'int32', 'int64'], - 'scatter') + x, + 'dtype', + ['float32', 'float64', 'float16', 'int32', 'int64'], + 'scatter', + ) check_type(overwrite, 'overwrite', bool, 'scatter') helper = LayerHelper('scatter', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type="scatter", - inputs={ - "X": x, - "Ids": index, - "Updates": updates - }, - attrs={'overwrite': overwrite}, - outputs={"Out": out}) + helper.append_op( + type="scatter", + inputs={"X": x, "Ids": index, "Updates": updates}, + attrs={'overwrite': overwrite}, + outputs={"Out": out}, + ) return out @@ -2830,13 +3045,11 @@ def scatter_nd_add(x, index, updates, name=None): 
helper = LayerHelper('scatter_nd_add', **locals()) dtype = helper.input_dtype(input_param_name='x') output = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="scatter_nd_add", - inputs={ - "X": x, - "Index": index, - "Updates": updates - }, - outputs={"Out": output}) + helper.append_op( + type="scatter_nd_add", + inputs={"X": x, "Index": index, "Updates": updates}, + outputs={"Out": output}, + ) return output @@ -2963,7 +3176,9 @@ def tile(x, repeat_times, name=None): """ if in_dygraph_mode(): if isinstance(repeat_times, core.eager.Tensor): - assert repeat_times.ndim == 1, "Only support ndim == 1 while repeat_times is a Tensor." + assert ( + repeat_times.ndim == 1 + ), "Only support ndim == 1 while repeat_times is a Tensor." repeat_times = repeat_times.numpy().tolist() return _C_ops.tile(x, repeat_times) @@ -2973,26 +3188,30 @@ def tile(x, repeat_times, name=None): check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile') if isinstance(repeat_times, Variable): - assert len( - repeat_times.shape) == 1, ('repeat_times must be an 1-D Tensor.') + assert ( + len(repeat_times.shape) == 1 + ), 'repeat_times must be an 1-D Tensor.' else: for elem in repeat_times: if isinstance(elem, Variable): - assert len(elem.shape) == 1, ( - 'Elements in repeat_times must be 1-D Tensors or integers.') + assert ( + len(elem.shape) == 1 + ), 'Elements in repeat_times must be 1-D Tensors or integers.' else: type_tuple = (int, np.int32, np.int64) - assert isinstance(elem, type_tuple), ( - 'Elements in repeat_times must be 1-D Tensors or integers.') + assert isinstance( + elem, type_tuple + ), 'Elements in repeat_times must be 1-D Tensors or integers.' - check_variable_and_dtype(x, 'x', - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'tile') + check_variable_and_dtype( + x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile' + ) if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: raise ValueError( "When the date type is bool for the input 'x' of tile op, you " "must set its stop_gradient to be True by " - "some_var.stop_gradient == True supporting some_var is the input.") + "some_var.stop_gradient == True supporting some_var is the input." + ) helper = LayerHelper('tile', **locals()) @@ -3006,8 +3225,9 @@ def tile(x, repeat_times, name=None): attrs_repeat_times.append(-1) else: attrs_repeat_times.append(times) - assert times > 0, ( - "All elements in repeat_times must be positive for tile.") + assert ( + times > 0 + ), "All elements in repeat_times must be positive for tile." 
return attrs_repeat_times if isinstance(repeat_times, Variable): @@ -3018,14 +3238,14 @@ def tile(x, repeat_times, name=None): attrs['repeat_times'] = get_attr_repeat_times(repeat_times) if utils._contain_var(repeat_times): inputs['repeat_times_tensor'] = utils._convert_to_tensor_list( - repeat_times) + repeat_times + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='tile', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -3063,9 +3283,9 @@ def expand_as(x, y, name=None): if _non_static_mode(): return _legacy_C_ops.expand_as_v2(x, 'target_shape', y.shape) - check_variable_and_dtype(x, 'x', - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'expand_as') + check_variable_and_dtype( + x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as' + ) check_type(y, 'y', Variable, 'expand_as') if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: @@ -3073,16 +3293,19 @@ def expand_as(x, y, name=None): "When the data type of input 'x' for expand_as is bool, " "you must set its stop_gradient to be False by " "some_var.stop_gradient = True, supporting " - "some_var as the input 'x'.") + "some_var as the input 'x'." + ) inputs = {"X": [x], "Y": [y]} helper = LayerHelper('expand_as', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='expand_as_v2', - inputs=inputs, - attrs={'target_shape': y.shape}, - outputs={'Out': out}) + helper.append_op( + type='expand_as_v2', + inputs=inputs, + attrs={'target_shape': y.shape}, + outputs={'Out': out}, + ) return out @@ -3119,27 +3342,30 @@ def broadcast_to(x, shape, name=None): return _legacy_C_ops.expand_v2(x, 'shape', shape) if isinstance(shape, Variable): - assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.') + assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.' else: for elem in shape: if isinstance(elem, Variable): - assert len(elem.shape) == 1, ( - 'Elements in shape must be 1-D Tensors or integers.') + assert ( + len(elem.shape) == 1 + ), 'Elements in shape must be 1-D Tensors or integers.' else: type_tuple = (int, np.int32, np.int64) - assert isinstance(elem, type_tuple), ( - 'Elements in shape must be 1-D Tensors or integers.') + assert isinstance( + elem, type_tuple + ), 'Elements in shape must be 1-D Tensors or integers.' - check_variable_and_dtype(x, 'x', - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'broadcast_to') + check_variable_and_dtype( + x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to' + ) check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to') if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: raise ValueError( "When the data type of input 'x' for broadcast_to is bool, " "you must set its stop_gradient to be False by " "some_var.stop_gradient = True, supporting " - "some_var as the input.") + "some_var as the input." + ) inputs = {"X": [x]} attrs = {} @@ -3153,9 +3379,9 @@ def broadcast_to(x, shape, name=None): attrs_expand_shape.append(-1) else: attrs_expand_shape.append(shape) - assert shape > 0 or shape == -1, ( - "All elements in shape of broadcast_to must be positive or -1." - ) + assert ( + shape > 0 or shape == -1 + ), "All elements in shape of broadcast_to must be positive or -1." 
return attrs_expand_shape if isinstance(shape, Variable): @@ -3165,14 +3391,14 @@ def broadcast_to(x, shape, name=None): attrs['shape'] = get_attr_expand_shape(shape) if utils._contain_var(shape): inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list( - shape) + shape + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='expand_v2', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -3211,26 +3437,33 @@ def expand(x, shape, name=None): return _legacy_C_ops.expand_v2(x, 'shape', shape) if isinstance(shape, Variable): - assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.') + assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.' else: for elem in shape: if isinstance(elem, Variable): - assert len(elem.shape) == 1, ( - 'Elements in shape must be 1-D Tensors or integers.') + assert ( + len(elem.shape) == 1 + ), 'Elements in shape must be 1-D Tensors or integers.' else: type_tuple = (int, np.int32, np.int64) - assert isinstance(elem, type_tuple), ( - 'Elements in shape must be 1-D Tensors or integers.') + assert isinstance( + elem, type_tuple + ), 'Elements in shape must be 1-D Tensors or integers.' check_variable_and_dtype( - x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'expand') + x, + 'x', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'expand', + ) check_type(shape, 'shape', (list, tuple, Variable), 'expand') if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: - raise ValueError("When the data type of input 'x' for expand is bool, " - "you must set its stop_gradient to be False by " - "some_var.stop_gradient = True, supporting " - "some_var as the input.") + raise ValueError( + "When the data type of input 'x' for expand is bool, " + "you must set its stop_gradient to be False by " + "some_var.stop_gradient = True, supporting " + "some_var as the input." + ) inputs = {"X": [x]} attrs = {} @@ -3244,8 +3477,9 @@ def expand(x, shape, name=None): attrs_expand_shape.append(-2) else: attrs_expand_shape.append(shape) - assert shape > 0 or shape == -1, ( - "All elements in shape of expand must be positive or -1.") + assert ( + shape > 0 or shape == -1 + ), "All elements in shape of expand must be positive or -1." return attrs_expand_shape if isinstance(shape, Variable): @@ -3255,14 +3489,14 @@ def expand(x, shape, name=None): attrs['shape'] = get_attr_expand_shape(shape) if utils._contain_var(shape): inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list( - shape) + shape + ) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='expand_v2', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out @@ -3331,7 +3565,7 @@ def reshape(x, shape, name=None): if in_dygraph_mode(): tmp_tensor_type = core.eager.Tensor - #TODO(zhiqiu): enable inplace in dygraph mode. + # TODO(zhiqiu): enable inplace in dygraph mode. if inplace: warnings.warn( "Inplace on reshape is not allowed and will be discarded in dygraph mode currently." 
@@ -3339,7 +3573,9 @@ def reshape(x, shape, name=None): if isinstance(shape, (list, tuple)): shape = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item for item in shape + if isinstance(item, tmp_tensor_type) + else item + for item in shape ] out = _C_ops.reshape(x, shape) elif isinstance(shape, tmp_tensor_type): @@ -3348,7 +3584,8 @@ def reshape(x, shape, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape))) + " got '{}.'".format(type(shape)) + ) return dygraph_utils._append_activation_in_dygraph(out, act) else: @@ -3370,14 +3607,26 @@ def reshape(x, shape, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape))) + " got '{}.'".format(type(shape)) + ) return dygraph_utils._append_activation_in_dygraph(out, act) - check_variable_and_dtype(x, 'x', [ - 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', 'bool', - 'uint16' - ], 'reshape') + check_variable_and_dtype( + x, + 'x', + [ + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'bool', + 'uint16', + ], + 'reshape', + ) check_type(shape, 'shape', (list, tuple, Variable), 'reshape') check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape') @@ -3401,20 +3650,23 @@ def reshape(x, shape, name=None): "\t# z.shape is [-1, -1, 4]\n\n" " If your target shape in Reshape represents dynamic shape, " "please turn it into a Tensor under @to_static. See above example for details." - % dim_idx) + % dim_idx + ) unk_dim_idx = dim_idx elif dim_size == 0: assert dim_idx < len(x.shape), ( "The index of 0 in `shape` must be less than " "the input tensor X's dimensions. " - "But received shape[%d] = 0, X's dimensions = %d." % - (dim_idx, len(x.shape))) + "But received shape[%d] = 0, X's dimensions = %d." + % (dim_idx, len(x.shape)) + ) else: assert dim_size > 0, ( "Each dimension value of 'shape' in reshape must not " "be negative except one unknown dimension. " - "But received shape[%d] = %s." % - (dim_idx, str(dim_size))) + "But received shape[%d] = %s." + % (dim_idx, str(dim_size)) + ) return attrs_shape inputs = {"X": x} @@ -3423,8 +3675,10 @@ def reshape(x, shape, name=None): shape.stop_gradient = True inputs["Shape"] = shape elif isinstance(shape, (list, tuple)): - assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, " - "but received %s." % len(shape)) + assert len(shape) > 0, ( + "The size of 'shape' in reshape can't be zero, " + "but received %s." 
% len(shape) + ) attrs["shape"] = get_attr_shape(shape) if utils._contain_var(shape): inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) @@ -3432,16 +3686,18 @@ def reshape(x, shape, name=None): actual_shape.stop_gradient = True inputs["Shape"] = actual_shape - out = x if inplace else helper.create_variable_for_type_inference( - dtype=x.dtype) + out = ( + x + if inplace + else helper.create_variable_for_type_inference(dtype=x.dtype) + ) x_shape = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="reshape2", - inputs=inputs, - attrs=attrs, - outputs={ - "Out": out, - "XShape": x_shape - }) + helper.append_op( + type="reshape2", + inputs=inputs, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) return helper.append_activation(out) @@ -3457,7 +3713,9 @@ def reshape_(x, shape, name=None): if isinstance(shape, (list, tuple)): shape = [ item.numpy().item(0) - if isinstance(item, tmp_tensor_type) else item for item in shape + if isinstance(item, tmp_tensor_type) + else item + for item in shape ] out = _C_ops.reshape_(x, shape) elif isinstance(shape, tmp_tensor_type): @@ -3466,7 +3724,8 @@ def reshape_(x, shape, name=None): else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," - " got '{}.'".format(type(shape))) + " got '{}.'".format(type(shape)) + ) return out else: @@ -3567,18 +3826,20 @@ def gather_nd(x, index, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.gather_nd(x, index) check_variable_and_dtype( - x, 'x', ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'], - 'gather_np') + x, + 'x', + ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'], + 'gather_np', + ) check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np') helper = LayerHelper('gather_nd', **locals()) dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="gather_nd", - inputs={ - "X": x, - "Index": index - }, - outputs={"Out": output}) + helper.append_op( + type="gather_nd", + inputs={"X": x, "Index": index}, + outputs={"Out": output}, + ) return output @@ -3670,8 +3931,11 @@ def strided_slice(x, axes, starts, ends, strides, name=None): helper = LayerHelper('strided_slice', **locals()) check_variable_and_dtype( - x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'strided_slice') + x, + 'x', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'strided_slice', + ) check_type(axes, 'axes', (list, tuple), 'strided_slice') check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice') check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice') @@ -3679,8 +3943,9 @@ def strided_slice(x, axes, starts, ends, strides, name=None): def check_list_elements_dtype(list_input, input_name): if isinstance(list_input, Variable): - check_dtype(list_input.dtype, input_name, ['int32'], - 'strided_slice') + check_dtype( + list_input.dtype, input_name, ['int32'], 'strided_slice' + ) else: for i, var in enumerate(list_input): var_name = input_name + '[' + str(i) + ']' @@ -3699,7 +3964,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): dim.stop_gradient = True new_list_tensor.append(dim) else: - assert (isinstance(dim, int)) + assert isinstance(dim, int) temp_out = helper.create_variable_for_type_inference('int32') fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out) new_list_tensor.append(temp_out) @@ -3716,7 +3981,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): 'starts': starts, 
'ends': ends, 'strides': strides, - 'infer_flags': infer_flags + 'infer_flags': infer_flags, } else: # starts @@ -3771,11 +4036,11 @@ def strided_slice(x, axes, starts, ends, strides, name=None): attrs['strides'] = strides attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('x')) - helper.append_op(type='strided_slice', - inputs=inputs, - attrs=attrs, - outputs={'Out': out}) + dtype=helper.input_dtype('x') + ) + helper.append_op( + type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out} + ) return out @@ -3903,8 +4168,9 @@ def tensordot(x, y, axes=2, name=None): if paddle.in_dynamic_mode(): return tolist(var) raise TypeError( - "The 'axes' with type 'Tensor' in " + op_type + - " is not available in static graph mode, " + "The 'axes' with type 'Tensor' in " + + op_type + + " is not available in static graph mode, " "please convert its type to int|Tuple|List, or use dynamic graph mode." ) @@ -3912,8 +4178,10 @@ def tensordot(x, y, axes=2, name=None): axes_y = [] if np.issubdtype(type(axes), np.integer): assert axes >= 0, ( - "The 'axes' in " + op_type + - f" should not be negative, but received axes={axes}.") + "The 'axes' in " + + op_type + + f" should not be negative, but received axes={axes}." + ) axes_x = range(x.ndim - axes, x.ndim) axes_y = range(axes) else: @@ -3953,7 +4221,11 @@ def tensordot(x, y, axes=2, name=None): shape_x[dim_x] = 1 x = x.sum(dim_x).reshape(shape_x) else: - assert sx == sy, "The dimensional size for 'x' and 'y' in " + op_type + f" should match each other, but 'x' has size {sx} in dim {dim_x} while 'y' has size {sy} in dim {dim_y}." + assert sx == sy, ( + "The dimensional size for 'x' and 'y' in " + + op_type + + f" should match each other, but 'x' has size {sx} in dim {dim_x} while 'y' has size {sy} in dim {dim_y}." 
+ ) need_contracted_dim_x[dim_x] = True need_contracted_dim_y[dim_y] = True @@ -3981,9 +4253,11 @@ def tensordot(x, y, axes=2, name=None): shape_out = [1] x = x.transpose(perm=perm_x).reshape( - [not_contraction_size_x, contraction_size]) + [not_contraction_size_x, contraction_size] + ) y = y.transpose(perm=perm_y).reshape( - [contraction_size, not_contraction_size_y]) + [contraction_size, not_contraction_size_y] + ) out = x.matmul(y).reshape(shape_out) return out @@ -4027,7 +4301,8 @@ def as_complex(x, name=None): helper = LayerHelper(op_type, **locals()) inputs = {"X": x} out = helper.create_variable_for_type_inference( - dtype=_real_to_complex_dtype(x.dtype)) + dtype=_real_to_complex_dtype(x.dtype) + ) outputs = {"Out": out} attrs = {} helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs) @@ -4079,7 +4354,8 @@ def as_real(x, name=None): helper = LayerHelper(op_type, **locals()) inputs = {"X": x} out = helper.create_variable_for_type_inference( - dtype=_complex_to_real_dtype(x.dtype)) + dtype=_complex_to_real_dtype(x.dtype) + ) outputs = {"Out": out} helper.append_op(type=op_type, inputs=inputs, outputs=outputs) return out @@ -4131,23 +4407,27 @@ def repeat_interleave(x, repeats, axis=None, name=None): return _C_ops.repeat_interleave(x, repeats, axis) helper = LayerHelper("repeat_interleave", **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.manipulation.repeat_interleave') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.manipulation.repeat_interleave', + ) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='repeat_interleave', - inputs={ - 'X': - x, - 'RepeatsTensor': - repeats if isinstance(repeats, Variable) else None - }, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'Repeats': repeats if isinstance(repeats, int) else 0 - }) + helper.append_op( + type='repeat_interleave', + inputs={ + 'X': x, + 'RepeatsTensor': repeats if isinstance(repeats, Variable) else None, + }, + outputs={'Out': out}, + attrs={ + 'dim': axis, + 'Repeats': repeats if isinstance(repeats, int) else 0, + }, + ) return out @@ -4184,7 +4464,8 @@ def moveaxis(x, source, destination, name=None): dst = [destination] if isinstance(destination, int) else destination assert len(src) == len( - dst), "'source' must have the same number with 'destination'" + dst + ), "'source' must have the same number with 'destination'" count = Counter(src).most_common(1) if count[0][1] > 1: @@ -4201,29 +4482,31 @@ def moveaxis(x, source, destination, name=None): dst_dims = list(range(ndim)) for i, axis in enumerate(zip(src, dst)): - assert isinstance(axis[0], - int), "Each elemment of 'source' must be integer." + assert isinstance( + axis[0], int + ), "Each elemment of 'source' must be integer." if axis[0] < 0: - assert axis[ - 0] >= -ndim, "'source' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis[0] >= -ndim + ), "'source' must be in the range of [-{0}, {0})".format(ndim) src[i] += ndim else: - assert axis[ - 0] < ndim, "'source' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis[0] < ndim + ), "'source' must be in the range of [-{0}, {0})".format(ndim) - assert isinstance(axis[1], - int), "Each elemment of 'source' must be integer." + assert isinstance( + axis[1], int + ), "Each elemment of 'source' must be integer." 
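# A minimal usage sketch, assuming a working paddle installation, of the
# moveaxis() API whose axis checks are being reformatted here: each entry of
# 'source' and 'destination' must be an int in the range [-ndim, ndim).
import paddle

x = paddle.ones([3, 4, 5])
y = paddle.moveaxis(x, source=0, destination=-1)
print(y.shape)  # [4, 5, 3]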
if axis[1] < 0: - assert axis[ - 1] >= -ndim, "'source' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis[1] >= -ndim + ), "'source' must be in the range of [-{0}, {0})".format(ndim) dst[i] += ndim else: - assert axis[ - 1] < ndim, "'source' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis[1] < ndim + ), "'source' must be in the range of [-{0}, {0})".format(ndim) perm[dst[i]] = src[i] src_dims.remove(src[i]) dst_dims.remove(dst[i]) @@ -4239,32 +4522,44 @@ def moveaxis(x, source, destination, name=None): out, _ = _legacy_C_ops.transpose2(x, 'axis', perm) return out - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', - 'complex128' - ], 'moveaxis') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'moveaxis', + ) helper = LayerHelper('moveaxis', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='transpose2', - inputs={'X': [x]}, - outputs={ - 'Out': [out], - 'XShape': [x_shape] - }, - attrs={'axis': perm}) + helper.append_op( + type='transpose2', + inputs={'X': [x]}, + outputs={'Out': [out], 'XShape': [x_shape]}, + attrs={'axis': perm}, + ) return out def non_negative_axis(arr, axis): ndim = len(arr.shape) if axis >= 0: - assert axis < ndim, "'axis' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis < ndim + ), "'axis' must be in the range of [-{0}, {0})".format(ndim) else: - assert axis >= -ndim, "'axis' must be in the range of [-{0}, {0})".format( - ndim) + assert ( + axis >= -ndim + ), "'axis' must be in the range of [-{0}, {0})".format(ndim) axis += ndim return axis @@ -4307,9 +4602,10 @@ def take_along_axis(arr, indices, axis): print(result) # [[1, 2, 3]] """ - if (len(arr.shape) != len(indices.shape)): + if len(arr.shape) != len(indices.shape): raise ValueError( - "`indices` and `arr` must have the same number of dimensions!") + "`indices` and `arr` must have the same number of dimensions!" 
+ ) axis = non_negative_axis(arr, axis) broadcast_shape = infer_broadcast_shape(arr, indices, axis) if not broadcast_shape: @@ -4325,10 +4621,14 @@ def take_along_axis(arr, indices, axis): return _C_ops.take_along_axis(arr, indices, axis) return _legacy_C_ops.take_along_axis(arr, indices, 'Axis', axis) check_variable_and_dtype( - arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], - 'take_along_axis') - check_variable_and_dtype(indices, 'index', ['int32', 'int64'], - 'take_along_axis') + arr, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], + 'take_along_axis', + ) + check_variable_and_dtype( + indices, 'index', ['int32', 'int64'], 'take_along_axis' + ) indices = paddle.broadcast_to(indices, broadcast_shape) broadcast_shape_list = list(broadcast_shape) broadcast_shape_list[axis] = list(arr.shape)[axis] @@ -4337,13 +4637,12 @@ def take_along_axis(arr, indices, axis): helper = LayerHelper('take_along_axis', **locals()) dtype = helper.input_dtype() result = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="take_along_axis", - inputs={ - "Input": arr, - "Index": indices - }, - attrs={"Axis": axis}, - outputs={"Result": result}) + helper.append_op( + type="take_along_axis", + inputs={"Input": arr, "Index": indices}, + attrs={"Axis": axis}, + outputs={"Result": result}, + ) return result @@ -4376,44 +4675,48 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'): # [60, 40, 50]] """ - if (len(arr.shape) != len(indices.shape)): + if len(arr.shape) != len(indices.shape): raise ValueError( - "`indices` and `arr` must have the same number of dimensions!") + "`indices` and `arr` must have the same number of dimensions!" + ) axis = non_negative_axis(arr, axis) broadcast_shape = infer_broadcast_shape(arr, indices, axis) if _non_static_mode(): - values = paddle.to_tensor(values) if not isinstance( - values, paddle.Tensor) else values + values = ( + paddle.to_tensor(values) + if not isinstance(values, paddle.Tensor) + else values + ) if broadcast_shape: indices = paddle.broadcast_to(indices, broadcast_shape) values = paddle.broadcast_to(values, indices.shape) if in_dygraph_mode(): return _C_ops.put_along_axis(arr, indices, values, axis, reduce) - return _legacy_C_ops.put_along_axis(arr, indices, values, "Axis", axis, - "Reduce", reduce) + return _legacy_C_ops.put_along_axis( + arr, indices, values, "Axis", axis, "Reduce", reduce + ) check_variable_and_dtype( - arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], - 'put_along_axis') - check_variable_and_dtype(indices, 'index', ['int32', 'int64'], - 'put_along_axis') + arr, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], + 'put_along_axis', + ) + check_variable_and_dtype( + indices, 'index', ['int32', 'int64'], 'put_along_axis' + ) if broadcast_shape: indices = paddle.broadcast_to(indices, broadcast_shape) values = paddle.broadcast_to(values, indices.shape) helper = LayerHelper('put_along_axis', **locals()) dtype = helper.input_dtype() result = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="put_along_axis", - inputs={ - "Input": arr, - "Index": indices, - "Value": values - }, - attrs={ - "Axis": axis, - "Reduce": reduce - }, - outputs={"Result": result}) + helper.append_op( + type="put_along_axis", + inputs={"Input": arr, "Index": indices, "Value": values}, + attrs={"Axis": axis, "Reduce": reduce}, + outputs={"Result": result}, + ) return result @@ -4423,20 +4726,25 @@ def put_along_axis_(arr, indices, values, 
axis, reduce='assign'): Inplace version of ``put_along_axis`` API, the output Tensor will be inplaced with input ``arr``. Please refer to :ref:`api_tensor_put_along_axis`. """ - if (len(arr.shape) != len(indices.shape)): + if len(arr.shape) != len(indices.shape): raise ValueError( - "`indices` and `arr` must have the same number of dimensions!") + "`indices` and `arr` must have the same number of dimensions!" + ) axis = non_negative_axis(arr, axis) broadcast_shape = infer_broadcast_shape(arr, indices, axis) - values = paddle.to_tensor(values) if not isinstance( - values, paddle.Tensor) else values + values = ( + paddle.to_tensor(values) + if not isinstance(values, paddle.Tensor) + else values + ) if broadcast_shape: indices = paddle.broadcast_to(indices, broadcast_shape) values = paddle.broadcast_to(values, indices.shape) if in_dygraph_mode(): return _C_ops.put_along_axis_(arr, indices, values, axis, reduce) - return _legacy_C_ops.put_along_axis_(arr, indices, values, "Axis", axis, - "Reduce", reduce) + return _legacy_C_ops.put_along_axis_( + arr, indices, values, "Axis", axis, "Reduce", reduce + ) def index_add(x, index, axis, value, name=None): @@ -4475,24 +4783,36 @@ def index_add(x, index, axis, value, name=None): helper = LayerHelper("index_add", **locals()) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.manipulation.index_add') - check_variable_and_dtype(index, 'index', ['int32', 'int64'], - 'paddle.tensor.manipulation.index_add') + x, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.manipulation.index_add', + ) + check_variable_and_dtype( + index, + 'index', + ['int32', 'int64'], + 'paddle.tensor.manipulation.index_add', + ) check_variable_and_dtype( - value, 'add_value', ['float16', 'float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.manipulation.index_add') + value, + 'add_value', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.manipulation.index_add', + ) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='index_add', - inputs={ - 'X': x, - 'Index': index, - 'AddValue': value, - }, - outputs={'Out': out}, - attrs={'axis': axis}) + helper.append_op( + type='index_add', + inputs={ + 'X': x, + 'Index': index, + 'AddValue': value, + }, + outputs={'Out': out}, + attrs={'axis': axis}, + ) return out @@ -4528,7 +4848,7 @@ __METHODS = { 'fill_diagonal_': fill_diagonal_, 'fill_diagonal_tensor_': fill_diagonal_tensor_, "fill_diagonal_tensor": fill_diagonal_tensor, - 'tolist': tolist + 'tolist': tolist, } for name, func in __METHODS.items(): setattr(core.VarBase, name, func) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index a2720ec48f22bc78725ae867dd7ce50178ab4dc7..cd8dbd08b94d865116bc68f390259f5427721873 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -27,10 +27,21 @@ from .layer_function_generator import generate_layer_fn import paddle from ..static import Variable -from ..framework import core, in_dygraph_mode, _non_static_mode, LayerHelper, _in_legacy_dygraph +from ..framework import ( + core, + in_dygraph_mode, + _non_static_mode, + LayerHelper, + _in_legacy_dygraph, +) from ..fluid.framework import _in_legacy_dygraph from ..framework import _varbase_creator, convert_np_dtype_to_dtype_ -from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype +from ..fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + 
convert_dtype, +) from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only from ..fluid.layers import utils @@ -177,15 +188,33 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): return dygraph_utils._append_activation_in_dygraph(out, act) elif _in_legacy_dygraph(): _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale - out = _legacy_C_ops.scale(x, 'scale', float(_scale), 'bias', - float(bias), 'bias_after_scale', - bias_after_scale) + out = _legacy_C_ops.scale( + x, + 'scale', + float(_scale), + 'bias', + float(bias), + 'bias_after_scale', + bias_after_scale, + ) return dygraph_utils._append_activation_in_dygraph(out, act) - check_variable_and_dtype(x, "x", [ - 'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32', - 'int64', 'uint8' - ], "scale") + check_variable_and_dtype( + x, + "x", + [ + 'float16', + 'uint16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + ], + "scale", + ) inputs = {'X': [x]} attrs = { 'bias': float(bias), @@ -198,10 +227,9 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): helper = LayerHelper('scale', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='scale', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return helper.append_activation(out) @@ -240,13 +268,12 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): helper = LayerHelper('stanh', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='stanh', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'scale_a': scale_a, - 'scale_b': scale_b - }) + helper.append_op( + type='stanh', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'scale_a': scale_a, 'scale_b': scale_b}, + ) return out @@ -310,20 +337,23 @@ def multiplex(inputs, index, name=None): check_type(inputs, 'inputs', (list), 'multiplex') if len(inputs) < 2: raise ValueError( - "inputs should be a list object with at least 2 elements.") + "inputs should be a list object with at least 2 elements." 
+ ) for id, x in enumerate(inputs): - check_variable_and_dtype(x, 'input[' + str(id) + ']', - ['float32', 'float64', 'int32', 'int64'], - 'multiplex') + check_variable_and_dtype( + x, + 'input[' + str(id) + ']', + ['float32', 'float64', 'int32', 'int64'], + 'multiplex', + ) check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex') out = helper.create_variable_for_type_inference(inputs[0].dtype) - helper.append_op(type='multiplex', - inputs={ - 'X': inputs, - 'Ids': index - }, - outputs={'Out': [out]}) + helper.append_op( + type='multiplex', + inputs={'X': inputs, 'Ids': index}, + outputs={'Out': [out]}, + ) return out @@ -337,9 +367,15 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): return _C_ops.scale_(x, scale, float(bias), bias_after_scale) if _in_legacy_dygraph(): _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale - return _legacy_C_ops.scale_(x, 'scale', float(_scale), 'bias', - float(bias), 'bias_after_scale', - bias_after_scale) + return _legacy_C_ops.scale_( + x, + 'scale', + float(_scale), + 'bias', + float(bias), + 'bias_after_scale', + bias_after_scale, + ) def pow(x, y, name=None): @@ -395,31 +431,28 @@ def pow(x, y, name=None): return _C_ops.elementwise_pow(x, y) else: raise TypeError( - 'y must be scalar or tensor type, but received: %s ' % - (y.dtype)) + 'y must be scalar or tensor type, but received: %s ' % (y.dtype) + ) if _in_legacy_dygraph(): if isinstance(y, (int, float)): return _legacy_C_ops.pow(x, 'factor', y) elif isinstance(y, (paddle.Tensor, Variable)): - return _elementwise_op_in_dygraph(x, - y, - axis=-1, - act=None, - op_name='elementwise_pow') + return _elementwise_op_in_dygraph( + x, y, axis=-1, act=None, op_name='elementwise_pow' + ) else: raise TypeError( - 'y must be scalar or tensor type, but received: %s ' % - (y.dtype)) + 'y must be scalar or tensor type, but received: %s ' % (y.dtype) + ) # in static graph mode if isinstance(y, (int, float)): helper = LayerHelper('pow', **locals()) inputs = {'X': x} attrs = {'factor': y} out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='pow', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) return out elif isinstance(y, (paddle.Tensor, Variable)): # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here @@ -427,8 +460,9 @@ def pow(x, y, name=None): out = helper.create_variable_for_type_inference(dtype=x.dtype) return _elementwise_op(LayerHelper('elementwise_pow', **locals())) else: - raise TypeError('y must be scalar or tensor type, but received: %s ' % - (type(y))) + raise TypeError( + 'y must be scalar or tensor type, but received: %s ' % (type(y)) + ) OP_NAMEMAPPING = { @@ -445,13 +479,9 @@ OP_NAMEMAPPING = { @dygraph_only -def _elementwise_op_in_dygraph(x, - y, - axis=-1, - act=None, - use_mkldnn=False, - op_name=None): - +def _elementwise_op_in_dygraph( + x, y, axis=-1, act=None, use_mkldnn=False, op_name=None +): def is_inplace(op_name): return op_name[-1] == "_" @@ -462,16 +492,17 @@ def _elementwise_op_in_dygraph(x, if in_dygraph_mode(): op = getattr( _C_ops, - OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) + OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name, + ) out = op(x, y) if _in_legacy_dygraph(): op = getattr(_legacy_C_ops, op_name) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) - return 
dygraph_utils._append_activation_in_dygraph(out, - act, - use_mkldnn=use_mkldnn) + return dygraph_utils._append_activation_in_dygraph( + out, act, use_mkldnn=use_mkldnn + ) def _elementwise_op(helper): @@ -485,11 +516,17 @@ def _elementwise_op(helper): assert x is not None, 'x cannot be None in {}'.format(original_op_type) assert y is not None, 'y cannot be None in {}'.format(original_op_type) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], - original_op_type) + x, + 'x', + ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], + original_op_type, + ) check_variable_and_dtype( - y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], - original_op_type) + y, + 'y', + ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'], + original_op_type, + ) axis = helper.kwargs.get('axis', -1) use_mkldnn = helper.kwargs.get('use_mkldnn', False) @@ -499,20 +536,16 @@ def _elementwise_op(helper): if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: - out = helper.create_variable(name=name, - dtype=x.dtype, - persistable=False) - - helper.append_op(type=op_type, - inputs={ - 'X': x, - 'Y': y - }, - outputs={'Out': out}, - attrs={ - 'axis': axis, - 'use_mkldnn': use_mkldnn - }) + out = helper.create_variable( + name=name, dtype=x.dtype, persistable=False + ) + + helper.append_op( + type=op_type, + inputs={'X': x, 'Y': y}, + outputs={'Out': out}, + attrs={'axis': axis, 'use_mkldnn': use_mkldnn}, + ) return helper.append_activation(out) @@ -592,8 +625,10 @@ def add_(x, y, name=None): out_shape = broadcast_shape(x.shape, y.shape) if out_shape != x.shape: raise ValueError( - "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation." - .format(out_shape, x.shape)) + "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format( + out_shape, x.shape + ) + ) if in_dygraph_mode(): return _C_ops.add_(x, y) @@ -663,11 +698,9 @@ def subtract(x, y, name=None): return _C_ops.subtract(x, y) else: if _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) else: return _elementwise_op(LayerHelper(op_type, **locals())) @@ -684,17 +717,17 @@ def subtract_(x, y, name=None): out_shape = broadcast_shape(x.shape, y.shape) if out_shape != x.shape: raise ValueError( - "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation." 
- .format(out_shape, x.shape)) + "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format( + out_shape, x.shape + ) + ) if in_dygraph_mode(): return _C_ops.subtract_(x, y) else: - out = _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name='elementwise_sub_') + out = _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name='elementwise_sub_' + ) return out @@ -735,11 +768,9 @@ def divide(x, y, name=None): return _C_ops.divide(x, y) else: if _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) else: return _elementwise_op(LayerHelper(op_type, **locals())) @@ -839,8 +870,10 @@ def remainder_(x, y, name=None): out_shape = broadcast_shape(x.shape, y.shape) if out_shape != x.shape: raise ValueError( - "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation." - .format(out_shape, x.shape)) + "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format( + out_shape, x.shape + ) + ) return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type) @@ -892,16 +925,15 @@ def multiply(x, y, name=None): return _C_ops.multiply(x, y) else: if _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) else: if x.dtype != y.dtype: raise TypeError( 'Input tensors must be same type, but received type of x: %s, type of y: %s ' - % (x.dtype, y.dtype)) + % (x.dtype, y.dtype) + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -966,11 +998,9 @@ def maximum(x, y, name=None): if in_dygraph_mode(): return _C_ops.maximum(x, y) elif _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -1034,11 +1064,9 @@ def minimum(x, y, name=None): if in_dygraph_mode(): return _C_ops.minimum(x, y) elif _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -1104,11 +1132,9 @@ def fmax(x, y, name=None): if in_dygraph_mode(): return _C_ops.fmax(x, y, axis) if _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -1174,11 +1200,9 @@ def fmin(x, y, name=None): if in_dygraph_mode(): return _C_ops.fmin(x, y, axis) if _in_legacy_dygraph(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -1273,24 +1297,59 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): if _in_legacy_dygraph(): if dtype_flag: - return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all_flag, - 'in_dtype', x.dtype, 'out_dtype', - dtype) + return _legacy_C_ops.reduce_sum( + x, + 'dim', + axis, + 
'keep_dim', + keepdim, + 'reduce_all', + reduce_all_flag, + 'in_dtype', + x.dtype, + 'out_dtype', + dtype, + ) else: - return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all_flag) + return _legacy_C_ops.reduce_sum( + x, + 'dim', + axis, + 'keep_dim', + keepdim, + 'reduce_all', + reduce_all_flag, + ) attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all_flag} if dtype_flag: attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype}) - check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', - 'complex64', 'complex128', u'bool', u'float16', u'float32', u'float64', - u'int32', u'int64', u'complex64', u'complex128' - ], 'sum') + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + u'bool', + u'float16', + u'float32', + u'float64', + u'int32', + u'int64', + u'complex64', + u'complex128', + ], + 'sum', + ) check_type(axis, 'axis', (int, list, tuple, type(None), Variable), 'sum') @@ -1299,10 +1358,9 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): out = helper.create_variable_for_type_inference(dtype=dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_sum', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='reduce_sum', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) return out @@ -1353,8 +1411,9 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None): out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19] out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18] """ - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'nansum') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'nansum' + ) check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum') zero_tensor = paddle.zeros_like(x) @@ -1419,16 +1478,17 @@ def nanmean(x, axis=None, keepdim=False, name=None): """ if isinstance(axis, int): axis = [axis] - check_variable_and_dtype(x, 'x/input', - ['uint16', 'float16', 'float32', 'float64'], - 'nanmean') + check_variable_and_dtype( + x, 'x/input', ['uint16', 'float16', 'float32', 'float64'], 'nanmean' + ) if axis is not None: check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean') cnt = paddle.sum(~paddle.isnan(x), axis=axis, keepdim=keepdim) return paddle.divide( paddle.nansum(x, axis=axis, keepdim=keepdim, name=name), - cnt.astype(x.dtype)) + cnt.astype(x.dtype), + ) def count_nonzero(x, axis=None, keepdim=False, name=None): @@ -1485,8 +1545,9 @@ def count_nonzero(x, axis=None, keepdim=False, name=None): axis = [axis] dims = len(x.shape) for i in range(len(axis)): - if not isinstance(axis[i], int) or not (axis[i] < dims - and axis[i] >= -dims): + if not isinstance(axis[i], int) or not ( + axis[i] < dims and axis[i] >= -dims + ): raise ValueError( "Axis should be None, int, or a list, element should in range [-rank(x), rank(x))." 
) @@ -1568,18 +1629,29 @@ def add_n(inputs, name=None): if isinstance(inputs, list) or isinstance(inputs, tuple): if len(inputs) > 0: for input in inputs: - check_variable_and_dtype(input, "inputs", \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'add_n') + check_variable_and_dtype( + input, + "inputs", + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'add_n', + ) else: - check_variable_and_dtype(inputs, "inputs", \ - ['float16', 'float32', 'float64', 'int32', 'int64'], 'add_n') + check_variable_and_dtype( + inputs, + "inputs", + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'add_n', + ) out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('inputs')) - helper.append_op(type='sum', - inputs={'X': inputs}, - outputs={'Out': out}, - attrs={'use_mkldnn': False}) + dtype=helper.input_dtype('inputs') + ) + helper.append_op( + type='sum', + inputs={'X': inputs}, + outputs={'Out': out}, + attrs={'use_mkldnn': False}, + ) return out @@ -1622,15 +1694,14 @@ def trunc(input, name=None): attrs = {} helper = LayerHelper("trunc", **locals()) - check_variable_and_dtype(input, 'X', - ['int32', 'int64', 'float32', 'float64'], - 'trunc') + check_variable_and_dtype( + input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc' + ) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type="trunc", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) return out @@ -1709,8 +1780,9 @@ def mm(input, mat2, name=None): def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): - check_variable_and_dtype(val, name, - ['float16', 'float32', 'float64'], 'mm') + check_variable_and_dtype( + val, name, ['float16', 'float32', 'float64'], 'mm' + ) x_shape = list(x.shape) y_shape = list(y.shape) if len(x_shape) == 1: @@ -1725,7 +1797,8 @@ def mm(input, mat2, name=None): "After performing an optional transpose, Input X's width should be " "equal to Y's width for multiplication " "prerequisites. But received X's shape: %s, Y's shape: %s\n" - % (x_shape, y_shape)) + % (x_shape, y_shape) + ) if len(y_shape) > 2 and len(x_shape) > 2: for i, dim_x in enumerate(x_shape[:-2]): @@ -1737,18 +1810,16 @@ def mm(input, mat2, name=None): "When the matrix is larger than 2 dimensions, the higher " "dimensional values of the two matrices need to be equal. " "But received x_shape[%d] != y_shape[%d]. X's shape: %s, " - "Y's shape: %s.\n" % (i, i, x_shape, y_shape)) + "Y's shape: %s.\n" % (i, i, x_shape, y_shape) + ) __check_input(input, mat2) helper = LayerHelper('mm', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op(type='matmul_v2', - inputs={ - 'X': input, - 'Y': mat2 - }, - outputs={'Out': out}) + helper.append_op( + type='matmul_v2', inputs={'X': input, 'Y': mat2}, outputs={'Out': out} + ) return out @@ -1796,36 +1867,50 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): y_shape = y.shape if not len(x_shape) == len(y_shape) == 2: raise ValueError( - "The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}" - .format(x_shape, y_shape)) + "The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format( + x_shape, y_shape + ) + ) if x_shape[1] != y_shape[0]: raise ValueError( - "The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}." 
- .format(x_shape, y_shape)) + "The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format( + x_shape, y_shape + ) + ) if len(input_shape) == 2: if input_shape[0] != x_shape[0]: if input_shape[0] != 1: raise ValueError( - "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}" - .format(input_shape[0])) + "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format( + input_shape[0] + ) + ) if input_shape[1] != y_shape[1] and input_shape[1] != 1: raise ValueError( - "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}" - .format(input_shape[1])) + "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format( + input_shape[1] + ) + ) if input_shape[1] != y_shape[1]: if input_shape[1] != 1: raise ValueError( - "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}" - .format(input_shape[1])) + "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format( + input_shape[1] + ) + ) elif len(input_shape) == 1: if input_shape[0] not in (y_shape[1], 1): raise ValueError( - "The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]" - .format(input_shape, x_shape[0], y_shape[1])) + "The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format( + input_shape, x_shape[0], y_shape[1] + ) + ) else: raise ValueError( - "The dimention of input should be 2 or 1 but receive input's shape: {}" - .format(input_shape)) + "The dimention of input should be 2 or 1 but receive input's shape: {}".format( + input_shape + ) + ) if in_dygraph_mode(): return _C_ops.addmm(input, x, y, alpha, beta) @@ -1838,16 +1923,16 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): attrs = {'Alpha': alpha, 'Beta': beta} helper = LayerHelper("addmm", **locals()) - check_variable_and_dtype(input, 'Input', ['float32', 'float64'], - 'addmm') + check_variable_and_dtype( + input, 'Input', ['float32', 'float64'], 'addmm' + ) check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm') check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm') out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="addmm", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) return out @@ -1890,19 +1975,24 @@ def renorm(x, p, axis, max_norm): if not axis < len(input_shape): raise ValueError( "the axis:{} should be less then the shape's size {}:{}".format( - axis, len(input_shape), input_shape)) + axis, len(input_shape), input_shape + ) + ) if not axis >= 0: if not axis >= -1 * len(input_shape): raise ValueError( - "the axis:{} should not be less than -1 * length of input_shape:{}" - .format(axis, -1 * len(input_shape))) + "the axis:{} should not be less than -1 * length of input_shape:{}".format( + axis, -1 * len(input_shape) + ) + ) axis = axis + len(input_shape) if in_dygraph_mode(): out = _C_ops.renorm(x, p, axis, max_norm) return out elif _in_legacy_dygraph(): - out = _legacy_C_ops.renorm(x, 'p', p, 'axis', axis, 'max_norm', - max_norm) + out = _legacy_C_ops.renorm( + x, 'p', p, 'axis', axis, 'max_norm', max_norm + ) return out inputs = {'X': x} @@ -1911,10 +2001,9 @@ def renorm(x, p, axis, 
max_norm): helper = LayerHelper("renorm", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="renorm", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) return out @@ -1965,9 +2054,9 @@ def inner(x, y, name=None): def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): - check_variable_and_dtype(val, name, - ['float16', 'float32', 'float64'], - 'inner') + check_variable_and_dtype( + val, name, ['float16', 'float32', 'float64'], 'inner' + ) x_shape = list(xshape) y_shape = list(yshape) @@ -1978,18 +2067,16 @@ def inner(x, y, name=None): "After performing an optional transpose, Input X's last dim should be " "equal to Y's last dim for multiplication " "prerequisites. But received X's shape: %s, Y's shape: %s\n" - % (x_shape, y_shape)) + % (x_shape, y_shape) + ) __check_input(nx, ny) helper = LayerHelper('inner', **locals()) out = helper.create_variable_for_type_inference(dtype=nx.dtype) - helper.append_op(type='matmul_v2', - inputs={ - 'X': nx, - 'Y': ny.T - }, - outputs={'Out': out}) + helper.append_op( + type='matmul_v2', inputs={'X': nx, 'Y': ny.T}, outputs={'Out': out} + ) return out.reshape(dstshape) @@ -2033,19 +2120,17 @@ def outer(x, y, name=None): def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): - check_variable_and_dtype(val, name, - ['float16', 'float32', 'float64'], 'inner') + check_variable_and_dtype( + val, name, ['float16', 'float32', 'float64'], 'inner' + ) __check_input(nx, ny) helper = LayerHelper('outer', **locals()) out = helper.create_variable_for_type_inference(dtype=nx.dtype) - helper.append_op(type='matmul_v2', - inputs={ - 'X': nx, - 'Y': ny - }, - outputs={'Out': out}) + helper.append_op( + type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out} + ) return out @@ -2093,9 +2178,11 @@ def logsumexp(x, axis=None, keepdim=False, name=None): """ if isinstance(axis, int): axis = [axis] - reduce_all = True if axis is None \ - or len(axis)==0 \ - or len(axis) == len(x.shape) else False + reduce_all = ( + True + if axis is None or len(axis) == 0 or len(axis) == len(x.shape) + else False + ) if axis is None or len(axis) == 0: axis = [0] @@ -2104,18 +2191,18 @@ def logsumexp(x, axis=None, keepdim=False, name=None): axis = range(len(x.shape)) return _C_ops.logsumexp(x, axis, keepdim, reduce_all) if _in_legacy_dygraph(): - return _legacy_C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.logsumexp( + x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all + ) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp') helper = LayerHelper('logsumexp', **locals()) attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all} out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='logsumexp', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) return out @@ -2157,14 +2244,15 @@ def inverse(x, name=None): raise ValueError( "The input of inverse is expected to be a Tensor whose number " "of dimensions is no less than 2. But reviced: %d, " - "x's shape: %s." % (len(x.shape), x.shape)) + "x's shape: %s." 
% (len(x.shape), x.shape) + ) _check_input(x) helper = LayerHelper('inverse', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='inverse', - inputs={'Input': [x]}, - outputs={'Output': [out]}) + helper.append_op( + type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]} + ) return out @@ -2180,8 +2268,10 @@ def _get_reduce_axis(axis): axis = [axis] else: raise TypeError( - "The type of axis must be int, list or tuple, but received {}". - format(type(axis))) + "The type of axis must be int, list or tuple, but received {}".format( + type(axis) + ) + ) reduce_all = True if axis == None or axis == [] else False if axis == None: axis = [] @@ -2206,8 +2296,10 @@ def _get_reduce_all_value(axis): axis = [axis] else: raise TypeError( - "The type of axis must be int, list or tuple, but received {}". - format(type(axis))) + "The type of axis must be int, list or tuple, but received {}".format( + type(axis) + ) + ) reduce_all = True if axis == None or axis == [] else False axis = axis if axis != None and axis != [] else [0] @@ -2296,24 +2388,24 @@ def max(x, axis=None, keepdim=False, name=None): if in_dygraph_mode(): return _C_ops.max(x, axis, keepdim) if _in_legacy_dygraph(): - return _legacy_C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_max( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all + ) helper = LayerHelper('max', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'max') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max' + ) if not isinstance(axis, Variable) and utils._contain_var(axis): axis = utils._convert_to_tensor_list(axis) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_max', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + helper.append_op( + type='reduce_max', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return out @@ -2399,24 +2491,24 @@ def min(x, axis=None, keepdim=False, name=None): return _C_ops.min(x, axis, keepdim) if _in_legacy_dygraph(): - return _legacy_C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_min( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all + ) helper = LayerHelper('min', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'min') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min' + ) if not isinstance(axis, Variable) and utils._contain_var(axis): axis = utils._convert_to_tensor_list(axis) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_min', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + helper.append_op( + type='reduce_min', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return out @@ -2514,22 +2606,22 @@ def amax(x, axis=None, keepdim=False, name=None): if in_dygraph_mode(): return _C_ops.amax(x, axis, keepdim) if _in_legacy_dygraph(): - return _legacy_C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_amax( + x, 'dim', axis, 'keep_dim', keepdim, 
'reduce_all', reduce_all + ) helper = LayerHelper('amax', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'amax') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_amax', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + helper.append_op( + type='reduce_amax', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return out @@ -2628,21 +2720,21 @@ def amin(x, axis=None, keepdim=False, name=None): if in_dygraph_mode(): return _C_ops.amin(x, axis, keepdim) elif _in_legacy_dygraph(): - return _legacy_C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_amin( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all + ) helper = LayerHelper('amin', **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'amin') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_amin', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'dim': axis, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + helper.append_op( + type='reduce_amin', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return out @@ -2856,16 +2948,27 @@ def clip(x, min=None, max=None, name=None): if min is not None: check_type(min, 'min', (float, int, Variable), 'clip') if isinstance(min, Variable): - check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'], - 'clip', '(When the type of min in clip is Variable.)') + check_dtype( + min.dtype, + 'min', + ['float32', 'float64', 'int32'], + 'clip', + '(When the type of min in clip is Variable.)', + ) if max is not None: check_type(max, 'max', (float, int, Variable), 'clip') if isinstance(max, Variable): - check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'], - 'clip', '(When the type of max in clip is Variable.)') + check_dtype( + max.dtype, + 'max', + ['float32', 'float64', 'int32'], + 'clip', + '(When the type of max in clip is Variable.)', + ) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'clip') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip' + ) inputs = {'X': x} attrs = {'min': min_, 'max': max_} @@ -2884,11 +2987,11 @@ def clip(x, min=None, max=None, name=None): helper = LayerHelper('clip', **locals()) output = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('x')) - helper.append_op(type='clip', - inputs=inputs, - outputs={'Out': [output]}, - attrs=attrs) + dtype=helper.input_dtype('x') + ) + helper.append_op( + type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs + ) return output @@ -2957,52 +3060,56 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): """ def __check_input(x, offset, axis1, axis2): - check_dtype(x.dtype, 'Input', - ['int32', 'int64', 'float16', 'float32', 'float64'], - 'trace') + check_dtype( + x.dtype, + 'Input', + ['int32', 'int64', 'float16', 'float32', 'float64'], + 'trace', + ) input_shape = list(x.shape) - assert len(input_shape) >= 2, \ - "The x must be at least 2-dimensional, " \ - "But received Input x's dimensional: 
%s.\n" % \ - len(input_shape) + assert len(input_shape) >= 2, ( + "The x must be at least 2-dimensional, " + "But received Input x's dimensional: %s.\n" % len(input_shape) + ) axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1 axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2 - assert ((0 <= axis1_) and (axis1_ < len(input_shape))), \ - "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert (0 <= axis1_) and (axis1_ < len(input_shape)), ( + "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape)), len(input_shape) - 1, axis1) + ) - assert ((0 <= axis2_) and (axis2_ < len(input_shape))), \ - "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert (0 <= axis2_) and (axis2_ < len(input_shape)), ( + "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape)), len(input_shape) - 1, axis2) + ) - - assert axis1_ != axis2_, \ - "axis1 and axis2 cannot be the same axis." \ - "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2) + assert axis1_ != axis2_, ( + "axis1 and axis2 cannot be the same axis." + "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2) + ) if in_dygraph_mode(): return _C_ops.trace(x, offset, axis1, axis2) if _in_legacy_dygraph(): - return _legacy_C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', - axis2) + return _legacy_C_ops.trace( + x, 'offset', offset, 'axis1', axis1, 'axis2', axis2 + ) __check_input(x, offset, axis1, axis2) helper = LayerHelper('trace', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='trace', - inputs={'Input': [x]}, - attrs={ - 'offset': offset, - 'axis1': axis1, - 'axis2': axis2 - }, - outputs={'Out': [out]}) + helper.append_op( + type='trace', + inputs={'Input': [x]}, + attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2}, + outputs={'Out': [out]}, + ) return out @@ -3075,47 +3182,52 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None): return _C_ops.diagonal(x, offset, axis1, axis2) else: if _in_legacy_dygraph(): - return _legacy_C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, - 'axis2', axis2) + return _legacy_C_ops.diagonal( + x, 'offset', offset, 'axis1', axis1, 'axis2', axis2 + ) def __check_input(x, offset, axis1, axis2): - check_dtype(x.dtype, 'Input', - ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'], - 'diagonal') + check_dtype( + x.dtype, + 'Input', + ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'], + 'diagonal', + ) input_shape = list(x.shape) - assert len(input_shape) >= 2, \ - "The x must be at least 2-dimensional, " \ - "But received Input x's dimensional: %s.\n" % \ - len(input_shape) + assert len(input_shape) >= 2, ( + "The x must be at least 2-dimensional, " + "But received Input x's dimensional: %s.\n" % len(input_shape) + ) axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1 axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2 - assert axis1_ < len(input_shape), \ - "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert axis1_ < len(input_shape), ( + "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape)), len(input_shape) - 1, axis1) + ) - assert axis2_ < len(input_shape), \ - "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \ + assert axis2_ < len(input_shape), ( + 
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" % (-(len(input_shape)), len(input_shape) - 1, axis2) + ) - assert axis1_ != axis2_, \ - "axis1 and axis2 cannot be the same axis." \ - "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2) + assert axis1_ != axis2_, ( + "axis1 and axis2 cannot be the same axis." + "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2) + ) __check_input(x, offset, axis1, axis2) helper = LayerHelper('diagonal', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='diagonal', - inputs={'Input': [x]}, - attrs={ - 'offset': offset, - 'axis1': axis1, - 'axis2': axis2 - }, - outputs={'Out': [out]}) + helper.append_op( + type='diagonal', + inputs={'Input': [x]}, + attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2}, + outputs={'Out': [out]}, + ) return out @@ -3154,9 +3266,11 @@ def kron(x, y, name=None): return _C_ops.kron(x, y) helper = LayerHelper('kron', **locals()) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron') + x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron' + ) check_variable_and_dtype( - y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron') + y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}) @@ -3212,7 +3326,8 @@ def cumsum(x, axis=None, dtype=None, name=None): x = cast(x, dtype) if in_dygraph_mode(): - if axis is None: axis = -1 + if axis is None: + axis = -1 return _C_ops.cumsum(x, axis, flatten, False, False) if _in_legacy_dygraph(): if axis is None: @@ -3286,27 +3401,29 @@ def logcumsumexp(x, axis=None, dtype=None, name=None): x = cast(x, dtype) if in_dygraph_mode(): - if axis is None: axis = -1 + if axis is None: + axis = -1 return _C_ops.logcumsumexp(x, axis, flatten, False, False) if _in_legacy_dygraph(): if axis is None: return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten) else: - return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten', - flatten) + return _legacy_C_ops.logcumsumexp( + x, 'axis', axis, 'flatten', flatten + ) - check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], - "logcumsumexp") + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp" + ) helper = LayerHelper('logcumsumexp', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='logcumsumexp', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'axis': axis, - 'flatten': flatten - }) + helper.append_op( + type='logcumsumexp', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'axis': axis, 'flatten': flatten}, + ) return out @@ -3366,17 +3483,21 @@ def cumprod(x, dim=None, dtype=None, name=None): return _legacy_C_ops.cumprod(x, 'dim', dim) check_variable_and_dtype( - x, "x", + x, + "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], - 'cumprod') + 'cumprod', + ) check_type(dim, 'dim', int, 'cumprod') helper = LayerHelper('cumprod', **locals()) out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='cumprod', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'dim': dim}) + helper.append_op( + type='cumprod', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': dim}, + ) return out @@ -3407,7 +3528,8 @@ def isfinite(x, name=None): return _legacy_C_ops.isfinite_v2(x) helper = 
LayerHelper("isfinite_v2", **locals()) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite') + x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite' + ) out = helper.create_variable_for_type_inference('bool') helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}) return out @@ -3440,7 +3562,8 @@ def isinf(x, name=None): return _legacy_C_ops.isinf_v2(x) helper = LayerHelper("isinf_v2", **locals()) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf') + x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf' + ) out = helper.create_variable_for_type_inference(dtype='bool') helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out}) return out @@ -3474,7 +3597,8 @@ def isnan(x, name=None): return _legacy_C_ops.isnan_v2(x) helper = LayerHelper("isnan_v2", **locals()) check_variable_and_dtype( - x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan') + x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan' + ) out = helper.create_variable_for_type_inference(dtype='bool') helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out}) return out @@ -3535,8 +3659,9 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): """ if dtype is not None: - check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], - 'prod') + check_dtype( + dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod' + ) if x.dtype != convert_np_dtype_to_dtype_(dtype): x = cast(x, dtype) @@ -3551,35 +3676,39 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): dim = [dim] else: raise TypeError( - "The type of axis must be int, list or tuple, but received {}" - .format(type(dim))) + "The type of axis must be int, list or tuple, but received {}".format( + type(dim) + ) + ) - reduce_all = True if dim is None or len(dim) == 0 or len(dim) == len( - x.shape) else False + reduce_all = ( + True + if dim is None or len(dim) == 0 or len(dim) == len(x.shape) + else False + ) if dim is None or len(dim) == 0: dim = [0] if in_dygraph_mode(): return _C_ops.reduce_prod(x, dim, keepdim, reduce_all) if _in_legacy_dygraph(): - return _legacy_C_ops.reduce_prod(x, 'dim', dim, 'keep_dim', keepdim, - 'reduce_all', reduce_all) + return _legacy_C_ops.reduce_prod( + x, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', reduce_all + ) helper = LayerHelper('reduce_prod', **locals()) - check_variable_and_dtype(x, 'x/input', - ['float32', 'float64', 'int32', 'int64'], - 'reduce_prod') + check_variable_and_dtype( + x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod' + ) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if not isinstance(dim, Variable) and utils._contain_var(dim): dim = utils._convert_to_tensor_list(dim) - helper.append_op(type='reduce_prod', - inputs={'X': x}, - outputs={'Out': out}, - attrs={ - 'dim': dim, - 'keep_dim': keepdim, - 'reduce_all': reduce_all - }) + helper.append_op( + type='reduce_prod', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'dim': dim, 'keep_dim': keepdim, 'reduce_all': reduce_all}, + ) return out @@ -3697,13 +3826,16 @@ def increment(x, value=1.0, name=None): if _in_legacy_dygraph(): return _legacy_C_ops.increment(x, 'step', value) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'increment') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment' + ) helper = 
LayerHelper("increment", **locals()) - helper.append_op(type='increment', - inputs={'X': [x]}, - outputs={'Out': [x]}, - attrs={'step': float(value)}) + helper.append_op( + type='increment', + inputs={'X': [x]}, + outputs={'Out': [x]}, + attrs={'step': float(value)}, + ) return x @@ -3774,13 +3906,14 @@ def all(x, axis=None, keepdim=False, name=None): if _in_legacy_dygraph(): axis = axis if axis != None and axis != [] else [0] - return _legacy_C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all_flag) + return _legacy_C_ops.reduce_all( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag + ) attrs = { 'dim': axis if axis != None and axis != [] and axis != () else [0], 'keep_dim': keepdim, - 'reduce_all': reduce_all_flag + 'reduce_all': reduce_all_flag, } check_variable_and_dtype(x, 'x', ['bool'], 'all') @@ -3788,10 +3921,9 @@ def all(x, axis=None, keepdim=False, name=None): helper = LayerHelper('all', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_all', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='reduce_all', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) return out @@ -3863,13 +3995,14 @@ def any(x, axis=None, keepdim=False, name=None): if _in_legacy_dygraph(): axis = axis if axis != None and axis != [] else [0] - return _legacy_C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all_flag) + return _legacy_C_ops.reduce_any( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag + ) attrs = { 'dim': axis if axis != None and axis != [] and axis != () else [0], 'keep_dim': keepdim, - 'reduce_all': reduce_all_flag + 'reduce_all': reduce_all_flag, } check_variable_and_dtype(x, 'x', ['bool'], 'any') @@ -3878,10 +4011,9 @@ def any(x, axis=None, keepdim=False, name=None): helper = LayerHelper('any', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='reduce_any', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='reduce_any', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) return out @@ -3948,9 +4080,11 @@ def conj(x, name=None): return _legacy_C_ops.conj(x) check_variable_and_dtype( - x, "x", + x, + "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], - 'conj') + 'conj', + ) helper = LayerHelper('conj', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) @@ -4057,12 +4191,9 @@ def neg(x, name=None): # [0.4 0.2 -0.1 -0.3] """ - return scale(x, - scale=-1.0, - bias=0.0, - bias_after_scale=True, - act=None, - name=name) + return scale( + x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name + ) def atan2(x, y, name=None): @@ -4115,11 +4246,17 @@ def atan2(x, y, name=None): return _legacy_C_ops.atan2(x, y) else: check_variable_and_dtype( - x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], - 'atan2') + x, + 'x', + ['int32', 'int64', 'float16', 'float32', 'float64'], + 'atan2', + ) check_variable_and_dtype( - y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], - 'atan2') + y, + 'y', + ['int32', 'int64', 'float16', 'float32', 'float64'], + 'atan2', + ) helper = LayerHelper('atan2', **locals()) inputs = {'X1': x, 'X2': y} @@ -4178,10 +4315,9 @@ def logit(x, eps=None, name=None): check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit') helper = LayerHelper("logit", **locals()) out = 
helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='logit', - inputs={'X': x}, - outputs={'Out': out}, - attrs={'eps': eps}) + helper.append_op( + type='logit', inputs={'X': x}, outputs={'Out': out}, attrs={'eps': eps} + ) return out @@ -4254,8 +4390,10 @@ def lerp_(x, y, weight, name=None): out_shape = broadcast_shape(out_shape, weight.shape) if out_shape != x.shape: raise ValueError( - "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation." - .format(out_shape, x.shape)) + "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format( + out_shape, x.shape + ) + ) if in_dygraph_mode(): return _C_ops.lerp_(x, y, weight) return _legacy_C_ops.lerp_(x, y, weight) @@ -4363,26 +4501,28 @@ def rad2deg(x, name=None): x = cast(x, dtype="float32") return _legacy_C_ops.scale(x, 'scale', rad2deg_scale) else: - check_variable_and_dtype(x, 'x', - ['int32', 'int64', 'float32', 'float64'], - 'rad2deg') + check_variable_and_dtype( + x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg' + ) helper = LayerHelper('rad2deg', **locals()) out_cast = x if convert_dtype(x.dtype) in ['int32', 'int64']: out_cast = helper.create_variable_for_type_inference( - dtype=paddle.float32) - helper.append_op(type='cast', - inputs={'X': x}, - outputs={'Out': out_cast}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': paddle.float32 - }) + dtype=paddle.float32 + ) + helper.append_op( + type='cast', + inputs={'X': x}, + outputs={'Out': out_cast}, + attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32}, + ) out = helper.create_variable_for_type_inference(dtype=out_cast.dtype) - helper.append_op(type='scale', - inputs={'X': out_cast}, - outputs={'Out': out}, - attrs={'scale': rad2deg_scale}) + helper.append_op( + type='scale', + inputs={'X': out_cast}, + outputs={'Out': out}, + attrs={'scale': rad2deg_scale}, + ) return out @@ -4429,26 +4569,28 @@ def deg2rad(x, name=None): x = cast(x, dtype="float32") return _legacy_C_ops.scale(x, 'scale', deg2rad_scale) else: - check_variable_and_dtype(x, 'x', - ['int32', 'int64', 'float32', 'float64'], - 'deg2rad') + check_variable_and_dtype( + x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad' + ) helper = LayerHelper('deg2rad', **locals()) out_cast = x if convert_dtype(x.dtype) in ['int32', 'int64']: out_cast = helper.create_variable_for_type_inference( - dtype=paddle.float32) - helper.append_op(type='cast', - inputs={'X': x}, - outputs={'Out': out_cast}, - attrs={ - 'in_dtype': x.dtype, - 'out_dtype': paddle.float32 - }) + dtype=paddle.float32 + ) + helper.append_op( + type='cast', + inputs={'X': x}, + outputs={'Out': out_cast}, + attrs={'in_dtype': x.dtype, 'out_dtype': paddle.float32}, + ) out = helper.create_variable_for_type_inference(dtype=out_cast.dtype) - helper.append_op(type='scale', - inputs={'X': out_cast}, - outputs={'Out': out}, - attrs={'scale': deg2rad_scale}) + helper.append_op( + type='scale', + inputs={'X': out_cast}, + outputs={'Out': out}, + attrs={'scale': deg2rad_scale}, + ) return out @@ -4513,11 +4655,16 @@ def gcd(x, y, name=None): # paddle.mod will raise an error when any element of y is 0. To avoid # that, we change those zeros to ones. Their values don't matter because # they won't be used. 
- y_not_equal_0 = (y != 0) + y_not_equal_0 = y != 0 y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype)) - x, y = (paddle.where(y_not_equal_0, y, x), - paddle.where(y_not_equal_0, paddle.mod(x, y_safe), - paddle.zeros(y.shape, y.dtype))) + x, y = ( + paddle.where(y_not_equal_0, y, x), + paddle.where( + y_not_equal_0, + paddle.mod(x, y_safe), + paddle.zeros(y.shape, y.dtype), + ), + ) return (paddle.where(x < y, y, x), paddle.where(x < y, x, y)) if paddle.in_dynamic_mode(): @@ -4586,8 +4733,9 @@ def lcm(x, y, name=None): # they won't be used. d_equal_0 = paddle.equal(d, 0) d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d) - out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), - paddle.abs(x * y) // d_safe) + out = paddle.where( + d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe + ) return out @@ -4682,14 +4830,16 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): attrs_1 += ('starts', starts_1) ends_1 = [dim_len - 1] attrs_1 += ('ends', ends_1) - input_front = _C_ops.slice(new_input, axes, starts_1, ends_1, - infer_flags, []) + input_front = _C_ops.slice( + new_input, axes, starts_1, ends_1, infer_flags, [] + ) starts_2 = [1] attrs_2 += ('starts', starts_2) ends_2 = [dim_len] attrs_2 += ('ends', ends_2) - input_back = _C_ops.slice(new_input, axes, starts_2, ends_2, - infer_flags, []) + input_back = _C_ops.slice( + new_input, axes, starts_2, ends_2, infer_flags, [] + ) if x.dtype == paddle.bool: return _C_ops.logical_xor(input_back, input_front) @@ -4722,14 +4872,34 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): attrs_1 += ('starts', starts_1) ends_1 = [dim_len - 1] attrs_1 += ('ends', ends_1) - input_front = _legacy_C_ops.slice(new_input, None, None, None, None, 'axes', axes, \ - 'infer_flags', infer_flags, *attrs_1) + input_front = _legacy_C_ops.slice( + new_input, + None, + None, + None, + None, + 'axes', + axes, + 'infer_flags', + infer_flags, + *attrs_1 + ) starts_2 = [1] attrs_2 += ('starts', starts_2) ends_2 = [dim_len] attrs_2 += ('ends', ends_2) - input_back = _legacy_C_ops.slice(new_input, None, None, None, None, 'axes', axes, \ - 'infer_flags', infer_flags, *attrs_2) + input_back = _legacy_C_ops.slice( + new_input, + None, + None, + None, + None, + 'axes', + axes, + 'infer_flags', + infer_flags, + *attrs_2 + ) if x.dtype == paddle.bool: return _legacy_C_ops.logical_xor(input_back, input_front) @@ -4737,7 +4907,8 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): return elementwise_sub(input_back, input_front, axis=axis) else: check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff') + x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff' + ) check_type(axis, 'axis', (int), 'diff') helper = LayerHelper('diff', **locals()) has_pend = False @@ -4754,10 +4925,12 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): if has_pend: new_input = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='concat', - inputs={'X': input_list}, - outputs={'Out': [new_input]}, - attrs={'axis': axis}) + helper.append_op( + type='concat', + inputs={'X': input_list}, + outputs={'Out': [new_input]}, + attrs={'axis': axis}, + ) else: new_input = x @@ -4768,29 +4941,32 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): attrs_1['starts'] = starts_1 attrs_1['ends'] = ends_1 input_front = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='slice', - inputs={'Input': 
new_input}, - attrs=attrs_1, - outputs={'Out': input_front}) + helper.append_op( + type='slice', + inputs={'Input': new_input}, + attrs=attrs_1, + outputs={'Out': input_front}, + ) attrs_2 = {'axes': axes} starts_2 = [1] ends_2 = [dim_len] attrs_2['starts'] = starts_2 attrs_2['ends'] = ends_2 input_back = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='slice', - inputs={'Input': new_input}, - attrs=attrs_2, - outputs={'Out': input_back}) + helper.append_op( + type='slice', + inputs={'Input': new_input}, + attrs=attrs_2, + outputs={'Out': input_back}, + ) if dtype == paddle.bool: out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='logical_xor', - inputs={ - "X": input_back, - "Y": input_front - }, - outputs={"Out": out}) + helper.append_op( + type='logical_xor', + inputs={"X": input_back, "Y": input_front}, + outputs={"Out": out}, + ) else: out = elementwise_sub(input_back, input_front, axis=axis) @@ -4843,14 +5019,15 @@ def angle(x, name=None): elif paddle.in_dynamic_mode(): return _legacy_C_ops.angle(x) - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'complex64', 'complex128'], - 'angle') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle' + ) op_type = "angle" helper = LayerHelper(op_type, **locals()) inputs = {"X": x} out = helper.create_variable_for_type_inference( - dtype=_complex_to_real_dtype(x.dtype)) + dtype=_complex_to_real_dtype(x.dtype) + ) outputs = {"Out": out} helper.append_op(type=op_type, inputs=inputs, outputs=outputs) return out @@ -4899,11 +5076,9 @@ def heaviside(x, y, name=None): axis = -1 act = None if _non_static_mode(): - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -4935,35 +5110,37 @@ def frac(x, name=None): axis = -1 act = None if x.dtype not in [ - paddle.int32, paddle.int64, paddle.float32, paddle.float64 + paddle.int32, + paddle.int64, + paddle.float32, + paddle.float64, ]: raise TypeError( - "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}" - .format(x.dtype)) + "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format( + x.dtype + ) + ) if in_dygraph_mode(): y = _C_ops.trunc(x) return _C_ops.subtract(x, y) else: if _in_legacy_dygraph(): y = _legacy_C_ops.trunc(x) - return _elementwise_op_in_dygraph(x, - y, - axis=axis, - act=act, - op_name=op_type) + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type + ) else: inputs = {"X": x} attrs = {} helper = LayerHelper("trunc", **locals()) - check_variable_and_dtype(x, "X", - ['int32', 'int64', 'float32', 'float64'], - 'trunc') + check_variable_and_dtype( + x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc' + ) y = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type="trunc", - inputs=inputs, - attrs=attrs, - outputs={"Out": y}) + helper.append_op( + type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y} + ) return _elementwise_op(LayerHelper(op_type, **locals())) @@ -4993,12 +5170,17 @@ def sgn(x, name=None): """ if x.dtype not in [ - paddle.float16, paddle.float32, paddle.float64, paddle.complex64, - paddle.complex128 + paddle.float16, + paddle.float32, + paddle.float64, + paddle.complex64, + paddle.complex128, ]: raise TypeError( - "The data type of input must be one of 
['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}" - .format(x.dtype)) + "The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}".format( + x.dtype + ) + ) if paddle.is_complex(x): expand_x = paddle.as_real(x) x_abs = paddle.abs(x) @@ -5079,18 +5261,24 @@ def take(x, index, mode='raise', name=None): """ if mode not in ['raise', 'wrap', 'clip']: raise ValueError( - "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}." - .format(mode)) + "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format( + mode + ) + ) if paddle.in_dynamic_mode(): if not isinstance(index, (paddle.Tensor, Variable)): raise TypeError( "The type of 'index' must be Tensor, but got {}".format( - type(index))) + type(index) + ) + ) if index.dtype not in [paddle.int32, paddle.int64]: raise TypeError( - "The data type of 'index' must be one of ['int32', 'int64'], but got {}" - .format(index.dtype)) + "The data type of 'index' must be one of ['int32', 'int64'], but got {}".format( + index.dtype + ) + ) else: check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take') @@ -5105,8 +5293,9 @@ def take(x, index, mode='raise', name=None): elif mode == 'wrap': # The out of range indices are constrained by taking the remainder. index_1d = paddle.where(index_1d < 0, index_1d % max_index, index_1d) - index_1d = paddle.where(index_1d >= max_index, index_1d % max_index, - index_1d) + index_1d = paddle.where( + index_1d >= max_index, index_1d % max_index, index_1d + ) elif mode == 'clip': # 'clip' mode disables indexing with negative numbers. index_1d = clip(index_1d, 0, max_index - 1) @@ -5140,26 +5329,32 @@ def frexp(x, name=None): print(paddle.tensor.math.frexp(x)) # (Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,[[0.50000000, 0.50000000, 0.75000000, 0.50000000]]), # Tensor(shape=[1, 4], dtype=float32, place=Place(cpu), stop_gradient=True,[[1., 2., 2., 3.]])) - """ + """ if x.dtype not in [paddle.float32, paddle.float64]: raise TypeError( - "The data type of input must be one of ['float32', 'float64'], but got {}" - .format(x.dtype)) + "The data type of input must be one of ['float32', 'float64'], but got {}".format( + x.dtype + ) + ) input_x = paddle.abs(x) exponent = paddle.floor(paddle.log2(input_x)) - exponent = paddle.where(paddle.isinf(exponent), - paddle.full_like(exponent, 0), exponent) + exponent = paddle.where( + paddle.isinf(exponent), paddle.full_like(exponent, 0), exponent + ) # 0填充 mantissa = paddle.divide(input_x, 2**exponent) # 计算exponent - exponent = paddle.where((mantissa >= 1), - paddle.add(exponent, paddle.ones_like(exponent)), - exponent) - mantissa = paddle.where((mantissa >= 1), - paddle.divide(mantissa, - 2**paddle.ones_like(exponent)), - mantissa) + exponent = paddle.where( + (mantissa >= 1), + paddle.add(exponent, paddle.ones_like(exponent)), + exponent, + ) + mantissa = paddle.where( + (mantissa >= 1), + paddle.divide(mantissa, 2 ** paddle.ones_like(exponent)), + mantissa, + ) mantissa = paddle.where((x < 0), mantissa * -1, mantissa) return mantissa, exponent diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py index 46648f06d8205b765e32c6053d814828635b95c0..b3f88e9fcdf415b292fd71530cebffea95351e5a 100644 --- a/python/paddle/tensor/ops.py +++ b/python/paddle/tensor/ops.py @@ -12,13 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code +from .layer_function_generator import ( + generate_layer_fn, + generate_activation_fn, + generate_inplace_fn, + add_sample_code, +) from ..fluid.framework import in_dygraph_mode from .. import _C_ops __deprecated_func_name__ = { 'tanh_shrink': 'tanhshrink', - 'logsigmoid': 'log_sigmoid' + 'logsigmoid': 'log_sigmoid', } __activations_noattr__ = [ @@ -103,7 +108,8 @@ for _OP in set(__inplace_unary_func__): globals()[_OP] = _func add_sample_code( - globals()["sigmoid"], r""" + globals()["sigmoid"], + r""" Examples: .. code-block:: python @@ -115,10 +121,12 @@ Examples: print(out) # [0.40131234 0.450166 0.52497919 0.57444252] -""") +""", +) add_sample_code( - globals()["silu"], r""" + globals()["silu"], + r""" Examples: .. code-block:: python @@ -130,10 +138,12 @@ Examples: print(out) # [ 0.7310586 1.7615942 2.8577224, 3.9280552 ] -""") +""", +) add_sample_code( - globals()["logsigmoid"], r""" + globals()["logsigmoid"], + r""" Examples: .. code-block:: python @@ -145,10 +155,12 @@ Examples: print(out) # [-0.91301525 -0.79813887 -0.64439666 -0.55435524] -""") +""", +) add_sample_code( - globals()["exp"], r""" + globals()["exp"], + r""" Examples: .. code-block:: python @@ -159,10 +171,12 @@ Examples: print(out) # [0.67032005 0.81873075 1.10517092 1.34985881] -""") +""", +) add_sample_code( - globals()["expm1"], r""" + globals()["expm1"], + r""" Examples: .. code-block:: python @@ -173,10 +187,12 @@ Examples: print(out) # [-0.32967997, -0.18126924, 0.10517092, 0.34985882] -""") +""", +) add_sample_code( - globals()["tanh"], r""" + globals()["tanh"], + r""" Examples: .. code-block:: python @@ -187,10 +203,12 @@ Examples: print(out) # [-0.37994896 -0.19737532 0.09966799 0.29131261] -""") +""", +) add_sample_code( - globals()["atan"], r""" + globals()["atan"], + r""" Examples: .. code-block:: python @@ -201,10 +219,12 @@ Examples: print(out) # [-0.38050638 -0.19739556 0.09966865 0.29145679] -""") +""", +) add_sample_code( - globals()["tanh_shrink"], r""" + globals()["tanh_shrink"], + r""" Examples: .. code-block:: python @@ -216,10 +236,12 @@ Examples: print(out) # [-0.020051, -0.00262468, 0.000332005, 0.00868739] -""") +""", +) add_sample_code( - globals()["sqrt"], r""" + globals()["sqrt"], + r""" Examples: .. code-block:: python @@ -230,10 +252,12 @@ Examples: print(out) # [0.31622777 0.4472136 0.54772256 0.63245553] -""") +""", +) add_sample_code( - globals()["rsqrt"], r""" + globals()["rsqrt"], + r""" Examples: .. code-block:: python @@ -244,10 +268,12 @@ Examples: print(out) # [3.16227766 2.23606798 1.82574186 1.58113883] -""") +""", +) add_sample_code( - globals()["abs"], r""" + globals()["abs"], + r""" Examples: .. code-block:: python @@ -258,10 +284,12 @@ Examples: print(out) # [0.4 0.2 0.1 0.3] -""") +""", +) add_sample_code( - globals()["ceil"], r""" + globals()["ceil"], + r""" Examples: .. code-block:: python @@ -272,10 +300,12 @@ Examples: print(out) # [-0. -0. 1. 1.] -""") +""", +) add_sample_code( - globals()["floor"], r""" + globals()["floor"], + r""" Examples: .. code-block:: python @@ -286,10 +316,12 @@ Examples: print(out) # [-1. -1. 0. 0.] -""") +""", +) add_sample_code( - globals()["cos"], r""" + globals()["cos"], + r""" Examples: .. code-block:: python @@ -300,10 +332,12 @@ Examples: print(out) # [0.92106099 0.98006658 0.99500417 0.95533649] -""") +""", +) add_sample_code( - globals()["tan"], r""" + globals()["tan"], + r""" Examples: .. 
code-block:: python @@ -314,10 +348,12 @@ Examples: print(out) # [-0.42279324, -0.20271005, 0.10033467, 0.30933627] -""") +""", +) add_sample_code( - globals()["acos"], r""" + globals()["acos"], + r""" Examples: .. code-block:: python @@ -328,10 +364,12 @@ Examples: print(out) # [1.98231317 1.77215425 1.47062891 1.26610367] -""") +""", +) add_sample_code( - globals()["sin"], r""" + globals()["sin"], + r""" Examples: .. code-block:: python @@ -342,10 +380,12 @@ Examples: print(out) # [-0.38941834 -0.19866933 0.09983342 0.29552021] -""") +""", +) add_sample_code( - globals()["asin"], r""" + globals()["asin"], + r""" Examples: .. code-block:: python @@ -356,10 +396,12 @@ Examples: print(out) # [-0.41151685 -0.20135792 0.10016742 0.30469265] -""") +""", +) add_sample_code( - globals()["cosh"], r""" + globals()["cosh"], + r""" Examples: .. code-block:: python @@ -370,10 +412,12 @@ Examples: print(out) # [1.08107237 1.02006676 1.00500417 1.04533851] -""") +""", +) add_sample_code( - globals()["sinh"], r""" + globals()["sinh"], + r""" Examples: .. code-block:: python @@ -384,10 +428,12 @@ Examples: print(out) # [-0.41075233 -0.201336 0.10016675 0.30452029] -""") +""", +) add_sample_code( - globals()["asinh"], r""" + globals()["asinh"], + r""" Examples: .. code-block:: python @@ -398,10 +444,12 @@ Examples: print(out) # [-0.39003533, -0.19869010, 0.09983408, 0.29567307] -""") +""", +) add_sample_code( - globals()["acosh"], r""" + globals()["acosh"], + r""" Examples: .. code-block:: python @@ -412,10 +460,12 @@ Examples: print(out) # [0. , 1.76274729, 2.06343699, 2.29243159] -""") +""", +) add_sample_code( - globals()["atanh"], r""" + globals()["atanh"], + r""" Examples: .. code-block:: python @@ -426,10 +476,12 @@ Examples: print(out) # [-0.42364895, -0.20273256, 0.10033535, 0.30951962] -""") +""", +) add_sample_code( - globals()["round"], r""" + globals()["round"], + r""" Examples: .. code-block:: python @@ -440,10 +492,12 @@ Examples: print(out) # [-1. -0. 1. 2.] -""") +""", +) add_sample_code( - globals()["reciprocal"], r""" + globals()["reciprocal"], + r""" Examples: .. code-block:: python @@ -454,10 +508,12 @@ Examples: print(out) # [-2.5 -5. 10. 3.33333333] -""") +""", +) add_sample_code( - globals()["square"], r""" + globals()["square"], + r""" Examples: .. code-block:: python @@ -468,10 +524,12 @@ Examples: print(out) # [0.16 0.04 0.01 0.09] -""") +""", +) add_sample_code( - globals()["softplus"], r""" + globals()["softplus"], + r""" Examples: .. code-block:: python @@ -483,10 +541,12 @@ Examples: print(out) # [0.513015, 0.598139, 0.744397, 0.854355] -""") +""", +) add_sample_code( - globals()["softsign"], r""" + globals()["softsign"], + r""" Examples: .. 
code-block:: python @@ -498,7 +558,8 @@ Examples: print(out) # [-0.285714, -0.166667, 0.0909091, 0.230769] -""") +""", +) __all__ += ['erf'] diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index 208139b1abbb6de955a377c7732b66c59bd304cf..2449f9c3194e7004ab66b8a50d551e3721161543 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -17,12 +17,21 @@ from ..framework import core from ..framework import convert_np_dtype_to_dtype_, dygraph_only from ..framework import LayerHelper -from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape +from ..fluid.data_feeder import ( + check_variable_and_dtype, + check_type, + check_dtype, + check_shape, +) from ..fluid.layers import utils import paddle from paddle import _C_ops, _legacy_C_ops from paddle.static import Variable -from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph, _current_expected_place +from paddle.fluid.framework import ( + in_dygraph_mode, + _in_legacy_dygraph, + _current_expected_place, +) __all__ = [] @@ -75,11 +84,11 @@ def bernoulli(x, name=None): helper = LayerHelper("randint", **locals()) out = helper.create_variable_for_type_inference( - dtype=x.dtype) # maybe set out to int32 ? - helper.append_op(type='bernoulli', - inputs={"X": x}, - outputs={'Out': out}, - attrs={}) + dtype=x.dtype + ) # maybe set out to int32 ? + helper.append_op( + type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={} + ) out.stop_gradient = True return out @@ -124,10 +133,9 @@ def poisson(x, name=None): helper = LayerHelper("poisson", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='poisson', - inputs={'X': x}, - outputs={'Out': out}, - attrs={}) + helper.append_op( + type='poisson', inputs={'X': x}, outputs={'Out': out}, attrs={} + ) return out @@ -179,28 +187,30 @@ def multinomial(x, num_samples=1, replacement=False, name=None): """ - assert core.is_compiled_with_rocm() == False, ( - "multinomial op is not supported on ROCM yet.") + assert ( + core.is_compiled_with_rocm() == False + ), "multinomial op is not supported on ROCM yet." 
if in_dygraph_mode(): return _C_ops.multinomial(x, num_samples, replacement) if _in_legacy_dygraph(): - return _legacy_C_ops.multinomial(x, 'num_samples', num_samples, - 'replacement', replacement) + return _legacy_C_ops.multinomial( + x, 'num_samples', num_samples, 'replacement', replacement + ) check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial") helper = LayerHelper("multinomial", **locals()) out = helper.create_variable_for_type_inference( - dtype=convert_np_dtype_to_dtype_('int64')) - helper.append_op(type='multinomial', - inputs={"X": x}, - outputs={'Out': out}, - attrs={ - 'num_samples': num_samples, - 'replacement': replacement - }) + dtype=convert_np_dtype_to_dtype_('int64') + ) + helper.append_op( + type='multinomial', + inputs={"X": x}, + outputs={'Out': out}, + attrs={'num_samples': num_samples, 'replacement': replacement}, + ) out.stop_gradient = True return out @@ -237,23 +247,34 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None): dtype = paddle.framework.get_default_dtype() if dtype not in ['float32', 'float64']: raise TypeError( - "{} only supports [float32, float64], but the default dtype is {}" - .format(op_type_for_check, dtype)) + "{} only supports [float32, float64], but the default dtype is {}".format( + op_type_for_check, dtype + ) + ) if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) place = _current_expected_place() - return _C_ops.gaussian_random(shape, float(mean), float(std), seed, - dtype, place) + return _C_ops.gaussian_random( + shape, float(mean), float(std), seed, dtype, place + ) if _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - return _legacy_C_ops.gaussian_random('shape', shape, - 'mean', float(mean), 'std', - float(std), 'seed', seed, 'dtype', - dtype) + return _legacy_C_ops.gaussian_random( + 'shape', + shape, + 'mean', + float(mean), + 'std', + float(std), + 'seed', + seed, + 'dtype', + dtype, + ) check_shape(shape, op_type_for_check) check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check) @@ -264,19 +285,17 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None): 'std': std, 'seed': seed, 'dtype': dtype, - 'use_mkldnn': False + 'use_mkldnn': False, } - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type=op_type_for_check) + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check + ) helper = LayerHelper('gaussian', **locals()) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='gaussian_random', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) out.stop_gradient = True return out @@ -446,13 +465,19 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): check_type(std, 'std', (int, float, Variable), 'normal') if isinstance(mean, Variable): check_dtype( - mean.dtype, 'mean', ['float32', 'float64'], 'normal', - "If mean is Tensor, it's data type only support float32, float64." + mean.dtype, + 'mean', + ['float32', 'float64'], + 'normal', + "If mean is Tensor, it's data type only support float32, float64.", ) if isinstance(std, Variable): check_dtype( - std.dtype, 'std', ['float32', 'float64'], 'normal', - "If std is Tensor, it's data type only support float32, float64." 
+ std.dtype, + 'std', + ['float32', 'float64'], + 'normal', + "If std is Tensor, it's data type only support float32, float64.", ) if shape is not None: check_shape(shape, 'normal') @@ -549,23 +574,39 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): dtype = paddle.framework.get_default_dtype() if dtype not in ['float32', 'float64']: raise TypeError( - "uniform/rand only supports [float32, float64], but the default dtype is {}" - .format(dtype)) + "uniform/rand only supports [float32, float64], but the default dtype is {}".format( + dtype + ) + ) if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) - return _C_ops.uniform_random(shape, dtype, float(min), float(max), seed, - _current_expected_place()) + return _C_ops.uniform_random( + shape, + dtype, + float(min), + float(max), + seed, + _current_expected_place(), + ) if _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - return _legacy_C_ops.uniform_random('shape', - shape, 'min', float(min), 'max', - float(max), 'seed', seed, 'dtype', - dtype) + return _legacy_C_ops.uniform_random( + 'shape', + shape, + 'min', + float(min), + 'max', + float(max), + 'seed', + seed, + 'dtype', + dtype, + ) check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand') check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand') @@ -574,17 +615,15 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): inputs = dict() attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype} - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='uniform/rand') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand' + ) helper = LayerHelper("uniform", **locals()) out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type="uniform_random", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) out.stop_gradient = True return out @@ -627,8 +666,9 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None): if in_dygraph_mode(): return _C_ops.uniform_random_inplace_(x, min, max, seed, 0, 0, 1.0) else: - return _legacy_C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, - 'seed', seed) + return _legacy_C_ops.uniform_random_inplace_( + x, 'min', min, 'max', max, 'seed', seed + ) def randint(low=0, high=None, shape=[1], dtype=None, name=None): @@ -699,8 +739,10 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): if high is None: if low <= 0: raise ValueError( - "If high is None, low must be greater than 0, but received low = {0}." 
- .format(low)) + "If high is None, low must be greater than 0, but received low = {0}.".format( + low + ) + ) high = low low = 0 if dtype is None: @@ -714,29 +756,29 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): return _C_ops.randint(low, high, shape, dtype, place) if _in_legacy_dygraph(): shape = utils.convert_shape_to_list(shape) - return _legacy_C_ops.randint('shape', shape, 'low', low, 'high', high, - 'seed', 0, 'dtype', dtype) + return _legacy_C_ops.randint( + 'shape', shape, 'low', low, 'high', high, 'seed', 0, 'dtype', dtype + ) check_shape(shape, 'randint') check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint') if low >= high: raise ValueError( "randint's low must less then high, but received low = {0}, " - "high = {1}".format(low, high)) + "high = {1}".format(low, high) + ) inputs = dict() attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype} - utils.get_shape_tensor_inputs(inputs=inputs, - attrs=attrs, - shape=shape, - op_type='randint') + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='randint' + ) helper = LayerHelper("randint", **locals()) out = helper.create_variable_for_type_inference(dtype=dtype) - helper.append_op(type='randint', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) out.stop_gradient = True return out @@ -868,8 +910,10 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): if high is None: if low <= 0: raise ValueError( - "If high is None, low must be greater than 0, but received low = {0}." - .format(low)) + "If high is None, low must be greater than 0, but received low = {0}.".format( + low + ) + ) high = low low = 0 if dtype is None: @@ -881,36 +925,49 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): if low >= high: raise ValueError( "randint_like's low must less then high, but received low = {0}, " - "high = {1}".format(low, high)) + "high = {1}".format(low, high) + ) if paddle.in_dynamic_mode(): shape = utils.convert_shape_to_list(shape) - out = _legacy_C_ops.randint('shape', shape, 'low', low, 'high', high, - 'seed', 0, 'dtype', - core.VarDesc.VarType.INT64) + out = _legacy_C_ops.randint( + 'shape', + shape, + 'low', + low, + 'high', + high, + 'seed', + 0, + 'dtype', + core.VarDesc.VarType.INT64, + ) out = paddle.cast(out, dtype) return out check_shape(shape, 'randint_like') - check_dtype(dtype, 'dtype', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'randint_like') + check_dtype( + dtype, + 'dtype', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'randint_like', + ) inputs = {"ShapeTensor": shape} attrs = { 'low': low, 'high': high, 'seed': 0, - 'dtype': core.VarDesc.VarType.INT64 + 'dtype': core.VarDesc.VarType.INT64, } helper = LayerHelper("randint", **locals()) out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.INT64) - helper.append_op(type='randint', - inputs=inputs, - outputs={'Out': out}, - attrs=attrs) + dtype=core.VarDesc.VarType.INT64 + ) + helper.append_op( + type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs + ) out.stop_gradient = True out = paddle.cast(out, dtype) return out @@ -956,16 +1013,16 @@ def randperm(n, dtype="int64", name=None): if n < 1: raise ValueError("The input n should be greater than 0 in randperm op.") - check_dtype(dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], - 'randperm') + check_dtype( + dtype, 'dtype', ['int64', 'int32', 'float32', 
'float64'], 'randperm' + ) helper = LayerHelper("randperm", **locals()) out = helper.create_variable_for_type_inference(dtype) attrs = {'n': n, 'dtype': dtype, 'seed': 0} - helper.append_op(type='randperm', - inputs={}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs + ) out.stop_gradient = True return out @@ -1064,8 +1121,10 @@ def exponential_(x, lam=1.0, name=None): check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential") helper = LayerHelper("exponential", **locals()) - helper.append_op(type='exponential', - inputs={"X": x}, - outputs={'Out': x}, - attrs={"lambda": lam}) + helper.append_op( + type='exponential', + inputs={"X": x}, + outputs={'Out': x}, + attrs={"lambda": lam}, + ) return x diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index ffb6573c9eb181d5d605d43d9f7b9dc8696bfd3e..165809b39fb62974a964d101f048ba349f38f979 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -93,28 +93,30 @@ def argsort(x, axis=-1, descending=False, name=None): return ids if _in_legacy_dygraph(): - _, ids = _legacy_C_ops.argsort(x, 'axis', axis, 'descending', - descending) + _, ids = _legacy_C_ops.argsort( + x, 'axis', axis, 'descending', descending + ) return ids check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], - 'argsort') + x, + 'x', + ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], + 'argsort', + ) helper = LayerHelper("argsort", **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=True) - ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64, - stop_gradient=True) - helper.append_op(type='argsort', - inputs={'X': x}, - outputs={ - 'Out': out, - 'Indices': ids - }, - attrs={ - 'axis': axis, - 'descending': descending - }) + out = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True + ) + ids = helper.create_variable_for_type_inference( + VarDesc.VarType.INT64, stop_gradient=True + ) + helper.append_op( + type='argsort', + inputs={'X': x}, + outputs={'Out': out, 'Indices': ids}, + attrs={'axis': axis, 'descending': descending}, + ) return ids @@ -161,7 +163,8 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): if axis is not None and not isinstance(axis, (int, Variable)): raise TypeError( "The type of 'axis' must be int or Tensor or None in argmax, but received %s." 
- % (type(axis))) + % (type(axis)) + ) if dtype is None: raise ValueError( @@ -177,14 +180,26 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): if in_dygraph_mode(): return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype) if _in_legacy_dygraph(): - out = _legacy_C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, - 'keepdims', keepdim, 'flatten', flatten) + out = _legacy_C_ops.arg_max( + x, + 'axis', + axis, + 'dtype', + var_dtype, + 'keepdims', + keepdim, + 'flatten', + flatten, + ) return out helper = LayerHelper("argmax", **locals()) check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], - 'paddle.argmax') + x, + 'x', + ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], + 'paddle.argmax', + ) check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin') attrs = {} out = helper.create_variable_for_type_inference(var_dtype) @@ -192,10 +207,9 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): attrs['axis'] = axis attrs['flatten'] = flatten attrs['dtype'] = var_dtype - helper.append_op(type='arg_max', - inputs={'X': x}, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs + ) out.stop_gradient = True return out @@ -243,7 +257,8 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): if axis is not None and not isinstance(axis, (int, Variable)): raise TypeError( "The type of 'axis' must be int or Tensor or None in argmin, but received %s." - % (type(axis))) + % (type(axis)) + ) if dtype is None: raise ValueError( @@ -259,14 +274,26 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): if in_dygraph_mode(): return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype) if _in_legacy_dygraph(): - out = _legacy_C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, - 'keepdims', keepdim, 'flatten', flatten) + out = _legacy_C_ops.arg_min( + x, + 'axis', + axis, + 'dtype', + var_dtype, + 'keepdims', + keepdim, + 'flatten', + flatten, + ) return out helper = LayerHelper("argmin", **locals()) check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], - 'paddle.argmin') + x, + 'x', + ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], + 'paddle.argmin', + ) check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin') out = helper.create_variable_for_type_inference(var_dtype) attrs = {} @@ -274,10 +301,9 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): attrs['axis'] = axis attrs['flatten'] = flatten attrs['dtype'] = var_dtype - helper.append_op(type='arg_min', - inputs={'X': x}, - outputs={'Out': [out]}, - attrs=attrs) + helper.append_op( + type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs + ) out.stop_gradient = True return out @@ -325,20 +351,24 @@ def index_select(x, index, axis=0, name=None): return _legacy_C_ops.index_select(x, index, 'dim', axis) helper = LayerHelper("index_select", **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.search.index_select') - check_variable_and_dtype(index, 'index', ['int32', 'int64'], - 'paddle.tensor.search.index_select') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.search.index_select', + ) + check_variable_and_dtype( + index, 'index', ['int32', 'int64'], 'paddle.tensor.search.index_select' + ) out = helper.create_variable_for_type_inference(x.dtype) - 
helper.append_op(type='index_select', - inputs={ - 'X': x, - 'Index': index - }, - outputs={'Out': out}, - attrs={'dim': axis}) + helper.append_op( + type='index_select', + inputs={'X': x, 'Index': index}, + outputs={'Out': out}, + attrs={'dim': axis}, + ) return out @@ -406,11 +436,12 @@ def nonzero(x, as_tuple=False): helper = LayerHelper("where_index", **locals()) outs = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.INT64) + dtype=core.VarDesc.VarType.INT64 + ) - helper.append_op(type='where_index', - inputs={'Condition': x}, - outputs={'Out': [outs]}) + helper.append_op( + type='where_index', inputs={'Condition': x}, outputs={'Out': [outs]} + ) if not as_tuple: return outs @@ -419,7 +450,8 @@ def nonzero(x, as_tuple=False): else: for i in range(rank): list_out.append( - paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1])) + paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1]) + ) return tuple(list_out) @@ -484,24 +516,23 @@ def sort(x, axis=-1, descending=False, name=None): return outs if _in_legacy_dygraph(): - outs, _ = _legacy_C_ops.argsort(x, 'axis', axis, 'descending', - descending) + outs, _ = _legacy_C_ops.argsort( + x, 'axis', axis, 'descending', descending + ) return outs helper = LayerHelper("sort", **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype, - stop_gradient=False) - ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64, - stop_gradient=True) - helper.append_op(type='argsort', - inputs={'X': x}, - outputs={ - 'Out': out, - 'Indices': ids - }, - attrs={ - 'axis': axis, - 'descending': descending - }) + out = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=False + ) + ids = helper.create_variable_for_type_inference( + VarDesc.VarType.INT64, stop_gradient=True + ) + helper.append_op( + type='argsort', + inputs={'X': x}, + outputs={'Out': out, 'Indices': ids}, + attrs={'axis': axis, 'descending': descending}, + ) return out @@ -550,13 +581,12 @@ def mode(x, axis=-1, keepdim=False, name=None): values = helper.create_variable_for_type_inference(dtype=x.dtype) indices = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="mode", - inputs=inputs, - outputs={ - "Out": [values], - "Indices": [indices] - }, - attrs=attrs) + helper.append_op( + type="mode", + inputs=inputs, + outputs={"Out": [values], "Indices": [indices]}, + attrs=attrs, + ) indices.stop_gradient = True return values, indices @@ -618,12 +648,12 @@ def where(condition, x=None, y=None, name=None): if not paddle.in_dynamic_mode(): check_variable_and_dtype(condition, 'condition', ['bool'], 'where') - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'int32', 'int64'], - 'where') - check_variable_and_dtype(y, 'y', - ['float32', 'float64', 'int32', 'int64'], - 'where') + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where' + ) + check_variable_and_dtype( + y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where' + ) condition_shape = list(condition.shape) x_shape = list(x.shape) @@ -651,19 +681,22 @@ def where(condition, x=None, y=None, name=None): return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y) else: if _in_legacy_dygraph(): - return _legacy_C_ops.where(broadcast_condition, broadcast_x, - broadcast_y) + return _legacy_C_ops.where( + broadcast_condition, broadcast_x, broadcast_y + ) else: helper = LayerHelper("where", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='where', - 
inputs={ - 'Condition': broadcast_condition, - 'X': broadcast_x, - 'Y': broadcast_y - }, - outputs={'Out': [out]}) + helper.append_op( + type='where', + inputs={ + 'Condition': broadcast_condition, + 'X': broadcast_x, + 'Y': broadcast_y, + }, + outputs={'Out': [out]}, + ) return out @@ -748,19 +781,25 @@ def index_sample(x, index): return _legacy_C_ops.index_sample(x, index) else: helper = LayerHelper("index_sample", **locals()) - check_variable_and_dtype(x, 'x', - ['float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.search.index_sample') - check_variable_and_dtype(index, 'index', ['int32', 'int64'], - 'paddle.tensor.search.index_sample') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.search.index_sample', + ) + check_variable_and_dtype( + index, + 'index', + ['int32', 'int64'], + 'paddle.tensor.search.index_sample', + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='index_sample', - inputs={ - 'X': x, - 'Index': index - }, - outputs={'Out': out}) + helper.append_op( + type='index_sample', + inputs={'X': x, 'Index': index}, + outputs={'Out': out}, + ) return out @@ -800,17 +839,19 @@ def masked_select(x, mask, name=None): return _legacy_C_ops.masked_select(x, mask) helper = LayerHelper("masked_select", **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], - 'paddle.tensor.search.mask_select') - check_variable_and_dtype(mask, 'mask', ['bool'], - 'paddle.tensor.search.masked_select') + check_variable_and_dtype( + x, + 'x', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.tensor.search.mask_select', + ) + check_variable_and_dtype( + mask, 'mask', ['bool'], 'paddle.tensor.search.masked_select' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='masked_select', - inputs={ - 'X': x, - 'Mask': mask - }, - outputs={'Y': out}) + helper.append_op( + type='masked_select', inputs={'X': x, 'Mask': mask}, outputs={'Y': out} + ) return out @@ -870,12 +911,21 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): if _non_static_mode(): if axis is None: - out, indices = _legacy_C_ops.top_k_v2(x, 'k', int(k), 'largest', - largest, 'sorted', sorted) + out, indices = _legacy_C_ops.top_k_v2( + x, 'k', int(k), 'largest', largest, 'sorted', sorted + ) else: - out, indices = _legacy_C_ops.top_k_v2(x, 'k', int(k), 'axis', axis, - 'largest', largest, 'sorted', - sorted) + out, indices = _legacy_C_ops.top_k_v2( + x, + 'k', + int(k), + 'axis', + axis, + 'largest', + largest, + 'sorted', + sorted, + ) return out, indices helper = LayerHelper("top_k_v2", **locals()) @@ -893,13 +943,12 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): values = helper.create_variable_for_type_inference(dtype=x.dtype) indices = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="top_k_v2", - inputs=inputs, - outputs={ - "Out": [values], - "Indices": [indices] - }, - attrs=attrs) + helper.append_op( + type="top_k_v2", + inputs=inputs, + outputs={"Out": [values], "Indices": [indices]}, + attrs=attrs, + ) indices.stop_gradient = True return values, indices @@ -949,9 +998,12 @@ def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None): # [0, 1, 3, 2]]) """ - check_variable_and_dtype(sorted_sequence, 'SortedSequence', - ['float32', 'float64', 'int32', 'int64'], - 'paddle.searchsorted') + check_variable_and_dtype( + sorted_sequence, + 'SortedSequence', + ['float32', 'float64', 'int32', 
'int64'], + 'paddle.searchsorted', + ) if sorted_sequence.dim() != 1: raise ValueError( f"sorted_sequence tensor must be 1 dimension, but got dim {sorted_sequence.dim()}" @@ -959,11 +1011,9 @@ def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None): return searchsorted(sorted_sequence, x, out_int32, right, name) -def searchsorted(sorted_sequence, - values, - out_int32=False, - right=False, - name=None): +def searchsorted( + sorted_sequence, values, out_int32=False, right=False, name=None +): """ Find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`. @@ -1009,29 +1059,32 @@ def searchsorted(sorted_sequence, return _C_ops.searchsorted(sorted_sequence, values, out_int32, right) if _in_legacy_dygraph(): - return _legacy_C_ops.searchsorted(sorted_sequence, values, "out_int32", - out_int32, "right", right) + return _legacy_C_ops.searchsorted( + sorted_sequence, values, "out_int32", out_int32, "right", right + ) - check_variable_and_dtype(sorted_sequence, 'SortedSequence', - ['float32', 'float64', 'int32', 'int64'], - 'paddle.searchsorted') - check_variable_and_dtype(values, 'Values', - ['float32', 'float64', 'int32', 'int64'], - 'paddle.searchsorted') + check_variable_and_dtype( + sorted_sequence, + 'SortedSequence', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.searchsorted', + ) + check_variable_and_dtype( + values, + 'Values', + ['float32', 'float64', 'int32', 'int64'], + 'paddle.searchsorted', + ) helper = LayerHelper('searchsorted', **locals()) out_type = 'int32' if out_int32 else 'int64' out = helper.create_variable_for_type_inference(dtype=out_type) - helper.append_op(type='searchsorted', - inputs={ - 'SortedSequence': sorted_sequence, - "Values": values - }, - outputs={'Out': out}, - attrs={ - "out_int32": out_int32, - "right": right - }) + helper.append_op( + type='searchsorted', + inputs={'SortedSequence': sorted_sequence, "Values": values}, + outputs={'Out': out}, + attrs={"out_int32": out_int32, "right": right}, + ) return out @@ -1077,8 +1130,9 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None): if _non_static_mode(): if axis is not None: if _in_legacy_dygraph(): - return _legacy_C_ops.kthvalue(x, 'k', k, "axis", axis, - "keepdim", keepdim) + return _legacy_C_ops.kthvalue( + x, 'k', k, "axis", axis, "keepdim", keepdim + ) return _C_ops.kthvalue(x, k, axis, keepdim) else: if _in_legacy_dygraph(): @@ -1093,12 +1147,11 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None): values = helper.create_variable_for_type_inference(dtype=x.dtype) indices = helper.create_variable_for_type_inference(dtype="int64") - helper.append_op(type="kthvalue", - inputs=inputs, - outputs={ - "Out": [values], - "Indices": [indices] - }, - attrs=attrs) + helper.append_op( + type="kthvalue", + inputs=inputs, + outputs={"Out": [values], "Indices": [indices]}, + attrs=attrs, + ) indices.stop_gradient = True return values, indices diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index bbe98c6ab7e8972ad7a38de1e29778c582ce299d..371e3fafd057e5beda0fecd824ab974da2bbb3db 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -85,9 +85,11 @@ def mean(x, axis=None, keepdim=False, name=None): else: if isinstance(axis, int): axis = [axis] - reduce_all = True if axis is None \ - or len(axis)==0 \ - or len(axis) == len(x.shape) else False + reduce_all = ( + True + if axis is None or len(axis) == 0 or len(axis) == len(x.shape) + else False + ) if axis is None or len(axis) == 0: axis = 
[0] @@ -96,18 +98,27 @@ def mean(x, axis=None, keepdim=False, name=None): axis = list(range(len(x.shape))) return _C_ops.mean(x, axis, keepdim) if _in_legacy_dygraph(): - return _legacy_C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim, - 'reduce_all', reduce_all) - - check_variable_and_dtype(x, 'x/input', - ['uint16', 'float16', 'float32', 'float64'], - 'mean/reduce_mean') - check_type(axis, 'axis/dim', (int, list, tuple, Variable), - 'mean/reduce_mean') + return _legacy_C_ops.reduce_mean( + x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all + ) + + check_variable_and_dtype( + x, + 'x/input', + ['uint16', 'float16', 'float32', 'float64'], + 'mean/reduce_mean', + ) + check_type( + axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean' + ) if isinstance(axis, (list, tuple)): for item in axis: - check_type(item, 'elements of axis/dim', (int, Variable), - 'mean/reduce_mean') + check_type( + item, + 'elements of axis/dim', + (int, Variable), + 'mean/reduce_mean', + ) helper = LayerHelper('mean', **locals()) @@ -115,10 +126,9 @@ def mean(x, axis=None, keepdim=False, name=None): axis = utils._convert_to_tensor_list(axis) attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all} out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='reduce_mean', - inputs={'X': x}, - outputs={'Out': out}, - attrs=attrs) + helper.append_op( + type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs + ) return out @@ -156,15 +166,16 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var') u = mean(x, axis, True, name) - out = paddle.sum((x - u)**2, axis, keepdim=keepdim, name=name) + out = paddle.sum((x - u) ** 2, axis, keepdim=keepdim, name=name) dtype = x.dtype - n = paddle.cast(paddle.numel(x), paddle.int64) \ - / paddle.cast(paddle.numel(out), paddle.int64) + n = paddle.cast(paddle.numel(x), paddle.int64) / paddle.cast( + paddle.numel(out), paddle.int64 + ) n = n.astype(dtype) if unbiased: one_const = paddle.ones([1], x.dtype) - n = where(n > one_const, n - 1., one_const) + n = where(n > one_const, n - 1.0, one_const) out /= n return out @@ -252,7 +263,8 @@ def numel(x, name=None): raise TypeError("x must be a Tensor in numel") helper = LayerHelper('numel', **locals()) out = helper.create_variable_for_type_inference( - dtype=core.VarDesc.VarType.INT64) + dtype=core.VarDesc.VarType.INT64 + ) helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out}) return out @@ -320,8 +332,9 @@ def nanmedian(x, axis=None, keepdim=True, name=None): ) for i in range(len(axis)): - if not isinstance(axis[i], int) or not (axis[i] < dims - and axis[i] >= -dims): + if not isinstance(axis[i], int) or not ( + axis[i] < dims and axis[i] >= -dims + ): raise ValueError( "Axis should be None, int, or a list, element should in range [-rank(x), rank(x))." 
) @@ -332,25 +345,25 @@ def nanmedian(x, axis=None, keepdim=True, name=None): raise ValueError("Axis has duplicated elements.") if _in_legacy_dygraph(): - median_index, out = _legacy_C_ops.nanmedian(x, 'axis', axis, 'keepdim', - keepdim) + median_index, out = _legacy_C_ops.nanmedian( + x, 'axis', axis, 'keepdim', keepdim + ) return out check_variable_and_dtype( - x, 'X', ['int32', 'int64', 'float16', 'float32', 'float64'], - 'nanmedian') + x, 'X', ['int32', 'int64', 'float16', 'float32', 'float64'], 'nanmedian' + ) helper = LayerHelper('nanmedian', **locals()) attrs = {'axis': axis, 'keepdim': keepdim} out = helper.create_variable_for_type_inference(x.dtype) medians = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='nanmedian', - inputs={'X': x}, - outputs={ - 'Out': out, - 'MedianIndex': medians - }, - attrs=attrs) + helper.append_op( + type='nanmedian', + inputs={'X': x}, + outputs={'Out': out, 'MedianIndex': medians}, + attrs=attrs, + ) return out @@ -423,21 +436,22 @@ def median(x, axis=None, keepdim=False, name=None): dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32' if sz & 1 == 0: out_tensor = paddle.slice( - tensor_topk, axes=[axis], starts=[kth - 1], - ends=[kth]) + paddle.slice( - tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]) + tensor_topk, axes=[axis], starts=[kth - 1], ends=[kth] + ) + paddle.slice(tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]) out_tensor = paddle.cast(out_tensor, dtype=dtype) / 2 else: - out_tensor = paddle.cast(paddle.slice(tensor_topk, - axes=[axis], - starts=[kth], - ends=[kth + 1]), - dtype=dtype) + out_tensor = paddle.cast( + paddle.slice( + tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1] + ), + dtype=dtype, + ) out_tensor = out_tensor + paddle.sum( - paddle.cast(paddle.isnan(x), dtype=dtype) * x, axis=axis, keepdim=True) + paddle.cast(paddle.isnan(x), dtype=dtype) * x, axis=axis, keepdim=True + ) if not keepdim or is_flatten: if not is_flatten: - newshape = x.shape[:axis] + x.shape[axis + 1:] + newshape = x.shape[:axis] + x.shape[axis + 1 :] elif not keepdim: newshape = [1] else: @@ -501,7 +515,8 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False): axis_src, axis_dst = [], [] for axis_single in axis: if not isinstance(axis_single, int) or not ( - axis_single < dims and axis_single >= -dims): + axis_single < dims and axis_single >= -dims + ): raise ValueError( "Axis should be None, int, or a list, element should in range [-rank(x), rank(x))." 
) @@ -523,9 +538,9 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False): out_shape[axis] = 1 mask = x.isnan() - valid_counts = mask.logical_not().sum(axis=axis, - keepdim=True, - dtype='float64') + valid_counts = mask.logical_not().sum( + axis=axis, keepdim=True, dtype='float64' + ) indices = [] @@ -552,15 +567,18 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False): for index in indices: indices_below = paddle.floor(index).astype(paddle.int32) indices_upper = paddle.ceil(index).astype(paddle.int32) - tensor_upper = paddle.take_along_axis(sorted_tensor, - indices_upper, - axis=axis) - tensor_below = paddle.take_along_axis(sorted_tensor, - indices_below, - axis=axis) - weights = (index - indices_below.astype('float64')) - out = paddle.lerp(tensor_below.astype('float64'), - tensor_upper.astype('float64'), weights) + tensor_upper = paddle.take_along_axis( + sorted_tensor, indices_upper, axis=axis + ) + tensor_below = paddle.take_along_axis( + sorted_tensor, indices_below, axis=axis + ) + weights = index - indices_below.astype('float64') + out = paddle.lerp( + tensor_below.astype('float64'), + tensor_upper.astype('float64'), + weights, + ) if not keepdim: out = paddle.squeeze(out, axis=axis) else: diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index 76f5c603f0f4b41e10f6db344153f0b47b817f82..4739628f7176b4023abb618759dce57d8d929f98 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -30,11 +30,13 @@ class PrintOptions(object): DEFAULT_PRINT_OPTIONS = PrintOptions() -def set_printoptions(precision=None, - threshold=None, - edgeitems=None, - sci_mode=None, - linewidth=None): +def set_printoptions( + precision=None, + threshold=None, + edgeitems=None, + sci_mode=None, + linewidth=None, +): """Set the printing options for Tensor. 
Args: @@ -105,29 +107,35 @@ def _to_summary(var): return var elif len(var.shape) == 1: if var.shape[0] > 2 * edgeitems: - return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]]) + return np.concatenate([var[:edgeitems], var[(-1 * edgeitems) :]]) else: return var else: # recursively handle all dimensions if var.shape[0] > 2 * edgeitems: begin = [x for x in var[:edgeitems]] - end = [x for x in var[(-1 * edgeitems):]] + end = [x for x in var[(-1 * edgeitems) :]] return np.stack([_to_summary(x) for x in (begin + end)]) else: return np.stack([_to_summary(x) for x in var]) def _format_item(np_var, max_width=0, signed=False): - if np_var.dtype == np.float32 or np_var.dtype == np.float64 or np_var.dtype == np.float16: + if ( + np_var.dtype == np.float32 + or np_var.dtype == np.float64 + or np_var.dtype == np.float16 + ): if DEFAULT_PRINT_OPTIONS.sci_mode: item_str = '{{:.{}e}}'.format( - DEFAULT_PRINT_OPTIONS.precision).format(np_var) + DEFAULT_PRINT_OPTIONS.precision + ).format(np_var) elif np.ceil(np_var) == np_var: item_str = '{:.0f}.'.format(np_var) else: item_str = '{{:.{}f}}'.format( - DEFAULT_PRINT_OPTIONS.precision).format(np_var) + DEFAULT_PRINT_OPTIONS.precision + ).format(np_var) else: item_str = '{}'.format(np_var) @@ -180,42 +188,56 @@ def _format_tensor(var, summary, indent=0, max_width=0, signed=False): items_per_line = max(1, items_per_line) if summary and var.shape[0] > 2 * edgeitems: - items = [ - _format_item(item, max_width, signed) - for item in list(var)[:edgeitems] - ] + ['...'] + [ - _format_item(item, max_width, signed) - for item in list(var)[(-1 * edgeitems):] - ] + items = ( + [ + _format_item(item, max_width, signed) + for item in list(var)[:edgeitems] + ] + + ['...'] + + [ + _format_item(item, max_width, signed) + for item in list(var)[(-1 * edgeitems) :] + ] + ) else: items = [ _format_item(item, max_width, signed) for item in list(var) ] lines = [ - items[i:i + items_per_line] + items[i : i + items_per_line] for i in range(0, len(items), items_per_line) ] s = (',\n' + ' ' * (indent + 1)).join( - [', '.join(line) for line in lines]) + [', '.join(line) for line in lines] + ) return '[' + s + ']' else: # recursively handle all dimensions if summary and var.shape[0] > 2 * edgeitems: - vars = [ - _format_tensor(x, summary, indent + 1, max_width, signed) - for x in var[:edgeitems] - ] + ['...'] + [ - _format_tensor(x, summary, indent + 1, max_width, signed) - for x in var[(-1 * edgeitems):] - ] + vars = ( + [ + _format_tensor(x, summary, indent + 1, max_width, signed) + for x in var[:edgeitems] + ] + + ['...'] + + [ + _format_tensor(x, summary, indent + 1, max_width, signed) + for x in var[(-1 * edgeitems) :] + ] + ) else: vars = [ _format_tensor(x, summary, indent + 1, max_width, signed) for x in var ] - return '[' + (',' + '\n' * (len(var.shape) - 1) + ' ' * - (indent + 1)).join(vars) + ']' + return ( + '[' + + (',' + '\n' * (len(var.shape) - 1) + ' ' * (indent + 1)).join( + vars + ) + + ']' + ) def to_string(var, prefix='Tensor'): @@ -248,19 +270,19 @@ def to_string(var, prefix='Tensor'): max_width, signed = _get_max_width(_to_summary(np_var)) - data = _format_tensor(np_var, - summary, - indent=indent, - max_width=max_width, - signed=signed) + data = _format_tensor( + np_var, summary, indent=indent, max_width=max_width, signed=signed + ) - return _template.format(prefix=prefix, - shape=var.shape, - dtype=dtype, - place=var._place_str, - stop_gradient=var.stop_gradient, - indent=' ' * indent, - data=data) + return _template.format( + prefix=prefix, + 
shape=var.shape, + dtype=dtype, + place=var._place_str, + stop_gradient=var.stop_gradient, + indent=' ' * indent, + data=data, + ) def _format_dense_tensor(tensor, indent): @@ -282,11 +304,9 @@ def _format_dense_tensor(tensor, indent): max_width, signed = _get_max_width(_to_summary(np_tensor)) - data = _format_tensor(np_tensor, - sumary, - indent=indent, - max_width=max_width, - signed=signed) + data = _format_tensor( + np_tensor, sumary, indent=indent, max_width=max_width, signed=signed + ) return data @@ -297,38 +317,47 @@ def sparse_tensor_to_string(tensor, prefix='Tensor'): indices_tensor = tensor.indices() values_tensor = tensor.values() indices_data = 'indices=' + _format_dense_tensor( - indices_tensor, indent + len('indices=')) - values_data = 'values=' + _format_dense_tensor(values_tensor, - indent + len('values=')) - return _template.format(prefix=prefix, - shape=tensor.shape, - dtype=tensor.dtype, - place=tensor._place_str, - stop_gradient=tensor.stop_gradient, - indent=' ' * indent, - indices=indices_data, - values=values_data) + indices_tensor, indent + len('indices=') + ) + values_data = 'values=' + _format_dense_tensor( + values_tensor, indent + len('values=') + ) + return _template.format( + prefix=prefix, + shape=tensor.shape, + dtype=tensor.dtype, + place=tensor._place_str, + stop_gradient=tensor.stop_gradient, + indent=' ' * indent, + indices=indices_data, + values=values_data, + ) else: _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient}, \n{indent}{crows}, \n{indent}{cols}, \n{indent}{values})" crows_tensor = tensor.crows() cols_tensor = tensor.cols() elements_tensor = tensor.values() - crows_data = 'crows=' + _format_dense_tensor(crows_tensor, - indent + len('crows=')) - cols_data = 'cols=' + _format_dense_tensor(cols_tensor, - indent + len('cols=')) - values_data = 'values=' + _format_dense_tensor(elements_tensor, - indent + len('values=')) - - return _template.format(prefix=prefix, - shape=tensor.shape, - dtype=tensor.dtype, - place=tensor._place_str, - stop_gradient=tensor.stop_gradient, - indent=' ' * indent, - crows=crows_data, - cols=cols_data, - values=values_data) + crows_data = 'crows=' + _format_dense_tensor( + crows_tensor, indent + len('crows=') + ) + cols_data = 'cols=' + _format_dense_tensor( + cols_tensor, indent + len('cols=') + ) + values_data = 'values=' + _format_dense_tensor( + elements_tensor, indent + len('values=') + ) + + return _template.format( + prefix=prefix, + shape=tensor.shape, + dtype=tensor.dtype, + place=tensor._place_str, + stop_gradient=tensor.stop_gradient, + indent=' ' * indent, + crows=crows_data, + cols=cols_data, + values=values_data, + ) def tensor_to_string(tensor, prefix='Tensor'): @@ -347,10 +376,12 @@ def tensor_to_string(tensor, prefix='Tensor'): return "Tensor(Not initialized)" else: data = _format_dense_tensor(tensor, indent) - return _template.format(prefix=prefix, - shape=tensor.shape, - dtype=dtype, - place=tensor._place_str, - stop_gradient=tensor.stop_gradient, - indent=' ' * indent, - data=data) + return _template.format( + prefix=prefix, + shape=tensor.shape, + dtype=dtype, + place=tensor._place_str, + stop_gradient=tensor.stop_gradient, + indent=' ' * indent, + data=data, + ) diff --git a/python/paddle/tests/dist_hapi_mnist_dynamic.py b/python/paddle/tests/dist_hapi_mnist_dynamic.py index 71c63eb62b4fe2387b2ae918524da6cfdf90d2f6..50e77976da4ae9994ca9b9344d9e3eafd4baaebb 100644 --- a/python/paddle/tests/dist_hapi_mnist_dynamic.py +++ 
b/python/paddle/tests/dist_hapi_mnist_dynamic.py @@ -28,7 +28,6 @@ from paddle.vision.datasets import MNIST class MnistDataset(MNIST): - def __init__(self, mode, return_label=True): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -37,7 +36,7 @@ class MnistDataset(MNIST): img = np.reshape(self.images[idx], [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) @@ -52,10 +51,10 @@ def compute_accuracy(pred, gt): return np.sum(correct) / correct.shape[0] -@unittest.skipIf(not fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestDistTraning(unittest.TestCase): - def test_dynamic_multiple_gpus(self): device = set_device('gpu') @@ -66,9 +65,9 @@ class TestDistTraning(unittest.TestCase): labels = [Input([None, 1], 'int64', 'label')] model = Model(LeNet(), inputs, labels) - optim = fluid.optimizer.Momentum(learning_rate=0.001, - momentum=.9, - parameter_list=model.parameters()) + optim = fluid.optimizer.Momentum( + learning_rate=0.001, momentum=0.9, parameter_list=model.parameters() + ) model.prepare(optim, CrossEntropyLoss(), Accuracy()) train_dataset = MnistDataset(mode='train') @@ -76,17 +75,19 @@ class TestDistTraning(unittest.TestCase): test_dataset = MnistDataset(mode='test', return_label=False) cbk = paddle.callbacks.ProgBarLogger(50) - model.fit(train_dataset, - val_dataset, - epochs=2, - batch_size=batch_size, - callbacks=cbk) + model.fit( + train_dataset, + val_dataset, + epochs=2, + batch_size=batch_size, + callbacks=cbk, + ) eval_result = model.evaluate(val_dataset, batch_size=batch_size) - output = model.predict(test_dataset, - batch_size=batch_size, - stack_outputs=True) + output = model.predict( + test_dataset, batch_size=batch_size, stack_outputs=True + ) np.testing.assert_equal(output[0].shape[0], len(test_dataset)) diff --git a/python/paddle/tests/dist_hapi_mnist_static.py b/python/paddle/tests/dist_hapi_mnist_static.py index cc3b65725013c77dd75f2febcd9244276fcbfa67..ce7e5c18a6a61a262bfe344c4af6271f90beb3b5 100644 --- a/python/paddle/tests/dist_hapi_mnist_static.py +++ b/python/paddle/tests/dist_hapi_mnist_static.py @@ -28,7 +28,6 @@ from paddle.vision.datasets import MNIST class MnistDataset(MNIST): - def __init__(self, mode, return_label=True): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -37,7 +36,7 @@ class MnistDataset(MNIST): img = np.reshape(self.images[idx], [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) @@ -52,10 +51,10 @@ def compute_accuracy(pred, gt): return np.sum(correct) / correct.shape[0] -@unittest.skipIf(not fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestDistTraning(unittest.TestCase): - def test_static_multiple_gpus(self): paddle.enable_static() device = set_device('gpu') @@ -67,9 +66,9 @@ class TestDistTraning(unittest.TestCase): labels = [Input([None, 1], 'int64', 'label')] model = Model(LeNet(), inputs, labels) - optim = fluid.optimizer.Momentum(learning_rate=0.001, - momentum=.9, - parameter_list=model.parameters()) + optim = fluid.optimizer.Momentum( + learning_rate=0.001, momentum=0.9, parameter_list=model.parameters() + ) 
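# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the hunks in
# these test files are mechanical re-wrapping of call sites.  The layout is
# consistent with black-style rules: keep a call on one line if it fits the
# configured line length; otherwise put all arguments on a single indented
# continuation line; only if that still does not fit, place one argument per
# line and keep a trailing comma.  The function and values below are
# hypothetical and exist only to show the three layouts side by side.
def train(dataset, epochs=1, batch_size=32, callbacks=None, save_dir=None):
    pass

train(dataset=None, epochs=2)  # layout 1: the whole call fits on one line
train(
    dataset=None, epochs=2, batch_size=64, callbacks=[]
)  # layout 2: used when the call is too long but the arguments fit together
train(
    dataset=None,
    epochs=2,
    batch_size=64,
    callbacks=[],
    save_dir="/tmp/checkpoints_example",  # hypothetical path
)  # layout 3: arguments do not fit together, so one per line, trailing comma
# ---------------------------------------------------------------------------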
model.prepare(optim, CrossEntropyLoss(), Accuracy()) train_dataset = MnistDataset(mode='train') @@ -77,17 +76,19 @@ class TestDistTraning(unittest.TestCase): test_dataset = MnistDataset(mode='test', return_label=False) cbk = paddle.callbacks.ProgBarLogger(50) - model.fit(train_dataset, - val_dataset, - epochs=2, - batch_size=batch_size, - callbacks=cbk) + model.fit( + train_dataset, + val_dataset, + epochs=2, + batch_size=batch_size, + callbacks=cbk, + ) eval_result = model.evaluate(val_dataset, batch_size=batch_size) - output = model.predict(test_dataset, - batch_size=batch_size, - stack_outputs=True) + output = model.predict( + test_dataset, batch_size=batch_size, stack_outputs=True + ) np.testing.assert_equal(output[0].shape[0], len(test_dataset)) diff --git a/python/paddle/tests/dist_hapi_pure_fp16_static.py b/python/paddle/tests/dist_hapi_pure_fp16_static.py index 0d7fa491fc1a6accdda739a405338778de448339..6be15ec44be1f80f01f40b1a733e7007c5db2901 100644 --- a/python/paddle/tests/dist_hapi_pure_fp16_static.py +++ b/python/paddle/tests/dist_hapi_pure_fp16_static.py @@ -25,10 +25,10 @@ from paddle.nn.layer.loss import CrossEntropyLoss from paddle.vision.models import LeNet -@unittest.skipIf(not fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestDistTraningWithPureFP16(unittest.TestCase): - def test_amp_training_purefp16(self): if not fluid.is_compiled_with_cuda(): self.skipTest('module not tested when ONLY_CPU compling') @@ -42,13 +42,17 @@ class TestDistTraningWithPureFP16(unittest.TestCase): inputs = InputSpec([None, 1, 28, 28], "float32", 'x') labels = InputSpec([None, 1], "int64", "y") model = Model(net, inputs, labels) - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters(), - multi_precision=True) + optim = paddle.optimizer.Adam( + learning_rate=0.001, + parameters=model.parameters(), + multi_precision=True, + ) amp_configs = {"level": amp_level, "use_fp16_guard": False} - model.prepare(optimizer=optim, - loss=CrossEntropyLoss(reduction="sum"), - amp_configs=amp_configs) + model.prepare( + optimizer=optim, + loss=CrossEntropyLoss(reduction="sum"), + amp_configs=amp_configs, + ) model.train_batch([data], [label]) diff --git a/python/paddle/tests/hapi_mnist_bf16_static.py b/python/paddle/tests/hapi_mnist_bf16_static.py index 55db397be493be4ecde78cd2ac6f1dc0b752c6a1..489215301ed5969104cad60a69de3bd399c59ab3 100644 --- a/python/paddle/tests/hapi_mnist_bf16_static.py +++ b/python/paddle/tests/hapi_mnist_bf16_static.py @@ -37,17 +37,18 @@ set_device('cpu') def parse_args(): parser = argparse.ArgumentParser("Lenet BF16 train static script") - parser.add_argument('-bf16', - '--bf16', - type=ast.literal_eval, - default=False, - help="whether use bf16") + parser.add_argument( + '-bf16', + '--bf16', + type=ast.literal_eval, + default=False, + help="whether use bf16", + ) args = parser.parse_args() return args class MnistDataset(MNIST): - def __init__(self, mode, return_label=True): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -56,7 +57,7 @@ class MnistDataset(MNIST): img = np.reshape(self.images[idx], [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) @@ -73,8 +74,12 @@ def compute_accuracy(pred, gt): def main(args): print('download training data and load training data') - train_dataset = 
MnistDataset(mode='train', ) - val_dataset = MnistDataset(mode='test', ) + train_dataset = MnistDataset( + mode='train', + ) + val_dataset = MnistDataset( + mode='test', + ) test_dataset = MnistDataset(mode='test', return_label=False) im_shape = (-1, 1, 28, 28) @@ -88,10 +93,20 @@ def main(args): if args.bf16: optim = amp.bf16.decorate_bf16( optim, - amp_lists=amp.bf16.AutoMixedPrecisionListsBF16(custom_bf16_list={ - 'matmul_v2', 'pool2d', 'relu', 'scale', 'elementwise_add', - 'reshape2', 'slice', 'reduce_mean', 'conv2d' - }, )) + amp_lists=amp.bf16.AutoMixedPrecisionListsBF16( + custom_bf16_list={ + 'matmul_v2', + 'pool2d', + 'relu', + 'scale', + 'elementwise_add', + 'reshape2', + 'slice', + 'reduce_mean', + 'conv2d', + }, + ), + ) # Configuration model model.prepare(optim, paddle.nn.CrossEntropyLoss(), Accuracy()) @@ -103,9 +118,9 @@ def main(args): model.fit(train_dataset, epochs=2, batch_size=batch_size, verbose=1) eval_result = model.evaluate(val_dataset, batch_size=batch_size, verbose=1) - output = model.predict(test_dataset, - batch_size=batch_size, - stack_outputs=True) + output = model.predict( + test_dataset, batch_size=batch_size, stack_outputs=True + ) np.testing.assert_equal(output[0].shape[0], len(test_dataset)) diff --git a/python/paddle/tests/hubconf.py b/python/paddle/tests/hubconf.py index d730dcff51e244543fd08529f2eae5a5e007dca8..921948983311218573adcd508d94d1a9e8993971 100644 --- a/python/paddle/tests/hubconf.py +++ b/python/paddle/tests/hubconf.py @@ -19,5 +19,5 @@ from test_hapi_hub_model import MM as _MM def MM(out_channels=8, pretrained=False): '''This is a test demo for paddle hub - ''' + ''' # fmt: skip return _MM(out_channels) diff --git a/python/paddle/tests/test_async_read_write.py b/python/paddle/tests/test_async_read_write.py index a235b7864809a80eda8320299f7803a99a89a48d..682f72422d6068a60a9c3cb52543c5b46bf7729e 100644 --- a/python/paddle/tests/test_async_read_write.py +++ b/python/paddle/tests/test_async_read_write.py @@ -22,60 +22,78 @@ from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph class TestAsyncRead(unittest.TestCase): - def func_setUp(self): - self.empty = paddle.to_tensor(np.array([], dtype="int64"), - place=paddle.CPUPlace()) + self.empty = paddle.to_tensor( + np.array([], dtype="int64"), place=paddle.CPUPlace() + ) data = np.random.randn(100, 50, 50).astype("float32") self.src = paddle.to_tensor(data, place=paddle.CUDAPinnedPlace()) self.dst = paddle.empty(shape=[100, 50, 50], dtype="float32") - self.index = paddle.to_tensor(np.array([1, 3, 5, 7, 9], - dtype="int64")).cpu() - self.buffer = paddle.empty(shape=[50, 50, 50], - dtype="float32").pin_memory() + self.index = paddle.to_tensor( + np.array([1, 3, 5, 7, 9], dtype="int64") + ).cpu() + self.buffer = paddle.empty( + shape=[50, 50, 50], dtype="float32" + ).pin_memory() self.stream = cuda.Stream() def func_test_async_read_empty_offset_and_count(self): with cuda.stream_guard(self.stream): if _in_legacy_dygraph(): - core.async_read(self.src, self.dst, self.index, self.buffer, - self.empty, self.empty) + core.async_read( + self.src, + self.dst, + self.index, + self.buffer, + self.empty, + self.empty, + ) else: - core.eager.async_read(self.src, self.dst, self.index, - self.buffer, self.empty, self.empty) + core.eager.async_read( + self.src, + self.dst, + self.index, + self.buffer, + self.empty, + self.empty, + ) array1 = paddle.gather(self.src, self.index) - array2 = self.dst[:len(self.index)] + array2 = self.dst[: len(self.index)] np.testing.assert_allclose(array1.numpy(), 
array2.numpy(), rtol=1e-05) def func_test_async_read_success(self): - offset = paddle.to_tensor(np.array([10, 20], dtype="int64"), - place=paddle.CPUPlace()) - count = paddle.to_tensor(np.array([5, 10], dtype="int64"), - place=paddle.CPUPlace()) + offset = paddle.to_tensor( + np.array([10, 20], dtype="int64"), place=paddle.CPUPlace() + ) + count = paddle.to_tensor( + np.array([5, 10], dtype="int64"), place=paddle.CPUPlace() + ) with cuda.stream_guard(self.stream): if _in_legacy_dygraph(): - core.async_read(self.src, self.dst, self.index, self.buffer, - offset, count) + core.async_read( + self.src, self.dst, self.index, self.buffer, offset, count + ) else: - core.eager.async_read(self.src, self.dst, self.index, - self.buffer, offset, count) + core.eager.async_read( + self.src, self.dst, self.index, self.buffer, offset, count + ) # index data index_array1 = paddle.gather(self.src, self.index) count_numel = paddle.sum(count).numpy()[0] - index_array2 = self.dst[count_numel:count_numel + len(self.index)] - np.testing.assert_allclose(index_array1.numpy(), - index_array2.numpy(), - rtol=1e-05) + index_array2 = self.dst[count_numel : count_numel + len(self.index)] + np.testing.assert_allclose( + index_array1.numpy(), index_array2.numpy(), rtol=1e-05 + ) # offset, count offset_a = paddle.gather(self.src, paddle.to_tensor(np.arange(10, 15))) offset_b = paddle.gather(self.src, paddle.to_tensor(np.arange(20, 30))) offset_array1 = paddle.concat([offset_a, offset_b], axis=0) offset_array2 = self.dst[:count_numel] - np.testing.assert_allclose(offset_array1.numpy(), - offset_array2.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + offset_array1.numpy(), offset_array2.numpy(), rtol=1e-05 + ) def func_test_async_read_only_1dim(self): src = paddle.rand([40], dtype="float32").pin_memory() @@ -83,13 +101,15 @@ class TestAsyncRead(unittest.TestCase): buffer_ = paddle.empty([20]).pin_memory() with cuda.stream_guard(self.stream): if _in_legacy_dygraph(): - core.async_read(src, dst, self.index, buffer_, self.empty, - self.empty) + core.async_read( + src, dst, self.index, buffer_, self.empty, self.empty + ) else: - core.eager.async_read(src, dst, self.index, buffer_, self.empty, - self.empty) + core.eager.async_read( + src, dst, self.index, buffer_, self.empty, self.empty + ) array1 = paddle.gather(src, self.index) - array2 = dst[:len(self.index)] + array2 = dst[: len(self.index)] np.testing.assert_allclose(array1.numpy(), array2.numpy(), rtol=1e-05) def test_main(self): @@ -109,18 +129,20 @@ class TestAsyncRead(unittest.TestCase): class TestAsyncWrite(unittest.TestCase): - def func_setUp(self): self.src = paddle.rand(shape=[100, 50, 50, 5], dtype="float32") - self.dst = paddle.empty(shape=[200, 50, 50, 5], - dtype="float32").pin_memory() + self.dst = paddle.empty( + shape=[200, 50, 50, 5], dtype="float32" + ).pin_memory() self.stream = cuda.Stream() def func_test_async_write_success(self): - offset = paddle.to_tensor(np.array([0, 60], dtype="int64"), - place=paddle.CPUPlace()) - count = paddle.to_tensor(np.array([40, 60], dtype="int64"), - place=paddle.CPUPlace()) + offset = paddle.to_tensor( + np.array([0, 60], dtype="int64"), place=paddle.CPUPlace() + ) + count = paddle.to_tensor( + np.array([40, 60], dtype="int64"), place=paddle.CPUPlace() + ) with cuda.stream_guard(self.stream): if _in_legacy_dygraph(): core.async_write(self.src, self.dst, offset, count) @@ -130,9 +152,9 @@ class TestAsyncWrite(unittest.TestCase): offset_a = paddle.gather(self.dst, paddle.to_tensor(np.arange(0, 40))) offset_b = 
paddle.gather(self.dst, paddle.to_tensor(np.arange(60, 120))) offset_array = paddle.concat([offset_a, offset_b], axis=0) - np.testing.assert_allclose(self.src.numpy(), - offset_array.numpy(), - rtol=1e-05) + np.testing.assert_allclose( + self.src.numpy(), offset_array.numpy(), rtol=1e-05 + ) def test_async_write_success(self): with _test_eager_guard(): diff --git a/python/paddle/tests/test_audio_backend.py b/python/paddle/tests/test_audio_backend.py index 79e793e2dc8653964d041672be01a4579d613bea..fb583d96e1f9b75fa375ada39c2e02abb9a5029d 100644 --- a/python/paddle/tests/test_audio_backend.py +++ b/python/paddle/tests/test_audio_backend.py @@ -20,12 +20,10 @@ import paddle.audio class TestAudioBackends(unittest.TestCase): - def setUp(self): self.initParmas() def initParmas(self): - def get_wav_data(dtype: str, num_channels: int, num_frames: int): dtype_ = getattr(paddle, dtype) base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1 @@ -37,19 +35,21 @@ class TestAudioBackends(unittest.TestCase): self.sr = 16000 self.dtype = "float32" self.window_size = 1024 - waveform_tensor = get_wav_data(self.dtype, - self.num_channels, - num_frames=self.duration * self.sr) + waveform_tensor = get_wav_data( + self.dtype, self.num_channels, num_frames=self.duration * self.sr + ) # shape (1, 8000) self.waveform = waveform_tensor.numpy() def test_backend(self): base_dir = os.getcwd() wave_wav_path = os.path.join(base_dir, "wave_test.wav") - paddle.audio.save(wave_wav_path, - paddle.to_tensor(self.waveform), - self.sr, - channels_first=True) + paddle.audio.save( + wave_wav_path, + paddle.to_tensor(self.waveform), + self.sr, + channels_first=True, + ) # test backends(wave)(wave_backend) info wav_info = paddle.audio.info(wave_wav_path) @@ -74,9 +74,9 @@ class TestAudioBackends(unittest.TestCase): np.testing.assert_array_almost_equal(wav_data, waveform) with open(wave_wav_path, 'rb') as file_: - wav_data, sr = paddle.audio.load(file_, - normalize=False, - num_frames=10000) + wav_data, sr = paddle.audio.load( + file_, normalize=False, num_frames=10000 + ) with soundfile.SoundFile(wave_wav_path, "r") as file_: dtype = "int16" frames = file_._prepare_read(0, None, -1) @@ -101,6 +101,7 @@ class TestAudioBackends(unittest.TestCase): try: import paddleaudio + backends = paddle.audio.backends.list_available_backends() for backend in backends: self.assertTrue(backend in ["wave_backend", "soundfile"]) @@ -119,18 +120,22 @@ class TestAudioBackends(unittest.TestCase): pass try: - paddle.audio.save(wave_wav_path, - paddle.to_tensor(self.waveform), - self.sr, - bits_per_sample=24, - channels_first=True) + paddle.audio.save( + wave_wav_path, + paddle.to_tensor(self.waveform), + self.sr, + bits_per_sample=24, + channels_first=True, + ) except ValueError: pass try: - paddle.audio.save(wave_wav_path, - paddle.to_tensor(self.waveform).unsqueeze(0), - self.sr) + paddle.audio.save( + wave_wav_path, + paddle.to_tensor(self.waveform).unsqueeze(0), + self.sr, + ) except AssertionError: pass diff --git a/python/paddle/tests/test_audio_datasets.py b/python/paddle/tests/test_audio_datasets.py index 59ba1d543bda6901dd9f05fa21d5a9b5889934ce..4db1730ae41891c526c0105dda40cd208a8b017c 100644 --- a/python/paddle/tests/test_audio_datasets.py +++ b/python/paddle/tests/test_audio_datasets.py @@ -24,7 +24,6 @@ def parameterize(*params): class TestAudioDatasets(unittest.TestCase): - @parameterize(["dev", "train"], [40, 64]) def test_tess_dataset(self, mode: str, params: int): """ @@ -34,36 +33,34 @@ class 
TestAudioDatasets(unittest.TestCase): https://doi.org/10.5683/SP2/E8H2MF """ archive = { - 'url': - 'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set_lite.zip', + 'url': 'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set_lite.zip', 'md5': '9ffb5e3adf28d4d6b787fa94bd59b975', } # small part of TESS dataset for test. - tess_dataset = paddle.audio.datasets.TESS(mode=mode, - feat_type='mfcc', - n_mfcc=params, - archive=archive) + tess_dataset = paddle.audio.datasets.TESS( + mode=mode, feat_type='mfcc', n_mfcc=params, archive=archive + ) idx = np.random.randint(0, 30) elem = tess_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 6) - tess_dataset = paddle.audio.datasets.TESS(mode=mode, - feat_type='spectrogram', - n_fft=params) + tess_dataset = paddle.audio.datasets.TESS( + mode=mode, feat_type='spectrogram', n_fft=params + ) elem = tess_dataset[idx] self.assertTrue(elem[0].shape[0] == (params // 2 + 1)) self.assertTrue(0 <= elem[1] <= 6) - tess_dataset = paddle.audio.datasets.TESS(mode="dev", - feat_type='logmelspectrogram', - n_mels=params) + tess_dataset = paddle.audio.datasets.TESS( + mode="dev", feat_type='logmelspectrogram', n_mels=params + ) elem = tess_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 6) - tess_dataset = paddle.audio.datasets.TESS(mode="dev", - feat_type='melspectrogram', - n_mels=params) + tess_dataset = paddle.audio.datasets.TESS( + mode="dev", feat_type='melspectrogram', n_mels=params + ) elem = tess_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 6) @@ -77,43 +74,42 @@ class TestAudioDatasets(unittest.TestCase): http://dx.doi.org/10.1145/2733373.2806390 """ archive = { - 'url': - 'https://bj.bcebos.com/paddleaudio/datasets/ESC-50-master-lite.zip', + 'url': 'https://bj.bcebos.com/paddleaudio/datasets/ESC-50-master-lite.zip', 'md5': '1e9ba53265143df5b2804a743f2d1956', } # small part of ESC50 dataset for test. 
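# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the hunks above
# join dictionary values that the old formatter had wrapped ('url': on one
# line, the URL string on the next) back onto a single line.  String literals
# themselves are not split, so a long URL value is simply allowed to exceed
# the line limit.  A minimal hypothetical example of the resulting layout,
# with placeholder URL and checksum:
archive = {
    'url': 'https://example.invalid/datasets/some-very-long-archive-name-used-only-for-illustration.zip',
    'md5': '0123456789abcdef0123456789abcdef',  # placeholder checksum
}
# ---------------------------------------------------------------------------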
- esc50_dataset = paddle.audio.datasets.ESC50(mode=mode, - feat_type='raw', - archive=archive) + esc50_dataset = paddle.audio.datasets.ESC50( + mode=mode, feat_type='raw', archive=archive + ) idx = np.random.randint(0, 6) elem = esc50_dataset[idx] self.assertTrue(elem[0].shape[0] == 220500) self.assertTrue(0 <= elem[1] <= 2) - esc50_dataset = paddle.audio.datasets.ESC50(mode=mode, - feat_type='mfcc', - n_mfcc=params, - archive=archive) + esc50_dataset = paddle.audio.datasets.ESC50( + mode=mode, feat_type='mfcc', n_mfcc=params, archive=archive + ) idx = np.random.randint(0, 6) elem = esc50_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 2) - esc50_dataset = paddle.audio.datasets.ESC50(mode=mode, - feat_type='spectrogram', - n_fft=params) + esc50_dataset = paddle.audio.datasets.ESC50( + mode=mode, feat_type='spectrogram', n_fft=params + ) elem = esc50_dataset[idx] self.assertTrue(elem[0].shape[0] == (params // 2 + 1)) self.assertTrue(0 <= elem[1] <= 2) esc50_dataset = paddle.audio.datasets.ESC50( - mode=mode, feat_type='logmelspectrogram', n_mels=params) + mode=mode, feat_type='logmelspectrogram', n_mels=params + ) elem = esc50_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 2) - esc50_dataset = paddle.audio.datasets.ESC50(mode=mode, - feat_type='melspectrogram', - n_mels=params) + esc50_dataset = paddle.audio.datasets.ESC50( + mode=mode, feat_type='melspectrogram', n_mels=params + ) elem = esc50_dataset[idx] self.assertTrue(elem[0].shape[0] == params) self.assertTrue(0 <= elem[1] <= 2) diff --git a/python/paddle/tests/test_audio_functions.py b/python/paddle/tests/test_audio_functions.py index 5766299f24307ab3bb1603a5d9f7ecfbf45bf8bb..5542b4483b62c79ff59b08cdee5a48ad60d2d33a 100644 --- a/python/paddle/tests/test_audio_functions.py +++ b/python/paddle/tests/test_audio_functions.py @@ -28,12 +28,10 @@ def parameterize(*params): class TestAudioFuncitons(unittest.TestCase): - def setUp(self): self.initParmas() def initParmas(self): - def get_wav_data(dtype: str, num_channels: int, num_frames: int): dtype_ = getattr(paddle, dtype) base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1 @@ -53,49 +51,56 @@ class TestAudioFuncitons(unittest.TestCase): self.sr = 16000 self.dtype = "float32" self.window_size = 1024 - waveform_tensor = get_wav_data(self.dtype, - self.num_channels, - num_frames=self.duration * self.sr) + waveform_tensor = get_wav_data( + self.dtype, self.num_channels, num_frames=self.duration * self.sr + ) self.waveform = waveform_tensor.numpy() @parameterize([1.0, 3.0, 9.0, 25.0], [True, False]) def test_audio_function(self, val: float, htk_flag: bool): mel_paddle = paddle.audio.functional.hz_to_mel(val, htk_flag) mel_paddle_tensor = paddle.audio.functional.hz_to_mel( - paddle.to_tensor(val), htk_flag) + paddle.to_tensor(val), htk_flag + ) mel_librosa = librosa.hz_to_mel(val, htk_flag) np.testing.assert_almost_equal(mel_paddle, mel_librosa, decimal=5) - np.testing.assert_almost_equal(mel_paddle_tensor.numpy(), - mel_librosa, - decimal=4) + np.testing.assert_almost_equal( + mel_paddle_tensor.numpy(), mel_librosa, decimal=4 + ) hz_paddle = paddle.audio.functional.mel_to_hz(val, htk_flag) hz_paddle_tensor = paddle.audio.functional.mel_to_hz( - paddle.to_tensor(val), htk_flag) + paddle.to_tensor(val), htk_flag + ) hz_librosa = librosa.mel_to_hz(val, htk_flag) np.testing.assert_almost_equal(hz_paddle, hz_librosa, decimal=4) - np.testing.assert_almost_equal(hz_paddle_tensor.numpy(), - hz_librosa, - 
decimal=4) + np.testing.assert_almost_equal( + hz_paddle_tensor.numpy(), hz_librosa, decimal=4 + ) decibel_paddle = paddle.audio.functional.power_to_db( - paddle.to_tensor(val)) + paddle.to_tensor(val) + ) decibel_librosa = librosa.power_to_db(val) - np.testing.assert_almost_equal(decibel_paddle.numpy(), - decibel_paddle, - decimal=5) - - @parameterize([64, 128, 256], [0.0, 0.5, 1.0], [10000, 11025], - [False, True]) - def test_audio_function_mel(self, n_mels: int, f_min: float, f_max: float, - htk_flag: bool): - librosa_mel_freq = librosa.mel_frequencies(n_mels, f_min, f_max, - htk_flag) + np.testing.assert_almost_equal( + decibel_paddle.numpy(), decibel_paddle, decimal=5 + ) + + @parameterize( + [64, 128, 256], [0.0, 0.5, 1.0], [10000, 11025], [False, True] + ) + def test_audio_function_mel( + self, n_mels: int, f_min: float, f_max: float, htk_flag: bool + ): + librosa_mel_freq = librosa.mel_frequencies( + n_mels, f_min, f_max, htk_flag + ) paddle_mel_freq = paddle.audio.functional.mel_frequencies( - n_mels, f_min, f_max, htk_flag, 'float64') - np.testing.assert_almost_equal(paddle_mel_freq, - librosa_mel_freq, - decimal=3) + n_mels, f_min, f_max, htk_flag, 'float64' + ) + np.testing.assert_almost_equal( + paddle_mel_freq, librosa_mel_freq, decimal=3 + ) @parameterize([8000, 16000], [64, 128, 256]) def test_audio_function_fft(self, sr: int, n_fft: int): @@ -106,60 +111,77 @@ class TestAudioFuncitons(unittest.TestCase): @parameterize([1.0, 3.0, 9.0]) def test_audio_function_exception(self, spect: float): try: - paddle.audio.functional.power_to_db(paddle.to_tensor([spect]), - amin=0) + paddle.audio.functional.power_to_db( + paddle.to_tensor([spect]), amin=0 + ) except Exception: pass try: - paddle.audio.functional.power_to_db(paddle.to_tensor([spect]), - ref_value=0) + paddle.audio.functional.power_to_db( + paddle.to_tensor([spect]), ref_value=0 + ) except Exception: pass try: - paddle.audio.functional.power_to_db(paddle.to_tensor([spect]), - top_db=-1) + paddle.audio.functional.power_to_db( + paddle.to_tensor([spect]), top_db=-1 + ) except Exception: pass - @parameterize([ - "hamming", "hann", "triang", "bohman", "blackman", "cosine", "tukey", - "taylor" - ], [1, 512]) + @parameterize( + [ + "hamming", + "hann", + "triang", + "bohman", + "blackman", + "cosine", + "tukey", + "taylor", + ], + [1, 512], + ) def test_window(self, window_type: str, n_fft: int): window_scipy = signal.get_window(window_type, n_fft) window_paddle = paddle.audio.functional.get_window(window_type, n_fft) - np.testing.assert_array_almost_equal(window_scipy, - window_paddle.numpy(), - decimal=5) + np.testing.assert_array_almost_equal( + window_scipy, window_paddle.numpy(), decimal=5 + ) @parameterize([1, 512]) def test_gaussian_window_and_exception(self, n_fft: int): window_scipy_gaussain = signal.windows.gaussian(n_fft, std=7) window_paddle_gaussian = paddle.audio.functional.get_window( - ('gaussian', 7), n_fft, False) - np.testing.assert_array_almost_equal(window_scipy_gaussain, - window_paddle_gaussian.numpy(), - decimal=5) + ('gaussian', 7), n_fft, False + ) + np.testing.assert_array_almost_equal( + window_scipy_gaussain, window_paddle_gaussian.numpy(), decimal=5 + ) window_scipy_general_gaussain = signal.windows.general_gaussian( - n_fft, 1, 7) + n_fft, 1, 7 + ) window_paddle_general_gaussian = paddle.audio.functional.get_window( - ('general_gaussian', 1, 7), n_fft, False) - np.testing.assert_array_almost_equal(window_scipy_gaussain, - window_paddle_gaussian.numpy(), - decimal=5) + ('general_gaussian', 1, 7), 
n_fft, False + ) + np.testing.assert_array_almost_equal( + window_scipy_gaussain, window_paddle_gaussian.numpy(), decimal=5 + ) window_scipy_exp = signal.windows.exponential(n_fft) window_paddle_exp = paddle.audio.functional.get_window( - ('exponential', None, 1), n_fft, False) - np.testing.assert_array_almost_equal(window_scipy_exp, - window_paddle_exp.numpy(), - decimal=5) + ('exponential', None, 1), n_fft, False + ) + np.testing.assert_array_almost_equal( + window_scipy_exp, window_paddle_exp.numpy(), decimal=5 + ) try: - window_paddle = paddle.audio.functional.get_window(("kaiser", 1.0), - self.n_fft) + window_paddle = paddle.audio.functional.get_window( + ("kaiser", 1.0), self.n_fft + ) except NotImplementedError: pass @@ -170,7 +192,8 @@ class TestAudioFuncitons(unittest.TestCase): try: window_paddle = paddle.audio.functional.get_window( - "fake_window", self.n_fft) + "fake_window", self.n_fft + ) except ValueError: pass @@ -181,7 +204,6 @@ class TestAudioFuncitons(unittest.TestCase): @parameterize([5, 13, 23], [257, 513, 1025]) def test_create_dct(self, n_mfcc: int, n_mels: int): - def dct(n_filters, n_input): basis = np.empty((n_filters, n_input)) basis[0, :] = 1.0 / np.sqrt(n_input) @@ -195,14 +217,17 @@ class TestAudioFuncitons(unittest.TestCase): paddle_dct = paddle.audio.functional.create_dct(n_mfcc, n_mels) np.testing.assert_array_almost_equal(librosa_dct, paddle_dct, decimal=5) - @parameterize([128, 256, 512], ["hamming", "hann", "triang", "bohman"], - [True, False]) - def test_stft_and_spect(self, n_fft: int, window_str: str, - center_flag: bool): + @parameterize( + [128, 256, 512], ["hamming", "hann", "triang", "bohman"], [True, False] + ) + def test_stft_and_spect( + self, n_fft: int, window_str: str, center_flag: bool + ): hop_length = int(n_fft / 4) if len(self.waveform.shape) == 2: # (C, T) self.waveform = self.waveform.squeeze( - 0) # 1D input for librosa.feature.melspectrogram + 0 + ) # 1D input for librosa.feature.melspectrogram feature_librosa = librosa.core.stft( y=self.waveform, n_fft=n_fft, @@ -214,9 +239,9 @@ class TestAudioFuncitons(unittest.TestCase): pad_mode=self.pad_mode, ) x = paddle.to_tensor(self.waveform).unsqueeze(0) - window = paddle.audio.functional.get_window(window_str, - n_fft, - dtype=x.dtype) + window = paddle.audio.functional.get_window( + window_str, n_fft, dtype=x.dtype + ) feature_paddle = paddle.signal.stft( x=x, n_fft=n_fft, @@ -228,9 +253,9 @@ class TestAudioFuncitons(unittest.TestCase): normalized=False, onesided=True, ).squeeze(0) - np.testing.assert_array_almost_equal(feature_librosa, - feature_paddle, - decimal=5) + np.testing.assert_array_almost_equal( + feature_librosa, feature_paddle, decimal=5 + ) feature_bg = np.power(np.abs(feature_librosa), 2.0) feature_extractor = paddle.audio.features.Spectrogram( @@ -243,16 +268,18 @@ class TestAudioFuncitons(unittest.TestCase): pad_mode=self.pad_mode, ) feature_layer = feature_extractor(x).squeeze(0) - np.testing.assert_array_almost_equal(feature_layer, - feature_bg, - decimal=3) + np.testing.assert_array_almost_equal( + feature_layer, feature_bg, decimal=3 + ) - @parameterize([128, 256, 512], [64, 82], - ["hamming", "hann", "triang", "bohman"]) + @parameterize( + [128, 256, 512], [64, 82], ["hamming", "hann", "triang", "bohman"] + ) def test_istft(self, n_fft: int, hop_length: int, window_str: str): if len(self.waveform.shape) == 2: # (C, T) self.waveform = self.waveform.squeeze( - 0) # 1D input for librosa.feature.melspectrogram + 0 + ) # 1D input for librosa.feature.melspectrogram # 
librosa # Get stft result from librosa. stft_matrix = librosa.core.stft( @@ -274,10 +301,9 @@ class TestAudioFuncitons(unittest.TestCase): length=None, ) x = paddle.to_tensor(stft_matrix).unsqueeze(0) - window = paddle.audio.functional.get_window(window_str, - n_fft, - dtype=paddle.to_tensor( - self.waveform).dtype) + window = paddle.audio.functional.get_window( + window_str, n_fft, dtype=paddle.to_tensor(self.waveform).dtype + ) feature_paddle = paddle.signal.istft( x=x, n_fft=n_fft, @@ -291,9 +317,9 @@ class TestAudioFuncitons(unittest.TestCase): return_complex=False, ).squeeze(0) - np.testing.assert_array_almost_equal(feature_librosa, - feature_paddle, - decimal=5) + np.testing.assert_array_almost_equal( + feature_librosa, feature_paddle, decimal=5 + ) if __name__ == '__main__': diff --git a/python/paddle/tests/test_audio_logmel_feature.py b/python/paddle/tests/test_audio_logmel_feature.py index 413f4053c9c708ec7d4a9d1116382216b0e59006..a3a39cd79baf2ab70844c7e50db49014e284129c 100644 --- a/python/paddle/tests/test_audio_logmel_feature.py +++ b/python/paddle/tests/test_audio_logmel_feature.py @@ -28,12 +28,10 @@ def parameterize(*params): class TestFeatures(unittest.TestCase): - def setUp(self): self.initParmas() def initParmas(self): - def get_wav_data(dtype: str, num_channels: int, num_frames: int): dtype_ = getattr(paddle, dtype) base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1 @@ -46,32 +44,44 @@ class TestFeatures(unittest.TestCase): self.num_channels = 1 self.sr = 16000 self.dtype = "float32" - waveform_tensor = get_wav_data(self.dtype, - self.num_channels, - num_frames=self.duration * self.sr) + waveform_tensor = get_wav_data( + self.dtype, self.num_channels, num_frames=self.duration * self.sr + ) self.waveform = waveform_tensor.numpy() - @parameterize([16000], ["hamming", "bohman"], [128], [128, 64], [64, 32], - [0.0, 50.0]) - def test_log_melspect(self, sr: int, window_str: str, n_fft: int, - hop_length: int, n_mels: int, fmin: float): + @parameterize( + [16000], ["hamming", "bohman"], [128], [128, 64], [64, 32], [0.0, 50.0] + ) + def test_log_melspect( + self, + sr: int, + window_str: str, + n_fft: int, + hop_length: int, + n_mels: int, + fmin: float, + ): if len(self.waveform.shape) == 2: # (C, T) self.waveform = self.waveform.squeeze( - 0) # 1D input for librosa.feature.melspectrogram + 0 + ) # 1D input for librosa.feature.melspectrogram # librosa: - feature_librosa = librosa.feature.melspectrogram(y=self.waveform, - sr=sr, - n_fft=n_fft, - hop_length=hop_length, - window=window_str, - n_mels=n_mels, - center=True, - fmin=fmin, - pad_mode='reflect') + feature_librosa = librosa.feature.melspectrogram( + y=self.waveform, + sr=sr, + n_fft=n_fft, + hop_length=hop_length, + window=window_str, + n_mels=n_mels, + center=True, + fmin=fmin, + pad_mode='reflect', + ) feature_librosa = librosa.power_to_db(feature_librosa, top_db=None) x = paddle.to_tensor(self.waveform, dtype=paddle.float64).unsqueeze( - 0) # Add batch dim. + 0 + ) # Add batch dim. 
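# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): several hunks in
# this section change only whitespace, e.g. self.dst[:len(...)] becomes
# self.dst[: len(...)] and (x - u)**2 becomes (x - u) ** 2.  This follows the
# PEP 8 idea of treating the slice colon like a binary operator: simple
# operands keep the compact form, complex operands get a space on both sides;
# likewise ** gets surrounding spaces when an operand is not a simple name or
# number.  Hypothetical data, for illustration only:
items = list(range(10))
edgeitems = 3
head = items[:edgeitems]          # simple operand: no extra spaces
tail = items[(-1 * edgeitems) :]  # complex operand: space around the colon
square = (len(items) - 1) ** 2    # non-simple operand: spaces around **
# ---------------------------------------------------------------------------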
feature_extractor = paddle.audio.features.LogMelSpectrogram( sr=sr, n_fft=n_fft, @@ -81,54 +91,63 @@ class TestFeatures(unittest.TestCase): n_mels=n_mels, f_min=fmin, top_db=None, - dtype=x.dtype) + dtype=x.dtype, + ) feature_layer = feature_extractor(x).squeeze(0).numpy() - np.testing.assert_array_almost_equal(feature_librosa, - feature_layer, - decimal=2) + np.testing.assert_array_almost_equal( + feature_librosa, feature_layer, decimal=2 + ) # relative difference np.testing.assert_allclose(feature_librosa, feature_layer, rtol=1e-4) - @parameterize([16000], [256, 128], [40, 64], [64, 128], - ['float32', 'float64']) - def test_mfcc(self, sr: int, n_fft: int, n_mfcc: int, n_mels: int, - dtype: str): + @parameterize( + [16000], [256, 128], [40, 64], [64, 128], ['float32', 'float64'] + ) + def test_mfcc( + self, sr: int, n_fft: int, n_mfcc: int, n_mels: int, dtype: str + ): if paddle.version.cuda() != 'False': if float(paddle.version.cuda()) >= 11.0: return if len(self.waveform.shape) == 2: # (C, T) self.waveform = self.waveform.squeeze( - 0) # 1D input for librosa.feature.melspectrogram + 0 + ) # 1D input for librosa.feature.melspectrogram # librosa: np_dtype = getattr(np, dtype) - feature_librosa = librosa.feature.mfcc(y=self.waveform, - sr=sr, - S=None, - n_mfcc=n_mfcc, - dct_type=2, - lifter=0, - n_fft=n_fft, - hop_length=64, - n_mels=n_mels, - fmin=50.0, - dtype=np_dtype) + feature_librosa = librosa.feature.mfcc( + y=self.waveform, + sr=sr, + S=None, + n_mfcc=n_mfcc, + dct_type=2, + lifter=0, + n_fft=n_fft, + hop_length=64, + n_mels=n_mels, + fmin=50.0, + dtype=np_dtype, + ) # paddlespeech.audio.features.layer - x = paddle.to_tensor(self.waveform, - dtype=dtype).unsqueeze(0) # Add batch dim. - feature_extractor = paddle.audio.features.MFCC(sr=sr, - n_mfcc=n_mfcc, - n_fft=n_fft, - hop_length=64, - n_mels=n_mels, - top_db=self.top_db, - dtype=x.dtype) + x = paddle.to_tensor(self.waveform, dtype=dtype).unsqueeze( + 0 + ) # Add batch dim. 
+ feature_extractor = paddle.audio.features.MFCC( + sr=sr, + n_mfcc=n_mfcc, + n_fft=n_fft, + hop_length=64, + n_mels=n_mels, + top_db=self.top_db, + dtype=x.dtype, + ) feature_layer = feature_extractor(x).squeeze(0).numpy() - np.testing.assert_array_almost_equal(feature_librosa, - feature_layer, - decimal=3) + np.testing.assert_array_almost_equal( + feature_librosa, feature_layer, decimal=3 + ) np.testing.assert_allclose(feature_librosa, feature_layer, rtol=1e-1) @@ -142,19 +161,19 @@ class TestFeatures(unittest.TestCase): center=True, pad_mode='reflect', top_db=self.top_db, - dtype=x.dtype) + dtype=x.dtype, + ) feature_layer_logmel = feature_extractor(x).squeeze(0).numpy() - feature_layer_mfcc = scipy.fftpack.dct(feature_layer_logmel, - axis=0, - type=2, - norm="ortho")[:n_mfcc] - np.testing.assert_array_almost_equal(feature_layer_mfcc, - feature_librosa, - decimal=3) - np.testing.assert_allclose(feature_layer_mfcc, - feature_librosa, - rtol=1e-1) + feature_layer_mfcc = scipy.fftpack.dct( + feature_layer_logmel, axis=0, type=2, norm="ortho" + )[:n_mfcc] + np.testing.assert_array_almost_equal( + feature_layer_mfcc, feature_librosa, decimal=3 + ) + np.testing.assert_allclose( + feature_layer_mfcc, feature_librosa, rtol=1e-1 + ) if __name__ == '__main__': diff --git a/python/paddle/tests/test_audio_mel_feature.py b/python/paddle/tests/test_audio_mel_feature.py index 7ddfc9ac4872163c85641f69fb40726b47bfdefe..ec7b843caf77df6c730f0b8f2f3cb92d9eba3dd4 100644 --- a/python/paddle/tests/test_audio_mel_feature.py +++ b/python/paddle/tests/test_audio_mel_feature.py @@ -27,12 +27,10 @@ def parameterize(*params): class TestFeatures(unittest.TestCase): - def setUp(self): self.initParmas() def initParmas(self): - def get_wav_data(dtype: str, num_channels: int, num_frames: int): dtype_ = getattr(paddle, dtype) base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1 @@ -44,15 +42,17 @@ class TestFeatures(unittest.TestCase): self.num_channels = 1 self.sr = 16000 self.dtype = "float32" - waveform_tensor = get_wav_data(self.dtype, - self.num_channels, - num_frames=self.duration * self.sr) + waveform_tensor = get_wav_data( + self.dtype, self.num_channels, num_frames=self.duration * self.sr + ) self.waveform = waveform_tensor.numpy() - @parameterize([8000], [128, 256], [64, 32], [0.0, 1.0], - ['float32', 'float64']) - def test_mel(self, sr: int, n_fft: int, n_mels: int, fmin: float, - dtype: str): + @parameterize( + [8000], [128, 256], [64, 32], [0.0, 1.0], ['float32', 'float64'] + ) + def test_mel( + self, sr: int, n_fft: int, n_mels: int, fmin: float, dtype: str + ): feature_librosa = librosa.filters.mel( sr=sr, n_fft=n_fft, @@ -75,40 +75,47 @@ class TestFeatures(unittest.TestCase): dtype=paddle_dtype, ) - np.testing.assert_array_almost_equal(feature_librosa, - feature_functional) + np.testing.assert_array_almost_equal( + feature_librosa, feature_functional + ) @parameterize([8000, 16000], [128, 256], [64, 82], [40, 80], [False, True]) - def test_melspect(self, sr: int, n_fft: int, hop_length: int, n_mels: int, - htk: bool): + def test_melspect( + self, sr: int, n_fft: int, hop_length: int, n_mels: int, htk: bool + ): if len(self.waveform.shape) == 2: # (C, T) self.waveform = self.waveform.squeeze( - 0) # 1D input for librosa.feature.melspectrogram + 0 + ) # 1D input for librosa.feature.melspectrogram # librosa: - feature_librosa = librosa.feature.melspectrogram(y=self.waveform, - sr=sr, - n_fft=n_fft, - hop_length=hop_length, - n_mels=n_mels, - htk=htk, - fmin=50.0) + feature_librosa = 
librosa.feature.melspectrogram( + y=self.waveform, + sr=sr, + n_fft=n_fft, + hop_length=hop_length, + n_mels=n_mels, + htk=htk, + fmin=50.0, + ) # paddle.audio.features.layer x = paddle.to_tensor(self.waveform, dtype=paddle.float64).unsqueeze( - 0) # Add batch dim. + 0 + ) # Add batch dim. feature_extractor = paddle.audio.features.MelSpectrogram( sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels, htk=htk, - dtype=x.dtype) + dtype=x.dtype, + ) feature_layer = feature_extractor(x).squeeze(0).numpy() - np.testing.assert_array_almost_equal(feature_librosa, - feature_layer, - decimal=5) + np.testing.assert_array_almost_equal( + feature_librosa, feature_layer, decimal=5 + ) if __name__ == '__main__': diff --git a/python/paddle/tests/test_callback_early_stop.py b/python/paddle/tests/test_callback_early_stop.py index 79a6265420148309fd1cbdef0dee768992851be4..612c25a6d60d4d7f19265d92506d2c96df46105a 100644 --- a/python/paddle/tests/test_callback_early_stop.py +++ b/python/paddle/tests/test_callback_early_stop.py @@ -27,7 +27,6 @@ from paddle.nn.layer.loss import CrossEntropyLoss class MnistDataset(MNIST): - def __init__(self, mode, return_label=True, sample_num=None): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -40,14 +39,13 @@ class MnistDataset(MNIST): img = np.reshape(img, [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) class TestCallbacks(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() @@ -64,44 +62,55 @@ class TestCallbacks(unittest.TestCase): val_dataset = MnistDataset(mode='test', sample_num=sample_num) net = LeNet() - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=net.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=net.parameters() + ) inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] model = Model(net, inputs=inputs, labels=labels) - model.prepare(optim, - loss=CrossEntropyLoss(reduction="sum"), - metrics=[Accuracy()]) - callbacks_0 = paddle.callbacks.EarlyStopping('loss', - mode='min', - patience=1, - verbose=1, - min_delta=0, - baseline=None, - save_best_model=True) - callbacks_1 = paddle.callbacks.EarlyStopping('acc', - mode='auto', - patience=1, - verbose=1, - min_delta=0, - baseline=0, - save_best_model=True) - callbacks_2 = paddle.callbacks.EarlyStopping('loss', - mode='auto_', - patience=1, - verbose=1, - min_delta=0, - baseline=None, - save_best_model=True) - callbacks_3 = paddle.callbacks.EarlyStopping('acc_', - mode='max', - patience=1, - verbose=1, - min_delta=0, - baseline=0, - save_best_model=True) + model.prepare( + optim, + loss=CrossEntropyLoss(reduction="sum"), + metrics=[Accuracy()], + ) + callbacks_0 = paddle.callbacks.EarlyStopping( + 'loss', + mode='min', + patience=1, + verbose=1, + min_delta=0, + baseline=None, + save_best_model=True, + ) + callbacks_1 = paddle.callbacks.EarlyStopping( + 'acc', + mode='auto', + patience=1, + verbose=1, + min_delta=0, + baseline=0, + save_best_model=True, + ) + callbacks_2 = paddle.callbacks.EarlyStopping( + 'loss', + mode='auto_', + patience=1, + verbose=1, + min_delta=0, + baseline=None, + save_best_model=True, + ) + callbacks_3 = paddle.callbacks.EarlyStopping( + 'acc_', + mode='max', + patience=1, + verbose=1, + min_delta=0, + baseline=0, + save_best_model=True, + ) model.fit( train_dataset, val_dataset, @@ -110,15 +119,18 @@ class 
TestCallbacks(unittest.TestCase): save_dir=self.save_dir, epochs=10, verbose=0, - callbacks=[callbacks_0, callbacks_1, callbacks_2, callbacks_3]) + callbacks=[callbacks_0, callbacks_1, callbacks_2, callbacks_3], + ) # Test for no val_loader - model.fit(train_dataset, - batch_size=64, - save_freq=10, - save_dir=self.save_dir, - epochs=10, - verbose=0, - callbacks=[callbacks_0]) + model.fit( + train_dataset, + batch_size=64, + save_freq=10, + save_dir=self.save_dir, + epochs=10, + verbose=0, + callbacks=[callbacks_0], + ) if __name__ == '__main__': diff --git a/python/paddle/tests/test_callback_reduce_lr_on_plateau.py b/python/paddle/tests/test_callback_reduce_lr_on_plateau.py index 02ac3245b7f87cd82504439b59a08df5639880b9..06ffb5888b8b28c68bedb01b2bfabee71993d23c 100644 --- a/python/paddle/tests/test_callback_reduce_lr_on_plateau.py +++ b/python/paddle/tests/test_callback_reduce_lr_on_plateau.py @@ -27,34 +27,35 @@ from paddle.fluid.framework import _test_eager_guard # Accelerate unittest class CustomMnist(MNIST): - def __len__(self): return 8 class TestReduceLROnPlateau(unittest.TestCase): - def func_reduce_lr_on_plateau(self): transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = CustomMnist(mode='train', transform=transform) val_dataset = CustomMnist(mode='test', transform=transform) net = LeNet() - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=net.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=net.parameters() + ) inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] model = Model(net, inputs=inputs, labels=labels) model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()]) - callbacks = paddle.callbacks.ReduceLROnPlateau(patience=1, - verbose=1, - cooldown=1) - model.fit(train_dataset, - val_dataset, - batch_size=8, - log_freq=1, - save_freq=10, - epochs=10, - callbacks=[callbacks]) + callbacks = paddle.callbacks.ReduceLROnPlateau( + patience=1, verbose=1, cooldown=1 + ) + model.fit( + train_dataset, + val_dataset, + batch_size=8, + log_freq=1, + save_freq=10, + epochs=10, + callbacks=[callbacks], + ) def test_reduce_lr_on_plateau(self): with _test_eager_guard(): @@ -71,41 +72,46 @@ class TestReduceLROnPlateau(unittest.TestCase): train_dataset = CustomMnist(mode='train', transform=transform) val_dataset = CustomMnist(mode='test', transform=transform) net = LeNet() - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=net.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=net.parameters() + ) inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] model = Model(net, inputs=inputs, labels=labels) model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()]) - callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='miou', - patience=3, - verbose=1) - model.fit(train_dataset, - val_dataset, - batch_size=8, - log_freq=1, - save_freq=10, - epochs=1, - callbacks=[callbacks]) + callbacks = paddle.callbacks.ReduceLROnPlateau( + monitor='miou', patience=3, verbose=1 + ) + model.fit( + train_dataset, + val_dataset, + batch_size=8, + log_freq=1, + save_freq=10, + epochs=1, + callbacks=[callbacks], + ) optim = paddle.optimizer.Adam( - learning_rate=paddle.optimizer.lr.PiecewiseDecay([0.001, 0.0001], - [5, 10]), - parameters=net.parameters()) + learning_rate=paddle.optimizer.lr.PiecewiseDecay( + [0.001, 0.0001], [5, 10] + ), + parameters=net.parameters(), + ) 
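# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): besides
# re-wrapping calls, the hunks in these test files normalise a few literals --
# bare floats such as .9 and 1. become 0.9 and 1.0, and a one-element tuple
# return gains explicit parentheses.  The values are unchanged; only the
# spelling differs.  Equivalent before/after with hypothetical names:
def get_sample(img, return_label=False, label=0):
    if return_label:
        return img, label
    return (img,)  # was `return img,`; both forms build the same 1-tuple

momentum = 0.9   # was `.9`
threshold = 1.0  # was `1.`
# ---------------------------------------------------------------------------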
model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()]) - callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='acc', - mode='max', - patience=3, - verbose=1, - cooldown=1) - model.fit(train_dataset, - val_dataset, - batch_size=8, - log_freq=1, - save_freq=10, - epochs=3, - callbacks=[callbacks]) + callbacks = paddle.callbacks.ReduceLROnPlateau( + monitor='acc', mode='max', patience=3, verbose=1, cooldown=1 + ) + model.fit( + train_dataset, + val_dataset, + batch_size=8, + log_freq=1, + save_freq=10, + epochs=3, + callbacks=[callbacks], + ) def test_warn_or_error(self): with _test_eager_guard(): diff --git a/python/paddle/tests/test_callback_visualdl.py b/python/paddle/tests/test_callback_visualdl.py index a1fc1cbadad4bef70a996fd279975e5c938973d8..aa6616f673053b04e1c771c2ac94c4e62e79af9d 100644 --- a/python/paddle/tests/test_callback_visualdl.py +++ b/python/paddle/tests/test_callback_visualdl.py @@ -24,13 +24,11 @@ from paddle.fluid.framework import _test_eager_guard class MnistDataset(MNIST): - def __len__(self): return 512 class TestCallbacks(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() @@ -49,15 +47,16 @@ class TestCallbacks(unittest.TestCase): model = paddle.Model(net, inputs, labels) optim = paddle.optimizer.Adam(0.001, parameters=net.parameters()) - model.prepare(optimizer=optim, - loss=paddle.nn.CrossEntropyLoss(), - metrics=paddle.metric.Accuracy()) + model.prepare( + optimizer=optim, + loss=paddle.nn.CrossEntropyLoss(), + metrics=paddle.metric.Accuracy(), + ) callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir') - model.fit(train_dataset, - eval_dataset, - batch_size=64, - callbacks=callback) + model.fit( + train_dataset, eval_dataset, batch_size=64, callbacks=callback + ) def test_visualdl_callback(self): with _test_eager_guard(): diff --git a/python/paddle/tests/test_callbacks.py b/python/paddle/tests/test_callbacks.py index cc43f82c5c279e9a7ca61b4709e8a5221fab3fa1..70dda874dacccb28d937873fdfe1aad29eebddb5 100644 --- a/python/paddle/tests/test_callbacks.py +++ b/python/paddle/tests/test_callbacks.py @@ -27,7 +27,6 @@ from paddle.vision.datasets import MNIST class MnistDataset(MNIST): - def __init__(self, mode, return_label=True, sample_num=None): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -40,14 +39,13 @@ class MnistDataset(MNIST): img = np.reshape(img, [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) class TestCallbacks(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() @@ -64,14 +62,16 @@ class TestCallbacks(unittest.TestCase): lenet = Model(LeNet(), inputs) lenet.prepare() - cbks = config_callbacks(model=lenet, - batch_size=128, - epochs=epochs, - steps=steps, - log_freq=freq, - verbose=self.verbose, - metrics=['loss', 'acc'], - save_dir=self.save_dir) + cbks = config_callbacks( + model=lenet, + batch_size=128, + epochs=epochs, + steps=steps, + log_freq=freq, + verbose=self.verbose, + metrics=['loss', 'acc'], + save_dir=self.save_dir, + ) cbks.on_begin('train') logs = {'loss': 50.341673, 'acc': 0.00256} diff --git a/python/paddle/tests/test_dataset_cifar.py b/python/paddle/tests/test_dataset_cifar.py index 95bf21c4da181b3c969f25819dc6e79935dbf8c8..abf79fb1e3974ce0c1d9de4efd1df05056ff3821 100644 --- a/python/paddle/tests/test_dataset_cifar.py +++ b/python/paddle/tests/test_dataset_cifar.py @@ -19,7 +19,6 @@ from paddle.vision.datasets import Cifar10, 
Cifar100 class TestCifar10Train(unittest.TestCase): - def test_main(self): cifar = Cifar10(mode='train') self.assertTrue(len(cifar) == 50000) @@ -37,7 +36,6 @@ class TestCifar10Train(unittest.TestCase): class TestCifar10Test(unittest.TestCase): - def test_main(self): cifar = Cifar10(mode='test') self.assertTrue(len(cifar) == 10000) @@ -72,7 +70,6 @@ class TestCifar10Test(unittest.TestCase): class TestCifar100Train(unittest.TestCase): - def test_main(self): cifar = Cifar100(mode='train') self.assertTrue(len(cifar) == 50000) @@ -90,7 +87,6 @@ class TestCifar100Train(unittest.TestCase): class TestCifar100Test(unittest.TestCase): - def test_main(self): cifar = Cifar100(mode='test') self.assertTrue(len(cifar) == 10000) diff --git a/python/paddle/tests/test_dataset_conll05.py b/python/paddle/tests/test_dataset_conll05.py index 8dc1f56779f7124064cca5b0d83b5b23acac8871..9eb0036718b35516eb651a2937f5c49ac8cca14b 100644 --- a/python/paddle/tests/test_dataset_conll05.py +++ b/python/paddle/tests/test_dataset_conll05.py @@ -20,7 +20,6 @@ from paddle.text.datasets import Conll05st class TestConll05st(unittest.TestCase): - def test_main(self): conll05st = Conll05st() self.assertTrue(len(conll05st) == 5267) diff --git a/python/paddle/tests/test_dataset_imdb.py b/python/paddle/tests/test_dataset_imdb.py index c70b7fedf36325e8244bb5b451d85914e391ad37..aed8c387409dce30710cfb3b65232310f99f8410 100644 --- a/python/paddle/tests/test_dataset_imdb.py +++ b/python/paddle/tests/test_dataset_imdb.py @@ -19,7 +19,6 @@ from paddle.text.datasets import Imdb class TestImdbTrain(unittest.TestCase): - def test_main(self): imdb = Imdb(mode='train') self.assertTrue(len(imdb) == 25000) @@ -34,7 +33,6 @@ class TestImdbTrain(unittest.TestCase): class TestImdbTest(unittest.TestCase): - def test_main(self): imdb = Imdb(mode='test') self.assertTrue(len(imdb) == 25000) diff --git a/python/paddle/tests/test_dataset_imikolov.py b/python/paddle/tests/test_dataset_imikolov.py index 6379ed11e5daa5dbcee2f4e8bd86bdc493131d74..6ffeeda73c362c69d6a614cdf43888f34c05d875 100644 --- a/python/paddle/tests/test_dataset_imikolov.py +++ b/python/paddle/tests/test_dataset_imikolov.py @@ -19,7 +19,6 @@ from paddle.text.datasets import Imikolov class TestImikolovTrain(unittest.TestCase): - def test_main(self): imikolov = Imikolov(mode='train', data_type='NGRAM', window_size=2) self.assertTrue(len(imikolov) == 929589) @@ -32,7 +31,6 @@ class TestImikolovTrain(unittest.TestCase): class TestImikolovTest(unittest.TestCase): - def test_main(self): imikolov = Imikolov(mode='test', data_type='NGRAM', window_size=2) self.assertTrue(len(imikolov) == 82430) diff --git a/python/paddle/tests/test_dataset_movielens.py b/python/paddle/tests/test_dataset_movielens.py index 78a6211647656024f2f73ffed37f6b597c52bd47..e5c6d8376eed970b0016593e874b89dbf8ceb459 100644 --- a/python/paddle/tests/test_dataset_movielens.py +++ b/python/paddle/tests/test_dataset_movielens.py @@ -19,7 +19,6 @@ from paddle.text.datasets import Movielens class TestMovielensTrain(unittest.TestCase): - def test_main(self): movielens = Movielens(mode='train') # movielens dataset random split train/test @@ -37,7 +36,6 @@ class TestMovielensTrain(unittest.TestCase): class TestMovielensTest(unittest.TestCase): - def test_main(self): movielens = Movielens(mode='test') # movielens dataset random split train/test diff --git a/python/paddle/tests/test_dataset_uci_housing.py b/python/paddle/tests/test_dataset_uci_housing.py index 
563dd62efa3b62572d59a95c3d772caf33fc099d..fe85e7683dd9c6e635edf1c1875027bc49f0f975 100644 --- a/python/paddle/tests/test_dataset_uci_housing.py +++ b/python/paddle/tests/test_dataset_uci_housing.py @@ -19,7 +19,6 @@ from paddle.text.datasets import UCIHousing, WMT14 class TestUCIHousingTrain(unittest.TestCase): - def test_main(self): uci_housing = UCIHousing(mode='train') self.assertTrue(len(uci_housing) == 404) @@ -36,7 +35,6 @@ class TestUCIHousingTrain(unittest.TestCase): class TestUCIHousingTest(unittest.TestCase): - def test_main(self): uci_housing = UCIHousing(mode='test') self.assertTrue(len(uci_housing) == 102) @@ -53,7 +51,6 @@ class TestUCIHousingTest(unittest.TestCase): class TestWMT14Train(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='train', dict_size=50) self.assertTrue(len(wmt14) == 191155) @@ -69,7 +66,6 @@ class TestWMT14Train(unittest.TestCase): class TestWMT14Test(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='test', dict_size=50) self.assertTrue(len(wmt14) == 5957) @@ -85,7 +81,6 @@ class TestWMT14Test(unittest.TestCase): class TestWMT14Gen(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='gen', dict_size=50) self.assertTrue(len(wmt14) == 3001) diff --git a/python/paddle/tests/test_dataset_voc.py b/python/paddle/tests/test_dataset_voc.py index 4d67ba9c10a5415f28dcd4f88364d98838847822..459068c2df2016a5e5f75d5f76c645f462dca7c0 100644 --- a/python/paddle/tests/test_dataset_voc.py +++ b/python/paddle/tests/test_dataset_voc.py @@ -23,7 +23,6 @@ voc2012.VOC_MD5 = '34cb1fe5bdc139a5454b25b16118fff8' class TestVOC2012Train(unittest.TestCase): - def test_main(self): voc2012 = VOC2012(mode='train') self.assertTrue(len(voc2012) == 3) @@ -40,7 +39,6 @@ class TestVOC2012Train(unittest.TestCase): class TestVOC2012Valid(unittest.TestCase): - def test_main(self): voc2012 = VOC2012(mode='valid') self.assertTrue(len(voc2012) == 1) @@ -57,7 +55,6 @@ class TestVOC2012Valid(unittest.TestCase): class TestVOC2012Test(unittest.TestCase): - def test_main(self): voc2012 = VOC2012(mode='test') self.assertTrue(len(voc2012) == 2) diff --git a/python/paddle/tests/test_dataset_wmt.py b/python/paddle/tests/test_dataset_wmt.py index 48186ab1864aa1da2fc80f15868031a167869ff3..43663945e20fd5d7437f781ab73ec2c417af4ddc 100644 --- a/python/paddle/tests/test_dataset_wmt.py +++ b/python/paddle/tests/test_dataset_wmt.py @@ -19,7 +19,6 @@ from paddle.text.datasets import WMT14, WMT16 class TestWMT14Train(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='train', dict_size=50) self.assertTrue(len(wmt14) == 191155) @@ -35,7 +34,6 @@ class TestWMT14Train(unittest.TestCase): class TestWMT14Test(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='test', dict_size=50) self.assertTrue(len(wmt14) == 5957) @@ -51,7 +49,6 @@ class TestWMT14Test(unittest.TestCase): class TestWMT14Gen(unittest.TestCase): - def test_main(self): wmt14 = WMT14(mode='gen', dict_size=50) self.assertTrue(len(wmt14) == 3001) @@ -67,12 +64,10 @@ class TestWMT14Gen(unittest.TestCase): class TestWMT16Train(unittest.TestCase): - def test_main(self): - wmt16 = WMT16(mode='train', - src_dict_size=50, - trg_dict_size=50, - lang='en') + wmt16 = WMT16( + mode='train', src_dict_size=50, trg_dict_size=50, lang='en' + ) self.assertTrue(len(wmt16) == 29000) # traversal whole dataset may cost a @@ -86,12 +81,10 @@ class TestWMT16Train(unittest.TestCase): class TestWMT16Test(unittest.TestCase): - def test_main(self): - wmt16 = WMT16(mode='test', - src_dict_size=50, - trg_dict_size=50, - 
lang='en') + wmt16 = WMT16( + mode='test', src_dict_size=50, trg_dict_size=50, lang='en' + ) self.assertTrue(len(wmt16) == 1000) # traversal whole dataset may cost a @@ -105,7 +98,6 @@ class TestWMT16Test(unittest.TestCase): class TestWMT16Val(unittest.TestCase): - def test_main(self): wmt16 = WMT16(mode='val', src_dict_size=50, trg_dict_size=50, lang='en') self.assertTrue(len(wmt16) == 1014) diff --git a/python/paddle/tests/test_datasets.py b/python/paddle/tests/test_datasets.py index 0f6396ec051491bf3fe54a37fa3a17699cb7554f..172d9007de595a74c589c8d2eb005f9e9779288f 100644 --- a/python/paddle/tests/test_datasets.py +++ b/python/paddle/tests/test_datasets.py @@ -20,13 +20,18 @@ import shutil import cv2 import paddle.vision.transforms as T -from paddle.vision.datasets import DatasetFolder, ImageFolder, MNIST, FashionMNIST, Flowers +from paddle.vision.datasets import ( + DatasetFolder, + ImageFolder, + MNIST, + FashionMNIST, + Flowers, +) from paddle.dataset.common import _check_exists_and_download from paddle.fluid.framework import _test_eager_guard class TestFolderDatasets(unittest.TestCase): - def setUp(self): self.data_dir = tempfile.mkdtemp() self.empty_dir = tempfile.mkdtemp() @@ -77,7 +82,6 @@ class TestFolderDatasets(unittest.TestCase): self.func_test_folder() def func_test_transform(self): - def fake_transform(img): return img @@ -112,7 +116,6 @@ class TestFolderDatasets(unittest.TestCase): class TestMNISTTest(unittest.TestCase): - def func_test_main(self): transform = T.Transpose() mnist = MNIST(mode='test', transform=transform) @@ -133,7 +136,6 @@ class TestMNISTTest(unittest.TestCase): class TestMNISTTrain(unittest.TestCase): - def func_test_main(self): transform = T.Transpose() mnist = MNIST(mode='train', transform=transform) @@ -170,7 +172,6 @@ class TestMNISTTrain(unittest.TestCase): class TestFASHIONMNISTTest(unittest.TestCase): - def func_test_main(self): transform = T.Transpose() mnist = FashionMNIST(mode='test', transform=transform) @@ -191,7 +192,6 @@ class TestFASHIONMNISTTest(unittest.TestCase): class TestFASHIONMNISTTrain(unittest.TestCase): - def func_test_main(self): transform = T.Transpose() mnist = FashionMNIST(mode='train', transform=transform) @@ -240,7 +240,6 @@ class TestFASHIONMNISTTrain(unittest.TestCase): class TestFlowersTrain(unittest.TestCase): - def func_test_main(self): flowers = Flowers(mode='train') self.assertTrue(len(flowers) == 6149) @@ -261,7 +260,6 @@ class TestFlowersTrain(unittest.TestCase): class TestFlowersValid(unittest.TestCase): - def func_test_main(self): flowers = Flowers(mode='valid') self.assertTrue(len(flowers) == 1020) @@ -282,7 +280,6 @@ class TestFlowersValid(unittest.TestCase): class TestFlowersTest(unittest.TestCase): - def func_test_main(self): flowers = Flowers(mode='test') self.assertTrue(len(flowers) == 1020) diff --git a/python/paddle/tests/test_dist_hapi_model.py b/python/paddle/tests/test_dist_hapi_model.py index 852572c3c7d044f5f293f54faa85b638bc64614e..0ec5cc4f845bf671372f1c41323e1fe3df06e139 100644 --- a/python/paddle/tests/test_dist_hapi_model.py +++ b/python/paddle/tests/test_dist_hapi_model.py @@ -19,7 +19,12 @@ import copy import subprocess import paddle.fluid as fluid -from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc +from paddle.distributed.utils.launch_utils import ( + find_free_ports, + watch_local_trainers, + get_cluster, + TrainerProc, +) def get_cluster_from_args(selected_gpus): @@ -47,17 +52,19 @@ def get_gpus(selected_gpus): return 
selected_gpus -def start_local_trainers(cluster, - pod, - training_script, - eager_mode, - training_script_args, - log_dir=None): +def start_local_trainers( + cluster, + pod, + training_script, + eager_mode, + training_script_args, + log_dir=None, +): current_env = copy.copy(os.environ.copy()) - #paddle broadcast ncclUniqueId use socket, and - #proxy maybe make trainers unreachable, so delete them. - #if we set them to "", grpc will log error message "bad uri" - #so just delete them. + # paddle broadcast ncclUniqueId use socket, and + # proxy maybe make trainers unreachable, so delete them. + # if we set them to "", grpc will log error message "bad uri" + # so just delete them. current_env.pop("http_proxy", None) current_env.pop("https_proxy", None) @@ -68,7 +75,7 @@ def start_local_trainers(cluster, "PADDLE_TRAINER_ID": "%d" % t.rank, "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint, "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(), - "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()) + "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()), } if not eager_mode: @@ -101,7 +108,6 @@ def start_local_trainers(cluster, class TestMultipleGpus(unittest.TestCase): - def run_mnist_2gpu(self, target_file_name, eager_mode=True): if fluid.core.get_cuda_device_count() == 0: return @@ -112,11 +118,13 @@ class TestMultipleGpus(unittest.TestCase): cluster, pod = get_cluster_from_args(selected_gpus) - procs = start_local_trainers(cluster, - pod, - eager_mode=eager_mode, - training_script=target_file_name, - training_script_args=[]) + procs = start_local_trainers( + cluster, + pod, + eager_mode=eager_mode, + training_script=target_file_name, + training_script_args=[], + ) while True: alive = watch_local_trainers(procs, cluster.trainers_nranks()) diff --git a/python/paddle/tests/test_dlpack.py b/python/paddle/tests/test_dlpack.py index 256ca41ffff2d5383366902026d0dcfb030d1de2..5e4b8d2ef20b60fbc4897a1a1193accd046dcbe0 100644 --- a/python/paddle/tests/test_dlpack.py +++ b/python/paddle/tests/test_dlpack.py @@ -22,7 +22,6 @@ from paddle.fluid.framework import _test_eager_guard class TestDLPack(unittest.TestCase): - def func_test_dlpack_dygraph(self): paddle.disable_static() tensor = paddle.to_tensor(np.array([1, 2, 3, 4]).astype('int')) @@ -30,11 +29,13 @@ class TestDLPack(unittest.TestCase): out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) if paddle.fluid.framework.in_dygraph_mode(): self.assertTrue( - isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor)) + isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor) + ) else: self.assertTrue(isinstance(out_from_dlpack, paddle.Tensor)) - np.testing.assert_array_equal(np.array(out_from_dlpack), - np.array([1, 2, 3, 4]).astype('int')) + np.testing.assert_array_equal( + np.array(out_from_dlpack), np.array([1, 2, 3, 4]).astype('int') + ) def test_dlpack_dygraph(self): with _test_eager_guard(): @@ -58,26 +59,32 @@ class TestDLPack(unittest.TestCase): def test_dlpack_static(self): paddle.enable_static() tensor = fluid.create_lod_tensor( - np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace()) + np.array([[1], [2], [3], [4]]).astype('int'), + [[1, 3]], + fluid.CPUPlace(), + ) dlpack = paddle.utils.dlpack.to_dlpack(tensor) out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) self.assertTrue(isinstance(out_from_dlpack, fluid.core.Tensor)) np.testing.assert_array_equal( np.array(out_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int')) + np.array([[1], [2], [3], [4]]).astype('int'), + ) # 
when build with cuda if core.is_compiled_with_cuda(): gtensor = fluid.create_lod_tensor( - np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CUDAPlace(0)) + np.array([[1], [2], [3], [4]]).astype('int'), + [[1, 3]], + fluid.CUDAPlace(0), + ) gdlpack = paddle.utils.dlpack.to_dlpack(gtensor) gout_from_dlpack = paddle.utils.dlpack.from_dlpack(gdlpack) self.assertTrue(isinstance(gout_from_dlpack, fluid.core.Tensor)) np.testing.assert_array_equal( np.array(gout_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int')) + np.array([[1], [2], [3], [4]]).astype('int'), + ) def func_test_dlpack_dtype_conversion(self): paddle.disable_static() @@ -104,7 +111,8 @@ class TestDLPack(unittest.TestCase): for dtype in complex_dtypes: x = paddle.to_tensor( [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]], - dtype=dtype) + dtype=dtype, + ) dlpack = paddle.utils.dlpack.to_dlpack(x) o = paddle.utils.dlpack.from_dlpack(dlpack) self.assertEqual(x.dtype, o.dtype) @@ -117,10 +125,10 @@ class TestDLPack(unittest.TestCase): class TestRaiseError(unittest.TestCase): - def func_test_from_dlpack_raise_type_error(self): - self.assertRaises(TypeError, paddle.utils.dlpack.from_dlpack, - np.zeros(5)) + self.assertRaises( + TypeError, paddle.utils.dlpack.from_dlpack, np.zeros(5) + ) def test_from_dlpack_raise_type_error(self): with _test_eager_guard(): diff --git a/python/paddle/tests/test_download.py b/python/paddle/tests/test_download.py index 3e6fcc54297809aff620c46adb56dd31b686d679..0978acbf56ab4edf12c21893bb8ebc4b5d512776 100644 --- a/python/paddle/tests/test_download.py +++ b/python/paddle/tests/test_download.py @@ -20,7 +20,6 @@ from paddle.utils.download import get_path_from_url class TestDownload(unittest.TestCase): - def download(self, url, md5sum): get_weights_path_from_url(url, md5sum) @@ -75,8 +74,9 @@ class TestDownload(unittest.TestCase): def test_uncompress_result(self): results = [ [ - "files/single_dir/file1", "files/single_dir/file2", - "files/single_file.pdparams" + "files/single_dir/file1", + "files/single_dir/file2", + "files/single_file.pdparams", ], ["single_dir/file1", "single_dir/file2"], ["single_file.pdparams"], @@ -89,8 +89,14 @@ class TestDownload(unittest.TestCase): for url, uncompressd_res in zip(tar_urls, results): uncompressed_path = get_path_from_url(url, root_dir='./test_tar') - self.assertTrue(all([os.path.exists(os.path.join("./test_tar", filepath)) \ - for filepath in uncompressd_res])) + self.assertTrue( + all( + [ + os.path.exists(os.path.join("./test_tar", filepath)) + for filepath in uncompressd_res + ] + ) + ) zip_urls = [ "https://paddle-hapi.bj.bcebos.com/unittest/files.zip", @@ -99,23 +105,37 @@ class TestDownload(unittest.TestCase): ] for url, uncompressd_res in zip(zip_urls, results): uncompressed_path = get_path_from_url(url, root_dir='./test_zip') - self.assertTrue(all([os.path.exists(os.path.join("./test_zip", filepath)) \ - for filepath in uncompressd_res])) + self.assertTrue( + all( + [ + os.path.exists(os.path.join("./test_zip", filepath)) + for filepath in uncompressd_res + ] + ) + ) - def test_retry_exception(self, ): + def test_retry_exception( + self, + ): with self.assertRaises(RuntimeError): from paddle.utils.download import _download + _download( 'www.baidu.com', './test', ) - def test_wget_download_error(self, ): + def test_wget_download_error( + self, + ): with self.assertRaises(RuntimeError): from paddle.utils.download import _download + _download('www.baidu', './test', method='wget') - def test_download_methods(self, ): + def 
test_download_methods( + self, + ): urls = [ "https://paddle-hapi.bj.bcebos.com/unittest/files.tar", "https://paddle-hapi.bj.bcebos.com/unittest/files.zip", @@ -123,6 +143,7 @@ class TestDownload(unittest.TestCase): import sys from paddle.utils.download import _download + if sys.platform == 'linux': methods = ['wget', 'get'] else: diff --git a/python/paddle/tests/test_hapi_amp.py b/python/paddle/tests/test_hapi_amp.py index 537dab34a6853038e34eb19758b97478994d45b2..e5be62a9e4868133bf485008b15593300c4f7907 100644 --- a/python/paddle/tests/test_hapi_amp.py +++ b/python/paddle/tests/test_hapi_amp.py @@ -31,30 +31,31 @@ from paddle.vision.datasets import MNIST import paddle.vision.transforms as T -@unittest.skipIf(not fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestHapiWithAmp(unittest.TestCase): - def get_model(self, amp_config): net = LeNet() inputs = InputSpec([None, 1, 28, 28], "float32", 'x') labels = InputSpec([None, 1], "int64", "y") model = Model(net, inputs, labels) - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) - model.prepare(optimizer=optim, - loss=CrossEntropyLoss(reduction="sum"), - amp_configs=amp_config) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) + model.prepare( + optimizer=optim, + loss=CrossEntropyLoss(reduction="sum"), + amp_configs=amp_config, + ) return model def run_model(self, model): transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = MNIST(mode='train', transform=transform) - model.fit(train_dataset, - epochs=1, - batch_size=64, - num_iters=2, - log_freq=1) + model.fit( + train_dataset, epochs=1, batch_size=64, num_iters=2, log_freq=1 + ) def run_amp(self, amp_level): for dynamic in [True, False]: @@ -93,11 +94,9 @@ class TestHapiWithAmp(unittest.TestCase): model = self.get_model(amp_level) transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) train_dataset = MNIST(mode='train', transform=transform) - model.fit(train_dataset, - epochs=1, - batch_size=64, - num_iters=2, - log_freq=1) + model.fit( + train_dataset, epochs=1, batch_size=64, num_iters=2, log_freq=1 + ) temp_dir = tempfile.TemporaryDirectory() lenet_amp_path = os.path.join(temp_dir.name, './lenet_amp') model.save(lenet_amp_path) @@ -106,42 +105,43 @@ class TestHapiWithAmp(unittest.TestCase): paddle.seed(2021) new_model = self.get_model(amp_level) train_dataset = MNIST(mode='train', transform=transform) - new_model.fit(train_dataset, - epochs=1, - batch_size=64, - num_iters=1, - log_freq=1) + new_model.fit( + train_dataset, epochs=1, batch_size=64, num_iters=1, log_freq=1 + ) # not equal before load - self.assertNotEqual(new_model._scaler.state_dict()['incr_count'], - model._scaler.state_dict()['incr_count']) - print((new_model._scaler.state_dict()['incr_count'], - model._scaler.state_dict()['incr_count'])) + self.assertNotEqual( + new_model._scaler.state_dict()['incr_count'], + model._scaler.state_dict()['incr_count'], + ) + print( + ( + new_model._scaler.state_dict()['incr_count'], + model._scaler.state_dict()['incr_count'], + ) + ) # equal after load new_model.load(lenet_amp_path) temp_dir.cleanup() - self.assertEqual(new_model._scaler.state_dict()['incr_count'], - model._scaler.state_dict()['incr_count']) - self.assertEqual(new_model._scaler.state_dict()['decr_count'], - model._scaler.state_dict()['decr_count']) + self.assertEqual( + 
new_model._scaler.state_dict()['incr_count'], + model._scaler.state_dict()['incr_count'], + ) + self.assertEqual( + new_model._scaler.state_dict()['decr_count'], + model._scaler.state_dict()['decr_count'], + ) np.testing.assert_array_equal( new_model._optimizer.state_dict()['conv2d_1.w_0_moment1_0'].numpy(), - model._optimizer.state_dict()['conv2d_1.w_0_moment1_0'].numpy()) + model._optimizer.state_dict()['conv2d_1.w_0_moment1_0'].numpy(), + ) def test_dynamic_check_input(self): paddle.disable_static() amp_configs_list = [ - { - "level": "O3" - }, - { - "level": "O1", - "test": 0 - }, - { - "level": "O1", - "use_fp16_guard": True - }, + {"level": "O3"}, + {"level": "O1", "test": 0}, + {"level": "O1", "use_fp16_guard": True}, "O3", ] if not fluid.is_compiled_with_cuda(): @@ -149,21 +149,24 @@ class TestHapiWithAmp(unittest.TestCase): paddle.set_device('gpu') net = LeNet() model = Model(net) - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) loss = CrossEntropyLoss(reduction="sum") with self.assertRaises(ValueError): for amp_configs in amp_configs_list: - model.prepare(optimizer=optim, - loss=loss, - amp_configs=amp_configs) + model.prepare( + optimizer=optim, loss=loss, amp_configs=amp_configs + ) model.prepare(optimizer=optim, loss=loss, amp_configs="O2") - model.prepare(optimizer=optim, - loss=loss, - amp_configs={ - "custom_white_list": {"matmul"}, - "init_loss_scaling": 1.0 - }) + model.prepare( + optimizer=optim, + loss=loss, + amp_configs={ + "custom_white_list": {"matmul"}, + "init_loss_scaling": 1.0, + }, + ) def test_static_check_input(self): paddle.enable_static() @@ -177,8 +180,9 @@ class TestHapiWithAmp(unittest.TestCase): labels = InputSpec([None, 1], "int64", "y") model = Model(net, inputs, labels) - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=model.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=model.parameters() + ) loss = CrossEntropyLoss(reduction="sum") with self.assertRaises(ValueError): model.prepare(optimizer=optim, loss=loss, amp_configs=amp_configs) diff --git a/python/paddle/tests/test_hapi_hub.py b/python/paddle/tests/test_hapi_hub.py index 526b35eddff6cfba95c4c63dc6deb02b42746da1..2a383311e1eba546533b556359692074620f0dc1 100644 --- a/python/paddle/tests/test_hapi_hub.py +++ b/python/paddle/tests/test_hapi_hub.py @@ -22,72 +22,90 @@ import numpy as np class TestHub(unittest.TestCase): - - def setUp(self, ): + def setUp( + self, + ): self.local_repo = os.path.dirname(os.path.abspath(__file__)) self.github_repo = 'lyuwenyu/paddlehub_demo:main' - def testLoad(self, ): - model = hub.load(self.local_repo, - model='MM', - source='local', - out_channels=8) + def testLoad( + self, + ): + model = hub.load( + self.local_repo, model='MM', source='local', out_channels=8 + ) data = paddle.rand((1, 3, 100, 100)) out = model(data) np.testing.assert_equal(out.shape, [1, 8, 50, 50]) - model = hub.load(self.github_repo, - model='MM', - source='github', - force_reload=True) - - model = hub.load(self.github_repo, - model='MM', - source='github', - force_reload=False, - pretrained=False) - - model = hub.load(self.github_repo.split(':')[0], - model='MM', - source='github', - force_reload=False, - pretrained=False) - - model = hub.load(self.github_repo, - model='MM', - source='github', - force_reload=False, - pretrained=True, - out_channels=8) + model = hub.load( + self.github_repo, model='MM', source='github', 
force_reload=True + ) + + model = hub.load( + self.github_repo, + model='MM', + source='github', + force_reload=False, + pretrained=False, + ) + + model = hub.load( + self.github_repo.split(':')[0], + model='MM', + source='github', + force_reload=False, + pretrained=False, + ) + + model = hub.load( + self.github_repo, + model='MM', + source='github', + force_reload=False, + pretrained=True, + out_channels=8, + ) data = paddle.ones((1, 3, 2, 2)) out = model(data) - gt = np.array([ - 1.53965068, 0., 0., 1.39455748, 0.72066200, 0.19773030, 2.09201908, - 0.37345418 - ]) + gt = np.array( + [ + 1.53965068, + 0.0, + 0.0, + 1.39455748, + 0.72066200, + 0.19773030, + 2.09201908, + 0.37345418, + ] + ) np.testing.assert_equal(out.shape, [1, 8, 1, 1]) - np.testing.assert_almost_equal(out.numpy(), - gt.reshape(1, 8, 1, 1), - decimal=5) + np.testing.assert_almost_equal( + out.numpy(), gt.reshape(1, 8, 1, 1), decimal=5 + ) - def testHelp(self, ): + def testHelp( + self, + ): docs1 = hub.help( self.local_repo, model='MM', source='local', ) - docs2 = hub.help(self.github_repo, - model='MM', - source='github', - force_reload=False) + docs2 = hub.help( + self.github_repo, model='MM', source='github', force_reload=False + ) assert docs1 == docs2 == 'This is a test demo for paddle hub\n ', '' - def testList(self, ): + def testList( + self, + ): models1 = hub.list( self.local_repo, source='local', @@ -102,35 +120,39 @@ class TestHub(unittest.TestCase): assert models1 == models2 == ['MM'], '' - def testExcept(self, ): + def testExcept( + self, + ): with self.assertRaises(ValueError): - _ = hub.help(self.github_repo, - model='MM', - source='github-test', - force_reload=False) + _ = hub.help( + self.github_repo, + model='MM', + source='github-test', + force_reload=False, + ) with self.assertRaises(ValueError): - _ = hub.load(self.github_repo, - model='MM', - source='github-test', - force_reload=False) + _ = hub.load( + self.github_repo, + model='MM', + source='github-test', + force_reload=False, + ) with self.assertRaises(ValueError): - _ = hub.list(self.github_repo, - source='github-test', - force_reload=False) + _ = hub.list( + self.github_repo, source='github-test', force_reload=False + ) with self.assertRaises(ValueError): - _ = hub.load(self.local_repo, - model=123, - source='local', - force_reload=False) + _ = hub.load( + self.local_repo, model=123, source='local', force_reload=False + ) with self.assertRaises(RuntimeError): - _ = hub.load(self.local_repo, - model='123', - source='local', - force_reload=False) + _ = hub.load( + self.local_repo, model='123', source='local', force_reload=False + ) if __name__ == '__main__': diff --git a/python/paddle/tests/test_hapi_hub_model.py b/python/paddle/tests/test_hapi_hub_model.py index 6170409fbe084c30f618c25ebc413752d33711cd..f7e2b716a1df5701b503d8770ef4ccb1b7f7ece8 100644 --- a/python/paddle/tests/test_hapi_hub_model.py +++ b/python/paddle/tests/test_hapi_hub_model.py @@ -17,7 +17,6 @@ import paddle.nn.functional as F class MM(nn.Layer): - def __init__(self, out_channels): super(MM, self).__init__() self.conv = nn.Conv2D(3, out_channels, 3, 2, 1) diff --git a/python/paddle/tests/test_logger.py b/python/paddle/tests/test_logger.py index 4566a7ff693fb614ed7c662e9266ca45c8326826..e63b4e2cab25a2d34b382c38062d3469d2dde59f 100644 --- a/python/paddle/tests/test_logger.py +++ b/python/paddle/tests/test_logger.py @@ -21,7 +21,6 @@ from paddle.hapi.logger import setup_logger class TestSetupLogger(unittest.TestCase): - def setUp(self): self.save_dir = tempfile.mkdtemp() 
self.save_file = os.path.join(self.save_dir, 'logger.txt') diff --git a/python/paddle/tests/test_metrics.py b/python/paddle/tests/test_metrics.py index 61b20251e048603f8a7449603247ad116588b78e..b3bfb1eb9c842bc2f0bf0c3dfff8eaccf41acc0b 100644 --- a/python/paddle/tests/test_metrics.py +++ b/python/paddle/tests/test_metrics.py @@ -27,7 +27,7 @@ def one_hot(x, n_class): return res -def accuracy(pred, label, topk=(1, )): +def accuracy(pred, label, topk=(1,)): maxk = max(topk) pred = np.argsort(pred)[..., ::-1][..., :maxk] if len(label.shape) == 1: @@ -35,7 +35,7 @@ def accuracy(pred, label, topk=(1, )): elif label.shape[-1] != 1: label = np.argmax(label, axis=-1) label = label[..., np.newaxis] - correct = (pred == np.repeat(label, maxk, -1)) + correct = pred == np.repeat(label, maxk, -1) total = np.prod(np.array(label.shape[:-1])) @@ -50,16 +50,22 @@ def convert_to_one_hot(y, C): oh = np.random.choice(np.arange(C), C, replace=False).astype('float32') / C oh = np.tile(oh[np.newaxis, :], (y.shape[0], 1)) for i in range(y.shape[0]): - oh[i, int(y[i])] = 1. + oh[i, int(y[i])] = 1.0 return oh class TestAccuracy(unittest.TestCase): - def test_acc(self, squeeze_y=False): x = paddle.to_tensor( - np.array([[0.1, 0.2, 0.3, 0.4], [0.1, 0.4, 0.3, 0.2], - [0.1, 0.2, 0.4, 0.3], [0.1, 0.2, 0.3, 0.4]])) + np.array( + [ + [0.1, 0.2, 0.3, 0.4], + [0.1, 0.4, 0.3, 0.2], + [0.1, 0.2, 0.4, 0.3], + [0.1, 0.2, 0.3, 0.4], + ] + ) + ) y = np.array([[0], [1], [2], [3]]) if squeeze_y: @@ -79,8 +85,15 @@ class TestAccuracy(unittest.TestCase): self.assertEqual(m.accumulate(), 0.75) x = paddle.to_tensor( - np.array([[0.1, 0.2, 0.3, 0.4], [0.1, 0.3, 0.4, 0.2], - [0.1, 0.2, 0.4, 0.3], [0.1, 0.2, 0.3, 0.4]])) + np.array( + [ + [0.1, 0.2, 0.3, 0.4], + [0.1, 0.3, 0.4, 0.2], + [0.1, 0.2, 0.4, 0.3], + [0.1, 0.2, 0.3, 0.4], + ] + ) + ) y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) correct = m.compute(x, y) # check results @@ -95,7 +108,7 @@ class TestAccuracy(unittest.TestCase): def test_1d_label(self): self.test_acc(True) - def compare(self, x_np, y_np, k=(1, )): + def compare(self, x_np, y_np, k=(1,)): x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) @@ -123,19 +136,20 @@ class TestAccuracy(unittest.TestCase): class TestAccuracyDynamic(unittest.TestCase): - def setUp(self): - self.topk = (1, ) + self.topk = (1,) self.class_num = 5 self.sample_num = 1000 self.name = None self.squeeze_label = False def random_pred_label(self): - label = np.random.randint(0, self.class_num, - (self.sample_num, 1)).astype('int64') - pred = np.random.randint(0, self.class_num, - (self.sample_num, 1)).astype('int32') + label = np.random.randint( + 0, self.class_num, (self.sample_num, 1) + ).astype('int64') + pred = np.random.randint( + 0, self.class_num, (self.sample_num, 1) + ).astype('int32') if self.squeeze_label: label = label.squeeze() pred_one_hot = convert_to_one_hot(pred, self.class_num) @@ -154,16 +168,19 @@ class TestAccuracyDynamic(unittest.TestCase): acc.update(*[s.numpy() for s in state]) res_m = acc.accumulate() res_f = accuracy(pred, label, self.topk) - assert np.all(np.isclose(np.array(res_m, dtype='float64'), - np.array(res_f, dtype='float64'), rtol=1e-3)), \ - "Accuracy precision error: {} != {}".format(res_m, res_f) + assert np.all( + np.isclose( + np.array(res_m, dtype='float64'), + np.array(res_f, dtype='float64'), + rtol=1e-3, + ) + ), "Accuracy precision error: {} != {}".format(res_m, res_f) acc.reset() assert np.sum(acc.total) == 0 assert np.sum(acc.count) == 0 class TestAccuracyDynamicMultiTopk(TestAccuracyDynamic): 
- def setUp(self): self.topk = (1, 5) self.class_num = 10 @@ -173,9 +190,8 @@ class TestAccuracyDynamicMultiTopk(TestAccuracyDynamic): class TestAccuracyStatic(TestAccuracyDynamic): - def setUp(self): - self.topk = (1, ) + self.topk = (1,) self.class_num = 5 self.sample_num = 1000 self.name = None @@ -189,9 +205,9 @@ class TestAccuracyStatic(TestAccuracyDynamic): main_prog.random_seed = 1024 startup_prog.random_seed = 1024 with fluid.program_guard(main_prog, startup_prog): - pred = fluid.data(name='pred', - shape=[None, self.class_num], - dtype='float32') + pred = fluid.data( + name='pred', shape=[None, self.class_num], dtype='float32' + ) label = fluid.data(name='label', shape=[None, 1], dtype='int64') acc = paddle.metric.Accuracy(topk=self.topk, name=self.name) state = acc.compute(pred, label) @@ -201,18 +217,18 @@ class TestAccuracyStatic(TestAccuracyDynamic): for _ in range(10): label, pred = self.random_pred_label() - state_ret = exe.run(compiled_main_prog, - feed={ - 'pred': pred, - 'label': label - }, - fetch_list=[s.name for s in to_list(state)], - return_numpy=True) + state_ret = exe.run( + compiled_main_prog, + feed={'pred': pred, 'label': label}, + fetch_list=[s.name for s in to_list(state)], + return_numpy=True, + ) acc.update(*state_ret) res_m = acc.accumulate() res_f = accuracy(pred, label, self.topk) - assert np.all(np.isclose(np.array(res_m), np.array(res_f), rtol=1e-3)), \ - "Accuracy precision error: {} != {}".format(res_m, res_f) + assert np.all( + np.isclose(np.array(res_m), np.array(res_f), rtol=1e-3) + ), "Accuracy precision error: {} != {}".format(res_m, res_f) acc.reset() assert np.sum(acc.total) == 0 assert np.sum(acc.count) == 0 @@ -221,7 +237,6 @@ class TestAccuracyStatic(TestAccuracyDynamic): class TestAccuracyStaticMultiTopk(TestAccuracyStatic): - def setUp(self): self.topk = (1, 5) self.class_num = 10 @@ -231,7 +246,6 @@ class TestAccuracyStaticMultiTopk(TestAccuracyStatic): class TestPrecision(unittest.TestCase): - def test_1d(self): x = np.array([0.1, 0.5, 0.6, 0.7]) @@ -240,13 +254,13 @@ class TestPrecision(unittest.TestCase): m = paddle.metric.Precision() m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 2. / 3.) + self.assertAlmostEqual(r, 2.0 / 3.0) x = paddle.to_tensor(np.array([0.1, 0.5, 0.6, 0.7, 0.2])) y = paddle.to_tensor(np.array([1, 0, 1, 1, 1])) m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 4. / 6.) + self.assertAlmostEqual(r, 4.0 / 6.0) def test_2d(self): x = np.array([0.1, 0.5, 0.6, 0.7]).reshape(-1, 1) @@ -255,13 +269,13 @@ class TestPrecision(unittest.TestCase): m = paddle.metric.Precision() m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 2. / 3.) + self.assertAlmostEqual(r, 2.0 / 3.0) x = np.array([0.1, 0.5, 0.6, 0.7, 0.2]).reshape(-1, 1) y = np.array([1, 0, 1, 1, 1]).reshape(-1, 1) m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 4. / 6.) + self.assertAlmostEqual(r, 4.0 / 6.0) # check reset m.reset() @@ -271,7 +285,6 @@ class TestPrecision(unittest.TestCase): class TestRecall(unittest.TestCase): - def test_1d(self): x = np.array([0.1, 0.5, 0.6, 0.7]) y = np.array([1, 0, 1, 1]) @@ -279,13 +292,13 @@ class TestRecall(unittest.TestCase): m = paddle.metric.Recall() m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 2. / 3.) + self.assertAlmostEqual(r, 2.0 / 3.0) x = paddle.to_tensor(np.array([0.1, 0.5, 0.6, 0.7])) y = paddle.to_tensor(np.array([1, 0, 0, 1])) m.update(x, y) r = m.accumulate() - self.assertAlmostEqual(r, 3. / 5.) 
+ self.assertAlmostEqual(r, 3.0 / 5.0) # check reset m.reset() @@ -295,10 +308,19 @@ class TestRecall(unittest.TestCase): class TestAuc(unittest.TestCase): - def test_auc_numpy(self): - x = np.array([[0.78, 0.22], [0.62, 0.38], [0.55, 0.45], [0.30, 0.70], - [0.14, 0.86], [0.59, 0.41], [0.91, 0.08], [0.16, 0.84]]) + x = np.array( + [ + [0.78, 0.22], + [0.62, 0.38], + [0.55, 0.45], + [0.30, 0.70], + [0.14, 0.86], + [0.59, 0.41], + [0.91, 0.08], + [0.16, 0.84], + ] + ) y = np.array([[0], [1], [1], [0], [1], [0], [0], [1]]) m = paddle.metric.Auc() m.update(x, y) @@ -310,8 +332,19 @@ class TestAuc(unittest.TestCase): def test_auc_tensor(self): x = paddle.to_tensor( - np.array([[0.78, 0.22], [0.62, 0.38], [0.55, 0.45], [0.30, 0.70], - [0.14, 0.86], [0.59, 0.41], [0.91, 0.08], [0.16, 0.84]])) + np.array( + [ + [0.78, 0.22], + [0.62, 0.38], + [0.55, 0.45], + [0.30, 0.70], + [0.14, 0.86], + [0.59, 0.41], + [0.91, 0.08], + [0.16, 0.84], + ] + ) + ) y = paddle.to_tensor(np.array([[0], [1], [1], [0], [1], [0], [0], [1]])) m = paddle.metric.Auc() m.update(x, y) diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py index 5bb47b0ddfb32b7040d4a26a3f6e4bed93396090..212939932cb3e99b4c90f64478366a82354dc36f 100644 --- a/python/paddle/tests/test_model.py +++ b/python/paddle/tests/test_model.py @@ -34,23 +34,28 @@ import paddle.vision.models as models import paddle.fluid.dygraph.jit as jit from paddle.io import DistributedBatchSampler, Dataset from paddle.hapi.model import prepare_distributed_context -from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ( + ProgramTranslator, +) class LeNetDygraph(paddle.nn.Layer): - def __init__(self, num_classes=10): super(LeNetDygraph, self).__init__() self.num_classes = num_classes - self.features = Sequential(Conv2D(1, 6, 3, stride=1, padding=1), ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2), - Conv2D(6, 16, 5, stride=1, padding=0), - ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2)) + self.features = Sequential( + Conv2D(1, 6, 3, stride=1, padding=1), + ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + Conv2D(6, 16, 5, stride=1, padding=0), + ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + ) if num_classes > 0: - self.fc = Sequential(Linear(400, 120), Linear(120, 84), - Linear(84, 10)) + self.fc = Sequential( + Linear(400, 120), Linear(120, 84), Linear(84, 10) + ) def forward(self, inputs): x = self.features(inputs) @@ -62,7 +67,6 @@ class LeNetDygraph(paddle.nn.Layer): class ModelInner(paddle.nn.Layer): - def __init__(self): super(ModelInner, self).__init__() self.fc = paddle.nn.Linear(3, 4) @@ -73,7 +77,6 @@ class ModelInner(paddle.nn.Layer): class ModelOutter(paddle.nn.Layer): - def __init__(self): super(ModelOutter, self).__init__() self.module1 = ModelInner() @@ -86,22 +89,25 @@ class ModelOutter(paddle.nn.Layer): class LeNetListInput(paddle.nn.Layer): - def __init__(self, num_classes=10): super(LeNetListInput, self).__init__() self.num_classes = num_classes self.cov = Conv2D(1, 6, 3, stride=1, padding=1) for param in self.cov.parameters(): param.trainable = False - self.features = Sequential(self.cov, ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2), - Conv2D(6, 16, 5, stride=1, padding=0), - ReLU(), - paddle.fluid.dygraph.Pool2D(2, 'max', 2)) + self.features = Sequential( + self.cov, + ReLU(), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), + Conv2D(6, 16, 5, stride=1, padding=0), + ReLU(), + 
paddle.fluid.dygraph.Pool2D(2, 'max', 2), + ) if num_classes > 0: - self.fc = Sequential(Linear(400, 120), Linear(120, 84), - Linear(84, 10)) + self.fc = Sequential( + Linear(400, 120), Linear(120, 84), Linear(84, 10) + ) def forward(self, inputs): x = inputs[0] @@ -114,7 +120,6 @@ class LeNetListInput(paddle.nn.Layer): class LeNetDictInput(LeNetDygraph): - def forward(self, inputs): x = self.features(inputs['x1']) @@ -125,7 +130,6 @@ class LeNetDictInput(LeNetDygraph): class MnistDataset(MNIST): - def __init__(self, mode, return_label=True, sample_num=None): super(MnistDataset, self).__init__(mode=mode) self.return_label = return_label @@ -138,7 +142,7 @@ class MnistDataset(MNIST): img = np.reshape(img, [1, 28, 28]) if self.return_label: return img, np.array(self.labels[idx]).astype('int64') - return img, + return (img,) def __len__(self): return len(self.images) @@ -152,8 +156,9 @@ def compute_acc(pred, label): def dynamic_train(model, dataloader): - optim = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=model.parameters()) + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=model.parameters() + ) model.train() for inputs, labels in dataloader: outputs = model(inputs) @@ -171,17 +176,22 @@ def dynamic_evaluate(model, dataloader): for inputs, labels in dataloader: outputs = model(inputs) - cnt += (np.argmax( - outputs.numpy(), - -1)[:, np.newaxis] == labels.numpy()).astype('int').sum() + cnt += ( + ( + np.argmax(outputs.numpy(), -1)[:, np.newaxis] + == labels.numpy() + ) + .astype('int') + .sum() + ) return cnt / len(dataloader.dataset) -@unittest.skipIf(not fluid.is_compiled_with_cuda(), - 'CPU testing is not supported') +@unittest.skipIf( + not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' +) class TestModel(unittest.TestCase): - @classmethod def setUpClass(cls): if not fluid.is_compiled_with_cuda(): @@ -192,19 +202,19 @@ class TestModel(unittest.TestCase): sp_num = 1280 cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num) cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num) - cls.test_dataset = MnistDataset(mode='test', - return_label=False, - sample_num=sp_num) - - cls.train_loader = fluid.io.DataLoader(cls.train_dataset, - places=cls.device, - batch_size=64) - cls.val_loader = fluid.io.DataLoader(cls.val_dataset, - places=cls.device, - batch_size=64) - cls.test_loader = fluid.io.DataLoader(cls.test_dataset, - places=cls.device, - batch_size=64) + cls.test_dataset = MnistDataset( + mode='test', return_label=False, sample_num=sp_num + ) + + cls.train_loader = fluid.io.DataLoader( + cls.train_dataset, places=cls.device, batch_size=64 + ) + cls.val_loader = fluid.io.DataLoader( + cls.val_dataset, places=cls.device, batch_size=64 + ) + cls.test_loader = fluid.io.DataLoader( + cls.test_dataset, places=cls.device, batch_size=64 + ) seed = 333 paddle.seed(seed) @@ -277,46 +287,59 @@ class TestModel(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) net = LeNet() - optim_new = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=net.parameters()) + optim_new = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs=self.inputs, labels=self.labels) - model.prepare(optim_new, - loss=CrossEntropyLoss(reduction="sum"), - metrics=Accuracy()) + model.prepare( + optim_new, + loss=CrossEntropyLoss(reduction="sum"), + metrics=Accuracy(), + ) model.fit(self.train_dataset, batch_size=64, shuffle=False) result = model.evaluate(self.val_dataset, batch_size=64) 
np.testing.assert_allclose(result['acc'], self.acc1) - model.fit(self.train_dataset, - batch_size=64, - shuffle=False, - num_iters=num_iters) - - result = model.evaluate(self.val_dataset, - batch_size=64, - num_iters=num_iters) - - train_sampler = DistributedBatchSampler(self.train_dataset, - batch_size=64, - shuffle=False, - num_replicas=num_replicas, - rank=rank) - val_sampler = DistributedBatchSampler(self.val_dataset, - batch_size=64, - shuffle=False, - num_replicas=num_replicas, - rank=rank) - - train_loader = fluid.io.DataLoader(self.train_dataset, - batch_sampler=train_sampler, - places=self.device, - return_list=True) - - val_loader = fluid.io.DataLoader(self.val_dataset, - batch_sampler=val_sampler, - places=self.device, - return_list=True) + model.fit( + self.train_dataset, + batch_size=64, + shuffle=False, + num_iters=num_iters, + ) + + result = model.evaluate( + self.val_dataset, batch_size=64, num_iters=num_iters + ) + + train_sampler = DistributedBatchSampler( + self.train_dataset, + batch_size=64, + shuffle=False, + num_replicas=num_replicas, + rank=rank, + ) + val_sampler = DistributedBatchSampler( + self.val_dataset, + batch_size=64, + shuffle=False, + num_replicas=num_replicas, + rank=rank, + ) + + train_loader = fluid.io.DataLoader( + self.train_dataset, + batch_sampler=train_sampler, + places=self.device, + return_list=True, + ) + + val_loader = fluid.io.DataLoader( + self.val_dataset, + batch_sampler=val_sampler, + places=self.device, + return_list=True, + ) model.fit(train_loader, val_loader) fluid.disable_dygraph() if dynamic else None @@ -328,37 +351,48 @@ class TestModel(unittest.TestCase): paddle.framework.random._manual_program_seed(seed) net = LeNet() - optim_new = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=net.parameters()) + optim_new = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs=tuple(self.inputs), labels=tuple(self.labels)) - model.prepare(optim_new, - loss=CrossEntropyLoss(reduction="sum"), - metrics=Accuracy()) + model.prepare( + optim_new, + loss=CrossEntropyLoss(reduction="sum"), + metrics=Accuracy(), + ) model.fit(self.train_dataset, batch_size=64, shuffle=False) result = model.evaluate(self.val_dataset, batch_size=64) np.testing.assert_allclose(result['acc'], self.acc1) - train_sampler = DistributedBatchSampler(self.train_dataset, - batch_size=64, - shuffle=False, - num_replicas=num_replicas, - rank=rank) - val_sampler = DistributedBatchSampler(self.val_dataset, - batch_size=64, - shuffle=False, - num_replicas=num_replicas, - rank=rank) - - train_loader = fluid.io.DataLoader(self.train_dataset, - batch_sampler=train_sampler, - places=self.device, - return_list=True) - - val_loader = fluid.io.DataLoader(self.val_dataset, - batch_sampler=val_sampler, - places=self.device, - return_list=True) + train_sampler = DistributedBatchSampler( + self.train_dataset, + batch_size=64, + shuffle=False, + num_replicas=num_replicas, + rank=rank, + ) + val_sampler = DistributedBatchSampler( + self.val_dataset, + batch_size=64, + shuffle=False, + num_replicas=num_replicas, + rank=rank, + ) + + train_loader = fluid.io.DataLoader( + self.train_dataset, + batch_sampler=train_sampler, + places=self.device, + return_list=True, + ) + + val_loader = fluid.io.DataLoader( + self.val_dataset, + batch_sampler=val_sampler, + places=self.device, + return_list=True, + ) model.fit(train_loader, val_loader) fluid.disable_dygraph() if dynamic else None @@ -371,14 +405,16 @@ class TestModel(unittest.TestCase): 
result = model.evaluate(self.val_dataset, batch_size=64) np.testing.assert_allclose(result['acc'], self.acc1) - sampler = DistributedBatchSampler(self.val_dataset, - batch_size=64, - shuffle=False) + sampler = DistributedBatchSampler( + self.val_dataset, batch_size=64, shuffle=False + ) - val_loader = fluid.io.DataLoader(self.val_dataset, - batch_sampler=sampler, - places=self.device, - return_list=True) + val_loader = fluid.io.DataLoader( + self.val_dataset, + batch_sampler=sampler, + places=self.device, + return_list=True, + ) model.evaluate(val_loader) @@ -389,22 +425,24 @@ class TestModel(unittest.TestCase): model = Model(LeNet(), self.inputs) model.prepare() model.load(self.weight_path) - output = model.predict(self.test_dataset, - batch_size=64, - stack_outputs=True) + output = model.predict( + self.test_dataset, batch_size=64, stack_outputs=True + ) np.testing.assert_equal(output[0].shape[0], len(self.test_dataset)) acc = compute_acc(output[0], self.val_dataset.labels) np.testing.assert_allclose(acc, self.acc1) - sampler = DistributedBatchSampler(self.test_dataset, - batch_size=64, - shuffle=False) + sampler = DistributedBatchSampler( + self.test_dataset, batch_size=64, shuffle=False + ) - test_loader = fluid.io.DataLoader(self.test_dataset, - batch_sampler=sampler, - places=self.device, - return_list=True) + test_loader = fluid.io.DataLoader( + self.test_dataset, + batch_sampler=sampler, + places=self.device, + return_list=True, + ) model.evaluate(test_loader) @@ -416,21 +454,21 @@ class TestModel(unittest.TestCase): model.prepare() model.load(self.weight_path) model._inputs = None - output = model.predict(self.test_dataset, - batch_size=64, - stack_outputs=True) + output = model.predict( + self.test_dataset, batch_size=64, stack_outputs=True + ) np.testing.assert_equal(output[0].shape[0], len(self.test_dataset)) fluid.disable_dygraph() def test_summary_gpu(self): paddle.disable_static(self.device) rnn = paddle.nn.LSTM(16, 32, 2) - params_info = paddle.summary(rnn, [(-1, 23, 16), - ((2, None, 32), (2, -1, 32))]) + params_info = paddle.summary( + rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))] + ) class MyModel(paddle.nn.Layer): - def __init__(self): super(MyModel, self).__init__() self._fc = Linear(20, 10) @@ -441,17 +479,16 @@ class MyModel(paddle.nn.Layer): class MyDataset(Dataset): - def __getitem__(self, idx): - return np.random.random(size=(20,)).astype(np.float32), \ - np.random.randint(0, 10, size=(1,)).astype(np.int64) + return np.random.random(size=(20,)).astype( + np.float32 + ), np.random.randint(0, 10, size=(1,)).astype(np.int64) def __len__(self): return 40 class TestModelFunction(unittest.TestCase): - def set_seed(self, seed=1024): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -465,8 +502,9 @@ class TestModelFunction(unittest.TestCase): fluid.enable_dygraph(fluid.CPUPlace()) self.set_seed() m = MyModel() - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=m.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=m.parameters() + ) m.train() output = m(to_tensor(data)) loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label)) @@ -484,14 +522,15 @@ class TestModelFunction(unittest.TestCase): self.set_seed() net = MyModel() - optim2 = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=net.parameters()) + optim2 = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=net.parameters() + ) inputs = [InputSpec([None, dim], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 
'label')] model = Model(net, inputs, labels) model.prepare(optim2, loss=CrossEntropyLoss(reduction="sum")) - loss, = model.train_batch([data], [label]) + (loss,) = model.train_batch([data], [label]) np.testing.assert_allclose(loss.flatten(), ref.flatten()) fluid.disable_dygraph() if dynamic else None @@ -517,7 +556,7 @@ class TestModelFunction(unittest.TestCase): inputs = [InputSpec([None, dim], 'float32', 'x')] model = Model(net, inputs) model.prepare() - out, = model.predict_batch([data]) + (out,) = model.predict_batch([data]) np.testing.assert_allclose(out, ref, rtol=1e-6) fluid.disable_dygraph() if dynamic else None @@ -532,11 +571,13 @@ class TestModelFunction(unittest.TestCase): net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=net.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs, labels) - model.prepare(optimizer=optim, - loss=CrossEntropyLoss(reduction="sum")) + model.prepare( + optimizer=optim, loss=CrossEntropyLoss(reduction="sum") + ) model.save(path) model.load(path) fluid.disable_dygraph() if dynamic else None @@ -555,14 +596,17 @@ class TestModelFunction(unittest.TestCase): inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] if new_optimizer: - optim = paddle.optimizer.Adam(learning_rate=0.001, - parameters=net.parameters()) + optim = paddle.optimizer.Adam( + learning_rate=0.001, parameters=net.parameters() + ) else: - optim = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=net.parameters()) + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs, labels) - model.prepare(optimizer=optim, - loss=CrossEntropyLoss(reduction="sum")) + model.prepare( + optimizer=optim, loss=CrossEntropyLoss(reduction="sum") + ) model.fit(mnist_data, batch_size=64, verbose=0) model.save(path) model.load(path) @@ -570,16 +614,18 @@ class TestModelFunction(unittest.TestCase): shutil.rmtree(path) def test_dynamic_save_static_load(self): - path = os.path.join(tempfile.mkdtemp(), - '.cache_dynamic_save_static_load') + path = os.path.join( + tempfile.mkdtemp(), '.cache_dynamic_save_static_load' + ) if not os.path.exists(path): os.makedirs(path) # dynamic saving device = paddle.set_device('cpu') fluid.enable_dygraph(device) model = Model(MyModel()) - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=model.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=model.parameters() + ) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.save(path) fluid.disable_dygraph() @@ -587,34 +633,38 @@ class TestModelFunction(unittest.TestCase): inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] model = Model(MyModel(), inputs, labels) - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=model.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=model.parameters() + ) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.load(path) shutil.rmtree(path) def test_static_save_dynamic_load(self): - path = os.path.join(tempfile.mkdtemp(), - '.cache_test_static_save_dynamic_load') + path = os.path.join( + tempfile.mkdtemp(), '.cache_test_static_save_dynamic_load' + ) if not os.path.exists(path): 
os.makedirs(path) net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=net.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs, labels) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.save(path) device = paddle.set_device('cpu') - fluid.enable_dygraph(device) #if dynamic else None + fluid.enable_dygraph(device) # if dynamic else None net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] - optim = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=net.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=net.parameters() + ) model = Model(net, inputs, labels) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.load(path) @@ -635,7 +685,6 @@ class TestModelFunction(unittest.TestCase): fluid.disable_dygraph() if dynamic else None def test_summary(self): - def _get_param_from_state_dict(state_dict): params = 0 for k, v in state_dict.items(): @@ -663,22 +712,21 @@ class TestModelFunction(unittest.TestCase): paddle.summary(ModelOutter(), input_size=(-1, 3)) def test_summary_nlp(self): - def _get_param_from_state_dict(state_dict): params = 0 for k, v in state_dict.items(): params += np.prod(v.numpy().shape) return params - nlp_net = paddle.nn.GRU(input_size=2, - hidden_size=3, - num_layers=3, - direction="bidirectional") + nlp_net = paddle.nn.GRU( + input_size=2, hidden_size=3, num_layers=3, direction="bidirectional" + ) paddle.summary(nlp_net, (1, 1, 2)) rnn = paddle.nn.LSTM(16, 32, 2) - params_info = paddle.summary(rnn, [(-1, 23, 16), - ((2, None, 32), (2, -1, 32))]) + params_info = paddle.summary( + rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))] + ) gt_params = _get_param_from_state_dict(rnn.state_dict()) np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0) @@ -710,7 +758,7 @@ class TestModelFunction(unittest.TestCase): lenet_dict_input = LeNetDictInput() input_data = { 'x1': paddle.rand([1, 1, 28, 28]), - 'x2': paddle.rand([1, 400]) + 'x2': paddle.rand([1, 400]), } paddle.summary(lenet_dict_input, input=input_data) @@ -747,29 +795,35 @@ class TestModelFunction(unittest.TestCase): def customize_dropout(m, x, y): m.total_ops += 0 - paddle.flops(net, [1, 3, 224, 224], - custom_ops={paddle.nn.Dropout: customize_dropout}, - print_detail=True) + paddle.flops( + net, + [1, 3, 224, 224], + custom_ops={paddle.nn.Dropout: customize_dropout}, + print_detail=True, + ) def test_dynamic_flops_with_multiple_outputs(self): - net = paddle.nn.MaxPool2D(kernel_size=2, - stride=2, - padding=0, - return_mask=True) + net = paddle.nn.MaxPool2D( + kernel_size=2, stride=2, padding=0, return_mask=True + ) def customize_dropout(m, x, y): m.total_ops += 0 - paddle.flops(net, [1, 2, 32, 32], - custom_ops={paddle.nn.Dropout: customize_dropout}, - print_detail=True) + paddle.flops( + net, + [1, 2, 32, 32], + custom_ops={paddle.nn.Dropout: customize_dropout}, + print_detail=True, + ) def test_export_deploy_model(self): self.set_seed() np.random.seed(201) - save_dir = os.path.join(tempfile.mkdtemp(), - '.cache_test_export_deploy_model') + save_dir = os.path.join( + tempfile.mkdtemp(), '.cache_test_export_deploy_model' + ) if not os.path.exists(save_dir): os.makedirs(save_dir) @@ -782,28 +836,37 @@ class TestModelFunction(unittest.TestCase): model = 
Model(net, inputs) model.prepare() - tensor_img = np.array(np.random.random((1, 1, 28, 28)), - dtype=np.float32) + tensor_img = np.array( + np.random.random((1, 1, 28, 28)), dtype=np.float32 + ) model.save(save_dir, training=False) ori_results = model.predict_batch(tensor_img) fluid.disable_dygraph() if dynamic else None - place = fluid.CPUPlace( - ) if not fluid.is_compiled_with_cuda() else fluid.CUDAPlace(0) + place = ( + fluid.CPUPlace() + if not fluid.is_compiled_with_cuda() + else fluid.CUDAPlace(0) + ) new_scope = fluid.Scope() with fluid.scope_guard(new_scope): exe = fluid.Executor(place) - [inference_program, feed_target_names, - fetch_targets] = (paddle.static.io.load_inference_model( - path_prefix=save_dir, executor=exe)) - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) - np.testing.assert_allclose(results, - ori_results, - rtol=1e-5, - atol=1e-6) + [ + inference_program, + feed_target_names, + fetch_targets, + ] = paddle.static.io.load_inference_model( + path_prefix=save_dir, executor=exe + ) + results = exe.run( + inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets, + ) + np.testing.assert_allclose( + results, ori_results, rtol=1e-5, atol=1e-6 + ) paddle.enable_static() @@ -815,22 +878,26 @@ class TestModelFunction(unittest.TestCase): mnist_data = MnistDataset(mode='train') paddle.disable_static() # without inputs - save_dir = os.path.join(tempfile.mkdtemp(), - '.cache_test_dygraph_export_deploy') + save_dir = os.path.join( + tempfile.mkdtemp(), '.cache_test_dygraph_export_deploy' + ) if not os.path.exists(save_dir): os.makedirs(save_dir) for initial in ["fit", "train_batch", "eval_batch", "predict_batch"]: net = LeNet() model = Model(net) - optim = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=model.parameters()) - model.prepare(optimizer=optim, - loss=CrossEntropyLoss(reduction="sum")) + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=model.parameters() + ) + model.prepare( + optimizer=optim, loss=CrossEntropyLoss(reduction="sum") + ) if initial == "fit": model.fit(mnist_data, batch_size=64, verbose=0) else: - img = np.array(np.random.random((1, 1, 28, 28)), - dtype=np.float32) + img = np.array( + np.random.random((1, 1, 28, 28)), dtype=np.float32 + ) label = np.array(np.random.rand(1, 1), dtype=np.int64) if initial == "train_batch": model.train_batch([img], [label]) @@ -842,52 +909,58 @@ class TestModelFunction(unittest.TestCase): model.save(save_dir, training=False) shutil.rmtree(save_dir) # with inputs, and the type of inputs is InputSpec - save_dir = os.path.join(tempfile.mkdtemp(), - '.cache_test_dygraph_export_deploy_2') + save_dir = os.path.join( + tempfile.mkdtemp(), '.cache_test_dygraph_export_deploy_2' + ) if not os.path.exists(save_dir): os.makedirs(save_dir) net = LeNet() inputs = InputSpec([None, 1, 28, 28], 'float32', 'x') model = Model(net, inputs) - optim = fluid.optimizer.Adam(learning_rate=0.001, - parameter_list=model.parameters()) + optim = fluid.optimizer.Adam( + learning_rate=0.001, parameter_list=model.parameters() + ) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.save(save_dir, training=False) shutil.rmtree(save_dir) - def test_accumulate(self, ): + def test_accumulate( + self, + ): dim = 20 data = np.random.random(size=(4, dim)).astype(np.float32) label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) net = MyModel() - optim = fluid.optimizer.SGD(learning_rate=0.001, - 
parameter_list=net.parameters()) + optim = fluid.optimizer.SGD( + learning_rate=0.001, parameter_list=net.parameters() + ) inputs = [InputSpec([None, dim], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] for amp_cfg in [None, 'O1']: model = Model(net, inputs, labels) - model.prepare(optim, - loss=CrossEntropyLoss(reduction="sum"), - amp_configs=amp_cfg) + model.prepare( + optim, + loss=CrossEntropyLoss(reduction="sum"), + amp_configs=amp_cfg, + ) losses, grads = [], [] for stat in [False, False, True]: - loss, = model.train_batch([data], [label], update=stat) + (loss,) = model.train_batch([data], [label], update=stat) losses.append(loss) grads.append([p.grad.numpy() for p in net.parameters()]) for grad1, grad2, grad3 in zip(*grads): np.testing.assert_almost_equal(grad1 * 2, grad2, decimal=4) - np.testing.assert_almost_equal(grad3, - np.zeros_like(grad3), - decimal=4) + np.testing.assert_almost_equal( + grad3, np.zeros_like(grad3), decimal=4 + ) np.testing.assert_almost_equal(losses[0], losses[1], decimal=4) np.testing.assert_almost_equal(losses[0], losses[2], decimal=4) class TestModelWithLRScheduler(unittest.TestCase): - def test_fit_by_step(self): base_lr = 1e-3 boundaries = [5, 8] @@ -897,17 +970,21 @@ class TestModelWithLRScheduler(unittest.TestCase): weight_decay = 5e-4 values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)] learning_rate = paddle.optimizer.lr.PiecewiseDecay( - boundaries=boundaries, values=values) + boundaries=boundaries, values=values + ) learning_rate = paddle.optimizer.lr.LinearWarmup( learning_rate=learning_rate, warmup_steps=4, - start_lr=base_lr / 5., + start_lr=base_lr / 5.0, end_lr=base_lr, - verbose=True) - optimizer = paddle.optimizer.Momentum(learning_rate=learning_rate, - weight_decay=weight_decay, - momentum=momentum, - parameters=parameters) + verbose=True, + ) + optimizer = paddle.optimizer.Momentum( + learning_rate=learning_rate, + weight_decay=weight_decay, + momentum=momentum, + parameters=parameters, + ) return optimizer # dynamic test @@ -923,8 +1000,10 @@ class TestModelWithLRScheduler(unittest.TestCase): dataset = MyDataset() model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0) - np.testing.assert_allclose(model._optimizer._learning_rate.last_lr, - base_lr * (0.1**len(boundaries))) + np.testing.assert_allclose( + model._optimizer._learning_rate.last_lr, + base_lr * (0.1 ** len(boundaries)), + ) # static test paddle.enable_static() @@ -938,8 +1017,10 @@ class TestModelWithLRScheduler(unittest.TestCase): dataset = MyDataset() model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0) - np.testing.assert_allclose(model._optimizer._learning_rate.last_lr, - base_lr * (0.1**len(boundaries))) + np.testing.assert_allclose( + model._optimizer._learning_rate.last_lr, + base_lr * (0.1 ** len(boundaries)), + ) def test_fit_by_epoch(self): base_lr = 1e-3 @@ -952,17 +1033,21 @@ class TestModelWithLRScheduler(unittest.TestCase): weight_decay = 5e-4 values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)] learning_rate = paddle.optimizer.lr.PiecewiseDecay( - boundaries=boundaries, values=values) + boundaries=boundaries, values=values + ) learning_rate = paddle.optimizer.lr.LinearWarmup( learning_rate=learning_rate, warmup_steps=wamup_epochs, - start_lr=base_lr / 5., + start_lr=base_lr / 5.0, end_lr=base_lr, - verbose=True) - optimizer = paddle.optimizer.Momentum(learning_rate=learning_rate, - weight_decay=weight_decay, - momentum=momentum, - parameters=parameters) + verbose=True, + ) + optimizer = 
paddle.optimizer.Momentum( + learning_rate=learning_rate, + weight_decay=weight_decay, + momentum=momentum, + parameters=parameters, + ) return optimizer # dynamic test @@ -977,23 +1062,27 @@ class TestModelWithLRScheduler(unittest.TestCase): dataset = MyDataset() - lr_scheduler_callback = paddle.callbacks.LRScheduler(by_step=False, - by_epoch=True) + lr_scheduler_callback = paddle.callbacks.LRScheduler( + by_step=False, by_epoch=True + ) - model.fit(dataset, - dataset, - batch_size=4, - epochs=epochs, - num_workers=0, - callbacks=lr_scheduler_callback) + model.fit( + dataset, + dataset, + batch_size=4, + epochs=epochs, + num_workers=0, + callbacks=lr_scheduler_callback, + ) cnt = 0 for b in boundaries: if b + wamup_epochs <= epochs: cnt += 1 - np.testing.assert_allclose(model._optimizer._learning_rate.last_lr, - base_lr * (0.1**cnt)) + np.testing.assert_allclose( + model._optimizer._learning_rate.last_lr, base_lr * (0.1**cnt) + ) # static test paddle.enable_static() @@ -1006,27 +1095,30 @@ class TestModelWithLRScheduler(unittest.TestCase): dataset = MyDataset() - lr_scheduler_callback = paddle.callbacks.LRScheduler(by_step=False, - by_epoch=True) + lr_scheduler_callback = paddle.callbacks.LRScheduler( + by_step=False, by_epoch=True + ) - model.fit(dataset, - dataset, - batch_size=4, - epochs=epochs, - num_workers=0, - callbacks=lr_scheduler_callback) + model.fit( + dataset, + dataset, + batch_size=4, + epochs=epochs, + num_workers=0, + callbacks=lr_scheduler_callback, + ) cnt = 0 for b in boundaries: if b + wamup_epochs <= epochs: cnt += 1 - np.testing.assert_allclose(model._optimizer._learning_rate.last_lr, - base_lr * (0.1**cnt)) + np.testing.assert_allclose( + model._optimizer._learning_rate.last_lr, base_lr * (0.1**cnt) + ) class TestRaiseError(unittest.TestCase): - def test_input_without_name(self): net = MyModel() inputs = [InputSpec([None, 10], 'float32')] @@ -1059,8 +1151,9 @@ class TestRaiseError(unittest.TestCase): model = Model(net, inputs) model.prepare() path = "" - tensor_img = np.array(np.random.random((1, 1, 28, 28)), - dtype=np.float32) + tensor_img = np.array( + np.random.random((1, 1, 28, 28)), dtype=np.float32 + ) with self.assertRaises(ValueError): model.save(path, training=False) diff --git a/python/paddle/tests/test_ops_roi_align.py b/python/paddle/tests/test_ops_roi_align.py index 145f77e846b57683e15866d1abe8f79645ce0dc2..05c221e83a15729088615ffd3d6f44e0d6f73cd0 100644 --- a/python/paddle/tests/test_ops_roi_align.py +++ b/python/paddle/tests/test_ops_roi_align.py @@ -20,7 +20,6 @@ from paddle.vision.ops import roi_align, RoIAlign class TestRoIAlign(unittest.TestCase): - def setUp(self): self.data = np.random.rand(1, 256, 32, 32).astype('float32') boxes = np.random.rand(3, 4) @@ -40,38 +39,40 @@ class TestRoIAlign(unittest.TestCase): boxes = paddle.to_tensor(self.boxes) boxes_num = paddle.to_tensor(self.boxes_num) - align_out = roi_align(data, - boxes, - boxes_num=boxes_num, - output_size=output_size) + align_out = roi_align( + data, boxes, boxes_num=boxes_num, output_size=output_size + ) np.testing.assert_equal(align_out.shape, output_shape) else: - data = paddle.static.data(shape=self.data.shape, - dtype=self.data.dtype, - name='data') - boxes = paddle.static.data(shape=self.boxes.shape, - dtype=self.boxes.dtype, - name='boxes') - boxes_num = paddle.static.data(shape=self.boxes_num.shape, - dtype=self.boxes_num.dtype, - name='boxes_num') - - align_out = roi_align(data, - boxes, - boxes_num=boxes_num, - output_size=output_size) + data = paddle.static.data( + 
shape=self.data.shape, dtype=self.data.dtype, name='data' + ) + boxes = paddle.static.data( + shape=self.boxes.shape, dtype=self.boxes.dtype, name='boxes' + ) + boxes_num = paddle.static.data( + shape=self.boxes_num.shape, + dtype=self.boxes_num.dtype, + name='boxes_num', + ) + + align_out = roi_align( + data, boxes, boxes_num=boxes_num, output_size=output_size + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) - align_out = exe.run(paddle.static.default_main_program(), - feed={ - 'data': self.data, - 'boxes': self.boxes, - 'boxes_num': self.boxes_num - }, - fetch_list=[align_out]) + align_out = exe.run( + paddle.static.default_main_program(), + feed={ + 'data': self.data, + 'boxes': self.boxes, + 'boxes_num': self.boxes_num, + }, + fetch_list=[align_out], + ) np.testing.assert_equal(align_out[0].shape, output_shape) @@ -93,13 +94,19 @@ class TestRoIAlign(unittest.TestCase): align_out = roi_align_c(data, boxes, boxes_num) np.testing.assert_equal(align_out.shape, (3, 256, 4, 3)) - def test_value(self, ): - data = np.array([i for i in range(1, 17)]).reshape(1, 1, 4, - 4).astype(np.float32) - boxes = np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., - 3.]]).astype(np.float32) + def test_value( + self, + ): + data = ( + np.array([i for i in range(1, 17)]) + .reshape(1, 1, 4, 4) + .astype(np.float32) + ) + boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype( + np.float32 + ) boxes_num = np.array([2]).astype(np.int32) - output = np.array([[[[6.]]], [[[9.75]]]], dtype=np.float32) + output = np.array([[[[6.0]]], [[[9.75]]]], dtype=np.float32) data = paddle.to_tensor(data) boxes = paddle.to_tensor(boxes) diff --git a/python/paddle/tests/test_ops_roi_pool.py b/python/paddle/tests/test_ops_roi_pool.py index eaeb785df7d4885e4f527356e882d0fc3818e262..923ec01503cd2299eba1a2365dc16ffff736383a 100644 --- a/python/paddle/tests/test_ops_roi_pool.py +++ b/python/paddle/tests/test_ops_roi_pool.py @@ -20,7 +20,6 @@ from paddle.vision.ops import roi_pool, RoIPool class TestRoIPool(unittest.TestCase): - def setUp(self): self.data = np.random.rand(1, 256, 32, 32).astype('float32') boxes = np.random.rand(3, 4) @@ -41,38 +40,40 @@ class TestRoIPool(unittest.TestCase): boxes = paddle.to_tensor(self.boxes) boxes_num = paddle.to_tensor(self.boxes_num) - pool_out = roi_pool(data, - boxes, - boxes_num=boxes_num, - output_size=output_size) + pool_out = roi_pool( + data, boxes, boxes_num=boxes_num, output_size=output_size + ) np.testing.assert_equal(pool_out.shape, output_shape) else: - data = paddle.static.data(shape=self.data.shape, - dtype=self.data.dtype, - name='data') - boxes = paddle.static.data(shape=self.boxes.shape, - dtype=self.boxes.dtype, - name='boxes') - boxes_num = paddle.static.data(shape=self.boxes_num.shape, - dtype=self.boxes_num.dtype, - name='boxes_num') - - pool_out = roi_pool(data, - boxes, - boxes_num=boxes_num, - output_size=output_size) + data = paddle.static.data( + shape=self.data.shape, dtype=self.data.dtype, name='data' + ) + boxes = paddle.static.data( + shape=self.boxes.shape, dtype=self.boxes.dtype, name='boxes' + ) + boxes_num = paddle.static.data( + shape=self.boxes_num.shape, + dtype=self.boxes_num.dtype, + name='boxes_num', + ) + + pool_out = roi_pool( + data, boxes, boxes_num=boxes_num, output_size=output_size + ) place = paddle.CPUPlace() exe = paddle.static.Executor(place) - pool_out = exe.run(paddle.static.default_main_program(), - feed={ - 'data': self.data, - 'boxes': self.boxes, - 'boxes_num': self.boxes_num - }, - fetch_list=[pool_out]) + pool_out = 
exe.run( + paddle.static.default_main_program(), + feed={ + 'data': self.data, + 'boxes': self.boxes, + 'boxes_num': self.boxes_num, + }, + fetch_list=[pool_out], + ) np.testing.assert_equal(pool_out[0].shape, output_shape) @@ -94,13 +95,19 @@ class TestRoIPool(unittest.TestCase): pool_out = roi_pool_c(data, boxes, boxes_num) np.testing.assert_equal(pool_out.shape, (3, 256, 4, 3)) - def test_value(self, ): - data = np.array([i for i in range(1, 17)]).reshape(1, 1, 4, - 4).astype(np.float32) - boxes = np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., - 3.]]).astype(np.float32) + def test_value( + self, + ): + data = ( + np.array([i for i in range(1, 17)]) + .reshape(1, 1, 4, 4) + .astype(np.float32) + ) + boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype( + np.float32 + ) boxes_num = np.array([2]).astype(np.int32) - output = np.array([[[[11.]]], [[[16.]]]], dtype=np.float32) + output = np.array([[[[11.0]]], [[[16.0]]]], dtype=np.float32) data = paddle.to_tensor(data) boxes = paddle.to_tensor(boxes) diff --git a/python/paddle/tests/test_pretrained_model.py b/python/paddle/tests/test_pretrained_model.py index 4c655ce3c1f8fa4806f03fdaab9ede2236c18d4d..81df3adf8c7bc70d9b8d115872e88c5a31ec6240 100644 --- a/python/paddle/tests/test_pretrained_model.py +++ b/python/paddle/tests/test_pretrained_model.py @@ -26,7 +26,6 @@ import paddle.vision.models as models # test the predicted resutls of static graph and dynamic graph are equal # when used pretrained model class TestPretrainedModel(unittest.TestCase): - def infer(self, arch): path = os.path.join(tempfile.mkdtemp(), '.cache_test_pretrained_model') if not os.path.exists(path): diff --git a/python/paddle/tests/test_progressbar.py b/python/paddle/tests/test_progressbar.py index c42f1e4db0fe52c173f7d4f5985d72a50dc638ee..09340a9c247655b43c31b83633a2fa4a913cd5d9 100644 --- a/python/paddle/tests/test_progressbar.py +++ b/python/paddle/tests/test_progressbar.py @@ -21,7 +21,6 @@ from paddle.hapi.progressbar import ProgressBar class TestProgressBar(unittest.TestCase): - def prog_bar(self, num, epoch, width, verbose=1): for epoch in range(epoch): progbar = ProgressBar(num, verbose=verbose) @@ -40,7 +39,7 @@ class TestProgressBar(unittest.TestCase): progbar.update(1, [['loss', int(1)]]) progbar.update(1, [['loss', 'INF']]) progbar.update(1, [['loss', 1e-4]]) - progbar.update(1, [['loss', np.array([1.])]]) + progbar.update(1, [['loss', np.array([1.0])]]) progbar.update(1, [['loss', np.array([1e-4])]]) progbar.update(1, [['loss', np.array([1]).astype(np.uint16)]]) progbar.start() diff --git a/python/paddle/tests/test_read_file.py b/python/paddle/tests/test_read_file.py index ce387f039c7e688aa46f4d7a1b6461814cf89f82..d13e901b322b5d36af89f88a042191d56d868a73 100644 --- a/python/paddle/tests/test_read_file.py +++ b/python/paddle/tests/test_read_file.py @@ -23,7 +23,6 @@ from paddle.vision.ops import read_file, decode_jpeg class TestReadFile(unittest.TestCase): - def setUp(self): fake_img = (np.random.random((400, 300, 3)) * 255).astype('uint8') self.temp_dir = tempfile.TemporaryDirectory() @@ -51,11 +50,13 @@ class TestReadFile(unittest.TestCase): place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) - out = exe.run(paddle.static.default_main_program(), - fetch_list=[img]) + out = exe.run( + paddle.static.default_main_program(), fetch_list=[img] + ) - np.testing.assert_equal(out[0].shape, - img_cv2.transpose(2, 0, 1).shape) + np.testing.assert_equal( + out[0].shape, img_cv2.transpose(2, 0, 
1).shape + ) def test_read_file_decode_jpeg_dynamic(self): self.read_file_decode_jpeg() diff --git a/python/paddle/tests/test_transforms.py b/python/paddle/tests/test_transforms.py index f182cbd799c189923395e1db8429564e5e5e4799..9e2cad57c37ece3dd9d9c963b77fd3470a4dd417 100644 --- a/python/paddle/tests/test_transforms.py +++ b/python/paddle/tests/test_transforms.py @@ -28,7 +28,6 @@ import paddle.vision.transforms.functional as F class TestTransformsCV2(unittest.TestCase): - def setUp(self): self.backend = self.get_backend() set_image_backend(self.backend) @@ -39,11 +38,13 @@ class TestTransformsCV2(unittest.TestCase): os.makedirs(sub_dir) for j in range(2): if j == 0: - fake_img = (np.random.random( - (280, 350, 3)) * 255).astype('uint8') + fake_img = (np.random.random((280, 350, 3)) * 255).astype( + 'uint8' + ) else: - fake_img = (np.random.random( - (400, 300, 3)) * 255).astype('uint8') + fake_img = (np.random.random((400, 300, 3)) * 255).astype( + 'uint8' + ) cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img) def get_backend(self): @@ -54,7 +55,8 @@ class TestTransformsCV2(unittest.TestCase): return (np.random.rand(*shape) * 255).astype('uint8') elif self.backend == 'pil': return Image.fromarray( - (np.random.rand(*shape) * 255).astype('uint8')) + (np.random.rand(*shape) * 255).astype('uint8') + ) def get_shape(self, img): if isinstance(img, paddle.Tensor): @@ -79,16 +81,17 @@ class TestTransformsCV2(unittest.TestCase): mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375], ) - trans = transforms.Compose([ - transforms.RandomResizedCrop(224), - transforms.ColorJitter(brightness=0.4, - contrast=0.4, - saturation=0.4, - hue=0.4), - transforms.RandomHorizontalFlip(), - transforms.Transpose(), - normalize, - ]) + trans = transforms.Compose( + [ + transforms.RandomResizedCrop(224), + transforms.ColorJitter( + brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4 + ), + transforms.RandomHorizontalFlip(), + transforms.Transpose(), + normalize, + ] + ) self.do_transform(trans) @@ -98,83 +101,104 @@ class TestTransformsCV2(unittest.TestCase): self.do_transform(trans) def test_trans_resize(self): - trans = transforms.Compose([ - transforms.Resize(300), - transforms.RandomResizedCrop((280, 280)), - transforms.Resize(280), - transforms.Resize((256, 200)), - transforms.Resize((180, 160)), - transforms.CenterCrop(128), - transforms.CenterCrop((128, 128)), - ]) + trans = transforms.Compose( + [ + transforms.Resize(300), + transforms.RandomResizedCrop((280, 280)), + transforms.Resize(280), + transforms.Resize((256, 200)), + transforms.Resize((180, 160)), + transforms.CenterCrop(128), + transforms.CenterCrop((128, 128)), + ] + ) self.do_transform(trans) def test_flip(self): - trans = transforms.Compose([ - transforms.RandomHorizontalFlip(1.0), - transforms.RandomHorizontalFlip(0.0), - transforms.RandomVerticalFlip(0.0), - transforms.RandomVerticalFlip(1.0), - ]) + trans = transforms.Compose( + [ + transforms.RandomHorizontalFlip(1.0), + transforms.RandomHorizontalFlip(0.0), + transforms.RandomVerticalFlip(0.0), + transforms.RandomVerticalFlip(1.0), + ] + ) self.do_transform(trans) def test_color_jitter(self): - trans = transforms.Compose([ - transforms.BrightnessTransform(0.0), - transforms.HueTransform(0.0), - transforms.SaturationTransform(0.0), - transforms.ContrastTransform(0.0), - ]) + trans = transforms.Compose( + [ + transforms.BrightnessTransform(0.0), + transforms.HueTransform(0.0), + transforms.SaturationTransform(0.0), + transforms.ContrastTransform(0.0), + ] + ) 
self.do_transform(trans) def test_affine(self): - trans = transforms.Compose([ - transforms.RandomAffine(90), - transforms.RandomAffine([-10, 10], translate=[0.1, 0.3]), - transforms.RandomAffine(45, translate=[0.2, 0.2], scale=[0.2, 0.5]), - transforms.RandomAffine(10, - translate=[0.2, 0.2], - scale=[0.5, 0.5], - shear=[-10, 10]), - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 20, 40]), - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 20, 40], - interpolation='bilinear'), - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 20, 40], - interpolation='bilinear', - fill=114), - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 20, 40], - interpolation='bilinear', - fill=114, - center=(60, 80)), - ]) + trans = transforms.Compose( + [ + transforms.RandomAffine(90), + transforms.RandomAffine([-10, 10], translate=[0.1, 0.3]), + transforms.RandomAffine( + 45, translate=[0.2, 0.2], scale=[0.2, 0.5] + ), + transforms.RandomAffine( + 10, translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 10] + ), + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 20, 40], + ), + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 20, 40], + interpolation='bilinear', + ), + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 20, 40], + interpolation='bilinear', + fill=114, + ), + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 20, 40], + interpolation='bilinear', + fill=114, + center=(60, 80), + ), + ] + ) self.do_transform(trans) def test_rotate(self): - trans = transforms.Compose([ - transforms.RandomRotation(90), - transforms.RandomRotation([-10, 10]), - transforms.RandomRotation(45, expand=True), - transforms.RandomRotation(10, expand=True, center=(60, 80)), - ]) + trans = transforms.Compose( + [ + transforms.RandomRotation(90), + transforms.RandomRotation([-10, 10]), + transforms.RandomRotation(45, expand=True), + transforms.RandomRotation(10, expand=True, center=(60, 80)), + ] + ) self.do_transform(trans) def test_perspective(self): - trans = transforms.Compose([ - transforms.RandomPerspective(prob=1.0), - transforms.RandomPerspective(prob=1.0, distortion_scale=0.9), - ]) + trans = transforms.Compose( + [ + transforms.RandomPerspective(prob=1.0), + transforms.RandomPerspective(prob=1.0, distortion_scale=0.9), + ] + ) self.do_transform(trans) def test_pad(self): @@ -191,10 +215,12 @@ class TestTransformsCV2(unittest.TestCase): img = trans_pad2(img) def test_random_crop(self): - trans = transforms.Compose([ - transforms.RandomCrop(200), - transforms.RandomCrop((140, 160)), - ]) + trans = transforms.Compose( + [ + transforms.RandomCrop(200), + transforms.RandomCrop((140, 160)), + ] + ) self.do_transform(trans) trans_random_crop1 = transforms.RandomCrop(224) @@ -211,18 +237,21 @@ class TestTransformsCV2(unittest.TestCase): trans_random_crop_same = transforms.RandomCrop((140, 160)) img = trans_random_crop_same(fake_img_crop2) - trans_random_crop_bigger = transforms.RandomCrop((180, 200), - pad_if_needed=True) + trans_random_crop_bigger = transforms.RandomCrop( + (180, 200), pad_if_needed=True + ) img = trans_random_crop_bigger(img) trans_random_crop_pad = transforms.RandomCrop((224, 256), 2, True) img = trans_random_crop_pad(img) def test_erase(self): - trans = transforms.Compose([ 
- transforms.RandomErasing(), - transforms.RandomErasing(value="random") - ]) + trans = transforms.Compose( + [ + transforms.RandomErasing(), + transforms.RandomErasing(value="random"), + ] + ) self.do_transform(trans) def test_grayscale(self): @@ -261,7 +290,7 @@ class TestTransformsCV2(unittest.TestCase): def test_keys(self): fake_img1 = self.create_image((200, 150, 3)) fake_img2 = self.create_image((200, 150, 3)) - trans_pad = transforms.Pad(10, keys=("image", )) + trans_pad = transforms.Pad(10, keys=("image",)) fake_img_padded = trans_pad((fake_img1, fake_img2)) def test_exception(self): @@ -332,24 +361,27 @@ class TestTransformsCV2(unittest.TestCase): transforms.RandomAffine(10, translate=[0.2, 0.2], scale=[1, 2, 3]), with self.assertRaises(ValueError): - transforms.RandomAffine(10, - translate=[0.2, 0.2], - scale=[0.5, 0.5], - shear=[1, 2, 3]), + transforms.RandomAffine( + 10, translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[1, 2, 3] + ), with self.assertRaises(ValueError): - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 0, 20, 40]) + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 0, 20, 40], + ) with self.assertRaises(ValueError): - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 20, 40], - fill=114, - center=(1, 2, 3)) + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 20, 40], + fill=114, + center=(1, 2, 3), + ) with self.assertRaises(ValueError): transforms.RandomRotation(-2) @@ -402,19 +434,18 @@ class TestTransformsCV2(unittest.TestCase): class TestTransformsPIL(TestTransformsCV2): - def get_backend(self): return 'pil' class TestTransformsTensor(TestTransformsCV2): - def get_backend(self): return 'tensor' def create_image(self, shape): return paddle.to_tensor(np.random.rand(*shape)).transpose( - (2, 0, 1)) # hwc->chw + (2, 0, 1) + ) # hwc->chw def do_transform(self, trans): trans.transforms.insert(0, transforms.ToTensor(data_format='CHW')) @@ -428,11 +459,13 @@ class TestTransformsTensor(TestTransformsCV2): mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375], ) - trans = transforms.Compose([ - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - normalize, - ]) + trans = transforms.Compose( + [ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + normalize, + ] + ) self.do_transform(trans) def test_grayscale(self): @@ -489,10 +522,12 @@ class TestTransformsTensor(TestTransformsCV2): img = trans_pad4(img) def test_random_crop(self): - trans = transforms.Compose([ - transforms.RandomCrop(200), - transforms.RandomCrop((140, 160)), - ]) + trans = transforms.Compose( + [ + transforms.RandomCrop(200), + transforms.RandomCrop((140, 160)), + ] + ) self.do_transform(trans) trans_random_crop1 = transforms.RandomCrop(224) @@ -509,18 +544,21 @@ class TestTransformsTensor(TestTransformsCV2): trans_random_crop_same = transforms.RandomCrop((140, 160)) img = trans_random_crop_same(fake_img_crop2) - trans_random_crop_bigger = transforms.RandomCrop((180, 200), - pad_if_needed=True) + trans_random_crop_bigger = transforms.RandomCrop( + (180, 200), pad_if_needed=True + ) img = trans_random_crop_bigger(img) trans_random_crop_pad = transforms.RandomCrop((224, 256), 2, True) img = trans_random_crop_pad(img) def test_erase(self): - trans = transforms.Compose([ - transforms.RandomErasing(value=(0.5, )), - transforms.RandomErasing(value="random") - ]) + trans = transforms.Compose( 
+ [ + transforms.RandomErasing(value=(0.5,)), + transforms.RandomErasing(value="random"), + ] + ) self.do_transform(trans) erase_trans = transforms.RandomErasing(value=(0.5, 0.2, 0.01)) @@ -586,16 +624,17 @@ class TestTransformsTensor(TestTransformsCV2): transforms.RandomAffine(10, translate=[0.2, 0.2], scale=[1, 2, 3]), with self.assertRaises(ValueError): - transforms.RandomAffine(10, - translate=[0.2, 0.2], - scale=[0.5, 0.5], - shear=[1, 2, 3]), + transforms.RandomAffine( + 10, translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[1, 2, 3] + ), with self.assertRaises(ValueError): - transforms.RandomAffine(10, - translate=[0.5, 0.3], - scale=[0.7, 1.3], - shear=[-10, 10, 0, 20, 40]) + transforms.RandomAffine( + 10, + translate=[0.5, 0.3], + scale=[0.7, 1.3], + shear=[-10, 10, 0, 20, 40], + ) with self.assertRaises(ValueError): transforms.RandomRotation(-2) @@ -616,14 +655,14 @@ class TestTransformsTensor(TestTransformsCV2): class TestFunctional(unittest.TestCase): - def test_errors(self): with self.assertRaises(TypeError): F.to_tensor(1) with self.assertRaises(ValueError): fake_img = Image.fromarray( - (np.random.rand(28, 28, 3) * 255).astype('uint8')) + (np.random.rand(28, 28, 3) * 255).astype('uint8') + ) F.to_tensor(fake_img, data_format=1) with self.assertRaises(ValueError): @@ -636,7 +675,8 @@ class TestFunctional(unittest.TestCase): with self.assertRaises(TypeError): fake_img = Image.fromarray( - (np.random.rand(28, 28, 3) * 255).astype('uint8')) + (np.random.rand(28, 28, 3) * 255).astype('uint8') + ) F.resize(fake_img, '1') with self.assertRaises(TypeError): @@ -685,18 +725,18 @@ class TestFunctional(unittest.TestCase): F.affine(45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 0, 10]) with self.assertRaises(TypeError): - F.affine(45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10], - interpolation=2) + F.affine( + 45, + translate=[0.2, 0.2], + scale=0.5, + shear=[-10, 10], + interpolation=2, + ) with self.assertRaises(TypeError): - F.affine(45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10], - center=0) + F.affine( + 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10], center=0 + ) with self.assertRaises(TypeError): F.rotate(1, 0.1) @@ -720,23 +760,21 @@ class TestFunctional(unittest.TestCase): std = [0.5, 0.5, 0.5] normalized_img = F.normalize(tensor_img, mean, std) - normalized_img_tensor = F.normalize(tensor_img_hwc, - mean, - std, - data_format='HWC') + normalized_img_tensor = F.normalize( + tensor_img_hwc, mean, std, data_format='HWC' + ) normalized_img_pil = F.normalize(pil_img, mean, std, data_format='HWC') - normalized_img_np = F.normalize(np_img, - mean, - std, - data_format='HWC', - to_rgb=False) - - np.testing.assert_almost_equal(np.array(normalized_img_pil), - normalized_img_np) - np.testing.assert_almost_equal(normalized_img_tensor.numpy(), - normalized_img_np, - decimal=4) + normalized_img_np = F.normalize( + np_img, mean, std, data_format='HWC', to_rgb=False + ) + + np.testing.assert_almost_equal( + np.array(normalized_img_pil), normalized_img_np + ) + np.testing.assert_almost_equal( + normalized_img_tensor.numpy(), normalized_img_np, decimal=4 + ) def test_center_crop(self): np_img = (np.random.rand(28, 24, 3) * 255).astype('uint8') @@ -747,12 +785,14 @@ class TestFunctional(unittest.TestCase): pil_cropped_img = F.center_crop(pil_img, 4) tensor_cropped_img = F.center_crop(tensor_img, 4) - np.testing.assert_almost_equal(np_cropped_img, - np.array(pil_cropped_img)) - np.testing.assert_almost_equal(np_cropped_img, - tensor_cropped_img.numpy().transpose( - (1, 
2, 0)), - decimal=4) + np.testing.assert_almost_equal( + np_cropped_img, np.array(pil_cropped_img) + ) + np.testing.assert_almost_equal( + np_cropped_img, + tensor_cropped_img.numpy().transpose((1, 2, 0)), + decimal=4, + ) def test_color_jitter_sub_function(self): np.random.seed(555) @@ -771,8 +811,9 @@ class TestFunctional(unittest.TestCase): def test_adjust_brightness(np_img, tensor_img): result_cv2 = np.array(F.adjust_brightness(np_img, 1.2)) result_tensor = F.adjust_brightness(tensor_img, 1.2).numpy() - result_tensor = np.transpose(result_tensor * 255, - (1, 2, 0)).astype('uint8') + result_tensor = np.transpose(result_tensor * 255, (1, 2, 0)).astype( + 'uint8' + ) np.testing.assert_equal(result_cv2, result_tensor) # For adjust_contrast / adjust_saturation / adjust_hue the implement is kind @@ -788,7 +829,7 @@ class TestFunctional(unittest.TestCase): def test_adjust_saturation(np_img, tensor_img): result_pil = np.array(F.adjust_saturation(np_img, 1.0)) result_tensor = F.adjust_saturation(tensor_img, 1.0).numpy() - result_tensor = np.transpose(result_tensor * 255., (1, 2, 0)) + result_tensor = np.transpose(result_tensor * 255.0, (1, 2, 0)) diff = np.max(np.abs(result_tensor - result_pil)) self.assertTrue(diff < 1.1) @@ -817,14 +858,16 @@ class TestFunctional(unittest.TestCase): tensor_padded_img = F.pad(tensor_img, [1, 2], padding_mode='reflect') np.testing.assert_almost_equal(np_padded_img, np.array(pil_padded_img)) - np.testing.assert_almost_equal(np_padded_img, - tensor_padded_img.numpy().transpose( - (1, 2, 0)), - decimal=3) + np.testing.assert_almost_equal( + np_padded_img, + tensor_padded_img.numpy().transpose((1, 2, 0)), + decimal=3, + ) tensor_padded_img = F.pad(tensor_img, 1, padding_mode='reflect') - tensor_padded_img = F.pad(tensor_img, [1, 2, 1, 2], - padding_mode='reflect') + tensor_padded_img = F.pad( + tensor_img, [1, 2, 1, 2], padding_mode='reflect' + ) pil_p_img = pil_img.convert('P') pil_padded_img = F.pad(pil_p_img, [1, 2]) @@ -840,16 +883,19 @@ class TestFunctional(unittest.TestCase): tensor_reseized_img = F.resize(tensor_img, 40) tensor_reseized_img2 = F.resize(tensor_img, (46, 40)) - np.testing.assert_almost_equal(np_reseized_img, - np.array(pil_reseized_img)) - np.testing.assert_almost_equal(np_reseized_img, - tensor_reseized_img.numpy().transpose( - (1, 2, 0)), - decimal=3) - np.testing.assert_almost_equal(np_reseized_img, - tensor_reseized_img2.numpy().transpose( - (1, 2, 0)), - decimal=3) + np.testing.assert_almost_equal( + np_reseized_img, np.array(pil_reseized_img) + ) + np.testing.assert_almost_equal( + np_reseized_img, + tensor_reseized_img.numpy().transpose((1, 2, 0)), + decimal=3, + ) + np.testing.assert_almost_equal( + np_reseized_img, + tensor_reseized_img2.numpy().transpose((1, 2, 0)), + decimal=3, + ) gray_img = (np.zeros([28, 32])).astype('uint8') gray_resize_img = F.resize(gray_img, 40) @@ -905,26 +951,30 @@ class TestFunctional(unittest.TestCase): expected_tensor = tensor_img.clone() expected_tensor[:, 10:15, 10:15] = paddle.to_tensor([0.88]) - tensor_result = F.erase(tensor_img, 10, 10, 5, 5, - paddle.to_tensor([0.88])) - np.testing.assert_equal(tensor_result.numpy(), - expected_tensor.numpy()) + tensor_result = F.erase( + tensor_img, 10, 10, 5, 5, paddle.to_tensor([0.88]) + ) + np.testing.assert_equal( + tensor_result.numpy(), expected_tensor.numpy() + ) def test_erase_backward(self): img = paddle.randn((3, 14, 14), dtype=np.float32) img.stop_gradient = False - erased = F.erase(img, 3, 3, 5, 5, paddle.ones((1, 1, 1), - dtype='float32')) + erased = 
F.erase( + img, 3, 3, 5, 5, paddle.ones((1, 1, 1), dtype='float32') + ) loss = erased.sum() loss.backward() expected_grad = np.ones((3, 14, 14), dtype=np.float32) - expected_grad[:, 3:8, 3:8] = 0. + expected_grad[:, 3:8, 3:8] = 0.0 np.testing.assert_equal(img.grad.numpy(), expected_grad) def test_image_load(self): - fake_img = Image.fromarray((np.random.random( - (32, 32, 3)) * 255).astype('uint8')) + fake_img = Image.fromarray( + (np.random.random((32, 32, 3)) * 255).astype('uint8') + ) temp_dir = tempfile.TemporaryDirectory() path = os.path.join(temp_dir.name, 'temp.jpg') @@ -947,35 +997,32 @@ class TestFunctional(unittest.TestCase): pil_img = Image.fromarray(np_img).convert('RGB') tensor_img = F.to_tensor(pil_img, data_format='CHW') * 255 - np.testing.assert_almost_equal(np_img, - tensor_img.transpose((1, 2, 0)), - decimal=4) - - np_affined_img = F.affine(np_img, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]) - pil_affined_img = F.affine(pil_img, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]) - tensor_affined_img = F.affine(tensor_img, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]) - - np.testing.assert_equal(np_affined_img.shape, - np.array(pil_affined_img).shape) - np.testing.assert_equal(np_affined_img.shape, - tensor_affined_img.transpose((1, 2, 0)).shape) - - np.testing.assert_almost_equal(np.array(pil_affined_img), - tensor_affined_img.numpy().transpose( - (1, 2, 0)), - decimal=4) + np.testing.assert_almost_equal( + np_img, tensor_img.transpose((1, 2, 0)), decimal=4 + ) + + np_affined_img = F.affine( + np_img, 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10] + ) + pil_affined_img = F.affine( + pil_img, 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10] + ) + tensor_affined_img = F.affine( + tensor_img, 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10] + ) + + np.testing.assert_equal( + np_affined_img.shape, np.array(pil_affined_img).shape + ) + np.testing.assert_equal( + np_affined_img.shape, tensor_affined_img.transpose((1, 2, 0)).shape + ) + + np.testing.assert_almost_equal( + np.array(pil_affined_img), + tensor_affined_img.numpy().transpose((1, 2, 0)), + decimal=4, + ) def test_rotate(self): np_img = (np.random.rand(28, 28, 3) * 255).astype('uint8') @@ -987,64 +1034,70 @@ class TestFunctional(unittest.TestCase): rotated_tensor_img1 = F.rotate(tensor_img, 80, expand=True) - rotated_tensor_img2 = F.rotate(tensor_img, - 80, - interpolation='bilinear', - center=(10, 10), - expand=False) + rotated_tensor_img2 = F.rotate( + tensor_img, + 80, + interpolation='bilinear', + center=(10, 10), + expand=False, + ) - np.testing.assert_equal(rotated_np_img.shape, - np.array(rotated_pil_img).shape) - np.testing.assert_equal(rotated_np_img.shape, - rotated_tensor_img1.transpose((1, 2, 0)).shape) + np.testing.assert_equal( + rotated_np_img.shape, np.array(rotated_pil_img).shape + ) + np.testing.assert_equal( + rotated_np_img.shape, rotated_tensor_img1.transpose((1, 2, 0)).shape + ) def test_rotate1(self): np_img = (np.random.rand(28, 28, 3) * 255).astype('uint8') pil_img = Image.fromarray(np_img).convert('RGB') - rotated_np_img = F.rotate(np_img, - 80, - expand=True, - center=[0, 0], - fill=[0, 0, 0]) - rotated_pil_img = F.rotate(pil_img, - 80, - expand=True, - center=[0, 0], - fill=[0, 0, 0]) + rotated_np_img = F.rotate( + np_img, 80, expand=True, center=[0, 0], fill=[0, 0, 0] + ) + rotated_pil_img = F.rotate( + pil_img, 80, expand=True, center=[0, 0], fill=[0, 0, 0] + ) - np.testing.assert_equal(rotated_np_img.shape, - 
np.array(rotated_pil_img).shape) + np.testing.assert_equal( + rotated_np_img.shape, np.array(rotated_pil_img).shape + ) def test_perspective(self): np_img = (np.random.rand(32, 26, 3) * 255).astype('uint8') pil_img = Image.fromarray(np_img).convert('RGB') tensor_img = F.to_tensor(pil_img, data_format='CHW') * 255 - np.testing.assert_almost_equal(np_img, - tensor_img.transpose((1, 2, 0)), - decimal=4) + np.testing.assert_almost_equal( + np_img, tensor_img.transpose((1, 2, 0)), decimal=4 + ) startpoints = [[0, 0], [13, 0], [13, 15], [0, 15]] endpoints = [[3, 2], [12, 3], [10, 14], [2, 15]] np_perspectived_img = F.perspective(np_img, startpoints, endpoints) pil_perspectived_img = F.perspective(pil_img, startpoints, endpoints) - tensor_perspectived_img = F.perspective(tensor_img, startpoints, - endpoints) + tensor_perspectived_img = F.perspective( + tensor_img, startpoints, endpoints + ) - np.testing.assert_equal(np_perspectived_img.shape, - np.array(pil_perspectived_img).shape) + np.testing.assert_equal( + np_perspectived_img.shape, np.array(pil_perspectived_img).shape + ) np.testing.assert_equal( np_perspectived_img.shape, - tensor_perspectived_img.transpose((1, 2, 0)).shape) + tensor_perspectived_img.transpose((1, 2, 0)).shape, + ) result_pil = np.array(pil_perspectived_img) - result_tensor = tensor_perspectived_img.numpy().transpose( - (1, 2, 0)).astype('uint8') + result_tensor = ( + tensor_perspectived_img.numpy().transpose((1, 2, 0)).astype('uint8') + ) num_diff_pixels = (result_pil != result_tensor).sum() / 3.0 - ratio_diff_pixels = num_diff_pixels / result_tensor.shape[ - 0] / result_tensor.shape[1] + ratio_diff_pixels = ( + num_diff_pixels / result_tensor.shape[0] / result_tensor.shape[1] + ) # Tolerance : less than 6% of different pixels assert ratio_diff_pixels < 0.06 @@ -1054,10 +1107,12 @@ class TestFunctional(unittest.TestCase): def test_erase(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) - target_result = paddle.stack([ - F.erase(input1, 1, 1, 2, 2, 0.5), - F.erase(input2, 1, 1, 2, 2, 0.5) - ]) + target_result = paddle.stack( + [ + F.erase(input1, 1, 1, 2, 2, 0.5), + F.erase(input2, 1, 1, 2, 2, 0.5), + ] + ) batch_result = F.erase(batch_tensor, 1, 1, 2, 2, 0.5) @@ -1067,23 +1122,31 @@ class TestFunctional(unittest.TestCase): def test_affine(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) - target_result = paddle.stack([ - F.affine(input1, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]), - F.affine(input2, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]) - ]) - batch_result = F.affine(batch_tensor, - 45, - translate=[0.2, 0.2], - scale=0.5, - shear=[-10, 10]) + target_result = paddle.stack( + [ + F.affine( + input1, + 45, + translate=[0.2, 0.2], + scale=0.5, + shear=[-10, 10], + ), + F.affine( + input2, + 45, + translate=[0.2, 0.2], + scale=0.5, + shear=[-10, 10], + ), + ] + ) + batch_result = F.affine( + batch_tensor, + 45, + translate=[0.2, 0.2], + scale=0.5, + shear=[-10, 10], + ) return paddle.allclose(batch_result, target_result) @@ -1093,10 +1156,12 @@ class TestFunctional(unittest.TestCase): input1, input2 = paddle.unbind(batch_tensor, axis=0) startpoints = [[0, 0], [3, 0], [4, 5], [6, 7]] endpoints = [[0, 1], [3, 1], [4, 4], [5, 7]] - target_result = paddle.stack([ - F.perspective(input1, startpoints, endpoints), - F.perspective(input2, startpoints, endpoints) - ]) + target_result = paddle.stack( + [ + F.perspective(input1, startpoints, endpoints), + F.perspective(input2, startpoints, endpoints), + ] + ) 
batch_result = F.perspective(batch_tensor, startpoints, endpoints) @@ -1106,10 +1171,12 @@ class TestFunctional(unittest.TestCase): def test_adjust_brightness(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) - target_result = paddle.stack([ - F.adjust_brightness(input1, 2.1), - F.adjust_brightness(input2, 2.1) - ]) + target_result = paddle.stack( + [ + F.adjust_brightness(input1, 2.1), + F.adjust_brightness(input2, 2.1), + ] + ) batch_result = F.adjust_brightness(batch_tensor, 2.1) @@ -1119,10 +1186,9 @@ class TestFunctional(unittest.TestCase): def test_adjust_contrast(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) - target_result = paddle.stack([ - F.adjust_contrast(input1, 0.3), - F.adjust_contrast(input2, 0.3) - ]) + target_result = paddle.stack( + [F.adjust_contrast(input1, 0.3), F.adjust_contrast(input2, 0.3)] + ) batch_result = F.adjust_contrast(batch_tensor, 0.3) @@ -1132,10 +1198,12 @@ class TestFunctional(unittest.TestCase): def test_adjust_saturation(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) - target_result = paddle.stack([ - F.adjust_saturation(input1, 1.1), - F.adjust_saturation(input2, 1.1) - ]) + target_result = paddle.stack( + [ + F.adjust_saturation(input1, 1.1), + F.adjust_saturation(input2, 1.1), + ] + ) batch_result = F.adjust_saturation(batch_tensor, 1.1) @@ -1146,8 +1214,8 @@ class TestFunctional(unittest.TestCase): def test_adjust_hue(batch_tensor): input1, input2 = paddle.unbind(batch_tensor, axis=0) target_result = paddle.stack( - [F.adjust_hue(input1, -0.2), - F.adjust_hue(input2, -0.2)]) + [F.adjust_hue(input1, -0.2), F.adjust_hue(input2, -0.2)] + ) batch_result = F.adjust_hue(batch_tensor, -0.2) diff --git a/python/paddle/tests/test_vision_models.py b/python/paddle/tests/test_vision_models.py index 1f53060beb049480c53e9a05a88cbf20d7368f54..dc98fc3219bff6d2ae5e65a4ad21e4303baba8c7 100644 --- a/python/paddle/tests/test_vision_models.py +++ b/python/paddle/tests/test_vision_models.py @@ -20,7 +20,6 @@ import paddle.vision.models as models class TestVisonModels(unittest.TestCase): - def models_infer(self, arch, pretrained=False, batch_norm=False): x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32) diff --git a/python/paddle/text/__init__.py b/python/paddle/text/__init__.py index 5775a24785804743a811ffa54d23bb1452f2a2eb..fbfa0c3fe2e028bb0e6de028bdb4d4c005ca752e 100644 --- a/python/paddle/text/__init__.py +++ b/python/paddle/text/__init__.py @@ -21,7 +21,14 @@ from .datasets import UCIHousing # noqa: F401 from .datasets import WMT14 # noqa: F401 from .datasets import WMT16 # noqa: F401 -__all__ = [ #noqa - 'Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'UCIHousing', 'WMT14', - 'WMT16', 'ViterbiDecoder', 'viterbi_decode' +__all__ = [ # noqa + 'Conll05st', + 'Imdb', + 'Imikolov', + 'Movielens', + 'UCIHousing', + 'WMT14', + 'WMT16', + 'ViterbiDecoder', + 'viterbi_decode', ] diff --git a/python/paddle/text/datasets/conll05.py b/python/paddle/text/datasets/conll05.py index ee475e7fc5d43ea253d699a4a5b0d509846459e4..807c8c3fbebc9017b5cdbbc24597c473a0c91055 100644 --- a/python/paddle/text/datasets/conll05.py +++ b/python/paddle/text/datasets/conll05.py @@ -91,47 +91,71 @@ class Conll05st(Dataset): """ - def __init__(self, - data_file=None, - word_dict_file=None, - verb_dict_file=None, - target_dict_file=None, - emb_file=None, - download=True): + def __init__( + self, + data_file=None, + word_dict_file=None, + verb_dict_file=None, + target_dict_file=None, + emb_file=None, + download=True, + ): 
self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, DATA_URL, - DATA_MD5, 'conll05st', - download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, DATA_URL, DATA_MD5, 'conll05st', download + ) self.word_dict_file = word_dict_file if self.word_dict_file is None: - assert download, "word_dict_file is not set and downloading automatically is disabled" + assert ( + download + ), "word_dict_file is not set and downloading automatically is disabled" self.word_dict_file = _check_exists_and_download( - word_dict_file, WORDDICT_URL, WORDDICT_MD5, 'conll05st', - download) + word_dict_file, + WORDDICT_URL, + WORDDICT_MD5, + 'conll05st', + download, + ) self.verb_dict_file = verb_dict_file if self.verb_dict_file is None: - assert download, "verb_dict_file is not set and downloading automatically is disabled" + assert ( + download + ), "verb_dict_file is not set and downloading automatically is disabled" self.verb_dict_file = _check_exists_and_download( - verb_dict_file, VERBDICT_URL, VERBDICT_MD5, 'conll05st', - download) + verb_dict_file, + VERBDICT_URL, + VERBDICT_MD5, + 'conll05st', + download, + ) self.target_dict_file = target_dict_file if self.target_dict_file is None: - assert download, "target_dict_file is not set and downloading automatically is disabled" + assert ( + download + ), "target_dict_file is not set and downloading automatically is disabled" self.target_dict_file = _check_exists_and_download( - target_dict_file, TRGDICT_URL, TRGDICT_MD5, 'conll05st', - download) + target_dict_file, + TRGDICT_URL, + TRGDICT_MD5, + 'conll05st', + download, + ) self.emb_file = emb_file if self.emb_file is None: - assert download, "emb_file is not set and downloading automatically is disabled" - self.emb_file = _check_exists_and_download(emb_file, EMB_URL, - EMB_MD5, 'conll05st', - download) + assert ( + download + ), "emb_file is not set and downloading automatically is disabled" + self.emb_file = _check_exists_and_download( + emb_file, EMB_URL, EMB_MD5, 'conll05st', download + ) self.word_dict = self._load_dict(self.word_dict_file) self.predicate_dict = self._load_dict(self.verb_dict_file) @@ -169,14 +193,17 @@ class Conll05st(Dataset): def _load_anno(self): tf = tarfile.open(self.data_file) wf = tf.extractfile( - "conll05st-release/test.wsj/words/test.wsj.words.gz") + "conll05st-release/test.wsj/words/test.wsj.words.gz" + ) pf = tf.extractfile( - "conll05st-release/test.wsj/props/test.wsj.props.gz") + "conll05st-release/test.wsj/props/test.wsj.props.gz" + ) self.sentences = [] self.predicates = [] self.labels = [] with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile( - fileobj=pf) as props_file: + fileobj=pf + ) as props_file: sentences = [] labels = [] one_seg = [] @@ -209,16 +236,17 @@ class Conll05st(Dataset): lbl_seq.append('I-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') != -1: - cur_tag = l[1:l.find('*')] + cur_tag = l[1 : l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = False elif l.find('(') != -1 and l.find(')') == -1: - cur_tag = l[1:l.find('*')] + cur_tag = l[1 : l.find('*')] lbl_seq.append('B-' + cur_tag) is_in_bracket = True else: - raise RuntimeError('Unexpected label: %s' % - l) + raise RuntimeError( + 'Unexpected label: %s' % l + ) self.sentences.append(sentences) 
self.predicates.append(verb_list[i]) @@ -282,9 +310,17 @@ class Conll05st(Dataset): pred_idx = [self.predicate_dict.get(predicate)] * sen_len label_idx = [self.label_dict.get(w) for w in labels] - return (np.array(word_idx), np.array(ctx_n2_idx), np.array(ctx_n1_idx), - np.array(ctx_0_idx), np.array(ctx_p1_idx), np.array(ctx_p2_idx), - np.array(pred_idx), np.array(mark), np.array(label_idx)) + return ( + np.array(word_idx), + np.array(ctx_n2_idx), + np.array(ctx_n1_idx), + np.array(ctx_0_idx), + np.array(ctx_p1_idx), + np.array(ctx_p2_idx), + np.array(pred_idx), + np.array(mark), + np.array(label_idx), + ) def __len__(self): return len(self.sentences) diff --git a/python/paddle/text/datasets/imdb.py b/python/paddle/text/datasets/imdb.py index 80b650574b4ddd870cf88186b1112616d71aa461..d71d23ff69231843b31438cde8dd47664a1362c1 100644 --- a/python/paddle/text/datasets/imdb.py +++ b/python/paddle/text/datasets/imdb.py @@ -71,15 +71,20 @@ class Imdb(Dataset): """ def __init__(self, data_file=None, mode='train', cutoff=150, download=True): - assert mode.lower() in ['train', 'test'], \ - "mode should be 'train', 'test', but got {}".format(mode) + assert mode.lower() in [ + 'train', + 'test', + ], "mode should be 'train', 'test', but got {}".format(mode) self.mode = mode.lower() self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, URL, MD5, - 'imdb', download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, URL, MD5, 'imdb', download + ) # Build a word dictionary from the corpus self.word_idx = self._build_work_dict(cutoff) @@ -111,9 +116,13 @@ class Imdb(Dataset): if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. 
data.append( - tarf.extractfile(tf).read().rstrip(b'\n\r').translate( - None, string.punctuation.encode( - 'latin-1')).lower().split()) + tarf.extractfile(tf) + .read() + .rstrip(b'\n\r') + .translate(None, string.punctuation.encode('latin-1')) + .lower() + .split() + ) tf = tarf.next() return data diff --git a/python/paddle/text/datasets/imikolov.py b/python/paddle/text/datasets/imikolov.py index 03d6560e9bce8d1c76ae3a1612eac732d7ba6e63..d1081f4dda61000813eef6084bf8a5ffcd9dfb40 100644 --- a/python/paddle/text/datasets/imikolov.py +++ b/python/paddle/text/datasets/imikolov.py @@ -70,19 +70,25 @@ class Imikolov(Dataset): """ - def __init__(self, - data_file=None, - data_type='NGRAM', - window_size=-1, - mode='train', - min_word_freq=50, - download=True): - assert data_type.upper() in ['NGRAM', 'SEQ'], \ - "data type should be 'NGRAM', 'SEQ', but got {}".format(data_type) + def __init__( + self, + data_file=None, + data_type='NGRAM', + window_size=-1, + mode='train', + min_word_freq=50, + download=True, + ): + assert data_type.upper() in [ + 'NGRAM', + 'SEQ', + ], "data type should be 'NGRAM', 'SEQ', but got {}".format(data_type) self.data_type = data_type.upper() - assert mode.lower() in ['train', 'test'], \ - "mode should be 'train', 'test', but got {}".format(mode) + assert mode.lower() in [ + 'train', + 'test', + ], "mode should be 'train', 'test', but got {}".format(mode) self.mode = mode.lower() self.window_size = window_size @@ -90,9 +96,12 @@ class Imikolov(Dataset): self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically disabled" - self.data_file = _check_exists_and_download(data_file, URL, MD5, - 'imikolov', download) + assert ( + download + ), "data_file is not set and downloading automatically disabled" + self.data_file = _check_exists_and_download( + data_file, URL, MD5, 'imikolov', download + ) # Build a word dictionary from the corpus self.word_idx = self._build_work_dict(min_word_freq) @@ -148,7 +157,7 @@ class Imikolov(Dataset): if len(l) >= self.window_size: l = [self.word_idx.get(w, UNK) for w in l] for i in range(self.window_size, len(l) + 1): - self.data.append(tuple(l[i - self.window_size:i])) + self.data.append(tuple(l[i - self.window_size : i])) elif self.data_type == 'SEQ': l = l.strip().split() l = [self.word_idx.get(w, UNK) for w in l] diff --git a/python/paddle/text/datasets/movielens.py b/python/paddle/text/datasets/movielens.py index f735d22f91f2863a02c0a50ee208e32fc4fcde09..cf23eae1d8449b14394efea47f9ac18afee62e4b 100644 --- a/python/paddle/text/datasets/movielens.py +++ b/python/paddle/text/datasets/movielens.py @@ -41,12 +41,18 @@ class MovieInfo(object): """ Get information from a movie. """ - return [[self.index], [categories_dict[c] for c in self.categories], - [movie_title_dict[w.lower()] for w in self.title.split()]] + return [ + [self.index], + [categories_dict[c] for c in self.categories], + [movie_title_dict[w.lower()] for w in self.title.split()], + ] def __str__(self): return "" % ( - self.index, self.title, self.categories) + self.index, + self.title, + self.categories, + ) def __repr__(self): return self.__str__() @@ -67,13 +73,20 @@ class UserInfo(object): """ Get information from a user. 
""" - return [[self.index], [0 if self.is_male else 1], [self.age], - [self.job_id]] + return [ + [self.index], + [0 if self.is_male else 1], + [self.age], + [self.job_id], + ] def __str__(self): return "" % ( - self.index, "M" if self.is_male else "F", age_table[self.age], - self.job_id) + self.index, + "M" if self.is_male else "F", + age_table[self.age], + self.job_id, + ) def __repr__(self): return str(self) @@ -124,21 +137,28 @@ class Movielens(Dataset): """ - def __init__(self, - data_file=None, - mode='train', - test_ratio=0.1, - rand_seed=0, - download=True): - assert mode.lower() in ['train', 'test'], \ - "mode should be 'train', 'test', but got {}".format(mode) + def __init__( + self, + data_file=None, + mode='train', + test_ratio=0.1, + rand_seed=0, + download=True, + ): + assert mode.lower() in [ + 'train', + 'test', + ], "mode should be 'train', 'test', but got {}".format(mode) self.mode = mode.lower() self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, URL, MD5, - 'sentiment', download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, URL, MD5, 'sentiment', download + ) self.test_ratio = test_ratio self.rand_seed = rand_seed @@ -167,7 +187,8 @@ class Movielens(Dataset): categories_set.add(c) title = pattern.match(title).group(1) self.movie_info[int(movie_id)] = MovieInfo( - index=movie_id, categories=categories, title=title) + index=movie_id, categories=categories, title=title + ) for w in title.split(): title_word_set.add(w.lower()) @@ -181,10 +202,9 @@ class Movielens(Dataset): for line in user_file: line = line.decode(encoding='latin') uid, gender, age, job, _ = line.strip().split("::") - self.user_info[int(uid)] = UserInfo(index=uid, - gender=gender, - age=age, - job_id=job) + self.user_info[int(uid)] = UserInfo( + index=uid, gender=gender, age=age, job_id=job + ) def _load_data(self): self.data = [] @@ -201,9 +221,13 @@ class Movielens(Dataset): mov = self.movie_info[mov_id] usr = self.user_info[uid] - self.data.append(usr.value() + \ - mov.value(self.categories_dict, self.movie_title_dict) + \ - [[rating]]) + self.data.append( + usr.value() + + mov.value( + self.categories_dict, self.movie_title_dict + ) + + [[rating]] + ) def __getitem__(self, idx): data = self.data[idx] diff --git a/python/paddle/text/datasets/uci_housing.py b/python/paddle/text/datasets/uci_housing.py index 0bf677e271a5cb3ec12d66416076aa34ffae8a0d..bf43332dd39057de48def783288e11ba83a1d9a0 100644 --- a/python/paddle/text/datasets/uci_housing.py +++ b/python/paddle/text/datasets/uci_housing.py @@ -23,8 +23,19 @@ __all__ = [] URL = 'http://paddlemodels.bj.bcebos.com/uci_housing/housing.data' MD5 = 'd4accdce7a25600298819f8e28e8d593' feature_names = [ - 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', - 'PTRATIO', 'B', 'LSTAT' + 'CRIM', + 'ZN', + 'INDUS', + 'CHAS', + 'NOX', + 'RM', + 'AGE', + 'DIS', + 'RAD', + 'TAX', + 'PTRATIO', + 'B', + 'LSTAT', ] @@ -73,15 +84,20 @@ class UCIHousing(Dataset): """ def __init__(self, data_file=None, mode='train', download=True): - assert mode.lower() in ['train', 'test'], \ - "mode should be 'train' or 'test', but got {}".format(mode) + assert mode.lower() in [ + 'train', + 'test', + ], "mode should be 'train' or 'test', but got {}".format(mode) self.mode = mode.lower() self.data_file = data_file if 
self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, URL, MD5, - 'uci_housing', download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, URL, MD5, 'uci_housing', download + ) # read dataset into memory self._load_data() @@ -91,8 +107,11 @@ class UCIHousing(Dataset): def _load_data(self, feature_num=14, ratio=0.8): data = np.fromfile(self.data_file, sep=' ') data = data.reshape(data.shape[0] // feature_num, feature_num) - maximums, minimums, avgs = data.max(axis=0), data.min( - axis=0), data.sum(axis=0) / data.shape[0] + maximums, minimums, avgs = ( + data.max(axis=0), + data.min(axis=0), + data.sum(axis=0) / data.shape[0], + ) for i in range(feature_num - 1): data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) offset = int(data.shape[0] * ratio) @@ -103,8 +122,9 @@ class UCIHousing(Dataset): def __getitem__(self, idx): data = self.data[idx] - return np.array(data[:-1]).astype(self.dtype), \ - np.array(data[-1:]).astype(self.dtype) + return np.array(data[:-1]).astype(self.dtype), np.array( + data[-1:] + ).astype(self.dtype) def __len__(self): return len(self.data) diff --git a/python/paddle/text/datasets/wmt14.py b/python/paddle/text/datasets/wmt14.py index 33c92c875e8dea7ebed268c37266b07ea88cbb7c..71f5c7af0f4fb92e881d2817fe93f3a1761417dc 100644 --- a/python/paddle/text/datasets/wmt14.py +++ b/python/paddle/text/datasets/wmt14.py @@ -20,12 +20,14 @@ from paddle.dataset.common import _check_exists_and_download __all__ = [] -URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' - 'cslm_joint_paper/data/dev+test.tgz') +URL_DEV_TEST = ( + 'http://www-lium.univ-lemans.fr/~schwenk/' + 'cslm_joint_paper/data/dev+test.tgz' +) MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' # this is a small set of data for test. The original data is too large and # will be add later. 
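For context on the UCIHousing._load_data logic reformatted above: it centers each feature column on its mean, divides by the column range, and takes the first 80% of rows as the train split. A minimal standalone sketch of that normalization on synthetic data (the random array is a placeholder; only the shapes and the 0.8 ratio come from the code above):

import numpy as np

# Illustrative stand-in for the housing table: rows of 14 columns
# (13 features plus the target), mirroring feature_num=14 above.
data = np.random.rand(20, 14)

maximums, minimums, avgs = (
    data.max(axis=0),
    data.min(axis=0),
    data.sum(axis=0) / data.shape[0],
)
# Same per-column rescaling as in _load_data; the last column (the target)
# is deliberately left untouched.
for i in range(14 - 1):
    data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])

offset = int(data.shape[0] * 0.8)  # ratio=0.8 -> first 80% becomes 'train'
train_data, test_data = data[:offset], data[offset:]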
-URL_TRAIN = ('http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz') +URL_TRAIN = 'http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz' MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' START = "" @@ -82,21 +84,24 @@ class WMT14(Dataset): """ - def __init__(self, - data_file=None, - mode='train', - dict_size=-1, - download=True): - assert mode.lower() in ['train', 'test', 'gen'], \ - "mode should be 'train', 'test' or 'gen', but got {}".format(mode) + def __init__( + self, data_file=None, mode='train', dict_size=-1, download=True + ): + assert mode.lower() in [ + 'train', + 'test', + 'gen', + ], "mode should be 'train', 'test' or 'gen', but got {}".format(mode) self.mode = mode.lower() self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, URL_TRAIN, - MD5_TRAIN, 'wmt14', - download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, URL_TRAIN, MD5_TRAIN, 'wmt14', download + ) # read dataset into memory assert dict_size > 0, "dict_size should be set as positive number" @@ -104,7 +109,6 @@ class WMT14(Dataset): self._load_data() def _load_data(self): - def __to_dict(fd, size): out_dict = dict() for line_count, line in enumerate(fd): @@ -119,13 +123,15 @@ class WMT14(Dataset): self.trg_ids_next = [] with tarfile.open(self.data_file, mode='r') as f: names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith("src.dict") ] assert len(names) == 1 self.src_dict = __to_dict(f.extractfile(names[0]), self.dict_size) names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith("trg.dict") ] assert len(names) == 1 @@ -133,7 +139,8 @@ class WMT14(Dataset): file_name = "{}/{}".format(self.mode, self.mode) names = [ - each_item.name for each_item in f + each_item.name + for each_item in f if each_item.name.endswith(file_name) ] for name in names: @@ -164,8 +171,11 @@ class WMT14(Dataset): self.trg_ids_next.append(trg_ids_next) def __getitem__(self, idx): - return (np.array(self.src_ids[idx]), np.array(self.trg_ids[idx]), - np.array(self.trg_ids_next[idx])) + return ( + np.array(self.src_ids[idx]), + np.array(self.trg_ids[idx]), + np.array(self.trg_ids_next[idx]), + ) def __len__(self): return len(self.src_ids) diff --git a/python/paddle/text/datasets/wmt16.py b/python/paddle/text/datasets/wmt16.py index 4538f4b8a6a2ca229860c07f3ed766554a1ed7e6..3048160acff111d344989c8807ad121951b32db5 100644 --- a/python/paddle/text/datasets/wmt16.py +++ b/python/paddle/text/datasets/wmt16.py @@ -25,7 +25,7 @@ from paddle.dataset.common import _check_exists_and_download __all__ = [] -DATA_URL = ("http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz") +DATA_URL = "http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz" DATA_MD5 = "0c38be43600334966403524a40dcd81e" TOTAL_EN_WORDS = 11250 @@ -102,43 +102,55 @@ class WMT16(Dataset): """ - def __init__(self, - data_file=None, - mode='train', - src_dict_size=-1, - trg_dict_size=-1, - lang='en', - download=True): - assert mode.lower() in ['train', 'test', 'val'], \ - "mode should be 'train', 'test' or 'val', but got {}".format(mode) + def __init__( + self, + data_file=None, + mode='train', + src_dict_size=-1, + trg_dict_size=-1, + lang='en', + download=True, + ): + assert mode.lower() in [ + 'train', + 'test', + 'val', + ], "mode should be 'train', 'test' or 
'val', but got {}".format(mode) self.mode = mode.lower() self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, DATA_URL, - DATA_MD5, 'wmt16', - download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, DATA_URL, DATA_MD5, 'wmt16', download + ) self.lang = lang assert src_dict_size > 0, "dict_size should be set as positive number" assert trg_dict_size > 0, "dict_size should be set as positive number" self.src_dict_size = min( - src_dict_size, (TOTAL_EN_WORDS if lang == "en" else TOTAL_DE_WORDS)) + src_dict_size, (TOTAL_EN_WORDS if lang == "en" else TOTAL_DE_WORDS) + ) self.trg_dict_size = min( - trg_dict_size, (TOTAL_DE_WORDS if lang == "en" else TOTAL_EN_WORDS)) + trg_dict_size, (TOTAL_DE_WORDS if lang == "en" else TOTAL_EN_WORDS) + ) # load source and target word dict self.src_dict = self._load_dict(lang, src_dict_size) - self.trg_dict = self._load_dict("de" if lang == "en" else "en", - trg_dict_size) + self.trg_dict = self._load_dict( + "de" if lang == "en" else "en", trg_dict_size + ) # load data self.data = self._load_data() def _load_dict(self, lang, dict_size, reverse=False): - dict_path = os.path.join(paddle.dataset.common.DATA_HOME, - "wmt16/%s_%d.dict" % (lang, dict_size)) + dict_path = os.path.join( + paddle.dataset.common.DATA_HOME, + "wmt16/%s_%d.dict" % (lang, dict_size), + ) dict_found = False if os.path.exists(dict_path): with open(dict_path, "rb") as d: @@ -161,18 +173,21 @@ class WMT16(Dataset): for line in f.extractfile("wmt16/train"): line = line.decode() line_split = line.strip().split("\t") - if len(line_split) != 2: continue + if len(line_split) != 2: + continue sen = line_split[0] if self.lang == "en" else line_split[1] for w in sen.split(): word_dict[w] += 1 with open(dict_path, "wb") as fout: fout.write( - ("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode()) + ("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode() + ) for idx, word in enumerate( - sorted(word_dict.items(), key=lambda x: x[1], - reverse=True)): - if idx + 3 == dict_size: break + sorted(word_dict.items(), key=lambda x: x[1], reverse=True) + ): + if idx + 3 == dict_size: + break fout.write(word[0].encode()) fout.write(b'\n') @@ -197,9 +212,11 @@ class WMT16(Dataset): if len(line_split) != 2: continue src_words = line_split[src_col].split() - src_ids = [start_id] + [ - self.src_dict.get(w, unk_id) for w in src_words - ] + [end_id] + src_ids = ( + [start_id] + + [self.src_dict.get(w, unk_id) for w in src_words] + + [end_id] + ) trg_words = line_split[trg_col].split() trg_ids = [self.trg_dict.get(w, unk_id) for w in trg_words] @@ -212,8 +229,11 @@ class WMT16(Dataset): self.trg_ids_next.append(trg_ids_next) def __getitem__(self, idx): - return (np.array(self.src_ids[idx]), np.array(self.trg_ids[idx]), - np.array(self.trg_ids_next[idx])) + return ( + np.array(self.src_ids[idx]), + np.array(self.trg_ids[idx]), + np.array(self.trg_ids_next[idx]), + ) def __len__(self): return len(self.src_ids) @@ -243,10 +263,14 @@ class WMT16(Dataset): en_dict = wmt16.get_dict('en') """ - dict_size = self.src_dict_size if lang == self.lang else self.trg_dict_size - - dict_path = os.path.join(paddle.dataset.common.DATA_HOME, - "wmt16/%s_%d.dict" % (lang, dict_size)) + dict_size = ( + self.src_dict_size if lang == self.lang else self.trg_dict_size + ) + + dict_path = 
os.path.join( + paddle.dataset.common.DATA_HOME, + "wmt16/%s_%d.dict" % (lang, dict_size), + ) assert os.path.exists(dict_path), "Word dictionary does not exist. " "Please invoke paddle.dataset.wmt16.train/test/validation first " "to build the dictionary." diff --git a/python/paddle/text/viterbi_decode.py b/python/paddle/text/viterbi_decode.py index a2a92da947288ecf8aef8c9fd24cfb2242a93ebc..07d4515cab8e2f1c022aa83ff74a1c974536e82f 100644 --- a/python/paddle/text/viterbi_decode.py +++ b/python/paddle/text/viterbi_decode.py @@ -21,11 +21,9 @@ from paddle import _C_ops, _legacy_C_ops __all__ = ['viterbi_decode', 'ViterbiDecoder'] -def viterbi_decode(potentials, - transition_params, - lengths, - include_bos_eos_tag=True, - name=None): +def viterbi_decode( + potentials, transition_params, lengths, include_bos_eos_tag=True, name=None +): """ Decode the highest scoring sequence of tags computed by transitions and potentials and get the viterbi path. @@ -59,34 +57,43 @@ def viterbi_decode(potentials, scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]] """ if in_dygraph_mode(): - return _C_ops.viterbi_decode(potentials, transition_params, lengths, - include_bos_eos_tag) + return _C_ops.viterbi_decode( + potentials, transition_params, lengths, include_bos_eos_tag + ) if _non_static_mode(): - return _legacy_C_ops.viterbi_decode(potentials, transition_params, - lengths, 'include_bos_eos_tag', - include_bos_eos_tag) - check_variable_and_dtype(potentials, 'input', ['float32', 'float64'], - 'viterbi_decode') - check_variable_and_dtype(transition_params, 'transitions', - ['float32', 'float64'], 'viterbi_decode') + return _legacy_C_ops.viterbi_decode( + potentials, + transition_params, + lengths, + 'include_bos_eos_tag', + include_bos_eos_tag, + ) + check_variable_and_dtype( + potentials, 'input', ['float32', 'float64'], 'viterbi_decode' + ) + check_variable_and_dtype( + transition_params, + 'transitions', + ['float32', 'float64'], + 'viterbi_decode', + ) check_variable_and_dtype(lengths, 'length', 'int64', 'viterbi_decode') check_type(include_bos_eos_tag, 'include_tag', bool, 'viterbi_decode') helper = LayerHelper('viterbi_decode', **locals()) attrs = {'include_bos_eos_tag': include_bos_eos_tag} scores = helper.create_variable_for_type_inference(potentials.dtype) path = helper.create_variable_for_type_inference('int64') - helper.append_op(type='viterbi_decode', - inputs={ - 'Input': potentials, - 'Transition': transition_params, - 'Length': lengths - }, - outputs={ - 'Scores': scores, - 'Path': path - }, - attrs=attrs) + helper.append_op( + type='viterbi_decode', + inputs={ + 'Input': potentials, + 'Transition': transition_params, + 'Length': lengths, + }, + outputs={'Scores': scores, 'Path': path}, + attrs=attrs, + ) return scores, path @@ -134,5 +141,10 @@ class ViterbiDecoder(Layer): self.name = name def forward(self, potentials, lengths): - return viterbi_decode(potentials, self.transitions, lengths, - self.include_bos_eos_tag, self.name) + return viterbi_decode( + potentials, + self.transitions, + lengths, + self.include_bos_eos_tag, + self.name, + ) diff --git a/python/paddle/utils/__init__.py b/python/paddle/utils/__init__.py index 6994fd139b647af272e0d75a3937bed79ae717c6..b51bf4b375305679336beb4f8e657e0a76b4ef3c 100644 --- a/python/paddle/utils/__init__.py +++ b/python/paddle/utils/__init__.py @@ -28,6 +28,4 @@ from . import image_util # noqa: F401 from . import cpp_extension # noqa: F401 from . 
import dlpack -__all__ = [ #noqa - 'deprecated', 'run_check', 'require_version', 'try_import' -] +__all__ = ['deprecated', 'run_check', 'require_version', 'try_import'] # noqa diff --git a/python/paddle/utils/cpp_extension/__init__.py b/python/paddle/utils/cpp_extension/__init__.py index 843f78d5c803a5f9dedc00865c7eaa5ad6c5fd6a..9ad431f00a65fc667c5c471453798be2a6e1a330 100644 --- a/python/paddle/utils/cpp_extension/__init__.py +++ b/python/paddle/utils/cpp_extension/__init__.py @@ -22,6 +22,10 @@ from .extension_utils import parse_op_info # noqa: F401 from .extension_utils import get_build_directory # noqa: F401 from .extension_utils import load_op_meta_info_and_register_op # noqa: F401 -__all__ = [ #noqa - 'CppExtension', 'CUDAExtension', 'load', 'setup', 'get_build_directory' +__all__ = [ # noqa + 'CppExtension', + 'CUDAExtension', + 'load', + 'setup', + 'get_build_directory', ] diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py index 5ff1d1cc29207d1517c84ca69dd49f0a5b4a3e4d..9dfa2f103da19fdb153e262284e32fb50e61773b 100644 --- a/python/paddle/utils/cpp_extension/cpp_extension.py +++ b/python/paddle/utils/cpp_extension/cpp_extension.py @@ -21,14 +21,41 @@ from setuptools.command.easy_install import easy_install from setuptools.command.build_ext import build_ext from distutils.command.build import build -from .extension_utils import add_compile_flag, find_cuda_home, find_rocm_home, normalize_extension_kwargs -from .extension_utils import is_cuda_file, prepare_unix_cudaflags, prepare_win_cudaflags -from .extension_utils import _import_module_from_library, _write_setup_file, _jit_compile -from .extension_utils import check_abi_compatibility, log_v, CustomOpInfo, parse_op_name_from +from .extension_utils import ( + add_compile_flag, + find_cuda_home, + find_rocm_home, + normalize_extension_kwargs, +) +from .extension_utils import ( + is_cuda_file, + prepare_unix_cudaflags, + prepare_win_cudaflags, +) +from .extension_utils import ( + _import_module_from_library, + _write_setup_file, + _jit_compile, +) +from .extension_utils import ( + check_abi_compatibility, + log_v, + CustomOpInfo, + parse_op_name_from, +) from .extension_utils import _reset_so_rpath, clean_object_if_change_cflags -from .extension_utils import bootstrap_context, get_build_directory, add_std_without_repeat - -from .extension_utils import IS_WINDOWS, OS_NAME, MSVC_COMPILE_FLAGS, MSVC_COMPILE_FLAGS +from .extension_utils import ( + bootstrap_context, + get_build_directory, + add_std_without_repeat, +) + +from .extension_utils import ( + IS_WINDOWS, + OS_NAME, + MSVC_COMPILE_FLAGS, + MSVC_COMPILE_FLAGS, +) from .extension_utils import CLANG_COMPILE_FLAGS, CLANG_LINK_FLAGS from ...fluid import core @@ -39,6 +66,7 @@ from ...fluid import core if IS_WINDOWS: from distutils.command.build_ext import build_ext as _du_build_ext from unittest.mock import Mock + _du_build_ext.get_export_symbols = Mock(return_value=None) CUDA_HOME = find_cuda_home() @@ -147,7 +175,8 @@ def setup(**attr): # if not specific cmdclass in setup, add it automatically. 
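As background on the setup() path being reformatted here: when no build_ext cmdclass is supplied it injects BuildExtension automatically, requires exactly one Extension, and rejects names ending in 'module'. A hedged sketch of a user-side setup.py that satisfies those checks (the op name and source file names are placeholders, not files from this repository):

from paddle.utils.cpp_extension import CUDAExtension, setup

setup(
    name='custom_relu',                        # must not end with 'module'
    ext_modules=CUDAExtension(                 # exactly one Extension is allowed
        sources=['relu_op.cc', 'relu_op.cu']   # placeholder source files
    ),
)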
if 'build_ext' not in cmdclass: cmdclass['build_ext'] = BuildExtension.with_options( - no_python_abi_suffix=True) + no_python_abi_suffix=True + ) attr['cmdclass'] = cmdclass error_msg = """ @@ -167,17 +196,19 @@ def setup(**attr): if 'name' not in attr: raise ValueError(error_msg) - assert not attr['name'].endswith('module'), \ - "Please don't use 'module' as suffix in `name` argument, " + assert not attr['name'].endswith( + 'module' + ), "Please don't use 'module' as suffix in `name` argument, " "it will be stripped in setuptools.bdist_egg and cause import error." ext_modules = attr.get('ext_modules', []) if not isinstance(ext_modules, list): ext_modules = [ext_modules] - assert len( - ext_modules - ) == 1, "Required only one Extension, but received {}. If you want to compile multi operators, you can include all necessary source files in one Extension.".format( - len(ext_modules)) + assert ( + len(ext_modules) == 1 + ), "Required only one Extension, but received {}. If you want to compile multi operators, you can include all necessary source files in one Extension.".format( + len(ext_modules) + ) # replace Extension.name with attr['name] to keep consistant with Package name. for ext_module in ext_modules: ext_module.name = attr['name'] @@ -335,7 +366,6 @@ class BuildExtension(build_ext, object): """ class cls_with_options(cls): - def __init__(self, *args, **kwargs): kwargs.update(options) cls.__init__(self, *args, **kwargs) @@ -380,8 +410,9 @@ class BuildExtension(build_ext, object): # cflags have changed and delete the built shared library to re-compile the source # even though source file content keep unchanged. so_name = self.get_ext_fullpath(self.extensions[0].name) - clean_object_if_change_cflags(os.path.abspath(so_name), - self.extensions[0]) + clean_object_if_change_cflags( + os.path.abspath(so_name), self.extensions[0] + ) # Consider .cu, .cu.cc as valid source extensions. self.compiler.src_extensions += ['.cu', '.cu.cc'] @@ -393,8 +424,9 @@ class BuildExtension(build_ext, object): else: original_compile = self.compiler._compile - def unix_custom_single_compiler(obj, src, ext, cc_args, extra_postargs, - pp_opts): + def unix_custom_single_compiler( + obj, src, ext, cc_args, extra_postargs, pp_opts + ): """ Monkey patch machanism to replace inner compiler to custom complie process on Unix platform. """ @@ -407,7 +439,9 @@ class BuildExtension(build_ext, object): # nvcc or hipcc compile CUDA source if is_cuda_file(src): if core.is_compiled_with_rocm(): - assert ROCM_HOME is not None, "Not found ROCM runtime, \ + assert ( + ROCM_HOME is not None + ), "Not found ROCM runtime, \ please use `export ROCM_PATH= XXX` to specify it." hipcc_cmd = os.path.join(ROCM_HOME, 'bin', 'hipcc') @@ -416,7 +450,9 @@ class BuildExtension(build_ext, object): if isinstance(cflags, dict): cflags = cflags['hipcc'] else: - assert CUDA_HOME is not None, "Not found CUDA runtime, \ + assert ( + CUDA_HOME is not None + ), "Not found CUDA runtime, \ please use `export CUDA_HOME= XXX` to specify it." 
nvcc_cmd = os.path.join(CUDA_HOME, 'bin', 'nvcc') @@ -435,7 +471,8 @@ class BuildExtension(build_ext, object): cflags.append('-D__HIP_PLATFORM_HCC__') cflags.append('-D__HIP_NO_HALF_CONVERSIONS__=1') cflags.append( - '-DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP') + '-DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP' + ) # NOTE(Aurelius84): Since Paddle 2.0, we require gcc version > 5.x, # so we add this flag to ensure the symbol names from user compiled @@ -449,22 +486,24 @@ class BuildExtension(build_ext, object): else: cflags.append('-DPADDLE_WITH_CUDA') - add_std_without_repeat(cflags, - self.compiler.compiler_type, - use_std14=True) + add_std_without_repeat( + cflags, self.compiler.compiler_type, use_std14=True + ) original_compile(obj, src, ext, cc_args, cflags, pp_opts) finally: # restore original_compiler self.compiler.set_executable('compiler_so', original_compiler) - def win_custom_single_compiler(sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None): + def win_custom_single_compiler( + sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None, + ): self.cflags = copy.deepcopy(extra_postargs) extra_postargs = None @@ -481,27 +520,32 @@ class BuildExtension(build_ext, object): # Using regex to match src, obj and include files src_regex = re.compile('/T(p|c)(.*)') src_list = [ - m.group(2) for m in (src_regex.match(elem) for elem in cmd) + m.group(2) + for m in (src_regex.match(elem) for elem in cmd) if m ] obj_regex = re.compile('/Fo(.*)') obj_list = [ - m.group(1) for m in (obj_regex.match(elem) for elem in cmd) + m.group(1) + for m in (obj_regex.match(elem) for elem in cmd) if m ] include_regex = re.compile(r'((\-|\/)I.*)') include_list = [ m.group(1) - for m in (include_regex.match(elem) for elem in cmd) if m + for m in (include_regex.match(elem) for elem in cmd) + if m ] assert len(src_list) == 1 and len(obj_list) == 1 src = src_list[0] obj = obj_list[0] if is_cuda_file(src): - assert CUDA_HOME is not None, "Not found CUDA runtime, \ + assert ( + CUDA_HOME is not None + ), "Not found CUDA runtime, \ please use `export CUDA_HOME= XXX` to specify it." 
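The Windows spawn patch above recovers the source, object, and include arguments from the raw cl.exe command line with three regexes. A small self-contained illustration of that parsing step (the toy command and paths are made up):

import re

# Toy argument list in the shape win_custom_spawn receives; paths are placeholders.
cmd = ['cl.exe', '/c', '/Tpops\\relu_op.cc', '/Foobj\\relu_op.obj', '-IC:\\include']

src_regex = re.compile('/T(p|c)(.*)')
obj_regex = re.compile('/Fo(.*)')
include_regex = re.compile(r'((\-|\/)I.*)')

src_list = [m.group(2) for m in (src_regex.match(e) for e in cmd) if m]
obj_list = [m.group(1) for m in (obj_regex.match(e) for e in cmd) if m]
include_list = [m.group(1) for m in (include_regex.match(e) for e in cmd) if m]
print(src_list, obj_list, include_list)
# ['ops\\relu_op.cc'] ['obj\\relu_op.obj'] ['-IC:\\include']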
nvcc_cmd = os.path.join(CUDA_HOME, 'bin', 'nvcc') @@ -515,8 +559,9 @@ class BuildExtension(build_ext, object): cflags = prepare_win_cudaflags(cflags) + ['--use-local-env'] for flag in MSVC_COMPILE_FLAGS: cflags = ['-Xcompiler', flag] + cflags - cmd = [nvcc_cmd, '-c', src, '-o', obj - ] + include_list + cflags + cmd = ( + [nvcc_cmd, '-c', src, '-o', obj] + include_list + cflags + ) elif isinstance(self.cflags, dict): cflags = MSVC_COMPILE_FLAGS + self.cflags['cxx'] cmd += cflags @@ -531,9 +576,16 @@ class BuildExtension(build_ext, object): try: self.compiler.spawn = win_custom_spawn - return original_compile(sources, output_dir, macros, - include_dirs, debug, extra_preargs, - extra_postargs, depends) + return original_compile( + sources, + output_dir, + macros, + include_dirs, + debug, + extra_preargs, + extra_postargs, + depends, + ) finally: self.compiler.spawn = original_spawn @@ -546,8 +598,9 @@ class BuildExtension(build_ext, object): def wrapper(source_filenames, strip_dir=0, output_dir=''): try: - objects = origina_func(source_filenames, strip_dir, - output_dir) + objects = origina_func( + source_filenames, strip_dir, output_dir + ) for i, source in enumerate(source_filenames): # modify xx.o -> xx.cu.o/xx.cu.obj if is_cuda_file(source): @@ -578,7 +631,8 @@ class BuildExtension(build_ext, object): self.compiler._compile = unix_custom_single_compiler self.compiler.object_filenames = object_filenames_with_cuda( - self.compiler.object_filenames, self.build_lib) + self.compiler.object_filenames, self.build_lib + ) self._record_op_info() print("Compiling user custom op, it will cost a few seconds.....") @@ -594,10 +648,11 @@ class BuildExtension(build_ext, object): split_str = '.' name_items = ext_name.split(split_str) if self.no_python_abi_suffix: - assert len( - name_items - ) > 2, "Expected len(name_items) > 2, but received {}".format( - len(name_items)) + assert ( + len(name_items) > 2 + ), "Expected len(name_items) > 2, but received {}".format( + len(name_items) + ) name_items.pop(-2) ext_name = split_str.join(name_items) @@ -613,11 +668,13 @@ class BuildExtension(build_ext, object): """ compiler_infos = ['clang'] + CLANG_COMPILE_FLAGS linker_infos = ['clang'] + CLANG_LINK_FLAGS - self.compiler.set_executables(compiler=compiler_infos, - compiler_so=compiler_infos, - compiler_cxx=['clang'], - linker_exe=['clang'], - linker_so=linker_infos) + self.compiler.set_executables( + compiler=compiler_infos, + compiler_so=compiler_infos, + compiler_cxx=['clang'], + linker_exe=['clang'], + linker_so=linker_infos, + ) def _check_abi(self): """ @@ -632,11 +689,16 @@ class BuildExtension(build_ext, object): check_abi_compatibility(compiler) # Warn user if VC env is activated but `DISTUTILS_USE_SDK` is not set. - if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ: + if ( + IS_WINDOWS + and 'VSCMD_ARG_TGT_ARCH' in os.environ + and 'DISTUTILS_USE_SDK' not in os.environ + ): msg = ( 'It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.' 'This may lead to multiple activations of the VC env.' - 'Please run `set DISTUTILS_USE_SDK=1` and try again.') + 'Please run `set DISTUTILS_USE_SDK=1` and try again.' 
+ ) raise UserWarning(msg) def _record_op_info(self): @@ -657,9 +719,9 @@ class BuildExtension(build_ext, object): op_names = parse_op_name_from(sources) for op_name in op_names: - CustomOpInfo.instance().add(op_name, - so_name=so_name, - so_path=so_path) + CustomOpInfo.instance().add( + op_name, so_name=so_name, so_path=so_path + ) class EasyInstallCommand(easy_install, object): @@ -712,7 +774,6 @@ class BuildCommand(build, object): """ class cls_with_options(cls): - def __init__(self, *args, **kwargs): kwargs.update(options) cls.__init__(self, *args, **kwargs) @@ -735,14 +796,16 @@ class BuildCommand(build, object): self.build_base = self._specified_build_base -def load(name, - sources, - extra_cxx_cflags=None, - extra_cuda_cflags=None, - extra_ldflags=None, - extra_include_paths=None, - build_directory=None, - verbose=False): +def load( + name, + sources, + extra_cxx_cflags=None, + extra_cuda_cflags=None, + extra_ldflags=None, + extra_include_paths=None, + build_directory=None, + verbose=False, +): """ An Interface to automatically compile C++/CUDA source files Just-In-Time and return callable python function as other Paddle layers API. It will @@ -836,27 +899,42 @@ def load(name, file_path = os.path.join(build_directory, "{}_setup.py".format(name)) sources = [os.path.abspath(source) for source in sources] - if extra_cxx_cflags is None: extra_cxx_cflags = [] - if extra_cuda_cflags is None: extra_cuda_cflags = [] + if extra_cxx_cflags is None: + extra_cxx_cflags = [] + if extra_cuda_cflags is None: + extra_cuda_cflags = [] assert isinstance( extra_cxx_cflags, list ), "Required type(extra_cxx_cflags) == list[str], but received {}".format( - extra_cxx_cflags) + extra_cxx_cflags + ) assert isinstance( extra_cuda_cflags, list ), "Required type(extra_cuda_cflags) == list[str], but received {}".format( - extra_cuda_cflags) + extra_cuda_cflags + ) log_v( "additional extra_cxx_cflags: [{}], extra_cuda_cflags: [{}]".format( - ' '.join(extra_cxx_cflags), ' '.join(extra_cuda_cflags)), verbose) + ' '.join(extra_cxx_cflags), ' '.join(extra_cuda_cflags) + ), + verbose, + ) # write setup.py file and compile it build_base_dir = os.path.join(build_directory, name) - _write_setup_file(name, sources, file_path, build_base_dir, - extra_include_paths, extra_cxx_cflags, extra_cuda_cflags, - extra_ldflags, verbose) + _write_setup_file( + name, + sources, + file_path, + build_base_dir, + extra_include_paths, + extra_cxx_cflags, + extra_cuda_cflags, + extra_ldflags, + verbose, + ) _jit_compile(file_path, verbose) # import as callable python api diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index 8b07abe7c8501062269c9b265b4ff047acbfafe7..57a1ae1f0f581c20f0decbc34019cc08e8b4fc28 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -50,23 +50,46 @@ OS_NAME = sys.platform IS_WINDOWS = OS_NAME.startswith('win') MSVC_COMPILE_FLAGS = [ - '/MT', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', - '/wd4190', '/EHsc', '/w', '/DGOOGLE_GLOG_DLL_DECL', - '/DBOOST_HAS_STATIC_ASSERT', '/DNDEBUG', '/DPADDLE_USE_DSO' + '/MT', + '/wd4819', + '/wd4251', + '/wd4244', + '/wd4267', + '/wd4275', + '/wd4018', + '/wd4190', + '/EHsc', + '/w', + '/DGOOGLE_GLOG_DLL_DECL', + '/DBOOST_HAS_STATIC_ASSERT', + '/DNDEBUG', + '/DPADDLE_USE_DSO', ] CLANG_COMPILE_FLAGS = [ - '-fno-common', '-dynamic', '-DNDEBUG', '-g', '-fwrapv', '-O3', '-arch', - 'x86_64' + '-fno-common', + '-dynamic', 
+ '-DNDEBUG', + '-g', + '-fwrapv', + '-O3', + '-arch', + 'x86_64', ] CLANG_LINK_FLAGS = [ - '-dynamiclib', '-undefined', 'dynamic_lookup', '-arch', 'x86_64' + '-dynamiclib', + '-undefined', + 'dynamic_lookup', + '-arch', + 'x86_64', ] MSVC_LINK_FLAGS = ['/MACHINE:X64'] if core.is_compiled_with_rocm(): COMMON_HIPCC_FLAGS = [ - '-DPADDLE_WITH_HIP', '-DEIGEN_USE_GPU', '-DEIGEN_USE_HIP' + '-DPADDLE_WITH_HIP', + '-DEIGEN_USE_GPU', + '-DEIGEN_USE_HIP', ] else: COMMON_NVCC_FLAGS = ['-DPADDLE_WITH_CUDA', '-DEIGEN_USE_GPU'] @@ -114,7 +137,7 @@ DEFAULT_OP_ATTR_NAMES = [ core.op_proto_and_checker_maker.kOpNameScopeAttrName(), core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(), core.op_proto_and_checker_maker.kOpDeviceAttrName(), - core.op_proto_and_checker_maker.kOpWithQuantAttrName() + core.op_proto_and_checker_maker.kOpWithQuantAttrName(), ] @@ -140,7 +163,8 @@ def custom_write_stub(resource, pyfile): Customized write_stub function to allow us to inject generated python api codes into egg python file. """ - _stub_template = textwrap.dedent(""" + _stub_template = textwrap.dedent( + """ import os import sys import types @@ -170,17 +194,18 @@ def custom_write_stub(resource, pyfile): {custom_api} - """).lstrip() + """ + ).lstrip() # Parse registerring op information _, op_info = CustomOpInfo.instance().last() so_path = op_info.so_path new_custom_ops = load_op_meta_info_and_register_op(so_path) - assert len( - new_custom_ops - ) > 0, "Required at least one custom operators, but received len(custom_op) = %d" % len( - new_custom_ops) + assert len(new_custom_ops) > 0, ( + "Required at least one custom operators, but received len(custom_op) = %d" + % len(new_custom_ops) + ) # NOTE: To avoid importing .so file instead of python file because they have same name, # we rename .so shared library to another name, see EasyInstallCommand. @@ -193,8 +218,10 @@ def custom_write_stub(resource, pyfile): with open(pyfile, 'w') as f: f.write( - _stub_template.format(resource=resource, - custom_api='\n\n'.join(api_content))) + _stub_template.format( + resource=resource, custom_api='\n\n'.join(api_content) + ) + ) OpInfo = collections.namedtuple('OpInfo', ['so_name', 'so_path']) @@ -213,8 +240,8 @@ class CustomOpInfo: def __init__(self): assert not hasattr( - self.__class__, - '_instance'), 'Please use `instance()` to get CustomOpInfo object!' + self.__class__, '_instance' + ), 'Please use `instance()` to get CustomOpInfo object!' 
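CustomOpInfo above is a process-wide singleton: constructing it directly trips the assert, records live in an OrderedDict, and last() hands back the most recently registered op. A sketch of that bookkeeping contract mirroring the internal call site shown earlier in this patch, with placeholder names and paths (this is internal machinery, not a public user API):

from paddle.utils.cpp_extension.extension_utils import CustomOpInfo

info = CustomOpInfo.instance()  # always obtained via instance(), never CustomOpInfo()
info.add('my_op', so_name='my_op.so', so_path='/tmp/my_op.so')  # placeholder values
print(info.last())  # ('my_op', OpInfo(so_name='my_op.so', so_path='/tmp/my_op.so'))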
# NOTE(Aurelius84): Use OrderedDict to save more order information self.op_info_map = collections.OrderedDict() @@ -229,20 +256,22 @@ class CustomOpInfo: return next(reversed(self.op_info_map.items())) -VersionFields = collections.namedtuple('VersionFields', [ - 'sources', - 'extra_compile_args', - 'extra_link_args', - 'library_dirs', - 'runtime_library_dirs', - 'include_dirs', - 'define_macros', - 'undef_macros', -]) +VersionFields = collections.namedtuple( + 'VersionFields', + [ + 'sources', + 'extra_compile_args', + 'extra_link_args', + 'library_dirs', + 'runtime_library_dirs', + 'include_dirs', + 'define_macros', + 'undef_macros', + ], +) class VersionManager: - def __init__(self, version_field): self.version_field = version_field self.version = self.hasher(version_field) @@ -253,14 +282,17 @@ class VersionManager: md5 = hashlib.md5() for field in version_field._fields: elem = getattr(version_field, field) - if not elem: continue + if not elem: + continue if isinstance(elem, (list, tuple, dict)): flat_elem = flatten(elem) md5 = combine_hash(md5, tuple(flat_elem)) else: raise RuntimeError( - "Support types with list, tuple and dict, but received {} with {}." - .format(type(elem), elem)) + "Support types with list, tuple and dict, but received {} with {}.".format( + type(elem), elem + ) + ) return md5.hexdigest() @@ -314,8 +346,10 @@ def clean_object_if_change_cflags(so_path, extension): # delete shared library file if version is changed to re-compile it. if so_version is not None and so_version != versioner.version: log_v( - "Re-Compiling {}, because specified cflags have been changed. New signature {} has been saved into {}." - .format(so_name, versioner.version, version_file)) + "Re-Compiling {}, because specified cflags have been changed. New signature {} has been saved into {}.".format( + so_name, versioner.version, version_file + ) + ) os.remove(so_path) # update new version information new_version_info = versioner.details @@ -335,13 +369,26 @@ def prepare_unix_cudaflags(cflags): Prepare all necessary compiled flags for nvcc compiling CUDA files. """ if core.is_compiled_with_rocm(): - cflags = COMMON_HIPCC_FLAGS + ['-Xcompiler', '-fPIC' - ] + cflags + get_rocm_arch_flags(cflags) + cflags = ( + COMMON_HIPCC_FLAGS + + ['-Xcompiler', '-fPIC'] + + cflags + + get_rocm_arch_flags(cflags) + ) else: - cflags = COMMON_NVCC_FLAGS + [ - '-ccbin', 'cc', '-Xcompiler', '-fPIC', '--expt-relaxed-constexpr', - '-DNVCC' - ] + cflags + get_cuda_arch_flags(cflags) + cflags = ( + COMMON_NVCC_FLAGS + + [ + '-ccbin', + 'cc', + '-Xcompiler', + '-fPIC', + '--expt-relaxed-constexpr', + '-DNVCC', + ] + + cflags + + get_cuda_arch_flags(cflags) + ) return cflags @@ -390,6 +437,7 @@ def _get_fluid_path(): Return installed fluid dir path. 
""" import paddle + return os.path.join(os.path.dirname(paddle.__file__), 'fluid') @@ -434,7 +482,8 @@ def _reset_so_rpath(so_path): origin_runtime_path = "@loader_path/../libs/" rpath = "@rpath/{}".format(_get_core_name()) cmd = 'install_name_tool -change {} {} {}'.format( - origin_runtime_path, rpath, so_path) + origin_runtime_path, rpath, so_path + ) run_cmd(cmd) @@ -448,7 +497,8 @@ def _get_include_dirs_when_compiling(compile_dir): path = os.path.abspath(compile_dir) include_dirs_file = os.path.join(path, include_dirs_file) assert os.path.isfile(include_dirs_file), "File {} does not exist".format( - include_dirs_file) + include_dirs_file + ) with open(include_dirs_file, 'r') as f: include_dirs = [line.strip() for line in f.readlines() if line.strip()] @@ -566,9 +616,13 @@ def create_sym_link_if_not_exist(): except Exception: warnings.warn( "Failed to create soft symbol link for {}.\n You can run prompt as administrator and execute the " - "following command manually: `mklink {} {}`. Now it will create hard link for {} trickly." - .format(raw_core_name, new_dll_core_path, core_path, - raw_core_name)) + "following command manually: `mklink {} {}`. Now it will create hard link for {} trickly.".format( + raw_core_name, + new_dll_core_path, + core_path, + raw_core_name, + ) + ) run_cmd('mklink /H {} {}'.format(new_dll_core_path, core_path)) # libpaddle with lib suffix assert os.path.exists(new_dll_core_path) @@ -583,8 +637,10 @@ def create_sym_link_if_not_exist(): assert os.path.exists(new_lib_core_path) except Exception: raise RuntimeError( - "Failed to create soft symbol link for {}.\n Please execute the following command manually: `ln -s {} {}`" - .format(raw_core_name, core_path, new_lib_core_path)) + "Failed to create soft symbol link for {}.\n Please execute the following command manually: `ln -s {} {}`".format( + raw_core_name, core_path, new_lib_core_path + ) + ) # libpaddle without suffix return raw_core_name[:-3] @@ -602,8 +658,9 @@ def find_cuda_home(): which_cmd = 'where' if IS_WINDOWS else 'which' try: with open(os.devnull, 'w') as devnull: - nvcc_path = subprocess.check_output([which_cmd, 'nvcc'], - stderr=devnull) + nvcc_path = subprocess.check_output( + [which_cmd, 'nvcc'], stderr=devnull + ) nvcc_path = nvcc_path.decode() # Multi CUDA, select the first nvcc_path = nvcc_path.split('\r\n')[0] @@ -621,8 +678,11 @@ def find_cuda_home(): else: cuda_home = "/usr/local/cuda" # step 3. check whether path is valid - if cuda_home and not os.path.exists( - cuda_home) and core.is_compiled_with_cuda(): + if ( + cuda_home + and not os.path.exists(cuda_home) + and core.is_compiled_with_cuda() + ): cuda_home = None return cuda_home @@ -640,8 +700,9 @@ def find_rocm_home(): which_cmd = 'where' if IS_WINDOWS else 'which' try: with open(os.devnull, 'w') as devnull: - hipcc_path = subprocess.check_output([which_cmd, 'hipcc'], - stderr=devnull) + hipcc_path = subprocess.check_output( + [which_cmd, 'hipcc'], stderr=devnull + ) hipcc_path = hipcc_path.decode() hipcc_path = hipcc_path.rstrip('\r\n') @@ -650,8 +711,11 @@ def find_rocm_home(): except: rocm_home = "/opt/rocm" # step 3. 
check whether path is valid - if rocm_home and not os.path.exists( - rocm_home) and core.is_compiled_with_rocm(): + if ( + rocm_home + and not os.path.exists(rocm_home) + and core.is_compiled_with_rocm() + ): rocm_home = None return rocm_home @@ -719,8 +783,9 @@ def find_clang_cpp_include(compiler='clang'): if "InstalledDir" in info: v1_path = info.split(':')[-1].strip() if v1_path and os.path.exists(v1_path): - std_v1_includes = os.path.join(os.path.dirname(v1_path), - 'include/c++/v1') + std_v1_includes = os.path.join( + os.path.dirname(v1_path), 'include/c++/v1' + ) except Exception: # Just raise warnings because the include dir is not required. warnings.warn( @@ -820,15 +885,20 @@ def get_build_directory(verbose=False): root_extensions_directory = os.environ.get('PADDLE_EXTENSION_DIR') if root_extensions_directory is None: dir_name = "paddle_extensions" - root_extensions_directory = os.path.join(os.path.expanduser('~/.cache'), - dir_name) + root_extensions_directory = os.path.join( + os.path.expanduser('~/.cache'), dir_name + ) if IS_WINDOWS: root_extensions_directory = os.path.normpath( - root_extensions_directory) + root_extensions_directory + ) log_v( - "$PADDLE_EXTENSION_DIR is not set, using path: {} by default.". - format(root_extensions_directory), verbose) + "$PADDLE_EXTENSION_DIR is not set, using path: {} by default.".format( + root_extensions_directory + ), + verbose, + ) if not os.path.exists(root_extensions_directory): os.makedirs(root_extensions_directory) @@ -843,8 +913,10 @@ def parse_op_info(op_name): """ if op_name not in OpProtoHolder.instance().op_proto_map: raise ValueError( - "Please load {} shared library file firstly by `paddle.utils.cpp_extension.load_op_meta_info_and_register_op(...)`" - .format(op_name)) + "Please load {} shared library file firstly by `paddle.utils.cpp_extension.load_op_meta_info_and_register_op(...)`".format( + op_name + ) + ) op_proto = OpProtoHolder.instance().get_op_proto(op_name) in_names = [x.name for x in op_proto.inputs] @@ -869,21 +941,22 @@ def _import_module_from_library(module_name, build_directory, verbose=False): ext_path = os.path.join(build_directory, module_name + dynamic_suffix) if not os.path.exists(ext_path): raise FileNotFoundError( - "Extension path: {} does not exist.".format(ext_path)) + "Extension path: {} does not exist.".format(ext_path) + ) # load custom op_info and kernels from .so shared library log_v('loading shared library from: {}'.format(ext_path), verbose) op_names = load_op_meta_info_and_register_op(ext_path) # generate Python api in ext_path - return _generate_python_module(module_name, op_names, build_directory, - verbose) + return _generate_python_module( + module_name, op_names, build_directory, verbose + ) -def _generate_python_module(module_name, - op_names, - build_directory, - verbose=False): +def _generate_python_module( + module_name, op_names, build_directory, verbose=False +): """ Automatically generate python file to allow import or load into as module """ @@ -895,8 +968,9 @@ def _generate_python_module(module_name, # NOTE: Use unique id as suffix to avoid write same file at same time in # both multi-thread and multi-process. 
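get_build_directory(), reformatted a little above, resolves the JIT build root from the PADDLE_EXTENSION_DIR environment variable and only falls back to ~/.cache/paddle_extensions when it is unset, creating the directory on demand. A small usage sketch (the /tmp path is a placeholder):

import os
from paddle.utils.cpp_extension import get_build_directory

os.environ.setdefault('PADDLE_EXTENSION_DIR', '/tmp/paddle_extensions')  # placeholder path
print(get_build_directory())  # -> /tmp/paddle_extensions (created if missing)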
thread_id = str(threading.currentThread().ident) - api_file = os.path.join(build_directory, - module_name + '_' + thread_id + '.py') + api_file = os.path.join( + build_directory, module_name + '_' + thread_id + '.py' + ) log_v("generate api file: {}".format(api_file), verbose) # delete the temp file before exit python process @@ -913,10 +987,17 @@ def _generate_python_module(module_name, def _custom_api_content(op_name): - params_str, ins_str, attrs_str, outs_str, in_names, attrs_names = _get_api_inputs_str( - op_name) + ( + params_str, + ins_str, + attrs_str, + outs_str, + in_names, + attrs_names, + ) = _get_api_inputs_str(op_name) lower_in_names = [p.split("@")[0].lower() for p in in_names] - API_TEMPLATE = textwrap.dedent(""" + API_TEMPLATE = textwrap.dedent( + """ import paddle.fluid.core as core from paddle.fluid.core import VarBase, CustomOpKernelContext from paddle.fluid.framework import _non_static_mode, _dygraph_tracer, _in_legacy_dygraph, in_dygraph_mode @@ -956,7 +1037,8 @@ def _custom_api_content(op_name): res = [outs[out_name] for out_name in out_names] return res[0] if len(res)==1 else res - """).lstrip() + """ + ).lstrip() # generate python api file api_content = API_TEMPLATE.format( @@ -967,7 +1049,8 @@ def _custom_api_content(op_name): # "[x, y, z]"" in_names="[" + ",".join(lower_in_names) + "]", attr_names="[" + ",".join(attrs_names) + "]", - out_names=outs_str) + out_names=outs_str, + ) return api_content @@ -978,7 +1061,8 @@ def _load_module_from_file(api_file_path, module_name, verbose=False): """ if not os.path.exists(api_file_path): raise FileNotFoundError( - "File : {} does not exist.".format(api_file_path)) + "File : {} does not exist.".format(api_file_path) + ) # Unique readable module name to place custom api. log_v('import module from file: {}'.format(api_file_path), verbose) @@ -1003,35 +1087,40 @@ def _get_api_inputs_str(op_name): # input name by `@`, and only use first substr as argument params_str = ','.join([p.split("@")[0].lower() for p in param_names]) # e.g: {'X': x, 'Y': y, 'Z': z} - ins_str = "{%s}" % ','.join([ - "'{}' : {}".format(in_name, - in_name.split("@")[0].lower()) - for in_name in in_names - ]) + ins_str = "{%s}" % ','.join( + [ + "'{}' : {}".format(in_name, in_name.split("@")[0].lower()) + for in_name in in_names + ] + ) # e.g: {'num': n} - attrs_str = "{%s}" % ",".join([ - "'{}' : {}".format(attr_name, - attr_name.split("@")[0].lower()) - for attr_name in attr_names - ]) + attrs_str = "{%s}" % ",".join( + [ + "'{}' : {}".format(attr_name, attr_name.split("@")[0].lower()) + for attr_name in attr_names + ] + ) # e.g: ['Out', 'Index'] outs_str = "[%s]" % ','.join(["'{}'".format(name) for name in out_names]) return params_str, ins_str, attrs_str, outs_str, in_names, attr_names -def _write_setup_file(name, - sources, - file_path, - build_dir, - include_dirs, - extra_cxx_cflags, - extra_cuda_cflags, - link_args, - verbose=False): +def _write_setup_file( + name, + sources, + file_path, + build_dir, + include_dirs, + extra_cxx_cflags, + extra_cuda_cflags, + link_args, + verbose=False, +): """ Automatically generate setup.py and write it into build directory. 
""" - template = textwrap.dedent(""" + template = textwrap.dedent( + """ import os from paddle.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension, setup from paddle.utils.cpp_extension import get_build_directory @@ -1048,21 +1137,24 @@ def _write_setup_file(name, cmdclass={{"build_ext" : BuildExtension.with_options( output_dir=r'{build_dir}', no_python_abi_suffix=True) - }})""").lstrip() + }})""" + ).lstrip() with_cuda = False if any([is_cuda_file(source) for source in sources]): with_cuda = True log_v("with_cuda: {}".format(with_cuda), verbose) - content = template.format(name=name, - prefix='CUDA' if with_cuda else 'Cpp', - sources=list2str(sources), - include_dirs=list2str(include_dirs), - extra_cxx_cflags=list2str(extra_cxx_cflags), - extra_cuda_cflags=list2str(extra_cuda_cflags), - extra_link_args=list2str(link_args), - build_dir=build_dir) + content = template.format( + name=name, + prefix='CUDA' if with_cuda else 'Cpp', + sources=list2str(sources), + include_dirs=list2str(include_dirs), + extra_cxx_cflags=list2str(extra_cxx_cflags), + extra_cuda_cflags=list2str(extra_cuda_cflags), + extra_link_args=list2str(link_args), + build_dir=build_dir, + ) log_v('write setup.py into {}'.format(file_path), verbose) with open(file_path, 'w') as f: @@ -1073,7 +1165,8 @@ def list2str(args): """ Convert list[str] into string. For example: ['x', 'y'] -> "['x', 'y']" """ - if args is None: return '[]' + if args is None: + return '[]' assert isinstance(args, (list, tuple)) args = ["{}".format(arg) for arg in args] return repr(args) @@ -1094,19 +1187,26 @@ def _jit_compile(file_path, verbose=False): py_version = py_version.decode() log_v( "Using Python interpreter: {}, version: {}".format( - interpreter, py_version.strip()), verbose) + interpreter, py_version.strip() + ), + verbose, + ) except Exception: _, error, _ = sys.exc_info() raise RuntimeError( 'Failed to check Python interpreter with `{}`, errors: {}'.format( - interpreter, error)) + interpreter, error + ) + ) if IS_WINDOWS: - compile_cmd = 'cd /d {} && {} {} build'.format(ext_dir, interpreter, - setup_file) + compile_cmd = 'cd /d {} && {} {} build'.format( + ext_dir, interpreter, setup_file + ) else: - compile_cmd = 'cd {} && {} {} build'.format(ext_dir, interpreter, - setup_file) + compile_cmd = 'cd {} && {} {} build'.format( + ext_dir, interpreter, setup_file + ) print("Compiling user custom op, it will cost a few seconds.....") run_cmd(compile_cmd, verbose) @@ -1144,15 +1244,16 @@ def run_cmd(command, verbose=False): # execute command try: if verbose: - return subprocess.check_call(command, - shell=True, - stderr=subprocess.STDOUT) + return subprocess.check_call( + command, shell=True, stderr=subprocess.STDOUT + ) else: return subprocess.check_call(command, shell=True, stdout=DEVNULL) except Exception: _, error, _ = sys.exc_info() - raise RuntimeError("Failed to run command: {}, errors: {}".format( - compile, error)) + raise RuntimeError( + "Failed to run command: {}, errors: {}".format(compile, error) + ) def check_abi_compatibility(compiler, verbose=False): @@ -1164,17 +1265,22 @@ def check_abi_compatibility(compiler, verbose=False): return True if not IS_WINDOWS: - cmd_out = subprocess.check_output(['which', compiler], - stderr=subprocess.STDOUT) + cmd_out = subprocess.check_output( + ['which', compiler], stderr=subprocess.STDOUT + ) compiler_path = os.path.realpath(cmd_out.decode()).strip() # if not found any suitable compiler, raise warning - if not any(name in compiler_path - for name in 
_expected_compiler_current_platform()): + if not any( + name in compiler_path + for name in _expected_compiler_current_platform() + ): warnings.warn( WRONG_COMPILER_WARNING.format( user_compiler=compiler, paddle_compiler=_expected_compiler_current_platform()[0], - platform=OS_NAME)) + platform=OS_NAME, + ) + ) return False version = (0, 0, 0) @@ -1185,13 +1291,15 @@ def check_abi_compatibility(compiler, verbose=False): if OS_NAME.startswith('linux'): mini_required_version = GCC_MINI_VERSION version_info = subprocess.check_output( - [compiler, '-dumpfullversion', '-dumpversion']) + [compiler, '-dumpfullversion', '-dumpversion'] + ) version_info = version_info.decode() version = version_info.strip().split('.') elif IS_WINDOWS: mini_required_version = MSVC_MINI_VERSION - compiler_info = subprocess.check_output(compiler, - stderr=subprocess.STDOUT) + compiler_info = subprocess.check_output( + compiler, stderr=subprocess.STDOUT + ) try: compiler_info = compiler_info.decode('UTF-8') except UnicodeDecodeError: @@ -1202,8 +1310,11 @@ def check_abi_compatibility(compiler, verbose=False): except Exception: # check compiler version failed _, error, _ = sys.exc_info() - warnings.warn('Failed to check compiler version for {}: {}'.format( - compiler, error)) + warnings.warn( + 'Failed to check compiler version for {}: {}'.format( + compiler, error + ) + ) return False # check version compatibility @@ -1211,8 +1322,10 @@ def check_abi_compatibility(compiler, verbose=False): if tuple(map(int, version)) >= mini_required_version: return True warnings.warn( - ABI_INCOMPATIBILITY_WARNING.format(user_compiler=compiler, - version='.'.join(version))) + ABI_INCOMPATIBILITY_WARNING.format( + user_compiler=compiler, version='.'.join(version) + ) + ) return False diff --git a/python/paddle/utils/deprecated.py b/python/paddle/utils/deprecated.py index 00a38cb1368defbfcc5956027a7ac8d9082d88d5..b4c17de3f178f493f4dc2ed769506eb3badabcd4 100755 --- a/python/paddle/utils/deprecated.py +++ b/python/paddle/utils/deprecated.py @@ -34,23 +34,23 @@ warnings.simplefilter('default', DeprecationWarning) def deprecated(update_to="", since="", reason="", level=0): """Decorate a function to signify its deprecation. - This function wraps a method that will soon be removed and does two things: - - The docstring of the API will be modified to include a notice - about deprecation." - - Raises a :class:`~exceptions.DeprecatedWarning` when old API is called. - - Args: - since(str, optional): The version at which the decorated method is considered deprecated. - update_to(str, optional): The new API users should use. - reason(str, optional): The reason why the API is deprecated. - level(int, optional): The deprecated warning log level. It must be - an Integer and must be one of 0, 1, 2. - If `level == 0`, the warning message will not be showed. - If `level == 1`, the warning message will be showed normally. - If `level == 2`, it will raise `RuntimeError`. - - Returns: - decorator: decorated function or class. + This function wraps a method that will soon be removed and does two things: + - The docstring of the API will be modified to include a notice + about deprecation." + - Raises a :class:`~exceptions.DeprecatedWarning` when old API is called. + + Args: + since(str, optional): The version at which the decorated method is considered deprecated. + update_to(str, optional): The new API users should use. + reason(str, optional): The reason why the API is deprecated. + level(int, optional): The deprecated warning log level. 
It must be + an Integer and must be one of 0, 1, 2. + If `level == 0`, the warning message will not be showed. + If `level == 1`, the warning message will be showed normally. + If `level == 2`, it will raise `RuntimeError`. + + Returns: + decorator: decorated function or class. """ def decorator(func): @@ -62,7 +62,8 @@ def deprecated(update_to="", since="", reason="", level=0): assert isinstance(reason, str), 'type of "reason" must be str.' assert isinstance(level, int) and level >= 0 and level < 3, ( 'type of "level" must be int and must be one of 0, 1, 2. But ' - 'received: {}.'.format(level)) + 'received: {}.'.format(level) + ) _since = since.strip() _update_to = update_to.strip() @@ -77,7 +78,8 @@ def deprecated(update_to="", since="", reason="", level=0): assert _update_to.startswith( "paddle." ), 'Argument update_to must start with "paddle.", your value is "{}"'.format( - update_to) + update_to + ) msg += ' Please use "{}" instead.'.format(_update_to) if len(_reason) > 0: msg += "\nreason: {}".format(_reason) @@ -90,14 +92,17 @@ def deprecated(update_to="", since="", reason="", level=0): @functools.wraps(func) def wrapper(*args, **kwargs): """deprecated warning should be fired in 3 circumstances: - 1. current version is develop version, i.e. "0.0.0", because we assume develop version is always the latest version. - 2. since version is empty, in this case, API is deprecated in all versions. - 3. current version is newer than since version. + 1. current version is develop version, i.e. "0.0.0", because we assume develop version is always the latest version. + 2. since version is empty, in this case, API is deprecated in all versions. + 3. current version is newer than since version. """ if level == 2: - raise RuntimeError('API "{}.{}" has been deprecated.'.format( - func.__module__, func.__name__)) + raise RuntimeError( + 'API "{}.{}" has been deprecated.'.format( + func.__module__, func.__name__ + ) + ) warningmsg = "\033[93m\nWarning:\n%s \033[0m" % (msg) # ensure ANSI escape sequences print correctly in cmd and powershell @@ -108,10 +113,14 @@ def deprecated(update_to="", since="", reason="", level=0): v_current += [0] * (4 - len(v_current)) v_since = [int(i) for i in _since.split(".")] v_since += [0] * (4 - len(v_since)) - if paddle.__version__ == "0.0.0" or _since == "" or v_current >= v_since: - warnings.warn(warningmsg, - category=DeprecationWarning, - stacklevel=2) + if ( + paddle.__version__ == "0.0.0" + or _since == "" + or v_current >= v_since + ): + warnings.warn( + warningmsg, category=DeprecationWarning, stacklevel=2 + ) return func(*args, **kwargs) diff --git a/python/paddle/utils/dlpack.py b/python/paddle/utils/dlpack.py index bc05cd9c29964c8fd0d6e70f7bd56aad28346cb1..c0449cdcae935ac97e4f10050a1e140838f0ecdc 100644 --- a/python/paddle/utils/dlpack.py +++ b/python/paddle/utils/dlpack.py @@ -51,7 +51,8 @@ def to_dlpack(x): if not isinstance(x, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): raise TypeError( "The type of 'x' in to_dlpack must be paddle.Tensor," - " but received {}.".format(type(x))) + " but received {}.".format(type(x)) + ) return x.value().get_tensor()._to_dlpack() @@ -87,11 +88,12 @@ def from_dlpack(dlpack): """ t = type(dlpack) - dlpack_flag = (t.__module__ == 'builtins' and t.__name__ == 'PyCapsule') + dlpack_flag = t.__module__ == 'builtins' and t.__name__ == 'PyCapsule' if not dlpack_flag: raise TypeError( "The type of 'dlpack' in from_dlpack must be PyCapsule object," - " but received {}.".format(type(dlpack))) + " but received 
{}.".format(type(dlpack)) + ) if _non_static_mode(): out = paddle.fluid.core.from_dlpack(dlpack) diff --git a/python/paddle/utils/download.py b/python/paddle/utils/download.py index 270ad51c66f6d2fb4e1c206369583b4aa6b1a319..a77d7b60dea5074d166302f50b6ac4c0a2d157d2 100644 --- a/python/paddle/utils/download.py +++ b/python/paddle/utils/download.py @@ -28,7 +28,6 @@ try: except: class tqdm(object): - def __init__(self, total=None): self.total = total self.n = 0 @@ -38,8 +37,9 @@ except: if self.total is None: sys.stderr.write("\r{0:.1f} bytes".format(self.n)) else: - sys.stderr.write("\r{0:.1f}%".format(100 * self.n / - float(self.total))) + sys.stderr.write( + "\r{0:.1f}%".format(100 * self.n / float(self.total)) + ) sys.stderr.flush() def __enter__(self): @@ -115,13 +115,10 @@ def _get_unique_endpoints(trainer_endpoints): return unique_endpoints -def get_path_from_url(url, - root_dir, - md5sum=None, - check_exist=True, - decompress=True, - method='get'): - """ Download from given url to root_dir. +def get_path_from_url( + url, root_dir, md5sum=None, check_exist=True, decompress=True, method='get' +): + """Download from given url to root_dir. if file or directory specified by url is exists under root_dir, return the path directly, otherwise download from url and decompress it, return the path. @@ -157,8 +154,9 @@ def get_path_from_url(url, time.sleep(1) if ParallelEnv().current_endpoint in unique_endpoints: - if decompress and (tarfile.is_tarfile(fullpath) - or zipfile.is_zipfile(fullpath)): + if decompress and ( + tarfile.is_tarfile(fullpath) or zipfile.is_zipfile(fullpath) + ): fullpath = _decompress(fullpath) return fullpath @@ -170,13 +168,18 @@ def _get_download(url, fullname): try: req = requests.get(url, stream=True) except Exception as e: # requests.exceptions.ConnectionError - logger.info("Downloading {} from {} failed with exception {}".format( - fname, url, str(e))) + logger.info( + "Downloading {} from {} failed with exception {}".format( + fname, url, str(e) + ) + ) return False if req.status_code != 200: - raise RuntimeError("Downloading from {} failed with code " - "{}!".format(url, req.status_code)) + raise RuntimeError( + "Downloading from {} failed with code " + "{}!".format(url, req.status_code) + ) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname @@ -202,18 +205,20 @@ def _wget_download(url, fullname): # using wget to download url tmp_fullname = fullname + "_tmp" # –user-agent - command = 'wget -O {} -t {} {}'.format(tmp_fullname, DOWNLOAD_RETRY_LIMIT, - url) - subprc = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + command = 'wget -O {} -t {} {}'.format( + tmp_fullname, DOWNLOAD_RETRY_LIMIT, url + ) + subprc = subprocess.Popen( + command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) _ = subprc.communicate() if subprc.returncode != 0: raise RuntimeError( - '{} failed. Please make sure `wget` is installed or {} exists'. - format(command, url)) + '{} failed. 
Please make sure `wget` is installed or {} exists'.format( + command, url + ) + ) shutil.move(tmp_fullname, fullname) @@ -237,7 +242,8 @@ def _download(url, path, md5sum=None, method='get'): """ assert method in _download_methods, 'make sure `{}` implemented'.format( - method) + method + ) if not osp.exists(path): os.makedirs(path) @@ -251,8 +257,9 @@ def _download(url, path, md5sum=None, method='get'): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: - raise RuntimeError("Download from {} failed. " - "Retry limit reached".format(url)) + raise RuntimeError( + "Download from {} failed. " "Retry limit reached".format(url) + ) if not _download_methods[method](url, fullname): time.sleep(1) @@ -273,8 +280,10 @@ def _md5check(fullname, md5sum=None): calc_md5sum = md5.hexdigest() if calc_md5sum != md5sum: - logger.info("File {} md5 check failed, {}(calc) != " - "{}(base)".format(fullname, calc_md5sum, md5sum)) + logger.info( + "File {} md5 check failed, {}(calc) != " + "{}(base)".format(fullname, calc_md5sum, md5sum) + ) return False return True @@ -314,7 +323,8 @@ def _uncompress_file_zip(filepath): elif _is_a_single_dir(file_list): # `strip(os.sep)` to remove `os.sep` in the tail of path rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split( - os.sep)[-1] + os.sep + )[-1] uncompressed_path = os.path.join(file_dir, rootpath) files.extractall(file_dir) @@ -340,7 +350,8 @@ def _uncompress_file_tar(filepath, mode="r:*"): files.extractall(file_dir) elif _is_a_single_dir(file_list): rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split( - os.sep)[-1] + os.sep + )[-1] uncompressed_path = os.path.join(file_dir, rootpath) files.extractall(file_dir) else: diff --git a/python/paddle/utils/gast/ast3.py b/python/paddle/utils/gast/ast3.py index 4696c1ba497491a9a926b5b5b9750a9b6e704b5d..e8c9ce7b01af6798fecf1429e43685fc24f88410 100644 --- a/python/paddle/utils/gast/ast3.py +++ b/python/paddle/utils/gast/ast3.py @@ -49,10 +49,7 @@ class Ast3ToGAst(AstToGAst): if sys.version_info.minor < 8: def visit_Module(self, node): - new_node = gast.Module( - self._visit(node.body), - [] # type_ignores - ) + new_node = gast.Module(self._visit(node.body), []) # type_ignores return new_node def visit_Num(self, node): @@ -231,7 +228,8 @@ class Ast3ToGAst(AstToGAst): new_node = gast.ExceptHandler( self._visit(node.type), gast.Name(node.name, gast.Store(), None, None), - self._visit(node.body)) + self._visit(node.body), + ) ast.copy_location(new_node, node) return new_node else: @@ -253,7 +251,6 @@ class GAstToAst3(GAstToAst): if sys.version_info.minor < 9: def visit_Subscript(self, node): - def adjust_slice(s): if isinstance(s, ast.Slice): return s @@ -263,7 +260,8 @@ class GAstToAst3(GAstToAst): if isinstance(node.slice, gast.Tuple): if any(isinstance(elt, gast.slice) for elt in node.slice.elts): new_slice = ast.ExtSlice( - [adjust_slice(x) for x in self._visit(node.slice.elts)]) + [adjust_slice(x) for x in self._visit(node.slice.elts)] + ) else: value = ast.Tuple(self._visit(node.slice.elts), ast.Load()) ast.copy_location(value, node.slice) @@ -309,10 +307,11 @@ class GAstToAst3(GAstToAst): if sys.version_info.minor < 8: extra_args = tuple() else: - extra_args = self._visit(node.type_comment), + extra_args = (self._visit(node.type_comment),) - new_node = ast.arg(self._visit(node.id), self._visit(node.annotation), - *extra_args) + new_node = ast.arg( + self._visit(node.id), self._visit(node.annotation), *extra_args + ) return ast.copy_location(new_node, node) def visit_Name(self, node): @@ -325,8 
+324,9 @@ class GAstToAst3(GAstToAst): def visit_ExceptHandler(self, node): if node.name: - new_node = ast.ExceptHandler(self._visit(node.type), node.name.id, - self._visit(node.body)) + new_node = ast.ExceptHandler( + self._visit(node.type), node.name.id, self._visit(node.body) + ) return ast.copy_location(new_node, node) else: return self.generic_visit(node) @@ -452,10 +452,13 @@ class GAstToAst3(GAstToAst): if sys.version_info.minor >= 8: new_node = ast.arguments( [self._make_arg(arg) for arg in node.posonlyargs], - [self._make_arg(n) for n in node.args], *extra_args) + [self._make_arg(n) for n in node.args], + *extra_args + ) else: - new_node = ast.arguments([self._make_arg(n) for n in node.args], - *extra_args) + new_node = ast.arguments( + [self._make_arg(n) for n in node.args], *extra_args + ) return new_node diff --git a/python/paddle/utils/gast/astn.py b/python/paddle/utils/gast/astn.py index eb45bd4e4500a6e5a0e54b1ceb7e3b797d2c68f6..bd88ba5efc512a9b072793a9c595dab066b0296e 100644 --- a/python/paddle/utils/gast/astn.py +++ b/python/paddle/utils/gast/astn.py @@ -34,9 +34,7 @@ from . import gast def _generate_translators(to): - class Translator(ast.NodeTransformer): - def _visit(self, node): if isinstance(node, list): return [self._visit(n) for n in node] diff --git a/python/paddle/utils/gast/gast.py b/python/paddle/utils/gast/gast.py index 1248434fe3533124cea74bc5b2a9c78567467595..34ada01244080c023e28723559098dfa1b069905 100644 --- a/python/paddle/utils/gast/gast.py +++ b/python/paddle/utils/gast/gast.py @@ -44,557 +44,991 @@ except ImportError: def _make_node(Name, Fields, Attributes, Bases): - def create_node(self, *args, **kwargs): nbparam = len(args) + len(kwargs) - assert nbparam in (0, len(Fields)), \ - "Bad argument number for {}: {}, expecting {}".\ - format(Name, nbparam, len(Fields)) + assert nbparam in ( + 0, + len(Fields), + ), "Bad argument number for {}: {}, expecting {}".format( + Name, nbparam, len(Fields) + ) self._fields = Fields self._attributes = Attributes for argname, argval in zip(self._fields, args): setattr(self, argname, argval) for argname, argval in kwargs.items(): - assert argname in Fields, \ - "Invalid Keyword argument for {}: {}".format(Name, argname) + assert ( + argname in Fields + ), "Invalid Keyword argument for {}: {}".format(Name, argname) setattr(self, argname, argval) - setattr(_sys.modules[__name__], Name, - type(Name, Bases, {'__init__': create_node})) + setattr( + _sys.modules[__name__], + Name, + type(Name, Bases, {'__init__': create_node}), + ) _nodes = ( # mod - ('Module', (('body', 'type_ignores'), (), (mod, ))), - ('Interactive', (('body', ), (), (mod, ))), - ('Expression', (('body', ), (), (mod, ))), - ('FunctionType', (('argtypes', 'returns'), (), (mod, ))), - ('Suite', (('body', ), (), (mod, ))), - + ('Module', (('body', 'type_ignores'), (), (mod,))), + ('Interactive', (('body',), (), (mod,))), + ('Expression', (('body',), (), (mod,))), + ('FunctionType', (('argtypes', 'returns'), (), (mod,))), + ('Suite', (('body',), (), (mod,))), # stmt - ('FunctionDef', (('name', 'args', 'body', 'decorator_list', 'returns', - 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('AsyncFunctionDef', (('name', 'args', 'body', 'decorator_list', 'returns', - 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('ClassDef', (( - 'name', - 'bases', - 'keywords', - 'body', - 'decorator_list', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 
'end_col_offset', - ), (stmt, ))), - ('Return', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Delete', (('targets', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Assign', (( - 'targets', - 'value', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('AugAssign', (( - 'target', - 'op', - 'value', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('AnnAssign', (( - 'target', - 'annotation', - 'value', - 'simple', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Print', (( - 'dest', - 'values', - 'nl', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('For', (('target', 'iter', 'body', 'orelse', 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('AsyncFor', (('target', 'iter', 'body', 'orelse', 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('While', (( - 'test', - 'body', - 'orelse', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('If', (( - 'test', - 'body', - 'orelse', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('With', (('items', 'body', 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('AsyncWith', (('items', 'body', 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Raise', (( - 'exc', - 'cause', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Try', (( - 'body', - 'handlers', - 'orelse', - 'finalbody', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Assert', (( - 'test', - 'msg', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Import', (('names', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('ImportFrom', (( - 'module', - 'names', - 'level', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Exec', (( - 'body', - 'globals', - 'locals', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Global', (('names', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Nonlocal', (('names', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Expr', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Pass', ((), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Break', ((), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - ('Continue', ((), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (stmt, ))), - + ( + 'FunctionDef', + ( + ( + 'name', + 'args', + 'body', + 'decorator_list', + 'returns', + 'type_comment', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'AsyncFunctionDef', + ( + ( + 'name', + 'args', + 'body', + 'decorator_list', + 'returns', + 'type_comment', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'ClassDef', + ( + ( + 'name', + 'bases', + 'keywords', + 
'body', + 'decorator_list', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Return', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Delete', + ( + ('targets',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Assign', + ( + ( + 'targets', + 'value', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'AugAssign', + ( + ( + 'target', + 'op', + 'value', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'AnnAssign', + ( + ( + 'target', + 'annotation', + 'value', + 'simple', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Print', + ( + ( + 'dest', + 'values', + 'nl', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'For', + ( + ('target', 'iter', 'body', 'orelse', 'type_comment'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'AsyncFor', + ( + ('target', 'iter', 'body', 'orelse', 'type_comment'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'While', + ( + ( + 'test', + 'body', + 'orelse', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'If', + ( + ( + 'test', + 'body', + 'orelse', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'With', + ( + ('items', 'body', 'type_comment'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'AsyncWith', + ( + ('items', 'body', 'type_comment'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Raise', + ( + ( + 'exc', + 'cause', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Try', + ( + ( + 'body', + 'handlers', + 'orelse', + 'finalbody', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Assert', + ( + ( + 'test', + 'msg', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Import', + ( + ('names',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'ImportFrom', + ( + ( + 'module', + 'names', + 'level', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Exec', + ( + ( + 'body', + 'globals', + 'locals', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Global', + ( + ('names',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Nonlocal', + ( + ('names',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Expr', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Pass', + ( + (), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Break', + ( + (), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), + ( + 'Continue', + ( + (), + ( + 'lineno', + 'col_offset', + 
'end_lineno', + 'end_col_offset', + ), + (stmt,), + ), + ), # expr - ('BoolOp', (( - 'op', - 'values', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('BinOp', (( - 'left', - 'op', - 'right', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('UnaryOp', (( - 'op', - 'operand', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Lambda', (( - 'args', - 'body', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('IfExp', (( - 'test', - 'body', - 'orelse', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Dict', (( - 'keys', - 'values', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Set', (('elts', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('ListComp', (( - 'elt', - 'generators', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('SetComp', (( - 'elt', - 'generators', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('DictComp', (( - 'key', - 'value', - 'generators', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('GeneratorExp', (( - 'elt', - 'generators', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Await', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Yield', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('YieldFrom', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Compare', (( - 'left', - 'ops', - 'comparators', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Call', (( - 'func', - 'args', - 'keywords', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Repr', (('value', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('FormattedValue', (( - 'value', - 'conversion', - 'format_spec', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('JoinedStr', (('values', ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Constant', (('value', 'kind'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Attribute', (( - 'value', - 'attr', - 'ctx', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Subscript', (( - 'value', - 'slice', - 'ctx', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Starred', (( - 'value', - 'ctx', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Name', (('id', 'ctx', 'annotation', 'type_comment'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('List', (( - 'elts', - 'ctx', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - ('Tuple', (( - 'elts', - 'ctx', - ), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (expr, ))), - + ( + 'BoolOp', + ( + ( + 'op', + 'values', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'BinOp', + ( + ( + 'left', + 'op', + 
'right', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'UnaryOp', + ( + ( + 'op', + 'operand', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Lambda', + ( + ( + 'args', + 'body', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'IfExp', + ( + ( + 'test', + 'body', + 'orelse', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Dict', + ( + ( + 'keys', + 'values', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Set', + ( + ('elts',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'ListComp', + ( + ( + 'elt', + 'generators', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'SetComp', + ( + ( + 'elt', + 'generators', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'DictComp', + ( + ( + 'key', + 'value', + 'generators', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'GeneratorExp', + ( + ( + 'elt', + 'generators', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Await', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Yield', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'YieldFrom', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Compare', + ( + ( + 'left', + 'ops', + 'comparators', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Call', + ( + ( + 'func', + 'args', + 'keywords', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Repr', + ( + ('value',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'FormattedValue', + ( + ( + 'value', + 'conversion', + 'format_spec', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'JoinedStr', + ( + ('values',), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Constant', + ( + ('value', 'kind'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Attribute', + ( + ( + 'value', + 'attr', + 'ctx', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Subscript', + ( + ( + 'value', + 'slice', + 'ctx', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Starred', + ( + ( + 'value', + 'ctx', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Name', + ( + ('id', 'ctx', 'annotation', 'type_comment'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'List', + ( + ( + 'elts', + 'ctx', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (expr,), + ), + ), + ( + 'Tuple', + ( + ( + 'elts', + 'ctx', + ), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 
'end_col_offset', + ), + (expr,), + ), + ), # expr_context - ('Load', ((), (), (expr_context, ))), - ('Store', ((), (), (expr_context, ))), - ('Del', ((), (), (expr_context, ))), - ('AugLoad', ((), (), (expr_context, ))), - ('AugStore', ((), (), (expr_context, ))), - ('Param', ((), (), (expr_context, ))), - + ('Load', ((), (), (expr_context,))), + ('Store', ((), (), (expr_context,))), + ('Del', ((), (), (expr_context,))), + ('AugLoad', ((), (), (expr_context,))), + ('AugStore', ((), (), (expr_context,))), + ('Param', ((), (), (expr_context,))), # slice - ('Slice', (('lower', 'upper', 'step'), ( - 'lineno', - 'col_offset', - 'end_lineno', - 'end_col_offset', - ), (slice, ))), - + ( + 'Slice', + ( + ('lower', 'upper', 'step'), + ( + 'lineno', + 'col_offset', + 'end_lineno', + 'end_col_offset', + ), + (slice,), + ), + ), # boolop - ('And', ((), (), (boolop, ))), - ('Or', ((), (), (boolop, ))), - + ('And', ((), (), (boolop,))), + ('Or', ((), (), (boolop,))), # operator - ('Add', ((), (), (operator, ))), - ('Sub', ((), (), (operator, ))), - ('Mult', ((), (), (operator, ))), - ('MatMult', ((), (), (operator, ))), - ('Div', ((), (), (operator, ))), - ('Mod', ((), (), (operator, ))), - ('Pow', ((), (), (operator, ))), - ('LShift', ((), (), (operator, ))), - ('RShift', ((), (), (operator, ))), - ('BitOr', ((), (), (operator, ))), - ('BitXor', ((), (), (operator, ))), - ('BitAnd', ((), (), (operator, ))), - ('FloorDiv', ((), (), (operator, ))), - + ('Add', ((), (), (operator,))), + ('Sub', ((), (), (operator,))), + ('Mult', ((), (), (operator,))), + ('MatMult', ((), (), (operator,))), + ('Div', ((), (), (operator,))), + ('Mod', ((), (), (operator,))), + ('Pow', ((), (), (operator,))), + ('LShift', ((), (), (operator,))), + ('RShift', ((), (), (operator,))), + ('BitOr', ((), (), (operator,))), + ('BitXor', ((), (), (operator,))), + ('BitAnd', ((), (), (operator,))), + ('FloorDiv', ((), (), (operator,))), # unaryop - ('Invert', ((), (), ( - unaryop, - AST, - ))), - ('Not', ((), (), ( - unaryop, - AST, - ))), - ('UAdd', ((), (), ( - unaryop, - AST, - ))), - ('USub', ((), (), ( - unaryop, - AST, - ))), - + ( + 'Invert', + ( + (), + (), + ( + unaryop, + AST, + ), + ), + ), + ( + 'Not', + ( + (), + (), + ( + unaryop, + AST, + ), + ), + ), + ( + 'UAdd', + ( + (), + (), + ( + unaryop, + AST, + ), + ), + ), + ( + 'USub', + ( + (), + (), + ( + unaryop, + AST, + ), + ), + ), # cmpop - ('Eq', ((), (), (cmpop, ))), - ('NotEq', ((), (), (cmpop, ))), - ('Lt', ((), (), (cmpop, ))), - ('LtE', ((), (), (cmpop, ))), - ('Gt', ((), (), (cmpop, ))), - ('GtE', ((), (), (cmpop, ))), - ('Is', ((), (), (cmpop, ))), - ('IsNot', ((), (), (cmpop, ))), - ('In', ((), (), (cmpop, ))), - ('NotIn', ((), (), (cmpop, ))), - + ('Eq', ((), (), (cmpop,))), + ('NotEq', ((), (), (cmpop,))), + ('Lt', ((), (), (cmpop,))), + ('LtE', ((), (), (cmpop,))), + ('Gt', ((), (), (cmpop,))), + ('GtE', ((), (), (cmpop,))), + ('Is', ((), (), (cmpop,))), + ('IsNot', ((), (), (cmpop,))), + ('In', ((), (), (cmpop,))), + ('NotIn', ((), (), (cmpop,))), # comprehension - ('comprehension', (('target', 'iter', 'ifs', 'is_async'), (), (AST, ))), - + ('comprehension', (('target', 'iter', 'ifs', 'is_async'), (), (AST,))), # excepthandler - ('ExceptHandler', (('type', 'name', 'body'), - ('lineno', 'col_offset', 'end_lineno', - 'end_col_offset'), (excepthandler, ))), - + ( + 'ExceptHandler', + ( + ('type', 'name', 'body'), + ('lineno', 'col_offset', 'end_lineno', 'end_col_offset'), + (excepthandler,), + ), + ), # arguments - ('arguments', (('args', 'posonlyargs', 
'vararg', 'kwonlyargs', - 'kw_defaults', 'kwarg', 'defaults'), (), (AST, ))), - + ( + 'arguments', + ( + ( + 'args', + 'posonlyargs', + 'vararg', + 'kwonlyargs', + 'kw_defaults', + 'kwarg', + 'defaults', + ), + (), + (AST,), + ), + ), # keyword - ('keyword', (('arg', 'value'), ('lineno', 'col_offset', 'end_lineno', - 'end_col_offset'), (AST, ))), - + ( + 'keyword', + ( + ('arg', 'value'), + ('lineno', 'col_offset', 'end_lineno', 'end_col_offset'), + (AST,), + ), + ), # alias - ('alias', (('name', 'asname'), (), (AST, ))), - + ('alias', (('name', 'asname'), (), (AST,))), # withitem - ('withitem', (('context_expr', 'optional_vars'), (), (AST, ))), - + ('withitem', (('context_expr', 'optional_vars'), (), (AST,))), # type_ignore - ('type_ignore', ((), ('lineno', 'tag'), (TypeIgnore, ))), + ('type_ignore', ((), ('lineno', 'tag'), (TypeIgnore,))), ) for name, descr in _nodes: @@ -603,8 +1037,10 @@ for name, descr in _nodes: py_version = _sys.version_info.major if py_version != 3: raise RuntimeError( - 'Required Python version >= 3, but received Python version == {}'. - format(py_version)) + 'Required Python version >= 3, but received Python version == {}'.format( + py_version + ) + ) from .ast3 import ast_to_gast, gast_to_ast @@ -622,10 +1058,14 @@ def literal_eval(node_or_string): def get_docstring(node, clean=True): if not isinstance(node, (FunctionDef, ClassDef, Module)): raise TypeError("%r can't have docstrings" % node.__class__.__name__) - if node.body and isinstance(node.body[0], Expr) and \ - isinstance(node.body[0].value, Constant): + if ( + node.body + and isinstance(node.body[0], Expr) + and isinstance(node.body[0].value, Constant) + ): if clean: import inspect + holder = node.body[0].value return inspect.cleandoc(getattr(holder, holder._fields[0])) return node.body[0].value.s @@ -641,8 +1081,11 @@ def copy_location(new_node, old_node): and return *new_node*. """ for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset': - if attr in old_node._attributes and attr in new_node._attributes \ - and hasattr(old_node, attr): + if ( + attr in old_node._attributes + and attr in new_node._attributes + and hasattr(old_node, attr) + ): setattr(new_node, attr, getattr(old_node, attr)) return new_node diff --git a/python/paddle/utils/image_util.py b/python/paddle/utils/image_util.py index a694d32c93b55ac89c8e3dedce66990ab18f6eec..107ee828af48e7563cea36dfa8a35431e78ea07c 100644 --- a/python/paddle/utils/image_util.py +++ b/python/paddle/utils/image_util.py @@ -25,9 +25,10 @@ def resize_image(img, target_size): img: the input image to be resized. target_size: the target resized image size. """ - percent = (target_size / float(min(img.size[0], img.size[1]))) + percent = target_size / float(min(img.size[0], img.size[1])) resized_size = int(round(img.size[0] * percent)), int( - round(img.size[1] * percent)) + round(img.size[1] * percent) + ) img = img.resize(resized_size, Image.ANTIALIAS) return img @@ -56,8 +57,9 @@ def crop_img(im, inner_size, color=True, test=True): If True, crop the center of images. 
""" if color: - height, width = max(inner_size, - im.shape[1]), max(inner_size, im.shape[2]) + height, width = max(inner_size, im.shape[1]), max( + inner_size, im.shape[2] + ) padded_im = np.zeros((3, height, width)) startY = (height - im.shape[1]) / 2 startX = (width - im.shape[2]) / 2 @@ -65,8 +67,9 @@ def crop_img(im, inner_size, color=True, test=True): padded_im[:, startY:endY, startX:endX] = im else: im = im.astype('float32') - height, width = max(inner_size, - im.shape[0]), max(inner_size, im.shape[1]) + height, width = max(inner_size, im.shape[0]), max( + inner_size, im.shape[1] + ) padded_im = np.zeros((height, width)) startY = (height - im.shape[0]) / 2 startX = (width - im.shape[1]) / 2 @@ -120,15 +123,17 @@ def load_meta(meta_path, mean_img_size, crop_size, color=True): mean = np.load(meta_path)['data_mean'] border = (mean_img_size - crop_size) / 2 if color: - assert (mean_img_size * mean_img_size * 3 == mean.shape[0]) + assert mean_img_size * mean_img_size * 3 == mean.shape[0] mean = mean.reshape(3, mean_img_size, mean_img_size) - mean = mean[:, border:border + crop_size, - border:border + crop_size].astype('float32') + mean = mean[ + :, border : border + crop_size, border : border + crop_size + ].astype('float32') else: - assert (mean_img_size * mean_img_size == mean.shape[0]) + assert mean_img_size * mean_img_size == mean.shape[0] mean = mean.reshape(mean_img_size, mean_img_size) - mean = mean[border:border + crop_size, - border:border + crop_size].astype('float32') + mean = mean[ + border : border + crop_size, border : border + crop_size + ].astype('float32') return mean @@ -166,28 +171,28 @@ def oversample(img, crop_dims): crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) curr += 1 crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate( - [-crop_dims / 2.0, crop_dims / 2.0]) + [-crop_dims / 2.0, crop_dims / 2.0] + ) crops_ix = np.tile(crops_ix, (2, 1)) # Extract crops - crops = np.empty((10 * len(img), crop_dims[0], crop_dims[1], im_shape[-1]), - dtype=np.float32) + crops = np.empty( + (10 * len(img), crop_dims[0], crop_dims[1], im_shape[-1]), + dtype=np.float32, + ) ix = 0 for im in img: for crop in crops_ix: - crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] + crops[ix] = im[crop[0] : crop[2], crop[1] : crop[3], :] ix += 1 - crops[ix - 5:ix] = crops[ix - 5:ix, :, ::-1, :] # flip for mirrors + crops[ix - 5 : ix] = crops[ix - 5 : ix, :, ::-1, :] # flip for mirrors return crops class ImageTransformer: - - def __init__(self, - transpose=None, - channel_swap=None, - mean=None, - is_color=True): + def __init__( + self, transpose=None, channel_swap=None, mean=None, is_color=True + ): self.is_color = is_color self.set_transpose(transpose) self.set_channel_swap(channel_swap) diff --git a/python/paddle/utils/install_check.py b/python/paddle/utils/install_check.py index f8fd9d71df58e52b985d2dc027e0ba058ac96fa5..c9cc1bb7a49e274e45f4ed07803ea21e6f416ada 100644 --- a/python/paddle/utils/install_check.py +++ b/python/paddle/utils/install_check.py @@ -24,13 +24,14 @@ def _simple_network(): """ Define a simple network composed by a single linear layer. 
""" - input = paddle.static.data(name="input", - shape=[None, 2, 2], - dtype="float32") + input = paddle.static.data( + name="input", shape=[None, 2, 2], dtype="float32" + ) weight = paddle.create_parameter( shape=[2, 3], dtype="float32", - attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.1))) + attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.1)), + ) bias = paddle.create_parameter(shape=[3], dtype="float32") linear_out = paddle.nn.functional.linear(x=input, weight=weight, bias=bias) out = paddle.tensor.sum(linear_out) @@ -68,7 +69,8 @@ def _is_cuda_available(): logging.warning( "You are using GPU version PaddlePaddle, but there is no GPU " "detected on your machine. Maybe CUDA devices is not set properly." - "\n Original Error is {}".format(e)) + "\n Original Error is {}".format(e) + ) return False @@ -83,7 +85,8 @@ def _is_npu_available(): logging.warning( "You are using NPU version PaddlePaddle, but there is no NPU " "detected on your machine. Maybe NPU devices is not set properly." - "\n Original Error is {}".format(e)) + "\n Original Error is {}".format(e) + ) return False @@ -98,7 +101,8 @@ def _is_xpu_available(): logging.warning( "You are using XPU version PaddlePaddle, but there is no XPU " "detected on your machine. Maybe XPU devices is not set properly." - "\n Original Error is {}".format(e)) + "\n Original Error is {}".format(e) + ) return False @@ -121,20 +125,22 @@ def _run_dygraph_single(use_cuda, use_xpu, use_npu): else: paddle.set_device('cpu') weight_attr = paddle.ParamAttr( - name="weight", initializer=paddle.nn.initializer.Constant(value=0.5)) + name="weight", initializer=paddle.nn.initializer.Constant(value=0.5) + ) bias_attr = paddle.ParamAttr( - name="bias", initializer=paddle.nn.initializer.Constant(value=1.0)) - linear = paddle.nn.Linear(2, - 4, - weight_attr=weight_attr, - bias_attr=bias_attr) + name="bias", initializer=paddle.nn.initializer.Constant(value=1.0) + ) + linear = paddle.nn.Linear( + 2, 4, weight_attr=weight_attr, bias_attr=bias_attr + ) input_np = _prepare_data(1) input_tensor = paddle.to_tensor(input_np) linear_out = linear(input_tensor) out = paddle.tensor.sum(linear_out) out.backward() - opt = paddle.optimizer.Adam(learning_rate=0.001, - parameters=linear.parameters()) + opt = paddle.optimizer.Adam( + learning_rate=0.001, parameters=linear.parameters() + ) opt.step() @@ -155,7 +161,8 @@ def _run_static_single(use_cuda, use_xpu, use_npu): with paddle.static.program_guard(train_prog, startup_prog): input, out, weight = _simple_network() param_grads = paddle.static.append_backward( - out, parameter_list=[weight.name])[0] + out, parameter_list=[weight.name] + )[0] if use_cuda: place = paddle.CUDAPlace(0) @@ -168,9 +175,11 @@ def _run_static_single(use_cuda, use_xpu, use_npu): exe = paddle.static.Executor(place) exe.run(startup_prog) - exe.run(train_prog, - feed={input.name: _prepare_data(1)}, - fetch_list=[out.name, param_grads[1].name]) + exe.run( + train_prog, + feed={input.name: _prepare_data(1)}, + fetch_list=[out.name, param_grads[1].name], + ) paddle.disable_static() @@ -195,8 +204,8 @@ def _run_static_parallel(use_cuda, use_xpu, use_npu, device_list): paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) compiled_prog = paddle.static.CompiledProgram( - train_prog).with_data_parallel(loss_name=loss.name, - places=device_list) + train_prog + ).with_data_parallel(loss_name=loss.name, places=device_list) if use_cuda: place = paddle.CUDAPlace(0) @@ -211,9 +220,11 @@ def _run_static_parallel(use_cuda, use_xpu, 
use_npu, device_list): exe = paddle.static.Executor(place) exe.run(startup_prog) - exe.run(compiled_prog, - feed={input.name: _prepare_data(len(device_list))}, - fetch_list=[loss.name]) + exe.run( + compiled_prog, + feed={input.name: _prepare_data(len(device_list))}, + fetch_list=[loss.name], + ) paddle.disable_static() @@ -269,8 +280,11 @@ def run_check(): try: _run_static_parallel(use_cuda, use_xpu, use_npu, device_list) - print("PaddlePaddle works well on {} {}s.".format( - device_count, device_str)) + print( + "PaddlePaddle works well on {} {}s.".format( + device_count, device_str + ) + ) print( "PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now." ) @@ -280,10 +294,15 @@ def run_check(): "\n 1. There is not enough GPUs visible on your system" "\n 2. Some GPUs are occupied by other process now" "\n 3. NVIDIA-NCCL2 is not installed correctly on your system. Please follow instruction on https://github.com/NVIDIA/nccl-tests " - "\n to test your NCCL, or reinstall it following https://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html" - .format(device_count, device_str)) + "\n to test your NCCL, or reinstall it following https://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html".format( + device_count, device_str + ) + ) logging.warning("\n Original Error is: {}".format(e)) - print("PaddlePaddle is installed successfully ONLY for single {}! " - "Let's start deep learning with PaddlePaddle now.".format( - device_str)) + print( + "PaddlePaddle is installed successfully ONLY for single {}! " + "Let's start deep learning with PaddlePaddle now.".format( + device_str + ) + ) diff --git a/python/paddle/utils/lazy_import.py b/python/paddle/utils/lazy_import.py index d9146422819f8aa2262fca89cf9e0dd673695b96..e5ca0cdd5a815438c7daf4b8af2d4d72a499a79b 100644 --- a/python/paddle/utils/lazy_import.py +++ b/python/paddle/utils/lazy_import.py @@ -35,6 +35,6 @@ def try_import(module_name): err_msg = ( "Failed importing {}. This likely means that some paddle modules " "require additional dependencies that have to be " - "manually installed (usually with `pip install {}`). ").format( - module_name, install_name) + "manually installed (usually with `pip install {}`). 
" + ).format(module_name, install_name) raise ImportError(err_msg) diff --git a/python/paddle/utils/op_version.py b/python/paddle/utils/op_version.py index 575e5f40772eb08ea2c79d4ac73d7d04c5f9cfbf..9f9ae4d73c7bb0f0d223e3f8ee7ed96686b0d4a6 100644 --- a/python/paddle/utils/op_version.py +++ b/python/paddle/utils/op_version.py @@ -29,7 +29,6 @@ def Singleton(cls): class OpUpdateInfoHelper(object): - def __init__(self, info): self._info = info @@ -49,7 +48,6 @@ class OpUpdateInfoHelper(object): @Singleton class OpLastCheckpointChecker(object): - def __init__(self): self.raw_version_map = core.get_op_version_map() self.checkpoints_map = {} @@ -65,8 +63,9 @@ class OpLastCheckpointChecker(object): updates = [] if op_name in self.checkpoints_map: for update in self.checkpoints_map[op_name]: - if (update.type() == type) or (type - == core.OpUpdateType.kInvalid): + if (update.type() == type) or ( + type == core.OpUpdateType.kInvalid + ): if OpUpdateInfoHelper(update.info()).verify_key_value(key): updates.append(update.info()) return updates diff --git a/python/paddle/utils/profiler.py b/python/paddle/utils/profiler.py index e0bad20a327dd9dd468c817fef2014b4288d2607..27803cfa442dd6effc0afe2a7121c2ed780b9200 100644 --- a/python/paddle/utils/profiler.py +++ b/python/paddle/utils/profiler.py @@ -22,14 +22,19 @@ from ..fluid.profiler import profiler # noqa: F401 from ..fluid.profiler import stop_profiler from ..fluid.profiler import reset_profiler -__all__ = [ #noqa - 'Profiler', 'get_profiler', 'ProfilerOptions', 'cuda_profiler', - 'start_profiler', 'profiler', 'stop_profiler', 'reset_profiler' +__all__ = [ # noqa + 'Profiler', + 'get_profiler', + 'ProfilerOptions', + 'cuda_profiler', + 'start_profiler', + 'profiler', + 'stop_profiler', + 'reset_profiler', ] class ProfilerOptions(object): - def __init__(self, options=None): self.options = { 'state': 'All', @@ -39,7 +44,7 @@ class ProfilerOptions(object): 'output_thread_detail': False, 'profile_path': 'none', 'timeline_path': 'none', - 'op_summary_path': 'none' + 'op_summary_path': 'none', } if options is not None: for key in self.options.keys(): @@ -54,10 +59,13 @@ class ProfilerOptions(object): def __getitem__(self, name): if self.options.get(name, None) is None: raise ValueError( - "ProfilerOptions does not have an option named %s." % name) + "ProfilerOptions does not have an option named %s." % name + ) else: - if isinstance(self.options[name], - str) and self.options[name] == 'none': + if ( + isinstance(self.options[name], str) + and self.options[name] == 'none' + ): return None else: return self.options[name] @@ -67,7 +75,6 @@ _current_profiler = None class Profiler(object): - def __init__(self, enabled=True, options=None): if options is not None: self.profiler_options = options @@ -99,22 +106,28 @@ class Profiler(object): try: start_profiler( state=self.profiler_options['state'], - tracer_option=self.profiler_options['tracer_level']) + tracer_option=self.profiler_options['tracer_level'], + ) except Exception as e: warnings.warn( - "Profiler is not enabled becuase following exception:\n{}". - format(e)) + "Profiler is not enabled becuase following exception:\n{}".format( + e + ) + ) def stop(self): if self.enabled: try: stop_profiler( sorted_key=self.profiler_options['sorted_key'], - profile_path=self.profiler_options['profile_path']) + profile_path=self.profiler_options['profile_path'], + ) except Exception as e: warnings.warn( - "Profiler is not disabled becuase following exception:\n{}". 
- format(e)) + "Profiler is not disabled becuase following exception:\n{}".format( + e + ) + ) def reset(self): if self.enabled and core.is_profiler_enabled(): diff --git a/python/paddle/utils/unique_name.py b/python/paddle/utils/unique_name.py index d0d487c933d767d4f1dca9642d5346bf97b7fe06..77ecebcc45b316a66092951a3a92ff8e0e8d7187 100644 --- a/python/paddle/utils/unique_name.py +++ b/python/paddle/utils/unique_name.py @@ -16,6 +16,4 @@ from ..fluid.unique_name import generate # noqa: F401 from ..fluid.unique_name import switch # noqa: F401 from ..fluid.unique_name import guard # noqa: F401 -__all__ = [ #noqa - 'generate', 'switch', 'guard' -] +__all__ = ['generate', 'switch', 'guard'] # noqa diff --git a/python/paddle/vision/__init__.py b/python/paddle/vision/__init__.py index 2f0052537e251fb5beed4c650d9ab95de6fcf95e..3e9b68ef8fdcdaf351c34496d987ded17e218c70 100644 --- a/python/paddle/vision/__init__.py +++ b/python/paddle/vision/__init__.py @@ -112,6 +112,4 @@ from .transforms import adjust_contrast # noqa: F401 from .transforms import adjust_hue # noqa: F401 from .transforms import normalize # noqa: F401 -__all__ = [ #noqa - 'set_image_backend', 'get_image_backend', 'image_load' -] +__all__ = ['set_image_backend', 'get_image_backend', 'image_load'] # noqa diff --git a/python/paddle/vision/datasets/__init__.py b/python/paddle/vision/datasets/__init__.py index 10666b7c7194acd54c97ee3eae13e9d83152b465..970c8cfcae86a1479f03bfeb45033578d60d1d38 100644 --- a/python/paddle/vision/datasets/__init__.py +++ b/python/paddle/vision/datasets/__init__.py @@ -21,7 +21,13 @@ from .cifar import Cifar10 # noqa: F401 from .cifar import Cifar100 # noqa: F401 from .voc2012 import VOC2012 # noqa: F401 -__all__ = [ #noqa - 'DatasetFolder', 'ImageFolder', 'MNIST', 'FashionMNIST', 'Flowers', - 'Cifar10', 'Cifar100', 'VOC2012' +__all__ = [ # noqa + 'DatasetFolder', + 'ImageFolder', + 'MNIST', + 'FashionMNIST', + 'Flowers', + 'Cifar10', + 'Cifar100', + 'VOC2012', ] diff --git a/python/paddle/vision/datasets/cifar.py b/python/paddle/vision/datasets/cifar.py index 1e3eb6515c8ee2dffc6ba8981a876d4506dbf797..5610b237379c8110a9a4b65e851276700cacd1a1 100644 --- a/python/paddle/vision/datasets/cifar.py +++ b/python/paddle/vision/datasets/cifar.py @@ -33,7 +33,7 @@ MODE_FLAG_MAP = { 'train10': 'data_batch', 'test10': 'test_batch', 'train100': 'train', - 'test100': 'test' + 'test100': 'test', } @@ -102,14 +102,18 @@ class Cifar10(Dataset): # [3, 64, 64] 3 """ - def __init__(self, - data_file=None, - mode='train', - transform=None, - download=True, - backend=None): - assert mode.lower() in ['train', 'test'], \ - "mode.lower() should be 'train' or 'test', but got {}".format(mode) + def __init__( + self, + data_file=None, + mode='train', + transform=None, + download=True, + backend=None, + ): + assert mode.lower() in [ + 'train', + 'test', + ], "mode.lower() should be 'train' or 'test', but got {}".format(mode) self.mode = mode.lower() if backend is None: @@ -117,18 +121,21 @@ class Cifar10(Dataset): if backend not in ['pil', 'cv2']: raise ValueError( "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend)) + backend + ) + ) self.backend = backend self._init_url_md5_flag() self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, - self.data_url, - self.data_md5, 'cifar', - download) + assert ( + download + ), "data_file is not set and downloading automatically is 
disabled" + self.data_file = _check_exists_and_download( + data_file, self.data_url, self.data_md5, 'cifar', download + ) self.transform = transform @@ -145,8 +152,9 @@ class Cifar10(Dataset): def _load_data(self): self.data = [] with tarfile.open(self.data_file, mode='r') as f: - names = (each_item.name for each_item in f - if self.flag in each_item.name) + names = ( + each_item.name for each_item in f if self.flag in each_item.name + ) names = sorted(list(names)) @@ -243,14 +251,17 @@ class Cifar100(Cifar10): # [3, 64, 64] 49 """ - def __init__(self, - data_file=None, - mode='train', - transform=None, - download=True, - backend=None): - super(Cifar100, self).__init__(data_file, mode, transform, download, - backend) + def __init__( + self, + data_file=None, + mode='train', + transform=None, + download=True, + backend=None, + ): + super(Cifar100, self).__init__( + data_file, mode, transform, download, backend + ) def _init_url_md5_flag(self): self.data_url = CIFAR100_URL diff --git a/python/paddle/vision/datasets/flowers.py b/python/paddle/vision/datasets/flowers.py index 963d639ed11796d98b3b87348c47712811cf86f7..1b1c556407b5b217d15390b3480f85945e94f9dc 100644 --- a/python/paddle/vision/datasets/flowers.py +++ b/python/paddle/vision/datasets/flowers.py @@ -106,44 +106,57 @@ class Flowers(Dataset): # [3, 64, 96] [1] """ - def __init__(self, - data_file=None, - label_file=None, - setid_file=None, - mode='train', - transform=None, - download=True, - backend=None): - assert mode.lower() in ['train', 'valid', 'test'], \ - "mode should be 'train', 'valid' or 'test', but got {}".format(mode) + def __init__( + self, + data_file=None, + label_file=None, + setid_file=None, + mode='train', + transform=None, + download=True, + backend=None, + ): + assert mode.lower() in [ + 'train', + 'valid', + 'test', + ], "mode should be 'train', 'valid' or 'test', but got {}".format(mode) if backend is None: backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend)) + backend + ) + ) self.backend = backend flag = MODE_FLAG_MAP[mode.lower()] if not data_file: - assert download, "data_file is not set and downloading automatically is disabled" - data_file = _check_exists_and_download(data_file, DATA_URL, - DATA_MD5, 'flowers', - download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + data_file = _check_exists_and_download( + data_file, DATA_URL, DATA_MD5, 'flowers', download + ) if not label_file: - assert download, "label_file is not set and downloading automatically is disabled" - label_file = _check_exists_and_download(label_file, LABEL_URL, - LABEL_MD5, 'flowers', - download) + assert ( + download + ), "label_file is not set and downloading automatically is disabled" + label_file = _check_exists_and_download( + label_file, LABEL_URL, LABEL_MD5, 'flowers', download + ) if not setid_file: - assert download, "setid_file is not set and downloading automatically is disabled" - setid_file = _check_exists_and_download(setid_file, SETID_URL, - SETID_MD5, 'flowers', - download) + assert ( + download + ), "setid_file is not set and downloading automatically is disabled" + setid_file = _check_exists_and_download( + setid_file, SETID_URL, SETID_MD5, 'flowers', download + ) self.transform = transform diff --git a/python/paddle/vision/datasets/folder.py b/python/paddle/vision/datasets/folder.py index 
4a024fa27793d3700fb6eb492f8ae0fea6541510..6ac0c4ca917238f0a078ac5d05201ba6928d414c 100644 --- a/python/paddle/vision/datasets/folder.py +++ b/python/paddle/vision/datasets/folder.py @@ -33,8 +33,9 @@ def has_valid_extension(filename, extensions): Returns: bool: True if the filename ends with one of given extensions """ - assert isinstance(extensions, - (list, tuple)), ("`extensions` must be list or tuple.") + assert isinstance( + extensions, (list, tuple) + ), "`extensions` must be list or tuple." extensions = tuple([x.lower() for x in extensions]) return filename.lower().endswith(extensions) @@ -199,24 +200,29 @@ class DatasetFolder(Dataset): shutil.rmtree(fake_data_dir) """ - def __init__(self, - root, - loader=None, - extensions=None, - transform=None, - is_valid_file=None): + def __init__( + self, + root, + loader=None, + extensions=None, + transform=None, + is_valid_file=None, + ): self.root = root self.transform = transform if extensions is None: extensions = IMG_EXTENSIONS classes, class_to_idx = self._find_classes(self.root) - samples = make_dataset(self.root, class_to_idx, extensions, - is_valid_file) + samples = make_dataset( + self.root, class_to_idx, extensions, is_valid_file + ) if len(samples) == 0: - raise (RuntimeError("Found 0 directories in subfolders of: " + - self.root + "\n" - "Supported extensions are: " + - ",".join(extensions))) + raise ( + RuntimeError( + "Found 0 directories in subfolders of: " + self.root + "\n" + "Supported extensions are: " + ",".join(extensions) + ) + ) self.loader = default_loader if loader is None else loader self.extensions = extensions @@ -245,7 +251,8 @@ class DatasetFolder(Dataset): classes = [d.name for d in os.scandir(dir) if d.is_dir()] else: classes = [ - d for d in os.listdir(dir) + d + for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) ] classes.sort() @@ -271,8 +278,17 @@ class DatasetFolder(Dataset): return len(self.samples) -IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', - '.tiff', '.webp') +IMG_EXTENSIONS = ( + '.jpg', + '.jpeg', + '.png', + '.ppm', + '.bmp', + '.pgm', + '.tif', + '.tiff', + '.webp', +) def pil_loader(path): @@ -288,6 +304,7 @@ def cv2_loader(path): def default_loader(path): from paddle.vision import get_image_backend + if get_image_backend() == 'cv2': return cv2_loader(path) else: @@ -412,14 +429,16 @@ class ImageFolder(Dataset): # [3, 64, 64] shutil.rmtree(fake_data_dir) - """ - - def __init__(self, - root, - loader=None, - extensions=None, - transform=None, - is_valid_file=None): + """ + + def __init__( + self, + root, + loader=None, + extensions=None, + transform=None, + is_valid_file=None, + ): self.root = root if extensions is None: extensions = IMG_EXTENSIONS @@ -439,10 +458,12 @@ class ImageFolder(Dataset): samples.append(f) if len(samples) == 0: - raise (RuntimeError("Found 0 files in subfolders of: " + self.root + - "\n" - "Supported extensions are: " + - ",".join(extensions))) + raise ( + RuntimeError( + "Found 0 files in subfolders of: " + self.root + "\n" + "Supported extensions are: " + ",".join(extensions) + ) + ) self.loader = default_loader if loader is None else loader self.extensions = extensions diff --git a/python/paddle/vision/datasets/mnist.py b/python/paddle/vision/datasets/mnist.py index 5b819fb3bb8cd0bb70be188703ba2529a1964b5b..c273d41d4a6cc9b5498a58a2d0802b5d58679413 100644 --- a/python/paddle/vision/datasets/mnist.py +++ b/python/paddle/vision/datasets/mnist.py @@ -88,6 +88,7 @@ class MNIST(Dataset): print(type(img), img.shape, label) # [1, 
28, 28] [7] """ + NAME = 'mnist' URL_PREFIX = 'https://dataset.bj.bcebos.com/mnist/' TEST_IMAGE_URL = URL_PREFIX + 't10k-images-idx3-ubyte.gz' @@ -99,40 +100,64 @@ class MNIST(Dataset): TRAIN_LABEL_URL = URL_PREFIX + 'train-labels-idx1-ubyte.gz' TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432' - def __init__(self, - image_path=None, - label_path=None, - mode='train', - transform=None, - download=True, - backend=None): - assert mode.lower() in ['train', 'test'], \ - "mode should be 'train' or 'test', but got {}".format(mode) + def __init__( + self, + image_path=None, + label_path=None, + mode='train', + transform=None, + download=True, + backend=None, + ): + assert mode.lower() in [ + 'train', + 'test', + ], "mode should be 'train' or 'test', but got {}".format(mode) if backend is None: backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend)) + backend + ) + ) self.backend = backend self.mode = mode.lower() self.image_path = image_path if self.image_path is None: - assert download, "image_path is not set and downloading automatically is disabled" - image_url = self.TRAIN_IMAGE_URL if mode == 'train' else self.TEST_IMAGE_URL - image_md5 = self.TRAIN_IMAGE_MD5 if mode == 'train' else self.TEST_IMAGE_MD5 + assert ( + download + ), "image_path is not set and downloading automatically is disabled" + image_url = ( + self.TRAIN_IMAGE_URL if mode == 'train' else self.TEST_IMAGE_URL + ) + image_md5 = ( + self.TRAIN_IMAGE_MD5 if mode == 'train' else self.TEST_IMAGE_MD5 + ) self.image_path = _check_exists_and_download( - image_path, image_url, image_md5, self.NAME, download) + image_path, image_url, image_md5, self.NAME, download + ) self.label_path = label_path if self.label_path is None: - assert download, "label_path is not set and downloading automatically is disabled" - label_url = self.TRAIN_LABEL_URL if self.mode == 'train' else self.TEST_LABEL_URL - label_md5 = self.TRAIN_LABEL_MD5 if self.mode == 'train' else self.TEST_LABEL_MD5 + assert ( + download + ), "label_path is not set and downloading automatically is disabled" + label_url = ( + self.TRAIN_LABEL_URL + if self.mode == 'train' + else self.TEST_LABEL_URL + ) + label_md5 = ( + self.TRAIN_LABEL_MD5 + if self.mode == 'train' + else self.TEST_LABEL_MD5 + ) self.label_path = _check_exists_and_download( - label_path, label_url, label_md5, self.NAME, download) + label_path, label_url, label_md5, self.NAME, download + ) self.transform = transform @@ -156,14 +181,16 @@ class MNIST(Dataset): # image file : 16B magic_byte_img = '>IIII' magic_img, image_num, rows, cols = struct.unpack_from( - magic_byte_img, img_buf, offset_img) + magic_byte_img, img_buf, offset_img + ) offset_img += struct.calcsize(magic_byte_img) offset_lab = 0 # label file : 8B magic_byte_lab = '>II' magic_lab, label_num = struct.unpack_from( - magic_byte_lab, lab_buf, offset_lab) + magic_byte_lab, lab_buf, offset_lab + ) offset_lab += struct.calcsize(magic_byte_lab) while True: @@ -175,17 +202,19 @@ class MNIST(Dataset): step_label += buffer_size fmt_images = '>' + str(buffer_size * rows * cols) + 'B' - images_temp = struct.unpack_from(fmt_images, img_buf, - offset_img) + images_temp = struct.unpack_from( + fmt_images, img_buf, offset_img + ) images = np.reshape( - images_temp, - (buffer_size, rows * cols)).astype('float32') + images_temp, (buffer_size, rows * cols) + ).astype('float32') offset_img += struct.calcsize(fmt_images) for i in range(buffer_size): 
self.images.append(images[i, :]) self.labels.append( - np.array([labels[i]]).astype('int64')) + np.array([labels[i]]).astype('int64') + ) def __getitem__(self, idx): image, label = self.images[idx], self.labels[idx] diff --git a/python/paddle/vision/datasets/voc2012.py b/python/paddle/vision/datasets/voc2012.py index 8931b0f86cc6c046c8e04a89544b423f8cae7ead..81478378ae41251899e75ae5ece0383d2ce9dbf5 100644 --- a/python/paddle/vision/datasets/voc2012.py +++ b/python/paddle/vision/datasets/voc2012.py @@ -102,31 +102,40 @@ class VOC2012(Dataset): # (281, 500) """ - def __init__(self, - data_file=None, - mode='train', - transform=None, - download=True, - backend=None): - assert mode.lower() in ['train', 'valid', 'test'], \ - "mode should be 'train', 'valid' or 'test', but got {}".format(mode) + def __init__( + self, + data_file=None, + mode='train', + transform=None, + download=True, + backend=None, + ): + assert mode.lower() in [ + 'train', + 'valid', + 'test', + ], "mode should be 'train', 'valid' or 'test', but got {}".format(mode) if backend is None: backend = paddle.vision.get_image_backend() if backend not in ['pil', 'cv2']: raise ValueError( "Expected backend are one of ['pil', 'cv2'], but got {}".format( - backend)) + backend + ) + ) self.backend = backend self.flag = MODE_FLAG_MAP[mode.lower()] self.data_file = data_file if self.data_file is None: - assert download, "data_file is not set and downloading automatically is disabled" - self.data_file = _check_exists_and_download(data_file, VOC_URL, - VOC_MD5, CACHE_DIR, - download) + assert ( + download + ), "data_file is not set and downloading automatically is disabled" + self.data_file = _check_exists_and_download( + data_file, VOC_URL, VOC_MD5, CACHE_DIR, download + ) self.transform = transform # read dataset into memory diff --git a/python/paddle/vision/image.py b/python/paddle/vision/image.py index c30b99799c0971a8a28ee351cc7e684d6bcc7c67..eda914d939cd35133f341d8a7165dd757c70a996 100644 --- a/python/paddle/vision/image.py +++ b/python/paddle/vision/image.py @@ -82,8 +82,10 @@ def set_image_backend(backend): global _image_backend if backend not in ['pil', 'cv2', 'tensor']: raise ValueError( - "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}". - format(backend)) + "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}".format( + backend + ) + ) _image_backend = backend @@ -152,8 +154,10 @@ def image_load(path, backend=None): backend = _image_backend if backend not in ['pil', 'cv2', 'tensor']: raise ValueError( - "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}". - format(backend)) + "Expected backend are one of ['pil', 'cv2', 'tensor'], but got {}".format( + backend + ) + ) if backend == 'pil': return Image.open(path) diff --git a/python/paddle/vision/models/__init__.py b/python/paddle/vision/models/__init__.py index 72bb6ee8e8d5b5d7e018be37802da538f38bc88d..08f559bd440c9bdb435ee5c94bbb6f7c3f3385be 100644 --- a/python/paddle/vision/models/__init__.py +++ b/python/paddle/vision/models/__init__.py @@ -1,16 +1,16 @@ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from .resnet import ResNet # noqa: F401 from .resnet import resnet18 # noqa: F401 @@ -64,18 +64,56 @@ from .shufflenetv2 import shufflenet_v2_x1_5 # noqa: F401 from .shufflenetv2 import shufflenet_v2_x2_0 # noqa: F401 from .shufflenetv2 import shufflenet_v2_swish # noqa: F401 -__all__ = [ #noqa - 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', - 'resnext50_32x4d', 'resnext50_64x4d', 'resnext101_32x4d', - 'resnext101_64x4d', 'resnext152_32x4d', 'resnext152_64x4d', - 'wide_resnet50_2', 'wide_resnet101_2', 'VGG', 'vgg11', 'vgg13', 'vgg16', - 'vgg19', 'MobileNetV1', 'mobilenet_v1', 'MobileNetV2', 'mobilenet_v2', - 'MobileNetV3Small', 'MobileNetV3Large', 'mobilenet_v3_small', - 'mobilenet_v3_large', 'LeNet', 'DenseNet', 'densenet121', 'densenet161', - 'densenet169', 'densenet201', 'densenet264', 'AlexNet', 'alexnet', - 'InceptionV3', 'inception_v3', 'SqueezeNet', 'squeezenet1_0', - 'squeezenet1_1', 'GoogLeNet', 'googlenet', 'ShuffleNetV2', - 'shufflenet_v2_x0_25', 'shufflenet_v2_x0_33', 'shufflenet_v2_x0_5', - 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0', - 'shufflenet_v2_swish' +__all__ = [ # noqa + 'ResNet', + 'resnet18', + 'resnet34', + 'resnet50', + 'resnet101', + 'resnet152', + 'resnext50_32x4d', + 'resnext50_64x4d', + 'resnext101_32x4d', + 'resnext101_64x4d', + 'resnext152_32x4d', + 'resnext152_64x4d', + 'wide_resnet50_2', + 'wide_resnet101_2', + 'VGG', + 'vgg11', + 'vgg13', + 'vgg16', + 'vgg19', + 'MobileNetV1', + 'mobilenet_v1', + 'MobileNetV2', + 'mobilenet_v2', + 'MobileNetV3Small', + 'MobileNetV3Large', + 'mobilenet_v3_small', + 'mobilenet_v3_large', + 'LeNet', + 'DenseNet', + 'densenet121', + 'densenet161', + 'densenet169', + 'densenet201', + 'densenet264', + 'AlexNet', + 'alexnet', + 'InceptionV3', + 'inception_v3', + 'SqueezeNet', + 'squeezenet1_0', + 'squeezenet1_1', + 'GoogLeNet', + 'googlenet', + 'ShuffleNetV2', + 'shufflenet_v2_x0_25', + 'shufflenet_v2_x0_33', + 'shufflenet_v2_x0_5', + 'shufflenet_v2_x1_0', + 'shufflenet_v2_x1_5', + 'shufflenet_v2_x2_0', + 'shufflenet_v2_swish', ] diff --git a/python/paddle/vision/models/alexnet.py b/python/paddle/vision/models/alexnet.py index 634125ba612c140db20f87cd62c675fb2a3cc153..c0d1e0299389c39bf5ff85e9bca639a4b7e1502c 100644 --- a/python/paddle/vision/models/alexnet.py +++ b/python/paddle/vision/models/alexnet.py @@ -34,16 +34,17 @@ __all__ = [] class ConvPoolLayer(nn.Layer): - - def __init__(self, - input_channels, - output_channels, - filter_size, - stride, - padding, - stdv, - groups=1, - act=None): + def __init__( + self, + input_channels, + output_channels, + filter_size, + stride, + padding, + stdv, + groups=1, + act=None, + ): super(ConvPoolLayer, self).__init__() self.relu = ReLU() if act == "relu" else None @@ -56,7 +57,8 @@ class ConvPoolLayer(nn.Layer): 
padding=padding, groups=groups, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) def forward(self, inputs): @@ -109,7 +111,8 @@ class AlexNet(nn.Layer): stride=1, padding=1, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) stdv = 1.0 / math.sqrt(384 * 3 * 3) self._conv4 = Conv2D( 384, @@ -118,7 +121,8 @@ class AlexNet(nn.Layer): stride=1, padding=1, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) stdv = 1.0 / math.sqrt(256 * 3 * 3) self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu") @@ -129,19 +133,22 @@ class AlexNet(nn.Layer): in_features=256 * 6 * 6, out_features=4096, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) self._drop2 = Dropout(p=0.5, mode="downscale_in_infer") self._fc7 = Linear( in_features=4096, out_features=4096, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) self._fc8 = Linear( in_features=4096, out_features=num_classes, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv))) + bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), + ) def forward(self, inputs): x = self._conv1(inputs) @@ -169,10 +176,14 @@ def _alexnet(arch, pretrained, **kwargs): model = AlexNet(**kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.load_dict(param) diff --git a/python/paddle/vision/models/densenet.py b/python/paddle/vision/models/densenet.py index 309f5424cebb1462989151a1f20c455c5f99ecbb..89716701443206b14092b5a91c73da70ff5e072d 100644 --- a/python/paddle/vision/models/densenet.py +++ b/python/paddle/vision/models/densenet.py @@ -25,45 +25,53 @@ from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { - 'densenet121': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams', - 'db1b239ed80a905290fd8b01d3af08e4'), - 'densenet161': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams', - '62158869cb315098bd25ddbfd308a853'), - 'densenet169': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams', - '82cc7c635c3f19098c748850efb2d796'), - 'densenet201': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams', - '16ca29565a7712329cf9e36e02caaf58'), - 'densenet264': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams', - '3270ce516b85370bba88cfdd9f60bff4'), + 'densenet121': ( + 
'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet121_pretrained.pdparams', + 'db1b239ed80a905290fd8b01d3af08e4', + ), + 'densenet161': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet161_pretrained.pdparams', + '62158869cb315098bd25ddbfd308a853', + ), + 'densenet169': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet169_pretrained.pdparams', + '82cc7c635c3f19098c748850efb2d796', + ), + 'densenet201': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet201_pretrained.pdparams', + '16ca29565a7712329cf9e36e02caaf58', + ), + 'densenet264': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/DenseNet264_pretrained.pdparams', + '3270ce516b85370bba88cfdd9f60bff4', + ), } class BNACConvLayer(nn.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - pad=0, - groups=1, - act="relu"): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + ): super(BNACConvLayer, self).__init__() self._batch_norm = BatchNorm(num_channels, act=act) - self._conv = Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=pad, - groups=groups, - weight_attr=ParamAttr(), - bias_attr=False) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(), + bias_attr=False, + ) def forward(self, input): y = self._batch_norm(input) @@ -72,22 +80,25 @@ class BNACConvLayer(nn.Layer): class DenseLayer(nn.Layer): - def __init__(self, num_channels, growth_rate, bn_size, dropout): super(DenseLayer, self).__init__() self.dropout = dropout - self.bn_ac_func1 = BNACConvLayer(num_channels=num_channels, - num_filters=bn_size * growth_rate, - filter_size=1, - pad=0, - stride=1) - - self.bn_ac_func2 = BNACConvLayer(num_channels=bn_size * growth_rate, - num_filters=growth_rate, - filter_size=3, - pad=1, - stride=1) + self.bn_ac_func1 = BNACConvLayer( + num_channels=num_channels, + num_filters=bn_size * growth_rate, + filter_size=1, + pad=0, + stride=1, + ) + + self.bn_ac_func2 = BNACConvLayer( + num_channels=bn_size * growth_rate, + num_filters=growth_rate, + filter_size=3, + pad=1, + stride=1, + ) if dropout: self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer") @@ -102,14 +113,9 @@ class DenseLayer(nn.Layer): class DenseBlock(nn.Layer): - - def __init__(self, - num_channels, - num_layers, - bn_size, - growth_rate, - dropout, - name=None): + def __init__( + self, num_channels, num_layers, bn_size, growth_rate, dropout, name=None + ): super(DenseBlock, self).__init__() self.dropout = dropout self.dense_layer_func = [] @@ -119,10 +125,14 @@ class DenseBlock(nn.Layer): self.dense_layer_func.append( self.add_sublayer( "{}_{}".format(name, layer + 1), - DenseLayer(num_channels=pre_channel, - growth_rate=growth_rate, - bn_size=bn_size, - dropout=dropout))) + DenseLayer( + num_channels=pre_channel, + growth_rate=growth_rate, + bn_size=bn_size, + dropout=dropout, + ), + ) + ) pre_channel = pre_channel + growth_rate def forward(self, input): @@ -133,15 +143,16 @@ class DenseBlock(nn.Layer): class TransitionLayer(nn.Layer): - def __init__(self, num_channels, num_output_features): super(TransitionLayer, self).__init__() - self.conv_ac_func = BNACConvLayer(num_channels=num_channels, - num_filters=num_output_features, - filter_size=1, - pad=0, - stride=1) + self.conv_ac_func = 
BNACConvLayer( + num_channels=num_channels, + num_filters=num_output_features, + filter_size=1, + pad=0, + stride=1, + ) self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0) @@ -152,25 +163,28 @@ class TransitionLayer(nn.Layer): class ConvBNLayer(nn.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - pad=0, - groups=1, - act="relu"): + def __init__( + self, + num_channels, + num_filters, + filter_size, + stride=1, + pad=0, + groups=1, + act="relu", + ): super(ConvBNLayer, self).__init__() - self._conv = Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=pad, - groups=groups, - weight_attr=ParamAttr(), - bias_attr=False) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=pad, + groups=groups, + weight_attr=ParamAttr(), + bias_attr=False, + ) self._batch_norm = BatchNorm(num_filters, act=act) def forward(self, input): @@ -210,34 +224,40 @@ class DenseNet(nn.Layer): # [1, 1000] """ - def __init__(self, - layers=121, - bn_size=4, - dropout=0., - num_classes=1000, - with_pool=True): + def __init__( + self, + layers=121, + bn_size=4, + dropout=0.0, + num_classes=1000, + with_pool=True, + ): super(DenseNet, self).__init__() self.num_classes = num_classes self.with_pool = with_pool supported_layers = [121, 161, 169, 201, 264] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format( - supported_layers, layers) + assert ( + layers in supported_layers + ), "supported layers are {} but input layer is {}".format( + supported_layers, layers + ) densenet_spec = { 121: (64, 32, [6, 12, 24, 16]), 161: (96, 48, [6, 12, 36, 24]), 169: (64, 32, [6, 12, 32, 32]), 201: (64, 32, [6, 12, 48, 32]), - 264: (64, 32, [6, 12, 64, 48]) + 264: (64, 32, [6, 12, 64, 48]), } num_init_features, growth_rate, block_config = densenet_spec[layers] - self.conv1_func = ConvBNLayer(num_channels=3, - num_filters=num_init_features, - filter_size=7, - stride=2, - pad=3, - act='relu') + self.conv1_func = ConvBNLayer( + num_channels=3, + num_filters=num_init_features, + filter_size=7, + stride=2, + pad=3, + act='relu', + ) self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1) self.block_config = block_config self.dense_block_func_list = [] @@ -248,12 +268,16 @@ class DenseNet(nn.Layer): self.dense_block_func_list.append( self.add_sublayer( "db_conv_{}".format(i + 2), - DenseBlock(num_channels=pre_num_channels, - num_layers=num_layers, - bn_size=bn_size, - growth_rate=growth_rate, - dropout=dropout, - name='conv' + str(i + 2)))) + DenseBlock( + num_channels=pre_num_channels, + num_layers=num_layers, + bn_size=bn_size, + growth_rate=growth_rate, + dropout=dropout, + name='conv' + str(i + 2), + ), + ) + ) num_features = num_features + num_layers * growth_rate pre_num_channels = num_features @@ -262,8 +286,12 @@ class DenseNet(nn.Layer): self.transition_func_list.append( self.add_sublayer( "tr_conv{}_blk".format(i + 2), - TransitionLayer(num_channels=pre_num_channels, - num_output_features=num_features // 2))) + TransitionLayer( + num_channels=pre_num_channels, + num_output_features=num_features // 2, + ), + ) + ) pre_num_channels = num_features // 2 num_features = num_features // 2 @@ -277,7 +305,8 @@ class DenseNet(nn.Layer): num_features, num_classes, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr()) + bias_attr=ParamAttr(), + ) def forward(self, input): conv = 
self.conv1_func(input) @@ -303,10 +332,14 @@ class DenseNet(nn.Layer): def _densenet(arch, layers, pretrained, **kwargs): model = DenseNet(layers=layers, **kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) diff --git a/python/paddle/vision/models/googlenet.py b/python/paddle/vision/models/googlenet.py index 4eb02f1de3c88429941ff500ce69126efadedb61..8d6409dc42f80a7b3325ea99c2ad876d55dd9322 100644 --- a/python/paddle/vision/models/googlenet.py +++ b/python/paddle/vision/models/googlenet.py @@ -25,35 +25,34 @@ from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { - "googlenet": - ("https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams", - "80c06f038e905c53ab32c40eca6e26ae") + "googlenet": ( + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/GoogLeNet_pretrained.pdparams", + "80c06f038e905c53ab32c40eca6e26ae", + ) } def xavier(channels, filter_size): - stdv = (3.0 / (filter_size**2 * channels))**0.5 + stdv = (3.0 / (filter_size**2 * channels)) ** 0.5 param_attr = ParamAttr(initializer=Uniform(-stdv, stdv)) return param_attr class ConvLayer(nn.Layer): - - def __init__(self, - num_channels, - num_filters, - filter_size, - stride=1, - groups=1): + def __init__( + self, num_channels, num_filters, filter_size, stride=1, groups=1 + ): super(ConvLayer, self).__init__() - self._conv = Conv2D(in_channels=num_channels, - out_channels=num_filters, - kernel_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=groups, - bias_attr=False) + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias_attr=False, + ) def forward(self, inputs): y = self._conv(inputs) @@ -61,9 +60,17 @@ class ConvLayer(nn.Layer): class Inception(nn.Layer): - - def __init__(self, input_channels, output_channels, filter1, filter3R, - filter3, filter5R, filter5, proj): + def __init__( + self, + input_channels, + output_channels, + filter1, + filter3R, + filter3, + filter5R, + filter5, + proj, + ): super(Inception, self).__init__() self._conv1 = ConvLayer(input_channels, filter1, 1) @@ -153,9 +160,9 @@ class GoogLeNet(nn.Layer): if num_classes > 0: # out self._drop = Dropout(p=0.4, mode="downscale_in_infer") - self._fc_out = Linear(1024, - num_classes, - weight_attr=xavier(1024, 1)) + self._fc_out = Linear( + 1024, num_classes, weight_attr=xavier(1024, 1) + ) # out1 self._conv_o1 = ConvLayer(512, 128, 1) @@ -254,9 +261,11 @@ def googlenet(pretrained=False, **kwargs): assert ( arch in model_urls ), "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) diff --git a/python/paddle/vision/models/inceptionv3.py b/python/paddle/vision/models/inceptionv3.py index 
a4baa2e9a77627178eff72ae582896a44325951e..24c55bdd7578f833b3d3e334238298f5d4e6881a 100644 --- a/python/paddle/vision/models/inceptionv3.py +++ b/python/paddle/vision/models/inceptionv3.py @@ -26,45 +26,55 @@ from ..ops import ConvNormActivation __all__ = [] model_urls = { - "inception_v3": - ("https://paddle-hapi.bj.bcebos.com/models/inception_v3.pdparams", - "649a4547c3243e8b59c656f41fe330b8") + "inception_v3": ( + "https://paddle-hapi.bj.bcebos.com/models/inception_v3.pdparams", + "649a4547c3243e8b59c656f41fe330b8", + ) } class InceptionStem(nn.Layer): - def __init__(self): super().__init__() - self.conv_1a_3x3 = ConvNormActivation(in_channels=3, - out_channels=32, - kernel_size=3, - stride=2, - padding=0, - activation_layer=nn.ReLU) - self.conv_2a_3x3 = ConvNormActivation(in_channels=32, - out_channels=32, - kernel_size=3, - stride=1, - padding=0, - activation_layer=nn.ReLU) - self.conv_2b_3x3 = ConvNormActivation(in_channels=32, - out_channels=64, - kernel_size=3, - padding=1, - activation_layer=nn.ReLU) + self.conv_1a_3x3 = ConvNormActivation( + in_channels=3, + out_channels=32, + kernel_size=3, + stride=2, + padding=0, + activation_layer=nn.ReLU, + ) + self.conv_2a_3x3 = ConvNormActivation( + in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.conv_2b_3x3 = ConvNormActivation( + in_channels=32, + out_channels=64, + kernel_size=3, + padding=1, + activation_layer=nn.ReLU, + ) self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=0) - self.conv_3b_1x1 = ConvNormActivation(in_channels=64, - out_channels=80, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.conv_4a_3x3 = ConvNormActivation(in_channels=80, - out_channels=192, - kernel_size=3, - padding=0, - activation_layer=nn.ReLU) + self.conv_3b_1x1 = ConvNormActivation( + in_channels=64, + out_channels=80, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.conv_4a_3x3 = ConvNormActivation( + in_channels=80, + out_channels=192, + kernel_size=3, + padding=0, + activation_layer=nn.ReLU, + ) def forward(self, x): x = self.conv_1a_3x3(x) @@ -78,51 +88,63 @@ class InceptionStem(nn.Layer): class InceptionA(nn.Layer): - def __init__(self, num_channels, pool_features): super().__init__() - self.branch1x1 = ConvNormActivation(in_channels=num_channels, - out_channels=64, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - - self.branch5x5_1 = ConvNormActivation(in_channels=num_channels, - out_channels=48, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch5x5_2 = ConvNormActivation(in_channels=48, - out_channels=64, - kernel_size=5, - padding=2, - activation_layer=nn.ReLU) - - self.branch3x3dbl_1 = ConvNormActivation(in_channels=num_channels, - out_channels=64, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3dbl_2 = ConvNormActivation(in_channels=64, - out_channels=96, - kernel_size=3, - padding=1, - activation_layer=nn.ReLU) - self.branch3x3dbl_3 = ConvNormActivation(in_channels=96, - out_channels=96, - kernel_size=3, - padding=1, - activation_layer=nn.ReLU) - - self.branch_pool = AvgPool2D(kernel_size=3, - stride=1, - padding=1, - exclusive=False) - self.branch_pool_conv = ConvNormActivation(in_channels=num_channels, - out_channels=pool_features, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) + self.branch1x1 = ConvNormActivation( + in_channels=num_channels, + out_channels=64, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + + self.branch5x5_1 = ConvNormActivation( + 
in_channels=num_channels, + out_channels=48, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch5x5_2 = ConvNormActivation( + in_channels=48, + out_channels=64, + kernel_size=5, + padding=2, + activation_layer=nn.ReLU, + ) + + self.branch3x3dbl_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=64, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_2 = ConvNormActivation( + in_channels=64, + out_channels=96, + kernel_size=3, + padding=1, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_3 = ConvNormActivation( + in_channels=96, + out_channels=96, + kernel_size=3, + padding=1, + activation_layer=nn.ReLU, + ) + + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False + ) + self.branch_pool_conv = ConvNormActivation( + in_channels=num_channels, + out_channels=pool_features, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) def forward(self, x): branch1x1 = self.branch1x1(x) @@ -135,38 +157,46 @@ class InceptionA(nn.Layer): branch_pool = self.branch_pool(x) branch_pool = self.branch_pool_conv(branch_pool) - x = paddle.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], - axis=1) + x = paddle.concat( + [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1 + ) return x class InceptionB(nn.Layer): - def __init__(self, num_channels): super().__init__() - self.branch3x3 = ConvNormActivation(in_channels=num_channels, - out_channels=384, - kernel_size=3, - stride=2, - padding=0, - activation_layer=nn.ReLU) - - self.branch3x3dbl_1 = ConvNormActivation(in_channels=num_channels, - out_channels=64, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3dbl_2 = ConvNormActivation(in_channels=64, - out_channels=96, - kernel_size=3, - padding=1, - activation_layer=nn.ReLU) - self.branch3x3dbl_3 = ConvNormActivation(in_channels=96, - out_channels=96, - kernel_size=3, - stride=2, - padding=0, - activation_layer=nn.ReLU) + self.branch3x3 = ConvNormActivation( + in_channels=num_channels, + out_channels=384, + kernel_size=3, + stride=2, + padding=0, + activation_layer=nn.ReLU, + ) + + self.branch3x3dbl_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=64, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_2 = ConvNormActivation( + in_channels=64, + out_channels=96, + kernel_size=3, + padding=1, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_3 = ConvNormActivation( + in_channels=96, + out_channels=96, + kernel_size=3, + stride=2, + padding=0, + activation_layer=nn.ReLU, + ) self.branch_pool = MaxPool2D(kernel_size=3, stride=2) @@ -185,69 +215,87 @@ class InceptionB(nn.Layer): class InceptionC(nn.Layer): - def __init__(self, num_channels, channels_7x7): super().__init__() - self.branch1x1 = ConvNormActivation(in_channels=num_channels, - out_channels=192, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - - self.branch7x7_1 = ConvNormActivation(in_channels=num_channels, - out_channels=channels_7x7, - kernel_size=1, - stride=1, - padding=0, - activation_layer=nn.ReLU) - self.branch7x7_2 = ConvNormActivation(in_channels=channels_7x7, - out_channels=channels_7x7, - kernel_size=(1, 7), - stride=1, - padding=(0, 3), - activation_layer=nn.ReLU) - self.branch7x7_3 = ConvNormActivation(in_channels=channels_7x7, - out_channels=192, - kernel_size=(7, 1), - stride=1, - padding=(3, 0), - activation_layer=nn.ReLU) - - self.branch7x7dbl_1 = ConvNormActivation(in_channels=num_channels, - out_channels=channels_7x7, - 
kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch7x7dbl_2 = ConvNormActivation(in_channels=channels_7x7, - out_channels=channels_7x7, - kernel_size=(7, 1), - padding=(3, 0), - activation_layer=nn.ReLU) - self.branch7x7dbl_3 = ConvNormActivation(in_channels=channels_7x7, - out_channels=channels_7x7, - kernel_size=(1, 7), - padding=(0, 3), - activation_layer=nn.ReLU) - self.branch7x7dbl_4 = ConvNormActivation(in_channels=channels_7x7, - out_channels=channels_7x7, - kernel_size=(7, 1), - padding=(3, 0), - activation_layer=nn.ReLU) - self.branch7x7dbl_5 = ConvNormActivation(in_channels=channels_7x7, - out_channels=192, - kernel_size=(1, 7), - padding=(0, 3), - activation_layer=nn.ReLU) - - self.branch_pool = AvgPool2D(kernel_size=3, - stride=1, - padding=1, - exclusive=False) - self.branch_pool_conv = ConvNormActivation(in_channels=num_channels, - out_channels=192, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) + self.branch1x1 = ConvNormActivation( + in_channels=num_channels, + out_channels=192, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + + self.branch7x7_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=channels_7x7, + kernel_size=1, + stride=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch7x7_2 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=channels_7x7, + kernel_size=(1, 7), + stride=1, + padding=(0, 3), + activation_layer=nn.ReLU, + ) + self.branch7x7_3 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=192, + kernel_size=(7, 1), + stride=1, + padding=(3, 0), + activation_layer=nn.ReLU, + ) + + self.branch7x7dbl_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=channels_7x7, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch7x7dbl_2 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=channels_7x7, + kernel_size=(7, 1), + padding=(3, 0), + activation_layer=nn.ReLU, + ) + self.branch7x7dbl_3 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=channels_7x7, + kernel_size=(1, 7), + padding=(0, 3), + activation_layer=nn.ReLU, + ) + self.branch7x7dbl_4 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=channels_7x7, + kernel_size=(7, 1), + padding=(3, 0), + activation_layer=nn.ReLU, + ) + self.branch7x7dbl_5 = ConvNormActivation( + in_channels=channels_7x7, + out_channels=192, + kernel_size=(1, 7), + padding=(0, 3), + activation_layer=nn.ReLU, + ) + + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False + ) + self.branch_pool_conv = ConvNormActivation( + in_channels=num_channels, + out_channels=192, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) def forward(self, x): branch1x1 = self.branch1x1(x) @@ -265,49 +313,61 @@ class InceptionC(nn.Layer): branch_pool = self.branch_pool(x) branch_pool = self.branch_pool_conv(branch_pool) - x = paddle.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], - axis=1) + x = paddle.concat( + [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1 + ) return x class InceptionD(nn.Layer): - def __init__(self, num_channels): super().__init__() - self.branch3x3_1 = ConvNormActivation(in_channels=num_channels, - out_channels=192, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3_2 = ConvNormActivation(in_channels=192, - out_channels=320, - kernel_size=3, - stride=2, - padding=0, - activation_layer=nn.ReLU) - - self.branch7x7x3_1 = ConvNormActivation(in_channels=num_channels, - out_channels=192, - 
kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch7x7x3_2 = ConvNormActivation(in_channels=192, - out_channels=192, - kernel_size=(1, 7), - padding=(0, 3), - activation_layer=nn.ReLU) - self.branch7x7x3_3 = ConvNormActivation(in_channels=192, - out_channels=192, - kernel_size=(7, 1), - padding=(3, 0), - activation_layer=nn.ReLU) - self.branch7x7x3_4 = ConvNormActivation(in_channels=192, - out_channels=192, - kernel_size=3, - stride=2, - padding=0, - activation_layer=nn.ReLU) + self.branch3x3_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=192, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3_2 = ConvNormActivation( + in_channels=192, + out_channels=320, + kernel_size=3, + stride=2, + padding=0, + activation_layer=nn.ReLU, + ) + + self.branch7x7x3_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=192, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch7x7x3_2 = ConvNormActivation( + in_channels=192, + out_channels=192, + kernel_size=(1, 7), + padding=(0, 3), + activation_layer=nn.ReLU, + ) + self.branch7x7x3_3 = ConvNormActivation( + in_channels=192, + out_channels=192, + kernel_size=(7, 1), + padding=(3, 0), + activation_layer=nn.ReLU, + ) + self.branch7x7x3_4 = ConvNormActivation( + in_channels=192, + out_channels=192, + kernel_size=3, + stride=2, + padding=0, + activation_layer=nn.ReLU, + ) self.branch_pool = MaxPool2D(kernel_size=3, stride=2) @@ -327,60 +387,76 @@ class InceptionD(nn.Layer): class InceptionE(nn.Layer): - def __init__(self, num_channels): super().__init__() - self.branch1x1 = ConvNormActivation(in_channels=num_channels, - out_channels=320, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3_1 = ConvNormActivation(in_channels=num_channels, - out_channels=384, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3_2a = ConvNormActivation(in_channels=384, - out_channels=384, - kernel_size=(1, 3), - padding=(0, 1), - activation_layer=nn.ReLU) - self.branch3x3_2b = ConvNormActivation(in_channels=384, - out_channels=384, - kernel_size=(3, 1), - padding=(1, 0), - activation_layer=nn.ReLU) - - self.branch3x3dbl_1 = ConvNormActivation(in_channels=num_channels, - out_channels=448, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) - self.branch3x3dbl_2 = ConvNormActivation(in_channels=448, - out_channels=384, - kernel_size=3, - padding=1, - activation_layer=nn.ReLU) - self.branch3x3dbl_3a = ConvNormActivation(in_channels=384, - out_channels=384, - kernel_size=(1, 3), - padding=(0, 1), - activation_layer=nn.ReLU) - self.branch3x3dbl_3b = ConvNormActivation(in_channels=384, - out_channels=384, - kernel_size=(3, 1), - padding=(1, 0), - activation_layer=nn.ReLU) - - self.branch_pool = AvgPool2D(kernel_size=3, - stride=1, - padding=1, - exclusive=False) - self.branch_pool_conv = ConvNormActivation(in_channels=num_channels, - out_channels=192, - kernel_size=1, - padding=0, - activation_layer=nn.ReLU) + self.branch1x1 = ConvNormActivation( + in_channels=num_channels, + out_channels=320, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=384, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3_2a = ConvNormActivation( + in_channels=384, + out_channels=384, + kernel_size=(1, 3), + padding=(0, 1), + activation_layer=nn.ReLU, + ) + self.branch3x3_2b = ConvNormActivation( + in_channels=384, + out_channels=384, + 
kernel_size=(3, 1), + padding=(1, 0), + activation_layer=nn.ReLU, + ) + + self.branch3x3dbl_1 = ConvNormActivation( + in_channels=num_channels, + out_channels=448, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_2 = ConvNormActivation( + in_channels=448, + out_channels=384, + kernel_size=3, + padding=1, + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_3a = ConvNormActivation( + in_channels=384, + out_channels=384, + kernel_size=(1, 3), + padding=(0, 1), + activation_layer=nn.ReLU, + ) + self.branch3x3dbl_3b = ConvNormActivation( + in_channels=384, + out_channels=384, + kernel_size=(3, 1), + padding=(1, 0), + activation_layer=nn.ReLU, + ) + + self.branch_pool = AvgPool2D( + kernel_size=3, stride=1, padding=1, exclusive=False + ) + self.branch_pool_conv = ConvNormActivation( + in_channels=num_channels, + out_channels=192, + kernel_size=1, + padding=0, + activation_layer=nn.ReLU, + ) def forward(self, x): branch1x1 = self.branch1x1(x) @@ -403,8 +479,9 @@ class InceptionE(nn.Layer): branch_pool = self.branch_pool(x) branch_pool = self.branch_pool_conv(branch_pool) - x = paddle.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], - axis=1) + x = paddle.concat( + [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1 + ) return x @@ -444,7 +521,7 @@ class InceptionV3(nn.Layer): "inception_b": [288], "inception_c": [[768, 768, 768, 768], [128, 160, 160, 192]], "inception_d": [768], - "inception_e": [1280, 2048] + "inception_e": [1280, 2048], } inception_a_list = self.layers_config["inception_a"] @@ -457,8 +534,9 @@ class InceptionV3(nn.Layer): self.inception_block_list = nn.LayerList() for i in range(len(inception_a_list[0])): - inception_a = InceptionA(inception_a_list[0][i], - inception_a_list[1][i]) + inception_a = InceptionA( + inception_a_list[0][i], inception_a_list[1][i] + ) self.inception_block_list.append(inception_a) for i in range(len(inception_b_list)): @@ -466,8 +544,9 @@ class InceptionV3(nn.Layer): self.inception_block_list.append(inception_b) for i in range(len(inception_c_list[0])): - inception_c = InceptionC(inception_c_list[0][i], - inception_c_list[1][i]) + inception_c = InceptionC( + inception_c_list[0][i], inception_c_list[1][i] + ) self.inception_block_list.append(inception_c) for i in range(len(inception_d_list)): @@ -488,7 +567,8 @@ class InceptionV3(nn.Layer): 2048, num_classes, weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)), - bias_attr=ParamAttr()) + bias_attr=ParamAttr(), + ) def forward(self, x): x = self.inception_stem(x) @@ -538,10 +618,14 @@ def inception_v3(pretrained=False, **kwargs): model = InceptionV3(**kwargs) arch = "inception_v3" if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) diff --git a/python/paddle/vision/models/lenet.py b/python/paddle/vision/models/lenet.py index 44cfed2ef307bde02e62e7d48b790824959b65e5..49c9bd2d68a4f36476664528a81b3b5cb7560e88 100644 --- a/python/paddle/vision/models/lenet.py +++ b/python/paddle/vision/models/lenet.py @@ -1,16 +1,16 @@ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. 
# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import paddle import paddle.nn as nn @@ -47,14 +47,21 @@ class LeNet(nn.Layer): def __init__(self, num_classes=10): super(LeNet, self).__init__() self.num_classes = num_classes - self.features = nn.Sequential(nn.Conv2D(1, 6, 3, stride=1, padding=1), - nn.ReLU(), nn.MaxPool2D(2, 2), - nn.Conv2D(6, 16, 5, stride=1, padding=0), - nn.ReLU(), nn.MaxPool2D(2, 2)) + self.features = nn.Sequential( + nn.Conv2D(1, 6, 3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2D(2, 2), + nn.Conv2D(6, 16, 5, stride=1, padding=0), + nn.ReLU(), + nn.MaxPool2D(2, 2), + ) if num_classes > 0: - self.fc = nn.Sequential(nn.Linear(400, 120), nn.Linear(120, 84), - nn.Linear(84, num_classes)) + self.fc = nn.Sequential( + nn.Linear(400, 120), + nn.Linear(120, 84), + nn.Linear(84, num_classes), + ) def forward(self, inputs): x = self.features(inputs) diff --git a/python/paddle/vision/models/mobilenetv1.py b/python/paddle/vision/models/mobilenetv1.py index 22be953f2c135b210a656c567fe2897aff3ec1f2..78e83304db75bdb744b7ac68dcb31a558057e5da 100644 --- a/python/paddle/vision/models/mobilenetv1.py +++ b/python/paddle/vision/models/mobilenetv1.py @@ -21,31 +21,41 @@ from ..ops import ConvNormActivation __all__ = [] model_urls = { - 'mobilenetv1_1.0': - ('https://paddle-hapi.bj.bcebos.com/models/mobilenetv1_1.0.pdparams', - '3033ab1975b1670bef51545feb65fc45') + 'mobilenetv1_1.0': ( + 'https://paddle-hapi.bj.bcebos.com/models/mobilenetv1_1.0.pdparams', + '3033ab1975b1670bef51545feb65fc45', + ) } class DepthwiseSeparable(nn.Layer): - - def __init__(self, in_channels, out_channels1, out_channels2, num_groups, - stride, scale): + def __init__( + self, + in_channels, + out_channels1, + out_channels2, + num_groups, + stride, + scale, + ): super(DepthwiseSeparable, self).__init__() - self._depthwise_conv = ConvNormActivation(in_channels, - int(out_channels1 * scale), - kernel_size=3, - stride=stride, - padding=1, - groups=int(num_groups * - scale)) - - self._pointwise_conv = ConvNormActivation(int(out_channels1 * scale), - int(out_channels2 * scale), - kernel_size=1, - stride=1, - padding=0) + self._depthwise_conv = ConvNormActivation( + in_channels, + int(out_channels1 * scale), + kernel_size=3, + stride=stride, + padding=1, + groups=int(num_groups * scale), + ) + + self._pointwise_conv = ConvNormActivation( + int(out_channels1 * scale), + int(out_channels2 * scale), + kernel_size=1, + stride=1, + padding=0, + ) def forward(self, x): x = self._depthwise_conv(x) @@ -88,101 +98,130 @@ class MobileNetV1(nn.Layer): 
self.num_classes = num_classes self.with_pool = with_pool - self.conv1 = ConvNormActivation(in_channels=3, - out_channels=int(32 * scale), - kernel_size=3, - stride=2, - padding=1) - - dws21 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 32 * scale), - out_channels1=32, - out_channels2=64, - num_groups=32, - stride=1, - scale=scale), - name="conv2_1") + self.conv1 = ConvNormActivation( + in_channels=3, + out_channels=int(32 * scale), + kernel_size=3, + stride=2, + padding=1, + ) + + dws21 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(32 * scale), + out_channels1=32, + out_channels2=64, + num_groups=32, + stride=1, + scale=scale, + ), + name="conv2_1", + ) self.dwsl.append(dws21) - dws22 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 64 * scale), - out_channels1=64, - out_channels2=128, - num_groups=64, - stride=2, - scale=scale), - name="conv2_2") + dws22 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(64 * scale), + out_channels1=64, + out_channels2=128, + num_groups=64, + stride=2, + scale=scale, + ), + name="conv2_2", + ) self.dwsl.append(dws22) - dws31 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 128 * scale), - out_channels1=128, - out_channels2=128, - num_groups=128, - stride=1, - scale=scale), - name="conv3_1") + dws31 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(128 * scale), + out_channels1=128, + out_channels2=128, + num_groups=128, + stride=1, + scale=scale, + ), + name="conv3_1", + ) self.dwsl.append(dws31) - dws32 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 128 * scale), - out_channels1=128, - out_channels2=256, - num_groups=128, - stride=2, - scale=scale), - name="conv3_2") + dws32 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(128 * scale), + out_channels1=128, + out_channels2=256, + num_groups=128, + stride=2, + scale=scale, + ), + name="conv3_2", + ) self.dwsl.append(dws32) - dws41 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 256 * scale), - out_channels1=256, - out_channels2=256, - num_groups=256, - stride=1, - scale=scale), - name="conv4_1") + dws41 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(256 * scale), + out_channels1=256, + out_channels2=256, + num_groups=256, + stride=1, + scale=scale, + ), + name="conv4_1", + ) self.dwsl.append(dws41) - dws42 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 256 * scale), - out_channels1=256, - out_channels2=512, - num_groups=256, - stride=2, - scale=scale), - name="conv4_2") + dws42 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(256 * scale), + out_channels1=256, + out_channels2=512, + num_groups=256, + stride=2, + scale=scale, + ), + name="conv4_2", + ) self.dwsl.append(dws42) for i in range(5): - tmp = self.add_sublayer(sublayer=DepthwiseSeparable( + tmp = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(512 * scale), + out_channels1=512, + out_channels2=512, + num_groups=512, + stride=1, + scale=scale, + ), + name="conv5_" + str(i + 1), + ) + self.dwsl.append(tmp) + + dws56 = self.add_sublayer( + sublayer=DepthwiseSeparable( in_channels=int(512 * scale), out_channels1=512, - out_channels2=512, + out_channels2=1024, num_groups=512, - stride=1, - scale=scale), - name="conv5_" + str(i + 1)) - self.dwsl.append(tmp) - - dws56 = self.add_sublayer(sublayer=DepthwiseSeparable( - in_channels=int(512 * scale), - out_channels1=512, - out_channels2=1024, - 
num_groups=512, - stride=2, - scale=scale), - name="conv5_6") + stride=2, + scale=scale, + ), + name="conv5_6", + ) self.dwsl.append(dws56) - dws6 = self.add_sublayer(sublayer=DepthwiseSeparable(in_channels=int( - 1024 * scale), - out_channels1=1024, - out_channels2=1024, - num_groups=1024, - stride=1, - scale=scale), - name="conv6") + dws6 = self.add_sublayer( + sublayer=DepthwiseSeparable( + in_channels=int(1024 * scale), + out_channels1=1024, + out_channels2=1024, + num_groups=1024, + stride=1, + scale=scale, + ), + name="conv6", + ) self.dwsl.append(dws6) if with_pool: @@ -208,10 +247,14 @@ class MobileNetV1(nn.Layer): def _mobilenet(arch, pretrained=False, **kwargs): model = MobileNetV1(**kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.load_dict(param) @@ -253,8 +296,7 @@ def mobilenet_v1(pretrained=False, scale=1.0, **kwargs): print(out.shape) # [1, 1000] """ - model = _mobilenet('mobilenetv1_' + str(scale), - pretrained, - scale=scale, - **kwargs) + model = _mobilenet( + 'mobilenetv1_' + str(scale), pretrained, scale=scale, **kwargs + ) return model diff --git a/python/paddle/vision/models/mobilenetv2.py b/python/paddle/vision/models/mobilenetv2.py index f67ea54fa63d826f596721394cb878c23c11e9f8..43fade9e6dd57f9a5c10a36da3cab07e47e0b7f5 100644 --- a/python/paddle/vision/models/mobilenetv2.py +++ b/python/paddle/vision/models/mobilenetv2.py @@ -22,20 +22,17 @@ from ..ops import ConvNormActivation __all__ = [] model_urls = { - 'mobilenetv2_1.0': - ('https://paddle-hapi.bj.bcebos.com/models/mobilenet_v2_x1.0.pdparams', - '0340af0a901346c8d46f4529882fb63d') + 'mobilenetv2_1.0': ( + 'https://paddle-hapi.bj.bcebos.com/models/mobilenet_v2_x1.0.pdparams', + '0340af0a901346c8d46f4529882fb63d', + ) } class InvertedResidual(nn.Layer): - - def __init__(self, - inp, - oup, - stride, - expand_ratio, - norm_layer=nn.BatchNorm2D): + def __init__( + self, inp, oup, stride, expand_ratio, norm_layer=nn.BatchNorm2D + ): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] @@ -46,21 +43,28 @@ class InvertedResidual(nn.Layer): layers = [] if expand_ratio != 1: layers.append( - ConvNormActivation(inp, - hidden_dim, - kernel_size=1, - norm_layer=norm_layer, - activation_layer=nn.ReLU6)) - layers.extend([ - ConvNormActivation(hidden_dim, - hidden_dim, - stride=stride, - groups=hidden_dim, - norm_layer=norm_layer, - activation_layer=nn.ReLU6), - nn.Conv2D(hidden_dim, oup, 1, 1, 0, bias_attr=False), - norm_layer(oup), - ]) + ConvNormActivation( + inp, + hidden_dim, + kernel_size=1, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ) + ) + layers.extend( + [ + ConvNormActivation( + hidden_dim, + hidden_dim, + stride=stride, + groups=hidden_dim, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ), + nn.Conv2D(hidden_dim, oup, 1, 1, 0, bias_attr=False), + norm_layer(oup), + ] + ) self.conv = nn.Sequential(*layers) def forward(self, x): @@ -119,14 +123,17 @@ class MobileNetV2(nn.Layer): ] input_channel = _make_divisible(input_channel * scale, round_nearest) - self.last_channel = _make_divisible(last_channel * max(1.0, scale), - 
round_nearest) + self.last_channel = _make_divisible( + last_channel * max(1.0, scale), round_nearest + ) features = [ - ConvNormActivation(3, - input_channel, - stride=2, - norm_layer=norm_layer, - activation_layer=nn.ReLU6) + ConvNormActivation( + 3, + input_channel, + stride=2, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ) ] for t, c, n, s in inverted_residual_setting: @@ -134,19 +141,25 @@ class MobileNetV2(nn.Layer): for i in range(n): stride = s if i == 0 else 1 features.append( - block(input_channel, - output_channel, - stride, - expand_ratio=t, - norm_layer=norm_layer)) + block( + input_channel, + output_channel, + stride, + expand_ratio=t, + norm_layer=norm_layer, + ) + ) input_channel = output_channel features.append( - ConvNormActivation(input_channel, - self.last_channel, - kernel_size=1, - norm_layer=norm_layer, - activation_layer=nn.ReLU6)) + ConvNormActivation( + input_channel, + self.last_channel, + kernel_size=1, + norm_layer=norm_layer, + activation_layer=nn.ReLU6, + ) + ) self.features = nn.Sequential(*features) @@ -155,7 +168,8 @@ class MobileNetV2(nn.Layer): if self.num_classes > 0: self.classifier = nn.Sequential( - nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes)) + nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes) + ) def forward(self, x): x = self.features(x) @@ -172,10 +186,14 @@ class MobileNetV2(nn.Layer): def _mobilenet(arch, pretrained=False, **kwargs): model = MobileNetV2(**kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.load_dict(param) @@ -217,8 +235,7 @@ def mobilenet_v2(pretrained=False, scale=1.0, **kwargs): print(out.shape) # [1, 1000] """ - model = _mobilenet('mobilenetv2_' + str(scale), - pretrained, - scale=scale, - **kwargs) + model = _mobilenet( + 'mobilenetv2_' + str(scale), pretrained, scale=scale, **kwargs + ) return model diff --git a/python/paddle/vision/models/mobilenetv3.py b/python/paddle/vision/models/mobilenetv3.py index f1bf6d8ae3730629ce601391fe312d403c6aeef2..865f8d0dd3222d29a8f957cf4bff3d95010138f8 100644 --- a/python/paddle/vision/models/mobilenetv3.py +++ b/python/paddle/vision/models/mobilenetv3.py @@ -23,12 +23,14 @@ from ..ops import ConvNormActivation __all__ = [] model_urls = { - "mobilenet_v3_small_x1.0": - ("https://paddle-hapi.bj.bcebos.com/models/mobilenet_v3_small_x1.0.pdparams", - "34fe0e7c1f8b00b2b056ad6788d0590c"), - "mobilenet_v3_large_x1.0": - ("https://paddle-hapi.bj.bcebos.com/models/mobilenet_v3_large_x1.0.pdparams", - "118db5792b4e183b925d8e8e334db3df"), + "mobilenet_v3_small_x1.0": ( + "https://paddle-hapi.bj.bcebos.com/models/mobilenet_v3_small_x1.0.pdparams", + "34fe0e7c1f8b00b2b056ad6788d0590c", + ), + "mobilenet_v3_large_x1.0": ( + "https://paddle-hapi.bj.bcebos.com/models/mobilenet_v3_large_x1.0.pdparams", + "118db5792b4e183b925d8e8e334db3df", + ), } @@ -45,11 +47,13 @@ class SqueezeExcitation(nn.Layer): scale_activation (Callable[..., paddle.nn.Layer]): ``sigma`` activation. 
Default: ``paddle.nn.Sigmoid`` """ - def __init__(self, - input_channels, - squeeze_channels, - activation=nn.ReLU, - scale_activation=nn.Sigmoid): + def __init__( + self, + input_channels, + squeeze_channels, + activation=nn.ReLU, + scale_activation=nn.Sigmoid, + ): super().__init__() self.avgpool = nn.AdaptiveAvgPool2D(1) self.fc1 = nn.Conv2D(input_channels, squeeze_channels, 1) @@ -70,20 +74,22 @@ class SqueezeExcitation(nn.Layer): class InvertedResidualConfig: - - def __init__(self, - in_channels, - kernel, - expanded_channels, - out_channels, - use_se, - activation, - stride, - scale=1.0): + def __init__( + self, + in_channels, + kernel, + expanded_channels, + out_channels, + use_se, + activation, + stride, + scale=1.0, + ): self.in_channels = self.adjust_channels(in_channels, scale=scale) self.kernel = kernel - self.expanded_channels = self.adjust_channels(expanded_channels, - scale=scale) + self.expanded_channels = self.adjust_channels( + expanded_channels, scale=scale + ) self.out_channels = self.adjust_channels(out_channels, scale=scale) self.use_se = use_se if activation is None: @@ -95,7 +101,9 @@ class InvertedResidualConfig: else: raise RuntimeError( "The activation function is not supported: {}".format( - activation)) + activation + ) + ) self.stride = stride @staticmethod @@ -104,9 +112,17 @@ class InvertedResidualConfig: class InvertedResidual(nn.Layer): - - def __init__(self, in_channels, expanded_channels, out_channels, - filter_size, stride, use_se, activation_layer, norm_layer): + def __init__( + self, + in_channels, + expanded_channels, + out_channels, + filter_size, + stride, + use_se, + activation_layer, + norm_layer, + ): super().__init__() self.use_res_connect = stride == 1 and in_channels == out_channels self.use_se = use_se @@ -120,7 +136,8 @@ class InvertedResidual(nn.Layer): stride=1, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) self.bottleneck_conv = ConvNormActivation( in_channels=expanded_channels, @@ -130,21 +147,25 @@ class InvertedResidual(nn.Layer): padding=int((filter_size - 1) // 2), groups=expanded_channels, norm_layer=norm_layer, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) if self.use_se: - self.mid_se = SqueezeExcitation(expanded_channels, - _make_divisible(expanded_channels // - 4), - scale_activation=nn.Hardsigmoid) - - self.linear_conv = ConvNormActivation(in_channels=expanded_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - norm_layer=norm_layer, - activation_layer=None) + self.mid_se = SqueezeExcitation( + expanded_channels, + _make_divisible(expanded_channels // 4), + scale_activation=nn.Hardsigmoid, + ) + + self.linear_conv = ConvNormActivation( + in_channels=expanded_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + norm_layer=norm_layer, + activation_layer=None, + ) def forward(self, x): identity = x @@ -172,12 +193,9 @@ class MobileNetV3(nn.Layer): with_pool (bool, optional): Use pool before the last fc layer or not. Default: True. 
""" - def __init__(self, - config, - last_channel, - scale=1.0, - num_classes=1000, - with_pool=True): + def __init__( + self, config, last_channel, scale=1.0, num_classes=1000, with_pool=True + ): super().__init__() self.config = config @@ -190,25 +208,32 @@ class MobileNetV3(nn.Layer): self.lastconv_out_channels = self.lastconv_in_channels * 6 norm_layer = partial(nn.BatchNorm2D, epsilon=0.001, momentum=0.99) - self.conv = ConvNormActivation(in_channels=3, - out_channels=self.firstconv_in_channels, - kernel_size=3, - stride=2, - padding=1, - groups=1, - activation_layer=nn.Hardswish, - norm_layer=norm_layer) - - self.blocks = nn.Sequential(*[ - InvertedResidual(in_channels=cfg.in_channels, - expanded_channels=cfg.expanded_channels, - out_channels=cfg.out_channels, - filter_size=cfg.kernel, - stride=cfg.stride, - use_se=cfg.use_se, - activation_layer=cfg.activation_layer, - norm_layer=norm_layer) for cfg in self.config - ]) + self.conv = ConvNormActivation( + in_channels=3, + out_channels=self.firstconv_in_channels, + kernel_size=3, + stride=2, + padding=1, + groups=1, + activation_layer=nn.Hardswish, + norm_layer=norm_layer, + ) + + self.blocks = nn.Sequential( + *[ + InvertedResidual( + in_channels=cfg.in_channels, + expanded_channels=cfg.expanded_channels, + out_channels=cfg.out_channels, + filter_size=cfg.kernel, + stride=cfg.stride, + use_se=cfg.use_se, + activation_layer=cfg.activation_layer, + norm_layer=norm_layer, + ) + for cfg in self.config + ] + ) self.lastconv = ConvNormActivation( in_channels=self.lastconv_in_channels, @@ -218,7 +243,8 @@ class MobileNetV3(nn.Layer): padding=0, groups=1, norm_layer=norm_layer, - activation_layer=nn.Hardswish) + activation_layer=nn.Hardswish, + ) if with_pool: self.avgpool = nn.AdaptiveAvgPool2D(1) @@ -226,8 +252,10 @@ class MobileNetV3(nn.Layer): if num_classes > 0: self.classifier = nn.Sequential( nn.Linear(self.lastconv_out_channels, self.last_channel), - nn.Hardswish(), nn.Dropout(p=0.2), - nn.Linear(self.last_channel, num_classes)) + nn.Hardswish(), + nn.Dropout(p=0.2), + nn.Linear(self.last_channel, num_classes), + ) def forward(self, x): x = self.conv(x) @@ -288,11 +316,13 @@ class MobileNetV3Small(MobileNetV3): InvertedResidualConfig(96, 5, 576, 96, True, "hardswish", 1, scale), ] last_channel = _make_divisible(1024 * scale, 8) - super().__init__(config, - last_channel=last_channel, - scale=scale, - with_pool=with_pool, - num_classes=num_classes) + super().__init__( + config, + last_channel=last_channel, + scale=scale, + with_pool=with_pool, + num_classes=num_classes, + ) class MobileNetV3Large(MobileNetV3): @@ -332,31 +362,42 @@ class MobileNetV3Large(MobileNetV3): InvertedResidualConfig(24, 5, 72, 40, True, "relu", 2, scale), InvertedResidualConfig(40, 5, 120, 40, True, "relu", 1, scale), InvertedResidualConfig(40, 5, 120, 40, True, "relu", 1, scale), - InvertedResidualConfig(40, 3, 240, 80, False, "hardswish", 2, - scale), - InvertedResidualConfig(80, 3, 200, 80, False, "hardswish", 1, - scale), - InvertedResidualConfig(80, 3, 184, 80, False, "hardswish", 1, - scale), - InvertedResidualConfig(80, 3, 184, 80, False, "hardswish", 1, - scale), - InvertedResidualConfig(80, 3, 480, 112, True, "hardswish", 1, - scale), - InvertedResidualConfig(112, 3, 672, 112, True, "hardswish", 1, - scale), - InvertedResidualConfig(112, 5, 672, 160, True, "hardswish", 2, - scale), - InvertedResidualConfig(160, 5, 960, 160, True, "hardswish", 1, - scale), - InvertedResidualConfig(160, 5, 960, 160, True, "hardswish", 1, - scale), + 
InvertedResidualConfig( + 40, 3, 240, 80, False, "hardswish", 2, scale + ), + InvertedResidualConfig( + 80, 3, 200, 80, False, "hardswish", 1, scale + ), + InvertedResidualConfig( + 80, 3, 184, 80, False, "hardswish", 1, scale + ), + InvertedResidualConfig( + 80, 3, 184, 80, False, "hardswish", 1, scale + ), + InvertedResidualConfig( + 80, 3, 480, 112, True, "hardswish", 1, scale + ), + InvertedResidualConfig( + 112, 3, 672, 112, True, "hardswish", 1, scale + ), + InvertedResidualConfig( + 112, 5, 672, 160, True, "hardswish", 2, scale + ), + InvertedResidualConfig( + 160, 5, 960, 160, True, "hardswish", 1, scale + ), + InvertedResidualConfig( + 160, 5, 960, 160, True, "hardswish", 1, scale + ), ] last_channel = _make_divisible(1280 * scale, 8) - super().__init__(config, - last_channel=last_channel, - scale=scale, - with_pool=with_pool, - num_classes=num_classes) + super().__init__( + config, + last_channel=last_channel, + scale=scale, + with_pool=with_pool, + num_classes=num_classes, + ) def _mobilenet_v3(arch, pretrained=False, scale=1.0, **kwargs): @@ -369,9 +410,11 @@ def _mobilenet_v3(arch, pretrained=False, scale=1.0, **kwargs): assert ( arch in model_urls ), "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) @@ -412,10 +455,9 @@ def mobilenet_v3_small(pretrained=False, scale=1.0, **kwargs): print(out.shape) # [1, 1000] """ - model = _mobilenet_v3("mobilenet_v3_small", - scale=scale, - pretrained=pretrained, - **kwargs) + model = _mobilenet_v3( + "mobilenet_v3_small", scale=scale, pretrained=pretrained, **kwargs + ) return model @@ -453,8 +495,7 @@ def mobilenet_v3_large(pretrained=False, scale=1.0, **kwargs): print(out.shape) # [1, 1000] """ - model = _mobilenet_v3("mobilenet_v3_large", - scale=scale, - pretrained=pretrained, - **kwargs) + model = _mobilenet_v3( + "mobilenet_v3_large", scale=scale, pretrained=pretrained, **kwargs + ) return model diff --git a/python/paddle/vision/models/resnet.py b/python/paddle/vision/models/resnet.py index 4293e119582c009ecf3318b0726d0967be14ec2a..f43bdb9bc1a4c130f7d6e4da3e1e93f2c4c91e72 100644 --- a/python/paddle/vision/models/resnet.py +++ b/python/paddle/vision/models/resnet.py @@ -20,69 +20,87 @@ from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { - 'resnet18': ('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams', - 'cf548f46534aa3560945be4b95cd11c4'), - 'resnet34': ('https://paddle-hapi.bj.bcebos.com/models/resnet34.pdparams', - '8d2275cf8706028345f78ac0e1d31969'), - 'resnet50': ('https://paddle-hapi.bj.bcebos.com/models/resnet50.pdparams', - 'ca6f485ee1ab0492d38f323885b0ad80'), - 'resnet101': ('https://paddle-hapi.bj.bcebos.com/models/resnet101.pdparams', - '02f35f034ca3858e1e54d4036443c92d'), - 'resnet152': ('https://paddle-hapi.bj.bcebos.com/models/resnet152.pdparams', - '7ad16a2f1e7333859ff986138630fd7a'), - 'resnext50_32x4d': - ('https://paddle-hapi.bj.bcebos.com/models/resnext50_32x4d.pdparams', - 'dc47483169be7d6f018fcbb7baf8775d'), - "resnext50_64x4d": - ('https://paddle-hapi.bj.bcebos.com/models/resnext50_64x4d.pdparams', - '063d4b483e12b06388529450ad7576db'), - 'resnext101_32x4d': - ('https://paddle-hapi.bj.bcebos.com/models/resnext101_32x4d.pdparams', - '967b090039f9de2c8d06fe994fb9095f'), - 
'resnext101_64x4d': - ('https://paddle-hapi.bj.bcebos.com/models/resnext101_64x4d.pdparams', - '98e04e7ca616a066699230d769d03008'), - 'resnext152_32x4d': - ('https://paddle-hapi.bj.bcebos.com/models/resnext152_32x4d.pdparams', - '18ff0beee21f2efc99c4b31786107121'), - 'resnext152_64x4d': - ('https://paddle-hapi.bj.bcebos.com/models/resnext152_64x4d.pdparams', - '77c4af00ca42c405fa7f841841959379'), - 'wide_resnet50_2': - ('https://paddle-hapi.bj.bcebos.com/models/wide_resnet50_2.pdparams', - '0282f804d73debdab289bd9fea3fa6dc'), - 'wide_resnet101_2': - ('https://paddle-hapi.bj.bcebos.com/models/wide_resnet101_2.pdparams', - 'd4360a2d23657f059216f5d5a1a9ac93'), + 'resnet18': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams', + 'cf548f46534aa3560945be4b95cd11c4', + ), + 'resnet34': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnet34.pdparams', + '8d2275cf8706028345f78ac0e1d31969', + ), + 'resnet50': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnet50.pdparams', + 'ca6f485ee1ab0492d38f323885b0ad80', + ), + 'resnet101': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnet101.pdparams', + '02f35f034ca3858e1e54d4036443c92d', + ), + 'resnet152': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnet152.pdparams', + '7ad16a2f1e7333859ff986138630fd7a', + ), + 'resnext50_32x4d': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext50_32x4d.pdparams', + 'dc47483169be7d6f018fcbb7baf8775d', + ), + "resnext50_64x4d": ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext50_64x4d.pdparams', + '063d4b483e12b06388529450ad7576db', + ), + 'resnext101_32x4d': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext101_32x4d.pdparams', + '967b090039f9de2c8d06fe994fb9095f', + ), + 'resnext101_64x4d': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext101_64x4d.pdparams', + '98e04e7ca616a066699230d769d03008', + ), + 'resnext152_32x4d': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext152_32x4d.pdparams', + '18ff0beee21f2efc99c4b31786107121', + ), + 'resnext152_64x4d': ( + 'https://paddle-hapi.bj.bcebos.com/models/resnext152_64x4d.pdparams', + '77c4af00ca42c405fa7f841841959379', + ), + 'wide_resnet50_2': ( + 'https://paddle-hapi.bj.bcebos.com/models/wide_resnet50_2.pdparams', + '0282f804d73debdab289bd9fea3fa6dc', + ), + 'wide_resnet101_2': ( + 'https://paddle-hapi.bj.bcebos.com/models/wide_resnet101_2.pdparams', + 'd4360a2d23657f059216f5d5a1a9ac93', + ), } class BasicBlock(nn.Layer): expansion = 1 - def __init__(self, - inplanes, - planes, - stride=1, - downsample=None, - groups=1, - base_width=64, - dilation=1, - norm_layer=None): + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + groups=1, + base_width=64, + dilation=1, + norm_layer=None, + ): super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2D if dilation > 1: raise NotImplementedError( - "Dilation > 1 not supported in BasicBlock") - - self.conv1 = nn.Conv2D(inplanes, - planes, - 3, - padding=1, - stride=stride, - bias_attr=False) + "Dilation > 1 not supported in BasicBlock" + ) + + self.conv1 = nn.Conv2D( + inplanes, planes, 3, padding=1, stride=stride, bias_attr=False + ) self.bn1 = norm_layer(planes) self.relu = nn.ReLU() self.conv2 = nn.Conv2D(planes, planes, 3, padding=1, bias_attr=False) @@ -113,37 +131,40 @@ class BottleneckBlock(nn.Layer): expansion = 4 - def __init__(self, - inplanes, - planes, - stride=1, - downsample=None, - groups=1, - base_width=64, - dilation=1, - norm_layer=None): + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + 
groups=1, + base_width=64, + dilation=1, + norm_layer=None, + ): super(BottleneckBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2D - width = int(planes * (base_width / 64.)) * groups + width = int(planes * (base_width / 64.0)) * groups self.conv1 = nn.Conv2D(inplanes, width, 1, bias_attr=False) self.bn1 = norm_layer(width) - self.conv2 = nn.Conv2D(width, - width, - 3, - padding=dilation, - stride=stride, - groups=groups, - dilation=dilation, - bias_attr=False) + self.conv2 = nn.Conv2D( + width, + width, + 3, + padding=dilation, + stride=stride, + groups=groups, + dilation=dilation, + bias_attr=False, + ) self.bn2 = norm_layer(width) - self.conv3 = nn.Conv2D(width, - planes * self.expansion, - 1, - bias_attr=False) + self.conv3 = nn.Conv2D( + width, planes * self.expansion, 1, bias_attr=False + ) self.bn3 = norm_layer(planes * self.expansion) self.relu = nn.ReLU() self.downsample = downsample @@ -214,20 +235,22 @@ class ResNet(nn.Layer): # [1, 1000] """ - def __init__(self, - block, - depth=50, - width=64, - num_classes=1000, - with_pool=True, - groups=1): + def __init__( + self, + block, + depth=50, + width=64, + num_classes=1000, + with_pool=True, + groups=1, + ): super(ResNet, self).__init__() layer_cfg = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], - 152: [3, 8, 36, 3] + 152: [3, 8, 36, 3], } layers = layer_cfg[depth] self.groups = groups @@ -239,12 +262,14 @@ class ResNet(nn.Layer): self.inplanes = 64 self.dilation = 1 - self.conv1 = nn.Conv2D(3, - self.inplanes, - kernel_size=7, - stride=2, - padding=3, - bias_attr=False) + self.conv1 = nn.Conv2D( + 3, + self.inplanes, + kernel_size=7, + stride=2, + padding=3, + bias_attr=False, + ) self.bn1 = self._norm_layer(self.inplanes) self.relu = nn.ReLU() self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) @@ -267,26 +292,40 @@ class ResNet(nn.Layer): stride = 1 if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( - nn.Conv2D(self.inplanes, - planes * block.expansion, - 1, - stride=stride, - bias_attr=False), + nn.Conv2D( + self.inplanes, + planes * block.expansion, + 1, + stride=stride, + bias_attr=False, + ), norm_layer(planes * block.expansion), ) layers = [] layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) + block( + self.inplanes, + planes, + stride, + downsample, + self.groups, + self.base_width, + previous_dilation, + norm_layer, + ) + ) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - norm_layer=norm_layer)) + block( + self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + norm_layer=norm_layer, + ) + ) return nn.Sequential(*layers) @@ -313,10 +352,14 @@ class ResNet(nn.Layer): def _resnet(arch, Block, depth, pretrained, **kwargs): model = ResNet(Block, depth, **kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) @@ -591,8 +634,9 @@ def 
resnext101_32x4d(pretrained=False, **kwargs): """ kwargs['groups'] = 32 kwargs['width'] = 4 - return _resnet('resnext101_32x4d', BottleneckBlock, 101, pretrained, - **kwargs) + return _resnet( + 'resnext101_32x4d', BottleneckBlock, 101, pretrained, **kwargs + ) def resnext101_64x4d(pretrained=False, **kwargs): @@ -627,8 +671,9 @@ def resnext101_64x4d(pretrained=False, **kwargs): """ kwargs['groups'] = 64 kwargs['width'] = 4 - return _resnet('resnext101_64x4d', BottleneckBlock, 101, pretrained, - **kwargs) + return _resnet( + 'resnext101_64x4d', BottleneckBlock, 101, pretrained, **kwargs + ) def resnext152_32x4d(pretrained=False, **kwargs): @@ -663,8 +708,9 @@ def resnext152_32x4d(pretrained=False, **kwargs): """ kwargs['groups'] = 32 kwargs['width'] = 4 - return _resnet('resnext152_32x4d', BottleneckBlock, 152, pretrained, - **kwargs) + return _resnet( + 'resnext152_32x4d', BottleneckBlock, 152, pretrained, **kwargs + ) def resnext152_64x4d(pretrained=False, **kwargs): @@ -699,8 +745,9 @@ def resnext152_64x4d(pretrained=False, **kwargs): """ kwargs['groups'] = 64 kwargs['width'] = 4 - return _resnet('resnext152_64x4d', BottleneckBlock, 152, pretrained, - **kwargs) + return _resnet( + 'resnext152_64x4d', BottleneckBlock, 152, pretrained, **kwargs + ) def wide_resnet50_2(pretrained=False, **kwargs): @@ -768,5 +815,6 @@ def wide_resnet101_2(pretrained=False, **kwargs): # [1, 1000] """ kwargs['width'] = 64 * 2 - return _resnet('wide_resnet101_2', BottleneckBlock, 101, pretrained, - **kwargs) + return _resnet( + 'wide_resnet101_2', BottleneckBlock, 101, pretrained, **kwargs + ) diff --git a/python/paddle/vision/models/shufflenetv2.py b/python/paddle/vision/models/shufflenetv2.py index e6cadaed1fd9b08e1513ebc9ff62cb45ba8f1795..c443fa5a3ee568ab5c233350cfda00e51cbeb21f 100644 --- a/python/paddle/vision/models/shufflenetv2.py +++ b/python/paddle/vision/models/shufflenetv2.py @@ -62,7 +62,8 @@ def create_activation_layer(act): return None else: raise RuntimeError( - "The activation function is not supported: {}".format(act)) + "The activation function is not supported: {}".format(act) + ) def channel_shuffle(x, groups): @@ -71,7 +72,8 @@ def channel_shuffle(x, groups): # reshape x = paddle.reshape( - x, shape=[batch_size, groups, channels_per_group, height, width]) + x, shape=[batch_size, groups, channels_per_group, height, width] + ) # transpose x = paddle.transpose(x, perm=[0, 2, 1, 3, 4]) @@ -82,27 +84,28 @@ def channel_shuffle(x, groups): class InvertedResidual(nn.Layer): - - def __init__(self, - in_channels, - out_channels, - stride, - activation_layer=nn.ReLU): + def __init__( + self, in_channels, out_channels, stride, activation_layer=nn.ReLU + ): super(InvertedResidual, self).__init__() - self._conv_pw = ConvNormActivation(in_channels=in_channels // 2, - out_channels=out_channels // 2, - kernel_size=1, - stride=1, - padding=0, - groups=1, - activation_layer=activation_layer) - self._conv_dw = ConvNormActivation(in_channels=out_channels // 2, - out_channels=out_channels // 2, - kernel_size=3, - stride=stride, - padding=1, - groups=out_channels // 2, - activation_layer=None) + self._conv_pw = ConvNormActivation( + in_channels=in_channels // 2, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + activation_layer=activation_layer, + ) + self._conv_dw = ConvNormActivation( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + activation_layer=None, + ) self._conv_linear 
= ConvNormActivation( in_channels=out_channels // 2, out_channels=out_channels // 2, @@ -110,13 +113,15 @@ class InvertedResidual(nn.Layer): stride=1, padding=0, groups=1, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) def forward(self, inputs): x1, x2 = paddle.split( inputs, num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2], - axis=1) + axis=1, + ) x2 = self._conv_pw(x2) x2 = self._conv_dw(x2) x2 = self._conv_linear(x2) @@ -125,22 +130,21 @@ class InvertedResidual(nn.Layer): class InvertedResidualDS(nn.Layer): - - def __init__(self, - in_channels, - out_channels, - stride, - activation_layer=nn.ReLU): + def __init__( + self, in_channels, out_channels, stride, activation_layer=nn.ReLU + ): super(InvertedResidualDS, self).__init__() # branch1 - self._conv_dw_1 = ConvNormActivation(in_channels=in_channels, - out_channels=in_channels, - kernel_size=3, - stride=stride, - padding=1, - groups=in_channels, - activation_layer=None) + self._conv_dw_1 = ConvNormActivation( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=stride, + padding=1, + groups=in_channels, + activation_layer=None, + ) self._conv_linear_1 = ConvNormActivation( in_channels=in_channels, out_channels=out_channels // 2, @@ -148,22 +152,27 @@ class InvertedResidualDS(nn.Layer): stride=1, padding=0, groups=1, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) # branch2 - self._conv_pw_2 = ConvNormActivation(in_channels=in_channels, - out_channels=out_channels // 2, - kernel_size=1, - stride=1, - padding=0, - groups=1, - activation_layer=activation_layer) - self._conv_dw_2 = ConvNormActivation(in_channels=out_channels // 2, - out_channels=out_channels // 2, - kernel_size=3, - stride=stride, - padding=1, - groups=out_channels // 2, - activation_layer=None) + self._conv_pw_2 = ConvNormActivation( + in_channels=in_channels, + out_channels=out_channels // 2, + kernel_size=1, + stride=1, + padding=0, + groups=1, + activation_layer=activation_layer, + ) + self._conv_dw_2 = ConvNormActivation( + in_channels=out_channels // 2, + out_channels=out_channels // 2, + kernel_size=3, + stride=stride, + padding=1, + groups=out_channels // 2, + activation_layer=None, + ) self._conv_linear_2 = ConvNormActivation( in_channels=out_channels // 2, out_channels=out_channels // 2, @@ -171,7 +180,8 @@ class InvertedResidualDS(nn.Layer): stride=1, padding=0, groups=1, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) def forward(self, inputs): x1 = self._conv_dw_1(inputs) @@ -232,15 +242,18 @@ class ShuffleNetV2(nn.Layer): elif scale == 2.0: stage_out_channels = [-1, 24, 224, 488, 976, 2048] else: - raise NotImplementedError("This scale size:[" + str(scale) + - "] is not implemented!") + raise NotImplementedError( + "This scale size:[" + str(scale) + "] is not implemented!" + ) # 1. conv1 - self._conv1 = ConvNormActivation(in_channels=3, - out_channels=stage_out_channels[1], - kernel_size=3, - stride=2, - padding=1, - activation_layer=activation_layer) + self._conv1 = ConvNormActivation( + in_channels=3, + out_channels=stage_out_channels[1], + kernel_size=3, + stride=2, + padding=1, + activation_layer=activation_layer, + ) self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1) # 2. 
bottleneck sequences @@ -248,21 +261,25 @@ class ShuffleNetV2(nn.Layer): for stage_id, num_repeat in enumerate(stage_repeats): for i in range(num_repeat): if i == 0: - block = self.add_sublayer(sublayer=InvertedResidualDS( - in_channels=stage_out_channels[stage_id + 1], - out_channels=stage_out_channels[stage_id + 2], - stride=2, - activation_layer=activation_layer), - name=str(stage_id + 2) + "_" + - str(i + 1)) + block = self.add_sublayer( + sublayer=InvertedResidualDS( + in_channels=stage_out_channels[stage_id + 1], + out_channels=stage_out_channels[stage_id + 2], + stride=2, + activation_layer=activation_layer, + ), + name=str(stage_id + 2) + "_" + str(i + 1), + ) else: - block = self.add_sublayer(sublayer=InvertedResidual( - in_channels=stage_out_channels[stage_id + 2], - out_channels=stage_out_channels[stage_id + 2], - stride=1, - activation_layer=activation_layer), - name=str(stage_id + 2) + "_" + - str(i + 1)) + block = self.add_sublayer( + sublayer=InvertedResidual( + in_channels=stage_out_channels[stage_id + 2], + out_channels=stage_out_channels[stage_id + 2], + stride=1, + activation_layer=activation_layer, + ), + name=str(stage_id + 2) + "_" + str(i + 1), + ) self._block_list.append(block) # 3. last_conv self._last_conv = ConvNormActivation( @@ -271,7 +288,8 @@ class ShuffleNetV2(nn.Layer): kernel_size=1, stride=1, padding=0, - activation_layer=activation_layer) + activation_layer=activation_layer, + ) # 4. pool if with_pool: self._pool2d_avg = AdaptiveAvgPool2D(1) @@ -303,9 +321,11 @@ def _shufflenet_v2(arch, pretrained=False, **kwargs): assert ( arch in model_urls ), "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) @@ -342,10 +362,9 @@ def shufflenet_v2_x0_25(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x0_25", - scale=0.25, - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_x0_25", scale=0.25, pretrained=pretrained, **kwargs + ) def shufflenet_v2_x0_33(pretrained=False, **kwargs): @@ -378,10 +397,9 @@ def shufflenet_v2_x0_33(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x0_33", - scale=0.33, - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_x0_33", scale=0.33, pretrained=pretrained, **kwargs + ) def shufflenet_v2_x0_5(pretrained=False, **kwargs): @@ -414,10 +432,9 @@ def shufflenet_v2_x0_5(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x0_5", - scale=0.5, - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_x0_5", scale=0.5, pretrained=pretrained, **kwargs + ) def shufflenet_v2_x1_0(pretrained=False, **kwargs): @@ -450,10 +467,9 @@ def shufflenet_v2_x1_0(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x1_0", - scale=1.0, - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_x1_0", scale=1.0, pretrained=pretrained, **kwargs + ) def shufflenet_v2_x1_5(pretrained=False, **kwargs): @@ -486,10 +502,9 @@ def shufflenet_v2_x1_5(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x1_5", - scale=1.5, - pretrained=pretrained, - 
**kwargs) + return _shufflenet_v2( + "shufflenet_v2_x1_5", scale=1.5, pretrained=pretrained, **kwargs + ) def shufflenet_v2_x2_0(pretrained=False, **kwargs): @@ -522,10 +537,9 @@ def shufflenet_v2_x2_0(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_x2_0", - scale=2.0, - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_x2_0", scale=2.0, pretrained=pretrained, **kwargs + ) def shufflenet_v2_swish(pretrained=False, **kwargs): @@ -558,8 +572,10 @@ def shufflenet_v2_swish(pretrained=False, **kwargs): print(out.shape) # [1, 1000] """ - return _shufflenet_v2("shufflenet_v2_swish", - scale=1.0, - act="swish", - pretrained=pretrained, - **kwargs) + return _shufflenet_v2( + "shufflenet_v2_swish", + scale=1.0, + act="swish", + pretrained=pretrained, + **kwargs + ) diff --git a/python/paddle/vision/models/squeezenet.py b/python/paddle/vision/models/squeezenet.py index e2e7dc4b455b89f094c5af5188c152d6bcecb32a..efb69b5da3afdd8bfecd54aee607c4dc0d6aaf03 100644 --- a/python/paddle/vision/models/squeezenet.py +++ b/python/paddle/vision/models/squeezenet.py @@ -24,25 +24,28 @@ from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { - 'squeezenet1_0': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams', - '30b95af60a2178f03cf9b66cd77e1db1'), - 'squeezenet1_1': - ('https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams', - 'a11250d3a1f91d7131fd095ebbf09eee'), + 'squeezenet1_0': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_0_pretrained.pdparams', + '30b95af60a2178f03cf9b66cd77e1db1', + ), + 'squeezenet1_1': ( + 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SqueezeNet1_1_pretrained.pdparams', + 'a11250d3a1f91d7131fd095ebbf09eee', + ), } class MakeFireConv(nn.Layer): - def __init__(self, input_channels, output_channels, filter_size, padding=0): super(MakeFireConv, self).__init__() - self._conv = Conv2D(input_channels, - output_channels, - filter_size, - padding=padding, - weight_attr=ParamAttr(), - bias_attr=ParamAttr()) + self._conv = Conv2D( + input_channels, + output_channels, + filter_size, + padding=padding, + weight_attr=ParamAttr(), + bias_attr=ParamAttr(), + ) def forward(self, x): x = self._conv(x) @@ -51,16 +54,19 @@ class MakeFireConv(nn.Layer): class MakeFire(nn.Layer): - - def __init__(self, input_channels, squeeze_channels, expand1x1_channels, - expand3x3_channels): + def __init__( + self, + input_channels, + squeeze_channels, + expand1x1_channels, + expand3x3_channels, + ): super(MakeFire, self).__init__() self._conv = MakeFireConv(input_channels, squeeze_channels, 1) self._conv_path1 = MakeFireConv(squeeze_channels, expand1x1_channels, 1) - self._conv_path2 = MakeFireConv(squeeze_channels, - expand3x3_channels, - 3, - padding=1) + self._conv_path2 = MakeFireConv( + squeeze_channels, expand3x3_channels, 3, padding=1 + ) def forward(self, inputs): x = self._conv(inputs) @@ -109,17 +115,21 @@ class SqueezeNet(nn.Layer): self.with_pool = with_pool supported_versions = ['1.0', '1.1'] - assert version in supported_versions, \ - "supported versions are {} but input version is {}".format( - supported_versions, version) + assert ( + version in supported_versions + ), "supported versions are {} but input version is {}".format( + supported_versions, version + ) if self.version == "1.0": - self._conv = Conv2D(3, - 96, - 7, - stride=2, - weight_attr=ParamAttr(), - 
bias_attr=ParamAttr()) + self._conv = Conv2D( + 3, + 96, + 7, + stride=2, + weight_attr=ParamAttr(), + bias_attr=ParamAttr(), + ) self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) self._conv1 = MakeFire(96, 16, 64, 64) self._conv2 = MakeFire(128, 16, 64, 64) @@ -130,13 +140,15 @@ class SqueezeNet(nn.Layer): self._conv7 = MakeFire(384, 64, 256, 256) self._conv8 = MakeFire(512, 64, 256, 256) else: - self._conv = Conv2D(3, - 64, - 3, - stride=2, - padding=1, - weight_attr=ParamAttr(), - bias_attr=ParamAttr()) + self._conv = Conv2D( + 3, + 64, + 3, + stride=2, + padding=1, + weight_attr=ParamAttr(), + bias_attr=ParamAttr(), + ) self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0) self._conv1 = MakeFire(64, 16, 64, 64) self._conv2 = MakeFire(128, 16, 64, 64) @@ -148,11 +160,9 @@ class SqueezeNet(nn.Layer): self._conv8 = MakeFire(512, 64, 256, 256) self._drop = Dropout(p=0.5, mode="downscale_in_infer") - self._conv9 = Conv2D(512, - num_classes, - 1, - weight_attr=ParamAttr(), - bias_attr=ParamAttr()) + self._conv9 = Conv2D( + 512, num_classes, 1, weight_attr=ParamAttr(), bias_attr=ParamAttr() + ) self._avg_pool = AdaptiveAvgPool2D(1) def forward(self, inputs): @@ -195,10 +205,14 @@ class SqueezeNet(nn.Layer): def _squeezenet(arch, version, pretrained, **kwargs): model = SqueezeNet(version, **kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.set_dict(param) diff --git a/python/paddle/vision/models/vgg.py b/python/paddle/vision/models/vgg.py index c9558c9c382ee771d27d26b04b0c4fef4fcc0a28..1f996439274f7a574ff1750555f4e56748bdc3bc 100644 --- a/python/paddle/vision/models/vgg.py +++ b/python/paddle/vision/models/vgg.py @@ -20,10 +20,14 @@ from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { - 'vgg16': ('https://paddle-hapi.bj.bcebos.com/models/vgg16.pdparams', - '89bbffc0f87d260be9b8cdc169c991c4'), - 'vgg19': ('https://paddle-hapi.bj.bcebos.com/models/vgg19.pdparams', - '23b18bb13d8894f60f54e642be79a0dd') + 'vgg16': ( + 'https://paddle-hapi.bj.bcebos.com/models/vgg16.pdparams', + '89bbffc0f87d260be9b8cdc169c991c4', + ), + 'vgg19': ( + 'https://paddle-hapi.bj.bcebos.com/models/vgg19.pdparams', + '23b18bb13d8894f60f54e642be79a0dd', + ), } @@ -111,15 +115,65 @@ def make_layers(cfg, batch_norm=False): cfgs = { 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], - 'B': - [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [ + 64, + 64, + 'M', + 128, + 128, + 'M', + 256, + 256, + 'M', + 512, + 512, + 'M', + 512, + 512, + 'M', + ], 'D': [ - 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, - 512, 512, 'M' + 64, + 64, + 'M', + 128, + 128, + 'M', + 256, + 256, + 256, + 'M', + 512, + 512, + 512, + 'M', + 512, + 512, + 512, + 'M', ], 'E': [ - 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, - 'M', 512, 512, 512, 512, 'M' + 64, + 64, + 'M', + 128, + 128, + 'M', + 256, + 256, + 256, + 256, + 'M', + 512, + 512, + 512, + 512, + 'M', + 512, + 512, + 512, + 512, + 'M', ], } @@ -128,10 +182,14 @@ def _vgg(arch, cfg, batch_norm, 
pretrained, **kwargs): model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs) if pretrained: - assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( - arch) - weight_path = get_weights_path_from_url(model_urls[arch][0], - model_urls[arch][1]) + assert ( + arch in model_urls + ), "{} model do not have a pretrained model now, you should set pretrained=False".format( + arch + ) + weight_path = get_weights_path_from_url( + model_urls[arch][0], model_urls[arch][1] + ) param = paddle.load(weight_path) model.load_dict(param) @@ -172,7 +230,7 @@ def vgg11(pretrained=False, batch_norm=False, **kwargs): """ model_name = 'vgg11' if batch_norm: - model_name += ('_bn') + model_name += '_bn' return _vgg(model_name, 'A', batch_norm, pretrained, **kwargs) @@ -209,7 +267,7 @@ def vgg13(pretrained=False, batch_norm=False, **kwargs): """ model_name = 'vgg13' if batch_norm: - model_name += ('_bn') + model_name += '_bn' return _vgg(model_name, 'B', batch_norm, pretrained, **kwargs) @@ -246,7 +304,7 @@ def vgg16(pretrained=False, batch_norm=False, **kwargs): """ model_name = 'vgg16' if batch_norm: - model_name += ('_bn') + model_name += '_bn' return _vgg(model_name, 'D', batch_norm, pretrained, **kwargs) @@ -283,5 +341,5 @@ def vgg19(pretrained=False, batch_norm=False, **kwargs): """ model_name = 'vgg19' if batch_norm: - model_name += ('_bn') + model_name += '_bn' return _vgg(model_name, 'E', batch_norm, pretrained, **kwargs) diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index b66db2aa737e38aad664c4a0e56e5780f53b18f2..e2ed14fa78df60eb5e73cdff51a0a0f8a7265e7f 100755 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -18,30 +18,48 @@ from ..fluid.data_feeder import check_type, check_variable_and_dtype from ..fluid.layers import nn, utils from ..nn import Layer, Conv2D, Sequential, ReLU, BatchNorm2D from ..fluid.initializer import Normal -from ..fluid.framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph +from ..fluid.framework import ( + _non_static_mode, + in_dygraph_mode, + _in_legacy_dygraph, +) from paddle import _C_ops, _legacy_C_ops from ..framework import _current_expected_place -__all__ = [ #noqa - 'yolo_loss', 'yolo_box', 'deform_conv2d', 'DeformConv2D', - 'distribute_fpn_proposals', 'generate_proposals', 'read_file', - 'decode_jpeg', 'roi_pool', 'RoIPool', 'psroi_pool', 'PSRoIPool', - 'roi_align', 'RoIAlign', 'nms', 'matrix_nms' +__all__ = [ # noqa + 'yolo_loss', + 'yolo_box', + 'deform_conv2d', + 'DeformConv2D', + 'distribute_fpn_proposals', + 'generate_proposals', + 'read_file', + 'decode_jpeg', + 'roi_pool', + 'RoIPool', + 'psroi_pool', + 'PSRoIPool', + 'roi_align', + 'RoIAlign', + 'nms', + 'matrix_nms', ] -def yolo_loss(x, - gt_box, - gt_label, - anchors, - anchor_mask, - class_num, - ignore_thresh, - downsample_ratio, - gt_score=None, - use_label_smooth=True, - name=None, - scale_x_y=1.): +def yolo_loss( + x, + gt_box, + gt_label, + anchors, + anchor_mask, + class_num, + ignore_thresh, + downsample_ratio, + gt_score=None, + use_label_smooth=True, + name=None, + scale_x_y=1.0, +): r""" This operator generates YOLOv3 loss based on given predict result and ground @@ -176,25 +194,50 @@ def yolo_loss(x, """ if in_dygraph_mode(): - loss, _, _ = _C_ops.yolov3_loss(x, gt_box, gt_label, gt_score, anchors, - anchor_mask, class_num, ignore_thresh, - downsample_ratio, use_label_smooth, - scale_x_y) + loss, _, _ = _C_ops.yolov3_loss( + x, + gt_box, + gt_label, + gt_score, + 
anchors, + anchor_mask, + class_num, + ignore_thresh, + downsample_ratio, + use_label_smooth, + scale_x_y, + ) return loss if _non_static_mode(): loss, _, _ = _legacy_C_ops.yolov3_loss( - x, gt_box, gt_label, gt_score, 'anchors', anchors, 'anchor_mask', - anchor_mask, 'class_num', class_num, 'ignore_thresh', ignore_thresh, - 'downsample_ratio', downsample_ratio, 'use_label_smooth', - use_label_smooth, 'scale_x_y', scale_x_y) + x, + gt_box, + gt_label, + gt_score, + 'anchors', + anchors, + 'anchor_mask', + anchor_mask, + 'class_num', + class_num, + 'ignore_thresh', + ignore_thresh, + 'downsample_ratio', + downsample_ratio, + 'use_label_smooth', + use_label_smooth, + 'scale_x_y', + scale_x_y, + ) return loss helper = LayerHelper('yolov3_loss', **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss') - check_variable_and_dtype(gt_box, 'gt_box', ['float32', 'float64'], - 'yolo_loss') + check_variable_and_dtype( + gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss' + ) check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss') check_type(anchors, 'anchors', (list, tuple), 'yolo_loss') check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss') @@ -225,28 +268,32 @@ def yolo_loss(x, "scale_x_y": scale_x_y, } - helper.append_op(type='yolov3_loss', - inputs=inputs, - outputs={ - 'Loss': loss, - 'ObjectnessMask': objectness_mask, - 'GTMatchMask': gt_match_mask - }, - attrs=attrs) + helper.append_op( + type='yolov3_loss', + inputs=inputs, + outputs={ + 'Loss': loss, + 'ObjectnessMask': objectness_mask, + 'GTMatchMask': gt_match_mask, + }, + attrs=attrs, + ) return loss -def yolo_box(x, - img_size, - anchors, - class_num, - conf_thresh, - downsample_ratio, - clip_bbox=True, - name=None, - scale_x_y=1., - iou_aware=False, - iou_aware_factor=0.5): +def yolo_box( + x, + img_size, + anchors, + class_num, + conf_thresh, + downsample_ratio, + clip_bbox=True, + name=None, + scale_x_y=1.0, + iou_aware=False, + iou_aware_factor=0.5, +): r""" This operator generates YOLO detection boxes from output of YOLOv3 network. @@ -359,18 +406,41 @@ def yolo_box(x, scale_x_y=1.) 
""" if in_dygraph_mode(): - boxes, scores = _C_ops.yolo_box(x, img_size, anchors, class_num, - conf_thresh, downsample_ratio, - clip_bbox, scale_x_y, iou_aware, - iou_aware_factor) + boxes, scores = _C_ops.yolo_box( + x, + img_size, + anchors, + class_num, + conf_thresh, + downsample_ratio, + clip_bbox, + scale_x_y, + iou_aware, + iou_aware_factor, + ) return boxes, scores if _non_static_mode(): boxes, scores = _legacy_C_ops.yolo_box( - x, img_size, 'anchors', anchors, 'class_num', class_num, - 'conf_thresh', conf_thresh, 'downsample_ratio', downsample_ratio, - 'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y, 'iou_aware', - iou_aware, 'iou_aware_factor', iou_aware_factor) + x, + img_size, + 'anchors', + anchors, + 'class_num', + class_num, + 'conf_thresh', + conf_thresh, + 'downsample_ratio', + downsample_ratio, + 'clip_bbox', + clip_bbox, + 'scale_x_y', + scale_x_y, + 'iou_aware', + iou_aware, + 'iou_aware_factor', + iou_aware_factor, + ) return boxes, scores helper = LayerHelper('yolo_box', **locals()) @@ -391,33 +461,37 @@ def yolo_box(x, "clip_bbox": clip_bbox, "scale_x_y": scale_x_y, "iou_aware": iou_aware, - "iou_aware_factor": iou_aware_factor + "iou_aware_factor": iou_aware_factor, } - helper.append_op(type='yolo_box', - inputs={ - "X": x, - "ImgSize": img_size, - }, - outputs={ - 'Boxes': boxes, - 'Scores': scores, - }, - attrs=attrs) + helper.append_op( + type='yolo_box', + inputs={ + "X": x, + "ImgSize": img_size, + }, + outputs={ + 'Boxes': boxes, + 'Scores': scores, + }, + attrs=attrs, + ) return boxes, scores -def deform_conv2d(x, - offset, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - deformable_groups=1, - groups=1, - mask=None, - name=None): +def deform_conv2d( + x, + offset, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + deformable_groups=1, + groups=1, + mask=None, + name=None, +): r""" Compute 2-D deformable convolution on 4-D input. 
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow: @@ -537,34 +611,58 @@ def deform_conv2d(x, use_deform_conv2d_v1 = True if mask is None else False if in_dygraph_mode(): - pre_bias = _C_ops.deformable_conv(x, offset, weight, mask, stride, - padding, dilation, deformable_groups, - groups, 1) + pre_bias = _C_ops.deformable_conv( + x, + offset, + weight, + mask, + stride, + padding, + dilation, + deformable_groups, + groups, + 1, + ) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=1) else: out = pre_bias elif _in_legacy_dygraph(): - attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation, - 'deformable_groups', deformable_groups, 'groups', groups, - 'im2col_step', 1) + attrs = ( + 'strides', + stride, + 'paddings', + padding, + 'dilations', + dilation, + 'deformable_groups', + deformable_groups, + 'groups', + groups, + 'im2col_step', + 1, + ) if use_deform_conv2d_v1: op_type = 'deformable_conv_v1' - pre_bias = getattr(_legacy_C_ops, op_type)(x, offset, weight, - *attrs) + pre_bias = getattr(_legacy_C_ops, op_type)( + x, offset, weight, *attrs + ) else: op_type = 'deformable_conv' - pre_bias = getattr(_legacy_C_ops, op_type)(x, offset, mask, weight, - *attrs) + pre_bias = getattr(_legacy_C_ops, op_type)( + x, offset, mask, weight, *attrs + ) if bias is not None: out = nn.elementwise_add(pre_bias, bias, axis=1) else: out = pre_bias else: - check_variable_and_dtype(x, "x", ['float32', 'float64'], - 'deform_conv2d') - check_variable_and_dtype(offset, "offset", ['float32', 'float64'], - 'deform_conv2d') + check_variable_and_dtype( + x, "x", ['float32', 'float64'], 'deform_conv2d' + ) + check_variable_and_dtype( + offset, "offset", ['float32', 'float64'], 'deform_conv2d' + ) num_channels = x.shape[1] @@ -602,20 +700,18 @@ def deform_conv2d(x, 'deformable_groups': deformable_groups, 'im2col_step': 1, } - helper.append_op(type=op_type, - inputs=inputs, - outputs=outputs, - attrs=attrs) + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=attrs + ) if bias is not None: out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='elementwise_add', - inputs={ - 'X': [pre_bias], - 'Y': [bias] - }, - outputs={'Out': [out]}, - attrs={'axis': 1}) + helper.append_op( + type='elementwise_add', + inputs={'X': [pre_bias], 'Y': [bias]}, + outputs={'Out': [out]}, + attrs={'axis': 1}, + ) else: out = pre_bias return out @@ -755,19 +851,23 @@ class DeformConv2D(Layer): [8, 16, 26, 26] """ - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - deformable_groups=1, - groups=1, - weight_attr=None, - bias_attr=None): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + deformable_groups=1, + groups=1, + weight_attr=None, + bias_attr=None, + ): super(DeformConv2D, self).__init__() - assert weight_attr is not False, "weight_attr should not be False in Conv." + assert ( + weight_attr is not False + ), "weight_attr should not be False in Conv." 
self._weight_attr = weight_attr self._bias_attr = bias_attr self._deformable_groups = deformable_groups @@ -789,39 +889,44 @@ class DeformConv2D(Layer): def _get_default_param_initializer(): filter_elem_num = np.prod(self._kernel_size) * self._in_channels - std = (2.0 / filter_elem_num)**0.5 + std = (2.0 / filter_elem_num) ** 0.5 return Normal(0.0, std, 0) self.weight = self.create_parameter( shape=filter_shape, attr=self._weight_attr, - default_initializer=_get_default_param_initializer()) - self.bias = self.create_parameter(attr=self._bias_attr, - shape=[self._out_channels], - is_bias=True) + default_initializer=_get_default_param_initializer(), + ) + self.bias = self.create_parameter( + attr=self._bias_attr, shape=[self._out_channels], is_bias=True + ) def forward(self, x, offset, mask=None): - out = deform_conv2d(x=x, - offset=offset, - weight=self.weight, - bias=self.bias, - stride=self._stride, - padding=self._padding, - dilation=self._dilation, - deformable_groups=self._deformable_groups, - groups=self._groups, - mask=mask) + out = deform_conv2d( + x=x, + offset=offset, + weight=self.weight, + bias=self.bias, + stride=self._stride, + padding=self._padding, + dilation=self._dilation, + deformable_groups=self._deformable_groups, + groups=self._groups, + mask=mask, + ) return out -def distribute_fpn_proposals(fpn_rois, - min_level, - max_level, - refer_level, - refer_scale, - pixel_offset=False, - rois_num=None, - name=None): +def distribute_fpn_proposals( + fpn_rois, + min_level, + max_level, + refer_level, + refer_scale, + pixel_offset=False, + rois_num=None, + name=None, +): r""" In Feature Pyramid Networks (FPN) models, it is needed to distribute all proposals into different FPN level, with respect to scale of the proposals, @@ -882,24 +987,56 @@ def distribute_fpn_proposals(fpn_rois, num_lvl = max_level - min_level + 1 if in_dygraph_mode(): - assert rois_num is not None, "rois_num should not be None in dygraph mode." - multi_rois, rois_num_per_level, restore_ind = _C_ops.distribute_fpn_proposals( - fpn_rois, rois_num, min_level, max_level, refer_level, refer_scale, - pixel_offset) + assert ( + rois_num is not None + ), "rois_num should not be None in dygraph mode." + ( + multi_rois, + rois_num_per_level, + restore_ind, + ) = _C_ops.distribute_fpn_proposals( + fpn_rois, + rois_num, + min_level, + max_level, + refer_level, + refer_scale, + pixel_offset, + ) return multi_rois, restore_ind, rois_num_per_level if _non_static_mode(): - assert rois_num is not None, "rois_num should not be None in dygraph mode." - attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level', - refer_level, 'refer_scale', refer_scale, 'pixel_offset', - pixel_offset) - multi_rois, restore_ind, rois_num_per_level = _legacy_C_ops.distribute_fpn_proposals( - fpn_rois, rois_num, num_lvl, num_lvl, *attrs) + assert ( + rois_num is not None + ), "rois_num should not be None in dygraph mode." 
+ attrs = ( + 'min_level', + min_level, + 'max_level', + max_level, + 'refer_level', + refer_level, + 'refer_scale', + refer_scale, + 'pixel_offset', + pixel_offset, + ) + ( + multi_rois, + restore_ind, + rois_num_per_level, + ) = _legacy_C_ops.distribute_fpn_proposals( + fpn_rois, rois_num, num_lvl, num_lvl, *attrs + ) return multi_rois, restore_ind, rois_num_per_level else: - check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'], - 'distribute_fpn_proposals') + check_variable_and_dtype( + fpn_rois, + 'fpn_rois', + ['float32', 'float64'], + 'distribute_fpn_proposals', + ) helper = LayerHelper('distribute_fpn_proposals', **locals()) dtype = helper.input_dtype('fpn_rois') multi_rois = [ @@ -925,16 +1062,18 @@ def distribute_fpn_proposals(fpn_rois, else: rois_num_per_level = None - helper.append_op(type='distribute_fpn_proposals', - inputs=inputs, - outputs=outputs, - attrs={ - 'min_level': min_level, - 'max_level': max_level, - 'refer_level': refer_level, - 'refer_scale': refer_scale, - 'pixel_offset': pixel_offset - }) + helper.append_op( + type='distribute_fpn_proposals', + inputs=inputs, + outputs=outputs, + attrs={ + 'min_level': min_level, + 'max_level': max_level, + 'refer_level': refer_level, + 'refer_scale': refer_scale, + 'pixel_offset': pixel_offset, + }, + ) return multi_rois, restore_ind, rois_num_per_level @@ -976,10 +1115,9 @@ def read_file(filename, name=None): helper = LayerHelper("read_file", **locals()) out = helper.create_variable_for_type_inference('uint8') - helper.append_op(type="read_file", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) return out @@ -1029,10 +1167,9 @@ def decode_jpeg(x, mode='unchanged', name=None): helper = LayerHelper("decode_jpeg", **locals()) out = helper.create_variable_for_type_inference('uint8') - helper.append_op(type="decode_jpeg", - inputs=inputs, - attrs=attrs, - outputs={"Out": out}) + helper.append_op( + type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out} + ) return out @@ -1080,34 +1217,47 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None): if isinstance(output_size, int): output_size = (output_size, output_size) pooled_height, pooled_width = output_size - assert len(x.shape) == 4, \ - "Input features with shape should be (N, C, H, W)" + assert len(x.shape) == 4, "Input features with shape should be (N, C, H, W)" output_channels = int(x.shape[1] / (pooled_height * pooled_width)) if in_dygraph_mode(): - return _C_ops.psroi_pool(x, boxes, boxes_num, pooled_height, - pooled_width, output_channels, spatial_scale) + return _C_ops.psroi_pool( + x, + boxes, + boxes_num, + pooled_height, + pooled_width, + output_channels, + spatial_scale, + ) if _in_legacy_dygraph(): - return _legacy_C_ops.psroi_pool(x, boxes, boxes_num, "output_channels", - output_channels, "spatial_scale", - spatial_scale, "pooled_height", - pooled_height, "pooled_width", - pooled_width) + return _legacy_C_ops.psroi_pool( + x, + boxes, + boxes_num, + "output_channels", + output_channels, + "spatial_scale", + spatial_scale, + "pooled_height", + pooled_height, + "pooled_width", + pooled_width, + ) helper = LayerHelper('psroi_pool', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) - helper.append_op(type='psroi_pool', - inputs={ - 'X': x, - 'ROIs': boxes - }, - outputs={'Out': out}, - attrs={ - 'output_channels': output_channels, - 'spatial_scale': spatial_scale, - 
'pooled_height': pooled_height, - 'pooled_width': pooled_width - }) + helper.append_op( + type='psroi_pool', + inputs={'X': x, 'ROIs': boxes}, + outputs={'Out': out}, + attrs={ + 'output_channels': output_channels, + 'spatial_scale': spatial_scale, + 'pooled_height': pooled_height, + 'pooled_width': pooled_width, + }, + ) return out @@ -1151,8 +1301,9 @@ class PSRoIPool(Layer): self.spatial_scale = spatial_scale def forward(self, x, boxes, boxes_num): - return psroi_pool(x, boxes, boxes_num, self.output_size, - self.spatial_scale) + return psroi_pool( + x, boxes, boxes_num, self.output_size, self.spatial_scale + ) def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None): @@ -1199,14 +1350,27 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None): pooled_height, pooled_width = output_size if in_dygraph_mode(): - assert boxes_num is not None, "boxes_num should not be None in dygraph mode." - return _C_ops.roi_pool(x, boxes, boxes_num, pooled_height, pooled_width, - spatial_scale) + assert ( + boxes_num is not None + ), "boxes_num should not be None in dygraph mode." + return _C_ops.roi_pool( + x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale + ) if _in_legacy_dygraph(): - assert boxes_num is not None, "boxes_num should not be None in dygraph mode." + assert ( + boxes_num is not None + ), "boxes_num should not be None in dygraph mode." pool_out, argmaxes = _legacy_C_ops.roi_pool( - x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width", - pooled_width, "spatial_scale", spatial_scale) + x, + boxes, + boxes_num, + "pooled_height", + pooled_height, + "pooled_width", + pooled_width, + "spatial_scale", + spatial_scale, + ) return pool_out else: @@ -1223,17 +1387,16 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None): } if boxes_num is not None: inputs['RoisNum'] = boxes_num - helper.append_op(type="roi_pool", - inputs=inputs, - outputs={ - "Out": pool_out, - "Argmax": argmaxes - }, - attrs={ - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "spatial_scale": spatial_scale - }) + helper.append_op( + type="roi_pool", + inputs=inputs, + outputs={"Out": pool_out, "Argmax": argmaxes}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale, + }, + ) return pool_out @@ -1271,25 +1434,29 @@ class RoIPool(Layer): self._spatial_scale = spatial_scale def forward(self, x, boxes, boxes_num): - return roi_pool(x=x, - boxes=boxes, - boxes_num=boxes_num, - output_size=self._output_size, - spatial_scale=self._spatial_scale) + return roi_pool( + x=x, + boxes=boxes, + boxes_num=boxes_num, + output_size=self._output_size, + spatial_scale=self._spatial_scale, + ) def extra_repr(self): main_str = 'output_size={_output_size}, spatial_scale={_spatial_scale}' return main_str.format(**self.__dict__) -def roi_align(x, - boxes, - boxes_num, - output_size, - spatial_scale=1.0, - sampling_ratio=-1, - aligned=True, - name=None): +def roi_align( + x, + boxes, + boxes_num, + output_size, + spatial_scale=1.0, + sampling_ratio=-1, + aligned=True, + name=None, +): """ Implementing the roi_align layer. Region of Interest (RoI) Align operator (also known as RoI Align) is to @@ -1358,24 +1525,45 @@ def roi_align(x, pooled_height, pooled_width = output_size if in_dygraph_mode(): - assert boxes_num is not None, "boxes_num should not be None in dygraph mode." 
- return _C_ops.roi_align(x, boxes, boxes_num, pooled_height, - pooled_width, spatial_scale, sampling_ratio, - aligned) + assert ( + boxes_num is not None + ), "boxes_num should not be None in dygraph mode." + return _C_ops.roi_align( + x, + boxes, + boxes_num, + pooled_height, + pooled_width, + spatial_scale, + sampling_ratio, + aligned, + ) if _in_legacy_dygraph(): - assert boxes_num is not None, "boxes_num should not be None in dygraph mode." - align_out = _legacy_C_ops.roi_align(x, boxes, boxes_num, - "pooled_height", pooled_height, - "pooled_width", pooled_width, - "spatial_scale", spatial_scale, - "sampling_ratio", sampling_ratio, - "aligned", aligned) + assert ( + boxes_num is not None + ), "boxes_num should not be None in dygraph mode." + align_out = _legacy_C_ops.roi_align( + x, + boxes, + boxes_num, + "pooled_height", + pooled_height, + "pooled_width", + pooled_width, + "spatial_scale", + spatial_scale, + "sampling_ratio", + sampling_ratio, + "aligned", + aligned, + ) return align_out else: check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'roi_align') - check_variable_and_dtype(boxes, 'boxes', ['float32', 'float64'], - 'roi_align') + check_variable_and_dtype( + boxes, 'boxes', ['float32', 'float64'], 'roi_align' + ) helper = LayerHelper('roi_align', **locals()) dtype = helper.input_dtype() align_out = helper.create_variable_for_type_inference(dtype) @@ -1385,16 +1573,18 @@ def roi_align(x, } if boxes_num is not None: inputs['RoisNum'] = boxes_num - helper.append_op(type="roi_align", - inputs=inputs, - outputs={"Out": align_out}, - attrs={ - "pooled_height": pooled_height, - "pooled_width": pooled_width, - "spatial_scale": spatial_scale, - "sampling_ratio": sampling_ratio, - "aligned": aligned, - }) + helper.append_op( + type="roi_align", + inputs=inputs, + outputs={"Out": align_out}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale, + "sampling_ratio": sampling_ratio, + "aligned": aligned, + }, + ) return align_out @@ -1436,12 +1626,14 @@ class RoIAlign(Layer): self._spatial_scale = spatial_scale def forward(self, x, boxes, boxes_num, aligned=True): - return roi_align(x=x, - boxes=boxes, - boxes_num=boxes_num, - output_size=self._output_size, - spatial_scale=self._spatial_scale, - aligned=aligned) + return roi_align( + x=x, + boxes=boxes, + boxes_num=boxes_num, + output_size=self._output_size, + spatial_scale=self._spatial_scale, + aligned=aligned, + ) class ConvNormActivation(Sequential): @@ -1465,30 +1657,34 @@ class ConvNormActivation(Sequential): bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``. 
""" - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=None, - groups=1, - norm_layer=BatchNorm2D, - activation_layer=ReLU, - dilation=1, - bias=None): + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=None, + groups=1, + norm_layer=BatchNorm2D, + activation_layer=ReLU, + dilation=1, + bias=None, + ): if padding is None: padding = (kernel_size - 1) // 2 * dilation if bias is None: bias = norm_layer is None layers = [ - Conv2D(in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation=dilation, - groups=groups, - bias_attr=bias) + Conv2D( + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=dilation, + groups=groups, + bias_attr=bias, + ) ] if norm_layer is not None: layers.append(norm_layer(out_channels)) @@ -1497,12 +1693,14 @@ class ConvNormActivation(Sequential): super().__init__(*layers) -def nms(boxes, - iou_threshold=0.3, - scores=None, - category_idxs=None, - categories=None, - top_k=None): +def nms( + boxes, + iou_threshold=0.3, + scores=None, + category_idxs=None, + categories=None, + top_k=None, +): r""" This operator implements non-maximum suppression. Non-maximum suppression (NMS) is used to select one bounding box out of many overlapping bounding boxes in object detection. @@ -1582,34 +1780,42 @@ def nms(boxes, helper = LayerHelper('nms', **locals()) out = helper.create_variable_for_type_inference('int64') - helper.append_op(type='nms', - inputs={'Boxes': boxes}, - outputs={'KeepBoxesIdxs': out}, - attrs={'iou_threshold': iou_threshold}) + helper.append_op( + type='nms', + inputs={'Boxes': boxes}, + outputs={'KeepBoxesIdxs': out}, + attrs={'iou_threshold': iou_threshold}, + ) return out if scores is None: return _nms(boxes, iou_threshold) import paddle + if category_idxs is None: sorted_global_indices = paddle.argsort(scores, descending=True) - sorted_keep_boxes_indices = _nms(boxes[sorted_global_indices], - iou_threshold) + sorted_keep_boxes_indices = _nms( + boxes[sorted_global_indices], iou_threshold + ) return sorted_global_indices[sorted_keep_boxes_indices] if top_k is not None: - assert top_k <= scores.shape[ - 0], "top_k should be smaller equal than the number of boxes" - assert categories is not None, "if category_idxs is given, categories which is a list of unique id of all categories is necessary" + assert ( + top_k <= scores.shape[0] + ), "top_k should be smaller equal than the number of boxes" + assert ( + categories is not None + ), "if category_idxs is given, categories which is a list of unique id of all categories is necessary" mask = paddle.zeros_like(scores, dtype=paddle.int32) for category_id in categories: cur_category_boxes_idxs = paddle.where(category_idxs == category_id)[0] shape = cur_category_boxes_idxs.shape[0] - cur_category_boxes_idxs = paddle.reshape(cur_category_boxes_idxs, - [shape]) + cur_category_boxes_idxs = paddle.reshape( + cur_category_boxes_idxs, [shape] + ) if shape == 0: continue elif shape == 1: @@ -1617,27 +1823,33 @@ def nms(boxes, continue cur_category_boxes = boxes[cur_category_boxes_idxs] cur_category_scores = scores[cur_category_boxes_idxs] - cur_category_sorted_indices = paddle.argsort(cur_category_scores, - descending=True) + cur_category_sorted_indices = paddle.argsort( + cur_category_scores, descending=True + ) cur_category_sorted_boxes = cur_category_boxes[ - cur_category_sorted_indices] + cur_category_sorted_indices + ] - cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[_nms( - 
cur_category_sorted_boxes, iou_threshold)] + cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[ + _nms(cur_category_sorted_boxes, iou_threshold) + ] updates = paddle.ones_like( cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs], - dtype=paddle.int32) + dtype=paddle.int32, + ) mask = paddle.scatter( mask, cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs], updates, - overwrite=True) + overwrite=True, + ) keep_boxes_idxs = paddle.where(mask)[0] shape = keep_boxes_idxs.shape[0] keep_boxes_idxs = paddle.reshape(keep_boxes_idxs, [shape]) - sorted_sub_indices = paddle.argsort(scores[keep_boxes_idxs], - descending=True) + sorted_sub_indices = paddle.argsort( + scores[keep_boxes_idxs], descending=True + ) if top_k is None: return keep_boxes_idxs[sorted_sub_indices] @@ -1650,19 +1862,21 @@ def nms(boxes, return keep_boxes_idxs[sorted_sub_indices][:top_k] -def generate_proposals(scores, - bbox_deltas, - img_size, - anchors, - variances, - pre_nms_top_n=6000, - post_nms_top_n=1000, - nms_thresh=0.5, - min_size=0.1, - eta=1.0, - pixel_offset=False, - return_rois_num=False, - name=None): +def generate_proposals( + scores, + bbox_deltas, + img_size, + anchors, + variances, + pre_nms_top_n=6000, + post_nms_top_n=1000, + nms_thresh=0.5, + min_size=0.1, + eta=1.0, + pixel_offset=False, + return_rois_num=False, + name=None, +): """ This operation proposes RoIs according to each box with their probability to be a foreground object. And @@ -1733,40 +1947,74 @@ def generate_proposals(scores, """ if in_dygraph_mode(): - assert return_rois_num, "return_rois_num should be True in dygraph mode." - attrs = (pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta, - pixel_offset) + assert ( + return_rois_num + ), "return_rois_num should be True in dygraph mode." + attrs = ( + pre_nms_top_n, + post_nms_top_n, + nms_thresh, + min_size, + eta, + pixel_offset, + ) rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals( - scores, bbox_deltas, img_size, anchors, variances, *attrs) + scores, bbox_deltas, img_size, anchors, variances, *attrs + ) return rpn_rois, rpn_roi_probs, rpn_rois_num elif _non_static_mode(): - assert return_rois_num, "return_rois_num should be True in dygraph mode." - attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n, - 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta, - 'pixel_offset', pixel_offset) - rpn_rois, rpn_roi_probs, rpn_rois_num = _legacy_C_ops.generate_proposals_v2( - scores, bbox_deltas, img_size, anchors, variances, *attrs) + assert ( + return_rois_num + ), "return_rois_num should be True in dygraph mode." 
+ attrs = ( + 'pre_nms_topN', + pre_nms_top_n, + 'post_nms_topN', + post_nms_top_n, + 'nms_thresh', + nms_thresh, + 'min_size', + min_size, + 'eta', + eta, + 'pixel_offset', + pixel_offset, + ) + ( + rpn_rois, + rpn_roi_probs, + rpn_rois_num, + ) = _legacy_C_ops.generate_proposals_v2( + scores, bbox_deltas, img_size, anchors, variances, *attrs + ) return rpn_rois, rpn_roi_probs, rpn_rois_num helper = LayerHelper('generate_proposals_v2', **locals()) - check_variable_and_dtype(scores, 'scores', ['float32'], - 'generate_proposals_v2') - check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'], - 'generate_proposals_v2') - check_variable_and_dtype(img_size, 'img_size', ['float32', 'float64'], - 'generate_proposals_v2') - check_variable_and_dtype(anchors, 'anchors', ['float32'], - 'generate_proposals_v2') - check_variable_and_dtype(variances, 'variances', ['float32'], - 'generate_proposals_v2') + check_variable_and_dtype( + scores, 'scores', ['float32'], 'generate_proposals_v2' + ) + check_variable_and_dtype( + bbox_deltas, 'bbox_deltas', ['float32'], 'generate_proposals_v2' + ) + check_variable_and_dtype( + img_size, 'img_size', ['float32', 'float64'], 'generate_proposals_v2' + ) + check_variable_and_dtype( + anchors, 'anchors', ['float32'], 'generate_proposals_v2' + ) + check_variable_and_dtype( + variances, 'variances', ['float32'], 'generate_proposals_v2' + ) rpn_rois = helper.create_variable_for_type_inference( - dtype=bbox_deltas.dtype) + dtype=bbox_deltas.dtype + ) rpn_roi_probs = helper.create_variable_for_type_inference( - dtype=scores.dtype) + dtype=scores.dtype + ) outputs = { 'RpnRois': rpn_rois, 'RpnRoiProbs': rpn_roi_probs, @@ -1776,23 +2024,25 @@ def generate_proposals(scores, rpn_rois_num.stop_gradient = True outputs['RpnRoisNum'] = rpn_rois_num - helper.append_op(type="generate_proposals_v2", - inputs={ - 'Scores': scores, - 'BboxDeltas': bbox_deltas, - 'ImShape': img_size, - 'Anchors': anchors, - 'Variances': variances - }, - attrs={ - 'pre_nms_topN': pre_nms_top_n, - 'post_nms_topN': post_nms_top_n, - 'nms_thresh': nms_thresh, - 'min_size': min_size, - 'eta': eta, - 'pixel_offset': pixel_offset - }, - outputs=outputs) + helper.append_op( + type="generate_proposals_v2", + inputs={ + 'Scores': scores, + 'BboxDeltas': bbox_deltas, + 'ImShape': img_size, + 'Anchors': anchors, + 'Variances': variances, + }, + attrs={ + 'pre_nms_topN': pre_nms_top_n, + 'post_nms_topN': post_nms_top_n, + 'nms_thresh': nms_thresh, + 'min_size': min_size, + 'eta': eta, + 'pixel_offset': pixel_offset, + }, + outputs=outputs, + ) rpn_rois.stop_gradient = True rpn_roi_probs.stop_gradient = True if not return_rois_num: @@ -1801,19 +2051,21 @@ def generate_proposals(scores, return rpn_rois, rpn_roi_probs, rpn_rois_num -def matrix_nms(bboxes, - scores, - score_threshold, - post_threshold, - nms_top_k, - keep_top_k, - use_gaussian=False, - gaussian_sigma=2., - background_label=0, - normalized=True, - return_index=False, - return_rois_num=True, - name=None): +def matrix_nms( + bboxes, + scores, + score_threshold, + post_threshold, + nms_top_k, + keep_top_k, + use_gaussian=False, + gaussian_sigma=2.0, + background_label=0, + normalized=True, + return_index=False, + return_rois_num=True, + name=None, +): """ This operator does matrix non maximum suppression (NMS). 
First selects a subset of candidate bounding boxes that have higher scores @@ -1876,10 +2128,12 @@ def matrix_nms(bboxes, score_threshold=0.5, post_threshold=0.1, nms_top_k=400, keep_top_k=200, normalized=False) """ - check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'], - 'matrix_nms') - check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'], - 'matrix_nms') + check_variable_and_dtype( + bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms' + ) + check_variable_and_dtype( + scores, 'Scores', ['float32', 'float64'], 'matrix_nms' + ) check_type(score_threshold, 'score_threshold', float, 'matrix_nms') check_type(post_threshold, 'post_threshold', float, 'matrix_nms') check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms') @@ -1890,22 +2144,42 @@ def matrix_nms(bboxes, check_type(background_label, 'background_label', int, 'matrix_nms') if in_dygraph_mode(): - out, index, rois_num = _C_ops.matrix_nms(bboxes, scores, - score_threshold, nms_top_k, - keep_top_k, post_threshold, - use_gaussian, gaussian_sigma, - background_label, normalized) + out, index, rois_num = _C_ops.matrix_nms( + bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + post_threshold, + use_gaussian, + gaussian_sigma, + background_label, + normalized, + ) if not return_index: index = None if not return_rois_num: rois_num = None return out, rois_num, index elif _in_legacy_dygraph(): - attrs = ('background_label', background_label, 'score_threshold', - score_threshold, 'post_threshold', post_threshold, 'nms_top_k', - nms_top_k, 'gaussian_sigma', gaussian_sigma, 'use_gaussian', - use_gaussian, 'keep_top_k', keep_top_k, 'normalized', - normalized) + attrs = ( + 'background_label', + background_label, + 'score_threshold', + score_threshold, + 'post_threshold', + post_threshold, + 'nms_top_k', + nms_top_k, + 'gaussian_sigma', + gaussian_sigma, + 'use_gaussian', + use_gaussian, + 'keep_top_k', + keep_top_k, + 'normalized', + normalized, + ) out, index, rois_num = _legacy_C_ops.matrix_nms(bboxes, scores, *attrs) if not return_index: index = None @@ -1921,22 +2195,21 @@ def matrix_nms(bboxes, rois_num = helper.create_variable_for_type_inference(dtype='int32') outputs['RoisNum'] = rois_num - helper.append_op(type="matrix_nms", - inputs={ - 'BBoxes': bboxes, - 'Scores': scores - }, - attrs={ - 'background_label': background_label, - 'score_threshold': score_threshold, - 'post_threshold': post_threshold, - 'nms_top_k': nms_top_k, - 'gaussian_sigma': gaussian_sigma, - 'use_gaussian': use_gaussian, - 'keep_top_k': keep_top_k, - 'normalized': normalized - }, - outputs=outputs) + helper.append_op( + type="matrix_nms", + inputs={'BBoxes': bboxes, 'Scores': scores}, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'post_threshold': post_threshold, + 'nms_top_k': nms_top_k, + 'gaussian_sigma': gaussian_sigma, + 'use_gaussian': use_gaussian, + 'keep_top_k': keep_top_k, + 'normalized': normalized, + }, + outputs=outputs, + ) output.stop_gradient = True if not return_index: diff --git a/python/paddle/vision/transforms/__init__.py b/python/paddle/vision/transforms/__init__.py index d615598bf2bccb2b00d4d08ead2285a64ba7e092..890e4b8982714d5dcbed57cebc63ea84147b1a5d 100644 --- a/python/paddle/vision/transforms/__init__.py +++ b/python/paddle/vision/transforms/__init__.py @@ -51,7 +51,7 @@ from .functional import adjust_hue # noqa: F401 from .functional import normalize # noqa: F401 from .functional import erase # noqa: F401 -__all__ = [ #noqa +__all__ = [ # noqa 
'BaseTransform', 'Compose', 'Resize', diff --git a/python/paddle/vision/transforms/functional.py b/python/paddle/vision/transforms/functional.py index 99d27a60ae3b0261dbd36dbd33559456a9af7b1c..45e50712c9e75d4138fc7c0ceba5d6b65c56911b 100644 --- a/python/paddle/vision/transforms/functional.py +++ b/python/paddle/vision/transforms/functional.py @@ -66,11 +66,14 @@ def to_tensor(pic, data_format='CHW'): print(tensor.shape) """ - if not (_is_pil_image(pic) or _is_numpy_image(pic) - or _is_tensor_image(pic)): + if not ( + _is_pil_image(pic) or _is_numpy_image(pic) or _is_tensor_image(pic) + ): raise TypeError( - 'pic should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(pic))) + 'pic should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(pic) + ) + ) if _is_pil_image(pic): return F_pil.to_tensor(pic, data_format) @@ -124,11 +127,14 @@ def resize(img, size, interpolation='bilinear'): print(converted_img.size) # (150, 200) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.resize(img, size, interpolation) @@ -188,11 +194,14 @@ def pad(img, padding, fill=0, padding_mode='constant'): padded_img = F.pad(fake_img, padding=(2, 1)) print(padded_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.pad(img, padding, fill, padding_mode) @@ -231,11 +240,14 @@ def crop(img, top, left, height, width): print(cropped_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.crop(img, top, left, height, width) @@ -248,33 +260,36 @@ def crop(img, top, left, height, width): def center_crop(img, output_size): """Crops the given Image and resize it to desired size. - Args: - img (PIL.Image|np.array): Image to be cropped. (0,0) denotes the top left corner of the image. - output_size (sequence or int): (height, width) of the crop box. If int, - it is used for both directions + Args: + img (PIL.Image|np.array): Image to be cropped. (0,0) denotes the top left corner of the image. + output_size (sequence or int): (height, width) of the crop box. If int, + it is used for both directions - Returns: - PIL.Image or np.array: Cropped image. + Returns: + PIL.Image or np.array: Cropped image. - Examples: - .. code-block:: python + Examples: + .. 
code-block:: python - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F + import numpy as np + from PIL import Image + from paddle.vision.transforms import functional as F - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') + fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - fake_img = Image.fromarray(fake_img) + fake_img = Image.fromarray(fake_img) - cropped_img = F.center_crop(fake_img, (150, 100)) - print(cropped_img.size) - """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + cropped_img = F.center_crop(fake_img, (150, 100)) + print(cropped_img.size) + """ + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.center_crop(img, output_size) @@ -308,11 +323,14 @@ def hflip(img): print(flpped_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.hflip(img) @@ -346,11 +364,14 @@ def vflip(img): print(flpped_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.vflip(img) @@ -391,11 +412,14 @@ def adjust_brightness(img, brightness_factor): """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.adjust_brightness(img, brightness_factor) @@ -431,11 +455,14 @@ def adjust_contrast(img, contrast_factor): converted_img = F.adjust_contrast(fake_img, 0.4) print(converted_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. 
Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.adjust_contrast(img, contrast_factor) @@ -472,11 +499,14 @@ def adjust_saturation(img, saturation_factor): print(converted_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.adjust_saturation(img, saturation_factor) @@ -522,11 +552,14 @@ def adjust_hue(img, hue_factor): print(converted_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.adjust_hue(img, hue_factor) @@ -567,14 +600,16 @@ def _get_affine_matrix(center, angle, translate, scale, shear): return matrix -def affine(img, - angle, - translate, - scale, - shear, - interpolation="nearest", - fill=0, - center=None): +def affine( + img, + angle, + translate, + scale, + shear, + interpolation="nearest", + fill=0, + center=None, +): """Apply affine transformation on the image. Args: @@ -615,11 +650,14 @@ def affine(img, print(affined_img.shape) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if not isinstance(angle, (int, float)): raise TypeError("Argument angle should be int or float") @@ -635,7 +673,8 @@ def affine(img, if not isinstance(shear, (numbers.Number, (list, tuple))): raise TypeError( - "Shear should be either a single value or a sequence of two values") + "Shear should be either a single value or a sequence of two values" + ) if not isinstance(interpolation, str): raise TypeError("Argument interpolation should be a string") @@ -657,7 +696,8 @@ def affine(img, if len(shear) != 2: raise ValueError( - f"Shear should be a sequence containing two values. Got {shear}") + f"Shear should be a sequence containing two values. 
Got {shear}" + ) if center is not None and not isinstance(center, (list, tuple)): raise TypeError("Argument center should be a sequence") @@ -680,8 +720,9 @@ def affine(img, # otherwise image rotated by 90 degrees is shifted vs output image of F_t.affine if center is None: center = (width * 0.5, height * 0.5) - return F_cv2.affine(img, angle, translate, scale, shear, interpolation, - fill, center) + return F_cv2.affine( + img, angle, translate, scale, shear, interpolation, fill, center + ) if _is_tensor_image(img): center_f = [0.0, 0.0] @@ -696,12 +737,9 @@ def affine(img, return F_t.affine(img, matrix, interpolation, fill) -def rotate(img, - angle, - interpolation="nearest", - expand=False, - center=None, - fill=0): +def rotate( + img, angle, interpolation="nearest", expand=False, center=None, fill=0 +): """Rotates the image by angle. @@ -747,11 +785,14 @@ def rotate(img, print(rotated_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if isinstance(center, list): center = tuple(center) @@ -784,10 +825,24 @@ def _get_perspective_coeffs(startpoints, endpoints): for i, (p1, p2) in enumerate(zip(endpoints, startpoints)): a_matrix[2 * i, :] = [ - p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1] + p1[0], + p1[1], + 1, + 0, + 0, + 0, + -p2[0] * p1[0], + -p2[0] * p1[1], ] a_matrix[2 * i + 1, :] = [ - 0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1] + 0, + 0, + 0, + p1[0], + p1[1], + 1, + -p2[1] * p1[0], + -p2[1] * p1[1], ] b_matrix = np.array(startpoints).reshape([8]) @@ -838,11 +893,14 @@ def perspective(img, startpoints, endpoints, interpolation='nearest', fill=0): print(perspectived_img.shape) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): coeffs = _get_perspective_coeffs(startpoints, endpoints) @@ -851,8 +909,9 @@ def perspective(img, startpoints, endpoints, interpolation='nearest', fill=0): coeffs = _get_perspective_coeffs(startpoints, endpoints) return F_t.perspective(img, coeffs, interpolation, fill) else: - return F_cv2.perspective(img, startpoints, endpoints, interpolation, - fill) + return F_cv2.perspective( + img, startpoints, endpoints, interpolation, fill + ) def to_grayscale(img, num_output_channels=1): @@ -882,11 +941,14 @@ def to_grayscale(img, num_output_channels=1): print(gray_img.size) """ - if not (_is_pil_image(img) or _is_numpy_image(img) - or _is_tensor_image(img)): + if not ( + _is_pil_image(img) or _is_numpy_image(img) or _is_tensor_image(img) + ): raise TypeError( - 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}' - .format(type(img))) + 'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. 
Got {}'.format( + type(img) + ) + ) if _is_pil_image(img): return F_pil.to_grayscale(img, num_output_channels) @@ -942,53 +1004,53 @@ def normalize(img, mean, std, data_format='CHW', to_rgb=False): def erase(img, i, j, h, w, v, inplace=False): """Erase the pixels of selected area in input image with given value. - Args: - img (paddle.Tensor | np.array | PIL.Image): input Tensor image. - For Tensor input, the shape should be (C, H, W). For np.array input, - the shape should be (H, W, C). - i (int): y coordinate of the top-left point of erased region. - j (int): x coordinate of the top-left point of erased region. - h (int): Height of the erased region. - w (int): Width of the erased region. - v (paddle.Tensor | np.array): value used to replace the pixels in erased region. It - should be np.array when img is np.array or PIL.Image. - inplace (bool, optional): Whether this transform is inplace. Default: False. - - Returns: - paddle.Tensor | np.array | PIL.Image: Erased image. The type is same with input image. - - Examples: - .. code-block:: python - - import paddle - - fake_img = paddle.randn((3, 2, 4)).astype(paddle.float32) - print(fake_img) - - #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [[[ 0.02169025, -0.97859967, -1.39175487, -1.07478464], - # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]], - # - # [[-0.14993843, 1.10793507, -0.40056887, -1.94395220], - # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]], - # - # [[-0.24998808, -1.47699273, -0.88838995, 0.42629015], - # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]]) - - values = paddle.zeros((1,1,1), dtype=paddle.float32) - result = paddle.vision.transforms.erase(fake_img, 0, 1, 1, 2, values) - - print(result) - - #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, - # [[[ 0.02169025, 0. , 0. , -1.07478464], - # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]], - # - # [[-0.14993843, 0. , 0. , -1.94395220], - # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]], - # - # [[-0.24998808, 0. , 0. , 0.42629015], - # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]]) + Args: + img (paddle.Tensor | np.array | PIL.Image): input Tensor image. + For Tensor input, the shape should be (C, H, W). For np.array input, + the shape should be (H, W, C). + i (int): y coordinate of the top-left point of erased region. + j (int): x coordinate of the top-left point of erased region. + h (int): Height of the erased region. + w (int): Width of the erased region. + v (paddle.Tensor | np.array): value used to replace the pixels in erased region. It + should be np.array when img is np.array or PIL.Image. + inplace (bool, optional): Whether this transform is inplace. Default: False. + + Returns: + paddle.Tensor | np.array | PIL.Image: Erased image. The type is same with input image. + + Examples: + .. 
code-block:: python + + import paddle + + fake_img = paddle.randn((3, 2, 4)).astype(paddle.float32) + print(fake_img) + + #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, + # [[[ 0.02169025, -0.97859967, -1.39175487, -1.07478464], + # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]], + # + # [[-0.14993843, 1.10793507, -0.40056887, -1.94395220], + # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]], + # + # [[-0.24998808, -1.47699273, -0.88838995, 0.42629015], + # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]]) + + values = paddle.zeros((1,1,1), dtype=paddle.float32) + result = paddle.vision.transforms.erase(fake_img, 0, 1, 1, 2, values) + + print(result) + + #Tensor(shape=[3, 2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, + # [[[ 0.02169025, 0. , 0. , -1.07478464], + # [ 0.20654772, 1.74624777, 0.32268861, -0.13857445]], + # + # [[-0.14993843, 0. , 0. , -1.94395220], + # [ 0.41686651, 0.44551995, -0.09356714, -0.60898107]], + # + # [[-0.24998808, 0. , 0. , 0.42629015], + # [ 0.56948012, -0.96200180, 0.53355658, 3.20450878]]]) """ if _is_tensor_image(img): diff --git a/python/paddle/vision/transforms/functional_cv2.py b/python/paddle/vision/transforms/functional_cv2.py index ccd638089af7be21c649298a4a0c942b42b27c6c..52609b786fe1f5376e0f2d2117d2451996aa104a 100644 --- a/python/paddle/vision/transforms/functional_cv2.py +++ b/python/paddle/vision/transforms/functional_cv2.py @@ -49,7 +49,8 @@ def to_tensor(pic, data_format='CHW'): if data_format not in ['CHW', 'HWC']: raise ValueError( - 'data_format should be CHW or HWC. Got {}'.format(data_format)) + 'data_format should be CHW or HWC. Got {}'.format(data_format) + ) if pic.ndim == 2: pic = pic[:, :, None] @@ -60,7 +61,7 @@ def to_tensor(pic, data_format='CHW'): img = paddle.to_tensor(pic) if paddle.fluid.data_feeder.convert_dtype(img.dtype) == 'uint8': - return paddle.cast(img, np.float32) / 255. 
+ return paddle.cast(img, np.float32) / 255.0 else: return img @@ -90,11 +91,12 @@ def resize(img, size, interpolation='bilinear'): 'bilinear': cv2.INTER_LINEAR, 'area': cv2.INTER_AREA, 'bicubic': cv2.INTER_CUBIC, - 'lanczos': cv2.INTER_LANCZOS4 + 'lanczos': cv2.INTER_LANCZOS4, } - if not (isinstance(size, int) or - (isinstance(size, Iterable) and len(size) == 2)): + if not ( + isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2) + ): raise TypeError('Got inappropriate size arg: {}'.format(size)) h, w = img.shape[:2] @@ -108,18 +110,22 @@ def resize(img, size, interpolation='bilinear'): output = cv2.resize( img, dsize=(ow, oh), - interpolation=_cv2_interp_from_str[interpolation]) + interpolation=_cv2_interp_from_str[interpolation], + ) else: oh = size ow = int(size * w / h) output = cv2.resize( img, dsize=(ow, oh), - interpolation=_cv2_interp_from_str[interpolation]) + interpolation=_cv2_interp_from_str[interpolation], + ) else: - output = cv2.resize(img, - dsize=(size[1], size[0]), - interpolation=_cv2_interp_from_str[interpolation]) + output = cv2.resize( + img, + dsize=(size[1], size[0]), + interpolation=_cv2_interp_from_str[interpolation], + ) if len(img.shape) == 3 and img.shape[2] == 1: return output[:, :, np.newaxis] else: @@ -165,7 +171,7 @@ def pad(img, padding, fill=0, padding_mode='constant'): 'constant': cv2.BORDER_CONSTANT, 'edge': cv2.BORDER_REPLICATE, 'reflect': cv2.BORDER_REFLECT_101, - 'symmetric': cv2.BORDER_REFLECT + 'symmetric': cv2.BORDER_REFLECT, } if not isinstance(padding, (numbers.Number, list, tuple)): @@ -177,11 +183,16 @@ def pad(img, padding, fill=0, padding_mode='constant'): if isinstance(padding, Sequence) and len(padding) not in [2, 4]: raise ValueError( - "Padding must be an int or a 2, or 4 element tuple, not a " + - "{} element tuple".format(len(padding))) + "Padding must be an int or a 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding)) + ) - assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \ - 'Padding mode should be either constant, edge, reflect or symmetric' + assert padding_mode in [ + 'constant', + 'edge', + 'reflect', + 'symmetric', + ], 'Padding mode should be either constant, edge, reflect or symmetric' if isinstance(padding, list): padding = tuple(padding) @@ -197,21 +208,25 @@ def pad(img, padding, fill=0, padding_mode='constant'): pad_bottom = padding[3] if len(img.shape) == 3 and img.shape[2] == 1: - return cv2.copyMakeBorder(img, - top=pad_top, - bottom=pad_bottom, - left=pad_left, - right=pad_right, - borderType=_cv2_pad_from_str[padding_mode], - value=fill)[:, :, np.newaxis] + return cv2.copyMakeBorder( + img, + top=pad_top, + bottom=pad_bottom, + left=pad_left, + right=pad_right, + borderType=_cv2_pad_from_str[padding_mode], + value=fill, + )[:, :, np.newaxis] else: - return cv2.copyMakeBorder(img, - top=pad_top, - bottom=pad_bottom, - left=pad_left, - right=pad_right, - borderType=_cv2_pad_from_str[padding_mode], - value=fill) + return cv2.copyMakeBorder( + img, + top=pad_top, + bottom=pad_bottom, + left=pad_left, + right=pad_right, + borderType=_cv2_pad_from_str[padding_mode], + value=fill, + ) def crop(img, top, left, height, width): @@ -230,30 +245,30 @@ def crop(img, top, left, height, width): """ - return img[top:top + height, left:left + width, :] + return img[top : top + height, left : left + width, :] def center_crop(img, output_size): """Crops the given image and resize it to desired size. - Args: - img (np.array): Image to be cropped. 
(0,0) denotes the top left corner of the image. - output_size (sequence or int): (height, width) of the crop box. If int, - it is used for both directions - backend (str, optional): The image proccess backend type. Options are `pil`, `cv2`. Default: 'pil'. + Args: + img (np.array): Image to be cropped. (0,0) denotes the top left corner of the image. + output_size (sequence or int): (height, width) of the crop box. If int, + it is used for both directions + backend (str, optional): The image proccess backend type. Options are `pil`, `cv2`. Default: 'pil'. - Returns: - np.array: Cropped image. + Returns: + np.array: Cropped image. - """ + """ if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) h, w = img.shape[0:2] th, tw = output_size - i = int(round((h - th) / 2.)) - j = int(round((w - tw) / 2.)) + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) return crop(img, i, j, th, tw) @@ -305,8 +320,11 @@ def adjust_brightness(img, brightness_factor): """ cv2 = try_import('cv2') - table = np.array([i * brightness_factor - for i in range(0, 256)]).clip(0, 255).astype('uint8') + table = ( + np.array([i * brightness_factor for i in range(0, 256)]) + .clip(0, 255) + .astype('uint8') + ) if len(img.shape) == 3 and img.shape[2] == 1: return cv2.LUT(img, table)[:, :, np.newaxis] @@ -329,8 +347,11 @@ def adjust_contrast(img, contrast_factor): """ cv2 = try_import('cv2') - table = np.array([(i - 74) * contrast_factor + 74 - for i in range(0, 256)]).clip(0, 255).astype('uint8') + table = ( + np.array([(i - 74) * contrast_factor + 74 for i in range(0, 256)]) + .clip(0, 255) + .astype('uint8') + ) if len(img.shape) == 3 and img.shape[2] == 1: return cv2.LUT(img, table)[:, :, np.newaxis] else: @@ -354,8 +375,9 @@ def adjust_saturation(img, saturation_factor): dtype = img.dtype img = img.astype(np.float32) - alpha = np.random.uniform(max(0, 1 - saturation_factor), - 1 + saturation_factor) + alpha = np.random.uniform( + max(0, 1 - saturation_factor), 1 + saturation_factor + ) gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) gray_img = gray_img[..., np.newaxis] img = img * alpha + gray_img * (1 - alpha) @@ -388,7 +410,8 @@ def adjust_hue(img, hue_factor): if not (-0.5 <= hue_factor <= 0.5): raise ValueError( - 'hue_factor:{} is not in [-0.5, 0.5].'.format(hue_factor)) + 'hue_factor:{} is not in [-0.5, 0.5].'.format(hue_factor) + ) dtype = img.dtype img = img.astype(np.uint8) @@ -404,14 +427,16 @@ def adjust_hue(img, hue_factor): return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR_FULL).astype(dtype) -def affine(img, - angle, - translate, - scale, - shear, - interpolation='nearest', - fill=0, - center=None): +def affine( + img, + angle, + translate, + scale, + shear, + interpolation='nearest', + fill=0, + center=None, +): """Affine the image by matrix. 
Args: @@ -442,7 +467,7 @@ def affine(img, 'bilinear': cv2.INTER_LINEAR, 'area': cv2.INTER_AREA, 'bicubic': cv2.INTER_CUBIC, - 'lanczos': cv2.INTER_LANCZOS4 + 'lanczos': cv2.INTER_LANCZOS4, } h, w = img.shape[0:2] @@ -469,25 +494,26 @@ def affine(img, M[1, 2] = ty if len(img.shape) == 3 and img.shape[2] == 1: - return cv2.warpAffine(img, - M, - dsize=(w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill)[:, :, np.newaxis] + return cv2.warpAffine( + img, + M, + dsize=(w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + )[:, :, np.newaxis] else: - return cv2.warpAffine(img, - M, - dsize=(w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill) - - -def rotate(img, - angle, - interpolation='nearest', - expand=False, - center=None, - fill=0): + return cv2.warpAffine( + img, + M, + dsize=(w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + ) + + +def rotate( + img, angle, interpolation='nearest', expand=False, center=None, fill=0 +): """Rotates the image by angle. Args: @@ -519,7 +545,7 @@ def rotate(img, 'bilinear': cv2.INTER_LINEAR, 'area': cv2.INTER_AREA, 'bicubic': cv2.INTER_CUBIC, - 'lanczos': cv2.INTER_LANCZOS4 + 'lanczos': cv2.INTER_LANCZOS4, } h, w = img.shape[0:2] @@ -549,8 +575,10 @@ def rotate(img, post_trans = (0, 0) expand_matrix[2], expand_matrix[5] = transform( - -center[0] - post_trans[0], -center[1] - post_trans[1], - expand_matrix) + -center[0] - post_trans[0], + -center[1] - post_trans[1], + expand_matrix, + ) expand_matrix[2] += center[0] expand_matrix[5] += center[1] @@ -567,15 +595,21 @@ def rotate(img, w, h = int(nw), int(nh) if len(img.shape) == 3 and img.shape[2] == 1: - return cv2.warpAffine(img, - M, (w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill)[:, :, np.newaxis] + return cv2.warpAffine( + img, + M, + (w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + )[:, :, np.newaxis] else: - return cv2.warpAffine(img, - M, (w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill) + return cv2.warpAffine( + img, + M, + (w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + ) def perspective(img, startpoints, endpoints, interpolation='nearest', fill=0): @@ -604,7 +638,7 @@ def perspective(img, startpoints, endpoints, interpolation='nearest', fill=0): 'bilinear': cv2.INTER_LINEAR, 'area': cv2.INTER_AREA, 'bicubic': cv2.INTER_CUBIC, - 'lanczos': cv2.INTER_LANCZOS4 + 'lanczos': cv2.INTER_LANCZOS4, } h, w = img.shape[0:2] @@ -613,17 +647,21 @@ def perspective(img, startpoints, endpoints, interpolation='nearest', fill=0): matrix = cv2.getPerspectiveTransform(startpoints, endpoints) if len(img.shape) == 3 and img.shape[2] == 1: - return cv2.warpPerspective(img, - matrix, - dsize=(w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill)[:, :, np.newaxis] + return cv2.warpPerspective( + img, + matrix, + dsize=(w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + )[:, :, np.newaxis] else: - return cv2.warpPerspective(img, - matrix, - dsize=(w, h), - flags=_cv2_interp_from_str[interpolation], - borderValue=fill) + return cv2.warpPerspective( + img, + matrix, + dsize=(w, h), + flags=_cv2_interp_from_str[interpolation], + borderValue=fill, + ) def to_grayscale(img, num_output_channels=1): @@ -646,7 +684,8 @@ def to_grayscale(img, num_output_channels=1): elif num_output_channels == 3: # much faster than doing cvtColor to go back to gray img = np.broadcast_to( - cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, 
:, np.newaxis], img.shape) + cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis], img.shape + ) else: raise ValueError('num_output_channels should be either 1 or 3') @@ -686,21 +725,21 @@ def normalize(img, mean, std, data_format='CHW', to_rgb=False): def erase(img, i, j, h, w, v, inplace=False): """Erase the pixels of selected area in input image array with given value. - Args: - img (np.array): input image array, which shape is (H, W, C). - i (int): y coordinate of the top-left point of erased region. - j (int): x coordinate of the top-left point of erased region. - h (int): Height of the erased region. - w (int): Width of the erased region. - v (np.array): value used to replace the pixels in erased region. - inplace (bool, optional): Whether this transform is inplace. Default: False. + Args: + img (np.array): input image array, which shape is (H, W, C). + i (int): y coordinate of the top-left point of erased region. + j (int): x coordinate of the top-left point of erased region. + h (int): Height of the erased region. + w (int): Width of the erased region. + v (np.array): value used to replace the pixels in erased region. + inplace (bool, optional): Whether this transform is inplace. Default: False. - Returns: - np.array: Erased image. + Returns: + np.array: Erased image. """ if not inplace: img = img.copy() - img[i:i + h, j:j + w, ...] = v + img[i : i + h, j : j + w, ...] = v return img diff --git a/python/paddle/vision/transforms/functional_pil.py b/python/paddle/vision/transforms/functional_pil.py index cd4556effb7d953f86a2aae2bec2c586a29cb9fd..595d92a84b11b0c388cbd5ccc748f63a906f2e01 100644 --- a/python/paddle/vision/transforms/functional_pil.py +++ b/python/paddle/vision/transforms/functional_pil.py @@ -35,7 +35,7 @@ try: 'bicubic': Image.Resampling.BICUBIC, 'box': Image.Resampling.BOX, 'lanczos': Image.Resampling.LANCZOS, - 'hamming': Image.Resampling.HAMMING + 'hamming': Image.Resampling.HAMMING, } except: _pil_interp_from_str = { @@ -44,7 +44,7 @@ except: 'bicubic': Image.BICUBIC, 'box': Image.BOX, 'lanczos': Image.LANCZOS, - 'hamming': Image.HAMMING + 'hamming': Image.HAMMING, } __all__ = [] @@ -67,7 +67,8 @@ def to_tensor(pic, data_format='CHW'): if data_format not in ['CHW', 'HWC']: raise ValueError( - 'data_format should be CHW or HWC. Got {}'.format(data_format)) + 'data_format should be CHW or HWC. Got {}'.format(data_format) + ) # PIL Image if pic.mode == 'I': @@ -91,7 +92,7 @@ def to_tensor(pic, data_format='CHW'): dtype = paddle.fluid.data_feeder.convert_dtype(img.dtype) if dtype == 'uint8': - img = paddle.cast(img, np.float32) / 255. 
+ img = paddle.cast(img, np.float32) / 255.0 img = img.reshape([pic.size[1], pic.size[0], nchannel]) @@ -122,8 +123,9 @@ def resize(img, size, interpolation='bilinear'): """ - if not (isinstance(size, int) or - (isinstance(size, Iterable) and len(size) == 2)): + if not ( + isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2) + ): raise TypeError('Got inappropriate size arg: {}'.format(size)) if isinstance(size, int): @@ -186,11 +188,16 @@ def pad(img, padding, fill=0, padding_mode='constant'): if isinstance(padding, Sequence) and len(padding) not in [2, 4]: raise ValueError( - "Padding must be an int or a 2, or 4 element tuple, not a " + - "{} element tuple".format(len(padding))) + "Padding must be an int or a 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding)) + ) - assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \ - 'Padding mode should be either constant, edge, reflect or symmetric' + assert padding_mode in [ + 'constant', + 'edge', + 'reflect', + 'symmetric', + ], 'Padding mode should be either constant, edge, reflect or symmetric' if isinstance(padding, list): padding = tuple(padding) @@ -217,8 +224,11 @@ def pad(img, padding, fill=0, padding_mode='constant'): if img.mode == 'P': palette = img.getpalette() img = np.asarray(img) - img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), - padding_mode) + img = np.pad( + img, + ((pad_top, pad_bottom), (pad_left, pad_right)), + padding_mode, + ) img = Image.fromarray(img) img.putpalette(palette) return img @@ -226,13 +236,18 @@ def pad(img, padding, fill=0, padding_mode='constant'): img = np.asarray(img) # RGB image if len(img.shape) == 3: - img = np.pad(img, - ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), - padding_mode) + img = np.pad( + img, + ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), + padding_mode, + ) # Grayscale image if len(img.shape) == 2: - img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), - padding_mode) + img = np.pad( + img, + ((pad_top, pad_bottom), (pad_left, pad_right)), + padding_mode, + ) return Image.fromarray(img) @@ -258,24 +273,24 @@ def crop(img, top, left, height, width): def center_crop(img, output_size): """Crops the given PIL Image and resize it to desired size. - Args: - img (PIL.Image): Image to be cropped. (0,0) denotes the top left corner of the image. - output_size (sequence or int): (height, width) of the crop box. If int, - it is used for both directions - backend (str, optional): The image proccess backend type. Options are `pil`, `cv2`. Default: 'pil'. + Args: + img (PIL.Image): Image to be cropped. (0,0) denotes the top left corner of the image. + output_size (sequence or int): (height, width) of the crop box. If int, + it is used for both directions + backend (str, optional): The image proccess backend type. Options are `pil`, `cv2`. Default: 'pil'. - Returns: - PIL.Image: Cropped image. + Returns: + PIL.Image: Cropped image. 
- """ + """ if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) image_width, image_height = img.size crop_height, crop_width = output_size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) return crop(img, crop_top, crop_left, crop_height, crop_width) @@ -388,7 +403,8 @@ def adjust_hue(img, hue_factor): """ if not (-0.5 <= hue_factor <= 0.5): raise ValueError( - 'hue_factor:{} is not in [-0.5, 0.5].'.format(hue_factor)) + 'hue_factor:{} is not in [-0.5, 0.5].'.format(hue_factor) + ) input_mode = img.mode if input_mode in {'L', '1', 'I', 'F'}: @@ -428,16 +444,18 @@ def affine(img, matrix, interpolation="nearest", fill=0): if isinstance(fill, int): fill = tuple([fill] * 3) - return img.transform(img.size, Image.AFFINE, matrix, - _pil_interp_from_str[interpolation], fill) + return img.transform( + img.size, + Image.AFFINE, + matrix, + _pil_interp_from_str[interpolation], + fill, + ) -def rotate(img, - angle, - interpolation="nearest", - expand=False, - center=None, - fill=0): +def rotate( + img, angle, interpolation="nearest", expand=False, center=None, fill=0 +): """Rotates the image by angle. Args: @@ -467,11 +485,13 @@ def rotate(img, if isinstance(fill, int): fill = tuple([fill] * 3) - return img.rotate(angle, - _pil_interp_from_str[interpolation], - expand, - center, - fillcolor=fill) + return img.rotate( + angle, + _pil_interp_from_str[interpolation], + expand, + center, + fillcolor=fill, + ) def perspective(img, coeffs, interpolation="nearest", fill=0): @@ -497,8 +517,13 @@ def perspective(img, coeffs, interpolation="nearest", fill=0): if isinstance(fill, int): fill = tuple([fill] * 3) - return img.transform(img.size, Image.PERSPECTIVE, coeffs, - _pil_interp_from_str[interpolation], fill) + return img.transform( + img.size, + Image.PERSPECTIVE, + coeffs, + _pil_interp_from_str[interpolation], + fill, + ) def to_grayscale(img, num_output_channels=1): @@ -532,22 +557,22 @@ def to_grayscale(img, num_output_channels=1): def erase(img, i, j, h, w, v, inplace=False): """Erase the pixels of selected area in input image with given value. PIL format is - not support inplace. + not support inplace. - Args: - img (PIL.Image): input image, which shape is (C, H, W). - i (int): y coordinate of the top-left point of erased region. - j (int): x coordinate of the top-left point of erased region. - h (int): Height of the erased region. - w (int): Width of the erased region. - v (np.array): value used to replace the pixels in erased region. - inplace (bool, optional): Whether this transform is inplace. Default: False. + Args: + img (PIL.Image): input image, which shape is (C, H, W). + i (int): y coordinate of the top-left point of erased region. + j (int): x coordinate of the top-left point of erased region. + h (int): Height of the erased region. + w (int): Width of the erased region. + v (np.array): value used to replace the pixels in erased region. + inplace (bool, optional): Whether this transform is inplace. Default: False. - Returns: - PIL.Image: Erased image. + Returns: + PIL.Image: Erased image. """ np_img = np.array(img, dtype=np.uint8) - np_img[i:i + h, j:j + w, ...] = v + np_img[i : i + h, j : j + w, ...] 
= v img = Image.fromarray(np_img, 'RGB') return img diff --git a/python/paddle/vision/transforms/functional_tensor.py b/python/paddle/vision/transforms/functional_tensor.py index 911f7e2d260bd62cf8280c2ceb85cbe9d07c7341..d18fdfc51c9c1f3ee21a9fc1a850c202038d8949 100644 --- a/python/paddle/vision/transforms/functional_tensor.py +++ b/python/paddle/vision/transforms/functional_tensor.py @@ -22,13 +22,17 @@ __all__ = [] def _assert_image_tensor(img, data_format): - if not isinstance( - img, paddle.Tensor - ) or img.ndim < 3 or img.ndim > 4 or not data_format.lower() in ('chw', - 'hwc'): + if ( + not isinstance(img, paddle.Tensor) + or img.ndim < 3 + or img.ndim > 4 + or not data_format.lower() in ('chw', 'hwc') + ): raise RuntimeError( - 'not support [type={}, ndim={}, data_format={}] paddle image'. - format(type(img), img.ndim, data_format)) + 'not support [type={}, ndim={}, data_format={}] paddle image'.format( + type(img), img.ndim, data_format + ) + ) def _get_image_h_axis(data_format): @@ -78,13 +82,15 @@ def _get_image_num_channels(img, data_format): def _get_image_size(img, data_format): - return img.shape[_get_image_w_axis(data_format)], img.shape[ - _get_image_h_axis(data_format)] + return ( + img.shape[_get_image_w_axis(data_format)], + img.shape[_get_image_h_axis(data_format)], + ) def _rgb_to_hsv(img): """Convert a image Tensor from RGB to HSV. This implementation is based on Pillow ( - https://github.com/python-pillow/Pillow/blob/main/src/libImaging/Convert.c) + https://github.com/python-pillow/Pillow/blob/main/src/libImaging/Convert.c) """ maxc = img.max(axis=-3) minc = img.min(axis=-3) @@ -111,8 +117,7 @@ def _rgb_to_hsv(img): def _hsv_to_rgb(img): - """Convert a image Tensor from HSV to RGB. - """ + """Convert a image Tensor from HSV to RGB.""" h, s, v = img.unbind(axis=-3) f = h * 6.0 i = paddle.floor(f) @@ -123,22 +128,28 @@ def _hsv_to_rgb(img): q = paddle.clip(v * (1.0 - s * f), 0.0, 1.0) t = paddle.clip(v * (1.0 - s * (1.0 - f)), 0.0, 1.0) - mask = paddle.equal(i.unsqueeze(axis=-3), - paddle.arange(6, dtype=i.dtype).reshape( - (-1, 1, 1))).astype(img.dtype) - matrix = paddle.stack([ - paddle.stack([v, q, p, p, t, v], axis=-3), - paddle.stack([t, v, v, q, p, p], axis=-3), - paddle.stack([p, p, t, v, v, q], axis=-3) - ], - axis=-4) + mask = paddle.equal( + i.unsqueeze(axis=-3), + paddle.arange(6, dtype=i.dtype).reshape((-1, 1, 1)), + ).astype(img.dtype) + matrix = paddle.stack( + [ + paddle.stack([v, q, p, p, t, v], axis=-3), + paddle.stack([t, v, v, q, p, p], axis=-3), + paddle.stack([p, p, t, v, v, q], axis=-3), + ], + axis=-4, + ) return paddle.einsum("...ijk, ...xijk -> ...xjk", mask, matrix) def _blend_images(img1, img2, ratio): max_value = 1.0 if paddle.is_floating_point(img1) else 255.0 - return paddle.lerp(img2, img1, - float(ratio)).clip(0, max_value).astype(img1.dtype) + return ( + paddle.lerp(img2, img1, float(ratio)) + .clip(0, max_value) + .astype(img1.dtype) + ) def normalize(img, mean, std, data_format='CHW'): @@ -186,8 +197,9 @@ def to_grayscale(img, num_output_channels=1, data_format='CHW'): if num_output_channels not in (1, 3): raise ValueError('num_output_channels should be either 1 or 3') - rgb_weights = paddle.to_tensor([0.2989, 0.5870, 0.1140], - place=img.place).astype(img.dtype) + rgb_weights = paddle.to_tensor( + [0.2989, 0.5870, 0.1140], place=img.place + ).astype(img.dtype) if _is_channel_first(data_format): rgb_weights = rgb_weights.reshape((-1, 1, 1)) @@ -210,8 +222,9 @@ def _affine_grid(theta, w, h, ow, oh): y_grid = paddle.linspace(-oh * 0.5 + 
d, oh * 0.5 + d - 1, oh).unsqueeze_(-1) base_grid[..., 1] = y_grid - scaled_theta = theta.transpose( - (0, 2, 1)) / paddle.to_tensor([0.5 * w, 0.5 * h]) + scaled_theta = theta.transpose((0, 2, 1)) / paddle.to_tensor( + [0.5 * w, 0.5 * h] + ) output_grid = base_grid.reshape((1, oh * ow, 3)).bmm(scaled_theta) return output_grid.reshape((1, oh, ow, 2)) @@ -220,18 +233,18 @@ def _affine_grid(theta, w, h, ow, oh): def _grid_transform(img, grid, mode, fill): if img.shape[0] > 1: grid = grid.expand( - shape=[img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]]) + shape=[img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]] + ) if fill is not None: - dummy = paddle.ones((img.shape[0], 1, img.shape[2], img.shape[3]), - dtype=img.dtype) + dummy = paddle.ones( + (img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype + ) img = paddle.concat((img, dummy), axis=1) - img = F.grid_sample(img, - grid, - mode=mode, - padding_mode="zeros", - align_corners=False) + img = F.grid_sample( + img, grid, mode=mode, padding_mode="zeros", align_corners=False + ) # Fill with required color if fill is not None: @@ -239,12 +252,13 @@ def _grid_transform(img, grid, mode, fill): img = img[:, :-1, :, :] # n c h w mask = mask.expand_as(img) len_fill = len(fill) if isinstance(fill, (tuple, list)) else 1 - fill_img = paddle.to_tensor(fill).reshape( - (1, len_fill, 1, 1)).expand_as(img) + fill_img = ( + paddle.to_tensor(fill).reshape((1, len_fill, 1, 1)).expand_as(img) + ) if mode == 'nearest': mask = paddle.cast(mask < 0.5, img.dtype) - img = img * (1. - mask) + mask * fill_img + img = img * (1.0 - mask) + mask * fill_img else: # 'bilinear' img = img * mask + (1.0 - mask) * fill_img @@ -282,11 +296,9 @@ def affine(img, matrix, interpolation="nearest", fill=None, data_format='CHW'): matrix = matrix.reshape((1, 2, 3)) shape = img.shape - grid = _affine_grid(matrix, - w=shape[-1], - h=shape[-2], - ow=shape[-1], - oh=shape[-2]) + grid = _affine_grid( + matrix, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2] + ) if isinstance(fill, int): fill = tuple([fill] * 3) @@ -299,13 +311,15 @@ def affine(img, matrix, interpolation="nearest", fill=None, data_format='CHW'): return out -def rotate(img, - angle, - interpolation='nearest', - expand=False, - center=None, - fill=None, - data_format='CHW'): +def rotate( + img, + angle, + interpolation='nearest', + expand=False, + center=None, + fill=None, + data_format='CHW', +): """Rotates the image by angle. 
Args: @@ -358,9 +372,11 @@ def rotate(img, ] matrix[2] += matrix[0] * (-rotn_center[0] - post_trans[0]) + matrix[1] * ( - -rotn_center[1] - post_trans[1]) + -rotn_center[1] - post_trans[1] + ) matrix[5] += matrix[3] * (-rotn_center[0] - post_trans[0]) + matrix[4] * ( - -rotn_center[1] - post_trans[1]) + -rotn_center[1] - post_trans[1] + ) matrix[2] += rotn_center[0] matrix[5] += rotn_center[1] @@ -371,12 +387,20 @@ def rotate(img, if expand: # calculate output size corners = paddle.to_tensor( - [[-0.5 * w, -0.5 * h, 1.0], [-0.5 * w, 0.5 * h, 1.0], - [0.5 * w, 0.5 * h, 1.0], [0.5 * w, -0.5 * h, 1.0]], - place=matrix.place).astype(matrix.dtype) - - _pos = corners.reshape((1, -1, 3)).bmm(matrix.transpose( - (0, 2, 1))).reshape((1, -1, 2)) + [ + [-0.5 * w, -0.5 * h, 1.0], + [-0.5 * w, 0.5 * h, 1.0], + [0.5 * w, 0.5 * h, 1.0], + [0.5 * w, -0.5 * h, 1.0], + ], + place=matrix.place, + ).astype(matrix.dtype) + + _pos = ( + corners.reshape((1, -1, 3)) + .bmm(matrix.transpose((0, 2, 1))) + .reshape((1, -1, 2)) + ) _min = _pos.min(axis=-2).floor() _max = _pos.max(axis=-2).ceil() @@ -412,21 +436,21 @@ def _perspective_grid(img, coeffs, ow, oh, dtype): y_grid = paddle.linspace(d, oh * 1.0 + d - 1.0, oh).unsqueeze_(-1) base_grid[..., 1] = y_grid - scaled_theta1 = theta1.transpose( - (0, 2, 1)) / paddle.to_tensor([0.5 * ow, 0.5 * oh]) + scaled_theta1 = theta1.transpose((0, 2, 1)) / paddle.to_tensor( + [0.5 * ow, 0.5 * oh] + ) output_grid1 = base_grid.reshape((1, oh * ow, 3)).bmm(scaled_theta1) - output_grid2 = base_grid.reshape( - (1, oh * ow, 3)).bmm(theta2.transpose((0, 2, 1))) + output_grid2 = base_grid.reshape((1, oh * ow, 3)).bmm( + theta2.transpose((0, 2, 1)) + ) output_grid = output_grid1 / output_grid2 - 1.0 return output_grid.reshape((1, oh, ow, 2)) -def perspective(img, - coeffs, - interpolation="nearest", - fill=None, - data_format='CHW'): +def perspective( + img, coeffs, interpolation="nearest", fill=None, data_format='CHW' +): """Perspective the image. Args: @@ -521,48 +545,48 @@ def crop(img, top, left, height, width, data_format='CHW'): _assert_image_tensor(img, data_format) if _is_channel_first(data_format): - return img[:, top:top + height, left:left + width] + return img[:, top : top + height, left : left + width] else: - return img[top:top + height, left:left + width, :] + return img[top : top + height, left : left + width, :] def erase(img, i, j, h, w, v, inplace=False): """Erase the pixels of selected area in input Tensor image with given value. - Args: - img (paddle.Tensor): input Tensor image. - i (int): y coordinate of the top-left point of erased region. - j (int): x coordinate of the top-left point of erased region. - h (int): Height of the erased region. - w (int): Width of the erased region. - v (paddle.Tensor): value used to replace the pixels in erased region. - inplace (bool, optional): Whether this transform is inplace. Default: False. + Args: + img (paddle.Tensor): input Tensor image. + i (int): y coordinate of the top-left point of erased region. + j (int): x coordinate of the top-left point of erased region. + h (int): Height of the erased region. + w (int): Width of the erased region. + v (paddle.Tensor): value used to replace the pixels in erased region. + inplace (bool, optional): Whether this transform is inplace. Default: False. - Returns: - paddle.Tensor: Erased image. + Returns: + paddle.Tensor: Erased image. 
""" _assert_image_tensor(img, 'CHW') if not inplace: img = img.clone() - img[..., i:i + h, j:j + w] = v + img[..., i : i + h, j : j + w] = v return img def center_crop(img, output_size, data_format='CHW'): """Crops the given paddle.Tensor Image and resize it to desired size. - Args: - img (paddle.Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. - output_size (sequence or int): (height, width) of the crop box. If int, - it is used for both directions - data_format (str, optional): Data format of img, should be 'HWC' or - 'CHW'. Default: 'CHW'. - Returns: - paddle.Tensor: Cropped image. + Args: + img (paddle.Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + output_size (sequence or int): (height, width) of the crop box. If int, + it is used for both directions + data_format (str, optional): Data format of img, should be 'HWC' or + 'CHW'. Default: 'CHW'. + Returns: + paddle.Tensor: Cropped image. - """ + """ _assert_image_tensor(img, data_format) if isinstance(output_size, numbers.Number): @@ -570,14 +594,16 @@ def center_crop(img, output_size, data_format='CHW'): image_width, image_height = _get_image_size(img, data_format) crop_height, crop_width = output_size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, - crop_top, - crop_left, - crop_height, - crop_width, - data_format=data_format) + crop_top = int(round((image_height - crop_height) / 2.0)) + crop_left = int(round((image_width - crop_width) / 2.0)) + return crop( + img, + crop_top, + crop_left, + crop_height, + crop_width, + data_format=data_format, + ) def pad(img, padding, fill=0, padding_mode='constant', data_format='CHW'): @@ -625,11 +651,16 @@ def pad(img, padding, fill=0, padding_mode='constant', data_format='CHW'): if isinstance(padding, (list, tuple)) and len(padding) not in [2, 4]: raise ValueError( - "Padding must be an int or a 2, or 4 element tuple, not a " + - "{} element tuple".format(len(padding))) + "Padding must be an int or a 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding)) + ) - assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \ - 'Padding mode should be either constant, edge, reflect or symmetric' + assert padding_mode in [ + 'constant', + 'edge', + 'reflect', + 'symmetric', + ], 'Padding mode should be either constant, edge, reflect or symmetric' if isinstance(padding, int): pad_left = pad_right = pad_top = pad_bottom = padding @@ -651,11 +682,13 @@ def pad(img, padding, fill=0, padding_mode='constant', data_format='CHW'): img = img.unsqueeze(0) # 'constant', 'reflect', 'replicate', 'circular' - img = F.pad(img, - pad=padding, - mode=padding_mode, - value=float(fill), - data_format='N' + data_format) + img = F.pad( + img, + pad=padding, + mode=padding_mode, + value=float(fill), + data_format='N' + data_format, + ) return img.squeeze(0) @@ -684,8 +717,10 @@ def resize(img, size, interpolation='bilinear', data_format='CHW'): """ _assert_image_tensor(img, data_format) - if not (isinstance(size, int) or - (isinstance(size, (tuple, list)) and len(size) == 2)): + if not ( + isinstance(size, int) + or (isinstance(size, (tuple, list)) and len(size) == 2) + ): raise TypeError('Got inappropriate size arg: {}'.format(size)) if isinstance(size, int): @@ -702,10 +737,12 @@ def resize(img, size, interpolation='bilinear', data_format='CHW'): oh, ow = size img = img.unsqueeze(0) - img = F.interpolate(img, - size=(oh, ow), - 
mode=interpolation.lower(), - data_format='N' + data_format.upper()) + img = F.interpolate( + img, + size=(oh, ow), + mode=interpolation.lower(), + data_format='N' + data_format.upper(), + ) return img.squeeze(0) @@ -725,8 +762,10 @@ def adjust_brightness(img, brightness_factor): """ _assert_image_tensor(img, 'CHW') assert brightness_factor >= 0, "brightness_factor should be non-negative." - assert _get_image_num_channels( - img, 'CHW') in [1, 3], "channels of input should be either 1 or 3." + assert _get_image_num_channels(img, 'CHW') in [ + 1, + 3, + ], "channels of input should be either 1 or 3." extreme_target = paddle.zeros_like(img, img.dtype) return _blend_images(img, extreme_target, brightness_factor) @@ -751,13 +790,13 @@ def adjust_contrast(img, contrast_factor): channels = _get_image_num_channels(img, 'CHW') dtype = img.dtype if paddle.is_floating_point(img) else paddle.float32 if channels == 1: - extreme_target = paddle.mean(img.astype(dtype), - axis=(-3, -2, -1), - keepdim=True) + extreme_target = paddle.mean( + img.astype(dtype), axis=(-3, -2, -1), keepdim=True + ) elif channels == 3: - extreme_target = paddle.mean(to_grayscale(img).astype(dtype), - axis=(-3, -2, -1), - keepdim=True) + extreme_target = paddle.mean( + to_grayscale(img).astype(dtype), axis=(-3, -2, -1), keepdim=True + ) else: raise ValueError("channels of input should be either 1 or 3.") @@ -813,7 +852,9 @@ def adjust_hue(img, hue_factor): """ _assert_image_tensor(img, 'CHW') - assert hue_factor >= -0.5 and hue_factor <= 0.5, "hue_factor should be in range [-0.5, 0.5]" + assert ( + hue_factor >= -0.5 and hue_factor <= 0.5 + ), "hue_factor should be in range [-0.5, 0.5]" channels = _get_image_num_channels(img, 'CHW') if channels == 1: return img @@ -824,7 +865,7 @@ def adjust_hue(img, hue_factor): img_hsv = _rgb_to_hsv(img) h, s, v = img_hsv.unbind(axis=-3) - h = (h + hue_factor) + h = h + hue_factor h = h - h.floor() img_adjusted = _hsv_to_rgb(paddle.stack([h, s, v], axis=-3)) diff --git a/python/paddle/vision/transforms/transforms.py b/python/paddle/vision/transforms/transforms.py index 5472df2fd9e0beeaec8d81e6002c385c64c9c318..37270c97b2874817c74a0e4fcffe6e4a77bc12ea 100644 --- a/python/paddle/vision/transforms/transforms.py +++ b/python/paddle/vision/transforms/transforms.py @@ -46,33 +46,38 @@ def _get_image_size(img): return img.shape[2:][::-1] # nchw -> wh else: raise ValueError( - "The dim for input Tensor should be 3-D or 4-D, but received {}" - .format(len(img.shape))) + "The dim for input Tensor should be 3-D or 4-D, but received {}".format( + len(img.shape) + ) + ) else: raise TypeError("Unexpected type {}".format(type(img))) -def _check_input(value, - name, - center=1, - bound=(0, float('inf')), - clip_first_on_zero=True): +def _check_input( + value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True +): if isinstance(value, numbers.Number): if value < 0: raise ValueError( "If {} is a single number, it must be non negative.".format( - name)) + name + ) + ) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: - raise ValueError("{} values should be between {}".format( - name, bound)) + raise ValueError( + "{} values should be between {}".format(name, bound) + ) else: raise TypeError( - "{} should be a single number or a list/tuple with lenght 2.". 
- format(name)) + "{} should be a single number or a list/tuple with lenght 2.".format( + name + ) + ) if value[0] == value[1] == center: value = None @@ -116,8 +121,10 @@ class Compose(object): data = f(data) except Exception as e: stack_info = traceback.format_exc() - print("fail to perform transform [{}] with error: " - "{} and stack:\n{}".format(f, e, str(stack_info))) + print( + "fail to perform transform [{}] with error: " + "{} and stack:\n{}".format(f, e, str(stack_info)) + ) raise e return data @@ -240,14 +247,16 @@ class BaseTransform(object): def __init__(self, keys=None): if keys is None: - keys = ("image", ) + keys = ("image",) elif not isinstance(keys, Sequence): raise ValueError( - "keys should be a sequence, but got keys={}".format(keys)) + "keys should be a sequence, but got keys={}".format(keys) + ) for k in keys: if self._get_apply(k) is None: raise NotImplementedError( - "{} is unsupported data structure".format(k)) + "{} is unsupported data structure".format(k) + ) self.keys = keys # storage some params get from function get_params() @@ -259,7 +268,7 @@ class BaseTransform(object): def __call__(self, inputs): """Apply transform on single input data""" if not isinstance(inputs, tuple): - inputs = (inputs, ) + inputs = (inputs,) self.params = self._get_params(inputs) @@ -271,7 +280,7 @@ class BaseTransform(object): else: outputs.append(apply_func(inputs[i])) if len(inputs) > len(self.keys): - outputs.extend(inputs[len(self.keys):]) + outputs.extend(inputs[len(self.keys) :]) if len(outputs) == 1: outputs = outputs[0] @@ -413,8 +422,9 @@ class Resize(BaseTransform): def __init__(self, size, interpolation='bilinear', keys=None): super(Resize, self).__init__(keys) - assert isinstance(size, int) or (isinstance(size, Iterable) - and len(size) == 2) + assert isinstance(size, int) or ( + isinstance(size, Iterable) and len(size) == 2 + ) self.size = size self.interpolation = interpolation @@ -473,19 +483,21 @@ class RandomResizedCrop(BaseTransform): """ - def __init__(self, - size, - scale=(0.08, 1.0), - ratio=(3. / 4, 4. 
/ 3), - interpolation='bilinear', - keys=None): + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4, 4.0 / 3), + interpolation='bilinear', + keys=None, + ): super(RandomResizedCrop, self).__init__(keys) if isinstance(size, int): self.size = (size, size) else: self.size = size - assert (scale[0] <= scale[1]), "scale should be of kind (min, max)" - assert (ratio[0] <= ratio[1]), "ratio should be of kind (min, max)" + assert scale[0] <= scale[1], "scale should be of kind (min, max)" + assert ratio[0] <= ratio[1], "ratio should be of kind (min, max)" self.scale = scale self.ratio = ratio self.interpolation = interpolation @@ -696,12 +708,9 @@ class Normalize(BaseTransform): """ - def __init__(self, - mean=0.0, - std=1.0, - data_format='CHW', - to_rgb=False, - keys=None): + def __init__( + self, mean=0.0, std=1.0, data_format='CHW', to_rgb=False, keys=None + ): super(Normalize, self).__init__(keys) if isinstance(mean, numbers.Number): mean = [mean, mean, mean] @@ -715,8 +724,9 @@ class Normalize(BaseTransform): self.to_rgb = to_rgb def _apply_image(self, img): - return F.normalize(img, self.mean, self.std, self.data_format, - self.to_rgb) + return F.normalize( + img, self.mean, self.std, self.data_format, self.to_rgb + ) class Transpose(BaseTransform): @@ -934,11 +944,9 @@ class HueTransform(BaseTransform): def __init__(self, value, keys=None): super(HueTransform, self).__init__(keys) - self.value = _check_input(value, - 'hue', - center=0, - bound=(-0.5, 0.5), - clip_first_on_zero=False) + self.value = _check_input( + value, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False + ) def _apply_image(self, img): if self.value is None: @@ -985,12 +993,9 @@ class ColorJitter(BaseTransform): """ - def __init__(self, - brightness=0, - contrast=0, - saturation=0, - hue=0, - keys=None): + def __init__( + self, brightness=0, contrast=0, saturation=0, hue=0, keys=None + ): super(ColorJitter, self).__init__(keys) self.brightness = brightness self.contrast = contrast @@ -1033,8 +1038,9 @@ class ColorJitter(BaseTransform): Returns: PIL Image: Color jittered image. 
""" - transform = self._get_param(self.brightness, self.contrast, - self.saturation, self.hue) + transform = self._get_param( + self.brightness, self.contrast, self.saturation, self.hue + ) return transform(img) @@ -1093,13 +1099,15 @@ class RandomCrop(BaseTransform): print(crop_img.shape) # [3, 224, 224] """ - def __init__(self, - size, - padding=None, - pad_if_needed=False, - fill=0, - padding_mode='constant', - keys=None): + def __init__( + self, + size, + padding=None, + pad_if_needed=False, + fill=0, + padding_mode='constant', + keys=None, + ): super(RandomCrop, self).__init__(keys) if isinstance(size, numbers.Number): self.size = (int(size), int(size)) @@ -1144,12 +1152,14 @@ class RandomCrop(BaseTransform): # pad the width if needed if self.pad_if_needed and w < self.size[1]: - img = F.pad(img, (self.size[1] - w, 0), self.fill, - self.padding_mode) + img = F.pad( + img, (self.size[1] - w, 0), self.fill, self.padding_mode + ) # pad the height if needed if self.pad_if_needed and h < self.size[0]: - img = F.pad(img, (0, self.size[0] - h), self.fill, - self.padding_mode) + img = F.pad( + img, (0, self.size[0] - h), self.fill, self.padding_mode + ) i, j, h, w = self._get_param(img, self.size) @@ -1214,8 +1224,9 @@ class Pad(BaseTransform): if isinstance(padding, Sequence) and len(padding) not in [2, 4]: raise ValueError( - "Padding must be an int or a 2, or 4 element tuple, not a " + - "{} element tuple".format(len(padding))) + "Padding must be an int or a 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding)) + ) super(Pad, self).__init__(keys) self.padding = padding @@ -1234,19 +1245,23 @@ class Pad(BaseTransform): def _check_sequence_input(x, name, req_sizes): - msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join( - [str(s) for s in req_sizes]) + msg = ( + req_sizes[0] + if len(req_sizes) < 2 + else " or ".join([str(s) for s in req_sizes]) + ) if not isinstance(x, Sequence): raise TypeError(f"{name} should be a sequence of length {msg}.") if len(x) not in req_sizes: raise ValueError(f"{name} should be sequence of length {msg}.") -def _setup_angle(x, name, req_sizes=(2, )): +def _setup_angle(x, name, req_sizes=(2,)): if isinstance(x, numbers.Number): if x < 0: raise ValueError( - f"If {name} is a single number, it must be positive.") + f"If {name} is a single number, it must be positive." 
+ ) x = [-x, x] else: _check_sequence_input(x, name, req_sizes) @@ -1312,31 +1327,34 @@ class RandomAffine(BaseTransform): print(fake_img.shape) """ - def __init__(self, - degrees, - translate=None, - scale=None, - shear=None, - interpolation='nearest', - fill=0, - center=None, - keys=None): - self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, )) + def __init__( + self, + degrees, + translate=None, + scale=None, + shear=None, + interpolation='nearest', + fill=0, + center=None, + keys=None, + ): + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,)) super(RandomAffine, self).__init__(keys) assert interpolation in ['nearest', 'bilinear', 'bicubic'] self.interpolation = interpolation if translate is not None: - _check_sequence_input(translate, "translate", req_sizes=(2, )) + _check_sequence_input(translate, "translate", req_sizes=(2,)) for t in translate: if not (0.0 <= t <= 1.0): raise ValueError( - "translation values should be between 0 and 1") + "translation values should be between 0 and 1" + ) self.translate = translate if scale is not None: - _check_sequence_input(scale, "scale", req_sizes=(2, )) + _check_sequence_input(scale, "scale", req_sizes=(2,)) for s in scale: if s <= 0: raise ValueError("scale values should be positive") @@ -1354,15 +1372,12 @@ class RandomAffine(BaseTransform): self.fill = fill if center is not None: - _check_sequence_input(center, "center", req_sizes=(2, )) + _check_sequence_input(center, "center", req_sizes=(2,)) self.center = center - def _get_param(self, - img_size, - degrees, - translate=None, - scale_ranges=None, - shears=None): + def _get_param( + self, img_size, degrees, translate=None, scale_ranges=None, shears=None + ): """Get parameters for affine transformation Returns: @@ -1405,14 +1420,17 @@ class RandomAffine(BaseTransform): w, h = _get_image_size(img) img_size = [w, h] - ret = self._get_param(img_size, self.degrees, self.translate, - self.scale, self.shear) + ret = self._get_param( + img_size, self.degrees, self.translate, self.scale, self.shear + ) - return F.affine(img, - *ret, - interpolation=self.interpolation, - fill=self.fill, - center=self.center) + return F.affine( + img, + *ret, + interpolation=self.interpolation, + fill=self.fill, + center=self.center, + ) class RandomRotation(BaseTransform): @@ -1464,22 +1482,26 @@ class RandomRotation(BaseTransform): print(fake_img.size) """ - def __init__(self, - degrees, - interpolation='nearest', - expand=False, - center=None, - fill=0, - keys=None): + def __init__( + self, + degrees, + interpolation='nearest', + expand=False, + center=None, + fill=0, + keys=None, + ): if isinstance(degrees, numbers.Number): if degrees < 0: raise ValueError( - "If degrees is a single number, it must be positive.") + "If degrees is a single number, it must be positive." + ) self.degrees = (-degrees, degrees) else: if len(degrees) != 2: raise ValueError( - "If degrees is a sequence, it must be of len 2.") + "If degrees is a sequence, it must be of len 2." 
+ ) self.degrees = degrees super(RandomRotation, self).__init__(keys) @@ -1504,8 +1526,9 @@ class RandomRotation(BaseTransform): angle = self._get_param(self.degrees) - return F.rotate(img, angle, self.interpolation, self.expand, - self.center, self.fill) + return F.rotate( + img, angle, self.interpolation, self.expand, self.center, self.fill + ) class RandomPerspective(BaseTransform): @@ -1553,15 +1576,19 @@ class RandomPerspective(BaseTransform): print(fake_img.shape) """ - def __init__(self, - prob=0.5, - distortion_scale=0.5, - interpolation='nearest', - fill=0, - keys=None): + def __init__( + self, + prob=0.5, + distortion_scale=0.5, + interpolation='nearest', + fill=0, + keys=None, + ): super(RandomPerspective, self).__init__(keys) assert 0 <= prob <= 1, "probability must be between 0 and 1" - assert 0 <= distortion_scale <= 1, "distortion_scale must be between 0 and 1" + assert ( + 0 <= distortion_scale <= 1 + ), "distortion_scale must be between 0 and 1" assert interpolation in ['nearest', 'bilinear', 'bicubic'] assert isinstance(fill, (numbers.Number, str, list, tuple)) @@ -1579,35 +1606,43 @@ class RandomPerspective(BaseTransform): half_height = height // 2 half_width = width // 2 topleft = [ - int(random.uniform(0, - int(distortion_scale * half_width) + 1)), - int(random.uniform(0, - int(distortion_scale * half_height) + 1)), + int(random.uniform(0, int(distortion_scale * half_width) + 1)), + int(random.uniform(0, int(distortion_scale * half_height) + 1)), ] topright = [ int( - random.uniform(width - int(distortion_scale * half_width) - 1, - width)), - int(random.uniform(0, - int(distortion_scale * half_height) + 1)), + random.uniform( + width - int(distortion_scale * half_width) - 1, width + ) + ), + int(random.uniform(0, int(distortion_scale * half_height) + 1)), ] botright = [ int( - random.uniform(width - int(distortion_scale * half_width) - 1, - width)), + random.uniform( + width - int(distortion_scale * half_width) - 1, width + ) + ), int( - random.uniform(height - int(distortion_scale * half_height) - 1, - height)), + random.uniform( + height - int(distortion_scale * half_height) - 1, height + ) + ), ] botleft = [ - int(random.uniform(0, - int(distortion_scale * half_width) + 1)), + int(random.uniform(0, int(distortion_scale * half_width) + 1)), int( - random.uniform(height - int(distortion_scale * half_height) - 1, - height)), + random.uniform( + height - int(distortion_scale * half_height) - 1, height + ) + ), + ] + startpoints = [ + [0, 0], + [width - 1, 0], + [width - 1, height - 1], + [0, height - 1], ] - startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], - [0, height - 1]] endpoints = [topleft, topright, botright, botleft] return startpoints, endpoints @@ -1624,10 +1659,12 @@ class RandomPerspective(BaseTransform): width, height = _get_image_size(img) if random.random() < self.prob: - startpoints, endpoints = self.get_params(width, height, - self.distortion_scale) - return F.perspective(img, startpoints, endpoints, - self.interpolation, self.fill) + startpoints, endpoints = self.get_params( + width, height, self.distortion_scale + ) + return F.perspective( + img, startpoints, endpoints, self.interpolation, self.fill + ) return img @@ -1715,27 +1752,34 @@ class RandomErasing(BaseTransform): print(result) """ - def __init__(self, - prob=0.5, - scale=(0.02, 0.33), - ratio=(0.3, 3.3), - value=0, - inplace=False, - keys=None): + def __init__( + self, + prob=0.5, + scale=(0.02, 0.33), + ratio=(0.3, 3.3), + value=0, + inplace=False, + keys=None, + ): 
super(RandomErasing, self).__init__(keys) - assert isinstance(scale, - (tuple, list)), "scale should be a tuple or list" - assert (scale[0] >= 0 and scale[1] <= 1 and scale[0] <= scale[1] - ), "scale should be of kind (min, max) and in range [0, 1]" - assert isinstance(ratio, - (tuple, list)), "ratio should be a tuple or list" - assert (ratio[0] >= 0 - and ratio[0] <= ratio[1]), "ratio should be of kind (min, max)" - assert (prob >= 0 - and prob <= 1), "The probability should be in range [0, 1]" assert isinstance( - value, (numbers.Number, str, tuple, - list)), "value should be a number, tuple, list or str" + scale, (tuple, list) + ), "scale should be a tuple or list" + assert ( + scale[0] >= 0 and scale[1] <= 1 and scale[0] <= scale[1] + ), "scale should be of kind (min, max) and in range [0, 1]" + assert isinstance( + ratio, (tuple, list) + ), "ratio should be a tuple or list" + assert ( + ratio[0] >= 0 and ratio[0] <= ratio[1] + ), "ratio should be of kind (min, max)" + assert ( + prob >= 0 and prob <= 1 + ), "The probability should be in range [0, 1]" + assert isinstance( + value, (numbers.Number, str, tuple, list) + ), "value should be a number, tuple, list or str" if isinstance(value, str) and value != "random": raise ValueError("value must be 'random' when type is str") @@ -1779,7 +1823,8 @@ class RandomErasing(BaseTransform): if F._is_tensor_image(img): if value is None: v = paddle.normal(shape=[c, erase_h, erase_w]).astype( - img.dtype) + img.dtype + ) else: v = paddle.to_tensor(value, dtype=img.dtype)[:, None, None] else: @@ -1815,6 +1860,7 @@ class RandomErasing(BaseTransform): "Value should be a single number or a sequence with length equals to image's channel." ) top, left, erase_h, erase_w, v = self._get_param( - img, self.scale, self.ratio, value) + img, self.scale, self.ratio, value + ) return F.erase(img, top, left, erase_h, erase_w, v, self.inplace) return img diff --git a/r/example/mobilenet.py b/r/example/mobilenet.py index 96deb0aaf5fd08d04300ec2dc1d857b4f9e53e85..082a1dea7deac9148d65e9d58d042f65f0344535 100755 --- a/r/example/mobilenet.py +++ b/r/example/mobilenet.py @@ -51,7 +51,7 @@ def set_config(): def parse_data(): - """ parse input and output data """ + """parse input and output data""" with open('data/data.txt', 'r') as fr: data = np.array([float(_) for _ in fr.read().split()]) diff --git a/tools/CrossStackProfiler/CspChromeTraceFormatter.py b/tools/CrossStackProfiler/CspChromeTraceFormatter.py index ec5dba54b002feff51c9c7763b56af4a79b3fa96..1fa8efe9880508dbbedca02541337d7f795a8afa 100755 --- a/tools/CrossStackProfiler/CspChromeTraceFormatter.py +++ b/tools/CrossStackProfiler/CspChromeTraceFormatter.py @@ -16,7 +16,6 @@ import json class ChromeTraceFormatter(object): - def __init__(self): self._events = [] self._metadata = [] diff --git a/tools/CrossStackProfiler/CspFileReader.py b/tools/CrossStackProfiler/CspFileReader.py index 0825842fa1a11ba41bbc5f5f1ba60949bacf4986..11dd052283c1564f0cb116787f268bd69f12fce8 100755 --- a/tools/CrossStackProfiler/CspFileReader.py +++ b/tools/CrossStackProfiler/CspFileReader.py @@ -18,6 +18,7 @@ import json import glob import logging from multiprocessing import Lock + """ Some terms to clarify the code in most case, one or more paremeters may be set as input args for a class or a function in form of single variable or k-v dict @@ -42,10 +43,17 @@ dcgmMetricParameterMap = { "02_gpuUtility": [("GPUTL", "GPUTL"), ("GRACT", "GRACT")], "03_smUtility": [("SMACT", "SMACT"), ("SMOCC", "SMOCC")], "04_memUtility": [("FB_USED_RATIO", 
"FB_USED_RATIO"), ("DRAMA", "DRAMA")], - "05_txUtility": [("NVLTX", "NVLTX"), ("NVLRX", "NVLRX"), ("PCITX", "PCITX"), - ("PCIRX", "PCIRX")], - "06_calUtility": [("FP32A", "FP32A"), ("FP16A", "FP16A"), - ("TENSO", "TENSO")] + "05_txUtility": [ + ("NVLTX", "NVLTX"), + ("NVLRX", "NVLRX"), + ("PCITX", "PCITX"), + ("PCIRX", "PCIRX"), + ], + "06_calUtility": [ + ("FP32A", "FP32A"), + ("FP16A", "FP16A"), + ("TENSO", "TENSO"), + ], } DCGMINFO_TRACE_NUM = len(dcgmMetricParameterMap.keys()) NETINFO_TRACE_NUM = 2 @@ -59,13 +67,13 @@ FILEORGANIZEFORM_BYRANK = "byRank" FILEORGANIZEFORM_BYTRAINER = "byTrainer" FILEORGANIZEFORM_BYOTHER = "other" FILEORGANIZEFORM = [ - FILEORGANIZEFORM_BYRANK, FILEORGANIZEFORM_BYTRAINER, - FILEORGANIZEFORM_BYOTHER + FILEORGANIZEFORM_BYRANK, + FILEORGANIZEFORM_BYTRAINER, + FILEORGANIZEFORM_BYOTHER, ] class FileReader(object): - def __init__(self, logger, args): self._logger = logger self._args = args @@ -104,8 +112,9 @@ class FileReader(object): if not isinstance(self._args[key], type): raise TypeError( - "Invalid type of key [%s] in args dict, it should be a %s!" % - (key, type)) + "Invalid type of key [%s] in args dict, it should be a %s!" + % (key, type) + ) exec("self._%s = self._args[\"%s\"]" % (key, key)) @@ -117,18 +126,22 @@ class FileReader(object): raise TypeError("Invalid type of args, it should be a dict!") self._checkArgsKey("organizeForm", str) - if self._organizeForm not in FILEORGANIZEFORM or \ - self._organizeForm == FILEORGANIZEFORM_BYOTHER: + if ( + self._organizeForm not in FILEORGANIZEFORM + or self._organizeForm == FILEORGANIZEFORM_BYOTHER + ): raise NotImplementedError( - "we have not known how to process this form of file [%s]!" % - self._organizeForm) + "we have not known how to process this form of file [%s]!" + % self._organizeForm + ) self._checkArgsKey("gpuPerTrainer", int) self._checkArgsKey("dataPath", str) if not os.path.exists(self._dataPath): - raise IOError("input data path [%s] not existed!" % - (self._dataPath)) + raise IOError( + "input data path [%s] not existed!" % (self._dataPath) + ) self._checkArgsKey("groupSize", int) self._checkArgsKey("displaySize", int) @@ -155,7 +168,8 @@ class FileReader(object): def _cmp(self, x, y): return self._getId(x, self._organizeForm) - self._getId( - y, self._organizeForm) + y, self._organizeForm + ) def _getFileList(self): self._fileList = glob.glob(os.path.join(self._dataPath, "*.*")) @@ -170,12 +184,14 @@ class FileReader(object): newFileList.append(file) else: raise NotImplementedError( - "[%s] is repeated by id, we don not how to process it!" % - file) + "[%s] is repeated by id, we don not how to process it!" + % file + ) if not self._fileList: - if (self._getId(self._fileList[-1]) - - self._getId(self._fileList[0])) != len(self._fileList) - 1: + if ( + self._getId(self._fileList[-1]) - self._getId(self._fileList[0]) + ) != len(self._fileList) - 1: raise Exception("The file id should be countious!") # sort @@ -185,19 +201,23 @@ class FileReader(object): self._fileList.sort(key=_sortBySuffix) if not self._fileList: - self._logger.warning("we can not find any file in dir [%s]!" % - self._dataPath) + self._logger.warning( + "we can not find any file in dir [%s]!" % self._dataPath + ) else: - self._logger.info("file list in dir [%s] is : %s !" % - (self._dataPath, ', '.join(self._fileList))) + self._logger.info( + "file list in dir [%s] is : %s !" 
+ % (self._dataPath, ', '.join(self._fileList)) + ) return self._fileList def _getId(self, fileName, organizeForm, sed="."): if self._organizeForm != organizeForm: raise TypeError( - "Can not get rank id when organizer form is not %s!" % - organizeForm) + "Can not get rank id when organizer form is not %s!" + % organizeForm + ) if not os.path.isfile(fileName): raise IOError("[%s] is not a valid file!" % (fileName)) @@ -213,8 +233,9 @@ class FileReader(object): except IndexError as e: print(e) raise TypeError( - "invalid fileName [%s], the prefix should be a number!" % - fileName) + "invalid fileName [%s], the prefix should be a number!" + % fileName + ) def getRankId(self, fileName, sed="."): return self._getId(fileName, FILEORGANIZEFORM_BYRANK, sed) @@ -251,7 +272,7 @@ class FileReader(object): end = 0 for i in range(0, (n) * j, j): if i < len(ls) and (i + j) < len(ls): - ls_return.append(ls[i:i + j]) + ls_return.append(ls[i : i + j]) end = i + j ls_return.append(ls[end:]) return ls_return @@ -284,40 +305,29 @@ class FileReader(object): try: data = json.load(rf) except Exception: - self._logger.error("read [%s] error. not a json file!" % - (fileName)) - raise TypeError("read [%s] error. not a json file!" % - (fileName)) + self._logger.error( + "read [%s] error. not a json file!" % (fileName) + ) + raise TypeError( + "read [%s] error. not a json file!" % (fileName) + ) return data - def dumpOpInfoDict(self, - data, - groupId, - gpuId, - pretty=False, - tmpPath="./tmp"): - return self.dumpDict(data, - "opinfo", - groupId, - gpuId, - pretty=False, - tmpPath="./tmp") + def dumpOpInfoDict( + self, data, groupId, gpuId, pretty=False, tmpPath="./tmp" + ): + return self.dumpDict( + data, "opinfo", groupId, gpuId, pretty=False, tmpPath="./tmp" + ) def dumpDCGMDict(self, data, groupId, gpuId, pretty=False, tmpPath="./tmp"): - return self.dumpDict(data, - "dcgm", - groupId, - gpuId, - pretty=False, - tmpPath="./tmp") - - def dumpDict(self, - data, - name, - groupId, - gpuId, - pretty=False, - tmpPath="./tmp"): + return self.dumpDict( + data, "dcgm", groupId, gpuId, pretty=False, tmpPath="./tmp" + ) + + def dumpDict( + self, data, name, groupId, gpuId, pretty=False, tmpPath="./tmp" + ): self._lock.acquire() if not os.path.exists(tmpPath): os.makedirs(tmpPath) diff --git a/tools/CrossStackProfiler/CspReporter.py b/tools/CrossStackProfiler/CspReporter.py index 2e1e1f2c6380cf0ae0beb8aef63e5adc7c7b67e6..052ffd6fca19f4b54a5c9c797703f2045275f6a6 100755 --- a/tools/CrossStackProfiler/CspReporter.py +++ b/tools/CrossStackProfiler/CspReporter.py @@ -28,41 +28,43 @@ from CspFileReader import FILEORGANIZEFORM_BYRANK, FILEORGANIZEFORM_BYTRAINER def get_argparse(): parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--profile_path', - type=str, - default='.', - help='Working path that store the monitor data.') - - parser.add_argument('--timeline_path', - type=str, - default='.', - help='Output timeline file name.') - - parser.add_argument('--gpuPerTrainer', - type=int, - default=8, - help='Gpus per trainer.') - - parser.add_argument('--trainerNum', - type=int, - default=4, - help='Num of trainer.') - - parser.add_argument('--groupSize', - type=int, - default=8, - help='Num of trainer in a group.') - - parser.add_argument('--displaySize', - type=int, - default=2, - help='Num of line need to display in a group.') + parser.add_argument( + '--profile_path', + type=str, + default='.', + help='Working path that store the monitor data.', + ) + + parser.add_argument( + '--timeline_path', + 
type=str, + default='.', + help='Output timeline file name.', + ) + + parser.add_argument( + '--gpuPerTrainer', type=int, default=8, help='Gpus per trainer.' + ) + + parser.add_argument( + '--trainerNum', type=int, default=4, help='Num of trainer.' + ) + + parser.add_argument( + '--groupSize', type=int, default=8, help='Num of trainer in a group.' + ) + + parser.add_argument( + '--displaySize', + type=int, + default=2, + help='Num of line need to display in a group.', + ) return parser.parse_args() class CspReporter(object): - def __init__(self, args): self._args = args print(self._args) @@ -83,8 +85,9 @@ class CspReporter(object): def _checkArgs(self): if self._trainerNum % self._groupSize != 0: raise Exception( - "Input args error: trainerNum[%d] %% groupSize[%d] != 0" % - (self._trainerNum, self._groupSize)) + "Input args error: trainerNum[%d] %% groupSize[%d] != 0" + % (self._trainerNum, self._groupSize) + ) def _init_logger(self): self._logger = getLogger() @@ -121,10 +124,12 @@ class CspReporter(object): "organizeForm": FILEORGANIZEFORM_BYRANK, } - self._dcgmFileReader = dcgmFileReader(self._logger, - self._dcgmFileReaderArgs) - self._profileFileReader = profileFileReader(self._logger, - self._profileFileReaderArgs) + self._dcgmFileReader = dcgmFileReader( + self._logger, self._dcgmFileReaderArgs + ) + self._profileFileReader = profileFileReader( + self._logger, self._profileFileReaderArgs + ) def _init_timeInfo(self): self._timePath = os.path.join(self._workPath, TIME_PATH) @@ -134,7 +139,8 @@ class CspReporter(object): def _set_timeInfo(self, timeFileNamePrefix="time.txt", sed="."): timeFileNameList = glob.glob( - os.path.join(self._timePath, timeFileNamePrefix, sed, "*")) + os.path.join(self._timePath, timeFileNamePrefix, sed, "*") + ) for timeFileName in timeFileNameList: trainerId = int(timeFileName.split(sed)[-1]) gpuId = int(timeFileName.split(sed)[-2]) @@ -143,33 +149,41 @@ class CspReporter(object): for line in rf: if line.startswith("start time:"): info["start_time"] = int( - float(line.split(":")[-1]) * 1e9) + float(line.split(":")[-1]) * 1e9 + ) - self._minTimeStamp = min(self._minTimeStamp, - info["start_time"]) + self._minTimeStamp = min( + self._minTimeStamp, info["start_time"] + ) if line.startswith("end time:"): info["end_time"] = int(float(line.split(":")[-1]) * 1e9) if not info: self._timeInfo[gpuId * trainerId] = info - def _generateTraceFileByGroupAndGpuId(self, pipileInfo, netInfo, groupId, - gpuId): + def _generateTraceFileByGroupAndGpuId( + self, pipileInfo, netInfo, groupId, gpuId + ): dcgmInfoDict = self._dcgmFileReader.getDcgmInfoDict(groupId, gpuId) opInfoDict = self._profileFileReader.getOpInfoDict(groupId, gpuId) traceObj = {} - traceObj["traceEvents"] = pipileInfo[str( - gpuId)] + opInfoDict["traceEvents"] + dcgmInfoDict[ - "traceEvents"] + netInfo["traceEvents"] + traceObj["traceEvents"] = ( + pipileInfo[str(gpuId)] + + opInfoDict["traceEvents"] + + dcgmInfoDict["traceEvents"] + + netInfo["traceEvents"] + ) - self._profileFileReader.dumpDict(traceObj, "traceFile", groupId, gpuId, - False, self._saveFilePath) + self._profileFileReader.dumpDict( + traceObj, "traceFile", groupId, gpuId, False, self._saveFilePath + ) def _generateTraceFileByGroup(self, groupId, processNum): # first we need to generate pipeline info pipileInfo = self._profileFileReader.getPipeLineInfo( - groupId, processNum) + groupId, processNum + ) # second we need to generate dcgm info dcgmInfo = self._dcgmFileReader.getDCGMTraceInfo(groupId, processNum) @@ -186,48 +200,56 @@ class 
CspReporter(object): pidList = [] for gpuId in range(self._gpuPerTrainer): - subproc = Process(target=self._generateTraceFileByGroupAndGpuId, - args=( - pipileInfo, - netInfo, - groupId, - gpuId, - )) + subproc = Process( + target=self._generateTraceFileByGroupAndGpuId, + args=( + pipileInfo, + netInfo, + groupId, + gpuId, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[traceFile]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, 1)) + % (subproc.pid, 1) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( - "[traceFile]: process [%d] has exited! remained %d process!" % - (t.pid, len(pidList))) + "[traceFile]: process [%d] has exited! remained %d process!" + % (t.pid, len(pidList)) + ) def generateTraceFile(self, processNum=8): processPool = [] pidList = [] for groupId in range(self._trainerNum / self._groupSize): - subproc = Process(target=self._generateTraceFileByGroup, - args=( - groupId, - processNum, - )) + subproc = Process( + target=self._generateTraceFileByGroup, + args=( + groupId, + processNum, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[GroupTraceFile]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, 1)) + % (subproc.pid, 1) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( "[GroupTraceFile]: process [%d] has exited! remained %d process!" - % (t.pid, len(pidList))) + % (t.pid, len(pidList)) + ) if __name__ == '__main__': diff --git a/tools/CrossStackProfiler/DCGMFileReader.py b/tools/CrossStackProfiler/DCGMFileReader.py index 1285ce388c150fe2b927c2b0761fbdc11d87e596..86868c8734e3f2456930f2640a025e892b02e417 100755 --- a/tools/CrossStackProfiler/DCGMFileReader.py +++ b/tools/CrossStackProfiler/DCGMFileReader.py @@ -28,7 +28,6 @@ from CspFileReader import FILEORGANIZEFORM_BYTRAINER class dcgmFileReader(FileReader): - def parseFileByGroup(self, groupId, processNum=8): fileFist = self.getFileListByGroup(groupId) displaySize = min(self._displaySize, len(fileFist)) @@ -38,8 +37,9 @@ class dcgmFileReader(FileReader): return self._parseTask(fileFist) else: - self._logger.info("using [%d] process to do this work!" % - processNum) + self._logger.info( + "using [%d] process to do this work!" % processNum + ) processPool = [] pidList = [] @@ -48,23 +48,28 @@ class dcgmFileReader(FileReader): taskList = self._splitTaskListForMultiProcess(fileFist, processNum) for task in taskList: - subproc = Process(target=self._parseTask, args=( - task, - q, - )) + subproc = Process( + target=self._parseTask, + args=( + task, + q, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[DCGM reader]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, len(processPool))) + % (subproc.pid, len(processPool)) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( "[DCGM reader]: process [%d] has exited! remained %d process!" 
- % (t.pid, len(pidList))) + % (t.pid, len(pidList)) + ) isFistProcess = True for t in processPool: @@ -72,9 +77,9 @@ class dcgmFileReader(FileReader): isFistProcess = False dcgm_data = q.get() else: - dcgm_data = pd.concat([dcgm_data, q.get()], - axis=0, - join='outer') + dcgm_data = pd.concat( + [dcgm_data, q.get()], axis=0, join='outer' + ) return dcgm_data @@ -90,9 +95,9 @@ class dcgmFileReader(FileReader): is_first = False dcgm_data = tmp_data else: - dcgm_data = pd.concat([dcgm_data, tmp_data], - axis=0, - join='outer') + dcgm_data = pd.concat( + [dcgm_data, tmp_data], axis=0, join='outer' + ) dcgm_data = dcgm_data.dropna() if q is not None: q.put(dcgm_data) @@ -117,18 +122,23 @@ class dcgmFileReader(FileReader): for line in fp: # skip `nvidia-dcgm-dmon.sh` init and fini info lines - if 'nv-hostengine' in line or 'dmon' in line or 'Host Engine Listener Started' in line: + if ( + 'nv-hostengine' in line + or 'dmon' in line + or 'Host Engine Listener Started' in line + ): continue if not line.strip().startswith( - "GPU") and not line.strip().startswith("# Entity"): + "GPU" + ) and not line.strip().startswith("# Entity"): continue # skip non-needed headers (only the header in 1th line was needed) if line.strip().startswith("# Entity"): line = line.strip()[2:] - if 'Entity' == line[0:len('Entity')]: + if 'Entity' == line[0 : len('Entity')]: if has_header: continue else: @@ -153,15 +163,13 @@ class dcgmFileReader(FileReader): return dcgm - def _getDCGMTraceInfoByGpuId(self, - groupId, - gpuId, - dcgm_data, - pid_map, - q=None): + def _getDCGMTraceInfoByGpuId( + self, groupId, gpuId, dcgm_data, pid_map, q=None + ): self._logger.info( - "Begin to generate dcgm info, groupId = %d, gpuID = %d ..." % - (groupId, gpuId)) + "Begin to generate dcgm info, groupId = %d, gpuID = %d ..." + % (groupId, gpuId) + ) gpuDcgmData = dcgm_data[dcgm_data['Entity'].isin([gpuId])] @@ -220,27 +228,31 @@ class dcgmFileReader(FileReader): pidList = [] for gpuId in range(self._gpuPerTrainer): - subproc = Process(target=self._getDCGMTraceInfoByGpuId, - args=( - groupId, - gpuId, - dcgm_data, - pid_map, - q, - )) + subproc = Process( + target=self._getDCGMTraceInfoByGpuId, + args=( + groupId, + gpuId, + dcgm_data, + pid_map, + q, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[DCGM info]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, 1)) + % (subproc.pid, 1) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( - "[DCGM info]: process [%d] has exited! remained %d process!" % - (t.pid, len(pidList))) + "[DCGM info]: process [%d] has exited! remained %d process!" 
+ % (t.pid, len(pidList)) + ) dcgmInfo = {} diff --git a/tools/CrossStackProfiler/NetFileReader.py b/tools/CrossStackProfiler/NetFileReader.py index 199e3ba8d4d87fa7cfd8698ae62dbd3a84e5a59d..ed9cfdb1fc86f255f4e0b50a2dddea92296b150c 100755 --- a/tools/CrossStackProfiler/NetFileReader.py +++ b/tools/CrossStackProfiler/NetFileReader.py @@ -24,7 +24,6 @@ from CspFileReader import FILEORGANIZEFORM_BYTRAINER class netFileReader(FileReader): - def _parseSingleFile(self, fileNameList, tx_pid, rx_pid, q=None): traceInfo = {} @@ -53,8 +52,9 @@ class netFileReader(FileReader): for line in rf: try: event_str = json.loads(line.strip()) - event_str["pid"] = tx_pid if event_str[ - "name"] == "tx" else rx_pid + event_str["pid"] = ( + tx_pid if event_str["name"] == "tx" else rx_pid + ) # the unit of net is ms, we need ns event_str["ts"] = self._align_ts(event_str["ts"] * 1e6) event_str["id"] = trainerId @@ -62,8 +62,9 @@ class netFileReader(FileReader): except Exception: self._logger.warning( - "invalid record [%s] in [%s]. skip it!" % - (line[:-1], fileName)) + "invalid record [%s] in [%s]. skip it!" + % (line[:-1], fileName) + ) traceInfo["traceEvents"] = traceEventList if q is not None: @@ -73,7 +74,7 @@ class netFileReader(FileReader): def parseFileByGroup(self, groupId, processNum=8): fileFist = self.getFileListByGroup(groupId) - fileFist = fileFist[:min(self._displaySize, len(fileFist))] + fileFist = fileFist[: min(self._displaySize, len(fileFist))] manager = multiprocessing.Manager() q = manager.Queue() @@ -85,26 +86,30 @@ class netFileReader(FileReader): taskList = self._splitTaskListForMultiProcess(fileFist, processNum) for task in taskList: - subproc = Process(target=self._parseSingleFile, - args=( - task, - tx_pid, - rx_pid, - q, - )) + subproc = Process( + target=self._parseSingleFile, + args=( + task, + tx_pid, + rx_pid, + q, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[Net info]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, len(processPool))) + % (subproc.pid, len(processPool)) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( - "[Net info]: process [%d] has exited! remained %d process!" % - (t.pid, len(pidList))) + "[Net info]: process [%d] has exited! remained %d process!" + % (t.pid, len(pidList)) + ) traceInfo = {} isFistProcess = True diff --git a/tools/CrossStackProfiler/ProfileFileReader.py b/tools/CrossStackProfiler/ProfileFileReader.py index 282c2f1f25431ef57b3871ddc1e38a87c62642c2..e7a07c40820af1e6a4b7c85ea138713cfd8ae66b 100755 --- a/tools/CrossStackProfiler/ProfileFileReader.py +++ b/tools/CrossStackProfiler/ProfileFileReader.py @@ -22,12 +22,15 @@ from CspChromeTraceFormatter import ChromeTraceFormatter from CspFileReader import FileReader from CspFileReader import getLogger -from CspFileReader import NETINFO_TRACE_NUM, DCGMINFO_TRACE_NUM, PIPELINEINFO_TRACE_NUM +from CspFileReader import ( + NETINFO_TRACE_NUM, + DCGMINFO_TRACE_NUM, + PIPELINEINFO_TRACE_NUM, +) from CspFileReader import FILEORGANIZEFORM_BYRANK class profileFileReader(FileReader): - def _parseSingleFile(self, profile): with open(profile, 'rb') as f: profile_s = f.read() @@ -41,8 +44,9 @@ class profileFileReader(FileReader): for fileName in taskList: rankId = self.getRankId(fileName) - profile_dict["trainerRank.%03d" % - (rankId)] = self._parseSingleFile(fileName) + profile_dict["trainerRank.%03d" % (rankId)] = self._parseSingleFile( + fileName + ) self._logger.info("I finish processing %s!" 
% fileName) if q is not None: @@ -56,15 +60,16 @@ class profileFileReader(FileReader): if isinstance(items["args"], dict): args = items["args"] if "detail_info" in args: - if args["detail_info"] == "marker_forward_B" or \ - args["detail_info"] == "marker_forward_E" or \ - args["detail_info"] == "marker_backward_B" or \ - args["detail_info"] == "marker_backward_E": + if ( + args["detail_info"] == "marker_forward_B" + or args["detail_info"] == "marker_forward_E" + or args["detail_info"] == "marker_backward_B" + or args["detail_info"] == "marker_backward_E" + ): return True return False def _allocate_forwardBackwardInfo(self, restList, pid, tid): - def _cmp_ele(items): return items["ts"] @@ -79,7 +84,7 @@ class profileFileReader(FileReader): else: lastEle["dur"] = items["ts"] - lastEle["ts"] name = lastEle["args"]["detail_info"] - name = name[:name.rfind('_')] + name = name[: name.rfind('_')] name = name.split('_')[1] lastEle["name"] = name lastEle["args"]["detail_info"] = name @@ -130,7 +135,8 @@ class profileFileReader(FileReader): traceEventList.append(traceEvent) pipeLineList = self._allocate_forwardBackwardInfo( - traceEventList, pid, tid) + traceEventList, pid, tid + ) res[str(rankId)] = pipeLineList @@ -143,8 +149,9 @@ class profileFileReader(FileReader): fileFist = self.getFileListByGroup(groupId) self._logger.info( - "using [%d] process to do this work, total task num is %d!" % - (processNum, len(fileFist))) + "using [%d] process to do this work, total task num is %d!" + % (processNum, len(fileFist)) + ) processPool = [] pidList = [] @@ -153,23 +160,28 @@ class profileFileReader(FileReader): taskList = self._splitTaskListForMultiProcess(fileFist, processNum) for task in taskList: - subproc = Process(target=self._getPipeLineInfo, args=( - task, - q, - )) + subproc = Process( + target=self._getPipeLineInfo, + args=( + task, + q, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[pipeline info]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, len(task))) + % (subproc.pid, len(task)) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( "[pipeline info]: process [%d] has exited! remained %d process!" - % (t.pid, len(pidList))) + % (t.pid, len(pidList)) + ) pipeLineInfo = {} @@ -210,12 +222,15 @@ class profileFileReader(FileReader): # -1 device id represents CUDA API(RunTime) call.(e.g. 
cudaLaunch, cudaMemcpy) if event.device_id == -1: chrome_trace.emit_pid( - "%02d_%s:cuda_api" % (lineNum, k), pid) + "%02d_%s:cuda_api" % (lineNum, k), pid + ) lineNum = lineNum + 1 else: chrome_trace.emit_pid( - "%02d_%s:cpu:block:%d" % - (lineNum, k, event.device_id), pid) + "%02d_%s:cpu:block:%d" + % (lineNum, k, event.device_id), + pid, + ) lineNum = lineNum + 1 elif event.type == profiler_pb2.Event.GPUKernel: if (k, event.device_id, "GPUKernel") not in devices: @@ -225,8 +240,10 @@ class profileFileReader(FileReader): devices[(k, event.device_id, "GPUKernel")] = pid chrome_trace.emit_pid( - "%02d_%s:gpu:%d" % - (lineNum, k, event.device_id), pid) + "%02d_%s:gpu:%d" + % (lineNum, k, event.device_id), + pid, + ) lineNum = lineNum + 1 if not hasattr(profile_pb, "mem_events"): @@ -240,8 +257,10 @@ class profileFileReader(FileReader): mem_devices[(k, mevent.device_id, "GPU")] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:gpu:%d" % - (lineNum, k, mevent.device_id), pid) + "%02d_memory usage on %s:gpu:%d" + % (lineNum, k, mevent.device_id), + pid, + ) lineNum = lineNum + 1 elif mevent.place == profiler_pb2.MemEvent.CPUPlace: if (k, mevent.device_id, "CPU") not in mem_devices: @@ -250,21 +269,29 @@ class profileFileReader(FileReader): mem_devices[(k, mevent.device_id, "CPU")] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:cpu:%d" % - (lineNum, k, mevent.device_id), pid) + "%02d_memory usage on %s:cpu:%d" + % (lineNum, k, mevent.device_id), + pid, + ) lineNum = lineNum + 1 elif mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace: - if (k, mevent.device_id, - "CUDAPinnedPlace") not in mem_devices: + if ( + k, + mevent.device_id, + "CUDAPinnedPlace", + ) not in mem_devices: if gpuId == mevent.device_id: pid = initPid initPid = initPid + 1 - mem_devices[(k, mevent.device_id, - "CUDAPinnedPlace")] = pid + mem_devices[ + (k, mevent.device_id, "CUDAPinnedPlace") + ] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:cudapinnedplace:%d" % - (lineNum, k, mevent.device_id), pid) + "%02d_memory usage on %s:cudapinnedplace:%d" + % (lineNum, k, mevent.device_id), + pid, + ) lineNum = lineNum + 1 if (k, 0, "CPU") not in mem_devices: pid = initPid @@ -272,7 +299,8 @@ class profileFileReader(FileReader): mem_devices[(k, 0, "CPU")] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:cpu:%d" % (lineNum, k, 0), pid) + "%02d_memory usage on %s:cpu:%d" % (lineNum, k, 0), pid + ) lineNum = lineNum + 1 if (k, 0, "GPU") not in mem_devices: # if gpuId == mevent.device_id: @@ -281,7 +309,8 @@ class profileFileReader(FileReader): mem_devices[(k, 0, "GPU")] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:gpu:%d" % (lineNum, k, 0), pid) + "%02d_memory usage on %s:gpu:%d" % (lineNum, k, 0), pid + ) lineNum = lineNum + 1 if (k, 0, "CUDAPinnedPlace") not in mem_devices: pid = initPid @@ -289,8 +318,10 @@ class profileFileReader(FileReader): mem_devices[(k, 0, "CUDAPinnedPlace")] = pid chrome_trace.emit_pid( - "%02d_memory usage on %s:cudapinnedplace:%d" % - (lineNum, k, 0), pid) + "%02d_memory usage on %s:cudapinnedplace:%d" + % (lineNum, k, 0), + pid, + ) lineNum = lineNum + 1 i = i + 1 return chrome_trace, devices, mem_devices @@ -307,7 +338,11 @@ class profileFileReader(FileReader): elif event.type == profiler_pb2.Event.GPUKernel: type = "GPUKernel" - if event.type == profiler_pb2.Event.GPUKernel and event.device_id != gpuId and rankId % self._gpuPerTrainer != gpuId: + if ( + event.type == profiler_pb2.Event.GPUKernel + and event.device_id != gpuId + and rankId % self._gpuPerTrainer 
!= gpuId + ): continue pid = devices[(k, event.device_id, type)] @@ -318,10 +353,15 @@ class profileFileReader(FileReader): args['detail_info'] = event.detail_info # TODO(panyx0718): Chrome tracing only handles ms. However, some # ops takes micro-seconds. Hence, we keep the ns here. - chrome_trace.emit_region(self._align_ts(event.start_ns), - (event.end_ns - event.start_ns) / 1.0, - pid, event.sub_device_id, 'Op', - event.name, args) + chrome_trace.emit_region( + self._align_ts(event.start_ns), + (event.end_ns - event.start_ns) / 1.0, + pid, + event.sub_device_id, + 'Op', + event.name, + args, + ) return chrome_trace def _allocate_memory_event(self, profile_dict, mem_devices, gpuId): @@ -331,7 +371,7 @@ class profileFileReader(FileReader): place_to_str = { profiler_pb2.MemEvent.CPUPlace: "CPU", profiler_pb2.MemEvent.CUDAPlace: "GPU", - profiler_pb2.MemEvent.CUDAPinnedPlace: "CUDAPinnedPlace" + profiler_pb2.MemEvent.CUDAPinnedPlace: "CUDAPinnedPlace", } for k, profile_pb in profile_dict.items(): rankId = int(k.split(".")[-1]) @@ -352,9 +392,10 @@ class profileFileReader(FileReader): else: place = "UnDefine" - if (mevent.place == profiler_pb2.MemEvent.CUDAPlace - or mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace - ) and mevent.device_id != gpuId: + if ( + mevent.place == profiler_pb2.MemEvent.CUDAPlace + or mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace + ) and mevent.device_id != gpuId: continue crt_info['place'] = place @@ -377,15 +418,21 @@ class profileFileReader(FileReader): total_size = 0 while i < len(mem_list): total_size += mem_list[i]['size'] - while i < len(mem_list) - 1 and mem_list[i]['time'] == mem_list[ - i + 1]['time']: + while ( + i < len(mem_list) - 1 + and mem_list[i]['time'] == mem_list[i + 1]['time'] + ): total_size += mem_list[i + 1]['size'] i += 1 - chrome_trace.emit_counter("Memory", "Memory", - mem_list[i]['pid'], - self._align_ts(mem_list[i]['time']), - 0, total_size) + chrome_trace.emit_counter( + "Memory", + "Memory", + mem_list[i]['pid'], + self._align_ts(mem_list[i]['time']), + 0, + total_size, + ) i += 1 return chrome_trace @@ -395,21 +442,29 @@ class profileFileReader(FileReader): for file in fileFist: rankId = self.getRankId(file) localRank = rankId % self._gpuPerTrainer - if localRank == gpuId and (rankId / self._gpuPerTrainer - ) % self._groupSize < self._displaySize: + if ( + localRank == gpuId + and (rankId / self._gpuPerTrainer) % self._groupSize + < self._displaySize + ): newFileList.append(file) profile_dict = self._parseTask(newFileList) - initPid = PIPELINEINFO_TRACE_NUM + DCGMINFO_TRACE_NUM + NETINFO_TRACE_NUM + initPid = ( + PIPELINEINFO_TRACE_NUM + DCGMINFO_TRACE_NUM + NETINFO_TRACE_NUM + ) metaTrace, devicesPid, mem_devicesPid = self._allocate_pids( - profile_dict, gpuId, initPid) + profile_dict, gpuId, initPid + ) eventsTrace = self._allocate_events(profile_dict, devicesPid, gpuId) - memEventsTrace = self._allocate_memory_event(profile_dict, - mem_devicesPid, gpuId) + memEventsTrace = self._allocate_memory_event( + profile_dict, mem_devicesPid, gpuId + ) trace = {} - trace[ - 'traceEvents'] = metaTrace._metadata + eventsTrace._events + memEventsTrace._events + trace['traceEvents'] = ( + metaTrace._metadata + eventsTrace._events + memEventsTrace._events + ) self.dumpOpInfoDict(trace, groupId, gpuId, True) return trace @@ -421,24 +476,28 @@ class profileFileReader(FileReader): pidList = [] for gpuId in range(self._gpuPerTrainer): - subproc = Process(target=self._getOPTraceInfoByGpuId, - args=( - groupId, - gpuId, - )) + subproc = 
Process( + target=self._getOPTraceInfoByGpuId, + args=( + groupId, + gpuId, + ), + ) processPool.append(subproc) subproc.start() pidList.append(subproc.pid) self._logger.info( "[op info]: process [%d] has been started, total task num is %d ..." - % (subproc.pid, 1)) + % (subproc.pid, 1) + ) for t in processPool: t.join() pidList.remove(t.pid) self._logger.info( - "[op info]: process [%d] has exited! remained %d process!" % - (t.pid, len(pidList))) + "[op info]: process [%d] has exited! remained %d process!" + % (t.pid, len(pidList)) + ) opInfo = {} diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py index b9280124aedba6b388281f804acc9ff07e550d2a..ee110d6ce7f070f7d345f229f11b684ad0812bb1 100644 --- a/tools/analysisPyXml.py +++ b/tools/analysisPyXml.py @@ -21,10 +21,16 @@ import sys def analysisPyXml(rootPath, ut): xml_path = '%s/build/pytest/%s/python-coverage.xml' % (rootPath, ut) - related_ut_map_file = '%s/build/ut_map/%s/related_%s.txt' % (rootPath, ut, - ut) - notrelated_ut_map_file = '%s/build/ut_map/%s/notrelated_%s.txt' % (rootPath, - ut, ut) + related_ut_map_file = '%s/build/ut_map/%s/related_%s.txt' % ( + rootPath, + ut, + ut, + ) + notrelated_ut_map_file = '%s/build/ut_map/%s/notrelated_%s.txt' % ( + rootPath, + ut, + ut, + ) tree = ElementTree.parse(xml_path) root = tree.getroot() error_files = [] @@ -40,18 +46,36 @@ def analysisPyXml(rootPath, ut): command = 'sed -n %sp %s' % (line_number, clazz_filename) _code, output = commands.getstatusoutput(command) if _code == 0: - if output.strip().startswith( - ('from', 'import', '__all__', 'def', 'class', '"""', - '@', '\'\'\'', 'logger', '_logger', 'logging', 'r"""', - 'pass', 'try', 'except', - 'if __name__ == "__main__"')) == False: - pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" #a='b'/a="b"/a=0 + if ( + output.strip().startswith( + ( + 'from', + 'import', + '__all__', + 'def', + 'class', + '"""', + '@', + '\'\'\'', + 'logger', + '_logger', + 'logging', + 'r"""', + 'pass', + 'try', + 'except', + 'if __name__ == "__main__"', + ) + ) + == False + ): + pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" # a='b'/a="b"/a=0 if re.match(pattern, output.strip()) == None: pyCov_file.append(clazz_filename) coverageMessage = 'RELATED' break else: - coverageMessage = 'FILTER' #hit filter logic + coverageMessage = 'FILTER' # hit filter logic else: coverageMessage = 'FILTER' else: @@ -61,8 +85,9 @@ def analysisPyXml(rootPath, ut): else: coverageMessage = 'NOT_RELATED' if coverageMessage in ['NOT_RELATED', 'ERROR', 'FILTER']: - os.system('echo %s >> %s' % - (clazz_filename, notrelated_ut_map_file)) + os.system( + 'echo %s >> %s' % (clazz_filename, notrelated_ut_map_file) + ) elif coverageMessage == 'RELATED': os.system('echo %s >> %s' % (clazz_filename, related_ut_map_file)) diff --git a/tools/check_api_compatible.py b/tools/check_api_compatible.py index 18fb4d7ecdf147dfd89132eb3c67300f53cdfcaa..ea08a4494bb171d8c465e69422ca745285d5969c 100644 --- a/tools/check_api_compatible.py +++ b/tools/check_api_compatible.py @@ -27,7 +27,9 @@ else: logger.addHandler(console) console.setFormatter( logging.Formatter( - "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s")) + "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s" + ) +) def _check_compatible(args_o, args_n, defaults_o, 
defaults_n): @@ -38,21 +40,30 @@ def _check_compatible(args_o, args_n, defaults_o, defaults_n): # 参数改名了,也要提醒关注 for idx in range(min(len(args_o), len(args_n))): if args_o[idx] != args_n[idx]: - logger.debug("args's %d parameter diff with previous: %s vs %s", - idx, args_o, args_n) + logger.debug( + "args's %d parameter diff with previous: %s vs %s", + idx, + args_o, + args_n, + ) return False # 新增加了参数,必须提供默认值。以及不能减少默认值数量 if (len(args_n) - len(defaults_n)) > (len(args_o) - len(defaults_o)): - logger.debug("defaults num less then previous: %s vs %s", defaults_o, - defaults_n) + logger.debug( + "defaults num less then previous: %s vs %s", defaults_o, defaults_n + ) return False # 默认值必须相等 for idx in range(min(len(defaults_o), len(defaults_n))): nidx_o = -1 - idx nidx_n = -1 - idx - (len(args_n) - len(args_o)) - if (defaults_o[nidx_o] != defaults_n[nidx_n]): - logger.debug("defaults's %d value diff with previous: %s vs %s", - nidx_n, defaults_o, defaults_n) + if defaults_o[nidx_o] != defaults_n[nidx_n]: + logger.debug( + "defaults's %d value diff with previous: %s vs %s", + nidx_n, + defaults_o, + defaults_n, + ) return False return True @@ -61,21 +72,26 @@ def check_compatible(old_api_spec, new_api_spec): """ check compatible, FullArgSpec """ - if not (isinstance(old_api_spec, inspect.FullArgSpec) - and isinstance(new_api_spec, inspect.FullArgSpec)): + if not ( + isinstance(old_api_spec, inspect.FullArgSpec) + and isinstance(new_api_spec, inspect.FullArgSpec) + ): logger.warning( "new_api_spec or old_api_spec is not instance of inspect.FullArgSpec" ) return False return _check_compatible( - old_api_spec.args, new_api_spec.args, + old_api_spec.args, + new_api_spec.args, [] if old_api_spec.defaults is None else old_api_spec.defaults, - [] if new_api_spec.defaults is None else new_api_spec.defaults) + [] if new_api_spec.defaults is None else new_api_spec.defaults, + ) def check_compatible_str(old_api_spec_str, new_api_spec_str): patArgSpec = re.compile( - r'args=(.*), varargs=.*defaults=(None|\((.*)\)), kwonlyargs=.*') + r'args=(.*), varargs=.*defaults=(None|\((.*)\)), kwonlyargs=.*' + ) mo_o = patArgSpec.search(old_api_spec_str) mo_n = patArgSpec.search(new_api_spec_str) if not (mo_o and mo_n): @@ -99,7 +115,8 @@ def read_argspec_from_file(specfile): """ res_dict = {} patArgSpec = re.compile( - r'^(paddle[^,]+)\s+\((ArgSpec.*),\s\(\'document\W*([0-9a-z]{32})') + r'^(paddle[^,]+)\s+\((ArgSpec.*),\s\(\'document\W*([0-9a-z]{32})' + ) fullargspec_prefix = 'inspect.Full' for line in specfile.readlines(): mo = patArgSpec.search(line) @@ -123,21 +140,23 @@ def parse_args(): """ global arguments parser = argparse.ArgumentParser( - description='check api compatible across versions') + description='check api compatible across versions' + ) parser.add_argument('--debug', dest='debug', action="store_true") parser.add_argument( 'prev', type=argparse.FileType('r'), - help='the previous version (the version from develop branch)') - parser.add_argument('post', - type=argparse.FileType('r'), - help='the post version (the version from PullRequest)') + help='the previous version (the version from develop branch)', + ) + parser.add_argument( + 'post', + type=argparse.FileType('r'), + help='the post version (the version from PullRequest)', + ) for item in arguments: - parser.add_argument(item[0], - dest=item[1], - help=item[4], - type=item[2], - default=item[3]) + parser.add_argument( + item[0], dest=item[1], help=item[4], type=item[2], default=item[3] + ) if len(sys.argv) < 2: parser.print_help() @@ -162,10 +181,12 
@@ if __name__ == '__main__': if as_prev is None: # the api is deleted continue if isinstance(as_prev, str) or isinstance(as_post, str): - as_prev_str = as_prev if isinstance(as_prev, - str) else repr(as_prev) - as_post_str = as_post if isinstance(as_post, - str) else repr(as_post) + as_prev_str = ( + as_prev if isinstance(as_prev, str) else repr(as_prev) + ) + as_post_str = ( + as_post if isinstance(as_post, str) else repr(as_post) + ) if not check_compatible_str(as_prev_str, as_post_str): diff_api_names.append(as_post_name) else: diff --git a/tools/check_api_source_without_core_ops.py b/tools/check_api_source_without_core_ops.py index 797934a5a3568e7ba1be3e20528a2fa37216518c..bf7027be5cb7405497807b3f1379fadd9a476dc0 100644 --- a/tools/check_api_source_without_core_ops.py +++ b/tools/check_api_source_without_core_ops.py @@ -27,8 +27,10 @@ with open(sys.argv[2], 'r') as f: differ = difflib.Differ() result = differ.compare(origin, new) -api_with_ops, api_without_ops = count_api_without_core_ops.get_apis_with_and_without_core_ops( - ['paddle']) +( + api_with_ops, + api_without_ops, +) = count_api_without_core_ops.get_apis_with_and_without_core_ops(['paddle']) error = False # get all diff apis diff --git a/tools/check_ctest_hung.py b/tools/check_ctest_hung.py index 094d051b03e6b00f463ea1b1bb654b2e3272b161..11e927eb681e880e76c991f165fdca74f7011226 100644 --- a/tools/check_ctest_hung.py +++ b/tools/check_ctest_hung.py @@ -38,8 +38,7 @@ Diff: set(['test_parallel_executor_crf']) passed = set() with open(logfile, "r") as fn: for l in fn.readlines(): - if l.find("Test ") != -1 and \ - l.find("Passed") != -1: + if l.find("Test ") != -1 and l.find("Passed") != -1: m = re.search(r"Test\s+#[0-9]*\:\s([a-z0-9_]+)", escape(l)) passed.add(m.group(1)) if l.find("Start ") != -1: diff --git a/tools/check_op_benchmark_result.py b/tools/check_op_benchmark_result.py index aaf194ff95ec587a115436cae8971f589cec7040..8fce508102282d165230192426702926606389f0 100644 --- a/tools/check_op_benchmark_result.py +++ b/tools/check_op_benchmark_result.py @@ -19,14 +19,12 @@ import argparse def check_path_exists(path): - """Assert whether file/directory exists. - """ + """Assert whether file/directory exists.""" assert os.path.exists(path), "%s does not exist." % path def parse_case_name(log_file_name): - """Parse case name. - """ + """Parse case name.""" case_id, case_info = log_file_name.split("-") direction = case_info.split(".")[0].split("_")[-1] @@ -34,8 +32,7 @@ def parse_case_name(log_file_name): def parse_log_file(log_file): - """Load one case result from log file. - """ + """Load one case result from log file.""" check_path_exists(log_file) result = None @@ -56,20 +53,20 @@ def parse_log_file(log_file): def load_benchmark_result_from_logs_dir(logs_dir): - """Load benchmark result from logs directory. - """ + """Load benchmark result from logs directory.""" check_path_exists(logs_dir) log_file_path = lambda log_file: os.path.join(logs_dir, log_file) - result_lambda = lambda log_file: (log_file, - parse_log_file(log_file_path(log_file))) + result_lambda = lambda log_file: ( + log_file, + parse_log_file(log_file_path(log_file)), + ) return dict(map(result_lambda, os.listdir(logs_dir))) def check_speed_result(case_name, develop_data, pr_data, pr_result): - """Check speed differences between develop and pr. 
- """ + """Check speed differences between develop and pr.""" pr_gpu_time = pr_data.get("gpu_time") develop_gpu_time = develop_data.get("gpu_time") if develop_gpu_time != 0.0: @@ -84,10 +81,14 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result): total_time_diff = (pr_total_time - develop_total_time) / develop_total_time logging.info("------ OP: %s ------" % case_name) - logging.info("GPU time change: %s (develop: %.7f -> PR: %.7f)" % - (gpu_time_diff_str, develop_gpu_time, pr_gpu_time)) - logging.info("Total time change: %.5f%% (develop: %.7f -> PR: %.7f)" % - (total_time_diff * 100, develop_total_time, pr_total_time)) + logging.info( + "GPU time change: %s (develop: %.7f -> PR: %.7f)" + % (gpu_time_diff_str, develop_gpu_time, pr_gpu_time) + ) + logging.info( + "Total time change: %.5f%% (develop: %.7f -> PR: %.7f)" + % (total_time_diff * 100, develop_total_time, pr_total_time) + ) logging.info("backward: %s" % pr_result.get("backward")) logging.info("parameters:") for line in pr_result.get("parameters").strip().split("\n"): @@ -97,8 +98,7 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result): def check_accuracy_result(case_name, pr_result): - """Check accuracy result. - """ + """Check accuracy result.""" logging.info("------ OP: %s ------" % case_name) logging.info("Accuracy diff: %s" % pr_result.get("diff")) logging.info("backward: %s" % pr_result.get("backward")) @@ -109,15 +109,16 @@ def check_accuracy_result(case_name, pr_result): return not pr_result.get("consistent") -def compare_benchmark_result(case_name, develop_result, pr_result, - check_results): - """Compare the differences between develop and pr. - """ +def compare_benchmark_result( + case_name, develop_result, pr_result, check_results +): + """Compare the differences between develop and pr.""" develop_speed = develop_result.get("speed") pr_speed = pr_result.get("speed") assert type(develop_speed) == type( - pr_speed), "The types of comparison results need to be consistent." + pr_speed + ), "The types of comparison results need to be consistent." if isinstance(develop_speed, dict) and isinstance(pr_speed, dict): if check_speed_result(case_name, develop_speed, pr_speed, pr_result): @@ -128,8 +129,7 @@ def compare_benchmark_result(case_name, develop_result, pr_result, def update_api_info_file(fail_case_list, api_info_file): - """Update api info file to auto retry benchmark test. - """ + """Update api info file to auto retry benchmark test.""" check_path_exists(api_info_file) # set of case names for performance check failures @@ -153,14 +153,14 @@ def update_api_info_file(fail_case_list, api_info_file): def summary_results(check_results, api_info_file): - """Summary results and return exit code. - """ + """Summary results and return exit code.""" for case_name in check_results["speed"]: logging.error("Check speed result with case \"%s\" failed." % case_name) for case_name in check_results["accuracy"]: - logging.error("Check accuracy result with case \"%s\" failed." % - case_name) + logging.error( + "Check accuracy result with case \"%s\" failed." % case_name + ) if len(check_results["speed"]) and api_info_file: update_api_info_file(check_results["speed"], api_info_file) @@ -172,33 +172,38 @@ def summary_results(check_results, api_info_file): if __name__ == "__main__": - """Load result from log directories and compare the differences. 
- """ + """Load result from log directories and compare the differences.""" logging.basicConfig( level=logging.INFO, - format="[%(filename)s:%(lineno)d] [%(levelname)s] %(message)s") + format="[%(filename)s:%(lineno)d] [%(levelname)s] %(message)s", + ) parser = argparse.ArgumentParser() parser.add_argument( "--develop_logs_dir", type=str, required=True, - help="Specify the benchmark result directory of develop branch.") + help="Specify the benchmark result directory of develop branch.", + ) parser.add_argument( "--pr_logs_dir", type=str, required=True, - help="Specify the benchmark result directory of PR branch.") - parser.add_argument("--api_info_file", - type=str, - required=False, - help="Specify the api info to run benchmark test.") + help="Specify the benchmark result directory of PR branch.", + ) + parser.add_argument( + "--api_info_file", + type=str, + required=False, + help="Specify the api info to run benchmark test.", + ) args = parser.parse_args() check_results = dict(accuracy=list(), speed=list()) develop_result_dict = load_benchmark_result_from_logs_dir( - args.develop_logs_dir) + args.develop_logs_dir + ) check_path_exists(args.pr_logs_dir) pr_log_files = os.listdir(args.pr_logs_dir) @@ -208,7 +213,8 @@ if __name__ == "__main__": if develop_result is None or pr_result is None: continue case_name = parse_case_name(log_file) - compare_benchmark_result(case_name, develop_result, pr_result, - check_results) + compare_benchmark_result( + case_name, develop_result, pr_result, check_results + ) exit(summary_results(check_results, args.api_info_file)) diff --git a/tools/check_op_desc.py b/tools/check_op_desc.py index 7367b88d5b8f52e972ae6866e84e0809bd69b971..1c1ff14f7b68c3c3540036521470631ffb23e37a 100644 --- a/tools/check_op_desc.py +++ b/tools/check_op_desc.py @@ -88,7 +88,9 @@ def diff_vars(origin_vars, new_vars): if var_name not in var_changed_error_massage.keys(): var_changed_error_massage[var_name] = {} var_changed_error_massage[var_name][arg_name] = ( - origin_arg_value, new_arg_value) + origin_arg_value, + new_arg_value, + ) for var_name in vars_name_only_in_origin: error, var_error = True, True @@ -111,7 +113,8 @@ def diff_vars(origin_vars, new_vars): # if added var is def, inference needs to review, needs to register. 
if not new_vars.get(var_name).get(EXTRA) and not new_vars.get( - var_name).get(QUANT): + var_name + ).get(QUANT): error, var_error = True, True var_add_def_message.append(var_name) @@ -160,7 +163,9 @@ def diff_attr(ori_attrs, new_attrs): if attr_name not in attr_changed_error_massage.keys(): attr_changed_error_massage[attr_name] = {} attr_changed_error_massage[attr_name][arg_name] = ( - origin_arg_value, new_arg_value) + origin_arg_value, + new_arg_value, + ) for attr_name in attrs_only_in_origin: error, attr_error = True, True @@ -179,7 +184,8 @@ def diff_attr(ori_attrs, new_attrs): # if added attr is def, inference needs to review, needs to register if not new_attrs.get(attr_name).get(EXTRA) and not new_attrs.get( - attr_name).get(QUANT): + attr_name + ).get(QUANT): error, var_error = True, True attr_added_define_message.append(attr_name) @@ -206,7 +212,8 @@ def check_io_registry(io_type, op, diff): for update_type in [ADD]: for item in diff.get(update_type, []): infos = checker.filter_updates( - op, version_update_map[io_type][update_type], item) + op, version_update_map[io_type][update_type], item + ) if not infos: if update_type not in results.keys(): results[update_type] = [] @@ -228,7 +235,8 @@ def check_attr_registry(op, diff, origin_attrs): for update_type in [ADD, CHANGE]: for item in diff.get(update_type, {}): infos = checker.filter_updates( - op, version_update_map[ATTRS][update_type], item) + op, version_update_map[ATTRS][update_type], item + ) if not infos: if update_type == ADD: if update_type not in results.keys(): @@ -279,8 +287,9 @@ def compare_op_desc(origin_op_desc, new_op_desc): origin_attrs = origin_info.get(ATTRS, {}) new_attrs = new_info.get(ATTRS, {}) attrs_error, attrs_diff = diff_attr(origin_attrs, new_attrs) - attrs_version_errors = check_attr_registry(op_type, attrs_diff, - origin_attrs) + attrs_version_errors = check_attr_registry( + op_type, attrs_diff, origin_attrs + ) if ins_diff: desc_error_message.setdefault(op_type, {})[INPUTS] = ins_diff @@ -290,21 +299,26 @@ def compare_op_desc(origin_op_desc, new_op_desc): desc_error_message.setdefault(op_type, {})[ATTRS] = attrs_diff if ins_version_errors: - version_error_message.setdefault(op_type, - {})[INPUTS] = ins_version_errors + version_error_message.setdefault(op_type, {})[ + INPUTS + ] = ins_version_errors if outs_version_errors: - version_error_message.setdefault(op_type, - {})[OUTPUTS] = outs_version_errors + version_error_message.setdefault(op_type, {})[ + OUTPUTS + ] = outs_version_errors if attrs_version_errors: - version_error_message.setdefault(op_type, - {})[ATTRS] = attrs_version_errors + version_error_message.setdefault(op_type, {})[ + ATTRS + ] = attrs_version_errors return desc_error_message, version_error_message def print_desc_error_message(error_message): - print("\n======================= \n" - "Op desc error for the changes of Inputs/Outputs/Attrs of OPs:\n") + print( + "\n======================= \n" + "Op desc error for the changes of Inputs/Outputs/Attrs of OPs:\n" + ) for op_name in error_message: print("For OP '{}':".format(op_name)) @@ -321,16 +335,24 @@ def print_desc_error_message(error_message): for arg in changed_args: ori_value, new_value = changed_args.get(arg) print( - " * The arg '{}' of Input '{}' is changed: from '{}' to '{}'." 
- .format(arg, name, ori_value, new_value)) + " * The arg '{}' of Input '{}' is changed: from '{}' to '{}'.".format( + arg, name, ori_value, new_value + ) + ) for name in Inputs_error.get(QUANT, {}): - print(" * The added Input '{}' is `quant`, need slim to review.". - format(name)) + print( + " * The added Input '{}' is `quant`, need slim to review.".format( + name + ) + ) for name in Inputs_error.get(DEF, {}): - print(" * The added Input '{}' is `def`, need inference to review.". - format(name)) + print( + " * The added Input '{}' is `def`, need inference to review.".format( + name + ) + ) # 2. print outputs error message Outputs_error = error_message.get(op_name, {}).get(OUTPUTS, {}) @@ -345,23 +367,31 @@ def print_desc_error_message(error_message): for arg in changed_args: ori_value, new_value = changed_args.get(arg) print( - " * The arg '{}' of Output '{}' is changed: from '{}' to '{}'." - .format(arg, name, ori_value, new_value)) + " * The arg '{}' of Output '{}' is changed: from '{}' to '{}'.".format( + arg, name, ori_value, new_value + ) + ) for name in Outputs_error.get(QUANT, {}): - print(" * The added Output '{}' is `quant`, need slim to review.". - format(name)) + print( + " * The added Output '{}' is `quant`, need slim to review.".format( + name + ) + ) for name in Outputs_error.get(DEF, {}): print( - " * The added Output '{}' is `def`, need inference to review.". - format(name)) + " * The added Output '{}' is `def`, need inference to review.".format( + name + ) + ) # 3. print attrs error message attrs_error = error_message.get(op_name, {}).get(ATTRS, {}) for name in attrs_error.get(ADD_WITH_DEFAULT, {}): - print(" * The added attr '{}' doesn't set default value.".format( - name)) + print( + " * The added attr '{}' doesn't set default value.".format(name) + ) for name in attrs_error.get(DELETE, {}): print(" * The attr '{}' is deleted.".format(name)) @@ -371,18 +401,26 @@ def print_desc_error_message(error_message): for arg in changed_args: ori_value, new_value = changed_args.get(arg) print( - " * The arg '{}' of attr '{}' is changed: from '{}' to '{}'." - .format(arg, name, ori_value, new_value)) + " * The arg '{}' of attr '{}' is changed: from '{}' to '{}'.".format( + arg, name, ori_value, new_value + ) + ) for name in attrs_error.get(QUANT, {}): # TODO(Wilber): - print(" * The added attr '{}' is `quant`, need slim to review.". - format(name)) + print( + " * The added attr '{}' is `quant`, need slim to review.".format( + name + ) + ) for name in attrs_error.get(DEF, {}): # TODO(Wilber): - print(" * The added attr '{}' is `def`, need inference to review.". - format(name)) + print( + " * The added attr '{}' is `def`, need inference to review.".format( + name + ) + ) def print_version_error_message(error_message): @@ -398,31 +436,42 @@ def print_version_error_message(error_message): error_list = inputs_error.get(ADD, []) if error_list: for tup in error_list: - print(" * The added input '{}' is not yet registered.".format( - tup[1])) + print( + " * The added input '{}' is not yet registered.".format( + tup[1] + ) + ) # 2. print outputs error message outputs_error = error_message.get(op_name, {}).get(OUTPUTS, {}) error_list = outputs_error.get(ADD, []) if error_list: for tup in error_list: - print(" * The added output '{}' is not yet registered.".format( - tup[1])) + print( + " * The added output '{}' is not yet registered.".format( + tup[1] + ) + ) - #3. print attrs error message + # 3. 
print attrs error message attrs_error = error_message.get(op_name, {}).get(ATTRS, {}) error_list = attrs_error.get(ADD, []) if error_list: for tup in error_list: print( " * The added attribute '{}' is not yet registered.".format( - tup[1])) - error_dic = error_message.get(op_name, {}).get(ATTRS, - {}).get(CHANGE, {}) + tup[1] + ) + ) + error_dic = ( + error_message.get(op_name, {}).get(ATTRS, {}).get(CHANGE, {}) + ) for key, val in error_dic.items(): print( " * The change of attribute '{}' is not yet registered.".format( - key)) + key + ) + ) def print_repeat_process(): @@ -449,7 +498,8 @@ if len(sys.argv) == 3: new_op_desc = f.read() desc_error_message, version_error_message = compare_op_desc( - origin_op_desc, new_op_desc) + origin_op_desc, new_op_desc + ) if error: print("-" * 30) print_desc_error_message(desc_error_message) diff --git a/tools/check_op_kernel_same_dtypes.py b/tools/check_op_kernel_same_dtypes.py index b5b3e81c89fcb2170f702465ced86d4a52a39d47..13d0119f83b2eec5ceed4d7d4710e9b00ce84577 100644 --- a/tools/check_op_kernel_same_dtypes.py +++ b/tools/check_op_kernel_same_dtypes.py @@ -36,13 +36,15 @@ def get_all_kernels(): pattern = re.compile(r'data_type\[([^\]]+)\]') for op_info in op_infos: infos = pattern.findall(op_info) - if infos is None or len(infos) == 0: continue + if infos is None or len(infos) == 0: + continue register_type = infos[0].split(":")[-1] op_kernel_types[op_type].append(register_type.lower()) - for (op_type, op_kernels) in sorted(op_kernel_types.items(), - key=lambda x: x[0]): + for (op_type, op_kernels) in sorted( + op_kernel_types.items(), key=lambda x: x[0] + ): print(op_type, " ".join(sorted(op_kernels))) @@ -56,12 +58,18 @@ def read_file(file_path): def print_diff(op_type, op_kernel_dtype_set, grad_op_kernel_dtype_set): if len(op_kernel_dtype_set) > len(grad_op_kernel_dtype_set): lack_dtypes = list(op_kernel_dtype_set - grad_op_kernel_dtype_set) - print("{} supports [{}] now, but its grad op kernel not supported.". - format(op_type, " ".join(lack_dtypes))) + print( + "{} supports [{}] now, but its grad op kernel not supported.".format( + op_type, " ".join(lack_dtypes) + ) + ) else: lack_dtypes = list(grad_op_kernel_dtype_set - op_kernel_dtype_set) - print("{} supports [{}] now, but its forward op kernel not supported.". 
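# Illustrative sketch (not part of the diff): print_diff() compares the dtype
# sets of a forward kernel and its grad kernel with plain set subtraction.  The
# op name and dtypes below are sample data, not read from a real spec file.
op_dtypes = {"float32", "float64", "float16"}
grad_dtypes = {"float32", "float64"}
missing_in_grad = op_dtypes - grad_dtypes
if missing_in_grad:
    print(
        "matmul supports [%s], but its grad op kernel does not"
        % " ".join(sorted(missing_in_grad))
    )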
- format(op_type + "_grad", " ".join(lack_dtypes))) + print( + "{} supports [{}] now, but its forward op kernel not supported.".format( + op_type + "_grad", " ".join(lack_dtypes) + ) + ) def contain_current_op(op_type, op_info_dict): @@ -91,7 +99,8 @@ def check_change_or_add_op_kernel_dtypes_valid(): origin_dtype_set = origin_all_kernel_dtype_dict[op_type] # op kernel changed if origin_dtype_set != dtype_set and not contain_current_op( - op_type, added_or_changed_op_info): + op_type, added_or_changed_op_info + ): added_or_changed_op_info[op_type] = dtype_set else: # do nothing @@ -111,7 +120,8 @@ def check_change_or_add_op_kernel_dtypes_valid(): grad_op_type = op_type + "_grad" if grad_op_type in new_all_kernel_dtype_dict: grad_op_kernel_dtype_set = set( - new_all_kernel_dtype_dict[grad_op_type]) + new_all_kernel_dtype_dict[grad_op_type] + ) if dtype_set != grad_op_kernel_dtype_set: print_diff(op_type, dtype_set, grad_op_kernel_dtype_set) # if changed grad op @@ -119,7 +129,8 @@ def check_change_or_add_op_kernel_dtypes_valid(): forward_op_type = op_type.rstrip("_grad") if forward_op_type in new_all_kernel_dtype_dict: op_kernel_dtype_set = set( - new_all_kernel_dtype_dict[forward_op_type]) + new_all_kernel_dtype_dict[forward_op_type] + ) if op_kernel_dtype_set != dtype_set: print_diff(forward_op_type, op_kernel_dtype_set, dtype_set) @@ -129,6 +140,8 @@ if len(sys.argv) == 1: elif len(sys.argv) == 3: check_change_or_add_op_kernel_dtypes_valid() else: - print("Usage:\n" \ - "\tpython check_op_kernel_same_dtypes.py > all_kernels.txt\n" \ - "\tpython check_op_kernel_same_dtypes.py OP_KERNEL_DTYPE_DEV.spec OP_KERNEL_DTYPE_PR.spec > diff") + print( + "Usage:\n" + "\tpython check_op_kernel_same_dtypes.py > all_kernels.txt\n" + "\tpython check_op_kernel_same_dtypes.py OP_KERNEL_DTYPE_DEV.spec OP_KERNEL_DTYPE_PR.spec > diff" + ) diff --git a/tools/check_op_register_type.py b/tools/check_op_register_type.py index 9db2fc0bf5137225362a8676afe360de2b6398fc..af7f3ee42a8878babe20860c7885a5f6670c2478 100644 --- a/tools/check_op_register_type.py +++ b/tools/check_op_register_type.py @@ -34,18 +34,21 @@ def get_all_kernels(): op_kernel_types = collections.defaultdict(list) for op_type, op_infos in all_kernels_info.items(): is_grad_op = op_type.endswith("_grad") - if is_grad_op: continue + if is_grad_op: + continue pattern = re.compile(r'data_type\[([^\]]+)\]') for op_info in op_infos: infos = pattern.findall(op_info) - if infos is None or len(infos) == 0: continue + if infos is None or len(infos) == 0: + continue register_type = infos[0].split(":")[-1] op_kernel_types[op_type].append(register_type.lower()) - for (op_type, op_kernels) in sorted(op_kernel_types.items(), - key=lambda x: x[0]): + for (op_type, op_kernels) in sorted( + op_kernel_types.items(), key=lambda x: x[0] + ): print(op_type, " ".join(sorted(op_kernels))) @@ -63,8 +66,11 @@ def print_diff(op_type, register_types): if len(FLOATS - register_types) == 1: lack_types |= FLOATS - register_types - print("{} only supports [{}] now, but lacks [{}].".format( - op_type, " ".join(register_types), " ".join(lack_types))) + print( + "{} only supports [{}] now, but lacks [{}].".format( + op_type, " ".join(register_types), " ".join(lack_types) + ) + ) def check_add_op_valid(): @@ -77,10 +83,13 @@ def check_add_op_valid(): for each_diff in result: if each_diff[0] in ['+'] and len(each_diff) > 2: # if change or add op op_info = each_diff[1:].split() - if len(op_info) < 2: continue + if len(op_info) < 2: + continue register_types = set(op_info[1:]) - if 
len(FLOATS - register_types) == 1 or \ - len(INTS - register_types) == 1: + if ( + len(FLOATS - register_types) == 1 + or len(INTS - register_types) == 1 + ): print_diff(op_info[0], register_types) @@ -89,6 +98,8 @@ if len(sys.argv) == 1: elif len(sys.argv) == 3: check_add_op_valid() else: - print("Usage:\n" \ - "\tpython check_op_register_type.py > all_kernels.txt\n" \ - "\tpython check_op_register_type.py OP_TYPE_DEV.spec OP_TYPE_PR.spec > diff") + print( + "Usage:\n" + "\tpython check_op_register_type.py > all_kernels.txt\n" + "\tpython check_op_register_type.py OP_TYPE_DEV.spec OP_TYPE_PR.spec > diff" + ) diff --git a/tools/check_pr_approval.py b/tools/check_pr_approval.py index c242afd06e760ab570708a3fe8856f5648ebe5b3..7e7e7720bf8fd633906efba450002eb2774ee34a 100644 --- a/tools/check_pr_approval.py +++ b/tools/check_pr_approval.py @@ -39,8 +39,11 @@ def check_approval(count, required_reviewers): else: required_reviewers_login.add(rr) - if len(set(approved_user_ids) & required_reviewers_int) + len( - approved_user_logins & required_reviewers_login) >= count: + if ( + len(set(approved_user_ids) & required_reviewers_int) + + len(approved_user_logins & required_reviewers_login) + >= count + ): print("TRUE") else: print("FALSE") diff --git a/tools/check_ut.py b/tools/check_ut.py index 0b1211f1e654dba0295d7db9c8bde4957c6d9dce..e08c358531f7fd17a27de70fae4260d0c82b92fe 100644 --- a/tools/check_ut.py +++ b/tools/check_ut.py @@ -20,7 +20,7 @@ from github import Github class PRChecker(object): - """ PR Checker. """ + """PR Checker.""" def __init__(self): self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60) diff --git a/tools/codestyle/docstring_checker.py b/tools/codestyle/docstring_checker.py index 18083d1fab104e775c07e79d1053337b9b2c33c0..0d163c20bbfa15474e568b2251a216370659da58 100644 --- a/tools/codestyle/docstring_checker.py +++ b/tools/codestyle/docstring_checker.py @@ -28,11 +28,10 @@ def register(linter): class Docstring(object): - """Docstring class holds the parsed doc string elements. - """ + """Docstring class holds the parsed doc string elements.""" def __init__(self): - self.d = defaultdict(list) #name->[] + self.d = defaultdict(list) # name->[] self.clear() def clear(self): @@ -40,7 +39,7 @@ class Docstring(object): self.d['Examples'] = [] self.d['Returns'] = [] self.d['Raises'] = [] - self.args = {} #arg_name->arg_type + self.args = {} # arg_name->arg_type def get_level(self, string, indent=' '): level = 0 @@ -111,7 +110,8 @@ class DocstringChecker(BaseChecker): """DosstringChecker is pylint checker to check docstring style. """ - __implements__ = (IAstroidChecker, ) + + __implements__ = (IAstroidChecker,) POSITIONAL_MESSAGE_ID = 'str-used-on-positional-format-argument' KEYWORD_MESSAGE_ID = 'str-used-on-keyword-format-argument' @@ -120,25 +120,41 @@ class DocstringChecker(BaseChecker): symbol = "doc-string" priority = -1 msgs = { - 'W9001': ('One line doc string on > 1 lines', symbol + "-one-line", - 'Used when a short doc string is on multiple lines'), - 'W9002': - ('Doc string does not end with "." 
period', symbol + "-end-with", - 'Used when a doc string does not end with a period'), - 'W9003': - ('All args with their types must be mentioned in doc string %s', - symbol + "-with-all-args", - 'Used when not all arguments are in the doc string '), - 'W9005': ('Missing docstring or docstring is too short', - symbol + "-missing", 'Add docstring longer >=10'), - 'W9006': ('Docstring indent error, use 4 space for indent', - symbol + "-indent-error", 'Use 4 space for indent'), - 'W9007': - ('You should add `Returns` in comments', symbol + "-with-returns", - 'There should be a `Returns` section in comments'), - 'W9008': - ('You should add `Raises` section in comments', symbol + "-with-raises", - 'There should be a `Raises` section in comments'), + 'W9001': ( + 'One line doc string on > 1 lines', + symbol + "-one-line", + 'Used when a short doc string is on multiple lines', + ), + 'W9002': ( + 'Doc string does not end with "." period', + symbol + "-end-with", + 'Used when a doc string does not end with a period', + ), + 'W9003': ( + 'All args with their types must be mentioned in doc string %s', + symbol + "-with-all-args", + 'Used when not all arguments are in the doc string ', + ), + 'W9005': ( + 'Missing docstring or docstring is too short', + symbol + "-missing", + 'Add docstring longer >=10', + ), + 'W9006': ( + 'Docstring indent error, use 4 space for indent', + symbol + "-indent-error", + 'Use 4 space for indent', + ), + 'W9007': ( + 'You should add `Returns` in comments', + symbol + "-with-returns", + 'There should be a `Returns` section in comments', + ), + 'W9008': ( + 'You should add `Raises` section in comments', + symbol + "-with-raises", + 'There should be a `Raises` section in comments', + ), } options = () @@ -321,8 +337,7 @@ class DocstringChecker(BaseChecker): return True args = [] for arg in node.args.get_children(): - if (not isinstance(arg, astroid.AssignName)) \ - or arg.name == "self": + if (not isinstance(arg, astroid.AssignName)) or arg.name == "self": continue args.append(arg.name) @@ -332,20 +347,24 @@ class DocstringChecker(BaseChecker): parsed_args = doc.args args_not_documented = set(args) - set(parsed_args) if len(args) > 0 and len(parsed_args) <= 0: - self.add_message('W9003', - node=node, - line=node.fromlineno, - args=list(args_not_documented)) + self.add_message( + 'W9003', + node=node, + line=node.fromlineno, + args=list(args_not_documented), + ) return False for t in args: if t not in parsed_args: - self.add_message('W9003', - node=node, - line=node.fromlineno, - args=[ - t, - ]) + self.add_message( + 'W9003', + node=node, + line=node.fromlineno, + args=[ + t, + ], + ) return False return True diff --git a/tools/codestyle/test_docstring_checker.py b/tools/codestyle/test_docstring_checker.py index 61b236b6b7d6378b91d0f4b68972ea9f64260fc3..09617a0f2f73506791b9e7d37988088f02d72682 100644 --- a/tools/codestyle/test_docstring_checker.py +++ b/tools/codestyle/test_docstring_checker.py @@ -21,7 +21,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): CHECKER_CLASS = docstring_checker.DocstringChecker def test_one_line(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): """get news. 
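# Illustrative sketch (not part of the diff): each test here builds an AST node
# with astroid.extract_node(), feeds it to the checker, and inspects the
# messages released by the fake linter.  The same pattern in isolation; it
# mirrors test_one_line_1, assumes pylint/astroid are installed and
# docstring_checker is importable, and is meant to run under pytest.
import astroid
import pylint.testutils

import docstring_checker


class TestDocstringPeriod(pylint.testutils.CheckerTestCase):
    CHECKER_CLASS = docstring_checker.DocstringChecker

    def test_missing_period(self):
        func_node = astroid.extract_node(
            '''
        def test():
            """get news"""
            if True:
                return 5
            return 5
        '''
        )
        self.checker.visit_functiondef(func_node)
        got = self.linter.release_messages()
        # A one-line docstring without a trailing period releases W9002.
        assert got[0][0] == 'W9002'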
@@ -29,7 +30,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): if True: return 5 return 5 - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -37,13 +39,15 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9001' == got[0][0] def test_one_line_1(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): """get news""" if True: return 5 return 5 - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -51,7 +55,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9002' == got[0][0] def test_args(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(scale, mean): """get news. Args: @@ -64,7 +69,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): mean=scale mean=scale mean=scale - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -72,7 +78,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9003' == got[0][0] def test_missing(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): mean=scale mean=scale @@ -85,7 +92,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): mean=scale mean=scale mean=scale - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -93,13 +101,15 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9005' == got[0][0] def test_indent(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): """ get get get get get get get get get get get get get get get get. """ pass - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -107,7 +117,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9006' == got[0][0] def test_with_resturns(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): """get news. Args: @@ -125,7 +136,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): mean=scale mean=scale return mean - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() @@ -133,7 +145,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): assert 'W9007' == got[0][0] def test_with_raises(self): - func_node = astroid.extract_node(''' + func_node = astroid.extract_node( + ''' def test(): """get news. 
Args: @@ -151,7 +164,8 @@ class TestDocstring(pylint.testutils.CheckerTestCase): mean=scale mean=scale raise ValueError('A very specific bad thing happened.') - ''') + ''' + ) self.checker.visit_functiondef(func_node) got = self.linter.release_messages() diff --git a/tools/continuous_integration/bisect.py b/tools/continuous_integration/bisect.py index afffc60a449ac403c3e4c73d0ea9870a1a681307..b2a2aa1f9b646f194119e2159265e2799b29be97 100644 --- a/tools/continuous_integration/bisect.py +++ b/tools/continuous_integration/bisect.py @@ -27,43 +27,42 @@ import subprocess import sys parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument('--git_dir', - type=str, - default='', - help='git repo root directory.') -parser.add_argument('--build_dir', - type=str, - default='', - help='build directory.') -parser.add_argument('--good_commit', - type=str, - default='', - help='The old commit known to be good.') -parser.add_argument('--bad_commit', - type=str, - default='', - help='The new commit known to be bad.') -parser.add_argument('--test_target', - type=str, - default='', - help='The test target to evaluate.') +parser.add_argument( + '--git_dir', type=str, default='', help='git repo root directory.' +) +parser.add_argument( + '--build_dir', type=str, default='', help='build directory.' +) +parser.add_argument( + '--good_commit', + type=str, + default='', + help='The old commit known to be good.', +) +parser.add_argument( + '--bad_commit', type=str, default='', help='The new commit known to be bad.' +) +parser.add_argument( + '--test_target', type=str, default='', help='The test target to evaluate.' +) parser.add_argument( '--bisect_branch', type=str, default='develop', - help='The mainline branch to bisect (feature branch ignored.') -parser.add_argument('--log_file', - type=str, - default='', - help='The file use to log outputs.') -parser.add_argument('--test_times', - type=int, - default=10, - help="Number of times to run the test target.") -parser.add_argument('--build_parallel', - type=int, - default=32, - help="make parallelism.") + help='The mainline branch to bisect (feature branch ignored.', +) +parser.add_argument( + '--log_file', type=str, default='', help='The file use to log outputs.' +) +parser.add_argument( + '--test_times', + type=int, + default=10, + help="Number of times to run the test target.", +) +parser.add_argument( + '--build_parallel', type=int, default=32, help="make parallelism." +) args = parser.parse_args() if not args.log_file: @@ -81,10 +80,13 @@ print_arguments() # List the commits in mainline branch. os.chdir(args.git_dir) -ret = subprocess.check_output([ - 'git rev-list --first-parent %s...%s' % (args.good_commit, args.bad_commit) -], - shell=True) +ret = subprocess.check_output( + [ + 'git rev-list --first-parent %s...%s' + % (args.good_commit, args.bad_commit) + ], + shell=True, +) sys.stdout.write('commits found:\n%s\n' % ret) commits = ret.strip().split('\n') os.chdir(args.build_dir) @@ -95,11 +97,13 @@ last_culprit = '' while True: # Get to the mainline branch and clean up os.chdir(args.git_dir) - subprocess.check_output([ - 'git checkout %s && git clean -fd && git checkout .' % - args.bisect_branch - ], - shell=True) + subprocess.check_output( + [ + 'git checkout %s && git clean -fd && git checkout .' 
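# Illustrative sketch (not part of the diff): the while-loop in bisect.py is a
# manual binary search over the `git rev-list` output (newest commit first).
# The same narrowing logic with a fake test callback; the midpoint pick and the
# sample commit names are assumptions for the example.
def bisect_commits(commits, test_passes):
    """Return the oldest failing commit; `commits` is ordered newest-first."""
    culprit = None
    while commits:
        pick_idx = len(commits) // 2
        pick = commits[pick_idx]
        if test_passes(pick):
            if pick_idx == 0:
                break
            commits = commits[:pick_idx]  # culprit is among the newer commits
        else:
            culprit = pick
            if pick_idx + 1 >= len(commits):
                break
            commits = commits[pick_idx + 1 :]  # keep searching older commits
    return culprit


# c9..c5 are broken, c4..c0 are good, so c5 is the culprit.
sample = ["c9", "c8", "c7", "c6", "c5", "c4", "c3", "c2", "c1", "c0"]
good = {"c4", "c3", "c2", "c1", "c0"}
assert bisect_commits(sample, lambda c: c in good) == "c5"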
+ % args.bisect_branch + ], + shell=True, + ) if not commits: sys.stdout.write('no commits to bisect\n') @@ -115,9 +119,11 @@ while True: os.chdir(args.build_dir) sys.stdout.write('eval commit %d/%d: %s\n' % (pick_idx, len(commits), pick)) # Link error can happen without complete clean up. - cmd = ('rm -rf * && ' - 'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' % - (args.git_dir, args.log_file, args.build_parallel, args.log_file)) + cmd = ( + 'rm -rf * && ' + 'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' + % (args.git_dir, args.log_file, args.build_parallel, args.log_file) + ) sys.stdout.write('cmd: %s\n' % cmd) try: subprocess.check_output([cmd], shell=True) @@ -127,8 +133,11 @@ while True: # test the selected branch. passed = True try: - cmd = ('ctest --repeat-until-fail %s -R %s >> %s' % - (args.test_times, args.test_target, args.log_file)) + cmd = 'ctest --repeat-until-fail %s -R %s >> %s' % ( + args.test_times, + args.test_target, + args.log_file, + ) sys.stdout.write('cmd: %s\n' % cmd) subprocess.check_output([cmd], shell=True) except subprocess.CalledProcessError as e: @@ -136,10 +145,12 @@ while True: last_culprit = pick sys.stdout.write('eval %s passed: %s\n' % (pick, passed)) if passed: - if pick_idx == 0: break + if pick_idx == 0: + break commits = commits[:pick_idx] else: - if pick_idx + 1 >= len(commits): break - commits = commits[pick_idx + 1:] + if pick_idx + 1 >= len(commits): + break + commits = commits[pick_idx + 1 :] sys.stdout.write('Culprit commit: %s\n' % last_culprit) diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py index d75578d610d8824ecb99c1a8f15e82af34298a96..96a7c33dd95ff3d91ebc9d73abc00b24018d1ac1 100644 --- a/tools/count_api_without_core_ops.py +++ b/tools/count_api_without_core_ops.py @@ -37,9 +37,12 @@ def md5(doc): md5sum = hashinst.hexdigest() except UnicodeDecodeError as e: md5sum = None - print("Error({}) occurred when `md5({})`, discard it.".format( - str(e), doc), - file=sys.stderr) + print( + "Error({}) occurred when `md5({})`, discard it.".format( + str(e), doc + ), + file=sys.stderr, + ) return md5sum @@ -50,11 +53,13 @@ def split_with_and_without_core_ops(member, cur_name): if member.__doc__.find(':api_attr: Static Graph') != -1: return - if cur_name.find('ParamBase') != -1 or cur_name.find( - 'Parameter') != -1 or cur_name.find( - 'Variable') != -1 or cur_name.find( - 'control_flow') != -1 or cur_name.find( - 'contrib.mixed_precision') != -1: + if ( + cur_name.find('ParamBase') != -1 + or cur_name.find('Parameter') != -1 + or cur_name.find('Variable') != -1 + or cur_name.find('control_flow') != -1 + or cur_name.find('contrib.mixed_precision') != -1 + ): return if inspect.isclass(member): @@ -94,8 +99,9 @@ def visit_member(parent_name, member, func): if inspect.isclass(member): func(member, cur_name) for name, value in inspect.getmembers(member): - if hasattr(value, '__name__') and (not name.startswith("_") - or name == "__init__"): + if hasattr(value, '__name__') and ( + not name.startswith("_") or name == "__init__" + ): visit_member(cur_name, value, func) elif inspect.ismethoddescriptor(member): return @@ -106,11 +112,13 @@ def visit_member(parent_name, member, func): else: raise RuntimeError( "Unsupported generate signature of member, type {0}".format( - str(type(member)))) + str(type(member)) + ) + ) def is_primitive(instance): - int_types = (int, ) + int_types = (int,) pritimitive_types = int_types + (float, str) if isinstance(instance, pritimitive_types): return True @@ -171,8 +179,9 @@ 
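# Illustrative sketch (not part of the diff): the md5() helper above is used to
# fingerprint an API docstring so later runs can detect that it changed.  A
# minimal version of that idea; the API name is made up and the Unicode error
# handling from the real helper is omitted.
import hashlib


def fingerprint(doc):
    """Return a stable md5 hex digest for an API docstring."""
    return hashlib.md5((doc or "").encode("utf-8")).hexdigest()


print("paddle.example_api", fingerprint("Example docstring."))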
def get_apis_with_and_without_core_ops(modules): api_with_ops = [] api_without_ops = [] for m in modules: - visit_all_module(importlib.import_module(m), - split_with_and_without_core_ops) + visit_all_module( + importlib.import_module(m), split_with_and_without_core_ops + ) return api_with_ops, api_without_ops @@ -189,7 +198,8 @@ if __name__ == "__main__": modules = sys.argv[2].split(",") if sys.argv[1] == '-c': api_with_ops, api_without_ops = get_apis_with_and_without_core_ops( - modules) + modules + ) print('api_with_ops:', len(api_with_ops)) print('\n'.join(api_with_ops)) @@ -203,9 +213,11 @@ if __name__ == "__main__": print(name, func_dict[name]) else: - print("""Usage: + print( + """Usage: 1. Count and list all operator-raleated APIs that contains append_op but not _legacy_C_ops.xx. python ./count_api_without_core_ops.py -c paddle 2. Print api and the md5 of source code of the api. python ./count_api_without_core_ops.py -p paddle - """) + """ + ) diff --git a/tools/coverage/coverage_diff.py b/tools/coverage/coverage_diff.py index 713afe1ebc31d011e2181daa6f1408c8ea9a097e..980b2cd336309e57587189c8eb0b689287c34633 100644 --- a/tools/coverage/coverage_diff.py +++ b/tools/coverage/coverage_diff.py @@ -86,7 +86,7 @@ def get_info_file_lines(info_file, diff_file): current_file = line.lstrip('SF:') if current_file.startswith('/paddle/'): - current_file = current_file[len('/paddle/'):] + current_file = current_file[len('/paddle/') :] current_lines = diff_file_lines.get(current_file, []) diff --git a/tools/coverage/coverage_diff_list.py b/tools/coverage/coverage_diff_list.py index c57c1e9d96453381194c8e3d55100b561e4f7b26..1fee1d3ae15a6ac2ac73214b48a6f9f93aff4d10 100644 --- a/tools/coverage/coverage_diff_list.py +++ b/tools/coverage/coverage_diff_list.py @@ -41,7 +41,7 @@ def filter_by(list_file, max_rate): name = split[0].strip() if name.startswith('/paddle/'): - name = name[len('/paddle/'):] + name = name[len('/paddle/') :] # rate diff --git a/tools/coverage/coverage_lines.py b/tools/coverage/coverage_lines.py index 3c5df9d88e8c4f24c2f43a7f2c147dd00130089e..10a0520c08ccf9c2dc249ed129640d1b321f135d 100644 --- a/tools/coverage/coverage_lines.py +++ b/tools/coverage/coverage_lines.py @@ -29,8 +29,8 @@ def get_lines(info_file): Returns: float: Coverage rate. 
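# Illustrative sketch (not part of the diff): get_lines() walks an lcov .info
# file and turns DA:<line>,<hit_count> records into a hits/total ratio, which
# the __main__ block then compares with the expected rate.  A condensed version
# over an in-memory sample:
sample_info = """SF:/paddle/python/example.py
DA:1,1
DA:2,0
DA:3,4
end_of_record
"""

hits = 0.0
total = 0.0
for record in sample_info.splitlines():
    if record.startswith("DA:"):
        total += 1
        if int(record[len("DA:") :].split(",")[1]) > 0:
            hits += 1

print("coverage rate: {:.3f}".format(hits / total))  # 0.667 for the sample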
""" - hits = .0 - total = .0 + hits = 0.0 + total = 0.0 with open(info_file) as info_file: for line in info_file: @@ -68,10 +68,16 @@ if __name__ == '__main__': actual = round(actual, 3) if actual < expected: - print('expected >= {} %, actual {} %, failed'.format( - round(expected * 100, 1), round(actual * 100, 1))) + print( + 'expected >= {} %, actual {} %, failed'.format( + round(expected * 100, 1), round(actual * 100, 1) + ) + ) exit(1) - print('expected >= {} %, actual {} %, passed'.format( - round(expected * 100, 1), round(actual * 100, 1))) + print( + 'expected >= {} %, actual {} %, passed'.format( + round(expected * 100, 1), round(actual * 100, 1) + ) + ) diff --git a/tools/coverage/cuda_clean.py b/tools/coverage/cuda_clean.py index 82bb6a553c9554aa59bdefc362486a510ef4b469..fea2326413d010b67280296310974db5e9b74097 100644 --- a/tools/coverage/cuda_clean.py +++ b/tools/coverage/cuda_clean.py @@ -65,7 +65,7 @@ def clean(pull_id): changed = [] for file in get_files(pull_id): - #changed.append('/paddle/build/{}.gcda'.format(file)) + # changed.append('/paddle/build/{}.gcda'.format(file)) changed.append(file) for parent, dirs, files in os.walk('/paddle/build/'): @@ -81,7 +81,7 @@ def clean(pull_id): if src_name not in changed: unused_file = parent + '/' + gcda - #print unused_file + # print unused_file os.remove(gcda) else: print(src_name) diff --git a/tools/coverage/gcda_clean.py b/tools/coverage/gcda_clean.py index 286f50eae9ab6148fc22074610ea74044ec11b5c..ab8ae0432384d1a82fdb913f8546bd004bb85a56 100644 --- a/tools/coverage/gcda_clean.py +++ b/tools/coverage/gcda_clean.py @@ -40,8 +40,11 @@ def get_pull(pull_id): repo = github.get_repo('PaddlePaddle/Paddle') except Exception as e: print(e) - print("get_repo error, retry {} times after {} secs.".format( - idx, idx * 10)) + print( + "get_repo error, retry {} times after {} secs.".format( + idx, idx * 10 + ) + ) else: break idx += 1 diff --git a/tools/coverage/python_coverage.py b/tools/coverage/python_coverage.py index d45fb4d58c591c103366e7aa55f84a4f9f51a58d..53784a08abe6185749e83e424eec6380aca1d090 100644 --- a/tools/coverage/python_coverage.py +++ b/tools/coverage/python_coverage.py @@ -33,8 +33,9 @@ for clazz in root.findall('packages/package/classes/class'): clazz_filename = path.join(source, clazz_filename) if clazz_filename.startswith('/paddle/build/python/'): - clazz_filename = '/paddle/python/' + clazz_filename[ - len('/paddle/build/python/'):] + clazz_filename = ( + '/paddle/python/' + clazz_filename[len('/paddle/build/python/') :] + ) if not path.exists(clazz_filename): continue @@ -61,14 +62,20 @@ for clazz in root.findall('packages/package/classes/class'): taken = int(taken) for _ in range(taken): - print('BRDA:{},{},{},{}'.format(line_number, 0, branch_index, - line_hits)) + print( + 'BRDA:{},{},{},{}'.format( + line_number, 0, branch_index, line_hits + ) + ) branch_index += 1 if line_missing_branches: for missing_branch in line_missing_branches.split(','): - print('BRDA:{},{},{},{}'.format(line_number, 0, - branch_index, 0)) + print( + 'BRDA:{},{},{},{}'.format( + line_number, 0, branch_index, 0 + ) + ) branch_index += 1 print('DA:{},{}'.format(line_number, line_hits)) diff --git a/tools/externalError/spider.py b/tools/externalError/spider.py index d0f9f48c89ff23a8430feeb039ea667d04a6f8d9..d6062e5d50a43d03bae3d0961fa5c3e45bb37cbb 100644 --- a/tools/externalError/spider.py +++ b/tools/externalError/spider.py @@ -22,8 +22,8 @@ from html.parser import HTMLParser def parsing(externalErrorDesc): - 
#*********************************************************************************************# - #*********************************** CUDA Error Message **************************************# + # *********************************************************************************************# + # *********************************** CUDA Error Message **************************************# print("start crawling errorMessage for nvidia CUDA API--->") url = 'https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038' @@ -51,7 +51,8 @@ def parsing(externalErrorDesc): m_message = m_message.replace(list_a[idx], list_shape[idx]) m_message = m_message.replace( - '
Deprecated', '') + 'Deprecated
', '' + ) res_span = r'()' res_span_detail = r'(.*?)' @@ -81,8 +82,8 @@ def parsing(externalErrorDesc): _Messages.message = "'%s'. %s" % (m_type[0], m_message) print("End crawling errorMessage for nvidia CUDA API!\n") - #***********************************************************************************************# - #*********************************** CURAND Error Message **************************************# + # ***********************************************************************************************# + # *********************************** CURAND Error Message **************************************# print("start crawling errorMessage for nvidia CURAND API--->") url = 'https://docs.nvidia.com/cuda/curand/group__HOST.html#group__HOST_1gb94a31d5c165858c96b6c18b70644437' @@ -112,8 +113,8 @@ def parsing(externalErrorDesc): _Messages.message = "'%s'. %s" % (m_type[0], m_message) print("End crawling errorMessage for nvidia CURAND API!\n") - #**************************************************************************************************# - #*********************************** CUDNN Error Message ******************************************# + # **************************************************************************************************# + # *********************************** CUDNN Error Message ******************************************# cudnnStatus_t = { "CUDNN_STATUS_SUCCESS": 0, "CUDNN_STATUS_NOT_INITIALIZED": 1, @@ -155,8 +156,9 @@ def parsing(externalErrorDesc): list_class_detail = re.findall(res_class_detail, m_message, re.S | re.M) assert len(list_class) == len(list_class_detail) for idx in range(len(list_class)): - m_message = m_message.replace(list_class[idx], - list_class_detail[idx]) + m_message = m_message.replace( + list_class[idx], list_class_detail[idx] + ) res_a = r'()' res_shape = r'(.*?)' @@ -189,8 +191,8 @@ def parsing(externalErrorDesc): _Messages.message = "'%s'. %s" % (error[0], m_message) print("End crawling errorMessage for nvidia CUDNN API!\n") - #*************************************************************************************************# - #*********************************** CUBLAS Error Message ****************************************# + # *************************************************************************************************# + # *********************************** CUBLAS Error Message ****************************************# cublasStatus_t = { "CUBLAS_STATUS_SUCCESS": 0, "CUBLAS_STATUS_NOT_INITIALIZED": 1, @@ -201,7 +203,7 @@ def parsing(externalErrorDesc): "CUBLAS_STATUS_EXECUTION_FAILED": 13, "CUBLAS_STATUS_INTERNAL_ERROR": 14, "CUBLAS_STATUS_NOT_SUPPORTED": 15, - "CUBLAS_STATUS_LICENSE_ERROR": 16 + "CUBLAS_STATUS_LICENSE_ERROR": 16, } print("start crawling errorMessage for nvidia CUBLAS API--->") @@ -243,8 +245,8 @@ def parsing(externalErrorDesc): _Messages.message = "'%s'. 
%s" % (error[0], m_message) print("End crawling errorMessage for nvidia CUBLAS API!\n") - #*************************************************************************************************# - #*********************************** CUSOLVER Error Message **************************************# + # *************************************************************************************************# + # *********************************** CUSOLVER Error Message **************************************# cusolverStatus_t = { "CUSOLVER_STATUS_SUCCESS": 0, "CUSOLVER_STATUS_NOT_INITIALIZED": 1, @@ -264,7 +266,7 @@ def parsing(externalErrorDesc): "CUSOLVER_STATUS_IRS_NOT_SUPPORTED": 15, "CUSOLVER_STATUS_IRS_OUT_OF_RANGE": 16, "CUSOLVER_STATUS_IRS_NRHS_NOT_SUPPORTED_FOR_REFINE_GMRES": 17, - "CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED": 18 + "CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED": 18, } print("start crawling errorMessage for nvidia CUSOLVER API--->") url = 'https://docs.nvidia.com/cuda/cusolver/index.html#cuSolverSPstatus' @@ -277,7 +279,9 @@ def parsing(externalErrorDesc): res_div = r'This is a status type returned by the library functions and.*?
(.*?)
' m_div = re.findall(res_div, html, re.S | re.M)[0] - res_dt = r'(.*?).*?colspan="1">(.*?)' + res_dt = ( + r'(.*?).*?colspan="1">(.*?)' + ) m_dt = re.findall(res_dt, m_div, re.S | re.M) for error in m_dt: @@ -304,20 +308,22 @@ def parsing(externalErrorDesc): res_strong = r'.*?' res_strong_detail = r'(.*?)' list_strong = re.findall(res_strong, m_message, re.S | re.M) - list_strong_detail = re.findall(res_strong_detail, m_message, - re.S | re.M) + list_strong_detail = re.findall( + res_strong_detail, m_message, re.S | re.M + ) assert len(list_strong) == len(list_strong_detail) for idx in range(len(list_strong)): - m_message = m_message.replace(list_strong[idx], - list_strong_detail[idx]) + m_message = m_message.replace( + list_strong[idx], list_strong_detail[idx] + ) _Messages = allMessageDesc.messages.add() _Messages.code = int(cusolverStatus_t[error[0]]) _Messages.message = "'%s'. %s" % (error[0], m_message) print("End crawling errorMessage for nvidia CUSOLVER API!\n") - #**********************************************************************************************# - #*************************************** NCCL error *******************************************# + # **********************************************************************************************# + # *************************************** NCCL error *******************************************# print("start crawling errorMessage for nvidia NCCL API--->") url = 'https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/api/types.html#ncclresult-t' allMessageDesc = externalErrorDesc.errors.add() @@ -335,8 +341,8 @@ def parsing(externalErrorDesc): _Messages.message = "'%s'. %s" % (error[0], m_message) print("End crawling errorMessage for nvidia NCCL API!\n") - #*************************************************************************************************# - #*********************************** CUFFT Error Message **************************************# + # *************************************************************************************************# + # *********************************** CUFFT Error Message **************************************# print("start crawling errorMessage for nvidia CUFFT API--->") url = 'https://docs.nvidia.com/cuda/cufft/index.html#cufftresult' @@ -346,8 +352,7 @@ def parsing(externalErrorDesc): html = urllib.request.urlopen(url).read().decode('utf-8') class CUFFTHTMLParser(HTMLParser): - '''CUFFTHTML Parser - ''' + '''CUFFTHTML Parser''' def handle_data(self, data): if 'typedef enum cufftResult_t' in data: @@ -355,8 +360,10 @@ def parsing(externalErrorDesc): status, code, desc = re.split('=|//', line.strip()) _Messages = allMessageDesc.messages.add() _Messages.code = int(code.strip(' ,')) - _Messages.message = "'%s'. %s" % (status.strip(), - desc.strip()) + _Messages.message = "'%s'. 
%s" % ( + status.strip(), + desc.strip(), + ) CUFFTHTMLParser().feed(html) diff --git a/tools/final_ut_parallel_rule.py b/tools/final_ut_parallel_rule.py index a178bc2bbd7f511953ad4bbc4520555ac328c23a..4d98eee41fb97a1df0001903a6efb105ce71bb79 100644 --- a/tools/final_ut_parallel_rule.py +++ b/tools/final_ut_parallel_rule.py @@ -23,28 +23,44 @@ def classify_cases_by_mem(rootPath): """classify cases by mem""" case_filename = '%s/build/classify_case_by_cardNum.txt' % rootPath case_exec_100 = [ - 'test_conv_eltwiseadd_bn_fuse_pass', 'test_trt_convert_pool2d', - 'test_fc_fuse_pass', 'test_trt_convert_depthwise_conv2d', + 'test_conv_eltwiseadd_bn_fuse_pass', + 'test_trt_convert_pool2d', + 'test_fc_fuse_pass', + 'test_trt_convert_depthwise_conv2d', 'test_quant2_int8_resnet50_mkldnn', - 'test_conv_elementwise_add_act_fuse_pass', 'test_trt_convert_conv2d', - 'test_paddle_save_load', 'test_logical_op', 'test_nearest_interp_op', - 'test_pool2d_op', 'test_conv3d_transpose_op', 'test_lstmp_op', - 'test_cross_entropy2_op', 'test_sgd_op', 'test_imperative_ptq', - 'test_model', 'test_custom_relu_op_setup', 'test_dropout_op', - 'test_concat_op' - ] #木桶原理 70s-100s之间的case + 'test_conv_elementwise_add_act_fuse_pass', + 'test_trt_convert_conv2d', + 'test_paddle_save_load', + 'test_logical_op', + 'test_nearest_interp_op', + 'test_pool2d_op', + 'test_conv3d_transpose_op', + 'test_lstmp_op', + 'test_cross_entropy2_op', + 'test_sgd_op', + 'test_imperative_ptq', + 'test_model', + 'test_custom_relu_op_setup', + 'test_dropout_op', + 'test_concat_op', + ] # 木桶原理 70s-100s之间的case case_exec_200 = [ 'test_post_training_quantization_mnist', 'test_imperative_auto_mixed_precision', 'test_trt_dynamic_shape_ernie_fp16_ser_deser', - 'test_trt_dynamic_shape_ernie', 'test_layer_norm_op', - 'trt_quant_int8_yolov3_r50_test', 'test_gru_op', - 'test_post_training_quantization_while', 'test_mkldnn_log_softmax_op', - 'test_mkldnn_matmulv2_op', 'test_mkldnn_shape_op', + 'test_trt_dynamic_shape_ernie', + 'test_layer_norm_op', + 'trt_quant_int8_yolov3_r50_test', + 'test_gru_op', + 'test_post_training_quantization_while', + 'test_mkldnn_log_softmax_op', + 'test_mkldnn_matmulv2_op', + 'test_mkldnn_shape_op', 'interceptor_pipeline_short_path_test', - 'interceptor_pipeline_long_path_test', 'test_cpuonly_spawn' - ] #木桶原理 110s-200s之间的case 以及容易timeout + 'interceptor_pipeline_long_path_test', + 'test_cpuonly_spawn', + ] # 木桶原理 110s-200s之间的case 以及容易timeout case_always_timeout = [ 'test_quant2_int8_resnet50_channelwise_mkldnn', @@ -104,10 +120,10 @@ def classify_cases_by_mem(rootPath): if case not in new_lastest_mem: continue - #mem = 0 + # mem = 0 if new_lastest_mem[case]["mem_nvidia"] == 0: case_mem_0 = case_mem_0 + '|^' + case + '$' - #mem != 0 + # mem != 0 else: case_mem_1[case] = new_lastest_mem[case]["mem_nvidia"] diff --git a/tools/gen_ut_cmakelists.py b/tools/gen_ut_cmakelists.py index 62deef8c21b05066fcfc060df49f8b680ad50626..14f8e37626e75f181be9ac0a593657e2f9d2368f 100644 --- a/tools/gen_ut_cmakelists.py +++ b/tools/gen_ut_cmakelists.py @@ -44,9 +44,10 @@ def _process_envs(envs): processed_envs = [] for p in envs_parts: - assert " " not in p and \ - re.compile("^[a-zA-Z_][0-9a-zA-Z_]*=").search(p) is not None, \ - f"""The environment option format is wrong. The env variable name can only contains'a-z', 'A-Z', '0-9' and '_', + assert ( + " " not in p + and re.compile("^[a-zA-Z_][0-9a-zA-Z_]*=").search(p) is not None + ), f"""The environment option format is wrong. 
The env variable name can only contains'a-z', 'A-Z', '0-9' and '_', and the var can not contain space in either env names or values. However the var's format is '{p}'.""" @@ -95,8 +96,13 @@ def _proccess_archs(arch): arch = arch.upper().strip() if len(arch) > 0: for a in arch.split(";"): - assert a in ["GPU", "ROCM", "ASCEND", "ASCEND_CL", "XPU"], \ - f"""Supported arhc options are "GPU", "ROCM", "ASCEND" and "ASCEND_CL", "XPU", but the options is {a}""" + assert a in [ + "GPU", + "ROCM", + "ASCEND", + "ASCEND_CL", + "XPU", + ], f"""Supported arhc options are "GPU", "ROCM", "ASCEND" and "ASCEND_CL", "XPU", but the options is {a}""" archs += "WITH_" + a.upper() + " OR " arch = "(" + archs[:-4] + ")" else: @@ -121,7 +127,9 @@ def _process_os(os_): os_ = os_.upper() for p in os_.split(';'): assert p in [ - "WIN32", "APPLE", "LINUX" + "WIN32", + "APPLE", + "LINUX", ], f"""Supported os options are 'WIN32', 'APPLE' and 'LINUX', but the options is {p}""" os_ = os_.replace(";", " OR ") os_ = "(" + os_ + ")" @@ -133,8 +141,11 @@ def _process_os(os_): # check whether run_serial is 0, 1 or empty def _process_run_serial(run_serial): rs = run_serial.strip() - assert rs in ["1", "0", ""], \ - f"""the value of run_serial must be one of 0, 1 or empty. But this value is {rs}""" + assert rs in [ + "1", + "0", + "", + ], f"""the value of run_serial must be one of 0, 1 or empty. But this value is {rs}""" if rs == "": return "" return rs @@ -157,13 +168,15 @@ def _process_name(name, curdir): check whether name is with a legal format and check whther the test file exists. """ name = name.strip() - assert re.compile("^test_[0-9a-zA-Z_]+").search(name), \ - """If line is not the header of table, the test name must begin with "test_" """ \ + assert re.compile("^test_[0-9a-zA-Z_]+").search(name), ( + """If line is not the header of table, the test name must begin with "test_" """ """and the following substring must include at least one char of "0-9", "a-z", "A-Z" or "_".""" + ) filepath_prefix = os.path.join(curdir, name) suffix = [".py", ".sh"] - assert _file_with_extension(filepath_prefix, suffix), \ - f""" Please ensure the test file with the prefix '{filepath_prefix}' and one of the suffix {suffix} exists, because you specified a unittest named '{name}'""" + assert _file_with_extension( + filepath_prefix, suffix + ), f""" Please ensure the test file with the prefix '{filepath_prefix}' and one of the suffix {suffix} exists, because you specified a unittest named '{name}'""" return name @@ -182,14 +195,16 @@ def _norm_dirs(dirs): def _process_run_type(run_type): rt = run_type.strip() # completely match one of the strings: 'NIGHTLY', 'EXCLUSIVE', 'CINN', 'DIST', 'GPUPS', 'INFER', 'EXCLUSIVE:NIGHTLY' and 'DIST:NIGHTLY' - assert re.compile("^(NIGHTLY|EXCLUSIVE|CINN|DIST|GPUPS|INFER|EXCLUSIVE:NIGHTLY|DIST:NIGHTLY)$").search(rt), \ - f""" run_type must be one of 'NIGHTLY', 'EXCLUSIVE', 'CINN', 'DIST', 'GPUPS', 'INFER', 'EXCLUSIVE:NIGHTLY' and 'DIST:NIGHTLY'""" \ + assert re.compile( + "^(NIGHTLY|EXCLUSIVE|CINN|DIST|GPUPS|INFER|EXCLUSIVE:NIGHTLY|DIST:NIGHTLY)$" + ).search(rt), ( + f""" run_type must be one of 'NIGHTLY', 'EXCLUSIVE', 'CINN', 'DIST', 'GPUPS', 'INFER', 'EXCLUSIVE:NIGHTLY' and 'DIST:NIGHTLY'""" f"""but the run_type is {rt}""" + ) return rt -class DistUTPortManager(): - +class DistUTPortManager: def __init__(self, ignore_dirs=[]): self.dist_ut_port = 21200 self.assigned_ports = dict() @@ -212,13 +227,17 @@ class DistUTPortManager(): ''' if test_name not in self.assigned_ports: self.assigned_ports[test_name] 
= port - self.dist_ut_port = max(self.dist_ut_port, - self.assigned_ports[test_name]) + self.dist_ut_port = max( + self.dist_ut_port, self.assigned_ports[test_name] + ) return self.assigned_ports[test_name] def process_dist_port_num(self, port_num): - assert re.compile("^[0-9]+$").search(port_num) and int(port_num) > 0 or port_num.strip()=="", \ - f"""port_num must be foramt as a positive integer or empty, but this port_num is '{port_num}'""" + assert ( + re.compile("^[0-9]+$").search(port_num) + and int(port_num) > 0 + or port_num.strip() == "" + ), f"""port_num must be foramt as a positive integer or empty, but this port_num is '{port_num}'""" port_num = port_num.strip() if len(port_num) == 0: return 0 @@ -242,7 +261,7 @@ class DistUTPortManager(): if matched is None: continue p = matched.span() - port = int(line[p[0]:p[1]].split("=")[-1]) + port = int(line[p[0] : p[1]].split("=")[-1]) # find the test name which the port belongs to for k in range(idx, 0, -1): @@ -252,8 +271,9 @@ class DistUTPortManager(): # matcg right tets name format, the name must start with 'test_' follwed bu at least one cahr of # '0-9'. 'a-z'. 'A-Z' or '_' - assert re.compile("^test_[0-9a-zA-Z_]+").search(name), \ - f'''we found a test for initial the latest dist_port but the test name '{name}' seems to be wrong + assert re.compile("^test_[0-9a-zA-Z_]+").search( + name + ), f'''we found a test for initial the latest dist_port but the test name '{name}' seems to be wrong at line {k-1}, in file {cmake_file_name} ''' self.gset_port(name, port) @@ -302,19 +322,35 @@ class DistUTPortManager(): # if such a drectory exists # step 1 - if len(self.last_test_name) > 0 and len( - self.last_test_cmake_file) > 0: + if ( + len(self.last_test_name) > 0 + and len(self.last_test_cmake_file) > 0 + ): with open( - self.last_test_cmake_file.replace( - "CMakeLists.txt", "testslist.csv")) as csv_file: + self.last_test_cmake_file.replace( + "CMakeLists.txt", "testslist.csv" + ) + ) as csv_file: found = False for line in csv_file.readlines(): - name, _, _, _, _, launcher, num_port, _, _, _ = line.strip( - ).split(",") + ( + name, + _, + _, + _, + _, + launcher, + num_port, + _, + _, + _, + ) = line.strip().split(",") if name == self.last_test_name: found = True break - assert found, f"no such test named '{self.last_test_name}' in file '{self.last_test_cmake_file}'" + assert ( + found + ), f"no such test named '{self.last_test_name}' in file '{self.last_test_cmake_file}'" if launcher[-2:] == ".sh": self.process_dist_port_num(num_port) @@ -336,8 +372,7 @@ class DistUTPortManager(): assert len(self.no_cmake_dirs) == 0, err_msg -class CMakeGenerator(): - +class CMakeGenerator: def __init__(self, current_dirs, ignore_dirs): self.processed_dirs = set() self.port_manager = DistUTPortManager(ignore_dirs) @@ -393,8 +428,18 @@ class CMakeGenerator(): endif()" """ - name, os_, archs, timeout, run_type, launcher, num_port, run_serial, envs, conditions = line.strip( - ).split(",") + ( + name, + os_, + archs, + timeout, + run_type, + launcher, + num_port, + run_serial, + envs, + conditions, + ) = line.strip().split(",") # name == "name" means the line being parsed is the header of the table # we should skip this line and return empty here. 
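# Illustrative sketch (not part of the diff): the surrounding hunks parse one
# testslist.csv row into ten comma-separated fields and render cmake statements
# from them.  A toy version of that row-to-properties step; the row contents
# are sample data and only a few of the ten fields are used here.
row = "test_example_op,LINUX,GPU,120,DIST,test_runner.py,2,1,FLAGS_x=1,"
(
    name,
    os_,
    archs,
    timeout,
    run_type,
    launcher,
    num_port,
    run_serial,
    envs,
    conditions,
) = row.strip().split(",")

props = ""
if timeout.strip():
    props += f' TIMEOUT "{timeout}"'
if run_serial.strip():
    props += f" RUN_SERIAL {run_serial}"
if run_type.strip():
    props += f' LABELS "RUN_TYPE={run_type}"'
print(f"set_tests_properties({name} PROPERTIES{props})")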
@@ -439,12 +484,15 @@ class CMakeGenerator(): "{envs}")%s endif() ''' - run_type_str = "" if len( - run_type) == 0 else f' LABELS "RUN_TYPE={run_type}"' - time_out_str = f' TIMEOUT "{timeout}"' if len( - timeout.strip()) > 0 else '' - run_serial_str = f' RUN_SERIAL {run_serial}' if len( - run_serial) > 0 else '' + run_type_str = ( + "" if len(run_type) == 0 else f' LABELS "RUN_TYPE={run_type}"' + ) + time_out_str = ( + f' TIMEOUT "{timeout}"' if len(timeout.strip()) > 0 else '' + ) + run_serial_str = ( + f' RUN_SERIAL {run_serial}' if len(run_serial) > 0 else '' + ) if len(time_out_str) > 0 or len(run_serial_str) > 0: set_properties = f''' set_tests_properties({name} PROPERTIES{time_out_str}{run_serial_str}{run_type_str})''' @@ -471,10 +519,14 @@ class CMakeGenerator(): if not os.path.isdir(c_path): continue self.processed_dirs.add(c_path) - if os.path.isfile(os.path.join(current_work_dir, c, "testslist.csv")) \ - or os.path.isfile(os.path.join(current_work_dir, c, "CMakeLists.txt")): - self._gen_cmakelists(os.path.join(current_work_dir, c), - depth + 1) + if os.path.isfile( + os.path.join(current_work_dir, c, "testslist.csv") + ) or os.path.isfile( + os.path.join(current_work_dir, c, "CMakeLists.txt") + ): + self._gen_cmakelists( + os.path.join(current_work_dir, c), depth + 1 + ) sub_dirs.append(c) if not os.path.isfile(os.path.join(current_work_dir, "testslist.csv")): @@ -506,10 +558,13 @@ class CMakeGenerator(): char_seq = "".join(char_seq) if char_seq != "".join(cmds.split()): - assert f"{current_work_dir}/CMakeLists.txt" not in self.modified_or_created_files, \ - f"the file {current_work_dir}/CMakeLists.txt are modified twice, which may cause some error" + assert ( + f"{current_work_dir}/CMakeLists.txt" + not in self.modified_or_created_files + ), f"the file {current_work_dir}/CMakeLists.txt are modified twice, which may cause some error" self.modified_or_created_files.append( - f"{current_work_dir}/CMakeLists.txt") + f"{current_work_dir}/CMakeLists.txt" + ) with open(f"{current_work_dir}/CMakeLists.txt", "w") as cmake_file: print(cmds, end="", file=cmake_file) @@ -523,8 +578,7 @@ if __name__ == "__main__": required=False, default=[], nargs="+", - help= - "Input a list of files named testslist.csv and output files named CmakeLists.txt in the same directories as the csv files respectly" + help="Input a list of files named testslist.csv and output files named CmakeLists.txt in the same directories as the csv files respectly", ) parser.add_argument( "--dirpaths", @@ -533,8 +587,7 @@ if __name__ == "__main__": required=False, default=[], nargs="+", - help= - "Input a list of dir paths including files named testslist.csv and output CmakeLists.txt in these directories respectly" + help="Input a list of dir paths including files named testslist.csv and output CmakeLists.txt in these directories respectly", ) parser.add_argument( "--ignore-cmake-dirs", @@ -543,18 +596,19 @@ if __name__ == "__main__": required=False, default=[], nargs='*', - help= - "To keep dist ports the same with old version cmake, old cmakelists.txt files are needed to parse dist_ports. If a directories are newly created and there is no cmakelists.txt file, the directory path must be specified by this option. The dirs are not recursive." + help="To keep dist ports the same with old version cmake, old cmakelists.txt files are needed to parse dist_ports. If a directories are newly created and there is no cmakelists.txt file, the directory path must be specified by this option. 
The dirs are not recursive.", ) args = parser.parse_args() - assert not (len(args.files) == 0 and len(args.dirpaths) - == 0), "You must provide at leate one file or dirpath" + assert not ( + len(args.files) == 0 and len(args.dirpaths) == 0 + ), "You must provide at leate one file or dirpath" current_work_dirs = [] if len(args.files) >= 1: for p in args.files: - assert os.path.basename( - p) == "testslist.csv", "you must input file named testslist.csv" + assert ( + os.path.basename(p) == "testslist.csv" + ), "you must input file named testslist.csv" current_work_dirs = current_work_dirs + [ os.path.dirname(file) for file in args.files ] diff --git a/tools/get_op_list.py b/tools/get_op_list.py index 05e666d2270f5e263282cb03c576ab9092d67a66..a6d726dc3096e92b7131c8f87b689908d2fb2408 100644 --- a/tools/get_op_list.py +++ b/tools/get_op_list.py @@ -24,10 +24,12 @@ paddle.enable_static() def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument('--model_dir', - type=str, - default="", - help='Directory of the inference models.') + parser.add_argument( + '--model_dir', + type=str, + default="", + help='Directory of the inference models.', + ) return parser.parse_args() @@ -58,7 +60,7 @@ if __name__ == '__main__': args = parse_args() for root, dirs, files in os.walk(args.model_dir, topdown=True): for name in files: - if (re.match(r'.*pdmodel', name)): + if re.match(r'.*pdmodel', name): ops_set = get_model_ops(os.path.join(root, name)) phi_set = get_model_phi_kernels(ops_set) ops = ";".join(ops_set) diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py index efe21b0639161dc41f77a91bb7210b2698366b39..ba8dbeb1da572d6f98761b73eb4537b2317168b5 100644 --- a/tools/get_pr_ut.py +++ b/tools/get_pr_ut.py @@ -31,7 +31,7 @@ ssl._create_default_https_context = ssl._create_unverified_context class PRChecker(object): - """ PR Checker. """ + """PR Checker.""" def __init__(self): self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60) @@ -47,7 +47,7 @@ class PRChecker(object): self.full_case = False def init(self): - """ Get pull request. 
""" + """Get pull request.""" pr_id = os.getenv('GIT_PR_ID') if not pr_id: print('PREC No PR ID') @@ -72,7 +72,7 @@ class PRChecker(object): print('PREC test=allcase is set') self.full_case = True - #todo: exception + # todo: exception def __wget_with_retry(self, url): ix = 1 proxy = '--no-proxy' @@ -86,12 +86,15 @@ class PRChecker(object): proxy = '--no-proxy' code = subprocess.call( 'wget -q {} --no-check-certificate {}'.format(proxy, url), - shell=True) + shell=True, + ) if code == 0: return True print( - 'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]' - .format(url, ix, ix * 10, proxy)) + 'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]'.format( + url, ix, ix * 10, proxy + ) + ) time.sleep(ix * 10) ix += 1 return False @@ -105,16 +108,19 @@ class PRChecker(object): cur_proxy = urllib.request.ProxyHandler(without_proxy) else: cur_proxy = urllib.request.ProxyHandler(with_proxy) - opener = urllib.request.build_opener(cur_proxy, - urllib.request.HTTPHandler) + opener = urllib.request.build_opener( + cur_proxy, urllib.request.HTTPHandler + ) urllib.request.install_opener(opener) try: urllib.request.urlretrieve(url, filename) except Exception as e: print(e) print( - 'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]' - .format(url, ix, ix * 10, cur_proxy)) + 'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]'.format( + url, ix, ix * 10, cur_proxy + ) + ) continue else: return True @@ -124,7 +130,7 @@ class PRChecker(object): return False def get_pr_files(self): - """ Get files in pull request. """ + """Get files in pull request.""" page = 0 file_dict = {} file_count = 0 @@ -135,20 +141,23 @@ class PRChecker(object): for f in files: file_dict[PADDLE_ROOT + f.filename] = f.status file_count += 1 - if file_count == 30: #if pr file count = 31, nend to run all case + if file_count == 30: # if pr file count = 31, nend to run all case break page += 1 print("pr modify files: %s" % file_dict) return file_dict def get_is_white_file(self, filename): - """ judge is white file in pr's files. 
""" + """judge is white file in pr's files.""" isWhiteFile = False - not_white_files = (PADDLE_ROOT + 'cmake/', PADDLE_ROOT + 'patches/', - PADDLE_ROOT + 'tools/dockerfile/', - PADDLE_ROOT + 'tools/windows/', - PADDLE_ROOT + 'tools/test_runner.py', - PADDLE_ROOT + 'tools/parallel_UT_rule.py') + not_white_files = ( + PADDLE_ROOT + 'cmake/', + PADDLE_ROOT + 'patches/', + PADDLE_ROOT + 'tools/dockerfile/', + PADDLE_ROOT + 'tools/windows/', + PADDLE_ROOT + 'tools/test_runner.py', + PADDLE_ROOT + 'tools/parallel_UT_rule.py', + ) if 'cmakelist' in filename.lower(): isWhiteFile = False elif filename.startswith((not_white_files)): @@ -162,13 +171,16 @@ class PRChecker(object): if filetype == 'py': result = self.__get_comment_by_prog(content, self.py_prog_oneline) result.extend( - self.__get_comment_by_prog(content, self.py_prog_multiline_a)) + self.__get_comment_by_prog(content, self.py_prog_multiline_a) + ) result.extend( - self.__get_comment_by_prog(content, self.py_prog_multiline_b)) + self.__get_comment_by_prog(content, self.py_prog_multiline_b) + ) if filetype == 'cc': result = self.__get_comment_by_prog(content, self.cc_prog_oneline) result.extend( - self.__get_comment_by_prog(content, self.cc_prog_multiline)) + self.__get_comment_by_prog(content, self.cc_prog_multiline) + ) return result def __get_comment_by_prog(self, content, prog): @@ -181,15 +193,15 @@ class PRChecker(object): return result def get_comment_of_file(self, f): - #content = self.repo.get_contents(f.replace(PADDLE_ROOT, ''), 'pull/').decoded_content - #todo: get file from github + # content = self.repo.get_contents(f.replace(PADDLE_ROOT, ''), 'pull/').decoded_content + # todo: get file from github with open(f, encoding="utf-8") as fd: lines = fd.readlines() lineno = 1 inputs = '' for line in lines: - #for line in content.split('\n'): - #input += str(lineno) + '|' + line + '\n' + # for line in content.split('\n'): + # input += str(lineno) + '|' + line + '\n' inputs += str(lineno) + '|' + line lineno += 1 fietype = '' @@ -227,8 +239,9 @@ class PRChecker(object): end += 1 if data[ix][0] == '+': line_list = file_to_diff_lines.get(filename) - line = '{}{}'.format(lineno, - data[ix].replace('+', '|', 1)) + line = '{}{}'.format( + lineno, data[ix].replace('+', '|', 1) + ) if line_list: line_list.append(line) else: @@ -254,9 +267,11 @@ class PRChecker(object): return True def get_all_count(self): - p = subprocess.Popen("cd {}build && ctest -N".format(PADDLE_ROOT), - shell=True, - stdout=subprocess.PIPE) + p = subprocess.Popen( + "cd {}build && ctest -N".format(PADDLE_ROOT), + shell=True, + stdout=subprocess.PIPE, + ) out, err = p.communicate() for line in out.splitlines(): if 'Total Tests:' in str(line): @@ -264,12 +279,13 @@ class PRChecker(object): return int(all_counts) def file_is_unnit_test(self, filename): - #get all testcases by ctest-N + # get all testcases by ctest-N all_ut_file = '%s/build/all_ut_file' % PADDLE_ROOT os.system( "cd %s/build && ctest -N | awk -F ': ' '{print $2}' | sed '/^$/d' | sed '$d' > %s" - % (PADDLE_ROOT, all_ut_file)) - #determine whether filename is in all_ut_case + % (PADDLE_ROOT, all_ut_file) + ) + # determine whether filename is in all_ut_case with open(all_ut_file, 'r') as f: (filepath, tempfilename) = os.path.split(filename) for f_file in f: @@ -279,7 +295,7 @@ class PRChecker(object): return False def get_pr_ut(self): - """ Get unit tests in pull request. 
""" + """Get unit tests in pull request.""" if self.full_case: return '' check_added_ut = False @@ -288,7 +304,8 @@ class PRChecker(object): ret = self.__urlretrieve( 'https://paddle-docker-tar.bj.bcebos.com/pre_test/ut_file_map.json', - 'ut_file_map.json') + 'ut_file_map.json', + ) if not ret: print('PREC download file_ut.json failed') exit(1) @@ -303,19 +320,23 @@ class PRChecker(object): filterFiles = [] file_list = [] file_dict = self.get_pr_files() - if len(file_dict) == 30: #if pr file count = 31, nend to run all case + if len(file_dict) == 30: # if pr file count = 31, nend to run all case return '' for filename in file_dict: if filename.startswith(PADDLE_ROOT + 'python/'): file_list.append(filename) elif filename.startswith(PADDLE_ROOT + 'paddle/'): - if filename.startswith((PADDLE_ROOT + 'paddle/infrt', - PADDLE_ROOT + 'paddle/utils')): + if filename.startswith( + (PADDLE_ROOT + 'paddle/infrt', PADDLE_ROOT + 'paddle/utils') + ): filterFiles.append(filename) elif filename.startswith(PADDLE_ROOT + 'paddle/scripts'): if filename.startswith( - (PADDLE_ROOT + 'paddle/scripts/paddle_build.sh', - PADDLE_ROOT + 'paddle/scripts/paddle_build.bat')): + ( + PADDLE_ROOT + 'paddle/scripts/paddle_build.sh', + PADDLE_ROOT + 'paddle/scripts/paddle_build.bat', + ) + ): file_list.append(filename) else: filterFiles.append(filename) @@ -334,7 +355,8 @@ class PRChecker(object): ut_list.append('filterfiles_placeholder') ret = self.__urlretrieve( 'https://paddle-docker-tar.bj.bcebos.com/pre_test/prec_delta', - 'prec_delta') + 'prec_delta', + ) if ret: with open('prec_delta') as delta: for ut in delta: @@ -343,17 +365,25 @@ class PRChecker(object): print('PREC download prec_delta failed') exit(1) PRECISION_TEST_Cases_ratio = format( - float(len(ut_list)) / float(self.get_all_count()), '.2f') + float(len(ut_list)) / float(self.get_all_count()), '.2f' + ) print("filterFiles: %s" % filterFiles) print("ipipe_log_param_PRECISION_TEST: true") - print("ipipe_log_param_PRECISION_TEST_Cases_count: %s" % - len(ut_list)) - print("ipipe_log_param_PRECISION_TEST_Cases_ratio: %s" % - PRECISION_TEST_Cases_ratio) + print( + "ipipe_log_param_PRECISION_TEST_Cases_count: %s" % len(ut_list) + ) + print( + "ipipe_log_param_PRECISION_TEST_Cases_ratio: %s" + % PRECISION_TEST_Cases_ratio + ) return '\n'.join(ut_list) else: for f in file_list: - if current_system == "Darwin" or current_system == "Windows" or self.suffix == ".py3": + if ( + current_system == "Darwin" + or current_system == "Windows" + or self.suffix == ".py3" + ): f_judge = f.replace(PADDLE_ROOT, '/paddle/', 1) f_judge = f_judge.replace('//', '/') else: @@ -362,25 +392,34 @@ class PRChecker(object): if f_judge.endswith('.md'): ut_list.append('md_placeholder') onlyCommentsFilesOrXpu.append(f_judge) - elif 'tests/unittests/xpu' in f_judge or 'tests/unittests/npu' in f_judge or 'op_npu.cc' in f_judge: + elif ( + 'tests/unittests/xpu' in f_judge + or 'tests/unittests/npu' in f_judge + or 'op_npu.cc' in f_judge + ): ut_list.append('xpu_npu_placeholder') onlyCommentsFilesOrXpu.append(f_judge) elif f_judge.endswith(('.h', '.cu', '.cc', '.py')): - #determine whether the new added file is a member of added_ut + # determine whether the new added file is a member of added_ut if file_dict[f] in ['added']: f_judge_in_added_ut = False - with open('{}/added_ut'.format( - PADDLE_ROOT)) as utfile: - (filepath, - tempfilename) = os.path.split(f_judge) + with open( + '{}/added_ut'.format(PADDLE_ROOT) + ) as utfile: + (filepath, tempfilename) = os.path.split( + f_judge + ) for f_file 
in utfile: - if f_file.strip('\n') == tempfilename.split( - ".")[0]: + if ( + f_file.strip('\n') + == tempfilename.split(".")[0] + ): f_judge_in_added_ut = True if f_judge_in_added_ut == True: print( "Adding new unit tests not hit mapFiles: %s" - % f_judge) + % f_judge + ) else: notHitMapFiles.append(f_judge) elif file_dict[f] in ['removed']: @@ -394,9 +433,11 @@ class PRChecker(object): else: notHitMapFiles.append(f_judge) else: - notHitMapFiles.append( - f_judge) if file_dict[f] != 'removed' else print( - "remove file not hit mapFiles: %s" % f_judge) + notHitMapFiles.append(f_judge) if file_dict[ + f + ] != 'removed' else print( + "remove file not hit mapFiles: %s" % f_judge + ) else: if file_dict[f] not in ['removed']: if self.is_only_comment(f): @@ -420,7 +461,8 @@ class PRChecker(object): if ut_list: ret = self.__urlretrieve( 'https://paddle-docker-tar.bj.bcebos.com/pre_test/prec_delta', - 'prec_delta') + 'prec_delta', + ) if ret: with open('prec_delta') as delta: for ut in delta: @@ -430,13 +472,17 @@ class PRChecker(object): exit(1) print("hitMapFiles: %s" % hitMapFiles) print("ipipe_log_param_PRECISION_TEST: true") - print("ipipe_log_param_PRECISION_TEST_Cases_count: %s" % - len(ut_list)) + print( + "ipipe_log_param_PRECISION_TEST_Cases_count: %s" + % len(ut_list) + ) PRECISION_TEST_Cases_ratio = format( - float(len(ut_list)) / float(self.get_all_count()), - '.2f') - print("ipipe_log_param_PRECISION_TEST_Cases_ratio: %s" % - PRECISION_TEST_Cases_ratio) + float(len(ut_list)) / float(self.get_all_count()), '.2f' + ) + print( + "ipipe_log_param_PRECISION_TEST_Cases_ratio: %s" + % PRECISION_TEST_Cases_ratio + ) if len(filterFiles) != 0: print("filterFiles: %s" % filterFiles) return '\n'.join(ut_list) diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py index 9fe6554ee6e32db36958eebad7d703663fd03aa7..293fed5e4b3b953ddafb1c99af356db3690208da 100644 --- a/tools/get_single_test_cov.py +++ b/tools/get_single_test_cov.py @@ -35,10 +35,16 @@ def getFNDAFile(rootPath, test): def analysisFNDAFile(rootPath, test): - related_ut_map_file = '%s/build/ut_map/%s/related_%s.txt' % (rootPath, test, - test) + related_ut_map_file = '%s/build/ut_map/%s/related_%s.txt' % ( + rootPath, + test, + test, + ) notrelated_ut_map_file = '%s/build/ut_map/%s/notrelated_%s.txt' % ( - rootPath, test, test) + rootPath, + test, + test, + ) os.system('touch %s' % related_ut_map_file) os.system('touch %s' % notrelated_ut_map_file) fn_filename = '%s/build/ut_map/%s/fnda.tmp' % (rootPath, test) @@ -60,22 +66,30 @@ def analysisFNDAFile(rootPath, test): fn = message_list[i] matchObj = re.match( r'(.*)Maker(.*)|(.*)Touch(.*)Regist(.*)|(.*)Touch(.*)JitKernel(.*)|(.*)converterC2Ev(.*)', - fn, re.I) + fn, + re.I, + ) if matchObj == None: OP_REGIST = False break if OP_REGIST == False: related_file_list.append(clazz_filename) - os.system('echo %s >> %s' % - (clazz_filename, related_ut_map_file)) + os.system( + 'echo %s >> %s' % (clazz_filename, related_ut_map_file) + ) else: - os.system('echo %s >> %s' % - (clazz_filename, notrelated_ut_map_file)) + os.system( + 'echo %s >> %s' % (clazz_filename, notrelated_ut_map_file) + ) else: if clazz_filename != '': - if clazz_filename not in related_file_list: # xx.pb.cc in RELATED xx.pb.h not in RELATED - os.system('echo %s >> %s' % - (clazz_filename, notrelated_ut_map_file)) + if ( + clazz_filename not in related_file_list + ): # xx.pb.cc in RELATED xx.pb.h not in RELATED + os.system( + 'echo %s >> %s' + % (clazz_filename, notrelated_ut_map_file) + ) f.close() @@ -83,10 
+97,12 @@ def getCovinfo(rootPath, test): ut_map_path = '%s/build/ut_map/%s' % (rootPath, test) os.system( 'cd %s && lcov --capture -d . -o coverage.info --rc lcov_branch_coverage=0 > /dev/null 2>&1' - % ut_map_path) + % ut_map_path + ) os.system( "cd %s && lcov --extract coverage.info '/paddle/paddle/fluid/framework/*' '/paddle/paddle/fluid/imperative/*' '/paddle/paddle/fluid/inference/*' '/paddle/paddle/fluid/memory/*' '/paddle/paddle/fluid/operators/*' '/paddle/paddle/fluid/string/*' '/paddle/paddle/fluid/distributed/*' '/paddle/paddle/fluid/platform/*' '/paddle/paddle/fluid/pybind/*' '/paddle/build/*' -o coverage.info.tmp --rc lcov_branch_coverage=0 > /dev/null 2>&1" - % ut_map_path) + % ut_map_path + ) os.system('rm -rf %s/paddle' % ut_map_path) os.system('rm -rf %s/coverage.info' % ut_map_path) getFNDAFile(rootPath, test) diff --git a/tools/get_ut_file_map.py b/tools/get_ut_file_map.py index 5afc6b03f2f96ddda297babe0483441bdd26bee3..2bd777cc8276e118977c1fcc412e7e703e56ff36 100644 --- a/tools/get_ut_file_map.py +++ b/tools/get_ut_file_map.py @@ -35,7 +35,8 @@ def get_all_uts(rootPath): all_uts_paddle = '%s/build/all_uts_paddle' % rootPath os.system( r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s' - % (rootPath, all_uts_paddle)) + % (rootPath, all_uts_paddle) + ) def remove_useless_file(rootPath): @@ -75,10 +76,11 @@ def handle_ut_file_map(rootPath): continue elif line.startswith('/paddle/build'): source_file = line.replace('/build', '') - #source_file = re.sub('.pb.*', '.proto', source_file) + # source_file = re.sub('.pb.*', '.proto', source_file) elif 'precise test map fileeee:' in line: - source_file = line.split( - 'precise test map fileeee:')[1].strip() + source_file = line.split('precise test map fileeee:')[ + 1 + ].strip() else: source_file = line if source_file not in ut_file_map: @@ -193,7 +195,8 @@ def ut_file_map_supplement(rootPath): if not os.path.exists(filename) and ut not in prec_delta_new_list: prec_delta_new_list.append(ut) prec_delta_new_list.append( - 'test_py_reader_error_msg') #add a python case for pycoverage + 'test_py_reader_error_msg' + ) # add a python case for pycoverage prec_delta_file = open("/pre_test/prec_delta", 'w') for ut in prec_delta_new_list: prec_delta_file.write(ut + '\n') diff --git a/tools/get_ut_mem_map.py b/tools/get_ut_mem_map.py index dd1511a7cf293d92cf2a35ec458717e1b31c3af5..703f0ab98ac258c04d02dec64de6baf82bef5570 100644 --- a/tools/get_ut_mem_map.py +++ b/tools/get_ut_mem_map.py @@ -39,20 +39,30 @@ def get_ut_mem(rootPath): mem_reserved = round( float( line.split(' : Reserved = ')[1].split( - ', Allocated = ')[0]), 2) + ', Allocated = ' + )[0] + ), + 2, + ) if mem_reserved > mem_reserved1: mem_reserved1 = mem_reserved if 'MAX_GPU_MEMORY_USE=' in line: mem_nvidia = round( float( - line.split('MAX_GPU_MEMORY_USE=')[1].split('\\n') - [0].strip()), 2) + line.split('MAX_GPU_MEMORY_USE=')[1] + .split('\\n')[0] + .strip() + ), + 2, + ) if mem_nvidia > mem_nvidia1: mem_nvidia1 = mem_nvidia if 'Total Test time (real)' in line: caseTime = float( - line.split('Total Test time (real) =')[1].split('sec') - [0].strip()) + line.split('Total Test time (real) =')[1] + .split('sec')[0] + .strip() + ) if mem_reserved1 != -1: case_dic[ut]['mem_reserved'] = mem_reserved1 if mem_nvidia1 != -1: diff --git a/tools/group_case_for_parallel.py b/tools/group_case_for_parallel.py index e2c8eb9ff04e9bc3f14f1b782fd46aa68a418925..8dd5f6f4ca2639b39d146f2e7e246298f8952a18 100644 --- a/tools/group_case_for_parallel.py +++ 
b/tools/group_case_for_parallel.py @@ -19,17 +19,22 @@ import sys def group_case_for_parallel(rootPath): """group cases""" - #wget file + # wget file for filename in [ - 'nightly_case', 'single_card_tests', 'single_card_tests_mem0', - 'multiple_card_tests', 'multiple_card_tests_mem0', - 'exclusive_card_tests', 'exclusive_card_tests_mem0' + 'nightly_case', + 'single_card_tests', + 'single_card_tests_mem0', + 'multiple_card_tests', + 'multiple_card_tests_mem0', + 'exclusive_card_tests', + 'exclusive_card_tests_mem0', ]: os.system( 'cd %s/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/%s --no-check-certificate' - % (rootPath, filename)) + % (rootPath, filename) + ) - #get nightly tests + # get nightly tests nightly_tests_file = open('%s/tools/nightly_case' % rootPath, 'r') nightly_tests = nightly_tests_file.read().strip().split('\n') nightly_tests_file.close() @@ -40,7 +45,7 @@ def group_case_for_parallel(rootPath): '%s/tools/multiple_card_tests_mem0' % rootPath, '%s/tools/multiple_card_tests' % rootPath, '%s/tools/exclusive_card_tests_mem0' % rootPath, - '%s/tools/exclusive_card_tests' % rootPath + '%s/tools/exclusive_card_tests' % rootPath, ] case_file = '%s/build/ut_list' % rootPath if os.path.exists(case_file): @@ -67,12 +72,14 @@ def group_case_for_parallel(rootPath): for line in lines: case_line_list = line.replace('^', '').replace('|', '').split('$') new_case_line_list = list( - set(all_need_run_cases).intersection(set(case_line_list))) + set(all_need_run_cases).intersection(set(case_line_list)) + ) if len(new_case_line_list) != 0: new_case_file_list.append(new_case_line_list) all_group_case += new_case_line_list all_need_run_cases = list( - set(all_need_run_cases).difference(set(all_group_case))) + set(all_need_run_cases).difference(set(all_group_case)) + ) for line in new_case_file_list: cases = '$|^'.join(case for case in line) @@ -81,7 +88,7 @@ def group_case_for_parallel(rootPath): fi.close() new_f.close() - #no parallel cases + # no parallel cases cases = '^job' if len(all_need_run_cases) != 0: for case in all_need_run_cases: diff --git a/tools/handle_h_cu_file.py b/tools/handle_h_cu_file.py index 2dfa014cf95a00513640a04c01323555fea4b826..84fba9f28202f6f72a09d5e96ddc669d65dfdd0c 100644 --- a/tools/handle_h_cu_file.py +++ b/tools/handle_h_cu_file.py @@ -31,9 +31,12 @@ def worker(fun): def threadPool(threadPoolNum): threadPool = [] for i in range(threadPoolNum): - thread = threading.Thread(target=worker, args={ - doFun, - }) + thread = threading.Thread( + target=worker, + args={ + doFun, + }, + ) thread.daemon = True threadPool.append(thread) return threadPool @@ -60,10 +63,12 @@ def insert_pile_to_h_file(rootPath): os.system('echo "\n#include \n" >> %s' % line) os.system( 'echo "__attribute__((constructor)) static void calledFirst%s()\n{" >> %s' - % (func, line)) + % (func, line) + ) os.system( 'echo \' printf("precise test map fileeee: %%s\\\\n", __FILE__);\n}\' >> %s' - % line) + % line + ) os.system('echo "\n#endif" >> %s' % line) @@ -87,7 +92,8 @@ def get_h_cu_file(file_path): ut = filename.replace('^', '').replace('$', '').replace('.log', '') os.system( "cat %s/%s | grep 'precise test map fileeee:'| uniq >> %s/build/ut_map/%s/related_%s.txt" - % (dir_path, filename, rootPath, ut, ut)) + % (dir_path, filename, rootPath, ut, ut) + ) def doFun(file_path): diff --git a/tools/infrt/fake_models/multi_fc.py b/tools/infrt/fake_models/multi_fc.py index 31b6efd8dcb1159602cbfcf73f4ca83b8eb808bf..29b941d5437ca7089ad3ba2463060949c932b308 100644 --- 
a/tools/infrt/fake_models/multi_fc.py +++ b/tools/infrt/fake_models/multi_fc.py @@ -24,18 +24,22 @@ paddle.enable_static() a = fluid.layers.data(name="A", shape=[-1, size], dtype='float32') label = fluid.layers.data(name="label", shape=[size], dtype='float32') -fc_out = fluid.layers.fc(input=a, - size=size, - act="relu", - bias_attr=fluid.ParamAttr(name="fc_bias"), - num_flatten_dims=1) +fc_out = fluid.layers.fc( + input=a, + size=size, + act="relu", + bias_attr=fluid.ParamAttr(name="fc_bias"), + num_flatten_dims=1, +) for i in range(num_layers - 1): - fc_out = fluid.layers.fc(input=fc_out, - size=size, - act="relu", - bias_attr=fluid.ParamAttr(name="fc_bias"), - num_flatten_dims=1) + fc_out = fluid.layers.fc( + input=fc_out, + size=size, + act="relu", + bias_attr=fluid.ParamAttr(name="fc_bias"), + num_flatten_dims=1, + ) cost = fluid.layers.square_error_cost(fc_out, label) avg_cost = fluid.layers.mean(cost) @@ -49,7 +53,14 @@ loss = exe = fluid.Executor(cpu) exe.run(fluid.default_startup_program()) fluid.io.save_inference_model("./multi_fc_model", [a.name], [fc_out], exe) -fluid.io.save_inference_model("./multi_fc_model", [a.name], [fc_out], exe, None, - "fc.pdmodel", "fc.pdiparams") +fluid.io.save_inference_model( + "./multi_fc_model", + [a.name], + [fc_out], + exe, + None, + "fc.pdmodel", + "fc.pdiparams", +) print('output name', fc_out.name) diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py index 7b97a06619eb3b32d5deeacb2a7244b8947672d8..313d844d6f6ae7fe54072e39b7f6ad7256c8bb73 100644 --- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py +++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py @@ -82,8 +82,11 @@ def get_attrs_info(op_proto, op_attrs_proto): attrs_info[attr_name] = {} attrs_info[attr_name][TYPE] = attr_proto.type attrs_info[attr_name][GENERATED] = attr_proto.generated - attrs_info[attr_name][DEFAULT_VALUE] = attrs_default_values[ - attr_name] if attr_name in attrs_default_values else None + attrs_info[attr_name][DEFAULT_VALUE] = ( + attrs_default_values[attr_name] + if attr_name in attrs_default_values + else None + ) attrs_info[attr_name][EXTRA] = attr_proto.extra attrs_info[attr_name][QUANT] = attr_proto.quant return attrs_info @@ -118,20 +121,26 @@ def generate_all_ops_inputs_outputs_map(op_descs): inputs = list() outpus = list() for input_ in op_proto[INPUTS]: - if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][ - input_][INTERMEDIATE] != True: + if ( + op_proto[INPUTS][input_][EXTRA] != True + and op_proto[INPUTS][input_][INTERMEDIATE] != True + ): inputs.append(input_) for output_ in op_proto[OUTPUTS]: - if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[OUTPUTS][ - output_][INTERMEDIATE] != True: + if ( + op_proto[OUTPUTS][output_][EXTRA] != True + and op_proto[OUTPUTS][output_][INTERMEDIATE] != True + ): outpus.append(output_) ops_inputs_map[op_type] = inputs ops_outputs_map[op_type] = outpus # 2. 
Generate Cpp style map str cpp_style_ops_inputs_map_str = "" - start_ = "#include \n#include \n#include \n" + \ - "const std::unordered_map> pd_dialect_inputs_info_map_ = {\n" + start_ = ( + "#include \n#include \n#include \n" + + "const std::unordered_map> pd_dialect_inputs_info_map_ = {\n" + ) ops_inputs_str = "" for ele in ops_inputs_map.items(): op_name = ele[0] @@ -139,11 +148,14 @@ def generate_all_ops_inputs_outputs_map(op_descs): op_inputs_str = "{" input_idx = 0 for op_input in op_inputs: - op_input_str = '{left_brace}"{op_input}", {input_idx}{right_brace}, '.format( - left_brace="{", - op_input=op_input, - input_idx=input_idx, - right_brace="}") + op_input_str = ( + '{left_brace}"{op_input}", {input_idx}{right_brace}, '.format( + left_brace="{", + op_input=op_input, + input_idx=input_idx, + right_brace="}", + ) + ) input_idx = input_idx + 1 op_inputs_str = op_inputs_str + op_input_str op_inputs_str = op_inputs_str[:-2] + "}" @@ -151,7 +163,8 @@ def generate_all_ops_inputs_outputs_map(op_descs): left_brace="{", op_name=op_name, op_inputs=op_inputs_str, - right_brace="}") + right_brace="}", + ) ops_inputs_str = ops_inputs_str + " " + pair ops_inputs_str = ops_inputs_str[:-2] cpp_style_ops_inputs_map_str = start_ + ops_inputs_str + "\n};" @@ -165,11 +178,14 @@ def generate_all_ops_inputs_outputs_map(op_descs): op_outputs_str = "{" output_idx = 0 for op_output in op_outputs: - op_output_str = '{left_brace}"{op_output}", {output_idx}{right_brace}, '.format( - left_brace="{", - op_output=op_output, - output_idx=output_idx, - right_brace="}") + op_output_str = ( + '{left_brace}"{op_output}", {output_idx}{right_brace}, '.format( + left_brace="{", + op_output=op_output, + output_idx=output_idx, + right_brace="}", + ) + ) output_idx = output_idx + 1 op_outputs_str = op_outputs_str + op_output_str op_outputs_str = op_outputs_str[:-2] + "}" @@ -177,7 +193,8 @@ def generate_all_ops_inputs_outputs_map(op_descs): left_brace="{", op_name=op_name, op_outputs=op_outputs_str, - right_brace="}") + right_brace="}", + ) ops_outputs_str = ops_outputs_str + " " + pair ops_outputs_str = ops_outputs_str[:-2] cpp_style_ops_outputs_map_str = start_ + ops_outputs_str + "\n};" @@ -196,9 +213,11 @@ def get_constraint(op_type, op_proto): optional_input_num_ = 0 for input_ in op_proto[INPUTS]: - if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][input_][ - INTERMEDIATE] != True and op_proto[INPUTS][input_][ - DISPENSABLE] == True: + if ( + op_proto[INPUTS][input_][EXTRA] != True + and op_proto[INPUTS][input_][INTERMEDIATE] != True + and op_proto[INPUTS][input_][DISPENSABLE] == True + ): optional_input_num_ += 1 if optional_input_num_ > 1: constraint += ", AttrSizedOperandSegments" @@ -237,11 +256,21 @@ def convert_op_proto_into_mlir(op_descs): # 2. 
Op dialect # skip list ( ops whose dialect can not be generated automatically will be recorded here) skipped_op_list = [ - "cos_sim", "fused_embedding_seq_pool", "cosh", "kron", "recurrent", - "while", "conditional_block", "set_value", "run_program" + "cos_sim", + "fused_embedding_seq_pool", + "cosh", + "kron", + "recurrent", + "while", + "conditional_block", + "set_value", + "run_program", ] skipped_attr_list = [ - "trainable_statistics", "use_global_stats", "is_test", "use_quantizer" + "trainable_statistics", + "use_global_stats", + "is_test", + "use_quantizer", ] original_ops_ = get_original_ops() @@ -256,7 +285,8 @@ def convert_op_proto_into_mlir(op_descs): op_type_capitalize=op_type.capitalize(), constraint=constraint_, op_type=op_type, - left_brace="{") + left_brace="{", + ) SUMMARY = ' let summary = "{} op";\n'.format(op_type) # 2.2 Description @@ -265,27 +295,44 @@ def convert_op_proto_into_mlir(op_descs): for line_ in origin_contents: contents = contents + " {}\n".format(line_) DESCRIPTION = " let description = [{left_brace}\n{description} {right_brace}];\n".format( - left_brace="{", description=contents, right_brace="}") + left_brace="{", description=contents, right_brace="}" + ) # 2.3 arguments info ARGUMENTS = "" - if (len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0): + if len(op_proto[INPUTS]) > 0 or len(op_proto[ATTRS]) > 0: ARGUMENTS = " let arguments = (ins " # 2.3.1 inputs for input_ in op_proto[INPUTS]: - if op_proto[INPUTS][input_][EXTRA] != True and op_proto[INPUTS][ - input_][INTERMEDIATE] != True: + if ( + op_proto[INPUTS][input_][EXTRA] != True + and op_proto[INPUTS][input_][INTERMEDIATE] != True + ): if op_proto[INPUTS][input_][DISPENSABLE] != True: if op_proto[INPUTS][input_][DUPLICABLE] != True: - ARGUMENTS = ARGUMENTS + " PD_Tensor:$" + input_ + "," + ARGUMENTS = ( + ARGUMENTS + " PD_Tensor:$" + input_ + "," + ) else: - ARGUMENTS = ARGUMENTS + " PD_Tensor_Array:$" + input_ + "," + ARGUMENTS = ( + ARGUMENTS + " PD_Tensor_Array:$" + input_ + "," + ) else: if op_proto[INPUTS][input_][DUPLICABLE] != True: - ARGUMENTS = ARGUMENTS + " Optional:$" + input_ + "," + ARGUMENTS = ( + ARGUMENTS + + " Optional:$" + + input_ + + "," + ) else: - ARGUMENTS = ARGUMENTS + " Optional:$" + input_ + "," + ARGUMENTS = ( + ARGUMENTS + + " Optional:$" + + input_ + + "," + ) # unsupported: BLOCK = 8; BLOCKS = 10; attr_mlir_converter = { @@ -298,70 +345,104 @@ def convert_op_proto_into_mlir(op_descs): 6: 'BoolAttr', 7: 'BoolArrayAttr', 9: 'SI64Attr', - 11: 'I64ArrayAttr' + 11: 'I64ArrayAttr', } # 2.3.2 attributes for attr in op_proto[ATTRS]: - if (op_proto[ATTRS][attr][EXTRA] - == True) or (attr in skipped_attr_list): + if (op_proto[ATTRS][attr][EXTRA] == True) or ( + attr in skipped_attr_list + ): continue if op_proto[ATTRS][attr][DEFAULT_VALUE] != None: if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter: default_value = str( - op_proto[ATTRS][attr][DEFAULT_VALUE]) - if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] - in [ - 'I32ArrayAttr', 'F32ArrayAttr', - 'StrArrayAttr', 'BoolArrayAttr', - 'I64ArrayAttr' - ]): - default_value = default_value.replace('[', - '{').replace( - ']', '}') - if (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] - in ['BoolAttr', 'BoolArrayAttr']): + op_proto[ATTRS][attr][DEFAULT_VALUE] + ) + if attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in [ + 'I32ArrayAttr', + 'F32ArrayAttr', + 'StrArrayAttr', + 'BoolArrayAttr', + 'I64ArrayAttr', + ]: + default_value = default_value.replace( + '[', '{' + ).replace(']', '}') + if 
attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] in [ + 'BoolAttr', + 'BoolArrayAttr', + ]: default_value = default_value.lower() - elif (attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] - in ['StrAttr', 'StrArrayAttr']): + elif attr_mlir_converter[ + op_proto[ATTRS][attr][TYPE] + ] in ['StrAttr', 'StrArrayAttr']: default_value = default_value.replace('\'', '\\\"') - if attr_mlir_converter[op_proto[ATTRS][attr] - [TYPE]] == "StrAttr": + if ( + attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] + == "StrAttr" + ): default_value = '\\\"' + default_value + '\\\"' - attr_list = " DefaultValuedAttr<" + attr_mlir_converter[ - op_proto[ATTRS][attr] - [TYPE]] + ", \"" + default_value + "\">:$" + attr + "," + attr_list = ( + " DefaultValuedAttr<" + + attr_mlir_converter[op_proto[ATTRS][attr][TYPE]] + + ", \"" + + default_value + + "\">:$" + + attr + + "," + ) ARGUMENTS += attr_list else: - print("Error:" + op_type + ":" + attr + ":" + - str(op_proto[ATTRS][attr][TYPE])) + print( + "Error:" + + op_type + + ":" + + attr + + ":" + + str(op_proto[ATTRS][attr][TYPE]) + ) else: if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter: - attr_type_ = attr_mlir_converter[op_proto[ATTRS][attr] - [TYPE]] - if (attr_type_ in [ - 'StrAttr', 'I32ArrayAttr', 'F32ArrayAttr', - 'StrArrayAttr', 'BoolArrayAttr', 'I64ArrayAttr' - ]): + attr_type_ = attr_mlir_converter[ + op_proto[ATTRS][attr][TYPE] + ] + if attr_type_ in [ + 'StrAttr', + 'I32ArrayAttr', + 'F32ArrayAttr', + 'StrArrayAttr', + 'BoolArrayAttr', + 'I64ArrayAttr', + ]: attr_list = attr_type_ + ":$" + attr + "," ARGUMENTS += attr_list else: - print(" ouch Error:" + op_type + ":" + attr + ":" + - str(op_proto[ATTRS][attr][TYPE])) + print( + " ouch Error:" + + op_type + + ":" + + attr + + ":" + + str(op_proto[ATTRS][attr][TYPE]) + ) ARGUMENTS = ARGUMENTS[:-1] + ");\n" # 2.4 results info RESULTS = "" - if (len(op_proto[OUTPUTS]) > 0): + if len(op_proto[OUTPUTS]) > 0: outputs = "" for output_ in op_proto[OUTPUTS]: - if op_proto[OUTPUTS][output_][EXTRA] != True and op_proto[ - OUTPUTS][output_][INTERMEDIATE] != True: + if ( + op_proto[OUTPUTS][output_][EXTRA] != True + and op_proto[OUTPUTS][output_][INTERMEDIATE] != True + ): if op_proto[OUTPUTS][output_][DUPLICABLE] != True: outputs = outputs + "PD_Tensor:${},".format(output_) else: outputs = outputs + "PD_Tensor_Array:${},".format( - output_) + output_ + ) RESULTS = "\n let results = (outs {});\n".format(outputs[:-1]) with open(dst_dialect_file, 'a') as ops_mlir_file: @@ -376,8 +457,10 @@ def convert_op_proto_into_mlir(op_descs): ops_mlir_file.write("\n#endif // PD_OPS") print("Skipped ops num: " + str(len(skipped_op_list))) - print("Automatically generated op dialects num: " + - str(len(automatically_generated_op_dialect))) + print( + "Automatically generated op dialects num: " + + str(len(automatically_generated_op_dialect)) + ) if __name__ == "__main__": diff --git a/tools/infrt/generate_phi_kernel_dialect.py b/tools/infrt/generate_phi_kernel_dialect.py index 686a5e12a6c306a26a3d730b0ce956072c16e2d3..a5d1003dbb9f22ada9ec457d3b5c874a3eca4d15 100644 --- a/tools/infrt/generate_phi_kernel_dialect.py +++ b/tools/infrt/generate_phi_kernel_dialect.py @@ -17,21 +17,21 @@ import yaml import os from get_compat_kernel_signature import get_compat_kernels_info -#TODO @DannyIsFunny: more attr types need to be supported. +# TODO @DannyIsFunny: more attr types need to be supported. 
attr_type_converter = { "int": 'SI32Attr', "bool": 'BoolAttr', "int64_t": 'SI64Attr', "float": 'F32Attr', "string": 'StrAttr', - "vector": 'I32ArrayAttr' + "vector": 'I32ArrayAttr', } target_type_converter = {"CPU": "CPU", "GPU": "GPU", "Undefined": "UNK"} layout_type_converter = { "NCHW": "NCHW", "NHWC": "NHWC", - "Undefined(AnyLayout)": "ANY" + "Undefined(AnyLayout)": "ANY", } precision_type_converter = { "uint8": "UINT8", @@ -46,7 +46,7 @@ precision_type_converter = { "complex64": "COMPLEX64", "complex128": "COMPLEX128", "bool": "BOOL", - "Undefined": "UNK" + "Undefined": "UNK", } kernel_types_info_file = "./kernels.json" @@ -88,16 +88,19 @@ def generate_kernel_name(op_name, place_str): layout_ = layout_type_converter[layout_.strip()] precision_ = precision_type_converter[precision_.strip()] class_name_ = "{}{}".format( - op_name.replace("_", "").title(), "".join([ - target_.strip().title(), - precision_.strip(), - layout_.strip().title().title() - ])) + op_name.replace("_", "").title(), + "".join( + [ + target_.strip().title(), + precision_.strip(), + layout_.strip().title().title(), + ] + ), + ) alias_ = "{}.{}".format( op_name, - ".".join([target_.strip(), - precision_.strip(), - layout_.strip()])) + ".".join([target_.strip(), precision_.strip(), layout_.strip()]), + ) return alias_, class_name_ @@ -111,8 +114,9 @@ def generate_attrs_info(op_name, attrs_info): for index in range(len(attrs_info)): attr_name = kernel_attrs_names[op_name]["attrs"][index] attr_type = attr_type_converter[attrs_info[index]] - attrs_args_ += '{type_}:${name_},'.format(type_=attr_type, - name_=attr_name) + attrs_args_ += '{type_}:${name_},'.format( + type_=attr_type, name_=attr_name + ) return attrs_args_[:-1] @@ -125,7 +129,8 @@ def generate_inputs_info(input_info): layout_ = layout_type_converter[layout_.strip()] precision_ = precision_type_converter[precision_.strip()] input_args_ += " DenseTensor<\"{}\",\"{}\",\"{}\">:$in{},".format( - target_.strip(), precision_.strip(), layout_.strip(), str(index)) + target_.strip(), precision_.strip(), layout_.strip(), str(index) + ) input_args_ = input_args_[:-1] return input_args_ @@ -134,12 +139,13 @@ def generate_arguments_info(op_name, input_info, attr_info): input_args = generate_inputs_info(input_info) attr_args = generate_attrs_info(op_name, attr_info) context_args = "Context:$dev_ctx" - argument_list = [context_args - ] + input_args.split(",") + attr_args.split(",") - while ("" in argument_list): + argument_list = ( + [context_args] + input_args.split(",") + attr_args.split(",") + ) + while "" in argument_list: argument_list.remove("") argument_ = ",".join(argument_list) - return (("let arguments = (ins {});".format(argument_.strip(",")))) + return "let arguments = (ins {});".format(argument_.strip(",")) def generate_results_info(output_info): @@ -151,8 +157,9 @@ def generate_results_info(output_info): layout_ = layout_type_converter[layout_.strip()] precision_ = precision_type_converter[precision_.strip()] output_args_ += " DenseTensor<\"{}\",\"{}\",\"{}\">:$out{},".format( - target_.strip(), precision_.strip(), layout_.strip(), str(index)) - return ("{});".format(output_args_[:-1])) + target_.strip(), precision_.strip(), layout_.strip(), str(index) + ) + return "{});".format(output_args_[:-1]) def generate_supported_kernel_list(load_dict): @@ -205,11 +212,13 @@ def generate_cpu_kernel_dialect(op_name, kernel_alias_, kernel_info): alias, class_name = generate_kernel_name(op_name, kernel_alias_) summary = 'let summary = "{name}";'.format(name=alias) 
dialect_name = alias.split(".") - dialect_name = dialect_name[0] + "." + dialect_name[2] + "." + dialect_name[ - 3] + dialect_name = ( + dialect_name[0] + "." + dialect_name[2] + "." + dialect_name[3] + ) header = 'def {kernel_name} : PDTCPU_Kernel<"{name}",[NoSideEffect]> {left_brace}'.format( - kernel_name=class_name, name=dialect_name.lower(), left_brace="{") + kernel_name=class_name, name=dialect_name.lower(), left_brace="{" + ) inputs_ = kernel_info["input"] attributes = kernel_info["attribute"] @@ -223,7 +232,8 @@ def generate_cpu_kernel_dialect(op_name, kernel_alias_, kernel_info): summary_=summary, arguments_=arguments, results_=results, - right_brace="}") + right_brace="}", + ) return kernel_dialect @@ -232,11 +242,13 @@ def generate_gpu_kernel_dialect(op_name, kernel_alias_, kernel_info): alias, class_name = generate_kernel_name(op_name, kernel_alias_) summary = 'let summary = "{name}";'.format(name=alias) dialect_name = alias.split(".") - dialect_name = dialect_name[0] + "." + dialect_name[2] + "." + dialect_name[ - 3] + dialect_name = ( + dialect_name[0] + "." + dialect_name[2] + "." + dialect_name[3] + ) header = 'def {kernel_name} : PDTGPU_Kernel<"{name}",[NoSideEffect]> {left_brace}'.format( - kernel_name=class_name, name=dialect_name.lower(), left_brace="{") + kernel_name=class_name, name=dialect_name.lower(), left_brace="{" + ) inputs_ = kernel_info["input"] attributes = kernel_info["attribute"] arguments = generate_arguments_info(op_name, inputs_, attributes) @@ -249,7 +261,8 @@ def generate_gpu_kernel_dialect(op_name, kernel_alias_, kernel_info): summary_=summary, arguments_=arguments, results_=results, - right_brace="}") + right_brace="}", + ) return kernel_dialect @@ -270,7 +283,7 @@ include \"mlir/Interfaces/LoopLikeInterface.td\"\n\ include \"mlir/IR/OpBase.td\"\n\ include \"paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td\"" - return (comment_ + includes_) + return comment_ + includes_ def get_kernel_target(kernel_alias_): @@ -298,33 +311,52 @@ def main(): for kernel_alias_ in kernel_info: if get_kernel_target(kernel_alias_) == "CPU": kernel_registry = generate_cpu_kernel_dialect( - op_name, kernel_alias_, kernel_info[kernel_alias_]) + op_name, kernel_alias_, kernel_info[kernel_alias_] + ) cpu_registry_ += kernel_registry elif get_kernel_target(kernel_alias_) == "GPU": kernel_registry = generate_gpu_kernel_dialect( - op_name, kernel_alias_, kernel_info[kernel_alias_]) + op_name, kernel_alias_, kernel_info[kernel_alias_] + ) gpu_registry_ += kernel_registry else: - print("Unsupported backend:" + - get_kernel_target(kernel_alias_)) + print( + "Unsupported backend:" + + get_kernel_target(kernel_alias_) + ) end = "#endif // PTEN_KERNELS" - with open("../../paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td", - "w") as dst: - dst.write('{start_}\n{dialect_}\n{end_}'.format( - start_=head, dialect_=cpu_registry_, end_=end)) - with open("../../paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td", - "w") as dst: - dst.write('{start_}\n{dialect_}\n{end_}'.format( - start_=head, dialect_=gpu_registry_, end_=end)) + with open( + "../../paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td", "w" + ) as dst: + dst.write( + '{start_}\n{dialect_}\n{end_}'.format( + start_=head, dialect_=cpu_registry_, end_=end + ) + ) + with open( + "../../paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td", "w" + ) as dst: + dst.write( + '{start_}\n{dialect_}\n{end_}'.format( + start_=head, dialect_=gpu_registry_, end_=end + ) + ) if __name__ == '__main__': if not os.path.exists(kernel_types_info_file): - 
print("Error: '{file_name}' not exist!".format( - file_name=kernel_types_info_file)) + print( + "Error: '{file_name}' not exist!".format( + file_name=kernel_types_info_file + ) + ) if not os.path.exists(kernel_signature_info_file): - print("Error: '{file_name}' not exist!".format( - file_name=kernel_signature_info_file)) + print( + "Error: '{file_name}' not exist!".format( + file_name=kernel_signature_info_file + ) + ) if os.path.exists(kernel_types_info_file) and os.path.exists( - kernel_signature_info_file): + kernel_signature_info_file + ): main() diff --git a/tools/infrt/get_compat_kernel_signature.py b/tools/infrt/get_compat_kernel_signature.py index 104d3ae30c23a77f06fabc9fd93df9d1e39928fc..f5f9afdafd1bc959aae7c6efe79f889a504759bc 100644 --- a/tools/infrt/get_compat_kernel_signature.py +++ b/tools/infrt/get_compat_kernel_signature.py @@ -56,16 +56,21 @@ def get_compat_kernels_info(): content = "" registry = False for line in txt: - if ("KernelSignature(" in line): + if "KernelSignature(" in line: content = "" registry = True - if (registry): + if registry: content += line - if (registry and ";" in line): - data = content.replace("\n", "").replace( - " ", - "").strip("return").strip("KernelSignature(").strip( - r"\);").replace("\"", "").replace("\\", "") + if registry and ";" in line: + data = ( + content.replace("\n", "") + .replace(" ", "") + .strip("return") + .strip("KernelSignature(") + .strip(r"\);") + .replace("\"", "") + .replace("\\", "") + ) registry = False if is_grad_kernel(data): continue @@ -74,14 +79,23 @@ def get_compat_kernels_info(): if name in kernels_info: cur_reg = kernels_info[name] kernels_info[name]["inputs"] = list( - set(registry_info["inputs"] + - kernels_info[name]["inputs"])) + set( + registry_info["inputs"] + + kernels_info[name]["inputs"] + ) + ) kernels_info[name]["attrs"] = list( - set(registry_info["attrs"] + - kernels_info[name]["attrs"])) + set( + registry_info["attrs"] + + kernels_info[name]["attrs"] + ) + ) kernels_info[name]["outputs"] = list( - set(registry_info["outputs"] + - kernels_info[name]["outputs"])) + set( + registry_info["outputs"] + + kernels_info[name]["outputs"] + ) + ) else: kernels_info[name] = registry_info diff --git a/tools/infrt/get_phi_kernel_info.py b/tools/infrt/get_phi_kernel_info.py index af73432a7975655b774548859beb29555f676276..9eeee88276d80c58839a3db40aa29b00d93bbde1 100644 --- a/tools/infrt/get_phi_kernel_info.py +++ b/tools/infrt/get_phi_kernel_info.py @@ -47,30 +47,34 @@ def get_skipped_kernel_list(): def parse_args(): parser = argparse.ArgumentParser("gather phi kernel and infermate info") - parser.add_argument("--paddle_root_path", - type=str, - required=True, - help="root path of paddle src[WORK_PATH/Paddle].") + parser.add_argument( + "--paddle_root_path", + type=str, + required=True, + help="root path of paddle src[WORK_PATH/Paddle].", + ) parser.add_argument( "--kernel_info_file", type=str, required=True, - help="kernel info file generated by get_phi_kernel_function.sh.") - parser.add_argument("--infermeta_wrap_file", - type=str, - required=True, - help="inferMeta wrap info file.") - parser.add_argument("--attr_info_file", - type=str, - required=True, - help="attr info file.") + help="kernel info file generated by get_phi_kernel_function.sh.", + ) + parser.add_argument( + "--infermeta_wrap_file", + type=str, + required=True, + help="inferMeta wrap info file.", + ) + parser.add_argument( + "--attr_info_file", type=str, required=True, help="attr info file." 
+ ) parser.add_argument( "--generate_file", type=str, required=True, - default= - "../paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc", - help="generated file.") + default="../paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc", + help="generated file.", + ) args = parser.parse_args() return args @@ -167,15 +171,18 @@ def gen_include_headers(): def gen_namespace(): - return (""" + return ( + """ namespace infrt { namespace kernel { -""", """ +""", + """ } // namespace kernel } // namespace infrt -""") +""", + ) def gen_context(val): @@ -203,7 +210,7 @@ def gen_kernel_func(val, ctx_name, dtype_name): st = val.index('<') ed = val.index('>') func_name = val[:st] - template_name = val[st + 1:ed] + template_name = val[st + 1 : ed] if '::phi::' in template_name: return "&::phi::" + val else: @@ -273,7 +280,7 @@ def gen_register_code_info(item: List[str], attr_data: Dict[str, List[str]]): attr_data: {'phi_cpu.arg_min.float32.any': ['axisBool', 'keepdimsBool', 'flatten', 'dtype']} """ ctx_name, ir_ctx_name = gen_context(item[1]) - if (ctx_name == ""): + if ctx_name == "": return "" item[2] = gen_layout(item[2]) ir_dtypes, origin_dtypes = gen_dtype(item[4:-1]) @@ -290,11 +297,19 @@ def gen_register_code_info(item: List[str], attr_data: Dict[str, List[str]]): kernel_func = gen_kernel_func(item[3], ctx_name, origin_dtype) if item[0].lower() in skipped_kernel_list: continue - ir_name = ir_ctx_name + '.' + item[0].lower( - ) + '.' + ir_dtype + '.' + item[2].lower() + ir_name = ( + ir_ctx_name + + '.' + + item[0].lower() + + '.' + + ir_dtype + + '.' + + item[2].lower() + ) if ir_name in attr_data.keys() and attr_data[ir_name] is not None: attr_names = ', '.join( - ["\"" + a + "\"" for a in attr_data[ir_name]]) + ["\"" + a + "\"" for a in attr_data[ir_name]] + ) res += f""" registry->AddKernel("{ir_name}",""" @@ -320,8 +335,9 @@ registry->AddKernel("{ir_name}",""" return res -def gen_register_info(resources: List[List[str]], attr_data: Dict[str, - List[str]]): +def gen_register_info( + resources: List[List[str]], attr_data: Dict[str, List[str]] +): """ resources: [['add', 'CPU', 'ALL_LAYOUT', 'AddKernel', 'float', 'double', '...'(varaidic types), 'ElementwiseInferMeta'], ...] 
attr_data: {'phi_cpu.arg_min.float32.any': ['axisBool', 'keepdimsBool', 'flatten', 'dtype']} @@ -335,7 +351,7 @@ def gen_register_info(resources: List[List[str]], attr_data: Dict[str, if update_item[1] != "CPU": continue code = gen_register_code_info(item, attr_data) - if (code == ""): + if code == "": continue res += code @@ -347,7 +363,7 @@ def gen_register_info(resources: List[List[str]], attr_data: Dict[str, if update_item[1] != "GPU": continue code = gen_register_code_info(item, attr_data) - if (code == ""): + if code == "": continue res += code res += "#endif // INFRT_WITH_GPU" @@ -356,9 +372,11 @@ def gen_register_info(resources: List[List[str]], attr_data: Dict[str, return res -def gen_phi_kernel_register_code(resources: List[List[str]], - attr_data: Dict[str, List[str]], - src_file_path: str): +def gen_phi_kernel_register_code( + resources: List[List[str]], + attr_data: Dict[str, List[str]], + src_file_path: str, +): source_file = open(src_file_path, 'w') source_file.write(gen_warn_info()) source_file.write(gen_include_headers()) @@ -371,7 +389,9 @@ def gen_phi_kernel_register_code(resources: List[List[str]], if __name__ == "__main__": args = parse_args() - skipped_phi_api_list_file = args.paddle_root_path + skipped_phi_api_list_file + skipped_phi_api_list_file = ( + args.paddle_root_path + skipped_phi_api_list_file + ) api_yaml_file = args.paddle_root_path + api_yaml_file legacy_api_yaml_file = args.paddle_root_path + legacy_api_yaml_file infer_meta_data = [] diff --git a/tools/infrt/print_kernel_pass_info.py b/tools/infrt/print_kernel_pass_info.py index 1e84f3de86f4a6df56587f296945c8e0840f35b8..1a48708b71bc75a7e168dd372ba1e354b9343009 100644 --- a/tools/infrt/print_kernel_pass_info.py +++ b/tools/infrt/print_kernel_pass_info.py @@ -45,7 +45,8 @@ def get_compat_kernels_info(register): is_macro_defination = False for line in txt: if line.strip().startswith( - "#define") and line.strip().endswith("\\"): + "#define" + ) and line.strip().endswith("\\"): is_macro_defination = True continue if is_macro_defination: @@ -53,14 +54,18 @@ def get_compat_kernels_info(register): is_macro_defination = False continue - if (register in line): + if register in line: content = "" registry = True - if (registry): + if registry: content += line - if (registry and ";" in line): - kernel_name = content.replace("\n", "").replace( - " ", "").strip(register).split(",") + if registry and ";" in line: + kernel_name = ( + content.replace("\n", "") + .replace(" ", "") + .strip(register) + .split(",") + ) registry = False kernel_names.append(kernel_name[0]) return remove_grad_kernel(kernel_names) @@ -68,8 +73,9 @@ def get_compat_kernels_info(register): def show_kernel_statistics(backend, kernels): print("=== kernels statistics === ") - print("the number of " + backend + " kernels is: " + str(len(kernels)) + - "\n") + print( + "the number of " + backend + " kernels is: " + str(len(kernels)) + "\n" + ) print(kernels) print("\n") @@ -92,7 +98,7 @@ def get_passes_info(register): continue if register in line: registry_fun_found = True - if (registry_fun_found): + if registry_fun_found: pass_registry_func += line if registry_fun_found: for char in line: diff --git a/tools/jetson_infer_op.py b/tools/jetson_infer_op.py index e132a14373e4f1e2555cb9acbd8acfe4093e9b29..664f2b6616ed6840f3a3b4f207ae09ff6d5a147c 100644 --- a/tools/jetson_infer_op.py +++ b/tools/jetson_infer_op.py @@ -37,7 +37,7 @@ black_list = [ 'test_sync_batch_norm_op', # case too large 'test_reduce_op', - 'test_transpose_op' + 'test_transpose_op', ] 
op_diff_list = [ @@ -51,14 +51,18 @@ def parse_arguments(): :return: """ parser = argparse.ArgumentParser() - parser.add_argument('--shell_name', - type=str, - default='get_op_list.sh', - help='please input right name') - parser.add_argument('--op_list_file', - type=str, - default='list_op.txt', - help='please input right name') + parser.add_argument( + '--shell_name', + type=str, + default='get_op_list.sh', + help='please input right name', + ) + parser.add_argument( + '--op_list_file', + type=str, + default='list_op.txt', + help='please input right name', + ) return parser.parse_args() @@ -85,7 +89,7 @@ def get_prefix(line, end_char='d'): """ i = 0 prefix = '' - while (line[i] != end_char): + while line[i] != end_char: prefix += line[i] i += 1 return prefix @@ -116,7 +120,9 @@ def add_import_skip_return(file, pattern_import, pattern_skip, pattern_return): # add @skip_check_grad_ci match_obj = pattern_2.search(line) if match_obj is not None: - file_data += "@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n" + file_data += ( + "@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n" + ) print("### add @skip_check_grad_ci ####") # delete test_grad_output @@ -155,8 +161,14 @@ def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"): :param inplace_atol: :return: """ - os.system(r"sed -i 's/self.check_output(/self\.check_output\(atol=" + atol + - ",inplace_atol=" + inplace_atol + ",/g\' " + file) + os.system( + r"sed -i 's/self.check_output(/self\.check_output\(atol=" + + atol + + ",inplace_atol=" + + inplace_atol + + ",/g\' " + + file + ) def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'): @@ -182,8 +194,9 @@ def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'): pattern_skip = r"^class .*\(OpTest\):$" pattern_return = r"def test.*grad.*\):$" # change file - add_import_skip_return(file_with_path, pattern_import, pattern_skip, - pattern_return) + add_import_skip_return( + file_with_path, pattern_import, pattern_skip, pattern_return + ) # op_diff if item in op_diff_list: set_diff_value(file_with_path) @@ -240,7 +253,7 @@ def run_file_change(op_list_file): :param op_list_file: :return: """ - if (os.path.exists("flag_change_file.txt")): + if os.path.exists("flag_change_file.txt"): print( "-----maybe op_file has changed, so don't need to change again------" ) @@ -269,9 +282,12 @@ def run_test_second(): "sed -n '/(Failed)$/p' test_op_log.txt | awk '{print $3}' >& rerun_op.txt" ) rerun_list = get_op_list('rerun_op.txt') - if (len(rerun_list)): - print("-------there are " + str(len(rerun_list)) + - " op(s) need to rerun!!!-------") + if len(rerun_list): + print( + "-------there are " + + str(len(rerun_list)) + + " op(s) need to rerun!!!-------" + ) for failed_op in rerun_list: os.system("ctest -R \"(" + failed_op + ")\" ") else: diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py index ea36b7785367e6c001e7b5117fd0924b538c1696..fa5a6c8131576378f217fa173dff94c51c33c9dc 100755 --- a/tools/parallel_UT_rule.py +++ b/tools/parallel_UT_rule.py @@ -669,7 +669,7 @@ HIGH_PARALLEL_JOB_NEW = [ 'test_trt_convert_reduce_sum', 'save_quant2_model_lstm', 'test_trt_convert_slice', - 'test_quant2_int8_lstm_mkldnn' + 'test_quant2_int8_lstm_mkldnn', ] # mem=0 but always timeout or failed : It run 15 job each time in Single cases; @@ -734,315 +734,805 @@ SECONDARY_HIGH_PARALLEL_JOB_NEW = [ # mem=0 but always timeout or failed : It run 12 job each time in Single cases; THIRD_HIGH_PARALLEL_JOB_NEW = [ - 'test_api_impl', 
'test_analyzer_seq_pool1_fuse_compare_zero_copy', - 'test_analyzer_seq_pool1_profile', 'test_analyzer_mobilenet_transpose', - 'test_analyzer_resnet50', 'test_analyzer_int8_resnet50', - 'test_analyzer_int8_mobilenetv2', 'test_analyzer_bfloat16_resnet50', - 'test_analyzer_bfloat16_mobilenetv1', 'test_analyzer_int8_mobilenet_ssd', - 'test_dataset_cifar', 'test_dataset_imdb', 'test_dataset_movielens', - 'test_datasets', 'test_allgather', 'test_c_concat', 'test_c_split', - 'test_collective_reduce', 'test_cyclic_cifar_dataset', 'test_dyn_rnn', - 'test_multiclass_nms_op', 'test_communicator_geo', - 'test_quant_int8_mobilenetv2_mkldnn', 'test_analyzer_seq_pool1', - 'test_analyzer_transformer', 'test_analyzer_transformer_profile', - 'test_analyzer_int8_mobilenetv1', 'test_analyzer_bfloat16_googlenet', - 'test_analyzer_quant_performance_benchmark', 'test_dataset_wmt', - 'test_allreduce', 'test_broadcast', 'test_c_identity', - 'test_collective_sendrecv_api', 'test_fleet_utils', - 'test_fused_elemwise_activation_op', 'test_group_norm_op', - 'test_fleet_launch_nproc', 'test_quant_int8_resnet50_mkldnn', - 'test_quant2_int8_ernie_mkldnn', 'convert_model2dot_ernie' + 'test_api_impl', + 'test_analyzer_seq_pool1_fuse_compare_zero_copy', + 'test_analyzer_seq_pool1_profile', + 'test_analyzer_mobilenet_transpose', + 'test_analyzer_resnet50', + 'test_analyzer_int8_resnet50', + 'test_analyzer_int8_mobilenetv2', + 'test_analyzer_bfloat16_resnet50', + 'test_analyzer_bfloat16_mobilenetv1', + 'test_analyzer_int8_mobilenet_ssd', + 'test_dataset_cifar', + 'test_dataset_imdb', + 'test_dataset_movielens', + 'test_datasets', + 'test_allgather', + 'test_c_concat', + 'test_c_split', + 'test_collective_reduce', + 'test_cyclic_cifar_dataset', + 'test_dyn_rnn', + 'test_multiclass_nms_op', + 'test_communicator_geo', + 'test_quant_int8_mobilenetv2_mkldnn', + 'test_analyzer_seq_pool1', + 'test_analyzer_transformer', + 'test_analyzer_transformer_profile', + 'test_analyzer_int8_mobilenetv1', + 'test_analyzer_bfloat16_googlenet', + 'test_analyzer_quant_performance_benchmark', + 'test_dataset_wmt', + 'test_allreduce', + 'test_broadcast', + 'test_c_identity', + 'test_collective_sendrecv_api', + 'test_fleet_utils', + 'test_fused_elemwise_activation_op', + 'test_group_norm_op', + 'test_fleet_launch_nproc', + 'test_quant_int8_resnet50_mkldnn', + 'test_quant2_int8_ernie_mkldnn', + 'convert_model2dot_ernie', ] # mem != 0: It run 7 job each time in Single cases; 4 job each time in Multi cases; 3 job each time in exclusive cases FOURTH_HIGH_PARALLEL_JOB_NEW = [ - 'test_meshgrid_op', 'test_word2vec', 'test_analyzer_ner', - 'test_fetch_lod_tensor_array', 'test_adagrad_op_v2', - 'test_conv2d_fusion_op', 'test_hapi_amp', 'test_metrics', - 'test_clip_by_norm_op', 'test_lr_scheduler', 'test_generate_proposals_op', - 'test_masked_select_op', 'test_trt_anchor_generator_op', - 'test_imperative_ocr_attention_model', 'test_sentiment', 'test_chunk_op', - 'test_memcpy_op', 'test_warpctc_op', 'test_row_conv_op', - 'test_grid_sample_function', 'test_rnn_nets', 'test_pad3d_op', - 'test_imperative_mnist_sorted_gradient', 'tensor_test', - 'test_tensorrt_engine_op', 'test_dot_op', 'test_real_imag_op', - 'test_adam_optimizer_fp32_fp64', 'test_reduce_op', - 'test_density_prior_box_op', 'test_top_k_op', 'test_grid_generator', - 'test_randn_op', 'test_activation_mkldnn_op', 'test_lac', 'test_pad_op', - 'test_lstmp_op', 'test_loop', 'test_pylayer_op', - 'data_device_transform_test', 'test_trt_roi_align_op', - 'test_nn_functional_hot_op', 'test_top_k_v2_op', 
'test_crop_op', - 'test_conv_bn_fuse_pass', 'test_beam_search_decode_op', 'test_auc_op', - 'test_pool2d_op', 'test_gaussian_random_op', 'test_maximum_op', - 'test_rnn_cell_api', 'device_code_test', 'test_ir_inplace_pass', - 'test_cos_sim_op', 'test_lite_tensor_utils', 'test_fit_a_line', - 'test_mish_op', 'test_transpose_op', 'test_mean_iou', - 'test_conv3d_transpose_op', 'test_jit_save_load', 'test_unsqueeze2_op', - 'test_eager_deletion_while_op', 'test_zeros_like_op', 'test_c_embedding_op', - 'test_regularizer', 'zero_copy_tensor_test', 'test_tensor_shape', - 'test_resnet', 'test_resnet_amp', 'test_dygraph_weight_norm', 'test_tracer', - 'test_list', 'test_sequence_concat', 'test_adaptive_avg_pool1d', - 'test_elementwise_div_op', 'test_conv1d_transpose_layer', 'test_adamw_op', - 'trt_fc_prelu_test', 'test_temporal_shift_op', - 'test_naive_best_fit_gpu_memory_limit', 'dlpack_tensor_test', - 'test_elementwise_max_op', 'test_typing', 'test_asp_pruning_2d_greedy', - 'test_fake_dequantize_op', 'test_crop_tensor_op', - 'test_imperative_load_static_param', 'test_imperative_qat_user_defined', - 'test_anchor_generator_op', 'test_if_else_op', 'test_prepare_op', - 'test_conj_op', 'test_imperative_hook_for_layer', 'test_roi_pool_op', - 'test_strided_slice_op', 'test_norm_all', 'test_weight_decay', - 'test_functional_conv2d', 'test_functional_conv3d_transpose', - 'test_imperative_layer_trainable', 'test_imperative_data_parallel', - 'test_digamma_op', 'test_distribution', 'test_box_clip_op', - 'custom_tensor_test', 'test_marker_op', 'test_dataloader_early_reset', - 'test_gather_nd_op', 'test_tensor_register_hook', 'test_retain_graph', - 'test_network_with_dtype', 'test_basic_api_transformation', 'test_diag', - 'test_lod_array_length_op', 'test_reinforcement_learning', - 'test_softmax_op', 'test_fc_fuse_pass', 'test_adaptive_max_pool2d', - 'test_inverse_op', 'test_declarative', 'test_imperative_double_grad', - 'test_tensor_methods', 'test_pool1d_api', 'system_allocator_test', - 'test_print', 'test_tensor_type_promotion', 'test_bce_with_logits_loss', - 'test_tensor', 'test_cross_op', 'concat_test', 'test_ast_util', - 'test_proximal_adagrad_op', 'test_pairwise_distance', - 'test_imperative_mnist', 'test_beam_search_decoder', - 'test_build_strategy_fusion_group_pass', 'test_dygraph_spectral_norm', - 'test_scale_mkldnn_op', 'test_load_state_dict_from_old_format', - 'test_margin_rank_loss_op', 'test_lookup_table_v2_op', - 'test_mix_precision_all_reduce_fuse', 'test_spp_op', 'test_op_converter', - 'mixed_vector_test', 'test_roi_align_op', 'test_pad_constant_like', - 'test_mul_op', 'test_spectral_norm_op', 'test_transformer', - 'test_for_enumerate', 'test_variable_trans_func', - 'test_squared_l2_distance_op', 'test_quantize_transpiler_v2', - 'test_im2sequence_op', 'test_reader_reset', 'test_one_hot_op', - 'test_adaptive_max_pool1d', 'test_label_smooth_op', - 'test_parallel_executor_fetch_feed', 'test_cast', - 'test_parallel_dygraph_sync_batch_norm', 'test_collect_fpn_proposals_op', - 'test_expand_as_v2_op', 'test_device', 'test_code_generator', - 'test_asp_pruning_2d_best', 'test_fleet_with_asp', 'test_pool2d_api', - 'test_mean_op', 'test_is_tensor', 'test_run_program_op', - 'test_cuda_random_seed', 'test_linear_interp_op', - 'test_fuse_all_reduce_pass', 'tensor_util_test', 'test_median', - 'test_nanmedian', 'test_linear', 'test_imperative_qat_amp', - 'test_truncated_gaussian_random_op', 'test_lstm_cudnn_op', - 'copy_same_tensor_test', 'test_squeeze2_op', - 'naive_best_fit_allocator_test', 'test_model', 
'test_py_reader_combination', - 'test_prior_box_op', 'test_matmul_v2_mkldnn_op', 'test_sum_op', - 'test_paddle_imperative_double_grad', 'test_norm_op', 'test_pool3d_api', - 'test_imperative_gan', 'test_sequence_softmax_op', 'test_rand_op', - 'test_expand_v2_op', 'test_word2vec_book', 'test_histogram_op', - 'test_min_op', 'test_mse_loss', 'test_sign_op', - 'selected_rows_functor_gpu_test', 'test_fleet_base', 'test_logsumexp', - 'test_detection', 'test_image_classification_fp16', 'test_random_seed', - 'test_op_function_generator', 'test_unique_with_counts', - 'test_complex_elementwise_layers', 'test_array_read_write_op', - 'test_fusion_group_op', 'test_imperative_layer_apply', + 'test_meshgrid_op', + 'test_word2vec', + 'test_analyzer_ner', + 'test_fetch_lod_tensor_array', + 'test_adagrad_op_v2', + 'test_conv2d_fusion_op', + 'test_hapi_amp', + 'test_metrics', + 'test_clip_by_norm_op', + 'test_lr_scheduler', + 'test_generate_proposals_op', + 'test_masked_select_op', + 'test_trt_anchor_generator_op', + 'test_imperative_ocr_attention_model', + 'test_sentiment', + 'test_chunk_op', + 'test_memcpy_op', + 'test_warpctc_op', + 'test_row_conv_op', + 'test_grid_sample_function', + 'test_rnn_nets', + 'test_pad3d_op', + 'test_imperative_mnist_sorted_gradient', + 'tensor_test', + 'test_tensorrt_engine_op', + 'test_dot_op', + 'test_real_imag_op', + 'test_adam_optimizer_fp32_fp64', + 'test_reduce_op', + 'test_density_prior_box_op', + 'test_top_k_op', + 'test_grid_generator', + 'test_randn_op', + 'test_activation_mkldnn_op', + 'test_lac', + 'test_pad_op', + 'test_lstmp_op', + 'test_loop', + 'test_pylayer_op', + 'data_device_transform_test', + 'test_trt_roi_align_op', + 'test_nn_functional_hot_op', + 'test_top_k_v2_op', + 'test_crop_op', + 'test_conv_bn_fuse_pass', + 'test_beam_search_decode_op', + 'test_auc_op', + 'test_pool2d_op', + 'test_gaussian_random_op', + 'test_maximum_op', + 'test_rnn_cell_api', + 'device_code_test', + 'test_ir_inplace_pass', + 'test_cos_sim_op', + 'test_lite_tensor_utils', + 'test_fit_a_line', + 'test_mish_op', + 'test_transpose_op', + 'test_mean_iou', + 'test_conv3d_transpose_op', + 'test_jit_save_load', + 'test_unsqueeze2_op', + 'test_eager_deletion_while_op', + 'test_zeros_like_op', + 'test_c_embedding_op', + 'test_regularizer', + 'zero_copy_tensor_test', + 'test_tensor_shape', + 'test_resnet', + 'test_resnet_amp', + 'test_dygraph_weight_norm', + 'test_tracer', + 'test_list', + 'test_sequence_concat', + 'test_adaptive_avg_pool1d', + 'test_elementwise_div_op', + 'test_conv1d_transpose_layer', + 'test_adamw_op', + 'trt_fc_prelu_test', + 'test_temporal_shift_op', + 'test_naive_best_fit_gpu_memory_limit', + 'dlpack_tensor_test', + 'test_elementwise_max_op', + 'test_typing', + 'test_asp_pruning_2d_greedy', + 'test_fake_dequantize_op', + 'test_crop_tensor_op', + 'test_imperative_load_static_param', + 'test_imperative_qat_user_defined', + 'test_anchor_generator_op', + 'test_if_else_op', + 'test_prepare_op', + 'test_conj_op', + 'test_imperative_hook_for_layer', + 'test_roi_pool_op', + 'test_strided_slice_op', + 'test_norm_all', + 'test_weight_decay', + 'test_functional_conv2d', + 'test_functional_conv3d_transpose', + 'test_imperative_layer_trainable', + 'test_imperative_data_parallel', + 'test_digamma_op', + 'test_distribution', + 'test_box_clip_op', + 'custom_tensor_test', + 'test_marker_op', + 'test_dataloader_early_reset', + 'test_gather_nd_op', + 'test_tensor_register_hook', + 'test_retain_graph', + 'test_network_with_dtype', + 'test_basic_api_transformation', + 'test_diag', + 
'test_lod_array_length_op', + 'test_reinforcement_learning', + 'test_softmax_op', + 'test_fc_fuse_pass', + 'test_adaptive_max_pool2d', + 'test_inverse_op', + 'test_declarative', + 'test_imperative_double_grad', + 'test_tensor_methods', + 'test_pool1d_api', + 'system_allocator_test', + 'test_print', + 'test_tensor_type_promotion', + 'test_bce_with_logits_loss', + 'test_tensor', + 'test_cross_op', + 'concat_test', + 'test_ast_util', + 'test_proximal_adagrad_op', + 'test_pairwise_distance', + 'test_imperative_mnist', + 'test_beam_search_decoder', + 'test_build_strategy_fusion_group_pass', + 'test_dygraph_spectral_norm', + 'test_scale_mkldnn_op', + 'test_load_state_dict_from_old_format', + 'test_margin_rank_loss_op', + 'test_lookup_table_v2_op', + 'test_mix_precision_all_reduce_fuse', + 'test_spp_op', + 'test_op_converter', + 'mixed_vector_test', + 'test_roi_align_op', + 'test_pad_constant_like', + 'test_mul_op', + 'test_spectral_norm_op', + 'test_transformer', + 'test_for_enumerate', + 'test_variable_trans_func', + 'test_squared_l2_distance_op', + 'test_quantize_transpiler_v2', + 'test_im2sequence_op', + 'test_reader_reset', + 'test_one_hot_op', + 'test_adaptive_max_pool1d', + 'test_label_smooth_op', + 'test_parallel_executor_fetch_feed', + 'test_cast', + 'test_parallel_dygraph_sync_batch_norm', + 'test_collect_fpn_proposals_op', + 'test_expand_as_v2_op', + 'test_device', + 'test_code_generator', + 'test_asp_pruning_2d_best', + 'test_fleet_with_asp', + 'test_pool2d_api', + 'test_mean_op', + 'test_is_tensor', + 'test_run_program_op', + 'test_cuda_random_seed', + 'test_linear_interp_op', + 'test_fuse_all_reduce_pass', + 'tensor_util_test', + 'test_median', + 'test_nanmedian', + 'test_linear', + 'test_imperative_qat_amp', + 'test_truncated_gaussian_random_op', + 'test_lstm_cudnn_op', + 'copy_same_tensor_test', + 'test_squeeze2_op', + 'naive_best_fit_allocator_test', + 'test_model', + 'test_py_reader_combination', + 'test_prior_box_op', + 'test_matmul_v2_mkldnn_op', + 'test_sum_op', + 'test_paddle_imperative_double_grad', + 'test_norm_op', + 'test_pool3d_api', + 'test_imperative_gan', + 'test_sequence_softmax_op', + 'test_rand_op', + 'test_expand_v2_op', + 'test_word2vec_book', + 'test_histogram_op', + 'test_min_op', + 'test_mse_loss', + 'test_sign_op', + 'selected_rows_functor_gpu_test', + 'test_fleet_base', + 'test_logsumexp', + 'test_detection', + 'test_image_classification_fp16', + 'test_random_seed', + 'test_op_function_generator', + 'test_unique_with_counts', + 'test_complex_elementwise_layers', + 'test_array_read_write_op', + 'test_fusion_group_op', + 'test_imperative_layer_apply', 'test_executor_return_tensor_not_overwriting', - 'test_optimizer_in_control_flow', 'test_lookup_table_op', 'test_randint_op', - 'test_randint_like', 'test_convert_call', - 'test_sigmoid_cross_entropy_with_logits_op', 'copy_cross_scope_test', - 'test_normalization_wrapper', 'test_flip', 'test_cosine_similarity_api', - 'test_cumsum_op', 'test_range', 'test_log_loss_op', 'test_where_index', - 'test_tril_triu_op', 'test_lod_reset_op', 'test_lod_tensor', - 'test_addmm_op', 'test_index_select_op', 'test_index_add_op', 'test_nvprof', - 'test_index_sample_op', 'test_unstack_op', 'test_increment', - 'strided_memcpy_test', 'test_target_assign_op', + 'test_optimizer_in_control_flow', + 'test_lookup_table_op', + 'test_randint_op', + 'test_randint_like', + 'test_convert_call', + 'test_sigmoid_cross_entropy_with_logits_op', + 'copy_cross_scope_test', + 'test_normalization_wrapper', + 'test_flip', + 
'test_cosine_similarity_api', + 'test_cumsum_op', + 'test_range', + 'test_log_loss_op', + 'test_where_index', + 'test_tril_triu_op', + 'test_lod_reset_op', + 'test_lod_tensor', + 'test_addmm_op', + 'test_index_select_op', + 'test_index_add_op', + 'test_nvprof', + 'test_index_sample_op', + 'test_unstack_op', + 'test_increment', + 'strided_memcpy_test', + 'test_target_assign_op', 'test_trt_dynamic_shape_transformer_prune', - 'test_box_decoder_and_assign_op', 'test_trt_dynamic_shape', 'test_mnist', - 'test_convert_operators', 'test_fill_any_like_op', 'test_fill_constant_op', - 'test_callback_reduce_lr_on_plateau', 'test_tile_op', 'test_logical', - 'test_deformable_conv_op', 'test_elementwise_add_grad_grad', - 'test_simple_rnn_op', 'test_bicubic_interp_op', 'test_batch_norm_op_v2', - 'test_custom_relu_op_jit', 'test_math_op_patch_var_base', 'test_se_resnet', - 'test_device_guard', 'test_elementwise_div_grad_grad', 'test_minus_op', - 'test_shard_index_op', 'test_dygraph_recompute', 'test_momentum_op', - 'test_modelaverage', 'test_compare_reduce_op', 'test_affine_grid_op', - 'test_allclose_layer', 'test_elementwise_pow_op', 'test_trt_subgraph_pass', - 'test_adaptive_avg_pool2d', 'test_functional_conv3d', - 'test_executor_and_mul', 'test_kron_op', 'test_cast_mkldnn_op', - 'test_imperative_auto_prune', 'allocator_facade_frac_flags_test', - 'test_fill_zeros_like_op', 'test_gather_tree_op', 'test_elementwise_mul_op', - 'test_cycle_gan', 'test_parallel_executor_transformer_auto_growth', - 'test_bitwise_op', 'test_uniform_random_op', 'trt_split_converter_test', - 'test_huber_loss_op', 'test_slice', 'test_label_smooth_functional', - 'test_conv_shift_op', 'test_imperative_optimizer_v2', 'test_len', - 'test_imperative_named_members', 'test_sequence_reshape', - 'test_elementwise_min_op', 'test_flatten2_op', 'test_param_guard', - 'test_imperative_ptb_rnn', 'test_batch_fc_op', 'test_Tensor_type', - 'test_complex_getitem', 'lod_tensor_gpu_test', 'im2col_test', - 'test_unbind_op', 'test_imperative_ptq', 'test_auc_single_pred_op', - 'test_imperative_reinforcement', 'test_tf32_cublas', 'test_return', - 'test_py_reader_push_pop', 'test_lstm', 'test_dygraph_mnist_fp16', - 'test_shuffle_channel_op', 'test_partial_concat_op', - 'test_fill_zeros_like2_op', 'test_deformable_conv_v1_op', - 'test_complex_grad_accumulated', 'test_sequence_mask', 'test_fill_op', - 'test_imperative_deepcf', 'test_reorder_lod_tensor', 'test_multiply', - 'test_partial_program', 'test_fetch_feed', 'test_group', - 'test_trt_reduce_sum_op', 'data_type_transform_test', 'test_gru_rnn_op', - 'test_cudnn_grucell', 'test_argsort_op', 'test_batch_norm_op', - 'test_inplace', 'test_deprecated_decorator', 'test_complex_cast', - 'test_diag_v2', 'test_iou_similarity_op', - 'test_inplace_auto_generated_apis', 'test_dataset', 'test_bilinear_api', - 'test_empty_like_op', 'test_imperative_layer_children', 'nccl_op_test', - 'test_tree_conv_op', 'test_share_data_op', - 'test_ir_memory_optimize_transformer', 'test_lod_append_op', - 'test_math_op_patch', 'test_base_layer', 'test_dequantize_log_op', - 'test_complex_matmul', 'test_prelu_op', 'test_l1_norm_op', - 'test_rmsprop_op', 'test_fuse_bn_act_pass', 'test_inplace_addto_strategy', - 'test_ptb_lm_v2', 'test_paddle_save_load', 'test_prelu_mkldnn_op', - 'test_box_coder_op', 'test_atan2_op', 'test_unsqueeze_op', 'test_profiler', - 'test_affine_channel_op', 'test_leaky_relu_grad_grad_functor', - 'test_ctc_align', 'test_fuse_relu_depthwise_conv_pass', 'test_complex_kron', - 'test_imperative_skip_op', 'test_dgc_op', 
'test_regularizer_api', - 'test_nll_loss', 'test_imperative_layers', 'test_rnn_decode_api', - 'test_imperative_partitial_backward', 'test_where_op', 'test_std_layer', - 'test_ir_embedding_eltwise_layernorm_fuse_pass', 'test_multihead_attention', - 'test_grid_sampler_op', 'test_initializer_nn', 'test_var_base', - 'test_fuse_elewise_add_act_pass', 'test_select_input_output_op', - 'test_lstm_op', 'test_break_continue', - 'test_imperative_parallel_coalesce_split', 'test_expand_as_op', - 'test_user_defined_quantization', 'test_tensor_to_list', - 'test_limit_gpu_memory', 'test_adamax_api', - 'test_softmax_mask_fuse_upper_triangle_op', 'test_fake_quantize_op', - 'vol2col_test', 'test_cast_op', 'test_proximal_gd_op', 'test_mul_nn_grad', - 'test_full_like_op', 'test_imperative_static_runner_while', - 'trt_instance_norm_test', 'test_elementwise_mod_op', - 'test_grad_clip_minimize', 'test_one_hot_v2_op', 'test_complex_sum_layer', - 'test_isfinite_v2_op', 'test_is_empty_op', 'test_simnet_v2', - 'beam_search_test', 'test_randperm_op', 'test_elementwise_add_op_inplace', - 'test_imperative_selected_rows', 'test_py_reader_using_executor', - 'test_activation_op', 'test_nn_functional_embedding_dygraph', - 'test_reshape_op', 'test_maxout_op', 'test_cudnn_lstmcell', - 'test_sigmoid_focal_loss', 'test_manual_seed', 'test_lrn_op', - 'test_ir_memory_optimize_nlp', 'test_dataset_dataloader', - 'test_complex_variable', 'test_lite_engine', 'test_neg_op', - 'test_view_op_reuse_allocation', 'test_split_op', 'test_ptb_lm', - 'test_elementwise_sub_op', 'test_compare_op', 'test_simnet', - 'test_label_semantic_roles', 'test_normal', - 'test_tensor_scalar_type_promotion_static', 'test_trt_group_norm_op', - 'test_learning_rate_scheduler', 'test_numel_op', 'test_adaptive_max_pool3d', - 'test_sequential', 'test_imperative_optimizer', 'test_subtract_op', - 'test_conv_transpose_nn_grad', 'test_sigmoid_focal_loss_op', - 'test_cuda_stream_event', 'test_sequence_pad_op', 'test_rnn_cells', - 'test_partial_sum_op', 'test_rnn_nets_static', 'test_max_op', - 'test_logical_op', 'test_squared_l2_norm_op', 'test_center_loss', - 'test_quantization_pass', 'test_imperative_gnn', - 'test_conv_elementwise_add_act_fuse_pass', 'test_roll_op', - 'test_imperative_container_layerdict', 'test_shape_op', 'test_bmm_op', - 'test_matmul_v2_op', 'test_hinge_loss_op', 'test_imperative_qat', - 'test_add_position_encoding_op', 'test_rnn_op', 'test_gradient_clip', - 'test_py_reader_pin_memory', 'test_concat_op', 'test_weight_decay_extend', - 'test_accuracy_op', 'test_cond', 'test_resnet_v2', 'test_adagrad_op', - 'test_mv_op', 'test_print_op', 'test_grad', 'test_square_error_cost', - 'test_rnn_cells_static', 'test_mkldnn_batch_norm_act_fuse_pass', - 'test_input_spec', 'test_adam_op', 'test_elementwise_floordiv_op', - 'test_eager_deletion_gru_net', 'test_diagonal_op', - 'test_imperative_static_runner_mnist', 'test_nearest_interp_op', - 'test_diag_embed', 'test_imperative_basic', 'test_merge_selectedrows_op', - 'test_feed_data_check_shape_type', 'test_complex_trace_layer', - 'test_slice_op', 'test_bmn', 'test_nn_quant_functional_layers', - 'test_broadcast_tensors_op', 'test_selu_op', 'test_group_norm_op_v2', - 'test_tensor_to_numpy', 'test_queue', 'test_rank_loss_op', 'test_trace_op', - 'test_case', 'test_prroi_pool_op', 'test_op_name_conflict', - 'test_psroi_pool_op', 'test_set_value_op', 'test_ones_like', - 'test_assign_value_op', 'test_ema', 'test_lamb_op', 'test_dgc_momentum_op', - 'test_custom_grad_input', 'test_trunc_op', 'test_bernoulli_op', - 
'test_custom_relu_model', 'test_backward', 'test_conv3d_transpose_part2_op', - 'test_complex_transpose', 'test_memory_reuse_exclude_feed_var', - 'test_polygon_box_transform', 'math_function_gpu_test', - 'test_program_prune_backward', 'test_ema_fleet', 'test_fleet_amp_init', - 'test_normalize', 'test_correlation', + 'test_box_decoder_and_assign_op', + 'test_trt_dynamic_shape', + 'test_mnist', + 'test_convert_operators', + 'test_fill_any_like_op', + 'test_fill_constant_op', + 'test_callback_reduce_lr_on_plateau', + 'test_tile_op', + 'test_logical', + 'test_deformable_conv_op', + 'test_elementwise_add_grad_grad', + 'test_simple_rnn_op', + 'test_bicubic_interp_op', + 'test_batch_norm_op_v2', + 'test_custom_relu_op_jit', + 'test_math_op_patch_var_base', + 'test_se_resnet', + 'test_device_guard', + 'test_elementwise_div_grad_grad', + 'test_minus_op', + 'test_shard_index_op', + 'test_dygraph_recompute', + 'test_momentum_op', + 'test_modelaverage', + 'test_compare_reduce_op', + 'test_affine_grid_op', + 'test_allclose_layer', + 'test_elementwise_pow_op', + 'test_trt_subgraph_pass', + 'test_adaptive_avg_pool2d', + 'test_functional_conv3d', + 'test_executor_and_mul', + 'test_kron_op', + 'test_cast_mkldnn_op', + 'test_imperative_auto_prune', + 'allocator_facade_frac_flags_test', + 'test_fill_zeros_like_op', + 'test_gather_tree_op', + 'test_elementwise_mul_op', + 'test_cycle_gan', + 'test_parallel_executor_transformer_auto_growth', + 'test_bitwise_op', + 'test_uniform_random_op', + 'trt_split_converter_test', + 'test_huber_loss_op', + 'test_slice', + 'test_label_smooth_functional', + 'test_conv_shift_op', + 'test_imperative_optimizer_v2', + 'test_len', + 'test_imperative_named_members', + 'test_sequence_reshape', + 'test_elementwise_min_op', + 'test_flatten2_op', + 'test_param_guard', + 'test_imperative_ptb_rnn', + 'test_batch_fc_op', + 'test_Tensor_type', + 'test_complex_getitem', + 'lod_tensor_gpu_test', + 'im2col_test', + 'test_unbind_op', + 'test_imperative_ptq', + 'test_auc_single_pred_op', + 'test_imperative_reinforcement', + 'test_tf32_cublas', + 'test_return', + 'test_py_reader_push_pop', + 'test_lstm', + 'test_dygraph_mnist_fp16', + 'test_shuffle_channel_op', + 'test_partial_concat_op', + 'test_fill_zeros_like2_op', + 'test_deformable_conv_v1_op', + 'test_complex_grad_accumulated', + 'test_sequence_mask', + 'test_fill_op', + 'test_imperative_deepcf', + 'test_reorder_lod_tensor', + 'test_multiply', + 'test_partial_program', + 'test_fetch_feed', + 'test_group', + 'test_trt_reduce_sum_op', + 'data_type_transform_test', + 'test_gru_rnn_op', + 'test_cudnn_grucell', + 'test_argsort_op', + 'test_batch_norm_op', + 'test_inplace', + 'test_deprecated_decorator', + 'test_complex_cast', + 'test_diag_v2', + 'test_iou_similarity_op', + 'test_inplace_auto_generated_apis', + 'test_dataset', + 'test_bilinear_api', + 'test_empty_like_op', + 'test_imperative_layer_children', + 'nccl_op_test', + 'test_tree_conv_op', + 'test_share_data_op', + 'test_ir_memory_optimize_transformer', + 'test_lod_append_op', + 'test_math_op_patch', + 'test_base_layer', + 'test_dequantize_log_op', + 'test_complex_matmul', + 'test_prelu_op', + 'test_l1_norm_op', + 'test_rmsprop_op', + 'test_fuse_bn_act_pass', + 'test_inplace_addto_strategy', + 'test_ptb_lm_v2', + 'test_paddle_save_load', + 'test_prelu_mkldnn_op', + 'test_box_coder_op', + 'test_atan2_op', + 'test_unsqueeze_op', + 'test_profiler', + 'test_affine_channel_op', + 'test_leaky_relu_grad_grad_functor', + 'test_ctc_align', + 'test_fuse_relu_depthwise_conv_pass', + 
'test_complex_kron', + 'test_imperative_skip_op', + 'test_dgc_op', + 'test_regularizer_api', + 'test_nll_loss', + 'test_imperative_layers', + 'test_rnn_decode_api', + 'test_imperative_partitial_backward', + 'test_where_op', + 'test_std_layer', + 'test_ir_embedding_eltwise_layernorm_fuse_pass', + 'test_multihead_attention', + 'test_grid_sampler_op', + 'test_initializer_nn', + 'test_var_base', + 'test_fuse_elewise_add_act_pass', + 'test_select_input_output_op', + 'test_lstm_op', + 'test_break_continue', + 'test_imperative_parallel_coalesce_split', + 'test_expand_as_op', + 'test_user_defined_quantization', + 'test_tensor_to_list', + 'test_limit_gpu_memory', + 'test_adamax_api', + 'test_softmax_mask_fuse_upper_triangle_op', + 'test_fake_quantize_op', + 'vol2col_test', + 'test_cast_op', + 'test_proximal_gd_op', + 'test_mul_nn_grad', + 'test_full_like_op', + 'test_imperative_static_runner_while', + 'trt_instance_norm_test', + 'test_elementwise_mod_op', + 'test_grad_clip_minimize', + 'test_one_hot_v2_op', + 'test_complex_sum_layer', + 'test_isfinite_v2_op', + 'test_is_empty_op', + 'test_simnet_v2', + 'beam_search_test', + 'test_randperm_op', + 'test_elementwise_add_op_inplace', + 'test_imperative_selected_rows', + 'test_py_reader_using_executor', + 'test_activation_op', + 'test_nn_functional_embedding_dygraph', + 'test_reshape_op', + 'test_maxout_op', + 'test_cudnn_lstmcell', + 'test_sigmoid_focal_loss', + 'test_manual_seed', + 'test_lrn_op', + 'test_ir_memory_optimize_nlp', + 'test_dataset_dataloader', + 'test_complex_variable', + 'test_lite_engine', + 'test_neg_op', + 'test_view_op_reuse_allocation', + 'test_split_op', + 'test_ptb_lm', + 'test_elementwise_sub_op', + 'test_compare_op', + 'test_simnet', + 'test_label_semantic_roles', + 'test_normal', + 'test_tensor_scalar_type_promotion_static', + 'test_trt_group_norm_op', + 'test_learning_rate_scheduler', + 'test_numel_op', + 'test_adaptive_max_pool3d', + 'test_sequential', + 'test_imperative_optimizer', + 'test_subtract_op', + 'test_conv_transpose_nn_grad', + 'test_sigmoid_focal_loss_op', + 'test_cuda_stream_event', + 'test_sequence_pad_op', + 'test_rnn_cells', + 'test_partial_sum_op', + 'test_rnn_nets_static', + 'test_max_op', + 'test_logical_op', + 'test_squared_l2_norm_op', + 'test_center_loss', + 'test_quantization_pass', + 'test_imperative_gnn', + 'test_conv_elementwise_add_act_fuse_pass', + 'test_roll_op', + 'test_imperative_container_layerdict', + 'test_shape_op', + 'test_bmm_op', + 'test_matmul_v2_op', + 'test_hinge_loss_op', + 'test_imperative_qat', + 'test_add_position_encoding_op', + 'test_rnn_op', + 'test_gradient_clip', + 'test_py_reader_pin_memory', + 'test_concat_op', + 'test_weight_decay_extend', + 'test_accuracy_op', + 'test_cond', + 'test_resnet_v2', + 'test_adagrad_op', + 'test_mv_op', + 'test_print_op', + 'test_grad', + 'test_square_error_cost', + 'test_rnn_cells_static', + 'test_mkldnn_batch_norm_act_fuse_pass', + 'test_input_spec', + 'test_adam_op', + 'test_elementwise_floordiv_op', + 'test_eager_deletion_gru_net', + 'test_diagonal_op', + 'test_imperative_static_runner_mnist', + 'test_nearest_interp_op', + 'test_diag_embed', + 'test_imperative_basic', + 'test_merge_selectedrows_op', + 'test_feed_data_check_shape_type', + 'test_complex_trace_layer', + 'test_slice_op', + 'test_bmn', + 'test_nn_quant_functional_layers', + 'test_broadcast_tensors_op', + 'test_selu_op', + 'test_group_norm_op_v2', + 'test_tensor_to_numpy', + 'test_queue', + 'test_rank_loss_op', + 'test_trace_op', + 'test_case', + 'test_prroi_pool_op', + 
'test_op_name_conflict', + 'test_psroi_pool_op', + 'test_set_value_op', + 'test_ones_like', + 'test_assign_value_op', + 'test_ema', + 'test_lamb_op', + 'test_dgc_momentum_op', + 'test_custom_grad_input', + 'test_trunc_op', + 'test_bernoulli_op', + 'test_custom_relu_model', + 'test_backward', + 'test_conv3d_transpose_part2_op', + 'test_complex_transpose', + 'test_memory_reuse_exclude_feed_var', + 'test_polygon_box_transform', + 'math_function_gpu_test', + 'test_program_prune_backward', + 'test_ema_fleet', + 'test_fleet_amp_init', + 'test_normalize', + 'test_correlation', 'test_conv_elementwise_add2_act_fuse_pass', - 'test_imperative_container_layerlist', 'test_dequantize_abs_max_op', - 'test_fuse_optimizer_pass', 'test_optimizer', - 'test_dynamic_rnn_stop_gradient', 'test_raw_program_optimizer', 'test_pow', - 'test_inplace_softmax_with_cross_entropy', 'test_transforms', - 'test_unfold_op', 'test_assign_op', 'test_isinstance', - 'auto_growth_best_fit_allocator_facade_test', 'test_cholesky_op', - 'test_adaptive_avg_pool3d', 'test_paddle_save_load_binary', - 'test_fused_fc_elementwise_layernorm_op', 'test_sequence_enumerate_op', - 'test_lgamma_op', 'test_modified_huber_loss_op', 'trt_quant_int8_test', - 'test_callback_visualdl', 'test_linspace', 'test_update_loss_scaling_op', - 'test_arg_min_max_op', 'test_bce_loss', 'test_nn_margin_rank_loss', - 'test_arg_min_max_v2_op', 'test_variance_layer', - 'test_quantization_scale_pass', 'test_segment_ops', 'test_layers', - 'test_isfinite_op', 'test_imperative_qat_channelwise', 'test_eye_op', - 'test_imperative_framework', 'test_l1_loss', 'test_ifelse', - 'test_cache_program', 'test_ir_fc_fuse_pass', 'test_kldiv_loss_op', - 'test_switch_case', 'test_unique', 'test_prod_op', 'test_edit_distance_op', - 'test_sequence_expand_as', 'test_full_name_usage', 'test_glu', - 'test_pad2d_op', 'test_read_file', 'test_erf_op', 'test_sequence_unpad_op', - 'test_sequence_conv', 'allocator_facade_abs_flags_test', 'test_detach', - 'test_cross_entropy_op', 'test_wrappers', 'test_fleet_base_single', - 'test_conv_elementwise_add_fuse_pass', 'test_auto_growth_gpu_memory_limit', - 'test_sequence_reverse', 'test_fc_op', 'test_diagflat', 'test_adamax_op', - 'test_op_attr', 'paddle_infer_api_test', 'test_mixed_precision', - 'lite_mul_model_test', 'test_sort_op', 'test_imperative_out_scale', - 'test_vision_models', 'test_rnn_encoder_decoder', 'test_fleet_with_asp_amp', + 'test_imperative_container_layerlist', + 'test_dequantize_abs_max_op', + 'test_fuse_optimizer_pass', + 'test_optimizer', + 'test_dynamic_rnn_stop_gradient', + 'test_raw_program_optimizer', + 'test_pow', + 'test_inplace_softmax_with_cross_entropy', + 'test_transforms', + 'test_unfold_op', + 'test_assign_op', + 'test_isinstance', + 'auto_growth_best_fit_allocator_facade_test', + 'test_cholesky_op', + 'test_adaptive_avg_pool3d', + 'test_paddle_save_load_binary', + 'test_fused_fc_elementwise_layernorm_op', + 'test_sequence_enumerate_op', + 'test_lgamma_op', + 'test_modified_huber_loss_op', + 'trt_quant_int8_test', + 'test_callback_visualdl', + 'test_linspace', + 'test_update_loss_scaling_op', + 'test_arg_min_max_op', + 'test_bce_loss', + 'test_nn_margin_rank_loss', + 'test_arg_min_max_v2_op', + 'test_variance_layer', + 'test_quantization_scale_pass', + 'test_segment_ops', + 'test_layers', + 'test_isfinite_op', + 'test_imperative_qat_channelwise', + 'test_eye_op', + 'test_imperative_framework', + 'test_l1_loss', + 'test_ifelse', + 'test_cache_program', + 'test_ir_fc_fuse_pass', + 'test_kldiv_loss_op', + 
'test_switch_case', + 'test_unique', + 'test_prod_op', + 'test_edit_distance_op', + 'test_sequence_expand_as', + 'test_full_name_usage', + 'test_glu', + 'test_pad2d_op', + 'test_read_file', + 'test_erf_op', + 'test_sequence_unpad_op', + 'test_sequence_conv', + 'allocator_facade_abs_flags_test', + 'test_detach', + 'test_cross_entropy_op', + 'test_wrappers', + 'test_fleet_base_single', + 'test_conv_elementwise_add_fuse_pass', + 'test_auto_growth_gpu_memory_limit', + 'test_sequence_reverse', + 'test_fc_op', + 'test_diagflat', + 'test_adamax_op', + 'test_op_attr', + 'paddle_infer_api_test', + 'test_mixed_precision', + 'lite_mul_model_test', + 'test_sort_op', + 'test_imperative_out_scale', + 'test_vision_models', + 'test_rnn_encoder_decoder', + 'test_fleet_with_asp_amp', 'test_partial_eager_deletion_transformer', - 'test_imperative_star_gan_with_gradient_penalty', 'test_stack_op', - 'test_shuffle_batch_op', 'test_clip_op', 'test_py_func_op', - 'test_pool_max_op', 'test_log_softmax', - 'test_imperative_container_parameterlist', 'test_multiplex_op', + 'test_imperative_star_gan_with_gradient_penalty', + 'test_stack_op', + 'test_shuffle_batch_op', + 'test_clip_op', + 'test_py_func_op', + 'test_pool_max_op', + 'test_log_softmax', + 'test_imperative_container_parameterlist', + 'test_multiplex_op', 'test_trt_transpose_flatten_concat_fuse_pass', - 'test_seqconv_eltadd_relu_fuse_pass', 'test_assert_op', - 'test_scatter_nd_op', 'test_sequence_expand', 'test_arange', - 'test_translated_layer', 'test_decoupled_py_reader_data_check', - 'test_analyzer_ernie_large', 'test_tensor_array_to_tensor', - 'test_functional_conv2d_transpose', 'test_error', 'test_callbacks', - 'test_imperative_recurrent_usage', 'test_deform_conv2d', - 'test_coalesce_tensor_op', 'test_tsm', 'test_fused_multihead_matmul_op', - 'test_softmax_mask_fuse_op', 'test_optimizer_grad', 'test_complex_abs', - 'test_gradient_accmulator', 'test_instance_norm_op_v2', - 'test_random_crop_op', 'test_mobile_net', + 'test_seqconv_eltadd_relu_fuse_pass', + 'test_assert_op', + 'test_scatter_nd_op', + 'test_sequence_expand', + 'test_arange', + 'test_translated_layer', + 'test_decoupled_py_reader_data_check', + 'test_analyzer_ernie_large', + 'test_tensor_array_to_tensor', + 'test_functional_conv2d_transpose', + 'test_error', + 'test_callbacks', + 'test_imperative_recurrent_usage', + 'test_deform_conv2d', + 'test_coalesce_tensor_op', + 'test_tsm', + 'test_fused_multihead_matmul_op', + 'test_softmax_mask_fuse_op', + 'test_optimizer_grad', + 'test_complex_abs', + 'test_gradient_accmulator', + 'test_instance_norm_op_v2', + 'test_random_crop_op', + 'test_mobile_net', 'test_parallel_executor_transformer', 'test_tensor_scalar_type_promotion_dynamic', - 'test_eager_deletion_delete_vars', 'test_asp_pruning_1d', - 'test_imperative_using_non_zero_gpu', 'test_machine_translation', - 'test_flatten_op', 'test_onnx_export', 'test_optimizer_for_varbase', - 'test_fusion_transpose_flatten_concat_op', 'best_fit_allocator_test', - 'test_ir_fusion_group_pass', 'test_trt_quant_conv2d_dequant_fuse_pass', - 'test_allclose_op', 'test_ftrl_op', 'test_elementwise_add_op', - 'test_instance_norm_op', 'test_lambv2_op', 'test_yolo_box_op', - 'test_parallel_executor_drop_scope', 'test_generator_dataloader', - 'test_conv2d_transpose_op_depthwise_conv', 'test_imperative_save_load_v2', - 'test_lookahead', 'test_moving_average_abs_max_scale_op', - 'test_roi_perspective_transform_op', 'test_tensorrt_engine', - 'test_affine_grid_function', 'test_nonzero_api', - 'test_ir_memory_optimize_pass', 
'test_reduce_mkldnn_op', - 'test_bilinear_interp_op', 'test_cvm_op', 'test_scale_op', 'test_matmul_op', - 'test_sequence_pool', 'test_complex_simplenet', 'test_complex_reshape', - 'test_flatten_contiguous_range_op', 'test_python_operator_overriding', - 'lite_resnet50_test', 'test_sequence_erase_op', - 'test_deformable_psroi_pooling', 'test_multi_precision_fp16_train', - 'test_adam_op_multi_thread', 'test_decoupled_py_reader', - 'test_distribute_fpn_proposals_op', 'transform_test', 'test_nan_inf', - 'test_fuse_bn_add_act_pass', 'test_unpool_op', - 'test_parallel_executor_dry_run', 'test_layer_norm_op_v2', - 'test_embedding_id_stop_gradient', 'test_mkldnn_fc_act_fuse_pass', - 'sequence_pooling_test', 'test_get_tensor_from_selected_rows_op', - 'test_imperative_ptb_rnn_sorted_gradient', 'test_hapi_hub', - 'test_reverse_op', 'test_compiled_program', 'test_lambda', - 'test_adadelta_op', 'test_nn_sigmoid_op', 'test_nearest_interp_v2_op', - 'test_sequence_slice_op', 'test_program_translator', - 'test_eager_deletion_lstm_net', 'malloc_test', 'test_size_op', - 'test_analysis_predictor', 'test_recognize_digits', 'test_parameter', + 'test_eager_deletion_delete_vars', + 'test_asp_pruning_1d', + 'test_imperative_using_non_zero_gpu', + 'test_machine_translation', + 'test_flatten_op', + 'test_onnx_export', + 'test_optimizer_for_varbase', + 'test_fusion_transpose_flatten_concat_op', + 'best_fit_allocator_test', + 'test_ir_fusion_group_pass', + 'test_trt_quant_conv2d_dequant_fuse_pass', + 'test_allclose_op', + 'test_ftrl_op', + 'test_elementwise_add_op', + 'test_instance_norm_op', + 'test_lambv2_op', + 'test_yolo_box_op', + 'test_parallel_executor_drop_scope', + 'test_generator_dataloader', + 'test_conv2d_transpose_op_depthwise_conv', + 'test_imperative_save_load_v2', + 'test_lookahead', + 'test_moving_average_abs_max_scale_op', + 'test_roi_perspective_transform_op', + 'test_tensorrt_engine', + 'test_affine_grid_function', + 'test_nonzero_api', + 'test_ir_memory_optimize_pass', + 'test_reduce_mkldnn_op', + 'test_bilinear_interp_op', + 'test_cvm_op', + 'test_scale_op', + 'test_matmul_op', + 'test_sequence_pool', + 'test_complex_simplenet', + 'test_complex_reshape', + 'test_flatten_contiguous_range_op', + 'test_python_operator_overriding', + 'lite_resnet50_test', + 'test_sequence_erase_op', + 'test_deformable_psroi_pooling', + 'test_multi_precision_fp16_train', + 'test_adam_op_multi_thread', + 'test_decoupled_py_reader', + 'test_distribute_fpn_proposals_op', + 'transform_test', + 'test_nan_inf', + 'test_fuse_bn_add_act_pass', + 'test_unpool_op', + 'test_parallel_executor_dry_run', + 'test_layer_norm_op_v2', + 'test_embedding_id_stop_gradient', + 'test_mkldnn_fc_act_fuse_pass', + 'sequence_pooling_test', + 'test_get_tensor_from_selected_rows_op', + 'test_imperative_ptb_rnn_sorted_gradient', + 'test_hapi_hub', + 'test_reverse_op', + 'test_compiled_program', + 'test_lambda', + 'test_adadelta_op', + 'test_nn_sigmoid_op', + 'test_nearest_interp_v2_op', + 'test_sequence_slice_op', + 'test_program_translator', + 'test_eager_deletion_lstm_net', + 'malloc_test', + 'test_size_op', + 'test_analysis_predictor', + 'test_recognize_digits', + 'test_parameter', 'test_transpose_flatten_concat_fuse_pass', - 'test_imperative_trace_non_persistable_inputs', 'test_pass_builder', - 'thread_local_allocator_test', 'test_variable', 'test_fsp_op', - 'test_elementwise_gradient_op', 'test_multinomial_op', - 'test_trt_shuffle_channel_detect_pass', 'test_generate_proposals_v2_op', - 'test_graph', 'test_gelu_op', 'test_sample_logits_op', - 
'test_weight_normalization', 'test_activation_bf16_mkldnn_op', - 'trt_dynamic_shape_test', 'test_traced_layer_err_msg', 'test_conv1d_layer', - 'test_asp_optimize', 'test_imperative_container_sequential', 'test_bert', - 'test_transformer_api', 'test_linear_interp_v2_op', 'test_pixel_shuffle', - 'test_expand_op', 'test_save_load', 'test_dropout_op', 'test_while_loop_op', - 'float16_gpu_test', 'test_dict', 'test_bilinear_tensor_product_op', - 'test_parallel_executor_pg', 'test_assert', 'test_smooth_l1_loss_op', - 'sequence_padding_test', 'test_analyzer_ernie', 'test_minimum_op', - 'test_yolov3_loss_op', 'test_decayed_adagrad_op', 'test_split_mkldnn_op', - 'test_squeeze_op', 'test_save_inference_model', 'test_smooth_l1_loss', - 'test_bilateral_slice_op', 'test_inplace_abn_op', 'test_fetch_unmerged', + 'test_imperative_trace_non_persistable_inputs', + 'test_pass_builder', + 'thread_local_allocator_test', + 'test_variable', + 'test_fsp_op', + 'test_elementwise_gradient_op', + 'test_multinomial_op', + 'test_trt_shuffle_channel_detect_pass', + 'test_generate_proposals_v2_op', + 'test_graph', + 'test_gelu_op', + 'test_sample_logits_op', + 'test_weight_normalization', + 'test_activation_bf16_mkldnn_op', + 'trt_dynamic_shape_test', + 'test_traced_layer_err_msg', + 'test_conv1d_layer', + 'test_asp_optimize', + 'test_imperative_container_sequential', + 'test_bert', + 'test_transformer_api', + 'test_linear_interp_v2_op', + 'test_pixel_shuffle', + 'test_expand_op', + 'test_save_load', + 'test_dropout_op', + 'test_while_loop_op', + 'float16_gpu_test', + 'test_dict', + 'test_bilinear_tensor_product_op', + 'test_parallel_executor_pg', + 'test_assert', + 'test_smooth_l1_loss_op', + 'sequence_padding_test', + 'test_analyzer_ernie', + 'test_minimum_op', + 'test_yolov3_loss_op', + 'test_decayed_adagrad_op', + 'test_split_mkldnn_op', + 'test_squeeze_op', + 'test_save_inference_model', + 'test_smooth_l1_loss', + 'test_bilateral_slice_op', + 'test_inplace_abn_op', + 'test_fetch_unmerged', 'test_parallel_executor_feed_persistable_var', 'test_parallel_executor_fetch_isolated_var', 'test_parallel_executor_inference_feed_partial_data', @@ -1050,37 +1540,66 @@ FOURTH_HIGH_PARALLEL_JOB_NEW = [ 'test_parallel_executor_test_while_train', 'test_parallel_executor_seresnext_with_fuse_all_reduce_gpu', 'test_parallel_ssa_graph_inference_feed_partial_data', - 'test_parallel_executor_seresnext_with_reduce_gpu', 'test_data_norm_op', - 'test_install_check', 'graph_node_test', 'trt_mobilenet_test', - 'trt_cascade_rcnn_test', 'trt_resnext_test', 'test_activation_nn_grad', - 'test_trt_dynamic_shape_ernie_fp16_ser_deser', 'test_cross_entropy2_op', - 'test_layer_norm_op', 'test_pool3d_op', 'test_static_save_load', - 'test_trt_flatten_op', 'test_trt_yolo_box_op', 'test_trt_reshape_op', - 'test_trt_elementwise_op', 'test_trt_affine_channel_op', 'test_trt_matmul', - 'test_trt_fc_fuse_pass', 'test_trt_pad_op', 'trt_resnet50_test', - 'test_imperative_lod_tensor_to_selected_rows', 'test_gru_unit_op', + 'test_parallel_executor_seresnext_with_reduce_gpu', + 'test_data_norm_op', + 'test_install_check', + 'graph_node_test', + 'trt_mobilenet_test', + 'trt_cascade_rcnn_test', + 'trt_resnext_test', + 'test_activation_nn_grad', + 'test_trt_dynamic_shape_ernie_fp16_ser_deser', + 'test_cross_entropy2_op', + 'test_layer_norm_op', + 'test_pool3d_op', + 'test_static_save_load', + 'test_trt_flatten_op', + 'test_trt_yolo_box_op', + 'test_trt_reshape_op', + 'test_trt_elementwise_op', + 'test_trt_affine_channel_op', + 'test_trt_matmul', + 
'test_trt_fc_fuse_pass', + 'test_trt_pad_op', + 'trt_resnet50_test', + 'test_imperative_lod_tensor_to_selected_rows', + 'test_gru_unit_op', 'test_amp_check_finite_and_scale_op', - 'test_imperative_selected_rows_to_lod_tensor', 'test_imperative_save_load', - 'test_add_reader_dependency', 'test_imperative_transformer_sorted_gradient', - 'test_bicubic_interp_v2_op', 'test_rank_attention_op', - 'test_space_to_depth_op', 'test_image_classification', - 'test_custom_relu_op_setup', 'test_sgd_op' + 'test_imperative_selected_rows_to_lod_tensor', + 'test_imperative_save_load', + 'test_add_reader_dependency', + 'test_imperative_transformer_sorted_gradient', + 'test_bicubic_interp_v2_op', + 'test_rank_attention_op', + 'test_space_to_depth_op', + 'test_image_classification', + 'test_custom_relu_op_setup', + 'test_sgd_op', ] # mem != 0 : It run 7 job each time in Single cases; 3 job each time in exclusive cases FIFTH_PARALLEL_JOB_NEW = [ 'test_buffer_shared_memory_reuse_pass', 'test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass', - 'test_parallel_executor_crf', 'test_multiprocess_reader_exception', - 'buddy_allocator_test', 'test_multiprocess_dataloader_dataset', + 'test_parallel_executor_crf', + 'test_multiprocess_reader_exception', + 'buddy_allocator_test', + 'test_multiprocess_dataloader_dataset', 'test_multiprocess_dataloader_dynamic', - 'test_multiprocess_dataloader_static', 'test_imperative_resnet', - 'test_nn_grad', 'test_conv2d_op_depthwise_conv', 'test_yolov3', - 'test_conv_nn_grad', 'test_imperative_data_loader_fds_clear', - 'test_conv2d_op', 'test_imperative_data_loader_base', + 'test_multiprocess_dataloader_static', + 'test_imperative_resnet', + 'test_nn_grad', + 'test_conv2d_op_depthwise_conv', + 'test_yolov3', + 'test_conv_nn_grad', + 'test_imperative_data_loader_fds_clear', + 'test_conv2d_op', + 'test_imperative_data_loader_base', 'test_imperative_resnet_sorted_gradient', 'test_multiprocess_dataloader_iterable_dataset_dynamic', - 'test_imperative_se_resnext', 'test_norm_nn_grad', 'test_conv2d_api' + 'test_imperative_se_resnext', + 'test_norm_nn_grad', + 'test_conv2d_api', ] SIXTH_PARALLEL_JOB_NEW = [ @@ -2824,46 +3343,71 @@ def main(): if platform.system() != 'Windows': for unittest in SECONDARY_HIGH_PARALLEL_JOB_NEW: if unittest in test_cases: - secondary_high_parallel_job = secondary_high_parallel_job + '|^' + unittest + '$' + secondary_high_parallel_job = ( + secondary_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for unittest in THIRD_HIGH_PARALLEL_JOB_NEW: if unittest in test_cases: - third_high_parallel_job = third_high_parallel_job + '|^' + unittest + '$' + third_high_parallel_job = ( + third_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for unittest in SIXTH_PARALLEL_JOB_NEW: if unittest in test_cases: - sixth_high_parallel_job = sixth_high_parallel_job + '|^' + unittest + '$' + sixth_high_parallel_job = ( + sixth_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for unittest in LOWEST_PARALLEL_JOB_NEW: if unittest in test_cases: - lowest_high_parallel_job = lowest_high_parallel_job + '|^' + unittest + '$' + lowest_high_parallel_job = ( + lowest_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for unittest in fourth_high_parallel_job_list: if unittest in test_cases: - fourth_high_parallel_job = fourth_high_parallel_job + '|^' + unittest + '$' + fourth_high_parallel_job = ( + fourth_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for 
unittest in fifth_high_parallel_job_list: if unittest in test_cases: - fifth_high_parallel_job = fifth_high_parallel_job + '|^' + unittest + '$' + fifth_high_parallel_job = ( + fifth_high_parallel_job + '|^' + unittest + '$' + ) test_cases.remove(unittest) for unittest in test_cases: non_parallel_job = non_parallel_job + '|^' + unittest + '$' if platform.system() == 'Windows': - print("{};{};{};{}".format(high_parallel_job, fourth_high_parallel_job, - fifth_high_parallel_job, non_parallel_job)) + print( + "{};{};{};{}".format( + high_parallel_job, + fourth_high_parallel_job, + fifth_high_parallel_job, + non_parallel_job, + ) + ) else: - print("{};{};{};{};{};{};{};{}".format( - high_parallel_job, secondary_high_parallel_job, - third_high_parallel_job, fourth_high_parallel_job, - fifth_high_parallel_job, sixth_high_parallel_job, - lowest_high_parallel_job, non_parallel_job)) + print( + "{};{};{};{};{};{};{};{}".format( + high_parallel_job, + secondary_high_parallel_job, + third_high_parallel_job, + fourth_high_parallel_job, + fifth_high_parallel_job, + sixth_high_parallel_job, + lowest_high_parallel_job, + non_parallel_job, + ) + ) if __name__ == '__main__': diff --git a/tools/print_op_desc.py b/tools/print_op_desc.py index cf46849add4b1751b579ffdb25945f4b2d44e32f..c0ce5789b7b717cd31e3882aa1d4bc231fcb66d4 100644 --- a/tools/print_op_desc.py +++ b/tools/print_op_desc.py @@ -87,8 +87,11 @@ def get_attrs_info(op_proto, op_attrs_proto): attrs_info[attr_name] = {} attrs_info[attr_name][TYPE] = attr_proto.type attrs_info[attr_name][GENERATED] = attr_proto.generated - attrs_info[attr_name][DEFAULT_VALUE] = attrs_default_values[ - attr_name] if attr_name in attrs_default_values else None + attrs_info[attr_name][DEFAULT_VALUE] = ( + attrs_default_values[attr_name] + if attr_name in attrs_default_values + else None + ) attrs_info[attr_name][EXTRA] = attr_proto.extra attrs_info[attr_name][QUANT] = attr_proto.quant return attrs_info diff --git a/tools/print_signatures.py b/tools/print_signatures.py index 4c35fdf1c4a1a9e60327bad45e9ac802c52554b5..4c85ba123beb7a306f7dc53d6bfc46caaee4b74c 100644 --- a/tools/print_signatures.py +++ b/tools/print_signatures.py @@ -40,7 +40,9 @@ else: logger.addHandler(console) console.setFormatter( logging.Formatter( - "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s")) + "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s" + ) +) def md5(doc): @@ -50,15 +52,18 @@ def md5(doc): md5sum = hashinst.hexdigest() except UnicodeDecodeError as e: md5sum = None - print("Error({}) occurred when `md5({})`, discard it.".format( - str(e), doc), - file=sys.stderr) + print( + "Error({}) occurred when `md5({})`, discard it.".format( + str(e), doc + ), + file=sys.stderr, + ) return md5sum def is_primitive(instance): - int_types = (int, ) + int_types = (int,) pritimitive_types = int_types + (float, str) if isinstance(instance, pritimitive_types): return True @@ -107,12 +112,16 @@ def visit_all_module(mod): if instance_id in IdSet: continue IdSet.add(instance_id) - if hasattr(instance, - '__name__') and member_name != instance.__name__: + if ( + hasattr(instance, '__name__') + and member_name != instance.__name__ + ): print( - "Found alias API, alias name is: {}, original name is: {}" - .format(member_name, instance.__name__), - file=sys.stderr) + "Found alias API, alias name is: {}, original name is: {}".format( + member_name, instance.__name__ + ), + file=sys.stderr, + ) except: if cur_name not in ErrorSet and cur_name not in skiplist: 
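# --- Editorial aside (illustration only; not part of the patch above or below) ------
# The reformatted hunk just above preserves print_signatures.py's alias detection: an
# API is reported as an alias when the attribute name it is exposed under differs from
# the object's own __name__. A minimal, self-contained sketch of that idea follows; the
# module and function names are hypothetical and chosen only for this example, not
# taken from Paddle itself.
import types


def find_aliases(module):
    # Yield (exposed_name, original_name) for callables whose exposed attribute name
    # differs from their __name__ -- i.e. probable aliases of another public API.
    for exposed_name in dir(module):
        obj = getattr(module, exposed_name)
        if (
            callable(obj)
            and hasattr(obj, "__name__")
            and exposed_name != obj.__name__
        ):
            yield exposed_name, obj.__name__


def relu(x):
    return max(x, 0.0)


demo = types.ModuleType("demo")  # hypothetical stand-in for a real package
demo.relu = relu  # canonical name: matches relu.__name__
demo.rectifier = relu  # alias: exposed name differs from relu.__name__
assert list(find_aliases(demo)) == [("rectifier", "relu")]
# -------------------------------------------------------------------------------------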
ErrorSet.add(cur_name) @@ -130,7 +139,8 @@ def get_all_api(root_path='paddle', attr="__all__"): global api_info_dict api_counter = 0 for filefinder, name, ispkg in pkgutil.walk_packages( - path=paddle.__path__, prefix=paddle.__name__ + '.'): + path=paddle.__path__, prefix=paddle.__name__ + '.' + ): try: if name in sys.modules: m = sys.modules[name] @@ -145,11 +155,17 @@ def get_all_api(root_path='paddle', attr="__all__"): api_counter += process_module(paddle, attr) - logger.info('%s: collected %d apis, %d distinct apis.', attr, api_counter, - len(api_info_dict)) + logger.info( + '%s: collected %d apis, %d distinct apis.', + attr, + api_counter, + len(api_info_dict), + ) - return [(sorted(list(api_info['all_names']))[0], md5(api_info['docstring'])) - for api_info in api_info_dict.values()] + return [ + (sorted(list(api_info['all_names']))[0], md5(api_info['docstring'])) + for api_info in api_info_dict.values() + ] def insert_api_into_dict(full_name, gen_doc_anno=None): @@ -165,8 +181,9 @@ def insert_api_into_dict(full_name, gen_doc_anno=None): logger.warning("AttributeError occurred when `id(eval(%s))`", full_name) return None except Exception as e: - logger.warning("Exception(%s) occurred when `id(eval(%s))`", str(e), - full_name) + logger.warning( + "Exception(%s) occurred when `id(eval(%s))`", str(e), full_name + ) return None else: logger.debug("adding %s to api_info_dict.", full_name) @@ -187,8 +204,8 @@ def insert_api_into_dict(full_name, gen_doc_anno=None): api_info_dict[fc_id]["gen_doc_anno"] = gen_doc_anno if inspect.isfunction(obj): api_info_dict[fc_id]["signature"] = repr( - inspect.getfullargspec(obj)).replace( - 'FullArgSpec', 'ArgSpec', 1) + inspect.getfullargspec(obj) + ).replace('FullArgSpec', 'ArgSpec', 1) return api_info_dict[fc_id] @@ -198,9 +215,11 @@ def process_module(m, attr="__all__"): if hasattr(m, attr): # may have duplication of api for api in set(getattr(m, attr)): - if api[0] == '_': continue + if api[0] == '_': + continue # Exception occurred when `id(eval(paddle.dataset.conll05.test, get_dict))` - if ',' in api: continue + if ',' in api: + continue # api's fullname full_name = m.__name__ + "." + api @@ -210,17 +229,21 @@ def process_module(m, attr="__all__"): if inspect.isclass(api_info['object']): for name, value in inspect.getmembers(api_info['object']): if (not name.startswith("_")) and hasattr( - value, '__name__'): - method_full_name = full_name + '.' + name # value.__name__ + value, '__name__' + ): + method_full_name = ( + full_name + '.' 
+ name + ) # value.__name__ method_api_info = insert_api_into_dict( - method_full_name, 'class_method') + method_full_name, 'class_method' + ) if method_api_info is not None: api_counter += 1 return api_counter def check_public_api(): - modulelist = [ #npqa + modulelist = [ # npqa paddle, paddle.amp, paddle.nn, @@ -297,7 +320,8 @@ def check_public_api(): instance = eval(cur_name) doc_md5 = md5(instance.__doc__) member_dict[cur_name] = "({}, ('document', '{}'))".format( - cur_name, doc_md5) + cur_name, doc_md5 + ) def check_allmodule_callable(): @@ -314,18 +338,23 @@ def parse_args(): """ parser = argparse.ArgumentParser(description='Print Apis Signatures') parser.add_argument('--debug', dest='debug', action="store_true") - parser.add_argument('--method', - dest='method', - type=str, - default='get_all_api', - help="using get_all_api or from_modulelist") - parser.add_argument('module', type=str, help='module', - default='paddle') # not used - parser.add_argument('--skipped', - dest='skipped', - type=str, - help='Skip Checking submodules', - default='paddle.fluid.libpaddle.eager.ops') + parser.add_argument( + '--method', + dest='method', + type=str, + default='get_all_api', + help="using get_all_api or from_modulelist", + ) + parser.add_argument( + 'module', type=str, help='module', default='paddle' + ) # not used + parser.add_argument( + '--skipped', + dest='skipped', + type=str, + help='Skip Checking submodules', + default='paddle.fluid.libpaddle.eager.ops', + ) if len(sys.argv) == 1: args = parser.parse_args(['paddle']) @@ -357,14 +386,22 @@ if __name__ == '__main__': if args.skipped != '' and api_name.find(args.skipped) >= 0: continue api_info = api_info_dict[all_api_names_to_k[api_name]] - print("{0} ({2}, ('document', '{1}'))".format( - api_name, md5(api_info['docstring']), api_info['signature'] - if 'signature' in api_info else 'ArgSpec()')) + print( + "{0} ({2}, ('document', '{1}'))".format( + api_name, + md5(api_info['docstring']), + api_info['signature'] + if 'signature' in api_info + else 'ArgSpec()', + ) + ) if len(ErrorSet) == 0: sys.exit(0) else: for erroritem in ErrorSet: - print("Error, new function {} is unreachable".format(erroritem), - file=sys.stderr) + print( + "Error, new function {} is unreachable".format(erroritem), + file=sys.stderr, + ) sys.exit(1) diff --git a/tools/prune_for_jetson.py b/tools/prune_for_jetson.py index e87268a9fe2ee0d195c9e2aea7cc5a612d2772c5..8bfe9ebf9f8dd7b7a4c24036ea4a9b8090d37504 100644 --- a/tools/prune_for_jetson.py +++ b/tools/prune_for_jetson.py @@ -53,16 +53,20 @@ def find_kernel(content, pattern): def prune_phi_kernels(): tool_dir = os.path.dirname(os.path.abspath(__file__)) - all_op = glob.glob(os.path.join(tool_dir, '../paddle/phi/kernels/**/*.cc'), - recursive=True) - all_op += glob.glob(os.path.join(tool_dir, '../paddle/phi/kernels/**/*.cu'), - recursive=True) + all_op = glob.glob( + os.path.join(tool_dir, '../paddle/phi/kernels/**/*.cc'), recursive=True + ) + all_op += glob.glob( + os.path.join(tool_dir, '../paddle/phi/kernels/**/*.cu'), recursive=True + ) register_op_count = 0 for op_file in all_op: need_continue = False file_blacklist = [ - "kernels/empty_kernel.cc", "/cast_kernel.c", "/batch_norm_kernel.c" + "kernels/empty_kernel.cc", + "/cast_kernel.c", + "/batch_norm_kernel.c", ] for bname in file_blacklist: if op_file.find(bname) >= 0: @@ -99,19 +103,25 @@ def apply_patches(): ret = os.system( "cd %s && rm -f paddle/fluid/inference/api/tensorrt_predictor.* " " && rm -f paddle/fluid/inference/api/paddle_tensorrt_predictor.h " - 
" && git apply tools/infer_prune_patches/*.patch && cd -" % work_path) + " && git apply tools/infer_prune_patches/*.patch && cd -" % work_path + ) return ret == 0 def append_fluid_kernels(): op_white_list = ["load", "load_combine"] - #1. add to makefile - file_name = os.path.dirname(os.path.abspath(__file__)) \ - + "/../paddle/fluid/inference/tensorrt/CMakeLists.txt" + # 1. add to makefile + file_name = ( + os.path.dirname(os.path.abspath(__file__)) + + "/../paddle/fluid/inference/tensorrt/CMakeLists.txt" + ) append_str = "\nfile(APPEND ${pybind_file} \"USE_NO_KERNEL_OP__(tensorrt_engine);\\n\")\n" for op in op_white_list: - append_str = append_str + "file(APPEND ${pybind_file} \"USE_OP__(%s);\\n\")\n" % op + append_str = ( + append_str + + "file(APPEND ${pybind_file} \"USE_OP__(%s);\\n\")\n" % op + ) with open(file_name, 'r', encoding='utf-8') as f: content = ''.join(f.readlines()) @@ -120,22 +130,26 @@ def append_fluid_kernels(): new_content = content.replace(location_str, location_str + append_str) if new_content == content: - print("ERROR: can not find \"%s\" in file \"%s\"" % - (location_str, file_name)) + print( + "ERROR: can not find \"%s\" in file \"%s\"" + % (location_str, file_name) + ) return False with open(file_name, 'w', encoding='utf-8') as f: f.write(u'{}'.format(new_content)) - #2. add op and kernel register + # 2. add op and kernel register op_white_list.append("tensorrt_engine") tool_dir = os.path.dirname(os.path.abspath(__file__)) - all_op = glob.glob(os.path.join(tool_dir, - '../paddle/fluid/operators/**/*.cc'), - recursive=True) - all_op += glob.glob(os.path.join(tool_dir, - '../paddle/fluid/operators/**/*.cu'), - recursive=True) + all_op = glob.glob( + os.path.join(tool_dir, '../paddle/fluid/operators/**/*.cc'), + recursive=True, + ) + all_op += glob.glob( + os.path.join(tool_dir, '../paddle/fluid/operators/**/*.cu'), + recursive=True, + ) for op_file in all_op: with open(op_file, 'r', encoding='utf-8') as f: @@ -143,18 +157,18 @@ def append_fluid_kernels(): for op in op_white_list: patterns = { - "REGISTER_OPERATOR": - r"REGISTER_OPERATOR\(\s*%s\s*," % op, - "REGISTER_OP_CPU_KERNEL": - r"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op, - "REGISTER_OP_CUDA_KERNEL": - r"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op + "REGISTER_OPERATOR": r"REGISTER_OPERATOR\(\s*%s\s*," % op, + "REGISTER_OP_CPU_KERNEL": r"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," + % op, + "REGISTER_OP_CUDA_KERNEL": r"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," + % op, } for k, p in patterns.items(): matches = re.findall(p, content, flags=re.DOTALL) if len(matches) > 0: - content = content.replace(matches[0], - matches[0].replace(k, k + "__")) + content = content.replace( + matches[0], matches[0].replace(k, k + "__") + ) with open(op_file, 'w', encoding='utf-8') as f: f.write(u'{}'.format(content)) @@ -164,13 +178,13 @@ def append_fluid_kernels(): if __name__ == '__main__': print("================ step 1: apply patches =======================") - assert (apply_patches()) + assert apply_patches() print("==============================================================\n") print("================ step 2: append fluid op/kernels==============") - assert (append_fluid_kernels()) + assert append_fluid_kernels() print("==============================================================\n") print("================ step 3:prune phi kernels ====================") - assert (prune_phi_kernels()) + assert prune_phi_kernels() print("==============================================================\n") diff --git a/tools/pyCov_multithreading.py 
b/tools/pyCov_multithreading.py index fb4cef6805ff3b7a424f1c6c34f38cb43df7cf60..e808dcb55e0ac2c3636a012543157b379f5d76d3 100644 --- a/tools/pyCov_multithreading.py +++ b/tools/pyCov_multithreading.py @@ -32,9 +32,12 @@ def worker(fun): def threadPool(threadPoolNum): threadPool = [] for i in range(threadPoolNum): - thread = threading.Thread(target=worker, args={ - doFun, - }) + thread = threading.Thread( + target=worker, + args={ + doFun, + }, + ) thread.daemon = True threadPool.append(thread) return threadPool @@ -49,8 +52,9 @@ def getPyCovResult(params): os.system('cd %s && coverage combine `ls python-coverage.data.*`' % path) os.system('cd %s && pwd && coverage xml -i -o python-coverage.xml' % path) xml_path = '%s/python-coverage.xml' % path - os.system("python2.7 %s/tools/analysisPyXml.py %s %s" % - (rootPath, rootPath, ut)) + os.system( + "python2.7 %s/tools/analysisPyXml.py %s %s" % (rootPath, rootPath, ut) + ) endTime = int(time.time()) print('pyCov Time: %s' % (endTime - startTime)) diff --git a/tools/remove_grad_op_and_kernel.py b/tools/remove_grad_op_and_kernel.py index 19778c27dbe12bf510f75a0edaa45ea8ec46f1bb..8aa456bdb671f6e75d34b5cf58779055a08ad21a 100644 --- a/tools/remove_grad_op_and_kernel.py +++ b/tools/remove_grad_op_and_kernel.py @@ -54,8 +54,11 @@ def update_operator_cmake(cmake_file): match = re.findall(pat2, content, flags=re.DOTALL) content = content.replace( - match[0], code2 + '\n' + - match[0].replace('py_func_op', 'py_func_op ${LOSS_OPS}')) + match[0], + code2 + + '\n' + + match[0].replace('py_func_op', 'py_func_op ${LOSS_OPS}'), + ) with open(cmake_file, 'w') as f: f.write(content) @@ -65,16 +68,23 @@ if __name__ == '__main__': tool_dir = os.path.dirname(os.path.abspath(__file__)) - all_op = glob.glob(os.path.join(tool_dir, - '../paddle/fluid/operators/**/*.cc'), - recursive=True) - all_op += glob.glob(os.path.join(tool_dir, - '../paddle/fluid/operators/**/*.cu'), - recursive=True) + all_op = glob.glob( + os.path.join(tool_dir, '../paddle/fluid/operators/**/*.cc'), + recursive=True, + ) + all_op += glob.glob( + os.path.join(tool_dir, '../paddle/fluid/operators/**/*.cu'), + recursive=True, + ) spec_ops = ['activation_op.cc'] - register_op_count, register_op_cpu_kernel_count, register_op_cuda_kernel_count, register_op_xpu_kernel_count = 0, 0, 0, 0 + ( + register_op_count, + register_op_cpu_kernel_count, + register_op_cuda_kernel_count, + register_op_xpu_kernel_count, + ) = (0, 0, 0, 0) register_op_kernel_count, register_op_kernel_with_custom_type_count = 0, 0 # 1. remove all grad op and kernel @@ -100,7 +110,9 @@ if __name__ == '__main__': op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?' custom_pattern1 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?' - custom_pattern2 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?' + custom_pattern2 = ( + r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?' 
+ ) op_name = os.path.split(op_file)[1] if op_name in spec_ops: @@ -121,18 +133,24 @@ if __name__ == '__main__': with open(op_file, 'r', encoding='utf-8') as f: content = ''.join(f.readlines()) - op, op_count = remove_grad_op_and_kernel(content, op_pattern1, - op_pattern2) + op, op_count = remove_grad_op_and_kernel( + content, op_pattern1, op_pattern2 + ) cpu_kernel, cpu_kernel_count = remove_grad_op_and_kernel( - content, cpu_kernel_pattern1, cpu_kernel_pattern2) + content, cpu_kernel_pattern1, cpu_kernel_pattern2 + ) gpu_kernel, gpu_kernel_count = remove_grad_op_and_kernel( - content, gpu_kernel_pattern1, gpu_kernel_pattern2) + content, gpu_kernel_pattern1, gpu_kernel_pattern2 + ) xpu_kernel, xpu_kernel_count = remove_grad_op_and_kernel( - content, xpu_kernel_pattern1, xpu_kernel_pattern2) + content, xpu_kernel_pattern1, xpu_kernel_pattern2 + ) op_kernel, op_kernel_count = remove_grad_op_and_kernel( - content, op_kernel_pattern1, op_kernel_pattern2) + content, op_kernel_pattern1, op_kernel_pattern2 + ) custom_kernel, custom_kernel_count = remove_grad_op_and_kernel( - content, custom_pattern1, custom_pattern2) + content, custom_pattern1, custom_pattern2 + ) register_op_count += op_count register_op_cpu_kernel_count += cpu_kernel_count @@ -155,17 +173,24 @@ if __name__ == '__main__': f.write(u'{}'.format(content)) # 2. update operators/CMakeLists.txt - cmake_file = os.path.join(tool_dir, - '../paddle/fluid/operators/CMakeLists.txt') + cmake_file = os.path.join( + tool_dir, '../paddle/fluid/operators/CMakeLists.txt' + ) update_operator_cmake(cmake_file) print('We erase all grad op and kernel for Paddle-Inference lib.') print('%50s%10s' % ('type', 'count')) print('%50s%10s' % ('REGISTER_OPERATOR', register_op_count)) print('%50s%10s' % ('REGISTER_OP_CPU_KERNEL', register_op_cpu_kernel_count)) - print('%50s%10s' % - ('REGISTER_OP_CUDA_KERNEL', register_op_cuda_kernel_count)) + print( + '%50s%10s' % ('REGISTER_OP_CUDA_KERNEL', register_op_cuda_kernel_count) + ) print('%50s%10s' % ('REGISTER_OP_XPU_KERNEL', register_op_xpu_kernel_count)) print('%50s%10s' % ('REGISTER_OP_KERNEL', register_op_kernel_count)) - print('%50s%10s' % ('REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE', - register_op_kernel_with_custom_type_count)) + print( + '%50s%10s' + % ( + 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE', + register_op_kernel_with_custom_type_count, + ) + ) diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py index 7e624652ad508a6463a22b8db0b67636f9066e3d..24af09893e355e0ec27168c3390803dc20dd461e 100644 --- a/tools/sampcd_processor.py +++ b/tools/sampcd_processor.py @@ -36,7 +36,8 @@ import time logger = logging.getLogger() if logger.handlers: console = logger.handlers[ - 0] # we assume the first handler is the one we want to configure + 0 + ] # we assume the first handler is the one we want to configure else: console = logging.StreamHandler(stream=sys.stderr) logger.addHandler(console) @@ -76,7 +77,7 @@ def find_all(srcstr, substr): """ indices = [] gotone = srcstr.find(substr) - while (gotone != -1): + while gotone != -1: indices.append(gotone) gotone = srcstr.find(substr, gotone + 1) return indices @@ -124,7 +125,7 @@ def extract_code_blocks_from_docstr(docstr): mo = re.search(r"Examples:", docstr) if mo is None: return code_blocks - ds_list = docstr[mo.start():].replace("\t", ' ').split("\n") + ds_list = docstr[mo.start() :].replace("\t", ' ').split("\n") lastlineindex = len(ds_list) - 1 cb_start_pat = re.compile(r"code-block::\s*python") @@ -148,16 +149,14 @@ def extract_code_blocks_from_docstr(docstr): 
def _append_code_block(): # nonlocal code_blocks, cb_cur, cb_cur_name, cb_cur_seq_id, cb_required - code_blocks.append({ - 'codes': - inspect.cleandoc("\n".join(cb_info['cb_cur'])), - 'name': - cb_info['cb_cur_name'], - 'id': - cb_info['cb_cur_seq_id'], - 'required': - cb_info['cb_required'], - }) + code_blocks.append( + { + 'codes': inspect.cleandoc("\n".join(cb_info['cb_cur'])), + 'name': cb_info['cb_cur_name'], + 'id': cb_info['cb_cur_seq_id'], + 'required': cb_info['cb_required'], + } + ) for lineno, linecont in enumerate(ds_list): if re.search(cb_start_pat, linecont): @@ -186,7 +185,9 @@ def extract_code_blocks_from_docstr(docstr): # docstring end if lineno == lastlineindex: mo = re.search(r"\S", linecont) - if mo is not None and cb_info['cb_cur_indent'] <= mo.start( + if ( + mo is not None + and cb_info['cb_cur_indent'] <= mo.start() ): cb_info['cb_cur'].append(linecont) if len(cb_info['cb_cur']): @@ -262,14 +263,21 @@ def is_required_match(requirestr, cbtitle='not-specified'): logger.info('%s: skipped', cbtitle) return None - if all([ - k in SAMPLE_CODE_TEST_CAPACITY for k in requires + if all( + [ + k in SAMPLE_CODE_TEST_CAPACITY + for k in requires if k not in ['skip', 'skiptest'] - ]): + ] + ): return True - logger.info('%s: the equipments [%s] not match the required [%s].', cbtitle, - ','.join(SAMPLE_CODE_TEST_CAPACITY), ','.join(requires)) + logger.info( + '%s: the equipments [%s] not match the required [%s].', + cbtitle, + ','.join(SAMPLE_CODE_TEST_CAPACITY), + ','.join(requires), + ) return False @@ -280,13 +288,18 @@ def insert_codes_into_codeblock(codeblock, apiname='not-specified'): global ENV_KEY_CODES_FRONTEND, GPU_ID, RUN_ON_DEVICE # readonly inserted_codes_f = '' inserted_codes_b = '' - if ENV_KEY_CODES_FRONTEND in os.environ and os.environ[ - ENV_KEY_CODES_FRONTEND]: + if ( + ENV_KEY_CODES_FRONTEND in os.environ + and os.environ[ENV_KEY_CODES_FRONTEND] + ): inserted_codes_f = os.environ[ENV_KEY_CODES_FRONTEND] else: cpu_str = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = ""\n' - gpu_str = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = "{}"\n'.format( - GPU_ID) + gpu_str = ( + '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = "{}"\n'.format( + GPU_ID + ) + ) if 'required' in codeblock and codeblock['required']: if codeblock['required'] == 'cpu': inserted_codes_f = cpu_str @@ -298,13 +311,18 @@ def insert_codes_into_codeblock(codeblock, apiname='not-specified'): elif RUN_ON_DEVICE == "gpu": inserted_codes_f = gpu_str inserted_codes_b = '\nprint("{}\'s sample code (name:{}, id:{}) is executed successfully!")'.format( - apiname, codeblock['name'], codeblock['id']) + apiname, codeblock['name'], codeblock['id'] + ) cb = codeblock['codes'] last_future_line_end = find_last_future_line_end(cb) if last_future_line_end: - return cb[:last_future_line_end] + inserted_codes_f + cb[ - last_future_line_end:] + inserted_codes_b + return ( + cb[:last_future_line_end] + + inserted_codes_f + + cb[last_future_line_end:] + + inserted_codes_b + ) else: return inserted_codes_f + cb + inserted_codes_b @@ -335,12 +353,14 @@ def sampcd_extract_to_file(srccom, name, htype="def", hname=""): if srccom.find("Examples:") != -1: logger.info("----example code check----") if srccom.find(">>>") != -1: - logger.warning(r"""Deprecated sample code style: + logger.warning( + r"""Deprecated sample code style: Examples: >>>codeline >>>codeline -Please use '.. code-block:: python' to format the sample code.""") +Please use '.. 
code-block:: python' to format the sample code.""" + ) return [] else: logger.error( @@ -358,22 +378,33 @@ Please use '.. code-block:: python' to format the sample code.""") # so, the following conditional statements are intentionally arranged. if matched == True: tfname = os.path.join( - SAMPLECODE_TEMPDIR, '{}_example{}'.format( + SAMPLECODE_TEMPDIR, + '{}_example{}'.format( name, - '.py' if len(codeblocks) == 1 else '_{}.py'.format(y + 1))) + '.py' if len(codeblocks) == 1 else '_{}.py'.format(y + 1), + ), + ) with open(tfname, 'w') as tempf: sampcd = insert_codes_into_codeblock(cb, name) tempf.write(sampcd) sample_code_filenames.append(tfname) elif matched is None: - logger.info('{}\' code block (name:{}, id:{}) is skipped.'.format( - name, cb['name'], cb['id'])) + logger.info( + '{}\' code block (name:{}, id:{}) is skipped.'.format( + name, cb['name'], cb['id'] + ) + ) SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id'])) elif matched == False: logger.info( - '{}\' code block (name:{}, id:{}) required({}) not match capacity({}).' - .format(name, cb['name'], cb['id'], cb['required'], - SAMPLE_CODE_TEST_CAPACITY)) + '{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.format( + name, + cb['name'], + cb['id'], + cb['required'], + SAMPLE_CODE_TEST_CAPACITY, + ) + ) if cb['required'] not in SUMMARY_INFO: SUMMARY_INFO[cb['required']] = [] SUMMARY_INFO[cb['required']].append("{}-{}".format(name, cb['id'])) @@ -406,9 +437,9 @@ def execute_samplecode(tfname): logger.info("----example code check----") logger.info("executing sample code: %s", tfname) start_time = time.time() - subprc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) output, error = subprc.communicate() msg = "".join(output.decode(encoding='utf-8')) err = "".join(error.decode(encoding='utf-8')) @@ -425,7 +456,13 @@ subprocess return code: %d Error Raised from Sample Code: stderr: %s stdout: %s -""", tfname, f.read(), subprc.returncode, err, msg) +""", + tfname, + f.read(), + subprc.returncode, + err, + msg, + ) logger.info("----example code check failed----") result = False else: @@ -450,6 +487,7 @@ def get_filenames(full_test=False): global whl_error import paddle # noqa: F401 import paddle.fluid.contrib.slim.quantization # noqa: F401 + whl_error = [] if full_test: get_full_api_from_pr_spec() @@ -470,7 +508,8 @@ def get_filenames(full_test=False): continue if hasattr(api_obj, '__doc__') and api_obj.__doc__: sample_code_filenames = sampcd_extract_to_file( - api_obj.__doc__, api) + api_obj.__doc__, api + ) for tfname in sample_code_filenames: all_sample_code_filenames[tfname] = api return all_sample_code_filenames @@ -492,7 +531,8 @@ def get_api_md5(path): return api_md5 pat = re.compile(r'\((paddle[^,]+)\W*document\W*([0-9a-z]{32})') patArgSpec = re.compile( - r'^(paddle[^,]+)\s+\(ArgSpec.*document\W*([0-9a-z]{32})') + r'^(paddle[^,]+)\s+\(ArgSpec.*document\W*([0-9a-z]{32})' + ) with open(API_spec) as f: for line in f.readlines(): mo = pat.search(line) @@ -509,6 +549,7 @@ def get_full_api(): """ global API_DIFF_SPEC_FN ## readonly from print_signatures import get_all_api_from_modulelist + member_dict = get_all_api_from_modulelist() with open(API_DIFF_SPEC_FN, 'w') as f: f.write("\n".join(member_dict.keys())) @@ -520,6 +561,7 @@ def get_full_api_by_walk(): """ global API_DIFF_SPEC_FN ## readonly from print_signatures import get_all_api + apilist = get_all_api() with open(API_DIFF_SPEC_FN, 
'w') as f: f.write("\n".join([ai[0] for ai in apilist])) @@ -549,8 +591,12 @@ def get_incrementapi(): for key in pr_api: if key in dev_api: if dev_api[key] != pr_api[key]: - logger.debug("%s in dev is %s, different from pr's %s", key, - dev_api[key], pr_api[key]) + logger.debug( + "%s in dev is %s, different from pr's %s", + key, + dev_api[key], + pr_api[key], + ) f.write(key) f.write('\n') else: @@ -564,9 +610,9 @@ def exec_gen_doc(): cmd = ["bash", "document_preview.sh"] logger.info("----exec gen_doc----") start_time = time.time() - subprc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) output, error = subprc.communicate() msg = "".join(output.decode(encoding='utf-8')) err = "".join(error.decode(encoding='utf-8')) @@ -585,7 +631,8 @@ def exec_gen_doc(): logger.info("----exec gen_doc success----") for fn in [ - '/docs/en/develop/index_en.html', '/docs/zh/develop/index_cn.html' + '/docs/en/develop/index_en.html', + '/docs/zh/develop/index_cn.html', ]: if os.path.exists(fn): logger.info('%s exists.', fn) @@ -616,16 +663,16 @@ def parse_args(): parser.add_argument('--debug', dest='debug', action="store_true") parser.add_argument('--full-test', dest='full_test', action="store_true") parser.add_argument('mode', type=str, help='run on device', default='cpu') - parser.add_argument('--build-doc', - dest='build_doc', - action='store_true', - help='build doc if need.') + parser.add_argument( + '--build-doc', + dest='build_doc', + action='store_true', + help='build doc if need.', + ) for item in arguments: - parser.add_argument(item[0], - dest=item[1], - help=item[4], - type=item[2], - default=item[3]) + parser.add_argument( + item[0], dest=item[1], help=item[4], type=item[2], default=item[3] + ) if len(sys.argv) == 1: args = parser.parse_args(['cpu']) @@ -648,21 +695,24 @@ if __name__ == '__main__': logfHandler.setFormatter( logging.Formatter( "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s" - )) + ) + ) logger.addHandler(logfHandler) if args.mode == "gpu": GPU_ID = args.gpu_id logger.info("using GPU_ID %d", GPU_ID) elif args.mode != "cpu": - logger.error("Unrecognized argument:%s, 'cpu' or 'gpu' is desired.", - args.mode) + logger.error( + "Unrecognized argument:%s, 'cpu' or 'gpu' is desired.", args.mode + ) sys.exit("Invalid arguments") RUN_ON_DEVICE = args.mode get_test_capacity() logger.info("API check -- Example Code") - logger.info("sample_test running under python %s", - platform.python_version()) + logger.info( + "sample_test running under python %s", platform.python_version() + ) if os.path.exists(SAMPLECODE_TEMPDIR): if not os.path.isdir(SAMPLECODE_TEMPDIR): @@ -706,16 +756,18 @@ if __name__ == '__main__': ) for temp in result: if not temp[0]: - logger.info("In addition, mistakes found in sample codes: %s", - temp[1]) + logger.info( + "In addition, mistakes found in sample codes: %s", temp[1] + ) logger.info("----------------------------------------------------") exit(1) else: timeovered_test = {} for temp in result: if not temp[0]: - logger.info("In addition, mistakes found in sample codes: %s", - temp[1]) + logger.info( + "In addition, mistakes found in sample codes: %s", temp[1] + ) SUMMARY_INFO['failed'].append(temp[1]) else: SUMMARY_INFO['success'].append(temp[1]) @@ -723,31 +775,37 @@ if __name__ == '__main__': timeovered_test[temp[1]] = temp[3] if len(timeovered_test): - logger.info("%d sample codes ran time over 10s", - len(timeovered_test)) + 
logger.info( + "%d sample codes ran time over 10s", len(timeovered_test) + ) if args.debug: for k, v in timeovered_test.items(): logger.info('{} - {}s'.format(k, v)) if len(SUMMARY_INFO['success']): - logger.info("%d sample codes ran success", - len(SUMMARY_INFO['success'])) + logger.info( + "%d sample codes ran success", len(SUMMARY_INFO['success']) + ) for k, v in SUMMARY_INFO.items(): if k not in ['success', 'failed', 'skiptest', 'nocodes']: - logger.info("%d sample codes required not match for %s", len(v), - k) + logger.info( + "%d sample codes required not match for %s", len(v), k + ) if len(SUMMARY_INFO['skiptest']): - logger.info("%d sample codes skipped", - len(SUMMARY_INFO['skiptest'])) + logger.info( + "%d sample codes skipped", len(SUMMARY_INFO['skiptest']) + ) if args.debug: logger.info('\n'.join(SUMMARY_INFO['skiptest'])) if len(SUMMARY_INFO['nocodes']): - logger.info("%d apis don't have sample codes", - len(SUMMARY_INFO['nocodes'])) + logger.info( + "%d apis don't have sample codes", len(SUMMARY_INFO['nocodes']) + ) if args.debug: logger.info('\n'.join(SUMMARY_INFO['nocodes'])) if len(SUMMARY_INFO['failed']): - logger.info("%d sample codes ran failed", - len(SUMMARY_INFO['failed'])) + logger.info( + "%d sample codes ran failed", len(SUMMARY_INFO['failed']) + ) logger.info('\n'.join(SUMMARY_INFO['failed'])) logger.info( "Mistakes found in sample codes. Please recheck the sample codes." diff --git a/tools/summary_env.py b/tools/summary_env.py index 2abee2821c0739ad840d7060dafdd478c81cefe5..14fb512ca22b002df4bb03d2268bf4722726abd3 100644 --- a/tools/summary_env.py +++ b/tools/summary_env.py @@ -34,6 +34,7 @@ envs = {} def get_paddle_info(): try: import paddle + envs['paddle_version'] = paddle.__version__ envs['paddle_with_cuda'] = paddle.fluid.core.is_compiled_with_cuda() except: @@ -63,10 +64,9 @@ def get_python_info(): def run_shell_command(cmd): - out, err = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True).communicate() + out, err = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ).communicate() if err: return None else: @@ -82,7 +82,6 @@ def get_cuda_info(): def get_cudnn_info(): - def _get_cudnn_ver(cmd): out = run_shell_command(cmd) if out: @@ -93,15 +92,17 @@ def get_cudnn_info(): if platform.system() == "Windows": cudnn_dll_path = run_shell_command('where cudnn*') if cudnn_dll_path: - cudnn_header_path = cudnn_dll_path.split( - 'bin')[0] + r'include\cudnn.h' + cudnn_header_path = ( + cudnn_dll_path.split('bin')[0] + r'include\cudnn.h' + ) cmd = 'type "{0}" | findstr "{1}" | findstr /v "CUDNN_VERSION"' else: envs['cudnn_version'] = None return else: cudnn_header_path = run_shell_command( - 'whereis "cudnn.h" | awk \'{print $2}\'') + 'whereis "cudnn.h" | awk \'{print $2}\'' + ) if cudnn_header_path: cudnn_header_path = cudnn_header_path.strip() cmd = 'cat "{0}" | grep "{1}" | grep -v "CUDNN_VERSION"' @@ -112,7 +113,8 @@ def get_cudnn_info(): major = _get_cudnn_ver(cmd.format(cudnn_header_path, 'CUDNN_MAJOR')) minor = _get_cudnn_ver(cmd.format(cudnn_header_path, 'CUDNN_MINOR')) patch_level = _get_cudnn_ver( - cmd.format(cudnn_header_path, 'CUDNN_PATCHLEVEL')) + cmd.format(cudnn_header_path, 'CUDNN_PATCHLEVEL') + ) envs['cudnn_version'] = "{0}.{1}.{2}".format(major, minor, patch_level) @@ -120,8 +122,9 @@ def get_cudnn_info(): def get_driver_info(): driver_ver = run_shell_command('nvidia-smi') if driver_ver: - driver_ver = driver_ver.split('Driver Version:')[1].strip().split( - ' ')[0] + driver_ver 
= ( + driver_ver.split('Driver Version:')[1].strip().split(' ')[0] + ) else: driver_ver = None envs['nvidia_driver_version'] = driver_ver diff --git a/tools/test_check_api_compatible.py b/tools/test_check_api_compatible.py index 1bed3951740b29ae66a468fbd8010a8c9f99575d..d7c9a0b195d3635d741121d5b300e04792181859 100644 --- a/tools/test_check_api_compatible.py +++ b/tools/test_check_api_compatible.py @@ -25,10 +25,12 @@ from check_api_compatible import check_compatible_str class Test_check_compatible(unittest.TestCase): - def setUp(self) -> None: self.fullargspec_prefix = 'inspect.Full' - self.argspec_str_o = self.fullargspec_prefix + '''ArgSpec(args=['shape', 'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={})''' + self.argspec_str_o = ( + self.fullargspec_prefix + + '''ArgSpec(args=['shape', 'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={})''' + ) return super().setUp() def test_normal_not_changed(self): @@ -68,11 +70,13 @@ class Test_check_compatible(unittest.TestCase): class Test_check_compatible_str(unittest.TestCase): - def setUp(self) -> None: self.fullargspec_prefix = 'inspect.Full' # paddle.fluid.layer_helper_base.LayerHelperBase.create_parameter - self.argspec_str_o = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + self.argspec_str_o = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) return super().setUp() def test_normal_not_changed(self): @@ -81,26 +85,44 @@ class Test_check_compatible_str(unittest.TestCase): self.assertTrue(check_compatible_str(argspec_o, argspec_n)) def test_args_added(self): - argspec_n = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) argspec_o = self.argspec_str_o self.assertFalse(check_compatible_str(argspec_o, argspec_n)) - argspec_n = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR, argadded), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR, argadded), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) self.assertTrue(check_compatible_str(argspec_o, argspec_n)) - argspec_n = 
self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR, argadded, 1), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR, argadded, 1), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) self.assertFalse(check_compatible_str(argspec_o, argspec_n)) - argspec_n = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(1, None, False, None, False, VarType.LOD_TENSOR, argadded), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type', 'argadded'], varargs=None, varkw=None, defaults=(1, None, False, None, False, VarType.LOD_TENSOR, argadded), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) self.assertTrue(check_compatible_str(argspec_o, argspec_n)) def test_args_places_exchanged(self): - argspec_n = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'type', 'stop_gradient'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'type', 'stop_gradient'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) argspec_o = self.argspec_str_o self.assertFalse(check_compatible_str(argspec_o, argspec_n)) def test_args_reduced(self): - argspec_n = self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + argspec_n = ( + self.fullargspec_prefix + + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" + ) argspec_o = self.argspec_str_o self.assertFalse(check_compatible_str(argspec_o, argspec_n)) @@ -111,17 +133,23 @@ class Test_check_compatible_str(unittest.TestCase): class Test_read_argspec_from_file(unittest.TestCase): - def setUp(self) -> None: self.fullargspec_prefix = 'inspect.Full' - self.argspec_str_o = self.fullargspec_prefix + '''ArgSpec(args=['shape', 'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={})''' + self.argspec_str_o = ( + self.fullargspec_prefix + + '''ArgSpec(args=['shape', 'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={})''' + ) self.api_spec_file = tempfile.TemporaryFile('w+t') if self.api_spec_file: - self.api_spec_file.write("\n".join([ - """paddle.ones (ArgSpec(args=['shape', 
'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={}), ('document', '50a3b3a77fa13bb2ae4337d8f9d091b7'))""", - # """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", - """paddle.five_plus_five (ArgSpec(), ('document', 'ff0f188c95030158cc6398d2a6c5five'))""", - ])) + self.api_spec_file.write( + "\n".join( + [ + """paddle.ones (ArgSpec(args=['shape', 'dtype', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={}), ('document', '50a3b3a77fa13bb2ae4337d8f9d091b7'))""", + # """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", + """paddle.five_plus_five (ArgSpec(), ('document', 'ff0f188c95030158cc6398d2a6c5five'))""", + ] + ) + ) self.api_spec_file.seek(0) return super().setUp() @@ -134,9 +162,11 @@ class Test_read_argspec_from_file(unittest.TestCase): api_argspec_dict = read_argspec_from_file(self.api_spec_file) argspec = eval(self.argspec_str_o) self.assertEqual( - api_argspec_dict.get('paddle.ones').args, argspec.args) + api_argspec_dict.get('paddle.ones').args, argspec.args + ) self.assertEqual( - api_argspec_dict.get('paddle.ones').defaults, argspec.defaults) + api_argspec_dict.get('paddle.ones').defaults, argspec.defaults + ) self.assertIsNone(api_argspec_dict.get('paddle.five_plus_five')) else: self.fail('api_spec_file error') diff --git a/tools/test_check_pr_approval.py b/tools/test_check_pr_approval.py index 5f3c7ca11ccc0973ae53edd379c64fc68fc17193..c126a749f1be443fcd3fca91e0c8dfca9f98edfb 100644 --- a/tools/test_check_pr_approval.py +++ b/tools/test_check_pr_approval.py @@ -22,7 +22,6 @@ import sys class Test_check_approval(unittest.TestCase): - def setUp(self): self.codeset = 'UTF-8' # only key info in it @@ -68,47 +67,64 @@ class Test_check_approval(unittest.TestCase): "author_association": "CONTRIBUTOR" } ] -""".encode(self.codeset) +""".encode( + self.codeset + ) def test_ids(self): cmd = [sys.executable, 'check_pr_approval.py', '1', '26408901'] - subprc = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) output, error = subprc.communicate(input=self.jsonstr) self.assertEqual('TRUE', output.decode(self.codeset).rstrip()) def test_logins(self): cmd = [sys.executable, 'check_pr_approval.py', '1', 'pangyoki'] - subprc = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) output, error = subprc.communicate(input=self.jsonstr) self.assertEqual('TRUE', output.decode(self.codeset).rstrip()) def test_ids_and_logins(self): cmd = [ - sys.executable, 'check_pr_approval.py', '2', 'pangyoki', '13469016' + sys.executable, + 'check_pr_approval.py', + '2', + 'pangyoki', + '13469016', ] - subprc = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) output, error = subprc.communicate(input=self.jsonstr) - #self.assertEqual('', error.rstrip()) + # self.assertEqual('', error.rstrip()) self.assertEqual('TRUE', output.decode(self.codeset).rstrip()) def 
test_check_with_required_reviewer_not_approved(self): cmd = [ - sys.executable, 'check_pr_approval.py', '2', 'wadefelix', - ' 13469016' + sys.executable, + 'check_pr_approval.py', + '2', + 'wadefelix', + ' 13469016', ] - subprc = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subprc = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) output, error = subprc.communicate(input=self.jsonstr) self.assertEqual('FALSE', output.decode(self.codeset).rstrip()) diff --git a/tools/test_print_signatures.py b/tools/test_print_signatures.py index 14275b6b7ae4db258c698a2bd216c26c7a04406e..c757fcce967f88d9b51dd62afa08ea4bf40107f5 100644 --- a/tools/test_print_signatures.py +++ b/tools/test_print_signatures.py @@ -42,7 +42,7 @@ def func_example_2(func=functools.partial(func_example, 1)): pass -class ClassExample(): +class ClassExample: """ example Class """ @@ -55,7 +55,6 @@ class ClassExample(): class Test_all_in_print_signatures(unittest.TestCase): - def test_md5(self): algo = hashlib.md5() algo.update(func_example.__doc__.encode('utf-8')) @@ -64,13 +63,13 @@ class Test_all_in_print_signatures(unittest.TestCase): class Test_is_primitive(unittest.TestCase): - def test_single(self): self.assertTrue(is_primitive(2)) self.assertTrue(is_primitive(2.1)) self.assertTrue(is_primitive("2.1.1")) - self.assertFalse(is_primitive( - "hello paddle".encode('UTF-8'))) # True for python2 + self.assertFalse( + is_primitive("hello paddle".encode('UTF-8')) + ) # True for python2 self.assertFalse(is_primitive(1j)) self.assertTrue(is_primitive(True)) diff --git a/tools/test_runner.py b/tools/test_runner.py index bb1e623ce04c33ad0696ef7fca467d20aa913291..65da72b539df7a1ef42f4e5a217e98e2e2902ca4 100644 --- a/tools/test_runner.py +++ b/tools/test_runner.py @@ -28,7 +28,7 @@ import static_mode_white_list def main(): sys.path.append(os.getcwd()) if core.is_compiled_with_cuda() or core.is_compiled_with_rocm(): - if (os.getenv('FLAGS_enable_gpu_memory_usage_log') == None): + if os.getenv('FLAGS_enable_gpu_memory_usage_log') == None: os.environ['FLAGS_enable_gpu_memory_usage_log'] = 'true' os.environ['FLAGS_enable_gpu_memory_usage_log_mb'] = 'false' @@ -52,10 +52,12 @@ def main(): if not res.wasSuccessful(): some_test_failed = True - print(module_name, - 'failed\n', - buffer.getvalue(), - file=sys.stderr) + print( + module_name, + 'failed\n', + buffer.getvalue(), + file=sys.stderr, + ) if flag_need_static_mode: paddle.disable_static() diff --git a/tools/test_sampcd_processor.py b/tools/test_sampcd_processor.py index 42f5d96e71c0d8ac8eb2a94569b39c2d3c4af4d5..d389b2dbc870b58064d8045b34617eed742d550b 100644 --- a/tools/test_sampcd_processor.py +++ b/tools/test_sampcd_processor.py @@ -32,7 +32,6 @@ from sampcd_processor import is_required_match class Test_find_all(unittest.TestCase): - def test_find_none(self): self.assertEqual(0, len(find_all('hello', 'world'))) @@ -40,12 +39,12 @@ class Test_find_all(unittest.TestCase): self.assertListEqual([0], find_all('hello', 'hello')) def test_find_two(self): - self.assertListEqual([1, 15], - find_all(' hello, world; hello paddle!', 'hello')) + self.assertListEqual( + [1, 15], find_all(' hello, world; hello paddle!', 'hello') + ) class Test_find_last_future_line_end(unittest.TestCase): - def test_no_instant(self): samplecodes = """ print(10//3) @@ -60,8 +59,9 @@ class Test_find_last_future_line_end(unittest.TestCase): """ mo = re.search("print_function\n", samplecodes) 
self.assertIsNotNone(mo) - self.assertGreaterEqual(find_last_future_line_end(samplecodes), - mo.end()) + self.assertGreaterEqual( + find_last_future_line_end(samplecodes), mo.end() + ) def test_2_instant(self): samplecodes = """ @@ -72,12 +72,12 @@ class Test_find_last_future_line_end(unittest.TestCase): """ mo = re.search("division\n", samplecodes) self.assertIsNotNone(mo) - self.assertGreaterEqual(find_last_future_line_end(samplecodes), - mo.end()) + self.assertGreaterEqual( + find_last_future_line_end(samplecodes), mo.end() + ) class Test_extract_code_blocks_from_docstr(unittest.TestCase): - def test_no_samplecode(self): docstr = """ placeholder @@ -103,12 +103,17 @@ class Test_extract_code_blocks_from_docstr(unittest.TestCase): print(1+1) """ codeblocks = extract_code_blocks_from_docstr(docstr) - self.assertListEqual(codeblocks, [{ - 'codes': """print(1+1)""", - 'name': None, - 'id': 1, - 'required': None, - }]) + self.assertListEqual( + codeblocks, + [ + { + 'codes': """print(1+1)""", + 'name': None, + 'id': 1, + 'required': None, + } + ], + ) def test_2_samplecodes(self): docstr = """ @@ -126,22 +131,27 @@ class Test_extract_code_blocks_from_docstr(unittest.TestCase): print(1+1) """ codeblocks = extract_code_blocks_from_docstr(docstr) - self.assertListEqual(codeblocks, [{ - 'codes': """print(1/0)""", - 'name': None, - 'id': 1, - 'required': None, - }, { - 'codes': """# required: gpu + self.assertListEqual( + codeblocks, + [ + { + 'codes': """print(1/0)""", + 'name': None, + 'id': 1, + 'required': None, + }, + { + 'codes': """# required: gpu print(1+1)""", - 'name': 'one_plus_one', - 'id': 2, - 'required': 'gpu', - }]) + 'name': 'one_plus_one', + 'id': 2, + 'required': 'gpu', + }, + ], + ) class Test_insert_codes_into_codeblock(unittest.TestCase): - def test_required_None(self): codeblock = { 'codes': """print(1/0)""", @@ -155,7 +165,8 @@ import os os.environ["CUDA_VISIBLE_DEVICES"] = "" print(1/0) print("not-specified's sample code (name:None, id:1) is executed successfully!")""", - insert_codes_into_codeblock(codeblock)) + insert_codes_into_codeblock(codeblock), + ) def test_required_gpu(self): codeblock = { @@ -172,7 +183,8 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "0" # required: gpu print(1+1) print("not-specified's sample code (name:None, id:1) is executed successfully!")""", - insert_codes_into_codeblock(codeblock)) + insert_codes_into_codeblock(codeblock), + ) def test_from_future(self): codeblock = { @@ -193,7 +205,8 @@ import os os.environ["CUDA_VISIBLE_DEVICES"] = "" print(10//3) print("not-specified's sample code (name:future, id:1) is executed successfully!")""", - insert_codes_into_codeblock(codeblock)) + insert_codes_into_codeblock(codeblock), + ) def clear_capacity(): @@ -204,7 +217,6 @@ def clear_capacity(): class Test_get_test_capacity(unittest.TestCase): - def setUp(self): clear_capacity() get_test_capacity() @@ -216,34 +228,40 @@ class Test_get_test_capacity(unittest.TestCase): def test_NoEnvVar(self): clear_capacity() get_test_capacity() - self.assertCountEqual([ - 'cpu', - ], sampcd_processor.SAMPLE_CODE_TEST_CAPACITY) + self.assertCountEqual( + [ + 'cpu', + ], + sampcd_processor.SAMPLE_CODE_TEST_CAPACITY, + ) def test_NoEnvVar_RUN_ON_DEVICE_gpu(self): clear_capacity() sampcd_processor.RUN_ON_DEVICE = 'gpu' get_test_capacity() - self.assertCountEqual(['cpu', 'gpu'], - sampcd_processor.SAMPLE_CODE_TEST_CAPACITY) + self.assertCountEqual( + ['cpu', 'gpu'], sampcd_processor.SAMPLE_CODE_TEST_CAPACITY + ) def test_EnvVar_gpu(self): clear_capacity() 
os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu' get_test_capacity() - self.assertCountEqual(['cpu', 'gpu'], - sampcd_processor.SAMPLE_CODE_TEST_CAPACITY) + self.assertCountEqual( + ['cpu', 'gpu'], sampcd_processor.SAMPLE_CODE_TEST_CAPACITY + ) def test_EnvVar_gpu_and_distributed(self): clear_capacity() os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu,distributed' get_test_capacity() - self.assertCountEqual(['cpu', 'gpu', 'distributed'], - sampcd_processor.SAMPLE_CODE_TEST_CAPACITY) + self.assertCountEqual( + ['cpu', 'gpu', 'distributed'], + sampcd_processor.SAMPLE_CODE_TEST_CAPACITY, + ) class Test_is_required_match(unittest.TestCase): - def setUp(self): clear_capacity() @@ -284,16 +302,17 @@ class Test_is_required_match(unittest.TestCase): class Test_execute_samplecode(unittest.TestCase): - def setUp(self): if not os.path.exists(sampcd_processor.SAMPLECODE_TEMPDIR): os.mkdir(sampcd_processor.SAMPLECODE_TEMPDIR) self.successSampleCodeFile = os.path.join( - sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_success.py') + sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_success.py' + ) with open(self.successSampleCodeFile, 'w') as f: f.write('print(1+1)') self.failedSampleCodeFile = os.path.join( - sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_failed.py') + sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_failed.py' + ) with open(self.failedSampleCodeFile, 'w') as f: f.write('print(1/0)') @@ -303,7 +322,8 @@ class Test_execute_samplecode(unittest.TestCase): def test_run_success(self): result, tfname, msg, exec_time = execute_samplecode( - self.successSampleCodeFile) + self.successSampleCodeFile + ) self.assertTrue(result) self.assertEqual(self.successSampleCodeFile, tfname) self.assertIsNotNone(msg) @@ -312,7 +332,8 @@ class Test_execute_samplecode(unittest.TestCase): def test_run_failed(self): result, tfname, msg, exec_time = execute_samplecode( - self.failedSampleCodeFile) + self.failedSampleCodeFile + ) self.assertFalse(result) self.assertEqual(self.failedSampleCodeFile, tfname) self.assertIsNotNone(msg) @@ -326,7 +347,6 @@ def clear_summary_info(): class Test_sampcd_extract_to_file(unittest.TestCase): - def setUp(self): if not os.path.exists(sampcd_processor.SAMPLECODE_TEMPDIR): os.mkdir(sampcd_processor.SAMPLECODE_TEMPDIR) @@ -348,10 +368,15 @@ class Test_sampcd_extract_to_file(unittest.TestCase): """ funcname = 'one_plus_one' sample_code_filenames = sampcd_extract_to_file(comments, funcname) - self.assertCountEqual([ - os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR, - funcname + '_example.py') - ], sample_code_filenames) + self.assertCountEqual( + [ + os.path.join( + sampcd_processor.SAMPLECODE_TEMPDIR, + funcname + '_example.py', + ) + ], + sample_code_filenames, + ) def test_no_samplecode(self): comments = """ @@ -375,12 +400,19 @@ class Test_sampcd_extract_to_file(unittest.TestCase): """ funcname = 'one_plus_one' sample_code_filenames = sampcd_extract_to_file(comments, funcname) - self.assertCountEqual([ - os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR, - funcname + '_example_1.py'), - os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR, - funcname + '_example_2.py') - ], sample_code_filenames) + self.assertCountEqual( + [ + os.path.join( + sampcd_processor.SAMPLECODE_TEMPDIR, + funcname + '_example_1.py', + ), + os.path.join( + sampcd_processor.SAMPLECODE_TEMPDIR, + funcname + '_example_2.py', + ), + ], + sample_code_filenames, + ) def test_2_samplecodes_has_skipped(self): comments = """ @@ -421,71 +453,100 @@ class 
Test_sampcd_extract_to_file(unittest.TestCase): get_test_capacity() sample_code_filenames = sampcd_extract_to_file(comments, funcname) - self.assertCountEqual([ - os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR, - funcname + '_example_2.py') - ], sample_code_filenames) - self.assertCountEqual(sampcd_processor.SUMMARY_INFO['skiptest'], - [funcname + '-1']) - self.assertCountEqual(sampcd_processor.SUMMARY_INFO['gpu'], - [funcname + '-3', funcname + '-6']) - self.assertCountEqual(sampcd_processor.SUMMARY_INFO['xpu'], - [funcname + '-4']) - self.assertCountEqual(sampcd_processor.SUMMARY_INFO['distributed'], - [funcname + '-5']) + self.assertCountEqual( + [ + os.path.join( + sampcd_processor.SAMPLECODE_TEMPDIR, + funcname + '_example_2.py', + ) + ], + sample_code_filenames, + ) + self.assertCountEqual( + sampcd_processor.SUMMARY_INFO['skiptest'], [funcname + '-1'] + ) + self.assertCountEqual( + sampcd_processor.SUMMARY_INFO['gpu'], + [funcname + '-3', funcname + '-6'], + ) + self.assertCountEqual( + sampcd_processor.SUMMARY_INFO['xpu'], [funcname + '-4'] + ) + self.assertCountEqual( + sampcd_processor.SUMMARY_INFO['distributed'], [funcname + '-5'] + ) class Test_get_api_md5(unittest.TestCase): - def setUp(self): self.api_pr_spec_filename = os.path.abspath( - os.path.join(os.getcwd(), "..", 'paddle/fluid/API_PR.spec')) + os.path.join(os.getcwd(), "..", 'paddle/fluid/API_PR.spec') + ) with open(self.api_pr_spec_filename, 'w') as f: - f.write("\n".join([ - """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", - """paddle.two_plus_two (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55two'))""", - """paddle.three_plus_three (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6cthree'))""", - """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", - """paddle.five_plus_five (ArgSpec(), ('document', 'ff0f188c95030158cc6398d2a6c5five'))""", - ])) + f.write( + "\n".join( + [ + """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", + """paddle.two_plus_two (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55two'))""", + """paddle.three_plus_three (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6cthree'))""", + """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", + """paddle.five_plus_five (ArgSpec(), ('document', 'ff0f188c95030158cc6398d2a6c5five'))""", + ] + ) + ) def tearDown(self): os.remove(self.api_pr_spec_filename) def test_get_api_md5(self): res = get_api_md5('paddle/fluid/API_PR.spec') - self.assertEqual("ff0f188c95030158cc6398d2a6c55one", - res['paddle.one_plus_one']) - self.assertEqual("ff0f188c95030158cc6398d2a6c55two", - res['paddle.two_plus_two']) - self.assertEqual("ff0f188c95030158cc6398d2a6cthree", - res['paddle.three_plus_three']) - self.assertEqual("ff0f188c95030158cc6398d2a6c5four", - res['paddle.four_plus_four']) - self.assertEqual("ff0f188c95030158cc6398d2a6c5five", - res['paddle.five_plus_five']) + self.assertEqual( + "ff0f188c95030158cc6398d2a6c55one", res['paddle.one_plus_one'] + ) + self.assertEqual( + "ff0f188c95030158cc6398d2a6c55two", res['paddle.two_plus_two'] + ) + self.assertEqual( + 
"ff0f188c95030158cc6398d2a6cthree", res['paddle.three_plus_three'] + ) + self.assertEqual( + "ff0f188c95030158cc6398d2a6c5four", res['paddle.four_plus_four'] + ) + self.assertEqual( + "ff0f188c95030158cc6398d2a6c5five", res['paddle.five_plus_five'] + ) class Test_get_incrementapi(unittest.TestCase): - def setUp(self): self.api_pr_spec_filename = os.path.abspath( - os.path.join(os.getcwd(), "..", 'paddle/fluid/API_PR.spec')) + os.path.join(os.getcwd(), "..", 'paddle/fluid/API_PR.spec') + ) with open(self.api_pr_spec_filename, 'w') as f: - f.write("\n".join([ - """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", - """paddle.two_plus_two (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55two'))""", - """paddle.three_plus_three (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6cthree'))""", - """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", - ])) + f.write( + "\n".join( + [ + """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", + """paddle.two_plus_two (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55two'))""", + """paddle.three_plus_three (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6cthree'))""", + """paddle.four_plus_four (paddle.four_plus_four, ('document', 'ff0f188c95030158cc6398d2a6c5four'))""", + ] + ) + ) self.api_dev_spec_filename = os.path.abspath( - os.path.join(os.getcwd(), "..", 'paddle/fluid/API_DEV.spec')) + os.path.join(os.getcwd(), "..", 'paddle/fluid/API_DEV.spec') + ) with open(self.api_dev_spec_filename, 'w') as f: - f.write("\n".join([ - """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", - ])) + f.write( + "\n".join( + [ + """paddle.one_plus_one (ArgSpec(args=[], varargs=None, keywords=None, defaults=(,)), ('document', 'ff0f188c95030158cc6398d2a6c55one'))""", + ] + ) + ) self.api_diff_spec_filename = os.path.abspath( - os.path.join(os.getcwd(), "dev_pr_diff_api.spec")) + os.path.join(os.getcwd(), "dev_pr_diff_api.spec") + ) def tearDown(self): os.remove(self.api_pr_spec_filename) @@ -496,10 +557,14 @@ class Test_get_incrementapi(unittest.TestCase): get_incrementapi() with open(self.api_diff_spec_filename, 'r') as f: lines = f.readlines() - self.assertCountEqual([ - "paddle.two_plus_two\n", "paddle.three_plus_three\n", - "paddle.four_plus_four\n" - ], lines) + self.assertCountEqual( + [ + "paddle.two_plus_two\n", + "paddle.three_plus_three\n", + "paddle.four_plus_four\n", + ], + lines, + ) # https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/layers/ops.py diff --git a/tools/timeline.py b/tools/timeline.py index e3de686f82997de732ef58605791eadce21dbfba..5323ea0f907ec603a310538306229fc7db1b1606 100644 --- a/tools/timeline.py +++ b/tools/timeline.py @@ -23,16 +23,15 @@ parser.add_argument( type=str, default='', help='Input profile file name. 
If there are multiple file, the format ' - 'should be trainer1=file1,trainer2=file2,ps=file3') -parser.add_argument('--timeline_path', - type=str, - default='', - help='Output timeline file name.') + 'should be trainer1=file1,trainer2=file2,ps=file3', +) +parser.add_argument( + '--timeline_path', type=str, default='', help='Output timeline file name.' +) args = parser.parse_args() class _ChromeTraceFormatter(object): - def __init__(self): self._events = [] self._metadata = [] @@ -128,7 +127,6 @@ class _ChromeTraceFormatter(object): class Timeline(object): - def __init__(self, profile_dict): self._profile_dict = profile_dict self._pid = 0 @@ -153,13 +151,15 @@ class Timeline(object): self._chrome_trace.emit_pid("%s:cuda_api" % k, pid) else: self._chrome_trace.emit_pid( - "%s:cpu:block:%d" % (k, event.device_id), pid) + "%s:cpu:block:%d" % (k, event.device_id), pid + ) elif event.type == profiler_pb2.Event.GPUKernel: if (k, event.device_id, "GPUKernel") not in self._devices: pid = self._allocate_pid() self._devices[(k, event.device_id, "GPUKernel")] = pid self._chrome_trace.emit_pid( - "%s:gpu:%d" % (k, event.device_id), pid) + "%s:gpu:%d" % (k, event.device_id), pid + ) if not hasattr(profile_pb, "mem_events"): continue for mevent in profile_pb.mem_events: @@ -169,50 +169,63 @@ class Timeline(object): self._mem_devices[(k, mevent.device_id, "GPU")] = pid self._chrome_trace.emit_pid( "memory usage on %s:gpu:%d" % (k, mevent.device_id), - pid) + pid, + ) elif mevent.place == profiler_pb2.MemEvent.CPUPlace: if (k, mevent.device_id, "CPU") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, mevent.device_id, "CPU")] = pid self._chrome_trace.emit_pid( "memory usage on %s:cpu:%d" % (k, mevent.device_id), - pid) + pid, + ) elif mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace: - if (k, mevent.device_id, - "CUDAPinnedPlace") not in self._mem_devices: + if ( + k, + mevent.device_id, + "CUDAPinnedPlace", + ) not in self._mem_devices: pid = self._allocate_pid() - self._mem_devices[(k, mevent.device_id, - "CUDAPinnedPlace")] = pid + self._mem_devices[ + (k, mevent.device_id, "CUDAPinnedPlace") + ] = pid self._chrome_trace.emit_pid( - "memory usage on %s:cudapinnedplace:%d" % - (k, mevent.device_id), pid) + "memory usage on %s:cudapinnedplace:%d" + % (k, mevent.device_id), + pid, + ) elif mevent.place == profiler_pb2.MemEvent.NPUPlace: if (k, mevent.device_id, "NPU") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, mevent.device_id, "NPU")] = pid self._chrome_trace.emit_pid( "memory usage on %s:npu:%d" % (k, mevent.device_id), - pid) + pid, + ) if (k, 0, "CPU") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, 0, "CPU")] = pid self._chrome_trace.emit_pid( - "memory usage on %s:cpu:%d" % (k, 0), pid) + "memory usage on %s:cpu:%d" % (k, 0), pid + ) if (k, 0, "GPU") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, 0, "GPU")] = pid self._chrome_trace.emit_pid( - "memory usage on %s:gpu:%d" % (k, 0), pid) + "memory usage on %s:gpu:%d" % (k, 0), pid + ) if (k, 0, "CUDAPinnedPlace") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, 0, "CUDAPinnedPlace")] = pid self._chrome_trace.emit_pid( - "memory usage on %s:cudapinnedplace:%d" % (k, 0), pid) + "memory usage on %s:cudapinnedplace:%d" % (k, 0), pid + ) if (k, 0, "NPU") not in self._mem_devices: pid = self._allocate_pid() self._mem_devices[(k, 0, "NPU")] = pid self._chrome_trace.emit_pid( - "memory usage on %s:npu:%d" % (k, 0), pid) 
+ "memory usage on %s:npu:%d" % (k, 0), pid + ) def _allocate_events(self): for k, profile_pb in self._profile_dict.items(): @@ -230,8 +243,14 @@ class Timeline(object): # TODO(panyx0718): Chrome tracing only handles ms. However, some # ops takes micro-seconds. Hence, we keep the ns here. self._chrome_trace.emit_region( - event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid, - event.sub_device_id, 'Op', event.name, args) + event.start_ns, + (event.end_ns - event.start_ns) / 1.0, + pid, + event.sub_device_id, + 'Op', + event.name, + args, + ) def _allocate_memory_event(self): if not hasattr(profiler_pb2, "MemEvent"): @@ -240,7 +259,7 @@ class Timeline(object): profiler_pb2.MemEvent.CPUPlace: "CPU", profiler_pb2.MemEvent.CUDAPlace: "GPU", profiler_pb2.MemEvent.CUDAPinnedPlace: "CUDAPinnedPlace", - profiler_pb2.MemEvent.NPUPlace: "NPU" + profiler_pb2.MemEvent.NPUPlace: "NPU", } for k, profile_pb in self._profile_dict.items(): mem_list = [] @@ -273,15 +292,21 @@ class Timeline(object): total_size = 0 while i < len(mem_list): total_size += mem_list[i]['size'] - while i < len(mem_list) - 1 and mem_list[i]['time'] == mem_list[ - i + 1]['time']: + while ( + i < len(mem_list) - 1 + and mem_list[i]['time'] == mem_list[i + 1]['time'] + ): total_size += mem_list[i + 1]['size'] i += 1 - self._chrome_trace.emit_counter("Memory", "Memory", - mem_list[i]['pid'], - mem_list[i]['time'], 0, - total_size) + self._chrome_trace.emit_counter( + "Memory", + "Memory", + mem_list[i]['pid'], + mem_list[i]['time'], + 0, + total_size, + ) i += 1 def generate_chrome_trace(self): diff --git a/tools/windows/get_prec_ut_list.py b/tools/windows/get_prec_ut_list.py index 4564c75910e3becc458eaabdbbd845e2cbe9c6b8..b5bb6a23673ac5fe4a5f6d0e9b9d0aa5ad89eb06 100644 --- a/tools/windows/get_prec_ut_list.py +++ b/tools/windows/get_prec_ut_list.py @@ -47,5 +47,5 @@ if __name__ == '__main__': file_path = os.path.join(BUILD_DIR, 'all_ut_list') with open(file_path, 'r') as f: all_test_cases = f.read() - #prec_test_cases = sys.argv[2] + # prec_test_cases = sys.argv[2] get_prec_ut_list(all_test_cases, prec_test_cases)